Merge tag 'docs-for-linus' of git://git.lwn.net/linux

Pull Documentation updates from Jon Corbet:
 "A bit busier this time around.

  The most interesting thing (IMO) this time around is some beginning
  infrastructural work to allow documents to be written using
   reStructuredText.  Maybe someday, in a galaxy far far away, we'll be
  able to eliminate the DocBook dependency and have a much better
  integrated set of kernel docs.  Someday.

  Beyond that, there's a new document on security hardening from Kees,
  the movement of some sample code over to samples/, a number of
  improvements to the serial docs from Geert, and the usual collection
  of corrections, typo fixes, etc"

* tag 'docs-for-linus' of git://git.lwn.net/linux: (55 commits)
  doc: self-protection: provide initial details
  serial: doc: Use port->state instead of info
  serial: doc: Always refer to tty_port->mutex
  Documentation: vm: Spelling s/paltform/platform/g
  Documentation/memcg: update kmem limit doc as codes behavior
  docproc: print a comment about autogeneration for rst output
  docproc: add support for reStructuredText format via --rst option
  docproc: abstract terminating lines at first space
  docproc: abstract docproc directive detection
  docproc: reduce unnecessary indentation
  docproc: add variables for subcommand and filename
  kernel-doc: use rst C domain directives and references for types
  kernel-doc: produce RestructuredText output
  kernel-doc: rewrite usage description, remove duplicated comments
  Doc: correct the location of sysrq.c
  Documentation: fix common spelling mistakes
  samples: v4l: from Documentation to samples directory
  samples: connector: from Documentation to samples directory
  Documentation: xillybus: fix spelling mistake
  Documentation: x86: fix spelling mistakes
  ...
diff --git a/.gitignore b/.gitignore
index fd3a355..0c320bf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -62,7 +62,7 @@
 /tar-install/
 
 #
-# git files that we don't want to ignore even it they are dot-files
+# git files that we don't want to ignore even if they are dot-files
 #
 !.gitignore
 !.mailmap
diff --git a/.mailmap b/.mailmap
index 7e6c533..08b8042 100644
--- a/.mailmap
+++ b/.mailmap
@@ -33,6 +33,7 @@
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Christoph Hellwig <hch@lst.de>
+Christophe Ricard <christophe.ricard@gmail.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
 David Brownell <david-b@pacbell.net>
@@ -47,6 +48,9 @@
 Felix Moeller <felix@derklecks.de>
 Filipe Lautert <filipe@icewall.org>
 Franck Bui-Huu <vagabon.xyz@gmail.com>
+Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
 Frank Zago <fzago@systemfabricworks.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
@@ -65,6 +69,7 @@
 Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
@@ -78,6 +83,7 @@
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
diff --git a/Documentation/ABI/testing/sysfs-ibft b/Documentation/ABI/testing/sysfs-ibft
index cac3930..7d6725f 100644
--- a/Documentation/ABI/testing/sysfs-ibft
+++ b/Documentation/ABI/testing/sysfs-ibft
@@ -21,3 +21,13 @@
 Description:	The /sys/firmware/ibft/ethernetX directory will contain
 		files that expose the iSCSI Boot Firmware Table NIC data.
 		Usually this contains the IP address, MAC, and gateway of the NIC.
+
+What:		/sys/firmware/ibft/acpi_header
+Date:		March 2016
+Contact:	David Bond <dbond@suse.com>
+Description:	The /sys/firmware/ibft/acpi_header directory will contain files
+		that expose the SIGNATURE, OEM_ID, and OEM_TABLE_ID fields of the
+		acpi table header of the iBFT structure.  This will allow for
+		identification of the creator of the table which is useful in
+		determining quirks associated with some adapters when used in
+		hardware vs software iscsi initiator mode.
diff --git a/Documentation/ABI/testing/sysfs-platform-hidma b/Documentation/ABI/testing/sysfs-platform-hidma
new file mode 100644
index 0000000..d364415
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-hidma
@@ -0,0 +1,9 @@
+What:		/sys/devices/platform/hidma-*/chid
+		/sys/devices/platform/QCOM8061:*/chid
+Date:		Dec 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Contains the ID of the channel within the HIDMA instance.
+		It is used to associate a given HIDMA channel with the
+		priority and weight calls in the management interface.
diff --git a/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl b/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
index 7ac7d726..3c35148 100644
--- a/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
+++ b/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
@@ -1,23 +1,18 @@
-What:		/sys/devices/platform/<i2c-demux-name>/cur_master
+What:		/sys/devices/platform/<i2c-demux-name>/available_masters
 Date:		January 2016
 KernelVersion:	4.6
 Contact:	Wolfram Sang <wsa@the-dreams.de>
 Description:
+		Reading the file will give you a list of masters which can be
+		selected for a demultiplexed bus. The format is
+		"<index>:<name>". Example from a Renesas Lager board:
 
-This file selects the active I2C master for a demultiplexed bus.
+		0:/i2c@e6500000 1:/i2c@e6508000
 
-Write 0 there for the first master, 1 for the second etc. Reading the file will
-give you a list with the active master marked. Example from a Renesas Lager
-board:
-
-root@Lager:~# cat /sys/devices/platform/i2c@8/cur_master
-* 0 - /i2c@9
-  1 - /i2c@e6520000
-  2 - /i2c@e6530000
-
-root@Lager:~# echo 2 > /sys/devices/platform/i2c@8/cur_master
-
-root@Lager:~# cat /sys/devices/platform/i2c@8/cur_master
-  0 - /i2c@9
-  1 - /i2c@e6520000
-* 2 - /i2c@e6530000
+What:		/sys/devices/platform/<i2c-demux-name>/current_master
+Date:		January 2016
+KernelVersion:	4.6
+Contact:	Wolfram Sang <wsa@the-dreams.de>
+Description:
+		This file selects/shows the active I2C master for a demultiplexed
+		bus. It uses the <index> value from the file 'available_masters'.
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index f9b9ad7..5f7c559 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -75,7 +75,6 @@
       <chapter>
       <title>Device registration</title>
 !Pinclude/net/cfg80211.h Device registration
-!Finclude/net/cfg80211.h ieee80211_band
 !Finclude/net/cfg80211.h ieee80211_channel_flags
 !Finclude/net/cfg80211.h ieee80211_channel
 !Finclude/net/cfg80211.h ieee80211_rate_flags
@@ -136,6 +135,7 @@
 !Finclude/net/cfg80211.h cfg80211_tx_mlme_mgmt
 !Finclude/net/cfg80211.h cfg80211_ibss_joined
 !Finclude/net/cfg80211.h cfg80211_connect_result
+!Finclude/net/cfg80211.h cfg80211_connect_bss
 !Finclude/net/cfg80211.h cfg80211_roamed
 !Finclude/net/cfg80211.h cfg80211_disconnected
 !Finclude/net/cfg80211.h cfg80211_ready_on_channel
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
index 348619f..d55dc5a 100644
--- a/Documentation/DocBook/crypto-API.tmpl
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -1936,9 +1936,9 @@
 	}
 
 	req = skcipher_request_alloc(skcipher, GFP_KERNEL);
-	if (IS_ERR(req)) {
-		pr_info("could not allocate request queue\n");
-		ret = PTR_ERR(req);
+	if (!req) {
+		pr_info("could not allocate skcipher request\n");
+		ret = -ENOMEM;
 		goto out;
 	}
 
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 184f3c7..893b2ca 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -233,6 +233,7 @@
 !Iinclude/media/v4l2-mediabus.h
 !Iinclude/media/v4l2-mem2mem.h
 !Iinclude/media/v4l2-of.h
+!Iinclude/media/v4l2-rect.h
 !Iinclude/media/v4l2-subdev.h
 !Iinclude/media/videobuf2-core.h
 !Iinclude/media/videobuf2-v4l2.h
diff --git a/Documentation/DocBook/media/dvb/net.xml b/Documentation/DocBook/media/dvb/net.xml
index d2e44b7..da095ed 100644
--- a/Documentation/DocBook/media/dvb/net.xml
+++ b/Documentation/DocBook/media/dvb/net.xml
@@ -15,7 +15,7 @@
     that are present on the transport stream. This is done through
     <constant>/dev/dvb/adapter?/net?</constant> device node.
     The data will be available via virtual <constant>dvb?_?</constant>
-    network interfaces, and will be controled/routed via the standard
+    network interfaces, and will be controlled/routed via the standard
     ip tools (like ip, route, netstat, ifconfig, etc).</para>
 <para> Data types and and ioctl definitions are defined via
     <constant>linux/dvb/net.h</constant> header.</para>
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index 5399e89..82fa328 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2686,50 +2686,12 @@
 
       <itemizedlist>
         <listitem>
-	  <para>Video Output Overlay (OSD) Interface, <xref
-	    linkend="osd" />.</para>
-        </listitem>
-        <listitem>
 	  <para>&VIDIOC-DBG-G-REGISTER; and &VIDIOC-DBG-S-REGISTER;
 ioctls.</para>
         </listitem>
         <listitem>
 	  <para>&VIDIOC-DBG-G-CHIP-INFO; ioctl.</para>
         </listitem>
-        <listitem>
-	  <para>&VIDIOC-ENUM-DV-TIMINGS;, &VIDIOC-QUERY-DV-TIMINGS; and
-	  &VIDIOC-DV-TIMINGS-CAP; ioctls.</para>
-        </listitem>
-        <listitem>
-	  <para>Flash API. <xref linkend="flash-controls" /></para>
-        </listitem>
-        <listitem>
-	  <para>&VIDIOC-CREATE-BUFS; and &VIDIOC-PREPARE-BUF; ioctls.</para>
-        </listitem>
-        <listitem>
-	  <para>Selection API. <xref linkend="selection-api" /></para>
-        </listitem>
-        <listitem>
-	  <para>Sub-device selection API: &VIDIOC-SUBDEV-G-SELECTION;
-	  and &VIDIOC-SUBDEV-S-SELECTION; ioctls.</para>
-        </listitem>
-        <listitem>
-	  <para>Support for frequency band enumeration: &VIDIOC-ENUM-FREQ-BANDS; ioctl.</para>
-        </listitem>
-        <listitem>
-	  <para>Vendor and device specific media bus pixel formats.
-	    <xref linkend="v4l2-mbus-vendor-spec-fmts" />.</para>
-        </listitem>
-        <listitem>
-	  <para>Importing DMABUF file descriptors as a new IO method described
-	  in <xref linkend="dmabuf" />.</para>
-        </listitem>
-        <listitem>
-	  <para>Exporting DMABUF files using &VIDIOC-EXPBUF; ioctl.</para>
-        </listitem>
-        <listitem>
-	  <para>Software Defined Radio (SDR) Interface, <xref linkend="sdr" />.</para>
-        </listitem>
       </itemizedlist>
     </section>
 
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index f5f5ce8..e2e5484 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -4272,13 +4272,6 @@
     <section id="flash-controls">
       <title>Flash Control Reference</title>
 
-      <note>
-	<title>Experimental</title>
-
-	<para>This is an <link linkend="experimental">experimental</link>
-interface and may change in the future.</para>
-      </note>
-
       <para>
 	The V4L2 flash controls are intended to provide generic access
 	to flash controller devices. Flash controller devices are
@@ -4743,14 +4736,6 @@
     <section id="image-source-controls">
       <title>Image Source Control Reference</title>
 
-      <note>
-	<title>Experimental</title>
-
-	<para>This is an <link
-	linkend="experimental">experimental</link> interface and may
-	change in the future.</para>
-      </note>
-
       <para>
 	The Image Source control class is intended for low-level
 	control of image source devices such as image sensors. The
@@ -4862,14 +4847,6 @@
     <section id="image-process-controls">
       <title>Image Process Control Reference</title>
 
-      <note>
-	<title>Experimental</title>
-
-	<para>This is an <link
-	linkend="experimental">experimental</link> interface and may
-	change in the future.</para>
-      </note>
-
       <para>
 	The Image Process control class is intended for low-level control of
 	image processing functions. Unlike
@@ -4955,14 +4932,6 @@
     <section id="dv-controls">
       <title>Digital Video Control Reference</title>
 
-      <note>
-	<title>Experimental</title>
-
-	<para>This is an <link
-	linkend="experimental">experimental</link> interface and may
-	change in the future.</para>
-      </note>
-
       <para>
 	The Digital Video control class is intended to control receivers
 	and transmitters for <ulink url="http://en.wikipedia.org/wiki/Vga">VGA</ulink>,
diff --git a/Documentation/DocBook/media/v4l/dev-sdr.xml b/Documentation/DocBook/media/v4l/dev-sdr.xml
index a659771..6da1157 100644
--- a/Documentation/DocBook/media/v4l/dev-sdr.xml
+++ b/Documentation/DocBook/media/v4l/dev-sdr.xml
@@ -1,11 +1,5 @@
   <title>Software Defined Radio Interface (SDR)</title>
 
-  <note>
-    <title>Experimental</title>
-    <para>This is an <link linkend="experimental"> experimental </link>
-    interface and may change in the future.</para>
-  </note>
-
   <para>
 SDR is an abbreviation of Software Defined Radio, the radio device
 which uses application software for modulation or demodulation. This interface
diff --git a/Documentation/DocBook/media/v4l/dev-subdev.xml b/Documentation/DocBook/media/v4l/dev-subdev.xml
index 4f0ba58..f4bc27a 100644
--- a/Documentation/DocBook/media/v4l/dev-subdev.xml
+++ b/Documentation/DocBook/media/v4l/dev-subdev.xml
@@ -1,11 +1,5 @@
   <title>Sub-device Interface</title>
 
-  <note>
-    <title>Experimental</title>
-    <para>This is an <link linkend="experimental">experimental</link>
-    interface and may change in the future.</para>
-  </note>
-
   <para>The complex nature of V4L2 devices, where hardware is often made of
   several integrated circuits that need to interact with each other in a
   controlled way, leads to complex V4L2 drivers. The drivers usually reflect
diff --git a/Documentation/DocBook/media/v4l/io.xml b/Documentation/DocBook/media/v4l/io.xml
index 144158b..e09025d 100644
--- a/Documentation/DocBook/media/v4l/io.xml
+++ b/Documentation/DocBook/media/v4l/io.xml
@@ -475,12 +475,6 @@
   <section id="dmabuf">
     <title>Streaming I/O (DMA buffer importing)</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental">experimental</link>
-      interface and may change in the future.</para>
-    </note>
-
 <para>The DMABUF framework provides a generic method for sharing buffers
 between multiple devices. Device drivers that support DMABUF can export a DMA
 buffer to userspace as a file descriptor (known as the exporter role), import a
diff --git a/Documentation/DocBook/media/v4l/selection-api.xml b/Documentation/DocBook/media/v4l/selection-api.xml
index 28cbded..b764cba 100644
--- a/Documentation/DocBook/media/v4l/selection-api.xml
+++ b/Documentation/DocBook/media/v4l/selection-api.xml
@@ -1,13 +1,6 @@
 <section id="selection-api">
 
-  <title>Experimental API for cropping, composing and scaling</title>
-
-      <note>
-	<title>Experimental</title>
-
-	<para>This is an <link linkend="experimental">experimental</link>
-interface and may change in the future.</para>
-      </note>
+  <title>API for cropping, composing and scaling</title>
 
   <section>
     <title>Introduction</title>
diff --git a/Documentation/DocBook/media/v4l/subdev-formats.xml b/Documentation/DocBook/media/v4l/subdev-formats.xml
index 4e73345..199c84e 100644
--- a/Documentation/DocBook/media/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/media/v4l/subdev-formats.xml
@@ -4002,12 +4002,6 @@
     <section id="v4l2-mbus-vendor-spec-fmts">
       <title>Vendor and Device Specific Formats</title>
 
-      <note>
-	<title>Experimental</title>
-	<para>This is an <link linkend="experimental">experimental</link>
-interface and may change in the future.</para>
-      </note>
-
       <para>This section lists complex data formats that are either vendor or
 	device specific.
       </para>
diff --git a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
index d81fa0d..6528e97 100644
--- a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
@@ -49,12 +49,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental"> experimental </link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>This ioctl is used to create buffers for <link linkend="mmap">memory
 mapped</link> or <link linkend="userp">user pointer</link> or <link
 linkend="dmabuf">DMA buffer</link> I/O. It can be used as an alternative or in
diff --git a/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml b/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
index a2017bf..ca9ffce 100644
--- a/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
@@ -49,14 +49,9 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental"> experimental </link>
-      interface and may change in the future.</para>
-    </note>
-
-    <para>To query the capabilities of the DV receiver/transmitter applications
-can call the <constant>VIDIOC_DV_TIMINGS_CAP</constant> ioctl on a video node
+    <para>To query the capabilities of the DV receiver/transmitter applications initialize the
+<structfield>pad</structfield> field to 0, zero the reserved array of &v4l2-dv-timings-cap;
+and call the <constant>VIDIOC_DV_TIMINGS_CAP</constant> ioctl on a video node
 and the driver will fill in the structure. Note that drivers may return
 different values after switching the video input or output.</para>
 
@@ -65,8 +60,8 @@
 directly on a subdevice node. The capabilities are specific to inputs (for DV
 receivers) or outputs (for DV transmitters), applications must specify the
 desired pad number in the &v4l2-dv-timings-cap; <structfield>pad</structfield>
-field. Attempts to query capabilities on a pad that doesn't support them will
-return an &EINVAL;.</para>
+field and zero the <structfield>reserved</structfield> array. Attempts to query
+capabilities on a pad that doesn't support them will return an &EINVAL;.</para>
 
     <table pgwide="1" frame="none" id="v4l2-bt-timings-cap">
       <title>struct <structname>v4l2_bt_timings_cap</structname></title>
@@ -145,7 +140,8 @@
 	  <row>
 	    <entry>__u32</entry>
 	    <entry><structfield>reserved</structfield>[2]</entry>
-	    <entry>Reserved for future extensions. Drivers must set the array to zero.</entry>
+	    <entry>Reserved for future extensions. Drivers and applications must
+	    set the array to zero.</entry>
 	  </row>
 	  <row>
 	    <entry>union</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml
index 6e3cadd..9b3d420 100644
--- a/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml
@@ -49,20 +49,15 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental"> experimental </link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>While some DV receivers or transmitters support a wide range of timings, others
 support only a limited number of timings. With this ioctl applications can enumerate a list
 of known supported timings. Call &VIDIOC-DV-TIMINGS-CAP; to check if it also supports other
 standards or even custom timings that are not in this list.</para>
 
     <para>To query the available timings, applications initialize the
-<structfield>index</structfield> field and zero the reserved array of &v4l2-enum-dv-timings;
-and call the <constant>VIDIOC_ENUM_DV_TIMINGS</constant> ioctl on a video node with a
+<structfield>index</structfield> field, set the <structfield>pad</structfield> field to 0,
+zero the reserved array of &v4l2-enum-dv-timings; and call the
+<constant>VIDIOC_ENUM_DV_TIMINGS</constant> ioctl on a video node with a
 pointer to this structure. Drivers fill the rest of the structure or return an
 &EINVAL; when the index is out of bounds. To enumerate all supported DV timings,
 applications shall begin at index zero, incrementing by one until the
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml b/Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml
index 4e8ea65..a0608ab 100644
--- a/Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml
@@ -49,12 +49,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental"> experimental </link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>Enumerates the frequency bands that a tuner or modulator supports.
 To do this applications initialize the <structfield>tuner</structfield>,
 <structfield>type</structfield> and <structfield>index</structfield> fields,
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
index 0ae0b6a..a6558a6 100644
--- a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
@@ -49,12 +49,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental"> experimental </link>
-      interface and may change in the future.</para>
-    </note>
-
 <para>This ioctl is an extension to the <link linkend="mmap">memory
 mapping</link> I/O method, therefore it is available only for
 <constant>V4L2_MEMORY_MMAP</constant> buffers.  It can be used to export a
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-edid.xml b/Documentation/DocBook/media/v4l/vidioc-g-edid.xml
index 2702536..b7602d3 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-edid.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-edid.xml
@@ -1,6 +1,6 @@
 <refentry id="vidioc-g-edid">
   <refmeta>
-    <refentrytitle>ioctl VIDIOC_G_EDID, VIDIOC_S_EDID</refentrytitle>
+    <refentrytitle>ioctl VIDIOC_G_EDID, VIDIOC_S_EDID, VIDIOC_SUBDEV_G_EDID, VIDIOC_SUBDEV_S_EDID</refentrytitle>
     &manvol;
   </refmeta>
 
@@ -71,7 +71,8 @@
 
     <para>To get the EDID data the application has to fill in the <structfield>pad</structfield>,
     <structfield>start_block</structfield>, <structfield>blocks</structfield> and <structfield>edid</structfield>
-    fields and call <constant>VIDIOC_G_EDID</constant>. The current EDID from block
+    fields, zero the <structfield>reserved</structfield> array and call
+    <constant>VIDIOC_G_EDID</constant>. The current EDID from block
     <structfield>start_block</structfield> and of size <structfield>blocks</structfield>
     will be placed in the memory <structfield>edid</structfield> points to. The <structfield>edid</structfield>
     pointer must point to memory at least <structfield>blocks</structfield>&nbsp;*&nbsp;128 bytes
@@ -92,8 +93,9 @@
     the driver will set <structfield>blocks</structfield> to 0 and it returns 0.</para>
 
     <para>To set the EDID blocks of a receiver the application has to fill in the <structfield>pad</structfield>,
-    <structfield>blocks</structfield> and <structfield>edid</structfield> fields and set
-    <structfield>start_block</structfield> to 0. It is not possible to set part of an EDID,
+    <structfield>blocks</structfield> and <structfield>edid</structfield> fields, set
+    <structfield>start_block</structfield> to 0 and zero the <structfield>reserved</structfield> array.
+    It is not possible to set part of an EDID,
     it is always all or nothing. Setting the EDID data is only valid for receivers as it makes
     no sense for a transmitter.</para>
 
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-selection.xml b/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
index a9c0d1dc..997f4e9 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
@@ -50,12 +50,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental"> experimental </link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>The ioctls are used to query and configure selection rectangles.</para>
 
 <para>To query the cropping (composing) rectangle set &v4l2-selection;
diff --git a/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml b/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml
index fa7ad7e..7bde698 100644
--- a/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml
@@ -48,12 +48,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental"> experimental </link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>Applications can optionally call the
 <constant>VIDIOC_PREPARE_BUF</constant> ioctl to pass ownership of the buffer
 to the driver before actually enqueuing it, using the
diff --git a/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml
index 0c93677..d41bf47 100644
--- a/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml
@@ -50,12 +50,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental"> experimental </link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>The hardware may be able to detect the current DV timings
 automatically, similar to sensing the video standard. To do so, applications
 call <constant>VIDIOC_QUERY_DV_TIMINGS</constant> with a pointer to a
diff --git a/Documentation/DocBook/media/v4l/vidioc-streamon.xml b/Documentation/DocBook/media/v4l/vidioc-streamon.xml
index df2c63d..89fd7ce 100644
--- a/Documentation/DocBook/media/v4l/vidioc-streamon.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-streamon.xml
@@ -123,6 +123,14 @@
 	  </para>
 	</listitem>
       </varlistentry>
+      <varlistentry>
+	<term><errorcode>ENOLINK</errorcode></term>
+	<listitem>
+	  <para>The driver implements Media Controller interface and
+	  the pipeline link configuration is invalid.
+	  </para>
+	</listitem>
+      </varlistentry>
     </variablelist>
   </refsect1>
 </refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml
index cff59f5..9d0251a 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml
@@ -49,12 +49,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental">experimental</link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>This ioctl lets applications enumerate available frame intervals on a
     given sub-device pad. Frame intervals only makes sense for sub-devices that
     can control the frame period on their own. This includes, for instance,
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml
index abd545e..9b91b83 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml
@@ -49,12 +49,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental">experimental</link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>This ioctl allows applications to enumerate all frame sizes
     supported by a sub-device on the given pad for the given media bus format.
     Supported formats can be retrieved with the &VIDIOC-SUBDEV-ENUM-MBUS-CODE;
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml
index 0bcb278..c67256a 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml
@@ -49,12 +49,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental">experimental</link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>To enumerate media bus formats available at a given sub-device pad
     applications initialize the <structfield>pad</structfield>, <structfield>which</structfield>
     and <structfield>index</structfield> fields of &v4l2-subdev-mbus-code-enum; and
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-fmt.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-fmt.xml
index a67cde6..781089c 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-fmt.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-g-fmt.xml
@@ -50,12 +50,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental">experimental</link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>These ioctls are used to negotiate the frame format at specific
     subdev pads in the image pipeline.</para>
 
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-frame-interval.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-frame-interval.xml
index 0bc3ea22..848ec78 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-frame-interval.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-g-frame-interval.xml
@@ -50,12 +50,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental">experimental</link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>These ioctls are used to get and set the frame interval at specific
     subdev pads in the image pipeline. The frame interval only makes sense for
     sub-devices that can control the frame period on their own. This includes,
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
index c62a736..8346b2e 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
@@ -49,12 +49,6 @@
   <refsect1>
     <title>Description</title>
 
-    <note>
-      <title>Experimental</title>
-      <para>This is an <link linkend="experimental">experimental</link>
-      interface and may change in the future.</para>
-    </note>
-
     <para>The selections are used to configure various image
     processing functionality performed by the subdevs which affect the
     image size. This currently includes cropping, scaling and
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCU.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCU.svg
new file mode 100644
index 0000000..727e270
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCU.svg
@@ -0,0 +1,474 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:28:20 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="9.1in"
+   height="8.9in"
+   viewBox="-66 -66 10932 10707"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreeClassicRCU.fig">
+  <metadata
+     id="metadata106">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs104">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3864"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="973"
+     inkscape:window-height="1137"
+     id="namedview102"
+     showgrid="false"
+     inkscape:zoom="0.9743589"
+     inkscape:cx="409.50003"
+     inkscape:cy="400.49997"
+     inkscape:window-x="915"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="10800"
+       height="5625"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="1125"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="3825"
+       y="900"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="6525"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line -->
+    <polyline
+       points="3375,6525 3375,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline14" />
+    <!-- Arrowhead on XXXpoint 3375 6525 - 3375 4860-->
+    <!-- Circle -->
+    <circle
+       cx="7425"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle18" />
+    <!-- Circle -->
+    <circle
+       cx="7875"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle20" />
+    <!-- Circle -->
+    <circle
+       cx="8325"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle22" />
+    <!-- Circle -->
+    <circle
+       cx="2025"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle24" />
+    <!-- Circle -->
+    <circle
+       cx="2475"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="2925"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="4725"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle30" />
+    <!-- Circle -->
+    <circle
+       cx="5175"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle32" />
+    <!-- Circle -->
+    <circle
+       cx="5625"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle34" />
+    <!-- Line: box -->
+    <rect
+       x="2025"
+       y="6525"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect36" />
+    <!-- Line -->
+    <polyline
+       points="2475,3600 3975,2310 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 2475 3600 - 4116 2190-->
+    <!-- Line -->
+    <polyline
+       points="7875,3600 6372,2310 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline42" />
+    <!-- Arrowhead on XXXpoint 7875 3600 - 6231 2190-->
+    <!-- Line -->
+    <polyline
+       points="6975,8775 6975,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 6975 8775 - 6975 4860-->
+    <!-- Line -->
+    <polyline
+       points="1575,8775 1575,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 1575 8775 - 1575 4860-->
+    <!-- Line -->
+    <polyline
+       points="8775,6525 8775,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 8775 6525 - 8775 4860-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1575"
+       y="9225"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text58">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1575"
+       y="9675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text60">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1575"
+       y="10350"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text62">CPU 0</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3375"
+       y="6975"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text64">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3375"
+       y="7425"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text66">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3375"
+       y="8100"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text68">CPU 15</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6975"
+       y="9225"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text70">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6975"
+       y="9675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text72">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6975"
+       y="10350"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text74">CPU 1007</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="6930"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text76">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="7380"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text78">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="8055"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text80">CPU 1023</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="start"
+       id="text82">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2475"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text84">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2475"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text86">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7875"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text88">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7875"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text90">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5175"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text92">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5175"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text94">rcu_node</text>
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="8775"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect96" />
+    <!-- Line: box -->
+    <rect
+       x="5625"
+       y="8775"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect98" />
+    <!-- Line: box -->
+    <rect
+       x="7380"
+       y="6480"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect100" />
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg
new file mode 100644
index 0000000..9bbb194
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg
@@ -0,0 +1,499 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:26:09 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="5.7in"
+   height="6.6in"
+   viewBox="-44 -44 6838 7888"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreeClassicRCUBH.fig">
+  <metadata
+     id="metadata110">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs108">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3868"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Mend"
+       style="overflow:visible;">
+      <path
+         id="path3886"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(0.6) rotate(180) translate(0,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="878"
+     inkscape:window-height="1148"
+     id="namedview106"
+     showgrid="false"
+     inkscape:zoom="1.3547758"
+     inkscape:cx="256.5"
+     inkscape:cy="297"
+     inkscape:window-x="45"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect14" />
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle16" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle18" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle20" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle22" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle24" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle30" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle32" />
+    <!-- Line -->
+    <polyline
+       points="1350,3450 2350,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+    <!-- Line -->
+    <polyline
+       points="4950,3450 3948,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect42" />
+    <!-- Line -->
+    <polyline
+       points="2250,5400 2250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect48" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect50" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect52" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect54" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect56" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="1650"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect58" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text60">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="1950"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text62">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text64">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text66">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text68">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text70">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text72">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text74">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text76">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text78">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text80">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text82">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text84">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text86">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text88">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text90">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text92">rcu_sched</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5400 5250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline94" />
+    <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+    <!-- Line -->
+    <polyline
+       points="4050,6600 4050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline98" />
+    <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+    <!-- Line -->
+    <polyline
+       points="1050,6600 1050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline102" />
+    <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg
new file mode 100644
index 0000000..21ba782
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg
@@ -0,0 +1,695 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:20:02 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="5.7in"
+   height="8.6in"
+   viewBox="-44 -44 6838 10288"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreeClassicRCUBHdyntick.fig">
+  <metadata
+     id="metadata166">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs164">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3924"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;">
+      <path
+         id="path3936"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="845"
+     inkscape:window-height="988"
+     id="namedview162"
+     showgrid="false"
+     inkscape:zoom="1.0452196"
+     inkscape:cx="256.5"
+     inkscape:cy="387.00003"
+     inkscape:window-x="356"
+     inkscape:window-y="61"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect10" />
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5688,5912 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline12" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
+    <polyline
+       points="5714 6068 5704 5822 5598 6044 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline14" />
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4486,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline16" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
+    <polyline
+       points="4514 7418 4506 7172 4396 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline18" />
+    <!-- Line -->
+    <polyline
+       points="1040,9300 1476,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
+    <polyline
+       points="1504 7418 1496 7172 1386 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline22" />
+    <!-- Line -->
+    <polyline
+       points="2240,8100 2676,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline24" />
+    <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
+    <polyline
+       points="2704 6218 2696 5972 2586 6194 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline26" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect30" />
+    <!-- Line -->
+    <polyline
+       points="1350,3450 2350,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+    <!-- Line -->
+    <polyline
+       points="4950,3450 3948,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+    <!-- Line -->
+    <polyline
+       points="4050,6600 4050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+    <!-- Line -->
+    <polyline
+       points="1050,6600 1050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,5400 2250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline48" />
+    <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,8100 2250,6364 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline52" />
+    <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
+    <!-- Line -->
+    <polyline
+       points="1050,9300 1050,7564 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline56" />
+    <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4050,7564 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5250,6364 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline64" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle80" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle82" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle84" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect86" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect88" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect90" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect92" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect94" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="1650"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect96" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect98" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect100" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect102" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect104" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect106" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text108">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="1950"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text110">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text112">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text114">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text116">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text118">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text120">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text122">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text124">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text126">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text128">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text130">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text132">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text134">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text136">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text138">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text152">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text154">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text156">rcu_sched</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5400 5250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline158" />
+    <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg
new file mode 100644
index 0000000..15adcac
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg
@@ -0,0 +1,741 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:32:59 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="6.1in"
+   height="8.9in"
+   viewBox="-44 -44 7288 10738"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreePreemptRCUBHdyntick.fig">
+  <metadata
+     id="metadata182">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs180">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3940"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="874"
+     inkscape:window-height="1148"
+     id="namedview178"
+     showgrid="false"
+     inkscape:zoom="1.2097379"
+     inkscape:cx="274.5"
+     inkscape:cy="400.49997"
+     inkscape:window-x="946"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="900"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="1200"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="5400"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect16" />
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5688,6362 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline18" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5710 6240-->
+    <polyline
+       points="5714 6518 5704 6272 5598 6494 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline20" />
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4486,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4512 7590-->
+    <polyline
+       points="4514 7868 4506 7622 4396 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline24" />
+    <!-- Line -->
+    <polyline
+       points="1040,9750 1476,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline26" />
+    <!-- Arrowhead on XXXpoint 1040 9750 - 1502 7590-->
+    <polyline
+       points="1504 7868 1496 7622 1386 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline28" />
+    <!-- Line -->
+    <polyline
+       points="2240,8550 2676,6512 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline30" />
+    <!-- Arrowhead on XXXpoint 2240 8550 - 2702 6390-->
+    <polyline
+       points="2704 6668 2696 6422 2586 6644 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline32" />
+    <!-- Line -->
+    <polyline
+       points="4050,9750 5682,6360 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 5736 6246-->
+    <polyline
+       points="5672 6518 5722 6276 5562 6466 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline36" />
+    <!-- Line -->
+    <polyline
+       points="1010,9750 2642,6360 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 1010 9750 - 2696 6246-->
+    <polyline
+       points="2632 6518 2682 6276 2522 6466 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline40" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="900"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect42" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1500"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect44" />
+    <!-- Line -->
+    <polyline
+       points="1350,3900 2350,3040 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 1350 3900 - 2444 2960-->
+    <!-- Line -->
+    <polyline
+       points="4950,3900 3948,3040 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 4950 3900 - 3854 2960-->
+    <!-- Line -->
+    <polyline
+       points="4050,7050 4050,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 4050 7050 - 4050 4740-->
+    <!-- Line -->
+    <polyline
+       points="1050,7050 1050,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline58" />
+    <!-- Arrowhead on XXXpoint 1050 7050 - 1050 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,5850 2250,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline62" />
+    <!-- Arrowhead on XXXpoint 2250 5850 - 2250 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,8550 2250,6814 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline66" />
+    <!-- Arrowhead on XXXpoint 2250 8550 - 2250 6690-->
+    <!-- Line -->
+    <polyline
+       points="1050,9750 1050,8014 "
+       style="stroke:#00ff00;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline70" />
+    <!-- Arrowhead on XXXpoint 1050 9750 - 1050 7890-->
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4050,8014 "
+       style="stroke:#00ff00;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline74" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4050 7890-->
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5250,6814 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline78" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5250 6690-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle82" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle84" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle86" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle88" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle90" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle92" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle94" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle96" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle98" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect100" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect102" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect104" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect106" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect108" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="2100"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect110" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect112" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect114" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect116" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect118" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect120" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text122">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text124">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text126">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text128">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text130">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text132">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text134">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text136">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text138">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text152">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text154">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text156">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text158">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text160">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text164">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text166">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text168">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6900"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text170">rcu_preempt</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="1200"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text172">rcu_sched</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5850 5250,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline174" />
+    <!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg
new file mode 100644
index 0000000..bbc3801
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg
@@ -0,0 +1,858 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:29:48 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="7.4in"
+   height="9.9in"
+   viewBox="-44 -44 8938 11938"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreePreemptRCUBHdyntickCB.svg">
+  <metadata
+     id="metadata212">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs210">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3970"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="881"
+     inkscape:window-height="1128"
+     id="namedview208"
+     showgrid="false"
+     inkscape:zoom="1.0195195"
+     inkscape:cx="333"
+     inkscape:cy="445.49997"
+     inkscape:window-x="936"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="900"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="1200"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="5400"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect16" />
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5688,6362 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline18" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5710 6240-->
+    <polyline
+       points="5714 6518 5704 6272 5598 6494 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline20" />
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4486,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4512 7590-->
+    <polyline
+       points="4514 7868 4506 7622 4396 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline24" />
+    <!-- Line -->
+    <polyline
+       points="1040,9750 1476,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline26" />
+    <!-- Arrowhead on XXXpoint 1040 9750 - 1502 7590-->
+    <polyline
+       points="1504 7868 1496 7622 1386 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline28" />
+    <!-- Line -->
+    <polyline
+       points="2240,8550 2676,6512 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline30" />
+    <!-- Arrowhead on XXXpoint 2240 8550 - 2702 6390-->
+    <polyline
+       points="2704 6668 2696 6422 2586 6644 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline32" />
+    <!-- Line -->
+    <polyline
+       points="4050,9600 5692,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 4050 9600 - 5744 5948-->
+    <polyline
+       points="5682 6220 5730 5978 5574 6170 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline36" />
+    <!-- Line -->
+    <polyline
+       points="1086,9600 2728,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 1086 9600 - 2780 5948-->
+    <polyline
+       points="2718 6220 2766 5978 2610 6170 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline40" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="900"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect42" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1500"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect44" />
+    <!-- Line -->
+    <polyline
+       points="1350,3900 2350,3040 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 1350 3900 - 2444 2960-->
+    <!-- Line -->
+    <polyline
+       points="4950,3900 3948,3040 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 4950 3900 - 3854 2960-->
+    <!-- Line -->
+    <polyline
+       points="4050,7050 4050,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 4050 7050 - 4050 4740-->
+    <!-- Line -->
+    <polyline
+       points="1050,7050 1050,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline58" />
+    <!-- Arrowhead on XXXpoint 1050 7050 - 1050 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,5850 2250,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline62" />
+    <!-- Arrowhead on XXXpoint 2250 5850 - 2250 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,8550 2250,6814 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline66" />
+    <!-- Arrowhead on XXXpoint 2250 8550 - 2250 6690-->
+    <!-- Line -->
+    <polyline
+       points="1050,9750 1050,8014 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline70" />
+    <!-- Arrowhead on XXXpoint 1050 9750 - 1050 7890-->
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4050,8014 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline74" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4050 7890-->
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5250,6814 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline78" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5250 6690-->
+    <!-- Line -->
+    <polyline
+       points="6000,6300 8048,7910 "
+       style="stroke:#87cfff;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline82" />
+    <!-- Arrowhead on XXXpoint 6000 6300 - 8146 7986-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle86" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle88" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle90" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle92" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle94" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle96" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle98" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle100" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle102" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="7950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect104" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="9450"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect106" />
+    <!-- Line -->
+    <polyline
+       points="8100,8850 8100,9384 "
+       style="stroke:#000000;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline108" />
+    <!-- Arrowhead on XXXpoint 8100 8850 - 8100 9510-->
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="10950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect112" />
+    <!-- Line -->
+    <polyline
+       points="8100,10350 8100,10884 "
+       style="stroke:#000000;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline114" />
+    <!-- Arrowhead on XXXpoint 8100 10350 - 8100 11010-->
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect118" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect120" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect122" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect124" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect126" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="2100"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect128" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect130" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect132" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect134" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect136" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect138" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="8250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="8550"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_head</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="9750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_head</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="11250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="11550"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_head</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="1200"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text152">rcu_sched</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text154">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text156">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text158">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text160">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text162">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text164">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text166">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text168">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text170">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text172">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text174">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text176">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text178">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text180">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text182">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text184">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text186">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text188">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text190">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text192">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text194">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text196">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text198">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text200">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6900"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text202">rcu_preempt</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5850 5250,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline204" />
+    <!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
new file mode 100644
index 0000000..7eb47ac
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -0,0 +1,1333 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        "http://www.w3.org/TR/html4/loose.dtd">
+        <html>
+        <head><title>A Tour Through TREE_RCU's Data Structures [LWN.net]</title>
+        <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+
+           <p>January 27, 2016</p>
+           <p>This article was contributed by Paul E.&nbsp;McKenney</p>
+
+<h3>Introduction</h3>
+
+This document describes RCU's major data structures and their relationship
+to each other.
+
+<ol>
+<li>	<a href="#Data-Structure Relationships">
+	Data-Structure Relationships</a>
+<li>	<a href="#The rcu_state Structure">
+	The <tt>rcu_state</tt> Structure</a>
+<li>	<a href="#The rcu_node Structure">
+	The <tt>rcu_node</tt> Structure</a>
+<li>	<a href="#The rcu_data Structure">
+	The <tt>rcu_data</tt> Structure</a>
+<li>	<a href="#The rcu_dynticks Structure">
+	The <tt>rcu_dynticks</tt> Structure</a>
+<li>	<a href="#The rcu_head Structure">
+	The <tt>rcu_head</tt> Structure</a>
+<li>	<a href="#RCU-Specific Fields in the task_struct Structure">
+	RCU-Specific Fields in the <tt>task_struct</tt> Structure</a>
+<li>	<a href="#Accessor Functions">
+	Accessor Functions</a>
+</ol>
+
+At the end we have the
+<a href="#Answers to Quick Quizzes">answers to the quick quizzes</a>.
+
+<h3><a name="Data-Structure Relationships">Data-Structure Relationships</a></h3>
+
+<p>RCU is for all intents and purposes a large state machine, and its
+data structures maintain the state in such a way as to allow RCU readers
+to execute extremely quickly, while also processing the RCU grace periods
+requested by updaters in an efficient and extremely scalable fashion.
+The efficiency and scalability of RCU updaters is provided primarily
+by a combining tree, as shown below:
+
+</p><p><img src="BigTreeClassicRCU.svg" alt="BigTreeClassicRCU.svg" width="30%">
+
+</p><p>This diagram shows an enclosing <tt>rcu_state</tt> structure
+containing a tree of <tt>rcu_node</tt> structures.
+Each leaf node of the <tt>rcu_node</tt> tree has up to 16
+<tt>rcu_data</tt> structures associated with it, so that there
+are <tt>NR_CPUS</tt> <tt>rcu_data</tt> structures,
+one for each possible CPU.
+This structure is adjusted at boot time, if needed, to handle the
+common case where <tt>nr_cpu_ids</tt> is much less than
+<tt>NR_CPUS</tt>.
+For example, a number of Linux distributions set <tt>NR_CPUS=4096</tt>,
+which results in a three-level <tt>rcu_node</tt> tree.
+If the actual hardware has only 16 CPUs, RCU will adjust itself
+at boot time, resulting in an <tt>rcu_node</tt> tree with only a single node.
+
+</p><p>The purpose of this combining tree is to allow per-CPU events
+such as quiescent states, dyntick-idle transitions,
+and CPU hotplug operations to be processed efficiently
+and scalably.
+Quiescent states are recorded by the per-CPU <tt>rcu_data</tt> structures,
+and other events are recorded by the leaf-level <tt>rcu_node</tt>
+structures.
+All of these events are combined at each level of the tree until finally
+grace periods are completed at the tree's root <tt>rcu_node</tt>
+structure.
+A grace period can be completed at the root once every CPU
+(or, in the case of <tt>CONFIG_PREEMPT_RCU</tt>, task)
+has passed through a quiescent state.
+Once a grace period has completed, record of that fact is propagated
+back down the tree.
+
+</p><p>As can be seen from the diagram, on a 64-bit system
+a two-level tree with 64 leaves can accommodate 1,024 CPUs, with a fanout
+of 64 at the root and a fanout of 16 at the leaves.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Why isn't the fanout at the leaves also 64?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	Because there are more types of events that affect the leaf-level
+	<tt>rcu_node</tt> structures than further up the tree.
+	Therefore, if the leaf <tt>rcu_node</tt> structures have fanout of
+	64, the contention on these structures' <tt>-&gt;lock</tt> fields
+	becomes excessive.
+	Experimentation on a wide variety of systems has shown that a fanout
+	of 16 works well for the leaves of the <tt>rcu_node</tt> tree.
+	</font>
+
+	<p><font color="ffffff">Of course, further experience with
+	systems having hundreds or thousands of CPUs may demonstrate
+	that the fanout for the non-leaf <tt>rcu_node</tt> structures
+	must also be reduced.
+	Such reduction can be easily carried out when and if it proves
+	necessary.
+	In the meantime, if you are using such a system and running into
+	contention problems on the non-leaf <tt>rcu_node</tt> structures,
+	you may use the <tt>CONFIG_RCU_FANOUT</tt> kernel configuration
+	parameter to reduce the non-leaf fanout as needed.
+	</font>
+
+	<p><font color="ffffff">Kernels built for systems with
+	strong NUMA characteristics might also need to adjust
+	<tt>CONFIG_RCU_FANOUT</tt> so that the domains of the
+	<tt>rcu_node</tt> structures align with hardware boundaries.
+	However, there has thus far been no need for this.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>If your system has more than 1,024 CPUs (or more than 512 CPUs on
+a 32-bit system), then RCU will automatically add more levels to the
+tree.
+For example, if you are crazy enough to build a 64-bit system with 65,536
+CPUs, RCU would configure the <tt>rcu_node</tt> tree as follows:
+
+</p><p><img src="HugeTreeClassicRCU.svg" alt="HugeTreeClassicRCU.svg" width="50%">
+
+</p><p>RCU currently permits up to a four-level tree, which on a 64-bit system
+accommodates up to 4,194,304 CPUs, though only a mere 524,288 CPUs for
+32-bit systems.
+On the other hand, you can set <tt>CONFIG_RCU_FANOUT</tt> to be
+as small as 2 if you wish, which would permit only 16 CPUs, which
+is useful for testing.
+
+</p><p>This multi-level combining tree allows us to get most of the
+performance and scalability
+benefits of partitioning, even though RCU grace-period detection is
+inherently a global operation.
+The trick here is that only the last CPU to report a quiescent state
+into a given <tt>rcu_node</tt> structure need advance to the <tt>rcu_node</tt>
+structure at the next level up the tree.
+This means that at the leaf-level <tt>rcu_node</tt> structure, only
+one access out of sixteen will progress up the tree.
+For the internal <tt>rcu_node</tt> structures, the situation is even
+more extreme:  Only one access out of sixty-four will progress up
+the tree.
+Because the vast majority of the CPUs do not progress up the tree,
+the lock contention remains roughly constant up the tree.
+No matter how many CPUs there are in the system, at most 64 quiescent-state
+reports per grace period will progress all the way to the root
+<tt>rcu_node</tt> structure, thus ensuring that the lock contention
+on that root <tt>rcu_node</tt> structure remains acceptably low.
+
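+</p><p>To illustrate this funneling, the following simplified sketch
+(the function name is made up, locking and memory ordering are heavily
+abbreviated, and the <tt>-&gt;qsmask</tt> and <tt>-&gt;grpmask</tt>
+fields are described later in this document) shows a quiescent-state
+report clearing its bit in a leaf <tt>rcu_node</tt> structure and
+moving up the tree only when it clears the last bit at its level:
+
+<pre>
+  1   /* Simplified sketch, not the in-kernel implementation. */
+  2   static void sketch_report_qs(struct rcu_node *rnp, unsigned long mask)
+  3   {
+  4     for (;;) {
+  5       raw_spin_lock(&amp;rnp-&gt;lock);
+  6       rnp-&gt;qsmask &amp;= ~mask;  /* Clear the reporting child's bit. */
+  7       if (rnp-&gt;qsmask != 0 || rnp-&gt;parent == NULL) {
+  8         raw_spin_unlock(&amp;rnp-&gt;lock);
+  9         return; /* Still waiting on others, or reached the root. */
+ 10       }
+ 11       mask = rnp-&gt;grpmask;   /* This node's bit in its parent. */
+ 12       raw_spin_unlock(&amp;rnp-&gt;lock);
+ 13       rnp = rnp-&gt;parent;     /* Propagate one level up. */
+ 14     }
+ 15   }
+</pre>
+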
+</p><p>In effect, the combining tree acts like a big shock absorber,
+keeping lock contention under control at all tree levels regardless
+of the level of loading on the system.
+
+</p><p>The Linux kernel actually supports multiple flavors of RCU
+running concurrently, so RCU builds separate data structures for each
+flavor.
+For example, for <tt>CONFIG_TREE_RCU=y</tt> kernels, RCU provides
+rcu_sched and rcu_bh, as shown below:
+
+</p><p><img src="BigTreeClassicRCUBH.svg" alt="BigTreeClassicRCUBH.svg" width="33%">
+
+</p><p>Energy efficiency is increasingly important, and for that
+reason the Linux kernel provides <tt>CONFIG_NO_HZ_IDLE</tt>, which
+turns off the scheduling-clock interrupts on idle CPUs, which in
+turn allows those CPUs to attain deeper sleep states and to consume
+less energy.
+CPUs whose scheduling-clock interrupts have been turned off are
+said to be in <i>dyntick-idle mode</i>.
+RCU must handle dyntick-idle CPUs specially
+because RCU would otherwise wake up each CPU on every grace period,
+which would defeat the whole purpose of <tt>CONFIG_NO_HZ_IDLE</tt>.
+RCU uses the <tt>rcu_dynticks</tt> structure to track
+which CPUs are in dyntick idle mode, as shown below:
+
+</p><p><img src="BigTreeClassicRCUBHdyntick.svg" alt="BigTreeClassicRCUBHdyntick.svg" width="33%">
+
+</p><p>However, if a CPU is in dyntick-idle mode, it is in that mode
+for all flavors of RCU.
+Therefore, a single <tt>rcu_dynticks</tt> structure is allocated per
+CPU, and all of a given CPU's <tt>rcu_data</tt> structures share
+that <tt>rcu_dynticks</tt>, as shown in the figure.
+
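+</p><p>As a rough sketch (the initialization function shown here is
+hypothetical, but the field names follow the descriptions later in
+this document), this sharing amounts to a single per-CPU
+<tt>rcu_dynticks</tt> instance that every flavor's <tt>rcu_data</tt>
+structure points to:
+
+<pre>
+  1   /* Sketch only: one rcu_dynticks per CPU, shared by all flavors. */
+  2   static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks);
+  3
+  4   static void sketch_init_rcu_data(struct rcu_state *rsp, int cpu)
+  5   {
+  6     struct rcu_data *rdp = per_cpu_ptr(rsp-&gt;rda, cpu);
+  7
+  8     rdp-&gt;cpu = cpu;
+  9     rdp-&gt;rsp = rsp;
+ 10     rdp-&gt;dynticks = &amp;per_cpu(rcu_dynticks, cpu); /* Shared instance. */
+ 11   }
+</pre>
+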
+</p><p>Kernels built with <tt>CONFIG_PREEMPT_RCU</tt> support
+rcu_preempt in addition to rcu_sched and rcu_bh, as shown below:
+
+</p><p><img src="BigTreePreemptRCUBHdyntick.svg" alt="BigTreePreemptRCUBHdyntick.svg" width="35%">
+
+</p><p>RCU updaters wait for normal grace periods by registering
+RCU callbacks, either directly via <tt>call_rcu()</tt> and
+friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>,
+there being a separate interface per flavor of RCU)
+or indirectly via <tt>synchronize_rcu()</tt> and friends.
+RCU callbacks are represented by <tt>rcu_head</tt> structures,
+which are queued on <tt>rcu_data</tt> structures while they are
+waiting for a grace period to elapse, as shown in the following figure:
+
+</p><p><img src="BigTreePreemptRCUBHdyntickCB.svg" alt="BigTreePreemptRCUBHdyntickCB.svg" width="40%">
+
+</p><p>This figure shows how <tt>TREE_RCU</tt>'s and
+<tt>PREEMPT_RCU</tt>'s major data structures are related.
+Lesser data structures will be introduced with the algorithms that
+make use of them.
+
+</p><p>Note that each of the data structures in the above figure has
+its own synchronization:
+
+<p><ol>
+<li>	Each <tt>rcu_state</tt> structure has a lock and a mutex,
+	and some fields are protected by the corresponding root
+	<tt>rcu_node</tt> structure's lock.
+<li>	Each <tt>rcu_node</tt> structure has a spinlock.
+<li>	The fields in <tt>rcu_data</tt> are private to the corresponding
+	CPU, although a few can be read and written by other CPUs.
+<li>	Similarly, the fields in <tt>rcu_dynticks</tt> are private
+	to the corresponding CPU, although a few can be read by
+	other CPUs.
+</ol>
+
+<p>It is important to note that different data structures can have
+very different ideas about the state of RCU at any given time.
+For but one example, awareness of the start or end of a given RCU
+grace period propagates slowly through the data structures.
+This slow propagation is absolutely necessary for RCU to have good
+read-side performance.
+If this balkanized implementation seems foreign to you, one useful
+trick is to consider each instance of these data structures to be
+a different person, each having the usual slightly different
+view of reality.
+
+</p><p>The general role of each of these data structures is as
+follows:
+
+</p><ol>
+<li>	<tt>rcu_state</tt>:
+	This structure forms the interconnection between the
+	<tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
+	tracks grace periods, serves as a short-term repository
+	for callbacks orphaned by CPU-hotplug events,
+	maintains <tt>rcu_barrier()</tt> state,
+	tracks expedited grace-period state,
+	and maintains state used to force quiescent states when
+	grace periods extend too long.
+<li>	<tt>rcu_node</tt>: This structure forms the combining
+	tree that propagates quiescent-state
+	information from the leaves to the root, and also propagates
+	grace-period information from the root to the leaves.
+	It provides local copies of the grace-period state in order
+	to allow this information to be accessed in a synchronized
+	manner without suffering the scalability limitations that
+	would otherwise be imposed by global locking.
+	In <tt>CONFIG_PREEMPT_RCU</tt> kernels, it manages the lists
+	of tasks that have blocked while in their current
+	RCU read-side critical section.
+	In <tt>CONFIG_PREEMPT_RCU</tt> with
+	<tt>CONFIG_RCU_BOOST</tt>, it manages the
+	per-<tt>rcu_node</tt> priority-boosting
+	kernel threads (kthreads) and state.
+	Finally, it records CPU-hotplug state in order to determine
+	which CPUs should be ignored during a given grace period.
+<li>	<tt>rcu_data</tt>: This per-CPU structure is the
+	focus of quiescent-state detection and RCU callback queuing.
+	It also tracks its relationship to the corresponding leaf
+	<tt>rcu_node</tt> structure to allow more-efficient
+	propagation of quiescent states up the <tt>rcu_node</tt>
+	combining tree.
+	Like the <tt>rcu_node</tt> structure, it provides a local
+	copy of the grace-period information to allow for-free
+	synchronized
+	access to this information from the corresponding CPU.
+	Finally, this structure records past dyntick-idle state
+	for the corresponding CPU and also tracks statistics.
+<li>	<tt>rcu_dynticks</tt>:
+	This per-CPU structure tracks the current dyntick-idle
+	state for the corresponding CPU.
+	Unlike the other three structures, the <tt>rcu_dynticks</tt>
+	structure is not replicated per RCU flavor.
+<li>	<tt>rcu_head</tt>:
+	This structure represents RCU callbacks, and is the
+	only structure allocated and managed by RCU users.
+	The <tt>rcu_head</tt> structure is normally embedded
+	within the RCU-protected data structure, as illustrated
+	by the example following this list.
+</ol>
+
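+<p>For example, an RCU user might embed an <tt>rcu_head</tt> in a
+structure of its own and pass it to <tt>call_rcu()</tt>, roughly as
+follows (the <tt>foo</tt> structure and its functions are of course
+made up for illustration):
+
+<pre>
+  1   struct foo {                  /* Hypothetical RCU-protected type. */
+  2     int data;
+  3     struct rcu_head rh;         /* Embedded rcu_head. */
+  4   };
+  5
+  6   static void foo_reclaim(struct rcu_head *rhp)
+  7   {
+  8     struct foo *fp = container_of(rhp, struct foo, rh);
+  9
+ 10     kfree(fp);                  /* Safe: a grace period has elapsed. */
+ 11   }
+ 12
+ 13   static void foo_remove(struct foo *fp)
+ 14   {
+ 15     /* ... unlink fp from the enclosing RCU-protected structure ... */
+ 16     call_rcu(&amp;fp-&gt;rh, foo_reclaim);
+ 17   }
+</pre>
+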
+<p>If all you wanted from this article was a general notion of how
+RCU's data structures are related, you are done.
+Otherwise, each of the following sections gives more details on
+the <tt>rcu_state</tt>, <tt>rcu_node</tt>, <tt>rcu_data</tt>,
+and <tt>rcu_dynticks</tt> data structures.
+
+<h3><a name="The rcu_state Structure">
+The <tt>rcu_state</tt> Structure</a></h3>
+
+<p>The <tt>rcu_state</tt> structure is the base structure that
+represents a flavor of RCU.
+This structure forms the interconnection between the
+<tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
+tracks grace periods, contains the lock used to
+synchronize with CPU-hotplug events,
+and maintains state used to force quiescent states when
+grace periods extend too long.
+
+</p><p>A few of the <tt>rcu_state</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+The more specialized fields are covered in the discussion of their
+use.
+
+<h5>Relationship to rcu_node and rcu_data Structures</h5>
+
+This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   struct rcu_node node[NUM_RCU_NODES];
+  2   struct rcu_node *level[NUM_RCU_LVLS + 1];
+  3   struct rcu_data __percpu *rda;
+</pre>
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Wait a minute!
+	You said that the <tt>rcu_node</tt> structures formed a tree,
+	but they are declared as a flat array!
+	What gives?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	The tree is laid out in the array.
+	The first node in the array is the head, the next set of nodes in the
+	array are children of the head node, and so on until the last set of
+	nodes in the array are the leaves.
+	</font>
+
+	<p><font color="ffffff">See the following diagrams to see how
+	this works.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>The <tt>rcu_node</tt> tree is embedded into the
+<tt>-&gt;node[]</tt> array as shown in the following figure:
+
+</p><p><img src="TreeMapping.svg" alt="TreeMapping.svg" width="40%">
+
+</p><p>One interesting consequence of this mapping is that a
+breadth-first traversal of the tree is implemented as a simple
+linear scan of the array, which is in fact what the
+<tt>rcu_for_each_node_breadth_first()</tt> macro does.
+This macro is used at the beginning and end of grace periods.
+
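+</p><p>A sketch in the spirit of this macro (the exact in-kernel
+definition may differ in detail) is simply a pointer walk over the
+<tt>-&gt;node[]</tt> array:
+
+<pre>
+  1   /* Sketch: visit every rcu_node structure in breadth-first order. */
+  2   #define sketch_for_each_node_breadth_first(rsp, rnp) \
+  3     for ((rnp) = &amp;(rsp)-&gt;node[0]; \
+  4          (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+</pre>
+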
+</p><p>Each entry of the <tt>-&gt;level</tt> array references
+the first <tt>rcu_node</tt> structure on the corresponding level
+of the tree, for example, as shown below:
+
+</p><p><img src="TreeMappingLevel.svg" alt="TreeMappingLevel.svg" width="40%">
+
+</p><p>The zero<sup>th</sup> element of the array references the root
+<tt>rcu_node</tt> structure, the first element references the
+first child of the root <tt>rcu_node</tt>, and finally the second
+element references the first leaf <tt>rcu_node</tt> structure.
+
+</p><p>For whatever it is worth, if you draw the tree to be tree-shaped
+rather than array-shaped, it is easy to draw a planar representation:
+
+</p><p><img src="TreeLevel.svg" alt="TreeLevel.svg" width="60%">
+
+</p><p>Finally, the <tt>-&gt;rda</tt> field references a per-CPU
+pointer to the corresponding CPU's <tt>rcu_data</tt> structure.
+
+</p><p>All of these fields are constant once initialization is complete,
+and therefore need no protection.
+
+<h5>Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gpnum;
+  2   unsigned long completed;
+</pre>
+
+<p>RCU grace periods are numbered, and
+the <tt>-&gt;gpnum</tt> field contains the number of the grace
+period that started most recently.
+The <tt>-&gt;completed</tt> field contains the number of the
+grace period that completed most recently.
+If the two fields are equal, the RCU grace period that most recently
+started has already completed, and therefore the corresponding
+flavor of RCU is idle.
+If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
+then <tt>-&gt;gpnum</tt> gives the number of the current RCU
+grace period, which has not yet completed.
+Any other combination of values indicates that something is broken.
+These two fields are protected by the root <tt>rcu_node</tt>'s
+<tt>-&gt;lock</tt> field.
+
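+</p><p>For example, ignoring the locking, memory ordering, and
+counter-wrap handling that real code needs, the idleness test for a
+flavor of RCU reduces to a comparison of these two fields (the
+function name here is made up):
+
+<pre>
+  1   /* Sketch: true if this flavor has no grace period in progress. */
+  2   static bool sketch_rcu_gp_idle(struct rcu_state *rsp)
+  3   {
+  4     return rsp-&gt;completed == rsp-&gt;gpnum;
+  5   }
+</pre>
+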
+</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
+as well.
+The fields in the <tt>rcu_state</tt> structure represent the
+most current values, and those of the other structures are compared
+in order to detect the start of a new grace period in a distributed
+fashion.
+The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
+(down the tree from the root to the leaves) to <tt>rcu_data</tt>.
+
+<h5>Miscellaneous</h5>
+
+<p>This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gp_max;
+  2   char abbr;
+  3   char *name;
+</pre>
+
+<p>The <tt>-&gt;gp_max</tt> field tracks the duration of the longest
+grace period in jiffies.
+It is protected by the root <tt>rcu_node</tt>'s <tt>-&gt;lock</tt>.
+
+<p>The <tt>-&gt;name</tt> field points to the name of the RCU flavor
+(for example, &ldquo;rcu_sched&rdquo;), and is constant.
+The <tt>-&gt;abbr</tt> field contains a one-character abbreviation,
+for example, &ldquo;s&rdquo; for RCU-sched.
+
+<h3><a name="The rcu_node Structure">
+The <tt>rcu_node</tt> Structure</a></h3>
+
+<p>The <tt>rcu_node</tt> structures form the combining
+tree that propagates quiescent-state
+information from the leaves to the root and also that propagates
+grace-period information from the root down to the leaves.
+They provide local copies of the grace-period state in order
+to allow this information to be accessed in a synchronized
+manner without suffering the scalability limitations that
+would otherwise be imposed by global locking.
+In <tt>CONFIG_PREEMPT_RCU</tt> kernels, they manage the lists
+of tasks that have blocked while in their current
+RCU read-side critical section.
+In <tt>CONFIG_PREEMPT_RCU</tt> with
+<tt>CONFIG_RCU_BOOST</tt>, they manage the
+per-<tt>rcu_node</tt> priority-boosting
+kernel threads (kthreads) and state.
+Finally, they record CPU-hotplug state in order to determine
+which CPUs should be ignored during a given grace period.
+
+</p><p>The <tt>rcu_node</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+
+<h5>Connection to Combining Tree</h5>
+
+<p>This portion of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   struct rcu_node *parent;
+  2   u8 level;
+  3   u8 grpnum;
+  4   unsigned long grpmask;
+  5   int grplo;
+  6   int grphi;
+</pre>
+
+<p>The <tt>-&gt;parent</tt> pointer references the <tt>rcu_node</tt>
+one level up in the tree, and is <tt>NULL</tt> for the root
+<tt>rcu_node</tt>.
+The RCU implementation makes heavy use of this field to push quiescent
+states up the tree.
+The <tt>-&gt;level</tt> field gives the level in the tree, with
+the root being at level zero, its children at level one, and so on.
+The <tt>-&gt;grpnum</tt> field gives this node's position within
+the children of its parent, so this number can range between 0 and 31
+on 32-bit systems and between 0 and 63 on 64-bit systems.
+The <tt>-&gt;level</tt> and <tt>-&gt;grpnum</tt> fields are
+used only during initialization and for tracing.
+The <tt>-&gt;grpmask</tt> field is the bitmask counterpart of
+<tt>-&gt;grpnum</tt>, and therefore always has exactly one bit set.
+This mask is used to clear the bit corresponding to this <tt>rcu_node</tt>
+structure in its parent's bitmasks, which are described later.
+Finally, the <tt>-&gt;grplo</tt> and <tt>-&gt;grphi</tt> fields
+contain the lowest and highest numbered CPU served by this
+<tt>rcu_node</tt> structure, respectively.
+
+</p><p>All of these fields are constant, and thus do not require any
+synchronization.
+
+<h5>Synchronization</h5>
+
+<p>This field of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   raw_spinlock_t lock;
+</pre>
+
+<p>This field is used to protect the remaining fields in this structure,
+unless otherwise stated.
+That said, all of the fields in this structure can be accessed without
+locking for tracing purposes.
+Yes, this can result in confusing traces, but better some tracing confusion
+than to be heisenbugged out of existence.
+
+<h5>Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gpnum;
+  2   unsigned long completed;
+</pre>
+
+<p>These fields are the counterparts of the fields of the same name in
+the <tt>rcu_state</tt> structure.
+They each may lag up to one behind their <tt>rcu_state</tt>
+counterparts.
+If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
+<tt>-&gt;completed</tt> fields are equal, then this <tt>rcu_node</tt>
+structure believes that RCU is idle.
+Otherwise, as with the <tt>rcu_state</tt> structure,
+the <tt>-&gt;gpnum</tt> field will be one greater than the
+<tt>-&gt;completed</tt> field, with <tt>-&gt;gpnum</tt>
+indicating which grace period this <tt>rcu_node</tt> believes
+is still being waited for.
+
+</p><p>The <tt>-&gt;gpnum</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning
+of each grace period, and the <tt>-&gt;completed</tt> fields are
+updated at the end of each grace period.
+
+<h5>Quiescent-State Tracking</h5>
+
+<p>These fields manage the propagation of quiescent states up the
+combining tree.
+
+</p><p>This portion of the <tt>rcu_node</tt> structure has fields
+as follows:
+
+<pre>
+  1   unsigned long qsmask;
+  2   unsigned long expmask;
+  3   unsigned long qsmaskinit;
+  4   unsigned long expmaskinit;
+</pre>
+
+<p>The <tt>-&gt;qsmask</tt> field tracks which of this
+<tt>rcu_node</tt> structure's children still need to report
+quiescent states for the current normal grace period.
+Such children will have a value of 1 in their corresponding bit.
+Note that the leaf <tt>rcu_node</tt> structures should be
+thought of as having <tt>rcu_data</tt> structures as their
+children.
+Similarly, the <tt>-&gt;expmask</tt> field tracks which
+of this <tt>rcu_node</tt> structure's children still need to report
+quiescent states for the current expedited grace period.
+An expedited grace period has
+the same conceptual properties as a normal grace period, but the
+expedited implementation accepts extreme CPU overhead to obtain
+much lower grace-period latency, for example, consuming a few
+tens of microseconds worth of CPU time to reduce grace-period
+duration from milliseconds to tens of microseconds.
+The <tt>-&gt;qsmaskinit</tt> field tracks which of this
+<tt>rcu_node</tt> structure's children cover at least
+one online CPU.
+This mask is used to initialize <tt>-&gt;qsmask</tt>,
+and <tt>-&gt;expmaskinit</tt> is used to initialize
+<tt>-&gt;expmask</tt> at the beginning of the
+normal and expedited grace periods, respectively.
+
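+</p><p>Conceptually, then, the start of each normal grace period
+resets the per-node bitmasks from their <tt>-&gt;qsmaskinit</tt>
+counterparts, as in the following simplified sketch (the function is
+hypothetical, and expedited grace periods initialize
+<tt>-&gt;expmask</tt> analogously):
+
+<pre>
+  1   /* Sketch: reset per-node state at the start of a new grace period. */
+  2   static void sketch_gp_init(struct rcu_state *rsp)
+  3   {
+  4     struct rcu_node *rnp;
+  5
+  6     rcu_for_each_node_breadth_first(rsp, rnp) {
+  7       raw_spin_lock(&amp;rnp-&gt;lock);
+  8       rnp-&gt;qsmask = rnp-&gt;qsmaskinit; /* Children covering online CPUs. */
+  9       rnp-&gt;gpnum = rsp-&gt;gpnum;       /* Propagate the new GP number. */
+ 10       raw_spin_unlock(&amp;rnp-&gt;lock);
+ 11     }
+ 12   }
+</pre>
+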
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Why are these bitmasks protected by locking?
+	Come on, haven't you heard of atomic instructions???
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	Lockless grace-period computation!  Such a tantalizing possibility!
+	</font>
+
+	<p><font color="ffffff">But consider the following sequence of events:
+	</font>
+
+	<ol>
+	<li>	<font color="ffffff">CPU&nbsp;0 has been in dyntick-idle
+		mode for quite some time.
+		When it wakes up, it notices that the current RCU
+		grace period needs it to report in, so it sets a
+		flag where the scheduling clock interrupt will find it.
+		</font><p>
+	<li>	<font color="ffffff">Meanwhile, CPU&nbsp;1 is running
+		<tt>force_quiescent_state()</tt>,
+		and notices that CPU&nbsp;0 has been in dyntick idle mode,
+		which qualifies as an extended quiescent state.
+		</font><p>
+	<li>	<font color="ffffff">CPU&nbsp;0's scheduling clock
+		interrupt fires in the
+		middle of an RCU read-side critical section, and notices
+		that the RCU core needs something, so commences RCU softirq
+		processing.
+		</font>
+		<p>
+	<li>	<font color="ffffff">CPU&nbsp;0's softirq handler
+		executes and is just about ready
+		to report its quiescent state up the <tt>rcu_node</tt>
+		tree.
+		</font><p>
+	<li>	<font color="ffffff">But CPU&nbsp;1 beats it to the punch,
+		completing the current
+		grace period and starting a new one.
+		</font><p>
+	<li>	<font color="ffffff">CPU&nbsp;0 now reports its quiescent
+		state for the wrong
+		grace period.
+		That grace period might now end before the RCU read-side
+		critical section.
+		If that happens, disaster will ensue.
+		</font>
+	</ol>
+
+	<p><font color="ffffff">So the locking is absolutely required in
+	order to coordinate
+	clearing of the bits with the grace-period numbers in
+	<tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt>.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<h5>Blocked-Task Management</h5>
+
+<p><tt>PREEMPT_RCU</tt> allows tasks to be preempted in the
+midst of their RCU read-side critical sections, and these tasks
+must be tracked explicitly.
+The details of exactly why and how they are tracked will be covered
+in a separate article on RCU read-side processing.
+For now, it is enough to know that the <tt>rcu_node</tt>
+structure tracks them.
+
+<pre>
+  1   struct list_head blkd_tasks;
+  2   struct list_head *gp_tasks;
+  3   struct list_head *exp_tasks;
+  4   bool wait_blkd_tasks;
+</pre>
+
+<p>The <tt>-&gt;blkd_tasks</tt> field is a list header for
+the list of blocked and preempted tasks.
+As tasks undergo context switches within RCU read-side critical
+sections, their <tt>task_struct</tt> structures are enqueued
+(via the <tt>task_struct</tt>'s <tt>-&gt;rcu_node_entry</tt>
+field) onto the head of the <tt>-&gt;blkd_tasks</tt> list for the
+leaf <tt>rcu_node</tt> structure corresponding to the CPU
+on which the outgoing context switch executed.
+As these tasks later exit their RCU read-side critical sections,
+they remove themselves from the list.
+This list is therefore in reverse time order, so that if one of the tasks
+is blocking the current grace period, all subsequent tasks must
+also be blocking that same grace period.
+Therefore, a single pointer into this list suffices to track
+all tasks blocking a given grace period.
+That pointer is stored in <tt>-&gt;gp_tasks</tt> for normal
+grace periods and in <tt>-&gt;exp_tasks</tt> for expedited
+grace periods.
+These last two fields are <tt>NULL</tt> if either there is
+no grace period in flight or if there are no blocked tasks
+preventing that grace period from completing.
+If either of these two pointers is referencing a task that
+removes itself from the <tt>-&gt;blkd_tasks</tt> list,
+then that task must advance the pointer to the next task on
+the list, or set the pointer to <tt>NULL</tt> if there
+are no subsequent tasks on the list.
+
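+</p><p>A simplified sketch of this pointer-advancing rule (real code
+must also contend with interrupts and with concurrent grace-period
+processing, and the function name here is made up) might look as
+follows:
+
+<pre>
+  1   /* Sketch: task t removes itself from rnp-&gt;blkd_tasks. */
+  2   static void sketch_remove_blkd_task(struct rcu_node *rnp,
+  3                                       struct task_struct *t)
+  4   {
+  5     struct list_head *np = t-&gt;rcu_node_entry.next;
+  6
+  7     if (np == &amp;rnp-&gt;blkd_tasks)
+  8       np = NULL;                 /* No task follows t on the list. */
+  9     if (rnp-&gt;gp_tasks == &amp;t-&gt;rcu_node_entry)
+ 10       rnp-&gt;gp_tasks = np;        /* Advance the normal-GP pointer. */
+ 11     if (rnp-&gt;exp_tasks == &amp;t-&gt;rcu_node_entry)
+ 12       rnp-&gt;exp_tasks = np;       /* Advance the expedited-GP pointer. */
+ 13     list_del_init(&amp;t-&gt;rcu_node_entry);
+ 14   }
+</pre>
+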
+</p><p>For example, suppose that tasks&nbsp;T1, T2, and&nbsp;T3 are
+all hard-affinitied to the largest-numbered CPU in the system.
+Then if task&nbsp;T1 blocked in an RCU read-side
+critical section, then an expedited grace period started,
+then task&nbsp;T2 blocked in an RCU read-side critical section,
+then a normal grace period started, and finally task&nbsp;T3 blocked
+in an RCU read-side critical section, then the state of the
+last leaf <tt>rcu_node</tt> structure's blocked-task list
+would be as shown below:
+
+</p><p><img src="blkd_task.svg" alt="blkd_task.svg" width="60%">
+
+</p><p>Task&nbsp;T1 is blocking both grace periods, task&nbsp;T2 is
+blocking only the normal grace period, and task&nbsp;T3 is blocking
+neither grace period.
+Note that these tasks will not remove themselves from this list
+immediately upon resuming execution.
+They will instead remain on the list until they execute the outermost
+<tt>rcu_read_unlock()</tt> that ends their RCU read-side critical
+section.
+
+<p>
+The <tt>-&gt;wait_blkd_tasks</tt> field indicates whether or not
+the current grace period is waiting on a blocked task.
+
+<h5>Sizing the <tt>rcu_node</tt> Array</h5>
+
+<p>The <tt>rcu_node</tt> array is sized via a series of
+C-preprocessor expressions as follows:
+
+<pre>
+ 1 #ifdef CONFIG_RCU_FANOUT
+ 2 #define RCU_FANOUT CONFIG_RCU_FANOUT
+ 3 #else
+ 4 # ifdef CONFIG_64BIT
+ 5 # define RCU_FANOUT 64
+ 6 # else
+ 7 # define RCU_FANOUT 32
+ 8 # endif
+ 9 #endif
+10
+11 #ifdef CONFIG_RCU_FANOUT_LEAF
+12 #define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
+13 #else
+14 # ifdef CONFIG_64BIT
+15 # define RCU_FANOUT_LEAF 64
+16 # else
+17 # define RCU_FANOUT_LEAF 32
+18 # endif
+19 #endif
+20
+21 #define RCU_FANOUT_1        (RCU_FANOUT_LEAF)
+22 #define RCU_FANOUT_2        (RCU_FANOUT_1 * RCU_FANOUT)
+23 #define RCU_FANOUT_3        (RCU_FANOUT_2 * RCU_FANOUT)
+24 #define RCU_FANOUT_4        (RCU_FANOUT_3 * RCU_FANOUT)
+25
+26 #if NR_CPUS &lt;= RCU_FANOUT_1
+27 #  define RCU_NUM_LVLS        1
+28 #  define NUM_RCU_LVL_0        1
+29 #  define NUM_RCU_NODES        NUM_RCU_LVL_0
+30 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
+31 #  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
+32 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
+33 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
+34 #elif NR_CPUS &lt;= RCU_FANOUT_2
+35 #  define RCU_NUM_LVLS        2
+36 #  define NUM_RCU_LVL_0        1
+37 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+38 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
+39 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
+40 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
+41 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
+42 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
+43 #elif NR_CPUS &lt;= RCU_FANOUT_3
+44 #  define RCU_NUM_LVLS        3
+45 #  define NUM_RCU_LVL_0        1
+46 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+47 #  define NUM_RCU_LVL_2        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+48 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
+49 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
+50 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
+51 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
+52 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
+53 #elif NR_CPUS &lt;= RCU_FANOUT_4
+54 #  define RCU_NUM_LVLS        4
+55 #  define NUM_RCU_LVL_0        1
+56 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
+57 #  define NUM_RCU_LVL_2        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+58 #  define NUM_RCU_LVL_3        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+59 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+60 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
+61 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
+62 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
+63 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
+64 #else
+65 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+66 #endif
+</pre>
+
+<p>The maximum number of levels in the <tt>rcu_node</tt> structure
+is currently limited to four, as specified by lines&nbsp;21-24
+and the structure of the subsequent &ldquo;if&rdquo; statement.
+For 32-bit systems, this allows 16*32*32*32=524,288 CPUs, which
+should be sufficient for the next few years at least.
+For 64-bit systems, 16*64*64*64=4,194,304 CPUs is allowed, which
+should see us through the next decade or so.
+This four-level tree also allows kernels built with
+<tt>CONFIG_RCU_FANOUT=8</tt> to support up to 4096 CPUs,
+which might be useful in very large systems having eight CPUs per
+socket (but please note that no one has yet shown any measurable
+performance degradation due to misaligned socket and <tt>rcu_node</tt>
+boundaries).
+In addition, building kernels with a full four levels of <tt>rcu_node</tt>
+tree permits better testing of RCU's combining-tree code.
+
+</p><p>The <tt>RCU_FANOUT</tt> symbol controls how many children
+are permitted at each non-leaf level of the <tt>rcu_node</tt> tree.
+If the <tt>CONFIG_RCU_FANOUT</tt> Kconfig option is not specified,
+it is set based on the word size of the system, which is also
+the Kconfig default.
+
+</p><p>The <tt>RCU_FANOUT_LEAF</tt> symbol controls how many CPUs are
+handled by each leaf <tt>rcu_node</tt> structure.
+Experience has shown that allowing a given leaf <tt>rcu_node</tt>
+structure to handle 64 CPUs, as permitted by the number of bits in
+the <tt>-&gt;qsmask</tt> field on a 64-bit system, results in
+excessive contention for the leaf <tt>rcu_node</tt> structures'
+<tt>-&gt;lock</tt> fields.
+The number of CPUs per leaf <tt>rcu_node</tt> structure is therefore
+limited to 16 given the default value of <tt>CONFIG_RCU_FANOUT_LEAF</tt>.
+If <tt>CONFIG_RCU_FANOUT_LEAF</tt> is unspecified, the value
+selected is based on the word size of the system, just as for
+<tt>CONFIG_RCU_FANOUT</tt>.
+Lines&nbsp;11-19 perform this computation.
+
+</p><p>Lines&nbsp;21-24 compute the maximum number of CPUs supported by
+a single-level (which contains a single <tt>rcu_node</tt> structure),
+two-level, three-level, and four-level <tt>rcu_node</tt> tree,
+respectively, given the fanout specified by <tt>RCU_FANOUT</tt>
+and <tt>RCU_FANOUT_LEAF</tt>.
+These numbers of CPUs are retained in the
+<tt>RCU_FANOUT_1</tt>,
+<tt>RCU_FANOUT_2</tt>,
+<tt>RCU_FANOUT_3</tt>, and
+<tt>RCU_FANOUT_4</tt>
+C-preprocessor variables, respectively.
+
+</p><p>These variables are used to control the C-preprocessor <tt>#if</tt>
+statement spanning lines&nbsp;26-66 that computes the number of
+<tt>rcu_node</tt> structures required for each level of the tree,
+as well as the number of levels required.
+The number of levels is placed in the <tt>NUM_RCU_LVLS</tt>
+C-preprocessor variable by lines&nbsp;27, 35, 44, and&nbsp;54.
+The number of <tt>rcu_node</tt> structures for the topmost level
+of the tree is always exactly one, and this value is unconditionally
+placed into <tt>NUM_RCU_LVL_0</tt> by lines&nbsp;28, 36, 45, and&nbsp;55.
+The rest of the levels (if any) of the <tt>rcu_node</tt> tree
+are computed by dividing the maximum number of CPUs by the
+fanout supported by the number of levels from the current level down,
+rounding up.  This computation is performed by lines&nbsp;37,
+46-47, and&nbsp;56-58.
+Lines&nbsp;31-33, 40-42, 50-52, and&nbsp;61-63 create initializers
+for lockdep lock-class names.
+Finally, lines&nbsp;64-66 produce an error if the maximum number of
+CPUs is too large for the specified fanout.
+
+<h3><a name="The rcu_data Structure">
+The <tt>rcu_data</tt> Structure</a></h3>
+
+<p>The <tt>rcu_data</tt> structure maintains the per-CPU state for the
+corresponding flavor of RCU.
+The fields in this structure may be accessed only from the corresponding
+CPU (and from tracing) unless otherwise stated.
+This structure is the
+focus of quiescent-state detection and RCU callback queuing.
+It also tracks its relationship to the corresponding leaf
+<tt>rcu_node</tt> structure to allow more-efficient
+propagation of quiescent states up the <tt>rcu_node</tt>
+combining tree.
+Like the <tt>rcu_node</tt> structure, it provides a local
+copy of the grace-period information to allow for-free
+synchronized
+access to this information from the corresponding CPU.
+Finally, this structure records past dyntick-idle state
+for the corresponding CPU and also tracks statistics.
+
+</p><p>The <tt>rcu_data</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+
+<h5>Connection to Other Data Structures</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   int cpu;
+  2   struct rcu_state *rsp;
+  3   struct rcu_node *mynode;
+  4   struct rcu_dynticks *dynticks;
+  5   unsigned long grpmask;
+  6   bool beenonline;
+</pre>
+
+<p>The <tt>-&gt;cpu</tt> field contains the number of the
+corresponding CPU, the <tt>-&gt;rsp</tt> pointer references
+the corresponding <tt>rcu_state</tt> structure (and is most frequently
+used to locate the name of the corresponding flavor of RCU for tracing),
+and the <tt>-&gt;mynode</tt> field references the corresponding
+<tt>rcu_node</tt> structure.
+The <tt>-&gt;mynode</tt> field is used to propagate quiescent states
+up the combining tree.
+<p>The <tt>-&gt;dynticks</tt> pointer references the
+<tt>rcu_dynticks</tt> structure corresponding to this
+CPU.
+Recall that a single per-CPU instance of the <tt>rcu_dynticks</tt>
+structure is shared among all flavors of RCU.
+These first four fields are constant and therefore require no
+synchronization.
+
+</p><p>The <tt>-&gt;grpmask</tt> field indicates the bit in
+the <tt>-&gt;mynode-&gt;qsmask</tt> corresponding to this
+<tt>rcu_data</tt> structure, and is also used when propagating
+quiescent states.
+The <tt>-&gt;beenonline</tt> flag is set whenever the corresponding
+CPU comes online, which means that the debugfs tracing need not dump
+out any <tt>rcu_data</tt> structure for which this flag is not set.
+
+<h5>Quiescent-State and Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long completed;
+  2   unsigned long gpnum;
+  3   bool cpu_no_qs;
+  4   bool core_needs_qs;
+  5   bool gpwrap;
+  6   unsigned long rcu_qs_ctr_snap;
+</pre>
+
+<p>The <tt>completed</tt> and <tt>gpnum</tt>
+fields are the counterparts of the fields of the same name
+in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
+They may each lag up to one behind their <tt>rcu_node</tt>
+counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
+<tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
+arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
+will catch up upon exit from dyntick-idle mode).
+If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
+<tt>-&gt;completed</tt> fields are equal, then this <tt>rcu_data</tt>
+structure believes that RCU is idle.
+Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
+structures,
+the <tt>-&gt;gpnum</tt> field will be one greater than the
+<tt>-&gt;completed</tt> field, with <tt>-&gt;gpnum</tt>
+indicating which grace period this <tt>rcu_data</tt> believes
+is still being waited for.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	All this replication of the grace period numbers can only cause
+	massive confusion.
+	Why not just keep a global pair of counters and be done with it???
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	Because if there was only a single global pair of grace-period
+	numbers, there would need to be a single global lock to allow
+	safely accessing and updating them.
+	And if we are not going to have a single global lock, we need
+	to carefully manage the numbers on a per-node basis.
+	Recall from the answer to a previous Quick Quiz that the consequences
+	of applying a previously sampled quiescent state to the wrong
+	grace period are quite severe.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>The <tt>-&gt;cpu_no_qs</tt> flag indicates that the
+CPU has not yet passed through a quiescent state,
+while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
+RCU core needs a quiescent state from the corresponding CPU.
+The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
+CPU has remained idle for so long that the <tt>completed</tt>
+and <tt>gpnum</tt> counters are in danger of overflow, which
+will cause the CPU to disregard the values of its counters on
+its next exit from idle.
+Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
+cases where a given operation has resulted in a quiescent state
+for all flavors of RCU, for example, <tt>cond_resched_rcu_qs()</tt>.
+
+<h5>RCU Callback Handling</h5>
+
+<p>In the absence of CPU-hotplug events, RCU callbacks are invoked by
+the same CPU that registered them.
+This is strictly a cache-locality optimization: callbacks can and
+do get invoked on CPUs other than the one that registered them.
+After all, if the CPU that registered a given callback has gone
+offline before the callback can be invoked, there really is no other
+choice.
+
+</p><p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+ 1 struct rcu_head *nxtlist;
+ 2 struct rcu_head **nxttail[RCU_NEXT_SIZE];
+ 3 unsigned long nxtcompleted[RCU_NEXT_SIZE];
+ 4 long qlen_lazy;
+ 5 long qlen;
+ 6 long qlen_last_fqs_check;
+ 7 unsigned long n_force_qs_snap;
+ 8 unsigned long n_cbs_invoked;
+ 9 unsigned long n_cbs_orphaned;
+10 unsigned long n_cbs_adopted;
+11 long blimit;
+</pre>
+
+<p>The <tt>-&gt;nxtlist</tt> pointer and the
+<tt>-&gt;nxttail[]</tt> array form a four-segment list with
+older callbacks near the head and newer ones near the tail.
+Each segment contains callbacks with the corresponding relationship
+to the current grace period.
+The pointer out of the end of each of the four segments is referenced
+by the element of the <tt>-&gt;nxttail[]</tt> array indexed by
+<tt>RCU_DONE_TAIL</tt> (for callbacks handled by a prior grace period),
+<tt>RCU_WAIT_TAIL</tt> (for callbacks waiting on the current grace period),
+<tt>RCU_NEXT_READY_TAIL</tt> (for callbacks that will wait on the next
+grace period), and
+<tt>RCU_NEXT_TAIL</tt> (for callbacks that are not yet associated
+with a specific grace period)
+respectively, as shown in the following figure.
+
+</p><p><img src="nxtlist.svg" alt="nxtlist.svg" width="40%">
+
+</p><p>In this figure, the <tt>-&gt;nxtlist</tt> pointer references the
+first
+RCU callback in the list.
+The <tt>-&gt;nxttail[RCU_DONE_TAIL]</tt> array element references
+the <tt>-&gt;nxtlist</tt> pointer itself, indicating that none
+of the callbacks is ready to invoke.
+The <tt>-&gt;nxttail[RCU_WAIT_TAIL]</tt> array element references callback
+CB&nbsp;2's <tt>-&gt;next</tt> pointer, which indicates that
+CB&nbsp;1 and CB&nbsp;2 are both waiting on the current grace period.
+The <tt>-&gt;nxttail[RCU_NEXT_READY_TAIL]</tt> array element
+references the same RCU callback that <tt>-&gt;nxttail[RCU_WAIT_TAIL]</tt>
+does, which indicates that there are no callbacks waiting on the next
+RCU grace period.
+The <tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> array element references
+CB&nbsp;4's <tt>-&gt;next</tt> pointer, indicating that all the
+remaining RCU callbacks have not yet been assigned to an RCU grace
+period.
+Note that the <tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> array element
+always references the last RCU callback's <tt>-&gt;next</tt> pointer
+unless the callback list is empty, in which case it references
+the <tt>-&gt;nxtlist</tt> pointer.
+
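+</p><p>For example, appending a new callback to the
+<tt>RCU_NEXT_TAIL</tt> segment can be sketched as follows
+(a simplified sketch of the enqueue step, omitting locking,
+counter updates, and special cases):
+
+<pre>
+  1 head-&gt;func = func;
+  2 head-&gt;next = NULL;
+  3 *rdp-&gt;nxttail[RCU_NEXT_TAIL] = head;
+  4 rdp-&gt;nxttail[RCU_NEXT_TAIL] = &amp;head-&gt;next;
+</pre>
+
+</p><p>Note that the last two statements preserve the invariant that
+<tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> references the final callback's
+<tt>-&gt;next</tt> pointer.
+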
+</p><p>CPUs advance their callbacks from the
+<tt>RCU_NEXT_TAIL</tt> to the <tt>RCU_NEXT_READY_TAIL</tt> to the
+<tt>RCU_WAIT_TAIL</tt> to the <tt>RCU_DONE_TAIL</tt> list segments
+as grace periods advance.
+The CPU advances the callbacks in its <tt>rcu_data</tt> structure
+whenever it notices that another RCU grace period has completed.
+The CPU detects the completion of an RCU grace period by noticing
+that the value of its <tt>rcu_data</tt> structure's
+<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>rcu_node</tt> structure.
+Recall that each <tt>rcu_node</tt> structure's
+<tt>-&gt;completed</tt> field is updated at the end of each
+grace period.
+
+</p><p>The <tt>-&gt;nxtcompleted[]</tt> array records grace-period
+numbers corresponding to the list segments.
+This allows CPUs that go idle for extended periods to determine
+which of their callbacks are ready to be invoked after reawakening.
+
+</p><p>The <tt>-&gt;qlen</tt> counter contains the number of
+callbacks in <tt>-&gt;nxtlist</tt>, and the
+<tt>-&gt;qlen_lazy</tt> counter contains the number of those callbacks that
+are known to only free memory, and whose invocation can therefore
+be safely deferred.
+The <tt>-&gt;qlen_last_fqs_check</tt> and
+<tt>-&gt;n_force_qs_snap</tt> fields coordinate the forcing of quiescent
+states from <tt>call_rcu()</tt> and friends when callback
+lists grow excessively long.
+
+</p><p>The <tt>-&gt;n_cbs_invoked</tt>,
+<tt>-&gt;n_cbs_orphaned</tt>, and <tt>-&gt;n_cbs_adopted</tt>
+fields count the number of callbacks invoked, the number sent
+to other CPUs when this CPU goes offline, and the number
+received from other CPUs when those other CPUs go offline,
+respectively.
+Finally, the <tt>-&gt;blimit</tt> counter is the maximum number of
+RCU callbacks that may be invoked at a given time.
+
+<h5>Dyntick-Idle Handling</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   int dynticks_snap;
+  2   unsigned long dynticks_fqs;
+</pre>
+
+<p>The <tt>-&gt;dynticks_snap</tt> field is used to take a snapshot
+of the corresponding CPU's dyntick-idle state when forcing
+quiescent states, and is therefore accessed from other CPUs.
+Finally, the <tt>-&gt;dynticks_fqs</tt> field is used to
+count the number of times this CPU is determined to be in
+dyntick-idle state, and is used for tracing and debugging purposes.
+
+<h3><a name="The rcu_dynticks Structure">
+The <tt>rcu_dynticks</tt> Structure</a></h3>
+
+<p>The <tt>rcu_dynticks</tt> structure maintains the per-CPU dyntick-idle state
+for the corresponding CPU.
+Unlike the other structures, <tt>rcu_dynticks</tt> is not
+replicated over the different flavors of RCU.
+The fields in this structure may be accessed only from the corresponding
+CPU (and from tracing) unless otherwise stated.
+Its fields are as follows:
+
+<pre>
+  1   int dynticks_nesting;
+  2   int dynticks_nmi_nesting;
+  3   atomic_t dynticks;
+</pre>
+
+<p>The <tt>-&gt;dynticks_nesting</tt> field counts the
+nesting depth of normal interrupts.
+In addition, this counter is incremented when exiting dyntick-idle
+mode and decremented when entering it.
+This counter can therefore be thought of as counting the number
+of reasons why this CPU cannot be permitted to enter dyntick-idle
+mode, aside from non-maskable interrupts (NMIs).
+NMIs are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
+field, except that NMIs that interrupt non-dyntick-idle execution
+are not counted.
+
+</p><p>Finally, the <tt>-&gt;dynticks</tt> field counts the corresponding
+CPU's transitions to and from dyntick-idle mode, so that this counter
+has an even value when the CPU is in dyntick-idle mode and an odd
+value otherwise.
+
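+<p>For example, code forcing quiescent states might sample another
+CPU's <tt>-&gt;dynticks</tt> counter along the following lines
+(an illustrative sketch that ignores the memory-ordering details
+that the real code requires):
+
+<pre>
+  1 int snap = atomic_read(&amp;rdtp-&gt;dynticks);
+  2
+  3 if (!(snap &amp; 0x1)) {
+  4   /* Even value: this CPU is in dyntick-idle mode, */
+  5   /* and is therefore in an extended quiescent state. */
+  6 }
+</pre>
+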
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Why not just count all NMIs?
+	Wouldn't that be simpler and less error prone?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	It seems simpler only until you think hard about how to go about
+	updating the <tt>rcu_dynticks</tt> structure's
+	<tt>-&gt;dynticks</tt> field.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>Additional fields are present for some special-purpose
+builds, and are discussed separately.
+
+<h3><a name="The rcu_head Structure">
+The <tt>rcu_head</tt> Structure</a></h3>
+
+<p>Each <tt>rcu_head</tt> structure represents an RCU callback.
+These structures are normally embedded within RCU-protected data
+structures whose algorithms use asynchronous grace periods.
+In contrast, when using algorithms that block waiting for RCU grace periods,
+RCU users need not provide <tt>rcu_head</tt> structures.
+
+</p><p>The <tt>rcu_head</tt> structure has fields as follows:
+
+<pre>
+  1   struct rcu_head *next;
+  2   void (*func)(struct rcu_head *head);
+</pre>
+
+<p>The <tt>-&gt;next</tt> field is used
+to link the <tt>rcu_head</tt> structures together in the
+lists within the <tt>rcu_data</tt> structures.
+The <tt>-&gt;func</tt> field is a pointer to the function
+to be called when the callback is ready to be invoked, and
+this function is passed a pointer to the <tt>rcu_head</tt>
+structure.
+However, <tt>kfree_rcu()</tt> uses the <tt>-&gt;func</tt>
+field to record the offset of the <tt>rcu_head</tt>
+structure within the enclosing RCU-protected data structure.
+
+</p><p>Both of these fields are used internally by RCU.
+From the viewpoint of RCU users, this structure is an
+opaque &ldquo;cookie&rdquo;.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Given that the callback function <tt>-&gt;func</tt>
+	is passed a pointer to the <tt>rcu_head</tt> structure,
+	how is that function supposed to find the beginning of the
+	enclosing RCU-protected data structure?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	In actual practice, there is a separate callback function per
+	type of RCU-protected data structure.
+	The callback function can therefore use the <tt>container_of()</tt>
+	macro in the Linux kernel (or other pointer-manipulation facilities
+	in other software environments) to find the beginning of the
+	enclosing structure.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
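+
+<p>For example, an RCU-protected data structure might embed an
+<tt>rcu_head</tt> structure and pass a type-specific callback
+function to <tt>call_rcu()</tt> as follows (an illustrative sketch
+using a hypothetical <tt>struct foo</tt>):
+
+<pre>
+  1 struct foo {
+  2   int data;
+  3   struct rcu_head rh;
+  4 };
+  5
+  6 static void foo_reclaim(struct rcu_head *rhp)
+  7 {
+  8   struct foo *fp = container_of(rhp, struct foo, rh);
+  9
+ 10   kfree(fp);
+ 11 }
+ 12
+ 13 /* After making the structure unreachable by new readers: */
+ 14 call_rcu(&amp;fp-&gt;rh, foo_reclaim);
+</pre>
+
+<p>For a simple free-only callback such as this one,
+<tt>kfree_rcu(fp, rh)</tt> could be used instead, in which case the
+<tt>-&gt;func</tt> field records the offset of <tt>rh</tt> within
+<tt>struct foo</tt>, as noted above.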
+
+<h3><a name="RCU-Specific Fields in the task_struct Structure">
+RCU-Specific Fields in the <tt>task_struct</tt> Structure</a></h3>
+
+<p>The <tt>CONFIG_PREEMPT_RCU</tt> implementation uses some
+additional fields in the <tt>task_struct</tt> structure:
+
+<pre>
+ 1 #ifdef CONFIG_PREEMPT_RCU
+ 2   int rcu_read_lock_nesting;
+ 3   union rcu_special rcu_read_unlock_special;
+ 4   struct list_head rcu_node_entry;
+ 5   struct rcu_node *rcu_blocked_node;
+ 6 #endif /* #ifdef CONFIG_PREEMPT_RCU */
+ 7 #ifdef CONFIG_TASKS_RCU
+ 8   unsigned long rcu_tasks_nvcsw;
+ 9   bool rcu_tasks_holdout;
+10   struct list_head rcu_tasks_holdout_list;
+11   int rcu_tasks_idle_cpu;
+12 #endif /* #ifdef CONFIG_TASKS_RCU */
+</pre>
+
+<p>The <tt>-&gt;rcu_read_lock_nesting</tt> field records the
+nesting level for RCU read-side critical sections, and
+the <tt>-&gt;rcu_read_unlock_special</tt> field is a bitmask
+that records special conditions that require <tt>rcu_read_unlock()</tt>
+to do additional work.
+The <tt>-&gt;rcu_node_entry</tt> field is used to form lists of
+tasks that have blocked within preemptible-RCU read-side critical
+sections and the <tt>-&gt;rcu_blocked_node</tt> field references
+the <tt>rcu_node</tt> structure whose list this task is a member of,
+or <tt>NULL</tt> if it is not blocked within a preemptible-RCU
+read-side critical section.
+
+<p>The <tt>-&gt;rcu_tasks_nvcsw</tt> field tracks the number of
+voluntary context switches that this task had undergone at the
+beginning of the current tasks-RCU grace period,
+<tt>-&gt;rcu_tasks_holdout</tt> is set if the current tasks-RCU
+grace period is waiting on this task, <tt>-&gt;rcu_tasks_holdout_list</tt>
+is a list element enqueuing this task on the holdout list,
+and <tt>-&gt;rcu_tasks_idle_cpu</tt> tracks which CPU this
+idle task is running on, but only if the task is currently running,
+that is, if the CPU is currently idle.
+
+<h3><a name="Accessor Functions">
+Accessor Functions</a></h3>
+
+<p>The following listing shows the
+<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first()</tt>,
+<tt>rcu_for_each_nonleaf_node_breadth_first()</tt>, and
+<tt>rcu_for_each_leaf_node()</tt> function and macros:
+
+<pre>
+  1 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+  2 {
+  3   return &amp;rsp-&gt;node[0];
+  4 }
+  5
+  6 #define rcu_for_each_node_breadth_first(rsp, rnp) \
+  7   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
+  8        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+  9
+ 10 #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
+ 11   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
+ 12        (rnp) &lt; (rsp)-&gt;level[NUM_RCU_LVLS - 1]; (rnp)++)
+ 13
+ 14 #define rcu_for_each_leaf_node(rsp, rnp) \
+ 15   for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
+ 16        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+</pre>
+
+<p>The <tt>rcu_get_root()</tt> function simply returns a pointer to the
+first element of the specified <tt>rcu_state</tt> structure's
+<tt>-&gt;node[]</tt> array, which is the root <tt>rcu_node</tt>
+structure.
+
+</p><p>As noted earlier, the <tt>rcu_for_each_node_breadth_first()</tt>
+macro takes advantage of the layout of the <tt>rcu_node</tt>
+structures in the <tt>rcu_state</tt> structure's
+<tt>-&gt;node[]</tt> array, performing a breadth-first traversal by
+simply traversing the array in order.
+The <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> macro operates
+similarly, but traverses only the first part of the array, thus excluding
+the leaf <tt>rcu_node</tt> structures.
+Finally, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
+the last part of the array, thus traversing only the leaf
+<tt>rcu_node</tt> structures.
+
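+<p>For example, code that needs to visit each leaf <tt>rcu_node</tt>
+structure in turn might use the last of these macros as follows
+(an illustrative sketch, with the per-node processing left
+unspecified):
+
+<pre>
+  1 struct rcu_node *rnp;
+  2
+  3 rcu_for_each_leaf_node(rsp, rnp) {
+  4   /* Process this leaf rcu_node structure, for example, */
+  5   /* by scanning its CPUs' quiescent-state information. */
+  6 }
+</pre>
+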
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	What do <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> and
+	<tt>rcu_for_each_leaf_node()</tt> do if the <tt>rcu_node</tt> tree
+	contains only a single node?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	In the single-node case,
+	<tt>rcu_for_each_nonleaf_node_breadth_first()</tt> is a no-op
+	and <tt>rcu_for_each_leaf_node()</tt> traverses the single node.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<h3><a name="Summary">
+Summary</a></h3>
+
+<p>So each flavor of RCU is represented by an <tt>rcu_state</tt> structure,
+which contains a combining tree of <tt>rcu_node</tt> and
+<tt>rcu_data</tt> structures.
+Finally, in <tt>CONFIG_NO_HZ_IDLE</tt> kernels, each CPU's dyntick-idle
+state is tracked by an <tt>rcu_dynticks</tt> structure.
+
+</p><p>If you made it this far, you are well prepared to read the code
+walkthroughs in the other articles in this series.
+
+<h3><a name="Acknowledgments">
+Acknowledgments</a></h3>
+
+<p>I owe thanks to Cyrill Gorcunov, Mathieu Desnoyers, Dhaval Giani, Paul
+Turner, Abhishek Srivastava, Matt Kowalczyk, and Serge Hallyn
+for helping me get this document into a more human-readable state.
+
+<h3><a name="Legal Statement">
+Legal Statement</a></h3>
+
+<p>This work represents the view of the author and does not necessarily
+represent the view of IBM.
+
+</p><p>Linux is a registered trademark of Linus Torvalds.
+
+</p><p>Other company, product, and service names may be trademarks or
+service marks of others.
+
+</body></html>
diff --git a/Documentation/RCU/Design/Data-Structures/HugeTreeClassicRCU.svg b/Documentation/RCU/Design/Data-Structures/HugeTreeClassicRCU.svg
new file mode 100644
index 0000000..2bf12b4
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/HugeTreeClassicRCU.svg
@@ -0,0 +1,939 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:37:22 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="15.1in"
+   height="11.2in"
+   viewBox="-66 -66 18087 13407"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="HugeTreeClassicRCU.fig">
+  <metadata
+     id="metadata224">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs222">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3982"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1134"
+     inkscape:window-height="789"
+     id="namedview220"
+     showgrid="false"
+     inkscape:zoom="0.60515873"
+     inkscape:cx="679.5"
+     inkscape:cy="504"
+     inkscape:window-x="786"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="17100"
+       height="8325"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="11025"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="4275"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="5400"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="9900"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="14400"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="900"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect18" />
+    <!-- Line: box -->
+    <rect
+       x="7650"
+       y="900"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect20" />
+    <!-- Line -->
+    <polyline
+       points="3150,9225 3150,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 3150 9225 - 3150 7560-->
+    <!-- Circle -->
+    <circle
+       cx="8550"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="9000"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="9450"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle30" />
+    <!-- Line -->
+    <polyline
+       points="6750,6300 8250,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 6750 6300 - 8391 4890-->
+    <!-- Line -->
+    <polyline
+       points="11250,6300 9747,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 11250 6300 - 9606 4890-->
+    <!-- Circle -->
+    <circle
+       cx="13950"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle40" />
+    <!-- Circle -->
+    <circle
+       cx="13500"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle42" />
+    <!-- Circle -->
+    <circle
+       cx="13050"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle44" />
+    <!-- Circle -->
+    <circle
+       cx="9450"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle46" />
+    <!-- Circle -->
+    <circle
+       cx="9000"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle48" />
+    <!-- Circle -->
+    <circle
+       cx="8550"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle50" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle52" />
+    <!-- Circle -->
+    <circle
+       cx="4500"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle54" />
+    <!-- Circle -->
+    <circle
+       cx="4050"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle56" />
+    <!-- Circle -->
+    <circle
+       cx="1800"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle58" />
+    <!-- Circle -->
+    <circle
+       cx="2250"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle60" />
+    <!-- Circle -->
+    <circle
+       cx="2700"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle62" />
+    <!-- Circle -->
+    <circle
+       cx="15300"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle64" />
+    <!-- Circle -->
+    <circle
+       cx="15750"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle66" />
+    <!-- Circle -->
+    <circle
+       cx="16200"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="10800"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="11250"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="11700"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="6300"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="6750"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="7200"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle80" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect82" />
+    <!-- Line: box -->
+    <rect
+       x="1800"
+       y="9225"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect84" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect86" />
+    <!-- Line: box -->
+    <rect
+       x="6300"
+       y="9270"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect88" />
+    <!-- Line: box -->
+    <rect
+       x="8955"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect90" />
+    <!-- Line: box -->
+    <rect
+       x="10755"
+       y="9270"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect92" />
+    <!-- Line: box -->
+    <rect
+       x="13455"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect94" />
+    <!-- Line: box -->
+    <rect
+       x="15255"
+       y="9270"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect96" />
+    <!-- Line -->
+    <polyline
+       points="11700,3600 10197,2310 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline98" />
+    <!-- Arrowhead on XXXpoint 11700 3600 - 10056 2190-->
+    <!-- Line -->
+    <polyline
+       points="6300,3600 7800,2310 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline102" />
+    <!-- Arrowhead on XXXpoint 6300 3600 - 7941 2190-->
+    <!-- Line -->
+    <polyline
+       points="3150,6300 4650,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline106" />
+    <!-- Arrowhead on XXXpoint 3150 6300 - 4791 4890-->
+    <!-- Line -->
+    <polyline
+       points="14850,6300 13347,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline110" />
+    <!-- Arrowhead on XXXpoint 14850 6300 - 13206 4890-->
+    <!-- Line -->
+    <polyline
+       points="1350,11475 1350,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline114" />
+    <!-- Arrowhead on XXXpoint 1350 11475 - 1350 7560-->
+    <!-- Line -->
+    <polyline
+       points="16650,9225 16650,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline118" />
+    <!-- Arrowhead on XXXpoint 16650 9225 - 16650 7560-->
+    <!-- Line -->
+    <polyline
+       points="14850,11475 14850,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline122" />
+    <!-- Arrowhead on XXXpoint 14850 11475 - 14850 7560-->
+    <!-- Line -->
+    <polyline
+       points="12150,9225 12150,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline126" />
+    <!-- Arrowhead on XXXpoint 12150 9225 - 12150 7560-->
+    <!-- Line -->
+    <polyline
+       points="10350,11475 10350,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline130" />
+    <!-- Arrowhead on XXXpoint 10350 11475 - 10350 7560-->
+    <!-- Line -->
+    <polyline
+       points="7650,9225 7650,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline134" />
+    <!-- Arrowhead on XXXpoint 7650 9225 - 7650 7560-->
+    <!-- Line -->
+    <polyline
+       points="5850,11475 5850,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline138" />
+    <!-- Arrowhead on XXXpoint 5850 11475 - 5850 7560-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12375"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text142">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12375"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5625"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text146">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5625"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text148">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6750"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text150">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6750"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text152">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11250"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text154">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11250"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text156">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15750"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text158">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15750"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text160">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text164">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text166">CPU 0</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text168">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text170">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="10800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text172">CPU 15</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="9675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text174">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="10125"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text176">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5850"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text178">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5850"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text180">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5850"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text182">CPU 21823</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7650"
+       y="10845"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text184">CPU 21839</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7650"
+       y="10170"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text186">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7650"
+       y="9720"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text188">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10305"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text190">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10305"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text192">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10305"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text194">CPU 43679</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="10845"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text196">CPU 43695</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="10170"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text198">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="9720"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text200">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14805"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text202">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14805"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text204">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14805"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text206">CPU 65519</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="16605"
+       y="10845"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text208">CPU 65535</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="16605"
+       y="10170"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text210">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="16605"
+       y="9720"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text212">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="675"
+       y="450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="start"
+       id="text214">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9000"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text216">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9000"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text218">rcu_node</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/TreeLevel.svg b/Documentation/RCU/Design/Data-Structures/TreeLevel.svg
new file mode 100644
index 0000000..7a7eb3b
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/TreeLevel.svg
@@ -0,0 +1,828 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:41:29 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="17.7in"
+   height="10.4in"
+   viewBox="-66 -66 21237 12507"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="TreeLevel.fig">
+  <metadata
+     id="metadata216">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs214">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3974"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1023"
+     inkscape:window-height="1148"
+     id="namedview212"
+     showgrid="false"
+     inkscape:zoom="0.55869424"
+     inkscape:cx="796.50006"
+     inkscape:cy="467.99997"
+     inkscape:window-x="897"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="20655"
+       height="8325"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="14130"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="7380"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="8505"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="13005"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="17505"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="4005"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect18" />
+    <!-- Line: box -->
+    <rect
+       x="10755"
+       y="900"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect20" />
+    <!-- Line -->
+    <polyline
+       points="6255,9225 6255,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 6255 9225 - 6255 7560-->
+    <!-- Circle -->
+    <circle
+       cx="11655"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="12105"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="12555"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle30" />
+    <!-- Line -->
+    <polyline
+       points="9855,6300 11355,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 9855 6300 - 11496 4890-->
+    <!-- Line -->
+    <polyline
+       points="14355,6300 12852,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 14355 6300 - 12711 4890-->
+    <!-- Circle -->
+    <circle
+       cx="17055"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle40" />
+    <!-- Circle -->
+    <circle
+       cx="16605"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle42" />
+    <!-- Circle -->
+    <circle
+       cx="16155"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle44" />
+    <!-- Circle -->
+    <circle
+       cx="12555"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle46" />
+    <!-- Circle -->
+    <circle
+       cx="12105"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle48" />
+    <!-- Circle -->
+    <circle
+       cx="11655"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle50" />
+    <!-- Circle -->
+    <circle
+       cx="8055"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle52" />
+    <!-- Circle -->
+    <circle
+       cx="7605"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle54" />
+    <!-- Circle -->
+    <circle
+       cx="7155"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle56" />
+    <!-- Circle -->
+    <circle
+       cx="4905"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle58" />
+    <!-- Circle -->
+    <circle
+       cx="5355"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle60" />
+    <!-- Circle -->
+    <circle
+       cx="5805"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle62" />
+    <!-- Circle -->
+    <circle
+       cx="18405"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle64" />
+    <!-- Circle -->
+    <circle
+       cx="18855"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle66" />
+    <!-- Circle -->
+    <circle
+       cx="19305"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="13905"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="14355"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="14805"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="9405"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="9855"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="10305"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle80" />
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="1125"
+       width="3150"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:21; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect82" />
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="2250"
+       width="3150"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:21; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect84" />
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="3375"
+       width="3150"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:21; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect86" />
+    <!-- Line -->
+    <polyline
+       points="14805,3600 13302,2310 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline88" />
+    <!-- Arrowhead on XXXpoint 14805 3600 - 13161 2190-->
+    <!-- Line -->
+    <polyline
+       points="9405,3600 10905,2310 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline92" />
+    <!-- Arrowhead on XXXpoint 9405 3600 - 11046 2190-->
+    <!-- Line -->
+    <polyline
+       points="6255,6300 7755,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline96" />
+    <!-- Arrowhead on XXXpoint 6255 6300 - 7896 4890-->
+    <!-- Line -->
+    <polyline
+       points="17955,6300 16452,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline100" />
+    <!-- Arrowhead on XXXpoint 17955 6300 - 16311 4890-->
+    <!-- Line -->
+    <polyline
+       points="4455,11025 4455,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline104" />
+    <!-- Arrowhead on XXXpoint 4455 11025 - 4455 7560-->
+    <!-- Line -->
+    <polyline
+       points="19755,9225 19755,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline108" />
+    <!-- Arrowhead on XXXpoint 19755 9225 - 19755 7560-->
+    <!-- Line -->
+    <polyline
+       points="17955,11025 17955,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline112" />
+    <!-- Arrowhead on XXXpoint 17955 11025 - 17955 7560-->
+    <!-- Line -->
+    <polyline
+       points="15255,9225 15255,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline116" />
+    <!-- Arrowhead on XXXpoint 15255 9225 - 15255 7560-->
+    <!-- Line -->
+    <polyline
+       points="13455,11025 13455,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline120" />
+    <!-- Arrowhead on XXXpoint 13455 11025 - 13455 7560-->
+    <!-- Line -->
+    <polyline
+       points="10755,9225 10755,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline124" />
+    <!-- Arrowhead on XXXpoint 10755 9225 - 10755 7560-->
+    <!-- Line -->
+    <polyline
+       points="8955,11025 8955,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline128" />
+    <!-- Arrowhead on XXXpoint 8955 11025 - 8955 7560-->
+    <!-- Line: box -->
+    <rect
+       x="12105"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect132" />
+    <!-- Line: box -->
+    <rect
+       x="13905"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect134" />
+    <!-- Line: box -->
+    <rect
+       x="16605"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect136" />
+    <!-- Line: box -->
+    <rect
+       x="18405"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect138" />
+    <!-- Line: box -->
+    <rect
+       x="9405"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect140" />
+    <!-- Line: box -->
+    <rect
+       x="7605"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect142" />
+    <!-- Line: box -->
+    <rect
+       x="4905"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect144" />
+    <!-- Line: box -->
+    <rect
+       x="3105"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect146" />
+    <!-- Line -->
+    <polyline
+       points="3375,1575 10701,1575 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline148" />
+    <!-- Arrowhead on XXXpoint 3375 1575 - 10890 1575-->
+    <!-- Line -->
+    <polyline
+       points="3375,3825 4050,3825 4050,5400 2700,5400 2700,6975 3951,6975 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline152" />
+    <!-- Arrowhead on XXXpoint 2700 6975 - 4140 6975-->
+    <!-- Line -->
+    <polyline
+       points="3375,2700 5175,2700 5175,4275 7326,4275 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline156" />
+    <!-- Arrowhead on XXXpoint 5175 4275 - 7515 4275-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15480"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text160">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15480"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text164">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text166">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9855"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text168">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9855"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text170">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14355"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text172">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14355"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text174">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="18855"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text176">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="18855"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text178">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5355"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text180">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5355"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text182">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text184">-&gt;level[0]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="2925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text186">-&gt;level[1]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text188">-&gt;level[2]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text190">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text192">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6255"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text194">CPU 15</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4455"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text196">CPU 0</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="19755"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text198">CPU 65535</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="17955"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text200">CPU 65519</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15255"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text202">CPU 43695</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="13455"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text204">CPU 43679</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10755"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text206">CPU 21839</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8955"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text208">CPU 21823</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="start"
+       id="text210">struct rcu_state</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/TreeMapping.svg b/Documentation/RCU/Design/Data-Structures/TreeMapping.svg
new file mode 100644
index 0000000..729cfa9
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/TreeMapping.svg
@@ -0,0 +1,305 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:43:22 2015 -->
+
+<!-- Magnification: 1.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="3.1in"
+   height="0.9in"
+   viewBox="-12 -12 3699 1074"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="TreeMapping.fig">
+  <metadata
+     id="metadata66">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs64">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;">
+      <path
+         id="path3836"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Mend"
+       style="overflow:visible;">
+      <path
+         id="path3842"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(0.6) rotate(180) translate(0,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3824"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="991"
+     inkscape:window-height="606"
+     id="namedview62"
+     showgrid="false"
+     inkscape:zoom="3.0752688"
+     inkscape:cx="139.5"
+     inkscape:cy="40.5"
+     inkscape:window-x="891"
+     inkscape:window-y="177"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="3675"
+       height="1050"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="600"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="1125"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="1650"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="2175"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="3225"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect18" />
+    <!-- Line -->
+    <polyline
+       points="675,375 675,150 300,150 300,358 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 300 150 - 300 390-->
+    <!-- Line -->
+    <polyline
+       points="1200,675 1200,900 300,900 300,691 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline24" />
+    <!-- Arrowhead on XXXpoint 300 900 - 300 660-->
+    <!-- Line -->
+    <polyline
+       points="1725,375 1725,150 900,150 900,358 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline28" />
+    <!-- Arrowhead on XXXpoint 900 150 - 900 390-->
+    <!-- Line -->
+    <polyline
+       points="2250,375 2250,75 825,75 825,358 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 825 75 - 825 390-->
+    <!-- Line -->
+    <polyline
+       points="2775,675 2775,900 1425,900 1425,691 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 1425 900 - 1425 660-->
+    <!-- Line -->
+    <polyline
+       points="3300,675 3300,975 1350,975 1350,691 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 1350 975 - 1350 660-->
+    <!-- Line: box -->
+    <rect
+       x="2700"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect44" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="300"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text46">0:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text48">4:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1875"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text50">0:1  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text52">2:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2925"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text54">4:5  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3450"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text56">6:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="825"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text58">0:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3600"
+       y="150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="end"
+       id="text60">struct rcu_state</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/TreeMappingLevel.svg b/Documentation/RCU/Design/Data-Structures/TreeMappingLevel.svg
new file mode 100644
index 0000000..5b416a4
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/TreeMappingLevel.svg
@@ -0,0 +1,380 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:45:19 2015 -->
+
+<!-- Magnification: 1.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="3.1in"
+   height="1.8in"
+   viewBox="-12 -12 3699 2124"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="TreeMappingLevel.svg">
+  <metadata
+     id="metadata98">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs96">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;">
+      <path
+         id="path3868"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1598"
+     inkscape:window-height="1211"
+     id="namedview94"
+     showgrid="false"
+     inkscape:zoom="5.2508961"
+     inkscape:cx="139.5"
+     inkscape:cy="81"
+     inkscape:window-x="840"
+     inkscape:window-y="122"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="3675"
+       height="2100"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="1350"
+       width="750"
+       height="225"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="1575"
+       width="750"
+       height="225"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="1800"
+       width="750"
+       height="225"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Arc -->
+    <path
+       style="stroke:#000000;stroke-width:7;stroke-linecap:butt;"
+       d="M 1800,900 A 118 118  0  0  0  1800  1125 "
+       id="path14" />
+    <!-- Arc -->
+    <path
+       style="stroke:#000000;stroke-width:7;stroke-linecap:butt;"
+       d="M 750,900 A 75 75  0  0  0  750  1050 "
+       id="path16" />
+    <!-- Line -->
+    <polyline
+       points="750,900 750,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline18" />
+    <!-- Arrowhead on XXXpoint 750 900 - 750 660-->
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect22" />
+    <!-- Line: box -->
+    <rect
+       x="600"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect24" />
+    <!-- Line: box -->
+    <rect
+       x="1650"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect26" />
+    <!-- Line: box -->
+    <rect
+       x="2175"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="3225"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect30" />
+    <!-- Line -->
+    <polyline
+       points="675,375 675,150 300,150 300,358 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 300 150 - 300 390-->
+    <!-- Line -->
+    <polyline
+       points="1725,375 1725,150 900,150 900,358 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 900 150 - 900 390-->
+    <!-- Line -->
+    <polyline
+       points="2250,375 2250,75 825,75 825,358 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 825 75 - 825 390-->
+    <!-- Line -->
+    <polyline
+       points="2775,675 2775,975 1425,975 1425,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 1425 975 - 1425 660-->
+    <!-- Line: box -->
+    <rect
+       x="2700"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect48" />
+    <!-- Line: box -->
+    <rect
+       x="1125"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect50" />
+    <!-- Line -->
+    <polyline
+       points="3300,675 3300,1050 1350,1050 1350,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline52" />
+    <!-- Arrowhead on XXXpoint 1350 1050 - 1350 660-->
+    <!-- Line -->
+    <polyline
+       points="825,1425 975,1425 975,1200 225,1200 225,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline56" />
+    <!-- Arrowhead on XXXpoint 225 1200 - 225 660-->
+    <!-- Line -->
+    <polyline
+       points="1200,675 1200,975 300,975 300,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 300 975 - 300 660-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="150"
+       y="1500"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="108"
+       text-anchor="start"
+       id="text64">-&gt;level[0]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="150"
+       y="1725"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="108"
+       text-anchor="start"
+       id="text66">-&gt;level[1]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="150"
+       y="1950"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="108"
+       text-anchor="start"
+       id="text68">-&gt;level[2]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="300"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text70">0:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text72">4:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1875"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text74">0:1  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text76">2:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2925"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text78">4:5  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3450"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text80">6:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="825"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text82">0:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3600"
+       y="150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="end"
+       id="text84">struct rcu_state</text>
+    <!-- Line -->
+    <polyline
+       points="825,1875 1800,1875 1800,1125 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:none"
+       id="polyline86" />
+    <!-- Line -->
+    <polyline
+       points="1800,900 1800,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline88" />
+    <!-- Arrowhead on XXXpoint 1800 900 - 1800 660-->
+    <!-- Line -->
+    <polyline
+       points="825,1650 1200,1650 1200,1125 750,1125 750,1050 "
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline92" />
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/blkd_task.svg b/Documentation/RCU/Design/Data-Structures/blkd_task.svg
new file mode 100644
index 0000000..00e810b
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/blkd_task.svg
@@ -0,0 +1,843 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="10.1in"
+   height="8.6in"
+   viewBox="-44 -44 12088 10288"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="blkd_task.fig">
+  <metadata
+     id="metadata212">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs210">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3970"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1087"
+     inkscape:window-height="1144"
+     id="namedview208"
+     showgrid="false"
+     inkscape:zoom="1.0495049"
+     inkscape:cx="454.50003"
+     inkscape:cy="387.00003"
+     inkscape:window-x="833"
+     inkscape:window-y="28"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect10" />
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5688,5912 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline12" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
+    <polyline
+       points="5714 6068 5704 5822 5598 6044 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline14" />
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4486,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline16" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
+    <polyline
+       points="4514 7418 4506 7172 4396 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline18" />
+    <!-- Line -->
+    <polyline
+       points="1040,9300 1476,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
+    <polyline
+       points="1504 7418 1496 7172 1386 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline22" />
+    <!-- Line -->
+    <polyline
+       points="2240,8100 2676,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline24" />
+    <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
+    <polyline
+       points="2704 6218 2696 5972 2586 6194 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline26" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect30" />
+    <!-- Line -->
+    <polyline
+       points="1350,3450 2350,2590 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+    <!-- Line -->
+    <polyline
+       points="4950,3450 3948,2590 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+    <!-- Line -->
+    <polyline
+       points="4050,6600 4050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+    <!-- Line -->
+    <polyline
+       points="1050,6600 1050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,5400 2250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline48" />
+    <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,8100 2250,6364 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline52" />
+    <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
+    <!-- Line -->
+    <polyline
+       points="1050,9300 1050,7564 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline56" />
+    <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4050,7564 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5250,6364 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline64" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle80" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle82" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle84" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect86" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect88" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect90" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect92" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="1650"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect94" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect96" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect98" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect100" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect102" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect104" />
+    <!-- Line -->
+    <polygon
+       points="5550,3450 7350,2850 7350,5100 5550,4350 5550,3450 "
+       style="stroke:#000000;stroke-width:14; stroke-linejoin:miter; stroke-linecap:butt; stroke-dasharray:120 120;fill:#ffbfbf; "
+       id="polygon106" />
+    <!-- Line -->
+    <polyline
+       points="9300,3150 10734,3150 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline108" />
+    <!-- Arrowhead on XXXpoint 9300 3150 - 10860 3150-->
+    <!-- Line: box -->
+    <rect
+       x="10800"
+       y="2850"
+       width="1200"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect112" />
+    <!-- Line -->
+    <polyline
+       points="11400,3600 11400,4284 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline114" />
+    <!-- Arrowhead on XXXpoint 11400 3600 - 11400 4410-->
+    <!-- Line: box -->
+    <rect
+       x="10800"
+       y="4350"
+       width="1200"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect118" />
+    <!-- Line -->
+    <polyline
+       points="11400,5100 11400,5784 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline120" />
+    <!-- Arrowhead on XXXpoint 11400 5100 - 11400 5910-->
+    <!-- Line: box -->
+    <rect
+       x="10800"
+       y="5850"
+       width="1200"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect124" />
+    <!-- Line -->
+    <polyline
+       points="9300,3900 9900,3900 9900,4650 10734,4650 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline126" />
+    <!-- Arrowhead on XXXpoint 9900 4650 - 10860 4650-->
+    <!-- Line -->
+    <polyline
+       points="9300,4650 9600,4650 9600,6150 10734,6150 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline130" />
+    <!-- Arrowhead on XXXpoint 9600 6150 - 10860 6150-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text134">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="1950"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text136">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text138">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text152">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text154">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text156">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text158">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text160">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text164">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text166">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text168">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text170">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text172">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text174">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text176">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text178">rcu_sched</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11400"
+       y="3300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="216"
+       text-anchor="middle"
+       id="text180">T3</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11400"
+       y="4800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="216"
+       text-anchor="middle"
+       id="text182">T2</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11400"
+       y="6300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="216"
+       text-anchor="middle"
+       id="text184">T1</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5400 5250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline186" />
+    <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect190" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="2850"
+       width="1950"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect192" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="3600"
+       width="1950"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect194" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="4350"
+       width="1950"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect196" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text198">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text200">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7500"
+       y="3300"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text202">blkd_tasks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7500"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text204">gp_tasks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7500"
+       y="4800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text206">exp_tasks</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/nxtlist.svg b/Documentation/RCU/Design/Data-Structures/nxtlist.svg
new file mode 100644
index 0000000..abc4cc7
--- /dev/null
+++ b/Documentation/RCU/Design/Data-Structures/nxtlist.svg
@@ -0,0 +1,396 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:39:46 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="10.4in"
+   height="10.4in"
+   viewBox="-66 -66 12507 12507"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="nxtlist.fig">
+  <metadata
+     id="metadata94">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs92">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3852"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="925"
+     inkscape:window-height="928"
+     id="namedview90"
+     showgrid="false"
+     inkscape:zoom="0.80021373"
+     inkscape:cx="467.99997"
+     inkscape:cy="467.99997"
+     inkscape:window-x="948"
+     inkscape:window-y="73"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="1125"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="2250"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="3375"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="4500"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="0"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="1125"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect18" />
+    <!-- Line -->
+    <polyline
+       points="11475,2250 11475,3276 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 11475 2250 - 11475 3465-->
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="6750"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect24" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="7875"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect26" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="10125"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="11250"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect30" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="3375"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect32" />
+    <!-- Line -->
+    <polyline
+       points="11475,5625 11475,6651 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 11475 5625 - 11475 6840-->
+    <!-- Line -->
+    <polyline
+       points="7875,225 10476,225 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 7875 225 - 10665 225-->
+    <!-- Line -->
+    <polyline
+       points="7875,1350 9675,1350 9675,675 7971,675 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline42" />
+    <!-- Arrowhead on XXXpoint 9675 675 - 7785 675-->
+    <!-- Line -->
+    <polyline
+       points="7875,2475 9675,2475 9675,4725 10476,4725 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 9675 4725 - 10665 4725-->
+    <!-- Line -->
+    <polyline
+       points="7875,3600 9225,3600 9225,5175 10476,5175 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 9225 5175 - 10665 5175-->
+    <!-- Line -->
+    <polyline
+       points="7875,4725 8775,4725 8775,11475 10476,11475 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 8775 11475 - 10665 11475-->
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="4500"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect58" />
+    <!-- Line -->
+    <polyline
+       points="11475,9000 11475,10026 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 11475 9000 - 11475 10215-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text64">nxtlist</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text66">nxttail[RCU_DONE_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="2925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text68">nxttail[RCU_WAIT_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text70">nxttail[RCU_NEXT_READY_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="5175"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text72">nxttail[RCU_NEXT_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="675"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text74">CB 1</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="1800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text76">next</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="7425"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text78">CB 3</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="8550"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text80">next</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="10800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text82">CB 4</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text84">next</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="4050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text86">CB 2</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="5175"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text88">next</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Requirements/2013-08-is-it-dead.png b/Documentation/RCU/Design/Requirements/2013-08-is-it-dead.png
deleted file mode 100644
index 7496a55..0000000
--- a/Documentation/RCU/Design/Requirements/2013-08-is-it-dead.png
+++ /dev/null
Binary files differ
diff --git a/Documentation/RCU/Design/Requirements/RCUApplicability.svg b/Documentation/RCU/Design/Requirements/RCUApplicability.svg
deleted file mode 100644
index ebcbeee..0000000
--- a/Documentation/RCU/Design/Requirements/RCUApplicability.svg
+++ /dev/null
@@ -1,237 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Creator: fig2dev Version 3.2 Patchlevel 5d -->
-
-<!-- CreationDate: Tue Mar  4 18:34:25 2014 -->
-
-<!-- Magnification: 3.000 -->
-
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://creativecommons.org/ns#"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="1089.1382"
-   height="668.21368"
-   viewBox="-2121 -36 14554.634 8876.4061"
-   id="svg2"
-   version="1.1"
-   inkscape:version="0.48.3.1 r9886"
-   sodipodi:docname="RCUApplicability.svg">
-  <metadata
-     id="metadata40">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-        <dc:title />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <defs
-     id="defs38" />
-  <sodipodi:namedview
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1"
-     objecttolerance="10"
-     gridtolerance="10"
-     guidetolerance="10"
-     inkscape:pageopacity="0"
-     inkscape:pageshadow="2"
-     inkscape:window-width="849"
-     inkscape:window-height="639"
-     id="namedview36"
-     showgrid="false"
-     inkscape:zoom="0.51326165"
-     inkscape:cx="544.56912"
-     inkscape:cy="334.10686"
-     inkscape:window-x="149"
-     inkscape:window-y="448"
-     inkscape:window-maximized="0"
-     inkscape:current-layer="g4"
-     fit-margin-top="5"
-     fit-margin-left="5"
-     fit-margin-right="5"
-     fit-margin-bottom="5" />
-  <g
-     style="fill:none;stroke-width:0.025in"
-     id="g4"
-     transform="translate(-2043.6828,14.791398)">
-    <!-- Line: box -->
-    <rect
-       x="0"
-       y="0"
-       width="14400"
-       height="8775"
-       rx="0"
-       style="fill:#ffa1a1;stroke:#000000;stroke-width:21;stroke-linecap:butt;stroke-linejoin:miter"
-       id="rect6" />
-    <!-- Line: box -->
-    <rect
-       x="1350"
-       y="0"
-       width="11700"
-       height="6075"
-       rx="0"
-       style="fill:#ffff00;stroke:#000000;stroke-width:21;stroke-linecap:butt;stroke-linejoin:miter"
-       id="rect8" />
-    <!-- Line: box -->
-    <rect
-       x="2700"
-       y="0"
-       width="9000"
-       height="4275"
-       rx="0"
-       style="fill:#00ff00;stroke:#000000;stroke-width:21;stroke-linecap:butt;stroke-linejoin:miter"
-       id="rect10" />
-    <!-- Line: box -->
-    <rect
-       x="4050"
-       y="0"
-       width="6300"
-       height="2475"
-       rx="0"
-       style="fill:#87cfff;stroke:#000000;stroke-width:21;stroke-linecap:butt;stroke-linejoin:miter"
-       id="rect12" />
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="900"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text14"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3017">Read-Mostly, Stale &amp;</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="1350"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text16"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3019">Inconsistent Data OK</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="1800"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text18"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3021">(RCU Works Great!!!)</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="3825"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text20"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3023">(RCU Works Well)</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="3375"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text22"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3025">Read-Mostly, Need Consistent Data</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="5175"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text24"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3027">Read-Write, Need Consistent Data</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="6975"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text26"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-       sodipodi:linespacing="125%">Update-Mostly, Need Consistent Data</text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="5625"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text28"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3029">(RCU Might Be OK...)</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="7875"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text30"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-       sodipodi:linespacing="125%">(1) Provide Existence Guarantees For Update-Friendly Mechanisms</text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="8325"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text32"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-       sodipodi:linespacing="125%">(2) Provide Wait-Free Read-Side Primitives for Real-Time Use)</text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="7425"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text34"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-       sodipodi:linespacing="125%">(RCU is Very Unlikely to be the Right Tool For The Job, But it Can:</text>
-  </g>
-</svg>
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index a725f99..e7e24b3 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -1,5 +1,3 @@
-<!-- DO NOT HAND EDIT. -->
-<!-- Instead, edit Documentation/RCU/Design/Requirements/Requirements.htmlx and run 'sh htmlqqz.sh Documentation/RCU/Design/Requirements/Requirements' -->
 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
         "http://www.w3.org/TR/html4/loose.dtd">
         <html>
@@ -65,8 +63,8 @@
 
 <p>
 This is followed by a <a href="#Summary">summary</a>,
-which is in turn followed by the inevitable
-<a href="#Answers to Quick Quizzes">answers to the quick quizzes</a>.
+however, the answer to each quick quiz immediately follows the quiz.
+Select the big white space with your mouse to see the answer.
 
 <h2><a name="Fundamental Requirements">Fundamental Requirements</a></h2>
 
@@ -153,13 +151,27 @@
 </blockquote>
 cannot happen.
 
-<p><a name="Quick Quiz 1"><b>Quick Quiz 1</b>:</a>
-Wait a minute!
-You said that updaters can make useful forward progress concurrently
-with readers, but pre-existing readers will block
-<tt>synchronize_rcu()</tt>!!!
-Just who are you trying to fool???
-<br><a href="#qq1answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Wait a minute!
+	You said that updaters can make useful forward progress concurrently
+	with readers, but pre-existing readers will block
+	<tt>synchronize_rcu()</tt>!!!
+	Just who are you trying to fool???
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	First, if updaters do not wish to be blocked by readers, they can use
+	<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt>, which will
+	be discussed later.
+	Second, even when using <tt>synchronize_rcu()</tt>, the other
+	update-side code does run concurrently with readers, whether
+	pre-existing or not.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 This scenario resembles one of the first uses of RCU in
@@ -210,9 +222,20 @@
 with <tt>recovery()</tt>, but with little or no synchronization
 overhead in <tt>do_something_dlm()</tt>.
 
-<p><a name="Quick Quiz 2"><b>Quick Quiz 2</b>:</a>
-Why is the <tt>synchronize_rcu()</tt> on line&nbsp;28 needed?
-<br><a href="#qq2answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Why is the <tt>synchronize_rcu()</tt> on line&nbsp;28 needed?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	Without that extra grace period, memory reordering could result in
+	<tt>do_something_dlm()</tt> executing <tt>do_something()</tt>
+	concurrently with the last bits of <tt>recovery()</tt>.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 In order to avoid fatal problems such as deadlocks,
@@ -332,12 +355,27 @@
 optimizations, for example, the use of <tt>gp</tt> as a scratch
 location immediately preceding the assignment.
 
-<p><a name="Quick Quiz 3"><b>Quick Quiz 3</b>:</a>
-But <tt>rcu_assign_pointer()</tt> does nothing to prevent the
-two assignments to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt>
-from being reordered.
-Can't that also cause problems?
-<br><a href="#qq3answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	But <tt>rcu_assign_pointer()</tt> does nothing to prevent the
+	two assignments to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt>
+	from being reordered.
+	Can't that also cause problems?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	No, it cannot.
+	The readers cannot see either of these two fields until
+	the assignment to <tt>gp</tt>, by which time both fields are
+	fully initialized.
+	So reordering the assignments
+	to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt> cannot possibly
+	cause any problems.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
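+
+<p>
+For concreteness, here is a minimal update-side sketch of this pattern,
+using the <tt>gp</tt> pointer discussed above together with a simplified
+two-field structure; the <tt>struct foo</tt> layout and the
+<tt>add_gp_sketch()</tt> name are purely illustrative:
+
+<blockquote>
+<pre>
+ 1 struct foo {
+ 2   int a;
+ 3   int b;
+ 4 };
+ 5 struct foo *gp;  /* Updates protected by an update-side lock. */
+ 6
+ 7 int add_gp_sketch(int a, int b)
+ 8 {
+ 9   struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
+10
+11   if (!p)
+12     return -ENOMEM;
+13   p->a = a;  /* These two plain stores may be reordered with each other, */
+14   p->b = b;  /* which is harmless, as explained above. */
+15   rcu_assign_pointer(gp, p); /* Orders both stores before the publish. */
+16   return 0;
+17 }
+</pre>
+</blockquote>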
 
 <p>
 It is tempting to assume that the reader need not do anything special
@@ -494,11 +532,42 @@
 	code protected by the corresponding update-side lock.
 </ol>
 
-<p><a name="Quick Quiz 4"><b>Quick Quiz 4</b>:</a>
-Without the <tt>rcu_dereference()</tt> or the
-<tt>rcu_access_pointer()</tt>, what destructive optimizations
-might the compiler make use of?
-<br><a href="#qq4answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Without the <tt>rcu_dereference()</tt> or the
+	<tt>rcu_access_pointer()</tt>, what destructive optimizations
+	might the compiler make use of?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	Let's start with what happens to <tt>do_something_gp()</tt>
+	if it fails to use <tt>rcu_dereference()</tt>.
+	It could reuse a value formerly fetched from this same pointer.
+	It could also fetch the pointer from <tt>gp</tt> in a byte-at-a-time
+	manner, resulting in <i>load tearing</i>, in turn resulting in a
+	bytewise mash-up of two distinct pointer values.
+	It might even use value-speculation optimizations, where it makes
+	a wrong guess, but by the time it gets around to checking the
+	value, an update has changed the pointer to match the wrong guess.
+	Too bad about any dereferences that returned pre-initialization garbage
+	in the meantime!
+	</font>
+
+	<p><font color="ffffff">
+	For <tt>remove_gp_synchronous()</tt>, as long as all modifications
+	to <tt>gp</tt> are carried out while holding <tt>gp_lock</tt>,
+	the above optimizations are harmless.
+	However,
+	with <tt>CONFIG_SPARSE_RCU_POINTER=y</tt>,
+	<tt>sparse</tt> will complain if you
+	define <tt>gp</tt> with <tt>__rcu</tt> and then
+	access it without using
+	either <tt>rcu_access_pointer()</tt> or <tt>rcu_dereference()</tt>.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
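+
+<p>
+For concreteness, here is a minimal reader-side sketch, assuming the
+<tt>gp</tt> pointer and structure used in the examples above; the
+<tt>read_gp_sketch()</tt> name is illustrative only:
+
+<blockquote>
+<pre>
+ 1 int read_gp_sketch(void)
+ 2 {
+ 3   struct foo *p;
+ 4   int ret = -1;
+ 5
+ 6   rcu_read_lock();
+ 7   p = rcu_dereference(gp); /* Marked load: no tearing, no refetching. */
+ 8   if (p)
+ 9     ret = p->a + p->b; /* Fields were initialized before gp was published. */
+10   rcu_read_unlock();
+11   return ret;
+12 }
+</pre>
+</blockquote>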
 
 <p>
 In short, RCU's publish-subscribe guarantee is provided by the combination
@@ -571,17 +640,156 @@
 	<tt>synchronize_rcu()</tt> migrates in the meantime.
 </ol>
 
-<p><a name="Quick Quiz 5"><b>Quick Quiz 5</b>:</a>
-Given that multiple CPUs can start RCU read-side critical sections
-at any time without any ordering whatsoever, how can RCU possibly tell whether
-or not a given RCU read-side critical section starts before a
-given instance of <tt>synchronize_rcu()</tt>?
-<br><a href="#qq5answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Given that multiple CPUs can start RCU read-side critical sections
+	at any time without any ordering whatsoever, how can RCU possibly
+	tell whether or not a given RCU read-side critical section starts
+	before a given instance of <tt>synchronize_rcu()</tt>?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	If RCU cannot tell whether or not a given
+	RCU read-side critical section starts before a
+	given instance of <tt>synchronize_rcu()</tt>,
+	then it must assume that the RCU read-side critical section
+	started first.
+	In other words, a given instance of <tt>synchronize_rcu()</tt>
+	can avoid waiting on a given RCU read-side critical section only
+	if it can prove that <tt>synchronize_rcu()</tt> started first.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
-<p><a name="Quick Quiz 6"><b>Quick Quiz 6</b>:</a>
-The first and second guarantees require unbelievably strict ordering!
-Are all these memory barriers <i> really</i> required?
-<br><a href="#qq6answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	The first and second guarantees require unbelievably strict ordering!
+	Are all these memory barriers <i> really</i> required?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	Yes, they really are required.
+	To see why the first guarantee is required, consider the following
+	sequence of events:
+	</font>
+
+	<ol>
+	<li>	<font color="ffffff">
+		CPU 1: <tt>rcu_read_lock()</tt>
+		</font>
+	<li>	<font color="ffffff">
+		CPU 1: <tt>q = rcu_dereference(gp);
+		/* Very likely to return p. */</tt>
+		</font>
+	<li>	<font color="ffffff">
+		CPU 0: <tt>list_del_rcu(p);</tt>
+		</font>
+	<li>	<font color="ffffff">
+		CPU 0: <tt>synchronize_rcu()</tt> starts.
+		</font>
+	<li>	<font color="ffffff">
+		CPU 1: <tt>do_something_with(q-&gt;a);
+		/* No smp_mb(), so might happen after kfree(). */</tt>
+		</font>
+	<li>	<font color="ffffff">
+		CPU 1: <tt>rcu_read_unlock()</tt>
+		</font>
+	<li>	<font color="ffffff">
+		CPU 0: <tt>synchronize_rcu()</tt> returns.
+		</font>
+	<li>	<font color="ffffff">
+		CPU 0: <tt>kfree(p);</tt>
+		</font>
+	</ol>
+
+	<p><font color="ffffff">
+	Therefore, there absolutely must be a full memory barrier between the
+	end of the RCU read-side critical section and the end of the
+	grace period.
+	</font>
+
+	<p><font color="ffffff">
+	The sequence of events demonstrating the necessity of the second rule
+	is roughly similar:
+	</font>
+
+	<ol>
+	<li>	<font color="ffffff">CPU 0: <tt>list_del_rcu(p);</tt>
+		</font>
+	<li>	<font color="ffffff">CPU 0: <tt>synchronize_rcu()</tt> starts.
+		</font>
+	<li>	<font color="ffffff">CPU 1: <tt>rcu_read_lock()</tt>
+		</font>
+	<li>	<font color="ffffff">CPU 1: <tt>q = rcu_dereference(gp);
+		/* Might return p if no memory barrier. */</tt>
+		</font>
+	<li>	<font color="ffffff">CPU 0: <tt>synchronize_rcu()</tt> returns.
+		</font>
+	<li>	<font color="ffffff">CPU 0: <tt>kfree(p);</tt>
+		</font>
+	<li>	<font color="ffffff">
+		CPU 1: <tt>do_something_with(q-&gt;a); /* Boom!!! */</tt>
+		</font>
+	<li>	<font color="ffffff">CPU 1: <tt>rcu_read_unlock()</tt>
+		</font>
+	</ol>
+
+	<p><font color="ffffff">
+	And similarly, without a memory barrier between the beginning of the
+	grace period and the beginning of the RCU read-side critical section,
+	CPU&nbsp;1 might end up accessing the freelist.
+	</font>
+
+	<p><font color="ffffff">
+	The &ldquo;as if&rdquo; rule of course applies, so that any
+	implementation that acts as if the appropriate memory barriers
+	were in place is a correct implementation.
+	That said, it is much easier to fool yourself into believing
+	that you have adhered to the as-if rule than it is to actually
+	adhere to it!
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	You claim that <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
+	generate absolutely no code in some kernel builds.
+	This means that the compiler might arbitrarily rearrange consecutive
+	RCU read-side critical sections.
+	Given such rearrangement, if a given RCU read-side critical section
+	is done, how can you be sure that all prior RCU read-side critical
+	sections are done?
+	Won't the compiler rearrangements make that impossible to determine?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	In cases where <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
+	generate absolutely no code, RCU infers quiescent states only at
+	special locations, for example, within the scheduler.
+	Because calls to <tt>schedule()</tt> had better prevent calling-code
+	accesses to shared variables from being rearranged across the call to
+	<tt>schedule()</tt>, if RCU detects the end of a given RCU read-side
+	critical section, it will necessarily detect the end of all prior
+	RCU read-side critical sections, no matter how aggressively the
+	compiler scrambles the code.
+	</font>
+
+	<p><font color="ffffff">
+	Again, this all assumes that the compiler cannot scramble code across
+	calls to the scheduler, out of interrupt handlers, into the idle loop,
+	into user-mode code, and so on.
+	But if your kernel build allows that sort of scrambling, you have broken
+	far more than just RCU!
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 Note that these memory-barrier requirements do not replace the fundamental
@@ -626,9 +834,19 @@
 <tt>call_rcu()</tt> and <tt>kfree_rcu()</tt> API members
 described later in this document.
 
-<p><a name="Quick Quiz 7"><b>Quick Quiz 7</b>:</a>
-But how does the upgrade-to-write operation exclude other readers?
-<br><a href="#qq7answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	But how does the upgrade-to-write operation exclude other readers?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	It doesn't, just like normal RCU updates, which also do not exclude
+	RCU readers.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 This guarantee allows lookup code to be shared between read-side
@@ -714,9 +932,20 @@
 This is by design:  Any significant ordering constraints would slow down
 these fast-path APIs.
 
-<p><a name="Quick Quiz 8"><b>Quick Quiz 8</b>:</a>
-Can't the compiler also reorder this code?
-<br><a href="#qq8answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Can't the compiler also reorder this code?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	No, the volatile casts in <tt>READ_ONCE()</tt> and
+	<tt>WRITE_ONCE()</tt> prevent the compiler from reordering in
+	this particular case.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <h3><a name="Readers Do Not Exclude Updaters">Readers Do Not Exclude Updaters</a></h3>
 
@@ -769,10 +998,28 @@
 starts, and <tt>synchronize_rcu()</tt> is under no
 obligation to wait for these new readers.
 
-<p><a name="Quick Quiz 9"><b>Quick Quiz 9</b>:</a>
-Suppose that synchronize_rcu() did wait until all readers had completed.
-Would the updater be able to rely on this?
-<br><a href="#qq9answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Suppose that synchronize_rcu() did wait until <i>all</i>
+	readers had completed instead of waiting only on
+	pre-existing readers.
+	For how long would the updater be able to rely on there
+	being no readers?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	For no time at all.
+	Even if <tt>synchronize_rcu()</tt> were to wait until
+	all readers had completed, a new reader might start immediately after
+	<tt>synchronize_rcu()</tt> completed.
+	Therefore, the code following
+	<tt>synchronize_rcu()</tt> can <i>never</i> rely on there being
+	no readers.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <h3><a name="Grace Periods Don't Partition Read-Side Critical Sections">
 Grace Periods Don't Partition Read-Side Critical Sections</a></h3>
@@ -969,11 +1216,24 @@
 As a result, an RCU read-side critical section cannot partition a pair
 of RCU grace periods.
 
-<p><a name="Quick Quiz 10"><b>Quick Quiz 10</b>:</a>
-How long a sequence of grace periods, each separated by an RCU read-side
-critical section, would be required to partition the RCU read-side
-critical sections at the beginning and end of the chain?
-<br><a href="#qq10answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	How long a sequence of grace periods, each separated by an RCU
+	read-side critical section, would be required to partition the RCU
+	read-side critical sections at the beginning and end of the chain?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	In theory, an infinite number.
+	In practice, an unknown number that is sensitive to both implementation
+	details and timing considerations.
+	Therefore, even in practice, RCU users must abide by the
+	theoretical rather than the practical answer.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <h3><a name="Disabling Preemption Does Not Block Grace Periods">
 Disabling Preemption Does Not Block Grace Periods</a></h3>
@@ -1109,12 +1369,27 @@
 <h3><a name="Specialization">Specialization</a></h3>
 
 <p>
-RCU is and always has been intended primarily for read-mostly situations, as
-illustrated by the following figure.
-This means that RCU's read-side primitives are optimized, often at the
+RCU is and always has been intended primarily for read-mostly situations,
+which means that RCU's read-side primitives are optimized, often at the
 expense of its update-side primitives.
+Experience thus far is captured by the following list of situations:
 
-<p><img src="RCUApplicability.svg" alt="RCUApplicability.svg" width="70%"></p>
+<ol>
+<li>	Read-mostly data, where stale and inconsistent data is not
+	a problem:   RCU works great!
+<li>	Read-mostly data, where data must be consistent:
+	RCU works well.
+<li>	Read-write data, where data must be consistent:
+	RCU <i>might</i> work OK.
+	Or not.
+<li>	Write-mostly data, where data must be consistent:
+	RCU is very unlikely to be the right tool for the job,
+	with the following exceptions, where RCU can provide:
+	<ol type=a>
+	<li>	Existence guarantees for update-friendly mechanisms.
+	<li>	Wait-free read-side primitives for real-time use.
+	</ol>
+</ol>
 
 <p>
 This focus on read-mostly situations means that RCU must interoperate
@@ -1127,9 +1402,43 @@
 including spinlocks, sequence locks, atomic operations, reference
 counters, and memory barriers.
 
-<p><a name="Quick Quiz 11"><b>Quick Quiz 11</b>:</a>
-What about sleeping locks?
-<br><a href="#qq11answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	What about sleeping locks?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	These are forbidden within Linux-kernel RCU read-side critical
+	sections because it is not legal to place a quiescent state
+	(in this case, voluntary context switch) within an RCU read-side
+	critical section.
+	However, sleeping locks may be used within userspace RCU read-side
+	critical sections, and also within Linux-kernel sleepable RCU
+	<a href="#Sleepable RCU"><font color="ffffff">(SRCU)</font></a>
+	read-side critical sections.
+	In addition, the -rt patchset turns spinlocks into a
+	In addition, the -rt patchset turns spinlocks into
+	can be preempted, which also means that these sleeplockified
+	spinlocks (but not other sleeping locks!)  may be acquire within
+	spinlocks (but not other sleeping locks!) may be acquired within
+	</font>
+
+	<p><font color="ffffff">
+	Note that it <i>is</i> legal for a normal RCU read-side
+	critical section to conditionally acquire a sleeping lock
+	(as in <tt>mutex_trylock()</tt>), but only as long as it does
+	not loop indefinitely attempting to conditionally acquire that
+	sleeping lock.
+	The key point is that things like <tt>mutex_trylock()</tt>
+	either return with the mutex held, or return an error indication if
+	the mutex was not immediately available.
+	Either way, <tt>mutex_trylock()</tt> returns immediately without
+	sleeping.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
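+
+<p>
+A minimal sketch of the legal conditional-acquisition pattern described
+in the answer above; the <tt>my_mutex</tt> and
+<tt>do_work_holding_mutex()</tt> names are illustrative only:
+
+<blockquote>
+<pre>
+ 1 rcu_read_lock();
+ 2 if (mutex_trylock(&amp;my_mutex)) { /* Returns immediately, never sleeps. */
+ 3   do_work_holding_mutex();
+ 4   mutex_unlock(&amp;my_mutex);
+ 5 } /* Note: no retry loop on failure! */
+ 6 rcu_read_unlock();
+</pre>
+</blockquote>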
 
 <p>
 It often comes as a surprise that many algorithms do not require a
@@ -1160,10 +1469,7 @@
 One of our pair of veterinarians might wait 30 seconds before pronouncing
 the cat dead, while the other might insist on waiting a full minute.
 The two veterinarians would then disagree on the state of the cat during
-the final 30 seconds of the minute following the last heartbeat, as
-fancifully illustrated below:
-
-<p><img src="2013-08-is-it-dead.png" alt="2013-08-is-it-dead.png" width="431"></p>
+the final 30 seconds of the minute following the last heartbeat.
 
 <p>
 Interestingly enough, this same situation applies to hardware.
@@ -1343,7 +1649,8 @@
 <tt>synchronize_rcu_expedited()</tt> would be legal,
 including within preempt-disable code, <tt>local_bh_disable()</tt> code,
 interrupt-disable code, and interrupt handlers.
-However, even <tt>call_rcu()</tt> is illegal within NMI handlers.
+However, even <tt>call_rcu()</tt> is illegal within NMI handlers
+and from idle and offline CPUs.
 The callback function (<tt>remove_gp_cb()</tt> in this case) will be
 executed within softirq (software interrupt) environment within the
 Linux kernel,
@@ -1354,12 +1661,27 @@
 Long-running operations should be relegated to separate threads or
 (in the Linux kernel) workqueues.
 
-<p><a name="Quick Quiz 12"><b>Quick Quiz 12</b>:</a>
-Why does line&nbsp;19 use <tt>rcu_access_pointer()</tt>?
-After all, <tt>call_rcu()</tt> on line&nbsp;25 stores into the
-structure, which would interact badly with concurrent insertions.
-Doesn't this mean that <tt>rcu_dereference()</tt> is required?
-<br><a href="#qq12answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Why does line&nbsp;19 use <tt>rcu_access_pointer()</tt>?
+	After all, <tt>call_rcu()</tt> on line&nbsp;25 stores into the
+	structure, which would interact badly with concurrent insertions.
+	Doesn't this mean that <tt>rcu_dereference()</tt> is required?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	Presumably the <tt>-&gt;gp_lock</tt> acquired on line&nbsp;18 excludes
+	any changes, including any insertions that <tt>rcu_dereference()</tt>
+	would protect against.
+	Therefore, any insertions will be delayed until after
+	<tt>-&gt;gp_lock</tt>
+	is released on line&nbsp;25, which in turn means that
+	<tt>rcu_access_pointer()</tt> suffices.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 However, all that <tt>remove_gp_cb()</tt> is doing is
@@ -1406,14 +1728,31 @@
 so the very few places that needed something like
 <tt>synchronize_rcu()</tt> simply open-coded it.
 
-<p><a name="Quick Quiz 13"><b>Quick Quiz 13</b>:</a>
-Earlier it was claimed that <tt>call_rcu()</tt> and
-<tt>kfree_rcu()</tt> allowed updaters to avoid being blocked
-by readers.
-But how can that be correct, given that the invocation of the callback
-and the freeing of the memory (respectively) must still wait for
-a grace period to elapse?
-<br><a href="#qq13answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Earlier it was claimed that <tt>call_rcu()</tt> and
+	<tt>kfree_rcu()</tt> allowed updaters to avoid being blocked
+	by readers.
+	But how can that be correct, given that the invocation of the callback
+	and the freeing of the memory (respectively) must still wait for
+	a grace period to elapse?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	We could define things this way, but keep in mind that this sort of
+	definition would say that updates in garbage-collected languages
+	cannot complete until the next time the garbage collector runs,
+	which does not seem at all reasonable.
+	The key point is that in most cases, an updater using either
+	<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt> can proceed to the
+	next update as soon as it has invoked <tt>call_rcu()</tt> or
+	<tt>kfree_rcu()</tt>, without having to wait for a subsequent
+	grace period.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
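+
+<p>
+For example, an updater that uses <tt>kfree_rcu()</tt> can proceed to its
+next update as soon as the free has been queued.
+A minimal sketch follows; the field names and the pointer <tt>p</tt> are
+illustrative only:
+
+<blockquote>
+<pre>
+ 1 struct foo {
+ 2   struct list_head list;
+ 3   struct rcu_head rh;
+ 4   int data;
+ 5 };
+ 6
+ 7 /* Updater, holding the appropriate update-side lock; */
+ 8 /* p points to the element being removed. */
+ 9 list_del_rcu(&amp;p->list);
+10 kfree_rcu(p, rh); /* Returns at once; p is freed only after a grace period. */
+</pre>
+</blockquote>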
 
 <p>
 But what if the updater must wait for the completion of code to be
@@ -1838,11 +2177,26 @@
 Therefore, invoking <tt>synchronize_rcu()</tt> during scheduler
 initialization can result in deadlock.
 
-<p><a name="Quick Quiz 14"><b>Quick Quiz 14</b>:</a>
-So what happens with <tt>synchronize_rcu()</tt> during
-scheduler initialization for <tt>CONFIG_PREEMPT=n</tt>
-kernels?
-<br><a href="#qq14answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	So what happens with <tt>synchronize_rcu()</tt> during
+	scheduler initialization for <tt>CONFIG_PREEMPT=n</tt>
+	kernels?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	In <tt>CONFIG_PREEMPT=n</tt> kernels, <tt>synchronize_rcu()</tt>
+	maps directly to <tt>synchronize_sched()</tt>.
+	Therefore, <tt>synchronize_rcu()</tt> works normally throughout
+	boot in <tt>CONFIG_PREEMPT=n</tt> kernels.
+	However, your code must also work in <tt>CONFIG_PREEMPT=y</tt> kernels,
+	so it is still necessary to avoid invoking <tt>synchronize_rcu()</tt>
+	during scheduler initialization.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 I learned of these boot-time requirements as a result of a series of
@@ -2171,6 +2525,14 @@
 also simplified handling of a number of race conditions.
 
 <p>
+RCU must avoid degrading real-time response for CPU-bound threads, whether
+executing in usermode (which is one use case for
+<tt>CONFIG_NO_HZ_FULL=y</tt>) or in the kernel.
+That said, CPU-bound loops in the kernel must execute
+<tt>cond_resched_rcu_qs()</tt> at least once per few tens of milliseconds
+in order to avoid receiving an IPI from RCU.
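+
+<p>
+For example, a long-running in-kernel loop might be structured as follows,
+where <tt>do_a_chunk_of_work()</tt> is a purely illustrative stand-in for
+the CPU-bound work:
+
+<blockquote>
+<pre>
+ 1 bool done = false;
+ 2
+ 3 while (!done) {
+ 4   done = do_a_chunk_of_work(); /* Hypothetical CPU-bound work. */
+ 5   cond_resched_rcu_qs(); /* Supply RCU with a quiescent state. */
+ 6 }
+</pre>
+</blockquote>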
+
+<p>
 Finally, RCU's status as a synchronization primitive means that
 any RCU failure can result in arbitrary memory corruption that can be
 extremely difficult to debug.
@@ -2223,6 +2585,8 @@
 <li>	<a href="#Sched Flavor">Sched Flavor</a>
 <li>	<a href="#Sleepable RCU">Sleepable RCU</a>
 <li>	<a href="#Tasks RCU">Tasks RCU</a>
+<li>	<a href="#Waiting for Multiple Grace Periods">
+	Waiting for Multiple Grace Periods</a>
 </ol>
 
 <h3><a name="Bottom-Half Flavor">Bottom-Half Flavor</a></h3>
@@ -2472,6 +2836,94 @@
 <tt>synchronize_rcu_tasks()</tt>, and
 <tt>rcu_barrier_tasks()</tt>.
 
+<h3><a name="Waiting for Multiple Grace Periods">
+Waiting for Multiple Grace Periods</a></h3>
+
+<p>
+Perhaps you have an RCU-protected data structure that is accessed from
+RCU read-side critical sections, from softirq handlers, and from
+hardware interrupt handlers.
+That is three flavors of RCU, the normal flavor, the bottom-half flavor,
+and the sched flavor.
+How can you wait for a compound grace period?
+
+<p>
+The best approach is usually to &ldquo;just say no!&rdquo; and
+insert <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
+around each RCU read-side critical section, regardless of what
+environment it happens to be in.
+But suppose that some of the RCU read-side critical sections are
+on extremely hot code paths, and that use of <tt>CONFIG_PREEMPT=n</tt>
+is not a viable option, so that <tt>rcu_read_lock()</tt> and
+<tt>rcu_read_unlock()</tt> are not free.
+What then?
+
+<p>
+You <i>could</i> wait on all three grace periods in succession, as follows:
+
+<blockquote>
+<pre>
+ 1 synchronize_rcu();
+ 2 synchronize_rcu_bh();
+ 3 synchronize_sched();
+</pre>
+</blockquote>
+
+<p>
+This works, but triples the update-side latency penalty.
+In cases where this is not acceptable, <tt>synchronize_rcu_mult()</tt>
+may be used to wait on all three flavors of grace period concurrently:
+
+<blockquote>
+<pre>
+ 1 synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched);
+</pre>
+</blockquote>
+
+<p>
+But what if it is necessary to also wait on SRCU?
+This can be done as follows:
+
+<blockquote>
+<pre>
+ 1 static void call_my_srcu(struct rcu_head *head,
+ 2        void (*func)(struct rcu_head *head))
+ 3 {
+ 4   call_srcu(&amp;my_srcu, head, func);
+ 5 }
+ 6
+ 7 synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched, call_my_srcu);
+</pre>
+</blockquote>
+
+<p>
+If you needed to wait on multiple different flavors of SRCU
+(but why???), you would need to create a wrapper function resembling
+<tt>call_my_srcu()</tt> for each SRCU flavor.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	But what if I need to wait for multiple RCU flavors, but I also need
+	the grace periods to be expedited?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	If you are using expedited grace periods, there should be less penalty
+	for waiting on them in succession.
+	But if that is nevertheless a problem, you can use workqueues
+	or multiple kthreads to wait on the various expedited grace
+	periods concurrently.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>
+Again, it is usually better to adjust the RCU read-side critical sections
+to use a single flavor of RCU, but when this is not feasible, you can use
+<tt>synchronize_rcu_mult()</tt>.
+
 <h2><a name="Possible Future Changes">Possible Future Changes</a></h2>
 
 <p>
@@ -2569,329 +3021,4 @@
 under the terms of the Creative Commons Attribution-Share Alike 3.0
 United States license.
 
-<h3><a name="Answers to Quick Quizzes">
-Answers to Quick Quizzes</a></h3>
-
-<a name="qq1answer"></a>
-<p><b>Quick Quiz 1</b>:
-Wait a minute!
-You said that updaters can make useful forward progress concurrently
-with readers, but pre-existing readers will block
-<tt>synchronize_rcu()</tt>!!!
-Just who are you trying to fool???
-
-
-</p><p><b>Answer</b>:
-First, if updaters do not wish to be blocked by readers, they can use
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt>, which will
-be discussed later.
-Second, even when using <tt>synchronize_rcu()</tt>, the other
-update-side code does run concurrently with readers, whether pre-existing
-or not.
-
-
-</p><p><a href="#Quick%20Quiz%201"><b>Back to Quick Quiz 1</b>.</a>
-
-<a name="qq2answer"></a>
-<p><b>Quick Quiz 2</b>:
-Why is the <tt>synchronize_rcu()</tt> on line&nbsp;28 needed?
-
-
-</p><p><b>Answer</b>:
-Without that extra grace period, memory reordering could result in
-<tt>do_something_dlm()</tt> executing <tt>do_something()</tt>
-concurrently with the last bits of <tt>recovery()</tt>.
-
-
-</p><p><a href="#Quick%20Quiz%202"><b>Back to Quick Quiz 2</b>.</a>
-
-<a name="qq3answer"></a>
-<p><b>Quick Quiz 3</b>:
-But <tt>rcu_assign_pointer()</tt> does nothing to prevent the
-two assignments to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt>
-from being reordered.
-Can't that also cause problems?
-
-
-</p><p><b>Answer</b>:
-No, it cannot.
-The readers cannot see either of these two fields until
-the assignment to <tt>gp</tt>, by which time both fields are
-fully initialized.
-So reordering the assignments
-to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt> cannot possibly
-cause any problems.
-
-
-</p><p><a href="#Quick%20Quiz%203"><b>Back to Quick Quiz 3</b>.</a>
-
-<a name="qq4answer"></a>
-<p><b>Quick Quiz 4</b>:
-Without the <tt>rcu_dereference()</tt> or the
-<tt>rcu_access_pointer()</tt>, what destructive optimizations
-might the compiler make use of?
-
-
-</p><p><b>Answer</b>:
-Let's start with what happens to <tt>do_something_gp()</tt>
-if it fails to use <tt>rcu_dereference()</tt>.
-It could reuse a value formerly fetched from this same pointer.
-It could also fetch the pointer from <tt>gp</tt> in a byte-at-a-time
-manner, resulting in <i>load tearing</i>, in turn resulting a bytewise
-mash-up of two distince pointer values.
-It might even use value-speculation optimizations, where it makes a wrong
-guess, but by the time it gets around to checking the value, an update
-has changed the pointer to match the wrong guess.
-Too bad about any dereferences that returned pre-initialization garbage
-in the meantime!
-
-<p>
-For <tt>remove_gp_synchronous()</tt>, as long as all modifications
-to <tt>gp</tt> are carried out while holding <tt>gp_lock</tt>,
-the above optimizations are harmless.
-However,
-with <tt>CONFIG_SPARSE_RCU_POINTER=y</tt>,
-<tt>sparse</tt> will complain if you
-define <tt>gp</tt> with <tt>__rcu</tt> and then
-access it without using
-either <tt>rcu_access_pointer()</tt> or <tt>rcu_dereference()</tt>.
-
-
-</p><p><a href="#Quick%20Quiz%204"><b>Back to Quick Quiz 4</b>.</a>
-
-<a name="qq5answer"></a>
-<p><b>Quick Quiz 5</b>:
-Given that multiple CPUs can start RCU read-side critical sections
-at any time without any ordering whatsoever, how can RCU possibly tell whether
-or not a given RCU read-side critical section starts before a
-given instance of <tt>synchronize_rcu()</tt>?
-
-
-</p><p><b>Answer</b>:
-If RCU cannot tell whether or not a given
-RCU read-side critical section starts before a
-given instance of <tt>synchronize_rcu()</tt>,
-then it must assume that the RCU read-side critical section
-started first.
-In other words, a given instance of <tt>synchronize_rcu()</tt>
-can avoid waiting on a given RCU read-side critical section only
-if it can prove that <tt>synchronize_rcu()</tt> started first.
-
-
-</p><p><a href="#Quick%20Quiz%205"><b>Back to Quick Quiz 5</b>.</a>
-
-<a name="qq6answer"></a>
-<p><b>Quick Quiz 6</b>:
-The first and second guarantees require unbelievably strict ordering!
-Are all these memory barriers <i> really</i> required?
-
-
-</p><p><b>Answer</b>:
-Yes, they really are required.
-To see why the first guarantee is required, consider the following
-sequence of events:
-
-<ol>
-<li>	CPU 1: <tt>rcu_read_lock()</tt>
-<li>	CPU 1: <tt>q = rcu_dereference(gp);
-	/* Very likely to return p. */</tt>
-<li>	CPU 0: <tt>list_del_rcu(p);</tt>
-<li>	CPU 0: <tt>synchronize_rcu()</tt> starts.
-<li>	CPU 1: <tt>do_something_with(q-&gt;a);
-	/* No smp_mb(), so might happen after kfree(). */</tt>
-<li>	CPU 1: <tt>rcu_read_unlock()</tt>
-<li>	CPU 0: <tt>synchronize_rcu()</tt> returns.
-<li>	CPU 0: <tt>kfree(p);</tt>
-</ol>
-
-<p>
-Therefore, there absolutely must be a full memory barrier between the
-end of the RCU read-side critical section and the end of the
-grace period.
-
-<p>
-The sequence of events demonstrating the necessity of the second rule
-is roughly similar:
-
-<ol>
-<li>	CPU 0: <tt>list_del_rcu(p);</tt>
-<li>	CPU 0: <tt>synchronize_rcu()</tt> starts.
-<li>	CPU 1: <tt>rcu_read_lock()</tt>
-<li>	CPU 1: <tt>q = rcu_dereference(gp);
-	/* Might return p if no memory barrier. */</tt>
-<li>	CPU 0: <tt>synchronize_rcu()</tt> returns.
-<li>	CPU 0: <tt>kfree(p);</tt>
-<li>	CPU 1: <tt>do_something_with(q-&gt;a); /* Boom!!! */</tt>
-<li>	CPU 1: <tt>rcu_read_unlock()</tt>
-</ol>
-
-<p>
-And similarly, without a memory barrier between the beginning of the
-grace period and the beginning of the RCU read-side critical section,
-CPU&nbsp;1 might end up accessing the freelist.
-
-<p>
-The &ldquo;as if&rdquo; rule of course applies, so that any implementation
-that acts as if the appropriate memory barriers were in place is a
-correct implementation.
-That said, it is much easier to fool yourself into believing that you have
-adhered to the as-if rule than it is to actually adhere to it!
-
-
-</p><p><a href="#Quick%20Quiz%206"><b>Back to Quick Quiz 6</b>.</a>
-
-<a name="qq7answer"></a>
-<p><b>Quick Quiz 7</b>:
-But how does the upgrade-to-write operation exclude other readers?
-
-
-</p><p><b>Answer</b>:
-It doesn't, just like normal RCU updates, which also do not exclude
-RCU readers.
-
-
-</p><p><a href="#Quick%20Quiz%207"><b>Back to Quick Quiz 7</b>.</a>
-
-<a name="qq8answer"></a>
-<p><b>Quick Quiz 8</b>:
-Can't the compiler also reorder this code?
-
-
-</p><p><b>Answer</b>:
-No, the volatile casts in <tt>READ_ONCE()</tt> and
-<tt>WRITE_ONCE()</tt> prevent the compiler from reordering in
-this particular case.
-
-
-</p><p><a href="#Quick%20Quiz%208"><b>Back to Quick Quiz 8</b>.</a>
-
-<a name="qq9answer"></a>
-<p><b>Quick Quiz 9</b>:
-Suppose that synchronize_rcu() did wait until all readers had completed.
-Would the updater be able to rely on this?
-
-
-</p><p><b>Answer</b>:
-No.
-Even if <tt>synchronize_rcu()</tt> were to wait until
-all readers had completed, a new reader might start immediately after
-<tt>synchronize_rcu()</tt> completed.
-Therefore, the code following
-<tt>synchronize_rcu()</tt> cannot rely on there being no readers
-in any case.
-
-
-</p><p><a href="#Quick%20Quiz%209"><b>Back to Quick Quiz 9</b>.</a>
-
-<a name="qq10answer"></a>
-<p><b>Quick Quiz 10</b>:
-How long a sequence of grace periods, each separated by an RCU read-side
-critical section, would be required to partition the RCU read-side
-critical sections at the beginning and end of the chain?
-
-
-</p><p><b>Answer</b>:
-In theory, an infinite number.
-In practice, an unknown number that is sensitive to both implementation
-details and timing considerations.
-Therefore, even in practice, RCU users must abide by the theoretical rather
-than the practical answer.
-
-
-</p><p><a href="#Quick%20Quiz%2010"><b>Back to Quick Quiz 10</b>.</a>
-
-<a name="qq11answer"></a>
-<p><b>Quick Quiz 11</b>:
-What about sleeping locks?
-
-
-</p><p><b>Answer</b>:
-These are forbidden within Linux-kernel RCU read-side critical sections
-because it is not legal to place a quiescent state (in this case,
-voluntary context switch) within an RCU read-side critical section.
-However, sleeping locks may be used within userspace RCU read-side critical
-sections, and also within Linux-kernel sleepable RCU
-<a href="#Sleepable RCU">(SRCU)</a>
-read-side critical sections.
-In addition, the -rt patchset turns spinlocks into sleeping locks so
-that the corresponding critical sections can be preempted, which
-also means that these sleeplockified spinlocks (but not other sleeping locks!)
-may be acquired within -rt-Linux-kernel RCU read-side critical sections.
-
-<p>
-Note that it <i>is</i> legal for a normal RCU read-side critical section
-to conditionally acquire a sleeping lock (as in <tt>mutex_trylock()</tt>),
-but only as long as it does not loop indefinitely attempting to
-conditionally acquire that sleeping lock.
-The key point is that things like <tt>mutex_trylock()</tt>
-either return with the mutex held, or return an error indication if
-the mutex was not immediately available.
-Either way, <tt>mutex_trylock()</tt> returns immediately without sleeping.
-
-
-</p><p><a href="#Quick%20Quiz%2011"><b>Back to Quick Quiz 11</b>.</a>
-
-<a name="qq12answer"></a>
-<p><b>Quick Quiz 12</b>:
-Why does line&nbsp;19 use <tt>rcu_access_pointer()</tt>?
-After all, <tt>call_rcu()</tt> on line&nbsp;25 stores into the
-structure, which would interact badly with concurrent insertions.
-Doesn't this mean that <tt>rcu_dereference()</tt> is required?
-
-
-</p><p><b>Answer</b>:
-Presumably the <tt>-&gt;gp_lock</tt> acquired on line&nbsp;18 excludes
-any changes, including any insertions that <tt>rcu_dereference()</tt>
-would protect against.
-Therefore, any insertions will be delayed until after <tt>-&gt;gp_lock</tt>
-is released on line&nbsp;25, which in turn means that
-<tt>rcu_access_pointer()</tt> suffices.
-
-
-</p><p><a href="#Quick%20Quiz%2012"><b>Back to Quick Quiz 12</b>.</a>
-
-<a name="qq13answer"></a>
-<p><b>Quick Quiz 13</b>:
-Earlier it was claimed that <tt>call_rcu()</tt> and
-<tt>kfree_rcu()</tt> allowed updaters to avoid being blocked
-by readers.
-But how can that be correct, given that the invocation of the callback
-and the freeing of the memory (respectively) must still wait for
-a grace period to elapse?
-
-
-</p><p><b>Answer</b>:
-We could define things this way, but keep in mind that this sort of
-definition would say that updates in garbage-collected languages
-cannot complete until the next time the garbage collector runs,
-which does not seem at all reasonable.
-The key point is that in most cases, an updater using either
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt> can proceed to the
-next update as soon as it has invoked <tt>call_rcu()</tt> or
-<tt>kfree_rcu()</tt>, without having to wait for a subsequent
-grace period.
-
-
-</p><p><a href="#Quick%20Quiz%2013"><b>Back to Quick Quiz 13</b>.</a>
-
-<a name="qq14answer"></a>
-<p><b>Quick Quiz 14</b>:
-So what happens with <tt>synchronize_rcu()</tt> during
-scheduler initialization for <tt>CONFIG_PREEMPT=n</tt>
-kernels?
-
-
-</p><p><b>Answer</b>:
-In <tt>CONFIG_PREEMPT=n</tt> kernels, <tt>synchronize_rcu()</tt>
-maps directly to <tt>synchronize_sched()</tt>.
-Therefore, <tt>synchronize_rcu()</tt> works normally throughout
-boot in <tt>CONFIG_PREEMPT=n</tt> kernels.
-However, your code must also work in <tt>CONFIG_PREEMPT=y</tt> kernels,
-so it is still necessary to avoid invoking <tt>synchronize_rcu()</tt>
-during scheduler initialization.
-
-
-</p><p><a href="#Quick%20Quiz%2014"><b>Back to Quick Quiz 14</b>.</a>
-
-
 </body></html>
diff --git a/Documentation/RCU/Design/Requirements/Requirements.htmlx b/Documentation/RCU/Design/Requirements/Requirements.htmlx
deleted file mode 100644
index 3a97ba4..0000000
--- a/Documentation/RCU/Design/Requirements/Requirements.htmlx
+++ /dev/null
@@ -1,2741 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
-        "http://www.w3.org/TR/html4/loose.dtd">
-        <html>
-        <head><title>A Tour Through RCU's Requirements [LWN.net]</title>
-        <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=utf-8">
-
-<h1>A Tour Through RCU's Requirements</h1>
-
-<p>Copyright IBM Corporation, 2015</p>
-<p>Author: Paul E.&nbsp;McKenney</p>
-<p><i>The initial version of this document appeared in the
-<a href="https://lwn.net/">LWN</a> articles
-<a href="https://lwn.net/Articles/652156/">here</a>,
-<a href="https://lwn.net/Articles/652677/">here</a>, and
-<a href="https://lwn.net/Articles/653326/">here</a>.</i></p>
-
-<h2>Introduction</h2>
-
-<p>
-Read-copy update (RCU) is a synchronization mechanism that is often
-used as a replacement for reader-writer locking.
-RCU is unusual in that updaters do not block readers,
-which means that RCU's read-side primitives can be exceedingly fast
-and scalable.
-In addition, updaters can make useful forward progress concurrently
-with readers.
-However, all this concurrency between RCU readers and updaters does raise
-the question of exactly what RCU readers are doing, which in turn
-raises the question of exactly what RCU's requirements are.
-
-<p>
-This document therefore summarizes RCU's requirements, and can be thought
-of as an informal, high-level specification for RCU.
-It is important to understand that RCU's specification is primarily
-empirical in nature;
-in fact, I learned about many of these requirements the hard way.
-This situation might cause some consternation; however, not only
-has this learning process been a lot of fun, but it has also been
-a great privilege to work with so many people willing to apply
-technologies in interesting new ways.
-
-<p>
-All that aside, here are the categories of currently known RCU requirements:
-</p>
-
-<ol>
-<li>	<a href="#Fundamental Requirements">
-	Fundamental Requirements</a>
-<li>	<a href="#Fundamental Non-Requirements">Fundamental Non-Requirements</a>
-<li>	<a href="#Parallelism Facts of Life">
-	Parallelism Facts of Life</a>
-<li>	<a href="#Quality-of-Implementation Requirements">
-	Quality-of-Implementation Requirements</a>
-<li>	<a href="#Linux Kernel Complications">
-	Linux Kernel Complications</a>
-<li>	<a href="#Software-Engineering Requirements">
-	Software-Engineering Requirements</a>
-<li>	<a href="#Other RCU Flavors">
-	Other RCU Flavors</a>
-<li>	<a href="#Possible Future Changes">
-	Possible Future Changes</a>
-</ol>
-
-<p>
-This is followed by a <a href="#Summary">summary</a>,
-which is in turn followed by the inevitable
-<a href="#Answers to Quick Quizzes">answers to the quick quizzes</a>.
-
-<h2><a name="Fundamental Requirements">Fundamental Requirements</a></h2>
-
-<p>
-RCU's fundamental requirements are the closest thing RCU has to hard
-mathematical requirements.
-These are:
-
-<ol>
-<li>	<a href="#Grace-Period Guarantee">
-	Grace-Period Guarantee</a>
-<li>	<a href="#Publish-Subscribe Guarantee">
-	Publish-Subscribe Guarantee</a>
-<li>	<a href="#Memory-Barrier Guarantees">
-	Memory-Barrier Guarantees</a>
-<li>	<a href="#RCU Primitives Guaranteed to Execute Unconditionally">
-	RCU Primitives Guaranteed to Execute Unconditionally</a>
-<li>	<a href="#Guaranteed Read-to-Write Upgrade">
-	Guaranteed Read-to-Write Upgrade</a>
-</ol>
-
-<h3><a name="Grace-Period Guarantee">Grace-Period Guarantee</a></h3>
-
-<p>
-RCU's grace-period guarantee is unusual in being premeditated:
-Jack Slingwine and I had this guarantee firmly in mind when we started
-work on RCU (then called &ldquo;rclock&rdquo;) in the early 1990s.
-That said, the past two decades of experience with RCU have produced
-a much more detailed understanding of this guarantee.
-
-<p>
-RCU's grace-period guarantee allows updaters to wait for the completion
-of all pre-existing RCU read-side critical sections.
-An RCU read-side critical section
-begins with the marker <tt>rcu_read_lock()</tt> and ends with
-the marker <tt>rcu_read_unlock()</tt>.
-These markers may be nested, and RCU treats a nested set as one
-big RCU read-side critical section.
-Production-quality implementations of <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> are extremely lightweight, and in
-fact have exactly zero overhead in Linux kernels built for production
-use with <tt>CONFIG_PREEMPT=n</tt>.
-
-<p>
-This guarantee allows ordering to be enforced with extremely low
-overhead to readers, for example:
-
-<blockquote>
-<pre>
- 1 int x, y;
- 2
- 3 void thread0(void)
- 4 {
- 5   rcu_read_lock();
- 6   r1 = READ_ONCE(x);
- 7   r2 = READ_ONCE(y);
- 8   rcu_read_unlock();
- 9 }
-10
-11 void thread1(void)
-12 {
-13   WRITE_ONCE(x, 1);
-14   synchronize_rcu();
-15   WRITE_ONCE(y, 1);
-16 }
-</pre>
-</blockquote>
-
-<p>
-Because the <tt>synchronize_rcu()</tt> on line&nbsp;14 waits for
-all pre-existing readers, any instance of <tt>thread0()</tt> that
-loads a value of zero from <tt>x</tt> must complete before
-<tt>thread1()</tt> stores to <tt>y</tt>, so that instance must
-also load a value of zero from <tt>y</tt>.
-Similarly, any instance of <tt>thread0()</tt> that loads a value of
-one from <tt>y</tt> must have started after the
-<tt>synchronize_rcu()</tt> started, and must therefore also load
-a value of one from <tt>x</tt>.
-Therefore, the outcome:
-<blockquote>
-<pre>
-(r1 == 0 &amp;&amp; r2 == 1)
-</pre>
-</blockquote>
-cannot happen.
-
-<p>@@QQ@@
-Wait a minute!
-You said that updaters can make useful forward progress concurrently
-with readers, but pre-existing readers will block
-<tt>synchronize_rcu()</tt>!!!
-Just who are you trying to fool???
-<p>@@QQA@@
-First, if updaters do not wish to be blocked by readers, they can use
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt>, which will
-be discussed later.
-Second, even when using <tt>synchronize_rcu()</tt>, the other
-update-side code does run concurrently with readers, whether pre-existing
-or not.
-<p>@@QQE@@
-
-<p>
-This scenario resembles one of the first uses of RCU in
-<a href="https://en.wikipedia.org/wiki/DYNIX">DYNIX/ptx</a>,
-which managed a distributed lock manager's transition into
-a state suitable for handling recovery from node failure,
-more or less as follows:
-
-<blockquote>
-<pre>
- 1 #define STATE_NORMAL        0
- 2 #define STATE_WANT_RECOVERY 1
- 3 #define STATE_RECOVERING    2
- 4 #define STATE_WANT_NORMAL   3
- 5
- 6 int state = STATE_NORMAL;
- 7
- 8 void do_something_dlm(void)
- 9 {
-10   int state_snap;
-11
-12   rcu_read_lock();
-13   state_snap = READ_ONCE(state);
-14   if (state_snap == STATE_NORMAL)
-15     do_something();
-16   else
-17     do_something_carefully();
-18   rcu_read_unlock();
-19 }
-20
-21 void start_recovery(void)
-22 {
-23   WRITE_ONCE(state, STATE_WANT_RECOVERY);
-24   synchronize_rcu();
-25   WRITE_ONCE(state, STATE_RECOVERING);
-26   recovery();
-27   WRITE_ONCE(state, STATE_WANT_NORMAL);
-28   synchronize_rcu();
-29   WRITE_ONCE(state, STATE_NORMAL);
-30 }
-</pre>
-</blockquote>
-
-<p>
-The RCU read-side critical section in <tt>do_something_dlm()</tt>
-works with the <tt>synchronize_rcu()</tt> in <tt>start_recovery()</tt>
-to guarantee that <tt>do_something()</tt> never runs concurrently
-with <tt>recovery()</tt>, but with little or no synchronization
-overhead in <tt>do_something_dlm()</tt>.
-
-<p>@@QQ@@
-Why is the <tt>synchronize_rcu()</tt> on line&nbsp;28 needed?
-<p>@@QQA@@
-Without that extra grace period, memory reordering could result in
-<tt>do_something_dlm()</tt> executing <tt>do_something()</tt>
-concurrently with the last bits of <tt>recovery()</tt>.
-<p>@@QQE@@
-
-<p>
-In order to avoid fatal problems such as deadlocks,
-an RCU read-side critical section must not contain calls to
-<tt>synchronize_rcu()</tt>.
-Similarly, an RCU read-side critical section must not
-contain anything that waits, directly or indirectly, on completion of
-an invocation of <tt>synchronize_rcu()</tt>.
-
-<p>
-Although RCU's grace-period guarantee is useful in and of itself, with
-<a href="https://lwn.net/Articles/573497/">quite a few use cases</a>,
-it would be good to be able to use RCU to coordinate read-side
-access to linked data structures.
-For this, the grace-period guarantee is not sufficient, as can
-be seen in function <tt>add_gp_buggy()</tt> below.
-We will look at the reader's code later, but in the meantime, just think of
-the reader as locklessly picking up the <tt>gp</tt> pointer,
-and, if the value loaded is non-<tt>NULL</tt>, locklessly accessing the
-<tt>-&gt;a</tt> and <tt>-&gt;b</tt> fields.
-
-<blockquote>
-<pre>
- 1 bool add_gp_buggy(int a, int b)
- 2 {
- 3   p = kmalloc(sizeof(*p), GFP_KERNEL);
- 4   if (!p)
- 5     return -ENOMEM;
- 6   spin_lock(&amp;gp_lock);
- 7   if (rcu_access_pointer(gp)) {
- 8     spin_unlock(&amp;gp_lock);
- 9     return false;
-10   }
-11   p-&gt;a = a;
-12   p-&gt;b = b;
-13   gp = p; /* ORDERING BUG */
-14   spin_unlock(&amp;gp_lock);
-15   return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-The problem is that both the compiler and weakly ordered CPUs are within
-their rights to reorder this code as follows:
-
-<blockquote>
-<pre>
- 1 bool add_gp_buggy_optimized(int a, int b)
- 2 {
- 3   p = kmalloc(sizeof(*p), GFP_KERNEL);
- 4   if (!p)
- 5     return -ENOMEM;
- 6   spin_lock(&amp;gp_lock);
- 7   if (rcu_access_pointer(gp)) {
- 8     spin_unlock(&amp;gp_lock);
- 9     return false;
-10   }
-<b>11   gp = p; /* ORDERING BUG */
-12   p-&gt;a = a;
-13   p-&gt;b = b;</b>
-14   spin_unlock(&amp;gp_lock);
-15   return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-If an RCU reader fetches <tt>gp</tt> just after
-<tt>add_gp_buggy_optimized</tt> executes line&nbsp;11,
-it will see garbage in the <tt>-&gt;a</tt> and <tt>-&gt;b</tt>
-fields.
-And this is but one of many ways in which compiler and hardware optimizations
-could cause trouble.
-Therefore, we clearly need some way to prevent the compiler and the CPU from
-reordering in this manner, which brings us to the publish-subscribe
-guarantee discussed in the next section.
-
-<h3><a name="Publish-Subscribe Guarantee">Publish/Subscribe Guarantee</a></h3>
-
-<p>
-RCU's publish-subscribe guarantee allows data to be inserted
-into a linked data structure without disrupting RCU readers.
-The updater uses <tt>rcu_assign_pointer()</tt> to insert the
-new data, and readers use <tt>rcu_dereference()</tt> to
-access data, whether new or old.
-The following shows an example of insertion:
-
-<blockquote>
-<pre>
- 1 bool add_gp(int a, int b)
- 2 {
- 3   p = kmalloc(sizeof(*p), GFP_KERNEL);
- 4   if (!p)
- 5     return -ENOMEM;
- 6   spin_lock(&amp;gp_lock);
- 7   if (rcu_access_pointer(gp)) {
- 8     spin_unlock(&amp;gp_lock);
- 9     return false;
-10   }
-11   p-&gt;a = a;
-12   p-&gt;b = b;
-13   rcu_assign_pointer(gp, p);
-14   spin_unlock(&amp;gp_lock);
-15   return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-The <tt>rcu_assign_pointer()</tt> on line&nbsp;13 is conceptually
-equivalent to a simple assignment statement, but also guarantees
-that its assignment will
-happen after the two assignments in lines&nbsp;11 and&nbsp;12,
-similar to the C11 <tt>memory_order_release</tt> store operation.
-It also prevents any number of &ldquo;interesting&rdquo; compiler
-optimizations, for example, the use of <tt>gp</tt> as a scratch
-location immediately preceding the assignment.
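-
-<p>
-One way to visualize this guarantee is as a release store.
-The following one-line sketch is merely a conceptual model, and is
-<i>not</i> the Linux kernel's actual definition of
-<tt>rcu_assign_pointer()</tt>:
-
-<blockquote>
-<pre>
- 1 /* Conceptual sketch only, not the kernel's actual definition. */
- 2 #define rcu_assign_pointer_sketch(p, v) smp_store_release(&amp;(p), (v))
-</pre>
-</blockquote>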
-
-<p>@@QQ@@
-But <tt>rcu_assign_pointer()</tt> does nothing to prevent the
-two assignments to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt>
-from being reordered.
-Can't that also cause problems?
-<p>@@QQA@@
-No, it cannot.
-The readers cannot see either of these two fields until
-the assignment to <tt>gp</tt>, by which time both fields are
-fully initialized.
-So reordering the assignments
-to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt> cannot possibly
-cause any problems.
-<p>@@QQE@@
-
-<p>
-It is tempting to assume that the reader need not do anything special
-to control its accesses to the RCU-protected data,
-as shown in <tt>do_something_gp_buggy()</tt> below:
-
-<blockquote>
-<pre>
- 1 bool do_something_gp_buggy(void)
- 2 {
- 3   rcu_read_lock();
- 4   p = gp;  /* OPTIMIZATIONS GALORE!!! */
- 5   if (p) {
- 6     do_something(p-&gt;a, p-&gt;b);
- 7     rcu_read_unlock();
- 8     return true;
- 9   }
-10   rcu_read_unlock();
-11   return false;
-12 }
-</pre>
-</blockquote>
-
-<p>
-However, this temptation must be resisted because there are a
-surprisingly large number of ways that the compiler
-(to say nothing of
-<a href="https://h71000.www7.hp.com/wizard/wiz_2637.html">DEC Alpha CPUs</a>)
-can trip this code up.
-For but one example, if the compiler were short of registers, it
-might choose to refetch from <tt>gp</tt> rather than keeping
-a separate copy in <tt>p</tt> as follows:
-
-<blockquote>
-<pre>
- 1 bool do_something_gp_buggy_optimized(void)
- 2 {
- 3   rcu_read_lock();
- 4   if (gp) { /* OPTIMIZATIONS GALORE!!! */
-<b> 5     do_something(gp-&gt;a, gp-&gt;b);</b>
- 6     rcu_read_unlock();
- 7     return true;
- 8   }
- 9   rcu_read_unlock();
-10   return false;
-11 }
-</pre>
-</blockquote>
-
-<p>
-If this function ran concurrently with a series of updates that
-replaced the current structure with a new one,
-the fetches of <tt>gp-&gt;a</tt>
-and <tt>gp-&gt;b</tt> might well come from two different structures,
-which could cause serious confusion.
-To prevent this (and much else besides), <tt>do_something_gp()</tt> uses
-<tt>rcu_dereference()</tt> to fetch from <tt>gp</tt>:
-
-<blockquote>
-<pre>
- 1 bool do_something_gp(void)
- 2 {
- 3   rcu_read_lock();
- 4   p = rcu_dereference(gp);
- 5   if (p) {
- 6     do_something(p-&gt;a, p-&gt;b);
- 7     rcu_read_unlock();
- 8     return true;
- 9   }
-10   rcu_read_unlock();
-11   return false;
-12 }
-</pre>
-</blockquote>
-
-<p>
-The <tt>rcu_dereference()</tt> uses volatile casts and (for DEC Alpha)
-memory barriers in the Linux kernel.
-Should a
-<a href="http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf">high-quality implementation of C11 <tt>memory_order_consume</tt> [PDF]</a>
-ever appear, then <tt>rcu_dereference()</tt> could be implemented
-as a <tt>memory_order_consume</tt> load.
-Regardless of the exact implementation, a pointer fetched by
-<tt>rcu_dereference()</tt> may not be used outside of the
-outermost RCU read-side critical section containing that
-<tt>rcu_dereference()</tt>, unless protection of
-the corresponding data element has been passed from RCU to some
-other synchronization mechanism, most commonly locking or
-<a href="https://www.kernel.org/doc/Documentation/RCU/rcuref.txt">reference counting</a>.
-
-<p>
-In short, updaters use <tt>rcu_assign_pointer()</tt> and readers
-use <tt>rcu_dereference()</tt>, and these two RCU API elements
-work together to ensure that readers have a consistent view of
-newly added data elements.
-
-<p>
-Of course, it is also necessary to remove elements from RCU-protected
-data structures, for example, using the following process:
-
-<ol>
-<li>	Remove the data element from the enclosing structure.
-<li>	Wait for all pre-existing RCU read-side critical sections
-	to complete (because only pre-existing readers can possibly have
-	a reference to the newly removed data element).
-<li>	At this point, only the updater has a reference to the
-	newly removed data element, so it can safely reclaim
-	the data element, for example, by passing it to <tt>kfree()</tt>.
-</ol>
-
-This process is implemented by <tt>remove_gp_synchronous()</tt>:
-
-<blockquote>
-<pre>
- 1 bool remove_gp_synchronous(void)
- 2 {
- 3   struct foo *p;
- 4
- 5   spin_lock(&amp;gp_lock);
- 6   p = rcu_access_pointer(gp);
- 7   if (!p) {
- 8     spin_unlock(&amp;gp_lock);
- 9     return false;
-10   }
-11   rcu_assign_pointer(gp, NULL);
-12   spin_unlock(&amp;gp_lock);
-13   synchronize_rcu();
-14   kfree(p);
-15   return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-This function is straightforward, with line&nbsp;13 waiting for a grace
-period before line&nbsp;14 frees the old data element.
-This waiting ensures that readers will reach line&nbsp;7 of
-<tt>do_something_gp()</tt> before the data element referenced by
-<tt>p</tt> is freed.
-The <tt>rcu_access_pointer()</tt> on line&nbsp;6 is similar to
-<tt>rcu_dereference()</tt>, except that:
-
-<ol>
-<li>	The value returned by <tt>rcu_access_pointer()</tt>
-	cannot be dereferenced.
-	If you want to access the value pointed to as well as
-	the pointer itself, use <tt>rcu_dereference()</tt>
-	instead of <tt>rcu_access_pointer()</tt>.
-<li>	The call to <tt>rcu_access_pointer()</tt> need not be
-	protected.
-	In contrast, <tt>rcu_dereference()</tt> must either be
-	within an RCU read-side critical section or in a code
-	segment where the pointer cannot change, for example, in
-	code protected by the corresponding update-side lock.
-</ol>
-
-<p>@@QQ@@
-Without the <tt>rcu_dereference()</tt> or the
-<tt>rcu_access_pointer()</tt>, what destructive optimizations
-might the compiler make use of?
-<p>@@QQA@@
-Let's start with what happens to <tt>do_something_gp()</tt>
-if it fails to use <tt>rcu_dereference()</tt>.
-It could reuse a value formerly fetched from this same pointer.
-It could also fetch the pointer from <tt>gp</tt> in a byte-at-a-time
-manner, resulting in <i>load tearing</i>, in turn resulting in a
-bytewise mash-up of two distinct pointer values.
-It might even use value-speculation optimizations, where it makes a wrong
-guess, but by the time it gets around to checking the value, an update
-has changed the pointer to match the wrong guess.
-Too bad about any dereferences that returned pre-initialization garbage
-in the meantime!
-
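-<p>
-To make the load-tearing and refetching hazards concrete, consider the
-following sketch (hypothetical code, not taken from the kernel) of three
-ways of loading <tt>gp</tt> from within an RCU read-side critical section:
-
-<blockquote>
-<pre>
- 1 p = gp;                  /* Plain load: may be torn, refetched, or speculated. */
- 2 p = READ_ONCE(gp);       /* Single untorn load, but no dependency ordering. */
- 3 p = rcu_dereference(gp); /* Single untorn load plus dependency ordering. */
-</pre>
-</blockquote>
-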
-<p>
-For <tt>remove_gp_synchronous()</tt>, as long as all modifications
-to <tt>gp</tt> are carried out while holding <tt>gp_lock</tt>,
-the above optimizations are harmless.
-However,
-with <tt>CONFIG_SPARSE_RCU_POINTER=y</tt>,
-<tt>sparse</tt> will complain if you
-define <tt>gp</tt> with <tt>__rcu</tt> and then
-access it without using
-either <tt>rcu_access_pointer()</tt> or <tt>rcu_dereference()</tt>.
-<p>@@QQE@@
-
-<p>
-In short, RCU's publish-subscribe guarantee is provided by the combination
-of <tt>rcu_assign_pointer()</tt> and <tt>rcu_dereference()</tt>.
-This guarantee allows data elements to be safely added to RCU-protected
-linked data structures without disrupting RCU readers.
-This guarantee can be used in combination with the grace-period
-guarantee to also allow data elements to be removed from RCU-protected
-linked data structures, again without disrupting RCU readers.
-
-<p>
-This guarantee was only partially premeditated.
-DYNIX/ptx used an explicit memory barrier for publication, but had nothing
-resembling <tt>rcu_dereference()</tt> for subscription, nor did it
-have anything resembling the <tt>smp_read_barrier_depends()</tt>
-that was later subsumed into <tt>rcu_dereference()</tt>.
-The need for these operations made itself known quite suddenly at a
-late-1990s meeting with the DEC Alpha architects, back in the days when
-DEC was still a free-standing company.
-It took the Alpha architects a good hour to convince me that any sort
-of barrier would ever be needed, and it then took me a good <i>two</i> hours
-to convince them that their documentation did not make this point clear.
-More recent work with the C and C++ standards committees has provided
-much education on tricks and traps from the compiler.
-In short, compilers were much less tricky in the early 1990s, but in
-2015, don't even think about omitting <tt>rcu_dereference()</tt>!
-
-<h3><a name="Memory-Barrier Guarantees">Memory-Barrier Guarantees</a></h3>
-
-<p>
-The previous section's simple linked-data-structure scenario clearly
-demonstrates the need for RCU's stringent memory-ordering guarantees on
-systems with more than one CPU:
-
-<ol>
-<li>	Each CPU that has an RCU read-side critical section that
-	begins before <tt>synchronize_rcu()</tt> starts is
-	guaranteed to execute a full memory barrier between the time
-	that the RCU read-side critical section ends and the time that
-	<tt>synchronize_rcu()</tt> returns.
-	Without this guarantee, a pre-existing RCU read-side critical section
-	might hold a reference to the newly removed <tt>struct foo</tt>
-	after the <tt>kfree()</tt> on line&nbsp;14 of
-	<tt>remove_gp_synchronous()</tt>.
-<li>	Each CPU that has an RCU read-side critical section that ends
-	after <tt>synchronize_rcu()</tt> returns is guaranteed
-	to execute a full memory barrier between the time that
-	<tt>synchronize_rcu()</tt> begins and the time that the RCU
-	read-side critical section begins.
-	Without this guarantee, a later RCU read-side critical section
-	running after the <tt>kfree()</tt> on line&nbsp;14 of
-	<tt>remove_gp_synchronous()</tt> might
-	later run <tt>do_something_gp()</tt> and find the
-	newly deleted <tt>struct foo</tt>.
-<li>	If the task invoking <tt>synchronize_rcu()</tt> remains
-	on a given CPU, then that CPU is guaranteed to execute a full
-	memory barrier sometime during the execution of
-	<tt>synchronize_rcu()</tt>.
-	This guarantee ensures that the <tt>kfree()</tt> on
-	line&nbsp;14 of <tt>remove_gp_synchronous()</tt> really does
-	execute after the removal on line&nbsp;11.
-<li>	If the task invoking <tt>synchronize_rcu()</tt> migrates
-	among a group of CPUs during that invocation, then each of the
-	CPUs in that group is guaranteed to execute a full memory barrier
-	sometime during the execution of <tt>synchronize_rcu()</tt>.
-	This guarantee also ensures that the <tt>kfree()</tt> on
-	line&nbsp;14 of <tt>remove_gp_synchronous()</tt> really does
-	execute after the removal on
-	line&nbsp;11, but also in the case where the thread executing the
-	<tt>synchronize_rcu()</tt> migrates in the meantime.
-</ol>
-
-<p>@@QQ@@
-Given that multiple CPUs can start RCU read-side critical sections
-at any time without any ordering whatsoever, how can RCU possibly tell whether
-or not a given RCU read-side critical section starts before a
-given instance of <tt>synchronize_rcu()</tt>?
-<p>@@QQA@@
-If RCU cannot tell whether or not a given
-RCU read-side critical section starts before a
-given instance of <tt>synchronize_rcu()</tt>,
-then it must assume that the RCU read-side critical section
-started first.
-In other words, a given instance of <tt>synchronize_rcu()</tt>
-can avoid waiting on a given RCU read-side critical section only
-if it can prove that <tt>synchronize_rcu()</tt> started first.
-<p>@@QQE@@
-
-<p>@@QQ@@
-The first and second guarantees require unbelievably strict ordering!
-Are all these memory barriers <i> really</i> required?
-<p>@@QQA@@
-Yes, they really are required.
-To see why the first guarantee is required, consider the following
-sequence of events:
-
-<ol>
-<li>	CPU 1: <tt>rcu_read_lock()</tt>
-<li>	CPU 1: <tt>q = rcu_dereference(gp);
-	/* Very likely to return p. */</tt>
-<li>	CPU 0: <tt>list_del_rcu(p);</tt>
-<li>	CPU 0: <tt>synchronize_rcu()</tt> starts.
-<li>	CPU 1: <tt>do_something_with(q-&gt;a);
-	/* No smp_mb(), so might happen after kfree(). */</tt>
-<li>	CPU 1: <tt>rcu_read_unlock()</tt>
-<li>	CPU 0: <tt>synchronize_rcu()</tt> returns.
-<li>	CPU 0: <tt>kfree(p);</tt>
-</ol>
-
-<p>
-Therefore, there absolutely must be a full memory barrier between the
-end of the RCU read-side critical section and the end of the
-grace period.
-
-<p>
-The sequence of events demonstrating the necessity of the second rule
-is roughly similar:
-
-<ol>
-<li>	CPU 0: <tt>list_del_rcu(p);</tt>
-<li>	CPU 0: <tt>synchronize_rcu()</tt> starts.
-<li>	CPU 1: <tt>rcu_read_lock()</tt>
-<li>	CPU 1: <tt>q = rcu_dereference(gp);
-	/* Might return p if no memory barrier. */</tt>
-<li>	CPU 0: <tt>synchronize_rcu()</tt> returns.
-<li>	CPU 0: <tt>kfree(p);</tt>
-<li>	CPU 1: <tt>do_something_with(q-&gt;a); /* Boom!!! */</tt>
-<li>	CPU 1: <tt>rcu_read_unlock()</tt>
-</ol>
-
-<p>
-And similarly, without a memory barrier between the beginning of the
-grace period and the beginning of the RCU read-side critical section,
-CPU&nbsp;1 might end up accessing the freelist.
-
-<p>
-The &ldquo;as if&rdquo; rule of course applies, so that any implementation
-that acts as if the appropriate memory barriers were in place is a
-correct implementation.
-That said, it is much easier to fool yourself into believing that you have
-adhered to the as-if rule than it is to actually adhere to it!
-<p>@@QQE@@
-
-<p>
-Note that these memory-barrier requirements do not replace the fundamental
-RCU requirement that a grace period wait for all pre-existing readers.
-On the contrary, the memory barriers called out in this section must operate in
-such a way as to <i>enforce</i> this fundamental requirement.
-Of course, different implementations enforce this requirement in different
-ways, but enforce it they must.
-
-<h3><a name="RCU Primitives Guaranteed to Execute Unconditionally">RCU Primitives Guaranteed to Execute Unconditionally</a></h3>
-
-<p>
-The common-case RCU primitives are unconditional.
-They are invoked, they do their job, and they return, with no possibility
-of error, and no need to retry.
-This is a key RCU design philosophy.
-
-<p>
-However, this philosophy is pragmatic rather than pigheaded.
-If someone comes up with a good justification for a particular conditional
-RCU primitive, it might well be implemented and added.
-After all, this guarantee was reverse-engineered, not premeditated.
-The unconditional nature of the RCU primitives was initially an
-accident of implementation, and later experience with conditional
-synchronization primitives caused me to elevate this
-accident to a guarantee.
-Therefore, the justification for adding a conditional primitive to
-RCU would need to be based on detailed and compelling use cases.
-
-<h3><a name="Guaranteed Read-to-Write Upgrade">Guaranteed Read-to-Write Upgrade</a></h3>
-
-<p>
-As far as RCU is concerned, it is always possible to carry out an
-update within an RCU read-side critical section.
-For example, that RCU read-side critical section might search for
-a given data element, and then might acquire the update-side
-spinlock in order to update that element, all while remaining
-in that RCU read-side critical section.
-Of course, it is necessary to exit the RCU read-side critical section
-before invoking <tt>synchronize_rcu()</tt>; however, this
-inconvenience can be avoided through use of the
-<tt>call_rcu()</tt> and <tt>kfree_rcu()</tt> API members
-described later in this document.
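-
-<p>
-A sketch of this read-to-write upgrade pattern might look as follows,
-assuming the same <tt>gp_lock</tt>-protected update side used in the
-earlier examples (the remaining names are hypothetical):
-
-<blockquote>
-<pre>
- 1 bool upgrade_and_update_a(int new_a)
- 2 {
- 3   struct foo *p;
- 4   bool updated = false;
- 5
- 6   rcu_read_lock();
- 7   p = rcu_dereference(gp);
- 8   if (p) {
- 9     spin_lock(&amp;gp_lock);  /* Upgrade to write while still within the reader. */
-10     if (p == rcu_access_pointer(gp)) {  /* Recheck: still the current element? */
-11       p-&gt;a = new_a;
-12       updated = true;
-13     }
-14     spin_unlock(&amp;gp_lock);
-15   }
-16   rcu_read_unlock();
-17   return updated;
-18 }
-</pre>
-</blockquote>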
-
-<p>@@QQ@@
-But how does the upgrade-to-write operation exclude other readers?
-<p>@@QQA@@
-It doesn't, just like normal RCU updates, which also do not exclude
-RCU readers.
-<p>@@QQE@@
-
-<p>
-This guarantee allows lookup code to be shared between read-side
-and update-side code, and was premeditated, appearing in the earliest
-DYNIX/ptx RCU documentation.
-
-<h2><a name="Fundamental Non-Requirements">Fundamental Non-Requirements</a></h2>
-
-<p>
-RCU provides extremely lightweight readers, and its read-side guarantees,
-though quite useful, are correspondingly lightweight.
-It is therefore all too easy to assume that RCU is guaranteeing more
-than it really is.
-Of course, the list of things that RCU does not guarantee is infinitely
-long; however, the following sections list a few non-guarantees that
-have caused confusion.
-Except where otherwise noted, these non-guarantees were premeditated.
-
-<ol>
-<li>	<a href="#Readers Impose Minimal Ordering">
-	Readers Impose Minimal Ordering</a>
-<li>	<a href="#Readers Do Not Exclude Updaters">
-	Readers Do Not Exclude Updaters</a>
-<li>	<a href="#Updaters Only Wait For Old Readers">
-	Updaters Only Wait For Old Readers</a>
-<li>	<a href="#Grace Periods Don't Partition Read-Side Critical Sections">
-	Grace Periods Don't Partition Read-Side Critical Sections</a>
-<li>	<a href="#Read-Side Critical Sections Don't Partition Grace Periods">
-	Read-Side Critical Sections Don't Partition Grace Periods</a>
-<li>	<a href="#Disabling Preemption Does Not Block Grace Periods">
-	Disabling Preemption Does Not Block Grace Periods</a>
-</ol>
-
-<h3><a name="Readers Impose Minimal Ordering">Readers Impose Minimal Ordering</a></h3>
-
-<p>
-Reader-side markers such as <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> provide absolutely no ordering guarantees
-except through their interaction with the grace-period APIs such as
-<tt>synchronize_rcu()</tt>.
-To see this, consider the following pair of threads:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3   rcu_read_lock();
- 4   WRITE_ONCE(x, 1);
- 5   rcu_read_unlock();
- 6   rcu_read_lock();
- 7   WRITE_ONCE(y, 1);
- 8   rcu_read_unlock();
- 9 }
-10
-11 void thread1(void)
-12 {
-13   rcu_read_lock();
-14   r1 = READ_ONCE(y);
-15   rcu_read_unlock();
-16   rcu_read_lock();
-17   r2 = READ_ONCE(x);
-18   rcu_read_unlock();
-19 }
-</pre>
-</blockquote>
-
-<p>
-After <tt>thread0()</tt> and <tt>thread1()</tt> execute
-concurrently, it is quite possible to have
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 0)
-</pre>
-</blockquote>
-
-(that is, <tt>y</tt> appears to have been assigned before <tt>x</tt>),
-which would not be possible if <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> had much in the way of ordering
-properties.
-But they do not, so the CPU is within its rights
-to do significant reordering.
-This is by design:  Any significant ordering constraints would slow down
-these fast-path APIs.
-
-<p>@@QQ@@
-Can't the compiler also reorder this code?
-<p>@@QQA@@
-No, the volatile casts in <tt>READ_ONCE()</tt> and
-<tt>WRITE_ONCE()</tt> prevent the compiler from reordering in
-this particular case.
-<p>@@QQE@@
-
-<h3><a name="Readers Do Not Exclude Updaters">Readers Do Not Exclude Updaters</a></h3>
-
-<p>
-Neither <tt>rcu_read_lock()</tt> nor <tt>rcu_read_unlock()</tt>
-exclude updates.
-All they do is to prevent grace periods from ending.
-The following example illustrates this:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3   rcu_read_lock();
- 4   r1 = READ_ONCE(y);
- 5   if (r1) {
- 6     do_something_with_nonzero_x();
- 7     r2 = READ_ONCE(x);
- 8     WARN_ON(!r2); /* BUG!!! */
- 9   }
-10   rcu_read_unlock();
-11 }
-12
-13 void thread1(void)
-14 {
-15   spin_lock(&amp;my_lock);
-16   WRITE_ONCE(x, 1);
-17   WRITE_ONCE(y, 1);
-18   spin_unlock(&amp;my_lock);
-19 }
-</pre>
-</blockquote>
-
-<p>
-If the <tt>thread0()</tt> function's <tt>rcu_read_lock()</tt>
-excluded the <tt>thread1()</tt> function's update,
-the <tt>WARN_ON()</tt> could never fire.
-But the fact is that <tt>rcu_read_lock()</tt> does not exclude
-much of anything aside from subsequent grace periods, of which
-<tt>thread1()</tt> has none, so the
-<tt>WARN_ON()</tt> can and does fire.
-
-<h3><a name="Updaters Only Wait For Old Readers">Updaters Only Wait For Old Readers</a></h3>
-
-<p>
-It might be tempting to assume that after <tt>synchronize_rcu()</tt>
-completes, there are no readers executing.
-This temptation must be avoided because
-new readers can start immediately after <tt>synchronize_rcu()</tt>
-starts, and <tt>synchronize_rcu()</tt> is under no
-obligation to wait for these new readers.
-
-<p>@@QQ@@
-Suppose that synchronize_rcu() did wait until all readers had completed.
-Would the updater be able to rely on this?
-<p>@@QQA@@
-No.
-Even if <tt>synchronize_rcu()</tt> were to wait until
-all readers had completed, a new reader might start immediately after
-<tt>synchronize_rcu()</tt> completed.
-Therefore, the code following
-<tt>synchronize_rcu()</tt> cannot rely on there being no readers
-in any case.
-<p>@@QQE@@
-
-<h3><a name="Grace Periods Don't Partition Read-Side Critical Sections">
-Grace Periods Don't Partition Read-Side Critical Sections</a></h3>
-
-<p>
-It is tempting to assume that if any part of one RCU read-side critical
-section precedes a given grace period, and if any part of another RCU
-read-side critical section follows that same grace period, then all of
-the first RCU read-side critical section must precede all of the second.
-However, this just isn't the case: A single grace period does not
-partition the set of RCU read-side critical sections.
-An example of this situation can be illustrated as follows, where
-<tt>a</tt>, <tt>b</tt>, and <tt>c</tt> are initially all zero:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3   rcu_read_lock();
- 4   WRITE_ONCE(a, 1);
- 5   WRITE_ONCE(b, 1);
- 6   rcu_read_unlock();
- 7 }
- 8
- 9 void thread1(void)
-10 {
-11   r1 = READ_ONCE(a);
-12   synchronize_rcu();
-13   WRITE_ONCE(c, 1);
-14 }
-15
-16 void thread2(void)
-17 {
-18   rcu_read_lock();
-19   r2 = READ_ONCE(b);
-20   r3 = READ_ONCE(c);
-21   rcu_read_unlock();
-22 }
-</pre>
-</blockquote>
-
-<p>
-It turns out that the outcome:
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 0 &amp;&amp; r3 == 1)
-</pre>
-</blockquote>
-
-is entirely possible.
-The following figure shows how this can happen, with each circled
-<tt>QS</tt> indicating the point at which RCU recorded a
-<i>quiescent state</i> for each thread, that is, a state in which
-RCU knows that the thread cannot be in the midst of an RCU read-side
-critical section that started before the current grace period:
-
-<p><img src="GPpartitionReaders1.svg" alt="GPpartitionReaders1.svg" width="60%"></p>
-
-<p>
-If it is necessary to partition RCU read-side critical sections in this
-manner, it is necessary to use two grace periods, where the first
-grace period is known to end before the second grace period starts:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3   rcu_read_lock();
- 4   WRITE_ONCE(a, 1);
- 5   WRITE_ONCE(b, 1);
- 6   rcu_read_unlock();
- 7 }
- 8
- 9 void thread1(void)
-10 {
-11   r1 = READ_ONCE(a);
-12   synchronize_rcu();
-13   WRITE_ONCE(c, 1);
-14 }
-15
-16 void thread2(void)
-17 {
-18   r2 = READ_ONCE(c);
-19   synchronize_rcu();
-20   WRITE_ONCE(d, 1);
-21 }
-22
-23 void thread3(void)
-24 {
-25   rcu_read_lock();
-26   r3 = READ_ONCE(b);
-27   r4 = READ_ONCE(d);
-28   rcu_read_unlock();
-29 }
-</pre>
-</blockquote>
-
-<p>
-Here, if <tt>(r1 == 1)</tt>, then
-<tt>thread0()</tt>'s write to <tt>b</tt> must happen
-before the end of <tt>thread1()</tt>'s grace period.
-If in addition <tt>(r4 == 1)</tt>, then
-<tt>thread3()</tt>'s read from <tt>b</tt> must happen
-after the beginning of <tt>thread2()</tt>'s grace period.
-If it is also the case that <tt>(r2 == 1)</tt>, then the
-end of <tt>thread1()</tt>'s grace period must precede the
-beginning of <tt>thread2()</tt>'s grace period.
-This means that the two RCU read-side critical sections cannot overlap,
-guaranteeing that <tt>(r3 == 1)</tt>.
-As a result, the outcome:
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 1 &amp;&amp; r3 == 0 &amp;&amp; r4 == 1)
-</pre>
-</blockquote>
-
-cannot happen.
-
-<p>
-This non-requirement was also non-premeditated, but became apparent
-when studying RCU's interaction with memory ordering.
-
-<h3><a name="Read-Side Critical Sections Don't Partition Grace Periods">
-Read-Side Critical Sections Don't Partition Grace Periods</a></h3>
-
-<p>
-It is also tempting to assume that if an RCU read-side critical section
-happens between a pair of grace periods, then those grace periods cannot
-overlap.
-However, this temptation leads nowhere good, as can be illustrated by
-the following, with all variables initially zero:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3   rcu_read_lock();
- 4   WRITE_ONCE(a, 1);
- 5   WRITE_ONCE(b, 1);
- 6   rcu_read_unlock();
- 7 }
- 8
- 9 void thread1(void)
-10 {
-11   r1 = READ_ONCE(a);
-12   synchronize_rcu();
-13   WRITE_ONCE(c, 1);
-14 }
-15
-16 void thread2(void)
-17 {
-18   rcu_read_lock();
-19   WRITE_ONCE(d, 1);
-20   r2 = READ_ONCE(c);
-21   rcu_read_unlock();
-22 }
-23
-24 void thread3(void)
-25 {
-26   r3 = READ_ONCE(d);
-27   synchronize_rcu();
-28   WRITE_ONCE(e, 1);
-29 }
-30
-31 void thread4(void)
-32 {
-33   rcu_read_lock();
-34   r4 = READ_ONCE(b);
-35   r5 = READ_ONCE(e);
-36   rcu_read_unlock();
-37 }
-</pre>
-</blockquote>
-
-<p>
-In this case, the outcome:
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 1 &amp;&amp; r3 == 1 &amp;&amp; r4 == 0 &amp;&amp; r5 == 1)
-</pre>
-</blockquote>
-
-is entirely possible, as illustrated below:
-
-<p><img src="ReadersPartitionGP1.svg" alt="ReadersPartitionGP1.svg" width="100%"></p>
-
-<p>
-Again, an RCU read-side critical section can overlap almost all of a
-given grace period, just so long as it does not overlap the entire
-grace period.
-As a result, an RCU read-side critical section cannot partition a pair
-of RCU grace periods.
-
-<p>@@QQ@@
-How long a sequence of grace periods, each separated by an RCU read-side
-critical section, would be required to partition the RCU read-side
-critical sections at the beginning and end of the chain?
-<p>@@QQA@@
-In theory, an infinite number.
-In practice, an unknown number that is sensitive to both implementation
-details and timing considerations.
-Therefore, even in practice, RCU users must abide by the theoretical rather
-than the practical answer.
-<p>@@QQE@@
-
-<h3><a name="Disabling Preemption Does Not Block Grace Periods">
-Disabling Preemption Does Not Block Grace Periods</a></h3>
-
-<p>
-There was a time when disabling preemption on any given CPU would block
-subsequent grace periods.
-However, this was an accident of implementation and is not a requirement.
-And in the current Linux-kernel implementation, disabling preemption
-on a given CPU in fact does not block grace periods, as Oleg Nesterov
-<a href="https://lkml.kernel.org/g/20150614193825.GA19582@redhat.com">demonstrated</a>.
-
-<p>
-If you need a preempt-disable region to block grace periods, you need to add
-<tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>, for example
-as follows:
-
-<blockquote>
-<pre>
- 1 preempt_disable();
- 2 rcu_read_lock();
- 3 do_something();
- 4 rcu_read_unlock();
- 5 preempt_enable();
- 6
- 7 /* Spinlocks implicitly disable preemption. */
- 8 spin_lock(&amp;mylock);
- 9 rcu_read_lock();
-10 do_something();
-11 rcu_read_unlock();
-12 spin_unlock(&amp;mylock);
-</pre>
-</blockquote>
-
-<p>
-In theory, you could enter the RCU read-side critical section first,
-but it is more efficient to keep the entire RCU read-side critical
-section contained in the preempt-disable region as shown above.
-Of course, RCU read-side critical sections that extend outside of
-preempt-disable regions will work correctly, but such critical sections
-can be preempted, which forces <tt>rcu_read_unlock()</tt> to do
-more work.
-And no, this is <i>not</i> an invitation to enclose all of your RCU
-read-side critical sections within preempt-disable regions, because
-doing so would degrade real-time response.
-
-<p>
-This non-requirement appeared with preemptible RCU.
-If you need a grace period that waits on non-preemptible code regions, use
-<a href="#Sched Flavor">RCU-sched</a>.
-
-<h2><a name="Parallelism Facts of Life">Parallelism Facts of Life</a></h2>
-
-<p>
-These parallelism facts of life are by no means specific to RCU, but
-the RCU implementation must abide by them.
-They therefore bear repeating:
-
-<ol>
-<li>	Any CPU or task may be delayed at any time,
-	and any attempts to avoid these delays by disabling
-	preemption, interrupts, or whatever are completely futile.
-	This is most obvious in preemptible user-level
-	environments and in virtualized environments (where
-	a given guest OS's VCPUs can be preempted at any time by
-	the underlying hypervisor), but can also happen in bare-metal
-	environments due to ECC errors, NMIs, and other hardware
-	events.
-	Although a delay of more than about 20 seconds can result
-	in splats, the RCU implementation is obligated to use
-	algorithms that can tolerate extremely long delays, but where
-	&ldquo;extremely long&rdquo; is not long enough to allow
-	wrap-around when incrementing a 64-bit counter.
-<li>	Both the compiler and the CPU can reorder memory accesses.
-	Where it matters, RCU must use compiler directives and
-	memory-barrier instructions to preserve ordering.
-<li>	Conflicting writes to memory locations in any given cache line
-	will result in expensive cache misses.
-	Greater numbers of concurrent writes and more-frequent
-	concurrent writes will result in more dramatic slowdowns.
-	RCU is therefore obligated to use algorithms that have
-	sufficient locality to avoid significant performance and
-	scalability problems.
-<li>	As a rough rule of thumb, only one CPU's worth of processing
-	may be carried out under the protection of any given exclusive
-	lock.
-	RCU must therefore use scalable locking designs.
-<li>	Counters are finite, especially on 32-bit systems.
-	RCU's use of counters must therefore tolerate counter wrap,
-	or be designed such that counter wrap would take way more
-	time than a single system is likely to run.
-	An uptime of ten years is quite possible, a runtime
-	of a century much less so.
-	As an example of the latter, RCU's dyntick-idle nesting counter
-	allows 54 bits for interrupt nesting level (this counter
-	is 64 bits even on a 32-bit system).
-	Overflowing this counter requires 2<sup>54</sup>
-	half-interrupts on a given CPU without that CPU ever going idle.
-	If a half-interrupt happened every microsecond, it would take
-	570 years of runtime to overflow this counter, which is currently
-	believed to be an acceptably long time.
-<li>	Linux systems can have thousands of CPUs running a single
-	Linux kernel in a single shared-memory environment.
-	RCU must therefore pay close attention to high-end scalability.
-</ol>
-
-<p>
-This last parallelism fact of life means that RCU must pay special
-attention to the preceding facts of life.
-The idea that Linux might scale to systems with thousands of CPUs would
-have been met with some skepticism in the 1990s, but these requirements
-would otherwise have been unsurprising, even in the early 1990s.
-
-<h2><a name="Quality-of-Implementation Requirements">Quality-of-Implementation Requirements</a></h2>
-
-<p>
-These sections list quality-of-implementation requirements.
-Although an RCU implementation that ignores these requirements could
-still be used, it would likely be subject to limitations that would
-make it inappropriate for industrial-strength production use.
-Classes of quality-of-implementation requirements are as follows:
-
-<ol>
-<li>	<a href="#Specialization">Specialization</a>
-<li>	<a href="#Performance and Scalability">Performance and Scalability</a>
-<li>	<a href="#Composability">Composability</a>
-<li>	<a href="#Corner Cases">Corner Cases</a>
-</ol>
-
-<p>
-These classes are covered in the following sections.
-
-<h3><a name="Specialization">Specialization</a></h3>
-
-<p>
-RCU is and always has been intended primarily for read-mostly situations, as
-illustrated by the following figure.
-This means that RCU's read-side primitives are optimized, often at the
-expense of its update-side primitives.
-
-<p><img src="RCUApplicability.svg" alt="RCUApplicability.svg" width="70%"></p>
-
-<p>
-This focus on read-mostly situations means that RCU must interoperate
-with other synchronization primitives.
-For example, the <tt>add_gp()</tt> and <tt>remove_gp_synchronous()</tt>
-examples discussed earlier use RCU to protect readers and locking to
-coordinate updaters.
-However, the need extends much farther, requiring that a variety of
-synchronization primitives be legal within RCU read-side critical sections,
-including spinlocks, sequence locks, atomic operations, reference
-counters, and memory barriers.
-
-<p>@@QQ@@
-What about sleeping locks?
-<p>@@QQA@@
-These are forbidden within Linux-kernel RCU read-side critical sections
-because it is not legal to place a quiescent state (in this case,
-voluntary context switch) within an RCU read-side critical section.
-However, sleeping locks may be used within userspace RCU read-side critical
-sections, and also within Linux-kernel sleepable RCU
-<a href="#Sleepable RCU">(SRCU)</a>
-read-side critical sections.
-In addition, the -rt patchset turns spinlocks into sleeping locks so
-that the corresponding critical sections can be preempted, which
-also means that these sleeplockified spinlocks (but not other sleeping locks!)
-may be acquired within -rt-Linux-kernel RCU read-side critical sections.
-
-<p>
-Note that it <i>is</i> legal for a normal RCU read-side critical section
-to conditionally acquire a sleeping lock (as in <tt>mutex_trylock()</tt>),
-but only as long as it does not loop indefinitely attempting to
-conditionally acquire that sleeping lock.
-The key point is that things like <tt>mutex_trylock()</tt>
-either return with the mutex held, or return an error indication if
-the mutex was not immediately available.
-Either way, <tt>mutex_trylock()</tt> returns immediately without sleeping.
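-
-<p>
-For example, a reader might conditionally acquire a per-element mutex
-as sketched below.
-(This is only a sketch:  The <tt>-&gt;lock</tt> field is hypothetical
-and does not appear in this document's earlier <tt>struct foo</tt>.)
-
-<blockquote>
-<pre>
- 1 bool try_update_a(struct foo *p, int new_a)
- 2 {
- 3   if (!mutex_trylock(&amp;p-&gt;lock))
- 4     return false;  /* Lock unavailable, but we did not sleep. */
- 5   p-&gt;a = new_a;
- 6   mutex_unlock(&amp;p-&gt;lock);
- 7   return true;
- 8 }
-</pre>
-</blockquote>
-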
-<p>@@QQE@@
-
-<p>
-It often comes as a surprise that many algorithms do not require a
-consistent view of data, but can instead function with stale or
-inconsistent data, with network routing being the poster child.
-Internet routing algorithms take significant time to propagate
-updates, so that by the time an update arrives at a given system,
-that system has been sending network traffic the wrong way for
-a considerable length of time.
-Having a few threads continue to send traffic the wrong way for a
-few more milliseconds is clearly not a problem:  In the worst case,
-TCP retransmissions will eventually get the data where it needs to go.
-In general, when tracking the state of the universe outside of the
-computer, some level of inconsistency must be tolerated due to
-speed-of-light delays if nothing else.
-
-<p>
-Furthermore, uncertainty about external state is inherent in many cases.
-For example, a pair of veterinarians might use heartbeat to determine
-whether or not a given cat was alive.
-But how long should they wait after the last heartbeat to decide that
-the cat is in fact dead?
-Waiting less than 400 milliseconds makes no sense because this would
-mean that a relaxed cat would be considered to cycle between death
-and life more than 100 times per minute.
-Moreover, just as with human beings, a cat's heart might stop for
-some period of time, so the exact wait period is a judgment call.
-One of our pair of veterinarians might wait 30 seconds before pronouncing
-the cat dead, while the other might insist on waiting a full minute.
-The two veterinarians would then disagree on the state of the cat during
-the final 30 seconds of the minute following the last heartbeat, as
-fancifully illustrated below:
-
-<p><img src="2013-08-is-it-dead.png" alt="2013-08-is-it-dead.png" width="431"></p>
-
-<p>
-Interestingly enough, this same situation applies to hardware.
-When push comes to shove, how do we tell whether or not some
-external server has failed?
-We send messages to it periodically, and declare it failed if we
-don't receive a response within a given period of time.
-Policy decisions can usually tolerate short
-periods of inconsistency.
-The policy was decided some time ago, and is only now being put into
-effect, so a few milliseconds of delay is normally inconsequential.
-
-<p>
-However, there are algorithms that absolutely must see consistent data.
-For example, the translation between a user-level SystemV semaphore
-ID to the corresponding in-kernel data structure is protected by RCU,
-but it is absolutely forbidden to update a semaphore that has just been
-removed.
-In the Linux kernel, this need for consistency is accommodated by acquiring
-spinlocks located in the in-kernel data structure from within
-the RCU read-side critical section, and this is indicated by the
-green box in the figure above.
-Many other techniques may be used, and are in fact used within the
-Linux kernel.
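-
-<p>
-The general shape of this technique is sketched below.
-The names are hypothetical and are not taken from the Linux kernel's
-actual SystemV semaphore code:
-
-<blockquote>
-<pre>
- 1 rcu_read_lock();
- 2 s = lookup_sem(id);        /* RCU-protected lookup (hypothetical). */
- 3 if (s) {
- 4   spin_lock(&amp;s-&gt;lock);     /* Per-object lock supplies the needed consistency. */
- 5   if (!s-&gt;deleted)
- 6     update_sem(s, arg);    /* Cannot race with a concurrent removal. */
- 7   spin_unlock(&amp;s-&gt;lock);
- 8 }
- 9 rcu_read_unlock();
-</pre>
-</blockquote>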
-
-<p>
-In short, RCU is not required to maintain consistency, and other
-mechanisms may be used in concert with RCU when consistency is required.
-RCU's specialization allows it to do its job extremely well, and its
-ability to interoperate with other synchronization mechanisms allows
-the right mix of synchronization tools to be used for a given job.
-
-<h3><a name="Performance and Scalability">Performance and Scalability</a></h3>
-
-<p>
-Energy efficiency is a critical component of performance today,
-and Linux-kernel RCU implementations must therefore avoid unnecessarily
-awakening idle CPUs.
-I cannot claim that this requirement was premeditated.
-In fact, I learned of it during a telephone conversation in which I
-was given &ldquo;frank and open&rdquo; feedback on the importance
-of energy efficiency in battery-powered systems and on specific
-energy-efficiency shortcomings of the Linux-kernel RCU implementation.
-In my experience, the battery-powered embedded community will consider
-any unnecessary wakeups to be extremely unfriendly acts.
-So much so that mere Linux-kernel-mailing-list posts are
-insufficient to vent their ire.
-
-<p>
-Memory consumption is not particularly important in most
-situations, and has become decreasingly
-so as memory sizes have expanded and memory
-costs have plummeted.
-However, as I learned from Matt Mackall's
-<a href="http://elinux.org/Linux_Tiny-FAQ">bloatwatch</a>
-efforts, memory footprint is critically important on single-CPU systems with
-non-preemptible (<tt>CONFIG_PREEMPT=n</tt>) kernels, and thus
-<a href="https://lkml.kernel.org/g/20090113221724.GA15307@linux.vnet.ibm.com">tiny RCU</a>
-was born.
-Josh Triplett has since taken over the small-memory banner with his
-<a href="https://tiny.wiki.kernel.org/">Linux kernel tinification</a>
-project, which resulted in
-<a href="#Sleepable RCU">SRCU</a>
-becoming optional for those kernels not needing it.
-
-<p>
-The remaining performance requirements are, for the most part,
-unsurprising.
-For example, in keeping with RCU's read-side specialization,
-<tt>rcu_dereference()</tt> should have negligible overhead (for
-example, suppression of a few minor compiler optimizations).
-Similarly, in non-preemptible environments, <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> should have exactly zero overhead.
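-
-<p>
-To see how zero overhead is even possible, note that in a
-non-preemptible kernel, code within an RCU read-side critical section
-cannot be preempted and (as noted earlier) must not sleep, so no
-quiescent state can appear within the critical section even if the
-markers emit no instructions at all.
-The following sketch illustrates the idea; it is <i>not</i> the Linux
-kernel's actual definition of these primitives:
-
-<blockquote>
-<pre>
- 1 /* Sketch for a non-preemptible (CONFIG_PREEMPT=n) kernel. */
- 2 static inline void sketch_rcu_read_lock(void)
- 3 {
- 4   barrier();  /* Emits no instructions, only constrains the compiler. */
- 5 }
- 6
- 7 static inline void sketch_rcu_read_unlock(void)
- 8 {
- 9   barrier();
-10 }
-</pre>
-</blockquote>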
-
-<p>
-In preemptible environments, in the case where the RCU read-side
-critical section was not preempted (as will be the case for the
-highest-priority real-time process), <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> should have minimal overhead.
-In particular, they should not contain atomic read-modify-write
-operations, memory-barrier instructions, preemption disabling,
-interrupt disabling, or backwards branches.
-However, in the case where the RCU read-side critical section was preempted,
-<tt>rcu_read_unlock()</tt> may acquire spinlocks and disable interrupts.
-This is why it is better to nest an RCU read-side critical section
-within a preempt-disable region than vice versa, at least in cases
-where that critical section is short enough to avoid unduly degrading
-real-time latencies.
-
-<p>
-The <tt>synchronize_rcu()</tt> grace-period-wait primitive is
-optimized for throughput.
-It may therefore incur several milliseconds of latency in addition to
-the duration of the longest RCU read-side critical section.
-On the other hand, multiple concurrent invocations of
-<tt>synchronize_rcu()</tt> are required to use batching optimizations
-so that they can be satisfied by a single underlying grace-period-wait
-operation.
-For example, in the Linux kernel, it is not unusual for a single
-grace-period-wait operation to serve more than
-<a href="https://www.usenix.org/conference/2004-usenix-annual-technical-conference/making-rcu-safe-deep-sub-millisecond-response">1,000 separate invocations</a>
-of <tt>synchronize_rcu()</tt>, thus amortizing the per-invocation
-overhead down to nearly zero.
-However, the grace-period optimization is also required to avoid
-measurable degradation of real-time scheduling and interrupt latencies.
-
-<p>
-In some cases, the multi-millisecond <tt>synchronize_rcu()</tt>
-latencies are unacceptable.
-In these cases, <tt>synchronize_rcu_expedited()</tt> may be used
-instead, reducing the grace-period latency down to a few tens of
-microseconds on small systems, at least in cases where the RCU read-side
-critical sections are short.
-There are currently no special latency requirements for
-<tt>synchronize_rcu_expedited()</tt> on large systems, but,
-consistent with the empirical nature of the RCU specification,
-that is subject to change.
-However, there most definitely are scalability requirements:
-A storm of <tt>synchronize_rcu_expedited()</tt> invocations on 4096
-CPUs should at least make reasonable forward progress.
-In return for its shorter latencies, <tt>synchronize_rcu_expedited()</tt>
-is permitted to impose modest degradation of real-time latency
-on non-idle online CPUs.
-That said, it will likely be necessary to take further steps to reduce this
-degradation, hopefully to roughly that of a scheduling-clock interrupt.
-
-<p>
-There are a number of situations where even
-<tt>synchronize_rcu_expedited()</tt>'s reduced grace-period
-latency is unacceptable.
-In these situations, the asynchronous <tt>call_rcu()</tt> can be
-used in place of <tt>synchronize_rcu()</tt> as follows:
-
-<blockquote>
-<pre>
- 1 struct foo {
- 2   int a;
- 3   int b;
- 4   struct rcu_head rh;
- 5 };
- 6
- 7 static void remove_gp_cb(struct rcu_head *rhp)
- 8 {
- 9   struct foo *p = container_of(rhp, struct foo, rh);
-10
-11   kfree(p);
-12 }
-13
-14 bool remove_gp_asynchronous(void)
-15 {
-16   struct foo *p;
-17
-18   spin_lock(&amp;gp_lock);
-19   p = rcu_access_pointer(gp);
-20   if (!p) {
-21     spin_unlock(&amp;gp_lock);
-22     return false;
-23   }
-24   rcu_assign_pointer(gp, NULL);
-25   call_rcu(&amp;p-&gt;rh, remove_gp_cb);
-26   spin_unlock(&amp;gp_lock);
-27   return true;
-28 }
-</pre>
-</blockquote>
-
-<p>
-A definition of <tt>struct foo</tt> is finally needed, and appears
-on lines&nbsp;1-5.
-The function <tt>remove_gp_cb()</tt> is passed to <tt>call_rcu()</tt>
-on line&nbsp;25, and will be invoked after the end of a subsequent
-grace period.
-This gets the same effect as <tt>remove_gp_synchronous()</tt>,
-but without forcing the updater to wait for a grace period to elapse.
-The <tt>call_rcu()</tt> function may be used in a number of
-situations where neither <tt>synchronize_rcu()</tt> nor
-<tt>synchronize_rcu_expedited()</tt> would be legal,
-including within preempt-disable code, <tt>local_bh_disable()</tt> code,
-interrupt-disable code, and interrupt handlers.
-However, even <tt>call_rcu()</tt> is illegal within NMI handlers.
-The callback function (<tt>remove_gp_cb()</tt> in this case) will be
-executed within softirq (software interrupt) environment within the
-Linux kernel,
-either within a real softirq handler or under the protection
-of <tt>local_bh_disable()</tt>.
-In both the Linux kernel and in userspace, it is bad practice to
-write an RCU callback function that takes too long.
-Long-running operations should be relegated to separate threads or
-(in the Linux kernel) workqueues.
-
-<p>@@QQ@@
-Why does line&nbsp;19 use <tt>rcu_access_pointer()</tt>?
-After all, <tt>call_rcu()</tt> on line&nbsp;25 stores into the
-structure, which would interact badly with concurrent insertions.
-Doesn't this mean that <tt>rcu_dereference()</tt> is required?
-<p>@@QQA@@
-Presumably the <tt>-&gt;gp_lock</tt> acquired on line&nbsp;18 excludes
-any changes, including any insertions that <tt>rcu_dereference()</tt>
-would protect against.
-Therefore, any insertions will be delayed until after <tt>-&gt;gp_lock</tt>
-is released on line&nbsp;26, which in turn means that
-<tt>rcu_access_pointer()</tt> suffices.
-<p>@@QQE@@
-
-<p>
-However, all that <tt>remove_gp_cb()</tt> is doing is
-invoking <tt>kfree()</tt> on the data element.
-This is a common idiom, and is supported by <tt>kfree_rcu()</tt>,
-which allows &ldquo;fire and forget&rdquo; operation as shown below:
-
-<blockquote>
-<pre>
- 1 struct foo {
- 2   int a;
- 3   int b;
- 4   struct rcu_head rh;
- 5 };
- 6
- 7 bool remove_gp_faf(void)
- 8 {
- 9   struct foo *p;
-10
-11   spin_lock(&amp;gp_lock);
-12   p = rcu_dereference(gp);
-13   if (!p) {
-14     spin_unlock(&amp;gp_lock);
-15     return false;
-16   }
-17   rcu_assign_pointer(gp, NULL);
-18   kfree_rcu(p, rh);
-19   spin_unlock(&amp;gp_lock);
-20   return true;
-21 }
-</pre>
-</blockquote>
-
-<p>
-Note that <tt>remove_gp_faf()</tt> simply invokes
-<tt>kfree_rcu()</tt> and proceeds, without any need to pay any
-further attention to the subsequent grace period and <tt>kfree()</tt>.
-It is permissible to invoke <tt>kfree_rcu()</tt> from the same
-environments as for <tt>call_rcu()</tt>.
-Interestingly enough, DYNIX/ptx had the equivalents of
-<tt>call_rcu()</tt> and <tt>kfree_rcu()</tt>, but not
-<tt>synchronize_rcu()</tt>.
-This was due to the fact that RCU was not heavily used within DYNIX/ptx,
-so the very few places that needed something like
-<tt>synchronize_rcu()</tt> simply open-coded it.
-
-<p>@@QQ@@
-Earlier it was claimed that <tt>call_rcu()</tt> and
-<tt>kfree_rcu()</tt> allowed updaters to avoid being blocked
-by readers.
-But how can that be correct, given that the invocation of the callback
-and the freeing of the memory (respectively) must still wait for
-a grace period to elapse?
-<p>@@QQA@@
-We could define things this way, but keep in mind that this sort of
-definition would say that updates in garbage-collected languages
-cannot complete until the next time the garbage collector runs,
-which does not seem at all reasonable.
-The key point is that in most cases, an updater using either
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt> can proceed to the
-next update as soon as it has invoked <tt>call_rcu()</tt> or
-<tt>kfree_rcu()</tt>, without having to wait for a subsequent
-grace period.
-<p>@@QQE@@
-
-<p>
-But what if the updater must wait for the completion of code to be
-executed after the end of the grace period, but has other tasks
-that can be carried out in the meantime?
-The polling-style <tt>get_state_synchronize_rcu()</tt> and
-<tt>cond_synchronize_rcu()</tt> functions may be used for this
-purpose, as shown below:
-
-<blockquote>
-<pre>
- 1 bool remove_gp_poll(void)
- 2 {
- 3   struct foo *p;
- 4   unsigned long s;
- 5
- 6   spin_lock(&amp;gp_lock);
- 7   p = rcu_access_pointer(gp);
- 8   if (!p) {
- 9     spin_unlock(&amp;gp_lock);
-10     return false;
-11   }
-12   rcu_assign_pointer(gp, NULL);
-13   spin_unlock(&amp;gp_lock);
-14   s = get_state_synchronize_rcu();
-15   do_something_while_waiting();
-16   cond_synchronize_rcu(s);
-17   kfree(p);
-18   return true;
-19 }
-</pre>
-</blockquote>
-
-<p>
-On line&nbsp;14, <tt>get_state_synchronize_rcu()</tt> obtains a
-&ldquo;cookie&rdquo; from RCU,
-then line&nbsp;15 carries out other tasks,
-and finally, line&nbsp;16 returns immediately if a grace period has
-elapsed in the meantime, but otherwise waits as required.
-The need for <tt>get_state_synchronize_rcu()</tt> and
-<tt>cond_synchronize_rcu()</tt> has appeared quite recently,
-so it is too early to tell whether they will stand the test of time.
-
-<p>
-RCU thus provides a range of tools to allow updaters to strike the
-required tradeoff between latency, flexibility and CPU overhead.
-
-<h3><a name="Composability">Composability</a></h3>
-
-<p>
-Composability has received much attention in recent years, perhaps in part
-due to the collision of multicore hardware with object-oriented techniques
-designed in single-threaded environments for single-threaded use.
-And in theory, RCU read-side critical sections may be composed, and in
-fact may be nested arbitrarily deeply.
-In practice, as with all real-world implementations of composable
-constructs, there are limitations.
-
-<p>
-Implementations of RCU for which <tt>rcu_read_lock()</tt>
-and <tt>rcu_read_unlock()</tt> generate no code, such as
-Linux-kernel RCU when <tt>CONFIG_PREEMPT=n</tt>, can be
-nested arbitrarily deeply.
-After all, there is no overhead.
-Except that if all these instances of <tt>rcu_read_lock()</tt>
-and <tt>rcu_read_unlock()</tt> are visible to the compiler,
-compilation will eventually fail due to exhausting memory,
-mass storage, or user patience, whichever comes first.
-If the nesting is not visible to the compiler, as is the case with
-mutually recursive functions each in its own translation unit,
-stack overflow will result.
-If the nesting takes the form of loops, either the control variable
-will overflow or (in the Linux kernel) you will get an RCU CPU stall warning.
-Nevertheless, this class of RCU implementations is one
-of the most composable constructs in existence.
-
-<p>
-RCU implementations that explicitly track nesting depth
-are limited by the nesting-depth counter.
-For example, the Linux kernel's preemptible RCU limits nesting to
-<tt>INT_MAX</tt>.
-This should suffice for almost all practical purposes.
-That said, a consecutive pair of RCU read-side critical sections
-between which there is an operation that waits for a grace period
-cannot be enclosed in another RCU read-side critical section.
-This is because it is not legal to wait for a grace period within
-an RCU read-side critical section:  To do so would result either
-in deadlock or
-in RCU implicitly splitting the enclosing RCU read-side critical
-section, neither of which is conducive to a long-lived and prosperous
-kernel.
-
-<p>
-It is worth noting that RCU is not alone in limiting composability.
-For example, many transactional-memory implementations prohibit
-composing a pair of transactions separated by an irrevocable
-operation (for example, a network receive operation).
-For another example, lock-based critical sections can be composed
-surprisingly freely, but only if deadlock is avoided.
-
-<p>
-In short, although RCU read-side critical sections are highly composable,
-care is required in some situations, just as is the case for any other
-composable synchronization mechanism.
-
-<h3><a name="Corner Cases">Corner Cases</a></h3>
-
-<p>
-A given RCU workload might have an endless and intense stream of
-RCU read-side critical sections, perhaps even so intense that there
-was never a point in time during which there was not at least one
-RCU read-side critical section in flight.
-RCU cannot allow this situation to block grace periods:  As long as
-all the RCU read-side critical sections are finite, grace periods
-must also be finite.
-
-<p>
-That said, preemptible RCU implementations could potentially result
-in RCU read-side critical sections being preempted for long durations,
-which has the effect of creating a long-duration RCU read-side
-critical section.
-This situation can arise only in heavily loaded systems, but systems using
-real-time priorities are of course more vulnerable.
-Therefore, RCU priority boosting is provided to help deal with this
-case.
-That said, the exact requirements on RCU priority boosting will likely
-evolve as more experience accumulates.
-
-<p>
-Other workloads might have very high update rates.
-Although one can argue that such workloads should instead use
-something other than RCU, the fact remains that RCU must
-handle such workloads gracefully.
-This requirement is another factor driving batching of grace periods,
-but it is also the driving force behind the checks for large numbers
-of queued RCU callbacks in the <tt>call_rcu()</tt> code path.
-Finally, high update rates should not delay RCU read-side critical
-sections, although some read-side delays can occur when using
-<tt>synchronize_rcu_expedited()</tt>, courtesy of this function's use
-of <tt>try_stop_cpus()</tt>.
-(In the future, <tt>synchronize_rcu_expedited()</tt> will be
-converted to use lighter-weight inter-processor interrupts (IPIs),
-but this will still disturb readers, though to a much smaller degree.)
-
-<p>
-Although all three of these corner cases were understood in the early
-1990s, a simple user-level test consisting of <tt>close(open(path))</tt>
-in a tight loop
-in the early 2000s suddenly provided a much deeper appreciation of the
-high-update-rate corner case.
-This test also motivated addition of some RCU code to react to high update
-rates, for example, if a given CPU finds itself with more than 10,000
-RCU callbacks queued, it will cause RCU to take evasive action by
-more aggressively starting grace periods and more aggressively forcing
-completion of grace-period processing.
-This evasive action causes the grace period to complete more quickly,
-but at the cost of restricting RCU's batching optimizations, thus
-increasing the CPU overhead incurred by that grace period.
-
-<h2><a name="Software-Engineering Requirements">
-Software-Engineering Requirements</a></h2>
-
-<p>
-Between Murphy's Law and &ldquo;To err is human&rdquo;, it is necessary to
-guard against mishaps and misuse:
-
-<ol>
-<li>	It is all too easy to forget to use <tt>rcu_read_lock()</tt>
-	everywhere that it is needed, so kernels built with
-	<tt>CONFIG_PROVE_RCU=y</tt> will splat if
-	<tt>rcu_dereference()</tt> is used outside of an
-	RCU read-side critical section.
-	Update-side code can use <tt>rcu_dereference_protected()</tt>,
-	which takes a
-	<a href="https://lwn.net/Articles/371986/">lockdep expression</a>
-	to indicate what is providing the protection
-	(see the sketch following this list).
-	If the indicated protection is not provided, a lockdep splat
-	is emitted.
-
-	<p>
-	Code shared between readers and updaters can use
-	<tt>rcu_dereference_check()</tt>, which also takes a
-	lockdep expression, and emits a lockdep splat if neither
-	<tt>rcu_read_lock()</tt> nor the indicated protection
-	is in place.
-	In addition, <tt>rcu_dereference_raw()</tt> is used in those
-	(hopefully rare) cases where the required protection cannot
-	be easily described.
-	Finally, <tt>rcu_read_lock_held()</tt> is provided to
-	allow a function to verify that it has been invoked within
-	an RCU read-side critical section.
-	I was made aware of this set of requirements shortly after Thomas
-	Gleixner audited a number of RCU uses.
-<li>	A given function might wish to check for RCU-related preconditions
-	upon entry, before using any other RCU API.
-	The <tt>rcu_lockdep_assert()</tt> does this job,
-	asserting the expression in kernels having lockdep enabled
-	and doing nothing otherwise.
-<li>	It is also easy to forget to use <tt>rcu_assign_pointer()</tt>
-	and <tt>rcu_dereference()</tt>, perhaps (incorrectly)
-	substituting a simple assignment.
-	To catch this sort of error, a given RCU-protected pointer may be
-	tagged with <tt>__rcu</tt>, after which running sparse
-	with <tt>CONFIG_SPARSE_RCU_POINTER=y</tt> will complain
-	about simple-assignment accesses to that pointer.
-	Arnd Bergmann made me aware of this requirement, and also
-	supplied the needed
-	<a href="https://lwn.net/Articles/376011/">patch series</a>.
-<li>	Kernels built with <tt>CONFIG_DEBUG_OBJECTS_RCU_HEAD=y</tt>
-	will splat if a data element is passed to <tt>call_rcu()</tt>
-	twice in a row, without a grace period in between.
-	(This error is similar to a double free.)
-	The corresponding <tt>rcu_head</tt> structures that are
-	dynamically allocated are automatically tracked, but
-	<tt>rcu_head</tt> structures allocated on the stack
-	must be initialized with <tt>init_rcu_head_on_stack()</tt>
-	and cleaned up with <tt>destroy_rcu_head_on_stack()</tt>.
-	Similarly, statically allocated non-stack <tt>rcu_head</tt>
-	structures must be initialized with <tt>init_rcu_head()</tt>
-	and cleaned up with <tt>destroy_rcu_head()</tt>.
-	Mathieu Desnoyers made me aware of this requirement, and also
-	supplied the needed
-	<a href="https://lkml.kernel.org/g/20100319013024.GA28456@Krystal">patch</a>.
-<li>	An infinite loop in an RCU read-side critical section will
-	eventually trigger an RCU CPU stall warning splat, with
-	the duration of &ldquo;eventually&rdquo; being controlled by the
-	<tt>RCU_CPU_STALL_TIMEOUT</tt> <tt>Kconfig</tt> option, or,
-	alternatively, by the
-	<tt>rcupdate.rcu_cpu_stall_timeout</tt> boot/sysfs
-	parameter.
-	However, RCU is not obligated to produce this splat
-	unless there is a grace period waiting on that particular
-	RCU read-side critical section.
-	<p>
-	Some extreme workloads might intentionally delay
-	RCU grace periods, and systems running those workloads can
-	be booted with <tt>rcupdate.rcu_cpu_stall_suppress</tt>
-	to suppress the splats.
-	This kernel parameter may also be set via <tt>sysfs</tt>.
-	Furthermore, RCU CPU stall warnings are counter-productive
-	during sysrq dumps and during panics.
-	RCU therefore supplies the <tt>rcu_sysrq_start()</tt> and
-	<tt>rcu_sysrq_end()</tt> API members to be called before
-	and after long sysrq dumps.
-	RCU also supplies the <tt>rcu_panic()</tt> notifier that is
-	automatically invoked at the beginning of a panic to suppress
-	further RCU CPU stall warnings.
-
-	<p>
-	This requirement made itself known in the early 1990s, pretty
-	much the first time that it was necessary to debug a CPU stall.
-	That said, the initial implementation in DYNIX/ptx was quite
-	generic in comparison with that of Linux.
-<li>	Although it would be very good to detect pointers leaking out
-	of RCU read-side critical sections, there is currently no
-	good way of doing this.
-	One complication is the need to distinguish between pointers
-	leaking and pointers that have been handed off from RCU to
-	some other synchronization mechanism, for example, reference
-	counting.
-<li>	In kernels built with <tt>CONFIG_RCU_TRACE=y</tt>, RCU-related
-	information is provided via both debugfs and event tracing.
-<li>	Open-coded use of <tt>rcu_assign_pointer()</tt> and
-	<tt>rcu_dereference()</tt> to create typical linked
-	data structures can be surprisingly error-prone.
-	Therefore, RCU-protected
-	<a href="https://lwn.net/Articles/609973/#RCU List APIs">linked lists</a>
-	and, more recently, RCU-protected
-	<a href="https://lwn.net/Articles/612100/">hash tables</a>
-	are available.
-	Many other special-purpose RCU-protected data structures are
-	available in the Linux kernel and the userspace RCU library.
-<li>	Some linked structures are created at compile time, but still
-	require <tt>__rcu</tt> checking.
-	The <tt>RCU_POINTER_INITIALIZER()</tt> macro serves this
-	purpose.
-<li>	It is not necessary to use <tt>rcu_assign_pointer()</tt>
-	when creating linked structures that are to be published via
-	a single external pointer.
-	The <tt>RCU_INIT_POINTER()</tt> macro is provided for
-	this task and also for assigning <tt>NULL</tt> pointers
-	at runtime.
-</ol>
-
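-<p>
-The following sketch illustrates the lockdep-based and sparse-based
-checks called out in the list above.
-The names <tt>my_lock</tt>, <tt>my_gp</tt>,
-<tt>get_gp_update_side()</tt>, and <tt>get_gp_either_side()</tt>
-are hypothetical, and <tt>struct foo</tt> is the structure used
-in the earlier examples:
-
-<blockquote>
-<pre>
- 1 /* Hypothetical lock and __rcu-tagged pointer. */
- 2 static DEFINE_SPINLOCK(my_lock);
- 3 static struct foo __rcu *my_gp;
- 4
- 5 /* Update-side access:  caller must hold my_lock. */
- 6 struct foo *get_gp_update_side(void)
- 7 {
- 8   return rcu_dereference_protected(my_gp,
- 9                                    lockdep_is_held(&amp;my_lock));
-10 }
-11
-12 /* Shared access:  either my_lock or rcu_read_lock() suffices. */
-13 struct foo *get_gp_either_side(void)
-14 {
-15   return rcu_dereference_check(my_gp,
-16                                lockdep_is_held(&amp;my_lock));
-17 }
-</pre>
-</blockquote>
-
-<p>
-Kernels built with <tt>CONFIG_PROVE_RCU=y</tt> will then splat if the
-indicated protection is absent, and running sparse with
-<tt>CONFIG_SPARSE_RCU_POINTER=y</tt> will complain about
-simple-assignment accesses to <tt>my_gp</tt>.
-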
-<p>
-This is not a hard-and-fast list:  RCU's diagnostic capabilities will
-continue to be guided by the number and type of usage bugs found
-in real-world RCU usage.
-
-<h2><a name="Linux Kernel Complications">Linux Kernel Complications</a></h2>
-
-<p>
-The Linux kernel provides an interesting environment for all kinds of
-software, including RCU.
-Some of the relevant points of interest are as follows:
-
-<ol>
-<li>	<a href="#Configuration">Configuration</a>.
-<li>	<a href="#Firmware Interface">Firmware Interface</a>.
-<li>	<a href="#Early Boot">Early Boot</a>.
-<li>	<a href="#Interrupts and NMIs">
-	Interrupts and non-maskable interrupts (NMIs)</a>.
-<li>	<a href="#Loadable Modules">Loadable Modules</a>.
-<li>	<a href="#Hotplug CPU">Hotplug CPU</a>.
-<li>	<a href="#Scheduler and RCU">Scheduler and RCU</a>.
-<li>	<a href="#Tracing and RCU">Tracing and RCU</a>.
-<li>	<a href="#Energy Efficiency">Energy Efficiency</a>.
-<li>	<a href="#Memory Efficiency">Memory Efficiency</a>.
-<li>	<a href="#Performance, Scalability, Response Time, and Reliability">
-	Performance, Scalability, Response Time, and Reliability</a>.
-</ol>
-
-<p>
-This list is probably incomplete, but it does give a feel for the
-most notable Linux-kernel complications.
-Each of the following sections covers one of the above topics.
-
-<h3><a name="Configuration">Configuration</a></h3>
-
-<p>
-RCU's goal is automatic configuration, so that almost nobody
-needs to worry about RCU's <tt>Kconfig</tt> options.
-And for almost all users, RCU does in fact work well
-&ldquo;out of the box.&rdquo;
-
-<p>
-However, there are specialized use cases that are handled by
-kernel boot parameters and <tt>Kconfig</tt> options.
-Unfortunately, the <tt>Kconfig</tt> system will explicitly ask users
-about new <tt>Kconfig</tt> options, which requires almost all of them
-be hidden behind a <tt>CONFIG_RCU_EXPERT</tt> <tt>Kconfig</tt> option.
-
-<p>
-This all should be quite obvious, but the fact remains that
-Linus Torvalds recently had to
-<a href="https://lkml.kernel.org/g/CA+55aFy4wcCwaL4okTs8wXhGZ5h-ibecy_Meg9C4MNQrUnwMcg@mail.gmail.com">remind</a>
-me of this requirement.
-
-<h3><a name="Firmware Interface">Firmware Interface</a></h3>
-
-<p>
-In many cases, the kernel obtains information about the system from the
-firmware, and sometimes things are lost in translation.
-Or the translation is accurate, but the original message is bogus.
-
-<p>
-For example, some systems' firmware overreports the number of CPUs,
-sometimes by a large factor.
-If RCU naively believed the firmware, as it used to do,
-it would create too many per-CPU kthreads.
-Although the resulting system will still run correctly, the extra
-kthreads needlessly consume memory and can cause confusion
-when they show up in <tt>ps</tt> listings.
-
-<p>
-RCU must therefore wait for a given CPU to actually come online before
-it can allow itself to believe that the CPU actually exists.
-The resulting &ldquo;ghost CPUs&rdquo; (which are never going to
-come online) cause a number of
-<a href="https://paulmck.livejournal.com/37494.html">interesting complications</a>.
-
-<h3><a name="Early Boot">Early Boot</a></h3>
-
-<p>
-The Linux kernel's boot sequence is an interesting process,
-and RCU is used early, even before <tt>rcu_init()</tt>
-is invoked.
-In fact, a number of RCU's primitives can be used as soon as the
-initial task's <tt>task_struct</tt> is available and the
-boot CPU's per-CPU variables are set up.
-The read-side primitives (<tt>rcu_read_lock()</tt>,
-<tt>rcu_read_unlock()</tt>, <tt>rcu_dereference()</tt>,
-and <tt>rcu_access_pointer()</tt>) will operate normally very early on,
-as will <tt>rcu_assign_pointer()</tt>.
-
-<p>
-Although <tt>call_rcu()</tt> may be invoked at any
-time during boot, callbacks are not guaranteed to be invoked until after
-the scheduler is fully up and running.
-This delay in callback invocation is due to the fact that RCU does not
-invoke callbacks until it is fully initialized, and this full initialization
-cannot occur until after the scheduler has initialized itself to the
-point where RCU can spawn and run its kthreads.
-In theory, it would be possible to invoke callbacks earlier;
-however, this is not a panacea because there would be severe restrictions
-on what operations those callbacks could invoke.
-
-<p>
-Perhaps surprisingly, <tt>synchronize_rcu()</tt>,
-<a href="#Bottom-Half Flavor"><tt>synchronize_rcu_bh()</tt></a>
-(<a href="#Bottom-Half Flavor">discussed below</a>),
-and
-<a href="#Sched Flavor"><tt>synchronize_sched()</tt></a>
-will all operate normally
-during very early boot, the reason being that there is only one CPU
-and preemption is disabled.
-This means that a call to <tt>synchronize_rcu()</tt> (or friends)
-itself is a quiescent
-state and thus a grace period, so the early-boot implementation can
-be a no-op.
-
-<p>
-Both <tt>synchronize_rcu_bh()</tt> and <tt>synchronize_sched()</tt>
-continue to operate normally through the remainder of boot, courtesy
-of the fact that preemption is disabled across their RCU read-side
-critical sections and also courtesy of the fact that there is still
-only one CPU.
-However, once the scheduler starts initializing, preemption is enabled.
-There is still only a single CPU, but the fact that preemption is enabled
-means that the no-op implementation of <tt>synchronize_rcu()</tt> no
-longer works in <tt>CONFIG_PREEMPT=y</tt> kernels.
-Therefore, as soon as the scheduler starts initializing, the early-boot
-fastpath is disabled.
-This means that <tt>synchronize_rcu()</tt> switches to its runtime
-mode of operation where it posts callbacks, which in turn means that
-any call to <tt>synchronize_rcu()</tt> will block until the corresponding
-callback is invoked.
-Unfortunately, the callback cannot be invoked until RCU's runtime
-grace-period machinery is up and running, which cannot happen until
-the scheduler has initialized itself sufficiently to allow RCU's
-kthreads to be spawned.
-Therefore, invoking <tt>synchronize_rcu()</tt> during scheduler
-initialization can result in deadlock.
-
-<p>@@QQ@@
-So what happens with <tt>synchronize_rcu()</tt> during
-scheduler initialization for <tt>CONFIG_PREEMPT=n</tt>
-kernels?
-<p>@@QQA@@
-In <tt>CONFIG_PREEMPT=n</tt> kernels, <tt>synchronize_rcu()</tt>
-maps directly to <tt>synchronize_sched()</tt>.
-Therefore, <tt>synchronize_rcu()</tt> works normally throughout
-boot in <tt>CONFIG_PREEMPT=n</tt> kernels.
-However, your code must also work in <tt>CONFIG_PREEMPT=y</tt> kernels,
-so it is still necessary to avoid invoking <tt>synchronize_rcu()</tt>
-during scheduler initialization.
-<p>@@QQE@@
-
-<p>
-I learned of these boot-time requirements as a result of a series of
-system hangs.
-
-<h3><a name="Interrupts and NMIs">Interrupts and NMIs</a></h3>
-
-<p>
-The Linux kernel has interrupts, and RCU read-side critical sections are
-legal within interrupt handlers and within interrupt-disabled regions
-of code, as are invocations of <tt>call_rcu()</tt>.
-
-<p>
-Some Linux-kernel architectures can enter an interrupt handler from
-non-idle process context, and then just never leave it, instead stealthily
-transitioning back to process context.
-This trick is sometimes used to invoke system calls from inside the kernel.
-These &ldquo;half-interrupts&rdquo; mean that RCU has to be very careful
-about how it counts interrupt nesting levels.
-I learned of this requirement the hard way during a rewrite
-of RCU's dyntick-idle code.
-
-<p>
-The Linux kernel has non-maskable interrupts (NMIs), and
-RCU read-side critical sections are legal within NMI handlers.
-Thankfully, RCU update-side primitives, including
-<tt>call_rcu()</tt>, are prohibited within NMI handlers.
-
-<p>
-The name notwithstanding, some Linux-kernel architectures
-can have nested NMIs, which RCU must handle correctly.
-Andy Lutomirski
-<a href="https://lkml.kernel.org/g/CALCETrXLq1y7e_dKFPgou-FKHB6Pu-r8+t-6Ds+8=va7anBWDA@mail.gmail.com">surprised me</a>
-with this requirement;
-he also kindly surprised me with
-<a href="https://lkml.kernel.org/g/CALCETrXSY9JpW3uE6H8WYk81sg56qasA2aqmjMPsq5dOtzso=g@mail.gmail.com">an algorithm</a>
-that meets this requirement.
-
-<h3><a name="Loadable Modules">Loadable Modules</a></h3>
-
-<p>
-The Linux kernel has loadable modules, and these modules can
-also be unloaded.
-After a given module has been unloaded, any attempt to call
-one of its functions results in a segmentation fault.
-The module-unload functions must therefore cancel any
-delayed calls to loadable-module functions, for example,
-any outstanding <tt>mod_timer()</tt> must be dealt with
-via <tt>del_timer_sync()</tt> or similar.
-
-<p>
-Unfortunately, there is no way to cancel an RCU callback;
-once you invoke <tt>call_rcu()</tt>, the callback function is
-going to eventually be invoked, unless the system goes down first.
-Because it is normally considered socially irresponsible to crash the system
-in response to a module unload request, we need some other way
-to deal with in-flight RCU callbacks.
-
-<p>
-RCU therefore provides
-<tt><a href="https://lwn.net/Articles/217484/">rcu_barrier()</a></tt>,
-which waits until all in-flight RCU callbacks have been invoked.
-If a module uses <tt>call_rcu()</tt>, its exit function should therefore
-prevent any future invocation of <tt>call_rcu()</tt>, then invoke
-<tt>rcu_barrier()</tt>.
-In theory, the underlying module-unload code could invoke
-<tt>rcu_barrier()</tt> unconditionally, but in practice this would
-incur unacceptable latencies.
-
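-<p>
-A minimal sketch of such an exit function might look as follows,
-where <tt>my_module_exit()</tt> and <tt>stop_posting_callbacks()</tt>
-are hypothetical, the latter standing in for whatever mechanism
-prevents further <tt>call_rcu()</tt> invocations:
-
-<blockquote>
-<pre>
- 1 static void __exit my_module_exit(void)
- 2 {
- 3   stop_posting_callbacks(); /* Hypothetical:  no more call_rcu(). */
- 4   rcu_barrier();            /* Wait for in-flight callbacks. */
- 5 }
- 6 module_exit(my_module_exit);
-</pre>
-</blockquote>
-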
-<p>
-Nikita Danilov noted this requirement for an analogous filesystem-unmount
-situation, and Dipankar Sarma incorporated <tt>rcu_barrier()</tt> into RCU.
-The need for <tt>rcu_barrier()</tt> for module unloading became
-apparent later.
-
-<h3><a name="Hotplug CPU">Hotplug CPU</a></h3>
-
-<p>
-The Linux kernel supports CPU hotplug, which means that CPUs
-can come and go.
-It is of course illegal to use any RCU API member from an offline CPU.
-This requirement was present from day one in DYNIX/ptx, but
-on the other hand, the Linux kernel's CPU-hotplug implementation
-is &ldquo;interesting.&rdquo;
-
-<p>
-The Linux-kernel CPU-hotplug implementation has notifiers that
-are used to allow the various kernel subsystems (including RCU)
-to respond appropriately to a given CPU-hotplug operation.
-Most RCU operations may be invoked from CPU-hotplug notifiers,
-including even normal synchronous grace-period operations
-such as <tt>synchronize_rcu()</tt>.
-However, expedited grace-period operations such as
-<tt>synchronize_rcu_expedited()</tt> are not supported,
-due to the fact that current implementations block CPU-hotplug
-operations, which could result in deadlock.
-
-<p>
-In addition, all-callback-wait operations such as
-<tt>rcu_barrier()</tt> are also not supported, due to the
-fact that there are phases of CPU-hotplug operations where
-the outgoing CPU's callbacks will not be invoked until after
-the CPU-hotplug operation ends, which could also result in deadlock.
-
-<h3><a name="Scheduler and RCU">Scheduler and RCU</a></h3>
-
-<p>
-RCU depends on the scheduler, and the scheduler uses RCU to
-protect some of its data structures.
-This means the scheduler is forbidden from acquiring
-the runqueue locks and the priority-inheritance locks
-in the middle of an outermost RCU read-side critical section unless either
-(1)&nbsp;it releases them before exiting that same
-RCU read-side critical section, or
-(2)&nbsp;interrupts are disabled across
-that entire RCU read-side critical section.
-This same prohibition also applies (recursively!) to any lock that is acquired
-while holding any lock to which this prohibition applies.
-Adhering to this rule prevents preemptible RCU from invoking
-<tt>rcu_read_unlock_special()</tt> while either runqueue or
-priority-inheritance locks are held, thus avoiding deadlock.
-
-<p>
-Prior to v4.4, it was only necessary to disable preemption across
-RCU read-side critical sections that acquired scheduler locks.
-In v4.4, expedited grace periods started using IPIs, and these
-IPIs could force a <tt>rcu_read_unlock()</tt> to take the slowpath.
-Therefore, this expedited-grace-period change required disabling of
-interrupts, not just preemption.
-
-<p>
-For RCU's part, the preemptible-RCU <tt>rcu_read_unlock()</tt>
-implementation must be written carefully to avoid similar deadlocks.
-In particular, <tt>rcu_read_unlock()</tt> must tolerate an
-interrupt where the interrupt handler invokes both
-<tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>.
-This possibility requires <tt>rcu_read_unlock()</tt> to use
-negative nesting levels to avoid destructive recursion via
-the interrupt handler's use of RCU.
-
-<p>
-This pair of mutual scheduler-RCU requirements came as a
-<a href="https://lwn.net/Articles/453002/">complete surprise</a>.
-
-<p>
-As noted above, RCU makes use of kthreads, and it is necessary to
-avoid excessive CPU-time accumulation by these kthreads.
-This requirement was no surprise, but RCU's violation of it
-when running context-switch-heavy workloads when built with
-<tt>CONFIG_NO_HZ_FULL=y</tt>
-<a href="http://www.rdrop.com/users/paulmck/scalability/paper/BareMetal.2015.01.15b.pdf">did come as a surprise [PDF]</a>.
-RCU has made good progress towards meeting this requirement, even
-for context-switch-heavy <tt>CONFIG_NO_HZ_FULL=y</tt> workloads,
-but there is room for further improvement.
-
-<h3><a name="Tracing and RCU">Tracing and RCU</a></h3>
-
-<p>
-It is possible to use tracing on RCU code, but tracing itself
-uses RCU.
-For this reason, <tt>rcu_dereference_raw_notrace()</tt>
-is provided for use by tracing, which avoids the destructive
-recursion that could otherwise ensue.
-This API is also used by virtualization in some architectures,
-where RCU readers execute in environments in which tracing
-cannot be used.
-The tracing folks both located the requirement and provided the
-needed fix, so this surprise requirement was relatively painless.
-
-<h3><a name="Energy Efficiency">Energy Efficiency</a></h3>
-
-<p>
-Interrupting idle CPUs is considered socially unacceptable,
-especially by people with battery-powered embedded systems.
-RCU therefore conserves energy by detecting which CPUs are
-idle, including tracking CPUs that have been interrupted from idle.
-This is a large part of the energy-efficiency requirement,
-so I learned of this via an irate phone call.
-
-<p>
-Because RCU avoids interrupting idle CPUs, it is illegal to
-execute an RCU read-side critical section on an idle CPU.
-(Kernels built with <tt>CONFIG_PROVE_RCU=y</tt> will splat
-if you try it.)
-The <tt>RCU_NONIDLE()</tt> macro and <tt>_rcuidle</tt>
-event tracing are provided to work around this restriction.
-In addition, <tt>rcu_is_watching()</tt> may be used to
-test whether or not it is currently legal to run RCU read-side
-critical sections on this CPU.
-I learned of the need for diagnostics on the one hand
-and <tt>RCU_NONIDLE()</tt> on the other while inspecting
-idle-loop code.
-Steven Rostedt supplied <tt>_rcuidle</tt> event tracing,
-which is used quite heavily in the idle loop.
-
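-<p>
-For example, idle-loop code needing a momentary RCU reader might
-use a sketch along the following lines, in which
-<tt>my_idle_helper()</tt> and <tt>do_rcu_protected_work()</tt>
-are hypothetical:
-
-<blockquote>
-<pre>
- 1 static void my_idle_helper(void)
- 2 {
- 3   if (rcu_is_watching())
- 4     do_rcu_protected_work();
- 5   else
- 6     RCU_NONIDLE(do_rcu_protected_work());
- 7 }
-</pre>
-</blockquote>
-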
-<p>
-It is similarly socially unacceptable to interrupt an
-<tt>nohz_full</tt> CPU running in userspace.
-RCU must therefore track <tt>nohz_full</tt> userspace
-execution.
-And in
-<a href="https://lwn.net/Articles/558284/"><tt>CONFIG_NO_HZ_FULL_SYSIDLE=y</tt></a>
-kernels, RCU must separately track idle CPUs on the one hand and
-CPUs that are either idle or executing in userspace on the other.
-In both cases, RCU must be able to sample state at two points in
-time, and be able to determine whether or not some other CPU spent
-any time idle and/or executing in userspace.
-
-<p>
-These energy-efficiency requirements have proven quite difficult to
-understand and to meet, for example, there have been more than five
-clean-sheet rewrites of RCU's energy-efficiency code, the last of
-which was finally able to demonstrate
-<a href="http://www.rdrop.com/users/paulmck/realtime/paper/AMPenergy.2013.04.19a.pdf">real energy savings running on real hardware [PDF]</a>.
-As noted earlier,
-I learned of many of these requirements via angry phone calls:
-Flaming me on the Linux-kernel mailing list was apparently not
-sufficient to fully vent their ire at RCU's energy-efficiency bugs!
-
-<h3><a name="Memory Efficiency">Memory Efficiency</a></h3>
-
-<p>
-Although small-memory non-realtime systems can simply use Tiny RCU,
-code size is only one aspect of memory efficiency.
-Another aspect is the size of the <tt>rcu_head</tt> structure
-used by <tt>call_rcu()</tt> and <tt>kfree_rcu()</tt>.
-Although this structure contains nothing more than a pair of pointers,
-it does appear in many RCU-protected data structures, including
-some that are size critical.
-The <tt>page</tt> structure is a case in point, as evidenced by
-the many occurrences of the <tt>union</tt> keyword within that structure.
-
-<p>
-This need for memory efficiency is one reason that RCU uses hand-crafted
-singly linked lists to track the <tt>rcu_head</tt> structures that
-are waiting for a grace period to elapse.
-It is also the reason why <tt>rcu_head</tt> structures do not contain
-debug information, such as fields tracking the file and line of the
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt> that posted them.
-Although this information might appear in debug-only kernel builds at some
-point, in the meantime, the <tt>-&gt;func</tt> field will often provide
-the needed debug information.
-
-<p>
-However, in some cases, the need for memory efficiency leads to even
-more extreme measures.
-Returning to the <tt>page</tt> structure, the <tt>rcu_head</tt> field
-shares storage with a great many other structures that are used at
-various points in the corresponding page's lifetime.
-In order to correctly resolve certain
-<a href="https://lkml.kernel.org/g/1439976106-137226-1-git-send-email-kirill.shutemov@linux.intel.com">race conditions</a>,
-the Linux kernel's memory-management subsystem needs a particular bit
-to remain zero during all phases of grace-period processing,
-and that bit happens to map to the bottom bit of the
-<tt>rcu_head</tt> structure's <tt>-&gt;next</tt> field.
-RCU makes this guarantee as long as <tt>call_rcu()</tt>
-is used to post the callback, as opposed to <tt>kfree_rcu()</tt>
-or some future &ldquo;lazy&rdquo;
-variant of <tt>call_rcu()</tt> that might one day be created for
-energy-efficiency purposes.
-
-<h3><a name="Performance, Scalability, Response Time, and Reliability">
-Performance, Scalability, Response Time, and Reliability</a></h3>
-
-<p>
-Expanding on the
-<a href="#Performance and Scalability">earlier discussion</a>,
-RCU is used heavily by hot code paths in performance-critical
-portions of the Linux kernel's networking, security, virtualization,
-and scheduling code.
-RCU must therefore use efficient implementations, especially in its
-read-side primitives.
-To that end, it would be good if preemptible RCU's implementation
-of <tt>rcu_read_lock()</tt> could be inlined; however, doing
-this requires resolving <tt>#include</tt> issues with the
-<tt>task_struct</tt> structure.
-
-<p>
-The Linux kernel supports hardware configurations with up to
-4096 CPUs, which means that RCU must be extremely scalable.
-Algorithms that involve frequent acquisitions of global locks or
-frequent atomic operations on global variables simply cannot be
-tolerated within the RCU implementation.
-RCU therefore makes heavy use of a combining tree based on the
-<tt>rcu_node</tt> structure.
-RCU is required to tolerate all CPUs continuously invoking any
-combination of RCU's runtime primitives with minimal per-operation
-overhead.
-In fact, in many cases, increasing load must <i>decrease</i> the
-per-operation overhead, witness the batching optimizations for
-<tt>synchronize_rcu()</tt>, <tt>call_rcu()</tt>,
-<tt>synchronize_rcu_expedited()</tt>, and <tt>rcu_barrier()</tt>.
-As a general rule, RCU must cheerfully accept whatever the
-rest of the Linux kernel decides to throw at it.
-
-<p>
-The Linux kernel is used for real-time workloads, especially
-in conjunction with the
-<a href="https://rt.wiki.kernel.org/index.php/Main_Page">-rt patchset</a>.
-The real-time-latency response requirements are such that the
-traditional approach of disabling preemption across RCU
-read-side critical sections is inappropriate.
-Kernels built with <tt>CONFIG_PREEMPT=y</tt> therefore
-use an RCU implementation that allows RCU read-side critical
-sections to be preempted.
-This requirement made its presence known after users made it
-clear that an earlier
-<a href="https://lwn.net/Articles/107930/">real-time patch</a>
-did not meet their needs, in conjunction with some
-<a href="https://lkml.kernel.org/g/20050318002026.GA2693@us.ibm.com">RCU issues</a>
-encountered by a very early version of the -rt patchset.
-
-<p>
-In addition, RCU must make do with a sub-100-microsecond real-time latency
-budget.
-In fact, on smaller systems with the -rt patchset, the Linux kernel
-provides sub-20-microsecond real-time latencies for the whole kernel,
-including RCU.
-RCU's scalability and latency must therefore be sufficient for
-these sorts of configurations.
-To my surprise, the sub-100-microsecond real-time latency budget
-<a href="http://www.rdrop.com/users/paulmck/realtime/paper/bigrt.2013.01.31a.LCA.pdf">
-applies to even the largest systems [PDF]</a>,
-up to and including systems with 4096 CPUs.
-This real-time requirement motivated the grace-period kthread, which
-also simplified handling of a number of race conditions.
-
-<p>
-Finally, RCU's status as a synchronization primitive means that
-any RCU failure can result in arbitrary memory corruption that can be
-extremely difficult to debug.
-This means that RCU must be extremely reliable, which in
-practice also means that RCU must have an aggressive stress-test
-suite.
-This stress-test suite is called <tt>rcutorture</tt>.
-
-<p>
-Although the need for <tt>rcutorture</tt> was no surprise,
-the current immense popularity of the Linux kernel is posing
-interesting&mdash;and perhaps unprecedented&mdash;validation
-challenges.
-To see this, keep in mind that there are well over one billion
-instances of the Linux kernel running today, given Android
-smartphones, Linux-powered televisions, and servers.
-This number can be expected to increase sharply with the advent of
-the celebrated Internet of Things.
-
-<p>
-Suppose that RCU contains a race condition that manifests on average
-once per million years of runtime.
-This bug will be occurring about three times per <i>day</i> across
-the installed base.
-RCU could simply hide behind hardware error rates, given that no one
-should really expect their smartphone to last for a million years.
-However, anyone taking too much comfort from this thought should
-consider the fact that in most jurisdictions, a successful multi-year
-test of a given mechanism, which might include a Linux kernel,
-suffices for a number of types of safety-critical certifications.
-In fact, rumor has it that the Linux kernel is already being used
-in production for safety-critical applications.
-I don't know about you, but I would feel quite bad if a bug in RCU
-killed someone.
-Which might explain my recent focus on validation and verification.
-
-<h2><a name="Other RCU Flavors">Other RCU Flavors</a></h2>
-
-<p>
-One of the more surprising things about RCU is that there are now
-no fewer than five <i>flavors</i>, or API families.
-In addition, the primary flavor that has been the sole focus up to
-this point has two different implementations, non-preemptible and
-preemptible.
-The other four flavors are listed below, with requirements for each
-described in a separate section.
-
-<ol>
-<li>	<a href="#Bottom-Half Flavor">Bottom-Half Flavor</a>
-<li>	<a href="#Sched Flavor">Sched Flavor</a>
-<li>	<a href="#Sleepable RCU">Sleepable RCU</a>
-<li>	<a href="#Tasks RCU">Tasks RCU</a>
-</ol>
-
-<h3><a name="Bottom-Half Flavor">Bottom-Half Flavor</a></h3>
-
-<p>
-The softirq-disable (AKA &ldquo;bottom-half&rdquo;,
-hence the &ldquo;_bh&rdquo; abbreviations)
-flavor of RCU, or <i>RCU-bh</i>, was developed by
-Dipankar Sarma to provide a flavor of RCU that could withstand the
-network-based denial-of-service attacks researched by Robert
-Olsson.
-These attacks placed so much networking load on the system
-that some of the CPUs never exited softirq execution,
-which in turn prevented those CPUs from ever executing a context switch,
-which, in the RCU implementation of that time, prevented grace periods
-from ever ending.
-The result was an out-of-memory condition and a system hang.
-
-<p>
-The solution was the creation of RCU-bh, which does
-<tt>local_bh_disable()</tt>
-across its read-side critical sections, and which uses the transition
-from one type of softirq processing to another as a quiescent state
-in addition to context switch, idle, user mode, and offline.
-This means that RCU-bh grace periods can complete even when some of
-the CPUs execute in softirq indefinitely, thus allowing algorithms
-based on RCU-bh to withstand network-based denial-of-service attacks.
-
-<p>
-Because
-<tt>rcu_read_lock_bh()</tt> and <tt>rcu_read_unlock_bh()</tt>
-disable and re-enable softirq handlers, any attempt to start a softirq
-handler during the
-RCU-bh read-side critical section will be deferred.
-In this case, <tt>rcu_read_unlock_bh()</tt>
-will invoke softirq processing, which can take considerable time.
-One can of course argue that this softirq overhead should be associated
-with the code following the RCU-bh read-side critical section rather
-than <tt>rcu_read_unlock_bh()</tt>, but the fact
-is that most profiling tools cannot be expected to make this sort
-of fine distinction.
-For example, suppose that a three-millisecond-long RCU-bh read-side
-critical section executes during a time of heavy networking load.
-There will very likely be an attempt to invoke at least one softirq
-handler during that three milliseconds, but any such invocation will
-be delayed until the time of the <tt>rcu_read_unlock_bh()</tt>.
-This can of course make it appear at first glance as if
-<tt>rcu_read_unlock_bh()</tt> was executing very slowly.
-
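-<p>
-A minimal RCU-bh reader might therefore look as follows, reusing the
-<tt>gp</tt> pointer and <tt>struct foo</tt> from the earlier examples
-purely for illustration, with <tt>do_something_with()</tt> being
-hypothetical:
-
-<blockquote>
-<pre>
- 1 struct foo *p;
- 2
- 3 rcu_read_lock_bh();
- 4 p = rcu_dereference_bh(gp);
- 5 if (p)
- 6   do_something_with(p-&gt;a);
- 7 rcu_read_unlock_bh(); /* May run deferred softirq handlers. */
-</pre>
-</blockquote>
-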
-<p>
-The
-<a href="https://lwn.net/Articles/609973/#RCU Per-Flavor API Table">RCU-bh API</a>
-includes
-<tt>rcu_read_lock_bh()</tt>,
-<tt>rcu_read_unlock_bh()</tt>,
-<tt>rcu_dereference_bh()</tt>,
-<tt>rcu_dereference_bh_check()</tt>,
-<tt>synchronize_rcu_bh()</tt>,
-<tt>synchronize_rcu_bh_expedited()</tt>,
-<tt>call_rcu_bh()</tt>,
-<tt>rcu_barrier_bh()</tt>, and
-<tt>rcu_read_lock_bh_held()</tt>.
-
-<h3><a name="Sched Flavor">Sched Flavor</a></h3>
-
-<p>
-Before preemptible RCU, waiting for an RCU grace period had the
-side effect of also waiting for all pre-existing interrupt
-and NMI handlers.
-However, there are legitimate preemptible-RCU implementations that
-do not have this property, given that any point in the code outside
-of an RCU read-side critical section can be a quiescent state.
-Therefore, <i>RCU-sched</i> was created, which follows &ldquo;classic&rdquo;
-RCU in that an RCU-sched grace period waits for pre-existing
-interrupt and NMI handlers.
-In kernels built with <tt>CONFIG_PREEMPT=n</tt>, the RCU and RCU-sched
-APIs have identical implementations, while kernels built with
-<tt>CONFIG_PREEMPT=y</tt> provide a separate implementation for each.
-
-<p>
-Note well that in <tt>CONFIG_PREEMPT=y</tt> kernels,
-<tt>rcu_read_lock_sched()</tt> and <tt>rcu_read_unlock_sched()</tt>
-disable and re-enable preemption, respectively.
-This means that if there was a preemption attempt during the
-RCU-sched read-side critical section, <tt>rcu_read_unlock_sched()</tt>
-will enter the scheduler, with all the latency and overhead entailed.
-Just as with <tt>rcu_read_unlock_bh()</tt>, this can make it look
-as if <tt>rcu_read_unlock_sched()</tt> was executing very slowly.
-However, the highest-priority task won't be preempted, so that task
-will enjoy low-overhead <tt>rcu_read_unlock_sched()</tt> invocations.
-
-<p>
-The
-<a href="https://lwn.net/Articles/609973/#RCU Per-Flavor API Table">RCU-sched API</a>
-includes
-<tt>rcu_read_lock_sched()</tt>,
-<tt>rcu_read_unlock_sched()</tt>,
-<tt>rcu_read_lock_sched_notrace()</tt>,
-<tt>rcu_read_unlock_sched_notrace()</tt>,
-<tt>rcu_dereference_sched()</tt>,
-<tt>rcu_dereference_sched_check()</tt>,
-<tt>synchronize_sched()</tt>,
-<tt>synchronize_rcu_sched_expedited()</tt>,
-<tt>call_rcu_sched()</tt>,
-<tt>rcu_barrier_sched()</tt>, and
-<tt>rcu_read_lock_sched_held()</tt>.
-However, anything that disables preemption also marks an RCU-sched
-read-side critical section, including
-<tt>preempt_disable()</tt> and <tt>preempt_enable()</tt>,
-<tt>local_irq_save()</tt> and <tt>local_irq_restore()</tt>,
-and so on.
-
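-<p>
-The following sketch shows both the explicit and the implicit forms
-of an RCU-sched reader, again reusing <tt>gp</tt> and
-<tt>struct foo</tt> from the earlier examples purely for illustration,
-with <tt>do_something_with()</tt> being hypothetical:
-
-<blockquote>
-<pre>
- 1 struct foo *p;
- 2
- 3 /* Explicit RCU-sched reader. */
- 4 rcu_read_lock_sched();
- 5 p = rcu_dereference_sched(gp);
- 6 if (p)
- 7   do_something_with(p-&gt;a);
- 8 rcu_read_unlock_sched();
- 9
-10 /* Equivalent implicit reader:  preemption disabled. */
-11 preempt_disable();
-12 p = rcu_dereference_sched(gp);
-13 if (p)
-14   do_something_with(p-&gt;a);
-15 preempt_enable();
-</pre>
-</blockquote>
-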
-<h3><a name="Sleepable RCU">Sleepable RCU</a></h3>
-
-<p>
-For well over a decade, someone saying &ldquo;I need to block within
-an RCU read-side critical section&rdquo; was a reliable indication
-that this someone did not understand RCU.
-After all, if you are always blocking in an RCU read-side critical
-section, you can probably afford to use a higher-overhead synchronization
-mechanism.
-However, that changed with the advent of the Linux kernel's notifiers,
-whose RCU read-side critical
-sections almost never sleep, but sometimes need to.
-This resulted in the introduction of
-<a href="https://lwn.net/Articles/202847/">sleepable RCU</a>,
-or <i>SRCU</i>.
-
-<p>
-SRCU allows different domains to be defined, with each such domain
-defined by an instance of an <tt>srcu_struct</tt> structure.
-A pointer to this structure must be passed in to each SRCU function,
-for example, <tt>synchronize_srcu(&amp;ss)</tt>, where
-<tt>ss</tt> is the <tt>srcu_struct</tt> structure.
-The key benefit of these domains is that a slow SRCU reader in one
-domain does not delay an SRCU grace period in some other domain.
-That said, one consequence of these domains is that read-side code
-must pass a &ldquo;cookie&rdquo; from <tt>srcu_read_lock()</tt>
-to <tt>srcu_read_unlock()</tt>, for example, as follows:
-
-<blockquote>
-<pre>
- 1 int idx;
- 2
- 3 idx = srcu_read_lock(&amp;ss);
- 4 do_something();
- 5 srcu_read_unlock(&amp;ss, idx);
-</pre>
-</blockquote>
-
-<p>
-As noted above, it is legal to block within SRCU read-side critical sections;
-however, with great power comes great responsibility.
-If you block forever in one of a given domain's SRCU read-side critical
-sections, then that domain's grace periods will also be blocked forever.
-Of course, one good way to block forever is to deadlock, which can
-happen if any operation in a given domain's SRCU read-side critical
-section can block waiting, either directly or indirectly, for that domain's
-grace period to elapse.
-For example, this results in a self-deadlock:
-
-<blockquote>
-<pre>
- 1 int idx;
- 2
- 3 idx = srcu_read_lock(&amp;ss);
- 4 do_something();
- 5 synchronize_srcu(&amp;ss);
- 6 srcu_read_unlock(&amp;ss, idx);
-</pre>
-</blockquote>
-
-<p>
-However, if line&nbsp;5 acquired a mutex that was held across
-a <tt>synchronize_srcu()</tt> for domain <tt>ss</tt>,
-deadlock would still be possible.
-Furthermore, if line&nbsp;5 acquired a mutex that was held across
-a <tt>synchronize_srcu()</tt> for some other domain <tt>ss1</tt>,
-and if an <tt>ss1</tt>-domain SRCU read-side critical section
-acquired another mutex that was held across an <tt>ss</tt>-domain
-<tt>synchronize_srcu()</tt>,
-deadlock would again be possible.
-Such a deadlock cycle could extend across an arbitrarily large number
-of different SRCU domains.
-Again, with great power comes great responsibility.
-
-<p>
-Unlike the other RCU flavors, SRCU read-side critical sections can
-run on idle and even offline CPUs.
-This ability requires that <tt>srcu_read_lock()</tt> and
-<tt>srcu_read_unlock()</tt> contain memory barriers, which means
-that SRCU readers will run a bit slower than would RCU readers.
-It also motivates the <tt>smp_mb__after_srcu_read_unlock()</tt>
-API, which, in combination with <tt>srcu_read_unlock()</tt>,
-guarantees a full memory barrier.
-
-<p>
-The
-<a href="https://lwn.net/Articles/609973/#RCU Per-Flavor API Table">SRCU API</a>
-includes
-<tt>srcu_read_lock()</tt>,
-<tt>srcu_read_unlock()</tt>,
-<tt>srcu_dereference()</tt>,
-<tt>srcu_dereference_check()</tt>,
-<tt>synchronize_srcu()</tt>,
-<tt>synchronize_srcu_expedited()</tt>,
-<tt>call_srcu()</tt>,
-<tt>srcu_barrier()</tt>, and
-<tt>srcu_read_lock_held()</tt>.
-It also includes
-<tt>DEFINE_SRCU()</tt>,
-<tt>DEFINE_STATIC_SRCU()</tt>, and
-<tt>init_srcu_struct()</tt>
-APIs for defining and initializing <tt>srcu_struct</tt> structures.
-
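-<p>
-For example, a hypothetical <tt>my_srcu</tt> domain can be defined at
-compile time, or a hypothetical <tt>my_dyn_srcu</tt> domain can be set
-up and torn down at runtime, with <tt>cleanup_srcu_struct()</tt>
-being the teardown counterpart of <tt>init_srcu_struct()</tt>:
-
-<blockquote>
-<pre>
- 1 /* Compile-time definition. */
- 2 DEFINE_STATIC_SRCU(my_srcu);
- 3
- 4 /* Run-time setup and teardown. */
- 5 static struct srcu_struct my_dyn_srcu;
- 6
- 7 static int my_setup(void)
- 8 {
- 9   return init_srcu_struct(&amp;my_dyn_srcu);
-10 }
-11
-12 static void my_teardown(void)
-13 {
-14   cleanup_srcu_struct(&amp;my_dyn_srcu);
-15 }
-</pre>
-</blockquote>
-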
-<h3><a name="Tasks RCU">Tasks RCU</a></h3>
-
-<p>
-Some forms of tracing use &ldquo;trampolines&rdquo; to handle the
-binary rewriting required to install different types of probes.
-It would be good to be able to free old trampolines, which sounds
-like a job for some form of RCU.
-However, because it is necessary to be able to install a trace
-anywhere in the code, it is not possible to use read-side markers
-such as <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>.
-In addition, it does not work to have these markers in the trampoline
-itself, because there would need to be instructions following
-<tt>rcu_read_unlock()</tt>.
-Although <tt>synchronize_rcu()</tt> would guarantee that execution
-reached the <tt>rcu_read_unlock()</tt>, it would not be able to
-guarantee that execution had completely left the trampoline.
-
-<p>
-The solution, in the form of
-<a href="https://lwn.net/Articles/607117/"><i>Tasks RCU</i></a>,
-is to have implicit
-read-side critical sections that are delimited by voluntary context
-switches, that is, calls to <tt>schedule()</tt>,
-<tt>cond_resched_rcu_qs()</tt>, and
-<tt>synchronize_rcu_tasks()</tt>.
-In addition, transitions to and from userspace execution also delimit
-tasks-RCU read-side critical sections.
-
-<p>
-The tasks-RCU API is quite compact, consisting only of
-<tt>call_rcu_tasks()</tt>,
-<tt>synchronize_rcu_tasks()</tt>, and
-<tt>rcu_barrier_tasks()</tt>.
-
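-<p>
-For example, a tracing subsystem might retire a trampoline using a
-sketch along the following lines, in which <tt>struct tramp</tt>,
-<tt>free_executable_memory()</tt>, and
-<tt>remove_all_probes_using()</tt> are hypothetical:
-
-<blockquote>
-<pre>
- 1 struct tramp {
- 2   void *insns;
- 3   struct rcu_head rh;
- 4 };
- 5
- 6 static void free_tramp_cb(struct rcu_head *rhp)
- 7 {
- 8   struct tramp *tp = container_of(rhp, struct tramp, rh);
- 9
-10   free_executable_memory(tp-&gt;insns);
-11   kfree(tp);
-12 }
-13
-14 static void retire_tramp(struct tramp *tp)
-15 {
-16   remove_all_probes_using(tp);
-17   call_rcu_tasks(&amp;tp-&gt;rh, free_tramp_cb);
-18 }
-</pre>
-</blockquote>
-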
-<h2><a name="Possible Future Changes">Possible Future Changes</a></h2>
-
-<p>
-One of the tricks that RCU uses to attain update-side scalability is
-to increase grace-period latency with increasing numbers of CPUs.
-If this becomes a serious problem, it will be necessary to rework the
-grace-period state machine so as to avoid the need for the additional
-latency.
-
-<p>
-Expedited grace periods scan the CPUs, so their latency and overhead
-increases with increasing numbers of CPUs.
-If this becomes a serious problem on large systems, it will be necessary
-to do some redesign to avoid this scalability problem.
-
-<p>
-RCU disables CPU hotplug in a few places, perhaps most notably in the
-expedited grace-period and <tt>rcu_barrier()</tt> operations.
-If there is a strong reason to use expedited grace periods in CPU-hotplug
-notifiers, it will be necessary to avoid disabling CPU hotplug.
-This would introduce some complexity, so there had better be a <i>very</i>
-good reason.
-
-<p>
-The tradeoff between grace-period latency on the one hand and interruptions
-of other CPUs on the other hand may need to be re-examined.
-The desire is of course for zero grace-period latency as well as zero
-interprocessor interrupts undertaken during an expedited grace period
-operation.
-While this ideal is unlikely to be achievable, it is quite possible that
-further improvements can be made.
-
-<p>
-The multiprocessor implementations of RCU use a combining tree that
-groups CPUs so as to reduce lock contention and increase cache locality.
-However, this combining tree does not spread its memory across NUMA
-nodes nor does it align the CPU groups with hardware features such
-as sockets or cores.
-Such spreading and alignment is currently believed to be unnecessary
-because the hotpath read-side primitives do not access the combining
-tree, nor does <tt>call_rcu()</tt> in the common case.
-If you believe that your architecture needs such spreading and alignment,
-then your architecture should also benefit from the
-<tt>rcutree.rcu_fanout_leaf</tt> boot parameter, which can be set
-to the number of CPUs in a socket, NUMA node, or whatever.
-If the number of CPUs is too large, use a fraction of the number of
-CPUs.
-If the number of CPUs is a large prime number, well, that certainly
-is an &ldquo;interesting&rdquo; architectural choice!
-More flexible arrangements might be considered, but only if
-<tt>rcutree.rcu_fanout_leaf</tt> has proven inadequate, and only
-if the inadequacy has been demonstrated by a carefully run and
-realistic system-level workload.
-
-<p>
-Please note that arrangements that require RCU to remap CPU numbers will
-require extremely good demonstration of need and full exploration of
-alternatives.
-
-<p>
-There is an embarrassingly large number of flavors of RCU, and this
-number has been increasing over time.
-Perhaps it will be possible to combine some at some future date.
-
-<p>
-RCU's various kthreads are reasonably recent additions.
-It is quite likely that adjustments will be required to more gracefully
-handle extreme loads.
-It might also be necessary to be able to relate CPU utilization by
-RCU's kthreads and softirq handlers to the code that instigated this
-CPU utilization.
-For example, RCU callback overhead might be charged back to the
-originating <tt>call_rcu()</tt> instance, though probably not
-in production kernels.
-
-<h2><a name="Summary">Summary</a></h2>
-
-<p>
-This document has presented more than two decade's worth of RCU
-requirements.
-Given that the requirements keep changing, this will not be the last
-word on this subject, but at least it serves to get an important
-subset of the requirements set forth.
-
-<h2><a name="Acknowledgments">Acknowledgments</a></h2>
-
-I am grateful to Steven Rostedt, Lai Jiangshan, Ingo Molnar,
-Oleg Nesterov, Borislav Petkov, Peter Zijlstra, Boqun Feng, and
-Andy Lutomirski for their help in rendering
-this article human readable, and to Michelle Rankin for her support
-of this effort.
-Other contributions are acknowledged in the Linux kernel's git archive.
-The cartoon is copyright (c) 2013 by Melissa Broussard,
-and is provided
-under the terms of the Creative Commons Attribution-Share Alike 3.0
-United States license.
-
-<p>@@QQAL@@
-
-</body></html>
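
The tasks-RCU API named in the removed requirements text above is small enough to
illustrate in a short sketch. The struct, callback, and function names below
(my_obj, my_free_cb, retire_obj, wait_for_outstanding_work) are made up for
illustration and are not part of this patch; only call_rcu_tasks(),
synchronize_rcu_tasks(), and rcu_barrier_tasks() are the real kernel interfaces:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct my_obj {
		struct rcu_head rh;
		int data;
	};

	static void my_free_cb(struct rcu_head *rh)
	{
		kfree(container_of(rh, struct my_obj, rh));
	}

	static void retire_obj(struct my_obj *obj)
	{
		/* Free obj once every task has passed through a voluntary
		 * context switch, i.e. after a tasks-RCU grace period. */
		call_rcu_tasks(&obj->rh, my_free_cb);
	}

	static void wait_for_outstanding_work(void)
	{
		/* Wait for a full tasks-RCU grace period ... */
		synchronize_rcu_tasks();
		/* ... and for all previously queued call_rcu_tasks() callbacks. */
		rcu_barrier_tasks();
	}
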
diff --git a/Documentation/RCU/Design/htmlqqz.sh b/Documentation/RCU/Design/htmlqqz.sh
deleted file mode 100755
index d354f06..0000000
--- a/Documentation/RCU/Design/htmlqqz.sh
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/bin/sh
-#
-# Usage: sh htmlqqz.sh file
-#
-# Extracts and converts quick quizzes in a proto-HTML document file.htmlx.
-# Commands, all of which must be on a line by themselves:
-#
-#	"<p>@@QQ@@": Start of a quick quiz.
-#	"<p>@@QQA@@": Start of a quick-quiz answer.
-#	"<p>@@QQE@@": End of a quick-quiz answer, and thus of the quick quiz.
-#	"<p>@@QQAL@@": Place to put quick-quiz answer list.
-#
-# Places the result in file.html.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
-# Copyright (c) 2013 Paul E. McKenney, IBM Corporation.
-
-fn=$1
-if test ! -r $fn.htmlx
-then
-	echo "Error: $fn.htmlx unreadable."
-	exit 1
-fi
-
-echo "<!-- DO NOT HAND EDIT. -->" > $fn.html
-echo "<!-- Instead, edit $fn.htmlx and run 'sh htmlqqz.sh $fn' -->" >> $fn.html
-awk < $fn.htmlx >> $fn.html '
-
-state == "" && $1 != "<p>@@QQ@@" && $1 != "<p>@@QQAL@@" {
-	print $0;
-	if ($0 ~ /^<p>@@QQ/)
-		print "Bad Quick Quiz command: " NR " (expected <p>@@QQ@@ or <p>@@QQAL@@)." > "/dev/stderr"
-	next;
-}
-
-state == "" && $1 == "<p>@@QQ@@" {
-	qqn++;
-	qqlineno = NR;
-	haveqq = 1;
-	state = "qq";
-	print "<p><a name=\"Quick Quiz " qqn "\"><b>Quick Quiz " qqn "</b>:</a>"
-	next;
-}
-
-state == "qq" && $1 != "<p>@@QQA@@" {
-	qq[qqn] = qq[qqn] $0 "\n";
-	print $0
-	if ($0 ~ /^<p>@@QQ/)
-		print "Bad Quick Quiz command: " NR ". (expected <p>@@QQA@@)" > "/dev/stderr"
-	next;
-}
-
-state == "qq" && $1 == "<p>@@QQA@@" {
-	state = "qqa";
-	print "<br><a href=\"#qq" qqn "answer\">Answer</a>"
-	next;
-}
-
-state == "qqa" && $1 != "<p>@@QQE@@" {
-	qqa[qqn] = qqa[qqn] $0 "\n";
-	if ($0 ~ /^<p>@@QQ/)
-		print "Bad Quick Quiz command: " NR " (expected <p>@@QQE@@)." > "/dev/stderr"
-	next;
-}
-
-state == "qqa" && $1 == "<p>@@QQE@@" {
-	state = "";
-	next;
-}
-
-state == "" && $1 == "<p>@@QQAL@@" {
-	haveqq = "";
-	print "<h3><a name=\"Answers to Quick Quizzes\">"
-	print "Answers to Quick Quizzes</a></h3>"
-	print "";
-	for (i = 1; i <= qqn; i++) {
-		print "<a name=\"qq" i "answer\"></a>"
-		print "<p><b>Quick Quiz " i "</b>:"
-		print qq[i];
-		print "";
-		print "</p><p><b>Answer</b>:"
-		print qqa[i];
-		print "";
-		print "</p><p><a href=\"#Quick%20Quiz%20" i "\"><b>Back to Quick Quiz " i "</b>.</a>"
-		print "";
-	}
-	next;
-}
-
-END {
-	if (state != "")
-		print "Unterminated Quick Quiz: " qqlineno "." > "/dev/stderr"
-	else if (haveqq)
-		print "Missing \"<p>@@QQAL@@\", no Quick Quiz." > "/dev/stderr"
-}'
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index ec6998b..00a3a38 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -237,17 +237,17 @@
 
 The output of "cat rcu/rcu_preempt/rcuexp" looks as follows:
 
-s=21872 wd0=0 wd1=0 wd2=0 wd3=5 n=0 enq=0 sc=21872
+s=21872 wd1=0 wd2=0 wd3=5 n=0 enq=0 sc=21872
 
 These fields are as follows:
 
 o	"s" is the sequence number, with an odd number indicating that
 	an expedited grace period is in progress.
 
-o	"wd0", "wd1", "wd2", and "wd3" are the number of times that an
-	attempt to start an expedited grace period found that someone
-	else had completed an expedited grace period that satisfies the
-	attempted request.  "Our work is done."
+o	"wd1", "wd2", and "wd3" are the number of times that an attempt
+	to start an expedited grace period found that someone else had
+	completed an expedited grace period that satisfies the attempted
+	request.  "Our work is done."
 
 o	"n" is number of times that a concurrent CPU-hotplug operation
 	forced a fallback to a normal grace period.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index dc49c67..111770f 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -681,22 +681,30 @@
 RCU is analogous to reader-writer locking.  The following unified
 diff shows how closely related RCU and reader-writer locking can be.
 
+	@@ -5,5 +5,5 @@ struct el {
+	 	int data;
+	 	/* Other data fields */
+	 };
+	-rwlock_t listmutex;
+	+spinlock_t listmutex;
+	 struct el head;
+
 	@@ -13,15 +14,15 @@
 		struct list_head *lp;
 		struct el *p;
 
-	-	read_lock();
+	-	read_lock(&listmutex);
 	-	list_for_each_entry(p, head, lp) {
 	+	rcu_read_lock();
 	+	list_for_each_entry_rcu(p, head, lp) {
 			if (p->key == key) {
 				*result = p->data;
-	-			read_unlock();
+	-			read_unlock(&listmutex);
 	+			rcu_read_unlock();
 				return 1;
 			}
 		}
-	-	read_unlock();
+	-	read_unlock(&listmutex);
 	+	rcu_read_unlock();
 		return 0;
 	 }
@@ -732,7 +740,7 @@
  5   int data;                          5   int data;
  6   /* Other data fields */            6   /* Other data fields */
  7 };                                   7 };
- 8 spinlock_t listmutex;                8 spinlock_t listmutex;
+ 8 rwlock_t listmutex;                  8 spinlock_t listmutex;
  9 struct el head;                      9 struct el head;
 
  1 int search(long key, int *result)    1 int search(long key, int *result)
@@ -740,15 +748,15 @@
  3   struct list_head *lp;              3   struct list_head *lp;
  4   struct el *p;                      4   struct el *p;
  5                                      5
- 6   read_lock();                       6   rcu_read_lock();
+ 6   read_lock(&listmutex);             6   rcu_read_lock();
  7   list_for_each_entry(p, head, lp) { 7   list_for_each_entry_rcu(p, head, lp) {
  8     if (p->key == key) {             8     if (p->key == key) {
  9       *result = p->data;             9       *result = p->data;
-10       read_unlock();                10       rcu_read_unlock();
+10       read_unlock(&listmutex);      10       rcu_read_unlock();
 11       return 1;                     11       return 1;
 12     }                               12     }
 13   }                                 13   }
-14   read_unlock();                    14   rcu_read_unlock();
+14   read_unlock(&listmutex);          14   rcu_read_unlock();
 15   return 0;                         15   return 0;
 16 }                                   16 }
 
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index 7785fb5..b5ca536 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -505,6 +505,8 @@
 						if (!loop)
 							goto done;
 						break;
+					case TASKSTATS_TYPE_NULL:
+						break;
 					default:
 						fprintf(stderr, "Unknown nested"
 							" nla_type %d\n",
@@ -512,7 +514,8 @@
 						break;
 					}
 					len2 += NLA_ALIGN(na->nla_len);
-					na = (struct nlattr *) ((char *) na + len2);
+					na = (struct nlattr *)((char *)na +
+							       NLA_ALIGN(na->nla_len));
 				}
 				break;
 
diff --git a/Documentation/acpi/initrd_table_override.txt b/Documentation/acpi/initrd_table_override.txt
index 35c3f54..eb651a6 100644
--- a/Documentation/acpi/initrd_table_override.txt
+++ b/Documentation/acpi/initrd_table_override.txt
@@ -1,5 +1,5 @@
-Overriding ACPI tables via initrd
-=================================
+Upgrading ACPI tables via initrd
+================================
 
 1) Introduction (What is this about)
 2) What is this for
@@ -9,12 +9,14 @@
 1) What is this about
 ---------------------
 
-If the ACPI_INITRD_TABLE_OVERRIDE compile option is true, it is possible to
-override nearly any ACPI table provided by the BIOS with an instrumented,
-modified one.
+If the ACPI_TABLE_UPGRADE compile option is true, it is possible to
+upgrade the ACPI execution environment that is defined by the ACPI tables
+by replacing the ACPI tables provided by the BIOS with instrumented,
+modified, more recent versions, or by installing brand new ACPI tables.
 
-For a full list of ACPI tables that can be overridden, take a look at
-the char *table_sigs[MAX_ACPI_SIGNATURE]; definition in drivers/acpi/osl.c
+For a full list of ACPI tables that can be upgraded/installed, take a look
+at the char *table_sigs[MAX_ACPI_SIGNATURE]; definition in
+drivers/acpi/tables.c.
 All ACPI tables iasl (Intel's ACPI compiler and disassembler) knows should
 be overridable, except:
    - ACPI_SIG_RSDP (has a signature of 6 bytes)
@@ -25,17 +27,20 @@
 2) What is this for
 -------------------
 
-Please keep in mind that this is a debug option.
-ACPI tables should not get overridden for productive use.
-If BIOS ACPI tables are overridden the kernel will get tainted with the
-TAINT_OVERRIDDEN_ACPI_TABLE flag.
-Complain to your platform/BIOS vendor if you find a bug which is so sever
-that a workaround is not accepted in the Linux kernel.
+Complain to your platform/BIOS vendor if you find a bug which is so severe
+that a workaround is not accepted in the Linux kernel. In the meantime,
+this facility allows you to upgrade the buggy tables yourself before your
+platform/BIOS vendor releases an upgraded BIOS binary.
 
-Still, it can and should be enabled in any kernel, because:
-  - There is no functional change with not instrumented initrds
-  - It provides a powerful feature to easily debug and test ACPI BIOS table
-    compatibility with the Linux kernel.
+This facility can be used by platform/BIOS vendors to provide a Linux
+compatible environment without modifying the underlying platform firmware.
+
+This facility also provides a powerful feature to easily debug and test
+ACPI BIOS table compatibility with the Linux kernel by modifying old
+platform provided ACPI tables or inserting new ACPI tables.
+
+It can and should be enabled in any kernel, because there is no functional
+change when the initrd carries no instrumented tables.
 
 
 3) How does it work
@@ -50,23 +55,31 @@
 # For example add this statement into a _PRT (PCI Routing Table) function
 # of the DSDT:
 Store("HELLO WORLD", debug)
+# And increase the OEM Revision. For example, before modification:
+DefinitionBlock ("DSDT.aml", "DSDT", 2, "INTEL ", "TEMPLATE", 0x00000000)
+# After modification:
+DefinitionBlock ("DSDT.aml", "DSDT", 2, "INTEL ", "TEMPLATE", 0x00000001)
 iasl -sa dsdt.dsl
 # Add the raw ACPI tables to an uncompressed cpio archive.
-# They must be put into a /kernel/firmware/acpi directory inside the
-# cpio archive.
-# The uncompressed cpio archive must be the first.
-# Other, typically compressed cpio archives, must be
-# concatenated on top of the uncompressed one.
+# They must be put into a /kernel/firmware/acpi directory inside the cpio
+# archive. Note that if the table put here matches a platform table
+# (similar Table Signature, and similar OEMID, and similar OEM Table ID)
+# with a more recent OEM Revision, the platform table will be upgraded by
+# this table. If the table put here doesn't match a platform table
+# (dissimilar Table Signature, or dissimilar OEMID, or dissimilar OEM Table
+# ID), this table will be appended.
 mkdir -p kernel/firmware/acpi
 cp dsdt.aml kernel/firmware/acpi
-# A maximum of: #define ACPI_OVERRIDE_TABLES 10
-# tables are  currently allowed (see osl.c):
+# A maximum of "NR_ACPI_INITRD_TABLES (64)" tables are currently allowed
+# (see osl.c):
 iasl -sa facp.dsl
 iasl -sa ssdt1.dsl
 cp facp.aml kernel/firmware/acpi
 cp ssdt1.aml kernel/firmware/acpi
-# Create the uncompressed cpio archive and concatenate the original initrd
-# on top:
+# The uncompressed cpio archive must come first. Other, typically
+# compressed, cpio archives must be concatenated on top of the uncompressed
+# one. The following command creates the uncompressed cpio archive and
+# concatenates the original initrd on top:
 find kernel | cpio -H newc --create > /boot/instrumented_initrd
 cat /boot/initrd >>/boot/instrumented_initrd
 # reboot with increased acpi debug level, e.g. boot params:
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 56d6d8b..8d0df62 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -132,6 +132,10 @@
 physical offset of the Image so it is recommended that the Image be
 placed as close as possible to the start of system RAM.
 
+If an initrd/initramfs is passed to the kernel at boot, it must reside
+entirely within a 1 GB aligned physical memory window of up to 32 GB in
+size that fully covers the kernel Image as well.
+
 Any memory described to the kernel (even that below the start of the
 image) which is not marked as reserved from the kernel (e.g., with a
 memreserve region in the device tree) will be considered as available to
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index ba4b6ac..c6938e5 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -53,7 +53,9 @@
 | ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
 | ARM            | Cortex-A57      | #852523         | N/A                     |
 | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
+| ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
+| Cavium         | ThunderX SMMUv2 | #27704          | N/A                     |
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index e5d9148..dce25d8 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -141,6 +141,15 @@
 an IO scheduler name to this file will attempt to load that IO scheduler
 module, if it isn't already present in the system.
 
+write_cache (RW)
+----------------
+When read, this file will display whether the device has write back
+caching enabled or not. It will return "write back" for the former
+case, and "write through" for the latter. Writing to this file can
+change the kernel's view of the device, but it doesn't alter the
+device state. This means that it might not be safe to toggle the
+setting from "write back" to "write through", since that will also
+eliminate cache flushes issued by the kernel.
 
 
 Jens Axboe <jens.axboe@oracle.com>, February 2009
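
A quick way to exercise the new attribute from user space is sketched below.
This is only an illustration: "sdX" is a placeholder device name, and the path
assumes the usual /sys/block/<dev>/queue/ layout documented by this file.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[32] = "";
		int fd = open("/sys/block/sdX/queue/write_cache", O_RDWR);

		if (fd < 0)
			return 1;
		if (read(fd, buf, sizeof(buf) - 1) > 0)
			printf("current setting: %s", buf);
		/* Tell the kernel to treat the device as write-through.
		 * This only changes the kernel's view, not the device. */
		lseek(fd, 0, SEEK_SET);
		write(fd, "write through", strlen("write through"));
		close(fd);
		return 0;
	}
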
diff --git a/Documentation/block/writeback_cache_control.txt b/Documentation/block/writeback_cache_control.txt
index 83407d3..59e0516 100644
--- a/Documentation/block/writeback_cache_control.txt
+++ b/Documentation/block/writeback_cache_control.txt
@@ -71,7 +71,7 @@
 driver needs to tell the block layer that it supports flushing caches by
 doing:
 
-	blk_queue_flush(sdkp->disk->queue, REQ_FLUSH);
+	blk_queue_write_cache(sdkp->disk->queue, true, false);
 
 and handle empty REQ_FLUSH requests in its prep_fn/request_fn.  Note that
 REQ_FLUSH requests with a payload are automatically turned into a sequence
@@ -79,7 +79,7 @@
 layer.  For devices that also support the FUA bit the block layer needs
 to be told to pass through the REQ_FUA bit using:
 
-	blk_queue_flush(sdkp->disk->queue, REQ_FLUSH | REQ_FUA);
+	blk_queue_write_cache(sdkp->disk->queue, true, true);
 
 and the driver must handle write requests that have the REQ_FUA bit set
 in prep_fn/request_fn.  If the FUA bit is not natively supported the block
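
A hedged sketch of how a driver's setup path might use the new helper follows;
the function and flag names (mydrv_setup_cache, has_cache, has_fua) are
illustrative, and only blk_queue_write_cache() itself is the interface
described above:

	#include <linux/blkdev.h>

	/* Advertise the device's caching capabilities to the block layer. */
	static void mydrv_setup_cache(struct request_queue *q,
				      bool has_cache, bool has_fua)
	{
		/* 2nd argument: a volatile write cache is present,
		 * 3rd argument: the device handles the FUA bit natively. */
		blk_queue_write_cache(q, has_cache, has_fua);
	}
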
diff --git a/Documentation/device-mapper/cache-policies.txt b/Documentation/device-mapper/cache-policies.txt
index e5062ad..d3ca8af 100644
--- a/Documentation/device-mapper/cache-policies.txt
+++ b/Documentation/device-mapper/cache-policies.txt
@@ -11,7 +11,7 @@
 The policy can return a simple HIT or MISS or issue a migration.
 
 Currently there's no way for the policy to issue background work,
-e.g. to start writing back dirty blocks that are going to be evicte
+e.g. to start writing back dirty blocks that are going to be evicted
 soon.
 
 Because we map bios, rather than requests it's easy for the policy
@@ -48,7 +48,7 @@
 
 The smq policy (vs mq) offers the promise of less memory utilization,
 improved performance and increased adaptability in the face of changing
-workloads.  SMQ also does not have any cumbersome tuning knobs.
+workloads.  smq also does not have any cumbersome tuning knobs.
 
 Users may switch from "mq" to "smq" simply by appropriately reloading a
 DM table that is using the cache target.  Doing so will cause all of the
@@ -57,47 +57,45 @@
 that should be cached.
 
 Memory usage:
-The mq policy uses a lot of memory; 88 bytes per cache block on a 64
+The mq policy used a lot of memory; 88 bytes per cache block on a 64
 bit machine.
 
-SMQ uses 28bit indexes to implement it's data structures rather than
+smq uses 28bit indexes to implement it's data structures rather than
 pointers.  It avoids storing an explicit hit count for each block.  It
-has a 'hotspot' queue rather than a pre cache which uses a quarter of
+has a 'hotspot' queue, rather than a pre-cache, which uses a quarter of
 the entries (each hotspot block covers a larger area than a single
 cache block).
 
-All these mean smq uses ~25bytes per cache block.  Still a lot of
+All this means smq uses ~25 bytes per cache block.  Still a lot of
 memory, but a substantial improvement nontheless.
 
 Level balancing:
-MQ places entries in different levels of the multiqueue structures
-based on their hit count (~ln(hit count)).  This means the bottom
-levels generally have the most entries, and the top ones have very
-few.  Having unbalanced levels like this reduces the efficacy of the
+mq placed entries in different levels of the multiqueue structures
+based on their hit count (~ln(hit count)).  This meant the bottom
+levels generally had the most entries, and the top ones had very
+few.  Having unbalanced levels like this reduced the efficacy of the
 multiqueue.
 
-SMQ does not maintain a hit count, instead it swaps hit entries with
-the least recently used entry from the level above.  The over all
+smq does not maintain a hit count, instead it swaps hit entries with
+the least recently used entry from the level above.  The overall
 ordering being a side effect of this stochastic process.  With this
 scheme we can decide how many entries occupy each multiqueue level,
 resulting in better promotion/demotion decisions.
 
 Adaptability:
-The MQ policy maintains a hit count for each cache block.  For a
+The mq policy maintained a hit count for each cache block.  For a
 different block to get promoted to the cache it's hit count has to
-exceed the lowest currently in the cache.  This means it can take a
+exceed the lowest currently in the cache.  This meant it could take a
 long time for the cache to adapt between varying IO patterns.
-Periodically degrading the hit counts could help with this, but I
-haven't found a nice general solution.
 
-SMQ doesn't maintain hit counts, so a lot of this problem just goes
+smq doesn't maintain hit counts, so a lot of this problem just goes
 away.  In addition it tracks performance of the hotspot queue, which
 is used to decide which blocks to promote.  If the hotspot queue is
 performing badly then it starts moving entries more quickly between
 levels.  This lets it adapt to new IO patterns very quickly.
 
 Performance:
-Testing SMQ shows substantially better performance than MQ.
+Testing smq shows substantially better performance than mq.
 
 cleaner
 -------
diff --git a/Documentation/device-mapper/statistics.txt b/Documentation/device-mapper/statistics.txt
index 6f5ef94..170ac02 100644
--- a/Documentation/device-mapper/statistics.txt
+++ b/Documentation/device-mapper/statistics.txt
@@ -205,7 +205,7 @@
 
   dmsetup message vol 0 @stats_create - /100
 
-Set the auxillary data string to "foo bar baz" (the escape for each
+Set the auxiliary data string to "foo bar baz" (the escape for each
 space must also be escaped, otherwise the shell will consume them):
 
   dmsetup message vol 0 @stats_set_aux 0 foo\\ bar\\ baz
diff --git a/Documentation/devicetree/bindings/arc/archs-pct.txt b/Documentation/devicetree/bindings/arc/archs-pct.txt
index 1ae98b87..e4b9dce 100644
--- a/Documentation/devicetree/bindings/arc/archs-pct.txt
+++ b/Documentation/devicetree/bindings/arc/archs-pct.txt
@@ -2,7 +2,7 @@
 
 The ARC HS can be configured with a pipeline performance monitor for counting
 CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters.
+are 100+ hardware conditions dynamically mapped to up to 32 counters.
 It also supports overflow interrupts.
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/arc/eznps.txt b/Documentation/devicetree/bindings/arc/eznps.txt
new file mode 100644
index 0000000..1aa50c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arc/eznps.txt
@@ -0,0 +1,7 @@
+EZchip NPS Network Processor Platforms Device Tree Bindings
+---------------------------------------------------------------------------
+
+Appliance main board with NPS400 ASIC.
+
+Required root node properties:
+    - compatible = "ezchip,arc-nps";
diff --git a/Documentation/devicetree/bindings/arc/pct.txt b/Documentation/devicetree/bindings/arc/pct.txt
index 7b95884..4e874d9 100644
--- a/Documentation/devicetree/bindings/arc/pct.txt
+++ b/Documentation/devicetree/bindings/arc/pct.txt
@@ -2,7 +2,7 @@
 
 The ARC700 can be configured with a pipeline performance monitor for counting
 CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters
+are 100+ hardware conditions dynamically mapped to up to 32 counters
 
 Note that:
  * The ARC 700 PCT does not support interrupts; although HW events may be
diff --git a/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt b/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt
index 885f93d..5a6b160 100644
--- a/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt
+++ b/Documentation/devicetree/bindings/arm/altera/socfpga-eccmgr.txt
@@ -3,6 +3,7 @@
 The ECC Manager counts and corrects single bit errors and counts/handles
 double bit errors which are uncorrectable.
 
+Cyclone5 and Arria5 ECC Manager
 Required Properties:
 - compatible : Should be "altr,socfpga-ecc-manager"
 - #address-cells: must be 1
@@ -47,3 +48,52 @@
 			interrupts = <0 178 1>, <0 179 1>;
 		};
 	};
+
+Arria10 SoCFPGA ECC Manager
+The Arria10 SoC ECC Manager handles the IRQs for each peripheral
+through a shared register instead of through individual IRQs as the
+Cyclone5 and Arria5 do. Therefore the device tree is different as well.
+
+Required Properties:
+- compatible : Should be "altr,socfpga-a10-ecc-manager"
+- altr,sysmgr-syscon : phandle to Arria10 System Manager Block
+	containing the ECC manager registers.
+- #address-cells: must be 1
+- #size-cells: must be 1
+- interrupts : Should be single bit error interrupt, then double bit error
+	interrupt. Note the rising edge type.
+- ranges : standard definition, should translate from local addresses
+
+Subcomponents:
+
+L2 Cache ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-a10-l2-ecc"
+- reg : Address and size for ECC error interrupt clear registers.
+
+On-Chip RAM ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-a10-ocram-ecc"
+- reg        : Address and size for ECC block registers.
+
+Example:
+
+	eccmgr: eccmgr@ffd06000 {
+		compatible = "altr,socfpga-a10-ecc-manager";
+		altr,sysmgr-syscon = <&sysmgr>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 0 IRQ_TYPE_LEVEL_HIGH>;
+		ranges;
+
+		l2-ecc@ffd06010 {
+			compatible = "altr,socfpga-a10-l2-ecc";
+			reg = <0xffd06010 0x4>;
+		};
+
+		ocram-ecc@ff8c3000 {
+			compatible = "altr,socfpga-a10-ocram-ecc";
+			reg = <0xff8c3000 0x90>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt
index 8a5122a..fcc6f6c 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.txt
+++ b/Documentation/devicetree/bindings/arm/amlogic.txt
@@ -25,3 +25,6 @@
   - "tronsmart,vega-s95-pro", "tronsmart,vega-s95" (Meson gxbb)
   - "tronsmart,vega-s95-meta", "tronsmart,vega-s95" (Meson gxbb)
   - "tronsmart,vega-s95-telos", "tronsmart,vega-s95" (Meson gxbb)
+  - "hardkernel,odroid-c2" (Meson gxbb)
+  - "amlogic,p200" (Meson gxbb)
+  - "amlogic,p201" (Meson gxbb)
diff --git a/Documentation/devicetree/bindings/arm/arm-boards b/Documentation/devicetree/bindings/arm/arm-boards
index 0226bc2..ab318a5 100644
--- a/Documentation/devicetree/bindings/arm/arm-boards
+++ b/Documentation/devicetree/bindings/arm/arm-boards
@@ -93,6 +93,14 @@
   a core-module with regs and the compatible strings
   "arm,core-module-versatile", "syscon"
 
+Optional nodes:
+
+- arm,versatile-ib2-syscon : if the Versatile has an IB2 interface
+  board mounted, this has a separate system controller that is
+  defined in this node.
+  Required properties:
+  compatible = "arm,versatile-ib2-syscon", "syscon"
+
 ARM RealView Boards
 -------------------
 The RealView boards cover tailored evaluation boards that are used to explore
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index 7fd64ec..1d80046 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -41,6 +41,10 @@
        - "atmel,sama5d43"
        - "atmel,sama5d44"
 
+Chipid required properties:
+- compatible: Should be "atmel,sama5d2-chipid"
+- reg : Should contain registers location and length
+
 PIT Timer required properties:
 - compatible: Should be "atmel,at91sam9260-pit"
 - reg: Should contain registers location and length
@@ -155,7 +159,7 @@
 
 required properties:
 - compatible: Should be "atmel,<chip>-sfr", "syscon".
-  <chip> can be "sama5d3" or "sama5d4".
+  <chip> can be "sama5d3", "sama5d4" or "sama5d2".
 - reg: Should contain registers location and length
 
 	sfr@f0038000 {
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index ccc62f1..3f0cbbb 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -192,7 +192,6 @@
 			  can be one of:
 			    "allwinner,sun6i-a31"
 			    "allwinner,sun8i-a23"
-			    "arm,psci"
 			    "arm,realview-smp"
 			    "brcm,bcm-nsp-smp"
 			    "brcm,brahma-b15"
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index 752a685..dbbc095 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -135,6 +135,10 @@
 Required root node properties:
     - compatible = "fsl,ls1043a-rdb", "fsl,ls1043a";
 
+LS1043A ARMv8 based QDS Board
+Required root node properties:
+    - compatible = "fsl,ls1043a-qds", "fsl,ls1043a";
+
 LS2080A ARMv8 based Simulator model
 Required root node properties:
     - compatible = "fsl,ls2080a-simu", "fsl,ls2080a";
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
index e3ccab1..83fe816 100644
--- a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
@@ -1,29 +1,33 @@
 Hisilicon Platforms Device Tree Bindings
 ----------------------------------------------------
-Hi6220 SoC
-Required root node properties:
-	- compatible = "hisilicon,hi6220";
-
 Hi4511 Board
 Required root node properties:
 	- compatible = "hisilicon,hi3620-hi4511";
 
-HiP04 D01 Board
+Hi6220 SoC
 Required root node properties:
-	- compatible = "hisilicon,hip04-d01";
-
-HiP01 ca9x2 Board
-Required root node properties:
-	- compatible = "hisilicon,hip01-ca9x2";
+	- compatible = "hisilicon,hi6220";
 
 HiKey Board
 Required root node properties:
 	- compatible = "hisilicon,hi6220-hikey", "hisilicon,hi6220";
 
+HiP01 ca9x2 Board
+Required root node properties:
+	- compatible = "hisilicon,hip01-ca9x2";
+
+HiP04 D01 Board
+Required root node properties:
+	- compatible = "hisilicon,hip04-d01";
+
 HiP05 D02 Board
 Required root node properties:
 	- compatible = "hisilicon,hip05-d02";
 
+HiP06 D03 Board
+Required root node properties:
+	- compatible = "hisilicon,hip06-d03";
+
 Hisilicon system controller
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index 21e71a5e8..94b57f2 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -133,6 +133,9 @@
 - AM335X Bone : Low cost community board
   compatible = "ti,am335x-bone", "ti,am33xx", "ti,omap3"
 
+- AM3359 ICEv2 : Low cost Industrial Communication Engine EVM.
+  compatible = "ti,am3359-icev2", "ti,am33xx", "ti,omap3"
+
 - AM335X OrionLXm : Substation Automation Platform
   compatible = "novatech,am335x-lxm", "ti,am33xx"
 
@@ -169,6 +172,9 @@
 - AM57XX SBC-AM57x
   compatible = "compulab,sbc-am57x", "compulab,cl-som-am57x", "ti,am5728", "ti,dra742", "ti,dra74", "ti,dra7"
 
+- AM5728 IDK
+  compatible = "ti,am5728-idk", "ti,am5728", "ti,dra742", "ti,dra74", "ti,dra7"
+
 - DRA742 EVM:  Software Development Board for DRA742
   compatible = "ti,dra7-evm", "ti,dra742", "ti,dra74", "ti,dra7"
 
diff --git a/Documentation/devicetree/bindings/arm/oxnas.txt b/Documentation/devicetree/bindings/arm/oxnas.txt
new file mode 100644
index 0000000..b9e4971
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/oxnas.txt
@@ -0,0 +1,9 @@
+Oxford Semiconductor OXNAS SoCs Family device tree bindings
+-------------------------------------------
+
+Boards with the OX810SE SoC shall have the following properties:
+  Required root node property:
+    compatible: "oxsemi,ox810se"
+
+Board compatible values:
+  - "wd,mbwe" (OX810SE)
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 6eb73be..74d5417 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -22,10 +22,11 @@
 	"arm,arm11mpcore-pmu"
 	"arm,arm1176-pmu"
 	"arm,arm1136-pmu"
+	"brcm,vulcan-pmu"
+	"cavium,thunder-pmu"
 	"qcom,scorpion-pmu"
 	"qcom,scorpion-mp-pmu"
 	"qcom,krait-pmu"
-	"cavium,thunder-pmu"
 - interrupts : 1 combined interrupt or 1 per core. If the interrupt is a per-cpu
                interrupt (PPI) then 1 interrupt should be specified.
 
diff --git a/Documentation/devicetree/bindings/arm/rockchip.txt b/Documentation/devicetree/bindings/arm/rockchip.txt
index 078c14f..715d960 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.txt
+++ b/Documentation/devicetree/bindings/arm/rockchip.txt
@@ -39,6 +39,10 @@
     Required root node properties:
       - compatible = "netxeon,r89", "rockchip,rk3288";
 
+- GeekBuying GeekBox:
+    Required root node properties:
+      - compatible = "geekbuying,geekbox", "rockchip,rk3368";
+
 - Google Brain (dev-board):
     Required root node properties:
       - compatible = "google,veyron-brain-rev0", "google,veyron-brain",
@@ -87,6 +91,10 @@
 		     "google,veyron-speedy-rev3", "google,veyron-speedy-rev2",
 		     "google,veyron-speedy", "google,veyron", "rockchip,rk3288";
 
+- mqmaker MiQi:
+    Required root node properties:
+      - compatible = "mqmaker,miqi", "rockchip,rk3288";
+
 - Rockchip RK3368 evb:
     Required root node properties:
       - compatible = "rockchip,rk3368-evb-act8846", "rockchip,rk3368";
@@ -97,4 +105,8 @@
 
 - Rockchip RK3228 Evaluation board:
     Required root node properties:
-      - compatible = "rockchip,rk3228-evb", "rockchip,rk3228";
+     - compatible = "rockchip,rk3228-evb", "rockchip,rk3228";
+
+- Rockchip RK3399 evb:
+    Required root node properties:
+      - compatible = "rockchip,rk3399-evb", "rockchip,rk3399";
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
index 12129c0..f5deace 100644
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
@@ -2,6 +2,8 @@
 
 Required root node properties:
     - compatible = should be one or more of the following.
+	- "samsung,artik5"	- for Exynos3250-based Samsung ARTIK5 module.
+	- "samsung,artik5-eval" - for Exynos3250-based Samsung ARTIK5 eval board.
 	- "samsung,monk"	- for Exynos3250-based Samsung Simband board.
 	- "samsung,rinato"	- for Exynos3250-based Samsung Gear2 board.
 	- "samsung,smdkv310"	- for Exynos4210-based Samsung SMDKV310 eval board.
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt
index 02c2700..a74b37b 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.txt
@@ -1,16 +1,20 @@
 NVIDIA Tegra Power Management Controller (PMC)
 
+== Power Management Controller Node ==
+
 The PMC block interacts with an external Power Management Unit. The PMC
 mostly controls the entry and exit of the system from different sleep
 modes. It provides power-gating controllers for SoC and CPU power-islands.
 
 Required properties:
 - name : Should be pmc
-- compatible : For Tegra20, must contain "nvidia,tegra20-pmc".  For Tegra30,
-  must contain "nvidia,tegra30-pmc".  For Tegra114, must contain
-  "nvidia,tegra114-pmc".  For Tegra124, must contain "nvidia,tegra124-pmc".
-  Otherwise, must contain "nvidia,<chip>-pmc", plus at least one of the
-  above, where <chip> is tegra132.
+- compatible : Should contain one of the following:
+	For Tegra20 must contain "nvidia,tegra20-pmc".
+	For Tegra30 must contain "nvidia,tegra30-pmc".
+	For Tegra114 must contain "nvidia,tegra114-pmc"
+	For Tegra124 must contain "nvidia,tegra124-pmc"
+	For Tegra132 must contain "nvidia,tegra124-pmc"
+	For Tegra210 must contain "nvidia,tegra210-pmc"
 - reg : Offset and length of the register set for the device
 - clocks : Must contain an entry for each entry in clock-names.
   See ../clocks/clock-bindings.txt for details.
@@ -68,6 +72,11 @@
                      Defaults to 0. Valid values are described in section 12.5.2
                      "Pinmux Support" of the Tegra4 Technical Reference Manual.
 
+Optional nodes:
+- powergates : This node contains a hierarchy of power domain nodes, which
+	       should match the powergates on the Tegra SoC. See "Powergate
+	       Nodes" below.
+
 Example:
 
 / SoC dts including file
@@ -113,3 +122,76 @@
 	};
 	...
 };
+
+
+== Powergate Nodes ==
+
+Each of the powergate nodes represents a power-domain on the Tegra SoC
+that can be power-gated by the Tegra PMC. The name of the powergate node
+should be one of the below. Note that not every powergate is applicable
+to all Tegra devices and the following list shows which powergates are
+applicable to which devices. Please refer to the Tegra TRM for more
+details on the various powergates.
+
+ Name		Description			Devices Applicable
+ 3d		3D Graphics			Tegra20/114/124/210
+ 3d0		3D Graphics 0			Tegra30
+ 3d1		3D Graphics 1			Tegra30
+ aud		Audio				Tegra210
+ dfd		Debug				Tegra210
+ dis		Display A			Tegra114/124/210
+ disb		Display B			Tegra114/124/210
+ heg		2D Graphics			Tegra30/114/124/210
+ iram		Internal RAM			Tegra124/210
+ mpe		MPEG Encode			All
+ nvdec		NVIDIA Video Decode Engine	Tegra210
+ nvjpg		NVIDIA JPEG Engine		Tegra210
+ pcie		PCIE				Tegra20/30/124/210
+ sata		SATA				Tegra30/124/210
+ sor		Display interfaces		Tegra124/210
+ ve2		Video Encode Engine 2		Tegra210
+ venc		Video Encode Engine		All
+ vdec		Video Decode Engine		Tegra20/30/114/124
+ vic		Video Imaging Compositor	Tegra124/210
+ xusba		USB Partition A			Tegra114/124/210
+ xusbb		USB Partition B 		Tegra114/124/210
+ xusbc		USB Partition C			Tegra114/124/210
+
+Required properties:
+  - clocks: Must contain an entry for each clock required by the PMC for
+    controlling a power-gate. See ../clocks/clock-bindings.txt for details.
+  - resets: Must contain an entry for each reset required by the PMC for
+    controlling a power-gate. See ../reset/reset.txt for details.
+  - #power-domain-cells: Must be 0.
+
+Example:
+
+	pmc: pmc@7000e400 {
+		compatible = "nvidia,tegra210-pmc";
+		reg = <0x0 0x7000e400 0x0 0x400>;
+		clocks = <&tegra_car TEGRA210_CLK_PCLK>, <&clk32k_in>;
+		clock-names = "pclk", "clk32k_in";
+
+		powergates {
+			pd_audio: aud {
+				clocks = <&tegra_car TEGRA210_CLK_APE>,
+					 <&tegra_car TEGRA210_CLK_APB2APE>;
+				resets = <&tegra_car 198>;
+				#power-domain-cells = <0>;
+			};
+		};
+	};
+
+
+== Powergate Clients ==
+
+Hardware blocks belonging to a power domain should contain a "power-domains"
+property that is a phandle pointing to the corresponding powergate node.
+
+Example:
+
+	adma: adma@702e2000 {
+		...
+		power-domains = <&pd_audio>;
+		...
+	};
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 30df832..87adfb2 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -32,6 +32,10 @@
 - target-supply     : regulator for SATA target power
 - phys              : reference to the SATA PHY node
 - phy-names         : must be "sata-phy"
+- ports-implemented : Mask that indicates which of the ports the HBA supports
+		      are available for software to use. Useful if PORTS_IMPL
+		      is not programmed by the BIOS, which is true with
+		      some embedded SoCs.
 
 Required properties when using sub-nodes:
 - #address-cells    : number of cells to encode an address
diff --git a/Documentation/devicetree/bindings/btmrvl.txt b/Documentation/devicetree/bindings/btmrvl.txt
deleted file mode 100644
index 58f964b..0000000
--- a/Documentation/devicetree/bindings/btmrvl.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-btmrvl
-------
-
-Required properties:
-
-  - compatible : must be "btmrvl,cfgdata"
-
-Optional properties:
-
-  - btmrvl,cal-data : Calibration data downloaded to the device during
-		      initialization. This is an array of 28 values(u8).
-
-  - btmrvl,gpio-gap : gpio and gap (in msecs) combination to be
-		      configured.
-
-Example:
-
-GPIO pin 13 is configured as a wakeup source and GAP is set to 100 msecs
-in below example.
-
-btmrvl {
-	compatible = "btmrvl,cfgdata";
-
-	btmrvl,cal-data = /bits/ 8 <
-		0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
-		0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
-		0x00 0x00 0xf0 0x00>;
-	btmrvl,gpio-gap = <0x0d64>;
-};
diff --git a/Documentation/devicetree/bindings/clock/microchip,pic32.txt b/Documentation/devicetree/bindings/clock/microchip,pic32.txt
new file mode 100644
index 0000000..c93d88f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/microchip,pic32.txt
@@ -0,0 +1,39 @@
+Microchip PIC32 Clock Controller Binding
+----------------------------------------
+The Microchip clock controller consists of a few oscillators, PLLs,
+multiplexers and divider modules.
+
+This binding uses common clock bindings.
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible: shall be "microchip,pic32mzda-clk".
+- reg: shall contain base address and length of clock registers.
+- #clock-cells: shall be 1.
+
+Optional properties:
+- microchip,pic32mzda-sosc: shall be added only if platform has
+  secondary oscillator connected.
+
+Example:
+	rootclk: clock-controller@1f801200 {
+		compatible = "microchip,pic32mzda-clk";
+		reg = <0x1f801200 0x200>;
+		#clock-cells = <1>;
+		/* optional */
+		microchip,pic32mzda-sosc;
+	};
+
+
+The clock consumer shall specify the desired clock-output of the clock
+controller (as defined in [2]) by specifying output-id in its "clock"
+phandle cell.
+[2] include/dt-bindings/clock/microchip,pic32-clock.h
+
+For example for UART2:
+uart2: serial@2 {
+	compatible = "microchip,pic32mzda-uart";
+	reg = <>;
+	interrupts = <>;
+	clocks = <&rootclk PB2CLK>;
+};
diff --git a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
index e0fc2c1..241fb05 100644
--- a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
+++ b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
@@ -3,7 +3,7 @@
 The PPL controller provides the 3 main clocks of the SoC: CPU, DDR and AHB.
 
 Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc" and one of the following
+- compatible: has to be "qca,<soctype>-pll" and one of the following
   fallbacks:
   - "qca,ar7100-pll"
   - "qca,ar7240-pll"
@@ -21,8 +21,8 @@
 
 Example:
 
-	memory-controller@18050000 {
-		compatible = "qca,ar9132-ppl", "qca,ar9130-pll";
+	pll-controller@18050000 {
+		compatible = "qca,ar9132-pll", "qca,ar9130-pll";
 		reg = <0x18050000 0x20>;
 
 		clock-names = "ref";
diff --git a/Documentation/devicetree/bindings/crypto/fsl-imx-scc.txt b/Documentation/devicetree/bindings/crypto/fsl-imx-scc.txt
new file mode 100644
index 0000000..7aad448
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/fsl-imx-scc.txt
@@ -0,0 +1,21 @@
+Freescale Security Controller (SCC)
+
+Required properties:
+- compatible : Should be "fsl,imx25-scc".
+- reg : Should contain register location and length.
+- interrupts : Should contain interrupt numbers for SCM IRQ and SMN IRQ.
+- interrupt-names : Should specify the names "scm" and "smn" for the
+		    SCM IRQ and SMN IRQ.
+- clocks: Should contain the clock driving the SCC core.
+- clock-names: Should be set to "ipg".
+
+Example:
+
+	scc: crypto@53fac000 {
+		compatible = "fsl,imx25-scc";
+		reg = <0x53fac000 0x4000>;
+		clocks = <&clks 111>;
+		clock-names = "ipg";
+		interrupts = <49>, <50>;
+		interrupt-names = "scm", "smn";
+	};
diff --git a/Documentation/devicetree/bindings/crypto/samsung-sss.txt b/Documentation/devicetree/bindings/crypto/samsung-sss.txt
index a6dafa8..7a5ca56 100644
--- a/Documentation/devicetree/bindings/crypto/samsung-sss.txt
+++ b/Documentation/devicetree/bindings/crypto/samsung-sss.txt
@@ -23,10 +23,8 @@
   - "samsung,exynos4210-secss" for Exynos4210, Exynos4212, Exynos4412, Exynos5250,
 		Exynos5260 and Exynos5420 SoCs.
 - reg : Offset and length of the register set for the module
-- interrupts : interrupt specifiers of SSS module interrupts, should contain
-		following entries:
-		- first : feed control interrupt (required for all variants),
-		- second : hash interrupt (required only for samsung,s5pv210-secss).
+- interrupts : interrupt specifiers of SSS module interrupts (one feed
+		control interrupt).
 
 - clocks : list of clock phandle and specifier pairs for all clocks  listed in
 		clock-names property.
diff --git a/Documentation/devicetree/bindings/devfreq/event/exynos-nocp.txt b/Documentation/devicetree/bindings/devfreq/event/exynos-nocp.txt
new file mode 100644
index 0000000..fd459f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/event/exynos-nocp.txt
@@ -0,0 +1,26 @@
+
+* Samsung Exynos NoC (Network on Chip) Probe device
+
+The Samsung Exynos542x SoC has NoC (Network on Chip) Probes on its NoC bus.
+The NoC provides the primitive values used to derive performance data. The
+packets that the NoC probes detect are transported over the network
+infrastructure to observer units. You can configure probes to capture
+packets with header or data on the data request/response network, or as
+traffic debug or statistic collectors. The Exynos542x bus has multiple NoC
+probes that provide bandwidth information about the behavior of the SoC,
+which you can use while analyzing system performance.
+
+Required properties:
+- compatible: Should be "samsung,exynos5420-nocp"
+- reg: physical base address of each NoC Probe and length of memory mapped region.
+
+Optional properties:
+- clock-names : the name of clock used by the NoC Probe, "nocp"
+- clocks : phandles for clock specified in "clock-names" property
+
+Example : NoC Probe nodes in Device Tree are listed below.
+
+	nocp_mem0_0: nocp@10CA1000 {
+		compatible = "samsung,exynos5420-nocp";
+		reg = <0x10CA1000 0x200>;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/exynos-bus.txt b/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
new file mode 100644
index 0000000..d3ec8e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
@@ -0,0 +1,409 @@
+* Generic Exynos Bus frequency device
+
+The Samsung Exynos SoC has many buses for data transfer between DRAM
+and the sub-blocks in the SoC. Most Exynos SoCs share a common bus
+architecture. Generally, each bus of an Exynos SoC includes a source clock
+and a power line, which make it possible to change the clock frequency
+of the bus at runtime. To monitor the usage of each bus at runtime, the
+driver uses the PPMU (Platform Performance Monitoring Unit), which is
+able to measure the current load of the sub-blocks.
+
+The Exynos SoC includes various sub-blocks, each with its own AXI bus.
+Each AXI bus has its own source clock, but not necessarily its own power
+line; a power line may be shared among two or more sub-blocks. The bus
+devices can therefore be divided into two types according to the role of
+each sub-block:
+- parent bus device
+- passive bus device
+
+Basically, parent and passive bus devices share the same power line.
+Only the parent bus device may change the voltage of the shared power line,
+and the remaining bus devices (passive bus devices) depend on the decision
+of the parent bus device. If three blocks share the VDD_xxx power line,
+only one block should be the parent device and the remaining blocks should
+depend on it as passive devices.
+
+	VDD_xxx |--- A block (parent)
+		|--- B block (passive)
+		|--- C block (passive)
+
+The composition differs a little between Exynos SoCs because each Exynos
+SoC has different sub-blocks. Such differences should therefore be specified
+in the devicetree file instead of in each device driver. As a result, this
+driver is able to support bus frequency scaling for all Exynos SoCs.
+
+Required properties for all bus devices:
+- compatible: Should be "samsung,exynos-bus".
+- clock-names : the name of clock used by the bus, "bus".
+- clocks : phandles for clock specified in "clock-names" property.
+- operating-points-v2: the OPP table including frequency/voltage information
+  to support DVFS (Dynamic Voltage/Frequency Scaling) feature.
+
+Required properties only for parent bus device:
+- vdd-supply: the regulator to provide the buses with the voltage.
+- devfreq-events: the devfreq-event device to monitor the current utilization
+  of buses.
+
+Required properties only for passive bus device:
+- devfreq: the parent bus device.
+
+Optional properties only for parent bus device:
+- exynos,saturation-ratio: the percentage value which is used to calibrate
+			the performance count against total cycle count.
+- exynos,voltage-tolerance: the percentage value for bus voltage tolerance
+			which is used to calculate the max voltage.
+
+Detailed correlation between sub-blocks and power line according to Exynos SoC:
+- In case of Exynos3250, there are two power lines as follows:
+	VDD_MIF |--- DMC
+
+	VDD_INT |--- LEFTBUS (parent device)
+		|--- PERIL
+		|--- MFC
+		|--- G3D
+		|--- RIGHTBUS
+		|--- PERIR
+		|--- FSYS
+		|--- LCD0
+		|--- PERIR
+		|--- ISP
+		|--- CAM
+
+- In case of Exynos4210, there is one power line as follows:
+	VDD_INT |--- DMC (parent device)
+		|--- LEFTBUS
+		|--- PERIL
+		|--- MFC(L)
+		|--- G3D
+		|--- TV
+		|--- LCD0
+		|--- RIGHTBUS
+		|--- PERIR
+		|--- MFC(R)
+		|--- CAM
+		|--- FSYS
+		|--- GPS
+		|--- LCD0
+		|--- LCD1
+
+- In case of Exynos4x12, there are two power lines as follows:
+	VDD_MIF |--- DMC
+
+	VDD_INT |--- LEFTBUS (parent device)
+		|--- PERIL
+		|--- MFC(L)
+		|--- G3D
+		|--- TV
+		|--- IMAGE
+		|--- RIGHTBUS
+		|--- PERIR
+		|--- MFC(R)
+		|--- CAM
+		|--- FSYS
+		|--- GPS
+		|--- LCD0
+		|--- ISP
+
+- In case of Exynos5422, there are two power lines as follows:
+	VDD_MIF |--- DREX 0 (parent device, DRAM EXpress controller)
+	        |--- DREX 1
+
+	VDD_INT |--- NoC_Core (parent device)
+		|--- G2D
+		|--- G3D
+		|--- DISP1
+		|--- NoC_WCORE
+		|--- GSCL
+		|--- MSCL
+		|--- ISP
+		|--- MFC
+		|--- GEN
+		|--- PERIS
+		|--- PERIC
+		|--- FSYS
+		|--- FSYS2
+
+Example1:
+	Show the AXI buses of the Exynos3250 SoC. Exynos3250 groups the buses by
+	power line (regulator). The MIF (Memory Interface) AXI bus is used to
+	transfer data between DRAM and CPU and uses the VDD_MIF regulator.
+
+	- MIF (Memory Interface) block
+	: VDD_MIF |--- DMC (Dynamic Memory Controller)
+
+	- INT (Internal) block
+	: VDD_INT |--- LEFTBUS (parent device)
+		  |--- PERIL
+		  |--- MFC
+		  |--- G3D
+		  |--- RIGHTBUS
+		  |--- FSYS
+		  |--- LCD0
+		  |--- PERIR
+		  |--- ISP
+		  |--- CAM
+
+	- MIF bus's frequency/voltage table
+	-----------------------
+	|Lv| Freq   | Voltage |
+	-----------------------
+	|L1| 50000  |800000   |
+	|L2| 100000 |800000   |
+	|L3| 134000 |800000   |
+	|L4| 200000 |825000   |
+	|L5| 400000 |875000   |
+	-----------------------
+
+	- INT bus's frequency/voltage table
+	----------------------------------------------------------
+	|Block|LEFTBUS|RIGHTBUS|MCUISP |ISP    |PERIL  ||VDD_INT |
+	| name|       |LCD0    |       |       |       ||        |
+	|     |       |FSYS    |       |       |       ||        |
+	|     |       |MFC     |       |       |       ||        |
+	----------------------------------------------------------
+	|Mode |*parent|passive |passive|passive|passive||        |
+	----------------------------------------------------------
+	|Lv   |Frequency                               ||Voltage |
+	----------------------------------------------------------
+	|L1   |50000  |50000   |50000  |50000  |50000  ||900000  |
+	|L2   |80000  |80000   |80000  |80000  |80000  ||900000  |
+	|L3   |100000 |100000  |100000 |100000 |100000 ||1000000 |
+	|L4   |134000 |134000  |200000 |200000 |       ||1000000 |
+	|L5   |200000 |200000  |400000 |300000 |       ||1000000 |
+	----------------------------------------------------------
+
+Example2 :
+	The bus of DMC (Dynamic Memory Controller) block in exynos3250.dtsi
+	is listed below:
+
+	bus_dmc: bus_dmc {
+		compatible = "samsung,exynos-bus";
+		clocks = <&cmu_dmc CLK_DIV_DMC>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_dmc_opp_table>;
+		status = "disabled";
+	};
+
+	bus_dmc_opp_table: opp_table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@50000000 {
+			opp-hz = /bits/ 64 <50000000>;
+			opp-microvolt = <800000>;
+		};
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+			opp-microvolt = <800000>;
+		};
+		opp@134000000 {
+			opp-hz = /bits/ 64 <134000000>;
+			opp-microvolt = <800000>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+			opp-microvolt = <825000>;
+		};
+		opp@400000000 {
+			opp-hz = /bits/ 64 <400000000>;
+			opp-microvolt = <875000>;
+		};
+	};
+
+	bus_leftbus: bus_leftbus {
+		compatible = "samsung,exynos-bus";
+		clocks = <&cmu CLK_DIV_GDL>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_rightbus: bus_rightbus {
+		compatible = "samsung,exynos-bus";
+		clocks = <&cmu CLK_DIV_GDR>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_lcd0: bus_lcd0 {
+		compatible = "samsung,exynos-bus";
+		clocks = <&cmu CLK_DIV_ACLK_160>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_fsys: bus_fsys {
+		compatible = "samsung,exynos-bus";
+		clocks = <&cmu CLK_DIV_ACLK_200>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_mcuisp: bus_mcuisp {
+		compatible = "samsung,exynos-bus";
+		clocks = <&cmu CLK_DIV_ACLK_400_MCUISP>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_mcuisp_opp_table>;
+		status = "disabled";
+	};
+
+	bus_isp: bus_isp {
+		compatible = "samsung,exynos-bus";
+		clocks = <&cmu CLK_DIV_ACLK_266>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_isp_opp_table>;
+		status = "disabled";
+	};
+
+	bus_peril: bus_peril {
+		compatible = "samsung,exynos-bus";
+		clocks = <&cmu CLK_DIV_ACLK_100>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_peril_opp_table>;
+		status = "disabled";
+	};
+
+	bus_mfc: bus_mfc {
+		compatible = "samsung,exynos-bus";
+		clocks = <&cmu CLK_SCLK_MFC>;
+		clock-names = "bus";
+		operating-points-v2 = <&bus_leftbus_opp_table>;
+		status = "disabled";
+	};
+
+	bus_leftbus_opp_table: opp_table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@50000000 {
+			opp-hz = /bits/ 64 <50000000>;
+			opp-microvolt = <900000>;
+		};
+		opp@80000000 {
+			opp-hz = /bits/ 64 <80000000>;
+			opp-microvolt = <900000>;
+		};
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+			opp-microvolt = <1000000>;
+		};
+		opp@134000000 {
+			opp-hz = /bits/ 64 <134000000>;
+			opp-microvolt = <1000000>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+			opp-microvolt = <1000000>;
+		};
+	};
+
+	bus_mcuisp_opp_table: opp_table2 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@50000000 {
+			opp-hz = /bits/ 64 <50000000>;
+		};
+		opp@80000000 {
+			opp-hz = /bits/ 64 <80000000>;
+		};
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+		opp@400000000 {
+			opp-hz = /bits/ 64 <400000000>;
+		};
+	};
+
+	bus_isp_opp_table: opp_table3 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@50000000 {
+			opp-hz = /bits/ 64 <50000000>;
+		};
+		opp@80000000 {
+			opp-hz = /bits/ 64 <80000000>;
+		};
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+		};
+		opp@300000000 {
+			opp-hz = /bits/ 64 <300000000>;
+		};
+	};
+
+	bus_peril_opp_table: opp_table4 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@50000000 {
+			opp-hz = /bits/ 64 <50000000>;
+		};
+		opp@80000000 {
+			opp-hz = /bits/ 64 <80000000>;
+		};
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+		};
+	};
+
+
+	A usage example that handles the frequency and voltage of the buses at
+	runtime in exynos3250-rinato.dts is listed below:
+
+	&bus_dmc {
+		devfreq-events = <&ppmu_dmc0_3>, <&ppmu_dmc1_3>;
+		vdd-supply = <&buck1_reg>;	/* VDD_MIF */
+		status = "okay";
+	};
+
+	&bus_leftbus {
+		devfreq-events = <&ppmu_leftbus_3>, <&ppmu_rightbus_3>;
+		vdd-supply = <&buck3_reg>;
+		status = "okay";
+	};
+
+	&bus_rightbus {
+		devfreq = <&bus_leftbus>;
+		status = "okay";
+	};
+
+	&bus_lcd0 {
+		devfreq = <&bus_leftbus>;
+		status = "okay";
+	};
+
+	&bus_fsys {
+		devfreq = <&bus_leftbus>;
+		status = "okay";
+	};
+
+	&bus_mcuisp {
+		devfreq = <&bus_leftbus>;
+		status = "okay";
+	};
+
+	&bus_isp {
+		devfreq = <&bus_leftbus>;
+		status = "okay";
+	};
+
+	&bus_peril {
+		devfreq = <&bus_leftbus>;
+		status = "okay";
+	};
+
+	&bus_mfc {
+		devfreq = <&bus_leftbus>;
+		status = "okay";
+	};
diff --git a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
index 1396078..baf9b34 100644
--- a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
+++ b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
@@ -12,6 +12,10 @@
 - reg: Should contain DMA registers location and length.
 - interrupts: Should contain the DMA interrupts associated
 		to the DMA channels in ascending order.
+- interrupt-names: Should contain the names of the interrupts
+		   in the form "dmaXX".
+		   Use "dma-shared-all" for the common interrupt line
+		   that is shared by all dma channels.
 - #dma-cells: Must be <1>, the cell in the dmas property of the
 		client device represents the DREQ number.
 - brcm,dma-channel-mask: Bit mask representing the channels
@@ -34,13 +38,35 @@
 		     <1 24>,
 		     <1 25>,
 		     <1 26>,
+		     /* dma channel 11-14 share one irq */
 		     <1 27>,
+		     <1 27>,
+		     <1 27>,
+		     <1 27>,
+		     /* unused shared irq for all channels */
 		     <1 28>;
+	interrupt-names = "dma0",
+			  "dma1",
+			  "dma2",
+			  "dma3",
+			  "dma4",
+			  "dma5",
+			  "dma6",
+			  "dma7",
+			  "dma8",
+			  "dma9",
+			  "dma10",
+			  "dma11",
+			  "dma12",
+			  "dma13",
+			  "dma14",
+			  "dma-shared-all";
 
 	#dma-cells = <1>;
 	brcm,dma-channel-mask = <0x7f35>;
 };
 
+
 DMA clients connected to the BCM2835 DMA controller must use the format
 described in the dma.txt file, using a two-cell specifier for each channel.
 
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index dc8d3aa..175f0e4 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -58,6 +58,15 @@
 	1	Medium
 	2	Low
 
+Optional properties:
+
+- gpr : The phandle to the General Purpose Register (GPR) node.
+- fsl,sdma-event-remap : Register bits of sdma event remap, the format is
+  <reg shift val>.
+    reg is the GPR register offset.
+    shift is the bit position inside the GPR register.
+    val is the value of the bit (0 or 1).
+
 Examples:
 
 sdma@83fb0000 {
@@ -83,3 +92,21 @@
 	dma-names = "rx", "tx";
 	fsl,fifo-depth = <15>;
 };
+
+Using the fsl,sdma-event-remap property:
+
+If we want to use SDMA on the SAI1 port on a MX6SX:
+
+&sdma {
+	gpr = <&gpr>;
+	/* SDMA events remap for SAI1_RX and SAI1_TX */
+	fsl,sdma-event-remap = <0 15 1>, <0 16 1>;
+};
+
+The fsl,sdma-event-remap property in this case has two values:
+- <0 15 1> means that the offset is 0, so GPR0 is the register of the
+SDMA remap. Bit 15 of GPR0 selects between UART4_RX and SAI1_RX.
+Setting bit 15 to 1 selects SAI1_RX.
+- <0 16 1> means that the offset is 0, so GPR0 is the register of the
+SDMA remap. Bit 16 of GPR0 selects between UART4_TX and SAI1_TX.
+Setting bit 16 to 1 selects SAI1_TX.
diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt
index 276ef81..c075f59 100644
--- a/Documentation/devicetree/bindings/dma/mv-xor.txt
+++ b/Documentation/devicetree/bindings/dma/mv-xor.txt
@@ -1,7 +1,10 @@
 * Marvell XOR engines
 
 Required properties:
-- compatible: Should be "marvell,orion-xor" or "marvell,armada-380-xor"
+- compatible: Should be one of the following:
+  - "marvell,orion-xor"
+  - "marvell,armada-380-xor"
+  - "marvell,armada-3700-xor".
 - reg: Should contain registers location and length (two sets)
     the first set is the low registers, the second set the high
     registers for the XOR engine.
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
new file mode 100644
index 0000000..1e1dc8f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
@@ -0,0 +1,55 @@
+* NVIDIA Tegra Audio DMA (ADMA) controller
+
+The Tegra Audio DMA controller is used for transferring data
+between system memory and the Audio Processing Engine (APE).
+
+Required properties:
+- compatible: Must be "nvidia,tegra210-adma".
+- reg: Should contain DMA registers location and length. This should be
+  a single entry that includes all of the per-channel registers in one
+  contiguous bank.
+- interrupt-parent: Phandle to the interrupt parent controller.
+- interrupts: Should contain all of the per-channel DMA interrupts in
+  ascending order with respect to the DMA channel index.
+- clocks: Must contain one entry for the ADMA module clock
+  (TEGRA210_CLK_D_AUDIO).
+- clock-names: Must contain the name "d_audio" for the corresponding
+  'clocks' entry.
+- #dma-cells : Must be 1. The first cell denotes the receive/transmit
+  request number and should be between 1 and the maximum number of
+  requests supported. This value corresponds to the RX/TX_REQUEST_SELECT
+  fields in the ADMA_CHn_CTRL register.
+
+
+Example:
+
+adma: dma@702e2000 {
+	compatible = "nvidia,tegra210-adma";
+	reg = <0x0 0x702e2000 0x0 0x2000>;
+	interrupt-parent = <&tegra_agic>;
+	interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&tegra_car TEGRA210_CLK_D_AUDIO>;
+	clock-names = "d_audio";
+	#dma-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
index 1c9d48e..9cbf5d9 100644
--- a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
@@ -13,6 +13,8 @@
 - clock-names: must contain "bam_clk" entry
 - qcom,ee : indicates the active Execution Environment identifier (0-7) used in
   the secure world.
+- qcom,controlled-remotely : optional, indicates that the bam is controlled by
+  a remote processor, i.e. an execution environment.
 
 Example:
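
A minimal sketch of a BAM node using the new flag; the compatible string is
one already listed in this binding, while the register range, interrupt,
clock specifier and qcom,ee value below are illustrative placeholders:

	dma@8e04000 {
		compatible = "qcom,bam-v1.4.0";
		reg = <0x8e04000 0x15000>;
		interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&gcc GCC_CE1_AHB_CLK>;
		clock-names = "bam_clk";
		#dma-cells = <1>;
		qcom,ee = <0>;
		qcom,controlled-remotely;
	};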
 
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index c261598..0f55832 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -13,6 +13,11 @@
 - chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1:
   increase from chan n->0
 - block_size: Maximum block size supported by the controller
+- data-width: Maximum data width supported by hardware per AHB master
+  (in bytes, power of 2)
+
+
+Deprecated properties:
 - data_width: Maximum data width supported by hardware per AHB master
   (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
 
@@ -38,7 +43,7 @@
 		chan_allocation_order = <1>;
 		chan_priority = <1>;
 		block_size = <0xfff>;
-		data_width = <3 3>;
+		data-width = <8 8>;
 	};
 
 DMA clients connected to the Designware DMA controller must use the format
@@ -47,8 +52,8 @@
 
 1. A phandle pointing to the DMA controller
 2. The DMA request line number
-3. Source master for transfers on allocated channel
-4. Destination master for transfers on allocated channel
+3. Memory master for transfers on allocated channel
+4. Peripheral master for transfers on allocated channel
 
 Example:
 	
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt
index e4c4d47..a1f2683 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_vdma.txt
@@ -3,18 +3,44 @@
 as two channels, one is to transmit to the video device and another is
 to receive from the video device.
 
+The Xilinx AXI DMA engine performs transfers between memory and AXI4-Stream
+target devices. It can be configured to have one channel or two channels.
+If configured as two channels, one is used to transmit to the device and the
+other to receive from the device.
+
+The Xilinx AXI CDMA engine performs transfers between a memory-mapped source
+address and a memory-mapped destination address.
+
 Required properties:
-- compatible: Should be "xlnx,axi-vdma-1.00.a"
+- compatible: Should be "xlnx,axi-vdma-1.00.a", "xlnx,axi-dma-1.00.a" or
+	      "xlnx,axi-cdma-1.00.a"
 - #dma-cells: Should be <1>, see "dmas" property below
 - reg: Should contain VDMA registers location and length.
-- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
+- xlnx,addrwidth: Should be the vdma addressing size in bits (e.g. 32 bits).
+- dma-ranges: Should be specified as <dma_addr cpu_addr max_len>.
 - dma-channel child node: Should have at least one channel and can have up to
 	two channels per device. This node specifies the properties of each
 	DMA channel (see child node properties below).
+- clocks: Input clock specifier. Refer to common clock bindings.
+- clock-names: List of input clocks
+	For VDMA:
+	Required elements: "s_axi_lite_aclk"
+	Optional elements: "m_axi_mm2s_aclk" "m_axi_s2mm_aclk",
+			   "m_axis_mm2s_aclk", "s_axis_s2mm_aclk"
+	For CDMA:
+	Required elements: "s_axi_lite_aclk", "m_axi_aclk"
+	For AXIDMA:
+	Required elements: "s_axi_lite_aclk"
+	Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
+			   "m_axi_sg_aclk"
+
+Required properties for VDMA:
+- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
 
 Optional properties:
 - xlnx,include-sg: Tells configured for Scatter-mode in
 	the hardware.
+Optional properties for VDMA:
 - xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
 	It takes following values:
 	{1}, flush both channels
@@ -31,6 +57,7 @@
 Optional child node properties:
 - xlnx,include-dre: Tells hardware is configured for Data
 	Realignment Engine.
+Optional child node properties for VDMA:
 - xlnx,genlock-mode: Tells Genlock synchronization is
 	enabled/disabled in hardware.
 
@@ -41,8 +68,13 @@
 	compatible = "xlnx,axi-vdma-1.00.a";
 	#dma_cells = <1>;
 	reg = < 0x40030000 0x10000 >;
+	dma-ranges = <0x00000000 0x00000000 0x40000000>;
 	xlnx,num-fstores = <0x8>;
 	xlnx,flush-fsync = <0x1>;
+	xlnx,addrwidth = <0x20>;
+	clocks = <&clk 0>, <&clk 1>, <&clk 2>, <&clk 3>, <&clk 4>;
+	clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
+		      "m_axis_mm2s_aclk", "s_axis_s2mm_aclk";
 	dma-channel@40030000 {
 		compatible = "xlnx,axi-vdma-mm2s-channel";
 		interrupts = < 0 54 4 >;
diff --git a/Documentation/devicetree/bindings/gpio/gpio-74x164.txt b/Documentation/devicetree/bindings/gpio/gpio-74x164.txt
index cc26080..ce1b223 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-74x164.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-74x164.txt
@@ -1,7 +1,9 @@
 * Generic 8-bits shift register GPIO driver
 
 Required properties:
-- compatible : Should be "fairchild,74hc595"
+- compatible: Should contain one of the following:
+    "fairchild,74hc595"
+    "nxp,74lvc594"
 - reg : chip select number
 - gpio-controller : Marks the device node as a gpio controller.
 - #gpio-cells : Should be two.  The first cell is the pin number and
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
index 120bc49..4b6cc63 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
@@ -1,9 +1,10 @@
-* Freescale MPC512x/MPC8xxx/Layerscape GPIO controller
+* Freescale MPC512x/MPC8xxx/QorIQ/Layerscape GPIO controller
 
 Required properties:
 - compatible : Should be "fsl,<soc>-gpio"
   The following <soc>s are known to be supported:
-    mpc5121, mpc5125, mpc8349, mpc8572, mpc8610, pq3, qoriq.
+	mpc5121, mpc5125, mpc8349, mpc8572, mpc8610, pq3, qoriq,
+	ls1021a, ls1043a, ls2080a.
 - reg : Address and length of the register set for the device
 - interrupts : Should be the port interrupt shared by all 32 pins.
 - #gpio-cells : Should be two.  The first cell is the pin number and
@@ -15,7 +16,7 @@
 - little-endian : GPIO registers are used as little endian. If not
                   present registers are used as big endian by default.
 
-Example:
+Example of gpio-controller node for a mpc5125 SoC:
 
 gpio0: gpio@1100 {
 	compatible = "fsl,mpc5125-gpio";
@@ -24,3 +25,16 @@
 	interrupts = <78 0x8>;
 	status = "okay";
 };
+
+Example of gpio-controller node for a ls2080a SoC:
+
+gpio0: gpio@2300000 {
+	compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
+	reg = <0x0 0x2300000 0x0 0x10000>;
+	interrupts = <0 36 0x4>; /* Level high type */
+	gpio-controller;
+	little-endian;
+	#gpio-cells = <2>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xlp.txt b/Documentation/devicetree/bindings/gpio/gpio-xlp.txt
index 262ee4d..28662d8 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-xlp.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-xlp.txt
@@ -3,6 +3,8 @@
 
 This GPIO driver is used for following Netlogic XLP SoCs:
 	XLP832, XLP316, XLP208, XLP980, XLP532
+This GPIO driver is also compatible with the GPIO controller found on
+Broadcom Vulcan ARM64 SoCs.
 
 Required properties:
 -------------------
@@ -13,6 +15,7 @@
   - "netlogic,xlp208-gpio": For Netlogic XLP208
   - "netlogic,xlp980-gpio": For Netlogic XLP980
   - "netlogic,xlp532-gpio": For Netlogic XLP532
+  - "brcm,vulcan-gpio": For Broadcom Vulcan ARM64
 - reg: Physical base address and length of the controller's registers.
 - #gpio-cells: Should be two. The first cell is the pin number and the second
   cell is used to specify optional parameters (currently unused).
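
A sketch of a Broadcom Vulcan node using the new compatible; the unit address
and register range are placeholders, not values taken from a real board:

	gpio: gpio@30000000 {
		compatible = "brcm,vulcan-gpio";
		reg = <0x0 0x30000000 0x0 0x100>;
		gpio-controller;
		#gpio-cells = <2>;
	};
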
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index 069cdf6..68d28f6 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -131,6 +131,13 @@
 property, and a #gpio-cells integer property, which indicates the number of
 cells in a gpio-specifier.
 
+Some system-on-chips (SoCs) use the concept of GPIO banks. A GPIO bank is an
+instance of a hardware IP core on a silicon die, usually exposed to the
+programmer as a coherent range of I/O addresses. Usually each such bank is
+exposed in the device tree as an individual gpio-controller node, reflecting
+the fact that the hardware was synthesized by reusing the same IP block a
+few times over.
+
 Optionally, a GPIO controller may have a "ngpios" property. This property
 indicates the number of in-use slots of available slots for GPIOs. The
 typical example is something like this: the hardware register is 32 bits
@@ -145,6 +152,21 @@
 and which are dummies. The bindings for this case has not yet been
 specified, but should be specified if/when such hardware appears.
 
+Optionally, a GPIO controller may have a "gpio-line-names" property. This is
+an array of strings defining the names of the GPIO lines going out of the
+GPIO controller. This name should be the most meaningful producer name
+for the system, such as a rail name indicating the usage. Package names
+such as pin name are discouraged: such lines have opaque names (since they
+are by definition generic purpose) and such names are usually not very
+helpful. For example "MMC-CD", "Red LED Vdd" and "ethernet reset" are
+reasonable line names as they describe what the line is used for. "GPIO0"
+is not a good name to give to a GPIO line. Placeholders are discouraged:
+rather use the "" (blank string) if the use of the GPIO line is undefined
+in your design. The names are assigned starting from line offset 0 from
+left to right from the passed array. An incomplete array (where the number
+of passed names is less than ngpios) will still be used up until the last
+provided valid line index.
+
 Example:
 
 gpio-controller@00000000 {
@@ -153,6 +175,10 @@
 	gpio-controller;
 	#gpio-cells = <2>;
 	ngpios = <18>;
+	gpio-line-names = "MMC-CD", "MMC-WP", "VDD eth", "RST eth", "LED R",
+		"LED G", "LED B", "Col A", "Col B", "Col C", "Col D",
+		"Row A", "Row B", "Row C", "Row D", "NMI button",
+		"poweroff", "reset";
 }
 
 The GPIO chip may contain GPIO hog definitions. GPIO hogging is a mechanism
diff --git a/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt b/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt
new file mode 100644
index 0000000..c82a2e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/nvidia,tegra186-gpio.txt
@@ -0,0 +1,161 @@
+NVIDIA Tegra186 GPIO controllers
+
+Tegra186 contains two GPIO controllers; a main controller and an "AON"
+controller. This binding document applies to both controllers. The register
+layouts for the controllers share many similarities, but also some significant
+differences. Hence, this document describes closely related but different
+bindings and compatible values.
+
+The Tegra186 GPIO controller allows software to set the IO direction of, and
+read/write the value of, numerous GPIO signals. Routing of GPIO signals to
+package balls is under the control of a separate pin controller HW block. Two
+major sets of registers exist:
+
+a) Security registers, which allow configuration of allowed access to the GPIO
+register set. These registers exist in a single contiguous block of physical
+address space. The size of this block, and the security features available,
+varies between the different GPIO controllers.
+
+Access to this set of registers is not necessary in all circumstances. Code
+that wishes to configure access to the GPIO registers needs access to these
+registers to do so. Code which simply wishes to read or write GPIO data does not
+need access to these registers.
+
+b) GPIO registers, which allow manipulation of the GPIO signals. In some GPIO
+controllers, these registers are exposed via multiple "physical aliases" in
+address space, each of which access the same underlying state. See the hardware
+documentation for rationale. Any particular GPIO client is expected to access
+just one of these physical aliases.
+
+Tegra HW documentation describes a unified naming convention for all GPIOs
+implemented by the SoC. Each GPIO is assigned to a port, and a port may control
+a number of GPIOs. Thus, each GPIO is named according to an alphabetical port
+name and an integer GPIO name within the port. For example, GPIO_PA0, GPIO_PN6,
+or GPIO_PCC3.
+
+The number of ports implemented by each GPIO controller varies. The number of
+implemented GPIOs within each port varies. GPIO registers within a controller
+are grouped and laid out according to the port they affect.
+
+The mapping from port name to the GPIO controller that implements that port, and
+the mapping from port name to register offset within a controller, are both
+extremely non-linear. The header file <dt-bindings/gpio/tegra186-gpio.h>
+describes the port-level mapping. In that file, the naming convention for ports
+matches the HW documentation. The values chosen for the names are alphabetically
+sorted within a particular controller. Drivers need to map between the DT GPIO
+IDs and HW register offsets using a lookup table.
+
+Each GPIO controller can generate a number of interrupt signals. Each signal
+represents the aggregate status for all GPIOs within a set of ports. Thus, the
+number of interrupt signals generated by a controller varies as a rough function
+of the number of ports it implements. Note that the HW documentation refers to
+both the overall controller HW module and the sets-of-ports as "controllers".
+
+Each GPIO controller in fact generates multiple interrupt signals for each set
+of ports. Each GPIO may be configured to feed into a specific one of the
+interrupt signals generated by a set-of-ports. The intent is for each generated
+signal to be routed to a different CPU, thus allowing different CPUs to each
+handle subsets of the interrupts within a port. The status of each of these
+per-port-set signals is reported via a separate register. Thus, a driver needs
+to know which status register to observe. This binding currently defines no
+configuration mechanism for this. By default, drivers should use register
+GPIO_${port}_INTERRUPT_STATUS_G1_0. Future revisions to the binding could
+define a property to configure this.
+
+Required properties:
+- compatible
+    Array of strings.
+    One of:
+    - "nvidia,tegra186-gpio".
+    - "nvidia,tegra186-gpio-aon".
+- reg-names
+    Array of strings.
+    Contains a list of names for the register spaces described by the reg
+    property. May contain the following entries, in any order:
+    - "gpio": Mandatory. GPIO control registers. This may cover either:
+        a) The single physical alias that this OS should use.
+        b) All physical aliases that exist in the controller. This is
+           appropriate when the OS is responsible for managing assignment of
+           the physical aliases.
+    - "security": Optional. Security configuration registers.
+    Users of this binding MUST look up entries in the reg property by name,
+    using this reg-names property to do so.
+- reg
+    Array of (physical base address, length) tuples.
+    Must contain one entry per entry in the reg-names property, in a matching
+    order.
+- interrupts
+    Array of interrupt specifiers.
+    The interrupt outputs from the HW block, one per set of ports, in the
+    order the HW manual describes them. The number of entries required varies
+    depending on compatible value:
+    - "nvidia,tegra186-gpio": 6 entries.
+    - "nvidia,tegra186-gpio-aon": 1 entry.
+- gpio-controller
+    Boolean.
+    Marks the device node as a GPIO controller/provider.
+- #gpio-cells
+    Single-cell integer.
+    Must be <2>.
+    Indicates how many cells are used in a consumer's GPIO specifier.
+    In the specifier:
+    - The first cell is the pin number.
+        See <dt-bindings/gpio/tegra186-gpio.h>.
+    - The second cell contains flags:
+        - Bit 0 specifies polarity
+            - 0: Active-high (normal).
+            - 1: Active-low (inverted).
+- interrupt-controller
+    Boolean.
+    Marks the device node as an interrupt controller/provider.
+- #interrupt-cells
+    Single-cell integer.
+    Must be <2>.
+    Indicates how many cells are used in a consumer's interrupt specifier.
+    In the specifier:
+    - The first cell is the GPIO number.
+        See <dt-bindings/gpio/tegra186-gpio.h>.
+    - The second cell contains flags:
+        - Bits [3:0] indicate trigger type and level:
+            - 1: Low-to-high edge triggered.
+            - 2: High-to-low edge triggered.
+            - 4: Active high level-sensitive.
+            - 8: Active low level-sensitive.
+            Valid combinations are 1, 2, 3, 4, 8.
+
+Example:
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+gpio@2200000 {
+	compatible = "nvidia,tegra186-gpio";
+	reg-names = "security", "gpio";
+	reg =
+		<0x0 0x2200000 0x0 0x10000>,
+		<0x0 0x2210000 0x0 0x10000>;
+	interrupts =
+		<0 47 IRQ_TYPE_LEVEL_HIGH>,
+		<0 50 IRQ_TYPE_LEVEL_HIGH>,
+		<0 53 IRQ_TYPE_LEVEL_HIGH>,
+		<0 56 IRQ_TYPE_LEVEL_HIGH>,
+		<0 59 IRQ_TYPE_LEVEL_HIGH>,
+		<0 180 IRQ_TYPE_LEVEL_HIGH>;
+	gpio-controller;
+	#gpio-cells = <2>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+};
+
+gpio@c2f0000 {
+	compatible = "nvidia,tegra186-gpio-aon";
+	reg-names = "security", "gpio";
+	reg =
+		<0x0 0xc2f0000 0x0 0x1000>,
+		<0x0 0xc2f1000 0x0 0x1000>;
+	interrupts =
+		<0 60 IRQ_TYPE_LEVEL_HIGH>;
+	gpio-controller;
+	#gpio-cells = <2>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/wd,mbl-gpio.txt b/Documentation/devicetree/bindings/gpio/wd,mbl-gpio.txt
new file mode 100644
index 0000000..038c3a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/wd,mbl-gpio.txt
@@ -0,0 +1,38 @@
+Bindings for Western Digital's MyBook Live memory-mapped GPIO controllers.
+
+The Western Digital MyBook Live has two memory-mapped GPIO controllers.
+Both GPIO controllers only have a single 8-bit data register, where GPIO
+state can be read and/or written.
+
+Required properties:
+	- compatible: should be "wd,mbl-gpio"
+	- reg-names: must contain
+		"dat" - data register
+	- reg: address + size pairs describing the GPIO register sets;
+		order must correspond with the order of entries in reg-names
+	- #gpio-cells: must be set to 2. The first cell is the pin number and
+			the second cell is used to specify the gpio polarity:
+			0 = active high
+			1 = active low
+	- gpio-controller: Marks the device node as a gpio controller.
+
+Optional properties:
+	- no-output: GPIOs are read-only.
+
+Examples:
+	gpio0: gpio0@e0000000 {
+		compatible = "wd,mbl-gpio";
+		reg-names = "dat";
+		reg = <0xe0000000 0x1>;
+		#gpio-cells = <2>;
+		gpio-controller;
+	};
+
+	gpio1: gpio1@e0100000 {
+		compatible = "wd,mbl-gpio";
+		reg-names = "dat";
+		reg = <0xe0100000 0x1>;
+		#gpio-cells = <2>;
+		gpio-controller;
+		no-output;
+	};
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt b/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt
index 23bfe8e1f..ff3db65 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt
@@ -1,9 +1,10 @@
-NVIDIA GK20A Graphics Processing Unit
+NVIDIA Tegra Graphics Processing Units
 
 Required properties:
-- compatible: "nvidia,<chip>-<gpu>"
+- compatible: "nvidia,<gpu>"
   Currently recognized values:
-  - nvidia,tegra124-gk20a
+  - nvidia,gk20a
+  - nvidia,gm20b
 - reg: Physical base address and length of the controller's registers.
   Must contain two entries:
   - first entry for bar0
@@ -19,14 +20,20 @@
 - clock-names: Must include the following entries:
   - gpu
   - pwr
+If the compatible string is "nvidia,gm20b", then the following clock
+is also required:
+  - ref
 - resets: Must contain an entry for each entry in reset-names.
   See ../reset/reset.txt for details.
 - reset-names: Must include the following entries:
   - gpu
 
-Example:
+Optional properties:
+- iommus: A reference to the IOMMU. See ../iommu/iommu.txt for details.
 
-	gpu@0,57000000 {
+Example for GK20A:
+
+	gpu@57000000 {
 		compatible = "nvidia,gk20a";
 		reg = <0x0 0x57000000 0x0 0x01000000>,
 		      <0x0 0x58000000 0x0 0x01000000>;
@@ -39,5 +46,25 @@
 		clock-names = "gpu", "pwr";
 		resets = <&tegra_car 184>;
 		reset-names = "gpu";
+		iommus = <&mc TEGRA_SWGROUP_GPU>;
+		status = "disabled";
+	};
+
+Example for GM20B:
+
+	gpu@57000000 {
+		compatible = "nvidia,gm20b";
+		reg = <0x0 0x57000000 0x0 0x01000000>,
+		      <0x0 0x58000000 0x0 0x01000000>;
+		interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "stall", "nonstall";
+		clocks = <&tegra_car TEGRA210_CLK_GPU>,
+			 <&tegra_car TEGRA210_CLK_PLL_P_OUT5>,
+			 <&tegra_car TEGRA210_CLK_PLL_G_REF>;
+		clock-names = "gpu", "pwr", "ref";
+		resets = <&tegra_car 184>;
+		reset-names = "gpu";
+		iommus = <&mc TEGRA_SWGROUP_GPU>;
 		status = "disabled";
 	};
diff --git a/Documentation/devicetree/bindings/hwmon/ltc2978.txt b/Documentation/devicetree/bindings/hwmon/ltc2978.txt
index a7afbf6..bf2a47b 100644
--- a/Documentation/devicetree/bindings/hwmon/ltc2978.txt
+++ b/Documentation/devicetree/bindings/hwmon/ltc2978.txt
@@ -13,6 +13,7 @@
   * "lltc,ltc3886"
   * "lltc,ltc3887"
   * "lltc,ltm2987"
+  * "lltc,ltm4675"
   * "lltc,ltm4676"
 - reg: I2C slave address
 
diff --git a/Documentation/devicetree/bindings/i2c/i2c-octeon.txt b/Documentation/devicetree/bindings/i2c/i2c-octeon.txt
index dced82e..872d485 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-octeon.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-octeon.txt
@@ -4,6 +4,12 @@
 
   Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs.
 
+  or
+
+  compatible: "cavium,octeon-7890-twsi"
+
+  Compatibility with cn78XX SOCs.
+
 - reg: The base address of the TWSI/I2C bus controller register bank.
 
 - #address-cells: Must be <1>.
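
A sketch of a TWSI node using the new cn78XX compatible; the unit address,
register range and bus frequency below are placeholders:

	twsi0: i2c@1180000001000 {
		compatible = "cavium,octeon-7890-twsi";
		reg = <0x11800 0x00001000 0x0 0x200>;
		#address-cells = <1>;
		#size-cells = <0>;
		clock-frequency = <100000>;
	};
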
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
index cf8bfc9..5f0cb50 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
@@ -19,6 +19,9 @@
 - clock-frequency: desired I2C bus clock frequency in Hz. The absence of this
   property indicates the default frequency 100 kHz.
 - clocks: clock specifier.
+- dmas: Must contain a list of two references to DMA specifiers, one for
+  transmission, and one for reception.
+- dma-names: Must contain a list of two DMA names, "tx" and "rx".
 
 - i2c-scl-falling-time-ns: see i2c.txt
 - i2c-scl-internal-delay-ns: see i2c.txt
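
A sketch of how the new dmas/dma-names properties could look on an R-Car I2C
node; the DMA controller phandle, request numbers, clock and interrupt
specifiers are placeholders:

	i2c0: i2c@e6508000 {
		compatible = "renesas,i2c-r8a7791";
		reg = <0 0xe6508000 0 0x40>;
		interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&mstp9_clks R8A7791_CLK_I2C0>;
		dmas = <&dmac0 0x61>, <&dmac0 0x62>;
		dma-names = "tx", "rx";
	};
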
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
index f0d71bc..0b4a85f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
@@ -6,8 +6,8 @@
 Required properties :
 
  - reg : Offset and length of the register set for the device
- - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or
-		"rockchip,rk3288-i2c".
+ - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c",
+		"rockchip,rk3228-i2c" or "rockchip,rk3288-i2c".
  - interrupts : interrupt number
  - clocks : parent clock
 
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 2164123..a949404 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -32,17 +32,17 @@
 
 Example nodes:
 
-	gpio_keys {
+	gpio-keys {
 			compatible = "gpio-keys";
-			#address-cells = <1>;
-			#size-cells = <0>;
 			autorepeat;
-			button@21 {
+
+			up {
 				label = "GPIO Key UP";
 				linux,code = <103>;
 				gpios = <&gpio1 0 1>;
 			};
-			button@22 {
+
+			down {
 				label = "GPIO Key DOWN";
 				linux,code = <108>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/brcm,iproc-touchscreen.txt b/Documentation/devicetree/bindings/input/touchscreen/brcm,iproc-touchscreen.txt
index 34e3382..ac5dff4 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/brcm,iproc-touchscreen.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/brcm,iproc-touchscreen.txt
@@ -2,11 +2,17 @@
 
 Required properties:
 - compatible: must be "brcm,iproc-touchscreen"
-- reg: physical base address of the controller and length of memory mapped
-  region.
+- ts_syscon: handle of the syscon node defining the physical base
+  address of the controller and the length of the memory mapped region.
+  If this property is used, please make sure the MFD_SYSCON config
+  option is enabled in the defconfig file.
 - clocks:  The clock provided by the SOC to drive the tsc
-- clock-name:  name for the clock
+- clock-names:  name for the clock
 - interrupts: The touchscreen controller's interrupt
+- address-cells: Specify the number of u32 entries needed in child nodes.
+  Should be set to 1.
+- size-cells: Specify the number of u32 entries needed to specify the size
+  of child nodes in the reg property. Should be set to 1.
 
 Optional properties:
 - scanning_period: Time between scans. Each step is 1024 us.  Valid 1-256.
@@ -53,13 +59,18 @@
 - touchscreen-inverted-x: X axis is inverted (boolean)
 - touchscreen-inverted-y: Y axis is inverted (boolean)
 
-Example:
+Example: An example of a touchscreen node
 
-	touchscreen: tsc@0x180A6000 {
+	ts_adc_syscon: ts_adc_syscon@180a6000 {
+		compatible = "brcm,iproc-ts-adc-syscon","syscon";
+		reg = <0x180a6000 0xc30>;
+	};
+
+	touchscreen: touchscreen@180A6000 {
 		compatible = "brcm,iproc-touchscreen";
 		#address-cells = <1>;
 		#size-cells = <1>;
-		reg = <0x180A6000 0x40>;
+		ts_syscon = <&ts_adc_syscon>;
 		clocks = <&adc_clk>;
 		clock-names = "tsc_clk";
 		interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 007a5b4..4c29cda 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -11,6 +11,8 @@
 - interrupt-controller : Identifies the node as an interrupt controller
 - #interrupt-cells : Specifies the number of cells needed to encode an
   interrupt source. Must be a single cell with a value of at least 3.
+  If the system requires describing PPI affinity, then the value must
+  be at least 4.
 
   The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
   interrupts. Other values are reserved for future use.
@@ -24,7 +26,14 @@
 		1 = edge triggered
 		4 = level triggered
 
-  Cells 4 and beyond are reserved for future use and must have a value
+  The 4th cell is a phandle to a node describing a set of CPUs this
+  interrupt is affine to. The interrupt must be a PPI, and the node
+  pointed to must be a subnode of the "ppi-partitions" subnode. For
+  interrupt types other than PPI, or for PPIs that are not partitioned,
+  this cell must be zero. See the "ppi-partitions" node description
+  below.
+
+  Cells 5 and beyond are reserved for future use and must have a value
   of 0 if present.
 
 - reg : Specifies base physical address(s) and size of the GIC
@@ -50,6 +59,11 @@
 
 Sub-nodes:
 
+PPI affinity can be expressed as a single "ppi-partitions" node,
+containing a set of sub-nodes, each with the following property:
+- affinity: Should be a list of phandles to CPU nodes (as described in
+Documentation/devicetree/bindings/arm/cpus.txt).
+
 GICv3 has one or more Interrupt Translation Services (ITS) that are
 used to route Message Signalled Interrupts (MSI) to the CPUs.
 
@@ -91,7 +105,7 @@
 
 	gic: interrupt-controller@2c010000 {
 		compatible = "arm,gic-v3";
-		#interrupt-cells = <3>;
+		#interrupt-cells = <4>;
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges;
@@ -119,4 +133,20 @@
 			#msi-cells = <1>;
 			reg = <0x0 0x2c400000 0 0x200000>;
 		};
+
+		ppi-partitions {
+			part0: interrupt-partition-0 {
+				affinity = <&cpu0 &cpu2>;
+			};
+
+			part1: interrupt-partition-1 {
+				affinity = <&cpu1 &cpu3>;
+			};
+		};
+	};
+
+
+	device@0 {
+		reg = <0 0 0 4>;
+		interrupts = <1 1 4 &part0>;
 	};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
index c9cf605..2a1d16b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
@@ -6,7 +6,7 @@
 instance can handle up to 32 interrupts.
 
 Required properties:
-- compatible: "arm,versatile-fpga-irq"
+- compatible: "arm,versatile-fpga-irq" or "oxsemi,ox810se-rps-irq"
 - interrupt-controller: Identifies the node as an interrupt controller
 - #interrupt-cells: The number of cells to define the interrupts.  Must be 1
   as the FPGA IRQ controller has no configuration options for interrupt
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
index 2d6c8bb..6428a6b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
@@ -71,8 +71,8 @@
 24: DMA8
 25: DMA9
 26: DMA10
-27: DMA11
-28: DMA12
+27: DMA11-14 - shared interrupt for DMA 11 to 14
+28: DMAALL - triggers on all dma interrupts (including channel 15)
 29: AUX
 30: ARM
 31: VPUDMA
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt
new file mode 100644
index 0000000..4040905
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm6345-l1-intc.txt
@@ -0,0 +1,57 @@
+Broadcom BCM6345-style Level 1 interrupt controller
+
+This block is a first level interrupt controller that is typically connected
+directly to one of the HW INT lines on each CPU.
+
+Key elements of the hardware design include:
+
+- 32, 64 or 128 incoming level IRQ lines
+
+- Most onchip peripherals are wired directly to an L1 input
+
+- A separate instance of the register set for each CPU, allowing individual
+  peripheral IRQs to be routed to any CPU
+
+- Contains one or more enable/status word pairs per CPU
+
+- No atomic set/clear operations
+
+- No polarity/level/edge settings
+
+- No FIFO or priority encoder logic; software is expected to read all
+  2-4 status words to determine which IRQs are pending
+
+Required properties:
+
+- compatible: should be "brcm,bcm<soc>-l1-intc", "brcm,bcm6345-l1-intc"
+- reg: specifies the base physical address and size of the registers;
+  the number of supported IRQs is inferred from the size argument
+- interrupt-controller: identifies the node as an interrupt controller
+- #interrupt-cells: specifies the number of cells needed to encode an interrupt
+  source, should be 1.
+- interrupt-parent: specifies the phandle to the parent interrupt controller(s)
+  this one is cascaded from
+- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
+  node; valid values depend on the type of parent interrupt controller
+
+If multiple reg ranges and interrupt-parent entries are present on an SMP
+system, the driver will allow IRQ SMP affinity to be set up through the
+/proc/irq/ interface.  In the simplest possible configuration, only one
+reg range and one interrupt-parent is needed.
+
+The driver operates in native CPU endian by default; there is no support for
+specifying an alternative endianness.
+
+Example:
+
+periph_intc: interrupt-controller@10000000 {
+        compatible = "brcm,bcm63168-l1-intc", "brcm,bcm6345-l1-intc";
+        reg = <0x10000020 0x20>,
+              <0x10000040 0x20>;
+
+        interrupt-controller;
+        #interrupt-cells = <1>;
+
+        interrupt-parent = <&cpu_intc>;
+        interrupts = <2>, <3>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.txt
new file mode 100644
index 0000000..888b2b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/ezchip,nps400-ic.txt
@@ -0,0 +1,17 @@
+EZchip NPS Interrupt Controller
+
+Required properties:
+
+- compatible : should be "ezchip,nps400-ic"
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+  interrupt source. The value shall be 1.
+
+
+Example:
+
+intc: interrupt-controller {
+	compatible = "ezchip,nps400-ic";
+	interrupt-controller;
+	#interrupt-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
new file mode 100644
index 0000000..9e38949
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
@@ -0,0 +1,30 @@
+* Freescale Layerscape SCFG PCIe MSI controller
+
+Required properties:
+
+- compatible: should be "fsl,<soc-name>-msi" to identify
+	      Layerscape PCIe MSI controller block such as:
+              "fsl,1s1021a-msi"
+              "fsl,1s1043a-msi"
+- msi-controller: indicates that this is a PCIe MSI controller node
+- reg: physical base address of the controller and length of the memory
+  mapped region.
+- interrupts: an interrupt to the parent interrupt controller.
+
+Optional properties:
+- interrupt-parent: the phandle to the parent interrupt controller.
+
+This interrupt controller hardware is a second level interrupt controller that
+is hooked to a parent interrupt controller, e.g. the ARM GIC on ARM-based
+platforms. If interrupt-parent is not provided, the default parent interrupt
+controller will be used.
+Each PCIe node needs to have an msi-parent property that points to an
+MSI controller node.
+
+Examples:
+
+	msi1: msi-controller@1571000 {
+		compatible = "fsl,1s1043a-msi";
+		reg = <0x0 0x1571000 0x0 0x8>,
+		reg = <0x0 0x1571000 0x0 0x8>;
+		interrupts = <0 116 0x4>;
+	};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt b/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt
index 539adca..38211f3 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/nxp,lpc3220-mic.txt
@@ -1,38 +1,60 @@
-* NXP LPC32xx Main Interrupt Controller
-  (MIC, including SIC1 and SIC2 secondary controllers)
+* NXP LPC32xx MIC, SIC1 and SIC2 Interrupt Controllers
 
 Required properties:
-- compatible: Should be "nxp,lpc3220-mic"
-- interrupt-controller: Identifies the node as an interrupt controller.
-- interrupt-parent: Empty for the interrupt controller itself
-- #interrupt-cells: The number of cells to define the interrupts. Should be 2.
-  The first cell is the IRQ number
-  The second cell is used to specify mode:
-      1 = low-to-high edge triggered
-      2 = high-to-low edge triggered
-      4 = active high level-sensitive
-      8 = active low level-sensitive
-      Default for internal sources should be set to 4 (active high).
-- reg: Should contain MIC registers location and length
+- compatible: "nxp,lpc3220-mic" or "nxp,lpc3220-sic".
+- reg: should contain IC registers location and length.
+- interrupt-controller: identifies the node as an interrupt controller.
+- #interrupt-cells: the number of cells to define an interrupt, should be 2.
+  The first cell is the IRQ number, the second cell is used to specify
+  one of the supported IRQ types:
+      IRQ_TYPE_EDGE_RISING = low-to-high edge triggered,
+      IRQ_TYPE_EDGE_FALLING = high-to-low edge triggered,
+      IRQ_TYPE_LEVEL_HIGH = active high level-sensitive,
+      IRQ_TYPE_LEVEL_LOW = active low level-sensitive.
+  Reset value is IRQ_TYPE_LEVEL_LOW.
+
+Optional properties:
+- interrupt-parent: empty for MIC interrupt controller, link to parent
+  MIC interrupt controller for SIC1 and SIC2
+- interrupts: empty for MIC interrupt controller, cascaded MIC
+  hardware interrupts for SIC1 and SIC2
 
 Examples:
-	/*
-	 * MIC
-	 */
+
+	/* LPC32xx MIC, SIC1 and SIC2 interrupt controllers */
 	mic: interrupt-controller@40008000 {
 		compatible = "nxp,lpc3220-mic";
+		reg = <0x40008000 0x4000>;
 		interrupt-controller;
-		interrupt-parent;
 		#interrupt-cells = <2>;
-		reg = <0x40008000 0xC000>;
 	};
 
-	/*
-	 * ADC
-	 */
+	sic1: interrupt-controller@4000c000 {
+		compatible = "nxp,lpc3220-sic";
+		reg = <0x4000c000 0x4000>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		interrupt-parent = <&mic>;
+		interrupts = <0 IRQ_TYPE_LEVEL_LOW>,
+			     <30 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	sic2: interrupt-controller@40010000 {
+		compatible = "nxp,lpc3220-sic";
+		reg = <0x40010000 0x4000>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		interrupt-parent = <&mic>;
+		interrupts = <1 IRQ_TYPE_LEVEL_LOW>,
+			     <31 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	/* ADC */
 	adc@40048000 {
 		compatible = "nxp,lpc3220-adc";
 		reg = <0x40048000 0x1000>;
-		interrupt-parent = <&mic>;
-		interrupts = <39 4>;
+		interrupt-parent = <&sic1>;
+		interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
 	};
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 7180745..19fe6f2 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -16,6 +16,7 @@
                         "arm,mmu-400"
                         "arm,mmu-401"
                         "arm,mmu-500"
+                        "cavium,smmu-v2"
 
                   depending on the particular implementation and/or the
                   version of the architecture implemented.
diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt
index 6841984..af10678 100644
--- a/Documentation/devicetree/bindings/leds/common.txt
+++ b/Documentation/devicetree/bindings/leds/common.txt
@@ -37,6 +37,9 @@
                      property is mandatory for the LEDs in the non-flash modes
                      (e.g. torch or indicator).
 
+- panic-indicator : This property specifies that the LED should be used,
+		    if at all possible, as a panic indicator.
+
 Required properties for flash LED child nodes:
 - flash-max-microamp : Maximum flash LED supply current in microamperes.
 - flash-max-timeout-us : Maximum timeout in microseconds after which the flash
diff --git a/Documentation/devicetree/bindings/leds/leds-gpio.txt b/Documentation/devicetree/bindings/leds/leds-gpio.txt
index fea1ebf..cbbeb18 100644
--- a/Documentation/devicetree/bindings/leds/leds-gpio.txt
+++ b/Documentation/devicetree/bindings/leds/leds-gpio.txt
@@ -23,6 +23,8 @@
   property is not present.
 - retain-state-suspended: (optional) The suspend state can be retained. Such
   as charge-led gpio.
+- panic-indicator : (optional)
+  see Documentation/devicetree/bindings/leds/common.txt
 
 Examples:
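
A sketch of a GPIO LED marked as a panic indicator; the node layout follows
the gpio-leds binding, while the label and GPIO specifier below are only
illustrative placeholders:

	leds {
		compatible = "gpio-leds";

		alarm {
			label = "alarm:red";
			gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>;
			panic-indicator;
		};
	};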
 
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7180.txt b/Documentation/devicetree/bindings/media/i2c/adv7180.txt
new file mode 100644
index 0000000..0d50115
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/adv7180.txt
@@ -0,0 +1,29 @@
+* Analog Devices ADV7180 analog video decoder family
+
+Devices in the adv7180 family are used to capture analog video and output
+it on digital interfaces such as MIPI CSI-2 or parallel video.
+
+Required Properties :
+- compatible : value must be one of
+		"adi,adv7180"
+		"adi,adv7182"
+		"adi,adv7280"
+		"adi,adv7280-m"
+		"adi,adv7281"
+		"adi,adv7281-m"
+		"adi,adv7281-ma"
+		"adi,adv7282"
+		"adi,adv7282-m"
+
+Example:
+
+	i2c0@1c22000 {
+		...
+		...
+		adv7180@21 {
+			compatible = "adi,adv7180";
+			reg = <0x21>;
+		};
+		...
+	};
+
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index 619193c..6a4e61c 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -5,14 +5,22 @@
 family of devices. The current blocks are always slaves and support one input
 channel which can be either RGB, YUYV or BT656.
 
- - compatible: Must be one of the following
+ - compatible: Must be one or more of the following
    - "renesas,vin-r8a7795" for the R8A7795 device
    - "renesas,vin-r8a7794" for the R8A7794 device
    - "renesas,vin-r8a7793" for the R8A7793 device
+   - "renesas,vin-r8a7792" for the R8A7792 device
    - "renesas,vin-r8a7791" for the R8A7791 device
    - "renesas,vin-r8a7790" for the R8A7790 device
    - "renesas,vin-r8a7779" for the R8A7779 device
    - "renesas,vin-r8a7778" for the R8A7778 device
+   - "renesas,rcar-gen2-vin" for a generic R-Car Gen2 compatible device.
+   - "renesas,rcar-gen3-vin" for a generic R-Car Gen3 compatible device.
+
+   When compatible with the generic version, nodes must list the
+   SoC-specific version corresponding to the platform first,
+   followed by the generic version.
+
  - reg: the register base and size for the device registers
  - interrupts: the interrupt for the device
  - clocks: Reference to the parent clock
@@ -37,7 +45,7 @@
 	};
 
         vin0: vin@0xe6ef0000 {
-                compatible = "renesas,vin-r8a7790";
+                compatible = "renesas,vin-r8a7790", "renesas,rcar-gen2-vin";
                 clocks = <&mstp8_clks R8A7790_CLK_VIN0>;
                 reg = <0 0xe6ef0000 0 0x1000>;
                 interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/media/xilinx/video.txt b/Documentation/devicetree/bindings/media/xilinx/video.txt
index cbd46fa..68ac210 100644
--- a/Documentation/devicetree/bindings/media/xilinx/video.txt
+++ b/Documentation/devicetree/bindings/media/xilinx/video.txt
@@ -20,7 +20,7 @@
 - xlnx,video-format: This property represents a video format transmitted on an
   AXI bus between video IP cores, using its VF code as defined in "AXI4-Stream
   Video IP and System Design Guide" [UG934]. How the format relates to the IP
-  core is decribed in the IP core bindings documentation.
+  core is described in the IP core bindings documentation.
 
 - xlnx,video-width: This property qualifies the video format with the sample
   width expressed as a number of bits per pixel component. All components must
diff --git a/Documentation/devicetree/bindings/memory-controllers/exynos-srom.txt b/Documentation/devicetree/bindings/memory-controllers/exynos-srom.txt
new file mode 100644
index 0000000..f633b5d
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/exynos-srom.txt
@@ -0,0 +1,79 @@
+SAMSUNG Exynos SoCs SROM Controller driver.
+
+Required properties:
+- compatible : Should contain "samsung,exynos4210-srom".
+
+- reg: offset and length of the register set
+
+Optional properties:
+The SROM controller can be used to attach external peripherals. In this case
+extra properties, describing the bus behind it, should be specified as below:
+
+- #address-cells: Must be set to 2 to allow device address translation.
+		  Address is specified as (bank#, offset).
+
+- #size-cells: Must be set to 1 to allow device size passing
+
+- ranges: Must be set up to reflect the memory layout with four integer values
+	  per bank:
+		<bank-number> 0 <parent address of bank> <size>
+
+Sub-nodes:
+The actual device nodes should be added as subnodes to the SROMc node. These
+subnodes, in addition to regular device specification, should contain the following
+properties, describing configuration of the relevant SROM bank:
+
+Required properties:
+- reg: bank number, base address (relative to start of the bank) and size of
+       the memory mapped for the device. Note that base address will be
+       typically 0 as this is the start of the bank.
+
+- samsung,srom-timing : array of 6 integers, specifying bank timings in the
+                        following order: Tacp, Tcah, Tcoh, Tacc, Tcos, Tacs.
+                        Each value is specified in cycles and has the following
+                        meaning and valid range:
+                        Tacp : Page mode access cycle at Page mode (0 - 15)
+                        Tcah : Address holding time after CSn (0 - 15)
+                        Tcoh : Chip selection hold on OEn (0 - 15)
+                        Tacc : Access cycle (0 - 31, the actual time is N + 1)
+                        Tcos : Chip selection set-up before OEn (0 - 15)
+                        Tacs : Address set-up before CSn (0 - 15)
+
+Optional properties:
+- reg-io-width : data width in bytes (1 or 2). If omitted, default of 1 is used.
+
+- samsung,srom-page-mode : if page mode is set, 4 data page mode will be configured,
+			   else normal (1 data) page mode will be set.
+
+Example: basic definition, no banks are configured
+	memory-controller@12570000 {
+		compatible = "samsung,exynos4210-srom";
+		reg = <0x12570000 0x14>;
+	};
+
+Example: SROMc with SMSC911x ethernet chip on bank 3
+	memory-controller@12570000 {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges = <0 0 0x04000000 0x20000   // Bank0
+			  1 0 0x05000000 0x20000   // Bank1
+			  2 0 0x06000000 0x20000   // Bank2
+			  3 0 0x07000000 0x20000>; // Bank3
+
+		compatible = "samsung,exynos4210-srom";
+		reg = <0x12570000 0x14>;
+
+		ethernet@3,0 {
+			compatible = "smsc,lan9115";
+			reg = <3 0 0x10000>;	   // Bank 3, offset = 0
+			phy-mode = "mii";
+			interrupt-parent = <&gpx0>;
+			interrupts = <5 8>;
+			reg-io-width = <2>;
+			smsc,irq-push-pull;
+			smsc,force-internal-phy;
+
+			samsung,srom-page-mode;
+			samsung,srom-timing = <9 12 1 9 1 1>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/mips/brcm/soc.txt b/Documentation/devicetree/bindings/mips/brcm/soc.txt
index 7bab90c..4a7e030 100644
--- a/Documentation/devicetree/bindings/mips/brcm/soc.txt
+++ b/Documentation/devicetree/bindings/mips/brcm/soc.txt
@@ -4,7 +4,8 @@
 
 - compatible: "brcm,bcm3384", "brcm,bcm33843"
               "brcm,bcm3384-viper", "brcm,bcm33843-viper"
-              "brcm,bcm6328", "brcm,bcm6368",
+              "brcm,bcm6328", "brcm,bcm6358", "brcm,bcm6368",
+              "brcm,bcm63168", "brcm,bcm63268",
               "brcm,bcm7125", "brcm,bcm7346", "brcm,bcm7358", "brcm,bcm7360",
               "brcm,bcm7362", "brcm,bcm7420", "brcm,bcm7425"
 
diff --git a/Documentation/devicetree/bindings/mips/cavium/ciu3.txt b/Documentation/devicetree/bindings/mips/cavium/ciu3.txt
new file mode 100644
index 0000000..616862a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cavium/ciu3.txt
@@ -0,0 +1,27 @@
+* Central Interrupt Unit v3
+
+Properties:
+- compatible: "cavium,octeon-7890-ciu3"
+
+  Compatibility with 78XX and 73XX SOCs.
+
+- interrupt-controller:  This is an interrupt controller.
+
+- reg: The base address of the CIU's register bank.
+
+- #interrupt-cells: Must be <2>.  The first cell is source number.
+  The second cell indicates the triggering semantics, and may have a
+  value of either 4 for level semantics, or 1 for edge semantics.
+
+Example:
+	interrupt-controller@1010000000000 {
+		compatible = "cavium,octeon-7890-ciu3";
+		interrupt-controller;
+		/* Interrupts are specified by two parts:
+		 * 1) Source number (20 significant bits)
+		 * 2) Trigger type: (4 == level, 1 == edge)
+		 */
+		#address-cells = <0>;
+		#interrupt-cells = <2>;
+		reg = <0x10100 0x00000000 0x0 0xb0000000>;
+	};
diff --git a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
index c7a26ca..6611a7c 100644
--- a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
+++ b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
@@ -30,11 +30,90 @@
                         region may not be present in some scenarios, such
                         as in the device tree presented to a virtual machine.
 
+    - msi-parent
+        Value type: <phandle>
+        Definition: Must be present and point to the MSI controller node
+                    handling message interrupts for the MC.
+
+    - ranges
+        Value type: <prop-encoded-array>
+        Definition: A standard property.  Defines the mapping between the child
+                    MC address space and the parent system address space.
+
+                    The MC address space is defined by 3 components:
+                       <region type> <offset hi> <offset lo>
+
+                    Valid values for region type are
+                       0x0 - MC portals
+                       0x1 - QBMAN portals
+
+    - #address-cells
+        Value type: <u32>
+        Definition: Must be 3.  (see definition in 'ranges' property)
+
+    - #size-cells
+        Value type: <u32>
+        Definition: Must be 1.
+
+Sub-nodes:
+
+        The fsl-mc node may optionally have dpmac sub-nodes that describe
+        the relationship between the Ethernet MACs which belong to the MC
+        and the Ethernet PHYs on the system board.
+
+        The dpmac nodes must be under a node named "dpmacs" which contains
+        the following properties:
+
+            - #address-cells
+              Value type: <u32>
+              Definition: Must be present if dpmac sub-nodes are defined and must
+                          have a value of 1.
+
+            - #size-cells
+              Value type: <u32>
+              Definition: Must be present if dpmac sub-nodes are defined and must
+                          have a value of 0.
+
+        These nodes must have the following properties:
+
+            - compatible
+              Value type: <string>
+              Definition: Must be "fsl,qoriq-mc-dpmac".
+
+            - reg
+              Value type: <prop-encoded-array>
+              Definition: Specifies the id of the dpmac.
+
+            - phy-handle
+              Value type: <phandle>
+              Definition: Specifies the phandle to the PHY device node associated
+                          with this dpmac.
+
 Example:
 
         fsl_mc: fsl-mc@80c000000 {
                 compatible = "fsl,qoriq-mc";
                 reg = <0x00000008 0x0c000000 0 0x40>,    /* MC portal base */
                       <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
-        };
+                msi-parent = <&its>;
+                #address-cells = <3>;
+                #size-cells = <1>;
 
+                /*
+                 * Region type 0x0 - MC portals
+                 * Region type 0x1 - QBMAN portals
+                 */
+                ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
+                          0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
+
+                dpmacs {
+                    #address-cells = <1>;
+                    #size-cells = <0>;
+
+                    dpmac@1 {
+                        compatible = "fsl,qoriq-mc-dpmac";
+                        reg = <1>;
+                        phy-handle = <&mdio0_phy0>;
+                    };
+                };
+        };
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index ea5614b..07184e8 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -15,6 +15,7 @@
 	- "rockchip,rk3288-dw-mshc": for Rockchip RK3288
 	- "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
 	- "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
+	- "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
 
 Optional Properties:
 * clocks: from common clock binding: if ciu_drive and ciu_sample are
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-st.txt b/Documentation/devicetree/bindings/mmc/sdhci-st.txt
index 18d950d..88faa91 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-st.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-st.txt
@@ -38,7 +38,7 @@
 - bus-width:		Number of data lines.
 			See:  Documentation/devicetree/bindings/mmc/mmc.txt.
 
-- max-frequency: 	Can be 200MHz, 100Mz or 50MHz (default) and used for
+- max-frequency:	Can be 200MHz, 100MHz or 50MHz (default) and used for
 			configuring the CCONFIG3 in the mmcss.
 			See:  Documentation/devicetree/bindings/mmc/mmc.txt.
 
@@ -48,7 +48,7 @@
 - vqmmc-supply:		Phandle to the regulator dt node, mentioned as the vcc/vdd
 			supply in eMMC/SD specs.
 
-- sd-uhs--sdr50:	To enable the SDR50 in the mmcss.
+- sd-uhs-sdr50:	To enable the SDR50 in the mmcss.
 			See:  Documentation/devicetree/bindings/mmc/mmc.txt.
 
 - sd-uhs-sdr104:	To enable the SDR104 in the mmcss.
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index 7fb746d..0f610d4 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -26,3 +26,6 @@
 
 Optional properties:
 - toshiba,mmc-wrprotect-disable: write-protect detection is unavailable
+- pinctrl-names: should be "default", "state_uhs"
+- pinctrl-0: should contain default/high speed pin ctrl
+- pinctrl-1: should contain uhs mode pin ctrl
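+
+Illustrative sketch of the pinctrl properties (the node name and pin group
+labels below are assumptions for the example, not part of this binding):
+
+	sdhi0: sd@ee100000 {
+		pinctrl-names = "default", "state_uhs";
+		pinctrl-0 = <&sdhi0_pins>;
+		pinctrl-1 = <&sdhi0_pins_uhs>;
+	};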
diff --git a/Documentation/devicetree/bindings/mmc/usdhi6rol0.txt b/Documentation/devicetree/bindings/mmc/usdhi6rol0.txt
index 8babdaa..6d1b797 100644
--- a/Documentation/devicetree/bindings/mmc/usdhi6rol0.txt
+++ b/Documentation/devicetree/bindings/mmc/usdhi6rol0.txt
@@ -12,6 +12,12 @@
 
 - vmmc-supply:	a phandle of a regulator, supplying Vcc to the card
 - vqmmc-supply:	a phandle of a regulator, supplying VccQ to the card
+- pinctrl-names: Can contain a "default" entry and a "state_uhs"
+                 entry. The state_uhs entry is used together with the default
+                 entry when the board requires distinct settings for UHS speeds.
+
+- pinctrl-N: One property for each name listed in pinctrl-names, see
+             ../pinctrl/pinctrl-bindings.txt.
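+
+For example, a board that needs separate UHS pin settings might add (the pin
+group labels here are illustrative assumptions):
+
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&sd0_pins>;
+	pinctrl-1 = <&sd0_pins_uhs>;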
 
 Additionally any standard mmc bindings from mmc.txt can be used.
 
diff --git a/Documentation/devicetree/bindings/mtd/arm-versatile.txt b/Documentation/devicetree/bindings/mtd/arm-versatile.txt
index beace4b..4ec2879 100644
--- a/Documentation/devicetree/bindings/mtd/arm-versatile.txt
+++ b/Documentation/devicetree/bindings/mtd/arm-versatile.txt
@@ -1,8 +1,26 @@
 Flash device on ARM Versatile board
 
+These flash chips are found in ARM reference designs such as Integrator,
+Versatile, RealView and Versatile Express.
+
+They are regular CFI compatible (Intel or AMD extended) flash chips with
+some special write protect/VPP bits that can be controlled by the machine's
+system controller.
+
 Required properties:
-- compatible : must be "arm,versatile-flash";
+- compatible : must be "arm,versatile-flash", "cfi-flash";
+- reg : memory address for the flash chip
 - bank-width : width in bytes of flash interface.
 
+For the rest of the properties, see mtd-physmap.txt.
+
 The device tree may optionally contain sub-nodes describing partitions of the
 address space. See partition.txt for more detail.
+
+Example:
+
+flash@34000000 {
+	compatible = "arm,versatile-flash", "cfi-flash";
+	reg = <0x34000000 0x4000000>;
+	bank-width = <4>;
+};
diff --git a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
index 0333ec8..c34aa6f 100644
--- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
@@ -5,7 +5,8 @@
 		 "fsl,imx7d-qspi", "fsl,imx6ul-qspi",
 		 "fsl,ls1021a-qspi"
 		 or
-		 "fsl,ls2080a-qspi" followed by "fsl,ls1021a-qspi"
+		 "fsl,ls2080a-qspi" followed by "fsl,ls1021a-qspi",
+		 "fsl,ls1043a-qspi" followed by "fsl,ls1021a-qspi"
   - reg : the first contains the register location and length,
           the second contains the memory mapping address and length
   - reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory"
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index 078060a..05f705e3 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -18,6 +18,8 @@
   - First is the Rx interrupt.  This irq is mandatory.
   - Second is the Tx completion interrupt.
     This is supported only on SGMII based 1GbE and 10GbE interfaces.
+- channel: Ethernet to CPU start channel (prefetch buffer) number
+  - Must map to the first irq, and irqs must be sequential
 - port-id: Port number (0 or 1)
 - clocks: Reference to the clock entry.
 - local-mac-address: MAC address assigned to this device
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 28a4781..0ae0649 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -45,13 +45,13 @@
 Optional properties:
 - dual_emac_res_vlan	: Specifies VID to be used to segregate the ports
 - mac-address		: See ethernet.txt file in the same directory
-- phy_id		: Specifies slave phy id
+- phy_id		: Specifies slave phy id (deprecated, use phy-handle)
 - phy-handle		: See ethernet.txt file in the same directory
 
 Slave sub-nodes:
 - fixed-link		: See fixed-link.txt file in the same directory
-			  Either the property phy_id, or the sub-node
-			  fixed-link can be specified
+
+Note: Exactly one of phy_id, phy-handle, or fixed-link must be specified.
 
 Note: "ti,hwmods" field is used to fetch the base address and irq
 resources from TI, omap hwmod data base during device registration.
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index 5fdbbcd..9f4807f 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -31,8 +31,6 @@
 			  switch. Must be set if the switch can not detect
 			  the presence and/or size of a connected EEPROM,
 			  otherwise optional.
-- reset-gpios		: phandle and specifier to a gpio line connected to
-			  reset pin of the switch chip.
 
 A switch may have multiple "port" children nodes
 
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
new file mode 100644
index 0000000..7629189
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -0,0 +1,35 @@
+Marvell DSA Switch Device Tree Bindings
+---------------------------------------
+
+WARNING: This binding is currently unstable. Do not program it into a
+FLASH never to be changed again. Once this binding is stable, this
+warning will be removed.
+
+If you need a stable binding, use the old dsa.txt binding.
+
+Marvell Switches are MDIO devices. The following properties should be
+placed in a child node of an mdio device.
+
+The properties described here are those specific to Marvell devices.
+Additional required and optional properties can be found in dsa.txt.
+
+Required properties:
+- compatible           : Should be "marvell,mv88e6085"
+- reg                  : Address on the MII bus for the switch.
+
+Optional properties:
+
+- reset-gpios		: Should be a gpio specifier for a reset line
+
+Example:
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               switch0: switch@0 {
+                       compatible = "marvell,mv88e6085";
+                       reg = <0>;
+                       reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+               };
+       };
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
index ecacfa4..d4b7f2e 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
@@ -7,19 +7,45 @@
 - mode: dsa fabric mode string. only support one of dsaf modes like these:
 		"2port-64vf",
 		"6port-16rss",
-		"6port-16vf".
+		"6port-16vf",
+		"single-port".
 - interrupt-parent: the interrupt parent of this device.
 - interrupts: should contain the DSA Fabric and rcb interrupt.
 - reg: specifies base physical address(es) and size of the device registers.
-  The first region is external interface control register base and size.
-  The second region is SerDes base register and size.
+  The first region is the external interface control register base and size
+  (optional, only used when subctrl-syscon does not exist). Using subctrl-syscon
+  rather than this address is recommended.
+  The second region is the SerDes base register and size (optional, only used
+  when serdes-syscon in the port node does not exist). Using serdes-syscon
+  rather than this address is recommended.
   The third region is the PPE register base and size.
-  The fourth region is dsa fabric base register and size.
-  The fifth region is cpld base register and size, it is not required if do not use cpld.
-- phy-handle: phy handle of physicl port, 0 if not any phy device. see ethernet.txt [1].
+  The fourth region is dsa fabric base register and size. It is not required for
+  single-port mode.
+- reg-names: may be "ppe-base" and/or "dsaf-base". It is used to find the
+  index of the corresponding reg entry.
+
+- phy-handle: phy handle of the physical port, 0 if there is no phy device. It
+  is an optional attribute. If port nodes exist, the phy-handle in each port
+  node will be used. See ethernet.txt [1].
+- subctrl-syscon: is syscon handle for external interface control register.
+- reset-field-offset: is offset of reset field. Its value depends on the hardware
+  user manual.
 - buf-size: rx buffer size, should be 16-1024.
 - desc-num: number of description in TX and RX queue, should be 512, 1024, 2048 or 4096.
 
+- port: subnodes of dsaf. A dsaf node may contain several port nodes (depending
+  on the mode of the dsaf). Each port node contains the attributes listed below:
+- reg: is physical port index in one dsaf.
+- phy-handle: phy handle of physical port. It is not required if there isn't
+  phy device. see ethernet.txt [1].
+- serdes-syscon: is syscon handle for SerDes register.
+- cpld-syscon: is syscon handle + register offset pair for cpld register. It is
+  not required if there isn't cpld device.
+- port-rst-offset: is offset of reset field for each port in dsaf. Its value
+  depends on the hardware user manual.
+- port-mode-offset: is offset of port mode field for each port in dsaf. Its
+  value depends on the hardware user manual.
+
 [1] Documentation/devicetree/bindings/net/phy.txt
 
 Example:
@@ -28,11 +54,11 @@
 	compatible = "hisilicon,hns-dsaf-v1";
 	mode = "6port-16rss";
 	interrupt-parent = <&mbigen_dsa>;
-	reg = <0x0 0xC0000000 0x0 0x420000
-	       0x0 0xC2000000 0x0 0x300000
-	       0x0 0xc5000000 0x0 0x890000
+	reg = <0x0 0xc5000000 0x0 0x890000
 	       0x0 0xc7000000 0x0 0x60000>;
-	phy-handle = <0 0 0 0 &soc0_phy4 &soc0_phy5 0 0>;
+	reg-names = "ppe-base", "dsaf-base";
+	subctrl-syscon = <&subctrl>;
+	reset-field-offset = <0>;
 	interrupts = <131 4>,<132 4>, <133 4>,<134 4>,
 		     <135 4>,<136 4>, <137 4>,<138 4>,
 		     <139 4>,<140 4>, <141 4>,<142 4>,
@@ -43,4 +69,15 @@
 	buf-size = <4096>;
 	desc-num = <1024>;
 	dma-coherent;
+
+	port@0 {
+		reg = <0>;
+		phy-handle = <&phy0>;
+		serdes-syscon = <&serdes>;
+	};
+
+	port@1 {
+		reg = <1>;
+		serdes-syscon = <&serdes>;
+	};
 };
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt
index e6a9d1c..b9ff4ba 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt
@@ -36,6 +36,34 @@
                        | | | | | |
                       external port
 
+  This attribute is retained for compatibility purposes. It is not recommended
+  for use in new code.
+
+- port-idx-in-ae: is the index of port provided by AE.
+  In NIC mode of DSAF, all 6 PHYs of service DSAF are taken as ethernet ports
+  to the CPU. The port-idx-in-ae can be 0 to 5. Here is the diagram:
+            +-----+---------------+
+            |            CPU      |
+            +-+-+-+---+-+-+-+-+-+-+
+              |    |   | | | | | |
+           debug debug   service
+           port  port     port
+           (0)   (0)     (0-5)
+
+  In Switch mode of DSAF, all 6 PHYs of service DSAF are taken as physical
+  ports connected to a LAN switch, while the CPU side assumes it has a
+  single NIC connected to this switch. In this case, the port-idx-in-ae
+  can only be 0.
+            +-----+-----+------+------+
+            |                CPU      |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+
+              |    |     service| port(0)
+            debug debug  +------------+
+            port  port   |   switch   |
+            (0)   (0)    +-+-+-+-+-+-++
+                          | | | | | |
+                         external port
+
 - local-mac-address: mac addr of the ethernet interface
 
 Example:
@@ -43,6 +71,6 @@
 	ethernet@0{
 		compatible = "hisilicon,hns-nic-v1";
 		ae-handle = <&dsaf0>;
-		port-id = <0>;
+		port-idx-in-ae = <0>;
 		local-mac-address = [a2 14 e4 4b 56 76];
 	};
diff --git a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
new file mode 100644
index 0000000..14aa6cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
@@ -0,0 +1,56 @@
+Marvell 8897/8997 (sd8897/sd8997) bluetooth SDIO devices
+------
+
+Required properties:
+
+  - compatible : should be one of the following:
+	* "marvell,sd8897-bt"
+	* "marvell,sd8997-bt"
+
+Optional properties:
+
+  - marvell,cal-data: Calibration data downloaded to the device during
+		      initialization. This is an array of 28 values (u8).
+
+  - marvell,wakeup-pin: wakeup pin number of the bluetooth chip. The firmware
+		        will use this pin to wake up the host system.
+  - marvell,wakeup-gap-ms: wakeup gap represents the wakeup latency of the host
+		      platform. The value will be passed to the firmware. This is
+		      needed for the chip's sleep feature to work as expected.
+  - interrupt-parent: phandle of the parent interrupt controller
+  - interrupts : interrupt pin number to the cpu. The driver will request an irq
+		 based on this interrupt number. During system suspend, the irq
+		 will be enabled so that the bluetooth chip can wake up the host
+		 platform under certain conditions. During system resume, the irq
+		 will be disabled to make sure unnecessary interrupts are not received.
+
+Example:
+
+IRQ pin 119 is used as the system wakeup source interrupt.
+Wakeup pin 13 and a gap of 100ms are configured so that the firmware can wake
+up the host using this device side pin and wakeup latency.
+Calibration data is also included in the example below.
+
+&mmc3 {
+	status = "okay";
+	vmmc-supply = <&wlan_en_reg>;
+	bus-width = <4>;
+	cap-power-off-card;
+	keep-power-in-suspend;
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+	btmrvl: bluetooth@2 {
+		compatible = "marvell,sd8897-bt";
+		reg = <2>;
+		interrupt-parent = <&pio>;
+		interrupts = <119 IRQ_TYPE_LEVEL_LOW>;
+
+		marvell,cal-data = /bits/ 8 <
+			0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
+			0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
+			0x00 0x00 0xf0 0x00>;
+		marvell,wakeup-pin = <0x0d>;
+		marvell,wakeup-gap-ms = <0x64>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 5ca7929..32eaaca 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -9,7 +9,8 @@
 Required properties:
 - compatible: Should be "mediatek,mt7623-eth"
 - reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
+- interrupts: Should contain the three frame engine interrupts in numeric
+	order. These are fe_int0, fe_int1 and fe_int2.
 - clocks: the clock used by the core
 - clock-names: the names of the clock listed in the clocks property. These are
 	"ethif", "esw", "gp2", "gp1"
@@ -42,7 +43,9 @@
 		 <&ethsys CLK_ETHSYS_GP2>,
 		 <&ethsys CLK_ETHSYS_GP1>;
 	clock-names = "ethif", "esw", "gp2", "gp1";
-	interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>;
+	interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
+		      GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
+		      GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
 	power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
 	resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
 	reset-names = "eth";
diff --git a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
new file mode 100644
index 0000000..1dc3bc7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
@@ -0,0 +1,59 @@
+* Microchip ENC28J60
+
+This is a standalone 10 Mbit Ethernet controller with an SPI interface.
+
+For each device connected to a SPI bus, define a child node within
+the SPI master node.
+
+Required properties:
+- compatible: Should be "microchip,enc28j60"
+- reg: Specify the SPI chip select the ENC28J60 is wired to
+- interrupt-parent: Specify the phandle of the source interrupt, see interrupt
+                    binding documentation for details. Usually this is the GPIO bank
+                    the interrupt line is wired to.
+- interrupts: Specify the interrupt index within the interrupt controller (referred
+              to above in interrupt-parent) and interrupt type. The ENC28J60 natively
+              generates falling edge interrupts, however, additional board logic
+              might invert the signal.
+- pinctrl-names: List of assigned state names, see pinctrl binding documentation.
+- pinctrl-0: List of phandles to configure the GPIO pin used as interrupt line,
+             see also generic and your platform specific pinctrl binding
+             documentation.
+
+Optional properties:
+- spi-max-frequency: Maximum frequency of the SPI bus when accessing the ENC28J60.
+  According to the ENC28J60 datasheet, the chip allows a maximum of 20 MHz, however,
+  board designs may need to limit this value.
+- local-mac-address: See ethernet.txt in the same directory.
+
+
+Example (for NXP i.MX28 with pin control stuff for GPIO irq):
+
+        ssp2: ssp@80014000 {
+                compatible = "fsl,imx28-spi";
+                pinctrl-names = "default";
+                pinctrl-0 = <&spi2_pins_b &spi2_sck_cfg>;
+                status = "okay";
+
+                enc28j60: ethernet@0 {
+                        compatible = "microchip,enc28j60";
+                        pinctrl-names = "default";
+                        pinctrl-0 = <&enc28j60_pins>;
+                        reg = <0>;
+                        interrupt-parent = <&gpio3>;
+                        interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+                        spi-max-frequency = <12000000>;
+                };
+        };
+
+        pinctrl@80018000 {
+                enc28j60_pins: enc28j60_pins@0 {
+                        reg = <0>;
+                        fsl,pinmux-ids = <
+                                MX28_PAD_AUART0_RTS__GPIO_3_3    /* Interrupt */
+                        >;
+                        fsl,drive-strength = <MXS_DRIVE_4mA>;
+                        fsl,voltage = <MXS_VOLTAGE_HIGH>;
+                        fsl,pull-up = <MXS_PULL_DISABLE>;
+                };
+        };
diff --git a/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
new file mode 100644
index 0000000..1aea822
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/pn533-i2c.txt
@@ -0,0 +1,31 @@
+* NXP Semiconductors PN532 NFC Controller
+
+Required properties:
+- compatible: Should be "nxp,pn532-i2c" or "nxp,pn533-i2c".
+- clock-frequency: I²C work frequency.
+- reg: address on the bus
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBone with PN532 on I2C2):
+
+&i2c2 {
+
+	status = "okay";
+
+	pn532: pn532@24 {
+
+		compatible = "nxp,pn532-i2c";
+
+		reg = <0x24>;
+		clock-frequency = <400000>;
+
+		interrupt-parent = <&gpio1>;
+		interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
+
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index bc1c3c8..c00a9a8 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -35,6 +35,8 @@
 - broken-turn-around: If set, indicates the PHY device does not correctly
   release the turn around line low at the end of a MDIO transaction.
 
+- reset-gpios: Reference to a GPIO used to reset the phy.
+
 Example:
 
 ethernet-phy@0 {
@@ -42,4 +44,5 @@
 	interrupt-parent = <40000>;
 	interrupts = <35 1>;
 	reg = <0>;
+	reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
 };
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 6605d19..4d302db 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -59,6 +59,8 @@
 	- snps,fb: fixed-burst
 	- snps,mb: mixed-burst
 	- snps,rb: rebuild INCRx Burst
+	- snps,tso: enables the TSO feature; otherwise it is managed by the
+	    MAC HW capability register.
 - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
 
 Examples:
diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt
new file mode 100644
index 0000000..c421aba
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/marvell-sd8xxx.txt
@@ -0,0 +1,63 @@
+Marvell 8897/8997 (sd8897/sd8997) SDIO devices
+------
+
+This node provides properties for controlling the Marvell SDIO wireless device.
+The node is expected to be specified as a child node of the SDIO controller that
+connects the device to the system.
+
+Required properties:
+
+  - compatible : should be one of the following:
+	* "marvell,sd8897"
+	* "marvell,sd8997"
+
+Optional properties:
+
+  - marvell,caldata* : A series of properties with the marvell,caldata prefix
+		      that represent calibration data downloaded to the device
+		      during initialization. Each is an array of unsigned 8-bit
+		      values. The properties must use the property names and
+		      corresponding array lengths listed below:
+	"marvell,caldata-txpwrlimit-2g" (length = 566).
+	"marvell,caldata-txpwrlimit-5g-sub0" (length = 502).
+	"marvell,caldata-txpwrlimit-5g-sub1" (length = 688).
+	"marvell,caldata-txpwrlimit-5g-sub2" (length = 750).
+	"marvell,caldata-txpwrlimit-5g-sub3" (length = 502).
+  - marvell,wakeup-pin : a wakeup pin number of the wifi chip which will be passed
+		      to the firmware. The firmware will wake up the host using
+		      this pin during suspend/resume.
+  - interrupt-parent: phandle of the parent interrupt controller
+  - interrupts : interrupt pin number to the cpu. The driver will request an irq
+		 based on this interrupt number. During system suspend, the irq will
+		 be enabled so that the wifi chip can wake up the host platform under
+		 certain conditions. During system resume, the irq will be disabled
+		 to make sure unnecessary interrupts are not received.
+
+Example:
+
+Tx power limit calibration data is configured in the example below.
+The calibration data is an array of unsigned values; the length
+can vary between hw versions.
+IRQ pin 38 is used as the system wakeup source interrupt. Wakeup pin 3 is
+configured so that the firmware can wake up the host using this device side pin.
+
+&mmc3 {
+	status = "okay";
+	vmmc-supply = <&wlan_en_reg>;
+	bus-width = <4>;
+	cap-power-off-card;
+	keep-power-in-suspend;
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+	mwifiex: wifi@1 {
+		compatible = "marvell,sd8897";
+		reg = <1>;
+		interrupt-parent = <&pio>;
+		interrupts = <38 IRQ_TYPE_LEVEL_LOW>;
+
+		marvell,caldata_00_txpwrlimit_2g_cfg_set = /bits/ 8 <
+	0x01 0x00 0x06 0x00 0x08 0x02 0x89 0x01>;
+		marvell,wakeup-pin = <3>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index 96aae6b..74d7f0a 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -5,12 +5,18 @@
 	* "qcom,ath10k"
 	* "qcom,ipq4019-wifi"
 
-PCI based devices uses compatible string "qcom,ath10k" and takes only
-calibration data via "qcom,ath10k-calibration-data". Rest of the properties
-are not applicable for PCI based devices.
+PCI based devices use the compatible string "qcom,ath10k" and take calibration
+data along with board specific data via "qcom,ath10k-calibration-data".
+The rest of the properties are not applicable to PCI based devices.
 
 AHB based devices (i.e. ipq4019) uses compatible string "qcom,ipq4019-wifi"
-and also uses most of the properties defined in this doc.
+and also uses most of the properties defined in this doc (except
+"qcom,ath10k-calibration-data"). It uses "qcom,ath10k-pre-calibration-data"
+to carry pre calibration data.
+
+In general, the "qcom,ath10k-pre-calibration-data" and
+"qcom,ath10k-calibration-data" entries conflict with each other and only one
+can be provided per device.
 
 Optional properties:
 - reg: Address and length of the register set for the device.
@@ -35,8 +41,11 @@
 - qcom,msi_addr: MSI interrupt address.
 - qcom,msi_base: Base value to add before writing MSI data into
 		MSI address register.
-- qcom,ath10k-calibration-data : calibration data as an array, the
-				 length can vary between hw versions
+- qcom,ath10k-calibration-data : calibration data + board specific data
+				 as an array, the length can vary between
+				 hw versions.
+- qcom,ath10k-pre-calibration-data : pre calibration data as an array,
+				     the length can vary between hw versions.
 
 Example (to supply the calibration data alone):
 
@@ -105,5 +114,5 @@
 			  "legacy";
 	qcom,msi_addr = <0x0b006040>;
 	qcom,msi_base = <0x40>;
-	qcom,ath10k-calibration-data = [ 01 02 03 ... ];
+	qcom,ath10k-pre-calibration-data = [ 01 02 03 ... ];
 };
diff --git a/Documentation/devicetree/bindings/numa.txt b/Documentation/devicetree/bindings/numa.txt
new file mode 100644
index 0000000..21b3505
--- /dev/null
+++ b/Documentation/devicetree/bindings/numa.txt
@@ -0,0 +1,275 @@
+==============================================================================
+NUMA binding description.
+==============================================================================
+
+==============================================================================
+1 - Introduction
+==============================================================================
+
+Systems employing a Non Uniform Memory Access (NUMA) architecture contain
+collections of hardware resources including processors, memory, and I/O buses
+that comprise what is commonly known as a NUMA node.
+Processor accesses to memory within the local NUMA node are generally faster
+than processor accesses to memory outside of the local NUMA node.
+DT defines interfaces that allow the platform to convey NUMA node
+topology information to the OS.
+
+==============================================================================
+2 - numa-node-id
+==============================================================================
+
+For the purpose of identification, each NUMA node is associated with a unique
+token known as a node id. For the purpose of this binding
+a node id is a 32-bit integer.
+
+A device node is associated with a NUMA node by the presence of a
+numa-node-id property which contains the node id of the device.
+
+Example:
+	/* numa node 0 */
+	numa-node-id = <0>;
+
+	/* numa node 1 */
+	numa-node-id = <1>;
+
+==============================================================================
+3 - distance-map
+==============================================================================
+
+The optional device tree node distance-map describes the relative
+distance (memory latency) between all numa nodes.
+
+- compatible : Should at least contain "numa-distance-map-v1".
+
+- distance-matrix
+  This property defines a matrix to describe the relative distances
+  between all numa nodes.
+  It is represented as a list of node pairs and their relative distance.
+
+  Note:
+	1. Each entry represents distance from first node to second node.
+	The distances are equal in either direction.
+	2. The distance from a node to self (local distance) is represented
+	with value 10 and all internode distance should be represented with
+	a value greater than 10.
+	3. distance-matrix should have entries in lexicographical ascending
+	order of nodes.
+	4. There must be only one device node distance-map which must
+	reside in the root node.
+	5. If the distance-map node is not present, a default
+	distance-matrix is used.
+
+Example:
+	4 nodes connected in mesh/ring topology as below,
+
+		0_______20______1
+		|               |
+		|               |
+		20             20
+		|               |
+		|               |
+		|_______________|
+		3       20      2
+
+	if relative distance for each hop is 20,
+	then internode distance would be,
+	      0 -> 1 = 20
+	      1 -> 2 = 20
+	      2 -> 3 = 20
+	      3 -> 0 = 20
+	      0 -> 2 = 40
+	      1 -> 3 = 40
+
+     and dt presentation for this distance matrix is,
+
+		distance-map {
+			 compatible = "numa-distance-map-v1";
+			 distance-matrix = <0 0  10>,
+					   <0 1  20>,
+					   <0 2  40>,
+					   <0 3  20>,
+					   <1 0  20>,
+					   <1 1  10>,
+					   <1 2  20>,
+					   <1 3  40>,
+					   <2 0  40>,
+					   <2 1  20>,
+					   <2 2  10>,
+					   <2 3  20>,
+					   <3 0  20>,
+					   <3 1  40>,
+					   <3 2  20>,
+					   <3 3  10>;
+		};
+
+==============================================================================
+4 - Example dts
+==============================================================================
+
+A dual socket system consisting of 2 boards connected through a ccn bus,
+with each board having one socket/soc of 8 cpus, memory and a pci bus.
+
+	memory@c00000 {
+		device_type = "memory";
+		reg = <0x0 0xc00000 0x0 0x80000000>;
+		/* node 0 */
+		numa-node-id = <0>;
+	};
+
+	memory@10000000000 {
+		device_type = "memory";
+		reg = <0x100 0x0 0x0 0x80000000>;
+		/* node 1 */
+		numa-node-id = <1>;
+	};
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			/* node 0 */
+			numa-node-id = <0>;
+		};
+		cpu@1 {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0x1>;
+			enable-method = "psci";
+			numa-node-id = <0>;
+		};
+		cpu@2 {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0x2>;
+			enable-method = "psci";
+			numa-node-id = <0>;
+		};
+		cpu@3 {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0x3>;
+			enable-method = "psci";
+			numa-node-id = <0>;
+		};
+		cpu@4 {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0x4>;
+			enable-method = "psci";
+			numa-node-id = <0>;
+		};
+		cpu@5 {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0x5>;
+			enable-method = "psci";
+			numa-node-id = <0>;
+		};
+		cpu@6 {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0x6>;
+			enable-method = "psci";
+			numa-node-id = <0>;
+		};
+		cpu@7 {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0x7>;
+			enable-method = "psci";
+			numa-node-id = <0>;
+		};
+		cpu@8 {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0x8>;
+			enable-method = "psci";
+			/* node 1 */
+			numa-node-id = <1>;
+		};
+		cpu@9 {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0x9>;
+			enable-method = "psci";
+			numa-node-id = <1>;
+		};
+		cpu@a {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0xa>;
+			enable-method = "psci";
+			numa-node-id = <1>;
+		};
+		cpu@b {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0xb>;
+			enable-method = "psci";
+			numa-node-id = <1>;
+		};
+		cpu@c {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0xc>;
+			enable-method = "psci";
+			numa-node-id = <1>;
+		};
+		cpu@d {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0xd>;
+			enable-method = "psci";
+			numa-node-id = <1>;
+		};
+		cpu@e {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0xe>;
+			enable-method = "psci";
+			numa-node-id = <1>;
+		};
+		cpu@f {
+			device_type = "cpu";
+			compatible =  "arm,armv8";
+			reg = <0x0 0xf>;
+			enable-method = "psci";
+			numa-node-id = <1>;
+		};
+	};
+
+	pcie0: pcie0@848000000000 {
+		compatible = "arm,armv8";
+		device_type = "pci";
+		bus-range = <0 255>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		reg = <0x8480 0x00000000 0 0x10000000>;  /* Configuration space */
+		ranges = <0x03000000 0x8010 0x00000000 0x8010 0x00000000 0x70 0x00000000>;
+		/* node 0 */
+		numa-node-id = <0>;
+        };
+
+	pcie1: pcie1@948000000000 {
+		compatible = "arm,armv8";
+		device_type = "pci";
+		bus-range = <0 255>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		reg = <0x9480 0x00000000 0 0x10000000>;  /* Configuration space */
+		ranges = <0x03000000 0x9010 0x00000000 0x9010 0x00000000 0x70 0x00000000>;
+		/* node 1 */
+		numa-node-id = <1>;
+        };
+
+	distance-map {
+		compatible = "numa-distance-map-v1";
+		distance-matrix = <0 0 10>,
+				  <0 1 20>,
+				  <1 1 10>;
+	};
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index 3be80c6..83aeb1f 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -4,8 +4,8 @@
 and thus inherits all the common properties defined in designware-pcie.txt.
 
 Required properties:
-- compatible: "fsl,imx6q-pcie"
-- reg: base addresse and length of the pcie controller
+- compatible: "fsl,imx6q-pcie", "fsl,imx6sx-pcie", "fsl,imx6qp-pcie"
+- reg: base address and length of the PCIe controller
 - interrupts: A list of interrupt outputs of the controller. Must contain an
   entry for each entry in the interrupt-names property.
 - interrupt-names: Must include the following entries:
@@ -19,6 +19,20 @@
 - fsl,tx-deemph-gen2-6db: Gen2 (6db) De-emphasis value. Default: 20
 - fsl,tx-swing-full: Gen2 TX SWING FULL value. Default: 127
 - fsl,tx-swing-low: TX launch amplitude swing_low value. Default: 127
+- fsl,max-link-speed: Specify PCI gen for link capability. Must be '2' for
+  gen2, otherwise will default to gen1. Note that the IMX6 LVDS clock outputs
+  do not meet gen2 jitter requirements and thus for gen2 capability a gen2
+  compliant clock generator should be used and configured.
+- reset-gpio: Should specify the GPIO for controlling the PCI bus device reset
+  signal. It's not polarity aware and defaults to active-low reset sequence
+  (L=reset state, H=operation state).
+- reset-gpio-active-high: If present then the reset sequence using the GPIO
+  specified in the "reset-gpio" property is reversed (H=reset state,
+  L=operation state).
+
+Additional required properties for imx6sx-pcie:
+- clock-names: Must include the following additional entries:
+	- "pcie_inbound_axi"
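+
+The optional reset/link-speed properties would typically appear as follows in a
+board DTS (the GPIO shown is an illustrative assumption, not mandated by this
+binding):
+
+	fsl,max-link-speed = <2>;
+	reset-gpio = <&gpio7 12 GPIO_ACTIVE_LOW>;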
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
index 75321ae..b8cc395 100644
--- a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
@@ -60,11 +60,14 @@
   - afi
   - pcie_x
 
-Required properties on Tegra124 and later:
+Required properties on Tegra124 and later (deprecated):
 - phys: Must contain an entry for each entry in phy-names.
 - phy-names: Must include the following entries:
   - pcie
 
+These properties are deprecated in favour of per-lane PHYs defined in each of
+the root ports (see below).
+
 Power supplies for Tegra20:
 - avdd-pex-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
 - vdd-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
@@ -122,11 +125,22 @@
   - Root port 0 uses 4 lanes, root port 1 is unused.
   - Both root ports use 2 lanes.
 
-Example:
+Required properties for Tegra124 and later:
+- phys: Must contain a phandle to a PHY for each entry in phy-names.
+- phy-names: Must include an entry for each active lane. Note that the number
+  of entries does not have to (though usually will) be equal to the specified
+  number of lanes in the nvidia,num-lanes property. Entries are of the form
+  "pcie-N": where N ranges from 0 to the value specified in nvidia,num-lanes.
+
+Examples:
+=========
+
+Tegra20:
+--------
 
 SoC DTSI:
 
-	pcie-controller {
+	pcie-controller@80003000 {
 		compatible = "nvidia,tegra20-pcie";
 		device_type = "pci";
 		reg = <0x80003000 0x00000800   /* PADS registers */
@@ -186,10 +200,9 @@
 		};
 	};
 
-
 Board DTS:
 
-	pcie-controller {
+	pcie-controller@80003000 {
 		status = "okay";
 
 		vdd-supply = <&pci_vdd_reg>;
@@ -222,3 +235,204 @@
 device nodes need to be added in order to allow the bus' children to be
 instantiated at the proper location in the operating system's device tree (as
 illustrated by the optional nodes in the example above).
+
+Tegra30:
+--------
+
+SoC DTSI:
+
+	pcie-controller@00003000 {
+		compatible = "nvidia,tegra30-pcie";
+		device_type = "pci";
+		reg = <0x00003000 0x00000800   /* PADS registers */
+		       0x00003800 0x00000200   /* AFI registers */
+		       0x10000000 0x10000000>; /* configuration space */
+		reg-names = "pads", "afi", "cs";
+		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH   /* controller interrupt */
+			      GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */
+		interrupt-names = "intr", "msi";
+
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &intc GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+
+		bus-range = <0x00 0xff>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+
+		ranges = <0x82000000 0 0x00000000 0x00000000 0 0x00001000   /* port 0 configuration space */
+			  0x82000000 0 0x00001000 0x00001000 0 0x00001000   /* port 1 configuration space */
+			  0x82000000 0 0x00004000 0x00004000 0 0x00001000   /* port 2 configuration space */
+			  0x81000000 0 0          0x02000000 0 0x00010000   /* downstream I/O */
+			  0x82000000 0 0x20000000 0x20000000 0 0x08000000   /* non-prefetchable memory */
+			  0xc2000000 0 0x28000000 0x28000000 0 0x18000000>; /* prefetchable memory */
+
+		clocks = <&tegra_car TEGRA30_CLK_PCIE>,
+			 <&tegra_car TEGRA30_CLK_AFI>,
+			 <&tegra_car TEGRA30_CLK_PLL_E>,
+			 <&tegra_car TEGRA30_CLK_CML0>;
+		clock-names = "pex", "afi", "pll_e", "cml";
+		resets = <&tegra_car 70>,
+			 <&tegra_car 72>,
+			 <&tegra_car 74>;
+		reset-names = "pex", "afi", "pcie_x";
+		status = "disabled";
+
+		pci@1,0 {
+			device_type = "pci";
+			assigned-addresses = <0x82000800 0 0x00000000 0 0x1000>;
+			reg = <0x000800 0 0 0 0>;
+			status = "disabled";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			nvidia,num-lanes = <2>;
+		};
+
+		pci@2,0 {
+			device_type = "pci";
+			assigned-addresses = <0x82001000 0 0x00001000 0 0x1000>;
+			reg = <0x001000 0 0 0 0>;
+			status = "disabled";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			nvidia,num-lanes = <2>;
+		};
+
+		pci@3,0 {
+			device_type = "pci";
+			assigned-addresses = <0x82001800 0 0x00004000 0 0x1000>;
+			reg = <0x001800 0 0 0 0>;
+			status = "disabled";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			nvidia,num-lanes = <2>;
+		};
+	};
+
+Board DTS:
+
+	pcie-controller@00003000 {
+		status = "okay";
+
+		avdd-pexa-supply = <&ldo1_reg>;
+		vdd-pexa-supply = <&ldo1_reg>;
+		avdd-pexb-supply = <&ldo1_reg>;
+		vdd-pexb-supply = <&ldo1_reg>;
+		avdd-pex-pll-supply = <&ldo1_reg>;
+		avdd-plle-supply = <&ldo1_reg>;
+		vddio-pex-ctl-supply = <&sys_3v3_reg>;
+		hvdd-pex-supply = <&sys_3v3_pexs_reg>;
+
+		pci@1,0 {
+			status = "okay";
+		};
+
+		pci@3,0 {
+			status = "okay";
+		};
+	};
+
+Tegra124:
+---------
+
+SoC DTSI:
+
+	pcie-controller@01003000 {
+		compatible = "nvidia,tegra124-pcie";
+		device_type = "pci";
+		reg = <0x0 0x01003000 0x0 0x00000800   /* PADS registers */
+		       0x0 0x01003800 0x0 0x00000800   /* AFI registers */
+		       0x0 0x02000000 0x0 0x10000000>; /* configuration space */
+		reg-names = "pads", "afi", "cs";
+		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
+			     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */
+		interrupt-names = "intr", "msi";
+
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+
+		bus-range = <0x00 0xff>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+
+		ranges = <0x82000000 0 0x01000000 0x0 0x01000000 0 0x00001000   /* port 0 configuration space */
+			  0x82000000 0 0x01001000 0x0 0x01001000 0 0x00001000   /* port 1 configuration space */
+			  0x81000000 0 0x0        0x0 0x12000000 0 0x00010000   /* downstream I/O (64 KiB) */
+			  0x82000000 0 0x13000000 0x0 0x13000000 0 0x0d000000   /* non-prefetchable memory (208 MiB) */
+			  0xc2000000 0 0x20000000 0x0 0x20000000 0 0x20000000>; /* prefetchable memory (512 MiB) */
+
+		clocks = <&tegra_car TEGRA124_CLK_PCIE>,
+			 <&tegra_car TEGRA124_CLK_AFI>,
+			 <&tegra_car TEGRA124_CLK_PLL_E>,
+			 <&tegra_car TEGRA124_CLK_CML0>;
+		clock-names = "pex", "afi", "pll_e", "cml";
+		resets = <&tegra_car 70>,
+			 <&tegra_car 72>,
+			 <&tegra_car 74>;
+		reset-names = "pex", "afi", "pcie_x";
+		status = "disabled";
+
+		pci@1,0 {
+			device_type = "pci";
+			assigned-addresses = <0x82000800 0 0x01000000 0 0x1000>;
+			reg = <0x000800 0 0 0 0>;
+			status = "disabled";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			nvidia,num-lanes = <2>;
+		};
+
+		pci@2,0 {
+			device_type = "pci";
+			assigned-addresses = <0x82001000 0 0x01001000 0 0x1000>;
+			reg = <0x001000 0 0 0 0>;
+			status = "disabled";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			nvidia,num-lanes = <1>;
+		};
+	};
+
+Board DTS:
+
+	pcie-controller@01003000 {
+		status = "okay";
+
+		avddio-pex-supply = <&vdd_1v05_run>;
+		dvddio-pex-supply = <&vdd_1v05_run>;
+		avdd-pex-pll-supply = <&vdd_1v05_run>;
+		hvdd-pex-supply = <&vdd_3v3_lp0>;
+		hvdd-pex-pll-e-supply = <&vdd_3v3_lp0>;
+		vddio-pex-ctl-supply = <&vdd_3v3_lp0>;
+		avdd-pll-erefe-supply = <&avdd_1v05_run>;
+
+		/* Mini PCIe */
+		pci@1,0 {
+			phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-4}>;
+			phy-names = "pcie-0";
+			status = "okay";
+		};
+
+		/* Gigabit Ethernet */
+		pci@2,0 {
+			phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-2}>;
+			phy-names = "pcie-0";
+			status = "okay";
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pci/pci-armada8k.txt b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
new file mode 100644
index 0000000..598533a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-armada8k.txt
@@ -0,0 +1,38 @@
+* Marvell Armada 7K/8K PCIe interface
+
+This PCIe host controller is based on the Synopsys DesignWare PCIe IP
+and thus inherits all the common properties defined in designware-pcie.txt.
+
+Required properties:
+- compatible: "marvell,armada8k-pcie"
+- reg: must contain two register regions
+   - the control register region
+   - the config space region
+- reg-names:
+   - "ctrl" for the control register region
+   - "config" for the config space region
+- interrupts: Interrupt specifier for the PCIe controller
+- clocks: reference to the PCIe controller clock
+
+Example:
+
+	pcie@f2600000 {
+		compatible = "marvell,armada8k-pcie", "snps,dw-pcie";
+		reg = <0 0xf2600000 0 0x10000>, <0 0xf6f00000 0 0x80000>;
+		reg-names = "ctrl", "config";
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		device_type = "pci";
+		dma-coherent;
+
+		bus-range = <0 0xff>;
+		ranges = <0x81000000 0 0xf9000000 0  0xf9000000 0 0x10000	/* downstream I/O */
+			  0x82000000 0 0xf6000000 0  0xf6000000 0 0xf00000>;	/* non-prefetchable memory */
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+		num-lanes = <1>;
+		clocks = <&cpm_syscon0 1 13>;
+		status = "disabled";
+	};
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
index 54eae29..d08a4d5 100644
--- a/Documentation/devicetree/bindings/pci/pci-keystone.txt
+++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt
@@ -56,6 +56,7 @@
 	phy-names: name of the Generic Keystine SerDes phy for PCI
 	  - If boot loader already does PCI link establishment, then phys and
 	    phy-names shouldn't be present.
+	interrupts: platform interrupt for error interrupts.
 
 Designware DT Properties not applicable for Keystone PCI
 
diff --git a/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt b/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
new file mode 100644
index 0000000..0bf1ae2
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
@@ -0,0 +1,733 @@
+Device tree binding for NVIDIA Tegra XUSB pad controller
+========================================================
+
+The Tegra XUSB pad controller manages a set of I/O lanes (with differential
+signals) which connect directly to pins/pads on the SoC package. Each lane
+is controlled by a HW block referred to as a "pad" in the Tegra hardware
+documentation. Each such "pad" may control either one or multiple lanes,
+and thus contains any logic common to all its lanes. Each lane can be
+separately configured and powered up.
+
+Some of the lanes are high-speed lanes, which can be used for PCIe, SATA or
+super-speed USB. Other lanes are for various types of low-speed, full-speed
+or high-speed USB (such as UTMI, ULPI and HSIC). The XUSB pad controller
+contains a software-configurable mux that sits between the I/O controller
+ports (e.g. PCIe) and the lanes.
+
+In addition to per-lane configuration, USB 3.0 ports may require additional
+settings on a per-board basis.
+
+Pads will be represented as children of the top-level XUSB pad controller
+device tree node. Each lane exposed by the pad will be represented by its
+own subnode and can be referenced by users of the lane using the standard
+PHY bindings, as described by the phy-bindings.txt file in this directory.
+
+The Tegra hardware documentation refers to the connection between the XUSB
+pad controller and the XUSB controller as "ports". This is confusing since
+"port" is typically used to denote the physical USB receptacle. The device
+tree binding in this document uses the term "port" to refer to the logical
+abstraction of the signals that are routed to a USB receptacle (i.e. a PHY
+for the USB signal, the VBUS power supply, the USB 2.0 companion port for
+USB 3.0 receptacles, ...).
+
+Required properties:
+--------------------
+- compatible: Must be:
+  - Tegra124: "nvidia,tegra124-xusb-padctl"
+  - Tegra132: "nvidia,tegra132-xusb-padctl", "nvidia,tegra124-xusb-padctl"
+  - Tegra210: "nvidia,tegra210-xusb-padctl"
+- reg: Physical base address and length of the controller's registers.
+- resets: Must contain an entry for each entry in reset-names.
+- reset-names: Must include the following entries:
+  - "padctl"
+
+
+Pad nodes:
+==========
+
+A required child node named "pads" contains a list of subnodes, one for each
+of the pads exposed by the XUSB pad controller. Each pad may need additional
+resources that can be referenced in its pad node.
+
+The "status" property is used to enable or disable the use of a pad. If set
+to "disabled", the pad will not be used on the given board. In order to use
+the pad and any of its lanes, this property must be set to "okay".
+
+For Tegra124 and Tegra132, the following pads exist: usb2, ulpi, hsic, pcie
+and sata. No extra resources are required for operation of these pads.
+
+For Tegra210, the following pads exist: usb2, hsic, pcie and sata. Below is
+a description of the properties of each pad.
+
+UTMI pad:
+---------
+
+Required properties:
+- clocks: Must contain an entry for each entry in clock-names.
+- clock-names: Must contain the following entries:
+  - "trk": phandle and specifier referring to the USB2 tracking clock
+
+HSIC pad:
+---------
+
+Required properties:
+- clocks: Must contain an entry for each entry in clock-names.
+- clock-names: Must contain the following entries:
+  - "trk": phandle and specifier referring to the HSIC tracking clock
+
+PCIe pad:
+---------
+
+Required properties:
+- clocks: Must contain an entry for each entry in clock-names.
+- clock-names: Must contain the following entries:
+  - "pll": phandle and specifier referring to the PLLE
+- resets: Must contain an entry for each entry in reset-names.
+- reset-names: Must contain the following entries:
+  - "phy": reset for the PCIe UPHY block
+
+SATA pad:
+---------
+
+Required properties:
+- resets: Must contain an entry for each entry in reset-names.
+- reset-names: Must contain the following entries:
+  - "phy": reset for the SATA UPHY block
+
+
+PHY nodes:
+==========
+
+Each pad node has a child named "lanes" that contains one or more children of
+its own, each representing one of the lanes controlled by the pad.
+
+Required properties:
+--------------------
+- status: Defines the operation status of the PHY. Valid values are:
+  - "disabled": the PHY is disabled
+  - "okay": the PHY is enabled
+- #phy-cells: Should be 0. Since each lane represents a single PHY, there is
+  no need for an additional specifier.
+- nvidia,function: The output function of the PHY. See below for a list of
+  valid functions per SoC generation.
+
+For Tegra124 and Tegra132, the list of valid PHY nodes is given below:
+- usb2: usb2-0, usb2-1, usb2-2
+  - functions: "snps", "xusb", "uart"
+- ulpi: ulpi-0
+  - functions: "snps", "xusb"
+- hsic: hsic-0, hsic-1
+  - functions: "snps", "xusb"
+- pcie: pcie-0, pcie-1, pcie-2, pcie-3, pcie-4
+  - functions: "pcie", "usb3-ss"
+- sata: sata-0
+  - functions: "usb3-ss", "sata"
+
+For Tegra210, the list of valid PHY nodes is given below:
+- utmi: utmi-0, utmi-1, utmi-2, utmi-3
+  - functions: "snps", "xusb", "uart"
+- hsic: hsic-0, hsic-1
+  - functions: "snps", "xusb"
+- pcie: pcie-0, pcie-1, pcie-2, pcie-3, pcie-4, pcie-5, pcie-6
+  - functions: "pcie-x1", "usb3-ss", "pcie-x4"
+- sata: sata-0
+  - functions: "usb3-ss", "sata"
+
+
+Port nodes:
+===========
+
+A required child node named "ports" contains a list of all the ports exposed
+by the XUSB pad controller. Per-port configuration is only required for USB.
+
+USB2 ports:
+-----------
+
+Required properties:
+- status: Defines the operation status of the port. Valid values are:
+  - "disabled": the port is disabled
+  - "okay": the port is enabled
+- mode: A string that determines the mode in which to run the port. Valid
+  values are:
+  - "host": for USB host mode
+  - "device": for USB device mode
+  - "otg": for USB OTG mode
+
+Optional properties:
+- nvidia,internal: A boolean property whose presence determines that a port
+  is internal. In the absence of this property the port is considered to be
+  external.
+- vbus-supply: phandle to a regulator supplying the VBUS voltage.
+
+ULPI ports:
+-----------
+
+Optional properties:
+- status: Defines the operation status of the port. Valid values are:
+  - "disabled": the port is disabled
+  - "okay": the port is enabled
+- nvidia,internal: A boolean property whose presence determines that a port
+  is internal. In the absence of this property the port is considered to be
+  external.
+- vbus-supply: phandle to a regulator supplying the VBUS voltage.
+
+HSIC ports:
+-----------
+
+Required properties:
+- status: Defines the operation status of the port. Valid values are:
+  - "disabled": the port is disabled
+  - "okay": the port is enabled
+
+Optional properties:
+- vbus-supply: phandle to a regulator supplying the VBUS voltage.
+
+Super-speed USB ports:
+----------------------
+
+Required properties:
+- status: Defines the operation status of the port. Valid values are:
+  - "disabled": the port is disabled
+  - "okay": the port is enabled
+- nvidia,usb2-companion: A single cell that specifies the physical port number
+  to map this super-speed USB port to. The range of valid port numbers varies
+  with the SoC generation:
+  - 0-2: for Tegra124 and Tegra132
+  - 0-3: for Tegra210
+
+Optional properties:
+- nvidia,internal: A boolean property whose presence determines that a port
+  is internal. In the absence of this property the port is considered to be
+  external.
+
+For Tegra124 and Tegra132, the XUSB pad controller exposes the following
+ports:
+- 3x USB2: usb2-0, usb2-1, usb2-2
+- 1x ULPI: ulpi-0
+- 2x HSIC: hsic-0, hsic-1
+- 2x super-speed USB: usb3-0, usb3-1
+
+For Tegra210, the XUSB pad controller exposes the following ports:
+- 4x USB2: usb2-0, usb2-1, usb2-2, usb2-3
+- 2x HSIC: hsic-0, hsic-1
+- 4x super-speed USB: usb3-0, usb3-1, usb3-2, usb3-3
+
+
+Examples:
+=========
+
+Tegra124 and Tegra132:
+----------------------
+
+SoC include:
+
+	padctl@7009f000 {
+		/* for Tegra124 */
+		compatible = "nvidia,tegra124-xusb-padctl";
+		/* for Tegra132 */
+		compatible = "nvidia,tegra132-xusb-padctl",
+			     "nvidia,tegra124-xusb-padctl";
+		reg = <0x0 0x7009f000 0x0 0x1000>;
+		resets = <&tegra_car 142>;
+		reset-names = "padctl";
+
+		pads {
+			usb2 {
+				status = "disabled";
+
+				lanes {
+					usb2-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					usb2-1 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					usb2-2 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			ulpi {
+				status = "disabled";
+
+				lanes {
+					ulpi-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			hsic {
+				status = "disabled";
+
+				lanes {
+					hsic-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					hsic-1 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			pcie {
+				status = "disabled";
+
+				lanes {
+					pcie-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-1 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-2 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-3 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-4 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			sata {
+				status = "disabled";
+
+				lanes {
+					sata-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+		};
+
+		ports {
+			usb2-0 {
+				status = "disabled";
+			};
+
+			usb2-1 {
+				status = "disabled";
+			};
+
+			usb2-2 {
+				status = "disabled";
+			};
+
+			ulpi-0 {
+				status = "disabled";
+			};
+
+			hsic-0 {
+				status = "disabled";
+			};
+
+			hsic-1 {
+				status = "disabled";
+			};
+
+			usb3-0 {
+				status = "disabled";
+			};
+
+			usb3-1 {
+				status = "disabled";
+			};
+		};
+	};
+
+Board file:
+
+	padctl@7009f000 {
+		status = "okay";
+
+		pads {
+			usb2 {
+				status = "okay";
+
+				lanes {
+					usb2-0 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-1 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-2 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+				};
+			};
+
+			pcie {
+				status = "okay";
+
+				lanes {
+					pcie-0 {
+						nvidia,function = "usb3-ss";
+						status = "okay";
+					};
+
+					pcie-2 {
+						nvidia,function = "pcie";
+						status = "okay";
+					};
+
+					pcie-4 {
+						nvidia,function = "pcie";
+						status = "okay";
+					};
+				};
+			};
+
+			sata {
+				status = "okay";
+
+				lanes {
+					sata-0 {
+						nvidia,function = "sata";
+						status = "okay";
+					};
+				};
+			};
+		};
+
+		ports {
+			/* Micro A/B */
+			usb2-0 {
+				status = "okay";
+				mode = "otg";
+			};
+
+			/* Mini PCIe */
+			usb2-1 {
+				status = "okay";
+				mode = "host";
+			};
+
+			/* USB3 */
+			usb2-2 {
+				status = "okay";
+				mode = "host";
+
+				vbus-supply = <&vdd_usb3_vbus>;
+			};
+
+			usb3-0 {
+				nvidia,port = <2>;
+				status = "okay";
+			};
+		};
+	};
+
+Tegra210:
+---------
+
+SoC include:
+
+	padctl@7009f000 {
+		compatible = "nvidia,tegra210-xusb-padctl";
+		reg = <0x0 0x7009f000 0x0 0x1000>;
+		resets = <&tegra_car 142>;
+		reset-names = "padctl";
+
+		status = "disabled";
+
+		pads {
+			usb2 {
+				clocks = <&tegra_car TEGRA210_CLK_USB2_TRK>;
+				clock-names = "trk";
+				status = "disabled";
+
+				lanes {
+					usb2-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					usb2-1 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					usb2-2 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					usb2-3 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			hsic {
+				clocks = <&tegra_car TEGRA210_CLK_HSIC_TRK>;
+				clock-names = "trk";
+				status = "disabled";
+
+				lanes {
+					hsic-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					hsic-1 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			pcie {
+				clocks = <&tegra_car TEGRA210_CLK_PLL_E>;
+				clock-names = "pll";
+				resets = <&tegra_car 205>;
+				reset-names = "phy";
+				status = "disabled";
+
+				lanes {
+					pcie-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-1 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-2 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-3 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-4 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-5 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+
+					pcie-6 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+
+			sata {
+				clocks = <&tegra_car TEGRA210_CLK_PLL_E>;
+				clock-names = "pll";
+				resets = <&tegra_car 204>;
+				reset-names = "phy";
+				status = "disabled";
+
+				lanes {
+					sata-0 {
+						status = "disabled";
+						#phy-cells = <0>;
+					};
+				};
+			};
+		};
+
+		ports {
+			usb2-0 {
+				status = "disabled";
+			};
+
+			usb2-1 {
+				status = "disabled";
+			};
+
+			usb2-2 {
+				status = "disabled";
+			};
+
+			usb2-3 {
+				status = "disabled";
+			};
+
+			hsic-0 {
+				status = "disabled";
+			};
+
+			hsic-1 {
+				status = "disabled";
+			};
+
+			usb3-0 {
+				status = "disabled";
+			};
+
+			usb3-1 {
+				status = "disabled";
+			};
+
+			usb3-2 {
+				status = "disabled";
+			};
+
+			usb3-3 {
+				status = "disabled";
+			};
+		};
+	};
+
+Board file:
+
+	padctl@7009f000 {
+		status = "okay";
+
+		pads {
+			usb2 {
+				status = "okay";
+
+				lanes {
+					usb2-0 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-1 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-2 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+
+					usb2-3 {
+						nvidia,function = "xusb";
+						status = "okay";
+					};
+				};
+			};
+
+			pcie {
+				status = "okay";
+
+				lanes {
+					pcie-0 {
+						nvidia,function = "pcie-x1";
+						status = "okay";
+					};
+
+					pcie-1 {
+						nvidia,function = "pcie-x4";
+						status = "okay";
+					};
+
+					pcie-2 {
+						nvidia,function = "pcie-x4";
+						status = "okay";
+					};
+
+					pcie-3 {
+						nvidia,function = "pcie-x4";
+						status = "okay";
+					};
+
+					pcie-4 {
+						nvidia,function = "pcie-x4";
+						status = "okay";
+					};
+
+					pcie-5 {
+						nvidia,function = "usb3-ss";
+						status = "okay";
+					};
+
+					pcie-6 {
+						nvidia,function = "usb3-ss";
+						status = "okay";
+					};
+				};
+			};
+
+			sata {
+				status = "okay";
+
+				lanes {
+					sata-0 {
+						nvidia,function = "sata";
+						status = "okay";
+					};
+				};
+			};
+		};
+
+		ports {
+			usb2-0 {
+				status = "okay";
+				mode = "otg";
+			};
+
+			usb2-1 {
+				status = "okay";
+				vbus-supply = <&vdd_5v0_rtl>;
+				mode = "host";
+			};
+
+			usb2-2 {
+				status = "okay";
+				vbus-supply = <&vdd_usb_vbus>;
+				mode = "host";
+			};
+
+			usb2-3 {
+				status = "okay";
+				mode = "host";
+			};
+
+			usb3-0 {
+				status = "okay";
+				nvidia,lanes = "pcie-6";
+				nvidia,port = <1>;
+			};
+
+			usb3-1 {
+				status = "okay";
+				nvidia,lanes = "pcie-5";
+				nvidia,port = <2>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/phy/phy-lpc18xx-usb-otg.txt b/Documentation/devicetree/bindings/phy/phy-lpc18xx-usb-otg.txt
index bd61b46..3bb821c 100644
--- a/Documentation/devicetree/bindings/phy/phy-lpc18xx-usb-otg.txt
+++ b/Documentation/devicetree/bindings/phy/phy-lpc18xx-usb-otg.txt
@@ -18,7 +18,7 @@
 	compatible = "nxp,lpc1850-creg", "syscon", "simple-mfd";
 	reg = <0x40043000 0x1000>;
 
-	usb0_otg_phy: phy@004 {
+	usb0_otg_phy: phy {
 		compatible = "nxp,lpc1850-usb-otg-phy";
 		clocks = <&ccu1 CLK_USB0>;
 		#phy-cells = <0>;
diff --git a/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
index 50c4f9b..e3b4809 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
@@ -8,15 +8,19 @@
 	of memory mapped region.
 - clock-names: from common clock binding:
 	Required elements: "24m"
-- rockchip,grf: phandle to the syscon managing the "general register files"
 - #phy-cells : from the generic PHY bindings, must be 0;
 
 Example:
 
-edp_phy: edp-phy {
-	compatible = "rockchip,rk3288-dp-phy";
-	rockchip,grf = <&grf>;
-	clocks = <&cru SCLK_EDP_24M>;
-	clock-names = "24m";
-	#phy-cells = <0>;
+grf: syscon@ff770000 {
+	compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";
+
+...
+
+	edp_phy: edp-phy {
+		compatible = "rockchip,rk3288-dp-phy";
+		clocks = <&cru SCLK_EDP_24M>;
+		clock-names = "24m";
+		#phy-cells = <0>;
+	};
 };
diff --git a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
index 61916f1..555cb0f 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
@@ -3,17 +3,23 @@
 
 Required properties:
  - compatible: rockchip,rk3399-emmc-phy
- - rockchip,grf : phandle to the syscon managing the "general
-   register files"
  - #phy-cells: must be 0
- - reg: PHY configure reg address offset in "general
+ - reg: PHY register address offset and length in "general
    register files"
 
 Example:
 
-emmcphy: phy {
-	compatible = "rockchip,rk3399-emmc-phy";
-	rockchip,grf = <&grf>;
-	reg = <0xf780>;
-	#phy-cells = <0>;
+
+grf: syscon@ff770000 {
+	compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+...
+
+	emmcphy: phy@f780 {
+		compatible = "rockchip,rk3399-emmc-phy";
+		reg = <0xf780 0x20>;
+		#phy-cells = <0>;
+	};
 };
diff --git a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
index 08a4a32..0326154 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
@@ -134,12 +134,12 @@
 mfio81		dreq0, mips_trace_data, eth_debug
 mfio82		dreq1, mips_trace_data, eth_debug
 mfio83		mips_pll_lock, mips_trace_data, usb_debug
-mfio84		sys_pll_lock, mips_trace_data, usb_debug
-mfio85		wifi_pll_lock, mips_trace_data, sdhost_debug
-mfio86		bt_pll_lock, mips_trace_data, sdhost_debug
-mfio87		rpu_v_pll_lock, dreq2, socif_debug
-mfio88		rpu_l_pll_lock, dreq3, socif_debug
-mfio89		audio_pll_lock, dreq4, dreq5
+mfio84		audio_pll_lock, mips_trace_data, usb_debug
+mfio85		rpu_v_pll_lock, mips_trace_data, sdhost_debug
+mfio86		rpu_l_pll_lock, mips_trace_data, sdhost_debug
+mfio87		sys_pll_lock, dreq2, socif_debug
+mfio88		wifi_pll_lock, dreq3, socif_debug
+mfio89		bt_pll_lock, dreq4, dreq5
 tck
 trstn
 tdi
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 3f6a524..32f4a2d 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -1,13 +1,16 @@
 == Amlogic Meson pinmux controller ==
 
 Required properties for the root node:
- - compatible: "amlogic,meson8-pinctrl" or "amlogic,meson8b-pinctrl"
+ - compatible: one of "amlogic,meson8-cbus-pinctrl"
+		      "amlogic,meson8b-cbus-pinctrl"
+		      "amlogic,meson8-aobus-pinctrl"
+		      "amlogic,meson8b-aobus-pinctrl"
  - reg: address and size of registers controlling irq functionality
 
 === GPIO sub-nodes ===
 
-The 2 power domains of the controller (regular and always-on) are
-represented as sub-nodes and each of them acts as a GPIO controller.
+The GPIO bank for the controller is represented as a sub-node and it acts as a
+GPIO controller.
 
 Required properties for sub-nodes are:
  - reg: should contain address and size for mux, pull-enable, pull and
@@ -18,10 +21,6 @@
  - gpio-controller: identifies the node as a gpio controller
  - #gpio-cells: must be 2
 
-Valid sub-node names are:
- - "banks" for the regular domain
- - "ao-bank" for the always-on domain
-
 === Other sub-nodes ===
 
 Child nodes without the "gpio-controller" represent some desired
@@ -45,7 +44,7 @@
 === Example ===
 
 	pinctrl: pinctrl@c1109880 {
-		compatible = "amlogic,meson8-pinctrl";
+		compatible = "amlogic,meson8-cbus-pinctrl";
 		reg = <0xc1109880 0x10>;
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -61,15 +60,6 @@
 			#gpio-cells = <2>;
                };
 
-		gpio_ao: ao-bank@c1108030 {
-			reg = <0xc8100014 0x4>,
-			      <0xc810002c 0x4>,
-			      <0xc8100024 0x8>;
-			reg-names = "mux", "pull", "gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
 		nand {
 			mux {
 				groups = "nand_io", "nand_io_ce0", "nand_io_ce1",
@@ -79,18 +69,4 @@
 				function = "nand";
 			};
 		};
-
-		uart_ao_a {
-			mux {
-				groups = "uart_tx_ao_a", "uart_rx_ao_a",
-					 "uart_cts_ao_a", "uart_rts_ao_a";
-				function = "uart_ao";
-			};
-
-			conf {
-				pins = "GPIOAO_0", "GPIOAO_1",
-				       "GPIOAO_2", "GPIOAO_3";
-				bias-disable;
-			};
-		};
 	};
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt
index 30676de..8a6223d 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt
@@ -1,6 +1,12 @@
 Device tree binding for NVIDIA Tegra XUSB pad controller
 ========================================================
 
+NOTE: It turns out that this binding isn't an accurate description of the XUSB
+pad controller. While the description is good enough for the functional subset
+required for PCIe and SATA, it lacks the flexibility to represent the features
+needed for USB. For the new binding, see ../phy/nvidia,tegra-xusb-padctl.txt.
+The binding described in this file is deprecated and should not be used.
+
 The Tegra XUSB pad controller manages a set of lanes, each of which can be
 assigned to one out of a set of different pads. Some of these pads have an
 associated PHY that must be powered up before the pad can be used.
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index ffadb7a..74e6ec0 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -72,8 +72,8 @@
 
 The pin configuration parameters use the generic pinconf bindings defined in
 pinctrl-bindings.txt in this directory. The supported parameters are
-bias-disable, bias-pull-up, bias-pull-down and power-source. For pins that
-have a configurable I/O voltage, the power-source value should be the
+bias-disable, bias-pull-up, bias-pull-down, drive-strength and power-source. For
+pins that have a configurable I/O voltage, the power-source value should be the
 nominal I/O voltage in millivolts.
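+
+As a hypothetical illustration (the pin names and values below are purely
+illustrative, not taken from a real board), a pin configuration node using
+both of these parameters could look like:
+
+	sdhi0_pins_uhs: sd0-uhs {
+		pins = "SD0_DAT0", "SD0_DAT1", "SD0_DAT2", "SD0_DAT3";
+		power-source = <1800>;
+		drive-strength = <12>;
+	};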
 
 
diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
new file mode 100644
index 0000000..b74e4d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
@@ -0,0 +1,48 @@
+DT bindings for the Renesas R-Car System Controller
+
+== System Controller Node ==
+
+The R-Car System Controller provides power management for the CPU cores and
+various coprocessors.
+
+Required properties:
+  - compatible: Must contain exactly one of the following:
+      - "renesas,r8a7779-sysc" (R-Car H1)
+      - "renesas,r8a7790-sysc" (R-Car H2)
+      - "renesas,r8a7791-sysc" (R-Car M2-W)
+      - "renesas,r8a7792-sysc" (R-Car V2H)
+      - "renesas,r8a7793-sysc" (R-Car M2-N)
+      - "renesas,r8a7794-sysc" (R-Car E2)
+      - "renesas,r8a7795-sysc" (R-Car H3)
+  - reg: Address start and address range for the device.
+  - #power-domain-cells: Must be 1.
+
+
+Example:
+
+	sysc: system-controller@e6180000 {
+		compatible = "renesas,r8a7791-sysc";
+		reg = <0 0xe6180000 0 0x0200>;
+		#power-domain-cells = <1>;
+	};
+
+
+== PM Domain Consumers ==
+
+Devices residing in a power area must refer to that power area, as documented
+by the generic PM domain bindings in
+Documentation/devicetree/bindings/power/power_domain.txt.
+
+Required properties:
+  - power-domains: A phandle and symbolic PM domain specifier, as defined in
+		   <dt-bindings/power/r8a77*-sysc.h>.
+
+
+Example:
+
+	L2_CA15: cache-controller@0 {
+		compatible = "cache";
+		power-domains = <&sysc R8A7791_PD_CA15_SCU>;
+		cache-unified;
+		cache-level = <2>;
+	};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-poweroff.txt b/Documentation/devicetree/bindings/power/reset/gpio-poweroff.txt
similarity index 100%
rename from Documentation/devicetree/bindings/gpio/gpio-poweroff.txt
rename to Documentation/devicetree/bindings/power/reset/gpio-poweroff.txt
diff --git a/Documentation/devicetree/bindings/gpio/gpio-restart.txt b/Documentation/devicetree/bindings/power/reset/gpio-restart.txt
similarity index 100%
rename from Documentation/devicetree/bindings/gpio/gpio-restart.txt
rename to Documentation/devicetree/bindings/power/reset/gpio-restart.txt
diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
index c84fb47..d23dc00 100644
--- a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
+++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
@@ -37,8 +37,10 @@
   - "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains
   - "rockchip,rk3399-io-voltage-domain" for rk3399
   - "rockchip,rk3399-pmu-io-voltage-domain" for rk3399 pmu-domains
-- rockchip,grf: phandle to the syscon managing the "general register files"
 
+Deprecated properties:
+- rockchip,grf: phandle to the syscon managing the "general register files"
+    Systems should move the io-domains to a sub-node of the grf simple-mfd.
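+
+    A hypothetical sketch of the preferred layout (node name and the chosen
+    compatible strings are illustrative only):
+
+	grf: syscon@ff770000 {
+		compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+
+		io-domains {
+			compatible = "rockchip,rk3399-io-voltage-domain";
+			...
+		};
+	};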
 
 You specify supplies using the standard regulator bindings by including
 a phandle the relevant regulator.  All specified supplies must be able
diff --git a/Documentation/devicetree/bindings/regmap/regmap.txt b/Documentation/devicetree/bindings/regmap/regmap.txt
index e98a9652..0127be3 100644
--- a/Documentation/devicetree/bindings/regmap/regmap.txt
+++ b/Documentation/devicetree/bindings/regmap/regmap.txt
@@ -1,50 +1,29 @@
-Device-Tree binding for regmap
-
-The endianness mode of CPU & Device scenarios:
-Index     Device     Endianness properties
----------------------------------------------------
-1         BE         'big-endian'
-2         LE         'little-endian'
-3	  Native     'native-endian'
-
-For one device driver, which will run in different scenarios above
-on different SoCs using the devicetree, we need one way to simplify
-this.
+Devicetree binding for regmap
 
 Optional properties:
-- {big,little,native}-endian: these are boolean properties, if absent
-  then the implementation will choose a default based on the device
-  being controlled.  These properties are for register values and all
-  the buffers only.  Native endian means that the CPU and device have
-  the same endianness.
+
+   little-endian,
+   big-endian,
+   native-endian:	See common-properties.txt for a definition
+
+Note:
+Regmap defaults to little-endian register access on MMIO-based
+devices; this is by far the most common setting. On CPU
+architectures that typically run big-endian operating systems
+(e.g. PowerPC), registers can be defined as big-endian and must
+be marked that way in the devicetree.
+
+On SoCs that can be operated in both big-endian and little-endian
+modes, with a single hardware switch controlling both the endianness
+of the CPU and a byteswap for MMIO registers (e.g. many Broadcom MIPS
+chips), "native-endian" is used to allow using the same device tree
+blob in both cases.
 
 Examples:
-Scenario 1 : CPU in LE mode & device in LE mode.
+Scenario 1 : a register set in big-endian mode.
 dev: dev@40031000 {
-	      compatible = "name";
+	      compatible = "syscon";
 	      reg = <0x40031000 0x1000>;
-	      ...
-};
-
-Scenario 2 : CPU in LE mode & device in BE mode.
-dev: dev@40031000 {
-	      compatible = "name";
-	      reg = <0x40031000 0x1000>;
-	      ...
 	      big-endian;
-};
-
-Scenario 3 : CPU in BE mode & device in BE mode.
-dev: dev@40031000 {
-	      compatible = "name";
-	      reg = <0x40031000 0x1000>;
 	      ...
 };
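+
+Scenario 2 : a register set using native endianness (illustrative node; this
+would typically appear on a bi-endian SoC where one switch controls both the
+CPU endianness and the MMIO byteswap).
+dev: dev@40032000 {
+	      compatible = "syscon";
+	      reg = <0x40032000 0x1000>;
+	      native-endian;
+	      ...
+};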
-
-Scenario 4 : CPU in BE mode & device in LE mode.
-dev: dev@40031000 {
-	      compatible = "name";
-	      reg = <0x40031000 0x1000>;
-	      ...
-	      little-endian;
-};
diff --git a/Documentation/devicetree/bindings/regulator/max8973-regulator.txt b/Documentation/devicetree/bindings/regulator/max8973-regulator.txt
index f80ea2f..c2c68fc 100644
--- a/Documentation/devicetree/bindings/regulator/max8973-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/max8973-regulator.txt
@@ -32,6 +32,13 @@
 
 Enhanced transient response (ETR) will affect the configuration of CKADV.
 
+-junction-warn-millicelsius: u32, junction warning temperature threshold
+		in millicelsius. If the die temperature crosses this level,
+		the device generates warning interrupts.
+
+Please note that thermal functionality is only supported on the MAX77621. The
+supported warning temperature thresholds for the MAX77621 are 120 degC and 140 degC.
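+
+For example, a hypothetical node fragment requesting a warning at 120 degC:
+
+	junction-warn-millicelsius = <120000>;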
+
 Example:
 
 	max8973@1b {
diff --git a/Documentation/devicetree/bindings/regulator/pv88080.txt b/Documentation/devicetree/bindings/regulator/pv88080.txt
new file mode 100644
index 0000000..38a6142
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/pv88080.txt
@@ -0,0 +1,49 @@
+* Powerventure Semiconductor PV88080 Voltage Regulator
+
+Required properties:
+- compatible: "pvs,pv88080".
+- reg: I2C slave address, usually 0x49.
+- interrupts: the interrupt outputs of the controller
+- regulators: A node that houses a sub-node for each regulator within the
+  device. Each sub-node is identified using the node's name; the valid names
+  are BUCK1, BUCK2 and BUCK3. The content of each sub-node is defined by the
+  standard binding for regulators; see regulator.txt.
+
+Optional properties:
+- Any optional property defined in regulator.txt
+
+Example:
+
+	pmic: pv88080@49 {
+		compatible = "pvs,pv88080";
+		reg = <0x49>;
+		interrupt-parent = <&gpio>;
+		interrupts = <24 24>;
+
+		regulators {
+			BUCK1 {
+				regulator-name = "buck1";
+				regulator-min-microvolt = < 600000>;
+				regulator-max-microvolt = <1393750>;
+				regulator-min-microamp 	= < 220000>;
+				regulator-max-microamp 	= <7040000>;
+			};
+
+			BUCK2 {
+				regulator-name = "buck2";
+				regulator-min-microvolt = < 600000>;
+				regulator-max-microvolt = <1393750>;
+				regulator-min-microamp 	= <1496000>;
+				regulator-max-microamp 	= <4189000>;
+			};
+
+			BUCK3 {
+				regulator-name = "buck3";
+				regulator-min-microvolt = <1400000>;
+				regulator-max-microvolt = <2193750>;
+				regulator-min-microamp 	= <1496000>;
+				regulator-max-microamp 	= <4189000>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
index d00bfd8..46c6f3e 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
@@ -7,6 +7,7 @@
 			"qcom,pm8841-regulators"
 			"qcom,pm8916-regulators"
 			"qcom,pm8941-regulators"
+			"qcom,pm8994-regulators"
 
 - interrupts:
 	Usage: optional
@@ -68,6 +69,37 @@
 	Definition: Reference to regulator supplying the input pin, as
 		    described in the data sheet.
 
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_s4-supply:
+- vdd_s5-supply:
+- vdd_s6-supply:
+- vdd_s7-supply:
+- vdd_s8-supply:
+- vdd_s9-supply:
+- vdd_s10-supply:
+- vdd_s11-supply:
+- vdd_s12-supply:
+- vdd_l1-supply:
+- vdd_l2_l26_l28-supply:
+- vdd_l3_l11-supply:
+- vdd_l4_l27_l31-supply:
+- vdd_l5_l7-supply:
+- vdd_l6_l12_l32-supply:
+- vdd_l8_l16_l30-supply:
+- vdd_l9_l10_l18_l22-supply:
+- vdd_l13_l19_l23_l24-supply:
+- vdd_l14_l15-supply:
+- vdd_l17_l29-supply:
+- vdd_l20_l21-supply:
+- vdd_l25-supply:
+- vdd_lvs_1_2-supply:
+	Usage: optional (pm8994 only)
+	Value type: <phandle>
+	Definition: Reference to regulator supplying the input pin, as
+		    described in the data sheet.
+
 
 The regulator node houses sub-nodes for each regulator within the device. Each
 sub-node is identified using the node's name, with valid values listed for each
@@ -85,6 +117,11 @@
 	l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, lvs1, lvs2, lvs3,
 	mvs1, mvs2
 
+pm8994:
+	s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, l1, l2, l3, l4, l5,
+	l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19, l20,
+	l21, l22, l23, l24, l25, l26, l27, l28, l29, l30, l31, l32, lvs1, lvs2
+
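+A minimal, hypothetical pm8994 fragment combining the properties above (the
+supply phandle and voltage values are illustrative only):
+
+	regulators {
+		compatible = "qcom,pm8994-regulators";
+		vdd_s1-supply = <&pm8994_s1_ext>;
+
+		s1 {
+			regulator-min-microvolt = <800000>;
+			regulator-max-microvolt = <1100000>;
+		};
+	};
+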
 The content of each sub-node is defined by the standard binding for regulators -
 see regulator.txt - with additional custom properties described below:
 
diff --git a/Documentation/devicetree/bindings/regulator/regulator-max77620.txt b/Documentation/devicetree/bindings/regulator/regulator-max77620.txt
index b3c8ca6..1c4bfe7 100644
--- a/Documentation/devicetree/bindings/regulator/regulator-max77620.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator-max77620.txt
@@ -94,6 +94,28 @@
 					This is applicable if suspend state
 					FPS source is selected as FPS0, FPS1 or
 					FPS2.
+- maxim,ramp-rate-setting:		integer, ramp rate (uV/us) setting to be
+					configured in the device registers.
+					The platform may have a different ramp
+					rate than the advertised ramp rate if
+					its design deviates from Maxim's
+					recommendation. In this case, the
+					platform-specific ramp rate is used for
+					ramp time calculation and this property
+					is used for the device register
+					configuration.
+					The measured ramp rate of the platform
+					is provided by regulator-ramp-delay
+					as described in <devicetree/bindings/
+					regulator/regulator.txt>.
+					The Maxim MAX77620 supports the
+					following ramp rates:
+					  SD: 13.75mV/us, 27.5mV/us, 55mV/us
+					  LDOs: 5mV/us, 100mV/us
+
+Note: If the measured ramp delay is the same as the advertised ramp delay, it
+is not required to provide the "maxim,ramp-rate-setting" property. In that case
+the ramp rate provided by regulator-ramp-delay is used both for ramp time
+calculation on voltage changes and for the device configuration.
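+
+As a hypothetical illustration (values are illustrative only), a board whose
+measured SD rail ramp rate is 20mV/us while the device register is programmed
+for the 27.5mV/us step could specify:
+
+	maxim,ramp-rate-setting = <27500>;	/* device register setting, uV/us */
+	regulator-ramp-delay = <20000>;		/* measured platform ramp rate, uV/us */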
 
 Example:
 --------
diff --git a/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt b/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
index c58db75..c3f6546 100644
--- a/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
@@ -14,8 +14,8 @@
   - "setup-address"	- contains setup register address of ABB module (ti,abb-v3)
   - "int-address"	- contains address of interrupt register for ABB module
   (also see Optional properties)
-- #address-cell: should be 0
-- #size-cell: should be 0
+- #address-cells: should be 0
+- #size-cells: should be 0
 - clocks: should point to the clock node used by ABB module
 - ti,settling-time: Settling time in uSecs from SoC documentation for ABB module
 	to settle down(target time for SR2_WTCNT_VALUE).
@@ -69,7 +69,7 @@
 abb_x: regulator-abb-x {
 	compatible = "ti,abb-v1";
 	regulator-name = "abb_x";
-	#address-cell = <0>;
+	#address-cells = <0>;
 	#size-cells = <0>;
 	reg = <0x483072f0 0x8>, <0x48306818 0x4>;
 	reg-names = "base-address", "int-address";
@@ -89,7 +89,7 @@
 abb_y: regulator-abb-y {
 	compatible = "ti,abb-v2";
 	regulator-name = "abb_y";
-	#address-cell = <0>;
+	#address-cells = <0>;
 	#size-cells = <0>;
 	reg = <0x4a307bd0 0x8>, <0x4a306014 0x4>, <0x4A002268 0x8>;
 	reg-names = "base-address", "int-address", "efuse-address";
@@ -110,7 +110,7 @@
 abb_z: regulator-abb-z {
 	compatible = "ti,abb-v2";
 	regulator-name = "abb_z";
-	#address-cell = <0>;
+	#address-cells = <0>;
 	#size-cells = <0>;
 	reg = <0x4ae07ce4 0x8>, <0x4ae06010 0x4>,
 	      <0x4a002194 0x8>, <0x4ae0C314 0x4>;
diff --git a/Documentation/devicetree/bindings/regulator/twl-regulator.txt b/Documentation/devicetree/bindings/regulator/twl-regulator.txt
index 75b0c16..74a91c4 100644
--- a/Documentation/devicetree/bindings/regulator/twl-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/twl-regulator.txt
@@ -57,6 +57,12 @@
 
 Optional properties:
 - Any optional property defined in bindings/regulator/regulator.txt
+For twl4030 regulators/LDOs:
+ - regulator-initial-mode:
+  - 0x08 - Sleep mode, the nominal output voltage is maintained with low power
+           consumption and low load current capability.
+  - 0x0e - Active mode, the regulator can deliver its nominal output voltage
+           with full-load current capability.
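+
+For example, a hypothetical twl4030 LDO node that should start in sleep mode
+could add:
+
+	regulator-initial-mode = <0x08>;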
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/reset/oxnas,reset.txt b/Documentation/devicetree/bindings/reset/oxnas,reset.txt
new file mode 100644
index 0000000..6f06db9
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/oxnas,reset.txt
@@ -0,0 +1,58 @@
+Oxford Semiconductor OXNAS SoC Family RESET Controller
+======================================================
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required properties:
+- compatible: Should be "oxsemi,ox810se-reset"
+- #reset-cells: 1, see below
+
+Parent node should have the following properties :
+- compatible: Should be "oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd"
+
+For OX810SE, the indices are:
+ - 0 : ARM
+ - 1 : COPRO
+ - 2 : Reserved
+ - 3 : Reserved
+ - 4 : USBHS
+ - 5 : USBHSPHY
+ - 6 : MAC
+ - 7 : PCI
+ - 8 : DMA
+ - 9 : DPE
+ - 10 : DDR
+ - 11 : SATA
+ - 12 : SATA_LINK
+ - 13 : SATA_PHY
+ - 14 : Reserved
+ - 15 : NAND
+ - 16 : GPIO
+ - 17 : UART1
+ - 18 : UART2
+ - 19 : MISC
+ - 20 : I2S
+ - 21 : AHB_MON
+ - 22 : UART3
+ - 23 : UART4
+ - 24 : SGDMA
+ - 25 : Reserved
+ - 26 : Reserved
+ - 27 : Reserved
+ - 28 : Reserved
+ - 29 : Reserved
+ - 30 : Reserved
+ - 31 : BUS
+
+Example:
+
+sys: sys-ctrl@000000 {
+	compatible = "oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd";
+	reg = <0x000000 0x100000>;
+
+	reset: reset-controller {
+		compatible = "oxsemi,ox810se-reset";
+		#reset-cells = <1>;
+	};
+};
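+
+A hypothetical consumer node could then reference one of the reset lines by
+its index, e.g. the UART3 reset (index 22):
+
+uart3: uart {
+	...
+	resets = <&reset 22>;
+};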
diff --git a/Documentation/devicetree/bindings/rng/hisi-rng.txt b/Documentation/devicetree/bindings/rng/hisi-rng.txt
new file mode 100644
index 0000000..d04d55a
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/hisi-rng.txt
@@ -0,0 +1,12 @@
+Hisilicon Random Number Generator
+
+Required properties:
+- compatible : Should be "hisilicon,hip04-rng" or "hisilicon,hip05-rng"
+- reg : Offset and length of the register set of this block
+
+Example:
+
+rng@d1010000 {
+	compatible = "hisilicon,hip05-rng";
+	reg = <0xd1010000 0x100>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
index 1068ffc..fdde63a 100644
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -15,9 +15,10 @@
   is the rtc tick interrupt. The number of cells representing a interrupt
   depends on the parent interrupt controller.
 - clocks: Must contain a list of phandle and clock specifier for the rtc
-          and source clocks.
-- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
-               same order as the clocks property.
+          clock and, in the case of an s3c6410-compatible controller, also
+          a source clock.
+- clock-names: Must contain "rtc" and, for an s3c6410-compatible controller,
+               "rtc_src", sorted in the same order as the clocks property.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt b/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
new file mode 100644
index 0000000..65b38bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
@@ -0,0 +1,29 @@
+* Microchip Universal Asynchronous Receiver Transmitter (UART)
+
+Required properties:
+- compatible: Should be "microchip,pic32mzda-uart"
+- reg: Should contain registers location and length
+- interrupts: Should contain interrupt
+- clocks: Phandle to the clock.
+          See: Documentation/devicetree/bindings/clock/clock-bindings.txt
+- pinctrl-names: A pinctrl state named "default" must be defined.
+- pinctrl-0: Phandle referencing pin configuration of the UART peripheral.
+             See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt
+
+Optional properties:
+- cts-gpios: CTS pin for UART
+
+Example:
+	uart1: serial@1f822000 {
+		compatible = "microchip,pic32mzda-uart";
+		reg = <0x1f822000 0x50>;
+		interrupts = <112 IRQ_TYPE_LEVEL_HIGH>,
+			<113 IRQ_TYPE_LEVEL_HIGH>,
+			<114 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&PBCLK2>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_uart1
+				&pinctrl_uart1_cts
+				&pinctrl_uart1_rts>;
+		cts-gpios = <&gpio1 15 0>;
+	};
diff --git a/Documentation/devicetree/bindings/soc/mediatek/auxadc.txt b/Documentation/devicetree/bindings/soc/mediatek/auxadc.txt
new file mode 100644
index 0000000..bdb7829
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/mediatek/auxadc.txt
@@ -0,0 +1,21 @@
+MediaTek AUXADC
+===============
+
+The Auxiliary Analog/Digital Converter (AUXADC) is an ADC found
+in some MediaTek SoCs which, among other things, measures the temperatures
+in the SoC. It can be used directly with register accesses, but it is also
+used by the thermal controller, which reads the temperatures from the AUXADC
+directly via its own bus interface. See
+Documentation/devicetree/bindings/thermal/mediatek-thermal.txt
+for the Thermal Controller which holds a phandle to the AUXADC.
+
+Required properties:
+- compatible: Must be "mediatek,mt8173-auxadc"
+- reg: Address range of the AUXADC unit
+
+Example:
+
+auxadc: auxadc@11001000 {
+	compatible = "mediatek,mt8173-auxadc";
+	reg = <0 0x11001000 0 0x1000>;
+};
diff --git a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
index ddeb5b6..107700d 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
@@ -18,6 +18,7 @@
 
 Required properties in pwrap device node.
 - compatible:
+	"mediatek,mt2701-pwrap" for MT2701/7623 SoCs
 	"mediatek,mt8135-pwrap" for MT8135 SoCs
 	"mediatek,mt8173-pwrap" for MT8173 SoCs
 - interrupts: IRQ for pwrap in SOC
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.txt b/Documentation/devicetree/bindings/soc/rockchip/grf.txt
new file mode 100644
index 0000000..013e71a
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.txt
@@ -0,0 +1,35 @@
+* Rockchip General Register Files (GRF)
+
+The general register file is used for static settings by software and is
+composed of many registers for system control.
+
+Starting with the RK3368 SoCs, the GRF is divided into two sections,
+- GRF, used for the general non-secure system,
+- PMUGRF, used for the always-on system
+
+Required Properties:
+
+- compatible: GRF should be one of the following
+   - "rockchip,rk3066-grf", "syscon": for rk3066
+   - "rockchip,rk3188-grf", "syscon": for rk3188
+   - "rockchip,rk3228-grf", "syscon": for rk3228
+   - "rockchip,rk3288-grf", "syscon": for rk3288
+   - "rockchip,rk3368-grf", "syscon": for rk3368
+   - "rockchip,rk3399-grf", "syscon": for rk3399
+- compatible: PMUGRF should be one of the following
+   - "rockchip,rk3368-pmugrf", "syscon": for rk3368
+   - "rockchip,rk3399-pmugrf", "syscon": for rk3399
+- reg: physical base address of the controller and length of memory mapped
+  region.
+
+Example: GRF and PMUGRF of RK3399 SoCs
+
+	pmugrf: syscon@ff320000 {
+		compatible = "rockchip,rk3399-pmugrf", "syscon";
+		reg = <0x0 0xff320000 0x0 0x1000>;
+	};
+
+	grf: syscon@ff770000 {
+		compatible = "rockchip,rk3399-grf", "syscon";
+		reg = <0x0 0xff770000 0x0 0x10000>;
+	};
diff --git a/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt b/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt
index 13dc6a3..f909ce0 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt
+++ b/Documentation/devicetree/bindings/soc/rockchip/power_domain.txt
@@ -7,6 +7,7 @@
 - compatible: Should be one of the following.
 	"rockchip,rk3288-power-controller" - for RK3288 SoCs.
 	"rockchip,rk3368-power-controller" - for RK3368 SoCs.
+	"rockchip,rk3399-power-controller" - for RK3399 SoCs.
 - #power-domain-cells: Number of cells in a power-domain specifier.
 	Should be 1 for multiple PM domains.
 - #address-cells: Should be 1.
@@ -16,8 +17,18 @@
 - reg: index of the power domain, should use macros in:
 	"include/dt-bindings/power/rk3288-power.h" - for RK3288 type power domain.
 	"include/dt-bindings/power/rk3368-power.h" - for RK3368 type power domain.
+	"include/dt-bindings/power/rk3399-power.h" - for RK3399 type power domain.
 - clocks (optional): phandles to clocks which need to be enabled while power domain
 	switches state.
+- pm_qos (optional): phandles to qos blocks which need to be saved and restored
+	while power domain switches state.
+
+Qos Example:
+
+	qos_gpu: qos_gpu@ffaf0000 {
+		compatible = "syscon";
+		reg = <0x0 0xffaf0000 0x0 0x20>;
+	};
 
 Example:
 
@@ -30,6 +41,7 @@
 		pd_gpu {
 			reg = <RK3288_PD_GPU>;
 			clocks = <&cru ACLK_GPU>;
+			pm_qos = <&qos_gpu>;
 		};
 	};
 
@@ -45,12 +57,41 @@
                 };
         };
 
+Example 2:
+		power: power-controller {
+			compatible = "rockchip,rk3399-power-controller";
+			#power-domain-cells = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			pd_vio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <RK3399_PD_VIO>;
+
+				pd_vo {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <RK3399_PD_VO>;
+
+					pd_vopb {
+						reg = <RK3399_PD_VOPB>;
+					};
+
+					pd_vopl {
+						reg = <RK3399_PD_VOPL>;
+					};
+				};
+			};
+		};
+
 Node of a device using power domains must have a power-domains property,
 containing a phandle to the power device node and an index specifying which
 power domain to use.
 The index should use macros in:
 	"include/dt-bindings/power/rk3288-power.h" - for rk3288 type power domain.
 	"include/dt-bindings/power/rk3368-power.h" - for rk3368 type power domain.
+	"include/dt-bindings/power/rk3399-power.h" - for rk3399 type power domain.
 
 Example of the node using power domain:
 
@@ -65,3 +106,9 @@
                 power-domains = <&power RK3368_PD_GPU_1>;
                 /* ... */
         };
+
+	node {
+		/* ... */
+		power-domains = <&power RK3399_PD_VOPB>;
+		/* ... */
+	};
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
new file mode 100644
index 0000000..55b53e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
@@ -0,0 +1,51 @@
+Texas Instruments DaVinci McBSP module
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This binding describes the "Multi-channel Buffered Serial Port" (McBSP)
+audio interface found in some TI DaVinci processors like the OMAP-L138 or AM180x.
+
+
+Required properties:
+~~~~~~~~~~~~~~~~~~~~
+- compatible :
+        "ti,da850-mcbsp" : for DA850, AM180x and OPAM-L138 platforms
+
+- reg : physical base address and length of the controller memory mapped
+        region(s).
+- reg-names : Should contain:
+        * "mpu" for the main registers (required).
+        * "dat" for the data FIFO (optional).
+
+- dmas: list of DMA controller phandle, DMA request line and TC channel
+	ordered triplets.
+- dma-names: identifier string for each DMA request line in the dmas property.
+	These strings correspond 1:1 with the ordered triplets in dmas. The dma
+	identifiers must be "rx" and "tx".
+
+Optional properties:
+~~~~~~~~~~~~~~~~~~~~
+- interrupts : Interrupt numbers for McBSP
+- interrupt-names : Known interrupt names are "rx" and "tx"
+
+- pinctrl-0: Should specify pin control group used for this controller.
+- pinctrl-names: Should contain only one value - "default", for more details
+        please refer to pinctrl-bindings.txt
+
+Example (AM1808):
+~~~~~~~~~~~~~~~~~
+
+mcbsp0: mcbsp@1d10000 {
+	compatible = "ti,da850-mcbsp";
+	pinctrl-names = "default";
+	pinctrl-0 = <&mcbsp0_pins>;
+
+	reg = 	<0x00110000 0x1000>,
+		<0x00310000 0x1000>;
+	reg-names = "mpu", "dat";
+	interrupts = <97 98>;
+	interrupt-names = "rx", "tx";
+	dmas = <&edma0 3 1
+		&edma0 2 1>;
+	dma-names = "tx", "rx";
+	status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/sound/fsl-sai.txt b/Documentation/devicetree/bindings/sound/fsl-sai.txt
index 044e5d7..740b467 100644
--- a/Documentation/devicetree/bindings/sound/fsl-sai.txt
+++ b/Documentation/devicetree/bindings/sound/fsl-sai.txt
@@ -7,8 +7,8 @@
 
 Required properties:
 
-  - compatible		: Compatible list, contains "fsl,vf610-sai" or
-			  "fsl,imx6sx-sai".
+  - compatible		: Compatible list, contains "fsl,vf610-sai",
+			  "fsl,imx6sx-sai" or "fsl,imx6ul-sai"
 
   - reg			: Offset and length of the register set for the device.
 
@@ -48,6 +48,11 @@
 			  receive data by following their own bit clocks and
 			  frame sync clocks separately.
 
+Optional properties (for mx6ul):
+
+  - fsl,sai-mclk-direction-output: This is a boolean property. If present,
+			 indicates that SAI will output the SAI MCLK clock.
+
 Note:
 - If both fsl,sai-asynchronous and fsl,sai-synchronous-rx are absent, the
   default synchronous mode (sync Rx with Tx) will be used, which means both
diff --git a/Documentation/devicetree/bindings/sound/pcm5102a.txt b/Documentation/devicetree/bindings/sound/pcm5102a.txt
new file mode 100644
index 0000000..c63ab0b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/pcm5102a.txt
@@ -0,0 +1,13 @@
+PCM5102a audio CODECs
+
+These devices do not use I2C or SPI.
+
+Required properties:
+
+  - compatible : set as "ti,pcm5102a"
+
+Examples:
+
+	pcm5102a: pcm5102a {
+		compatible = "ti,pcm5102a";
+	};
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
index fa77f87..1ad0fe3 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
@@ -1,7 +1,10 @@
 ARM Freescale DSPI controller
 
 Required properties:
-- compatible : "fsl,vf610-dspi", "fsl,ls1021a-v1.0-dspi", "fsl,ls2085a-dspi"
+- compatible : "fsl,vf610-dspi", "fsl,ls1021a-v1.0-dspi",
+		"fsl,ls2085a-dspi"
+		or
+		"fsl,ls2080a-dspi" followed by "fsl,ls2085a-dspi"
 - reg : Offset and length of the register set for the device
 - interrupts : Should contain SPI controller interrupt
 - clocks: from common clock binding: handle to dspi clock.
diff --git a/Documentation/devicetree/bindings/spi/ti_qspi.txt b/Documentation/devicetree/bindings/spi/ti_qspi.txt
index cc8304a..50b14f6 100644
--- a/Documentation/devicetree/bindings/spi/ti_qspi.txt
+++ b/Documentation/devicetree/bindings/spi/ti_qspi.txt
@@ -19,6 +19,13 @@
 - syscon-chipselects: Handle to system control region contains QSPI
 		      chipselect register and offset of that register.
 
+NOTE: The TI QSPI controller requires different pinmux and IODelay
+parameters for Mode-0 and Mode-3 operations, which need to be set up by
+the bootloader (U-Boot). The default configuration only supports Mode-0
+operation. Hence, the "spi-cpol" and "spi-cpha" DT properties cannot be
+specified in the slave nodes of the TI QSPI controller without appropriate
+modification to the bootloader.
+
 Example:
 
 For am4372:
diff --git a/Documentation/devicetree/bindings/timer/arm,mps2-timer.txt b/Documentation/devicetree/bindings/timer/arm,mps2-timer.txt
new file mode 100644
index 0000000..48f84d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/arm,mps2-timer.txt
@@ -0,0 +1,28 @@
+ARM MPS2 timer
+
+The MPS2 platform has simple general-purpose 32-bit timers.
+
+Required properties:
+- compatible	: Should be "arm,mps2-timer"
+- reg		: Address and length of the register set
+- interrupts	: Reference to the timer interrupt
+
+Required clocking property; must be one of:
+- clocks	  : The input clock of the timer
+- clock-frequency : The input clock rate of the ARM MPS2 timer, in Hz
+
+Examples:
+
+timer1: mps2-timer@40000000 {
+	compatible = "arm,mps2-timer";
+	reg = <0x40000000 0x1000>;
+	interrupts = <8>;
+	clocks = <&sysclk>;
+};
+
+timer2: mps2-timer@40001000 {
+	compatible = "arm,mps2-timer";
+	reg = <0x40001000 0x1000>;
+	interrupts = <9>;
+	clock-frequency = <25000000>;
+};
diff --git a/Documentation/devicetree/bindings/timer/ezchip,nps400-timer.txt b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer.txt
new file mode 100644
index 0000000..c8c03d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer.txt
@@ -0,0 +1,15 @@
+NPS Network Processor
+
+Required properties:
+
+- compatible :	should be "ezchip,nps400-timer"
+
+Clocks required for compatible = "ezchip,nps400-timer":
+- clocks : Must contain a single entry describing the clock input
+
+Example:
+
+timer {
+	compatible = "ezchip,nps400-timer";
+	clocks = <&sysclk>;
+};
diff --git a/Documentation/devicetree/bindings/timer/snps,arc-timer.txt b/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
new file mode 100644
index 0000000..4ef0246
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/snps,arc-timer.txt
@@ -0,0 +1,31 @@
+Synopsys ARC Local Timer with Interrupt Capabilities
+- Found on all ARC CPUs (ARC700/ARCHS)
+- Can be optionally programmed to interrupt on Limit
+- Two identical copies, TIMER0 and TIMER1, exist in ARC cores; historically
+  TIMER0 is used as the clockevent provider (true for all ARC cores) and
+  TIMER1 is used as the clocksource (mandatory for ARC700, optional for ARC HS)
+
+Required properties:
+
+- compatible : should be "snps,arc-timer"
+- interrupts : single Interrupt going into parent intc
+	       (16 for ARCHS cores, 3 for ARC700 cores)
+- clocks     : phandle to the source clock
+
+Optional properties:
+
+- interrupt-parent : phandle to parent intc
+
+Example:
+
+	timer0 {
+		compatible = "snps,arc-timer";
+		interrupts = <3>;
+		interrupt-parent = <&core_intc>;
+		clocks = <&core_clk>;
+	};
+
+	timer1 {
+		compatible = "snps,arc-timer";
+		clocks = <&core_clk>;
+	};
diff --git a/Documentation/devicetree/bindings/timer/snps,archs-gfrc.txt b/Documentation/devicetree/bindings/timer/snps,archs-gfrc.txt
new file mode 100644
index 0000000..b6cd1b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/snps,archs-gfrc.txt
@@ -0,0 +1,14 @@
+Synopsys ARC Free Running 64-bit Global Timer for ARC HS CPUs
+- clocksource provider for SMP SoC
+
+Required properties:
+
+- compatible : should be "snps,archs-gfrc"
+- clocks     : phandle to the source clock
+
+Example:
+
+	gfrc {
+		compatible = "snps,archs-gfrc";
+		clocks = <&core_clk>;
+	};
diff --git a/Documentation/devicetree/bindings/timer/snps,archs-rtc.txt b/Documentation/devicetree/bindings/timer/snps,archs-rtc.txt
new file mode 100644
index 0000000..47bd7a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/snps,archs-rtc.txt
@@ -0,0 +1,14 @@
+Synopsys ARC Free Running 64-bit Local Timer for ARC HS CPUs
+- clocksource provider for UP SoC
+
+Required properties:
+
+- compatible : should be "snps,archs-rtc"
+- clocks     : phandle to the source clock
+
+Example:
+
+	rtc {
+		compatible = "snps,archs-rtc";
+		clocks = <&core_clk>;
+	};
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt b/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt
new file mode 100644
index 0000000..d28295a
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt
@@ -0,0 +1,120 @@
+NVIDIA Tegra xHCI controller
+============================
+
+The Tegra xHCI controller supports both USB2 and USB3 interfaces exposed by
+the Tegra XUSB pad controller.
+
+Required properties:
+--------------------
+- compatible: Must be:
+  - Tegra124: "nvidia,tegra124-xusb"
+  - Tegra132: "nvidia,tegra132-xusb", "nvidia,tegra124-xusb"
+  - Tegra210: "nvidia,tegra210-xusb"
+- reg: Must contain the base and length of the xHCI host registers, XUSB FPCI
+  registers and XUSB IPFS registers.
+- reg-names: Must contain the following entries:
+  - "hcd"
+  - "fpci"
+  - "ipfs"
+- interrupts: Must contain the xHCI host interrupt and the mailbox interrupt.
+- clocks: Must contain an entry for each entry in clock-names.
+  See ../clock/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+   - xusb_host
+   - xusb_host_src
+   - xusb_falcon_src
+   - xusb_ss
+   - xusb_ss_src
+   - xusb_ss_div2
+   - xusb_hs_src
+   - xusb_fs_src
+   - pll_u_480m
+   - clk_m
+   - pll_e
+- resets: Must contain an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+  - xusb_host
+  - xusb_ss
+  - xusb_src
+  Note that xusb_src is the shared reset for xusb_{ss,hs,fs,falcon,host}_src.
+- nvidia,xusb-padctl: phandle to the XUSB pad controller that is used to
+  configure the USB pads used by the XHCI controller
+
+For Tegra124 and Tegra132:
+- avddio-pex-supply: PCIe/USB3 analog logic power supply. Must supply 1.05 V.
+- dvddio-pex-supply: PCIe/USB3 digital logic power supply. Must supply 1.05 V.
+- avdd-usb-supply: USB controller power supply. Must supply 3.3 V.
+- avdd-pll-utmip-supply: UTMI PLL power supply. Must supply 1.8 V.
+- avdd-pll-erefe-supply: PLLE reference PLL power supply. Must supply 1.05 V.
+- avdd-usb-ss-pll-supply: PCIe/USB3 PLL power supply. Must supply 1.05 V.
+- hvdd-usb-ss-supply: High-voltage PCIe/USB3 power supply. Must supply 3.3 V.
+- hvdd-usb-ss-pll-e-supply: High-voltage PLLE power supply. Must supply 3.3 V.
+
+For Tegra210:
+- dvddio-pex-supply: PCIe/USB3 analog logic power supply. Must supply 1.05 V.
+- hvddio-pex-supply: High-voltage PCIe/USB3 power supply. Must supply 1.8 V.
+- avdd-usb-supply: USB controller power supply. Must supply 3.3 V.
+- avdd-pll-utmip-supply: UTMI PLL power supply. Must supply 1.8 V.
+- avdd-pll-uerefe-supply: PLLE reference PLL power supply. Must supply 1.05 V.
+- dvdd-pex-pll-supply: PCIe/USB3 PLL power supply. Must supply 1.05 V.
+- hvdd-pex-pll-e-supply: High-voltage PLLE power supply. Must supply 1.8 V.
+
+Optional properties:
+--------------------
+- phys: Must contain an entry for each entry in phy-names.
+  See ../phy/phy-bindings.txt for details.
+- phy-names: Should include an entry for each PHY used by the controller. The
+  following PHYs are available:
+  - Tegra124: usb2-0, usb2-1, usb2-2, hsic-0, hsic-1, usb3-0, usb3-1
+  - Tegra132: usb2-0, usb2-1, usb2-2, hsic-0, hsic-1, usb3-0, usb3-1
+  - Tegra210: usb2-0, usb2-1, usb2-2, usb2-3, hsic-0, usb3-0, usb3-1, usb3-2,
+              usb3-3
+
+Example:
+--------
+
+	usb@0,70090000 {
+		compatible = "nvidia,tegra124-xusb";
+		reg = <0x0 0x70090000 0x0 0x8000>,
+		      <0x0 0x70098000 0x0 0x1000>,
+		      <0x0 0x70099000 0x0 0x1000>;
+		reg-names = "hcd", "fpci", "ipfs";
+
+		interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+
+		clocks = <&tegra_car TEGRA124_CLK_XUSB_HOST>,
+			 <&tegra_car TEGRA124_CLK_XUSB_HOST_SRC>,
+			 <&tegra_car TEGRA124_CLK_XUSB_FALCON_SRC>,
+			 <&tegra_car TEGRA124_CLK_XUSB_SS>,
+			 <&tegra_car TEGRA124_CLK_XUSB_SS_DIV2>,
+			 <&tegra_car TEGRA124_CLK_XUSB_SS_SRC>,
+			 <&tegra_car TEGRA124_CLK_XUSB_HS_SRC>,
+			 <&tegra_car TEGRA124_CLK_XUSB_FS_SRC>,
+			 <&tegra_car TEGRA124_CLK_PLL_U_480M>,
+			 <&tegra_car TEGRA124_CLK_CLK_M>,
+			 <&tegra_car TEGRA124_CLK_PLL_E>;
+		clock-names = "xusb_host", "xusb_host_src", "xusb_falcon_src",
+			      "xusb_ss", "xusb_ss_div2", "xusb_ss_src",
+			      "xusb_hs_src", "xusb_fs_src", "pll_u_480m",
+			      "clk_m", "pll_e";
+		resets = <&tegra_car 89>, <&tegra_car 156>, <&tegra_car 143>;
+		reset-names = "xusb_host", "xusb_ss", "xusb_src";
+
+		nvidia,xusb-padctl = <&padctl>;
+
+		phys = <&{/padctl@0,7009f000/pads/usb2/usb2-1}>, /* mini-PCIe USB */
+		       <&{/padctl@0,7009f000/pads/usb2/usb2-2}>, /* USB A */
+		       <&{/padctl@0,7009f000/pads/pcie/pcie-0}>; /* USB A */
+		phy-names = "usb2-1", "usb2-2", "usb3-0";
+
+		avddio-pex-supply = <&vdd_1v05_run>;
+		dvddio-pex-supply = <&vdd_1v05_run>;
+		avdd-usb-supply = <&vdd_3v3_lp0>;
+		avdd-pll-utmip-supply = <&vddio_1v8>;
+		avdd-pll-erefe-supply = <&avdd_1v05_run>;
+		avdd-usb-ss-pll-supply = <&vdd_1v05_run>;
+		hvdd-usb-ss-supply = <&vdd_3v3_lp0>;
+		hvdd-usb-ss-pll-e-supply = <&vdd_3v3_lp0>;
+	};
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index 6a17aa8..966885c 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -4,6 +4,7 @@
   - compatible: should be one or more of
 
     - "generic-xhci" for generic XHCI device
+    - "marvell,armada3700-xhci" for Armada 37xx SoCs
     - "marvell,armada-375-xhci" for Armada 375 SoCs
     - "marvell,armada-380-xhci" for Armada 38x SoCs
     - "renesas,xhci-r8a7790" for r8a7790 SoC
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 86740d4..316412d 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -16,6 +16,7 @@
 allwinner	Allwinner Technology Co., Ltd.
 alphascale	AlphaScale Integrated Circuits Systems, Inc.
 altr	Altera Corp.
+amazon	Amazon.com, Inc.
 amcc	Applied Micro Circuits Corporation (APM, formally AMCC)
 amd	Advanced Micro Devices (AMD), Inc.
 amlogic	Amlogic, Inc.
@@ -29,6 +30,7 @@
 armadeus	ARMadeus Systems SARL
 artesyn	Artesyn Embedded Technologies Inc.
 asahi-kasei	Asahi Kasei Corp.
+aspeed	ASPEED Technology Inc.
 atlas	Atlas Scientific LLC
 atmel	Atmel Corporation
 auo	AU Optronics Corporation
@@ -70,6 +72,8 @@
 dlg	Dialog Semiconductor
 dlink	D-Link Corporation
 dmo	Data Modul AG
+dptechnics	DPTechnics
+dragino	Dragino Technology Co., Limited
 ea	Embedded Artists AB
 ebv	EBV Elektronik
 edt	Emerging Display Technologies
@@ -86,11 +90,13 @@
 everest	Everest Semiconductor Co. Ltd.
 everspin	Everspin Technologies, Inc.
 excito	Excito
+ezchip	EZchip Semiconductor
 fcs	Fairchild Semiconductor
 firefly	Firefly
 focaltech	FocalTech Systems Co.,Ltd
 fsl	Freescale Semiconductor
 ge	General Electric Company
+geekbuying	GeekBuying
 GEFanuc	GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 gef	GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 geniatech	Geniatech, Inc.
@@ -152,6 +158,7 @@
 mosaixtech	Mosaix Technologies, Inc.
 moxa	Moxa
 mpl	MPL AG
+mqmaker	mqmaker Inc.
 msi	Micro-Star International Co. Ltd.
 mti	Imagination Technologies Ltd. (formerly MIPS Technologies Inc.)
 mundoreader	Mundo Reader S.L.
@@ -171,12 +178,14 @@
 nxp	NXP Semiconductors
 okaya	Okaya Electric America, Inc.
 olimex	OLIMEX Ltd.
+onion	Onion Corporation
 onnn	ON Semiconductor Corp.
 opencores	OpenCores.org
 option	Option NV
 ortustech	Ortus Technology Co., Ltd.
 ovti	OmniVision Technologies
 ORCL	Oracle Corporation
+oxsemi	Oxford Semiconductor, Ltd.
 panasonic	Panasonic Corporation
 parade	Parade Technologies Inc.
 pericom	Pericom Technology Inc.
@@ -250,6 +259,7 @@
 tronfy	Tronfy
 tronsmart	Tronsmart
 truly	Truly Semiconductors Limited
+tyan	Tyan Computer Corporation
 upisemi	uPI Semiconductor Corp.
 urt	United Radiant Technology Corporation
 usi	Universal Scientific Industrial Co., Ltd.
@@ -259,6 +269,7 @@
 virtio	Virtual I/O Device Specification, developed by the OASIS consortium
 vivante	Vivante Corporation
 voipac	Voipac Technologies s.r.o.
+wd	Western Digital Corp.
 wexler	Wexler
 winbond Winbond Electronics corp.
 wlf	Wolfson Microelectronics
diff --git a/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt b/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
new file mode 100644
index 0000000..852f694
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
@@ -0,0 +1,19 @@
+* Microchip PIC32 Deadman Timer
+
+The deadman timer is used to reset the processor in the event of a software
+malfunction. It is a free-running instruction fetch timer, which is clocked
+whenever an instruction fetch occurs until a count match occurs.
+
+Required properties:
+- compatible: must be "microchip,pic32mzda-dmt".
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- clocks: phandle of parent clock (should be &PBCLK7).
+
+Example:
+
+	watchdog@1f800a00 {
+		compatible = "microchip,pic32mzda-dmt";
+		reg = <0x1f800a00 0x80>;
+		clocks = <&PBCLK7>;
+	};
diff --git a/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt b/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
new file mode 100644
index 0000000..d140103
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
@@ -0,0 +1,18 @@
+* Microchip PIC32 Watchdog Timer
+
+When enabled, the watchdog peripheral can be used to reset the device if the
+WDT is not cleared periodically in software.
+
+Required properties:
+- compatible: must be "microchip,pic32mzda-wdt".
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- clocks: phandle of the source clock (should be <&LPRC>).
+
+Example:
+
+	watchdog@1f800800 {
+		compatible = "microchip,pic32mzda-wdt";
+		reg = <0x1f800800 0x200>;
+		clocks = <&LPRC>;
+	};
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 25e8f3e..7281fb4 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -331,6 +331,8 @@
 PINCTRL
   devm_pinctrl_get()
   devm_pinctrl_put()
+  devm_pinctrl_register()
+  devm_pinctrl_unregister()
 
 PWM
   devm_pwm_get()
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 619af9b..75eea7c 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -194,7 +194,7 @@
 	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, int);
 	void (*freepage)(struct page *);
-	int (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
+	int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
 	int (*migratepage)(struct address_space *, struct page *, struct page *);
 	int (*launder_page)(struct page *);
 	int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
diff --git a/Documentation/filesystems/cramfs.txt b/Documentation/filesystems/cramfs.txt
index 31f53f0..4006298 100644
--- a/Documentation/filesystems/cramfs.txt
+++ b/Documentation/filesystems/cramfs.txt
@@ -38,7 +38,7 @@
 which the timestamp reverts to 1970, i.e. moves backwards in time.
 
 Currently, cramfs must be written and read with architectures of the
-same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
+same endianness, and can be read only by kernels with PAGE_SIZE
 == 4096.  At least the latter of these is a bug, but it hasn't been
 decided what the best fix is.  For the moment if you have larger pages
 you can just change the #define in mkcramfs.c, so long as you don't
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index f1b87d8..46f3bb7 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -525,3 +525,56 @@
 	set_delayed_call() where it used to set *cookie.
 	->put_link() is gone - just give the destructor to set_delayed_call()
 	in ->get_link().
+--
+[mandatory]
+	->getxattr() and xattr_handler.get() get dentry and inode passed separately.
+	dentry might be yet to be attached to inode, so do _not_ use its ->d_inode
+	in the instances.  Rationale: !@#!@# security_d_instantiate() needs to be
+	called before we attach dentry to inode.
+--
+[mandatory]
+	symlinks are no longer the only inodes that do *not* have i_bdev/i_cdev/
+	i_pipe/i_link union zeroed out at inode eviction.  As a result, you can't
+	assume that a non-NULL value in ->i_nlink at ->destroy_inode() implies that
+	it's a symlink.  Checking ->i_mode is really needed now.  In-tree we had
+	to fix shmem_destroy_callback() that used to take that kind of shortcut;
+	watch out, since that shortcut is no longer valid.
+--
+[mandatory]
+	->i_mutex is replaced with ->i_rwsem now.  inode_lock() et al. work as
+	they used to - they just take it exclusive.  However, ->lookup() may be
+	called with parent locked shared.  Its instances must not
+		* use d_instantiate() and d_rehash() separately - use d_add() or
+		  d_splice_alias() instead (see the sketch at the end of this entry).
+		* use d_rehash() alone - call d_add(new_dentry, NULL) instead.
+		* in the unlikely case when (read-only) access to filesystem
+		  data structures needs exclusion for some reason, arrange it
+		  yourself.  None of the in-tree filesystems needed that.
+		* rely on ->d_parent and ->d_name not changing after dentry has
+		  been fed to d_add() or d_splice_alias().  Again, none of the
+		  in-tree instances relied upon that.
+	We are guaranteed that lookups of the same name in the same directory
+	will not happen in parallel ("same" in the sense of your ->d_compare()).
+	Lookups on different names in the same directory can and do happen in
+	parallel now.
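+
+	A minimal sketch of a ->lookup() instance following the rules above
+	(foo_inode_by_name() and foo_iget() are hypothetical helpers standing
+	in for whatever your filesystem uses):
+
+		static struct dentry *foo_lookup(struct inode *dir,
+						 struct dentry *dentry,
+						 unsigned int flags)
+		{
+			struct inode *inode = NULL;
+			ino_t ino;
+
+			ino = foo_inode_by_name(dir, &dentry->d_name);
+			if (ino)
+				inode = foo_iget(dir->i_sb, ino);
+			/* handles the hit, the negative and the error case */
+			return d_splice_alias(inode, dentry);
+		}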
+--
+[recommended]
+	->iterate_shared() is added; it's a parallel variant of ->iterate().
+	Exclusion on struct file level is still provided (as well as that
+	between it and lseek on the same struct file), but if your directory
+	has been opened several times, you can get these called in parallel.
+	Exclusion between that method and all directory-modifying ones is
+	still provided, of course.
+
+	Often enough ->iterate() can serve as ->iterate_shared() without any
+	changes - it is a read-only operation, after all.  If you have any
+	per-inode or per-dentry in-core data structures modified by ->iterate(),
+	you might need something to serialize the access to them.  If you
+	do dcache pre-seeding, you'll need to switch to d_alloc_parallel() for
+	that; look for in-tree examples.
+
+	The old method is only used if the new one is absent; eventually it will
+	be removed.  Switch while you still can; the old one won't stay.
+--
+[mandatory]
+	->atomic_open() calls without O_CREAT may happen in parallel.
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index d392e15..d9c11d2 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -60,7 +60,7 @@
            default is half of your physical RAM without swap. If you
            oversize your tmpfs instances the machine will deadlock
            since the OOM handler will not be able to free that memory.
-nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
+nr_blocks: The same as size, but in blocks of PAGE_SIZE.
 nr_inodes: The maximum number of inodes for this instance. The default
            is half of the number of your physical RAM pages, or (on a
            machine with highmem) the number of lowmem RAM pages,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index b02a7d5..c61a223 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -591,7 +591,7 @@
 	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, int);
 	void (*freepage)(struct page *);
-	ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
+	ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
 	/* migrate the contents of a page to the specified target */
 	int (*migratepage) (struct page *, struct page *);
 	int (*launder_page) (struct page *);
@@ -708,9 +708,9 @@
 	from the address space.  This generally corresponds to either a
 	truncation, punch hole  or a complete invalidation of the address
 	space (in the latter case 'offset' will always be 0 and 'length'
-	will be PAGE_CACHE_SIZE). Any private data associated with the page
+	will be PAGE_SIZE). Any private data associated with the page
 	should be updated to reflect this truncation.  If offset is 0 and
-	length is PAGE_CACHE_SIZE, then the private data should be released,
+	length is PAGE_SIZE, then the private data should be released,
 	because the page must be able to be completely discarded.  This may
 	be done by calling the ->releasepage function, but in this case the
 	release MUST succeed.
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
index bbeec41..6cb35a7 100644
--- a/Documentation/gpio/driver.txt
+++ b/Documentation/gpio/driver.txt
@@ -68,6 +68,103 @@
 on -RT (inside hard IRQ handlers and similar contexts). Normally this should
 not be required.
 
+
+GPIOs with open drain/source support
+------------------------------------
+
+Open drain (CMOS) or open collector (TTL) means the line is not actively driven
+high: instead you provide the drain/collector as output, so when the transistor
+is not open, it will present a high-impedance (tristate) to the external rail.
+
+
+   CMOS CONFIGURATION      TTL CONFIGURATION
+
+            ||--- out              +--- out
+     in ----||                   |/
+            ||--+         in ----|
+                |                |\
+               GND	           GND
+
+This configuration is normally used as a way to achieve one of two things:
+
+- Level-shifting: to reach a logical level higher than that of the silicon
+  where the output resides.
+
+- Inverse wire-OR on an I/O line, for example a GPIO line, making it possible
+  for any driving stage on the line to drive it low even if any other output
+  to the same line is simultaneously driving it high. A special case of this
+  is driving the SCL and SDA lines of an I2C bus, which is by definition a
+  wire-OR bus.
+
+Both use cases require that the line be equipped with a pull-up resistor. This
+resistor will make the line tend to high level unless one of the transistors on
+the rail actively pulls it down.
+
+The level on the line will go as high as the VDD on the pull-up resistor, which
+may be higher than the level supported by the transistor, achieving a
+level-shift to the higher VDD.
+
+Integrated electronics often have an output driver stage in the form of a CMOS
+"totem-pole" with one N-MOS and one P-MOS transistor where one of them drives
+the line high and one of them drives the line low. This is called a push-pull
+output. The "totem-pole" looks like so:
+
+                 VDD
+                  |
+        OD    ||--+
+     +--/ ---o||     P-MOS-FET
+     |        ||--+
+IN --+            +----- out
+     |        ||--+
+     +--/ ----||     N-MOS-FET
+        OS    ||--+
+                  |
+                 GND
+
+The desired output signal (e.g. coming directly from some GPIO output register)
+arrives at IN. The switches named "OD" and "OS" are normally closed, creating
+a push-pull circuit.
+
+Consider the little "switches" named "OD" and "OS" that enable/disable the
+P-MOS or N-MOS transistor right after the split of the input. As you can see,
+either transistor will go totally numb if this switch is open. The totem-pole
+is then halved and gives high impedance instead of actively driving the line
+high or low respectively. That is usually how software-controlled open
+drain/source works.
+
+Some GPIO hardware comes in open drain / open source configuration. Some are
+hard-wired lines that will only support open drain or open source no matter
+what: there is only one transistor there. Some are software-configurable:
+by flipping a bit in a register the output can be configured as open drain
+or open source, in practice by flicking open the switches labeled "OD" and "OS"
+in the drawing above.
+
+By disabling the P-MOS transistor, the output can be driven between GND and
+high impedance (open drain), and by disabling the N-MOS transistor, the output
+can be driven between VDD and high impedance (open source). In the first case,
+a pull-up resistor is needed on the outgoing rail to complete the circuit, and
+in the second case, a pull-down resistor is needed on the rail.
+
+Hardware that supports open drain or open source or both can implement a
+special callback in the gpio_chip: .set_single_ended() that takes an enum flag
+telling whether to configure the line as open drain, open source or push-pull.
+This will happen in response to the GPIO_OPEN_DRAIN or GPIO_OPEN_SOURCE flag
+set in the machine file, or coming from other hardware descriptions.
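+
+A hedged sketch of such a callback (the foo_* helpers and FOO_* register
+offsets are invented; the enum values come from <linux/gpio/driver.h>):
+
+	static int foo_gpio_set_single_ended(struct gpio_chip *gc,
+					     unsigned int offset,
+					     enum single_ended_mode mode)
+	{
+		struct foo_gpio *priv = gpiochip_get_data(gc);
+
+		switch (mode) {
+		case LINE_MODE_OPEN_DRAIN:
+			/* open the "OD" switch: P-MOS half disabled */
+			foo_gpio_write(priv, FOO_OD_REG(offset), 1);
+			foo_gpio_write(priv, FOO_OS_REG(offset), 0);
+			return 0;
+		case LINE_MODE_OPEN_SOURCE:
+			/* open the "OS" switch: N-MOS half disabled */
+			foo_gpio_write(priv, FOO_OD_REG(offset), 0);
+			foo_gpio_write(priv, FOO_OS_REG(offset), 1);
+			return 0;
+		case LINE_MODE_PUSH_PULL:
+			/* both switches closed: regular push-pull output */
+			foo_gpio_write(priv, FOO_OD_REG(offset), 0);
+			foo_gpio_write(priv, FOO_OS_REG(offset), 0);
+			return 0;
+		default:
+			return -ENOTSUPP;
+		}
+	}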
+
+If this state cannot be configured in hardware, i.e. if the GPIO hardware does
+not support open drain/open source in hardware, the GPIO library will instead
+use a trick: when a line is set as output and flagged as open drain, a low IN
+value will be driven low as usual. But a high IN value will *NOT* be driven
+high; the line will instead be switched to input, as input mode is high
+impedance, thus achieving an "open drain emulation" of sorts: electrically the
+behaviour will be identical, with the exception of possible hardware glitches
+when switching the mode of the line.
+
+For the open source configuration the same principle is used: instead of
+actively driving the line low, it is switched to input.
+
+
 GPIO drivers providing IRQs
 ---------------------------
 It is custom that GPIO drivers (GPIO chips) are also providing interrupts,
diff --git a/Documentation/hwmon/fam15h_power b/Documentation/hwmon/fam15h_power
index e2b1b69..fb594c2 100644
--- a/Documentation/hwmon/fam15h_power
+++ b/Documentation/hwmon/fam15h_power
@@ -10,14 +10,22 @@
   Datasheets:
   BIOS and Kernel Developer's Guide (BKDG) For AMD Family 15h Processors
   BIOS and Kernel Developer's Guide (BKDG) For AMD Family 16h Processors
+  AMD64 Architecture Programmer's Manual Volume 2: System Programming
 
 Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
 
 Description
 -----------
 
+1) Processor TDP (Thermal design power)
+
+Given a fixed frequency and voltage, the power consumption of a
+processor varies based on the workload being executed. Derated power
+is the power consumed when running a specific application. Thermal
+design power (TDP) is an example of derated power.
+
 This driver permits reading of registers providing power information
-of AMD Family 15h and 16h processors.
+of AMD Family 15h and 16h processors via the TDP algorithm.
 
 For AMD Family 15h and 16h processors the following power values can
 be calculated using different processor northbridge function
@@ -37,3 +45,58 @@
 On multi-node processors the calculated value is for the entire
 package and not for a single node. Thus the driver creates sysfs
 attributes only for internal node0 of a multi-node processor.
+
+2) Accumulated Power Mechanism
+
+This driver also introduces an algorithm that should be used to
+calculate the average power consumed by a processor during a
+measurement interval Tm. The accumulated power mechanism feature is
+indicated by CPUID Fn8000_0007_EDX[12].
+
+* Tsample: compute unit power accumulator sample period
+* Tref: the PTSC counter period
+* PTSC: performance timestamp counter
+* N: the ratio of compute unit power accumulator sample period to the
+  PTSC period
+* Jmax: max compute unit accumulated power which is indicated by
+  MaxCpuSwPwrAcc MSR C001007b
+* Jx/Jy: compute unit accumulated power which is indicated by
+  CpuSwPwrAcc MSR C001007a
+* Tx/Ty: the value of performance timestamp counter which is indicated
+  by CU_PTSC MSR C0010280
+* PwrCPUave: CPU average power
+
+i. Determine the ratio of Tsample to Tref by executing CPUID Fn8000_0007.
+	N = value of CPUID Fn8000_0007_ECX[CpuPwrSampleTimeRatio[15:0]].
+
+ii. Read the full range of the cumulative energy value from the new
+MSR MaxCpuSwPwrAcc.
+	Jmax = value returned.
+
+iii. At time x, SW reads CpuSwPwrAcc MSR and samples the PTSC.
+	Jx = value read from CpuSwPwrAcc and Tx = value read from
+PTSC.
+
+iv. At time y, SW reads CpuSwPwrAcc MSR and samples the PTSC.
+	Jy = value read from CpuSwPwrAcc and Ty = value read from
+PTSC.
+
+v. Calculate the average power consumption for a compute unit over
+time period (y-x). Unit of result is uWatt.
+	if (Jy < Jx) // Rollover has occurred
+		Jdelta = (Jy + Jmax) - Jx
+	else
+		Jdelta = Jy - Jx
+	PwrCPUave = N * Jdelta * 1000 / (Ty - Tx)
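+
+The same computation as a small C sketch (the six inputs are the raw
+values read in steps i-iv; the function name is illustrative):
+
+	static u64 pwr_cpu_ave_uw(u64 n, u64 jmax, u64 jx, u64 jy,
+				  u64 tx, u64 ty)
+	{
+		u64 jdelta;
+
+		if (jy < jx)			/* accumulator rolled over */
+			jdelta = (jy + jmax) - jx;
+		else
+			jdelta = jy - jx;
+
+		return div64_u64(n * jdelta * 1000, ty - tx);	/* uWatt */
+	}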
+
+This driver provides PwrCPUave and the interval (the default is 10
+milliseconds and the maximum is 1 second):
+* power1_average (PwrCPUave)
+* power1_average_interval (Interval)
+
+The power1_average_interval value can be updated in the /etc/sensors3.conf
+file as shown below:
+
+chip "fam15h_power-*"
+	set power1_average_interval 0.01
+
+Then save it with "sensors -s".
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87
index 733296d..fff6f6b 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87
@@ -9,6 +9,9 @@
   * IT8620E
     Prefix: 'it8620'
     Addresses scanned: from Super I/O config space (8 I/O ports)
+  * IT8628E
+    Prefix: 'it8628'
+    Addresses scanned: from Super I/O config space (8 I/O ports)
     Datasheet: Not publicly available
   * IT8705F
     Prefix: 'it87'
@@ -114,8 +117,8 @@
 Description
 -----------
 
-This driver implements support for the IT8603E, IT8620E, IT8623E, IT8705F,
-IT8712F, IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8732F,
+This driver implements support for the IT8603E, IT8620E, IT8623E, IT8628E,
+IT8705F, IT8712F, IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8732F,
 IT8758E, IT8771E, IT8772E, IT8781F, IT8782F, IT8783E/F, IT8786E, IT8790E, and
 SiS950 chips.
 
@@ -158,8 +161,8 @@
 IT8728F. It only supports 3 fans, 16-bit fan mode, and the full speed mode
 of the fan is not supported (value 0 of pwmX_enable).
 
-The IT8620E is another custom design, hardware monitoring part is similar to
-IT8728F. It only supports 16-bit fan mode.
+The IT8620E and IT8628E are custom designs; their hardware monitoring part is
+similar to the IT8728F. They only support 16-bit fan mode. Both chips support
+up to 6 fans.
 
 The IT8790E supports up to 3 fans. 16-bit fan mode is always enabled.
 
@@ -187,8 +190,8 @@
 2.8 volts with a resolution of 0.0109 volt.  The battery voltage in8 does not
 have limit registers.
 
-On the IT8603E, IT8721F/IT8758E, IT8732F, IT8781F, IT8782F, and IT8783E/F, some
-voltage inputs are internal and scaled inside the chip:
+On the IT8603E, IT8620E, IT8628E, IT8721F/IT8758E, IT8732F, IT8781F, IT8782F,
+and IT8783E/F, some voltage inputs are internal and scaled inside the chip:
 * in3 (optional)
 * in7 (optional for IT8781F, IT8782F, and IT8783E/F)
 * in8 (always)
diff --git a/Documentation/hwmon/max31722 b/Documentation/hwmon/max31722
new file mode 100644
index 0000000..090da845
--- /dev/null
+++ b/Documentation/hwmon/max31722
@@ -0,0 +1,34 @@
+Kernel driver max31722
+======================
+
+Supported chips:
+  * Maxim Integrated MAX31722
+    Prefix: 'max31722'
+    ACPI ID: MAX31722
+    Addresses scanned: -
+    Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31722-MAX31723.pdf
+  * Maxim Integrated MAX31723
+    Prefix: 'max31723'
+    ACPI ID: MAX31723
+    Addresses scanned: -
+    Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31722-MAX31723.pdf
+
+Author: Tiberiu Breana <tiberiu.a.breana@intel.com>
+
+Description
+-----------
+
+This driver adds support for the Maxim Integrated MAX31722/MAX31723 thermometers
+and thermostats running over an SPI interface.
+
+Usage Notes
+-----------
+
+This driver uses ACPI to auto-detect devices. See ACPI IDs in the above section.
+
+Sysfs entries
+-------------
+
+The following attribute is supported:
+
+temp1_input		Measured temperature. Read-only.
diff --git a/Documentation/i2c/i2c-topology b/Documentation/i2c/i2c-topology
new file mode 100644
index 0000000..e0aefee
--- /dev/null
+++ b/Documentation/i2c/i2c-topology
@@ -0,0 +1,370 @@
+I2C topology
+============
+
+There are a couple of reasons for building more complex i2c topologies
+than a straightforward i2c bus with one adapter and one or more devices.
+
+1. A mux may be needed on the bus to prevent address collisions.
+
+2. The bus may be accessible from some external bus master, and arbitration
+   may be needed to determine if it is ok to access the bus.
+
+3. A device (particularly RF tuners) may want to avoid the digital noise
+   from the i2c bus, at least most of the time, and sits behind a gate
+   that has to be operated before the device can be accessed.
+
+Etc.
+
+These constructs are represented as i2c adapter trees by Linux, where
+each adapter has a parent adapter (except the root adapter) and zero or
+more child adapters. The root adapter is the actual adapter that issues
+i2c transfers, and all adapters with a parent are part of an "i2c-mux"
+object (quoted, since it can also be an arbitrator or a gate).
+
+Depending on the particular mux driver, something happens when there is
+an i2c transfer on one of its child adapters. The mux driver can
+obviously operate a mux, but it can also do arbitration with an external
+bus master or open a gate. The mux driver has two operations for this,
+select and deselect. select is called before the transfer and (the
+optional) deselect is called after the transfer.
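+
+As a hedged sketch of how a mux driver plugs into this, using the i2c mux
+core API of this release (i2c_mux_alloc() and friends); the foo_* names,
+the channel count and the register write are invented:
+
+	static int foo_mux_select(struct i2c_mux_core *muxc, u32 chan)
+	{
+		struct foo_mux *priv = i2c_mux_priv(muxc);
+
+		/* operate the mux/gate/arbitrator before the transfer */
+		return foo_mux_write_channel(priv, chan);
+	}
+
+	static int foo_mux_setup(struct i2c_adapter *parent, struct device *dev)
+	{
+		struct i2c_mux_core *muxc;
+		int chan, ret;
+
+		muxc = i2c_mux_alloc(parent, dev, 4, sizeof(struct foo_mux),
+				     I2C_MUX_LOCKED, foo_mux_select, NULL);
+		if (!muxc)
+			return -ENOMEM;
+
+		for (chan = 0; chan < 4; chan++) {
+			ret = i2c_mux_add_adapter(muxc, 0, chan, 0);
+			if (ret) {
+				i2c_mux_del_adapters(muxc);
+				return ret;
+			}
+		}
+		return 0;
+	}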
+
+
+Locking
+=======
+
+There are two variants of locking available to i2c muxes: they can be
+mux-locked or parent-locked muxes. As is evident from below, it can be
+useful to know if a mux is mux-locked or if it is parent-locked. The
+following list was correct at the time of writing:
+
+In drivers/i2c/muxes/
+i2c-arb-gpio-challenge    Parent-locked
+i2c-mux-gpio              Normally parent-locked, mux-locked iff
+                          all involved gpio pins are controlled by the
+                          same i2c root adapter that they mux.
+i2c-mux-pca9541           Parent-locked
+i2c-mux-pca954x           Parent-locked
+i2c-mux-pinctrl           Normally parent-locked, mux-locked iff
+                          all involved pinctrl devices are controlled
+                          by the same i2c root adapter that they mux.
+i2c-mux-reg               Parent-locked
+
+In drivers/iio/
+imu/inv_mpu6050/          Mux-locked
+
+In drivers/media/
+dvb-frontends/m88ds3103   Parent-locked
+dvb-frontends/rtl2830     Parent-locked
+dvb-frontends/rtl2832     Mux-locked
+dvb-frontends/si2168      Mux-locked
+usb/cx231xx/              Parent-locked
+
+
+Mux-locked muxes
+----------------
+
+Mux-locked muxes do not lock the entire parent adapter during the
+full select-transfer-deselect transaction; only the muxes on the parent
+adapter are locked. Mux-locked muxes are mostly interesting if the
+select and/or deselect operations must use i2c transfers to complete
+their tasks. Since the parent adapter is not fully locked during the
+full transaction, unrelated i2c transfers may interleave the different
+stages of the transaction. This has the benefit that the mux driver
+may be easier and cleaner to implement, but it has some caveats.
+
+ML1. If you build a topology with a mux-locked mux being the parent
+     of a parent-locked mux, this might break the expectation from the
+     parent-locked mux that the root adapter is locked during the
+     transaction.
+
+ML2. It is not safe to build arbitrary topologies with two (or more)
+     mux-locked muxes that are not siblings, when there are address
+     collisions between the devices on the child adapters of these
+     non-sibling muxes.
+
+     I.e. the select-transfer-deselect transaction targeting e.g. device
+     address 0x42 behind mux-one may be interleaved with a similar
+     operation targeting device address 0x42 behind mux-two. The
+     intention with such a topology would in this hypothetical example
+     be that mux-one and mux-two should not be selected simultaneously,
+     but mux-locked muxes do not guarantee that in all topologies.
+
+ML3. A mux-locked mux cannot be used by a driver for auto-closing
+     gates/muxes, i.e. something that closes automatically after a given
+     number (one, in most cases) of i2c transfers. Unrelated i2c transfers
+     may creep in and close prematurely.
+
+ML4. If any non-i2c operation in the mux driver changes the i2c mux state,
+     the driver has to lock the root adapter during that operation.
+     Otherwise garbage may appear on the bus as seen from devices
+     behind the mux, when an unrelated i2c transfer is in flight during
+     the non-i2c mux-changing operation.
+
+
+Mux-locked Example
+------------------
+
+                   .----------.     .--------.
+    .--------.     |   mux-   |-----| dev D1 |
+    |  root  |--+--|  locked  |     '--------'
+    '--------'  |  |  mux M1  |--.  .--------.
+                |  '----------'  '--| dev D2 |
+                |  .--------.       '--------'
+                '--| dev D3 |
+                   '--------'
+
+When there is an access to D1, this happens:
+
+ 1. Someone issues an i2c-transfer to D1.
+ 2. M1 locks muxes on its parent (the root adapter in this case).
+ 3. M1 calls ->select to ready the mux.
+ 4. M1 (presumably) does some i2c-transfers as part of its select.
+    These transfers are normal i2c-transfers that lock the parent
+    adapter.
+ 5. M1 feeds the i2c-transfer from step 1 to its parent adapter as a
+    normal i2c-transfer that locks the parent adapter.
+ 6. M1 calls ->deselect, if it has one.
+ 7. Same rules as in step 4, but for ->deselect.
+ 8. M1 unlocks muxes on its parent.
+
+This means that accesses to D2 are locked out for the full duration
+of the entire operation. But accesses to D3 are possibly interleaved
+at any point.
+
+
+Parent-locked muxes
+-------------------
+
+Parent-locked muxes lock the parent adapter during the full select-
+transfer-deselect transaction. The implication is that the mux driver
+has to ensure that any and all i2c transfers through that parent
+adapter during the transaction are unlocked i2c transfers (using e.g.
+__i2c_transfer), or a deadlock will follow. There are a couple of
+caveats.
+
+PL1. If you build a topology with a parent-locked mux being the child
+     of another mux, this might break a possible assumption from the
+     child mux that the root adapter is unused between its select op
+     and the actual transfer (e.g. if the child mux is auto-closing
+     and the parent mux issues i2c-transfers as part of its select).
+     This is especially the case if the parent mux is mux-locked, but
+     it may also happen if the parent mux is parent-locked.
+
+PL2. If select/deselect calls out to other subsystems such as gpio,
+     pinctrl, regmap or iio, it is essential that any i2c transfers
+     caused by these subsystems are unlocked. This can be difficult to
+     accomplish, maybe even impossible if an acceptably clean solution
+     is sought.
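+
+As a sketch of what "unlocked transfers" means in practice (device address,
+register layout and error handling are simplified and invented), a
+parent-locked mux whose select operation itself needs to talk on the parent
+bus must use __i2c_transfer() so that it does not try to take the adapter
+lock it already holds:
+
+	#define FOO_GATE_ADDR	0x74	/* invented client address */
+	#define FOO_GATE_REG	0x01	/* invented channel select register */
+
+	static int foo_gate_select(struct i2c_mux_core *muxc, u32 chan)
+	{
+		u8 buf[2] = { FOO_GATE_REG, (u8)chan };
+		struct i2c_msg msg = {
+			.addr = FOO_GATE_ADDR,
+			.len = sizeof(buf),
+			.buf = buf,
+		};
+
+		/* the parent adapter is already locked here; a plain
+		   i2c_transfer() would deadlock */
+		return __i2c_transfer(muxc->parent, &msg, 1) == 1 ? 0 : -EIO;
+	}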
+
+
+Parent-locked Example
+---------------------
+
+                   .----------.     .--------.
+    .--------.     |  parent- |-----| dev D1 |
+    |  root  |--+--|  locked  |     '--------'
+    '--------'  |  |  mux M1  |--.  .--------.
+                |  '----------'  '--| dev D2 |
+                |  .--------.       '--------'
+                '--| dev D3 |
+                   '--------'
+
+When there is an access to D1, this happens:
+
+ 1. Someone issues an i2c-transfer to D1.
+ 2. M1 locks muxes on its parent (the root adapter in this case).
+ 3. M1 locks its parent adapter.
+ 4. M1 calls ->select to ready the mux.
+ 5. If M1 does any i2c-transfers (on this root adapter) as part of
+    its select, those transfers must be unlocked i2c-transfers so
+    that they do not deadlock the root adapter.
+ 6. M1 feeds the i2c-transfer from step 1 to the root adapter as an
+    unlocked i2c-transfer, so that it does not deadlock the parent
+    adapter.
+ 7. M1 calls ->deselect, if it has one.
+ 8. Same rules as in step 5, but for ->deselect.
+ 9. M1 unlocks its parent adapter.
+10. M1 unlocks muxes on its parent.
+
+
+This means that accesses to both D2 and D3 are locked out for the full
+duration of the entire operation.
+
+
+Complex Examples
+================
+
+Parent-locked mux as parent of parent-locked mux
+------------------------------------------------
+
+This is a useful topology, but it can be bad.
+
+                   .----------.     .----------.     .--------.
+    .--------.     |  parent- |-----|  parent- |-----| dev D1 |
+    |  root  |--+--|  locked  |     |  locked  |     '--------'
+    '--------'  |  |  mux M1  |--.  |  mux M2  |--.  .--------.
+                |  '----------'  |  '----------'  '--| dev D2 |
+                |  .--------.    |  .--------.       '--------'
+                '--| dev D4 |    '--| dev D3 |
+                   '--------'       '--------'
+
+When any device is accessed, all other devices are locked out for
+the full duration of the operation (both muxes lock their parent,
+and specifically when M2 requests its parent to lock, M1 passes
+the buck to the root adapter).
+
+This topology is bad if M2 is an auto-closing mux and M1->select
+issues any unlocked i2c transfers on the root adapter that may leak
+through and be seen by the M2 adapter, thus closing M2 prematurely.
+
+
+Mux-locked mux as parent of mux-locked mux
+------------------------------------------
+
+This is a good topology.
+
+                   .----------.     .----------.     .--------.
+    .--------.     |   mux-   |-----|   mux-   |-----| dev D1 |
+    |  root  |--+--|  locked  |     |  locked  |     '--------'
+    '--------'  |  |  mux M1  |--.  |  mux M2  |--.  .--------.
+                |  '----------'  |  '----------'  '--| dev D2 |
+                |  .--------.    |  .--------.       '--------'
+                '--| dev D4 |    '--| dev D3 |
+                   '--------'       '--------'
+
+When device D1 is accessed, accesses to D2 are locked out for the
+full duration of the operation (muxes on the top child adapter of M1
+are locked). But accesses to D3 and D4 are possibly interleaved at
+any point. Accesses to D3 lock out D1 and D2, but accesses to D4
+are still possibly interleaved.
+
+
+Mux-locked mux as parent of parent-locked mux
+---------------------------------------------
+
+This is probably a bad topology.
+
+                   .----------.     .----------.     .--------.
+    .--------.     |   mux-   |-----|  parent- |-----| dev D1 |
+    |  root  |--+--|  locked  |     |  locked  |     '--------'
+    '--------'  |  |  mux M1  |--.  |  mux M2  |--.  .--------.
+                |  '----------'  |  '----------'  '--| dev D2 |
+                |  .--------.    |  .--------.       '--------'
+                '--| dev D4 |    '--| dev D3 |
+                   '--------'       '--------'
+
+When device D1 is accessed, accesses to D2 and D3 are locked out
+for the full duration of the operation (M1 locks child muxes on the
+root adapter). But accesses to D4 are possibly interleaved at any
+point.
+
+This kind of topology is generally not suitable and should probably
+be avoided. The reason is that M2 probably assumes that there will
+be no i2c transfers during its calls to ->select and ->deselect, and
+if there are, any such transfers might appear on the slave side of M2
+as partial i2c transfers, i.e. garbage or worse. This might cause
+device lockups and/or other problems.
+
+The topology is especially troublesome if M2 is an auto-closing
+mux. In that case, any interleaved accesses to D4 might close M2
+prematurely, as might any i2c-transfers part of M1->select.
+
+But if M2 is not making the above stated assumption, and if M2 is not
+auto-closing, the topology is fine.
+
+
+Parent-locked mux as parent of mux-locked mux
+---------------------------------------------
+
+This is a good topology.
+
+                   .----------.     .----------.     .--------.
+    .--------.     |  parent- |-----|   mux-   |-----| dev D1 |
+    |  root  |--+--|  locked  |     |  locked  |     '--------'
+    '--------'  |  |  mux M1  |--.  |  mux M2  |--.  .--------.
+                |  '----------'  |  '----------'  '--| dev D2 |
+                |  .--------.    |  .--------.       '--------'
+                '--| dev D4 |    '--| dev D3 |
+                   '--------'       '--------'
+
+When D1 is accessed, accesses to D2 are locked out for the full
+duration of the operation (muxes on the top child adapter of M1
+are locked). Accesses to D3 and D4 are possibly interleaved at
+any point, just as is expected for mux-locked muxes.
+
+When D3 or D4 are accessed, everything else is locked out. For D3
+accesses, M1 locks the root adapter. For D4 accesses, the root
+adapter is locked directly.
+
+
+Two mux-locked sibling muxes
+----------------------------
+
+This is a good topology.
+
+                                    .--------.
+                   .----------.  .--| dev D1 |
+                   |   mux-   |--'  '--------'
+                .--|  locked  |     .--------.
+                |  |  mux M1  |-----| dev D2 |
+                |  '----------'     '--------'
+                |  .----------.     .--------.
+    .--------.  |  |   mux-   |-----| dev D3 |
+    |  root  |--+--|  locked  |     '--------'
+    '--------'  |  |  mux M2  |--.  .--------.
+                |  '----------'  '--| dev D4 |
+                |  .--------.       '--------'
+                '--| dev D5 |
+                   '--------'
+
+When D1 is accessed, accesses to D2, D3 and D4 are locked out. But
+accesses to D5 may be interleaved at any time.
+
+
+Two parent-locked sibling muxes
+-------------------------------
+
+This is a good topology.
+
+                                   .--------.
+                   .----------.  .--| dev D1 |
+                   |  parent- |--'  '--------'
+                .--|  locked  |     .--------.
+                |  |  mux M1  |-----| dev D2 |
+                |  '----------'     '--------'
+                |  .----------.     .--------.
+    .--------.  |  |  parent- |-----| dev D3 |
+    |  root  |--+--|  locked  |     '--------'
+    '--------'  |  |  mux M2  |--.  .--------.
+                |  '----------'  '--| dev D4 |
+                |  .--------.       '--------'
+                '--| dev D5 |
+                   '--------'
+
+When any device is accessed, accesses to all other devices are locked
+out.
+
+
+Mux-locked and parent-locked sibling muxes
+------------------------------------------
+
+This is a good topology.
+
+                                   .--------.
+                   .----------.  .--| dev D1 |
+                   |   mux-   |--'  '--------'
+                .--|  locked  |     .--------.
+                |  |  mux M1  |-----| dev D2 |
+                |  '----------'     '--------'
+                |  .----------.     .--------.
+    .--------.  |  |  parent- |-----| dev D3 |
+    |  root  |--+--|  locked  |     '--------'
+    '--------'  |  |  mux M2  |--.  .--------.
+                |  '----------'  '--| dev D4 |
+                |  .--------.       '--------'
+                '--| dev D5 |
+                   '--------'
+
+When D1 or D2 are accessed, accesses to D3 and D4 are locked out while
+accesses to D5 may interleave. When D3 or D4 are accessed, accesses to
+all other devices are locked out.
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index 3f0f5ce..36ea940 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -173,6 +173,10 @@
     proximity of the device and while the value of the BTN_TOUCH code is 0. If
     the input device may be used freely in three dimensions, consider ABS_Z
     instead.
+  - BTN_TOOL_<name> should be set to 1 when the tool comes into detectable
+    proximity and set to 0 when the tool leaves detectable proximity.
+    BTN_TOOL_<name> signals the type of tool that is currently detected by the
+    hardware and is otherwise independent of ABS_DISTANCE and/or BTN_TOUCH.
 
 * ABS_MT_<name>:
   - Used to describe multitouch input events. Please see
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ecc74fa..f5c3590 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -131,6 +131,7 @@
 			More X86-64 boot options can be found in
 			Documentation/x86/x86_64/boot-options.txt .
 	X86	Either 32-bit or 64-bit x86 (same as X86-32+X86-64)
+	X86_UV	SGI UV support is enabled.
 	XEN	Xen support is enabled
 
 In addition, the following text indicates that the option:
@@ -167,16 +168,18 @@
 
 	acpi=		[HW,ACPI,X86,ARM64]
 			Advanced Configuration and Power Interface
-			Format: { force | off | strict | noirq | rsdt |
+			Format: { force | on | off | strict | noirq | rsdt |
 				  copy_dsdt }
 			force -- enable ACPI if default was off
+			on -- enable ACPI but allow fallback to DT [arm64]
 			off -- disable ACPI if default was on
 			noirq -- do not use ACPI for IRQ routing
 			strict -- Be less tolerant of platforms that are not
 				strictly ACPI specification compliant.
 			rsdt -- prefer RSDT over (default) XSDT
 			copy_dsdt -- copy DSDT to memory
-			For ARM64, ONLY "acpi=off" or "acpi=force" are available
+			For ARM64, ONLY "acpi=off", "acpi=on" or "acpi=force"
+			are available
 
 			See also Documentation/power/runtime_pm.txt, pci=noacpi
 
@@ -312,6 +315,8 @@
 			acpi_osi=!*		# remove all strings
 			acpi_osi=!		# disable all built-in OS vendor
 						  strings
+			acpi_osi=!!		# enable all built-in OS vendor
+						  strings
 			acpi_osi=		# disable all strings
 
 			'acpi_osi=!' can be used in combination with single or
@@ -542,6 +547,13 @@
 			Format: <int> (must be >=0)
 			Default: 64
 
+	bau=		[X86_UV] Enable the BAU on SGI UV.  The default
+			behavior is to disable the BAU (i.e. bau=0).
+			Format: { "0" | "1" }
+			0 - Disable the BAU.
+			1 - Enable the BAU.
+			unset - Disable the BAU.
+
 	baycom_epp=	[HW,AX25]
 			Format: <io>,<mode>
 
@@ -826,6 +838,9 @@
 			It will be ignored when crashkernel=X,high is not used
 			or memory reserved is below 4G.
 
+	cryptomgr.notests
+                        [KNL] Disable crypto self-tests
+
 	cs89x0_dma=	[HW,NET]
 			Format: <dma>
 
@@ -1661,6 +1676,11 @@
 		hwp_only
 			Only load intel_pstate on systems which support
 			hardware P state control (HWP) if available.
+		support_acpi_ppc
+			Enforce ACPI _PPC performance limits. If the Fixed ACPI
+			Description Table specifies the preferred power management
+			profile as "Enterprise Server" or "Performance Server",
+			then this feature is turned on by default.
 
 	intremap=	[X86-64, Intel-IOMMU]
 			on	enable Interrupt Remapping (default)
@@ -1767,6 +1787,13 @@
 			PCI device 00:14.0 write the parameter as:
 				ivrs_hpet[0]=00:14.0
 
+	ivrs_acpihid	[HW,X86_64]
+			Provide an override to the ACPI-HID:UID<->DEVICE-ID
+			mapping provided in the IVRS ACPI table. For
+			example, to map UART-HID:UID AMD0020:0 to
+			PCI device 00:14.5 write the parameter as:
+				ivrs_acpihid[00:14.5]=AMD0020:0
+
 	js=		[HW,JOY] Analog joystick
 			See Documentation/input/joystick.txt.
 
@@ -2538,6 +2565,9 @@
 
 	nohugeiomap	[KNL,x86] Disable kernel huge I/O mappings.
 
+	nosmt		[KNL,S390] Disable symmetric multithreading (SMT).
+			Equivalent to smt=1.
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
@@ -3284,6 +3314,44 @@
 			Lazy RCU callbacks are those which RCU can
 			prove do nothing more than free memory.
 
+	rcuperf.gp_exp= [KNL]
+			Measure performance of expedited synchronous
+			grace-period primitives.
+
+	rcuperf.holdoff= [KNL]
+			Set test-start holdoff period.  The purpose of
+			this parameter is to delay the start of the
+			test until boot completes in order to avoid
+			interference.
+
+	rcuperf.nreaders= [KNL]
+			Set number of RCU readers.  The value -1 selects
+			N, where N is the number of CPUs.  A value
+			"n" less than -1 selects N-n+1, where N is again
+			the number of CPUs.  For example, -2 selects N
+			(the number of CPUs), -3 selects N+1, and so on.
+			A value of "n" less than or equal to -N selects
+			a single reader.
+
+	rcuperf.nwriters= [KNL]
+			Set number of RCU writers.  The values operate
+			the same as for rcuperf.nreaders, with N being
+			the number of CPUs.
+
+	rcuperf.perf_runnable= [BOOT]
+			Start rcuperf running at boot time.
+
+	rcuperf.shutdown= [KNL]
+			Shut the system down after performance tests
+			complete.  This is useful for hands-off automated
+			testing.
+
+	rcuperf.perf_type= [KNL]
+			Specify the RCU implementation to test.
+
+	rcuperf.verbose= [KNL]
+			Enable additional printk() statements.
+
 	rcutorture.cbflood_inter_holdoff= [KNL]
 			Set holdoff time (jiffies) between successive
 			callback-flood tests.
@@ -3695,6 +3763,13 @@
 				1: Fast pin select (default)
 				2: ATC IRMode
 
+	smt		[KNL,S390] Set the maximum number of threads (logical
+			CPUs) to use per physical CPU on systems capable of
+			symmetric multithreading (SMT). Will be capped to the
+			actual hardware limit.
+			Format: <integer>
+			Default: -1 (no limit)
+
 	softlockup_panic=
 			[KNL] Should the soft-lockup detector generate panics.
 			Format: <integer>
@@ -4077,6 +4152,8 @@
 					sector if the number is odd);
 				i = IGNORE_DEVICE (don't bind to this
 					device);
+				j = NO_REPORT_LUNS (don't use report luns
+					command, uas only);
 				l = NOT_LOCKABLE (don't try to lock and
 					unlock ejectable media);
 				m = MAX_SECTORS_64 (don't transfer more
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
new file mode 100644
index 0000000..6c43f6e
--- /dev/null
+++ b/Documentation/livepatch/livepatch.txt
@@ -0,0 +1,394 @@
+=========
+Livepatch
+=========
+
+This document outlines basic information about kernel livepatching.
+
+Table of Contents:
+
+1. Motivation
+2. Kprobes, Ftrace, Livepatching
+3. Consistency model
+4. Livepatch module
+   4.1. New functions
+   4.2. Metadata
+   4.3. Livepatch module handling
+5. Livepatch life-cycle
+   5.1. Registration
+   5.2. Enabling
+   5.3. Disabling
+   5.4. Unregistration
+6. Sysfs
+7. Limitations
+
+
+1. Motivation
+=============
+
+There are many situations where users are reluctant to reboot a system. It may
+be because their system is performing complex scientific computations or under
+heavy load during peak usage. In addition to keeping systems up and running,
+users also want to have a stable and secure system. Livepatching gives users
+both by allowing function calls to be redirected, thus fixing critical
+functions without a system reboot.
+
+
+2. Kprobes, Ftrace, Livepatching
+================================
+
+There are multiple mechanisms in the Linux kernel that are directly related
+to redirection of code execution; namely: kernel probes, function tracing,
+and livepatching:
+
+  + The kernel probes are the most generic. The code can be redirected by
+    putting a breakpoint instruction in place of any instruction.
+
+  + The function tracer calls the code from a predefined location that is
+    close to the function entry point. This location is generated by the
+    compiler using the '-pg' gcc option.
+
+  + Livepatching typically needs to redirect the code at the very beginning
+    of the function entry before the function parameters or the stack
+    are in any way modified.
+
+All three approaches need to modify the existing code at runtime. Therefore
+they need to be aware of each other and not step over each other's toes.
+Most of these problems are solved by using the dynamic ftrace framework as
+a base. A Kprobe is registered as a ftrace handler when the function entry
+is probed, see CONFIG_KPROBES_ON_FTRACE. Also an alternative function from
+a live patch is called with the help of a custom ftrace handler. But there are
+some limitations, see below.
+
+
+3. Consistency model
+====================
+
+Functions are there for a reason. They take some input parameters, get or
+release locks, read, process, and even write some data in a defined way,
+and have return values. In other words, each function has a defined semantic.
+
+Many fixes do not change the semantic of the modified functions. For
+example, they add a NULL pointer or a boundary check, fix a race by adding
+a missing memory barrier, or add some locking around a critical section.
+Most of these changes are self contained and the function presents itself
+the same way to the rest of the system. In this case, the functions might
+be updated independently one by one.
+
+But there are more complex fixes. For example, a patch might change
+ordering of locking in multiple functions at the same time. Or a patch
+might exchange meaning of some temporary structures and update
+all the relevant functions. In this case, the affected unit
+(thread, whole kernel) needs to start using all new versions of
+the functions at the same time. Also the switch must happen only
+when it is safe to do so, e.g. when the affected locks are released
+or no data are stored in the modified structures at the moment.
+
+The theory about how to apply functions in a safe way is rather complex.
+The aim is to define a so-called consistency model. It attempts to define
+conditions when the new implementation could be used so that the system
+stays consistent. The theory is not yet finished. See the discussion at
+http://thread.gmane.org/gmane.linux.kernel/1823033/focus=1828189
+
+The current consistency model is very simple. It guarantees that either
+the old or the new function is called. But various functions get redirected
+one by one without any synchronization.
+
+In other words, the current implementation _never_ modifies the behavior
+in the middle of the call. This is because it does _not_ rewrite the entire
+function in memory. Instead, the function gets redirected at the
+very beginning. But this redirection is used immediately even when
+some other functions from the same patch have not been redirected yet.
+
+See also the section "Limitations" below.
+
+
+4. Livepatch module
+===================
+
+Livepatches are distributed using kernel modules, see
+samples/livepatch/livepatch-sample.c.
+
+The module includes a new implementation of functions that we want
+to replace. In addition, it defines some structures describing the
+relation between the original and the new implementation. Then there
+is code that makes the kernel start using the new code when the livepatch
+module is loaded. Also there is code that cleans up before the
+livepatch module is removed. All this is explained in more detail in
+the next sections.
+
+
+4.1. New functions
+------------------
+
+New versions of functions are typically just copied from the original
+sources. A good practice is to add a prefix to the names so that they
+can be distinguished from the original ones, e.g. in a backtrace. Also
+they can be declared as static because they are not called directly
+and do not need global visibility.
+
+The patch contains only functions that are really modified. But they
+might want to access functions or data from the original source file
+that may only be locally accessible. This can be solved by a special
+relocation section in the generated livepatch module, see
+Documentation/livepatch/module-elf-format.txt for more details.
+
+
+4.2. Metadata
+-------------
+
+The patch is described by several structures that split the information
+into three levels:
+
+  + struct klp_func is defined for each patched function. It describes
+    the relation between the original and the new implementation of a
+    particular function.
+
+    The structure includes the name, as a string, of the original function.
+    The function address is found via kallsyms at runtime.
+
+    Then it includes the address of the new function. It is defined
+    directly by assigning the function pointer. Note that the new
+    function is typically defined in the same source file.
+
+    As an optional parameter, the symbol position in the kallsyms database can
+    be used to disambiguate functions of the same name. This is not the
+    absolute position in the database, but rather the order in which it has
+    been found within a particular object (vmlinux or a kernel module). Note that
+    kallsyms allows for searching symbols according to the object name.
+
+  + struct klp_object defines an array of patched functions (struct
+    klp_func) in the same object, where the object is either vmlinux
+    (NULL) or a module name.
+
+    The structure helps to group and handle functions for each object
+    together. Note that patched modules might be loaded later than
+    the patch itself and the relevant functions might be patched
+    only when they are available.
+
+
+  + struct klp_patch defines an array of patched objects (struct
+    klp_object).
+
+    This structure handles all patched functions consistently and, eventually,
+    synchronously. The whole patch is applied only when all patched
+    symbols are found. The only exception are symbols from objects
+    (kernel modules) that have not been loaded yet. Also if a more complex
+    consistency model is supported then a selected unit (thread,
+    kernel as a whole) will see the new code from the entire patch
+    only when it is in a safe state.
+
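+A condensed sketch of this metadata, modeled on
+samples/livepatch/livepatch-sample.c (the patched function name is invented):
+
+	static int livepatch_foo(void *arg)	/* new implementation */
+	{
+		pr_info("patched foo() called\n");
+		return 0;
+	}
+
+	static struct klp_func funcs[] = {
+		{
+			.old_name = "foo",	/* resolved via kallsyms */
+			.new_func = livepatch_foo,
+		}, { }
+	};
+
+	static struct klp_object objs[] = {
+		{
+			/* .name = NULL means vmlinux; use a module name otherwise */
+			.funcs = funcs,
+		}, { }
+	};
+
+	static struct klp_patch patch = {
+		.mod = THIS_MODULE,
+		.objs = objs,
+	};
+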
+
+4.3. Livepatch module handling
+------------------------------
+
+The usual behavior is that the new functions will get used when
+the livepatch module is loaded. For this, the module init() function
+has to register the patch (struct klp_patch) and enable it. See the
+section "Livepatch life-cycle" below for more details about these
+two operations.
+
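+Following the sample module, the init/exit callbacks could look roughly like
+this (error handling kept minimal):
+
+	static int livepatch_init(void)
+	{
+		int ret;
+
+		ret = klp_register_patch(&patch);
+		if (ret)
+			return ret;
+
+		ret = klp_enable_patch(&patch);
+		if (ret) {
+			WARN_ON(klp_unregister_patch(&patch));
+			return ret;
+		}
+		return 0;
+	}
+
+	static void livepatch_exit(void)
+	{
+		WARN_ON(klp_unregister_patch(&patch));
+	}
+
+	module_init(livepatch_init);
+	module_exit(livepatch_exit);
+	MODULE_LICENSE("GPL");
+	MODULE_INFO(livepatch, "Y");
+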
+Module removal is only safe when there are no users of the underlying
+functions.  The immediate consistency model is not able to detect this;
+therefore livepatch modules cannot be removed. See "Limitations" below.
+
+5. Livepatch life-cycle
+=======================
+
+Livepatching defines four basic operations that make up the life cycle of each
+live patch: registration, enabling, disabling and unregistration.  There are
+several reasons why it is done this way.
+
+First, the patch is applied only when all patched symbols for already
+loaded objects are found. The error handling is much easier if this
+check is done before particular functions get redirected.
+
+Second, the immediate consistency model does not guarantee that no task is
+sleeping in the new code after the patch is reverted. This means that the new
+code needs to stay around "forever". If the code is there, one could apply it
+again. Therefore it makes sense to separate the operations that might be done
+once and those that need to be repeated when the patch is enabled (applied)
+again.
+
+Third, it might take some time until the entire system is migrated
+when a more complex consistency model is used. The patch revert might
+block the livepatch module removal for too long. Therefore it is useful
+to revert the patch using a separate operation that might be called
+explicitly. But it does not make sense to remove all information
+until the livepatch module is really removed.
+
+
+5.1. Registration
+-----------------
+
+Each patch first has to be registered using klp_register_patch(). This makes
+the patch known to the livepatch framework. Also it does some preliminary
+computing and checks.
+
+In particular, the patch is added into the list of known patches. The
+addresses of the patched functions are found according to their names.
+The special relocations, mentioned in the section "New functions", are
+applied. The relevant entries are created under
+/sys/kernel/livepatch/<name>. The patch is rejected when any operation
+fails.
+
+
+5.2. Enabling
+-------------
+
+Registered patches might be enabled either by calling klp_enable_patch() or
+by writing '1' to /sys/kernel/livepatch/<name>/enabled. The system will
+start using the new implementation of the patched functions at this stage.
+
+In particular, if an original function is patched for the first time, a
+function specific struct klp_ops is created and a universal ftrace handler
+is registered.
+
+Functions might be patched multiple times. The ftrace handler is registered
+only once for the given function. Further patches just add an entry to the
+list (see field `func_stack`) of the struct klp_ops. The last added
+entry is chosen by the ftrace handler and becomes the active function
+replacement.
+
+Note that the patches might be enabled in a different order than they were
+registered.
+
+
+5.3. Disabling
+--------------
+
+Enabled patches might get disabled either by calling klp_disable_patch() or
+by writing '0' to /sys/kernel/livepatch/<name>/enabled. At this stage
+either the code from the previously enabled patch or even the original
+code gets used.
+
+Here all the functions (struct klp_func) associated with the to-be-disabled
+patch are removed from the corresponding struct klp_ops. The ftrace handler
+is unregistered and the struct klp_ops is freed when the func_stack list
+becomes empty.
+
+Patches must be disabled in exactly the reverse order in which they were
+enabled. This makes the problem and the implementation much simpler.
+
+
+5.4. Unregistration
+-------------------
+
+Disabled patches might be unregistered by calling klp_unregister_patch().
+This can be done only when the patch is disabled and the code is no longer
+used. It must be called before the livepatch module gets unloaded.
+
+At this stage, all the relevant sysfs entries are removed and the patch
+is removed from the list of known patches.
+
+
+6. Sysfs
+========
+
+Information about the registered patches can be found under
+/sys/kernel/livepatch. The patches can be enabled and disabled
+by writing there.
+
+See Documentation/ABI/testing/sysfs-kernel-livepatch for more details.
+
+
+7. Limitations
+==============
+
+The current Livepatch implementation has several limitations:
+
+
+  + The patch must not change the semantic of the patched functions.
+
+    The current implementation guarantees only that either the old
+    or the new function is called. The functions are patched one
+    by one. It means that the patch must _not_ change the semantic
+    of the function.
+
+
+  + Data structures cannot be patched.
+
+    There is no support for versioning data structures or for migrating
+    one structure into another. Also the simple consistency model does
+    not allow multiple functions to be switched atomically.
+
+    Once there is a more complex consistency model, it will be possible to
+    use some workarounds. For example, it will be possible to use a hole
+    for a new member because the data structure is aligned. Or it will
+    be possible to use an existing member for something else.
+
+    There are no plans to add more generic support for modified structures
+    at the moment.
+
+
+  + Only functions that can be traced can be patched.
+
+    Livepatch is based on the dynamic ftrace. In particular, functions
+    implementing ftrace or the livepatch ftrace handler cannot be
+    patched. Otherwise, the code would end up in an infinite loop. A
+    potential mistake is prevented by marking the problematic functions
+    with "notrace".
+
+
+  + Anything inlined into __schedule() cannot be patched.
+
+    The switch_to macro is inlined into __schedule(). It switches the
+    context between two processes in the middle of the macro. It does
+    not save RIP in x86_64 version (contrary to 32-bit version). Instead,
+    the currently used __schedule()/switch_to() handles both processes.
+
+    Now, let's have two different tasks. One calls the original
+    __schedule(), its registers are stored in a defined order and it
+    goes to sleep in the switch_to macro and some other task is restored
+    using the original __schedule(). Then there is the second task which
+    calls patched__schedule(), it goes to sleep there and the first task
+    is picked by the patched__schedule(). Its RSP is restored and now
+    the registers should be restored as well. But the order is different
+    in the new patched__schedule(), so...
+
+    There is work in progress to remove this limitation.
+
+
+  + Livepatch modules cannot be removed.
+
+    The current implementation just redirects the functions at the very
+    beginning. It does not check if the functions are in use. In other
+    words, it knows when the functions get called but it does not
+    know when the functions return. Therefore it cannot decide when
+    the livepatch module can be safely removed.
+
+    This will most likely get solved once a more complex consistency model
+    is supported. The idea is that a safe state for patching should also
+    mean a safe state for removing the patch.
+
+    Note that the patch itself might get disabled by writing zero
+    to /sys/kernel/livepatch/<patch>/enabled. This causes the new
+    code to no longer get called. But it does not guarantee
+    that no task is sleeping anywhere in the new code.
+
+
+  + Livepatch works reliably only when the dynamic ftrace is located at
+    the very beginning of the function.
+
+    The function needs to be redirected before the stack or the function
+    parameters are modified in any way. For example, livepatch requires
+    using the -mfentry gcc compiler option on x86_64.
+
+    One exception is the PPC port. It uses relative addressing and TOC.
+    Each function has to handle TOC and save LR before it can call
+    the ftrace handler. This operation has to be reverted on return.
+    Fortunately, the generic ftrace code has the same problem and all
+    this is handled on the ftrace level.
+
+
+  + Kretprobes using the ftrace framework conflict with the patched
+    functions.
+
+    Both kretprobes and livepatches use an ftrace handler that modifies
+    the return address. The first user wins. Either the probe or the patch
+    is rejected when the handler is already in use by the other.
+
+
+  + Kprobes in the original function are ignored when the code is
+    redirected to the new implementation.
+
+    There is work in progress to add warnings about this situation.
diff --git a/Documentation/livepatch/module-elf-format.txt b/Documentation/livepatch/module-elf-format.txt
new file mode 100644
index 0000000..eedbdcf
--- /dev/null
+++ b/Documentation/livepatch/module-elf-format.txt
@@ -0,0 +1,311 @@
+===========================
+Livepatch module Elf format
+===========================
+
+This document outlines the Elf format requirements that livepatch modules must follow.
+
+-----------------
+Table of Contents
+-----------------
+0. Background and motivation
+1. Livepatch modinfo field
+2. Livepatch relocation sections
+   2.1 What are livepatch relocation sections?
+   2.2 Livepatch relocation section format
+       2.2.1 Required flags
+       2.2.2 Required name format
+       2.2.3 Example livepatch relocation section names
+       2.2.4 Example `readelf --sections` output
+       2.2.5 Example `readelf --relocs` output
+3. Livepatch symbols
+   3.1 What are livepatch symbols?
+   3.2 A livepatch module's symbol table
+   3.3 Livepatch symbol format
+       3.3.1 Required flags
+       3.3.2 Required name format
+       3.3.3 Example livepatch symbol names
+       3.3.4 Example `readelf --symbols` output
+4. Symbol table and Elf section access
+
+----------------------------
+0. Background and motivation
+----------------------------
+
+Formerly, livepatch required separate architecture-specific code to write
+relocations. However, arch-specific code to write relocations already
+exists in the module loader, so this former approach produced redundant
+code. So, instead of duplicating code and re-implementing what the module
+loader can already do, livepatch leverages existing code in the module
+loader to perform all the arch-specific relocation work. Specifically,
+livepatch reuses the apply_relocate_add() function in the module loader to
+write relocations. The patch module Elf format described in this document
+enables livepatch to do this. The hope is that this will make
+livepatch more easily portable to other architectures and reduce the amount
+of arch-specific code required to port livepatch to a particular
+architecture.
+
+Since apply_relocate_add() requires access to a module's section header
+table, symbol table, and relocation section indices, Elf information is
+preserved for livepatch modules (see section 4). Livepatch manages its own
+relocation sections and symbols, which are described in this document. The
+Elf constants used to mark livepatch symbols and relocation sections were
+selected from OS-specific ranges according to the definitions from glibc.
+
+0.1 Why does livepatch need to write its own relocations?
+---------------------------------------------------------
+A typical livepatch module contains patched versions of functions that can
+reference non-exported global symbols and non-included local symbols.
+Relocations referencing these types of symbols cannot be left as-is
+since the kernel module loader cannot resolve them and will therefore
+reject the livepatch module. Furthermore, we cannot apply relocations that
+affect modules not yet loaded at patch module load time (e.g. a patch to a
+driver that is not loaded). Formerly, livepatch solved this problem by
+embedding special "dynrela" (dynamic rela) sections in the resulting patch
+module Elf output. Using these dynrela sections, livepatch could resolve
+symbols while taking into account each symbol's scope and which module it
+belongs to, and then manually apply the dynamic relocations. However this
+approach required livepatch to supply arch-specific code in order to write
+these relocations. In the new format, livepatch manages its own SHT_RELA
+relocation sections in place of dynrela sections, and the symbols that the
+relas reference are special livepatch symbols (see sections 2 and 3). The
+arch-specific livepatch relocation code is replaced by a call to
+apply_relocate_add().
+
+================================
+PATCH MODULE FORMAT REQUIREMENTS
+================================
+
+--------------------------
+1. Livepatch modinfo field
+--------------------------
+
+Livepatch modules are required to have the "livepatch" modinfo attribute.
+See the sample livepatch module in samples/livepatch/ for how this is done.
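+
+For instance, the module source typically declares the attribute with
+something like the following (a minimal sketch; see the sample module for
+the authoritative version):
+
+	MODULE_INFO(livepatch, "Y");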
+
+Livepatch modules can be identified by users by using the 'modinfo' command
+and looking for the presence of the "livepatch" field. This field is also
+used by the kernel module loader to identify livepatch modules.
+
+Example modinfo output:
+-----------------------
+% modinfo livepatch-meminfo.ko
+filename:		livepatch-meminfo.ko
+livepatch:		Y
+license:		GPL
+depends:
+vermagic:		4.3.0+ SMP mod_unload
+
+--------------------------------
+2. Livepatch relocation sections
+--------------------------------
+
+-------------------------------------------
+2.1 What are livepatch relocation sections?
+-------------------------------------------
+A livepatch module manages its own Elf relocation sections to apply
+relocations to modules as well as to the kernel (vmlinux) at the
+appropriate time. For example, if a patch module patches a driver that is
+not currently loaded, livepatch will apply the corresponding livepatch
+relocation section(s) to the driver once it loads.
+
+Each "object" (e.g. vmlinux, or a module) within a patch module may have
+multiple livepatch relocation sections associated with it (e.g. patches to
+multiple functions within the same object). There is a 1-1 correspondence
+between a livepatch relocation section and the target section (usually the
+text section of a function) to which the relocation(s) apply. It is
+also possible for a livepatch module to have no livepatch relocation
+sections, as in the case of the sample livepatch module (see
+samples/livepatch).
+
+Since Elf information is preserved for livepatch modules (see Section 4), a
+livepatch relocation section can be applied simply by passing in the
+appropriate section index to apply_relocate_add(), which then uses it to
+access the relocation section and apply the relocations.
+
+Every symbol referenced by a rela in a livepatch relocation section is a
+livepatch symbol. These must be resolved before livepatch can call
+apply_relocate_add(). See Section 3 for more information.
+
+---------------------------------------
+2.2 Livepatch relocation section format
+---------------------------------------
+
+2.2.1 Required flags
+--------------------
+Livepatch relocation sections must be marked with the SHF_RELA_LIVEPATCH
+section flag. See include/uapi/linux/elf.h for the definition. The module
+loader recognizes this flag and will avoid applying those relocation sections
+at patch module load time. These sections must also be marked with SHF_ALLOC,
+so that the module loader doesn't discard them on module load (i.e. they will
+be copied into memory along with the other SHF_ALLOC sections).
+
+2.2.2 Required name format
+--------------------------
+The name of a livepatch relocation section must conform to the following format:
+
+.klp.rela.objname.section_name
+^        ^^     ^ ^          ^
+|________||_____| |__________|
+   [A]      [B]        [C]
+
+[A] The relocation section name is prefixed with the string ".klp.rela."
+[B] The name of the object (i.e. "vmlinux" or name of module) to
+    which the relocation section belongs follows immediately after the prefix.
+[C] The actual name of the section to which this relocation section applies.
+
+2.2.3 Example livepatch relocation section names:
+-------------------------------------------------
+.klp.rela.ext4.text.ext4_attr_store
+.klp.rela.vmlinux.text.cmdline_proc_show
+
+2.2.4 Example `readelf --sections` output for a patch
+module that patches vmlinux and modules 9p, btrfs, ext4:
+--------------------------------------------------------
+  Section Headers:
+  [Nr] Name                          Type                    Address          Off    Size   ES Flg Lk Inf Al
+  [ snip ]
+  [29] .klp.rela.9p.text.caches.show RELA                    0000000000000000 002d58 0000c0 18 AIo 64   9  8
+  [30] .klp.rela.btrfs.text.btrfs.feature.attr.show RELA     0000000000000000 002e18 000060 18 AIo 64  11  8
+  [ snip ]
+  [34] .klp.rela.ext4.text.ext4.attr.store RELA              0000000000000000 002fd8 0000d8 18 AIo 64  13  8
+  [35] .klp.rela.ext4.text.ext4.attr.show RELA               0000000000000000 0030b0 000150 18 AIo 64  15  8
+  [36] .klp.rela.vmlinux.text.cmdline.proc.show RELA         0000000000000000 003200 000018 18 AIo 64  17  8
+  [37] .klp.rela.vmlinux.text.meminfo.proc.show RELA         0000000000000000 003218 0000f0 18 AIo 64  19  8
+  [ snip ]                                       ^                                             ^
+                                                 |                                             |
+                                                [*]                                           [*]
+[*] Livepatch relocation sections are SHT_RELA sections but with a few special
+characteristics. Notice that they are marked SHF_ALLOC ("A") so that they will
+not be discarded when the module is loaded into memory, as well as with the
+SHF_RELA_LIVEPATCH flag ("o" - for OS-specific).
+
+2.2.5 Example `readelf --relocs` output for a patch module:
+-----------------------------------------------------------
+Relocation section '.klp.rela.btrfs.text.btrfs_feature_attr_show' at offset 0x2ba0 contains 4 entries:
+    Offset             Info             Type               Symbol's Value  Symbol's Name + Addend
+000000000000001f  0000005e00000002 R_X86_64_PC32          0000000000000000 .klp.sym.vmlinux.printk,0 - 4
+0000000000000028  0000003d0000000b R_X86_64_32S           0000000000000000 .klp.sym.btrfs.btrfs_ktype,0 + 0
+0000000000000036  0000003b00000002 R_X86_64_PC32          0000000000000000 .klp.sym.btrfs.can_modify_feature.isra.3,0 - 4
+000000000000004c  0000004900000002 R_X86_64_PC32          0000000000000000 .klp.sym.vmlinux.snprintf,0 - 4
+[ snip ]                                                                   ^
+                                                                           |
+                                                                          [*]
+[*] Every symbol referenced by a relocation is a livepatch symbol.
+
+--------------------
+3. Livepatch symbols
+--------------------
+
+-------------------------------
+3.1 What are livepatch symbols?
+-------------------------------
+Livepatch symbols are symbols referred to by livepatch relocation sections.
+These are symbols accessed from new versions of functions for patched
+objects, whose addresses cannot be resolved by the module loader (because
+they are local or unexported global syms). Since the module loader only
+resolves exported syms, and not every symbol referenced by the new patched
+functions is exported, livepatch symbols were introduced. They are used
+also in cases where we cannot immediately know the address of a symbol when
+a patch module loads. For example, this is the case when livepatch patches
+a module that is not loaded yet. In this case, the relevant livepatch
+symbols are resolved simply when the target module loads. In any case, for
+any livepatch relocation section, all livepatch symbols referenced by that
+section must be resolved before livepatch can call apply_relocate_add() for
+that reloc section.
+
+Livepatch symbols must be marked with SHN_LIVEPATCH so that the module
+loader can identify and ignore them. Livepatch modules keep these symbols
+in their symbol tables, and the symbol table is made accessible through
+module->symtab.
+
+-------------------------------------
+3.2 A livepatch module's symbol table
+-------------------------------------
+Normally, a stripped down copy of a module's symbol table (containing only
+"core" symbols) is made available through module->symtab (See layout_symtab()
+in kernel/module.c). For livepatch modules, the symbol table copied into memory
+on module load must be exactly the same as the symbol table produced when the
+patch module was compiled. This is because the relocations in each livepatch
+relocation section refer to their respective symbols with their symbol indices,
+and the original symbol indices (and thus the symtab ordering) must be
+preserved in order for apply_relocate_add() to find the right symbol.
+
+For example, take this particular rela from a livepatch module:
+Relocation section '.klp.rela.btrfs.text.btrfs_feature_attr_show' at offset 0x2ba0 contains 4 entries:
+    Offset             Info             Type               Symbol's Value  Symbol's Name + Addend
+000000000000001f  0000005e00000002 R_X86_64_PC32          0000000000000000 .klp.sym.vmlinux.printk,0 - 4
+
+This rela refers to the symbol '.klp.sym.vmlinux.printk,0', and the symbol index is encoded
+in 'Info'. Here its symbol index is 0x5e, which is 94 in decimal.
+And in this patch module's corresponding symbol table, symbol index 94 refers to that very symbol:
+[ snip ]
+94: 0000000000000000     0 NOTYPE  GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.printk,0
+[ snip ]
+
+---------------------------
+3.3 Livepatch symbol format
+---------------------------
+
+3.3.1 Required flags
+--------------------
+Livepatch symbols must have their section index marked as SHN_LIVEPATCH, so
+that the module loader can identify them and not attempt to resolve them.
+See include/uapi/linux/elf.h for the actual definitions.
+
+3.3.2 Required name format
+--------------------------
+Livepatch symbol names must conform to the following format:
+
+.klp.sym.objname.symbol_name,sympos
+^       ^^     ^ ^         ^ ^
+|_______||_____| |_________| |
+   [A]     [B]       [C]    [D]
+
+[A] The symbol name is prefixed with the string ".klp.sym."
+[B] The name of the object (i.e. "vmlinux" or name of module) to
+    which the symbol belongs follows immediately after the prefix.
+[C] The actual name of the symbol.
+[D] The position of the symbol in the object (according to kallsyms).
+    This is used to differentiate duplicate symbols within the same
+    object. The symbol position is expressed numerically (0, 1, 2...).
+    The symbol position of a unique symbol is 0.
+
+3.3.3 Example livepatch symbol names:
+-------------------------------------
+.klp.sym.vmlinux.snprintf,0
+.klp.sym.vmlinux.printk,0
+.klp.sym.btrfs.btrfs_ktype,0
+
+3.3.4 Example `readelf --symbols` output for a patch module:
+------------------------------------------------------------
+Symbol table '.symtab' contains 127 entries:
+   Num:    Value          Size Type    Bind   Vis     Ndx         Name
+   [ snip ]
+    73: 0000000000000000     0 NOTYPE  GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.snprintf,0
+    74: 0000000000000000     0 NOTYPE  GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.capable,0
+    75: 0000000000000000     0 NOTYPE  GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.find_next_bit,0
+    76: 0000000000000000     0 NOTYPE  GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.si_swapinfo,0
+  [ snip ]                                               ^
+                                                         |
+                                                        [*]
+[*] Note that the 'Ndx' (Section index) for these symbols is SHN_LIVEPATCH (0xff20).
+    "OS" means OS-specific.
+
+--------------------------------------
+4. Symbol table and Elf section access
+--------------------------------------
+A livepatch module's symbol table is accessible through module->symtab.
+
+Since apply_relocate_add() requires access to a module's section headers,
+symbol table, and relocation section indices, Elf information is preserved for
+livepatch modules and is made accessible by the module loader through
+module->klp_info, which is a klp_modinfo struct. When a livepatch module loads,
+this struct is filled in by the module loader. Its fields are documented below:
+
+struct klp_modinfo {
+	Elf_Ehdr hdr; /* Elf header */
+	Elf_Shdr *sechdrs; /* Section header table */
+	char *secstrings; /* String table for the section headers */
+	unsigned int symndx; /* The symbol table section index */
+};
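+
+For illustration only, applying the livepatch relocation section with index
+'relsec' then boils down to a call of roughly this shape (a hedged sketch,
+not the exact kernel code; 'strtab' here stands for the module's preserved
+symbol string table):
+
+	ret = apply_relocate_add(mod->klp_info->sechdrs, strtab,
+				 mod->klp_info->symndx, relsec, mod);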
diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index 5001280..9de1c15 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -97,7 +97,7 @@
    <hardirq-safe>   ->  <hardirq-unsafe>
    <softirq-safe>   ->  <softirq-unsafe>
 
-The first rule comes from the fact the a hardirq-safe lock could be
+The first rule comes from the fact that a hardirq-safe lock could be
 taken by a hardirq context, interrupting a hardirq-unsafe lock - and
 thus could result in a lock inversion deadlock. Likewise, a softirq-safe
 lock could be taken by an softirq context, interrupting a softirq-unsafe
@@ -220,7 +220,7 @@
 when the chain is validated for the first time, is then put into a hash
 table, which hash-table can be checked in a lockfree manner. If the
 locking chain occurs again later on, the hash table tells us that we
-dont have to validate the chain again.
+don't have to validate the chain again.
 
 Troubleshooting:
 ----------------
diff --git a/Documentation/md-cluster.txt b/Documentation/md-cluster.txt
index c100c71..3888327 100644
--- a/Documentation/md-cluster.txt
+++ b/Documentation/md-cluster.txt
@@ -316,3 +316,9 @@
  nodes are using the raid which is achieved by lock all bitmap
  locks within the cluster, and also those locks are unlocked
  accordingly.
+
+7. Unsupported features
+
+There are some things which are not supported by cluster MD yet.
+
+- update size and change array_sectors.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 3729cbe..147ae8e 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -4,8 +4,40 @@
 
 By: David Howells <dhowells@redhat.com>
     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+    Will Deacon <will.deacon@arm.com>
+    Peter Zijlstra <peterz@infradead.org>
 
-Contents:
+==========
+DISCLAIMER
+==========
+
+This document is not a specification; it is intentionally (for the sake of
+brevity) and unintentionally (due to being human) incomplete. This document is
+meant as a guide to using the various memory barriers provided by Linux, but
+in case of any doubt (and there are many) please ask.
+
+To repeat, this document is not a specification of what Linux expects from
+hardware.
+
+The purpose of this document is twofold:
+
+ (1) to specify the minimum functionality that one can rely on for any
+     particular barrier, and
+
+ (2) to provide a guide as to how to use the barriers that are available.
+
+Note that an architecture can provide more than the minimum requirement
+for any particular barrier, but if the architecure provides less than
+that, that architecture is incorrect.
+
+Note also that it is possible that a barrier may be a no-op for an
+architecture because the way that arch works renders an explicit barrier
+unnecessary in that case.
+
+
+========
+CONTENTS
+========
 
  (*) Abstract memory access model.
 
@@ -31,15 +63,15 @@
 
  (*) Implicit kernel memory barriers.
 
-     - Locking functions.
+     - Lock acquisition functions.
      - Interrupt disabling functions.
      - Sleep and wake-up functions.
      - Miscellaneous functions.
 
- (*) Inter-CPU locking barrier effects.
+ (*) Inter-CPU acquiring barrier effects.
 
-     - Locks vs memory accesses.
-     - Locks vs I/O accesses.
+     - Acquires vs memory accesses.
+     - Acquires vs I/O accesses.
 
  (*) Where are memory barriers needed?
 
@@ -61,6 +93,7 @@
  (*) The things CPUs get up to.
 
      - And then there's the Alpha.
+     - Virtual Machine Guests.
 
  (*) Example uses.
 
@@ -148,7 +181,7 @@
 
 	CPU 1		CPU 2
 	===============	===============
-	{ A == 1, B == 2, C = 3, P == &A, Q == &C }
+	{ A == 1, B == 2, C == 3, P == &A, Q == &C }
 	B = 4;		Q = P;
 	P = &B		D = *Q;
 
@@ -430,8 +463,9 @@
      This acts as a one-way permeable barrier.  It guarantees that all memory
      operations after the ACQUIRE operation will appear to happen after the
      ACQUIRE operation with respect to the other components of the system.
-     ACQUIRE operations include LOCK operations and smp_load_acquire()
-     operations.
+     ACQUIRE operations include LOCK operations and both smp_load_acquire()
+     and smp_cond_acquire() operations. The latter builds the necessary ACQUIRE
+     semantics from relying on a control dependency and smp_rmb().
 
      Memory operations that occur before an ACQUIRE operation may appear to
      happen after it completes.
@@ -464,6 +498,11 @@
      This means that ACQUIRE acts as a minimal "acquire" operation and
      RELEASE acts as a minimal "release" operation.
 
+A subset of the atomic operations described in atomic_ops.txt have ACQUIRE
+and RELEASE variants in addition to fully-ordered and relaxed (no barrier
+semantics) definitions.  For compound atomics performing both a load and a
+store, ACQUIRE semantics apply only to the load and RELEASE semantics apply
+only to the store portion of the operation.
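+
+For example (an illustrative sketch, not a quote from atomic_ops.txt):
+
+	/*
+	 * atomic_xchg_acquire() both loads and stores 'v'; the ACQUIRE
+	 * ordering applies only to the load portion of the operation.
+	 */
+	old = atomic_xchg_acquire(&v, 1);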
 
 Memory barriers are only required where there's a possibility of interaction
 between two CPUs or between a CPU and a device.  If it can be guaranteed that
@@ -517,7 +556,7 @@
 
 	CPU 1		      CPU 2
 	===============	      ===============
-	{ A == 1, B == 2, C = 3, P == &A, Q == &C }
+	{ A == 1, B == 2, C == 3, P == &A, Q == &C }
 	B = 4;
 	<write barrier>
 	WRITE_ONCE(P, &B)
@@ -544,7 +583,7 @@
 
 	CPU 1		      CPU 2
 	===============	      ===============
-	{ A == 1, B == 2, C = 3, P == &A, Q == &C }
+	{ A == 1, B == 2, C == 3, P == &A, Q == &C }
 	B = 4;
 	<write barrier>
 	WRITE_ONCE(P, &B);
@@ -813,9 +852,10 @@
       the same variable, then those stores must be ordered, either by
       preceding both of them with smp_mb() or by using smp_store_release()
       to carry out the stores.  Please note that it is -not- sufficient
-      to use barrier() at beginning of each leg of the "if" statement,
-      as optimizing compilers do not necessarily respect barrier()
-      in this case.
+      to use barrier() at the beginning of each leg of the "if" statement
+      because, as shown by the example above, optimizing compilers can
+      destroy the control dependency while respecting the letter of the
+      barrier() law.
 
   (*) Control dependencies require at least one run-time conditional
       between the prior load and the subsequent store, and this
@@ -1731,15 +1771,15 @@
 
 
 All memory barriers except the data dependency barriers imply a compiler
-barrier. Data dependencies do not impose any additional compiler ordering.
+barrier.  Data dependencies do not impose any additional compiler ordering.
 
 Aside: In the case of data dependencies, the compiler would be expected
 to issue the loads in the correct order (eg. `a[b]` would have to load
 the value of b before loading a[b]), however there is no guarantee in
 the C specification that the compiler may not speculate the value of b
 (eg. is equal to 1) and load a before b (eg. tmp = a[1]; if (b != 1)
-tmp = a[b]; ). There is also the problem of a compiler reloading b after
-having loaded a[b], thus having a newer copy of b than a[b]. A consensus
+tmp = a[b]; ).  There is also the problem of a compiler reloading b after
+having loaded a[b], thus having a newer copy of b than a[b].  A consensus
 has not yet been reached about these problems, however the READ_ONCE()
 macro is a good place to start looking.
 
@@ -1794,6 +1834,7 @@
 
 
  (*) lockless_dereference();
+
      This can be thought of as a pointer-fetch wrapper around the
      smp_read_barrier_depends() data-dependency barrier.
 
@@ -1858,7 +1899,7 @@
 ordered I/O regions to be partially ordered.  Its effects may go beyond the
 CPU->Hardware interface and actually affect the hardware at some level.
 
-See the subsection "Locks vs I/O accesses" for more information.
+See the subsection "Acquires vs I/O accesses" for more information.
 
 
 ===============================
@@ -1873,8 +1914,8 @@
 of arch specific code.
 
 
-ACQUIRING FUNCTIONS
--------------------
+LOCK ACQUISITION FUNCTIONS
+--------------------------
 
 The Linux kernel has a number of locking constructs:
 
@@ -1895,7 +1936,7 @@
      Memory operations issued before the ACQUIRE may be completed after
      the ACQUIRE operation has completed.  An smp_mb__before_spinlock(),
      combined with a following ACQUIRE, orders prior stores against
-     subsequent loads and stores. Note that this is weaker than smp_mb()!
+     subsequent loads and stores.  Note that this is weaker than smp_mb()!
      The smp_mb__before_spinlock() primitive is free on many architectures.
 
  (2) RELEASE operation implication:
@@ -2090,9 +2131,9 @@
 	event_indicated = 1;
 	wake_up_process(event_daemon);
 
-A write memory barrier is implied by wake_up() and co. if and only if they wake
-something up.  The barrier occurs before the task state is cleared, and so sits
-between the STORE to indicate the event and the STORE to set TASK_RUNNING:
+A write memory barrier is implied by wake_up() and co.  if and only if they
+wake something up.  The barrier occurs before the task state is cleared, and so
+sits between the STORE to indicate the event and the STORE to set TASK_RUNNING:
 
 	CPU 1				CPU 2
 	===============================	===============================
@@ -2206,7 +2247,7 @@
 
 Then there is no guarantee as to what order CPU 3 will see the accesses to *A
 through *H occur in, other than the constraints imposed by the separate locks
-on the separate CPUs. It might, for example, see:
+on the separate CPUs.  It might, for example, see:
 
 	*E, ACQUIRE M, ACQUIRE Q, *G, *C, *F, *A, *B, RELEASE Q, *D, *H, RELEASE M
 
@@ -2486,9 +2527,9 @@
 	clear_bit_unlock();
 	__clear_bit_unlock();
 
-These implement ACQUIRE-class and RELEASE-class operations. These should be used in
-preference to other operations when implementing locking primitives, because
-their implementations can be optimised on many architectures.
+These implement ACQUIRE-class and RELEASE-class operations.  These should be
+used in preference to other operations when implementing locking primitives,
+because their implementations can be optimised on many architectures.
 
 [!] Note that special memory barrier primitives are available for these
 situations because on some CPUs the atomic instructions used imply full memory
@@ -2568,12 +2609,12 @@
 
 Normally this won't be a problem because the I/O accesses done inside such
 sections will include synchronous load operations on strictly ordered I/O
-registers that form implicit I/O barriers. If this isn't sufficient then an
+registers that form implicit I/O barriers.  If this isn't sufficient then an
 mmiowb() may need to be used explicitly.
 
 
 A similar situation may occur between an interrupt routine and two routines
-running on separate CPUs that communicate with each other. If such a case is
+running on separate CPUs that communicate with each other.  If such a case is
 likely, then interrupt-disabling locks should be used to guarantee ordering.
 
 
@@ -2587,8 +2628,8 @@
  (*) inX(), outX():
 
      These are intended to talk to I/O space rather than memory space, but
-     that's primarily a CPU-specific concept. The i386 and x86_64 processors do
-     indeed have special I/O space access cycles and instructions, but many
+     that's primarily a CPU-specific concept.  The i386 and x86_64 processors
+     do indeed have special I/O space access cycles and instructions, but many
      CPUs don't have such a concept.
 
      The PCI bus, amongst others, defines an I/O space concept which - on such
@@ -2610,7 +2651,7 @@
 
      Whether these are guaranteed to be fully ordered and uncombined with
      respect to each other on the issuing CPU depends on the characteristics
-     defined for the memory window through which they're accessing. On later
+     defined for the memory window through which they're accessing.  On later
      i386 architecture machines, for example, this is controlled by way of the
      MTRR registers.
 
@@ -2635,10 +2676,10 @@
  (*) readX_relaxed(), writeX_relaxed()
 
      These are similar to readX() and writeX(), but provide weaker memory
-     ordering guarantees. Specifically, they do not guarantee ordering with
+     ordering guarantees.  Specifically, they do not guarantee ordering with
      respect to normal memory accesses (e.g. DMA buffers) nor do they guarantee
-     ordering with respect to LOCK or UNLOCK operations. If the latter is
-     required, an mmiowb() barrier can be used. Note that relaxed accesses to
+     ordering with respect to LOCK or UNLOCK operations.  If the latter is
+     required, an mmiowb() barrier can be used.  Note that relaxed accesses to
      the same peripheral are guaranteed to be ordered with respect to each
      other.
 
@@ -3040,8 +3081,9 @@
 
 See the subsection on "Cache Coherency" above.
 
+
 VIRTUAL MACHINE GUESTS
--------------------
+----------------------
 
 Guests running within virtual machines might be affected by SMP effects even if
 the guest itself is compiled without SMP support.  This is an artifact of
@@ -3050,7 +3092,7 @@
 
 To handle this case optimally, low-level virt_mb() etc macros are available.
 These have the same effect as smp_mb() etc when SMP is enabled, but generate
-identical code for SMP and non-SMP systems. For example, virtual machine guests
+identical code for SMP and non-SMP systems.  For example, virtual machine guests
 should use virt_mb() rather than smp_mb() when synchronizing against a
 (possibly SMP) host.
 
@@ -3058,6 +3100,7 @@
 in particular, they do not control MMIO effects: to control
 MMIO effects, use mandatory barriers.
 
+
 ============
 EXAMPLE USES
 ============
diff --git a/Documentation/networking/altera_tse.txt b/Documentation/networking/altera_tse.txt
index cd417d7..50b8589 100644
--- a/Documentation/networking/altera_tse.txt
+++ b/Documentation/networking/altera_tse.txt
@@ -65,14 +65,14 @@
 4.1) Transmit process
 When the driver's transmit routine is called by the kernel, it sets up a
 transmit descriptor by calling the underlying DMA transmit routine (SGDMA or
-MSGDMA), and initites a transmit operation. Once the transmit is complete, an
+MSGDMA), and initiates a transmit operation. Once the transmit is complete, an
 interrupt is driven by the transmit DMA logic. The driver handles the transmit
 completion in the context of the interrupt handling chain by recycling
 resource required to send and track the requested transmit operation.
 
 4.2) Receive process
 The driver will post receive buffers to the receive DMA logic during driver
-intialization. Receive buffers may or may not be queued depending upon the
+initialization. Receive buffers may or may not be queued depending upon the
 underlying DMA logic (MSGDMA is able queue receive buffers, SGDMA is not able
 to queue receive buffers to the SGDMA receive logic). When a packet is
 received, the DMA logic generates an interrupt. The driver handles a receive
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 334b49e..57f52cd 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -1880,8 +1880,8 @@
 
 	The ARP monitor relies on the device driver itself to verify
 that traffic is flowing.  In particular, the driver must keep up to
-date the last receive time, dev->last_rx, and transmit start time,
-dev->trans_start.  If these are not updated by the driver, then the
+date the last receive time, dev->last_rx.  Drivers that use the NETIF_F_LLTX
+flag must also update netdev_queue->trans_start.  If they do not, then the
 ARP monitor will immediately fail any slaves using that driver, and
 those slaves will stay down.  If networking monitoring (tcpdump, etc)
 shows the ARP requests and replies on the network, then it may be that
diff --git a/Documentation/networking/checksum-offloads.txt b/Documentation/networking/checksum-offloads.txt
index de2a327..56e3686 100644
--- a/Documentation/networking/checksum-offloads.txt
+++ b/Documentation/networking/checksum-offloads.txt
@@ -69,18 +69,18 @@
 LCO is a technique for efficiently computing the outer checksum of an
  encapsulated datagram when the inner checksum is due to be offloaded.
 The ones-complement sum of a correctly checksummed TCP or UDP packet is
- equal to the sum of the pseudo header, because everything else gets
- 'cancelled out' by the checksum field.  This is because the sum was
+ equal to the complement of the sum of the pseudo header, because everything
+ else gets 'cancelled out' by the checksum field.  This is because the sum was
  complemented before being written to the checksum field.
 More generally, this holds in any case where the 'IP-style' ones complement
  checksum is used, and thus any checksum that TX Checksum Offload supports.
 That is, if we have set up TX Checksum Offload with a start/offset pair, we
- know that _after the device has filled in that checksum_, the ones
+ know that after the device has filled in that checksum, the ones
  complement sum from csum_start to the end of the packet will be equal to
- _whatever value we put in the checksum field beforehand_.  This allows us
- to compute the outer checksum without looking at the payload: we simply
- stop summing when we get to csum_start, then add the 16-bit word at
- (csum_start + csum_offset).
+ the complement of whatever value we put in the checksum field beforehand.
+ This allows us to compute the outer checksum without looking at the payload:
+ we simply stop summing when we get to csum_start, then add the complement of
+ the 16-bit word at (csum_start + csum_offset).
 Then, when the true inner checksum is filled in (either by hardware or by
  skb_checksum_help()), the outer checksum will become correct by virtue of
  the arithmetic.
diff --git a/Documentation/networking/dsa/bcm_sf2.txt b/Documentation/networking/dsa/bcm_sf2.txt
index d999d0c..eba3a24 100644
--- a/Documentation/networking/dsa/bcm_sf2.txt
+++ b/Documentation/networking/dsa/bcm_sf2.txt
@@ -38,7 +38,7 @@
 ======================
 
 The driver is located in drivers/net/dsa/bcm_sf2.c and is implemented as a DSA
-driver; see Documentation/networking/dsa/dsa.txt for details on the subsytem
+driver; see Documentation/networking/dsa/dsa.txt for details on the subsystem
 and what it provides.
 
 The SF2 switch is configured to enable a Broadcom specific 4-bytes switch tag
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 3b196c30..631b0f7 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -334,7 +334,7 @@
 of per-port slave network devices. Since DSA primarily deals with
 MDIO-connected switches, although not exclusively, SWITCHDEV's
 prepare/abort/commit phases are often simplified into a prepare phase which
-checks whether the operation is supporte by the DSA switch driver, and a commit
+checks whether the operation is supported by the DSA switch driver, and a commit
 phase which applies the changes.
 
 As of today, the only SWITCHDEV objects supported by DSA are the FDB and VLAN
@@ -533,7 +533,7 @@
   out at the switch hardware for the switch to (re) learn MAC addresses behind
   this port.
 
-- port_stp_update: bridge layer function invoked when a given switch port STP
+- port_stp_state_set: bridge layer function invoked when a given switch port STP
   state is computed by the bridge layer and should be propagated to switch
   hardware to forward/block/learn traffic. The switch driver is responsible for
   computing a STP state change based on current and asked parameters and perform
@@ -542,6 +542,12 @@
 Bridge VLAN filtering
 ---------------------
 
+- port_vlan_prepare: bridge layer function invoked when the bridge prepares the
+  configuration of a VLAN on the given port. If the operation is not supported
+  by the hardware, this function should return -EOPNOTSUPP to inform the bridge
+  code to fall back to a software implementation. No hardware setup must be done
+  in this function. See port_vlan_add for the actual hardware setup and details.
+
 - port_vlan_add: bridge layer function invoked when a VLAN is configured
   (tagged or untagged) for the given switch port
 
@@ -552,6 +558,12 @@
   function that the driver has to call for each VLAN the given port is a member
   of. A switchdev object is used to carry the VID and bridge flags.
 
+- port_fdb_prepare: bridge layer function invoked when the bridge prepares the
+  installation of a Forwarding Database entry. If the operation is not
+  supported, this function should return -EOPNOTSUPP to inform the bridge code
+  to fall back to a software implementation. No hardware setup must be done in
+  this function. See port_fdb_add for the actual hardware setup and details.
+
 - port_fdb_add: bridge layer function invoked when the bridge wants to install a
   Forwarding Database entry, the switch hardware should be programmed with the
   specified address in the specified VLAN Id in the forwarding database
@@ -565,6 +577,10 @@
   the specified MAC address from the specified VLAN ID if it was mapped into
   this port forwarding database
 
+- port_fdb_dump: bridge layer function invoked with a switchdev callback
+  function that the driver has to call for each MAC address known to be behind
+  the given port. A switchdev object is used to carry the VID and FDB info.
+
 TODO
 ====
 
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 11f67f1..683ada5 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -216,14 +216,14 @@
 
   jmp              6                    Jump to label
   ja               6                    Jump to label
-  jeq              7, 8                 Jump on k == A
-  jneq             8                    Jump on k != A
-  jne              8                    Jump on k != A
-  jlt              8                    Jump on k < A
-  jle              8                    Jump on k <= A
-  jgt              7, 8                 Jump on k > A
-  jge              7, 8                 Jump on k >= A
-  jset             7, 8                 Jump on k & A
+  jeq              7, 8                 Jump on A == k
+  jneq             8                    Jump on A != k
+  jne              8                    Jump on A != k
+  jlt              8                    Jump on A <  k
+  jle              8                    Jump on A <= k
+  jgt              7, 8                 Jump on A >  k
+  jge              7, 8                 Jump on A >= k
+  jset             7, 8                 Jump on A &  k
 
   add              0, 4                 A + <x>
   sub              0, 4                 A - <x>
@@ -1095,6 +1095,87 @@
 
 See details of eBPF verifier in kernel/bpf/verifier.c
 
+Direct packet access
+--------------------
+In cls_bpf and act_bpf programs the verifier allows direct access to the packet
+data via skb->data and skb->data_end pointers.
+Ex:
+1:  r4 = *(u32 *)(r1 +80)  /* load skb->data_end */
+2:  r3 = *(u32 *)(r1 +76)  /* load skb->data */
+3:  r5 = r3
+4:  r5 += 14
+5:  if r5 > r4 goto pc+16
+R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp
+6:  r0 = *(u16 *)(r3 +12) /* access bytes 12 and 13 of the packet */
+
+This 2-byte load from the packet is safe to do, since the program author
+did check 'if (skb->data + 14 > skb->data_end) goto err' at insn #5 which
+means that in the fall-through case the register R3 (which points to skb->data)
+has at least 14 directly accessible bytes. The verifier marks it
+as R3=pkt(id=0,off=0,r=14).
+id=0 means that no additional variables were added to the register.
+off=0 means that no additional constants were added.
+r=14 is the range of safe access which means that bytes [R3, R3 + 14) are ok.
+Note that R5 is marked as R5=pkt(id=0,off=14,r=14). It also points
+to the packet data, but constant 14 was added to the register, so
+it now points to 'skb->data + 14' and accessible range is [R5, R5 + 14 - 14)
+which is zero bytes.
+
+More complex packet access may look like:
+ R0=imm1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp
+ 6:  r0 = *(u8 *)(r3 +7) /* load 7th byte from the packet */
+ 7:  r4 = *(u8 *)(r3 +12)
+ 8:  r4 *= 14
+ 9:  r3 = *(u32 *)(r1 +76) /* load skb->data */
+10:  r3 += r4
+11:  r2 = r1
+12:  r2 <<= 48
+13:  r2 >>= 48
+14:  r3 += r2
+15:  r2 = r3
+16:  r2 += 8
+17:  r1 = *(u32 *)(r1 +80) /* load skb->data_end */
+18:  if r2 > r1 goto pc+2
+ R0=inv56 R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv52 R5=pkt(id=0,off=14,r=14) R10=fp
+19:  r1 = *(u8 *)(r3 +4)
+The state of the register R3 is R3=pkt(id=2,off=0,r=8)
+id=2 means that two 'r3 += rX' instructions were seen, so r3 points to some
+offset within a packet and since the program author did
+'if (r3 + 8 > r1) goto err' at insn #18, the safe range is [R3, R3 + 8).
+The verifier only allows 'add' operation on packet registers. Any other
+operation will set the register state to 'unknown_value' and it won't be
+available for direct packet access.
+Operation 'r3 += rX' may overflow and become less than original skb->data,
+therefore the verifier has to prevent that. So it tracks the number of
+upper zero bits in all 'unknown_value' registers, so when it sees
+'r3 += rX' instruction and rX is more than 16-bit value, it will error as:
+"cannot add integer value with N upper zero bits to ptr_to_packet"
+Ex. after insn 'r4 = *(u8 *)(r3 +12)' (insn #7 above) the state of r4 is
+R4=inv56 which means that upper 56 bits on the register are guaranteed
+to be zero. After insn 'r4 *= 14' the state becomes R4=inv52, since
+multiplying 8-bit value by constant 14 will keep upper 52 bits as zero.
+Similarly 'r2 >>= 48' will make R2=inv48, since the shift is not sign
+extending. This logic is implemented in evaluate_reg_alu() function.
+
+The end result is that a bpf program author can access the packet directly
+using normal C code as:
+  void *data = (void *)(long)skb->data;
+  void *data_end = (void *)(long)skb->data_end;
+  struct eth_hdr *eth = data;
+  struct iphdr *iph = data + sizeof(*eth);
+  struct udphdr *udp = data + sizeof(*eth) + sizeof(*iph);
+
+  if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*udp) > data_end)
+          return 0;
+  if (eth->h_proto != htons(ETH_P_IP))
+          return 0;
+  if (iph->protocol != IPPROTO_UDP || iph->ihl != 5)
+          return 0;
+  if (udp->dest == htons(53) || udp->source == htons(9))
+          ...;
+which makes such programs easier to write compared to LD_ABS insns
+and significantly faster.
+
 eBPF maps
 ---------
 'maps' is a generic storage of different types for sharing data between kernel
@@ -1293,5 +1374,5 @@
 the underlying architecture.
 
 Jay Schulist <jschlst@samba.org>
-Daniel Borkmann <dborkman@redhat.com>
-Alexei Starovoitov <ast@plumgrid.com>
+Daniel Borkmann <daniel@iogearbox.net>
+Alexei Starovoitov <ast@kernel.org>
diff --git a/Documentation/networking/gen_stats.txt b/Documentation/networking/gen_stats.txt
index 70e6275..ff630a8 100644
--- a/Documentation/networking/gen_stats.txt
+++ b/Documentation/networking/gen_stats.txt
@@ -33,7 +33,8 @@
 {
 	struct gnet_dump dump;
 
-	if (gnet_stats_start_copy(skb, TCA_STATS2, &mystruct->lock, &dump) < 0)
+	if (gnet_stats_start_copy(skb, TCA_STATS2, &mystruct->lock, &dump,
+				  TCA_PAD) < 0)
 		goto rtattr_failure;
 
 	if (gnet_stats_copy_basic(&dump, &mystruct->bstats) < 0 ||
@@ -56,7 +57,8 @@
 my_dumping_routine(struct sk_buff *skb, ...)
 {
     if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-		TCA_XSTATS, &mystruct->lock, &dump) < 0)
+				     TCA_XSTATS, &mystruct->lock, &dump,
+				     TCA_PAD) < 0)
 		goto rtattr_failure;
 	...
 }
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index b183e2b..6c7f365b 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -63,6 +63,16 @@
 	fwmark of the packet they are replying to.
 	Default: 0
 
+fib_multipath_use_neigh - BOOLEAN
+	Use status of existing neighbor entry when determining nexthop for
+	multipath routes. If disabled, neighbor information is not used and
+	packets could be directed to a failed nexthop. Only valid for kernels
+	built with CONFIG_IP_ROUTE_MULTIPATH enabled.
+	Default: 0 (disabled)
+	Possible values:
+	0 - disabled
+	1 - enabled
+
 route/max_size - INTEGER
 	Maximum number of routes allowed in the kernel.  Increase
 	this when using large numbers of interfaces and/or routes.
diff --git a/Documentation/networking/ipvlan.txt b/Documentation/networking/ipvlan.txt
index cf99639..14422f8 100644
--- a/Documentation/networking/ipvlan.txt
+++ b/Documentation/networking/ipvlan.txt
@@ -8,7 +8,7 @@
 	This is conceptually very similar to the macvlan driver with one major
 exception of using L3 for mux-ing /demux-ing among slaves. This property makes
 the master device share the L2 with it's slave devices. I have developed this
-driver in conjuntion with network namespaces and not sure if there is use case
+driver in conjunction with network namespaces and not sure if there is use case
 outside of it.
 
 
@@ -42,7 +42,7 @@
 as well.
 
 4.2 L3 mode:
-	In this mode TX processing upto L3 happens on the stack instance attached
+	In this mode TX processing up to L3 happens on the stack instance attached
 to the slave device and packets are switched to the stack instance of the
 master device for the L2 processing and routing from that instance will be
 used before packets are queued on the outbound device. In this mode the slaves
@@ -56,7 +56,7 @@
 	(a) The Linux host that is connected to the external switch / router has
 policy configured that allows only one mac per port.
 	(b) No of virtual devices created on a master exceed the mac capacity and
-puts the NIC in promiscous mode and degraded performance is a concern.
+puts the NIC in promiscuous mode and degraded performance is a concern.
 	(c) If the slave device is to be put into the hostile / untrusted network
 namespace where L2 on the slave could be changed / misused.
 
diff --git a/Documentation/networking/mac80211-injection.txt b/Documentation/networking/mac80211-injection.txt
index ec8f934..d58d78d 100644
--- a/Documentation/networking/mac80211-injection.txt
+++ b/Documentation/networking/mac80211-injection.txt
@@ -37,14 +37,27 @@
    HT rate for the transmission (only for devices without own rate control).
    Also some flags are parsed
 
-   IEEE80211_TX_RC_SHORT_GI: use short guard interval
-   IEEE80211_TX_RC_40_MHZ_WIDTH: send in HT40 mode
+   IEEE80211_RADIOTAP_MCS_SGI: use short guard interval
+   IEEE80211_RADIOTAP_MCS_BW_40: send in HT40 mode
 
  * IEEE80211_RADIOTAP_DATA_RETRIES
 
    number of retries when either IEEE80211_RADIOTAP_RATE or
    IEEE80211_RADIOTAP_MCS was used
 
+ * IEEE80211_RADIOTAP_VHT
+
+   VHT mcs and number of streams used in the transmission (only for devices
+   without own rate control). Also other fields are parsed
+
+   flags field
+   IEEE80211_RADIOTAP_VHT_FLAG_SGI: use short guard interval
+
+   bandwidth field
+   1: send using 40MHz channel width
+   4: send using 80MHz channel width
+   11: send using 160MHz channel width
+
 The injection code can also skip all other currently defined radiotap fields
 facilitating replay of captured radiotap headers directly.
 
diff --git a/Documentation/networking/netdev-features.txt b/Documentation/networking/netdev-features.txt
index f310ede..7413eb0 100644
--- a/Documentation/networking/netdev-features.txt
+++ b/Documentation/networking/netdev-features.txt
@@ -131,13 +131,11 @@
 
  * LLTX driver (deprecated for hardware drivers)
 
-NETIF_F_LLTX should be set in drivers that implement their own locking in
-transmit path or don't need locking at all (e.g. software tunnels).
-In ndo_start_xmit, it is recommended to use a try_lock and return
-NETDEV_TX_LOCKED when the spin lock fails.  The locking should also properly
-protect against other callbacks (the rules you need to find out).
+NETIF_F_LLTX is meant to be used by drivers that don't need locking at all,
+e.g. software tunnels.
 
-Don't use it for new drivers.
+This is also used in a few legacy drivers that implement their
+own locking; don't use it for new (hardware) drivers.
 
  * netns-local device
 
diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index 0b1cf6b..7fec206 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -69,10 +69,9 @@
 
 	When the driver sets NETIF_F_LLTX in dev->features this will be
 	called without holding netif_tx_lock. In this case the driver
-	has to lock by itself when needed. It is recommended to use a try lock
-	for this and return NETDEV_TX_LOCKED when the spin lock fails.
-	The locking there should also properly protect against 
-	set_rx_mode. Note that the use of NETIF_F_LLTX is deprecated.
+	has to lock by itself when needed.
+	The locking there should also properly protect against
+	set_rx_mode. WARNING: use of NETIF_F_LLTX is deprecated.
 	Don't use it for new drivers.
 
 	Context: Process with BHs disabled or BH (timer),
@@ -83,8 +82,6 @@
 	o NETDEV_TX_BUSY Cannot transmit packet, try later 
 	  Usually a bug, means queue start/stop flow control is broken in
 	  the driver. Note: the driver must NOT put the skb in its DMA ring.
-	o NETDEV_TX_LOCKED Locking failed, please retry quickly.
-	  Only valid when NETIF_F_LLTX is set.
 
 ndo_tx_timeout:
 	Synchronization: netif_tx_lock spinlock; all TX queues frozen.
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index f4be85e..2c4e335 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -67,12 +67,12 @@
  * add_device DEVICE@NAME -- adds a single device
  * rem_device_all         -- remove all associated devices
 
-When adding a device to a thread, a corrosponding procfile is created
+When adding a device to a thread, a corresponding procfile is created
 which is used for configuring this device. Thus, device names need to
 be unique.
 
 To support adding the same device to multiple threads, which is useful
-with multi queue NICs, a the device naming scheme is extended with "@":
+with multi queue NICs, the device naming scheme is extended with "@":
  device@something
 
 The part after "@" can be anything, but it is custom to use the thread
@@ -221,7 +221,7 @@
 
 A collection of tutorial scripts and helpers for pktgen is in the
 samples/pktgen directory. The helper parameters.sh file support easy
-and consistant parameter parsing across the sample scripts.
+and consistent parameter parsing across the sample scripts.
 
 Usage example and help:
  ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.txt
new file mode 100644
index 0000000..f200467
--- /dev/null
+++ b/Documentation/networking/segmentation-offloads.txt
@@ -0,0 +1,130 @@
+Segmentation Offloads in the Linux Networking Stack
+
+Introduction
+============
+
+This document describes a set of techniques in the Linux networking stack
+to take advantage of segmentation offload capabilities of various NICs.
+
+The following technologies are described:
+ * TCP Segmentation Offload - TSO
+ * UDP Fragmentation Offload - UFO
+ * IPIP, SIT, GRE, and UDP Tunnel Offloads
+ * Generic Segmentation Offload - GSO
+ * Generic Receive Offload - GRO
+ * Partial Generic Segmentation Offload - GSO_PARTIAL
+
+TCP Segmentation Offload
+========================
+
+TCP segmentation allows a device to segment a single frame into multiple
+frames with a data payload size specified in skb_shinfo()->gso_size.
+When TCP segmentation is requested, the bit for either SKB_GSO_TCPV4 or
+SKB_GSO_TCPV6 should be set in skb_shinfo()->gso_type and
+skb_shinfo()->gso_size should be set to a non-zero value.
+
+TCP segmentation is dependent on support for the use of partial checksum
+offload.  For this reason TSO is normally disabled if the Tx checksum
+offload for a given device is disabled.
+
+In order to support TCP segmentation offload it is necessary to populate
+the network and transport header offsets of the skbuff so that the device
+drivers will be able to determine the offsets of the IP or IPv6 header and
+the TCP header.  In addition, as CHECKSUM_PARTIAL is required, csum_start
+should also point to the TCP header of the packet.
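+
+A hedged sketch of the skbuff fields involved (the field and constant names
+are real; the surrounding code is illustrative only):
+
+	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;	/* or SKB_GSO_TCPV6 */
+	skb_shinfo(skb)->gso_size = mss;		/* payload bytes per segment */
+	skb->ip_summed = CHECKSUM_PARTIAL;
+	skb->csum_start = skb_transport_header(skb) - skb->head;
+	skb->csum_offset = offsetof(struct tcphdr, check);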
+
+For IPv4 segmentation we support one of two types in terms of the IP ID.
+The default behavior is to increment the IP ID with every segment.  If the
+GSO type SKB_GSO_TCP_FIXEDID is specified then we will not increment the IP
+ID and all segments will use the same IP ID.  If a device has
+NETIF_F_TSO_MANGLEID set then the IP ID can be ignored when performing TSO
+and we will either increment the IP ID for all frames, or leave it at a
+static value based on driver preference.
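+
+A driver that does not depend on the IP ID when performing TSO might
+advertise this roughly as follows (an illustrative sketch; the feature flag
+names are real):
+
+	dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO_MANGLEID;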
+
+UDP Fragmentation Offload
+=========================
+
+UDP fragmentation offload allows a device to fragment an oversized UDP
+datagram into multiple IPv4 fragments.  Many of the requirements for UDP
+fragmentation offload are the same as TSO.  However the IPv4 ID for
+fragments should not increment as a single IPv4 datagram is fragmented.
+
+IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
+========================================================
+
+In addition to the offloads described above it is possible for a frame to
+contain additional headers such as an outer tunnel.  In order to account
+for such instances an additional set of segmentation offload types were
+introduced including SKB_GSO_IPIP, SKB_GSO_SIT, SKB_GSO_GRE, and
+SKB_GSO_UDP_TUNNEL.  These extra segmentation types are used to identify
+cases where there are more than just one set of headers.  For example in the
+case of IPIP and SIT we should have the network and transport headers moved
+from the standard list of headers to "inner" header offsets.
+
+Currently only two levels of headers are supported.  The convention is to
+refer to the tunnel headers as the outer headers, while the encapsulated
+data is normally referred to as the inner headers.  Below is the list of
+calls to access the given headers:
+
+IPIP/SIT Tunnel:
+		Outer			Inner
+MAC		skb_mac_header
+Network		skb_network_header	skb_inner_network_header
+Transport	skb_transport_header
+
+UDP/GRE Tunnel:
+		Outer			Inner
+MAC		skb_mac_header		skb_inner_mac_header
+Network		skb_network_header	skb_inner_network_header
+Transport	skb_transport_header	skb_inner_transport_header
+
+In addition to the above tunnel types there are also SKB_GSO_GRE_CSUM and
+SKB_GSO_UDP_TUNNEL_CSUM.  These two additional tunnel types reflect the
+fact that a non-zero checksum is also requested to be included in the
+outer header.
+
+Finally there is SKB_GSO_REMCSUM which indicates that a given tunnel header
+has requested a remote checksum offload.  In this case the inner headers
+will be left with a partial checksum and only the outer header checksum
+will be computed.
+
+Generic Segmentation Offload
+============================
+
+Generic segmentation offload is a pure software offload that is meant to
+deal with cases where device drivers cannot perform the offloads described
+above.  What occurs in GSO is that a given skbuff will have its data broken
+out over multiple skbuffs that have been resized to match the MSS provided
+via skb_shinfo()->gso_size.
+
+Before enabling any hardware segmentation offload a corresponding software
+offload is required in GSO.  Otherwise it becomes possible for a frame to
+be re-routed between devices and end up being unable to be transmitted.
+
+Generic Receive Offload
+=======================
+
+Generic receive offload is the complement to GSO.  Ideally any frame
+assembled by GRO should be segmented to create an identical sequence of
+frames using GSO, and any sequence of frames segmented by GSO should be
+able to be reassembled back to the original by GRO.  The only exception to
+this is IPv4 ID in the case that the DF bit is set for a given IP header.
+If the value of the IPv4 ID is not sequentially incrementing it will be
+altered so that it is when a frame assembled via GRO is segmented via GSO.
+
+Partial Generic Segmentation Offload
+====================================
+
+Partial generic segmentation offload is a hybrid between TSO and GSO.  What
+it effectively does is take advantage of certain traits of TCP and tunnels
+so that instead of having to rewrite the packet headers for each segment
+only the inner-most transport header and possibly the outer-most network
+header need to be updated.  This allows devices that do not support tunnel
+offloads or tunnel offloads with checksum to still make use of segmentation.
+
+With the partial offload, all headers excluding the inner transport header
+are updated such that they will contain the correct values as if the header
+were simply duplicated.  The one exception to this is the outer IPv4 ID
+field.  It is up to the device drivers to guarantee that the IPv4 ID field
+is incremented in the case that a given header does not have the DF bit set.
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index d64a147..671fe3d 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -1,6 +1,6 @@
        STMicroelectronics 10/100/1000 Synopsys Ethernet driver
 
-Copyright (C) 2007-2014  STMicroelectronics Ltd
+Copyright (C) 2007-2015  STMicroelectronics Ltd
 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 
 This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
@@ -138,6 +138,8 @@
 	int (*init)(struct platform_device *pdev, void *priv);
 	void (*exit)(struct platform_device *pdev, void *priv);
 	void *bsp_priv;
+	int has_gmac4;
+	bool tso_en;
 };
 
 Where:
@@ -181,6 +183,8 @@
 	     registers.  init/exit callbacks should not use or modify
 	     platform data.
  o bsp_priv: another private pointer.
+ o has_gmac4: uses GMAC4 core.
+ o tso_en: Enables TSO (TCP Segmentation Offload) feature.
 
 For MDIO bus The we have:
 
@@ -278,6 +282,13 @@
  o stmmac_ethtool.c: to implement the ethtool support;
  o stmmac.h: private driver structure;
  o common.h: common definitions and VFTs;
+ o mmc_core.c/mmc.h: Management MAC Counters;
+ o stmmac_hwtstamp.c: HW timestamp support for PTP;
+ o stmmac_ptp.c: PTP 1588 clock;
+ o dwmac-<XXX>.c: these are for the platform glue-logic file; e.g. dwmac-sti.c
+   for STMicroelectronics SoCs.
+
+- GMAC 3.x
  o descs.h: descriptor structure definitions;
  o dwmac1000_core.c: dwmac GiGa core functions;
  o dwmac1000_dma.c: dma functions for the GMAC chip;
@@ -289,11 +300,32 @@
  o enh_desc.c: functions for handling enhanced descriptors;
  o norm_desc.c: functions for handling normal descriptors;
  o chain_mode.c/ring_mode.c:: functions to manage RING/CHAINED modes;
- o mmc_core.c/mmc.h: Management MAC Counters;
- o stmmac_hwtstamp.c: HW timestamp support for PTP;
- o stmmac_ptp.c: PTP 1588 clock;
- o dwmac-<XXX>.c: these are for the platform glue-logic file; e.g. dwmac-sti.c
-   for STMicroelectronics SoCs.
+
+- GMAC4.x generation
+ o dwmac4_core.c: dwmac GMAC4.x core functions;
+ o dwmac4_desc.c: functions for handling GMAC4.x descriptors;
+ o dwmac4_descs.h: descriptor definitions;
+ o dwmac4_dma.c: dma functions for the GMAC4.x chip;
+ o dwmac4_dma.h: dma definitions for the GMAC4.x chip;
+ o dwmac4.h: core definitions for the GMAC4.x chip;
+ o dwmac4_lib.c: generic GMAC4.x functions;
+
+4.12) TSO support (GMAC4.x)
+
+TSO (TCP Segmentation Offload) is supported by the GMAC 4.x chip family.
+When a packet is sent over TCP, the TCP stack ensures that the SKB provided
+to the low-level driver (stmmac in our case) matches the maximum frame
+length (IP header + TCP header + payload <= 1500 bytes for an MTU of 1500).
+This means that if an application using TCP wants to send a packet whose
+length (after adding headers) exceeds 1514 bytes, the packet is split into
+several TCP packets: the data payload is split and headers (TCP/IP ...) are
+added.  This is done by software.
+
+When TSO is enabled, the TCP stack doesn't care about the maximum frame
+length and provides the SKB to stmmac as it is.  The GMAC IP has to perform
+the segmentation by itself to match the maximum frame length.
+
+This feature can be enabled in device tree through "snps,tso" entry.
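+
+For example, platform glue code could set up the related platform data
+fields roughly as follows (a hypothetical sketch; plat_stmmacenet_data and
+the "snps,tso" property are described above, plat_dat is assumed to be an
+already-allocated platform data structure and np its device tree node):
+
+	plat_dat->has_gmac4 = 1;
+	plat_dat->tso_en = of_property_read_bool(np, "snps,tso");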
 
 5) Debug Information
 
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index fad6313..31c3911 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -89,6 +89,18 @@
 is loaded with a different driver, such as a NIC driver, on the management port
 device.
 
+Switch ID
+^^^^^^^^^
+
+The switchdev driver must implement the switchdev op switchdev_port_attr_get
+for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same
+physical ID for each port of a switch.  The ID must be unique between switches
+on the same system.  The ID does not need to be unique between switches on
+different systems.
+
+The switch ID is used to locate ports on a switch and to know if aggregated
+ports belong to the same switch.
+
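+A minimal sketch of such an op might look like this (illustrative only,
+assuming the parent ID is returned in attr->u.ppid as a netdev_phys_item_id;
+foo_port and its sw->id member are hypothetical driver types):
+
+	static int foo_port_attr_get(struct net_device *dev,
+				     struct switchdev_attr *attr)
+	{
+		struct foo_port *port = netdev_priv(dev);
+
+		switch (attr->id) {
+		case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+			/* Same ID for every port of this switch. */
+			attr->u.ppid.id_len = sizeof(port->sw->id);
+			memcpy(&attr->u.ppid.id, &port->sw->id,
+			       attr->u.ppid.id_len);
+			return 0;
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
+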
 Port Netdev Naming
 ^^^^^^^^^^^^^^^^^^
 
@@ -104,25 +116,13 @@
 into 4 10G ports, resulting in 4 port netdevs, the device can give a unique
 name for each port using port PHYS name.  The udev rule would be:
 
-SUBSYSTEM=="net", ACTION=="add", DRIVER="<driver>", ATTR{phys_port_name}!="", \
-	NAME="$attr{phys_port_name}"
+SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}=="<phys_switch_id>", \
+	ATTR{phys_port_name}!="", NAME="swX$attr{phys_port_name}"
 
 Suggested naming convention is "swXpYsZ", where X is the switch name or ID, Y
 is the port name or ID, and Z is the sub-port name or ID.  For example, sw1p1s0
 would be sub-port 0 on port 1 on switch 1.
 
-Switch ID
-^^^^^^^^^
-
-The switchdev driver must implement the switchdev op switchdev_port_attr_get
-for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same
-physical ID for each port of a switch.  The ID must be unique between switches
-on the same system.  The ID does not need to be unique between switches on
-different systems.
-
-The switch ID is used to locate ports on a switch and to know if aggregated
-ports belong to the same switch.
-
 Port Features
 ^^^^^^^^^^^^^
 
@@ -386,7 +386,7 @@
 memory allocation, etc. The goal is to handle the stuff that is not unlikely
 to fail here. The second phase is to "commit" the actual changes.
 
-Switchdev provides an inftrastructure for sharing items (for example memory
+Switchdev provides an infrastructure for sharing items (for example memory
 allocations) between the two phases.
 
 The object created by a driver in "prepare" phase and it is queued up by:
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index a977339..671cccf 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -44,11 +44,17 @@
 Supports multiple types of timestamp requests. As a result, this
 socket option takes a bitmap of flags, not a boolean. In
 
-  err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, &val);
+  err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
 
 val is an integer with any of the following bits set. Setting other
 bit returns EINVAL and does not change the current state.
 
+The socket option configures timestamp generation for individual
+sk_buffs (1.3.1), timestamp reporting to the socket's error
+queue (1.3.2) and options (1.3.3). Timestamp generation can also
+be enabled for individual sendmsg calls using cmsg (1.3.4).
+
 
 1.3.1 Timestamp Generation
 
@@ -71,13 +77,16 @@
   kernel receive stack.
 
 SOF_TIMESTAMPING_TX_HARDWARE:
-  Request tx timestamps generated by the network adapter.
+  Request tx timestamps generated by the network adapter. This flag
+  can be enabled via both socket options and control messages.
 
 SOF_TIMESTAMPING_TX_SOFTWARE:
   Request tx timestamps when data leaves the kernel. These timestamps
   are generated in the device driver as close as possible, but always
   prior to, passing the packet to the network interface. Hence, they
   require driver support and may not be available for all devices.
+  This flag can be enabled via both socket options and control messages.
+
 
 SOF_TIMESTAMPING_TX_SCHED:
   Request tx timestamps prior to entering the packet scheduler. Kernel
@@ -90,7 +99,8 @@
   machines with virtual devices where a transmitted packet travels
   through multiple devices and, hence, multiple packet schedulers,
   a timestamp is generated at each layer. This allows for fine
-  grained measurement of queuing delay.
+  grained measurement of queuing delay. This flag can be enabled
+  via both socket options and control messages.
 
 SOF_TIMESTAMPING_TX_ACK:
   Request tx timestamps when all data in the send buffer has been
@@ -99,6 +109,7 @@
   over-report measurement, because the timestamp is generated when all
   data up to and including the buffer at send() was acknowledged: the
   cumulative acknowledgment. The mechanism ignores SACK and FACK.
+  This flag can be enabled via both socket options and control messages.
 
 
 1.3.2 Timestamp Reporting
@@ -183,6 +194,37 @@
 combined with SOF_TIMESTAMPING_OPT_TSONLY.
 
 
+1.3.4. Enabling timestamps via control messages
+
+In addition to socket options, timestamp generation can be requested
+per write via cmsg, only for SOF_TIMESTAMPING_TX_* (see Section 1.3.1).
+Using this feature, applications can sample timestamps per sendmsg()
+without paying the overhead of enabling and disabling timestamps via
+setsockopt:
+
+  struct msghdr *msg;
+  struct cmsghdr *cmsg;
+  ...
+  cmsg			       = CMSG_FIRSTHDR(msg);
+  cmsg->cmsg_level	       = SOL_SOCKET;
+  cmsg->cmsg_type	       = SO_TIMESTAMPING;
+  cmsg->cmsg_len	       = CMSG_LEN(sizeof(__u32));
+  *((__u32 *) CMSG_DATA(cmsg)) = SOF_TIMESTAMPING_TX_SCHED |
+				 SOF_TIMESTAMPING_TX_SOFTWARE |
+				 SOF_TIMESTAMPING_TX_ACK;
+  err = sendmsg(fd, msg, 0);
+
+The SOF_TIMESTAMPING_TX_* flags set via cmsg will override
+the SOF_TIMESTAMPING_TX_* flags set via setsockopt.
+
+Moreover, applications must still enable timestamp reporting via
+setsockopt to receive timestamps:
+
+  __u32 val = SOF_TIMESTAMPING_SOFTWARE |
+	      SOF_TIMESTAMPING_OPT_ID /* or any other flag */;
+  err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
+
+
 1.4 Bytestream Timestamps
 
 The SO_TIMESTAMPING interface supports timestamping of bytes in a
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
index d52aa10..5da679c 100644
--- a/Documentation/networking/vrf.txt
+++ b/Documentation/networking/vrf.txt
@@ -41,7 +41,7 @@
 the VRF device. Similarly on egress routing rules are used to send packets
 to the VRF device driver before getting sent out the actual interface. This
 allows tcpdump on a VRF device to capture all packets into and out of the
-VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied
+VRF as a whole.[1] Similarly, netfilter [2] and tc rules can be applied
 using the VRF device to specify rules that apply to the VRF domain as a whole.
 
 [1] Packets in the forwarded state do not flow through the device, so those
diff --git a/Documentation/networking/xfrm_sync.txt b/Documentation/networking/xfrm_sync.txt
index d7aac9d..8d88e0f 100644
--- a/Documentation/networking/xfrm_sync.txt
+++ b/Documentation/networking/xfrm_sync.txt
@@ -4,7 +4,7 @@
 from Jamal <hadi@cyberus.ca>.
 
 The end goal for syncing is to be able to insert attributes + generate
-events so that the an SA can be safely moved from one machine to another
+events so that the SA can be safely moved from one machine to another
 for HA purposes.
 The idea is to synchronize the SA so that the takeover machine can do
 the processing of the SA as accurate as possible if it has access to it.
@@ -13,7 +13,7 @@
 These patches add ability to sync and have accurate lifetime byte (to
 ensure proper decay of SAs) and replay counters to avoid replay attacks
 with as minimal loss at failover time.
-This way a backup stays as closely uptodate as an active member.
+This way a backup stays as closely up-to-date as an active member.
 
 Because the above items change for every packet the SA receives,
 it is possible for a lot of the events to be generated.
@@ -163,7 +163,7 @@
 there is a period where the timer threshold expires with no packets
 seen, then an odd behavior is seen as follows:
 The first packet arrival after a timer expiry will trigger a timeout
-aevent; i.e we dont wait for a timeout period or a packet threshold
+event; i.e. we don't wait for a timeout period or a packet threshold
 to be reached. This is done for simplicity and efficiency reasons.
 
 -JHS
diff --git a/Documentation/phy.txt b/Documentation/phy.txt
index b388c5a..0aa994b 100644
--- a/Documentation/phy.txt
+++ b/Documentation/phy.txt
@@ -31,16 +31,28 @@
 dt boot case.
 
 #define of_phy_provider_register(dev, xlate)    \
-        __of_phy_provider_register((dev), THIS_MODULE, (xlate))
+        __of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
 
 #define devm_of_phy_provider_register(dev, xlate)       \
-        __devm_of_phy_provider_register((dev), THIS_MODULE, (xlate))
+        __devm_of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
 
 of_phy_provider_register and devm_of_phy_provider_register macros can be used to
 register the phy_provider and it takes device and of_xlate as
 arguments. For the dt boot case, all PHY providers should use one of the above
 2 macros to register the PHY provider.
 
+Often the device tree nodes associated with a PHY provider will contain a set
+of children that each represent a single PHY. Some bindings may nest the child
+nodes within extra levels for context and extensibility, in which case the
+low-level of_phy_provider_register_full() and devm_of_phy_provider_register_full()
+macros can be used to override the node containing the children.
+
+#define of_phy_provider_register_full(dev, children, xlate) \
+	__of_phy_provider_register(dev, children, THIS_MODULE, xlate)
+
+#define devm_of_phy_provider_register_full(dev, children, xlate) \
+	__devm_of_phy_provider_register_full(dev, children, THIS_MODULE, xlate)
+
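+For example, a provider whose PHY nodes sit below a separate "phys" child
+node could register itself as follows (a hypothetical sketch; only the
+*_register_full macro above is taken from this document, the "phys" node
+name is illustrative):
+
+	struct device_node *children;
+	struct phy_provider *provider;
+
+	children = of_get_child_by_name(dev->of_node, "phys");
+	provider = devm_of_phy_provider_register_full(dev, children,
+						      of_phy_simple_xlate);
+	if (IS_ERR(provider))
+		return PTR_ERR(provider);
+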
 void devm_of_phy_provider_unregister(struct device *dev,
 	struct phy_provider *phy_provider);
 void of_phy_provider_unregister(struct phy_provider *phy_provider);
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 7328cf8..1fd1fbe 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -586,6 +586,10 @@
 but also it allows of more flexibility in the handling of devices during the
 removal of their drivers.
 
+In their ->remove() callbacks, drivers should undo the runtime PM changes
+done in ->probe().  Usually this means calling pm_runtime_disable(),
+pm_runtime_dont_use_autosuspend(), etc.
+
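+For example (a minimal sketch, not taken from any particular driver):
+
+	static int foo_probe(struct platform_device *pdev)
+	{
+		pm_runtime_use_autosuspend(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+		return 0;
+	}
+
+	static int foo_remove(struct platform_device *pdev)
+	{
+		/* Undo the runtime PM setup done in probe, in reverse order. */
+		pm_runtime_dont_use_autosuspend(&pdev->dev);
+		pm_runtime_disable(&pdev->dev);
+		return 0;
+	}
+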
 The user space can effectively disallow the driver of the device to power manage
 it at run time by changing the value of its /sys/devices/.../power/control
 attribute to "on", which causes pm_runtime_forbid() to be called.  In principle,
diff --git a/Documentation/rpmsg.txt b/Documentation/rpmsg.txt
index f7edc3a..a95e36a 100644
--- a/Documentation/rpmsg.txt
+++ b/Documentation/rpmsg.txt
@@ -249,24 +249,12 @@
 
 static struct rpmsg_driver rpmsg_sample_client = {
 	.drv.name	= KBUILD_MODNAME,
-	.drv.owner	= THIS_MODULE,
 	.id_table	= rpmsg_driver_sample_id_table,
 	.probe		= rpmsg_sample_probe,
 	.callback	= rpmsg_sample_cb,
 	.remove		= rpmsg_sample_remove,
 };
-
-static int __init init(void)
-{
-	return register_rpmsg_driver(&rpmsg_sample_client);
-}
-module_init(init);
-
-static void __exit fini(void)
-{
-	unregister_rpmsg_driver(&rpmsg_sample_client);
-}
-module_exit(fini);
+module_rpmsg_driver(rpmsg_sample_client);
 
 Note: a similar sample which can be built and loaded can be found
 in samples/rpmsg/.
diff --git a/Documentation/scsi/g_NCR5380.txt b/Documentation/scsi/g_NCR5380.txt
index 3b80f56..fd88015 100644
--- a/Documentation/scsi/g_NCR5380.txt
+++ b/Documentation/scsi/g_NCR5380.txt
@@ -23,11 +23,10 @@
 
 If the default configuration does not work for you, you can use the kernel
 command lines (eg using the lilo append command):
-	ncr5380=port,irq,dma
-	ncr53c400=port,irq
-or
-	ncr5380=base,irq,dma
-	ncr53c400=base,irq
+	ncr5380=addr,irq
+	ncr53c400=addr,irq
+	ncr53c400a=addr,irq
+	dtc3181e=addr,irq
 
 The driver does not probe for any addresses or ports other than those in
 the OVERRIDE or given to the kernel as above.
@@ -36,19 +35,17 @@
 /proc/scsi/g_NCR5380/x where x is the scsi card number as detected at boot
 time. More info to come in the future.
 
-When NCR53c400 support is compiled in, BIOS parameters will be returned by
-the driver (the raw 5380 driver does not and I don't plan to fiddle with
-it!).
-
 This driver works as a module.
 When included as a module, parameters can be passed on the insmod/modprobe
 command line:
   ncr_irq=xx   the interrupt
   ncr_addr=xx  the port or base address (for port or memory
                mapped, resp.)
-  ncr_dma=xx   the DMA
   ncr_5380=1   to set up for a NCR5380 board
   ncr_53c400=1 to set up for a NCR53C400 board
+  ncr_53c400a=1 to set up for a NCR53C400A board
+  dtc_3181e=1  to set up for a Domex Technology Corp 3181E board
+  hp_c2502=1   to set up for a Hewlett Packard C2502 board
 e.g.
 modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
   for a port mapped NCR5380 board or
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt
index 2bfd6f6..1241ac1 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.txt
@@ -27,13 +27,15 @@
 	aic79xx=	[HW,SCSI]
 			See Documentation/scsi/aic79xx.txt.
 
-	atascsi=	[HW,SCSI] Atari SCSI
+	atascsi=	[HW,SCSI]
+			See drivers/scsi/atari_scsi.c.
 
 	BusLogic=	[HW,SCSI]
 			See drivers/scsi/BusLogic.c, comment before function
 			BusLogic_ParseDriverOptions().
 
 	dtc3181e=	[HW,SCSI]
+			See Documentation/scsi/g_NCR5380.txt.
 
 	eata=		[HW,SCSI]
 
@@ -51,8 +53,8 @@
 	ips=		[HW,SCSI] Adaptec / IBM ServeRAID controller
 			See header of drivers/scsi/ips.c.
 
-	mac5380=	[HW,SCSI] Format:
-			<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
+	mac5380=	[HW,SCSI]
+			See drivers/scsi/mac_scsi.c.
 
 	max_luns=	[SCSI] Maximum number of LUNs to probe.
 			Should be between 1 and 2^32-1.
@@ -65,10 +67,13 @@
 			See header of drivers/scsi/NCR_D700.c.
 
 	ncr5380=	[HW,SCSI]
+			See Documentation/scsi/g_NCR5380.txt.
 
 	ncr53c400=	[HW,SCSI]
+			See Documentation/scsi/g_NCR5380.txt.
 
 	ncr53c400a=	[HW,SCSI]
+			See Documentation/scsi/g_NCR5380.txt.
 
 	ncr53c406a=	[HW,SCSI]
 
diff --git a/Documentation/security/LoadPin.txt b/Documentation/security/LoadPin.txt
new file mode 100644
index 0000000..e11877f
--- /dev/null
+++ b/Documentation/security/LoadPin.txt
@@ -0,0 +1,17 @@
+LoadPin is a Linux Security Module that ensures that all kernel-loaded files
+(modules, firmware, etc.) originate from the same filesystem, with
+the expectation that such a filesystem is backed by a read-only device
+such as dm-verity or CDROM. This allows systems that have a verified
+and/or unchangeable filesystem to enforce module and firmware loading
+restrictions without needing to sign the files individually.
+
+The LSM is selectable at build-time with CONFIG_SECURITY_LOADPIN, and
+can be controlled at boot-time with the kernel command line option
+"loadpin.enabled". By default, it is enabled, but can be disabled at
+boot ("loadpin.enabled=0").
+
+LoadPin starts pinning when it sees the first file loaded. If the
+block device backing the filesystem is not read-only, a sysctl is
+created to toggle pinning: /proc/sys/kernel/loadpin/enabled. (Having
+a mutable filesystem means pinning is mutable too, but having the
+sysctl allows for easy testing on systems with a mutable filesystem.)
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 8c18387..20d0571 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -823,6 +823,36 @@
      A process must have search permission on the key for this function to be
      successful.
 
+ (*) Compute a Diffie-Hellman shared secret or public key
+
+       long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
+		   char *buffer, size_t buflen);
+
+     The params struct contains serial numbers for three keys:
+
+	 - The prime, p, known to both parties
+	 - The local private key
+	 - The base integer, which is either a shared generator or the
+	   remote public key
+
+     The value computed is:
+
+	result = base ^ private (mod prime)
+
+     If the base is the shared generator, the result is the local
+     public key.  If the base is the remote public key, the result is
+     the shared secret.
+
+     The buffer length must be at least the length of the prime, or zero.
+
+     If the buffer length is nonzero, the length of the result is
+     returned when it is successfully calculated and copied into the
+     buffer. When the buffer length is zero, the minimum required
+     buffer length is returned.
+
+     This function will return error EOPNOTSUPP if the key type is not
+     supported, error ENOKEY if the key could not be found, or error
+     EACCES if the key is not readable by the caller.
 
 ===============
 KERNEL SERVICES
@@ -999,6 +1029,10 @@
 	struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
 				  const struct cred *cred,
 				  key_perm_t perm,
+				  int (*restrict_link)(struct key *,
+						       const struct key_type *,
+						       unsigned long,
+						       const union key_payload *),
 				  unsigned long flags,
 				  struct key *dest);
 
@@ -1010,6 +1044,24 @@
     KEY_ALLOC_NOT_IN_QUOTA in flags if the keyring shouldn't be accounted
     towards the user's quota).  Error ENOMEM can also be returned.
 
+    If restrict_link is not NULL, it should point to a function that will be
+    called each time an attempt is made to link a key into the new keyring.
+    This function is called to check whether a key may be added into the keyring
+    or not.  Callers of key_create_or_update() within the kernel can pass
+    KEY_ALLOC_BYPASS_RESTRICTION to suppress the check.  An example of using
+    this is to manage rings of cryptographic keys that are set up when the
+    kernel boots where userspace is also permitted to add keys - provided they
+    can be verified by a key the kernel already has.
+
+    When called, the restriction function will be passed the keyring being
+    added to, the key flags value and the type and payload of the key being
+    added.  Note that when a new key is being created, this is called between
+    payload preparsing and actual key creation.  The function should return 0
+    to allow the link or an error to reject it.
+
+    A convenience function, restrict_link_reject, exists to always return
+    -EPERM in this case.
+
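+    For example, a restriction function that only allows keys of one
+    particular type to be linked might look like this (a sketch following
+    the prototype above; foo_key_type is a hypothetical key type):
+
+	static int restrict_link_to_foo(struct key *keyring,
+					const struct key_type *type,
+					unsigned long flags,
+					const union key_payload *payload)
+	{
+		/* Allow the link only for keys of the expected type. */
+		return type == &foo_key_type ? 0 : -EPERM;
+	}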
 
 (*) To check the validity of a key, this function can be called:
 
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index e7193aa..d4510eb 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -655,17 +655,6 @@
 and next kernels are found in for-linus and for-next branches,
 respectively.
 
-If you are using the latest Linus tree, it'd be better to pull the
-above GIT tree onto it.  If you are using the older kernels, an easy
-way to try the latest ALSA code is to build from the snapshot
-tarball.  There are daily tarballs and the latest snapshot tarball.
-All can be built just like normal alsa-driver release packages, that
-is, installed via the usual spells: configure, make and make
-install(-modules).  See INSTALL in the package.  The snapshot tarballs
-are found at:
-
-- ftp://ftp.suse.com/pub/people/tiwai/snapshot/
-
 
 Sending a Bug Report
 ~~~~~~~~~~~~~~~~~~~~
@@ -699,7 +688,12 @@
 alsa-info
 ~~~~~~~~~
 The script `alsa-info.sh` is a very useful tool to gather the audio
-device information.  You can fetch the latest version from:
+device information.  It's included in the alsa-utils package.  The latest
+version can be found in the git repository:
+
+- git://git.alsa-project.org/alsa-utils.git
+
+The script can be fetched directly from the following URL, too:
 
 - http://www.alsa-project.org/alsa-info.sh
 
@@ -836,15 +830,11 @@
 (mixer) elements, set/get the control element value, simulate the PCM
 operation, the jack plugging simulation, etc.
 
-The package is found in:
-
-- ftp://ftp.suse.com/pub/people/tiwai/misc/
-
-A git repository is available:
+The program is found in the git repository below:
 
 - git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/hda-emu.git
 
-See README file in the tarball for more details about hda-emu
+See the README file in the repository for more details about the hda-emu
 program.
 
 
diff --git a/Documentation/sound/alsa/compress_offload.txt b/Documentation/sound/alsa/compress_offload.txt
index 630c492..8ba556a 100644
--- a/Documentation/sound/alsa/compress_offload.txt
+++ b/Documentation/sound/alsa/compress_offload.txt
@@ -149,7 +149,7 @@
 ================
 When playing thru an album, the decoders have the ability to skip the encoder
 delay and padding and directly move from one track content to another. The end
-user can perceive this as gapless playback as we dont have silence while
+user can perceive this as gapless playback as we don't have silence while
 switching from one track to another
 
 Also, there might be low-intensity noises due to encoding. Perfect gapless is
@@ -184,7 +184,7 @@
 - Fill data of the first track
 - Trigger start
 - User-space finished sending all,
-- Indicaite next track data by sending set_next_track
+- Indicate next track data by sending set_next_track
 - Set metadata of the next track
 - then call partial_drain to flush most of buffer in DSP
 - Fill data of the next track
diff --git a/Documentation/sound/alsa/soc/dapm.txt b/Documentation/sound/alsa/soc/dapm.txt
index 6faab48..c45bd79 100644
--- a/Documentation/sound/alsa/soc/dapm.txt
+++ b/Documentation/sound/alsa/soc/dapm.txt
@@ -132,7 +132,7 @@
 SND_SOC_DAPM_MIXER("Output Mixer", WM8731_PWR, 4, 1, wm8731_output_mixer_controls,
 	ARRAY_SIZE(wm8731_output_mixer_controls)),
 
-If you dont want the mixer elements prefixed with the name of the mixer widget,
+If you don't want the mixer elements prefixed with the name of the mixer widget,
 you can use SND_SOC_DAPM_MIXER_NAMED_CTL instead. the parameters are the same
 as for SND_SOC_DAPM_MIXER.
 
diff --git a/Documentation/sound/alsa/soc/overview.txt b/Documentation/sound/alsa/soc/overview.txt
index ff88f52..f3f28b7 100644
--- a/Documentation/sound/alsa/soc/overview.txt
+++ b/Documentation/sound/alsa/soc/overview.txt
@@ -63,7 +63,7 @@
     and any audio DSP drivers for that platform.
 
   * Machine class driver: The machine driver class acts as the glue that
-    decribes and binds the other component drivers together to form an ALSA
+    describes and binds the other component drivers together to form an ALSA
     "sound card device". It handles any machine specific controls and
     machine level audio events (e.g. turning on an amp at start of playback).
 
diff --git a/Documentation/sound/alsa/timestamping.txt b/Documentation/sound/alsa/timestamping.txt
index 0b191a2..1b6473f 100644
--- a/Documentation/sound/alsa/timestamping.txt
+++ b/Documentation/sound/alsa/timestamping.txt
@@ -129,7 +129,7 @@
 interpolation of the results
 
 In some hardware-specific configuration, the system timestamp is
-latched by a low-level audio subsytem, and the information provided
+latched by a low-level audio subsystem, and the information provided
 back to the driver. Due to potential delays in the communication with
 the hardware, there is a risk of misalignment with the avail and delay
 information. To make sure applications are not confused, a
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 57653a4..daabdd7 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -60,6 +60,7 @@
 - panic_on_warn
 - perf_cpu_time_max_percent
 - perf_event_paranoid
+- perf_event_max_stack
 - pid_max
 - powersave-nap               [ PPC only ]
 - printk
@@ -645,7 +646,7 @@
 perf_event_paranoid:
 
 Controls use of the performance events system by unprivileged
-users (without CAP_SYS_ADMIN).  The default value is 1.
+users (without CAP_SYS_ADMIN).  The default value is 2.
 
  -1: Allow use of (almost) all events by all users
 >=0: Disallow raw tracepoint access by users without CAP_IOC_LOCK
@@ -654,6 +655,19 @@
 
 ==============================================================
 
+perf_event_max_stack:
+
+Controls the maximum number of stack frames to copy for events configured
+with (attr.sample_type & PERF_SAMPLE_CALLCHAIN), for instance when using
+'perf record -g' or 'perf trace --call-graph fp'.
+
+This can only be done when no events are in use that have callchains
+enabled, otherwise writing to this file will return -EBUSY.
+
+The default value is 127.
+
+==============================================================
+
 pid_max:
 
 PID allocation wrap value.  When the kernel's next PID value
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 809ab6e..f0480f7 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -43,6 +43,17 @@
 	1 - enable the JIT
 	2 - enable the JIT and ask the compiler to emit traces on kernel log.
 
+bpf_jit_harden
+--------------
+
+This enables hardening for the Berkeley Packet Filter Just in Time compiler.
+It is supported by the eBPF JIT backends.  Enabling hardening trades off
+performance, but can mitigate JIT spraying.
+Values :
+	0 - disable JIT hardening (default value)
+	1 - enable JIT hardening for unprivileged users only
+	2 - enable JIT hardening for all users
+
 dev_weight
 --------------
 
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index cb03684..34a5fec 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -581,15 +581,16 @@
 "Zone Order" orders the zonelists by zone type, then by node within each
 zone.  Specify "[Zz]one" for zone order.
 
-Specify "[Dd]efault" to request automatic configuration.  Autoconfiguration
-will select "node" order in following case.
-(1) if the DMA zone does not exist or
-(2) if the DMA zone comprises greater than 50% of the available memory or
-(3) if any node's DMA zone comprises greater than 70% of its local memory and
-    the amount of local memory is big enough.
+Specify "[Dd]efault" to request automatic configuration.
 
-Otherwise, "zone" order will be selected. Default order is recommended unless
-this is causing problems for your system/application.
+On 32-bit, the Normal zone needs to be preserved for allocations accessible
+by the kernel, so "zone" order will be selected.
+
+On 64-bit, devices that require DMA32/DMA are relatively rare, so "node"
+order will be selected.
+
+Default order is recommended unless this is causing problems for your
+system/application.
 
 ==============================================================
 
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index c010be8..08d74d7 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -512,3 +512,1558 @@
 
   Note that there can be only one traceon or traceoff trigger per
   triggering event.
+
+- hist
+
+  This command aggregates event hits into a hash table keyed on one or
+  more trace event format fields (or stacktrace), maintaining a set of
+  running totals derived from one or more trace event format fields
+  and/or event counts (hitcount).
+
+  The format of a hist trigger is as follows:
+
+        hist:keys=<field1[,field2,...]>[:values=<field1[,field2,...]>]
+          [:sort=<field1[,field2,...]>][:size=#entries][:pause][:continue]
+          [:clear][:name=histname1] [if <filter>]
+
+  When a matching event is hit, an entry is added to a hash table
+  using the key(s) and value(s) named.  Keys and values correspond to
+  fields in the event's format description.  Values must correspond to
+  numeric fields - on an event hit, the value(s) will be added to a
+  sum kept for that field.  The special string 'hitcount' can be used
+  in place of an explicit value field - this is simply a count of
+  event hits.  If 'values' isn't specified, an implicit 'hitcount'
+  value will be automatically created and used as the only value.
+  Keys can be any field, or the special string 'stacktrace', which
+  will use the event's kernel stacktrace as the key.  The keywords
+  'keys' or 'key' can be used to specify keys, and the keywords
+  'values', 'vals', or 'val' can be used to specify values.  Compound
+  keys consisting of up to two fields can be specified by the 'keys'
+  keyword.  Hashing a compound key produces a unique entry in the
+  table for each unique combination of component keys, and can be
+  useful for providing more fine-grained summaries of event data.
+  Additionally, sort keys consisting of up to two fields can be
+  specified by the 'sort' keyword.  If more than one field is
+  specified, the result will be a 'sort within a sort': the first key
+  is taken to be the primary sort key and the second the secondary
+  key.  If a hist trigger is given a name using the 'name' parameter,
+  its histogram data will be shared with other triggers of the same
+  name, and trigger hits will update this common data.  Only triggers
+  with 'compatible' fields can be combined in this way; triggers are
+  'compatible' if the fields named in the trigger share the same
+  number and type of fields and those fields also have the same names.
+  Note that any two events always share the compatible 'hitcount' and
+  'stacktrace' fields and can therefore be combined using those
+  fields, however pointless that may be.
+
+  'hist' triggers add a 'hist' file to each event's subdirectory.
+  Reading the 'hist' file for the event will dump the hash table in
+  its entirety to stdout.  If there are multiple hist triggers
+  attached to an event, there will be a table for each trigger in the
+  output.  The table displayed for a named trigger will be the same as
+  any other instance having the same name. Each printed hash table
+  entry is a simple list of the keys and values comprising the entry;
+  keys are printed first and are delineated by curly braces, and are
+  followed by the set of value fields for the entry.  By default,
+  numeric fields are displayed as base-10 integers.  This can be
+  modified by appending any of the following modifiers to the field
+  name:
+
+        .hex        display a number as a hex value
+        .sym        display an address as a symbol
+        .sym-offset display an address as a symbol and offset
+        .syscall    display a syscall id as a system call name
+        .execname   display a common_pid as a program name
+
+  Note that in general the semantics of a given field aren't
+  interpreted when applying a modifier to it, but there are some
+  restrictions to be aware of in this regard:
+
+    - only the 'hex' modifier can be used for values (because values
+      are essentially sums, and the other modifiers don't make sense
+      in that context).
+    - the 'execname' modifier can only be used on a 'common_pid'.  The
+      reason for this is that the execname is simply the 'comm' value
+      saved for the 'current' process when an event was triggered,
+      which is the same as the common_pid value saved by the event
+      tracing code.  Trying to apply that comm value to other pid
+      values wouldn't be correct, and typically events that care save
+      pid-specific comm fields in the event itself.
+
+  A typical usage scenario would be the following to enable a hist
+  trigger, read its current contents, and then turn it off:
+
+  # echo 'hist:keys=skbaddr.hex:vals=len' > \
+    /sys/kernel/debug/tracing/events/net/netif_rx/trigger
+
+  # cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
+
+  # echo '!hist:keys=skbaddr.hex:vals=len' > \
+    /sys/kernel/debug/tracing/events/net/netif_rx/trigger
+
+  The trigger file itself can be read to show the details of the
+  currently attached hist trigger.  This information is also displayed
+  at the top of the 'hist' file when read.
+
+  By default, the size of the hash table is 2048 entries.  The 'size'
+  parameter can be used to specify more or fewer than that.  The units
+  are in terms of hashtable entries - if a run uses more entries than
+  specified, the results will show the number of 'drops', the number
+  of hits that were ignored.  The size should be a power of 2 between
+  128 and 131072 (any non-power-of-2 number specified will be rounded
+  up).
+
+  The 'sort' parameter can be used to specify a value field to sort
+  on.  The default if unspecified is 'hitcount' and the default sort
+  order is 'ascending'.  To sort in the opposite direction, append
+  '.descending' to the sort key.
+
+  The 'pause' parameter can be used to pause an existing hist trigger
+  or to start a hist trigger but not log any events until told to do
+  so.  'continue' or 'cont' can be used to start or restart a paused
+  hist trigger.
+
+  The 'clear' parameter will clear the contents of a running hist
+  trigger and leave its current paused/active state unchanged.
+
+  Note that the 'pause', 'cont', and 'clear' parameters should be
+  applied using the 'append' shell operator ('>>') if applied to an
+  existing trigger, rather than via the '>' operator, which will cause
+  the trigger to be removed through truncation.
+
+- enable_hist/disable_hist
+
+  The enable_hist and disable_hist triggers can be used to have one
+  event conditionally start and stop another event's already-attached
+  hist trigger.  Any number of enable_hist and disable_hist triggers
+  can be attached to a given event, allowing that event to kick off
+  and stop aggregations on a host of other events.
+
+  The format is very similar to the enable/disable_event triggers:
+
+      enable_hist:<system>:<event>[:count]
+      disable_hist:<system>:<event>[:count]
+
+  Instead of enabling or disabling the tracing of the target event
+  into the trace buffer as the enable/disable_event triggers do, the
+  enable/disable_hist triggers enable or disable the aggregation of
+  the target event into a hash table.
+
+  A typical usage scenario for the enable_hist/disable_hist triggers
+  would be to first set up a paused hist trigger on some event,
+  followed by an enable_hist/disable_hist pair that turns the hist
+  aggregation on and off when conditions of interest are hit:
+
+  # echo 'hist:keys=skbaddr.hex:vals=len:pause' > \
+    /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+
+  # echo 'enable_hist:net:netif_receive_skb if filename==/usr/bin/wget' > \
+    /sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
+
+  # echo 'disable_hist:net:netif_receive_skb if comm==wget' > \
+    /sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
+
+  The above sets up an initially paused hist trigger which is unpaused
+  and starts aggregating events when a given program is executed, and
+  which stops aggregating when the process exits and the hist trigger
+  is paused again.
+
+  The examples below provide a more concrete illustration of the
+  concepts and typical usage patterns discussed above.
+
+
+6.2 'hist' trigger examples
+---------------------------
+
+  The first set of examples creates aggregations using the kmalloc
+  event.  The fields that can be used for the hist trigger are listed
+  in the kmalloc event's format file:
+
+    # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/format
+    name: kmalloc
+    ID: 374
+    format:
+	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
+	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
+	field:unsigned char common_preempt_count;		offset:3;	size:1;	signed:0;
+	field:int common_pid;					offset:4;	size:4;	signed:1;
+
+	field:unsigned long call_site;				offset:8;	size:8;	signed:0;
+	field:const void * ptr;					offset:16;	size:8;	signed:0;
+	field:size_t bytes_req;					offset:24;	size:8;	signed:0;
+	field:size_t bytes_alloc;				offset:32;	size:8;	signed:0;
+	field:gfp_t gfp_flags;					offset:40;	size:4;	signed:0;
+
+  We'll start by creating a hist trigger that generates a simple table
+  that lists the total number of bytes requested for each function in
+  the kernel that made one or more calls to kmalloc:
+
+    # echo 'hist:key=call_site:val=bytes_req' > \
+            /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
+
+  This tells the tracing system to create a 'hist' trigger using the
+  call_site field of the kmalloc event as the key for the table, which
+  just means that each unique call_site address will have an entry
+  created for it in the table.  The 'val=bytes_req' parameter tells
+  the hist trigger that for each unique entry (call_site) in the
+  table, it should keep a running total of the number of bytes
+  requested by that call_site.
+
+  We'll let it run for a while and then dump the contents of the 'hist'
+  file in the kmalloc event's subdirectory (for readability, a number
+  of entries have been omitted):
+
+    # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
+    # trigger info: hist:keys=call_site:vals=bytes_req:sort=hitcount:size=2048 [active]
+
+    { call_site: 18446744072106379007 } hitcount:          1  bytes_req:        176
+    { call_site: 18446744071579557049 } hitcount:          1  bytes_req:       1024
+    { call_site: 18446744071580608289 } hitcount:          1  bytes_req:      16384
+    { call_site: 18446744071581827654 } hitcount:          1  bytes_req:         24
+    { call_site: 18446744071580700980 } hitcount:          1  bytes_req:          8
+    { call_site: 18446744071579359876 } hitcount:          1  bytes_req:        152
+    { call_site: 18446744071580795365 } hitcount:          3  bytes_req:        144
+    { call_site: 18446744071581303129 } hitcount:          3  bytes_req:        144
+    { call_site: 18446744071580713234 } hitcount:          4  bytes_req:       2560
+    { call_site: 18446744071580933750 } hitcount:          4  bytes_req:        736
+    .
+    .
+    .
+    { call_site: 18446744072106047046 } hitcount:         69  bytes_req:       5576
+    { call_site: 18446744071582116407 } hitcount:         73  bytes_req:       2336
+    { call_site: 18446744072106054684 } hitcount:        136  bytes_req:     140504
+    { call_site: 18446744072106224230 } hitcount:        136  bytes_req:      19584
+    { call_site: 18446744072106078074 } hitcount:        153  bytes_req:       2448
+    { call_site: 18446744072106062406 } hitcount:        153  bytes_req:      36720
+    { call_site: 18446744071582507929 } hitcount:        153  bytes_req:      37088
+    { call_site: 18446744072102520590 } hitcount:        273  bytes_req:      10920
+    { call_site: 18446744071582143559 } hitcount:        358  bytes_req:        716
+    { call_site: 18446744072106465852 } hitcount:        417  bytes_req:      56712
+    { call_site: 18446744072102523378 } hitcount:        485  bytes_req:      27160
+    { call_site: 18446744072099568646 } hitcount:       1676  bytes_req:      33520
+
+    Totals:
+        Hits: 4610
+        Entries: 45
+        Dropped: 0
+
+  The output displays a line for each entry, beginning with the key
+  specified in the trigger, followed by the value(s) also specified in
+  the trigger.  At the beginning of the output is a line that displays
+  the trigger info, which can also be displayed by reading the
+  'trigger' file:
+
+    # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
+    hist:keys=call_site:vals=bytes_req:sort=hitcount:size=2048 [active]
+
+  At the end of the output are a few lines that display the overall
+  totals for the run.  The 'Hits' field shows the total number of
+  times the event trigger was hit, the 'Entries' field shows the total
+  number of used entries in the hash table, and the 'Dropped' field
+  shows the number of hits that were dropped because the number of
+  used entries for the run exceeded the maximum number of entries
+  allowed for the table (normally 0, but if not, a hint that you may
+  want to increase the size of the table using the 'size' parameter).
+
+  Notice in the above output that there's an extra field, 'hitcount',
+  which wasn't specified in the trigger.  Also notice that in the
+  trigger info output, there's a parameter, 'sort=hitcount', which
+  wasn't specified in the trigger either.  The reason for that is that
+  every trigger implicitly keeps a count of the total number of hits
+  attributed to a given entry, called the 'hitcount'.  That hitcount
+  information is explicitly displayed in the output, and in the
+  absence of a user-specified sort parameter, is used as the default
+  sort field.
+
+  The value 'hitcount' can be used in place of an explicit value in
+  the 'values' parameter if you don't really need to have any
+  particular field summed and are mainly interested in hit
+  frequencies.
+
+  To turn the hist trigger off, simply call up the trigger in the
+  command history and re-execute it with a '!' prepended:
+
+    # echo '!hist:key=call_site:val=bytes_req' > \
+           /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
+
+  Finally, notice that the call_site as displayed in the output above
+  isn't really very useful.  It's an address, but normally addresses
+  are displayed in hex.  To have a numeric field displayed as a hex
+  value, simply append '.hex' to the field name in the trigger:
+
+    # echo 'hist:key=call_site.hex:val=bytes_req' > \
+           /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
+
+    # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
+    # trigger info: hist:keys=call_site.hex:vals=bytes_req:sort=hitcount:size=2048 [active]
+
+    { call_site: ffffffffa026b291 } hitcount:          1  bytes_req:        433
+    { call_site: ffffffffa07186ff } hitcount:          1  bytes_req:        176
+    { call_site: ffffffff811ae721 } hitcount:          1  bytes_req:      16384
+    { call_site: ffffffff811c5134 } hitcount:          1  bytes_req:          8
+    { call_site: ffffffffa04a9ebb } hitcount:          1  bytes_req:        511
+    { call_site: ffffffff8122e0a6 } hitcount:          1  bytes_req:         12
+    { call_site: ffffffff8107da84 } hitcount:          1  bytes_req:        152
+    { call_site: ffffffff812d8246 } hitcount:          1  bytes_req:         24
+    { call_site: ffffffff811dc1e5 } hitcount:          3  bytes_req:        144
+    { call_site: ffffffffa02515e8 } hitcount:          3  bytes_req:        648
+    { call_site: ffffffff81258159 } hitcount:          3  bytes_req:        144
+    { call_site: ffffffff811c80f4 } hitcount:          4  bytes_req:        544
+    .
+    .
+    .
+    { call_site: ffffffffa06c7646 } hitcount:        106  bytes_req:       8024
+    { call_site: ffffffffa06cb246 } hitcount:        132  bytes_req:      31680
+    { call_site: ffffffffa06cef7a } hitcount:        132  bytes_req:       2112
+    { call_site: ffffffff8137e399 } hitcount:        132  bytes_req:      23232
+    { call_site: ffffffffa06c941c } hitcount:        185  bytes_req:     171360
+    { call_site: ffffffffa06f2a66 } hitcount:        185  bytes_req:      26640
+    { call_site: ffffffffa036a70e } hitcount:        265  bytes_req:      10600
+    { call_site: ffffffff81325447 } hitcount:        292  bytes_req:        584
+    { call_site: ffffffffa072da3c } hitcount:        446  bytes_req:      60656
+    { call_site: ffffffffa036b1f2 } hitcount:        526  bytes_req:      29456
+    { call_site: ffffffffa0099c06 } hitcount:       1780  bytes_req:      35600
+
+    Totals:
+        Hits: 4775
+        Entries: 46
+        Dropped: 0
+
+  Even that's only marginally more useful - while hex values do look
+  more like addresses, what users are typically more interested in
+  when looking at text addresses are the corresponding symbols
+  instead.  To have an address displayed as a symbolic value instead,
+  simply append '.sym' or '.sym-offset' to the field name in the
+  trigger:
+
+    # echo 'hist:key=call_site.sym:val=bytes_req' > \
+           /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
+
+    # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
+    # trigger info: hist:keys=call_site.sym:vals=bytes_req:sort=hitcount:size=2048 [active]
+
+    { call_site: [ffffffff810adcb9] syslog_print_all                              } hitcount:          1  bytes_req:       1024
+    { call_site: [ffffffff8154bc62] usb_control_msg                               } hitcount:          1  bytes_req:          8
+    { call_site: [ffffffffa00bf6fe] hidraw_send_report [hid]                      } hitcount:          1  bytes_req:          7
+    { call_site: [ffffffff8154acbe] usb_alloc_urb                                 } hitcount:          1  bytes_req:        192
+    { call_site: [ffffffffa00bf1ca] hidraw_report_event [hid]                     } hitcount:          1  bytes_req:          7
+    { call_site: [ffffffff811e3a25] __seq_open_private                            } hitcount:          1  bytes_req:         40
+    { call_site: [ffffffff8109524a] alloc_fair_sched_group                        } hitcount:          2  bytes_req:        128
+    { call_site: [ffffffff811febd5] fsnotify_alloc_group                          } hitcount:          2  bytes_req:        528
+    { call_site: [ffffffff81440f58] __tty_buffer_request_room                     } hitcount:          2  bytes_req:       2624
+    { call_site: [ffffffff81200ba6] inotify_new_group                             } hitcount:          2  bytes_req:         96
+    { call_site: [ffffffffa05e19af] ieee80211_start_tx_ba_session [mac80211]      } hitcount:          2  bytes_req:        464
+    { call_site: [ffffffff81672406] tcp_get_metrics                               } hitcount:          2  bytes_req:        304
+    { call_site: [ffffffff81097ec2] alloc_rt_sched_group                          } hitcount:          2  bytes_req:        128
+    { call_site: [ffffffff81089b05] sched_create_group                            } hitcount:          2  bytes_req:       1424
+    .
+    .
+    .
+    { call_site: [ffffffffa04a580c] intel_crtc_page_flip [i915]                   } hitcount:       1185  bytes_req:     123240
+    { call_site: [ffffffffa0287592] drm_mode_page_flip_ioctl [drm]                } hitcount:       1185  bytes_req:     104280
+    { call_site: [ffffffffa04c4a3c] intel_plane_duplicate_state [i915]            } hitcount:       1402  bytes_req:     190672
+    { call_site: [ffffffff812891ca] ext4_find_extent                              } hitcount:       1518  bytes_req:     146208
+    { call_site: [ffffffffa029070e] drm_vma_node_allow [drm]                      } hitcount:       1746  bytes_req:      69840
+    { call_site: [ffffffffa045e7c4] i915_gem_do_execbuffer.isra.23 [i915]         } hitcount:       2021  bytes_req:     792312
+    { call_site: [ffffffffa02911f2] drm_modeset_lock_crtc [drm]                   } hitcount:       2592  bytes_req:     145152
+    { call_site: [ffffffffa0489a66] intel_ring_begin [i915]                       } hitcount:       2629  bytes_req:     378576
+    { call_site: [ffffffffa046041c] i915_gem_execbuffer2 [i915]                   } hitcount:       2629  bytes_req:    3783248
+    { call_site: [ffffffff81325607] apparmor_file_alloc_security                  } hitcount:       5192  bytes_req:      10384
+    { call_site: [ffffffffa00b7c06] hid_report_raw_event [hid]                    } hitcount:       5529  bytes_req:     110584
+    { call_site: [ffffffff8131ebf7] aa_alloc_task_context                         } hitcount:      21943  bytes_req:     702176
+    { call_site: [ffffffff8125847d] ext4_htree_store_dirent                       } hitcount:      55759  bytes_req:    5074265
+
+    Totals:
+        Hits: 109928
+        Entries: 71
+        Dropped: 0
+
+  Because the default sort key above is 'hitcount', the above shows the
+  list of call_sites by increasing hitcount, so that at the bottom
+  we see the functions that made the most kmalloc calls during the
+  run.  If instead we wanted to see the top kmalloc callers in
+  terms of the number of bytes requested rather than the number of
+  calls, and we wanted the top caller to appear at the top, we can use
+  the 'sort' parameter, along with the 'descending' modifier:
+
+    # echo 'hist:key=call_site.sym:val=bytes_req:sort=bytes_req.descending' > \
+           /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
+
+    # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
+    # trigger info: hist:keys=call_site.sym:vals=bytes_req:sort=bytes_req.descending:size=2048 [active]
+
+    { call_site: [ffffffffa046041c] i915_gem_execbuffer2 [i915]                   } hitcount:       2186  bytes_req:    3397464
+    { call_site: [ffffffffa045e7c4] i915_gem_do_execbuffer.isra.23 [i915]         } hitcount:       1790  bytes_req:     712176
+    { call_site: [ffffffff8125847d] ext4_htree_store_dirent                       } hitcount:       8132  bytes_req:     513135
+    { call_site: [ffffffff811e2a1b] seq_buf_alloc                                 } hitcount:        106  bytes_req:     440128
+    { call_site: [ffffffffa0489a66] intel_ring_begin [i915]                       } hitcount:       2186  bytes_req:     314784
+    { call_site: [ffffffff812891ca] ext4_find_extent                              } hitcount:       2174  bytes_req:     208992
+    { call_site: [ffffffff811ae8e1] __kmalloc                                     } hitcount:          8  bytes_req:     131072
+    { call_site: [ffffffffa04c4a3c] intel_plane_duplicate_state [i915]            } hitcount:        859  bytes_req:     116824
+    { call_site: [ffffffffa02911f2] drm_modeset_lock_crtc [drm]                   } hitcount:       1834  bytes_req:     102704
+    { call_site: [ffffffffa04a580c] intel_crtc_page_flip [i915]                   } hitcount:        972  bytes_req:     101088
+    { call_site: [ffffffffa0287592] drm_mode_page_flip_ioctl [drm]                } hitcount:        972  bytes_req:      85536
+    { call_site: [ffffffffa00b7c06] hid_report_raw_event [hid]                    } hitcount:       3333  bytes_req:      66664
+    { call_site: [ffffffff8137e559] sg_kmalloc                                    } hitcount:        209  bytes_req:      61632
+    .
+    .
+    .
+    { call_site: [ffffffff81095225] alloc_fair_sched_group                        } hitcount:          2  bytes_req:        128
+    { call_site: [ffffffff81097ec2] alloc_rt_sched_group                          } hitcount:          2  bytes_req:        128
+    { call_site: [ffffffff812d8406] copy_semundo                                  } hitcount:          2  bytes_req:         48
+    { call_site: [ffffffff81200ba6] inotify_new_group                             } hitcount:          1  bytes_req:         48
+    { call_site: [ffffffffa027121a] drm_getmagic [drm]                            } hitcount:          1  bytes_req:         48
+    { call_site: [ffffffff811e3a25] __seq_open_private                            } hitcount:          1  bytes_req:         40
+    { call_site: [ffffffff811c52f4] bprm_change_interp                            } hitcount:          2  bytes_req:         16
+    { call_site: [ffffffff8154bc62] usb_control_msg                               } hitcount:          1  bytes_req:          8
+    { call_site: [ffffffffa00bf1ca] hidraw_report_event [hid]                     } hitcount:          1  bytes_req:          7
+    { call_site: [ffffffffa00bf6fe] hidraw_send_report [hid]                      } hitcount:          1  bytes_req:          7
+
+    Totals:
+        Hits: 32133
+        Entries: 81
+        Dropped: 0
+
+  To display the offset and size information in addition to the symbol
+  name, just use 'sym-offset' instead:
+
+    # echo 'hist:key=call_site.sym-offset:val=bytes_req:sort=bytes_req.descending' > \
+           /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
+
+    # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
+    # trigger info: hist:keys=call_site.sym-offset:vals=bytes_req:sort=bytes_req.descending:size=2048 [active]
+
+    { call_site: [ffffffffa046041c] i915_gem_execbuffer2+0x6c/0x2c0 [i915]                  } hitcount:       4569  bytes_req:    3163720
+    { call_site: [ffffffffa0489a66] intel_ring_begin+0xc6/0x1f0 [i915]                      } hitcount:       4569  bytes_req:     657936
+    { call_site: [ffffffffa045e7c4] i915_gem_do_execbuffer.isra.23+0x694/0x1020 [i915]      } hitcount:       1519  bytes_req:     472936
+    { call_site: [ffffffffa045e646] i915_gem_do_execbuffer.isra.23+0x516/0x1020 [i915]      } hitcount:       3050  bytes_req:     211832
+    { call_site: [ffffffff811e2a1b] seq_buf_alloc+0x1b/0x50                                 } hitcount:         34  bytes_req:     148384
+    { call_site: [ffffffffa04a580c] intel_crtc_page_flip+0xbc/0x870 [i915]                  } hitcount:       1385  bytes_req:     144040
+    { call_site: [ffffffff811ae8e1] __kmalloc+0x191/0x1b0                                   } hitcount:          8  bytes_req:     131072
+    { call_site: [ffffffffa0287592] drm_mode_page_flip_ioctl+0x282/0x360 [drm]              } hitcount:       1385  bytes_req:     121880
+    { call_site: [ffffffffa02911f2] drm_modeset_lock_crtc+0x32/0x100 [drm]                  } hitcount:       1848  bytes_req:     103488
+    { call_site: [ffffffffa04c4a3c] intel_plane_duplicate_state+0x2c/0xa0 [i915]            } hitcount:        461  bytes_req:      62696
+    { call_site: [ffffffffa029070e] drm_vma_node_allow+0x2e/0xd0 [drm]                      } hitcount:       1541  bytes_req:      61640
+    { call_site: [ffffffff815f8d7b] sk_prot_alloc+0xcb/0x1b0                                } hitcount:         57  bytes_req:      57456
+    .
+    .
+    .
+    { call_site: [ffffffff8109524a] alloc_fair_sched_group+0x5a/0x1a0                       } hitcount:          2  bytes_req:        128
+    { call_site: [ffffffffa027b921] drm_vm_open_locked+0x31/0xa0 [drm]                      } hitcount:          3  bytes_req:         96
+    { call_site: [ffffffff8122e266] proc_self_follow_link+0x76/0xb0                         } hitcount:          8  bytes_req:         96
+    { call_site: [ffffffff81213e80] load_elf_binary+0x240/0x1650                            } hitcount:          3  bytes_req:         84
+    { call_site: [ffffffff8154bc62] usb_control_msg+0x42/0x110                              } hitcount:          1  bytes_req:          8
+    { call_site: [ffffffffa00bf6fe] hidraw_send_report+0x7e/0x1a0 [hid]                     } hitcount:          1  bytes_req:          7
+    { call_site: [ffffffffa00bf1ca] hidraw_report_event+0x8a/0x120 [hid]                    } hitcount:          1  bytes_req:          7
+
+    Totals:
+        Hits: 26098
+        Entries: 64
+        Dropped: 0
+
+  We can also add multiple fields to the 'values' parameter.  For
+  example, we might want to see the total number of bytes allocated
+  alongside bytes requested, and display the result sorted by bytes
+  allocated in descending order:
+
+    # echo 'hist:keys=call_site.sym:values=bytes_req,bytes_alloc:sort=bytes_alloc.descending' > \
+           /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
+
+    # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
+    # trigger info: hist:keys=call_site.sym:vals=bytes_req,bytes_alloc:sort=bytes_alloc.descending:size=2048 [active]
+
+    { call_site: [ffffffffa046041c] i915_gem_execbuffer2 [i915]                   } hitcount:       7403  bytes_req:    4084360  bytes_alloc:    5958016
+    { call_site: [ffffffff811e2a1b] seq_buf_alloc                                 } hitcount:        541  bytes_req:    2213968  bytes_alloc:    2228224
+    { call_site: [ffffffffa0489a66] intel_ring_begin [i915]                       } hitcount:       7404  bytes_req:    1066176  bytes_alloc:    1421568
+    { call_site: [ffffffffa045e7c4] i915_gem_do_execbuffer.isra.23 [i915]         } hitcount:       1565  bytes_req:     557368  bytes_alloc:    1037760
+    { call_site: [ffffffff8125847d] ext4_htree_store_dirent                       } hitcount:       9557  bytes_req:     595778  bytes_alloc:     695744
+    { call_site: [ffffffffa045e646] i915_gem_do_execbuffer.isra.23 [i915]         } hitcount:       5839  bytes_req:     430680  bytes_alloc:     470400
+    { call_site: [ffffffffa04c4a3c] intel_plane_duplicate_state [i915]            } hitcount:       2388  bytes_req:     324768  bytes_alloc:     458496
+    { call_site: [ffffffffa02911f2] drm_modeset_lock_crtc [drm]                   } hitcount:       3911  bytes_req:     219016  bytes_alloc:     250304
+    { call_site: [ffffffff815f8d7b] sk_prot_alloc                                 } hitcount:        235  bytes_req:     236880  bytes_alloc:     240640
+    { call_site: [ffffffff8137e559] sg_kmalloc                                    } hitcount:        557  bytes_req:     169024  bytes_alloc:     221760
+    { call_site: [ffffffffa00b7c06] hid_report_raw_event [hid]                    } hitcount:       9378  bytes_req:     187548  bytes_alloc:     206312
+    { call_site: [ffffffffa04a580c] intel_crtc_page_flip [i915]                   } hitcount:       1519  bytes_req:     157976  bytes_alloc:     194432
+    .
+    .
+    .
+    { call_site: [ffffffff8109bd3b] sched_autogroup_create_attach                 } hitcount:          2  bytes_req:        144  bytes_alloc:        192
+    { call_site: [ffffffff81097ee8] alloc_rt_sched_group                          } hitcount:          2  bytes_req:        128  bytes_alloc:        128
+    { call_site: [ffffffff8109524a] alloc_fair_sched_group                        } hitcount:          2  bytes_req:        128  bytes_alloc:        128
+    { call_site: [ffffffff81095225] alloc_fair_sched_group                        } hitcount:          2  bytes_req:        128  bytes_alloc:        128
+    { call_site: [ffffffff81097ec2] alloc_rt_sched_group                          } hitcount:          2  bytes_req:        128  bytes_alloc:        128
+    { call_site: [ffffffff81213e80] load_elf_binary                               } hitcount:          3  bytes_req:         84  bytes_alloc:         96
+    { call_site: [ffffffff81079a2e] kthread_create_on_node                        } hitcount:          1  bytes_req:         56  bytes_alloc:         64
+    { call_site: [ffffffffa00bf6fe] hidraw_send_report [hid]                      } hitcount:          1  bytes_req:          7  bytes_alloc:          8
+    { call_site: [ffffffff8154bc62] usb_control_msg                               } hitcount:          1  bytes_req:          8  bytes_alloc:          8
+    { call_site: [ffffffffa00bf1ca] hidraw_report_event [hid]                     } hitcount:          1  bytes_req:          7  bytes_alloc:          8
+
+    Totals:
+        Hits: 66598
+        Entries: 65
+        Dropped: 0
+
+  Finally, to finish off our kmalloc example, instead of simply having
+  the hist trigger display symbolic call_sites, we can have it
+  additionally display the complete set of kernel stack traces that
+  led to each call_site.  To do that, we simply use the special value
+  'stacktrace' for the key parameter:
+
+    # echo 'hist:keys=stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
+           /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
+
+  The above trigger will use the kernel stack trace in effect when an
+  event is triggered as the key for the hash table.  This allows the
+  enumeration of every kernel callpath that led up to a particular
+  event, along with a running total of any of the event fields for
+  that event.  Here we tally bytes requested and bytes allocated for
+  every callpath in the system that led up to a kmalloc (in this case
+  every callpath to a kmalloc for a kernel compile):
+
+    # cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist
+    # trigger info: hist:keys=stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]
+
+    { stacktrace:
+         __kmalloc_track_caller+0x10b/0x1a0
+         kmemdup+0x20/0x50
+         hidraw_report_event+0x8a/0x120 [hid]
+         hid_report_raw_event+0x3ea/0x440 [hid]
+         hid_input_report+0x112/0x190 [hid]
+         hid_irq_in+0xc2/0x260 [usbhid]
+         __usb_hcd_giveback_urb+0x72/0x120
+         usb_giveback_urb_bh+0x9e/0xe0
+         tasklet_hi_action+0xf8/0x100
+         __do_softirq+0x114/0x2c0
+         irq_exit+0xa5/0xb0
+         do_IRQ+0x5a/0xf0
+         ret_from_intr+0x0/0x30
+         cpuidle_enter+0x17/0x20
+         cpu_startup_entry+0x315/0x3e0
+         rest_init+0x7c/0x80
+    } hitcount:          3  bytes_req:         21  bytes_alloc:         24
+    { stacktrace:
+         __kmalloc_track_caller+0x10b/0x1a0
+         kmemdup+0x20/0x50
+         hidraw_report_event+0x8a/0x120 [hid]
+         hid_report_raw_event+0x3ea/0x440 [hid]
+         hid_input_report+0x112/0x190 [hid]
+         hid_irq_in+0xc2/0x260 [usbhid]
+         __usb_hcd_giveback_urb+0x72/0x120
+         usb_giveback_urb_bh+0x9e/0xe0
+         tasklet_hi_action+0xf8/0x100
+         __do_softirq+0x114/0x2c0
+         irq_exit+0xa5/0xb0
+         do_IRQ+0x5a/0xf0
+         ret_from_intr+0x0/0x30
+    } hitcount:          3  bytes_req:         21  bytes_alloc:         24
+    { stacktrace:
+         kmem_cache_alloc_trace+0xeb/0x150
+         aa_alloc_task_context+0x27/0x40
+         apparmor_cred_prepare+0x1f/0x50
+         security_prepare_creds+0x16/0x20
+         prepare_creds+0xdf/0x1a0
+         SyS_capset+0xb5/0x200
+         system_call_fastpath+0x12/0x6a
+    } hitcount:          1  bytes_req:         32  bytes_alloc:         32
+    .
+    .
+    .
+    { stacktrace:
+         __kmalloc+0x11b/0x1b0
+         i915_gem_execbuffer2+0x6c/0x2c0 [i915]
+         drm_ioctl+0x349/0x670 [drm]
+         do_vfs_ioctl+0x2f0/0x4f0
+         SyS_ioctl+0x81/0xa0
+         system_call_fastpath+0x12/0x6a
+    } hitcount:      17726  bytes_req:   13944120  bytes_alloc:   19593808
+    { stacktrace:
+         __kmalloc+0x11b/0x1b0
+         load_elf_phdrs+0x76/0xa0
+         load_elf_binary+0x102/0x1650
+         search_binary_handler+0x97/0x1d0
+         do_execveat_common.isra.34+0x551/0x6e0
+         SyS_execve+0x3a/0x50
+         return_from_execve+0x0/0x23
+    } hitcount:      33348  bytes_req:   17152128  bytes_alloc:   20226048
+    { stacktrace:
+         kmem_cache_alloc_trace+0xeb/0x150
+         apparmor_file_alloc_security+0x27/0x40
+         security_file_alloc+0x16/0x20
+         get_empty_filp+0x93/0x1c0
+         path_openat+0x31/0x5f0
+         do_filp_open+0x3a/0x90
+         do_sys_open+0x128/0x220
+         SyS_open+0x1e/0x20
+         system_call_fastpath+0x12/0x6a
+    } hitcount:    4766422  bytes_req:    9532844  bytes_alloc:   38131376
+    { stacktrace:
+         __kmalloc+0x11b/0x1b0
+         seq_buf_alloc+0x1b/0x50
+         seq_read+0x2cc/0x370
+         proc_reg_read+0x3d/0x80
+         __vfs_read+0x28/0xe0
+         vfs_read+0x86/0x140
+         SyS_read+0x46/0xb0
+         system_call_fastpath+0x12/0x6a
+    } hitcount:      19133  bytes_req:   78368768  bytes_alloc:   78368768
+
+    Totals:
+        Hits: 6085872
+        Entries: 253
+        Dropped: 0
+
+  If you key a hist trigger on common_pid, for example in order to
+  gather and display sorted totals for each process, you can use the
+  special .execname modifier to display the executable names for the
+  processes in the table rather than raw pids.  The example below
+  keeps a per-process sum of total bytes read:
+
+    # echo 'hist:key=common_pid.execname:val=count:sort=count.descending' > \
+           /sys/kernel/debug/tracing/events/syscalls/sys_enter_read/trigger
+
+    # cat /sys/kernel/debug/tracing/events/syscalls/sys_enter_read/hist
+    # trigger info: hist:keys=common_pid.execname:vals=count:sort=count.descending:size=2048 [active]
+
+    { common_pid: gnome-terminal  [      3196] } hitcount:        280  count:    1093512
+    { common_pid: Xorg            [      1309] } hitcount:        525  count:     256640
+    { common_pid: compiz          [      2889] } hitcount:         59  count:     254400
+    { common_pid: bash            [      8710] } hitcount:          3  count:      66369
+    { common_pid: dbus-daemon-lau [      8703] } hitcount:         49  count:      47739
+    { common_pid: irqbalance      [      1252] } hitcount:         27  count:      27648
+    { common_pid: 01ifupdown      [      8705] } hitcount:          3  count:      17216
+    { common_pid: dbus-daemon     [       772] } hitcount:         10  count:      12396
+    { common_pid: Socket Thread   [      8342] } hitcount:         11  count:      11264
+    { common_pid: nm-dhcp-client. [      8701] } hitcount:          6  count:       7424
+    { common_pid: gmain           [      1315] } hitcount:         18  count:       6336
+    .
+    .
+    .
+    { common_pid: postgres        [      1892] } hitcount:          2  count:         32
+    { common_pid: postgres        [      1891] } hitcount:          2  count:         32
+    { common_pid: gmain           [      8704] } hitcount:          2  count:         32
+    { common_pid: upstart-dbus-br [      2740] } hitcount:         21  count:         21
+    { common_pid: nm-dispatcher.a [      8696] } hitcount:          1  count:         16
+    { common_pid: indicator-datet [      2904] } hitcount:          1  count:         16
+    { common_pid: gdbus           [      2998] } hitcount:          1  count:         16
+    { common_pid: rtkit-daemon    [      2052] } hitcount:          1  count:          8
+    { common_pid: init            [         1] } hitcount:          2  count:          2
+
+    Totals:
+        Hits: 2116
+        Entries: 51
+        Dropped: 0
+
+  Similarly, if you key a hist trigger on syscall id, for example to
+  gather and display a list of systemwide syscall hits, you can use
+  the special .syscall modifier to display the syscall names rather
+  than raw ids.  The example below keeps a running total of syscall
+  counts for the system during the run:
+
+    # echo 'hist:key=id.syscall:val=hitcount' > \
+           /sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/trigger
+
+    # cat /sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/hist
+    # trigger info: hist:keys=id.syscall:vals=hitcount:sort=hitcount:size=2048 [active]
+
+    { id: sys_fsync                     [ 74] } hitcount:          1
+    { id: sys_newuname                  [ 63] } hitcount:          1
+    { id: sys_prctl                     [157] } hitcount:          1
+    { id: sys_statfs                    [137] } hitcount:          1
+    { id: sys_symlink                   [ 88] } hitcount:          1
+    { id: sys_sendmmsg                  [307] } hitcount:          1
+    { id: sys_semctl                    [ 66] } hitcount:          1
+    { id: sys_readlink                  [ 89] } hitcount:          3
+    { id: sys_bind                      [ 49] } hitcount:          3
+    { id: sys_getsockname               [ 51] } hitcount:          3
+    { id: sys_unlink                    [ 87] } hitcount:          3
+    { id: sys_rename                    [ 82] } hitcount:          4
+    { id: unknown_syscall               [ 58] } hitcount:          4
+    { id: sys_connect                   [ 42] } hitcount:          4
+    { id: sys_getpid                    [ 39] } hitcount:          4
+    .
+    .
+    .
+    { id: sys_rt_sigprocmask            [ 14] } hitcount:        952
+    { id: sys_futex                     [202] } hitcount:       1534
+    { id: sys_write                     [  1] } hitcount:       2689
+    { id: sys_setitimer                 [ 38] } hitcount:       2797
+    { id: sys_read                      [  0] } hitcount:       3202
+    { id: sys_select                    [ 23] } hitcount:       3773
+    { id: sys_writev                    [ 20] } hitcount:       4531
+    { id: sys_poll                      [  7] } hitcount:       8314
+    { id: sys_recvmsg                   [ 47] } hitcount:      13738
+    { id: sys_ioctl                     [ 16] } hitcount:      21843
+
+    Totals:
+        Hits: 67612
+        Entries: 72
+        Dropped: 0
+
+  The syscall counts above provide a rough overall picture of system
+  call activity on the system; we can see for example that the most
+  popular system call on this system was the 'sys_ioctl' system call.
+
+  We can use 'compound' keys to refine that number and provide some
+  further insight as to which processes exactly contribute to the
+  overall ioctl count.
+
+  The command below keeps a hitcount for every unique combination of
+  system call id and pid - the end result is essentially a table
+  that keeps a per-pid sum of system call hits.  The results are
+  sorted using the system call id as the primary key, and the
+  hitcount sum as the secondary key:
+
+    # echo 'hist:key=id.syscall,common_pid.execname:val=hitcount:sort=id,hitcount' > \
+           /sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/trigger
+
+    # cat /sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/hist
+    # trigger info: hist:keys=id.syscall,common_pid.execname:vals=hitcount:sort=id.syscall,hitcount:size=2048 [active]
+
+    { id: sys_read                      [  0], common_pid: rtkit-daemon    [      1877] } hitcount:          1
+    { id: sys_read                      [  0], common_pid: gdbus           [      2976] } hitcount:          1
+    { id: sys_read                      [  0], common_pid: console-kit-dae [      3400] } hitcount:          1
+    { id: sys_read                      [  0], common_pid: postgres        [      1865] } hitcount:          1
+    { id: sys_read                      [  0], common_pid: deja-dup-monito [      3543] } hitcount:          2
+    { id: sys_read                      [  0], common_pid: NetworkManager  [       890] } hitcount:          2
+    { id: sys_read                      [  0], common_pid: evolution-calen [      3048] } hitcount:          2
+    { id: sys_read                      [  0], common_pid: postgres        [      1864] } hitcount:          2
+    { id: sys_read                      [  0], common_pid: nm-applet       [      3022] } hitcount:          2
+    { id: sys_read                      [  0], common_pid: whoopsie        [      1212] } hitcount:          2
+    .
+    .
+    .
+    { id: sys_ioctl                     [ 16], common_pid: bash            [      8479] } hitcount:          1
+    { id: sys_ioctl                     [ 16], common_pid: bash            [      3472] } hitcount:         12
+    { id: sys_ioctl                     [ 16], common_pid: gnome-terminal  [      3199] } hitcount:         16
+    { id: sys_ioctl                     [ 16], common_pid: Xorg            [      1267] } hitcount:       1808
+    { id: sys_ioctl                     [ 16], common_pid: compiz          [      2994] } hitcount:       5580
+    .
+    .
+    .
+    { id: sys_waitid                    [247], common_pid: upstart-dbus-br [      2690] } hitcount:          3
+    { id: sys_waitid                    [247], common_pid: upstart-dbus-br [      2688] } hitcount:         16
+    { id: sys_inotify_add_watch         [254], common_pid: gmain           [       975] } hitcount:          2
+    { id: sys_inotify_add_watch         [254], common_pid: gmain           [      3204] } hitcount:          4
+    { id: sys_inotify_add_watch         [254], common_pid: gmain           [      2888] } hitcount:          4
+    { id: sys_inotify_add_watch         [254], common_pid: gmain           [      3003] } hitcount:          4
+    { id: sys_inotify_add_watch         [254], common_pid: gmain           [      2873] } hitcount:          4
+    { id: sys_inotify_add_watch         [254], common_pid: gmain           [      3196] } hitcount:          6
+    { id: sys_openat                    [257], common_pid: java            [      2623] } hitcount:          2
+    { id: sys_eventfd2                  [290], common_pid: ibus-ui-gtk3    [      2760] } hitcount:          4
+    { id: sys_eventfd2                  [290], common_pid: compiz          [      2994] } hitcount:          6
+
+    Totals:
+        Hits: 31536
+        Entries: 323
+        Dropped: 0
+
+  The above list does give us a breakdown of the ioctl syscall by
+  pid, but it also gives us quite a bit more than that, which we
+  don't really care about at the moment.  Since we know the syscall
+  id for sys_ioctl (16, displayed next to the sys_ioctl name), we
+  can use that to filter out all the other syscalls:
+
+    # echo 'hist:key=id.syscall,common_pid.execname:val=hitcount:sort=id,hitcount if id == 16' > \
+           /sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/trigger
+
+    # cat /sys/kernel/debug/tracing/events/raw_syscalls/sys_enter/hist
+    # trigger info: hist:keys=id.syscall,common_pid.execname:vals=hitcount:sort=id.syscall,hitcount:size=2048 if id == 16 [active]
+
+    { id: sys_ioctl                     [ 16], common_pid: gmain           [      2769] } hitcount:          1
+    { id: sys_ioctl                     [ 16], common_pid: evolution-addre [      8571] } hitcount:          1
+    { id: sys_ioctl                     [ 16], common_pid: gmain           [      3003] } hitcount:          1
+    { id: sys_ioctl                     [ 16], common_pid: gmain           [      2781] } hitcount:          1
+    { id: sys_ioctl                     [ 16], common_pid: gmain           [      2829] } hitcount:          1
+    { id: sys_ioctl                     [ 16], common_pid: bash            [      8726] } hitcount:          1
+    { id: sys_ioctl                     [ 16], common_pid: bash            [      8508] } hitcount:          1
+    { id: sys_ioctl                     [ 16], common_pid: gmain           [      2970] } hitcount:          1
+    { id: sys_ioctl                     [ 16], common_pid: gmain           [      2768] } hitcount:          1
+    .
+    .
+    .
+    { id: sys_ioctl                     [ 16], common_pid: pool            [      8559] } hitcount:         45
+    { id: sys_ioctl                     [ 16], common_pid: pool            [      8555] } hitcount:         48
+    { id: sys_ioctl                     [ 16], common_pid: pool            [      8551] } hitcount:         48
+    { id: sys_ioctl                     [ 16], common_pid: avahi-daemon    [       896] } hitcount:         66
+    { id: sys_ioctl                     [ 16], common_pid: Xorg            [      1267] } hitcount:      26674
+    { id: sys_ioctl                     [ 16], common_pid: compiz          [      2994] } hitcount:      73443
+
+    Totals:
+        Hits: 101162
+        Entries: 103
+        Dropped: 0
+
+  The above output shows that 'compiz' and 'Xorg' are far and away
+  the heaviest ioctl callers (which might lead to questions about
+  whether they really need to be making all those calls and to
+  possible avenues for further investigation).
+
+  The compound key examples used a key and a sum value (hitcount) to
+  sort the output, but we can just as easily use two keys instead.
+  Here's an example where we use a compound key composed of the
+  common_pid and size event fields.  Sorting with pid as the primary
+  key and 'size' as the secondary key allows us to display an
+  ordered summary of the recvfrom sizes, with counts, received by
+  each process:
+
+    # echo 'hist:key=common_pid.execname,size:val=hitcount:sort=common_pid,size' > \
+           /sys/kernel/debug/tracing/events/syscalls/sys_enter_recvfrom/trigger
+
+    # cat /sys/kernel/debug/tracing/events/syscalls/sys_enter_recvfrom/hist
+    # trigger info: hist:keys=common_pid.execname,size:vals=hitcount:sort=common_pid.execname,size:size=2048 [active]
+
+    { common_pid: smbd            [       784], size:          4 } hitcount:          1
+    { common_pid: dnsmasq         [      1412], size:       4096 } hitcount:        672
+    { common_pid: postgres        [      1796], size:       1000 } hitcount:          6
+    { common_pid: postgres        [      1867], size:       1000 } hitcount:         10
+    { common_pid: bamfdaemon      [      2787], size:         28 } hitcount:          2
+    { common_pid: bamfdaemon      [      2787], size:      14360 } hitcount:          1
+    { common_pid: compiz          [      2994], size:          8 } hitcount:          1
+    { common_pid: compiz          [      2994], size:         20 } hitcount:         11
+    { common_pid: gnome-terminal  [      3199], size:          4 } hitcount:          2
+    { common_pid: firefox         [      8817], size:          4 } hitcount:          1
+    { common_pid: firefox         [      8817], size:          8 } hitcount:          5
+    { common_pid: firefox         [      8817], size:        588 } hitcount:          2
+    { common_pid: firefox         [      8817], size:        628 } hitcount:          1
+    { common_pid: firefox         [      8817], size:       6944 } hitcount:          1
+    { common_pid: firefox         [      8817], size:     408880 } hitcount:          2
+    { common_pid: firefox         [      8822], size:          8 } hitcount:          2
+    { common_pid: firefox         [      8822], size:        160 } hitcount:          2
+    { common_pid: firefox         [      8822], size:        320 } hitcount:          2
+    { common_pid: firefox         [      8822], size:        352 } hitcount:          1
+    .
+    .
+    .
+    { common_pid: pool            [      8923], size:       1960 } hitcount:         10
+    { common_pid: pool            [      8923], size:       2048 } hitcount:         10
+    { common_pid: pool            [      8924], size:       1960 } hitcount:         10
+    { common_pid: pool            [      8924], size:       2048 } hitcount:         10
+    { common_pid: pool            [      8928], size:       1964 } hitcount:          4
+    { common_pid: pool            [      8928], size:       1965 } hitcount:          2
+    { common_pid: pool            [      8928], size:       2048 } hitcount:          6
+    { common_pid: pool            [      8929], size:       1982 } hitcount:          1
+    { common_pid: pool            [      8929], size:       2048 } hitcount:          1
+
+    Totals:
+        Hits: 2016
+        Entries: 224
+        Dropped: 0
+
+  The above example also illustrates the fact that although a compound
+  key is treated as a single entity for hashing purposes, the sub-keys
+  it's composed of can be accessed independently.
+
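+  As an untested sketch of that independence (output omitted), the
+  same compound key can be sorted on just the 'size' sub-key, in
+  descending order, while still hashing on both fields:
+
+    # echo 'hist:key=common_pid.execname,size:val=hitcount:sort=size.descending' > \
+           /sys/kernel/debug/tracing/events/syscalls/sys_enter_recvfrom/trigger
+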
+  The next example uses a string field as the hash key and
+  demonstrates how you can manually pause and continue a hist trigger.
+  In this example, we'll aggregate fork counts; since we don't expect
+  a large number of entries in the hash table, we'll drop its size to
+  a much smaller number, say 256:
+
+    # echo 'hist:key=child_comm:val=hitcount:size=256' > \
+           /sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
+
+    # cat /sys/kernel/debug/tracing/events/sched/sched_process_fork/hist
+    # trigger info: hist:keys=child_comm:vals=hitcount:sort=hitcount:size=256 [active]
+
+    { child_comm: dconf worker                        } hitcount:          1
+    { child_comm: ibus-daemon                         } hitcount:          1
+    { child_comm: whoopsie                            } hitcount:          1
+    { child_comm: smbd                                } hitcount:          1
+    { child_comm: gdbus                               } hitcount:          1
+    { child_comm: kthreadd                            } hitcount:          1
+    { child_comm: dconf worker                        } hitcount:          1
+    { child_comm: evolution-alarm                     } hitcount:          2
+    { child_comm: Socket Thread                       } hitcount:          2
+    { child_comm: postgres                            } hitcount:          2
+    { child_comm: bash                                } hitcount:          3
+    { child_comm: compiz                              } hitcount:          3
+    { child_comm: evolution-sourc                     } hitcount:          4
+    { child_comm: dhclient                            } hitcount:          4
+    { child_comm: pool                                } hitcount:          5
+    { child_comm: nm-dispatcher.a                     } hitcount:          8
+    { child_comm: firefox                             } hitcount:          8
+    { child_comm: dbus-daemon                         } hitcount:          8
+    { child_comm: glib-pacrunner                      } hitcount:         10
+    { child_comm: evolution                           } hitcount:         23
+
+    Totals:
+        Hits: 89
+        Entries: 20
+        Dropped: 0
+
+  If we want to pause the hist trigger, we can simply append :pause to
+  the command that started the trigger.  Notice that the trigger info
+  displays as [paused]:
+
+    # echo 'hist:key=child_comm:val=hitcount:size=256:pause' >> \
+           /sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
+
+    # cat /sys/kernel/debug/tracing/events/sched/sched_process_fork/hist
+    # trigger info: hist:keys=child_comm:vals=hitcount:sort=hitcount:size=256 [paused]
+
+    { child_comm: dconf worker                        } hitcount:          1
+    { child_comm: kthreadd                            } hitcount:          1
+    { child_comm: dconf worker                        } hitcount:          1
+    { child_comm: gdbus                               } hitcount:          1
+    { child_comm: ibus-daemon                         } hitcount:          1
+    { child_comm: Socket Thread                       } hitcount:          2
+    { child_comm: evolution-alarm                     } hitcount:          2
+    { child_comm: smbd                                } hitcount:          2
+    { child_comm: bash                                } hitcount:          3
+    { child_comm: whoopsie                            } hitcount:          3
+    { child_comm: compiz                              } hitcount:          3
+    { child_comm: evolution-sourc                     } hitcount:          4
+    { child_comm: pool                                } hitcount:          5
+    { child_comm: postgres                            } hitcount:          6
+    { child_comm: firefox                             } hitcount:          8
+    { child_comm: dhclient                            } hitcount:         10
+    { child_comm: emacs                               } hitcount:         12
+    { child_comm: dbus-daemon                         } hitcount:         20
+    { child_comm: nm-dispatcher.a                     } hitcount:         20
+    { child_comm: evolution                           } hitcount:         35
+    { child_comm: glib-pacrunner                      } hitcount:         59
+
+    Totals:
+        Hits: 199
+        Entries: 21
+        Dropped: 0
+
+  To manually continue having the trigger aggregate events, append
+  :cont instead.  Notice that the trigger info displays as [active]
+  again, and the data has changed:
+
+    # echo 'hist:key=child_comm:val=hitcount:size=256:cont' >> \
+           /sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
+
+    # cat /sys/kernel/debug/tracing/events/sched/sched_process_fork/hist
+    # trigger info: hist:keys=child_comm:vals=hitcount:sort=hitcount:size=256 [active]
+
+    { child_comm: dconf worker                        } hitcount:          1
+    { child_comm: dconf worker                        } hitcount:          1
+    { child_comm: kthreadd                            } hitcount:          1
+    { child_comm: gdbus                               } hitcount:          1
+    { child_comm: ibus-daemon                         } hitcount:          1
+    { child_comm: Socket Thread                       } hitcount:          2
+    { child_comm: evolution-alarm                     } hitcount:          2
+    { child_comm: smbd                                } hitcount:          2
+    { child_comm: whoopsie                            } hitcount:          3
+    { child_comm: compiz                              } hitcount:          3
+    { child_comm: evolution-sourc                     } hitcount:          4
+    { child_comm: bash                                } hitcount:          5
+    { child_comm: pool                                } hitcount:          5
+    { child_comm: postgres                            } hitcount:          6
+    { child_comm: firefox                             } hitcount:          8
+    { child_comm: dhclient                            } hitcount:         11
+    { child_comm: emacs                               } hitcount:         12
+    { child_comm: dbus-daemon                         } hitcount:         22
+    { child_comm: nm-dispatcher.a                     } hitcount:         22
+    { child_comm: evolution                           } hitcount:         35
+    { child_comm: glib-pacrunner                      } hitcount:         59
+
+    Totals:
+        Hits: 206
+        Entries: 21
+        Dropped: 0
+
+  The previous example showed how to pause and resume a hist trigger
+  by appending ':pause' and ':cont' to the hist trigger command.  A
+  hist trigger can also be started in a paused state by initially
+  starting the trigger with ':pause' appended.  This allows you to
+  start the trigger only when you're ready to start collecting data
+  and not before.  For example, you could start the trigger in a
+  paused state, then unpause it and do something you want to measure,
+  then pause the trigger again when done.
+
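+  As an untested sketch of that manual workflow (reusing the
+  sched_process_fork trigger from above; output omitted), the trigger
+  can be created in a paused state, resumed around the interval of
+  interest, and then paused again:
+
+    # echo 'hist:key=child_comm:val=hitcount:size=256:pause' > \
+           /sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
+
+    ... prepare the workload ...
+
+    # echo 'hist:key=child_comm:val=hitcount:size=256:cont' >> \
+           /sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
+
+    ... run the workload ...
+
+    # echo 'hist:key=child_comm:val=hitcount:size=256:pause' >> \
+           /sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
+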
+  Of course, doing this manually can be difficult and error-prone, but
+  it is possible to automatically start and stop a hist trigger based
+  on some condition, via the enable_hist and disable_hist triggers.
+
+  For example, suppose we wanted to take a look at the relative
+  weights in terms of skb length for each callpath that leads to a
+  netif_receive_skb event when downloading a decent-sized file using
+  wget.
+
+  First we set up an initially paused stacktrace trigger on the
+  netif_receive_skb event:
+
+    # echo 'hist:key=stacktrace:vals=len:pause' > \
+           /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+
+  Next, we set up an 'enable_hist' trigger on the sched_process_exec
+  event, with an 'if filename==/usr/bin/wget' filter.  The effect of
+  this new trigger is that it will 'unpause' the hist trigger we just
+  set up on netif_receive_skb if and only if it sees a
+  sched_process_exec event with a filename of '/usr/bin/wget'.  When
+  that happens, all netif_receive_skb events are aggregated into a
+  hash table keyed on stacktrace:
+
+    # echo 'enable_hist:net:netif_receive_skb if filename==/usr/bin/wget' > \
+           /sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
+
+  The aggregation continues until the netif_receive_skb hist trigger
+  is paused again, which is what the following disable_hist trigger
+  does by creating a similar setup on the sched_process_exit event,
+  using the filter 'comm==wget':
+
+    # echo 'disable_hist:net:netif_receive_skb if comm==wget' > \
+           /sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
+
+  Whenever a process exits and the comm field of the disable_hist
+  trigger filter matches 'comm==wget', the netif_receive_skb hist
+  trigger is disabled.
+
+  The overall effect is that netif_receive_skb events are aggregated
+  into the hash table for only the duration of the wget.  Executing a
+  wget command and then listing the 'hist' file will display the
+  output generated by the wget command:
+
+    $ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz
+
+    # cat /sys/kernel/debug/tracing/events/net/netif_receive_skb/hist
+    # trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
+
+    { stacktrace:
+         __netif_receive_skb_core+0x46d/0x990
+         __netif_receive_skb+0x18/0x60
+         netif_receive_skb_internal+0x23/0x90
+         napi_gro_receive+0xc8/0x100
+         ieee80211_deliver_skb+0xd6/0x270 [mac80211]
+         ieee80211_rx_handlers+0xccf/0x22f0 [mac80211]
+         ieee80211_prepare_and_rx_handle+0x4e7/0xc40 [mac80211]
+         ieee80211_rx+0x31d/0x900 [mac80211]
+         iwlagn_rx_reply_rx+0x3db/0x6f0 [iwldvm]
+         iwl_rx_dispatch+0x8e/0xf0 [iwldvm]
+         iwl_pcie_irq_handler+0xe3c/0x12f0 [iwlwifi]
+         irq_thread_fn+0x20/0x50
+         irq_thread+0x11f/0x150
+         kthread+0xd2/0xf0
+         ret_from_fork+0x42/0x70
+    } hitcount:         85  len:      28884
+    { stacktrace:
+         __netif_receive_skb_core+0x46d/0x990
+         __netif_receive_skb+0x18/0x60
+         netif_receive_skb_internal+0x23/0x90
+         napi_gro_complete+0xa4/0xe0
+         dev_gro_receive+0x23a/0x360
+         napi_gro_receive+0x30/0x100
+         ieee80211_deliver_skb+0xd6/0x270 [mac80211]
+         ieee80211_rx_handlers+0xccf/0x22f0 [mac80211]
+         ieee80211_prepare_and_rx_handle+0x4e7/0xc40 [mac80211]
+         ieee80211_rx+0x31d/0x900 [mac80211]
+         iwlagn_rx_reply_rx+0x3db/0x6f0 [iwldvm]
+         iwl_rx_dispatch+0x8e/0xf0 [iwldvm]
+         iwl_pcie_irq_handler+0xe3c/0x12f0 [iwlwifi]
+         irq_thread_fn+0x20/0x50
+         irq_thread+0x11f/0x150
+         kthread+0xd2/0xf0
+    } hitcount:         98  len:     664329
+    { stacktrace:
+         __netif_receive_skb_core+0x46d/0x990
+         __netif_receive_skb+0x18/0x60
+         process_backlog+0xa8/0x150
+         net_rx_action+0x15d/0x340
+         __do_softirq+0x114/0x2c0
+         do_softirq_own_stack+0x1c/0x30
+         do_softirq+0x65/0x70
+         __local_bh_enable_ip+0xb5/0xc0
+         ip_finish_output+0x1f4/0x840
+         ip_output+0x6b/0xc0
+         ip_local_out_sk+0x31/0x40
+         ip_send_skb+0x1a/0x50
+         udp_send_skb+0x173/0x2a0
+         udp_sendmsg+0x2bf/0x9f0
+         inet_sendmsg+0x64/0xa0
+         sock_sendmsg+0x3d/0x50
+    } hitcount:        115  len:      13030
+    { stacktrace:
+         __netif_receive_skb_core+0x46d/0x990
+         __netif_receive_skb+0x18/0x60
+         netif_receive_skb_internal+0x23/0x90
+         napi_gro_complete+0xa4/0xe0
+         napi_gro_flush+0x6d/0x90
+         iwl_pcie_irq_handler+0x92a/0x12f0 [iwlwifi]
+         irq_thread_fn+0x20/0x50
+         irq_thread+0x11f/0x150
+         kthread+0xd2/0xf0
+         ret_from_fork+0x42/0x70
+    } hitcount:        934  len:    5512212
+
+    Totals:
+        Hits: 1232
+        Entries: 4
+        Dropped: 0
+
+  The above shows all the netif_receive_skb callpaths and their total
+  lengths for the duration of the wget command.
+
+  The 'clear' hist trigger param can be used to clear the hash table.
+  Suppose we wanted to try another run of the previous example but
+  this time also wanted to see the complete list of events that went
+  into the histogram.  In order to avoid having to set everything up
+  again, we can just clear the histogram first:
+
+    # echo 'hist:key=stacktrace:vals=len:clear' >> \
+           /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+
+  Just to verify that it is in fact cleared, here's what we now see in
+  the hist file:
+
+    # cat /sys/kernel/debug/tracing/events/net/netif_receive_skb/hist
+    # trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
+
+    Totals:
+        Hits: 0
+        Entries: 0
+        Dropped: 0
+
+  Since we want to see the detailed list of every netif_receive_skb
+  event occurring during the new run, which are in fact the same
+  events being aggregated into the hash table, we add 'enable_event'
+  and 'disable_event' triggers to the triggering sched_process_exec
+  and sched_process_exit events, like so:
+
+    # echo 'enable_event:net:netif_receive_skb if filename==/usr/bin/wget' > \
+           /sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
+
+    # echo 'disable_event:net:netif_receive_skb if comm==wget' > \
+           /sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
+
+  If you read the trigger files for the sched_process_exec and
+  sched_process_exit events, you should see two triggers for each:
+  one enabling/disabling the hist aggregation and the other
+  enabling/disabling the logging of events:
+
+    # cat /sys/kernel/debug/tracing/events/sched/sched_process_exec/trigger
+    enable_event:net:netif_receive_skb:unlimited if filename==/usr/bin/wget
+    enable_hist:net:netif_receive_skb:unlimited if filename==/usr/bin/wget
+
+    # cat /sys/kernel/debug/tracing/events/sched/sched_process_exit/trigger
+    disable_event:net:netif_receive_skb:unlimited if comm==wget
+    disable_hist:net:netif_receive_skb:unlimited if comm==wget
+
+  In other words, whenever either of the sched_process_exec or
+  sched_process_exit events is hit and matches 'wget', it enables or
+  disables both the histogram and the event log, and what you end up
+  with is a hash table and set of events just covering the specified
+  duration.  Run the wget command again:
+
+    $ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz
+
+  Displaying the 'hist' file should show something similar to what you
+  saw in the last run, but this time you should also see the
+  individual events in the trace file:
+
+    # cat /sys/kernel/debug/tracing/trace
+
+    # tracer: nop
+    #
+    # entries-in-buffer/entries-written: 183/1426   #P:4
+    #
+    #                              _-----=> irqs-off
+    #                             / _----=> need-resched
+    #                            | / _---=> hardirq/softirq
+    #                            || / _--=> preempt-depth
+    #                            ||| /     delay
+    #           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+    #              | |       |   ||||       |         |
+                wget-15108 [000] ..s1 31769.606929: netif_receive_skb: dev=lo skbaddr=ffff88009c353100 len=60
+                wget-15108 [000] ..s1 31769.606999: netif_receive_skb: dev=lo skbaddr=ffff88009c353200 len=60
+             dnsmasq-1382  [000] ..s1 31769.677652: netif_receive_skb: dev=lo skbaddr=ffff88009c352b00 len=130
+             dnsmasq-1382  [000] ..s1 31769.685917: netif_receive_skb: dev=lo skbaddr=ffff88009c352200 len=138
+    ##### CPU 2 buffer started ####
+      irq/29-iwlwifi-559   [002] ..s. 31772.031529: netif_receive_skb: dev=wlan0 skbaddr=ffff88009d433d00 len=2948
+      irq/29-iwlwifi-559   [002] ..s. 31772.031572: netif_receive_skb: dev=wlan0 skbaddr=ffff88009d432200 len=1500
+      irq/29-iwlwifi-559   [002] ..s. 31772.032196: netif_receive_skb: dev=wlan0 skbaddr=ffff88009d433100 len=2948
+      irq/29-iwlwifi-559   [002] ..s. 31772.032761: netif_receive_skb: dev=wlan0 skbaddr=ffff88009d433000 len=2948
+      irq/29-iwlwifi-559   [002] ..s. 31772.033220: netif_receive_skb: dev=wlan0 skbaddr=ffff88009d432e00 len=1500
+    .
+    .
+    .
+
+  The following example demonstrates how multiple hist triggers can be
+  attached to a given event.  This capability can be useful for
+  creating a set of different summaries derived from the same set of
+  events, or for comparing the effects of different filters, among
+  other things.
+
+    # echo 'hist:keys=skbaddr.hex:vals=len if len < 0' >> \
+           /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+    # echo 'hist:keys=skbaddr.hex:vals=len if len > 4096' >> \
+           /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+    # echo 'hist:keys=skbaddr.hex:vals=len if len == 256' >> \
+           /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+    # echo 'hist:keys=skbaddr.hex:vals=len' >> \
+           /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+    # echo 'hist:keys=len:vals=common_preempt_count' >> \
+           /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+
+  The above set of commands creates four triggers differing only in
+  their filters, along with a completely different though fairly
+  nonsensical trigger.  Note that in order to append multiple hist
+  triggers to the same file, you should use the '>>' operator to
+  append them ('>' will also add the new hist trigger, but will remove
+  any existing hist triggers beforehand).
+
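+  As a related, untested aside (removal syntax borrowed from the
+  generic event trigger mechanism), an individual hist trigger can
+  later be dropped from such a set by echoing the same trigger string
+  prefixed with '!', e.g. to remove just the 'len == 256' variant
+  while leaving the others in place:
+
+    # echo '!hist:keys=skbaddr.hex:vals=len if len == 256' >> \
+           /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+
+  For the display below, all five triggers are left attached.
+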
+  Displaying the contents of the 'hist' file for the event shows the
+  contents of all five histograms:
+
+    # cat /sys/kernel/debug/tracing/events/net/netif_receive_skb/hist
+
+    # event histogram
+    #
+    # trigger info: hist:keys=len:vals=hitcount,common_preempt_count:sort=hitcount:size=2048 [active]
+    #
+
+    { len:        176 } hitcount:          1  common_preempt_count:          0
+    { len:        223 } hitcount:          1  common_preempt_count:          0
+    { len:       4854 } hitcount:          1  common_preempt_count:          0
+    { len:        395 } hitcount:          1  common_preempt_count:          0
+    { len:        177 } hitcount:          1  common_preempt_count:          0
+    { len:        446 } hitcount:          1  common_preempt_count:          0
+    { len:       1601 } hitcount:          1  common_preempt_count:          0
+    .
+    .
+    .
+    { len:       1280 } hitcount:         66  common_preempt_count:          0
+    { len:        116 } hitcount:         81  common_preempt_count:         40
+    { len:        708 } hitcount:        112  common_preempt_count:          0
+    { len:         46 } hitcount:        221  common_preempt_count:          0
+    { len:       1264 } hitcount:        458  common_preempt_count:          0
+
+    Totals:
+        Hits: 1428
+        Entries: 147
+        Dropped: 0
+
+
+    # event histogram
+    #
+    # trigger info: hist:keys=skbaddr.hex:vals=hitcount,len:sort=hitcount:size=2048 [active]
+    #
+
+    { skbaddr: ffff8800baee5e00 } hitcount:          1  len:        130
+    { skbaddr: ffff88005f3d5600 } hitcount:          1  len:       1280
+    { skbaddr: ffff88005f3d4900 } hitcount:          1  len:       1280
+    { skbaddr: ffff88009fed6300 } hitcount:          1  len:        115
+    { skbaddr: ffff88009fe0ad00 } hitcount:          1  len:        115
+    { skbaddr: ffff88008cdb1900 } hitcount:          1  len:         46
+    { skbaddr: ffff880064b5ef00 } hitcount:          1  len:        118
+    { skbaddr: ffff880044e3c700 } hitcount:          1  len:         60
+    { skbaddr: ffff880100065900 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d46bd500 } hitcount:          1  len:        116
+    { skbaddr: ffff88005f3d5f00 } hitcount:          1  len:       1280
+    { skbaddr: ffff880100064700 } hitcount:          1  len:        365
+    { skbaddr: ffff8800badb6f00 } hitcount:          1  len:         60
+    .
+    .
+    .
+    { skbaddr: ffff88009fe0be00 } hitcount:         27  len:      24677
+    { skbaddr: ffff88009fe0a400 } hitcount:         27  len:      23052
+    { skbaddr: ffff88009fe0b700 } hitcount:         31  len:      25589
+    { skbaddr: ffff88009fe0b600 } hitcount:         32  len:      27326
+    { skbaddr: ffff88006a462800 } hitcount:         68  len:      71678
+    { skbaddr: ffff88006a463700 } hitcount:         70  len:      72678
+    { skbaddr: ffff88006a462b00 } hitcount:         71  len:      77589
+    { skbaddr: ffff88006a463600 } hitcount:         73  len:      71307
+    { skbaddr: ffff88006a462200 } hitcount:         81  len:      81032
+
+    Totals:
+        Hits: 1451
+        Entries: 318
+        Dropped: 0
+
+
+    # event histogram
+    #
+    # trigger info: hist:keys=skbaddr.hex:vals=hitcount,len:sort=hitcount:size=2048 if len == 256 [active]
+    #
+
+
+    Totals:
+        Hits: 0
+        Entries: 0
+        Dropped: 0
+
+
+    # event histogram
+    #
+    # trigger info: hist:keys=skbaddr.hex:vals=hitcount,len:sort=hitcount:size=2048 if len > 4096 [active]
+    #
+
+    { skbaddr: ffff88009fd2c300 } hitcount:          1  len:       7212
+    { skbaddr: ffff8800d2bcce00 } hitcount:          1  len:       7212
+    { skbaddr: ffff8800d2bcd700 } hitcount:          1  len:       7212
+    { skbaddr: ffff8800d2bcda00 } hitcount:          1  len:      21492
+    { skbaddr: ffff8800ae2e2d00 } hitcount:          1  len:       7212
+    { skbaddr: ffff8800d2bcdb00 } hitcount:          1  len:       7212
+    { skbaddr: ffff88006a4df500 } hitcount:          1  len:       4854
+    { skbaddr: ffff88008ce47b00 } hitcount:          1  len:      18636
+    { skbaddr: ffff8800ae2e2200 } hitcount:          1  len:      12924
+    { skbaddr: ffff88005f3e1000 } hitcount:          1  len:       4356
+    { skbaddr: ffff8800d2bcdc00 } hitcount:          2  len:      24420
+    { skbaddr: ffff8800d2bcc200 } hitcount:          2  len:      12996
+
+    Totals:
+        Hits: 14
+        Entries: 12
+        Dropped: 0
+
+
+    # event histogram
+    #
+    # trigger info: hist:keys=skbaddr.hex:vals=hitcount,len:sort=hitcount:size=2048 if len < 0 [active]
+    #
+
+
+    Totals:
+        Hits: 0
+        Entries: 0
+        Dropped: 0
+
+  Named triggers can be used to have triggers share a common set of
+  histogram data.  This capability is mostly useful for combining the
+  output of events generated by tracepoints contained inside inline
+  functions, but names can be used in a hist trigger on any event.
+  For example, these two triggers when hit will update the same 'len'
+  field in the shared 'foo' histogram data:
+
+    # echo 'hist:name=foo:keys=skbaddr.hex:vals=len' > \
+           /sys/kernel/debug/tracing/events/net/netif_receive_skb/trigger
+    # echo 'hist:name=foo:keys=skbaddr.hex:vals=len' > \
+           /sys/kernel/debug/tracing/events/net/netif_rx/trigger
+
+  You can see that they're updating common histogram data by reading
+  each event's hist files at the same time:
+
+    # cat /sys/kernel/debug/tracing/events/net/netif_receive_skb/hist;
+      cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
+
+    # event histogram
+    #
+    # trigger info: hist:name=foo:keys=skbaddr.hex:vals=hitcount,len:sort=hitcount:size=2048 [active]
+    #
+
+    { skbaddr: ffff88000ad53500 } hitcount:          1  len:         46
+    { skbaddr: ffff8800af5a1500 } hitcount:          1  len:         76
+    { skbaddr: ffff8800d62a1900 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d2bccb00 } hitcount:          1  len:        468
+    { skbaddr: ffff8800d3c69900 } hitcount:          1  len:         46
+    { skbaddr: ffff88009ff09100 } hitcount:          1  len:         52
+    { skbaddr: ffff88010f13ab00 } hitcount:          1  len:        168
+    { skbaddr: ffff88006a54f400 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d2bcc500 } hitcount:          1  len:        260
+    { skbaddr: ffff880064505000 } hitcount:          1  len:         46
+    { skbaddr: ffff8800baf24e00 } hitcount:          1  len:         32
+    { skbaddr: ffff88009fe0ad00 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d3edff00 } hitcount:          1  len:         44
+    { skbaddr: ffff88009fe0b400 } hitcount:          1  len:        168
+    { skbaddr: ffff8800a1c55a00 } hitcount:          1  len:         40
+    { skbaddr: ffff8800d2bcd100 } hitcount:          1  len:         40
+    { skbaddr: ffff880064505f00 } hitcount:          1  len:        174
+    { skbaddr: ffff8800a8bff200 } hitcount:          1  len:        160
+    { skbaddr: ffff880044e3cc00 } hitcount:          1  len:         76
+    { skbaddr: ffff8800a8bfe700 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d2bcdc00 } hitcount:          1  len:         32
+    { skbaddr: ffff8800a1f64800 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d2bcde00 } hitcount:          1  len:        988
+    { skbaddr: ffff88006a5dea00 } hitcount:          1  len:         46
+    { skbaddr: ffff88002e37a200 } hitcount:          1  len:         44
+    { skbaddr: ffff8800a1f32c00 } hitcount:          2  len:        676
+    { skbaddr: ffff88000ad52600 } hitcount:          2  len:        107
+    { skbaddr: ffff8800a1f91e00 } hitcount:          2  len:         92
+    { skbaddr: ffff8800af5a0200 } hitcount:          2  len:        142
+    { skbaddr: ffff8800d2bcc600 } hitcount:          2  len:        220
+    { skbaddr: ffff8800ba36f500 } hitcount:          2  len:         92
+    { skbaddr: ffff8800d021f800 } hitcount:          2  len:         92
+    { skbaddr: ffff8800a1f33600 } hitcount:          2  len:        675
+    { skbaddr: ffff8800a8bfff00 } hitcount:          3  len:        138
+    { skbaddr: ffff8800d62a1300 } hitcount:          3  len:        138
+    { skbaddr: ffff88002e37a100 } hitcount:          4  len:        184
+    { skbaddr: ffff880064504400 } hitcount:          4  len:        184
+    { skbaddr: ffff8800a8bfec00 } hitcount:          4  len:        184
+    { skbaddr: ffff88000ad53700 } hitcount:          5  len:        230
+    { skbaddr: ffff8800d2bcdb00 } hitcount:          5  len:        196
+    { skbaddr: ffff8800a1f90000 } hitcount:          6  len:        276
+    { skbaddr: ffff88006a54f900 } hitcount:          6  len:        276
+
+    Totals:
+        Hits: 81
+        Entries: 42
+        Dropped: 0
+    # event histogram
+    #
+    # trigger info: hist:name=foo:keys=skbaddr.hex:vals=hitcount,len:sort=hitcount:size=2048 [active]
+    #
+
+    { skbaddr: ffff88000ad53500 } hitcount:          1  len:         46
+    { skbaddr: ffff8800af5a1500 } hitcount:          1  len:         76
+    { skbaddr: ffff8800d62a1900 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d2bccb00 } hitcount:          1  len:        468
+    { skbaddr: ffff8800d3c69900 } hitcount:          1  len:         46
+    { skbaddr: ffff88009ff09100 } hitcount:          1  len:         52
+    { skbaddr: ffff88010f13ab00 } hitcount:          1  len:        168
+    { skbaddr: ffff88006a54f400 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d2bcc500 } hitcount:          1  len:        260
+    { skbaddr: ffff880064505000 } hitcount:          1  len:         46
+    { skbaddr: ffff8800baf24e00 } hitcount:          1  len:         32
+    { skbaddr: ffff88009fe0ad00 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d3edff00 } hitcount:          1  len:         44
+    { skbaddr: ffff88009fe0b400 } hitcount:          1  len:        168
+    { skbaddr: ffff8800a1c55a00 } hitcount:          1  len:         40
+    { skbaddr: ffff8800d2bcd100 } hitcount:          1  len:         40
+    { skbaddr: ffff880064505f00 } hitcount:          1  len:        174
+    { skbaddr: ffff8800a8bff200 } hitcount:          1  len:        160
+    { skbaddr: ffff880044e3cc00 } hitcount:          1  len:         76
+    { skbaddr: ffff8800a8bfe700 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d2bcdc00 } hitcount:          1  len:         32
+    { skbaddr: ffff8800a1f64800 } hitcount:          1  len:         46
+    { skbaddr: ffff8800d2bcde00 } hitcount:          1  len:        988
+    { skbaddr: ffff88006a5dea00 } hitcount:          1  len:         46
+    { skbaddr: ffff88002e37a200 } hitcount:          1  len:         44
+    { skbaddr: ffff8800a1f32c00 } hitcount:          2  len:        676
+    { skbaddr: ffff88000ad52600 } hitcount:          2  len:        107
+    { skbaddr: ffff8800a1f91e00 } hitcount:          2  len:         92
+    { skbaddr: ffff8800af5a0200 } hitcount:          2  len:        142
+    { skbaddr: ffff8800d2bcc600 } hitcount:          2  len:        220
+    { skbaddr: ffff8800ba36f500 } hitcount:          2  len:         92
+    { skbaddr: ffff8800d021f800 } hitcount:          2  len:         92
+    { skbaddr: ffff8800a1f33600 } hitcount:          2  len:        675
+    { skbaddr: ffff8800a8bfff00 } hitcount:          3  len:        138
+    { skbaddr: ffff8800d62a1300 } hitcount:          3  len:        138
+    { skbaddr: ffff88002e37a100 } hitcount:          4  len:        184
+    { skbaddr: ffff880064504400 } hitcount:          4  len:        184
+    { skbaddr: ffff8800a8bfec00 } hitcount:          4  len:        184
+    { skbaddr: ffff88000ad53700 } hitcount:          5  len:        230
+    { skbaddr: ffff8800d2bcdb00 } hitcount:          5  len:        196
+    { skbaddr: ffff8800a1f90000 } hitcount:          6  len:        276
+    { skbaddr: ffff88006a54f900 } hitcount:          6  len:        276
+
+    Totals:
+        Hits: 81
+        Entries: 42
+        Dropped: 0
+
+  And here's an example that shows how to combine histogram data from
+  any two events even if they don't share any 'compatible' fields
+  other than 'hitcount' and 'stacktrace'.  These commands create a
+  couple of triggers named 'bar' using those fields:
+
+    # echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
+           /sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
+    # echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
+          /sys/kernel/debug/tracing/events/net/netif_rx/trigger
+
+  And displaying the output of either one shows some interesting, if
+  somewhat confusing, results:
+
+    # cat /sys/kernel/debug/tracing/events/sched/sched_process_fork/hist
+    # cat /sys/kernel/debug/tracing/events/net/netif_rx/hist
+
+    # event histogram
+    #
+    # trigger info: hist:name=bar:keys=stacktrace:vals=hitcount:sort=hitcount:size=2048 [active]
+    #
+
+    { stacktrace:
+             _do_fork+0x18e/0x330
+             kernel_thread+0x29/0x30
+             kthreadd+0x154/0x1b0
+             ret_from_fork+0x3f/0x70
+    } hitcount:          1
+    { stacktrace:
+             netif_rx_internal+0xb2/0xd0
+             netif_rx_ni+0x20/0x70
+             dev_loopback_xmit+0xaa/0xd0
+             ip_mc_output+0x126/0x240
+             ip_local_out_sk+0x31/0x40
+             igmp_send_report+0x1e9/0x230
+             igmp_timer_expire+0xe9/0x120
+             call_timer_fn+0x39/0xf0
+             run_timer_softirq+0x1e1/0x290
+             __do_softirq+0xfd/0x290
+             irq_exit+0x98/0xb0
+             smp_apic_timer_interrupt+0x4a/0x60
+             apic_timer_interrupt+0x6d/0x80
+             cpuidle_enter+0x17/0x20
+             call_cpuidle+0x3b/0x60
+             cpu_startup_entry+0x22d/0x310
+    } hitcount:          1
+    { stacktrace:
+             netif_rx_internal+0xb2/0xd0
+             netif_rx_ni+0x20/0x70
+             dev_loopback_xmit+0xaa/0xd0
+             ip_mc_output+0x17f/0x240
+             ip_local_out_sk+0x31/0x40
+             ip_send_skb+0x1a/0x50
+             udp_send_skb+0x13e/0x270
+             udp_sendmsg+0x2bf/0x980
+             inet_sendmsg+0x67/0xa0
+             sock_sendmsg+0x38/0x50
+             SYSC_sendto+0xef/0x170
+             SyS_sendto+0xe/0x10
+             entry_SYSCALL_64_fastpath+0x12/0x6a
+    } hitcount:          2
+    { stacktrace:
+             netif_rx_internal+0xb2/0xd0
+             netif_rx+0x1c/0x60
+             loopback_xmit+0x6c/0xb0
+             dev_hard_start_xmit+0x219/0x3a0
+             __dev_queue_xmit+0x415/0x4f0
+             dev_queue_xmit_sk+0x13/0x20
+             ip_finish_output2+0x237/0x340
+             ip_finish_output+0x113/0x1d0
+             ip_output+0x66/0xc0
+             ip_local_out_sk+0x31/0x40
+             ip_send_skb+0x1a/0x50
+             udp_send_skb+0x16d/0x270
+             udp_sendmsg+0x2bf/0x980
+             inet_sendmsg+0x67/0xa0
+             sock_sendmsg+0x38/0x50
+             ___sys_sendmsg+0x14e/0x270
+    } hitcount:         76
+    { stacktrace:
+             netif_rx_internal+0xb2/0xd0
+             netif_rx+0x1c/0x60
+             loopback_xmit+0x6c/0xb0
+             dev_hard_start_xmit+0x219/0x3a0
+             __dev_queue_xmit+0x415/0x4f0
+             dev_queue_xmit_sk+0x13/0x20
+             ip_finish_output2+0x237/0x340
+             ip_finish_output+0x113/0x1d0
+             ip_output+0x66/0xc0
+             ip_local_out_sk+0x31/0x40
+             ip_send_skb+0x1a/0x50
+             udp_send_skb+0x16d/0x270
+             udp_sendmsg+0x2bf/0x980
+             inet_sendmsg+0x67/0xa0
+             sock_sendmsg+0x38/0x50
+             ___sys_sendmsg+0x269/0x270
+    } hitcount:         77
+    { stacktrace:
+             netif_rx_internal+0xb2/0xd0
+             netif_rx+0x1c/0x60
+             loopback_xmit+0x6c/0xb0
+             dev_hard_start_xmit+0x219/0x3a0
+             __dev_queue_xmit+0x415/0x4f0
+             dev_queue_xmit_sk+0x13/0x20
+             ip_finish_output2+0x237/0x340
+             ip_finish_output+0x113/0x1d0
+             ip_output+0x66/0xc0
+             ip_local_out_sk+0x31/0x40
+             ip_send_skb+0x1a/0x50
+             udp_send_skb+0x16d/0x270
+             udp_sendmsg+0x2bf/0x980
+             inet_sendmsg+0x67/0xa0
+             sock_sendmsg+0x38/0x50
+             SYSC_sendto+0xef/0x170
+    } hitcount:         88
+    { stacktrace:
+             _do_fork+0x18e/0x330
+             SyS_clone+0x19/0x20
+             entry_SYSCALL_64_fastpath+0x12/0x6a
+    } hitcount:        244
+
+    Totals:
+        Hits: 489
+        Entries: 7
+        Dropped: 0
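+
+  When the named triggers are no longer needed, they can be removed from
+  each event by echoing the same trigger string prefixed with '!' (a quick
+  sketch following the usual trigger-removal convention; the string should
+  match what was originally set up):
+
+    # echo '!hist:name=bar:key=stacktrace:val=hitcount' > \
+           /sys/kernel/debug/tracing/events/sched/sched_process_fork/trigger
+    # echo '!hist:name=bar:key=stacktrace:val=hitcount' > \
+           /sys/kernel/debug/tracing/events/net/netif_rx/trigger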
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index f52f297..a6b3705 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -210,6 +210,11 @@
 	Note, sched_switch and sched_wake_up will also trace events
 	listed in this file.
 
+	To have the PIDs of children of tasks listed in this file added
+	automatically on fork, enable the "event-fork" option. That option will also
+	cause the PIDs of tasks to be removed from this file when the task
+	exits.
+
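+	For example (a minimal sketch, assuming tracefs is mounted at the
+	usual /sys/kernel/debug/tracing location), the following restricts
+	event tracing to the current shell and, with event-fork set, to any
+	children it forks:
+
+	  # echo $$ > /sys/kernel/debug/tracing/set_event_pid
+	  # echo 1 > /sys/kernel/debug/tracing/options/event-fork
+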
   set_graph_function:
 
 	Set a "trigger" function where tracing should start
@@ -725,16 +730,14 @@
 nohex
 nobin
 noblock
-nostacktrace
 trace_printk
-noftrace_preempt
 nobranch
 annotate
 nouserstacktrace
 nosym-userobj
 noprintk-msg-only
 context-info
-latency-format
+nolatency-format
 sleep-time
 graph-time
 record-cmd
@@ -742,7 +745,10 @@
 nodisable_on_free
 irq-info
 markers
+noevent-fork
 function-trace
+nodisplay-graph
+nostacktrace
 
 To disable one of the options, echo in the option prepended with
 "no".
@@ -796,11 +802,6 @@
 
   block - When set, reading trace_pipe will not block when polled.
 
-  stacktrace - This is one of the options that changes the trace
-	       itself. When a trace is recorded, so is the stack
-	       of functions. This allows for back traces of
-	       trace sites.
-
   trace_printk - Can disable trace_printk() from writing into the buffer.
 
   branch - Enable branch tracing with the tracer.
@@ -897,6 +898,10 @@
   	    When disabled, the trace_marker will error with EINVAL
 	    on write.
 
+  event-fork - When set, tasks with PIDs listed in set_event_pid will have
+	       the PIDs of their children added to set_event_pid when those
+	       tasks fork. Also, when tasks with PIDs in set_event_pid exit,
+	       their PIDs will be removed from the file.
 
   function-trace - The latency tracers will enable function tracing
   	    if this option is enabled (default it is). When
@@ -904,8 +909,17 @@
 	    functions. This keeps the overhead of the tracer down
 	    when performing latency tests.
 
- Note: Some tracers have their own options. They only appear
-       when the tracer is active.
+  display-graph - When set, the latency tracers (irqsoff, wakeup, etc) will
+	          use function graph tracing instead of function tracing.
+
+  stacktrace - This is one of the options that changes the trace
+	       itself. When a trace is recorded, so is the stack
+	       of functions. This allows for back traces of
+	       trace sites.
+
+ Note: Some tracers have their own options. They only appear in this
+       file when the tracer is active. They always appear in the
+       options directory.
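+
+  For example (a minimal sketch, assuming tracefs is mounted at the usual
+  /sys/kernel/debug/tracing location), the new display-graph option can be
+  turned on through the options directory before starting a latency tracer:
+
+    # echo 1 > /sys/kernel/debug/tracing/options/display-graph
+    # echo wakeup > /sys/kernel/debug/tracing/current_tracer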
 
 
 
@@ -1562,12 +1576,12 @@
   <idle>-0       3dN.1   12us : menu_hrtimer_cancel <-tick_nohz_idle_exit
   <idle>-0       3dN.1   12us : ktime_get <-tick_nohz_idle_exit
   <idle>-0       3dN.1   12us : tick_do_update_jiffies64 <-tick_nohz_idle_exit
-  <idle>-0       3dN.1   13us : update_cpu_load_nohz <-tick_nohz_idle_exit
-  <idle>-0       3dN.1   13us : _raw_spin_lock <-update_cpu_load_nohz
+  <idle>-0       3dN.1   13us : cpu_load_update_nohz <-tick_nohz_idle_exit
+  <idle>-0       3dN.1   13us : _raw_spin_lock <-cpu_load_update_nohz
   <idle>-0       3dN.1   13us : add_preempt_count <-_raw_spin_lock
-  <idle>-0       3dN.2   13us : __update_cpu_load <-update_cpu_load_nohz
-  <idle>-0       3dN.2   14us : sched_avg_update <-__update_cpu_load
-  <idle>-0       3dN.2   14us : _raw_spin_unlock <-update_cpu_load_nohz
+  <idle>-0       3dN.2   13us : __cpu_load_update <-cpu_load_update_nohz
+  <idle>-0       3dN.2   14us : sched_avg_update <-__cpu_load_update
+  <idle>-0       3dN.2   14us : _raw_spin_unlock <-cpu_load_update_nohz
   <idle>-0       3dN.2   14us : sub_preempt_count <-_raw_spin_unlock
   <idle>-0       3dN.1   15us : calc_load_exit_idle <-tick_nohz_idle_exit
   <idle>-0       3dN.1   15us : touch_softlockup_watchdog <-tick_nohz_idle_exit
diff --git a/Documentation/video4linux/CARDLIST.cx23885 b/Documentation/video4linux/CARDLIST.cx23885
index 44a4cfb..85a8fdc 100644
--- a/Documentation/video4linux/CARDLIST.cx23885
+++ b/Documentation/video4linux/CARDLIST.cx23885
@@ -52,3 +52,5 @@
  51 -> DVBSky T982                                         [4254:0982]
  52 -> Hauppauge WinTV-HVR5525                             [0070:f038]
  53 -> Hauppauge WinTV Starburst                           [0070:c12a]
+ 54 -> ViewCast 260e                                       [1576:0260]
+ 55 -> ViewCast 460e                                       [1576:0460]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index 6720999..6784220 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -76,9 +76,9 @@
  75 -> Dikom DK300                              (em2882)
  76 -> KWorld PlusTV 340U or UB435-Q (ATSC)     (em2870)        [1b80:a340]
  77 -> EM2874 Leadership ISDBT                  (em2874)
- 78 -> PCTV nanoStick T2 290e                   (em28174)
+ 78 -> PCTV nanoStick T2 290e                   (em28174)       [2013:024f]
  79 -> Terratec Cinergy H5                      (em2884)        [eb1a:2885,0ccd:10a2,0ccd:10ad,0ccd:10b6]
- 80 -> PCTV DVB-S2 Stick (460e)                 (em28174)
+ 80 -> PCTV DVB-S2 Stick (460e)                 (em28174)       [2013:024c]
  81 -> Hauppauge WinTV HVR 930C                 (em2884)        [2040:1605]
  82 -> Terratec Cinergy HTC Stick               (em2884)        [0ccd:00b2]
  83 -> Honestech Vidbox NW03                    (em2860)        [eb1a:5006]
@@ -90,9 +90,11 @@
  89 -> Delock 61959                             (em2874)        [1b80:e1cc]
  90 -> KWorld USB ATSC TV Stick UB435-Q V2      (em2874)        [1b80:e346]
  91 -> SpeedLink Vicious And Devine Laplace webcam (em2765)        [1ae7:9003,1ae7:9004]
- 92 -> PCTV DVB-S2 Stick (461e)                 (em28178)
+ 92 -> PCTV DVB-S2 Stick (461e)                 (em28178)       [2013:0258]
  93 -> KWorld USB ATSC TV Stick UB435-Q V3      (em2874)        [1b80:e34c]
- 94 -> PCTV tripleStick (292e)                  (em28178)
+ 94 -> PCTV tripleStick (292e)                  (em28178)       [2013:025f,2040:0264]
  95 -> Leadtek VC100                            (em2861)        [0413:6f07]
- 96 -> Terratec Cinergy T2 Stick HD             (em28178)
+ 96 -> Terratec Cinergy T2 Stick HD             (em28178)       [eb1a:8179]
  97 -> Elgato EyeTV Hybrid 2008 INT             (em2884)        [0fd9:0018]
+ 98 -> PLEX PX-BCUD                             (em28178)       [3275:0085]
+ 99 -> Hauppauge WinTV-dualHD DVB               (em28174)       [2040:0265]
diff --git a/Documentation/video4linux/vivid.txt b/Documentation/video4linux/vivid.txt
index e35d376..8da5d2a 100644
--- a/Documentation/video4linux/vivid.txt
+++ b/Documentation/video4linux/vivid.txt
@@ -294,7 +294,7 @@
 
 These inputs support all combinations of the field setting. Special care has
 been taken to faithfully reproduce how fields are handled for the different
-TV standards. This is particularly noticable when generating a horizontally
+TV standards. This is particularly noticeable when generating a horizontally
 moving image so the temporal effect of using interlaced formats becomes clearly
 visible. For 50 Hz standards the top field is the oldest and the bottom field
 is the newest in time. For 60 Hz standards that is reversed: the bottom field
@@ -313,7 +313,7 @@
 The pixel aspect ratio will depend on the TV standard. The video aspect ratio
 can be selected through the 'Standard Aspect Ratio' Vivid control.
 Choices are '4x3', '16x9' which will give letterboxed widescreen video and
-'16x9 Anomorphic' which will give full screen squashed anamorphic widescreen
+'16x9 Anamorphic' which will give full screen squashed anamorphic widescreen
 video that will need to be scaled accordingly.
 
 The TV 'tuner' supports a frequency range of 44-958 MHz. Channels are available
@@ -862,7 +862,7 @@
 RDS Stereo:
 RDS Artificial Head:
 RDS Compressed:
-RDS Dymanic PTY:
+RDS Dynamic PTY:
 RDS Traffic Announcement:
 RDS Traffic Program:
 RDS Music: these are all controls that set the RDS data that is transmitted by
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 4d0542c..a4482cc 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -199,8 +199,8 @@
 Parameters: vcpu id (apic id on x86)
 Returns: vcpu fd on success, -1 on error
 
-This API adds a vcpu to a virtual machine.  The vcpu id is a small integer
-in the range [0, max_vcpus).
+This API adds a vcpu to a virtual machine. No more than max_vcpus may be added.
+The vcpu id is an integer in the range [0, max_vcpu_id).
 
 The recommended max_vcpus value can be retrieved using the KVM_CAP_NR_VCPUS of
 the KVM_CHECK_EXTENSION ioctl() at run-time.
@@ -212,6 +212,12 @@
 If the KVM_CAP_MAX_VCPUS does not exist, you should assume that max_vcpus is
 same as the value returned from KVM_CAP_NR_VCPUS.
 
+The maximum possible value for max_vcpu_id can be retrieved using the
+KVM_CAP_MAX_VCPU_ID of the KVM_CHECK_EXTENSION ioctl() at run-time.
+
+If the KVM_CAP_MAX_VCPU_ID does not exist, you should assume that max_vcpu_id
+is the same as the value returned from KVM_CAP_MAX_VCPUS.
+
 On powerpc using book3s_hv mode, the vcpus are mapped onto virtual
 threads in one or more virtual CPU cores.  (This is because the
 hardware requires all the hardware threads in a CPU core to be in the
@@ -3788,6 +3794,14 @@
 Fails if VCPU has already been created, or if the irqchip is already in the
 kernel (i.e. KVM_CREATE_IRQCHIP has already been called).
 
+7.6 KVM_CAP_S390_RI
+
+Architectures: s390
+Parameters: none
+
+Allows use of runtime-instrumentation introduced with the zEC12 processor.
+Will return -EINVAL if the machine does not support runtime-instrumentation.
+Will return -EBUSY if a VCPU has already been created.
 
 8. Other capabilities.
 ----------------------
diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
index e3e314c..6b0e115 100644
--- a/Documentation/virtual/kvm/devices/s390_flic.txt
+++ b/Documentation/virtual/kvm/devices/s390_flic.txt
@@ -11,6 +11,7 @@
 - add interrupts (KVM_DEV_FLIC_ENQUEUE)
 - inspect currently pending interrupts (KVM_FLIC_GET_ALL_IRQS)
 - purge all pending floating interrupts (KVM_DEV_FLIC_CLEAR_IRQS)
+- purge one pending floating I/O interrupt (KVM_DEV_FLIC_CLEAR_IO_IRQ)
 - enable/disable for the guest transparent async page faults
 - register and modify adapter interrupt sources (KVM_DEV_FLIC_ADAPTER_*)
 
@@ -40,6 +41,11 @@
     Simply deletes all elements from the list of currently pending floating
     interrupts.  No interrupts are injected into the guest.
 
+  KVM_DEV_FLIC_CLEAR_IO_IRQ
+    Deletes one (if any) I/O interrupt for a subchannel identified by the
+    subsystem identification word passed via the buffer specified by
+    attr->addr (address) and attr->attr (length).
+
   KVM_DEV_FLIC_APF_ENABLE
     Enables async page faults for the guest. So in case of a major page fault
     the host is allowed to handle this async and continues the guest.
@@ -68,7 +74,7 @@
 
   KVM_DEV_FLIC_ADAPTER_MODIFY
     Modifies attributes of an existing I/O adapter interrupt source. Takes
-    a kvm_s390_io_adapter_req specifiying the adapter and the operation:
+    a kvm_s390_io_adapter_req specifying the adapter and the operation:
 
 struct kvm_s390_io_adapter_req {
 	__u32 id;
@@ -94,3 +100,9 @@
     KVM_S390_IO_ADAPTER_UNMAP
       release a userspace page for the translated address specified in addr
       from the list of mappings
+
+Note: The KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR device ioctls executed on
+FLIC with an unknown group or attribute give the error code EINVAL (instead of
+ENXIO, as specified in the API documentation). It is not possible to conclude
+that a FLIC operation is unavailable based on the error code resulting from a
+usage attempt.
diff --git a/Documentation/x86/pat.txt b/Documentation/x86/pat.txt
index 54944c7..2a4ee63 100644
--- a/Documentation/x86/pat.txt
+++ b/Documentation/x86/pat.txt
@@ -196,3 +196,35 @@
 "debugpat" boot parameter. With this parameter, various debug messages are
 printed to dmesg log.
 
+PAT Initialization
+------------------
+
+The following table describes how PAT is initialized under various
+configurations. The PAT MSR must be updated by Linux in order to support WC
+and WT attributes. Otherwise, the PAT MSR has the value programmed in it
+by the firmware. Note, Xen enables the WC attribute in the PAT MSR for guests.
+
+ MTRR PAT   Call Sequence               PAT State  PAT MSR
+ =========================================================
+ E    E     MTRR -> PAT init            Enabled    OS
+ E    D     MTRR -> PAT init            Disabled    -
+ D    E     MTRR -> PAT disable         Disabled   BIOS
+ D    D     MTRR -> PAT disable         Disabled    -
+ -    np/E  PAT  -> PAT disable         Disabled   BIOS
+ -    np/D  PAT  -> PAT disable         Disabled    -
+ E    !P/E  MTRR -> PAT init            Disabled   BIOS
+ D    !P/E  MTRR -> PAT disable         Disabled   BIOS
+ !M   !P/E  MTRR stub -> PAT disable    Disabled   BIOS
+
+ Legend
+ ------------------------------------------------
+ E         Feature enabled in CPU
+ D	   Feature disabled/unsupported in CPU
+ np	   "nopat" boot option specified
+ !P	   CONFIG_X86_PAT option unset
+ !M	   CONFIG_MTRR option unset
+ Enabled   PAT state set to enabled
+ Disabled  PAT state set to disabled
+ OS        PAT initializes PAT MSR with OS setting
+ BIOS      PAT keeps PAT MSR with BIOS setting
+
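+A quick way to see which of the above cases applied on a running system is
+to check the kernel log and the CPU feature flags (a sketch; the exact
+message format varies between kernel versions):
+
+  # dmesg | grep -i pat
+  # grep -o -w pat /proc/cpuinfo | sort -u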
diff --git a/Documentation/x86/protection-keys.txt b/Documentation/x86/protection-keys.txt
new file mode 100644
index 0000000..c281ded
--- /dev/null
+++ b/Documentation/x86/protection-keys.txt
@@ -0,0 +1,27 @@
+Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature
+which will be found on future Intel CPUs.
+
+Memory Protection Keys provides a mechanism for enforcing page-based
+protections, but without requiring modification of the page tables
+when an application changes protection domains.  It works by
+dedicating 4 previously ignored bits in each page table entry to a
+"protection key", giving 16 possible keys.
+
+There is also a new user-accessible register (PKRU) with two separate
+bits (Access Disable and Write Disable) for each key.  Being a CPU
+register, PKRU is inherently thread-local, potentially giving each
+thread a different set of protections from every other thread.
+
+There are two new instructions (RDPKRU/WRPKRU) for reading and writing
+to the new register.  The feature is only available in 64-bit mode,
+even though there is theoretically space in the PAE PTEs.  These
+permissions are enforced on data access only and have no effect on
+instruction fetches.
+
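+Whether a given CPU has the feature, and whether the kernel has enabled it,
+can be seen in the flags line of /proc/cpuinfo (a sketch; "pku" and "ospke"
+are the usual x86 flag names for the CPU feature and the OS-enabled state):
+
+    # grep -o -w -e pku -e ospke /proc/cpuinfo | sort -u
+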
+=========================== Config Option ===========================
+
+This config option adds approximately 1.5kb of text and 50 bytes of
+data to the executable.  A workload which does large O_DIRECT reads
+of holes in XFS files was run to exercise get_user_pages_fast().  No
+performance delta was observed with the config option
+enabled or disabled.
diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.txt
new file mode 100644
index 0000000..06afac2
--- /dev/null
+++ b/Documentation/x86/topology.txt
@@ -0,0 +1,208 @@
+x86 Topology
+============
+
+This documents and clarifies the main aspects of x86 topology modelling and
+representation in the kernel. Update/change this document when making
+changes to the respective code.
+
+The architecture-agnostic topology definitions are in
+Documentation/cputopology.txt. This file holds x86-specific
+differences/specialities which do not necessarily apply to the generic
+definitions. Thus, the way to read up on Linux topology on x86 is to start
+with the generic one and look at this one in parallel for the x86 specifics.
+
+Needless to say, code should use the generic functions - this file is *only*
+here to *document* the inner workings of x86 topology.
+
+Started by Thomas Gleixner <tglx@linutronix.de> and Borislav Petkov <bp@alien8.de>.
+
+The main aim of the topology facilities is to present adequate interfaces to
+code which needs to know/query/use the structure of the running system wrt
+threads, cores, packages, etc.
+
+The kernel does not care about the concept of physical sockets because a
+socket has no relevance to software. It's an electromechanical component. In
+the past a socket always contained a single package (see below), but with the
+advent of Multi Chip Modules (MCM) a socket can hold more than one package. So
+there might still be references to sockets in the code, but they are of
+historical nature and should be cleaned up.
+
+The topology of a system is described in the units of:
+
+    - packages
+    - cores
+    - threads
+
+* Package:
+
+  Packages contain a number of cores plus shared resources, e.g. DRAM
+  controller, shared caches etc.
+
+  AMD nomenclature for package is 'Node'.
+
+  Package-related topology information in the kernel:
+
+  - cpuinfo_x86.x86_max_cores:
+
+    The number of cores in a package. This information is retrieved via CPUID.
+
+  - cpuinfo_x86.phys_proc_id:
+
+    The physical ID of the package. This information is retrieved via CPUID
+    and deduced from the APIC IDs of the cores in the package.
+
+  - cpuinfo_x86.logical_id:
+
+    The logical ID of the package. As we do not trust BIOSes to enumerate the
+    packages in a consistent way, we introduced the concept of logical package
+    ID so we can sanely calculate the maximum possible number of packages in
+    the system and have the packages enumerated linearly.
+
+  - topology_max_packages():
+
+    The maximum possible number of packages in the system. Helpful for per
+    package facilities to preallocate per package information.
+
+
+* Cores:
+
+  A core consists of 1 or more threads. It does not matter whether the threads
+  are SMT- or CMT-type threads.
+
+  AMD nomenclature for a CMT core is "Compute Unit". The kernel always uses
+  "core".
+
+  Core-related topology information in the kernel:
+
+  - smp_num_siblings:
+
+    The number of threads in a core. The number of threads in a package can be
+    calculated by:
+
+	threads_per_package = cpuinfo_x86.x86_max_cores * smp_num_siblings
+
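+    For example, a package with four cores and two SMT threads per core
+    has threads_per_package = 4 * 2 = 8.
+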
+
+* Threads:
+
+  A thread is a single scheduling unit. It's the equivalent to a logical Linux
+  CPU.
+
+  AMD nomenclature for CMT threads is "Compute Unit Core". The kernel always
+  uses "thread".
+
+  Thread-related topology information in the kernel:
+
+  - topology_core_cpumask():
+
+    The cpumask contains all online threads in the package to which a thread
+    belongs.
+
+    The number of online threads is also printed in /proc/cpuinfo "siblings."
+
+  - topology_sibling_mask():
+
+    The cpumask contains all online threads in the core to which a thread
+    belongs.
+
+  - topology_logical_package_id():
+
+    The logical package ID to which a thread belongs.
+
+  - topology_physical_package_id():
+
+    The physical package ID to which a thread belongs.
+
+  - topology_core_id():
+
+    The ID of the core to which a thread belongs. It is also printed in /proc/cpuinfo
+    "core_id."
+
+
+
+System topology examples
+
+Note:
+
+The alternative Linux CPU enumeration depends on how the BIOS enumerates the
+threads. Many BIOSes enumerate all threads 0 first and then all threads 1.
+That has the "advantage" that the logical Linux CPU numbers of threads 0 stay
+the same whether threads are enabled or not. That's merely an implementation
+detail and has no practical impact.
+
+1) Single Package, Single Core
+
+   [package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+
+2) Single Package, Dual Core
+
+   a) One thread per core
+
+	[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+		    -> [core 1] -> [thread 0] -> Linux CPU 1
+
+   b) Two threads per core
+
+	[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+				-> [thread 1] -> Linux CPU 1
+		    -> [core 1] -> [thread 0] -> Linux CPU 2
+				-> [thread 1] -> Linux CPU 3
+
+      Alternative enumeration:
+
+	[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+				-> [thread 1] -> Linux CPU 2
+		    -> [core 1] -> [thread 0] -> Linux CPU 1
+				-> [thread 1] -> Linux CPU 3
+
+      AMD nomenclature for CMT systems:
+
+	[node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
+				     -> [Compute Unit Core 1] -> Linux CPU 1
+		 -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2
+				     -> [Compute Unit Core 1] -> Linux CPU 3
+
+3) Dual Package, Dual Core
+
+   a) One thread per core
+
+	[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+		    -> [core 1] -> [thread 0] -> Linux CPU 1
+
+	[package 1] -> [core 0] -> [thread 0] -> Linux CPU 2
+		    -> [core 1] -> [thread 0] -> Linux CPU 3
+
+   b) Two threads per core
+
+	[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+				-> [thread 1] -> Linux CPU 1
+		    -> [core 1] -> [thread 0] -> Linux CPU 2
+				-> [thread 1] -> Linux CPU 3
+
+	[package 1] -> [core 0] -> [thread 0] -> Linux CPU 4
+				-> [thread 1] -> Linux CPU 5
+		    -> [core 1] -> [thread 0] -> Linux CPU 6
+				-> [thread 1] -> Linux CPU 7
+
+      Alternative enumeration:
+
+	[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
+				-> [thread 1] -> Linux CPU 4
+		    -> [core 1] -> [thread 0] -> Linux CPU 1
+				-> [thread 1] -> Linux CPU 5
+
+	[package 1] -> [core 0] -> [thread 0] -> Linux CPU 2
+				-> [thread 1] -> Linux CPU 6
+		    -> [core 1] -> [thread 0] -> Linux CPU 3
+				-> [thread 1] -> Linux CPU 7
+
+      AMD nomenclature for CMT systems:
+
+	[node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
+				     -> [Compute Unit Core 1] -> Linux CPU 1
+		 -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2
+				     -> [Compute Unit Core 1] -> Linux CPU 3
+
+	[node 1] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 4
+				     -> [Compute Unit Core 1] -> Linux CPU 5
+		 -> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 6
+				     -> [Compute Unit Core 1] -> Linux CPU 7
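+
+The IDs described above can be inspected on a running system through the
+standard cputopology sysfs attributes (see Documentation/cputopology.txt);
+a quick sketch:
+
+	# cat /sys/devices/system/cpu/cpu0/topology/physical_package_id
+	# cat /sys/devices/system/cpu/cpu0/topology/core_id
+	# cat /sys/devices/system/cpu/cpu0/topology/thread_siblings_list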
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index c518dce..5aa7383 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -19,7 +19,7 @@
 ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
+ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
@@ -31,8 +31,8 @@
 the processes using the page fault handler, with init_level4_pgt as
 reference.
 
-Current X86-64 implementations only support 40 bits of address space,
-but we support up to 46 bits. This expands into MBZ space in the page tables.
+Current X86-64 implementations support up to 46 bits of address space (64 TB),
+which is our current limit. This expands into MBZ space in the page tables.
 
 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
 memory window (this size is arbitrary, it can be raised later if needed).
diff --git a/MAINTAINERS b/MAINTAINERS
index 03e00c7..65f3277a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -627,6 +627,7 @@
 
 AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
 M:	Tom Lendacky <thomas.lendacky@amd.com>
+M:	Gary Hook <gary.hook@amd.com>
 L:	linux-crypto@vger.kernel.org
 S:	Supported
 F:	drivers/crypto/ccp/
@@ -872,9 +873,9 @@
 F:	include/linux/perf/arm_pmu.h
 
 ARM PORT
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.arm.linux.org.uk/
+W:	http://www.armlinux.org.uk/
 S:	Maintained
 F:	arch/arm/
 
@@ -886,35 +887,35 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
 
 ARM PRIMECELL AACI PL041 DRIVER
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 S:	Maintained
 F:	sound/arm/aaci.*
 
 ARM PRIMECELL CLCD PL110 DRIVER
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 S:	Maintained
 F:	drivers/video/fbdev/amba-clcd.*
 
 ARM PRIMECELL KMI PL050 DRIVER
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 S:	Maintained
 F:	drivers/input/serio/ambakmi.*
 F:	include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 S:	Maintained
 F:	drivers/mmc/host/mmci.*
 F:	include/linux/amba/mmci.h
 
 ARM PRIMECELL UART PL010 AND PL011 DRIVERS
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 S:	Maintained
 F:	drivers/tty/serial/amba-pl01*.c
 F:	include/linux/amba/serial.h
 
 ARM PRIMECELL BUS SUPPORT
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 S:	Maintained
 F:	drivers/amba/
 F:	include/linux/amba/bus.h
@@ -948,12 +949,15 @@
 
 ARM/Amlogic Meson SoC support
 M:	Carlo Caione <carlo@caione.org>
+M:	Kevin Hilman <khilman@baylibre.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L:	linux-meson@googlegroups.com
+L:	linux-amlogic@lists.infradead.org
 W:	http://linux-meson.com/
 S:	Maintained
 F:	arch/arm/mach-meson/
 F:	arch/arm/boot/dts/meson*
+F:	arch/arm64/boot/dts/amlogic/
+F:	drivers/pinctrl/meson/
 N:	meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
@@ -975,6 +979,13 @@
 F:	arch/arm/boot/dts/artpec6*
 F:	drivers/clk/clk-artpec6.c
 
+ARM/ASPEED MACHINE SUPPORT
+M:	Joel Stanley <joel@jms.id.au>
+S:	Maintained
+F:	arch/arm/mach-aspeed/
+F:	arch/arm/boot/dts/aspeed-*
+F:	drivers/*/*aspeed*
+
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
 M:	Alexandre Belloni <alexandre.belloni@free-electrons.com>
@@ -1036,7 +1047,7 @@
 S:	Maintained
 
 ARM/CLKDEV SUPPORT
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/include/asm/clkdev.h
@@ -1093,9 +1104,9 @@
 N:	digicolor
 
 ARM/EBSA110 MACHINE SUPPORT
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.arm.linux.org.uk/
+W:	http://www.armlinux.org.uk/
 S:	Maintained
 F:	arch/arm/mach-ebsa110/
 F:	drivers/net/ethernet/amd/am79c961a.*
@@ -1124,9 +1135,9 @@
 F:	arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.arm.linux.org.uk/
+W:	http://www.armlinux.org.uk/
 S:	Maintained
 F:	arch/arm/include/asm/hardware/dec21285.h
 F:	arch/arm/mach-footbridge/
@@ -1260,7 +1271,7 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-keystone/
-F:	arch/arm/boot/dts/k2*
+F:	arch/arm/boot/dts/keystone-*
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
 ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
@@ -1288,6 +1299,12 @@
 S:	Maintained
 F:	drivers/memory/*emif*
 
+ARM/LG1K ARCHITECTURE
+M:	Chanho Min <chanho.min@lge.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm64/boot/dts/lg/
+
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:	Lennert Buytenhek <kernel@wantstofly.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1306,11 +1323,25 @@
 F:	drivers/rtc/rtc-lpc24xx.c
 N:	lpc18xx
 
+ARM/LPC32XX SOC SUPPORT
+M:	Vladimir Zapolskiy <vz@mleia.com>
+M:	Sylvain Lemieux <slemieux.tyco@gmail.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T:	git git://github.com/vzapolskiy/linux-lpc32xx.git
+S:	Maintained
+F:	arch/arm/boot/dts/lpc32*
+F:	arch/arm/mach-lpc32xx/
+F:	drivers/i2c/busses/i2c-pnx.c
+F:	drivers/net/ethernet/nxp/lpc_eth.c
+F:	drivers/usb/host/ohci-nxp.c
+F:	drivers/watchdog/pnx4008_wdt.c
+N:	lpc32xx
+
 ARM/MAGICIAN MACHINE SUPPORT
 M:	Philipp Zabel <philipp.zabel@gmail.com>
 S:	Maintained
 
-ARM/Marvell Kirkwood and Armada 370, 375, 38x, XP SOC support
+ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
 M:	Jason Cooper <jason@lakedaemon.net>
 M:	Andrew Lunn <andrew@lunn.ch>
 M:	Gregory Clement <gregory.clement@free-electrons.com>
@@ -1322,7 +1353,8 @@
 F:	arch/arm/boot/dts/armada*
 F:	arch/arm/boot/dts/kirkwood*
 F:	arch/arm64/boot/dts/marvell/armada*
-
+F:	drivers/cpufreq/mvebu-cpufreq.c
+F:	arch/arm/configs/mvebu_*_defconfig
 
 ARM/Marvell Berlin SoC support
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1355,6 +1387,15 @@
 S:	Maintained
 F:	arch/arm/mach-orion5x/ts78xx-*
 
+ARM/OXNAS platform support
+M:	Neil Armstrong <narmstrong@baylibre.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-oxnas/
+F:	arch/arm/boot/dts/oxnas*
+F:	arch/arm/boot/dts/wd-mbwe.dts
+N:	oxnas
+
 ARM/Mediatek RTC DRIVER
 M:	Eddie Huang <eddie.huang@mediatek.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1457,7 +1498,7 @@
 ARM/PT DIGITAL BOARD PORT
 M:	Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.arm.linux.org.uk/
+W:	http://www.armlinux.org.uk/
 S:	Maintained
 
 ARM/QUALCOMM SUPPORT
@@ -1470,7 +1511,10 @@
 F:	arch/arm/boot/dts/qcom-*.dtsi
 F:	arch/arm/mach-qcom/
 F:	arch/arm64/boot/dts/qcom/*
+F:	drivers/i2c/busses/i2c-qup.c
+F:	drivers/clk/qcom/
 F:	drivers/soc/qcom/
+F:	drivers/spi/spi-qup.c
 F:	drivers/tty/serial/msm_serial.h
 F:	drivers/tty/serial/msm_serial.c
 F:	drivers/*/pm8???-*
@@ -1491,11 +1535,13 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
 S:	Supported
 F:	arch/arm64/boot/dts/renesas/
+F:	drivers/soc/renesas/
+F:	include/linux/soc/renesas/
 
 ARM/RISCPC ARCHITECTURE
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.arm.linux.org.uk/
+W:	http://www.armlinux.org.uk/
 S:	Maintained
 F:	arch/arm/include/asm/hardware/entry-macro-iomd.S
 F:	arch/arm/include/asm/hardware/ioc.h
@@ -1540,6 +1586,7 @@
 F:	arch/arm/mach-exynos*/
 F:	drivers/*/*s3c2410*
 F:	drivers/*/*/*s3c2410*
+F:	drivers/memory/samsung/*
 F:	drivers/soc/samsung/*
 F:	drivers/spi/spi-s3c*
 F:	sound/soc/samsung/*
@@ -1604,6 +1651,8 @@
 F:	arch/arm/include/debug/renesas-scif.S
 F:	arch/arm/mach-shmobile/
 F:	drivers/sh/
+F:	drivers/soc/renesas/
+F:	include/linux/soc/renesas/
 
 ARM/SOCFPGA ARCHITECTURE
 M:	Dinh Nguyen <dinguyen@opensource.altera.com>
@@ -1638,6 +1687,7 @@
 F:	drivers/char/hw_random/st-rng.c
 F:	drivers/clocksource/arm_global_timer.c
 F:	drivers/clocksource/clksrc_st_lpc.c
+F:	drivers/cpufreq/sti-cpufreq.c
 F:	drivers/i2c/busses/i2c-st.c
 F:	drivers/media/rc/st_rc.c
 F:	drivers/media/platform/sti/c8sectpfe/
@@ -1647,6 +1697,7 @@
 F:	drivers/phy/phy-stih407-usb.c
 F:	drivers/phy/phy-stih41x-usb.c
 F:	drivers/pinctrl/pinctrl-st.c
+F:	drivers/remoteproc/st_remoteproc.c
 F:	drivers/reset/sti/
 F:	drivers/rtc/rtc-st-lpc.c
 F:	drivers/tty/serial/st-asc.c
@@ -1771,11 +1822,12 @@
 F:	*/*/*/vexpress*
 F:	drivers/clk/versatile/clk-vexpress-osc.c
 F:	drivers/clocksource/versatile.c
+N:	mps2
 
 ARM/VFP SUPPORT
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.arm.linux.org.uk/
+W:	http://www.armlinux.org.uk/
 S:	Maintained
 F:	arch/arm/vfp/
 
@@ -2203,10 +2255,13 @@
 M:	Marek Lindner <mareklindner@neomailbox.ch>
 M:	Simon Wunderlich <sw@simonwunderlich.de>
 M:	Antonio Quartulli <a@unstable.cc>
-L:	b.a.t.m.a.n@lists.open-mesh.org
+L:	b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers)
 W:	https://www.open-mesh.org/
 Q:	https://patchwork.open-mesh.org/project/batman/list/
 S:	Maintained
+F:	Documentation/ABI/testing/sysfs-class-net-batman-adv
+F:	Documentation/ABI/testing/sysfs-class-net-mesh
+F:	Documentation/networking/batman-adv.txt
 F:	net/batman-adv/
 
 BAYCOM/HDLCDRV DRIVERS FOR AX.25
@@ -2921,7 +2976,7 @@
 F:	include/linux/cleancache.h
 
 CLK API
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 L:	linux-clk@vger.kernel.org
 S:	Maintained
 F:	include/linux/clk.h
@@ -3348,15 +3403,16 @@
 
 STMMAC ETHERNET DRIVER
 M:	Giuseppe Cavallaro <peppe.cavallaro@st.com>
+M:	Alexandre Torgue <alexandre.torgue@st.com>
 L:	netdev@vger.kernel.org
 W:	http://www.stlinux.com
 S:	Supported
 F:	drivers/net/ethernet/stmicro/stmmac/
 
 CYBERPRO FB DRIVER
-M:	Russell King <linux@arm.linux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:	http://www.arm.linux.org.uk/
+W:	http://www.armlinux.org.uk/
 S:	Maintained
 F:	drivers/video/fbdev/cyber2000fb.*
 
@@ -3539,6 +3595,15 @@
 F:	include/linux/devfreq-event.h
 F:	Documentation/devicetree/bindings/devfreq/event/
 
+BUS FREQUENCY DRIVER FOR SAMSUNG EXYNOS
+M:	Chanwoo Choi <cw00.choi@samsung.com>
+L:	linux-pm@vger.kernel.org
+L:	linux-samsung-soc@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+S:	Maintained
+F:	drivers/devfreq/exynos-bus.c
+F:	Documentation/devicetree/bindings/devfreq/exynos-bus.txt
+
 DEVICE NUMBER REGISTRY
 M:	Torben Mathiasen <device@lanana.org>
 W:	http://lanana.org/docs/device-list/index.html
@@ -3881,7 +3946,7 @@
 
 DRM DRIVERS FOR VIVANTE GPU IP
 M:	Lucas Stach <l.stach@pengutronix.de>
-R:	Russell King <linux+etnaviv@arm.linux.org.uk>
+R:	Russell King <linux+etnaviv@armlinux.org.uk>
 R:	Christian Gmeiner <christian.gmeiner@gmail.com>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
@@ -4223,8 +4288,8 @@
 F:	arch/ia64/kernel/efi.c
 F:	arch/x86/boot/compressed/eboot.[ch]
 F:	arch/x86/include/asm/efi.h
-F:	arch/x86/platform/efi/*
-F:	drivers/firmware/efi/*
+F:	arch/x86/platform/efi/
+F:	drivers/firmware/efi/
 F:	include/linux/efi*.h
 
 EFI VARIABLE FILESYSTEM
@@ -4302,7 +4367,7 @@
 
 ETHERNET BRIDGE
 M:	Stephen Hemminger <stephen@networkplumber.org>
-L:	bridge@lists.linux-foundation.org
+L:	bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net:Bridge
 S:	Maintained
@@ -4379,6 +4444,12 @@
 F:	drivers/video/fbdev/exynos/exynos_mipi*
 F:	include/video/exynos_mipi*
 
+EZchip NPS platform support
+M:	Noam Camus <noamc@ezchip.com>
+S:	Supported
+F:	arch/arc/plat-eznps
+F:	arch/arc/boot/dts/eznps.dts
+
 F71805F HARDWARE MONITORING DRIVER
 M:	Jean Delvare <jdelvare@suse.com>
 L:	linux-hwmon@vger.kernel.org
@@ -4661,6 +4732,7 @@
 M:	Timur Tabi <timur@tabi.org>
 M:	Nicolin Chen <nicoleotsuka@gmail.com>
 M:	Xiubo Li <Xiubo.Lee@gmail.com>
+R:	Fabio Estevam <fabio.estevam@nxp.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:	linuxppc-dev@lists.ozlabs.org
 S:	Maintained
@@ -4744,7 +4816,7 @@
 
 FUSE: FILESYSTEM IN USERSPACE
 M:	Miklos Szeredi <miklos@szeredi.hu>
-L:	fuse-devel@lists.sourceforge.net
+L:	linux-fsdevel@vger.kernel.org
 W:	http://fuse.sourceforge.net/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
 S:	Maintained
@@ -4884,6 +4956,7 @@
 L:	linux-gpio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
 S:	Maintained
+F:	Documentation/devicetree/bindings/gpio/
 F:	Documentation/gpio/
 F:	Documentation/ABI/testing/gpio-cdev
 F:	Documentation/ABI/obsolete/sysfs-gpio
@@ -4903,7 +4976,7 @@
 F:	include/net/gre.h
 
 GRETH 10/100/1G Ethernet MAC device driver
-M:	Kristoffer Glembo <kristoffer@gaisler.com>
+M:	Andreas Larsson <andreas@gaisler.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/aeroflex/
@@ -5042,6 +5115,7 @@
 HARDWARE SPINLOCK CORE
 M:	Ohad Ben-Cohen <ohad@wizery.com>
 M:	Bjorn Andersson <bjorn.andersson@linaro.org>
+L:	linux-remoteproc@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
 F:	Documentation/hwspinlock.txt
@@ -5274,6 +5348,7 @@
 M:	Peter Rosin <peda@axentia.se>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
+F:	Documentation/i2c/i2c-topology
 F:	Documentation/i2c/muxes/
 F:	Documentation/devicetree/bindings/i2c/i2c-mux*
 F:	drivers/i2c/i2c-mux.c
@@ -5743,14 +5818,7 @@
 
 INTEL ETHERNET DRIVERS
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-R:	Jesse Brandeburg <jesse.brandeburg@intel.com>
-R:	Shannon Nelson <shannon.nelson@intel.com>
-R:	Carolyn Wyborny <carolyn.wyborny@intel.com>
-R:	Don Skidmore <donald.c.skidmore@intel.com>
-R:	Bruce Allan <bruce.w.allan@intel.com>
-R:	John Ronciak <john.ronciak@intel.com>
-R:	Mitch Williams <mitch.a.williams@intel.com>
-L:	intel-wired-lan@lists.osuosl.org
+L:	intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
 W:	http://www.intel.com/support/feedback.htm
 W:	http://e1000.sourceforge.net/
 Q:	http://patchwork.ozlabs.org/project/intel-wired-lan/list/
@@ -6026,7 +6094,7 @@
 
 ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
 M:	Or Gerlitz <ogerlitz@mellanox.com>
-M:	Sagi Grimberg <sagig@mellanox.com>
+M:	Sagi Grimberg <sagi@grimberg.me>
 M:	Roi Dayan <roid@mellanox.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
@@ -6036,7 +6104,7 @@
 F:	drivers/infiniband/ulp/iser/
 
 ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
-M:	Sagi Grimberg <sagig@mellanox.com>
+M:	Sagi Grimberg <sagi@grimberg.me>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 L:	linux-rdma@vger.kernel.org
 L:	target-devel@vger.kernel.org
@@ -6251,8 +6319,8 @@
 F:	tools/testing/selftests
 
 KERNEL VIRTUAL MACHINE (KVM)
-M:	Gleb Natapov <gleb@kernel.org>
 M:	Paolo Bonzini <pbonzini@redhat.com>
+M:	Radim Krčmář <rkrcmar@redhat.com>
 L:	kvm@vger.kernel.org
 W:	http://www.linux-kvm.org
 T:	git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -6399,10 +6467,10 @@
 F:	mm/kmemleak-test.c
 
 KPROBES
-M:	Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+M:	Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
 M:	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 M:	"David S. Miller" <davem@davemloft.net>
-M:	Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+M:	Masami Hiramatsu <mhiramat@kernel.org>
 S:	Maintained
 F:	Documentation/kprobes.txt
 F:	include/linux/kprobes.h
@@ -6425,7 +6493,7 @@
 F:	include/net/l3mdev.h
 
 LANTIQ MIPS ARCHITECTURE
-M:	John Crispin <blogic@openwrt.org>
+M:	John Crispin <john@phrozen.org>
 L:	linux-mips@linux-mips.org
 S:	Maintained
 F:	arch/mips/lantiq
@@ -6687,6 +6755,7 @@
 F:	include/linux/livepatch.h
 F:	arch/x86/include/asm/livepatch.h
 F:	arch/x86/kernel/livepatch.c
+F:	Documentation/livepatch/
 F:	Documentation/ABI/testing/sysfs-kernel-livepatch
 F:	samples/livepatch/
 L:	live-patching@vger.kernel.org
@@ -6775,12 +6844,6 @@
 S:	Maintained
 F:	fs/logfs/
 
-LPC32XX MACHINE SUPPORT
-M:	Roland Stigge <stigge@antcom.de>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
-F:	arch/arm/mach-lpc32xx/
-
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 M:	Sathya Prakash <sathya.prakash@broadcom.com>
 M:	Chaitra P B <chaitra.basappa@broadcom.com>
@@ -6904,7 +6967,7 @@
 S:	Maintained
 
 MARVELL ARMADA DRM SUPPORT
-M:	Russell King <rmk+kernel@arm.linux.org.uk>
+M:	Russell King <rmk+kernel@armlinux.org.uk>
 S:	Maintained
 F:	drivers/gpu/drm/armada/
 
@@ -7019,9 +7082,9 @@
 M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
-F:	drivers/*/max14577.c
+F:	drivers/*/max14577*.c
 F:	drivers/*/max77686*.c
-F:	drivers/*/max77693.c
+F:	drivers/*/max77693*.c
 F:	drivers/extcon/extcon-max14577.c
 F:	drivers/extcon/extcon-max77693.c
 F:	drivers/rtc/rtc-max77686.c
@@ -7271,6 +7334,15 @@
 F:	Documentation/mips/
 F:	arch/mips/
 
+MIPS/LOONGSON1 ARCHITECTURE
+M:	Keguang Zhang <keguang.zhang@gmail.com>
+L:	linux-mips@linux-mips.org
+S:	Maintained
+F:	arch/mips/loongson32/
+F:	arch/mips/include/asm/mach-loongson32/
+F:	drivers/*/*loongson1*
+F:	drivers/*/*/*loongson1*
+
 MIROSOUND PCM20 FM RADIO RECEIVER DRIVER
 M:	Hans Verkuil <hverkuil@xs4all.nl>
 L:	linux-media@vger.kernel.org
@@ -7538,10 +7610,10 @@
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
 F:	Documentation/scsi/g_NCR5380.txt
+F:	Documentation/scsi/dtc3x80.txt
 F:	drivers/scsi/NCR5380.*
 F:	drivers/scsi/arm/cumana_1.c
 F:	drivers/scsi/arm/oak.c
-F:	drivers/scsi/atari_NCR5380.c
 F:	drivers/scsi/atari_scsi.*
 F:	drivers/scsi/dmx3191d.c
 F:	drivers/scsi/dtc.*
@@ -7575,7 +7647,7 @@
 
 NETEM NETWORK EMULATOR
 M:	Stephen Hemminger <stephen@networkplumber.org>
-L:	netem@lists.linux-foundation.org
+L:	netem@lists.linux-foundation.org (moderated for non-subscribers)
 S:	Maintained
 F:	net/sched/sch_netem.c
 
@@ -7904,7 +7976,7 @@
 F:	drivers/nfc/nxp-nci
 
 NXP TDA998X DRM DRIVER
-M:	Russell King <rmk+kernel@arm.linux.org.uk>
+M:	Russell King <rmk+kernel@armlinux.org.uk>
 S:	Supported
 F:	drivers/gpu/drm/i2c/tda998x_drv.c
 F:	include/drm/i2c/tda998x.h
@@ -7977,7 +8049,7 @@
 F:	drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
-M:	Rajendra Nayak <rnayak@ti.com>
+M:	Rajendra Nayak <rnayak@codeaurora.org>
 M:	Paul Walmsley <paul@pwsan.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
@@ -8253,7 +8325,7 @@
 
 ORANGEFS FILESYSTEM
 M:	Mike Marshall <hubcap@omnibond.com>
-L:	pvfs2-developers@beowulf-underground.org
+L:	pvfs2-developers@beowulf-underground.org (subscribers-only)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
 S:	Supported
 F:	fs/orangefs/
@@ -8711,6 +8783,8 @@
 
 PIN CONTROLLER - SAMSUNG
 M:	Tomasz Figa <tomasz.figa@gmail.com>
+M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
@@ -9139,6 +9213,13 @@
 S:	Supported
 F:	drivers/net/wireless/ath/wcn36xx/
 
+QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
+M:	Gabriel Somlo <somlo@cmu.edu>
+M:	"Michael S. Tsirkin" <mst@redhat.com>
+L:	qemu-devel@nongnu.org
+S:	Maintained
+F:	drivers/firmware/qemu_fw_cfg.c
+
 RADOS BLOCK DEVICE (RBD)
 M:	Ilya Dryomov <idryomov@gmail.com>
 M:	Sage Weil <sage@redhat.com>
@@ -9181,7 +9262,7 @@
 F:	drivers/video/fbdev/aty/aty128fb.c
 
 RALINK MIPS ARCHITECTURE
-M:	John Crispin <blogic@openwrt.org>
+M:	John Crispin <john@phrozen.org>
 L:	linux-mips@linux-mips.org
 S:	Maintained
 F:	arch/mips/ralink
@@ -9314,6 +9395,7 @@
 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
 M:	Ohad Ben-Cohen <ohad@wizery.com>
 M:	Bjorn Andersson <bjorn.andersson@linaro.org>
+L:	linux-remoteproc@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
 S:	Maintained
 F:	drivers/remoteproc/
@@ -9323,6 +9405,7 @@
 REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
 M:	Ohad Ben-Cohen <ohad@wizery.com>
 M:	Bjorn Andersson <bjorn.andersson@linaro.org>
+L:	linux-remoteproc@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git
 S:	Maintained
 F:	drivers/rpmsg/
@@ -9477,7 +9560,7 @@
 RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
 M:	Jes Sorensen <Jes.Sorensen@redhat.com>
 L:	linux-wireless@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8723au-mac80211
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel
 S:	Maintained
 F:	drivers/net/wireless/realtek/rtl8xxxu/
 
@@ -9959,6 +10042,12 @@
 S:	Supported
 F:	security/apparmor/
 
+LOADPIN SECURITY MODULE
+M:	Kees Cook <keescook@chromium.org>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git lsm/loadpin
+S:	Supported
+F:	security/loadpin/
+
 YAMA SECURITY MODULE
 M:	Kees Cook <keescook@chromium.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git yama/tip
@@ -10002,7 +10091,8 @@
 
 SFC NETWORK DRIVER
 M:	Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M:	Shradha Shah <sshah@solarflare.com>
+M:	Edward Cree <ecree@solarflare.com>
+M:	Bert Kenward <bkenward@solarflare.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/sfc/
@@ -10148,8 +10238,8 @@
 TI DAVINCI MACHINE SUPPORT
 M:	Sekhar Nori <nsekhar@ti.com>
 M:	Kevin Hilman <khilman@kernel.org>
-T:	git git://gitorious.org/linux-davinci/linux-davinci.git
-Q:	http://patchwork.kernel.org/project/linux-davinci/list/
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
 S:	Supported
 F:	arch/arm/mach-davinci/
 F:	drivers/i2c/busses/i2c-davinci.c
@@ -10583,6 +10673,14 @@
 S:	Maintained
 F:	drivers/staging/nvec/
 
+STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
+M:	Jens Frederich <jfrederich@gmail.com>
+M:	Daniel Drake <dsd@laptop.org>
+M:	Jon Nettleton <jon.nettleton@gmail.com>
+W:	http://wiki.laptop.org/go/DCON
+S:	Maintained
+F:	drivers/staging/olpc_dcon/
+
 STAGING - REALTEK RTL8712U DRIVERS
 M:	Larry Finger <Larry.Finger@lwfinger.net>
 M:	Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
@@ -10921,10 +11019,11 @@
 S:	Supported
 F:	drivers/clk/tegra/
 
-TEGRA DMA DRIVER
+TEGRA DMA DRIVERS
 M:	Laxman Dewangan <ldewangan@nvidia.com>
+M:	Jon Hunter <jonathanh@nvidia.com>
 S:	Supported
-F:	drivers/dma/tegra20-apb-dma.c
+F:	drivers/dma/tegra*
 
 TEGRA I2C DRIVER
 M:	Laxman Dewangan <ldewangan@nvidia.com>
@@ -11051,6 +11150,15 @@
 F:	drivers/clk/ti/
 F:	include/linux/clk/ti.h
 
+TI ETHERNET SWITCH DRIVER (CPSW)
+M:	Mugunthan V N <mugunthanvnm@ti.com>
+R:	Grygorii Strashko <grygorii.strashko@ti.com>
+L:	linux-omap@vger.kernel.org
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/ti/cpsw*
+F:	drivers/net/ethernet/ti/davinci*
+
 TI FLASH MEDIA INTERFACE DRIVER
 M:	Alex Dubov <oakad@yahoo.com>
 S:	Maintained
@@ -11137,8 +11245,8 @@
 F:	net/tipc/
 
 TILE ARCHITECTURE
-M:	Chris Metcalf <cmetcalf@ezchip.com>
-W:	http://www.ezchip.com/scm/
+M:	Chris Metcalf <cmetcalf@mellanox.com>
+W:	http://www.mellanox.com/repository/solutions/tile-scm/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile.git
 S:	Supported
 F:	arch/tile/
@@ -11216,14 +11324,13 @@
 F:	drivers/media/i2c/tc358743*
 F:	include/media/i2c/tc358743.h
 
-TMIO MMC DRIVER
-M:	Ian Molton <ian@mnementh.co.uk>
+TMIO/SDHI MMC DRIVER
+M:	Wolfram Sang <wsa+renesas@sang-engineering.com>
 L:	linux-mmc@vger.kernel.org
-S:	Maintained
+S:	Supported
 F:	drivers/mmc/host/tmio_mmc*
 F:	drivers/mmc/host/sh_mobile_sdhi.c
-F:	include/linux/mmc/tmio.h
-F:	include/linux/mmc/sh_mobile_sdhi.h
+F:	include/linux/mfd/tmio.h
 
 TMP401 HARDWARE MONITOR DRIVER
 M:	Guenter Roeck <linux@roeck-us.net>
@@ -11255,6 +11362,14 @@
 S:	Odd Fixes
 F:	drivers/media/pci/tw68/
 
+TW686X VIDEO4LINUX DRIVER
+M:	Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+L:	linux-media@vger.kernel.org
+T:	git git://linuxtv.org/media_tree.git
+W:	http://linuxtv.org
+S:	Maintained
+F:	drivers/media/pci/tw686x/
+
 TPM DEVICE DRIVER
 M:	Peter Huewe <peterhuewe@gmx.de>
 M:	Marcel Selhorst <tpmdd@selhorst.net>
@@ -11288,6 +11403,20 @@
 F:	kernel/trace/
 F:	tools/testing/selftests/ftrace/
 
+TRACING MMIO ACCESSES (MMIOTRACE)
+M:	Steven Rostedt <rostedt@goodmis.org>
+M:	Ingo Molnar <mingo@kernel.org>
+R:	Karol Herbst <karolherbst@gmail.com>
+R:	Pekka Paalanen <ppaalanen@gmail.com>
+S:	Maintained
+L:	linux-kernel@vger.kernel.org
+L:	nouveau@lists.freedesktop.org
+F:	kernel/trace/trace_mmiotrace.c
+F:	include/linux/mmiotrace.h
+F:	arch/x86/mm/kmmio.c
+F:	arch/x86/mm/mmio-mod.c
+F:	arch/x86/mm/testmmiotrace.c
+
 TRIVIAL PATCHES
 M:	Jiri Kosina <trivial@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
@@ -11967,7 +12096,9 @@
 W:	http://www.slimlogic.co.uk/?p=48
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git
 S:	Supported
+F:	Documentation/devicetree/bindings/regulator/
 F:	drivers/regulator/
+F:	include/dt-bindings/regulator/
 F:	include/linux/regulator/
 
 VRF
@@ -12202,9 +12333,9 @@
 F:	drivers/media/tuners/tuner-xc2028.*
 
 XEN HYPERVISOR INTERFACE
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 M:	Boris Ostrovsky <boris.ostrovsky@oracle.com>
 M:	David Vrabel <david.vrabel@citrix.com>
+M:	Juergen Gross <jgross@suse.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
 S:	Supported
@@ -12216,16 +12347,16 @@
 F:	include/uapi/xen/
 
 XEN HYPERVISOR ARM
-M:	Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+M:	Stefano Stabellini <sstabellini@kernel.org>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
-S:	Supported
+S:	Maintained
 F:	arch/arm/xen/
 F:	arch/arm/include/asm/xen/
 
 XEN HYPERVISOR ARM64
-M:	Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+M:	Stefano Stabellini <sstabellini@kernel.org>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
-S:	Supported
+S:	Maintained
 F:	arch/arm64/xen/
 F:	arch/arm64/include/asm/xen/
 
diff --git a/Makefile b/Makefile
index 916b26e..0f9cb36 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Blurry Fish Butt
+EXTRAVERSION =
+NAME = Charred Weasel
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -1008,7 +1008,8 @@
 prepare: prepare0 prepare-objtool
 
 ifdef CONFIG_STACK_VALIDATION
-  has_libelf := $(shell echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf - &> /dev/null && echo 1 || echo 0)
+  has_libelf := $(call try-run,\
+		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 9d8a858..fe99f89 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -13,7 +13,6 @@
 	select GENERIC_IRQ_PROBE
 	select AUTO_IRQ_AFFINITY if SMP
 	select GENERIC_IRQ_SHOW
-	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index a83bbea6..0131a70 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -63,7 +63,7 @@
 	return res >= 0 ? 1 : 0;
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static inline long ___down_write(struct rw_semaphore *sem)
 {
 	long oldcount;
 #ifndef	CONFIG_SMP
@@ -83,10 +83,24 @@
 	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
 	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
 #endif
-	if (unlikely(oldcount))
+	return oldcount;
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	if (unlikely(___down_write(sem)))
 		rwsem_down_write_failed(sem);
 }
 
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+	if (unlikely(___down_write(sem)))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+
+	return 0;
+}
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 6cc0816..ffb93f49 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -147,7 +147,7 @@
 		long __user *, basep)
 {
 	int error;
-	struct fd arg = fdget(fd);
+	struct fd arg = fdget_pos(fd);
 	struct osf_dirent_callback buf = {
 		.ctx.actor = osf_filldir,
 		.dirent = dirent,
@@ -164,7 +164,7 @@
 	if (count != buf.count)
 		error = count - buf.count;
 
-	fdput(arg);
+	fdput_pos(arg);
 	return error;
 }
 
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
index 99e8d47..92c0d46 100644
--- a/arch/alpha/kernel/pci-sysfs.c
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -77,10 +77,10 @@
 	if (i >= PCI_ROM_RESOURCE)
 		return -ENODEV;
 
-	if (!__pci_mmap_fits(pdev, i, vma, sparse))
+	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
 		return -EINVAL;
 
-	if (iomem_is_exclusive(res->start))
+	if (!__pci_mmap_fits(pdev, i, vma, sparse))
 		return -EINVAL;
 
 	pcibios_resource_to_bus(pdev->bus, &bar, res);
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 208aae0..8894f7e 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -10,8 +10,9 @@
 	def_bool y
 	select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
 	select BUILDTIME_EXTABLE_SORT
-	select COMMON_CLK
+	select CLKSRC_OF
 	select CLONE_BACKWARDS
+	select COMMON_CLK
 	select GENERIC_ATOMIC64
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_FIND_FIRST_BIT
@@ -30,13 +31,16 @@
 	select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
+	select HANDLE_DOMAIN_IRQ
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
 	select NO_BOOTMEM
 	select OF
 	select OF_EARLY_FLATTREE
+	select OF_RESERVED_MEM
 	select PERF_USE_VMALLOC
 	select HAVE_DEBUG_STACKOVERFLOW
+	select HAVE_GENERIC_DMA_COHERENT
 
 config MIGHT_HAVE_PCI
 	bool
@@ -56,6 +60,9 @@
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
+config ARCH_DISCONTIGMEM_ENABLE
+	def_bool y
+
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 
@@ -90,6 +97,7 @@
 source "arch/arc/plat-tb10x/Kconfig"
 source "arch/arc/plat-axs10x/Kconfig"
 #New platform adds here
+source "arch/arc/plat-eznps/Kconfig"
 
 endmenu
 
@@ -345,6 +353,15 @@
 
 endchoice
 
+config NODES_SHIFT
+	int "Maximum NUMA Nodes (as a power of 2)"
+	default "1" if !DISCONTIGMEM
+	default "2" if DISCONTIGMEM
+	depends on NEED_MULTIPLE_NODES
+	---help---
+	  Accessing memory beyond 1GB (with or without PAE) requires 2 memory
+	  zones.
+
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
@@ -453,6 +470,7 @@
 
 config HIGHMEM
 	bool "High Memory Support"
+	select DISCONTIGMEM
 	help
 	  With ARC 2G:2G address split, only upper 2G is directly addressable by
 	  kernel. Enable this to potentially allow access to rest of 2G and PAE
@@ -475,6 +493,17 @@
 config ARC_PLAT_NEEDS_PHYS_TO_DMA
 	bool
 
+config ARC_KVADDR_SIZE
+	int "Kernel Virtual Address Space size (MB)"
+	range 0 512
+	default "256"
+	help
+	  The kernel address space is carved out of 256MB of translated address
+	  space, catering to vmalloc, modules, pkmap and fixmap. This however may
+	  not suffice the vmalloc requirements of a 4K-CPU EZChip system, so allow
+	  it to be stretched to 512 MB (by extending into the reserved
+	  kernel-user gutter).
+
 config ARC_CURR_IN_REG
 	bool "Dedicate Register r25 for current_task pointer"
 	default y
@@ -593,7 +622,6 @@
 	def_bool PCI
 
 source "drivers/pci/Kconfig"
-source "drivers/pci/pcie/Kconfig"
 
 endmenu
 
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index def69e3..02fabef 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -115,6 +115,11 @@
 core-$(CONFIG_ARC_PLAT_SIM)	+= arch/arc/plat-sim/
 core-$(CONFIG_ARC_PLAT_TB10X)	+= arch/arc/plat-tb10x/
 core-$(CONFIG_ARC_PLAT_AXS10X)	+= arch/arc/plat-axs10x/
+core-$(CONFIG_ARC_PLAT_EZNPS)	+= arch/arc/plat-eznps/
+
+ifdef CONFIG_ARC_PLAT_EZNPS
+KBUILD_CPPFLAGS += -I$(srctree)/arch/arc/plat-eznps/include
+endif
 
 drivers-$(CONFIG_OPROFILE)	+= arch/arc/oprofile/
 
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index cfb5052..de53f5c 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -35,6 +35,20 @@
 		};
 	};
 
+	/* TIMER0 with interrupt for clockevent */
+	timer0 {
+		compatible = "snps,arc-timer";
+		interrupts = <3>;
+		interrupt-parent = <&intc>;
+		clocks = <&cpu_clk>;
+	};
+
+	/* TIMER1 for free running clocksource */
+	timer1 {
+		compatible = "snps,arc-timer";
+		clocks = <&cpu_clk>;
+	};
+
 	soc100 {
 		#address-cells	= <1>;
 		#size-cells	= <1>;
@@ -112,7 +126,7 @@
 			chan_allocation_order = <0>;
 			chan_priority = <1>;
 			block_size = <0x7ff>;
-			data_width = <2>;
+			data-width = <4>;
 			clocks = <&ahb_clk>;
 			clock-names = "hclk";
 		};
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 420dcfd..40bcecf 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -11,6 +11,8 @@
  * Note that this file only supports the 770D CPU
  */
 
+/include/ "skeleton.dtsi"
+
 / {
 	compatible = "snps,arc";
 	clock-frequency = <750000000>;	/* 750 MHZ */
@@ -24,7 +26,13 @@
 
 		ranges = <0x00000000 0xf0000000 0x10000000>;
 
-		cpu_intc: arc700-intc@cpu {
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <750000000>;
+		};
+
+		core_intc: arc700-intc@cpu {
 			compatible = "snps,arc700-intc";
 			interrupt-controller;
 			#interrupt-cells = <1>;
@@ -48,7 +56,7 @@
 				reg = <0>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
-				interrupt-parent = <&cpu_intc>;
+				interrupt-parent = <&core_intc>;
 				interrupts = <15>;
 			};
 		};
@@ -86,7 +94,7 @@
 		compatible = "snps,dw-apb-ictl";
 		reg = < 0xe0012000 0x200 >;
 		interrupt-controller;
-		interrupt-parent = <&cpu_intc>;
+		interrupt-parent = <&core_intc>;
 		interrupts = < 7 >;
 	};
 
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index f90fadf..cabe0de 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -10,6 +10,8 @@
  * Device tree for AXC003 CPU card: HS38x UP configuration
  */
 
+/include/ "skeleton_hs.dtsi"
+
 / {
 	compatible = "snps,arc";
 	clock-frequency = <90000000>;
@@ -23,7 +25,13 @@
 
 		ranges = <0x00000000 0xf0000000 0x10000000>;
 
-		cpu_intc: archs-intc@cpu {
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <90000000>;
+		};
+
+		core_intc: archs-intc@cpu {
 			compatible = "snps,archs-intc";
 			interrupt-controller;
 			#interrupt-cells = <1>;
@@ -47,7 +55,7 @@
 				reg = <0>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
-				interrupt-parent = <&cpu_intc>;
+				interrupt-parent = <&core_intc>;
 				interrupts = <25>;
 			};
 		};
@@ -66,7 +74,7 @@
 		arcpct0: pct {
 			compatible = "snps,archs-pct";
 			#interrupt-cells = <1>;
-			interrupt-parent = <&cpu_intc>;
+			interrupt-parent = <&core_intc>;
 			interrupts = <20>;
 		};
 	};
@@ -89,7 +97,7 @@
 		compatible = "snps,dw-apb-ictl";
 		reg = < 0xe0012000 0x200 >;
 		interrupt-controller;
-		interrupt-parent = <&cpu_intc>;
+		interrupt-parent = <&core_intc>;
 		interrupts = < 24 >;
 	};
 
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 06a9f29..ed1674b 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -10,6 +10,8 @@
  * Device tree for AXC003 CPU card: HS38x2 (Dual Core) with IDU intc
  */
 
+/include/ "skeleton_hs_idu.dtsi"
+
 / {
 	compatible = "snps,arc";
 	clock-frequency = <90000000>;
@@ -23,7 +25,13 @@
 
 		ranges = <0x00000000 0xf0000000 0x10000000>;
 
-		cpu_intc: archs-intc@cpu {
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <100000000>;
+		};
+
+		core_intc: archs-intc@cpu {
 			compatible = "snps,archs-intc";
 			interrupt-controller;
 			#interrupt-cells = <1>;
@@ -32,7 +40,7 @@
 		idu_intc: idu-interrupt-controller {
 			compatible = "snps,archs-idu-intc";
 			interrupt-controller;
-			interrupt-parent = <&cpu_intc>;
+			interrupt-parent = <&core_intc>;
 
 			/*
 			 * <hwirq  distribution>
@@ -89,7 +97,7 @@
 		arcpct0: pct {
 			compatible = "snps,archs-pct";
 			#interrupt-cells = <1>;
-			interrupt-parent = <&cpu_intc>;
+			interrupt-parent = <&core_intc>;
 			interrupts = <20>;
 		};
 	};
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index ab5d570..68c84a2 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -16,7 +16,20 @@
 		ranges = <0x00000000 0xe0000000 0x10000000>;
 		interrupt-parent = <&mb_intc>;
 
+		i2sclk: i2sclk@100a0 {
+			compatible = "snps,axs10x-i2s-pll-clock";
+			reg = <0x100a0 0x10>;
+			clocks = <&i2spll_clk>;
+			#clock-cells = <0>;
+		};
+
 		clocks {
+			i2spll_clk: i2spll_clk {
+				compatible = "fixed-clock";
+				clock-frequency = <27000000>;
+				#clock-cells = <0>;
+			};
+
 			i2cclk: i2cclk {
 				compatible = "fixed-clock";
 				clock-frequency = <50000000>;
@@ -47,14 +60,6 @@
 			clocks = <&apbclk>;
 			clock-names = "stmmaceth";
 			max-speed = <100>;
-			mdio0 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				compatible = "snps,dwmac-mdio";
-				phy1: ethernet-phy@1 {
-					reg = <1>;
-				};
-			};
 		};
 
 		ehci@0x40000 {
diff --git a/arch/arc/boot/dts/eznps.dts b/arch/arc/boot/dts/eznps.dts
new file mode 100644
index 0000000..b89f6c3
--- /dev/null
+++ b/arch/arc/boot/dts/eznps.dts
@@ -0,0 +1,96 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+/dts-v1/;
+
+/ {
+	compatible = "ezchip,arc-nps";
+	clock-frequency = <83333333>;	/* 83.333333 MHz */
+	#address-cells = <1>;
+	#size-cells = <1>;
+	interrupt-parent = <&intc>;
+	present-cpus = "0-1,16-17";
+	possible-cpus = "0-4095";
+
+	aliases {
+		ethernet0 = &gmac0;
+	};
+
+	chosen {
+		bootargs = "earlycon=uart8250,mmio32be,0xf7209000,115200n8 console=ttyS0,115200n8";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x20000000>;	/* 512M */
+	};
+
+	clocks {
+		sysclk: sysclk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <83333333>;
+		};
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* child and parent address space 1:1 mapped */
+		ranges;
+
+		intc: interrupt-controller {
+			compatible = "ezchip,nps400-ic";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+		};
+
+		timer0: timer_clkevt {
+			compatible = "snps,arc-timer";
+			interrupts = <3>;
+			clocks = <&sysclk>;
+		};
+
+		timer1: timer_clksrc {
+			compatible = "ezchip,nps400-timer";
+			clocks = <&sysclk>;
+			clock-names="sysclk";
+		};
+
+		uart@f7209000 {
+			compatible = "snps,dw-apb-uart";
+			device_type = "serial";
+			reg = <0xf7209000 0x100>;
+			interrupts = <6>;
+			clocks = <&sysclk>;
+			clock-names="baudclk";
+			baud = <115200>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			native-endian;
+		};
+
+		gmac0: ethernet@f7470000 {
+			compatible = "ezchip,nps-mgt-enet";
+			reg = <0xf7470000 0x1940>;
+			interrupts = <7>;
+			/* Filled in by U-Boot */
+			mac-address = [ 00 C0 00 F0 04 03 ];
+		};
+	};
+};
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index 105a001..5d5e373 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -14,7 +14,7 @@
 	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
-	interrupt-parent = <&intc>;
+	interrupt-parent = <&core_intc>;
 
 	chosen {
 		bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
@@ -32,7 +32,13 @@
 		/* child and parent address space 1:1 mapped */
 		ranges;
 
-		intc: interrupt-controller {
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <80000000>;
+		};
+
+		core_intc: interrupt-controller {
 			compatible = "snps,arc700-intc";
 			interrupt-controller;
 			#interrupt-cells = <1>;
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts
index f46633e..bf05fe5 100644
--- a/arch/arc/boot/dts/nsim_hs.dts
+++ b/arch/arc/boot/dts/nsim_hs.dts
@@ -7,7 +7,7 @@
  */
 /dts-v1/;
 
-/include/ "skeleton.dtsi"
+/include/ "skeleton_hs.dtsi"
 
 / {
 	compatible = "snps,nsim_hs";
@@ -39,6 +39,12 @@
 			 bus addr,   parent bus addr, size */
 		ranges = <0x80000000 0x0 0x80000000 0x80000000>;
 
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <80000000>;
+		};
+
 		core_intc: core-interrupt-controller {
 			compatible = "snps,archs-intc";
 			interrupt-controller;
diff --git a/arch/arc/boot/dts/nsim_hs_idu.dts b/arch/arc/boot/dts/nsim_hs_idu.dts
index 46ab319..99eabe1 100644
--- a/arch/arc/boot/dts/nsim_hs_idu.dts
+++ b/arch/arc/boot/dts/nsim_hs_idu.dts
@@ -7,7 +7,7 @@
  */
 /dts-v1/;
 
-/include/ "skeleton.dtsi"
+/include/ "skeleton_hs_idu.dtsi"
 
 / {
 	compatible = "snps,nsim_hs";
@@ -29,6 +29,12 @@
 		/* child and parent address space 1:1 mapped */
 		ranges;
 
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <80000000>;
+		};
+
 		core_intc: core-interrupt-controller {
 			compatible = "snps,archs-intc";
 			interrupt-controller;
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index d94b4ce..b5b060a 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -14,7 +14,7 @@
 	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
-	interrupt-parent = <&intc>;
+	interrupt-parent = <&core_intc>;
 
 	chosen {
 		/* this is for console on PGU */
@@ -35,7 +35,13 @@
 		/* child and parent address space 1:1 mapped */
 		ranges;
 
-		intc: interrupt-controller {
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <20000000>;
+		};
+
+		core_intc: interrupt-controller {
 			compatible = "snps,arc700-intc";
 			interrupt-controller;
 			#interrupt-cells = <1>;
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
index 034a313..325e730 100644
--- a/arch/arc/boot/dts/nsimosci_hs.dts
+++ b/arch/arc/boot/dts/nsimosci_hs.dts
@@ -7,7 +7,7 @@
  */
 /dts-v1/;
 
-/include/ "skeleton.dtsi"
+/include/ "skeleton_hs.dtsi"
 
 / {
 	compatible = "snps,nsimosci_hs";
@@ -35,6 +35,12 @@
 		/* child and parent address space 1:1 mapped */
 		ranges;
 
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <20000000>;
+		};
+
 		core_intc: core-interrupt-controller {
 			compatible = "snps,archs-intc";
 			interrupt-controller;
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
index 8a1297e..ee03d71 100644
--- a/arch/arc/boot/dts/nsimosci_hs_idu.dts
+++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts
@@ -7,7 +7,7 @@
  */
 /dts-v1/;
 
-/include/ "skeleton.dtsi"
+/include/ "skeleton_hs_idu.dtsi"
 
 / {
 	compatible = "snps,nsimosci_hs";
@@ -33,6 +33,12 @@
 		/* child and parent address space 1:1 mapped */
 		ranges;
 
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <5000000>;
+		};
+
 		core_intc: core-interrupt-controller {
 			compatible = "snps,archs-intc";
 			interrupt-controller;
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index 296d371..3a10cc6 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -30,6 +30,20 @@
 		};
 	};
 
+	/* TIMER0 with interrupt for clockevent */
+	timer0 {
+		compatible = "snps,arc-timer";
+		interrupts = <3>;
+		interrupt-parent = <&core_intc>;
+		clocks = <&core_clk>;
+	};
+
+	/* TIMER1 for free running clocksource */
+	timer1 {
+		compatible = "snps,arc-timer";
+		clocks = <&core_clk>;
+	};
+
 	memory {
 		device_type = "memory";
 		reg = <0x80000000 0x10000000>;	/* 256M */
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
new file mode 100644
index 0000000..71fd308
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+	compatible = "snps,arc";
+	clock-frequency = <80000000>;	/* 80 MHz */
+	#address-cells = <1>;
+	#size-cells = <1>;
+	chosen { };
+	aliases { };
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "snps,archs38";
+			reg = <0>;
+		};
+	};
+
+	/* TIMER0 with interrupt for clockevent */
+	timer0 {
+		compatible = "snps,arc-timer";
+		interrupts = <16>;
+		interrupt-parent = <&core_intc>;
+		clocks = <&core_clk>;
+	};
+
+	/* 64-bit Local RTC: preferred clocksource for UP */
+	rtc {
+		compatible = "snps,archs-timer-rtc";
+		clocks = <&core_clk>;
+	};
+
+	/* TIMER1 for free running clocksource: Fallback if rtc not found */
+	timer1 {
+		compatible = "snps,arc-timer";
+		clocks = <&core_clk>;
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x10000000>;	/* 256M */
+	};
+};
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
new file mode 100644
index 0000000..d1cb25a
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+	compatible = "snps,arc";
+	clock-frequency = <80000000>;	/* 80 MHz */
+	#address-cells = <1>;
+	#size-cells = <1>;
+	chosen { };
+	aliases { };
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "snps,archs38xN";
+			reg = <0>;
+		};
+	};
+
+	/* TIMER0 with interrupt for clockevent */
+	timer0 {
+		compatible = "snps,arc-timer";
+		interrupts = <16>;
+		interrupt-parent = <&core_intc>;
+		clocks = <&core_clk>;
+	};
+
+	/* 64-bit Global Free Running Counter */
+	gfrc {
+		compatible = "snps,archs-timer-gfrc";
+		clocks = <&core_clk>;
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x10000000>;	/* 256M */
+	};
+};
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index 84226bd..ad4ee43 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -10,6 +10,8 @@
  * Device tree for AXC003 CPU card: HS38x UP configuration (VDK version)
  */
 
+/include/ "skeleton_hs.dtsi"
+
 / {
 	compatible = "snps,arc";
 	clock-frequency = <50000000>;
@@ -23,7 +25,13 @@
 
 		ranges = <0x00000000 0xf0000000 0x10000000>;
 
-		cpu_intc: archs-intc@cpu {
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <50000000>;
+		};
+
+		core_intc: archs-intc@cpu {
 			compatible = "snps,archs-intc";
 			interrupt-controller;
 			#interrupt-cells = <1>;
@@ -33,7 +41,7 @@
 			compatible = "snps,dw-apb-uart";
 			reg = <0x5000 0x100>;
 			clock-frequency = <2403200>;
-			interrupt-parent = <&cpu_intc>;
+			interrupt-parent = <&core_intc>;
 			interrupts = <19>;
 			baud = <115200>;
 			reg-shift = <2>;
@@ -47,7 +55,7 @@
 		compatible = "snps,dw-apb-ictl";
 		reg = < 0xe0012000 0x200 >;
 		interrupt-controller;
-		interrupt-parent = <&cpu_intc>;
+		interrupt-parent = <&core_intc>;
 		interrupts = < 18 >;
 	};
 
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index 31f0fb5..a3cb626 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -11,6 +11,8 @@
  * HS38x2 (Dual Core) with IDU intc (VDK version)
  */
 
+/include/ "skeleton_hs_idu.dtsi"
+
 / {
 	compatible = "snps,arc";
 	clock-frequency = <50000000>;
@@ -24,7 +26,13 @@
 
 		ranges = <0x00000000 0xf0000000 0x10000000>;
 
-		cpu_intc: archs-intc@cpu {
+		core_clk: core_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <50000000>;
+		};
+
+		core_intc: archs-intc@cpu {
 			compatible = "snps,archs-intc";
 			interrupt-controller;
 			#interrupt-cells = <1>;
@@ -33,7 +41,7 @@
 		idu_intc: idu-interrupt-controller {
 			compatible = "snps,archs-idu-intc";
 			interrupt-controller;
-			interrupt-parent = <&cpu_intc>;
+			interrupt-parent = <&core_intc>;
 
 			/*
 			 * <hwirq  distribution>
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index f8b396c..491b3b5 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -42,6 +42,7 @@
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 56128ea..b25ee73 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -43,6 +43,7 @@
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_LOOP=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
new file mode 100644
index 0000000..ede625c
--- /dev/null
+++ b/arch/arc/configs/nps_defconfig
@@ -0,0 +1,84 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_EZNPS=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4096
+CONFIG_ARC_CACHE_LINE_SHIFT=5
+# CONFIG_ARC_CACHE_PAGES is not set
+# CONFIG_ARC_HAS_LLSC is not set
+CONFIG_ARC_KVADDR_SIZE=402
+CONFIG_ARC_EMUL_UNALIGNED=y
+CONFIG_ARC_UBOOT_SUPPORT=y
+CONFIG_PREEMPT=y
+CONFIG_NET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=2048
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 7730d30..5f3dcbb 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -17,6 +17,8 @@
 #include <asm/barrier.h>
 #include <asm/smp.h>
 
+#ifndef CONFIG_ARC_PLAT_EZNPS
+
 #define atomic_read(v)  READ_ONCE((v)->counter)
 
 #ifdef CONFIG_ARC_HAS_LLSC
@@ -180,13 +182,88 @@
 ATOMIC_OP(or, |=, or)
 ATOMIC_OP(xor, ^=, xor)
 
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
 #undef SCOND_FAIL_RETRY_VAR_DEF
 #undef SCOND_FAIL_RETRY_ASM
 #undef SCOND_FAIL_RETRY_VARS
 
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline int atomic_read(const atomic_t *v)
+{
+	int temp;
+
+	__asm__ __volatile__(
+	"	ld.di %0, [%1]"
+	: "=r"(temp)
+	: "r"(&v->counter)
+	: "memory");
+	return temp;
+}
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	__asm__ __volatile__(
+	"	st.di %0,[%1]"
+	:
+	: "r"(i), "r"(&v->counter)
+	: "memory");
+}
+
+#define ATOMIC_OP(op, c_op, asm_op)					\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	__asm__ __volatile__(						\
+	"	mov r2, %0\n"						\
+	"	mov r3, %1\n"						\
+	"       .word %2\n"						\
+	:								\
+	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
+	: "r2", "r3", "memory");					\
+}									\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned int temp = i;						\
+									\
+	/* Explicit full memory barrier needed before/after */		\
+	smp_mb();							\
+									\
+	__asm__ __volatile__(						\
+	"	mov r2, %0\n"						\
+	"	mov r3, %1\n"						\
+	"       .word %2\n"						\
+	"	mov %0, r2"						\
+	: "+r"(temp)							\
+	: "r"(&v->counter), "i"(asm_op)					\
+	: "r2", "r3", "memory");					\
+									\
+	smp_mb();							\
+									\
+	temp c_op i;							\
+									\
+	return temp;							\
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
+#define atomic_sub(i, v) atomic_add(-(i), (v))
+#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
+
+ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
+#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
+ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
+ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 /**
  * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
index a720998..b1e3274 100644
--- a/arch/arc/include/asm/barrier.h
+++ b/arch/arc/include/asm/barrier.h
@@ -30,9 +30,7 @@
 #define rmb()	asm volatile("dmb 1\n" : : : "memory")
 #define wmb()	asm volatile("dmb 2\n" : : : "memory")
 
-#endif
-
-#ifdef CONFIG_ISA_ARCOMPACT
+#elif !defined(CONFIG_ARC_PLAT_EZNPS)  /* CONFIG_ISA_ARCOMPACT */
 
 /*
  * ARCompact based cores (ARC700) only have SYNC instruction which is super
@@ -41,6 +39,14 @@
  */
 
 #define mb()	asm volatile("sync\n" : : : "memory")
+
+#else	/* CONFIG_ARC_PLAT_EZNPS */
+
+#include <plat/ctop.h>
+
+#define mb()	asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
+#define rmb()	asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RD) : "memory")
+
 #endif
 
 #include <asm-generic/barrier.h>
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 0352fb8..8da87fe 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -22,7 +22,7 @@
 #include <asm/smp.h>
 #endif
 
-#if defined(CONFIG_ARC_HAS_LLSC)
+#ifdef CONFIG_ARC_HAS_LLSC
 
 /*
  * Hardware assisted Atomic-R-M-W
@@ -88,7 +88,7 @@
 	return (old & (1 << nr)) != 0;					\
 }
 
-#else	/* !CONFIG_ARC_HAS_LLSC */
+#elif !defined(CONFIG_ARC_PLAT_EZNPS)
 
 /*
  * Non hardware assisted Atomic-R-M-W
@@ -139,7 +139,55 @@
 	return (old & (1UL << (nr & 0x1f))) != 0;			\
 }
 
-#endif /* CONFIG_ARC_HAS_LLSC */
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+#define BIT_OP(op, c_op, asm_op)					\
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	m += nr >> 5;							\
+									\
+	nr = (1UL << (nr & 0x1f));					\
+	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
+		nr = ~nr;						\
+									\
+	__asm__ __volatile__(						\
+	"	mov r2, %0\n"						\
+	"	mov r3, %1\n"						\
+	"	.word %2\n"						\
+	:								\
+	: "r"(nr), "r"(m), "i"(asm_op)					\
+	: "r2", "r3", "memory");					\
+}
+
+#define TEST_N_BIT_OP(op, c_op, asm_op)					\
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long old;						\
+									\
+	m += nr >> 5;							\
+									\
+	nr = old = (1UL << (nr & 0x1f));				\
+	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
+		old = ~old;						\
+									\
+	/* Explicit full memory barrier needed before/after */		\
+	smp_mb();							\
+									\
+	__asm__ __volatile__(						\
+	"	mov r2, %0\n"						\
+	"	mov r3, %1\n"						\
+	"       .word %2\n"						\
+	"	mov %0, r2"						\
+	: "+r"(old)							\
+	: "r"(m), "i"(asm_op)						\
+	: "r2", "r3", "memory");					\
+									\
+	smp_mb();							\
+									\
+	return (old & nr) != 0;					\
+}
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
 
 /***************************************
  * Non atomic variants
@@ -181,9 +229,15 @@
 	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
 	__TEST_N_BIT_OP(op, c_op, asm_op)
 
+#ifndef CONFIG_ARC_PLAT_EZNPS
 BIT_OPS(set, |, bset)
 BIT_OPS(clear, & ~, bclr)
 BIT_OPS(change, ^, bxor)
+#else
+BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3)
+BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3)
+BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
+#endif
 
 /*
  * This routine doesn't need to be atomic.
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
deleted file mode 100644
index bf9d29f..0000000
--- a/arch/arc/include/asm/clk.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_CLK_H
-#define _ASM_ARC_CLK_H
-
-/* Although we can't really hide core_freq, the accessor is still better way */
-extern unsigned long core_freq;
-
-static inline unsigned long arc_get_core_freq(void)
-{
-	return core_freq;
-}
-
-extern int arc_set_core_freq(unsigned long);
-
-#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index a444be6..d819de1 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@
 	return prev;
 }
 
-#else
+#elif !defined(CONFIG_ARC_PLAT_EZNPS)
 
 static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
@@ -64,23 +64,48 @@
 	return prev;
 }
 
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+	/*
+	 * Explicit full memory barrier needed before/after
+	 */
+	smp_mb();
+
+	write_aux_reg(CTOP_AUX_GPA1, expected);
+
+	__asm__ __volatile__(
+	"	mov r2, %0\n"
+	"	mov r3, %1\n"
+	"	.word %2\n"
+	"	mov %0, r2"
+	: "+r"(new)
+	: "r"(ptr), "i"(CTOP_INST_EXC_DI_R2_R2_R3)
+	: "r2", "r3", "memory");
+
+	smp_mb();
+
+	return new;
+}
+
 #endif /* CONFIG_ARC_HAS_LLSC */
 
 #define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
 				(unsigned long)(o), (unsigned long)(n)))
 
 /*
- * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
- * just to gaurantee semantics.
- * atomic_cmpxchg() needs to use the same locks as it's other atomic siblings
- * which also happens to be atomic_ops_lock.
- *
- * Thus despite semantically being different, implementation of atomic_cmpxchg()
- * is same as cmpxchg().
+ * atomic_cmpxchg() is the same as cmpxchg():
+ *   LLSC: only differs in data type; semantics are exactly the same
+ *  !LLSC: cmpxchg() has to use an external lock, atomic_ops_lock, to guarantee
+ *         semantics, and this lock also happens to be used by atomic_*()
  */
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
 
+#ifndef CONFIG_ARC_PLAT_EZNPS
+
 /*
  * xchg (reg with memory) based on "Native atomic" EX insn
  */
@@ -143,6 +168,41 @@
 
 #endif
 
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+				   int size)
+{
+	extern unsigned long __xchg_bad_pointer(void);
+
+	switch (size) {
+	case 4:
+		/*
+		 * Explicit full memory barrier needed before/after
+		 */
+		smp_mb();
+
+		__asm__ __volatile__(
+		"	mov r2, %0\n"
+		"	mov r3, %1\n"
+		"	.word %2\n"
+		"	mov %0, r2\n"
+		: "+r"(val)
+		: "r"(ptr), "i"(CTOP_INST_XEX_DI_R2_R2_R3)
+		: "r2", "r3", "memory");
+
+		smp_mb();
+
+		return val;
+	}
+	return __xchg_bad_pointer();
+}
+
+#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+						 sizeof(*(ptr))))
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
+
 /*
  * "atomic" variant of xchg()
  * REQ: It needs to follow the same serialization rules as other atomic_xxx()
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 1d8f57c..e0e1faf 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -36,6 +36,10 @@
 #include <asm/irqflags-compact.h>
 #include <asm/thread_info.h>	/* For THREAD_SIZE */
 
+#ifdef CONFIG_ARC_PLAT_EZNPS
+#include <plat/ctop.h>
+#endif
+
 /*--------------------------------------------------------------
  * Switch to Kernel Mode stack if SP points to User Mode stack
  *
@@ -296,11 +300,13 @@
 	bic \reg, sp, (THREAD_SIZE - 1)
 .endm
 
+#ifndef CONFIG_ARC_PLAT_EZNPS
 /* Get CPU-ID of this core */
 .macro  GET_CPU_ID  reg
 	lr  \reg, [identity]
 	lsr \reg, \reg, 8
 	bmsk \reg, \reg, 7
 .endm
+#endif
 
 #endif  /* __ASM_ARC_ENTRY_COMPACT_H */
diff --git a/arch/arc/include/asm/fb.h b/arch/arc/include/asm/fb.h
new file mode 100644
index 0000000..bd3f68c
--- /dev/null
+++ b/arch/arc/include/asm/fb.h
@@ -0,0 +1,19 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+				unsigned long off)
+{
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+	return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 17f85c9..c22b181 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -13,6 +13,15 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb()		rmb()
+#define __iowmb()		wmb()
+#else
+#define __iormb()		do { } while (0)
+#define __iowmb()		do { } while (0)
+#endif
+
 extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
 extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 				  unsigned long flags);
@@ -31,6 +40,15 @@
 #define ioremap_wc(phy, sz)		ioremap(phy, sz)
 #define ioremap_wt(phy, sz)		ioremap(phy, sz)
 
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
 /* Change struct page to physical address */
 #define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)
 
@@ -108,15 +126,6 @@
 
 }
 
-#ifdef CONFIG_ISA_ARCV2
-#include <asm/barrier.h>
-#define __iormb()		rmb()
-#define __iowmb()		wmb()
-#else
-#define __iormb()		do { } while (0)
-#define __iowmb()		do { } while (0)
-#endif
-
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
  * Based on ARM model for the typical use case
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 49014f0..c0fa0d2 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -13,21 +13,14 @@
 #define NR_IRQS		128 /* allow some CPU external IRQ handling */
 
 /* Platform Independent IRQs */
-#ifdef CONFIG_ISA_ARCOMPACT
-#define TIMER0_IRQ      3
-#define TIMER1_IRQ      4
-#else
-#define TIMER0_IRQ      16
-#define TIMER1_IRQ      17
+#ifdef CONFIG_ISA_ARCV2
+#define IPI_IRQ		19
+#define SOFTIRQ_IRQ	21
 #endif
 
 #include <linux/interrupt.h>
 #include <asm-generic/irq.h>
 
 extern void arc_init_IRQ(void);
-void arc_local_timer_setup(void);
-void arc_request_percpu_irq(int irq, int cpu,
-                            irqreturn_t (*isr)(int irq, void *dev),
-                            const char *irq_nm, void *percpu_dev);
 
 #endif
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 37c2f75..d1ec7f6 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -18,6 +18,12 @@
 #define STATUS_AD_MASK		(1<<STATUS_AD_BIT)
 #define STATUS_IE_MASK		(1<<STATUS_IE_BIT)
 
+/* status32 Bits as encoded/expected by CLRI/SETI */
+#define CLRI_STATUS_IE_BIT	4
+
+#define CLRI_STATUS_E_MASK	0xF
+#define CLRI_STATUS_IE_MASK	(1 << CLRI_STATUS_IE_BIT)
+
 #define AUX_USER_SP		0x00D
 #define AUX_IRQ_CTRL		0x00E
 #define AUX_IRQ_ACT		0x043	/* Active Intr across all levels */
@@ -100,6 +106,13 @@
 	:
 	: "memory");
 
+	/* To be compatible with irq_save()/irq_restore()
+	 * encode the irq bits as expected by CLRI/SETI
+	 * (this was needed to make CONFIG_TRACE_IRQFLAGS work)
+	 */
+	temp = (1 << 5) |
+		((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
+		(temp & CLRI_STATUS_E_MASK);
 	return temp;
 }
 
@@ -108,7 +121,7 @@
  */
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & (STATUS_IE_MASK));
+	return !(flags & CLRI_STATUS_IE_MASK);
 }
 
 static inline int arch_irqs_disabled(void)
@@ -128,11 +141,32 @@
 
 #else
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+	bl	trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+	bl	trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
 .macro IRQ_DISABLE  scratch
 	clri
+	TRACE_ASM_IRQ_DISABLE
 .endm
 
 .macro IRQ_ENABLE  scratch
+	TRACE_ASM_IRQ_ENABLE
 	seti
 .endm
 
diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h
new file mode 100644
index 0000000..8e97136
--- /dev/null
+++ b/arch/arc/include/asm/mmzone.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMZONE_H
+#define _ASM_ARC_MMZONE_H
+
+#ifdef CONFIG_DISCONTIGMEM
+
+extern struct pglist_data node_data[];
+#define NODE_DATA(nid) (&node_data[nid])
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+	int is_end_low = 1;
+
+	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40))
+		is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
+
+	/*
+	 * node 0: lowmem:             0x8000_0000   to 0xFFFF_FFFF
+	 * node 1: HIGHMEM w/o  PAE40: 0x0           to 0x7FFF_FFFF
+	 *         HIGHMEM with PAE40: 0x1_0000_0000 to ...
+	 */
+	if (pfn >= ARCH_PFN_OFFSET && is_end_low)
+		return 0;
+
+	return 1;
+}
+
+static inline int pfn_valid(unsigned long pfn)
+{
+	int nid = pfn_to_nid(pfn);
+
+	return (pfn <= node_end_pfn(nid));
+}
+#endif /* CONFIG_DISCONTIGMEM  */
+
+#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 36da89e..296c342 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -31,7 +31,11 @@
  * These are used to make use of C type-checking..
  */
 typedef struct {
+#ifdef CONFIG_ARC_HAS_PAE40
+	unsigned long long pte;
+#else
 	unsigned long pte;
+#endif
 } pte_t;
 typedef struct {
 	unsigned long pgd;
@@ -72,11 +76,20 @@
 
 typedef pte_t * pgtable_t;
 
+/*
+ * Use virt_to_pfn with caution:
+ * If used in pte or paddr related macros, it could cause truncation
+ * in PAE40 builds
+ * As a rule of thumb, only use it in helpers starting with virt_
+ * You have been warned !
+ */
 #define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
 
 #define ARCH_PFN_OFFSET		virt_to_pfn(CONFIG_LINUX_LINK_BASE)
 
+#ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn)		(((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#endif
 
 /*
  * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
@@ -85,12 +98,10 @@
  * virt here means link-address/program-address as embedded in object code.
  * And for ARC, link-addr = physical address
  */
-#define __pa(vaddr)  ((unsigned long)vaddr)
+#define __pa(vaddr)  ((unsigned long)(vaddr))
 #define __va(paddr)  ((void *)((unsigned long)(paddr)))
 
-#define virt_to_page(kaddr)	\
-	(mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE))
-
+#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 7d6c93e..034bbdc 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -217,7 +217,7 @@
 #define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)
 #define BITS_FOR_PGD	(32 - PGDIR_SHIFT)
 
-#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PDG sz */
+#define PGDIR_SIZE	_BITUL(PGDIR_SHIFT)	/* vaddr span, not PDG sz */
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
 #define	PTRS_PER_PTE	_BITUL(BITS_FOR_PTE)
@@ -278,14 +278,13 @@
 #define pmd_present(x)			(pmd_val(x))
 #define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
 
-#define pte_page(pte)	\
-	(mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
-
+#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte)		virt_to_pfn(pte_val(pte))
-#define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
-				 pgprot_val(prot)))
-#define __pte_index(addr)	(virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
+#define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+/* Don't use virt_to_pfn for the macros below: could cause truncations for PAE40 */
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
 /*
  * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 1d694c1..f9048994 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -57,9 +57,19 @@
  * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise
  * get optimised away by gcc
  */
-#define cpu_relax()	__asm__ __volatile__ ("" : : : "memory")
+#ifndef CONFIG_EZNPS_MTM_EXT
 
-#define cpu_relax_lowlatency() cpu_relax()
+#define cpu_relax()		barrier()
+#define cpu_relax_lowlatency()	cpu_relax()
+
+#else
+
+#define cpu_relax()     \
+	__asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
+
+#define cpu_relax_lowlatency()	barrier()
+
+#endif
 
 #define copy_segments(tsk, mm)      do { } while (0)
 #define release_segments(mm)        do { } while (0)
@@ -97,7 +107,7 @@
 #endif /* !__ASSEMBLY__ */
 
 /*
- * System Memory Map on ARC
+ * Default System Memory Map on ARC
  *
  * ---------------------------- (lower 2G, Translated) -------------------------
  * 0x0000_0000		0x5FFF_FFFF	(user vaddr: TASK_SIZE)
@@ -109,20 +119,37 @@
  * 0xC000_0000		0xFFFF_FFFF	(peripheral uncached space)
  * -----------------------------------------------------------------------------
  */
-#define VMALLOC_START	0x70000000
 
-/*
- * 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter
- * See asm/highmem.h for details
- */
-#define VMALLOC_SIZE	(PAGE_OFFSET - VMALLOC_START - PGDIR_SIZE * 4)
+#define TASK_SIZE	0x60000000
+
+#define VMALLOC_START	(PAGE_OFFSET - (CONFIG_ARC_KVADDR_SIZE << 20))
+
+/* 1 PGDIR_SIZE each for fixmap/pkmap, 2 PGDIR_SIZE gutter (see asm/highmem.h) */
+#define VMALLOC_SIZE	((CONFIG_ARC_KVADDR_SIZE << 20) - PGDIR_SIZE * 4)
+
 #define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
 
-#define USER_KERNEL_GUTTER    0x10000000
+#define USER_KERNEL_GUTTER    (VMALLOC_START - TASK_SIZE)
 
-#define TASK_SIZE	(VMALLOC_START - USER_KERNEL_GUTTER)
-
+#ifdef CONFIG_ARC_PLAT_EZNPS
+/* The NPS architecture defines a special 129M window in the user address
+ * space for special memory areas; when accessing this window the MMU does
+ * not use the TLB. Instead the MMU directs the access to:
+ * 0x57f00000:0x57ffffff -- 1M of closely coupled memory (aka CMEM)
+ * 0x58000000:0x5fffffff -- 16 huge pages, 8M each, with fixed map (aka FMTs)
+ *
+ * CMEM - the fastest memory available; its size is 16K.
+ * FMT  - used to map to either internal or external memory.
+ * Internal memory is the second fastest and its size is 16M.
+ * External memory is the biggest (16G) and also the slowest.
+ *
+ * STACK_TOP needs to be PMD aligned (21 bits), which is why we use 0x57e00000.
+ */
+#define STACK_TOP       0x57e00000
+#else
 #define STACK_TOP       TASK_SIZE
+#endif
+
 #define STACK_TOP_MAX   STACK_TOP
 
 /* This decides where the kernel will search for a free chunk of vm
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 3078466..48b37c6 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -12,7 +12,11 @@
 #include <linux/types.h>
 #include <uapi/asm/setup.h>
 
+#ifdef CONFIG_ARC_PLAT_EZNPS
+#define COMMAND_LINE_SIZE 2048
+#else
 #define COMMAND_LINE_SIZE 256
+#endif
 
 /*
  * Data structure to map a ID to string
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index db8c59d..800e7c4 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -610,7 +610,9 @@
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	arch_spin_lock(&(rw->lock_mutex));
 
 	/*
@@ -623,6 +625,7 @@
 	}
 
 	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 
 	smp_mb();
 	return ret;
@@ -632,7 +635,9 @@
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	arch_spin_lock(&(rw->lock_mutex));
 
 	/*
@@ -646,6 +651,7 @@
 		ret = 1;
 	}
 	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -664,16 +670,24 @@
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	arch_spin_lock(&(rw->lock_mutex));
 	rw->counter++;
 	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	arch_spin_lock(&(rw->lock_mutex));
 	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
 	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 }
 
 #endif
diff --git a/arch/arc/include/uapi/asm/byteorder.h b/arch/arc/include/uapi/asm/byteorder.h
index 9da71d4..ea5ca444 100644
--- a/arch/arc/include/uapi/asm/byteorder.h
+++ b/arch/arc/include/uapi/asm/byteorder.h
@@ -9,7 +9,7 @@
 #ifndef __ASM_ARC_BYTEORDER_H
 #define __ASM_ARC_BYTEORDER_H
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
+#ifdef __BIG_ENDIAN__
 #include <linux/byteorder/big_endian.h>
 #else
 #include <linux/byteorder/little_endian.h>
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
index 1bc2036..cfcdedf 100644
--- a/arch/arc/kernel/Makefile
+++ b/arch/arc/kernel/Makefile
@@ -9,7 +9,7 @@
 CFLAGS_ptrace.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 obj-y	:= arcksyms.o setup.o irq.o time.o reset.o ptrace.o process.o devtree.o
-obj-y	+= signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
+obj-y	+= signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o
 obj-$(CONFIG_ISA_ARCOMPACT)		+= entry-compact.o intc-compact.o
 obj-$(CONFIG_ISA_ARCV2)			+= entry-arcv2.o intc-arcv2.o
 obj-$(CONFIG_PCI)  			+= pcibios.o
diff --git a/arch/arc/kernel/clk.c b/arch/arc/kernel/clk.c
deleted file mode 100644
index 10c7b0b..0000000
--- a/arch/arc/kernel/clk.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <asm/clk.h>
-
-unsigned long core_freq = 80000000;
-
-/*
- * As of now we default to device-tree provided clock
- * In future we can determine this in early boot
- */
-int arc_set_core_freq(unsigned long freq)
-{
-	core_freq = freq;
-	return 0;
-}
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index 5d446df..6f4cb0d 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -16,6 +16,9 @@
 
 #include <asm/asm-offsets.h>
 #include <linux/sched.h>
+#ifdef CONFIG_ARC_PLAT_EZNPS
+#include <plat/ctop.h>
+#endif
 
 #define KSP_WORD_OFF 	((TASK_THREAD + THREAD_KSP) / 4)
 
@@ -67,9 +70,16 @@
 #ifndef CONFIG_SMP
 		"st  %2, [@_current_task]	\n\t"
 #else
+#ifdef CONFIG_ARC_PLAT_EZNPS
+		"lr   r24, [%4]		\n\t"
+#ifndef CONFIG_EZNPS_MTM_EXT
+		"lsr  r24, r24, 4		\n\t"
+#endif
+#else
 		"lr   r24, [identity]		\n\t"
 		"lsr  r24, r24, 8		\n\t"
 		"bmsk r24, r24, 7		\n\t"
+#endif
 		"add2 r24, @_current_task, r24	\n\t"
 		"st   %2,  [r24]		\n\t"
 #endif
@@ -107,6 +117,9 @@
 
 		: "=r"(tmp)
 		: "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
+#ifdef CONFIG_ARC_PLAT_EZNPS
+		, "i"(CTOP_AUX_LOGIC_GLOBAL_ID)
+#endif
 		: "blink"
 	);
 
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index 7e844fd..f1e07c2 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -14,7 +14,6 @@
 #include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
-#include <asm/clk.h>
 #include <asm/mach_desc.h>
 
 #ifdef CONFIG_SERIAL_EARLYCON
@@ -28,14 +27,12 @@
 
 static void __init arc_set_early_base_baud(unsigned long dt_root)
 {
-	unsigned int core_clk = arc_get_core_freq();
-
 	if (of_flat_dt_is_compatible(dt_root, "abilis,arc-tb10x"))
-		arc_base_baud = core_clk/3;
+		arc_base_baud = 166666666;	/* Fixed 166.6MHz clk (TB10x) */
 	else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp"))
 		arc_base_baud = 33333333;	/* Fixed 33MHz clk (AXS10x) */
 	else
-		arc_base_baud = core_clk;
+		arc_base_baud = 50000000;	/* Fixed default 50MHz */
 }
 #else
 #define arc_set_early_base_baud(dt_root)
@@ -65,8 +62,6 @@
 {
 	const struct machine_desc *mdesc;
 	unsigned long dt_root;
-	const void *clk;
-	int len;
 
 	if (!early_init_dt_scan(dt))
 		return NULL;
@@ -76,10 +71,6 @@
 		machine_halt();
 
 	dt_root = of_get_flat_dt_root();
-	clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
-	if (clk)
-		arc_set_core_freq(of_read_ulong(clk, len/4));
-
 	arc_set_early_base_baud(dt_root);
 
 	return mdesc;
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index c126460..7a1c124 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -69,8 +69,11 @@
 
 	clri		; To make status32.IE agree with CPU internal state
 
-	lr  r0, [ICAUSE]
+#ifdef CONFIG_TRACE_IRQFLAGS
+	TRACE_ASM_IRQ_DISABLE
+#endif
 
+	lr  r0, [ICAUSE]
 	mov   blink, ret_from_exception
 
 	b.d  arch_do_IRQ
@@ -169,6 +172,11 @@
 
 .Lrestore_regs:
 
+	# Interrupts are actually disabled from this point on, but will get
+	# reenabled after we return from interrupt/exception.
+	# But the irq tracer needs to be told now...
+	TRACE_ASM_IRQ_ENABLE
+
 	ld	r0, [sp, PT_status32]	; U/K mode at time of entry
 	lr	r10, [AUX_IRQ_ACT]
 
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 4314339..0cb0aba 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -341,6 +341,9 @@
 
 .Lrestore_regs:
 
+	# Interrupts are actually disabled from this point on, but will get
+	# reenabled after we return from interrupt/exception.
+	# But the irq tracer needs to be told now...
 	TRACE_ASM_IRQ_ENABLE
 
 	lr	r10, [status32]
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 9425263..6c24faf 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -137,23 +137,30 @@
 	.map = arcv2_irq_map,
 };
 
-static struct irq_domain *root_domain;
 
 static int __init
 init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
 {
+	struct irq_domain *root_domain;
+
 	if (parent)
 		panic("DeviceTree incore intc not a root irq controller\n");
 
-	root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
-					    &arcv2_irq_ops, NULL);
-
+	root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS, &arcv2_irq_ops, NULL);
 	if (!root_domain)
 		panic("root irq domain not avail\n");
 
-	/* with this we don't need to export root_domain */
+	/*
+	 * Needed for primary domain lookup to succeed
+	 * This is a primary irqchip, and can never have a parent
+	 */
 	irq_set_default_host(root_domain);
 
+#ifdef CONFIG_SMP
+	irq_create_mapping(root_domain, IPI_IRQ);
+#endif
+	irq_create_mapping(root_domain, SOFTIRQ_IRQ);
+
 	return 0;
 }
 
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 224d1c3..c5cceca 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -14,6 +14,8 @@
 #include <linux/irqchip.h>
 #include <asm/irq.h>
 
+#define TIMER0_IRQ	3	/* Fixed by ISA */
+
 /*
  * Early Hardware specific Interrupt setup
  * -Platform independent, needed for each CPU (not foldable into init_IRQ)
@@ -79,8 +81,9 @@
 static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
 			       irq_hw_number_t hw)
 {
-	switch (irq) {
+	switch (hw) {
 	case TIMER0_IRQ:
+		irq_set_percpu_devid(irq);
 		irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
 		break;
 	default:
@@ -94,21 +97,23 @@
 	.map = arc_intc_domain_map,
 };
 
-static struct irq_domain *root_domain;
-
 static int __init
 init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
 {
+	struct irq_domain *root_domain;
+
 	if (parent)
 		panic("DeviceTree incore intc not a root irq controller\n");
 
-	root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
+	root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS,
 					    &arc_intc_domain_ops, NULL);
-
 	if (!root_domain)
 		panic("root irq domain not avail\n");
 
-	/* with this we don't need to export root_domain */
+	/*
+	 * Needed for primary domain lookup to succeed
+	 * This is a primary irqchip, and can never have a parent
+	 */
 	irq_set_default_host(root_domain);
 
 	return 0;
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index ba17f85..538b36a 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -41,53 +41,7 @@
  * "C" Entry point for any ARC ISR, called from low level vector handler
  * @irq is the vector number read from ICAUSE reg of on-chip intc
  */
-void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
+void arch_do_IRQ(unsigned int hwirq, struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	irq_enter();
-	generic_handle_irq(irq);
-	irq_exit();
-	set_irq_regs(old_regs);
-}
-
-/*
- * API called for requesting percpu interrupts - called by each CPU
- *  - For boot CPU, actually request the IRQ with genirq core + enables
- *  - For subsequent callers only enable called locally
- *
- * Relies on being called by boot cpu first (i.e. request called ahead) of
- * any enable as expected by genirq. Hence Suitable only for TIMER, IPI
- * which are guaranteed to be setup on boot core first.
- * Late probed peripherals such as perf can't use this as there no guarantee
- * of being called on boot CPU first.
- */
-
-void arc_request_percpu_irq(int irq, int cpu,
-                            irqreturn_t (*isr)(int irq, void *dev),
-                            const char *irq_nm,
-                            void *percpu_dev)
-{
-	/* Boot cpu calls request, all call enable */
-	if (!cpu) {
-		int rc;
-
-#ifdef CONFIG_ISA_ARCOMPACT
-		/*
-		 * A subsequent request_percpu_irq() fails if percpu_devid is
-		 * not set. That in turns sets NOAUTOEN, meaning each core needs
-		 * to call enable_percpu_irq()
-		 *
-		 * For ARCv2, this is done in irq map function since we know
-		 * which irqs are strictly per cpu
-		 */
-		irq_set_percpu_devid(irq);
-#endif
-
-		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
-		if (rc)
-			panic("Percpu IRQ request failed for %d\n", irq);
-	}
-
-	enable_percpu_irq(irq, 0);
+	handle_domain_irq(NULL, hwirq, regs);
 }
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index c41c364..72f9179 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -15,9 +15,6 @@
 #include <asm/mcip.h>
 #include <asm/setup.h>
 
-#define IPI_IRQ		19
-#define SOFTIRQ_IRQ	21
-
 static char smp_cpuinfo_buf[128];
 static int idu_detected;
 
@@ -116,15 +113,13 @@
 		IS_AVAIL1(mp.dbg, "DEBUG "),
 		IS_AVAIL1(mp.gfrc, "GFRC"));
 
+	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
 	idu_detected = mp.idu;
 
 	if (mp.dbg) {
 		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
 		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
 	}
-
-	if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
-		panic("kernel trying to use non-existent GFRC\n");
 }
 
 struct plat_smp_ops plat_smp_ops = {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 151acf0..f63b8bf 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -13,7 +13,6 @@
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
-#include <linux/clk-provider.h>
 #include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 #include <linux/cache.h>
@@ -24,7 +23,6 @@
 #include <asm/page.h>
 #include <asm/irq.h>
 #include <asm/unwind.h>
-#include <asm/clk.h>
 #include <asm/mach_desc.h>
 #include <asm/smp.h>
 
@@ -220,10 +218,6 @@
 	if (tbl->info.id == 0)
 		n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
 
-	n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n",
-		       (unsigned int)(arc_get_core_freq() / 1000000),
-		       (unsigned int)(arc_get_core_freq() / 10000) % 100);
-
 	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
 		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
 		       IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
@@ -314,9 +308,6 @@
 	if (!cpu->extn.timer1)
 		panic("Timer1 is not present!\n");
 
-	if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
-		panic("RTC is not present\n");
-
 #ifdef CONFIG_ARC_HAS_DCCM
 	/*
 	 * DCCM can be arbit placed in hardware.
@@ -444,7 +435,6 @@
 
 static int __init customize_machine(void)
 {
-	of_clk_init(NULL);
 	/*
 	 * Traverses flattened DeviceTree - registering platform devices
 	 * (if any) complete with their resources
@@ -477,6 +467,8 @@
 {
 	char *str;
 	int cpu_id = ptr_to_cpu(v);
+	struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk");
+	u32 freq = 0;
 
 	if (!cpu_online(cpu_id)) {
 		seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
@@ -489,6 +481,11 @@
 
 	seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
 
+	of_property_read_u32(core_clk, "clock-frequency", &freq);
+	if (freq)
+		seq_printf(m, "CPU speed\t: %u.%02u Mhz\n",
+			   freq / 1000000, (freq / 10000) % 100);
+
 	seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
 		   loops_per_jiffy / (500000 / HZ),
 		   (loops_per_jiffy / (5000 / HZ)) % 100);
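
The /proc/cpuinfo path above now pulls the core frequency straight from the device tree instead of the removed arc_get_core_freq() cache. A minimal sketch of that lookup pattern (the node and property names follow the patch; the helper name and error handling are illustrative):

#include <linux/of.h>
#include <linux/printk.h>

static u32 my_core_clk_hz(void)
{
	struct device_node *np = of_find_node_by_name(NULL, "core_clk");
	u32 freq = 0;

	/* returns 0 on success; @freq is left at 0 if the property is absent */
	if (of_property_read_u32(np, "clock-frequency", &freq))
		pr_warn("core_clk: no clock-frequency property\n");

	of_node_put(np);	/* of_find_node_by_name() takes a reference */
	return freq;
}
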
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 4cb3add..f183cc6 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -126,11 +126,6 @@
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 
-	notify_cpu_starting(cpu);
-	set_cpu_online(cpu, true);
-
-	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
-
 	/* Some SMP H/w setup - for each cpu */
 	if (plat_smp_ops.init_per_cpu)
 		plat_smp_ops.init_per_cpu(cpu);
@@ -138,7 +133,10 @@
 	if (machine_desc->init_per_cpu)
 		machine_desc->init_per_cpu(cpu);
 
-	arc_local_timer_setup();
+	notify_cpu_starting(cpu);
+	set_cpu_online(cpu, true);
+
+	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
 	local_irq_enable();
 	preempt_disable();
@@ -346,6 +344,10 @@
 
 /*
  * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ *
+ * Note: If the IPI is provided by the platform (vs. say ARC MCIP), its intc
+ * setup/map function needs to call irq_set_percpu_devid() for the IPI IRQ,
+ * otherwise request_percpu_irq() below will fail
  */
 static DEFINE_PER_CPU(int, ipi_dev);
 
@@ -353,7 +355,16 @@
 {
 	int *dev = per_cpu_ptr(&ipi_dev, cpu);
 
-	arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev);
+	/* Boot cpu calls request, all call enable */
+	if (!cpu) {
+		int rc;
+
+		rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+		if (rc)
+			panic("Percpu IRQ request failed for %d\n", irq);
+	}
+
+	enable_percpu_irq(irq, 0);
 
 	return 0;
 }
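
The IPI setup above is the standard "boot CPU requests once, every CPU enables its own copy" idiom for per-cpu interrupts; it only works because the intc map function flagged the line with irq_set_percpu_devid(), which also marks it NOAUTOEN. A condensed sketch of the same idiom with illustrative names:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, my_ipi_dev);

static irqreturn_t my_do_ipi(int irq, void *dev_id)
{
	/* ack the hardware and run the pending IPI callbacks here */
	return IRQ_HANDLED;
}

/* called on each CPU as it is brought up */
static int my_ipi_irq_setup(int cpu, int irq)
{
	int *dev = per_cpu_ptr(&my_ipi_dev, cpu);

	/* genirq wants exactly one request per percpu IRQ */
	if (!cpu) {
		int rc = request_percpu_irq(irq, my_do_ipi, "IPI", dev);

		if (rc)
			return rc;
	}

	/* NOAUTOEN: each CPU must enable its own copy of the line */
	enable_percpu_irq(irq, 0);
	return 0;
}
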
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 7d9a736..4549ab2 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -29,21 +29,16 @@
  * which however is currently broken
  */
 
-#include <linux/spinlock.h>
 #include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-#include <linux/profile.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
 #include <asm/irq.h>
 #include <asm/arcregs.h>
-#include <asm/clk.h>
-#include <asm/mach_desc.h>
 
 #include <asm/mcip.h>
 
@@ -60,16 +55,35 @@
 
 #define ARC_TIMER_MAX	0xFFFFFFFF
 
+static unsigned long arc_timer_freq;
+
+static int noinline arc_get_timer_clk(struct device_node *node)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = of_clk_get(node, 0);
+	if (IS_ERR(clk)) {
+		pr_err("timer missing clk\n");
+		return PTR_ERR(clk);
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		pr_err("Couldn't enable parent clk\n");
+		return ret;
+	}
+
+	arc_timer_freq = clk_get_rate(clk);
+
+	return 0;
+}
+
 /********** Clock Source Device *********/
 
 #ifdef CONFIG_ARC_HAS_GFRC
 
-static int arc_counter_setup(void)
-{
-	return 1;
-}
-
-static cycle_t arc_counter_read(struct clocksource *cs)
+static cycle_t arc_read_gfrc(struct clocksource *cs)
 {
 	unsigned long flags;
 	union {
@@ -94,15 +108,31 @@
 	return stamp.full;
 }
 
-static struct clocksource arc_counter = {
+static struct clocksource arc_counter_gfrc = {
 	.name   = "ARConnect GFRC",
 	.rating = 400,
-	.read   = arc_counter_read,
+	.read   = arc_read_gfrc,
 	.mask   = CLOCKSOURCE_MASK(64),
 	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-#else
+static void __init arc_cs_setup_gfrc(struct device_node *node)
+{
+	int exists = cpuinfo_arc700[0].extn.gfrc;
+	int ret;
+
+	if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
+		return;
+
+	ret = arc_get_timer_clk(node);
+	if (ret)
+		return;
+
+	clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
+
+#endif
 
 #ifdef CONFIG_ARC_HAS_RTC
 
@@ -110,15 +140,7 @@
 #define AUX_RTC_LOW	0x104
 #define AUX_RTC_HIGH	0x105
 
-int arc_counter_setup(void)
-{
-	write_aux_reg(AUX_RTC_CTRL, 1);
-
-	/* Not usable in SMP */
-	return !IS_ENABLED(CONFIG_SMP);
-}
-
-static cycle_t arc_counter_read(struct clocksource *cs)
+static cycle_t arc_read_rtc(struct clocksource *cs)
 {
 	unsigned long status;
 	union {
@@ -142,47 +164,78 @@
 	return stamp.full;
 }
 
-static struct clocksource arc_counter = {
+static struct clocksource arc_counter_rtc = {
 	.name   = "ARCv2 RTC",
 	.rating = 350,
-	.read   = arc_counter_read,
+	.read   = arc_read_rtc,
 	.mask   = CLOCKSOURCE_MASK(64),
 	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-#else /* !CONFIG_ARC_HAS_RTC */
+static void __init arc_cs_setup_rtc(struct device_node *node)
+{
+	int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
+	int ret;
+
+	if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
+		return;
+
+	/* Local to CPU hence not usable in SMP */
+	if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
+		return;
+
+	ret = arc_get_timer_clk(node);
+	if (ret)
+		return;
+
+	write_aux_reg(AUX_RTC_CTRL, 1);
+
+	clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
+
+#endif
 
 /*
- * set 32bit TIMER1 to keep counting monotonically and wraparound
+ * 32bit TIMER1 to keep counting monotonically and wraparound
  */
-int arc_counter_setup(void)
-{
-	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
-	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
-	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
 
-	/* Not usable in SMP */
-	return !IS_ENABLED(CONFIG_SMP);
-}
-
-static cycle_t arc_counter_read(struct clocksource *cs)
+static cycle_t arc_read_timer1(struct clocksource *cs)
 {
 	return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
 }
 
-static struct clocksource arc_counter = {
+static struct clocksource arc_counter_timer1 = {
 	.name   = "ARC Timer1",
 	.rating = 300,
-	.read   = arc_counter_read,
+	.read   = arc_read_timer1,
 	.mask   = CLOCKSOURCE_MASK(32),
 	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-#endif
-#endif
+static void __init arc_cs_setup_timer1(struct device_node *node)
+{
+	int ret;
+
+	/* Local to CPU hence not usable in SMP */
+	if (IS_ENABLED(CONFIG_SMP))
+		return;
+
+	ret = arc_get_timer_clk(node);
+	if (ret)
+		return;
+
+	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
+	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+	clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+}
 
 /********** Clock Event Device *********/
 
+static int arc_timer_irq;
+
 /*
  * Arm the timer to interrupt after @cycles
  * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
@@ -209,7 +262,7 @@
 	 * At X Hz, 1 sec = 1000ms -> X cycles;
 	 *		      10ms -> X / 100 cycles
 	 */
-	arc_timer_event_setup(arc_get_core_freq() / HZ);
+	arc_timer_event_setup(arc_timer_freq / HZ);
 	return 0;
 }
 
@@ -218,7 +271,6 @@
 	.features		= CLOCK_EVT_FEAT_ONESHOT |
 				  CLOCK_EVT_FEAT_PERIODIC,
 	.rating			= 300,
-	.irq			= TIMER0_IRQ,	/* hardwired, no need for resources */
 	.set_next_event		= arc_clkevent_set_next_event,
 	.set_state_periodic	= arc_clkevent_set_periodic,
 };
@@ -244,45 +296,81 @@
 	return IRQ_HANDLED;
 }
 
-/*
- * Setup the local event timer for @cpu
- */
-void arc_local_timer_setup()
+static int arc_timer_cpu_notify(struct notifier_block *self,
+				unsigned long action, void *hcpu)
 {
 	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
-	int cpu = smp_processor_id();
 
-	evt->cpumask = cpumask_of(cpu);
-	clockevents_config_and_register(evt, arc_get_core_freq(),
+	evt->cpumask = cpumask_of(smp_processor_id());
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		clockevents_config_and_register(evt, arc_timer_freq,
+						0, ULONG_MAX);
+		enable_percpu_irq(arc_timer_irq, 0);
+		break;
+	case CPU_DYING:
+		disable_percpu_irq(arc_timer_irq);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block arc_timer_cpu_nb = {
+	.notifier_call = arc_timer_cpu_notify,
+};
+
+/*
+ * clockevent setup for boot CPU
+ */
+static void __init arc_clockevent_setup(struct device_node *node)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+	int ret;
+
+	register_cpu_notifier(&arc_timer_cpu_nb);
+
+	arc_timer_irq = irq_of_parse_and_map(node, 0);
+	if (arc_timer_irq <= 0)
+		panic("clockevent: missing irq");
+
+	ret = arc_get_timer_clk(node);
+	if (ret)
+		panic("clockevent: missing clk");
+
+	evt->irq = arc_timer_irq;
+	evt->cpumask = cpumask_of(smp_processor_id());
+	clockevents_config_and_register(evt, arc_timer_freq,
 					0, ARC_TIMER_MAX);
 
-	/* setup the per-cpu timer IRQ handler - for all cpus */
-	arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler,
-			       "Timer0 (per-cpu-tick)", evt);
+	/* Needs apriori irq_set_percpu_devid() done in intc map function */
+	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
+				 "Timer0 (per-cpu-tick)", evt);
+	if (ret)
+		panic("clockevent: unable to request irq\n");
+
+	enable_percpu_irq(arc_timer_irq, 0);
 }
 
+static void __init arc_of_timer_init(struct device_node *np)
+{
+	static int init_count = 0;
+
+	if (!init_count) {
+		init_count = 1;
+		arc_clockevent_setup(np);
+	} else {
+		arc_cs_setup_timer1(np);
+	}
+}
+CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
+
 /*
  * Called from start_kernel() - boot CPU only
- *
- * -Sets up h/w timers as applicable on boot cpu
- * -Also sets up any global state needed for timer subsystem:
- *    - for "counting" timer, registers a clocksource, usable across CPUs
- *      (provided that underlying counter h/w is synchronized across cores)
- *    - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
  */
 void __init time_init(void)
 {
-	/*
-	 * sets up the timekeeping free-flowing counter which also returns
-	 * whether the counter is usable as clocksource
-	 */
-	if (arc_counter_setup())
-		/*
-		 * CLK upto 4.29 GHz can be safely represented in 32 bits
-		 * because Max 32 bit number is 4,294,967,295
-		 */
-		clocksource_register_hz(&arc_counter, arc_get_core_freq());
-
-	/* sets up the periodic event timer */
-	arc_local_timer_setup();
+	of_clk_init(NULL);
+	clocksource_probe();
 }
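
time.c is now a collection of CLOCKSOURCE_OF_DECLARE() hooks: time_init() only runs of_clk_init() and clocksource_probe(), and the latter walks the declared compatibles and calls each setup function with its device_node. A stripped-down sketch of one such driver, with made-up names and a dummy counter read (a real driver reads its free-running counter register):

#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/of.h>

static cycle_t my_counter_read(struct clocksource *cs)
{
	return 0;	/* read the free-running counter here */
}

static struct clocksource my_counter = {
	.name	= "my-counter",
	.rating	= 300,
	.read	= my_counter_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init my_counter_setup(struct device_node *node)
{
	struct clk *clk = of_clk_get(node, 0);

	if (IS_ERR(clk) || clk_prepare_enable(clk))
		return;

	/* the rate comes from the "clocks" phandle, not a hardcoded value */
	clocksource_register_hz(&my_counter, clk_get_rate(clk));
}
CLOCKSOURCE_OF_DECLARE(my_counter, "vendor,my-counter", my_counter_setup);
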
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index d7709e3..9e5eddb 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -628,7 +628,7 @@
 
 		/* kernel reading from page with U-mapping */
 		phys_addr_t paddr = (unsigned long)page_address(page);
-		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+		unsigned long vaddr = page->index << PAGE_SHIFT;
 
 		if (addr_not_cache_congruent(paddr, vaddr))
 			__flush_dcache_page(paddr, vaddr);
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 7d2c4fb..8be9303 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -13,6 +13,7 @@
 #ifdef CONFIG_BLK_DEV_INITRD
 #include <linux/initrd.h>
 #endif
+#include <linux/of_fdt.h>
 #include <linux/swap.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
@@ -29,11 +30,16 @@
 static unsigned long low_mem_sz;
 
 #ifdef CONFIG_HIGHMEM
-static unsigned long min_high_pfn;
+static unsigned long min_high_pfn, max_high_pfn;
 static u64 high_mem_start;
 static u64 high_mem_sz;
 #endif
 
+#ifdef CONFIG_DISCONTIGMEM
+struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+#endif
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
@@ -108,13 +114,11 @@
 	/* Last usable page of low mem */
 	max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
 
-#ifdef CONFIG_HIGHMEM
-	min_high_pfn = PFN_DOWN(high_mem_start);
-	max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+#ifdef CONFIG_FLATMEM
+	/* pfn_valid() uses this */
+	max_mapnr = max_low_pfn - min_low_pfn;
 #endif
 
-	max_mapnr = max_pfn - min_low_pfn;
-
 	/*------------- bootmem allocator setup -----------------------*/
 
 	/*
@@ -128,7 +132,7 @@
 	 * the crash
 	 */
 
-	memblock_add(low_mem_start, low_mem_sz);
+	memblock_add_node(low_mem_start, low_mem_sz, 0);
 	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -136,6 +140,9 @@
 		memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
 #endif
 
+	early_init_fdt_reserve_self();
+	early_init_fdt_scan_reserved_mem();
+
 	memblock_dump_all();
 
 	/*----------------- node/zones setup --------------------------*/
@@ -145,13 +152,6 @@
 	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
 	zones_holes[ZONE_NORMAL] = 0;
 
-#ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
-
-	/* This handles the peripheral address space hole */
-	zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
-#endif
-
 	/*
 	 * We can't use the helper free_area_init(zones[]) because it uses
 	 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
@@ -164,6 +164,34 @@
 			    zones_holes);	/* holes */
 
 #ifdef CONFIG_HIGHMEM
+	/*
+	 * Populate a new node with highmem
+	 *
+	 * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
+	 * than addresses in normal, i.e. low, memory (0x8000_0000 based).
+	 * Even with PAE, the huge peripheral space hole would waste a lot of
+	 * mem with a single mem_map[]. This warrants a mem_map-per-region design.
+	 * Thus HIGHMEM on ARC is implemented with DISCONTIGMEM.
+	 *
+	 * DISCONTIGMEM in turn requires multiple nodes. Node 0 above is
+	 * populated with the normal memory zone while node 1 only has highmem
+	 */
+	node_set_online(1);
+
+	min_high_pfn = PFN_DOWN(high_mem_start);
+	max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+
+	zones_size[ZONE_NORMAL] = 0;
+	zones_holes[ZONE_NORMAL] = 0;
+
+	zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn;
+	zones_holes[ZONE_HIGHMEM] = 0;
+
+	free_area_init_node(1,			/* node-id */
+			    zones_size,		/* num pages per zone */
+			    min_high_pfn,	/* first pfn of node */
+			    zones_holes);	/* holes */
+
 	high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
 	kmap_init();
 #endif
@@ -181,7 +209,7 @@
 	unsigned long tmp;
 
 	reset_all_zones_managed_pages();
-	for (tmp = min_high_pfn; tmp < max_pfn; tmp++)
+	for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
 		free_highmem_page(pfn_to_page(tmp));
 #endif
 
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 7046c12..ec868a9 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -814,6 +814,17 @@
 
 	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));
 
+	/*
+	 * Can't be done in processor.h due to header include dependencies
+	 */
+	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
+
+	/*
+	 * Stack top size sanity check;
+	 * can't be done in processor.h due to header include dependencies
+	 */
+	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
+
 	/* For efficiency sake, kernel is compile time built for a MMU ver
 	 * This must match the hardware it is running on.
 	 * Linux built for MMU V2, if run on MMU V1 will break down because V1
diff --git a/arch/arc/plat-axs10x/Kconfig b/arch/arc/plat-axs10x/Kconfig
index 426ac4b..c54d1ae 100644
--- a/arch/arc/plat-axs10x/Kconfig
+++ b/arch/arc/plat-axs10x/Kconfig
@@ -13,7 +13,7 @@
 	select OF_GPIO
 	select MIGHT_HAVE_PCI
 	select GENERIC_IRQ_CHIP
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	help
 	  Support for the ARC AXS10x Software Development Platforms.
 
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 1b0f0f4..8654870 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -14,10 +14,11 @@
  *
  */
 
+#include <linux/of_fdt.h>
 #include <linux/of_platform.h>
+#include <linux/libfdt.h>
 
 #include <asm/asm-offsets.h>
-#include <asm/clk.h>
 #include <asm/io.h>
 #include <asm/mach_desc.h>
 #include <asm/mcip.h>
@@ -389,6 +390,13 @@
 
 static void __init axs103_early_init(void)
 {
+	int offset = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
+	const struct fdt_property *prop = fdt_get_property(initial_boot_params,
+							   offset,
+							   "clock-frequency",
+							   NULL);
+	u32 freq = be32_to_cpu(*(u32*)(prop->data)) / 1000000, orig = freq;
+
 	/*
 	 * AXS103 configurations for SMP/QUAD configurations share device tree
 	 * which defaults to 90 MHz. However recent failures of Quad config
@@ -401,12 +409,10 @@
 #ifdef CONFIG_ARC_MCIP
 	unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
 	if (num_cores > 2)
-		arc_set_core_freq(50 * 1000000);
-	else if (num_cores == 2)
-		arc_set_core_freq(75 * 1000000);
+		freq = 50;
 #endif
 
-	switch (arc_get_core_freq()/1000000) {
+	switch (freq) {
 	case 33:
 		axs103_set_freq(1, 1, 1);
 		break;
@@ -431,11 +437,18 @@
 		 * DT "clock-frequency" might not match with board value.
 		 * Hence update it to match the board value.
 		 */
-		arc_set_core_freq(axs103_get_freq() * 1000000);
+		freq = axs103_get_freq();
 		break;
 	}
 
-	pr_info("Freq is %dMHz\n", axs103_get_freq());
+	pr_info("Freq is %dMHz\n", freq);
+
+	/* Patching .dtb in-place with new core clock value */
+	if (freq != orig) {
+		freq = cpu_to_be32(freq * 1000000);
+		fdt_setprop_inplace(initial_boot_params, offset,
+				    "clock-frequency", &freq, sizeof(freq));
+	}
 
 	/* Memory maps already config in pre-bootloader */
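
axs103_early_init() keeps the DT blob as the single source of truth for the core clock: it reads "clock-frequency" from the flattened tree and, if the board logic picks a different rate, patches the property back in place. A minimal sketch of that libfdt pattern (path and property names follow the patch; the helper name is illustrative). Note that fdt_setprop_inplace() can only overwrite a value of the same length, which is fine here since the property is always a single u32:

#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void __init my_patch_core_clk(u32 new_hz)
{
	int offset = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
	__be32 val = cpu_to_be32(new_hz);

	if (offset < 0)		/* node not present in this DT */
		return;

	/* must run before the tree is unflattened for the change to stick */
	fdt_setprop_inplace(initial_boot_params, offset,
			    "clock-frequency", &val, sizeof(val));
}
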
 
diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig
new file mode 100644
index 0000000..1d175cc
--- /dev/null
+++ b/arch/arc/plat-eznps/Kconfig
@@ -0,0 +1,35 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+menuconfig ARC_PLAT_EZNPS
+	bool "\"EZchip\" ARC dev platform"
+	select ARC_HAS_COH_CACHES if SMP
+	select CPU_BIG_ENDIAN
+	select CLKSRC_NPS
+	select EZNPS_GIC
+	select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET
+	help
+	  Support for EZchip development platforms,
+	  based on ARC700 cores.
+	  We handle a few flavours:
+	    - Hardware Emulator (AKA HE), which is an FPGA-based chassis
+	    - Simulator based on MetaWare nSIM
+	    - NPS400 chip based on ASIC
+
+config EZNPS_MTM_EXT
+	bool "ARC-EZchip MTM Extensions"
+	select CPUMASK_OFFSTACK
+	depends on ARC_PLAT_EZNPS && SMP
+	default y
+	help
+	  Here we add a new level to the CPU topology hierarchy:
+		Core
+		Thread
+	  At the new thread level each CPU represents one HW thread.
+	  At the higher level each core contains 16 threads, any of which
+	  looks like a CPU from the Linux point of view.
+	  All threads within the same core share the core's execution unit,
+	  and the HW scheduler round-robins between them.
diff --git a/arch/arc/plat-eznps/Makefile b/arch/arc/plat-eznps/Makefile
new file mode 100644
index 0000000..21091b1
--- /dev/null
+++ b/arch/arc/plat-eznps/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y := entry.o platform.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_EZNPS_MTM_EXT) += mtm.o
diff --git a/arch/arc/plat-eznps/entry.S b/arch/arc/plat-eznps/entry.S
new file mode 100644
index 0000000..328261c
--- /dev/null
+++ b/arch/arc/plat-eznps/entry.S
@@ -0,0 +1,70 @@
+/*******************************************************************************
+
+  EZNPS CPU startup Code
+  Copyright(c) 2012 EZchip Technologies.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+  more details.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+*******************************************************************************/
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/cache.h>
+#include <plat/ctop.h>
+
+	.cpu A7
+
+	.section .init.text, "ax",@progbits
+	.align 1024	; HW requirement for restart first PC
+
+ENTRY(res_service)
+#ifdef CONFIG_EZNPS_MTM_EXT
+	; There is no work for HW thread id != 0
+	lr	r3, [CTOP_AUX_THREAD_ID]
+	cmp	r3, 0
+	jne	stext
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+	; With no cache coherency mechanism, D$ needs to be used very carefully.
+	; Address space:
+	; 0G-2G: We disable CONFIG_ARC_CACHE_PAGES.
+	; 2G-3G: We disable D$ by setting this bit.
+	; 3G-4G: D$ is disabled by architecture.
+	; FMT are huge pages for user applications, residing at 0-2G.
+	; Only FMT is left able to use D$, and each such page has an
+	; enable/disable bit for cacheability.
+	; Programmers will use FMT pages for private data, so cache coherency
+	; would not be a problem.
+	; First thing, we invalidate D$
+	sr	1, [ARC_REG_DC_IVDC]
+	sr	HW_COMPLY_KRN_NOT_D_CACHED, [CTOP_AUX_HW_COMPLY]
+#endif
+
+#ifdef CONFIG_SMP
+	; We set logical cpuid to be used by GET_CPUID
+	; We do not use the physical cpuid since we want ids to be continuous when
+	; it comes to cpus on the same quad cluster.
+	; This is useful for applications that use shared resources of a quad
+	; cluster such as SRAMs.
+	lr 	r3, [CTOP_AUX_CORE_ID]
+	sr	r3, [CTOP_AUX_LOGIC_CORE_ID]
+	lr	r3, [CTOP_AUX_CLUSTER_ID]
+	; Setting the logical id is achieved by swapping the 2 middle bits of the
+	; cluster id (4 bit)
+	; r3 is used since we use a short instruction and need a q-class reg
+	.short	CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST
+	.word 	CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM
+	 sr	r3, [CTOP_AUX_LOGIC_CLUSTER_ID]
+#endif
+
+	j	stext
+END(res_service)
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
new file mode 100644
index 0000000..9d6718c
--- /dev/null
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#ifndef _PLAT_EZNPS_CTOP_H
+#define _PLAT_EZNPS_CTOP_H
+
+#ifndef CONFIG_ARC_PLAT_EZNPS
+#error "Incorrect ctop.h include"
+#endif
+
+#include <soc/nps/common.h>
+
+/* core auxiliary registers */
+#ifdef __ASSEMBLY__
+#define CTOP_AUX_BASE				(-0x800)
+#else
+#define CTOP_AUX_BASE				0xFFFFF800
+#endif
+
+#define CTOP_AUX_GLOBAL_ID			(CTOP_AUX_BASE + 0x000)
+#define CTOP_AUX_CLUSTER_ID			(CTOP_AUX_BASE + 0x004)
+#define CTOP_AUX_CORE_ID			(CTOP_AUX_BASE + 0x008)
+#define CTOP_AUX_THREAD_ID			(CTOP_AUX_BASE + 0x00C)
+#define CTOP_AUX_LOGIC_GLOBAL_ID		(CTOP_AUX_BASE + 0x010)
+#define CTOP_AUX_LOGIC_CLUSTER_ID		(CTOP_AUX_BASE + 0x014)
+#define CTOP_AUX_LOGIC_CORE_ID			(CTOP_AUX_BASE + 0x018)
+#define CTOP_AUX_MT_CTRL			(CTOP_AUX_BASE + 0x020)
+#define CTOP_AUX_HW_COMPLY			(CTOP_AUX_BASE + 0x024)
+#define CTOP_AUX_LPC				(CTOP_AUX_BASE + 0x030)
+#define CTOP_AUX_EFLAGS				(CTOP_AUX_BASE + 0x080)
+#define CTOP_AUX_IACK				(CTOP_AUX_BASE + 0x088)
+#define CTOP_AUX_GPA1				(CTOP_AUX_BASE + 0x08C)
+#define CTOP_AUX_UDMC				(CTOP_AUX_BASE + 0x300)
+
+/* EZchip core instructions */
+#define CTOP_INST_HWSCHD_OFF_R3			0x3B6F00BF
+#define CTOP_INST_HWSCHD_OFF_R4			0x3C6F00BF
+#define CTOP_INST_HWSCHD_RESTORE_R3		0x3E6F70C3
+#define CTOP_INST_HWSCHD_RESTORE_R4		0x3E6F7103
+#define CTOP_INST_SCHD_RW			0x3E6F7004
+#define CTOP_INST_SCHD_RD			0x3E6F7084
+#define CTOP_INST_ASRI_0_R3			0x3B56003E
+#define CTOP_INST_XEX_DI_R2_R2_R3		0x4A664C00
+#define CTOP_INST_EXC_DI_R2_R2_R3		0x4A664C01
+#define CTOP_INST_AADD_DI_R2_R2_R3		0x4A664C02
+#define CTOP_INST_AAND_DI_R2_R2_R3		0x4A664C04
+#define CTOP_INST_AOR_DI_R2_R2_R3		0x4A664C05
+#define CTOP_INST_AXOR_DI_R2_R2_R3		0x4A664C06
+
+/* Do not use D$ for address in 2G-3G */
+#define HW_COMPLY_KRN_NOT_D_CACHED		_BITUL(28)
+
+#define NPS_MSU_EN_CFG				0x80
+#define NPS_CRG_BLKID				0x480
+#define NPS_CRG_SYNC_BIT			_BITUL(0)
+#define NPS_GIM_BLKID				0x5C0
+
+/* GIM registers and fields */
+#define NPS_GIM_UART_LINE			_BITUL(7)
+#define NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE	_BITUL(10)
+#define NPS_GIM_DBG_LAN_EAST_RX_RDY_LINE	_BITUL(11)
+#define NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE	_BITUL(25)
+#define NPS_GIM_DBG_LAN_WEST_RX_RDY_LINE	_BITUL(26)
+
+#ifndef __ASSEMBLY__
+/* Functional registers definition */
+struct nps_host_reg_mtm_cfg {
+	union {
+		struct {
+			u32 gen:1, gdis:1, clk_gate_dis:1, asb:1,
+			__reserved:9, nat:3, ten:16;
+		};
+		u32 value;
+	};
+};
+
+struct nps_host_reg_mtm_cpu_cfg {
+	union {
+		struct {
+			u32 csa:22, dmsid:6, __reserved:3, cs:1;
+		};
+		u32 value;
+	};
+};
+
+struct nps_host_reg_thr_init {
+	union {
+		struct {
+			u32 str:1, __reserved:27, thr_id:4;
+		};
+		u32 value;
+	};
+};
+
+struct nps_host_reg_thr_init_sts {
+	union {
+		struct {
+			u32 bsy:1, err:1, __reserved:26, thr_id:4;
+		};
+		u32 value;
+	};
+};
+
+struct nps_host_reg_msu_en_cfg {
+	union {
+		struct {
+			u32     __reserved1:11,
+			rtc_en:1, ipc_en:1, gim_1_en:1,
+			gim_0_en:1, ipi_en:1, buff_e_rls_bmuw:1,
+			buff_e_alc_bmuw:1, buff_i_rls_bmuw:1, buff_i_alc_bmuw:1,
+			buff_e_rls_bmue:1, buff_e_alc_bmue:1, buff_i_rls_bmue:1,
+			buff_i_alc_bmue:1, __reserved2:1, buff_e_pre_en:1,
+			buff_i_pre_en:1, pmuw_ja_en:1, pmue_ja_en:1,
+			pmuw_nj_en:1, pmue_nj_en:1, msu_en:1;
+		};
+		u32 value;
+	};
+};
+
+struct nps_host_reg_gim_p_int_dst {
+	union {
+		struct {
+			u32 int_out_en:1, __reserved1:4,
+			is:1, intm:2, __reserved2:4,
+			nid:4, __reserved3:4, cid:4,
+			 __reserved4:4, tid:4;
+		};
+		u32 value;
+	};
+};
+
+/* AUX registers definition */
+struct nps_host_reg_aux_udmc {
+	union {
+		struct {
+			u32 dcp:1, cme:1, __reserved:19, nat:3,
+			__reserved2:5, dcas:3;
+		};
+		u32 value;
+	};
+};
+
+struct nps_host_reg_aux_mt_ctrl {
+	union {
+		struct {
+			u32 mten:1, hsen:1, scd:1, sten:1,
+			st_cnt:8, __reserved:8,
+			hs_cnt:8, __reserved1:4;
+		};
+		u32 value;
+	};
+};
+
+struct nps_host_reg_aux_hw_comply {
+	union {
+		struct {
+			u32 me:1, le:1, te:1, knc:1, __reserved:28;
+		};
+		u32 value;
+	};
+};
+
+struct nps_host_reg_aux_lpc {
+	union {
+		struct {
+			u32 mep:1, __reserved:31;
+		};
+		u32 value;
+	};
+};
+
+/* CRG registers */
+#define REG_GEN_PURP_0          nps_host_reg_non_cl(NPS_CRG_BLKID, 0x1BF)
+
+/* GIM registers */
+#define REG_GIM_P_INT_EN_0      nps_host_reg_non_cl(NPS_GIM_BLKID, 0x100)
+#define REG_GIM_P_INT_POL_0     nps_host_reg_non_cl(NPS_GIM_BLKID, 0x110)
+#define REG_GIM_P_INT_SENS_0    nps_host_reg_non_cl(NPS_GIM_BLKID, 0x114)
+#define REG_GIM_P_INT_BLK_0     nps_host_reg_non_cl(NPS_GIM_BLKID, 0x118)
+#define REG_GIM_P_INT_DST_10    nps_host_reg_non_cl(NPS_GIM_BLKID, 0x13A)
+#define REG_GIM_P_INT_DST_11    nps_host_reg_non_cl(NPS_GIM_BLKID, 0x13B)
+#define REG_GIM_P_INT_DST_25    nps_host_reg_non_cl(NPS_GIM_BLKID, 0x149)
+#define REG_GIM_P_INT_DST_26    nps_host_reg_non_cl(NPS_GIM_BLKID, 0x14A)
+
+#else
+
+.macro  GET_CPU_ID  reg
+	lr  \reg, [CTOP_AUX_LOGIC_GLOBAL_ID]
+#ifndef CONFIG_EZNPS_MTM_EXT
+	lsr \reg, \reg, 4
+#endif
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _PLAT_EZNPS_CTOP_H */
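
The nps_host_reg_* definitions above follow the "value plus bitfield" union idiom: a register word is read as a whole, individual fields are updated by name, and the word is written back, avoiding hand-rolled shift/mask code. This relies on the compiler's bitfield layout, which is predictable here because the platform is fixed big-endian (CPU_BIG_ENDIAN is selected). A small illustration with a made-up register, not taken from this patch:

#include <linux/io.h>
#include <linux/types.h>

struct my_reg_cfg {
	union {
		struct {
			u32 en:1, mode:3, __reserved:28;
		};
		u32 value;
	};
};

static void my_reg_set_mode(void __iomem *addr, u32 mode)
{
	struct my_reg_cfg cfg;

	cfg.value = ioread32be(addr);	/* read the whole word */
	cfg.mode = mode;		/* update one field by name */
	cfg.en = 1;
	iowrite32be(cfg.value, addr);	/* write it back */
}
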
diff --git a/arch/arc/plat-eznps/include/plat/mtm.h b/arch/arc/plat-eznps/include/plat/mtm.h
new file mode 100644
index 0000000..29b91b5
--- /dev/null
+++ b/arch/arc/plat-eznps/include/plat/mtm.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#ifndef _PLAT_EZNPS_MTM_H
+#define _PLAT_EZNPS_MTM_H
+
+#include <plat/ctop.h>
+
+static inline void *nps_mtm_reg_addr(u32 cpu, u32 reg)
+{
+	struct global_id gid;
+	u32 core, blkid;
+
+	gid.value = cpu;
+	core = gid.core;
+	blkid = (((core & 0x0C) << 2) | (core & 0x03));
+
+	return nps_host_reg(cpu, blkid, reg);
+}
+
+#ifdef CONFIG_EZNPS_MTM_EXT
+#define NPS_CPU_TO_THREAD_NUM(cpu) \
+	({ struct global_id gid; gid.value = cpu; gid.thread; })
+
+/* MTM registers */
+#define MTM_CFG(cpu)			nps_mtm_reg_addr(cpu, 0x81)
+#define MTM_THR_INIT(cpu)		nps_mtm_reg_addr(cpu, 0x92)
+#define MTM_THR_INIT_STS(cpu)		nps_mtm_reg_addr(cpu, 0x93)
+
+#define get_thread(map) map.thread
+#define eznps_max_cpus 4096
+#define eznps_cpus_per_cluster	256
+
+void mtm_enable_core(unsigned int cpu);
+int mtm_enable_thread(int cpu);
+#else /* !CONFIG_EZNPS_MTM_EXT */
+
+#define get_thread(map) 0
+#define eznps_max_cpus 256
+#define eznps_cpus_per_cluster	16
+#define mtm_enable_core(cpu)
+#define mtm_enable_thread(cpu) 1
+#define NPS_CPU_TO_THREAD_NUM(cpu) 0
+
+#endif /* CONFIG_EZNPS_MTM_EXT */
+
+#endif /* _PLAT_EZNPS_MTM_H */
diff --git a/arch/arc/plat-eznps/include/plat/smp.h b/arch/arc/plat-eznps/include/plat/smp.h
new file mode 100644
index 0000000..06b59bd
--- /dev/null
+++ b/arch/arc/plat-eznps/include/plat/smp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#ifndef __PLAT_EZNPS_SMP_H
+#define __PLAT_EZNPS_SMP_H
+
+#ifdef CONFIG_SMP
+
+extern void res_service(void);
+
+#endif /* CONFIG_SMP */
+
+#endif
diff --git a/arch/arc/plat-eznps/mtm.c b/arch/arc/plat-eznps/mtm.c
new file mode 100644
index 0000000..aaaaffd
--- /dev/null
+++ b/arch/arc/plat-eznps/mtm.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <asm/arcregs.h>
+#include <plat/mtm.h>
+#include <plat/smp.h>
+
+#define MT_CTRL_HS_CNT		0xFF
+#define MT_CTRL_ST_CNT		0xF
+#define NPS_NUM_HW_THREADS	0x10
+
+static void mtm_init_nat(int cpu)
+{
+	struct nps_host_reg_mtm_cfg mtm_cfg;
+	struct nps_host_reg_aux_udmc udmc;
+	int log_nat, nat = 0, i, t;
+
+	/* Iterate core threads and update nat */
+	for (i = 0, t = cpu; i < NPS_NUM_HW_THREADS; i++, t++)
+		nat += test_bit(t, cpumask_bits(cpu_possible_mask));
+
+	log_nat = ilog2(nat);
+
+	udmc.value = read_aux_reg(CTOP_AUX_UDMC);
+	udmc.nat = log_nat;
+	write_aux_reg(CTOP_AUX_UDMC, udmc.value);
+
+	mtm_cfg.value = ioread32be(MTM_CFG(cpu));
+	mtm_cfg.nat = log_nat;
+	iowrite32be(mtm_cfg.value, MTM_CFG(cpu));
+}
+
+static void mtm_init_thread(int cpu)
+{
+	int i, tries = 5;
+	struct nps_host_reg_thr_init thr_init;
+	struct nps_host_reg_thr_init_sts thr_init_sts;
+
+	/* Set thread init register */
+	thr_init.value = 0;
+	iowrite32be(thr_init.value, MTM_THR_INIT(cpu));
+	thr_init.thr_id = NPS_CPU_TO_THREAD_NUM(cpu);
+	thr_init.str = 1;
+	iowrite32be(thr_init.value, MTM_THR_INIT(cpu));
+
+	/* Poll till thread init is done */
+	for (i = 0; i < tries; i++) {
+		thr_init_sts.value = ioread32be(MTM_THR_INIT_STS(cpu));
+		if (thr_init_sts.thr_id == thr_init.thr_id) {
+			if (thr_init_sts.bsy)
+				continue;
+			else if (thr_init_sts.err)
+				pr_warn("Failed to thread init cpu %u\n", cpu);
+			break;
+		}
+
+		pr_warn("Wrong thread id in thread init for cpu %u\n", cpu);
+		break;
+	}
+
+	if (i == tries)
+		pr_warn("Got thread init timeout for cpu %u\n", cpu);
+}
+
+int mtm_enable_thread(int cpu)
+{
+	struct nps_host_reg_mtm_cfg mtm_cfg;
+
+	if (NPS_CPU_TO_THREAD_NUM(cpu) == 0)
+		return 1;
+
+	/* Enable thread in mtm */
+	mtm_cfg.value = ioread32be(MTM_CFG(cpu));
+	mtm_cfg.ten |= (1 << (NPS_CPU_TO_THREAD_NUM(cpu)));
+	iowrite32be(mtm_cfg.value, MTM_CFG(cpu));
+
+	return 0;
+}
+
+void mtm_enable_core(unsigned int cpu)
+{
+	int i;
+	struct nps_host_reg_aux_mt_ctrl mt_ctrl;
+	struct nps_host_reg_mtm_cfg mtm_cfg;
+
+	if (NPS_CPU_TO_THREAD_NUM(cpu) != 0)
+		return;
+
+	/* Initialize Number of Active Threads */
+	mtm_init_nat(cpu);
+
+	/* Initialize mtm_cfg */
+	mtm_cfg.value = ioread32be(MTM_CFG(cpu));
+	mtm_cfg.ten = 1;
+	iowrite32be(mtm_cfg.value, MTM_CFG(cpu));
+
+	/* Initialize all other threads in core */
+	for (i = 1; i < NPS_NUM_HW_THREADS; i++)
+		mtm_init_thread(cpu + i);
+
+
+	/* Enable HW schedule, stall counter, mtm */
+	mt_ctrl.value = 0;
+	mt_ctrl.hsen = 1;
+	mt_ctrl.hs_cnt = MT_CTRL_HS_CNT;
+	mt_ctrl.sten = 1;
+	mt_ctrl.st_cnt = MT_CTRL_ST_CNT;
+	mt_ctrl.mten = 1;
+	write_aux_reg(CTOP_AUX_MT_CTRL, mt_ctrl.value);
+
+	/*
+	 * The HW scheduling mechanism will start working
+	 * only after a call to the "schd.rw" instruction.
+	 * cpu_relax() issues the "schd.rw" instruction.
+	 */
+	cpu_relax();
+}
diff --git a/arch/arc/plat-eznps/platform.c b/arch/arc/plat-eznps/platform.c
new file mode 100644
index 0000000..7ad6d2b
--- /dev/null
+++ b/arch/arc/plat-eznps/platform.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/mach_desc.h>
+#include <plat/mtm.h>
+
+static void __init eznps_configure_msu(void)
+{
+	int cpu;
+	struct nps_host_reg_msu_en_cfg msu_en_cfg = {.value = 0};
+
+	msu_en_cfg.msu_en = 1;
+	msu_en_cfg.ipi_en = 1;
+	msu_en_cfg.gim_0_en = 1;
+	msu_en_cfg.gim_1_en = 1;
+
+	/* enable IPI and GIM messages on all clusters */
+	for (cpu = 0 ; cpu < eznps_max_cpus; cpu += eznps_cpus_per_cluster)
+		iowrite32be(msu_en_cfg.value,
+			    nps_host_reg(cpu, NPS_MSU_BLKID, NPS_MSU_EN_CFG));
+}
+
+static void __init eznps_configure_gim(void)
+{
+	u32 reg_value;
+	u32 gim_int_lines;
+	struct nps_host_reg_gim_p_int_dst gim_p_int_dst = {.value = 0};
+
+	gim_int_lines = NPS_GIM_UART_LINE;
+	gim_int_lines |= NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE;
+	gim_int_lines |= NPS_GIM_DBG_LAN_EAST_RX_RDY_LINE;
+	gim_int_lines |= NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE;
+	gim_int_lines |= NPS_GIM_DBG_LAN_WEST_RX_RDY_LINE;
+
+	/*
+	 * IRQ polarity
+	 * low or high level
+	 * negative or positive edge
+	 */
+	reg_value = ioread32be(REG_GIM_P_INT_POL_0);
+	reg_value &= ~gim_int_lines;
+	iowrite32be(reg_value, REG_GIM_P_INT_POL_0);
+
+	/* IRQ type level or edge */
+	reg_value = ioread32be(REG_GIM_P_INT_SENS_0);
+	reg_value |= NPS_GIM_DBG_LAN_EAST_TX_DONE_LINE;
+	reg_value |= NPS_GIM_DBG_LAN_WEST_TX_DONE_LINE;
+	iowrite32be(reg_value, REG_GIM_P_INT_SENS_0);
+
+	/*
+	 * GIM interrupt select type for
+	 * dbg_lan TX and RX interrupts
+	 * should be type 1
+	 * type 0 = IRQ line 6
+	 * type 1 = IRQ line 7
+	 */
+	gim_p_int_dst.is = 1;
+	iowrite32be(gim_p_int_dst.value, REG_GIM_P_INT_DST_10);
+	iowrite32be(gim_p_int_dst.value, REG_GIM_P_INT_DST_11);
+	iowrite32be(gim_p_int_dst.value, REG_GIM_P_INT_DST_25);
+	iowrite32be(gim_p_int_dst.value, REG_GIM_P_INT_DST_26);
+
+	/*
+	 * CTOP IRQ lines should be defined
+	 * as blocking in GIM
+	 */
+	iowrite32be(gim_int_lines, REG_GIM_P_INT_BLK_0);
+
+	/* enable CTOP IRQ lines in GIM */
+	iowrite32be(gim_int_lines, REG_GIM_P_INT_EN_0);
+}
+
+static void __init eznps_early_init(void)
+{
+	eznps_configure_msu();
+	eznps_configure_gim();
+}
+
+static const char *eznps_compat[] __initconst = {
+	"ezchip,arc-nps",
+	NULL,
+};
+
+MACHINE_START(NPS, "nps")
+	.dt_compat	= eznps_compat,
+	.init_early	= eznps_early_init,
+MACHINE_END
diff --git a/arch/arc/plat-eznps/smp.c b/arch/arc/plat-eznps/smp.c
new file mode 100644
index 0000000..5e901f8
--- /dev/null
+++ b/arch/arc/plat-eznps/smp.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright(c) 2015 EZchip Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ */
+
+#include <linux/smp.h>
+#include <linux/of_fdt.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <asm/irq.h>
+#include <plat/ctop.h>
+#include <plat/smp.h>
+#include <plat/mtm.h>
+
+#define NPS_DEFAULT_MSID	0x34
+#define NPS_MTM_CPU_CFG		0x90
+
+static char smp_cpuinfo_buf[128] = {"Extn [EZNPS-SMP]\t: On\n"};
+
+/* Get cpu map from device tree */
+static int __init eznps_get_map(const char *name, struct cpumask *cpumask)
+{
+	unsigned long dt_root = of_get_flat_dt_root();
+	const char *buf;
+
+	buf = of_get_flat_dt_prop(dt_root, name, NULL);
+	if (!buf)
+		return 1;
+
+	cpulist_parse(buf, cpumask);
+
+	return 0;
+}
+
+/* Update board cpu maps */
+static void __init eznps_init_cpumasks(void)
+{
+	struct cpumask cpumask;
+
+	if (eznps_get_map("present-cpus", &cpumask)) {
+		pr_err("Failed to get present-cpus from dtb");
+		return;
+	}
+	init_cpu_present(&cpumask);
+
+	if (eznps_get_map("possible-cpus", &cpumask)) {
+		pr_err("Failed to get possible-cpus from dtb");
+		return;
+	}
+	init_cpu_possible(&cpumask);
+}
+
+static void eznps_init_core(unsigned int cpu)
+{
+	u32 sync_value;
+	struct nps_host_reg_aux_hw_comply hw_comply;
+	struct nps_host_reg_aux_lpc lpc;
+
+	if (NPS_CPU_TO_THREAD_NUM(cpu) != 0)
+		return;
+
+	hw_comply.value = read_aux_reg(CTOP_AUX_HW_COMPLY);
+	hw_comply.me  = 1;
+	hw_comply.le  = 1;
+	hw_comply.te  = 1;
+	write_aux_reg(CTOP_AUX_HW_COMPLY, hw_comply.value);
+
+	/* Enable MMU clock */
+	lpc.mep = 1;
+	write_aux_reg(CTOP_AUX_LPC, lpc.value);
+
+	/* Boot CPU only */
+	if (!cpu) {
+		/* Write to general purpose register in CRG */
+		sync_value = ioread32be(REG_GEN_PURP_0);
+		sync_value |= NPS_CRG_SYNC_BIT;
+		iowrite32be(sync_value, REG_GEN_PURP_0);
+	}
+}
+
+/*
+ * Master kick starting another CPU
+ */
+static void __init eznps_smp_wakeup_cpu(int cpu, unsigned long pc)
+{
+	struct nps_host_reg_mtm_cpu_cfg cpu_cfg;
+
+	if (mtm_enable_thread(cpu) == 0)
+		return;
+
+	/* set PC, dmsid, and start CPU */
+	cpu_cfg.value = (u32)res_service;
+	cpu_cfg.dmsid = NPS_DEFAULT_MSID;
+	cpu_cfg.cs = 1;
+	iowrite32be(cpu_cfg.value, nps_mtm_reg_addr(cpu, NPS_MTM_CPU_CFG));
+}
+
+static void eznps_ipi_send(int cpu)
+{
+	struct global_id gid;
+	struct {
+		union {
+			struct {
+				u32 num:8, cluster:8, core:8, thread:8;
+			};
+			u32 value;
+		};
+	} ipi;
+
+	gid.value = cpu;
+	ipi.thread = get_thread(gid);
+	ipi.core = gid.core;
+	ipi.cluster = nps_cluster_logic_to_phys(gid.cluster);
+	ipi.num = NPS_IPI_IRQ;
+
+	__asm__ __volatile__(
+	"	mov r3, %0\n"
+	"	.word %1\n"
+	:
+	: "r"(ipi.value), "i"(CTOP_INST_ASRI_0_R3)
+	: "r3");
+}
+
+static void eznps_init_per_cpu(int cpu)
+{
+	smp_ipi_irq_setup(cpu, NPS_IPI_IRQ);
+
+	eznps_init_core(cpu);
+	mtm_enable_core(cpu);
+}
+
+static void eznps_ipi_clear(int irq)
+{
+	write_aux_reg(CTOP_AUX_IACK, 1 << irq);
+}
+
+struct plat_smp_ops plat_smp_ops = {
+	.info		= smp_cpuinfo_buf,
+	.init_early_smp	= eznps_init_cpumasks,
+	.cpu_kick	= eznps_smp_wakeup_cpu,
+	.ipi_send	= eznps_ipi_send,
+	.init_per_cpu	= eznps_init_per_cpu,
+	.ipi_clear	= eznps_ipi_clear,
+};
diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig
index d14b3d3..149e091 100644
--- a/arch/arc/plat-tb10x/Kconfig
+++ b/arch/arc/plat-tb10x/Kconfig
@@ -21,7 +21,7 @@
 	select PINCTRL
 	select PINCTRL_TB10X
 	select PINMUX
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select GPIO_TB10X
 	select TB10X_IRQC
 	help
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cdfa6c2..b99d25b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -41,7 +41,7 @@
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARM_SMCCC if CPU_V7
-	select HAVE_BPF_JIT
+	select HAVE_CBPF_JIT
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_C_RECORDMCOUNT
@@ -531,6 +531,8 @@
 	select COMMON_CLK
 	select CPU_ARM926T
 	select GENERIC_CLOCKEVENTS
+	select MULTI_IRQ_HANDLER
+	select SPARSE_IRQ
 	select USE_OF
 	help
 	  Support for the NXP LPC32XX family of processors
@@ -775,6 +777,8 @@
 
 source "arch/arm/mach-moxart/Kconfig"
 
+source "arch/arm/mach-aspeed/Kconfig"
+
 source "arch/arm/mach-mv78xx0/Kconfig"
 
 source "arch/arm/mach-imx/Kconfig"
@@ -804,6 +808,8 @@
 
 source "arch/arm/mach-mmp/Kconfig"
 
+source "arch/arm/mach-oxnas/Kconfig"
+
 source "arch/arm/mach-qcom/Kconfig"
 
 source "arch/arm/mach-realview/Kconfig"
@@ -892,6 +898,18 @@
 	depends on ARCH_STM32
 	default y
 
+config ARCH_MPS2
+	bool "ARM MPS2 platform"
+	depends on ARM_SINGLE_ARMV7M
+	select ARM_AMBA
+	select CLKSRC_MPS2
+	help
+	  Support for Cortex-M Prototyping System (or V2M-MPS2) which comes
+	  with a range of available cores like Cortex-M3/M4/M7.
+
+	  Please note that, depending on which Application Note is used, the
+	  memory map for the platform may vary, so adjustment of the RAM base
+	  might be needed.
+
 # Definitions to make life easier
 config ARCH_ACORN
 	bool
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 1098e91..19a3dcf 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -268,14 +268,6 @@
 		  Say Y here if you want kernel low-level debugging support
 		  on HI3620 UART.
 
-	config DEBUG_HI3716_UART
-		bool "Hisilicon Hi3716 Debug UART"
-		depends on ARCH_HI3xxx
-		select DEBUG_UART_PL01X
-		help
-		  Say Y here if you want kernel low-level debugging support
-		  on HI3716 UART.
-
 	config DEBUG_HIGHBANK_UART
 		bool "Kernel low-level debugging messages via Highbank UART"
 		depends on ARCH_HIGHBANK
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 8c3ce2a..274e8a6 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -183,6 +183,7 @@
 machine-$(CONFIG_ARCH_LPC32XX)		+= lpc32xx
 machine-$(CONFIG_ARCH_MESON)		+= meson
 machine-$(CONFIG_ARCH_MMP)		+= mmp
+machine-$(CONFIG_ARCH_MPS2)		+= vexpress
 machine-$(CONFIG_ARCH_MOXART)		+= moxart
 machine-$(CONFIG_ARCH_MV78XX0)		+= mv78xx0
 machine-$(CONFIG_ARCH_MVEBU)		+= mvebu
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 95c1923..0f89d87 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -112,6 +112,7 @@
 dtb-$(CONFIG_ARCH_EFM32) += \
 	efm32gg-dk3750.dtb
 dtb-$(CONFIG_ARCH_EXYNOS3) += \
+	exynos3250-artik5-eval.dtb \
 	exynos3250-monk.dtb \
 	exynos3250-rinato.dtb
 dtb-$(CONFIG_ARCH_EXYNOS4) += \
@@ -158,9 +159,9 @@
 	integratorap.dtb \
 	integratorcp.dtb
 dtb-$(CONFIG_ARCH_KEYSTONE) += \
-	k2hk-evm.dtb \
-	k2l-evm.dtb \
-	k2e-evm.dtb \
+	keystone-k2hk-evm.dtb \
+	keystone-k2l-evm.dtb \
+	keystone-k2e-evm.dtb \
 	keystone-k2g-evm.dtb
 dtb-$(CONFIG_MACH_KIRKWOOD) += \
 	kirkwood-b3.dtb \
@@ -177,6 +178,7 @@
 	kirkwood-ds109.dtb \
 	kirkwood-ds110jv10.dtb \
 	kirkwood-ds111.dtb \
+	kirkwood-ds112.dtb \
 	kirkwood-ds209.dtb \
 	kirkwood-ds210.dtb \
 	kirkwood-ds212.dtb \
@@ -199,6 +201,7 @@
 	kirkwood-linkstation-lswsxl.dtb \
 	kirkwood-linkstation-lswvl.dtb \
 	kirkwood-linkstation-lswxl.dtb \
+	kirkwood-linksys-viper.dtb \
 	kirkwood-lschlv2.dtb \
 	kirkwood-lsxhl.dtb \
 	kirkwood-mplcec4.dtb \
@@ -214,6 +217,7 @@
 	kirkwood-ns2mini.dtb \
 	kirkwood-nsa310.dtb \
 	kirkwood-nsa310a.dtb \
+	kirkwood-nsa320.dtb \
 	kirkwood-nsa325.dtb \
 	kirkwood-openblocks_a6.dtb \
 	kirkwood-openblocks_a7.dtb \
@@ -241,7 +245,8 @@
 	lpc4350-hitex-eval.dtb \
 	lpc4357-ea4357-devkit.dtb
 dtb-$(CONFIG_ARCH_LPC32XX) += \
-	ea3250.dtb phy3250.dtb
+	lpc3250-ea3250.dtb \
+	lpc3250-phy3250.dtb
 dtb-$(CONFIG_MACH_MESON6) += \
 	meson6-atv1200.dtb
 dtb-$(CONFIG_MACH_MESON8) += \
@@ -253,6 +258,9 @@
 dtb-$(CONFIG_MACH_MESON8B) += \
 	meson8b-mxq.dtb \
 	meson8b-odroidc1.dtb
+dtb-$(CONFIG_ARCH_MPS2) += \
+	mps2-an385.dtb \
+	mps2-an399.dtb
 dtb-$(CONFIG_ARCH_MOXART) += \
 	moxart-uc7112lx.dtb
 dtb-$(CONFIG_SOC_IMX1) += \
@@ -320,8 +328,12 @@
 	imx6dl-sabrelite.dtb \
 	imx6dl-sabresd.dtb \
 	imx6dl-tx6dl-comtft.dtb \
+	imx6dl-tx6s-8034.dtb \
+	imx6dl-tx6s-8035.dtb \
 	imx6dl-tx6u-801x.dtb \
+	imx6dl-tx6u-8033.dtb \
 	imx6dl-tx6u-811x.dtb \
+	imx6dl-tx6u-81xx-mb7.dtb \
 	imx6dl-udoo.dtb \
 	imx6dl-wandboard.dtb \
 	imx6dl-wandboard-revb1.dtb \
@@ -346,6 +358,7 @@
 	imx6q-gw552x.dtb \
 	imx6q-hummingboard.dtb \
 	imx6q-icore-rqs.dtb \
+	imx6q-marsboard.dtb \
 	imx6q-nitrogen6x.dtb \
 	imx6q-nitrogen6_max.dtb \
 	imx6q-novena.dtb \
@@ -360,21 +373,30 @@
 	imx6q-tx6q-1010-comtft.dtb \
 	imx6q-tx6q-1020.dtb \
 	imx6q-tx6q-1020-comtft.dtb \
+	imx6q-tx6q-1036.dtb \
 	imx6q-tx6q-1110.dtb \
+	imx6q-tx6q-11x0-mb7.dtb \
 	imx6q-udoo.dtb \
 	imx6q-wandboard.dtb \
 	imx6q-wandboard-revb1.dtb \
+	imx6qp-nitrogen6_max.dtb \
 	imx6qp-sabreauto.dtb \
 	imx6qp-sabresd.dtb
 dtb-$(CONFIG_SOC_IMX6SL) += \
 	imx6sl-evk.dtb \
 	imx6sl-warp.dtb
 dtb-$(CONFIG_SOC_IMX6SX) += \
+	imx6sx-nitrogen6sx.dtb \
 	imx6sx-sabreauto.dtb \
 	imx6sx-sdb-reva.dtb \
+	imx6sx-sdb-sai.dtb \
 	imx6sx-sdb.dtb
 dtb-$(CONFIG_SOC_IMX6UL) += \
-	imx6ul-14x14-evk.dtb
+	imx6ul-14x14-evk.dtb \
+	imx6ul-pico-hobbit.dtb \
+	imx6ul-tx6ul-0010.dtb \
+	imx6ul-tx6ul-0011.dtb \
+	imx6ul-tx6ul-mainboard.dtb
 dtb-$(CONFIG_SOC_IMX7D) += \
 	imx7d-cl-som-imx7.dtb \
 	imx7d-sbc-imx7.dtb \
@@ -388,7 +410,8 @@
 	vf610m4-colibri.dtb \
 	vf610-cosmic.dtb \
 	vf610m4-cosmic.dtb \
-	vf610-twr.dtb
+	vf610-twr.dtb \
+	vf610-zii-dev-rev-b.dtb
 dtb-$(CONFIG_ARCH_MXS) += \
 	imx23-evk.dtb \
 	imx23-olinuxino.dtb \
@@ -485,6 +508,8 @@
 	dm8168-evm.dtb \
 	dra62x-j5eco-evm.dtb
 dtb-$(CONFIG_SOC_AM33XX) += \
+	am335x-baltos-ir2110.dtb \
+	am335x-baltos-ir3220.dtb \
 	am335x-baltos-ir5221.dtb \
 	am335x-base0033.dtb \
 	am335x-bone.dtb \
@@ -494,6 +519,7 @@
 	am335x-cm-t335.dtb \
 	am335x-evm.dtb \
 	am335x-evmsk.dtb \
+	am335x-icev2.dtb \
 	am335x-lxm.dtb \
 	am335x-nano.dtb \
 	am335x-pepper.dtb \
@@ -503,6 +529,7 @@
 	am335x-wega-rdk.dtb
 dtb-$(CONFIG_ARCH_OMAP4) += \
 	omap4-duovero-parlor.dtb \
+	omap4-kc1.dtb \
 	omap4-panda.dtb \
 	omap4-panda-a4.dtb \
 	omap4-panda-es.dtb \
@@ -526,9 +553,12 @@
 	am57xx-beagle-x15.dtb \
 	am57xx-cl-som-am57x.dtb \
 	am57xx-sbc-am57x.dtb \
+	am572x-idk.dtb \
 	dra7-evm.dtb \
-	dra72-evm.dtb
+	dra72-evm.dtb \
+	dra72-evm-revc.dtb
 dtb-$(CONFIG_ARCH_ORION5X) += \
+	orion5x-kuroboxpro.dtb \
 	orion5x-lacie-d2-network.dtb \
 	orion5x-lacie-ethernet-disk-mini-v2.dtb \
 	orion5x-linkstation-lsgl.dtb \
@@ -538,7 +568,10 @@
 	orion5x-rd88f5182-nas.dtb
 dtb-$(CONFIG_ARCH_PRIMA2) += \
 	prima2-evb.dtb
+dtb-$(CONFIG_ARCH_OXNAS) += \
+	wd-mbwe.dtb
 dtb-$(CONFIG_ARCH_QCOM) += \
+	qcom-apq8064-arrow-db600c.dtb \
 	qcom-apq8064-cm-qs600.dtb \
 	qcom-apq8064-ifc6410.dtb \
 	qcom-apq8064-sony-xperia-yuga.dtb \
@@ -546,13 +579,20 @@
 	qcom-apq8074-dragonboard.dtb \
 	qcom-apq8084-ifc6540.dtb \
 	qcom-apq8084-mtp.dtb \
+	qcom-ipq4019-ap.dk01.1-c1.dtb \
 	qcom-ipq8064-ap148.dtb \
 	qcom-msm8660-surf.dtb \
 	qcom-msm8960-cdp.dtb \
 	qcom-msm8974-sony-xperia-honami.dtb
 dtb-$(CONFIG_ARCH_REALVIEW) += \
 	arm-realview-pb1176.dtb \
-	arm-realview-pb11mp.dtb
+	arm-realview-pb11mp.dtb \
+	arm-realview-eb.dtb \
+	arm-realview-eb-11mp.dtb \
+	arm-realview-eb-11mp-revb.dtb \
+	arm-realview-eb-a9mp.dtb \
+	arm-realview-pba8.dtb \
+	arm-realview-pbx-a9.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += \
 	rk3036-evb.dtb \
 	rk3036-kylin.dtb \
@@ -565,6 +605,7 @@
 	rk3288-evb-rk808.dtb \
 	rk3288-firefly-beta.dtb \
 	rk3288-firefly.dtb \
+	rk3288-miqi.dtb \
 	rk3288-popmetal.dtb \
 	rk3288-r89.dtb \
 	rk3288-rock2-square.dtb \
@@ -608,6 +649,7 @@
 	socfpga_cyclone5_de0_sockit.dtb \
 	socfpga_cyclone5_sockit.dtb \
 	socfpga_cyclone5_socrates.dtb \
+	socfpga_cyclone5_vining_fpga.dtb \
 	socfpga_vt.dtb
 dtb-$(CONFIG_ARCH_SPEAR13XX) += \
 	spear1310-evb.dtb \
@@ -637,6 +679,7 @@
 	sun4i-a10-ba10-tvbox.dtb \
 	sun4i-a10-chuwi-v7-cw0825.dtb \
 	sun4i-a10-cubieboard.dtb \
+	sun4i-a10-dserve-dsrv9703c.dtb \
 	sun4i-a10-gemei-g9.dtb \
 	sun4i-a10-hackberry.dtb \
 	sun4i-a10-hyundai-a7hd.dtb \
@@ -660,6 +703,7 @@
 	sun5i-a10s-olinuxino-micro.dtb \
 	sun5i-a10s-r7-tv-dongle.dtb \
 	sun5i-a10s-wobo-i5.dtb \
+	sun5i-a13-difrnce-dit4350.dtb \
 	sun5i-a13-empire-electronix-d709.dtb \
 	sun5i-a13-hsg-h702.dtb \
 	sun5i-a13-inet-98v-rev2.dtb \
@@ -675,6 +719,7 @@
 	sun6i-a31-i7.dtb \
 	sun6i-a31-m9.dtb \
 	sun6i-a31-mele-a1000g-quad.dtb \
+	sun6i-a31s-colorfly-e708-q1.dtb \
 	sun6i-a31s-cs908.dtb \
 	sun6i-a31s-primo81.dtb \
 	sun6i-a31s-sina31s.dtb \
@@ -707,6 +752,7 @@
 	sun8i-a23-gt90h-v4.dtb \
 	sun8i-a23-ippo-q8h-v5.dtb \
 	sun8i-a23-ippo-q8h-v1.2.dtb \
+	sun8i-a23-polaroid-mid2809pxe04.dtb \
 	sun8i-a23-q8-tablet.dtb \
 	sun8i-a33-et-q8-v1.6.dtb \
 	sun8i-a33-ga10h-v1.1.dtb \
@@ -715,6 +761,9 @@
 	sun8i-a33-sinlinx-sina33.dtb \
 	sun8i-a83t-allwinner-h8homlet-v2.dtb \
 	sun8i-a83t-cubietruck-plus.dtb \
+	sun8i-h3-orangepi-2.dtb \
+	sun8i-h3-orangepi-one.dtb \
+	sun8i-h3-orangepi-pc.dtb \
 	sun8i-h3-orangepi-plus.dtb
 dtb-$(CONFIG_MACH_SUN9I) += \
 	sun9i-a80-optimus.dtb \
@@ -839,6 +888,8 @@
 	mt8127-moose.dtb \
 	mt8135-evbp1.dtb
 dtb-$(CONFIG_ARCH_ZX) += zx296702-ad1.dtb
+dtb-$(CONFIG_ARCH_ASPEED) += aspeed-bmc-opp-palmetto.dtb \
+	aspeed-ast2500-evb.dtb
 endif
 
 dtstree		:= $(srctree)/$(src)
diff --git a/arch/arm/boot/dts/am335x-baltos-ir2110.dts b/arch/arm/boot/dts/am335x-baltos-ir2110.dts
new file mode 100644
index 0000000..a9a9730
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-baltos-ir2110.dts
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * VScom OnRISC
+ * http://www.vscom.de
+ */
+
+/dts-v1/;
+
+#include "am335x-baltos.dtsi"
+
+/ {
+	model = "OnRISC Baltos iR 2110";
+};
+
+&am33xx_pinmux {
+	uart1_pins: pinmux_uart1_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x980, PIN_INPUT | MUX_MODE0)      /* uart1_rxd */
+			AM33XX_IOPAD(0x984, PIN_INPUT | MUX_MODE0)      /* uart1_txd */
+			AM33XX_IOPAD(0x978, PIN_INPUT_PULLDOWN | MUX_MODE0)      /* uart1_ctsn */
+			AM33XX_IOPAD(0x97c, PIN_OUTPUT_PULLDOWN | MUX_MODE0)      /* uart1_rtsn */
+			AM33XX_IOPAD(0x8e0, PIN_OUTPUT_PULLDOWN | MUX_MODE7)      /* lcd_vsync.gpio2[22] DTR */
+			AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* lcd_hsync.gpio2[23] DSR */
+			AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* lcd_pclk.gpio2[24] DCD */
+			AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* lcd_ac_bias_en.gpio2[25] RI */
+		>;
+	};
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins>;
+	dtr-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>;
+	dsr-gpios = <&gpio2 23 GPIO_ACTIVE_LOW>;
+	dcd-gpios = <&gpio2 24 GPIO_ACTIVE_LOW>;
+	rng-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>;
+
+	status = "okay";
+};
+
+&usb0_phy {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+	dr_mode = "host";
+};
+
+&cpsw_emac0 {
+	phy_id = <&davinci_mdio>, <1>;
+	phy-mode = "rmii";
+	dual_emac_res_vlan = <1>;
+};
+
+&cpsw_emac1 {
+	phy_id = <&davinci_mdio>, <7>;
+	phy-mode = "rgmii-txid";
+	dual_emac_res_vlan = <2>;
+};
+
+&phy_sel {
+	rmii-clock-ext = <1>;
+};
diff --git a/arch/arm/boot/dts/am335x-baltos-ir3220.dts b/arch/arm/boot/dts/am335x-baltos-ir3220.dts
new file mode 100644
index 0000000..fe002a1
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-baltos-ir3220.dts
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * VScom OnRISC
+ * http://www.vscom.de
+ */
+
+/dts-v1/;
+
+#include "am335x-baltos.dtsi"
+
+/ {
+	model = "OnRISC Baltos iR 3220";
+};
+
+&am33xx_pinmux {
+	tca6416_pins: pinmux_tca6416_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x9b4, PIN_INPUT_PULLUP | MUX_MODE7)      /* xdma_event_intr1.gpio0[20] tca6416 stuff */
+		>;
+	};
+
+	uart1_pins: pinmux_uart1_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x980, PIN_INPUT | MUX_MODE0)      /* uart1_rxd */
+			AM33XX_IOPAD(0x984, PIN_INPUT | MUX_MODE0)      /* uart1_txd */
+			AM33XX_IOPAD(0x978, PIN_INPUT_PULLDOWN | MUX_MODE0)      /* uart1_ctsn */
+			AM33XX_IOPAD(0x97c, PIN_OUTPUT_PULLDOWN | MUX_MODE0)      /* uart1_rtsn */
+			AM33XX_IOPAD(0x8e0, PIN_OUTPUT_PULLDOWN | MUX_MODE7)      /* lcd_vsync.gpio2[22] DTR */
+			AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* lcd_hsync.gpio2[23] DSR */
+			AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* lcd_pclk.gpio2[24] DCD */
+			AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* lcd_ac_bias_en.gpio2[25] RI */
+		>;
+	};
+
+	uart2_pins: pinmux_uart2_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x950, PIN_INPUT | MUX_MODE1)      /* spi0_sclk.uart2_rxd_mux3 */
+			AM33XX_IOPAD(0x954, PIN_OUTPUT | MUX_MODE1)      /* spi0_d0.uart2_txd_mux3 */
+			AM33XX_IOPAD(0x988, PIN_INPUT_PULLDOWN | MUX_MODE2)      /* i2c0_sda.uart2_ctsn_mux0 */
+			AM33XX_IOPAD(0x98c, PIN_OUTPUT_PULLDOWN | MUX_MODE2)      /* i2c0_scl.uart2_rtsn_mux0 */
+			AM33XX_IOPAD(0x830, PIN_OUTPUT_PULLDOWN | MUX_MODE7)      /* gpmc_ad12.gpio1[12] DTR */
+			AM33XX_IOPAD(0x834, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* gpmc_ad13.gpio1[13] DSR */
+			AM33XX_IOPAD(0x838, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* gpmc_ad14.gpio1[14] DCD */
+			AM33XX_IOPAD(0x83c, PIN_INPUT_PULLDOWN | MUX_MODE7)     /* gpmc_ad15.gpio1[15] RI */
+
+			AM33XX_IOPAD(0x9a0, PIN_INPUT_PULLUP | MUX_MODE7)      /* mcasp0_aclkr.gpio3[18], INPUT_PULLDOWN | MODE7 */
+		>;
+	};
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins>;
+	dtr-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>;
+	dsr-gpios = <&gpio2 23 GPIO_ACTIVE_LOW>;
+	dcd-gpios = <&gpio2 24 GPIO_ACTIVE_LOW>;
+	rng-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>;
+
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart2_pins>;
+	dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
+	dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
+	dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+	rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+
+	status = "okay";
+};
+
+&i2c1 {
+	tca6416: gpio@20 {
+		compatible = "ti,tca6416";
+		reg = <0x20>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <20 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&tca6416_pins>;
+	};
+};
+
+&usb0_phy {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+	dr_mode = "host";
+};
+
+&cpsw_emac0 {
+	phy-mode = "rmii";
+	dual_emac_res_vlan = <1>;
+	fixed-link {
+		speed = <100>;
+		full-duplex;
+	};
+};
+
+&cpsw_emac1 {
+	phy_id = <&davinci_mdio>, <7>;
+	phy-mode = "rgmii-txid";
+	dual_emac_res_vlan = <2>;
+};
+
+&phy_sel {
+	rmii-clock-ext = <1>;
+};
diff --git a/arch/arm/boot/dts/am335x-baltos-ir5221.dts b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
index 6c667fb..d0faa7b 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir5221.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
@@ -13,83 +13,19 @@
 
 /dts-v1/;
 
-#include "am33xx.dtsi"
-#include <dt-bindings/pwm/pwm.h>
-#include <dt-bindings/interrupt-controller/irq.h>
+#include "am335x-baltos.dtsi"
 
 / {
 	model = "OnRISC Baltos iR 5221";
-	compatible = "vscom,onrisc", "ti,am33xx";
-
-	cpus {
-		cpu@0 {
-			cpu0-supply = <&vdd1_reg>;
-		};
-	};
-
-	memory {
-		device_type = "memory";
-		reg = <0x80000000 0x10000000>; /* 256 MB */
-	};
-
-	vbat: fixedregulator@0 {
-		compatible = "regulator-fixed";
-		regulator-name = "vbat";
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
-		regulator-boot-on;
-	};
-
-	wl12xx_vmmc: fixedregulator@2 {
-		pinctrl-names = "default";
-		pinctrl-0 = <&wl12xx_gpio>;
-		compatible = "regulator-fixed";
-		regulator-name = "vwl1271";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-		gpio = <&gpio3 8 0>;
-		startup-delay-us = <70000>;
-		enable-active-high;
-	};
 };
 
 &am33xx_pinmux {
-	mmc2_pins: pinmux_mmc2_pins {
-		pinctrl-single,pins = <
-			AM33XX_IOPAD(0x820, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_ad8.mmc1_dat0_mux0 */
-			AM33XX_IOPAD(0x824, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_ad9.mmc1_dat1_mux0 */
-			AM33XX_IOPAD(0x828, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_ad10.mmc1_dat2_mux0 */
-			AM33XX_IOPAD(0x82c, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_ad11.mmc1_dat3_mux0 */
-			AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_csn1.mmc1_clk_mux0 */
-			AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_csn2.mmc1_cmd_mux0 */
-			AM33XX_IOPAD(0x9e4, PIN_INPUT_PULLUP | MUX_MODE7)      /* emu0.gpio3[7] */
-		>;
-	};
-
-	wl12xx_gpio: pinmux_wl12xx_gpio {
-		pinctrl-single,pins = <
-			AM33XX_IOPAD(0x9e8, PIN_OUTPUT_PULLUP | MUX_MODE7)      /* emu1.gpio3[8] */
-		>;
-	};
-
-	tps65910_pins: pinmux_tps65910_pins {
-		pinctrl-single,pins = <
-			AM33XX_IOPAD(0x878, PIN_INPUT_PULLUP | MUX_MODE7)      /* gpmc_ben1.gpio1[28] */
-		>;
-	};
-
 	tca6416_pins: pinmux_tca6416_pins {
 		pinctrl-single,pins = <
 			AM33XX_IOPAD(0x9b4, PIN_INPUT_PULLUP | MUX_MODE7)      /* xdma_event_intr1.gpio0[20] tca6416 stuff */
 		>;
 	};
 
-	i2c1_pins: pinmux_i2c1_pins {
-		pinctrl-single,pins = <
-			AM33XX_IOPAD(0x958, PIN_INPUT | MUX_MODE2)      /* spi0_d1.i2c1_sda_mux3 */
-			AM33XX_IOPAD(0x95c, PIN_INPUT | MUX_MODE2)      /* spi0_cs0.i2c1_scl_mux3 */
-		>;
-	};
 
 	dcan1_pins: pinmux_dcan1_pins {
 		pinctrl-single,pins = <
@@ -98,19 +34,12 @@
 		>;
 	};
 
-	uart0_pins: pinmux_uart0_pins {
-		pinctrl-single,pins = <
-			AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart0_rxd.uart0_rxd */
-			AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0)		/* uart0_txd.uart0_txd */
-		>;
-	};
-
 	uart1_pins: pinmux_uart1_pins {
 		pinctrl-single,pins = <
 			AM33XX_IOPAD(0x980, PIN_INPUT | MUX_MODE0)      /* uart1_rxd */
 			AM33XX_IOPAD(0x984, PIN_INPUT | MUX_MODE0)      /* uart1_txd */
-			AM33XX_IOPAD(0x978, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* uart1_ctsn, INPUT | MODE0 */
-			AM33XX_IOPAD(0x97c, PIN_OUTPUT_PULLDOWN | MUX_MODE7)      /* uart1_rtsn, OUTPUT | MODE0 */
+			AM33XX_IOPAD(0x978, PIN_INPUT_PULLDOWN | MUX_MODE0)      /* uart1_ctsn */
+			AM33XX_IOPAD(0x97c, PIN_OUTPUT_PULLDOWN | MUX_MODE0)      /* uart1_rtsn */
 			AM33XX_IOPAD(0x8e0, PIN_OUTPUT_PULLDOWN | MUX_MODE7)      /* lcd_vsync.gpio2[22] DTR */
 			AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* lcd_hsync.gpio2[23] DSR */
 			AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* lcd_pclk.gpio2[24] DCD */
@@ -122,8 +51,8 @@
 		pinctrl-single,pins = <
 			AM33XX_IOPAD(0x950, PIN_INPUT | MUX_MODE1)      /* spi0_sclk.uart2_rxd_mux3 */
 			AM33XX_IOPAD(0x954, PIN_OUTPUT | MUX_MODE1)      /* spi0_d0.uart2_txd_mux3 */
-			AM33XX_IOPAD(0x988, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* i2c0_sda.uart2_ctsn_mux0 */
-			AM33XX_IOPAD(0x98c, PIN_OUTPUT_PULLDOWN | MUX_MODE7)      /* i2c0_scl.uart2_rtsn_mux0 */
+			AM33XX_IOPAD(0x988, PIN_INPUT_PULLDOWN | MUX_MODE2)      /* i2c0_sda.uart2_ctsn_mux0 */
+			AM33XX_IOPAD(0x98c, PIN_OUTPUT_PULLDOWN | MUX_MODE2)      /* i2c0_scl.uart2_rtsn_mux0 */
 			AM33XX_IOPAD(0x830, PIN_OUTPUT_PULLDOWN | MUX_MODE7)      /* gpmc_ad12.gpio1[12] DTR */
 			AM33XX_IOPAD(0x834, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* gpmc_ad13.gpio1[13] DSR */
 			AM33XX_IOPAD(0x838, PIN_INPUT_PULLDOWN | MUX_MODE7)      /* gpmc_ad14.gpio1[14] DCD */
@@ -133,151 +62,6 @@
 		>;
 	};
 
-	cpsw_default: cpsw_default {
-		pinctrl-single,pins = <
-			/* Slave 1 */
-			AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1)       /* mii1_crs.rmii1_crs_dv */
-			AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE1)      /* mii1_tx_en.rmii1_txen */
-			AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE1)      /* mii1_txd1.rmii1_txd1 */
-			AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE1)      /* mii1_txd0.rmii1_txd0 */
-			AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE1)      /* mii1_rxd1.rmii1_rxd1 */
-			AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE1)      /* mii1_rxd0.rmii1_rxd0 */
-			AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0)      /* rmii1_ref_clk.rmii1_refclk */
-
-
-			/* Slave 2 */
-			AM33XX_IOPAD(0x840, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a0.rgmii2_tctl */
-			AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a1.rgmii2_rctl */
-			AM33XX_IOPAD(0x848, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a2.rgmii2_td3 */
-			AM33XX_IOPAD(0x84c, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a3.rgmii2_td2 */
-			AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a4.rgmii2_td1 */
-			AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a5.rgmii2_td0 */
-			AM33XX_IOPAD(0x858, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a6.rgmii2_tclk */
-			AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a7.rgmii2_rclk */
-			AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a8.rgmii2_rd3 */
-			AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a9.rgmii2_rd2 */
-			AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a10.rgmii2_rd1 */
-			AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a11.rgmii2_rd0 */
-		>;
-	};
-
-	cpsw_sleep: cpsw_sleep {
-		pinctrl-single,pins = <
-			/* Slave 1 reset value */
-			AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)
-
-			/* Slave 2 reset value*/
-			AM33XX_IOPAD(0x840, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x848, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x84c, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x850, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x854, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x858, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE7)
-		>;
-	};
-
-	davinci_mdio_default: davinci_mdio_default {
-		pinctrl-single,pins = <
-			/* MDIO */
-			AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)	/* mdio_data.mdio_data */
-			AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0)			/* mdio_clk.mdio_clk */
-		>;
-	};
-
-	davinci_mdio_sleep: davinci_mdio_sleep {
-		pinctrl-single,pins = <
-			/* MDIO reset value */
-			AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
-		>;
-	};
-
-	nandflash_pins_s0: nandflash_pins_s0 {
-		pinctrl-single,pins = <
-			AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad0.gpmc_ad0 */
-			AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad1.gpmc_ad1 */
-			AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad2.gpmc_ad2 */
-			AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad3.gpmc_ad3 */
-			AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad4.gpmc_ad4 */
-			AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad5.gpmc_ad5 */
-			AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad6.gpmc_ad6 */
-			AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad7.gpmc_ad7 */
-			AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_wait0.gpmc_wait0 */
-			AM33XX_IOPAD(0x874, PIN_INPUT_PULLUP | MUX_MODE7)	/* gpmc_wpn.gpio0_30 */
-			AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE0)		/* gpmc_csn0.gpmc_csn0  */
-			AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE0)		/* gpmc_advn_ale.gpmc_advn_ale */
-			AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE0)		/* gpmc_oen_ren.gpmc_oen_ren */
-			AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE0)		/* gpmc_wen.gpmc_wen */
-			AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE0)		/* gpmc_be0n_cle.gpmc_be0n_cle */
-		>;
-	};
-};
-
-&elm {
-	status = "okay";
-};
-
-&gpmc {
-	pinctrl-names = "default";
-	pinctrl-0 = <&nandflash_pins_s0>;
-	ranges = <0 0 0x08000000 0x10000000>;	/* CS0: NAND */
-	status = "okay";
-
-	nand@0,0 {
-		compatible = "ti,omap2-nand";
-		reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
-		interrupt-parent = <&gpmc>;
-		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
-			     <1 IRQ_TYPE_NONE>;	/* termcount */
-		nand-bus-width = <8>;
-		ti,nand-ecc-opt = "bch8";
-		ti,nand-xfer-type = "polled";
-
-		gpmc,device-nand = "true";
-		gpmc,device-width = <1>;
-		gpmc,sync-clk-ps = <0>;
-		gpmc,cs-on-ns = <0>;
-		gpmc,cs-rd-off-ns = <44>;
-		gpmc,cs-wr-off-ns = <44>;
-		gpmc,adv-on-ns = <6>;
-		gpmc,adv-rd-off-ns = <34>;
-		gpmc,adv-wr-off-ns = <44>;
-		gpmc,we-on-ns = <0>;
-		gpmc,we-off-ns = <40>;
-		gpmc,oe-on-ns = <0>;
-		gpmc,oe-off-ns = <54>;
-		gpmc,access-ns = <64>;
-		gpmc,rd-cycle-ns = <82>;
-		gpmc,wr-cycle-ns = <82>;
-		gpmc,bus-turnaround-ns = <0>;
-		gpmc,cycle2cycle-delay-ns = <0>;
-		gpmc,clk-activation-ns = <0>;
-		gpmc,wr-access-ns = <40>;
-		gpmc,wr-data-mux-bus-ns = <0>;
-
-		#address-cells = <1>;
-		#size-cells = <1>;
-		elm_id = <&elm>;
-	};
-};
-
-&uart0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart0_pins>;
-
-	status = "okay";
 };
 
 &uart1 {
@@ -287,8 +71,6 @@
 	dsr-gpios = <&gpio2 23 GPIO_ACTIVE_LOW>;
 	dcd-gpios = <&gpio2 24 GPIO_ACTIVE_LOW>;
 	rng-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>;
-	cts-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
-	rts-gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
 
 	status = "okay";
 };
@@ -300,35 +82,11 @@
 	dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
 	dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
 	rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
-	cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
-	rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
 
 	status = "okay";
 };
 
 &i2c1 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&i2c1_pins>;
-
-	status = "okay";
-	clock-frequency = <400000>;
-
-	tps: tps@2d {
-		reg = <0x2d>;
-		gpio-controller;
-		#gpio-cells = <2>;
-		interrupt-parent = <&gpio1>;
-		interrupts = <28 GPIO_ACTIVE_LOW>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&tps65910_pins>;
-	};
-
-	at24@50 {
-		compatible = "at24,24c02";
-		pagesize = <8>;
-		reg = <0x50>;
-	};
-
 	tca6416: gpio@20 {
 		compatible = "ti,tca6416";
 		reg = <0x20>;
@@ -341,14 +99,6 @@
 	};
 };
 
-&usb {
-	status = "okay";
-};
-
-&usb_ctrl_mod {
-	status = "okay";
-};
-
 &usb0_phy {
 	status = "okay";
 };
@@ -367,112 +117,13 @@
 	dr_mode = "otg";
 };
 
-&cppi41dma  {
-	status = "okay";
-};
-
-#include "tps65910.dtsi"
-
-&tps {
-	vcc1-supply = <&vbat>;
-	vcc2-supply = <&vbat>;
-	vcc3-supply = <&vbat>;
-	vcc4-supply = <&vbat>;
-	vcc5-supply = <&vbat>;
-	vcc6-supply = <&vbat>;
-	vcc7-supply = <&vbat>;
-	vccio-supply = <&vbat>;
-
-	ti,en-ck32k-xtal = <1>;
-
-	regulators {
-		vrtc_reg: regulator@0 {
-			regulator-always-on;
-		};
-
-		vio_reg: regulator@1 {
-			regulator-always-on;
-		};
-
-		vdd1_reg: regulator@2 {
-			/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
-			regulator-name = "vdd_mpu";
-			regulator-min-microvolt = <912500>;
-			regulator-max-microvolt = <1312500>;
-			regulator-boot-on;
-			regulator-always-on;
-		};
-
-		vdd2_reg: regulator@3 {
-			/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
-			regulator-name = "vdd_core";
-			regulator-min-microvolt = <912500>;
-			regulator-max-microvolt = <1150000>;
-			regulator-boot-on;
-			regulator-always-on;
-		};
-
-		vdd3_reg: regulator@4 {
-			regulator-always-on;
-		};
-
-		vdig1_reg: regulator@5 {
-			regulator-always-on;
-		};
-
-		vdig2_reg: regulator@6 {
-			regulator-always-on;
-		};
-
-		vpll_reg: regulator@7 {
-			regulator-always-on;
-		};
-
-		vdac_reg: regulator@8 {
-			regulator-always-on;
-		};
-
-		vaux1_reg: regulator@9 {
-			regulator-always-on;
-		};
-
-		vaux2_reg: regulator@10 {
-			regulator-always-on;
-		};
-
-		vaux33_reg: regulator@11 {
-			regulator-always-on;
-		};
-
-		vmmc_reg: regulator@12 {
-			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <3300000>;
-			regulator-always-on;
-		};
-	};
-};
-
-&mac {
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&cpsw_default>;
-	pinctrl-1 = <&cpsw_sleep>;
-	dual_emac = <1>;
-
-	status = "okay";
-};
-
-&davinci_mdio {
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&davinci_mdio_default>;
-	pinctrl-1 = <&davinci_mdio_sleep>;
-
-	status = "okay";
-};
-
 &cpsw_emac0 {
-	phy_id = <&davinci_mdio>, <0>;
 	phy-mode = "rmii";
 	dual_emac_res_vlan = <1>;
+	fixed-link {
+		speed = <100>;
+		full-duplex;
+	};
 };
 
 &cpsw_emac1 {
@@ -485,42 +136,6 @@
 	rmii-clock-ext = <1>;
 };
 
-&mmc1 {
-	vmmc-supply = <&vmmc_reg>;
-	status = "okay";
-};
-
-&mmc2 {
-	status = "okay";
-	vmmc-supply = <&wl12xx_vmmc>;
-	ti,non-removable;
-	bus-width = <4>;
-	cap-power-off-card;
-	pinctrl-names = "default";
-	pinctrl-0 = <&mmc2_pins>;
-
-	#address-cells = <1>;
-	#size-cells = <0>;
-	wlcore: wlcore@2 {
-		compatible = "ti,wl1835";
-		reg = <2>;
-		interrupt-parent = <&gpio3>;
-		interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
-	};
-};
-
-&sham {
-	status = "okay";
-};
-
-&aes {
-	status = "okay";
-};
-
-&gpio0 {
-	ti,no-reset-on-init;
-};
-
 &dcan1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&dcan1_pins>;
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
new file mode 100644
index 0000000..c8609d8
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * VScom OnRISC
+ * http://www.vscom.de
+ */
+
+#include "am33xx.dtsi"
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+	compatible = "vscom,onrisc", "ti,am33xx";
+
+	cpus {
+		cpu@0 {
+			cpu0-supply = <&vdd1_reg>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x10000000>; /* 256 MB */
+	};
+
+	vbat: fixedregulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "vbat";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+	};
+
+	wl12xx_vmmc: fixedregulator@2 {
+		pinctrl-names = "default";
+		pinctrl-0 = <&wl12xx_gpio>;
+		compatible = "regulator-fixed";
+		regulator-name = "vwl1271";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio3 8 0>;
+		startup-delay-us = <70000>;
+		enable-active-high;
+	};
+};
+
+&am33xx_pinmux {
+	mmc2_pins: pinmux_mmc2_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x820, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_ad8.mmc1_dat0_mux0 */
+			AM33XX_IOPAD(0x824, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_ad9.mmc1_dat1_mux0 */
+			AM33XX_IOPAD(0x828, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_ad10.mmc1_dat2_mux0 */
+			AM33XX_IOPAD(0x82c, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_ad11.mmc1_dat3_mux0 */
+			AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_csn1.mmc1_clk_mux0 */
+			AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2)      /* gpmc_csn2.mmc1_cmd_mux0 */
+			AM33XX_IOPAD(0x9e4, PIN_INPUT_PULLUP | MUX_MODE7)      /* emu0.gpio3[7] */
+		>;
+	};
+
+	wl12xx_gpio: pinmux_wl12xx_gpio {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x9e8, PIN_OUTPUT_PULLUP | MUX_MODE7)      /* emu1.gpio3[8] */
+		>;
+	};
+
+	tps65910_pins: pinmux_tps65910_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x878, PIN_INPUT_PULLUP | MUX_MODE7)      /* gpmc_ben1.gpio1[28] */
+		>;
+	};
+
+	i2c1_pins: pinmux_i2c1_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x958, PIN_INPUT | MUX_MODE2)      /* spi0_d1.i2c1_sda_mux3 */
+			AM33XX_IOPAD(0x95c, PIN_INPUT | MUX_MODE2)      /* spi0_cs0.i2c1_scl_mux3 */
+		>;
+	};
+
+	uart0_pins: pinmux_uart0_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart0_rxd.uart0_rxd */
+			AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0)		/* uart0_txd.uart0_txd */
+		>;
+	};
+
+	cpsw_default: cpsw_default {
+		pinctrl-single,pins = <
+			/* Slave 1 */
+			AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1)       /* mii1_crs.rmii1_crs_dv */
+			AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE1)      /* mii1_tx_en.rmii1_txen */
+			AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE1)      /* mii1_txd1.rmii1_txd1 */
+			AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE1)      /* mii1_txd0.rmii1_txd0 */
+			AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE1)      /* mii1_rxd1.rmii1_rxd1 */
+			AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE1)      /* mii1_rxd0.rmii1_rxd0 */
+			AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0)      /* rmii1_ref_clk.rmii1_refclk */
+
+
+			/* Slave 2 */
+			AM33XX_IOPAD(0x840, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a0.rgmii2_tctl */
+			AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a1.rgmii2_rctl */
+			AM33XX_IOPAD(0x848, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a2.rgmii2_td3 */
+			AM33XX_IOPAD(0x84c, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a3.rgmii2_td2 */
+			AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a4.rgmii2_td1 */
+			AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a5.rgmii2_td0 */
+			AM33XX_IOPAD(0x858, PIN_OUTPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a6.rgmii2_tclk */
+			AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a7.rgmii2_rclk */
+			AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a8.rgmii2_rd3 */
+			AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a9.rgmii2_rd2 */
+			AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a10.rgmii2_rd1 */
+			AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE2)	/* gpmc_a11.rgmii2_rd0 */
+		>;
+	};
+
+	cpsw_sleep: cpsw_sleep {
+		pinctrl-single,pins = <
+			/* Slave 1 reset value */
+			AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)
+
+			/* Slave 2 reset value*/
+			AM33XX_IOPAD(0x840, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x848, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x84c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x850, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x854, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x858, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+		>;
+	};
+
+	davinci_mdio_default: davinci_mdio_default {
+		pinctrl-single,pins = <
+			/* MDIO */
+			AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)	/* mdio_data.mdio_data */
+			AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0)			/* mdio_clk.mdio_clk */
+		>;
+	};
+
+	davinci_mdio_sleep: davinci_mdio_sleep {
+		pinctrl-single,pins = <
+			/* MDIO reset value */
+			AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+		>;
+	};
+
+	nandflash_pins_s0: nandflash_pins_s0 {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad0.gpmc_ad0 */
+			AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad1.gpmc_ad1 */
+			AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad2.gpmc_ad2 */
+			AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad3.gpmc_ad3 */
+			AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad4.gpmc_ad4 */
+			AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad5.gpmc_ad5 */
+			AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad6.gpmc_ad6 */
+			AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad7.gpmc_ad7 */
+			AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_wait0.gpmc_wait0 */
+			AM33XX_IOPAD(0x874, PIN_INPUT_PULLUP | MUX_MODE7)	/* gpmc_wpn.gpio0_30 */
+			AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE0)		/* gpmc_csn0.gpmc_csn0  */
+			AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE0)		/* gpmc_advn_ale.gpmc_advn_ale */
+			AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE0)		/* gpmc_oen_ren.gpmc_oen_ren */
+			AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE0)		/* gpmc_wen.gpmc_wen */
+			AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE0)		/* gpmc_be0n_cle.gpmc_be0n_cle */
+		>;
+	};
+};
+
+&elm {
+	status = "okay";
+};
+
+&gpmc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&nandflash_pins_s0>;
+	ranges = <0 0 0x08000000 0x10000000>;	/* CS0: NAND */
+	status = "okay";
+
+	nand@0,0 {
+		compatible = "ti,omap2-nand";
+		reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+		interrupt-parent = <&gpmc>;
+		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
+			     <1 IRQ_TYPE_NONE>;	/* termcount */
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
+		nand-bus-width = <8>;
+		ti,nand-ecc-opt = "bch8";
+		ti,nand-xfer-type = "polled";
+
+		gpmc,device-nand = "true";
+		gpmc,device-width = <1>;
+		gpmc,sync-clk-ps = <0>;
+		gpmc,cs-on-ns = <0>;
+		gpmc,cs-rd-off-ns = <44>;
+		gpmc,cs-wr-off-ns = <44>;
+		gpmc,adv-on-ns = <6>;
+		gpmc,adv-rd-off-ns = <34>;
+		gpmc,adv-wr-off-ns = <44>;
+		gpmc,we-on-ns = <0>;
+		gpmc,we-off-ns = <40>;
+		gpmc,oe-on-ns = <0>;
+		gpmc,oe-off-ns = <54>;
+		gpmc,access-ns = <64>;
+		gpmc,rd-cycle-ns = <82>;
+		gpmc,wr-cycle-ns = <82>;
+		gpmc,bus-turnaround-ns = <0>;
+		gpmc,cycle2cycle-delay-ns = <0>;
+		gpmc,clk-activation-ns = <0>;
+		gpmc,wr-access-ns = <40>;
+		gpmc,wr-data-mux-bus-ns = <0>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+		elm_id = <&elm>;
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins>;
+
+	status = "okay";
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
+
+	status = "okay";
+	clock-frequency = <400000>;
+
+	tps: tps@2d {
+		reg = <0x2d>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <28 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&tps65910_pins>;
+	};
+
+	at24@50 {
+		compatible = "at24,24c02";
+		pagesize = <8>;
+		reg = <0x50>;
+	};
+};
+
+&usb {
+	status = "okay";
+};
+
+&usb_ctrl_mod {
+	status = "okay";
+};
+
+&cppi41dma  {
+	status = "okay";
+};
+
+#include "tps65910.dtsi"
+
+&tps {
+	vcc1-supply = <&vbat>;
+	vcc2-supply = <&vbat>;
+	vcc3-supply = <&vbat>;
+	vcc4-supply = <&vbat>;
+	vcc5-supply = <&vbat>;
+	vcc6-supply = <&vbat>;
+	vcc7-supply = <&vbat>;
+	vccio-supply = <&vbat>;
+
+	ti,en-ck32k-xtal = <1>;
+
+	regulators {
+		vrtc_reg: regulator@0 {
+			regulator-always-on;
+		};
+
+		vio_reg: regulator@1 {
+			regulator-always-on;
+		};
+
+		vdd1_reg: regulator@2 {
+			/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+			regulator-name = "vdd_mpu";
+			regulator-min-microvolt = <912500>;
+			regulator-max-microvolt = <1312500>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		vdd2_reg: regulator@3 {
+			/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
+			regulator-name = "vdd_core";
+			regulator-min-microvolt = <912500>;
+			regulator-max-microvolt = <1150000>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		vdd3_reg: regulator@4 {
+			regulator-always-on;
+		};
+
+		vdig1_reg: regulator@5 {
+			regulator-always-on;
+		};
+
+		vdig2_reg: regulator@6 {
+			regulator-always-on;
+		};
+
+		vpll_reg: regulator@7 {
+			regulator-always-on;
+		};
+
+		vdac_reg: regulator@8 {
+			regulator-always-on;
+		};
+
+		vaux1_reg: regulator@9 {
+			regulator-always-on;
+		};
+
+		vaux2_reg: regulator@10 {
+			regulator-always-on;
+		};
+
+		vaux33_reg: regulator@11 {
+			regulator-always-on;
+		};
+
+		vmmc_reg: regulator@12 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+	};
+};
+
+&mac {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&cpsw_default>;
+	pinctrl-1 = <&cpsw_sleep>;
+	dual_emac = <1>;
+
+	status = "okay";
+};
+
+&davinci_mdio {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&davinci_mdio_default>;
+	pinctrl-1 = <&davinci_mdio_sleep>;
+
+	status = "okay";
+};
+
+&mmc1 {
+	vmmc-supply = <&vmmc_reg>;
+	status = "okay";
+};
+
+&mmc2 {
+	status = "okay";
+	vmmc-supply = <&wl12xx_vmmc>;
+	ti,non-removable;
+	bus-width = <4>;
+	cap-power-off-card;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc2_pins>;
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+	wlcore: wlcore@2 {
+		compatible = "ti,wl1835";
+		reg = <2>;
+		interrupt-parent = <&gpio3>;
+		interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+	};
+};
+
+&sham {
+	status = "okay";
+};
+
+&aes {
+	status = "okay";
+};
+
+&gpio0 {
+	ti,no-reset-on-init;
+};
diff --git a/arch/arm/boot/dts/am335x-chiliboard.dts b/arch/arm/boot/dts/am335x-chiliboard.dts
index 15d47ab..2a624b3 100644
--- a/arch/arm/boot/dts/am335x-chiliboard.dts
+++ b/arch/arm/boot/dts/am335x-chiliboard.dts
@@ -35,6 +35,59 @@
 };
 
 &am33xx_pinmux {
+	uart0_pins: pinmux_uart0_pins {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart0_rxd.uart0_rxd */
+			AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* uart0_txd.uart0_txd */
+		>;
+	};
+
+	cpsw_default: cpsw_default {
+		pinctrl-single,pins = <
+			/* Slave 1 */
+			AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1)  /* mii1_crs.rmii1_crs */
+			AM33XX_IOPAD(0x910, PIN_INPUT_PULLUP | MUX_MODE1)	/* mii1_rxerr.rmii1_rxerr */
+			AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE1)	/* mii1_txen.rmii1_txen */
+			AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE1)	/* mii1_txd1.rmii1_txd1 */
+			AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE1)	/* mii1_txd0.rmii1_txd0 */
+			AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE1)	/* mii1_rxd1.rmii1_rxd1 */
+			AM33XX_IOPAD(0x940, PIN_INPUT_PULLUP | MUX_MODE1)	/* mii1_rxd0.rmii1_rxd0 */
+			AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* rmii1_ref_clk.rmii_ref_clk */
+		>;
+	};
+
+	cpsw_sleep: cpsw_sleep {
+		pinctrl-single,pins = <
+			/* Slave 1 reset value */
+			AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)
+		>;
+	};
+
+	davinci_mdio_default: davinci_mdio_default {
+		pinctrl-single,pins = <
+			/* mdio_data.mdio_data */
+			AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)
+			/* mdio_clk.mdio_clk */
+			AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0)
+		>;
+	};
+
+	davinci_mdio_sleep: davinci_mdio_sleep {
+		pinctrl-single,pins = <
+			/* MDIO reset value */
+			AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+		>;
+	};
+
 	usb1_drvvbus: usb1_drvvbus {
 		pinctrl-single,pins = <
 			AM33XX_IOPAD(0xa34, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* usb1_drvvbus.usb1_drvvbus */
@@ -61,12 +114,34 @@
 	};
 };
 
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins>;
+
+	status = "okay";
+};
+
 &ldo4_reg {
 	regulator-min-microvolt = <3300000>;
 	regulator-max-microvolt = <3300000>;
 };
 
 /* Ethernet */
+&mac {
+	slaves = <1>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&cpsw_default>;
+	pinctrl-1 = <&cpsw_sleep>;
+	status = "okay";
+};
+
+&davinci_mdio {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&davinci_mdio_default>;
+	pinctrl-1 = <&davinci_mdio_sleep>;
+	status = "okay";
+};
+
 &cpsw_emac0 {
 	phy_id = <&davinci_mdio>, <0>;
 	phy-mode = "rmii";
diff --git a/arch/arm/boot/dts/am335x-chilisom.dtsi b/arch/arm/boot/dts/am335x-chilisom.dtsi
index 95461a2..1d64735 100644
--- a/arch/arm/boot/dts/am335x-chilisom.dtsi
+++ b/arch/arm/boot/dts/am335x-chilisom.dtsi
@@ -35,59 +35,6 @@
 		>;
 	};
 
-	uart0_pins: pinmux_uart0_pins {
-		pinctrl-single,pins = <
-			AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart0_rxd.uart0_rxd */
-			AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* uart0_txd.uart0_txd */
-		>;
-	};
-
-	cpsw_default: cpsw_default {
-		pinctrl-single,pins = <
-			/* Slave 1 */
-			AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1)  /* mii1_crs.rmii1_crs */
-			AM33XX_IOPAD(0x910, PIN_INPUT_PULLUP | MUX_MODE1)	/* mii1_rxerr.rmii1_rxerr */
-			AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE1)	/* mii1_txen.rmii1_txen */
-			AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE1)	/* mii1_txd1.rmii1_txd1 */
-			AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE1)	/* mii1_txd0.rmii1_txd0 */
-			AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE1)	/* mii1_rxd1.rmii1_rxd1 */
-			AM33XX_IOPAD(0x940, PIN_INPUT_PULLUP | MUX_MODE1)	/* mii1_rxd0.rmii1_rxd0 */
-			AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* rmii1_ref_clk.rmii_ref_clk */
-		>;
-	};
-
-	cpsw_sleep: cpsw_sleep {
-		pinctrl-single,pins = <
-			/* Slave 1 reset value */
-			AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)
-		>;
-	};
-
-	davinci_mdio_default: davinci_mdio_default {
-		pinctrl-single,pins = <
-			/* mdio_data.mdio_data */
-			AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)
-			/* mdio_clk.mdio_clk */
-			AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0)
-		>;
-	};
-
-	davinci_mdio_sleep: davinci_mdio_sleep {
-		pinctrl-single,pins = <
-			/* MDIO reset value */
-			AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
-			AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
-		>;
-	};
-
 	nandflash_pins: nandflash_pins {
 		pinctrl-single,pins = <
 			AM33XX_IOPAD(0x800, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* gpmc_ad0.gpmc_ad0 */
@@ -109,13 +56,6 @@
 	};
 };
 
-&uart0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart0_pins>;
-
-	status = "okay";
-};
-
 &i2c0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c0_pins>;
@@ -182,20 +122,8 @@
 	};
 };
 
-/* Ethernet MAC */
-&mac {
-	slaves = <1>;
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&cpsw_default>;
-	pinctrl-1 = <&cpsw_sleep>;
-	status = "okay";
-};
-
-&davinci_mdio {
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&davinci_mdio_default>;
-	pinctrl-1 = <&davinci_mdio_sleep>;
-	status = "okay";
+&rtc {
+	system-power-controller;
 };
 
 /* NAND Flash */
@@ -214,6 +142,7 @@
 		interrupt-parent = <&gpmc>;
 		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
 			     <1 IRQ_TYPE_NONE>;	/* termcount */
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
 		ti,nand-ecc-opt = "bch8";
 		ti,elm-id = <&elm>;
 		nand-bus-width = <8>;
diff --git a/arch/arm/boot/dts/am335x-cm-t335.dts b/arch/arm/boot/dts/am335x-cm-t335.dts
index e835644..817b1de 100644
--- a/arch/arm/boot/dts/am335x-cm-t335.dts
+++ b/arch/arm/boot/dts/am335x-cm-t335.dts
@@ -411,6 +411,7 @@
 		interrupt-parent = <&gpmc>;
 		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
 			     <1 IRQ_TYPE_NONE>;	/* termcount */
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
 		ti,nand-ecc-opt = "bch8";
 		ti,elm-id = <&elm>;
 		nand-bus-width = <8>;
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 28b9162..516673b 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -524,6 +524,7 @@
 		interrupt-parent = <&gpmc>;
 		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
 			     <1 IRQ_TYPE_NONE>;	/* termcount */
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
 		ti,nand-ecc-opt = "bch8";
 		ti,elm-id = <&elm>;
 		nand-bus-width = <8>;
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
new file mode 100644
index 0000000..e271013
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * AM335x ICE V2 board
+ * http://www.ti.com/tool/tmdsice3359
+ */
+
+/dts-v1/;
+
+#include "am33xx.dtsi"
+
+/ {
+	model = "TI AM3359 ICE-V2";
+	compatible = "ti,am3359-icev2", "ti,am33xx";
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x10000000>; /* 256 MB */
+	};
+
+	vbat: fixedregulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "vbat";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+	};
+
+	vtt_fixed: fixedregulator@1 {
+		compatible = "regulator-fixed";
+		regulator-name = "vtt";
+		regulator-min-microvolt = <1500000>;
+		regulator-max-microvolt = <1500000>;
+		gpio = <&gpio0 18 GPIO_ACTIVE_HIGH>;
+		regulator-always-on;
+		regulator-boot-on;
+		enable-active-high;
+	};
+
+	leds@0 {
+		compatible = "gpio-leds";
+
+		led@0 {
+			label = "out0";
+			gpios = <&tpic2810 0 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led@1 {
+			label = "out1";
+			gpios = <&tpic2810 1 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led@2 {
+			label = "out2";
+			gpios = <&tpic2810 2 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led@3 {
+			label = "out3";
+			gpios = <&tpic2810 3 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led@4 {
+			label = "out4";
+			gpios = <&tpic2810 4 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led@5 {
+			label = "out5";
+			gpios = <&tpic2810 5 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led@6 {
+			label = "out6";
+			gpios = <&tpic2810 6 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led@7 {
+			label = "out7";
+			gpios = <&tpic2810 7 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+	};
+
+	/* Tricolor status LEDs */
+	leds@1 {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&user_leds>;
+
+		led@0 {
+			label = "status0:red:cpu0";
+			gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "cpu0";
+		};
+
+		led@1 {
+			label = "status0:green:usr";
+			gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led@2 {
+			label = "status0:yellow:usr";
+			gpios = <&gpio3 9 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led@3 {
+			label = "status1:red:mmc0";
+			gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "mmc0";
+		};
+
+		led@4 {
+			label = "status1:green:usr";
+			gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led@5 {
+			label = "status1:yellow:usr";
+			gpios = <&gpio0 19 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+	};
+};
+
+&am33xx_pinmux {
+	user_leds: user_leds {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x91c, PIN_OUTPUT | MUX_MODE7) /* (J18) gmii1_txd3.gpio0[16] */
+			AM33XX_IOPAD(0x920, PIN_OUTPUT | MUX_MODE7) /* (K15) gmii1_txd2.gpio0[17] */
+			AM33XX_IOPAD(0x9b0, PIN_OUTPUT | MUX_MODE7) /* (A15) xdma_event_intr0.gpio0[19] */
+			AM33XX_IOPAD(0x9b4, PIN_OUTPUT | MUX_MODE7) /* (D14) xdma_event_intr1.gpio0[20] */
+			AM33XX_IOPAD(0x880, PIN_OUTPUT | MUX_MODE7) /* (U9) gpmc_csn1.gpio1[30] */
+			AM33XX_IOPAD(0x92c, PIN_OUTPUT | MUX_MODE7) /* (K18) gmii1_txclk.gpio3[9] */
+		>;
+	};
+
+	mmc0_pins_default: mmc0_pins_default {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* (F17) mmc0_dat3.mmc0_dat3 */
+			AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* (F18) mmc0_dat2.mmc0_dat2 */
+			AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* (G15) mmc0_dat1.mmc0_dat1 */
+			AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
+			AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
+			AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
+			AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* (C15) spi0_cs1.mmc0_sdcd */
+		>;
+	};
+
+	i2c0_pins_default: i2c0_pins_default {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x988, PIN_INPUT | MUX_MODE0) /* (C17) I2C0_SDA.I2C0_SDA */
+			AM33XX_IOPAD(0x98c, PIN_INPUT | MUX_MODE0) /* (C16) I2C0_SCL.I2C0_SCL */
+		>;
+	};
+
+	spi0_pins_default: spi0_pins_default {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE0) /* (A17) spi0_sclk.spi0_sclk */
+			AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE0) /* (B17) spi0_d0.spi0_d0 */
+			AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) /* (B16) spi0_d1.spi0_d1 */
+			AM33XX_IOPAD(0x95c, PIN_INPUT_PULLUP | MUX_MODE0) /* (A16) spi0_cs0.spi0_cs0 */
+		>;
+	};
+
+	uart3_pins_default: uart3_pins_default {
+		pinctrl-single,pins = <
+			AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE1) /* (L17) gmii1_rxd3.uart3_rxd */
+			AM33XX_IOPAD(0x938, PIN_OUTPUT_PULLUP | MUX_MODE1) /* (L16) gmii1_rxd2.uart3_txd */
+		>;
+	};
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins_default>;
+
+	status = "okay";
+	clock-frequency = <400000>;
+
+	tps: power-controller@2d {
+		reg = <0x2d>;
+	};
+
+	tpic2810: gpio@60 {
+		compatible = "ti,tpic2810";
+		reg = <0x60>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+};
+
+#include "tps65910.dtsi"
+
+&tps {
+	vcc1-supply = <&vbat>;
+	vcc2-supply = <&vbat>;
+	vcc3-supply = <&vbat>;
+	vcc4-supply = <&vbat>;
+	vcc5-supply = <&vbat>;
+	vcc6-supply = <&vbat>;
+	vcc7-supply = <&vbat>;
+	vccio-supply = <&vbat>;
+
+	regulators {
+		vrtc_reg: regulator@0 {
+			regulator-always-on;
+		};
+
+		vio_reg: regulator@1 {
+			regulator-always-on;
+		};
+
+		vdd1_reg: regulator@2 {
+			regulator-name = "vdd_mpu";
+			regulator-min-microvolt = <912500>;
+			regulator-max-microvolt = <1326000>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		vdd2_reg: regulator@3 {
+			regulator-name = "vdd_core";
+			regulator-min-microvolt = <912500>;
+			regulator-max-microvolt = <1144000>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		vdd3_reg: regulator@4 {
+			regulator-always-on;
+		};
+
+		vdig1_reg: regulator@5 {
+			regulator-always-on;
+		};
+
+		vdig2_reg: regulator@6 {
+			regulator-always-on;
+		};
+
+		vpll_reg: regulator@7 {
+			regulator-always-on;
+		};
+
+		vdac_reg: regulator@8 {
+			regulator-always-on;
+		};
+
+		vaux1_reg: regulator@9 {
+			regulator-always-on;
+		};
+
+		vaux2_reg: regulator@10 {
+			regulator-always-on;
+		};
+
+		vaux33_reg: regulator@11 {
+			regulator-always-on;
+		};
+
+		vmmc_reg: regulator@12 {
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+	};
+};
+
+&mmc1 {
+	status = "okay";
+	vmmc-supply = <&vmmc_reg>;
+	bus-width = <4>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_default>;
+};
+
+&gpio0 {
+	/* Do not idle the GPIO used for holding the VTT regulator */
+	ti,no-reset-on-init;
+	ti,no-idle-on-init;
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart3_pins_default>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index 6c3a9bf..df63484 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -135,6 +135,7 @@
 		interrupt-parent = <&gpmc>;
 		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
 			     <1 IRQ_TYPE_NONE>;	/* termcount */
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
 		nand-bus-width = <8>;
 		ti,nand-ecc-opt = "bch8";
 		gpmc,device-width = <1>;
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index d4b7f3b..86f7731 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -171,6 +171,7 @@
 		interrupt-parent = <&gpmc>;
 		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
 			     <1 IRQ_TYPE_NONE>;	/* termcount */
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
 		nand-bus-width = <8>;
 		ti,nand-ecc-opt = "bch8";
 		gpmc,device-nand = "true";
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index 865de85..837d5b8 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -138,7 +138,7 @@
 &epwmss1 {
 	status = "okay";
 
-	ehrpwm1: ehrpwm@48302200 {
+	ehrpwm1: pwm@48302200 {
 		pinctrl-names = "default";
 		pinctrl-0 = <&ehrpwm1_pins>;
 		status = "okay";
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi
index afb4b3a..8d83195 100644
--- a/arch/arm/boot/dts/am33xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am33xx-clocks.dtsi
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 &scm_clocks {
-	sys_clkin_ck: sys_clkin_ck {
+	sys_clkin_ck: sys_clkin_ck@40 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&virt_19200000_ck>, <&virt_24000000_ck>, <&virt_25000000_ck>, <&virt_26000000_ck>;
@@ -163,7 +163,7 @@
 		clock-frequency = <12000000>;
 	};
 
-	dpll_core_ck: dpll_core_ck {
+	dpll_core_ck: dpll_core_ck@490 {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-core-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
@@ -176,7 +176,7 @@
 		clocks = <&dpll_core_ck>;
 	};
 
-	dpll_core_m4_ck: dpll_core_m4_ck {
+	dpll_core_m4_ck: dpll_core_m4_ck@480 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -185,7 +185,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_m5_ck: dpll_core_m5_ck {
+	dpll_core_m5_ck: dpll_core_m5_ck@484 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -194,7 +194,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_m6_ck: dpll_core_m6_ck {
+	dpll_core_m6_ck: dpll_core_m6_ck@4d8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -203,14 +203,14 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_mpu_ck: dpll_mpu_ck {
+	dpll_mpu_ck: dpll_mpu_ck@488 {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
 		reg = <0x0488>, <0x0420>, <0x042c>;
 	};
 
-	dpll_mpu_m2_ck: dpll_mpu_m2_ck {
+	dpll_mpu_m2_ck: dpll_mpu_m2_ck@4a8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_mpu_ck>;
@@ -219,14 +219,14 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_ddr_ck: dpll_ddr_ck {
+	dpll_ddr_ck: dpll_ddr_ck@494 {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-no-gate-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
 		reg = <0x0494>, <0x0434>, <0x0440>;
 	};
 
-	dpll_ddr_m2_ck: dpll_ddr_m2_ck {
+	dpll_ddr_m2_ck: dpll_ddr_m2_ck@4a0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_ddr_ck>;
@@ -243,14 +243,14 @@
 		clock-div = <2>;
 	};
 
-	dpll_disp_ck: dpll_disp_ck {
+	dpll_disp_ck: dpll_disp_ck@498 {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-no-gate-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
 		reg = <0x0498>, <0x0448>, <0x0454>;
 	};
 
-	dpll_disp_m2_ck: dpll_disp_m2_ck {
+	dpll_disp_m2_ck: dpll_disp_m2_ck@4a4 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_disp_ck>;
@@ -260,14 +260,14 @@
 		ti,set-rate-parent;
 	};
 
-	dpll_per_ck: dpll_per_ck {
+	dpll_per_ck: dpll_per_ck@48c {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-no-gate-j-type-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
 		reg = <0x048c>, <0x0470>, <0x049c>;
 	};
 
-	dpll_per_m2_ck: dpll_per_m2_ck {
+	dpll_per_m2_ck: dpll_per_m2_ck@4ac {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_ck>;
@@ -292,7 +292,7 @@
 		clock-div = <4>;
 	};
 
-	cefuse_fck: cefuse_fck {
+	cefuse_fck: cefuse_fck@a20 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_clkin_ck>;
@@ -316,7 +316,7 @@
 		clock-div = <732>;
 	};
 
-	clkdiv32k_ick: clkdiv32k_ick {
+	clkdiv32k_ick: clkdiv32k_ick@14c {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkdiv32k_ck>;
@@ -332,14 +332,14 @@
 		clock-div = <1>;
 	};
 
-	pruss_ocp_gclk: pruss_ocp_gclk {
+	pruss_ocp_gclk: pruss_ocp_gclk@530 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&l3_gclk>, <&dpll_disp_m2_ck>;
 		reg = <0x0530>;
 	};
 
-	mmu_fck: mmu_fck {
+	mmu_fck: mmu_fck@914 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_core_m4_ck>;
@@ -347,56 +347,56 @@
 		reg = <0x0914>;
 	};
 
-	timer1_fck: timer1_fck {
+	timer1_fck: timer1_fck@528 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&clkdiv32k_ick>, <&tclkin_ck>, <&clk_rc32k_ck>, <&clk_32768_ck>;
 		reg = <0x0528>;
 	};
 
-	timer2_fck: timer2_fck {
+	timer2_fck: timer2_fck@508 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x0508>;
 	};
 
-	timer3_fck: timer3_fck {
+	timer3_fck: timer3_fck@50c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x050c>;
 	};
 
-	timer4_fck: timer4_fck {
+	timer4_fck: timer4_fck@510 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x0510>;
 	};
 
-	timer5_fck: timer5_fck {
+	timer5_fck: timer5_fck@518 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x0518>;
 	};
 
-	timer6_fck: timer6_fck {
+	timer6_fck: timer6_fck@51c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x051c>;
 	};
 
-	timer7_fck: timer7_fck {
+	timer7_fck: timer7_fck@504 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x0504>;
 	};
 
-	usbotg_fck: usbotg_fck {
+	usbotg_fck: usbotg_fck@47c {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_per_ck>;
@@ -412,7 +412,7 @@
 		clock-div = <2>;
 	};
 
-	ieee5000_fck: ieee5000_fck {
+	ieee5000_fck: ieee5000_fck@e4 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_core_m4_div2_ck>;
@@ -420,7 +420,7 @@
 		reg = <0x00e4>;
 	};
 
-	wdt1_fck: wdt1_fck {
+	wdt1_fck: wdt1_fck@538 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&clk_rc32k_ck>, <&clkdiv32k_ick>;
@@ -483,21 +483,21 @@
 		clock-div = <2>;
 	};
 
-	cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
+	cpsw_cpts_rft_clk: cpsw_cpts_rft_clk@520 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_core_m5_ck>, <&dpll_core_m4_ck>;
 		reg = <0x0520>;
 	};
 
-	gpio0_dbclk_mux_ck: gpio0_dbclk_mux_ck {
+	gpio0_dbclk_mux_ck: gpio0_dbclk_mux_ck@53c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&clk_rc32k_ck>, <&clk_32768_ck>, <&clkdiv32k_ick>;
 		reg = <0x053c>;
 	};
 
-	gpio0_dbclk: gpio0_dbclk {
+	gpio0_dbclk: gpio0_dbclk@408 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&gpio0_dbclk_mux_ck>;
@@ -505,7 +505,7 @@
 		reg = <0x0408>;
 	};
 
-	gpio1_dbclk: gpio1_dbclk {
+	gpio1_dbclk: gpio1_dbclk@ac {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkdiv32k_ick>;
@@ -513,7 +513,7 @@
 		reg = <0x00ac>;
 	};
 
-	gpio2_dbclk: gpio2_dbclk {
+	gpio2_dbclk: gpio2_dbclk@b0 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkdiv32k_ick>;
@@ -521,7 +521,7 @@
 		reg = <0x00b0>;
 	};
 
-	gpio3_dbclk: gpio3_dbclk {
+	gpio3_dbclk: gpio3_dbclk@b4 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkdiv32k_ick>;
@@ -529,7 +529,7 @@
 		reg = <0x00b4>;
 	};
 
-	lcd_gclk: lcd_gclk {
+	lcd_gclk: lcd_gclk@534 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_disp_m2_ck>, <&dpll_core_m5_ck>, <&dpll_per_m2_ck>;
@@ -545,7 +545,7 @@
 		clock-div = <2>;
 	};
 
-	gfx_fclk_clksel_ck: gfx_fclk_clksel_ck {
+	gfx_fclk_clksel_ck: gfx_fclk_clksel_ck@52c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_core_m4_ck>, <&dpll_per_m2_ck>;
@@ -553,7 +553,7 @@
 		reg = <0x052c>;
 	};
 
-	gfx_fck_div_ck: gfx_fck_div_ck {
+	gfx_fck_div_ck: gfx_fck_div_ck@52c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&gfx_fclk_clksel_ck>;
@@ -561,14 +561,14 @@
 		ti,max-div = <2>;
 	};
 
-	sysclkout_pre_ck: sysclkout_pre_ck {
+	sysclkout_pre_ck: sysclkout_pre_ck@700 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&clk_32768_ck>, <&l3_gclk>, <&dpll_ddr_m2_ck>, <&dpll_per_m2_ck>, <&lcd_gclk>;
 		reg = <0x0700>;
 	};
 
-	clkout2_div_ck: clkout2_div_ck {
+	clkout2_div_ck: clkout2_div_ck@700 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sysclkout_pre_ck>;
@@ -577,7 +577,7 @@
 		reg = <0x0700>;
 	};
 
-	dbg_sysclk_ck: dbg_sysclk_ck {
+	dbg_sysclk_ck: dbg_sysclk_ck@414 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_clkin_ck>;
@@ -585,7 +585,7 @@
 		reg = <0x0414>;
 	};
 
-	dbg_clka_ck: dbg_clka_ck {
+	dbg_clka_ck: dbg_clka_ck@414 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_core_m4_ck>;
@@ -593,7 +593,7 @@
 		reg = <0x0414>;
 	};
 
-	stm_pmd_clock_mux_ck: stm_pmd_clock_mux_ck {
+	stm_pmd_clock_mux_ck: stm_pmd_clock_mux_ck@414 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dbg_sysclk_ck>, <&dbg_clka_ck>;
@@ -601,7 +601,7 @@
 		reg = <0x0414>;
 	};
 
-	trace_pmd_clk_mux_ck: trace_pmd_clk_mux_ck {
+	trace_pmd_clk_mux_ck: trace_pmd_clk_mux_ck@414 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dbg_sysclk_ck>, <&dbg_clka_ck>;
@@ -609,7 +609,7 @@
 		reg = <0x0414>;
 	};
 
-	stm_clk_div_ck: stm_clk_div_ck {
+	stm_clk_div_ck: stm_clk_div_ck@414 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&stm_pmd_clock_mux_ck>;
@@ -619,7 +619,7 @@
 		ti,index-power-of-two;
 	};
 
-	trace_clk_div_ck: trace_clk_div_ck {
+	trace_clk_div_ck: trace_clk_div_ck@414 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&trace_pmd_clk_mux_ck>;
@@ -629,7 +629,7 @@
 		ti,index-power-of-two;
 	};
 
-	clkout2_ck: clkout2_ck {
+	clkout2_ck: clkout2_ck@700 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkout2_div_ck>;
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 55ca9c7..52be48b 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -688,7 +688,7 @@
 				status = "disabled";
 			};
 
-			ehrpwm0: ehrpwm@48300200 {
+			ehrpwm0: pwm@48300200 {
 				compatible = "ti,am33xx-ehrpwm";
 				#pwm-cells = <3>;
 				reg = <0x48300200 0x80>;
@@ -718,7 +718,7 @@
 				status = "disabled";
 			};
 
-			ehrpwm1: ehrpwm@48302200 {
+			ehrpwm1: pwm@48302200 {
 				compatible = "ti,am33xx-ehrpwm";
 				#pwm-cells = <3>;
 				reg = <0x48302200 0x80>;
@@ -748,7 +748,7 @@
 				status = "disabled";
 			};
 
-			ehrpwm2: ehrpwm@48304200 {
+			ehrpwm2: pwm@48304200 {
 				compatible = "ti,am33xx-ehrpwm";
 				#pwm-cells = <3>;
 				reg = <0x48304200 0x80>;
@@ -860,7 +860,7 @@
 			ti,no-idle-on-init;
 			reg = <0x50000000 0x2000>;
 			interrupts = <100>;
-			dmas = <&edma 52>;
+			dmas = <&edma 52 0>;
 			dma-names = "rxtx";
 			gpmc,num-cs = <7>;
 			gpmc,num-waitpins = <2>;
@@ -868,6 +868,8 @@
 			#size-cells = <1>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
+			gpio-controller;
+			#gpio-cells = <2>;
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/am35xx-clocks.dtsi b/arch/arm/boot/dts/am35xx-clocks.dtsi
index 18cc826..00dd1f0 100644
--- a/arch/arm/boot/dts/am35xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am35xx-clocks.dtsi
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 &scm_clocks {
-	emac_ick: emac_ick {
+	emac_ick: emac_ick@32c {
 		#clock-cells = <0>;
 		compatible = "ti,am35xx-gate-clock";
 		clocks = <&ipss_ick>;
@@ -16,7 +16,7 @@
 		ti,bit-shift = <1>;
 	};
 
-	emac_fck: emac_fck {
+	emac_fck: emac_fck@32c {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&rmii_ck>;
@@ -24,7 +24,7 @@
 		ti,bit-shift = <9>;
 	};
 
-	vpfe_ick: vpfe_ick {
+	vpfe_ick: vpfe_ick@32c {
 		#clock-cells = <0>;
 		compatible = "ti,am35xx-gate-clock";
 		clocks = <&ipss_ick>;
@@ -32,7 +32,7 @@
 		ti,bit-shift = <2>;
 	};
 
-	vpfe_fck: vpfe_fck {
+	vpfe_fck: vpfe_fck@32c {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&pclk_ck>;
@@ -40,7 +40,7 @@
 		ti,bit-shift = <10>;
 	};
 
-	hsotgusb_ick_am35xx: hsotgusb_ick_am35xx {
+	hsotgusb_ick_am35xx: hsotgusb_ick_am35xx@32c {
 		#clock-cells = <0>;
 		compatible = "ti,am35xx-gate-clock";
 		clocks = <&ipss_ick>;
@@ -48,7 +48,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	hsotgusb_fck_am35xx: hsotgusb_fck_am35xx {
+	hsotgusb_fck_am35xx: hsotgusb_fck_am35xx@32c {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_ck>;
@@ -56,7 +56,7 @@
 		ti,bit-shift = <8>;
 	};
 
-	hecc_ck: hecc_ck {
+	hecc_ck: hecc_ck@32c {
 		#clock-cells = <0>;
 		compatible = "ti,am35xx-gate-clock";
 		clocks = <&sys_ck>;
@@ -65,7 +65,7 @@
 	};
 };
 &cm_clocks {
-	ipss_ick: ipss_ick {
+	ipss_ick: ipss_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,am35xx-interface-clock";
 		clocks = <&core_l3_ick>;
@@ -85,7 +85,7 @@
 		clock-frequency = <27000000>;
 	};
 
-	uart4_ick_am35xx: uart4_ick_am35xx {
+	uart4_ick_am35xx: uart4_ick_am35xx@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -93,7 +93,7 @@
 		ti,bit-shift = <23>;
 	};
 
-	uart4_fck_am35xx: uart4_fck_am35xx {
+	uart4_fck_am35xx: uart4_fck_am35xx@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_48m_fck>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 6e4f5af..12fcde4 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -207,7 +207,7 @@
 			ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
 				   <&edma_tptc2 0>;
 
-			ti,edma-memcpy-channels = <32 33>;
+			ti,edma-memcpy-channels = <58 59>;
 		};
 
 		edma_tptc0: tptc@49800000 {
@@ -679,7 +679,7 @@
 				status = "disabled";
 			};
 
-			ehrpwm0: ehrpwm@48300200 {
+			ehrpwm0: pwm@48300200 {
 				compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
 				#pwm-cells = <3>;
 				reg = <0x48300200 0x80>;
@@ -705,7 +705,7 @@
 				status = "disabled";
 			};
 
-			ehrpwm1: ehrpwm@48302200 {
+			ehrpwm1: pwm@48302200 {
 				compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
 				#pwm-cells = <3>;
 				reg = <0x48302200 0x80>;
@@ -731,7 +731,7 @@
 				status = "disabled";
 			};
 
-			ehrpwm2: ehrpwm@48304200 {
+			ehrpwm2: pwm@48304200 {
 				compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
 				#pwm-cells = <3>;
 				reg = <0x48304200 0x80>;
@@ -749,7 +749,7 @@
 			ti,hwmods = "epwmss3";
 			status = "disabled";
 
-			ehrpwm3: ehrpwm@48306200 {
+			ehrpwm3: pwm@48306200 {
 				compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
 				#pwm-cells = <3>;
 				reg = <0x48306200 0x80>;
@@ -767,7 +767,7 @@
 			ti,hwmods = "epwmss4";
 			status = "disabled";
 
-			ehrpwm4: ehrpwm@48308200 {
+			ehrpwm4: pwm@48308200 {
 				compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
 				#pwm-cells = <3>;
 				reg = <0x48308200 0x80>;
@@ -785,7 +785,7 @@
 			ti,hwmods = "epwmss5";
 			status = "disabled";
 
-			ehrpwm5: ehrpwm@4830a200 {
+			ehrpwm5: pwm@4830a200 {
 				compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
 				#pwm-cells = <3>;
 				reg = <0x4830a200 0x80>;
@@ -884,7 +884,7 @@
 		gpmc: gpmc@50000000 {
 			compatible = "ti,am3352-gpmc";
 			ti,hwmods = "gpmc";
-			dmas = <&edma 52>;
+			dmas = <&edma 52 0>;
 			dma-names = "rxtx";
 			clocks = <&l3s_gclk>;
 			clock-names = "fck";
@@ -896,6 +896,8 @@
 			#size-cells = <1>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
+			gpio-controller;
+			#gpio-cells = <2>;
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 8889be1..5bcd3aa 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -119,7 +119,7 @@
 		clock-frequency = <32768>;
 	};
 
-	sound0: sound@0 {
+	sound0: sound0 {
 		compatible = "simple-audio-card";
 		simple-audio-card,name = "AM437x-GP-EVM";
 		simple-audio-card,widgets =
@@ -817,6 +817,7 @@
 		interrupt-parent = <&gpmc>;
 		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
 			     <1 IRQ_TYPE_NONE>;	/* termcount */
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>;	/* gpmc_wait0 */
 		ti,nand-ecc-opt = "bch16";
 		ti,elm-id = <&elm>;
 		nand-bus-width = <8>;
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 83dfafa..3549b8c 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -107,7 +107,7 @@
 		default-brightness-level = <8>;
 	};
 
-	sound0: sound@0 {
+	sound0: sound0 {
 		compatible = "simple-audio-card";
 		simple-audio-card,name = "AM43-EPOS-EVM";
 		simple-audio-card,widgets =
@@ -568,6 +568,7 @@
 		interrupt-parent = <&gpmc>;
 		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
 			     <1 IRQ_TYPE_NONE>;	/* termcount */
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>;	/* gpmc_wait0 */
 		ti,nand-ecc-opt = "bch16";
 		ti,elm-id = <&elm>;
 		nand-bus-width = <8>;
@@ -794,3 +795,8 @@
 	tx-num-evt = <32>;
 	rx-num-evt = <32>;
 };
+
+&synctimer_32kclk {
+	assigned-clocks = <&mux_synctimer32k_ck>;
+	assigned-clock-parents = <&clkdiv32k_ick>;
+};
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index a38af2b..7630ba1 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 &scm_clocks {
-	sys_clkin_ck: sys_clkin_ck {
+	sys_clkin_ck: sys_clkin_ck@40 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sysboot_freq_sel_ck>, <&crystal_freq_sel_ck>;
@@ -16,7 +16,7 @@
 		reg = <0x0040>;
 	};
 
-	crystal_freq_sel_ck: crystal_freq_sel_ck {
+	crystal_freq_sel_ck: crystal_freq_sel_ck@40 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&virt_19200000_ck>, <&virt_24000000_ck>, <&virt_25000000_ck>, <&virt_26000000_ck>;
@@ -104,7 +104,7 @@
 		clock-div = <1>;
 	};
 
-	ehrpwm0_tbclk: ehrpwm0_tbclk {
+	ehrpwm0_tbclk: ehrpwm0_tbclk@664 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4ls_gclk>;
@@ -112,7 +112,7 @@
 		reg = <0x0664>;
 	};
 
-	ehrpwm1_tbclk: ehrpwm1_tbclk {
+	ehrpwm1_tbclk: ehrpwm1_tbclk@664 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4ls_gclk>;
@@ -120,7 +120,7 @@
 		reg = <0x0664>;
 	};
 
-	ehrpwm2_tbclk: ehrpwm2_tbclk {
+	ehrpwm2_tbclk: ehrpwm2_tbclk@664 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4ls_gclk>;
@@ -128,7 +128,7 @@
 		reg = <0x0664>;
 	};
 
-	ehrpwm3_tbclk: ehrpwm3_tbclk {
+	ehrpwm3_tbclk: ehrpwm3_tbclk@664 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4ls_gclk>;
@@ -136,7 +136,7 @@
 		reg = <0x0664>;
 	};
 
-	ehrpwm4_tbclk: ehrpwm4_tbclk {
+	ehrpwm4_tbclk: ehrpwm4_tbclk@664 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4ls_gclk>;
@@ -144,7 +144,7 @@
 		reg = <0x0664>;
 	};
 
-	ehrpwm5_tbclk: ehrpwm5_tbclk {
+	ehrpwm5_tbclk: ehrpwm5_tbclk@664 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4ls_gclk>;
@@ -195,7 +195,7 @@
 		clock-frequency = <26000000>;
 	};
 
-	dpll_core_ck: dpll_core_ck {
+	dpll_core_ck: dpll_core_ck@2d20 {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-core-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
@@ -208,7 +208,7 @@
 		clocks = <&dpll_core_ck>;
 	};
 
-	dpll_core_m4_ck: dpll_core_m4_ck {
+	dpll_core_m4_ck: dpll_core_m4_ck@2d38 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -219,7 +219,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_core_m5_ck: dpll_core_m5_ck {
+	dpll_core_m5_ck: dpll_core_m5_ck@2d3c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -230,7 +230,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_core_m6_ck: dpll_core_m6_ck {
+	dpll_core_m6_ck: dpll_core_m6_ck@2d40 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -241,14 +241,14 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_mpu_ck: dpll_mpu_ck {
+	dpll_mpu_ck: dpll_mpu_ck@2d60 {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
 		reg = <0x2d60>, <0x2d64>, <0x2d6c>;
 	};
 
-	dpll_mpu_m2_ck: dpll_mpu_m2_ck {
+	dpll_mpu_m2_ck: dpll_mpu_m2_ck@2d70 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_mpu_ck>;
@@ -267,14 +267,14 @@
 		clock-div = <2>;
 	};
 
-	dpll_ddr_ck: dpll_ddr_ck {
+	dpll_ddr_ck: dpll_ddr_ck@2da0 {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
 		reg = <0x2da0>, <0x2da4>, <0x2dac>;
 	};
 
-	dpll_ddr_m2_ck: dpll_ddr_m2_ck {
+	dpll_ddr_m2_ck: dpll_ddr_m2_ck@2db0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_ddr_ck>;
@@ -285,14 +285,14 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_disp_ck: dpll_disp_ck {
+	dpll_disp_ck: dpll_disp_ck@2e20 {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
 		reg = <0x2e20>, <0x2e24>, <0x2e2c>;
 	};
 
-	dpll_disp_m2_ck: dpll_disp_m2_ck {
+	dpll_disp_m2_ck: dpll_disp_m2_ck@2e30 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_disp_ck>;
@@ -304,14 +304,14 @@
 		ti,set-rate-parent;
 	};
 
-	dpll_per_ck: dpll_per_ck {
+	dpll_per_ck: dpll_per_ck@2de0 {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-j-type-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
 		reg = <0x2de0>, <0x2de4>, <0x2dec>;
 	};
 
-	dpll_per_m2_ck: dpll_per_m2_ck {
+	dpll_per_m2_ck: dpll_per_m2_ck@2df0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_ck>;
@@ -354,7 +354,7 @@
 		clock-div = <732>;
 	};
 
-	clkdiv32k_ick: clkdiv32k_ick {
+	clkdiv32k_ick: clkdiv32k_ick@2a38 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkdiv32k_ck>;
@@ -370,7 +370,7 @@
 		clock-div = <1>;
 	};
 
-	pruss_ocp_gclk: pruss_ocp_gclk {
+	pruss_ocp_gclk: pruss_ocp_gclk@4248 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sysclk_div>, <&dpll_disp_m2_ck>;
@@ -383,56 +383,56 @@
 		clock-frequency = <32768>;
 	};
 
-	timer1_fck: timer1_fck {
+	timer1_fck: timer1_fck@4200 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&clkdiv32k_ick>, <&tclkin_ck>, <&clk_rc32k_ck>, <&clk_32768_ck>, <&clk_32k_tpm_ck>;
 		reg = <0x4200>;
 	};
 
-	timer2_fck: timer2_fck {
+	timer2_fck: timer2_fck@4204 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x4204>;
 	};
 
-	timer3_fck: timer3_fck {
+	timer3_fck: timer3_fck@4208 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x4208>;
 	};
 
-	timer4_fck: timer4_fck {
+	timer4_fck: timer4_fck@420c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x420c>;
 	};
 
-	timer5_fck: timer5_fck {
+	timer5_fck: timer5_fck@4210 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x4210>;
 	};
 
-	timer6_fck: timer6_fck {
+	timer6_fck: timer6_fck@4214 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x4214>;
 	};
 
-	timer7_fck: timer7_fck {
+	timer7_fck: timer7_fck@4218 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>;
 		reg = <0x4218>;
 	};
 
-	wdt1_fck: wdt1_fck {
+	wdt1_fck: wdt1_fck@422c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&clk_rc32k_ck>, <&clkdiv32k_ick>;
@@ -487,14 +487,14 @@
 		clock-div = <2>;
 	};
 
-	cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
+	cpsw_cpts_rft_clk: cpsw_cpts_rft_clk@4238 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sysclk_div>, <&dpll_core_m5_ck>, <&dpll_disp_m2_ck>;
 		reg = <0x4238>;
 	};
 
-	dpll_clksel_mac_clk: dpll_clksel_mac_clk {
+	dpll_clksel_mac_clk: dpll_clksel_mac_clk@4234 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_m5_ck>;
@@ -509,14 +509,14 @@
 		clock-frequency = <32768>;
 	};
 
-	gpio0_dbclk_mux_ck: gpio0_dbclk_mux_ck {
+	gpio0_dbclk_mux_ck: gpio0_dbclk_mux_ck@4240 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&clk_rc32k_ck>, <&clk_32768_ck>, <&clkdiv32k_ick>, <&clk_32k_mosc_ck>, <&clk_32k_tpm_ck>;
 		reg = <0x4240>;
 	};
 
-	gpio0_dbclk: gpio0_dbclk {
+	gpio0_dbclk: gpio0_dbclk@2b68 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&gpio0_dbclk_mux_ck>;
@@ -524,7 +524,7 @@
 		reg = <0x2b68>;
 	};
 
-	gpio1_dbclk: gpio1_dbclk {
+	gpio1_dbclk: gpio1_dbclk@8c78 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkdiv32k_ick>;
@@ -532,7 +532,7 @@
 		reg = <0x8c78>;
 	};
 
-	gpio2_dbclk: gpio2_dbclk {
+	gpio2_dbclk: gpio2_dbclk@8c80 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkdiv32k_ick>;
@@ -540,7 +540,7 @@
 		reg = <0x8c80>;
 	};
 
-	gpio3_dbclk: gpio3_dbclk {
+	gpio3_dbclk: gpio3_dbclk@8c88 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkdiv32k_ick>;
@@ -548,7 +548,7 @@
 		reg = <0x8c88>;
 	};
 
-	gpio4_dbclk: gpio4_dbclk {
+	gpio4_dbclk: gpio4_dbclk@8c90 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkdiv32k_ick>;
@@ -556,7 +556,7 @@
 		reg = <0x8c90>;
 	};
 
-	gpio5_dbclk: gpio5_dbclk {
+	gpio5_dbclk: gpio5_dbclk@8c98 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkdiv32k_ick>;
@@ -572,7 +572,7 @@
 		clock-div = <2>;
 	};
 
-	gfx_fclk_clksel_ck: gfx_fclk_clksel_ck {
+	gfx_fclk_clksel_ck: gfx_fclk_clksel_ck@423c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sysclk_div>, <&dpll_per_m2_ck>;
@@ -580,7 +580,7 @@
 		reg = <0x423c>;
 	};
 
-	gfx_fck_div_ck: gfx_fck_div_ck {
+	gfx_fck_div_ck: gfx_fck_div_ck@423c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&gfx_fclk_clksel_ck>;
@@ -588,7 +588,7 @@
 		ti,max-div = <2>;
 	};
 
-	disp_clk: disp_clk {
+	disp_clk: disp_clk@4244 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_disp_m2_ck>, <&dpll_core_m5_ck>, <&dpll_per_m2_ck>;
@@ -596,14 +596,14 @@
 		ti,set-rate-parent;
 	};
 
-	dpll_extdev_ck: dpll_extdev_ck {
+	dpll_extdev_ck: dpll_extdev_ck@2e60 {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-clock";
 		clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
 		reg = <0x2e60>, <0x2e64>, <0x2e6c>;
 	};
 
-	dpll_extdev_m2_ck: dpll_extdev_m2_ck {
+	dpll_extdev_m2_ck: dpll_extdev_m2_ck@2e70 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_extdev_ck>;
@@ -614,14 +614,14 @@
 		ti,invert-autoidle-bit;
 	};
 
-	mux_synctimer32k_ck: mux_synctimer32k_ck {
+	mux_synctimer32k_ck: mux_synctimer32k_ck@4230 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&clk_32768_ck>, <&clk_32k_tpm_ck>, <&clkdiv32k_ick>;
 		reg = <0x4230>;
 	};
 
-	synctimer_32kclk: synctimer_32kclk {
+	synctimer_32kclk: synctimer_32kclk@2a30 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&mux_synctimer32k_ck>;
@@ -629,28 +629,28 @@
 		reg = <0x2a30>;
 	};
 
-	timer8_fck: timer8_fck {
+	timer8_fck: timer8_fck@421c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>, <&clk_32k_tpm_ck>;
 		reg = <0x421c>;
 	};
 
-	timer9_fck: timer9_fck {
+	timer9_fck: timer9_fck@4220 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>, <&clk_32k_tpm_ck>;
 		reg = <0x4220>;
 	};
 
-	timer10_fck: timer10_fck {
+	timer10_fck: timer10_fck@4224 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>, <&clk_32k_tpm_ck>;
 		reg = <0x4224>;
 	};
 
-	timer11_fck: timer11_fck {
+	timer11_fck: timer11_fck@4228 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sys_clkin_ck>, <&clkdiv32k_ick>, <&clk_32k_tpm_ck>;
@@ -679,7 +679,7 @@
 		clocks = <&dpll_ddr_ck>;
 	};
 
-	dpll_ddr_m4_ck: dpll_ddr_m4_ck {
+	dpll_ddr_m4_ck: dpll_ddr_m4_ck@2db8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_ddr_x2_ck>;
@@ -690,7 +690,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_per_clkdcoldo: dpll_per_clkdcoldo {
+	dpll_per_clkdcoldo: dpll_per_clkdcoldo@2e14 {
 		#clock-cells = <0>;
 		compatible = "ti,fixed-factor-clock";
 		clocks = <&dpll_per_ck>;
@@ -701,7 +701,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dll_aging_clk_div: dll_aging_clk_div {
+	dll_aging_clk_div: dll_aging_clk_div@4250 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sys_clkin_ck>;
@@ -733,14 +733,14 @@
 		clock-div = <2>;
 	};
 
-	usbphy_32khz_clkmux: usbphy_32khz_clkmux {
+	usbphy_32khz_clkmux: usbphy_32khz_clkmux@4260 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&clk_32768_ck>, <&clk_32k_tpm_ck>;
 		reg = <0x4260>;
 	};
 
-	usb_phy0_always_on_clk32k: usb_phy0_always_on_clk32k {
+	usb_phy0_always_on_clk32k: usb_phy0_always_on_clk32k@2a40 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&usbphy_32khz_clkmux>;
@@ -748,7 +748,7 @@
 		reg = <0x2a40>;
 	};
 
-	usb_phy1_always_on_clk32k: usb_phy1_always_on_clk32k {
+	usb_phy1_always_on_clk32k: usb_phy1_always_on_clk32k@2a48 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&usbphy_32khz_clkmux>;
@@ -756,7 +756,7 @@
 		reg = <0x2a48>;
 	};
 
-	usb_otg_ss0_refclk960m: usb_otg_ss0_refclk960m {
+	usb_otg_ss0_refclk960m: usb_otg_ss0_refclk960m@8a60 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_per_clkdcoldo>;
@@ -764,11 +764,65 @@
 		reg = <0x8a60>;
 	};
 
-	usb_otg_ss1_refclk960m: usb_otg_ss1_refclk960m {
+	usb_otg_ss1_refclk960m: usb_otg_ss1_refclk960m@8a68 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_per_clkdcoldo>;
 		ti,bit-shift = <8>;
 		reg = <0x8a68>;
 	};
+
+	clkout1_osc_div_ck: clkout1_osc_div_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&sys_clkin_ck>;
+		ti,bit-shift = <20>;
+		ti,max-div = <4>;
+		reg = <0x4100>;
+	};
+
+	clkout1_src2_mux_ck: clkout1_src2_mux_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&clk_rc32k_ck>, <&sysclk_div>, <&dpll_ddr_m2_ck>,
+			 <&dpll_per_m2_ck>, <&dpll_disp_m2_ck>,
+			 <&dpll_mpu_m2_ck>;
+		reg = <0x4100>;
+	};
+
+	clkout1_src2_pre_div_ck: clkout1_src2_pre_div_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&clkout1_src2_mux_ck>;
+		ti,bit-shift = <4>;
+		ti,max-div = <8>;
+		reg = <0x4100>;
+	};
+
+	clkout1_src2_post_div_ck: clkout1_src2_post_div_ck {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&clkout1_src2_pre_div_ck>;
+		ti,bit-shift = <8>;
+		ti,max-div = <32>;
+		ti,index-power-of-two;
+		reg = <0x4100>;
+	};
+
+	clkout1_mux_ck: clkout1_mux_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&clkout1_osc_div_ck>, <&clk_rc32k_ck>,
+			 <&clkout1_src2_post_div_ck>, <&dpll_extdev_m2_ck>;
+		ti,bit-shift = <16>;
+		reg = <0x4100>;
+	};
+
+	clkout1_ck: clkout1_ck {
+		#clock-cells = <0>;
+		compatible = "ti,gate-clock";
+		clocks = <&clkout1_mux_ck>;
+		ti,bit-shift = <23>;
+		reg = <0x4100>;
+	};
 };
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
new file mode 100644
index 0000000..e3acb99
--- /dev/null
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+
+#include "dra74x.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "am57xx-idk-common.dtsi"
+
+/ {
+	model = "TI AM5728 IDK";
+	compatible = "ti,am5728-idk", "ti,am5728", "ti,dra742", "ti,dra74",
+		     "ti,dra7";
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x80000000 0x0 0x80000000>;
+	};
+
+	extcon_usb2: extcon_usb2 {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+	};
+
+	status-leds {
+		compatible = "gpio-leds";
+		cpu0-led {
+			label = "status0:red:cpu0";
+			gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "cpu0";
+		};
+
+		usr0-led {
+			label = "status0:green:usr";
+			gpios = <&gpio3 11 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		heartbeat-led {
+			label = "status0:blue:heartbeat";
+			gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "heartbeat";
+		};
+
+		cpu1-led {
+			label = "status1:red:cpu1";
+			gpios = <&gpio3 10 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "cpu1";
+		};
+
+		usr1-led {
+			label = "status1:green:usr";
+			gpios = <&gpio7 23 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		mmc0-led {
+			label = "status1:blue:mmc0";
+			gpios = <&gpio7 22 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "mmc0";
+		};
+	};
+};
+
+&omap_dwc3_2 {
+	extcon = <&extcon_usb2>;
+};
+
+&mmc1 {
+	status = "okay";
+	vmmc-supply = <&v3_3d>;
+	vmmc_aux-supply = <&ldo1_reg>;
+	bus-width = <4>;
+	cd-gpios = <&gpio6 27 0>; /* gpio 219 */
+};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 0a5fc5d..81d6c30 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -8,6 +8,7 @@
 /dts-v1/;
 
 #include "dra74x.dtsi"
+#include "am57xx-commercial-grade.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
@@ -99,13 +100,6 @@
 		#cooling-cells = <2>;
 	};
 
-	extcon_usb1: extcon_usb1 {
-		compatible = "linux,extcon-usb-gpio";
-		id-gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&extcon_usb1_pins>;
-	};
-
 	hdmi0: connector {
 		compatible = "hdmi-connector";
 		label = "hdmi";
@@ -151,7 +145,7 @@
 		};
 	};
 
-	sound0: sound@0 {
+	sound0: sound0 {
 		compatible = "simple-audio-card";
 		simple-audio-card,name = "BeagleBoard-X15";
 		simple-audio-card,widgets =
@@ -173,8 +167,6 @@
 
 		sound0_master: simple-audio-card,codec {
 			sound-dai = <&tlv320aic3104>;
-			assigned-clocks = <&clkoutmux2_clk_mux>;
-			assigned-clock-parents = <&sys_clk2_dclk_div>;
 			clocks = <&clkout2_clk>;
 		};
 	};
@@ -349,12 +341,6 @@
 		>;
 	};
 
-	extcon_usb1_pins: extcon_usb1_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_rtsn.gpio7_25 */
-		>;
-	};
-
 	tpd12s015_pins: pinmux_tpd12s015_pins {
 		pinctrl-single,pins = <
 			DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14)		/* gpio7_10 CT_CP_HPD */
@@ -440,7 +426,7 @@
 					/* VDD_DSPEVE, VDD_IVA, VDD_GPU */
 					regulator-name = "smps45";
 					regulator-min-microvolt = < 850000>;
-					regulator-max-microvolt = <1150000>;
+					regulator-max-microvolt = <1250000>;
 					regulator-always-on;
 					regulator-boot-on;
 				};
@@ -449,7 +435,7 @@
 					/* VDD_CORE */
 					regulator-name = "smps6";
 					regulator-min-microvolt = <850000>;
-					regulator-max-microvolt = <1030000>;
+					regulator-max-microvolt = <1150000>;
 					regulator-always-on;
 					regulator-boot-on;
 				};
@@ -584,6 +570,9 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&clkout2_pins_default>;
 		pinctrl-1 = <&clkout2_pins_sleep>;
+		assigned-clocks = <&clkoutmux2_clk_mux>;
+		assigned-clock-parents = <&sys_clk2_dclk_div>;
+
 		status = "okay";
 		adc-settle-ms = <40>;
 
@@ -706,10 +695,6 @@
 	pinctrl-0 = <&usb1_pins>;
 };
 
-&omap_dwc3_1 {
-	extcon = <&extcon_usb1>;
-};
-
 &omap_dwc3_2 {
 	extcon = <&extcon_usb2>;
 };
@@ -812,6 +797,8 @@
 	serial-dir = <	/* 0: INACTIVE, 1: TX, 2: RX */
 		1 2 0 0
 	>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
 };
 
 &mailbox5 {
diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
index 14f912a1..378b142 100644
--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
@@ -51,7 +51,7 @@
 		regulator-max-microvolt = <3300000>;
 	};
 
-	sound0: sound@0 {
+	sound0: sound0 {
 		compatible = "simple-audio-card";
 		simple-audio-card,name = "CL-SOM-AM57x-Sound-Card";
 		simple-audio-card,format = "i2s";
diff --git a/arch/arm/boot/dts/am57xx-commercial-grade.dtsi b/arch/arm/boot/dts/am57xx-commercial-grade.dtsi
new file mode 100644
index 0000000..c183654
--- /dev/null
+++ b/arch/arm/boot/dts/am57xx-commercial-grade.dtsi
@@ -0,0 +1,23 @@
+&cpu_alert0 {
+	temperature = <80000>; /* milliCelsius */
+};
+
+&cpu_crit {
+	temperature = <90000>; /* milliCelsius */
+};
+
+&gpu_crit {
+	temperature = <90000>; /* milliCelsius */
+};
+
+&core_crit {
+	temperature = <90000>; /* milliCelsius */
+};
+
+&dspeve_crit {
+	temperature = <90000>; /* milliCelsius */
+};
+
+&iva_crit {
+	temperature = <90000>; /* milliCelsius */
+};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
new file mode 100644
index 0000000..b01a594
--- /dev/null
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "am57xx-industrial-grade.dtsi"
+
+/ {
+	aliases {
+		rtc0 = &tps659038_rtc;
+		rtc1 = &rtc;
+	};
+
+	vmain: fixedregulator-vmain {
+		compatible = "regulator-fixed";
+		regulator-name = "VMAIN";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	v3_3d: fixedregulator-v3_3d {
+		compatible = "regulator-fixed";
+		regulator-name = "V3_3D";
+		vin-supply = <&smps9_reg>;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	vtt_fixed: fixedregulator-vtt {
+		/* TPS51200 */
+		compatible = "regulator-fixed";
+		regulator-name = "vtt_fixed";
+		vin-supply = <&v3_3d>;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+};
+
+&i2c1 {
+	status = "okay";
+	clock-frequency = <400000>;
+
+	tps659038: tps659038@58 {
+		compatible = "ti,tps659038";
+		reg = <0x58>;
+		interrupts-extended = <&gpio6 16 IRQ_TYPE_LEVEL_HIGH
+			       &dra7_pmx_core 0x418>;
+		#interrupt-cells = <2>;
+		interrupt-controller;
+		ti,system-power-controller;
+
+		tps659038_pmic {
+			compatible = "ti,tps659038-pmic";
+			regulators {
+				smps12_reg: smps12 {
+					/* VDD_MPU */
+					vin-supply = <&vmain>;
+					regulator-name = "smps12";
+					regulator-min-microvolt = <850000>;
+					regulator-max-microvolt = <1250000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				smps3_reg: smps3 {
+					/* VDD_DDR EMIF1 EMIF2 */
+					vin-supply = <&vmain>;
+					regulator-name = "smps3";
+					regulator-min-microvolt = <1350000>;
+					regulator-max-microvolt = <1350000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				smps45_reg: smps45 {
+					/* VDD_DSPEVE on AM572 */
+					/* VDD_IVA + VDD_DSP on AM571 */
+					vin-supply = <&vmain>;
+					regulator-name = "smps45";
+					regulator-min-microvolt = <850000>;
+					regulator-max-microvolt = <1250000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				smps6_reg: smps6 {
+					/* VDD_GPU */
+					vin-supply = <&vmain>;
+					regulator-name = "smps6";
+					regulator-min-microvolt = <850000>;
+					regulator-max-microvolt = <1250000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				smps7_reg: smps7 {
+					/* VDD_CORE */
+					vin-supply = <&vmain>;
+					regulator-name = "smps7";
+					regulator-min-microvolt = <850000>;
+					regulator-max-microvolt = <1150000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				smps8_reg: smps8 {
+					/* 5728 - VDD_IVAHD */
+					/* 5718 - N.C. test point */
+					vin-supply = <&vmain>;
+					regulator-name = "smps8";
+				};
+
+				smps9_reg: smps9 {
+					/* VDD_3_3D */
+					vin-supply = <&vmain>;
+					regulator-name = "smps9";
+					regulator-min-microvolt = <3300000>;
+					regulator-max-microvolt = <3300000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				ldo1_reg: ldo1 {
+					/* VDDSHV8 - VSDMMC  */
+					/* NOTE: on rev 1.3a, data supply */
+					vin-supply = <&vmain>;
+					regulator-name = "ldo1";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <3300000>;
+					regulator-boot-on;
+					regulator-always-on;
+				};
+
+				ldo2_reg: ldo2 {
+					/* VDDSH18V */
+					vin-supply = <&vmain>;
+					regulator-name = "ldo2";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				ldo3_reg: ldo3 {
+					/* R1.3a 572x V1_8PHY_LDO3: USB, SATA */
+					vin-supply = <&vmain>;
+					regulator-name = "ldo3";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				ldo4_reg: ldo4 {
+					/* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/
+					vin-supply = <&vmain>;
+					regulator-name = "ldo4";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				/* LDO5-8 unused */
+
+				ldo9_reg: ldo9 {
+					/* VDD_RTC  */
+					vin-supply = <&vmain>;
+					regulator-name = "ldo9";
+					regulator-min-microvolt = <840000>;
+					regulator-max-microvolt = <1160000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				ldoln_reg: ldoln {
+					/* VDDA_1V8_PLL */
+					vin-supply = <&vmain>;
+					regulator-name = "ldoln";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				ldousb_reg: ldousb {
+					/* VDDA_3V_USB: VDDA_USBHS33 */
+					vin-supply = <&vmain>;
+					regulator-name = "ldousb";
+					regulator-min-microvolt = <3300000>;
+					regulator-max-microvolt = <3300000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				ldortc_reg: ldortc {
+					/* VDDA_RTC  */
+					vin-supply = <&vmain>;
+					regulator-name = "ldortc";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				regen1: regen1 {
+					/* VDD_3V3_ON */
+					regulator-name = "regen1";
+					regulator-boot-on;
+					regulator-always-on;
+				};
+
+				regen2: regen2 {
+					/* Needed for PMIC internal resource */
+					regulator-name = "regen2";
+					regulator-boot-on;
+					regulator-always-on;
+				};
+			};
+		};
+
+		tps659038_rtc: tps659038_rtc {
+			compatible = "ti,palmas-rtc";
+			interrupt-parent = <&tps659038>;
+			interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+			wakeup-source;
+		};
+
+		tps659038_pwr_button: tps659038_pwr_button {
+			compatible = "ti,palmas-pwrbutton";
+			interrupt-parent = <&tps659038>;
+			interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+			wakeup-source;
+			ti,palmas-long-press-seconds = <12>;
+		};
+
+		tps659038_gpio: tps659038_gpio {
+			compatible = "ti,palmas-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+	};
+};
+
+&uart3 {
+	status = "okay";
+	interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH
+			       &dra7_pmx_core 0x248>;
+};
+
+&rtc {
+	status = "okay";
+	ext-clk-src;
+};
+
+&mac {
+	status = "okay";
+	dual_emac;
+};
+
+&cpsw_emac0 {
+	phy_id = <&davinci_mdio>, <0>;
+	phy-mode = "rgmii";
+	dual_emac_res_vlan = <1>;
+};
+
+&cpsw_emac1 {
+	phy_id = <&davinci_mdio>, <1>;
+	phy-mode = "rgmii";
+	dual_emac_res_vlan = <2>;
+};
+
+&usb2_phy1 {
+	phy-supply = <&ldousb_reg>;
+};
+
+&usb2_phy2 {
+	phy-supply = <&ldousb_reg>;
+};
+
+&usb1 {
+	dr_mode = "host";
+};
+
+&usb2 {
+	dr_mode = "otg";
+};
+
+&mmc2 {
+	status = "okay";
+	vmmc-supply = <&v3_3d>;
+	bus-width = <8>;
+	ti,non-removable;
+	max-frequency = <96000000>;
+};
diff --git a/arch/arm/boot/dts/am57xx-industrial-grade.dtsi b/arch/arm/boot/dts/am57xx-industrial-grade.dtsi
new file mode 100644
index 0000000..70c8c4b
--- /dev/null
+++ b/arch/arm/boot/dts/am57xx-industrial-grade.dtsi
@@ -0,0 +1,23 @@
+&cpu_alert0 {
+	temperature = <90000>; /* milliCelsius */
+};
+
+&cpu_crit {
+	temperature = <105000>; /* milliCelsius */
+};
+
+&gpu_crit {
+	temperature = <105000>; /* milliCelsius */
+};
+
+&core_crit {
+	temperature = <105000>; /* milliCelsius */
+};
+
+&dspeve_crit {
+	temperature = <105000>; /* milliCelsius */
+};
+
+&iva_crit {
+	temperature = <105000>; /* milliCelsius */
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb-11mp-revb.dts b/arch/arm/boot/dts/arm-realview-eb-11mp-revb.dts
new file mode 100644
index 0000000..e68527b
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb-11mp-revb.dts
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "arm-realview-eb-11mp.dts"
+
+/ {
+	model = "ARM RealView Emulation Baseboard with ARM11MPCore Rev B";
+};
+
+/*
+ * The revision B has a distinctly different layout of the syscon, so
+ * append a specific compatible-string.
+ */
+&syscon {
+	compatible = "arm,realview-eb11mp-revb-syscon", "arm,realview-eb-syscon", "syscon", "simple-mfd";
+};
+
+&intc {
+	reg = <0x10101000 0x1000>,
+	      <0x10100100 0x100>;
+};
+
+&L2 {
+	reg = <0x10102000 0x1000>;
+};
+
+&scu {
+	reg = <0x10100000 0x100>;
+};
+
+&twd_timer {
+	reg = <0x10100600 0x20>;
+};
+
+&twd_wdog {
+	reg = <0x10100620 0x20>;
+};
+
+/*
+ * On revision B, we cannot reach the secondary interrupt
+ * controller; as a result, some peripherals that depend
+ * on their IRQs cannot be reached, so disable them.
+ */
+&intc_second {
+	status = "disabled";
+};
+
+&gpio0 {
+	status = "disabled";
+};
+
+&gpio1 {
+	status = "disabled";
+};
+
+&gpio2 {
+	status = "disabled";
+};
+
+&serial2 {
+	status = "disabled";
+};
+
+&serial3 {
+	status = "disabled";
+};
+
+&ssp {
+	status = "disabled";
+};
+
+&wdog {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb-11mp.dts b/arch/arm/boot/dts/arm-realview-eb-11mp.dts
new file mode 100644
index 0000000..87ff602
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb-11mp.dts
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "arm-realview-eb-mp.dtsi"
+
+/ {
+	model = "ARM RealView Emulation Baseboard with ARM11MPCore Rev C";
+	arm,hbi = <0x146>;
+
+	/*
+	 * This is the ARM11 MPCore tile (HBI-0146) used with the RealView EB.
+	 * Reference: ARM DUI 0318F
+	 *
+	 * To run this machine with QEMU, specify the following:
+	 * qemu-system-arm -M realview-eb-mpcore -smp cpus=4
+	 */
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		enable-method = "arm,realview-smp";
+
+		MP11_0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,arm11mpcore";
+			reg = <0>;
+			next-level-cache = <&L2>;
+		};
+
+		MP11_1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,arm11mpcore";
+			reg = <1>;
+			next-level-cache = <&L2>;
+		};
+
+		MP11_2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,arm11mpcore";
+			reg = <2>;
+			next-level-cache = <&L2>;
+		};
+
+		MP11_3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,arm11mpcore";
+			reg = <3>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
+
+&pmu {
+	interrupt-affinity = <&MP11_0>, <&MP11_1>, <&MP11_2>, <&MP11_3>;
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb-a9mp.dts b/arch/arm/boot/dts/arm-realview-eb-a9mp.dts
new file mode 100644
index 0000000..967684b
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb-a9mp.dts
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "arm-realview-eb-mp.dtsi"
+
+/ {
+	model = "ARM RealView EB Cortex A9 MPCore";
+
+	/*
+	 * This is the Cortex A9 MPCore tile used with the
+	 * RealView EB.
+	 */
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		enable-method = "arm,realview-smp";
+
+		A9_0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0>;
+			next-level-cache = <&L2>;
+		};
+
+		A9_1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <1>;
+			next-level-cache = <&L2>;
+		};
+
+		A9_2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <2>;
+			next-level-cache = <&L2>;
+		};
+
+		A9_3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <3>;
+			next-level-cache = <&L2>;
+		};
+	};
+};
+
+&pmu {
+	interrupt-affinity = <&A9_0>, <&A9_1>, <&A9_2>, <&A9_3>;
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
new file mode 100644
index 0000000..7b8d90b
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi
@@ -0,0 +1,225 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "arm-realview-eb.dtsi"
+
+/*
+ * This is the common include file for all MPCore variants of the
+ * Evaluation Baseboard, i.e. ARM11MPCore, ARM11MPCore Revision B
+ * and Cortex-A9 MPCore.
+ */
+/ {
+	soc {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "arm,realview-eb-soc", "simple-bus";
+		regmap = <&syscon>;
+		ranges;
+
+		/* Primary interrupt controller in the test chip */
+		intc: interrupt-controller@1f000100 {
+			compatible = "arm,eb11mp-gic";
+			#interrupt-cells = <3>;
+			#address-cells = <1>;
+			interrupt-controller;
+			reg = <0x1f001000 0x1000>,
+			      <0x1f000100 0x100>;
+		};
+
+		/* Secondary interrupt controller on the FPGA */
+		intc_second: interrupt-controller@10040000 {
+			compatible = "arm,pl390";
+			#interrupt-cells = <3>;
+			#address-cells = <1>;
+			interrupt-controller;
+			reg = <0x10041000 0x1000>,
+			      <0x10040000 0x100>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		L2: l2-cache {
+			compatible = "arm,l220-cache";
+			reg = <0x1f002000 0x1000>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 30 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 31 IRQ_TYPE_LEVEL_HIGH>;
+			cache-unified;
+			cache-level = <2>;
+			/*
+			 * Override default cache size, sets and
+			 * associativity as these may be erroneously set
+			 * up by boot loader(s), probably for safety
+			 * since the outer sync operation can cause the
+			 * cache to hang unless disabled.
+			 */
+			cache-size = <1048576>; // 1MB
+			cache-sets = <4096>;
+			cache-line-size = <32>;
+			arm,shared-override;
+			arm,parity-enable;
+			arm,outer-sync-disable;
+		};
+
+		scu: scu@1f000000 {
+			compatible = "arm,arm11mp-scu";
+			reg = <0x1f000000 0x100>;
+		};
+
+		twd_timer: timer@1f000600 {
+			compatible = "arm,arm11mp-twd-timer";
+			reg = <0x1f000600 0x20>;
+			interrupt-parent = <&intc>;
+			interrupts = <1 13 0xf04>;
+		};
+
+		twd_wdog: watchdog@1f000620 {
+			compatible = "arm,arm11mp-twd-wdt";
+			reg = <0x1f000620 0x20>;
+			interrupt-parent = <&intc>;
+			interrupts = <1 14 0xf04>;
+		};
+
+		/* PMU with one IRQ line per core */
+		pmu: pmu@0 {
+			compatible = "arm,arm11mpcore-pmu";
+			interrupt-parent = <&intc>;
+			interrupts = <0 17 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 18 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 19 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 20 IRQ_TYPE_LEVEL_HIGH>;
+		};
+	};
+};
+
+/*
+ * This adapts all the peripherals to the interrupt routing
+ * to the GIC on the core tile.
+ */
+
+&ethernet {
+	interrupt-parent = <&intc>;
+	interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&usb {
+	interrupt-parent = <&intc>;
+	interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&aaci {
+	interrupt-parent = <&intc>;
+	interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&mmc {
+	interrupt-parent = <&intc>;
+	interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>,
+			<0 15 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&kmi0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&kmi1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&charlcd {
+	interrupt-parent = <&intc>;
+	interrupts = <0  IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer01 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 1 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer23 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&rtc {
+	interrupt-parent = <&intc>;
+	interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+/*
+ * On revision A, these peripherals do not have their IRQ lines
+ * routed to the core tile, but they can be reached on the secondary
+ * GIC.
+ */
+&gpio0 {
+	interrupt-parent = <&intc_second>;
+	interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio1 {
+	interrupt-parent = <&intc_second>;
+	interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio2 {
+	interrupt-parent = <&intc_second>;
+	interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial2 {
+	interrupt-parent = <&intc_second>;
+	interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
+	status = "okay";
+};
+
+&serial3 {
+	interrupt-parent = <&intc_second>;
+	interrupts = <0 15 IRQ_TYPE_LEVEL_HIGH>;
+	status = "okay";
+};
+
+&ssp {
+	interrupt-parent = <&intc_second>;
+	interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+	status = "okay";
+};
+
+&wdog {
+	interrupt-parent = <&intc_second>;
+	interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb.dts b/arch/arm/boot/dts/arm-realview-eb.dts
new file mode 100644
index 0000000..1543107
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb.dts
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "arm-realview-eb.dtsi"
+
+/ {
+	model = "ARM RealView Emulation Baseboard";
+	compatible = "arm,realview-eb";
+	arm,hbi = <0x140>;
+
+	/*
+	 * This is the core tile with the CPU and GIC etc for the
+	 * ARM926EJ-S, ARM1136, ARM1176 that does not have L2 cache
+	 * or PMU.
+	 *
+	 * To run this machine with QEMU, specify the following:
+	 * qemu-system-arm -M realview-eb
+	 * Unless specified, QEMU will emulate an ARM926EJ-S core tile.
+ * Switches -cpu arm1136 or -cpu arm1176 emulate the other
+	 * core tiles.
+	 */
+	soc {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "arm,realview-eb-soc", "simple-bus";
+		regmap = <&syscon>;
+		ranges;
+
+		intc: interrupt-controller@10040000 {
+			compatible = "arm,pl390";
+			#interrupt-cells = <3>;
+			#address-cells = <1>;
+			interrupt-controller;
+			reg = <0x10041000 0x1000>,
+			      <0x10040000 0x100>;
+		};
+	};
+};
+
+/*
+ * This adapts all the peripherals to the interrupt routing
+ * to the GIC on the core tile.
+ */
+
+&ethernet {
+	interrupt-parent = <&intc>;
+	interrupts = <0 28 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&usb {
+	interrupt-parent = <&intc>;
+	interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&aaci {
+	interrupt-parent = <&intc>;
+	interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&mmc {
+	interrupt-parent = <&intc>;
+	interrupts = <0 17 IRQ_TYPE_LEVEL_HIGH>,
+			<0 18 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&kmi0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 20 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&kmi1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&charlcd {
+	interrupt-parent = <&intc>;
+	interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial2 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial3 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 15 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&ssp {
+	interrupt-parent = <&intc>;
+	interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&wdog {
+	interrupt-parent = <&intc>;
+	interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer01 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer23 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio2 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&rtc {
+	interrupt-parent = <&intc>;
+	interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&clcd {
+	interrupt-parent = <&intc>;
+	interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi
new file mode 100644
index 0000000..1c6a040
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-eb.dtsi
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "skeleton.dtsi"
+
+/ {
+	compatible = "arm,realview-eb";
+
+	chosen { };
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		i2c0 = &i2c;
+	};
+
+	memory {
+		/* 128 MiB memory @ 0x0 */
+		reg = <0x00000000 0x08000000>;
+	};
+
+	/* The voltage to the MMC card is hardwired at 3.3V */
+	vmmc: fixedregulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "vmmc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+	};
+
+	veth: fixedregulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "veth";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+	};
+
+	xtal24mhz: xtal24mhz@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+	};
+
+	timclk: timclk@1M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <24>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	mclk: mclk@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <1>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	kmiclk: kmiclk@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <1>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	sspclk: sspclk@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <1>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	uartclk: uartclk@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <1>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	wdogclk: wdogclk@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <1>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	/* FIXME: this actually hangs off the PLL clocks */
+	pclk: pclk@0 {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <0>;
+	};
+
+	flash0@40000000 {
+		/* 2 * 32MiB NOR Flash memory */
+		compatible = "arm,versatile-flash", "cfi-flash";
+		reg = <0x40000000 0x04000000>;
+		bank-width = <4>;
+	};
+
+	flash1@44000000 {
+		/* 2 * 32MiB NOR Flash memory */
+		compatible = "arm,versatile-flash", "cfi-flash";
+		reg = <0x44000000 0x04000000>;
+		bank-width = <4>;
+	};
+
+	/* SMSC 9118 ethernet with PHY and EEPROM */
+	ethernet: ethernet@4e000000 {
+		compatible = "smsc,lan9118", "smsc,lan9115";
+		reg = <0x4e000000 0x10000>;
+		phy-mode = "mii";
+		reg-io-width = <4>;
+		smsc,irq-active-high;
+		smsc,irq-push-pull;
+		vdd33a-supply = <&veth>;
+		vddvario-supply = <&veth>;
+	};
+
+	usb: usb@4f000000 {
+		compatible = "nxp,usb-isp1761";
+		reg = <0x4f000000 0x20000>;
+		port1-otg;
+	};
+
+	/* These peripherals are inside the FPGA */
+	fpga {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		ranges;
+
+		syscon: syscon@10000000 {
+			compatible = "arm,realview-eb-syscon", "syscon", "simple-mfd";
+			reg = <0x10000000 0x1000>;
+
+			led@08.0 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x01>;
+				label = "versatile:0";
+				linux,default-trigger = "heartbeat";
+				default-state = "on";
+			};
+			led@08.1 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x02>;
+				label = "versatile:1";
+				linux,default-trigger = "mmc0";
+				default-state = "off";
+			};
+			led@08.2 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x04>;
+				label = "versatile:2";
+				linux,default-trigger = "cpu0";
+				default-state = "off";
+			};
+			led@08.3 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x08>;
+				label = "versatile:3";
+				default-state = "off";
+			};
+			led@08.4 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x10>;
+				label = "versatile:4";
+				default-state = "off";
+			};
+			led@08.5 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x20>;
+				label = "versatile:5";
+				default-state = "off";
+			};
+			led@08.6 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x40>;
+				label = "versatile:6";
+				default-state = "off";
+			};
+			led@08.7 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x80>;
+				label = "versatile:7";
+				default-state = "off";
+			};
+			oscclk0: osc0@0c {
+				compatible = "arm,syscon-icst307";
+				#clock-cells = <0>;
+				lock-offset = <0x20>;
+				vco-offset = <0x0C>;
+				clocks = <&xtal24mhz>;
+			};
+			oscclk1: osc1@10 {
+				compatible = "arm,syscon-icst307";
+				#clock-cells = <0>;
+				lock-offset = <0x20>;
+				vco-offset = <0x10>;
+				clocks = <&xtal24mhz>;
+			};
+			oscclk2: osc2@14 {
+				compatible = "arm,syscon-icst307";
+				#clock-cells = <0>;
+				lock-offset = <0x20>;
+				vco-offset = <0x14>;
+				clocks = <&xtal24mhz>;
+			};
+			oscclk3: osc3@18 {
+				compatible = "arm,syscon-icst307";
+				#clock-cells = <0>;
+				lock-offset = <0x20>;
+				vco-offset = <0x18>;
+				clocks = <&xtal24mhz>;
+			};
+			oscclk4: osc4@1c {
+				compatible = "arm,syscon-icst307";
+				#clock-cells = <0>;
+				lock-offset = <0x20>;
+				vco-offset = <0x1c>;
+				clocks = <&xtal24mhz>;
+			};
+		};
+
+		i2c: i2c@10002000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "arm,versatile-i2c";
+			reg = <0x10002000 0x1000>;
+
+			rtc@68 {
+				compatible = "dallas,ds1338";
+				reg = <0x68>;
+			};
+		};
+
+		aaci: aaci@10004000 {
+			compatible = "arm,pl041", "arm,primecell";
+			reg = <0x10004000 0x1000>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		mmc: mmcsd@10005000 {
+			compatible = "arm,pl18x", "arm,primecell";
+			reg = <0x10005000 0x1000>;
+
+			/* Due to frequent FIFO overruns, use just 500 kHz */
+			max-frequency = <500000>;
+			bus-width = <4>;
+			cap-sd-highspeed;
+			cap-mmc-highspeed;
+			clocks = <&mclk>, <&pclk>;
+			clock-names = "mclk", "apb_pclk";
+			vmmc-supply = <&vmmc>;
+			cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+			wp-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+		};
+
+		kmi0: kmi@10006000 {
+			compatible = "arm,pl050", "arm,primecell";
+			reg = <0x10006000 0x1000>;
+			clocks = <&kmiclk>, <&pclk>;
+			clock-names = "KMIREFCLK", "apb_pclk";
+		};
+
+		kmi1: kmi@10007000 {
+			compatible = "arm,pl050", "arm,primecell";
+			reg = <0x10007000 0x1000>;
+			clocks = <&kmiclk>, <&pclk>;
+			clock-names = "KMIREFCLK", "apb_pclk";
+		};
+
+		charlcd: fpga_charlcd: charlcd@10008000 {
+			compatible = "arm,versatile-lcd";
+			reg = <0x10008000 0x1000>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		serial0: serial@10009000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x10009000 0x1000>;
+			clocks = <&uartclk>, <&pclk>;
+			clock-names = "uartclk", "apb_pclk";
+		};
+
+		serial1: serial@1000a000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x1000a000 0x1000>;
+			clocks = <&uartclk>, <&pclk>;
+			clock-names = "uartclk", "apb_pclk";
+		};
+
+		serial2: serial@1000b000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x1000b000 0x1000>;
+			clocks = <&uartclk>, <&pclk>;
+			clock-names = "uartclk", "apb_pclk";
+		};
+
+		serial3: serial@1000c000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x1000c000 0x1000>;
+			clocks = <&uartclk>, <&pclk>;
+			clock-names = "uartclk", "apb_pclk";
+		};
+
+		ssp: ssp@1000d000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x1000d000 0x1000>;
+			clocks = <&sspclk>, <&pclk>;
+			clock-names = "SSPCLK", "apb_pclk";
+		};
+
+		wdog: watchdog@10010000 {
+			compatible = "arm,sp805", "arm,primecell";
+			reg = <0x10010000 0x1000>;
+			clocks = <&wdogclk>, <&pclk>;
+			clock-names = "wdogclk", "apb_pclk";
+			status = "disabled";
+		};
+
+		timer01: timer@10011000 {
+			compatible = "arm,sp804", "arm,primecell";
+			reg = <0x10011000 0x1000>;
+			clocks = <&timclk>, <&timclk>, <&pclk>;
+			clock-names = "timer1", "timer2", "apb_pclk";
+		};
+
+		timer23: timer@10012000 {
+			compatible = "arm,sp804", "arm,primecell";
+			reg = <0x10012000 0x1000>;
+			clocks = <&timclk>, <&timclk>, <&pclk>;
+			clock-names = "timer1", "timer2", "apb_pclk";
+		};
+
+		gpio0: gpio@10013000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x10013000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio1: gpio@10014000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x10014000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio2: gpio@10015000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x10015000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		rtc: rtc@10017000 {
+			compatible = "arm,pl031", "arm,primecell";
+			reg = <0x10017000 0x1000>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		clcd: clcd@10020000 {
+			compatible = "arm,pl111", "arm,primecell";
+			reg = <0x10020000 0x1000>;
+			interrupt-names = "combined";
+			clocks = <&oscclk0>, <&pclk>;
+			clock-names = "clcdclk", "apb_pclk";
+
+			port {
+				clcd_pads: endpoint {
+					remote-endpoint = <&clcd_panel>;
+					arm,pl11x,tft-r0g0b0-pads = <0 8 16>;
+				};
+			};
+
+			panel {
+				compatible = "panel-dpi";
+
+				port {
+					clcd_panel: endpoint {
+						remote-endpoint = <&clcd_pads>;
+					};
+				};
+
+				/* Standard 640x480 VGA timings */
+				panel-timing {
+					clock-frequency = <25175000>;
+					hactive = <640>;
+					hback-porch = <48>;
+					hfront-porch = <16>;
+					hsync-len = <96>;
+					vactive = <480>;
+					vback-porch = <33>;
+					vfront-porch = <10>;
+					vsync-len = <2>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index 652d85b..c789564 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -394,6 +394,46 @@
 			reg = <0x10200000 0x4000>;
 			bank-width = <1>;
 		};
+
+		clcd@10112000 {
+			compatible = "arm,pl111", "arm,primecell";
+			reg = <0x10112000 0x1000>;
+			interrupt-parent = <&intc_dc1176>;
+			interrupt-names = "combined";
+			interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&oscclk0>, <&pclk>;
+			clock-names = "clcdclk", "apb_pclk";
+
+			port {
+				clcd_pads: endpoint {
+					remote-endpoint = <&clcd_panel>;
+					arm,pl11x,tft-r0g0b0-pads = <0 8 16>;
+				};
+			};
+
+			panel {
+				compatible = "panel-dpi";
+
+				port {
+					clcd_panel: endpoint {
+						remote-endpoint = <&clcd_pads>;
+					};
+				};
+
+				/* Standard 640x480 VGA timings */
+				panel-timing {
+					clock-frequency = <25175000>;
+					hactive = <640>;
+					hback-porch = <48>;
+					hfront-porch = <16>;
+					hsync-len = <96>;
+					vactive = <480>;
+					vback-porch = <33>;
+					vfront-porch = <10>;
+					vsync-len = <2>;
+				};
+			};
+		};
 	};
 
 	/* These peripherals are inside the FPGA rather than the DevChip */
diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts
index 63d6a05..3944765 100644
--- a/arch/arm/boot/dts/arm-realview-pb11mp.dts
+++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts
@@ -627,16 +627,17 @@
 					};
 				};
 
+				/* Standard 640x480 VGA timings */
 				panel-timing {
-					clock-frequency = <63500127>;
-					hactive = <1024>;
-					hback-porch = <152>;
-					hfront-porch = <48>;
-					hsync-len = <104>;
-					vactive = <768>;
-					vback-porch = <23>;
-					vfront-porch = <3>;
-					vsync-len = <4>;
+					clock-frequency = <25175000>;
+					hactive = <640>;
+					hback-porch = <48>;
+					hfront-porch = <16>;
+					hsync-len = <96>;
+					vactive = <480>;
+					vback-porch = <33>;
+					vfront-porch = <10>;
+					vsync-len = <2>;
 				};
 			};
 		};
diff --git a/arch/arm/boot/dts/arm-realview-pba8.dts b/arch/arm/boot/dts/arm-realview-pba8.dts
new file mode 100644
index 0000000..d3238c2
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-pba8.dts
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "arm-realview-pbx.dtsi"
+
+/ {
+	model = "ARM RealView Platform Baseboard for Cortex-A8";
+	compatible = "arm,realview-pba8";
+	arm,hbi = <0x178>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		enable-method = "arm,realview-smp";
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a8";
+			reg = <0>;
+		};
+	};
+
+	pmu: pmu@0 {
+		compatible = "arm,cortex-a8-pmu";
+		interrupt-parent = <&intc>;
+		interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>;
+	};
+
+	/* Primary GIC PL390 interrupt controller in the test chip */
+	intc: interrupt-controller@1e000000 {
+		compatible = "arm,pl390";
+		#interrupt-cells = <3>;
+		#address-cells = <1>;
+		interrupt-controller;
+		reg = <0x1e001000 0x1000>,
+		      <0x1e000000 0x100>;
+	};
+};
+
+&ethernet {
+	interrupt-parent = <&intc>;
+	interrupts = <0 28 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&usb {
+	interrupt-parent = <&intc>;
+	interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&soc {
+	compatible = "arm,realview-pba8-soc", "simple-bus";
+};
+
+&syscon {
+	compatible = "arm,realview-pba8-syscon", "syscon", "simple-mfd";
+};
+
+&serial0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial2 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial3 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 15 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&ssp {
+	interrupt-parent = <&intc>;
+	interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&wdog0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&wdog1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer01 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer23 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio2 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&rtc {
+	interrupt-parent = <&intc>;
+	interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer45 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 41 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer67 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 42 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&aaci {
+	interrupt-parent = <&intc>;
+	interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&mmc {
+	interrupt-parent = <&intc>;
+	interrupts = <0 17 IRQ_TYPE_LEVEL_HIGH>,
+		     <0 18 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&kmi0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 20 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&kmi1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&clcd {
+	interrupt-parent = <&intc>;
+	interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/arch/arm/boot/dts/arm-realview-pbx-a9.dts b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
new file mode 100644
index 0000000..db808f9
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "arm-realview-pbx.dtsi"
+
+/ {
+	/*
+	 * This is the RealView Platform Baseboard Explore for Cortex-A9
+	 * (HBI0182 + HBI0183) as described in ARM DUI 0440B
+	 */
+	model = "ARM RealView Platform Baseboard Explore for Cortex-A9";
+	arm,hbi = <0x182>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		enable-method = "arm,realview-smp";
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+				core1 {
+					cpu = <&CPU1>;
+				};
+			};
+		};
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x0>;
+			next-level-cache = <&L2>;
+		};
+		CPU1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0x1>;
+			next-level-cache = <&L2>;
+		};
+	};
+
+	L2: l2-cache {
+		compatible = "arm,pl310-cache";
+		reg = <0x1f002000 0x1000>;
+		cache-unified;
+		cache-level = <2>;
+		/*
+		 * Override default cache size, sets and
+		 * associativity as these may be erroneously set
+		 * up by boot loader(s).
+		 */
+		cache-size = <1048576>; // 1MB
+		cache-sets = <4096>;
+		cache-line-size = <32>;
+		arm,parity-disable;
+		arm,tag-latency = <1>;
+		arm,data-latency = <1 1>;
+		arm,dirty-latency = <1>;
+	};
+
+	scu: scu@1f000000 {
+		compatible = "arm,cortex-a9-scu";
+		reg = <0x1f000000 0x100>;
+	};
+
+	twd_timer: timer@1f000600 {
+		compatible = "arm,cortex-a9-twd-timer";
+		reg = <0x1f000600 0x20>;
+		interrupt-parent = <&intc>;
+		interrupts = <1 13 0xf04>;
+	};
+
+	twd_wdog: watchdog@1f000620 {
+		compatible = "arm,cortex-a9-twd-wdt";
+		reg = <0x1f000620 0x20>;
+		interrupt-parent = <&intc>;
+		interrupts = <1 14 0xf04>;
+	};
+
+	pmu: pmu@0 {
+		compatible = "arm,cortex-a9-pmu";
+		interrupt-parent = <&intc>;
+		interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 45 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&CPU0>, <&CPU1>;
+	};
+
+	/* Primary GIC PL390 interrupt controller in the test chip */
+	intc: interrupt-controller@1f000000 {
+		compatible = "arm,cortex-a9-gic";
+		#interrupt-cells = <3>;
+		#address-cells = <1>;
+		interrupt-controller;
+		reg = <0x1f001000 0x1000>,
+		      <0x1f000100 0x100>;
+	};
+};
+
+&ethernet {
+	interrupt-parent = <&intc>;
+	interrupts = <0 28 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&usb {
+	interrupt-parent = <&intc>;
+	interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial2 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&serial3 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 15 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&ssp {
+	interrupt-parent = <&intc>;
+	interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&wdog0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&wdog1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer01 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer23 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&gpio2 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&rtc {
+	interrupt-parent = <&intc>;
+	interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer45 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 41 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&timer67 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 42 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&aaci {
+	interrupt-parent = <&intc>;
+	interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&mmc {
+	interrupt-parent = <&intc>;
+	interrupts = <0 17 IRQ_TYPE_LEVEL_HIGH>,
+		     <0 18 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&kmi0 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 20 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&kmi1 {
+	interrupt-parent = <&intc>;
+	interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+&clcd {
+	interrupt-parent = <&intc>;
+	interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi
new file mode 100644
index 0000000..aeb49c4
--- /dev/null
+++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi
@@ -0,0 +1,542 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "skeleton.dtsi"
+
+/ {
+	compatible = "arm,realview-pbx";
+
+	chosen { };
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		i2c0 = &i2c;
+	};
+
+	memory {
+		/* 128 MiB memory @ 0x0 */
+		reg = <0x00000000 0x08000000>;
+	};
+
+	/* The voltage to the MMC card is hardwired at 3.3V */
+	vmmc: fixedregulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "vmmc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+	};
+
+	veth: fixedregulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "veth";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+	};
+
+	xtal24mhz: xtal24mhz@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+	};
+
+	refclk32khz: refclk32khz {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <32768>;
+	};
+
+	timclk: timclk@1M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <24>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	mclk: mclk@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <1>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	kmiclk: kmiclk@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <1>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	sspclk: sspclk@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <1>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	uartclk: uartclk@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <1>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	wdogclk: wdogclk@24M {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clock-div = <1>;
+		clock-mult = <1>;
+		clocks = <&xtal24mhz>;
+	};
+
+	/* FIXME: this actually hangs off the PLL clocks */
+	pclk: pclk@0 {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <0>;
+	};
+
+	flash0@40000000 {
+		/* 2 * 32MiB NOR Flash memory */
+		compatible = "arm,versatile-flash", "cfi-flash";
+		reg = <0x40000000 0x04000000>;
+		bank-width = <4>;
+	};
+
+	flash1@44000000 {
+		/* 2 * 32MiB NOR Flash memory */
+		compatible = "arm,versatile-flash", "cfi-flash";
+		reg = <0x44000000 0x04000000>;
+		bank-width = <4>;
+	};
+
+	/* SMSC 9118 ethernet with PHY and EEPROM */
+	ethernet: ethernet@4e000000 {
+		compatible = "smsc,lan9118", "smsc,lan9115";
+		reg = <0x4e000000 0x10000>;
+		phy-mode = "mii";
+		reg-io-width = <4>;
+		smsc,irq-active-high;
+		smsc,irq-push-pull;
+		vdd33a-supply = <&veth>;
+		vddvario-supply = <&veth>;
+	};
+
+	usb: usb@4f000000 {
+		compatible = "nxp,usb-isp1761";
+		reg = <0x4f000000 0x20000>;
+		port1-otg;
+	};
+
+	soc: soc@0 {
+		compatible = "arm,realview-pbx-soc", "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		regmap = <&syscon>;
+		ranges;
+
+		syscon: syscon@10000000 {
+			compatible = "arm,realview-pbx-syscon", "syscon", "simple-mfd";
+			reg = <0x10000000 0x1000>;
+
+			led@08.0 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x01>;
+				label = "versatile:0";
+				linux,default-trigger = "heartbeat";
+				default-state = "on";
+			};
+			led@08.1 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x02>;
+				label = "versatile:1";
+				linux,default-trigger = "mmc0";
+				default-state = "off";
+			};
+			led@08.2 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x04>;
+				label = "versatile:2";
+				linux,default-trigger = "cpu0";
+				default-state = "off";
+			};
+			led@08.3 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x08>;
+				label = "versatile:3";
+				default-state = "off";
+			};
+			led@08.4 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x10>;
+				label = "versatile:4";
+				default-state = "off";
+			};
+			led@08.5 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x20>;
+				label = "versatile:5";
+				default-state = "off";
+			};
+			led@08.6 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x40>;
+				label = "versatile:6";
+				default-state = "off";
+			};
+			led@08.7 {
+				compatible = "register-bit-led";
+				offset = <0x08>;
+				mask = <0x80>;
+				label = "versatile:7";
+				default-state = "off";
+			};
+			oscclk0: osc0@0c {
+				compatible = "arm,syscon-icst307";
+				#clock-cells = <0>;
+				lock-offset = <0x20>;
+				vco-offset = <0x0C>;
+				clocks = <&xtal24mhz>;
+			};
+			oscclk1: osc1@10 {
+				compatible = "arm,syscon-icst307";
+				#clock-cells = <0>;
+				lock-offset = <0x20>;
+				vco-offset = <0x10>;
+				clocks = <&xtal24mhz>;
+			};
+			oscclk2: osc2@14 {
+				compatible = "arm,syscon-icst307";
+				#clock-cells = <0>;
+				lock-offset = <0x20>;
+				vco-offset = <0x14>;
+				clocks = <&xtal24mhz>;
+			};
+			oscclk3: osc3@18 {
+				compatible = "arm,syscon-icst307";
+				#clock-cells = <0>;
+				lock-offset = <0x20>;
+				vco-offset = <0x18>;
+				clocks = <&xtal24mhz>;
+			};
+			oscclk4: osc4@1c {
+				compatible = "arm,syscon-icst307";
+				#clock-cells = <0>;
+				lock-offset = <0x20>;
+				vco-offset = <0x1c>;
+				clocks = <&xtal24mhz>;
+			};
+		};
+
+		sp810_syscon0: sysctl@10001000 {
+			compatible = "arm,sp810", "arm,primecell";
+			reg = <0x10001000 0x1000>;
+			clocks = <&refclk32khz>, <&timclk>, <&xtal24mhz>;
+			clock-names = "refclk", "timclk", "apb_pclk";
+			#clock-cells = <1>;
+			clock-output-names = "timerclk0",
+					     "timerclk1",
+					     "timerclk2",
+					     "timerclk3";
+			assigned-clocks = <&sp810_syscon0 0>,
+					  <&sp810_syscon0 1>,
+					  <&sp810_syscon0 2>,
+					  <&sp810_syscon0 3>;
+			assigned-clock-parents = <&timclk>,
+					       <&timclk>,
+					       <&timclk>,
+					       <&timclk>;
+		};
+
+		i2c: i2c@10002000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "arm,versatile-i2c";
+			reg = <0x10002000 0x1000>;
+
+			rtc@68 {
+				compatible = "dallas,ds1338";
+				reg = <0x68>;
+			};
+		};
+
+		serial0: serial@10009000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x10009000 0x1000>;
+			clocks = <&uartclk>, <&pclk>;
+			clock-names = "uartclk", "apb_pclk";
+		};
+
+		serial1: serial@1000a000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x1000a000 0x1000>;
+			clocks = <&uartclk>, <&pclk>;
+			clock-names = "uartclk", "apb_pclk";
+		};
+
+		serial2: serial@1000b000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x1000b000 0x1000>;
+			clocks = <&uartclk>, <&pclk>;
+			clock-names = "uartclk", "apb_pclk";
+		};
+
+		ssp: ssp@1000d000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x1000d000 0x1000>;
+			clocks = <&sspclk>, <&pclk>;
+			clock-names = "SSPCLK", "apb_pclk";
+		};
+
+		wdog0: watchdog@1000f000 {
+			compatible = "arm,sp805", "arm,primecell";
+			reg = <0x1000f000 0x1000>;
+			clocks = <&wdogclk>, <&pclk>;
+			clock-names = "wdogclk", "apb_pclk";
+			status = "disabled";
+		};
+
+		wdog1: watchdog@10010000 {
+			compatible = "arm,sp805", "arm,primecell";
+			reg = <0x10010000 0x1000>;
+			clocks = <&wdogclk>, <&pclk>;
+			clock-names = "wdogclk", "apb_pclk";
+			status = "disabled";
+		};
+
+		timer01: timer@10011000 {
+			compatible = "arm,sp804", "arm,primecell";
+			reg = <0x10011000 0x1000>;
+			clocks = <&sp810_syscon0 0>,
+			         <&sp810_syscon0 1>,
+				 <&pclk>;
+			clock-names = "timerclk0",
+				    "timerclk1",
+				    "apb_pclk";
+		};
+
+		timer23: timer@10012000 {
+			compatible = "arm,sp804", "arm,primecell";
+			reg = <0x10012000 0x1000>;
+			clocks = <&sp810_syscon0 2>,
+			         <&sp810_syscon0 3>,
+				 <&pclk>;
+			clock-names = "timerclk2",
+				    "timerclk3",
+				    "apb_pclk";
+		};
+
+		gpio0: gpio@10013000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x10013000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio1: gpio@10014000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x10014000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio2: gpio@10015000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x10015000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		/* DVI serial bus control is at 10016000 */
+
+		rtc: rtc@10017000 {
+			compatible = "arm,pl031", "arm,primecell";
+			reg = <0x10017000 0x1000>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		timer45: timer@10018000 {
+			compatible = "arm,sp804", "arm,primecell";
+			reg = <0x10018000 0x1000>;
+			clocks = <&timclk>, <&timclk>, <&pclk>;
+			clock-names = "timerclk4", "timerclk5", "apb_pclk";
+		};
+
+		timer67: timer@10019000 {
+			compatible = "arm,sp804", "arm,primecell";
+			reg = <0x10019000 0x1000>;
+			clocks = <&timclk>, <&timclk>, <&pclk>;
+			clock-names = "timerclk6", "timerclk7", "apb_pclk";
+		};
+
+		sp810_syscon1: sysctl@1001a000 {
+			compatible = "arm,sp810", "arm,primecell";
+			reg = <0x1001a000 0x1000>;
+			clocks = <&refclk32khz>, <&timclk>, <&xtal24mhz>;
+			clock-names = "refclk", "timclk", "apb_pclk";
+			#clock-cells = <1>;
+			clock-output-names = "timerclk4",
+					     "timerclk5",
+					     "timerclk6",
+					     "timerclk7";
+			assigned-clocks = <&sp810_syscon1 0>,
+					  <&sp810_syscon1 1>,
+					  <&sp810_syscon1 2>,
+					  <&sp810_syscon1 3>;
+			assigned-clock-parents = <&timclk>,
+					       <&timclk>,
+					       <&timclk>,
+					       <&timclk>;
+		};
+	};
+
+
+	/* These peripherals are inside the FPGA */
+	fpga {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		ranges;
+
+		aaci: aaci@10004000 {
+			compatible = "arm,pl041", "arm,primecell";
+			reg = <0x10004000 0x1000>;
+			clocks = <&pclk>;
+			clock-names = "apb_pclk";
+		};
+
+		mmc: mmcsd@10005000 {
+			compatible = "arm,pl18x", "arm,primecell";
+			reg = <0x10005000 0x1000>;
+
+			/* Due to frequent FIFO overruns, use just 500 kHz */
+			max-frequency = <500000>;
+			bus-width = <4>;
+			cap-sd-highspeed;
+			cap-mmc-highspeed;
+			clocks = <&mclk>, <&pclk>;
+			clock-names = "mclk", "apb_pclk";
+			vmmc-supply = <&vmmc>;
+			cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+			wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+		};
+
+		kmi0: kmi@10006000 {
+			compatible = "arm,pl050", "arm,primecell";
+			reg = <0x10006000 0x1000>;
+			clocks = <&kmiclk>, <&pclk>;
+			clock-names = "KMIREFCLK", "apb_pclk";
+		};
+
+		kmi1: kmi@10007000 {
+			compatible = "arm,pl050", "arm,primecell";
+			reg = <0x10007000 0x1000>;
+			clocks = <&kmiclk>, <&pclk>;
+			clock-names = "KMIREFCLK", "apb_pclk";
+		};
+
+		serial3: serial@1000c000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x1000c000 0x1000>;
+			clocks = <&uartclk>, <&pclk>;
+			clock-names = "uartclk", "apb_pclk";
+		};
+	};
+
+	/* These peripherals are inside the NEC ISSP */
+	issp {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		ranges;
+
+		clcd: clcd@10020000 {
+			compatible = "arm,pl111", "arm,primecell";
+			reg = <0x10020000 0x1000>;
+			interrupt-names = "combined";
+			clocks = <&oscclk4>, <&pclk>;
+			clock-names = "clcdclk", "apb_pclk";
+
+			port {
+				clcd_pads: endpoint {
+					remote-endpoint = <&clcd_panel>;
+					arm,pl11x,tft-r0g0b0-pads = <0 8 16>;
+				};
+			};
+
+			panel {
+				compatible = "panel-dpi";
+
+				port {
+					clcd_panel: endpoint {
+						remote-endpoint = <&clcd_pads>;
+					};
+				};
+
+				/* Standard 640x480 VGA timings */
+				panel-timing {
+					clock-frequency = <25175000>;
+					hactive = <640>;
+					hback-porch = <48>;
+					hfront-porch = <16>;
+					hsync-len = <96>;
+					vactive = <480>;
+					vback-porch = <33>;
+					vfront-porch = <10>;
+					vsync-len = <2>;
+				};
+			};
+		};
+	};
+};
+
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 3710755..8450944 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -117,7 +117,7 @@
 			};
 
 			/* USB part of the eSATA/USB 2.0 port */
-			usb@50000 {
+			usb@58000 {
 				status = "okay";
 			};
 
@@ -245,7 +245,7 @@
 		button@2 {
 			label = "Factory Reset Button";
 			linux,code = <KEY_RESTART>;
-			gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
 		};
 	};
 
@@ -260,7 +260,7 @@
 		};
 
 		sata {
-			gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
+			gpios = <&gpio1 22 GPIO_ACTIVE_LOW>;
 			default-state = "off";
 		};
 	};
@@ -313,7 +313,7 @@
 
 &pinctrl {
 	keys_pin: keys-pin {
-		marvell,pins = "mpp24", "mpp47";
+		marvell,pins = "mpp24", "mpp29";
 		marvell,function = "gpio";
 	};
 
diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
index b89e6cf..7a46154 100644
--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
@@ -304,13 +304,13 @@
 		button@1 {
 			label = "WPS";
 			linux,code = <KEY_WPS_BUTTON>;
-			gpios = <&gpio1 0 GPIO_ACTIVE_HIGH>;
+			gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
 		};
 
 		button@2 {
 			label = "Factory Reset Button";
 			linux,code = <KEY_RESTART>;
-			gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+			gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/armv7-m.dtsi b/arch/arm/boot/dts/armv7-m.dtsi
index b1ad7cf..16331aa 100644
--- a/arch/arm/boot/dts/armv7-m.dtsi
+++ b/arch/arm/boot/dts/armv7-m.dtsi
@@ -1,7 +1,7 @@
 #include "skeleton.dtsi"
 
 / {
-	nvic: nv-interrupt-controller  {
+	nvic: interrupt-controller@e000e100  {
 		compatible = "arm,armv7m-nvic";
 		interrupt-controller;
 		#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/artpec6.dtsi b/arch/arm/boot/dts/artpec6.dtsi
index 3043016..3fac4c4 100644
--- a/arch/arm/boot/dts/artpec6.dtsi
+++ b/arch/arm/boot/dts/artpec6.dtsi
@@ -91,96 +91,32 @@
 		clock-frequency = <50000000>;
 	};
 
-	/* PLL1 is used by CPU and some peripherals */
-	pll1_clk: pll1_clk@f8000000 {
+	eth_phy_ref_clk: eth_phy_ref_clk {
 		#clock-cells = <0>;
-		compatible = "axis,artpec6-pll1-clock";
-		reg = <0xf8000000 4>;
+		compatible = "fixed-clock";
+		clock-frequency = <125000000>;
+	};
+
+	clkctrl: clkctrl@0xf8000000 {
+		#clock-cells = <1>;
+		compatible = "axis,artpec6-clkctrl";
+		reg = <0xf8000000 0x48>;
 		clocks = <&ext_clk>;
+		clock-names = "sys_refclk";
 	};
 
-	cpu_clk: cpu_clk {
-		#clock-cells = <0>;
-		compatible = "fixed-factor-clock";
-		clock-div = <1>;
-		clock-mult = <1>;
-		clocks = <&pll1_clk>;
-		clock-output-names = "cpu_clk";
-	};
-
-	cpu_clkdiv2: cpu_clkdiv2 {
-		#clock-cells = <0>;
-		compatible = "fixed-factor-clock";
-		clock-div = <2>;
-		clock-mult = <1>;
-		clocks = <&cpu_clk>;
-	};
-
-	cpu_clkdiv4: cpu_clkdiv4 {
-		#clock-cells = <0>;
-		compatible = "fixed-factor-clock";
-		clock-div = <4>;
-		clock-mult = <1>;
-		clocks = <&cpu_clk>;
-	};
-
-	apb_pclk: apb_pclk {
-		#clock-cells = <0>;
-		compatible = "fixed-factor-clock";
-		clock-div = <8>;
-		clock-mult = <1>;
-		clocks = <&cpu_clk>;
-		clock-output-names = "apb_pclk";
-	};
-
-	/* PLL2 is used by a number of peripherals, including UDL */
-	pll2: pll2 {
-		#clock-cells = <0>;
-		compatible = "fixed-factor-clock";
-		clock-div = <1>;
-		clock-mult = <24>;
-		clocks = <&ext_clk>;
-	};
-
-	/* PLL2DIV2 is used by the Fractional Clock Divider, for i2s */
-	pll2div2: pll2div2 {
-		#clock-cells = <0>;
-		compatible = "fixed-factor-clock";
-		clock-div = <2>;
-		clock-mult = <1>;
-		clocks = <&pll2>;
-	};
-
-	pll2div12: pll2div12 {
-		#clock-cells = <0>;
-		compatible = "fixed-factor-clock";
-		clock-div = <12>;
-		clock-mult = <1>;
-		clocks = <&pll2>;
-	};
-
-	pll2div24: pll2div24 {
-		#clock-cells = <0>;
-		compatible = "fixed-factor-clock";
-		clock-div = <24>;
-		clock-mult = <1>;
-		clocks = <&pll2>;
-		clock-output-names = "uart_clk";
-	};
-
-
 	gtimer@faf00200 {
 		compatible = "arm,cortex-a9-global-timer";
 		reg = <0xfaf00200 0x20>;
 		interrupts = <GIC_PPI 11 0xf01>;
-		clocks = <&cpu_clkdiv2>;
+		clocks = <&clkctrl 1>;
 	};
 
 	timer@faf00600 {
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0xfaf00600 0x20>;
 		interrupts = <GIC_PPI 13 0xf04>;
-		clocks = <&cpu_clkdiv2>;
+		clocks = <&clkctrl 1>;
 		status = "disabled";
 	};
 
@@ -220,7 +156,8 @@
 
 		ethernet: ethernet@f8010000 {
 			clock-names = "phy_ref_clk", "apb_pclk";
-			clocks = <&ext_clk>, <&apb_pclk>;
+			clocks = <&eth_phy_ref_clk>,
+				<&clkctrl 4>;
 			compatible = "snps,dwc-qos-ethernet-4.10";
 			interrupt-parent = <&intc>;
 			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
@@ -238,7 +175,8 @@
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0xf8036000 0x1000>;
 			interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&pll2div24>, <&apb_pclk>;
+			clocks = <&clkctrl 13>,
+				<&clkctrl 12>;
 			clock-names = "uart_clk", "apb_pclk";
 			status = "disabled";
 		};
@@ -246,7 +184,8 @@
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0xf8037000 0x1000>;
 			interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&pll2div24>, <&apb_pclk>;
+			clocks = <&clkctrl 13>,
+				<&clkctrl 12>;
 			clock-names = "uart_clk", "apb_pclk";
 			status = "disabled";
 		};
@@ -254,7 +193,8 @@
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0xf8038000 0x1000>;
 			interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&pll2div24>, <&apb_pclk>;
+			clocks = <&clkctrl 13>,
+				<&clkctrl 12>;
 			clock-names = "uart_clk", "apb_pclk";
 			status = "disabled";
 		};
@@ -262,7 +202,8 @@
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0xf8039000 0x1000>;
 			interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&pll2div24>, <&apb_pclk>;
+			clocks = <&clkctrl 13>,
+				<&clkctrl 12>;
 			clock-names = "uart_clk", "apb_pclk";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/aspeed-ast2500-evb.dts b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
new file mode 100644
index 0000000..1b7a5ff
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
@@ -0,0 +1,25 @@
+/dts-v1/;
+
+#include "aspeed-g5.dtsi"
+
+/ {
+	model = "AST2500 EVB";
+	compatible = "aspeed,ast2500";
+
+	aliases {
+		serial4 = &uart5;
+	};
+
+	chosen {
+		stdout-path = &uart5;
+		bootargs = "console=ttyS4,115200 earlyprintk";
+	};
+
+	memory {
+		reg = <0x80000000 0x20000000>;
+	};
+};
+
+&uart5 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
new file mode 100644
index 0000000..cc5fcf2
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
@@ -0,0 +1,25 @@
+/dts-v1/;
+
+#include "aspeed-g4.dtsi"
+
+/ {
+	model = "Palmetto BMC";
+	compatible = "tyan,palmetto-bmc", "aspeed,ast2400";
+
+	aliases {
+		serial4 = &uart5;
+	};
+
+	chosen {
+		stdout-path = &uart5;
+		bootargs = "console=ttyS4,38400 earlyprintk";
+	};
+
+	memory {
+		reg = <0x40000000 0x10000000>;
+	};
+};
+
+&uart5 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
new file mode 100644
index 0000000..22dee59
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -0,0 +1,161 @@
+#include "skeleton.dtsi"
+
+/ {
+	model = "Aspeed BMC";
+	compatible = "aspeed,ast2400";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	interrupt-parent = <&vic>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			compatible = "arm,arm926ej-s";
+			device_type = "cpu";
+			reg = <0>;
+		};
+	};
+
+	clocks {
+		clk_clkin: clk_clkin {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <48000000>;
+		};
+
+	};
+
+	ahb {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		vic: interrupt-controller@1e6c0080 {
+			compatible = "aspeed,ast2400-vic";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			valid-sources = <0xffffffff 0x0007ffff>;
+			reg = <0x1e6c0080 0x80>;
+		};
+
+		apb {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			clk_hpll: clk_hpll@1e6e2070 {
+				#clock-cells = <0>;
+				compatible = "aspeed,g4-hpll-clock";
+				reg = <0x1e6e2070 0x4>;
+				clocks = <&clk_clkin>;
+			};
+
+			clk_apb: clk_apb@1e6e2008 {
+				#clock-cells = <0>;
+				compatible = "aspeed,g4-apb-clock";
+				reg = <0x1e6e2008 0x4>;
+				clocks = <&clk_hpll>;
+			};
+
+			clk_uart: clk_uart@1e6e2008 {
+				#clock-cells = <0>;
+				compatible = "aspeed,uart-clock";
+				reg = <0x1e6e202c 0x4>;
+			};
+
+			sram@1e720000 {
+				compatible = "mmio-sram";
+				reg = <0x1e720000 0x8000>;	// 32K
+			};
+
+			timer: timer@1e782000 {
+				compatible = "aspeed,ast2400-timer";
+				reg = <0x1e782000 0x90>;
+				// The moxart_timer driver registers only one
+				// interrupt and assumes it's for timer 1
+				//interrupts = <16 17 18 35 36 37 38 39>;
+				interrupts = <16>;
+				clocks = <&clk_apb>;
+			};
+
+			wdt1: wdt@1e785000 {
+				compatible = "aspeed,wdt";
+				reg = <0x1e785000 0x1c>;
+				interrupts = <27>;
+			};
+
+			wdt2: wdt@1e785020 {
+				compatible = "aspeed,wdt";
+				reg = <0x1e785020 0x1c>;
+				interrupts = <27>;
+				clocks = <&clk_apb>;
+				status = "disabled";
+			};
+
+			uart1: serial@1e783000 {
+				compatible = "ns16550a";
+				reg = <0x1e783000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <9>;
+				clocks = <&clk_uart>;
+				no-loopback-test;
+				status = "disabled";
+			};
+
+			uart2: serial@1e78d000 {
+				compatible = "ns16550a";
+				reg = <0x1e78d000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <32>;
+				clocks = <&clk_uart>;
+				no-loopback-test;
+				status = "disabled";
+			};
+
+			uart3: serial@1e78e000 {
+				compatible = "ns16550a";
+				reg = <0x1e78e000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <33>;
+				clocks = <&clk_uart>;
+				no-loopback-test;
+				status = "disabled";
+			};
+
+			uart4: serial@1e78f000 {
+				compatible = "ns16550a";
+				reg = <0x1e78f000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <34>;
+				clocks = <&clk_uart>;
+				no-loopback-test;
+				status = "disabled";
+			};
+
+			uart5: serial@1e784000 {
+				compatible = "ns16550a";
+				reg = <0x1e784000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <10>;
+				clocks = <&clk_uart>;
+				current-speed = <38400>;
+				no-loopback-test;
+				status = "disabled";
+			};
+
+			uart6: serial@1e787000 {
+				compatible = "ns16550a";
+				reg = <0x1e787000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <10>;
+				clocks = <&clk_uart>;
+				no-loopback-test;
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
new file mode 100644
index 0000000..dd94d93
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -0,0 +1,170 @@
+#include "skeleton.dtsi"
+
+/ {
+	model = "Aspeed BMC";
+	compatible = "aspeed,ast2500";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	interrupt-parent = <&vic>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			compatible = "arm,arm1176jzf-s";
+			device_type = "cpu";
+			reg = <0>;
+		};
+	};
+
+	ahb {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		vic: interrupt-controller@1e6c0080 {
+			compatible = "aspeed,ast2400-vic";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			valid-sources = <0xfefff7ff 0x0807ffff>;
+			reg = <0x1e6c0080 0x80>;
+		};
+
+		apb {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			clk_clkin: clk_clkin@1e6e2070 {
+				#clock-cells = <0>;
+				compatible = "aspeed,g5-clkin-clock";
+				reg = <0x1e6e2070 0x04>;
+			};
+
+			clk_hpll: clk_hpll@1e6e2024 {
+				#clock-cells = <0>;
+				compatible = "aspeed,g5-hpll-clock";
+				reg = <0x1e6e2024 0x4>;
+				clocks = <&clk_clkin>;
+			};
+
+			clk_ahb: clk_ahb@1e6e2070 {
+				#clock-cells = <0>;
+				compatible = "aspeed,g5-ahb-clock";
+				reg = <0x1e6e2070 0x4>;
+				clocks = <&clk_hpll>;
+			};
+
+			clk_apb: clk_apb@1e6e2008 {
+				#clock-cells = <0>;
+				compatible = "aspeed,g5-apb-clock";
+				reg = <0x1e6e2008 0x4>;
+				clocks = <&clk_hpll>;
+			};
+
+			clk_uart: clk_uart@1e6e2008 {
+				#clock-cells = <0>;
+				compatible = "aspeed,uart-clock";
+				reg = <0x1e6e202c 0x4>;
+			};
+
+			sram@1e720000 {
+				compatible = "mmio-sram";
+				reg = <0x1e720000 0x9000>;	// 36K
+			};
+
+			timer: timer@1e782000 {
+				compatible = "aspeed,ast2400-timer";
+				reg = <0x1e782000 0x90>;
+				// The moxart_timer driver registers only one
+				// interrupt and assumes it's for timer 1
+				//interrupts = <16 17 18 35 36 37 38 39>;
+				interrupts = <16>;
+				clocks = <&clk_apb>;
+			};
+
+			wdt1: wdt@1e785000 {
+				compatible = "aspeed,wdt";
+				reg = <0x1e785000 0x1c>;
+				interrupts = <27>;
+			};
+
+			wdt2: wdt@1e785020 {
+				compatible = "aspeed,wdt";
+				reg = <0x1e785020 0x1c>;
+				interrupts = <27>;
+				status = "disabled";
+			};
+
+			wdt3: wdt@1e785040 {
+				compatible = "aspeed,wdt";
+				reg = <0x1e785074 0x1c>;
+				status = "disabled";
+			};
+
+			uart1: serial@1e783000 {
+				compatible = "ns16550a";
+				reg = <0x1e783000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <9>;
+				clocks = <&clk_uart>;
+				no-loopback-test;
+				status = "disabled";
+			};
+
+			uart2: serial@1e78d000 {
+				compatible = "ns16550a";
+				reg = <0x1e78d000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <32>;
+				clocks = <&clk_uart>;
+				no-loopback-test;
+				status = "disabled";
+			};
+
+			uart3: serial@1e78e000 {
+				compatible = "ns16550a";
+				reg = <0x1e78e000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <33>;
+				clocks = <&clk_uart>;
+				no-loopback-test;
+				status = "disabled";
+			};
+
+			uart4: serial@1e78f000 {
+				compatible = "ns16550a";
+				reg = <0x1e78f000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <34>;
+				clocks = <&clk_uart>;
+				no-loopback-test;
+				status = "disabled";
+			};
+
+			uart5: serial@1e784000 {
+				compatible = "ns16550a";
+				reg = <0x1e784000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <10>;
+				clocks = <&clk_uart>;
+				current-speed = <38400>;
+				no-loopback-test;
+				status = "disabled";
+			};
+
+			uart6: serial@1e787000 {
+				compatible = "ns16550a";
+				reg = <0x1e787000 0x1000>;
+				reg-shift = <2>;
+				interrupts = <10>;
+				clocks = <&clk_uart>;
+				no-loopback-test;
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index 21c780f..eb4f1ac 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -234,6 +234,15 @@
 				};
 			};
 
+			shdwc@f8048010 {
+				atmel,shdwc-debouncer = <976>;
+
+				input@0 {
+					reg = <0>;
+					atmel,wakeup-type = "low";
+				};
+			};
+
 			watchdog@f8048040 {
 				status = "okay";
 			};
diff --git a/arch/arm/boot/dts/at91-vinco.dts b/arch/arm/boot/dts/at91-vinco.dts
index 79aec55..6a366ee 100644
--- a/arch/arm/boot/dts/at91-vinco.dts
+++ b/arch/arm/boot/dts/at91-vinco.dts
@@ -118,7 +118,7 @@
 
 				ethernet-phy@1 {
 					reg = <0x1>;
-					reset-gpios = <&pioE 8 GPIO_ACTIVE_HIGH>;
+					reset-gpios = <&pioE 8 GPIO_ACTIVE_LOW>;
 					interrupt-parent = <&pioB>;
 					interrupts = <15 IRQ_TYPE_EDGE_FALLING>;
 				};
@@ -162,7 +162,7 @@
 					reg = <0x1>;
 					interrupt-parent = <&pioB>;
 					interrupts = <31 IRQ_TYPE_EDGE_FALLING>;
-					reset-gpios = <&pioE 6 GPIO_ACTIVE_HIGH>;
+					reset-gpios = <&pioE 6 GPIO_ACTIVE_LOW>;
 				};
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index af8b708..8837b7e 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -978,7 +978,7 @@
 
 			trng@fffcc000 {
 				compatible = "atmel,at91sam9g45-trng";
-				reg = <0xfffcc000 0x4000>;
+				reg = <0xfffcc000 0x100>;
 				interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
 				clocks = <&trng_clk>;
 			};
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 0827d59..cd0cd5f 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -106,7 +106,7 @@
 
 			pmc: pmc@fffffc00 {
 				compatible = "atmel,at91sam9x5-pmc", "syscon";
-				reg = <0xfffffc00 0x100>;
+				reg = <0xfffffc00 0x200>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
 				interrupt-controller;
 				#address-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index 3878793..b42fe55 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -351,9 +351,16 @@
 					<&pinctrl 142 10 1>;
 		};
 
-		touchscreen: tsc@180a6000 {
+		ts_adc_syscon: ts_adc_syscon@180a6000 {
+			compatible = "brcm,iproc-ts-adc-syscon", "syscon";
+			reg = <0x180a6000 0xc30>;
+		};
+
+		touchscreen: touchscreen@180a6000 {
 			compatible = "brcm,iproc-touchscreen";
-			reg = <0x180a6000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ts_syscon = <&ts_adc_syscon>;
 			clocks = <&asiu_clks BCM_CYGNUS_ASIU_ADC_CLK>;
 			clock-names = "tsc_clk";
 			interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
index 228614f..35ff4e7a 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
@@ -29,3 +29,7 @@
 		brcm,function = <BCM2835_FSEL_ALT0>;
 	};
 };
+
+&hdmi {
+	hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a.dts b/arch/arm/boot/dts/bcm2835-rpi-a.dts
index ddbbbbd..306a84e 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-a.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-a.dts
@@ -22,3 +22,7 @@
 		brcm,function = <BCM2835_FSEL_ALT2>;
 	};
 };
+
+&hdmi {
+	hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
index ef54050..57d313b 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
@@ -29,3 +29,7 @@
 		brcm,function = <BCM2835_FSEL_ALT0>;
 	};
 };
+
+&hdmi {
+	hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index 86f1f2f..cf2774e 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -22,3 +22,7 @@
 		brcm,function = <BCM2835_FSEL_ALT2>;
 	};
 };
+
+&hdmi {
+	hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts
index 4859e9d..8b15f9c 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts
@@ -16,3 +16,7 @@
 &gpio {
 	pinctrl-0 = <&gpioout &alt0 &alt3>;
 };
+
+&hdmi {
+	hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index 76bdbca..caf2707 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -74,3 +74,12 @@
 &usb {
 	power-domains = <&power RPI_POWER_DOMAIN_USB>;
 };
+
+&v3d {
+	power-domains = <&power RPI_POWER_DOMAIN_V3D>;
+};
+
+&hdmi {
+	power-domains = <&power RPI_POWER_DOMAIN_HDMI>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index b83b326..a78759e 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -3,6 +3,17 @@
 / {
 	compatible = "brcm,bcm2835";
 
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,arm1176jzf-s";
+			reg = <0x0>;
+		};
+	};
+
 	soc {
 		ranges = <0x7e000000 0x20000000 0x02000000>;
 		dma-ranges = <0x40000000 0x00000000 0x20000000>;
diff --git a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
index ff94666..c4743f4 100644
--- a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
+++ b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
@@ -33,3 +33,7 @@
 		brcm,function = <BCM2835_FSEL_ALT0>;
 	};
 };
+
+&hdmi {
+	hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 8aaf193..10b27b9 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -1,6 +1,7 @@
 #include <dt-bindings/pinctrl/bcm2835.h>
 #include <dt-bindings/clock/bcm2835.h>
 #include <dt-bindings/clock/bcm2835-aux.h>
+#include <dt-bindings/gpio/gpio.h>
 #include "skeleton.dtsi"
 
 /* This include file covers the common peripherals and configuration between
@@ -47,9 +48,29 @@
 				     <1 24>,
 				     <1 25>,
 				     <1 26>,
+				     /* dma channels 11-14 share one irq */
 				     <1 27>,
+				     <1 27>,
+				     <1 27>,
+				     <1 27>,
+				     /* unused shared irq for all channels */
 				     <1 28>;
-
+			interrupt-names = "dma0",
+					  "dma1",
+					  "dma2",
+					  "dma3",
+					  "dma4",
+					  "dma5",
+					  "dma6",
+					  "dma7",
+					  "dma8",
+					  "dma9",
+					  "dma10",
+					  "dma11",
+					  "dma12",
+					  "dma13",
+					  "dma14",
+					  "dma-shared-all";
 			#dma-cells = <1>;
 			brcm,dma-channel-mask = <0x7f35>;
 		};
@@ -153,6 +174,18 @@
 			status = "disabled";
 		};
 
+		pixelvalve@7e206000 {
+			compatible = "brcm,bcm2835-pixelvalve0";
+			reg = <0x7e206000 0x100>;
+			interrupts = <2 13>; /* pwa0 */
+		};
+
+		pixelvalve@7e207000 {
+			compatible = "brcm,bcm2835-pixelvalve1";
+			reg = <0x7e207000 0x100>;
+			interrupts = <2 14>; /* pwa1 */
+		};
+
 		aux: aux@0x7e215000 {
 			compatible = "brcm,bcm2835-aux";
 			#clock-cells = <1>;
@@ -206,6 +239,12 @@
 			status = "disabled";
 		};
 
+		hvs@7e400000 {
+			compatible = "brcm,bcm2835-hvs";
+			reg = <0x7e400000 0x6000>;
+			interrupts = <2 1>;
+		};
+
 		i2c1: i2c@7e804000 {
 			compatible = "brcm,bcm2835-i2c";
 			reg = <0x7e804000 0x1000>;
@@ -226,11 +265,39 @@
 			status = "disabled";
 		};
 
+		pixelvalve@7e807000 {
+			compatible = "brcm,bcm2835-pixelvalve2";
+			reg = <0x7e807000 0x100>;
+			interrupts = <2 10>; /* pixelvalve */
+		};
+
+		hdmi: hdmi@7e902000 {
+			compatible = "brcm,bcm2835-hdmi";
+			reg = <0x7e902000 0x600>,
+			      <0x7e808000 0x100>;
+			interrupts = <2 8>, <2 9>;
+			ddc = <&i2c2>;
+			clocks = <&clocks BCM2835_PLLH_PIX>,
+				 <&clocks BCM2835_CLOCK_HSM>;
+			clock-names = "pixel", "hdmi";
+			status = "disabled";
+		};
+
 		usb: usb@7e980000 {
 			compatible = "brcm,bcm2835-usb";
 			reg = <0x7e980000 0x10000>;
 			interrupts = <1 9>;
 		};
+
+		v3d: v3d@7ec00000 {
+			compatible = "brcm,bcm2835-v3d";
+			reg = <0x7ec00000 0x1000>;
+			interrupts = <1 10>;
+		};
+
+		vc4: gpu {
+			compatible = "brcm,bcm2835-vc4";
+		};
 	};
 
 	clocks {
diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
index 42dcdfb..5087aa8 100644
--- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
+++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
@@ -17,7 +17,7 @@
 	model = "Buffalo WZR-1750DHP (BCM4708)";
 
 	chosen {
-		bootargs = "console=ttyS0,115200";
+		bootargs = "console=ttyS0,115200 earlycon";
 	};
 
 	memory {
@@ -139,3 +139,11 @@
 &uart0 {
 	status = "okay";
 };
+
+&usb2 {
+	vcc-gpio = <&chipcommon 9 GPIO_ACTIVE_HIGH>;
+};
+
+&usb3 {
+	vcc-gpio = <&chipcommon 10 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts b/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts
index f18e80e..1c7e53d 100644
--- a/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts
+++ b/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts
@@ -17,7 +17,7 @@
 	model = "Luxul XWC-1000 (BCM4708)";
 
 	chosen {
-		bootargs = "console=ttyS0,115200";
+		bootargs = "console=ttyS0,115200 earlycon";
 	};
 
 	memory {
@@ -59,3 +59,7 @@
 &uart0 {
 	status = "okay";
 };
+
+&spi_nor {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
index ca92bba..1049ab1 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
@@ -17,24 +17,13 @@
 	model = "Netgear R6250 V1 (BCM4708)";
 
 	chosen {
-		bootargs = "console=ttyS0,115200";
+		bootargs = "console=ttyS0,115200 earlycon";
 	};
 
 	memory {
 		reg = <0x00000000 0x08000000>;
 	};
 
-	axi@18000000 {
-		usb3@23000 {
-			reg = <0x00023000 0x1000>;
-
-			#address-cells = <1>;
-			#size-cells = <1>;
-
-			vcc-gpio = <&chipcommon 0 GPIO_ACTIVE_HIGH>;
-		};
-	};
-
 	leds {
 		compatible = "gpio-leds";
 
@@ -97,3 +86,7 @@
 &uart0 {
 	status = "okay";
 };
+
+&usb3 {
+	vcc-gpio = <&chipcommon 0 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
index 64a5e8a..8b0c440 100644
--- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
+++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
@@ -17,7 +17,7 @@
 	model = "SmartRG SR400ac";
 
 	chosen {
-		bootargs = "console=ttyS0,115200";
+		bootargs = "console=ttyS0,115200 earlycon";
 	};
 
 	memory {
@@ -122,3 +122,7 @@
 &uart0 {
 	status = "okay";
 };
+
+&spi_nor {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
index 38f0c00..a9c8def 100644
--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
+++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
@@ -17,7 +17,7 @@
 	model = "Buffalo WZR-600DHP2 (BCM47081)";
 
 	chosen {
-		bootargs = "console=ttyS0,115200";
+		bootargs = "console=ttyS0,115200 earlycon";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
index 2a92e8d..791d722 100644
--- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
+++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
@@ -126,3 +126,8 @@
 		};
 	};
 };
+
+
+&usb2 {
+	vcc-gpio = <&chipcommon 13 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
index b52927c..ca18151 100644
--- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
+++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
@@ -106,3 +106,11 @@
 		};
 	};
 };
+
+&usb2 {
+	vcc-gpio = <&chipcommon 0 GPIO_ACTIVE_HIGH>;
+};
+
+&usb3 {
+	vcc-gpio = <&chipcommon 0 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
index 6c83538..ace38ef 100644
--- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
@@ -17,7 +17,7 @@
 	model = "D-Link DIR-885L";
 
 	chosen {
-		bootargs = "console=ttyS0,115200";
+		bootargs = "console=ttyS0,115200 earlycon";
 	};
 
 	memory {
@@ -109,3 +109,7 @@
 	status = "okay";
 	clock-frequency = <125000000>;
 };
+
+&usb3 {
+	vcc-gpio = <&chipcommon 18 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 65a1309..7d4d29b 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -18,6 +18,10 @@
 / {
 	interrupt-parent = <&gic>;
 
+	chosen {
+		stdout-path = &uart0;
+	};
+
 	chipcommonA {
 		compatible = "simple-bus";
 		ranges = <0x00000000 0x18000000 0x00001000>;
@@ -207,6 +211,34 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
+
+		usb2: usb2@21000 {
+			reg = <0x00021000 0x1000>;
+
+			#address-cells = <1>;
+			#size-cells = <1>;
+		};
+
+		usb3: usb3@23000 {
+			reg = <0x00023000 0x1000>;
+
+			#address-cells = <1>;
+			#size-cells = <1>;
+		};
+
+		spi@29000 {
+			reg = <0x00029000 0x1000>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			spi_nor: spi-nor@0 {
+				compatible = "jedec,spi-nor";
+				reg = <0>;
+				spi-max-frequency = <20000000>;
+				linux,part-probe = "ofpart", "bcm47xxpart";
+				status = "disabled";
+			};
+		};
 	};
 
 	lcpll0: lcpll0@1800c100 {
diff --git a/arch/arm/boot/dts/cros-adc-thermistors.dtsi b/arch/arm/boot/dts/cros-adc-thermistors.dtsi
index acd4fe1..ce7fca7 100644
--- a/arch/arm/boot/dts/cros-adc-thermistors.dtsi
+++ b/arch/arm/boot/dts/cros-adc-thermistors.dtsi
@@ -13,28 +13,28 @@
 */
 
 &adc {
-	ncp15wb473@3 {
+	thermistor3 {
 		compatible = "murata,ncp15wb473";
 		pullup-uv = <1800000>;
 		pullup-ohm = <47000>;
 		pulldown-ohm = <0>;
 		io-channels = <&adc 3>;
 	};
-	ncp15wb473@4 {
+	thermistor4 {
 		compatible = "murata,ncp15wb473";
 		pullup-uv = <1800000>;
 		pullup-ohm = <47000>;
 		pulldown-ohm = <0>;
 		io-channels = <&adc 4>;
 	};
-	ncp15wb473@5 {
+	thermistor5 {
 		compatible = "murata,ncp15wb473";
 		pullup-uv = <1800000>;
 		pullup-ohm = <47000>;
 		pulldown-ohm = <0>;
 		io-channels = <&adc 5>;
 	};
-	ncp15wb473@6 {
+	thermistor6 {
 		compatible = "murata,ncp15wb473";
 		pullup-uv = <1800000>;
 		pullup-ohm = <47000>;
diff --git a/arch/arm/boot/dts/da850-enbw-cmc.dts b/arch/arm/boot/dts/da850-enbw-cmc.dts
index 645549e..14dff3e 100644
--- a/arch/arm/boot/dts/da850-enbw-cmc.dts
+++ b/arch/arm/boot/dts/da850-enbw-cmc.dts
@@ -16,14 +16,20 @@
 	compatible = "enbw,cmc", "ti,da850";
 	model = "EnBW CMC";
 
-	soc {
-		serial0: serial@1c42000 {
+	soc@1c00000 {
+		serial0: serial@42000 {
 			status = "okay";
 		};
-		serial1: serial@1d0c000 {
+		serial1: serial@10c000 {
 			status = "okay";
 		};
-		serial2: serial@1d0d000 {
+		serial2: serial@10d000 {
+			status = "okay";
+		};
+		mdio: mdio@224000 {
+			status = "okay";
+		};
+		eth0: ethernet@220000 {
 			status = "okay";
 		};
 	};
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index ef061e9..1a15db8 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -14,8 +14,8 @@
 	compatible = "ti,da850-evm", "ti,da850";
 	model = "DA850/AM1808/OMAP-L138 EVM";
 
-	soc {
-		pmx_core: pinmux@1c14120 {
+	soc@1c00000 {
+		pmx_core: pinmux@14120 {
 			status = "okay";
 
 			mcasp0_pins: pinmux_mcasp0_pins {
@@ -30,19 +30,19 @@
 				>;
 			};
 		};
-		serial0: serial@1c42000 {
+		serial0: serial@42000 {
 			status = "okay";
 		};
-		serial1: serial@1d0c000 {
+		serial1: serial@10c000 {
 			status = "okay";
 		};
-		serial2: serial@1d0d000 {
+		serial2: serial@10d000 {
 			status = "okay";
 		};
-		rtc0: rtc@1c23000 {
+		rtc0: rtc@23000 {
 			status = "okay";
 		};
-		i2c0: i2c@1c22000 {
+		i2c0: i2c@22000 {
 			status = "okay";
 			clock-frequency = <100000>;
 			pinctrl-names = "default";
@@ -66,17 +66,17 @@
 			};
 
 		};
-		wdt: wdt@1c21000 {
+		wdt: wdt@21000 {
 			status = "okay";
 		};
-		mmc0: mmc@1c40000 {
+		mmc0: mmc@40000 {
 			max-frequency = <50000000>;
 			bus-width = <4>;
 			status = "okay";
 			pinctrl-names = "default";
 			pinctrl-0 = <&mmc0_pins>;
 		};
-		spi1: spi@1f0e000 {
+		spi1: spi@30e000 {
 			status = "okay";
 			pinctrl-names = "default";
 			pinctrl-0 = <&spi1_pins &spi1_cs0_pin>;
@@ -116,18 +116,18 @@
 				};
 			};
 		};
-		mdio: mdio@1e24000 {
+		mdio: mdio@224000 {
 			status = "okay";
 			pinctrl-names = "default";
 			pinctrl-0 = <&mdio_pins>;
 			bus_freq = <2200000>;
 		};
-		eth0: ethernet@1e20000 {
+		eth0: ethernet@220000 {
 			status = "okay";
 			pinctrl-names = "default";
 			pinctrl-0 = <&mii_pins>;
 		};
-		gpio: gpio@1e26000 {
+		gpio: gpio@226000 {
 			status = "okay";
 		};
 	};
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index 226cda7..25f0f8e 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -15,15 +15,15 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
-		intc: interrupt-controller {
+		intc: interrupt-controller@fffee000 {
 			compatible = "ti,cp-intc";
 			interrupt-controller;
 			#interrupt-cells = <1>;
-			ti,intc-size = <100>;
+			ti,intc-size = <101>;
 			reg = <0xfffee000 0x2000>;
 		};
 	};
-	soc {
+	soc@1c00000 {
 		compatible = "simple-bus";
 		model = "da850";
 		#address-cells = <1>;
@@ -31,7 +31,7 @@
 		ranges = <0x0 0x01c00000 0x400000>;
 		interrupt-parent = <&intc>;
 
-		pmx_core: pinmux@1c14120 {
+		pmx_core: pinmux@14120 {
 			compatible = "pinctrl-single";
 			reg = <0x14120 0x50>;
 			#address-cells = <1>;
@@ -63,6 +63,12 @@
 					0x10 0x00002200 0x0000ff00
 				>;
 			};
+			i2c1_pins: pinmux_i2c1_pins {
+				pinctrl-single,bits = <
+					/* I2C1_SDA, I2C1_SCL */
+					0x10 0x00440000 0x00ff0000
+				>;
+			};
 			mmc0_pins: pinmux_mmc_pins {
 				pinctrl-single,bits = <
 					/* MMCSD0_DAT[3] MMCSD0_DAT[2]
@@ -114,7 +120,19 @@
 					0x4 0x00000004 0x0000000f
 				>;
 			};
-			spi1_pins: pinmux_spi_pins {
+			spi0_pins: pinmux_spi0_pins {
+				pinctrl-single,bits = <
+					/* SIMO, SOMI, CLK */
+					0xc 0x00001101 0x0000ff0f
+				>;
+			};
+			spi0_cs0_pin: pinmux_spi0_cs0 {
+				pinctrl-single,bits = <
+					/* CS0 */
+					0x10 0x00000010 0x000000f0
+				>;
+			};
+			spi1_pins: pinmux_spi1_pins {
 				pinctrl-single,bits = <
 					/* SIMO, SOMI, CLK */
 					0x14 0x00110100 0x00ff0f00
@@ -150,7 +168,7 @@
 			};
 
 		};
-		edma0: edma@01c00000 {
+		edma0: edma@0 {
 			compatible = "ti,edma3-tpcc";
 			/* eDMA3 CC0: 0x01c0 0000 - 0x01c0 7fff */
 			reg =	<0x0 0x8000>;
@@ -161,19 +179,19 @@
 
 			ti,tptcs = <&edma0_tptc0 7>, <&edma0_tptc1 0>;
 		};
-		edma0_tptc0: tptc@01c08000 {
+		edma0_tptc0: tptc@8000 {
 			compatible = "ti,edma3-tptc";
 			reg =	<0x8000 0x400>;
 			interrupts = <13>;
 			interrupt-names = "edm3_tcerrint";
 		};
-		edma0_tptc1: tptc@01c08400 {
+		edma0_tptc1: tptc@8400 {
 			compatible = "ti,edma3-tptc";
 			reg =	<0x8400 0x400>;
 			interrupts = <32>;
 			interrupt-names = "edm3_tcerrint";
 		};
-		edma1: edma@01e30000 {
+		edma1: edma@230000 {
 			compatible = "ti,edma3-tpcc";
 			/* eDMA3 CC1: 0x01e3 0000 - 0x01e3 7fff */
 			reg =	<0x230000 0x8000>;
@@ -184,41 +202,41 @@
 
 			ti,tptcs = <&edma1_tptc0 7>;
 		};
-		edma1_tptc0: tptc@01e38000 {
+		edma1_tptc0: tptc@238000 {
 			compatible = "ti,edma3-tptc";
 			reg =	<0x238000 0x400>;
 			interrupts = <95>;
 			interrupt-names = "edm3_tcerrint";
 		};
-		serial0: serial@1c42000 {
+		serial0: serial@42000 {
 			compatible = "ns16550a";
 			reg = <0x42000 0x100>;
 			reg-shift = <2>;
 			interrupts = <25>;
 			status = "disabled";
 		};
-		serial1: serial@1d0c000 {
+		serial1: serial@10c000 {
 			compatible = "ns16550a";
 			reg = <0x10c000 0x100>;
 			reg-shift = <2>;
 			interrupts = <53>;
 			status = "disabled";
 		};
-		serial2: serial@1d0d000 {
+		serial2: serial@10d000 {
 			compatible = "ns16550a";
 			reg = <0x10d000 0x100>;
 			reg-shift = <2>;
 			interrupts = <61>;
 			status = "disabled";
 		};
-		rtc0: rtc@1c23000 {
+		rtc0: rtc@23000 {
 			compatible = "ti,da830-rtc";
 			reg = <0x23000 0x1000>;
 			interrupts = <19
 				      19>;
 			status = "disabled";
 		};
-		i2c0: i2c@1c22000 {
+		i2c0: i2c@22000 {
 			compatible = "ti,davinci-i2c";
 			reg = <0x22000 0x1000>;
 			interrupts = <15>;
@@ -226,12 +244,20 @@
 			#size-cells = <0>;
 			status = "disabled";
 		};
-		wdt: wdt@1c21000 {
+		i2c1: i2c@228000 {
+			compatible = "ti,davinci-i2c";
+			reg = <0x228000 0x1000>;
+			interrupts = <51>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+		wdt: wdt@21000 {
 			compatible = "ti,davinci-wdt";
 			reg = <0x21000 0x1000>;
 			status = "disabled";
 		};
-		mmc0: mmc@1c40000 {
+		mmc0: mmc@40000 {
 			compatible = "ti,da830-mmc";
 			reg = <0x40000 0x1000>;
 			interrupts = <16>;
@@ -239,7 +265,7 @@
 			dma-names = "rx", "tx";
 			status = "disabled";
 		};
-		mmc1: mmc@1e1b000 {
+		mmc1: mmc@21b000 {
 			compatible = "ti,da830-mmc";
 			reg = <0x21b000 0x1000>;
 			interrupts = <72>;
@@ -247,37 +273,47 @@
 			dma-names = "rx", "tx";
 			status = "disabled";
 		};
-		ehrpwm0: ehrpwm@01f00000 {
+		ehrpwm0: pwm@300000 {
 			compatible = "ti,da850-ehrpwm", "ti,am33xx-ehrpwm";
 			#pwm-cells = <3>;
 			reg = <0x300000 0x2000>;
 			status = "disabled";
 		};
-		ehrpwm1: ehrpwm@01f02000 {
+		ehrpwm1: pwm@302000 {
 			compatible = "ti,da850-ehrpwm", "ti,am33xx-ehrpwm";
 			#pwm-cells = <3>;
 			reg = <0x302000 0x2000>;
 			status = "disabled";
 		};
-		ecap0: ecap@01f06000 {
+		ecap0: ecap@306000 {
 			compatible = "ti,da850-ecap", "ti,am33xx-ecap";
 			#pwm-cells = <3>;
 			reg = <0x306000 0x80>;
 			status = "disabled";
 		};
-		ecap1: ecap@01f07000 {
+		ecap1: ecap@307000 {
 			compatible = "ti,da850-ecap", "ti,am33xx-ecap";
 			#pwm-cells = <3>;
 			reg = <0x307000 0x80>;
 			status = "disabled";
 		};
-		ecap2: ecap@01f08000 {
+		ecap2: ecap@308000 {
 			compatible = "ti,da850-ecap", "ti,am33xx-ecap";
 			#pwm-cells = <3>;
 			reg = <0x308000 0x80>;
 			status = "disabled";
 		};
-		spi1: spi@1f0e000 {
+		spi0: spi@41000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "ti,da830-spi";
+			reg = <0x41000 0x1000>;
+			num-cs = <6>;
+			ti,davinci-spi-intr-line = <1>;
+			interrupts = <20>;
+			status = "disabled";
+		};
+		spi1: spi@30e000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
 			compatible = "ti,da830-spi";
@@ -289,13 +325,14 @@
 			dma-names = "rx", "tx";
 			status = "disabled";
 		};
-		mdio: mdio@1e24000 {
+		mdio: mdio@224000 {
 			compatible = "ti,davinci_mdio";
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x224000 0x1000>;
+			status = "disabled";
 		};
-		eth0: ethernet@1e20000 {
+		eth0: ethernet@220000 {
 			compatible = "ti,davinci-dm6467-emac";
 			reg = <0x220000 0x4000>;
 			ti,davinci-ctrl-reg-offset = <0x3000>;
@@ -308,10 +345,12 @@
 					35
 					36
 					>;
+			status = "disabled";
 		};
-		gpio: gpio@1e26000 {
+		gpio: gpio@226000 {
 			compatible = "ti,dm6441-gpio";
 			gpio-controller;
+			#gpio-cells = <2>;
 			reg = <0x226000 0x1000>;
 			interrupts = <42 IRQ_TYPE_EDGE_BOTH
 				43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
@@ -323,7 +362,7 @@
 			status = "disabled";
 		};
 
-		mcasp0: mcasp@01d00000 {
+		mcasp0: mcasp@100000 {
 			compatible = "ti,da830-mcasp-audio";
 			reg = <0x100000 0x2000>,
 			      <0x102000 0x400000>;
diff --git a/arch/arm/boot/dts/dm814x-clocks.dtsi b/arch/arm/boot/dts/dm814x-clocks.dtsi
index e0ea6a9..c4671af 100644
--- a/arch/arm/boot/dts/dm814x-clocks.dtsi
+++ b/arch/arm/boot/dts/dm814x-clocks.dtsi
@@ -4,8 +4,159 @@
  * published by the Free Software Foundation.
  */
 
+&pllss {
+	/*
+	 * See TRM "2.6.10 Connected Outputs of DPLLS" and
+	 * "2.6.11 Connected Outputs of DPLLJ". Only clkout is
+	 * connected except for hdmi and usb.
+	 */
+	adpll_mpu_ck: adpll@40 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-s-clock";
+		reg = <0x40 0x40>;
+		clocks = <&devosc_ck &devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow", "clkinphif";
+		clock-output-names = "481c5040.adpll.dcoclkldo",
+				     "481c5040.adpll.clkout",
+				     "481c5040.adpll.clkoutx2",
+				     "481c5040.adpll.clkouthif";
+	};
+
+	adpll_dsp_ck: adpll@80 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0x80 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c5080.adpll.dcoclkldo",
+				     "481c5080.adpll.clkout",
+				     "481c5080.adpll.clkoutldo";
+	};
+
+	adpll_sgx_ck: adpll@b0 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0xb0 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c50b0.adpll.dcoclkldo",
+				     "481c50b0.adpll.clkout",
+				     "481c50b0.adpll.clkoutldo";
+	};
+
+	adpll_hdvic_ck: adpll@e0 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0xe0 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c50e0.adpll.dcoclkldo",
+				     "481c50e0.adpll.clkout",
+				     "481c50e0.adpll.clkoutldo";
+	};
+
+	adpll_l3_ck: adpll@110 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0x110 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c5110.adpll.dcoclkldo",
+				     "481c5110.adpll.clkout",
+				     "481c5110.adpll.clkoutldo";
+	};
+
+	adpll_isp_ck: adpll@140 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0x140 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c5140.adpll.dcoclkldo",
+				     "481c5140.adpll.clkout",
+				     "481c5140.adpll.clkoutldo";
+	};
+
+	adpll_dss_ck: adpll@170 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0x170 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c5170.adpll.dcoclkldo",
+				     "481c5170.adpll.clkout",
+				     "481c5170.adpll.clkoutldo";
+	};
+
+	adpll_video0_ck: adpll@1a0 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0x1a0 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c51a0.adpll.dcoclkldo",
+				     "481c51a0.adpll.clkout",
+				     "481c51a0.adpll.clkoutldo";
+	};
+
+	adpll_video1_ck: adpll@1d0 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0x1d0 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c51d0.adpll.dcoclkldo",
+				     "481c51d0.adpll.clkout",
+				     "481c51d0.adpll.clkoutldo";
+	};
+
+	adpll_hdmi_ck: adpll@200 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0x200 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c5200.adpll.dcoclkldo",
+				     "481c5200.adpll.clkout",
+				     "481c5200.adpll.clkoutldo";
+	};
+
+	adpll_audio_ck: adpll@230 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0x230 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c5230.adpll.dcoclkldo",
+				     "481c5230.adpll.clkout",
+				     "481c5230.adpll.clkoutldo";
+	};
+
+	adpll_usb_ck: adpll@260 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0x260 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c5260.adpll.dcoclkldo",
+				     "481c5260.adpll.clkout",
+				     "481c5260.adpll.clkoutldo";
+	};
+
+	adpll_ddr_ck: adpll@290 {
+		#clock-cells = <1>;
+		compatible = "ti,dm814-adpll-lj-clock";
+		reg = <0x290 0x30>;
+		clocks = <&devosc_ck &devosc_ck>;
+		clock-names = "clkinp", "clkinpulow";
+		clock-output-names = "481c5290.adpll.dcoclkldo",
+				     "481c5290.adpll.clkout",
+				     "481c5290.adpll.clkoutldo";
+	};
+};
+
 &pllss_clocks {
-	timer1_fck: timer1_fck {
+	timer1_fck: timer1_fck@2e0 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sysclk18_ck &aud_clkin0_ck &aud_clkin1_ck
@@ -14,7 +165,7 @@
 		reg = <0x2e0>;
 	};
 
-	timer2_fck: timer2_fck {
+	timer2_fck: timer2_fck@2e0 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sysclk18_ck &aud_clkin0_ck &aud_clkin1_ck
@@ -23,7 +174,25 @@
 		reg = <0x2e0>;
 	};
 
-	sysclk18_ck: sysclk18_ck {
+	/* CPTS_RFT_CLK in RMII_REFCLK_SRC, usually sourced from audio */
+	cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&adpll_video0_ck 1
+			  &adpll_video1_ck 1
+			  &adpll_audio_ck 1>;
+		ti,bit-shift = <1>;
+		reg = <0x2e8>;
+	};
+
+	/* REVISIT: Set up with a proper mux using RMII_REFCLK_SRC */
+	cpsw_125mhz_gclk: cpsw_125mhz_gclk {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <125000000>;
+	};
+
+	sysclk18_ck: sysclk18_ck@2f0 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&rtcosc_ck>, <&rtcdivider_ck>;
@@ -33,7 +202,7 @@
 };
 
 &scm_clocks {
-	devosc_ck: devosc_ck {
+	devosc_ck: devosc_ck@40 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&virt_20000000_ck>, <&virt_19200000_ck>;
@@ -79,37 +248,6 @@
 		compatible = "fixed-clock";
 		clock-frequency = <1000000000>;
 	};
-
-	sysclk4_ck: sysclk4_ck {
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <222000000>;
-	};
-
-	sysclk6_ck: sysclk6_ck {
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <100000000>;
-	};
-
-	sysclk10_ck: sysclk10_ck {
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <48000000>;
-	};
-
-        cpsw_125mhz_gclk: cpsw_125mhz_gclk {
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <125000000>;
-	};
-
-	cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <250000000>;
-	};
-
 };
 
 &prcm_clocks {
@@ -121,7 +259,7 @@
 		clock-div = <1>;
 	};
 
-	mpu_clksrc_ck: mpu_clksrc_ck {
+	mpu_clksrc_ck: mpu_clksrc_ck@40 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&devosc_ck>, <&rtcdivider_ck>;
@@ -138,6 +276,49 @@
 		clock-div = <78125>;
 	};
 
+	/* L4_HS 220 MHz*/
+	sysclk4_ck: sysclk4_ck {
+		#clock-cells = <0>;
+		compatible = "ti,fixed-factor-clock";
+		clocks = <&adpll_l3_ck 1>;
+		ti,clock-mult = <1>;
+		ti,clock-div = <1>;
+	};
+
+	/* L4_FWCFG */
+	sysclk5_ck: sysclk5_ck {
+		#clock-cells = <0>;
+		compatible = "ti,fixed-factor-clock";
+		clocks = <&adpll_l3_ck 1>;
+		ti,clock-mult = <1>;
+		ti,clock-div = <2>;
+	};
+
+	/* L4_LS 110 MHz */
+	sysclk6_ck: sysclk6_ck {
+		#clock-cells = <0>;
+		compatible = "ti,fixed-factor-clock";
+		clocks = <&adpll_l3_ck 1>;
+		ti,clock-mult = <1>;
+		ti,clock-div = <2>;
+	};
+
+	sysclk8_ck: sysclk8_ck {
+		#clock-cells = <0>;
+		compatible = "ti,fixed-factor-clock";
+		clocks = <&adpll_usb_ck 1>;
+		ti,clock-mult = <1>;
+		ti,clock-div = <1>;
+	};
+
+	sysclk10_ck: sysclk10_ck {
+		compatible = "ti,divider-clock";
+		reg = <0x324>;
+		ti,max-div = <7>;
+		#clock-cells = <0>;
+		clocks = <&adpll_usb_ck 1>;
+	};
+
 	aud_clkin0_ck: aud_clkin0_ck {
 		#clock-cells = <0>;
 		compatible = "fixed-clock";
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 4a6ce8c..d4537dc 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -568,6 +568,8 @@
 			#size-cells = <1>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
+			gpio-controller;
+			#gpio-cells = <2>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/dm816x-clocks.dtsi b/arch/arm/boot/dts/dm816x-clocks.dtsi
index 50d9d33..51865eb 100644
--- a/arch/arm/boot/dts/dm816x-clocks.dtsi
+++ b/arch/arm/boot/dts/dm816x-clocks.dtsi
@@ -86,7 +86,7 @@
 
 /* 0x48180000 */
 &prcm_clocks {
-	clkout_pre_ck: clkout_pre_ck {
+	clkout_pre_ck: clkout_pre_ck@100 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&main_fapll 5 &ddr_fapll 1 &video_fapll 1
@@ -94,7 +94,7 @@
 		reg = <0x100>;
 	};
 
-	clkout_div_ck: clkout_div_ck {
+	clkout_div_ck: clkout_div_ck@100 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&clkout_pre_ck>;
@@ -103,7 +103,7 @@
 		reg = <0x100>;
 	};
 
-	clkout_ck: clkout_ck {
+	clkout_ck: clkout_ck@100 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkout_div_ck>;
@@ -112,7 +112,7 @@
 	};
 
 	/* CM_DPLL clocks p1795 */
-	sysclk1_ck: sysclk1_ck {
+	sysclk1_ck: sysclk1_ck@300 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&main_fapll 1>;
@@ -120,7 +120,7 @@
 		reg = <0x0300>;
 	};
 
-	sysclk2_ck: sysclk2_ck {
+	sysclk2_ck: sysclk2_ck@304 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&main_fapll 2>;
@@ -128,7 +128,7 @@
 		reg = <0x0304>;
 	};
 
-	sysclk3_ck: sysclk3_ck {
+	sysclk3_ck: sysclk3_ck@308 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&main_fapll 3>;
@@ -136,7 +136,7 @@
 		reg = <0x0308>;
 	};
 
-	sysclk4_ck: sysclk4_ck {
+	sysclk4_ck: sysclk4_ck@30c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&main_fapll 4>;
@@ -144,7 +144,7 @@
 		reg = <0x030c>;
 	};
 
-	sysclk5_ck: sysclk5_ck {
+	sysclk5_ck: sysclk5_ck@310 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sysclk4_ck>;
@@ -152,7 +152,7 @@
 		reg = <0x0310>;
 	};
 
-	sysclk6_ck: sysclk6_ck {
+	sysclk6_ck: sysclk6_ck@314 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&main_fapll 4>;
@@ -160,7 +160,7 @@
 		reg = <0x0314>;
 	};
 
-	sysclk10_ck: sysclk10_ck {
+	sysclk10_ck: sysclk10_ck@324 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&ddr_fapll 2>;
@@ -168,7 +168,7 @@
 		reg = <0x0324>;
 	};
 
-	sysclk24_ck: sysclk24_ck {
+	sysclk24_ck: sysclk24_ck@3b4 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&main_fapll 5>;
@@ -176,7 +176,7 @@
 		reg = <0x03b4>;
 	};
 
-	mpu_ck: mpu_ck {
+	mpu_ck: mpu_ck@15dc {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sysclk2_ck>;
@@ -184,7 +184,7 @@
                 reg = <0x15dc>;
 	};
 
-	audio_pll_a_ck: audio_pll_a_ck {
+	audio_pll_a_ck: audio_pll_a_ck@35c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&audio_fapll 1>;
@@ -192,56 +192,56 @@
 		reg = <0x035c>;
 	};
 
-	sysclk18_ck: sysclk18_ck {
+	sysclk18_ck: sysclk18_ck@378 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_32k_ck>, <&audio_pll_a_ck>;
 		reg = <0x0378>;
 	};
 
-	timer1_fck: timer1_fck {
+	timer1_fck: timer1_fck@390 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sysclk18_ck>, <&sys_clkin_ck>;
 		reg = <0x0390>;
 	};
 
-	timer2_fck: timer2_fck {
+	timer2_fck: timer2_fck@394 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sysclk18_ck>, <&sys_clkin_ck>;
 		reg = <0x0394>;
 	};
 
-	timer3_fck: timer3_fck {
+	timer3_fck: timer3_fck@398 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sysclk18_ck>, <&sys_clkin_ck>;
 		reg = <0x0398>;
 	};
 
-	timer4_fck: timer4_fck {
+	timer4_fck: timer4_fck@39c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sysclk18_ck>, <&sys_clkin_ck>;
 		reg = <0x039c>;
 	};
 
-	timer5_fck: timer5_fck {
+	timer5_fck: timer5_fck@3a0 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sysclk18_ck>, <&sys_clkin_ck>;
 		reg = <0x03a0>;
 	};
 
-	timer6_fck: timer6_fck {
+	timer6_fck: timer6_fck@3a4 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sysclk18_ck>, <&sys_clkin_ck>;
 		reg = <0x03a4>;
 	};
 
-	timer7_fck: timer7_fck {
+	timer7_fck: timer7_fck@3a8 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&tclkin_ck>, <&sysclk18_ck>, <&sys_clkin_ck>;
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index d9309a0..44e39c74 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -185,6 +185,8 @@
 			gpmc,num-waitpins = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
+			gpio-controller;
+			#gpio-cells = <2>;
 		};
 
 		i2c1: i2c@48028000 {
diff --git a/arch/arm/boot/dts/dra62x-clocks.dtsi b/arch/arm/boot/dts/dra62x-clocks.dtsi
index 6f98dc8..0e49741 100644
--- a/arch/arm/boot/dts/dra62x-clocks.dtsi
+++ b/arch/arm/boot/dts/dra62x-clocks.dtsi
@@ -6,6 +6,32 @@
 
 #include "dm814x-clocks.dtsi"
 
+/* Compared to dm814x, dra62x does not have hdic, l3 or dss PLLs */
+&adpll_hdvic_ck {
+	status = "disabled";
+};
+
+&adpll_l3_ck {
+	status = "disabled";
+};
+
+&adpll_dss_ck {
+	status = "disabled";
+};
+
+/* Compared to dm814x, dra62x has interconnect clocks on isp PLL */
+&sysclk4_ck {
+	clocks = <&adpll_isp_ck 1>;
+};
+
+&sysclk5_ck {
+	clocks = <&adpll_isp_ck 1>;
+};
+
+&sysclk6_ck {
+	clocks = <&adpll_isp_ck 1>;
+};
+
 /*
  * Compared to dm814x, dra62x has different shifts and more mux options.
  * Please add the extra options for sysclk_14 and 16 if really needed.
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index d9b8723..bafcfac 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -33,6 +33,7 @@
 	evm_3v3_sw: fixedregulator-evm_3v3_sw {
 		compatible = "regulator-fixed";
 		regulator-name = "evm_3v3_sw";
+		vin-supply = <&sysen1>;
 		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
 	};
@@ -64,10 +65,11 @@
 		regulator-always-on;
 		regulator-boot-on;
 		enable-active-high;
+		vin-supply = <&sysen2>;
 		gpio = <&gpio7 11 GPIO_ACTIVE_HIGH>;
 	};
 
-	sound0: sound@0 {
+	sound0: sound0 {
 		compatible = "simple-audio-card";
 		simple-audio-card,name = "DRA7xx-EVM";
 		simple-audio-card,widgets =
@@ -224,21 +226,6 @@
 		>;
 	};
 
-	qspi1_pins: pinmux_qspi1_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x344c, PIN_INPUT | MUX_MODE1)  /* gpmc_a3.qspi1_cs2 */
-			DRA7XX_CORE_IOPAD(0x3450, PIN_INPUT | MUX_MODE1)  /* gpmc_a4.qspi1_cs3 */
-			DRA7XX_CORE_IOPAD(0x3474, PIN_INPUT | MUX_MODE1)  /* gpmc_a13.qspi1_rtclk */
-			DRA7XX_CORE_IOPAD(0x3478, PIN_INPUT | MUX_MODE1)  /* gpmc_a14.qspi1_d3 */
-			DRA7XX_CORE_IOPAD(0x347c, PIN_INPUT | MUX_MODE1)  /* gpmc_a15.qspi1_d2 */
-			DRA7XX_CORE_IOPAD(0x3480, PIN_INPUT | MUX_MODE1) /* gpmc_a16.qspi1_d1 */
-			DRA7XX_CORE_IOPAD(0x3484, PIN_INPUT | MUX_MODE1)  /* gpmc_a17.qspi1_d0 */
-			DRA7XX_CORE_IOPAD(0x3488, PIN_INPUT | MUX_MODE1)  /* qpmc_a18.qspi1_sclk */
-			DRA7XX_CORE_IOPAD(0x34b8, PIN_INPUT_PULLUP | MUX_MODE1)  /* gpmc_cs2.qspi1_cs0 */
-			DRA7XX_CORE_IOPAD(0x34bc, PIN_INPUT_PULLUP | MUX_MODE1)  /* gpmc_cs3.qspi1_cs1 */
-		>;
-	};
-
 	usb1_pins: pinmux_usb1_pins {
                 pinctrl-single,pins = <
 			DRA7XX_CORE_IOPAD(0x3680, PIN_INPUT_SLEW | MUX_MODE0) /* usb1_drvvbus */
@@ -254,8 +241,9 @@
 	nand_flash_x16: nand_flash_x16 {
 		/* On DRA7 EVM, GPMC_WPN and NAND_BOOTn comes from DIP switch
 		 * So NAND flash requires following switch settings:
-		 * SW5.9 (GPMC_WPN) = LOW
-		 * SW5.1 (NAND_BOOTn) = HIGH */
+		 * SW5.1 (NAND_BOOTn) = ON (LOW)
+		 * SW5.9 (GPMC_WPN) = OFF (HIGH)
+		 */
 		pinctrl-single,pins = <
 			DRA7XX_CORE_IOPAD(0x3400, PIN_INPUT  | MUX_MODE0)	/* gpmc_ad0	*/
 			DRA7XX_CORE_IOPAD(0x3404, PIN_INPUT  | MUX_MODE0)	/* gpmc_ad1	*/
@@ -428,7 +416,7 @@
 					/* VDD_DSPEVE */
 					regulator-name = "smps45";
 					regulator-min-microvolt = < 850000>;
-					regulator-max-microvolt = <1150000>;
+					regulator-max-microvolt = <1250000>;
 					regulator-always-on;
 					regulator-boot-on;
 				};
@@ -446,7 +434,7 @@
 					/* CORE_VDD */
 					regulator-name = "smps7";
 					regulator-min-microvolt = <850000>;
-					regulator-max-microvolt = <1060000>;
+					regulator-max-microvolt = <1150000>;
 					regulator-always-on;
 					regulator-boot-on;
 				};
@@ -523,12 +511,37 @@
 					regulator-max-microvolt = <3300000>;
 					regulator-boot-on;
 				};
+
+				/* REGEN1 is unused */
+
+				regen2: regen2 {
+					/* Needed for PMIC internal resources */
+					regulator-name = "regen2";
+					regulator-boot-on;
+					regulator-always-on;
+				};
+
+				/* REGEN3 is unused */
+
+				sysen1: sysen1 {
+					/* PMIC_REGEN_3V3 */
+					regulator-name = "sysen1";
+					regulator-boot-on;
+					regulator-always-on;
+				};
+
+				sysen2: sysen2 {
+					/* PMIC_REGEN_DDR */
+					regulator-name = "sysen2";
+					regulator-boot-on;
+					regulator-always-on;
+				};
 			};
 		};
 	};
 
 	pcf_lcd: gpio@20 {
-		compatible = "nxp,pcf8575";
+		compatible = "ti,pcf8575", "nxp,pcf8575";
 		reg = <0x20>;
 		gpio-controller;
 		#gpio-cells = <2>;
@@ -539,7 +552,7 @@
 	};
 
 	pcf_gpio_21: gpio@21 {
-		compatible = "ti,pcf8575";
+		compatible = "ti,pcf8575", "nxp,pcf8575";
 		reg = <0x21>;
 		lines-initial-states = <0x1408>;
 		gpio-controller;
@@ -573,7 +586,7 @@
 	clock-frequency = <400000>;
 
 	pcf_hdmi: gpio@26 {
-		compatible = "nxp,pcf8575";
+		compatible = "ti,pcf8575", "nxp,pcf8575";
 		reg = <0x26>;
 		gpio-controller;
 		#gpio-cells = <2>;
@@ -650,18 +663,14 @@
 
 &qspi {
 	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&qspi1_pins>;
 
-	spi-max-frequency = <48000000>;
+	spi-max-frequency = <64000000>;
 	m25p80@0 {
 		compatible = "s25fl256s1";
-		spi-max-frequency = <48000000>;
+		spi-max-frequency = <64000000>;
 		reg = <0>;
 		spi-tx-bus-width = <1>;
 		spi-rx-bus-width = <4>;
-		spi-cpol;
-		spi-cpha;
 		#address-cells = <1>;
 		#size-cells = <1>;
 
@@ -748,6 +757,7 @@
 		interrupt-parent = <&gpmc>;
 		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
 			     <1 IRQ_TYPE_NONE>; /* termcount */
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 pin */
 		ti,nand-ecc-opt = "bch8";
 		ti,elm-id = <&elm>;
 		nand-bus-width = <16>;
@@ -904,6 +914,8 @@
 	serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
 		1 2 0 0
 	>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
 };
 
 &mailbox5 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 13ac882..e007401 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -123,7 +123,7 @@
 					#size-cells = <1>;
 					ranges = <0 0x0 0x1400>;
 
-					pbias_regulator: pbias_regulator {
+					pbias_regulator: pbias_regulator@e00 {
 						compatible = "ti,pbias-dra7", "ti,pbias-omap";
 						reg = <0xe00 0x4>;
 						syscon = <&scm_conf>;
@@ -161,6 +161,24 @@
 					compatible = "syscon";
 					reg = <0x1c24 0x0024>;
 				};
+
+				sdma_xbar: dma-router@b78 {
+					compatible = "ti,dra7-dma-crossbar";
+					reg = <0xb78 0xfc>;
+					#dma-cells = <1>;
+					dma-requests = <205>;
+					ti,dma-safe-map = <0>;
+					dma-masters = <&sdma>;
+				};
+
+				edma_xbar: dma-router@c78 {
+					compatible = "ti,dra7-dma-crossbar";
+					reg = <0xc78 0x7c>;
+					#dma-cells = <2>;
+					dma-requests = <204>;
+					ti,dma-safe-map = <0>;
+					dma-masters = <&edma>;
+				};
 			};
 
 			cm_core_aon: cm_core_aon@5000 {
@@ -315,13 +333,43 @@
 			dma-requests = <127>;
 		};
 
-		sdma_xbar: dma-router@4a002b78 {
-			compatible = "ti,dra7-dma-crossbar";
-			reg = <0x4a002b78 0xfc>;
-			#dma-cells = <1>;
-			dma-requests = <205>;
-			ti,dma-safe-map = <0>;
-			dma-masters = <&sdma>;
+		edma: edma@43300000 {
+			compatible = "ti,edma3-tpcc";
+			ti,hwmods = "tpcc";
+			reg = <0x43300000 0x100000>;
+			reg-names = "edma3_cc";
+			interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "edma3_ccint", "emda3_mperr",
+					  "edma3_ccerrint";
+			dma-requests = <64>;
+			#dma-cells = <2>;
+
+			ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 0>;
+
+			/*
+			 * memcpy is disabled, can be enabled with:
+			 * ti,edma-memcpy-channels = <20 21>;
+			 * for example. Note that these channels need to be
+			 * masked in the xbar as well.
+			 */
+		};
+
+		edma_tptc0: tptc@43400000 {
+			compatible = "ti,edma3-tptc";
+			ti,hwmods = "tptc0";
+			reg =	<0x43400000 0x100000>;
+			interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "edma3_tcerrint";
+		};
+
+		edma_tptc1: tptc@43500000 {
+			compatible = "ti,edma3-tptc";
+			ti,hwmods = "tptc1";
+			reg =	<0x43500000 0x100000>;
+			interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "edma3_tcerrint";
 		};
 
 		gpio1: gpio@4ae10000 {
@@ -773,12 +821,20 @@
 			ti,hwmods = "timer11";
 		};
 
+		timer12: timer@4ae20000 {
+			compatible = "ti,omap5430-timer";
+			reg = <0x4ae20000 0x80>;
+			interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+			ti,hwmods = "timer12";
+			ti,timer-alwon;
+			ti,timer-secure;
+		};
+
 		timer13: timer@48828000 {
 			compatible = "ti,omap5430-timer";
 			reg = <0x48828000 0x80>;
 			interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "timer13";
-			status = "disabled";
 		};
 
 		timer14: timer@4882a000 {
@@ -786,7 +842,6 @@
 			reg = <0x4882a000 0x80>;
 			interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "timer14";
-			status = "disabled";
 		};
 
 		timer15: timer@4882c000 {
@@ -794,7 +849,6 @@
 			reg = <0x4882c000 0x80>;
 			interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "timer15";
-			status = "disabled";
 		};
 
 		timer16: timer@4882e000 {
@@ -802,7 +856,6 @@
 			reg = <0x4882e000 0x80>;
 			interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "timer16";
-			status = "disabled";
 		};
 
 		wdt2: wdt@4ae14000 {
@@ -1404,6 +1457,8 @@
 			#size-cells = <1>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
+			gpio-controller;
+			#gpio-cells = <2>;
 			status = "disabled";
 		};
 
@@ -1418,21 +1473,136 @@
 			status = "disabled";
 		};
 
+		mcasp1: mcasp@48460000 {
+			compatible = "ti,dra7-mcasp-audio";
+			ti,hwmods = "mcasp1";
+			reg = <0x48460000 0x2000>,
+			      <0x45800000 0x1000>;
+			reg-names = "mpu","dat";
+			interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "tx", "rx";
+			dmas = <&edma_xbar 129 1>, <&edma_xbar 128 1>;
+			dma-names = "tx", "rx";
+			clocks = <&mcasp1_aux_gfclk_mux>, <&mcasp1_ahclkx_mux>,
+				 <&mcasp1_ahclkr_mux>;
+			clock-names = "fck", "ahclkx", "ahclkr";
+			status = "disabled";
+		};
+
+		mcasp2: mcasp@48464000 {
+			compatible = "ti,dra7-mcasp-audio";
+			ti,hwmods = "mcasp2";
+			reg = <0x48464000 0x2000>,
+			      <0x45c00000 0x1000>;
+			reg-names = "mpu","dat";
+			interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "tx", "rx";
+			dmas = <&edma_xbar 131 1>, <&edma_xbar 130 1>;
+			dma-names = "tx", "rx";
+			clocks = <&mcasp2_aux_gfclk_mux>, <&mcasp2_ahclkx_mux>,
+				 <&mcasp2_ahclkr_mux>;
+			clock-names = "fck", "ahclkx", "ahclkr";
+			status = "disabled";
+		};
+
 		mcasp3: mcasp@48468000 {
 			compatible = "ti,dra7-mcasp-audio";
 			ti,hwmods = "mcasp3";
-			reg = <0x48468000 0x2000>;
-			reg-names = "mpu";
+			reg = <0x48468000 0x2000>,
+			      <0x46000000 0x1000>;
+			reg-names = "mpu","dat";
 			interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "tx", "rx";
-			dmas = <&sdma_xbar 133>, <&sdma_xbar 132>;
+			dmas = <&edma_xbar 133 1>, <&edma_xbar 132 1>;
 			dma-names = "tx", "rx";
 			clocks = <&mcasp3_aux_gfclk_mux>, <&mcasp3_ahclkx_mux>;
 			clock-names = "fck", "ahclkx";
 			status = "disabled";
 		};
 
+		mcasp4: mcasp@4846c000 {
+			compatible = "ti,dra7-mcasp-audio";
+			ti,hwmods = "mcasp4";
+			reg = <0x4846c000 0x2000>,
+			      <0x48436000 0x1000>;
+			reg-names = "mpu","dat";
+			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "tx", "rx";
+			dmas = <&edma_xbar 135 1>, <&edma_xbar 134 1>;
+			dma-names = "tx", "rx";
+			clocks = <&mcasp4_aux_gfclk_mux>, <&mcasp4_ahclkx_mux>;
+			clock-names = "fck", "ahclkx";
+			status = "disabled";
+		};
+
+		mcasp5: mcasp@48470000 {
+			compatible = "ti,dra7-mcasp-audio";
+			ti,hwmods = "mcasp5";
+			reg = <0x48470000 0x2000>,
+			      <0x4843a000 0x1000>;
+			reg-names = "mpu","dat";
+			interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "tx", "rx";
+			dmas = <&edma_xbar 137 1>, <&edma_xbar 136 1>;
+			dma-names = "tx", "rx";
+			clocks = <&mcasp5_aux_gfclk_mux>, <&mcasp5_ahclkx_mux>;
+			clock-names = "fck", "ahclkx";
+			status = "disabled";
+		};
+
+		mcasp6: mcasp@48474000 {
+			compatible = "ti,dra7-mcasp-audio";
+			ti,hwmods = "mcasp6";
+			reg = <0x48474000 0x2000>,
+			      <0x4844c000 0x1000>;
+			reg-names = "mpu","dat";
+			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "tx", "rx";
+			dmas = <&edma_xbar 139 1>, <&edma_xbar 138 1>;
+			dma-names = "tx", "rx";
+			clocks = <&mcasp6_aux_gfclk_mux>, <&mcasp6_ahclkx_mux>;
+			clock-names = "fck", "ahclkx";
+			status = "disabled";
+		};
+
+		mcasp7: mcasp@48478000 {
+			compatible = "ti,dra7-mcasp-audio";
+			ti,hwmods = "mcasp7";
+			reg = <0x48478000 0x2000>,
+			      <0x48450000 0x1000>;
+			reg-names = "mpu","dat";
+			interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "tx", "rx";
+			dmas = <&edma_xbar 141 1>, <&edma_xbar 140 1>;
+			dma-names = "tx", "rx";
+			clocks = <&mcasp7_aux_gfclk_mux>, <&mcasp7_ahclkx_mux>;
+			clock-names = "fck", "ahclkx";
+			status = "disabled";
+		};
+
+		mcasp8: mcasp@4847c000 {
+			compatible = "ti,dra7-mcasp-audio";
+			ti,hwmods = "mcasp8";
+			reg = <0x4847c000 0x2000>,
+			      <0x48454000 0x1000>;
+			reg-names = "mpu","dat";
+			interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "tx", "rx";
+			dmas = <&edma_xbar 143 1>, <&edma_xbar 142 1>;
+			dma-names = "tx", "rx";
+			clocks = <&mcasp8_aux_gfclk_mux>, <&mcasp8_ahclkx_mux>;
+			clock-names = "fck", "ahclkx";
+			status = "disabled";
+		};
+
 		crossbar_mpu: crossbar@4a002a48 {
 			compatible = "ti,irq-crossbar";
 			reg = <0x4a002a48 0x130>;
diff --git a/arch/arm/boot/dts/dra72-evm-common.dtsi b/arch/arm/boot/dts/dra72-evm-common.dtsi
new file mode 100644
index 0000000..093538e
--- /dev/null
+++ b/arch/arm/boot/dts/dra72-evm-common.dtsi
@@ -0,0 +1,817 @@
+/*
+ * Copyright (C) 2014-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "dra72x.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clk/ti-dra7-atl.h>
+
+/ {
+	compatible = "ti,dra72-evm", "ti,dra722", "ti,dra72", "ti,dra7";
+
+	aliases {
+		display0 = &hdmi0;
+	};
+
+	evm_3v3: fixedregulator-evm_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "evm_3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	aic_dvdd: fixedregulator-aic_dvdd {
+		/* TPS77018DBVT */
+		compatible = "regulator-fixed";
+		regulator-name = "aic_dvdd";
+		vin-supply = <&evm_3v3>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	evm_3v3_sd: fixedregulator-sd {
+		compatible = "regulator-fixed";
+		regulator-name = "evm_3v3_sd";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		enable-active-high;
+		gpio = <&pcf_gpio_21 5 GPIO_ACTIVE_HIGH>;
+	};
+
+	extcon_usb1: extcon_usb1 {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&pcf_gpio_21 1 GPIO_ACTIVE_HIGH>;
+	};
+
+	extcon_usb2: extcon_usb2 {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&pcf_gpio_21 2 GPIO_ACTIVE_HIGH>;
+	};
+
+	hdmi0: connector {
+		compatible = "hdmi-connector";
+		label = "hdmi";
+
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&tpd12s015_out>;
+			};
+		};
+	};
+
+	tpd12s015: encoder {
+		compatible = "ti,tpd12s015";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&tpd12s015_pins>;
+
+		gpios = <&pcf_hdmi 4 GPIO_ACTIVE_HIGH>,	/* P4, CT CP HPD */
+			<&pcf_hdmi 5 GPIO_ACTIVE_HIGH>,	/* P5, LS OE */
+			<&gpio7 12 GPIO_ACTIVE_HIGH>;	/* gpio7_12/sp1_cs2, HPD */
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				tpd12s015_in: endpoint {
+					remote-endpoint = <&hdmi_out>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+
+				tpd12s015_out: endpoint {
+					remote-endpoint = <&hdmi_connector_in>;
+				};
+			};
+		};
+	};
+
+	sound0: sound0 {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "DRA7xx-EVM";
+		simple-audio-card,widgets =
+			"Headphone", "Headphone Jack",
+			"Line", "Line Out",
+			"Microphone", "Mic Jack",
+			"Line", "Line In";
+		simple-audio-card,routing =
+			"Headphone Jack",       "HPLOUT",
+			"Headphone Jack",       "HPROUT",
+			"Line Out",		"LLOUT",
+			"Line Out",		"RLOUT",
+			"MIC3L",		"Mic Jack",
+			"MIC3R",		"Mic Jack",
+			"Mic Jack",		"Mic Bias",
+			"LINE1L",               "Line In",
+			"LINE1R",               "Line In";
+		simple-audio-card,format = "dsp_b";
+		simple-audio-card,bitclock-master = <&sound0_master>;
+		simple-audio-card,frame-master = <&sound0_master>;
+		simple-audio-card,bitclock-inversion;
+
+		sound0_master: simple-audio-card,cpu {
+			sound-dai = <&mcasp3>;
+			system-clock-frequency = <5644800>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&tlv320aic3106>;
+			clocks = <&atl_clkin2_ck>;
+		};
+	};
+};
+
+&dra7_pmx_core {
+	i2c1_pins: pinmux_i2c1_pins {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x3800, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+			DRA7XX_CORE_IOPAD(0x3804, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+		>;
+	};
+
+	i2c5_pins: pinmux_i2c5_pins {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x36b4, PIN_INPUT | MUX_MODE10) /* mcasp1_axr0.i2c5_sda */
+			DRA7XX_CORE_IOPAD(0x36b8, PIN_INPUT | MUX_MODE10) /* mcasp1_axr1.i2c5_scl */
+		>;
+	};
+
+	nand_default: nand_default {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x3400, PIN_INPUT  | MUX_MODE0) /* gpmc_ad0 */
+			DRA7XX_CORE_IOPAD(0x3404, PIN_INPUT  | MUX_MODE0) /* gpmc_ad1 */
+			DRA7XX_CORE_IOPAD(0x3408, PIN_INPUT  | MUX_MODE0) /* gpmc_ad2 */
+			DRA7XX_CORE_IOPAD(0x340c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad3 */
+			DRA7XX_CORE_IOPAD(0x3410, PIN_INPUT  | MUX_MODE0) /* gpmc_ad4 */
+			DRA7XX_CORE_IOPAD(0x3414, PIN_INPUT  | MUX_MODE0) /* gpmc_ad5 */
+			DRA7XX_CORE_IOPAD(0x3418, PIN_INPUT  | MUX_MODE0) /* gpmc_ad6 */
+			DRA7XX_CORE_IOPAD(0x341c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad7 */
+			DRA7XX_CORE_IOPAD(0x3420, PIN_INPUT  | MUX_MODE0) /* gpmc_ad8 */
+			DRA7XX_CORE_IOPAD(0x3424, PIN_INPUT  | MUX_MODE0) /* gpmc_ad9 */
+			DRA7XX_CORE_IOPAD(0x3428, PIN_INPUT  | MUX_MODE0) /* gpmc_ad10 */
+			DRA7XX_CORE_IOPAD(0x342c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad11 */
+			DRA7XX_CORE_IOPAD(0x3430, PIN_INPUT  | MUX_MODE0) /* gpmc_ad12 */
+			DRA7XX_CORE_IOPAD(0x3434, PIN_INPUT  | MUX_MODE0) /* gpmc_ad13 */
+			DRA7XX_CORE_IOPAD(0x3438, PIN_INPUT  | MUX_MODE0) /* gpmc_ad14 */
+			DRA7XX_CORE_IOPAD(0x343c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad15 */
+			DRA7XX_CORE_IOPAD(0x34b4, PIN_OUTPUT | MUX_MODE0) /* gpmc_cs0 */
+			DRA7XX_CORE_IOPAD(0x34c4, PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale */
+			DRA7XX_CORE_IOPAD(0x34cc, PIN_OUTPUT | MUX_MODE0) /* gpmc_wen */
+			DRA7XX_CORE_IOPAD(0x34c8, PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren */
+			DRA7XX_CORE_IOPAD(0x34d0, PIN_OUTPUT | MUX_MODE0) /* gpmc_ben0 */
+			DRA7XX_CORE_IOPAD(0x34d8, PIN_INPUT  | MUX_MODE0) /* gpmc_wait0 */
+		>;
+	};
+
+	usb1_pins: pinmux_usb1_pins {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x3680, PIN_INPUT_SLEW | MUX_MODE0) /* usb1_drvvbus */
+		>;
+	};
+
+	usb2_pins: pinmux_usb2_pins {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x3684, PIN_INPUT_SLEW | MUX_MODE0) /* usb2_drvvbus */
+		>;
+	};
+
+	tps65917_pins_default: tps65917_pins_default {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x3824, PIN_INPUT_PULLUP | MUX_MODE1) /* wakeup3.sys_nirq1 */
+		>;
+	};
+
+	mmc1_pins_default: mmc1_pins_default {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x376c, PIN_INPUT | MUX_MODE14)	/* mmc1sdcd.gpio219 */
+			DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */
+			DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */
+			DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */
+			DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */
+			DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */
+			DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */
+		>;
+	};
+
+	mmc2_pins_default: mmc2_pins_default {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */
+			DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */
+			DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */
+			DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */
+			DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */
+			DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */
+			DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */
+			DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */
+			DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */
+			DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */
+		>;
+	};
+
+	dcan1_pins_default: dcan1_pins_default {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x37d0, PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
+			DRA7XX_CORE_IOPAD(0x3818, PULL_UP | MUX_MODE1)	/* wakeup0.dcan1_rx */
+		>;
+	};
+
+	dcan1_pins_sleep: dcan1_pins_sleep {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x37d0, MUX_MODE15 | PULL_UP)	/* dcan1_tx.off */
+			DRA7XX_CORE_IOPAD(0x3818, MUX_MODE15 | PULL_UP)	/* wakeup0.off */
+		>;
+	};
+
+	hdmi_pins: pinmux_hdmi_pins {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x3808, PIN_INPUT | MUX_MODE1) /* i2c2_sda.hdmi1_ddc_scl */
+			DRA7XX_CORE_IOPAD(0x380c, PIN_INPUT | MUX_MODE1) /* i2c2_scl.hdmi1_ddc_sda */
+		>;
+	};
+
+	tpd12s015_pins: pinmux_tpd12s015_pins {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x37b8, PIN_INPUT_PULLDOWN | MUX_MODE14) /* gpio7_12 HPD */
+		>;
+	};
+
+	atl_pins: pinmux_atl_pins {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x3698, PIN_OUTPUT | MUX_MODE5)	/* xref_clk1.atl_clk1 */
+			DRA7XX_CORE_IOPAD(0x369c, PIN_OUTPUT | MUX_MODE5)	/* xref_clk2.atl_clk2 */
+		>;
+	};
+
+	mcasp3_pins: pinmux_mcasp3_pins {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x3724, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_aclkx */
+			DRA7XX_CORE_IOPAD(0x3728, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_fsx */
+			DRA7XX_CORE_IOPAD(0x372c, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_axr0 */
+			DRA7XX_CORE_IOPAD(0x3730, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_axr1 */
+		>;
+	};
+
+	mcasp3_sleep_pins: pinmux_mcasp3_sleep_pins {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x3724, PIN_INPUT_PULLDOWN | MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x3728, PIN_INPUT_PULLDOWN | MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x372c, PIN_INPUT_PULLDOWN | MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x3730, PIN_INPUT_PULLDOWN | MUX_MODE15)
+		>;
+	};
+};
+
+&i2c1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
+	clock-frequency = <400000>;
+
+	tps65917: tps65917@58 {
+		compatible = "ti,tps65917";
+		reg = <0x58>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&tps65917_pins_default>;
+
+		interrupts = <GIC_SPI 2 IRQ_TYPE_NONE>;  /* IRQ_SYS_1N */
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		ti,system-power-controller;
+
+		tps65917_pmic {
+			compatible = "ti,tps65917-pmic";
+
+			tps65917_regulators: regulators {
+				smps1_reg: smps1 {
+					/* VDD_MPU */
+					regulator-name = "smps1";
+					regulator-min-microvolt = <850000>;
+					regulator-max-microvolt = <1250000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				smps2_reg: smps2 {
+					/* VDD_CORE */
+					regulator-name = "smps2";
+					regulator-min-microvolt = <850000>;
+					regulator-max-microvolt = <1150000>;
+					regulator-boot-on;
+					regulator-always-on;
+				};
+
+				smps3_reg: smps3 {
+					/* VDD_GPU IVA DSPEVE */
+					regulator-name = "smps3";
+					regulator-min-microvolt = <850000>;
+					regulator-max-microvolt = <1250000>;
+					regulator-boot-on;
+					regulator-always-on;
+				};
+
+				smps4_reg: smps4 {
+					/* VDDS1V8 */
+					regulator-name = "smps4";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				smps5_reg: smps5 {
+					/* VDD_DDR */
+					regulator-name = "smps5";
+					regulator-min-microvolt = <1350000>;
+					regulator-max-microvolt = <1350000>;
+					regulator-boot-on;
+					regulator-always-on;
+				};
+
+				ldo1_reg: ldo1 {
+					/* LDO1_OUT --> SDIO  */
+					regulator-name = "ldo1";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <3300000>;
+					regulator-always-on;
+					regulator-boot-on;
+					regulator-allow-bypass;
+				};
+
+				ldo3_reg: ldo3 {
+					/* VDDA_1V8_PHY */
+					regulator-name = "ldo3";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					regulator-boot-on;
+					regulator-always-on;
+				};
+
+				ldo5_reg: ldo5 {
+					/* VDDA_1V8_PLL */
+					regulator-name = "ldo5";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
+				ldo4_reg: ldo4 {
+					/* VDDA_3V_USB: VDDA_USBHS33 */
+					regulator-name = "ldo4";
+					regulator-min-microvolt = <3300000>;
+					regulator-max-microvolt = <3300000>;
+					regulator-boot-on;
+				};
+			};
+		};
+
+		tps65917_power_button {
+			compatible = "ti,palmas-pwrbutton";
+			interrupt-parent = <&tps65917>;
+			interrupts = <1 IRQ_TYPE_NONE>;
+			wakeup-source;
+			ti,palmas-long-press-seconds = <6>;
+		};
+	};
+
+	pcf_gpio_21: gpio@21 {
+		compatible = "ti,pcf8575", "nxp,pcf8575";
+		reg = <0x21>;
+		lines-initial-states = <0x1408>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	tlv320aic3106: tlv320aic3106@19 {
+		#sound-dai-cells = <0>;
+		compatible = "ti,tlv320aic3106";
+		reg = <0x19>;
+		adc-settle-ms = <40>;
+		ai3x-micbias-vg = <1>;		/* 2.0V */
+		status = "okay";
+
+		/* Regulators */
+		AVDD-supply = <&evm_3v3>;
+		IOVDD-supply = <&evm_3v3>;
+		DRVDD-supply = <&evm_3v3>;
+		DVDD-supply = <&aic_dvdd>;
+	};
+};
+
+&i2c5 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c5_pins>;
+	clock-frequency = <400000>;
+
+	pcf_hdmi: pcf8575@26 {
+		compatible = "ti,pcf8575", "nxp,pcf8575";
+		reg = <0x26>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		/*
+		 * initial state is used here to keep the mdio interface
+		 * selected on RU89 through SEL_VIN4_MUX_S0, VIN2_S1 and
+		 * VIN2_S0 driven high otherwise Ethernet stops working
+		 * VIN6_SEL_S0 is low, thus selecting McASP3 over VIN6
+		 */
+		lines-initial-states = <0x0f2b>;
+
+		p1 {
+			/* vin6_sel_s0: high: VIN6, low: audio */
+			gpio-hog;
+			gpios = <1 GPIO_ACTIVE_HIGH>;
+			output-low;
+			line-name = "vin6_sel_s0";
+		};
+	};
+};
+
+&uart1 {
+	status = "okay";
+	interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+			      <&dra7_pmx_core 0x3e0>;
+};
+
+&elm {
+	status = "okay";
+};
+
+&gpmc {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&nand_default>;
+	ranges = <0 0 0x08000000 0x01000000>;	/* minimum GPMC partition = 16MB */
+	nand@0,0 {
+		/* To use NAND, DIP switch SW5 must be set like so:
+		 * SW5.1 (NAND_SELn) = ON (LOW)
+		 * SW5.9 (GPMC_WPN) = OFF (HIGH)
+		 */
+		compatible = "ti,omap2-nand";
+		reg = <0 0 4>;		/* device IO registers */
+		interrupt-parent = <&gpmc>;
+		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
+			     <1 IRQ_TYPE_NONE>;	/* termcount */
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 pin */
+		ti,nand-ecc-opt = "bch8";
+		ti,elm-id = <&elm>;
+		nand-bus-width = <16>;
+		gpmc,device-width = <2>;
+		gpmc,sync-clk-ps = <0>;
+		gpmc,cs-on-ns = <0>;
+		gpmc,cs-rd-off-ns = <80>;
+		gpmc,cs-wr-off-ns = <80>;
+		gpmc,adv-on-ns = <0>;
+		gpmc,adv-rd-off-ns = <60>;
+		gpmc,adv-wr-off-ns = <60>;
+		gpmc,we-on-ns = <10>;
+		gpmc,we-off-ns = <50>;
+		gpmc,oe-on-ns = <4>;
+		gpmc,oe-off-ns = <40>;
+		gpmc,access-ns = <40>;
+		gpmc,wr-access-ns = <80>;
+		gpmc,rd-cycle-ns = <80>;
+		gpmc,wr-cycle-ns = <80>;
+		gpmc,bus-turnaround-ns = <0>;
+		gpmc,cycle2cycle-delay-ns = <0>;
+		gpmc,clk-activation-ns = <0>;
+		gpmc,wr-data-mux-bus-ns = <0>;
+		/* MTD partition table */
+		/* All SPL-* partitions are sized to minimal length
+		 * which can be independently programmable. For
+		 * NAND flash this is equal to size of erase-block */
+		#address-cells = <1>;
+		#size-cells = <1>;
+		partition@0 {
+			label = "NAND.SPL";
+			reg = <0x00000000 0x000020000>;
+		};
+		partition@1 {
+			label = "NAND.SPL.backup1";
+			reg = <0x00020000 0x00020000>;
+		};
+		partition@2 {
+			label = "NAND.SPL.backup2";
+			reg = <0x00040000 0x00020000>;
+		};
+		partition@3 {
+			label = "NAND.SPL.backup3";
+			reg = <0x00060000 0x00020000>;
+		};
+		partition@4 {
+			label = "NAND.u-boot-spl-os";
+			reg = <0x00080000 0x00040000>;
+		};
+		partition@5 {
+			label = "NAND.u-boot";
+			reg = <0x000c0000 0x00100000>;
+		};
+		partition@6 {
+			label = "NAND.u-boot-env";
+			reg = <0x001c0000 0x00020000>;
+		};
+		partition@7 {
+			label = "NAND.u-boot-env.backup1";
+			reg = <0x001e0000 0x00020000>;
+		};
+		partition@8 {
+			label = "NAND.kernel";
+			reg = <0x00200000 0x00800000>;
+		};
+		partition@9 {
+			label = "NAND.file-system";
+			reg = <0x00a00000 0x0f600000>;
+		};
+	};
+};
+
+&usb2_phy1 {
+	phy-supply = <&ldo4_reg>;
+};
+
+&usb2_phy2 {
+	phy-supply = <&ldo4_reg>;
+};
+
+&omap_dwc3_1 {
+	extcon = <&extcon_usb1>;
+};
+
+&omap_dwc3_2 {
+	extcon = <&extcon_usb2>;
+};
+
+&usb1 {
+	dr_mode = "peripheral";
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb1_pins>;
+};
+
+&usb2 {
+	dr_mode = "host";
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb2_pins>;
+};
+
+&mmc1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc1_pins_default>;
+	vmmc-supply = <&evm_3v3_sd>;
+	vmmc_aux-supply = <&ldo1_reg>;
+	bus-width = <4>;
+	/*
+	 * SDCD signal is not being used here - using the fact that GPIO mode
+	 * is a viable alternative
+	 */
+	cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>;
+	max-frequency = <192000000>;
+};
+
+&mmc2 {
+	/* SW5-3 in ON position */
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc2_pins_default>;
+
+	vmmc-supply = <&evm_3v3>;
+	bus-width = <8>;
+	ti,non-removable;
+	max-frequency = <192000000>;
+};
+
+&dra7_pmx_core {
+	cpsw_default: cpsw_default {
+		pinctrl-single,pins = <
+			/* Slave 2 */
+			DRA7XX_CORE_IOPAD(0x3598, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d12.rgmii1_txc */
+			DRA7XX_CORE_IOPAD(0x359c, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d13.rgmii1_tctl */
+			DRA7XX_CORE_IOPAD(0x35a0, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d14.rgmii1_td3 */
+			DRA7XX_CORE_IOPAD(0x35a4, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d15.rgmii1_td2 */
+			DRA7XX_CORE_IOPAD(0x35a8, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d16.rgmii1_td1 */
+			DRA7XX_CORE_IOPAD(0x35ac, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d17.rgmii1_td0 */
+			DRA7XX_CORE_IOPAD(0x35b0, PIN_INPUT | MUX_MODE3)	/* vin2a_d18.rgmii1_rclk */
+			DRA7XX_CORE_IOPAD(0x35b4, PIN_INPUT | MUX_MODE3)	/* vin2a_d19.rgmii1_rctl */
+			DRA7XX_CORE_IOPAD(0x35b8, PIN_INPUT | MUX_MODE3)	/* vin2a_d20.rgmii1_rd3 */
+			DRA7XX_CORE_IOPAD(0x35bc, PIN_INPUT | MUX_MODE3)	/* vin2a_d21.rgmii1_rd2 */
+			DRA7XX_CORE_IOPAD(0x35c0, PIN_INPUT | MUX_MODE3)	/* vin2a_d22.rgmii1_rd1 */
+			DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT | MUX_MODE3)	/* vin2a_d23.rgmii1_rd0 */
+		>;
+
+	};
+
+	cpsw_sleep: cpsw_sleep {
+		pinctrl-single,pins = <
+			/* Slave 2 */
+			DRA7XX_CORE_IOPAD(0x3598, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x359c, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x35a0, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x35a4, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x35a8, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x35ac, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x35b0, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x35b4, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x35b8, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x35bc, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x35c0, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x35c4, MUX_MODE15)
+		>;
+	};
+
+	davinci_mdio_default: davinci_mdio_default {
+		pinctrl-single,pins = <
+			/* MDIO */
+			DRA7XX_CORE_IOPAD(0x363c, PIN_OUTPUT_PULLUP | MUX_MODE0)	/* mdio_d.mdio_d */
+			DRA7XX_CORE_IOPAD(0x3640, PIN_INPUT_PULLUP | MUX_MODE0)	/* mdio_clk.mdio_clk */
+		>;
+	};
+
+	davinci_mdio_sleep: davinci_mdio_sleep {
+		pinctrl-single,pins = <
+			DRA7XX_CORE_IOPAD(0x363c, MUX_MODE15)
+			DRA7XX_CORE_IOPAD(0x3640, MUX_MODE15)
+		>;
+	};
+};
+
+&mac {
+	status = "okay";
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&cpsw_default>;
+	pinctrl-1 = <&cpsw_sleep>;
+};
+
+&davinci_mdio {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&davinci_mdio_default>;
+	pinctrl-1 = <&davinci_mdio_sleep>;
+};
+
+&dcan1 {
+	status = "ok";
+	pinctrl-names = "default", "sleep", "active";
+	pinctrl-0 = <&dcan1_pins_sleep>;
+	pinctrl-1 = <&dcan1_pins_sleep>;
+	pinctrl-2 = <&dcan1_pins_default>;
+};
+
+&qspi {
+	status = "okay";
+
+	spi-max-frequency = <64000000>;
+	m25p80@0 {
+		compatible = "s25fl256s1";
+		spi-max-frequency = <64000000>;
+		reg = <0>;
+		spi-tx-bus-width = <1>;
+		spi-rx-bus-width = <4>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* MTD partition table.
+		 * The ROM checks the first four physical blocks
+		 * for a valid file to boot, and the flash here has
+		 * a 64KiB block size.
+		 */
+		partition@0 {
+			label = "QSPI.SPL";
+			reg = <0x00000000 0x000010000>;
+		};
+		partition@1 {
+			label = "QSPI.SPL.backup1";
+			reg = <0x00010000 0x00010000>;
+		};
+		partition@2 {
+			label = "QSPI.SPL.backup2";
+			reg = <0x00020000 0x00010000>;
+		};
+		partition@3 {
+			label = "QSPI.SPL.backup3";
+			reg = <0x00030000 0x00010000>;
+		};
+		partition@4 {
+			label = "QSPI.u-boot";
+			reg = <0x00040000 0x00100000>;
+		};
+		partition@5 {
+			label = "QSPI.u-boot-spl-os";
+			reg = <0x00140000 0x00080000>;
+		};
+		partition@6 {
+			label = "QSPI.u-boot-env";
+			reg = <0x001c0000 0x00010000>;
+		};
+		partition@7 {
+			label = "QSPI.u-boot-env.backup1";
+			reg = <0x001d0000 0x0010000>;
+		};
+		partition@8 {
+			label = "QSPI.kernel";
+			reg = <0x001e0000 0x0800000>;
+		};
+		partition@9 {
+			label = "QSPI.file-system";
+			reg = <0x009e0000 0x01620000>;
+		};
+	};
+};
+
+&dss {
+	status = "ok";
+
+	vdda_video-supply = <&ldo5_reg>;
+};
+
+&hdmi {
+	status = "ok";
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&hdmi_pins>;
+
+	port {
+		hdmi_out: endpoint {
+			remote-endpoint = <&tpd12s015_in>;
+		};
+	};
+};
+
+&atl {
+	pinctrl-names = "default";
+	pinctrl-0 = <&atl_pins>;
+
+	assigned-clocks = <&abe_dpll_sys_clk_mux>,
+			  <&atl_gfclk_mux>,
+			  <&dpll_abe_ck>,
+			  <&dpll_abe_m2x2_ck>,
+			  <&atl_clkin2_ck>;
+	assigned-clock-parents = <&sys_clkin2>, <&dpll_abe_m2_ck>;
+	assigned-clock-rates = <0>, <0>, <180633600>, <361267200>, <5644800>;
+
+	status = "okay";
+
+	atl2 {
+		bws = <DRA7_ATL_WS_MCASP2_FSX>;
+		aws = <DRA7_ATL_WS_MCASP3_FSX>;
+	};
+};
+
+&mcasp3 {
+	#sound-dai-cells = <0>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&mcasp3_pins>;
+	pinctrl-1 = <&mcasp3_sleep_pins>;
+
+	assigned-clocks = <&mcasp3_ahclkx_mux>;
+	assigned-clock-parents = <&atl_clkin2_ck>;
+
+	status = "okay";
+
+	op-mode = <0>;          /* MCASP_IIS_MODE */
+	tdm-slots = <2>;
+	/* 4 serializers */
+	serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
+		1 2 0 0
+	>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
+};
+
+&mailbox5 {
+	status = "okay";
+	mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
+		status = "okay";
+	};
+	mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
+		status = "okay";
+	};
+};
+
+&mailbox6 {
+	status = "okay";
+	mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
+		status = "okay";
+	};
+};
diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts
new file mode 100644
index 0000000..f9cfd3b
--- /dev/null
+++ b/arch/arm/boot/dts/dra72-evm-revc.dts
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "dra72-evm-common.dtsi"
+#include <dt-bindings/net/ti-dp83867.h>
+
+/ {
+	model = "TI DRA722 Rev C EVM";
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x80000000 0x0 0x80000000>; /* 2GB */
+	};
+};
+
+&tps65917_regulators {
+	ldo2_reg: ldo2 {
+		/* LDO2_OUT --> VDDA_1V8_PHY2 */
+		regulator-name = "ldo2";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+};
+
+&hdmi {
+	vdda-supply = <&ldo2_reg>;
+};
+
+&pcf_gpio_21 {
+	interrupt-parent = <&gpio3>;
+	interrupts = <30 IRQ_TYPE_EDGE_FALLING>;
+};
+
+&mac {
+	mode-gpios = <&pcf_gpio_21 4 GPIO_ACTIVE_LOW>,
+		     <&pcf_hdmi 9 GPIO_ACTIVE_LOW>,	/* P11 */
+		     <&pcf_hdmi 10 GPIO_ACTIVE_LOW>;	/* P12 */
+	dual_emac;
+};
+
+&cpsw_emac0 {
+	phy_id = <&davinci_mdio>, <2>;
+	phy-mode = "rgmii-id";
+	dual_emac_res_vlan = <1>;
+};
+
+&cpsw_emac1 {
+	phy_id = <&davinci_mdio>, <3>;
+	phy-mode = "rgmii-id";
+	dual_emac_res_vlan = <2>;
+};
+
+&davinci_mdio {
+	dp83867_0: ethernet-phy@2 {
+		reg = <2>;
+		ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+		ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_NS>;
+		ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
+	};
+
+	dp83867_1: ethernet-phy@3 {
+		reg = <3>;
+		ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+		ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_NS>;
+		ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
+	};
+};
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index 6affe2d..cc1d32c 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -1,694 +1,40 @@
 /*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014-2016 Texas Instruments Incorporated - http://www.ti.com/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-/dts-v1/;
-
-#include "dra72x.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/ti-dra7-atl.h>
-
+#include "dra72-evm-common.dtsi"
 / {
 	model = "TI DRA722";
-	compatible = "ti,dra72-evm", "ti,dra722", "ti,dra72", "ti,dra7";
 
 	memory {
 		device_type = "memory";
 		reg = <0x0 0x80000000 0x0 0x40000000>; /* 1024 MB */
 	};
+};
 
-	aliases {
-		display0 = &hdmi0;
-	};
-
-	evm_3v3: fixedregulator-evm_3v3 {
-		compatible = "regulator-fixed";
-		regulator-name = "evm_3v3";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-	};
-
-	aic_dvdd: fixedregulator-aic_dvdd {
-		/* TPS77018DBVT */
-		compatible = "regulator-fixed";
-		regulator-name = "aic_dvdd";
-		vin-supply = <&evm_3v3>;
+&tps65917_regulators {
+	ldo2_reg: ldo2 {
+		/* LDO2_OUT --> TP1017 (UNUSED)  */
+		regulator-name = "ldo2";
 		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-	};
-
-	evm_3v3_sd: fixedregulator-sd {
-		compatible = "regulator-fixed";
-		regulator-name = "evm_3v3_sd";
-		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
-		enable-active-high;
-		gpio = <&pcf_gpio_21 5 GPIO_ACTIVE_HIGH>;
-	};
-
-	extcon_usb1: extcon_usb1 {
-		compatible = "linux,extcon-usb-gpio";
-		id-gpio = <&pcf_gpio_21 1 GPIO_ACTIVE_HIGH>;
-	};
-
-	extcon_usb2: extcon_usb2 {
-		compatible = "linux,extcon-usb-gpio";
-		id-gpio = <&pcf_gpio_21 2 GPIO_ACTIVE_HIGH>;
-	};
-
-	hdmi0: connector {
-		compatible = "hdmi-connector";
-		label = "hdmi";
-
-		type = "a";
-
-		port {
-			hdmi_connector_in: endpoint {
-				remote-endpoint = <&tpd12s015_out>;
-			};
-		};
-	};
-
-	tpd12s015: encoder {
-		compatible = "ti,tpd12s015";
-
-		pinctrl-names = "default";
-		pinctrl-0 = <&tpd12s015_pins>;
-
-		gpios = <&pcf_hdmi 4 GPIO_ACTIVE_HIGH>,	/* P4, CT CP HPD */
-			<&pcf_hdmi 5 GPIO_ACTIVE_HIGH>,	/* P5, LS OE */
-			<&gpio7 12 GPIO_ACTIVE_HIGH>;	/* gpio7_12/sp1_cs2, HPD */
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				reg = <0>;
-
-				tpd12s015_in: endpoint {
-					remote-endpoint = <&hdmi_out>;
-				};
-			};
-
-			port@1 {
-				reg = <1>;
-
-				tpd12s015_out: endpoint {
-					remote-endpoint = <&hdmi_connector_in>;
-				};
-			};
-		};
-	};
-
-	sound0: sound@0 {
-		compatible = "simple-audio-card";
-		simple-audio-card,name = "DRA7xx-EVM";
-		simple-audio-card,widgets =
-			"Headphone", "Headphone Jack",
-			"Line", "Line Out",
-			"Microphone", "Mic Jack",
-			"Line", "Line In";
-		simple-audio-card,routing =
-			"Headphone Jack",       "HPLOUT",
-			"Headphone Jack",       "HPROUT",
-			"Line Out",		"LLOUT",
-			"Line Out",		"RLOUT",
-			"MIC3L",		"Mic Jack",
-			"MIC3R",		"Mic Jack",
-			"Mic Jack",		"Mic Bias",
-			"LINE1L",               "Line In",
-			"LINE1R",               "Line In";
-		simple-audio-card,format = "dsp_b";
-		simple-audio-card,bitclock-master = <&sound0_master>;
-		simple-audio-card,frame-master = <&sound0_master>;
-		simple-audio-card,bitclock-inversion;
-
-		sound0_master: simple-audio-card,cpu {
-			sound-dai = <&mcasp3>;
-			system-clock-frequency = <5644800>;
-		};
-
-		simple-audio-card,codec {
-			sound-dai = <&tlv320aic3106>;
-			clocks = <&atl_clkin2_ck>;
-		};
+		regulator-allow-bypass;
 	};
 };
 
-&dra7_pmx_core {
-	i2c1_pins: pinmux_i2c1_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3800, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
-			DRA7XX_CORE_IOPAD(0x3804, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
-		>;
-	};
-
-	i2c5_pins: pinmux_i2c5_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x36b4, PIN_INPUT | MUX_MODE10) /* mcasp1_axr0.i2c5_sda */
-			DRA7XX_CORE_IOPAD(0x36b8, PIN_INPUT | MUX_MODE10) /* mcasp1_axr1.i2c5_scl */
-		>;
-	};
-
-	i2c5_pins: pinmux_i2c5_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x36b4, PIN_INPUT | MUX_MODE10) /* mcasp1_axr0.i2c5_sda */
-			DRA7XX_CORE_IOPAD(0x36b8, PIN_INPUT | MUX_MODE10) /* mcasp1_axr1.i2c5_scl */
-		>;
-	};
-
-	nand_default: nand_default {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3400, PIN_INPUT  | MUX_MODE0) /* gpmc_ad0 */
-			DRA7XX_CORE_IOPAD(0x3404, PIN_INPUT  | MUX_MODE0) /* gpmc_ad1 */
-			DRA7XX_CORE_IOPAD(0x3408, PIN_INPUT  | MUX_MODE0) /* gpmc_ad2 */
-			DRA7XX_CORE_IOPAD(0x340c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad3 */
-			DRA7XX_CORE_IOPAD(0x3410, PIN_INPUT  | MUX_MODE0) /* gpmc_ad4 */
-			DRA7XX_CORE_IOPAD(0x3414, PIN_INPUT  | MUX_MODE0) /* gpmc_ad5 */
-			DRA7XX_CORE_IOPAD(0x3418, PIN_INPUT  | MUX_MODE0) /* gpmc_ad6 */
-			DRA7XX_CORE_IOPAD(0x341c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad7 */
-			DRA7XX_CORE_IOPAD(0x3420, PIN_INPUT  | MUX_MODE0) /* gpmc_ad8 */
-			DRA7XX_CORE_IOPAD(0x3424, PIN_INPUT  | MUX_MODE0) /* gpmc_ad9 */
-			DRA7XX_CORE_IOPAD(0x3428, PIN_INPUT  | MUX_MODE0) /* gpmc_ad10 */
-			DRA7XX_CORE_IOPAD(0x342c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad11 */
-			DRA7XX_CORE_IOPAD(0x3430, PIN_INPUT  | MUX_MODE0) /* gpmc_ad12 */
-			DRA7XX_CORE_IOPAD(0x3434, PIN_INPUT  | MUX_MODE0) /* gpmc_ad13 */
-			DRA7XX_CORE_IOPAD(0x3438, PIN_INPUT  | MUX_MODE0) /* gpmc_ad14 */
-			DRA7XX_CORE_IOPAD(0x343c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad15 */
-			DRA7XX_CORE_IOPAD(0x34b4, PIN_OUTPUT | MUX_MODE0) /* gpmc_cs0 */
-			DRA7XX_CORE_IOPAD(0x34c4, PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale */
-			DRA7XX_CORE_IOPAD(0x34cc, PIN_OUTPUT | MUX_MODE0) /* gpmc_wen */
-			DRA7XX_CORE_IOPAD(0x34c8, PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren */
-			DRA7XX_CORE_IOPAD(0x34d0, PIN_OUTPUT | MUX_MODE0) /* gpmc_ben0 */
-			DRA7XX_CORE_IOPAD(0x34d8, PIN_INPUT  | MUX_MODE0) /* gpmc_wait0 */
-		>;
-	};
-
-	usb1_pins: pinmux_usb1_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3680, PIN_INPUT_SLEW | MUX_MODE0) /* usb1_drvvbus */
-		>;
-	};
-
-	usb2_pins: pinmux_usb2_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3684, PIN_INPUT_SLEW | MUX_MODE0) /* usb2_drvvbus */
-		>;
-	};
-
-	tps65917_pins_default: tps65917_pins_default {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3824, PIN_INPUT_PULLUP | MUX_MODE1) /* wakeup3.sys_nirq1 */
-		>;
-	};
-
-	mmc1_pins_default: mmc1_pins_default {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x376c, PIN_INPUT | MUX_MODE14)	/* mmc1sdcd.gpio219 */
-			DRA7XX_CORE_IOPAD(0x3754, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_clk.clk */
-			DRA7XX_CORE_IOPAD(0x3758, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_cmd.cmd */
-			DRA7XX_CORE_IOPAD(0x375c, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat0.dat0 */
-			DRA7XX_CORE_IOPAD(0x3760, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat1.dat1 */
-			DRA7XX_CORE_IOPAD(0x3764, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat2.dat2 */
-			DRA7XX_CORE_IOPAD(0x3768, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc1_dat3.dat3 */
-		>;
-	};
-
-	mmc2_pins_default: mmc2_pins_default {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x349c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a23.mmc2_clk */
-			DRA7XX_CORE_IOPAD(0x34b0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_cs1.mmc2_cmd */
-			DRA7XX_CORE_IOPAD(0x34a0, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a24.mmc2_dat0 */
-			DRA7XX_CORE_IOPAD(0x34a4, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a25.mmc2_dat1 */
-			DRA7XX_CORE_IOPAD(0x34a8, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a26.mmc2_dat2 */
-			DRA7XX_CORE_IOPAD(0x34ac, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a27.mmc2_dat3 */
-			DRA7XX_CORE_IOPAD(0x348c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a19.mmc2_dat4 */
-			DRA7XX_CORE_IOPAD(0x3490, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a20.mmc2_dat5 */
-			DRA7XX_CORE_IOPAD(0x3494, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a21.mmc2_dat6 */
-			DRA7XX_CORE_IOPAD(0x3498, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_a22.mmc2_dat7 */
-		>;
-	};
-
-	dcan1_pins_default: dcan1_pins_default {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x37d0, PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
-			DRA7XX_CORE_IOPAD(0x3818, PULL_UP | MUX_MODE1)	/* wakeup0.dcan1_rx */
-		>;
-	};
-
-	dcan1_pins_sleep: dcan1_pins_sleep {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x37d0, MUX_MODE15 | PULL_UP)	/* dcan1_tx.off */
-			DRA7XX_CORE_IOPAD(0x3818, MUX_MODE15 | PULL_UP)	/* wakeup0.off */
-		>;
-	};
-
-	qspi1_pins: pinmux_qspi1_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3474, PIN_OUTPUT | MUX_MODE1)	/* gpmc_a13.qspi1_rtclk */
-			DRA7XX_CORE_IOPAD(0x3478, PIN_INPUT | MUX_MODE1)	/* gpmc_a14.qspi1_d3 */
-			DRA7XX_CORE_IOPAD(0x347c, PIN_INPUT | MUX_MODE1)	/* gpmc_a15.qspi1_d2 */
-			DRA7XX_CORE_IOPAD(0x3480, PIN_INPUT | MUX_MODE1)	/* gpmc_a16.qspi1_d1 */
-			DRA7XX_CORE_IOPAD(0x3484, PIN_INPUT | MUX_MODE1)	/* gpmc_a17.qspi1_d0 */
-			DRA7XX_CORE_IOPAD(0x3488, PIN_OUTPUT | MUX_MODE1)	/* qpmc_a18.qspi1_sclk */
-			DRA7XX_CORE_IOPAD(0x34b8, PIN_OUTPUT | MUX_MODE1)	/* gpmc_cs2.qspi1_cs0 */
-		>;
-	};
-
-	hdmi_pins: pinmux_hdmi_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3808, PIN_INPUT | MUX_MODE1) /* i2c2_sda.hdmi1_ddc_scl */
-			DRA7XX_CORE_IOPAD(0x380c, PIN_INPUT | MUX_MODE1) /* i2c2_scl.hdmi1_ddc_sda */
-		>;
-	};
-
-	tpd12s015_pins: pinmux_tpd12s015_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x37b8, PIN_INPUT_PULLDOWN | MUX_MODE14) /* gpio7_12 HPD */
-		>;
-	};
-
-	atl_pins: pinmux_atl_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3698, PIN_OUTPUT | MUX_MODE5)	/* xref_clk1.atl_clk1 */
-			DRA7XX_CORE_IOPAD(0x369c, PIN_OUTPUT | MUX_MODE5)	/* xref_clk2.atl_clk2 */
-		>;
-	};
-
-	mcasp3_pins: pinmux_mcasp3_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3724, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_aclkx */
-			DRA7XX_CORE_IOPAD(0x3728, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_fsx */
-			DRA7XX_CORE_IOPAD(0x372c, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_axr0 */
-			DRA7XX_CORE_IOPAD(0x3730, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_axr1 */
-		>;
-	};
-
-	mcasp3_sleep_pins: pinmux_mcasp3_sleep_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3724, PIN_INPUT_PULLDOWN | MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x3728, PIN_INPUT_PULLDOWN | MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x372c, PIN_INPUT_PULLDOWN | MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x3730, PIN_INPUT_PULLDOWN | MUX_MODE15)
-		>;
-	};
+&hdmi {
+	vdda-supply = <&ldo3_reg>;
 };
 
-&i2c1 {
-	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&i2c1_pins>;
-	clock-frequency = <400000>;
-
-	tps65917: tps65917@58 {
-		compatible = "ti,tps65917";
-		reg = <0x58>;
-
-		pinctrl-names = "default";
-		pinctrl-0 = <&tps65917_pins_default>;
-
-		interrupts = <GIC_SPI 2 IRQ_TYPE_NONE>;  /* IRQ_SYS_1N */
-		interrupt-controller;
-		#interrupt-cells = <2>;
-
-		ti,system-power-controller;
-
-		tps65917_pmic {
-			compatible = "ti,tps65917-pmic";
-
-			regulators {
-				smps1_reg: smps1 {
-					/* VDD_MPU */
-					regulator-name = "smps1";
-					regulator-min-microvolt = <850000>;
-					regulator-max-microvolt = <1250000>;
-					regulator-always-on;
-					regulator-boot-on;
-				};
-
-				smps2_reg: smps2 {
-					/* VDD_CORE */
-					regulator-name = "smps2";
-					regulator-min-microvolt = <850000>;
-					regulator-max-microvolt = <1060000>;
-					regulator-boot-on;
-					regulator-always-on;
-				};
-
-				smps3_reg: smps3 {
-					/* VDD_GPU IVA DSPEVE */
-					regulator-name = "smps3";
-					regulator-min-microvolt = <850000>;
-					regulator-max-microvolt = <1250000>;
-					regulator-boot-on;
-					regulator-always-on;
-				};
-
-				smps4_reg: smps4 {
-					/* VDDS1V8 */
-					regulator-name = "smps4";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-					regulator-always-on;
-					regulator-boot-on;
-				};
-
-				smps5_reg: smps5 {
-					/* VDD_DDR */
-					regulator-name = "smps5";
-					regulator-min-microvolt = <1350000>;
-					regulator-max-microvolt = <1350000>;
-					regulator-boot-on;
-					regulator-always-on;
-				};
-
-				ldo1_reg: ldo1 {
-					/* LDO1_OUT --> SDIO  */
-					regulator-name = "ldo1";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <3300000>;
-					regulator-always-on;
-					regulator-boot-on;
-					regulator-allow-bypass;
-				};
-
-				ldo2_reg: ldo2 {
-					/* LDO2_OUT --> TP1017 (UNUSED)  */
-					regulator-name = "ldo2";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <3300000>;
-					regulator-allow-bypass;
-				};
-
-				ldo3_reg: ldo3 {
-					/* VDDA_1V8_PHY */
-					regulator-name = "ldo3";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-					regulator-boot-on;
-					regulator-always-on;
-				};
-
-				ldo5_reg: ldo5 {
-					/* VDDA_1V8_PLL */
-					regulator-name = "ldo5";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-					regulator-always-on;
-					regulator-boot-on;
-				};
-
-				ldo4_reg: ldo4 {
-					/* VDDA_3V_USB: VDDA_USBHS33 */
-					regulator-name = "ldo4";
-					regulator-min-microvolt = <3300000>;
-					regulator-max-microvolt = <3300000>;
-					regulator-boot-on;
-				};
-			};
-		};
-
-		tps65917_power_button {
-			compatible = "ti,palmas-pwrbutton";
-			interrupt-parent = <&tps65917>;
-			interrupts = <1 IRQ_TYPE_NONE>;
-			wakeup-source;
-			ti,palmas-long-press-seconds = <6>;
-		};
-	};
-
-	pcf_gpio_21: gpio@21 {
-		compatible = "ti,pcf8575";
-		reg = <0x21>;
-		lines-initial-states = <0x1408>;
-		gpio-controller;
-		#gpio-cells = <2>;
-		interrupt-parent = <&gpio6>;
-		interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	tlv320aic3106: tlv320aic3106@19 {
-		#sound-dai-cells = <0>;
-		compatible = "ti,tlv320aic3106";
-		reg = <0x19>;
-		adc-settle-ms = <40>;
-		ai3x-micbias-vg = <1>;		/* 2.0V */
-		status = "okay";
-
-		/* Regulators */
-		AVDD-supply = <&evm_3v3>;
-		IOVDD-supply = <&evm_3v3>;
-		DRVDD-supply = <&evm_3v3>;
-		DVDD-supply = <&aic_dvdd>;
-	};
-};
-
-&i2c5 {
-	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&i2c5_pins>;
-	clock-frequency = <400000>;
-
-	pcf_hdmi: pcf8575@26 {
-		compatible = "nxp,pcf8575";
-		reg = <0x26>;
-		gpio-controller;
-		#gpio-cells = <2>;
-		/*
-		 * initial state is used here to keep the mdio interface
-		 * selected on RU89 through SEL_VIN4_MUX_S0, VIN2_S1 and
-		 * VIN2_S0 driven high otherwise Ethernet stops working
-		 * VIN6_SEL_S0 is low, thus selecting McASP3 over VIN6
-		 */
-		lines-initial-states = <0x0f2b>;
-
-		p1 {
-			/* vin6_sel_s0: high: VIN6, low: audio */
-			gpio-hog;
-			gpios = <1 GPIO_ACTIVE_HIGH>;
-			output-low;
-			line-name = "vin6_sel_s0";
-		};
-	};
-};
-
-&uart1 {
-	status = "okay";
-	interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
-			      <&dra7_pmx_core 0x3e0>;
-};
-
-&elm {
-	status = "okay";
-};
-
-&gpmc {
-	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&nand_default>;
-	ranges = <0 0 0x08000000 0x01000000>;	/* minimum GPMC partition = 16MB */
-	nand@0,0 {
-		/* To use NAND, DIP switch SW5 must be set like so:
-		 * SW5.1 (NAND_SELn) = ON (LOW)
-		 * SW5.9 (GPMC_WPN) = OFF (HIGH)
-		 */
-		compatible = "ti,omap2-nand";
-		reg = <0 0 4>;		/* device IO registers */
-		interrupt-parent = <&gpmc>;
-		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
-			     <1 IRQ_TYPE_NONE>;	/* termcount */
-		ti,nand-ecc-opt = "bch8";
-		ti,elm-id = <&elm>;
-		nand-bus-width = <16>;
-		gpmc,device-width = <2>;
-		gpmc,sync-clk-ps = <0>;
-		gpmc,cs-on-ns = <0>;
-		gpmc,cs-rd-off-ns = <80>;
-		gpmc,cs-wr-off-ns = <80>;
-		gpmc,adv-on-ns = <0>;
-		gpmc,adv-rd-off-ns = <60>;
-		gpmc,adv-wr-off-ns = <60>;
-		gpmc,we-on-ns = <10>;
-		gpmc,we-off-ns = <50>;
-		gpmc,oe-on-ns = <4>;
-		gpmc,oe-off-ns = <40>;
-		gpmc,access-ns = <40>;
-		gpmc,wr-access-ns = <80>;
-		gpmc,rd-cycle-ns = <80>;
-		gpmc,wr-cycle-ns = <80>;
-		gpmc,bus-turnaround-ns = <0>;
-		gpmc,cycle2cycle-delay-ns = <0>;
-		gpmc,clk-activation-ns = <0>;
-		gpmc,wr-data-mux-bus-ns = <0>;
-		/* MTD partition table */
-		/* All SPL-* partitions are sized to minimal length
-		 * which can be independently programmable. For
-		 * NAND flash this is equal to size of erase-block */
-		#address-cells = <1>;
-		#size-cells = <1>;
-		partition@0 {
-			label = "NAND.SPL";
-			reg = <0x00000000 0x000020000>;
-		};
-		partition@1 {
-			label = "NAND.SPL.backup1";
-			reg = <0x00020000 0x00020000>;
-		};
-		partition@2 {
-			label = "NAND.SPL.backup2";
-			reg = <0x00040000 0x00020000>;
-		};
-		partition@3 {
-			label = "NAND.SPL.backup3";
-			reg = <0x00060000 0x00020000>;
-		};
-		partition@4 {
-			label = "NAND.u-boot-spl-os";
-			reg = <0x00080000 0x00040000>;
-		};
-		partition@5 {
-			label = "NAND.u-boot";
-			reg = <0x000c0000 0x00100000>;
-		};
-		partition@6 {
-			label = "NAND.u-boot-env";
-			reg = <0x001c0000 0x00020000>;
-		};
-		partition@7 {
-			label = "NAND.u-boot-env.backup1";
-			reg = <0x001e0000 0x00020000>;
-		};
-		partition@8 {
-			label = "NAND.kernel";
-			reg = <0x00200000 0x00800000>;
-		};
-		partition@9 {
-			label = "NAND.file-system";
-			reg = <0x00a00000 0x0f600000>;
-		};
-	};
-};
-
-&usb2_phy1 {
-	phy-supply = <&ldo4_reg>;
-};
-
-&usb2_phy2 {
-	phy-supply = <&ldo4_reg>;
-};
-
-&omap_dwc3_1 {
-	extcon = <&extcon_usb1>;
-};
-
-&omap_dwc3_2 {
-	extcon = <&extcon_usb2>;
-};
-
-&usb1 {
-	dr_mode = "peripheral";
-	pinctrl-names = "default";
-	pinctrl-0 = <&usb1_pins>;
-};
-
-&usb2 {
-	dr_mode = "host";
-	pinctrl-names = "default";
-	pinctrl-0 = <&usb2_pins>;
-};
-
-&mmc1 {
-	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&mmc1_pins_default>;
-	vmmc-supply = <&evm_3v3_sd>;
-	vmmc_aux-supply = <&ldo1_reg>;
-	bus-width = <4>;
-	/*
-	 * SDCD signal is not being used here - using the fact that GPIO mode
-	 * is a viable alternative
-	 */
-	cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>;
-	max-frequency = <192000000>;
-};
-
-&mmc2 {
-	/* SW5-3 in ON position */
-	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&mmc2_pins_default>;
-
-	vmmc-supply = <&evm_3v3>;
-	bus-width = <8>;
-	ti,non-removable;
-	max-frequency = <192000000>;
-};
-
-&dra7_pmx_core {
-	cpsw_default: cpsw_default {
-		pinctrl-single,pins = <
-			/* Slave 2 */
-			DRA7XX_CORE_IOPAD(0x3598, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d12.rgmii1_txc */
-			DRA7XX_CORE_IOPAD(0x359c, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d13.rgmii1_tctl */
-			DRA7XX_CORE_IOPAD(0x35a0, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d14.rgmii1_td3 */
-			DRA7XX_CORE_IOPAD(0x35a4, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d15.rgmii1_td2 */
-			DRA7XX_CORE_IOPAD(0x35a8, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d16.rgmii1_td1 */
-			DRA7XX_CORE_IOPAD(0x35ac, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d17.rgmii1_td0 */
-			DRA7XX_CORE_IOPAD(0x35b0, PIN_INPUT | MUX_MODE3)	/* vin2a_d18.rgmii1_rclk */
-			DRA7XX_CORE_IOPAD(0x35b4, PIN_INPUT | MUX_MODE3)	/* vin2a_d19.rgmii1_rctl */
-			DRA7XX_CORE_IOPAD(0x35b8, PIN_INPUT | MUX_MODE3)	/* vin2a_d20.rgmii1_rd3 */
-			DRA7XX_CORE_IOPAD(0x35bc, PIN_INPUT | MUX_MODE3)	/* vin2a_d21.rgmii1_rd2 */
-			DRA7XX_CORE_IOPAD(0x35c0, PIN_INPUT | MUX_MODE3)	/* vin2a_d22.rgmii1_rd1 */
-			DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT | MUX_MODE3)	/* vin2a_d23.rgmii1_rd0 */
-		>;
-
-	};
-
-	cpsw_sleep: cpsw_sleep {
-		pinctrl-single,pins = <
-			/* Slave 2 */
-			DRA7XX_CORE_IOPAD(0x3598, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x359c, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35a0, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35a4, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35a8, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35ac, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35b0, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35b4, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35b8, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35bc, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35c0, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35c4, MUX_MODE15)
-		>;
-	};
-
-	davinci_mdio_default: davinci_mdio_default {
-		pinctrl-single,pins = <
-			/* MDIO */
-			DRA7XX_CORE_IOPAD(0x363c, PIN_OUTPUT_PULLUP | MUX_MODE0)	/* mdio_d.mdio_d */
-			DRA7XX_CORE_IOPAD(0x3640, PIN_INPUT_PULLUP | MUX_MODE0)	/* mdio_clk.mdio_clk */
-		>;
-	};
-
-	davinci_mdio_sleep: davinci_mdio_sleep {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x363c, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x3640, MUX_MODE15)
-		>;
-	};
+&pcf_gpio_21 {
+	interrupt-parent = <&gpio6>;
+	interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
 };
 
 &mac {
-	status = "okay";
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&cpsw_default>;
-	pinctrl-1 = <&cpsw_sleep>;
 	slaves = <1>;
 	mode-gpios = <&pcf_gpio_21 4 GPIO_ACTIVE_HIGH>;
 };
@@ -697,158 +43,3 @@
 	phy_id = <&davinci_mdio>, <3>;
 	phy-mode = "rgmii";
 };
-
-&davinci_mdio {
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&davinci_mdio_default>;
-	pinctrl-1 = <&davinci_mdio_sleep>;
-};
-
-&dcan1 {
-	status = "ok";
-	pinctrl-names = "default", "sleep", "active";
-	pinctrl-0 = <&dcan1_pins_sleep>;
-	pinctrl-1 = <&dcan1_pins_sleep>;
-	pinctrl-2 = <&dcan1_pins_default>;
-};
-
-&qspi {
-	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&qspi1_pins>;
-
-	spi-max-frequency = <48000000>;
-	m25p80@0 {
-		compatible = "s25fl256s1";
-		spi-max-frequency = <48000000>;
-		reg = <0>;
-		spi-tx-bus-width = <1>;
-		spi-rx-bus-width = <4>;
-		spi-cpol;
-		spi-cpha;
-		#address-cells = <1>;
-		#size-cells = <1>;
-
-		/* MTD partition table.
-		 * The ROM checks the first four physical blocks
-		 * for a valid file to boot and the flash here is
-		 * 64KiB block size.
-		 */
-		partition@0 {
-			label = "QSPI.SPL";
-			reg = <0x00000000 0x000010000>;
-		};
-		partition@1 {
-			label = "QSPI.SPL.backup1";
-			reg = <0x00010000 0x00010000>;
-		};
-		partition@2 {
-			label = "QSPI.SPL.backup2";
-			reg = <0x00020000 0x00010000>;
-		};
-		partition@3 {
-			label = "QSPI.SPL.backup3";
-			reg = <0x00030000 0x00010000>;
-		};
-		partition@4 {
-			label = "QSPI.u-boot";
-			reg = <0x00040000 0x00100000>;
-		};
-		partition@5 {
-			label = "QSPI.u-boot-spl-os";
-			reg = <0x00140000 0x00080000>;
-		};
-		partition@6 {
-			label = "QSPI.u-boot-env";
-			reg = <0x001c0000 0x00010000>;
-		};
-		partition@7 {
-			label = "QSPI.u-boot-env.backup1";
-			reg = <0x001d0000 0x0010000>;
-		};
-		partition@8 {
-			label = "QSPI.kernel";
-			reg = <0x001e0000 0x0800000>;
-		};
-		partition@9 {
-			label = "QSPI.file-system";
-			reg = <0x009e0000 0x01620000>;
-		};
-	};
-};
-
-&dss {
-	status = "ok";
-
-	vdda_video-supply = <&ldo5_reg>;
-};
-
-&hdmi {
-	status = "ok";
-	vdda-supply = <&ldo3_reg>;
-
-	pinctrl-names = "default";
-	pinctrl-0 = <&hdmi_pins>;
-
-	port {
-		hdmi_out: endpoint {
-			remote-endpoint = <&tpd12s015_in>;
-		};
-	};
-};
-
-&atl {
-	pinctrl-names = "default";
-	pinctrl-0 = <&atl_pins>;
-
-	assigned-clocks = <&abe_dpll_sys_clk_mux>,
-			  <&atl_gfclk_mux>,
-			  <&dpll_abe_ck>,
-			  <&dpll_abe_m2x2_ck>,
-			  <&atl_clkin2_ck>;
-	assigned-clock-parents = <&sys_clkin2>, <&dpll_abe_m2_ck>;
-	assigned-clock-rates = <0>, <0>, <180633600>, <361267200>, <5644800>;
-
-	status = "okay";
-
-	atl2 {
-		bws = <DRA7_ATL_WS_MCASP2_FSX>;
-		aws = <DRA7_ATL_WS_MCASP3_FSX>;
-	};
-};
-
-&mcasp3 {
-	#sound-dai-cells = <0>;
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&mcasp3_pins>;
-	pinctrl-1 = <&mcasp3_sleep_pins>;
-
-	assigned-clocks = <&mcasp3_ahclkx_mux>;
-	assigned-clock-parents = <&atl_clkin2_ck>;
-
-	status = "okay";
-
-	op-mode = <0>;          /* MCASP_IIS_MODE */
-	tdm-slots = <2>;
-	/* 4 serializer */
-	serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
-		1 2 0 0
-	>;
-};
-
-&mailbox5 {
-	status = "okay";
-	mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
-		status = "okay";
-	};
-	mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
-		status = "okay";
-	};
-};
-
-&mailbox6 {
-	status = "okay";
-	mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
-		status = "okay";
-	};
-};
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index d0bae06..8378b44 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -98,12 +98,20 @@
 		clock-frequency = <32768>;
 	};
 
-	sys_32k_ck: sys_32k_ck {
+	sys_clk32_crystal_ck: sys_clk32_crystal_ck {
 		#clock-cells = <0>;
 		compatible = "fixed-clock";
 		clock-frequency = <32768>;
 	};
 
+	sys_clk32_pseudo_ck: sys_clk32_pseudo_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&sys_clkin1>;
+		clock-mult = <1>;
+		clock-div = <610>;
+	};
+
 	virt_12000000_ck: virt_12000000_ck {
 		#clock-cells = <0>;
 		compatible = "fixed-clock";
@@ -188,7 +196,7 @@
 		clock-frequency = <0>;
 	};
 
-	dpll_abe_ck: dpll_abe_ck {
+	dpll_abe_ck: dpll_abe_ck@1e0 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-m4xen-clock";
 		clocks = <&abe_dpll_clk_mux>, <&abe_dpll_bypass_clk_mux>;
@@ -201,7 +209,7 @@
 		clocks = <&dpll_abe_ck>;
 	};
 
-	dpll_abe_m2x2_ck: dpll_abe_m2x2_ck {
+	dpll_abe_m2x2_ck: dpll_abe_m2x2_ck@1f0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_x2_ck>;
@@ -212,7 +220,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	abe_clk: abe_clk {
+	abe_clk: abe_clk@108 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_m2x2_ck>;
@@ -221,7 +229,7 @@
 		ti,index-power-of-two;
 	};
 
-	dpll_abe_m2_ck: dpll_abe_m2_ck {
+	dpll_abe_m2_ck: dpll_abe_m2_ck@1f0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_ck>;
@@ -232,7 +240,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_abe_m3x2_ck: dpll_abe_m3x2_ck {
+	dpll_abe_m3x2_ck: dpll_abe_m3x2_ck@1f4 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_x2_ck>;
@@ -243,7 +251,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_core_byp_mux: dpll_core_byp_mux {
+	dpll_core_byp_mux: dpll_core_byp_mux@12c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
@@ -251,7 +259,7 @@
 		reg = <0x012c>;
 	};
 
-	dpll_core_ck: dpll_core_ck {
+	dpll_core_ck: dpll_core_ck@120 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-core-clock";
 		clocks = <&sys_clkin1>, <&dpll_core_byp_mux>;
@@ -264,7 +272,7 @@
 		clocks = <&dpll_core_ck>;
 	};
 
-	dpll_core_h12x2_ck: dpll_core_h12x2_ck {
+	dpll_core_h12x2_ck: dpll_core_h12x2_ck@13c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -283,14 +291,14 @@
 		clock-div = <1>;
 	};
 
-	dpll_mpu_ck: dpll_mpu_ck {
+	dpll_mpu_ck: dpll_mpu_ck@160 {
 		#clock-cells = <0>;
 		compatible = "ti,omap5-mpu-dpll-clock";
 		clocks = <&sys_clkin1>, <&mpu_dpll_hs_clk_div>;
 		reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>;
 	};
 
-	dpll_mpu_m2_ck: dpll_mpu_m2_ck {
+	dpll_mpu_m2_ck: dpll_mpu_m2_ck@170 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_mpu_ck>;
@@ -317,7 +325,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_dsp_byp_mux: dpll_dsp_byp_mux {
+	dpll_dsp_byp_mux: dpll_dsp_byp_mux@240 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
@@ -325,14 +333,14 @@
 		reg = <0x0240>;
 	};
 
-	dpll_dsp_ck: dpll_dsp_ck {
+	dpll_dsp_ck: dpll_dsp_ck@234 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>;
 		reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>;
 	};
 
-	dpll_dsp_m2_ck: dpll_dsp_m2_ck {
+	dpll_dsp_m2_ck: dpll_dsp_m2_ck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_dsp_ck>;
@@ -351,7 +359,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_iva_byp_mux: dpll_iva_byp_mux {
+	dpll_iva_byp_mux: dpll_iva_byp_mux@1ac {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
@@ -359,14 +367,14 @@
 		reg = <0x01ac>;
 	};
 
-	dpll_iva_ck: dpll_iva_ck {
+	dpll_iva_ck: dpll_iva_ck@1a0 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>;
 		reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
 	};
 
-	dpll_iva_m2_ck: dpll_iva_m2_ck {
+	dpll_iva_m2_ck: dpll_iva_m2_ck@1b0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_iva_ck>;
@@ -385,7 +393,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_gpu_byp_mux: dpll_gpu_byp_mux {
+	dpll_gpu_byp_mux: dpll_gpu_byp_mux@2e4 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
@@ -393,14 +401,14 @@
 		reg = <0x02e4>;
 	};
 
-	dpll_gpu_ck: dpll_gpu_ck {
+	dpll_gpu_ck: dpll_gpu_ck@2d8 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>;
 		reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>;
 	};
 
-	dpll_gpu_m2_ck: dpll_gpu_m2_ck {
+	dpll_gpu_m2_ck: dpll_gpu_m2_ck@2e8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_gpu_ck>;
@@ -411,7 +419,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_core_m2_ck: dpll_core_m2_ck {
+	dpll_core_m2_ck: dpll_core_m2_ck@130 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_ck>;
@@ -430,7 +438,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_ddr_byp_mux: dpll_ddr_byp_mux {
+	dpll_ddr_byp_mux: dpll_ddr_byp_mux@21c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
@@ -438,14 +446,14 @@
 		reg = <0x021c>;
 	};
 
-	dpll_ddr_ck: dpll_ddr_ck {
+	dpll_ddr_ck: dpll_ddr_ck@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>;
 		reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>;
 	};
 
-	dpll_ddr_m2_ck: dpll_ddr_m2_ck {
+	dpll_ddr_m2_ck: dpll_ddr_m2_ck@220 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_ddr_ck>;
@@ -456,7 +464,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_gmac_byp_mux: dpll_gmac_byp_mux {
+	dpll_gmac_byp_mux: dpll_gmac_byp_mux@2b4 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
@@ -464,14 +472,14 @@
 		reg = <0x02b4>;
 	};
 
-	dpll_gmac_ck: dpll_gmac_ck {
+	dpll_gmac_ck: dpll_gmac_ck@2a8 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>;
 		reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>;
 	};
 
-	dpll_gmac_m2_ck: dpll_gmac_m2_ck {
+	dpll_gmac_m2_ck: dpll_gmac_m2_ck@2b8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_gmac_ck>;
@@ -530,7 +538,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_eve_byp_mux: dpll_eve_byp_mux {
+	dpll_eve_byp_mux: dpll_eve_byp_mux@290 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
@@ -538,14 +546,14 @@
 		reg = <0x0290>;
 	};
 
-	dpll_eve_ck: dpll_eve_ck {
+	dpll_eve_ck: dpll_eve_ck@284 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>;
 		reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>;
 	};
 
-	dpll_eve_m2_ck: dpll_eve_m2_ck {
+	dpll_eve_m2_ck: dpll_eve_m2_ck@294 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_eve_ck>;
@@ -564,7 +572,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_core_h13x2_ck: dpll_core_h13x2_ck {
+	dpll_core_h13x2_ck: dpll_core_h13x2_ck@140 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -575,7 +583,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_core_h14x2_ck: dpll_core_h14x2_ck {
+	dpll_core_h14x2_ck: dpll_core_h14x2_ck@144 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -586,7 +594,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_core_h22x2_ck: dpll_core_h22x2_ck {
+	dpll_core_h22x2_ck: dpll_core_h22x2_ck@154 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -597,7 +605,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_core_h23x2_ck: dpll_core_h23x2_ck {
+	dpll_core_h23x2_ck: dpll_core_h23x2_ck@158 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -608,7 +616,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_core_h24x2_ck: dpll_core_h24x2_ck {
+	dpll_core_h24x2_ck: dpll_core_h24x2_ck@15c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -625,7 +633,7 @@
 		clocks = <&dpll_ddr_ck>;
 	};
 
-	dpll_ddr_h11x2_ck: dpll_ddr_h11x2_ck {
+	dpll_ddr_h11x2_ck: dpll_ddr_h11x2_ck@228 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_ddr_x2_ck>;
@@ -642,7 +650,7 @@
 		clocks = <&dpll_dsp_ck>;
 	};
 
-	dpll_dsp_m3x2_ck: dpll_dsp_m3x2_ck {
+	dpll_dsp_m3x2_ck: dpll_dsp_m3x2_ck@248 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_dsp_x2_ck>;
@@ -659,7 +667,7 @@
 		clocks = <&dpll_gmac_ck>;
 	};
 
-	dpll_gmac_h11x2_ck: dpll_gmac_h11x2_ck {
+	dpll_gmac_h11x2_ck: dpll_gmac_h11x2_ck@2c0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_gmac_x2_ck>;
@@ -670,7 +678,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_gmac_h12x2_ck: dpll_gmac_h12x2_ck {
+	dpll_gmac_h12x2_ck: dpll_gmac_h12x2_ck@2c4 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_gmac_x2_ck>;
@@ -681,7 +689,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_gmac_h13x2_ck: dpll_gmac_h13x2_ck {
+	dpll_gmac_h13x2_ck: dpll_gmac_h13x2_ck@2c8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_gmac_x2_ck>;
@@ -692,7 +700,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_gmac_m3x2_ck: dpll_gmac_m3x2_ck {
+	dpll_gmac_m3x2_ck: dpll_gmac_m3x2_ck@2bc {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_gmac_x2_ck>;
@@ -727,7 +735,7 @@
 		clock-div = <1>;
 	};
 
-	l3_iclk_div: l3_iclk_div {
+	l3_iclk_div: l3_iclk_div@100 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		ti,max-div = <2>;
@@ -777,7 +785,7 @@
 		clock-div = <1>;
 	};
 
-	ipu1_gfclk_mux: ipu1_gfclk_mux {
+	ipu1_gfclk_mux: ipu1_gfclk_mux@520 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_abe_m2x2_ck>, <&dpll_core_h22x2_ck>;
@@ -785,7 +793,7 @@
 		reg = <0x0520>;
 	};
 
-	mcasp1_ahclkr_mux: mcasp1_ahclkr_mux {
+	mcasp1_ahclkr_mux: mcasp1_ahclkr_mux@550 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
@@ -793,7 +801,7 @@
 		reg = <0x0550>;
 	};
 
-	mcasp1_ahclkx_mux: mcasp1_ahclkx_mux {
+	mcasp1_ahclkx_mux: mcasp1_ahclkx_mux@550 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
@@ -801,7 +809,7 @@
 		reg = <0x0550>;
 	};
 
-	mcasp1_aux_gfclk_mux: mcasp1_aux_gfclk_mux {
+	mcasp1_aux_gfclk_mux: mcasp1_aux_gfclk_mux@550 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
@@ -809,7 +817,7 @@
 		reg = <0x0550>;
 	};
 
-	timer5_gfclk_mux: timer5_gfclk_mux {
+	timer5_gfclk_mux: timer5_gfclk_mux@558 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>, <&clkoutmux0_clk_mux>;
@@ -817,7 +825,7 @@
 		reg = <0x0558>;
 	};
 
-	timer6_gfclk_mux: timer6_gfclk_mux {
+	timer6_gfclk_mux: timer6_gfclk_mux@560 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>, <&clkoutmux0_clk_mux>;
@@ -825,7 +833,7 @@
 		reg = <0x0560>;
 	};
 
-	timer7_gfclk_mux: timer7_gfclk_mux {
+	timer7_gfclk_mux: timer7_gfclk_mux@568 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>, <&clkoutmux0_clk_mux>;
@@ -833,7 +841,7 @@
 		reg = <0x0568>;
 	};
 
-	timer8_gfclk_mux: timer8_gfclk_mux {
+	timer8_gfclk_mux: timer8_gfclk_mux@570 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>, <&clkoutmux0_clk_mux>;
@@ -841,7 +849,7 @@
 		reg = <0x0570>;
 	};
 
-	uart6_gfclk_mux: uart6_gfclk_mux {
+	uart6_gfclk_mux: uart6_gfclk_mux@580 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -856,7 +864,7 @@
 	};
 };
 &prm_clocks {
-	sys_clkin1: sys_clkin1 {
+	sys_clkin1: sys_clkin1@110 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&virt_12000000_ck>, <&virt_20000000_ck>, <&virt_16800000_ck>, <&virt_19200000_ck>, <&virt_26000000_ck>, <&virt_27000000_ck>, <&virt_38400000_ck>;
@@ -864,28 +872,28 @@
 		ti,index-starts-at-one;
 	};
 
-	abe_dpll_sys_clk_mux: abe_dpll_sys_clk_mux {
+	abe_dpll_sys_clk_mux: abe_dpll_sys_clk_mux@118 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&sys_clkin2>;
 		reg = <0x0118>;
 	};
 
-	abe_dpll_bypass_clk_mux: abe_dpll_bypass_clk_mux {
+	abe_dpll_bypass_clk_mux: abe_dpll_bypass_clk_mux@114 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_dpll_sys_clk_mux>, <&sys_32k_ck>;
 		reg = <0x0114>;
 	};
 
-	abe_dpll_clk_mux: abe_dpll_clk_mux {
+	abe_dpll_clk_mux: abe_dpll_clk_mux@10c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_dpll_sys_clk_mux>, <&sys_32k_ck>;
 		reg = <0x010c>;
 	};
 
-	abe_24m_fclk: abe_24m_fclk {
+	abe_24m_fclk: abe_24m_fclk@11c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_m2x2_ck>;
@@ -893,7 +901,7 @@
 		ti,dividers = <8>, <16>;
 	};
 
-	aess_fclk: aess_fclk {
+	aess_fclk: aess_fclk@178 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&abe_clk>;
@@ -901,7 +909,7 @@
 		ti,max-div = <2>;
 	};
 
-	abe_giclk_div: abe_giclk_div {
+	abe_giclk_div: abe_giclk_div@174 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&aess_fclk>;
@@ -909,7 +917,7 @@
 		ti,max-div = <2>;
 	};
 
-	abe_lp_clk_div: abe_lp_clk_div {
+	abe_lp_clk_div: abe_lp_clk_div@1d8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_m2x2_ck>;
@@ -917,7 +925,7 @@
 		ti,dividers = <16>, <32>;
 	};
 
-	abe_sys_clk_div: abe_sys_clk_div {
+	abe_sys_clk_div: abe_sys_clk_div@120 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sys_clkin1>;
@@ -925,14 +933,14 @@
 		ti,max-div = <2>;
 	};
 
-	adc_gfclk_mux: adc_gfclk_mux {
+	adc_gfclk_mux: adc_gfclk_mux@1dc {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&sys_clkin2>, <&sys_32k_ck>;
 		reg = <0x01dc>;
 	};
 
-	sys_clk1_dclk_div: sys_clk1_dclk_div {
+	sys_clk1_dclk_div: sys_clk1_dclk_div@1c8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sys_clkin1>;
@@ -941,7 +949,7 @@
 		ti,index-power-of-two;
 	};
 
-	sys_clk2_dclk_div: sys_clk2_dclk_div {
+	sys_clk2_dclk_div: sys_clk2_dclk_div@1cc {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sys_clkin2>;
@@ -950,7 +958,7 @@
 		ti,index-power-of-two;
 	};
 
-	per_abe_x1_dclk_div: per_abe_x1_dclk_div {
+	per_abe_x1_dclk_div: per_abe_x1_dclk_div@1bc {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_m2_ck>;
@@ -959,7 +967,7 @@
 		ti,index-power-of-two;
 	};
 
-	dsp_gclk_div: dsp_gclk_div {
+	dsp_gclk_div: dsp_gclk_div@18c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_dsp_m2_ck>;
@@ -968,7 +976,7 @@
 		ti,index-power-of-two;
 	};
 
-	gpu_dclk: gpu_dclk {
+	gpu_dclk: gpu_dclk@1a0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_gpu_m2_ck>;
@@ -977,7 +985,7 @@
 		ti,index-power-of-two;
 	};
 
-	emif_phy_dclk_div: emif_phy_dclk_div {
+	emif_phy_dclk_div: emif_phy_dclk_div@190 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_ddr_m2_ck>;
@@ -986,7 +994,7 @@
 		ti,index-power-of-two;
 	};
 
-	gmac_250m_dclk_div: gmac_250m_dclk_div {
+	gmac_250m_dclk_div: gmac_250m_dclk_div@19c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_gmac_m2_ck>;
@@ -995,7 +1003,7 @@
 		ti,index-power-of-two;
 	};
 
-	l3init_480m_dclk_div: l3init_480m_dclk_div {
+	l3init_480m_dclk_div: l3init_480m_dclk_div@1ac {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_usb_m2_ck>;
@@ -1004,7 +1012,7 @@
 		ti,index-power-of-two;
 	};
 
-	usb_otg_dclk_div: usb_otg_dclk_div {
+	usb_otg_dclk_div: usb_otg_dclk_div@184 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&usb_otg_clkin_ck>;
@@ -1013,7 +1021,7 @@
 		ti,index-power-of-two;
 	};
 
-	sata_dclk_div: sata_dclk_div {
+	sata_dclk_div: sata_dclk_div@1c0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sys_clkin1>;
@@ -1022,7 +1030,7 @@
 		ti,index-power-of-two;
 	};
 
-	pcie2_dclk_div: pcie2_dclk_div {
+	pcie2_dclk_div: pcie2_dclk_div@1b8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_pcie_ref_m2_ck>;
@@ -1031,7 +1039,7 @@
 		ti,index-power-of-two;
 	};
 
-	pcie_dclk_div: pcie_dclk_div {
+	pcie_dclk_div: pcie_dclk_div@1b4 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&apll_pcie_m2_ck>;
@@ -1040,7 +1048,7 @@
 		ti,index-power-of-two;
 	};
 
-	emu_dclk_div: emu_dclk_div {
+	emu_dclk_div: emu_dclk_div@194 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sys_clkin1>;
@@ -1049,7 +1057,7 @@
 		ti,index-power-of-two;
 	};
 
-	secure_32k_dclk_div: secure_32k_dclk_div {
+	secure_32k_dclk_div: secure_32k_dclk_div@1c4 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&secure_32k_clk_src_ck>;
@@ -1058,21 +1066,21 @@
 		ti,index-power-of-two;
 	};
 
-	clkoutmux0_clk_mux: clkoutmux0_clk_mux {
+	clkoutmux0_clk_mux: clkoutmux0_clk_mux@158 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clk1_dclk_div>, <&sys_clk2_dclk_div>, <&per_abe_x1_dclk_div>, <&mpu_dclk_div>, <&dsp_gclk_div>, <&iva_dclk>, <&gpu_dclk>, <&core_dpll_out_dclk_div>, <&emif_phy_dclk_div>, <&gmac_250m_dclk_div>, <&video2_dclk_div>, <&video1_dclk_div>, <&hdmi_dclk_div>, <&func_96m_aon_dclk_div>, <&l3init_480m_dclk_div>, <&usb_otg_dclk_div>, <&sata_dclk_div>, <&pcie2_dclk_div>, <&pcie_dclk_div>, <&emu_dclk_div>, <&secure_32k_dclk_div>, <&eve_dclk_div>;
 		reg = <0x0158>;
 	};
 
-	clkoutmux1_clk_mux: clkoutmux1_clk_mux {
+	clkoutmux1_clk_mux: clkoutmux1_clk_mux@15c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clk1_dclk_div>, <&sys_clk2_dclk_div>, <&per_abe_x1_dclk_div>, <&mpu_dclk_div>, <&dsp_gclk_div>, <&iva_dclk>, <&gpu_dclk>, <&core_dpll_out_dclk_div>, <&emif_phy_dclk_div>, <&gmac_250m_dclk_div>, <&video2_dclk_div>, <&video1_dclk_div>, <&hdmi_dclk_div>, <&func_96m_aon_dclk_div>, <&l3init_480m_dclk_div>, <&usb_otg_dclk_div>, <&sata_dclk_div>, <&pcie2_dclk_div>, <&pcie_dclk_div>, <&emu_dclk_div>, <&secure_32k_dclk_div>, <&eve_dclk_div>;
 		reg = <0x015c>;
 	};
 
-	clkoutmux2_clk_mux: clkoutmux2_clk_mux {
+	clkoutmux2_clk_mux: clkoutmux2_clk_mux@160 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clk1_dclk_div>, <&sys_clk2_dclk_div>, <&per_abe_x1_dclk_div>, <&mpu_dclk_div>, <&dsp_gclk_div>, <&iva_dclk>, <&gpu_dclk>, <&core_dpll_out_dclk_div>, <&emif_phy_dclk_div>, <&gmac_250m_dclk_div>, <&video2_dclk_div>, <&video1_dclk_div>, <&hdmi_dclk_div>, <&func_96m_aon_dclk_div>, <&l3init_480m_dclk_div>, <&usb_otg_dclk_div>, <&sata_dclk_div>, <&pcie2_dclk_div>, <&pcie_dclk_div>, <&emu_dclk_div>, <&secure_32k_dclk_div>, <&eve_dclk_div>;
@@ -1087,21 +1095,21 @@
 		clock-div = <2>;
 	};
 
-	eve_clk: eve_clk {
+	eve_clk: eve_clk@180 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_eve_m2_ck>, <&dpll_dsp_m3x2_ck>;
 		reg = <0x0180>;
 	};
 
-	hdmi_dpll_clk_mux: hdmi_dpll_clk_mux {
+	hdmi_dpll_clk_mux: hdmi_dpll_clk_mux@164 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&sys_clkin2>;
 		reg = <0x0164>;
 	};
 
-	mlb_clk: mlb_clk {
+	mlb_clk: mlb_clk@134 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&mlb_clkin_ck>;
@@ -1110,7 +1118,7 @@
 		ti,index-power-of-two;
 	};
 
-	mlbp_clk: mlbp_clk {
+	mlbp_clk: mlbp_clk@130 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&mlbp_clkin_ck>;
@@ -1119,7 +1127,7 @@
 		ti,index-power-of-two;
 	};
 
-	per_abe_x1_gfclk2_div: per_abe_x1_gfclk2_div {
+	per_abe_x1_gfclk2_div: per_abe_x1_gfclk2_div@138 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_m2_ck>;
@@ -1128,7 +1136,7 @@
 		ti,index-power-of-two;
 	};
 
-	timer_sys_clk_div: timer_sys_clk_div {
+	timer_sys_clk_div: timer_sys_clk_div@144 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sys_clkin1>;
@@ -1136,28 +1144,28 @@
 		ti,max-div = <2>;
 	};
 
-	video1_dpll_clk_mux: video1_dpll_clk_mux {
+	video1_dpll_clk_mux: video1_dpll_clk_mux@168 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&sys_clkin2>;
 		reg = <0x0168>;
 	};
 
-	video2_dpll_clk_mux: video2_dpll_clk_mux {
+	video2_dpll_clk_mux: video2_dpll_clk_mux@16c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&sys_clkin2>;
 		reg = <0x016c>;
 	};
 
-	wkupaon_iclk_mux: wkupaon_iclk_mux {
+	wkupaon_iclk_mux: wkupaon_iclk_mux@108 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&abe_lp_clk_div>;
 		reg = <0x0108>;
 	};
 
-	gpio1_dbclk: gpio1_dbclk {
+	gpio1_dbclk: gpio1_dbclk@1838 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1165,7 +1173,7 @@
 		reg = <0x1838>;
 	};
 
-	dcan1_sys_clk_mux: dcan1_sys_clk_mux {
+	dcan1_sys_clk_mux: dcan1_sys_clk_mux@1888 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&sys_clkin2>;
@@ -1173,7 +1181,7 @@
 		reg = <0x1888>;
 	};
 
-	timer1_gfclk_mux: timer1_gfclk_mux {
+	timer1_gfclk_mux: timer1_gfclk_mux@1840 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -1181,7 +1189,7 @@
 		reg = <0x1840>;
 	};
 
-	uart10_gfclk_mux: uart10_gfclk_mux {
+	uart10_gfclk_mux: uart10_gfclk_mux@1880 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -1190,14 +1198,14 @@
 	};
 };
 &cm_core_clocks {
-	dpll_pcie_ref_ck: dpll_pcie_ref_ck {
+	dpll_pcie_ref_ck: dpll_pcie_ref_ck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&sys_clkin1>;
 		reg = <0x0200>, <0x0204>, <0x020c>, <0x0208>;
 	};
 
-	dpll_pcie_ref_m2ldo_ck: dpll_pcie_ref_m2ldo_ck {
+	dpll_pcie_ref_m2ldo_ck: dpll_pcie_ref_m2ldo_ck@210 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_pcie_ref_ck>;
@@ -1216,7 +1224,7 @@
 		ti,bit-shift = <7>;
 	};
 
-	apll_pcie_ck: apll_pcie_ck {
+	apll_pcie_ck: apll_pcie_ck@21c {
 		#clock-cells = <0>;
 		compatible = "ti,dra7-apll-clock";
 		clocks = <&apll_pcie_in_clk_mux>, <&dpll_pcie_ref_ck>;
@@ -1305,7 +1313,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_per_byp_mux: dpll_per_byp_mux {
+	dpll_per_byp_mux: dpll_per_byp_mux@14c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
@@ -1313,14 +1321,14 @@
 		reg = <0x014c>;
 	};
 
-	dpll_per_ck: dpll_per_ck {
+	dpll_per_ck: dpll_per_ck@140 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin1>, <&dpll_per_byp_mux>;
 		reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
 	};
 
-	dpll_per_m2_ck: dpll_per_m2_ck {
+	dpll_per_m2_ck: dpll_per_m2_ck@150 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_ck>;
@@ -1339,7 +1347,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_usb_byp_mux: dpll_usb_byp_mux {
+	dpll_usb_byp_mux: dpll_usb_byp_mux@18c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
@@ -1347,14 +1355,14 @@
 		reg = <0x018c>;
 	};
 
-	dpll_usb_ck: dpll_usb_ck {
+	dpll_usb_ck: dpll_usb_ck@180 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-j-type-clock";
 		clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>;
 		reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
 	};
 
-	dpll_usb_m2_ck: dpll_usb_m2_ck {
+	dpll_usb_m2_ck: dpll_usb_m2_ck@190 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_usb_ck>;
@@ -1365,7 +1373,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_pcie_ref_m2_ck: dpll_pcie_ref_m2_ck {
+	dpll_pcie_ref_m2_ck: dpll_pcie_ref_m2_ck@210 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_pcie_ref_ck>;
@@ -1382,7 +1390,7 @@
 		clocks = <&dpll_per_ck>;
 	};
 
-	dpll_per_h11x2_ck: dpll_per_h11x2_ck {
+	dpll_per_h11x2_ck: dpll_per_h11x2_ck@158 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -1393,7 +1401,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_per_h12x2_ck: dpll_per_h12x2_ck {
+	dpll_per_h12x2_ck: dpll_per_h12x2_ck@15c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -1404,7 +1412,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_per_h13x2_ck: dpll_per_h13x2_ck {
+	dpll_per_h13x2_ck: dpll_per_h13x2_ck@160 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -1415,7 +1423,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_per_h14x2_ck: dpll_per_h14x2_ck {
+	dpll_per_h14x2_ck: dpll_per_h14x2_ck@164 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -1426,7 +1434,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_per_m2x2_ck: dpll_per_m2x2_ck {
+	dpll_per_m2x2_ck: dpll_per_m2x2_ck@150 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -1485,7 +1493,7 @@
 		clock-div = <2>;
 	};
 
-	l3init_60m_fclk: l3init_60m_fclk {
+	l3init_60m_fclk: l3init_60m_fclk@104 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_usb_m2_ck>;
@@ -1493,7 +1501,7 @@
 		ti,dividers = <1>, <8>;
 	};
 
-	clkout2_clk: clkout2_clk {
+	clkout2_clk: clkout2_clk@6b0 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&clkoutmux2_clk_mux>;
@@ -1501,7 +1509,7 @@
 		reg = <0x06b0>;
 	};
 
-	l3init_960m_gfclk: l3init_960m_gfclk {
+	l3init_960m_gfclk: l3init_960m_gfclk@6c0 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_usb_clkdcoldo>;
@@ -1509,7 +1517,7 @@
 		reg = <0x06c0>;
 	};
 
-	dss_32khz_clk: dss_32khz_clk {
+	dss_32khz_clk: dss_32khz_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1517,7 +1525,7 @@
 		reg = <0x1120>;
 	};
 
-	dss_48mhz_clk: dss_48mhz_clk {
+	dss_48mhz_clk: dss_48mhz_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_48m_fclk>;
@@ -1525,7 +1533,7 @@
 		reg = <0x1120>;
 	};
 
-	dss_dss_clk: dss_dss_clk {
+	dss_dss_clk: dss_dss_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_per_h12x2_ck>;
@@ -1534,7 +1542,7 @@
 		ti,set-rate-parent;
 	};
 
-	dss_hdmi_clk: dss_hdmi_clk {
+	dss_hdmi_clk: dss_hdmi_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&hdmi_dpll_clk_mux>;
@@ -1542,7 +1550,7 @@
 		reg = <0x1120>;
 	};
 
-	dss_video1_clk: dss_video1_clk {
+	dss_video1_clk: dss_video1_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&video1_dpll_clk_mux>;
@@ -1550,7 +1558,7 @@
 		reg = <0x1120>;
 	};
 
-	dss_video2_clk: dss_video2_clk {
+	dss_video2_clk: dss_video2_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&video2_dpll_clk_mux>;
@@ -1558,7 +1566,7 @@
 		reg = <0x1120>;
 	};
 
-	gpio2_dbclk: gpio2_dbclk {
+	gpio2_dbclk: gpio2_dbclk@1760 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1566,7 +1574,7 @@
 		reg = <0x1760>;
 	};
 
-	gpio3_dbclk: gpio3_dbclk {
+	gpio3_dbclk: gpio3_dbclk@1768 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1574,7 +1582,7 @@
 		reg = <0x1768>;
 	};
 
-	gpio4_dbclk: gpio4_dbclk {
+	gpio4_dbclk: gpio4_dbclk@1770 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1582,7 +1590,7 @@
 		reg = <0x1770>;
 	};
 
-	gpio5_dbclk: gpio5_dbclk {
+	gpio5_dbclk: gpio5_dbclk@1778 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1590,7 +1598,7 @@
 		reg = <0x1778>;
 	};
 
-	gpio6_dbclk: gpio6_dbclk {
+	gpio6_dbclk: gpio6_dbclk@1780 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1598,7 +1606,7 @@
 		reg = <0x1780>;
 	};
 
-	gpio7_dbclk: gpio7_dbclk {
+	gpio7_dbclk: gpio7_dbclk@1810 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1606,7 +1614,7 @@
 		reg = <0x1810>;
 	};
 
-	gpio8_dbclk: gpio8_dbclk {
+	gpio8_dbclk: gpio8_dbclk@1818 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1614,7 +1622,7 @@
 		reg = <0x1818>;
 	};
 
-	mmc1_clk32k: mmc1_clk32k {
+	mmc1_clk32k: mmc1_clk32k@1328 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1622,7 +1630,7 @@
 		reg = <0x1328>;
 	};
 
-	mmc2_clk32k: mmc2_clk32k {
+	mmc2_clk32k: mmc2_clk32k@1330 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1630,7 +1638,7 @@
 		reg = <0x1330>;
 	};
 
-	mmc3_clk32k: mmc3_clk32k {
+	mmc3_clk32k: mmc3_clk32k@1820 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1638,7 +1646,7 @@
 		reg = <0x1820>;
 	};
 
-	mmc4_clk32k: mmc4_clk32k {
+	mmc4_clk32k: mmc4_clk32k@1828 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1646,7 +1654,7 @@
 		reg = <0x1828>;
 	};
 
-	sata_ref_clk: sata_ref_clk {
+	sata_ref_clk: sata_ref_clk@1388 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_clkin1>;
@@ -1654,7 +1662,7 @@
 		reg = <0x1388>;
 	};
 
-	usb_otg_ss1_refclk960m: usb_otg_ss1_refclk960m {
+	usb_otg_ss1_refclk960m: usb_otg_ss1_refclk960m@13f0 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3init_960m_gfclk>;
@@ -1662,7 +1670,7 @@
 		reg = <0x13f0>;
 	};
 
-	usb_otg_ss2_refclk960m: usb_otg_ss2_refclk960m {
+	usb_otg_ss2_refclk960m: usb_otg_ss2_refclk960m@1340 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3init_960m_gfclk>;
@@ -1670,7 +1678,7 @@
 		reg = <0x1340>;
 	};
 
-	usb_phy1_always_on_clk32k: usb_phy1_always_on_clk32k {
+	usb_phy1_always_on_clk32k: usb_phy1_always_on_clk32k@640 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1678,7 +1686,7 @@
 		reg = <0x0640>;
 	};
 
-	usb_phy2_always_on_clk32k: usb_phy2_always_on_clk32k {
+	usb_phy2_always_on_clk32k: usb_phy2_always_on_clk32k@688 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1686,7 +1694,7 @@
 		reg = <0x0688>;
 	};
 
-	usb_phy3_always_on_clk32k: usb_phy3_always_on_clk32k {
+	usb_phy3_always_on_clk32k: usb_phy3_always_on_clk32k@698 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1694,7 +1702,7 @@
 		reg = <0x0698>;
 	};
 
-	atl_dpll_clk_mux: atl_dpll_clk_mux {
+	atl_dpll_clk_mux: atl_dpll_clk_mux@c00 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_32k_ck>, <&video1_clkin_ck>, <&video2_clkin_ck>, <&hdmi_clkin_ck>;
@@ -1702,7 +1710,7 @@
 		reg = <0x0c00>;
 	};
 
-	atl_gfclk_mux: atl_gfclk_mux {
+	atl_gfclk_mux: atl_gfclk_mux@c00 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&l3_iclk_div>, <&dpll_abe_m2_ck>, <&atl_dpll_clk_mux>;
@@ -1710,7 +1718,7 @@
 		reg = <0x0c00>;
 	};
 
-	gmac_gmii_ref_clk_div: gmac_gmii_ref_clk_div {
+	gmac_gmii_ref_clk_div: gmac_gmii_ref_clk_div@13d0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_gmac_m2_ck>;
@@ -1719,7 +1727,7 @@
 		ti,dividers = <2>;
 	};
 
-	gmac_rft_clk_mux: gmac_rft_clk_mux {
+	gmac_rft_clk_mux: gmac_rft_clk_mux@13d0 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&video1_clkin_ck>, <&video2_clkin_ck>, <&dpll_abe_m2_ck>, <&hdmi_clkin_ck>, <&l3_iclk_div>;
@@ -1727,7 +1735,7 @@
 		reg = <0x13d0>;
 	};
 
-	gpu_core_gclk_mux: gpu_core_gclk_mux {
+	gpu_core_gclk_mux: gpu_core_gclk_mux@1220 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>, <&dpll_gpu_m2_ck>;
@@ -1735,7 +1743,7 @@
 		reg = <0x1220>;
 	};
 
-	gpu_hyd_gclk_mux: gpu_hyd_gclk_mux {
+	gpu_hyd_gclk_mux: gpu_hyd_gclk_mux@1220 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>, <&dpll_gpu_m2_ck>;
@@ -1743,7 +1751,7 @@
 		reg = <0x1220>;
 	};
 
-	l3instr_ts_gclk_div: l3instr_ts_gclk_div {
+	l3instr_ts_gclk_div: l3instr_ts_gclk_div@e50 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&wkupaon_iclk_mux>;
@@ -1752,7 +1760,7 @@
 		ti,dividers = <8>, <16>, <32>;
 	};
 
-	mcasp2_ahclkr_mux: mcasp2_ahclkr_mux {
+	mcasp2_ahclkr_mux: mcasp2_ahclkr_mux@1860 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
@@ -1760,7 +1768,7 @@
 		reg = <0x1860>;
 	};
 
-	mcasp2_ahclkx_mux: mcasp2_ahclkx_mux {
+	mcasp2_ahclkx_mux: mcasp2_ahclkx_mux@1860 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
@@ -1768,7 +1776,7 @@
 		reg = <0x1860>;
 	};
 
-	mcasp2_aux_gfclk_mux: mcasp2_aux_gfclk_mux {
+	mcasp2_aux_gfclk_mux: mcasp2_aux_gfclk_mux@1860 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
@@ -1776,7 +1784,7 @@
 		reg = <0x1860>;
 	};
 
-	mcasp3_ahclkx_mux: mcasp3_ahclkx_mux {
+	mcasp3_ahclkx_mux: mcasp3_ahclkx_mux@1868 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
@@ -1784,7 +1792,7 @@
 		reg = <0x1868>;
 	};
 
-	mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux {
+	mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux@1868 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
@@ -1792,7 +1800,7 @@
 		reg = <0x1868>;
 	};
 
-	mcasp4_ahclkx_mux: mcasp4_ahclkx_mux {
+	mcasp4_ahclkx_mux: mcasp4_ahclkx_mux@1898 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
@@ -1800,7 +1808,7 @@
 		reg = <0x1898>;
 	};
 
-	mcasp4_aux_gfclk_mux: mcasp4_aux_gfclk_mux {
+	mcasp4_aux_gfclk_mux: mcasp4_aux_gfclk_mux@1898 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
@@ -1808,7 +1816,7 @@
 		reg = <0x1898>;
 	};
 
-	mcasp5_ahclkx_mux: mcasp5_ahclkx_mux {
+	mcasp5_ahclkx_mux: mcasp5_ahclkx_mux@1878 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
@@ -1816,7 +1824,7 @@
 		reg = <0x1878>;
 	};
 
-	mcasp5_aux_gfclk_mux: mcasp5_aux_gfclk_mux {
+	mcasp5_aux_gfclk_mux: mcasp5_aux_gfclk_mux@1878 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
@@ -1824,7 +1832,7 @@
 		reg = <0x1878>;
 	};
 
-	mcasp6_ahclkx_mux: mcasp6_ahclkx_mux {
+	mcasp6_ahclkx_mux: mcasp6_ahclkx_mux@1904 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
@@ -1832,7 +1840,7 @@
 		reg = <0x1904>;
 	};
 
-	mcasp6_aux_gfclk_mux: mcasp6_aux_gfclk_mux {
+	mcasp6_aux_gfclk_mux: mcasp6_aux_gfclk_mux@1904 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
@@ -1840,7 +1848,7 @@
 		reg = <0x1904>;
 	};
 
-	mcasp7_ahclkx_mux: mcasp7_ahclkx_mux {
+	mcasp7_ahclkx_mux: mcasp7_ahclkx_mux@1908 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
@@ -1848,7 +1856,7 @@
 		reg = <0x1908>;
 	};
 
-	mcasp7_aux_gfclk_mux: mcasp7_aux_gfclk_mux {
+	mcasp7_aux_gfclk_mux: mcasp7_aux_gfclk_mux@1908 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
@@ -1856,7 +1864,7 @@
 		reg = <0x1908>;
 	};
 
-	mcasp8_ahclk_mux: mcasp8_ahclk_mux {
+	mcasp8_ahclkx_mux: mcasp8_ahclkx_mux@1890 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
@@ -1864,7 +1872,7 @@
 		reg = <0x1890>;
 	};
 
-	mcasp8_aux_gfclk_mux: mcasp8_aux_gfclk_mux {
+	mcasp8_aux_gfclk_mux: mcasp8_aux_gfclk_mux@1890 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&per_abe_x1_gfclk2_div>, <&video1_clk2_div>, <&video2_clk2_div>, <&hdmi_clk2_div>;
@@ -1872,7 +1880,7 @@
 		reg = <0x1890>;
 	};
 
-	mmc1_fclk_mux: mmc1_fclk_mux {
+	mmc1_fclk_mux: mmc1_fclk_mux@1328 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_128m_clk>, <&dpll_per_m2x2_ck>;
@@ -1880,7 +1888,7 @@
 		reg = <0x1328>;
 	};
 
-	mmc1_fclk_div: mmc1_fclk_div {
+	mmc1_fclk_div: mmc1_fclk_div@1328 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&mmc1_fclk_mux>;
@@ -1890,7 +1898,7 @@
 		ti,index-power-of-two;
 	};
 
-	mmc2_fclk_mux: mmc2_fclk_mux {
+	mmc2_fclk_mux: mmc2_fclk_mux@1330 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_128m_clk>, <&dpll_per_m2x2_ck>;
@@ -1898,7 +1906,7 @@
 		reg = <0x1330>;
 	};
 
-	mmc2_fclk_div: mmc2_fclk_div {
+	mmc2_fclk_div: mmc2_fclk_div@1330 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&mmc2_fclk_mux>;
@@ -1908,7 +1916,7 @@
 		ti,index-power-of-two;
 	};
 
-	mmc3_gfclk_mux: mmc3_gfclk_mux {
+	mmc3_gfclk_mux: mmc3_gfclk_mux@1820 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -1916,7 +1924,7 @@
 		reg = <0x1820>;
 	};
 
-	mmc3_gfclk_div: mmc3_gfclk_div {
+	mmc3_gfclk_div: mmc3_gfclk_div@1820 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&mmc3_gfclk_mux>;
@@ -1926,7 +1934,7 @@
 		ti,index-power-of-two;
 	};
 
-	mmc4_gfclk_mux: mmc4_gfclk_mux {
+	mmc4_gfclk_mux: mmc4_gfclk_mux@1828 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -1934,7 +1942,7 @@
 		reg = <0x1828>;
 	};
 
-	mmc4_gfclk_div: mmc4_gfclk_div {
+	mmc4_gfclk_div: mmc4_gfclk_div@1828 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&mmc4_gfclk_mux>;
@@ -1944,7 +1952,7 @@
 		ti,index-power-of-two;
 	};
 
-	qspi_gfclk_mux: qspi_gfclk_mux {
+	qspi_gfclk_mux: qspi_gfclk_mux@1838 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_128m_clk>, <&dpll_per_h13x2_ck>;
@@ -1952,7 +1960,7 @@
 		reg = <0x1838>;
 	};
 
-	qspi_gfclk_div: qspi_gfclk_div {
+	qspi_gfclk_div: qspi_gfclk_div@1838 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&qspi_gfclk_mux>;
@@ -1962,7 +1970,7 @@
 		ti,index-power-of-two;
 	};
 
-	timer10_gfclk_mux: timer10_gfclk_mux {
+	timer10_gfclk_mux: timer10_gfclk_mux@1728 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -1970,7 +1978,7 @@
 		reg = <0x1728>;
 	};
 
-	timer11_gfclk_mux: timer11_gfclk_mux {
+	timer11_gfclk_mux: timer11_gfclk_mux@1730 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -1978,7 +1986,7 @@
 		reg = <0x1730>;
 	};
 
-	timer13_gfclk_mux: timer13_gfclk_mux {
+	timer13_gfclk_mux: timer13_gfclk_mux@17c8 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -1986,7 +1994,7 @@
 		reg = <0x17c8>;
 	};
 
-	timer14_gfclk_mux: timer14_gfclk_mux {
+	timer14_gfclk_mux: timer14_gfclk_mux@17d0 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -1994,7 +2002,7 @@
 		reg = <0x17d0>;
 	};
 
-	timer15_gfclk_mux: timer15_gfclk_mux {
+	timer15_gfclk_mux: timer15_gfclk_mux@17d8 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -2002,7 +2010,7 @@
 		reg = <0x17d8>;
 	};
 
-	timer16_gfclk_mux: timer16_gfclk_mux {
+	timer16_gfclk_mux: timer16_gfclk_mux@1830 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -2010,7 +2018,7 @@
 		reg = <0x1830>;
 	};
 
-	timer2_gfclk_mux: timer2_gfclk_mux {
+	timer2_gfclk_mux: timer2_gfclk_mux@1738 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -2018,7 +2026,7 @@
 		reg = <0x1738>;
 	};
 
-	timer3_gfclk_mux: timer3_gfclk_mux {
+	timer3_gfclk_mux: timer3_gfclk_mux@1740 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -2026,7 +2034,7 @@
 		reg = <0x1740>;
 	};
 
-	timer4_gfclk_mux: timer4_gfclk_mux {
+	timer4_gfclk_mux: timer4_gfclk_mux@1748 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -2034,7 +2042,7 @@
 		reg = <0x1748>;
 	};
 
-	timer9_gfclk_mux: timer9_gfclk_mux {
+	timer9_gfclk_mux: timer9_gfclk_mux@1750 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&timer_sys_clk_div>, <&sys_32k_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&abe_giclk_div>, <&video1_div_clk>, <&video2_div_clk>, <&hdmi_div_clk>;
@@ -2042,7 +2050,7 @@
 		reg = <0x1750>;
 	};
 
-	uart1_gfclk_mux: uart1_gfclk_mux {
+	uart1_gfclk_mux: uart1_gfclk_mux@1840 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -2050,7 +2058,7 @@
 		reg = <0x1840>;
 	};
 
-	uart2_gfclk_mux: uart2_gfclk_mux {
+	uart2_gfclk_mux: uart2_gfclk_mux@1848 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -2058,7 +2066,7 @@
 		reg = <0x1848>;
 	};
 
-	uart3_gfclk_mux: uart3_gfclk_mux {
+	uart3_gfclk_mux: uart3_gfclk_mux@1850 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -2066,7 +2074,7 @@
 		reg = <0x1850>;
 	};
 
-	uart4_gfclk_mux: uart4_gfclk_mux {
+	uart4_gfclk_mux: uart4_gfclk_mux@1858 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -2074,7 +2082,7 @@
 		reg = <0x1858>;
 	};
 
-	uart5_gfclk_mux: uart5_gfclk_mux {
+	uart5_gfclk_mux: uart5_gfclk_mux@1870 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -2082,7 +2090,7 @@
 		reg = <0x1870>;
 	};
 
-	uart7_gfclk_mux: uart7_gfclk_mux {
+	uart7_gfclk_mux: uart7_gfclk_mux@18d0 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -2090,7 +2098,7 @@
 		reg = <0x18d0>;
 	};
 
-	uart8_gfclk_mux: uart8_gfclk_mux {
+	uart8_gfclk_mux: uart8_gfclk_mux@18e0 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -2098,7 +2106,7 @@
 		reg = <0x18e0>;
 	};
 
-	uart9_gfclk_mux: uart9_gfclk_mux {
+	uart9_gfclk_mux: uart9_gfclk_mux@18e8 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_48m_fclk>, <&dpll_per_m2x2_ck>;
@@ -2106,7 +2114,7 @@
 		reg = <0x18e8>;
 	};
 
-	vip1_gclk_mux: vip1_gclk_mux {
+	vip1_gclk_mux: vip1_gclk_mux@1020 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&l3_iclk_div>, <&dpll_core_h23x2_ck>;
@@ -2114,7 +2122,7 @@
 		reg = <0x1020>;
 	};
 
-	vip2_gclk_mux: vip2_gclk_mux {
+	vip2_gclk_mux: vip2_gclk_mux@1028 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&l3_iclk_div>, <&dpll_core_h23x2_ck>;
@@ -2122,7 +2130,7 @@
 		reg = <0x1028>;
 	};
 
-	vip3_gclk_mux: vip3_gclk_mux {
+	vip3_gclk_mux: vip3_gclk_mux@1030 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&l3_iclk_div>, <&dpll_core_h23x2_ck>;
@@ -2139,7 +2147,7 @@
 };
 
 &scm_conf_clocks {
-	dss_deshdcp_clk: dss_deshdcp_clk {
+	dss_deshdcp_clk: dss_deshdcp_clk@558 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3_iclk_div>;
@@ -2147,7 +2155,7 @@
 		reg = <0x558>;
 	};
 
-       ehrpwm0_tbclk: ehrpwm0_tbclk {
+       ehrpwm0_tbclk: ehrpwm0_tbclk@558 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4_root_clk_div>;
@@ -2155,7 +2163,7 @@
 		reg = <0x0558>;
 	};
 
-	ehrpwm1_tbclk: ehrpwm1_tbclk {
+	ehrpwm1_tbclk: ehrpwm1_tbclk@558 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4_root_clk_div>;
@@ -2163,11 +2171,19 @@
 		reg = <0x0558>;
 	};
 
-	ehrpwm2_tbclk: ehrpwm2_tbclk {
+	ehrpwm2_tbclk: ehrpwm2_tbclk@558 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4_root_clk_div>;
 		ti,bit-shift = <22>;
 		reg = <0x0558>;
 	};
+
+	sys_32k_ck: sys_32k_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sys_clk32_crystal_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>;
+		ti,bit-shift = <8>;
+		reg = <0x6c4>;
+	};
 };
diff --git a/arch/arm/boot/dts/ea3250.dts b/arch/arm/boot/dts/ea3250.dts
deleted file mode 100644
index a4a281f..0000000
--- a/arch/arm/boot/dts/ea3250.dts
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Embedded Artists LPC3250 board
- *
- * Copyright 2012 Roland Stigge <stigge@antcom.de>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-/dts-v1/;
-#include "lpc32xx.dtsi"
-
-/ {
-	model = "Embedded Artists LPC3250 board based on NXP LPC3250";
-	compatible = "ea,ea3250", "nxp,lpc3250";
-	#address-cells = <1>;
-	#size-cells = <1>;
-
-	memory {
-		device_type = "memory";
-		reg = <0x80000000 0x4000000>;
-	};
-
-	ahb {
-		mac: ethernet@31060000 {
-			phy-mode = "rmii";
-			use-iram;
-		};
-
-		/* 128MB Flash via SLC NAND controller */
-		slc: flash@20020000 {
-			status = "okay";
-			#address-cells = <1>;
-			#size-cells = <1>;
-
-			nxp,wdr-clks = <14>;
-			nxp,wwidth = <260000000>;
-			nxp,whold = <104000000>;
-			nxp,wsetup = <200000000>;
-			nxp,rdr-clks = <14>;
-			nxp,rwidth = <34666666>;
-			nxp,rhold = <104000000>;
-			nxp,rsetup = <200000000>;
-			nand-on-flash-bbt;
-			gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */
-
-			mtd0@00000000 {
-				label = "ea3250-boot";
-				reg = <0x00000000 0x00080000>;
-				read-only;
-			};
-
-			mtd1@00080000 {
-				label = "ea3250-uboot";
-				reg = <0x00080000 0x000c0000>;
-				read-only;
-			};
-
-			mtd2@00140000 {
-				label = "ea3250-kernel";
-				reg = <0x00140000 0x00400000>;
-			};
-
-			mtd3@00540000 {
-				label = "ea3250-rootfs";
-				reg = <0x00540000 0x07ac0000>;
-			};
-		};
-
-		apb {
-			uart5: serial@40090000 {
-				status = "okay";
-			};
-
-			uart3: serial@40080000 {
-				status = "okay";
-			};
-
-			uart6: serial@40098000 {
-				status = "okay";
-			};
-
-			i2c1: i2c@400A0000 {
-				clock-frequency = <100000>;
-
-				eeprom@50 {
-					compatible = "at,24c256";
-					reg = <0x50>;
-				};
-
-				eeprom@57 {
-					compatible = "at,24c64";
-					reg = <0x57>;
-				};
-
-				uda1380: uda1380@18 {
-					compatible = "nxp,uda1380";
-					reg = <0x18>;
-					power-gpio = <&gpio 0x59 0>;
-					reset-gpio = <&gpio 0x51 0>;
-					dac-clk = "wspll";
-				};
-
-				pca9532: pca9532@60 {
-					compatible = "nxp,pca9532";
-					gpio-controller;
-					#gpio-cells = <2>;
-					reg = <0x60>;
-				};
-			};
-
-			i2c2: i2c@400A8000 {
-				clock-frequency = <100000>;
-			};
-
-			sd@20098000 {
-				wp-gpios = <&pca9532 5 0>;
-				cd-gpios = <&pca9532 4 0>;
-				cd-inverted;
-				bus-width = <4>;
-				status = "okay";
-			};
-		};
-
-		fab {
-			uart1: serial@40014000 {
-				status = "okay";
-			};
-
-			/* 3-axis accelerometer X,Y,Z (or AD-IN instead of Z) */
-			adc@40048000 {
-				status = "okay";
-			};
-		};
-	};
-
-	gpio_keys {
-		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
-		autorepeat;
-		button@21 {
-			label = "Interrupt Key";
-			linux,code = <103>;
-			gpios = <&gpio 4 1 0>; /* GPI_P3 1 */
-		};
-		key1 {
-			label = "KEY1";
-			linux,code = <1>;
-			gpios = <&pca9532 0 0>;
-		};
-		key2 {
-			label = "KEY2";
-			linux,code = <2>;
-			gpios = <&pca9532 1 0>;
-		};
-		key3 {
-			label = "KEY3";
-			linux,code = <3>;
-			gpios = <&pca9532 2 0>;
-		};
-		key4 {
-			label = "KEY4";
-			linux,code = <4>;
-			gpios = <&pca9532 3 0>;
-		};
-		joy0 {
-			label = "Joystick Key 0";
-			linux,code = <10>;
-			gpios = <&gpio 2 0 0>; /* P2.0 */
-		};
-		joy1 {
-			label = "Joystick Key 1";
-			linux,code = <11>;
-			gpios = <&gpio 2 1 0>; /* P2.1 */
-		};
-		joy2 {
-			label = "Joystick Key 2";
-			linux,code = <12>;
-			gpios = <&gpio 2 2 0>; /* P2.2 */
-		};
-		joy3 {
-			label = "Joystick Key 3";
-			linux,code = <13>;
-			gpios = <&gpio 2 3 0>; /* P2.3 */
-		};
-		joy4 {
-			label = "Joystick Key 4";
-			linux,code = <14>;
-			gpios = <&gpio 2 4 0>; /* P2.4 */
-		};
-	};
-
-	leds {
-		compatible = "gpio-leds";
-
-		/* LEDs on OEM Board */
-
-		led1 {
-			gpios = <&gpio 5 14 1>; /* GPO_P3 14, GPIO 93, active low */
-			linux,default-trigger = "timer";
-			default-state = "off";
-		};
-
-		led2 {
-			gpios = <&gpio 2 10 1>; /* P2.10, active low */
-			default-state = "off";
-		};
-
-		led3 {
-			gpios = <&gpio 2 11 1>; /* P2.11, active low */
-			default-state = "off";
-		};
-
-		led4 {
-			gpios = <&gpio 2 12 1>; /* P2.12, active low */
-			default-state = "off";
-		};
-
-		/* LEDs on Base Board */
-
-		lede1 {
-			gpios = <&pca9532 8 0>;
-			default-state = "off";
-		};
-		lede2 {
-			gpios = <&pca9532 9 0>;
-			default-state = "off";
-		};
-		lede3 {
-			gpios = <&pca9532 10 0>;
-			default-state = "off";
-		};
-		lede4 {
-			gpios = <&pca9532 11 0>;
-			default-state = "off";
-		};
-		lede5 {
-			gpios = <&pca9532 12 0>;
-			default-state = "off";
-		};
-		lede6 {
-			gpios = <&pca9532 13 0>;
-			default-state = "off";
-		};
-		lede7 {
-			gpios = <&pca9532 14 0>;
-			default-state = "off";
-		};
-		lede8 {
-			gpios = <&pca9532 15 0>;
-			default-state = "off";
-		};
-	};
-};
-
-/* Here, choose exactly one from: ohci, usbd */
-&ohci /* &usbd */ {
-	transceiver = <&isp1301>;
-	status = "okay";
-};
-
-&i2cusb {
-	clock-frequency = <100000>;
-
-	isp1301: usb-transceiver@2d {
-		compatible = "nxp,isp1301";
-		reg = <0x2d>;
-	};
-};
diff --git a/arch/arm/boot/dts/emev2-kzm9d.dts b/arch/arm/boot/dts/emev2-kzm9d.dts
index 8c24975..a35b851 100644
--- a/arch/arm/boot/dts/emev2-kzm9d.dts
+++ b/arch/arm/boot/dts/emev2-kzm9d.dts
@@ -105,8 +105,8 @@
 
 &pfc {
 	uart1_pins: serial@e1030000 {
-		renesas,groups = "uart1_ctrl", "uart1_data";
-		renesas,function = "uart1";
+		groups = "uart1_ctrl", "uart1_data";
+		function = "uart1";
 	};
 };
 
diff --git a/arch/arm/boot/dts/exynos3250-artik5-eval.dts b/arch/arm/boot/dts/exynos3250-artik5-eval.dts
new file mode 100644
index 0000000..be4d6aa
--- /dev/null
+++ b/arch/arm/boot/dts/exynos3250-artik5-eval.dts
@@ -0,0 +1,43 @@
+/*
+ * Samsung's Exynos3250 based ARTIK5 evaluation board device tree source
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Device tree source file for Samsung's ARTIK5 evaluation board
+ * which is based on Samsung Exynos3250 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+#include "exynos3250-artik5.dtsi"
+
+/ {
+	model = "Samsung ARTIK5 evaluation board";
+	compatible = "samsung,artik5-eval", "samsung,artik5",
+			"samsung,exynos3250", "samsung,exynos3";
+};
+
+&mshc_2 {
+	num-slots = <1>;
+	cap-sd-highspeed;
+	disable-wp;
+	vqmmc-supply = <&ldo3_reg>;
+	card-detect-delay = <200>;
+	clock-frequency = <100000000>;
+	clock-freq-min-max = <400000 100000000>;
+	samsung,dw-mshc-ciu-div = <1>;
+	samsung,dw-mshc-sdr-timing = <0 1>;
+	samsung,dw-mshc-ddr-timing = <1 2>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sd2_cmd &sd2_clk &sd2_cd &sd2_bus1 &sd2_bus4>;
+	bus-width = <4>;
+	status = "okay";
+};
+
+&serial_2 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi
new file mode 100644
index 0000000..130e946
--- /dev/null
+++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi
@@ -0,0 +1,334 @@
+/*
+ * Samsung's Exynos3250 based ARTIK5 module device tree source
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Device tree source file for Samsung's ARTIK5 module which is based on
+ * Samsung Exynos3250 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "exynos3250.dtsi"
+#include <dt-bindings/clock/samsung,s2mps11.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+	compatible = "samsung,artik5", "samsung,exynos3250", "samsung,exynos3";
+
+	chosen {
+		stdout-path = &serial_2;
+	};
+
+	memory {
+		reg = <0x40000000 0x1ff00000>;
+	};
+
+	firmware@0205f000 {
+		compatible = "samsung,secure-firmware";
+		reg = <0x0205f000 0x1000>;
+	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			cooling-maps {
+				map0 {
+					/* Corresponds to 500MHz */
+					cooling-device = <&cpu0 5 5>;
+				};
+				map1 {
+					/* Corresponds to 200MHz */
+					cooling-device = <&cpu0 8 8>;
+				};
+			};
+		};
+	};
+};
+
+&adc {
+	vdd-supply = <&ldo7_reg>;
+	assigned-clocks = <&cmu CLK_SCLK_TSADC>;
+	assigned-clock-rates = <6000000>;
+};
+
+&cpu0 {
+	cpu0-supply = <&buck2_reg>;
+};
+
+&i2c_0 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	samsung,i2c-sda-delay = <100>;
+	samsung,i2c-slave-addr = <0x10>;
+	samsung,i2c-max-bus-freq = <100000>;
+	status = "okay";
+
+	s2mps14_pmic@66 {
+		compatible = "samsung,s2mps14-pmic";
+		interrupt-parent = <&gpx3>;
+		interrupts = <5 IRQ_TYPE_NONE>;
+		reg = <0x66>;
+
+		s2mps14_osc: clocks {
+			compatible = "samsung,s2mps14-clk";
+			#clock-cells = <1>;
+			clock-output-names = "s2mps14_ap", "unused",
+				"s2mps14_bt";
+		};
+
+		regulators {
+			ldo1_reg: LDO1 {
+				/* VDD_ALIVE15x */
+				regulator-name = "VLDO1_1.0V";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+			};
+
+			ldo2_reg: LDO2 {
+				/* VDDQM176 ~ VDDQM185 */
+				regulator-name = "VLDO2_1.2V";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+			};
+
+			ldo3_reg: LDO3 {
+				/*
+				 * VDD1_E106 ~ VDD1_E111
+				 * DVDD_RTC_AP, DVDD_MMC2_AP
+				 */
+				regulator-name = "VLDO3_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+
+			ldo4_reg: LDO4 {
+				/*  AVDD_PLL1120 ~ AVDD_PLL11201 */
+				regulator-name = "VLDO4_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+
+			ldo5_reg: LDO5 {
+				/* VDDI_PLL_ISO141 ~ VDDI_PLL_ISO142 */
+				regulator-name = "VLDO5_1.0V";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+			};
+
+			ldo6_reg: LDO6 {
+				/* VDD_USB, VDD10_HSIC */
+				regulator-name = "VLDO6_1.0V";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+			};
+
+			ldo7_reg: LDO7 {
+				/*
+				 * VDD18P, AVDD18_TS, AVDD18_HSIC, AVDD_PLL2,
+				 * AVDD_ADC, AVDD_ABB_0, M4S_VDD18
+				 */
+				regulator-name = "VLDO7_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+
+			ldo8_reg: LDO8 {
+				/* AVDD33_UOTG */
+				regulator-name = "VLDO8_3.0V";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-always-on;
+			};
+
+			ldo9_reg: LDO9 {
+				/* VDDQ_E86 ~ VDDQ_E105*/
+				regulator-name = "VLDO9_1.2V";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+			};
+
+			ldo10_reg: LDO10 {
+				regulator-name = "VLDO10_1.0V";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+			};
+
+			ldo11_reg: LDO11 {
+				/* VDD74 ~ VDD75 */
+				regulator-name = "VLDO11_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				samsung,ext-control-gpios = <&gpk0 2 GPIO_ACTIVE_HIGH>;
+			};
+
+			ldo12_reg: LDO12 {
+				/* VDD72 ~ VDD73 */
+				regulator-name = "VLDO12_2.8V";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				samsung,ext-control-gpios = <&gpk0 2 GPIO_ACTIVE_HIGH>;
+			};
+
+			ldo13_reg: LDO13 {
+				regulator-name = "VLDO13_2.8V";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+			};
+
+			ldo14_reg: LDO14 {
+				regulator-name = "VLDO14_2.7V";
+				regulator-min-microvolt = <2700000>;
+				regulator-max-microvolt = <2700000>;
+			};
+
+			ldo15_reg: LDO15 {
+				regulator-name = "VLDO_3.3V";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo16_reg: LDO16 {
+				regulator-name = "VLDO16_3.3V";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo17_reg: LDO17 {
+				regulator-name = "VLDO17_3.0V";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+			};
+
+			ldo18_reg: LDO18 {
+				/* DVDD_MMC2_AP */
+				regulator-name = "VLDO18_2.8V";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+			};
+
+			ldo19_reg: LDO19 {
+				regulator-name = "VLDO19_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo20_reg: LDO20 {
+				regulator-name = "VLDO20_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo21_reg: LDO21 {
+				regulator-name = "VLDO21_1.25V";
+				regulator-min-microvolt = <1250000>;
+				regulator-max-microvolt = <1250000>;
+			};
+
+			ldo22_reg: LDO22 {
+				regulator-name = "VLDO22_1.2V";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+			};
+
+			ldo23_reg: LDO23 {
+				/* Xi2c3_SDA/SCL, Xi2c7_SDA/SCL, WLAN_SDIO */
+				regulator-name = "VLDO23_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo24_reg: LDO24 {
+				regulator-name = "VLDO24_3.0V";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+			};
+
+			ldo25_reg: LDO25 {
+				regulator-name = "VLDO25_3.0V";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+			};
+
+			buck1_reg: BUCK1 {
+				/* VDD_MIF */
+				regulator-name = "VBUCK1_1.0V";
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+			};
+
+			buck2_reg: BUCK2 {
+				/* VDD_CPU */
+				regulator-name = "VBUCK2_1.2V";
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+			};
+
+			buck3_reg: BUCK3 {
+				/* VDD_G3D */
+				regulator-name = "VBUCK3_1.0V";
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+			};
+
+			buck4_reg: BUCK4 {
+				regulator-name = "VBUCK4_1.95V";
+				regulator-min-microvolt = <1950000>;
+				regulator-max-microvolt = <1950000>;
+				regulator-always-on;
+			};
+
+			buck5_reg: BUCK5 {
+				regulator-name = "VBUCK5_1.35V";
+				regulator-min-microvolt = <1350000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-always-on;
+			};
+		};
+	};
+};
+
+&mshc_0 {
+	num-slots = <1>;
+	non-removable;
+	cap-mmc-highspeed;
+	card-detect-delay = <200>;
+	vmmc-supply = <&ldo12_reg>;
+	clock-frequency = <100000000>;
+	clock-freq-min-max = <400000 100000000>;
+	samsung,dw-mshc-ciu-div = <1>;
+	samsung,dw-mshc-sdr-timing = <0 1>;
+	samsung,dw-mshc-ddr-timing = <1 2>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8>;
+	bus-width = <8>;
+	status = "okay";
+};
+
+&rtc {
+	clocks = <&cmu CLK_RTC>, <&s2mps14_osc S2MPS11_CLK_AP>;
+	clock-names = "rtc", "rtc_src";
+	status = "okay";
+};
+
+&tmu {
+	status = "okay";
+};
+
+&xusbxti {
+	clock-frequency = <24000000>;
+};
diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
index 9e2840b..267f81a 100644
--- a/arch/arm/boot/dts/exynos3250-monk.dts
+++ b/arch/arm/boot/dts/exynos3250-monk.dts
@@ -558,7 +558,17 @@
 
 &pinctrl_1 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&sleep1>;
+	pinctrl-0 = <&initial1 &sleep1>;
+
+	initial1: initial-state {
+		PIN_IN(gpk2-0, DOWN, LV1);
+		PIN_IN(gpk2-1, DOWN, LV1);
+		PIN_IN(gpk2-2, DOWN, LV1);
+		PIN_IN(gpk2-3, DOWN, LV1);
+		PIN_IN(gpk2-4, DOWN, LV1);
+		PIN_IN(gpk2-5, DOWN, LV1);
+		PIN_IN(gpk2-6, DOWN, LV1);
+	};
 
 	sleep1: sleep-state {
 		PIN_SLP(gpe0-0, PREV, NONE);
diff --git a/arch/arm/boot/dts/exynos3250-pinctrl.dtsi b/arch/arm/boot/dts/exynos3250-pinctrl.dtsi
index 5ab81c3..40ea7de 100644
--- a/arch/arm/boot/dts/exynos3250-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos3250-pinctrl.dtsi
@@ -16,11 +16,49 @@
 #define PIN_PULL_DOWN		1
 #define PIN_PULL_UP		3
 
+#define PIN_DRV_LV1		0
+#define PIN_DRV_LV2		2
+#define PIN_DRV_LV3		1
+#define PIN_DRV_LV4		3
+
 #define PIN_PDN_OUT0		0
 #define PIN_PDN_OUT1		1
 #define PIN_PDN_INPUT		2
 #define PIN_PDN_PREV		3
 
+#define PIN_IN(_pin, _pull, _drv)			\
+	_pin {						\
+		samsung,pins = #_pin;			\
+		samsung,pin-function = <0>;		\
+		samsung,pin-pud = <PIN_PULL_ ##_pull>;	\
+		samsung,pin-drv = <PIN_DRV_ ##_drv>;	\
+	}
+
+#define PIN_OUT(_pin, _drv)				\
+	_pin {						\
+		samsung,pins = #_pin;			\
+		samsung,pin-function = <1>;		\
+		samsung,pin-pud = <0>;			\
+		samsung,pin-drv = <PIN_DRV_ ##_drv>;	\
+	}
+
+#define PIN_OUT_SET(_pin, _val, _drv)			\
+	_pin {						\
+		samsung,pins = #_pin;			\
+		samsung,pin-function = <1>;		\
+		samsung,pin-pud = <0>;			\
+		samsung,pin-drv = <PIN_DRV_ ##_drv>;	\
+		samsung,pin-val = <_val>;		\
+	}
+
+#define PIN_CFG(_pin, _sel, _pull, _drv)		\
+	_pin {						\
+		samsung,pins = #_pin;			\
+		samsung,pin-function = <_sel>;		\
+		samsung,pin-pud = <PIN_PULL_ ##_pull>;	\
+		samsung,pin-drv = <PIN_DRV_ ##_drv>;	\
+	}
+
 #define PIN_SLP(_pin, _mode, _pull)				\
 	_pin {							\
 		samsung,pins = #_pin;				\
@@ -120,6 +158,13 @@
 		samsung,pin-drv = <0>;
 	};
 
+	uart2_data: uart2-data {
+		samsung,pins = "gpa1-0", "gpa1-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
 	i2c3_bus: i2c3-bus {
 		samsung,pins = "gpa1-2", "gpa1-3";
 		samsung,pin-function = <3>;
@@ -445,6 +490,41 @@
 		samsung,pin-drv = <3>;
 	};
 
+	sd2_clk: sd2-clk {
+		samsung,pins = "gpk2-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_cmd: sd2-cmd {
+		samsung,pins = "gpk2-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_cd: sd2-cd {
+		samsung,pins = "gpk2-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_bus1: sd2-bus-width1 {
+		samsung,pins = "gpk2-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_bus4: sd2-bus-width4 {
+		samsung,pins = "gpk2-4", "gpk2-5", "gpk2-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
 	cam_port_b_io: cam-port-b-io {
 		samsung,pins = "gpm0-0", "gpm0-1", "gpm0-2", "gpm0-3",
 				"gpm0-4", "gpm0-5", "gpm0-6", "gpm0-7",
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index 1f102f3..31eb09b 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -681,7 +681,21 @@
 
 &pinctrl_0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&sleep0>;
+	pinctrl-0 = <&initial0 &sleep0>;
+
+	initial0: initial-state {
+		PIN_IN(gpa1-4, DOWN, LV1);
+		PIN_IN(gpa1-5, DOWN, LV1);
+
+		PIN_IN(gpc0-0, DOWN, LV1);
+		PIN_IN(gpc0-1, DOWN, LV1);
+		PIN_IN(gpc0-2, DOWN, LV1);
+		PIN_IN(gpc0-3, DOWN, LV1);
+		PIN_IN(gpc0-4, DOWN, LV1);
+
+		PIN_IN(gpd0-0, DOWN, LV1);
+		PIN_IN(gpd0-1, DOWN, LV1);
+	};
 
 	sleep0: sleep-state {
 		PIN_SLP(gpa0-0, INPUT, DOWN);
@@ -735,7 +749,60 @@
 
 &pinctrl_1 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&sleep1>;
+	pinctrl-0 = <&initial1 &sleep1>;
+
+	initial1: initial-state {
+		PIN_IN(gpe0-6, DOWN, LV1);
+		PIN_IN(gpe0-7, DOWN, LV1);
+
+		PIN_IN(gpe1-0, DOWN, LV1);
+		PIN_IN(gpe1-3, DOWN, LV1);
+		PIN_IN(gpe1-4, DOWN, LV1);
+		PIN_IN(gpe1-5, DOWN, LV1);
+		PIN_IN(gpe1-6, DOWN, LV1);
+
+		PIN_IN(gpk2-0, DOWN, LV1);
+		PIN_IN(gpk2-1, DOWN, LV1);
+		PIN_IN(gpk2-2, DOWN, LV1);
+		PIN_IN(gpk2-3, DOWN, LV1);
+		PIN_IN(gpk2-4, DOWN, LV1);
+		PIN_IN(gpk2-5, DOWN, LV1);
+		PIN_IN(gpk2-6, DOWN, LV1);
+
+		PIN_IN(gpm0-0, DOWN, LV1);
+		PIN_IN(gpm0-1, DOWN, LV1);
+		PIN_IN(gpm0-2, DOWN, LV1);
+		PIN_IN(gpm0-3, DOWN, LV1);
+		PIN_IN(gpm0-4, DOWN, LV1);
+		PIN_IN(gpm0-5, DOWN, LV1);
+		PIN_IN(gpm0-6, DOWN, LV1);
+		PIN_IN(gpm0-7, DOWN, LV1);
+
+		PIN_IN(gpm1-0, DOWN, LV1);
+		PIN_IN(gpm1-1, DOWN, LV1);
+		PIN_IN(gpm1-2, DOWN, LV1);
+		PIN_IN(gpm1-3, DOWN, LV1);
+		PIN_IN(gpm1-4, DOWN, LV1);
+		PIN_IN(gpm1-5, DOWN, LV1);
+		PIN_IN(gpm1-6, DOWN, LV1);
+
+		PIN_IN(gpm2-0, DOWN, LV1);
+		PIN_IN(gpm2-1, DOWN, LV1);
+
+		PIN_IN(gpm3-0, DOWN, LV1);
+		PIN_IN(gpm3-1, DOWN, LV1);
+		PIN_IN(gpm3-2, DOWN, LV1);
+		PIN_IN(gpm3-3, DOWN, LV1);
+		PIN_IN(gpm3-4, DOWN, LV1);
+
+		PIN_IN(gpm4-1, DOWN, LV1);
+		PIN_IN(gpm4-2, DOWN, LV1);
+		PIN_IN(gpm4-3, DOWN, LV1);
+		PIN_IN(gpm4-4, DOWN, LV1);
+		PIN_IN(gpm4-5, DOWN, LV1);
+		PIN_IN(gpm4-6, DOWN, LV1);
+		PIN_IN(gpm4-7, DOWN, LV1);
+	};
 
 	sleep1: sleep-state {
 		PIN_SLP(gpe0-0, PREV, NONE);
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 137f901..094782b 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -31,6 +31,7 @@
 		pinctrl1 = &pinctrl_1;
 		mshc0 = &mshc_0;
 		mshc1 = &mshc_1;
+		mshc2 = &mshc_2;
 		spi0 = &spi_0;
 		spi1 = &spi_1;
 		i2c0 = &i2c_0;
@@ -43,6 +44,7 @@
 		i2c7 = &i2c_7;
 		serial0 = &serial_0;
 		serial1 = &serial_1;
+		serial2 = &serial_2;
 	};
 
 	cpus {
@@ -153,7 +155,7 @@
 			interrupt-parent = <&gic>;
 		};
 
-		mipi_phy: video-phy@10020710 {
+		mipi_phy: video-phy {
 			compatible = "samsung,s5pv210-mipi-video-phy";
 			#phy-cells = <1>;
 			syscon = <&pmu_system_controller>;
@@ -357,6 +359,18 @@
 			status = "disabled";
 		};
 
+		mshc_2: mshc@12530000 {
+			compatible = "samsung,exynos5250-dw-mshc";
+			reg = <0x12530000 0x1000>;
+			interrupts = <0 144 0>;
+			clocks = <&cmu CLK_SDMMC2>, <&cmu CLK_SCLK_MMC2>;
+			clock-names = "biu", "ciu";
+			fifo-depth = <0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
 		exynos_usbphy: exynos-usbphy@125B0000 {
 			compatible = "samsung,exynos3250-usb2-phy";
 			reg = <0x125B0000 0x100>;
@@ -452,6 +466,17 @@
 			status = "disabled";
 		};
 
+		serial_2: serial@13820000 {
+			compatible = "samsung,exynos4210-uart";
+			reg = <0x13820000 0x100>;
+			interrupts = <0 111 0>;
+			clocks = <&cmu CLK_UART2>, <&cmu CLK_SCLK_UART2>;
+			clock-names = "uart", "clk_uart_baud0";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart2_data>;
+			status = "disabled";
+		};
+
 		i2c_0: i2c@13860000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index c679b3c..ca8f3e3 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -77,12 +77,12 @@
 		reg = <0x10000000 0x100>;
 	};
 
-	sromc@12570000 {
-		compatible = "samsung,exynos-srom";
+	memory-controller@12570000 {
+		compatible = "samsung,exynos4210-srom";
 		reg = <0x12570000 0x14>;
 	};
 
-	mipi_phy: video-phy@10020710 {
+	mipi_phy: video-phy {
 		compatible = "samsung,s5pv210-mipi-video-phy";
 		#phy-cells = <1>;
 		syscon = <&pmu_system_controller>;
@@ -743,6 +743,18 @@
 		status = "disabled";
 	};
 
+	hdmicec: cec@100B0000 {
+		compatible = "samsung,s5p-cec";
+		reg = <0x100B0000 0x200>;
+		interrupts = <0 114 0>;
+		clocks = <&clock CLK_HDMI_CEC>;
+		clock-names = "hdmicec";
+		samsung,syscon-phandle = <&pmu_system_controller>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&hdmi_cec>;
+		status = "disabled";
+	};
+
 	mixer: mixer@12C10000 {
 		compatible = "samsung,exynos4210-mixer";
 		interrupts = <0 91 0>;
@@ -969,11 +981,18 @@
 		#iommu-cells = <0>;
 	};
 
+	sss: sss@10830000 {
+		compatible = "samsung,exynos4210-secss";
+		reg = <0x10830000 0x300>;
+		interrupts = <0 112 0>;
+		clocks = <&clock CLK_SSS>;
+		clock-names = "secss";
+	};
+
 	prng: rng@10830400 {
 		compatible = "samsung,exynos4-rng";
 		reg = <0x10830400 0x200>;
 		clocks = <&clock CLK_SSS>;
 		clock-names = "secss";
-		status = "disabled";
 	};
 };
diff --git a/arch/arm/boot/dts/exynos4210-pinctrl.dtsi b/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
index a7c2128..9331c62 100644
--- a/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
@@ -820,6 +820,13 @@
 			samsung,pin-pud = <1>;
 			samsung,pin-drv = <0>;
 		};
+
+		hdmi_cec: hdmi-cec {
+			samsung,pins = "gpx3-6";
+			samsung,pin-function = <3>;
+			samsung,pin-pud = <0>;
+			samsung,pin-drv = <0>;
+		};
 	};
 
 	pinctrl@03860000 {
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 1df2f0b..79d98303 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -298,6 +298,8 @@
 		compatible = "maxim,max8997-pmic";
 
 		reg = <0x66>;
+		interrupt-parent = <&gpx0>;
+		interrupts = <7 0>;
 
 		max8997,pmic-buck1-uses-gpio-dvs;
 		max8997,pmic-buck2-uses-gpio-dvs;
@@ -359,7 +361,7 @@
 			};
 
 			vusbdac_reg: LDO8 {
-			     regulator-name = "VUSB/VDAC_3.3V_C210";
+			     regulator-name = "VUSB+VDAC_3.3V_C210";
 			     regulator-min-microvolt = <3300000>;
 			     regulator-max-microvolt = <3300000>;
 			};
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 5e5d3fe..cab0f07 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -188,6 +188,10 @@
 	status = "okay";
 };
 
+&hdmicec {
+	status = "okay";
+};
+
 &hsotg {
 	dr_mode = "peripheral";
 	status = "okay";
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index ed017cc..5d1eaea 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -146,13 +146,13 @@
 			reg = <0x66>;
 
 			regulators {
-				esafeout1_reg: ESAFEOUT1@1 {
+				esafeout1_reg: ESAFEOUT1 {
 					regulator-name = "ESAFEOUT1";
 				};
-				esafeout2_reg: ESAFEOUT2@2 {
+				esafeout2_reg: ESAFEOUT2 {
 					regulator-name = "ESAFEOUT2";
 				};
-				charger_reg: CHARGER@0 {
+				charger_reg: CHARGER {
 					regulator-name = "CHARGER";
 					regulator-min-microamp = <60000>;
 					regulator-max-microamp = <2580000>;
@@ -251,7 +251,7 @@
 			"SPK", "SPKOUTRP";
 	};
 
-	thermistor-ap@0 {
+	thermistor-ap {
 		compatible = "ntc,ncp15wb473";
 		pullup-uv = <1800000>;	 /* VCC_1.8V_AP */
 		pullup-ohm = <100000>;	 /* 100K */
@@ -259,7 +259,7 @@
 		io-channels = <&adc 1>;  /* AP temperature */
 	};
 
-	thermistor-battery@1 {
+	thermistor-battery {
 		compatible = "ntc,ncp15wb473";
 		pullup-uv = <1800000>;	 /* VCC_1.8V_AP */
 		pullup-ohm = <100000>;	 /* 100K */
@@ -1234,10 +1234,6 @@
 	status = "okay";
 };
 
-&prng {
-	status = "okay";
-};
-
 &rtc {
 	status = "okay";
 	clocks = <&clock CLK_RTC>, <&max77686 MAX77686_CLK_AP>;
@@ -1276,7 +1272,7 @@
 	cs-gpios = <&gpb 5 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 
-	s5c73m3_spi: s5c73m3 {
+	s5c73m3_spi: s5c73m3@0 {
 		compatible = "samsung,s5c73m3";
 		spi-max-frequency = <50000000>;
 		reg = <0>;
diff --git a/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi b/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
index bac25c6..856b292 100644
--- a/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
@@ -885,6 +885,13 @@
 			samsung,pin-pud = <0>;
 			samsung,pin-drv = <0>;
 		};
+
+		hdmi_cec: hdmi-cec {
+			samsung,pins = "gpx3-6";
+			samsung,pin-function = <3>;
+			samsung,pin-pud = <0>;
+			samsung,pin-drv = <0>;
+		};
 	};
 
 	pinctrl_2: pinctrl@03860000 {
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index 84a23f9..b7490ea0 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -179,7 +179,7 @@
 			ranges;
 			status = "disabled";
 
-			pmu {
+			pmu@10020000 {
 				reg = <0x10020000 0x3000>;
 			};
 
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index 92313ca..d5c0f18a 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -31,8 +31,8 @@
 		reg = <0x10000000 0x100>;
 	};
 
-	sromc@12250000 {
-		compatible = "samsung,exynos-srom";
+	memory-controller@12250000 {
+		compatible = "samsung,exynos4210-srom";
 		reg = <0x12250000 0x14>;
 	};
 
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 8b2acc7..1e25152 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -133,7 +133,7 @@
 	display-timings {
 		native-mode = <&timing0>;
 
-		timing0: timing@0 {
+		timing0: timing {
 			/* 2560x1600 DP panel */
 			clock-frequency = <50000>;
 			hactive = <2560>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 0f5dcd4..0e2eb3f 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -29,7 +29,7 @@
 		bootargs = "root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC2,115200 init=/linuxrc";
 	};
 
-	vdd: fixed-regulator@0 {
+	vdd: fixed-regulator-vdd {
 		compatible = "regulator-fixed";
 		regulator-name = "vdd-supply";
 		regulator-min-microvolt = <1800000>;
@@ -37,7 +37,7 @@
 		regulator-always-on;
 	};
 
-	dbvdd: fixed-regulator@1 {
+	dbvdd: fixed-regulator-dbvdd {
 		compatible = "regulator-fixed";
 		regulator-name = "dbvdd-supply";
 		regulator-min-microvolt = <3300000>;
@@ -45,7 +45,7 @@
 		regulator-always-on;
 	};
 
-	spkvdd: fixed-regulator@2 {
+	spkvdd: fixed-regulator-spkvdd {
 		compatible = "regulator-fixed";
 		regulator-name = "spkvdd-supply";
 		regulator-min-microvolt = <5000000>;
@@ -93,7 +93,7 @@
 	display-timings {
 		native-mode = <&timing0>;
 
-		timing0: timing@0 {
+		timing0: timing {
 			/* 1280x800 */
 			clock-frequency = <50000>;
 			hactive = <1280>;
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index 95210ef..c9889b1 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -84,7 +84,7 @@
 				sbs,poll-retry-count = <1>;
 			};
 
-			cros_ec: embedded-controller {
+			cros_ec: embedded-controller@1e {
 				compatible = "google,cros-ec-i2c";
 				reg = <0x1e>;
 				interrupts = <6 IRQ_TYPE_NONE>;
@@ -94,7 +94,7 @@
 				wakeup-source;
 			};
 
-			power-regulator {
+			power-regulator@48 {
 				compatible = "ti,tps65090";
 				reg = <0x48>;
 
@@ -244,7 +244,7 @@
 	samsung,hpd-gpio = <&gpx0 7 GPIO_ACTIVE_HIGH>;
 
 	ports {
-		port@0 {
+		port0 {
 			dp_out: endpoint {
 				remote-endpoint = <&bridge_in>;
 			};
@@ -428,7 +428,7 @@
 	samsung,i2c-sda-delay = <100>;
 	samsung,i2c-max-bus-freq = <378000>;
 
-	trackpad {
+	trackpad@67 {
 		reg = <0x67>;
 		compatible = "cypress,cyapa";
 		interrupts = <2 IRQ_TYPE_NONE>;
@@ -487,13 +487,13 @@
 		edid-emulation = <5>;
 
 		ports {
-			port@0 {
+			port0 {
 				bridge_out: endpoint {
 					remote-endpoint = <&panel_in>;
 				};
 			};
 
-			port@1 {
+			port1 {
 				bridge_in: endpoint {
 					remote-endpoint = <&dp_out>;
 				};
diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
index 0f500cb..273d662 100644
--- a/arch/arm/boot/dts/exynos5250-spring.dts
+++ b/arch/arm/boot/dts/exynos5250-spring.dts
@@ -383,7 +383,7 @@
 	samsung,i2c-sda-delay = <100>;
 	samsung,i2c-max-bus-freq = <66000>;
 
-	cros_ec: embedded-controller {
+	cros_ec: embedded-controller@1e {
 		compatible = "google,cros-ec-i2c";
 		reg = <0x1e>;
 		interrupts = <6 IRQ_TYPE_NONE>;
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index e653ae0..c7158b2 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -596,7 +596,7 @@
 		pinctrl-0 = <&i2s2_bus>;
 	};
 
-	usb@12000000 {
+	usb_dwc3 {
 		compatible = "samsung,exynos5250-dwusb3";
 		clocks = <&clock CLK_USB3>;
 		clock-names = "usbdrd30";
@@ -604,7 +604,7 @@
 		#size-cells = <1>;
 		ranges;
 
-		usbdrd_dwc3: dwc3 {
+		usbdrd_dwc3: dwc3@12000000 {
 			compatible = "synopsys,dwc3";
 			reg = <0x12000000 0x10000>;
 			interrupts = <0 72 0>;
@@ -763,7 +763,7 @@
 		iommu = <&sysmmu_gsc3>;
 	};
 
-	hdmi: hdmi {
+	hdmi: hdmi@14530000 {
 		compatible = "samsung,exynos4212-hdmi";
 		reg = <0x14530000 0x70000>;
 		power-domains = <&pd_disp1>;
@@ -776,7 +776,7 @@
 		samsung,syscon-phandle = <&pmu_system_controller>;
 	};
 
-	mixer {
+	mixer@14450000 {
 		compatible = "samsung,exynos5250-mixer";
 		reg = <0x14450000 0x10000>;
 		power-domains = <&pd_disp1>;
@@ -787,7 +787,7 @@
 		iommus = <&sysmmu_tv>;
 	};
 
-	dp_phy: video-phy@10040720 {
+	dp_phy: video-phy {
 		compatible = "samsung,exynos5250-dp-video-phy";
 		samsung,pmu-syscon = <&pmu_system_controller>;
 		#phy-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos5410-smdk5410.dts b/arch/arm/boot/dts/exynos5410-smdk5410.dts
index a731fbe..0f6429e 100644
--- a/arch/arm/boot/dts/exynos5410-smdk5410.dts
+++ b/arch/arm/boot/dts/exynos5410-smdk5410.dts
@@ -97,7 +97,7 @@
 		smsc,irq-push-pull;
 		smsc,force-internal-phy;
 
-		samsung,srom-page-mode = <1>;
+		samsung,srom-page-mode;
 		samsung,srom-timing = <9 12 1 9 1 1>;
 	};
 };
diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
index fa55867..7a56aec 100644
--- a/arch/arm/boot/dts/exynos5410.dtsi
+++ b/arch/arm/boot/dts/exynos5410.dtsi
@@ -102,8 +102,8 @@
 			reg = <0x10000000 0x100>;
 		};
 
-		sromc: sromc@12250000 {
-			compatible = "samsung,exynos-srom";
+		sromc: memory-controller@12250000 {
+			compatible = "samsung,exynos4210-srom";
 			reg = <0x12250000 0x14>;
 			#address-cells = <2>;
 			#size-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index a103ce8..60bc861 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -75,13 +75,6 @@
 	s2mps11_pmic@66 {
 		compatible = "samsung,s2mps11-pmic";
 		reg = <0x66>;
-		s2mps11,buck2-ramp-delay = <12>;
-		s2mps11,buck34-ramp-delay = <12>;
-		s2mps11,buck16-ramp-delay = <12>;
-		s2mps11,buck6-ramp-enable = <1>;
-		s2mps11,buck2-ramp-enable = <1>;
-		s2mps11,buck3-ramp-enable = <1>;
-		s2mps11,buck4-ramp-enable = <1>;
 
 		interrupt-parent = <&gpx3>;
 		interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index 3981ddb..8811e17 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -165,7 +165,7 @@
 	samsung,hpd-gpio = <&gpx2 6 GPIO_ACTIVE_HIGH>;
 
 	ports {
-		port@0 {
+		port0 {
 			dp_out: endpoint {
 				remote-endpoint = <&bridge_in>;
 			};
@@ -633,13 +633,13 @@
 		use-external-pwm;
 
 		ports {
-			port@0 {
+			port0 {
 				bridge_out: endpoint {
 					remote-endpoint = <&panel_in>;
 				};
 			};
 
-			port@1 {
+			port1 {
 				bridge_in: endpoint {
 					remote-endpoint = <&dp_out>;
 				};
@@ -696,6 +696,11 @@
 	status = "okay";
 };
 
+&mfc {
+	samsung,mfc-r = <0x43000000 0x800000>;
+	samsung,mfc-l = <0x51000000 0x800000>;
+};
+
 &mmc_0 {
 	status = "okay";
 	num-slots = <1>;
diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
index 0785fed..9b77940 100644
--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
@@ -111,7 +111,7 @@
 
 	display-timings {
 		native-mode = <&timing0>;
-		timing0: timing@0 {
+		timing0: timing {
 			clock-frequency = <50000>;
 			hactive = <2560>;
 			vactive = <1600>;
@@ -142,13 +142,6 @@
 	s2mps11_pmic@66 {
 		compatible = "samsung,s2mps11-pmic";
 		reg = <0x66>;
-		s2mps11,buck2-ramp-delay = <12>;
-		s2mps11,buck34-ramp-delay = <12>;
-		s2mps11,buck16-ramp-delay = <12>;
-		s2mps11,buck6-ramp-enable = <1>;
-		s2mps11,buck2-ramp-enable = <1>;
-		s2mps11,buck3-ramp-enable = <1>;
-		s2mps11,buck4-ramp-enable = <1>;
 
 		s2mps11_osc: clocks {
 			#clock-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 7b99cb5..4c85234 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -551,13 +551,13 @@
 		clock-names = "timers";
 	};
 
-	dp_phy: video-phy@10040728 {
+	dp_phy: dp-video-phy {
 		compatible = "samsung,exynos5420-dp-video-phy";
 		samsung,pmu-syscon = <&pmu_system_controller>;
 		#phy-cells = <0>;
 	};
 
-	mipi_phy: video-phy@10040714 {
+	mipi_phy: mipi-video-phy {
 		compatible = "samsung,s5pv210-mipi-video-phy";
 		syscon = <&pmu_system_controller>;
 		#phy-cells = <1>;
@@ -913,7 +913,7 @@
 		clock-names = "secss";
 	};
 
-	usbdrd3_0: usb@12000000 {
+	usbdrd3_0: usb3-0 {
 		compatible = "samsung,exynos5250-dwusb3";
 		clocks = <&clock CLK_USBD300>;
 		clock-names = "usbdrd30";
@@ -921,7 +921,7 @@
 		#size-cells = <1>;
 		ranges;
 
-		usbdrd_dwc3_0: dwc3 {
+		usbdrd_dwc3_0: dwc3@12000000 {
 			compatible = "snps,dwc3";
 			reg = <0x12000000 0x10000>;
 			interrupts = <0 72 0>;
@@ -939,7 +939,7 @@
 		#phy-cells = <1>;
 	};
 
-	usbdrd3_1: usb@12400000 {
+	usbdrd3_1: usb3-1 {
 		compatible = "samsung,exynos5250-dwusb3";
 		clocks = <&clock CLK_USBD301>;
 		clock-names = "usbdrd30";
@@ -947,7 +947,7 @@
 		#size-cells = <1>;
 		ranges;
 
-		usbdrd_dwc3_1: dwc3 {
+		usbdrd_dwc3_1: dwc3@12400000 {
 			compatible = "snps,dwc3";
 			reg = <0x12400000 0x10000>;
 			interrupts = <0 73 0>;
@@ -1199,6 +1199,7 @@
 };
 
 &fimd {
+	compatible = "samsung,exynos5420-fimd";
 	clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>;
 	clock-names = "sclk_fimd", "fimd";
 	power-domains = <&disp_pd>;
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 1bd507b..20fa761 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -92,13 +92,6 @@
 	s2mps11_pmic@66 {
 		compatible = "samsung,s2mps11-pmic";
 		reg = <0x66>;
-		s2mps11,buck2-ramp-delay = <12>;
-		s2mps11,buck34-ramp-delay = <12>;
-		s2mps11,buck16-ramp-delay = <12>;
-		s2mps11,buck6-ramp-enable = <1>;
-		s2mps11,buck2-ramp-enable = <1>;
-		s2mps11,buck3-ramp-enable = <1>;
-		s2mps11,buck4-ramp-enable = <1>;
 		samsung,s2mps11-acokb-ground;
 
 		interrupt-parent = <&gpx0>;
@@ -121,10 +114,9 @@
 			};
 
 			ldo3_reg: LDO3 {
-				regulator-name = "vdd_ldo3";
+				regulator-name = "vddq_mmc0";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
-				regulator-always-on;
 			};
 
 			ldo5_reg: LDO5 {
@@ -184,10 +176,9 @@
 			};
 
 			ldo13_reg: LDO13 {
-				regulator-name = "vdd_ldo13";
+				regulator-name = "vddq_mmc2";
 				regulator-min-microvolt = <2800000>;
 				regulator-max-microvolt = <2800000>;
-				regulator-always-on;
 			};
 
 			ldo15_reg: LDO15 {
@@ -211,11 +202,16 @@
 				regulator-always-on;
 			};
 
+			ldo18_reg: LDO18 {
+				regulator-name = "vdd_emmc_1V8";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
 			ldo19_reg: LDO19 {
 				regulator-name = "vdd_sd";
 				regulator-min-microvolt = <2800000>;
 				regulator-max-microvolt = <2800000>;
-				regulator-always-on;
 			};
 
 			ldo24_reg: LDO24 {
@@ -347,6 +343,8 @@
 	cap-mmc-highspeed;
 	mmc-hs200-1_8v;
 	mmc-hs400-1_8v;
+	vmmc-supply = <&ldo18_reg>;
+	vqmmc-supply = <&ldo3_reg>;
 };
 
 &mmc_2 {
@@ -359,6 +357,8 @@
 	pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus1 &sd2_bus4>;
 	bus-width = <4>;
 	cap-sd-highspeed;
+	vmmc-supply = <&ldo19_reg>;
+	vqmmc-supply = <&ldo13_reg>;
 };
 
 &pinctrl_0 {
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index b9342ec..fd17681 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -132,7 +132,7 @@
 		clock-names = "spi", "spi_busclk0";
 	};
 
-	pin_ctrl: pinctrl {
+	pin_ctrl: pinctrl@E0000 {
 		compatible = "samsung,exynos5440-pinctrl";
 		reg = <0xE0000 0x1000>;
 		interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>,
@@ -205,7 +205,7 @@
 		ranges;
 	};
 
-	rtc {
+	rtc@130000 {
 		compatible = "samsung,s3c6410-rtc";
 		reg = <0x130000 0x1000>;
 		interrupts = <0 17 0>, <0 16 0>;
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 6e9edc1..f959925 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -671,6 +671,11 @@
 	status = "okay";
 };
 
+&mfc {
+	samsung,mfc-r = <0x43000000 0x800000>;
+	samsung,mfc-l = <0x51000000 0x800000>;
+};
+
 &mmc_0 {
 	status = "okay";
 	num-slots = <1>;
diff --git a/arch/arm/boot/dts/imx25-pinfunc.h b/arch/arm/boot/dts/imx25-pinfunc.h
index 848ffa78..f96fa2d 100644
--- a/arch/arm/boot/dts/imx25-pinfunc.h
+++ b/arch/arm/boot/dts/imx25-pinfunc.h
@@ -110,20 +110,20 @@
 #define MX25_PAD_CS4__UART5_CTS			0x054 0x264 0x000 0x13 0x000
 #define MX25_PAD_CS4__GPIO_3_20			0x054 0x264 0x000 0x15 0x000
 
-#define MX25_PAD_CS5__CS5			0x058 0x268 0x000 0x10 0x000
+#define MX25_PAD_CS5__CS5			0x058 0x268 0x000 0x00 0x000
 #define MX25_PAD_CS5__NF_CE2			0x058 0x268 0x000 0x01 0x000
-#define MX25_PAD_CS5__UART5_RTS			0x058 0x268 0x574 0x13 0x000
-#define MX25_PAD_CS5__GPIO_3_21			0x058 0x268 0x000 0x15 0x000
+#define MX25_PAD_CS5__UART5_RTS			0x058 0x268 0x574 0x03 0x000
+#define MX25_PAD_CS5__GPIO_3_21			0x058 0x268 0x000 0x05 0x000
 
 #define MX25_PAD_NF_CE0__NF_CE0			0x05c 0x26c 0x000 0x10 0x000
 #define MX25_PAD_NF_CE0__GPIO_3_22		0x05c 0x26c 0x000 0x15 0x000
 
 #define MX25_PAD_ECB__ECB			0x060 0x270 0x000 0x10 0x000
-#define MX25_PAD_ECB__UART5_TXD_MUX		0x060 0x270 0x000 0x13 0x000
+#define MX25_PAD_ECB__UART5_TXD			0x060 0x270 0x000 0x13 0x000
 #define MX25_PAD_ECB__GPIO_3_23			0x060 0x270 0x000 0x15 0x000
 
 #define MX25_PAD_LBA__LBA			0x064 0x274 0x000 0x10 0x000
-#define MX25_PAD_LBA__UART5_RXD_MUX		0x064 0x274 0x578 0x13 0x000
+#define MX25_PAD_LBA__UART5_RXD			0x064 0x274 0x578 0x13 0x000
 #define MX25_PAD_LBA__GPIO_3_24			0x064 0x274 0x000 0x15 0x000
 
 #define MX25_PAD_BCLK__BCLK			0x068 0x000 0x000 0x00 0x000
@@ -237,17 +237,21 @@
 #define MX25_PAD_LD7__GPIO_1_21			0x0e4 0x2dc 0x000 0x15 0x000
 
 #define MX25_PAD_LD8__LD8			0x0e8 0x2e0 0x000 0x10 0x000
+#define MX25_PAD_LD8__UART4_RXD			0x0e8 0x2e0 0x570 0x12 0x000
 #define MX25_PAD_LD8__FEC_TX_ERR		0x0e8 0x2e0 0x000 0x15 0x000
 #define MX25_PAD_LD8__SDHC2_CMD			0x0e8 0x2e0 0x4e0 0x06 0x000
 
 #define MX25_PAD_LD9__LD9			0x0ec 0x2e4 0x000 0x10 0x000
+#define MX25_PAD_LD9__UART4_TXD			0x0ec 0x2e4 0x000 0x12 0x000
 #define MX25_PAD_LD9__FEC_COL			0x0ec 0x2e4 0x504 0x15 0x001
 #define MX25_PAD_LD9__SDHC2_CLK			0x0ec 0x2e4 0x4dc 0x06 0x000
 
-#define MX25_PAD_LD10__LD10			0x0f0 0x2e8 0x000 0x10 0x000
-#define MX25_PAD_LD10__FEC_RX_ERR		0x0f0 0x2e8 0x518 0x15 0x001
+#define MX25_PAD_LD10__LD10			0x0f0 0x2e8 0x000 0x00 0x000
+#define MX25_PAD_LD10__UART4_RTS		0x0f0 0x2e8 0x56c 0x02 0x000
+#define MX25_PAD_LD10__FEC_RX_ERR		0x0f0 0x2e8 0x518 0x05 0x001
 
 #define MX25_PAD_LD11__LD11			0x0f4 0x2ec 0x000 0x10 0x000
+#define MX25_PAD_LD11__UART4_CTS		0x0f4 0x2ec 0x000 0x12 0x000
 #define MX25_PAD_LD11__FEC_RDATA2		0x0f4 0x2ec 0x50c 0x15 0x001
 #define MX25_PAD_LD11__SDHC2_DAT1		0x0f4 0x2ec 0x4e8 0x06 0x000
 
@@ -291,22 +295,22 @@
 #define MX25_PAD_PWM__USBH2_OC			0x11c 0x314 0x580 0x16 0x001
 
 #define MX25_PAD_CSI_D2__CSI_D2			0x120 0x318 0x000 0x10 0x000
-#define MX25_PAD_CSI_D2__UART5_RXD_MUX		0x120 0x318 0x578 0x11 0x001
+#define MX25_PAD_CSI_D2__UART5_RXD		0x120 0x318 0x578 0x11 0x001
 #define MX25_PAD_CSI_D2__SIM1_CLK0		0x120 0x318 0x000 0x04 0x000
 #define MX25_PAD_CSI_D2__GPIO_1_27		0x120 0x318 0x000 0x15 0x000
 #define MX25_PAD_CSI_D2__CSPI3_MOSI		0x120 0x318 0x000 0x17 0x000
 
 #define MX25_PAD_CSI_D3__CSI_D3			0x124 0x31c 0x000 0x10 0x000
-#define MX25_PAD_CSI_D3__UART5_TXD_MUX		0x124 0x31c 0x000 0x11 0x000
+#define MX25_PAD_CSI_D3__UART5_TXD		0x124 0x31c 0x000 0x11 0x000
 #define MX25_PAD_CSI_D3__SIM1_RST0		0x124 0x31c 0x000 0x04 0x000
 #define MX25_PAD_CSI_D3__GPIO_1_28		0x124 0x31c 0x000 0x15 0x000
 #define MX25_PAD_CSI_D3__CSPI3_MISO		0x124 0x31c 0x4b4 0x17 0x001
 
-#define MX25_PAD_CSI_D4__CSI_D4			0x128 0x320 0x000 0x10 0x000
-#define MX25_PAD_CSI_D4__UART5_RTS		0x128 0x320 0x574 0x11 0x001
+#define MX25_PAD_CSI_D4__CSI_D4			0x128 0x320 0x000 0x00 0x000
+#define MX25_PAD_CSI_D4__UART5_RTS		0x128 0x320 0x574 0x01 0x001
 #define MX25_PAD_CSI_D4__SIM1_VEN0		0x128 0x320 0x000 0x04 0x000
-#define MX25_PAD_CSI_D4__GPIO_1_29		0x128 0x320 0x000 0x15 0x000
-#define MX25_PAD_CSI_D4__CSPI3_SCLK		0x128 0x320 0x000 0x17 0x000
+#define MX25_PAD_CSI_D4__GPIO_1_29		0x128 0x320 0x000 0x05 0x000
+#define MX25_PAD_CSI_D4__CSPI3_SCLK		0x128 0x320 0x000 0x07 0x000
 
 #define MX25_PAD_CSI_D5__CSI_D5			0x12c 0x324 0x000 0x10 0x000
 #define MX25_PAD_CSI_D5__UART5_CTS		0x12c 0x324 0x000 0x11 0x000
@@ -360,7 +364,7 @@
 #define MX25_PAD_I2C1_DAT__GPIO_1_13		0x154 0x34c 0x000 0x15 0x000
 
 #define MX25_PAD_CSPI1_MOSI__CSPI1_MOSI		0x158 0x350 0x000 0x10 0x000
-#define MX25_PAD_CSPI1_MOSI__UART3_RXD		0x158 0x350 0x000 0x12 0x000
+#define MX25_PAD_CSPI1_MOSI__UART3_RXD		0x158 0x350 0x568 0x12 0x000
 #define MX25_PAD_CSPI1_MOSI__GPIO_1_14		0x158 0x350 0x000 0x15 0x000
 
 #define MX25_PAD_CSPI1_MISO__CSPI1_MISO		0x15c 0x354 0x000 0x10 0x000
@@ -371,10 +375,10 @@
 #define MX25_PAD_CSPI1_SS0__PWM2_PWMO		0x160 0x358 0x000 0x12 0x000
 #define MX25_PAD_CSPI1_SS0__GPIO_1_16		0x160 0x358 0x000 0x15 0x000
 
-#define MX25_PAD_CSPI1_SS1__CSPI1_SS1		0x164 0x35c 0x000 0x10 0x000
-#define MX25_PAD_CSPI1_SS1__I2C3_DAT		0x164 0x35C 0x528 0x11 0x001
-#define MX25_PAD_CSPI1_SS1__UART3_RTS		0x164 0x35c 0x000 0x12 0x000
-#define MX25_PAD_CSPI1_SS1__GPIO_1_17		0x164 0x35c 0x000 0x15 0x000
+#define MX25_PAD_CSPI1_SS1__CSPI1_SS1		0x164 0x35c 0x000 0x00 0x000
+#define MX25_PAD_CSPI1_SS1__I2C3_DAT		0x164 0x35C 0x528 0x01 0x001
+#define MX25_PAD_CSPI1_SS1__UART3_RTS		0x164 0x35c 0x000 0x02 0x000
+#define MX25_PAD_CSPI1_SS1__GPIO_1_17		0x164 0x35c 0x000 0x05 0x000
 
 #define MX25_PAD_CSPI1_SCLK__CSPI1_SCLK		0x168 0x360 0x000 0x10 0x000
 #define MX25_PAD_CSPI1_SCLK__UART3_CTS		0x168 0x360 0x000 0x12 0x000
@@ -383,20 +387,24 @@
 #define MX25_PAD_CSPI1_RDY__CSPI1_RDY		0x16c 0x364 0x000 0x10 0x000
 #define MX25_PAD_CSPI1_RDY__GPIO_2_22		0x16c 0x364 0x000 0x15 0x000
 
-#define MX25_PAD_UART1_RXD__UART1_RXD		0x170 0x368 0x000 0x10 0x000
-#define MX25_PAD_UART1_RXD__GPIO_4_22		0x170 0x368 0x000 0x15 0x000
+#define MX25_PAD_UART1_RXD__UART1_RXD		0x170 0x368 0x000 0x00 0x000
+#define MX25_PAD_UART1_RXD__UART2_DTR		0x170 0x368 0x000 0x03 0x000
+#define MX25_PAD_UART1_RXD__GPIO_4_22		0x170 0x368 0x000 0x05 0x000
 
-#define MX25_PAD_UART1_TXD__UART1_TXD		0x174 0x36c 0x000 0x10 0x000
-#define MX25_PAD_UART1_TXD__GPIO_4_23		0x174 0x36c 0x000 0x15 0x000
+#define MX25_PAD_UART1_TXD__UART1_TXD		0x174 0x36c 0x000 0x00 0x000
+#define MX25_PAD_UART1_TXD__UART2_DSR		0x174 0x36c 0x000 0x03 0x000
+#define MX25_PAD_UART1_TXD__GPIO_4_23		0x174 0x36c 0x000 0x05 0x000
 
-#define MX25_PAD_UART1_RTS__UART1_RTS		0x178 0x370 0x000 0x10 0x000
-#define MX25_PAD_UART1_RTS__CSI_D0		0x178 0x370 0x488 0x11 0x001
-#define MX25_PAD_UART1_RTS__CC3			0x178 0x370 0x000 0x12 0x000
-#define MX25_PAD_UART1_RTS__GPIO_4_24		0x178 0x370 0x000 0x15 0x000
+#define MX25_PAD_UART1_RTS__UART1_RTS		0x178 0x370 0x000 0x00 0x000
+#define MX25_PAD_UART1_RTS__CSI_D0		0x178 0x370 0x488 0x01 0x001
+#define MX25_PAD_UART1_RTS__CC3			0x178 0x370 0x000 0x02 0x000
+#define MX25_PAD_UART1_RTS__UART2_DCD		0x178 0x370 0x000 0x03 0x000
+#define MX25_PAD_UART1_RTS__GPIO_4_24		0x178 0x370 0x000 0x05 0x000
 
-#define MX25_PAD_UART1_CTS__UART1_CTS		0x17c 0x374 0x000 0x10 0x000
-#define MX25_PAD_UART1_CTS__CSI_D1		0x17c 0x374 0x48c 0x11 0x001
-#define MX25_PAD_UART1_CTS__GPIO_4_25		0x17c 0x374 0x000 0x15 0x000
+#define MX25_PAD_UART1_CTS__UART1_CTS		0x17c 0x374 0x000 0x00 0x000
+#define MX25_PAD_UART1_CTS__CSI_D1		0x17c 0x374 0x48c 0x01 0x001
+#define MX25_PAD_UART1_CTS__UART2_RI		0x17c 0x374 0x000 0x03 0x001
+#define MX25_PAD_UART1_CTS__GPIO_4_25		0x17c 0x374 0x000 0x05 0x000
 
 #define MX25_PAD_UART2_RXD__UART2_RXD		0x180 0x378 0x000 0x10 0x000
 #define MX25_PAD_UART2_RXD__GPIO_4_26		0x180 0x378 0x000 0x15 0x000
@@ -404,10 +412,10 @@
 #define MX25_PAD_UART2_TXD__UART2_TXD		0x184 0x37c 0x000 0x10 0x000
 #define MX25_PAD_UART2_TXD__GPIO_4_27		0x184 0x37c 0x000 0x15 0x000
 
-#define MX25_PAD_UART2_RTS__UART2_RTS		0x188 0x380 0x000 0x10 0x000
-#define MX25_PAD_UART2_RTS__FEC_COL		0x188 0x380 0x504 0x12 0x002
-#define MX25_PAD_UART2_RTS__CC1			0x188 0x380 0x000 0x13 0x000
-#define MX25_PAD_UART2_RTS__GPIO_4_28		0x188 0x380 0x000 0x15 0x000
+#define MX25_PAD_UART2_RTS__UART2_RTS		0x188 0x380 0x000 0x00 0x000
+#define MX25_PAD_UART2_RTS__FEC_COL		0x188 0x380 0x504 0x02 0x002
+#define MX25_PAD_UART2_RTS__CC1			0x188 0x380 0x000 0x03 0x000
+#define MX25_PAD_UART2_RTS__GPIO_4_28		0x188 0x380 0x000 0x05 0x000
 
 #define MX25_PAD_UART2_CTS__UART2_CTS		0x18c 0x384 0x000 0x10 0x000
 #define MX25_PAD_UART2_CTS__FEC_RX_ERR		0x18c 0x384 0x518 0x12 0x002
@@ -439,36 +447,42 @@
 #define MX25_PAD_SD1_DATA3__FEC_CRS		0x1a4 0x39c 0x508 0x12 0x002
 #define MX25_PAD_SD1_DATA3__GPIO_2_28		0x1a4 0x39c 0x000 0x15 0x000
 
-#define MX25_PAD_KPP_ROW0__KPP_ROW0		0x1a8 0x3a0 0x000 0x10 0x000
-#define MX25_PAD_KPP_ROW0__UART1_DTR		0x1a8 0x3a0 0x000 0x14 0x000
-#define MX25_PAD_KPP_ROW0__GPIO_2_29		0x1a8 0x3a0 0x000 0x15 0x000
+#define MX25_PAD_KPP_ROW0__KPP_ROW0		0x1a8 0x3a0 0x000 0x00 0x000
+#define MX25_PAD_KPP_ROW0__UART3_RXD		0x1a8 0x3a0 0x568 0x01 0x001
+#define MX25_PAD_KPP_ROW0__UART1_DTR		0x1a8 0x3a0 0x000 0x04 0x000
+#define MX25_PAD_KPP_ROW0__GPIO_2_29		0x1a8 0x3a0 0x000 0x05 0x000
 
-#define MX25_PAD_KPP_ROW1__KPP_ROW1		0x1ac 0x3a4 0x000 0x10 0x000
-#define MX25_PAD_KPP_ROW1__GPIO_2_30		0x1ac 0x3a4 0x000 0x15 0x000
+#define MX25_PAD_KPP_ROW1__KPP_ROW1		0x1ac 0x3a4 0x000 0x00 0x000
+#define MX25_PAD_KPP_ROW1__UART3_TXD		0x1ac 0x3a4 0x000 0x01 0x000
+#define MX25_PAD_KPP_ROW1__UART1_DSR		0x1ac 0x3a4 0x000 0x04 0x000
+#define MX25_PAD_KPP_ROW1__GPIO_2_30		0x1ac 0x3a4 0x000 0x05 0x000
 
-#define MX25_PAD_KPP_ROW2__KPP_ROW2		0x1b0 0x3a8 0x000 0x10 0x000
-#define MX25_PAD_KPP_ROW2__CSI_D0		0x1b0 0x3a8 0x488 0x13 0x002
-#define MX25_PAD_KPP_ROW2__UART1_DCD		0x1b0 0x3a8 0x000 0x14 0x000
-#define MX25_PAD_KPP_ROW2__GPIO_2_31		0x1b0 0x3a8 0x000 0x15 0x000
+#define MX25_PAD_KPP_ROW2__KPP_ROW2		0x1b0 0x3a8 0x000 0x00 0x000
+#define MX25_PAD_KPP_ROW2__UART3_RTS		0x1b0 0x3a8 0x000 0x01 0x000
+#define MX25_PAD_KPP_ROW2__CSI_D0		0x1b0 0x3a8 0x488 0x03 0x002
+#define MX25_PAD_KPP_ROW2__UART1_DCD		0x1b0 0x3a8 0x000 0x04 0x000
+#define MX25_PAD_KPP_ROW2__GPIO_2_31		0x1b0 0x3a8 0x000 0x05 0x000
 
-#define MX25_PAD_KPP_ROW3__KPP_ROW3		0x1b4 0x3ac 0x000 0x10 0x000
-#define MX25_PAD_KPP_ROW3__CSI_D1		0x1b4 0x3ac 0x48c 0x13 0x002
-#define MX25_PAD_KPP_ROW3__GPIO_3_0		0x1b4 0x3ac 0x000 0x15 0x000
+#define MX25_PAD_KPP_ROW3__KPP_ROW3		0x1b4 0x3ac 0x000 0x00 0x000
+#define MX25_PAD_KPP_ROW3__UART3_CTS		0x1b4 0x3ac 0x000 0x01 0x000
+#define MX25_PAD_KPP_ROW3__CSI_D1		0x1b4 0x3ac 0x48c 0x03 0x002
+#define MX25_PAD_KPP_ROW3__UART1_RI		0x1b4 0x3ac 0x000 0x04 0x000
+#define MX25_PAD_KPP_ROW3__GPIO_3_0		0x1b4 0x3ac 0x000 0x05 0x000
 
 #define MX25_PAD_KPP_COL0__KPP_COL0		0x1b8 0x3b0 0x000 0x10 0x000
-#define MX25_PAD_KPP_COL0__UART4_RXD_MUX	0x1b8 0x3b0 0x570 0x11 0x001
+#define MX25_PAD_KPP_COL0__UART4_RXD		0x1b8 0x3b0 0x570 0x11 0x001
 #define MX25_PAD_KPP_COL0__AUD5_TXD		0x1b8 0x3b0 0x000 0x12 0x000
 #define MX25_PAD_KPP_COL0__GPIO_3_1		0x1b8 0x3b0 0x000 0x15 0x000
 
 #define MX25_PAD_KPP_COL1__KPP_COL1		0x1bc 0x3b4 0x000 0x10 0x000
-#define MX25_PAD_KPP_COL1__UART4_TXD_MUX	0x1bc 0x3b4 0x000 0x11 0x000
+#define MX25_PAD_KPP_COL1__UART4_TXD		0x1bc 0x3b4 0x000 0x11 0x000
 #define MX25_PAD_KPP_COL1__AUD5_RXD		0x1bc 0x3b4 0x000 0x12 0x000
 #define MX25_PAD_KPP_COL1__GPIO_3_2		0x1bc 0x3b4 0x000 0x15 0x000
 
-#define MX25_PAD_KPP_COL2__KPP_COL2		0x1c0 0x3b8 0x000 0x10 0x000
-#define MX25_PAD_KPP_COL2__UART4_RTS		0x1c0 0x3b8 0x000 0x11 0x000
-#define MX25_PAD_KPP_COL2__AUD5_TXC		0x1c0 0x3b8 0x000 0x12 0x000
-#define MX25_PAD_KPP_COL2__GPIO_3_3		0x1c0 0x3b8 0x000 0x15 0x000
+#define MX25_PAD_KPP_COL2__KPP_COL2		0x1c0 0x3b8 0x000 0x00 0x000
+#define MX25_PAD_KPP_COL2__UART4_RTS		0x1c0 0x3b8 0x56c 0x01 0x001
+#define MX25_PAD_KPP_COL2__AUD5_TXC		0x1c0 0x3b8 0x000 0x02 0x000
+#define MX25_PAD_KPP_COL2__GPIO_3_3		0x1c0 0x3b8 0x000 0x05 0x000
 
 #define MX25_PAD_KPP_COL3__KPP_COL3		0x1c4 0x3bc 0x000 0x10 0x000
 #define MX25_PAD_KPP_COL3__UART4_CTS		0x1c4 0x3bc 0x000 0x11 0x000
@@ -557,9 +571,10 @@
 #define MX25_PAD_UPLL_BYPCLK__UPLL_BYPCLK	0x210 0x000 0x000 0x10 0x000
 #define MX25_PAD_UPLL_BYPCLK__GPIO_3_16		0x210 0x000 0x000 0x15 0x000
 
-#define MX25_PAD_VSTBY_REQ__VSTBY_REQ		0x214 0x408 0x000 0x10 0x000
-#define MX25_PAD_VSTBY_REQ__AUD7_TXFS		0x214 0x408 0x000 0x14 0x000
-#define MX25_PAD_VSTBY_REQ__GPIO_3_17		0x214 0x408 0x000 0x15 0x000
+#define MX25_PAD_VSTBY_REQ__VSTBY_REQ		0x214 0x408 0x000 0x00 0x000
+#define MX25_PAD_VSTBY_REQ__AUD7_TXFS		0x214 0x408 0x000 0x04 0x000
+#define MX25_PAD_VSTBY_REQ__GPIO_3_17		0x214 0x408 0x000 0x05 0x000
+#define MX25_PAD_VSTBY_REQ__UART4_RTS		0x214 0x408 0x56c 0x06 0x002
 
 #define MX25_PAD_VSTBY_ACK__VSTBY_ACK		0x218 0x40c 0x000 0x10 0x000
 #define MX25_PAD_VSTBY_ACK__GPIO_3_18		0x218 0x40c 0x000 0x15 0x000
@@ -567,6 +582,7 @@
 #define MX25_PAD_POWER_FAIL__POWER_FAIL		0x21c 0x410 0x000 0x10 0x000
 #define MX25_PAD_POWER_FAIL__AUD7_RXD		0x21c 0x410 0x478 0x14 0x001
 #define MX25_PAD_POWER_FAIL__GPIO_3_19		0x21c 0x410 0x000 0x15 0x000
+#define MX25_PAD_POWER_FAIL__UART4_CTS		0x21c 0x410 0x000 0x16 0x000
 
 #define MX25_PAD_CLKO__CLKO			0x220 0x414 0x000 0x10 0x000
 #define MX25_PAD_CLKO__GPIO_2_21		0x220 0x414 0x000 0x15 0x000
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 6b1f4bb..af6af87 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -420,6 +420,15 @@
 				interrupts = <41>;
 			};
 
+			scc: crypto@53fac000 {
+				compatible = "fsl,imx25-scc";
+				reg = <0x53fac000 0x4000>;
+				clocks = <&clks 111>;
+				clock-names = "ipg";
+				interrupts = <49>, <50>;
+				interrupt-names = "scm", "smn";
+			};
+
 			esdhc1: esdhc@53fb4000 {
 				compatible = "fsl,imx25-esdhc";
 				reg = <0x53fb4000 0x4000>;
diff --git a/arch/arm/boot/dts/imx28-m28.dtsi b/arch/arm/boot/dts/imx28-m28.dtsi
index 759cc56..6cebaa6 100644
--- a/arch/arm/boot/dts/imx28-m28.dtsi
+++ b/arch/arm/boot/dts/imx28-m28.dtsi
@@ -27,32 +27,6 @@
 				pinctrl-names = "default";
 				pinctrl-0 = <&gpmi_pins_a &gpmi_status_cfg>;
 				status = "okay";
-
-				partition@0 {
-					label = "bootloader";
-					reg = <0x00000000 0x00300000>;
-					read-only;
-				};
-
-				partition@1 {
-					label = "environment";
-					reg = <0x00300000 0x00080000>;
-				};
-
-				partition@2 {
-					label = "redundant-environment";
-					reg = <0x00380000 0x00080000>;
-				};
-
-				partition@3 {
-					label = "kernel";
-					reg = <0x00400000 0x00400000>;
-				};
-
-				partition@4 {
-					label = "filesystem";
-					reg = <0x00800000 0x0f800000>;
-				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index f637ec9..74aa151 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -434,6 +434,32 @@
 					fsl,pull-up = <MXS_PULL_ENABLE>;
 				};
 
+				mac0_pins_b: mac0@1 {
+					reg = <1>;
+					fsl,pinmux-ids = <
+						MX28_PAD_ENET0_MDC__ENET0_MDC
+						MX28_PAD_ENET0_MDIO__ENET0_MDIO
+						MX28_PAD_ENET0_RX_EN__ENET0_RX_EN
+						MX28_PAD_ENET0_RXD0__ENET0_RXD0
+						MX28_PAD_ENET0_RXD1__ENET0_RXD1
+						MX28_PAD_ENET0_RXD2__ENET0_RXD2
+						MX28_PAD_ENET0_RXD3__ENET0_RXD3
+						MX28_PAD_ENET0_TX_EN__ENET0_TX_EN
+						MX28_PAD_ENET0_TXD0__ENET0_TXD0
+						MX28_PAD_ENET0_TXD1__ENET0_TXD1
+						MX28_PAD_ENET0_TXD2__ENET0_TXD2
+						MX28_PAD_ENET0_TXD3__ENET0_TXD3
+						MX28_PAD_ENET_CLK__CLKCTRL_ENET
+						MX28_PAD_ENET0_COL__ENET0_COL
+						MX28_PAD_ENET0_CRS__ENET0_CRS
+						MX28_PAD_ENET0_TX_CLK__ENET0_TX_CLK
+						MX28_PAD_ENET0_RX_CLK__ENET0_RX_CLK
+						>;
+					fsl,drive-strength = <MXS_DRIVE_8mA>;
+					fsl,voltage = <MXS_VOLTAGE_HIGH>;
+					fsl,pull-up = <MXS_PULL_ENABLE>;
+				};
+
 				mac1_pins_a: mac1@0 {
 					reg = <0>;
 					fsl,pinmux-ids = <
diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi
index 5fdb222..1ce7ae9 100644
--- a/arch/arm/boot/dts/imx31.dtsi
+++ b/arch/arm/boot/dts/imx31.dtsi
@@ -69,6 +69,14 @@
 				status = "disabled";
 			};
 
+			kpp: kpp@43fa8000 {
+				compatible = "fsl,imx31-kpp", "fsl,imx21-kpp";
+				reg = <0x43fa8000 0x4000>;
+				interrupts = <24>;
+				clocks = <&clks 46>;
+				status = "disabled";
+			};
+
 			uart4: serial@43fb0000 {
 				compatible = "fsl,imx31-uart", "fsl,imx21-uart";
 				reg = <0x43fb0000 0x4000>;
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index 14e1320..490b7b4 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -137,6 +137,14 @@
 				status = "disabled";
 			};
 
+			kpp: kpp@43fa8000 {
+				compatible = "fsl,imx35-kpp", "fsl,imx21-kpp";
+				reg = <0x43fa8000 0x4000>;
+				interrupts = <24>;
+				clocks = <&clks 56>;
+				status = "disabled";
+			};
+
 			iomuxc: iomuxc@43fac000 {
 				compatible = "fsl,imx35-iomuxc";
 				reg = <0x43fac000 0x4000>;
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index 53f4088..dcee1e0f 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -84,6 +84,15 @@
 			regulator-max-microvolt = <5000000>;
 			gpio = <&gpio1 2 0>;
 		};
+
+		reg_usb_otg_vbus: regulator@4 {
+			compatible = "regulator-fixed";
+			reg = <4>;
+			regulator-name = "usb_otg_vbus";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			gpio = <&gpio1 4 0>;
+		};
 	};
 
 	sound {
@@ -168,6 +177,12 @@
 			>;
 		};
 
+		pinctrl_usbotg: usbotggrp {
+			fsl,pins = <
+				MX53_PAD_GPIO_4__GPIO1_4		0x000b0
+			>;
+		};
+
 		led_pin_gpio: led_gpio@0 {
 			fsl,pins = <
 				MX53_PAD_PATA_DATA8__GPIO2_8		0x80000000
@@ -351,6 +366,10 @@
 };
 
 &usbotg {
-	dr_mode = "peripheral";
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg>;
+	dr_mode = "otg";
+	vbus-supply = <&reg_usb_otg_vbus>;
+	disable-over-current;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 5111f51..bfbed52 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -114,7 +114,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_2p5v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6dl-tx6dl-comtft.dts b/arch/arm/boot/dts/imx6dl-tx6dl-comtft.dts
index 913bb9a..063fe75 100644
--- a/arch/arm/boot/dts/imx6dl-tx6dl-comtft.dts
+++ b/arch/arm/boot/dts/imx6dl-tx6dl-comtft.dts
@@ -1,12 +1,42 @@
 /*
- * Copyright 2014 Lothar Waßmann <LW@KARO-electronics.de>
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
  *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 at the following locations:
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
  *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
 /dts-v1/;
diff --git a/arch/arm/boot/dts/imx6dl-tx6s-8034.dts b/arch/arm/boot/dts/imx6dl-tx6s-8034.dts
new file mode 100644
index 0000000..ff8f7b1
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-tx6s-8034.dts
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2015-2016 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-tx6.dtsi"
+
+/ {
+	model = "Ka-Ro electronics TX6S-8034 Module";
+	compatible = "karo,imx6dl-tx6dl", "fsl,imx6dl";
+
+	aliases {
+		display = &display;
+		ipu1 = &ipu1;
+	};
+
+	cpus {
+		/delete-node/ cpu@1;
+	};
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_lcd0_pwr>;
+		enable-gpios = <&gpio3 29 GPIO_ACTIVE_HIGH>;
+		power-supply = <&reg_lcd1_pwr>;
+		/*
+		 * a poor man's way to create a 1:1 relationship between
+		 * the PWM value and the actual duty cycle
+		 */
+		brightness-levels = < 0  1  2  3  4  5  6  7  8  9
+				     10 11 12 13 14 15 16 17 18 19
+				     20 21 22 23 24 25 26 27 28 29
+				     30 31 32 33 34 35 36 37 38 39
+				     40 41 42 43 44 45 46 47 48 49
+				     50 51 52 53 54 55 56 57 58 59
+				     60 61 62 63 64 65 66 67 68 69
+				     70 71 72 73 74 75 76 77 78 79
+				     80 81 82 83 84 85 86 87 88 89
+				     90 91 92 93 94 95 96 97 98 99
+				    100>;
+		default-brightness-level = <50>;
+	};
+
+	display: display@di0 {
+		compatible = "fsl,imx-parallel-display";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_disp0_2>;
+		interface-pix-fmt = "rgb24";
+		status = "okay";
+
+		port {
+			display0_in: endpoint {
+				remote-endpoint = <&ipu1_di0_disp0>;
+			};
+		};
+
+		display-timings {
+			native-mode = <&vga>;
+
+			vga: VGA {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <48>;
+				hsync-len = <96>;
+				hfront-porch = <16>;
+				vback-porch = <31>;
+				vsync-len = <2>;
+				vfront-porch = <12>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ETV570 {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <114>;
+				hsync-len = <30>;
+				hfront-porch = <16>;
+				vback-porch = <32>;
+				vsync-len = <3>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0350 {
+				clock-frequency = <6413760>;
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <34>;
+				hsync-len = <34>;
+				hfront-porch = <20>;
+				vback-porch = <15>;
+				vsync-len = <3>;
+				vfront-porch = <4>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0430 {
+				clock-frequency = <9009000>;
+				hactive = <480>;
+				vactive = <272>;
+				hback-porch = <2>;
+				hsync-len = <41>;
+				hfront-porch = <2>;
+				vback-porch = <2>;
+				vsync-len = <10>;
+				vfront-porch = <2>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			ET0500 {
+				clock-frequency = <33264000>;
+				hactive = <800>;
+				vactive = <480>;
+				hback-porch = <88>;
+				hsync-len = <128>;
+				hfront-porch = <40>;
+				vback-porch = <33>;
+				vsync-len = <2>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0700 { /* same as ET0500 */
+				clock-frequency = <33264000>;
+				hactive = <800>;
+				vactive = <480>;
+				hback-porch = <88>;
+				hsync-len = <128>;
+				hfront-porch = <40>;
+				vback-porch = <33>;
+				vsync-len = <2>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ETQ570 {
+				clock-frequency = <6596040>;
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <38>;
+				hsync-len = <30>;
+				hfront-porch = <30>;
+				vback-porch = <16>;
+				vsync-len = <3>;
+				vfront-porch = <4>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+		};
+	};
+};
+
+&ds1339 {
+	status = "disabled";
+};
+
+&pinctrl_usdhc1 {
+	fsl,pins = <
+		MX6QDL_PAD_SD1_CMD__SD1_CMD		0x070b1
+		MX6QDL_PAD_SD1_CLK__SD1_CLK		0x070b1
+		MX6QDL_PAD_SD1_DAT0__SD1_DATA0		0x070b1
+		MX6QDL_PAD_SD1_DAT1__SD1_DATA1		0x070b1
+		MX6QDL_PAD_SD1_DAT2__SD1_DATA2		0x070b1
+		MX6QDL_PAD_SD1_DAT3__SD1_DATA3		0x070b1
+		MX6QDL_PAD_SD3_CMD__GPIO7_IO02		0x170b0 /* SD1 CD */
+	>;
+};
+
+&ipu1_di0_disp0 {
+	remote-endpoint = <&display0_in>;
+};
+
+&reg_lcd0_pwr {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/imx6dl-tx6s-8035.dts b/arch/arm/boot/dts/imx6dl-tx6s-8035.dts
new file mode 100644
index 0000000..f988950
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-tx6s-8035.dts
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2015-2016 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-tx6.dtsi"
+
+/ {
+	model = "Ka-Ro electronics TX6S-8035 Module";
+	compatible = "karo,imx6dl-tx6dl", "fsl,imx6dl";
+
+	aliases {
+		display = &display;
+		ipu1 = &ipu1;
+	};
+
+	cpus {
+		/delete-node/ cpu@1;
+	};
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_lcd0_pwr>;
+		enable-gpios = <&gpio3 29 GPIO_ACTIVE_HIGH>;
+		power-supply = <&reg_lcd1_pwr>;
+		/*
+		 * a poor man's way to create a 1:1 relationship between
+		 * the PWM value and the actual duty cycle
+		 */
+		brightness-levels = < 0  1  2  3  4  5  6  7  8  9
+				     10 11 12 13 14 15 16 17 18 19
+				     20 21 22 23 24 25 26 27 28 29
+				     30 31 32 33 34 35 36 37 38 39
+				     40 41 42 43 44 45 46 47 48 49
+				     50 51 52 53 54 55 56 57 58 59
+				     60 61 62 63 64 65 66 67 68 69
+				     70 71 72 73 74 75 76 77 78 79
+				     80 81 82 83 84 85 86 87 88 89
+				     90 91 92 93 94 95 96 97 98 99
+				    100>;
+		default-brightness-level = <50>;
+	};
+
+	display: display@di0 {
+		compatible = "fsl,imx-parallel-display";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_disp0_2>;
+		interface-pix-fmt = "rgb24";
+		status = "okay";
+
+		port {
+			display0_in: endpoint {
+				remote-endpoint = <&ipu1_di0_disp0>;
+			};
+		};
+
+		display-timings {
+			native-mode = <&vga>;
+
+			vga: VGA {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <48>;
+				hsync-len = <96>;
+				hfront-porch = <16>;
+				vback-porch = <31>;
+				vsync-len = <2>;
+				vfront-porch = <12>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ETV570 {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <114>;
+				hsync-len = <30>;
+				hfront-porch = <16>;
+				vback-porch = <32>;
+				vsync-len = <3>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0350 {
+				clock-frequency = <6413760>;
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <34>;
+				hsync-len = <34>;
+				hfront-porch = <20>;
+				vback-porch = <15>;
+				vsync-len = <3>;
+				vfront-porch = <4>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0430 {
+				clock-frequency = <9009000>;
+				hactive = <480>;
+				vactive = <272>;
+				hback-porch = <2>;
+				hsync-len = <41>;
+				hfront-porch = <2>;
+				vback-porch = <2>;
+				vsync-len = <10>;
+				vfront-porch = <2>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			ET0500 {
+				clock-frequency = <33264000>;
+				hactive = <800>;
+				vactive = <480>;
+				hback-porch = <88>;
+				hsync-len = <128>;
+				hfront-porch = <40>;
+				vback-porch = <33>;
+				vsync-len = <2>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0700 { /* same as ET0500 */
+				clock-frequency = <33264000>;
+				hactive = <800>;
+				vactive = <480>;
+				hback-porch = <88>;
+				hsync-len = <128>;
+				hfront-porch = <40>;
+				vback-porch = <33>;
+				vsync-len = <2>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ETQ570 {
+				clock-frequency = <6596040>;
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <38>;
+				hsync-len = <30>;
+				hfront-porch = <30>;
+				vback-porch = <16>;
+				vsync-len = <3>;
+				vfront-porch = <4>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+		};
+	};
+};
+
+&ds1339 {
+	status = "disabled";
+};
+
+&gpmi {
+	status = "disabled";
+};
+
+&ipu1_di0_disp0 {
+	remote-endpoint = <&display0_in>;
+};
+
+&reg_lcd0_pwr {
+	status = "disabled";
+};
+
+&usdhc4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc4>;
+	bus-width = <4>;
+	non-removable;
+	no-1-8-v;
+	fsl,wp-controller;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_usdhc4: usdhc4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_CMD__SD4_CMD		0x070b1
+			MX6QDL_PAD_SD4_CLK__SD4_CLK		0x070b1
+			MX6QDL_PAD_SD4_DAT0__SD4_DATA0		0x070b1
+			MX6QDL_PAD_SD4_DAT1__SD4_DATA1		0x070b1
+			MX6QDL_PAD_SD4_DAT2__SD4_DATA2		0x070b1
+			MX6QDL_PAD_SD4_DAT3__SD4_DATA3		0x070b1
+			MX6QDL_PAD_NANDF_ALE__SD4_RESET		0x0b0b1
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6dl-tx6u-801x.dts b/arch/arm/boot/dts/imx6dl-tx6u-801x.dts
index 5fe465c..b7a7284 100644
--- a/arch/arm/boot/dts/imx6dl-tx6u-801x.dts
+++ b/arch/arm/boot/dts/imx6dl-tx6u-801x.dts
@@ -1,12 +1,42 @@
 /*
- * Copyright 2014 Lothar Waßmann <LW@KARO-electronics.de>
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
  *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 at the following locations:
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
  *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
 /dts-v1/;
diff --git a/arch/arm/boot/dts/imx6dl-tx6u-8033.dts b/arch/arm/boot/dts/imx6dl-tx6u-8033.dts
new file mode 100644
index 0000000..4d3204a
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-tx6u-8033.dts
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-tx6.dtsi"
+
+/ {
+	model = "Ka-Ro electronics TX6U-8033 Module";
+	compatible = "karo,imx6dl-tx6dl", "fsl,imx6dl";
+
+	aliases {
+		display = &display;
+	};
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_lcd0_pwr>;
+		enable-gpios = <&gpio3 29 GPIO_ACTIVE_HIGH>;
+		power-supply = <&reg_lcd1_pwr>;
+		/*
+		 * a poor man's way to create a 1:1 relationship between
+		 * the PWM value and the actual duty cycle
+		 */
+		brightness-levels = < 0  1  2  3  4  5  6  7  8  9
+				     10 11 12 13 14 15 16 17 18 19
+				     20 21 22 23 24 25 26 27 28 29
+				     30 31 32 33 34 35 36 37 38 39
+				     40 41 42 43 44 45 46 47 48 49
+				     50 51 52 53 54 55 56 57 58 59
+				     60 61 62 63 64 65 66 67 68 69
+				     70 71 72 73 74 75 76 77 78 79
+				     80 81 82 83 84 85 86 87 88 89
+				     90 91 92 93 94 95 96 97 98 99
+				    100>;
+		default-brightness-level = <50>;
+	};
+
+	display: display@di0 {
+		compatible = "fsl,imx-parallel-display";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_disp0_2>;
+		interface-pix-fmt = "rgb24";
+		status = "okay";
+
+		port {
+			display0_in: endpoint {
+				remote-endpoint = <&ipu1_di0_disp0>;
+			};
+		};
+
+		display-timings {
+			native-mode = <&vga>;
+
+			vga: VGA {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <48>;
+				hsync-len = <96>;
+				hfront-porch = <16>;
+				vback-porch = <31>;
+				vsync-len = <2>;
+				vfront-porch = <12>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ETV570 {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <114>;
+				hsync-len = <30>;
+				hfront-porch = <16>;
+				vback-porch = <32>;
+				vsync-len = <3>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0350 {
+				clock-frequency = <6413760>;
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <34>;
+				hsync-len = <34>;
+				hfront-porch = <20>;
+				vback-porch = <15>;
+				vsync-len = <3>;
+				vfront-porch = <4>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0430 {
+				clock-frequency = <9009000>;
+				hactive = <480>;
+				vactive = <272>;
+				hback-porch = <2>;
+				hsync-len = <41>;
+				hfront-porch = <2>;
+				vback-porch = <2>;
+				vsync-len = <10>;
+				vfront-porch = <2>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			ET0500 {
+				clock-frequency = <33264000>;
+				hactive = <800>;
+				vactive = <480>;
+				hback-porch = <88>;
+				hsync-len = <128>;
+				hfront-porch = <40>;
+				vback-porch = <33>;
+				vsync-len = <2>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0700 { /* same as ET0500 */
+				clock-frequency = <33264000>;
+				hactive = <800>;
+				vactive = <480>;
+				hback-porch = <88>;
+				hsync-len = <128>;
+				hfront-porch = <40>;
+				vback-porch = <33>;
+				vsync-len = <2>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ETQ570 {
+				clock-frequency = <6596040>;
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <38>;
+				hsync-len = <30>;
+				hfront-porch = <30>;
+				vback-porch = <16>;
+				vsync-len = <3>;
+				vfront-porch = <4>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+		};
+	};
+};
+
+&ds1339 {
+	status = "disabled";
+};
+
+&gpmi {
+	status = "disabled";
+};
+
+&ipu1_di0_disp0 {
+	remote-endpoint = <&display0_in>;
+};
+
+&reg_lcd0_pwr {
+	status = "disabled";
+};
+
+&usdhc4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc4>;
+	bus-width = <4>;
+	non-removable;
+	no-1-8-v;
+	fsl,wp-controller;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_usdhc4: usdhc4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_CMD__SD4_CMD		0x070b1
+			MX6QDL_PAD_SD4_CLK__SD4_CLK		0x070b1
+			MX6QDL_PAD_SD4_DAT0__SD4_DATA0		0x070b1
+			MX6QDL_PAD_SD4_DAT1__SD4_DATA1		0x070b1
+			MX6QDL_PAD_SD4_DAT2__SD4_DATA2		0x070b1
+			MX6QDL_PAD_SD4_DAT3__SD4_DATA3		0x070b1
+			MX6QDL_PAD_NANDF_ALE__SD4_RESET		0x0b0b1
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6dl-tx6u-811x.dts b/arch/arm/boot/dts/imx6dl-tx6u-811x.dts
index d35a5cd..5e0c6bb 100644
--- a/arch/arm/boot/dts/imx6dl-tx6u-811x.dts
+++ b/arch/arm/boot/dts/imx6dl-tx6u-811x.dts
@@ -1,12 +1,42 @@
 /*
- * Copyright 2014 Lothar Waßmann <LW@KARO-electronics.de>
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
  *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 at the following locations:
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
  *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
 /dts-v1/;
@@ -81,16 +111,6 @@
 	};
 };
 
-&iomuxc {
-	imx6dl-tx6u-811x {
-		pinctrl_eeti: eetigrp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b1 /* Interrupt */
-			>;
-		};
-	};
-};
-
 &kpp {
 	status = "disabled"; /* pad conflict with backlight1 PWM */
 };
@@ -148,3 +168,11 @@
 &pwm1 {
 	status = "okay";
 };
+
+&iomuxc {
+	pinctrl_eeti: eetigrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b1 /* Interrupt */
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6dl-tx6u-81xx-mb7.dts b/arch/arm/boot/dts/imx6dl-tx6u-81xx-mb7.dts
new file mode 100644
index 0000000..b9a783f
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-tx6u-81xx-mb7.dts
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2016 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-tx6.dtsi"
+
+/ {
+	model = "Ka-Ro electronics TX6U-81xx Module on MB7 baseboard";
+	compatible = "karo,imx6dl-tx6dl", "fsl,imx6dl";
+
+	aliases {
+		display = &lvds0;
+		lvds0 = &lvds0;
+		lvds1 = &lvds1;
+	};
+
+	backlight0: backlight0 {
+		compatible = "pwm-backlight";
+		pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
+		power-supply = <&reg_lcd0_pwr>;
+		/*
+		 * a poor man's way to create a 1:1 relationship between
+		 * the PWM value and the actual duty cycle
+		 */
+		brightness-levels = < 0  1  2  3  4  5  6  7  8  9
+				     10 11 12 13 14 15 16 17 18 19
+				     20 21 22 23 24 25 26 27 28 29
+				     30 31 32 33 34 35 36 37 38 39
+				     40 41 42 43 44 45 46 47 48 49
+				     50 51 52 53 54 55 56 57 58 59
+				     60 61 62 63 64 65 66 67 68 69
+				     70 71 72 73 74 75 76 77 78 79
+				     80 81 82 83 84 85 86 87 88 89
+				     90 91 92 93 94 95 96 97 98 99
+				    100>;
+		default-brightness-level = <50>;
+	};
+
+	backlight1: backlight1 {
+		compatible = "pwm-backlight";
+		pwms = <&pwm1 0 500000 PWM_POLARITY_INVERTED>;
+		power-supply = <&reg_lcd1_pwr>;
+		/*
+		 * a poor man's way to create a 1:1 relationship between
+		 * the PWM value and the actual duty cycle
+		 */
+		brightness-levels = < 0  1  2  3  4  5  6  7  8  9
+				     10 11 12 13 14 15 16 17 18 19
+				     20 21 22 23 24 25 26 27 28 29
+				     30 31 32 33 34 35 36 37 38 39
+				     40 41 42 43 44 45 46 47 48 49
+				     50 51 52 53 54 55 56 57 58 59
+				     60 61 62 63 64 65 66 67 68 69
+				     70 71 72 73 74 75 76 77 78 79
+				     80 81 82 83 84 85 86 87 88 89
+				     90 91 92 93 94 95 96 97 98 99
+				    100>;
+		default-brightness-level = <50>;
+	};
+};
+
+&can1 {
+	status = "disabled";
+};
+
+&can2 {
+	xceiver-supply = <&reg_3v3>;
+};
+
+&i2c3 {
+	polytouch1: eeti@04 {
+		compatible = "eeti,egalax_ts";
+		reg = <0x04>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_eeti>;
+		interrupts-extended = <&gpio3 22 IRQ_TYPE_EDGE_FALLING>;
+		wakeup-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+		wakeup-source;
+	};
+};
+
+&kpp {
+	status = "disabled"; /* pads partially clash with backlight1 PWM */
+};
+
+&ldb {
+	status = "okay";
+
+	lvds0: lvds-channel@0 {
+		fsl,data-mapping = "spwg";
+		fsl,data-width = <18>;
+		status = "okay";
+
+		display-timings {
+			native-mode = <&lvds0_timing1>;
+
+			lvds0_timing0: hsd100pxn1 {
+				clock-frequency = <65000000>;
+				hactive = <1024>;
+				vactive = <768>;
+				hback-porch = <220>;
+				hfront-porch = <40>;
+				vback-porch = <21>;
+				vfront-porch = <7>;
+				hsync-len = <60>;
+				vsync-len = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			lvds0_timing1: VGA {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <48>;
+				hfront-porch = <16>;
+				vback-porch = <31>;
+				vfront-porch = <12>;
+				hsync-len = <96>;
+				vsync-len = <2>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			lvds0_timing2: nl12880bc20 {
+				clock-frequency = <71000000>;
+				hactive = <1280>;
+				vactive = <800>;
+				hback-porch = <50>;
+				hfront-porch = <50>;
+				vback-porch = <5>;
+				vfront-porch = <5>;
+				hsync-len = <60>;
+				vsync-len = <13>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+		};
+	};
+
+	lvds1: lvds-channel@1 {
+		fsl,data-mapping = "spwg";
+		fsl,data-width = <18>;
+		status = "okay";
+
+		display-timings {
+			native-mode = <&lvds1_timing2>;
+
+			lvds1_timing0: hsd100pxn1 {
+				clock-frequency = <65000000>;
+				hactive = <1024>;
+				vactive = <768>;
+				hback-porch = <220>;
+				hfront-porch = <40>;
+				vback-porch = <21>;
+				vfront-porch = <7>;
+				hsync-len = <60>;
+				vsync-len = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			lvds1_timing1: VGA {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <48>;
+				hfront-porch = <16>;
+				vback-porch = <31>;
+				vfront-porch = <12>;
+				hsync-len = <96>;
+				vsync-len = <2>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			lvds1_timing2: nl12880bc20 {
+				clock-frequency = <71000000>;
+				hactive = <1280>;
+				vactive = <800>;
+				hback-porch = <50>;
+				hfront-porch = <50>;
+				vback-porch = <5>;
+				vfront-porch = <5>;
+				hsync-len = <60>;
+				vsync-len = <13>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+		};
+	};
+};
+
+&pwm1 {
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_eeti: eetigrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b1 /* Interrupt */
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index c13a73a..9a4c22c 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -30,7 +30,7 @@
 				/* kHz    uV */
 				996000  1250000
 				792000  1175000
-				396000  1075000
+				396000  1150000
 			>;
 			fsl,soc-operating-points = <
 				/* ARM kHz  SOC-PU uV */
diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora.dts b/arch/arm/boot/dts/imx6q-apalis-ixora.dts
index 2cba82d..8e67ca2 100644
--- a/arch/arm/boot/dts/imx6q-apalis-ixora.dts
+++ b/arch/arm/boot/dts/imx6q-apalis-ixora.dts
@@ -80,6 +80,47 @@
 		};
 	};
 
+	lcd_display: display@di0 {
+		compatible = "fsl,imx-parallel-display";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interface-pix-fmt = "rgb24";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_ipu1_lcdif>;
+		status = "okay";
+
+		port@0 {
+			reg = <0>;
+
+			lcd_display_in: endpoint {
+				remote-endpoint = <&ipu1_di0_disp1>;
+			};
+		};
+
+		port@1 {
+			reg = <1>;
+
+			lcd_display_out: endpoint {
+				remote-endpoint = <&lcd_panel_in>;
+			};
+		};
+	};
+
+	panel: panel {
+		/*
+		 * edt,et057090dhu: EDT 5.7" LCD TFT
+		 * edt,et070080dh6: EDT 7.0" LCD TFT
+		 */
+		compatible = "edt,et057090dhu";
+		backlight = <&backlight>;
+
+		port {
+			lcd_panel_in: endpoint {
+				remote-endpoint = <&lcd_display_out>;
+			};
+		};
+	};
+
 	leds {
 		compatible = "gpio-leds";
 
@@ -169,13 +210,18 @@
 	};
 };
 
+&ipu1_di0_disp1 {
+	remote-endpoint = <&lcd_display_in>;
+};
+
 &ldb {
 	status = "okay";
 };
 
 &pcie {
-	/* active-low meaning opposite of regular PERST# active-low polarity */
-	reset-gpio = <&gpio1 28 GPIO_ACTIVE_LOW>;
+	/* active-high meaning opposite of regular PERST# active-low polarity */
+	reset-gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+	reset-gpio-active-high;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts
index 3101be5..f0a2be5 100644
--- a/arch/arm/boot/dts/imx6q-b450v3.dts
+++ b/arch/arm/boot/dts/imx6q-b450v3.dts
@@ -65,11 +65,14 @@
 	};
 };
 
-&ldb {
+&clks {
 	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
 			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
 	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
 				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
+&ldb {
 	status = "okay";
 
 	lvds0: lvds-channel@0 {
diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts
index 823f55c..33cb71a 100644
--- a/arch/arm/boot/dts/imx6q-b650v3.dts
+++ b/arch/arm/boot/dts/imx6q-b650v3.dts
@@ -65,11 +65,14 @@
 	};
 };
 
-&ldb {
+&clks {
 	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
 			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
 	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
 				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
+&ldb {
 	status = "okay";
 
 	lvds0: lvds-channel@0 {
diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts
index 984d000..167f744 100644
--- a/arch/arm/boot/dts/imx6q-b850v3.dts
+++ b/arch/arm/boot/dts/imx6q-b850v3.dts
@@ -51,25 +51,20 @@
 	chosen {
 		stdout-path = &uart3;
 	};
+};
 
-	panel-lvds0 {
-		compatible = "auo,b133htn01";
-		backlight = <&backlight_lvds>;
-		ddc-i2c-bus = <&mux2_i2c2>;
-
-		port {
-			panel_in_lvds0: endpoint {
-				remote-endpoint = <&lvds0_out>;
-			};
-		};
-	};
+&clks {
+	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
+			  <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
+			  <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>;
+	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+				 <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+				 <&clks IMX6QDL_CLK_PLL2_PFD2_396M>,
+				 <&clks IMX6QDL_CLK_PLL2_PFD2_396M>;
 };
 
 &ldb {
-	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
-	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
-				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
 	fsl,dual-channel;
 	status = "okay";
 
@@ -77,14 +72,6 @@
 		fsl,data-mapping = "spwg";
 		fsl,data-width = <24>;
 		status = "okay";
-
-		port@4 {
-			reg = <4>;
-
-			lvds0_out: endpoint {
-				remote-endpoint = <&panel_in_lvds0>;
-			};
-		};
 	};
 };
 
diff --git a/arch/arm/boot/dts/imx6q-ba16.dtsi b/arch/arm/boot/dts/imx6q-ba16.dtsi
index 8f6e603..f7e17e2 100644
--- a/arch/arm/boot/dts/imx6q-ba16.dtsi
+++ b/arch/arm/boot/dts/imx6q-ba16.dtsi
@@ -323,6 +323,8 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pcie>;
 	reset-gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>;
+	fsl,tx-swing-full = <103>;
+	fsl,tx-swing-low = <103>;
 	status = "okay";
 };
 
@@ -335,7 +337,7 @@
 &pwm2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pwm2>;
-	status = "okay";
+	status = "disabled";
 };
 
 &sata {
@@ -390,7 +392,6 @@
 	pinctrl-0 = <&pinctrl_usdhc3 &pinctrl_usdhc3_reset>;
 	bus-width = <8>;
 	vmmc-supply = <&vdd_bperi>;
-	vqmmc-supply = <&vdd_bio>;
 	non-removable;
 	keep-power-in-suspend;
 	status = "okay";
@@ -399,6 +400,7 @@
 &wdog1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_wdog>;
+	fsl,ext-reset-output;
 };
 
 &iomuxc {
diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts
index a51834e..0511137 100644
--- a/arch/arm/boot/dts/imx6q-gw5400-a.dts
+++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts
@@ -327,7 +327,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&sw4_reg>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6q-marsboard.dts b/arch/arm/boot/dts/imx6q-marsboard.dts
new file mode 100644
index 0000000..3f8013c
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-marsboard.dts
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2016 Sergio Prado (sergio.prado@e-labworks.com)
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "Embest MarS Board i.MX6Dual";
+	compatible = "embest,imx6q-marsboard", "fsl,imx6q";
+
+	memory {
+		reg = <0x10000000 0x40000000>;
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	reg_usb_otg_vbus: regulator-usb-otg-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_led>;
+
+		user1 {
+			label = "imx6:green:user1";
+			gpios = <&gpio5 2 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+			linux,default-trigger = "heartbeat";
+		};
+
+		user2 {
+			label = "imx6:green:user2";
+			gpios = <&gpio3 28 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+	};
+};
+
+&audmux {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_audmux>;
+	status = "okay";
+};
+
+&ecspi1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ecspi1>;
+	cs-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>;
+	fsl,spi-num-chipselects = <1>;
+	status = "okay";
+
+	m25p80@0 {
+		compatible = "microchip,sst25vf016b";
+		spi-max-frequency = <20000000>;
+		reg = <0>;
+	};
+};
+
+&fec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet>;
+	phy-mode = "rgmii";
+	phy-reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&hdmi {
+	ddc-i2c-bus = <&i2c2>;
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+};
+
+&pwm1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm1>;
+	status = "okay";
+};
+
+&pwm2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm2>;
+	status = "okay";
+};
+
+&pwm3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm3>;
+	status = "okay";
+};
+
+&pwm4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm4>;
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3>;
+	status = "okay";
+};
+
+&uart4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart4>;
+	status = "okay";
+};
+
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	status = "okay";
+};
+
+&usbh1 {
+	dr_mode = "host";
+	disable-over-current;
+	status = "okay";
+};
+
+&usbotg {
+	vbus-supply = <&reg_usb_otg_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg>;
+	dr_mode = "otg";
+	disable-over-current;
+	status = "okay";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	vmmc-supply = <&reg_3p3v>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+&usdhc3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc3>;
+	vmmc-supply = <&reg_3p3v>;
+	non-removable;
+	status = "okay";
+};
+
+&iomuxc {
+
+	pinctrl_audmux: audmuxgrp {
+		fsl,pins = <
+			MX6QDL_PAD_CSI0_DAT7__AUD3_RXD		0x130b0
+			MX6QDL_PAD_CSI0_DAT4__AUD3_TXC		0x130b0
+			MX6QDL_PAD_CSI0_DAT5__AUD3_TXD		0x110b0
+			MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS		0x130b0
+			MX6QDL_PAD_GPIO_0__CCM_CLKO1		0x130b0	/* CAM_MCLK */
+		>;
+	};
+
+	pinctrl_ecspi1: ecspi1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D16__ECSPI1_SCLK		0x100b1
+			MX6QDL_PAD_EIM_D17__ECSPI1_MISO		0x100b1
+			MX6QDL_PAD_EIM_D18__ECSPI1_MOSI		0x100b1
+			MX6QDL_PAD_EIM_EB2__GPIO2_IO30		0x000b1	/* CS0 */
+		>;
+	};
+
+	pinctrl_enet: enetgrp {
+		fsl,pins = <
+			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+			MX6QDL_PAD_RGMII_TXC__RGMII_TXC		0x1b0b0
+			MX6QDL_PAD_RGMII_TD0__RGMII_TD0		0x1b0b0
+			MX6QDL_PAD_RGMII_TD1__RGMII_TD1		0x1b0b0
+			MX6QDL_PAD_RGMII_TD2__RGMII_TD2		0x1b0b0
+			MX6QDL_PAD_RGMII_TD3__RGMII_TD3		0x1b0b0
+			MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL	0x1b0b0
+			/* AR8035 CLK_25M --> ENET_REF_CLK (V22) */
+			MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x0a0b1
+			/* AR8035 pin strapping: IO voltage: pull up */
+			MX6QDL_PAD_RGMII_RXC__RGMII_RXC		0x1b0b0
+			/* AR8035 pin strapping: PHYADDR#0: pull down */
+			MX6QDL_PAD_RGMII_RD0__RGMII_RD0		0x130b0
+			/* AR8035 pin strapping: PHYADDR#1: pull down */
+			MX6QDL_PAD_RGMII_RD1__RGMII_RD1		0x130b0
+			/* AR8035 pin strapping: MODE#1: pull up */
+			MX6QDL_PAD_RGMII_RD2__RGMII_RD2		0x1b0b0
+			/* AR8035 pin strapping: MODE#3: pull up */
+			MX6QDL_PAD_RGMII_RD3__RGMII_RD3		0x1b0b0
+			/* AR8035 pin strapping: MODE#0: pull down */
+			MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL	0x130b0
+			/* GPIO16 -> AR8035 25MHz */
+			MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0x4001b0a8
+			/* RGMII_nRST */
+			MX6QDL_PAD_EIM_D31__GPIO3_IO31		0x130b0
+			/* AR8035 interrupt */
+			MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28	0x180b0
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6QDL_PAD_CSI0_DAT8__I2C1_SDA		0x4001b8b1
+			MX6QDL_PAD_CSI0_DAT9__I2C1_SCL		0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL3__I2C2_SCL		0x4001b8b1
+			MX6QDL_PAD_KEY_ROW3__I2C2_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_5__I2C3_SCL		0x4001b8b1
+			MX6QDL_PAD_GPIO_6__I2C3_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_led: ledgrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_A25__GPIO5_IO02		0x1b0b1	/* LED1 */
+			MX6QDL_PAD_EIM_D28__GPIO3_IO28		0x1b0b1	/* LED2 */
+		>;
+	};
+
+	pinctrl_pwm1: pwm1grp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT8__PWM1_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm2: pwm2grp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT9__PWM2_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm3: pwm3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT1__PWM3_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm4: pwm4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_CMD__PWM4_OUT		0x1b0b1
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA	0x1b0b1
+			MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D26__UART2_TX_DATA	0x1b0b1
+			MX6QDL_PAD_EIM_D27__UART2_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart3: uart3grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D24__UART3_TX_DATA	0x1b0b1
+			MX6QDL_PAD_EIM_D25__UART3_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart4: uart4grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL0__UART4_TX_DATA	0x1b0b1
+			MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL1__UART5_TX_DATA	0x1b0b1
+			MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_usbotg: usbotggrp {
+		fsl,pins = <
+			MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID	0x17059
+			MX6QDL_PAD_EIM_D21__USB_OTG_OC		0x1b0b0
+			MX6QDL_PAD_EIM_D22__GPIO3_IO22		0x000b0	/* USB OTG POWER ENABLE */
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD2_CMD__SD2_CMD		0x17059
+			MX6QDL_PAD_SD2_CLK__SD2_CLK		0x10059
+			MX6QDL_PAD_SD2_DAT0__SD2_DATA0		0x17059
+			MX6QDL_PAD_SD2_DAT1__SD2_DATA1		0x17059
+			MX6QDL_PAD_SD2_DAT2__SD2_DATA2		0x17059
+			MX6QDL_PAD_SD2_DAT3__SD2_DATA3		0x17059
+			MX6QDL_PAD_GPIO_4__GPIO1_IO04		0x1b0b0	/* CD */
+			MX6QDL_PAD_GPIO_2__GPIO1_IO02		0x1f0b0	/* WP */
+		>;
+	};
+
+	pinctrl_usdhc3: usdhc3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x17009
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x10009
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x17009
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x17009
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x17009
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x17009
+			MX6QDL_PAD_SD3_RST__SD3_RESET		0x17009
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index 0da81bc..1926b13 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -141,7 +141,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
-	phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_HIGH>;
+	phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
@@ -159,7 +159,7 @@
 	status = "okay";
 
 	sgtl5000: sgtl5000@0a {
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		compatible = "fsl,sgtl5000";
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_sgtl5000>;
diff --git a/arch/arm/boot/dts/imx6q-tx6q-1010-comtft.dts b/arch/arm/boot/dts/imx6q-tx6q-1010-comtft.dts
index b18fae1..65e95ae 100644
--- a/arch/arm/boot/dts/imx6q-tx6q-1010-comtft.dts
+++ b/arch/arm/boot/dts/imx6q-tx6q-1010-comtft.dts
@@ -1,12 +1,42 @@
 /*
- * Copyright 2014 Lothar Waßmann <LW@KARO-electronics.de>
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
  *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 at the following locations:
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
  *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
 /dts-v1/;
diff --git a/arch/arm/boot/dts/imx6q-tx6q-1010.dts b/arch/arm/boot/dts/imx6q-tx6q-1010.dts
index b58ec9c..20cd0e7 100644
--- a/arch/arm/boot/dts/imx6q-tx6q-1010.dts
+++ b/arch/arm/boot/dts/imx6q-tx6q-1010.dts
@@ -1,12 +1,42 @@
 /*
- * Copyright 2014 Lothar Waßmann <LW@KARO-electronics.de>
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
  *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 at the following locations:
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
  *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
 /dts-v1/;
diff --git a/arch/arm/boot/dts/imx6q-tx6q-1020-comtft.dts b/arch/arm/boot/dts/imx6q-tx6q-1020-comtft.dts
index 0bb9a9d..9ed243b 100644
--- a/arch/arm/boot/dts/imx6q-tx6q-1020-comtft.dts
+++ b/arch/arm/boot/dts/imx6q-tx6q-1020-comtft.dts
@@ -1,12 +1,42 @@
 /*
- * Copyright 2014 Lothar Waßmann <LW@KARO-electronics.de>
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
  *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 at the following locations:
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
  *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
 /dts-v1/;
@@ -94,22 +124,6 @@
 	status = "disabled";
 };
 
-&iomuxc {
-	imx6qdl-tx6 {
-		pinctrl_usdhc4: usdhc4grp {
-			fsl,pins = <
-				MX6QDL_PAD_SD4_CMD__SD4_CMD		0x070b1
-				MX6QDL_PAD_SD4_CLK__SD4_CLK		0x070b1
-				MX6QDL_PAD_SD4_DAT0__SD4_DATA0		0x070b1
-				MX6QDL_PAD_SD4_DAT1__SD4_DATA1		0x070b1
-				MX6QDL_PAD_SD4_DAT2__SD4_DATA2		0x070b1
-				MX6QDL_PAD_SD4_DAT3__SD4_DATA3		0x070b1
-				MX6QDL_PAD_NANDF_ALE__SD4_RESET		0x0b0b1
-			>;
-		};
-	};
-};
-
 &ipu1_di0_disp0 {
 	remote-endpoint = <&display0_in>;
 };
@@ -134,3 +148,17 @@
 	fsl,wp-controller;
 	status = "okay";
 };
+
+&iomuxc {
+	pinctrl_usdhc4: usdhc4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_CMD__SD4_CMD		0x070b1
+			MX6QDL_PAD_SD4_CLK__SD4_CLK		0x070b1
+			MX6QDL_PAD_SD4_DAT0__SD4_DATA0		0x070b1
+			MX6QDL_PAD_SD4_DAT1__SD4_DATA1		0x070b1
+			MX6QDL_PAD_SD4_DAT2__SD4_DATA2		0x070b1
+			MX6QDL_PAD_SD4_DAT3__SD4_DATA3		0x070b1
+			MX6QDL_PAD_NANDF_ALE__SD4_RESET		0x0b0b1
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6q-tx6q-1020.dts b/arch/arm/boot/dts/imx6q-tx6q-1020.dts
index b96d80a..347b531 100644
--- a/arch/arm/boot/dts/imx6q-tx6q-1020.dts
+++ b/arch/arm/boot/dts/imx6q-tx6q-1020.dts
@@ -1,12 +1,42 @@
 /*
- * Copyright 2014 Lothar Waßmann <LW@KARO-electronics.de>
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
  *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 at the following locations:
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
  *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
 /dts-v1/;
@@ -180,22 +210,6 @@
 	status = "disabled";
 };
 
-&iomuxc {
-	imx6qdl-tx6 {
-		pinctrl_usdhc4: usdhc4grp {
-			fsl,pins = <
-				MX6QDL_PAD_SD4_CMD__SD4_CMD		0x070b1
-				MX6QDL_PAD_SD4_CLK__SD4_CLK		0x070b1
-				MX6QDL_PAD_SD4_DAT0__SD4_DATA0		0x070b1
-				MX6QDL_PAD_SD4_DAT1__SD4_DATA1		0x070b1
-				MX6QDL_PAD_SD4_DAT2__SD4_DATA2		0x070b1
-				MX6QDL_PAD_SD4_DAT3__SD4_DATA3		0x070b1
-				MX6QDL_PAD_NANDF_ALE__SD4_RESET		0x0b0b1
-			>;
-		};
-	};
-};
-
 &ipu1_di0_disp0 {
 	remote-endpoint = <&display0_in>;
 };
@@ -208,3 +222,17 @@
 	fsl,wp-controller;
 	status = "okay";
 };
+
+&iomuxc {
+	pinctrl_usdhc4: usdhc4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_CMD__SD4_CMD		0x070b1
+			MX6QDL_PAD_SD4_CLK__SD4_CLK		0x070b1
+			MX6QDL_PAD_SD4_DAT0__SD4_DATA0		0x070b1
+			MX6QDL_PAD_SD4_DAT1__SD4_DATA1		0x070b1
+			MX6QDL_PAD_SD4_DAT2__SD4_DATA2		0x070b1
+			MX6QDL_PAD_SD4_DAT3__SD4_DATA3		0x070b1
+			MX6QDL_PAD_NANDF_ALE__SD4_RESET		0x0b0b1
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6q-tx6q-1036.dts b/arch/arm/boot/dts/imx6q-tx6q-1036.dts
new file mode 100644
index 0000000..7c152e3
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-tx6q-1036.dts
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-tx6.dtsi"
+
+/ {
+	model = "Ka-Ro electronics TX6Q-1036 Module";
+	compatible = "karo,imx6q-tx6q", "fsl,imx6q";
+
+	aliases {
+		display = &display;
+	};
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_lcd0_pwr>;
+		enable-gpios = <&gpio3 29 GPIO_ACTIVE_HIGH>;
+		power-supply = <&reg_lcd1_pwr>;
+		/*
+		 * a poor man's way to create a 1:1 relationship between
+		 * the PWM value and the actual duty cycle
+		 */
+		brightness-levels = < 0  1  2  3  4  5  6  7  8  9
+				     10 11 12 13 14 15 16 17 18 19
+				     20 21 22 23 24 25 26 27 28 29
+				     30 31 32 33 34 35 36 37 38 39
+				     40 41 42 43 44 45 46 47 48 49
+				     50 51 52 53 54 55 56 57 58 59
+				     60 61 62 63 64 65 66 67 68 69
+				     70 71 72 73 74 75 76 77 78 79
+				     80 81 82 83 84 85 86 87 88 89
+				     90 91 92 93 94 95 96 97 98 99
+				    100>;
+		default-brightness-level = <50>;
+	};
+
+	display: display@di0 {
+		compatible = "fsl,imx-parallel-display";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_disp0_2>;
+		interface-pix-fmt = "rgb24";
+		status = "okay";
+
+		port {
+			display0_in: endpoint {
+				remote-endpoint = <&ipu1_di0_disp0>;
+			};
+		};
+
+		display-timings {
+			native-mode = <&vga>;
+
+			vga: VGA {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <48>;
+				hsync-len = <96>;
+				hfront-porch = <16>;
+				vback-porch = <31>;
+				vsync-len = <2>;
+				vfront-porch = <12>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ETV570 {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <114>;
+				hsync-len = <30>;
+				hfront-porch = <16>;
+				vback-porch = <32>;
+				vsync-len = <3>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0350 {
+				clock-frequency = <6413760>;
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <34>;
+				hsync-len = <34>;
+				hfront-porch = <20>;
+				vback-porch = <15>;
+				vsync-len = <3>;
+				vfront-porch = <4>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0430 {
+				clock-frequency = <9009000>;
+				hactive = <480>;
+				vactive = <272>;
+				hback-porch = <2>;
+				hsync-len = <41>;
+				hfront-porch = <2>;
+				vback-porch = <2>;
+				vsync-len = <10>;
+				vfront-porch = <2>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			ET0500 {
+				clock-frequency = <33264000>;
+				hactive = <800>;
+				vactive = <480>;
+				hback-porch = <88>;
+				hsync-len = <128>;
+				hfront-porch = <40>;
+				vback-porch = <33>;
+				vsync-len = <2>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0700 { /* same as ET0500 */
+				clock-frequency = <33264000>;
+				hactive = <800>;
+				vactive = <480>;
+				hback-porch = <88>;
+				hsync-len = <128>;
+				hfront-porch = <40>;
+				vback-porch = <33>;
+				vsync-len = <2>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ETQ570 {
+				clock-frequency = <6596040>;
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <38>;
+				hsync-len = <30>;
+				hfront-porch = <30>;
+				vback-porch = <16>;
+				vsync-len = <3>;
+				vfront-porch = <4>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+		};
+	};
+};
+
+&ds1339 {
+	status = "disabled";
+};
+
+&gpmi {
+	status = "disabled";
+};
+
+&ipu1_di0_disp0 {
+	remote-endpoint = <&display0_in>;
+};
+
+&ipu2 {
+	status = "disabled";
+};
+
+&reg_lcd0_pwr {
+	status = "disabled";
+};
+
+&usdhc4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc4>;
+	bus-width = <4>;
+	non-removable;
+	no-1-8-v;
+	fsl,wp-controller;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_usdhc4: usdhc4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_CMD__SD4_CMD		0x070b1
+			MX6QDL_PAD_SD4_CLK__SD4_CLK		0x070b1
+			MX6QDL_PAD_SD4_DAT0__SD4_DATA0		0x070b1
+			MX6QDL_PAD_SD4_DAT1__SD4_DATA1		0x070b1
+			MX6QDL_PAD_SD4_DAT2__SD4_DATA2		0x070b1
+			MX6QDL_PAD_SD4_DAT3__SD4_DATA3		0x070b1
+			MX6QDL_PAD_NANDF_ALE__SD4_RESET		0x0b0b1
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6q-tx6q-1110.dts b/arch/arm/boot/dts/imx6q-tx6q-1110.dts
index 2792da9..0433e22 100644
--- a/arch/arm/boot/dts/imx6q-tx6q-1110.dts
+++ b/arch/arm/boot/dts/imx6q-tx6q-1110.dts
@@ -1,12 +1,42 @@
 /*
- * Copyright 2014 Lothar Waßmann <LW@KARO-electronics.de>
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
  *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 at the following locations:
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
  *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
 /dts-v1/;
@@ -81,16 +111,6 @@
 	};
 };
 
-&iomuxc {
-	imx6q-tx6q-1110 {
-		pinctrl_eeti: eetigrp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b1 /* Interrupt */
-			>;
-		};
-	};
-};
-
 &kpp {
 	status = "disabled"; /* pad conflict with backlight1 PWM */
 };
@@ -152,3 +172,11 @@
 &sata {
 	status = "okay";
 };
+
+&iomuxc {
+	pinctrl_eeti: eetigrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b1 /* Interrupt */
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6q-tx6q-11x0-mb7.dts b/arch/arm/boot/dts/imx6q-tx6q-11x0-mb7.dts
new file mode 100644
index 0000000..d78b129
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-tx6q-11x0-mb7.dts
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2016 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-tx6.dtsi"
+
+/ {
+	model = "Ka-Ro electronics TX6Q-1110/-1130 Module on MB7 baseboard";
+	compatible = "karo,imx6q-tx6q", "fsl,imx6q";
+
+	aliases {
+		display = &lvds0;
+		ipu1 = &ipu2;
+		lvds0 = &lvds0;
+		lvds1 = &lvds1;
+	};
+
+	backlight0: backlight0 {
+		compatible = "pwm-backlight";
+		pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
+		power-supply = <&reg_lcd0_pwr>;
+		/*
+		 * a poor man's way to create a 1:1 relationship between
+		 * the PWM value and the actual duty cycle
+		 */
+		brightness-levels = < 0  1  2  3  4  5  6  7  8  9
+				     10 11 12 13 14 15 16 17 18 19
+				     20 21 22 23 24 25 26 27 28 29
+				     30 31 32 33 34 35 36 37 38 39
+				     40 41 42 43 44 45 46 47 48 49
+				     50 51 52 53 54 55 56 57 58 59
+				     60 61 62 63 64 65 66 67 68 69
+				     70 71 72 73 74 75 76 77 78 79
+				     80 81 82 83 84 85 86 87 88 89
+				     90 91 92 93 94 95 96 97 98 99
+				    100>;
+		default-brightness-level = <50>;
+	};
+
+	backlight1: backlight1 {
+		compatible = "pwm-backlight";
+		pwms = <&pwm1 0 500000 PWM_POLARITY_INVERTED>;
+		power-supply = <&reg_lcd1_pwr>;
+		/*
+		 * a poor man's way to create a 1:1 relationship between
+		 * the PWM value and the actual duty cycle
+		 */
+		brightness-levels = < 0  1  2  3  4  5  6  7  8  9
+				     10 11 12 13 14 15 16 17 18 19
+				     20 21 22 23 24 25 26 27 28 29
+				     30 31 32 33 34 35 36 37 38 39
+				     40 41 42 43 44 45 46 47 48 49
+				     50 51 52 53 54 55 56 57 58 59
+				     60 61 62 63 64 65 66 67 68 69
+				     70 71 72 73 74 75 76 77 78 79
+				     80 81 82 83 84 85 86 87 88 89
+				     90 91 92 93 94 95 96 97 98 99
+				    100>;
+		default-brightness-level = <50>;
+	};
+};
+
+&can1 {
+	status = "disabled";
+};
+
+&can2 {
+	xceiver-supply = <&reg_3v3>;
+};
+
+&i2c3 {
+	polytouch1: eeti@04 {
+		compatible = "eeti,egalax_ts";
+		reg = <0x04>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_eeti>;
+		interrupts-extended = <&gpio3 22 IRQ_TYPE_EDGE_FALLING>;
+		wakeup-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+		wakeup-source;
+	};
+};
+
+&ipu2 {
+	status = "disabled";
+};
+
+&kpp {
+	status = "disabled"; /* pads partially clash with backlight1 PWM */
+};
+
+&ldb {
+	status = "okay";
+
+	lvds0: lvds-channel@0 {
+		fsl,data-mapping = "spwg";
+		fsl,data-width = <18>;
+		status = "okay";
+
+		display-timings {
+			native-mode = <&lvds0_timing1>;
+
+			lvds0_timing0: hsd100pxn1 {
+				clock-frequency = <65000000>;
+				hactive = <1024>;
+				vactive = <768>;
+				hback-porch = <220>;
+				hfront-porch = <40>;
+				vback-porch = <21>;
+				vfront-porch = <7>;
+				hsync-len = <60>;
+				vsync-len = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			lvds0_timing1: VGA {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <48>;
+				hfront-porch = <16>;
+				vback-porch = <31>;
+				vfront-porch = <12>;
+				hsync-len = <96>;
+				vsync-len = <2>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			lvds0_timing2: nl12880bc20 {
+				clock-frequency = <71000000>;
+				hactive = <1280>;
+				vactive = <800>;
+				hback-porch = <50>;
+				hfront-porch = <50>;
+				vback-porch = <5>;
+				vfront-porch = <5>;
+				hsync-len = <60>;
+				vsync-len = <13>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+		};
+	};
+
+	lvds1: lvds-channel@1 {
+		fsl,data-mapping = "spwg";
+		fsl,data-width = <18>;
+		status = "okay";
+
+		display-timings {
+			native-mode = <&lvds1_timing2>;
+
+			lvds1_timing0: hsd100pxn1 {
+				clock-frequency = <65000000>;
+				hactive = <1024>;
+				vactive = <768>;
+				hback-porch = <220>;
+				hfront-porch = <40>;
+				vback-porch = <21>;
+				vfront-porch = <7>;
+				hsync-len = <60>;
+				vsync-len = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			lvds1_timing1: VGA {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <48>;
+				hfront-porch = <16>;
+				vback-porch = <31>;
+				vfront-porch = <12>;
+				hsync-len = <96>;
+				vsync-len = <2>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			lvds1_timing2: nl12880bc20 {
+				clock-frequency = <71000000>;
+				hactive = <1280>;
+				vactive = <800>;
+				hback-porch = <50>;
+				hfront-porch = <50>;
+				vback-porch = <5>;
+				vfront-porch = <5>;
+				hsync-len = <60>;
+				vsync-len = <13>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+		};
+	};
+};
+
+&pwm1 {
+	status = "okay";
+};
+
+&sata {
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_eeti: eetigrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b1 /* Interrupt */
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index cd10c8d..c30c836 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -154,22 +154,22 @@
 				#size-cells = <0>;
 				reg = <2>;
 
-				ipu2_di0_disp0: endpoint@0 {
+				ipu2_di0_disp0: disp0-endpoint {
 				};
 
-				ipu2_di0_hdmi: endpoint@1 {
+				ipu2_di0_hdmi: hdmi-endpoint {
 					remote-endpoint = <&hdmi_mux_2>;
 				};
 
-				ipu2_di0_mipi: endpoint@2 {
+				ipu2_di0_mipi: mipi-endpoint {
 					remote-endpoint = <&mipi_mux_2>;
 				};
 
-				ipu2_di0_lvds0: endpoint@3 {
+				ipu2_di0_lvds0: lvds0-endpoint {
 					remote-endpoint = <&lvds0_mux_2>;
 				};
 
-				ipu2_di0_lvds1: endpoint@4 {
+				ipu2_di0_lvds1: lvds1-endpoint {
 					remote-endpoint = <&lvds1_mux_2>;
 				};
 			};
@@ -179,19 +179,19 @@
 				#size-cells = <0>;
 				reg = <3>;
 
-				ipu2_di1_hdmi: endpoint@1 {
+				ipu2_di1_hdmi: hdmi-endpoint {
 					remote-endpoint = <&hdmi_mux_3>;
 				};
 
-				ipu2_di1_mipi: endpoint@2 {
+				ipu2_di1_mipi: mipi-endpoint {
 					remote-endpoint = <&mipi_mux_3>;
 				};
 
-				ipu2_di1_lvds0: endpoint@3 {
+				ipu2_di1_lvds0: lvds0-endpoint {
 					remote-endpoint = <&lvds0_mux_3>;
 				};
 
-				ipu2_di1_lvds1: endpoint@4 {
+				ipu2_di1_lvds1: lvds1-endpoint {
 					remote-endpoint = <&lvds1_mux_3>;
 				};
 			};
diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
index b33e5a9..922b1dd 100644
--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
@@ -324,7 +324,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_2p5v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi b/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
index a8f3500..865c9a2 100644
--- a/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
@@ -213,7 +213,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_3p3v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index 8dd74e9..7191b84 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -244,7 +244,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_1p8v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index ec3fe74..40d06b0 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -237,7 +237,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_1p8v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 367cc49..d6dbe2a 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -328,7 +328,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&sw4_reg>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
index 24d7d3f..e456b5c 100644
--- a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
@@ -269,7 +269,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinctrl_sgtl5000>;
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_2p5v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index dc74aa3..657da6b 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -402,7 +402,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_2p5v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index c6c590d..73915db 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -304,7 +304,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_2p5v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index a503562..cacf593 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -126,7 +126,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_3p3v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index 0f1aca4..c47fe6c 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -290,7 +290,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_2p5v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index 0b5c4de..5248e7b 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -115,7 +115,7 @@
 		mux-ext-port = <3>;
 	};
 
-	backlight {
+	backlight_lvds: backlight-lvds {
 		compatible = "pwm-backlight";
 		pwms = <&pwm1 0 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -133,6 +133,17 @@
 		        default-state = "on";
 		};
 	};
+
+	panel {
+		compatible = "hannstar,hsd100pxn1";
+		backlight = <&backlight_lvds>;
+
+		port {
+			panel_in: endpoint {
+				remote-endpoint = <&lvds0_out>;
+			};
+		};
+	};
 };
 
 &audmux {
@@ -509,18 +520,11 @@
 		fsl,data-width = <18>;
 		status = "okay";
 
-		display-timings {
-			native-mode = <&timing0>;
-			timing0: hsd100pxn1 {
-				clock-frequency = <65000000>;
-				hactive = <1024>;
-				vactive = <768>;
-				hback-porch = <220>;
-				hfront-porch = <40>;
-				vback-porch = <21>;
-				vfront-porch = <7>;
-				hsync-len = <60>;
-				vsync-len = <10>;
+		port@4 {
+			reg = <4>;
+
+			lvds0_out: endpoint {
+				remote-endpoint = <&panel_in>;
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index efd06b5..39b85ae 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -1,12 +1,42 @@
 /*
- * Copyright 2014 Lothar Waßmann <LW@KARO-electronics.de>
+ * Copyright 2014-2016 Lothar Waßmann <LW@KARO-electronics.de>
  *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 at the following locations:
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
  *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
 #include <dt-bindings/gpio/gpio.h>
@@ -37,6 +67,7 @@
 	clocks {
 		#address-cells = <1>;
 		#size-cells = <0>;
+
 		mclk: clock@0 {
 			compatible = "fixed-clock";
 			reg = <0>;
@@ -61,109 +92,95 @@
 
 		user_led: user {
 			label = "Heartbeat";
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_user_led>;
 			gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>;
 			linux,default-trigger = "heartbeat";
 		};
 	};
 
-	regulators {
-		compatible = "simple-bus";
-		#address-cells = <1>;
-		#size-cells = <0>;
+	reg_3v3_etn: regulator-3v3-etn {
+		compatible = "regulator-fixed";
+		regulator-name = "3V3_ETN";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_etnphy_power>;
+		gpio = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
 
-		reg_3v3_etn: regulator@0 {
-			compatible = "regulator-fixed";
-			reg = <0>;
-			regulator-name = "3V3_ETN";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_etnphy_power>;
-			gpio = <&gpio3 20 GPIO_ACTIVE_HIGH>;
-			enable-active-high;
-		};
+	reg_2v5: regulator-2v5 {
+		compatible = "regulator-fixed";
+		regulator-name = "2V5";
+		regulator-min-microvolt = <2500000>;
+		regulator-max-microvolt = <2500000>;
+		regulator-always-on;
+	};
 
-		reg_2v5: regulator@1 {
-			compatible = "regulator-fixed";
-			reg = <1>;
-			regulator-name = "2V5";
-			regulator-min-microvolt = <2500000>;
-			regulator-max-microvolt = <2500000>;
-			regulator-always-on;
-		};
+	reg_3v3: regulator-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
 
-		reg_3v3: regulator@2 {
-			compatible = "regulator-fixed";
-			reg = <2>;
-			regulator-name = "3V3";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			regulator-always-on;
-		};
+	reg_can_xcvr: regulator-can-xcvr {
+		compatible = "regulator-fixed";
+		regulator-name = "CAN XCVR";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_flexcan_xcvr>;
+		gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
+		enable-active-low;
+	};
 
-		reg_can_xcvr: regulator@3 {
-			compatible = "regulator-fixed";
-			reg = <3>;
-			regulator-name = "CAN XCVR";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_flexcan_xcvr>;
-			gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
-			enable-active-low;
-		};
+	reg_lcd0_pwr: regulator-lcd0-pwr {
+		compatible = "regulator-fixed";
+		regulator-name = "LCD0 POWER";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_lcd0_pwr>;
+		gpio = <&gpio3 29 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+		regulator-boot-on;
+	};
 
-		reg_lcd0_pwr: regulator@4 {
-			compatible = "regulator-fixed";
-			reg = <4>;
-			regulator-name = "LCD0 POWER";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_lcd0_pwr>;
-			gpio = <&gpio3 29 GPIO_ACTIVE_HIGH>;
-			enable-active-high;
-			regulator-boot-on;
-			regulator-always-on;
-		};
+	reg_lcd1_pwr: regulator-lcd1-pwr {
+		compatible = "regulator-fixed";
+		regulator-name = "LCD1 POWER";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_lcd1_pwr>;
+		gpio = <&gpio2 31 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+		regulator-boot-on;
+	};
 
-		reg_lcd1_pwr: regulator@5 {
-			compatible = "regulator-fixed";
-			reg = <5>;
-			regulator-name = "LCD1 POWER";
-			regulator-min-microvolt = <3300000>;
-			regulator-max-microvolt = <3300000>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_lcd1_pwr>;
-			gpio = <&gpio2 31 GPIO_ACTIVE_HIGH>;
-			enable-active-high;
-			regulator-boot-on;
-			regulator-always-on;
-		};
+	reg_usbh1_vbus: regulator-usbh1-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usbh1_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_usbh1_vbus>;
+		gpio = <&gpio3 31 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
 
-		reg_usbh1_vbus: regulator@6 {
-			compatible = "regulator-fixed";
-			reg = <6>;
-			regulator-name = "usbh1_vbus";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_usbh1_vbus>;
-			gpio = <&gpio3 31 GPIO_ACTIVE_HIGH>;
-			enable-active-high;
-		};
-
-		reg_usbotg_vbus: regulator@7 {
-			compatible = "regulator-fixed";
-			reg = <7>;
-			regulator-name = "usbotg_vbus";
-			regulator-min-microvolt = <5000000>;
-			regulator-max-microvolt = <5000000>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_usbotg_vbus>;
-			gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
-			enable-active-high;
-		};
+	reg_usbotg_vbus: regulator-usbotg-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usbotg_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_usbotg_vbus>;
+		gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
 	};
 
 	sound {
@@ -209,7 +226,7 @@
 		&gpio2 30 GPIO_ACTIVE_HIGH
 		&gpio3 19 GPIO_ACTIVE_HIGH
 	>;
-	status = "okay";
+	status = "disabled";
 
 	spidev0: spi@0 {
 		compatible = "spidev";
@@ -234,8 +251,22 @@
 	clock-names = "ipg", "ahb", "ptp", "enet_out";
 	phy-mode = "rmii";
 	phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_HIGH>;
+	phy-handle = <&etnphy>;
 	phy-supply = <&reg_3v3_etn>;
 	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		etnphy: ethernet-phy@0 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_enet_mdio>;
+			interrupts-extended = <&gpio7 1 IRQ_TYPE_EDGE_FALLING>;
+		};
+	};
 };
 
 &gpmi {
@@ -301,310 +332,318 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_hog>;
 
-	imx6qdl-tx6 {
-		pinctrl_hog: hoggrp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_A18__GPIO2_IO20		0x1b0b1 /* LED */
-				MX6QDL_PAD_SD3_DAT2__GPIO7_IO06		0x1b0b1 /* ETN PHY RESET */
-				MX6QDL_PAD_SD3_DAT4__GPIO7_IO01		0x1b0b1 /* ETN PHY INT */
-				MX6QDL_PAD_EIM_A25__GPIO5_IO02		0x1b0b1 /* PWR BTN */
-			>;
-		};
+	pinctrl_hog: hoggrp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_DAT2__GPIO7_IO06		0x1b0b1 /* ETN PHY RESET */
+			MX6QDL_PAD_SD3_DAT4__GPIO7_IO01		0x1b0b1 /* ETN PHY INT */
+			MX6QDL_PAD_EIM_A25__GPIO5_IO02		0x1b0b1 /* PWR BTN */
+		>;
+	};
 
-		pinctrl_audmux: audmuxgrp {
-			fsl,pins = <
-				MX6QDL_PAD_KEY_ROW1__AUD5_RXD		0x130b0 /* SSI1_RXD */
-				MX6QDL_PAD_KEY_ROW0__AUD5_TXD		0x110b0 /* SSI1_TXD */
-				MX6QDL_PAD_KEY_COL0__AUD5_TXC		0x130b0 /* SSI1_CLK */
-				MX6QDL_PAD_KEY_COL1__AUD5_TXFS		0x130b0 /* SSI1_FS */
-			>;
-		};
+	pinctrl_audmux: audmuxgrp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_ROW1__AUD5_RXD		0x130b0 /* SSI1_RXD */
+			MX6QDL_PAD_KEY_ROW0__AUD5_TXD		0x110b0 /* SSI1_TXD */
+			MX6QDL_PAD_KEY_COL0__AUD5_TXC		0x130b0 /* SSI1_CLK */
+			MX6QDL_PAD_KEY_COL1__AUD5_TXFS		0x130b0 /* SSI1_FS */
+		>;
+	};
 
-		pinctrl_disp0_1: disp0grp-1 {
-			fsl,pins = <
-				MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
-				MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15       0x10
-				MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02        0x10
-				MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03        0x10
-				/* PAD DISP0_DAT0 is used for the Flexcan transceiver control */
-				MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01   0x10
-				MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02   0x10
-				MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03   0x10
-				MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04   0x10
-				MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05   0x10
-				MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06   0x10
-				MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07   0x10
-				MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08   0x10
-				MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09   0x10
-				MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10  0x10
-				MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11  0x10
-				MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12  0x10
-				MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13  0x10
-				MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14  0x10
-				MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15  0x10
-				MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16  0x10
-				MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17  0x10
-				MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18  0x10
-				MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19  0x10
-				MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20  0x10
-				MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21  0x10
-				MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22  0x10
-				MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23  0x10
-			>;
-		};
+	pinctrl_disp0_1: disp0grp-1 {
+		fsl,pins = <
+			MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
+			MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15       0x10
+			MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02        0x10
+			MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03        0x10
+			/* PAD DISP0_DAT0 is used for the Flexcan transceiver control */
+			MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01   0x10
+			MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02   0x10
+			MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03   0x10
+			MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04   0x10
+			MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05   0x10
+			MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06   0x10
+			MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07   0x10
+			MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08   0x10
+			MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09   0x10
+			MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10  0x10
+			MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11  0x10
+			MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12  0x10
+			MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13  0x10
+			MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14  0x10
+			MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15  0x10
+			MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16  0x10
+			MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17  0x10
+			MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18  0x10
+			MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19  0x10
+			MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20  0x10
+			MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21  0x10
+			MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22  0x10
+			MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23  0x10
+		>;
+	};
 
-		pinctrl_disp0_2: disp0grp-2 {
-			fsl,pins = <
-				MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
-				MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15       0x10
-				MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02        0x10
-				MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03        0x10
-				MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00   0x10
-				MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01   0x10
-				MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02   0x10
-				MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03   0x10
-				MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04   0x10
-				MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05   0x10
-				MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06   0x10
-				MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07   0x10
-				MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08   0x10
-				MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09   0x10
-				MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10  0x10
-				MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11  0x10
-				MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12  0x10
-				MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13  0x10
-				MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14  0x10
-				MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15  0x10
-				MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16  0x10
-				MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17  0x10
-				MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18  0x10
-				MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19  0x10
-				MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20  0x10
-				MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21  0x10
-				MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22  0x10
-				MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23  0x10
-			>;
-		};
+	pinctrl_disp0_2: disp0grp-2 {
+		fsl,pins = <
+			MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
+			MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15       0x10
+			MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02        0x10
+			MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03        0x10
+			MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00   0x10
+			MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01   0x10
+			MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02   0x10
+			MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03   0x10
+			MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04   0x10
+			MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05   0x10
+			MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06   0x10
+			MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07   0x10
+			MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08   0x10
+			MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09   0x10
+			MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10  0x10
+			MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11  0x10
+			MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12  0x10
+			MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13  0x10
+			MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14  0x10
+			MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15  0x10
+			MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16  0x10
+			MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17  0x10
+			MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18  0x10
+			MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19  0x10
+			MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20  0x10
+			MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21  0x10
+			MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22  0x10
+			MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23  0x10
+		>;
+	};
 
-		pinctrl_ecspi1: ecspi1grp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_D18__ECSPI1_MOSI		0x0b0b0
-				MX6QDL_PAD_EIM_D17__ECSPI1_MISO		0x0b0b0
-				MX6QDL_PAD_EIM_D16__ECSPI1_SCLK		0x0b0b0
-				MX6QDL_PAD_GPIO_19__ECSPI1_RDY		0x0b0b0
-				MX6QDL_PAD_EIM_EB2__GPIO2_IO30		0x0b0b0 /* SPI CS0 */
-				MX6QDL_PAD_EIM_D19__GPIO3_IO19		0x0b0b0 /* SPI CS1 */
-			>;
-		};
+	pinctrl_ecspi1: ecspi1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D18__ECSPI1_MOSI		0x0b0b0
+			MX6QDL_PAD_EIM_D17__ECSPI1_MISO		0x0b0b0
+			MX6QDL_PAD_EIM_D16__ECSPI1_SCLK		0x0b0b0
+			MX6QDL_PAD_GPIO_19__ECSPI1_RDY		0x0b0b0
+			MX6QDL_PAD_EIM_EB2__GPIO2_IO30		0x0b0b0 /* SPI CS0 */
+			MX6QDL_PAD_EIM_D19__GPIO3_IO19		0x0b0b0 /* SPI CS1 */
+		>;
+	};
 
-		pinctrl_edt_ft5x06: edt-ft5x06grp {
-			fsl,pins = <
-				MX6QDL_PAD_NANDF_CS2__GPIO6_IO15	0x1b0b0 /* Interrupt */
-				MX6QDL_PAD_EIM_A16__GPIO2_IO22  	0x1b0b0 /* Reset */
-				MX6QDL_PAD_EIM_A17__GPIO2_IO21  	0x1b0b0 /* Wake */
-			>;
-		};
+	pinctrl_edt_ft5x06: edt-ft5x06grp {
+		fsl,pins = <
+			MX6QDL_PAD_NANDF_CS2__GPIO6_IO15	0x1b0b0 /* Interrupt */
+			MX6QDL_PAD_EIM_A16__GPIO2_IO22  	0x1b0b0 /* Reset */
+			MX6QDL_PAD_EIM_A17__GPIO2_IO21  	0x1b0b0 /* Wake */
+		>;
+	};
 
-		pinctrl_enet: enetgrp {
-			fsl,pins = <
-				MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
-				MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
-				MX6QDL_PAD_ENET_RXD0__ENET_RX_DATA0	0x1b0b0
-				MX6QDL_PAD_ENET_RXD1__ENET_RX_DATA1	0x1b0b0
-				MX6QDL_PAD_ENET_RX_ER__ENET_RX_ER	0x1b0b0
-				MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN	0x1b0b0
-				MX6QDL_PAD_ENET_TXD0__ENET_TX_DATA0	0x1b0b0
-				MX6QDL_PAD_ENET_TXD1__ENET_TX_DATA1	0x1b0b0
-				MX6QDL_PAD_ENET_CRS_DV__ENET_RX_EN	0x1b0b0
-			>;
-		};
+	pinctrl_enet: enetgrp {
+		fsl,pins = <
+			MX6QDL_PAD_ENET_RXD0__ENET_RX_DATA0	0x1b0b0
+			MX6QDL_PAD_ENET_RXD1__ENET_RX_DATA1	0x1b0b0
+			MX6QDL_PAD_ENET_RX_ER__ENET_RX_ER	0x1b0b0
+			MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN	0x1b0b0
+			MX6QDL_PAD_ENET_TXD0__ENET_TX_DATA0	0x1b0b0
+			MX6QDL_PAD_ENET_TXD1__ENET_TX_DATA1	0x1b0b0
+			MX6QDL_PAD_ENET_CRS_DV__ENET_RX_EN	0x1b0b0
+		>;
+	};
 
-		pinctrl_etnphy_power: etnphy-pwrgrp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_D20__GPIO3_IO20		0x1b0b1 /* ETN PHY POWER */
-			>;
-		};
+	pinctrl_enet_mdio: enet-mdiogrp {
+		fsl,pins = <
+			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+		>;
+	};
 
-		pinctrl_flexcan1: flexcan1grp {
-			fsl,pins = <
-				MX6QDL_PAD_GPIO_7__FLEXCAN1_TX		0x1b0b0
-				MX6QDL_PAD_GPIO_8__FLEXCAN1_RX		0x1b0b0
-			>;
-		};
+	pinctrl_etnphy_power: etnphy-pwrgrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D20__GPIO3_IO20		0x1b0b1 /* ETN PHY POWER */
+		>;
+	};
 
-		pinctrl_flexcan2: flexcan2grp {
-			fsl,pins = <
-				MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX	0x1b0b0
-				MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX	0x1b0b0
-			>;
-		};
+	pinctrl_flexcan1: flexcan1grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_7__FLEXCAN1_TX		0x1b0b0
+			MX6QDL_PAD_GPIO_8__FLEXCAN1_RX		0x1b0b0
+		>;
+	};
 
-		pinctrl_flexcan_xcvr: flexcan-xcvrgrp {
-			fsl,pins = <
-				MX6QDL_PAD_DISP0_DAT0__GPIO4_IO21	0x1b0b0 /* Flexcan XCVR enable */
-			>;
-		};
+	pinctrl_flexcan2: flexcan2grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX	0x1b0b0
+			MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX	0x1b0b0
+		>;
+	};
 
-		pinctrl_gpmi_nand: gpminandgrp {
-			fsl,pins = <
-				MX6QDL_PAD_NANDF_CLE__NAND_CLE    	0x0b0b1
-				MX6QDL_PAD_NANDF_ALE__NAND_ALE    	0x0b0b1
-				MX6QDL_PAD_NANDF_WP_B__NAND_WP_B  	0x0b0b1
-				MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0x0b000
-				MX6QDL_PAD_NANDF_CS0__NAND_CE0_B  	0x0b0b1
-				MX6QDL_PAD_SD4_CMD__NAND_RE_B     	0x0b0b1
-				MX6QDL_PAD_SD4_CLK__NAND_WE_B     	0x0b0b1
-				MX6QDL_PAD_NANDF_D0__NAND_DATA00  	0x0b0b1
-				MX6QDL_PAD_NANDF_D1__NAND_DATA01  	0x0b0b1
-				MX6QDL_PAD_NANDF_D2__NAND_DATA02  	0x0b0b1
-				MX6QDL_PAD_NANDF_D3__NAND_DATA03  	0x0b0b1
-				MX6QDL_PAD_NANDF_D4__NAND_DATA04  	0x0b0b1
-				MX6QDL_PAD_NANDF_D5__NAND_DATA05  	0x0b0b1
-				MX6QDL_PAD_NANDF_D6__NAND_DATA06  	0x0b0b1
-				MX6QDL_PAD_NANDF_D7__NAND_DATA07  	0x0b0b1
-			>;
-		};
+	pinctrl_flexcan_xcvr: flexcan-xcvrgrp {
+		fsl,pins = <
+			MX6QDL_PAD_DISP0_DAT0__GPIO4_IO21	0x1b0b0 /* Flexcan XCVR enable */
+		>;
+	};
 
-		pinctrl_i2c1: i2c1grp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_D21__I2C1_SCL		0x4001b8b1
-				MX6QDL_PAD_EIM_D28__I2C1_SDA		0x4001b8b1
-			>;
-		};
+	pinctrl_gpmi_nand: gpminandgrp {
+		fsl,pins = <
+			MX6QDL_PAD_NANDF_CLE__NAND_CLE    	0x0b0b1
+			MX6QDL_PAD_NANDF_ALE__NAND_ALE    	0x0b0b1
+			MX6QDL_PAD_NANDF_WP_B__NAND_WP_B  	0x0b0b1
+			MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0x0b000
+			MX6QDL_PAD_NANDF_CS0__NAND_CE0_B  	0x0b0b1
+			MX6QDL_PAD_SD4_CMD__NAND_RE_B     	0x0b0b1
+			MX6QDL_PAD_SD4_CLK__NAND_WE_B     	0x0b0b1
+			MX6QDL_PAD_NANDF_D0__NAND_DATA00  	0x0b0b1
+			MX6QDL_PAD_NANDF_D1__NAND_DATA01  	0x0b0b1
+			MX6QDL_PAD_NANDF_D2__NAND_DATA02  	0x0b0b1
+			MX6QDL_PAD_NANDF_D3__NAND_DATA03  	0x0b0b1
+			MX6QDL_PAD_NANDF_D4__NAND_DATA04  	0x0b0b1
+			MX6QDL_PAD_NANDF_D5__NAND_DATA05  	0x0b0b1
+			MX6QDL_PAD_NANDF_D6__NAND_DATA06  	0x0b0b1
+			MX6QDL_PAD_NANDF_D7__NAND_DATA07  	0x0b0b1
+		>;
+	};
 
-		pinctrl_i2c3: i2c3grp {
-			fsl,pins = <
-				MX6QDL_PAD_GPIO_3__I2C3_SCL		0x4001b8b1
-				MX6QDL_PAD_GPIO_6__I2C3_SDA		0x4001b8b1
-			>;
-		};
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D21__I2C1_SCL		0x4001b8b1
+			MX6QDL_PAD_EIM_D28__I2C1_SDA		0x4001b8b1
+		>;
+	};
 
-		pinctrl_kpp: kppgrp {
-			fsl,pins = <
-				MX6QDL_PAD_GPIO_9__KEY_COL6		0x1b0b1
-				MX6QDL_PAD_GPIO_4__KEY_COL7		0x1b0b1
-				MX6QDL_PAD_KEY_COL2__KEY_COL2		0x1b0b1
-				MX6QDL_PAD_KEY_COL3__KEY_COL3		0x1b0b1
-				MX6QDL_PAD_GPIO_2__KEY_ROW6		0x1b0b1
-				MX6QDL_PAD_GPIO_5__KEY_ROW7		0x1b0b1
-				MX6QDL_PAD_KEY_ROW2__KEY_ROW2		0x1b0b1
-				MX6QDL_PAD_KEY_ROW3__KEY_ROW3		0x1b0b1
-			>;
-		};
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_3__I2C3_SCL		0x4001b8b1
+			MX6QDL_PAD_GPIO_6__I2C3_SDA		0x4001b8b1
+		>;
+	};
 
-		pinctrl_lcd0_pwr: lcd0-pwrgrp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_D29__GPIO3_IO29		0x1b0b1 /* LCD Reset */
-			>;
-		};
+	pinctrl_kpp: kppgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_9__KEY_COL6		0x1b0b1
+			MX6QDL_PAD_GPIO_4__KEY_COL7		0x1b0b1
+			MX6QDL_PAD_KEY_COL2__KEY_COL2		0x1b0b1
+			MX6QDL_PAD_KEY_COL3__KEY_COL3		0x1b0b1
+			MX6QDL_PAD_GPIO_2__KEY_ROW6		0x1b0b1
+			MX6QDL_PAD_GPIO_5__KEY_ROW7		0x1b0b1
+			MX6QDL_PAD_KEY_ROW2__KEY_ROW2		0x1b0b1
+			MX6QDL_PAD_KEY_ROW3__KEY_ROW3		0x1b0b1
+		>;
+	};
 
-		pinctrl_lcd1_pwr: lcd1-pwrgrp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_EB3__GPIO2_IO31		0x1b0b1 /* LCD Power Enable */
-			>;
-		};
+	pinctrl_lcd0_pwr: lcd0-pwrgrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D29__GPIO3_IO29		0x1b0b1 /* LCD Reset */
+		>;
+	};
 
-		pinctrl_pwm1: pwm1grp {
-			fsl,pins = <
-				MX6QDL_PAD_GPIO_9__PWM1_OUT		0x1b0b1
-			>;
-		};
+	pinctrl_lcd1_pwr: lcd-pwrgrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_EB3__GPIO2_IO31		0x1b0b1 /* LCD Power Enable */
+		>;
+	};
 
-		pinctrl_pwm2: pwm2grp {
-			fsl,pins = <
-				MX6QDL_PAD_GPIO_1__PWM2_OUT		0x1b0b1
-			>;
-		};
+	pinctrl_pwm1: pwm1grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_9__PWM1_OUT		0x1b0b1
+		>;
+	};
 
-		pinctrl_tsc2007: tsc2007grp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_D26__GPIO3_IO26		0x1b0b0 /* Interrupt */
-			>;
-		};
+	pinctrl_pwm2: pwm2grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_1__PWM2_OUT		0x1b0b1
+		>;
+	};
 
-		pinctrl_uart1: uart1grp {
-			fsl,pins = <
-				MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA	0x1b0b1
-				MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA	0x1b0b1
-			>;
-		};
+	pinctrl_tsc2007: tsc2007grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D26__GPIO3_IO26		0x1b0b0 /* Interrupt */
+		>;
+	};
 
-		pinctrl_uart1_rtscts: uart1_rtsctsgrp {
-			fsl,pins = <
-				MX6QDL_PAD_SD3_DAT1__UART1_RTS_B	0x1b0b1
-				MX6QDL_PAD_SD3_DAT0__UART1_CTS_B	0x1b0b1
-			>;
-		};
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA	0x1b0b1
+		>;
+	};
 
-		pinctrl_uart2: uart2grp {
-			fsl,pins = <
-				MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA	0x1b0b1
-				MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA	0x1b0b1
-			>;
-		};
+	pinctrl_uart1_rtscts: uart1_rtsctsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_DAT1__UART1_RTS_B	0x1b0b1
+			MX6QDL_PAD_SD3_DAT0__UART1_CTS_B	0x1b0b1
+		>;
+	};
 
-		pinctrl_uart2_rtscts: uart2_rtsctsgrp {
-			fsl,pins = <
-				MX6QDL_PAD_SD4_DAT5__UART2_RTS_B	0x1b0b1
-				MX6QDL_PAD_SD4_DAT6__UART2_CTS_B	0x1b0b1
-			>;
-		};
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA	0x1b0b1
+		>;
+	};
 
-		pinctrl_uart3: uart3grp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_D24__UART3_TX_DATA	0x1b0b1
-				MX6QDL_PAD_EIM_D25__UART3_RX_DATA	0x1b0b1
-			>;
-		};
+	pinctrl_uart2_rtscts: uart2_rtsctsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT5__UART2_RTS_B	0x1b0b1
+			MX6QDL_PAD_SD4_DAT6__UART2_CTS_B	0x1b0b1
+		>;
+	};
 
-		pinctrl_uart3_rtscts: uart3_rtsctsgrp {
-			fsl,pins = <
-				MX6QDL_PAD_SD3_DAT3__UART3_CTS_B	0x1b0b1
-				MX6QDL_PAD_SD3_RST__UART3_RTS_B		0x1b0b1
-			>;
-		};
+	pinctrl_uart3: uart3grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D24__UART3_TX_DATA	0x1b0b1
+			MX6QDL_PAD_EIM_D25__UART3_RX_DATA	0x1b0b1
+		>;
+	};
 
-		pinctrl_usbh1_vbus: usbh1-vbusgrp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_D31__GPIO3_IO31		0x1b0b0 /* USBH1_VBUSEN */
-			>;
-		};
+	pinctrl_uart3_rtscts: uart3_rtsctsgrp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_DAT3__UART3_CTS_B	0x1b0b1
+			MX6QDL_PAD_SD3_RST__UART3_RTS_B		0x1b0b1
+		>;
+	};
 
-		pinctrl_usbotg: usbotggrp {
-			fsl,pins = <
-				MX6QDL_PAD_EIM_D23__GPIO3_IO23		0x17059
-			>;
-		};
+	pinctrl_usbh1_vbus: usbh1-vbusgrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D31__GPIO3_IO31		0x1b0b0 /* USBH1_VBUSEN */
+		>;
+	};
 
-		pinctrl_usbotg_vbus: usbotg-vbusgrp {
-			fsl,pins = <
-				MX6QDL_PAD_GPIO_7__GPIO1_IO07		0x1b0b0 /* USBOTG_VBUSEN */
-			>;
-		};
+	pinctrl_usbotg: usbotggrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D23__GPIO3_IO23		0x17059
+		>;
+	};
 
-		pinctrl_usdhc1: usdhc1grp {
-			fsl,pins = <
-				MX6QDL_PAD_SD1_CMD__SD1_CMD		0x070b1
-				MX6QDL_PAD_SD1_CLK__SD1_CLK		0x070b1
-				MX6QDL_PAD_SD1_DAT0__SD1_DATA0		0x070b1
-				MX6QDL_PAD_SD1_DAT1__SD1_DATA1		0x070b1
-				MX6QDL_PAD_SD1_DAT2__SD1_DATA2		0x070b1
-				MX6QDL_PAD_SD1_DAT3__SD1_DATA3		0x070b1
-				MX6QDL_PAD_SD3_CMD__GPIO7_IO02		0x170b0 /* SD1 CD */
-			>;
-		};
+	pinctrl_usbotg_vbus: usbotg-vbusgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_7__GPIO1_IO07		0x1b0b0 /* USBOTG_VBUSEN */
+		>;
+	};
 
-		pinctrl_usdhc2: usdhc2grp {
-			fsl,pins = <
-				MX6QDL_PAD_SD2_CMD__SD2_CMD		0x070b1
-				MX6QDL_PAD_SD2_CLK__SD2_CLK		0x070b1
-				MX6QDL_PAD_SD2_DAT0__SD2_DATA0		0x070b1
-				MX6QDL_PAD_SD2_DAT1__SD2_DATA1		0x070b1
-				MX6QDL_PAD_SD2_DAT2__SD2_DATA2		0x070b1
-				MX6QDL_PAD_SD2_DAT3__SD2_DATA3		0x070b1
-				MX6QDL_PAD_SD3_CLK__GPIO7_IO03		0x170b0 /* SD2 CD */
-			>;
-		};
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_CMD__SD1_CMD		0x070b1
+			MX6QDL_PAD_SD1_CLK__SD1_CLK		0x070b1
+			MX6QDL_PAD_SD1_DAT0__SD1_DATA0		0x070b1
+			MX6QDL_PAD_SD1_DAT1__SD1_DATA1		0x070b1
+			MX6QDL_PAD_SD1_DAT2__SD1_DATA2		0x070b1
+			MX6QDL_PAD_SD1_DAT3__SD1_DATA3		0x070b1
+			MX6QDL_PAD_SD3_CMD__GPIO7_IO02		0x170b0 /* SD1 CD */
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD2_CMD__SD2_CMD		0x070b1
+			MX6QDL_PAD_SD2_CLK__SD2_CLK		0x070b1
+			MX6QDL_PAD_SD2_DAT0__SD2_DATA0		0x070b1
+			MX6QDL_PAD_SD2_DAT1__SD2_DATA1		0x070b1
+			MX6QDL_PAD_SD2_DAT2__SD2_DATA2		0x070b1
+			MX6QDL_PAD_SD2_DAT3__SD2_DATA3		0x070b1
+			MX6QDL_PAD_SD3_CLK__GPIO7_IO03		0x170b0 /* SD2 CD */
+		>;
+	};
+
+	pinctrl_user_led: user-ledgrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_A18__GPIO2_IO20		0x1b0b1 /* LED */
+		>;
 	};
 };
 
@@ -649,19 +688,22 @@
 
 &uart1 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_uart1>;
+	pinctrl-0 = <&pinctrl_uart1 &pinctrl_uart1_rtscts>;
+	fsl,uart-has-rtscts;
 	status = "okay";
 };
 
 &uart2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_uart2 &pinctrl_uart2_rtscts>;
+	fsl,uart-has-rtscts;
 	status = "okay";
 };
 
 &uart3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_uart3 &pinctrl_uart3_rtscts>;
+	fsl,uart-has-rtscts;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
index d3e54e4..3bee2f9 100644
--- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
@@ -10,14 +10,49 @@
  */
 
 / {
+	aliases {
+		backlight = &backlight;
+		panelchan = &panelchan;
+		panel7 = &panel7;
+		touchscreenp7 = &touchscreenp7;
+	};
+
 	chosen {
 		stdout-path = &uart2;
 	};
 
+	backlight: backlight {
+		compatible = "gpio-backlight";
+		gpios = <&gpio1 4 0>;
+		default-on;
+		status = "disabled";
+	};
+
 	memory {
 		reg = <0x10000000 0x40000000>;
 	};
 
+	panel7: panel7 {
+		/*
+		 * in reality it is a -20t (parallel) model,
+		 * but with LVDS bridge chip attached,
+		 * so it is equivalent to -19t model in drive
+		 * characteristics
+		 */
+		compatible = "urt,umsh-8596md-19t";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_panel>;
+		power-supply = <&reg_panel>;
+		backlight = <&backlight>;
+		status = "disabled";
+
+		port {
+			panel_in: endpoint {
+				remote-endpoint = <&lvds0_out>;
+			};
+		};
+	};
+
 	regulators {
 		compatible = "simple-bus";
 		#address-cells = <1>;
@@ -33,6 +68,14 @@
 			startup-delay-us = <2>; /* USB2415 requires a POR of 1 us minimum */
 			gpio = <&gpio7 12 0>;
 		};
+
+		reg_panel: regulator@1 {
+			compatible = "regulator-fixed";
+			reg = <1>;
+			regulator-name = "lcd_panel";
+			enable-active-high;
+			gpio = <&gpio1 2 0>;
+		};
 	};
 
 	sound {
@@ -67,6 +110,24 @@
 	status = "okay";
 };
 
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+
+	touchscreenp7: touchscreenp7@55 {
+		compatible = "sitronix,st1232";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_touchscreenp7>;
+		reg = <0x55>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <13 8>;
+		gpios = <&gpio1 15 0>;
+		status = "disabled";
+	};
+};
+
 &iomuxc {
 	imx6q-udoo {
 		pinctrl_enet: enetgrp {
@@ -97,6 +158,27 @@
 			>;
 		};
 
+		pinctrl_i2c3: i2c3grp {
+			fsl,pins = <
+				MX6QDL_PAD_GPIO_5__I2C3_SCL		0x4001f8b1
+				MX6QDL_PAD_GPIO_6__I2C3_SDA		0x4001f8b1
+			>;
+		};
+
+		pinctrl_panel: panelgrp {
+			fsl,pins = <
+				MX6QDL_PAD_GPIO_2__GPIO1_IO02		0x70
+				MX6QDL_PAD_GPIO_4__GPIO1_IO04		0x70
+			>;
+		};
+
+		pinctrl_touchscreenp7: touchscreenp7grp {
+			fsl,pins = <
+				MX6QDL_PAD_SD2_DAT0__GPIO1_IO15		0x70
+				MX6QDL_PAD_SD2_DAT2__GPIO1_IO13		0x1b0b0
+			>;
+		};
+
 		pinctrl_uart2: uart2grp {
 			fsl,pins = <
 				MX6QDL_PAD_EIM_D26__UART2_TX_DATA	0x1b0b1
@@ -154,6 +236,20 @@
 	};
 };
 
+&ldb {
+	status = "okay";
+
+	panelchan: lvds-channel@0 {
+		port@4 {
+			reg = <4>;
+
+			lvds0_out: endpoint {
+				remote-endpoint = <&panel_in>;
+			};
+		};
+	};
+};
+
 &uart2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_uart2>;
@@ -164,7 +260,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usbh>;
 	vbus-supply = <&reg_usb_h1_vbus>;
-	clocks = <&clks 201>;
+	clocks = <&clks IMX6QDL_CLK_CKO>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 9e096d8..8e7c40e 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -85,7 +85,7 @@
 	codec: sgtl5000@0a {
 		compatible = "fsl,sgtl5000";
 		reg = <0x0a>;
-		clocks = <&clks 201>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
 		VDDA-supply = <&reg_2p5v>;
 		VDDIO-supply = <&reg_3p3v>;
 	};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index b42822a..ed613eb 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -621,7 +621,7 @@
 					     <0 54 IRQ_TYPE_LEVEL_HIGH>,
 					     <0 127 IRQ_TYPE_LEVEL_HIGH>;
 
-				regulator-1p1@110 {
+				regulator-1p1 {
 					compatible = "fsl,anatop-regulator";
 					regulator-name = "vdd1p1";
 					regulator-min-microvolt = <800000>;
@@ -635,7 +635,7 @@
 					anatop-max-voltage = <1375000>;
 				};
 
-				regulator-3p0@120 {
+				regulator-3p0 {
 					compatible = "fsl,anatop-regulator";
 					regulator-name = "vdd3p0";
 					regulator-min-microvolt = <2800000>;
@@ -649,7 +649,7 @@
 					anatop-max-voltage = <3400000>;
 				};
 
-				regulator-2p5@130 {
+				regulator-2p5 {
 					compatible = "fsl,anatop-regulator";
 					regulator-name = "vdd2p5";
 					regulator-min-microvolt = <2000000>;
@@ -663,7 +663,7 @@
 					anatop-max-voltage = <2750000>;
 				};
 
-				reg_arm: regulator-vddcore@140 {
+				reg_arm: regulator-vddcore {
 					compatible = "fsl,anatop-regulator";
 					regulator-name = "vddarm";
 					regulator-min-microvolt = <725000>;
@@ -680,7 +680,7 @@
 					anatop-max-voltage = <1450000>;
 				};
 
-				reg_pu: regulator-vddpu@140 {
+				reg_pu: regulator-vddpu {
 					compatible = "fsl,anatop-regulator";
 					regulator-name = "vddpu";
 					regulator-min-microvolt = <725000>;
@@ -697,7 +697,7 @@
 					anatop-max-voltage = <1450000>;
 				};
 
-				reg_soc: regulator-vddsoc@140 {
+				reg_soc: regulator-vddsoc {
 					compatible = "fsl,anatop-regulator";
 					regulator-name = "vddsoc";
 					regulator-min-microvolt = <725000>;
@@ -1230,22 +1230,22 @@
 				#size-cells = <0>;
 				reg = <2>;
 
-				ipu1_di0_disp0: endpoint@0 {
+				ipu1_di0_disp0: disp0-endpoint {
 				};
 
-				ipu1_di0_hdmi: endpoint@1 {
+				ipu1_di0_hdmi: hdmi-endpoint {
 					remote-endpoint = <&hdmi_mux_0>;
 				};
 
-				ipu1_di0_mipi: endpoint@2 {
+				ipu1_di0_mipi: mipi-endpoint {
 					remote-endpoint = <&mipi_mux_0>;
 				};
 
-				ipu1_di0_lvds0: endpoint@3 {
+				ipu1_di0_lvds0: lvds0-endpoint {
 					remote-endpoint = <&lvds0_mux_0>;
 				};
 
-				ipu1_di0_lvds1: endpoint@4 {
+				ipu1_di0_lvds1: lvds1-endpoint {
 					remote-endpoint = <&lvds1_mux_0>;
 				};
 			};
@@ -1255,22 +1255,22 @@
 				#size-cells = <0>;
 				reg = <3>;
 
-				ipu1_di0_disp1: endpoint@0 {
+				ipu1_di0_disp1: disp1-endpoint {
 				};
 
-				ipu1_di1_hdmi: endpoint@1 {
+				ipu1_di1_hdmi: hdmi-endpoint {
 					remote-endpoint = <&hdmi_mux_1>;
 				};
 
-				ipu1_di1_mipi: endpoint@2 {
+				ipu1_di1_mipi: mipi-endpoint {
 					remote-endpoint = <&mipi_mux_1>;
 				};
 
-				ipu1_di1_lvds0: endpoint@3 {
+				ipu1_di1_lvds0: lvds0-endpoint {
 					remote-endpoint = <&lvds0_mux_1>;
 				};
 
-				ipu1_di1_lvds1: endpoint@4 {
+				ipu1_di1_lvds1: lvds1-endpoint {
 					remote-endpoint = <&lvds1_mux_1>;
 				};
 			};
diff --git a/arch/arm/boot/dts/imx6qp-nitrogen6_max.dts b/arch/arm/boot/dts/imx6qp-nitrogen6_max.dts
new file mode 100644
index 0000000..a39b860
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qp-nitrogen6_max.dts
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Boundary Devices, Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6qp.dtsi"
+#include "imx6qdl-nitrogen6_max.dtsi"
+
+/ {
+	model = "Boundary Devices i.MX6 Quad Plus Nitrogen6_MAX Board";
+	compatible = "boundary,imx6qp-nitrogen6_max", "fsl,imx6qp";
+};
+
+&pcie {
+	status = "disabled";
+};
+
+&sata {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi
index 1ada714..886dbf2 100644
--- a/arch/arm/boot/dts/imx6qp.dtsi
+++ b/arch/arm/boot/dts/imx6qp.dtsi
@@ -82,5 +82,8 @@
 				      "ldb_di0", "ldb_di1", "prg";
 		};
 
+		pcie: pcie@0x01000000 {
+			compatible = "fsl,imx6qp-pcie", "snps,dw-pcie";
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
new file mode 100644
index 0000000..ba62348
--- /dev/null
+++ b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
@@ -0,0 +1,709 @@
+/*
+ * Copyright (C) 2016 Boundary Devices, Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6sx.dtsi"
+
+/ {
+	model = "Boundary Devices i.MX6 SoloX Nitrogen6sx Board";
+	compatible = "boundary,imx6sx-nitrogen6sx", "fsl,imx6sx";
+
+	aliases {
+		fb_lcd = &lcdif1;
+		t_lcd = &t_lcd;
+	};
+
+	memory {
+		reg = <0x80000000 0x40000000>;
+	};
+
+	backlight-lvds {
+		compatible = "pwm-backlight";
+		pwms = <&pwm4 0 5000000>;
+		brightness-levels = <0 4 8 16 32 64 128 255>;
+		default-brightness-level = <6>;
+		power-supply = <&reg_3p3v>;
+	};
+
+	reg_1p8v: regulator-1p8v {
+		compatible = "regulator-fixed";
+		regulator-name = "1P8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_can1_3v3: regulator-can1-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "can1-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio4 27 GPIO_ACTIVE_LOW>;
+	};
+
+	reg_can2_3v3: regulator-can2-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "can2-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio4 24 GPIO_ACTIVE_LOW>;
+	};
+
+	reg_usb_otg1_vbus: regulator-usb-otg1-vbus {
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_usbotg1_vbus>;
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg1_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	reg_wlan: regulator-wlan {
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_reg_wlan>;
+		compatible = "regulator-fixed";
+		clocks = <&clks IMX6SX_CLK_CKO>;
+		clock-names = "slow";
+		regulator-name = "wlan-en";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <70000>;
+		gpio = <&gpio7 6 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	sound {
+		compatible = "fsl,imx-audio-sgtl5000";
+		model = "imx6sx-nitrogen6sx-sgtl5000";
+		cpu-dai = <&ssi1>;
+		audio-codec = <&codec>;
+		audio-routing =
+			"MIC_IN", "Mic Jack",
+			"Mic Jack", "Mic Bias",
+			"Headphone Jack", "HP_OUT";
+		mux-int-port = <1>;
+		mux-ext-port = <5>;
+	};
+};
+
+&audmux {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_audmux>;
+	status = "okay";
+};
+
+&ecspi1 {
+	fsl,spi-num-chipselects = <1>;
+	cs-gpios = <&gpio2 16 GPIO_ACTIVE_LOW>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ecspi1>;
+	status = "okay";
+
+	flash: m25p80@0 {
+		compatible = "microchip,sst25vf016b";
+		spi-max-frequency = <20000000>;
+		reg = <0>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "U-Boot";
+			reg = <0x0 0xc0000>;
+			read-only;
+		};
+
+		partition@c0000 {
+			label = "env";
+			reg = <0xc0000 0x2000>;
+			read-only;
+		};
+
+		partition@c2000 {
+			label = "Kernel";
+			reg = <0xc2000 0x11e000>;
+		};
+
+		partition@1e0000 {
+			label = "M4";
+			reg = <0x1e0000 0x20000>;
+		};
+	};
+};
+
+&fec1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet1>;
+	phy-mode = "rgmii";
+	phy-handle = <&ethphy1>;
+	phy-supply = <&reg_3p3v>;
+	fsl,magic-packet;
+	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy1: ethernet-phy@4 {
+			reg = <4>;
+		};
+
+		ethphy2: ethernet-phy@5 {
+			reg = <5>;
+		};
+	};
+};
+
+&fec2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet2>;
+	phy-mode = "rgmii";
+	phy-handle = <&ethphy2>;
+	phy-supply = <&reg_3p3v>;
+	fsl,magic-packet;
+	status = "okay";
+};
+
+&flexcan1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan1>;
+	xceiver-supply = <&reg_can1_3v3>;
+	status = "okay";
+};
+
+&flexcan2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan2>;
+	xceiver-supply = <&reg_can2_3v3>;
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	codec: sgtl5000@0a {
+		compatible = "fsl,sgtl5000";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_sgtl5000>;
+		reg = <0x0a>;
+		clocks = <&clks IMX6SX_CLK_CKO2>;
+		VDDA-supply = <&reg_1p8v>;
+		VDDIO-supply = <&reg_1p8v>;
+		VDDD-supply = <&reg_1p8v>;
+		assigned-clocks = <&clks IMX6SX_CLK_CKO2_SEL>,
+				  <&clks IMX6SX_CLK_CKO2>;
+		assigned-clock-parents = <&clks IMX6SX_CLK_OSC>;
+		assigned-clock-rates = <0>, <24000000>;
+	};
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+};
+
+&lcdif1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_lcdif1>;
+	lcd-supply = <&reg_3p3v>;
+	display = <&display0>;
+	status = "okay";
+
+	display0: display0 {
+		bits-per-pixel = <16>;
+		bus-width = <24>;
+
+		display-timings {
+			native-mode = <&t_lcd>;
+			t_lcd: t_lcd_default {
+				clock-frequency = <74160000>;
+				hactive = <1280>;
+				vactive = <720>;
+				hback-porch = <220>;
+				hfront-porch = <110>;
+				vback-porch = <20>;
+				vfront-porch = <5>;
+				hsync-len = <40>;
+				vsync-len = <5>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+		};
+	};
+};
+
+&pcie {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pcie>;
+	reset-gpio = <&gpio4 10 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+&pwm4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm4>;
+	status = "okay";
+};
+
+&ssi1 {
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3>;
+	fsl,uart-has-rtscts;
+	status = "okay";
+};
+
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	status = "okay";
+};
+
+&usbotg1 {
+	vbus-supply = <&reg_usb_otg1_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg1>;
+	status = "okay";
+};
+
+&usbotg2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg2>;
+	dr_mode = "host";
+	disable-over-current;
+	reset-gpios = <&gpio4 26 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	bus-width = <4>;
+	cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+	keep-power-in-suspend;
+	wakeup-source;
+	status = "okay";
+};
+
+&usdhc3 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc3>;
+	bus-width = <4>;
+	non-removable;
+	keep-power-in-suspend;
+	vmmc-supply = <&reg_wlan>;
+	cap-power-off-card;
+	cap-sdio-irq;
+	status = "okay";
+
+	brcmf: bcrmf@1 {
+		reg = <1>;
+		compatible = "brcm,bcm4329-fmac";
+		interrupt-parent = <&gpio7>;
+		interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	wlcore: wlcore@2 {
+		compatible = "ti,wl1271";
+		reg = <2>;
+		interrupt-parent = <&gpio7>;
+		interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+		ref-clock-frequency = <38400000>;
+	};
+};
+
+&usdhc4 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc4_50mhz>;
+	pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
+	bus-width = <8>;
+	non-removable;
+	vmmc-supply = <&reg_1p8v>;
+	keep-power-in-suspend;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hog>;
+
+	pinctrl_audmux: audmuxgrp {
+		fsl,pins = <
+			MX6SX_PAD_SD1_DATA0__AUDMUX_AUD5_RXD	0x1b0b0
+			MX6SX_PAD_SD1_DATA1__AUDMUX_AUD5_TXC	0x1b0b0
+			MX6SX_PAD_SD1_DATA2__AUDMUX_AUD5_TXFS	0x1b0b0
+			MX6SX_PAD_SD1_DATA3__AUDMUX_AUD5_TXD	0x1b0b0
+		>;
+	};
+
+	pinctrl_ecspi1: ecspi1grp {
+		fsl,pins = <
+			MX6SX_PAD_KEY_COL1__ECSPI1_MISO		0x100b1
+			MX6SX_PAD_KEY_ROW0__ECSPI1_MOSI		0x100b1
+			MX6SX_PAD_KEY_COL0__ECSPI1_SCLK		0x100b1
+			MX6SX_PAD_KEY_ROW1__GPIO2_IO_16		0x0b0b1
+		>;
+	};
+
+	pinctrl_enet1: enet1grp {
+		fsl,pins = <
+			MX6SX_PAD_ENET1_MDIO__ENET1_MDIO	0x1b0b0
+			MX6SX_PAD_ENET1_MDC__ENET1_MDC		0x1b0b0
+			MX6SX_PAD_RGMII1_TD0__ENET1_TX_DATA_0	0x30b1
+			MX6SX_PAD_RGMII1_TD1__ENET1_TX_DATA_1	0x30b1
+			MX6SX_PAD_RGMII1_TD2__ENET1_TX_DATA_2	0x30b1
+			MX6SX_PAD_RGMII1_TD3__ENET1_TX_DATA_3	0x30b1
+			MX6SX_PAD_RGMII1_TXC__ENET1_RGMII_TXC	0x30b1
+			MX6SX_PAD_RGMII1_TX_CTL__ENET1_TX_EN	0x30b1
+			MX6SX_PAD_RGMII1_RD0__ENET1_RX_DATA_0	0x3081
+			MX6SX_PAD_RGMII1_RD1__ENET1_RX_DATA_1	0x3081
+			MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN	0x3081
+			MX6SX_PAD_RGMII1_RD2__ENET1_RX_DATA_2	0x3081
+			MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3	0x3081
+			MX6SX_PAD_RGMII1_RXC__ENET1_RX_CLK	0x3081
+			MX6SX_PAD_ENET2_CRS__GPIO2_IO_7		0xb0b0
+			MX6SX_PAD_ENET1_RX_CLK__GPIO2_IO_4	0xb0b0
+			MX6SX_PAD_ENET1_TX_CLK__GPIO2_IO_5	0xb0b0
+		>;
+	};
+
+	pinctrl_enet2: enet2grp {
+		fsl,pins = <
+			MX6SX_PAD_RGMII2_TD0__ENET2_TX_DATA_0	0x30b1
+			MX6SX_PAD_RGMII2_TD1__ENET2_TX_DATA_1	0x30b1
+			MX6SX_PAD_RGMII2_TD2__ENET2_TX_DATA_2	0x30b1
+			MX6SX_PAD_RGMII2_TD3__ENET2_TX_DATA_3	0x30b1
+			MX6SX_PAD_RGMII2_TXC__ENET2_RGMII_TXC	0x30b1
+			MX6SX_PAD_RGMII2_TX_CTL__ENET2_TX_EN	0x30b1
+			MX6SX_PAD_RGMII2_RD0__ENET2_RX_DATA_0	0x3081
+			MX6SX_PAD_RGMII2_RD1__ENET2_RX_DATA_1	0x3081
+			MX6SX_PAD_RGMII2_RX_CTL__ENET2_RX_EN	0x3081
+			MX6SX_PAD_RGMII2_RD2__ENET2_RX_DATA_2	0x3081
+			MX6SX_PAD_RGMII2_RD3__ENET2_RX_DATA_3	0x3081
+			MX6SX_PAD_RGMII2_RXC__ENET2_RX_CLK	0x3081
+			MX6SX_PAD_ENET2_COL__GPIO2_IO_6		0xb0b0
+			MX6SX_PAD_ENET2_RX_CLK__GPIO2_IO_8	0xb0b0
+			MX6SX_PAD_ENET2_TX_CLK__GPIO2_IO_9	0xb0b0
+		>;
+	};
+
+	pinctrl_flexcan1: flexcan1grp {
+		fsl,pins = <
+			MX6SX_PAD_QSPI1B_DQS__CAN1_TX		0x1b0b0
+			MX6SX_PAD_QSPI1A_SS1_B__CAN1_RX		0x1b0b0
+			MX6SX_PAD_QSPI1B_DATA3__GPIO4_IO_27	0x1b0b0
+			MX6SX_PAD_QSPI1B_DATA3__GPIO4_IO_27	0x0b0b0
+		>;
+	};
+
+	pinctrl_flexcan2: flexcan2grp {
+		fsl,pins = <
+			MX6SX_PAD_QSPI1A_DQS__CAN2_TX		0x1b0b0
+			MX6SX_PAD_QSPI1B_SS1_B__CAN2_RX		0x1b0b0
+			MX6SX_PAD_QSPI1B_DATA0__GPIO4_IO_24	0x0b0b0
+		>;
+	};
+
+	pinctrl_hog: hoggrp {
+		fsl,pins = <
+			MX6SX_PAD_NAND_CE0_B__GPIO4_IO_1	0x1b0b0
+			MX6SX_PAD_NAND_CLE__GPIO4_IO_3		0x1b0b0
+			MX6SX_PAD_NAND_RE_B__GPIO4_IO_12	0x1b0b0
+			MX6SX_PAD_NAND_WE_B__GPIO4_IO_14	0x1b0b0
+			MX6SX_PAD_NAND_WP_B__GPIO4_IO_15	0x1b0b0
+			MX6SX_PAD_NAND_READY_B__GPIO4_IO_13	0x1b0b0
+			MX6SX_PAD_QSPI1A_DATA0__GPIO4_IO_16	0x1b0b0
+			MX6SX_PAD_QSPI1A_DATA1__GPIO4_IO_17	0x1b0b0
+			MX6SX_PAD_QSPI1A_DATA2__GPIO4_IO_18	0x1b0b0
+			MX6SX_PAD_QSPI1A_DATA3__GPIO4_IO_19	0x1b0b0
+			MX6SX_PAD_SD1_CMD__CCM_CLKO1		0x000b0
+			MX6SX_PAD_SD3_DATA5__GPIO7_IO_7		0x1b0b0
+			/* Test points */
+			MX6SX_PAD_NAND_DATA04__GPIO4_IO_8	0x1b0b0
+			MX6SX_PAD_QSPI1B_DATA1__GPIO4_IO_25	0x1b0b0
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6SX_PAD_GPIO1_IO00__I2C1_SCL		0x4001b8b1
+			MX6SX_PAD_GPIO1_IO01__I2C1_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6SX_PAD_GPIO1_IO02__I2C2_SCL		0x4001b8b1
+			MX6SX_PAD_GPIO1_IO03__I2C2_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6SX_PAD_KEY_COL4__I2C3_SCL		0x4001b8b1
+			MX6SX_PAD_KEY_ROW4__I2C3_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_lcdif1: lcdif1grp {
+		fsl,pins = <
+			MX6SX_PAD_LCD1_CLK__LCDIF1_CLK		0x4001b0b0
+			MX6SX_PAD_LCD1_ENABLE__LCDIF1_ENABLE	0x4001b0b0
+			MX6SX_PAD_LCD1_HSYNC__LCDIF1_HSYNC	0x4001b0b0
+			MX6SX_PAD_LCD1_VSYNC__LCDIF1_VSYNC	0x4001b0b0
+			MX6SX_PAD_LCD1_RESET__GPIO3_IO_27	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA00__LCDIF1_DATA_0	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA01__LCDIF1_DATA_1	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA02__LCDIF1_DATA_2	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA03__LCDIF1_DATA_3	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA04__LCDIF1_DATA_4	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA05__LCDIF1_DATA_5	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA06__LCDIF1_DATA_6	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA07__LCDIF1_DATA_7	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA08__LCDIF1_DATA_8	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA09__LCDIF1_DATA_9	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA10__LCDIF1_DATA_10	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA11__LCDIF1_DATA_11	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA12__LCDIF1_DATA_12	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA13__LCDIF1_DATA_13	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA14__LCDIF1_DATA_14	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA15__LCDIF1_DATA_15	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA16__LCDIF1_DATA_16	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA17__LCDIF1_DATA_17	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA18__LCDIF1_DATA_18	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA19__LCDIF1_DATA_19	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA20__LCDIF1_DATA_20	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA21__LCDIF1_DATA_21	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA22__LCDIF1_DATA_22	0x4001b0b0
+			MX6SX_PAD_LCD1_DATA23__LCDIF1_DATA_23	0x4001b0b0
+		>;
+	};
+
+	pinctrl_pcie: pciegrp {
+		fsl,pins = <
+			MX6SX_PAD_NAND_DATA05__GPIO4_IO_9	0xb0b0
+			MX6SX_PAD_NAND_DATA06__GPIO4_IO_10	0xb0b0
+			MX6SX_PAD_NAND_DATA07__GPIO4_IO_11	0xb0b0
+		>;
+	};
+
+	pinctrl_pwm4: pwm4grp {
+		fsl,pins = <
+			MX6SX_PAD_GPIO1_IO13__PWM4_OUT		0x110b0
+		>;
+	};
+
+	pinctrl_reg_wlan: reg-wlangrp {
+		fsl,pins = <
+			MX6SX_PAD_SD3_DATA4__GPIO7_IO_6		0x1b0b0
+			MX6SX_PAD_GPIO1_IO11__CCM_CLKO1		0x000b0
+		>;
+	};
+
+	pinctrl_sgtl5000: sgtl5000grp {
+		fsl,pins = <
+			MX6SX_PAD_GPIO1_IO12__CCM_CLKO2		0x000b0
+			MX6SX_PAD_ENET1_COL__GPIO2_IO_0		0x1b0b0
+			MX6SX_PAD_ENET1_CRS__GPIO2_IO_1		0x1b0b0
+			MX6SX_PAD_QSPI1A_SS0_B__GPIO4_IO_22	0xb0b0
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6SX_PAD_GPIO1_IO04__UART1_TX		0x1b0b1
+			MX6SX_PAD_GPIO1_IO05__UART1_RX		0x1b0b1
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6SX_PAD_GPIO1_IO06__UART2_TX		0x1b0b1
+			MX6SX_PAD_GPIO1_IO07__UART2_RX		0x1b0b1
+		>;
+	};
+
+	pinctrl_uart3: uart3grp {
+		fsl,pins = <
+			MX6SX_PAD_QSPI1B_SS0_B__UART3_TX	0x1b0b1
+			MX6SX_PAD_QSPI1B_SCLK__UART3_RX		0x1b0b1
+		>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins = <
+			MX6SX_PAD_KEY_COL3__UART5_TX		0x1b0b1
+			MX6SX_PAD_KEY_ROW3__UART5_RX		0x1b0b1
+			MX6SX_PAD_SD3_DATA6__UART3_RTS_B	0x1b0b1
+			MX6SX_PAD_SD3_DATA7__UART3_CTS_B	0x1b0b1
+		>;
+	};
+
+	pinctrl_usbotg1: usbotg1grp {
+		fsl,pins = <
+			MX6SX_PAD_GPIO1_IO08__USB_OTG1_OC	0x1b0b0
+			MX6SX_PAD_GPIO1_IO10__ANATOP_OTG1_ID	0x170b1
+		>;
+	};
+
+	pinctrl_usbotg1_vbus: usbotg1-vbusgrp {
+		fsl,pins = <
+			MX6SX_PAD_GPIO1_IO09__GPIO1_IO_9	0x1b0b0
+		>;
+	};
+
+	pinctrl_usbotg2: usbotg2grp {
+		fsl,pins = <
+			MX6SX_PAD_QSPI1B_DATA2__GPIO4_IO_26	0xb0b0
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6SX_PAD_SD2_CMD__USDHC2_CMD		0x17059
+			MX6SX_PAD_SD2_CLK__USDHC2_CLK		0x10059
+			MX6SX_PAD_SD2_DATA0__USDHC2_DATA0	0x17059
+			MX6SX_PAD_SD2_DATA1__USDHC2_DATA1	0x17059
+			MX6SX_PAD_SD2_DATA2__USDHC2_DATA2	0x17059
+			MX6SX_PAD_SD2_DATA3__USDHC2_DATA3	0x17059
+			MX6SX_PAD_KEY_COL2__GPIO2_IO_12		0x1b0b0
+		>;
+	};
+
+	pinctrl_usdhc3: usdhc3grp {
+		fsl,pins = <
+			MX6SX_PAD_SD3_CLK__USDHC3_CLK		0x10071
+			MX6SX_PAD_SD3_CMD__USDHC3_CMD		0x17071
+			MX6SX_PAD_SD3_DATA0__USDHC3_DATA0	0x17071
+			MX6SX_PAD_SD3_DATA1__USDHC3_DATA1	0x17071
+			MX6SX_PAD_SD3_DATA2__USDHC3_DATA2	0x17071
+			MX6SX_PAD_SD3_DATA3__USDHC3_DATA3	0x17071
+		>;
+	};
+
+	pinctrl_usdhc4_50mhz: usdhc4-50mhzgrp {
+		fsl,pins = <
+			MX6SX_PAD_SD4_CLK__USDHC4_CLK		0x10071
+			MX6SX_PAD_SD4_CMD__USDHC4_CMD		0x17071
+			MX6SX_PAD_SD4_RESET_B__USDHC4_RESET_B	0x17071
+			MX6SX_PAD_SD4_DATA0__USDHC4_DATA0	0x17071
+			MX6SX_PAD_SD4_DATA1__USDHC4_DATA1	0x17071
+			MX6SX_PAD_SD4_DATA2__USDHC4_DATA2	0x17071
+			MX6SX_PAD_SD4_DATA3__USDHC4_DATA3	0x17071
+			MX6SX_PAD_SD4_DATA4__USDHC4_DATA4	0x17071
+			MX6SX_PAD_SD4_DATA5__USDHC4_DATA5	0x17071
+			MX6SX_PAD_SD4_DATA6__USDHC4_DATA6	0x17071
+			MX6SX_PAD_SD4_DATA7__USDHC4_DATA7	0x17071
+		>;
+	};
+
+	pinctrl_usdhc4_100mhz: usdhc4-100mhzgrp {
+		fsl,pins = <
+			MX6SX_PAD_SD4_CLK__USDHC4_CLK		0x100b9
+			MX6SX_PAD_SD4_CMD__USDHC4_CMD		0x170b9
+			MX6SX_PAD_SD4_DATA0__USDHC4_DATA0	0x170b9
+			MX6SX_PAD_SD4_DATA1__USDHC4_DATA1	0x170b9
+			MX6SX_PAD_SD4_DATA2__USDHC4_DATA2	0x170b9
+			MX6SX_PAD_SD4_DATA3__USDHC4_DATA3	0x170b9
+			MX6SX_PAD_SD4_DATA4__USDHC4_DATA4	0x170b9
+			MX6SX_PAD_SD4_DATA5__USDHC4_DATA5	0x170b9
+			MX6SX_PAD_SD4_DATA6__USDHC4_DATA6	0x170b9
+			MX6SX_PAD_SD4_DATA7__USDHC4_DATA7	0x170b9
+		>;
+	};
+
+	pinctrl_usdhc4_200mhz: usdhc4-200mhzgrp {
+		fsl,pins = <
+			MX6SX_PAD_SD4_CLK__USDHC4_CLK		0x100f9
+			MX6SX_PAD_SD4_CMD__USDHC4_CMD		0x170f9
+			MX6SX_PAD_SD4_DATA0__USDHC4_DATA0	0x170f9
+			MX6SX_PAD_SD4_DATA1__USDHC4_DATA1	0x170f9
+			MX6SX_PAD_SD4_DATA2__USDHC4_DATA2	0x170f9
+			MX6SX_PAD_SD4_DATA3__USDHC4_DATA3	0x170f9
+			MX6SX_PAD_SD4_DATA4__USDHC4_DATA4	0x170f9
+			MX6SX_PAD_SD4_DATA5__USDHC4_DATA5	0x170f9
+			MX6SX_PAD_SD4_DATA6__USDHC4_DATA6	0x170f9
+			MX6SX_PAD_SD4_DATA7__USDHC4_DATA7	0x170f9
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6sx-sdb-sai.dts b/arch/arm/boot/dts/imx6sx-sdb-sai.dts
new file mode 100644
index 0000000..0155450
--- /dev/null
+++ b/arch/arm/boot/dts/imx6sx-sdb-sai.dts
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 NXP Semiconductors
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "imx6sx-sdb.dts"
+
+/ {
+	sound {
+		audio-cpu = <&sai1>;
+	};
+};
+
+&audmux {
+	/* pin conflict with sai */
+	status = "disabled";
+};
+
+&sai1 {
+	status = "okay";
+};
+
+&sdma {
+	gpr = <&gpr>;
+	/* SDMA event remap for SAI1 */
+	fsl,sdma-event-remap = <0 15 1>, <0 16 1>;
+};
+
+&ssi2 {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 0ad164a..5bb8fd5 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -18,12 +18,14 @@
 		996000  1250000
 		792000  1175000
 		396000  1175000
+		198000  1175000
 		>;
 	fsl,soc-operating-points = <
 		/* ARM kHz      SOC uV */
 		996000	1250000
 		792000	1175000
 		396000	1175000
+		198000  1175000
 	>;
 };
 
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index f1d3730..e5eafe4 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -254,6 +254,12 @@
 	status = "okay";
 };
 
+&sai1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_sai1>;
+	status = "disabled";
+};
+
 &ssi2 {
 	status = "okay";
 };
@@ -468,6 +474,16 @@
 			>;
 		};
 
+		pinctrl_sai1: sai1grp {
+			fsl,pins = <
+				MX6SX_PAD_CSI_DATA00__SAI1_TX_BCLK	0x130b0
+				MX6SX_PAD_CSI_DATA01__SAI1_TX_SYNC	0x130b0
+				MX6SX_PAD_CSI_HSYNC__SAI1_TX_DATA_0	0x120b0
+				MX6SX_PAD_CSI_VSYNC__SAI1_RX_DATA_0	0x130b0
+				MX6SX_PAD_CSI_PIXCLK__AUDMUX_MCLK	0x130b0
+			>;
+		};
+
 		pinctrl_uart1: uart1grp {
 			fsl,pins = <
 				MX6SX_PAD_GPIO1_IO04__UART1_TX		0x1b0b1
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index a5f7602..6a993bfda 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -63,12 +63,14 @@
 				996000  1250000
 				792000  1175000
 				396000  1075000
+				198000	975000
 			>;
 			fsl,soc-operating-points = <
 				/* ARM kHz  SOC uV */
 				996000      1175000
 				792000      1175000
 				396000      1175000
+				198000	    1175000
 			>;
 			clock-latency = <61036>; /* two CLK32 periods */
 			clocks = <&clks IMX6SX_CLK_ARM>,
@@ -970,8 +972,7 @@
 					 <&clks 0>, <&clks 0>;
 				clock-names = "bus", "mclk1", "mclk2", "mclk3";
 				dma-names = "rx", "tx";
-				dmas = <&sdma 31 23 0>, <&sdma 32 23 0>;
-				dma-source = <&gpr 0 15 0 16>;
+				dmas = <&sdma 31 24 0>, <&sdma 32 24 0>;
 				status = "disabled";
 			};
 
@@ -990,8 +991,7 @@
 					 <&clks 0>, <&clks 0>;
 				clock-names = "bus", "mclk1", "mclk2", "mclk3";
 				dma-names = "rx", "tx";
-				dmas = <&sdma 33 23 0>, <&sdma 34 23 0>;
-				dma-source = <&gpr 0 17 0 18>;
+				dmas = <&sdma 33 24 0>, <&sdma 34 24 0>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index 7207280..668a729 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -36,6 +36,45 @@
 			enable-active-high;
 		};
 	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "mx6ul-wm8960";
+		simple-audio-card,format = "i2s";
+		simple-audio-card,bitclock-master = <&dailink_master>;
+		simple-audio-card,frame-master = <&dailink_master>;
+		simple-audio-card,widgets =
+			"Microphone", "Mic Jack",
+			"Line", "Line In",
+			"Line", "Line Out",
+			"Speaker", "Speaker",
+			"Headphone", "Headphone Jack";
+		simple-audio-card,routing =
+			"Headphone Jack", "HP_L",
+			"Headphone Jack", "HP_R",
+			"Speaker", "SPK_LP",
+			"Speaker", "SPK_LN",
+			"Speaker", "SPK_RP",
+			"Speaker", "SPK_RN",
+			"LINPUT1", "Mic Jack",
+			"LINPUT3", "Mic Jack",
+			"RINPUT1", "Mic Jack",
+			"RINPUT2", "Mic Jack";
+
+		simple-audio-card,cpu {
+			sound-dai = <&sai2>;
+		};
+
+		dailink_master: simple-audio-card,codec {
+			sound-dai = <&codec>;
+			clocks = <&clks IMX6UL_CLK_SAI2>;
+		};
+	};
+};
+
+&clks {
+	assigned-clocks = <&clks IMX6UL_CLK_PLL4_AUDIO_DIV>;
+	assigned-clock-rates = <786432000>;
 };
 
 &cpu0 {
@@ -43,6 +82,20 @@
 	soc-supply = <&reg_soc>;
 };
 
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+
+	codec: wm8960@1a {
+		#sound-dai-cells = <0>;
+		compatible = "wlf,wm8960";
+		reg = <0x1a>;
+		wlf,shared-lrclk;
+	};
+};
+
 &fec1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet1>;
@@ -86,6 +139,16 @@
 	};
 };
 
+&sai2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_sai2>;
+	assigned-clocks = <&clks IMX6UL_CLK_SAI2_SEL>,
+			  <&clks IMX6UL_CLK_SAI2>;
+	assigned-clock-parents = <&clks IMX6UL_CLK_PLL4_AUDIO_DIV>;
+	assigned-clock-rates = <0>, <12288000>;
+	status = "okay";
+};
+
 &snvs_poweroff {
 	status = "okay";
 };
@@ -272,6 +335,17 @@
 		>;
 	};
 
+	pinctrl_sai2: sai2grp {
+		fsl,pins = <
+			MX6UL_PAD_JTAG_TDI__SAI2_TX_BCLK	0x17088
+			MX6UL_PAD_JTAG_TDO__SAI2_TX_SYNC	0x17088
+			MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA	0x11088
+			MX6UL_PAD_JTAG_TCK__SAI2_RX_DATA	0x11088
+			MX6UL_PAD_JTAG_TMS__SAI2_MCLK		0x17088
+			MX6UL_PAD_SNVS_TAMPER4__GPIO5_IO04	0x17059
+		>;
+	};
+
 	pinctrl_pwm1: pwm1grp {
 		fsl,pins = <
 			MX6UL_PAD_GPIO1_IO08__PWM1_OUT   0x110b0
diff --git a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
new file mode 100644
index 0000000..8ce1fec
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
@@ -0,0 +1,516 @@
+/*
+ * Copyright 2015 Technexion Ltd.
+ *
+ * Author: Wig Cheng  <wig.cheng@technexion.com>
+ *	   Richard Hu <richard.hu@technexion.com>
+ *	   Tapani Utriainen <tapani@technexion.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6ul.dtsi"
+
+/ {
+	model = "Technexion Pico i.MX6UL Board";
+	compatible = "technexion,imx6ul-pico-hobbit", "fsl,imx6ul";
+
+	memory {
+		reg = <0x80000000 0x10000000>;
+	};
+
+	chosen {
+		stdout-path = &uart6;
+	};
+
+	backlight {
+		compatible = "pwm-backlight";
+		pwms = <&pwm3 0 5000000>;
+		brightness-levels = <0 4 8 16 32 64 128 255>;
+		default-brightness-level = <6>;
+		status = "okay";
+	};
+
+	reg_2p5v: regulator-2p5v {
+		compatible = "regulator-fixed";
+		regulator-name = "2P5V";
+		regulator-min-microvolt = <2500000>;
+		regulator-max-microvolt = <2500000>;
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	reg_sd1_vmmc: regulator-sd1-vmmc {
+		compatible = "regulator-fixed";
+		regulator-name = "VSD_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	reg_usb_otg_vbus: regulator-usb-otg-vbus {
+		compatible = "regulator-fixed";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_usb_otg1>;
+		regulator-name = "usb_otg_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio1 6 0>;
+	};
+
+	sound {
+		compatible = "fsl,imx-audio-sgtl5000";
+		model = "imx6ul-sgtl5000";
+		audio-cpu = <&sai1>;
+		audio-codec = <&codec>;
+		audio-routing =
+			"LINE_IN", "Line In Jack",
+			"MIC_IN", "Mic Jack",
+			"Mic Jack", "Mic Bias",
+			"Headphone Jack", "HP_OUT";
+	};
+
+	sys_mclk: clock-sys-mclk {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24576000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		hobbitled {
+			label = "hobbitled";
+			gpios = <&gpio1 29 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan1>;
+	status = "okay";
+};
+
+&can2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan2>;
+	status = "okay";
+};
+
+&clks {
+	assigned-clocks = <&clks IMX6UL_CLK_PLL4_AUDIO_DIV>;
+	assigned-clock-rates = <786432000>;
+};
+
+&fec2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet2>;
+	phy-mode = "rmii";
+	phy-handle = <&ethphy1>;
+	status = "okay";
+	phy-reset-gpios = <&gpio2 15 GPIO_ACTIVE_LOW>;
+	phy-reset-duration = <11>;
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy1: ethernet-phy@1 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <1>;
+			max-speed = <100>;
+			interrupt-parent = <&gpio5>;
+			interrupts = <6 IRQ_TYPE_LEVEL_LOW 0>;
+		};
+	};
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	pmic: pfuze3000@08 {
+		compatible = "fsl,pfuze3000";
+		reg = <0x08>;
+
+		regulators {
+			/* VDD_ARM_SOC_IN */
+			sw1b_reg: sw1b {
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1475000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+
+			/* DRAM */
+			sw3a_reg: sw3 {
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1650000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			/* DRAM */
+			vref_reg: vrefddr {
+				regulator-boot-on;
+				regulator-always-on;
+			};
+		};
+	};
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+
+	codec: sgtl5000@0a {
+		reg = <0x0a>;
+		compatible = "fsl,sgtl5000";
+		clocks = <&sys_mclk>;
+		VDDA-supply = <&reg_2p5v>;
+		VDDIO-supply = <&reg_3p3v>;
+	};
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+};
+
+&lcdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_lcdif_dat &pinctrl_lcdif_ctrl>;
+	display = <&display0>;
+	status = "okay";
+
+	display0: display0 {
+		bits-per-pixel = <32>;
+		bus-width = <24>;
+
+		display-timings {
+			native-mode = <&timing0>;
+
+			timing0: timing0 {
+				clock-frequency = <33200000>;
+				hactive = <800>;
+				vactive = <480>;
+				hfront-porch = <210>;
+				hback-porch = <46>;
+				hsync-len = <1>;
+				vback-porch = <22>;
+				vfront-porch = <23>;
+				vsync-len = <1>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+		};
+	};
+};
+
+&pwm3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm3>;
+	status = "okay";
+};
+
+&pwm7 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm7>;
+	status = "okay";
+};
+
+&pwm8 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm8>;
+	status = "okay";
+};
+
+&sai1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_sai1>;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3>;
+	fsl,uart-has-rtscts;
+	status = "okay";
+};
+
+&uart6 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart6>;
+	status = "okay";
+};
+
+&usbotg1 {
+	vbus-supply = <&reg_usb_otg_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usb_otg1_id>;
+	dr_mode = "otg";
+	disable-over-current;
+	status = "okay";
+};
+
+&usbotg2 {
+	dr_mode = "host";
+	disable-over-current;
+	status = "okay";
+};
+
+&usdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	bus-width = <8>;
+	no-1-8-v;
+	non-removable;
+	keep-power-in-suspend;
+	status = "okay";
+};
+
+&usdhc2 {  /* Wifi SDIO */
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	no-1-8-v;
+	keep-power-in-suspend;
+	wakeup-source;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_enet2: enet2grp {
+		fsl,pins = <
+			MX6UL_PAD_ENET1_TX_DATA1__ENET2_MDIO	0x1b0b0
+			MX6UL_PAD_ENET1_TX_EN__ENET2_MDC	0x1b0b0
+			MX6UL_PAD_ENET2_RX_EN__ENET2_RX_EN	0x1b0b0
+			MX6UL_PAD_ENET2_RX_ER__ENET2_RX_ER	0x1b0b0
+			MX6UL_PAD_ENET2_RX_DATA0__ENET2_RDATA00	0x1b0b0
+			MX6UL_PAD_ENET2_RX_DATA1__ENET2_RDATA01	0x1b0b0
+			MX6UL_PAD_ENET2_TX_EN__ENET2_TX_EN	0x1b0b0
+			MX6UL_PAD_ENET2_TX_DATA0__ENET2_TDATA00	0x1b0b0
+			MX6UL_PAD_ENET2_TX_DATA1__ENET2_TDATA01	0x1b0b0
+			MX6UL_PAD_ENET2_TX_CLK__ENET2_REF_CLK2	0x4001b031
+			MX6UL_PAD_SNVS_TAMPER6__GPIO5_IO06	0x800
+			MX6UL_PAD_UART4_TX_DATA__GPIO1_IO28	0x79
+		>;
+	};
+
+	pinctrl_flexcan1: flexcan1grp {
+		fsl,pins = <
+			MX6UL_PAD_ENET1_RX_DATA0__FLEXCAN1_TX	0x1b020
+			MX6UL_PAD_ENET1_RX_DATA1__FLEXCAN1_RX	0x1b020
+		>;
+	};
+
+	pinctrl_flexcan2: flexcan2grp {
+		fsl,pins = <
+			MX6UL_PAD_ENET1_TX_DATA0__FLEXCAN2_RX	0x1b020
+			MX6UL_PAD_ENET1_RX_EN__FLEXCAN2_TX	0x1b020
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO02__I2C1_SCL		0x4001b8b0
+			MX6UL_PAD_GPIO1_IO03__I2C1_SDA		0x4001b8b0
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6UL_PAD_UART5_TX_DATA__I2C2_SCL	0x4001b8b0
+			MX6UL_PAD_UART5_RX_DATA__I2C2_SDA	0x4001b8b0
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6UL_PAD_UART1_TX_DATA__I2C3_SCL	0x4001b8b0
+			MX6UL_PAD_UART1_RX_DATA__I2C3_SDA	0x4001b8b0
+		>;
+	};
+
+	pinctrl_lcdif_dat: lcdifdatgrp {
+		fsl,pins = <
+			MX6UL_PAD_LCD_DATA00__LCDIF_DATA00	0x79
+			MX6UL_PAD_LCD_DATA01__LCDIF_DATA01	0x79
+			MX6UL_PAD_LCD_DATA02__LCDIF_DATA02	0x79
+			MX6UL_PAD_LCD_DATA03__LCDIF_DATA03	0x79
+			MX6UL_PAD_LCD_DATA04__LCDIF_DATA04	0x79
+			MX6UL_PAD_LCD_DATA05__LCDIF_DATA05	0x79
+			MX6UL_PAD_LCD_DATA06__LCDIF_DATA06	0x79
+			MX6UL_PAD_LCD_DATA07__LCDIF_DATA07	0x79
+			MX6UL_PAD_LCD_DATA08__LCDIF_DATA08	0x79
+			MX6UL_PAD_LCD_DATA09__LCDIF_DATA09	0x79
+			MX6UL_PAD_LCD_DATA10__LCDIF_DATA10	0x79
+			MX6UL_PAD_LCD_DATA11__LCDIF_DATA11	0x79
+			MX6UL_PAD_LCD_DATA12__LCDIF_DATA12	0x79
+			MX6UL_PAD_LCD_DATA13__LCDIF_DATA13	0x79
+			MX6UL_PAD_LCD_DATA14__LCDIF_DATA14	0x79
+			MX6UL_PAD_LCD_DATA15__LCDIF_DATA15	0x79
+			MX6UL_PAD_LCD_DATA16__LCDIF_DATA16	0x79
+			MX6UL_PAD_LCD_DATA17__LCDIF_DATA17	0x79
+			MX6UL_PAD_LCD_DATA18__LCDIF_DATA18	0x79
+			MX6UL_PAD_LCD_DATA19__LCDIF_DATA19	0x79
+			MX6UL_PAD_LCD_DATA20__LCDIF_DATA20	0x79
+			MX6UL_PAD_LCD_DATA21__LCDIF_DATA21	0x79
+			MX6UL_PAD_LCD_DATA22__LCDIF_DATA22	0x79
+			MX6UL_PAD_LCD_DATA23__LCDIF_DATA23	0x79
+		>;
+	};
+
+	pinctrl_lcdif_ctrl: lcdifctrlgrp {
+		fsl,pins = <
+			MX6UL_PAD_LCD_CLK__LCDIF_CLK		0x79
+			MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE	0x79
+			MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC	0x79
+			MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC	0x79
+			/* LCD reset */
+			MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09	0x79
+		>;
+	};
+
+	pinctrl_pwm3: pwm3grp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_ALE__PWM3_OUT		0x110b0
+		>;
+	};
+
+	pinctrl_pwm7: pwm7grp {
+		fsl,pins = <
+			MX6UL_PAD_ENET1_TX_CLK__PWM7_OUT	0x110b0
+		>;
+	};
+
+	pinctrl_pwm8: pwm8grp {
+		fsl,pins = <
+			MX6UL_PAD_ENET1_RX_ER__PWM8_OUT		0x110b0
+		>;
+	};
+
+	pinctrl_sai1: sai1grp {
+		fsl,pins = <
+			MX6UL_PAD_CSI_DATA04__SAI1_TX_SYNC	0x1b0b0
+			MX6UL_PAD_CSI_DATA05__SAI1_TX_BCLK	0x1b0b0
+			MX6UL_PAD_CSI_DATA06__SAI1_RX_DATA	0x110b0
+			MX6UL_PAD_CSI_DATA07__SAI1_TX_DATA	0x1f0b8
+		>;
+	};
+
+	pinctrl_uart3: uart3grp {
+		fsl,pins = <
+			MX6UL_PAD_UART3_TX_DATA__UART3_DCE_TX	0x1b0b0
+			MX6UL_PAD_UART3_RX_DATA__UART3_DCE_RX	0x1b0b0
+			MX6UL_PAD_UART3_RTS_B__UART3_DCE_RTS	0x1b0b0
+			MX6UL_PAD_UART3_CTS_B__UART3_DCE_CTS	0x1b0b0
+		>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO04__UART5_DCE_TX	0x1b0b1
+			MX6UL_PAD_GPIO1_IO05__UART5_DCE_RX	0x1b0b1
+			MX6UL_PAD_GPIO1_IO08__UART5_DCE_RTS	0x1b0b1
+			MX6UL_PAD_GPIO1_IO09__UART5_DCE_CTS	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart6: uart6grp {
+		fsl,pins = <
+			MX6UL_PAD_CSI_MCLK__UART6_DCE_TX	0x1b0b1
+			MX6UL_PAD_CSI_PIXCLK__UART6_DCE_RX	0x1b0b1
+		>;
+	};
+
+	pinctrl_usb_otg1: usbotg1grp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO06__GPIO1_IO06	0x10b0
+		>;
+	};
+
+	pinctrl_usb_otg1_id: usbotg1idgrp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO00__ANATOP_OTG1_ID	0x17059
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX6UL_PAD_SD1_CMD__USDHC1_CMD		0x17059
+			MX6UL_PAD_SD1_CLK__USDHC1_CLK		0x10071
+			MX6UL_PAD_SD1_DATA0__USDHC1_DATA0	0x17059
+			MX6UL_PAD_SD1_DATA1__USDHC1_DATA1	0x17059
+			MX6UL_PAD_SD1_DATA2__USDHC1_DATA2	0x17059
+			MX6UL_PAD_SD1_DATA3__USDHC1_DATA3	0x17059
+			MX6UL_PAD_UART1_RTS_B__USDHC1_CD_B	0x03029
+			MX6UL_PAD_NAND_READY_B__USDHC1_DATA4	0x17059
+			MX6UL_PAD_NAND_CE0_B__USDHC1_DATA5	0x17059
+			MX6UL_PAD_NAND_CE1_B__USDHC1_DATA6	0x17059
+			MX6UL_PAD_NAND_CLE__USDHC1_DATA7	0x17059
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_WE_B__USDHC2_CMD		0x17059
+			MX6UL_PAD_NAND_RE_B__USDHC2_CLK		0x10059
+			MX6UL_PAD_NAND_DATA00__USDHC2_DATA0	0x17059
+			MX6UL_PAD_NAND_DATA01__USDHC2_DATA1	0x17059
+			MX6UL_PAD_NAND_DATA02__USDHC2_DATA2	0x17059
+			MX6UL_PAD_NAND_DATA03__USDHC2_DATA3	0x17059
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6ul-tx6ul-0010.dts b/arch/arm/boot/dts/imx6ul-tx6ul-0010.dts
new file mode 100644
index 0000000..8c2f3df
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-tx6ul-0010.dts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6ul.dtsi"
+#include "imx6ul-tx6ul.dtsi"
+
+/ {
+	model = "Ka-Ro electronics TXUL-0010 Module";
+	compatible = "karo,imx6ul-tx6ul", "fsl,imx6ul";
+
+	aliases {
+		/delete-property/ mmc1;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6ul-tx6ul-0011.dts b/arch/arm/boot/dts/imx6ul-tx6ul-0011.dts
new file mode 100644
index 0000000..d82698e
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-tx6ul-0011.dts
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2015 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6ul.dtsi"
+#include "imx6ul-tx6ul.dtsi"
+
+/ {
+	model = "Ka-Ro electronics TXUL-0011 Module";
+	compatible = "karo,imx6ul-tx6ul", "fsl,imx6ul";
+
+	aliases {
+		mmc0 = &usdhc2;
+		mmc1 = &usdhc1;
+	};
+};
+
+&gpmi {
+	status = "disabled";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	bus-width = <4>;
+	no-1-8-v;
+	non-removable;
+	fsl,wp-controller;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts b/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts
new file mode 100644
index 0000000..d25899b
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2015 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6ul.dtsi"
+#include "imx6ul-tx6ul.dtsi"
+
+/ {
+	model = "Ka-Ro electronics TXUL-0010 Module on TXUL Mainboard";
+	compatible = "karo,imx6ul-tx6ul", "fsl,imx6ul";
+
+	aliases {
+		lcdif_24bit_pins_a = &pinctrl_disp0_3;
+		mmc0 = &usdhc1;
+		/delete-property/ mmc1;
+		serial2 = &uart3;
+		serial4 = &uart5;
+	};
+	/delete-node/ sound;
+};
+
+&can1 {
+	xceiver-supply = <&reg_3v3>;
+};
+
+&can2 {
+	xceiver-supply = <&reg_3v3>;
+};
+
+&ds1339 {
+	status = "disabled";
+};
+
+&fec1 {
+	pinctrl-0 = <&pinctrl_enet1 &pinctrl_etnphy0_rst>;
+	/delete-node/ mdio;
+};
+
+&fec2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet2 &pinctrl_enet2_mdio &pinctrl_etnphy1_rst>;
+	phy-mode = "rmii";
+	phy-reset-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+	phy-supply = <&reg_3v3_etn>;
+	phy-handle = <&etnphy1>;
+	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		etnphy0: ethernet-phy@0 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_etnphy0_int>;
+			interrupt-parent = <&gpio5>;
+			interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+			interrupts-extended = <&gpio5 5 IRQ_TYPE_EDGE_FALLING>;
+			status = "okay";
+		};
+
+		etnphy1: ethernet-phy@2 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <2>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_etnphy1_int>;
+			interrupt-parent = <&gpio4>;
+			interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
+			interrupts-extended = <&gpio4 27 IRQ_TYPE_EDGE_FALLING>;
+			status = "okay";
+		};
+	};
+};
+
+&i2c_gpio {
+	status = "disabled";
+};
+
+&i2c2 {
+	/delete-node/ codec@0a;
+	/delete-node/ touchscreen@48;
+
+	rtc: mcp7940x@6f {
+		compatible = "microchip,mcp7940x";
+		reg = <0x6f>;
+	};
+};
+
+&kpp {
+	status = "disabled";
+};
+
+&lcdif {
+	pinctrl-0 = <&pinctrl_disp0_3>;
+};
+
+&reg_usbotg_vbus {
+	status = "disabled";
+};
+
+&usdhc1 {
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	non-removable;
+	/delete-property/ cd-gpios;
+	cap-sdio-irq;
+};
+
+&uart1 {
+	pinctrl-0 = <&pinctrl_uart1>;
+	/delete-property/ fsl,uart-has-rtscts;
+};
+
+&uart2 {
+	pinctrl-0 = <&pinctrl_uart2>;
+	/delete-property/ fsl,uart-has-rtscts;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3>;
+	status = "okay";
+};
+
+&uart4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart4>;
+	status = "okay";
+};
+
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	status = "okay";
+};
+
+&uart6 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart6>;
+	status = "okay";
+};
+
+&uart7 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart7>;
+	status = "okay";
+};
+
+&uart8 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart8>;
+	status = "disabled"; /* conflicts with LCDIF */
+};
+
+&iomuxc {
+	hoggrp {
+		fsl,pins = <
+			MX6UL_PAD_CSI_DATA01__GPIO4_IO22	0x0b0b0 /* WLAN_RESET */
+		>;
+	};
+
+	pinctrl_disp0_3: disp0grp-3 {
+		fsl,pins = <
+			MX6UL_PAD_LCD_CLK__LCDIF_CLK		0x10 /* LSCLK */
+			MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE	0x10 /* OE_ACD */
+			MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC	0x10 /* HSYNC */
+			MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC	0x10 /* VSYNC */
+			MX6UL_PAD_LCD_DATA02__LCDIF_DATA02	0x10
+			MX6UL_PAD_LCD_DATA03__LCDIF_DATA03	0x10
+			MX6UL_PAD_LCD_DATA04__LCDIF_DATA04	0x10
+			MX6UL_PAD_LCD_DATA05__LCDIF_DATA05	0x10
+			MX6UL_PAD_LCD_DATA06__LCDIF_DATA06	0x10
+			MX6UL_PAD_LCD_DATA07__LCDIF_DATA07	0x10
+			/* LCD_DATA08..09 not wired */
+			MX6UL_PAD_LCD_DATA10__LCDIF_DATA10	0x10
+			MX6UL_PAD_LCD_DATA11__LCDIF_DATA11	0x10
+			MX6UL_PAD_LCD_DATA12__LCDIF_DATA12	0x10
+			MX6UL_PAD_LCD_DATA13__LCDIF_DATA13	0x10
+			MX6UL_PAD_LCD_DATA14__LCDIF_DATA14	0x10
+			MX6UL_PAD_LCD_DATA15__LCDIF_DATA15	0x10
+			/* LCD_DATA16..17 not wired */
+			MX6UL_PAD_LCD_DATA18__LCDIF_DATA18	0x10
+			MX6UL_PAD_LCD_DATA19__LCDIF_DATA19	0x10
+			MX6UL_PAD_LCD_DATA20__LCDIF_DATA20	0x10
+			MX6UL_PAD_LCD_DATA21__LCDIF_DATA21	0x10
+			MX6UL_PAD_LCD_DATA22__LCDIF_DATA22	0x10
+			MX6UL_PAD_LCD_DATA23__LCDIF_DATA23	0x10
+		>;
+	};
+
+	pinctrl_enet2_mdio: enet2-mdiogrp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO07__ENET2_MDC		0x0b0b0
+			MX6UL_PAD_GPIO1_IO06__ENET2_MDIO	0x1b0b0
+		>;
+	};
+
+	pinctrl_uart3: uart3grp {
+		fsl,pins = <
+			MX6UL_PAD_UART3_TX_DATA__UART3_DCE_TX	0x0b0b0
+			MX6UL_PAD_UART3_RX_DATA__UART3_DCE_RX	0x0b0b0
+		>;
+	};
+
+	pinctrl_uart4: uart4grp {
+		fsl,pins = <
+			MX6UL_PAD_UART4_TX_DATA__UART4_DCE_TX	0x0b0b0
+			MX6UL_PAD_UART4_RX_DATA__UART4_DCE_RX	0x0b0b0
+		>;
+	};
+
+	pinctrl_uart6: uart6grp {
+		fsl,pins = <
+			MX6UL_PAD_CSI_MCLK__UART6_DCE_TX	0x0b0b0
+			MX6UL_PAD_CSI_PIXCLK__UART6_DCE_RX	0x0b0b0
+		>;
+	};
+
+	pinctrl_uart7: uart7grp {
+		fsl,pins = <
+			MX6UL_PAD_LCD_DATA16__UART7_DCE_TX	0x0b0b0
+			MX6UL_PAD_LCD_DATA17__UART7_DCE_RX	0x0b0b0
+		>;
+	};
+
+	pinctrl_uart8: uart8grp {
+		fsl,pins = <
+			MX6UL_PAD_LCD_DATA20__UART8_DCE_TX	0x0b0b0
+			MX6UL_PAD_LCD_DATA21__UART8_DCE_RX	0x0b0b0
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
new file mode 100644
index 0000000..437e9aa
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
@@ -0,0 +1,973 @@
+/*
+ * Copyright 2015 Lothar Waßmann <LW@KARO-electronics.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+	aliases {
+		can0 = &can2;
+		can1 = &can1;
+		display = &display;
+		i2c0 = &i2c2;
+		i2c1 = &i2c_gpio;
+		i2c2 = &i2c1;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
+		lcdif_23bit_pins_a = &pinctrl_disp0_1;
+		lcdif_24bit_pins_a = &pinctrl_disp0_2;
+		pwm0 = &pwm5;
+		reg_can_xcvr = &reg_can_xcvr;
+		serial2 = &uart5;
+		serial4 = &uart3;
+		spi0 = &ecspi2;
+		spi1 = &spi_gpio;
+		stk5led = &user_led;
+		usbh1 = &usbotg2;
+		usbotg = &usbotg1;
+	};
+
+	chosen {
+		stdout-path = &uart1;
+	};
+
+	memory {
+		reg = <0 0>; /* will be filled by U-Boot */
+	};
+
+	clocks {
+		mclk: mclk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <26000000>;
+		};
+	};
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_lcd_rst>;
+		enable-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
+		pwms = <&pwm5 0 500000 PWM_POLARITY_INVERTED>;
+		power-supply = <&reg_lcd_pwr>;
+		/*
+		 * a poor man's way to create a 1:1 relationship between
+		 * the PWM value and the actual duty cycle
+		 */
+		brightness-levels = < 0  1  2  3  4  5  6  7  8  9
+				     10 11 12 13 14 15 16 17 18 19
+				     20 21 22 23 24 25 26 27 28 29
+				     30 31 32 33 34 35 36 37 38 39
+				     40 41 42 43 44 45 46 47 48 49
+				     50 51 52 53 54 55 56 57 58 59
+				     60 61 62 63 64 65 66 67 68 69
+				     70 71 72 73 74 75 76 77 78 79
+				     80 81 82 83 84 85 86 87 88 89
+				     90 91 92 93 94 95 96 97 98 99
+				    100>;
+		default-brightness-level = <50>;
+	};
+
+	i2c_gpio: i2c-gpio {
+		compatible = "i2c-gpio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_i2c_gpio>;
+		gpios = <
+			&gpio5 1 GPIO_ACTIVE_HIGH /* SDA */
+			&gpio5 0 GPIO_ACTIVE_HIGH /* SCL */
+		>;
+		clock-frequency = <400000>;
+		status = "okay";
+
+		ds1339: rtc@68 {
+			compatible = "dallas,ds1339";
+			reg = <0x68>;
+			status = "disabled";
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		user_led: user {
+			label = "Heartbeat";
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_led>;
+			gpios = <&gpio5 9 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+		};
+	};
+
+	reg_3v3_etn: regulator-3v3etn {
+		compatible = "regulator-fixed";
+		regulator-name = "3V3_ETN";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_etnphy_power>;
+		gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	reg_2v5: regulator-2v5 {
+		compatible = "regulator-fixed";
+		regulator-name = "2V5";
+		regulator-min-microvolt = <2500000>;
+		regulator-max-microvolt = <2500000>;
+		regulator-always-on;
+	};
+
+	reg_3v3: regulator-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_can_xcvr: regulator-canxcvr {
+		compatible = "regulator-fixed";
+		regulator-name = "CAN XCVR";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_flexcan_xcvr>;
+		gpio = <&gpio3 5 GPIO_ACTIVE_HIGH>;
+		enable-active-low;
+	};
+
+	reg_lcd_pwr: regulator-lcdpwr {
+		compatible = "regulator-fixed";
+		regulator-name = "LCD POWER";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_lcd_pwr>;
+		gpio = <&gpio5 4 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_usbh1_vbus: regulator-usbh1vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usbh1_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_usbh1_vbus &pinctrl_usbh1_oc>;
+		gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	reg_usbotg_vbus: regulator-usbotgvbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usbotg_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_usbotg_vbus &pinctrl_usbotg_oc>;
+		gpio = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	spi_gpio: spi-gpio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "spi-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_spi_gpio>;
+		gpio-mosi = <&gpio1 30 GPIO_ACTIVE_HIGH>;
+		gpio-miso = <&gpio1 31 GPIO_ACTIVE_HIGH>;
+		gpio-sck = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+		num-chipselects = <2>;
+		cs-gpios = <
+			&gpio1 29 GPIO_ACTIVE_HIGH
+			&gpio1 10 GPIO_ACTIVE_HIGH
+		>;
+		status = "disabled";
+
+		spi@0 {
+			compatible = "spidev";
+			reg = <0>;
+			spi-max-frequency = <660000>;
+		};
+
+		spi@1 {
+			compatible = "spidev";
+			reg = <1>;
+			spi-max-frequency = <660000>;
+		};
+	};
+
+	sound {
+		compatible = "karo,imx6ul-tx6ul-sgtl5000",
+			     "simple-audio-card";
+		simple-audio-card,name = "imx6ul-tx6ul-sgtl5000-audio";
+		simple-audio-card,format = "i2s";
+		simple-audio-card,bitclock-master = <&codec_dai>;
+		simple-audio-card,frame-master = <&codec_dai>;
+		simple-audio-card,widgets =
+			"Microphone", "Mic Jack",
+			"Line", "Line In",
+			"Line", "Line Out",
+			"Headphone", "Headphone Jack";
+		simple-audio-card,routing =
+			"MIC_IN", "Mic Jack",
+			"Mic Jack", "Mic Bias",
+			"Headphone Jack", "HP_OUT";
+
+		cpu_dai: simple-audio-card,cpu {
+			sound-dai = <&sai2>;
+		};
+
+		codec_dai: simple-audio-card,codec {
+			sound-dai = <&sgtl5000>;
+		};
+	};
+};
+
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan1>;
+	xceiver-supply = <&reg_can_xcvr>;
+	status = "okay";
+};
+
+&can2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan2>;
+	xceiver-supply = <&reg_can_xcvr>;
+	status = "okay";
+};
+
+&ecspi2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ecspi2>;
+	fsl,spi-num-chipselects = <2>;
+	cs-gpios = <
+		&gpio1 29 GPIO_ACTIVE_HIGH
+		&gpio1 10 GPIO_ACTIVE_HIGH
+	>;
+	status = "disabled";
+
+	spidev0: spi@0 {
+		compatible = "spidev";
+		reg = <0>;
+		spi-max-frequency = <60000000>;
+	};
+
+	spidev1: spi@1 {
+		compatible = "spidev";
+		reg = <1>;
+		spi-max-frequency = <60000000>;
+	};
+};
+
+&fec1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet1 &pinctrl_enet1_mdio &pinctrl_etnphy0_rst>;
+	phy-mode = "rmii";
+	phy-reset-gpios = <&gpio5 6 GPIO_ACTIVE_HIGH>;
+	phy-supply = <&reg_3v3_etn>;
+	phy-handle = <&etnphy0>;
+	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		etnphy0: ethernet-phy@0 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_etnphy0_int>;
+			interrupt-parent = <&gpio5>;
+			interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+			status = "okay";
+		};
+
+		etnphy1: ethernet-phy@2 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <2>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_etnphy1_int>;
+			interrupt-parent = <&gpio4>;
+			interrupts = <27 IRQ_TYPE_EDGE_FALLING>;
+			status = "okay";
+		};
+	};
+};
+
+&fec2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet2 &pinctrl_etnphy1_rst>;
+	phy-mode = "rmii";
+	phy-reset-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+	phy-supply = <&reg_3v3_etn>;
+	phy-handle = <&etnphy1>;
+	status = "disabled";
+};
+
+&gpmi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
+	fsl,no-blockmark-swap;
+	status = "okay";
+};
+
+&i2c2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	clock-frequency = <400000>;
+	status = "okay";
+
+	sgtl5000: codec@0a {
+		compatible = "fsl,sgtl5000";
+		reg = <0x0a>;
+		#sound-dai-cells = <0>;
+		VDDA-supply = <&reg_2v5>;
+		VDDIO-supply = <&reg_3v3>;
+		clocks = <&mclk>;
+	};
+
+	polytouch: polytouch@38 {
+		compatible = "edt,edt-ft5x06";
+		reg = <0x38>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_edt_ft5x06>;
+		interrupt-parent = <&gpio5>;
+		interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+		reset-gpios = <&gpio5 3 GPIO_ACTIVE_LOW>;
+		wake-gpios = <&gpio5 8 GPIO_ACTIVE_HIGH>;
+		wakeup-source;
+	};
+
+	touchscreen: touchscreen@48 {
+		compatible = "ti,tsc2007";
+		reg = <0x48>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_tsc2007>;
+		interrupt-parent = <&gpio3>;
+		interrupts = <26 IRQ_TYPE_NONE>;
+		gpios = <&gpio3 26 GPIO_ACTIVE_LOW>;
+		ti,x-plate-ohms = <660>;
+		wakeup-source;
+	};
+};
+
+&kpp {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_kpp>;
+	/* sample keymap */
+	/* row/col 0..3 are mapped to KPP row/col 4..7 */
+	linux,keymap = <
+		MATRIX_KEY(4, 4, KEY_POWER)
+		MATRIX_KEY(4, 5, KEY_KP0)
+		MATRIX_KEY(4, 6, KEY_KP1)
+		MATRIX_KEY(4, 7, KEY_KP2)
+		MATRIX_KEY(5, 4, KEY_KP3)
+		MATRIX_KEY(5, 5, KEY_KP4)
+		MATRIX_KEY(5, 6, KEY_KP5)
+		MATRIX_KEY(5, 7, KEY_KP6)
+		MATRIX_KEY(6, 4, KEY_KP7)
+		MATRIX_KEY(6, 5, KEY_KP8)
+		MATRIX_KEY(6, 6, KEY_KP9)
+	>;
+	status = "okay";
+};
+
+&lcdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_disp0_1>;
+	lcd-supply = <&reg_lcd_pwr>;
+	display = <&display>;
+	status = "okay";
+
+	display: display@di0 {
+		bits-per-pixel = <32>;
+		bus-width = <24>;
+		status = "okay";
+
+		display-timings {
+			VGA {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <48>;
+				hsync-len = <96>;
+				hfront-porch = <16>;
+				vback-porch = <31>;
+				vsync-len = <2>;
+				vfront-porch = <12>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			ETV570 {
+				clock-frequency = <25200000>;
+				hactive = <640>;
+				vactive = <480>;
+				hback-porch = <114>;
+				hsync-len = <30>;
+				hfront-porch = <16>;
+				vback-porch = <32>;
+				vsync-len = <3>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			ET0350 {
+				clock-frequency = <6413760>;
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <34>;
+				hsync-len = <34>;
+				hfront-porch = <20>;
+				vback-porch = <15>;
+				vsync-len = <3>;
+				vfront-porch = <4>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			ET0430 {
+				clock-frequency = <9009000>;
+				hactive = <480>;
+				vactive = <272>;
+				hback-porch = <2>;
+				hsync-len = <41>;
+				hfront-porch = <2>;
+				vback-porch = <2>;
+				vsync-len = <10>;
+				vfront-porch = <2>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <0>;
+			};
+
+			ET0500 {
+				clock-frequency = <33264000>;
+				hactive = <800>;
+				vactive = <480>;
+				hback-porch = <88>;
+				hsync-len = <128>;
+				hfront-porch = <40>;
+				vback-porch = <33>;
+				vsync-len = <2>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			ET0700 { /* same as ET0500 */
+				clock-frequency = <33264000>;
+				hactive = <800>;
+				vactive = <480>;
+				hback-porch = <88>;
+				hsync-len = <128>;
+				hfront-porch = <40>;
+				vback-porch = <33>;
+				vsync-len = <2>;
+				vfront-porch = <10>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+
+			ETQ570 {
+				clock-frequency = <6596040>;
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <38>;
+				hsync-len = <30>;
+				hfront-porch = <30>;
+				vback-porch = <16>;
+				vsync-len = <3>;
+				vfront-porch = <4>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+				de-active = <1>;
+				pixelclk-active = <1>;
+			};
+		};
+	};
+};
+
+&pwm5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm5>;
+	#pwm-cells = <3>;
+	status = "okay";
+};
+
+&sai2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_sai2>;
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1 &pinctrl_uart1_rtscts>;
+	fsl,uart-has-rtscts;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2 &pinctrl_uart2_rtscts>;
+	fsl,uart-has-rtscts;
+	status = "okay";
+};
+
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5 &pinctrl_uart5_rtscts>;
+	fsl,uart-has-rtscts;
+	status = "okay";
+};
+
+&usbotg1 {
+	vbus-supply = <&reg_usbotg_vbus>;
+	dr_mode = "peripheral";
+	disable-over-current;
+	status = "okay";
+};
+
+&usbotg2 {
+	vbus-supply = <&reg_usbh1_vbus>;
+	dr_mode = "host";
+	disable-over-current;
+	status = "okay";
+};
+
+&usdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc1 &pinctrl_usdhc1_cd>;
+	bus-width = <4>;
+	no-1-8-v;
+	cd-gpios = <&gpio4 14 GPIO_ACTIVE_LOW>;
+	fsl,wp-controller;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hog>;
+
+	pinctrl_hog: hoggrp {
+	};
+
+	pinctrl_led: ledgrp {
+		fsl,pins = <
+			MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09	0x0b0b0 /* LED */
+		>;
+	};
+
+	pinctrl_disp0_1: disp0grp-1 {
+		fsl,pins = <
+			MX6UL_PAD_LCD_CLK__LCDIF_CLK		0x10 /* LSCLK */
+			MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE	0x10 /* OE_ACD */
+			MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC	0x10 /* HSYNC */
+			MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC	0x10 /* VSYNC */
+			/* PAD DISP0_DAT0 is used for the Flexcan transceiver control on STK5-v5 */
+			MX6UL_PAD_LCD_DATA01__LCDIF_DATA01	0x10
+			MX6UL_PAD_LCD_DATA02__LCDIF_DATA02	0x10
+			MX6UL_PAD_LCD_DATA03__LCDIF_DATA03	0x10
+			MX6UL_PAD_LCD_DATA04__LCDIF_DATA04	0x10
+			MX6UL_PAD_LCD_DATA05__LCDIF_DATA05	0x10
+			MX6UL_PAD_LCD_DATA06__LCDIF_DATA06	0x10
+			MX6UL_PAD_LCD_DATA07__LCDIF_DATA07	0x10
+			MX6UL_PAD_LCD_DATA08__LCDIF_DATA08	0x10
+			MX6UL_PAD_LCD_DATA09__LCDIF_DATA09	0x10
+			MX6UL_PAD_LCD_DATA10__LCDIF_DATA10	0x10
+			MX6UL_PAD_LCD_DATA11__LCDIF_DATA11	0x10
+			MX6UL_PAD_LCD_DATA12__LCDIF_DATA12	0x10
+			MX6UL_PAD_LCD_DATA13__LCDIF_DATA13	0x10
+			MX6UL_PAD_LCD_DATA14__LCDIF_DATA14	0x10
+			MX6UL_PAD_LCD_DATA15__LCDIF_DATA15	0x10
+			MX6UL_PAD_LCD_DATA16__LCDIF_DATA16	0x10
+			MX6UL_PAD_LCD_DATA17__LCDIF_DATA17	0x10
+			MX6UL_PAD_LCD_DATA18__LCDIF_DATA18	0x10
+			MX6UL_PAD_LCD_DATA19__LCDIF_DATA19	0x10
+			MX6UL_PAD_LCD_DATA20__LCDIF_DATA20	0x10
+			MX6UL_PAD_LCD_DATA21__LCDIF_DATA21	0x10
+			MX6UL_PAD_LCD_DATA22__LCDIF_DATA22	0x10
+			MX6UL_PAD_LCD_DATA23__LCDIF_DATA23	0x10
+		>;
+	};
+
+	pinctrl_disp0_2: disp0grp-2 {
+		fsl,pins = <
+			MX6UL_PAD_LCD_CLK__LCDIF_CLK		0x10 /* LSCLK */
+			MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE	0x10 /* OE_ACD */
+			MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC	0x10 /* HSYNC */
+			MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC	0x10 /* VSYNC */
+			MX6UL_PAD_LCD_DATA00__LCDIF_DATA00	0x10
+			MX6UL_PAD_LCD_DATA01__LCDIF_DATA01	0x10
+			MX6UL_PAD_LCD_DATA02__LCDIF_DATA02	0x10
+			MX6UL_PAD_LCD_DATA03__LCDIF_DATA03	0x10
+			MX6UL_PAD_LCD_DATA04__LCDIF_DATA04	0x10
+			MX6UL_PAD_LCD_DATA05__LCDIF_DATA05	0x10
+			MX6UL_PAD_LCD_DATA06__LCDIF_DATA06	0x10
+			MX6UL_PAD_LCD_DATA07__LCDIF_DATA07	0x10
+			MX6UL_PAD_LCD_DATA08__LCDIF_DATA08	0x10
+			MX6UL_PAD_LCD_DATA09__LCDIF_DATA09	0x10
+			MX6UL_PAD_LCD_DATA10__LCDIF_DATA10	0x10
+			MX6UL_PAD_LCD_DATA11__LCDIF_DATA11	0x10
+			MX6UL_PAD_LCD_DATA12__LCDIF_DATA12	0x10
+			MX6UL_PAD_LCD_DATA13__LCDIF_DATA13	0x10
+			MX6UL_PAD_LCD_DATA14__LCDIF_DATA14	0x10
+			MX6UL_PAD_LCD_DATA15__LCDIF_DATA15	0x10
+			MX6UL_PAD_LCD_DATA16__LCDIF_DATA16	0x10
+			MX6UL_PAD_LCD_DATA17__LCDIF_DATA17	0x10
+			MX6UL_PAD_LCD_DATA18__LCDIF_DATA18	0x10
+			MX6UL_PAD_LCD_DATA19__LCDIF_DATA19	0x10
+			MX6UL_PAD_LCD_DATA20__LCDIF_DATA20	0x10
+			MX6UL_PAD_LCD_DATA21__LCDIF_DATA21	0x10
+			MX6UL_PAD_LCD_DATA22__LCDIF_DATA22	0x10
+			MX6UL_PAD_LCD_DATA23__LCDIF_DATA23	0x10
+		>;
+	};
+
+	pinctrl_ecspi2: ecspi2grp {
+		fsl,pins = <
+			MX6UL_PAD_UART4_RX_DATA__GPIO1_IO29	0x0b0b0 /* CSPI_SS */
+			MX6UL_PAD_JTAG_MOD__GPIO1_IO10		0x0b0b0 /* CSPI_SS */
+			MX6UL_PAD_UART5_TX_DATA__ECSPI2_MOSI	0x0b0b0 /* CSPI_MOSI */
+			MX6UL_PAD_UART5_RX_DATA__ECSPI2_MISO	0x0b0b0 /* CSPI_MISO */
+			MX6UL_PAD_UART4_TX_DATA__ECSPI2_SCLK	0x0b0b0 /* CSPI_SCLK */
+		>;
+	};
+
+	pinctrl_edt_ft5x06: edt-ft5x06grp {
+		fsl,pins = <
+			MX6UL_PAD_SNVS_TAMPER2__GPIO5_IO02	0x1b0b0 /* Interrupt */
+			MX6UL_PAD_SNVS_TAMPER3__GPIO5_IO03	0x1b0b0 /* Reset */
+			MX6UL_PAD_SNVS_TAMPER8__GPIO5_IO08	0x1b0b0 /* Wake */
+		>;
+	};
+
+	pinctrl_enet1: enet1grp {
+		fsl,pins = <
+			MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00	0x000b0
+			MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01	0x000b0
+			MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN	0x000b0
+			MX6UL_PAD_ENET1_RX_ER__ENET1_RX_ER	0x000b0
+			MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN	0x000b0
+			MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00	0x000b0
+			MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01	0x000b0
+			MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1	0x400000b1
+		>;
+	};
+
+	pinctrl_enet2: enet2grp {
+		fsl,pins = <
+			MX6UL_PAD_ENET2_RX_DATA0__ENET2_RDATA00	0x000b0
+			MX6UL_PAD_ENET2_RX_DATA1__ENET2_RDATA01	0x000b0
+			MX6UL_PAD_ENET2_RX_EN__ENET2_RX_EN	0x000b0
+			MX6UL_PAD_ENET2_RX_ER__ENET2_RX_ER	0x000b0
+			MX6UL_PAD_ENET2_TX_EN__ENET2_TX_EN	0x000b0
+			MX6UL_PAD_ENET2_TX_DATA0__ENET2_TDATA00	0x000b0
+			MX6UL_PAD_ENET2_TX_DATA1__ENET2_TDATA01	0x000b0
+			MX6UL_PAD_ENET2_TX_CLK__ENET2_REF_CLK2	0x400000b1
+		>;
+	};
+
+	pinctrl_enet1_mdio: enet1-mdiogrp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO07__ENET1_MDC		0x0b0b0
+			MX6UL_PAD_GPIO1_IO06__ENET1_MDIO	0x1b0b0
+		>;
+	};
+
+	pinctrl_etnphy_power: etnphy-pwrgrp {
+		fsl,pins = <
+			MX6UL_PAD_SNVS_TAMPER7__GPIO5_IO07	0x0b0b0 /* ETN PHY POWER */
+		>;
+	};
+
+	pinctrl_etnphy0_int: etnphy-intgrp-0 {
+		fsl,pins = <
+			MX6UL_PAD_SNVS_TAMPER5__GPIO5_IO05	0x0b0b0 /* ETN PHY INT */
+		>;
+	};
+
+	pinctrl_etnphy0_rst: etnphy-rstgrp-0 {
+		fsl,pins = <
+			MX6UL_PAD_SNVS_TAMPER6__GPIO5_IO06	0x0b0b0 /* ETN PHY RESET */
+		>;
+	};
+
+	pinctrl_etnphy1_int: etnphy-intgrp-1 {
+		fsl,pins = <
+			MX6UL_PAD_CSI_DATA06__GPIO4_IO27	0x0b0b0 /* ETN PHY INT */
+		>;
+	};
+
+	pinctrl_etnphy1_rst: etnphy-rstgrp-1 {
+		fsl,pins = <
+			MX6UL_PAD_CSI_DATA07__GPIO4_IO28	0x0b0b0 /* ETN PHY RESET */
+		>;
+	};
+
+	pinctrl_flexcan1: flexcan1grp {
+		fsl,pins = <
+			MX6UL_PAD_UART3_CTS_B__FLEXCAN1_TX	0x0b0b0
+			MX6UL_PAD_UART3_RTS_B__FLEXCAN1_RX	0x0b0b0
+		>;
+	};
+
+	pinctrl_flexcan2: flexcan2grp {
+		fsl,pins = <
+			MX6UL_PAD_UART2_CTS_B__FLEXCAN2_TX	0x0b0b0
+			MX6UL_PAD_UART2_RTS_B__FLEXCAN2_RX	0x0b0b0
+		>;
+	};
+
+	pinctrl_flexcan_xcvr: flexcan-xcvrgrp {
+		fsl,pins = <
+			MX6UL_PAD_LCD_DATA00__GPIO3_IO05	0x0b0b0 /* Flexcan XCVR enable */
+		>;
+	};
+
+	pinctrl_gpmi_nand: gpminandgrp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_CLE__RAWNAND_CLE		0x0b0b1
+			MX6UL_PAD_NAND_ALE__RAWNAND_ALE		0x0b0b1
+			MX6UL_PAD_NAND_WP_B__RAWNAND_WP_B	0x0b0b1
+			MX6UL_PAD_NAND_READY_B__RAWNAND_READY_B	0x0b000
+			MX6UL_PAD_NAND_CE0_B__RAWNAND_CE0_B	0x0b0b1
+			MX6UL_PAD_NAND_RE_B__RAWNAND_RE_B	0x0b0b1
+			MX6UL_PAD_NAND_WE_B__RAWNAND_WE_B	0x0b0b1
+			MX6UL_PAD_NAND_DATA00__RAWNAND_DATA00	0x0b0b1
+			MX6UL_PAD_NAND_DATA01__RAWNAND_DATA01	0x0b0b1
+			MX6UL_PAD_NAND_DATA02__RAWNAND_DATA02	0x0b0b1
+			MX6UL_PAD_NAND_DATA03__RAWNAND_DATA03	0x0b0b1
+			MX6UL_PAD_NAND_DATA04__RAWNAND_DATA04	0x0b0b1
+			MX6UL_PAD_NAND_DATA05__RAWNAND_DATA05	0x0b0b1
+			MX6UL_PAD_NAND_DATA06__RAWNAND_DATA06	0x0b0b1
+			MX6UL_PAD_NAND_DATA07__RAWNAND_DATA07	0x0b0b1
+		>;
+	};
+
+	pinctrl_i2c_gpio: i2c-gpiogrp {
+		fsl,pins = <
+			MX6UL_PAD_SNVS_TAMPER0__GPIO5_IO00	0x4001b8b1 /* I2C SCL */
+			MX6UL_PAD_SNVS_TAMPER1__GPIO5_IO01	0x4001b8b1 /* I2C SDA */
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO00__I2C2_SCL		0x4001b8b1
+			MX6UL_PAD_GPIO1_IO01__I2C2_SDA		0x4001b8b1
+		>;
+	};
+
+	pinctrl_kpp: kppgrp {
+		fsl,pins = <
+			MX6UL_PAD_ENET2_RX_DATA1__KPP_COL04	0x1b0b0
+			MX6UL_PAD_ENET2_TX_DATA0__KPP_COL05	0x1b0b0
+			MX6UL_PAD_ENET2_TX_EN__KPP_COL06	0x1b0b0
+			MX6UL_PAD_ENET2_RX_ER__KPP_COL07	0x1b0b0
+			MX6UL_PAD_ENET2_RX_DATA0__KPP_ROW04	0x1b0b0
+			MX6UL_PAD_ENET2_RX_EN__KPP_ROW05	0x1b0b0
+			MX6UL_PAD_ENET2_TX_DATA1__KPP_ROW06	0x1b0b0
+			MX6UL_PAD_ENET2_TX_CLK__KPP_ROW07	0x1b0b0
+		>;
+	};
+
+	pinctrl_lcd_pwr: lcd-pwrgrp {
+		fsl,pins = <
+			MX6UL_PAD_SNVS_TAMPER4__GPIO5_IO04	0x0b0b0 /* LCD Power Enable */
+		>;
+	};
+
+	pinctrl_lcd_rst: lcd-rstgrp {
+		fsl,pins = <
+			MX6UL_PAD_LCD_RESET__GPIO3_IO04	0x0b0b0 /* LCD Reset */
+		>;
+	};
+
+	pinctrl_pwm5: pwm5grp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_DQS__PWM5_OUT		0x0b0b0
+		>;
+	};
+
+	pinctrl_sai2: sai2grp {
+		fsl,pins = <
+			MX6UL_PAD_JTAG_TCK__SAI2_RX_DATA	0x0b0b0 /* SSI1_RXD */
+			MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA	0x0b0b0 /* SSI1_TXD */
+			MX6UL_PAD_JTAG_TDI__SAI2_TX_BCLK	0x0b0b0 /* SSI1_CLK */
+			MX6UL_PAD_JTAG_TDO__SAI2_TX_SYNC	0x0b0b0 /* SSI1_FS */
+		>;
+	};
+
+	pinctrl_spi_gpio: spi-gpiogrp {
+		fsl,pins = <
+			MX6UL_PAD_UART4_RX_DATA__GPIO1_IO29	0x0b0b0 /* CSPI_SS */
+			MX6UL_PAD_JTAG_MOD__GPIO1_IO10		0x0b0b0 /* CSPI_SS */
+			MX6UL_PAD_UART5_TX_DATA__GPIO1_IO30	0x0b0b0 /* CSPI_MOSI */
+			MX6UL_PAD_UART5_RX_DATA__GPIO1_IO31	0x0b0b0 /* CSPI_MISO */
+			MX6UL_PAD_UART4_TX_DATA__GPIO1_IO28	0x0b0b0 /* CSPI_SCLK */
+		>;
+	};
+
+	pinctrl_tsc2007: tsc2007grp {
+		fsl,pins = <
+			MX6UL_PAD_JTAG_TMS__GPIO1_IO11		0x1b0b0 /* Interrupt */
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX	0x0b0b0
+			MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX	0x0b0b0
+		>;
+	};
+
+	pinctrl_uart1_rtscts: uart1-rtsctsgrp {
+		fsl,pins = <
+			MX6UL_PAD_UART1_RTS_B__UART1_DCE_RTS	0x0b0b0
+			MX6UL_PAD_UART1_CTS_B__UART1_DCE_CTS	0x0b0b0
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX	0x0b0b0
+			MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX	0x0b0b0
+		>;
+	};
+
+	pinctrl_uart2_rtscts: uart2-rtsctsgrp {
+		fsl,pins = <
+			MX6UL_PAD_UART3_RX_DATA__UART2_DCE_RTS	0x0b0b0
+			MX6UL_PAD_UART3_TX_DATA__UART2_DCE_CTS	0x0b0b0
+		>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO04__UART5_DCE_TX	0x0b0b0
+			MX6UL_PAD_GPIO1_IO05__UART5_DCE_RX	0x0b0b0
+		>;
+	};
+
+	pinctrl_uart5_rtscts: uart5-rtsctsgrp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO08__UART5_DCE_RTS	0x0b0b0
+			MX6UL_PAD_GPIO1_IO09__UART5_DCE_CTS	0x0b0b0
+		>;
+	};
+
+	pinctrl_usbh1_oc: usbh1-ocgrp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO03__GPIO1_IO03	0x17059 /* USBH1_OC */
+		>;
+	};
+
+	pinctrl_usbh1_vbus: usbh1-vbusgrp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO02__GPIO1_IO02	0x0b0b0 /* USBH1_VBUSEN */
+		>;
+	};
+
+	pinctrl_usbotg_oc: usbotg-ocgrp {
+		fsl,pins = <
+			MX6UL_PAD_UART3_RTS_B__GPIO1_IO27	0x17059 /* USBOTG_OC */
+		>;
+	};
+
+	pinctrl_usbotg_vbus: usbotg-vbusgrp {
+		fsl,pins = <
+			MX6UL_PAD_UART3_CTS_B__GPIO1_IO26	0x1b0b0 /* USBOTG_VBUSEN */
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX6UL_PAD_SD1_CMD__USDHC1_CMD		0x070b1
+			MX6UL_PAD_SD1_CLK__USDHC1_CLK		0x07099
+			MX6UL_PAD_SD1_DATA0__USDHC1_DATA0	0x070b1
+			MX6UL_PAD_SD1_DATA1__USDHC1_DATA1	0x070b1
+			MX6UL_PAD_SD1_DATA2__USDHC1_DATA2	0x070b1
+			MX6UL_PAD_SD1_DATA3__USDHC1_DATA3	0x070b1
+		>;
+	};
+
+	pinctrl_usdhc1_cd: usdhc1cdgrp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_CE1_B__GPIO4_IO14	0x170b0 /* SD1 CD */
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_WE_B__USDHC2_CMD		0x070b1
+			MX6UL_PAD_NAND_RE_B__USDHC2_CLK		0x070b1
+			MX6UL_PAD_NAND_DATA00__USDHC2_DATA0	0x070b1
+			MX6UL_PAD_NAND_DATA01__USDHC2_DATA1	0x070b1
+			MX6UL_PAD_NAND_DATA02__USDHC2_DATA2	0x070b1
+			MX6UL_PAD_NAND_DATA03__USDHC2_DATA3	0x070b1
+			/* eMMC RESET */
+			MX6UL_PAD_NAND_ALE__USDHC2_RESET_B	0x170b0
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index 7177899..4356b65 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -55,15 +55,15 @@
 			clock-latency = <61036>; /* two CLK32 periods */
 			operating-points = <
 				/* kHz	uV */
-				528000	1250000
-				396000	1150000
-				198000	1150000
+				528000	1175000
+				396000	1025000
+				198000	950000
 			>;
 			fsl,soc-operating-points = <
 				/* KHz	uV */
-				528000	1250000
-				396000	1150000
-				198000	1150000
+				528000	1175000
+				396000	1175000
+				198000	1175000
 			>;
 			clocks = <&clks IMX6UL_CLK_ARM>,
 				 <&clks IMX6UL_CLK_PLL2_BUS>,
diff --git a/arch/arm/boot/dts/integrator.dtsi b/arch/arm/boot/dts/integrator.dtsi
index b82f0e6..6fe0dd1 100644
--- a/arch/arm/boot/dts/integrator.dtsi
+++ b/arch/arm/boot/dts/integrator.dtsi
@@ -52,8 +52,9 @@
 	};
 
 	flash@24000000 {
-		compatible = "cfi-flash";
+		compatible = "arm,versatile-flash", "cfi-flash";
 		reg = <0x24000000 0x02000000>;
+		bank-width = <4>;
 	};
 
 	fpga {
diff --git a/arch/arm/boot/dts/k2e-evm.dts b/arch/arm/boot/dts/k2e-evm.dts
deleted file mode 100644
index b7e9980..0000000
--- a/arch/arm/boot/dts/k2e-evm.dts
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright 2013-2014 Texas Instruments, Inc.
- *
- * Keystone 2 Edison EVM device tree
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-/dts-v1/;
-
-#include "keystone.dtsi"
-#include "k2e.dtsi"
-
-/ {
-	compatible = "ti,k2e-evm", "ti,k2e", "ti,keystone";
-	model = "Texas Instruments Keystone 2 Edison EVM";
-
-	soc {
-
-		clocks {
-			refclksys: refclksys {
-				#clock-cells = <0>;
-				compatible = "fixed-clock";
-				clock-frequency = <100000000>;
-				clock-output-names = "refclk-sys";
-			};
-
-			refclkpass: refclkpass {
-				#clock-cells = <0>;
-				compatible = "fixed-clock";
-				clock-frequency = <100000000>;
-				clock-output-names = "refclk-pass";
-			};
-
-			refclkddr3a: refclkddr3a {
-				#clock-cells = <0>;
-				compatible = "fixed-clock";
-				clock-frequency = <100000000>;
-				clock-output-names = "refclk-ddr3a";
-			};
-		};
-	};
-};
-
-&usb_phy {
-	status = "okay";
-};
-
-&usb {
-	status = "okay";
-};
-
-&usb1_phy {
-	status = "okay";
-};
-
-&usb1 {
-	status = "okay";
-};
-
-&i2c0 {
-	dtt@50 {
-		compatible = "at,24c1024";
-		reg = <0x50>;
-	};
-};
-
-&aemif {
-	cs0 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		clock-ranges;
-		ranges;
-
-		ti,cs-chipselect = <0>;
-		/* all timings in nanoseconds */
-		ti,cs-min-turnaround-ns = <12>;
-		ti,cs-read-hold-ns = <6>;
-		ti,cs-read-strobe-ns = <23>;
-		ti,cs-read-setup-ns = <9>;
-		ti,cs-write-hold-ns = <8>;
-		ti,cs-write-strobe-ns = <23>;
-		ti,cs-write-setup-ns = <8>;
-
-		nand@0,0 {
-			compatible = "ti,keystone-nand","ti,davinci-nand";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0 0 0x4000000
-			       1 0 0x0000100>;
-
-			ti,davinci-chipselect = <0>;
-			ti,davinci-mask-ale = <0x2000>;
-			ti,davinci-mask-cle = <0x4000>;
-			ti,davinci-mask-chipsel = <0>;
-			nand-ecc-mode = "hw";
-			ti,davinci-ecc-bits = <4>;
-			nand-on-flash-bbt;
-
-			partition@0 {
-				label = "u-boot";
-				reg = <0x0 0x100000>;
-				read-only;
-			};
-
-			partition@100000 {
-				label = "params";
-				reg = <0x100000 0x80000>;
-				read-only;
-			};
-
-			partition@180000 {
-				label = "ubifs";
-				reg = <0x180000 0x1FE80000>;
-			};
-		};
-	};
-};
-
-&spi0 {
-	nor_flash: n25q128a11@0 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "Micron,n25q128a11";
-		spi-max-frequency = <54000000>;
-		m25p,fast-read;
-		reg = <0>;
-
-		partition@0 {
-			label = "u-boot-spl";
-			reg = <0x0 0x80000>;
-			read-only;
-		};
-
-		partition@1 {
-			label = "misc";
-			reg = <0x80000 0xf80000>;
-		};
-	};
-};
-
-&mdio {
-	status = "ok";
-	ethphy0: ethernet-phy@0 {
-		compatible = "marvell,88E1514", "marvell,88E1510", "ethernet-phy-ieee802.3-c22";
-		reg = <0>;
-	};
-
-	ethphy1: ethernet-phy@1 {
-		compatible = "marvell,88E1514", "marvell,88E1510", "ethernet-phy-ieee802.3-c22";
-		reg = <1>;
-	};
-};
diff --git a/arch/arm/boot/dts/k2e.dtsi b/arch/arm/boot/dts/k2e.dtsi
deleted file mode 100644
index 1097dad..0000000
--- a/arch/arm/boot/dts/k2e.dtsi
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright 2013-2014 Texas Instruments, Inc.
- *
- * Keystone 2 Edison soc device tree
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/ {
-	compatible = "ti,k2e", "ti,keystone";
-	model = "Texas Instruments Keystone 2 Edison SoC";
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		interrupt-parent = <&gic>;
-
-		cpu@0 {
-			compatible = "arm,cortex-a15";
-			device_type = "cpu";
-			reg = <0>;
-		};
-
-		cpu@1 {
-			compatible = "arm,cortex-a15";
-			device_type = "cpu";
-			reg = <1>;
-		};
-
-		cpu@2 {
-			compatible = "arm,cortex-a15";
-			device_type = "cpu";
-			reg = <2>;
-		};
-
-		cpu@3 {
-			compatible = "arm,cortex-a15";
-			device_type = "cpu";
-			reg = <3>;
-		};
-	};
-
-	soc {
-		/include/ "k2e-clocks.dtsi"
-
-		usb: usb@2680000 {
-			interrupts = <GIC_SPI 152 IRQ_TYPE_EDGE_RISING>;
-			dwc3@2690000 {
-				interrupts = <GIC_SPI 152 IRQ_TYPE_EDGE_RISING>;
-			};
-		};
-
-		usb1_phy: usb_phy@2620750 {
-			compatible = "ti,keystone-usbphy";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0x2620750 24>;
-			status = "disabled";
-		};
-
-		usb1: usb@25000000 {
-			compatible = "ti,keystone-dwc3";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0x25000000 0x10000>;
-			clocks = <&clkusb1>;
-			clock-names = "usb";
-			interrupts = <GIC_SPI 414 IRQ_TYPE_EDGE_RISING>;
-			ranges;
-			dma-coherent;
-			dma-ranges;
-			status = "disabled";
-
-			dwc3@25010000 {
-				compatible = "synopsys,dwc3";
-				reg = <0x25010000 0x70000>;
-				interrupts = <GIC_SPI 414 IRQ_TYPE_EDGE_RISING>;
-				usb-phy = <&usb1_phy>, <&usb1_phy>;
-			};
-		};
-
-		dspgpio0: keystone_dsp_gpio@02620240 {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x240>;
-		};
-
-		pcie1: pcie@21020000 {
-			compatible = "ti,keystone-pcie","snps,dw-pcie";
-			clocks = <&clkpcie1>;
-			clock-names = "pcie";
-			#address-cells = <3>;
-			#size-cells = <2>;
-			reg =  <0x21021000 0x2000>, <0x21020000 0x1000>, <0x02620128 4>;
-			ranges = <0x81000000 0 0 0x23260000 0x4000 0x4000
-				0x82000000 0 0x60000000 0x60000000 0 0x10000000>;
-
-			status = "disabled";
-			device_type = "pci";
-			num-lanes = <2>;
-
-			#interrupt-cells = <1>;
-			interrupt-map-mask = <0 0 0 7>;
-			interrupt-map = <0 0 0 1 &pcie_intc1 0>, /* INT A */
-					<0 0 0 2 &pcie_intc1 1>, /* INT B */
-					<0 0 0 3 &pcie_intc1 2>, /* INT C */
-					<0 0 0 4 &pcie_intc1 3>; /* INT D */
-
-			pcie_msi_intc1: msi-interrupt-controller {
-				interrupt-controller;
-				#interrupt-cells = <1>;
-				interrupt-parent = <&gic>;
-				interrupts = <GIC_SPI 377 IRQ_TYPE_EDGE_RISING>,
-					<GIC_SPI 378 IRQ_TYPE_EDGE_RISING>,
-					<GIC_SPI 379 IRQ_TYPE_EDGE_RISING>,
-					<GIC_SPI 380 IRQ_TYPE_EDGE_RISING>,
-					<GIC_SPI 381 IRQ_TYPE_EDGE_RISING>,
-					<GIC_SPI 382 IRQ_TYPE_EDGE_RISING>,
-					<GIC_SPI 383 IRQ_TYPE_EDGE_RISING>,
-					<GIC_SPI 384 IRQ_TYPE_EDGE_RISING>;
-			};
-
-			pcie_intc1: legacy-interrupt-controller {
-				interrupt-controller;
-				#interrupt-cells = <1>;
-				interrupt-parent = <&gic>;
-				interrupts = <GIC_SPI 373 IRQ_TYPE_EDGE_RISING>,
-					<GIC_SPI 374 IRQ_TYPE_EDGE_RISING>,
-					<GIC_SPI 375 IRQ_TYPE_EDGE_RISING>,
-					<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
-			};
-		};
-
-		mdio: mdio@24200f00 {
-			compatible	= "ti,keystone_mdio", "ti,davinci_mdio";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0x24200f00 0x100>;
-			status = "disabled";
-			clocks = <&clkcpgmac>;
-			clock-names = "fck";
-			bus_freq	= <2500000>;
-		};
-		/include/ "k2e-netcp.dtsi"
-	};
-};
diff --git a/arch/arm/boot/dts/k2hk-evm.dts b/arch/arm/boot/dts/k2hk-evm.dts
deleted file mode 100644
index 8161bf5..0000000
--- a/arch/arm/boot/dts/k2hk-evm.dts
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright 2013-2014 Texas Instruments, Inc.
- *
- * Keystone 2 Kepler/Hawking EVM device tree
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-/dts-v1/;
-
-#include "keystone.dtsi"
-#include "k2hk.dtsi"
-
-/ {
-	compatible =  "ti,k2hk-evm", "ti,k2hk", "ti,keystone";
-	model = "Texas Instruments Keystone 2 Kepler/Hawking EVM";
-
-	soc {
-		clocks {
-			refclksys: refclksys {
-				#clock-cells = <0>;
-				compatible = "fixed-clock";
-				clock-frequency = <122880000>;
-				clock-output-names = "refclk-sys";
-			};
-
-			refclkpass: refclkpass {
-				#clock-cells = <0>;
-				compatible = "fixed-clock";
-				clock-frequency = <122880000>;
-				clock-output-names = "refclk-pass";
-			};
-
-			refclkarm: refclkarm {
-				#clock-cells = <0>;
-				compatible = "fixed-clock";
-				clock-frequency = <125000000>;
-				clock-output-names = "refclk-arm";
-			};
-
-			refclkddr3a: refclkddr3a {
-				#clock-cells = <0>;
-				compatible = "fixed-clock";
-				clock-frequency = <100000000>;
-				clock-output-names = "refclk-ddr3a";
-			};
-
-			refclkddr3b: refclkddr3b {
-				#clock-cells = <0>;
-				compatible = "fixed-clock";
-				clock-frequency = <100000000>;
-				clock-output-names = "refclk-ddr3b";
-			};
-		};
-	};
-
-	leds {
-		compatible = "gpio-leds";
-		debug1_1 {
-			label = "keystone:green:debug1";
-			gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>; /* 12 */
-		};
-
-		debug1_2 {
-			label = "keystone:red:debug1";
-			gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>; /* 13 */
-		};
-
-		debug2 {
-			label = "keystone:blue:debug2";
-			gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>; /* 14 */
-		};
-
-		debug3 {
-			label = "keystone:blue:debug3";
-			gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>; /* 15 */
-		};
-	};
-};
-
-&usb_phy {
-	status = "okay";
-};
-
-&usb {
-	status = "okay";
-};
-
-&aemif {
-	cs0 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		clock-ranges;
-		ranges;
-
-		ti,cs-chipselect = <0>;
-		/* all timings in nanoseconds */
-		ti,cs-min-turnaround-ns = <12>;
-		ti,cs-read-hold-ns = <6>;
-		ti,cs-read-strobe-ns = <23>;
-		ti,cs-read-setup-ns = <9>;
-		ti,cs-write-hold-ns = <8>;
-		ti,cs-write-strobe-ns = <23>;
-		ti,cs-write-setup-ns = <8>;
-
-		nand@0,0 {
-			compatible = "ti,keystone-nand","ti,davinci-nand";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0 0 0x4000000
-			       1 0 0x0000100>;
-
-			ti,davinci-chipselect = <0>;
-			ti,davinci-mask-ale = <0x2000>;
-			ti,davinci-mask-cle = <0x4000>;
-			ti,davinci-mask-chipsel = <0>;
-			nand-ecc-mode = "hw";
-			ti,davinci-ecc-bits = <4>;
-			nand-on-flash-bbt;
-
-			partition@0 {
-				label = "u-boot";
-				reg = <0x0 0x100000>;
-				read-only;
-			};
-
-			partition@100000 {
-				label = "params";
-				reg = <0x100000 0x80000>;
-				read-only;
-			};
-
-			partition@180000 {
-				label = "ubifs";
-				reg = <0x180000 0x1fe80000>;
-			};
-		};
-	};
-};
-
-&i2c0 {
-	dtt@50 {
-		compatible = "at,24c1024";
-		reg = <0x50>;
-	};
-};
-
-&spi0 {
-	nor_flash: n25q128a11@0 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "Micron,n25q128a11";
-		spi-max-frequency = <54000000>;
-		m25p,fast-read;
-		reg = <0>;
-
-		partition@0 {
-			label = "u-boot-spl";
-			reg = <0x0 0x80000>;
-			read-only;
-		};
-
-		partition@1 {
-			label = "misc";
-			reg = <0x80000 0xf80000>;
-		};
-	};
-};
-
-&mdio {
-	status = "ok";
-	ethphy0: ethernet-phy@0 {
-		compatible = "marvell,88E1111", "ethernet-phy-ieee802.3-c22";
-		reg = <0>;
-	};
-
-	ethphy1: ethernet-phy@1 {
-		compatible = "marvell,88E1111", "ethernet-phy-ieee802.3-c22";
-		reg = <1>;
-	};
-};
diff --git a/arch/arm/boot/dts/k2hk.dtsi b/arch/arm/boot/dts/k2hk.dtsi
deleted file mode 100644
index ada4c7a..0000000
--- a/arch/arm/boot/dts/k2hk.dtsi
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright 2013-2014 Texas Instruments, Inc.
- *
- * Keystone 2 Kepler/Hawking soc specific device tree
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/ {
-	compatible = "ti,k2hk", "ti,keystone";
-	model = "Texas Instruments Keystone 2 Kepler/Hawking SoC";
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		interrupt-parent = <&gic>;
-
-		cpu@0 {
-			compatible = "arm,cortex-a15";
-			device_type = "cpu";
-			reg = <0>;
-		};
-
-		cpu@1 {
-			compatible = "arm,cortex-a15";
-			device_type = "cpu";
-			reg = <1>;
-		};
-
-		cpu@2 {
-			compatible = "arm,cortex-a15";
-			device_type = "cpu";
-			reg = <2>;
-		};
-
-		cpu@3 {
-			compatible = "arm,cortex-a15";
-			device_type = "cpu";
-			reg = <3>;
-		};
-	};
-
-	soc {
-		/include/ "k2hk-clocks.dtsi"
-
-		dspgpio0: keystone_dsp_gpio@02620240 {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x240>;
-		};
-
-		dspgpio1: keystone_dsp_gpio@2620244 {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x244>;
-		};
-
-		dspgpio2: keystone_dsp_gpio@2620248 {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x248>;
-		};
-
-		dspgpio3: keystone_dsp_gpio@262024c {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x24c>;
-		};
-
-		dspgpio4: keystone_dsp_gpio@2620250 {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x250>;
-		};
-
-		dspgpio5: keystone_dsp_gpio@2620254 {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x254>;
-		};
-
-		dspgpio6: keystone_dsp_gpio@2620258 {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x258>;
-		};
-
-		dspgpio7: keystone_dsp_gpio@262025c {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x25c>;
-		};
-
-		mdio: mdio@02090300 {
-			compatible	= "ti,keystone_mdio", "ti,davinci_mdio";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0x02090300 0x100>;
-			status = "disabled";
-			clocks = <&clkcpgmac>;
-			clock-names = "fck";
-			bus_freq	= <2500000>;
-		};
-		/include/ "k2hk-netcp.dtsi"
-	};
-};
diff --git a/arch/arm/boot/dts/k2l-evm.dts b/arch/arm/boot/dts/k2l-evm.dts
deleted file mode 100644
index 0086124..0000000
--- a/arch/arm/boot/dts/k2l-evm.dts
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright 2014 Texas Instruments, Inc.
- *
- * Keystone 2 Lamarr EVM device tree
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-/dts-v1/;
-
-#include "keystone.dtsi"
-#include "k2l.dtsi"
-
-/ {
-	compatible = "ti,k2l-evm", "ti,k2l", "ti,keystone";
-	model = "Texas Instruments Keystone 2 Lamarr EVM";
-
-	soc {
-		clocks {
-			refclksys: refclksys {
-				#clock-cells = <0>;
-				compatible = "fixed-clock";
-				clock-frequency = <122880000>;
-				clock-output-names = "refclk-sys";
-			};
-		};
-	};
-};
-
-&usb_phy {
-	status = "okay";
-};
-
-&usb {
-	status = "okay";
-};
-
-&i2c0 {
-	dtt@50 {
-		compatible = "at,24c1024";
-		reg = <0x50>;
-	};
-};
-
-&aemif {
-	cs0 {
-		#address-cells = <2>;
-		#size-cells = <1>;
-		clock-ranges;
-		ranges;
-
-		ti,cs-chipselect = <0>;
-		/* all timings in nanoseconds */
-		ti,cs-min-turnaround-ns = <12>;
-		ti,cs-read-hold-ns = <6>;
-		ti,cs-read-strobe-ns = <23>;
-		ti,cs-read-setup-ns = <9>;
-		ti,cs-write-hold-ns = <8>;
-		ti,cs-write-strobe-ns = <23>;
-		ti,cs-write-setup-ns = <8>;
-
-		nand@0,0 {
-			compatible = "ti,keystone-nand","ti,davinci-nand";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			reg = <0 0 0x4000000
-			       1 0 0x0000100>;
-
-			ti,davinci-chipselect = <0>;
-			ti,davinci-mask-ale = <0x2000>;
-			ti,davinci-mask-cle = <0x4000>;
-			ti,davinci-mask-chipsel = <0>;
-			nand-ecc-mode = "hw";
-			ti,davinci-ecc-bits = <4>;
-			nand-on-flash-bbt;
-
-			partition@0 {
-				label = "u-boot";
-				reg = <0x0 0x100000>;
-				read-only;
-			};
-
-			partition@100000 {
-				label = "params";
-				reg = <0x100000 0x80000>;
-				read-only;
-			};
-
-			partition@180000 {
-				label = "ubifs";
-				reg = <0x180000 0x7FE80000>;
-			};
-		};
-	};
-};
-
-&spi0 {
-	nor_flash: n25q128a11@0 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "Micron,n25q128a11";
-		spi-max-frequency = <54000000>;
-		m25p,fast-read;
-		reg = <0>;
-
-		partition@0 {
-			label = "u-boot-spl";
-			reg = <0x0 0x80000>;
-			read-only;
-		};
-
-		partition@1 {
-			label = "misc";
-			reg = <0x80000 0xf80000>;
-		};
-	};
-};
-
-&mdio {
-	status = "ok";
-	ethphy0: ethernet-phy@0 {
-		compatible = "marvell,88E1514", "marvell,88E1510", "ethernet-phy-ieee802.3-c22";
-		reg = <0>;
-	};
-
-	ethphy1: ethernet-phy@1 {
-		compatible = "marvell,88E1514", "marvell,88E1510", "ethernet-phy-ieee802.3-c22";
-		reg = <1>;
-	};
-};
diff --git a/arch/arm/boot/dts/k2l.dtsi b/arch/arm/boot/dts/k2l.dtsi
deleted file mode 100644
index 4446da7..0000000
--- a/arch/arm/boot/dts/k2l.dtsi
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright 2014 Texas Instruments, Inc.
- *
- * Keystone 2 Lamarr SoC specific device tree
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/ {
-	compatible = "ti,k2l", "ti,keystone";
-	model = "Texas Instruments Keystone 2 Lamarr SoC";
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		interrupt-parent = <&gic>;
-
-		cpu@0 {
-			compatible = "arm,cortex-a15";
-			device_type = "cpu";
-			reg = <0>;
-		};
-
-		cpu@1 {
-			compatible = "arm,cortex-a15";
-			device_type = "cpu";
-			reg = <1>;
-		};
-	};
-
-	soc {
-		/include/ "k2l-clocks.dtsi"
-
-		uart2: serial@02348400 {
-			compatible = "ns16550a";
-			current-speed = <115200>;
-			reg-shift = <2>;
-			reg-io-width = <4>;
-			reg = <0x02348400 0x100>;
-			clocks	= <&clkuart2>;
-			interrupts = <GIC_SPI 432 IRQ_TYPE_EDGE_RISING>;
-		};
-
-		uart3:	serial@02348800 {
-			compatible = "ns16550a";
-			current-speed = <115200>;
-			reg-shift = <2>;
-			reg-io-width = <4>;
-			reg = <0x02348800 0x100>;
-			clocks	= <&clkuart3>;
-			interrupts = <GIC_SPI 435 IRQ_TYPE_EDGE_RISING>;
-		};
-
-		dspgpio0: keystone_dsp_gpio@02620240 {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x240>;
-		};
-
-		dspgpio1: keystone_dsp_gpio@2620244 {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x244>;
-		};
-
-		dspgpio2: keystone_dsp_gpio@2620248 {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x248>;
-		};
-
-		dspgpio3: keystone_dsp_gpio@262024c {
-			compatible = "ti,keystone-dsp-gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-			gpio,syscon-dev = <&devctrl 0x24c>;
-		};
-
-		mdio: mdio@26200f00 {
-			compatible	= "ti,keystone_mdio", "ti,davinci_mdio";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0x26200f00 0x100>;
-			status = "disabled";
-			clocks = <&clkcpgmac>;
-			clock-names = "fck";
-			bus_freq	= <2500000>;
-		};
-		/include/ "k2l-netcp.dtsi"
-	};
-};
-
-&spi0 {
-       ti,davinci-spi-num-cs = <5>;
-};
-
-&spi1 {
-       ti,davinci-spi-num-cs = <3>;
-};
-
-&spi2 {
-       ti,davinci-spi-num-cs = <5>;
-       /* Pin muxed. Enabled and configured by Bootloader */
-       status = "disabled";
-};
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/keystone-k2e-clocks.dtsi
similarity index 100%
rename from arch/arm/boot/dts/k2e-clocks.dtsi
rename to arch/arm/boot/dts/keystone-k2e-clocks.dtsi
diff --git a/arch/arm/boot/dts/keystone-k2e-evm.dts b/arch/arm/boot/dts/keystone-k2e-evm.dts
new file mode 100644
index 0000000..4c32ebc
--- /dev/null
+++ b/arch/arm/boot/dts/keystone-k2e-evm.dts
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2013-2014 Texas Instruments, Inc.
+ *
+ * Keystone 2 Edison EVM device tree
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "keystone.dtsi"
+#include "keystone-k2e.dtsi"
+
+/ {
+	compatible = "ti,k2e-evm", "ti,k2e", "ti,keystone";
+	model = "Texas Instruments Keystone 2 Edison EVM";
+
+	soc {
+
+		clocks {
+			refclksys: refclksys {
+				#clock-cells = <0>;
+				compatible = "fixed-clock";
+				clock-frequency = <100000000>;
+				clock-output-names = "refclk-sys";
+			};
+
+			refclkpass: refclkpass {
+				#clock-cells = <0>;
+				compatible = "fixed-clock";
+				clock-frequency = <100000000>;
+				clock-output-names = "refclk-pass";
+			};
+
+			refclkddr3a: refclkddr3a {
+				#clock-cells = <0>;
+				compatible = "fixed-clock";
+				clock-frequency = <100000000>;
+				clock-output-names = "refclk-ddr3a";
+			};
+		};
+	};
+};
+
+&usb_phy {
+	status = "okay";
+};
+
+&usb {
+	status = "okay";
+};
+
+&usb1_phy {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
+
+&i2c0 {
+	dtt@50 {
+		compatible = "at,24c1024";
+		reg = <0x50>;
+	};
+};
+
+&aemif {
+	cs0 {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		clock-ranges;
+		ranges;
+
+		ti,cs-chipselect = <0>;
+		/* all timings in nanoseconds */
+		ti,cs-min-turnaround-ns = <12>;
+		ti,cs-read-hold-ns = <6>;
+		ti,cs-read-strobe-ns = <23>;
+		ti,cs-read-setup-ns = <9>;
+		ti,cs-write-hold-ns = <8>;
+		ti,cs-write-strobe-ns = <23>;
+		ti,cs-write-setup-ns = <8>;
+
+		nand@0,0 {
+			compatible = "ti,keystone-nand","ti,davinci-nand";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0 0 0x4000000
+			       1 0 0x0000100>;
+
+			ti,davinci-chipselect = <0>;
+			ti,davinci-mask-ale = <0x2000>;
+			ti,davinci-mask-cle = <0x4000>;
+			ti,davinci-mask-chipsel = <0>;
+			nand-ecc-mode = "hw";
+			ti,davinci-ecc-bits = <4>;
+			nand-on-flash-bbt;
+
+			partition@0 {
+				label = "u-boot";
+				reg = <0x0 0x100000>;
+				read-only;
+			};
+
+			partition@100000 {
+				label = "params";
+				reg = <0x100000 0x80000>;
+				read-only;
+			};
+
+			partition@180000 {
+				label = "ubifs";
+				reg = <0x180000 0x1FE80000>;
+			};
+		};
+	};
+};
+
+&spi0 {
+	nor_flash: n25q128a11@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "Micron,n25q128a11";
+		spi-max-frequency = <54000000>;
+		m25p,fast-read;
+		reg = <0>;
+
+		partition@0 {
+			label = "u-boot-spl";
+			reg = <0x0 0x80000>;
+			read-only;
+		};
+
+		partition@1 {
+			label = "misc";
+			reg = <0x80000 0xf80000>;
+		};
+	};
+};
+
+&mdio {
+	status = "ok";
+	ethphy0: ethernet-phy@0 {
+		compatible = "marvell,88E1514", "marvell,88E1510", "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+	};
+
+	ethphy1: ethernet-phy@1 {
+		compatible = "marvell,88E1514", "marvell,88E1510", "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
+};
diff --git a/arch/arm/boot/dts/k2e-netcp.dtsi b/arch/arm/boot/dts/keystone-k2e-netcp.dtsi
similarity index 100%
rename from arch/arm/boot/dts/k2e-netcp.dtsi
rename to arch/arm/boot/dts/keystone-k2e-netcp.dtsi
diff --git a/arch/arm/boot/dts/keystone-k2e.dtsi b/arch/arm/boot/dts/keystone-k2e.dtsi
new file mode 100644
index 0000000..96b349f
--- /dev/null
+++ b/arch/arm/boot/dts/keystone-k2e.dtsi
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2013-2014 Texas Instruments, Inc.
+ *
+ * Keystone 2 Edison soc device tree
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+	compatible = "ti,k2e", "ti,keystone";
+	model = "Texas Instruments Keystone 2 Edison SoC";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		interrupt-parent = <&gic>;
+
+		cpu@0 {
+			compatible = "arm,cortex-a15";
+			device_type = "cpu";
+			reg = <0>;
+		};
+
+		cpu@1 {
+			compatible = "arm,cortex-a15";
+			device_type = "cpu";
+			reg = <1>;
+		};
+
+		cpu@2 {
+			compatible = "arm,cortex-a15";
+			device_type = "cpu";
+			reg = <2>;
+		};
+
+		cpu@3 {
+			compatible = "arm,cortex-a15";
+			device_type = "cpu";
+			reg = <3>;
+		};
+	};
+
+	soc {
+		/include/ "keystone-k2e-clocks.dtsi"
+
+		usb: usb@2680000 {
+			interrupts = <GIC_SPI 152 IRQ_TYPE_EDGE_RISING>;
+			dwc3@2690000 {
+				interrupts = <GIC_SPI 152 IRQ_TYPE_EDGE_RISING>;
+			};
+		};
+
+		usb1_phy: usb_phy@2620750 {
+			compatible = "ti,keystone-usbphy";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x2620750 24>;
+			status = "disabled";
+		};
+
+		usb1: usb@25000000 {
+			compatible = "ti,keystone-dwc3";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x25000000 0x10000>;
+			clocks = <&clkusb1>;
+			clock-names = "usb";
+			interrupts = <GIC_SPI 414 IRQ_TYPE_EDGE_RISING>;
+			ranges;
+			dma-coherent;
+			dma-ranges;
+			status = "disabled";
+
+			dwc3@25010000 {
+				compatible = "synopsys,dwc3";
+				reg = <0x25010000 0x70000>;
+				interrupts = <GIC_SPI 414 IRQ_TYPE_EDGE_RISING>;
+				usb-phy = <&usb1_phy>, <&usb1_phy>;
+			};
+		};
+
+		dspgpio0: keystone_dsp_gpio@02620240 {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x240>;
+		};
+
+		pcie1: pcie@21020000 {
+			compatible = "ti,keystone-pcie","snps,dw-pcie";
+			clocks = <&clkpcie1>;
+			clock-names = "pcie";
+			#address-cells = <3>;
+			#size-cells = <2>;
+			reg =  <0x21021000 0x2000>, <0x21020000 0x1000>, <0x02620128 4>;
+			ranges = <0x81000000 0 0 0x23260000 0x4000 0x4000
+				0x82000000 0 0x60000000 0x60000000 0 0x10000000>;
+
+			status = "disabled";
+			device_type = "pci";
+			num-lanes = <2>;
+
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 7>;
+			interrupt-map = <0 0 0 1 &pcie_intc1 0>, /* INT A */
+					<0 0 0 2 &pcie_intc1 1>, /* INT B */
+					<0 0 0 3 &pcie_intc1 2>, /* INT C */
+					<0 0 0 4 &pcie_intc1 3>; /* INT D */
+
+			pcie_msi_intc1: msi-interrupt-controller {
+				interrupt-controller;
+				#interrupt-cells = <1>;
+				interrupt-parent = <&gic>;
+				interrupts = <GIC_SPI 377 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 378 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 379 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 380 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 381 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 382 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 383 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 384 IRQ_TYPE_EDGE_RISING>;
+			};
+
+			pcie_intc1: legacy-interrupt-controller {
+				interrupt-controller;
+				#interrupt-cells = <1>;
+				interrupt-parent = <&gic>;
+				interrupts = <GIC_SPI 373 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 374 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 375 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
+			};
+		};
+
+		mdio: mdio@24200f00 {
+			compatible	= "ti,keystone_mdio", "ti,davinci_mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x24200f00 0x100>;
+			status = "disabled";
+			clocks = <&clkcpgmac>;
+			clock-names = "fck";
+			bus_freq	= <2500000>;
+		};
+		/include/ "keystone-k2e-netcp.dtsi"
+	};
+};
diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/keystone-k2hk-clocks.dtsi
similarity index 100%
rename from arch/arm/boot/dts/k2hk-clocks.dtsi
rename to arch/arm/boot/dts/keystone-k2hk-clocks.dtsi
diff --git a/arch/arm/boot/dts/keystone-k2hk-evm.dts b/arch/arm/boot/dts/keystone-k2hk-evm.dts
new file mode 100644
index 0000000..b38b344
--- /dev/null
+++ b/arch/arm/boot/dts/keystone-k2hk-evm.dts
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2013-2014 Texas Instruments, Inc.
+ *
+ * Keystone 2 Kepler/Hawking EVM device tree
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "keystone.dtsi"
+#include "keystone-k2hk.dtsi"
+
+/ {
+	compatible =  "ti,k2hk-evm", "ti,k2hk", "ti,keystone";
+	model = "Texas Instruments Keystone 2 Kepler/Hawking EVM";
+
+	soc {
+		clocks {
+			refclksys: refclksys {
+				#clock-cells = <0>;
+				compatible = "fixed-clock";
+				clock-frequency = <122880000>;
+				clock-output-names = "refclk-sys";
+			};
+
+			refclkpass: refclkpass {
+				#clock-cells = <0>;
+				compatible = "fixed-clock";
+				clock-frequency = <122880000>;
+				clock-output-names = "refclk-pass";
+			};
+
+			refclkarm: refclkarm {
+				#clock-cells = <0>;
+				compatible = "fixed-clock";
+				clock-frequency = <125000000>;
+				clock-output-names = "refclk-arm";
+			};
+
+			refclkddr3a: refclkddr3a {
+				#clock-cells = <0>;
+				compatible = "fixed-clock";
+				clock-frequency = <100000000>;
+				clock-output-names = "refclk-ddr3a";
+			};
+
+			refclkddr3b: refclkddr3b {
+				#clock-cells = <0>;
+				compatible = "fixed-clock";
+				clock-frequency = <100000000>;
+				clock-output-names = "refclk-ddr3b";
+			};
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		debug1_1 {
+			label = "keystone:green:debug1";
+			gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>; /* 12 */
+		};
+
+		debug1_2 {
+			label = "keystone:red:debug1";
+			gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>; /* 13 */
+		};
+
+		debug2 {
+			label = "keystone:blue:debug2";
+			gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>; /* 14 */
+		};
+
+		debug3 {
+			label = "keystone:blue:debug3";
+			gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>; /* 15 */
+		};
+	};
+};
+
+&usb_phy {
+	status = "okay";
+};
+
+&usb {
+	status = "okay";
+};
+
+&aemif {
+	cs0 {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		clock-ranges;
+		ranges;
+
+		ti,cs-chipselect = <0>;
+		/* all timings in nanoseconds */
+		ti,cs-min-turnaround-ns = <12>;
+		ti,cs-read-hold-ns = <6>;
+		ti,cs-read-strobe-ns = <23>;
+		ti,cs-read-setup-ns = <9>;
+		ti,cs-write-hold-ns = <8>;
+		ti,cs-write-strobe-ns = <23>;
+		ti,cs-write-setup-ns = <8>;
+
+		nand@0,0 {
+			compatible = "ti,keystone-nand","ti,davinci-nand";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0 0 0x4000000
+			       1 0 0x0000100>;
+
+			ti,davinci-chipselect = <0>;
+			ti,davinci-mask-ale = <0x2000>;
+			ti,davinci-mask-cle = <0x4000>;
+			ti,davinci-mask-chipsel = <0>;
+			nand-ecc-mode = "hw";
+			ti,davinci-ecc-bits = <4>;
+			nand-on-flash-bbt;
+
+			partition@0 {
+				label = "u-boot";
+				reg = <0x0 0x100000>;
+				read-only;
+			};
+
+			partition@100000 {
+				label = "params";
+				reg = <0x100000 0x80000>;
+				read-only;
+			};
+
+			partition@180000 {
+				label = "ubifs";
+				reg = <0x180000 0x1fe80000>;
+			};
+		};
+	};
+};
+
+&i2c0 {
+	dtt@50 {
+		compatible = "at,24c1024";
+		reg = <0x50>;
+	};
+};
+
+&spi0 {
+	nor_flash: n25q128a11@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "Micron,n25q128a11";
+		spi-max-frequency = <54000000>;
+		m25p,fast-read;
+		reg = <0>;
+
+		partition@0 {
+			label = "u-boot-spl";
+			reg = <0x0 0x80000>;
+			read-only;
+		};
+
+		partition@1 {
+			label = "misc";
+			reg = <0x80000 0xf80000>;
+		};
+	};
+};
+
+&mdio {
+	status = "ok";
+	ethphy0: ethernet-phy@0 {
+		compatible = "marvell,88E1111", "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+	};
+
+	ethphy1: ethernet-phy@1 {
+		compatible = "marvell,88E1111", "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
+};
diff --git a/arch/arm/boot/dts/k2hk-netcp.dtsi b/arch/arm/boot/dts/keystone-k2hk-netcp.dtsi
similarity index 100%
rename from arch/arm/boot/dts/k2hk-netcp.dtsi
rename to arch/arm/boot/dts/keystone-k2hk-netcp.dtsi
diff --git a/arch/arm/boot/dts/keystone-k2hk.dtsi b/arch/arm/boot/dts/keystone-k2hk.dtsi
new file mode 100644
index 0000000..8f67fa8
--- /dev/null
+++ b/arch/arm/boot/dts/keystone-k2hk.dtsi
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2013-2014 Texas Instruments, Inc.
+ *
+ * Keystone 2 Kepler/Hawking soc specific device tree
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+	compatible = "ti,k2hk", "ti,keystone";
+	model = "Texas Instruments Keystone 2 Kepler/Hawking SoC";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		interrupt-parent = <&gic>;
+
+		cpu@0 {
+			compatible = "arm,cortex-a15";
+			device_type = "cpu";
+			reg = <0>;
+		};
+
+		cpu@1 {
+			compatible = "arm,cortex-a15";
+			device_type = "cpu";
+			reg = <1>;
+		};
+
+		cpu@2 {
+			compatible = "arm,cortex-a15";
+			device_type = "cpu";
+			reg = <2>;
+		};
+
+		cpu@3 {
+			compatible = "arm,cortex-a15";
+			device_type = "cpu";
+			reg = <3>;
+		};
+	};
+
+	soc {
+		/include/ "keystone-k2hk-clocks.dtsi"
+
+		dspgpio0: keystone_dsp_gpio@02620240 {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x240>;
+		};
+
+		dspgpio1: keystone_dsp_gpio@2620244 {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x244>;
+		};
+
+		dspgpio2: keystone_dsp_gpio@2620248 {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x248>;
+		};
+
+		dspgpio3: keystone_dsp_gpio@262024c {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x24c>;
+		};
+
+		dspgpio4: keystone_dsp_gpio@2620250 {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x250>;
+		};
+
+		dspgpio5: keystone_dsp_gpio@2620254 {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x254>;
+		};
+
+		dspgpio6: keystone_dsp_gpio@2620258 {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x258>;
+		};
+
+		dspgpio7: keystone_dsp_gpio@262025c {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x25c>;
+		};
+
+		mdio: mdio@02090300 {
+			compatible	= "ti,keystone_mdio", "ti,davinci_mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x02090300 0x100>;
+			status = "disabled";
+			clocks = <&clkcpgmac>;
+			clock-names = "fck";
+			bus_freq	= <2500000>;
+		};
+		/include/ "keystone-k2hk-netcp.dtsi"
+	};
+};
diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/keystone-k2l-clocks.dtsi
similarity index 100%
rename from arch/arm/boot/dts/k2l-clocks.dtsi
rename to arch/arm/boot/dts/keystone-k2l-clocks.dtsi
diff --git a/arch/arm/boot/dts/keystone-k2l-evm.dts b/arch/arm/boot/dts/keystone-k2l-evm.dts
new file mode 100644
index 0000000..7f9c2e9
--- /dev/null
+++ b/arch/arm/boot/dts/keystone-k2l-evm.dts
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2014 Texas Instruments, Inc.
+ *
+ * Keystone 2 Lamarr EVM device tree
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "keystone.dtsi"
+#include "keystone-k2l.dtsi"
+
+/ {
+	compatible = "ti,k2l-evm", "ti,k2l", "ti,keystone";
+	model = "Texas Instruments Keystone 2 Lamarr EVM";
+
+	soc {
+		clocks {
+			refclksys: refclksys {
+				#clock-cells = <0>;
+				compatible = "fixed-clock";
+				clock-frequency = <122880000>;
+				clock-output-names = "refclk-sys";
+			};
+		};
+	};
+};
+
+&usb_phy {
+	status = "okay";
+};
+
+&usb {
+	status = "okay";
+};
+
+&i2c0 {
+	dtt@50 {
+		compatible = "at,24c1024";
+		reg = <0x50>;
+	};
+};
+
+&aemif {
+	cs0 {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		clock-ranges;
+		ranges;
+
+		ti,cs-chipselect = <0>;
+		/* all timings in nanoseconds */
+		ti,cs-min-turnaround-ns = <12>;
+		ti,cs-read-hold-ns = <6>;
+		ti,cs-read-strobe-ns = <23>;
+		ti,cs-read-setup-ns = <9>;
+		ti,cs-write-hold-ns = <8>;
+		ti,cs-write-strobe-ns = <23>;
+		ti,cs-write-setup-ns = <8>;
+
+		nand@0,0 {
+			compatible = "ti,keystone-nand","ti,davinci-nand";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0 0 0x4000000
+			       1 0 0x0000100>;
+
+			ti,davinci-chipselect = <0>;
+			ti,davinci-mask-ale = <0x2000>;
+			ti,davinci-mask-cle = <0x4000>;
+			ti,davinci-mask-chipsel = <0>;
+			nand-ecc-mode = "hw";
+			ti,davinci-ecc-bits = <4>;
+			nand-on-flash-bbt;
+
+			partition@0 {
+				label = "u-boot";
+				reg = <0x0 0x100000>;
+				read-only;
+			};
+
+			partition@100000 {
+				label = "params";
+				reg = <0x100000 0x80000>;
+				read-only;
+			};
+
+			partition@180000 {
+				label = "ubifs";
+				reg = <0x180000 0x7FE80000>;
+			};
+		};
+	};
+};
+
+&spi0 {
+	nor_flash: n25q128a11@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "Micron,n25q128a11";
+		spi-max-frequency = <54000000>;
+		m25p,fast-read;
+		reg = <0>;
+
+		partition@0 {
+			label = "u-boot-spl";
+			reg = <0x0 0x80000>;
+			read-only;
+		};
+
+		partition@1 {
+			label = "misc";
+			reg = <0x80000 0xf80000>;
+		};
+	};
+};
+
+&mdio {
+	status = "ok";
+	ethphy0: ethernet-phy@0 {
+		compatible = "marvell,88E1514", "marvell,88E1510", "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+	};
+
+	ethphy1: ethernet-phy@1 {
+		compatible = "marvell,88E1514", "marvell,88E1510", "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+	};
+};
diff --git a/arch/arm/boot/dts/k2l-netcp.dtsi b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
similarity index 100%
rename from arch/arm/boot/dts/k2l-netcp.dtsi
rename to arch/arm/boot/dts/keystone-k2l-netcp.dtsi
diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
new file mode 100644
index 0000000..ff22ffc
--- /dev/null
+++ b/arch/arm/boot/dts/keystone-k2l.dtsi
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2014 Texas Instruments, Inc.
+ *
+ * Keystone 2 Lamarr SoC specific device tree
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+	compatible = "ti,k2l", "ti,keystone";
+	model = "Texas Instruments Keystone 2 Lamarr SoC";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		interrupt-parent = <&gic>;
+
+		cpu@0 {
+			compatible = "arm,cortex-a15";
+			device_type = "cpu";
+			reg = <0>;
+		};
+
+		cpu@1 {
+			compatible = "arm,cortex-a15";
+			device_type = "cpu";
+			reg = <1>;
+		};
+	};
+
+	soc {
+		/include/ "keystone-k2l-clocks.dtsi"
+
+		uart2: serial@02348400 {
+			compatible = "ns16550a";
+			current-speed = <115200>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			reg = <0x02348400 0x100>;
+			clocks	= <&clkuart2>;
+			interrupts = <GIC_SPI 432 IRQ_TYPE_EDGE_RISING>;
+		};
+
+		uart3:	serial@02348800 {
+			compatible = "ns16550a";
+			current-speed = <115200>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			reg = <0x02348800 0x100>;
+			clocks	= <&clkuart3>;
+			interrupts = <GIC_SPI 435 IRQ_TYPE_EDGE_RISING>;
+		};
+
+		dspgpio0: keystone_dsp_gpio@02620240 {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x240>;
+		};
+
+		dspgpio1: keystone_dsp_gpio@2620244 {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x244>;
+		};
+
+		dspgpio2: keystone_dsp_gpio@2620248 {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x248>;
+		};
+
+		dspgpio3: keystone_dsp_gpio@262024c {
+			compatible = "ti,keystone-dsp-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio,syscon-dev = <&devctrl 0x24c>;
+		};
+
+		mdio: mdio@26200f00 {
+			compatible	= "ti,keystone_mdio", "ti,davinci_mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x26200f00 0x100>;
+			status = "disabled";
+			clocks = <&clkcpgmac>;
+			clock-names = "fck";
+			bus_freq	= <2500000>;
+		};
+		/include/ "keystone-k2l-netcp.dtsi"
+	};
+};
+
+&spi0 {
+       ti,davinci-spi-num-cs = <5>;
+};
+
+&spi1 {
+       ti,davinci-spi-num-cs = <3>;
+};
+
+&spi2 {
+       ti,davinci-spi-num-cs = <5>;
+       /* Pin muxed. Enabled and configured by Bootloader */
+       status = "disabled";
+};
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index 3f27282..e34b226 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -20,6 +20,9 @@
 
 	aliases {
 		serial0	= &uart0;
+		spi0 = &spi0;
+		spi1 = &spi1;
+		spi2 = &spi2;
 	};
 
 	memory {
@@ -59,6 +62,14 @@
 			     <GIC_SPI 23 IRQ_TYPE_EDGE_RISING>;
 	};
 
+	psci {
+		compatible	= "arm,psci";
+		method		= "smc";
+		cpu_suspend	= <0x84000001>;
+		cpu_off		= <0x84000002>;
+		cpu_on		= <0x84000003>;
+	};
+
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/kirkwood-6192.dtsi b/arch/arm/boot/dts/kirkwood-6192.dtsi
index 9e6e9e2..d573e03 100644
--- a/arch/arm/boot/dts/kirkwood-6192.dtsi
+++ b/arch/arm/boot/dts/kirkwood-6192.dtsi
@@ -1,6 +1,6 @@
 / {
-	mbus {
-		pciec: pcie-controller {
+	mbus@f1000000 {
+		pciec: pcie-controller@82000000 {
 			compatible = "marvell,kirkwood-pcie";
 			status = "disabled";
 			device_type = "pci";
diff --git a/arch/arm/boot/dts/kirkwood-6281.dtsi b/arch/arm/boot/dts/kirkwood-6281.dtsi
index 7dc7d67..748d0b6 100644
--- a/arch/arm/boot/dts/kirkwood-6281.dtsi
+++ b/arch/arm/boot/dts/kirkwood-6281.dtsi
@@ -1,6 +1,6 @@
 / {
-	mbus {
-		pciec: pcie-controller {
+	mbus@f1000000 {
+		pciec: pcie-controller@82000000 {
 			compatible = "marvell,kirkwood-pcie";
 			status = "disabled";
 			device_type = "pci";
diff --git a/arch/arm/boot/dts/kirkwood-6282.dtsi b/arch/arm/boot/dts/kirkwood-6282.dtsi
index 4680eec..bb63d2d 100644
--- a/arch/arm/boot/dts/kirkwood-6282.dtsi
+++ b/arch/arm/boot/dts/kirkwood-6282.dtsi
@@ -1,6 +1,6 @@
 / {
-	mbus {
-		pciec: pcie-controller {
+	mbus@f1000000 {
+		pciec: pcie-controller@82000000 {
 			compatible = "marvell,kirkwood-pcie";
 			status = "disabled";
 			device_type = "pci";
diff --git a/arch/arm/boot/dts/kirkwood-98dx4122.dtsi b/arch/arm/boot/dts/kirkwood-98dx4122.dtsi
index 9e1f741..720c210 100644
--- a/arch/arm/boot/dts/kirkwood-98dx4122.dtsi
+++ b/arch/arm/boot/dts/kirkwood-98dx4122.dtsi
@@ -1,6 +1,6 @@
 / {
-	mbus {
-		pciec: pcie-controller {
+	mbus@f1000000 {
+		pciec: pcie-controller@82000000 {
 			compatible = "marvell,kirkwood-pcie";
 			status = "disabled";
 			device_type = "pci";
diff --git a/arch/arm/boot/dts/kirkwood-b3.dts b/arch/arm/boot/dts/kirkwood-b3.dts
index d2936ad..d091ecb 100644
--- a/arch/arm/boot/dts/kirkwood-b3.dts
+++ b/arch/arm/boot/dts/kirkwood-b3.dts
@@ -33,17 +33,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			/* Wifi model has Atheros chipset on pcie port */
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pmx_button_power: pmx-button-power {
@@ -199,3 +188,11 @@
 	};
 };
 
+/* Wifi model has Atheros chipset on pcie port */
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-blackarmor-nas220.dts b/arch/arm/boot/dts/kirkwood-blackarmor-nas220.dts
index fa02a9a..f16a73e 100644
--- a/arch/arm/boot/dts/kirkwood-blackarmor-nas220.dts
+++ b/arch/arm/boot/dts/kirkwood-blackarmor-nas220.dts
@@ -36,13 +36,13 @@
 	gpio_keys {
 		compatible = "gpio-keys";
 
-		button@1{
+		reset {
 			label = "Reset";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio0 29 GPIO_ACTIVE_HIGH>;
 		};
 
-		button@2{
+		button {
 			label = "Power";
 			linux,code = <KEY_SLEEP>;
 			gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/kirkwood-cloudbox.dts b/arch/arm/boot/dts/kirkwood-cloudbox.dts
index 7ec7656..555b7e4 100644
--- a/arch/arm/boot/dts/kirkwood-cloudbox.dts
+++ b/arch/arm/boot/dts/kirkwood-cloudbox.dts
@@ -60,7 +60,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		button@1 {
+		power {
 			label = "Power push button";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/kirkwood-db-88f6281.dts b/arch/arm/boot/dts/kirkwood-db-88f6281.dts
index c39dd76..aee6f02 100644
--- a/arch/arm/boot/dts/kirkwood-db-88f6281.dts
+++ b/arch/arm/boot/dts/kirkwood-db-88f6281.dts
@@ -17,14 +17,12 @@
 / {
 	model = "Marvell DB-88F6281-BP Development Board";
 	compatible = "marvell,db-88f6281-bp", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
+&pciec {
+        status = "okay";
+};
 
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
+&pcie0 {
+	status = "okay";
 };
diff --git a/arch/arm/boot/dts/kirkwood-db-88f6282.dts b/arch/arm/boot/dts/kirkwood-db-88f6282.dts
index 701c6b6..e8b23e1 100644
--- a/arch/arm/boot/dts/kirkwood-db-88f6282.dts
+++ b/arch/arm/boot/dts/kirkwood-db-88f6282.dts
@@ -17,18 +17,16 @@
 / {
 	model = "Marvell DB-88F6282-BP Development Board";
 	compatible = "marvell,db-88f6282-bp", "marvell,kirkwood-88f6282", "marvell,kirkwood";
+};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
+&pciec {
+        status = "okay";
+};
 
-			pcie@1,0 {
-				status = "okay";
-			};
+&pcie0 {
+	status = "okay";
+};
 
-			pcie@2,0 {
-				status = "okay";
-			};
-		};
-	};
+&pcie1 {
+	status = "okay";
 };
diff --git a/arch/arm/boot/dts/kirkwood-dir665.dts b/arch/arm/boot/dts/kirkwood-dir665.dts
index 0473fcc..41acbb6 100644
--- a/arch/arm/boot/dts/kirkwood-dir665.dts
+++ b/arch/arm/boot/dts/kirkwood-dir665.dts
@@ -25,16 +25,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pinctrl-0 =< &pmx_led_usb
@@ -203,7 +193,7 @@
 		};
 	};
 
-	dsa@0 {
+	dsa {
 		compatible = "marvell,dsa";
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -276,3 +266,11 @@
 &rtc {
 	status = "disabled";
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index 113dcf0..d8fca9d 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -13,17 +13,17 @@
 			     &pmx_button_reset>;
 		pinctrl-names = "default";
 
-		button@1 {
+		power {
 			label = "Power button";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
 		};
-		button@2 {
+		eject {
 			label = "USB unmount button";
 			linux,code = <KEY_EJECTCD>;
 			gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
 		};
-		button@3 {
+		reset {
 			label = "Reset button";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/kirkwood-ds111.dts b/arch/arm/boot/dts/kirkwood-ds111.dts
index 61f47fb..a85a466 100644
--- a/arch/arm/boot/dts/kirkwood-ds111.dts
+++ b/arch/arm/boot/dts/kirkwood-ds111.dts
@@ -40,6 +40,6 @@
 	status = "okay";
 };
 
-&pcie2 {
+&pcie1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/kirkwood-ds112.dts b/arch/arm/boot/dts/kirkwood-ds112.dts
index b84af3d..6cef4bd 100644
--- a/arch/arm/boot/dts/kirkwood-ds112.dts
+++ b/arch/arm/boot/dts/kirkwood-ds112.dts
@@ -44,6 +44,10 @@
 	status = "okay";
 };
 
-&pcie2 {
+&pciec {
+        status = "okay";
+};
+
+&pcie1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/kirkwood-ds212.dts b/arch/arm/boot/dts/kirkwood-ds212.dts
index 99afd46..7f32e7a 100644
--- a/arch/arm/boot/dts/kirkwood-ds212.dts
+++ b/arch/arm/boot/dts/kirkwood-ds212.dts
@@ -43,6 +43,6 @@
 	status = "okay";
 };
 
-&pcie2 {
+&pcie1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/kirkwood-ds411.dts b/arch/arm/boot/dts/kirkwood-ds411.dts
index 623cd4a..72e5830 100644
--- a/arch/arm/boot/dts/kirkwood-ds411.dts
+++ b/arch/arm/boot/dts/kirkwood-ds411.dts
@@ -48,6 +48,10 @@
 	status = "okay";
 };
 
-&pcie2 {
+&pciec {
+        status = "okay";
+};
+
+&pcie1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/kirkwood-ds411slim.dts b/arch/arm/boot/dts/kirkwood-ds411slim.dts
index a0a1fad..aaaf31b 100644
--- a/arch/arm/boot/dts/kirkwood-ds411slim.dts
+++ b/arch/arm/boot/dts/kirkwood-ds411slim.dts
@@ -44,6 +44,6 @@
 	status = "okay";
 };
 
-&pcie2 {
+&pcie1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
index bfa5edd..ef84d86 100644
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -62,12 +62,12 @@
 		pinctrl-0 = <&pmx_button_reset &pmx_button_usb_copy>;
 		pinctrl-names = "default";
 
-		button@1 {
+		copy {
 			label = "USB Copy";
 			linux,code = <KEY_COPY>;
 			gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
 		};
-		button@2 {
+		reset {
 			label = "Reset";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio0 28 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 38e31d1..d25184a 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -19,16 +19,6 @@
 		linux,initrd-end   = <0x4800000>;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pmx_button_reset: pmx-button-reset {
@@ -136,13 +126,13 @@
 		pinctrl-0 = < &pmx_button_reset &pmx_button_otb >;
 		pinctrl-names = "default";
 
-		button@1 {
+		otb {
 			label = "OTB Button";
 			linux,code = <KEY_COPY>;
 			gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
 			debounce-interval = <100>;
 		};
-		button@2 {
+		reset {
 			label = "Reset";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
@@ -194,3 +184,11 @@
 		phy-handle = <&ethphy0>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-km_common.dtsi b/arch/arm/boot/dts/kirkwood-km_common.dtsi
index 8367c77..7962bde 100644
--- a/arch/arm/boot/dts/kirkwood-km_common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-km_common.dtsi
@@ -4,16 +4,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pinctrl-0 = < &pmx_i2c_gpio_sda &pmx_i2c_gpio_scl >;
@@ -34,7 +24,7 @@
 		};
 	};
 
-	i2c@0 {
+	i2c {
 		compatible = "i2c-gpio";
 		gpios = < &gpio0 8 GPIO_ACTIVE_HIGH		/* sda */
 			  &gpio0 9 GPIO_ACTIVE_HIGH>;		/* scl */
@@ -46,3 +36,11 @@
 	status = "okay";
 	chip-delay = <25>;
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-laplug.dts b/arch/arm/boot/dts/kirkwood-laplug.dts
index 2442566..1b0f070 100644
--- a/arch/arm/boot/dts/kirkwood-laplug.dts
+++ b/arch/arm/boot/dts/kirkwood-laplug.dts
@@ -27,15 +27,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		serial@12000 {
 			status = "okay";
@@ -62,7 +53,7 @@
 	gpio_keys {
 		compatible = "gpio-keys";
 
-		button@1{
+		power {
 			label = "Power push button";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio1 0 GPIO_ACTIVE_HIGH>;
@@ -169,3 +160,11 @@
 		phy-handle = <&ethphy0>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-linkstation.dtsi b/arch/arm/boot/dts/kirkwood-linkstation.dtsi
index 69061b6..36c54c9 100644
--- a/arch/arm/boot/dts/kirkwood-linkstation.dtsi
+++ b/arch/arm/boot/dts/kirkwood-linkstation.dtsi
@@ -49,15 +49,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pmx_power_hdd0: pmx-power-hdd0 {
@@ -200,3 +191,11 @@
 		};
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-linksys-viper.dts b/arch/arm/boot/dts/kirkwood-linksys-viper.dts
new file mode 100644
index 0000000..345fcac
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-linksys-viper.dts
@@ -0,0 +1,240 @@
+/*
+ * kirkwood-viper.dts - Device Tree file for Linksys viper (E4200v2 / EA4500)
+ *
+ * (c) 2013 Jonas Gorski <jogo@openwrt.org>
+ * (c) 2013 Deutsche Telekom Innovation Laboratories
+ * (c) 2014 Luka Perkov <luka@openwrt.org>
+ * (c) 2014 Randy C. Will <randall.will@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+
+#include "kirkwood.dtsi"
+#include "kirkwood-6282.dtsi"
+
+/ {
+	model = "Linksys Viper (E4200v2 / EA4500)";
+	compatible = "linksys,viper", "marvell,kirkwood-88f6282", "marvell,kirkwood";
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x8000000>;
+	};
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = < &pmx_btn_wps &pmx_btn_reset >;
+		pinctrl-names = "default";
+
+		wps {
+			label = "WPS Button";
+			linux,code = <KEY_WPS_BUTTON>;
+			gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+		};
+
+		reset {
+			label = "Reset Button";
+			linux,code = <KEY_RESTART>;
+			gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	gpio-leds {
+		compatible = "gpio-leds";
+		pinctrl-0 = < &pmx_led_white_health &pmx_led_white_pulse >;
+		pinctrl-names = "default";
+
+		white-health {
+			label = "viper:white:health";
+			gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>;
+		};
+
+		white-pulse {
+			label = "viper:white:pulse";
+			gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	dsa {
+		compatible = "marvell,dsa";
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		dsa,ethernet = <&eth0port>;
+		dsa,mii-bus = <&mdio>;
+
+		switch@16,0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <16 0>;	/* MDIO address 16, switch 0 in tree */
+
+			port@0 {
+				reg = <0>;
+				label = "ethernet1";
+			};
+
+			port@1 {
+				reg = <1>;
+				label = "ethernet2";
+			};
+
+			port@2 {
+				reg = <2>;
+				label = "ethernet3";
+			};
+
+			port@3 {
+				reg = <3>;
+				label = "ethernet4";
+			};
+
+			port@4 {
+				reg = <4>;
+				label = "internet";
+			};
+
+			port@5 {
+				reg = <5>;
+				label = "cpu";
+			};
+		};
+	};
+};
+
+&pinctrl {
+	pmx_led_white_health: pmx-led-white-health {
+		marvell,pins = "mpp7";
+		marvell,function = "gpo";
+	};
+	pmx_led_white_pulse: pmx-led-white-pulse {
+		marvell,pins = "mpp14";
+		marvell,function = "gpio";
+	};
+	pmx_btn_wps: pmx-btn-wps {
+		marvell,pins = "mpp47";
+		marvell,function = "gpio";
+	};
+	pmx_btn_reset: pmx-btn-reset {
+		marvell,pins = "mpp48";
+		marvell,function = "gpio";
+	};
+};
+
+&nand {
+	status = "okay";
+	pinctrl-0 = <&pmx_nand>;
+	pinctrl-names = "default";
+
+	partitions {
+		compatible = "fixed-partitions";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "u-boot";
+			reg = <0x0 0x80000>;
+			read-only;
+		};
+
+		partition@80000 {
+			label = "u_env";
+			reg = <0x80000 0x20000>;
+		};
+
+		partition@A0000 {
+			label = "s_env";
+			reg = <0xA0000 0x20000>;
+		};
+
+		partition@200000 {
+			label = "kernel";
+			reg = <0x200000 0x2A0000>;
+		};
+
+		partition@4A0000 {
+			label = "rootfs";
+			reg = <0x4A0000 0x1760000>;
+		};
+
+		partition@1C00000 {
+			label = "alt_kernel";
+			reg = <0x1C00000 0x2A0000>;
+		};
+
+		partition@1EA0000 {
+			label = "alt_rootfs";
+			reg = <0x1EA0000 0x1760000>;
+		};
+
+		partition@3600000 {
+			label = "syscfg";
+			reg = <0x3600000 0x4A00000>;
+		};
+
+		partition@C0000 {
+			label = "unused";
+			reg = <0xC0000 0x140000>;
+		};
+
+	};
+};
+
+&pciec {
+	status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
+
+&pcie1 {
+	status = "okay";
+};
+
+&mdio {
+	status = "okay";
+};
+
+&uart0 {
+	status = "okay";
+};
+
+/* eth0 is connected to a Marvell 88E6171 switch, without a PHY. So set
+ * fixed speed and duplex.
+ */
+&eth0 {
+	status = "okay";
+	ethernet0-port@0 {
+		speed = <1000>;
+		duplex = <1>;
+	};
+};
+
+/* eth1 is connected to the switch at port 6. However DSA only supports a
+ * single CPU port. So leave this port disabled to avoid confusion.
+ */
+&eth1 {
+	status = "disabled";
+};
+
+/* There is no battery on the board, so the RTC does not keep
+ * time when there is no power, making it useless.
+ */
+&rtc {
+	status = "disabled";
+};
+
diff --git a/arch/arm/boot/dts/kirkwood-lsxl.dtsi b/arch/arm/boot/dts/kirkwood-lsxl.dtsi
index 1d6528d..8b7c6ce 100644
--- a/arch/arm/boot/dts/kirkwood-lsxl.dtsi
+++ b/arch/arm/boot/dts/kirkwood-lsxl.dtsi
@@ -107,18 +107,18 @@
 			     &pmx_power_auto_switch>;
 		pinctrl-names = "default";
 
-		button@1 {
+		option {
 			label = "Function Button";
 			linux,code = <KEY_OPTION>;
 			gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
 		};
-		button@2 {
+		reserved {
 			label = "Power-on Switch";
 			linux,code = <KEY_RESERVED>;
 			linux,input-type = <5>;
 			gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
 		};
-		button@3 {
+		power {
 			label = "Power-auto Switch";
 			linux,code = <KEY_ESC>;
 			linux,input-type = <5>;
@@ -133,28 +133,28 @@
 			     &pmx_led_function_blue>;
 		pinctrl-names = "default";
 
-		led@1 {
+		func_blue {
 			label = "lsxl:blue:func";
 			gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 		};
 
-		led@2 {
+		alarm {
 			label = "lsxl:red:alarm";
 			gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
 		};
 
-		led@3 {
+		info {
 			label = "lsxl:amber:info";
 			gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
 		};
 
-		led@4 {
+		power {
 			label = "lsxl:blue:power";
 			gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
 			default-state = "keep";
 		};
 
-		led@5 {
+		func_red {
 			label = "lsxl:red:func";
 			gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
 		};
diff --git a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts
index f3a9918..aa413b0 100644
--- a/arch/arm/boot/dts/kirkwood-mplcec4.dts
+++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts
@@ -17,16 +17,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pmx_led_health: pmx-led-health {
@@ -215,3 +205,11 @@
 		phy-handle = <&ethphy1>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
index b7e7d78..172a38c 100644
--- a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
+++ b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
@@ -31,16 +31,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-        };
-
 	ocp@f1000000 {
 		pin-controller@10000 {
 			pmx_usb_led: pmx-usb-led {
@@ -109,19 +99,19 @@
 		pinctrl-0 = <&pmx_keys>;
 		pinctrl-names = "default";
 
-		button@1 {
+		restart {
 			label = "SWR Button";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
 		};
-		button@2 {
+		wps {
 			label = "WPS Button";
 			linux,code = <KEY_WPS_BUTTON>;
 			gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
 		};
 	};
 
-	dsa@0 {
+	dsa {
 		compatible = "marvell,dsa";
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -179,3 +169,11 @@
 		duplex = <1>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-nas2big.dts b/arch/arm/boot/dts/kirkwood-nas2big.dts
index 7427ec5..f53bcac 100644
--- a/arch/arm/boot/dts/kirkwood-nas2big.dts
+++ b/arch/arm/boot/dts/kirkwood-nas2big.dts
@@ -28,16 +28,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		rtc@10300 {
 			/* The on-chip RTC is not powered (no supercap). */
@@ -141,3 +131,11 @@
 		reg = <0x9100000 0x6f00000>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts b/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts
index fd733c6..c0413b6 100644
--- a/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts
+++ b/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts
@@ -28,16 +28,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pmx_button_power: pmx-button-power {
@@ -193,7 +183,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		usb3_regulator: usb3-regulator {
+		usb3_regulator: usb3-regulator@1 {
 			compatible = "regulator-fixed";
 			reg = <1>;
 			regulator-name = "USB 3.0 Power";
@@ -251,3 +241,11 @@
 		phy-handle = <&ethphy0>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-netgear_readynas_nv+_v2.dts b/arch/arm/boot/dts/kirkwood-netgear_readynas_nv+_v2.dts
index b514d64..2bfc6cf 100644
--- a/arch/arm/boot/dts/kirkwood-netgear_readynas_nv+_v2.dts
+++ b/arch/arm/boot/dts/kirkwood-netgear_readynas_nv+_v2.dts
@@ -28,18 +28,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			/* Connected to NEC uPD720200 USB 3.0 controller */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pmx_button_power: pmx-button-power {
@@ -205,7 +193,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		usb3_regulator: usb3-regulator {
+		usb3_regulator: usb3-regulator@1 {
 			compatible = "regulator-fixed";
 			reg = <1>;
 			regulator-name = "USB 3.0 Power";
@@ -265,3 +253,12 @@
 		phy-handle = <&ethphy0>;
 	};
 };
+
+/* Connected to NEC uPD720200 USB 3.0 controller */
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-netxbig.dtsi b/arch/arm/boot/dts/kirkwood-netxbig.dtsi
index 62515a8..52b58fe 100644
--- a/arch/arm/boot/dts/kirkwood-netxbig.dtsi
+++ b/arch/arm/boot/dts/kirkwood-netxbig.dtsi
@@ -59,22 +59,22 @@
 		#size-cells = <0>;
 
 		/*
-		 * button@1 and button@2 represent a three position rocker
+		 * esc and power represent a three position rocker
 		 * switch. Thus the conventional KEY_POWER does not fit
 		 */
-		button@1 {
+		esc {
 			label = "Back power switch (on|auto)";
 			linux,code = <KEY_ESC>;
 			linux,input-type = <5>;
 			gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
 		};
-		button@2 {
+		power {
 			label = "Back power switch (auto|off)";
 			linux,code = <KEY_1>;
 			linux,input-type = <5>;
 			gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
 		};
-		button@3 {
+		option {
 			label = "Function button";
 			linux,code = <KEY_OPTION>;
 			gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
index e832b63..282605f 100644
--- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
@@ -57,7 +57,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		button@1 {
+		power {
 			label = "Power push button";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio1 0 GPIO_ACTIVE_HIGH>;
@@ -83,7 +83,7 @@
 &mdio {
 	status = "okay";
 
-	ethphy0: ethernet-phy {
+	ethphy0: ethernet-phy@X {
                 /* overwrite reg property in board file */
 	};
 };
diff --git a/arch/arm/boot/dts/kirkwood-nsa310.dts b/arch/arm/boot/dts/kirkwood-nsa310.dts
index 6139df0..0b69ee4 100644
--- a/arch/arm/boot/dts/kirkwood-nsa310.dts
+++ b/arch/arm/boot/dts/kirkwood-nsa310.dts
@@ -15,16 +15,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pinctrl-0 = <&pmx_unknown>;
@@ -138,3 +128,11 @@
 		};
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-nsa320.dts b/arch/arm/boot/dts/kirkwood-nsa320.dts
index 24f686d1..6ab104b 100644
--- a/arch/arm/boot/dts/kirkwood-nsa320.dts
+++ b/arch/arm/boot/dts/kirkwood-nsa320.dts
@@ -27,16 +27,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pinctrl-names = "default";
@@ -193,10 +183,19 @@
 		};
 	};
 
+	hwmon {
+		compatible = "zyxel,nsa320-mcu";
+		pinctrl-0 = <&pmx_mcu_data &pmx_mcu_clk &pmx_mcu_act>;
+		pinctrl-names = "default";
+
+		data-gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+		clk-gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
+		act-gpios = <&gpio0 17 GPIO_ACTIVE_LOW>;
+	};
+
 	/* The following pins are currently not assigned to a driver,
 	   some of them should be configured as inputs.
-	pinctrl-0 = <&pmx_mcu_data &pmx_mcu_clk &pmx_mcu_act
-		     &pmx_htp &pmx_vid_b1
+	pinctrl-0 = <&pmx_htp &pmx_vid_b1
 		     &pmx_power_resume_data &pmx_power_resume_clk>; */
 };
 
@@ -213,3 +212,11 @@
 		phy-handle = <&ethphy0>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-nsa325.dts b/arch/arm/boot/dts/kirkwood-nsa325.dts
index bc4ec93..36c6481 100644
--- a/arch/arm/boot/dts/kirkwood-nsa325.dts
+++ b/arch/arm/boot/dts/kirkwood-nsa325.dts
@@ -28,16 +28,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pinctrl-names = "default";
@@ -236,3 +226,10 @@
 	};
 };
 
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-nsa3x0-common.dtsi b/arch/arm/boot/dts/kirkwood-nsa3x0-common.dtsi
index 2075a2e..e09b79a 100644
--- a/arch/arm/boot/dts/kirkwood-nsa3x0-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-nsa3x0-common.dtsi
@@ -4,16 +4,6 @@
 / {
 	model = "ZyXEL NSA310";
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 
@@ -77,17 +67,17 @@
 		pinctrl-0 = <&pmx_btn_reset &pmx_btn_copy &pmx_btn_power>;
 		pinctrl-names = "default";
 
-		button@1 {
+		power {
 			label = "Power Button";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;
 		};
-		button@2 {
+		copy {
 			label = "Copy Button";
 			linux,code = <KEY_COPY>;
 			gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
 		};
-		button@3 {
+		reset {
 			label = "Reset Button";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
@@ -157,3 +147,11 @@
 		reg = <0x5040000 0x2fc0000>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
index fb9dc22..0db0e3e 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
@@ -117,7 +117,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		button@1 {
+		power {
 			label = "Init Button";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
index d5e3bc5..cf2f524 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
@@ -135,7 +135,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		button@1 {
+		button {
 			label = "Init Button";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/kirkwood-openrd.dtsi b/arch/arm/boot/dts/kirkwood-openrd.dtsi
index 24f1d30..e4ecab1 100644
--- a/arch/arm/boot/dts/kirkwood-openrd.dtsi
+++ b/arch/arm/boot/dts/kirkwood-openrd.dtsi
@@ -25,16 +25,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pinctrl-0 = <&pmx_select28 &pmx_sdio_cd &pmx_select34>;
@@ -125,3 +115,7 @@
 		reg = <0x0600000 0x1FA00000>;
 	};
 };
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts b/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts
index 8082d64..b2f2623 100644
--- a/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts
+++ b/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts
@@ -33,7 +33,7 @@
 		pinctrl-0 = <&pmx_button_eject>;
 		pinctrl-names = "default";
 
-		button@1 {
+		eject {
 			debounce_interval = <50>;
 			wakeup-source;
 			linux,code = <KEY_EJECTCD>;
diff --git a/arch/arm/boot/dts/kirkwood-rd88f6192.dts b/arch/arm/boot/dts/kirkwood-rd88f6192.dts
index e0b9593..b8af907 100644
--- a/arch/arm/boot/dts/kirkwood-rd88f6192.dts
+++ b/arch/arm/boot/dts/kirkwood-rd88f6192.dts
@@ -29,16 +29,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pinctrl-0 = <&pmx_usb_power>;
@@ -108,4 +98,12 @@
         ethernet0-port@0 {
                 phy-handle = <&ethphy0>;
         };
-};
\ No newline at end of file
+};
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281-a.dts b/arch/arm/boot/dts/kirkwood-rd88f6281-a.dts
index f2e08b3..6f771a9 100644
--- a/arch/arm/boot/dts/kirkwood-rd88f6281-a.dts
+++ b/arch/arm/boot/dts/kirkwood-rd88f6281-a.dts
@@ -19,7 +19,7 @@
 	model = "Marvell RD88f6281 Reference design, with A0 or higher SoC";
 	compatible = "marvell,rd88f6281-a", "marvell,rd88f6281","marvell,kirkwood-88f6281", "marvell,kirkwood";
 
-	dsa@0 {
+	dsa {
 		switch@0 {
 			reg = <10 0>;	 /* MDIO address 10, switch 0 in tree */
 		};
diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts b/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts
index f4272b6..1a79738 100644
--- a/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts
+++ b/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts
@@ -19,7 +19,7 @@
 	model = "Marvell RD88f6281 Reference design, with Z0 SoC";
 	compatible = "marvell,rd88f6281-z0", "marvell,rd88f6281","marvell,kirkwood-88f6281", "marvell,kirkwood";
 
-	dsa@0 {
+	dsa {
 		switch@0 {
 			reg = <0 0>;    /* MDIO address 0, switch 0 in tree */
 			port@4 {
diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi b/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi
index d195e88..d5aacf1 100644
--- a/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi
+++ b/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi
@@ -25,16 +25,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pinctrl-names = "default";
@@ -63,7 +53,7 @@
 		};
 	};
 
-	dsa@0 {
+	dsa {
 		compatible = "marvell,dsa";
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -134,3 +124,11 @@
 		duplex = <1>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-rs212.dts b/arch/arm/boot/dts/kirkwood-rs212.dts
index 3b19f1f..2c722ec 100644
--- a/arch/arm/boot/dts/kirkwood-rs212.dts
+++ b/arch/arm/boot/dts/kirkwood-rs212.dts
@@ -44,6 +44,10 @@
 	status = "okay";
 };
 
-&pcie2 {
+&pciec {
+        status = "okay";
+};
+
+&pcie1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/kirkwood-synology.dtsi b/arch/arm/boot/dts/kirkwood-synology.dtsi
index 04015c17..65e9524 100644
--- a/arch/arm/boot/dts/kirkwood-synology.dtsi
+++ b/arch/arm/boot/dts/kirkwood-synology.dtsi
@@ -10,20 +10,6 @@
  */
 
 / {
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-
-			pcie2: pcie@2,0 {
-				status = "disabled";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pmx_alarmled_12: pmx-alarmled-12 {
@@ -861,3 +847,11 @@
 		phy-handle = <&ethphy1>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-t5325.dts b/arch/arm/boot/dts/kirkwood-t5325.dts
index ed956b8..3500f473 100644
--- a/arch/arm/boot/dts/kirkwood-t5325.dts
+++ b/arch/arm/boot/dts/kirkwood-t5325.dts
@@ -30,16 +30,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 			pinctrl-0 = <&pmx_i2s &pmx_sysrst>;
@@ -173,7 +163,7 @@
 		pinctrl-0 = <&pmx_button_power>;
 		pinctrl-names = "default";
 
-		button@1 {
+		power {
 			label = "Power Button";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
@@ -217,7 +207,7 @@
 &mdio {
 	status = "okay";
 
-	ethphy0: ethernet-phy {
+	ethphy0: ethernet-phy@8 {
 		device_type = "ethernet-phy";
 		reg = <8>;
 	};
@@ -229,3 +219,11 @@
 		phy-handle = <&ethphy0>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-ts219-6281.dts b/arch/arm/boot/dts/kirkwood-ts219-6281.dts
index 9767d73..ee62204 100644
--- a/arch/arm/boot/dts/kirkwood-ts219-6281.dts
+++ b/arch/arm/boot/dts/kirkwood-ts219-6281.dts
@@ -39,12 +39,12 @@
 		pinctrl-0 = <&pmx_reset_button &pmx_USB_copy_button>;
 		pinctrl-names = "default";
 
-		button@1 {
+		copy {
 			label = "USB Copy";
 			linux,code = <KEY_COPY>;
 			gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
 		};
-		button@2 {
+		reset {
 			label = "Reset";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/kirkwood-ts219-6282.dts b/arch/arm/boot/dts/kirkwood-ts219-6282.dts
index bfc1a32..3437bb3 100644
--- a/arch/arm/boot/dts/kirkwood-ts219-6282.dts
+++ b/arch/arm/boot/dts/kirkwood-ts219-6282.dts
@@ -5,16 +5,6 @@
 #include "kirkwood-ts219.dtsi"
 
 / {
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@2,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		pinctrl: pin-controller@10000 {
 
@@ -49,12 +39,12 @@
 		pinctrl-0 = <&pmx_reset_button &pmx_USB_copy_button>;
 		pinctrl-names = "default";
 
-		button@1 {
+		copy {
 			label = "USB Copy";
 			linux,code = <KEY_COPY>;
 			gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
 		};
-		button@2 {
+		reset {
 			label = "Reset";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
@@ -63,3 +53,5 @@
 };
 
 &ethphy0 { reg = <0>; };
+
+&pcie1 { status = "okay"; };
diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi
index 0e46560..62e5e2d 100644
--- a/arch/arm/boot/dts/kirkwood-ts219.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi
@@ -12,16 +12,6 @@
 		stdout-path = &uart0;
 	};
 
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-	};
-
 	ocp@f1000000 {
 		i2c@11000 {
 			status = "okay";
@@ -94,7 +84,7 @@
 &mdio {
 	status = "okay";
 
-	ethphy0: ethernet-phy {
+	ethphy0: ethernet-phy@X {
                 /* overwrite reg property in board file */
 	};
 };
@@ -105,3 +95,11 @@
 		phy-handle = <&ethphy0>;
 	};
 };
+
+&pciec {
+        status = "okay";
+};
+
+&pcie0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/kirkwood-ts419-6282.dts b/arch/arm/boot/dts/kirkwood-ts419-6282.dts
index d7512d4..e3e71f4 100644
--- a/arch/arm/boot/dts/kirkwood-ts419-6282.dts
+++ b/arch/arm/boot/dts/kirkwood-ts419-6282.dts
@@ -16,17 +16,8 @@
 #include "kirkwood-ts219.dtsi"
 #include "kirkwood-ts419.dtsi"
 
-/ {
-	mbus {
-		pcie-controller {
-			status = "okay";
-
-			pcie@2,0 {
-				status = "okay";
-			};
-		};
-	};
-};
-
 &ethphy0 { reg = <0>; };
 &ethphy1 { reg = <1>; };
+
+&pciec { status = "okay"; };
+&pcie1 { status = "okay"; };
diff --git a/arch/arm/boot/dts/kirkwood-ts419.dtsi b/arch/arm/boot/dts/kirkwood-ts419.dtsi
index 30ab93b..02bd537 100644
--- a/arch/arm/boot/dts/kirkwood-ts419.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ts419.dtsi
@@ -45,12 +45,12 @@
 		pinctrl-0 = <&pmx_reset_button &pmx_USB_copy_button>;
 		pinctrl-names = "default";
 
-		button@1 {
+		copy {
 			label = "USB Copy";
 			linux,code = <KEY_COPY>;
 			gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
 		};
-		button@2 {
+		reset {
 			label = "Reset";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 7445a15..29b8bd7 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -27,7 +27,7 @@
 	       i2c0 = &i2c0;
 	};
 
-	mbus {
+	mbus@f1000000 {
 		compatible = "marvell,kirkwood-mbus", "simple-bus";
 		#address-cells = <2>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/lpc18xx.dtsi b/arch/arm/boot/dts/lpc18xx.dtsi
index 053a1f5..fdb736c 100644
--- a/arch/arm/boot/dts/lpc18xx.dtsi
+++ b/arch/arm/boot/dts/lpc18xx.dtsi
@@ -195,13 +195,19 @@
 			clocks = <&ccu1 CLK_CPU_CREG>;
 			resets = <&rgu 5>;
 
-			usb0_otg_phy: phy@004 {
+			creg_clk: clock-controller {
+				compatible = "nxp,lpc1850-creg-clk";
+				clocks = <&xtal32>;
+				#clock-cells = <1>;
+			};
+
+			usb0_otg_phy: phy {
 				compatible = "nxp,lpc1850-usb-otg-phy";
 				clocks = <&ccu1 CLK_USB0>;
 				#phy-cells = <0>;
 			};
 
-			dmamux: dma-mux@11c {
+			dmamux: dma-mux {
 				compatible = "nxp,lpc1850-dmamux";
 				#dma-cells = <3>;
 				dma-requests = <64>;
@@ -209,11 +215,19 @@
 			};
 		};
 
+		rtc: rtc@40046000 {
+			compatible = "nxp,lpc1850-rtc", "nxp,lpc1788-rtc";
+			reg = <0x40046000 0x1000>;
+			interrupts = <47>;
+			clocks = <&creg_clk 0>, <&ccu1 CLK_CPU_BUS>;
+			clock-names = "rtc", "reg";
+		};
+
 		cgu: clock-controller@40050000 {
 			compatible = "nxp,lpc1850-cgu";
 			reg = <0x40050000 0x1000>;
 			#clock-cells = <1>;
-			clocks = <&xtal>, <&xtal32>, <&enet_rx_clk>, <&enet_tx_clk>, <&gp_clkin>;
+			clocks = <&xtal>, <&creg_clk 1>, <&enet_rx_clk>, <&enet_tx_clk>, <&gp_clkin>;
 		};
 
 		ccu1: clock-controller@40051000 {
@@ -430,6 +444,15 @@
 			status = "disabled";
 		};
 
+		dac: dac@400e1000 {
+			compatible = "nxp,lpc1850-dac";
+			reg = <0x400e1000 0x1000>;
+			interrupts = <0>;
+			clocks = <&ccu1 CLK_APB3_DAC>;
+			resets = <&rgu 42>;
+			status = "disabled";
+		};
+
 		can0: can@400e2000 {
 			compatible = "bosch,c_can";
 			reg = <0x400e2000 0x1000>;
@@ -439,6 +462,24 @@
 			status = "disabled";
 		};
 
+		adc0: adc@400e3000 {
+			compatible = "nxp,lpc1850-adc";
+			reg = <0x400e3000 0x1000>;
+			interrupts = <17>;
+			clocks = <&ccu1 CLK_APB3_ADC0>;
+			resets = <&rgu 40>;
+			status = "disabled";
+		};
+
+		adc1: adc@400e4000 {
+			compatible = "nxp,lpc1850-adc";
+			reg = <0x400e4000 0x1000>;
+			interrupts = <21>;
+			clocks = <&ccu1 CLK_APB3_ADC1>;
+			resets = <&rgu 41>;
+			status = "disabled";
+		};
+
 		gpio: gpio@400f4000 {
 			compatible = "nxp,lpc1850-gpio";
 			reg = <0x400f4000 0x4000>;
diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts
new file mode 100644
index 0000000..52b3ed1
--- /dev/null
+++ b/arch/arm/boot/dts/lpc3250-ea3250.dts
@@ -0,0 +1,272 @@
+/*
+ * Embedded Artists LPC3250 board
+ *
+ * Copyright 2012 Roland Stigge <stigge@antcom.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "lpc32xx.dtsi"
+
+/ {
+	model = "Embedded Artists LPC3250 board based on NXP LPC3250";
+	compatible = "ea,ea3250", "nxp,lpc3250";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x4000000>;
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		autorepeat;
+		button@21 {
+			label = "Interrupt Key";
+			linux,code = <103>;
+			gpios = <&gpio 4 1 0>; /* GPI_P3 1 */
+		};
+		key1 {
+			label = "KEY1";
+			linux,code = <1>;
+			gpios = <&pca9532 0 0>;
+		};
+		key2 {
+			label = "KEY2";
+			linux,code = <2>;
+			gpios = <&pca9532 1 0>;
+		};
+		key3 {
+			label = "KEY3";
+			linux,code = <3>;
+			gpios = <&pca9532 2 0>;
+		};
+		key4 {
+			label = "KEY4";
+			linux,code = <4>;
+			gpios = <&pca9532 3 0>;
+		};
+		joy0 {
+			label = "Joystick Key 0";
+			linux,code = <10>;
+			gpios = <&gpio 2 0 0>; /* P2.0 */
+		};
+		joy1 {
+			label = "Joystick Key 1";
+			linux,code = <11>;
+			gpios = <&gpio 2 1 0>; /* P2.1 */
+		};
+		joy2 {
+			label = "Joystick Key 2";
+			linux,code = <12>;
+			gpios = <&gpio 2 2 0>; /* P2.2 */
+		};
+		joy3 {
+			label = "Joystick Key 3";
+			linux,code = <13>;
+			gpios = <&gpio 2 3 0>; /* P2.3 */
+		};
+		joy4 {
+			label = "Joystick Key 4";
+			linux,code = <14>;
+			gpios = <&gpio 2 4 0>; /* P2.4 */
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		/* LEDs on OEM Board */
+
+		led1 {
+			gpios = <&gpio 5 14 1>; /* GPO_P3 14, GPIO 93, active low */
+			linux,default-trigger = "timer";
+			default-state = "off";
+		};
+
+		led2 {
+			gpios = <&gpio 2 10 1>; /* P2.10, active low */
+			default-state = "off";
+		};
+
+		led3 {
+			gpios = <&gpio 2 11 1>; /* P2.11, active low */
+			default-state = "off";
+		};
+
+		led4 {
+			gpios = <&gpio 2 12 1>; /* P2.12, active low */
+			default-state = "off";
+		};
+
+		/* LEDs on Base Board */
+
+		lede1 {
+			gpios = <&pca9532 8 0>;
+			default-state = "off";
+		};
+		lede2 {
+			gpios = <&pca9532 9 0>;
+			default-state = "off";
+		};
+		lede3 {
+			gpios = <&pca9532 10 0>;
+			default-state = "off";
+		};
+		lede4 {
+			gpios = <&pca9532 11 0>;
+			default-state = "off";
+		};
+		lede5 {
+			gpios = <&pca9532 12 0>;
+			default-state = "off";
+		};
+		lede6 {
+			gpios = <&pca9532 13 0>;
+			default-state = "off";
+		};
+		lede7 {
+			gpios = <&pca9532 14 0>;
+			default-state = "off";
+		};
+		lede8 {
+			gpios = <&pca9532 15 0>;
+			default-state = "off";
+		};
+	};
+};
+
+/* 3-axis accelerometer X,Y,Z (or AD-IN instead of Z) */
+&adc {
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+
+	uda1380: uda1380@18 {
+		compatible = "nxp,uda1380";
+		reg = <0x18>;
+		power-gpio = <&gpio 0x59 0>;
+		reset-gpio = <&gpio 0x51 0>;
+		dac-clk = "wspll";
+	};
+
+	eeprom@50 {
+		compatible = "atmel,24c256";
+		reg = <0x50>;
+	};
+
+	eeprom@57 {
+		compatible = "atmel,24c64";
+		reg = <0x57>;
+	};
+
+	pca9532: pca9532@60 {
+		compatible = "nxp,pca9532";
+		gpio-controller;
+		#gpio-cells = <2>;
+		reg = <0x60>;
+	};
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+};
+
+&i2cusb {
+	clock-frequency = <100000>;
+
+	isp1301: usb-transceiver@2d {
+		compatible = "nxp,isp1301";
+		reg = <0x2d>;
+	};
+};
+
+&mac {
+	phy-mode = "rmii";
+	use-iram;
+};
+
+/* Here, choose exactly one from: ohci, usbd */
+&ohci /* &usbd */ {
+	transceiver = <&isp1301>;
+	status = "okay";
+};
+
+&sd {
+	wp-gpios = <&pca9532 5 0>;
+	cd-gpios = <&pca9532 4 0>;
+	cd-inverted;
+	bus-width = <4>;
+	status = "okay";
+};
+
+/* 128MB Flash via SLC NAND controller */
+&slc {
+	status = "okay";
+
+	nxp,wdr-clks = <14>;
+	nxp,wwidth = <260000000>;
+	nxp,whold = <104000000>;
+	nxp,wsetup = <200000000>;
+	nxp,rdr-clks = <14>;
+	nxp,rwidth = <34666666>;
+	nxp,rhold = <104000000>;
+	nxp,rsetup = <200000000>;
+	nand-on-flash-bbt;
+	gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */
+
+	partitions {
+		compatible = "fixed-partitions";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		mtd0@00000000 {
+			label = "ea3250-boot";
+			reg = <0x00000000 0x00080000>;
+			read-only;
+		};
+
+		mtd1@00080000 {
+			label = "ea3250-uboot";
+			reg = <0x00080000 0x000c0000>;
+			read-only;
+		};
+
+		mtd2@00140000 {
+			label = "ea3250-kernel";
+			reg = <0x00140000 0x00400000>;
+		};
+
+		mtd3@00540000 {
+			label = "ea3250-rootfs";
+			reg = <0x00540000 0x07ac0000>;
+		};
+	};
+};
+
+&uart1 {
+	status = "okay";
+};
+
+&uart3 {
+	status = "okay";
+};
+
+&uart5 {
+	status = "okay";
+};
+
+&uart6 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts
new file mode 100644
index 0000000..fd95e2b
--- /dev/null
+++ b/arch/arm/boot/dts/lpc3250-phy3250.dts
@@ -0,0 +1,226 @@
+/*
+ * PHYTEC phyCORE-LPC3250 board
+ *
+ * Copyright 2012 Roland Stigge <stigge@antcom.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include "lpc32xx.dtsi"
+
+/ {
+	model = "PHYTEC phyCORE-LPC3250 board based on NXP LPC3250";
+	compatible = "phytec,phy3250", "nxp,lpc3250";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x4000000>;
+	};
+
+	regulators {
+		backlight_reg: regulator@0 {
+			compatible = "regulator-fixed";
+			regulator-name = "backlight_reg";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			gpio = <&gpio 5 4 0>;
+			enable-active-high;
+			regulator-boot-on;
+		};
+
+		lcd_reg: regulator@1 {
+			compatible = "regulator-fixed";
+			regulator-name = "lcd_reg";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			gpio = <&gpio 5 0 0>;
+			enable-active-high;
+			regulator-boot-on;
+		};
+
+		sd_reg: regulator@2 {
+			compatible = "regulator-fixed";
+			regulator-name = "sd_reg";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			gpio = <&gpio 5 5 0>;
+			enable-active-high;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		led0 { /* red */
+			gpios = <&gpio 5 1 0>; /* GPO_P3 1, GPIO 80, active high */
+			default-state = "off";
+		};
+
+		led1 { /* green */
+			gpios = <&gpio 5 14 0>; /* GPO_P3 14, GPIO 93, active high */
+			linux,default-trigger = "heartbeat";
+		};
+	};
+};
+
+&clcd {
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+
+	uda1380: uda1380@18 {
+		compatible = "nxp,uda1380";
+		reg = <0x18>;
+		power-gpio = <&gpio 0x59 0>;
+		reset-gpio = <&gpio 0x51 0>;
+		dac-clk = "wspll";
+	};
+
+	pcf8563: rtc@51 {
+		compatible = "nxp,pcf8563";
+		reg = <0x51>;
+	};
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+};
+
+&i2cusb {
+	clock-frequency = <100000>;
+
+	isp1301: usb-transceiver@2c {
+		compatible = "nxp,isp1301";
+		reg = <0x2c>;
+	};
+};
+
+&key {
+	keypad,num-rows = <1>;
+	keypad,num-columns = <1>;
+	nxp,debounce-delay-ms = <3>;
+	nxp,scan-delay-ms = <34>;
+	linux,keymap = <0x00000002>;
+	status = "okay";
+};
+
+&mac {
+	phy-mode = "rmii";
+	use-iram;
+};
+
+/* Here, choose exactly one from: ohci, usbd */
+&ohci /* &usbd */ {
+	transceiver = <&isp1301>;
+	status = "okay";
+};
+
+&sd {
+	wp-gpios = <&gpio 3 0 0>;
+	cd-gpios = <&gpio 3 1 0>;
+	cd-inverted;
+	bus-width = <4>;
+	vmmc-supply = <&sd_reg>;
+	status = "okay";
+};
+
+/* 64MB Flash via SLC NAND controller */
+&slc {
+	status = "okay";
+
+	nxp,wdr-clks = <14>;
+	nxp,wwidth = <40000000>;
+	nxp,whold = <100000000>;
+	nxp,wsetup = <100000000>;
+	nxp,rdr-clks = <14>;
+	nxp,rwidth = <40000000>;
+	nxp,rhold = <66666666>;
+	nxp,rsetup = <100000000>;
+	nand-on-flash-bbt;
+	gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */
+
+	partitions {
+		compatible = "fixed-partitions";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		mtd0@00000000 {
+			label = "phy3250-boot";
+			reg = <0x00000000 0x00064000>;
+			read-only;
+		};
+
+		mtd1@00064000 {
+			label = "phy3250-uboot";
+			reg = <0x00064000 0x00190000>;
+			read-only;
+		};
+
+		mtd2@001f4000 {
+			label = "phy3250-ubt-prms";
+			reg = <0x001f4000 0x00010000>;
+		};
+
+		mtd3@00204000 {
+			label = "phy3250-kernel";
+			reg = <0x00204000 0x00400000>;
+		};
+
+		mtd4@00604000 {
+			label = "phy3250-rootfs";
+			reg = <0x00604000 0x039fc000>;
+		};
+	};
+};
+
+&ssp0 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	num-cs = <1>;
+	cs-gpios = <&gpio 3 5 0>;
+	status = "okay";
+
+	eeprom: at25@0 {
+		compatible = "atmel,at25";
+		reg = <0>;
+		spi-max-frequency = <5000000>;
+
+		pl022,interface = <0>;
+		pl022,com-mode = <0>;
+		pl022,rx-level-trig = <1>;
+		pl022,tx-level-trig = <1>;
+		pl022,ctrl-len = <11>;
+		pl022,wait-state = <0>;
+		pl022,duplex = <0>;
+
+		at25,byte-len = <0x8000>;
+		at25,addr-mode = <2>;
+		at25,page-size = <64>;
+	};
+};
+
+&tsc {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&uart3 {
+	status = "okay";
+};
+
+&uart5 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index c58d8da..e295e1e 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -92,7 +92,8 @@
 			ohci: ohci@0 {
 				compatible = "nxp,ohci-nxp", "usb-ohci";
 				reg = <0x0 0x300>;
-				interrupts = <59 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-parent = <&sic1>;
+				interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&usbclk LPC32XX_USB_CLK_HOST>;
 				status = "disabled";
 			};
@@ -100,10 +101,11 @@
 			usbd: usbd@0 {
 				compatible = "nxp,lpc3220-udc";
 				reg = <0x0 0x300>;
-				interrupts = <61 IRQ_TYPE_LEVEL_HIGH>,
-					     <62 IRQ_TYPE_LEVEL_HIGH>,
-					     <60 IRQ_TYPE_LEVEL_HIGH>,
-					     <58 IRQ_TYPE_LEVEL_LOW>;
+				interrupt-parent = <&sic1>;
+				interrupts = <29 IRQ_TYPE_LEVEL_HIGH>,
+					     <30 IRQ_TYPE_LEVEL_HIGH>,
+					     <28 IRQ_TYPE_LEVEL_HIGH>,
+					     <26 IRQ_TYPE_LEVEL_LOW>;
 				clocks = <&usbclk LPC32XX_USB_CLK_DEVICE>;
 				status = "disabled";
 			};
@@ -111,7 +113,8 @@
 			i2cusb: i2c@300 {
 				compatible = "nxp,pnx-i2c";
 				reg = <0x300 0x100>;
-				interrupts = <63 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-parent = <&sic1>;
+				interrupts = <31 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&usbclk LPC32XX_USB_CLK_I2C>;
 				#address-cells = <1>;
 				#size-cells = <0>;
@@ -162,30 +165,44 @@
 			compatible = "simple-bus";
 			ranges = <0x20000000 0x20000000 0x30000000>;
 
+			/*
+			 * ssp0 and spi1 are shared pins;
+			 * enable one in your board dts, as needed.
+			 */
 			ssp0: ssp@20084000 {
 				compatible = "arm,pl022", "arm,primecell";
 				reg = <0x20084000 0x1000>;
 				interrupts = <20 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk LPC32XX_CLK_SSP0>;
 				clock-names = "apb_pclk";
+				status = "disabled";
 			};
 
 			spi1: spi@20088000 {
 				compatible = "nxp,lpc3220-spi";
 				reg = <0x20088000 0x1000>;
+				clocks = <&clk LPC32XX_CLK_SPI1>;
+				status = "disabled";
 			};
 
+			/*
+			 * ssp1 and spi2 are shared pins;
+			 * enable one in your board dts, as needed.
+			 */
 			ssp1: ssp@2008c000 {
 				compatible = "arm,pl022", "arm,primecell";
 				reg = <0x2008c000 0x1000>;
 				interrupts = <21 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk LPC32XX_CLK_SSP1>;
 				clock-names = "apb_pclk";
+				status = "disabled";
 			};
 
 			spi2: spi@20090000 {
 				compatible = "nxp,lpc3220-spi";
 				reg = <0x20090000 0x1000>;
+				clocks = <&clk LPC32XX_CLK_SPI2>;
+				status = "disabled";
 			};
 
 			i2s0: i2s@20094000 {
@@ -249,7 +266,8 @@
 			i2c1: i2c@400A0000 {
 				compatible = "nxp,pnx-i2c";
 				reg = <0x400A0000 0x100>;
-				interrupts = <51 IRQ_TYPE_LEVEL_LOW>;
+				interrupt-parent = <&sic1>;
+				interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
 				#address-cells = <1>;
 				#size-cells = <0>;
 				pnx,timeout = <0x64>;
@@ -259,7 +277,8 @@
 			i2c2: i2c@400A8000 {
 				compatible = "nxp,pnx-i2c";
 				reg = <0x400A8000 0x100>;
-				interrupts = <50 IRQ_TYPE_LEVEL_LOW>;
+				interrupt-parent = <&sic1>;
+				interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 				#address-cells = <1>;
 				#size-cells = <0>;
 				pnx,timeout = <0x64>;
@@ -294,22 +313,41 @@
 
 					clocks = <&xtal_32k>, <&xtal>;
 					clock-names = "xtal_32k", "xtal";
+
+					assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
+					assigned-clock-rates = <208000000>;
 				};
 			};
 
-			/*
-			 * MIC Interrupt controller includes:
-			 *   MIC @40008000
-			 *   SIC1 @4000C000
-			 *   SIC2 @40010000
-			 */
 			mic: interrupt-controller@40008000 {
 				compatible = "nxp,lpc3220-mic";
+				reg = <0x40008000 0x4000>;
 				interrupt-controller;
-				reg = <0x40008000 0xC000>;
 				#interrupt-cells = <2>;
 			};
 
+			sic1: interrupt-controller@4000c000 {
+				compatible = "nxp,lpc3220-sic";
+				reg = <0x4000c000 0x4000>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+
+				interrupt-parent = <&mic>;
+				interrupts = <0 IRQ_TYPE_LEVEL_LOW>,
+					     <30 IRQ_TYPE_LEVEL_LOW>;
+				};
+
+			sic2: interrupt-controller@40010000 {
+				compatible = "nxp,lpc3220-sic";
+				reg = <0x40010000 0x4000>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+
+				interrupt-parent = <&mic>;
+				interrupts = <1 IRQ_TYPE_LEVEL_LOW>,
+					     <31 IRQ_TYPE_LEVEL_LOW>;
+			};
+
 			uart1: serial@40014000 {
 				compatible = "nxp,lpc3220-hsuart";
 				reg = <0x40014000 0x1000>;
@@ -334,7 +372,8 @@
 			rtc: rtc@40024000 {
 				compatible = "nxp,lpc3220-rtc";
 				reg = <0x40024000 0x1000>;
-				interrupts = <52 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-parent = <&sic1>;
+				interrupts = <20 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk LPC32XX_CLK_RTC>;
 			};
 
@@ -387,7 +426,8 @@
 			adc: adc@40048000 {
 				compatible = "nxp,lpc3220-adc";
 				reg = <0x40048000 0x1000>;
-				interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-parent = <&sic1>;
+				interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk LPC32XX_CLK_ADC>;
 				status = "disabled";
 			};
@@ -395,7 +435,8 @@
 			tsc: tsc@40048000 {
 				compatible = "nxp,lpc3220-tsc";
 				reg = <0x40048000 0x1000>;
-				interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-parent = <&sic1>;
+				interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clk LPC32XX_CLK_ADC>;
 				status = "disabled";
 			};
diff --git a/arch/arm/boot/dts/lpc4350-hitex-eval.dts b/arch/arm/boot/dts/lpc4350-hitex-eval.dts
index 022d495..6c9048d 100644
--- a/arch/arm/boot/dts/lpc4350-hitex-eval.dts
+++ b/arch/arm/boot/dts/lpc4350-hitex-eval.dts
@@ -45,50 +45,50 @@
 		poll-interval = <100>;
 		autorepeat;
 
-		button@0 {
+		button0 {
 			label = "joy:right";
 			linux,code = <KEY_RIGHT>;
 			gpios = <&pca_gpio 8 GPIO_ACTIVE_LOW>;
 		};
 
-		button@1 {
+		button1 {
 			label = "joy:up";
 			linux,code = <KEY_UP>;
 			gpios = <&pca_gpio 9 GPIO_ACTIVE_LOW>;
 		};
 
 
-		button@2 {
+		button2 {
 			label = "joy:enter";
 			linux,code = <KEY_ENTER>;
 			gpios = <&pca_gpio 10 GPIO_ACTIVE_LOW>;
 		};
 
-		button@3 {
+		button3 {
 			label = "joy:left";
 			linux,code = <KEY_LEFT>;
 			gpios = <&pca_gpio 11 GPIO_ACTIVE_LOW>;
 		};
 
-		button@4 {
+		button4 {
 			label = "joy:down";
 			linux,code = <KEY_DOWN>;
 			gpios = <&pca_gpio 12 GPIO_ACTIVE_LOW>;
 		};
 
-		button@5 {
+		button5 {
 			label = "user:sw3";
 			linux,code = <KEY_F1>;
 			gpios = <&pca_gpio 13 GPIO_ACTIVE_LOW>;
 		};
 
-		button@6 {
+		button6 {
 			label = "user:sw4";
 			linux,code = <KEY_F2>;
 			gpios = <&pca_gpio 14 GPIO_ACTIVE_LOW>;
 		};
 
-		button@7 {
+		button7 {
 			label = "user:sw5";
 			linux,code = <KEY_F3>;
 			gpios = <&pca_gpio 15 GPIO_ACTIVE_LOW>;
@@ -119,9 +119,25 @@
 			gpios = <&pca_gpio 3 GPIO_ACTIVE_LOW>;
 		};
 	};
+
+	vcc: vcc_fixed {
+		compatible = "regulator-fixed";
+		regulator-name = "3v3io";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
 };
 
 &pinctrl {
+	adc1_pins: adc1-pins {
+		adc1_pins_cfg {
+			pins = "pf_9";
+			function = "adc";
+			input-disable;
+			bias-disable;
+		};
+	};
+
 	emc_pins: emc-pins {
 		emc_addr0_23_cfg {
 			pins =	"p2_9",  "p2_10", "p2_11", "p2_12",
@@ -325,6 +341,13 @@
 	};
 };
 
+&adc1 {
+	status = "okay";
+	vref-supply = <&vcc>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&adc1_pins>;
+};
+
 &emc {
 	status = "okay";
 	pinctrl-names = "default";
@@ -430,7 +453,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&spifi_pins>;
 
-	flash@0 {
+	flash {
 		compatible = "jedec,spi-nor";
 		spi-rx-bus-width = <4>;
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts b/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
index 079d3cf..1919be4 100644
--- a/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
+++ b/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
@@ -38,6 +38,13 @@
 		reg = <0x28000000 0x2000000>; /* 32 MB */
 	};
 
+	vcc: vcc_fixed {
+		compatible = "regulator-fixed";
+		regulator-name = "3v3-supply";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
 	/* vmmc is controlled by sdmmc host internally */
 	vmmc: vmmc_fixed {
 		compatible = "regulator-fixed";
@@ -55,31 +62,31 @@
 		poll-interval = <100>;
 		autorepeat;
 
-		button@0 {
+		button0 {
 			label = "joy_enter";
 			linux,code = <KEY_ENTER>;
 			gpios = <&gpio LPC_GPIO(4,8) GPIO_ACTIVE_LOW>;
 		};
 
-		button@1 {
+		button1 {
 			label = "joy_left";
 			linux,code = <KEY_LEFT>;
 			gpios = <&gpio LPC_GPIO(4,9) GPIO_ACTIVE_LOW>;
 		};
 
-		button@2 {
+		button2 {
 			label = "joy_up";
 			linux,code = <KEY_UP>;
 			gpios = <&gpio LPC_GPIO(4,10) GPIO_ACTIVE_LOW>;
 		};
 
-		button@3 {
+		button3 {
 			label = "joy_right";
 			linux,code = <KEY_RIGHT>;
 			gpios = <&gpio LPC_GPIO(4,12) GPIO_ACTIVE_LOW>;
 		};
 
-		button@4 {
+		button4 {
 			label = "joy_down";
 			linux,code = <KEY_DOWN>;
 			gpios = <&gpio LPC_GPIO(4,13) GPIO_ACTIVE_LOW>;
@@ -461,6 +468,11 @@
 	};
 };
 
+&adc0 {
+	status = "okay";
+	vref-supply = <&vcc>;
+};
+
 &i2c0 {
 	status = "okay";
 	pinctrl-names = "default";
@@ -483,6 +495,11 @@
 	};
 };
 
+&dac {
+	status = "okay";
+	vref-supply = <&vcc>;
+};
+
 &emc {
 	status = "okay";
 	pinctrl-names = "default";
@@ -567,7 +584,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&spifi_pins>;
 
-	flash@0 {
+	flash {
 		compatible = "jedec,spi-nor";
 		spi-cpol;
 		spi-cpha;
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 726372d..5ae8e92 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -119,6 +119,20 @@
 
 		};
 
+		msi1: msi-controller@1570e00 {
+			compatible = "fsl,1s1021a-msi";
+			reg = <0x0 0x1570e00 0x0 0x8>;
+			msi-controller;
+			interrupts =  <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		msi2: msi-controller@1570e08 {
+			compatible = "fsl,1s1021a-msi";
+			reg = <0x0 0x1570e08 0x0 0x8>;
+			msi-controller;
+			interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
 		ifc: ifc@1530000 {
 			compatible = "fsl,ifc", "simple-bus";
 			reg = <0x0 0x1530000 0x0 0x10000>;
@@ -245,7 +259,7 @@
 			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "dspi";
 			clocks = <&platform_clk 1>;
-			spi-num-chipselects = <5>;
+			spi-num-chipselects = <6>;
 			big-endian;
 			status = "disabled";
 		};
@@ -258,7 +272,7 @@
 			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "dspi";
 			clocks = <&platform_clk 1>;
-			spi-num-chipselects = <5>;
+			spi-num-chipselects = <6>;
 			big-endian;
 			status = "disabled";
 		};
@@ -332,6 +346,46 @@
 			status = "disabled";
 		};
 
+		gpio0: gpio@2300000 {
+			compatible = "fsl,ls1021a-gpio", "fsl,qoriq-gpio";
+			reg = <0x0 0x2300000 0x0 0x10000>;
+			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		gpio1: gpio@2310000 {
+			compatible = "fsl,ls1021a-gpio", "fsl,qoriq-gpio";
+			reg = <0x0 0x2310000 0x0 0x10000>;
+			interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		gpio2: gpio@2320000 {
+			compatible = "fsl,ls1021a-gpio", "fsl,qoriq-gpio";
+			reg = <0x0 0x2320000 0x0 0x10000>;
+			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		gpio3: gpio@2330000 {
+			compatible = "fsl,ls1021a-gpio", "fsl,qoriq-gpio";
+			reg = <0x0 0x2330000 0x0 0x10000>;
+			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
 		lpuart0: serial@2950000 {
 			compatible = "fsl,ls1021a-lpuart";
 			reg = <0x0 0x2950000 0x0 0x1000>;
@@ -443,8 +497,9 @@
 			compatible = "fsl,ls1021a-dcu";
 			reg = <0x0 0x2ce0000 0x0 0x10000>;
 			interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 0>;
-			clock-names = "dcu";
+			clocks = <&platform_clk 0>,
+				<&platform_clk 0>;
+			clock-names = "dcu", "pix";
 			big-endian;
 			status = "disabled";
 		};
@@ -587,6 +642,7 @@
 			bus-range = <0x0 0xff>;
 			ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000   /* downstream I/O */
 				  0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+			msi-parent = <&msi1>;
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 7>;
 			interrupt-map = <0000 0 0 1 &gic GIC_SPI 91  IRQ_TYPE_LEVEL_HIGH>,
@@ -609,6 +665,7 @@
 			bus-range = <0x0 0xff>;
 			ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000   /* downstream I/O */
 				  0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+			msi-parent = <&msi2>;
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 7>;
 			interrupt-map = <0000 0 0 1 &gic GIC_SPI 92  IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index a2ddcb8..45619f6 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -91,8 +91,8 @@
 		clock-frequency = <141666666>;
 	};
 
-	pinctrl: pinctrl@c1109880 {
-		compatible = "amlogic,meson8-pinctrl";
+	pinctrl_cbus: pinctrl@c1109880 {
+		compatible = "amlogic,meson8-cbus-pinctrl";
 		reg = <0xc1109880 0x10>;
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -108,29 +108,6 @@
 			#gpio-cells = <2>;
 		};
 
-		gpio_ao: ao-bank@c1108030 {
-			reg = <0xc8100014 0x4>,
-			      <0xc810002c 0x4>,
-			      <0xc8100024 0x8>;
-			reg-names = "mux", "pull", "gpio";
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		uart_ao_a_pins: uart_ao_a {
-			mux {
-				groups = "uart_tx_ao_a", "uart_rx_ao_a";
-				function = "uart_ao";
-			};
-		};
-
-		i2c_ao_pins: i2c_mst_ao {
-			mux {
-				groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
-				function = "i2c_mst_ao";
-			};
-		};
-
 		spi_nor_pins: nor {
 			mux {
 				groups = "nor_d", "nor_q", "nor_c", "nor_cs";
@@ -157,4 +134,34 @@
 		};
 	};
 
+	pinctrl_aobus: pinctrl@c8100084 {
+		compatible = "amlogic,meson8-aobus-pinctrl";
+		reg = <0xc8100084 0xc>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		gpio_ao: ao-bank@c1108030 {
+			reg = <0xc8100014 0x4>,
+			      <0xc810002c 0x4>,
+			      <0xc8100024 0x8>;
+			reg-names = "mux", "pull", "gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		uart_ao_a_pins: uart_ao_a {
+			mux {
+				groups = "uart_tx_ao_a", "uart_rx_ao_a";
+				function = "uart_ao";
+			};
+		};
+
+		i2c_ao_pins: i2c_mst_ao {
+			mux {
+				groups = "i2c_mst_sck_ao", "i2c_mst_sda_ao";
+				function = "i2c_mst_ao";
+			};
+		};
+	};
 }; /* end of / */
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index 8bad557..2bfe401 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -155,8 +155,8 @@
 			reg = <0xc1108000 0x4>, <0xc1104000 0x460>;
 		};
 
-		pinctrl: pinctrl@c1109880 {
-			compatible = "amlogic,meson8b-pinctrl";
+		pinctrl_cbus: pinctrl@c1109880 {
+			compatible = "amlogic,meson8b-cbus-pinctrl";
 			reg = <0xc1109880 0x10>;
 			#address-cells = <1>;
 			#size-cells = <1>;
@@ -171,6 +171,14 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 			};
+		};
+
+		pinctrl_aobus: pinctrl@c8100084 {
+			compatible = "amlogic,meson8b-aobus-pinctrl";
+			reg = <0xc8100084 0xc>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
 
 			gpio_ao: ao-bank@c1108030 {
 				reg = <0xc8100014 0x4>,
diff --git a/arch/arm/boot/dts/mps2-an385.dts b/arch/arm/boot/dts/mps2-an385.dts
new file mode 100644
index 0000000..31c374d
--- /dev/null
+++ b/arch/arm/boot/dts/mps2-an385.dts
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Vladimir Murzin <vladimir.murzin@arm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "mps2.dtsi"
+
+/ {
+	model = "ARM MPS2 Application Note 385/386";
+	compatible = "arm,mps2";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		bootargs = "earlycon";
+		stdout-path = "serial0:9600n8";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x21000000 0x1000000>;
+	};
+
+	smb {
+		ethernet@0,0 {
+			compatible = "smsc,lan9220", "smsc,lan9115";
+			reg = <0 0x0 0x10000>;
+			interrupts = <13>;
+			interrupt-parent = <&nvic>;
+			smsc,irq-active-high;
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&timer0 {
+	status = "okay";
+};
+
+&timer1 {
+	status = "okay";
+};
+
+&wdt {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/mps2-an399.dts b/arch/arm/boot/dts/mps2-an399.dts
new file mode 100644
index 0000000..5e7e5ca
--- /dev/null
+++ b/arch/arm/boot/dts/mps2-an399.dts
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Vladimir Murzin <vladimir.murzin@arm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "mps2.dtsi"
+
+/ {
+	model = "ARM MPS2 Application Note 399/400";
+	compatible = "arm,mps2";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		bootargs = "earlycon";
+		stdout-path = "serial0:9600n8";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x60000000 0x1000000>;
+	};
+
+	smb {
+		ethernet@1,0 {
+			compatible = "smsc,lan9220", "smsc,lan9115";
+			reg = <1 0x0 0x10000>;
+			interrupts = <13>;
+			interrupt-parent = <&nvic>;
+			smsc,irq-active-high;
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&timer0 {
+	status = "okay";
+};
+
+&timer1 {
+	status = "okay";
+};
+
+&wdt {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/mps2.dtsi b/arch/arm/boot/dts/mps2.dtsi
new file mode 100644
index 0000000..e3fed8d
--- /dev/null
+++ b/arch/arm/boot/dts/mps2.dtsi
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Vladimir Murzin <vladimir.murzin@arm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "armv7-m.dtsi"
+
+/ {
+	oscclk0: clk-osc0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <50000000>;
+	};
+
+	oscclk1: clk-osc1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24576000>;
+	};
+
+	oscclk2: clk-osc2 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <25000000>;
+	};
+
+	cfgclk: clk-cfg {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <5000000>;
+	};
+
+	spicfgclk: clk-spicfg {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <75000000>;
+	};
+
+	sysclk: clk-sys {
+		compatible = "fixed-factor-clock";
+		clocks = <&oscclk0>;
+		#clock-cells = <0>;
+		clock-div = <2>;
+		clock-mult = <1>;
+	};
+
+	audmclk: clk-audm {
+		compatible = "fixed-factor-clock";
+		clocks = <&oscclk1>;
+		#clock-cells = <0>;
+		clock-div = <2>;
+		clock-mult = <1>;
+	};
+
+	audsclk: clk-auds {
+		compatible = "fixed-factor-clock";
+		clocks = <&oscclk1>;
+		#clock-cells = <0>;
+		clock-div = <8>;
+		clock-mult = <1>;
+	};
+
+	spiclcd: clk-cpiclcd {
+		compatible = "fixed-factor-clock";
+		clocks = <&oscclk0>;
+		#clock-cells = <0>;
+		clock-div = <2>;
+		clock-mult = <1>;
+	};
+
+	spicon: clk-spicon {
+		compatible = "fixed-factor-clock";
+		clocks = <&oscclk0>;
+		#clock-cells = <0>;
+		clock-div = <2>;
+		clock-mult = <1>;
+	};
+
+	i2cclcd: clk-i2cclcd {
+		compatible = "fixed-factor-clock";
+		clocks = <&oscclk0>;
+		#clock-cells = <0>;
+		clock-div = <2>;
+		clock-mult = <1>;
+	};
+
+	i2caud: clk-i2caud {
+		compatible = "fixed-factor-clock";
+		clocks = <&oscclk0>;
+		#clock-cells = <0>;
+		clock-div = <2>;
+		clock-mult = <1>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		ranges;
+
+		apb@40000000 {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x40000000 0x10000>;
+
+			timer0: mps2-timer0@0 {
+				compatible = "arm,mps2-timer";
+				reg = <0x0 0x1000>;
+				interrupts = <8>;
+				clocks = <&sysclk>;
+				status = "disabled";
+			};
+
+			timer1: mps2-timer1@1000 {
+				compatible = "arm,mps2-timer";
+				reg = <0x1000 0x1000>;
+				interrupts = <9>;
+				clocks = <&sysclk>;
+				status = "disabled";
+			};
+
+			timer2: dual-timer@2000 {
+				compatible = "arm,sp804";
+				reg = <0x2000 0x1000>;
+				clocks = <&sysclk>;
+				interrupts = <10>;
+				status = "disabled";
+			};
+
+			uart0: serial@4000 {
+				compatible = "arm,mps2-uart";
+				reg = <0x4000 0x1000>;
+				interrupts = <0 1 12>;
+				clocks = <&sysclk>;
+				status = "disabled";
+			};
+
+			uart1: serial@5000 {
+				compatible = "arm,mps2-uart";
+				reg = <0x5000 0x1000>;
+				interrupts = <2 3 12>;
+				clocks = <&sysclk>;
+				status = "disabled";
+			};
+
+			uart2: serial@6000 {
+				compatible = "arm,mps2-uart";
+				reg = <0x6000 0x1000>;
+				interrupts = <4 5 12>;
+				clocks = <&sysclk>;
+				status = "disabled";
+			};
+
+			wdt: watchdog@8000 {
+				compatible = "arm,sp805", "arm,primecell";
+				arm,primecell-periphid = <0x00141805>;
+				reg = <0x8000 0x1000>;
+				interrupts = <0>;
+				clocks = <&sysclk>;
+				clock-names = "apb_pclk";
+				status = "disabled";
+			};
+		};
+	};
+
+	fpga@40020000 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0x40020000 0x10000>;
+
+		fpgaio@8000 {
+			compatible = "syscon", "simple-mfd";
+			reg = <0x8000 0x10>;
+
+			led0 {
+				compatible = "register-bit-led";
+				offset = <0x0>;
+				mask = <0x01>;
+				label = "userled:0";
+				linux,default-trigger = "heartbeat";
+				default-state = "on";
+			};
+
+			led1 {
+				compatible = "register-bit-led";
+				offset = <0x0>;
+				mask = <0x02>;
+				label = "userled:1";
+				linux,default-trigger = "usr";
+				default-state = "off";
+			};
+		};
+	};
+
+	smb {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges = <0 0 0x40200000 0x10000>,
+			 <1 0 0xa0000000 0x10000>;
+	};
+};
diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
index 8343768..18596a2 100644
--- a/arch/arm/boot/dts/mt2701.dtsi
+++ b/arch/arm/boot/dts/mt2701.dtsi
@@ -15,6 +15,7 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "skeleton64.dtsi"
+#include "mt2701-pinfunc.h"
 
 / {
 	compatible = "mediatek,mt2701";
@@ -85,6 +86,24 @@
 			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 	};
 
+	pio: pinctrl@10005000 {
+		compatible = "mediatek,mt2701-pinctrl";
+		reg = <0 0x1000b000 0 0x1000>;
+		mediatek,pctl-regmap = <&syscfg_pctl_a>;
+		pins-are-numbered;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	syscfg_pctl_a: syscfg@10005000 {
+		compatible = "mediatek,mt2701-pctl-a-syscfg", "syscon";
+		reg = <0 0x10005000 0 0x1000>;
+	};
+
 	watchdog: watchdog@10007000 {
 		compatible = "mediatek,mt2701-wdt",
 			     "mediatek,mt6589-wdt";
diff --git a/arch/arm/boot/dts/omap2420-clocks.dtsi b/arch/arm/boot/dts/omap2420-clocks.dtsi
index ce8c742..f8e5bd3 100644
--- a/arch/arm/boot/dts/omap2420-clocks.dtsi
+++ b/arch/arm/boot/dts/omap2420-clocks.dtsi
@@ -9,7 +9,7 @@
  */
 
 &prcm_clocks {
-	sys_clkout2_src_gate: sys_clkout2_src_gate {
+	sys_clkout2_src_gate: sys_clkout2_src_gate@70 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&core_ck>;
@@ -17,7 +17,7 @@
 		reg = <0x0070>;
 	};
 
-	sys_clkout2_src_mux: sys_clkout2_src_mux {
+	sys_clkout2_src_mux: sys_clkout2_src_mux@70 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&core_ck>, <&sys_ck>, <&func_96m_ck>, <&func_54m_ck>;
@@ -31,7 +31,7 @@
 		clocks = <&sys_clkout2_src_gate>, <&sys_clkout2_src_mux>;
 	};
 
-	sys_clkout2: sys_clkout2 {
+	sys_clkout2: sys_clkout2@70 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sys_clkout2_src>;
@@ -41,7 +41,7 @@
 		ti,index-power-of-two;
 	};
 
-	dsp_gate_ick: dsp_gate_ick {
+	dsp_gate_ick: dsp_gate_ick@810 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-interface-clock";
 		clocks = <&dsp_fck>;
@@ -49,7 +49,7 @@
 		reg = <0x0810>;
 	};
 
-	dsp_div_ick: dsp_div_ick {
+	dsp_div_ick: dsp_div_ick@840 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&dsp_fck>;
@@ -65,7 +65,7 @@
 		clocks = <&dsp_gate_ick>, <&dsp_div_ick>;
 	};
 
-	iva1_gate_ifck: iva1_gate_ifck {
+	iva1_gate_ifck: iva1_gate_ifck@800 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&core_ck>;
@@ -73,7 +73,7 @@
 		reg = <0x0800>;
 	};
 
-	iva1_div_ifck: iva1_div_ifck {
+	iva1_div_ifck: iva1_div_ifck@840 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&core_ck>;
@@ -96,7 +96,7 @@
 		clock-div = <2>;
 	};
 
-	iva1_mpu_int_ifck: iva1_mpu_int_ifck {
+	iva1_mpu_int_ifck: iva1_mpu_int_ifck@800 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&iva1_ifck_div>;
@@ -104,7 +104,7 @@
 		reg = <0x0800>;
 	};
 
-	wdt3_ick: wdt3_ick {
+	wdt3_ick: wdt3_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -112,7 +112,7 @@
 		reg = <0x0210>;
 	};
 
-	wdt3_fck: wdt3_fck {
+	wdt3_fck: wdt3_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -120,7 +120,7 @@
 		reg = <0x0200>;
 	};
 
-	mmc_ick: mmc_ick {
+	mmc_ick: mmc_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -128,7 +128,7 @@
 		reg = <0x0210>;
 	};
 
-	mmc_fck: mmc_fck {
+	mmc_fck: mmc_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_96m_ck>;
@@ -136,7 +136,7 @@
 		reg = <0x0200>;
 	};
 
-	eac_ick: eac_ick {
+	eac_ick: eac_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -144,7 +144,7 @@
 		reg = <0x0210>;
 	};
 
-	eac_fck: eac_fck {
+	eac_fck: eac_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_96m_ck>;
@@ -152,7 +152,7 @@
 		reg = <0x0200>;
 	};
 
-	i2c1_fck: i2c1_fck {
+	i2c1_fck: i2c1_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_12m_ck>;
@@ -160,7 +160,7 @@
 		reg = <0x0200>;
 	};
 
-	i2c2_fck: i2c2_fck {
+	i2c2_fck: i2c2_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_12m_ck>;
@@ -168,7 +168,7 @@
 		reg = <0x0200>;
 	};
 
-	vlynq_ick: vlynq_ick {
+	vlynq_ick: vlynq_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l3_ck>;
@@ -176,7 +176,7 @@
 		reg = <0x0210>;
 	};
 
-	vlynq_gate_fck: vlynq_gate_fck {
+	vlynq_gate_fck: vlynq_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&core_ck>;
@@ -192,7 +192,7 @@
 		clock-div = <18>;
 	};
 
-	vlynq_mux_fck: vlynq_mux_fck {
+	vlynq_mux_fck: vlynq_mux_fck@240 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_96m_ck>, <&core_ck>, <&core_d2_ck>, <&core_d3_ck>, <&core_d4_ck>, <&dummy_ck>, <&core_d6_ck>, <&dummy_ck>, <&core_d8_ck>, <&core_d9_ck>, <&dummy_ck>, <&dummy_ck>, <&core_d12_ck>, <&dummy_ck>, <&dummy_ck>, <&dummy_ck>, <&core_d16_ck>, <&dummy_ck>, <&core_d18_ck>;
diff --git a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
index 8491f46..db95aad 100644
--- a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
+++ b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
@@ -7,7 +7,7 @@
 	};
 
 	ocp {
-		i2c@0 {
+		i2c0 {
 			compatible = "i2c-cbus-gpio";
 			gpios = <&gpio3 2 GPIO_ACTIVE_HIGH /* gpio66 clk */
 				 &gpio3 1 GPIO_ACTIVE_HIGH /* gpio65 dat */
diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi
index 5b9a376..fb712b9 100644
--- a/arch/arm/boot/dts/omap2420.dtsi
+++ b/arch/arm/boot/dts/omap2420.dtsi
@@ -130,6 +130,10 @@
 			gpmc,num-cs = <8>;
 			gpmc,num-waitpins = <4>;
 			ti,hwmods = "gpmc";
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			#gpio-cells = <2>;
 		};
 
 		mcbsp1: mcbsp@48074000 {
diff --git a/arch/arm/boot/dts/omap2430-clocks.dtsi b/arch/arm/boot/dts/omap2430-clocks.dtsi
index 93fed68..a5aa7d6 100644
--- a/arch/arm/boot/dts/omap2430-clocks.dtsi
+++ b/arch/arm/boot/dts/omap2430-clocks.dtsi
@@ -9,7 +9,7 @@
  */
 
 &scm_clocks {
-	mcbsp3_mux_fck: mcbsp3_mux_fck {
+	mcbsp3_mux_fck: mcbsp3_mux_fck@78 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_96m_ck>, <&mcbsp_clks>;
@@ -22,7 +22,7 @@
 		clocks = <&mcbsp3_gate_fck>, <&mcbsp3_mux_fck>;
 	};
 
-	mcbsp4_mux_fck: mcbsp4_mux_fck {
+	mcbsp4_mux_fck: mcbsp4_mux_fck@78 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_96m_ck>, <&mcbsp_clks>;
@@ -36,7 +36,7 @@
 		clocks = <&mcbsp4_gate_fck>, <&mcbsp4_mux_fck>;
 	};
 
-	mcbsp5_mux_fck: mcbsp5_mux_fck {
+	mcbsp5_mux_fck: mcbsp5_mux_fck@78 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_96m_ck>, <&mcbsp_clks>;
@@ -52,7 +52,7 @@
 };
 
 &prcm_clocks {
-	iva2_1_gate_ick: iva2_1_gate_ick {
+	iva2_1_gate_ick: iva2_1_gate_ick@800 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&dsp_fck>;
@@ -60,7 +60,7 @@
 		reg = <0x0800>;
 	};
 
-	iva2_1_div_ick: iva2_1_div_ick {
+	iva2_1_div_ick: iva2_1_div_ick@840 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&dsp_fck>;
@@ -76,7 +76,7 @@
 		clocks = <&iva2_1_gate_ick>, <&iva2_1_div_ick>;
 	};
 
-	mdm_gate_ick: mdm_gate_ick {
+	mdm_gate_ick: mdm_gate_ick@c10 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-interface-clock";
 		clocks = <&core_ck>;
@@ -84,7 +84,7 @@
 		reg = <0x0c10>;
 	};
 
-	mdm_div_ick: mdm_div_ick {
+	mdm_div_ick: mdm_div_ick@c40 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&core_ck>;
@@ -98,7 +98,7 @@
 		clocks = <&mdm_gate_ick>, <&mdm_div_ick>;
 	};
 
-	mdm_osc_ck: mdm_osc_ck {
+	mdm_osc_ck: mdm_osc_ck@c00 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&osc_ck>;
@@ -106,7 +106,7 @@
 		reg = <0x0c00>;
 	};
 
-	mcbsp3_ick: mcbsp3_ick {
+	mcbsp3_ick: mcbsp3_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -114,7 +114,7 @@
 		reg = <0x0214>;
 	};
 
-	mcbsp3_gate_fck: mcbsp3_gate_fck {
+	mcbsp3_gate_fck: mcbsp3_gate_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&mcbsp_clks>;
@@ -122,7 +122,7 @@
 		reg = <0x0204>;
 	};
 
-	mcbsp4_ick: mcbsp4_ick {
+	mcbsp4_ick: mcbsp4_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -130,7 +130,7 @@
 		reg = <0x0214>;
 	};
 
-	mcbsp4_gate_fck: mcbsp4_gate_fck {
+	mcbsp4_gate_fck: mcbsp4_gate_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&mcbsp_clks>;
@@ -138,7 +138,7 @@
 		reg = <0x0204>;
 	};
 
-	mcbsp5_ick: mcbsp5_ick {
+	mcbsp5_ick: mcbsp5_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -146,7 +146,7 @@
 		reg = <0x0214>;
 	};
 
-	mcbsp5_gate_fck: mcbsp5_gate_fck {
+	mcbsp5_gate_fck: mcbsp5_gate_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&mcbsp_clks>;
@@ -154,7 +154,7 @@
 		reg = <0x0204>;
 	};
 
-	mcspi3_ick: mcspi3_ick {
+	mcspi3_ick: mcspi3_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -162,7 +162,7 @@
 		reg = <0x0214>;
 	};
 
-	mcspi3_fck: mcspi3_fck {
+	mcspi3_fck: mcspi3_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_48m_ck>;
@@ -170,7 +170,7 @@
 		reg = <0x0204>;
 	};
 
-	icr_ick: icr_ick {
+	icr_ick: icr_ick@410 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&sys_ck>;
@@ -178,7 +178,7 @@
 		reg = <0x0410>;
 	};
 
-	i2chs1_fck: i2chs1_fck {
+	i2chs1_fck: i2chs1_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,omap2430-interface-clock";
 		clocks = <&func_96m_ck>;
@@ -186,7 +186,7 @@
 		reg = <0x0204>;
 	};
 
-	i2chs2_fck: i2chs2_fck {
+	i2chs2_fck: i2chs2_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,omap2430-interface-clock";
 		clocks = <&func_96m_ck>;
@@ -194,7 +194,7 @@
 		reg = <0x0204>;
 	};
 
-	usbhs_ick: usbhs_ick {
+	usbhs_ick: usbhs_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l3_ck>;
@@ -202,7 +202,7 @@
 		reg = <0x0214>;
 	};
 
-	mmchs1_ick: mmchs1_ick {
+	mmchs1_ick: mmchs1_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -210,7 +210,7 @@
 		reg = <0x0214>;
 	};
 
-	mmchs1_fck: mmchs1_fck {
+	mmchs1_fck: mmchs1_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_96m_ck>;
@@ -218,7 +218,7 @@
 		reg = <0x0204>;
 	};
 
-	mmchs2_ick: mmchs2_ick {
+	mmchs2_ick: mmchs2_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -226,7 +226,7 @@
 		reg = <0x0214>;
 	};
 
-	mmchs2_fck: mmchs2_fck {
+	mmchs2_fck: mmchs2_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_96m_ck>;
@@ -234,7 +234,7 @@
 		reg = <0x0204>;
 	};
 
-	gpio5_ick: gpio5_ick {
+	gpio5_ick: gpio5_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -242,7 +242,7 @@
 		reg = <0x0214>;
 	};
 
-	gpio5_fck: gpio5_fck {
+	gpio5_fck: gpio5_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -250,7 +250,7 @@
 		reg = <0x0204>;
 	};
 
-	mdm_intc_ick: mdm_intc_ick {
+	mdm_intc_ick: mdm_intc_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -258,7 +258,7 @@
 		reg = <0x0214>;
 	};
 
-	mmchsdb1_fck: mmchsdb1_fck {
+	mmchsdb1_fck: mmchsdb1_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -266,7 +266,7 @@
 		reg = <0x0204>;
 	};
 
-	mmchsdb2_fck: mmchsdb2_fck {
+	mmchsdb2_fck: mmchsdb2_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_32k_ck>;
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 798dda0..455aaea 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -63,7 +63,7 @@
 						#size-cells = <0>;
 					};
 
-					pbias_regulator: pbias_regulator {
+					pbias_regulator: pbias_regulator@230 {
 						compatible = "ti,pbias-omap2", "ti,pbias-omap";
 						reg = <0x230 0x4>;
 						syscon = <&scm_conf>;
@@ -154,6 +154,10 @@
 			gpmc,num-cs = <8>;
 			gpmc,num-waitpins = <4>;
 			ti,hwmods = "gpmc";
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			#gpio-cells = <2>;
 		};
 
 		mcbsp1: mcbsp@48074000 {
diff --git a/arch/arm/boot/dts/omap24xx-clocks.dtsi b/arch/arm/boot/dts/omap24xx-clocks.dtsi
index 63965b8..ca73722 100644
--- a/arch/arm/boot/dts/omap24xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap24xx-clocks.dtsi
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 &scm_clocks {
-	mcbsp1_mux_fck: mcbsp1_mux_fck {
+	mcbsp1_mux_fck: mcbsp1_mux_fck@4 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_96m_ck>, <&mcbsp_clks>;
@@ -22,7 +22,7 @@
 		clocks = <&mcbsp1_gate_fck>, <&mcbsp1_mux_fck>;
 	};
 
-	mcbsp2_mux_fck: mcbsp2_mux_fck {
+	mcbsp2_mux_fck: mcbsp2_mux_fck@4 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_96m_ck>, <&mcbsp_clks>;
@@ -74,7 +74,7 @@
 		clock-frequency = <26000000>;
 	};
 
-	aplls_clkin_ck: aplls_clkin_ck {
+	aplls_clkin_ck: aplls_clkin_ck@540 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&virt_19200000_ck>, <&virt_26m_ck>, <&virt_13m_ck>, <&virt_12m_ck>;
@@ -90,7 +90,7 @@
 		clock-div = <1>;
 	};
 
-	osc_ck: osc_ck {
+	osc_ck: osc_ck@60 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&aplls_clkin_ck>, <&aplls_clkin_x2_ck>;
@@ -99,7 +99,7 @@
 		ti,index-starts-at-one;
 	};
 
-	sys_ck: sys_ck {
+	sys_ck: sys_ck@60 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&osc_ck>;
@@ -121,14 +121,14 @@
 		clock-frequency = <0x0>;
 	};
 
-	dpll_ck: dpll_ck {
+	dpll_ck: dpll_ck@500 {
 		#clock-cells = <0>;
 		compatible = "ti,omap2-dpll-core-clock";
 		clocks = <&sys_ck>, <&sys_ck>;
 		reg = <0x0500>, <0x0540>;
 	};
 
-	apll96_ck: apll96_ck {
+	apll96_ck: apll96_ck@500 {
 		#clock-cells = <0>;
 		compatible = "ti,omap2-apll-clock";
 		clocks = <&sys_ck>;
@@ -138,7 +138,7 @@
 		reg = <0x0500>, <0x0530>, <0x0520>;
 	};
 
-	apll54_ck: apll54_ck {
+	apll54_ck: apll54_ck@500 {
 		#clock-cells = <0>;
 		compatible = "ti,omap2-apll-clock";
 		clocks = <&sys_ck>;
@@ -148,7 +148,7 @@
 		reg = <0x0500>, <0x0530>, <0x0520>;
 	};
 
-	func_54m_ck: func_54m_ck {
+	func_54m_ck: func_54m_ck@540 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&apll54_ck>, <&alt_ck>;
@@ -176,7 +176,7 @@
 		clock-div = <2>;
 	};
 
-	func_48m_ck: func_48m_ck {
+	func_48m_ck: func_48m_ck@540 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&apll96_d2_ck>, <&alt_ck>;
@@ -192,7 +192,7 @@
 		clock-div = <4>;
 	};
 
-	sys_clkout_src_gate: sys_clkout_src_gate {
+	sys_clkout_src_gate: sys_clkout_src_gate@70 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&core_ck>;
@@ -200,7 +200,7 @@
 		reg = <0x0070>;
 	};
 
-	sys_clkout_src_mux: sys_clkout_src_mux {
+	sys_clkout_src_mux: sys_clkout_src_mux@70 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&core_ck>, <&sys_ck>, <&func_96m_ck>, <&func_54m_ck>;
@@ -213,7 +213,7 @@
 		clocks = <&sys_clkout_src_gate>, <&sys_clkout_src_mux>;
 	};
 
-	sys_clkout: sys_clkout {
+	sys_clkout: sys_clkout@70 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sys_clkout_src>;
@@ -223,7 +223,7 @@
 		ti,index-power-of-two;
 	};
 
-	emul_ck: emul_ck {
+	emul_ck: emul_ck@78 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_54m_ck>;
@@ -231,7 +231,7 @@
 		reg = <0x0078>;
 	};
 
-	mpu_ck: mpu_ck {
+	mpu_ck: mpu_ck@140 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&core_ck>;
@@ -240,7 +240,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dsp_gate_fck: dsp_gate_fck {
+	dsp_gate_fck: dsp_gate_fck@800 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&core_ck>;
@@ -248,7 +248,7 @@
 		reg = <0x0800>;
 	};
 
-	dsp_div_fck: dsp_div_fck {
+	dsp_div_fck: dsp_div_fck@840 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&core_ck>;
@@ -261,7 +261,7 @@
 		clocks = <&dsp_gate_fck>, <&dsp_div_fck>;
 	};
 
-	core_l3_ck: core_l3_ck {
+	core_l3_ck: core_l3_ck@240 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&core_ck>;
@@ -270,7 +270,7 @@
 		ti,index-starts-at-one;
 	};
 
-	gfx_3d_gate_fck: gfx_3d_gate_fck {
+	gfx_3d_gate_fck: gfx_3d_gate_fck@300 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&core_l3_ck>;
@@ -278,7 +278,7 @@
 		reg = <0x0300>;
 	};
 
-	gfx_3d_div_fck: gfx_3d_div_fck {
+	gfx_3d_div_fck: gfx_3d_div_fck@340 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&core_l3_ck>;
@@ -293,7 +293,7 @@
 		clocks = <&gfx_3d_gate_fck>, <&gfx_3d_div_fck>;
 	};
 
-	gfx_2d_gate_fck: gfx_2d_gate_fck {
+	gfx_2d_gate_fck: gfx_2d_gate_fck@300 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&core_l3_ck>;
@@ -301,7 +301,7 @@
 		reg = <0x0300>;
 	};
 
-	gfx_2d_div_fck: gfx_2d_div_fck {
+	gfx_2d_div_fck: gfx_2d_div_fck@340 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&core_l3_ck>;
@@ -316,7 +316,7 @@
 		clocks = <&gfx_2d_gate_fck>, <&gfx_2d_div_fck>;
 	};
 
-	gfx_ick: gfx_ick {
+	gfx_ick: gfx_ick@310 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_l3_ck>;
@@ -324,7 +324,7 @@
 		reg = <0x0310>;
 	};
 
-	l4_ck: l4_ck {
+	l4_ck: l4_ck@240 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&core_l3_ck>;
@@ -334,7 +334,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dss_ick: dss_ick {
+	dss_ick: dss_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-no-wait-interface-clock";
 		clocks = <&l4_ck>;
@@ -342,7 +342,7 @@
 		reg = <0x0210>;
 	};
 
-	dss1_gate_fck: dss1_gate_fck {
+	dss1_gate_fck: dss1_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&core_ck>;
@@ -428,7 +428,7 @@
 		clock-div = <16>;
 	};
 
-	dss1_mux_fck: dss1_mux_fck {
+	dss1_mux_fck: dss1_mux_fck@240 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_ck>, <&core_ck>, <&core_d2_ck>, <&core_d3_ck>, <&core_d4_ck>, <&core_d5_ck>, <&core_d6_ck>, <&core_d8_ck>, <&core_d9_ck>, <&core_d12_ck>, <&core_d16_ck>;
@@ -442,7 +442,7 @@
 		clocks = <&dss1_gate_fck>, <&dss1_mux_fck>;
 	};
 
-	dss2_gate_fck: dss2_gate_fck {
+	dss2_gate_fck: dss2_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&func_48m_ck>;
@@ -450,7 +450,7 @@
 		reg = <0x0200>;
 	};
 
-	dss2_mux_fck: dss2_mux_fck {
+	dss2_mux_fck: dss2_mux_fck@240 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_ck>, <&func_48m_ck>;
@@ -464,7 +464,7 @@
 		clocks = <&dss2_gate_fck>, <&dss2_mux_fck>;
 	};
 
-	dss_54m_fck: dss_54m_fck {
+	dss_54m_fck: dss_54m_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_54m_ck>;
@@ -472,7 +472,7 @@
 		reg = <0x0200>;
 	};
 
-	ssi_ssr_sst_gate_fck: ssi_ssr_sst_gate_fck {
+	ssi_ssr_sst_gate_fck: ssi_ssr_sst_gate_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&core_ck>;
@@ -480,7 +480,7 @@
 		reg = <0x0204>;
 	};
 
-	ssi_ssr_sst_div_fck: ssi_ssr_sst_div_fck {
+	ssi_ssr_sst_div_fck: ssi_ssr_sst_div_fck@240 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&core_ck>;
@@ -494,7 +494,7 @@
 		clocks = <&ssi_ssr_sst_gate_fck>, <&ssi_ssr_sst_div_fck>;
 	};
 
-	usb_l4_gate_ick: usb_l4_gate_ick {
+	usb_l4_gate_ick: usb_l4_gate_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-interface-clock";
 		clocks = <&core_l3_ck>;
@@ -502,7 +502,7 @@
 		reg = <0x0214>;
 	};
 
-	usb_l4_div_ick: usb_l4_div_ick {
+	usb_l4_div_ick: usb_l4_div_ick@240 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&core_l3_ck>;
@@ -517,7 +517,7 @@
 		clocks = <&usb_l4_gate_ick>, <&usb_l4_div_ick>;
 	};
 
-	ssi_l4_ick: ssi_l4_ick {
+	ssi_l4_ick: ssi_l4_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -525,7 +525,7 @@
 		reg = <0x0214>;
 	};
 
-	gpt1_ick: gpt1_ick {
+	gpt1_ick: gpt1_ick@410 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&sys_ck>;
@@ -533,7 +533,7 @@
 		reg = <0x0410>;
 	};
 
-	gpt1_gate_fck: gpt1_gate_fck {
+	gpt1_gate_fck: gpt1_gate_fck@400 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -541,7 +541,7 @@
 		reg = <0x0400>;
 	};
 
-	gpt1_mux_fck: gpt1_mux_fck {
+	gpt1_mux_fck: gpt1_mux_fck@440 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -554,7 +554,7 @@
 		clocks = <&gpt1_gate_fck>, <&gpt1_mux_fck>;
 	};
 
-	gpt2_ick: gpt2_ick {
+	gpt2_ick: gpt2_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -562,7 +562,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt2_gate_fck: gpt2_gate_fck {
+	gpt2_gate_fck: gpt2_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -570,7 +570,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt2_mux_fck: gpt2_mux_fck {
+	gpt2_mux_fck: gpt2_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -584,7 +584,7 @@
 		clocks = <&gpt2_gate_fck>, <&gpt2_mux_fck>;
 	};
 
-	gpt3_ick: gpt3_ick {
+	gpt3_ick: gpt3_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -592,7 +592,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt3_gate_fck: gpt3_gate_fck {
+	gpt3_gate_fck: gpt3_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -600,7 +600,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt3_mux_fck: gpt3_mux_fck {
+	gpt3_mux_fck: gpt3_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -614,7 +614,7 @@
 		clocks = <&gpt3_gate_fck>, <&gpt3_mux_fck>;
 	};
 
-	gpt4_ick: gpt4_ick {
+	gpt4_ick: gpt4_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -622,7 +622,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt4_gate_fck: gpt4_gate_fck {
+	gpt4_gate_fck: gpt4_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -630,7 +630,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt4_mux_fck: gpt4_mux_fck {
+	gpt4_mux_fck: gpt4_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -644,7 +644,7 @@
 		clocks = <&gpt4_gate_fck>, <&gpt4_mux_fck>;
 	};
 
-	gpt5_ick: gpt5_ick {
+	gpt5_ick: gpt5_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -652,7 +652,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt5_gate_fck: gpt5_gate_fck {
+	gpt5_gate_fck: gpt5_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -660,7 +660,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt5_mux_fck: gpt5_mux_fck {
+	gpt5_mux_fck: gpt5_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -674,7 +674,7 @@
 		clocks = <&gpt5_gate_fck>, <&gpt5_mux_fck>;
 	};
 
-	gpt6_ick: gpt6_ick {
+	gpt6_ick: gpt6_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -682,7 +682,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt6_gate_fck: gpt6_gate_fck {
+	gpt6_gate_fck: gpt6_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -690,7 +690,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt6_mux_fck: gpt6_mux_fck {
+	gpt6_mux_fck: gpt6_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -704,7 +704,7 @@
 		clocks = <&gpt6_gate_fck>, <&gpt6_mux_fck>;
 	};
 
-	gpt7_ick: gpt7_ick {
+	gpt7_ick: gpt7_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -712,7 +712,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt7_gate_fck: gpt7_gate_fck {
+	gpt7_gate_fck: gpt7_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -720,7 +720,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt7_mux_fck: gpt7_mux_fck {
+	gpt7_mux_fck: gpt7_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -734,7 +734,7 @@
 		clocks = <&gpt7_gate_fck>, <&gpt7_mux_fck>;
 	};
 
-	gpt8_ick: gpt8_ick {
+	gpt8_ick: gpt8_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -742,7 +742,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt8_gate_fck: gpt8_gate_fck {
+	gpt8_gate_fck: gpt8_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -750,7 +750,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt8_mux_fck: gpt8_mux_fck {
+	gpt8_mux_fck: gpt8_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -764,7 +764,7 @@
 		clocks = <&gpt8_gate_fck>, <&gpt8_mux_fck>;
 	};
 
-	gpt9_ick: gpt9_ick {
+	gpt9_ick: gpt9_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -772,7 +772,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt9_gate_fck: gpt9_gate_fck {
+	gpt9_gate_fck: gpt9_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -780,7 +780,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt9_mux_fck: gpt9_mux_fck {
+	gpt9_mux_fck: gpt9_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -794,7 +794,7 @@
 		clocks = <&gpt9_gate_fck>, <&gpt9_mux_fck>;
 	};
 
-	gpt10_ick: gpt10_ick {
+	gpt10_ick: gpt10_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -802,7 +802,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt10_gate_fck: gpt10_gate_fck {
+	gpt10_gate_fck: gpt10_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -810,7 +810,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt10_mux_fck: gpt10_mux_fck {
+	gpt10_mux_fck: gpt10_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -824,7 +824,7 @@
 		clocks = <&gpt10_gate_fck>, <&gpt10_mux_fck>;
 	};
 
-	gpt11_ick: gpt11_ick {
+	gpt11_ick: gpt11_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -832,7 +832,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt11_gate_fck: gpt11_gate_fck {
+	gpt11_gate_fck: gpt11_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -840,7 +840,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt11_mux_fck: gpt11_mux_fck {
+	gpt11_mux_fck: gpt11_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -854,7 +854,7 @@
 		clocks = <&gpt11_gate_fck>, <&gpt11_mux_fck>;
 	};
 
-	gpt12_ick: gpt12_ick {
+	gpt12_ick: gpt12_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -862,7 +862,7 @@
 		reg = <0x0210>;
 	};
 
-	gpt12_gate_fck: gpt12_gate_fck {
+	gpt12_gate_fck: gpt12_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -870,7 +870,7 @@
 		reg = <0x0200>;
 	};
 
-	gpt12_mux_fck: gpt12_mux_fck {
+	gpt12_mux_fck: gpt12_mux_fck@244 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&func_32k_ck>, <&sys_ck>, <&alt_ck>;
@@ -884,7 +884,7 @@
 		clocks = <&gpt12_gate_fck>, <&gpt12_mux_fck>;
 	};
 
-	mcbsp1_ick: mcbsp1_ick {
+	mcbsp1_ick: mcbsp1_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -892,7 +892,7 @@
 		reg = <0x0210>;
 	};
 
-	mcbsp1_gate_fck: mcbsp1_gate_fck {
+	mcbsp1_gate_fck: mcbsp1_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&mcbsp_clks>;
@@ -900,7 +900,7 @@
 		reg = <0x0200>;
 	};
 
-	mcbsp2_ick: mcbsp2_ick {
+	mcbsp2_ick: mcbsp2_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -908,7 +908,7 @@
 		reg = <0x0210>;
 	};
 
-	mcbsp2_gate_fck: mcbsp2_gate_fck {
+	mcbsp2_gate_fck: mcbsp2_gate_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&mcbsp_clks>;
@@ -916,7 +916,7 @@
 		reg = <0x0200>;
 	};
 
-	mcspi1_ick: mcspi1_ick {
+	mcspi1_ick: mcspi1_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -924,7 +924,7 @@
 		reg = <0x0210>;
 	};
 
-	mcspi1_fck: mcspi1_fck {
+	mcspi1_fck: mcspi1_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_48m_ck>;
@@ -932,7 +932,7 @@
 		reg = <0x0200>;
 	};
 
-	mcspi2_ick: mcspi2_ick {
+	mcspi2_ick: mcspi2_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -940,7 +940,7 @@
 		reg = <0x0210>;
 	};
 
-	mcspi2_fck: mcspi2_fck {
+	mcspi2_fck: mcspi2_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_48m_ck>;
@@ -948,7 +948,7 @@
 		reg = <0x0200>;
 	};
 
-	uart1_ick: uart1_ick {
+	uart1_ick: uart1_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -956,7 +956,7 @@
 		reg = <0x0210>;
 	};
 
-	uart1_fck: uart1_fck {
+	uart1_fck: uart1_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_48m_ck>;
@@ -964,7 +964,7 @@
 		reg = <0x0200>;
 	};
 
-	uart2_ick: uart2_ick {
+	uart2_ick: uart2_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -972,7 +972,7 @@
 		reg = <0x0210>;
 	};
 
-	uart2_fck: uart2_fck {
+	uart2_fck: uart2_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_48m_ck>;
@@ -980,7 +980,7 @@
 		reg = <0x0200>;
 	};
 
-	uart3_ick: uart3_ick {
+	uart3_ick: uart3_ick@214 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -988,7 +988,7 @@
 		reg = <0x0214>;
 	};
 
-	uart3_fck: uart3_fck {
+	uart3_fck: uart3_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_48m_ck>;
@@ -996,7 +996,7 @@
 		reg = <0x0204>;
 	};
 
-	gpios_ick: gpios_ick {
+	gpios_ick: gpios_ick@410 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&sys_ck>;
@@ -1004,7 +1004,7 @@
 		reg = <0x0410>;
 	};
 
-	gpios_fck: gpios_fck {
+	gpios_fck: gpios_fck@400 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -1012,7 +1012,7 @@
 		reg = <0x0400>;
 	};
 
-	mpu_wdt_ick: mpu_wdt_ick {
+	mpu_wdt_ick: mpu_wdt_ick@410 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&sys_ck>;
@@ -1020,7 +1020,7 @@
 		reg = <0x0410>;
 	};
 
-	mpu_wdt_fck: mpu_wdt_fck {
+	mpu_wdt_fck: mpu_wdt_fck@400 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -1028,7 +1028,7 @@
 		reg = <0x0400>;
 	};
 
-	sync_32k_ick: sync_32k_ick {
+	sync_32k_ick: sync_32k_ick@410 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&sys_ck>;
@@ -1036,7 +1036,7 @@
 		reg = <0x0410>;
 	};
 
-	wdt1_ick: wdt1_ick {
+	wdt1_ick: wdt1_ick@410 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&sys_ck>;
@@ -1044,7 +1044,7 @@
 		reg = <0x0410>;
 	};
 
-	omapctrl_ick: omapctrl_ick {
+	omapctrl_ick: omapctrl_ick@410 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&sys_ck>;
@@ -1052,7 +1052,7 @@
 		reg = <0x0410>;
 	};
 
-	cam_fck: cam_fck {
+	cam_fck: cam_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_96m_ck>;
@@ -1060,7 +1060,7 @@
 		reg = <0x0200>;
 	};
 
-	cam_ick: cam_ick {
+	cam_ick: cam_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-no-wait-interface-clock";
 		clocks = <&l4_ck>;
@@ -1068,7 +1068,7 @@
 		reg = <0x0210>;
 	};
 
-	mailboxes_ick: mailboxes_ick {
+	mailboxes_ick: mailboxes_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1076,7 +1076,7 @@
 		reg = <0x0210>;
 	};
 
-	wdt4_ick: wdt4_ick {
+	wdt4_ick: wdt4_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1084,7 +1084,7 @@
 		reg = <0x0210>;
 	};
 
-	wdt4_fck: wdt4_fck {
+	wdt4_fck: wdt4_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_32k_ck>;
@@ -1092,7 +1092,7 @@
 		reg = <0x0200>;
 	};
 
-	mspro_ick: mspro_ick {
+	mspro_ick: mspro_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1100,7 +1100,7 @@
 		reg = <0x0210>;
 	};
 
-	mspro_fck: mspro_fck {
+	mspro_fck: mspro_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_96m_ck>;
@@ -1108,7 +1108,7 @@
 		reg = <0x0200>;
 	};
 
-	fac_ick: fac_ick {
+	fac_ick: fac_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1116,7 +1116,7 @@
 		reg = <0x0210>;
 	};
 
-	fac_fck: fac_fck {
+	fac_fck: fac_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_12m_ck>;
@@ -1124,7 +1124,7 @@
 		reg = <0x0200>;
 	};
 
-	hdq_ick: hdq_ick {
+	hdq_ick: hdq_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1132,7 +1132,7 @@
 		reg = <0x0210>;
 	};
 
-	hdq_fck: hdq_fck {
+	hdq_fck: hdq_fck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_12m_ck>;
@@ -1140,7 +1140,7 @@
 		reg = <0x0200>;
 	};
 
-	i2c1_ick: i2c1_ick {
+	i2c1_ick: i2c1_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1148,7 +1148,7 @@
 		reg = <0x0210>;
 	};
 
-	i2c2_ick: i2c2_ick {
+	i2c2_ick: i2c2_ick@210 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1156,7 +1156,7 @@
 		reg = <0x0210>;
 	};
 
-	gpmc_fck: gpmc_fck {
+	gpmc_fck: gpmc_fck@238 {
 		#clock-cells = <0>;
 		compatible = "ti,fixed-factor-clock";
 		clocks = <&core_l3_ck>;
@@ -1174,7 +1174,7 @@
 		clock-div = <1>;
 	};
 
-	sdma_ick: sdma_ick {
+	sdma_ick: sdma_ick@238 {
 		#clock-cells = <0>;
 		compatible = "ti,fixed-factor-clock";
 		clocks = <&core_l3_ck>;
@@ -1184,7 +1184,7 @@
 		ti,clock-mult = <1>;
 	};
 
-	sdrc_ick: sdrc_ick {
+	sdrc_ick: sdrc_ick@238 {
 		#clock-cells = <0>;
 		compatible = "ti,fixed-factor-clock";
 		clocks = <&core_l3_ck>;
@@ -1194,7 +1194,7 @@
 		ti,clock-mult = <1>;
 	};
 
-	des_ick: des_ick {
+	des_ick: des_ick@21c {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1202,7 +1202,7 @@
 		reg = <0x021c>;
 	};
 
-	sha_ick: sha_ick {
+	sha_ick: sha_ick@21c {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1210,7 +1210,7 @@
 		reg = <0x021c>;
 	};
 
-	rng_ick: rng_ick {
+	rng_ick: rng_ick@21c {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1218,7 +1218,7 @@
 		reg = <0x021c>;
 	};
 
-	aes_ick: aes_ick {
+	aes_ick: aes_ick@21c {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1226,7 +1226,7 @@
 		reg = <0x021c>;
 	};
 
-	pka_ick: pka_ick {
+	pka_ick: pka_ick@21c {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l4_ck>;
@@ -1234,7 +1234,7 @@
 		reg = <0x021c>;
 	};
 
-	usb_fck: usb_fck {
+	usb_fck: usb_fck@204 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&func_48m_ck>;
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index 4602866..a4deff0 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -390,6 +390,7 @@
 		interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
 			     <1 IRQ_TYPE_NONE>;	/* termcount */
 		ti,nand-ecc-opt = "ham1";
+		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
 		nand-bus-width = <16>;
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts
index 5c67429..b9e58c5 100644
--- a/arch/arm/boot/dts/omap3-n9.dts
+++ b/arch/arm/boot/dts/omap3-n9.dts
@@ -57,3 +57,17 @@
 &modem {
 	compatible = "nokia,n9-modem";
 };
+
+&lis302 {
+	st,axis-x = <1>;    /* LIS3_DEV_X */
+	st,axis-y = <(-2)>; /* LIS3_INV_DEV_Y */
+	st,axis-z = <(-3)>; /* LIS3_INV_DEV_Z */
+
+	st,min-limit-x = <(-46)>;
+	st,min-limit-y = <3>;
+	st,min-limit-z = <3>;
+
+	st,max-limit-x = <(-3)>;
+	st,max-limit-y = <46>;
+	st,max-limit-z = <46>;
+};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index b3c26a9..d9e2d9c 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -329,6 +329,7 @@
 	regulator-name = "V28";
 	regulator-min-microvolt = <2800000>;
 	regulator-max-microvolt = <2800000>;
+	regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 	regulator-always-on; /* due to battery cover sensor */
 };
 
@@ -336,30 +337,35 @@
 	regulator-name = "VCSI";
 	regulator-min-microvolt = <1800000>;
 	regulator-max-microvolt = <1800000>;
+	regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vaux3 {
 	regulator-name = "VMMC2_30";
 	regulator-min-microvolt = <2800000>;
 	regulator-max-microvolt = <3000000>;
+	regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vaux4 {
 	regulator-name = "VCAM_ANA_28";
 	regulator-min-microvolt = <2800000>;
 	regulator-max-microvolt = <2800000>;
+	regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vmmc1 {
 	regulator-name = "VMMC1";
 	regulator-min-microvolt = <1850000>;
 	regulator-max-microvolt = <3150000>;
+	regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vmmc2 {
 	regulator-name = "V28_A";
 	regulator-min-microvolt = <2800000>;
 	regulator-max-microvolt = <3000000>;
+	regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 	regulator-always-on; /* due VIO leak to AIC34 VDDs */
 };
 
@@ -367,6 +373,7 @@
 	regulator-name = "VPLL";
 	regulator-min-microvolt = <1800000>;
 	regulator-max-microvolt = <1800000>;
+	regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 	regulator-always-on;
 };
 
@@ -374,6 +381,7 @@
 	regulator-name = "VSDI_CSI";
 	regulator-min-microvolt = <1800000>;
 	regulator-max-microvolt = <1800000>;
+	regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 	regulator-always-on;
 };
 
@@ -381,6 +389,7 @@
 	regulator-name = "VMMC2_IO_18";
 	regulator-min-microvolt = <1800000>;
 	regulator-max-microvolt = <1800000>;
+	regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vio {
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 858a250..a00ca76 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -14,6 +14,13 @@
 	cpus {
 		cpu@0 {
 			cpu0-supply = <&vcc>;
+			operating-points = <
+				/* kHz    uV */
+				300000  1012500
+				600000  1200000
+				800000  1325000
+				1000000	1375000
+			>;
 		};
 	};
 
@@ -39,9 +46,34 @@
 		enable-active-high;
 		regulator-boot-off;
 	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		heartbeat {
+			label = "debug::sleep";
+			gpios = <&gpio3 28 GPIO_ACTIVE_HIGH>;  /* gpio92 */
+			linux,default-trigger = "default-on";
+			pinctrl-names = "default";
+			pinctrl-0 = <&debug_leds>;
+		};
+	};
 };
 
 &omap3_pmx_core {
+	accelerator_pins: pinmux_accelerator_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x21da, PIN_INPUT | MUX_MODE4)	/* mcspi2_somi.gpio_180 -> LIS302 INT1 */
+			OMAP3_CORE1_IOPAD(0x21dc, PIN_INPUT | MUX_MODE4)	/* mcspi2_cs0.gpio_181 -> LIS302 INT2 */
+		>;
+	};
+
+	debug_leds: pinmux_debug_led_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x2108, PIN_OUTPUT | MUX_MODE4)       /* dss_data22.gpio_92 */
+		>;
+	};
+
 	mmc2_pins: pinmux_mmc2_pins {
 		pinctrl-single,pins = <
 			OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk */
@@ -129,6 +161,30 @@
 	ti,pulldowns	= <0x008106>; /* BIT(1) | BIT(2) | BIT(8) | BIT(15) */
 };
 
+&vdac {
+	regulator-name = "vdac";
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+};
+
+&vpll1 {
+	regulator-name = "vpll1";
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+};
+
+&vpll2 {
+	regulator-name = "vpll2";
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+};
+
+&vaux1 {
+	regulator-name = "vaux1";
+	regulator-min-microvolt = <2800000>;
+	regulator-max-microvolt = <2800000>;
+};
+
 /* CSI-2 receiver */
 &vaux2 {
 	regulator-name = "vaux2";
@@ -143,12 +199,107 @@
 	regulator-max-microvolt = <2800000>;
 };
 
+&vaux4 {
+	regulator-name = "vaux4";
+	regulator-min-microvolt = <2800000>;
+	regulator-max-microvolt = <2800000>;
+};
+
+&vmmc1 {
+	regulator-name = "vmmc1";
+	regulator-min-microvolt = <1850000>;
+	regulator-max-microvolt = <3150000>;
+};
+
+&vmmc2 {
+	regulator-name = "vmmc2";
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+};
+
+&vintana1 {
+	regulator-name = "vintana1";
+	regulator-min-microvolt = <1500000>;
+	regulator-max-microvolt = <1500000>;
+};
+
+&vintana2 {
+	regulator-name = "vintana2";
+	regulator-min-microvolt = <2750000>;
+	regulator-max-microvolt = <2750000>;
+};
+
+&vintdig {
+	regulator-name = "vintdig";
+	regulator-min-microvolt = <1500000>;
+	regulator-max-microvolt = <1500000>;
+};
+
+&vsim {
+	regulator-name = "vsim";
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+};
+
+&vio {
+	regulator-name = "vio";
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+};
+
 &i2c2 {
 	clock-frequency = <400000>;
 };
 
 &i2c3 {
 	clock-frequency = <400000>;
+
+	lis302: lis302@1d {
+		compatible = "st,lis3lv02d";
+		reg = <0x1d>;
+
+		Vdd-supply = <&vaux1>;
+		Vdd_IO-supply = <&vio>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&accelerator_pins>;
+
+                interrupts-extended = <&gpio6 20 IRQ_TYPE_EDGE_FALLING>, <&gpio6 21 IRQ_TYPE_EDGE_FALLING>; /* 180, 181 */
+
+		/* click flags */
+		st,click-single-x;
+		st,click-single-y;
+		st,click-single-z;
+
+		/* Limits are 0.5g * value */
+		st,click-threshold-x = <8>;
+		st,click-threshold-y = <8>;
+		st,click-threshold-z = <10>;
+
+		/* Click must be longer than time limit */
+		st,click-time-limit = <9>;
+
+		/* Kind of debounce filter */
+		st,click-latency = <50>;
+
+		st,wakeup-x-hi;
+		st,wakeup-y-hi;
+		st,wakeup-threshold = <(800/18)>; /* millig-value / 18 to get HW values */
+
+		st,wakeup2-z-hi;
+		st,wakeup2-threshold = <(1000/18)>; /* millig-value / 18 to get HW values */
+
+		st,highpass-cutoff-hz = <2>;
+
+		/* Interrupt line 1 for thresholds */
+		st,irq1-ff-wu-1;
+		st,irq1-ff-wu-2;
+		/* Interrupt line 2 for click detection */
+		st,irq2-click;
+
+		st,wu-duration-1 = <8>;
+		st,wu-duration-2 = <8>;
+	};
 };
 
 &mmc1 {
diff --git a/arch/arm/boot/dts/omap3-n950.dts b/arch/arm/boot/dts/omap3-n950.dts
index 7f219a9..646601a 100644
--- a/arch/arm/boot/dts/omap3-n950.dts
+++ b/arch/arm/boot/dts/omap3-n950.dts
@@ -11,10 +11,33 @@
 /dts-v1/;
 
 #include "omap3-n950-n9.dtsi"
+#include <dt-bindings/input/input.h>
 
 / {
 	model = "Nokia N950";
 	compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
+
+	keys {
+		compatible = "gpio-keys";
+
+		keypad_slide {
+			label = "Keypad Slide";
+			gpios = <&gpio4 13 GPIO_ACTIVE_LOW>; /* 109 */
+			linux,input-type = <EV_SW>;
+			linux,code = <SW_KEYPAD_SLIDE>;
+			wakeup-source;
+			pinctrl-names = "default";
+			pinctrl-0 = <&keypad_slide_pins>;
+		};
+	};
+};
+
+&omap3_pmx_core {
+	keypad_slide_pins: pinmux_debug_led_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x212a, PIN_INPUT | MUX_MODE4)       /* cam_d10.gpio_109 */
+		>;
+	};
 };
 
 &omap3_pmx_core {
@@ -86,3 +109,79 @@
 &modem {
 	compatible = "nokia,n950-modem";
 };
+
+&twl {
+	twl_audio: audio {
+		compatible = "ti,twl4030-audio";
+		ti,enable-vibra = <1>;
+	};
+};
+
+&twl_keypad {
+	linux,keymap = < MATRIX_KEY(0x00, 0x00, KEY_BACKSLASH)
+			 MATRIX_KEY(0x01, 0x00, KEY_LEFTSHIFT)
+			 MATRIX_KEY(0x02, 0x00, KEY_COMPOSE)
+			 MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA)
+			 MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL)
+			 MATRIX_KEY(0x05, 0x00, KEY_BACKSPACE)
+			 MATRIX_KEY(0x06, 0x00, KEY_VOLUMEDOWN)
+			 MATRIX_KEY(0x07, 0x00, KEY_VOLUMEUP)
+
+			 MATRIX_KEY(0x03, 0x01, KEY_Z)
+			 MATRIX_KEY(0x04, 0x01, KEY_A)
+			 MATRIX_KEY(0x05, 0x01, KEY_Q)
+			 MATRIX_KEY(0x06, 0x01, KEY_W)
+			 MATRIX_KEY(0x07, 0x01, KEY_E)
+
+			 MATRIX_KEY(0x03, 0x02, KEY_X)
+			 MATRIX_KEY(0x04, 0x02, KEY_S)
+			 MATRIX_KEY(0x05, 0x02, KEY_D)
+			 MATRIX_KEY(0x06, 0x02, KEY_C)
+			 MATRIX_KEY(0x07, 0x02, KEY_V)
+
+			 MATRIX_KEY(0x03, 0x03, KEY_O)
+			 MATRIX_KEY(0x04, 0x03, KEY_I)
+			 MATRIX_KEY(0x05, 0x03, KEY_U)
+			 MATRIX_KEY(0x06, 0x03, KEY_L)
+			 MATRIX_KEY(0x07, 0x03, KEY_APOSTROPHE)
+
+			 MATRIX_KEY(0x03, 0x04, KEY_Y)
+			 MATRIX_KEY(0x04, 0x04, KEY_K)
+			 MATRIX_KEY(0x05, 0x04, KEY_J)
+			 MATRIX_KEY(0x06, 0x04, KEY_H)
+			 MATRIX_KEY(0x07, 0x04, KEY_G)
+
+			 MATRIX_KEY(0x03, 0x05, KEY_B)
+			 MATRIX_KEY(0x04, 0x05, KEY_COMMA)
+			 MATRIX_KEY(0x05, 0x05, KEY_M)
+			 MATRIX_KEY(0x06, 0x05, KEY_N)
+			 MATRIX_KEY(0x07, 0x05, KEY_DOT)
+
+			 MATRIX_KEY(0x00, 0x06, KEY_SPACE)
+			 MATRIX_KEY(0x03, 0x06, KEY_T)
+			 MATRIX_KEY(0x04, 0x06, KEY_UP)
+			 MATRIX_KEY(0x05, 0x06, KEY_LEFT)
+			 MATRIX_KEY(0x06, 0x06, KEY_RIGHT)
+			 MATRIX_KEY(0x07, 0x06, KEY_DOWN)
+
+			 MATRIX_KEY(0x03, 0x07, KEY_P)
+			 MATRIX_KEY(0x04, 0x07, KEY_ENTER)
+			 MATRIX_KEY(0x05, 0x07, KEY_SLASH)
+			 MATRIX_KEY(0x06, 0x07, KEY_F)
+			 MATRIX_KEY(0x07, 0x07, KEY_R)
+			 >;
+};
+
+&lis302 {
+	st,axis-x = <(-2)>; /* LIS3_INV_DEV_Y */
+	st,axis-y = <(-1)>; /* LIS3_INV_DEV_X */
+	st,axis-z = <(-3)>; /* LIS3_INV_DEV_Z */
+
+	st,min-limit-x = <(-32)>;
+	st,min-limit-y = <3>;
+	st,min-limit-z = <3>;
+
+	st,max-limit-x = <(-3)>;
+	st,max-limit-y = <32>;
+	st,max-limit-z = <32>;
+};
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index b41d07e..9fbda38 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -43,7 +43,7 @@
 		};
 	};
 
-	pmu {
+	pmu@54000000 {
 		compatible = "arm,cortex-a8-pmu";
 		reg = <0x54000000 0x800000>;
 		interrupts = <3>;
@@ -119,7 +119,7 @@
 					#size-cells = <1>;
 					ranges = <0 0x270 0x330>;
 
-					pbias_regulator: pbias_regulator {
+					pbias_regulator: pbias_regulator@2b0 {
 						compatible = "ti,pbias-omap3", "ti,pbias-omap";
 						reg = <0x2b0 0x4>;
 						syscon = <&scm_conf>;
@@ -725,6 +725,8 @@
 			#size-cells = <1>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
+			gpio-controller;
+			#gpio-cells = <2>;
 		};
 
 		usb_otg_hs: usb_otg_hs@480ab000 {
diff --git a/arch/arm/boot/dts/omap3430es1-clocks.dtsi b/arch/arm/boot/dts/omap3430es1-clocks.dtsi
index 4c22f3a..86de819 100644
--- a/arch/arm/boot/dts/omap3430es1-clocks.dtsi
+++ b/arch/arm/boot/dts/omap3430es1-clocks.dtsi
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 &cm_clocks {
-	gfx_l3_ck: gfx_l3_ck {
+	gfx_l3_ck: gfx_l3_ck@b10 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&l3_ick>;
@@ -16,7 +16,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	gfx_l3_fck: gfx_l3_fck {
+	gfx_l3_fck: gfx_l3_fck@b40 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&l3_ick>;
@@ -33,7 +33,7 @@
 		clock-div = <1>;
 	};
 
-	gfx_cg1_ck: gfx_cg1_ck {
+	gfx_cg1_ck: gfx_cg1_ck@b00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&gfx_l3_fck>;
@@ -41,7 +41,7 @@
 		ti,bit-shift = <1>;
 	};
 
-	gfx_cg2_ck: gfx_cg2_ck {
+	gfx_cg2_ck: gfx_cg2_ck@b00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&gfx_l3_fck>;
@@ -49,7 +49,7 @@
 		ti,bit-shift = <2>;
 	};
 
-	d2d_26m_fck: d2d_26m_fck {
+	d2d_26m_fck: d2d_26m_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&sys_ck>;
@@ -57,7 +57,7 @@
 		ti,bit-shift = <3>;
 	};
 
-	fshostusb_fck: fshostusb_fck {
+	fshostusb_fck: fshostusb_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_48m_fck>;
@@ -65,7 +65,7 @@
 		ti,bit-shift = <5>;
 	};
 
-	ssi_ssr_gate_fck_3430es1: ssi_ssr_gate_fck_3430es1 {
+	ssi_ssr_gate_fck_3430es1: ssi_ssr_gate_fck_3430es1@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&corex2_fck>;
@@ -73,7 +73,7 @@
 		reg = <0x0a00>;
 	};
 
-	ssi_ssr_div_fck_3430es1: ssi_ssr_div_fck_3430es1 {
+	ssi_ssr_div_fck_3430es1: ssi_ssr_div_fck_3430es1@a40 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&corex2_fck>;
@@ -96,7 +96,7 @@
 		clock-div = <2>;
 	};
 
-	hsotgusb_ick_3430es1: hsotgusb_ick_3430es1 {
+	hsotgusb_ick_3430es1: hsotgusb_ick_3430es1@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-no-wait-interface-clock";
 		clocks = <&core_l3_ick>;
@@ -104,7 +104,7 @@
 		ti,bit-shift = <4>;
 	};
 
-	fac_ick: fac_ick {
+	fac_ick: fac_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -120,7 +120,7 @@
 		clock-div = <1>;
 	};
 
-	ssi_ick: ssi_ick_3430es1 {
+	ssi_ick: ssi_ick_3430es1@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-no-wait-interface-clock";
 		clocks = <&ssi_l4_ick>;
@@ -128,7 +128,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	usb_l4_gate_ick: usb_l4_gate_ick {
+	usb_l4_gate_ick: usb_l4_gate_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-interface-clock";
 		clocks = <&l4_ick>;
@@ -136,7 +136,7 @@
 		reg = <0x0a10>;
 	};
 
-	usb_l4_div_ick: usb_l4_div_ick {
+	usb_l4_div_ick: usb_l4_div_ick@a40 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&l4_ick>;
@@ -152,7 +152,7 @@
 		clocks = <&usb_l4_gate_ick>, <&usb_l4_div_ick>;
 	};
 
-	dss1_alwon_fck: dss1_alwon_fck_3430es1 {
+	dss1_alwon_fck: dss1_alwon_fck_3430es1@e00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll4_m4x2_ck>;
@@ -161,7 +161,7 @@
 		ti,set-rate-parent;
 	};
 
-	dss_ick: dss_ick_3430es1 {
+	dss_ick: dss_ick_3430es1@e10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-no-wait-interface-clock";
 		clocks = <&l4_ick>;
diff --git a/arch/arm/boot/dts/omap34xx-omap36xx-clocks.dtsi b/arch/arm/boot/dts/omap34xx-omap36xx-clocks.dtsi
index b02017b..858aa07 100644
--- a/arch/arm/boot/dts/omap34xx-omap36xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap34xx-omap36xx-clocks.dtsi
@@ -16,7 +16,7 @@
 		clock-div = <1>;
 	};
 
-	aes1_ick: aes1_ick {
+	aes1_ick: aes1_ick@a14 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&security_l4_ick2>;
@@ -24,7 +24,7 @@
 		reg = <0x0a14>;
 	};
 
-	rng_ick: rng_ick {
+	rng_ick: rng_ick@a14 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&security_l4_ick2>;
@@ -32,7 +32,7 @@
 		ti,bit-shift = <2>;
 	};
 
-	sha11_ick: sha11_ick {
+	sha11_ick: sha11_ick@a14 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&security_l4_ick2>;
@@ -40,7 +40,7 @@
 		ti,bit-shift = <1>;
 	};
 
-	des1_ick: des1_ick {
+	des1_ick: des1_ick@a14 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&security_l4_ick2>;
@@ -48,7 +48,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	cam_mclk: cam_mclk {
+	cam_mclk: cam_mclk@f00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll4_m5x2_ck>;
@@ -57,7 +57,7 @@
 		ti,set-rate-parent;
 	};
 
-	cam_ick: cam_ick {
+	cam_ick: cam_ick@f10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-no-wait-interface-clock";
 		clocks = <&l4_ick>;
@@ -65,7 +65,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	csi2_96m_fck: csi2_96m_fck {
+	csi2_96m_fck: csi2_96m_fck@f00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&core_96m_fck>;
@@ -81,7 +81,7 @@
 		clock-div = <1>;
 	};
 
-	pka_ick: pka_ick {
+	pka_ick: pka_ick@a14 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&security_l3_ick>;
@@ -89,7 +89,7 @@
 		ti,bit-shift = <4>;
 	};
 
-	icr_ick: icr_ick {
+	icr_ick: icr_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -97,7 +97,7 @@
 		ti,bit-shift = <29>;
 	};
 
-	des2_ick: des2_ick {
+	des2_ick: des2_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -105,7 +105,7 @@
 		ti,bit-shift = <26>;
 	};
 
-	mspro_ick: mspro_ick {
+	mspro_ick: mspro_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -113,7 +113,7 @@
 		ti,bit-shift = <23>;
 	};
 
-	mailboxes_ick: mailboxes_ick {
+	mailboxes_ick: mailboxes_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -129,7 +129,7 @@
 		clock-div = <1>;
 	};
 
-	sr1_fck: sr1_fck {
+	sr1_fck: sr1_fck@c00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&sys_ck>;
@@ -137,7 +137,7 @@
 		ti,bit-shift = <6>;
 	};
 
-	sr2_fck: sr2_fck {
+	sr2_fck: sr2_fck@c00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&sys_ck>;
@@ -153,7 +153,7 @@
 		clock-div = <1>;
 	};
 
-	dpll2_fck: dpll2_fck {
+	dpll2_fck: dpll2_fck@40 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&core_ck>;
@@ -163,7 +163,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll2_ck: dpll2_ck {
+	dpll2_ck: dpll2_ck@4 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-dpll-clock";
 		clocks = <&sys_ck>, <&dpll2_fck>;
@@ -173,7 +173,7 @@
 		ti,low-power-bypass;
 	};
 
-	dpll2_m2_ck: dpll2_m2_ck {
+	dpll2_m2_ck: dpll2_m2_ck@44 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll2_ck>;
@@ -182,7 +182,7 @@
 		ti,index-starts-at-one;
 	};
 
-	iva2_ck: iva2_ck {
+	iva2_ck: iva2_ck@0 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&dpll2_m2_ck>;
@@ -190,7 +190,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	modem_fck: modem_fck {
+	modem_fck: modem_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&sys_ck>;
@@ -198,7 +198,7 @@
 		ti,bit-shift = <31>;
 	};
 
-	sad2d_ick: sad2d_ick {
+	sad2d_ick: sad2d_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l3_ick>;
@@ -206,7 +206,7 @@
 		ti,bit-shift = <3>;
 	};
 
-	mad2d_ick: mad2d_ick {
+	mad2d_ick: mad2d_ick@a18 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&l3_ick>;
@@ -214,7 +214,7 @@
 		ti,bit-shift = <3>;
 	};
 
-	mspro_fck: mspro_fck {
+	mspro_fck: mspro_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_96m_fck>;
diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi
index 387dc31..e446562 100644
--- a/arch/arm/boot/dts/omap34xx.dtsi
+++ b/arch/arm/boot/dts/omap34xx.dtsi
@@ -46,7 +46,7 @@
 			       0x480bd800 0x017c>;
 			interrupts = <24>;
 			iommus = <&mmu_isp>;
-			syscon = <&scm_conf 0xdc>;
+			syscon = <&scm_conf 0x6c>;
 			ti,phy-type = <OMAP3ISP_PHY_TYPE_COMPLEX_IO>;
 			#clock-cells = <1>;
 			ports {
@@ -55,7 +55,7 @@
 			};
 		};
 
-		bandgap {
+		bandgap@48002524 {
 			reg = <0x48002524 0x4>;
 			compatible = "ti,omap34xx-bandgap";
 			#thermal-sensor-cells = <0>;
diff --git a/arch/arm/boot/dts/omap36xx-am35xx-omap3430es2plus-clocks.dtsi b/arch/arm/boot/dts/omap36xx-am35xx-omap3430es2plus-clocks.dtsi
index 080fb3f..15d1866 100644
--- a/arch/arm/boot/dts/omap36xx-am35xx-omap3430es2plus-clocks.dtsi
+++ b/arch/arm/boot/dts/omap36xx-am35xx-omap3430es2plus-clocks.dtsi
@@ -25,7 +25,7 @@
 	};
 };
 &cm_clocks {
-	dpll5_ck: dpll5_ck {
+	dpll5_ck: dpll5_ck@d04 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-dpll-clock";
 		clocks = <&sys_ck>, <&sys_ck>;
@@ -34,7 +34,7 @@
 		ti,lock;
 	};
 
-	dpll5_m2_ck: dpll5_m2_ck {
+	dpll5_m2_ck: dpll5_m2_ck@d50 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll5_ck>;
@@ -43,7 +43,7 @@
 		ti,index-starts-at-one;
 	};
 
-	sgx_gate_fck: sgx_gate_fck {
+	sgx_gate_fck: sgx_gate_fck@b00 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&core_ck>;
@@ -91,7 +91,7 @@
 		clock-div = <2>;
 	};
 
-	sgx_mux_fck: sgx_mux_fck {
+	sgx_mux_fck: sgx_mux_fck@b40 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&core_d3_ck>, <&core_d4_ck>, <&core_d6_ck>, <&cm_96m_fck>, <&omap_192m_alwon_fck>, <&core_d2_ck>, <&corex2_d3_fck>, <&corex2_d5_fck>;
@@ -104,7 +104,7 @@
 		clocks = <&sgx_gate_fck>, <&sgx_mux_fck>;
 	};
 
-	sgx_ick: sgx_ick {
+	sgx_ick: sgx_ick@b10 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&l3_ick>;
@@ -112,7 +112,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	cpefuse_fck: cpefuse_fck {
+	cpefuse_fck: cpefuse_fck@a08 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_ck>;
@@ -120,7 +120,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	ts_fck: ts_fck {
+	ts_fck: ts_fck@a08 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&omap_32k_fck>;
@@ -128,7 +128,7 @@
 		ti,bit-shift = <1>;
 	};
 
-	usbtll_fck: usbtll_fck {
+	usbtll_fck: usbtll_fck@a08 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&dpll5_m2_ck>;
@@ -136,7 +136,7 @@
 		ti,bit-shift = <2>;
 	};
 
-	usbtll_ick: usbtll_ick {
+	usbtll_ick: usbtll_ick@a18 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -144,7 +144,7 @@
 		ti,bit-shift = <2>;
 	};
 
-	mmchs3_ick: mmchs3_ick {
+	mmchs3_ick: mmchs3_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -152,7 +152,7 @@
 		ti,bit-shift = <30>;
 	};
 
-	mmchs3_fck: mmchs3_fck {
+	mmchs3_fck: mmchs3_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_96m_fck>;
@@ -160,7 +160,7 @@
 		ti,bit-shift = <30>;
 	};
 
-	dss1_alwon_fck: dss1_alwon_fck_3430es2 {
+	dss1_alwon_fck: dss1_alwon_fck_3430es2@e00 {
 		#clock-cells = <0>;
 		compatible = "ti,dss-gate-clock";
 		clocks = <&dpll4_m4x2_ck>;
@@ -169,7 +169,7 @@
 		ti,set-rate-parent;
 	};
 
-	dss_ick: dss_ick_3430es2 {
+	dss_ick: dss_ick_3430es2@e10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-dss-interface-clock";
 		clocks = <&l4_ick>;
@@ -177,7 +177,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	usbhost_120m_fck: usbhost_120m_fck {
+	usbhost_120m_fck: usbhost_120m_fck@1400 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll5_m2_ck>;
@@ -185,7 +185,7 @@
 		ti,bit-shift = <1>;
 	};
 
-	usbhost_48m_fck: usbhost_48m_fck {
+	usbhost_48m_fck: usbhost_48m_fck@1400 {
 		#clock-cells = <0>;
 		compatible = "ti,dss-gate-clock";
 		clocks = <&omap_48m_fck>;
@@ -193,7 +193,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	usbhost_ick: usbhost_ick {
+	usbhost_ick: usbhost_ick@1410 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-dss-interface-clock";
 		clocks = <&l4_ick>;
diff --git a/arch/arm/boot/dts/omap36xx-clocks.dtsi b/arch/arm/boot/dts/omap36xx-clocks.dtsi
index 200ae3a..a21d1f0 100644
--- a/arch/arm/boot/dts/omap36xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap36xx-clocks.dtsi
@@ -8,14 +8,14 @@
  * published by the Free Software Foundation.
  */
 &cm_clocks {
-	dpll4_ck: dpll4_ck {
+	dpll4_ck: dpll4_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-dpll-per-j-type-clock";
 		clocks = <&sys_ck>, <&sys_ck>;
 		reg = <0x0d00>, <0x0d20>, <0x0d44>, <0x0d30>;
 	};
 
-	dpll4_m5x2_ck: dpll4_m5x2_ck {
+	dpll4_m5x2_ck: dpll4_m5x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,hsdiv-gate-clock";
 		clocks = <&dpll4_m5x2_mul_ck>;
@@ -25,7 +25,7 @@
 		ti,set-bit-to-disable;
 	};
 
-	dpll4_m2x2_ck: dpll4_m2x2_ck {
+	dpll4_m2x2_ck: dpll4_m2x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,hsdiv-gate-clock";
 		clocks = <&dpll4_m2x2_mul_ck>;
@@ -34,7 +34,7 @@
 		ti,set-bit-to-disable;
 	};
 
-	dpll3_m3x2_ck: dpll3_m3x2_ck {
+	dpll3_m3x2_ck: dpll3_m3x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,hsdiv-gate-clock";
 		clocks = <&dpll3_m3x2_mul_ck>;
@@ -43,7 +43,7 @@
 		ti,set-bit-to-disable;
 	};
 
-	dpll4_m3x2_ck: dpll4_m3x2_ck {
+	dpll4_m3x2_ck: dpll4_m3x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,hsdiv-gate-clock";
 		clocks = <&dpll4_m3x2_mul_ck>;
@@ -52,7 +52,7 @@
 		ti,set-bit-to-disable;
 	};
 
-	dpll4_m6x2_ck: dpll4_m6x2_ck {
+	dpll4_m6x2_ck: dpll4_m6x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,hsdiv-gate-clock";
 		clocks = <&dpll4_m6x2_mul_ck>;
@@ -61,7 +61,7 @@
 		ti,set-bit-to-disable;
 	};
 
-	uart4_fck: uart4_fck {
+	uart4_fck: uart4_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&per_48m_fck>;
diff --git a/arch/arm/boot/dts/omap36xx-omap3430es2plus-clocks.dtsi b/arch/arm/boot/dts/omap36xx-omap3430es2plus-clocks.dtsi
index 877318c..1a4fbdf 100644
--- a/arch/arm/boot/dts/omap36xx-omap3430es2plus-clocks.dtsi
+++ b/arch/arm/boot/dts/omap36xx-omap3430es2plus-clocks.dtsi
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 &cm_clocks {
-	ssi_ssr_gate_fck_3430es2: ssi_ssr_gate_fck_3430es2 {
+	ssi_ssr_gate_fck_3430es2: ssi_ssr_gate_fck_3430es2@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&corex2_fck>;
@@ -16,7 +16,7 @@
 		reg = <0x0a00>;
 	};
 
-	ssi_ssr_div_fck_3430es2: ssi_ssr_div_fck_3430es2 {
+	ssi_ssr_div_fck_3430es2: ssi_ssr_div_fck_3430es2@a40 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&corex2_fck>;
@@ -39,7 +39,7 @@
 		clock-div = <2>;
 	};
 
-	hsotgusb_ick_3430es2: hsotgusb_ick_3430es2 {
+	hsotgusb_ick_3430es2: hsotgusb_ick_3430es2@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-hsotgusb-interface-clock";
 		clocks = <&core_l3_ick>;
@@ -55,7 +55,7 @@
 		clock-div = <1>;
 	};
 
-	ssi_ick: ssi_ick_3430es2 {
+	ssi_ick: ssi_ick_3430es2@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-ssi-interface-clock";
 		clocks = <&ssi_l4_ick>;
@@ -63,7 +63,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	usim_gate_fck: usim_gate_fck {
+	usim_gate_fck: usim_gate_fck@c00 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&omap_96m_fck>;
@@ -143,7 +143,7 @@
 		clock-div = <20>;
 	};
 
-	usim_mux_fck: usim_mux_fck {
+	usim_mux_fck: usim_mux_fck@c40 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_ck>, <&sys_d2_ck>, <&omap_96m_d2_fck>, <&omap_96m_d4_fck>, <&omap_96m_d8_fck>, <&omap_96m_d10_fck>, <&dpll5_m2_d4_ck>, <&dpll5_m2_d8_ck>, <&dpll5_m2_d16_ck>, <&dpll5_m2_d20_ck>;
@@ -158,7 +158,7 @@
 		clocks = <&usim_gate_fck>, <&usim_mux_fck>;
 	};
 
-	usim_ick: usim_ick {
+	usim_ick: usim_ick@c10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&wkup_l4_ick>;
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index f19c87b..8b797915 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -44,7 +44,7 @@
 		abb_mpu_iva: regulator-abb-mpu {
 			compatible = "ti,abb-v1";
 			regulator-name = "abb_mpu_iva";
-			#address-cell = <0>;
+			#address-cells = <0>;
 			#size-cells = <0>;
 			reg = <0x483072f0 0x8>, <0x48306818 0x4>;
 			reg-names = "base-address", "int-address";
@@ -87,7 +87,7 @@
 			};
 		};
 
-		bandgap {
+		bandgap@48002524 {
 			reg = <0x48002524 0x4>;
 			compatible = "ti,omap36xx-bandgap";
 			#thermal-sensor-cells = <0>;
diff --git a/arch/arm/boot/dts/omap3xxx-clocks.dtsi b/arch/arm/boot/dts/omap3xxx-clocks.dtsi
index bbba5bd..9bd9164 100644
--- a/arch/arm/boot/dts/omap3xxx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap3xxx-clocks.dtsi
@@ -14,14 +14,14 @@
 		clock-frequency = <16800000>;
 	};
 
-	osc_sys_ck: osc_sys_ck {
+	osc_sys_ck: osc_sys_ck@d40 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&virt_12m_ck>, <&virt_13m_ck>, <&virt_19200000_ck>, <&virt_26000000_ck>, <&virt_38_4m_ck>, <&virt_16_8m_ck>;
 		reg = <0x0d40>;
 	};
 
-	sys_ck: sys_ck {
+	sys_ck: sys_ck@1270 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&osc_sys_ck>;
@@ -31,7 +31,7 @@
 		ti,index-starts-at-one;
 	};
 
-	sys_clkout1: sys_clkout1 {
+	sys_clkout1: sys_clkout1@d70 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&osc_sys_ck>;
@@ -81,7 +81,7 @@
 };
 
 &scm_clocks {
-	mcbsp5_mux_fck: mcbsp5_mux_fck {
+	mcbsp5_mux_fck: mcbsp5_mux_fck@68 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&core_96m_fck>, <&mcbsp_clks>;
@@ -95,7 +95,7 @@
 		clocks = <&mcbsp5_gate_fck>, <&mcbsp5_mux_fck>;
 	};
 
-	mcbsp1_mux_fck: mcbsp1_mux_fck {
+	mcbsp1_mux_fck: mcbsp1_mux_fck@4 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&core_96m_fck>, <&mcbsp_clks>;
@@ -109,7 +109,7 @@
 		clocks = <&mcbsp1_gate_fck>, <&mcbsp1_mux_fck>;
 	};
 
-	mcbsp2_mux_fck: mcbsp2_mux_fck {
+	mcbsp2_mux_fck: mcbsp2_mux_fck@4 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&per_96m_fck>, <&mcbsp_clks>;
@@ -123,7 +123,7 @@
 		clocks = <&mcbsp2_gate_fck>, <&mcbsp2_mux_fck>;
 	};
 
-	mcbsp3_mux_fck: mcbsp3_mux_fck {
+	mcbsp3_mux_fck: mcbsp3_mux_fck@68 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&per_96m_fck>, <&mcbsp_clks>;
@@ -136,7 +136,7 @@
 		clocks = <&mcbsp3_gate_fck>, <&mcbsp3_mux_fck>;
 	};
 
-	mcbsp4_mux_fck: mcbsp4_mux_fck {
+	mcbsp4_mux_fck: mcbsp4_mux_fck@68 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&per_96m_fck>, <&mcbsp_clks>;
@@ -193,14 +193,14 @@
 		clock-frequency = <38400000>;
 	};
 
-	dpll4_ck: dpll4_ck {
+	dpll4_ck: dpll4_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-dpll-per-clock";
 		clocks = <&sys_ck>, <&sys_ck>;
 		reg = <0x0d00>, <0x0d20>, <0x0d44>, <0x0d30>;
 	};
 
-	dpll4_m2_ck: dpll4_m2_ck {
+	dpll4_m2_ck: dpll4_m2_ck@d48 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll4_ck>;
@@ -217,7 +217,7 @@
 		clock-div = <1>;
 	};
 
-	dpll4_m2x2_ck: dpll4_m2x2_ck {
+	dpll4_m2x2_ck: dpll4_m2x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll4_m2x2_mul_ck>;
@@ -234,14 +234,14 @@
 		clock-div = <1>;
 	};
 
-	dpll3_ck: dpll3_ck {
+	dpll3_ck: dpll3_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-dpll-core-clock";
 		clocks = <&sys_ck>, <&sys_ck>;
 		reg = <0x0d00>, <0x0d20>, <0x0d40>, <0x0d30>;
 	};
 
-	dpll3_m3_ck: dpll3_m3_ck {
+	dpll3_m3_ck: dpll3_m3_ck@1140 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll3_ck>;
@@ -259,7 +259,7 @@
 		clock-div = <1>;
 	};
 
-	dpll3_m3x2_ck: dpll3_m3x2_ck {
+	dpll3_m3x2_ck: dpll3_m3x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll3_m3x2_mul_ck>;
@@ -288,7 +288,7 @@
 		clock-frequency = <0x0>;
 	};
 
-	dpll3_m2_ck: dpll3_m2_ck {
+	dpll3_m2_ck: dpll3_m2_ck@d40 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll3_ck>;
@@ -306,7 +306,7 @@
 		clock-div = <1>;
 	};
 
-	dpll1_fck: dpll1_fck {
+	dpll1_fck: dpll1_fck@940 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&core_ck>;
@@ -316,7 +316,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll1_ck: dpll1_ck {
+	dpll1_ck: dpll1_ck@904 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-dpll-clock";
 		clocks = <&sys_ck>, <&dpll1_fck>;
@@ -331,7 +331,7 @@
 		clock-div = <1>;
 	};
 
-	dpll1_x2m2_ck: dpll1_x2m2_ck {
+	dpll1_x2m2_ck: dpll1_x2m2_ck@944 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll1_x2_ck>;
@@ -348,7 +348,7 @@
 		clock-div = <1>;
 	};
 
-	omap_96m_fck: omap_96m_fck {
+	omap_96m_fck: omap_96m_fck@d40 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&cm_96m_fck>, <&sys_ck>;
@@ -356,7 +356,7 @@
 		reg = <0x0d40>;
 	};
 
-	dpll4_m3_ck: dpll4_m3_ck {
+	dpll4_m3_ck: dpll4_m3_ck@e40 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll4_ck>;
@@ -374,7 +374,7 @@
 		clock-div = <1>;
 	};
 
-	dpll4_m3x2_ck: dpll4_m3x2_ck {
+	dpll4_m3x2_ck: dpll4_m3x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll4_m3x2_mul_ck>;
@@ -383,7 +383,7 @@
 		ti,set-bit-to-disable;
 	};
 
-	omap_54m_fck: omap_54m_fck {
+	omap_54m_fck: omap_54m_fck@d40 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll4_m3x2_ck>, <&sys_altclk>;
@@ -399,7 +399,7 @@
 		clock-div = <2>;
 	};
 
-	omap_48m_fck: omap_48m_fck {
+	omap_48m_fck: omap_48m_fck@d40 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&cm_96m_d2_fck>, <&sys_altclk>;
@@ -415,7 +415,7 @@
 		clock-div = <4>;
 	};
 
-	dpll4_m4_ck: dpll4_m4_ck {
+	dpll4_m4_ck: dpll4_m4_ck@e40 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll4_ck>;
@@ -433,7 +433,7 @@
 		ti,set-rate-parent;
 	};
 
-	dpll4_m4x2_ck: dpll4_m4x2_ck {
+	dpll4_m4x2_ck: dpll4_m4x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll4_m4x2_mul_ck>;
@@ -443,7 +443,7 @@
 		ti,set-rate-parent;
 	};
 
-	dpll4_m5_ck: dpll4_m5_ck {
+	dpll4_m5_ck: dpll4_m5_ck@f40 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll4_ck>;
@@ -461,7 +461,7 @@
 		ti,set-rate-parent;
 	};
 
-	dpll4_m5x2_ck: dpll4_m5x2_ck {
+	dpll4_m5x2_ck: dpll4_m5x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll4_m5x2_mul_ck>;
@@ -471,7 +471,7 @@
 		ti,set-rate-parent;
 	};
 
-	dpll4_m6_ck: dpll4_m6_ck {
+	dpll4_m6_ck: dpll4_m6_ck@1140 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll4_ck>;
@@ -489,7 +489,7 @@
 		clock-div = <1>;
 	};
 
-	dpll4_m6x2_ck: dpll4_m6x2_ck {
+	dpll4_m6x2_ck: dpll4_m6x2_ck@d00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll4_m6x2_mul_ck>;
@@ -506,7 +506,7 @@
 		clock-div = <1>;
 	};
 
-	clkout2_src_gate_ck: clkout2_src_gate_ck {
+	clkout2_src_gate_ck: clkout2_src_gate_ck@d70 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&core_ck>;
@@ -514,7 +514,7 @@
 		reg = <0x0d70>;
 	};
 
-	clkout2_src_mux_ck: clkout2_src_mux_ck {
+	clkout2_src_mux_ck: clkout2_src_mux_ck@d70 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&core_ck>, <&sys_ck>, <&cm_96m_fck>, <&omap_54m_fck>;
@@ -527,7 +527,7 @@
 		clocks = <&clkout2_src_gate_ck>, <&clkout2_src_mux_ck>;
 	};
 
-	sys_clkout2: sys_clkout2 {
+	sys_clkout2: sys_clkout2@d70 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&clkout2_src_ck>;
@@ -545,7 +545,7 @@
 		clock-div = <1>;
 	};
 
-	arm_fck: arm_fck {
+	arm_fck: arm_fck@924 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&mpu_ck>;
@@ -561,7 +561,7 @@
 		clock-div = <1>;
 	};
 
-	l3_ick: l3_ick {
+	l3_ick: l3_ick@a40 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&core_ck>;
@@ -570,7 +570,7 @@
 		ti,index-starts-at-one;
 	};
 
-	l4_ick: l4_ick {
+	l4_ick: l4_ick@a40 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&l3_ick>;
@@ -580,7 +580,7 @@
 		ti,index-starts-at-one;
 	};
 
-	rm_ick: rm_ick {
+	rm_ick: rm_ick@c40 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&l4_ick>;
@@ -590,7 +590,7 @@
 		ti,index-starts-at-one;
 	};
 
-	gpt10_gate_fck: gpt10_gate_fck {
+	gpt10_gate_fck: gpt10_gate_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -598,7 +598,7 @@
 		reg = <0x0a00>;
 	};
 
-	gpt10_mux_fck: gpt10_mux_fck {
+	gpt10_mux_fck: gpt10_mux_fck@a40 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -612,7 +612,7 @@
 		clocks = <&gpt10_gate_fck>, <&gpt10_mux_fck>;
 	};
 
-	gpt11_gate_fck: gpt11_gate_fck {
+	gpt11_gate_fck: gpt11_gate_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -620,7 +620,7 @@
 		reg = <0x0a00>;
 	};
 
-	gpt11_mux_fck: gpt11_mux_fck {
+	gpt11_mux_fck: gpt11_mux_fck@a40 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -642,7 +642,7 @@
 		clock-div = <1>;
 	};
 
-	mmchs2_fck: mmchs2_fck {
+	mmchs2_fck: mmchs2_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_96m_fck>;
@@ -650,7 +650,7 @@
 		ti,bit-shift = <25>;
 	};
 
-	mmchs1_fck: mmchs1_fck {
+	mmchs1_fck: mmchs1_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_96m_fck>;
@@ -658,7 +658,7 @@
 		ti,bit-shift = <24>;
 	};
 
-	i2c3_fck: i2c3_fck {
+	i2c3_fck: i2c3_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_96m_fck>;
@@ -666,7 +666,7 @@
 		ti,bit-shift = <17>;
 	};
 
-	i2c2_fck: i2c2_fck {
+	i2c2_fck: i2c2_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_96m_fck>;
@@ -674,7 +674,7 @@
 		ti,bit-shift = <16>;
 	};
 
-	i2c1_fck: i2c1_fck {
+	i2c1_fck: i2c1_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_96m_fck>;
@@ -682,7 +682,7 @@
 		ti,bit-shift = <15>;
 	};
 
-	mcbsp5_gate_fck: mcbsp5_gate_fck {
+	mcbsp5_gate_fck: mcbsp5_gate_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&mcbsp_clks>;
@@ -690,7 +690,7 @@
 		reg = <0x0a00>;
 	};
 
-	mcbsp1_gate_fck: mcbsp1_gate_fck {
+	mcbsp1_gate_fck: mcbsp1_gate_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&mcbsp_clks>;
@@ -706,7 +706,7 @@
 		clock-div = <1>;
 	};
 
-	mcspi4_fck: mcspi4_fck {
+	mcspi4_fck: mcspi4_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_48m_fck>;
@@ -714,7 +714,7 @@
 		ti,bit-shift = <21>;
 	};
 
-	mcspi3_fck: mcspi3_fck {
+	mcspi3_fck: mcspi3_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_48m_fck>;
@@ -722,7 +722,7 @@
 		ti,bit-shift = <20>;
 	};
 
-	mcspi2_fck: mcspi2_fck {
+	mcspi2_fck: mcspi2_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_48m_fck>;
@@ -730,7 +730,7 @@
 		ti,bit-shift = <19>;
 	};
 
-	mcspi1_fck: mcspi1_fck {
+	mcspi1_fck: mcspi1_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_48m_fck>;
@@ -738,7 +738,7 @@
 		ti,bit-shift = <18>;
 	};
 
-	uart2_fck: uart2_fck {
+	uart2_fck: uart2_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_48m_fck>;
@@ -746,7 +746,7 @@
 		ti,bit-shift = <14>;
 	};
 
-	uart1_fck: uart1_fck {
+	uart1_fck: uart1_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_48m_fck>;
@@ -762,7 +762,7 @@
 		clock-div = <1>;
 	};
 
-	hdq_fck: hdq_fck {
+	hdq_fck: hdq_fck@a00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_12m_fck>;
@@ -778,7 +778,7 @@
 		clock-div = <1>;
 	};
 
-	sdrc_ick: sdrc_ick {
+	sdrc_ick: sdrc_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&core_l3_ick>;
@@ -802,7 +802,7 @@
 		clock-div = <1>;
 	};
 
-	mmchs2_ick: mmchs2_ick {
+	mmchs2_ick: mmchs2_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -810,7 +810,7 @@
 		ti,bit-shift = <25>;
 	};
 
-	mmchs1_ick: mmchs1_ick {
+	mmchs1_ick: mmchs1_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -818,7 +818,7 @@
 		ti,bit-shift = <24>;
 	};
 
-	hdq_ick: hdq_ick {
+	hdq_ick: hdq_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -826,7 +826,7 @@
 		ti,bit-shift = <22>;
 	};
 
-	mcspi4_ick: mcspi4_ick {
+	mcspi4_ick: mcspi4_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -834,7 +834,7 @@
 		ti,bit-shift = <21>;
 	};
 
-	mcspi3_ick: mcspi3_ick {
+	mcspi3_ick: mcspi3_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -842,7 +842,7 @@
 		ti,bit-shift = <20>;
 	};
 
-	mcspi2_ick: mcspi2_ick {
+	mcspi2_ick: mcspi2_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -850,7 +850,7 @@
 		ti,bit-shift = <19>;
 	};
 
-	mcspi1_ick: mcspi1_ick {
+	mcspi1_ick: mcspi1_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -858,7 +858,7 @@
 		ti,bit-shift = <18>;
 	};
 
-	i2c3_ick: i2c3_ick {
+	i2c3_ick: i2c3_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -866,7 +866,7 @@
 		ti,bit-shift = <17>;
 	};
 
-	i2c2_ick: i2c2_ick {
+	i2c2_ick: i2c2_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -874,7 +874,7 @@
 		ti,bit-shift = <16>;
 	};
 
-	i2c1_ick: i2c1_ick {
+	i2c1_ick: i2c1_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -882,7 +882,7 @@
 		ti,bit-shift = <15>;
 	};
 
-	uart2_ick: uart2_ick {
+	uart2_ick: uart2_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -890,7 +890,7 @@
 		ti,bit-shift = <14>;
 	};
 
-	uart1_ick: uart1_ick {
+	uart1_ick: uart1_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -898,7 +898,7 @@
 		ti,bit-shift = <13>;
 	};
 
-	gpt11_ick: gpt11_ick {
+	gpt11_ick: gpt11_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -906,7 +906,7 @@
 		ti,bit-shift = <12>;
 	};
 
-	gpt10_ick: gpt10_ick {
+	gpt10_ick: gpt10_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -914,7 +914,7 @@
 		ti,bit-shift = <11>;
 	};
 
-	mcbsp5_ick: mcbsp5_ick {
+	mcbsp5_ick: mcbsp5_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -922,7 +922,7 @@
 		ti,bit-shift = <10>;
 	};
 
-	mcbsp1_ick: mcbsp1_ick {
+	mcbsp1_ick: mcbsp1_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -930,7 +930,7 @@
 		ti,bit-shift = <9>;
 	};
 
-	omapctrl_ick: omapctrl_ick {
+	omapctrl_ick: omapctrl_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -938,7 +938,7 @@
 		ti,bit-shift = <6>;
 	};
 
-	dss_tv_fck: dss_tv_fck {
+	dss_tv_fck: dss_tv_fck@e00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&omap_54m_fck>;
@@ -946,7 +946,7 @@
 		ti,bit-shift = <2>;
 	};
 
-	dss_96m_fck: dss_96m_fck {
+	dss_96m_fck: dss_96m_fck@e00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&omap_96m_fck>;
@@ -954,7 +954,7 @@
 		ti,bit-shift = <2>;
 	};
 
-	dss2_alwon_fck: dss2_alwon_fck {
+	dss2_alwon_fck: dss2_alwon_fck@e00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_ck>;
@@ -968,7 +968,7 @@
 		clock-frequency = <0>;
 	};
 
-	gpt1_gate_fck: gpt1_gate_fck {
+	gpt1_gate_fck: gpt1_gate_fck@c00 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -976,7 +976,7 @@
 		reg = <0x0c00>;
 	};
 
-	gpt1_mux_fck: gpt1_mux_fck {
+	gpt1_mux_fck: gpt1_mux_fck@c40 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -989,7 +989,7 @@
 		clocks = <&gpt1_gate_fck>, <&gpt1_mux_fck>;
 	};
 
-	aes2_ick: aes2_ick {
+	aes2_ick: aes2_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -1005,7 +1005,7 @@
 		clock-div = <1>;
 	};
 
-	gpio1_dbck: gpio1_dbck {
+	gpio1_dbck: gpio1_dbck@c00 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&wkup_32k_fck>;
@@ -1013,7 +1013,7 @@
 		ti,bit-shift = <3>;
 	};
 
-	sha12_ick: sha12_ick {
+	sha12_ick: sha12_ick@a10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&core_l4_ick>;
@@ -1021,7 +1021,7 @@
 		ti,bit-shift = <27>;
 	};
 
-	wdt2_fck: wdt2_fck {
+	wdt2_fck: wdt2_fck@c00 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&wkup_32k_fck>;
@@ -1029,7 +1029,7 @@
 		ti,bit-shift = <5>;
 	};
 
-	wdt2_ick: wdt2_ick {
+	wdt2_ick: wdt2_ick@c10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&wkup_l4_ick>;
@@ -1037,7 +1037,7 @@
 		ti,bit-shift = <5>;
 	};
 
-	wdt1_ick: wdt1_ick {
+	wdt1_ick: wdt1_ick@c10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&wkup_l4_ick>;
@@ -1045,7 +1045,7 @@
 		ti,bit-shift = <4>;
 	};
 
-	gpio1_ick: gpio1_ick {
+	gpio1_ick: gpio1_ick@c10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&wkup_l4_ick>;
@@ -1053,7 +1053,7 @@
 		ti,bit-shift = <3>;
 	};
 
-	omap_32ksync_ick: omap_32ksync_ick {
+	omap_32ksync_ick: omap_32ksync_ick@c10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&wkup_l4_ick>;
@@ -1061,7 +1061,7 @@
 		ti,bit-shift = <2>;
 	};
 
-	gpt12_ick: gpt12_ick {
+	gpt12_ick: gpt12_ick@c10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&wkup_l4_ick>;
@@ -1069,7 +1069,7 @@
 		ti,bit-shift = <1>;
 	};
 
-	gpt1_ick: gpt1_ick {
+	gpt1_ick: gpt1_ick@c10 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&wkup_l4_ick>;
@@ -1093,7 +1093,7 @@
 		clock-div = <1>;
 	};
 
-	uart3_fck: uart3_fck {
+	uart3_fck: uart3_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&per_48m_fck>;
@@ -1101,7 +1101,7 @@
 		ti,bit-shift = <11>;
 	};
 
-	gpt2_gate_fck: gpt2_gate_fck {
+	gpt2_gate_fck: gpt2_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -1109,7 +1109,7 @@
 		reg = <0x1000>;
 	};
 
-	gpt2_mux_fck: gpt2_mux_fck {
+	gpt2_mux_fck: gpt2_mux_fck@1040 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -1122,7 +1122,7 @@
 		clocks = <&gpt2_gate_fck>, <&gpt2_mux_fck>;
 	};
 
-	gpt3_gate_fck: gpt3_gate_fck {
+	gpt3_gate_fck: gpt3_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -1130,7 +1130,7 @@
 		reg = <0x1000>;
 	};
 
-	gpt3_mux_fck: gpt3_mux_fck {
+	gpt3_mux_fck: gpt3_mux_fck@1040 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -1144,7 +1144,7 @@
 		clocks = <&gpt3_gate_fck>, <&gpt3_mux_fck>;
 	};
 
-	gpt4_gate_fck: gpt4_gate_fck {
+	gpt4_gate_fck: gpt4_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -1152,7 +1152,7 @@
 		reg = <0x1000>;
 	};
 
-	gpt4_mux_fck: gpt4_mux_fck {
+	gpt4_mux_fck: gpt4_mux_fck@1040 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -1166,7 +1166,7 @@
 		clocks = <&gpt4_gate_fck>, <&gpt4_mux_fck>;
 	};
 
-	gpt5_gate_fck: gpt5_gate_fck {
+	gpt5_gate_fck: gpt5_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -1174,7 +1174,7 @@
 		reg = <0x1000>;
 	};
 
-	gpt5_mux_fck: gpt5_mux_fck {
+	gpt5_mux_fck: gpt5_mux_fck@1040 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -1188,7 +1188,7 @@
 		clocks = <&gpt5_gate_fck>, <&gpt5_mux_fck>;
 	};
 
-	gpt6_gate_fck: gpt6_gate_fck {
+	gpt6_gate_fck: gpt6_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -1196,7 +1196,7 @@
 		reg = <0x1000>;
 	};
 
-	gpt6_mux_fck: gpt6_mux_fck {
+	gpt6_mux_fck: gpt6_mux_fck@1040 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -1210,7 +1210,7 @@
 		clocks = <&gpt6_gate_fck>, <&gpt6_mux_fck>;
 	};
 
-	gpt7_gate_fck: gpt7_gate_fck {
+	gpt7_gate_fck: gpt7_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -1218,7 +1218,7 @@
 		reg = <0x1000>;
 	};
 
-	gpt7_mux_fck: gpt7_mux_fck {
+	gpt7_mux_fck: gpt7_mux_fck@1040 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -1232,7 +1232,7 @@
 		clocks = <&gpt7_gate_fck>, <&gpt7_mux_fck>;
 	};
 
-	gpt8_gate_fck: gpt8_gate_fck {
+	gpt8_gate_fck: gpt8_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -1240,7 +1240,7 @@
 		reg = <0x1000>;
 	};
 
-	gpt8_mux_fck: gpt8_mux_fck {
+	gpt8_mux_fck: gpt8_mux_fck@1040 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -1254,7 +1254,7 @@
 		clocks = <&gpt8_gate_fck>, <&gpt8_mux_fck>;
 	};
 
-	gpt9_gate_fck: gpt9_gate_fck {
+	gpt9_gate_fck: gpt9_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&sys_ck>;
@@ -1262,7 +1262,7 @@
 		reg = <0x1000>;
 	};
 
-	gpt9_mux_fck: gpt9_mux_fck {
+	gpt9_mux_fck: gpt9_mux_fck@1040 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&omap_32k_fck>, <&sys_ck>;
@@ -1284,7 +1284,7 @@
 		clock-div = <1>;
 	};
 
-	gpio6_dbck: gpio6_dbck {
+	gpio6_dbck: gpio6_dbck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&per_32k_alwon_fck>;
@@ -1292,7 +1292,7 @@
 		ti,bit-shift = <17>;
 	};
 
-	gpio5_dbck: gpio5_dbck {
+	gpio5_dbck: gpio5_dbck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&per_32k_alwon_fck>;
@@ -1300,7 +1300,7 @@
 		ti,bit-shift = <16>;
 	};
 
-	gpio4_dbck: gpio4_dbck {
+	gpio4_dbck: gpio4_dbck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&per_32k_alwon_fck>;
@@ -1308,7 +1308,7 @@
 		ti,bit-shift = <15>;
 	};
 
-	gpio3_dbck: gpio3_dbck {
+	gpio3_dbck: gpio3_dbck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&per_32k_alwon_fck>;
@@ -1316,7 +1316,7 @@
 		ti,bit-shift = <14>;
 	};
 
-	gpio2_dbck: gpio2_dbck {
+	gpio2_dbck: gpio2_dbck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&per_32k_alwon_fck>;
@@ -1324,7 +1324,7 @@
 		ti,bit-shift = <13>;
 	};
 
-	wdt3_fck: wdt3_fck {
+	wdt3_fck: wdt3_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,wait-gate-clock";
 		clocks = <&per_32k_alwon_fck>;
@@ -1340,7 +1340,7 @@
 		clock-div = <1>;
 	};
 
-	gpio6_ick: gpio6_ick {
+	gpio6_ick: gpio6_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1348,7 +1348,7 @@
 		ti,bit-shift = <17>;
 	};
 
-	gpio5_ick: gpio5_ick {
+	gpio5_ick: gpio5_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1356,7 +1356,7 @@
 		ti,bit-shift = <16>;
 	};
 
-	gpio4_ick: gpio4_ick {
+	gpio4_ick: gpio4_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1364,7 +1364,7 @@
 		ti,bit-shift = <15>;
 	};
 
-	gpio3_ick: gpio3_ick {
+	gpio3_ick: gpio3_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1372,7 +1372,7 @@
 		ti,bit-shift = <14>;
 	};
 
-	gpio2_ick: gpio2_ick {
+	gpio2_ick: gpio2_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1380,7 +1380,7 @@
 		ti,bit-shift = <13>;
 	};
 
-	wdt3_ick: wdt3_ick {
+	wdt3_ick: wdt3_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1388,7 +1388,7 @@
 		ti,bit-shift = <12>;
 	};
 
-	uart3_ick: uart3_ick {
+	uart3_ick: uart3_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1396,7 +1396,7 @@
 		ti,bit-shift = <11>;
 	};
 
-	uart4_ick: uart4_ick {
+	uart4_ick: uart4_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1404,7 +1404,7 @@
 		ti,bit-shift = <18>;
 	};
 
-	gpt9_ick: gpt9_ick {
+	gpt9_ick: gpt9_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1412,7 +1412,7 @@
 		ti,bit-shift = <10>;
 	};
 
-	gpt8_ick: gpt8_ick {
+	gpt8_ick: gpt8_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1420,7 +1420,7 @@
 		ti,bit-shift = <9>;
 	};
 
-	gpt7_ick: gpt7_ick {
+	gpt7_ick: gpt7_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1428,7 +1428,7 @@
 		ti,bit-shift = <8>;
 	};
 
-	gpt6_ick: gpt6_ick {
+	gpt6_ick: gpt6_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1436,7 +1436,7 @@
 		ti,bit-shift = <7>;
 	};
 
-	gpt5_ick: gpt5_ick {
+	gpt5_ick: gpt5_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1444,7 +1444,7 @@
 		ti,bit-shift = <6>;
 	};
 
-	gpt4_ick: gpt4_ick {
+	gpt4_ick: gpt4_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1452,7 +1452,7 @@
 		ti,bit-shift = <5>;
 	};
 
-	gpt3_ick: gpt3_ick {
+	gpt3_ick: gpt3_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1460,7 +1460,7 @@
 		ti,bit-shift = <4>;
 	};
 
-	gpt2_ick: gpt2_ick {
+	gpt2_ick: gpt2_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1468,7 +1468,7 @@
 		ti,bit-shift = <3>;
 	};
 
-	mcbsp2_ick: mcbsp2_ick {
+	mcbsp2_ick: mcbsp2_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1476,7 +1476,7 @@
 		ti,bit-shift = <0>;
 	};
 
-	mcbsp3_ick: mcbsp3_ick {
+	mcbsp3_ick: mcbsp3_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1484,7 +1484,7 @@
 		ti,bit-shift = <1>;
 	};
 
-	mcbsp4_ick: mcbsp4_ick {
+	mcbsp4_ick: mcbsp4_ick@1010 {
 		#clock-cells = <0>;
 		compatible = "ti,omap3-interface-clock";
 		clocks = <&per_l4_ick>;
@@ -1492,7 +1492,7 @@
 		ti,bit-shift = <2>;
 	};
 
-	mcbsp2_gate_fck: mcbsp2_gate_fck {
+	mcbsp2_gate_fck: mcbsp2_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&mcbsp_clks>;
@@ -1500,7 +1500,7 @@
 		reg = <0x1000>;
 	};
 
-	mcbsp3_gate_fck: mcbsp3_gate_fck {
+	mcbsp3_gate_fck: mcbsp3_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&mcbsp_clks>;
@@ -1508,7 +1508,7 @@
 		reg = <0x1000>;
 	};
 
-	mcbsp4_gate_fck: mcbsp4_gate_fck {
+	mcbsp4_gate_fck: mcbsp4_gate_fck@1000 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-gate-clock";
 		clocks = <&mcbsp_clks>;
@@ -1516,7 +1516,7 @@
 		reg = <0x1000>;
 	};
 
-	emu_src_mux_ck: emu_src_mux_ck {
+	emu_src_mux_ck: emu_src_mux_ck@1140 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_ck>, <&emu_core_alwon_ck>, <&emu_per_alwon_ck>, <&emu_mpu_alwon_ck>;
@@ -1529,7 +1529,7 @@
 		clocks = <&emu_src_mux_ck>;
 	};
 
-	pclk_fck: pclk_fck {
+	pclk_fck: pclk_fck@1140 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&emu_src_ck>;
@@ -1539,7 +1539,7 @@
 		ti,index-starts-at-one;
 	};
 
-	pclkx2_fck: pclkx2_fck {
+	pclkx2_fck: pclkx2_fck@1140 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&emu_src_ck>;
@@ -1549,7 +1549,7 @@
 		ti,index-starts-at-one;
 	};
 
-	atclk_fck: atclk_fck {
+	atclk_fck: atclk_fck@1140 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&emu_src_ck>;
@@ -1559,7 +1559,7 @@
 		ti,index-starts-at-one;
 	};
 
-	traceclk_src_fck: traceclk_src_fck {
+	traceclk_src_fck: traceclk_src_fck@1140 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_ck>, <&emu_core_alwon_ck>, <&emu_per_alwon_ck>, <&emu_mpu_alwon_ck>;
@@ -1567,7 +1567,7 @@
 		reg = <0x1140>;
 	};
 
-	traceclk_fck: traceclk_fck {
+	traceclk_fck: traceclk_fck@1140 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&traceclk_src_fck>;
diff --git a/arch/arm/boot/dts/omap4-kc1.dts b/arch/arm/boot/dts/omap4-kc1.dts
new file mode 100644
index 0000000..2251bd5
--- /dev/null
+++ b/arch/arm/boot/dts/omap4-kc1.dts
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2016 Paul Kocialkowski <contact@paulk.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "omap443x.dtsi"
+
+/ {
+	model = "Amazon Kindle Fire (first generation)";
+	compatible = "amazon,omap4-kc1", "ti,omap4430", "ti,omap4";
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x20000000>; /* 512 MB */
+	};
+
+	pwmleds {
+		compatible = "pwm-leds";
+
+		green {
+			label = "green";
+			pwms = <&twl_pwm 0 7812500>;
+			max-brightness = <127>;
+		};
+
+		orange {
+			label = "orange";
+			pwms = <&twl_pwm 1 7812500>;
+			max-brightness = <127>;
+		};
+	};
+};
+
+&omap4_pmx_core {
+	pinctrl-names = "default";
+
+	uart3_pins: pinmux_uart3_pins {
+		pinctrl-single,pins = <
+			OMAP4_IOPAD(0x144, PIN_INPUT | MUX_MODE0)		/* uart3_rx_irrx */
+			OMAP4_IOPAD(0x146, PIN_OUTPUT | MUX_MODE0)		/* uart3_tx_irtx */
+		>;
+	};
+
+	i2c1_pins: pinmux_i2c1_pins {
+		pinctrl-single,pins = <
+			OMAP4_IOPAD(0x122, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_scl */
+			OMAP4_IOPAD(0x124, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c1_sda */
+		>;
+	};
+
+	i2c2_pins: pinmux_i2c2_pins {
+		pinctrl-single,pins = <
+			OMAP4_IOPAD(0x126, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c2_scl */
+			OMAP4_IOPAD(0x128, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c2_sda */
+		>;
+	};
+
+	i2c3_pins: pinmux_i2c3_pins {
+		pinctrl-single,pins = <
+			OMAP4_IOPAD(0x12a, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_scl */
+			OMAP4_IOPAD(0x12c, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c3_sda */
+		>;
+	};
+
+	i2c4_pins: pinmux_i2c4_pins {
+		pinctrl-single,pins = <
+			OMAP4_IOPAD(0x12e, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_scl */
+			OMAP4_IOPAD(0x130, PIN_INPUT_PULLUP | MUX_MODE0)	/* i2c4_sda */
+		>;
+	};
+
+	mmc2_pins: pinmux_mmc2_pins {
+		pinctrl-single,pins = <
+			OMAP4_IOPAD(0x040, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat0 */
+			OMAP4_IOPAD(0x042, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat1 */
+			OMAP4_IOPAD(0x044, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat2 */
+			OMAP4_IOPAD(0x046, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat3 */
+			OMAP4_IOPAD(0x048, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat4 */
+			OMAP4_IOPAD(0x04a, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat5 */
+			OMAP4_IOPAD(0x04c, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat6 */
+			OMAP4_IOPAD(0x04e, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_dat7 */
+			OMAP4_IOPAD(0x082, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_clk */
+			OMAP4_IOPAD(0x084, PIN_INPUT_PULLUP | MUX_MODE1)	/* sdmmc2_cmd */
+		>;
+	};
+
+	usb_otg_hs_pins: pinmux_usb_otg_hs_pins {
+		pinctrl-single,pins = <
+			OMAP4_IOPAD(0x194, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* usba0_otg_ce */
+			OMAP4_IOPAD(0x196, PIN_INPUT | MUX_MODE0)		/* usba0_otg_dp */
+			OMAP4_IOPAD(0x198, PIN_INPUT | MUX_MODE0)		/* usba0_otg_dm */
+		>;
+	};
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart3_pins>;
+
+	interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
+			       &omap4_pmx_core OMAP4_UART3_RX>;
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
+
+	clock-frequency = <400000>;
+
+	twl: twl@48 {
+		reg = <0x48>;
+		/* IRQ# = 7 */
+		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_1N cascaded to gic */
+
+		twl_power: power {
+			compatible = "ti,twl6030-power";
+			ti,system-power-controller;
+		};
+	};
+};
+
+&i2c2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c2_pins>;
+
+	clock-frequency = <400000>;
+};
+
+&i2c3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c3_pins>;
+
+	clock-frequency = <400000>;
+};
+
+&i2c4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c4_pins>;
+
+	clock-frequency = <400000>;
+};
+
+&mmc1 {
+	status = "disabled";
+};
+
+&mmc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc2_pins>;
+
+	vmmc-supply = <&vaux1>;
+	ti,non-removable;
+	bus-width = <8>;
+};
+
+&mmc3 {
+	status = "disabled";
+};
+
+&mmc4 {
+	status = "disabled";
+};
+
+&usb_otg_hs {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb_otg_hs_pins>;
+
+	interface-type = <1>;
+	mode = <3>;
+	power = <50>;
+};
+
+#include "twl6030.dtsi"
+#include "twl6030_omap4.dtsi"
+
+&twl_usb_comparator {
+	usb-supply = <&vusb>;
+};
diff --git a/arch/arm/boot/dts/omap4-var-som-om44.dtsi b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
index 49d032b..a17997f 100644
--- a/arch/arm/boot/dts/omap4-var-som-om44.dtsi
+++ b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
@@ -17,7 +17,7 @@
 		reg = <0x80000000 0x40000000>; /* 1 GB */
 	};
 
-	sound: sound@0 {
+	sound: sound {
 		compatible = "ti,abe-twl6040";
 		ti,model = "VAR-SOM-OM44";
 
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 2bd9c83..3fdc51c 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -70,7 +70,7 @@
 		compatible = "arm,cortex-a9-twd-timer";
 		clocks = <&mpu_periphclk>;
 		reg = <0x48240600 0x20>;
-		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_HIGH)>;
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_EDGE_RISING)>;
 		interrupt-parent = <&gic>;
 	};
 
@@ -198,7 +198,7 @@
 					#size-cells = <1>;
 					ranges = <0 0x5a0 0x170>;
 
-					pbias_regulator: pbias_regulator {
+					pbias_regulator: pbias_regulator@60 {
 						compatible = "ti,pbias-omap4", "ti,pbias-omap";
 						reg = <0x60 0x4>;
 						syscon = <&omap4_padconf_global>;
@@ -370,6 +370,10 @@
 			ti,no-idle-on-init;
 			clocks = <&l3_div_ck>;
 			clock-names = "fck";
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			#gpio-cells = <2>;
 		};
 
 		uart1: serial@4806a000 {
diff --git a/arch/arm/boot/dts/omap443x-clocks.dtsi b/arch/arm/boot/dts/omap443x-clocks.dtsi
index 2bd2166..f370d96 100644
--- a/arch/arm/boot/dts/omap443x-clocks.dtsi
+++ b/arch/arm/boot/dts/omap443x-clocks.dtsi
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 &prm_clocks {
-	bandgap_fclk: bandgap_fclk {
+	bandgap_fclk: bandgap_fclk@1888 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
index 0adfa1d..fc6a861 100644
--- a/arch/arm/boot/dts/omap443x.dtsi
+++ b/arch/arm/boot/dts/omap443x.dtsi
@@ -35,7 +35,7 @@
 	};
 
 	ocp {
-		bandgap: bandgap {
+		bandgap: bandgap@4a002260 {
 			reg = <0x4a002260 0x4
 			       0x4a00232C 0x4>;
 			compatible = "ti,omap4430-bandgap";
diff --git a/arch/arm/boot/dts/omap4460.dtsi b/arch/arm/boot/dts/omap4460.dtsi
index 5fa68f19..ef66e12 100644
--- a/arch/arm/boot/dts/omap4460.dtsi
+++ b/arch/arm/boot/dts/omap4460.dtsi
@@ -40,7 +40,7 @@
 	};
 
 	ocp {
-		bandgap: bandgap {
+		bandgap: bandgap@4a002260 {
 			reg = <0x4a002260 0x4
 			       0x4a00232C 0x4
 			       0x4a002378 0x18>;
diff --git a/arch/arm/boot/dts/omap446x-clocks.dtsi b/arch/arm/boot/dts/omap446x-clocks.dtsi
index be033e9..fb5929b 100644
--- a/arch/arm/boot/dts/omap446x-clocks.dtsi
+++ b/arch/arm/boot/dts/omap446x-clocks.dtsi
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 &prm_clocks {
-	div_ts_ck: div_ts_ck {
+	div_ts_ck: div_ts_ck@1888 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&l4_wkup_clk_mux_ck>;
@@ -17,7 +17,7 @@
 		ti,dividers = <8>, <16>, <32>;
 	};
 
-	bandgap_ts_fclk: bandgap_ts_fclk {
+	bandgap_ts_fclk: bandgap_ts_fclk@1888 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&div_ts_ck>;
diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
index f2c48f0..9573b37 100644
--- a/arch/arm/boot/dts/omap44xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi
@@ -20,7 +20,7 @@
 		clock-frequency = <12000000>;
 	};
 
-	pad_clks_ck: pad_clks_ck {
+	pad_clks_ck: pad_clks_ck@108 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&pad_clks_src_ck>;
@@ -46,7 +46,7 @@
 		clock-frequency = <12000000>;
 	};
 
-	slimbus_clk: slimbus_clk {
+	slimbus_clk: slimbus_clk@108 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&slimbus_src_clk>;
@@ -132,21 +132,21 @@
 		clock-frequency = <60000000>;
 	};
 
-	dpll_abe_ck: dpll_abe_ck {
+	dpll_abe_ck: dpll_abe_ck@1e0 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-m4xen-clock";
 		clocks = <&abe_dpll_refclk_mux_ck>, <&abe_dpll_bypass_clk_mux_ck>;
 		reg = <0x01e0>, <0x01e4>, <0x01ec>, <0x01e8>;
 	};
 
-	dpll_abe_x2_ck: dpll_abe_x2_ck {
+	dpll_abe_x2_ck: dpll_abe_x2_ck@1f0 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-x2-clock";
 		clocks = <&dpll_abe_ck>;
 		reg = <0x01f0>;
 	};
 
-	dpll_abe_m2x2_ck: dpll_abe_m2x2_ck {
+	dpll_abe_m2x2_ck: dpll_abe_m2x2_ck@1f0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_x2_ck>;
@@ -165,7 +165,7 @@
 		clock-div = <8>;
 	};
 
-	abe_clk: abe_clk {
+	abe_clk: abe_clk@108 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_m2x2_ck>;
@@ -174,7 +174,7 @@
 		ti,index-power-of-two;
 	};
 
-	aess_fclk: aess_fclk {
+	aess_fclk: aess_fclk@528 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&abe_clk>;
@@ -183,7 +183,7 @@
 		reg = <0x0528>;
 	};
 
-	dpll_abe_m3x2_ck: dpll_abe_m3x2_ck {
+	dpll_abe_m3x2_ck: dpll_abe_m3x2_ck@1f4 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_x2_ck>;
@@ -194,7 +194,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	core_hsd_byp_clk_mux_ck: core_hsd_byp_clk_mux_ck {
+	core_hsd_byp_clk_mux_ck: core_hsd_byp_clk_mux_ck@12c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&dpll_abe_m3x2_ck>;
@@ -202,7 +202,7 @@
 		reg = <0x012c>;
 	};
 
-	dpll_core_ck: dpll_core_ck {
+	dpll_core_ck: dpll_core_ck@120 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-core-clock";
 		clocks = <&sys_clkin_ck>, <&core_hsd_byp_clk_mux_ck>;
@@ -215,7 +215,7 @@
 		clocks = <&dpll_core_ck>;
 	};
 
-	dpll_core_m6x2_ck: dpll_core_m6x2_ck {
+	dpll_core_m6x2_ck: dpll_core_m6x2_ck@140 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -226,7 +226,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_core_m2_ck: dpll_core_m2_ck {
+	dpll_core_m2_ck: dpll_core_m2_ck@130 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_ck>;
@@ -245,7 +245,7 @@
 		clock-div = <2>;
 	};
 
-	dpll_core_m5x2_ck: dpll_core_m5x2_ck {
+	dpll_core_m5x2_ck: dpll_core_m5x2_ck@13c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -256,7 +256,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	div_core_ck: div_core_ck {
+	div_core_ck: div_core_ck@100 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_m5x2_ck>;
@@ -264,7 +264,7 @@
 		ti,max-div = <2>;
 	};
 
-	div_iva_hs_clk: div_iva_hs_clk {
+	div_iva_hs_clk: div_iva_hs_clk@1dc {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_m5x2_ck>;
@@ -273,7 +273,7 @@
 		ti,index-power-of-two;
 	};
 
-	div_mpu_hs_clk: div_mpu_hs_clk {
+	div_mpu_hs_clk: div_mpu_hs_clk@19c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_m5x2_ck>;
@@ -282,7 +282,7 @@
 		ti,index-power-of-two;
 	};
 
-	dpll_core_m4x2_ck: dpll_core_m4x2_ck {
+	dpll_core_m4x2_ck: dpll_core_m4x2_ck@138 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -301,7 +301,7 @@
 		clock-div = <2>;
 	};
 
-	dpll_abe_m2_ck: dpll_abe_m2_ck {
+	dpll_abe_m2_ck: dpll_abe_m2_ck@1f0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_ck>;
@@ -310,7 +310,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_m3x2_gate_ck: dpll_core_m3x2_gate_ck {
+	dpll_core_m3x2_gate_ck: dpll_core_m3x2_gate_ck@134 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -318,7 +318,7 @@
 		reg = <0x0134>;
 	};
 
-	dpll_core_m3x2_div_ck: dpll_core_m3x2_div_ck {
+	dpll_core_m3x2_div_ck: dpll_core_m3x2_div_ck@134 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -333,7 +333,7 @@
 		clocks = <&dpll_core_m3x2_gate_ck>, <&dpll_core_m3x2_div_ck>;
 	};
 
-	dpll_core_m7x2_ck: dpll_core_m7x2_ck {
+	dpll_core_m7x2_ck: dpll_core_m7x2_ck@144 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -344,7 +344,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	iva_hsd_byp_clk_mux_ck: iva_hsd_byp_clk_mux_ck {
+	iva_hsd_byp_clk_mux_ck: iva_hsd_byp_clk_mux_ck@1ac {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&div_iva_hs_clk>;
@@ -352,7 +352,7 @@
 		reg = <0x01ac>;
 	};
 
-	dpll_iva_ck: dpll_iva_ck {
+	dpll_iva_ck: dpll_iva_ck@1a0 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin_ck>, <&iva_hsd_byp_clk_mux_ck>;
@@ -365,7 +365,7 @@
 		clocks = <&dpll_iva_ck>;
 	};
 
-	dpll_iva_m4x2_ck: dpll_iva_m4x2_ck {
+	dpll_iva_m4x2_ck: dpll_iva_m4x2_ck@1b8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_iva_x2_ck>;
@@ -376,7 +376,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_iva_m5x2_ck: dpll_iva_m5x2_ck {
+	dpll_iva_m5x2_ck: dpll_iva_m5x2_ck@1bc {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_iva_x2_ck>;
@@ -387,14 +387,14 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_mpu_ck: dpll_mpu_ck {
+	dpll_mpu_ck: dpll_mpu_ck@160 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin_ck>, <&div_mpu_hs_clk>;
 		reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>;
 	};
 
-	dpll_mpu_m2_ck: dpll_mpu_m2_ck {
+	dpll_mpu_m2_ck: dpll_mpu_m2_ck@170 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_mpu_ck>;
@@ -421,7 +421,7 @@
 		clock-div = <3>;
 	};
 
-	l3_div_ck: l3_div_ck {
+	l3_div_ck: l3_div_ck@100 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&div_core_ck>;
@@ -430,7 +430,7 @@
 		reg = <0x0100>;
 	};
 
-	l4_div_ck: l4_div_ck {
+	l4_div_ck: l4_div_ck@100 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&l3_div_ck>;
@@ -455,7 +455,7 @@
 		clock-div = <2>;
 	};
 
-	ocp_abe_iclk: ocp_abe_iclk {
+	ocp_abe_iclk: ocp_abe_iclk@528 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&aess_fclk>;
@@ -472,7 +472,7 @@
 		clock-div = <4>;
 	};
 
-	dmic_sync_mux_ck: dmic_sync_mux_ck {
+	dmic_sync_mux_ck: dmic_sync_mux_ck@538 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&syc_clk_div_ck>, <&func_24m_clk>;
@@ -480,7 +480,7 @@
 		reg = <0x0538>;
 	};
 
-	func_dmic_abe_gfclk: func_dmic_abe_gfclk {
+	func_dmic_abe_gfclk: func_dmic_abe_gfclk@538 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dmic_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
@@ -488,7 +488,7 @@
 		reg = <0x0538>;
 	};
 
-	mcasp_sync_mux_ck: mcasp_sync_mux_ck {
+	mcasp_sync_mux_ck: mcasp_sync_mux_ck@540 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&syc_clk_div_ck>, <&func_24m_clk>;
@@ -496,7 +496,7 @@
 		reg = <0x0540>;
 	};
 
-	func_mcasp_abe_gfclk: func_mcasp_abe_gfclk {
+	func_mcasp_abe_gfclk: func_mcasp_abe_gfclk@540 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&mcasp_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
@@ -504,7 +504,7 @@
 		reg = <0x0540>;
 	};
 
-	mcbsp1_sync_mux_ck: mcbsp1_sync_mux_ck {
+	mcbsp1_sync_mux_ck: mcbsp1_sync_mux_ck@548 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&syc_clk_div_ck>, <&func_24m_clk>;
@@ -512,7 +512,7 @@
 		reg = <0x0548>;
 	};
 
-	func_mcbsp1_gfclk: func_mcbsp1_gfclk {
+	func_mcbsp1_gfclk: func_mcbsp1_gfclk@548 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&mcbsp1_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
@@ -520,7 +520,7 @@
 		reg = <0x0548>;
 	};
 
-	mcbsp2_sync_mux_ck: mcbsp2_sync_mux_ck {
+	mcbsp2_sync_mux_ck: mcbsp2_sync_mux_ck@550 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&syc_clk_div_ck>, <&func_24m_clk>;
@@ -528,7 +528,7 @@
 		reg = <0x0550>;
 	};
 
-	func_mcbsp2_gfclk: func_mcbsp2_gfclk {
+	func_mcbsp2_gfclk: func_mcbsp2_gfclk@550 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&mcbsp2_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
@@ -536,7 +536,7 @@
 		reg = <0x0550>;
 	};
 
-	mcbsp3_sync_mux_ck: mcbsp3_sync_mux_ck {
+	mcbsp3_sync_mux_ck: mcbsp3_sync_mux_ck@558 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&syc_clk_div_ck>, <&func_24m_clk>;
@@ -544,7 +544,7 @@
 		reg = <0x0558>;
 	};
 
-	func_mcbsp3_gfclk: func_mcbsp3_gfclk {
+	func_mcbsp3_gfclk: func_mcbsp3_gfclk@558 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&mcbsp3_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
@@ -552,7 +552,7 @@
 		reg = <0x0558>;
 	};
 
-	slimbus1_fclk_1: slimbus1_fclk_1 {
+	slimbus1_fclk_1: slimbus1_fclk_1@560 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_24m_clk>;
@@ -560,7 +560,7 @@
 		reg = <0x0560>;
 	};
 
-	slimbus1_fclk_0: slimbus1_fclk_0 {
+	slimbus1_fclk_0: slimbus1_fclk_0@560 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&abe_24m_fclk>;
@@ -568,7 +568,7 @@
 		reg = <0x0560>;
 	};
 
-	slimbus1_fclk_2: slimbus1_fclk_2 {
+	slimbus1_fclk_2: slimbus1_fclk_2@560 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&pad_clks_ck>;
@@ -576,7 +576,7 @@
 		reg = <0x0560>;
 	};
 
-	slimbus1_slimbus_clk: slimbus1_slimbus_clk {
+	slimbus1_slimbus_clk: slimbus1_slimbus_clk@560 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&slimbus_clk>;
@@ -584,7 +584,7 @@
 		reg = <0x0560>;
 	};
 
-	timer5_sync_mux: timer5_sync_mux {
+	timer5_sync_mux: timer5_sync_mux@568 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&syc_clk_div_ck>, <&sys_32k_ck>;
@@ -592,7 +592,7 @@
 		reg = <0x0568>;
 	};
 
-	timer6_sync_mux: timer6_sync_mux {
+	timer6_sync_mux: timer6_sync_mux@570 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&syc_clk_div_ck>, <&sys_32k_ck>;
@@ -600,7 +600,7 @@
 		reg = <0x0570>;
 	};
 
-	timer7_sync_mux: timer7_sync_mux {
+	timer7_sync_mux: timer7_sync_mux@578 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&syc_clk_div_ck>, <&sys_32k_ck>;
@@ -608,7 +608,7 @@
 		reg = <0x0578>;
 	};
 
-	timer8_sync_mux: timer8_sync_mux {
+	timer8_sync_mux: timer8_sync_mux@580 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&syc_clk_div_ck>, <&sys_32k_ck>;
@@ -623,7 +623,7 @@
 	};
 };
 &prm_clocks {
-	sys_clkin_ck: sys_clkin_ck {
+	sys_clkin_ck: sys_clkin_ck@110 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&virt_12000000_ck>, <&virt_13000000_ck>, <&virt_16800000_ck>, <&virt_19200000_ck>, <&virt_26000000_ck>, <&virt_27000000_ck>, <&virt_38400000_ck>;
@@ -631,7 +631,7 @@
 		ti,index-starts-at-one;
 	};
 
-	abe_dpll_bypass_clk_mux_ck: abe_dpll_bypass_clk_mux_ck {
+	abe_dpll_bypass_clk_mux_ck: abe_dpll_bypass_clk_mux_ck@108 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
@@ -639,7 +639,7 @@
 		reg = <0x0108>;
 	};
 
-	abe_dpll_refclk_mux_ck: abe_dpll_refclk_mux_ck {
+	abe_dpll_refclk_mux_ck: abe_dpll_refclk_mux_ck@10c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
@@ -654,14 +654,14 @@
 		clock-div = <1>;
 	};
 
-	l4_wkup_clk_mux_ck: l4_wkup_clk_mux_ck {
+	l4_wkup_clk_mux_ck: l4_wkup_clk_mux_ck@108 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&lp_clk_div_ck>;
 		reg = <0x0108>;
 	};
 
-	syc_clk_div_ck: syc_clk_div_ck {
+	syc_clk_div_ck: syc_clk_div_ck@100 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&sys_clkin_ck>;
@@ -669,7 +669,7 @@
 		ti,max-div = <2>;
 	};
 
-	gpio1_dbclk: gpio1_dbclk {
+	gpio1_dbclk: gpio1_dbclk@1838 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -677,7 +677,7 @@
 		reg = <0x1838>;
 	};
 
-	dmt1_clk_mux: dmt1_clk_mux {
+	dmt1_clk_mux: dmt1_clk_mux@1840 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
@@ -685,7 +685,7 @@
 		reg = <0x1840>;
 	};
 
-	usim_ck: usim_ck {
+	usim_ck: usim_ck@1858 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_m4x2_ck>;
@@ -694,7 +694,7 @@
 		ti,dividers = <14>, <18>;
 	};
 
-	usim_fclk: usim_fclk {
+	usim_fclk: usim_fclk@1858 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&usim_ck>;
@@ -702,7 +702,7 @@
 		reg = <0x1858>;
 	};
 
-	pmd_stm_clock_mux_ck: pmd_stm_clock_mux_ck {
+	pmd_stm_clock_mux_ck: pmd_stm_clock_mux_ck@1a20 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&dpll_core_m6x2_ck>, <&tie_low_clock_ck>;
@@ -710,7 +710,7 @@
 		reg = <0x1a20>;
 	};
 
-	pmd_trace_clk_mux_ck: pmd_trace_clk_mux_ck {
+	pmd_trace_clk_mux_ck: pmd_trace_clk_mux_ck@1a20 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&dpll_core_m6x2_ck>, <&tie_low_clock_ck>;
@@ -718,7 +718,7 @@
 		reg = <0x1a20>;
 	};
 
-	stm_clk_div_ck: stm_clk_div_ck {
+	stm_clk_div_ck: stm_clk_div_ck@1a20 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&pmd_stm_clock_mux_ck>;
@@ -728,7 +728,7 @@
 		ti,index-power-of-two;
 	};
 
-	trace_clk_div_div_ck: trace_clk_div_div_ck {
+	trace_clk_div_div_ck: trace_clk_div_div_ck@1a20 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&pmd_trace_clk_mux_ck>;
@@ -752,7 +752,7 @@
 };
 
 &cm2_clocks {
-	per_hsd_byp_clk_mux_ck: per_hsd_byp_clk_mux_ck {
+	per_hsd_byp_clk_mux_ck: per_hsd_byp_clk_mux_ck@14c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&per_hs_clk_div_ck>;
@@ -760,14 +760,14 @@
 		reg = <0x014c>;
 	};
 
-	dpll_per_ck: dpll_per_ck {
+	dpll_per_ck: dpll_per_ck@140 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin_ck>, <&per_hsd_byp_clk_mux_ck>;
 		reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
 	};
 
-	dpll_per_m2_ck: dpll_per_m2_ck {
+	dpll_per_m2_ck: dpll_per_m2_ck@150 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_ck>;
@@ -776,14 +776,14 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_per_x2_ck: dpll_per_x2_ck {
+	dpll_per_x2_ck: dpll_per_x2_ck@150 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-x2-clock";
 		clocks = <&dpll_per_ck>;
 		reg = <0x0150>;
 	};
 
-	dpll_per_m2x2_ck: dpll_per_m2x2_ck {
+	dpll_per_m2x2_ck: dpll_per_m2x2_ck@150 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -794,7 +794,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_per_m3x2_gate_ck: dpll_per_m3x2_gate_ck {
+	dpll_per_m3x2_gate_ck: dpll_per_m3x2_gate_ck@154 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -802,7 +802,7 @@
 		reg = <0x0154>;
 	};
 
-	dpll_per_m3x2_div_ck: dpll_per_m3x2_div_ck {
+	dpll_per_m3x2_div_ck: dpll_per_m3x2_div_ck@154 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -817,7 +817,7 @@
 		clocks = <&dpll_per_m3x2_gate_ck>, <&dpll_per_m3x2_div_ck>;
 	};
 
-	dpll_per_m4x2_ck: dpll_per_m4x2_ck {
+	dpll_per_m4x2_ck: dpll_per_m4x2_ck@158 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -828,7 +828,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_per_m5x2_ck: dpll_per_m5x2_ck {
+	dpll_per_m5x2_ck: dpll_per_m5x2_ck@15c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -839,7 +839,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_per_m6x2_ck: dpll_per_m6x2_ck {
+	dpll_per_m6x2_ck: dpll_per_m6x2_ck@160 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -850,7 +850,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_per_m7x2_ck: dpll_per_m7x2_ck {
+	dpll_per_m7x2_ck: dpll_per_m7x2_ck@164 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -861,14 +861,14 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_usb_ck: dpll_usb_ck {
+	dpll_usb_ck: dpll_usb_ck@180 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-j-type-clock";
 		clocks = <&sys_clkin_ck>, <&usb_hs_clk_div_ck>;
 		reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
 	};
 
-	dpll_usb_clkdcoldo_ck: dpll_usb_clkdcoldo_ck {
+	dpll_usb_clkdcoldo_ck: dpll_usb_clkdcoldo_ck@1b4 {
 		#clock-cells = <0>;
 		compatible = "ti,fixed-factor-clock";
 		clocks = <&dpll_usb_ck>;
@@ -879,7 +879,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	dpll_usb_m2_ck: dpll_usb_m2_ck {
+	dpll_usb_m2_ck: dpll_usb_m2_ck@190 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_usb_ck>;
@@ -890,7 +890,7 @@
 		ti,invert-autoidle-bit;
 	};
 
-	ducati_clk_mux_ck: ducati_clk_mux_ck {
+	ducati_clk_mux_ck: ducati_clk_mux_ck@100 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&div_core_ck>, <&dpll_per_m6x2_ck>;
@@ -921,7 +921,7 @@
 		clock-div = <8>;
 	};
 
-	func_48m_fclk: func_48m_fclk {
+	func_48m_fclk: func_48m_fclk@108 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_m2x2_ck>;
@@ -937,7 +937,7 @@
 		clock-div = <4>;
 	};
 
-	func_64m_fclk: func_64m_fclk {
+	func_64m_fclk: func_64m_fclk@108 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_m4x2_ck>;
@@ -945,7 +945,7 @@
 		ti,dividers = <2>, <4>;
 	};
 
-	func_96m_fclk: func_96m_fclk {
+	func_96m_fclk: func_96m_fclk@108 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_m2x2_ck>;
@@ -953,7 +953,7 @@
 		ti,dividers = <2>, <4>;
 	};
 
-	init_60m_fclk: init_60m_fclk {
+	init_60m_fclk: init_60m_fclk@104 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_usb_m2_ck>;
@@ -961,7 +961,7 @@
 		ti,dividers = <1>, <8>;
 	};
 
-	per_abe_nc_fclk: per_abe_nc_fclk {
+	per_abe_nc_fclk: per_abe_nc_fclk@108 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_m2_ck>;
@@ -969,7 +969,7 @@
 		ti,max-div = <2>;
 	};
 
-	aes1_fck: aes1_fck {
+	aes1_fck: aes1_fck@15a0 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3_div_ck>;
@@ -977,7 +977,7 @@
 		reg = <0x15a0>;
 	};
 
-	aes2_fck: aes2_fck {
+	aes2_fck: aes2_fck@15a8 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3_div_ck>;
@@ -985,7 +985,7 @@
 		reg = <0x15a8>;
 	};
 
-	dss_sys_clk: dss_sys_clk {
+	dss_sys_clk: dss_sys_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&syc_clk_div_ck>;
@@ -993,7 +993,7 @@
 		reg = <0x1120>;
 	};
 
-	dss_tv_clk: dss_tv_clk {
+	dss_tv_clk: dss_tv_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&extalt_clkin_ck>;
@@ -1001,7 +1001,7 @@
 		reg = <0x1120>;
 	};
 
-	dss_dss_clk: dss_dss_clk {
+	dss_dss_clk: dss_dss_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_per_m5x2_ck>;
@@ -1010,7 +1010,7 @@
 		ti,set-rate-parent;
 	};
 
-	dss_48mhz_clk: dss_48mhz_clk {
+	dss_48mhz_clk: dss_48mhz_clk@1120 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_48mc_fclk>;
@@ -1018,7 +1018,7 @@
 		reg = <0x1120>;
 	};
 
-	fdif_fck: fdif_fck {
+	fdif_fck: fdif_fck@1028 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_m4x2_ck>;
@@ -1028,7 +1028,7 @@
 		ti,index-power-of-two;
 	};
 
-	gpio2_dbclk: gpio2_dbclk {
+	gpio2_dbclk: gpio2_dbclk@1460 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1036,7 +1036,7 @@
 		reg = <0x1460>;
 	};
 
-	gpio3_dbclk: gpio3_dbclk {
+	gpio3_dbclk: gpio3_dbclk@1468 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1044,7 +1044,7 @@
 		reg = <0x1468>;
 	};
 
-	gpio4_dbclk: gpio4_dbclk {
+	gpio4_dbclk: gpio4_dbclk@1470 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1052,7 +1052,7 @@
 		reg = <0x1470>;
 	};
 
-	gpio5_dbclk: gpio5_dbclk {
+	gpio5_dbclk: gpio5_dbclk@1478 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1060,7 +1060,7 @@
 		reg = <0x1478>;
 	};
 
-	gpio6_dbclk: gpio6_dbclk {
+	gpio6_dbclk: gpio6_dbclk@1480 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1068,7 +1068,7 @@
 		reg = <0x1480>;
 	};
 
-	sgx_clk_mux: sgx_clk_mux {
+	sgx_clk_mux: sgx_clk_mux@1220 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_core_m7x2_ck>, <&dpll_per_m7x2_ck>;
@@ -1076,7 +1076,7 @@
 		reg = <0x1220>;
 	};
 
-	hsi_fck: hsi_fck {
+	hsi_fck: hsi_fck@1338 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_m2x2_ck>;
@@ -1086,7 +1086,7 @@
 		ti,index-power-of-two;
 	};
 
-	iss_ctrlclk: iss_ctrlclk {
+	iss_ctrlclk: iss_ctrlclk@1020 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_96m_fclk>;
@@ -1094,7 +1094,7 @@
 		reg = <0x1020>;
 	};
 
-	mcbsp4_sync_mux_ck: mcbsp4_sync_mux_ck {
+	mcbsp4_sync_mux_ck: mcbsp4_sync_mux_ck@14e0 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_96m_fclk>, <&per_abe_nc_fclk>;
@@ -1102,7 +1102,7 @@
 		reg = <0x14e0>;
 	};
 
-	per_mcbsp4_gfclk: per_mcbsp4_gfclk {
+	per_mcbsp4_gfclk: per_mcbsp4_gfclk@14e0 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&mcbsp4_sync_mux_ck>, <&pad_clks_ck>;
@@ -1110,7 +1110,7 @@
 		reg = <0x14e0>;
 	};
 
-	hsmmc1_fclk: hsmmc1_fclk {
+	hsmmc1_fclk: hsmmc1_fclk@1328 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_64m_fclk>, <&func_96m_fclk>;
@@ -1118,7 +1118,7 @@
 		reg = <0x1328>;
 	};
 
-	hsmmc2_fclk: hsmmc2_fclk {
+	hsmmc2_fclk: hsmmc2_fclk@1330 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_64m_fclk>, <&func_96m_fclk>;
@@ -1126,7 +1126,7 @@
 		reg = <0x1330>;
 	};
 
-	ocp2scp_usb_phy_phy_48m: ocp2scp_usb_phy_phy_48m {
+	ocp2scp_usb_phy_phy_48m: ocp2scp_usb_phy_phy_48m@13e0 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_48m_fclk>;
@@ -1134,7 +1134,7 @@
 		reg = <0x13e0>;
 	};
 
-	sha2md5_fck: sha2md5_fck {
+	sha2md5_fck: sha2md5_fck@15c8 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3_div_ck>;
@@ -1142,7 +1142,7 @@
 		reg = <0x15c8>;
 	};
 
-	slimbus2_fclk_1: slimbus2_fclk_1 {
+	slimbus2_fclk_1: slimbus2_fclk_1@1538 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&per_abe_24m_fclk>;
@@ -1150,7 +1150,7 @@
 		reg = <0x1538>;
 	};
 
-	slimbus2_fclk_0: slimbus2_fclk_0 {
+	slimbus2_fclk_0: slimbus2_fclk_0@1538 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_24mc_fclk>;
@@ -1158,7 +1158,7 @@
 		reg = <0x1538>;
 	};
 
-	slimbus2_slimbus_clk: slimbus2_slimbus_clk {
+	slimbus2_slimbus_clk: slimbus2_slimbus_clk@1538 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&pad_slimbus_core_clks_ck>;
@@ -1166,7 +1166,7 @@
 		reg = <0x1538>;
 	};
 
-	smartreflex_core_fck: smartreflex_core_fck {
+	smartreflex_core_fck: smartreflex_core_fck@638 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4_wkup_clk_mux_ck>;
@@ -1174,7 +1174,7 @@
 		reg = <0x0638>;
 	};
 
-	smartreflex_iva_fck: smartreflex_iva_fck {
+	smartreflex_iva_fck: smartreflex_iva_fck@630 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4_wkup_clk_mux_ck>;
@@ -1182,7 +1182,7 @@
 		reg = <0x0630>;
 	};
 
-	smartreflex_mpu_fck: smartreflex_mpu_fck {
+	smartreflex_mpu_fck: smartreflex_mpu_fck@628 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4_wkup_clk_mux_ck>;
@@ -1190,7 +1190,7 @@
 		reg = <0x0628>;
 	};
 
-	cm2_dm10_mux: cm2_dm10_mux {
+	cm2_dm10_mux: cm2_dm10_mux@1428 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
@@ -1198,7 +1198,7 @@
 		reg = <0x1428>;
 	};
 
-	cm2_dm11_mux: cm2_dm11_mux {
+	cm2_dm11_mux: cm2_dm11_mux@1430 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
@@ -1206,7 +1206,7 @@
 		reg = <0x1430>;
 	};
 
-	cm2_dm2_mux: cm2_dm2_mux {
+	cm2_dm2_mux: cm2_dm2_mux@1438 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
@@ -1214,7 +1214,7 @@
 		reg = <0x1438>;
 	};
 
-	cm2_dm3_mux: cm2_dm3_mux {
+	cm2_dm3_mux: cm2_dm3_mux@1440 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
@@ -1222,7 +1222,7 @@
 		reg = <0x1440>;
 	};
 
-	cm2_dm4_mux: cm2_dm4_mux {
+	cm2_dm4_mux: cm2_dm4_mux@1448 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
@@ -1230,7 +1230,7 @@
 		reg = <0x1448>;
 	};
 
-	cm2_dm9_mux: cm2_dm9_mux {
+	cm2_dm9_mux: cm2_dm9_mux@1450 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin_ck>, <&sys_32k_ck>;
@@ -1238,7 +1238,7 @@
 		reg = <0x1450>;
 	};
 
-	usb_host_fs_fck: usb_host_fs_fck {
+	usb_host_fs_fck: usb_host_fs_fck@13d0 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_48mc_fclk>;
@@ -1246,7 +1246,7 @@
 		reg = <0x13d0>;
 	};
 
-	utmi_p1_gfclk: utmi_p1_gfclk {
+	utmi_p1_gfclk: utmi_p1_gfclk@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&init_60m_fclk>, <&xclk60mhsp1_ck>;
@@ -1254,7 +1254,7 @@
 		reg = <0x1358>;
 	};
 
-	usb_host_hs_utmi_p1_clk: usb_host_hs_utmi_p1_clk {
+	usb_host_hs_utmi_p1_clk: usb_host_hs_utmi_p1_clk@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&utmi_p1_gfclk>;
@@ -1262,7 +1262,7 @@
 		reg = <0x1358>;
 	};
 
-	utmi_p2_gfclk: utmi_p2_gfclk {
+	utmi_p2_gfclk: utmi_p2_gfclk@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&init_60m_fclk>, <&xclk60mhsp2_ck>;
@@ -1270,7 +1270,7 @@
 		reg = <0x1358>;
 	};
 
-	usb_host_hs_utmi_p2_clk: usb_host_hs_utmi_p2_clk {
+	usb_host_hs_utmi_p2_clk: usb_host_hs_utmi_p2_clk@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&utmi_p2_gfclk>;
@@ -1278,7 +1278,7 @@
 		reg = <0x1358>;
 	};
 
-	usb_host_hs_utmi_p3_clk: usb_host_hs_utmi_p3_clk {
+	usb_host_hs_utmi_p3_clk: usb_host_hs_utmi_p3_clk@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&init_60m_fclk>;
@@ -1286,7 +1286,7 @@
 		reg = <0x1358>;
 	};
 
-	usb_host_hs_hsic480m_p1_clk: usb_host_hs_hsic480m_p1_clk {
+	usb_host_hs_hsic480m_p1_clk: usb_host_hs_hsic480m_p1_clk@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_usb_m2_ck>;
@@ -1294,7 +1294,7 @@
 		reg = <0x1358>;
 	};
 
-	usb_host_hs_hsic60m_p1_clk: usb_host_hs_hsic60m_p1_clk {
+	usb_host_hs_hsic60m_p1_clk: usb_host_hs_hsic60m_p1_clk@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&init_60m_fclk>;
@@ -1302,7 +1302,7 @@
 		reg = <0x1358>;
 	};
 
-	usb_host_hs_hsic60m_p2_clk: usb_host_hs_hsic60m_p2_clk {
+	usb_host_hs_hsic60m_p2_clk: usb_host_hs_hsic60m_p2_clk@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&init_60m_fclk>;
@@ -1310,7 +1310,7 @@
 		reg = <0x1358>;
 	};
 
-	usb_host_hs_hsic480m_p2_clk: usb_host_hs_hsic480m_p2_clk {
+	usb_host_hs_hsic480m_p2_clk: usb_host_hs_hsic480m_p2_clk@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_usb_m2_ck>;
@@ -1318,7 +1318,7 @@
 		reg = <0x1358>;
 	};
 
-	usb_host_hs_func48mclk: usb_host_hs_func48mclk {
+	usb_host_hs_func48mclk: usb_host_hs_func48mclk@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_48mc_fclk>;
@@ -1326,7 +1326,7 @@
 		reg = <0x1358>;
 	};
 
-	usb_host_hs_fck: usb_host_hs_fck {
+	usb_host_hs_fck: usb_host_hs_fck@1358 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&init_60m_fclk>;
@@ -1334,7 +1334,7 @@
 		reg = <0x1358>;
 	};
 
-	otg_60m_gfclk: otg_60m_gfclk {
+	otg_60m_gfclk: otg_60m_gfclk@1360 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&utmi_phy_clkout_ck>, <&xclk60motg_ck>;
@@ -1342,7 +1342,7 @@
 		reg = <0x1360>;
 	};
 
-	usb_otg_hs_xclk: usb_otg_hs_xclk {
+	usb_otg_hs_xclk: usb_otg_hs_xclk@1360 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&otg_60m_gfclk>;
@@ -1350,7 +1350,7 @@
 		reg = <0x1360>;
 	};
 
-	usb_otg_hs_ick: usb_otg_hs_ick {
+	usb_otg_hs_ick: usb_otg_hs_ick@1360 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3_div_ck>;
@@ -1358,7 +1358,7 @@
 		reg = <0x1360>;
 	};
 
-	usb_phy_cm_clk32k: usb_phy_cm_clk32k {
+	usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1366,7 +1366,7 @@
 		reg = <0x0640>;
 	};
 
-	usb_tll_hs_usb_ch2_clk: usb_tll_hs_usb_ch2_clk {
+	usb_tll_hs_usb_ch2_clk: usb_tll_hs_usb_ch2_clk@1368 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&init_60m_fclk>;
@@ -1374,7 +1374,7 @@
 		reg = <0x1368>;
 	};
 
-	usb_tll_hs_usb_ch0_clk: usb_tll_hs_usb_ch0_clk {
+	usb_tll_hs_usb_ch0_clk: usb_tll_hs_usb_ch0_clk@1368 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&init_60m_fclk>;
@@ -1382,7 +1382,7 @@
 		reg = <0x1368>;
 	};
 
-	usb_tll_hs_usb_ch1_clk: usb_tll_hs_usb_ch1_clk {
+	usb_tll_hs_usb_ch1_clk: usb_tll_hs_usb_ch1_clk@1368 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&init_60m_fclk>;
@@ -1390,7 +1390,7 @@
 		reg = <0x1368>;
 	};
 
-	usb_tll_hs_ick: usb_tll_hs_ick {
+	usb_tll_hs_ick: usb_tll_hs_ick@1368 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l4_div_ck>;
@@ -1407,7 +1407,7 @@
 };
 
 &scrm_clocks {
-	auxclk0_src_gate_ck: auxclk0_src_gate_ck {
+	auxclk0_src_gate_ck: auxclk0_src_gate_ck@310 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1415,7 +1415,7 @@
 		reg = <0x0310>;
 	};
 
-	auxclk0_src_mux_ck: auxclk0_src_mux_ck {
+	auxclk0_src_mux_ck: auxclk0_src_mux_ck@310 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1429,7 +1429,7 @@
 		clocks = <&auxclk0_src_gate_ck>, <&auxclk0_src_mux_ck>;
 	};
 
-	auxclk0_ck: auxclk0_ck {
+	auxclk0_ck: auxclk0_ck@310 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk0_src_ck>;
@@ -1438,7 +1438,7 @@
 		reg = <0x0310>;
 	};
 
-	auxclk1_src_gate_ck: auxclk1_src_gate_ck {
+	auxclk1_src_gate_ck: auxclk1_src_gate_ck@314 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1446,7 +1446,7 @@
 		reg = <0x0314>;
 	};
 
-	auxclk1_src_mux_ck: auxclk1_src_mux_ck {
+	auxclk1_src_mux_ck: auxclk1_src_mux_ck@314 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1460,7 +1460,7 @@
 		clocks = <&auxclk1_src_gate_ck>, <&auxclk1_src_mux_ck>;
 	};
 
-	auxclk1_ck: auxclk1_ck {
+	auxclk1_ck: auxclk1_ck@314 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk1_src_ck>;
@@ -1469,7 +1469,7 @@
 		reg = <0x0314>;
 	};
 
-	auxclk2_src_gate_ck: auxclk2_src_gate_ck {
+	auxclk2_src_gate_ck: auxclk2_src_gate_ck@318 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1477,7 +1477,7 @@
 		reg = <0x0318>;
 	};
 
-	auxclk2_src_mux_ck: auxclk2_src_mux_ck {
+	auxclk2_src_mux_ck: auxclk2_src_mux_ck@318 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1491,7 +1491,7 @@
 		clocks = <&auxclk2_src_gate_ck>, <&auxclk2_src_mux_ck>;
 	};
 
-	auxclk2_ck: auxclk2_ck {
+	auxclk2_ck: auxclk2_ck@318 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk2_src_ck>;
@@ -1500,7 +1500,7 @@
 		reg = <0x0318>;
 	};
 
-	auxclk3_src_gate_ck: auxclk3_src_gate_ck {
+	auxclk3_src_gate_ck: auxclk3_src_gate_ck@31c {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1508,7 +1508,7 @@
 		reg = <0x031c>;
 	};
 
-	auxclk3_src_mux_ck: auxclk3_src_mux_ck {
+	auxclk3_src_mux_ck: auxclk3_src_mux_ck@31c {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1522,7 +1522,7 @@
 		clocks = <&auxclk3_src_gate_ck>, <&auxclk3_src_mux_ck>;
 	};
 
-	auxclk3_ck: auxclk3_ck {
+	auxclk3_ck: auxclk3_ck@31c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk3_src_ck>;
@@ -1531,7 +1531,7 @@
 		reg = <0x031c>;
 	};
 
-	auxclk4_src_gate_ck: auxclk4_src_gate_ck {
+	auxclk4_src_gate_ck: auxclk4_src_gate_ck@320 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1539,7 +1539,7 @@
 		reg = <0x0320>;
 	};
 
-	auxclk4_src_mux_ck: auxclk4_src_mux_ck {
+	auxclk4_src_mux_ck: auxclk4_src_mux_ck@320 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1553,7 +1553,7 @@
 		clocks = <&auxclk4_src_gate_ck>, <&auxclk4_src_mux_ck>;
 	};
 
-	auxclk4_ck: auxclk4_ck {
+	auxclk4_ck: auxclk4_ck@320 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk4_src_ck>;
@@ -1562,7 +1562,7 @@
 		reg = <0x0320>;
 	};
 
-	auxclk5_src_gate_ck: auxclk5_src_gate_ck {
+	auxclk5_src_gate_ck: auxclk5_src_gate_ck@324 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1570,7 +1570,7 @@
 		reg = <0x0324>;
 	};
 
-	auxclk5_src_mux_ck: auxclk5_src_mux_ck {
+	auxclk5_src_mux_ck: auxclk5_src_mux_ck@324 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin_ck>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1584,7 +1584,7 @@
 		clocks = <&auxclk5_src_gate_ck>, <&auxclk5_src_mux_ck>;
 	};
 
-	auxclk5_ck: auxclk5_ck {
+	auxclk5_ck: auxclk5_ck@324 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk5_src_ck>;
@@ -1593,7 +1593,7 @@
 		reg = <0x0324>;
 	};
 
-	auxclkreq0_ck: auxclkreq0_ck {
+	auxclkreq0_ck: auxclkreq0_ck@210 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
@@ -1601,7 +1601,7 @@
 		reg = <0x0210>;
 	};
 
-	auxclkreq1_ck: auxclkreq1_ck {
+	auxclkreq1_ck: auxclkreq1_ck@214 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
@@ -1609,7 +1609,7 @@
 		reg = <0x0214>;
 	};
 
-	auxclkreq2_ck: auxclkreq2_ck {
+	auxclkreq2_ck: auxclkreq2_ck@218 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
@@ -1617,7 +1617,7 @@
 		reg = <0x0218>;
 	};
 
-	auxclkreq3_ck: auxclkreq3_ck {
+	auxclkreq3_ck: auxclkreq3_ck@21c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
@@ -1625,7 +1625,7 @@
 		reg = <0x021c>;
 	};
 
-	auxclkreq4_ck: auxclkreq4_ck {
+	auxclkreq4_ck: auxclkreq4_ck@220 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
@@ -1633,7 +1633,7 @@
 		reg = <0x0220>;
 	};
 
-	auxclkreq5_ck: auxclkreq5_ck {
+	auxclkreq5_ck: auxclkreq5_ck@224 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>, <&auxclk5_ck>;
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 902657d..dc759a3 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -391,11 +391,21 @@
 			ti,backup-battery-charge-high-current;
 		};
 
+		gpadc {
+			compatible = "ti,palmas-gpadc";
+			interrupts = <18 0
+				      16 0
+				      17 0>;
+			#io-channel-cells = <1>;
+			ti,channel0-current-microamp = <5>;
+			ti,channel3-current-microamp = <10>;
+		};
+
 		palmas_pmic {
 			compatible = "ti,palmas-pmic";
 			interrupt-parent = <&palmas>;
 			interrupts = <14 IRQ_TYPE_NONE>;
-			interrupt-name = "short-irq";
+			interrupt-names = "short-irq";
 
 			ti,ldo6-vibrator;
 
@@ -472,7 +482,7 @@
 				ldo1_reg: ldo1 {
 					/* VDDAPHY_CAM: vdda_csiport */
 					regulator-name = "ldo1";
-					regulator-min-microvolt = <1500000>;
+					regulator-min-microvolt = <1800000>;
 					regulator-max-microvolt = <1800000>;
 				};
 
@@ -498,7 +508,7 @@
 				ldo4_reg: ldo4 {
 					/* VDDAPHY_DISP: vdda_dsiport/hdmi */
 					regulator-name = "ldo4";
-					regulator-min-microvolt = <1500000>;
+					regulator-min-microvolt = <1800000>;
 					regulator-max-microvolt = <1800000>;
 				};
 
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index ecc591d..93fdfa9 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -434,7 +434,7 @@
 			compatible = "ti,palmas-pmic";
 			interrupt-parent = <&palmas>;
 			interrupts = <14 IRQ_TYPE_NONE>;
-			interrupt-name = "short-irq";
+			interrupt-names = "short-irq";
 
 			ti,ldo6-vibrator;
 
@@ -513,7 +513,7 @@
 				ldo1_reg: ldo1 {
 					/* VDDAPHY_CAM: vdda_csiport */
 					regulator-name = "ldo1";
-					regulator-min-microvolt = <1500000>;
+					regulator-min-microvolt = <1800000>;
 					regulator-max-microvolt = <1800000>;
 				};
 
@@ -537,7 +537,7 @@
 				ldo4_reg: ldo4 {
 					/* VDDAPHY_DISP: vdda_dsiport/hdmi */
 					regulator-name = "ldo4";
-					regulator-min-microvolt = <1500000>;
+					regulator-min-microvolt = <1800000>;
 					regulator-max-microvolt = <1800000>;
 				};
 
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 38805eb..84c1019 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -187,7 +187,7 @@
 					#size-cells = <1>;
 					ranges = <0 0x5a0 0xec>;
 
-					pbias_regulator: pbias_regulator {
+					pbias_regulator: pbias_regulator@60 {
 						compatible = "ti,pbias-omap5", "ti,pbias-omap";
 						reg = <0x60 0x4>;
 						syscon = <&omap5_padconf_global>;
@@ -269,7 +269,7 @@
 			omap5_pmx_wkup: pinmux@c840 {
 				compatible = "ti,omap5-padconf",
 					     "pinctrl-single";
-				reg = <0xc840 0x0038>;
+				reg = <0xc840 0x003c>;
 				#address-cells = <1>;
 				#size-cells = <0>;
 				#interrupt-cells = <1>;
@@ -398,6 +398,10 @@
 			ti,hwmods = "gpmc";
 			clocks = <&l3_iclk_div>;
 			clock-names = "fck";
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			#gpio-cells = <2>;
 		};
 
 		i2c1: i2c@48070000 {
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi
index 83b425f..4899c23 100644
--- a/arch/arm/boot/dts/omap54xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi
@@ -14,7 +14,7 @@
 		clock-frequency = <12000000>;
 	};
 
-	pad_clks_ck: pad_clks_ck {
+	pad_clks_ck: pad_clks_ck@108 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&pad_clks_src_ck>;
@@ -34,7 +34,7 @@
 		clock-frequency = <12000000>;
 	};
 
-	slimbus_clk: slimbus_clk {
+	slimbus_clk: slimbus_clk@108 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&slimbus_src_clk>;
@@ -102,7 +102,7 @@
 		clock-frequency = <60000000>;
 	};
 
-	dpll_abe_ck: dpll_abe_ck {
+	dpll_abe_ck: dpll_abe_ck@1e0 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-m4xen-clock";
 		clocks = <&abe_dpll_clk_mux>, <&abe_dpll_bypass_clk_mux>;
@@ -115,7 +115,7 @@
 		clocks = <&dpll_abe_ck>;
 	};
 
-	dpll_abe_m2x2_ck: dpll_abe_m2x2_ck {
+	dpll_abe_m2x2_ck: dpll_abe_m2x2_ck@1f0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_x2_ck>;
@@ -132,7 +132,7 @@
 		clock-div = <8>;
 	};
 
-	abe_clk: abe_clk {
+	abe_clk: abe_clk@108 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_m2x2_ck>;
@@ -141,7 +141,7 @@
 		ti,index-power-of-two;
 	};
 
-	abe_iclk: abe_iclk {
+	abe_iclk: abe_iclk@528 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&aess_fclk>;
@@ -158,7 +158,7 @@
 		clock-div = <16>;
 	};
 
-	dpll_abe_m3x2_ck: dpll_abe_m3x2_ck {
+	dpll_abe_m3x2_ck: dpll_abe_m3x2_ck@1f4 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_abe_x2_ck>;
@@ -167,7 +167,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_byp_mux: dpll_core_byp_mux {
+	dpll_core_byp_mux: dpll_core_byp_mux@12c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>;
@@ -175,7 +175,7 @@
 		reg = <0x012c>;
 	};
 
-	dpll_core_ck: dpll_core_ck {
+	dpll_core_ck: dpll_core_ck@120 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-core-clock";
 		clocks = <&sys_clkin>, <&dpll_core_byp_mux>;
@@ -188,7 +188,7 @@
 		clocks = <&dpll_core_ck>;
 	};
 
-	dpll_core_h21x2_ck: dpll_core_h21x2_ck {
+	dpll_core_h21x2_ck: dpll_core_h21x2_ck@150 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -213,7 +213,7 @@
 		clock-div = <2>;
 	};
 
-	dpll_core_h11x2_ck: dpll_core_h11x2_ck {
+	dpll_core_h11x2_ck: dpll_core_h11x2_ck@138 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -222,7 +222,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_h12x2_ck: dpll_core_h12x2_ck {
+	dpll_core_h12x2_ck: dpll_core_h12x2_ck@13c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -231,7 +231,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_h13x2_ck: dpll_core_h13x2_ck {
+	dpll_core_h13x2_ck: dpll_core_h13x2_ck@140 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -240,7 +240,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_h14x2_ck: dpll_core_h14x2_ck {
+	dpll_core_h14x2_ck: dpll_core_h14x2_ck@144 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -249,7 +249,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_h22x2_ck: dpll_core_h22x2_ck {
+	dpll_core_h22x2_ck: dpll_core_h22x2_ck@154 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -258,7 +258,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_h23x2_ck: dpll_core_h23x2_ck {
+	dpll_core_h23x2_ck: dpll_core_h23x2_ck@158 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -267,7 +267,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_h24x2_ck: dpll_core_h24x2_ck {
+	dpll_core_h24x2_ck: dpll_core_h24x2_ck@15c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -276,7 +276,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_m2_ck: dpll_core_m2_ck {
+	dpll_core_m2_ck: dpll_core_m2_ck@130 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_ck>;
@@ -285,7 +285,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_core_m3x2_ck: dpll_core_m3x2_ck {
+	dpll_core_m3x2_ck: dpll_core_m3x2_ck@134 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_core_x2_ck>;
@@ -302,7 +302,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_iva_byp_mux: dpll_iva_byp_mux {
+	dpll_iva_byp_mux: dpll_iva_byp_mux@1ac {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>;
@@ -310,7 +310,7 @@
 		reg = <0x01ac>;
 	};
 
-	dpll_iva_ck: dpll_iva_ck {
+	dpll_iva_ck: dpll_iva_ck@1a0 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin>, <&dpll_iva_byp_mux>;
@@ -323,7 +323,7 @@
 		clocks = <&dpll_iva_ck>;
 	};
 
-	dpll_iva_h11x2_ck: dpll_iva_h11x2_ck {
+	dpll_iva_h11x2_ck: dpll_iva_h11x2_ck@1b8 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_iva_x2_ck>;
@@ -332,7 +332,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_iva_h12x2_ck: dpll_iva_h12x2_ck {
+	dpll_iva_h12x2_ck: dpll_iva_h12x2_ck@1bc {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_iva_x2_ck>;
@@ -349,14 +349,14 @@
 		clock-div = <1>;
 	};
 
-	dpll_mpu_ck: dpll_mpu_ck {
+	dpll_mpu_ck: dpll_mpu_ck@160 {
 		#clock-cells = <0>;
 		compatible = "ti,omap5-mpu-dpll-clock";
 		clocks = <&sys_clkin>, <&mpu_dpll_hs_clk_div>;
 		reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>;
 	};
 
-	dpll_mpu_m2_ck: dpll_mpu_m2_ck {
+	dpll_mpu_m2_ck: dpll_mpu_m2_ck@170 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_mpu_ck>;
@@ -381,7 +381,7 @@
 		clock-div = <3>;
 	};
 
-	l3_iclk_div: l3_iclk_div {
+	l3_iclk_div: l3_iclk_div@100 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		ti,max-div = <2>;
@@ -399,7 +399,7 @@
 		clock-div = <1>;
 	};
 
-	l4_root_clk_div: l4_root_clk_div {
+	l4_root_clk_div: l4_root_clk_div@100 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		ti,max-div = <2>;
@@ -409,7 +409,7 @@
 		ti,index-power-of-two;
 	};
 
-	slimbus1_slimbus_clk: slimbus1_slimbus_clk {
+	slimbus1_slimbus_clk: slimbus1_slimbus_clk@560 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&slimbus_clk>;
@@ -417,7 +417,7 @@
 		reg = <0x0560>;
 	};
 
-	aess_fclk: aess_fclk {
+	aess_fclk: aess_fclk@528 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&abe_clk>;
@@ -426,7 +426,7 @@
 		reg = <0x0528>;
 	};
 
-	dmic_sync_mux_ck: dmic_sync_mux_ck {
+	dmic_sync_mux_ck: dmic_sync_mux_ck@538 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&dss_syc_gfclk_div>, <&func_24m_clk>;
@@ -434,7 +434,7 @@
 		reg = <0x0538>;
 	};
 
-	dmic_gfclk: dmic_gfclk {
+	dmic_gfclk: dmic_gfclk@538 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dmic_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
@@ -442,7 +442,7 @@
 		reg = <0x0538>;
 	};
 
-	mcasp_sync_mux_ck: mcasp_sync_mux_ck {
+	mcasp_sync_mux_ck: mcasp_sync_mux_ck@540 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&dss_syc_gfclk_div>, <&func_24m_clk>;
@@ -450,7 +450,7 @@
 		reg = <0x0540>;
 	};
 
-	mcasp_gfclk: mcasp_gfclk {
+	mcasp_gfclk: mcasp_gfclk@540 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&mcasp_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
@@ -458,7 +458,7 @@
 		reg = <0x0540>;
 	};
 
-	mcbsp1_sync_mux_ck: mcbsp1_sync_mux_ck {
+	mcbsp1_sync_mux_ck: mcbsp1_sync_mux_ck@548 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&dss_syc_gfclk_div>, <&func_24m_clk>;
@@ -466,7 +466,7 @@
 		reg = <0x0548>;
 	};
 
-	mcbsp1_gfclk: mcbsp1_gfclk {
+	mcbsp1_gfclk: mcbsp1_gfclk@548 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&mcbsp1_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
@@ -474,7 +474,7 @@
 		reg = <0x0548>;
 	};
 
-	mcbsp2_sync_mux_ck: mcbsp2_sync_mux_ck {
+	mcbsp2_sync_mux_ck: mcbsp2_sync_mux_ck@550 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&dss_syc_gfclk_div>, <&func_24m_clk>;
@@ -482,7 +482,7 @@
 		reg = <0x0550>;
 	};
 
-	mcbsp2_gfclk: mcbsp2_gfclk {
+	mcbsp2_gfclk: mcbsp2_gfclk@550 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&mcbsp2_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
@@ -490,7 +490,7 @@
 		reg = <0x0550>;
 	};
 
-	mcbsp3_sync_mux_ck: mcbsp3_sync_mux_ck {
+	mcbsp3_sync_mux_ck: mcbsp3_sync_mux_ck@558 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&abe_24m_fclk>, <&dss_syc_gfclk_div>, <&func_24m_clk>;
@@ -498,7 +498,7 @@
 		reg = <0x0558>;
 	};
 
-	mcbsp3_gfclk: mcbsp3_gfclk {
+	mcbsp3_gfclk: mcbsp3_gfclk@558 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&mcbsp3_sync_mux_ck>, <&pad_clks_ck>, <&slimbus_clk>;
@@ -506,7 +506,7 @@
 		reg = <0x0558>;
 	};
 
-	timer5_gfclk_mux: timer5_gfclk_mux {
+	timer5_gfclk_mux: timer5_gfclk_mux@568 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dss_syc_gfclk_div>, <&sys_32k_ck>;
@@ -514,7 +514,7 @@
 		reg = <0x0568>;
 	};
 
-	timer6_gfclk_mux: timer6_gfclk_mux {
+	timer6_gfclk_mux: timer6_gfclk_mux@570 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dss_syc_gfclk_div>, <&sys_32k_ck>;
@@ -522,7 +522,7 @@
 		reg = <0x0570>;
 	};
 
-	timer7_gfclk_mux: timer7_gfclk_mux {
+	timer7_gfclk_mux: timer7_gfclk_mux@578 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dss_syc_gfclk_div>, <&sys_32k_ck>;
@@ -530,7 +530,7 @@
 		reg = <0x0578>;
 	};
 
-	timer8_gfclk_mux: timer8_gfclk_mux {
+	timer8_gfclk_mux: timer8_gfclk_mux@580 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dss_syc_gfclk_div>, <&sys_32k_ck>;
@@ -545,7 +545,7 @@
 	};
 };
 &prm_clocks {
-	sys_clkin: sys_clkin {
+	sys_clkin: sys_clkin@110 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&virt_12000000_ck>, <&virt_13000000_ck>, <&virt_16800000_ck>, <&virt_19200000_ck>, <&virt_26000000_ck>, <&virt_27000000_ck>, <&virt_38400000_ck>;
@@ -553,14 +553,14 @@
 		ti,index-starts-at-one;
 	};
 
-	abe_dpll_bypass_clk_mux: abe_dpll_bypass_clk_mux {
+	abe_dpll_bypass_clk_mux: abe_dpll_bypass_clk_mux@108 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&sys_32k_ck>;
 		reg = <0x0108>;
 	};
 
-	abe_dpll_clk_mux: abe_dpll_clk_mux {
+	abe_dpll_clk_mux: abe_dpll_clk_mux@10c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&sys_32k_ck>;
@@ -583,7 +583,7 @@
 		clock-div = <1>;
 	};
 
-	wkupaon_iclk_mux: wkupaon_iclk_mux {
+	wkupaon_iclk_mux: wkupaon_iclk_mux@108 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&abe_lp_clk_div>;
@@ -598,7 +598,7 @@
 		clock-div = <1>;
 	};
 
-	gpio1_dbclk: gpio1_dbclk {
+	gpio1_dbclk: gpio1_dbclk@1938 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -606,7 +606,7 @@
 		reg = <0x1938>;
 	};
 
-	timer1_gfclk_mux: timer1_gfclk_mux {
+	timer1_gfclk_mux: timer1_gfclk_mux@1940 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&sys_32k_ck>;
@@ -616,7 +616,7 @@
 };
 &cm_core_clocks {
 
-	dpll_per_byp_mux: dpll_per_byp_mux {
+	dpll_per_byp_mux: dpll_per_byp_mux@14c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>;
@@ -624,7 +624,7 @@
 		reg = <0x014c>;
 	};
 
-	dpll_per_ck: dpll_per_ck {
+	dpll_per_ck: dpll_per_ck@140 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin>, <&dpll_per_byp_mux>;
@@ -637,7 +637,7 @@
 		clocks = <&dpll_per_ck>;
 	};
 
-	dpll_per_h11x2_ck: dpll_per_h11x2_ck {
+	dpll_per_h11x2_ck: dpll_per_h11x2_ck@158 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -646,7 +646,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_per_h12x2_ck: dpll_per_h12x2_ck {
+	dpll_per_h12x2_ck: dpll_per_h12x2_ck@15c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -655,7 +655,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_per_h14x2_ck: dpll_per_h14x2_ck {
+	dpll_per_h14x2_ck: dpll_per_h14x2_ck@164 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -664,7 +664,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_per_m2_ck: dpll_per_m2_ck {
+	dpll_per_m2_ck: dpll_per_m2_ck@150 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_ck>;
@@ -673,7 +673,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_per_m2x2_ck: dpll_per_m2x2_ck {
+	dpll_per_m2x2_ck: dpll_per_m2x2_ck@150 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -682,7 +682,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_per_m3x2_ck: dpll_per_m3x2_ck {
+	dpll_per_m3x2_ck: dpll_per_m3x2_ck@154 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_x2_ck>;
@@ -691,7 +691,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_unipro1_ck: dpll_unipro1_ck {
+	dpll_unipro1_ck: dpll_unipro1_ck@200 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin>, <&sys_clkin>;
@@ -706,7 +706,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_unipro1_m2_ck: dpll_unipro1_m2_ck {
+	dpll_unipro1_m2_ck: dpll_unipro1_m2_ck@210 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_unipro1_ck>;
@@ -715,7 +715,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_unipro2_ck: dpll_unipro2_ck {
+	dpll_unipro2_ck: dpll_unipro2_ck@1c0 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-clock";
 		clocks = <&sys_clkin>, <&sys_clkin>;
@@ -730,7 +730,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_unipro2_m2_ck: dpll_unipro2_m2_ck {
+	dpll_unipro2_m2_ck: dpll_unipro2_m2_ck@1d0 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_unipro2_ck>;
@@ -739,7 +739,7 @@
 		ti,index-starts-at-one;
 	};
 
-	dpll_usb_byp_mux: dpll_usb_byp_mux {
+	dpll_usb_byp_mux: dpll_usb_byp_mux@18c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>;
@@ -747,7 +747,7 @@
 		reg = <0x018c>;
 	};
 
-	dpll_usb_ck: dpll_usb_ck {
+	dpll_usb_ck: dpll_usb_ck@180 {
 		#clock-cells = <0>;
 		compatible = "ti,omap4-dpll-j-type-clock";
 		clocks = <&sys_clkin>, <&dpll_usb_byp_mux>;
@@ -762,7 +762,7 @@
 		clock-div = <1>;
 	};
 
-	dpll_usb_m2_ck: dpll_usb_m2_ck {
+	dpll_usb_m2_ck: dpll_usb_m2_ck@190 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_usb_ck>;
@@ -811,7 +811,7 @@
 		clock-div = <2>;
 	};
 
-	l3init_60m_fclk: l3init_60m_fclk {
+	l3init_60m_fclk: l3init_60m_fclk@104 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_usb_m2_ck>;
@@ -819,7 +819,7 @@
 		ti,dividers = <1>, <8>;
 	};
 
-	dss_32khz_clk: dss_32khz_clk {
+	dss_32khz_clk: dss_32khz_clk@1420 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -827,7 +827,7 @@
 		reg = <0x1420>;
 	};
 
-	dss_48mhz_clk: dss_48mhz_clk {
+	dss_48mhz_clk: dss_48mhz_clk@1420 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_48m_fclk>;
@@ -835,7 +835,7 @@
 		reg = <0x1420>;
 	};
 
-	dss_dss_clk: dss_dss_clk {
+	dss_dss_clk: dss_dss_clk@1420 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_per_h12x2_ck>;
@@ -844,7 +844,7 @@
 		ti,set-rate-parent;
 	};
 
-	dss_sys_clk: dss_sys_clk {
+	dss_sys_clk: dss_sys_clk@1420 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dss_syc_gfclk_div>;
@@ -852,7 +852,7 @@
 		reg = <0x1420>;
 	};
 
-	gpio2_dbclk: gpio2_dbclk {
+	gpio2_dbclk: gpio2_dbclk@1060 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -860,7 +860,7 @@
 		reg = <0x1060>;
 	};
 
-	gpio3_dbclk: gpio3_dbclk {
+	gpio3_dbclk: gpio3_dbclk@1068 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -868,7 +868,7 @@
 		reg = <0x1068>;
 	};
 
-	gpio4_dbclk: gpio4_dbclk {
+	gpio4_dbclk: gpio4_dbclk@1070 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -876,7 +876,7 @@
 		reg = <0x1070>;
 	};
 
-	gpio5_dbclk: gpio5_dbclk {
+	gpio5_dbclk: gpio5_dbclk@1078 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -884,7 +884,7 @@
 		reg = <0x1078>;
 	};
 
-	gpio6_dbclk: gpio6_dbclk {
+	gpio6_dbclk: gpio6_dbclk@1080 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -892,7 +892,7 @@
 		reg = <0x1080>;
 	};
 
-	gpio7_dbclk: gpio7_dbclk {
+	gpio7_dbclk: gpio7_dbclk@1110 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -900,7 +900,7 @@
 		reg = <0x1110>;
 	};
 
-	gpio8_dbclk: gpio8_dbclk {
+	gpio8_dbclk: gpio8_dbclk@1118 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -908,7 +908,7 @@
 		reg = <0x1118>;
 	};
 
-	iss_ctrlclk: iss_ctrlclk {
+	iss_ctrlclk: iss_ctrlclk@1320 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&func_96m_fclk>;
@@ -916,7 +916,7 @@
 		reg = <0x1320>;
 	};
 
-	lli_txphy_clk: lli_txphy_clk {
+	lli_txphy_clk: lli_txphy_clk@f20 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_unipro1_clkdcoldo>;
@@ -924,7 +924,7 @@
 		reg = <0x0f20>;
 	};
 
-	lli_txphy_ls_clk: lli_txphy_ls_clk {
+	lli_txphy_ls_clk: lli_txphy_ls_clk@f20 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_unipro1_m2_ck>;
@@ -932,7 +932,7 @@
 		reg = <0x0f20>;
 	};
 
-	mmc1_32khz_clk: mmc1_32khz_clk {
+	mmc1_32khz_clk: mmc1_32khz_clk@1628 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -940,7 +940,7 @@
 		reg = <0x1628>;
 	};
 
-	sata_ref_clk: sata_ref_clk {
+	sata_ref_clk: sata_ref_clk@1688 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_clkin>;
@@ -948,7 +948,7 @@
 		reg = <0x1688>;
 	};
 
-	usb_host_hs_hsic480m_p1_clk: usb_host_hs_hsic480m_p1_clk {
+	usb_host_hs_hsic480m_p1_clk: usb_host_hs_hsic480m_p1_clk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_usb_m2_ck>;
@@ -956,7 +956,7 @@
 		reg = <0x1658>;
 	};
 
-	usb_host_hs_hsic480m_p2_clk: usb_host_hs_hsic480m_p2_clk {
+	usb_host_hs_hsic480m_p2_clk: usb_host_hs_hsic480m_p2_clk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_usb_m2_ck>;
@@ -964,7 +964,7 @@
 		reg = <0x1658>;
 	};
 
-	usb_host_hs_hsic480m_p3_clk: usb_host_hs_hsic480m_p3_clk {
+	usb_host_hs_hsic480m_p3_clk: usb_host_hs_hsic480m_p3_clk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_usb_m2_ck>;
@@ -972,7 +972,7 @@
 		reg = <0x1658>;
 	};
 
-	usb_host_hs_hsic60m_p1_clk: usb_host_hs_hsic60m_p1_clk {
+	usb_host_hs_hsic60m_p1_clk: usb_host_hs_hsic60m_p1_clk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3init_60m_fclk>;
@@ -980,7 +980,7 @@
 		reg = <0x1658>;
 	};
 
-	usb_host_hs_hsic60m_p2_clk: usb_host_hs_hsic60m_p2_clk {
+	usb_host_hs_hsic60m_p2_clk: usb_host_hs_hsic60m_p2_clk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3init_60m_fclk>;
@@ -988,7 +988,7 @@
 		reg = <0x1658>;
 	};
 
-	usb_host_hs_hsic60m_p3_clk: usb_host_hs_hsic60m_p3_clk {
+	usb_host_hs_hsic60m_p3_clk: usb_host_hs_hsic60m_p3_clk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3init_60m_fclk>;
@@ -996,7 +996,7 @@
 		reg = <0x1658>;
 	};
 
-	utmi_p1_gfclk: utmi_p1_gfclk {
+	utmi_p1_gfclk: utmi_p1_gfclk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&l3init_60m_fclk>, <&xclk60mhsp1_ck>;
@@ -1004,7 +1004,7 @@
 		reg = <0x1658>;
 	};
 
-	usb_host_hs_utmi_p1_clk: usb_host_hs_utmi_p1_clk {
+	usb_host_hs_utmi_p1_clk: usb_host_hs_utmi_p1_clk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&utmi_p1_gfclk>;
@@ -1012,7 +1012,7 @@
 		reg = <0x1658>;
 	};
 
-	utmi_p2_gfclk: utmi_p2_gfclk {
+	utmi_p2_gfclk: utmi_p2_gfclk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&l3init_60m_fclk>, <&xclk60mhsp2_ck>;
@@ -1020,7 +1020,7 @@
 		reg = <0x1658>;
 	};
 
-	usb_host_hs_utmi_p2_clk: usb_host_hs_utmi_p2_clk {
+	usb_host_hs_utmi_p2_clk: usb_host_hs_utmi_p2_clk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&utmi_p2_gfclk>;
@@ -1028,7 +1028,7 @@
 		reg = <0x1658>;
 	};
 
-	usb_host_hs_utmi_p3_clk: usb_host_hs_utmi_p3_clk {
+	usb_host_hs_utmi_p3_clk: usb_host_hs_utmi_p3_clk@1658 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3init_60m_fclk>;
@@ -1036,7 +1036,7 @@
 		reg = <0x1658>;
 	};
 
-	usb_otg_ss_refclk960m: usb_otg_ss_refclk960m {
+	usb_otg_ss_refclk960m: usb_otg_ss_refclk960m@16f0 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&dpll_usb_clkdcoldo>;
@@ -1044,7 +1044,7 @@
 		reg = <0x16f0>;
 	};
 
-	usb_phy_cm_clk32k: usb_phy_cm_clk32k {
+	usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&sys_32k_ck>;
@@ -1052,7 +1052,7 @@
 		reg = <0x0640>;
 	};
 
-	usb_tll_hs_usb_ch0_clk: usb_tll_hs_usb_ch0_clk {
+	usb_tll_hs_usb_ch0_clk: usb_tll_hs_usb_ch0_clk@1668 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3init_60m_fclk>;
@@ -1060,7 +1060,7 @@
 		reg = <0x1668>;
 	};
 
-	usb_tll_hs_usb_ch1_clk: usb_tll_hs_usb_ch1_clk {
+	usb_tll_hs_usb_ch1_clk: usb_tll_hs_usb_ch1_clk@1668 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3init_60m_fclk>;
@@ -1068,7 +1068,7 @@
 		reg = <0x1668>;
 	};
 
-	usb_tll_hs_usb_ch2_clk: usb_tll_hs_usb_ch2_clk {
+	usb_tll_hs_usb_ch2_clk: usb_tll_hs_usb_ch2_clk@1668 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
 		clocks = <&l3init_60m_fclk>;
@@ -1076,7 +1076,7 @@
 		reg = <0x1668>;
 	};
 
-	fdif_fclk: fdif_fclk {
+	fdif_fclk: fdif_fclk@1328 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_h11x2_ck>;
@@ -1085,7 +1085,7 @@
 		reg = <0x1328>;
 	};
 
-	gpu_core_gclk_mux: gpu_core_gclk_mux {
+	gpu_core_gclk_mux: gpu_core_gclk_mux@1520 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>;
@@ -1093,7 +1093,7 @@
 		reg = <0x1520>;
 	};
 
-	gpu_hyd_gclk_mux: gpu_hyd_gclk_mux {
+	gpu_hyd_gclk_mux: gpu_hyd_gclk_mux@1520 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&dpll_core_h14x2_ck>, <&dpll_per_h14x2_ck>;
@@ -1101,7 +1101,7 @@
 		reg = <0x1520>;
 	};
 
-	hsi_fclk: hsi_fclk {
+	hsi_fclk: hsi_fclk@1638 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&dpll_per_m2x2_ck>;
@@ -1110,7 +1110,7 @@
 		reg = <0x1638>;
 	};
 
-	mmc1_fclk_mux: mmc1_fclk_mux {
+	mmc1_fclk_mux: mmc1_fclk_mux@1628 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_128m_clk>, <&dpll_per_m2x2_ck>;
@@ -1118,7 +1118,7 @@
 		reg = <0x1628>;
 	};
 
-	mmc1_fclk: mmc1_fclk {
+	mmc1_fclk: mmc1_fclk@1628 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&mmc1_fclk_mux>;
@@ -1127,7 +1127,7 @@
 		reg = <0x1628>;
 	};
 
-	mmc2_fclk_mux: mmc2_fclk_mux {
+	mmc2_fclk_mux: mmc2_fclk_mux@1630 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&func_128m_clk>, <&dpll_per_m2x2_ck>;
@@ -1135,7 +1135,7 @@
 		reg = <0x1630>;
 	};
 
-	mmc2_fclk: mmc2_fclk {
+	mmc2_fclk: mmc2_fclk@1630 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&mmc2_fclk_mux>;
@@ -1144,7 +1144,7 @@
 		reg = <0x1630>;
 	};
 
-	timer10_gfclk_mux: timer10_gfclk_mux {
+	timer10_gfclk_mux: timer10_gfclk_mux@1028 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&sys_32k_ck>;
@@ -1152,7 +1152,7 @@
 		reg = <0x1028>;
 	};
 
-	timer11_gfclk_mux: timer11_gfclk_mux {
+	timer11_gfclk_mux: timer11_gfclk_mux@1030 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&sys_32k_ck>;
@@ -1160,7 +1160,7 @@
 		reg = <0x1030>;
 	};
 
-	timer2_gfclk_mux: timer2_gfclk_mux {
+	timer2_gfclk_mux: timer2_gfclk_mux@1038 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&sys_32k_ck>;
@@ -1168,7 +1168,7 @@
 		reg = <0x1038>;
 	};
 
-	timer3_gfclk_mux: timer3_gfclk_mux {
+	timer3_gfclk_mux: timer3_gfclk_mux@1040 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&sys_32k_ck>;
@@ -1176,7 +1176,7 @@
 		reg = <0x1040>;
 	};
 
-	timer4_gfclk_mux: timer4_gfclk_mux {
+	timer4_gfclk_mux: timer4_gfclk_mux@1048 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&sys_32k_ck>;
@@ -1184,7 +1184,7 @@
 		reg = <0x1048>;
 	};
 
-	timer9_gfclk_mux: timer9_gfclk_mux {
+	timer9_gfclk_mux: timer9_gfclk_mux@1050 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&sys_clkin>, <&sys_32k_ck>;
@@ -1201,7 +1201,7 @@
 };
 
 &scrm_clocks {
-	auxclk0_src_gate_ck: auxclk0_src_gate_ck {
+	auxclk0_src_gate_ck: auxclk0_src_gate_ck@310 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1209,7 +1209,7 @@
 		reg = <0x0310>;
 	};
 
-	auxclk0_src_mux_ck: auxclk0_src_mux_ck {
+	auxclk0_src_mux_ck: auxclk0_src_mux_ck@310 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1223,7 +1223,7 @@
 		clocks = <&auxclk0_src_gate_ck>, <&auxclk0_src_mux_ck>;
 	};
 
-	auxclk0_ck: auxclk0_ck {
+	auxclk0_ck: auxclk0_ck@310 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk0_src_ck>;
@@ -1232,7 +1232,7 @@
 		reg = <0x0310>;
 	};
 
-	auxclk1_src_gate_ck: auxclk1_src_gate_ck {
+	auxclk1_src_gate_ck: auxclk1_src_gate_ck@314 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1240,7 +1240,7 @@
 		reg = <0x0314>;
 	};
 
-	auxclk1_src_mux_ck: auxclk1_src_mux_ck {
+	auxclk1_src_mux_ck: auxclk1_src_mux_ck@314 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1254,7 +1254,7 @@
 		clocks = <&auxclk1_src_gate_ck>, <&auxclk1_src_mux_ck>;
 	};
 
-	auxclk1_ck: auxclk1_ck {
+	auxclk1_ck: auxclk1_ck@314 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk1_src_ck>;
@@ -1263,7 +1263,7 @@
 		reg = <0x0314>;
 	};
 
-	auxclk2_src_gate_ck: auxclk2_src_gate_ck {
+	auxclk2_src_gate_ck: auxclk2_src_gate_ck@318 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1271,7 +1271,7 @@
 		reg = <0x0318>;
 	};
 
-	auxclk2_src_mux_ck: auxclk2_src_mux_ck {
+	auxclk2_src_mux_ck: auxclk2_src_mux_ck@318 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1285,7 +1285,7 @@
 		clocks = <&auxclk2_src_gate_ck>, <&auxclk2_src_mux_ck>;
 	};
 
-	auxclk2_ck: auxclk2_ck {
+	auxclk2_ck: auxclk2_ck@318 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk2_src_ck>;
@@ -1294,7 +1294,7 @@
 		reg = <0x0318>;
 	};
 
-	auxclk3_src_gate_ck: auxclk3_src_gate_ck {
+	auxclk3_src_gate_ck: auxclk3_src_gate_ck@31c {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1302,7 +1302,7 @@
 		reg = <0x031c>;
 	};
 
-	auxclk3_src_mux_ck: auxclk3_src_mux_ck {
+	auxclk3_src_mux_ck: auxclk3_src_mux_ck@31c {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1316,7 +1316,7 @@
 		clocks = <&auxclk3_src_gate_ck>, <&auxclk3_src_mux_ck>;
 	};
 
-	auxclk3_ck: auxclk3_ck {
+	auxclk3_ck: auxclk3_ck@31c {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk3_src_ck>;
@@ -1325,7 +1325,7 @@
 		reg = <0x031c>;
 	};
 
-	auxclk4_src_gate_ck: auxclk4_src_gate_ck {
+	auxclk4_src_gate_ck: auxclk4_src_gate_ck@320 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-no-wait-gate-clock";
 		clocks = <&dpll_core_m3x2_ck>;
@@ -1333,7 +1333,7 @@
 		reg = <0x0320>;
 	};
 
-	auxclk4_src_mux_ck: auxclk4_src_mux_ck {
+	auxclk4_src_mux_ck: auxclk4_src_mux_ck@320 {
 		#clock-cells = <0>;
 		compatible = "ti,composite-mux-clock";
 		clocks = <&sys_clkin>, <&dpll_core_m3x2_ck>, <&dpll_per_m3x2_ck>;
@@ -1347,7 +1347,7 @@
 		clocks = <&auxclk4_src_gate_ck>, <&auxclk4_src_mux_ck>;
 	};
 
-	auxclk4_ck: auxclk4_ck {
+	auxclk4_ck: auxclk4_ck@320 {
 		#clock-cells = <0>;
 		compatible = "ti,divider-clock";
 		clocks = <&auxclk4_src_ck>;
@@ -1356,7 +1356,7 @@
 		reg = <0x0320>;
 	};
 
-	auxclkreq0_ck: auxclkreq0_ck {
+	auxclkreq0_ck: auxclkreq0_ck@210 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>;
@@ -1364,7 +1364,7 @@
 		reg = <0x0210>;
 	};
 
-	auxclkreq1_ck: auxclkreq1_ck {
+	auxclkreq1_ck: auxclkreq1_ck@214 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>;
@@ -1372,7 +1372,7 @@
 		reg = <0x0214>;
 	};
 
-	auxclkreq2_ck: auxclkreq2_ck {
+	auxclkreq2_ck: auxclkreq2_ck@218 {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>;
@@ -1380,7 +1380,7 @@
 		reg = <0x0218>;
 	};
 
-	auxclkreq3_ck: auxclkreq3_ck {
+	auxclkreq3_ck: auxclkreq3_ck@21c {
 		#clock-cells = <0>;
 		compatible = "ti,mux-clock";
 		clocks = <&auxclk0_ck>, <&auxclk1_ck>, <&auxclk2_ck>, <&auxclk3_ck>, <&auxclk4_ck>;
diff --git a/arch/arm/boot/dts/orion5x-kuroboxpro.dts b/arch/arm/boot/dts/orion5x-kuroboxpro.dts
new file mode 100644
index 0000000..1a672b0
--- /dev/null
+++ b/arch/arm/boot/dts/orion5x-kuroboxpro.dts
@@ -0,0 +1,127 @@
+/*
+ * Device Tree file for Buffalo/Revogear Kurobox Pro
+ *
+ * Copyright (C) 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * Based on the board file arch/arm/mach-orion5x/kurobox_pro-setup.c
+ * Copyright (C) Ronen Shitrit <rshitrit@marvell.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "orion5x-linkstation.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "Buffalo/Revogear Kurobox Pro";
+	compatible = "buffalo,kurobox-pro", "marvell,orion5x-88f5182", "marvell,orion5x";
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>,
+				 <MBUS_ID(0x09, 0x00) 0 0xf2200000 0x800>,
+				 <MBUS_ID(0x01, 0x0f) 0 0xf4000000 0x40000>,
+				 <MBUS_ID(0x01, 0x1e) 0 0xfc000000 0x1000000>;
+	};
+
+	memory { /* 128 MB */
+		device_type = "memory";
+		reg = <0x00000000 0x8000000>;
+	};
+};
+
+&pinctrl {
+	pmx_power_hdd: pmx-power-hdd {
+		marvell,pins = "mpp1";
+		marvell,function = "gpio";
+	};
+
+	pmx_power_usb: pmx-power-usb {
+		marvell,pins = "mpp9";
+		marvell,function = "gpio";
+	};
+};
+
+&devbus_cs0 {
+	status = "okay";
+	compatible = "marvell,orion-nand";
+	reg = <MBUS_ID(0x01, 0x1e) 0 0x400>;
+	cle = <0>;
+	ale = <1>;
+	bank-width = <1>;
+
+	partitions {
+		compatible = "fixed-partitions";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		uImage@0 { /* 4 MB */
+			reg = <0 0x400000>;
+			read-only;
+		};
+
+		rootfs@400000 { /* 64 MB */
+			reg = <0x400000 0x4000000>;
+			read-only;
+		};
+
+		extra@4400000 { /* 188 MB */
+			reg = <0x4400000 0xBC00000>;
+			read-only;
+		};
+	};
+};
+
+&hdd_power {
+	gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+};
+
+&usb_power {
+	gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+};
+
+&sata {
+	nr-ports = <2>;
+};
+
+&ehci1 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/ox810se.dtsi b/arch/arm/boot/dts/ox810se.dtsi
new file mode 100644
index 0000000..ce13705
--- /dev/null
+++ b/arch/arm/boot/dts/ox810se.dtsi
@@ -0,0 +1,336 @@
+/*
+ * ox810se.dtsi - Device tree file for Oxford Semiconductor OX810SE SoC
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Licensed under GPLv2 or later
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+	compatible = "oxsemi,ox810se";
+
+	cpus {
+		#address-cells = <0>;
+		#size-cells = <0>;
+
+		cpu {
+			device_type = "cpu";
+			compatible = "arm,arm926ej-s";
+			clocks = <&armclk>;
+		};
+	};
+
+	memory {
+		/* Max 256MB @ 0x48000000 */
+		reg = <0x48000000 0x10000000>;
+	};
+
+	clocks {
+		osc: oscillator {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+
+		gmacclk: gmacclk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <125000000>;
+		};
+
+		rpsclk: rpsclk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <1>;
+			clock-mult = <1>;
+			clocks = <&osc>;
+		};
+
+		pll400: pll400 {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <733333333>;
+		};
+
+		sysclk: sysclk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <4>;
+			clock-mult = <1>;
+			clocks = <&pll400>;
+		};
+
+		armclk: armclk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <2>;
+			clock-mult = <1>;
+			clocks = <&pll400>;
+		};
+	};
+
+	soc {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		ranges;
+		interrupt-parent = <&intc>;
+
+		apb-bridge@44000000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "simple-bus";
+			ranges = <0 0x44000000 0x1000000>;
+
+			pinctrl: pinctrl {
+				compatible = "oxsemi,ox810se-pinctrl";
+
+				/* Regmap for sys registers */
+				oxsemi,sys-ctrl = <&sys>;
+
+				pinctrl_uart0: uart0 {
+					uart0a {
+						pins = "gpio31";
+						function = "fct3";
+					};
+					uart0b {
+						pins = "gpio32";
+						function = "fct3";
+					};
+				};
+
+				pinctrl_uart0_modem: uart0_modem {
+					uart0c {
+						pins = "gpio27";
+						function = "fct3";
+					};
+					uart0d {
+						pins = "gpio28";
+						function = "fct3";
+					};
+					uart0e {
+						pins = "gpio29";
+						function = "fct3";
+					};
+					uart0f {
+						pins = "gpio30";
+						function = "fct3";
+					};
+					uart0g {
+						pins = "gpio33";
+						function = "fct3";
+					};
+					uart0h {
+						pins = "gpio34";
+						function = "fct3";
+					};
+				};
+
+				pinctrl_uart1: uart1 {
+					uart1a {
+						pins = "gpio20";
+						function = "fct3";
+					};
+					uart1b {
+						pins = "gpio22";
+						function = "fct3";
+					};
+				};
+
+				pinctrl_uart1_modem: uart1_modem {
+					uart1c {
+						pins = "gpio8";
+						function = "fct3";
+					};
+					uart1d {
+						pins = "gpio9";
+						function = "fct3";
+					};
+					uart1e {
+						pins = "gpio23";
+						function = "fct3";
+					};
+					uart1f {
+						pins = "gpio24";
+						function = "fct3";
+					};
+					uart1g {
+						pins = "gpio25";
+						function = "fct3";
+					};
+					uart1h {
+						pins = "gpio26";
+						function = "fct3";
+					};
+				};
+
+				pinctrl_uart2: uart2 {
+					uart2a {
+						pins = "gpio6";
+						function = "fct3";
+					};
+					uart2b {
+						pins = "gpio7";
+						function = "fct3";
+					};
+				};
+
+				pinctrl_uart2_modem: uart2_modem {
+					uart2c {
+						pins = "gpio0";
+						function = "fct3";
+					};
+					uart2d {
+						pins = "gpio1";
+						function = "fct3";
+					};
+					uart2e {
+						pins = "gpio2";
+						function = "fct3";
+					};
+					uart2f {
+						pins = "gpio3";
+						function = "fct3";
+					};
+					uart2g {
+						pins = "gpio4";
+						function = "fct3";
+					};
+					uart2h {
+						pins = "gpio5";
+						function = "fct3";
+					};
+				};
+			};
+
+			gpio0: gpio@000000 {
+				compatible = "oxsemi,ox810se-gpio";
+				reg = <0x000000 0x100000>;
+				interrupts = <21>;
+				#gpio-cells = <2>;
+				gpio-controller;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				ngpios = <32>;
+				oxsemi,gpio-bank = <0>;
+				gpio-ranges = <&pinctrl 0 0 32>;
+			};
+
+			gpio1: gpio@100000 {
+				compatible = "oxsemi,ox810se-gpio";
+				reg = <0x100000 0x100000>;
+				interrupts = <22>;
+				#gpio-cells = <2>;
+				gpio-controller;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				ngpios = <3>;
+				oxsemi,gpio-bank = <1>;
+				gpio-ranges = <&pinctrl 0 32 3>;
+			};
+
+			uart0: serial@200000 {
+			       compatible = "ns16550a";
+			       reg = <0x200000 0x100000>;
+			       clocks = <&sysclk>;
+			       interrupts = <23>;
+			       reg-shift = <0>;
+			       fifo-size = <16>;
+			       reg-io-width = <1>;
+			       current-speed = <115200>;
+			       no-loopback-test;
+			       status = "disabled";
+			       resets = <&reset 17>;
+			};
+
+			uart1: serial@300000 {
+			       compatible = "ns16550a";
+			       reg = <0x300000 0x100000>;
+			       clocks = <&sysclk>;
+			       interrupts = <24>;
+			       reg-shift = <0>;
+			       fifo-size = <16>;
+			       reg-io-width = <1>;
+			       current-speed = <115200>;
+			       no-loopback-test;
+			       status = "disabled";
+			       resets = <&reset 18>;
+			};
+
+			uart2: serial@900000 {
+			       compatible = "ns16550a";
+			       reg = <0x900000 0x100000>;
+			       clocks = <&sysclk>;
+			       interrupts = <29>;
+			       reg-shift = <0>;
+			       fifo-size = <16>;
+			       reg-io-width = <1>;
+			       current-speed = <115200>;
+			       no-loopback-test;
+			       status = "disabled";
+			       resets = <&reset 22>;
+			};
+
+			uart3: serial@a00000 {
+			       compatible = "ns16550a";
+			       reg = <0xa00000 0x100000>;
+			       clocks = <&sysclk>;
+			       interrupts = <30>;
+			       reg-shift = <0>;
+			       fifo-size = <16>;
+			       reg-io-width = <1>;
+			       current-speed = <115200>;
+			       no-loopback-test;
+			       status = "disabled";
+			       resets = <&reset 23>;
+			};
+		};
+
+		apb-bridge@45000000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "simple-bus";
+			ranges = <0 0x45000000 0x1000000>;
+
+			sys: sys-ctrl@000000 {
+				compatible = "oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd";
+				reg = <0x000000 0x100000>;
+
+				reset: reset-controller {
+					compatible = "oxsemi,ox810se-reset";
+					#reset-cells = <1>;
+				};
+
+				stdclk: stdclk {
+					compatible = "oxsemi,ox810se-stdclk";
+					#clock-cells = <1>;
+				};
+			};
+
+			rps@300000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				compatible = "simple-bus";
+				ranges = <0 0x300000 0x100000>;
+
+				intc: interrupt-controller@0 {
+					compatible = "oxsemi,ox810se-rps-irq";
+					interrupt-controller;
+					reg = <0 0x200>;
+					#interrupt-cells = <1>;
+					valid-mask = <0xFFFFFFFF>;
+					clear-mask = <0>;
+				};
+
+				timer0: timer@200 {
+					compatible = "oxsemi,ox810se-rps-timer";
+					reg = <0x200 0x40>;
+					clocks = <&rpsclk>;
+					interrupts = <4 5>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/phy3250.dts b/arch/arm/boot/dts/phy3250.dts
deleted file mode 100644
index a00d7ce..0000000
--- a/arch/arm/boot/dts/phy3250.dts
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * PHYTEC phyCORE-LPC3250 board
- *
- * Copyright 2012 Roland Stigge <stigge@antcom.de>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-/dts-v1/;
-#include "lpc32xx.dtsi"
-
-/ {
-	model = "PHYTEC phyCORE-LPC3250 board based on NXP LPC3250";
-	compatible = "phytec,phy3250", "nxp,lpc3250";
-	#address-cells = <1>;
-	#size-cells = <1>;
-
-	memory {
-		device_type = "memory";
-		reg = <0x80000000 0x4000000>;
-	};
-
-	regulators {
-		backlight_reg: regulator@0 {
-			compatible = "regulator-fixed";
-			regulator-name = "backlight_reg";
-			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <1800000>;
-			gpio = <&gpio 5 4 0>;
-			enable-active-high;
-			regulator-boot-on;
-		};
-
-		lcd_reg: regulator@1 {
-			compatible = "regulator-fixed";
-			regulator-name = "lcd_reg";
-			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <1800000>;
-			gpio = <&gpio 5 0 0>;
-			enable-active-high;
-			regulator-boot-on;
-		};
-
-		sd_reg: regulator@2 {
-			compatible = "regulator-fixed";
-			regulator-name = "sd_reg";
-			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <1800000>;
-			gpio = <&gpio 5 5 0>;
-			enable-active-high;
-		};
-	};
-
-	ahb {
-		mac: ethernet@31060000 {
-			phy-mode = "rmii";
-			use-iram;
-		};
-
-		clcd@31040000 {
-			status = "okay";
-		};
-
-		/* 64MB Flash via SLC NAND controller */
-		slc: flash@20020000 {
-			status = "okay";
-			#address-cells = <1>;
-			#size-cells = <1>;
-
-			nxp,wdr-clks = <14>;
-			nxp,wwidth = <40000000>;
-			nxp,whold = <100000000>;
-			nxp,wsetup = <100000000>;
-			nxp,rdr-clks = <14>;
-			nxp,rwidth = <40000000>;
-			nxp,rhold = <66666666>;
-			nxp,rsetup = <100000000>;
-			nand-on-flash-bbt;
-			gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */
-
-			mtd0@00000000 {
-				label = "phy3250-boot";
-				reg = <0x00000000 0x00064000>;
-				read-only;
-			};
-
-			mtd1@00064000 {
-				label = "phy3250-uboot";
-				reg = <0x00064000 0x00190000>;
-				read-only;
-			};
-
-			mtd2@001f4000 {
-				label = "phy3250-ubt-prms";
-				reg = <0x001f4000 0x00010000>;
-			};
-
-			mtd3@00204000 {
-				label = "phy3250-kernel";
-				reg = <0x00204000 0x00400000>;
-			};
-
-			mtd4@00604000 {
-				label = "phy3250-rootfs";
-				reg = <0x00604000 0x039fc000>;
-			};
-		};
-
-		apb {
-			uart5: serial@40090000 {
-				status = "okay";
-			};
-
-			uart3: serial@40080000 {
-				status = "okay";
-			};
-
-			i2c1: i2c@400A0000 {
-				clock-frequency = <100000>;
-
-				pcf8563: rtc@51 {
-					compatible = "nxp,pcf8563";
-					reg = <0x51>;
-				};
-
-				uda1380: uda1380@18 {
-					compatible = "nxp,uda1380";
-					reg = <0x18>;
-					power-gpio = <&gpio 0x59 0>;
-					reset-gpio = <&gpio 0x51 0>;
-					dac-clk = "wspll";
-				};
-			};
-
-			i2c2: i2c@400A8000 {
-				clock-frequency = <100000>;
-			};
-
-			ssp0: ssp@20084000 {
-				#address-cells = <1>;
-				#size-cells = <0>;
-				num-cs = <1>;
-				cs-gpios = <&gpio 3 5 0>;
-
-				eeprom: at25@0 {
-					pl022,interface = <0>;
-					pl022,com-mode = <0>;
-					pl022,rx-level-trig = <1>;
-					pl022,tx-level-trig = <1>;
-					pl022,ctrl-len = <11>;
-					pl022,wait-state = <0>;
-					pl022,duplex = <0>;
-
-					at25,byte-len = <0x8000>;
-					at25,addr-mode = <2>;
-					at25,page-size = <64>;
-
-					compatible = "atmel,at25";
-					reg = <0>;
-					spi-max-frequency = <5000000>;
-				};
-			};
-
-			sd@20098000 {
-				wp-gpios = <&gpio 3 0 0>;
-				cd-gpios = <&gpio 3 1 0>;
-				cd-inverted;
-				bus-width = <4>;
-				vmmc-supply = <&sd_reg>;
-				status = "okay";
-			};
-		};
-
-		fab {
-			uart2: serial@40018000 {
-				status = "okay";
-			};
-
-			tsc@40048000 {
-				status = "okay";
-			};
-
-			key@40050000 {
-				status = "okay";
-				keypad,num-rows = <1>;
-				keypad,num-columns = <1>;
-				nxp,debounce-delay-ms = <3>;
-				nxp,scan-delay-ms = <34>;
-				linux,keymap = <0x00000002>;
-			};
-		};
-	};
-
-	leds {
-		compatible = "gpio-leds";
-
-		led0 { /* red */
-			gpios = <&gpio 5 1 0>; /* GPO_P3 1, GPIO 80, active high */
-			default-state = "off";
-		};
-
-		led1 { /* green */
-			gpios = <&gpio 5 14 0>; /* GPO_P3 14, GPIO 93, active high */
-			linux,default-trigger = "heartbeat";
-		};
-	};
-};
-
-/* Here, choose exactly one from: ohci, usbd */
-&ohci /* &usbd */ {
-	transceiver = <&isp1301>;
-	status = "okay";
-};
-
-&i2cusb {
-	clock-frequency = <100000>;
-
-	isp1301: usb-transceiver@2c {
-		compatible = "nxp,isp1301";
-		reg = <0x2c>;
-	};
-};
diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-db600c-pins.dtsi b/arch/arm/boot/dts/qcom-apq8064-arrow-db600c-pins.dtsi
new file mode 100644
index 0000000..a3efb97
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-apq8064-arrow-db600c-pins.dtsi
@@ -0,0 +1,52 @@
+&tlmm_pinmux {
+	card_detect: card-detect {
+		mux {
+			pins = "gpio26";
+			function = "gpio";
+			bias-disable;
+		};
+	};
+
+	pcie_pins: pcie-pinmux {
+		mux {
+			pins = "gpio27";
+			function = "gpio";
+		};
+		conf {
+			pins = "gpio27";
+			drive-strength = <12>;
+			bias-disable;
+		};
+	};
+
+	user_leds: user-leds {
+		mux {
+			pins = "gpio3", "gpio7", "gpio10", "gpio11";
+			function = "gpio";
+		};
+
+		conf {
+			pins = "gpio3", "gpio7", "gpio10", "gpio11";
+			function = "gpio";
+			output-low;
+		};
+	};
+
+	magneto_pins: magneto-pins {
+		mux {
+			pins = "gpio31", "gpio48";
+			function = "gpio";
+			bias-disable;
+		};
+	};
+};
+
+&pm8921_mpps {
+	mpp_leds: mpp-leds {
+		pinconf {
+			pins = "mpp7", "mpp8";
+			function = "digital";
+			output-low;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-db600c.dts b/arch/arm/boot/dts/qcom-apq8064-arrow-db600c.dts
new file mode 100644
index 0000000..e01b27e
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-apq8064-arrow-db600c.dts
@@ -0,0 +1,349 @@
+#include "qcom-apq8064-v2.0.dtsi"
+#include "qcom-apq8064-arrow-db600c-pins.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "Arrow Electronics, APQ8064 DB600c";
+	compatible = "arrow,db600c", "qcom,apq8064";
+
+	aliases {
+		serial0 = &gsbi7_serial;
+		serial1 = &gsbi1_serial;
+		i2c0 = &gsbi2_i2c;
+		i2c1 = &gsbi3_i2c;
+		i2c2 = &gsbi4_i2c;
+		i2c3 = &gsbi7_i2c;
+		spi0 = &gsbi5_spi;
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		vph: regulator-fixed@1 {
+			compatible = "regulator-fixed";
+			regulator-min-microvolt = <4500000>;
+			regulator-max-microvolt = <4500000>;
+			regulator-name = "VPH";
+			regulator-type = "voltage";
+			regulator-boot-on;
+		};
+
+		/* on board fixed 3.3v supply */
+		vcc3v3: vcc3v3 {
+			compatible = "regulator-fixed";
+			regulator-name = "VCC3V3";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+
+	};
+
+	soc {
+		rpm@108000 {
+			regulators {
+				vdd_s1-supply = <&vph>;
+				vdd_s2-supply = <&vph>;
+				vdd_s3-supply = <&vph>;
+				vdd_s4-supply = <&vph>;
+				vdd_s5-supply = <&vph>;
+				vdd_s6-supply = <&vph>;
+				vdd_s7-supply = <&vph>;
+				vdd_l1_l2_l12_l18-supply = <&pm8921_s4>;
+				vdd_l3_l15_l17-supply = <&vph>;
+				vdd_l4_l14-supply = <&vph>;
+				vdd_l5_l8_l16-supply = <&vph>;
+				vdd_l6_l7-supply = <&vph>;
+				vdd_l9_l11-supply = <&vph>;
+				vdd_l10_l22-supply = <&vph>;
+				vdd_l21_l23_l29-supply = <&vph>;
+				vdd_l24-supply = <&pm8921_s1>;
+				vdd_l25-supply = <&pm8921_s1>;
+				vdd_l26-supply = <&pm8921_s7>;
+				vdd_l27-supply = <&pm8921_s7>;
+				vdd_l28-supply = <&pm8921_s7>;
+				vin_lvs1_3_6-supply = <&pm8921_s4>;
+				vin_lvs2-supply = <&pm8921_s1>;
+				vin_lvs4_5_7-supply = <&pm8921_s4>;
+
+				s1 {
+					regulator-always-on;
+					regulator-min-microvolt = <1225000>;
+					regulator-max-microvolt = <1225000>;
+					qcom,switch-mode-frequency = <3200000>;
+					bias-pull-down;
+				};
+
+				s3 {
+					regulator-min-microvolt = <1000000>;
+					regulator-max-microvolt = <1400000>;
+					qcom,switch-mode-frequency = <4800000>;
+				};
+
+				s4 {
+					regulator-min-microvolt	= <1800000>;
+					regulator-max-microvolt	= <1800000>;
+					qcom,switch-mode-frequency = <3200000>;
+					bias-pull-down;
+					regulator-always-on;
+				};
+
+				s7 {
+					regulator-min-microvolt = <1300000>;
+					regulator-max-microvolt = <1300000>;
+					qcom,switch-mode-frequency = <3200000>;
+				 };
+
+				l3 {
+					regulator-min-microvolt = <3050000>;
+					regulator-max-microvolt = <3300000>;
+					bias-pull-down;
+				};
+
+				l4 {
+					regulator-min-microvolt = <1000000>;
+					regulator-max-microvolt = <1800000>;
+					bias-pull-down;
+				};
+
+				l5 {
+					regulator-min-microvolt = <2750000>;
+					regulator-max-microvolt = <3000000>;
+					bias-pull-down;
+					regulator-boot-on;
+					regulator-always-on;
+				};
+
+				l6 {
+					regulator-min-microvolt = <2950000>;
+					regulator-max-microvolt = <2950000>;
+					bias-pull-down;
+				};
+
+				l23 {
+					regulator-min-microvolt = <1700000>;
+					regulator-max-microvolt = <1900000>;
+					bias-pull-down;
+				};
+
+				lvs6 {
+					bias-pull-down;
+				};
+
+				lvs7 {
+					bias-pull-down;
+				};
+			};
+		};
+
+		gsbi@12440000 {
+			status = "okay";
+			qcom,mode = <GSBI_PROT_UART_W_FC>;
+			serial@12450000 {
+				label = "LS-UART1";
+				status = "okay";
+				pinctrl-names = "default";
+				pinctrl-0 = <&gsbi1_uart_4pins>;
+			};
+		};
+
+		gsbi@12480000 {
+			status = "okay";
+			qcom,mode = <GSBI_PROT_I2C>;
+			i2c@124a0000 {
+				/* On Low speed expansion and Sensors */
+				label = "LS-I2C0";
+				status = "okay";
+				lis3mdl_mag@1e {
+					compatible = "st,lis3mdl-magn";
+					reg = <0x1e>;
+					vdd-supply = <&vcc3v3>;
+					vddio-supply = <&pm8921_s4>;
+					pinctrl-names = "default";
+					pinctrl-0 = <&magneto_pins>;
+					interrupt-parent = <&tlmm_pinmux>;
+
+					st,drdy-int-pin = <2>;
+					interrupts = <48 IRQ_TYPE_EDGE_RISING>, /* DRDY line */
+						     <31 IRQ_TYPE_EDGE_RISING>; /* INT */
+				};
+			};
+		};
+
+		gsbi@16200000 {
+			status = "okay";
+			qcom,mode = <GSBI_PROT_I2C>;
+			i2c@16280000 {
+			/* On Low speed expansion */
+				status = "okay";
+				label = "LS-I2C1";
+				clock-frequency = <200000>;
+				eeprom@52 {
+					compatible = "atmel,24c128";
+					reg = <0x52>;
+					pagesize = <64>;
+				};
+			};
+		};
+
+		gsbi@16300000 {
+			status = "okay";
+			qcom,mode = <GSBI_PROT_I2C>;
+			i2c@16380000 {
+				/* On High speed expansion */
+				label = "HS-CAM-I2C3";
+				status = "okay";
+			};
+		};
+
+		gsbi@1a200000 {
+			status = "okay";
+			spi@1a280000 {
+				/* On Low speed expansion */
+				label = "LS-SPI0";
+				status = "okay";
+			};
+		};
+
+		/* DEBUG UART  */
+		gsbi@16600000 {
+			status = "okay";
+			qcom,mode = <GSBI_PROT_I2C_UART>;
+			serial@16640000 {
+				label = "LS-UART0";
+				status = "okay";
+				pinctrl-names = "default";
+				pinctrl-0 = <&gsbi7_uart_2pins>;
+			};
+
+			i2c@16680000 {
+				/* On High speed expansion */
+				status = "okay";
+				label = "HS-CAM-I2C2";
+			};
+		};
+
+		leds {
+			pinctrl-names = "default";
+			pinctrl-0 = <&user_leds>, <&mpp_leds>;
+
+			compatible = "gpio-leds";
+
+			user-led0 {
+				label = "user0-led";
+				gpios = <&tlmm_pinmux 3 GPIO_ACTIVE_HIGH>;
+				linux,default-trigger = "heartbeat";
+				default-state = "off";
+			};
+
+			user-led1 {
+				label = "user1-led";
+				gpios = <&tlmm_pinmux 7 GPIO_ACTIVE_HIGH>;
+				linux,default-trigger = "mmc0";
+				default-state = "off";
+			};
+
+			user-led2 {
+				label = "user2-led";
+				gpios = <&tlmm_pinmux 10 GPIO_ACTIVE_HIGH>;
+				linux,default-trigger = "mmc1";
+				default-state = "off";
+			};
+
+			user-led3 {
+				label = "user3-led";
+				gpios = <&tlmm_pinmux 11 GPIO_ACTIVE_HIGH>;
+				linux,default-trigger = "none";
+				default-state = "off";
+			};
+
+			wifi-led {
+				label = "WiFi-led";
+				gpios = <&pm8921_mpps 7 GPIO_ACTIVE_HIGH>;
+				default-state = "off";
+			};
+
+			bt-led {
+				label = "BT-led";
+				gpios = <&pm8921_mpps 8 GPIO_ACTIVE_HIGH>;
+				default-state = "off";
+			};
+		};
+
+		pci@1b500000 {
+			status = "okay";
+			vdda-supply = <&pm8921_s3>;
+			vdda_phy-supply = <&pm8921_lvs6>;
+			vdda_refclk-supply = <&vcc3v3>;
+			pinctrl-0 = <&pcie_pins>;
+			pinctrl-names = "default";
+			perst-gpio = <&tlmm_pinmux 27 GPIO_ACTIVE_LOW>;
+		};
+
+		phy@1b400000 {
+			status = "okay";
+		};
+
+		sata@29000000 {
+			status	= "okay";
+			target-supply	= <&pm8921_lvs7>;
+		};
+
+		/* OTG */
+		phy@12500000 {
+			status		= "okay";
+			dr_mode		= "peripheral";
+			vddcx-supply	= <&pm8921_s3>;
+			v3p3-supply	= <&pm8921_l3>;
+			v1p8-supply	= <&pm8921_l4>;
+		};
+
+		phy@12520000 {
+			status		= "okay";
+			vddcx-supply	= <&pm8921_s3>;
+			v3p3-supply	= <&pm8921_l3>;
+			v1p8-supply	= <&pm8921_l23>;
+		};
+
+		phy@12530000 {
+			status		= "okay";
+			vddcx-supply	= <&pm8921_s3>;
+			v3p3-supply	= <&pm8921_l3>;
+			v1p8-supply	= <&pm8921_l23>;
+		};
+
+		gadget@12500000 {
+			status = "okay";
+		};
+
+		/* OTG */
+		usb@12500000 {
+			status = "okay";
+		};
+
+		usb@12520000 {
+			status = "okay";
+		};
+
+		usb@12530000 {
+			status = "okay";
+		};
+
+		amba {
+			/* eMMC */
+			sdcc@12400000 {
+				status = "okay";
+				vmmc-supply = <&pm8921_l5>;
+				vqmmc-supply = <&pm8921_s4>;
+			};
+
+			/* External micro SD card */
+			sdcc@12180000 {
+				status = "okay";
+				vmmc-supply = <&pm8921_l6>;
+				pinctrl-names	= "default";
+				pinctrl-0	= <&card_detect>;
+				cd-gpios	= <&tlmm_pinmux 26 GPIO_ACTIVE_HIGH>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
index c535b3f..32fedfa 100644
--- a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
@@ -224,6 +224,12 @@
 					reg = <0x52>;
 					pagesize = <32>;
 				};
+
+				bq27541@55 {
+					compatible = "ti,bq27541";
+					reg = <0x55>;
+				};
+
 			};
 		};
 
diff --git a/arch/arm/boot/dts/qcom-apq8064-pins.dtsi b/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
index b57c59d..4102a98 100644
--- a/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
@@ -39,6 +39,20 @@
 		};
 	};
 
+	gsbi1_uart_2pins: gsbi1_uart_2pins {
+		mux {
+			pins = "gpio18", "gpio19";
+			function = "gsbi1";
+		};
+	};
+
+	gsbi1_uart_4pins: gsbi1_uart_4pins {
+		mux {
+			pins = "gpio18", "gpio19", "gpio20", "gpio21";
+			function = "gsbi1";
+		};
+	};
+
 	i2c2_pins: i2c2 {
 		mux {
 			pins = "gpio24", "gpio25";
@@ -205,4 +219,29 @@
 			function = "gsbi7";
 		};
 	};
+
+	i2c7_pins: i2c7 {
+		mux {
+			pins = "gpio84", "gpio85";
+			function = "gsbi7";
+		};
+
+		pinconf {
+			pins = "gpio84", "gpio85";
+			drive-strength = <16>;
+			bias-disable;
+		};
+	};
+
+	i2c7_pins_sleep: i2c7_pins_sleep {
+		mux {
+			pins = "gpio84", "gpio85";
+			function = "gpio";
+		};
+		pinconf {
+			pins = "gpio84", "gpio85";
+			drive-strength = <2>;
+			bias-disable = <0>;
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 65d0e8d..df96ccd 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -124,6 +124,95 @@
 		hwlocks = <&sfpb_mutex 3>;
 	};
 
+	smd {
+		compatible = "qcom,smd";
+
+		modem@0 {
+			interrupts = <0 37 IRQ_TYPE_EDGE_RISING>;
+
+			qcom,ipc = <&l2cc 8 3>;
+			qcom,smd-edge = <0>;
+
+			status = "disabled";
+		};
+
+		q6@1 {
+			interrupts = <0 90 IRQ_TYPE_EDGE_RISING>;
+
+			qcom,ipc = <&l2cc 8 15>;
+			qcom,smd-edge = <1>;
+
+			status = "disabled";
+		};
+
+		dsps@3 {
+			interrupts = <0 138 IRQ_TYPE_EDGE_RISING>;
+
+			qcom,ipc = <&sps_sic_non_secure 0x4080 0>;
+			qcom,smd-edge = <3>;
+
+			status = "disabled";
+		};
+
+		riva@6 {
+			interrupts = <0 198 IRQ_TYPE_EDGE_RISING>;
+
+			qcom,ipc = <&l2cc 8 25>;
+			qcom,smd-edge = <6>;
+
+			status = "disabled";
+		};
+	};
+
+	smsm {
+		compatible = "qcom,smsm";
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,ipc-1 = <&l2cc 8 4>;
+		qcom,ipc-2 = <&l2cc 8 14>;
+		qcom,ipc-3 = <&l2cc 8 23>;
+		qcom,ipc-4 = <&sps_sic_non_secure 0x4094 0>;
+
+		apps_smsm: apps@0 {
+			reg = <0>;
+			#qcom,state-cells = <1>;
+		};
+
+		modem_smsm: modem@1 {
+			reg = <1>;
+			interrupts = <0 38 IRQ_TYPE_EDGE_RISING>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		q6_smsm: q6@2 {
+			reg = <2>;
+			interrupts = <0 89 IRQ_TYPE_EDGE_RISING>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		wcnss_smsm: wcnss@3 {
+			reg = <3>;
+			interrupts = <0 204 IRQ_TYPE_EDGE_RISING>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		dsps_smsm: dsps@4 {
+			reg = <4>;
+			interrupts = <0 137 IRQ_TYPE_EDGE_RISING>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
 	soc: soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -212,6 +301,11 @@
 			regulator;
 		};
 
+		sps_sic_non_secure: sps-sic-non-secure@12100000 {
+			compatible	= "syscon";
+			reg		= <0x12100000 0x10000>;
+		};
+
 		gsbi1: gsbi@12440000 {
 			status = "disabled";
 			compatible = "qcom,gsbi-v1.0.0";
@@ -225,9 +319,20 @@
 
 			syscon-tcsr = <&tcsr>;
 
+			gsbi1_serial: serial@12450000 {
+				compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+				reg = <0x12450000 0x100>,
+				      <0x12400000 0x03>;
+				interrupts = <0 193 0x0>;
+				clocks = <&gcc GSBI1_UART_CLK>, <&gcc GSBI1_H_CLK>;
+				clock-names = "core", "iface";
+				status = "disabled";
+			};
+
 			gsbi1_i2c: i2c@12460000 {
 				compatible = "qcom,i2c-qup-v1.1.1";
-				pinctrl-0 = <&i2c1_pins &i2c1_pins_sleep>;
+				pinctrl-0 = <&i2c1_pins>;
+				pinctrl-1 = <&i2c1_pins_sleep>;
 				pinctrl-names = "default", "sleep";
 				reg = <0x12460000 0x1000>;
 				interrupts = <0 194 IRQ_TYPE_NONE>;
@@ -255,7 +360,8 @@
 			gsbi2_i2c: i2c@124a0000 {
 				compatible = "qcom,i2c-qup-v1.1.1";
 				reg = <0x124a0000 0x1000>;
-				pinctrl-0 = <&i2c2_pins &i2c2_pins_sleep>;
+				pinctrl-0 = <&i2c2_pins>;
+				pinctrl-1 = <&i2c2_pins_sleep>;
 				pinctrl-names = "default", "sleep";
 				interrupts = <0 196 IRQ_TYPE_NONE>;
 				clocks = <&gcc GSBI2_QUP_CLK>, <&gcc GSBI2_H_CLK>;
@@ -277,7 +383,8 @@
 			ranges;
 			gsbi3_i2c: i2c@16280000 {
 				compatible = "qcom,i2c-qup-v1.1.1";
-				pinctrl-0 = <&i2c3_pins &i2c3_pins_sleep>;
+				pinctrl-0 = <&i2c3_pins>;
+				pinctrl-1 = <&i2c3_pins_sleep>;
 				pinctrl-names = "default", "sleep";
 				reg = <0x16280000 0x1000>;
 				interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
@@ -302,7 +409,8 @@
 
 			gsbi4_i2c: i2c@16380000 {
 				compatible = "qcom,i2c-qup-v1.1.1";
-				pinctrl-0 = <&i2c4_pins &i2c4_pins_sleep>;
+				pinctrl-0 = <&i2c4_pins>;
+				pinctrl-1 = <&i2c4_pins_sleep>;
 				pinctrl-names = "default", "sleep";
 				reg = <0x16380000 0x1000>;
 				interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
@@ -337,7 +445,8 @@
 				compatible = "qcom,spi-qup-v1.1.1";
 				reg = <0x1a280000 0x1000>;
 				interrupts = <0 155 0>;
-				pinctrl-0 = <&spi5_default &spi5_sleep>;
+				pinctrl-0 = <&spi5_default>;
+				pinctrl-1 = <&spi5_sleep>;
 				pinctrl-names = "default", "sleep";
 				clocks = <&gcc GSBI5_QUP_CLK>, <&gcc GSBI5_H_CLK>;
 				clock-names = "core", "iface";
@@ -370,7 +479,8 @@
 
 			gsbi6_i2c: i2c@16580000 {
 				compatible = "qcom,i2c-qup-v1.1.1";
-				pinctrl-0 = <&i2c6_pins &i2c6_pins_sleep>;
+				pinctrl-0 = <&i2c6_pins>;
+				pinctrl-1 = <&i2c6_pins_sleep>;
 				pinctrl-names = "default", "sleep";
 				reg = <0x16580000 0x1000>;
 				interrupts = <GIC_SPI 157 IRQ_TYPE_NONE>;
@@ -401,6 +511,19 @@
 				clock-names = "core", "iface";
 				status = "disabled";
 			};
+
+			gsbi7_i2c: i2c@16680000 {
+				compatible = "qcom,i2c-qup-v1.1.1";
+				pinctrl-0 = <&i2c7_pins>;
+				pinctrl-1 = <&i2c7_pins_sleep>;
+				pinctrl-names = "default", "sleep";
+				reg = <0x16680000 0x1000>;
+				interrupts = <GIC_SPI 159 IRQ_TYPE_NONE>;
+				clocks = <&gcc GSBI7_QUP_CLK>,
+					 <&gcc GSBI7_H_CLK>;
+				clock-names = "core", "iface";
+				status = "disabled";
+			};
 		};
 
 		rng@1a500000 {
@@ -666,7 +789,7 @@
 		};
 
 		sata0: sata@29000000 {
-			compatible		= "generic-ahci";
+			compatible		= "qcom,apq8064-ahci", "generic-ahci";
 			status			= "disabled";
 			reg			= <0x29000000 0x180>;
 			interrupts		= <GIC_SPI 209 IRQ_TYPE_NONE>;
@@ -688,6 +811,7 @@
 
 			phys			= <&sata_phy0>;
 			phy-names		= "sata-phy";
+			ports-implemented	= <0x1>;
 		};
 
 		/* Temporary fixed regulator */
diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts
new file mode 100644
index 0000000..0d92f1b
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1-c1.dts
@@ -0,0 +1,22 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "qcom-ipq4019-ap.dk01.1.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. IPQ40xx/AP-DK01.1-C1";
+
+};
diff --git a/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
new file mode 100644
index 0000000..b9457dd
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-ipq4019-ap.dk01.1.dtsi
@@ -0,0 +1,112 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include "qcom-ipq4019.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. IPQ4019/AP-DK01.1";
+	compatible = "qcom,ipq4019";
+
+	clocks {
+                xo: xo {
+                        compatible = "fixed-clock";
+                        clock-frequency = <48000000>;
+                        #clock-cells = <0>;
+                };
+	};
+
+	soc {
+
+
+		timer {
+			compatible = "arm,armv7-timer";
+			interrupts = <1 2 0xf08>,
+				     <1 3 0xf08>,
+				     <1 4 0xf08>,
+				     <1 1 0xf08>;
+			clock-frequency = <48000000>;
+		};
+
+		pinctrl@0x01000000 {
+			serial_pins: serial_pinmux {
+				mux {
+					pins = "gpio60", "gpio61";
+					function = "blsp_uart0";
+					bias-disable;
+				};
+			};
+
+			spi_0_pins: spi_0_pinmux {
+				pinmux {
+					function = "blsp_spi0";
+					pins = "gpio55", "gpio56", "gpio57";
+				};
+				pinmux_cs {
+					function = "gpio";
+					pins = "gpio54";
+				};
+				pinconf {
+					pins = "gpio55", "gpio56", "gpio57";
+					drive-strength = <12>;
+					bias-disable;
+				};
+				pinconf_cs {
+					pins = "gpio54";
+					drive-strength = <2>;
+					bias-disable;
+					output-high;
+				};
+			};
+		};
+
+		blsp_dma: dma@7884000 {
+			status = "ok";
+		};
+
+		spi_0: spi@78b5000 {
+			pinctrl-0 = <&spi_0_pins>;
+			pinctrl-names = "default";
+			status = "ok";
+			cs-gpios = <&tlmm 54 0>;
+
+			mx25l25635e@0 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0>;
+				compatible = "mx25l25635e";
+				spi-max-frequency = <24000000>;
+			};
+		};
+
+		serial@78af000 {
+			pinctrl-0 = <&serial_pins>;
+			pinctrl-names = "default";
+			status = "ok";
+		};
+
+		cryptobam: dma@8e04000 {
+			status = "ok";
+		};
+
+		crypto@8e3a000 {
+			status = "ok";
+		};
+
+		watchdog@b017000 {
+			status = "ok";
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
new file mode 100644
index 0000000..5c08d19
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "skeleton.dtsi"
+#include <dt-bindings/clock/qcom,gcc-ipq4019.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+	model = "Qualcomm Technologies, Inc. IPQ4019";
+	compatible = "qcom,ipq4019";
+	interrupt-parent = <&intc>;
+
+	aliases {
+		spi0 = &spi_0;
+		i2c0 = &i2c_0;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			enable-method = "qcom,kpss-acc-v1";
+			qcom,acc = <&acc0>;
+			qcom,saw = <&saw0>;
+			reg = <0x0>;
+			clocks = <&gcc GCC_APPS_CLK_SRC>;
+			clock-frequency = <0>;
+			operating-points = <
+				/* kHz	uV (fixed) */
+				48000	1100000
+				200000	1100000
+				500000	1100000
+				666000	1100000
+			>;
+			clock-latency = <256000>;
+		};
+
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			enable-method = "qcom,kpss-acc-v1";
+			qcom,acc = <&acc1>;
+			qcom,saw = <&saw1>;
+			reg = <0x1>;
+			clocks = <&gcc GCC_APPS_CLK_SRC>;
+			clock-frequency = <0>;
+		};
+
+		cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			enable-method = "qcom,kpss-acc-v1";
+			qcom,acc = <&acc2>;
+			qcom,saw = <&saw2>;
+			reg = <0x2>;
+			clocks = <&gcc GCC_APPS_CLK_SRC>;
+			clock-frequency = <0>;
+		};
+
+		cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			enable-method = "qcom,kpss-acc-v1";
+			qcom,acc = <&acc3>;
+			qcom,saw = <&saw3>;
+			reg = <0x3>;
+			clocks = <&gcc GCC_APPS_CLK_SRC>;
+			clock-frequency = <0>;
+		};
+	};
+
+	clocks {
+		sleep_clk: sleep_clk {
+			compatible = "fixed-clock";
+			clock-frequency = <32768>;
+			#clock-cells = <0>;
+		};
+	};
+
+	soc {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		compatible = "simple-bus";
+
+		intc: interrupt-controller@b000000 {
+			compatible = "qcom,msm-qgic2";
+			interrupt-controller;
+			#interrupt-cells = <3>;
+			reg = <0x0b000000 0x1000>,
+			<0x0b002000 0x1000>;
+		};
+
+		gcc: clock-controller@1800000 {
+			compatible = "qcom,gcc-ipq4019";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+			reg = <0x1800000 0x60000>;
+		};
+
+		tlmm: pinctrl@0x01000000 {
+			compatible = "qcom,ipq4019-pinctrl";
+			reg = <0x01000000 0x300000>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			interrupts = <0 208 0>;
+		};
+
+		blsp_dma: dma@7884000 {
+			compatible = "qcom,bam-v1.7.0";
+			reg = <0x07884000 0x23000>;
+			interrupts = <GIC_SPI 238 IRQ_TYPE_NONE>;
+			clocks = <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "bam_clk";
+			#dma-cells = <1>;
+			qcom,ee = <0>;
+			status = "disabled";
+		};
+
+		spi_0: spi@78b5000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x78b5000 0x600>;
+			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c_0: i2c@78b7000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x78b7000 0x6000>;
+			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+				 <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
+			clock-names = "iface", "core";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+
+		cryptobam: dma@8e04000 {
+			compatible = "qcom,bam-v1.7.0";
+			reg = <0x08e04000 0x20000>;
+			interrupts = <GIC_SPI 207 0>;
+			clocks = <&gcc GCC_CRYPTO_AHB_CLK>;
+			clock-names = "bam_clk";
+			#dma-cells = <1>;
+			qcom,ee = <1>;
+			qcom,controlled-remotely;
+			status = "disabled";
+		};
+
+		crypto@8e3a000 {
+			compatible = "qcom,crypto-v5.1";
+			reg = <0x08e3a000 0x6000>;
+			clocks = <&gcc GCC_CRYPTO_AHB_CLK>,
+				 <&gcc GCC_CRYPTO_AXI_CLK>,
+				 <&gcc GCC_CRYPTO_CLK>;
+			clock-names = "iface", "bus", "core";
+			dmas = <&cryptobam 2>, <&cryptobam 3>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+                acc0: clock-controller@b088000 {
+                        compatible = "qcom,kpss-acc-v1";
+                        reg = <0x0b088000 0x1000>, <0xb008000 0x1000>;
+                };
+
+                acc1: clock-controller@b098000 {
+                        compatible = "qcom,kpss-acc-v1";
+                        reg = <0x0b098000 0x1000>, <0xb008000 0x1000>;
+                };
+
+                acc2: clock-controller@b0a8000 {
+                        compatible = "qcom,kpss-acc-v1";
+                        reg = <0x0b0a8000 0x1000>, <0xb008000 0x1000>;
+                };
+
+                acc3: clock-controller@b0b8000 {
+                        compatible = "qcom,kpss-acc-v1";
+                        reg = <0x0b0b8000 0x1000>, <0xb008000 0x1000>;
+                };
+
+                saw0: regulator@b089000 {
+                        compatible = "qcom,saw2";
+                        reg = <0x02089000 0x1000>, <0x0b009000 0x1000>;
+                        regulator;
+                };
+
+                saw1: regulator@b099000 {
+                        compatible = "qcom,saw2";
+                        reg = <0x0b099000 0x1000>, <0x0b009000 0x1000>;
+                        regulator;
+                };
+
+                saw2: regulator@b0a9000 {
+                        compatible = "qcom,saw2";
+                        reg = <0x0b0a9000 0x1000>, <0x0b009000 0x1000>;
+                        regulator;
+                };
+
+                saw3: regulator@b0b9000 {
+                        compatible = "qcom,saw2";
+                        reg = <0x0b0b9000 0x1000>, <0x0b009000 0x1000>;
+                        regulator;
+                };
+
+		serial@78af000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x78af000 0x200>;
+			interrupts = <0 107 0>;
+			status = "disabled";
+			clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>,
+				<&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 1>, <&blsp_dma 0>;
+			dma-names = "rx", "tx";
+		};
+
+		serial@78b0000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0x78b0000 0x200>;
+			interrupts = <0 108 0>;
+			status = "disabled";
+			clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+				<&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 3>, <&blsp_dma 2>;
+			dma-names = "rx", "tx";
+		};
+
+		watchdog@b017000 {
+			compatible = "qcom,kpss-standalone";
+			reg = <0xb017000 0x40>;
+			clocks = <&sleep_clk>;
+			timeout-sec = <10>;
+			status = "disabled";
+		};
+
+		restart@4ab000 {
+			compatible = "qcom,pshold";
+			reg = <0x4ab000 0x4>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index ef53305..6f16426 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/clock/qcom,gcc-msm8974.h>
 #include "skeleton.dtsi"
 
@@ -49,8 +49,13 @@
 			no-map;
 		};
 
-		efs@0fd600000 {
-			reg = <0x0fd60000 0x1a0000>;
+		rfsa@0fd60000 {
+			reg = <0x0fd60000 0x20000>;
+			no-map;
+		};
+
+		rmtfs@0fd80000 {
+			reg = <0x0fd80000 0x180000>;
 			no-map;
 		};
 
@@ -163,6 +168,31 @@
 		hwlocks = <&tcsr_mutex 3>;
 	};
 
+	smp2p-modem {
+		compatible = "qcom,smp2p";
+		qcom,smem = <435>, <428>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <0 27 IRQ_TYPE_EDGE_RISING>;
+
+		qcom,ipc = <&apcs 8 14>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <1>;
+
+		modem_smp2p_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,state-cells = <1>;
+		};
+
+		modem_smp2p_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
+
 	smp2p-wcnss {
 		compatible = "qcom,smp2p";
 		qcom,smem = <451>, <431>;
@@ -440,6 +470,17 @@
 			interrupts = <0 208 0>;
 		};
 
+		i2c@f9924000 {
+			status = "disabled";
+			compatible = "qcom,i2c-qup-v2.1.1";
+			reg = <0xf9924000 0x1000>;
+			interrupts = <0 96 IRQ_TYPE_NONE>;
+			clocks = <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
 		blsp_i2c8: i2c@f9964000 {
 			status = "disabled";
 			compatible = "qcom,i2c-qup-v2.1.1";
@@ -460,8 +501,6 @@
 			clock-names = "core", "iface";
 			#address-cells = <1>;
 			#size-cells = <0>;
-			dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
-			dma-names = "tx", "rx";
 		};
 
 		spmi_bus: spmi@fc4cf000 {
@@ -479,21 +518,18 @@
 			interrupt-controller;
 			#interrupt-cells = <4>;
 		};
-
-		blsp2_dma: dma-controller@f9944000 {
-			compatible = "qcom,bam-v1.4.0";
-			reg = <0xf9944000 0x19000>;
-			interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&gcc GCC_BLSP2_AHB_CLK>;
-			clock-names = "bam_clk";
-			#dma-cells = <1>;
-			qcom,ee = <0>;
-		};
 	};
 
 	smd {
 		compatible = "qcom,smd";
 
+		modem {
+			interrupts = <0 25 IRQ_TYPE_EDGE_RISING>;
+
+			qcom,ipc = <&apcs 8 12>;
+			qcom,smd-edge = <0>;
+		};
+
 		rpm {
 			interrupts = <0 168 1>;
 			qcom,ipc = <&apcs 8 0>;
diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi
index 89e46eb..e8e2a5d7 100644
--- a/arch/arm/boot/dts/r7s72100.dtsi
+++ b/arch/arm/boot/dts/r7s72100.dtsi
@@ -37,46 +37,41 @@
 		#size-cells = <1>;
 
 		/* External clocks */
-		extal_clk: extal_clk {
+		extal_clk: extal {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
 			/* If clk present, value must be set by board */
 			clock-frequency = <0>;
-			clock-output-names = "extal";
 		};
 
-		usb_x1_clk: usb_x1_clk {
+		usb_x1_clk: usb_x1 {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
 			/* If clk present, value must be set by board */
 			clock-frequency = <0>;
-			clock-output-names = "usb_x1";
 		};
 
 		/* Fixed factor clocks */
-		b_clk: b_clk {
+		b_clk: b {
 			#clock-cells = <0>;
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R7S72100_CLK_PLL>;
 			clock-mult = <1>;
 			clock-div = <3>;
-			clock-output-names = "b";
 		};
-		p1_clk: p1_clk {
+		p1_clk: p1 {
 			#clock-cells = <0>;
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R7S72100_CLK_PLL>;
 			clock-mult = <1>;
 			clock-div = <6>;
-			clock-output-names = "p1";
 		};
-		p0_clk: p0_clk {
+		p0_clk: p0 {
 			#clock-cells = <0>;
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R7S72100_CLK_PLL>;
 			clock-mult = <1>;
 			clock-div = <12>;
-			clock-output-names = "p0";
 		};
 
 		/* Special CPG clocks */
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
index 5902570..93ace33 100644
--- a/arch/arm/boot/dts/r8a73a4-ape6evm.dts
+++ b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
@@ -189,28 +189,28 @@
 
 &pfc {
 	scifa0_pins: serial0 {
-		renesas,groups = "scifa0_data";
-		renesas,function = "scifa0";
+		groups = "scifa0_data";
+		function = "scifa0";
 	};
 
 	mmc0_pins: mmc {
-		renesas,groups = "mmc0_data8", "mmc0_ctrl";
-		renesas,function = "mmc0";
+		groups = "mmc0_data8", "mmc0_ctrl";
+		function = "mmc0";
 	};
 
 	sdhi0_pins: sd0 {
-		renesas,groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd";
+		function = "sdhi0";
 	};
 
 	sdhi1_pins: sd1 {
-		renesas,groups = "sdhi1_data4", "sdhi1_ctrl";
-		renesas,function = "sdhi1";
+		groups = "sdhi1_data4", "sdhi1_ctrl";
+		function = "sdhi1";
 	};
 
 	keyboard_pins: keyboard {
-		renesas,pins = "PORT324", "PORT325", "PORT326", "PORT327",
-			       "PORT328", "PORT329";
+		pins = "PORT324", "PORT325", "PORT326", "PORT327", "PORT328",
+		       "PORT329";
 		bias-pull-up;
 	};
 };
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index 6583a1d..6954912 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -486,37 +486,32 @@
 		ranges;
 
 		/* External root clocks */
-		extalr_clk: extalr_clk {
+		extalr_clk: extalr {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <32768>;
-			clock-output-names = "extalr";
 		};
-		extal1_clk: extal1_clk {
+		extal1_clk: extal1 {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <25000000>;
-			clock-output-names = "extal1";
 		};
-		extal2_clk: extal2_clk {
+		extal2_clk: extal2 {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <48000000>;
-			clock-output-names = "extal2";
 		};
-		fsiack_clk: fsiack_clk {
+		fsiack_clk: fsiack {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			clock-output-names = "fsiack";
 		};
-		fsibck_clk: fsibck_clk {
+		fsibck_clk: fsibck {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			clock-output-names = "fsibck";
 		};
 
 		/* Special CPG clocks */
@@ -540,171 +535,151 @@
 			#clock-cells = <0>;
 			clock-output-names = "zb";
 		};
-		sdhi0_clk: sdhi0_clk@e6150074 {
+		sdhi0_clk: sdhi0ck@e6150074 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150074 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <0>, <&extal2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sdhi0ck";
 		};
-		sdhi1_clk: sdhi1_clk@e6150078 {
+		sdhi1_clk: sdhi1ck@e6150078 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150078 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <0>, <&extal2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sdhi1ck";
 		};
-		sdhi2_clk: sdhi2_clk@e615007c {
+		sdhi2_clk: sdhi2ck@e615007c {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe615007c 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <0>, <&extal2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sdhi2ck";
 		};
-		mmc0_clk: mmc0_clk@e6150240 {
+		mmc0_clk: mmc0@e6150240 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150240 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <0>, <&extal2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "mmc0";
 		};
-		mmc1_clk: mmc1_clk@e6150244 {
+		mmc1_clk: mmc1@e6150244 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150244 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <0>, <&extal2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "mmc1";
 		};
-		vclk1_clk: vclk1_clk@e6150008 {
+		vclk1_clk: vclk1@e6150008 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150008 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <0>, <&extal2_clk>, <&main_div2_clk>,
 				 <&extalr_clk>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vclk1";
 		};
-		vclk2_clk: vclk2_clk@e615000c {
+		vclk2_clk: vclk2@e615000c {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe615000c 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <0>, <&extal2_clk>, <&main_div2_clk>,
 				 <&extalr_clk>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vclk2";
 		};
-		vclk3_clk: vclk3_clk@e615001c {
+		vclk3_clk: vclk3@e615001c {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe615001c 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <0>, <&extal2_clk>, <&main_div2_clk>,
 				 <&extalr_clk>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vclk3";
 		};
-		vclk4_clk: vclk4_clk@e6150014 {
+		vclk4_clk: vclk4@e6150014 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150014 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <0>, <&extal2_clk>, <&main_div2_clk>,
 				 <&extalr_clk>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vclk4";
 		};
-		vclk5_clk: vclk5_clk@e6150034 {
+		vclk5_clk: vclk5@e6150034 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150034 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <0>, <&extal2_clk>, <&main_div2_clk>,
 				 <&extalr_clk>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vclk5";
 		};
-		fsia_clk: fsia_clk@e6150018 {
+		fsia_clk: fsia@e6150018 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150018 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <&fsiack_clk>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "fsia";
 		};
-		fsib_clk: fsib_clk@e6150090 {
+		fsib_clk: fsib@e6150090 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150090 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <&fsibck_clk>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "fsib";
 		};
-		mp_clk: mp_clk@e6150080 {
+		mp_clk: mp@e6150080 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150080 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <&extal2_clk>, <&extal2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "mp";
 		};
-		m4_clk: m4_clk@e6150098 {
+		m4_clk: m4@e6150098 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150098 0 4>;
 			clocks = <&cpg_clocks R8A73A4_CLK_PLL2S>;
 			#clock-cells = <0>;
-			clock-output-names = "m4";
 		};
-		hsi_clk: hsi_clk@e615026c {
+		hsi_clk: hsi@e615026c {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe615026c 0 4>;
 			clocks = <&cpg_clocks R8A73A4_CLK_PLL2H>, <&pll1_div2_clk>,
 				 <&cpg_clocks R8A73A4_CLK_PLL2S>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "hsi";
 		};
-		spuv_clk: spuv_clk@e6150094 {
+		spuv_clk: spuv@e6150094 {
 			compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150094 0 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
 				 <&extal2_clk>, <&extal2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "spuv";
 		};
 
 		/* Fixed factor clocks */
-		main_div2_clk: main_div2_clk {
+		main_div2_clk: main_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A73A4_CLK_MAIN>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "main_div2";
 		};
-		pll0_div2_clk: pll0_div2_clk {
+		pll0_div2_clk: pll0_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A73A4_CLK_PLL0>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "pll0_div2";
 		};
-		pll1_div2_clk: pll1_div2_clk {
+		pll1_div2_clk: pll1_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A73A4_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "pll1_div2";
 		};
-		extal1_div2_clk: extal1_div2_clk {
+		extal1_div2_clk: extal1_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&extal1_clk>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "extal1_div2";
 		};
 
 		/* Gate clocks */
diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
index c548cab..2c82dab 100644
--- a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
+++ b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
@@ -228,44 +228,44 @@
 	pinctrl-names = "default";
 
 	ether_pins: ether {
-		renesas,groups = "gether_mii", "gether_int";
-		renesas,function = "gether";
+		groups = "gether_mii", "gether_int";
+		function = "gether";
 	};
 
 	scifa1_pins: serial1 {
-		renesas,groups = "scifa1_data";
-		renesas,function = "scifa1";
+		groups = "scifa1_data";
+		function = "scifa1";
 	};
 
 	st1232_pins: touchscreen {
-		renesas,groups = "intc_irq10";
-		renesas,function = "intc";
+		groups = "intc_irq10";
+		function = "intc";
 	};
 
 	backlight_pins: backlight {
-		renesas,groups = "tpu0_to2_1";
-		renesas,function = "tpu0";
+		groups = "tpu0_to2_1";
+		function = "tpu0";
 	};
 
 	mmc0_pins: mmc0 {
-		renesas,groups = "mmc0_data8_1", "mmc0_ctrl_1";
-		renesas,function = "mmc0";
+		groups = "mmc0_data8_1", "mmc0_ctrl_1";
+		function = "mmc0";
 	};
 
 	sdhi0_pins: sd0 {
-		renesas,groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_wp";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_wp";
+		function = "sdhi0";
 	};
 
 	fsia_pins: sounda {
-		renesas,groups = "fsia_sclk_in", "fsia_mclk_out",
-				 "fsia_data_in_1", "fsia_data_out_0";
-		renesas,function = "fsia";
+		groups = "fsia_sclk_in", "fsia_mclk_out",
+			 "fsia_data_in_1", "fsia_data_out_0";
+		function = "fsia";
 	};
 
 	lcd0_pins: lcd0 {
-		renesas,groups = "lcd0_data24_0", "lcd0_lclk_1", "lcd0_sync";
-		renesas,function = "lcd0";
+		groups = "lcd0_data24_0", "lcd0_lclk_1", "lcd0_sync";
+		function = "lcd0";
 
 		/* DBGMD/LCDC0/FSIA MUX */
 		gpio-hog;
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index 995fbda..39b2f88 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -422,53 +422,45 @@
 		ranges;
 
 		/* External root clock */
-		extalr_clk: extalr_clk {
+		extalr_clk: extalr {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <32768>;
-			clock-output-names = "extalr";
 		};
-		extal1_clk: extal1_clk {
+		extal1_clk: extal1 {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "extal1";
 		};
-		extal2_clk: extal2_clk {
+		extal2_clk: extal2 {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "extal2";
 		};
-		dv_clk: dv_clk {
+		dv_clk: dv {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <27000000>;
-			clock-output-names = "dv";
 		};
-		fmsick_clk: fmsick_clk {
+		fmsick_clk: fmsick {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "fmsick";
 		};
-		fmsock_clk: fmsock_clk {
+		fmsock_clk: fmsock {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "fmsock";
 		};
-		fsiack_clk: fsiack_clk {
+		fsiack_clk: fsiack {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "fsiack";
 		};
-		fsibck_clk: fsibck_clk {
+		fsibck_clk: fsibck {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "fsibck";
 		};
 
 		/* Special CPG clocks */
@@ -486,7 +478,7 @@
 		};
 
 		/* Variable factor clocks (DIV6) */
-		vclk1_clk: vclk1_clk@e6150008 {
+		vclk1_clk: vclk1@e6150008 {
 			compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150008 4>;
 			clocks = <&pllc1_div2_clk>, <0>, <&dv_clk>,
@@ -494,9 +486,8 @@
 				 <&extal1_div2_clk>, <&extalr_clk>, <0>,
 				 <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vclk1";
 		};
-		vclk2_clk: vclk2_clk@e615000c {
+		vclk2_clk: vclk2@e615000c {
 			compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe615000c 4>;
 			clocks = <&pllc1_div2_clk>, <0>, <&dv_clk>,
@@ -504,77 +495,67 @@
 				 <&extal1_div2_clk>, <&extalr_clk>, <0>,
 				 <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vclk2";
 		};
-		fmsi_clk: fmsi_clk@e6150010 {
+		fmsi_clk: fmsi@e6150010 {
 			compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150010 4>;
 			clocks = <&pllc1_div2_clk>, <&fmsick_clk>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "fmsi";
 		};
-		fmso_clk: fmso_clk@e6150014 {
+		fmso_clk: fmso@e6150014 {
 			compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150014 4>;
 			clocks = <&pllc1_div2_clk>, <&fmsock_clk>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "fmso";
 		};
-		fsia_clk: fsia_clk@e6150018 {
+		fsia_clk: fsia@e6150018 {
 			compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150018 4>;
 			clocks = <&pllc1_div2_clk>, <&fsiack_clk>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "fsia";
 		};
-		sub_clk: sub_clk@e6150080 {
+		sub_clk: sub@e6150080 {
 			compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150080 4>;
 			clocks = <&pllc1_div2_clk>,
 				 <&cpg_clocks R8A7740_CLK_USB24S>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "sub";
 		};
-		spu_clk: spu_clk@e6150084 {
+		spu_clk: spu@e6150084 {
 			compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150084 4>;
 			clocks = <&pllc1_div2_clk>,
 				 <&cpg_clocks R8A7740_CLK_USB24S>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "spu";
 		};
-		vou_clk: vou_clk@e6150088 {
+		vou_clk: vou@e6150088 {
 			compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150088 4>;
 			clocks = <&pllc1_div2_clk>, <&extal1_clk>, <&dv_clk>,
 				 <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vou";
 		};
-		stpro_clk: stpro_clk@e615009c {
+		stpro_clk: stpro@e615009c {
 			compatible = "renesas,r8a7740-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe615009c 4>;
 			clocks = <&cpg_clocks R8A7740_CLK_PLLC0>;
 			#clock-cells = <0>;
-			clock-output-names = "stpro";
 		};
 
 		/* Fixed factor clocks */
-		pllc1_div2_clk: pllc1_div2_clk {
+		pllc1_div2_clk: pllc1_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7740_CLK_PLLC1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "pllc1_div2";
 		};
-		extal1_div2_clk: extal1_div2_clk {
+		extal1_div2_clk: extal1_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&extal1_clk>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "extal1_div2";
 		};
 
 		/* Gate clocks */
diff --git a/arch/arm/boot/dts/r8a7778-bockw.dts b/arch/arm/boot/dts/r8a7778-bockw.dts
index 21e3b9d..e0dab14 100644
--- a/arch/arm/boot/dts/r8a7778-bockw.dts
+++ b/arch/arm/boot/dts/r8a7778-bockw.dts
@@ -130,53 +130,53 @@
 	pinctrl-names = "default";
 
 	scif0_pins: serial0 {
-		renesas,groups = "scif0_data_a", "scif0_ctrl";
-		renesas,function = "scif0";
+		groups = "scif0_data_a", "scif0_ctrl";
+		function = "scif0";
 	};
 
 	scif_clk_pins: scif_clk {
-		renesas,groups = "scif_clk";
-		renesas,function = "scif_clk";
+		groups = "scif_clk";
+		function = "scif_clk";
 	};
 
 	mmc_pins: mmc {
-		renesas,groups = "mmc_data8", "mmc_ctrl";
-		renesas,function = "mmc";
+		groups = "mmc_data8", "mmc_ctrl";
+		function = "mmc";
 	};
 
 	sdhi0_pins: sd0 {
-		renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
 	};
 	sdhi0_pup_pins: sd0_pup {
-		renesas,groups = "sdhi0_cd", "sdhi0_wp";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_cd", "sdhi0_wp";
+		function = "sdhi0";
 		bias-pull-up;
 	};
 
 	hspi0_pins: hspi0 {
-		renesas,groups = "hspi0_a";
-		renesas,function = "hspi0";
+		groups = "hspi0_a";
+		function = "hspi0";
 	};
 
 	usb0_pins: usb0 {
-		renesas,groups = "usb0";
-		renesas,function = "usb0";
+		groups = "usb0";
+		function = "usb0";
 	};
 
 	usb1_pins: usb1 {
-		renesas,groups = "usb1";
-		renesas,function = "usb1";
+		groups = "usb1";
+		function = "usb1";
 	};
 
 	vin0_pins: vin0 {
-		renesas,groups = "vin0_data8", "vin0_clk";
-		renesas,function = "vin0";
+		groups = "vin0_data8", "vin0_clk";
+		function = "vin0";
 	};
 
 	vin1_pins: vin1 {
-		renesas,groups = "vin1_data8", "vin1_clk";
-		renesas,function = "vin1";
+		groups = "vin1_data8", "vin1_clk";
+		function = "vin1";
 	};
 };
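
The conversion running through these board files replaces the vendor-prefixed pin controller properties with the generic groups/function properties now accepted by the pinctrl bindings. A condensed before/after sketch of the pattern, reusing the Bock-W SCIF0 node from the hunk above:

	/* before: vendor-prefixed properties */
	scif0_pins: serial0 {
		renesas,groups = "scif0_data_a", "scif0_ctrl";
		renesas,function = "scif0";
	};

	/* after: generic pinctrl properties, same group and function names */
	scif0_pins: serial0 {
		groups = "scif0_data_a", "scif0_ctrl";
		function = "scif0";
	};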
 
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index f83a348..fe787b4 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -443,11 +443,10 @@
 		ranges;
 
 		/* External input clock */
-		extal_clk: extal_clk {
+		extal_clk: extal {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "extal";
 		};
 
 		/* External SCIF clock */
@@ -456,7 +455,6 @@
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			status = "disabled";
 		};
 
 		/* Special CPG clocks */
@@ -474,59 +472,51 @@
 		audio_clk_a: audio_clk_a {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-output-names = "audio_clk_a";
 		};
 		audio_clk_b: audio_clk_b {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-output-names = "audio_clk_b";
 		};
 		audio_clk_c: audio_clk_c {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-output-names = "audio_clk_c";
 		};
 
 		/* Fixed ratio clocks */
-		g_clk: g_clk {
+		g_clk: g {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7778_CLK_PLLA>;
 			#clock-cells = <0>;
 			clock-div = <12>;
 			clock-mult = <1>;
-			clock-output-names = "g";
 		};
-		i_clk: i_clk {
+		i_clk: i {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7778_CLK_PLLA>;
 			#clock-cells = <0>;
 			clock-div = <1>;
 			clock-mult = <1>;
-			clock-output-names = "i";
 		};
-		s3_clk: s3_clk {
+		s3_clk: s3 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7778_CLK_PLLA>;
 			#clock-cells = <0>;
 			clock-div = <4>;
 			clock-mult = <1>;
-			clock-output-names = "s3";
 		};
-		s4_clk: s4_clk {
+		s4_clk: s4 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7778_CLK_PLLA>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "s4";
 		};
-		z_clk: z_clk {
+		z_clk: z {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7778_CLK_PLLB>;
 			#clock-cells = <0>;
 			clock-div = <1>;
 			clock-mult = <1>;
-			clock-output-names = "z";
 		};
 
 		/* Gate clocks */
diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts
index e111d35..b795da6 100644
--- a/arch/arm/boot/dts/r8a7779-marzen.dts
+++ b/arch/arm/boot/dts/r8a7779-marzen.dts
@@ -170,49 +170,49 @@
 
 	du_pins: du {
 		du0 {
-			renesas,groups = "du0_rgb888", "du0_sync_1", "du0_clk_out_0";
-			renesas,function = "du0";
+			groups = "du0_rgb888", "du0_sync_1", "du0_clk_out_0";
+			function = "du0";
 		};
 		du1 {
-			renesas,groups = "du1_rgb666", "du1_sync_1", "du1_clk_out";
-			renesas,function = "du1";
+			groups = "du1_rgb666", "du1_sync_1", "du1_clk_out";
+			function = "du1";
 		};
 	};
 
 	scif_clk_pins: scif_clk {
-		renesas,groups = "scif_clk_b";
-		renesas,function = "scif_clk";
+		groups = "scif_clk_b";
+		function = "scif_clk";
 	};
 
 	ethernet_pins: ethernet {
 		intc {
-			renesas,groups = "intc_irq1_b";
-			renesas,function = "intc";
+			groups = "intc_irq1_b";
+			function = "intc";
 		};
 		lbsc {
-			renesas,groups = "lbsc_ex_cs0";
-			renesas,function = "lbsc";
+			groups = "lbsc_ex_cs0";
+			function = "lbsc";
 		};
 	};
 
 	scif2_pins: serial2 {
-		renesas,groups = "scif2_data_c";
-		renesas,function = "scif2";
+		groups = "scif2_data_c";
+		function = "scif2";
 	};
 
 	scif4_pins: serial4 {
-		renesas,groups = "scif4_data";
-		renesas,function = "scif4";
+		groups = "scif4_data";
+		function = "scif4";
 	};
 
 	sdhi0_pins: sd0 {
-		renesas,groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd";
+		function = "sdhi0";
 	};
 
 	hspi0_pins: hspi0 {
-		renesas,groups = "hspi0";
-		renesas,function = "hspi0";
+		groups = "hspi0";
+		function = "hspi0";
 	};
 };
 
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index a0cc08e..0c82097 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -67,7 +67,7 @@
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0xf0000600 0x20>;
 		interrupts = <GIC_PPI 13
-			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
 		clocks = <&cpg_clocks R8A7779_CLK_ZS>;
 	};
 
@@ -445,12 +445,11 @@
 		ranges;
 
 		/* External root clock */
-		extal_clk: extal_clk {
+		extal_clk: extal {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			clock-output-names = "extal";
 		};
 
 		/* External SCIF clock */
@@ -459,7 +458,6 @@
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			status = "disabled";
 		};
 
 		/* Special CPG clocks */
@@ -474,37 +472,33 @@
 		};
 
 		/* Fixed factor clocks */
-		i_clk: i_clk {
+		i_clk: i {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7779_CLK_PLLA>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "i";
 		};
-		s3_clk: s3_clk {
+		s3_clk: s3 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7779_CLK_PLLA>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "s3";
 		};
-		s4_clk: s4_clk {
+		s4_clk: s4 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7779_CLK_PLLA>;
 			#clock-cells = <0>;
 			clock-div = <16>;
 			clock-mult = <1>;
-			clock-output-names = "s4";
 		};
-		g_clk: g_clk {
+		g_clk: g {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7779_CLK_PLLA>;
 			#clock-cells = <0>;
 			clock-div = <24>;
 			clock-mult = <1>;
-			clock-output-names = "g";
 		};
 
 		/* Gate clocks */
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index aa6ca92..749ba02 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -176,11 +176,10 @@
 			  1800000 0>;
 	};
 
-	audio_clock: clock {
+	audio_clock: audio_clock {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
 		clock-frequency = <11289600>;
-		clock-output-names = "audio_clock";
 	};
 
 	rsnd_ak4643: sound {
@@ -314,119 +313,133 @@
 	pinctrl-names = "default";
 
 	du_pins: du {
-		renesas,groups = "du_rgb666", "du_sync_1", "du_clk_out_0";
-		renesas,function = "du";
+		groups = "du_rgb666", "du_sync_1", "du_clk_out_0";
+		function = "du";
 	};
 
 	scif0_pins: serial0 {
-		renesas,groups = "scif0_data";
-		renesas,function = "scif0";
+		groups = "scif0_data";
+		function = "scif0";
 	};
 
 	scif_clk_pins: scif_clk {
-		renesas,groups = "scif_clk";
-		renesas,function = "scif_clk";
+		groups = "scif_clk";
+		function = "scif_clk";
 	};
 
 	ether_pins: ether {
-		renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
-		renesas,function = "eth";
+		groups = "eth_link", "eth_mdio", "eth_rmii";
+		function = "eth";
 	};
 
 	phy1_pins: phy1 {
-		renesas,groups = "intc_irq0";
-		renesas,function = "intc";
+		groups = "intc_irq0";
+		function = "intc";
 	};
 
 	scifa1_pins: serial1 {
-		renesas,groups = "scifa1_data";
-		renesas,function = "scifa1";
+		groups = "scifa1_data";
+		function = "scifa1";
 	};
 
 	sdhi0_pins: sd0 {
-		renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <3300>;
+	};
+
+	sdhi0_pins_uhs: sd0_uhs {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <1800>;
 	};
 
 	sdhi2_pins: sd2 {
-		renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
-		renesas,function = "sdhi2";
+		groups = "sdhi2_data4", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <3300>;
+	};
+
+	sdhi2_pins_uhs: sd2_uhs {
+		groups = "sdhi2_data4", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <1800>;
 	};
 
 	mmc1_pins: mmc1 {
-		renesas,groups = "mmc1_data8", "mmc1_ctrl";
-		renesas,function = "mmc1";
+		groups = "mmc1_data8", "mmc1_ctrl";
+		function = "mmc1";
 	};
 
 	qspi_pins: spi0 {
-		renesas,groups = "qspi_ctrl", "qspi_data4";
-		renesas,function = "qspi";
+		groups = "qspi_ctrl", "qspi_data4";
+		function = "qspi";
 	};
 
 	msiof1_pins: spi2 {
-		renesas,groups = "msiof1_clk", "msiof1_sync", "msiof1_rx",
+		groups = "msiof1_clk", "msiof1_sync", "msiof1_rx",
 				 "msiof1_tx";
-		renesas,function = "msiof1";
+		function = "msiof1";
 	};
 
 	i2c0_pins: i2c0 {
-		renesas,groups = "i2c0";
-		renesas,function = "i2c0";
+		groups = "i2c0";
+		function = "i2c0";
 	};
 
 	iic0_pins: iic0 {
-		renesas,groups = "iic0";
-		renesas,function = "iic0";
+		groups = "iic0";
+		function = "iic0";
 	};
 
 	iic1_pins: iic1 {
-		renesas,groups = "iic1";
-		renesas,function = "iic1";
+		groups = "iic1";
+		function = "iic1";
 	};
 
 	iic2_pins: iic2 {
-		renesas,groups = "iic2";
-		renesas,function = "iic2";
+		groups = "iic2";
+		function = "iic2";
 	};
 
 	iic3_pins: iic3 {
-		renesas,groups = "iic3";
-		renesas,function = "iic3";
+		groups = "iic3";
+		function = "iic3";
 	};
 
 	hsusb_pins: hsusb {
-		renesas,groups = "usb0_ovc_vbus";
-		renesas,function = "usb0";
+		groups = "usb0_ovc_vbus";
+		function = "usb0";
 	};
 
 	usb0_pins: usb0 {
-		renesas,groups = "usb0";
-		renesas,function = "usb0";
+		groups = "usb0";
+		function = "usb0";
 	};
 
 	usb1_pins: usb1 {
-		renesas,groups = "usb1";
-		renesas,function = "usb1";
+		groups = "usb1";
+		function = "usb1";
 	};
 
 	usb2_pins: usb2 {
-		renesas,groups = "usb2";
-		renesas,function = "usb2";
+		groups = "usb2";
+		function = "usb2";
 	};
 
 	vin1_pins: vin {
-		renesas,groups = "vin1_data8", "vin1_clk";
-		renesas,function = "vin1";
+		groups = "vin1_data8", "vin1_clk";
+		function = "vin1";
 	};
 
 	sound_pins: sound {
-		renesas,groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
-		renesas,function = "ssi";
+		groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
+		function = "ssi";
 	};
 
 	sound_clk_pins: sound_clk {
-		renesas,groups = "audio_clk_a";
-		renesas,function = "audio_clk";
+		groups = "audio_clk_a";
+		function = "audio_clk";
 	};
 };
 
@@ -539,21 +552,25 @@
 
 &sdhi0 {
 	pinctrl-0 = <&sdhi0_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi0_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi0>;
 	vqmmc-supply = <&vccq_sdhi0>;
 	cd-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
+	sd-uhs-sdr50;
 	status = "okay";
 };
 
 &sdhi2 {
 	pinctrl-0 = <&sdhi2_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi2_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi2>;
 	vqmmc-supply = <&vccq_sdhi2>;
 	cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
+	sd-uhs-sdr50;
 	status = "okay";
 };
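
The Lager SDHI changes above add a second pin state for UHS signalling: the default state keeps 3.3 V pad voltage, the "state_uhs" state switches the same groups to 1.8 V, and sd-uhs-sdr50 lets the MMC core negotiate the faster mode. A condensed sketch of the pattern, using the sdhi0 nodes from the hunks above:

	sdhi0_pins: sd0 {
		groups = "sdhi0_data4", "sdhi0_ctrl";
		function = "sdhi0";
		power-source = <3300>;	/* 3.3 V signalling for the default state */
	};

	sdhi0_pins_uhs: sd0_uhs {
		groups = "sdhi0_data4", "sdhi0_ctrl";
		function = "sdhi0";
		power-source = <1800>;	/* 1.8 V signalling for UHS modes */
	};

	&sdhi0 {
		pinctrl-0 = <&sdhi0_pins>;
		pinctrl-1 = <&sdhi0_pins_uhs>;
		pinctrl-names = "default", "state_uhs";
		sd-uhs-sdr50;		/* advertise SDR50 support to the MMC core */
	};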
 
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 38b7063..935064f 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -589,6 +589,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_SDHI0>;
 		dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
 		dma-names = "tx", "rx";
+		max-frequency = <195000000>;
 		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
@@ -600,6 +601,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_SDHI1>;
 		dmas = <&dmac1 0xc9>, <&dmac1 0xca>;
 		dma-names = "tx", "rx";
+		max-frequency = <195000000>;
 		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
@@ -611,6 +613,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_SDHI2>;
 		dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
 		dma-names = "tx", "rx";
+		max-frequency = <97500000>;
 		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
@@ -622,6 +625,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_SDHI3>;
 		dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
 		dma-names = "tx", "rx";
+		max-frequency = <97500000>;
 		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
@@ -732,6 +736,20 @@
 		status = "disabled";
 	};
 
+	scif2: serial@e6e56000 {
+		compatible = "renesas,scif-r8a7790", "renesas,rcar-gen2-scif",
+			     "renesas,scif";
+		reg = <0 0xe6e56000 0 64>;
+		interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp3_clks R8A7790_CLK_SCIF2>, <&zs_clk>,
+			 <&scif_clk>;
+		clock-names = "fck", "brg_int", "scif_clk";
+		dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
+		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
+	};
+
 	hscif0: serial@e62c0000 {
 		compatible = "renesas,hscif-r8a7790",
 			     "renesas,rcar-gen2-hscif", "renesas,hscif";
@@ -968,7 +986,7 @@
 	};
 
 	can0: can@e6e80000 {
-		compatible = "renesas,can-r8a7790";
+		compatible = "renesas,can-r8a7790", "renesas,rcar-gen2-can";
 		reg = <0 0xe6e80000 0 0x1000>;
 		interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7790_CLK_RCAN0>,
@@ -979,7 +997,7 @@
 	};
 
 	can1: can@e6e88000 {
-		compatible = "renesas,can-r8a7790";
+		compatible = "renesas,can-r8a7790", "renesas,rcar-gen2-can";
 		reg = <0 0xe6e88000 0 0x1000>;
 		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7790_CLK_RCAN1>,
@@ -990,7 +1008,7 @@
 	};
 
 	jpu: jpeg-codec@fe980000 {
-		compatible = "renesas,jpu-r8a7790";
+		compatible = "renesas,jpu-r8a7790", "renesas,rcar-gen2-jpu";
 		reg = <0 0xfe980000 0 0x10300>;
 		interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_JPU>;
@@ -1003,21 +1021,18 @@
 		ranges;
 
 		/* External root clock */
-		extal_clk: extal_clk {
+		extal_clk: extal {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			clock-output-names = "extal";
 		};
 
 		/* External PCIe clock - can be overridden by the board */
-		pcie_bus_clk: pcie_bus_clk {
+		pcie_bus_clk: pcie_bus {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-frequency = <100000000>;
-			clock-output-names = "pcie_bus";
-			status = "disabled";
+			clock-frequency = <0>;
 		};
 
 		/*
@@ -1028,19 +1043,16 @@
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "audio_clk_a";
 		};
 		audio_clk_b: audio_clk_b {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "audio_clk_b";
 		};
 		audio_clk_c: audio_clk_c {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "audio_clk_c";
 		};
 
 		/* External SCIF clock */
@@ -1049,15 +1061,13 @@
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			status = "disabled";
 		};
 
 		/* External USB clock - can be overridden by the board */
-		usb_extal_clk: usb_extal_clk {
+		usb_extal_clk: usb_extal {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <48000000>;
-			clock-output-names = "usb_extal";
 		};
 
 		/* External CAN clock */
@@ -1066,8 +1076,6 @@
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			clock-output-names = "can_clk";
-			status = "disabled";
 		};
 
 		/* Special CPG clocks */
@@ -1084,201 +1092,176 @@
 		};
 
 		/* Variable factor clocks */
-		sd2_clk: sd2_clk@e6150078 {
+		sd2_clk: sd2@e6150078 {
 			compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150078 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sd2";
 		};
-		sd3_clk: sd3_clk@e615026c {
+		sd3_clk: sd3@e615026c {
 			compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe615026c 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sd3";
 		};
-		mmc0_clk: mmc0_clk@e6150240 {
+		mmc0_clk: mmc0@e6150240 {
 			compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150240 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "mmc0";
 		};
-		mmc1_clk: mmc1_clk@e6150244 {
+		mmc1_clk: mmc1@e6150244 {
 			compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150244 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "mmc1";
 		};
-		ssp_clk: ssp_clk@e6150248 {
+		ssp_clk: ssp@e6150248 {
 			compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150248 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "ssp";
 		};
-		ssprs_clk: ssprs_clk@e615024c {
+		ssprs_clk: ssprs@e615024c {
 			compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe615024c 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "ssprs";
 		};
 
 		/* Fixed factor clocks */
-		pll1_div2_clk: pll1_div2_clk {
+		pll1_div2_clk: pll1_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "pll1_div2";
 		};
-		z2_clk: z2_clk {
+		z2_clk: z2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "z2";
 		};
-		zg_clk: zg_clk {
+		zg_clk: zg {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <3>;
 			clock-mult = <1>;
-			clock-output-names = "zg";
 		};
-		zx_clk: zx_clk {
+		zx_clk: zx {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <3>;
 			clock-mult = <1>;
-			clock-output-names = "zx";
 		};
-		zs_clk: zs_clk {
+		zs_clk: zs {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <6>;
 			clock-mult = <1>;
-			clock-output-names = "zs";
 		};
-		hp_clk: hp_clk {
+		hp_clk: hp {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <12>;
 			clock-mult = <1>;
-			clock-output-names = "hp";
 		};
-		i_clk: i_clk {
+		i_clk: i {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "i";
 		};
-		b_clk: b_clk {
+		b_clk: b {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <12>;
 			clock-mult = <1>;
-			clock-output-names = "b";
 		};
-		p_clk: p_clk {
+		p_clk: p {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <24>;
 			clock-mult = <1>;
-			clock-output-names = "p";
 		};
-		cl_clk: cl_clk {
+		cl_clk: cl {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <48>;
 			clock-mult = <1>;
-			clock-output-names = "cl";
 		};
-		m2_clk: m2_clk {
+		m2_clk: m2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "m2";
 		};
-		imp_clk: imp_clk {
+		imp_clk: imp {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <4>;
 			clock-mult = <1>;
-			clock-output-names = "imp";
 		};
-		rclk_clk: rclk_clk {
+		rclk_clk: rclk {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <(48 * 1024)>;
 			clock-mult = <1>;
-			clock-output-names = "rclk";
 		};
-		oscclk_clk: oscclk_clk {
+		oscclk_clk: oscclk {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <(12 * 1024)>;
 			clock-mult = <1>;
-			clock-output-names = "oscclk";
 		};
-		zb3_clk: zb3_clk {
+		zb3_clk: zb3 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL3>;
 			#clock-cells = <0>;
 			clock-div = <4>;
 			clock-mult = <1>;
-			clock-output-names = "zb3";
 		};
-		zb3d2_clk: zb3d2_clk {
+		zb3d2_clk: zb3d2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL3>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "zb3d2";
 		};
-		ddr_clk: ddr_clk {
+		ddr_clk: ddr {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7790_CLK_PLL3>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "ddr";
 		};
-		mp_clk: mp_clk {
+		mp_clk: mp {
 			compatible = "fixed-factor-clock";
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
 			clock-div = <15>;
 			clock-mult = <1>;
-			clock-output-names = "mp";
 		};
-		cp_clk: cp_clk {
+		cp_clk: cp {
 			compatible = "fixed-factor-clock";
 			clocks = <&extal_clk>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "cp";
 		};
 
 		/* Gate clocks */
@@ -1334,19 +1317,19 @@
 		mstp3_clks: mstp3_clks@e615013c {
 			compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
 			reg = <0 0xe615013c 0 4>, <0 0xe6150048 0 4>;
-			clocks = <&hp_clk>, <&cp_clk>, <&mmc1_clk>, <&sd3_clk>,
+			clocks = <&hp_clk>, <&cp_clk>, <&mmc1_clk>, <&p_clk>, <&sd3_clk>,
 				 <&sd2_clk>, <&cpg_clocks R8A7790_CLK_SD1>, <&cpg_clocks R8A7790_CLK_SD0>, <&mmc0_clk>,
 				 <&hp_clk>, <&mp_clk>, <&hp_clk>, <&mp_clk>, <&rclk_clk>,
 				 <&hp_clk>, <&hp_clk>;
 			#clock-cells = <1>;
 			clock-indices = <
-				R8A7790_CLK_IIC2 R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SDHI3
+				R8A7790_CLK_IIC2 R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SCIF2 R8A7790_CLK_SDHI3
 				R8A7790_CLK_SDHI2 R8A7790_CLK_SDHI1 R8A7790_CLK_SDHI0 R8A7790_CLK_MMCIF0
 				R8A7790_CLK_IIC0 R8A7790_CLK_PCIEC R8A7790_CLK_IIC1 R8A7790_CLK_SSUSB R8A7790_CLK_CMT1
 				R8A7790_CLK_USBDMAC0 R8A7790_CLK_USBDMAC1
 			>;
 			clock-output-names =
-				"iic2", "tpu0", "mmcif1", "sdhi3",
+				"iic2", "tpu0", "mmcif1", "scif2", "sdhi3",
 				"sdhi2", "sdhi1", "sdhi0", "mmcif0",
 				"iic0", "pciec", "iic1", "ssusb", "cmt1",
 				"usbdmac0", "usbdmac1";
@@ -1531,7 +1514,7 @@
 	};
 
 	xhci: usb@ee000000 {
-		compatible = "renesas,xhci-r8a7790";
+		compatible = "renesas,xhci-r8a7790", "renesas,rcar-gen2-xhci";
 		reg = <0 0xee000000 0 0xc00>;
 		interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7790_CLK_SSUSB>;
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 0ad71b8..da59c28 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -242,11 +242,10 @@
 			  1800000 0>;
 	};
 
-	audio_clock: clock {
+	audio_clock: audio_clock {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
 		clock-frequency = <11289600>;
-		clock-output-names = "audio_clock";
 	};
 
 	rsnd_ak4643: sound {
@@ -324,89 +323,89 @@
 	pinctrl-names = "default";
 
 	i2c2_pins: i2c2 {
-		renesas,groups = "i2c2";
-		renesas,function = "i2c2";
+		groups = "i2c2";
+		function = "i2c2";
 	};
 
 	du_pins: du {
-		renesas,groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
-		renesas,function = "du";
+		groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
+		function = "du";
 	};
 
 	scif0_pins: serial0 {
-		renesas,groups = "scif0_data_d";
-		renesas,function = "scif0";
+		groups = "scif0_data_d";
+		function = "scif0";
 	};
 
 	scif1_pins: serial1 {
-		renesas,groups = "scif1_data_d";
-		renesas,function = "scif1";
+		groups = "scif1_data_d";
+		function = "scif1";
 	};
 
 	scif_clk_pins: scif_clk {
-		renesas,groups = "scif_clk";
-		renesas,function = "scif_clk";
+		groups = "scif_clk";
+		function = "scif_clk";
 	};
 
 	ether_pins: ether {
-		renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
-		renesas,function = "eth";
+		groups = "eth_link", "eth_mdio", "eth_rmii";
+		function = "eth";
 	};
 
 	phy1_pins: phy1 {
-		renesas,groups = "intc_irq0";
-		renesas,function = "intc";
+		groups = "intc_irq0";
+		function = "intc";
 	};
 
 	sdhi0_pins: sd0 {
-		renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
 	};
 
 	sdhi1_pins: sd1 {
-		renesas,groups = "sdhi1_data4", "sdhi1_ctrl";
-		renesas,function = "sdhi1";
+		groups = "sdhi1_data4", "sdhi1_ctrl";
+		function = "sdhi1";
 	};
 
 	sdhi2_pins: sd2 {
-		renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
-		renesas,function = "sdhi2";
+		groups = "sdhi2_data4", "sdhi2_ctrl";
+		function = "sdhi2";
 	};
 
 	qspi_pins: spi0 {
-		renesas,groups = "qspi_ctrl", "qspi_data4";
-		renesas,function = "qspi";
+		groups = "qspi_ctrl", "qspi_data4";
+		function = "qspi";
 	};
 
 	msiof0_pins: spi1 {
-		renesas,groups = "msiof0_clk", "msiof0_sync", "msiof0_rx",
+		groups = "msiof0_clk", "msiof0_sync", "msiof0_rx",
 				 "msiof0_tx";
-		renesas,function = "msiof0";
+		function = "msiof0";
 	};
 
 	usb0_pins: usb0 {
-		renesas,groups = "usb0";
-		renesas,function = "usb0";
+		groups = "usb0";
+		function = "usb0";
 	};
 
 	usb1_pins: usb1 {
-		renesas,groups = "usb1";
-		renesas,function = "usb1";
+		groups = "usb1";
+		function = "usb1";
 	};
 
 	vin1_pins: vin1 {
-		renesas,groups = "vin1_data8", "vin1_clk";
-		renesas,function = "vin1";
+		groups = "vin1_data8", "vin1_clk";
+		function = "vin1";
 	};
 
 	sound_pins: sound {
-		renesas,groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
-		renesas,function = "ssi";
+		groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
+		function = "ssi";
 	};
 
 	sound_clk_pins: sound_clk {
-		renesas,groups = "audio_clk_a";
-		renesas,function = "audio_clk";
+		groups = "audio_clk_a";
+		function = "audio_clk";
 	};
 };
 
@@ -661,6 +660,7 @@
 };
 
 &pcie_bus_clk {
+	clock-frequency = <100000000>;
 	status = "okay";
 };
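
A recurring change in the .dtsi files above is the handling of external clocks: instead of carrying a default rate plus status = "disabled", the SoC file now declares them with a 0 Hz placeholder, and any board that actually feeds the clock supplies the real frequency. Schematically, with the PCIe reference clock that Koelsch enables just above:

	/* SoC .dtsi: external clock declared, rate unknown */
	pcie_bus_clk: pcie_bus {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <0>;
	};

	/* board .dts: the board provides a 100 MHz reference */
	&pcie_bus_clk {
		clock-frequency = <100000000>;
		status = "okay";
	};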
 
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index 6c08314..6a1bb1a 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -113,11 +113,10 @@
 		clock-frequency = <74250000>;
 	};
 
-	x14_clk: x14-clock {
+	x14_clk: audio_clock {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
 		clock-frequency = <11289600>;
-		clock-output-names = "audio_clock";
 	};
 
 	sound {
@@ -143,82 +142,74 @@
 };
 
 &pfc {
-	pinctrl-0 = <&scif_clk_pins>;
-	pinctrl-names = "default";
-
 	scif0_pins: serial0 {
-		renesas,groups = "scif0_data_d";
-		renesas,function = "scif0";
-	};
-
-	scif_clk_pins: scif_clk {
-		renesas,groups = "scif_clk";
-		renesas,function = "scif_clk";
+		groups = "scif0_data_d";
+		function = "scif0";
 	};
 
 	ether_pins: ether {
-		renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
-		renesas,function = "eth";
+		groups = "eth_link", "eth_mdio", "eth_rmii";
+		function = "eth";
 	};
 
 	phy1_pins: phy1 {
-		renesas,groups = "intc_irq0";
-		renesas,function = "intc";
+		groups = "intc_irq0";
+		function = "intc";
 	};
 
 	sdhi0_pins: sd0 {
-		renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
 	};
 
 	sdhi2_pins: sd2 {
-		renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
-		renesas,function = "sdhi2";
+		groups = "sdhi2_data4", "sdhi2_ctrl";
+		function = "sdhi2";
 	};
 
 	qspi_pins: spi0 {
-		renesas,groups = "qspi_ctrl", "qspi_data4";
-		renesas,function = "qspi";
+		groups = "qspi_ctrl", "qspi_data4";
+		function = "qspi";
 	};
 
 	i2c2_pins: i2c2 {
-		renesas,groups = "i2c2";
-		renesas,function = "i2c2";
+		groups = "i2c2";
+		function = "i2c2";
 	};
 
 	usb0_pins: usb0 {
-		renesas,groups = "usb0";
-		renesas,function = "usb0";
+		groups = "usb0";
+		function = "usb0";
 	};
 
 	usb1_pins: usb1 {
-		renesas,groups = "usb1";
-		renesas,function = "usb1";
+		groups = "usb1";
+		function = "usb1";
 	};
 
 	vin0_pins: vin0 {
-		renesas,groups = "vin0_data8", "vin0_clk";
-		renesas,function = "vin0";
+		groups = "vin0_data8", "vin0_clk";
+		function = "vin0";
 	};
 
 	can0_pins: can0 {
-		renesas,groups = "can0_data";
-		renesas,function = "can0";
+		groups = "can0_data";
+		function = "can0";
 	};
 
 	du_pins: du {
-		renesas,groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
-		renesas,function = "du";
+		groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
+		function = "du";
 	};
 
 	ssi_pins: sound {
-		renesas,groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
-		renesas,function = "ssi";
+		groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
+		function = "ssi";
 	};
 
 	audio_clk_pins: audio_clk {
-		renesas,groups = "audio_clk_a";
-		renesas,function = "audio_clk";
+		groups = "audio_clk_a";
+		function = "audio_clk";
 	};
 };
 
@@ -229,11 +220,6 @@
 	status = "okay";
 };
 
-&scif_clk {
-	clock-frequency = <14745600>;
-	status = "okay";
-};
-
 &ether {
 	pinctrl-0 = <&ether_pins &phy1_pins>;
 	pinctrl-names = "default";
@@ -414,6 +400,7 @@
 };
 
 &pcie_bus_clk {
+	clock-frequency = <100000000>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 6439f05..565c270 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1013,7 +1013,7 @@
 	};
 
 	can0: can@e6e80000 {
-		compatible = "renesas,can-r8a7791";
+		compatible = "renesas,can-r8a7791", "renesas,rcar-gen2-can";
 		reg = <0 0xe6e80000 0 0x1000>;
 		interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_RCAN0>,
@@ -1024,7 +1024,7 @@
 	};
 
 	can1: can@e6e88000 {
-		compatible = "renesas,can-r8a7791";
+		compatible = "renesas,can-r8a7791", "renesas,rcar-gen2-can";
 		reg = <0 0xe6e88000 0 0x1000>;
 		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_RCAN1>,
@@ -1035,7 +1035,7 @@
 	};
 
 	jpu: jpeg-codec@fe980000 {
-		compatible = "renesas,jpu-r8a7791";
+		compatible = "renesas,jpu-r8a7791", "renesas,rcar-gen2-jpu";
 		reg = <0 0xfe980000 0 0x10300>;
 		interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7791_CLK_JPU>;
@@ -1048,12 +1048,11 @@
 		ranges;
 
 		/* External root clock */
-		extal_clk: extal_clk {
+		extal_clk: extal {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			clock-output-names = "extal";
 		};
 
 		/*
@@ -1064,28 +1063,23 @@
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "audio_clk_a";
 		};
 		audio_clk_b: audio_clk_b {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "audio_clk_b";
 		};
 		audio_clk_c: audio_clk_c {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "audio_clk_c";
 		};
 
 		/* External PCIe clock - can be overridden by the board */
-		pcie_bus_clk: pcie_bus_clk {
+		pcie_bus_clk: pcie_bus {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-frequency = <100000000>;
-			clock-output-names = "pcie_bus";
-			status = "disabled";
+			clock-frequency = <0>;
 		};
 
 		/* External SCIF clock */
@@ -1094,15 +1088,13 @@
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			status = "disabled";
 		};
 
 		/* External USB clock - can be overridden by the board */
-		usb_extal_clk: usb_extal_clk {
+		usb_extal_clk: usb_extal {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <48000000>;
-			clock-output-names = "usb_extal";
 		};
 
 		/* External CAN clock */
@@ -1111,8 +1103,6 @@
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			clock-output-names = "can_clk";
-			status = "disabled";
 		};
 
 		/* Special CPG clocks */
@@ -1129,178 +1119,156 @@
 		};
 
 		/* Variable factor clocks */
-		sd2_clk: sd2_clk@e6150078 {
+		sd2_clk: sd2@e6150078 {
 			compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150078 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sd2";
 		};
-		sd3_clk: sd3_clk@e615026c {
+		sd3_clk: sd3@e615026c {
 			compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe615026c 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sd3";
 		};
-		mmc0_clk: mmc0_clk@e6150240 {
+		mmc0_clk: mmc0@e6150240 {
 			compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150240 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "mmc0";
 		};
-		ssp_clk: ssp_clk@e6150248 {
+		ssp_clk: ssp@e6150248 {
 			compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150248 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "ssp";
 		};
-		ssprs_clk: ssprs_clk@e615024c {
+		ssprs_clk: ssprs@e615024c {
 			compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe615024c 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "ssprs";
 		};
 
 		/* Fixed factor clocks */
-		pll1_div2_clk: pll1_div2_clk {
+		pll1_div2_clk: pll1_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "pll1_div2";
 		};
-		zg_clk: zg_clk {
+		zg_clk: zg {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <3>;
 			clock-mult = <1>;
-			clock-output-names = "zg";
 		};
-		zx_clk: zx_clk {
+		zx_clk: zx {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <3>;
 			clock-mult = <1>;
-			clock-output-names = "zx";
 		};
-		zs_clk: zs_clk {
+		zs_clk: zs {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <6>;
 			clock-mult = <1>;
-			clock-output-names = "zs";
 		};
-		hp_clk: hp_clk {
+		hp_clk: hp {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <12>;
 			clock-mult = <1>;
-			clock-output-names = "hp";
 		};
-		i_clk: i_clk {
+		i_clk: i {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "i";
 		};
-		b_clk: b_clk {
+		b_clk: b {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <12>;
 			clock-mult = <1>;
-			clock-output-names = "b";
 		};
-		p_clk: p_clk {
+		p_clk: p {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <24>;
 			clock-mult = <1>;
-			clock-output-names = "p";
 		};
-		cl_clk: cl_clk {
+		cl_clk: cl {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <48>;
 			clock-mult = <1>;
-			clock-output-names = "cl";
 		};
-		m2_clk: m2_clk {
+		m2_clk: m2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "m2";
 		};
-		rclk_clk: rclk_clk {
+		rclk_clk: rclk {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <(48 * 1024)>;
 			clock-mult = <1>;
-			clock-output-names = "rclk";
 		};
-		oscclk_clk: oscclk_clk {
+		oscclk_clk: oscclk {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <(12 * 1024)>;
 			clock-mult = <1>;
-			clock-output-names = "oscclk";
 		};
-		zb3_clk: zb3_clk {
+		zb3_clk: zb3 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL3>;
 			#clock-cells = <0>;
 			clock-div = <4>;
 			clock-mult = <1>;
-			clock-output-names = "zb3";
 		};
-		zb3d2_clk: zb3d2_clk {
+		zb3d2_clk: zb3d2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL3>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "zb3d2";
 		};
-		ddr_clk: ddr_clk {
+		ddr_clk: ddr {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7791_CLK_PLL3>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "ddr";
 		};
-		mp_clk: mp_clk {
+		mp_clk: mp {
 			compatible = "fixed-factor-clock";
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
 			clock-div = <15>;
 			clock-mult = <1>;
-			clock-output-names = "mp";
 		};
-		cp_clk: cp_clk {
+		cp_clk: cp {
 			compatible = "fixed-factor-clock";
 			clocks = <&extal_clk>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "cp";
 		};
 
 		/* Gate clocks */
@@ -1549,7 +1517,7 @@
 	};
 
 	xhci: usb@ee000000 {
-		compatible = "renesas,xhci-r8a7791";
+		compatible = "renesas,xhci-r8a7791", "renesas,rcar-gen2-xhci";
 		reg = <0 0xee000000 0 0xc00>;
 		interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7791_CLK_SSUSB>;
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
index 87e89ec..0ebc3ee 100644
--- a/arch/arm/boot/dts/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -158,11 +158,82 @@
 		};
 	};
 
-	audio_clock: clock {
+	vcc_sdhi0: regulator@0 {
+		compatible = "regulator-fixed";
+
+		regulator-name = "SDHI0 Vcc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpio = <&gpio7 17 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vccq_sdhi0: regulator@1 {
+		compatible = "regulator-gpio";
+
+		regulator-name = "SDHI0 VccQ";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio2 12 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+		states = <3300000 1
+			  1800000 0>;
+	};
+
+	vcc_sdhi1: regulator@2 {
+		compatible = "regulator-fixed";
+
+		regulator-name = "SDHI1 Vcc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpio = <&gpio7 18 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vccq_sdhi1: regulator@3 {
+		compatible = "regulator-gpio";
+
+		regulator-name = "SDHI1 VccQ";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio2 13 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+		states = <3300000 1
+			  1800000 0>;
+	};
+
+	vcc_sdhi2: regulator@4 {
+		compatible = "regulator-fixed";
+
+		regulator-name = "SDHI2 Vcc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpio = <&gpio7 19 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vccq_sdhi2: regulator@5 {
+		compatible = "regulator-gpio";
+
+		regulator-name = "SDHI2 VccQ";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio2 26 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+		states = <3300000 1
+			  1800000 0>;
+	};
+
+	audio_clock: audio_clock {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
 		clock-frequency = <11289600>;
-		clock-output-names = "audio_clock";
 	};
 
 	rsnd_ak4643: sound {
@@ -240,53 +311,68 @@
 	pinctrl-names = "default";
 
 	i2c2_pins: i2c2 {
-		renesas,groups = "i2c2";
-		renesas,function = "i2c2";
+		groups = "i2c2";
+		function = "i2c2";
 	};
 
 	du_pins: du {
-		renesas,groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
-		renesas,function = "du";
+		groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
+		function = "du";
 	};
 
 	scif0_pins: serial0 {
-		renesas,groups = "scif0_data_d";
-		renesas,function = "scif0";
+		groups = "scif0_data_d";
+		function = "scif0";
 	};
 
 	scif1_pins: serial1 {
-		renesas,groups = "scif1_data_d";
-		renesas,function = "scif1";
+		groups = "scif1_data_d";
+		function = "scif1";
 	};
 
 	scif_clk_pins: scif_clk {
-		renesas,groups = "scif_clk";
-		renesas,function = "scif_clk";
+		groups = "scif_clk";
+		function = "scif_clk";
 	};
 
 	ether_pins: ether {
-		renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
-		renesas,function = "eth";
+		groups = "eth_link", "eth_mdio", "eth_rmii";
+		function = "eth";
 	};
 
 	phy1_pins: phy1 {
-		renesas,groups = "intc_irq0";
-		renesas,function = "intc";
+		groups = "intc_irq0";
+		function = "intc";
+	};
+
+	sdhi0_pins: sd0 {
+		renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
+		renesas,function = "sdhi0";
+	};
+
+	sdhi1_pins: sd1 {
+		renesas,groups = "sdhi1_data4", "sdhi1_ctrl";
+		renesas,function = "sdhi1";
+	};
+
+	sdhi2_pins: sd2 {
+		renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
+		renesas,function = "sdhi2";
 	};
 
 	qspi_pins: spi0 {
-		renesas,groups = "qspi_ctrl", "qspi_data4";
-		renesas,function = "qspi";
+		groups = "qspi_ctrl", "qspi_data4";
+		function = "qspi";
 	};
 
 	sound_pins: sound {
-		renesas,groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
-		renesas,function = "ssi";
+		groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
+		function = "ssi";
 	};
 
 	sound_clk_pins: sound_clk {
-		renesas,groups = "audio_clk_a";
-		renesas,function = "audio_clk";
+		groups = "audio_clk_a";
+		function = "audio_clk";
 	};
 };
 
@@ -329,6 +415,38 @@
 	status = "okay";
 };
 
+&sdhi0 {
+	pinctrl-0 = <&sdhi0_pins>;
+	pinctrl-names = "default";
+
+	vmmc-supply = <&vcc_sdhi0>;
+	vqmmc-supply = <&vccq_sdhi0>;
+	cd-gpios = <&gpio6 6 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+&sdhi1 {
+	pinctrl-0 = <&sdhi1_pins>;
+	pinctrl-names = "default";
+
+	vmmc-supply = <&vcc_sdhi1>;
+	vqmmc-supply = <&vccq_sdhi1>;
+	cd-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio6 15 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+&sdhi2 {
+	pinctrl-0 = <&sdhi2_pins>;
+	pinctrl-names = "default";
+
+	vmmc-supply = <&vcc_sdhi2>;
+	vqmmc-supply = <&vccq_sdhi2>;
+	cd-gpios = <&gpio6 22 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
 &qspi {
 	pinctrl-0 = <&qspi_pins>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index b482159..cf6dc2a 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -507,6 +507,39 @@
 		reg = <0 0xe6060000 0 0x250>;
 	};
 
+	sdhi0: sd@ee100000 {
+		compatible = "renesas,sdhi-r8a7793";
+		reg = <0 0xee100000 0 0x328>;
+		interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp3_clks R8A7793_CLK_SDHI0>;
+		dmas = <&dmac0 0xcd>, <&dmac0 0xce>;
+		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
+	};
+
+	sdhi1: sd@ee140000 {
+		compatible = "renesas,sdhi-r8a7793";
+		reg = <0 0xee140000 0 0x100>;
+		interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp3_clks R8A7793_CLK_SDHI1>;
+		dmas = <&dmac0 0xc1>, <&dmac0 0xc2>;
+		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
+	};
+
+	sdhi2: sd@ee160000 {
+		compatible = "renesas,sdhi-r8a7793";
+		reg = <0 0xee160000 0 0x100>;
+		interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp3_clks R8A7793_CLK_SDHI2>;
+		dmas = <&dmac0 0xd3>, <&dmac0 0xd4>;
+		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
+	};
+
 	scifa0: serial@e6c40000 {
 		compatible = "renesas,scifa-r8a7793",
 			     "renesas,rcar-gen2-scifa", "renesas,scifa";
@@ -806,18 +839,39 @@
 		};
 	};
 
+	can0: can@e6e80000 {
+		compatible = "renesas,can-r8a7793", "renesas,rcar-gen2-can";
+		reg = <0 0xe6e80000 0 0x1000>;
+		interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp9_clks R8A7793_CLK_RCAN0>,
+			 <&cpg_clocks R8A7793_CLK_RCAN>, <&can_clk>;
+		clock-names = "clkp1", "clkp2", "can_clk";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
+	};
+
+	can1: can@e6e88000 {
+		compatible = "renesas,can-r8a7793", "renesas,rcar-gen2-can";
+		reg = <0 0xe6e88000 0 0x1000>;
+		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp9_clks R8A7793_CLK_RCAN1>,
+			 <&cpg_clocks R8A7793_CLK_RCAN>, <&can_clk>;
+		clock-names = "clkp1", "clkp2", "can_clk";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
+	};
+
 	clocks {
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges;
 
 		/* External root clock */
-		extal_clk: extal_clk {
+		extal_clk: extal {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			clock-output-names = "extal";
 		};
 
 		/*
@@ -828,19 +882,31 @@
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "audio_clk_a";
 		};
 		audio_clk_b: audio_clk_b {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "audio_clk_b";
 		};
 		audio_clk_c: audio_clk_c {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "audio_clk_c";
+		};
+
+		/* External USB clock - can be overridden by the board */
+		usb_extal_clk: usb_extal {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <48000000>;
+		};
+
+		/* External CAN clock */
+		can_clk: can {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			/* This value must be overridden by the board. */
+			clock-frequency = <0>;
 		};
 
 		/* External SCIF clock */
@@ -849,7 +915,6 @@
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			status = "disabled";
 		};
 
 		/* Special CPG clocks */
@@ -857,7 +922,7 @@
 			compatible = "renesas,r8a7793-cpg-clocks",
 				     "renesas,rcar-gen2-cpg-clocks";
 			reg = <0 0xe6150000 0 0x1000>;
-			clocks = <&extal_clk>;
+			clocks = <&extal_clk &usb_extal_clk>;
 			#clock-cells = <1>;
 			clock-output-names = "main", "pll0", "pll1", "pll3",
 					     "lb", "qspi", "sdh", "sd0", "z",
@@ -866,111 +931,98 @@
 		};
 
 		/* Variable factor clocks */
-		sd2_clk: sd2_clk@e6150078 {
+		sd2_clk: sd2@e6150078 {
 			compatible = "renesas,r8a7793-div6-clock",
 				     "renesas,cpg-div6-clock";
 			reg = <0 0xe6150078 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sd2";
 		};
-		sd3_clk: sd3_clk@e615026c {
+		sd3_clk: sd3@e615026c {
 			compatible = "renesas,r8a7793-div6-clock",
 				     "renesas,cpg-div6-clock";
 			reg = <0 0xe615026c 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sd3";
 		};
-		mmc0_clk: mmc0_clk@e6150240 {
+		mmc0_clk: mmc0@e6150240 {
 			compatible = "renesas,r8a7793-div6-clock",
 				     "renesas,cpg-div6-clock";
 			reg = <0 0xe6150240 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "mmc0";
 		};
 
 		/* Fixed factor clocks */
-		pll1_div2_clk: pll1_div2_clk {
+		pll1_div2_clk: pll1_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "pll1_div2";
 		};
-		zg_clk: zg_clk {
+		zg_clk: zg {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <5>;
 			clock-mult = <1>;
-			clock-output-names = "zg";
 		};
-		zx_clk: zx_clk {
+		zx_clk: zx {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <3>;
 			clock-mult = <1>;
-			clock-output-names = "zx";
 		};
-		zs_clk: zs_clk {
+		zs_clk: zs {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <6>;
 			clock-mult = <1>;
-			clock-output-names = "zs";
 		};
-		hp_clk: hp_clk {
+		hp_clk: hp {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <12>;
 			clock-mult = <1>;
-			clock-output-names = "hp";
 		};
-		p_clk: p_clk {
+		p_clk: p {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <24>;
 			clock-mult = <1>;
-			clock-output-names = "p";
 		};
-		m2_clk: m2_clk {
+		m2_clk: m2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "m2";
 		};
-		rclk_clk: rclk_clk {
+		rclk_clk: rclk {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <(48 * 1024)>;
 			clock-mult = <1>;
-			clock-output-names = "rclk";
 		};
-		mp_clk: mp_clk {
+		mp_clk: mp {
 			compatible = "fixed-factor-clock";
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
 			clock-div = <15>;
 			clock-mult = <1>;
-			clock-output-names = "mp";
 		};
-		cp_clk: cp_clk {
+		cp_clk: cp {
 			compatible = "fixed-factor-clock";
 			clocks = <&extal_clk>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "cp";
 		};
 
 		/* Gate clocks */
@@ -1098,6 +1150,7 @@
 			reg = <0 0xe6150994 0 4>, <0 0xe61509a4 0 4>;
 			clocks = <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
 				 <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
+				 <&p_clk>, <&p_clk>,
 				 <&cpg_clocks R8A7793_CLK_QSPI>, <&hp_clk>,
 				 <&cp_clk>, <&hp_clk>, <&hp_clk>, <&hp_clk>,
 				 <&hp_clk>, <&hp_clk>;
@@ -1107,7 +1160,8 @@
 				R8A7793_CLK_GPIO5 R8A7793_CLK_GPIO4
 				R8A7793_CLK_GPIO3 R8A7793_CLK_GPIO2
 				R8A7793_CLK_GPIO1 R8A7793_CLK_GPIO0
-				R8A7793_CLK_QSPI_MOD R8A7793_CLK_I2C5
+				R8A7793_CLK_QSPI_MOD R8A7793_CLK_RCAN1
+				R8A7793_CLK_RCAN0 R8A7793_CLK_I2C5
 				R8A7793_CLK_IICDVFS R8A7793_CLK_I2C4
 				R8A7793_CLK_I2C3 R8A7793_CLK_I2C2
 				R8A7793_CLK_I2C1 R8A7793_CLK_I2C0
@@ -1115,8 +1169,9 @@
 			clock-output-names =
 				"gpio7", "gpio6", "gpio5", "gpio4",
 				"gpio3", "gpio2", "gpio1", "gpio0",
-				"qspi_mod", "i2c5", "i2c6", "i2c4",
-				"i2c3", "i2c2", "i2c1", "i2c0";
+				"rcan1", "rcan0", "qspi_mod", "i2c5",
+				"i2c6", "i2c4", "i2c3", "i2c2", "i2c1",
+				"i2c0";
 		};
 		mstp10_clks: mstp10_clks@e6150998 {
 			compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index ca9bc4f..383ad79 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -107,38 +107,38 @@
 	pinctrl-names = "default";
 
 	du_pins: du {
-		renesas,groups = "du1_rgb666", "du1_sync", "du1_disp", "du1_dotclkout0";
-		renesas,function = "du";
+		groups = "du1_rgb666", "du1_sync", "du1_disp", "du1_dotclkout0";
+		function = "du";
 	};
 
 	scif2_pins: serial2 {
-		renesas,groups = "scif2_data";
-		renesas,function = "scif2";
+		groups = "scif2_data";
+		function = "scif2";
 	};
 
 	scif_clk_pins: scif_clk {
-		renesas,groups = "scif_clk";
-		renesas,function = "scif_clk";
+		groups = "scif_clk";
+		function = "scif_clk";
 	};
 
 	ether_pins: ether {
-		renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
-		renesas,function = "eth";
+		groups = "eth_link", "eth_mdio", "eth_rmii";
+		function = "eth";
 	};
 
 	phy1_pins: phy1 {
-		renesas,groups = "intc_irq8";
-		renesas,function = "intc";
+		groups = "intc_irq8";
+		function = "intc";
 	};
 
 	i2c1_pins: i2c1 {
-		renesas,groups = "i2c1";
-		renesas,function = "i2c1";
+		groups = "i2c1";
+		function = "i2c1";
 	};
 
 	vin0_pins: vin0 {
-		renesas,groups = "vin0_data8", "vin0_clk";
-		renesas,function = "vin0";
+		groups = "vin0_data8", "vin0_clk";
+		function = "vin0";
 	};
 };
 
@@ -148,8 +148,8 @@
 
 &pfc {
 	qspi_pins: spi0 {
-		renesas,groups = "qspi_ctrl", "qspi_data4";
-		renesas,function = "qspi";
+		groups = "qspi_ctrl", "qspi_data4";
+		function = "qspi";
 	};
 };
 
diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
index 66f077a..56d98d5 100644
--- a/arch/arm/boot/dts/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/r8a7794-silk.dts
@@ -130,58 +130,58 @@
 	pinctrl-names = "default";
 
 	scif2_pins: serial2 {
-		renesas,groups = "scif2_data";
-		renesas,function = "scif2";
+		groups = "scif2_data";
+		function = "scif2";
 	};
 
 	scif_clk_pins: scif_clk {
-		renesas,groups = "scif_clk";
-		renesas,function = "scif_clk";
+		groups = "scif_clk";
+		function = "scif_clk";
 	};
 
 	ether_pins: ether {
-		renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
-		renesas,function = "eth";
+		groups = "eth_link", "eth_mdio", "eth_rmii";
+		function = "eth";
 	};
 
 	phy1_pins: phy1 {
-		renesas,groups = "intc_irq8";
-		renesas,function = "intc";
+		groups = "intc_irq8";
+		function = "intc";
 	};
 
 	i2c1_pins: i2c1 {
-		renesas,groups = "i2c1";
-		renesas,function = "i2c1";
+		groups = "i2c1";
+		function = "i2c1";
 	};
 
 	mmcif0_pins: mmcif0 {
-		renesas,groups = "mmc_data8", "mmc_ctrl";
-		renesas,function = "mmc";
+		groups = "mmc_data8", "mmc_ctrl";
+		function = "mmc";
 	};
 
 	sdhi1_pins: sd1 {
-		renesas,groups = "sdhi1_data4", "sdhi1_ctrl";
-		renesas,function = "sdhi1";
+		groups = "sdhi1_data4", "sdhi1_ctrl";
+		function = "sdhi1";
 	};
 
 	qspi_pins: spi0 {
-		renesas,groups = "qspi_ctrl", "qspi_data4";
-		renesas,function = "qspi";
+		groups = "qspi_ctrl", "qspi_data4";
+		function = "qspi";
 	};
 
 	vin0_pins: vin0 {
-		renesas,groups = "vin0_data8", "vin0_clk";
-		renesas,function = "vin0";
+		groups = "vin0_data8", "vin0_clk";
+		function = "vin0";
 	};
 
 	usb0_pins: usb0 {
-		renesas,groups = "usb0";
-		renesas,function = "usb0";
+		groups = "usb0";
+		function = "usb0";
 	};
 
 	usb1_pins: usb1 {
-		renesas,groups = "usb1";
-		renesas,function = "usb1";
+		groups = "usb1";
+		function = "usb1";
 	};
 };
 
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index eacb2b2..e45b23f 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -26,6 +26,8 @@
 		i2c3 = &i2c3;
 		i2c4 = &i2c4;
 		i2c5 = &i2c5;
+		i2c6 = &i2c6;
+		i2c7 = &i2c7;
 		spi0 = &qspi;
 		vin0 = &vin0;
 		vin1 = &vin1;
@@ -629,6 +631,32 @@
 		status = "disabled";
 	};
 
+	i2c6: i2c@e6500000 {
+		compatible = "renesas,iic-r8a7794", "renesas,rmobile-iic";
+		reg = <0 0xe6500000 0 0x425>;
+		interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp3_clks R8A7794_CLK_IIC0>;
+		dmas = <&dmac0 0x61>, <&dmac0 0x62>;
+		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	i2c7: i2c@e6510000 {
+		compatible = "renesas,iic-r8a7794", "renesas,rmobile-iic";
+		reg = <0 0xe6510000 0 0x425>;
+		interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp3_clks R8A7794_CLK_IIC1>;
+		dmas = <&dmac0 0x65>, <&dmac0 0x66>;
+		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
 	mmcif0: mmc@ee200000 {
 		compatible = "renesas,mmcif-r8a7794", "renesas,sh-mmcif";
 		reg = <0 0xee200000 0 0x80>;
@@ -830,18 +858,54 @@
 		};
 	};
 
+	can0: can@e6e80000 {
+		compatible = "renesas,can-r8a7794", "renesas,rcar-gen2-can";
+		reg = <0 0xe6e80000 0 0x1000>;
+		interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp9_clks R8A7794_CLK_RCAN0>,
+			 <&cpg_clocks R8A7794_CLK_RCAN>, <&can_clk>;
+		clock-names = "clkp1", "clkp2", "can_clk";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
+	};
+
+	can1: can@e6e88000 {
+		compatible = "renesas,can-r8a7794", "renesas,rcar-gen2-can";
+		reg = <0 0xe6e88000 0 0x1000>;
+		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp9_clks R8A7794_CLK_RCAN1>,
+			 <&cpg_clocks R8A7794_CLK_RCAN>, <&can_clk>;
+		clock-names = "clkp1", "clkp2", "can_clk";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
+	};
+
 	clocks {
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges;
 
 		/* External root clock */
-		extal_clk: extal_clk {
+		extal_clk: extal {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			clock-output-names = "extal";
+		};
+
+		/* External USB clock - can be overridden by the board */
+		usb_extal_clk: usb_extal {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <48000000>;
+		};
+
+		/* External CAN clock */
+		can_clk: can {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			/* This value must be overridden by the board. */
+			clock-frequency = <0>;
 		};
 
 		/* External SCIF clock */
@@ -850,7 +914,6 @@
 			#clock-cells = <0>;
 			/* This value must be overridden by the board. */
 			clock-frequency = <0>;
-			status = "disabled";
 		};
 
 		/* Special CPG clocks */
@@ -858,180 +921,160 @@
 			compatible = "renesas,r8a7794-cpg-clocks",
 				     "renesas,rcar-gen2-cpg-clocks";
 			reg = <0 0xe6150000 0 0x1000>;
-			clocks = <&extal_clk>;
+			clocks = <&extal_clk &usb_extal_clk>;
 			#clock-cells = <1>;
 			clock-output-names = "main", "pll0", "pll1", "pll3",
-					     "lb", "qspi", "sdh", "sd0", "z";
+					     "lb", "qspi", "sdh", "sd0", "z",
+					     "rcan";
 			#power-domain-cells = <0>;
 		};
 		/* Variable factor clocks */
-		sd2_clk: sd2_clk@e6150078 {
+		sd2_clk: sd2@e6150078 {
 			compatible = "renesas,r8a7794-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150078 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sd2";
 		};
-		sd3_clk: sd3_clk@e615026c {
+		sd3_clk: sd3@e615026c {
 			compatible = "renesas,r8a7794-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe615026c 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sd3";
 		};
-		mmc0_clk: mmc0_clk@e6150240 {
+		mmc0_clk: mmc0@e6150240 {
 			compatible = "renesas,r8a7794-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0 0xe6150240 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "mmc0";
 		};
 
 		/* Fixed factor clocks */
-		pll1_div2_clk: pll1_div2_clk {
+		pll1_div2_clk: pll1_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "pll1_div2";
 		};
-		zg_clk: zg_clk {
+		zg_clk: zg {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <6>;
 			clock-mult = <1>;
-			clock-output-names = "zg";
 		};
-		zx_clk: zx_clk {
+		zx_clk: zx {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <3>;
 			clock-mult = <1>;
-			clock-output-names = "zx";
 		};
-		zs_clk: zs_clk {
+		zs_clk: zs {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <6>;
 			clock-mult = <1>;
-			clock-output-names = "zs";
 		};
-		hp_clk: hp_clk {
+		hp_clk: hp {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <12>;
 			clock-mult = <1>;
-			clock-output-names = "hp";
 		};
-		i_clk: i_clk {
+		i_clk: i {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "i";
 		};
-		b_clk: b_clk {
+		b_clk: b {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <12>;
 			clock-mult = <1>;
-			clock-output-names = "b";
 		};
-		p_clk: p_clk {
+		p_clk: p {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <24>;
 			clock-mult = <1>;
-			clock-output-names = "p";
 		};
-		cl_clk: cl_clk {
+		cl_clk: cl {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <48>;
 			clock-mult = <1>;
-			clock-output-names = "cl";
 		};
-		m2_clk: m2_clk {
+		m2_clk: m2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "m2";
 		};
-		rclk_clk: rclk_clk {
+		rclk_clk: rclk {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <(48 * 1024)>;
 			clock-mult = <1>;
-			clock-output-names = "rclk";
 		};
-		oscclk_clk: oscclk_clk {
+		oscclk_clk: oscclk {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <(12 * 1024)>;
 			clock-mult = <1>;
-			clock-output-names = "oscclk";
 		};
-		zb3_clk: zb3_clk {
+		zb3_clk: zb3 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL3>;
 			#clock-cells = <0>;
 			clock-div = <4>;
 			clock-mult = <1>;
-			clock-output-names = "zb3";
 		};
-		zb3d2_clk: zb3d2_clk {
+		zb3d2_clk: zb3d2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL3>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "zb3d2";
 		};
-		ddr_clk: ddr_clk {
+		ddr_clk: ddr {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL3>;
 			#clock-cells = <0>;
 			clock-div = <8>;
 			clock-mult = <1>;
-			clock-output-names = "ddr";
 		};
-		mp_clk: mp_clk {
+		mp_clk: mp {
 			compatible = "fixed-factor-clock";
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
 			clock-div = <15>;
 			clock-mult = <1>;
-			clock-output-names = "mp";
 		};
-		cp_clk: cp_clk {
+		cp_clk: cp {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <48>;
 			clock-mult = <1>;
-			clock-output-names = "cp";
 		};
 
-		acp_clk: acp_clk {
+		acp_clk: acp {
 			compatible = "fixed-factor-clock";
 			clocks = <&extal_clk>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "acp";
 		};
 
 		/* Gate clocks */
@@ -1082,16 +1125,19 @@
 			compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
 			reg = <0 0xe615013c 0 4>, <0 0xe6150048 0 4>;
 			clocks = <&sd3_clk>, <&sd2_clk>, <&cpg_clocks R8A7794_CLK_SD0>,
-			         <&mmc0_clk>, <&rclk_clk>, <&hp_clk>, <&hp_clk>;
+				 <&mmc0_clk>, <&hp_clk>, <&hp_clk>, <&rclk_clk>,
+				 <&hp_clk>, <&hp_clk>;
 			#clock-cells = <1>;
 			clock-indices = <
 			        R8A7794_CLK_SDHI2 R8A7794_CLK_SDHI1 R8A7794_CLK_SDHI0
-				R8A7794_CLK_MMCIF0 R8A7794_CLK_CMT1
+				R8A7794_CLK_MMCIF0 R8A7794_CLK_IIC0
+				R8A7794_CLK_IIC1 R8A7794_CLK_CMT1
 				R8A7794_CLK_USBDMAC0 R8A7794_CLK_USBDMAC1
 			>;
 			clock-output-names =
 			        "sdhi2", "sdhi1", "sdhi0",
-				"mmcif0", "cmt1", "usbdmac0", "usbdmac1";
+				"mmcif0", "i2c6", "i2c7",
+				"cmt1", "usbdmac0", "usbdmac1";
 		};
 		mstp4_clks: mstp4_clks@e6150140 {
 			compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
@@ -1137,20 +1183,22 @@
 			compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
 			reg = <0 0xe6150994 0 4>, <0 0xe61509a4 0 4>;
 			clocks = <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
-				 <&cp_clk>, <&cp_clk>, <&cp_clk>,
-				 <&cpg_clocks R8A7794_CLK_QSPI>, <&hp_clk>, <&hp_clk>,
-				 <&hp_clk>, <&hp_clk>, <&hp_clk>, <&hp_clk>;
+				 <&cp_clk>, <&cp_clk>, <&cp_clk>, <&p_clk>,
+				 <&p_clk>, <&cpg_clocks R8A7794_CLK_QSPI>,
+				 <&hp_clk>, <&hp_clk>, <&hp_clk>, <&hp_clk>,
+				 <&hp_clk>, <&hp_clk>;
 			#clock-cells = <1>;
 			clock-indices = <R8A7794_CLK_GPIO6 R8A7794_CLK_GPIO5
 					 R8A7794_CLK_GPIO4 R8A7794_CLK_GPIO3
 					 R8A7794_CLK_GPIO2 R8A7794_CLK_GPIO1
-					 R8A7794_CLK_GPIO0 R8A7794_CLK_QSPI_MOD
+					 R8A7794_CLK_GPIO0 R8A7794_CLK_RCAN1
+					 R8A7794_CLK_RCAN0 R8A7794_CLK_QSPI_MOD
 					 R8A7794_CLK_I2C5 R8A7794_CLK_I2C4
 					 R8A7794_CLK_I2C3 R8A7794_CLK_I2C2
 					 R8A7794_CLK_I2C1 R8A7794_CLK_I2C0>;
 			clock-output-names =
 				"gpio6", "gpio5", "gpio4", "gpio3", "gpio2",
-				"gpio1", "gpio0", "qspi_mod",
+				"gpio1", "gpio0", "rcan1", "rcan0", "qspi_mod",
 				"i2c5", "i2c4", "i2c3", "i2c2", "i2c1", "i2c0";
 		};
 		mstp11_clks: mstp11_clks@e615099c {
diff --git a/arch/arm/boot/dts/rk3036-evb.dts b/arch/arm/boot/dts/rk3036-evb.dts
index b3d6ec8..8db9e9b 100644
--- a/arch/arm/boot/dts/rk3036-evb.dts
+++ b/arch/arm/boot/dts/rk3036-evb.dts
@@ -45,6 +45,11 @@
 / {
 	model = "Rockchip RK3036 Evaluation board";
 	compatible = "rockchip,rk3036-evb", "rockchip,rk3036";
+
+	memory {
+		device_type = "memory";
+		reg = <0x60000000 0x40000000>;
+	};
 };
 
 &emac {
diff --git a/arch/arm/boot/dts/rk3036-kylin.dts b/arch/arm/boot/dts/rk3036-kylin.dts
index 6251d10..1df1557 100644
--- a/arch/arm/boot/dts/rk3036-kylin.dts
+++ b/arch/arm/boot/dts/rk3036-kylin.dts
@@ -46,6 +46,11 @@
 	model = "Rockchip RK3036 KylinBoard";
 	compatible = "rockchip,rk3036-kylin", "rockchip,rk3036";
 
+	memory {
+		device_type = "memory";
+		reg = <0x60000000 0x20000000>;
+	};
+
 	leds: gpio-leds {
 		compatible = "gpio-leds";
 
@@ -130,6 +135,10 @@
 	status = "okay";
 };
 
+&hdmi {
+	status = "okay";
+};
+
 &i2c1 {
 	clock-frequency = <400000>;
 
@@ -341,7 +350,6 @@
 &sdio {
 	status = "okay";
 
-	broken-cd;
 	bus-width = <4>;
 	cap-sd-highspeed;
 	cap-sdio-irq;
@@ -385,6 +393,14 @@
 	status = "okay";
 };
 
+&vop {
+	status = "okay";
+};
+
+&vop_mmu {
+	status = "okay";
+};
+
 &pinctrl {
 	leds {
 		led_ctl: led-ctl {
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index d0f4bb7..843d2be 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -63,11 +63,6 @@
 		spi = &spi;
 	};
 
-	memory {
-		device_type = "memory";
-		reg = <0x60000000 0x40000000>;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -119,6 +114,11 @@
 		interrupt-affinity = <&cpu0>, <&cpu1>;
 	};
 
+	display-subsystem {
+		compatible = "rockchip,display-subsystem";
+		ports = <&vop_out>;
+	};
+
 	timer {
 		compatible = "arm,armv7-timer";
 		arm,cpu-registers-not-fw-configured;
@@ -149,6 +149,36 @@
 		};
 	};
 
+	vop: vop@10118000 {
+		compatible = "rockchip,rk3036-vop";
+		reg = <0x10118000 0x19c>;
+		interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru ACLK_LCDC>, <&cru SCLK_LCDC>, <&cru HCLK_LCDC>;
+		clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+		resets = <&cru SRST_LCDC1_A>, <&cru SRST_LCDC1_H>, <&cru SRST_LCDC1_D>;
+		reset-names = "axi", "ahb", "dclk";
+		iommus = <&vop_mmu>;
+		status = "disabled";
+
+		vop_out: port {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			vop_out_hdmi: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&hdmi_in_vop>;
+			};
+		};
+	};
+
+	vop_mmu: iommu@10118300 {
+		compatible = "rockchip,iommu";
+		reg = <0x10118300 0x100>;
+		interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "vop_mmu";
+		#iommu-cells = <0>;
+		status = "disabled";
+	};
+
 	gic: interrupt-controller@10139000 {
 		compatible = "arm,gic-400";
 		interrupt-controller;
@@ -237,7 +267,6 @@
 		compatible = "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x1021c000 0x4000>;
 		interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
-		broken-cd;
 		bus-width = <8>;
 		cap-mmc-highspeed;
 		clock-frequency = <37500000>;
@@ -297,6 +326,27 @@
 		status = "disabled";
 	};
 
+	hdmi: hdmi@20034000 {
+		compatible = "rockchip,rk3036-inno-hdmi";
+		reg = <0x20034000 0x4000>;
+		interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru  PCLK_HDMI>;
+		clock-names = "pclk";
+		rockchip,grf = <&grf>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&hdmi_ctl>;
+		status = "disabled";
+
+		hdmi_in: port {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			hdmi_in_vop: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&vop_out_hdmi>;
+			};
+		};
+	};
+
 	timer: timer@20044000 {
 		compatible = "rockchip,rk3036-timer", "rockchip,rk3288-timer";
 		reg = <0x20044000 0x20>;
@@ -644,6 +694,15 @@
 			};
 		};
 
+		hdmi {
+			hdmi_ctl: hdmi-ctl {
+				rockchip,pins = <1 8  RK_FUNC_1 &pcfg_pull_none>,
+						<1 9  RK_FUNC_1 &pcfg_pull_none>,
+						<1 10 RK_FUNC_1 &pcfg_pull_none>,
+						<1 11 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
 		uart0 {
 			uart0_xfer: uart0-xfer {
 				rockchip,pins = <0 16 RK_FUNC_1 &pcfg_pull_default>,
diff --git a/arch/arm/boot/dts/rk3066a-bqcurie2.dts b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
index 6d2a5b3..bc674ee 100644
--- a/arch/arm/boot/dts/rk3066a-bqcurie2.dts
+++ b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
@@ -42,6 +42,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/input/input.h>
 #include "rk3066a.dtsi"
 
 / {
@@ -77,21 +78,19 @@
 
 	gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
 		autorepeat;
 
-		button@0 {
+		power {
 			gpios = <&gpio6 2 GPIO_ACTIVE_LOW>; /* GPIO6_A2 */
-			linux,code = <116>;
+			linux,code = <KEY_POWER>;
 			label = "GPIO Key Power";
 			linux,input-type = <1>;
 			wakeup-source;
 			debounce-interval = <100>;
 		};
-		button@1 {
+		volume-down {
 			gpios = <&gpio4 21 GPIO_ACTIVE_LOW>; /* GPIO4_C5 */
-			linux,code = <104>;
+			linux,code = <KEY_VOLUMEDOWN>;
 			label = "GPIO Key Vol-";
 			linux,input-type = <1>;
 			debounce-interval = <100>;
diff --git a/arch/arm/boot/dts/rk3066a-rayeager.dts b/arch/arm/boot/dts/rk3066a-rayeager.dts
index 0553300..6e7f218 100644
--- a/arch/arm/boot/dts/rk3066a-rayeager.dts
+++ b/arch/arm/boot/dts/rk3066a-rayeager.dts
@@ -41,6 +41,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/input/input.h>
 #include "rk3066a.dtsi"
 
 / {
@@ -61,14 +62,12 @@
 
 	keys: gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
 
-		button@0 {
+		power {
 			wakeup-source;
 			gpios = <&gpio6 2 GPIO_ACTIVE_LOW>;
 			label = "GPIO Power";
-			linux,code = <116>;
+			linux,code = <KEY_POWER>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pwr_key>;
 		};
@@ -182,7 +181,6 @@
 };
 
 &emmc {
-	broken-cd;
 	bus-width = <8>;
 	cap-mmc-highspeed;
 	disable-wp;
@@ -348,7 +346,6 @@
 };
 
 &mmc1 {
-	broken-cd;
 	bus-width = <4>;
 	disable-wp;
 	non-removable;
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index cb0a552..c0ba86c 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -169,7 +169,7 @@
 		clocks = <&cru PCLK_EFUSE>;
 		clock-names = "pclk_efuse";
 
-		cpu_leakage: cpu_leakage {
+		cpu_leakage: cpu_leakage@17 {
 			reg = <0x17 0x1>;
 		};
 	};
@@ -207,7 +207,7 @@
 		#size-cells = <0>;
 		status = "disabled";
 
-		usbphy0: usb-phy0 {
+		usbphy0: usb-phy@17c {
 			#phy-cells = <0>;
 			reg = <0x17c>;
 			clocks = <&cru SCLK_OTGPHY0>;
@@ -215,7 +215,7 @@
 			#clock-cells = <0>;
 		};
 
-		usbphy1: usb-phy1 {
+		usbphy1: usb-phy@188 {
 			#phy-cells = <0>;
 			reg = <0x188>;
 			clocks = <&cru SCLK_OTGPHY1>;
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
index 0b6924c..1da46d1 100644
--- a/arch/arm/boot/dts/rk3188-radxarock.dts
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -41,6 +41,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/input/input.h>
 #include "rk3188.dtsi"
 
 / {
@@ -54,13 +55,11 @@
 
 	gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
 		autorepeat;
 
-		button@0 {
+		power {
 			gpios = <&gpio0 4 GPIO_ACTIVE_LOW>;
-			linux,code = <116>;
+			linux,code = <KEY_POWER>;
 			label = "GPIO Key Power";
 			linux,input-type = <1>;
 			wakeup-source;
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 9271833..31f81b2 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -154,7 +154,7 @@
 		clocks = <&cru PCLK_EFUSE>;
 		clock-names = "pclk_efuse";
 
-		cpu_leakage: cpu_leakage {
+		cpu_leakage: cpu_leakage@17 {
 			reg = <0x17 0x1>;
 		};
 	};
@@ -166,7 +166,7 @@
 		#size-cells = <0>;
 		status = "disabled";
 
-		usbphy0: usb-phy0 {
+		usbphy0: usb-phy@10c {
 			#phy-cells = <0>;
 			reg = <0x10c>;
 			clocks = <&cru SCLK_OTGPHY0>;
@@ -174,7 +174,7 @@
 			#clock-cells = <0>;
 		};
 
-		usbphy1: usb-phy1 {
+		usbphy1: usb-phy@11c {
 			#phy-cells = <0>;
 			reg = <0x11c>;
 			clocks = <&cru SCLK_OTGPHY1>;
diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts
index e3898b8..5956e82 100644
--- a/arch/arm/boot/dts/rk3228-evb.dts
+++ b/arch/arm/boot/dts/rk3228-evb.dts
@@ -53,7 +53,6 @@
 };
 
 &emmc {
-	broken-cd;
 	cap-mmc-highspeed;
 	mmc-ddr-1_8v;
 	disable-wp;
@@ -61,6 +60,13 @@
 	status = "okay";
 };
 
+&tsadc {
+	status = "okay";
+
+	rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */
+	rockchip,hw-tshut-polarity = <1>; /* tshut polarity 0:LOW 1:HIGH */
+};
+
 &uart2 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/rk3228.dtsi b/arch/arm/boot/dts/rk3228.dtsi
index 4dae42a..e23a22e 100644
--- a/arch/arm/boot/dts/rk3228.dtsi
+++ b/arch/arm/boot/dts/rk3228.dtsi
@@ -43,6 +43,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/pinctrl/rockchip.h>
 #include <dt-bindings/clock/rk3228-cru.h>
+#include <dt-bindings/thermal/thermal.h>
 #include "skeleton.dtsi"
 
 / {
@@ -69,6 +70,7 @@
 				/* KHz    uV */
 				 816000 1000000
 			>;
+			#cooling-cells = <2>; /* min followed by max */
 			clock-latency = <40000>;
 			clocks = <&cru ARMCLK>;
 		};
@@ -185,6 +187,58 @@
 		status = "disabled";
 	};
 
+	i2c0: i2c@11050000 {
+		compatible = "rockchip,rk3228-i2c";
+		reg = <0x11050000 0x1000>;
+		interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "i2c";
+		clocks = <&cru PCLK_I2C0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c0_xfer>;
+		status = "disabled";
+	};
+
+	i2c1: i2c@11060000 {
+		compatible = "rockchip,rk3228-i2c";
+		reg = <0x11060000 0x1000>;
+		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "i2c";
+		clocks = <&cru PCLK_I2C1>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c1_xfer>;
+		status = "disabled";
+	};
+
+	i2c2: i2c@11070000 {
+		compatible = "rockchip,rk3228-i2c";
+		reg = <0x11070000 0x1000>;
+		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "i2c";
+		clocks = <&cru PCLK_I2C2>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c2_xfer>;
+		status = "disabled";
+	};
+
+	i2c3: i2c@11080000 {
+		compatible = "rockchip,rk3228-i2c";
+		reg = <0x11080000 0x1000>;
+		interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "i2c";
+		clocks = <&cru PCLK_I2C3>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c3_xfer>;
+		status = "disabled";
+	};
+
 	pwm0: pwm@110b0000 {
 		compatible = "rockchip,rk3288-pwm";
 		reg = <0x110b0000 0x10>;
@@ -247,6 +301,63 @@
 		assigned-clock-rates = <594000000>;
 	};
 
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			polling-delay-passive = <100>; /* milliseconds */
+			polling-delay = <5000>; /* milliseconds */
+
+			thermal-sensors = <&tsadc 0>;
+
+			trips {
+				cpu_alert0: cpu_alert0 {
+					temperature = <70000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "passive";
+				};
+				cpu_alert1: cpu_alert1 {
+					temperature = <75000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "passive";
+				};
+				cpu_crit: cpu_crit {
+					temperature = <90000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert0>;
+					cooling-device =
+						<&cpu0 THERMAL_NO_LIMIT 6>;
+				};
+				map1 {
+					trip = <&cpu_alert1>;
+					cooling-device =
+						<&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+	};
+
+	tsadc: tsadc@11150000 {
+		compatible = "rockchip,rk3228-tsadc";
+		reg = <0x11150000 0x100>;
+		interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
+		clock-names = "tsadc", "apb_pclk";
+		resets = <&cru SRST_TSADC>;
+		reset-names = "tsadc-apb";
+		pinctrl-names = "init", "default", "sleep";
+		pinctrl-0 = <&otp_gpio>;
+		pinctrl-1 = <&otp_out>;
+		pinctrl-2 = <&otp_gpio>;
+		#thermal-sensor-cells = <0>;
+		rockchip,hw-tshut-temp = <95000>;
+		status = "disabled";
+	};
+
 	emmc: dwmmc@30020000 {
 		compatible = "rockchip,rk3288-dw-mshc";
 		reg = <0x30020000 0x4000>;
@@ -370,6 +481,34 @@
 			};
 		};
 
+		i2c0 {
+			i2c0_xfer: i2c0-xfer {
+				rockchip,pins = <0 0 RK_FUNC_1 &pcfg_pull_none>,
+						<0 1 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2c1 {
+			i2c1_xfer: i2c1-xfer {
+				rockchip,pins = <0 2 RK_FUNC_1 &pcfg_pull_none>,
+						<0 3 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2c2 {
+			i2c2_xfer: i2c2-xfer {
+				rockchip,pins = <2 20 RK_FUNC_1 &pcfg_pull_none>,
+						<2 21 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2c3 {
+			i2c3_xfer: i2c3-xfer {
+				rockchip,pins = <0 6 RK_FUNC_1 &pcfg_pull_none>,
+						<0 7 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
 		pwm0 {
 			pwm0_pin: pwm0-pin {
 				rockchip,pins = <3 21 RK_FUNC_1 &pcfg_pull_none>;
@@ -394,6 +533,16 @@
 			};
 		};
 
+		tsadc {
+			otp_gpio: otp-gpio {
+				rockchip,pins = <0 24 RK_FUNC_GPIO &pcfg_pull_none>;
+			};
+
+			otp_out: otp-out {
+				rockchip,pins = <0 24 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
 		uart0 {
 			uart0_xfer: uart0-xfer {
 				rockchip,pins = <2 26 RK_FUNC_1 &pcfg_pull_none>,
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 78d47f7..963365d 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -38,6 +38,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <dt-bindings/input/input.h>
 #include <dt-bindings/pwm/pwm.h>
 #include "rk3288.dtsi"
 
@@ -98,16 +99,14 @@
 
 	gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
 		autorepeat;
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&pwrbtn>;
 
-		button@0 {
+		power {
 			gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
-			linux,code = <116>;
+			linux,code = <KEY_POWER>;
 			label = "GPIO Key Power";
 			linux,input-type = <1>;
 			wakeup-source;
@@ -172,7 +171,6 @@
 };
 
 &emmc {
-	broken-cd;
 	bus-width = <8>;
 	cap-mmc-highspeed;
 	disable-wp;
diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
index 98c586a4..d6cf9ad 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
@@ -40,6 +40,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <dt-bindings/input/input.h>
 #include "rk3288.dtsi"
 
 / {
@@ -87,14 +88,12 @@
 
 	keys: gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
 
-		button@0 {
+		power {
 			wakeup-source;
 			gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
 			label = "GPIO Power";
-			linux,code = <116>;
+			linux,code = <KEY_POWER>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pwr_key>;
 		};
@@ -208,7 +207,6 @@
 };
 
 &emmc {
-	broken-cd;
 	bus-width = <8>;
 	cap-mmc-highspeed;
 	disable-wp;
@@ -509,7 +507,6 @@
 };
 
 &sdio0 {
-	broken-cd;
 	bus-width = <4>;
 	disable-wp;
 	non-removable;
diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
new file mode 100644
index 0000000..8643103
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-miqi.dts
@@ -0,0 +1,472 @@
+/*
+ * Copyright (c) 2016 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include <dt-bindings/input/input.h>
+#include "rk3288.dtsi"
+
+/ {
+	model = "mqmaker MiQi";
+	compatible = "mqmaker,miqi", "rockchip,rk3288";
+
+	chosen {
+		stdout-path = "serial2:115200n8";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0 0x80000000>;
+	};
+
+	ext_gmac: external-gmac-clock {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <125000000>;
+		clock-output-names = "ext_gmac";
+	};
+
+	io_domains: io-domains {
+		compatible = "rockchip,rk3288-io-voltage-domain";
+
+		audio-supply = <&vcca_33>;
+		flash0-supply = <&vcc_flash>;
+		flash1-supply = <&vcc_lan>;
+		gpio30-supply = <&vcc_io>;
+		gpio1830-supply = <&vcc_io>;
+		lcdc-supply = <&vcc_io>;
+		sdcard-supply = <&vccio_sd>;
+		wifi-supply = <&vcc_18>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		work {
+			gpios = <&gpio7 4 GPIO_ACTIVE_LOW>;
+			label = "miqi:green:user";
+			linux,default-trigger = "default-on";
+			pinctrl-names = "default";
+			pinctrl-0 = <&led_ctl>;
+		};
+	};
+
+	vcc_flash: flash-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_flash";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vcc_io>;
+	};
+
+	vcc_host: usb-host-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&host_vbus_drv>;
+		regulator-name = "vcc_host";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+		vin-supply = <&vcc_sys>;
+	};
+
+	vcc_sd: sdmmc-regulator {
+		compatible = "regulator-fixed";
+		gpio = <&gpio7 11 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&sdmmc_pwr>;
+		regulator-name = "vcc_sd";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <100000>;
+		vin-supply = <&vcc_io>;
+	};
+
+	vcc_sys: vsys-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_sys";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+};
+
+&cpu0 {
+	cpu0-supply = <&vdd_cpu>;
+};
+
+&emmc {
+	bus-width = <8>;
+	cap-mmc-highspeed;
+	disable-wp;
+	non-removable;
+	num-slots = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_pwr>, <&emmc_bus8>;
+	vmmc-supply = <&vcc_io>;
+	vqmmc-supply = <&vcc_flash>;
+	status = "okay";
+};
+
+&gmac {
+	assigned-clocks = <&cru SCLK_MAC>;
+	assigned-clock-parents = <&ext_gmac>;
+	clock_in_out = "input";
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>, <&phy_rst>, <&phy_pmeb>, <&phy_int>;
+	phy-supply = <&vcc_lan>;
+	phy-mode = "rgmii";
+	snps,reset-active-low;
+	snps,reset-delays-us = <0 10000 1000000>;
+	snps,reset-gpio = <&gpio4 8 GPIO_ACTIVE_LOW>;
+	tx_delay = <0x30>;
+	rx_delay = <0x10>;
+	status = "ok";
+};
+
+&hdmi {
+	ddc-i2c-bus = <&i2c5>;
+	status = "okay";
+};
+
+&i2c0 {
+	clock-frequency = <400000>;
+	status = "okay";
+
+	vdd_cpu: syr827@40 {
+		compatible = "silergy,syr827";
+		fcs,suspend-voltage-selector = <1>;
+		reg = <0x40>;
+		regulator-name = "vdd_cpu";
+		regulator-min-microvolt = <850000>;
+		regulator-max-microvolt = <1350000>;
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-enable-ramp-delay = <300>;
+		regulator-ramp-delay = <8000>;
+		vin-supply = <&vcc_sys>;
+	};
+
+	vdd_gpu: syr828@41 {
+		compatible = "silergy,syr828";
+		fcs,suspend-voltage-selector = <1>;
+		reg = <0x41>;
+		regulator-name = "vdd_gpu";
+		regulator-min-microvolt = <850000>;
+		regulator-max-microvolt = <1350000>;
+		regulator-always-on;
+		vin-supply = <&vcc_sys>;
+	};
+
+	hym8563: hym8563@51 {
+		compatible = "haoyu,hym8563";
+		reg = <0x51>;
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		clock-output-names = "xin32k";
+	};
+
+	act8846: act8846@5a {
+		compatible = "active-semi,act8846";
+		reg = <0x5a>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pmic_vsel>;
+		system-power-controller;
+
+		vp1-supply = <&vcc_sys>;
+		vp2-supply = <&vcc_sys>;
+		vp3-supply = <&vcc_sys>;
+		vp4-supply = <&vcc_sys>;
+		inl1-supply = <&vcc_sys>;
+		inl2-supply = <&vcc_sys>;
+		inl3-supply = <&vcc_20>;
+
+		regulators {
+			vcc_ddr: REG1 {
+				regulator-name = "vcc_ddr";
+				regulator-always-on;
+			};
+
+			vcc_io: REG2 {
+				regulator-name = "vcc_io";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vdd_log: REG3 {
+				regulator-name = "vdd_log";
+				regulator-min-microvolt = <1100000>;
+				regulator-max-microvolt = <1100000>;
+				regulator-always-on;
+			};
+
+			vcc_20: REG4 {
+				regulator-name = "vcc_20";
+				regulator-min-microvolt = <2000000>;
+				regulator-max-microvolt = <2000000>;
+				regulator-always-on;
+			};
+
+			vccio_sd: REG5 {
+				regulator-name = "vccio_sd";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vdd10_lcd: REG6 {
+				regulator-name = "vdd10_lcd";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+			};
+
+			vcca_18: REG7 {
+				regulator-name = "vcca_18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			vcca_33: REG8 {
+				regulator-name = "vcca_33";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			vcc_lan: REG9 {
+				regulator-name = "vcc_lan";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			vdd_10: REG10 {
+				regulator-name = "vdd_10";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+			};
+
+			vcc_18: REG11 {
+				regulator-name = "vcc_18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+
+			vcc18_lcd: REG12 {
+				regulator-name = "vcc18_lcd";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+		};
+	};
+};
+
+&i2c1 {
+	status = "okay";
+};
+
+&i2c2 {
+	status = "okay";
+};
+
+&i2c4 {
+	status = "okay";
+};
+
+&i2c5 {
+	status = "okay";
+};
+
+&pinctrl {
+	pcfg_output_high: pcfg-output-high {
+		output-high;
+	};
+
+	pcfg_output_low: pcfg-output-low {
+		output-low;
+	};
+
+	pcfg_pull_up_drv_12ma: pcfg-pull-up-drv-12ma {
+		bias-pull-up;
+		drive-strength = <12>;
+	};
+
+	act8846 {
+		pmic_int: pmic-int {
+			rockchip,pins = <0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+
+		pmic_sleep: pmic-sleep {
+			rockchip,pins = <0 0 RK_FUNC_GPIO &pcfg_output_low>;
+		};
+
+		pmic_vsel: pmic-vsel {
+			rockchip,pins = <7 1 RK_FUNC_GPIO &pcfg_output_low>;
+		};
+	};
+
+	gmac {
+		phy_int: phy-int {
+			rockchip,pins = <0 9 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+
+		phy_pmeb: phy-pmeb {
+			rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+
+		phy_rst: phy-rst {
+			rockchip,pins = <4 8 RK_FUNC_GPIO &pcfg_output_high>;
+		};
+	};
+
+	leds {
+		led_ctl: led-ctl {
+			rockchip,pins = <7 4 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	sdmmc {
+		/*
+		 * Default drive strength isn't enough to achieve even
+		 * high-speed mode on firefly board so bump up to 12ma.
+		 */
+		sdmmc_bus4: sdmmc-bus4 {
+			rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+					<6 17 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+					<6 18 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+					<6 19 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+		};
+
+		sdmmc_clk: sdmmc-clk {
+			rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_12ma>;
+		};
+
+		sdmmc_cmd: sdmmc-cmd {
+			rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+		};
+
+		sdmmc_pwr: sdmmc-pwr {
+			rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	usb_host {
+		host_vbus_drv: host-vbus-drv {
+			rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
+
+&saradc {
+	vref-supply = <&vcc_18>;
+	status = "okay";
+};
+
+&sdmmc {
+	bus-width = <4>;
+	cap-mmc-highspeed;
+	cap-sd-highspeed;
+	card-detect-delay = <200>;
+	disable-wp;
+	num-slots = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc_clk>, <&sdmmc_cmd>, <&sdmmc_cd>, <&sdmmc_bus4>;
+	vmmc-supply = <&vcc_sd>;
+	vqmmc-supply = <&vccio_sd>;
+	status = "okay";
+};
+
+&tsadc {
+	rockchip,hw-tshut-mode = <0>;
+	rockchip,hw-tshut-polarity = <0>;
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&uart3 {
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
+
+&usb_host1 {
+	status = "okay";
+};
+
+&usb_otg {
+	/*
+	 * The otg controller is the only system power source,
+	 * so needs to always stay in device mode.
+	 */
+	dr_mode = "peripheral";
+	status = "okay";
+};
+
+&vopb {
+	status = "okay";
+};
+
+&vopb_mmu {
+	status = "okay";
+};
+
+&vopl {
+	status = "okay";
+};
+
+&vopl_mmu {
+	status = "okay";
+};
+
+&wdt {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3288-popmetal.dts b/arch/arm/boot/dts/rk3288-popmetal.dts
index 2ff9689..720717b 100644
--- a/arch/arm/boot/dts/rk3288-popmetal.dts
+++ b/arch/arm/boot/dts/rk3288-popmetal.dts
@@ -41,7 +41,7 @@
  */
 
 /dts-v1/;
-
+#include <dt-bindings/input/input.h>
 #include "rk3288.dtsi"
 
 / {
@@ -62,16 +62,14 @@
 
 	gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
 		autorepeat;
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&pwrbtn>;
 
-		button@0 {
+		power {
 			gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
-			linux,code = <116>;
+			linux,code = <KEY_POWER>;
 			label = "GPIO Key Power";
 			linux,input-type = <1>;
 			wakeup-source;
@@ -162,7 +160,6 @@
 };
 
 &emmc {
-	broken-cd;
 	bus-width = <8>;
 	cap-mmc-highspeed;
 	disable-wp;
diff --git a/arch/arm/boot/dts/rk3288-r89.dts b/arch/arm/boot/dts/rk3288-r89.dts
index 510a1d0..4b8a8ad 100644
--- a/arch/arm/boot/dts/rk3288-r89.dts
+++ b/arch/arm/boot/dts/rk3288-r89.dts
@@ -41,6 +41,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/input/input.h>
 #include <dt-bindings/pwm/pwm.h>
 #include "rk3288.dtsi"
 
@@ -61,16 +62,14 @@
 
 	gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
 		autorepeat;
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&pwrbtn>;
 
-		button@0 {
+		power {
 			gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
-			linux,code = <116>;
+			linux,code = <KEY_POWER>;
 			label = "GPIO Key Power";
 			linux,input-type = <1>;
 			wakeup-source;
diff --git a/arch/arm/boot/dts/rk3288-thermal.dtsi b/arch/arm/boot/dts/rk3288-thermal.dtsi
deleted file mode 100644
index 651b962..0000000
--- a/arch/arm/boot/dts/rk3288-thermal.dtsi
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Device Tree Source for RK3288 SoC thermal
- *
- * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <dt-bindings/thermal/thermal.h>
-
-reserve_thermal: reserve_thermal {
-	polling-delay-passive = <1000>; /* milliseconds */
-	polling-delay = <5000>; /* milliseconds */
-
-	thermal-sensors = <&tsadc 0>;
-};
-
-cpu_thermal: cpu_thermal {
-	polling-delay-passive = <100>; /* milliseconds */
-	polling-delay = <5000>; /* milliseconds */
-
-	thermal-sensors = <&tsadc 1>;
-
-	trips {
-		cpu_alert0: cpu_alert0 {
-			temperature = <70000>; /* millicelsius */
-			hysteresis = <2000>; /* millicelsius */
-			type = "passive";
-		};
-		cpu_alert1: cpu_alert1 {
-			temperature = <75000>; /* millicelsius */
-			hysteresis = <2000>; /* millicelsius */
-			type = "passive";
-		};
-		cpu_crit: cpu_crit {
-			temperature = <90000>; /* millicelsius */
-			hysteresis = <2000>; /* millicelsius */
-			type = "critical";
-		};
-	};
-
-	cooling-maps {
-		map0 {
-			trip = <&cpu_alert0>;
-			cooling-device =
-				<&cpu0 THERMAL_NO_LIMIT 6>;
-		};
-		map1 {
-			trip = <&cpu_alert1>;
-			cooling-device =
-				<&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
-		};
-	};
-};
-
-gpu_thermal: gpu_thermal {
-	polling-delay-passive = <100>; /* milliseconds */
-	polling-delay = <5000>; /* milliseconds */
-
-	thermal-sensors = <&tsadc 2>;
-
-	trips {
-		gpu_alert0: gpu_alert0 {
-			temperature = <70000>; /* millicelsius */
-			hysteresis = <2000>; /* millicelsius */
-			type = "passive";
-		};
-		gpu_crit: gpu_crit {
-			temperature = <90000>; /* millicelsius */
-			hysteresis = <2000>; /* millicelsius */
-			type = "critical";
-		};
-	};
-
-	cooling-maps {
-		map0 {
-			trip = <&gpu_alert0>;
-			cooling-device =
-				<&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi b/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
index 610769d..2958c36 100644
--- a/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
@@ -54,6 +54,50 @@
 		i2c20 = &i2c_tunnel;
 	};
 
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		brightness-levels = <
+			  0   1   2   3   4   5   6   7
+			  8   9  10  11  12  13  14  15
+			 16  17  18  19  20  21  22  23
+			 24  25  26  27  28  29  30  31
+			 32  33  34  35  36  37  38  39
+			 40  41  42  43  44  45  46  47
+			 48  49  50  51  52  53  54  55
+			 56  57  58  59  60  61  62  63
+			 64  65  66  67  68  69  70  71
+			 72  73  74  75  76  77  78  79
+			 80  81  82  83  84  85  86  87
+			 88  89  90  91  92  93  94  95
+			 96  97  98  99 100 101 102 103
+			104 105 106 107 108 109 110 111
+			112 113 114 115 116 117 118 119
+			120 121 122 123 124 125 126 127
+			128 129 130 131 132 133 134 135
+			136 137 138 139 140 141 142 143
+			144 145 146 147 148 149 150 151
+			152 153 154 155 156 157 158 159
+			160 161 162 163 164 165 166 167
+			168 169 170 171 172 173 174 175
+			176 177 178 179 180 181 182 183
+			184 185 186 187 188 189 190 191
+			192 193 194 195 196 197 198 199
+			200 201 202 203 204 205 206 207
+			208 209 210 211 212 213 214 215
+			216 217 218 219 220 221 222 223
+			224 225 226 227 228 229 230 231
+			232 233 234 235 236 237 238 239
+			240 241 242 243 244 245 246 247
+			248 249 250 251 252 253 254 255>;
+		default-brightness-level = <128>;
+		enable-gpios = <&gpio7 2 GPIO_ACTIVE_HIGH>;
+		backlight-boot-off;
+		pinctrl-names = "default";
+		pinctrl-0 = <&bl_en>;
+		pwms = <&pwm0 0 1000000 0>;
+		pwm-delay-us = <10000>;
+	};
+
 	gpio-charger {
 		compatible = "gpio-charger";
 		charger-type = "mains";
@@ -62,6 +106,21 @@
 		pinctrl-0 = <&ac_present_ap>;
 	};
 
+	panel: panel {
+		compatible = "innolux,n116bge", "simple-panel";
+		status = "okay";
+		power-supply = <&vcc33_lcd>;
+		backlight = <&backlight>;
+
+		ports {
+			panel_in: port {
+				panel_in_edp: endpoint {
+					remote-endpoint = <&edp_out_panel>;
+				};
+			};
+		};
+	};
+
 	/* A non-regulated voltage from power supply or battery */
 	vccsys: vccsys {
 		compatible = "regulator-fixed";
@@ -103,6 +162,29 @@
 	};
 };
 
+&edp {
+	status = "okay";
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&edp_hpd>;
+
+	ports {
+		edp_out: port@1 {
+			reg = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			edp_out_panel: endpoint {
+				reg = <0>;
+				remote-endpoint = <&panel_in_edp>;
+			};
+		};
+	};
+};
+
+&edp_phy {
+	status = "okay";
+};
+
 &gpio_keys {
 	pinctrl-0 = <&pwr_key_l &ap_lid_int_l>;
 	lid {
@@ -115,6 +197,10 @@
 	};
 };
 
+&pwm0 {
+	status = "okay";
+};
+
 &rk808 {
 	vcc11-supply = <&vcc_5v>;
 
@@ -168,6 +254,14 @@
 	};
 };
 
+&vopl {
+	status = "okay";
+};
+
+&vopl_mmu {
+	status = "okay";
+};
+
 &pinctrl {
 	pinctrl-0 = <
 		/* Common for sleep and wake, but no owners */
@@ -184,6 +278,12 @@
 		&suspend_l_sleep
 	>;
 
+	backlight {
+		bl_en: bl-en {
+			rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
 	buttons {
 		ap_lid_int_l: ap-lid-int-l {
 			rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_up>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-jaq.dts b/arch/arm/boot/dts/rk3288-veyron-jaq.dts
index c2f52cf..3748abf 100644
--- a/arch/arm/boot/dts/rk3288-veyron-jaq.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-jaq.dts
@@ -61,6 +61,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&lcd_enable_h>;
 		regulator-name = "panel_regulator";
+		startup-delay-us = <100000>;
 		vin-supply = <&vcc33_sys>;
 	};
 
@@ -88,6 +89,48 @@
 	};
 };
 
+&backlight {
+	/* Jaq panel PWM must be >= 3%, so start non-zero brightness at 8 */
+	brightness-levels = <
+		  0
+		  8   9  10  11  12  13  14  15
+		 16  17  18  19  20  21  22  23
+		 24  25  26  27  28  29  30  31
+		 32  33  34  35  36  37  38  39
+		 40  41  42  43  44  45  46  47
+		 48  49  50  51  52  53  54  55
+		 56  57  58  59  60  61  62  63
+		 64  65  66  67  68  69  70  71
+		 72  73  74  75  76  77  78  79
+		 80  81  82  83  84  85  86  87
+		 88  89  90  91  92  93  94  95
+		 96  97  98  99 100 101 102 103
+		104 105 106 107 108 109 110 111
+		112 113 114 115 116 117 118 119
+		120 121 122 123 124 125 126 127
+		128 129 130 131 132 133 134 135
+		136 137 138 139 140 141 142 143
+		144 145 146 147 148 149 150 151
+		152 153 154 155 156 157 158 159
+		160 161 162 163 164 165 166 167
+		168 169 170 171 172 173 174 175
+		176 177 178 179 180 181 182 183
+		184 185 186 187 188 189 190 191
+		192 193 194 195 196 197 198 199
+		200 201 202 203 204 205 206 207
+		208 209 210 211 212 213 214 215
+		216 217 218 219 220 221 222 223
+		224 225 226 227 228 229 230 231
+		232 233 234 235 236 237 238 239
+		240 241 242 243 244 245 246 247
+		248 249 250 251 252 253 254 255>;
+	power-supply = <&backlight_regulator>;
+};
+
+&panel {
+	power-supply = <&panel_regulator>;
+};
+
 &rk808 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pmic_int_l &dvs_1 &dvs_2>;
@@ -142,12 +185,6 @@
 		};
 	};
 
-	edp {
-		edp_hpd: edp_hpd {
-			rockchip,pins = <7 11 RK_FUNC_2 &pcfg_pull_down>;
-		};
-	};
-
 	hdmi {
 		vcc50_hdmi_en: vcc50-hdmi-en {
 			rockchip,pins = <5 19 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-jerry.dts b/arch/arm/boot/dts/rk3288-veyron-jerry.dts
index 60bd6e9..f6b2eaa 100644
--- a/arch/arm/boot/dts/rk3288-veyron-jerry.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-jerry.dts
@@ -60,6 +60,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&lcd_enable_h>;
 		regulator-name = "panel_regulator";
+		startup-delay-us = <100000>;
 		vin-supply = <&vcc33_sys>;
 	};
 
@@ -87,6 +88,14 @@
 	};
 };
 
+&backlight {
+	power-supply = <&backlight_regulator>;
+};
+
+&panel {
+	power-supply = <&panel_regulator>;
+};
+
 &rk808 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pmic_int_l>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
index 699beb0..f72d616d 100644
--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
@@ -70,6 +70,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&lcd_enable_h>;
 		regulator-name = "panel_regulator";
+		startup-delay-us = <100000>;
 		vin-supply = <&vcc33_sys>;
 	};
 
@@ -86,6 +87,44 @@
 	};
 };
 
+&backlight {
+	/* Minnie panel PWM must be >= 1%, so start non-zero brightness at 3 */
+	brightness-levels = <
+			  0   3   4   5   6   7
+			  8   9  10  11  12  13  14  15
+			 16  17  18  19  20  21  22  23
+			 24  25  26  27  28  29  30  31
+			 32  33  34  35  36  37  38  39
+			 40  41  42  43  44  45  46  47
+			 48  49  50  51  52  53  54  55
+			 56  57  58  59  60  61  62  63
+			 64  65  66  67  68  69  70  71
+			 72  73  74  75  76  77  78  79
+			 80  81  82  83  84  85  86  87
+			 88  89  90  91  92  93  94  95
+			 96  97  98  99 100 101 102 103
+			104 105 106 107 108 109 110 111
+			112 113 114 115 116 117 118 119
+			120 121 122 123 124 125 126 127
+			128 129 130 131 132 133 134 135
+			136 137 138 139 140 141 142 143
+			144 145 146 147 148 149 150 151
+			152 153 154 155 156 157 158 159
+			160 161 162 163 164 165 166 167
+			168 169 170 171 172 173 174 175
+			176 177 178 179 180 181 182 183
+			184 185 186 187 188 189 190 191
+			192 193 194 195 196 197 198 199
+			200 201 202 203 204 205 206 207
+			208 209 210 211 212 213 214 215
+			216 217 218 219 220 221 222 223
+			224 225 226 227 228 229 230 231
+			232 233 234 235 236 237 238 239
+			240 241 242 243 244 245 246 247
+			248 249 250 251 252 253 254 255>;
+	power-supply = <&backlight_regulator>;
+};
+
 &emmc {
 	/delete-property/mmc-hs200-1_8v;
 };
@@ -135,6 +174,11 @@
 	};
 };
 
+&panel {
+	compatible = "auo,b101ean01", "simple-panel";
+	power-supply = <&panel_regulator>;
+};
+
 &rk808 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pmic_int_l &dvs_1 &dvs_2>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-pinky.dts b/arch/arm/boot/dts/rk3288-veyron-pinky.dts
index 94b56e3..d44351e 100644
--- a/arch/arm/boot/dts/rk3288-veyron-pinky.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-pinky.dts
@@ -65,6 +65,13 @@
 	pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8 &emmc_reset>;
 };
 
+&edp {
+	/delete-property/pinctrl-names;
+	/delete-property/pinctrl-0;
+
+	force-hpd;
+};
+
 &gpio_keys {
 	pinctrl-0 = <&pwr_key_h &ap_lid_int_l>;
 
diff --git a/arch/arm/boot/dts/rk3288-veyron-speedy.dts b/arch/arm/boot/dts/rk3288-veyron-speedy.dts
index b34a7b5..a0d033f 100644
--- a/arch/arm/boot/dts/rk3288-veyron-speedy.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-speedy.dts
@@ -61,6 +61,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&lcd_enable_h>;
 		regulator-name = "panel_regulator";
+		startup-delay-us = <100000>;
 		vin-supply = <&vcc33_sys>;
 	};
 
@@ -88,6 +89,10 @@
 	};
 };
 
+&backlight {
+	power-supply = <&backlight_regulator>;
+};
+
 &cpu_alert0 {
 	temperature = <65000>;
 };
@@ -96,6 +101,17 @@
 	temperature = <70000>;
 };
 
+&edp {
+	/delete-property/pinctrl-names;
+	/delete-property/pinctrl-0;
+
+	force-hpd;
+};
+
+&panel {
+	power-supply = <&panel_regulator>;
+};
+
 &rk808 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pmic_int_l>;
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 412809c..b2557bf 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -141,12 +141,27 @@
 
 &cpu0 {
 	cpu0-supply = <&vdd_cpu>;
+	operating-points = <
+		/* kHz    uV */
+		1800000 1400000
+		1704000 1350000
+		1608000 1300000
+		1512000 1250000
+		1416000 1200000
+		1200000 1100000
+		1008000 1050000
+		 816000 1000000
+		 696000  950000
+		 600000  900000
+		 408000  900000
+		 216000  900000
+		 126000  900000
+	>;
 };
 
 &emmc {
 	status = "okay";
 
-	broken-cd;
 	bus-width = <8>;
 	cap-mmc-highspeed;
 	rockchip,default-sample-phase = <158>;
@@ -347,7 +362,6 @@
 &sdio0 {
 	status = "okay";
 
-	broken-cd;
 	bus-width = <4>;
 	cap-sd-highspeed;
 	cap-sdio-irq;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 31f7e20..3b44ef3 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -445,7 +445,78 @@
 	};
 
 	thermal-zones {
-		#include "rk3288-thermal.dtsi"
+		reserve_thermal: reserve_thermal {
+			polling-delay-passive = <1000>; /* milliseconds */
+			polling-delay = <5000>; /* milliseconds */
+
+			thermal-sensors = <&tsadc 0>;
+		};
+
+		cpu_thermal: cpu_thermal {
+			polling-delay-passive = <100>; /* milliseconds */
+			polling-delay = <5000>; /* milliseconds */
+
+			thermal-sensors = <&tsadc 1>;
+
+			trips {
+				cpu_alert0: cpu_alert0 {
+					temperature = <70000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "passive";
+				};
+				cpu_alert1: cpu_alert1 {
+					temperature = <75000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "passive";
+				};
+				cpu_crit: cpu_crit {
+					temperature = <90000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert0>;
+					cooling-device =
+						<&cpu0 THERMAL_NO_LIMIT 6>;
+				};
+				map1 {
+					trip = <&cpu_alert1>;
+					cooling-device =
+						<&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		gpu_thermal: gpu_thermal {
+			polling-delay-passive = <100>; /* milliseconds */
+			polling-delay = <5000>; /* milliseconds */
+
+			thermal-sensors = <&tsadc 2>;
+
+			trips {
+				gpu_alert0: gpu_alert0 {
+					temperature = <70000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "passive";
+				};
+				gpu_crit: gpu_crit {
+					temperature = <90000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&gpu_alert0>;
+					cooling-device =
+						<&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
 	};
 
 	tsadc: tsadc@ff280000 {
@@ -659,7 +730,7 @@
 			 *	*_HDMI		HDMI
 			 *	*_MIPI_*	MIPI
 			 */
-			pd_vio {
+			pd_vio@RK3288_PD_VIO {
 				reg = <RK3288_PD_VIO>;
 				clocks = <&cru ACLK_IEP>,
 					 <&cru ACLK_ISP>,
@@ -692,7 +763,7 @@
 			 * Note: The following 3 are HEVC(H.265) clocks,
 			 * and on the ACLK_HEVC_NIU (NOC).
 			 */
-			pd_hevc {
+			pd_hevc@RK3288_PD_HEVC {
 				reg = <RK3288_PD_HEVC>;
 				clocks = <&cru ACLK_HEVC>,
 					 <&cru SCLK_HEVC_CABAC>,
@@ -704,7 +775,7 @@
 			 * (video endecoder & decoder) clocks that on the
 			 * ACLK_VCODEC_NIU and HCLK_VCODEC_NIU (NOC).
 			 */
-			pd_video {
+			pd_video@RK3288_PD_VIDEO {
 				reg = <RK3288_PD_VIDEO>;
 				clocks = <&cru ACLK_VCODEC>,
 					 <&cru HCLK_VCODEC>;
@@ -714,7 +785,7 @@
 			 * Note: ACLK_GPU is the GPU clock,
 			 * and on the ACLK_GPU_NIU (NOC).
 			 */
-			pd_gpu {
+			pd_gpu@RK3288_PD_GPU {
 				reg = <RK3288_PD_GPU>;
 				clocks = <&cru ACLK_GPU>;
 			};
@@ -745,8 +816,16 @@
 	};
 
 	grf: syscon@ff770000 {
-		compatible = "rockchip,rk3288-grf", "syscon";
+		compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";
 		reg = <0xff770000 0x1000>;
+
+		edp_phy: edp-phy {
+			compatible = "rockchip,rk3288-dp-phy";
+			clocks = <&cru SCLK_EDP_24M>;
+			clock-names = "24m";
+			#phy-cells = <0>;
+			status = "disabled";
+		};
 	};
 
 	wdt: watchdog@ff800000 {
@@ -765,7 +844,7 @@
 		clocks = <&cru HCLK_SPDIF8CH>, <&cru SCLK_SPDIF8CH>;
 		dmas = <&dmac_bus_s 3>;
 		dma-names = "tx";
-		interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&spdif_tx>;
 		rockchip,grf = <&grf>;
@@ -775,7 +854,7 @@
 	i2s: i2s@ff890000 {
 		compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s";
 		reg = <0xff890000 0x10000>;
-		interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		dmas = <&dmac_bus_s 0>, <&dmac_bus_s 1>;
@@ -821,6 +900,12 @@
 				reg = <0>;
 				remote-endpoint = <&hdmi_in_vopb>;
 			};
+
+			vopb_out_edp: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&edp_in_vopb>;
+			};
+
 			vopb_out_mipi: endpoint@2 {
 				reg = <2>;
 				remote-endpoint = <&mipi_in_vopb>;
@@ -858,6 +943,12 @@
 				reg = <0>;
 				remote-endpoint = <&hdmi_in_vopl>;
 			};
+
+			vopl_out_edp: endpoint@1 {
+				reg = <1>;
+				remote-endpoint = <&edp_in_vopl>;
+			};
+
 			vopl_out_mipi: endpoint@2 {
 				reg = <2>;
 				remote-endpoint = <&mipi_in_vopl>;
@@ -878,19 +969,16 @@
 	mipi_dsi: mipi@ff960000 {
 		compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi";
 		reg = <0xff960000 0x4000>;
-		interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru SCLK_MIPIDSI_24M>, <&cru PCLK_MIPI_DSI0>;
 		clock-names = "ref", "pclk";
+		power-domains = <&power RK3288_PD_VIO>;
 		rockchip,grf = <&grf>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
 
 		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <1>;
-
 			mipi_in: port {
 				#address-cells = <1>;
 				#size-cells = <0>;
@@ -906,6 +994,38 @@
 		};
 	};
 
+	edp: dp@ff970000 {
+		compatible = "rockchip,rk3288-dp";
+		reg = <0xff970000 0x4000>;
+		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>;
+		clock-names = "dp", "pclk";
+		phys = <&edp_phy>;
+		phy-names = "dp";
+		resets = <&cru SRST_EDP>;
+		reset-names = "dp";
+		rockchip,grf = <&grf>;
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			edp_in: port@0 {
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				edp_in_vopb: endpoint@0 {
+					reg = <0>;
+					remote-endpoint = <&vopb_out_edp>;
+				};
+				edp_in_vopl: endpoint@1 {
+					reg = <1>;
+					remote-endpoint = <&vopl_out_edp>;
+				};
+			};
+		};
+	};
+
 	hdmi: hdmi@ff980000 {
 		compatible = "rockchip,rk3288-dw-hdmi";
 		reg = <0xff980000 0x20000>;
@@ -966,7 +1086,7 @@
 		#size-cells = <0>;
 		status = "disabled";
 
-		usbphy0: usb-phy0 {
+		usbphy0: usb-phy@320 {
 			#phy-cells = <0>;
 			reg = <0x320>;
 			clocks = <&cru SCLK_OTGPHY0>;
@@ -974,7 +1094,7 @@
 			#clock-cells = <0>;
 		};
 
-		usbphy1: usb-phy1 {
+		usbphy1: usb-phy@334 {
 			#phy-cells = <0>;
 			reg = <0x334>;
 			clocks = <&cru SCLK_OTGPHY1>;
@@ -982,7 +1102,7 @@
 			#clock-cells = <0>;
 		};
 
-		usbphy2: usb-phy2 {
+		usbphy2: usb-phy@348 {
 			#phy-cells = <0>;
 			reg = <0x348>;
 			clocks = <&cru SCLK_OTGPHY2>;
@@ -1158,6 +1278,12 @@
 			};
 		};
 
+		edp {
+			edp_hpd: edp-hpd {
+				rockchip,pins = <7 11 RK_FUNC_2 &pcfg_pull_down>;
+			};
+		};
+
 		i2c0 {
 			i2c0_xfer: i2c0-xfer {
 				rockchip,pins = <0 15 RK_FUNC_1 &pcfg_pull_none>,
diff --git a/arch/arm/boot/dts/s5pv210-smdkv210.dts b/arch/arm/boot/dts/s5pv210-smdkv210.dts
index 54fcc3f..9eb6aff 100644
--- a/arch/arm/boot/dts/s5pv210-smdkv210.dts
+++ b/arch/arm/boot/dts/s5pv210-smdkv210.dts
@@ -197,7 +197,7 @@
 	display-timings {
 		native-mode = <&timing0>;
 
-		timing0: timing@0 {
+		timing0: timing {
 			/* 800x480@60Hz */
 			clock-frequency = <24373920>;
 			hactive = <800>;
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
index b0c912fe..8a394f3 100644
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -837,8 +837,8 @@
 #define PIN_PD23__ISC_FIELD		PINMUX_PIN(PIN_PD23, 6, 4)
 #define PIN_PD24			120
 #define PIN_PD24__GPIO			PINMUX_PIN(PIN_PD24, 0, 0)
-#define PIN_PD24__UTXD2			PINMUX_PIN(PIN_PD23, 1, 2)
-#define PIN_PD24__FLEXCOM4_IO3		PINMUX_PIN(PIN_PD23, 3, 3)
+#define PIN_PD24__UTXD2			PINMUX_PIN(PIN_PD24, 1, 2)
+#define PIN_PD24__FLEXCOM4_IO3		PINMUX_PIN(PIN_PD24, 3, 3)
 #define PIN_PD25			121
 #define PIN_PD25__GPIO			PINMUX_PIN(PIN_PD25, 0, 0)
 #define PIN_PD25__SPI1_SPCK		PINMUX_PIN(PIN_PD25, 1, 3)
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 78996bd..2827e7a 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -280,7 +280,7 @@
 			status = "disabled";
 
 			nfc@c0000000 {
-				compatible = "atmel,sama5d4-nfc";
+				compatible = "atmel,sama5d3-nfc";
 				#address-cells = <1>;
 				#size-cells = <1>;
 				reg = < /* NFC Command Registers */
@@ -319,6 +319,32 @@
 			#size-cells = <1>;
 			ranges;
 
+			hlcdc: hlcdc@f0000000 {
+				compatible = "atmel,sama5d2-hlcdc";
+				reg = <0xf0000000 0x2000>;
+				interrupts = <45 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+				clock-names = "periph_clk", "sys_clk", "slow_clk";
+				status = "disabled";
+
+				hlcdc-display-controller {
+					compatible = "atmel,hlcdc-display-controller";
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					port@0 {
+						#address-cells = <1>;
+						#size-cells = <0>;
+						reg = <0>;
+					};
+				};
+
+				hlcdc_pwm: hlcdc-pwm {
+					compatible = "atmel,hlcdc-pwm";
+					#pwm-cells = <3>;
+				};
+			};
+
 			ramc0: ramc@f000c000 {
 				compatible = "atmel,sama5d3-ddramc";
 				reg = <0xf000c000 0x200>;
@@ -973,6 +999,11 @@
 				status = "disabled";
 			};
 
+			sfr: sfr@f8030000 {
+				compatible = "atmel,sama5d2-sfr", "syscon";
+				reg = <0xf8030000 0x98>;
+			};
+
 			flx0: flexcom@f8034000 {
 				compatible = "atmel,sama5d2-flexcom";
 				reg = <0xf8034000 0x200>;
@@ -999,6 +1030,15 @@
 				clocks = <&clk32k>;
 			};
 
+			shdwc@f8048010 {
+				compatible = "atmel,sama5d2-shdwc";
+				reg = <0xf8048010 0x10>;
+				clocks = <&clk32k>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				atmel,wakeup-rtc-timer;
+			};
+
 			pit: timer@f8048030 {
 				compatible = "atmel,at91sam9260-pit";
 				reg = <0xf8048030 0x10>;
@@ -1010,6 +1050,7 @@
 				compatible = "atmel,sama5d4-wdt";
 				reg = <0xf8048040 0x10>;
 				interrupts = <4 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				status = "disabled";
 			};
 
@@ -1127,6 +1168,13 @@
 				status = "disabled";
 			};
 
+			trng@fc01c000 {
+				compatible = "atmel,at91sam9g45-trng";
+				reg = <0xfc01c000 0x100>;
+				interrupts = <47 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&trng_clk>;
+			};
+
 			aic: interrupt-controller@fc020000 {
 				#interrupt-cells = <3>;
 				compatible = "atmel,sama5d2-aic";
@@ -1193,6 +1241,11 @@
 				clock-names = "tdes_clk";
 				status = "okay";
 			};
+
+			chipid@fc069000 {
+				compatible = "atmel,sama5d2-chipid";
+				reg = <0xfc069000 0x8>;
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index a532791..36301bd 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -426,6 +426,13 @@
 				clock-names = "tdes_clk";
 			};
 
+			trng@f8040000 {
+				compatible = "atmel,at91sam9g45-trng";
+				reg = <0xf8040000 0x100>;
+				interrupts = <45 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&trng_clk>;
+			};
+
 			dma0: dma-controller@ffffe600 {
 				compatible = "atmel,at91sam9g45-dma";
 				reg = <0xffffe600 0x200>;
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index db1151c..4e2cc30 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1202,6 +1202,13 @@
 				status = "disabled";
 			};
 
+			trng@fc030000 {
+				compatible = "atmel,at91sam9g45-trng";
+				reg = <0xfc030000 0x100>;
+				interrupts = <53 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&trng_clk>;
+			};
+
 			adc0: adc@fc034000 {
 				compatible = "atmel,at91sam9x5-adc";
 				reg = <0xfc034000 0x100>;
@@ -1302,6 +1309,7 @@
 			watchdog@fc068640 {
 				compatible = "atmel,sama5d4-wdt";
 				reg = <0xfc068640 0x10>;
+				interrupts = <4 IRQ_TYPE_LEVEL_HIGH 7>;
 				clocks = <&clk32k>;
 				status = "disabled";
 			};
diff --git a/arch/arm/boot/dts/sh73a0-kzm9g.dts b/arch/arm/boot/dts/sh73a0-kzm9g.dts
index aa8bae3..c2d8a08 100644
--- a/arch/arm/boot/dts/sh73a0-kzm9g.dts
+++ b/arch/arm/boot/dts/sh73a0-kzm9g.dts
@@ -149,6 +149,13 @@
 			label = "SW1";
 			wakeup-source;
 		};
+
+		wakeup-key {
+			gpios = <&pfc 159 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_WAKEUP>;
+			label = "NMI";
+			wakeup-source;
+		};
 	};
 
 	sound {
@@ -329,41 +336,41 @@
 
 &pfc {
 	i2c3_pins: i2c3 {
-		renesas,groups = "i2c3_1";
-		renesas,function = "i2c3";
+		groups = "i2c3_1";
+		function = "i2c3";
 	};
 
 	mmcif_pins: mmc {
 		mux {
-			renesas,groups = "mmc0_data8_0", "mmc0_ctrl_0";
-			renesas,function = "mmc0";
+			groups = "mmc0_data8_0", "mmc0_ctrl_0";
+			function = "mmc0";
 		};
 		cfg {
-			renesas,groups = "mmc0_data8_0";
-			renesas,pins = "PORT279";
+			groups = "mmc0_data8_0";
+			pins = "PORT279";
 			bias-pull-up;
 		};
 	};
 
 	scifa4_pins: serial4 {
-		renesas,groups = "scifa4_data", "scifa4_ctrl";
-		renesas,function = "scifa4";
+		groups = "scifa4_data", "scifa4_ctrl";
+		function = "scifa4";
 	};
 
 	sdhi0_pins: sd0 {
-		renesas,groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd", "sdhi0_wp";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd", "sdhi0_wp";
+		function = "sdhi0";
 	};
 
 	sdhi2_pins: sd2 {
-		renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
-		renesas,function = "sdhi2";
+		groups = "sdhi2_data4", "sdhi2_ctrl";
+		function = "sdhi2";
 	};
 
 	fsia_pins: sounda {
-		renesas,groups = "fsia_mclk_in", "fsia_sclk_in",
-				 "fsia_data_in", "fsia_data_out";
-		renesas,function = "fsia";
+		groups = "fsia_mclk_in", "fsia_sclk_in",
+			 "fsia_data_in", "fsia_data_out";
+		function = "fsia";
 	};
 };
 
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index bf825ca..c4f434c 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -43,7 +43,7 @@
 	timer@f0000600 {
 		compatible = "arm,cortex-a9-twd-timer";
 		reg = <0xf0000600 0x20>;
-		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
 		clocks = <&twd_clk>;
 	};
 
@@ -602,39 +602,33 @@
 		ranges;
 
 		/* External root clocks */
-		extalr_clk: extalr_clk {
+		extalr_clk: extalr {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <32768>;
-			clock-output-names = "extalr";
 		};
-		extal1_clk: extal1_clk {
+		extal1_clk: extal1 {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <26000000>;
-			clock-output-names = "extal1";
 		};
-		extal2_clk: extal2_clk {
+		extal2_clk: extal2 {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-output-names = "extal2";
 		};
-		extcki_clk: extcki_clk {
+		extcki_clk: extcki {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-output-names = "extcki";
 		};
-		fsiack_clk: fsiack_clk {
+		fsiack_clk: fsiack {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "fsiack";
 		};
-		fsibck_clk: fsibck_clk {
+		fsibck_clk: fsibck {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <0>;
-			clock-output-names = "fsibck";
 		};
 
 		/* Special CPG clocks */
@@ -650,7 +644,7 @@
 		};
 
 		/* Variable factor clocks (DIV6) */
-		vclk1_clk: vclk1_clk@e6150008 {
+		vclk1_clk: vclk1@e6150008 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150008 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
@@ -658,9 +652,8 @@
 				 <&extalr_clk>, <&cpg_clocks SH73A0_CLK_MAIN>,
 				 <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vclk1";
 		};
-		vclk2_clk: vclk2_clk@e615000c {
+		vclk2_clk: vclk2@e615000c {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe615000c 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
@@ -668,9 +661,8 @@
 				 <&extalr_clk>, <&cpg_clocks SH73A0_CLK_MAIN>,
 				 <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vclk2";
 		};
-		vclk3_clk: vclk3_clk@e615001c {
+		vclk3_clk: vclk3@e615001c {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe615001c 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
@@ -678,7 +670,6 @@
 				 <&extalr_clk>, <&cpg_clocks SH73A0_CLK_MAIN>,
 				 <0>;
 			#clock-cells = <0>;
-			clock-output-names = "vclk3";
 		};
 		zb_clk: zb_clk@e6150010 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
@@ -688,168 +679,148 @@
 			#clock-cells = <0>;
 			clock-output-names = "zb";
 		};
-		flctl_clk: flctl_clk@e6150014 {
+		flctl_clk: flctlck@e6150014 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150014 4>;
 			clocks = <&pll1_div2_clk>, <0>,
 				 <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "flctlck";
 		};
-		sdhi0_clk: sdhi0_clk@e6150074 {
+		sdhi0_clk: sdhi0ck@e6150074 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150074 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
 				 <&pll1_div13_clk>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "sdhi0ck";
 		};
-		sdhi1_clk: sdhi1_clk@e6150078 {
+		sdhi1_clk: sdhi1ck@e6150078 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150078 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
 				 <&pll1_div13_clk>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "sdhi1ck";
 		};
-		sdhi2_clk: sdhi2_clk@e615007c {
+		sdhi2_clk: sdhi2ck@e615007c {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe615007c 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
 				 <&pll1_div13_clk>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "sdhi2ck";
 		};
-		fsia_clk: fsia_clk@e6150018 {
+		fsia_clk: fsia@e6150018 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150018 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
 				 <&fsiack_clk>, <&fsiack_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "fsia";
 		};
-		fsib_clk: fsib_clk@e6150090 {
+		fsib_clk: fsib@e6150090 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150090 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
 				 <&fsibck_clk>, <&fsibck_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "fsib";
 		};
-		sub_clk: sub_clk@e6150080 {
+		sub_clk: sub@e6150080 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150080 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
 				 <&extal2_clk>, <&extal2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "sub";
 		};
-		spua_clk: spua_clk@e6150084 {
+		spua_clk: spua@e6150084 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150084 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
 				 <&extal2_clk>, <&extal2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "spua";
 		};
-		spuv_clk: spuv_clk@e6150094 {
+		spuv_clk: spuv@e6150094 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150094 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
 				 <&extal2_clk>, <&extal2_clk>;
 			#clock-cells = <0>;
-			clock-output-names = "spuv";
 		};
-		msu_clk: msu_clk@e6150088 {
+		msu_clk: msu@e6150088 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150088 4>;
 			clocks = <&pll1_div2_clk>, <0>,
 				 <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "msu";
 		};
-		hsi_clk: hsi_clk@e615008c {
+		hsi_clk: hsi@e615008c {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe615008c 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
 				 <&pll1_div7_clk>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "hsi";
 		};
-		mfg1_clk: mfg1_clk@e6150098 {
+		mfg1_clk: mfg1@e6150098 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150098 4>;
 			clocks = <&pll1_div2_clk>, <0>,
 				 <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "mfg1";
 		};
-		mfg2_clk: mfg2_clk@e615009c {
+		mfg2_clk: mfg2@e615009c {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe615009c 4>;
 			clocks = <&pll1_div2_clk>, <0>,
 				 <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "mfg2";
 		};
-		dsit_clk: dsit_clk@e6150060 {
+		dsit_clk: dsit@e6150060 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150060 4>;
 			clocks = <&pll1_div2_clk>, <0>,
 				 <&cpg_clocks SH73A0_CLK_PLL2>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "dsit";
 		};
-		dsi0p_clk: dsi0p_clk@e6150064 {
+		dsi0p_clk: dsi0pck@e6150064 {
 			compatible = "renesas,sh73a0-div6-clock", "renesas,cpg-div6-clock";
 			reg = <0xe6150064 4>;
 			clocks = <&pll1_div2_clk>, <&cpg_clocks SH73A0_CLK_PLL2>,
 				 <&cpg_clocks SH73A0_CLK_MAIN>, <&extal2_clk>,
 				 <&extcki_clk>, <0>, <0>, <0>;
 			#clock-cells = <0>;
-			clock-output-names = "dsi0pck";
 		};
 
 		/* Fixed factor clocks */
-		main_div2_clk: main_div2_clk {
+		main_div2_clk: main_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks SH73A0_CLK_MAIN>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "main_div2";
 		};
-		pll1_div2_clk: pll1_div2_clk {
+		pll1_div2_clk: pll1_div2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks SH73A0_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <2>;
 			clock-mult = <1>;
-			clock-output-names = "pll1_div2";
 		};
-		pll1_div7_clk: pll1_div7_clk {
+		pll1_div7_clk: pll1_div7 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks SH73A0_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <7>;
 			clock-mult = <1>;
-			clock-output-names = "pll1_div7";
 		};
-		pll1_div13_clk: pll1_div13_clk {
+		pll1_div13_clk: pll1_div13 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks SH73A0_CLK_PLL1>;
 			#clock-cells = <0>;
 			clock-div = <13>;
 			clock-mult = <1>;
-			clock-output-names = "pll1_div13";
 		};
-		twd_clk: twd_clk {
+		twd_clk: twd {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks SH73A0_CLK_Z>;
 			#clock-cells = <0>;
 			clock-div = <4>;
 			clock-mult = <1>;
-			clock-output-names = "twd";
 		};
 
 		/* Gate clocks */
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index b89cbde3b..9f48141 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -831,6 +831,8 @@
 			interrupts = <0 125 4>;
 			clocks = <&usb_mp_clk>;
 			clock-names = "otg";
+			resets = <&rst USB0_RESET>;
+			reset-names = "dwc2";
 			phys = <&usbphy0>;
 			phy-names = "usb2-phy";
 			status = "disabled";
@@ -842,6 +844,8 @@
 			interrupts = <0 128 4>;
 			clocks = <&usb_mp_clk>;
 			clock-names = "otg";
+			resets = <&rst USB1_RESET>;
+			reset-names = "dwc2";
 			phys = <&usbphy0>;
 			phy-names = "usb2-phy";
 			status = "disabled";
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index 1c5e139..17e81dc 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -78,10 +78,13 @@
 					     <0 87 IRQ_TYPE_LEVEL_HIGH>,
 					     <0 88 IRQ_TYPE_LEVEL_HIGH>,
 					     <0 89 IRQ_TYPE_LEVEL_HIGH>,
-					     <0 90 IRQ_TYPE_LEVEL_HIGH>;
+					     <0 90 IRQ_TYPE_LEVEL_HIGH>,
+					     <0 91 IRQ_TYPE_LEVEL_HIGH>;
 				#dma-cells = <1>;
 				#dma-channels = <8>;
 				#dma-requests = <32>;
+				clocks = <&l4_main_clk>;
+				clock-names = "apb_pclk";
 			};
 		};
 
@@ -362,6 +365,7 @@
 						compatible = "altr,socfpga-a10-gate-clk";
 						clocks = <&sdmmc_free_clk>;
 						clk-gate = <0xC8 5>;
+						clk-phase = <0 135>;
 					};
 
 					qspi_clk: qspi_clk {
@@ -589,7 +593,7 @@
 			reg = <0xff808000 0x1000>;
 			interrupts = <0 98 IRQ_TYPE_LEVEL_HIGH>;
 			fifo-depth = <0x400>;
-			clocks = <&l4_mp_clk>, <&sdmmc_free_clk>;
+			clocks = <&l4_mp_clk>, <&sdmmc_clk>;
 			clock-names = "biu", "ciu";
 			status = "disabled";
 		};
@@ -599,6 +603,26 @@
 			reg = <0xffe00000 0x40000>;
 		};
 
+		eccmgr: eccmgr@ffd06000 {
+			compatible = "altr,socfpga-a10-ecc-manager";
+			altr,sysmgr-syscon = <&sysmgr>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 0 IRQ_TYPE_LEVEL_HIGH>;
+			ranges;
+
+			l2-ecc@ffd06010 {
+				compatible = "altr,socfpga-a10-l2-ecc";
+				reg = <0xffd06010 0x4>;
+			};
+
+			ocram-ecc@ff8c3000 {
+				compatible = "altr,socfpga-a10-ocram-ecc";
+				reg = <0xff8c3000 0x400>;
+			};
+		};
+
 		rst: rstmgr@ffd05000 {
 			#reset-cells = <1>;
 			compatible = "altr,rst-mgr";
@@ -689,6 +713,8 @@
 			interrupts = <0 95 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&usb_clk>;
 			clock-names = "otg";
+			resets = <&rst USB0_RESET>;
+			reset-names = "dwc2";
 			phys = <&usbphy0>;
 			phy-names = "usb2-phy";
 			status = "disabled";
@@ -700,6 +726,8 @@
 			interrupts = <0 96 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&usb_clk>;
 			clock-names = "otg";
+			resets = <&rst USB1_RESET>;
+			reset-names = "dwc2";
 			phys = <&usbphy0>;
 			phy-names = "usb2-phy";
 			status = "disabled";
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts
index dbbb751..8a7dfa4 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts
@@ -21,6 +21,7 @@
 &mmc {
 	status = "okay";
 	num-slots = <1>;
+	cap-sd-highspeed;
 	broken-cd;
 	bus-width = <4>;
 };
diff --git a/arch/arm/boot/dts/socfpga_cyclone5.dtsi b/arch/arm/boot/dts/socfpga_cyclone5.dtsi
index 06db951..a05e3df 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5.dtsi
+++ b/arch/arm/boot/dts/socfpga_cyclone5.dtsi
@@ -38,12 +38,6 @@
 			cap-sd-highspeed;
 		};
 
-		ethernet@ff702000 {
-			phy-mode = "rgmii";
-			phy-addr = <0xffffffff>; /* probe for phy addr */
-			status = "okay";
-		};
-
 		sysmgr@ffd08000 {
 			cpu1-start-addr = <0xffd080c4>;
 		};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index b61f22f..02e22f5 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -39,6 +39,90 @@
 		ethernet0 = &gmac1;
 	};
 
+	leds {
+		compatible = "gpio-leds";
+
+		hps_led0 {
+			label = "hps:blue:led0";
+			gpios = <&portb 24 0>;	/* HPS_GPIO53 */
+			linux,default-trigger = "heartbeat";
+		};
+
+		hps_led1 {
+			label = "hps:blue:led1";
+			gpios = <&portb 25 0>;	/* HPS_GPIO54 */
+			linux,default-trigger = "heartbeat";
+		};
+
+		hps_led2 {
+			label = "hps:blue:led2";
+			gpios = <&portb 26 0>;	/* HPS_GPIO55 */
+			linux,default-trigger = "heartbeat";
+		};
+
+		hps_led3 {
+			label = "hps:blue:led3";
+			gpios = <&portb 27 0>;	/* HPS_GPIO56 */
+			linux,default-trigger = "heartbeat";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+
+		hps_sw0 {
+			label = "hps_sw0";
+			gpios = <&portc 20 0>;	/* HPS_GPI7 */
+			linux,input-type = <5>;	/* EV_SW */
+			linux,code = <0x0>;	/* SW_LID */
+		};
+
+		hps_sw1 {
+			label = "hps_sw1";
+			gpios = <&portc 19 0>;	/* HPS_GPI6 */
+			linux,input-type = <5>;	/* EV_SW */
+			linux,code = <0x5>;	/* SW_DOCK */
+		};
+
+		hps_sw2 {
+			label = "hps_sw2";
+			gpios = <&portc 18 0>;	/* HPS_GPI5 */
+			linux,input-type = <5>;	/* EV_SW */
+			linux,code = <0xa>;	/* SW_KEYPAD_SLIDE */
+		};
+
+		hps_sw3 {
+			label = "hps_sw3";
+			gpios = <&portc 17 0>;	/* HPS_GPI4 */
+			linux,input-type = <5>;	/* EV_SW */
+			linux,code = <0xc>;	/* SW_ROTATE_LOCK */
+		};
+
+		hps_hkey0 {
+			label = "hps_hkey0";
+			gpios = <&portc 21 1>;	/* HPS_GPI8 */
+			linux,code = <187>;	/* KEY_F17 */
+		};
+
+		hps_hkey1 {
+			label = "hps_hkey1";
+			gpios = <&portc 22 1>;	/* HPS_GPI9 */
+			linux,code = <188>;	/* KEY_F18 */
+		};
+
+		hps_hkey2 {
+			label = "hps_hkey2";
+			gpios = <&portc 23 1>;	/* HPS_GPI10 */
+			linux,code = <189>;	/* KEY_F19 */
+		};
+
+		hps_hkey3 {
+			label = "hps_hkey3";
+			gpios = <&portc 24 1>;	/* HPS_GPI11 */
+			linux,code = <190>;	/* KEY_F20 */
+		};
+	};
+
 	regulator_3_3v: vcc3p3-regulator {
 		compatible = "regulator-fixed";
 		regulator-name = "VCC3P3";
@@ -61,7 +145,15 @@
 	rxc-skew-ps = <2000>;
 };
 
-&gpio2 {
+&gpio0 {	/* GPIO 0..29 */
+	status = "okay";
+};
+
+&gpio1 {	/* GPIO 30..57 */
+	status = "okay";
+};
+
+&gpio2 {	/* GPIO 58..66 (HLGPI 0..13 at offset 13) */
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
index 019dd2f..e1a61f2 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
@@ -36,6 +36,7 @@
 };
 
 &gmac1 {
+	phy-mode = "rgmii";
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
new file mode 100644
index 0000000..a3601e4
--- /dev/null
+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
@@ -0,0 +1,310 @@
+/*
+ *  Copyright (C) 2015 Marek Vasut <marex@denx.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of
+ *     the License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this file; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "socfpga_cyclone5.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "samtec VIN|ING FPGA";
+	compatible = "altr,socfpga-cyclone5", "altr,socfpga";
+
+	chosen {
+		bootargs = "console=ttyS0,115200";
+	};
+
+	memory {
+		name = "memory";
+		device_type = "memory";
+		reg = <0x0 0x40000000>; /* 1GB */
+	};
+
+	aliases {
+		/*
+		 * This allows the ethaddr uboot environment variable contents
+		 * to be added to the gmac1 device tree blob.
+		 */
+		ethernet0 = &gmac1;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		hps_led0 {
+			label = "hps:green:led0";	/* ALIVE_LED_GR */
+			gpios = <&portb 19 0>;	/* HPS_GPIO48 */
+			linux,default-trigger = "heartbeat";
+		};
+
+		hps_led1 {
+			label = "hps:red:led0";		/* ALIVE_LED_RD */
+			gpios = <&portb 24 0>;	/* HPS_GPIO53 */
+			linux,default-trigger = "none";
+		};
+
+		hps_led2 {
+			label = "hps:green:led1";	/* LINK2HOST_LED_GR */
+			gpios = <&portb 25 0>;	/* HPS_GPIO54 */
+			linux,default-trigger = "heartbeat";
+		};
+
+		hps_led3 {
+			label = "hps:red:led1";		/* LINK2HOST_LED_RD */
+			gpios = <&portc 7 0>;	/* HPS_GPIO65 */
+			linux,default-trigger = "none";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+
+		hps_temp0 {
+			label = "BTN_0";			/* TEMP_OS */
+			gpios = <&portc 18 GPIO_ACTIVE_LOW>;	/* HPS_GPIO60 */
+			linux,code = <BTN_0>;
+		};
+
+		hps_hkey0 {
+			label = "BTN_1";			/* DIS_PWR */
+			gpios = <&portc 19 GPIO_ACTIVE_LOW>;	/* HPS_GPIO61 */
+			linux,code = <BTN_1>;
+		};
+
+		hps_hkey1 {
+			label = "hps_hkey1";			/* POWER_DOWN */
+			gpios = <&portc 20 GPIO_ACTIVE_LOW>;	/* HPS_GPIO62 */
+			linux,code = <KEY_POWER>;
+		};
+	};
+
+	regulator-usb-nrst {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_nrst";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&portb 5 GPIO_ACTIVE_HIGH>;
+		startup-delay-us = <70000>;
+		enable-active-high;
+		regulator-always-on;
+	};
+};
+
+&gmac1 {
+	status = "okay";
+	phy-mode = "rgmii";
+
+	snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>;
+	snps,reset-active-low;
+	snps,reset-delays-us = <10000 10000 10000>;
+
+	mdio0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "snps,dwmac-mdio";
+		phy1: ethernet-phy@1 {
+			reg = <1>;
+			rxd0-skew-ps = <0>;
+			rxd1-skew-ps = <0>;
+			rxd2-skew-ps = <0>;
+			rxd3-skew-ps = <0>;
+			txen-skew-ps = <0>;
+			txc-skew-ps = <2600>;
+			rxdv-skew-ps = <0>;
+			rxc-skew-ps = <2000>;
+		};
+	};
+};
+
+&gpio0 {	/* GPIO 0..29 */
+	status = "okay";
+};
+
+&gpio1 {	/* GPIO 30..57 */
+	status = "okay";
+};
+
+&gpio2 {	/* GPIO 58..66 (HLGPI 0..13 at offset 13) */
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	gpio: pca9557@1f {
+		compatible = "nxp,pca9557";
+		reg = <0x1f>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	temp: lm75@48 {
+		compatible = "lm75";
+		reg = <0x48>;
+	};
+
+	at24@50 {
+		compatible = "at24,24c01";
+		pagesize = <8>;
+		reg = <0x50>;
+	};
+
+	i2cswitch@70 {
+		compatible = "nxp,pca9548";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x70>;
+
+		i2c@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0>;
+			eeprom@51 {
+				compatible = "at,24c01";
+				pagesize = <8>;
+				reg = <0x51>;
+			};
+		};
+
+		i2c@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <1>;
+			eeprom@51 {
+				compatible = "at,24c01";
+				pagesize = <8>;
+				reg = <0x51>;
+			};
+		};
+
+		i2c@2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <2>;
+			eeprom@51 {
+				compatible = "at,24c01";
+				pagesize = <8>;
+				reg = <0x51>;
+			};
+		};
+
+		i2c@3 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <3>;
+			eeprom@51 {
+				compatible = "at,24c01";
+				pagesize = <8>;
+				reg = <0x51>;
+			};
+		};
+
+		i2c@4 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <4>;
+			eeprom@51 {
+				compatible = "at,24c01";
+				pagesize = <8>;
+				reg = <0x51>;
+			};
+		};
+
+		i2c@5 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <5>;
+			eeprom@51 {
+				compatible = "at,24c01";
+				pagesize = <8>;
+				reg = <0x51>;
+			};
+		};
+
+		i2c@6 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <6>;
+			eeprom@51 {
+				compatible = "at,24c01";
+				pagesize = <8>;
+				reg = <0x51>;
+			};
+		};
+
+		i2c@7 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <7>;
+			eeprom@51 {
+				compatible = "at,24c01";
+				pagesize = <8>;
+				reg = <0x51>;
+			};
+		};
+	};
+};
+
+&i2c1 {
+	status = "okay";
+	clock-frequency = <100000>;
+
+	at24@50 {
+		compatible = "at24,24c02";
+		pagesize = <8>;
+		reg = <0x50>;
+	};
+};
+
+&usb0 {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usb1 {
+	dr_mode = "peripheral";
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 14594ce..449acf0 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -117,7 +117,7 @@
 			chan_priority = <1>;
 			block_size = <0xfff>;
 			dma-masters = <2>;
-			data_width = <3 3>;
+			data-width = <8 8>;
 		};
 
 		dma@eb000000 {
@@ -133,7 +133,7 @@
 			chan_allocation_order = <1>;
 			chan_priority = <1>;
 			block_size = <0xfff>;
-			data_width = <3 3>;
+			data-width = <8 8>;
 		};
 
 		fsmc: flash@b0000000 {
diff --git a/arch/arm/boot/dts/ste-ccu9540.dts b/arch/arm/boot/dts/ste-ccu9540.dts
index c8b8158..b3b9bb8 100644
--- a/arch/arm/boot/dts/ste-ccu9540.dts
+++ b/arch/arm/boot/dts/ste-ccu9540.dts
@@ -49,7 +49,7 @@
 			cap-mmc-highspeed;
 			vmmc-supply = <&ab8500_ldo_aux3_reg>;
 
-			cd-gpios  = <&gpio7 6 0x4>; // 230
+			cd-gpios  = <&gpio7 6 GPIO_ACTIVE_HIGH>; // 230
 			cd-inverted;
 
 			status = "okay";
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 341f5b7..6ae5683 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -10,8 +10,10 @@
  */
 
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/mfd/dbx500-prcmu.h>
 #include <dt-bindings/arm/ux500_pm_domains.h>
+#include <dt-bindings/gpio/gpio.h>
 #include "skeleton.dtsi"
 
 / {
@@ -203,14 +205,14 @@
 		L2: l2-cache {
 			compatible = "arm,pl310-cache";
 			reg = <0xa0412000 0x1000>;
-			interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
 			cache-unified;
 			cache-level = <2>;
 		};
 
 		pmu {
 			compatible = "arm,cortex-a9-pmu";
-			interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pm_domains: pm_domains0 {
@@ -253,7 +255,7 @@
 			/* Nomadik System Timer */
 			compatible = "st,nomadik-mtu";
 			reg = <0xa03c6000 0x1000>;
-			interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
 
 			clocks = <&prcmu_clk PRCMU_TIMCLK>, <&prcc_pclk 6 6>;
 			clock-names = "timclk", "apb_pclk";
@@ -262,7 +264,7 @@
 		timer@a0410600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0xa0410600 0x20>;
-			interrupts = <1 13 0x304>; /* IRQ level high per-CPU */
+			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_HIGH)>;
 
 			clocks = <&smp_twd_clk>;
 		};
@@ -270,14 +272,14 @@
 		watchdog@a0410620 {
 			compatible = "arm,cortex-a9-twd-wdt";
 			reg = <0xa0410620 0x20>;
-			interrupts = <1 14 0x304>;
+			interrupts = <GIC_PPI 14 (GIC_CPU_MASK_RAW(3) | IRQ_TYPE_LEVEL_HIGH)>;
 			clocks = <&smp_twd_clk>;
 		};
 
 		rtc@80154000 {
 			compatible = "arm,rtc-pl031", "arm,primecell";
 			reg = <0x80154000 0x1000>;
-			interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
 
 			clocks = <&rtc_clk>;
 			clock-names = "apb_pclk";
@@ -287,7 +289,7 @@
 			compatible = "stericsson,db8500-gpio",
 				"st,nomadik-gpio";
 			reg =  <0x8012e000 0x80>;
-			interrupts = <0 119 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			st,supports-sleepmode;
@@ -302,7 +304,7 @@
 			compatible = "stericsson,db8500-gpio",
 				"st,nomadik-gpio";
 			reg =  <0x8012e080 0x80>;
-			interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			st,supports-sleepmode;
@@ -317,7 +319,7 @@
 			compatible = "stericsson,db8500-gpio",
 				"st,nomadik-gpio";
 			reg =  <0x8000e000 0x80>;
-			interrupts = <0 121 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			st,supports-sleepmode;
@@ -332,7 +334,7 @@
 			compatible = "stericsson,db8500-gpio",
 				"st,nomadik-gpio";
 			reg =  <0x8000e080 0x80>;
-			interrupts = <0 122 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			st,supports-sleepmode;
@@ -347,7 +349,7 @@
 			compatible = "stericsson,db8500-gpio",
 				"st,nomadik-gpio";
 			reg =  <0x8000e100 0x80>;
-			interrupts = <0 123 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			st,supports-sleepmode;
@@ -362,7 +364,7 @@
 			compatible = "stericsson,db8500-gpio",
 				"st,nomadik-gpio";
 			reg =  <0x8000e180 0x80>;
-			interrupts = <0 124 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			st,supports-sleepmode;
@@ -377,7 +379,7 @@
 			compatible = "stericsson,db8500-gpio",
 				"st,nomadik-gpio";
 			reg =  <0x8011e000 0x80>;
-			interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			st,supports-sleepmode;
@@ -392,7 +394,7 @@
 			compatible = "stericsson,db8500-gpio",
 				"st,nomadik-gpio";
 			reg =  <0x8011e080 0x80>;
-			interrupts = <0 126 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			st,supports-sleepmode;
@@ -407,7 +409,7 @@
 			compatible = "stericsson,db8500-gpio",
 				"st,nomadik-gpio";
 			reg =  <0xa03fe000 0x80>;
-			interrupts = <0 127 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
 			st,supports-sleepmode;
@@ -429,7 +431,7 @@
 		usb_per5@a03e0000 {
 			compatible = "stericsson,db8500-musb";
 			reg = <0xa03e0000 0x10000>;
-			interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "mc";
 
 			dr_mode = "otg";
@@ -467,7 +469,7 @@
 			compatible = "stericsson,db8500-dma40", "stericsson,dma40";
 			reg = <0x801C0000 0x1000 0x40010000 0x800>;
 			reg-names = "base", "lcpa";
-			interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
 
 			#dma-cells = <3>;
 			memcpy-channels = <56 57 58 59 60>;
@@ -479,7 +481,7 @@
 			compatible = "stericsson,db8500-prcmu";
 			reg = <0x80157000 0x2000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>;
 			reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
-			interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <1>;
 			interrupt-controller;
@@ -597,7 +599,7 @@
 			ab8500 {
 				compatible = "stericsson,ab8500";
 				interrupt-parent = <&intc>;
-				interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
 
@@ -785,7 +787,7 @@
 		i2c@80004000 {
 			compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
 			reg = <0x80004000 0x1000>;
-			interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
 
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -800,7 +802,7 @@
 		i2c@80122000 {
 			compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
 			reg = <0x80122000 0x1000>;
-			interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
 
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -816,7 +818,7 @@
 		i2c@80128000 {
 			compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
 			reg = <0x80128000 0x1000>;
-			interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
 
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -832,7 +834,7 @@
 		i2c@80110000 {
 			compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
 			reg = <0x80110000 0x1000>;
-			interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
 
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -848,7 +850,7 @@
 		i2c@8012a000 {
 			compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
 			reg = <0x8012a000 0x1000>;
-			interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
 
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -864,7 +866,7 @@
 		ssp@80002000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x80002000 0x1000>;
-			interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&prcc_kclk 3 1>, <&prcc_pclk 3 1>;
@@ -878,7 +880,7 @@
 		ssp@80003000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x80003000 0x1000>;
-			interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&prcc_kclk 3 2>, <&prcc_pclk 3 2>;
@@ -892,7 +894,7 @@
 		spi@8011a000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x8011a000 0x1000>;
-			interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			/* Same clock wired to kernel and pclk */
@@ -907,7 +909,7 @@
 		spi@80112000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x80112000 0x1000>;
-			interrupts = <0 96 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			/* Same clock wired to kernel and pclk */
@@ -922,7 +924,7 @@
 		spi@80111000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x80111000 0x1000>;
-			interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			/* Same clock wired to kernel and pclk */
@@ -937,7 +939,7 @@
 		spi@80129000 {
 			compatible = "arm,pl022", "arm,primecell";
 			reg = <0x80129000 0x1000>;
-			interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			/* Same clock wired to kernel and pclk */
@@ -952,7 +954,7 @@
 		ux500_serial0: uart@80120000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x80120000 0x1000>;
-			interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
 
 			dmas = <&dma 13 0 0x2>, /* Logical - DevToMem */
 			       <&dma 13 0 0x0>; /* Logical - MemToDev */
@@ -967,7 +969,7 @@
 		ux500_serial1: uart@80121000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x80121000 0x1000>;
-			interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 
 			dmas = <&dma 12 0 0x2>, /* Logical - DevToMem */
 			       <&dma 12 0 0x0>; /* Logical - MemToDev */
@@ -982,7 +984,7 @@
 		ux500_serial2: uart@80007000 {
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x80007000 0x1000>;
-			interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
 
 			dmas = <&dma 11 0 0x2>, /* Logical - DevToMem */
 			       <&dma 11 0 0x0>; /* Logical - MemToDev */
@@ -997,7 +999,7 @@
 		sdi0_per1@80126000 {
 			compatible = "arm,pl18x", "arm,primecell";
 			reg = <0x80126000 0x1000>;
-			interrupts = <0 60 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
 
 			dmas = <&dma 29 0 0x2>, /* Logical - DevToMem */
 			       <&dma 29 0 0x0>; /* Logical - MemToDev */
@@ -1013,7 +1015,7 @@
 		sdi1_per2@80118000 {
 			compatible = "arm,pl18x", "arm,primecell";
 			reg = <0x80118000 0x1000>;
-			interrupts = <0 50 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
 
 			dmas = <&dma 32 0 0x2>, /* Logical - DevToMem */
 			       <&dma 32 0 0x0>; /* Logical - MemToDev */
@@ -1029,7 +1031,7 @@
 		sdi2_per3@80005000 {
 			compatible = "arm,pl18x", "arm,primecell";
 			reg = <0x80005000 0x1000>;
-			interrupts = <0 41 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
 
 			dmas = <&dma 28 0 0x2>, /* Logical - DevToMem */
 			       <&dma 28 0 0x0>; /* Logical - MemToDev */
@@ -1045,7 +1047,7 @@
 		sdi3_per2@80119000 {
 			compatible = "arm,pl18x", "arm,primecell";
 			reg = <0x80119000 0x1000>;
-			interrupts = <0 59 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
 
 			dmas = <&dma 41 0 0x2>, /* Logical - DevToMem */
 			       <&dma 41 0 0x0>; /* Logical - MemToDev */
@@ -1061,7 +1063,7 @@
 		sdi4_per2@80114000 {
 			compatible = "arm,pl18x", "arm,primecell";
 			reg = <0x80114000 0x1000>;
-			interrupts = <0 99 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
 
 			dmas = <&dma 42 0 0x2>, /* Logical - DevToMem */
 			       <&dma 42 0 0x0>; /* Logical - MemToDev */
@@ -1077,7 +1079,7 @@
 		sdi5_per3@80008000 {
 			compatible = "arm,pl18x", "arm,primecell";
 			reg = <0x80008000 0x1000>;
-			interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 
 			dmas = <&dma 43 0 0x2>, /* Logical - DevToMem */
 			       <&dma 43 0 0x0>; /* Logical - MemToDev */
@@ -1093,7 +1095,7 @@
 		msp0: msp@80123000 {
 			compatible = "stericsson,ux500-msp-i2s";
 			reg = <0x80123000 0x1000>;
-			interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
 			v-ape-supply = <&db8500_vape_reg>;
 
 			dmas = <&dma 31 0 0x12>, /* Logical - DevToMem - HighPrio */
@@ -1109,7 +1111,7 @@
 		msp1: msp@80124000 {
 			compatible = "stericsson,ux500-msp-i2s";
 			reg = <0x80124000 0x1000>;
-			interrupts = <0 62 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
 			v-ape-supply = <&db8500_vape_reg>;
 
 			/* This DMA channel only exist on DB8500 v1 */
@@ -1126,7 +1128,7 @@
 		msp2: msp@80117000 {
 			compatible = "stericsson,ux500-msp-i2s";
 			reg = <0x80117000 0x1000>;
-			interrupts = <0 98 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
 			v-ape-supply = <&db8500_vape_reg>;
 
 			dmas = <&dma 14 0 0x12>, /* Logical  - DevToMem - HighPrio */
@@ -1143,7 +1145,7 @@
 		msp3: msp@80125000 {
 			compatible = "stericsson,ux500-msp-i2s";
 			reg = <0x80125000 0x1000>;
-			interrupts = <0 62 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
 			v-ape-supply = <&db8500_vape_reg>;
 
 			/* This DMA channel only exist on DB8500 v2 */
@@ -1176,7 +1178,7 @@
 			      <0xa0351000 0x1000>, /* DSI link 1 */
 			      <0xa0352000 0x1000>, /* DSI link 2 */
 			      <0xa0353000 0x1000>; /* DSI link 3 */
-			interrupts = <0 48 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&prcmu_clk PRCMU_MCDECLK>, /* Main MCDE clock */
 				 <&prcmu_clk PRCMU_LCDCLK>, /* LCD clock */
 				 <&prcmu_clk PRCMU_PLLDSI>, /* HDMI clock */
@@ -1190,7 +1192,7 @@
 		cryp@a03cb000 {
 			compatible = "stericsson,ux500-cryp";
 			reg = <0xa03cb000 0x1000>;
-			interrupts = <0 15 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
 
 			v-ape-supply = <&db8500_vape_reg>;
 			clocks = <&prcc_pclk 6 1>;
diff --git a/arch/arm/boot/dts/ste-href-stuib.dtsi b/arch/arm/boot/dts/ste-href-stuib.dtsi
index c3987ad..6f72075 100644
--- a/arch/arm/boot/dts/ste-href-stuib.dtsi
+++ b/arch/arm/boot/dts/ste-href-stuib.dtsi
@@ -22,13 +22,13 @@
 
 		button@139 {
 			/* Proximity sensor */
-			gpios = <&gpio6 25 0x4>;
+			gpios = <&gpio6 25 GPIO_ACTIVE_HIGH>;
 			linux,code = <11>; /* SW_FRONT_PROXIMITY */
 			label = "SFH7741 Proximity Sensor";
 		};
 		button@145 {
 			/* Hall sensor */
-			gpios = <&gpio4 17 0x4>;
+			gpios = <&gpio4 17 GPIO_ACTIVE_HIGH>;
 			linux,code = <0>; /* SW_LID */
 			label = "HED54XXU11 Hall Effect Sensor";
 		};
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
index 55f9d0c..fc5e8ce 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
@@ -24,13 +24,13 @@
 
 		button@139 {
 			/* Proximity sensor */
-			gpios = <&gpio6 25 0x4>;
+			gpios = <&gpio6 25 GPIO_ACTIVE_HIGH>;
 			linux,code = <11>; /* SW_FRONT_PROXIMITY */
 			label = "SFH7741 Proximity Sensor";
 		};
 		button@145 {
 			/* Hall sensor */
-			gpios = <&gpio4 17 0x4>;
+			gpios = <&gpio4 17 GPIO_ACTIVE_HIGH>;
 			linux,code = <0>; /* SW_LID */
 			label = "HED54XXU11 Hall Effect Sensor";
 		};
@@ -93,14 +93,15 @@
 				/* Accelerometer */
 				compatible = "st,lsm303dlh-accel";
 				st,drdy-int-pin = <1>;
+				drive-open-drain;
 				reg = <0x18>;
 				vdd-supply = <&ab8500_ldo_aux1_reg>;
 				vddio-supply = <&db8500_vsmps2_reg>;
 				pinctrl-names = "default";
 				pinctrl-0 = <&accel_tvk_mode>;
 				interrupt-parent = <&gpio2>;
-				interrupts = <18 IRQ_TYPE_EDGE_RISING>,
-					     <19 IRQ_TYPE_EDGE_RISING>;
+				interrupts = <18 IRQ_TYPE_EDGE_FALLING>,
+					     <19 IRQ_TYPE_EDGE_FALLING>;
 			};
 			lsm303dlh@1e {
 				/*
@@ -118,14 +119,15 @@
 				/* Accelerometer */
 				compatible = "st,lis331dl-accel";
 				st,drdy-int-pin = <1>;
+				drive-open-drain;
 				reg = <0x1c>;
 				vdd-supply = <&ab8500_ldo_aux1_reg>;
 				vddio-supply = <&db8500_vsmps2_reg>;
 				pinctrl-names = "default";
 				pinctrl-0 = <&accel_tvk_mode>;
 				interrupt-parent = <&gpio2>;
-				interrupts = <18 IRQ_TYPE_EDGE_RISING>,
-					     <19 IRQ_TYPE_EDGE_RISING>;
+				interrupts = <18 IRQ_TYPE_EDGE_FALLING>,
+					     <19 IRQ_TYPE_EDGE_FALLING>;
 			};
 			ak8974@0f {
 				/* Magnetometer */
@@ -216,7 +218,7 @@
 					/* Accelerometer interrupt lines 1 & 2 */
 					tvk_cfg {
 						pins = "GPIO82_C1", "GPIO83_D3";
-						ste,config = <&gpio_in_pd>;
+						ste,config = <&gpio_in_pu>;
 					};
 				};
 			};
diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi
index b0278f4..ece222d 100644
--- a/arch/arm/boot/dts/ste-hrefprev60.dtsi
+++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi
@@ -18,7 +18,7 @@
 / {
 	gpio_keys {
 		button@1 {
-			gpios = <&tc3589x_gpio 7 0x4>;
+			gpios = <&tc3589x_gpio 7 GPIO_ACTIVE_HIGH>;
 		};
 	};
 
@@ -68,12 +68,12 @@
 
 		// External Micro SD slot
 		sdi0_per1@80126000 {
-			cd-gpios  = <&tc3589x_gpio 3 0x4>;
+			cd-gpios  = <&tc3589x_gpio 3 GPIO_ACTIVE_HIGH>;
 		};
 
 		vmmci: regulator-gpio {
-			gpios = <&tc3589x_gpio 18 0x4>;
-			enable-gpio = <&tc3589x_gpio 17 0x4>;
+			gpios = <&tc3589x_gpio 18 GPIO_ACTIVE_HIGH>;
+			enable-gpio = <&tc3589x_gpio 17 GPIO_ACTIVE_HIGH>;
 		};
 
 		pinctrl {
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
index 149a72e..45d7af3 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -20,12 +20,12 @@
 	soc {
 		// External Micro SD slot
 		sdi0_per1@80126000 {
-			cd-gpios  = <&gpio2 31 0x4>; // 95
+			cd-gpios  = <&gpio2 31 GPIO_ACTIVE_HIGH>; // 95
 		};
 
 		vmmci: regulator-gpio {
-			gpios = <&gpio0 5 0x4>;
-			enable-gpio = <&gpio5 9 0x4>;
+			gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+			enable-gpio = <&gpio5 9 GPIO_ACTIVE_HIGH>;
 		};
 
 		pinctrl {
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 4a21c64..d35aa88 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -57,8 +57,15 @@
 				};
 			};
 		};
+		lis3lv02dl {
+			lis3lv02dl_nhk_mode: lis3lv02dl_nhk {
+				nhk_cfg1 {
+					pins = "GPIO82_C10"; // IRQ line
+					ste,input = <0>;
+				};
+			};
+		};
 	};
-
 	src@101e0000 {
 		/* These chrystal outputs are not used on this board */
 		disable-sxtalo;
@@ -86,6 +93,10 @@
 		lis3lv02dl@1d {
 			/* Accelerometer */
 			compatible = "st,lis3lv02dl-accel";
+			interrupt-parent = <&gpio2>;
+			interrupts = <18 IRQ_TYPE_EDGE_RISING>; // GPIO 82
+			pinctrl-0 = <&lis3lv02dl_nhk_mode>;
+			pinctrl-names = "default";
 			reg = <0x1d>;
 		};
 		stmpe0: stmpe2401@43 {
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index e2be533..d2d532a 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -748,6 +748,9 @@
 			clocks = <&uart0clk>, <&pclkuart0>;
 			clock-names = "uartclk", "apb_pclk";
 			status = "disabled";
+			dmas = <&dmac0 14 1>,
+			       <&dmac0 15 1>;
+			dma-names = "rx", "tx";
 		};
 
 		uart1: uart@101fb000 {
@@ -759,6 +762,9 @@
 			clock-names = "uartclk", "apb_pclk";
 			pinctrl-names = "default";
 			pinctrl-0 = <&uart1_default_mux>;
+			dmas = <&dmac1 22 1>,
+			       <&dmac1 23 1>;
+			dma-names = "rx", "tx";
 		};
 
 		uart2: uart@101f2000 {
@@ -769,6 +775,9 @@
 			clocks = <&uart2clk>, <&pclkuart2>;
 			clock-names = "uartclk", "apb_pclk";
 			status = "disabled";
+			dmas = <&dmac1 30 1>,
+			       <&dmac1 31 1>;
+			dma-names = "rx", "tx";
 		};
 
 		rng: rng@101b0000 {
@@ -813,5 +822,34 @@
 			pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
 			vmmc-supply = <&vmmc_regulator>;
 		};
+
+		dmac0: dma-controller@10130000 {
+			compatible = "arm,pl080", "arm,primecell";
+			reg = <0x10130000 0x1000>;
+			interrupt-parent = <&vica>;
+			interrupts = <15>;
+			clocks = <&hclkdma0>;
+			clock-names = "apb_pclk";
+			lli-bus-interface-ahb1;
+			lli-bus-interface-ahb2;
+			mem-bus-interface-ahb2;
+			memcpy-burst-size = <256>;
+			memcpy-bus-width = <32>;
+			#dma-cells = <2>;
+		};
+		dmac1: dma-controller@10150000 {
+			compatible = "arm,pl080", "arm,primecell";
+			reg = <0x10150000 0x1000>;
+			interrupt-parent = <&vica>;
+			interrupts = <13>;
+			clocks = <&hclkdma1>;
+			clock-names = "apb_pclk";
+			lli-bus-interface-ahb1;
+			lli-bus-interface-ahb2;
+			mem-bus-interface-ahb2;
+			memcpy-burst-size = <256>;
+			memcpy-bus-width = <32>;
+			#dma-cells = <2>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 08f8207..36e84ef 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -50,35 +50,35 @@
 			wakeup-source;
 			linux,code = <2>;
 			label = "userpb";
-			gpios = <&gpio1 0 0x4>;
+			gpios = <&gpio1 0 GPIO_ACTIVE_HIGH>;
 		};
 		button@2 {
 			debounce_interval = <50>;
 			wakeup-source;
 			linux,code = <3>;
 			label = "extkb1";
-			gpios = <&gpio4 23 0x4>;
+			gpios = <&gpio4 23 GPIO_ACTIVE_HIGH>;
 		};
 		button@3 {
 			debounce_interval = <50>;
 			wakeup-source;
 			linux,code = <4>;
 			label = "extkb2";
-			gpios = <&gpio4 24 0x4>;
+			gpios = <&gpio4 24 GPIO_ACTIVE_HIGH>;
 		};
 		button@4 {
 			debounce_interval = <50>;
 			wakeup-source;
 			linux,code = <5>;
 			label = "extkb3";
-			gpios = <&gpio5 1 0x4>;
+			gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
 		};
 		button@5 {
 			debounce_interval = <50>;
 			wakeup-source;
 			linux,code = <6>;
 			label = "extkb4";
-			gpios = <&gpio5 2 0x4>;
+			gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
 		};
 	};
 
@@ -88,7 +88,7 @@
 		pinctrl-0 = <&gpioled_snowball_mode>;
 		used-led {
 			label = "user_led";
-			gpios = <&gpio4 14 0x4>;
+			gpios = <&gpio4 14 GPIO_ACTIVE_HIGH>;
 			default-state = "on";
 			linux,default-trigger = "heartbeat";
 		};
@@ -155,8 +155,8 @@
 		vmmci: regulator-gpio {
 			compatible = "regulator-gpio";
 
-			gpios = <&gpio7 4 0x4>;
-			enable-gpio = <&gpio6 25 0x4>;
+			gpios = <&gpio7 4 GPIO_ACTIVE_HIGH>;
+			enable-gpio = <&gpio6 25 GPIO_ACTIVE_HIGH>;
 
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2900000>;
@@ -182,8 +182,7 @@
 			pinctrl-0 = <&sdi0_default_mode>;
 			pinctrl-1 = <&sdi0_sleep_mode>;
 
-			cd-gpios  = <&gpio6 26 0x4>; // 218
-			cd-inverted;
+			cd-gpios  = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218
 
 			status = "okay";
 		};
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 81f8121..ad8ba10 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -15,6 +15,36 @@
 	#address-cells = <1>;
 	#size-cells = <1>;
 
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		gp0_reserved: rproc@40000000 {
+			compatible = "shared-dma-pool";
+			reg = <0x40000000 0x01000000>;
+			no-map;
+		};
+
+		gp1_reserved: rproc@41000000 {
+			compatible = "shared-dma-pool";
+			reg = <0x41000000 0x01000000>;
+			no-map;
+		};
+
+		audio_reserved: rproc@42000000 {
+			compatible = "shared-dma-pool";
+			reg = <0x42000000 0x01000000>;
+			no-map;
+		};
+
+		dmu_reserved: rproc@43000000 {
+			compatible = "shared-dma-pool";
+			reg = <0x43000000 0x01000000>;
+			no-map;
+		};
+	};
+
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -22,15 +52,35 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0>;
+
 			/* u-boot puts hpen in SBC dmem at 0xa4 offset */
 			cpu-release-addr = <0x94100A4>;
+
+					 /* kHz     uV   */
+			operating-points = <1500000 0
+					    1200000 0
+					    800000  0
+					    500000  0>;
+
+			clocks = <&clk_m_a9>;
+			clock-names = "cpu";
+			clock-latency = <100000>;
+			cpu0-supply = <&pwm_regulator>;
+			st,syscfg = <&syscfg_core 0x8e0>;
 		};
 		cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <1>;
+
 			/* u-boot puts hpen in SBC dmem at 0xa4 offset */
 			cpu-release-addr = <0x94100A4>;
+
+					 /* kHz     uV   */
+			operating-points = <1500000 0
+					    1200000 0
+					    800000  0
+					    500000  0>;
 		};
 	};
 
@@ -534,7 +584,7 @@
 			reg = <0x8788000 0x1000>;
 			interrupts = <GIC_SPI 130 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&clk_s_d3_flexgen CLK_LPC_1>;
-			st,lpc-mode = <ST_LPC_MODE_RTC>;
+			st,lpc-mode = <ST_LPC_MODE_CLKSRC>;
 		};
 
 		sata0: sata@9b20000 {
@@ -694,5 +744,79 @@
 			clocks          = <&clk_sysin>;
 			status		= "okay";
 		};
+
+		mailbox0: mailbox@8f00000  {
+			compatible	= "st,stih407-mailbox";
+			reg		= <0x8f00000 0x1000>;
+			interrupts	= <GIC_SPI 1 IRQ_TYPE_NONE>;
+			#mbox-cells	= <2>;
+			mbox-name	= "a9";
+			status		= "okay";
+		};
+
+		mailbox1: mailbox@8f01000 {
+			compatible	= "st,stih407-mailbox";
+			reg		= <0x8f01000 0x1000>;
+			#mbox-cells	= <2>;
+			mbox-name	= "st231_gp_1";
+			status		= "okay";
+		};
+
+		mailbox2: mailbox@8f02000 {
+			compatible	= "st,stih407-mailbox";
+			reg		= <0x8f02000 0x1000>;
+			#mbox-cells	= <2>;
+			mbox-name	= "st231_gp_0";
+			status		= "okay";
+		};
+
+		mailbox3: mailbox@8f03000 {
+			compatible	= "st,stih407-mailbox";
+			reg		= <0x8f03000 0x1000>;
+			#mbox-cells	= <2>;
+			mbox-name	= "st231_audio_video";
+			status		= "okay";
+		};
+
+		st231_gp0: remote-processor {
+			compatible	= "st,st231-rproc";
+			memory-region	= <&gp0_reserved>;
+			resets		= <&softreset STIH407_ST231_GP0_SOFTRESET>;
+			reset-names	= "sw_reset";
+			clocks		= <&clk_s_c0_flexgen CLK_ST231_GP_0>;
+			clock-frequency	= <600000000>;
+			st,syscfg	= <&syscfg_core 0x22c>;
+		};
+
+
+		st231_gp1: remote-processor {
+			compatible	= "st,st231-rproc";
+			memory-region	= <&gp1_reserved>;
+			resets		= <&softreset STIH407_ST231_GP1_SOFTRESET>;
+			reset-names	= "sw_reset";
+			clocks		= <&clk_s_c0_flexgen CLK_ST231_GP_1>;
+			clock-frequency = <600000000>;
+			st,syscfg	= <&syscfg_core 0x220>;
+		};
+
+		st231_audio: remote-processor {
+			compatible	= "st,st231-rproc";
+			memory-region	= <&audio_reserved>;
+			resets		= <&softreset STIH407_ST231_AUD_SOFTRESET>;
+			reset-names	= "sw_reset";
+			clocks		= <&clk_s_c0_flexgen CLK_ST231_AUD_0>;
+			clock-frequency	= <600000000>;
+			st,syscfg	= <&syscfg_core 0x228>;
+		};
+
+		st231_dmu: remote-processor {
+			compatible	= "st,st231-rproc";
+			memory-region	= <&dmu_reserved>;
+			resets		= <&softreset STIH407_ST231_DMU_SOFTRESET>;
+			reset-names	= "sw_reset";
+			clocks		= <&clk_s_c0_flexgen CLK_ST231_DMU>;
+			clock-frequency	= <600000000>;
+			st,syscfg	= <&syscfg_core 0x224>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts
index 97570cb..c92a1ae 100644
--- a/arch/arm/boot/dts/sun4i-a10-a1000.dts
+++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts
@@ -87,6 +87,24 @@
 		enable-active-high;
 		gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>;
 	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "On-board SPDIF";
+
+		simple-audio-card,cpu {
+			sound-dai = <&spdif>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&spdif_out>;
+		};
+	};
+
+	spdif_out: spdif-out {
+		#sound-dai-cells = <0>;
+		compatible = "linux,spdif-dit";
+	};
 };
 
 &ahci {
@@ -188,6 +206,12 @@
 	status = "okay";
 };
 
+&spdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spdif_tx_pins_a>;
+	status = "okay";
+};
+
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_pins_a>;
diff --git a/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts b/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
new file mode 100644
index 0000000..893497e
--- /dev/null
+++ b/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun4i-a10.dtsi"
+#include "sunxi-common-regulators.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+	model = "Dserve DSRV9703C";
+	compatible = "dserve,dsrv9703c", "allwinner,sun4i-a10";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pinctrl-names = "default";
+		pinctrl-0 = <&bl_en_pin_dsrv9703c>;
+		pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
+		brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
+		default-brightness-level = <8>;
+		enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	haptics {
+		compatible = "regulator-haptic";
+		haptic-supply = <&reg_motor>;
+		min-microvolt = <3000000>;
+		max-microvolt = <3000000>;
+	};
+
+	reg_motor: reg_motor {
+		compatible = "regulator-fixed";
+		pinctrl-names = "default";
+		pinctrl-0 = <&motor_pins>;
+		regulator-name = "vcc-motor";
+		regulator-min-microvolt = <3000000>;
+		regulator-max-microvolt = <3000000>;
+		enable-active-high;
+		gpio = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
+	};
+};
+
+&codec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&codec_pa_pin>;
+	allwinner,pa-gpios = <&pio 7 15 GPIO_ACTIVE_HIGH>; /* PH15 */
+	status = "okay";
+};
+
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins_a>;
+	status = "okay";
+
+	axp209: pmic@34 {
+		reg = <0x34>;
+		interrupts = <0>;
+	};
+};
+
+#include "axp209.dtsi"
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins_a>;
+	/* pull-ups and devices require AXP209 LDO3 */
+	status = "failed";
+};
+
+&i2c2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c2_pins_a>;
+	status = "okay";
+
+	ft5406ee8: touchscreen@38 {
+		compatible = "edt,edt-ft5406";
+		reg = <0x38>;
+		interrupt-parent = <&pio>;
+		interrupts = <7 21 IRQ_TYPE_EDGE_FALLING>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&touchscreen_pins>;
+		reset-gpios = <&pio 1 13 GPIO_ACTIVE_LOW>;
+		touchscreen-size-x = <1024>;
+		touchscreen-size-y = <768>;
+	};
+};
+
+&lradc {
+	vref-supply = <&reg_ldo2>;
+	status = "okay";
+
+	button@400 {
+		label = "Volume Down";
+		linux,code = <KEY_VOLUMEDOWN>;
+		channel = <0>;
+		voltage = <400000>;
+	};
+
+	button@800 {
+		label = "Volume Up";
+		linux,code = <KEY_VOLUMEUP>;
+		channel = <0>;
+		voltage = <800000>;
+	};
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
+	cd-inverted;
+	status = "okay";
+};
+
+&otg_sram {
+	status = "okay";
+};
+
+&pio {
+	bl_en_pin_dsrv9703c: bl_en_pin@0 {
+		allwinner,pins = "PH7";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	codec_pa_pin: codec_pa_pin@0 {
+		allwinner,pins = "PH15";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	motor_pins: motor_pins@0 {
+		allwinner,pins = "PB3";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	touchscreen_pins: touchscreen_pins@0 {
+		allwinner,pins = "PB13";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PH4";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PH5";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
+	};
+};
+
+&pwm {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pwm0_pins_a>;
+	status = "okay";
+};
+
+&reg_dcdc2 {
+	regulator-always-on;
+	regulator-min-microvolt = <1000000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+	regulator-always-on;
+	regulator-min-microvolt = <1250000>;
+	regulator-max-microvolt = <1250000>;
+	regulator-name = "vdd-int-dll";
+};
+
+&reg_ldo1 {
+	regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+	regulator-always-on;
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "avcc";
+};
+
+&reg_usb0_vbus {
+	status = "okay";
+};
+
+&reg_usb2_vbus {
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
+	status = "okay";
+};
+
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+	usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
+	usb2_vbus-supply = <&reg_usb2_vbus>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 2c8f5e6..a03e56f 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -96,7 +96,7 @@
 			allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
 			clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
 				 <&ahb_gates 44>, <&ahb_gates 46>,
-				 <&dram_gates 25>, <&dram_gates 26>;
+				 <&dram_gates 5>, <&dram_gates 25>, <&dram_gates 26>;
 			status = "disabled";
 		};
 	};
@@ -184,6 +184,15 @@
 			clock-output-names = "osc24M";
 		};
 
+		osc3M: osc3M_clk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <8>;
+			clock-mult = <1>;
+			clocks = <&osc24M>;
+			clock-output-names = "osc3M";
+		};
+
 		osc32k: clk@0 {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
@@ -208,6 +217,23 @@
 					     "pll2-4x", "pll2-8x";
 		};
 
+		pll3: clk@01c20010 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-pll3-clk";
+			reg = <0x01c20010 0x4>;
+			clocks = <&osc3M>;
+			clock-output-names = "pll3";
+		};
+
+		pll3x2: pll3x2_clk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <1>;
+			clock-mult = <2>;
+			clocks = <&pll3>;
+			clock-output-names = "pll3-2x";
+		};
+
 		pll4: clk@01c20018 {
 			#clock-cells = <0>;
 			compatible = "allwinner,sun4i-a10-pll1-clk";
@@ -232,6 +258,23 @@
 			clock-output-names = "pll6_sata", "pll6_other", "pll6";
 		};
 
+		pll7: clk@01c20030 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-pll3-clk";
+			reg = <0x01c20030 0x4>;
+			clocks = <&osc3M>;
+			clock-output-names = "pll7";
+		};
+
+		pll7x2: pll7x2_clk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <1>;
+			clock-mult = <2>;
+			clocks = <&pll7>;
+			clock-output-names = "pll7-2x";
+		};
+
 		/* dummy is 200M */
 		cpu: cpu@01c20054 {
 			#clock-cells = <0>;
@@ -477,6 +520,17 @@
 			clock-output-names = "ir1";
 		};
 
+		spdif_clk: clk@01c200c0 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-mod1-clk";
+			reg = <0x01c200c0 0x4>;
+			clocks = <&pll2 SUN4I_A10_PLL2_8X>,
+				 <&pll2 SUN4I_A10_PLL2_4X>,
+				 <&pll2 SUN4I_A10_PLL2_2X>,
+				 <&pll2 SUN4I_A10_PLL2_1X>;
+			clock-output-names = "spdif";
+		};
+
 		usb_clk: clk@01c200cc {
 			#clock-cells = <1>;
 			#reset-cells = <1>;
@@ -1006,6 +1060,13 @@
 				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
+
+			spdif_tx_pins_a: spdif@0 {
+				allwinner,pins = "PB13";
+				allwinner,function = "spdif";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+			};
 		};
 
 		timer@01c20c00 {
@@ -1034,6 +1095,19 @@
 			status = "disabled";
 		};
 
+		spdif: spdif@01c21000 {
+			#sound-dai-cells = <0>;
+			compatible = "allwinner,sun4i-a10-spdif";
+			reg = <0x01c21000 0x400>;
+			interrupts = <13>;
+			clocks = <&apb0_gates 1>, <&spdif_clk>;
+			clock-names = "apb", "spdif";
+			dmas = <&dma SUN4I_DMA_NORMAL 2>,
+			       <&dma SUN4I_DMA_NORMAL 2>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
 		ir0: ir@01c21800 {
 			compatible = "allwinner,sun4i-a10-ir";
 			clocks = <&apb0_gates 6>, <&ir0_clk>;
diff --git a/arch/arm/boot/dts/sun5i-a13-difrnce-dit4350.dts b/arch/arm/boot/dts/sun5i-a13-difrnce-dit4350.dts
new file mode 100644
index 0000000..6546fa0
--- /dev/null
+++ b/arch/arm/boot/dts/sun5i-a13-difrnce-dit4350.dts
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun5i-a13.dtsi"
+#include "sunxi-common-regulators.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+	model = "Difrnce DIT4350";
+	compatible = "difrnce,dit4350", "allwinner,sun5i-a13";
+
+	aliases {
+		serial0 = &uart1;
+	};
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
+		brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
+		default-brightness-level = <8>;
+		/* TODO: backlight uses axp gpio1 as enable pin */
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&ehci0 {
+	status = "okay";
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins_a>;
+	status = "okay";
+
+	axp209: pmic@34 {
+		reg = <0x34>;
+		interrupts = <0>;
+	};
+};
+
+#include "axp209.dtsi"
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins_a>;
+	status = "okay";
+
+	pcf8563: rtc@51 {
+		compatible = "nxp,pcf8563";
+		reg = <0x51>;
+	};
+};
+
+&lradc {
+	vref-supply = <&reg_ldo2>;
+	status = "okay";
+
+	button@200 {
+		label = "Volume Up";
+		linux,code = <KEY_VOLUMEUP>;
+		channel = <0>;
+		voltage = <200000>;
+	};
+
+	button@400 {
+		label = "Volume Down";
+		linux,code = <KEY_VOLUMEDOWN>;
+		channel = <0>;
+		voltage = <400000>;
+	};
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_d709>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	cd-gpios = <&pio 6 0 GPIO_ACTIVE_HIGH>; /* PG0 */
+	cd-inverted;
+	status = "okay";
+};
+
+&otg_sram {
+	status = "okay";
+};
+
+&pio {
+	mmc0_cd_pin_d709: mmc0_cd_pin@0 {
+		allwinner,pins = "PG0";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PG1";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
+	};
+
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PG2";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+};
+
+&pwm {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pwm0_pins>;
+	status = "okay";
+};
+
+&reg_dcdc2 {
+	regulator-always-on;
+	regulator-min-microvolt = <1000000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+	regulator-always-on;
+	regulator-min-microvolt = <1250000>;
+	regulator-max-microvolt = <1250000>;
+	regulator-name = "vdd-int-pll";
+};
+
+&reg_ldo1 {
+	regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+	regulator-always-on;
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "avcc";
+};
+
+&reg_ldo3 {
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-wifi";
+};
+
+&reg_usb0_vbus {
+	gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins_b>;
+	status = "okay";
+};
+
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&usb0_vbus_pin_a {
+	allwinner,pins = "PG12";
+};
+
+&usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+	usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
+	usb1_vbus-supply = <&reg_ldo3>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts b/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
index 7fbb0b0..6efbba6 100644
--- a/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
+++ b/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
@@ -123,7 +123,7 @@
 
 &mmc0 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_inet98fv2>;
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_d709>;
 	vmmc-supply = <&reg_vcc3v3>;
 	bus-width = <4>;
 	cd-gpios = <&pio 6 0 GPIO_ACTIVE_HIGH>; /* PG0 */
@@ -131,27 +131,12 @@
 	status = "okay";
 };
 
-&mmc2 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&mmc2_pins_a>;
-	vmmc-supply = <&reg_vcc3v3>;
-	bus-width = <8>;
-	non-removable;
-	status = "okay";
-
-	mmccard: mmccard@0 {
-		reg = <0>;
-		compatible = "mmc-card";
-		broken-hpi;
-	};
-};
-
 &otg_sram {
 	status = "okay";
 };
 
 &pio {
-	mmc0_cd_pin_inet98fv2: mmc0_cd_pin@0 {
+	mmc0_cd_pin_d709: mmc0_cd_pin@0 {
 		allwinner,pins = "PG0";
 		allwinner,function = "gpio_in";
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
diff --git a/arch/arm/boot/dts/sun5i-a13-inet-98v-rev2.dts b/arch/arm/boot/dts/sun5i-a13-inet-98v-rev2.dts
index 6fa54b6..1b11ec9 100644
--- a/arch/arm/boot/dts/sun5i-a13-inet-98v-rev2.dts
+++ b/arch/arm/boot/dts/sun5i-a13-inet-98v-rev2.dts
@@ -123,21 +123,6 @@
 	status = "okay";
 };
 
-&mmc2 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&mmc2_pins_a>;
-	vmmc-supply = <&reg_vcc3v3>;
-	bus-width = <8>;
-	non-removable;
-	status = "okay";
-
-	mmccard: mmccard@0 {
-		reg = <0>;
-		compatible = "mmc-card";
-		broken-hpi;
-	};
-};
-
 &otg_sram {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
index ad84fe4..081329e 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
@@ -109,6 +109,10 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	mmc0_cd_pin_olinuxinom: mmc0_cd_pin@0 {
 		allwinner,pins = "PG0";
@@ -124,6 +128,27 @@
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
 
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PG2";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PG1";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
+	};
+
+	usb0_vbus_pin_olinuxinom: usb0_vbus_pin@0 {
+		allwinner,pins = "PG12";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
 	usb1_vbus_pin_olinuxinom: usb1_vbus_pin@0 {
 		allwinner,pins = "PG11";
 		allwinner,function = "gpio_out";
@@ -132,6 +157,12 @@
 	};
 };
 
+&reg_usb0_vbus {
+	pinctrl-0 = <&usb0_vbus_pin_olinuxinom>;
+	gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
 &reg_usb1_vbus {
 	pinctrl-0 = <&usb1_vbus_pin_olinuxinom>;
 	gpio = <&pio 6 11 GPIO_ACTIVE_HIGH>;
@@ -144,7 +175,17 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+	usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index d910d3a..263d46d 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -61,7 +61,8 @@
 			compatible = "allwinner,simple-framebuffer",
 				     "simple-framebuffer";
 			allwinner,pipeline = "de_be0-lcd0";
-			clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>;
+			clocks = <&ahb_gates 36>, <&ahb_gates 44>, <&de_be_clk>,
+				 <&tcon_ch0_clk>, <&dram_gates 26>;
 			status = "disabled";
 		};
 	};
@@ -110,8 +111,8 @@
 					<10>, <13>,
 					<14>, <20>,
 					<21>, <22>,
-				        <28>, <32>, <36>,
-				        <40>, <44>,
+					<28>, <32>, <34>,
+					<36>, <40>, <44>,
 					<46>, <51>,
 					<52>;
 			clock-output-names = "ahb_usbotg", "ahb_ehci",
@@ -120,8 +121,8 @@
 					     "ahb_mmc2", "ahb_nand",
 					     "ahb_sdram", "ahb_spi0",
 					     "ahb_spi1", "ahb_spi2",
-					     "ahb_stimer", "ahb_ve", "ahb_lcd",
-					     "ahb_csi", "ahb_de_be",
+					     "ahb_stimer", "ahb_ve", "ahb_tve",
+					     "ahb_lcd", "ahb_csi", "ahb_de_be",
 					     "ahb_de_fe", "ahb_iep",
 					     "ahb_mali400";
 		};
@@ -149,6 +150,61 @@
 					     "apb1_i2c2", "apb1_uart1",
 					     "apb1_uart3";
 		};
+
+		dram_gates: clk@01c20100 {
+			#clock-cells = <1>;
+			compatible = "allwinner,sun5i-a13-dram-gates-clk",
+				     "allwinner,sun4i-a10-gates-clk";
+			reg = <0x01c20100 0x4>;
+			clocks = <&pll5 0>;
+			clock-indices = <0>,
+					<1>,
+					<25>,
+					<26>,
+					<29>,
+					<31>;
+			clock-output-names = "dram_ve",
+					     "dram_csi",
+					     "dram_de_fe",
+					     "dram_de_be",
+					     "dram_ace",
+					     "dram_iep";
+		};
+
+		de_be_clk: clk@01c20104 {
+			#clock-cells = <0>;
+			#reset-cells = <0>;
+			compatible = "allwinner,sun4i-a10-display-clk";
+			reg = <0x01c20104 0x4>;
+			clocks = <&pll3>, <&pll7>, <&pll5 1>;
+			clock-output-names = "de-be";
+		};
+
+		de_fe_clk: clk@01c2010c {
+			#clock-cells = <0>;
+			#reset-cells = <0>;
+			compatible = "allwinner,sun4i-a10-display-clk";
+			reg = <0x01c2010c 0x4>;
+			clocks = <&pll3>, <&pll7>, <&pll5 1>;
+			clock-output-names = "de-fe";
+		};
+
+		tcon_ch0_clk: clk@01c20118 {
+			#clock-cells = <0>;
+			#reset-cells = <1>;
+			compatible = "allwinner,sun4i-a10-tcon-ch0-clk";
+			reg = <0x01c20118 0x4>;
+			clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+			clock-output-names = "tcon-ch0-sclk";
+		};
+
+		tcon_ch1_clk: clk@01c2012c {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-tcon-ch1-clk";
+			reg = <0x01c2012c 0x4>;
+			clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+			clock-output-names = "tcon-ch1-sclk";
+		};
 	};
 
 	soc@01c00000 {
diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
index f6898c6..a8d8b45 100644
--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
+++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
@@ -66,6 +66,10 @@
 	};
 };
 
+&be0 {
+	status = "okay";
+};
+
 &codec {
 	status = "okay";
 };
@@ -188,6 +192,14 @@
 	status = "okay";
 };
 
+&tcon0 {
+	status = "okay";
+};
+
+&tve0 {
+	status = "okay";
+};
+
 &uart1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart1_pins_b>;
diff --git a/arch/arm/boot/dts/sun5i-r8.dtsi b/arch/arm/boot/dts/sun5i-r8.dtsi
index 0ef8656..c04cf69 100644
--- a/arch/arm/boot/dts/sun5i-r8.dtsi
+++ b/arch/arm/boot/dts/sun5i-r8.dtsi
@@ -51,9 +51,147 @@
 			compatible = "allwinner,simple-framebuffer",
 				     "simple-framebuffer";
 			allwinner,pipeline = "de_be0-lcd0-tve0";
-			clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
-				 <&ahb_gates 44>;
+			clocks = <&ahb_gates 34>, <&ahb_gates 36>,
+				 <&ahb_gates 44>, <&de_be_clk>,
+				 <&tcon_ch1_clk>, <&dram_gates 26>;
 			status = "disabled";
 		};
 	};
+
+	soc@01c00000 {
+		tve0: tv-encoder@01c0a000 {
+			compatible = "allwinner,sun4i-a10-tv-encoder";
+			reg = <0x01c0a000 0x1000>;
+			clocks = <&ahb_gates 34>;
+			resets = <&tcon_ch0_clk 0>;
+			status = "disabled";
+
+			port {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				tve0_in_tcon0: endpoint@0 {
+					reg = <0>;
+					remote-endpoint = <&tcon0_out_tve0>;
+				};
+			};
+		};
+
+		tcon0: lcd-controller@01c0c000 {
+			compatible = "allwinner,sun5i-a13-tcon";
+			reg = <0x01c0c000 0x1000>;
+			interrupts = <44>;
+			resets = <&tcon_ch0_clk 1>;
+			reset-names = "lcd";
+			clocks = <&ahb_gates 36>,
+				 <&tcon_ch0_clk>,
+				 <&tcon_ch1_clk>;
+			clock-names = "ahb",
+				      "tcon-ch0",
+				      "tcon-ch1";
+			clock-output-names = "tcon-pixel-clock";
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				tcon0_in: port@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <0>;
+
+					tcon0_in_be0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&be0_out_tcon0>;
+					};
+				};
+
+				tcon0_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+
+					tcon0_out_tve0: endpoint@1 {
+						reg = <1>;
+						remote-endpoint = <&tve0_in_tcon0>;
+					};
+				};
+			};
+		};
+
+		fe0: display-frontend@01e00000 {
+			compatible = "allwinner,sun5i-a13-display-frontend";
+			reg = <0x01e00000 0x20000>;
+			interrupts = <47>;
+			clocks = <&ahb_gates 46>, <&de_fe_clk>,
+				 <&dram_gates 25>;
+			clock-names = "ahb", "mod",
+				      "ram";
+			resets = <&de_fe_clk>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				fe0_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+
+					fe0_out_be0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&be0_in_fe0>;
+					};
+				};
+			};
+		};
+
+		be0: display-backend@01e60000 {
+			compatible = "allwinner,sun5i-a13-display-backend";
+			reg = <0x01e60000 0x10000>;
+			clocks = <&ahb_gates 44>, <&de_be_clk>,
+				 <&dram_gates 26>;
+			clock-names = "ahb", "mod",
+				      "ram";
+			resets = <&de_be_clk>;
+			status = "disabled";
+
+			assigned-clocks = <&de_be_clk>;
+			assigned-clock-rates = <300000000>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				be0_in: port@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <0>;
+
+					be0_in_fe0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&fe0_out_be0>;
+					};
+				};
+
+				be0_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+
+					be0_out_tcon0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&tcon0_in_be0>;
+					};
+				};
+			};
+		};
+	};
+
+	display-engine {
+		compatible = "allwinner,sun5i-a13-display-engine";
+		allwinner,pipelines = <&fe0>;
+	};
 };
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index 59a9426..0840612 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -88,6 +88,15 @@
 			clock-output-names = "osc24M";
 		};
 
+		osc3M: osc3M_clk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <8>;
+			clock-mult = <1>;
+			clocks = <&osc24M>;
+			clock-output-names = "osc3M";
+		};
+
 		osc32k: clk@0 {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
@@ -112,6 +121,23 @@
 					     "pll2-4x", "pll2-8x";
 		};
 
+		pll3: clk@01c20010 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-pll3-clk";
+			reg = <0x01c20010 0x4>;
+			clocks = <&osc3M>;
+			clock-output-names = "pll3";
+		};
+
+		pll3x2: pll3x2_clk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <1>;
+			clock-mult = <2>;
+			clocks = <&pll3>;
+			clock-output-names = "pll3-2x";
+		};
+
 		pll4: clk@01c20018 {
 			#clock-cells = <0>;
 			compatible = "allwinner,sun4i-a10-pll1-clk";
@@ -136,6 +162,23 @@
 			clock-output-names = "pll6_sata", "pll6_other", "pll6";
 		};
 
+		pll7: clk@01c20030 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-pll3-clk";
+			reg = <0x01c20030 0x4>;
+			clocks = <&osc3M>;
+			clock-output-names = "pll7";
+		};
+
+		pll7x2: pll7x2_clk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <1>;
+			clock-mult = <2>;
+			clocks = <&pll7>;
+			clock-output-names = "pll7-2x";
+		};
+
 		/* dummy is 200M */
 		cpu: cpu@01c20054 {
 			#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/sun6i-a31s-colorfly-e708-q1.dts b/arch/arm/boot/dts/sun6i-a31s-colorfly-e708-q1.dts
new file mode 100644
index 0000000..e182eec
--- /dev/null
+++ b/arch/arm/boot/dts/sun6i-a31s-colorfly-e708-q1.dts
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun6i-a31s.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+	model = "Colorfly E708 Q1 tablet";
+	compatible = "colorfly,e708-q1", "allwinner,sun6i-a31s";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&cpu0 {
+	cpu-supply = <&reg_dcdc3>;
+};
+
+&ehci0 {
+	/* rtl8188etv wifi is connected here */
+	status = "okay";
+};
+
+&lradc {
+	vref-supply = <&reg_aldo3>;
+	status = "okay";
+
+	button@1000 {
+		label = "Home";
+		linux,code = <KEY_HOMEPAGE>;
+		channel = <0>;
+		voltage = <1000000>;
+	};
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_e708_q1>;
+	vmmc-supply = <&reg_dcdc1>;
+	bus-width = <4>;
+	cd-gpios = <&pio 0 8 GPIO_ACTIVE_HIGH>; /* PA8 */
+	cd-inverted;
+	status = "okay";
+};
+
+&pio {
+	mma8452_int_e708_q1: mma8452_int_pin@0 {
+		allwinner,pins = "PA9";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
+	mmc0_cd_pin_e708_q1: mmc0_cd_pin@0 {
+		allwinner,pins = "PA8";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+};
+
+&p2wi {
+	status = "okay";
+
+	axp22x: pmic@68 {
+		compatible = "x-powers,axp221";
+		reg = <0x68>;
+		interrupt-parent = <&nmi_intc>;
+		interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+#include "axp22x.dtsi"
+
+&reg_aldo3 {
+	regulator-always-on;
+	regulator-min-microvolt = <2700000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "avcc";
+};
+
+&reg_dc1sw {
+	regulator-name = "vcc-lcd";
+};
+
+&reg_dc5ldo {
+	regulator-always-on;
+	regulator-min-microvolt = <700000>;
+	regulator-max-microvolt = <1320000>;
+	regulator-name = "vdd-cpus"; /* This is an educated guess */
+};
+
+&reg_dcdc1 {
+	regulator-always-on;
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "vcc-3v0";
+};
+
+&reg_dcdc2 {
+	regulator-min-microvolt = <700000>;
+	regulator-max-microvolt = <1320000>;
+	regulator-name = "vdd-gpu";
+};
+
+&reg_dcdc3 {
+	regulator-always-on;
+	regulator-min-microvolt = <700000>;
+	regulator-max-microvolt = <1320000>;
+	regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc4 {
+	regulator-always-on;
+	regulator-min-microvolt = <700000>;
+	regulator-max-microvolt = <1320000>;
+	regulator-name = "vdd-sys-dll";
+};
+
+&reg_dcdc5 {
+	regulator-always-on;
+	regulator-min-microvolt = <1500000>;
+	regulator-max-microvolt = <1500000>;
+	regulator-name = "vcc-dram";
+};
+
+&reg_dldo1 {
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-wifi";
+};
+
+&reg_dldo2 {
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	regulator-name = "vcc-pg";
+};
+
+&simplefb_lcd {
+	vcc-lcd-supply = <&reg_dc1sw>;
+	vcc-pg-supply = <&reg_dldo2>;
+};
+
+/*
+ * FIXME for now we only support host mode and rely on u-boot to have
+ * turned on Vbus which is controlled by the axp221 pmic on the board.
+ *
+ * Once we have axp221 power-supply and vbus-usb support we should switch
+ * to fully supporting otg.
+ */
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usbphy {
+	usb1_vbus-supply = <&reg_dldo1>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
index 8da939a..83f39b0 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
@@ -94,6 +94,24 @@
 		pinctrl-0 = <&mmc3_pwrseq_pin_cubietruck>;
 		reset-gpios = <&pio 7 9 GPIO_ACTIVE_LOW>; /* PH9 WIFI_EN */
 	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "On-board SPDIF";
+
+		simple-audio-card,cpu {
+			sound-dai = <&spdif>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&spdif_out>;
+		};
+	};
+
+	spdif_out: spdif-out {
+		#sound-dai-cells = <0>;
+		compatible = "linux,spdif-dit";
+	};
 };
 
 &ahci {
@@ -301,6 +319,12 @@
 	status = "okay";
 };
 
+&spdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spdif_tx_pins_a>;
+	status = "okay";
+};
+
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_pins_a>;
diff --git a/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts b/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
index 661c21d..10d48cb 100644
--- a/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
+++ b/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
@@ -65,6 +65,24 @@
 			default-state = "on";
 		};
 	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "On-board SPDIF";
+
+		simple-audio-card,cpu {
+			sound-dai = <&spdif>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&spdif_out>;
+		};
+	};
+
+	spdif_out: spdif-out {
+		#sound-dai-cells = <0>;
+		compatible = "linux,spdif-dit";
+	};
 };
 
 &ahci {
@@ -123,3 +141,9 @@
 &reg_ahci_5v {
 	status = "okay";
 };
+
+&spdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spdif_tx_pins_a>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
new file mode 100644
index 0000000..5ea4915
--- /dev/null
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
@@ -0,0 +1,82 @@
+ /*
+ * Copyright 2015 - Ultimaker B.V.
+ * Author Olliver Schinagl <oliver@schinagl.nl>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sun7i-a20-olinuxino-lime2.dts"
+
+/ {
+	model = "Olimex A20-OLinuXino-LIME2-eMMC";
+	compatible = "olimex,a20-olinuxino-lime2-emmc", "allwinner,sun7i-a20";
+
+	mmc2_pwrseq: pwrseq {
+		pinctrl-0 = <&mmc2_pins_nrst>;
+		pinctrl-names = "default";
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&pio 2 16 GPIO_ACTIVE_LOW>;
+	};
+};
+
+&pio {
+	mmc2_pins_nrst: mmc2@0 {
+		allwinner,pins = "PC16";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
+&mmc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc2_pins_a>;
+	vmmc-supply = <&reg_vcc3v3>;
+	vqmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	non-removable;
+	mmc-pwrseq = <&mmc2_pwrseq>;
+	status = "okay";
+
+	emmc: emmc@0 {
+		reg = <0>;
+		compatible = "mmc-card";
+		broken-hpi;
+	};
+};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 0940a78..febdf4c 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -85,8 +85,9 @@
 			compatible = "allwinner,simple-framebuffer",
 				     "simple-framebuffer";
 			allwinner,pipeline = "de_be0-lcd0-tve0";
-			clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
-				 <&ahb_gates 44>, <&dram_gates 26>;
+			clocks = <&pll5 1>,
+				 <&ahb_gates 34>, <&ahb_gates 36>, <&ahb_gates 44>,
+				 <&dram_gates 5>, <&dram_gates 26>;
 			status = "disabled";
 		};
 	};
@@ -186,6 +187,15 @@
 			clock-output-names = "osc24M";
 		};
 
+		osc3M: osc3M_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clock-div = <8>;
+			clock-mult = <1>;
+			clocks = <&osc24M>;
+			clock-output-names = "osc3M";
+		};
+
 		osc32k: clk@0 {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
@@ -210,6 +220,22 @@
 					     "pll2-4x", "pll2-8x";
 		};
 
+		pll3: clk@01c20010 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-pll3-clk";
+			reg = <0x01c20010 0x4>;
+			clocks = <&osc3M>;
+			clock-output-names = "pll3";
+		};
+
+		pll3x2: pll3x2_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clock-div = <1>;
+			clock-mult = <2>;
+			clock-output-names = "pll3-2x";
+		};
+
 		pll4: clk@01c20018 {
 			#clock-cells = <0>;
 			compatible = "allwinner,sun7i-a20-pll4-clk";
@@ -235,6 +261,22 @@
 					     "pll6_div_4";
 		};
 
+		pll7: clk@01c20030 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-pll3-clk";
+			reg = <0x01c20030 0x4>;
+			clocks = <&osc3M>;
+			clock-output-names = "pll7";
+		};
+
+		pll7x2: pll7x2_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clock-div = <1>;
+			clock-mult = <2>;
+			clock-output-names = "pll7-2x";
+		};
+
 		pll8: clk@01c20040 {
 			#clock-cells = <0>;
 			compatible = "allwinner,sun7i-a20-pll4-clk";
@@ -476,6 +518,17 @@
 			clock-output-names = "ir1";
 		};
 
+		spdif_clk: clk@01c200c0 {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-mod1-clk";
+			reg = <0x01c200c0 0x4>;
+			clocks = <&pll2 SUN4I_A10_PLL2_8X>,
+				 <&pll2 SUN4I_A10_PLL2_4X>,
+				 <&pll2 SUN4I_A10_PLL2_2X>,
+				 <&pll2 SUN4I_A10_PLL2_1X>;
+			clock-output-names = "spdif";
+		};
+
 		keypad_clk: clk@01c200c4 {
 			#clock-cells = <0>;
 			compatible = "allwinner,sun4i-a10-mod0-clk";
@@ -1193,6 +1246,13 @@
 				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
+
+			spdif_tx_pins_a: spdif@0 {
+				allwinner,pins = "PB13";
+				allwinner,function = "spdif";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+			};
 		};
 
 		timer@01c20c00 {
@@ -1226,6 +1286,19 @@
 			status = "disabled";
 		};
 
+		spdif: spdif@01c21000 {
+			#sound-dai-cells = <0>;
+			compatible = "allwinner,sun4i-a10-spdif";
+			reg = <0x01c21000 0x400>;
+			interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&apb0_gates 1>, <&spdif_clk>;
+			clock-names = "apb", "spdif";
+			dmas = <&dma SUN4I_DMA_NORMAL 2>,
+			       <&dma SUN4I_DMA_NORMAL 2>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
 		ir0: ir@01c21800 {
 			compatible = "allwinner,sun4i-a10-ir";
 			clocks = <&apb0_gates 6>, <&ir0_clk>;
diff --git a/arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts b/arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts
index 1aeb06c..b2ce284 100644
--- a/arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts
+++ b/arch/arm/boot/dts/sun8i-a23-gt90h-v4.dts
@@ -47,15 +47,26 @@
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/pinctrl/sun4i-a10.h>
+#include <dt-bindings/pwm/pwm.h>
 
 / {
-	model = "Allwinner GT90H Quad Core Tablet (v4)";
-	compatible = "allwinner,gt90h-v4", "allwinner,sun8i-a33";
+	model = "Allwinner GT90H Dual Core Tablet (v4)";
+	compatible = "allwinner,gt90h-v4", "allwinner,sun8i-a23";
 
 	aliases {
 		serial0 = &r_uart;
 	};
 
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pinctrl-names = "default";
+		pinctrl-0 = <&bl_en_pin_gt90h>;
+		pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
+		brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
+		default-brightness-level = <8>;
+		enable-gpios = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
+	};
+
 	chosen {
 		stdout-path = "serial0:115200n8";
 	};
@@ -106,8 +117,7 @@
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_gt90h>;
-	/* FIXME this really is aldo1, correct once we've pmic support */
-	vmmc-supply = <&reg_vcc3v0>;
+	vmmc-supply = <&reg_aldo1>;
 	bus-width = <4>;
 	cd-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
 	cd-inverted;
@@ -115,6 +125,13 @@
 };
 
 &pio {
+	bl_en_pin_gt90h: bl_en_pin@0 {
+		allwinner,pins = "PH6";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
 	mmc0_cd_pin_gt90h: mmc0_cd_pin@0 {
 		allwinner,pins = "PB4";
 		allwinner,function = "gpio_in";
@@ -123,12 +140,106 @@
 	};
 };
 
+&pwm {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pwm0_pins>;
+	status = "okay";
+};
+
+&r_rsb {
+	status = "okay";
+
+	axp22x: pmic@3a3 {
+		compatible = "x-powers,axp223";
+		reg = <0x3a3>;
+		interrupt-parent = <&nmi_intc>;
+		interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+		eldoin-supply = <&reg_dcdc1>;
+	};
+};
+
 &r_uart {
 	pinctrl-names = "default";
 	pinctrl-0 = <&r_uart_pins_a>;
 	status = "okay";
 };
 
+#include "axp22x.dtsi"
+
+&reg_aldo1 {
+	regulator-always-on;
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "vcc-io";
+};
+
+&reg_aldo2 {
+	regulator-always-on;
+	regulator-min-microvolt = <2350000>;
+	regulator-max-microvolt = <2650000>;
+	regulator-name = "vdd-dll";
+};
+
+&reg_aldo3 {
+	regulator-always-on;
+	regulator-min-microvolt = <2700000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-pll-avcc";
+};
+
+&reg_dc1sw {
+	regulator-name = "vcc-lcd";
+};
+
+&reg_dc5ldo {
+	regulator-always-on;
+	regulator-min-microvolt = <900000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-cpus";
+};
+
+&reg_dcdc1 {
+	regulator-always-on;
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "vcc-3v0";
+};
+
+&reg_dcdc2 {
+	regulator-always-on;
+	regulator-min-microvolt = <900000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-sys";
+};
+
+&reg_dcdc3 {
+	regulator-always-on;
+	regulator-min-microvolt = <900000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc5 {
+	regulator-always-on;
+	regulator-min-microvolt = <1500000>;
+	regulator-max-microvolt = <1500000>;
+	regulator-name = "vcc-dram";
+};
+
+&reg_dldo1 {
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-wifi";
+};
+
+&reg_rtc_ldo {
+	regulator-name = "vcc-rtc";
+};
+
+&simplefb_lcd {
+	vcc-lcd-supply = <&reg_dc1sw>;
+};
+
 /*
  * FIXME for now we only support host mode and rely on u-boot to have
  * turned on Vbus which is controlled by the axp223 pmic on the board.
@@ -141,5 +252,6 @@
 };
 
 &usbphy {
+	usb1_vbus-supply = <&reg_dldo1>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts
new file mode 100644
index 0000000..cb5daaf
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a23.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+	model = "Polaroid MID2809PXE04 tablet";
+	compatible = "polaroid,mid2809pxe04", "allwinner,sun8i-a23";
+
+	aliases {
+		serial0 = &r_uart;
+	};
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pinctrl-names = "default";
+		pinctrl-0 = <&bl_en_pin_mid2809>;
+		pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
+		brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
+		default-brightness-level = <8>;
+		enable-gpios = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&ehci0 {
+	status = "okay";
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins_a>;
+	status = "okay";
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins_a>;
+	status = "okay";
+};
+
+&lradc {
+	vref-supply = <&reg_vcc3v0>;
+	status = "okay";
+
+	button@200 {
+		label = "Volume Up";
+		linux,code = <KEY_VOLUMEUP>;
+		channel = <0>;
+		voltage = <200000>;
+	};
+
+	button@400 {
+		label = "Volume Down";
+		linux,code = <KEY_VOLUMEDOWN>;
+		channel = <0>;
+		voltage = <400000>;
+	};
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_mid2809>;
+	vmmc-supply = <&reg_dcdc1>;
+	bus-width = <4>;
+	cd-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
+	cd-inverted;
+	status = "okay";
+};
+
+&pio {
+	bl_en_pin_mid2809: bl_en_pin@0 {
+		allwinner,pins = "PH6";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	mmc0_cd_pin_mid2809: mmc0_cd_pin@0 {
+		allwinner,pins = "PB4";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+};
+
+&pwm {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pwm0_pins>;
+	status = "okay";
+};
+
+&r_rsb {
+	status = "okay";
+
+	axp22x: pmic@3a3 {
+		compatible = "x-powers,axp223";
+		reg = <0x3a3>;
+		interrupt-parent = <&nmi_intc>;
+		interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+		eldoin-supply = <&reg_dcdc1>;
+	};
+};
+
+&r_uart {
+	pinctrl-names = "default";
+	pinctrl-0 = <&r_uart_pins_a>;
+	status = "okay";
+};
+
+#include "axp22x.dtsi"
+
+&reg_aldo1 {
+	regulator-always-on;
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "vcc-io";
+};
+
+&reg_aldo2 {
+	regulator-always-on;
+	regulator-min-microvolt = <2350000>;
+	regulator-max-microvolt = <2650000>;
+	regulator-name = "vdd-dll";
+};
+
+&reg_aldo3 {
+	regulator-always-on;
+	regulator-min-microvolt = <2700000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-pll-avcc";
+};
+
+&reg_dc1sw {
+	regulator-name = "vcc-lcd";
+};
+
+&reg_dc5ldo {
+	regulator-always-on;
+	regulator-min-microvolt = <900000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-cpus";
+};
+
+&reg_dcdc1 {
+	regulator-always-on;
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "vcc-3v0";
+};
+
+&reg_dcdc2 {
+	regulator-always-on;
+	regulator-min-microvolt = <900000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-sys";
+};
+
+&reg_dcdc3 {
+	regulator-always-on;
+	regulator-min-microvolt = <900000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc5 {
+	regulator-always-on;
+	regulator-min-microvolt = <1500000>;
+	regulator-max-microvolt = <1500000>;
+	regulator-name = "vcc-dram";
+};
+
+&reg_rtc_ldo {
+	regulator-name = "vcc-rtc";
+};
+
+&simplefb_lcd {
+	vcc-lcd-supply = <&reg_dc1sw>;
+};
+
+/*
+ * FIXME for now we only support host mode and rely on u-boot to have
+ * turned on Vbus which is controlled by the axp223 pmic on the board.
+ *
+ * Once we have axp223 support we should switch to fully supporting otg.
+ */
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
new file mode 100644
index 0000000..f93f5d1
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-h3.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+	model = "Xunlong Orange Pi 2";
+	compatible = "xunlong,orangepi-2", "allwinner,sun8i-h3";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
+
+		status_led {
+			label = "orangepi:red:status";
+			gpios = <&pio 0 15 GPIO_ACTIVE_HIGH>;
+		};
+
+		pwr_led {
+			label = "orangepi:green:pwr";
+			gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+		};
+	};
+
+	r_gpio_keys {
+		compatible = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&sw_r_opc>;
+
+		sw2 {
+			label = "sw2";
+			linux,code = <BTN_1>;
+			gpios = <&r_pio 0 4 GPIO_ACTIVE_LOW>;
+		};
+
+		sw4 {
+			label = "sw4";
+			linux,code = <BTN_0>;
+			gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	wifi_pwrseq: wifi_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		pinctrl-names = "default";
+		pinctrl-0 = <&wifi_pwrseq_pin_orangepi>;
+		reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 WIFI_EN */
+	};
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&ir {
+	pinctrl-names = "default";
+	pinctrl-0 = <&ir_pins_a>;
+	status = "okay";
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
+	cd-inverted;
+	status = "okay";
+};
+
+&mmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc1_pins_a>;
+	vmmc-supply = <&reg_vcc3v3>;
+	mmc-pwrseq = <&wifi_pwrseq>;
+	bus-width = <4>;
+	non-removable;
+	status = "okay";
+};
+
+&pio {
+	leds_opc: led_pins@0 {
+		allwinner,pins = "PA15";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
+&r_pio {
+	leds_r_opc: led_pins@0 {
+		allwinner,pins = "PL10";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	sw_r_opc: key_pins@0 {
+		allwinner,pins = "PL3", "PL4";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	wifi_pwrseq_pin_orangepi: wifi_pwrseq_pin@0 {
+		allwinner,pins = "PL7";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
+&reg_usb1_vbus {
+	gpio = <&pio 6 13 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
+	status = "okay";
+};
+
+&usb1_vbus_pin_a {
+	allwinner,pins = "PG13";
+};
+
+&usbphy {
+	usb1_vbus-supply = <&reg_usb1_vbus>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
new file mode 100644
index 0000000..0adf932
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-h3.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+	model = "Xunlong Orange Pi One";
+	compatible = "xunlong,orangepi-one", "allwinner,sun8i-h3";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
+
+		pwr_led {
+			label = "orangepi:green:pwr";
+			gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+		};
+
+		status_led {
+			label = "orangepi:red:status";
+			gpios = <&pio 0 15 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	r_gpio_keys {
+		compatible = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&sw_r_opc>;
+
+		sw4 {
+			label = "sw4";
+			linux,code = <BTN_0>;
+			gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
+	cd-inverted;
+	status = "okay";
+};
+
+&ohci1 {
+	status = "okay";
+};
+
+&pio {
+	leds_opc: led_pins@0 {
+		allwinner,pins = "PA15";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
+&r_pio {
+	leds_r_opc: led_pins@0 {
+		allwinner,pins = "PL10";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	sw_r_opc: key_pins@0 {
+		allwinner,pins = "PL3";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
+	status = "okay";
+};
+
+&usbphy {
+	/* USB VBUS is always on */
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
new file mode 100644
index 0000000..daf50b9a6
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2015 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-h3.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+	model = "Xunlong Orange Pi PC";
+	compatible = "xunlong,orangepi-pc", "allwinner,sun8i-h3";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
+
+		pwr_led {
+			label = "orangepi:green:pwr";
+			gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+		};
+
+		status_led {
+			label = "orangepi:red:status";
+			gpios = <&pio 0 15 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	r_gpio_keys {
+		compatible = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&sw_r_opc>;
+
+		sw4 {
+			label = "sw4";
+			linux,code = <BTN_0>;
+			gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&ehci2 {
+	status = "okay";
+};
+
+&ehci3 {
+	status = "okay";
+};
+
+&ir {
+	pinctrl-names = "default";
+	pinctrl-0 = <&ir_pins_a>;
+	status = "okay";
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
+	cd-inverted;
+	status = "okay";
+};
+
+&ohci1 {
+	status = "okay";
+};
+
+&ohci2 {
+	status = "okay";
+};
+
+&ohci3 {
+	status = "okay";
+};
+
+&pio {
+	leds_opc: led_pins@0 {
+		allwinner,pins = "PA15";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
+&r_pio {
+	leds_r_opc: led_pins@0 {
+		allwinner,pins = "PL10";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	sw_r_opc: key_pins@0 {
+		allwinner,pins = "PL3";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
+	status = "okay";
+};
+
+&usbphy {
+	/* USB VBUS is always on */
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
index 79f40c3..b0cb417 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
@@ -40,95 +40,56 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/dts-v1/;
-#include "sun8i-h3.dtsi"
-#include "sunxi-common-regulators.dtsi"
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
+/* The Orange Pi Plus is an extended version of the Orange Pi 2 */
+#include "sun8i-h3-orangepi-2.dts"
 
 / {
 	model = "Xunlong Orange Pi Plus";
 	compatible = "xunlong,orangepi-plus", "allwinner,sun8i-h3";
 
-	aliases {
-		serial0 = &uart0;
-	};
-
-	chosen {
-		stdout-path = "serial0:115200n8";
-	};
-
-	leds {
-		compatible = "gpio-leds";
+	reg_usb3_vbus: usb3-vbus {
+		compatible = "regulator-fixed";
 		pinctrl-names = "default";
-		pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
-
-		status_led {
-			label = "orangepi-plus:red:status";
-			gpios = <&pio 0 15 GPIO_ACTIVE_HIGH>;
-		};
-
-		pwr_led {
-			label = "orangepi-plus:green:pwr";
-			gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
-			default-state = "on";
-		};
+		pinctrl-0 = <&usb3_vbus_pin_a>;
+		regulator-name = "usb3-vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+		enable-active-high;
+		gpio = <&pio 6 11 GPIO_ACTIVE_HIGH>;
 	};
+};
 
-	r_gpio_keys {
-		compatible = "gpio-keys";
-		input-name = "sw4";
+&ehci3 {
+	status = "okay";
+};
 
-		pinctrl-names = "default";
-		pinctrl-0 = <&sw_r_opc>;
+&mmc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc2_8bit_pins>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <8>;
+	non-removable;
+	cap-mmc-hw-reset;
+	status = "okay";
+};
 
-		sw4@0 {
-			label = "sw4";
-			linux,code = <BTN_0>;
-			gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
-		};
-	};
+&mmc2_8bit_pins {
+	/* Increase drive strength for DDR modes */
+	allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+	/* eMMC is missing pull-ups */
+	allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
 };
 
 &pio {
-	leds_opc: led_pins@0 {
-		allwinner,pins = "PA15";
+	usb3_vbus_pin_a: usb3_vbus_pin@0 {
+		allwinner,pins = "PG11";
 		allwinner,function = "gpio_out";
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
 };
 
-&r_pio {
-	leds_r_opc: led_pins@0 {
-		allwinner,pins = "PL10";
-		allwinner,function = "gpio_out";
-		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
-		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
-	};
-
-	sw_r_opc: key_pins@0 {
-		allwinner,pins = "PL03";
-		allwinner,function = "gpio_in";
-		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
-		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
-	};
-};
-
-&mmc0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
-	vmmc-supply = <&reg_vcc3v3>;
-	bus-width = <4>;
-	cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
-	cd-inverted;
-	status = "okay";
-};
-
-&uart0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart0_pins_a>;
-	status = "okay";
+&usbphy {
+	usb3_vbus-supply = <&reg_usb3_vbus>;
 };
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index dadb7f6..4a4926b 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -269,6 +269,18 @@
 					     "mmc2_sample";
 		};
 
+		usb_clk: clk@01c200cc {
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+			compatible = "allwinner,sun8i-h3-usb-clk";
+			reg = <0x01c200cc 0x4>;
+			clocks = <&osc24M>;
+			clock-output-names = "usb_phy0", "usb_phy1",
+					     "usb_phy2", "usb_phy3",
+					     "usb_ohci0", "usb_ohci1",
+					     "usb_ohci2", "usb_ohci3";
+		};
+
 		mbus_clk: clk@01c2015c {
 			#clock-cells = <0>;
 			compatible = "allwinner,sun8i-a23-mbus-clk";
@@ -377,6 +389,107 @@
 			#size-cells = <0>;
 		};
 
+		usbphy: phy@01c19400 {
+			compatible = "allwinner,sun8i-h3-usb-phy";
+			reg = <0x01c19400 0x2c>,
+			      <0x01c1a800 0x4>,
+			      <0x01c1b800 0x4>,
+			      <0x01c1c800 0x4>,
+			      <0x01c1d800 0x4>;
+			reg-names = "phy_ctrl",
+				    "pmu0",
+				    "pmu1",
+				    "pmu2",
+				    "pmu3";
+			clocks = <&usb_clk 8>,
+				 <&usb_clk 9>,
+				 <&usb_clk 10>,
+				 <&usb_clk 11>;
+			clock-names = "usb0_phy",
+				      "usb1_phy",
+				      "usb2_phy",
+				      "usb3_phy";
+			resets = <&usb_clk 0>,
+				 <&usb_clk 1>,
+				 <&usb_clk 2>,
+				 <&usb_clk 3>;
+			reset-names = "usb0_reset",
+				      "usb1_reset",
+				      "usb2_reset",
+				      "usb3_reset";
+			status = "disabled";
+			#phy-cells = <1>;
+		};
+
+		ehci1: usb@01c1b000 {
+			compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
+			reg = <0x01c1b000 0x100>;
+			interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&bus_gates 25>, <&bus_gates 29>;
+			resets = <&ahb_rst 25>, <&ahb_rst 29>;
+			phys = <&usbphy 1>;
+			phy-names = "usb";
+			status = "disabled";
+		};
+
+		ohci1: usb@01c1b400 {
+			compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
+			reg = <0x01c1b400 0x100>;
+			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&bus_gates 29>, <&bus_gates 25>,
+				 <&usb_clk 17>;
+			resets = <&ahb_rst 29>, <&ahb_rst 25>;
+			phys = <&usbphy 1>;
+			phy-names = "usb";
+			status = "disabled";
+		};
+
+		ehci2: usb@01c1c000 {
+			compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
+			reg = <0x01c1c000 0x100>;
+			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&bus_gates 26>, <&bus_gates 30>;
+			resets = <&ahb_rst 26>, <&ahb_rst 30>;
+			phys = <&usbphy 2>;
+			phy-names = "usb";
+			status = "disabled";
+		};
+
+		ohci2: usb@01c1c400 {
+			compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
+			reg = <0x01c1c400 0x100>;
+			interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&bus_gates 30>, <&bus_gates 26>,
+				 <&usb_clk 18>;
+			resets = <&ahb_rst 30>, <&ahb_rst 26>;
+			phys = <&usbphy 2>;
+			phy-names = "usb";
+			status = "disabled";
+		};
+
+		ehci3: usb@01c1d000 {
+			compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
+			reg = <0x01c1d000 0x100>;
+			interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&bus_gates 27>, <&bus_gates 31>;
+			resets = <&ahb_rst 27>, <&ahb_rst 31>;
+			phys = <&usbphy 3>;
+			phy-names = "usb";
+			status = "disabled";
+		};
+
+		ohci3: usb@01c1d400 {
+			compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
+			reg = <0x01c1d400 0x100>;
+			interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&bus_gates 31>, <&bus_gates 27>,
+				 <&usb_clk 19>;
+			resets = <&ahb_rst 31>, <&ahb_rst 27>;
+			phys = <&usbphy 3>;
+			phy-names = "usb";
+			status = "disabled";
+		};
+
 		pio: pinctrl@01c20800 {
 			compatible = "allwinner,sun8i-h3-pinctrl";
 			reg = <0x01c20800 0x400>;
@@ -417,6 +530,16 @@
 				allwinner,drive = <SUN4I_PINCTRL_30_MA>;
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
+
+			mmc2_8bit_pins: mmc2_8bit {
+				allwinner,pins = "PC5", "PC6", "PC8",
+						 "PC9", "PC10", "PC11",
+						 "PC12", "PC13", "PC14",
+						 "PC15", "PC16";
+				allwinner,function = "mmc2";
+				allwinner,drive = <SUN4I_PINCTRL_30_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
 		};
 
 		ahb_rst: reset@01c202c0 {
diff --git a/arch/arm/boot/dts/sun8i-q8-common.dtsi b/arch/arm/boot/dts/sun8i-q8-common.dtsi
index 9d2b7e2..346a49d 100644
--- a/arch/arm/boot/dts/sun8i-q8-common.dtsi
+++ b/arch/arm/boot/dts/sun8i-q8-common.dtsi
@@ -125,8 +125,6 @@
 };
 
 &reg_dc1sw {
-	regulator-min-microvolt = <3000000>;
-	regulator-max-microvolt = <3000000>;
 	regulator-name = "vcc-lcd";
 };
 
diff --git a/arch/arm/boot/dts/tango4-common.dtsi b/arch/arm/boot/dts/tango4-common.dtsi
index ef665d2..dd7eb5f 100644
--- a/arch/arm/boot/dts/tango4-common.dtsi
+++ b/arch/arm/boot/dts/tango4-common.dtsi
@@ -3,11 +3,13 @@
  * https://github.com/mansr/linux-tangox
  */
 
-#define CPU_CLK 0
-#define SYS_CLK 1
-
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
+#define  CPU_CLK	0
+#define  SYS_CLK	1
+#define  USB_CLK	2
+#define SDIO_CLK	3
+
 / {
 	interrupt-parent = <&gic>;
 	#address-cells = <1>;
@@ -70,7 +72,7 @@
 
 		clkgen: clkgen@10000 {
 			compatible = "sigma,tango4-clkgen";
-			reg = <0x10000 0x40>;
+			reg = <0x10000 0x100>;
 			clocks = <&xtal>;
 			#clock-cells = <1>;
 		};
@@ -89,6 +91,12 @@
 			reg-shift = <2>;
 		};
 
+		watchdog@1fd00 {
+			compatible = "sigma,smp8759-wdt";
+			reg = <0x1fd00 8>;
+			clocks = <&xtal>;
+		};
+
 		eth0: ethernet@26000 {
 			compatible = "sigma,smp8734-ethernet";
 			reg = <0x26000 0x800>;
diff --git a/arch/arm/boot/dts/tango4-smp8758.dtsi b/arch/arm/boot/dts/tango4-smp8758.dtsi
index 7ed88ee..d2e65c4 100644
--- a/arch/arm/boot/dts/tango4-smp8758.dtsi
+++ b/arch/arm/boot/dts/tango4-smp8758.dtsi
@@ -1,4 +1,4 @@
-#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "tango4-common.dtsi"
 
 / {
 	cpus {
@@ -11,6 +11,9 @@
 			next-level-cache = <&l2cc>;
 			device_type = "cpu";
 			reg = <0>;
+			clocks = <&clkgen CPU_CLK>;
+			clock-latency = <1>;
+			operating-points = <1215000 0 607500 0 405000 0 243000 0 135000 0>;
 		};
 
 		cpu1: cpu@1 {
@@ -28,4 +31,27 @@
 			<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
 			<GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
 	};
+
+	soc {
+		cpu_temp: thermal@920100 {
+			#thermal-sensor-cells = <0>;
+			compatible = "sigma,smp8758-thermal";
+			reg = <0x920100 12>;
+		};
+	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			polling-delay = <997>;		/* milliseconds */
+			polling-delay-passive = <499>;	/* milliseconds */
+			thermal-sensors = <&cpu_temp>;
+			trips {
+				cpu_critical {
+					temperature = <120000>;
+					hysteresis = <2500>;
+					type = "critical";
+				};
+			};
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts
index 3e5b9c8..4cab64c 100644
--- a/arch/arm/boot/dts/tango4-vantage-1172.dts
+++ b/arch/arm/boot/dts/tango4-vantage-1172.dts
@@ -1,7 +1,6 @@
 /dts-v1/;
 
 #include "tango4-smp8758.dtsi"
-#include "tango4-common.dtsi"
 
 / {
 	model = "Sigma Designs SMP8758 Vantage-1172 Rev E1";
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index 8b7aa0d..c970bf6 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -18,6 +18,10 @@
 		serial0 = &uartd;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x80000000 0x40000000>;
 	};
@@ -1164,7 +1168,7 @@
 			label = "Power";
 			gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		volume_down {
diff --git a/arch/arm/boot/dts/tegra114-roth.dts b/arch/arm/boot/dts/tegra114-roth.dts
index 38acf78..9d868af 100644
--- a/arch/arm/boot/dts/tegra114-roth.dts
+++ b/arch/arm/boot/dts/tegra114-roth.dts
@@ -1047,7 +1047,7 @@
 			label = "Power";
 			gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra114-tn7.dts b/arch/arm/boot/dts/tegra114-tn7.dts
index f91c2c9..89047ed 100644
--- a/arch/arm/boot/dts/tegra114-tn7.dts
+++ b/arch/arm/boot/dts/tegra114-tn7.dts
@@ -292,7 +292,7 @@
 			label = "Power";
 			gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		volume_down {
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index d845bd1..cb9393a 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -150,7 +150,7 @@
 	};
 
 	timer@60005000 {
-		compatible = "nvidia,tegra114-timer", "nvidia,tegra20-timer";
+		compatible = "nvidia,tegra114-timer", "nvidia,tegra30-timer", "nvidia,tegra20-timer";
 		reg = <0x60005000 0x400>;
 		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
@@ -256,7 +256,7 @@
 	 * driver and APB DMA based serial driver for higher baudrate
 	 * and performance. To enable the 8250 based driver, the compatible
 	 * is "nvidia,tegra114-uart", "nvidia,tegra20-uart" and to enable
-	 * the APB DMA based serial driver, the comptible is
+	 * the APB DMA based serial driver, the compatible is
 	 * "nvidia,tegra114-hsuart", "nvidia,tegra30-hsuart".
 	 */
 	uarta: serial@70006000 {
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 66b4451..a99f07a 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -12,7 +12,15 @@
 	aliases {
 		rtc0 = "/i2c@0,7000d000/pmic@40";
 		rtc1 = "/rtc@0,7000e000";
+
+		/* This order keeps the mapping DB9 connector <-> ttyS0 */
 		serial0 = &uartd;
+		serial1 = &uarta;
+		serial2 = &uartb;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
 	};
 
 	memory {
@@ -1367,6 +1375,28 @@
 		};
 	};
 
+	/*
+	 * First high speed UART, exposed on the expansion connector J3A2
+	 *   Pin 41: BR_UART1_TXD
+	 *   Pin 44: BR_UART1_RXD
+	 */
+	serial@70006000 {
+		compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
+		status = "okay";
+	};
+
+	/*
+	 * Second high speed UART, exposed on the expansion connector J3A2
+	 *   Pin 65: UART2_RXD
+	 *   Pin 68: UART2_TXD
+	 *   Pin 71: UART2_CTS_L
+	 *   Pin 74: UART2_RTS_L
+	 */
+	serial@70006040 {
+		compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
+		status = "okay";
+	};
+
 	/* DB9 serial port */
 	serial@0,70006300 {
 		status = "okay";
@@ -1761,7 +1791,7 @@
 			gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
 			debounce-interval = <10>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index ec1aa64..5f1fc14 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -8,6 +8,10 @@
 		serial0 = &uarta;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x0 0x80000000 0x0 0x80000000>;
 	};
@@ -509,7 +513,7 @@
 			linux,input-type = <5>;
 			linux,code = <KEY_RESERVED>;
 			debounce-interval = <1>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		power {
@@ -517,7 +521,7 @@
 			gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
 			debounce-interval = <30>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index cfbdf42..0318258 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -13,6 +13,10 @@
 		serial0 = &uarta;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x0 0x80000000 0x0 0x80000000>;
 	};
@@ -975,7 +979,7 @@
 			gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
 			debounce-interval = <10>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 68669f7..e4eac1f 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -208,7 +208,7 @@
 	};
 
 	timer@0,60005000 {
-		compatible = "nvidia,tegra124-timer", "nvidia,tegra20-timer";
+		compatible = "nvidia,tegra124-timer", "nvidia,tegra30-timer", "nvidia,tegra20-timer";
 		reg = <0x0 0x60005000 0x0 0x400>;
 		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
@@ -322,7 +322,7 @@
 	 * driver and APB DMA based serial driver for higher baudrate
 	 * and performance. To enable the 8250 based driver, the compatible
 	 * is "nvidia,tegra124-uart", "nvidia,tegra20-uart" and to enable
-	 * the APB DMA based serial driver, the comptible is
+	 * the APB DMA based serial driver, the compatible is
 	 * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
 	 */
 	uarta: serial@0,70006000 {
diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts
index b926a07..d2e960c 100644
--- a/arch/arm/boot/dts/tegra20-harmony.dts
+++ b/arch/arm/boot/dts/tegra20-harmony.dts
@@ -13,6 +13,10 @@
 		serial0 = &uartd;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x00000000 0x40000000>;
 	};
@@ -655,7 +659,7 @@
 			label = "Power";
 			gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-iris-512.dts b/arch/arm/boot/dts/tegra20-iris-512.dts
index 1dd7d7b..bb56dfe 100644
--- a/arch/arm/boot/dts/tegra20-iris-512.dts
+++ b/arch/arm/boot/dts/tegra20-iris-512.dts
@@ -11,6 +11,10 @@
 		serial1 = &uartd;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	host1x@50000000 {
 		hdmi@54280000 {
 			status = "okay";
diff --git a/arch/arm/boot/dts/tegra20-medcom-wide.dts b/arch/arm/boot/dts/tegra20-medcom-wide.dts
index 9b87526..34c6588 100644
--- a/arch/arm/boot/dts/tegra20-medcom-wide.dts
+++ b/arch/arm/boot/dts/tegra20-medcom-wide.dts
@@ -10,6 +10,10 @@
 		serial0 = &uartd;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	pwm@7000a000 {
 		status = "okay";
 	};
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index ed7e100..33ed2b2 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -14,6 +14,10 @@
 		serial1 = &uartc;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x00000000 0x20000000>;
 	};
@@ -521,7 +525,7 @@
 			label = "Power";
 			gpios = <&gpio TEGRA_GPIO(J, 7) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index aea8994..94b60a7 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -13,6 +13,10 @@
 		serial0 = &uartd;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x00000000 0x40000000>;
 	};
@@ -807,7 +811,7 @@
 			label = "Power";
 			gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		lid {
@@ -816,7 +820,7 @@
 			linux,input-type = <5>; /* EV_SW */
 			linux,code = <0>; /* SW_LID */
 			debounce-interval = <1>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
index 13d4e61..025e9e8 100644
--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
+++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
@@ -10,6 +10,10 @@
 		serial0 = &uartd;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x00000000 0x20000000>;
 	};
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index d99af4e..4a035f7 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -13,6 +13,10 @@
 		serial0 = &uarta;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x00000000 0x40000000>;
 	};
@@ -392,7 +396,7 @@
 			label = "Power";
 			gpios = <&gpio TEGRA_GPIO(X, 6) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts
index 04c58e9..a28c060 100644
--- a/arch/arm/boot/dts/tegra20-ventana.dts
+++ b/arch/arm/boot/dts/tegra20-ventana.dts
@@ -13,6 +13,10 @@
 		serial0 = &uartd;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x00000000 0x40000000>;
 	};
@@ -601,7 +605,7 @@
 			label = "Power";
 			gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index 340d811..073806d 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -13,6 +13,10 @@
 		serial0 = &uarta;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x00000000 0x20000000>;
 	};
@@ -508,7 +512,7 @@
 		nvidia,repeat-delay-ms = <160>;
 		nvidia,kbc-row-pins = <0 1 2>;
 		nvidia,kbc-col-pins = <16 17>;
-		nvidia,wakeup-source;
+		wakeup-source;
 		linux,keymap = <MATRIX_KEY(0x00, 0x00, KEY_POWER)
 				MATRIX_KEY(0x01, 0x00, KEY_HOME)
 				MATRIX_KEY(0x01, 0x01, KEY_BACK)
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 33173e1..2207c08 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -145,7 +145,7 @@
 		interrupt-parent = <&intc>;
 		reg = <0x50040600 0x20>;
 		interrupts = <GIC_PPI 13
-			(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+			(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
 		clocks = <&tegra_car TEGRA20_CLK_TWD>;
 	};
 
@@ -309,7 +309,7 @@
 	 * driver and APB DMA based serial driver for higher baudrate
 	 * and performance. To enable the 8250 based driver, the compatible
 	 * is "nvidia,tegra20-uart" and to enable the APB DMA based serial
-	 * driver, the comptible is "nvidia,tegra20-hsuart".
+	 * driver, the compatible is "nvidia,tegra20-hsuart".
 	 */
 	uarta: serial@70006000 {
 		compatible = "nvidia,tegra20-uart";
diff --git a/arch/arm/boot/dts/tegra30-apalis-eval.dts b/arch/arm/boot/dts/tegra30-apalis-eval.dts
index f2879cf..99a6945 100644
--- a/arch/arm/boot/dts/tegra30-apalis-eval.dts
+++ b/arch/arm/boot/dts/tegra30-apalis-eval.dts
@@ -17,6 +17,10 @@
 		serial3 = &uartd;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	pcie-controller@00003000 {
 		status = "okay";
 
@@ -196,7 +200,7 @@
 			gpios = <&gpio TEGRA_GPIO(V, 1) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_WAKEUP>;
 			debounce-interval = <10>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index 3dede39..1eca3b2 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -12,6 +12,10 @@
 		serial0 = &uarta;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x80000000 0x7ff00000>;
 	};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index bb1ca15..4721c1c 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -35,6 +35,10 @@
 		serial1 = &uartc;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		reg = <0x80000000 0x40000000>;
 	};
@@ -626,7 +630,7 @@
 			interrupts = <2 0>;
 			linux,code = <KEY_POWER>;
 			debounce-interval = <100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		volume-down {
diff --git a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
index 3ff019f..76875c3 100644
--- a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
@@ -15,6 +15,10 @@
 		serial2 = &uartd;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	host1x@50000000 {
 		dc@54200000 {
 			rgb {
@@ -142,7 +146,7 @@
 			gpios = <&gpio TEGRA_GPIO(V, 1) GPIO_ACTIVE_HIGH>;
 			linux,code = <KEY_WAKEUP>;
 			debounce-interval = <10>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 313e260..5030065 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -230,7 +230,7 @@
 		reg = <0x50040600 0x20>;
 		interrupt-parent = <&intc>;
 		interrupts = <GIC_PPI 13
-			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
 		clocks = <&tegra_car TEGRA30_CLK_TWD>;
 	};
 
@@ -371,7 +371,7 @@
 	 * driver and APB DMA based serial driver for higher baudrate
 	 * and performance. To enable the 8250 based driver, the compatible
 	 * is "nvidia,tegra30-uart", "nvidia,tegra20-uart" and to enable
-	 * the APB DMA based serial driver, the comptible is
+	 * the APB DMA based serial driver, the compatible is
 	 * "nvidia,tegra30-hsuart", "nvidia,tegra20-hsuart".
 	 */
 	uarta: serial@70006000 {
diff --git a/arch/arm/boot/dts/twl6030.dtsi b/arch/arm/boot/dts/twl6030.dtsi
index 55eb35f..c45f97f 100644
--- a/arch/arm/boot/dts/twl6030.dtsi
+++ b/arch/arm/boot/dts/twl6030.dtsi
@@ -99,4 +99,10 @@
 		compatible = "ti,twl6030-pwmled";
 		#pwm-cells = <2>;
 	};
+
+	gpadc {
+		compatible = "ti,twl6030-gpadc";
+		interrupts = <3>;
+		#io-channel-cells = <1>;
+	};
 };
diff --git a/arch/arm/boot/dts/uniphier-pinctrl.dtsi b/arch/arm/boot/dts/uniphier-pinctrl.dtsi
index 2459279..f2f3fbe 100644
--- a/arch/arm/boot/dts/uniphier-pinctrl.dtsi
+++ b/arch/arm/boot/dts/uniphier-pinctrl.dtsi
@@ -68,6 +68,16 @@
 		function = "i2c4";
 	};
 
+	pinctrl_nand: nand_grp {
+		groups = "nand";
+		function = "nand";
+	};
+
+	pinctrl_nand2cs: nand2cs_grp {
+		groups = "nand", "nand_cs1";
+		function = "nand";
+	};
+
 	pinctrl_uart0: uart0_grp {
 		groups = "uart0";
 		function = "uart0";
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index d23320a..409e069 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -119,8 +119,9 @@
 	};
 
 	flash@34000000 {
-		compatible = "arm,versatile-flash";
-		reg = <0x34000000 0x4000000>;
+		/* 64 MiB NOR flash in non-interleaved chips */
+		compatible = "arm,versatile-flash", "cfi-flash";
+		reg = <0x34000000 0x04000000>;
 		bank-width = <4>;
 	};
 
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index 7a556b9..3086efa 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -75,19 +75,19 @@
 				compatible = "arm,vexpress-sysreg";
 				reg = <0x010000 0x1000>;
 
-				v2m_led_gpios: sys_led@08 {
+				v2m_led_gpios: sys_led {
 					compatible = "arm,vexpress-sysreg,sys_led";
 					gpio-controller;
 					#gpio-cells = <2>;
 				};
 
-				v2m_mmc_gpios: sys_mci@48 {
+				v2m_mmc_gpios: sys_mci {
 					compatible = "arm,vexpress-sysreg,sys_mci";
 					gpio-controller;
 					#gpio-cells = <2>;
 				};
 
-				v2m_flash_gpios: sys_flash@4c {
+				v2m_flash_gpios: sys_flash {
 					compatible = "arm,vexpress-sysreg,sys_flash";
 					gpio-controller;
 					#gpio-cells = <2>;
@@ -286,7 +286,7 @@
 			};
 		};
 
-		v2m_fixed_3v3: fixedregulator@0 {
+		v2m_fixed_3v3: fixed-regulator-0 {
 			compatible = "regulator-fixed";
 			regulator-name = "3V3";
 			regulator-min-microvolt = <3300000>;
@@ -318,49 +318,49 @@
 		leds {
 			compatible = "gpio-leds";
 
-			user@1 {
+			user1 {
 				label = "v2m:green:user1";
 				gpios = <&v2m_led_gpios 0 0>;
 				linux,default-trigger = "heartbeat";
 			};
 
-			user@2 {
+			user2 {
 				label = "v2m:green:user2";
 				gpios = <&v2m_led_gpios 1 0>;
 				linux,default-trigger = "mmc0";
 			};
 
-			user@3 {
+			user3 {
 				label = "v2m:green:user3";
 				gpios = <&v2m_led_gpios 2 0>;
 				linux,default-trigger = "cpu0";
 			};
 
-			user@4 {
+			user4 {
 				label = "v2m:green:user4";
 				gpios = <&v2m_led_gpios 3 0>;
 				linux,default-trigger = "cpu1";
 			};
 
-			user@5 {
+			user5 {
 				label = "v2m:green:user5";
 				gpios = <&v2m_led_gpios 4 0>;
 				linux,default-trigger = "cpu2";
 			};
 
-			user@6 {
+			user6 {
 				label = "v2m:green:user6";
 				gpios = <&v2m_led_gpios 5 0>;
 				linux,default-trigger = "cpu3";
 			};
 
-			user@7 {
+			user7 {
 				label = "v2m:green:user7";
 				gpios = <&v2m_led_gpios 6 0>;
 				linux,default-trigger = "cpu4";
 			};
 
-			user@8 {
+			user8 {
 				label = "v2m:green:user8";
 				gpios = <&v2m_led_gpios 7 0>;
 				linux,default-trigger = "cpu5";
@@ -371,7 +371,7 @@
 			compatible = "arm,vexpress,config-bus";
 			arm,vexpress,config-bridge = <&v2m_sysreg>;
 
-			osc@0 {
+			oscclk0 {
 				/* MCC static memory clock */
 				compatible = "arm,vexpress-osc";
 				arm,vexpress-sysreg,func = <1 0>;
@@ -380,7 +380,7 @@
 				clock-output-names = "v2m:oscclk0";
 			};
 
-			v2m_oscclk1: osc@1 {
+			v2m_oscclk1: oscclk1 {
 				/* CLCD clock */
 				compatible = "arm,vexpress-osc";
 				arm,vexpress-sysreg,func = <1 1>;
@@ -389,7 +389,7 @@
 				clock-output-names = "v2m:oscclk1";
 			};
 
-			v2m_oscclk2: osc@2 {
+			v2m_oscclk2: oscclk2 {
 				/* IO FPGA peripheral clock */
 				compatible = "arm,vexpress-osc";
 				arm,vexpress-sysreg,func = <1 2>;
@@ -398,7 +398,7 @@
 				clock-output-names = "v2m:oscclk2";
 			};
 
-			volt@0 {
+			volt-vio {
 				/* Logic level voltage */
 				compatible = "arm,vexpress-volt";
 				arm,vexpress-sysreg,func = <2 0>;
@@ -407,34 +407,34 @@
 				label = "VIO";
 			};
 
-			temp@0 {
+			temp-mcc {
 				/* MCC internal operating temperature */
 				compatible = "arm,vexpress-temp";
 				arm,vexpress-sysreg,func = <4 0>;
 				label = "MCC";
 			};
 
-			reset@0 {
+			reset {
 				compatible = "arm,vexpress-reset";
 				arm,vexpress-sysreg,func = <5 0>;
 			};
 
-			muxfpga@0 {
+			muxfpga {
 				compatible = "arm,vexpress-muxfpga";
 				arm,vexpress-sysreg,func = <7 0>;
 			};
 
-			shutdown@0 {
+			shutdown {
 				compatible = "arm,vexpress-shutdown";
 				arm,vexpress-sysreg,func = <8 0>;
 			};
 
-			reboot@0 {
+			reboot {
 				compatible = "arm,vexpress-reboot";
 				arm,vexpress-sysreg,func = <9 0>;
 			};
 
-			dvimode@0 {
+			dvimode {
 				compatible = "arm,vexpress-dvimode";
 				arm,vexpress-sysreg,func = <11 0>;
 			};
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index 47e4a11..c6393d3 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -74,19 +74,19 @@
 				compatible = "arm,vexpress-sysreg";
 				reg = <0x00000 0x1000>;
 
-				v2m_led_gpios: sys_led@08 {
+				v2m_led_gpios: sys_led {
 					compatible = "arm,vexpress-sysreg,sys_led";
 					gpio-controller;
 					#gpio-cells = <2>;
 				};
 
-				v2m_mmc_gpios: sys_mci@48 {
+				v2m_mmc_gpios: sys_mci {
 					compatible = "arm,vexpress-sysreg,sys_mci";
 					gpio-controller;
 					#gpio-cells = <2>;
 				};
 
-				v2m_flash_gpios: sys_flash@4c {
+				v2m_flash_gpios: sys_flash {
 					compatible = "arm,vexpress-sysreg,sys_flash";
 					gpio-controller;
 					#gpio-cells = <2>;
@@ -285,7 +285,7 @@
 			};
 		};
 
-		v2m_fixed_3v3: fixedregulator@0 {
+		v2m_fixed_3v3: fixed-regulator-0 {
 			compatible = "regulator-fixed";
 			regulator-name = "3V3";
 			regulator-min-microvolt = <3300000>;
@@ -317,49 +317,49 @@
 		leds {
 			compatible = "gpio-leds";
 
-			user@1 {
+			user1 {
 				label = "v2m:green:user1";
 				gpios = <&v2m_led_gpios 0 0>;
 				linux,default-trigger = "heartbeat";
 			};
 
-			user@2 {
+			user2 {
 				label = "v2m:green:user2";
 				gpios = <&v2m_led_gpios 1 0>;
 				linux,default-trigger = "mmc0";
 			};
 
-			user@3 {
+			user3 {
 				label = "v2m:green:user3";
 				gpios = <&v2m_led_gpios 2 0>;
 				linux,default-trigger = "cpu0";
 			};
 
-			user@4 {
+			user4 {
 				label = "v2m:green:user4";
 				gpios = <&v2m_led_gpios 3 0>;
 				linux,default-trigger = "cpu1";
 			};
 
-			user@5 {
+			user5 {
 				label = "v2m:green:user5";
 				gpios = <&v2m_led_gpios 4 0>;
 				linux,default-trigger = "cpu2";
 			};
 
-			user@6 {
+			user6 {
 				label = "v2m:green:user6";
 				gpios = <&v2m_led_gpios 5 0>;
 				linux,default-trigger = "cpu3";
 			};
 
-			user@7 {
+			user7 {
 				label = "v2m:green:user7";
 				gpios = <&v2m_led_gpios 6 0>;
 				linux,default-trigger = "cpu4";
 			};
 
-			user@8 {
+			user8 {
 				label = "v2m:green:user8";
 				gpios = <&v2m_led_gpios 7 0>;
 				linux,default-trigger = "cpu5";
@@ -370,7 +370,7 @@
 			compatible = "arm,vexpress,config-bus";
 			arm,vexpress,config-bridge = <&v2m_sysreg>;
 
-			osc@0 {
+			oscclk0 {
 				/* MCC static memory clock */
 				compatible = "arm,vexpress-osc";
 				arm,vexpress-sysreg,func = <1 0>;
@@ -379,7 +379,7 @@
 				clock-output-names = "v2m:oscclk0";
 			};
 
-			v2m_oscclk1: osc@1 {
+			v2m_oscclk1: oscclk1 {
 				/* CLCD clock */
 				compatible = "arm,vexpress-osc";
 				arm,vexpress-sysreg,func = <1 1>;
@@ -388,7 +388,7 @@
 				clock-output-names = "v2m:oscclk1";
 			};
 
-			v2m_oscclk2: osc@2 {
+			v2m_oscclk2: oscclk2 {
 				/* IO FPGA peripheral clock */
 				compatible = "arm,vexpress-osc";
 				arm,vexpress-sysreg,func = <1 2>;
@@ -397,7 +397,7 @@
 				clock-output-names = "v2m:oscclk2";
 			};
 
-			volt@0 {
+			volt-vio {
 				/* Logic level voltage */
 				compatible = "arm,vexpress-volt";
 				arm,vexpress-sysreg,func = <2 0>;
@@ -406,34 +406,34 @@
 				label = "VIO";
 			};
 
-			temp@0 {
+			temp-mcc {
 				/* MCC internal operating temperature */
 				compatible = "arm,vexpress-temp";
 				arm,vexpress-sysreg,func = <4 0>;
 				label = "MCC";
 			};
 
-			reset@0 {
+			reset {
 				compatible = "arm,vexpress-reset";
 				arm,vexpress-sysreg,func = <5 0>;
 			};
 
-			muxfpga@0 {
+			muxfpga {
 				compatible = "arm,vexpress-muxfpga";
 				arm,vexpress-sysreg,func = <7 0>;
 			};
 
-			shutdown@0 {
+			shutdown {
 				compatible = "arm,vexpress-shutdown";
 				arm,vexpress-sysreg,func = <8 0>;
 			};
 
-			reboot@0 {
+			reboot {
 				compatible = "arm,vexpress-reboot";
 				arm,vexpress-sysreg,func = <9 0>;
 			};
 
-			dvimode@0 {
+			dvimode {
 				compatible = "arm,vexpress-dvimode";
 				arm,vexpress-sysreg,func = <11 0>;
 			};
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 9420053..102838f 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -55,14 +55,14 @@
 		compatible = "arm,hdlcd";
 		reg = <0 0x2b000000 0 0x1000>;
 		interrupts = <0 85 4>;
-		clocks = <&oscclk5>;
+		clocks = <&hdlcd_clk>;
 		clock-names = "pxlclk";
 	};
 
 	memory-controller@2b0a0000 {
 		compatible = "arm,pl341", "arm,primecell";
 		reg = <0 0x2b0a0000 0 0x1000>;
-		clocks = <&oscclk7>;
+		clocks = <&sys_pll>;
 		clock-names = "apb_pclk";
 	};
 
@@ -71,7 +71,7 @@
 		status = "disabled";
 		reg = <0 0x2b060000 0 0x1000>;
 		interrupts = <0 98 4>;
-		clocks = <&oscclk7>;
+		clocks = <&sys_pll>;
 		clock-names = "apb_pclk";
 	};
 
@@ -92,7 +92,7 @@
 		reg = <0 0x7ffd0000 0 0x1000>;
 		interrupts = <0 86 4>,
 			     <0 87 4>;
-		clocks = <&oscclk7>;
+		clocks = <&sys_pll>;
 		clock-names = "apb_pclk";
 	};
 
@@ -104,7 +104,7 @@
 			     <0 89 4>,
 			     <0 90 4>,
 			     <0 91 4>;
-		clocks = <&oscclk7>;
+		clocks = <&sys_pll>;
 		clock-names = "apb_pclk";
 	};
 
@@ -126,7 +126,7 @@
 		compatible = "arm,vexpress,config-bus";
 		arm,vexpress,config-bridge = <&v2m_sysreg>;
 
-		osc@0 {
+		oscclk0 {
 			/* CPU PLL reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 0>;
@@ -135,7 +135,7 @@
 			clock-output-names = "oscclk0";
 		};
 
-		osc@4 {
+		oscclk4 {
 			/* Multiplexed AXI master clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 4>;
@@ -144,7 +144,7 @@
 			clock-output-names = "oscclk4";
 		};
 
-		oscclk5: osc@5 {
+		hdlcd_clk: oscclk5 {
 			/* HDLCD PLL reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 5>;
@@ -153,7 +153,7 @@
 			clock-output-names = "oscclk5";
 		};
 
-		smbclk: osc@6 {
+		smbclk: oscclk6 {
 			/* SMB clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 6>;
@@ -162,7 +162,7 @@
 			clock-output-names = "oscclk6";
 		};
 
-		oscclk7: osc@7 {
+		sys_pll: oscclk7 {
 			/* SYS PLL reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 7>;
@@ -171,7 +171,7 @@
 			clock-output-names = "oscclk7";
 		};
 
-		osc@8 {
+		oscclk8 {
 			/* DDR2 PLL reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 8>;
@@ -180,7 +180,7 @@
 			clock-output-names = "oscclk8";
 		};
 
-		volt@0 {
+		volt-cores {
 			/* CPU core voltage */
 			compatible = "arm,vexpress-volt";
 			arm,vexpress-sysreg,func = <2 0>;
@@ -191,28 +191,28 @@
 			label = "Cores";
 		};
 
-		amp@0 {
+		amp-cores {
 			/* Total current for the two cores */
 			compatible = "arm,vexpress-amp";
 			arm,vexpress-sysreg,func = <3 0>;
 			label = "Cores";
 		};
 
-		temp@0 {
+		temp-dcc {
 			/* DCC internal temperature */
 			compatible = "arm,vexpress-temp";
 			arm,vexpress-sysreg,func = <4 0>;
 			label = "DCC";
 		};
 
-		power@0 {
+		power-cores {
 			/* Total power */
 			compatible = "arm,vexpress-power";
 			arm,vexpress-sysreg,func = <12 0>;
 			label = "Cores";
 		};
 
-		energy@0 {
+		energy {
 			/* Total energy */
 			compatible = "arm,vexpress-energy";
 			arm,vexpress-sysreg,func = <13 0>;
@@ -220,7 +220,7 @@
 		};
 	};
 
-	smb {
+	smb@08000000 {
 		compatible = "simple-bus";
 
 		#address-cells = <2>;
@@ -280,4 +280,17 @@
 
 		/include/ "vexpress-v2m-rs1.dtsi"
 	};
+
+	site2: hsb@40000000 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0x40000000 0x3fef0000>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 3>;
+		interrupt-map = <0 0 &gic 0 36 4>,
+				<0 1 &gic 0 37 4>,
+				<0 2 &gic 0 38 4>,
+				<0 3 &gic 0 39 4>;
+	};
 };
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 17f63f7..0205c97 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -109,7 +109,7 @@
 		compatible = "arm,hdlcd";
 		reg = <0 0x2b000000 0 0x1000>;
 		interrupts = <0 85 4>;
-		clocks = <&oscclk5>;
+		clocks = <&hdlcd_clk>;
 		clock-names = "pxlclk";
 	};
 
@@ -227,7 +227,7 @@
 		compatible = "arm,vexpress,config-bus";
 		arm,vexpress,config-bridge = <&v2m_sysreg>;
 
-		osc@0 {
+		oscclk0 {
 			/* A15 PLL 0 reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 0>;
@@ -236,7 +236,7 @@
 			clock-output-names = "oscclk0";
 		};
 
-		osc@1 {
+		oscclk1 {
 			/* A15 PLL 1 reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 1>;
@@ -245,7 +245,7 @@
 			clock-output-names = "oscclk1";
 		};
 
-		osc@2 {
+		oscclk2 {
 			/* A7 PLL 0 reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 2>;
@@ -254,7 +254,7 @@
 			clock-output-names = "oscclk2";
 		};
 
-		osc@3 {
+		oscclk3 {
 			/* A7 PLL 1 reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 3>;
@@ -263,7 +263,7 @@
 			clock-output-names = "oscclk3";
 		};
 
-		osc@4 {
+		oscclk4 {
 			/* External AXI master clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 4>;
@@ -272,7 +272,7 @@
 			clock-output-names = "oscclk4";
 		};
 
-		oscclk5: osc@5 {
+		hdlcd_clk: oscclk5 {
 			/* HDLCD PLL reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 5>;
@@ -281,7 +281,7 @@
 			clock-output-names = "oscclk5";
 		};
 
-		smbclk: osc@6 {
+		smbclk: oscclk6 {
 			/* Static memory controller clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 6>;
@@ -290,7 +290,7 @@
 			clock-output-names = "oscclk6";
 		};
 
-		osc@7 {
+		oscclk7 {
 			/* SYS PLL reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 7>;
@@ -299,7 +299,7 @@
 			clock-output-names = "oscclk7";
 		};
 
-		osc@8 {
+		oscclk8 {
 			/* DDR2 PLL reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 8>;
@@ -308,7 +308,7 @@
 			clock-output-names = "oscclk8";
 		};
 
-		volt@0 {
+		volt-a15 {
 			/* A15 CPU core voltage */
 			compatible = "arm,vexpress-volt";
 			arm,vexpress-sysreg,func = <2 0>;
@@ -319,7 +319,7 @@
 			label = "A15 Vcore";
 		};
 
-		volt@1 {
+		volt-a7 {
 			/* A7 CPU core voltage */
 			compatible = "arm,vexpress-volt";
 			arm,vexpress-sysreg,func = <2 1>;
@@ -330,49 +330,49 @@
 			label = "A7 Vcore";
 		};
 
-		amp@0 {
+		amp-a15 {
 			/* Total current for the two A15 cores */
 			compatible = "arm,vexpress-amp";
 			arm,vexpress-sysreg,func = <3 0>;
 			label = "A15 Icore";
 		};
 
-		amp@1 {
+		amp-a7 {
 			/* Total current for the three A7 cores */
 			compatible = "arm,vexpress-amp";
 			arm,vexpress-sysreg,func = <3 1>;
 			label = "A7 Icore";
 		};
 
-		temp@0 {
+		temp-dcc {
 			/* DCC internal temperature */
 			compatible = "arm,vexpress-temp";
 			arm,vexpress-sysreg,func = <4 0>;
 			label = "DCC";
 		};
 
-		power@0 {
+		power-a15 {
 			/* Total power for the two A15 cores */
 			compatible = "arm,vexpress-power";
 			arm,vexpress-sysreg,func = <12 0>;
 			label = "A15 Pcore";
 		};
 
-		power@1 {
+		power-a7 {
 			/* Total power for the three A7 cores */
 			compatible = "arm,vexpress-power";
 			arm,vexpress-sysreg,func = <12 1>;
 			label = "A7 Pcore";
 		};
 
-		energy@0 {
+		energy-a15 {
 			/* Total energy for the two A15 cores */
 			compatible = "arm,vexpress-energy";
 			arm,vexpress-sysreg,func = <13 0>, <13 1>;
 			label = "A15 Jcore";
 		};
 
-		energy@2 {
+		energy-a7 {
 			/* Total energy for the three A7 cores */
 			compatible = "arm,vexpress-energy";
 			arm,vexpress-sysreg,func = <13 2>, <13 3>;
@@ -387,7 +387,7 @@
 		clocks = <&oscclk6a>;
 		clock-names = "apb_pclk";
 		port {
-			etb_in_port: endpoint@0 {
+			etb_in_port: endpoint {
 				slave-mode;
 				remote-endpoint = <&replicator_out_port0>;
 			};
@@ -401,7 +401,7 @@
 		clocks = <&oscclk6a>;
 		clock-names = "apb_pclk";
 		port {
-			tpiu_in_port: endpoint@0 {
+			tpiu_in_port: endpoint {
 				slave-mode;
 				remote-endpoint = <&replicator_out_port1>;
 			};
@@ -578,7 +578,7 @@
 		};
 	};
 
-	smb {
+	smb@08000000 {
 		compatible = "simple-bus";
 
 		#address-cells = <2>;
@@ -638,4 +638,17 @@
 
 		/include/ "vexpress-v2m-rs1.dtsi"
 	};
+
+	site2: hsb@40000000 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0x40000000 0x3fef0000>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 3>;
+		interrupt-map = <0 0 &gic 0 36 4>,
+				<0 1 &gic 0 37 4>,
+				<0 2 &gic 0 38 4>,
+				<0 3 &gic 0 39 4>;
+	};
 };
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index d2709b7..1acecaf 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -57,14 +57,14 @@
 		compatible = "arm,hdlcd";
 		reg = <0x2a110000 0x1000>;
 		interrupts = <0 85 4>;
-		clocks = <&oscclk3>;
+		clocks = <&hdlcd_clk>;
 		clock-names = "pxlclk";
 	};
 
 	memory-controller@2a150000 {
 		compatible = "arm,pl341", "arm,primecell";
 		reg = <0x2a150000 0x1000>;
-		clocks = <&oscclk1>;
+		clocks = <&axi_clk>;
 		clock-names = "apb_pclk";
 	};
 
@@ -73,7 +73,7 @@
 		reg = <0x2a190000 0x1000>;
 		interrupts = <0 86 4>,
 			     <0 87 4>;
-		clocks = <&oscclk1>;
+		clocks = <&axi_clk>;
 		clock-names = "apb_pclk";
 	};
 
@@ -93,7 +93,7 @@
 		             "arm,cortex-a9-global-timer";
 		reg = <0x2c000200 0x20>;
 		interrupts = <1 11 0x304>;
-		clocks = <&oscclk0>;
+		clocks = <&cpu_clk>;
 	};
 
 	watchdog@2c000620 {
@@ -128,7 +128,7 @@
 		compatible = "arm,vexpress,config-bus";
 		arm,vexpress,config-bridge = <&v2m_sysreg>;
 
-		oscclk0: osc@0 {
+		cpu_clk: oscclk0 {
 			/* CPU and internal AXI reference clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 0>;
@@ -137,7 +137,7 @@
 			clock-output-names = "oscclk0";
 		};
 
-		oscclk1: osc@1 {
+		axi_clk: oscclk1 {
 			/* Multiplexed AXI master clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 1>;
@@ -146,7 +146,7 @@
 			clock-output-names = "oscclk1";
 		};
 
-		osc@2 {
+		oscclk2 {
 			/* DDR2 */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 2>;
@@ -155,7 +155,7 @@
 			clock-output-names = "oscclk2";
 		};
 
-		oscclk3: osc@3 {
+		hdlcd_clk: oscclk3 {
 			/* HDLCD */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 3>;
@@ -164,7 +164,7 @@
 			clock-output-names = "oscclk3";
 		};
 
-		osc@4 {
+		oscclk4 {
 			/* Test chip gate configuration */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 4>;
@@ -173,7 +173,7 @@
 			clock-output-names = "oscclk4";
 		};
 
-		smbclk: osc@5 {
+		smbclk: oscclk5 {
 			/* SMB clock */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 5>;
@@ -182,7 +182,7 @@
 			clock-output-names = "oscclk5";
 		};
 
-		temp@0 {
+		temp-dcc {
 			/* DCC internal operating temperature */
 			compatible = "arm,vexpress-temp";
 			arm,vexpress-sysreg,func = <4 0>;
@@ -190,7 +190,7 @@
 		};
 	};
 
-	smb {
+	smb@08000000 {
 		compatible = "simple-bus";
 
 		#address-cells = <2>;
@@ -250,4 +250,17 @@
 
 		/include/ "vexpress-v2m-rs1.dtsi"
 	};
+
+	site2: hsb@40000000 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0x40000000 0x40000000>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 3>;
+		interrupt-map = <0 0 &gic 0 36 4>,
+				<0 1 &gic 0 37 4>,
+				<0 2 &gic 0 38 4>,
+				<0 3 &gic 0 39 4>;
+	};
 };
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index d949fac..b608a03 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -190,7 +190,7 @@
 		compatible = "arm,vexpress,config-bus";
 		arm,vexpress,config-bridge = <&v2m_sysreg>;
 
-		osc@0 {
+		oscclk0: extsaxiclk {
 			/* ACLK clock to the AXI master port on the test chip */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 0>;
@@ -199,7 +199,7 @@
 			clock-output-names = "extsaxiclk";
 		};
 
-		oscclk1: osc@1 {
+		oscclk1: clcdclk {
 			/* Reference clock for the CLCD */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 1>;
@@ -208,7 +208,7 @@
 			clock-output-names = "clcdclk";
 		};
 
-		smbclk: oscclk2: osc@2 {
+		smbclk: oscclk2: tcrefclk {
 			/* Reference clock for the test chip internal PLLs */
 			compatible = "arm,vexpress-osc";
 			arm,vexpress-sysreg,func = <1 2>;
@@ -217,7 +217,7 @@
 			clock-output-names = "tcrefclk";
 		};
 
-		volt@0 {
+		volt-vd10 {
 			/* Test Chip internal logic voltage */
 			compatible = "arm,vexpress-volt";
 			arm,vexpress-sysreg,func = <2 0>;
@@ -226,7 +226,7 @@
 			label = "VD10";
 		};
 
-		volt@1 {
+		volt-vd10-s2 {
 			/* PL310, L2 cache, RAM cell supply (not PL310 logic) */
 			compatible = "arm,vexpress-volt";
 			arm,vexpress-sysreg,func = <2 1>;
@@ -235,7 +235,7 @@
 			label = "VD10_S2";
 		};
 
-		volt@2 {
+		volt-vd10-s3 {
 			/* Cortex-A9 system supply, Cores, MPEs, SCU and PL310 logic */
 			compatible = "arm,vexpress-volt";
 			arm,vexpress-sysreg,func = <2 2>;
@@ -244,7 +244,7 @@
 			label = "VD10_S3";
 		};
 
-		volt@3 {
+		volt-vcc1v8 {
 			/* DDR2 SDRAM and Test Chip DDR2 I/O supply */
 			compatible = "arm,vexpress-volt";
 			arm,vexpress-sysreg,func = <2 3>;
@@ -253,7 +253,7 @@
 			label = "VCC1V8";
 		};
 
-		volt@4 {
+		volt-ddr2vtt {
 			/* DDR2 SDRAM VTT termination voltage */
 			compatible = "arm,vexpress-volt";
 			arm,vexpress-sysreg,func = <2 4>;
@@ -262,7 +262,7 @@
 			label = "DDR2VTT";
 		};
 
-		volt@5 {
+		volt-vcc3v3 {
 			/* Local board supply for miscellaneous logic external to the Test Chip */
 			arm,vexpress-sysreg,func = <2 5>;
 			compatible = "arm,vexpress-volt";
@@ -271,28 +271,28 @@
 			label = "VCC3V3";
 		};
 
-		amp@0 {
+		amp-vd10-s2 {
 			/* PL310, L2 cache, RAM cell supply (not PL310 logic) */
 			compatible = "arm,vexpress-amp";
 			arm,vexpress-sysreg,func = <3 0>;
 			label = "VD10_S2";
 		};
 
-		amp@1 {
+		amp-vd10-s3 {
 			/* Cortex-A9 system supply, Cores, MPEs, SCU and PL310 logic */
 			compatible = "arm,vexpress-amp";
 			arm,vexpress-sysreg,func = <3 1>;
 			label = "VD10_S3";
 		};
 
-		power@0 {
+		power-vd10-s2 {
 			/* PL310, L2 cache, RAM cell supply (not PL310 logic) */
 			compatible = "arm,vexpress-power";
 			arm,vexpress-sysreg,func = <12 0>;
 			label = "PVD10_S2";
 		};
 
-		power@1 {
+		power-vd10-s3 {
 			/* Cortex-A9 system supply, Cores, MPEs, SCU and PL310 logic */
 			compatible = "arm,vexpress-power";
 			arm,vexpress-sysreg,func = <12 1>;
@@ -300,7 +300,7 @@
 		};
 	};
 
-	smb {
+	smb@04000000 {
 		compatible = "simple-bus";
 
 		#address-cells = <2>;
@@ -359,4 +359,17 @@
 
 		/include/ "vexpress-v2m.dtsi"
 	};
+
+	site2: hsb@e0000000 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0xe0000000 0x20000000>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 3>;
+		interrupt-map = <0 0 &gic 0 36 4>,
+				<0 1 &gic 0 37 4>,
+				<0 2 &gic 0 38 4>,
+				<0 3 &gic 0 39 4>;
+	};
 };
diff --git a/arch/arm/boot/dts/vf-colibri.dtsi b/arch/arm/boot/dts/vf-colibri.dtsi
index fda7f28..226a86f 100644
--- a/arch/arm/boot/dts/vf-colibri.dtsi
+++ b/arch/arm/boot/dts/vf-colibri.dtsi
@@ -40,6 +40,11 @@
  */
 
 / {
+	aliases {
+		ethernet0 = &fec1;
+		ethernet1 = &fec0;
+	};
+
 	bl: backlight {
 		compatible = "pwm-backlight";
 		pinctrl-names = "default";
@@ -125,8 +130,6 @@
 };
 
 &nfc {
-	assigned-clocks = <&clks VF610_CLK_NFC>;
-	assigned-clock-rates = <33000000>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_nfc>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/vf500-colibri.dtsi b/arch/arm/boot/dts/vf500-colibri.dtsi
index 3fe1f48..1a8a0ef 100644
--- a/arch/arm/boot/dts/vf500-colibri.dtsi
+++ b/arch/arm/boot/dts/vf500-colibri.dtsi
@@ -69,6 +69,11 @@
 	};
 };
 
+&nfc {
+	assigned-clocks = <&clks VF610_CLK_NFC>;
+	assigned-clock-rates = <33000000>;
+};
+
 &iomuxc {
 	vf610-colibri {
 		pinctrl_touchctrl_idle: touchctrl_idle {
diff --git a/arch/arm/boot/dts/vf500.dtsi b/arch/arm/boot/dts/vf500.dtsi
index 9d37272..a3824e6 100644
--- a/arch/arm/boot/dts/vf500.dtsi
+++ b/arch/arm/boot/dts/vf500.dtsi
@@ -81,6 +81,7 @@
 				compatible = "arm,cortex-a5-pmu";
 				interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
 				interrupt-affinity = <&a5_cpu>;
+				reg = <0x40089000 0x1000>;
 			};
 		};
 
diff --git a/arch/arm/boot/dts/vf610-colibri.dtsi b/arch/arm/boot/dts/vf610-colibri.dtsi
index ab4a29f..9ec9e33 100644
--- a/arch/arm/boot/dts/vf610-colibri.dtsi
+++ b/arch/arm/boot/dts/vf610-colibri.dtsi
@@ -50,3 +50,8 @@
 		reg = <0x80000000 0x10000000>;
 	};
 };
+
+&nfc {
+	assigned-clocks = <&clks VF610_CLK_NFC>;
+	assigned-clock-rates = <50000000>;
+};
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
new file mode 100644
index 0000000..6c60b7f
--- /dev/null
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -0,0 +1,734 @@
+/*
+ * Copyright (C) 2015, 2016 Zodiac Inflight Innovations
+ *
+ * Based on an original 'vf610-twr.dts' which is Copyright 2015,
+ * Freescale Semiconductor, Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "vf610.dtsi"
+
+/ {
+	model = "ZII VF610 Development Board, Rev B";
+	compatible = "zii,vf610dev-b", "zii,vf610dev", "fsl,vf610";
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		reg = <0x80000000 0x20000000>;
+	};
+
+	gpio-leds {
+		compatible = "gpio-leds";
+		pinctrl-0 = <&pinctrl_leds_debug>;
+		pinctrl-names = "default";
+
+		debug {
+			label = "zii:green:debug1";
+			gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+		};
+	};
+
+	mdio-mux {
+		compatible = "mdio-mux-gpio";
+		pinctrl-0 = <&pinctrl_mdio_mux>;
+		pinctrl-names = "default";
+		gpios = <&gpio0 8  GPIO_ACTIVE_HIGH
+			 &gpio0 9  GPIO_ACTIVE_HIGH
+			 &gpio0 24 GPIO_ACTIVE_HIGH
+			 &gpio0 25 GPIO_ACTIVE_HIGH>;
+		mdio-parent-bus = <&mdio1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mdio_mux_1: mdio@1 {
+			reg = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		mdio_mux_2: mdio@2 {
+			reg = <2>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		mdio_mux_4: mdio@4 {
+			reg = <4>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		mdio_mux_8: mdio@8 {
+			reg = <8>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+	};
+
+	dsa {
+		compatible = "marvell,dsa";
+		#address-cells = <2>;
+		#size-cells = <0>;
+		dsa,ethernet = <&fec1>;
+		dsa,mii-bus = <&mdio_mux_1>;
+
+		/* 6352 - Primary - 7 ports */
+		switch0: switch@0-0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x00 0>;
+			eeprom-length = <512>;
+
+			port@0 {
+				reg = <0>;
+				label = "lan0";
+			};
+
+			port@1 {
+				reg = <1>;
+				label = "lan1";
+			};
+
+			port@2 {
+				reg = <2>;
+				label = "lan2";
+			};
+
+			switch0port5: port@5 {
+				reg = <5>;
+				label = "dsa";
+				phy-mode = "rgmii-txid";
+				link = <&switch1port6
+					&switch2port9>;
+
+				fixed-link {
+					speed = <1000>;
+					full-duplex;
+				};
+			};
+
+			port@6 {
+				reg = <6>;
+				label = "cpu";
+
+				fixed-link {
+					speed = <100>;
+					full-duplex;
+				};
+			};
+
+		};
+
+		/* 6352 - Secondary - 7 ports */
+		switch1: switch@0-1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x00 1>;
+			eeprom-length = <512>;
+			mii-bus = <&mdio_mux_2>;
+
+			port@0 {
+				reg = <0>;
+				label = "lan3";
+			};
+
+			port@1 {
+				reg = <1>;
+				label = "lan4";
+			};
+
+			port@2 {
+				reg = <2>;
+				label = "lan5";
+			};
+
+			switch1port5: port@5 {
+				reg = <5>;
+				label = "dsa";
+				link = <&switch2port9>;
+				phy-mode = "rgmii-txid";
+
+				fixed-link {
+					speed = <1000>;
+					full-duplex;
+				};
+			};
+
+			switch1port6: port@6 {
+				reg = <6>;
+				label = "dsa";
+				phy-mode = "rgmii-txid";
+				link = <&switch0port5>;
+
+				fixed-link {
+					speed = <1000>;
+					full-duplex;
+				};
+			};
+		};
+
+		/* 6185 - 10 ports */
+		switch2: switch@0-2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x00 2>;
+			mii-bus = <&mdio_mux_4>;
+
+			port@0 {
+				reg = <0>;
+				label = "lan6";
+			};
+
+			port@1 {
+				reg = <1>;
+				label = "lan7";
+			};
+
+			port@2 {
+				reg = <2>;
+				label = "lan8";
+			};
+
+			port@3 {
+				reg = <3>;
+				label = "optical3";
+
+				fixed-link {
+					speed = <1000>;
+					full-duplex;
+					link-gpios = <&gpio6 2
+						      GPIO_ACTIVE_HIGH>;
+				};
+			};
+
+			port@4 {
+				reg = <4>;
+				label = "optical4";
+
+				fixed-link {
+					speed = <1000>;
+					full-duplex;
+					link-gpios = <&gpio6 3
+						      GPIO_ACTIVE_HIGH>;
+				};
+			};
+
+			switch2port9: port@9 {
+				reg = <9>;
+				label = "dsa";
+				phy-mode = "rgmii-txid";
+				link = <&switch1port5
+					&switch0port5>;
+
+				fixed-link {
+					speed = <1000>;
+					full-duplex;
+				};
+			};
+		};
+	};
+
+	reg_vcc_3v3_mcu: regulator-vcc-3v3-mcu {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_3v3_mcu";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	usb0_vbus: regulator-usb0-vbus {
+		compatible = "regulator-fixed";
+		pinctrl-0 = <&pinctrl_usb_vbus>;
+		regulator-name = "usb_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		enable-active-high;
+		regulator-always-on;
+		regulator-boot-on;
+		gpio = <&gpio0 6 0>;
+	};
+
+	spi0 {
+		compatible = "spi-gpio";
+		pinctrl-0 = <&pinctrl_gpio_spi0>;
+		pinctrl-names = "default";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		gpio-sck  = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+		gpio-mosi = <&gpio1 11 GPIO_ACTIVE_HIGH>;
+		gpio-miso = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+		cs-gpios  = <&gpio1  9 GPIO_ACTIVE_HIGH
+			     &gpio1  8 GPIO_ACTIVE_HIGH>;
+		num-chipselects = <2>;
+
+		m25p128@0 {
+			compatible = "m25p128", "jedec,spi-nor";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0>;
+			spi-max-frequency = <1000000>;
+		};
+
+		at93c46d@1 {
+			compatible = "atmel,at93c46d";
+			pinctrl-0 = <&pinctrl_gpio_e6185_eeprom_sel>;
+			pinctrl-names = "default";
+			#address-cells = <0>;
+			#size-cells = <0>;
+			reg = <1>;
+			spi-max-frequency = <500000>;
+			spi-cs-high;
+			data-size = <16>;
+			select-gpios = <&gpio4 4 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
+
+&adc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_adc0_ad5>;
+	vref-supply = <&reg_vcc_3v3_mcu>;
+	status = "okay";
+};
+
+&edma0 {
+	status = "okay";
+};
+
+&esdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_esdhc1>;
+	bus-width = <4>;
+	status = "okay";
+};
+
+&fec0 {
+	phy-mode = "rmii";
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_fec0>;
+	status = "okay";
+};
+
+&fec1 {
+	phy-mode = "rmii";
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_fec1>;
+	status = "okay";
+
+	fixed-link {
+		   speed = <100>;
+		   full-duplex;
+	};
+
+	mdio1: mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "okay";
+	};
+};
+
+&i2c0 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c0>;
+	status = "okay";
+
+	gpio5: pca9554@20 {
+		compatible = "nxp,pca9554";
+		reg = <0x20>;
+		gpio-controller;
+		#gpio-cells = <2>;
+
+	};
+
+	gpio6: pca9554@22 {
+		compatible = "nxp,pca9554";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_pca9554_22>;
+		reg = <0x22>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-parent = <&gpio2>;
+		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	lm75@48 {
+		compatible = "national,lm75";
+		reg = <0x48>;
+	};
+
+	at24c04@50 {
+		compatible = "atmel,24c04";
+		reg = <0x50>;
+	};
+
+	at24c04@52 {
+		compatible = "atmel,24c04";
+		reg = <0x52>;
+	};
+
+	ds1682@6b {
+		compatible = "dallas,ds1682";
+		reg = <0x6b>;
+	};
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+
+	tca9548@70 {
+		compatible = "nxp,pca9548";
+		pinctrl-0 = <&pinctrl_i2c_mux_reset>;
+		pinctrl-names = "default";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x70>;
+		reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
+
+		i2c@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0>;
+
+			sfp1: at24c04@50 {
+				compatible = "atmel,24c02";
+				reg = <0x50>;
+			};
+		};
+
+		i2c@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <1>;
+
+			sfp2: at24c04@50 {
+				compatible = "atmel,24c02";
+				reg = <0x50>;
+			};
+		};
+
+		i2c@2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <2>;
+
+			sfp3: at24c04@50 {
+				compatible = "atmel,24c02";
+				reg = <0x50>;
+			};
+		};
+
+		i2c@3 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <3>;
+
+			sfp4: at24c04@50 {
+				compatible = "atmel,24c02";
+				reg = <0x50>;
+			};
+		};
+
+		i2c@4 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <4>;
+		};
+	};
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart0>;
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+&usbdev0 {
+	disable-over-current;
+	vbus-supply = <&usb0_vbus>;
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usbh1 {
+	disable-over-current;
+	status = "okay";
+};
+
+&usbmisc0 {
+	status = "okay";
+};
+
+&usbmisc1 {
+	status = "okay";
+};
+
+&usbphy0 {
+	status = "okay";
+};
+
+&usbphy1 {
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_adc0_ad5: adc0ad5grp {
+		fsl,pins = <
+			VF610_PAD_PTC30__ADC0_SE5	0x00a1
+		>;
+	};
+
+	pinctrl_dspi0: dspi0grp {
+		fsl,pins = <
+			VF610_PAD_PTB18__DSPI0_CS1	0x1182
+			VF610_PAD_PTB19__DSPI0_CS0	0x1182
+			VF610_PAD_PTB20__DSPI0_SIN	0x1181
+			VF610_PAD_PTB21__DSPI0_SOUT	0x1182
+			VF610_PAD_PTB22__DSPI0_SCK	0x1182
+		>;
+	};
+
+	pinctrl_dspi2: dspi2grp {
+		fsl,pins = <
+			VF610_PAD_PTD31__DSPI2_CS1	0x1182
+			VF610_PAD_PTD30__DSPI2_CS0	0x1182
+			VF610_PAD_PTD29__DSPI2_SIN	0x1181
+			VF610_PAD_PTD28__DSPI2_SOUT	0x1182
+			VF610_PAD_PTD27__DSPI2_SCK	0x1182
+		>;
+	};
+
+	pinctrl_esdhc1: esdhc1grp {
+		fsl,pins = <
+			VF610_PAD_PTA24__ESDHC1_CLK	0x31ef
+			VF610_PAD_PTA25__ESDHC1_CMD	0x31ef
+			VF610_PAD_PTA26__ESDHC1_DAT0	0x31ef
+			VF610_PAD_PTA27__ESDHC1_DAT1	0x31ef
+			VF610_PAD_PTA28__ESDHC1_DATA2	0x31ef
+			VF610_PAD_PTA29__ESDHC1_DAT3	0x31ef
+			VF610_PAD_PTA7__GPIO_134	0x219d
+		>;
+	};
+
+	pinctrl_fec0: fec0grp {
+		fsl,pins = <
+			VF610_PAD_PTC0__ENET_RMII0_MDC	0x30d2
+			VF610_PAD_PTC1__ENET_RMII0_MDIO	0x30d3
+			VF610_PAD_PTC2__ENET_RMII0_CRS	0x30d1
+			VF610_PAD_PTC3__ENET_RMII0_RXD1	0x30d1
+			VF610_PAD_PTC4__ENET_RMII0_RXD0	0x30d1
+			VF610_PAD_PTC5__ENET_RMII0_RXER	0x30d1
+			VF610_PAD_PTC6__ENET_RMII0_TXD1	0x30d2
+			VF610_PAD_PTC7__ENET_RMII0_TXD0	0x30d2
+			VF610_PAD_PTC8__ENET_RMII0_TXEN	0x30d2
+		>;
+	};
+
+	pinctrl_fec1: fec1grp {
+		fsl,pins = <
+			VF610_PAD_PTA6__RMII_CLKIN		0x30d1
+			VF610_PAD_PTC9__ENET_RMII1_MDC		0x30d2
+			VF610_PAD_PTC10__ENET_RMII1_MDIO	0x30d3
+			VF610_PAD_PTC11__ENET_RMII1_CRS		0x30d1
+			VF610_PAD_PTC12__ENET_RMII1_RXD1	0x30d1
+			VF610_PAD_PTC13__ENET_RMII1_RXD0	0x30d1
+			VF610_PAD_PTC14__ENET_RMII1_RXER	0x30d1
+			VF610_PAD_PTC15__ENET_RMII1_TXD1	0x30d2
+			VF610_PAD_PTC16__ENET_RMII1_TXD0	0x30d2
+			VF610_PAD_PTC17__ENET_RMII1_TXEN	0x30d2
+		>;
+	};
+
+	pinctrl_gpio_e6185_eeprom_sel: pinctrl-gpio-e6185-eeprom-spi0 {
+		fsl,pins = <
+			VF610_PAD_PTE27__GPIO_132	0x33e2
+		>;
+	};
+
+	pinctrl_gpio_spi0: pinctrl-gpio-spi0 {
+		fsl,pins = <
+			VF610_PAD_PTB22__GPIO_44	0x33e2
+			VF610_PAD_PTB21__GPIO_43	0x33e2
+			VF610_PAD_PTB20__GPIO_42	0x33e1
+			VF610_PAD_PTB19__GPIO_41	0x33e2
+			VF610_PAD_PTB18__GPIO_40	0x33e2
+		>;
+	};
+
+	pinctrl_i2c_mux_reset: pinctrl-i2c-mux-reset {
+		fsl,pins = <
+			 VF610_PAD_PTE14__GPIO_119	0x31c2
+			 >;
+	};
+
+	pinctrl_i2c0: i2c0grp {
+		fsl,pins = <
+			VF610_PAD_PTB14__I2C0_SCL	0x37ff
+			VF610_PAD_PTB15__I2C0_SDA	0x37ff
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			VF610_PAD_PTB16__I2C1_SCL	0x37ff
+			VF610_PAD_PTB17__I2C1_SDA	0x37ff
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			VF610_PAD_PTA22__I2C2_SCL	0x37ff
+			VF610_PAD_PTA23__I2C2_SDA	0x37ff
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			VF610_PAD_PTA30__I2C3_SCL	0x37ff
+			VF610_PAD_PTA31__I2C3_SDA	0x37ff
+		>;
+	};
+
+	pinctrl_leds_debug: pinctrl-leds-debug {
+		fsl,pins = <
+			 VF610_PAD_PTD20__GPIO_74	0x31c2
+			 >;
+	};
+
+	pinctrl_mdio_mux: pinctrl-mdio-mux {
+		fsl,pins = <
+			VF610_PAD_PTA18__GPIO_8		0x31c2
+			VF610_PAD_PTA19__GPIO_9		0x31c2
+			VF610_PAD_PTB2__GPIO_24		0x31c2
+			VF610_PAD_PTB3__GPIO_25		0x31c2
+		>;
+	};
+
+	pinctrl_pca9554_22: pinctrl-pca95540-22 {
+		fsl,pins = <
+			VF610_PAD_PTB28__GPIO_98	0x219d
+		>;
+	};
+
+	pinctrl_pwm0: pwm0grp {
+		fsl,pins = <
+			VF610_PAD_PTB0__FTM0_CH0	0x1582
+			VF610_PAD_PTB1__FTM0_CH1	0x1582
+			VF610_PAD_PTB2__FTM0_CH2	0x1582
+			VF610_PAD_PTB3__FTM0_CH3	0x1582
+		>;
+	};
+
+	pinctrl_qspi0: qspi0grp {
+		fsl,pins = <
+			VF610_PAD_PTD7__QSPI0_B_QSCK	0x31c3
+			VF610_PAD_PTD8__QSPI0_B_CS0	0x31ff
+			VF610_PAD_PTD9__QSPI0_B_DATA3	0x31c3
+			VF610_PAD_PTD10__QSPI0_B_DATA2	0x31c3
+			VF610_PAD_PTD11__QSPI0_B_DATA1	0x31c3
+			VF610_PAD_PTD12__QSPI0_B_DATA0	0x31c3
+		>;
+	};
+
+	pinctrl_uart0: uart0grp {
+		fsl,pins = <
+			VF610_PAD_PTB10__UART0_TX	0x21a2
+			VF610_PAD_PTB11__UART0_RX	0x21a1
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			VF610_PAD_PTB23__UART1_TX	0x21a2
+			VF610_PAD_PTB24__UART1_RX	0x21a1
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			VF610_PAD_PTD0__UART2_TX	0x21a2
+			VF610_PAD_PTD1__UART2_RX	0x21a1
+		>;
+	};
+
+	pinctrl_usb_vbus: pinctrl-usb-vbus {
+		fsl,pins = <
+			VF610_PAD_PTA16__GPIO_6	0x31c2
+		>;
+	};
+
+	pinctrl_usb0_host: usb0-host-grp {
+		fsl,pins = <
+			VF610_PAD_PTD6__GPIO_85		0x0062
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 5c09754..04ef54d 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -95,6 +95,7 @@
 			compatible = "fsl,aips-bus", "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
+			reg = <0x40000000 0x00070000>;
 			ranges;
 
 			mscm_cpucfg: cpucfg@40001000 {
@@ -481,6 +482,7 @@
 			compatible = "fsl,aips-bus", "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
+			reg = <0x40080000 0x0007f000>;
 			ranges;
 
 			edma1: dma-controller@40098000 {
diff --git a/arch/arm/boot/dts/wd-mbwe.dts b/arch/arm/boot/dts/wd-mbwe.dts
new file mode 100644
index 0000000..ac3250a
--- /dev/null
+++ b/arch/arm/boot/dts/wd-mbwe.dts
@@ -0,0 +1,112 @@
+/*
+ * wd-mbwe.dtsi - Device tree file for Western Digital My Book World Edition
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Licensed under GPLv2 or later
+ */
+
+/dts-v1/;
+#include "ox810se.dtsi"
+
+/ {
+	model = "Western Digital My Book World Edition";
+
+	compatible = "wd,mbwe", "oxsemi,ox810se";
+
+	chosen {
+		bootargs = "console=ttyS1,115200n8 earlyprintk=serial";
+	};
+
+	memory {
+		/* 128Mbytes DDR */
+		reg = <0x48000000 0x8000000>;
+	};
+
+	aliases {
+		serial1 = &uart1;
+		gpio0 = &gpio0;
+		gpio1 = &gpio1;
+	};
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		poll-interval = <100>;
+
+		power {
+			label = "power";
+			gpios = <&gpio0 0 1>;
+			linux,code = <0x198>;
+		};
+
+		recovery {
+			label = "recovery";
+			gpios = <&gpio0 4 1>;
+			linux,code = <0xab>;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		a0 {
+			label = "activity0";
+			gpios = <&gpio0 25 0>;
+			default-state = "keep";
+		};
+
+		a1 {
+			label = "activity1";
+			gpios = <&gpio0 26 0>;
+			default-state = "keep";
+		};
+
+		a2 {
+			label = "activity2";
+			gpios = <&gpio0 5 0>;
+			default-state = "keep";
+		};
+
+		a3 {
+			label = "activity3";
+			gpios = <&gpio0 6 0>;
+			default-state = "keep";
+		};
+
+		a4 {
+			label = "activity4";
+			gpios = <&gpio0 7 0>;
+			default-state = "keep";
+		};
+
+		a5 {
+			label = "activity5";
+			gpios = <&gpio1 2 0>;
+			default-state = "keep";
+		};
+	};
+
+	i2c-gpio {
+		compatible = "i2c-gpio";
+		gpios = <&gpio0 3 0 /* sda */
+			 &gpio0 2 0 /* scl */
+			 >;
+		i2c-gpio,delay-us = <2>;        /* ~100 kHz */
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		rtc0: rtc@48 {
+			compatible = "st,m41t00";
+			reg = <0x68>;
+		};
+	};
+};
+
+&uart1 {
+	status = "okay";
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+};
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
new file mode 100644
index 0000000..b6e54ee
--- /dev/null
+++ b/arch/arm/configs/aspeed_g4_defconfig
@@ -0,0 +1,86 @@
+CONFIG_KERNEL_XZ=y
+CONFIG_SYSVIPC=y
+CONFIG_USELIB=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_BPF_SYSCALL=y
+# CONFIG_SHMEM is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+# CONFIG_ARCH_MULTI_V7 is not set
+CONFIG_ARCH_ASPEED=y
+CONFIG_MACH_ASPEED_G4=y
+CONFIG_DEBUG_RODATA=y
+CONFIG_AEABI=y
+CONFIG_UACCESS_WITH_MEMCPY=y
+CONFIG_SECCOMP=y
+# CONFIG_ATAGS is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_KEXEC=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=6
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_FANOTIFY=y
+CONFIG_PRINTK_TIME=1
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_PAGE_POISONING=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_UBSAN=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_8250=y
+CONFIG_DEBUG_UART_PHYS=0x1e784000
+CONFIG_DEBUG_UART_VIRT=0xe8784000
+CONFIG_EARLY_PRINTK=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_SPARC is not set
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
new file mode 100644
index 0000000..8926051
--- /dev/null
+++ b/arch/arm/configs/aspeed_g5_defconfig
@@ -0,0 +1,88 @@
+CONFIG_KERNEL_XZ=y
+CONFIG_SYSVIPC=y
+CONFIG_USELIB=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_BPF_SYSCALL=y
+# CONFIG_SHMEM is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_ARCH_MULTI_V6=y
+# CONFIG_ARCH_MULTI_V7 is not set
+CONFIG_ARCH_ASPEED=y
+CONFIG_MACH_ASPEED_G5=y
+CONFIG_DEBUG_RODATA=y
+CONFIG_AEABI=y
+CONFIG_UACCESS_WITH_MEMCPY=y
+CONFIG_SECCOMP=y
+# CONFIG_ATAGS is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_KEXEC=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=6
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_FANOTIFY=y
+CONFIG_PRINTK_TIME=1
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_PAGE_POISONING=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
+CONFIG_UBSAN=y
+CONFIG_UBSAN_ALIGNMENT=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_LL_UART_8250=y
+CONFIG_DEBUG_UART_PHYS=0x1e784000
+CONFIG_DEBUG_UART_VIRT=0xe8784000
+CONFIG_EARLY_PRINTK=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_SPARC is not set
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 1ef69fc..79de828 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -38,10 +38,13 @@
 CONFIG_VFP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_SUSPEND is not set
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
 CONFIG_NETWORK_SECMARK=y
 CONFIG_NETFILTER=y
 CONFIG_CFG80211=y
@@ -63,7 +66,6 @@
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_TTY_PRINTK=y
-CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BCM2835=y
 CONFIG_SPI=y
@@ -73,10 +75,10 @@
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_BCM2835_WDT=y
-CONFIG_FB=y
+CONFIG_DRM=y
+CONFIG_DRM_VC4=y
 CONFIG_FB_SIMPLE=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_SOC=y
@@ -87,7 +89,7 @@
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_BCM2835=y
+CONFIG_MMC_SDHCI_IPROC=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
@@ -122,6 +124,7 @@
 CONFIG_TMPFS_POSIX_ACL=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 235842c..f33d042 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -2,6 +2,7 @@
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -70,8 +71,10 @@
 CONFIG_MTD_CFI_INTELEXT=m
 CONFIG_MTD_CFI_AMDSTD=m
 CONFIG_MTD_PHYSMAP=m
+CONFIG_MTD_M25P80=m
 CONFIG_MTD_NAND=m
 CONFIG_MTD_NAND_DAVINCI=m
+CONFIG_MTD_SPI_NOR=m
 CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_RAM=y
@@ -117,7 +120,10 @@
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_DAVINCI=y
+CONFIG_SPI=y
+CONFIG_SPI_DAVINCI=m
 CONFIG_PINCTRL_SINGLE=y
+CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_PCF857X=y
 CONFIG_WATCHDOG=y
 CONFIG_DAVINCI_WATCHDOG=m
@@ -187,7 +193,6 @@
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_XFS_FS=m
-CONFIG_INOTIFY=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 6ffd7e7..10f49ab5 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -28,6 +28,10 @@
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=m
 CONFIG_CPUFREQ_DT=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_EXYNOS_CPUIDLE=y
@@ -74,6 +78,7 @@
 CONFIG_MOUSE_CYAPA=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_TOUCHSCREEN_MMS114=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MAX77693_HAPTIC=y
 CONFIG_INPUT_MAX8997_HAPTIC=y
@@ -93,6 +98,7 @@
 CONFIG_SPI_GPIO=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_WM8994=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_BATTERY_SBS=y
 CONFIG_BATTERY_MAX17040=y
@@ -134,6 +140,7 @@
 CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_TPS65090=y
+CONFIG_REGULATOR_WM8994=y
 CONFIG_MEDIA_SUPPORT=m
 CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_USB_SUPPORT=y
@@ -160,6 +167,8 @@
 CONFIG_SND=y
 CONFIG_SND_SOC=y
 CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994=y
+CONFIG_SND_SOC_SMDK_WM8994_PCM=y
 CONFIG_SND_SOC_SNOW=y
 CONFIG_SND_SOC_ODROIDX2=y
 CONFIG_SND_SIMPLE_CARD=y
@@ -210,6 +219,8 @@
 CONFIG_EXTCON_MAX8997=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
+CONFIG_CM36651=y
+CONFIG_AK8975=y
 CONFIG_PWM=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_PHY_EXYNOS5250_SATA=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 978c5de..21339ce 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -142,6 +142,7 @@
 CONFIG_SMSC911X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
 CONFIG_AT803X_PHY=y
+CONFIG_MICREL_PHY=y
 CONFIG_USB_PEGASUS=m
 CONFIG_USB_RTL8150=m
 CONFIG_USB_RTL8152=m
@@ -162,7 +163,9 @@
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_EGALAX=y
 CONFIG_TOUCHSCREEN_IMX6UL_TSC=y
+CONFIG_TOUCHSCREEN_EDT_FT5X06=y
 CONFIG_TOUCHSCREEN_MC13783=y
+CONFIG_TOUCHSCREEN_TSC2004=y
 CONFIG_TOUCHSCREEN_TSC2007=y
 CONFIG_TOUCHSCREEN_STMPE=y
 CONFIG_TOUCHSCREEN_SX8654=y
@@ -178,9 +181,11 @@
 CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
 # CONFIG_I2C_COMPAT is not set
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX_GPIO=y
 # CONFIG_I2C_HELPER_AUTO is not set
 CONFIG_I2C_ALGOPCF=m
 CONFIG_I2C_ALGOPCA=m
+CONFIG_I2C_GPIO=y
 CONFIG_I2C_IMX=y
 CONFIG_SPI=y
 CONFIG_SPI_IMX=y
@@ -313,6 +318,7 @@
 CONFIG_RTC_DRV_ISL1208=y
 CONFIG_RTC_DRV_PCF8523=y
 CONFIG_RTC_DRV_PCF8563=y
+CONFIG_RTC_DRV_M41T80=y
 CONFIG_RTC_DRV_MC13XXX=y
 CONFIG_RTC_DRV_MXC=y
 CONFIG_RTC_DRV_SNVS=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 5bcc9cf..faba04d 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -30,6 +30,8 @@
 CONFIG_PCI_MSI=y
 CONFIG_PCI_KEYSTONE=y
 CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_ARM_PSCI=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 9f56ca3..6ba430d 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -17,8 +17,6 @@
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_LPC32XX=y
-CONFIG_GPIO_PCA953X=y
-CONFIG_KEYBOARD_GPIO_POLLED=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
@@ -27,10 +25,8 @@
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="console=ttyS0,115200n81 root=/dev/ram0"
 CONFIG_CPU_IDLE=y
-CONFIG_FPE_NWFPE=y
 CONFIG_VFP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_BINFMT_AOUT=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -42,10 +38,7 @@
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
-CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
@@ -53,9 +46,7 @@
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
-CONFIG_MTD_M25P80=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_SLC_LPC32XX=y
 CONFIG_MTD_NAND_MLC_LPC32XX=y
@@ -70,7 +61,6 @@
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
-CONFIG_MII=y
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_CIRRUS is not set
 # CONFIG_NET_VENDOR_FARADAY is not set
@@ -91,6 +81,7 @@
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
 CONFIG_KEYBOARD_LPC32XX=y
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_TOUCHSCREEN=y
@@ -99,8 +90,8 @@
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_HS_LPC32XX=y
 CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_HS_LPC32XX=y
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
@@ -108,19 +99,20 @@
 CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_GENERIC_PLATFORM=y
 CONFIG_GPIO_EM=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
 CONFIG_GPIO_PL061=y
-CONFIG_GPIO_MAX7300=y
-CONFIG_GPIO_MAX732X=y
-CONFIG_GPIO_PCF857X=y
-CONFIG_GPIO_SX150X=y
 CONFIG_GPIO_ADP5588=y
 CONFIG_GPIO_ADNP=y
-CONFIG_GPIO_MAX7301=y
-CONFIG_GPIO_MCP23S08=y
-CONFIG_GPIO_MC33880=y
+CONFIG_GPIO_MAX7300=y
+CONFIG_GPIO_MAX732X=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCF857X=y
+CONFIG_GPIO_SX150X=y
 CONFIG_GPIO_74X164=y
+CONFIG_GPIO_MAX7301=y
+CONFIG_GPIO_MC33880=y
+CONFIG_GPIO_MCP23S08=y
 CONFIG_SENSORS_DS620=y
 CONFIG_SENSORS_MAX6639=y
 CONFIG_WATCHDOG=y
@@ -147,7 +139,6 @@
 # CONFIG_SND_SPI is not set
 CONFIG_SND_SOC=y
 CONFIG_USB=y
-CONFIG_USB_PHY=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_GADGET=y
@@ -179,6 +170,8 @@
 CONFIG_AMBA_PL08X=y
 CONFIG_STAGING=y
 CONFIG_LPC32XX_ADC=y
+CONFIG_MEMORY=y
+CONFIG_ARM_PL172_MPMC=y
 CONFIG_IIO=y
 CONFIG_MAX517=y
 CONFIG_PWM=y
@@ -198,9 +191,9 @@
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=y
+CONFIG_DEBUG_INFO=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
-CONFIG_DEBUG_INFO=y
 # CONFIG_FTRACE is not set
 # CONFIG_ARM_UNWIND is not set
 CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig
new file mode 100644
index 0000000..19d119f
--- /dev/null
+++ b/arch/arm/configs/mps2_defconfig
@@ -0,0 +1,109 @@
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_UID16 is not set
+# CONFIG_BASE_FULL is not set
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_BLOCK is not set
+# CONFIG_MMU is not set
+CONFIG_ARCH_MPS2=y
+CONFIG_SET_MEM_PARAM=y
+CONFIG_DRAM_BASE=0x21000000
+CONFIG_DRAM_SIZE=0x1000000
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_ATAGS is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_SHARED_FLAT=y
+# CONFIG_COREDUMP is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_CORE is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_HISILICON is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MPS2_UART_CONSOLE=y
+CONFIG_SERIAL_MPS2_UART=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_ARM_SP805_WATCHDOG=y
+CONFIG_MFD_SYSCON=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_ARM_TIMER_SP804=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_MEMTEST=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index e11d99d..4f82656 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -1,18 +1,26 @@
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=19
+CONFIG_CGROUPS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
 # CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_MVEBU=y
 CONFIG_MACH_KIRKWOOD=y
+CONFIG_ARCH_ASPEED=y
+CONFIG_MACH_ASPEED_G4=y
 CONFIG_ARCH_MXC=y
+CONFIG_MACH_MX21ADS=y
+CONFIG_MACH_MX27ADS=y
+CONFIG_MACH_MX27_3DS=y
+CONFIG_MACH_IMX27_VISSTRIM_M10=y
+CONFIG_MACH_PCA100=y
 CONFIG_MACH_IMX27_DT=y
 CONFIG_SOC_IMX25=y
 CONFIG_ARCH_ORION5X=y
@@ -60,13 +68,15 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
 CONFIG_NET_DSA=y
 CONFIG_NET_SWITCHDEV=y
 CONFIG_NET_PKTGEN=m
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_IMX_WEIM=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -91,10 +101,7 @@
 CONFIG_SATA_MV=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123=y
-CONFIG_NET_DSA_MV88E6171=y
-CONFIG_NET_DSA_MV88E6352=y
+CONFIG_NET_DSA_MV88E6XXX=y
 CONFIG_MV643XX_ETH=y
 CONFIG_R8169=y
 CONFIG_MARVELL_PHY=y
@@ -108,15 +115,22 @@
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_NR_UARTS=6
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_IMX=y
+CONFIG_SERIAL_IMX_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
 # CONFIG_I2C_COMPAT is not set
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_IMX=y
 CONFIG_I2C_MV64XXX=y
 CONFIG_I2C_NOMADIK=y
 CONFIG_SPI=y
+CONFIG_SPI_IMX=y
 CONFIG_SPI_ORION=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET=y
@@ -129,12 +143,13 @@
 CONFIG_SENSORS_LM85=y
 CONFIG_THERMAL=y
 CONFIG_KIRKWOOD_THERMAL=y
-CONFIG_WATCHDOG=y
 CONFIG_ORION_WATCHDOG=y
+CONFIG_IMX2_WDT=y
 # CONFIG_ABX500_CORE is not set
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_FB=y
+CONFIG_FB_IMX=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_SOC=y
@@ -158,7 +173,6 @@
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_PRINTER=m
 CONFIG_USB_STORAGE=y
 CONFIG_USB_STORAGE_DATAFAB=y
@@ -166,6 +180,8 @@
 CONFIG_USB_STORAGE_SDDR09=y
 CONFIG_USB_STORAGE_SDDR55=y
 CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_HOST=y
 CONFIG_MMC=y
 CONFIG_SDIO_UART=y
 CONFIG_MMC_MVSDIO=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2823490..8f85756 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -105,7 +105,6 @@
 CONFIG_ARCH_U8500=y
 CONFIG_MACH_HREFV60=y
 CONFIG_MACH_SNOWBALL=y
-CONFIG_MACH_UX500_DT=y
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_VEXPRESS_CA9X4=y
 CONFIG_ARCH_VEXPRESS_TC2_PM=y
@@ -119,7 +118,7 @@
 CONFIG_PCI_MVEBU=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCI_RCAR_GEN2=y
-CONFIG_PCI_RCAR_GEN2_PCIE=y
+CONFIG_PCIE_RCAR=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=16
@@ -131,6 +130,10 @@
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=m
 CONFIG_QORIQ_CPUFREQ=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
@@ -183,6 +186,7 @@
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
 CONFIG_MTD_NAND_ATMEL=y
 CONFIG_MTD_NAND_BRCMNAND=y
 CONFIG_MTD_NAND_VF610_NFC=y
@@ -322,6 +326,7 @@
 CONFIG_I2C_ARB_GPIO_CHALLENGE=m
 CONFIG_I2C_MUX_PCA954x=y
 CONFIG_I2C_MUX_PINCTRL=y
+CONFIG_I2C_DEMUX_PINCTRL=y
 CONFIG_I2C_AT91=m
 CONFIG_I2C_BCM2835=y
 CONFIG_I2C_CADENCE=y
@@ -345,6 +350,7 @@
 CONFIG_I2C_XILINX=y
 CONFIG_I2C_RCAR=y
 CONFIG_I2C_CROS_EC_TUNNEL=m
+CONFIG_I2C_SLAVE_EEPROM=y
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=m
 CONFIG_SPI_BCM2835=y
@@ -410,6 +416,7 @@
 CONFIG_POWER_RESET_GPIO_RESTART=y
 CONFIG_POWER_RESET_KEYSTONE=y
 CONFIG_POWER_RESET_RMOBILE=y
+CONFIG_POWER_RESET_ST=y
 CONFIG_POWER_AVS=y
 CONFIG_ROCKCHIP_IODOMAIN=y
 CONFIG_SENSORS_IIO_HWMON=y
@@ -430,6 +437,8 @@
 CONFIG_DA9063_WATCHDOG=m
 CONFIG_XILINX_WATCHDOG=y
 CONFIG_ARM_SP805_WATCHDOG=y
+CONFIG_AT91SAM9X_WATCHDOG=y
+CONFIG_SAMA5D4_WATCHDOG=y
 CONFIG_ORION_WATCHDOG=y
 CONFIG_ST_LPC_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
@@ -438,9 +447,11 @@
 CONFIG_MESON_WATCHDOG=y
 CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
 CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
 CONFIG_MFD_ATMEL_FLEXCOM=y
+CONFIG_MFD_ATMEL_HLCDC=m
 CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_AXP20X=y
 CONFIG_MFD_AXP20X_I2C=m
@@ -488,7 +499,7 @@
 CONFIG_REGULATOR_MAX77802=m
 CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_PBIAS=y
-CONFIG_REGULATOR_PWM=m
+CONFIG_REGULATOR_PWM=y
 CONFIG_REGULATOR_QCOM_RPM=y
 CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_S2MPS11=y
@@ -514,6 +525,7 @@
 CONFIG_SOC_CAMERA=m
 CONFIG_SOC_CAMERA_PLATFORM=m
 CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_VIDEO_ATMEL_ISI=m
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 CONFIG_VIDEO_RENESAS_JPU=m
 CONFIG_VIDEO_RENESAS_VSP1=m
@@ -532,7 +544,11 @@
 CONFIG_DRM_EXYNOS_FIMD=y
 CONFIG_DRM_EXYNOS_HDMI=y
 CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=m
 CONFIG_ROCKCHIP_DW_HDMI=m
+CONFIG_ROCKCHIP_DW_MIPI_DSI=m
+CONFIG_ROCKCHIP_INNO_HDMI=m
+CONFIG_DRM_ATMEL_HLCDC=m
 CONFIG_DRM_RCAR_DU=m
 CONFIG_DRM_RCAR_HDMI=y
 CONFIG_DRM_RCAR_LVDS=y
@@ -564,6 +580,8 @@
 CONFIG_SND_SOC=m
 CONFIG_SND_ATMEL_SOC=m
 CONFIG_SND_ATMEL_SOC_WM8904=m
+CONFIG_SND_ATMEL_SOC_PDMIC=m
+CONFIG_SND_BCM2835_SOC_I2S=m
 CONFIG_SND_SOC_FSL_SAI=m
 CONFIG_SND_SOC_ROCKCHIP=m
 CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
@@ -592,6 +610,7 @@
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
 CONFIG_USB_XHCI_RCAR=m
+CONFIG_USB_XHCI_TEGRA=m
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_MSM=m
 CONFIG_USB_EHCI_EXYNOS=y
@@ -609,7 +628,7 @@
 CONFIG_USB_MUSB_HDRC=m
 CONFIG_USB_MUSB_SUNXI=m
 CONFIG_USB_DWC3=y
-CONFIG_USB_DWC2=m
+CONFIG_USB_DWC2=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
@@ -640,7 +659,6 @@
 CONFIG_MMC_SDHCI_S3C=y
 CONFIG_MMC_SDHCI_S3C_DMA=y
 CONFIG_MMC_SDHCI_BCM_KONA=y
-CONFIG_MMC_SDHCI_BCM2835=y
 CONFIG_MMC_SDHCI_ST=y
 CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
@@ -767,6 +785,7 @@
 CONFIG_TI_AEMIF=y
 CONFIG_IIO=y
 CONFIG_AT91_ADC=m
+CONFIG_AT91_SAMA5D2_ADC=m
 CONFIG_BERLIN2_ADC=m
 CONFIG_EXYNOS_ADC=m
 CONFIG_VF610_ADC=m
@@ -775,6 +794,7 @@
 CONFIG_RASPBERRYPI_POWER=y
 CONFIG_PWM=y
 CONFIG_PWM_ATMEL=m
+CONFIG_PWM_ATMEL_HLCDC_PWM=m
 CONFIG_PWM_ATMEL_TCB=m
 CONFIG_PWM_FSL_FTM=m
 CONFIG_PWM_RENESAS_TPU=y
@@ -784,12 +804,13 @@
 CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
 CONFIG_PHY_HIX5HD2_SATA=y
-CONFIG_PWM_STI=m
+CONFIG_PWM_STI=y
 CONFIG_PWM_BCM2835=y
 CONFIG_OMAP_USB2=y
 CONFIG_TI_PIPE3=y
 CONFIG_PHY_BERLIN_USB=y
 CONFIG_PHY_BERLIN_SATA=y
+CONFIG_PHY_ROCKCHIP_DP=m
 CONFIG_PHY_ROCKCHIP_USB=m
 CONFIG_PHY_QCOM_APQ8064_SATA=m
 CONFIG_PHY_MIPHY28LP=y
@@ -800,6 +821,7 @@
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_SUN9I_USB=y
 CONFIG_PHY_SAMSUNG_USB2=m
+CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_NVMEM=y
 CONFIG_NVMEM_SUNXI_SID=y
 CONFIG_BCM2835_MBOX=y
@@ -829,6 +851,9 @@
 CONFIG_CRYPTO_DEV_TEGRA_AES=y
 CONFIG_CPUFREQ_DT=y
 CONFIG_KEYSTONE_IRQ=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_ST=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=m
 CONFIG_CRYPTO_DEV_SUN4I_SS=m
 CONFIG_CRYPTO_DEV_ROCKCHIP=m
 CONFIG_ARM_CRYPTO=y
diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig
index 4562006..6051c51 100644
--- a/arch/arm/configs/mvebu_v5_defconfig
+++ b/arch/arm/configs/mvebu_v5_defconfig
@@ -208,8 +208,8 @@
 # CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_USER=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
 CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_DEV_MV_CESA=y
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index dc5797a..486a4ca 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -66,7 +66,7 @@
 CONFIG_AHCI_MVEBU=y
 CONFIG_SATA_MV=y
 CONFIG_NETDEVICES=y
-CONFIG_NET_DSA_MV88E6171=y
+CONFIG_NET_DSA_MV88E6XXX=y
 CONFIG_MV643XX_ETH=y
 CONFIG_MVNETA=y
 CONFIG_MVPP2=y
@@ -155,3 +155,4 @@
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_USER=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 156bc88..ac717cc 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -10,18 +10,17 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
@@ -50,7 +49,6 @@
 CONFIG_SOC_AM43XX=y
 CONFIG_SOC_DRA7XX=y
 CONFIG_ARM_THUMBEE=y
-CONFIG_ARM_KERNMEM_PERMS=y
 CONFIG_ARM_ERRATA_411920=y
 CONFIG_ARM_ERRATA_430973=y
 CONFIG_SMP=y
@@ -69,7 +67,7 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_CPUFREQ_DT=y
+CONFIG_CPUFREQ_DT=m
 # CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
 CONFIG_CPU_IDLE=y
 CONFIG_BINFMT_MISC=y
@@ -86,7 +84,6 @@
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
-# CONFIG_INET_LRO is not set
 CONFIG_NETFILTER=y
 CONFIG_PHONET=m
 CONFIG_CAN=m
@@ -102,19 +99,18 @@
 CONFIG_BT_HCIBTUSB=m
 CONFIG_BT_HCIBTSDIO=m
 CONFIG_BT_HCIUART=m
-CONFIG_BT_HCIUART_H4=y
 CONFIG_BT_HCIUART_BCSP=y
 CONFIG_BT_HCIUART_LL=y
 CONFIG_BT_HCIUART_3WIRE=y
 CONFIG_BT_HCIBCM203X=m
 CONFIG_BT_HCIBPA10X=m
-CONFIG_CFG80211=m
 CONFIG_BT_HCIBFUSB=m
 CONFIG_BT_HCIVHCI=m
 CONFIG_BT_MRVL=m
 CONFIG_BT_MRVL_SDIO=m
 CONFIG_AF_RXRPC=m
-CONFIG_RXKAD=m
+CONFIG_RXKAD=y
+CONFIG_CFG80211=m
 CONFIG_MAC80211=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -129,6 +125,7 @@
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=m
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_ECC_BCH=y
 CONFIG_MTD_NAND_OMAP2=y
@@ -136,9 +133,8 @@
 CONFIG_MTD_ONENAND=y
 CONFIG_MTD_ONENAND_VERIFY_WRITE=y
 CONFIG_MTD_ONENAND_OMAP2=y
-CONFIG_MTD_UBI=y
 CONFIG_MTD_SPI_NOR=m
-CONFIG_MTD_M25P80=m
+CONFIG_MTD_UBI=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=16384
@@ -184,22 +180,19 @@
 CONFIG_USB_ALI_M5632=y
 CONFIG_USB_AN2720=y
 CONFIG_USB_EPSON2888=y
-CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_KC2190=y
 CONFIG_USB_CDC_PHONET=m
 CONFIG_LIBERTAS=m
 CONFIG_LIBERTAS_USB=m
 CONFIG_LIBERTAS_SDIO=m
 CONFIG_LIBERTAS_DEBUG=y
-CONFIG_WL_TI=y
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_MWIFIEX_USB=m
 CONFIG_WL12XX=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SPI=m
 CONFIG_WLCORE_SDIO=m
-CONFIG_MWIFIEX=m
-CONFIG_MWIFIEX_SDIO=m
-CONFIG_MWIFIEX_USB=m
 CONFIG_INPUT_JOYDEV=m
 CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_ATKBD=m
@@ -211,10 +204,10 @@
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ADS7846=m
 CONFIG_TOUCHSCREEN_EDT_FT5X06=m
+CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
 CONFIG_TOUCHSCREEN_PIXCIR=m
 CONFIG_TOUCHSCREEN_TSC2005=m
 CONFIG_TOUCHSCREEN_TSC2007=m
-CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_TPS65218_PWRBUTTON=m
 CONFIG_INPUT_TWL4030_PWRBUTTON=m
@@ -238,15 +231,14 @@
 CONFIG_SPI_TI_QSPI=m
 CONFIG_HSI=m
 CONFIG_OMAP_SSI=m
-CONFIG_NOKIA_MODEM=m
 CONFIG_SSI_PROTOCOL=m
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_DEBUG_GPIO=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_PCA953X=m
 CONFIG_GPIO_PCF857X=y
-CONFIG_GPIO_TWL4030=y
 CONFIG_GPIO_PALMAS=y
+CONFIG_GPIO_TWL4030=y
 CONFIG_W1=m
 CONFIG_HDQ_MASTER_OMAP=m
 CONFIG_BATTERY_BQ27XXX=m
@@ -273,11 +265,11 @@
 CONFIG_WATCHDOG=y
 CONFIG_OMAP_WATCHDOG=m
 CONFIG_TWL4030_WATCHDOG=m
+CONFIG_MFD_TI_AM335X_TSCADC=m
 CONFIG_MFD_PALMAS=y
 CONFIG_MFD_TPS65217=y
 CONFIG_MFD_TPS65218=y
 CONFIG_MFD_TPS65910=y
-CONFIG_MFD_TI_AM335X_TSCADC=m
 CONFIG_TWL6040_CORE=y
 CONFIG_REGULATOR_LP872X=y
 CONFIG_REGULATOR_PALMAS=y
@@ -292,8 +284,12 @@
 CONFIG_REGULATOR_TWL4030=y
 CONFIG_MEDIA_SUPPORT=m
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_RC_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_LIRC=m
+CONFIG_RC_DEVICES=y
+CONFIG_IR_RX51=m
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_VIDEO_OMAP3=m
 # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
@@ -302,10 +298,10 @@
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_OMAP2=m
 CONFIG_FB_OMAP5_DSS_HDMI=y
 CONFIG_FB_OMAP2_DSS_SDI=y
 CONFIG_FB_OMAP2_DSS_DSI=y
-CONFIG_FB_OMAP2=m
 CONFIG_FB_OMAP2_ENCODER_TFP410=m
 CONFIG_FB_OMAP2_ENCODER_TPD12S015=m
 CONFIG_FB_OMAP2_CONNECTOR_DVI=m
@@ -341,13 +337,11 @@
 CONFIG_SND_SOC=m
 CONFIG_SND_EDMA_SOC=m
 CONFIG_SND_AM33XX_SOC_EVM=m
-CONFIG_SND_DAVINCI_SOC_MCASP=m
 CONFIG_SND_OMAP_SOC=m
 CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
 CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m
 CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
 CONFIG_SND_SIMPLE_CARD=m
-CONFIG_SND_SOC_TLV320AIC3X=m
 CONFIG_HID_GENERIC=m
 CONFIG_USB_HIDDEV=y
 CONFIG_USB_KBD=m
@@ -356,6 +350,8 @@
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_MON=m
 CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_WDM=m
 CONFIG_USB_STORAGE=m
 CONFIG_USB_MUSB_HDRC=m
@@ -372,6 +368,7 @@
 CONFIG_USB_SERIAL_PL2303=m
 CONFIG_USB_TEST=m
 CONFIG_AM335X_PHY_USB=y
+CONFIG_TWL6030_USB=m
 CONFIG_USB_GADGET=m
 CONFIG_USB_GADGET_DEBUG=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -402,8 +399,8 @@
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=m
 CONFIG_LEDS_GPIO=m
-CONFIG_LEDS_PWM=m
 CONFIG_LEDS_PCA963X=m
+CONFIG_LEDS_PWM=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_ONESHOT=m
@@ -414,22 +411,22 @@
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_DS1307=m
-CONFIG_RTC_DRV_PALMAS=m
 CONFIG_RTC_DRV_TWL92330=y
 CONFIG_RTC_DRV_TWL4030=m
+CONFIG_RTC_DRV_PALMAS=m
 CONFIG_RTC_DRV_OMAP=m
 CONFIG_DMADEVICES=y
-CONFIG_TI_EDMA=y
 CONFIG_DMA_OMAP=y
-CONFIG_IOMMU_SUPPORT=y
+CONFIG_TI_EDMA=y
 CONFIG_OMAP_IOMMU=y
 CONFIG_EXTCON=m
-CONFIG_EXTCON_USB_GPIO=m
 CONFIG_EXTCON_PALMAS=m
+CONFIG_EXTCON_USB_GPIO=m
 CONFIG_TI_EMIF=m
 CONFIG_IIO=m
 CONFIG_TI_AM335X_ADC=m
 CONFIG_PWM=y
+CONFIG_PWM_OMAP_DMTIMER=m
 CONFIG_PWM_TIECAP=m
 CONFIG_PWM_TIEHRPWM=m
 CONFIG_PWM_TWL=m
@@ -440,8 +437,6 @@
 CONFIG_TWL4030_USB=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=y
@@ -476,7 +471,6 @@
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_SECURITY=y
 CONFIG_CRYPTO_MICHAEL_MIC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_CCITT=y
 CONFIG_CRC_T10DIF=y
 CONFIG_CRC_ITU_T=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 6a5bc27..27a70a7 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -85,8 +85,7 @@
 CONFIG_SATA_MV=y
 CONFIG_NETDEVICES=y
 CONFIG_MII=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123=y
+CONFIG_NET_DSA_MV88E6XXX=y
 CONFIG_MV643XX_ETH=y
 CONFIG_MARVELL_PHY=y
 # CONFIG_INPUT_MOUSEDEV is not set
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index afbda41..9cb1a85 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -1,8 +1,10 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EMBEDDED=y
 CONFIG_SLAB=y
@@ -133,6 +135,7 @@
 CONFIG_AT91SAM9X_WATCHDOG=y
 CONFIG_SAMA5D4_WATCHDOG=y
 CONFIG_MFD_ATMEL_FLEXCOM=y
+CONFIG_MFD_ATMEL_HLCDC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_ACT8865=y
@@ -142,11 +145,14 @@
 CONFIG_SOC_CAMERA=y
 CONFIG_VIDEO_ATMEL_ISI=y
 CONFIG_SOC_CAMERA_OV2640=y
-CONFIG_FB=y
+CONFIG_DRM=y
+CONFIG_DRM_ATMEL_HLCDC=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 # CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_SOUND=y
 CONFIG_SND=y
@@ -154,6 +160,7 @@
 CONFIG_SND_ATMEL_SOC=y
 CONFIG_SND_ATMEL_SOC_WM8904=y
 # CONFIG_HID_GENERIC is not set
+CONFIG_SND_ATMEL_SOC_PDMIC=y
 CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_EHCI_HCD=y
@@ -183,6 +190,7 @@
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT91RM9200=y
 CONFIG_DMADEVICES=y
+CONFIG_AT_HDMAC=y
 CONFIG_AT_XDMAC=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_IIO=y
@@ -190,6 +198,7 @@
 CONFIG_AT91_SAMA5D2_ADC=y
 CONFIG_PWM=y
 CONFIG_PWM_ATMEL=y
+CONFIG_PWM_ATMEL_HLCDC_PWM=y
 CONFIG_PWM_ATMEL_TCB=y
 CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index b7b714c..f2d6355 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -24,7 +24,7 @@
 CONFIG_ARM_ERRATA_754322=y
 CONFIG_PCI=y
 CONFIG_PCI_RCAR_GEN2=y
-CONFIG_PCI_RCAR_GEN2_PCIE=y
+CONFIG_PCIE_RCAR=y
 CONFIG_SMP=y
 CONFIG_SCHED_MC=y
 CONFIG_HAVE_ARM_ARCH_TIMER=y
@@ -99,11 +99,14 @@
 CONFIG_SERIAL_SH_SCI_NR_UARTS=20
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_DEMUX_PINCTRL=y
 CONFIG_I2C_EMEV2=y
 CONFIG_I2C_GPIO=y
 CONFIG_I2C_RIIC=y
 CONFIG_I2C_SH_MOBILE=y
 CONFIG_I2C_RCAR=y
+CONFIG_I2C_SLAVE_EEPROM=y
 CONFIG_SPI=y
 CONFIG_SPI_RSPI=y
 CONFIG_SPI_SH_MSIOF=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 3a36244..6012a1e 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -218,6 +218,7 @@
 CONFIG_SND_SOC_TEGRA_MAX98090=y
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_ACM=y
@@ -266,6 +267,7 @@
 CONFIG_AK8975=y
 CONFIG_PWM=y
 CONFIG_PWM_TEGRA=y
+CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 07055ea..b7b0918 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -45,7 +45,6 @@
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_SENSORS_BH1780=y
 CONFIG_NETDEVICES=y
 CONFIG_SMSC911X=y
 CONFIG_SMSC_PHY=y
@@ -63,6 +62,9 @@
 CONFIG_TOUCHSCREEN_BU21013=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_AB8500_PONKEY=y
+CONFIG_RMI4_CORE=y
+CONFIG_RMI4_I2C=y
+CONFIG_RMI4_F11=y
 # CONFIG_SERIO is not set
 CONFIG_VT_HW_CONSOLE_BINDING=y
 # CONFIG_LEGACY_PTYS is not set
@@ -104,12 +106,12 @@
 CONFIG_RTC_DRV_PL031=y
 CONFIG_DMADEVICES=y
 CONFIG_STE_DMA40=y
-CONFIG_STAGING=y
-CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
 CONFIG_HSEM_U8500=y
 CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
 CONFIG_IIO_ST_ACCEL_3AXIS=y
 CONFIG_IIO_ST_GYRO_3AXIS=y
+CONFIG_BH1780=y
 CONFIG_IIO_ST_MAGN_3AXIS=y
 CONFIG_IIO_ST_PRESS=y
 CONFIG_EXT2_FS=y
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 3848259..baefe1d 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -36,7 +36,7 @@
 
 struct of_cpuidle_method {
 	const char *method;
-	struct cpuidle_ops *ops;
+	const struct cpuidle_ops *ops;
 };
 
 #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops)			\
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b23c6c8..1ee94c7 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -276,7 +276,7 @@
 	int feature = (features >> field) & 15;
 
 	/* feature registers are signed values */
-	if (feature > 8)
+	if (feature > 7)
 		feature -= 16;
 
 	return feature;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 6ad1ced..02283eb 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -118,7 +118,7 @@
 
 #define arch_setup_dma_ops arch_setup_dma_ops
 extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			       struct iommu_ops *iommu, bool coherent);
+			       const struct iommu_ops *iommu, bool coherent);
 
 #define arch_teardown_dma_ops arch_teardown_dma_ops
 extern void arch_teardown_dma_ops(struct device *dev);
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index fc8ba16..99d9f63 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -84,6 +84,7 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_CPU_CP15_MMU
 static inline unsigned int get_domain(void)
 {
 	unsigned int domain;
@@ -103,6 +104,16 @@
 	  : : "r" (val) : "memory");
 	isb();
 }
+#else
+static inline unsigned int get_domain(void)
+{
+	return 0;
+}
+
+static inline void set_domain(unsigned val)
+{
+}
+#endif
 
 #ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)					\
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index e0eea72..a708fa1 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -17,33 +17,27 @@
 #include <asm/mach/map.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/ptrace.h>
 
 #ifdef CONFIG_EFI
 void efi_init(void);
 
 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 
-#define efi_call_virt(f, ...)						\
+#define arch_efi_call_virt_setup()	efi_virtmap_load()
+#define arch_efi_call_virt_teardown()	efi_virtmap_unload()
+
+#define arch_efi_call_virt(f, args...)					\
 ({									\
 	efi_##f##_t *__f;						\
-	efi_status_t __s;						\
-									\
-	efi_virtmap_load();						\
 	__f = efi.systab->runtime->f;					\
-	__s = __f(__VA_ARGS__);						\
-	efi_virtmap_unload();						\
-	__s;								\
+	__f(args);							\
 })
 
-#define __efi_call_virt(f, ...)						\
-({									\
-	efi_##f##_t *__f;						\
-									\
-	efi_virtmap_load();						\
-	__f = efi.systab->runtime->f;					\
-	__f(__VA_ARGS__);						\
-	efi_virtmap_unload();						\
-})
+#define ARCH_EFI_IRQ_FLAGS_MASK \
+	(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
+	 PSR_T_BIT | MODE_MASK)
 
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
@@ -59,7 +53,16 @@
 
 /* arch specific definitions used by the stub code */
 
-#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+#define efi_call_early(f, ...)		sys_table_arg->boottime->f(__VA_ARGS__)
+#define __efi_call_early(f, ...)	f(__VA_ARGS__)
+#define efi_is_64bit()			(false)
+
+struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg);
+void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si);
+
+static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+{
+}
 
 /*
  * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 3850701..0df6b1f 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -187,6 +187,7 @@
 struct kvm_vcpu_stat {
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
+	u32 halt_poll_invalid;
 	u32 halt_wakeup;
 	u32 hvc_exit_stat;
 	u64 wfe_exit_stat;
@@ -265,6 +266,15 @@
 	kvm_call_hyp(__init_stage2_translation);
 }
 
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+					phys_addr_t phys_idmap_start)
+{
+	/*
+	 * TODO
+	 * kvm_call_reset(boot_pgd_ptr, phys_idmap_start);
+	 */
+}
+
 static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 {
 	return 0;
@@ -277,11 +287,11 @@
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
-static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index da44be9..f9a6506 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -47,6 +47,7 @@
 #include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
+#include <asm/stage2_pgtable.h>
 
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
@@ -66,6 +67,7 @@
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_mmu_get_boot_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
@@ -105,14 +107,16 @@
 	clean_pte_table(pte);
 }
 
-static inline void kvm_set_s2pte_writable(pte_t *pte)
+static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
 {
-	pte_val(*pte) |= L_PTE_S2_RDWR;
+	pte_val(pte) |= L_PTE_S2_RDWR;
+	return pte;
 }
 
-static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
 {
-	pmd_val(*pmd) |= L_PMD_S2_RDWR;
+	pmd_val(pmd) |= L_PMD_S2_RDWR;
+	return pmd;
 }
 
 static inline void kvm_set_s2pte_readonly(pte_t *pte)
@@ -135,22 +139,6 @@
 	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
 }
 
-
-/* Open coded p*d_addr_end that can deal with 64bit addresses */
-#define kvm_pgd_addr_end(addr, end)					\
-({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\
-	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
-})
-
-#define kvm_pud_addr_end(addr,end)		(end)
-
-#define kvm_pmd_addr_end(addr, end)					\
-({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\
-	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
-})
-
-#define kvm_pgd_index(addr)			pgd_index(addr)
-
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
@@ -159,19 +147,11 @@
 
 #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
 #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
-#define kvm_pud_table_empty(kvm, pudp) (0)
+#define kvm_pud_table_empty(kvm, pudp) false
 
-#define KVM_PREALLOC_LEVEL	0
-
-static inline void *kvm_get_hwpgd(struct kvm *kvm)
-{
-	return kvm->arch.pgd;
-}
-
-static inline unsigned int kvm_get_hwpgd_size(void)
-{
-	return PTRS_PER_S2_PGD * sizeof(pgd_t);
-}
+#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
+#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#define hyp_pud_table_empty(pudp) false
 
 struct kvm;
 
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index fa5b42d..3cc14dd 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
 
 #include <linux/compiler.h>
 #include <linux/sched.h>
+#include <linux/preempt.h>
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/proc-fns.h>
@@ -66,6 +67,7 @@
 		cpu_switch_mm(mm->pgd, mm);
 }
 
+#ifndef MODULE
 #define finish_arch_post_lock_switch \
 	finish_arch_post_lock_switch
 static inline void finish_arch_post_lock_switch(void)
@@ -87,6 +89,7 @@
 		preempt_enable_no_resched();
 	}
 }
+#endif /* !MODULE */
 
 #endif	/* CONFIG_MMU */
 
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
new file mode 100644
index 0000000..460d616
--- /dev/null
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ *
+ * stage2 page table helpers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_S2_PGTABLE_H_
+#define __ARM_S2_PGTABLE_H_
+
+#define stage2_pgd_none(pgd)			pgd_none(pgd)
+#define stage2_pgd_clear(pgd)			pgd_clear(pgd)
+#define stage2_pgd_present(pgd)			pgd_present(pgd)
+#define stage2_pgd_populate(pgd, pud)		pgd_populate(NULL, pgd, pud)
+#define stage2_pud_offset(pgd, address)		pud_offset(pgd, address)
+#define stage2_pud_free(pud)			pud_free(NULL, pud)
+
+#define stage2_pud_none(pud)			pud_none(pud)
+#define stage2_pud_clear(pud)			pud_clear(pud)
+#define stage2_pud_present(pud)			pud_present(pud)
+#define stage2_pud_populate(pud, pmd)		pud_populate(NULL, pud, pmd)
+#define stage2_pmd_offset(pud, address)		pmd_offset(pud, address)
+#define stage2_pmd_free(pmd)			pmd_free(NULL, pmd)
+
+#define stage2_pud_huge(pud)			pud_huge(pud)
+
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
+static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
+
+	return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+#define stage2_pud_addr_end(addr, end)		(end)
+
+static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;
+
+	return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+#define stage2_pgd_index(addr)				pgd_index(addr)
+
+#define stage2_pte_table_empty(ptep)			kvm_page_empty(ptep)
+#define stage2_pmd_table_empty(pmdp)			kvm_page_empty(pmdp)
+#define stage2_pud_table_empty(pudp)			false
+
+#endif	/* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 7b84657..194b699 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
  * This may need to be greater than __NR_last_syscall+1 in order to
  * account for the padding in the syscall table
  */
-#define __NR_syscalls  (392)
+#define __NR_syscalls  (396)
 
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 5dd2528..2cb9dc7 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -418,6 +418,8 @@
 #define __NR_membarrier			(__NR_SYSCALL_BASE+389)
 #define __NR_mlock2			(__NR_SYSCALL_BASE+390)
 #define __NR_copy_file_range		(__NR_SYSCALL_BASE+391)
+#define __NR_preadv2			(__NR_SYSCALL_BASE+392)
+#define __NR_pwritev2			(__NR_SYSCALL_BASE+393)
 
 /*
  * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index dfc7cd6..703fa0f 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -399,8 +399,10 @@
 		CALL(sys_execveat)
 		CALL(sys_userfaultfd)
 		CALL(sys_membarrier)
-		CALL(sys_mlock2)
+/* 390 */	CALL(sys_mlock2)
 		CALL(sys_copy_file_range)
+		CALL(sys_preadv2)
+		CALL(sys_pwritev2)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 703926e..a44b268e 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -70,7 +70,7 @@
  *
  * Returns a struct cpuidle_ops pointer, NULL if not found.
  */
-static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
+static const struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
 {
 	struct of_cpuidle_method *m = __cpuidle_method_of_table;
 
@@ -88,7 +88,7 @@
  *
  * Get the method name defined in the 'enable-method' property, retrieve the
  * associated cpuidle_ops and do a struct copy. This copy is needed because all
- * cpuidle_ops are tagged __initdata and will be unloaded after the init
+ * cpuidle_ops are tagged __initconst and will be unloaded after the init
  * process.
  *
 * Return 0 on success, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
@@ -97,7 +97,7 @@
 static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
 {
 	const char *enable_method;
-	struct cpuidle_ops *ops;
+	const struct cpuidle_ops *ops;
 
 	enable_method = of_get_property(dn, "enable-method", NULL);
 	if (!enable_method)
diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
index ff8a9d8..9f43ba0 100644
--- a/arch/arm/kernel/efi.c
+++ b/arch/arm/kernel/efi.c
@@ -11,6 +11,41 @@
 #include <asm/mach/map.h>
 #include <asm/mmu_context.h>
 
+static int __init set_permissions(pte_t *ptep, pgtable_t token,
+				  unsigned long addr, void *data)
+{
+	efi_memory_desc_t *md = data;
+	pte_t pte = *ptep;
+
+	if (md->attribute & EFI_MEMORY_RO)
+		pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+	if (md->attribute & EFI_MEMORY_XP)
+		pte = set_pte_bit(pte, __pgprot(L_PTE_XN));
+	set_pte_ext(ptep, pte, PTE_EXT_NG);
+	return 0;
+}
+
+int __init efi_set_mapping_permissions(struct mm_struct *mm,
+				       efi_memory_desc_t *md)
+{
+	unsigned long base, size;
+
+	base = md->virt_addr;
+	size = md->num_pages << EFI_PAGE_SHIFT;
+
+	/*
+	 * We can only use apply_to_page_range() if we can guarantee that the
+	 * entire region was mapped using pages. This should be the case if the
+	 * region does not cover any naturally aligned SECTION_SIZE sized
+	 * blocks.
+	 */
+	if (round_down(base + size, SECTION_SIZE) <
+	    round_up(base, SECTION_SIZE) + SECTION_SIZE)
+		return apply_to_page_range(mm, base, size, set_permissions, md);
+
+	return 0;
+}
+
 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 {
 	struct map_desc desc = {
@@ -34,5 +69,11 @@
 		desc.type = MT_DEVICE;
 
 	create_mapping_late(mm, &desc, true);
+
+	/*
+	 * If stricter permissions were specified, apply them now.
+	 */
+	if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
+		return efi_set_mapping_permissions(mm, md);
 	return 0;
 }
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 9b8c5a1..fb1a69e 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -236,7 +236,7 @@
 	mov	r0, #CONFIG_VECTORS_BASE	@ Cover from VECTORS_BASE
 	ldr	r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL)
 	/* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */
-	mov	r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
+	mov	r6, #(((2 * PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
 
 	setup_region r0, r5, r6, MPU_DATA_SIDE	@ VECTORS_BASE, PL0 NA, enabled
 	beq	3f				@ Memory-map not unified
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 6284779..b8df458 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -631,7 +631,7 @@
 	info->address &= ~alignment_mask;
 	info->ctrl.len <<= offset;
 
-	if (!bp->overflow_handler) {
+	if (is_default_overflow_handler(bp)) {
 		/*
 		 * Mismatch breakpoints are required for single-stepping
 		 * breakpoints.
@@ -754,7 +754,7 @@
 		 * mismatch breakpoint so we can single-step over the
 		 * watchpoint trigger.
 		 */
-		if (!wp->overflow_handler)
+		if (is_default_overflow_handler(wp))
 			enable_single_step(wp, instruction_pointer(regs));
 
 unlock:
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 4e02ae5..27563be 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -75,7 +75,7 @@
 
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
-	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+	while ((entry->nr < sysctl_perf_event_max_stack) &&
 	       tail && !((unsigned long)tail & 0x3))
 		tail = user_backtrace(tail, entry);
 }
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 139791e..7d4e285 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -430,11 +430,13 @@
 	pr_info("CPU: div instructions available: patching division code\n");
 
 	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
+	asm ("" : "+g" (fn_addr));
 	((u32 *)fn_addr)[0] = udiv_instruction();
 	((u32 *)fn_addr)[1] = bx_lr_instruction();
 	flush_icache_range(fn_addr, fn_addr + 8);
 
 	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
+	asm ("" : "+g" (fn_addr));
 	((u32 *)fn_addr)[0] = sdiv_instruction();
 	((u32 *)fn_addr)[1] = bx_lr_instruction();
 	flush_icache_range(fn_addr, fn_addr + 8);
@@ -510,7 +512,7 @@
 	 */
 	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
 	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
-	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
+	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
 		elf_hwcap &= ~HWCAP_SWP;
 }
 
@@ -881,7 +883,8 @@
 		request_resource(&ioport_resource, &lp2);
 }
 
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
+    defined(CONFIG_EFI)
 struct screen_info screen_info = {
  .orig_video_lines	= 30,
  .orig_video_cols	= 80,
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 6accd66..237d5d8 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -16,7 +16,6 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
 
-#include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -66,6 +65,8 @@
 
 static bool vgic_present;
 
+static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
+
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(preemptible());
@@ -90,11 +91,6 @@
 	return &kvm_arm_running_vcpu;
 }
 
-int kvm_arch_hardware_enable(void)
-{
-	return 0;
-}
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -448,7 +444,7 @@
 	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
 
 	/* update vttbr to be used with the new vmid */
-	pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
+	pgd_phys = virt_to_phys(kvm->arch.pgd);
 	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = pgd_phys | vmid;
@@ -1033,11 +1029,6 @@
 	}
 }
 
-static void cpu_init_stage2(void *dummy)
-{
-	__cpu_init_stage2();
-}
-
 static void cpu_init_hyp_mode(void *dummy)
 {
 	phys_addr_t boot_pgd_ptr;
@@ -1061,36 +1052,91 @@
 	kvm_arm_init_debug();
 }
 
-static int hyp_init_cpu_notify(struct notifier_block *self,
-			       unsigned long action, void *cpu)
+static void cpu_hyp_reinit(void)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
+	if (is_kernel_in_hyp_mode()) {
+		/*
+		 * __cpu_init_stage2() is safe to call even if the PM
+		 * event was cancelled before the CPU was reset.
+		 */
+		__cpu_init_stage2();
+	} else {
 		if (__hyp_get_vectors() == hyp_default_vectors)
 			cpu_init_hyp_mode(NULL);
-		break;
 	}
-
-	return NOTIFY_OK;
 }
 
-static struct notifier_block hyp_init_cpu_nb = {
-	.notifier_call = hyp_init_cpu_notify,
-};
+static void cpu_hyp_reset(void)
+{
+	phys_addr_t boot_pgd_ptr;
+	phys_addr_t phys_idmap_start;
+
+	if (!is_kernel_in_hyp_mode()) {
+		boot_pgd_ptr = kvm_mmu_get_boot_httbr();
+		phys_idmap_start = kvm_get_idmap_start();
+
+		__cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start);
+	}
+}
+
+static void _kvm_arch_hardware_enable(void *discard)
+{
+	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
+		cpu_hyp_reinit();
+		__this_cpu_write(kvm_arm_hardware_enabled, 1);
+	}
+}
+
+int kvm_arch_hardware_enable(void)
+{
+	_kvm_arch_hardware_enable(NULL);
+	return 0;
+}
+
+static void _kvm_arch_hardware_disable(void *discard)
+{
+	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
+		cpu_hyp_reset();
+		__this_cpu_write(kvm_arm_hardware_enabled, 0);
+	}
+}
+
+void kvm_arch_hardware_disable(void)
+{
+	_kvm_arch_hardware_disable(NULL);
+}
 
 #ifdef CONFIG_CPU_PM
 static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 				    unsigned long cmd,
 				    void *v)
 {
-	if (cmd == CPU_PM_EXIT &&
-	    __hyp_get_vectors() == hyp_default_vectors) {
-		cpu_init_hyp_mode(NULL);
-		return NOTIFY_OK;
-	}
+	/*
+	 * kvm_arm_hardware_enabled is left with its old value over
+	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
+	 * re-enable hyp.
+	 */
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		if (__this_cpu_read(kvm_arm_hardware_enabled))
+			/*
+			 * don't update kvm_arm_hardware_enabled here
+			 * so that the hardware will be re-enabled
+			 * when we resume. See below.
+			 */
+			cpu_hyp_reset();
 
-	return NOTIFY_DONE;
+		return NOTIFY_OK;
+	case CPU_PM_EXIT:
+		if (__this_cpu_read(kvm_arm_hardware_enabled))
+			/* The hardware was enabled before suspend. */
+			cpu_hyp_reinit();
+
+		return NOTIFY_OK;
+
+	default:
+		return NOTIFY_DONE;
+	}
 }
 
 static struct notifier_block hyp_init_cpu_pm_nb = {
@@ -1101,10 +1147,17 @@
 {
 	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }
+static void __init hyp_cpu_pm_exit(void)
+{
+	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
+}
 #else
 static inline void hyp_cpu_pm_init(void)
 {
 }
+static inline void hyp_cpu_pm_exit(void)
+{
+}
 #endif
 
 static void teardown_common_resources(void)
@@ -1125,7 +1178,17 @@
 
 static int init_subsystems(void)
 {
-	int err;
+	int err = 0;
+
+	/*
+	 * Enable hardware so that subsystem initialisation can access EL2.
+	 */
+	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
+
+	/*
+	 * Register CPU low-power notifier
+	 */
+	hyp_cpu_pm_init();
 
 	/*
 	 * Init HYP view of VGIC
@@ -1138,9 +1201,10 @@
 	case -ENODEV:
 	case -ENXIO:
 		vgic_present = false;
+		err = 0;
 		break;
 	default:
-		return err;
+		goto out;
 	}
 
 	/*
@@ -1148,12 +1212,15 @@
 	 */
 	err = kvm_timer_hyp_init();
 	if (err)
-		return err;
+		goto out;
 
 	kvm_perf_init();
 	kvm_coproc_table_init();
 
-	return 0;
+out:
+	on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
+
+	return err;
 }
 
 static void teardown_hyp_mode(void)
@@ -1166,15 +1233,11 @@
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+	hyp_cpu_pm_exit();
 }
 
 static int init_vhe_mode(void)
 {
-	/*
-	 * Execute the init code on each CPU.
-	 */
-	on_each_cpu(cpu_init_stage2, NULL, 1);
-
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
 	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
@@ -1261,28 +1324,10 @@
 		}
 	}
 
-	/*
-	 * Execute the init code on each CPU.
-	 */
-	on_each_cpu(cpu_init_hyp_mode, NULL, 1);
-
 #ifndef CONFIG_HOTPLUG_CPU
 	free_boot_hyp_pgd();
 #endif
 
-	cpu_notifier_register_begin();
-
-	err = __register_cpu_notifier(&hyp_init_cpu_nb);
-
-	cpu_notifier_register_done();
-
-	if (err) {
-		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
-		goto out_err;
-	}
-
-	hyp_cpu_pm_init();
-
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
 	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 58dbd5c..45c43ae 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -43,11 +43,9 @@
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define S2_PGD_SIZE	(PTRS_PER_S2_PGD * sizeof(pgd_t))
 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
 
-#define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))
-#define kvm_pud_huge(_x)	pud_huge(_x)
-
 #define KVM_S2PTE_FLAG_IS_IOMAP		(1UL << 0)
 #define KVM_S2_FLAG_LOGGING_ACTIVE	(1UL << 1)
 
@@ -69,14 +67,7 @@
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-	/*
-	 * This function also gets called when dealing with HYP page
-	 * tables. As HYP doesn't have an associated struct kvm (and
-	 * the HYP page tables are fairly static), we don't do
-	 * anything there.
-	 */
-	if (kvm)
-		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
 /*
@@ -115,7 +106,7 @@
  */
 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
 {
-	if (!kvm_pmd_huge(*pmd))
+	if (!pmd_thp_or_huge(*pmd))
 		return;
 
 	pmd_clear(pmd);
@@ -155,29 +146,29 @@
 	return p;
 }
 
-static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
+static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
-	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
-	pgd_clear(pgd);
+	pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
+	stage2_pgd_clear(pgd);
 	kvm_tlb_flush_vmid_ipa(kvm, addr);
-	pud_free(NULL, pud_table);
+	stage2_pud_free(pud_table);
 	put_page(virt_to_page(pgd));
 }
 
-static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
+static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
-	pmd_t *pmd_table = pmd_offset(pud, 0);
-	VM_BUG_ON(pud_huge(*pud));
-	pud_clear(pud);
+	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
+	VM_BUG_ON(stage2_pud_huge(*pud));
+	stage2_pud_clear(pud);
 	kvm_tlb_flush_vmid_ipa(kvm, addr);
-	pmd_free(NULL, pmd_table);
+	stage2_pmd_free(pmd_table);
 	put_page(virt_to_page(pud));
 }
 
-static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
+static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
 	pte_t *pte_table = pte_offset_kernel(pmd, 0);
-	VM_BUG_ON(kvm_pmd_huge(*pmd));
+	VM_BUG_ON(pmd_thp_or_huge(*pmd));
 	pmd_clear(pmd);
 	kvm_tlb_flush_vmid_ipa(kvm, addr);
 	pte_free_kernel(NULL, pte_table);
@@ -204,7 +195,7 @@
  * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
  * the IO subsystem will never hit in the cache.
  */
-static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
+static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
 		       phys_addr_t addr, phys_addr_t end)
 {
 	phys_addr_t start_addr = addr;
@@ -226,21 +217,21 @@
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
-	if (kvm_pte_table_empty(kvm, start_pte))
-		clear_pmd_entry(kvm, pmd, start_addr);
+	if (stage2_pte_table_empty(start_pte))
+		clear_stage2_pmd_entry(kvm, pmd, start_addr);
 }
 
-static void unmap_pmds(struct kvm *kvm, pud_t *pud,
+static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
 		       phys_addr_t addr, phys_addr_t end)
 {
 	phys_addr_t next, start_addr = addr;
 	pmd_t *pmd, *start_pmd;
 
-	start_pmd = pmd = pmd_offset(pud, addr);
+	start_pmd = pmd = stage2_pmd_offset(pud, addr);
 	do {
-		next = kvm_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd)) {
+			if (pmd_thp_or_huge(*pmd)) {
 				pmd_t old_pmd = *pmd;
 
 				pmd_clear(pmd);
@@ -250,57 +241,64 @@
 
 				put_page(virt_to_page(pmd));
 			} else {
-				unmap_ptes(kvm, pmd, addr, next);
+				unmap_stage2_ptes(kvm, pmd, addr, next);
 			}
 		}
 	} while (pmd++, addr = next, addr != end);
 
-	if (kvm_pmd_table_empty(kvm, start_pmd))
-		clear_pud_entry(kvm, pud, start_addr);
+	if (stage2_pmd_table_empty(start_pmd))
+		clear_stage2_pud_entry(kvm, pud, start_addr);
 }
 
-static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
+static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
 		       phys_addr_t addr, phys_addr_t end)
 {
 	phys_addr_t next, start_addr = addr;
 	pud_t *pud, *start_pud;
 
-	start_pud = pud = pud_offset(pgd, addr);
+	start_pud = pud = stage2_pud_offset(pgd, addr);
 	do {
-		next = kvm_pud_addr_end(addr, end);
-		if (!pud_none(*pud)) {
-			if (pud_huge(*pud)) {
+		next = stage2_pud_addr_end(addr, end);
+		if (!stage2_pud_none(*pud)) {
+			if (stage2_pud_huge(*pud)) {
 				pud_t old_pud = *pud;
 
-				pud_clear(pud);
+				stage2_pud_clear(pud);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
-
 				kvm_flush_dcache_pud(old_pud);
-
 				put_page(virt_to_page(pud));
 			} else {
-				unmap_pmds(kvm, pud, addr, next);
+				unmap_stage2_pmds(kvm, pud, addr, next);
 			}
 		}
 	} while (pud++, addr = next, addr != end);
 
-	if (kvm_pud_table_empty(kvm, start_pud))
-		clear_pgd_entry(kvm, pgd, start_addr);
+	if (stage2_pud_table_empty(start_pud))
+		clear_stage2_pgd_entry(kvm, pgd, start_addr);
 }
 
-
-static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
-			phys_addr_t start, u64 size)
+/**
+ * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
+ * @kvm:   The VM pointer
+ * @start: The intermediate physical base address of the range to unmap
+ * @size:  The size of the area to unmap
+ *
+ * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
+ * be called while holding mmu_lock (unless for freeing the stage2 pgd before
+ * destroying the VM), otherwise another faulting VCPU may come in and mess
+ * with things behind our backs.
+ */
+static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 {
 	pgd_t *pgd;
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
-	pgd = pgdp + kvm_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
-		next = kvm_pgd_addr_end(addr, end);
-		if (!pgd_none(*pgd))
-			unmap_puds(kvm, pgd, addr, next);
+		next = stage2_pgd_addr_end(addr, end);
+		if (!stage2_pgd_none(*pgd))
+			unmap_stage2_puds(kvm, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
 
@@ -322,11 +320,11 @@
 	pmd_t *pmd;
 	phys_addr_t next;
 
-	pmd = pmd_offset(pud, addr);
+	pmd = stage2_pmd_offset(pud, addr);
 	do {
-		next = kvm_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd))
+			if (pmd_thp_or_huge(*pmd))
 				kvm_flush_dcache_pmd(*pmd);
 			else
 				stage2_flush_ptes(kvm, pmd, addr, next);
@@ -340,11 +338,11 @@
 	pud_t *pud;
 	phys_addr_t next;
 
-	pud = pud_offset(pgd, addr);
+	pud = stage2_pud_offset(pgd, addr);
 	do {
-		next = kvm_pud_addr_end(addr, end);
-		if (!pud_none(*pud)) {
-			if (pud_huge(*pud))
+		next = stage2_pud_addr_end(addr, end);
+		if (!stage2_pud_none(*pud)) {
+			if (stage2_pud_huge(*pud))
 				kvm_flush_dcache_pud(*pud);
 			else
 				stage2_flush_pmds(kvm, pud, addr, next);
@@ -360,9 +358,9 @@
 	phys_addr_t next;
 	pgd_t *pgd;
 
-	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
-		next = kvm_pgd_addr_end(addr, end);
+		next = stage2_pgd_addr_end(addr, end);
 		stage2_flush_puds(kvm, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
@@ -391,6 +389,100 @@
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
+static void clear_hyp_pgd_entry(pgd_t *pgd)
+{
+	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
+	pgd_clear(pgd);
+	pud_free(NULL, pud_table);
+	put_page(virt_to_page(pgd));
+}
+
+static void clear_hyp_pud_entry(pud_t *pud)
+{
+	pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
+	VM_BUG_ON(pud_huge(*pud));
+	pud_clear(pud);
+	pmd_free(NULL, pmd_table);
+	put_page(virt_to_page(pud));
+}
+
+static void clear_hyp_pmd_entry(pmd_t *pmd)
+{
+	pte_t *pte_table = pte_offset_kernel(pmd, 0);
+	VM_BUG_ON(pmd_thp_or_huge(*pmd));
+	pmd_clear(pmd);
+	pte_free_kernel(NULL, pte_table);
+	put_page(virt_to_page(pmd));
+}
+
+static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
+{
+	pte_t *pte, *start_pte;
+
+	start_pte = pte = pte_offset_kernel(pmd, addr);
+	do {
+		if (!pte_none(*pte)) {
+			kvm_set_pte(pte, __pte(0));
+			put_page(virt_to_page(pte));
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	if (hyp_pte_table_empty(start_pte))
+		clear_hyp_pmd_entry(pmd);
+}
+
+static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t next;
+	pmd_t *pmd, *start_pmd;
+
+	start_pmd = pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		/* Hyp doesn't use huge pmds */
+		if (!pmd_none(*pmd))
+			unmap_hyp_ptes(pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
+
+	if (hyp_pmd_table_empty(start_pmd))
+		clear_hyp_pud_entry(pud);
+}
+
+static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t next;
+	pud_t *pud, *start_pud;
+
+	start_pud = pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		/* Hyp doesn't use huge puds */
+		if (!pud_none(*pud))
+			unmap_hyp_pmds(pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+
+	if (hyp_pud_table_empty(start_pud))
+		clear_hyp_pgd_entry(pgd);
+}
+
+static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
+{
+	pgd_t *pgd;
+	phys_addr_t addr = start, end = start + size;
+	phys_addr_t next;
+
+	/*
+	 * We don't unmap anything from HYP, except at the hyp tear down.
+	 * Hence, we don't have to invalidate the TLBs here.
+	 */
+	pgd = pgdp + pgd_index(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (!pgd_none(*pgd))
+			unmap_hyp_puds(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+}
+
 /**
  * free_boot_hyp_pgd - free HYP boot page tables
  *
@@ -401,14 +493,14 @@
 	mutex_lock(&kvm_hyp_pgd_mutex);
 
 	if (boot_hyp_pgd) {
-		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+		unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+		unmap_hyp_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 		free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
 		boot_hyp_pgd = NULL;
 	}
 
 	if (hyp_pgd)
-		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
+		unmap_hyp_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 }
@@ -433,9 +525,9 @@
 
 	if (hyp_pgd) {
 		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+			unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 
 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;
@@ -645,20 +737,6 @@
 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
-/* Free the HW pgd, one page at a time */
-static void kvm_free_hwpgd(void *hwpgd)
-{
-	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
-}
-
-/* Allocate the HW PGD, making sure that each page gets its own refcount */
-static void *kvm_alloc_hwpgd(void)
-{
-	unsigned int size = kvm_get_hwpgd_size();
-
-	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
-}
-
 /**
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:	The KVM struct pointer for the VM.
@@ -673,81 +751,22 @@
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
 	pgd_t *pgd;
-	void *hwpgd;
 
 	if (kvm->arch.pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
 
-	hwpgd = kvm_alloc_hwpgd();
-	if (!hwpgd)
+	/* Allocate the HW PGD, making sure that each page gets its own refcount */
+	pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
+	if (!pgd)
 		return -ENOMEM;
 
-	/* When the kernel uses more levels of page tables than the
-	 * guest, we allocate a fake PGD and pre-populate it to point
-	 * to the next-level page table, which will be the real
-	 * initial page table pointed to by the VTTBR.
-	 *
-	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
-	 * the PMD and the kernel will use folded pud.
-	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
-	 * pages.
-	 */
-	if (KVM_PREALLOC_LEVEL > 0) {
-		int i;
-
-		/*
-		 * Allocate fake pgd for the page table manipulation macros to
-		 * work.  This is not used by the hardware and we have no
-		 * alignment requirement for this allocation.
-		 */
-		pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
-				GFP_KERNEL | __GFP_ZERO);
-
-		if (!pgd) {
-			kvm_free_hwpgd(hwpgd);
-			return -ENOMEM;
-		}
-
-		/* Plug the HW PGD into the fake one. */
-		for (i = 0; i < PTRS_PER_S2_PGD; i++) {
-			if (KVM_PREALLOC_LEVEL == 1)
-				pgd_populate(NULL, pgd + i,
-					     (pud_t *)hwpgd + i * PTRS_PER_PUD);
-			else if (KVM_PREALLOC_LEVEL == 2)
-				pud_populate(NULL, pud_offset(pgd, 0) + i,
-					     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
-		}
-	} else {
-		/*
-		 * Allocate actual first-level Stage-2 page table used by the
-		 * hardware for Stage-2 page table walks.
-		 */
-		pgd = (pgd_t *)hwpgd;
-	}
-
 	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
 }
 
-/**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
- * @kvm:   The VM pointer
- * @start: The intermediate physical base address of the range to unmap
- * @size:  The size of the area to unmap
- *
- * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
- * be called while holding mmu_lock (unless for freeing the stage2 pgd before
- * destroying the VM), otherwise another faulting VCPU may come in and mess
- * with things behind our backs.
- */
-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
-{
-	unmap_range(kvm, kvm->arch.pgd, start, size);
-}
-
 static void stage2_unmap_memslot(struct kvm *kvm,
 				 struct kvm_memory_slot *memslot)
 {
@@ -830,10 +849,8 @@
 		return;
 
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
-	if (KVM_PREALLOC_LEVEL > 0)
-		kfree(kvm->arch.pgd);
-
+	/* Free the HW pgd, one page at a time */
+	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
 	kvm->arch.pgd = NULL;
 }
 
@@ -843,16 +860,16 @@
 	pgd_t *pgd;
 	pud_t *pud;
 
-	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
-	if (WARN_ON(pgd_none(*pgd))) {
+	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+	if (WARN_ON(stage2_pgd_none(*pgd))) {
 		if (!cache)
 			return NULL;
 		pud = mmu_memory_cache_alloc(cache);
-		pgd_populate(NULL, pgd, pud);
+		stage2_pgd_populate(pgd, pud);
 		get_page(virt_to_page(pgd));
 	}
 
-	return pud_offset(pgd, addr);
+	return stage2_pud_offset(pgd, addr);
 }
 
 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -862,15 +879,15 @@
 	pmd_t *pmd;
 
 	pud = stage2_get_pud(kvm, cache, addr);
-	if (pud_none(*pud)) {
+	if (stage2_pud_none(*pud)) {
 		if (!cache)
 			return NULL;
 		pmd = mmu_memory_cache_alloc(cache);
-		pud_populate(NULL, pud, pmd);
+		stage2_pud_populate(pud, pmd);
 		get_page(virt_to_page(pud));
 	}
 
-	return pmd_offset(pud, addr);
+	return stage2_pmd_offset(pud, addr);
 }
 
 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
@@ -893,11 +910,14 @@
 	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
 
 	old_pmd = *pmd;
-	kvm_set_pmd(pmd, *new_pmd);
-	if (pmd_present(old_pmd))
+	if (pmd_present(old_pmd)) {
+		pmd_clear(pmd);
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
-	else
+	} else {
 		get_page(virt_to_page(pmd));
+	}
+
+	kvm_set_pmd(pmd, *new_pmd);
 	return 0;
 }
 
@@ -946,15 +966,38 @@
 
 	/* Create 2nd stage page table mapping - Level 3 */
 	old_pte = *pte;
-	kvm_set_pte(pte, *new_pte);
-	if (pte_present(old_pte))
+	if (pte_present(old_pte)) {
+		kvm_set_pte(pte, __pte(0));
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
-	else
+	} else {
 		get_page(virt_to_page(pte));
+	}
 
+	kvm_set_pte(pte, *new_pte);
 	return 0;
 }
 
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static int stage2_ptep_test_and_clear_young(pte_t *pte)
+{
+	if (pte_young(*pte)) {
+		*pte = pte_mkold(*pte);
+		return 1;
+	}
+	return 0;
+}
+#else
+static int stage2_ptep_test_and_clear_young(pte_t *pte)
+{
+	return __ptep_test_and_clear_young(pte);
+}
+#endif
+
+static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
+{
+	return stage2_ptep_test_and_clear_young((pte_t *)pmd);
+}
+
 /**
  * kvm_phys_addr_ioremap - map a device range to guest IPA
  *
@@ -978,7 +1021,7 @@
 		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
 
 		if (writable)
-			kvm_set_s2pte_writable(&pte);
+			pte = kvm_s2pte_mkwrite(pte);
 
 		ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
 						KVM_NR_MEM_OBJS);
@@ -1004,7 +1047,7 @@
 	kvm_pfn_t pfn = *pfnp;
 	gfn_t gfn = *ipap >> PAGE_SHIFT;
 
-	if (PageTransCompound(pfn_to_page(pfn))) {
+	if (PageTransCompoundMap(pfn_to_page(pfn))) {
 		unsigned long mask;
 		/*
 		 * The address we faulted on is backed by a transparent huge
@@ -1078,12 +1121,12 @@
 	pmd_t *pmd;
 	phys_addr_t next;
 
-	pmd = pmd_offset(pud, addr);
+	pmd = stage2_pmd_offset(pud, addr);
 
 	do {
-		next = kvm_pmd_addr_end(addr, end);
+		next = stage2_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd)) {
+			if (pmd_thp_or_huge(*pmd)) {
 				if (!kvm_s2pmd_readonly(pmd))
 					kvm_set_s2pmd_readonly(pmd);
 			} else {
@@ -1106,12 +1149,12 @@
 	pud_t *pud;
 	phys_addr_t next;
 
-	pud = pud_offset(pgd, addr);
+	pud = stage2_pud_offset(pgd, addr);
 	do {
-		next = kvm_pud_addr_end(addr, end);
-		if (!pud_none(*pud)) {
+		next = stage2_pud_addr_end(addr, end);
+		if (!stage2_pud_none(*pud)) {
 			/* TODO:PUD not supported, revisit later if supported */
-			BUG_ON(kvm_pud_huge(*pud));
+			BUG_ON(stage2_pud_huge(*pud));
 			stage2_wp_pmds(pud, addr, next);
 		}
 	} while (pud++, addr = next, addr != end);
@@ -1128,7 +1171,7 @@
 	pgd_t *pgd;
 	phys_addr_t next;
 
-	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
 		/*
 		 * Release kvm_mmu_lock periodically if the memory region is
@@ -1140,8 +1183,8 @@
 		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
 			cond_resched_lock(&kvm->mmu_lock);
 
-		next = kvm_pgd_addr_end(addr, end);
-		if (pgd_present(*pgd))
+		next = stage2_pgd_addr_end(addr, end);
+		if (stage2_pgd_present(*pgd))
 			stage2_wp_puds(pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
@@ -1320,7 +1363,7 @@
 		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
 		new_pmd = pmd_mkhuge(new_pmd);
 		if (writable) {
-			kvm_set_s2pmd_writable(&new_pmd);
+			new_pmd = kvm_s2pmd_mkwrite(new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
 		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
@@ -1329,7 +1372,7 @@
 		pte_t new_pte = pfn_pte(pfn, mem_type);
 
 		if (writable) {
-			kvm_set_s2pte_writable(&new_pte);
+			new_pte = kvm_s2pte_mkwrite(new_pte);
 			kvm_set_pfn_dirty(pfn);
 			mark_page_dirty(kvm, gfn);
 		}
@@ -1348,6 +1391,8 @@
  * Resolve the access fault by making the page young again.
  * Note that because the faulting entry is guaranteed not to be
  * cached in the TLB, we don't need to invalidate anything.
+ * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
+ * so there is no need for atomic (pte|pmd)_mkyoung operations.
  */
 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
@@ -1364,7 +1409,7 @@
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		goto out;
 
-	if (kvm_pmd_huge(*pmd)) {	/* THP, HugeTLB */
+	if (pmd_thp_or_huge(*pmd)) {	/* THP, HugeTLB */
 		*pmd = pmd_mkyoung(*pmd);
 		pfn = pmd_pfn(*pmd);
 		pfn_valid = true;
@@ -1588,25 +1633,14 @@
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		return 0;
 
-	if (kvm_pmd_huge(*pmd)) {	/* THP, HugeTLB */
-		if (pmd_young(*pmd)) {
-			*pmd = pmd_mkold(*pmd);
-			return 1;
-		}
-
-		return 0;
-	}
+	if (pmd_thp_or_huge(*pmd))	/* THP, HugeTLB */
+		return stage2_pmdp_test_and_clear_young(pmd);
 
 	pte = pte_offset_kernel(pmd, gpa);
 	if (pte_none(*pte))
 		return 0;
 
-	if (pte_young(*pte)) {
-		*pte = pte_mkold(*pte);	/* Just a page... */
-		return 1;
-	}
-
-	return 0;
+	return stage2_ptep_test_and_clear_young(pte);
 }
 
 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
@@ -1618,7 +1652,7 @@
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		return 0;
 
-	if (kvm_pmd_huge(*pmd))		/* THP, HugeTLB */
+	if (pmd_thp_or_huge(*pmd))		/* THP, HugeTLB */
 		return pmd_young(*pmd);
 
 	pte = pte_offset_kernel(pmd, gpa);
@@ -1666,6 +1700,11 @@
 	return hyp_idmap_vector;
 }
 
+phys_addr_t kvm_get_idmap_start(void)
+{
+	return hyp_idmap_start;
+}
+
 int kvm_mmu_init(void)
 {
 	int err;
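
The reworked stage2_set_pmd_huge() and stage2_set_pte() above follow the ARM break-before-make rule: a present entry is first cleared and its translation flushed for that IPA, and only then is the replacement entry written. A minimal sketch of the pattern, using only helpers visible in the hunks above (not a complete implementation; locking and cache maintenance are omitted):

	/* break-before-make update of a stage-2 PTE */
	old_pte = *pte;
	if (pte_present(old_pte)) {
		kvm_set_pte(pte, __pte(0));		/* break: invalidate the old entry */
		kvm_tlb_flush_vmid_ipa(kvm, addr);	/* flush the stale translation */
	} else {
		get_page(virt_to_page(pte));		/* first mapping in this table page */
	}
	kvm_set_pte(pte, *new_pte);			/* make: install the new entry */

The same ordering is applied at the PMD level, and the new stage2_ptep_test_and_clear_young()/stage2_pmdp_test_and_clear_young() helpers centralise the access-flag clearing used by the aging handlers further down.
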
diff --git a/arch/arm/mach-aspeed/Kconfig b/arch/arm/mach-aspeed/Kconfig
new file mode 100644
index 0000000..5225fbc
--- /dev/null
+++ b/arch/arm/mach-aspeed/Kconfig
@@ -0,0 +1,30 @@
+menuconfig ARCH_ASPEED
+	bool "Aspeed BMC architectures"
+	depends on ARCH_MULTI_V5 || ARCH_MULTI_V6
+	select SRAM
+	select WATCHDOG
+	select ASPEED_WATCHDOG
+	select MOXART_TIMER
+	help
+	  Say Y here if you want to run your kernel on an Aspeed BMC SoC.
+
+if ARCH_ASPEED
+
+config MACH_ASPEED_G4
+	bool "Aspeed SoC 4th Generation"
+	depends on ARCH_MULTI_V5
+	select CPU_ARM926T
+	help
+	 Say yes if you intend to run on Aspeed ast2400 or similar
+	 fourth generation BMCs, such as those used by OpenPower Power8
+	 systems.
+
+config MACH_ASPEED_G5
+	bool "Aspeed SoC 5th Generation"
+	depends on ARCH_MULTI_V6
+	select CPU_V6
+	help
+	 Say yes if you intend to run on Aspeed ast2500 or similar
+	 fifth generation BMCs.
+
+endif
diff --git a/arch/arm/mach-at91/sama5.c b/arch/arm/mach-at91/sama5.c
index df8fdf1..922b85f 100644
--- a/arch/arm/mach-at91/sama5.c
+++ b/arch/arm/mach-at91/sama5.c
@@ -18,8 +18,26 @@
 #include "soc.h"
 
 static const struct at91_soc sama5_socs[] = {
-	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27_EXID_MATCH,
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
+		 "sama5d21", "sama5d2"),
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D22CU_EXID_MATCH,
+		 "sama5d22", "sama5d2"),
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D23CU_EXID_MATCH,
+		 "sama5d23", "sama5d2"),
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CX_EXID_MATCH,
+		 "sama5d24", "sama5d2"),
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CU_EXID_MATCH,
+		 "sama5d24", "sama5d2"),
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D26CU_EXID_MATCH,
+		 "sama5d26", "sama5d2"),
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CU_EXID_MATCH,
 		 "sama5d27", "sama5d2"),
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CN_EXID_MATCH,
+		 "sama5d27", "sama5d2"),
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH,
+		 "sama5d28", "sama5d2"),
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH,
+		 "sama5d28", "sama5d2"),
 	AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH,
 		 "sama5d31", "sama5d3"),
 	AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH,
diff --git a/arch/arm/mach-at91/soc.c b/arch/arm/mach-at91/soc.c
index 54343ff..c6fda75 100644
--- a/arch/arm/mach-at91/soc.c
+++ b/arch/arm/mach-at91/soc.c
@@ -22,48 +22,93 @@
 #include "soc.h"
 
 #define AT91_DBGU_CIDR			0x40
-#define AT91_DBGU_CIDR_VERSION(x)	((x) & 0x1f)
-#define AT91_DBGU_CIDR_EXT		BIT(31)
-#define AT91_DBGU_CIDR_MATCH_MASK	0x7fffffe0
 #define AT91_DBGU_EXID			0x44
+#define AT91_CHIPID_CIDR		0x00
+#define AT91_CHIPID_EXID		0x04
+#define AT91_CIDR_VERSION(x)		((x) & 0x1f)
+#define AT91_CIDR_EXT			BIT(31)
+#define AT91_CIDR_MATCH_MASK		0x7fffffe0
 
-struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
+static int __init at91_get_cidr_exid_from_dbgu(u32 *cidr, u32 *exid)
 {
-	struct soc_device_attribute *soc_dev_attr;
-	const struct at91_soc *soc;
-	struct soc_device *soc_dev;
 	struct device_node *np;
 	void __iomem *regs;
-	u32 cidr, exid;
 
 	np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-dbgu");
 	if (!np)
 		np = of_find_compatible_node(NULL, NULL,
 					     "atmel,at91sam9260-dbgu");
-
-	if (!np) {
-		pr_warn("Could not find DBGU node");
-		return NULL;
-	}
+	if (!np)
+		return -ENODEV;
 
 	regs = of_iomap(np, 0);
 	of_node_put(np);
 
 	if (!regs) {
 		pr_warn("Could not map DBGU iomem range");
-		return NULL;
+		return -ENXIO;
 	}
 
-	cidr = readl(regs + AT91_DBGU_CIDR);
-	exid = readl(regs + AT91_DBGU_EXID);
+	*cidr = readl(regs + AT91_DBGU_CIDR);
+	*exid = readl(regs + AT91_DBGU_EXID);
 
 	iounmap(regs);
 
+	return 0;
+}
+
+static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid)
+{
+	struct device_node *np;
+	void __iomem *regs;
+
+	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-chipid");
+	if (!np)
+		return -ENODEV;
+
+	regs = of_iomap(np, 0);
+	of_node_put(np);
+
+	if (!regs) {
+		pr_warn("Could not map DBGU iomem range");
+		return -ENXIO;
+	}
+
+	*cidr = readl(regs + AT91_CHIPID_CIDR);
+	*exid = readl(regs + AT91_CHIPID_EXID);
+
+	iounmap(regs);
+
+	return 0;
+}
+
+struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
+{
+	struct soc_device_attribute *soc_dev_attr;
+	const struct at91_soc *soc;
+	struct soc_device *soc_dev;
+	u32 cidr, exid;
+	int ret;
+
+	/*
+	 * With SAMA5D2 and later SoCs, the CIDR and EXID registers are no
+	 * longer in the DBGU device but in the chipid device, whose only
+	 * purpose is to expose these two registers.
+	 */
+	ret = at91_get_cidr_exid_from_dbgu(&cidr, &exid);
+	if (ret)
+		ret = at91_get_cidr_exid_from_chipid(&cidr, &exid);
+	if (ret) {
+		if (ret == -ENODEV)
+			pr_warn("Could not find identification node");
+		return NULL;
+	}
+
 	for (soc = socs; soc->name; soc++) {
-		if (soc->cidr_match != (cidr & AT91_DBGU_CIDR_MATCH_MASK))
+		if (soc->cidr_match != (cidr & AT91_CIDR_MATCH_MASK))
 			continue;
 
-		if (!(cidr & AT91_DBGU_CIDR_EXT) || soc->exid_match == exid)
+		if (!(cidr & AT91_CIDR_EXT) || soc->exid_match == exid)
 			break;
 	}
 
@@ -79,7 +124,7 @@
 	soc_dev_attr->family = soc->family;
 	soc_dev_attr->soc_id = soc->name;
 	soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%X",
-					   AT91_DBGU_CIDR_VERSION(cidr));
+					   AT91_CIDR_VERSION(cidr));
 	soc_dev = soc_device_register(soc_dev_attr);
 	if (IS_ERR(soc_dev)) {
 		kfree(soc_dev_attr->revision);
@@ -91,7 +136,7 @@
 	if (soc->family)
 		pr_info("Detected SoC family: %s\n", soc->family);
 	pr_info("Detected SoC: %s, revision %X\n", soc->name,
-		AT91_DBGU_CIDR_VERSION(cidr));
+		AT91_CIDR_VERSION(cidr));
 
 	return soc_dev;
 }
diff --git a/arch/arm/mach-at91/soc.h b/arch/arm/mach-at91/soc.h
index 8ede0ef..228efde 100644
--- a/arch/arm/mach-at91/soc.h
+++ b/arch/arm/mach-at91/soc.h
@@ -63,7 +63,17 @@
 #define AT91SAM9XE512_CIDR_MATCH	0x329aa3a0
 
 #define SAMA5D2_CIDR_MATCH		0x0a5c08c0
-#define SAMA5D27_EXID_MATCH		0x00000021
+#define SAMA5D21CU_EXID_MATCH		0x0000005a
+#define SAMA5D22CU_EXID_MATCH		0x00000059
+#define SAMA5D22CN_EXID_MATCH		0x00000069
+#define SAMA5D23CU_EXID_MATCH		0x00000058
+#define SAMA5D24CX_EXID_MATCH		0x00000004
+#define SAMA5D24CU_EXID_MATCH		0x00000014
+#define SAMA5D26CU_EXID_MATCH		0x00000012
+#define SAMA5D27CU_EXID_MATCH		0x00000011
+#define SAMA5D27CN_EXID_MATCH		0x00000021
+#define SAMA5D28CU_EXID_MATCH		0x00000010
+#define SAMA5D28CN_EXID_MATCH		0x00000020
 
 #define SAMA5D3_CIDR_MATCH		0x0a5c07c0
 #define SAMA5D31_EXID_MATCH		0x00444300
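
With the per-variant EXID values above, supporting a further SAMA5D2 derivative is a two-line change: one EXID define in soc.h and one AT91_SOC() entry in sama5_socs[]. A hedged sketch, where the SAMA5D29 name and EXID value are invented purely for illustration:

	/* soc.h -- hypothetical variant, EXID value is made up */
	#define SAMA5D29CN_EXID_MATCH		0x00000023

	/* sama5.c -- the matching sama5_socs[] entry */
	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D29CN_EXID_MATCH,
		 "sama5d29", "sama5d2"),
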
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 7ef1214..68ab641 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -173,12 +173,12 @@
 	select ARM_GIC
 	select ARM_ERRATA_798181 if SMP
 	select HAVE_ARM_ARCH_TIMER
-	select BRCMSTB_GISB_ARB
 	select BRCMSTB_L2_IRQ
 	select BCM7120_L2_IRQ
 	select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select SOC_BRCMSTB
+	select SOC_BUS
 	help
 	  Say Y if you intend to run the kernel on a Broadcom ARM-based STB
 	  chipset.
diff --git a/arch/arm/mach-berlin/berlin.c b/arch/arm/mach-berlin/berlin.c
index 25d7387..ac181c6 100644
--- a/arch/arm/mach-berlin/berlin.c
+++ b/arch/arm/mach-berlin/berlin.c
@@ -18,11 +18,6 @@
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 
-static void __init berlin_init_late(void)
-{
-	platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-}
-
 static const char * const berlin_dt_compat[] = {
 	"marvell,berlin",
 	NULL,
@@ -30,7 +25,6 @@
 
 DT_MACHINE_START(BERLIN_DT, "Marvell Berlin")
 	.dt_compat	= berlin_dt_compat,
-	.init_late	= berlin_init_late,
 	/*
 	 * with DT probing for L2CCs, berlin_init_machine can be removed.
 	 * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 2e3464b..da4c336 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -14,8 +14,8 @@
 obj-$(CONFIG_ARCH_DAVINCI_DM355)        += dm355.o devices.o
 obj-$(CONFIG_ARCH_DAVINCI_DM646x)       += dm646x.o devices.o
 obj-$(CONFIG_ARCH_DAVINCI_DM365)	+= dm365.o devices.o
-obj-$(CONFIG_ARCH_DAVINCI_DA830)        += da830.o devices-da8xx.o
-obj-$(CONFIG_ARCH_DAVINCI_DA850)        += da850.o devices-da8xx.o
+obj-$(CONFIG_ARCH_DAVINCI_DA830)	+= da830.o devices-da8xx.o usb-da8xx.o
+obj-$(CONFIG_ARCH_DAVINCI_DA850)	+= da850.o devices-da8xx.o usb-da8xx.o
 
 obj-$(CONFIG_AINTC)			+= irq.o
 obj-$(CONFIG_CP_INTC)			+= cp_intc.o
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index d97c588..bc4e63f 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -121,6 +121,11 @@
 	const char *partnum = NULL;
 	struct davinci_soc_info *soc_info = &davinci_soc_info;
 
+	if (!IS_BUILTIN(CONFIG_NVMEM)) {
+		pr_warn("Factory Config not available without CONFIG_NVMEM\n");
+		goto bad_config;
+	}
+
 	ret = nvmem_device_read(nvmem, 0, sizeof(factory_config),
 				&factory_config);
 	if (ret != sizeof(struct factory_config)) {
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 3424eac6..df42c93 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -195,6 +195,14 @@
 		return -EINVAL;
 
 	mutex_lock(&clocks_mutex);
+	if (clk->set_parent) {
+		int ret = clk->set_parent(clk, parent);
+
+		if (ret) {
+			mutex_unlock(&clocks_mutex);
+			return ret;
+		}
+	}
 	clk->parent = parent;
 	list_del_init(&clk->childnode);
 	list_add(&clk->childnode, &clk->parent->children);
@@ -224,8 +232,17 @@
 
 	mutex_lock(&clocks_mutex);
 	list_add_tail(&clk->node, &clocks);
-	if (clk->parent)
+	if (clk->parent) {
+		if (clk->set_parent) {
+			int ret = clk->set_parent(clk, clk->parent);
+
+			if (ret) {
+				mutex_unlock(&clocks_mutex);
+				return ret;
+			}
+		}
 		list_add_tail(&clk->childnode, &clk->parent->children);
+	}
 	mutex_unlock(&clocks_mutex);
 
 	/* If rate is already set, use it */
@@ -560,7 +577,7 @@
  * than that used by default in <soc>.c file. The reference clock rate
  * should be updated early in the boot process; ideally soon after the
  * clock tree has been initialized once with the default reference clock
- * rate (davinci_common_init()).
+ * rate (davinci_clk_init()).
  *
  * Returns 0 on success, error otherwise.
  */
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index 1e4e836..e2a5437 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -106,6 +106,7 @@
 	int (*reset) (struct clk *clk, bool reset);
 	void (*clk_enable) (struct clk *clk);
 	void (*clk_disable) (struct clk *clk);
+	int (*set_parent) (struct clk *clk, struct clk *parent);
 };
 
 /* Clock flags: SoC-specific flags start at BIT(16) */
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index f55ef2e..049025f 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -33,6 +33,11 @@
 	char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
 	off_t offset = (off_t)context;
 
+	if (!IS_BUILTIN(CONFIG_NVMEM)) {
+		pr_warn("Cannot read MAC addr from EEPROM without CONFIG_NVMEM\n");
+		return;
+	}
+
 	/* Read MAC addr from EEPROM */
 	if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
 		pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
@@ -103,12 +108,6 @@
 	if (ret < 0)
 		goto err;
 
-	if (davinci_soc_info.cpu_clks) {
-		ret = davinci_clk_init(davinci_soc_info.cpu_clks);
-
-		if (ret != 0)
-			goto err;
-	}
 
 	return;
 
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
index 1a68d24..94085d2 100644
--- a/arch/arm/mach-davinci/cp_intc.c
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -12,6 +12,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -210,3 +211,5 @@
 {
 	cp_intc_of_init(NULL, NULL);
 }
+
+IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", cp_intc_of_init);
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 7187e7f..426fd74 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -1214,4 +1214,6 @@
 
 	da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
 	WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module");
+
+	davinci_clk_init(davinci_soc_info_da830.cpu_clks);
 }
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 97d8779..2398862 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -34,9 +34,6 @@
 #include "clock.h"
 #include "mux.h"
 
-/* SoC specific clock flags */
-#define DA850_CLK_ASYNC3	BIT(16)
-
 #define DA850_PLL1_BASE		0x01e1a000
 #define DA850_TIMER64P2_BASE	0x01f0c000
 #define DA850_TIMER64P3_BASE	0x01f0d000
@@ -161,6 +158,32 @@
 	.div_reg	= PLLDIV3,
 };
 
+static int da850_async3_set_parent(struct clk *clk, struct clk *parent)
+{
+	u32 val;
+
+	val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
+
+	if (parent == &pll0_sysclk2) {
+		val &= ~CFGCHIP3_ASYNC3_CLKSRC;
+	} else if (parent == &pll1_sysclk2) {
+		val |= CFGCHIP3_ASYNC3_CLKSRC;
+	} else {
+		pr_err("Bad parent on async3 clock mux\n");
+		return -EINVAL;
+	}
+
+	writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
+
+	return 0;
+}
+
+static struct clk async3_clk = {
+	.name		= "async3",
+	.parent		= &pll1_sysclk2,
+	.set_parent	= da850_async3_set_parent,
+};
+
 static struct clk i2c0_clk = {
 	.name		= "i2c0",
 	.parent		= &pll0_aux_clk,
@@ -234,18 +257,16 @@
 
 static struct clk uart1_clk = {
 	.name		= "uart1",
-	.parent		= &pll0_sysclk2,
+	.parent		= &async3_clk,
 	.lpsc		= DA8XX_LPSC1_UART1,
 	.gpsc		= 1,
-	.flags		= DA850_CLK_ASYNC3,
 };
 
 static struct clk uart2_clk = {
 	.name		= "uart2",
-	.parent		= &pll0_sysclk2,
+	.parent		= &async3_clk,
 	.lpsc		= DA8XX_LPSC1_UART2,
 	.gpsc		= 1,
-	.flags		= DA850_CLK_ASYNC3,
 };
 
 static struct clk aintc_clk = {
@@ -300,10 +321,9 @@
 
 static struct clk mcasp_clk = {
 	.name		= "mcasp",
-	.parent		= &pll0_sysclk2,
+	.parent		= &async3_clk,
 	.lpsc		= DA8XX_LPSC1_McASP0,
 	.gpsc		= 1,
-	.flags		= DA850_CLK_ASYNC3,
 };
 
 static struct clk lcdc_clk = {
@@ -355,10 +375,9 @@
 
 static struct clk spi1_clk = {
 	.name		= "spi1",
-	.parent		= &pll0_sysclk2,
+	.parent		= &async3_clk,
 	.lpsc		= DA8XX_LPSC1_SPI1,
 	.gpsc		= 1,
-	.flags		= DA850_CLK_ASYNC3,
 };
 
 static struct clk vpif_clk = {
@@ -386,10 +405,9 @@
 
 static struct clk ehrpwm_clk = {
 	.name		= "ehrpwm",
-	.parent		= &pll0_sysclk2,
+	.parent		= &async3_clk,
 	.lpsc		= DA8XX_LPSC1_PWM,
 	.gpsc		= 1,
-	.flags		= DA850_CLK_ASYNC3,
 };
 
 #define DA8XX_EHRPWM_TBCLKSYNC	BIT(12)
@@ -421,10 +439,9 @@
 
 static struct clk ecap_clk = {
 	.name		= "ecap",
-	.parent		= &pll0_sysclk2,
+	.parent		= &async3_clk,
 	.lpsc		= DA8XX_LPSC1_ECAP,
 	.gpsc		= 1,
-	.flags		= DA850_CLK_ASYNC3,
 };
 
 static struct clk_lookup da850_clks[] = {
@@ -442,6 +459,7 @@
 	CLK(NULL,		"pll1_aux",	&pll1_aux_clk),
 	CLK(NULL,		"pll1_sysclk2",	&pll1_sysclk2),
 	CLK(NULL,		"pll1_sysclk3",	&pll1_sysclk3),
+	CLK(NULL,		"async3",	&async3_clk),
 	CLK("i2c_davinci.1",	NULL,		&i2c0_clk),
 	CLK(NULL,		"timer0",	&timerp64_0_clk),
 	CLK("davinci-wdt",	NULL,		&timerp64_1_clk),
@@ -909,30 +927,6 @@
 	.clocksource_id	= T0_TOP,
 };
 
-static void da850_set_async3_src(int pllnum)
-{
-	struct clk *clk, *newparent = pllnum ? &pll1_sysclk2 : &pll0_sysclk2;
-	struct clk_lookup *c;
-	unsigned int v;
-	int ret;
-
-	for (c = da850_clks; c->clk; c++) {
-		clk = c->clk;
-		if (clk->flags & DA850_CLK_ASYNC3) {
-			ret = clk_set_parent(clk, newparent);
-			WARN(ret, "DA850: unable to re-parent clock %s",
-								clk->name);
-		}
-       }
-
-	v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
-	if (pllnum)
-		v |= CFGCHIP3_ASYNC3_CLKSRC;
-	else
-		v &= ~CFGCHIP3_ASYNC3_CLKSRC;
-	__raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
-}
-
 #ifdef CONFIG_CPU_FREQ
 /*
  * Notes:
@@ -1328,15 +1322,6 @@
 	if (WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module"))
 		return;
 
-	/*
-	 * Move the clock source of Async3 domain to PLL1 SYSCLK2.
-	 * This helps keeping the peripherals on this domain insulated
-	 * from CPU frequency changes caused by DVFS. The firmware sets
-	 * both PLL0 and PLL1 to the same frequency so, there should not
-	 * be any noticeable change even in non-DVFS use cases.
-	 */
-	da850_set_async3_src(1);
-
 	/* Unlock writing to PLL0 registers */
 	v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP0_REG));
 	v &= ~CFGCHIP0_PLL_MASTER_LOCK;
@@ -1346,4 +1331,6 @@
 	v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
 	v &= ~CFGCHIP3_PLL1_MASTER_LOCK;
 	__raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
+
+	davinci_clk_init(davinci_soc_info_da850.cpu_clks);
 }
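
Because the ASYNC3 domain is now modelled as its own mux clock (async3_clk above) with a set_parent callback, consumers such as uart1 or mcasp simply name it as their parent and the DA850_CLK_ASYNC3 flag disappears. Re-parenting the whole domain becomes an ordinary clk API call; a hedged sketch of board code pinning ASYNC3 back to PLL0 SYSCLK2, assuming a "pll0_sysclk2" lookup entry exists alongside the pll1 ones shown above (clk_put() and full error handling omitted):

	static int __init da850_board_async3_to_pll0(void)
	{
		struct clk *async3 = clk_get(NULL, "async3");
		struct clk *pll0_sysclk2 = clk_get(NULL, "pll0_sysclk2");

		if (IS_ERR(async3) || IS_ERR(pll0_sysclk2))
			return -ENODEV;

		/* ends up in da850_async3_set_parent(), flipping CFGCHIP3_ASYNC3_CLKSRC */
		return clk_set_parent(async3, pll0_sysclk2);
	}
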
diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c
index c4b5808..754f478 100644
--- a/arch/arm/mach-davinci/da8xx-dt.c
+++ b/arch/arm/mach-davinci/da8xx-dt.c
@@ -18,20 +18,9 @@
 #include "cp_intc.h"
 #include <mach/da8xx.h>
 
-#define DA8XX_NUM_UARTS	3
-
-static const struct of_device_id const da8xx_irq_match[] __initconst = {
-	{ .compatible = "ti,cp-intc", .data = cp_intc_of_init, },
-	{ }
-};
-
-static void __init da8xx_init_irq(void)
-{
-	of_irq_init(da8xx_irq_match);
-}
-
 static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("ti,davinci-i2c", 0x01c22000, "i2c_davinci.1", NULL),
+	OF_DEV_AUXDATA("ti,davinci-i2c", 0x01e28000, "i2c_davinci.2", NULL),
 	OF_DEV_AUXDATA("ti,davinci-wdt", 0x01c21000, "davinci-wdt", NULL),
 	OF_DEV_AUXDATA("ti,da830-mmc", 0x01c40000, "da830-mmc.0", NULL),
 	OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f00000, "ehrpwm", NULL),
@@ -39,6 +28,7 @@
 	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f06000, "ecap", NULL),
 	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f07000, "ecap", NULL),
 	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f08000, "ecap", NULL),
+	OF_DEV_AUXDATA("ti,da830-spi", 0x01c41000, "spi_davinci.0", NULL),
 	OF_DEV_AUXDATA("ti,da830-spi", 0x01f0e000, "spi_davinci.1", NULL),
 	OF_DEV_AUXDATA("ns16550a", 0x01c42000, "serial8250.0", NULL),
 	OF_DEV_AUXDATA("ns16550a", 0x01d0c000, "serial8250.1", NULL),
@@ -54,9 +44,7 @@
 
 static void __init da850_init_machine(void)
 {
-	of_platform_populate(NULL, of_default_bus_match_table,
-			     da850_auxdata_lookup, NULL);
-
+	of_platform_default_populate(NULL, da850_auxdata_lookup, NULL);
 }
 
 static const char *const da850_boards_compat[] __initconst = {
@@ -68,7 +56,6 @@
 
 DT_MACHINE_START(DA850_DT, "Generic DA850/OMAP-L138/AM18x")
 	.map_io		= da850_init,
-	.init_irq	= da8xx_init_irq,
 	.init_time	= davinci_timer_init,
 	.init_machine	= da850_init_machine,
 	.dt_compat	= da850_boards_compat,
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 725e693..add3771 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -751,16 +751,6 @@
 		.end	= IRQ_DA8XX_MMCSDINT0,
 		.flags	= IORESOURCE_IRQ,
 	},
-	{		/* DMA RX */
-		.start	= DA8XX_DMA_MMCSD0_RX,
-		.end	= DA8XX_DMA_MMCSD0_RX,
-		.flags	= IORESOURCE_DMA,
-	},
-	{		/* DMA TX */
-		.start	= DA8XX_DMA_MMCSD0_TX,
-		.end	= DA8XX_DMA_MMCSD0_TX,
-		.flags	= IORESOURCE_DMA,
-	},
 };
 
 static struct platform_device da8xx_mmcsd0_device = {
@@ -788,16 +778,6 @@
 		.end	= IRQ_DA850_MMCSDINT0_1,
 		.flags	= IORESOURCE_IRQ,
 	},
-	{		/* DMA RX */
-		.start	= DA850_DMA_MMCSD1_RX,
-		.end	= DA850_DMA_MMCSD1_RX,
-		.flags	= IORESOURCE_DMA,
-	},
-	{		/* DMA TX */
-		.start	= DA850_DMA_MMCSD1_TX,
-		.end	= DA850_DMA_MMCSD1_TX,
-		.flags	= IORESOURCE_DMA,
-	},
 };
 
 static struct platform_device da850_mmcsd1_device = {
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 6257aa4..67d26c5 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -144,14 +144,6 @@
 		.start = IRQ_SDIOINT,
 		.flags = IORESOURCE_IRQ,
 	},
-	/* DMA channels: RX, then TX */
-	{
-		.start = EDMA_CTLR_CHAN(0, DAVINCI_DMA_MMCRXEVT),
-		.flags = IORESOURCE_DMA,
-	}, {
-		.start = EDMA_CTLR_CHAN(0, DAVINCI_DMA_MMCTXEVT),
-		.flags = IORESOURCE_DMA,
-	},
 };
 
 static struct platform_device davinci_mmcsd0_device = {
@@ -181,14 +173,6 @@
 		.start = IRQ_DM355_SDIOINT1,
 		.flags = IORESOURCE_IRQ,
 	},
-	/* DMA channels: RX, then TX */
-	{
-		.start = EDMA_CTLR_CHAN(0, 30),	/* rx */
-		.flags = IORESOURCE_DMA,
-	}, {
-		.start = EDMA_CTLR_CHAN(0, 31),	/* tx */
-		.flags = IORESOURCE_DMA,
-	},
 };
 
 static struct platform_device davinci_mmcsd1_device = {
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index a0ecf49..5a19cca 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -1052,6 +1052,7 @@
 {
 	davinci_common_init(&davinci_soc_info_dm355);
 	davinci_map_sysmod();
+	davinci_clk_init(davinci_soc_info_dm355.cpu_clks);
 }
 
 int __init dm355_init_video(struct vpfe_config *vpfe_cfg,
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 384d367..8aa004b 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -1176,6 +1176,7 @@
 {
 	davinci_common_init(&davinci_soc_info_dm365);
 	davinci_map_sysmod();
+	davinci_clk_init(davinci_soc_info_dm365.cpu_clks);
 }
 
 static struct resource dm365_vpss_resources[] = {
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index b4b3a8b..0afa279 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -932,6 +932,7 @@
 {
 	davinci_common_init(&davinci_soc_info_dm644x);
 	davinci_map_sysmod();
+	davinci_clk_init(davinci_soc_info_dm644x.cpu_clks);
 }
 
 int __init dm644x_init_video(struct vpfe_config *vpfe_cfg,
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index a43db0f..da21353 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -956,6 +956,7 @@
 {
 	davinci_common_init(&davinci_soc_info_dm646x);
 	davinci_map_sysmod();
+	davinci_clk_init(davinci_soc_info_dm646x.cpu_clks);
 }
 
 static int __init dm646x_init_devices(void)
diff --git a/arch/arm/mach-davinci/usb-da8xx.c b/arch/arm/mach-davinci/usb-da8xx.c
new file mode 100644
index 0000000..f141f51
--- /dev/null
+++ b/arch/arm/mach-davinci/usb-da8xx.c
@@ -0,0 +1,107 @@
+/*
+ * DA8xx USB
+ */
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/platform_data/usb-davinci.h>
+#include <linux/platform_device.h>
+#include <linux/usb/musb.h>
+
+#include <mach/common.h>
+#include <mach/cputype.h>
+#include <mach/da8xx.h>
+#include <mach/irqs.h>
+
+#define DA8XX_USB0_BASE		0x01e00000
+#define DA8XX_USB1_BASE		0x01e25000
+
+#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
+
+static struct musb_hdrc_config musb_config = {
+	.multipoint	= true,
+	.num_eps	= 5,
+	.ram_bits	= 10,
+};
+
+static struct musb_hdrc_platform_data usb_data = {
+	/* OTG requires a Mini-AB connector */
+	.mode           = MUSB_OTG,
+	.clock		= "usb20",
+	.config		= &musb_config,
+};
+
+static struct resource da8xx_usb20_resources[] = {
+	{
+		.start		= DA8XX_USB0_BASE,
+		.end		= DA8XX_USB0_BASE + SZ_64K - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= IRQ_DA8XX_USB_INT,
+		.flags		= IORESOURCE_IRQ,
+		.name		= "mc",
+	},
+};
+
+static u64 usb_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device usb_dev = {
+	.name		= "musb-da8xx",
+	.id             = -1,
+	.dev = {
+		.platform_data		= &usb_data,
+		.dma_mask		= &usb_dmamask,
+		.coherent_dma_mask      = DMA_BIT_MASK(32),
+	},
+	.resource	= da8xx_usb20_resources,
+	.num_resources	= ARRAY_SIZE(da8xx_usb20_resources),
+};
+
+int __init da8xx_register_usb20(unsigned int mA, unsigned int potpgt)
+{
+	usb_data.power	= mA > 510 ? 255 : mA / 2;
+	usb_data.potpgt = (potpgt + 1) / 2;
+
+	return platform_device_register(&usb_dev);
+}
+
+#else
+
+int __init da8xx_register_usb20(unsigned int mA, unsigned int potpgt)
+{
+	return 0;
+}
+
+#endif  /* CONFIG_USB_MUSB_HDRC */
+
+static struct resource da8xx_usb11_resources[] = {
+	[0] = {
+		.start	= DA8XX_USB1_BASE,
+		.end	= DA8XX_USB1_BASE + SZ_4K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_DA8XX_IRQN,
+		.end	= IRQ_DA8XX_IRQN,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static u64 da8xx_usb11_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device da8xx_usb11_device = {
+	.name		= "ohci",
+	.id		= 0,
+	.dev = {
+		.dma_mask		= &da8xx_usb11_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+	},
+	.num_resources	= ARRAY_SIZE(da8xx_usb11_resources),
+	.resource	= da8xx_usb11_resources,
+};
+
+int __init da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata)
+{
+	da8xx_usb11_device.dev.platform_data = pdata;
+	return platform_device_register(&da8xx_usb11_device);
+}
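
The new file keeps the da8xx_register_usb20()/da8xx_register_usb11() entry points, so DA8xx board files continue to call them as before. A hedged usage sketch; the function name, the 500 mA / 8 ms values and the NULL root-hub data are illustrative only:

	static __init void some_da850_board_usb_init(void)
	{
		/* MUSB dual-role controller: VBUS budget in mA, power-on-to-good in ms */
		da8xx_register_usb20(500, 8);

		/* OHCI host: board-specific struct da8xx_ohci_root_hub would go here */
		da8xx_register_usb11(NULL);
	}
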
diff --git a/arch/arm/mach-davinci/usb.c b/arch/arm/mach-davinci/usb.c
index b0a6b52..0e7e89c 100644
--- a/arch/arm/mach-davinci/usb.c
+++ b/arch/arm/mach-davinci/usb.c
@@ -10,36 +10,16 @@
 #include <mach/common.h>
 #include <mach/irqs.h>
 #include <mach/cputype.h>
-#include <mach/da8xx.h>
 #include <linux/platform_data/usb-davinci.h>
 
 #define DAVINCI_USB_OTG_BASE	0x01c64000
 
-#define DA8XX_USB0_BASE 	0x01e00000
-#define DA8XX_USB1_BASE 	0x01e25000
-
 #if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
-static struct musb_hdrc_eps_bits musb_eps[] = {
-	{ "ep1_tx", 8, },
-	{ "ep1_rx", 8, },
-	{ "ep2_tx", 8, },
-	{ "ep2_rx", 8, },
-	{ "ep3_tx", 5, },
-	{ "ep3_rx", 5, },
-	{ "ep4_tx", 5, },
-	{ "ep4_rx", 5, },
-};
-
 static struct musb_hdrc_config musb_config = {
 	.multipoint	= true,
-	.dyn_fifo	= true,
-	.soft_con	= true,
-	.dma		= true,
 
 	.num_eps	= 5,
-	.dma_channels	= 8,
 	.ram_bits	= 10,
-	.eps_bits	= musb_eps,
 };
 
 static struct musb_hdrc_platform_data usb_data = {
@@ -97,79 +77,10 @@
 	platform_device_register(&usb_dev);
 }
 
-#ifdef CONFIG_ARCH_DAVINCI_DA8XX
-static struct resource da8xx_usb20_resources[] = {
-	{
-		.start		= DA8XX_USB0_BASE,
-		.end		= DA8XX_USB0_BASE + SZ_64K - 1,
-		.flags		= IORESOURCE_MEM,
-	},
-	{
-		.start		= IRQ_DA8XX_USB_INT,
-		.flags		= IORESOURCE_IRQ,
-		.name		= "mc",
-	},
-};
-
-int __init da8xx_register_usb20(unsigned mA, unsigned potpgt)
-{
-	usb_data.clock  = "usb20";
-	usb_data.power	= mA > 510 ? 255 : mA / 2;
-	usb_data.potpgt = (potpgt + 1) / 2;
-
-	usb_dev.resource = da8xx_usb20_resources;
-	usb_dev.num_resources = ARRAY_SIZE(da8xx_usb20_resources);
-	usb_dev.name = "musb-da8xx";
-
-	return platform_device_register(&usb_dev);
-}
-#endif	/* CONFIG_DAVINCI_DA8XX */
-
 #else
 
 void __init davinci_setup_usb(unsigned mA, unsigned potpgt_ms)
 {
 }
 
-#ifdef CONFIG_ARCH_DAVINCI_DA8XX
-int __init da8xx_register_usb20(unsigned mA, unsigned potpgt)
-{
-	return 0;
-}
-#endif
-
 #endif  /* CONFIG_USB_MUSB_HDRC */
-
-#ifdef	CONFIG_ARCH_DAVINCI_DA8XX
-static struct resource da8xx_usb11_resources[] = {
-	[0] = {
-		.start	= DA8XX_USB1_BASE,
-		.end	= DA8XX_USB1_BASE + SZ_4K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_DA8XX_IRQN,
-		.end	= IRQ_DA8XX_IRQN,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static u64 da8xx_usb11_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device da8xx_usb11_device = {
-	.name		= "ohci",
-	.id		= 0,
-	.dev = {
-		.dma_mask		= &da8xx_usb11_dma_mask,
-		.coherent_dma_mask	= DMA_BIT_MASK(32),
-	},
-	.num_resources	= ARRAY_SIZE(da8xx_usb11_resources),
-	.resource	= da8xx_usb11_resources,
-};
-
-int __init da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata)
-{
-	da8xx_usb11_device.dev.platform_data = pdata;
-	return platform_device_register(&da8xx_usb11_device);
-}
-#endif	/* CONFIG_DAVINCI_DA8XX */
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 0cdaa38..0d420a2 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -88,8 +88,7 @@
 	struct clk *nand, *camera, *i2s0, *i2s1, *crypto, *ac97, *pdma;
 	struct clk *xor0, *xor1, *ge, *gephy;
 
-	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
-				       dove_tclk);
+	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, 0, dove_tclk);
 
 	usb0 = dove_register_gate("usb0", "tclk", CLOCK_GATING_BIT_USB0);
 	usb1 = dove_register_gate("usb1", "tclk", CLOCK_GATING_BIT_USB1);
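
This hunk, like the similar mv78xx0 one further down, drops the CLK_IS_ROOT flag that the common clock framework no longer needs: a NULL parent name already marks a fixed-rate clock as a root. Minimal sketch of the call as it now looks (the 166 MHz rate is an arbitrary example):

	struct clk *tclk;

	/* NULL parent_name => root clock; the flags argument can simply be 0 */
	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, 0, 166666667);
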
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 207fa2c..e65aa7d 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -18,6 +18,7 @@
 	select COMMON_CLK_SAMSUNG
 	select EXYNOS_THERMAL
 	select EXYNOS_PMU
+	select EXYNOS_SROM
 	select HAVE_ARM_SCU if SMP
 	select HAVE_S3C2410_I2C if I2C
 	select HAVE_S3C2410_WATCHDOG if WATCHDOG
@@ -26,11 +27,13 @@
 	select PINCTRL_EXYNOS
 	select PM_GENERIC_DOMAINS if PM
 	select S5P_DEV_MFC
+	select SAMSUNG_MC
 	select SOC_SAMSUNG
 	select SRAM
 	select THERMAL
 	select THERMAL_OF
 	select MFD_SYSCON
+	select MEMORY
 	select CLKSRC_EXYNOS_MCT
 	select POWER_RESET
 	select POWER_RESET_SYSCON
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index bbf51a4..52ccf24 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -31,11 +31,6 @@
 
 static struct map_desc exynos4_iodesc[] __initdata = {
 	{
-		.virtual	= (unsigned long)S5P_VA_SROMC,
-		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	}, {
 		.virtual	= (unsigned long)S5P_VA_CMU,
 		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
 		.length		= SZ_128K,
@@ -58,15 +53,6 @@
 	},
 };
 
-static struct map_desc exynos5_iodesc[] __initdata = {
-	{
-		.virtual	= (unsigned long)S5P_VA_SROMC,
-		.pfn		= __phys_to_pfn(EXYNOS5_PA_SROMC),
-		.length		= SZ_4K,
-		.type		= MT_DEVICE,
-	},
-};
-
 static struct platform_device exynos_cpuidle = {
 	.name              = "exynos_cpuidle",
 #ifdef CONFIG_ARM_EXYNOS_CPUIDLE
@@ -138,9 +124,6 @@
 {
 	if (soc_is_exynos4())
 		iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
-
-	if (soc_is_exynos5())
-		iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));
 }
 
 static void __init exynos_init_io(void)
@@ -213,33 +196,6 @@
 	exynos_map_pmu();
 }
 
-static const struct of_device_id exynos_cpufreq_matches[] = {
-	{ .compatible = "samsung,exynos3250", .data = "cpufreq-dt" },
-	{ .compatible = "samsung,exynos4210", .data = "cpufreq-dt" },
-	{ .compatible = "samsung,exynos4212", .data = "cpufreq-dt" },
-	{ .compatible = "samsung,exynos4412", .data = "cpufreq-dt" },
-	{ .compatible = "samsung,exynos5250", .data = "cpufreq-dt" },
-#ifndef CONFIG_BL_SWITCHER
-	{ .compatible = "samsung,exynos5420", .data = "cpufreq-dt" },
-	{ .compatible = "samsung,exynos5800", .data = "cpufreq-dt" },
-#endif
-	{ /* sentinel */ }
-};
-
-static void __init exynos_cpufreq_init(void)
-{
-	struct device_node *root = of_find_node_by_path("/");
-	const struct of_device_id *match;
-
-	match = of_match_node(exynos_cpufreq_matches, root);
-	if (!match) {
-		platform_device_register_simple("exynos-cpufreq", -1, NULL, 0);
-		return;
-	}
-
-	platform_device_register_simple(match->data, -1, NULL, 0);
-}
-
 static void __init exynos_dt_machine_init(void)
 {
 	/*
@@ -262,8 +218,6 @@
 	    of_machine_is_compatible("samsung,exynos5250"))
 		platform_device_register(&exynos_cpuidle);
 
-	exynos_cpufreq_init();
-
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index c88325d..c48ba4f 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -25,7 +25,4 @@
 
 #define EXYNOS4_PA_COREPERI		0x10500000
 
-#define EXYNOS4_PA_SROMC		0x12570000
-#define EXYNOS5_PA_SROMC		0x12250000
-
 #endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 7c21760..875a2ba 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -92,7 +92,7 @@
 			if (IS_ERR(pd->clk[i]))
 				break;
 
-			if (IS_ERR(pd->clk[i]))
+			if (IS_ERR(pd->pclk[i]))
 				continue; /* Skip on first power up */
 			if (clk_set_parent(pd->clk[i], pd->pclk[i]))
 				pr_err("%s: error setting parent to clock%d\n",
diff --git a/arch/arm/mach-exynos/regs-srom.h b/arch/arm/mach-exynos/regs-srom.h
deleted file mode 100644
index 5c4d442..0000000
--- a/arch/arm/mach-exynos/regs-srom.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * S5P SROMC register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_REGS_SROM_H
-#define __PLAT_SAMSUNG_REGS_SROM_H __FILE__
-
-#include <mach/map.h>
-
-#define S5P_SROMREG(x)		(S5P_VA_SROMC + (x))
-
-#define S5P_SROM_BW		S5P_SROMREG(0x0)
-#define S5P_SROM_BC0		S5P_SROMREG(0x4)
-#define S5P_SROM_BC1		S5P_SROMREG(0x8)
-#define S5P_SROM_BC2		S5P_SROMREG(0xc)
-#define S5P_SROM_BC3		S5P_SROMREG(0x10)
-#define S5P_SROM_BC4		S5P_SROMREG(0x14)
-#define S5P_SROM_BC5		S5P_SROMREG(0x18)
-
-/* one register BW holds 4 x 4-bit packed settings for NCS0 - NCS3 */
-
-#define S5P_SROM_BW__DATAWIDTH__SHIFT		0
-#define S5P_SROM_BW__ADDRMODE__SHIFT		1
-#define S5P_SROM_BW__WAITENABLE__SHIFT		2
-#define S5P_SROM_BW__BYTEENABLE__SHIFT		3
-
-#define S5P_SROM_BW__CS_MASK			0xf
-
-#define S5P_SROM_BW__NCS0__SHIFT		0
-#define S5P_SROM_BW__NCS1__SHIFT		4
-#define S5P_SROM_BW__NCS2__SHIFT		8
-#define S5P_SROM_BW__NCS3__SHIFT		12
-#define S5P_SROM_BW__NCS4__SHIFT		16
-#define S5P_SROM_BW__NCS5__SHIFT		20
-
-/* applies to same to BCS0 - BCS3 */
-
-#define S5P_SROM_BCX__PMC__SHIFT		0
-#define S5P_SROM_BCX__TACP__SHIFT		4
-#define S5P_SROM_BCX__TCAH__SHIFT		8
-#define S5P_SROM_BCX__TCOH__SHIFT		12
-#define S5P_SROM_BCX__TACC__SHIFT		16
-#define S5P_SROM_BCX__TCOS__SHIFT		24
-#define S5P_SROM_BCX__TACS__SHIFT		28
-
-#endif /* __PLAT_SAMSUNG_REGS_SROM_H */
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index fee2b00..f216909 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -34,10 +34,11 @@
 #include <asm/smp_scu.h>
 #include <asm/suspend.h>
 
+#include <mach/map.h>
+
 #include <plat/pm-common.h>
 
 #include "common.h"
-#include "regs-srom.h"
 
 #define REG_TABLE_END (-1U)
 
@@ -53,15 +54,6 @@
 	u32 mask;
 };
 
-static struct sleep_save exynos_core_save[] = {
-	/* SROM side */
-	SAVE_ITEM(S5P_SROM_BW),
-	SAVE_ITEM(S5P_SROM_BC0),
-	SAVE_ITEM(S5P_SROM_BC1),
-	SAVE_ITEM(S5P_SROM_BC2),
-	SAVE_ITEM(S5P_SROM_BC3),
-};
-
 struct exynos_pm_data {
 	const struct exynos_wkup_irq *wkup_irq;
 	unsigned int wake_disable_mask;
@@ -343,8 +335,6 @@
 	/* Set wake-up mask registers */
 	exynos_pm_set_wakeup_mask();
 
-	s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
 	exynos_pm_enter_sleep_mode();
 
 	/* ensure at least INFORM0 has the resume address */
@@ -375,8 +365,6 @@
 	/* Set wake-up mask registers */
 	exynos_pm_set_wakeup_mask();
 
-	s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
 	exynos_pmu_spare3 = pmu_raw_readl(S5P_PMU_SPARE3);
 	/*
 	 * The cpu state needs to be saved and restored so that the
@@ -467,8 +455,6 @@
 	/* For release retention */
 	exynos_pm_release_retention();
 
-	s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
 	if (cpuid == ARM_CPU_PART_CORTEX_A9)
 		scu_enable(S5P_VA_SCU);
 
@@ -535,8 +521,6 @@
 
 	pmu_raw_writel(exynos_pmu_spare3, S5P_PMU_SPARE3);
 
-	s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
 early_wakeup:
 
 	tmp = pmu_raw_readl(EXYNOS5420_SFR_AXI_CGDIS1);
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 8973fae..dd905b9 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -526,7 +526,7 @@
 	bool "i.MX6 Quad/DualLite support"
 	select ARM_ERRATA_764369 if SMP
 	select HAVE_ARM_SCU if SMP
-	select HAVE_ARM_TWD if SMP
+	select HAVE_ARM_TWD
 	select PCI_DOMAINS if PCI
 	select PINCTRL_IMX6Q
 	select SOC_IMX6
diff --git a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
index a5edd7d..3d039ef 100644
--- a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
+++ b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
@@ -71,6 +71,7 @@
 	if (!pdata)
 		pdata = &default_esdhc_pdata;
 
-	return imx_add_platform_device(data->devid, data->id, res,
-			ARRAY_SIZE(res), pdata, sizeof(*pdata));
+	return imx_add_platform_device_dmamask(data->devid, data->id, res,
+			ARRAY_SIZE(res), pdata, sizeof(*pdata),
+			DMA_BIT_MASK(32));
 }
diff --git a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c
index bd42d1b..530a728 100644
--- a/arch/arm/mach-imx/imx27-dt.c
+++ b/arch/arm/mach-imx/imx27-dt.c
@@ -18,15 +18,6 @@
 #include "common.h"
 #include "mx27.h"
 
-static void __init imx27_dt_init(void)
-{
-	struct platform_device_info devinfo = { .name = "cpufreq-dt", };
-
-	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-
-	platform_device_register_full(&devinfo);
-}
-
 static const char * const imx27_dt_board_compat[] __initconst = {
 	"fsl,imx27",
 	NULL
@@ -36,6 +27,5 @@
 	.map_io		= mx27_map_io,
 	.init_early	= imx27_init_early,
 	.init_irq	= mx27_init_irq,
-	.init_machine	= imx27_dt_init,
 	.dt_compat	= imx27_dt_board_compat,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c
index 6883fba..10a82a4 100644
--- a/arch/arm/mach-imx/mach-imx51.c
+++ b/arch/arm/mach-imx/mach-imx51.c
@@ -50,13 +50,10 @@
 
 static void __init imx51_dt_init(void)
 {
-	struct platform_device_info devinfo = { .name = "cpufreq-dt", };
-
 	imx51_ipu_mipi_setup();
 	imx_src_init();
 
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-	platform_device_register_full(&devinfo);
 }
 
 static void __init imx51_init_late(void)
diff --git a/arch/arm/mach-imx/mach-imx53.c b/arch/arm/mach-imx/mach-imx53.c
index 86316a9..18b5c5c13 100644
--- a/arch/arm/mach-imx/mach-imx53.c
+++ b/arch/arm/mach-imx/mach-imx53.c
@@ -40,8 +40,6 @@
 static void __init imx53_init_late(void)
 {
 	imx53_pm_init();
-
-	platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
 }
 
 static const char * const imx53_dt_board_compat[] __initconst = {
diff --git a/arch/arm/mach-imx/mach-imx7d.c b/arch/arm/mach-imx/mach-imx7d.c
index 5a27f20..b450f52 100644
--- a/arch/arm/mach-imx/mach-imx7d.c
+++ b/arch/arm/mach-imx/mach-imx7d.c
@@ -105,11 +105,6 @@
 	irqchip_init();
 }
 
-static void __init imx7d_init_late(void)
-{
-	platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-}
-
 static const char *const imx7d_dt_compat[] __initconst = {
 	"fsl,imx7d",
 	NULL,
@@ -117,7 +112,6 @@
 
 DT_MACHINE_START(IMX7D, "Freescale i.MX7 Dual (Device Tree)")
 	.init_irq	= imx7d_init_irq,
-	.init_late	= imx7d_init_late,
 	.init_machine	= imx7d_init_machine,
 	.dt_compat	= imx7d_dt_compat,
 MACHINE_END
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 5b0e363..2b118f2 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -29,7 +29,6 @@
 #include <linux/amba/kmi.h>
 #include <linux/io.h>
 #include <linux/irqchip.h>
-#include <linux/mtd/physmap.h>
 #include <linux/platform_data/clk-integrator.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
@@ -147,65 +146,6 @@
 device_initcall(irq_syscore_init);
 
 /*
- * Flash handling.
- */
-static int ap_flash_init(struct platform_device *dev)
-{
-	u32 tmp;
-
-	writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP,
-	       ap_syscon_base + INTEGRATOR_SC_CTRLC_OFFSET);
-
-	tmp = readl(ebi_base + INTEGRATOR_EBI_CSR1_OFFSET) |
-		INTEGRATOR_EBI_WRITE_ENABLE;
-	writel(tmp, ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
-
-	if (!(readl(ebi_base + INTEGRATOR_EBI_CSR1_OFFSET)
-	      & INTEGRATOR_EBI_WRITE_ENABLE)) {
-		writel(0xa05f, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
-		writel(tmp, ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
-		writel(0, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
-	}
-	return 0;
-}
-
-static void ap_flash_exit(struct platform_device *dev)
-{
-	u32 tmp;
-
-	writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP,
-	       ap_syscon_base + INTEGRATOR_SC_CTRLC_OFFSET);
-
-	tmp = readl(ebi_base + INTEGRATOR_EBI_CSR1_OFFSET) &
-		~INTEGRATOR_EBI_WRITE_ENABLE;
-	writel(tmp, ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
-
-	if (readl(ebi_base + INTEGRATOR_EBI_CSR1_OFFSET) &
-	    INTEGRATOR_EBI_WRITE_ENABLE) {
-		writel(0xa05f, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
-		writel(tmp, ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
-		writel(0, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
-	}
-}
-
-static void ap_flash_set_vpp(struct platform_device *pdev, int on)
-{
-	if (on)
-		writel(INTEGRATOR_SC_CTRL_nFLVPPEN,
-		       ap_syscon_base + INTEGRATOR_SC_CTRLS_OFFSET);
-	else
-		writel(INTEGRATOR_SC_CTRL_nFLVPPEN,
-		       ap_syscon_base + INTEGRATOR_SC_CTRLC_OFFSET);
-}
-
-static struct physmap_flash_data ap_flash_data = {
-	.width		= 4,
-	.init		= ap_flash_init,
-	.exit		= ap_flash_exit,
-	.set_vpp	= ap_flash_set_vpp,
-};
-
-/*
  * For the PL010 found in the Integrator/AP some of the UART control is
  * implemented in the system controller and accessed using a callback
  * from the driver.
@@ -266,8 +206,6 @@
 		"kmi0", NULL),
 	OF_DEV_AUXDATA("arm,primecell", KMI1_BASE,
 		"kmi1", NULL),
-	OF_DEV_AUXDATA("cfi-flash", INTEGRATOR_FLASH_BASE,
-		"physmap-flash", &ap_flash_data),
 	{ /* sentinel */ },
 };
 
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index b5fb71a..6f6b051 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -23,7 +23,6 @@
 #include <linux/io.h>
 #include <linux/irqchip.h>
 #include <linux/gfp.h>
-#include <linux/mtd/physmap.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
@@ -43,14 +42,8 @@
 /* Base address to the CP controller */
 static void __iomem *intcp_con_base;
 
-#define INTCP_PA_FLASH_BASE		0x24000000
-
 #define INTCP_PA_CLCD_BASE		0xc0000000
 
-#define INTCP_FLASHPROG			0x04
-#define CINTEGRATOR_FLASHPROG_FLVPPEN	(1 << 0)
-#define CINTEGRATOR_FLASHPROG_FLWREN	(1 << 1)
-
 /*
  * Logical      Physical
  * f1000000	10000000	Core module registers
@@ -108,48 +101,6 @@
 }
 
 /*
- * Flash handling.
- */
-static int intcp_flash_init(struct platform_device *dev)
-{
-	u32 val;
-
-	val = readl(intcp_con_base + INTCP_FLASHPROG);
-	val |= CINTEGRATOR_FLASHPROG_FLWREN;
-	writel(val, intcp_con_base + INTCP_FLASHPROG);
-
-	return 0;
-}
-
-static void intcp_flash_exit(struct platform_device *dev)
-{
-	u32 val;
-
-	val = readl(intcp_con_base + INTCP_FLASHPROG);
-	val &= ~(CINTEGRATOR_FLASHPROG_FLVPPEN|CINTEGRATOR_FLASHPROG_FLWREN);
-	writel(val, intcp_con_base + INTCP_FLASHPROG);
-}
-
-static void intcp_flash_set_vpp(struct platform_device *pdev, int on)
-{
-	u32 val;
-
-	val = readl(intcp_con_base + INTCP_FLASHPROG);
-	if (on)
-		val |= CINTEGRATOR_FLASHPROG_FLVPPEN;
-	else
-		val &= ~CINTEGRATOR_FLASHPROG_FLVPPEN;
-	writel(val, intcp_con_base + INTCP_FLASHPROG);
-}
-
-static struct physmap_flash_data intcp_flash_data = {
-	.width		= 4,
-	.init		= intcp_flash_init,
-	.exit		= intcp_flash_exit,
-	.set_vpp	= intcp_flash_set_vpp,
-};
-
-/*
  * It seems that the card insertion interrupt remains active after
  * we've acknowledged it.  We therefore ignore the interrupt, and
  * rely on reading it from the SIC.  This also means that we must
@@ -260,8 +211,6 @@
 		"aaci", &mmc_data),
 	OF_DEV_AUXDATA("arm,primecell", INTCP_PA_CLCD_BASE,
 		"clcd", &clcd_data),
-	OF_DEV_AUXDATA("cfi-flash", INTCP_PA_FLASH_BASE,
-		"physmap-flash", &intcp_flash_data),
 	{ /* sentinel */ },
 };
 
diff --git a/arch/arm/mach-lpc32xx/common.c b/arch/arm/mach-lpc32xx/common.c
index 5b7a1e7..2f6067b 100644
--- a/arch/arm/mach-lpc32xx/common.c
+++ b/arch/arm/mach-lpc32xx/common.c
@@ -17,13 +17,6 @@
  */
 
 #include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/i2c-pnx.h>
-#include <linux/io.h>
 
 #include <asm/mach/map.h>
 #include <asm/system_info.h>
@@ -44,19 +37,6 @@
 }
 
 /*
- * Returns SYSCLK source
- * 0 = PLL397, 1 = main oscillator
- */
-int clk_is_sysclk_mainosc(void)
-{
-	if ((__raw_readl(LPC32XX_CLKPWR_SYSCLK_CTRL) &
-		LPC32XX_CLKPWR_SYSCTRL_SYSCLKMUX) == 0)
-		return 1;
-
-	return 0;
-}
-
-/*
  * Detects and returns IRAM size for the device variation
  */
 #define LPC32XX_IRAM_BANK_SIZE SZ_128K
@@ -87,81 +67,6 @@
 }
 EXPORT_SYMBOL_GPL(lpc32xx_return_iram_size);
 
-/*
- * Computes PLL rate from PLL register and input clock
- */
-u32 clk_check_pll_setup(u32 ifreq, struct clk_pll_setup *pllsetup)
-{
-	u32 ilfreq, p, m, n, fcco, fref, cfreq;
-	int mode;
-
-	/*
-	 * PLL requirements
-	 * ifreq must be >= 1MHz and <= 20MHz
-	 * FCCO must be >= 156MHz and <= 320MHz
-	 * FREF must be >= 1MHz and <= 27MHz
-	 * Assume the passed input data is not valid
-	 */
-
-	ilfreq = ifreq;
-	m = pllsetup->pll_m;
-	n = pllsetup->pll_n;
-	p = pllsetup->pll_p;
-
-	mode = (pllsetup->cco_bypass_b15 << 2) |
-		(pllsetup->direct_output_b14 << 1) |
-	pllsetup->fdbk_div_ctrl_b13;
-
-	switch (mode) {
-	case 0x0: /* Non-integer mode */
-		cfreq = (m * ilfreq) / (2 * p * n);
-		fcco = (m * ilfreq) / n;
-		fref = ilfreq / n;
-		break;
-
-	case 0x1: /* integer mode */
-		cfreq = (m * ilfreq) / n;
-		fcco = (m * ilfreq) / (n * 2 * p);
-		fref = ilfreq / n;
-		break;
-
-	case 0x2:
-	case 0x3: /* Direct mode */
-		cfreq = (m * ilfreq) / n;
-		fcco = cfreq;
-		fref = ilfreq / n;
-		break;
-
-	case 0x4:
-	case 0x5: /* Bypass mode */
-		cfreq = ilfreq / (2 * p);
-		fcco = 156000000;
-		fref = 1000000;
-		break;
-
-	case 0x6:
-	case 0x7: /* Direct bypass mode */
-	default:
-		cfreq = ilfreq;
-		fcco = 156000000;
-		fref = 1000000;
-		break;
-	}
-
-	if (fcco < 156000000 || fcco > 320000000)
-		cfreq = 0;
-
-	if (fref < 1000000 || fref > 27000000)
-		cfreq = 0;
-
-	return (u32) cfreq;
-}
-
-u32 clk_get_pclk_div(void)
-{
-	return 1 + ((__raw_readl(LPC32XX_CLKPWR_HCLK_DIV) >> 2) & 0x1F);
-}
-
 static struct map_desc lpc32xx_io_desc[] __initdata = {
 	{
 		.virtual	= (unsigned long)IO_ADDRESS(LPC32XX_AHB0_START),
diff --git a/arch/arm/mach-lpc32xx/common.h b/arch/arm/mach-lpc32xx/common.h
index 2d90801..30c9e64 100644
--- a/arch/arm/mach-lpc32xx/common.h
+++ b/arch/arm/mach-lpc32xx/common.h
@@ -19,37 +19,15 @@
 #ifndef __LPC32XX_COMMON_H
 #define __LPC32XX_COMMON_H
 
-#include <mach/board.h>
-#include <linux/platform_device.h>
-#include <linux/reboot.h>
+#include <linux/init.h>
 
 /*
  * Other arch specific structures and functions
  */
-extern void lpc32xx_timer_init(void);
 extern void __init lpc32xx_init_irq(void);
 extern void __init lpc32xx_map_io(void);
 extern void __init lpc32xx_serial_init(void);
 
-
-/*
- * Structure used for setting up and querying the PLLS
- */
-struct clk_pll_setup {
-	int analog_on;
-	int cco_bypass_b15;
-	int direct_output_b14;
-	int fdbk_div_ctrl_b13;
-	int pll_p;
-	int pll_n;
-	u32 pll_m;
-};
-
-extern int clk_is_sysclk_mainosc(void);
-extern u32 clk_check_pll_setup(u32 ifreq, struct clk_pll_setup *pllsetup);
-extern u32 clk_get_pllrate_from_reg(u32 inputclk, u32 regval);
-extern u32 clk_get_pclk_div(void);
-
 /*
  * Returns the LPC32xx unique 128-bit chip ID
  */
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index b2f9e22..81265e8 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -159,7 +159,7 @@
 	.dma_filter = pl08x_filter_id,
 };
 
-static const struct of_dev_auxdata const lpc32xx_auxdata_lookup[] __initconst = {
+static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = {
 	OF_DEV_AUXDATA("arm,pl022", 0x20084000, "dev:ssp0", NULL),
 	OF_DEV_AUXDATA("arm,pl022", 0x2008C000, "dev:ssp1", NULL),
 	OF_DEV_AUXDATA("arm,pl110", 0x31040000, "dev:clcd", &lpc32xx_clcd_data),
@@ -206,7 +206,6 @@
 DT_MACHINE_START(LPC32XX_DT, "LPC32XX SoC (Flattened Device Tree)")
 	.atag_offset	= 0x100,
 	.map_io		= lpc32xx_map_io,
-	.init_irq	= lpc32xx_init_irq,
 	.init_machine	= lpc3250_machine_init,
 	.dt_compat	= lpc32xx_dt_compat,
 MACHINE_END
diff --git a/arch/arm/mach-mediatek/Kconfig b/arch/arm/mach-mediatek/Kconfig
index 8ced4ad..70e49d5 100644
--- a/arch/arm/mach-mediatek/Kconfig
+++ b/arch/arm/mach-mediatek/Kconfig
@@ -10,6 +10,10 @@
 
 if ARCH_MEDIATEK
 
+config MACH_MT2701
+	bool "MediaTek MT2701 SoCs support"
+	default ARCH_MEDIATEK
+
 config MACH_MT6589
 	bool "MediaTek MT6589 SoCs support"
 	default ARCH_MEDIATEK
diff --git a/arch/arm/mach-mediatek/mediatek.c b/arch/arm/mach-mediatek/mediatek.c
index 9c2e38d..a6e3c98 100644
--- a/arch/arm/mach-mediatek/mediatek.c
+++ b/arch/arm/mach-mediatek/mediatek.c
@@ -29,6 +29,7 @@
 	void __iomem *gpt_base;
 
 	if (of_machine_is_compatible("mediatek,mt6589") ||
+	    of_machine_is_compatible("mediatek,mt7623") ||
 	    of_machine_is_compatible("mediatek,mt8135") ||
 	    of_machine_is_compatible("mediatek,mt8127")) {
 		/* turn on GPT6 which ungates arch timer clocks */
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index 99cc939..45a0520 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -168,8 +168,7 @@
 
 static void __init clk_init(void)
 {
-	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
-				       get_tclk());
+	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, 0, get_tclk());
 
 	orion_clkdev_init(tclk);
 }
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index ed8fda4..b444423 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -20,7 +20,6 @@
 
 #include <linux/clk.h>
 #include <linux/cpu_pm.h>
-#include <linux/cpufreq-dt.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -29,7 +28,6 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
-#include <linux/pm_opp.h>
 #include <linux/resource.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
@@ -608,86 +606,3 @@
 
 	return 0;
 }
-
-struct cpufreq_dt_platform_data cpufreq_dt_pd = {
-	.independent_clocks = true,
-};
-
-static int __init armada_xp_pmsu_cpufreq_init(void)
-{
-	struct device_node *np;
-	struct resource res;
-	int ret, cpu;
-
-	if (!of_machine_is_compatible("marvell,armadaxp"))
-		return 0;
-
-	/*
-	 * In order to have proper cpufreq handling, we need to ensure
-	 * that the Device Tree description of the CPU clock includes
-	 * the definition of the PMU DFS registers. If not, we do not
-	 * register the clock notifier and the cpufreq driver. This
-	 * piece of code is only for compatibility with old Device
-	 * Trees.
-	 */
-	np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
-	if (!np)
-		return 0;
-
-	ret = of_address_to_resource(np, 1, &res);
-	if (ret) {
-		pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
-		of_node_put(np);
-		return 0;
-	}
-
-	of_node_put(np);
-
-	/*
-	 * For each CPU, this loop registers the operating points
-	 * supported (which are the nominal CPU frequency and half of
-	 * it), and registers the clock notifier that will take care
-	 * of doing the PMSU part of a frequency transition.
-	 */
-	for_each_possible_cpu(cpu) {
-		struct device *cpu_dev;
-		struct clk *clk;
-		int ret;
-
-		cpu_dev = get_cpu_device(cpu);
-		if (!cpu_dev) {
-			pr_err("Cannot get CPU %d\n", cpu);
-			continue;
-		}
-
-		clk = clk_get(cpu_dev, 0);
-		if (IS_ERR(clk)) {
-			pr_err("Cannot get clock for CPU %d\n", cpu);
-			return PTR_ERR(clk);
-		}
-
-		/*
-		 * In case of a failure of dev_pm_opp_add(), we don't
-		 * bother with cleaning up the registered OPP (there's
-		 * no function to do so), and simply cancel the
-		 * registration of the cpufreq device.
-		 */
-		ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
-		if (ret) {
-			clk_put(clk);
-			return ret;
-		}
-
-		ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
-		if (ret) {
-			clk_put(clk);
-			return ret;
-		}
-	}
-
-	platform_device_register_data(NULL, "cpufreq-dt", -1,
-				      &cpufreq_dt_pd, sizeof(cpufreq_dt_pd));
-	return 0;
-}
-
-device_initcall(armada_xp_pmsu_cpufreq_init);
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 0ba6a0e..04e276c 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
+ccflags-y := -I$(srctree)/$(src)/include \
 	-I$(srctree)/arch/arm/plat-omap/include
 
 # Common support
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index da174c0..9a70739 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -30,6 +30,8 @@
 #include <linux/platform_data/spi-omap2-mcspi.h>
 #include <linux/platform_data/mtd-onenand-omap2.h>
 
+#include <plat/dmtimer.h>
+
 #include <asm/system_info.h>
 
 #include "common.h"
@@ -47,9 +49,8 @@
 
 #include <video/omap-panel-data.h>
 
-#if defined(CONFIG_IR_RX51) || defined(CONFIG_IR_RX51_MODULE)
+#include <linux/platform_data/pwm_omap_dmtimer.h>
 #include <linux/platform_data/media/ir-rx51.h>
-#endif
 
 #include "mux.h"
 #include "omap-pm.h"
@@ -1212,10 +1213,40 @@
 				gpio_to_irq(RX51_TSC2005_IRQ_GPIO);
 }
 
+#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
+static struct pwm_omap_dmtimer_pdata __maybe_unused pwm_dmtimer_pdata = {
+	.request_by_node = omap_dm_timer_request_by_node,
+	.request_specific = omap_dm_timer_request_specific,
+	.request = omap_dm_timer_request,
+	.set_source = omap_dm_timer_set_source,
+	.get_irq = omap_dm_timer_get_irq,
+	.set_int_enable = omap_dm_timer_set_int_enable,
+	.set_int_disable = omap_dm_timer_set_int_disable,
+	.free = omap_dm_timer_free,
+	.enable = omap_dm_timer_enable,
+	.disable = omap_dm_timer_disable,
+	.get_fclk = omap_dm_timer_get_fclk,
+	.start = omap_dm_timer_start,
+	.stop = omap_dm_timer_stop,
+	.set_load = omap_dm_timer_set_load,
+	.set_match = omap_dm_timer_set_match,
+	.set_pwm = omap_dm_timer_set_pwm,
+	.set_prescaler = omap_dm_timer_set_prescaler,
+	.read_counter = omap_dm_timer_read_counter,
+	.write_counter = omap_dm_timer_write_counter,
+	.read_status = omap_dm_timer_read_status,
+	.write_status = omap_dm_timer_write_status,
+};
+#endif
+
 #if defined(CONFIG_IR_RX51) || defined(CONFIG_IR_RX51_MODULE)
 static struct lirc_rx51_platform_data rx51_lirc_data = {
 	.set_max_mpu_wakeup_lat = omap_pm_set_max_mpu_wakeup_lat,
 	.pwm_timer = 9, /* Use GPT 9 for CIR */
+#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
+	.dmtimer = &pwm_dmtimer_pdata,
+#endif
+
 };
 
 static struct platform_device rx51_lirc_device = {
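
The board file above now hands the complete dual-mode-timer operation table to the CIR driver as platform data, so the driver can program GPT 9 without reaching into plat/dmtimer.h itself. As a rough sketch of how such a callback table is consumed — the wrapper function below and the timer source/trigger constants are illustrative assumptions, not code from this patch:

/* Hypothetical consumer: drive the CIR timer as a PWM via the pdata hooks. */
static int example_cir_pwm_setup(struct pwm_omap_dmtimer_pdata *dmtimer,
				 int timer_id)
{
	pwm_omap_dmtimer *timer;

	timer = dmtimer->request_specific(timer_id);	/* GPT 9 for CIR */
	if (!timer)
		return -ENODEV;

	/* clock the timer from the 32 kHz source and start PWM toggling */
	dmtimer->set_source(timer, OMAP_TIMER_SRC_32_KHZ);
	dmtimer->set_pwm(timer, 0 /* def_on */, 1 /* toggle */,
			 OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE);
	dmtimer->start(timer);

	return 0;
}
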
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index 7581e03..ef9ed36 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -461,7 +461,7 @@
 	.cm_inst	  = DRA7XX_CM_CORE_AON_IPU_INST,
 	.clkdm_offs	  = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS,
 	.dep_bit	  = DRA7XX_IPU_STATDEP_SHIFT,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.flags		  = CLKDM_CAN_SWSUP,
 };
 
 static struct clockdomain mpu1_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index d85c249..2abd53a 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -669,9 +669,9 @@
 		case 0:
 			omap_revision = DRA722_REV_ES1_0;
 			break;
+		case 1:
 		default:
-			/* If we have no new revisions */
-			omap_revision = DRA722_REV_ES1_0;
+			omap_revision = DRA722_REV_ES2_0;
 			break;
 		}
 		break;
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 3c87e40..49de4dd 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -368,6 +368,7 @@
 void __init dra7xx_map_io(void)
 {
 	iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
+	omap_barriers_init();
 }
 #endif
 /*
@@ -736,7 +737,8 @@
 #ifdef CONFIG_SOC_DRA7XX
 void __init dra7xx_init_early(void)
 {
-	omap2_set_globals_tap(-1, OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
+	omap2_set_globals_tap(DRA7XX_CLASS,
+			      OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
 	omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
 	omap2_control_base_init();
 	omap4_pm_init_early();
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index f397bd6..0c47543 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -274,6 +274,10 @@
  */
 static void irq_save_context(void)
 {
+	/* DRA7 has no SAR to save */
+	if (soc_is_dra7xx())
+		return;
+
 	if (!sar_base)
 		sar_base = omap4_get_sar_ram_base();
 
@@ -290,6 +294,9 @@
 {
 	u32 val;
 	u32 offset = SAR_BACKUP_STATUS_OFFSET;
+	/* DRA7 has no SAR to save */
+	if (soc_is_dra7xx())
+		return;
 
 	if (soc_is_omap54xx())
 		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
@@ -320,6 +327,11 @@
 {
 	unsigned int cpu = (unsigned int)hcpu;
 
+	/*
+	 * Corresponding FROZEN transitions do not have to be handled,
+	 * they are handled at a higher level
+	 * (drivers/cpuidle/coupled.c).
+	 */
 	switch (action) {
 	case CPU_ONLINE:
 		wakeupgen_irqmask_all(cpu, 0);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index b6d62e4..83cb527 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1416,9 +1416,7 @@
 	    (sf & SYSC_HAS_CLOCKACTIVITY))
 		_set_clockactivity(oh, oh->class->sysc->clockact, &v);
 
-	/* If the cached value is the same as the new value, skip the write */
-	if (oh->_sysc_cache != v)
-		_write_sysconfig(v, oh);
+	_write_sysconfig(v, oh);
 
 	/*
 	 * Set the autoidle bit only after setting the smartidle bit
@@ -1481,7 +1479,9 @@
 		_set_master_standbymode(oh, idlemode, &v);
 	}
 
-	_write_sysconfig(v, oh);
+	/* If the cached value is the same as the new value, skip the write */
+	if (oh->_sysc_cache != v)
+		_write_sysconfig(v, oh);
 }
 
 /**
@@ -2207,15 +2207,15 @@
 
 	pr_debug("omap_hwmod: %s: idling\n", oh->name);
 
+	if (_are_all_hardreset_lines_asserted(oh))
+		return 0;
+
 	if (oh->_state != _HWMOD_STATE_ENABLED) {
 		WARN(1, "omap_hwmod: %s: idle state can only be entered from enabled state\n",
 			oh->name);
 		return -EINVAL;
 	}
 
-	if (_are_all_hardreset_lines_asserted(oh))
-		return 0;
-
 	if (oh->class->sysc)
 		_idle_sysc(oh);
 	_del_initiator_dep(oh, mpu_oh);
@@ -2262,6 +2262,9 @@
 	int ret, i;
 	u8 prev_state;
 
+	if (_are_all_hardreset_lines_asserted(oh))
+		return 0;
+
 	if (oh->_state != _HWMOD_STATE_IDLE &&
 	    oh->_state != _HWMOD_STATE_ENABLED) {
 		WARN(1, "omap_hwmod: %s: disabled state can only be entered from idle, or enabled state\n",
@@ -2269,9 +2272,6 @@
 		return -EINVAL;
 	}
 
-	if (_are_all_hardreset_lines_asserted(oh))
-		return 0;
-
 	pr_debug("omap_hwmod: %s: disabling\n", oh->name);
 
 	if (oh->class->pre_shutdown) {
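
The first two hunks in omap_hwmod.c above swap where the cached-SYSCONFIG comparison happens: enabling a module now always writes the register, while idling only writes when the value actually changed. A condensed, abridged illustration of the resulting policy (a sketch, not a copy of the functions themselves):

/* Abridged sketch of the SYSCONFIG write policy after this change. */
static void sketch_enable_sysc(struct omap_hwmod *oh, u32 v)
{
	/* enable path: write unconditionally; the cached copy may be stale */
	_write_sysconfig(v, oh);
}

static void sketch_idle_sysc(struct omap_hwmod *oh, u32 v)
{
	/* idle path: skip the interconnect access when nothing changed */
	if (oh->_sysc_cache != v)
		_write_sysconfig(v, oh);
}
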
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 7c7a311..4041bad 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -754,6 +754,8 @@
  */
 
 extern int omap_hwmod_aess_preprogram(struct omap_hwmod *oh);
+void omap_hwmod_rtc_unlock(struct omap_hwmod *oh);
+void omap_hwmod_rtc_lock(struct omap_hwmod *oh);
 
 /*
  * Chip variant-specific hwmod init routines - XXX should be converted
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index 907a452b..aed3362 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -918,6 +918,8 @@
 static struct omap_hwmod_class am33xx_rtc_hwmod_class = {
 	.name		= "rtc",
 	.sysc		= &am33xx_rtc_sysc,
+	.unlock		= &omap_hwmod_rtc_unlock,
+	.lock		= &omap_hwmod_rtc_lock,
 };
 
 struct omap_hwmod am33xx_rtc_hwmod = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 9442d89..d0e7e525 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -383,6 +383,68 @@
 	},
 };
 
+/* pwmss  */
+static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
+	.rev_offs	= 0x0,
+	.sysc_offs	= 0x4,
+	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+/*
+ * epwmss class
+ */
+static struct omap_hwmod_class dra7xx_epwmss_hwmod_class = {
+	.name		= "epwmss",
+	.sysc		= &dra7xx_epwmss_sysc,
+};
+
+/* epwmss0 */
+static struct omap_hwmod dra7xx_epwmss0_hwmod = {
+	.name		= "epwmss0",
+	.class		= &dra7xx_epwmss_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "l4_root_clk_div",
+	.prcm		= {
+		.omap4	= {
+			.modulemode	= MODULEMODE_SWCTRL,
+			.clkctrl_offs	= DRA7XX_CM_L4PER2_PWMSS1_CLKCTRL_OFFSET,
+			.context_offs	= DRA7XX_RM_L4PER2_PWMSS1_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/* epwmss1 */
+static struct omap_hwmod dra7xx_epwmss1_hwmod = {
+	.name		= "epwmss1",
+	.class		= &dra7xx_epwmss_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "l4_root_clk_div",
+	.prcm		= {
+		.omap4	= {
+			.modulemode	= MODULEMODE_SWCTRL,
+			.clkctrl_offs	= DRA7XX_CM_L4PER2_PWMSS2_CLKCTRL_OFFSET,
+			.context_offs	= DRA7XX_RM_L4PER2_PWMSS2_CONTEXT_OFFSET,
+		},
+	},
+};
+
+/* epwmss2 */
+static struct omap_hwmod dra7xx_epwmss2_hwmod = {
+	.name		= "epwmss2",
+	.class		= &dra7xx_epwmss_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "l4_root_clk_div",
+	.prcm		= {
+		.omap4	= {
+			.modulemode	= MODULEMODE_SWCTRL,
+			.clkctrl_offs	= DRA7XX_CM_L4PER2_PWMSS3_CLKCTRL_OFFSET,
+			.context_offs	= DRA7XX_RM_L4PER2_PWMSS3_CONTEXT_OFFSET,
+		},
+	},
+};
+
 /*
  * 'dma' class
  *
@@ -1374,6 +1436,52 @@
 	.sysc	= &dra7xx_mcasp_sysc,
 };
 
+/* mcasp1 */
+static struct omap_hwmod_opt_clk mcasp1_opt_clks[] = {
+	{ .role = "ahclkx", .clk = "mcasp1_ahclkx_mux" },
+	{ .role = "ahclkr", .clk = "mcasp1_ahclkr_mux" },
+};
+
+static struct omap_hwmod dra7xx_mcasp1_hwmod = {
+	.name		= "mcasp1",
+	.class		= &dra7xx_mcasp_hwmod_class,
+	.clkdm_name	= "ipu_clkdm",
+	.main_clk	= "mcasp1_aux_gfclk_mux",
+	.flags		= HWMOD_OPT_CLKS_NEEDED,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_IPU_MCASP1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_IPU_MCASP1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mcasp1_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcasp1_opt_clks),
+};
+
+/* mcasp2 */
+static struct omap_hwmod_opt_clk mcasp2_opt_clks[] = {
+	{ .role = "ahclkx", .clk = "mcasp2_ahclkx_mux" },
+	{ .role = "ahclkr", .clk = "mcasp2_ahclkr_mux" },
+};
+
+static struct omap_hwmod dra7xx_mcasp2_hwmod = {
+	.name		= "mcasp2",
+	.class		= &dra7xx_mcasp_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "mcasp2_aux_gfclk_mux",
+	.flags		= HWMOD_OPT_CLKS_NEEDED,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_MCASP2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER2_MCASP2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mcasp2_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcasp2_opt_clks),
+};
+
 /* mcasp3 */
 static struct omap_hwmod_opt_clk mcasp3_opt_clks[] = {
 	{ .role = "ahclkx", .clk = "mcasp3_ahclkx_mux" },
@@ -1396,6 +1504,116 @@
 	.opt_clks_cnt	= ARRAY_SIZE(mcasp3_opt_clks),
 };
 
+/* mcasp4 */
+static struct omap_hwmod_opt_clk mcasp4_opt_clks[] = {
+	{ .role = "ahclkx", .clk = "mcasp4_ahclkx_mux" },
+};
+
+static struct omap_hwmod dra7xx_mcasp4_hwmod = {
+	.name		= "mcasp4",
+	.class		= &dra7xx_mcasp_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "mcasp4_aux_gfclk_mux",
+	.flags		= HWMOD_OPT_CLKS_NEEDED,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_MCASP4_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER2_MCASP4_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mcasp4_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcasp4_opt_clks),
+};
+
+/* mcasp5 */
+static struct omap_hwmod_opt_clk mcasp5_opt_clks[] = {
+	{ .role = "ahclkx", .clk = "mcasp5_ahclkx_mux" },
+};
+
+static struct omap_hwmod dra7xx_mcasp5_hwmod = {
+	.name		= "mcasp5",
+	.class		= &dra7xx_mcasp_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "mcasp5_aux_gfclk_mux",
+	.flags		= HWMOD_OPT_CLKS_NEEDED,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_MCASP5_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER2_MCASP5_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mcasp5_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcasp5_opt_clks),
+};
+
+/* mcasp6 */
+static struct omap_hwmod_opt_clk mcasp6_opt_clks[] = {
+	{ .role = "ahclkx", .clk = "mcasp6_ahclkx_mux" },
+};
+
+static struct omap_hwmod dra7xx_mcasp6_hwmod = {
+	.name		= "mcasp6",
+	.class		= &dra7xx_mcasp_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "mcasp6_aux_gfclk_mux",
+	.flags		= HWMOD_OPT_CLKS_NEEDED,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_MCASP6_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER2_MCASP6_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mcasp6_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcasp6_opt_clks),
+};
+
+/* mcasp7 */
+static struct omap_hwmod_opt_clk mcasp7_opt_clks[] = {
+	{ .role = "ahclkx", .clk = "mcasp7_ahclkx_mux" },
+};
+
+static struct omap_hwmod dra7xx_mcasp7_hwmod = {
+	.name		= "mcasp7",
+	.class		= &dra7xx_mcasp_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "mcasp7_aux_gfclk_mux",
+	.flags		= HWMOD_OPT_CLKS_NEEDED,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_MCASP7_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER2_MCASP7_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mcasp7_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcasp7_opt_clks),
+};
+
+/* mcasp8 */
+static struct omap_hwmod_opt_clk mcasp8_opt_clks[] = {
+	{ .role = "ahclkx", .clk = "mcasp8_ahclkx_mux" },
+};
+
+static struct omap_hwmod dra7xx_mcasp8_hwmod = {
+	.name		= "mcasp8",
+	.class		= &dra7xx_mcasp_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "mcasp8_aux_gfclk_mux",
+	.flags		= HWMOD_OPT_CLKS_NEEDED,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_MCASP8_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER2_MCASP8_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mcasp8_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcasp8_opt_clks),
+};
+
 /*
  * 'mmc' class
  *
@@ -1707,6 +1925,8 @@
 static struct omap_hwmod_class dra7xx_rtcss_hwmod_class = {
 	.name	= "rtcss",
 	.sysc	= &dra7xx_rtcss_sysc,
+	.unlock	= &omap_hwmod_rtc_unlock,
+	.lock	= &omap_hwmod_rtc_lock,
 };
 
 /* rtcss */
@@ -2065,6 +2285,20 @@
 	},
 };
 
+/* timer12 */
+static struct omap_hwmod dra7xx_timer12_hwmod = {
+	.name		= "timer12",
+	.class		= &dra7xx_timer_hwmod_class,
+	.clkdm_name	= "wkupaon_clkdm",
+	.main_clk	= "secure_32k_clk_src_ck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_WKUPAON_TIMER12_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_WKUPAON_TIMER12_CONTEXT_OFFSET,
+		},
+	},
+};
+
 /* timer13 */
 static struct omap_hwmod dra7xx_timer13_hwmod = {
 	.name		= "timer13",
@@ -2726,6 +2960,38 @@
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_per2 -> mcasp1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp1 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_mcasp1_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> mcasp1 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp1 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_mcasp1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per2 -> mcasp2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp2 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_mcasp2_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> mcasp2 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp2 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_mcasp2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_per2 -> mcasp3 */
 static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp3 = {
 	.master		= &dra7xx_l4_per2_hwmod,
@@ -2742,6 +3008,46 @@
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_per2 -> mcasp4 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp4 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_mcasp4_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per2 -> mcasp5 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp5 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_mcasp5_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per2 -> mcasp6 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp6 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_mcasp6_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per2 -> mcasp7 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp7 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_mcasp7_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per2 -> mcasp8 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp8 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_mcasp8_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_per1 -> elm */
 static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = {
 	.master		= &dra7xx_l4_per1_hwmod,
@@ -3281,6 +3587,14 @@
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_wkup -> timer12 */
+static struct omap_hwmod_ocp_if dra7xx_l4_wkup__timer12 = {
+	.master		= &dra7xx_l4_wkup_hwmod,
+	.slave		= &dra7xx_timer12_hwmod,
+	.clk		= "wkupaon_iclk_mux",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_per3 -> timer13 */
 static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer13 = {
 	.master		= &dra7xx_l4_per3_hwmod,
@@ -3465,6 +3779,30 @@
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_per2 -> epwmss0 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__epwmss0 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_epwmss0_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU,
+};
+
+/* l4_per2 -> epwmss1 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__epwmss1 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_epwmss1_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU,
+};
+
+/* l4_per2 -> epwmss2 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__epwmss2 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_epwmss2_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU,
+};
+
 static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l3_main_1__dmm,
 	&dra7xx_l3_main_2__l3_instr,
@@ -3484,8 +3822,17 @@
 	&dra7xx_l4_wkup__dcan1,
 	&dra7xx_l4_per2__dcan2,
 	&dra7xx_l4_per2__cpgmac0,
+	&dra7xx_l4_per2__mcasp1,
+	&dra7xx_l3_main_1__mcasp1,
+	&dra7xx_l4_per2__mcasp2,
+	&dra7xx_l3_main_1__mcasp2,
 	&dra7xx_l4_per2__mcasp3,
 	&dra7xx_l3_main_1__mcasp3,
+	&dra7xx_l4_per2__mcasp4,
+	&dra7xx_l4_per2__mcasp5,
+	&dra7xx_l4_per2__mcasp6,
+	&dra7xx_l4_per2__mcasp7,
+	&dra7xx_l4_per2__mcasp8,
 	&dra7xx_gmac__mdio,
 	&dra7xx_l4_cfg__dma_system,
 	&dra7xx_l3_main_1__tpcc,
@@ -3577,9 +3924,19 @@
 	&dra7xx_l3_main_1__vcp2,
 	&dra7xx_l4_per2__vcp2,
 	&dra7xx_l4_wkup__wd_timer2,
+	&dra7xx_l4_per2__epwmss0,
+	&dra7xx_l4_per2__epwmss1,
+	&dra7xx_l4_per2__epwmss2,
 	NULL,
 };
 
+/* GP-only hwmod links */
+static struct omap_hwmod_ocp_if *dra7xx_gp_hwmod_ocp_ifs[] __initdata = {
+	&dra7xx_l4_wkup__timer12,
+	NULL,
+};
+
+/* SoC variant specific hwmod links */
 static struct omap_hwmod_ocp_if *dra74x_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l4_per3__usb_otg_ss4,
 	NULL,
@@ -3597,9 +3954,12 @@
 	ret = omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);
 
 	if (!ret && soc_is_dra74x())
-		return omap_hwmod_register_links(dra74x_hwmod_ocp_ifs);
+		ret = omap_hwmod_register_links(dra74x_hwmod_ocp_ifs);
 	else if (!ret && soc_is_dra72x())
-		return omap_hwmod_register_links(dra72x_hwmod_ocp_ifs);
+		ret = omap_hwmod_register_links(dra72x_hwmod_ocp_ifs);
+
+	if (!ret && omap_type() == OMAP2_DEVICE_TYPE_GP)
+		ret = omap_hwmod_register_links(dra7xx_gp_hwmod_ocp_ifs);
 
 	return ret;
 }
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index 39736ad..df83277 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -582,9 +582,11 @@
 	.user		= OCP_USER_MPU,
 };
 
+/* USB needs a udelay of 1 after reset, at least on HP t410; use 2 for margin */
 static struct omap_hwmod_class_sysconfig dm81xx_usbhsotg_sysc = {
 	.rev_offs	= 0x0,
 	.sysc_offs	= 0x10,
+	.srst_udelay	= 2,
 	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
 				SYSC_HAS_SOFTRESET,
 	.idlemodes	= SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_SMART,
diff --git a/arch/arm/mach-omap2/omap_hwmod_reset.c b/arch/arm/mach-omap2/omap_hwmod_reset.c
index 65e186c..b68f9c0 100644
--- a/arch/arm/mach-omap2/omap_hwmod_reset.c
+++ b/arch/arm/mach-omap2/omap_hwmod_reset.c
@@ -29,6 +29,16 @@
 #include <sound/aess.h>
 
 #include "omap_hwmod.h"
+#include "common.h"
+
+#define OMAP_RTC_STATUS_REG	0x44
+#define OMAP_RTC_KICK0_REG	0x6c
+#define OMAP_RTC_KICK1_REG	0x70
+
+#define OMAP_RTC_KICK0_VALUE	0x83E70B13
+#define OMAP_RTC_KICK1_VALUE	0x95A4F1E0
+#define OMAP_RTC_STATUS_BUSY	BIT(0)
+#define OMAP_RTC_MAX_READY_TIME	50
 
 /**
  * omap_hwmod_aess_preprogram - enable AESS internal autogating
@@ -51,3 +61,58 @@
 
 	return 0;
 }
+
+/**
+ * omap_rtc_wait_not_busy - Wait for the RTC BUSY flag
+ * @oh: struct omap_hwmod *
+ *
+ * For updating certain RTC registers, the MPU must wait
+ * for the BUSY status in OMAP_RTC_STATUS_REG to become zero.
+ * Once the BUSY status is zero, there is a 15 microsecond access
+ * period in which the MPU can program.
+ */
+static void omap_rtc_wait_not_busy(struct omap_hwmod *oh)
+{
+	int i;
+
+	/* BUSY may stay active for 1/32768 second (~30 usec) */
+	omap_test_timeout(omap_hwmod_read(oh, OMAP_RTC_STATUS_REG)
+			  & OMAP_RTC_STATUS_BUSY, OMAP_RTC_MAX_READY_TIME, i);
+	/* now we have ~15 microseconds to read/write various registers */
+}
+
+/**
+ * omap_hwmod_rtc_unlock - Unlock the Kicker mechanism.
+ * @oh: struct omap_hwmod *
+ *
+ * The RTC IP has a kicker feature that prevents spurious writes to its
+ * registers. In order to write into any of the RTC registers, the KICK
+ * values have to be written to the respective KICK registers first. This
+ * is needed for hwmod to write into the sysconfig register.
+ */
+void omap_hwmod_rtc_unlock(struct omap_hwmod *oh)
+{
+	local_irq_disable();
+	omap_rtc_wait_not_busy(oh);
+	omap_hwmod_write(OMAP_RTC_KICK0_VALUE, oh, OMAP_RTC_KICK0_REG);
+	omap_hwmod_write(OMAP_RTC_KICK1_VALUE, oh, OMAP_RTC_KICK1_REG);
+	local_irq_enable();
+}
+
+/**
+ * omap_hwmod_rtc_lock - Lock the Kicker mechanism.
+ * @oh: struct omap_hwmod *
+ *
+ * The RTC IP has a kicker feature that prevents spurious writes to its
+ * registers. Once the RTC registers are written, the KICK mechanism needs
+ * to be locked again in order to prevent any spurious writes. This function
+ * locks the RTC registers back once hwmod completes its write into the
+ * sysconfig register.
+ */
+void omap_hwmod_rtc_lock(struct omap_hwmod *oh)
+{
+	local_irq_disable();
+	omap_rtc_wait_not_busy(oh);
+	omap_hwmod_write(0x0, oh, OMAP_RTC_KICK0_REG);
+	omap_hwmod_write(0x0, oh, OMAP_RTC_KICK1_REG);
+	local_irq_enable();
+}
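
These two helpers are hooked up as the ->unlock and ->lock operations of the am33xx and dra7xx RTC hwmod classes earlier in this diff. A minimal sketch of how hwmod is expected to bracket its SYSCONFIG update with them — the wrapper name is illustrative, only the hook fields and omap_hwmod_write() come from this series:

/* Illustrative wrapper: unlock the kicker, update SYSCONFIG, lock it again. */
static void sketch_write_sysconfig(u32 v, struct omap_hwmod *oh)
{
	if (oh->class->unlock)
		oh->class->unlock(oh);	/* write the KICK0/KICK1 magic values */

	omap_hwmod_write(v, oh, oh->class->sysc->sysc_offs);

	if (oh->class->lock)
		oh->class->lock(oh);	/* write 0 back to KICK0/KICK1 */
}
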
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index a935d28..6571ad9 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -21,9 +21,11 @@
 #include <linux/regulator/fixed.h>
 
 #include <linux/platform_data/pinctrl-single.h>
+#include <linux/platform_data/hsmmc-omap.h>
 #include <linux/platform_data/iommu-omap.h>
 #include <linux/platform_data/wkup_m3.h>
 #include <linux/platform_data/pwm_omap_dmtimer.h>
+#include <linux/platform_data/media/ir-rx51.h>
 #include <plat/dmtimer.h>
 
 #include "common.h"
@@ -31,10 +33,13 @@
 #include "dss-common.h"
 #include "control.h"
 #include "omap_device.h"
+#include "omap-pm.h"
 #include "omap-secure.h"
 #include "soc.h"
 #include "hsmmc.h"
 
+static struct omap_hsmmc_platform_data __maybe_unused mmc_pdata[2];
+
 struct pdata_init {
 	const char *compatible;
 	void (*fn)(void);
@@ -268,9 +273,13 @@
 	},
 };
 
+static struct platform_device rx51_lirc_device;
+
 static void __init nokia_n900_legacy_init(void)
 {
 	hsmmc2_internal_input_clk();
+	mmc_pdata[0].name = "external";
+	mmc_pdata[1].name = "internal";
 
 	if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
 		if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) {
@@ -286,6 +295,8 @@
 		platform_device_register(&omap3_rom_rng_device);
 
 	}
+
+	platform_device_register(&rx51_lirc_device);
 }
 
 static void __init omap3_tao3530_legacy_init(void)
@@ -453,8 +464,14 @@
 
 /* Dual mode timer PWM callbacks platdata */
 #if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
-struct pwm_omap_dmtimer_pdata pwm_dmtimer_pdata = {
+static struct pwm_omap_dmtimer_pdata pwm_dmtimer_pdata = {
 	.request_by_node = omap_dm_timer_request_by_node,
+	.request_specific = omap_dm_timer_request_specific,
+	.request = omap_dm_timer_request,
+	.set_source = omap_dm_timer_set_source,
+	.get_irq = omap_dm_timer_get_irq,
+	.set_int_enable = omap_dm_timer_set_int_enable,
+	.set_int_disable = omap_dm_timer_set_int_disable,
 	.free = omap_dm_timer_free,
 	.enable = omap_dm_timer_enable,
 	.disable = omap_dm_timer_disable,
@@ -465,10 +482,29 @@
 	.set_match = omap_dm_timer_set_match,
 	.set_pwm = omap_dm_timer_set_pwm,
 	.set_prescaler = omap_dm_timer_set_prescaler,
+	.read_counter = omap_dm_timer_read_counter,
 	.write_counter = omap_dm_timer_write_counter,
+	.read_status = omap_dm_timer_read_status,
+	.write_status = omap_dm_timer_write_status,
 };
 #endif
 
+static struct lirc_rx51_platform_data __maybe_unused rx51_lirc_data = {
+	.set_max_mpu_wakeup_lat = omap_pm_set_max_mpu_wakeup_lat,
+	.pwm_timer = 9, /* Use GPT 9 for CIR */
+#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
+	.dmtimer = &pwm_dmtimer_pdata,
+#endif
+};
+
+static struct platform_device __maybe_unused rx51_lirc_device = {
+	.name           = "lirc_rx51",
+	.id             = -1,
+	.dev            = {
+		.platform_data = &rx51_lirc_data,
+	},
+};
+
 /*
  * Few boards still need auxdata populated before we populate
  * the dev entries in of_platform_populate().
@@ -492,11 +528,10 @@
 	OF_DEV_AUXDATA("tlv320aic3x", 0x18, "2-0018", &n810_aic33_data),
 #endif
 #ifdef CONFIG_ARCH_OMAP3
-	OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata),
-	OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata),
-	OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata),
 	OF_DEV_AUXDATA("ti,omap2-iommu", 0x5d000000, "5d000000.mmu",
 		       &omap3_iommu_pdata),
+	OF_DEV_AUXDATA("ti,omap3-hsmmc", 0x4809c000, "4809c000.mmc", &mmc_pdata[0]),
+	OF_DEV_AUXDATA("ti,omap3-hsmmc", 0x480b4000, "480b4000.mmc", &mmc_pdata[1]),
 	/* Only on am3517 */
 	OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
 	OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0",
@@ -506,19 +541,7 @@
 	OF_DEV_AUXDATA("ti,am3352-wkup-m3", 0x44d00000, "44d00000.wkup_m3",
 		       &wkup_m3_data),
 #endif
-#ifdef CONFIG_ARCH_OMAP4
-	OF_DEV_AUXDATA("ti,omap4-padconf", 0x4a100040, "4a100040.pinmux", &pcs_pdata),
-	OF_DEV_AUXDATA("ti,omap4-padconf", 0x4a31e040, "4a31e040.pinmux", &pcs_pdata),
-#endif
-#ifdef CONFIG_SOC_OMAP5
-	OF_DEV_AUXDATA("ti,omap5-padconf", 0x4a002840, "4a002840.pinmux", &pcs_pdata),
-	OF_DEV_AUXDATA("ti,omap5-padconf", 0x4ae0c840, "4ae0c840.pinmux", &pcs_pdata),
-#endif
-#ifdef CONFIG_SOC_DRA7XX
-	OF_DEV_AUXDATA("ti,dra7-padconf", 0x4a003400, "4a003400.pinmux", &pcs_pdata),
-#endif
 #ifdef CONFIG_SOC_AM43XX
-	OF_DEV_AUXDATA("ti,am437-padconf", 0x44e10800, "44e10800.pinmux", &pcs_pdata),
 	OF_DEV_AUXDATA("ti,am4372-wkup-m3", 0x44d00000, "44d00000.wkup_m3",
 		       &wkup_m3_data),
 #endif
@@ -531,6 +554,8 @@
 	OF_DEV_AUXDATA("ti,omap4-iommu", 0x55082000, "55082000.mmu",
 		       &omap4_iommu_pdata),
 #endif
+	/* Common auxdata */
+	OF_DEV_AUXDATA("pinctrl-single", 0, NULL, &pcs_pdata),
 	{ /* sentinel */ },
 };
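
The per-SoC padconf entries collapse into a single line because an auxdata entry with a zero address and a NULL name is matched on its compatible string alone, so one entry covers every pinctrl-single instance at once. The general pattern for attaching legacy platform data looks like this (the entries here are just an example mirroring the table above):

/* Example auxdata table: one address-specific entry, one compatible-only entry. */
static struct of_dev_auxdata example_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("ti,omap3-hsmmc", 0x4809c000, "4809c000.mmc", &mmc_pdata[0]),
	OF_DEV_AUXDATA("pinctrl-single", 0, NULL, &pcs_pdata),
	{ /* sentinel */ },
};

The table is then handed to of_platform_populate() during board init, as pdata-quirks.c already does with its own lookup table.
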
 
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 58920bc..2f7b11d 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -277,13 +277,10 @@
 
 static inline void omap_init_cpufreq(void)
 {
-	struct platform_device_info devinfo = { };
+	struct platform_device_info devinfo = { .name = "omap-cpufreq" };
 
 	if (!of_have_populated_dt())
-		devinfo.name = "omap-cpufreq";
-	else
-		devinfo.name = "cpufreq-dt";
-	platform_device_register_full(&devinfo);
+		platform_device_register_full(&devinfo);
 }
 
 static int __init omap2_common_pm_init(void)
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 2dbd378..d44e0e2 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -198,7 +198,6 @@
 	int per_next_state = PWRDM_POWER_ON;
 	int core_next_state = PWRDM_POWER_ON;
 	int per_going_off;
-	int core_prev_state;
 	u32 sdrc_pwr = 0;
 
 	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
@@ -278,16 +277,20 @@
 		sdrc_write_reg(sdrc_pwr, SDRC_POWER);
 
 	/* CORE */
-	if (core_next_state < PWRDM_POWER_ON) {
-		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
-		if (core_prev_state == PWRDM_POWER_OFF) {
-			omap3_core_restore_context();
-			omap3_cm_restore_context();
-			omap3_sram_restore_context();
-			omap2_sms_restore_context();
-		}
+	if (core_next_state < PWRDM_POWER_ON &&
+	    pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
+		omap3_core_restore_context();
+		omap3_cm_restore_context();
+		omap3_sram_restore_context();
+		omap2_sms_restore_context();
+	} else {
+		/*
+		 * In the off-mode resume path above, omap3_core_restore_context
+		 * also handles the INTC autoidle restore done here, so limit
+		 * this to non-off-mode resume paths to avoid doing it twice.
+		 */
+		omap3_intc_resume_idle();
 	}
-	omap3_intc_resume_idle();
 
 	pwrdm_post_transition(NULL);
 
diff --git a/arch/arm/mach-omap2/powerdomains7xx_data.c b/arch/arm/mach-omap2/powerdomains7xx_data.c
index 287a203..0ec2d00 100644
--- a/arch/arm/mach-omap2/powerdomains7xx_data.c
+++ b/arch/arm/mach-omap2/powerdomains7xx_data.c
@@ -35,7 +35,7 @@
 	.name		  = "iva_pwrdm",
 	.prcm_offs	  = DRA7XX_PRM_IVA_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts		  = PWRSTS_OFF_ON,
 	.pwrsts_logic_ret = PWRSTS_OFF,
 	.banks		  = 4,
 	.pwrsts_mem_ret	= {
@@ -45,10 +45,10 @@
 		[3] = PWRSTS_OFF_RET,	/* tcm2_mem */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* hwa_mem */
-		[1] = PWRSTS_OFF_RET,	/* sl2_mem */
-		[2] = PWRSTS_OFF_RET,	/* tcm1_mem */
-		[3] = PWRSTS_OFF_RET,	/* tcm2_mem */
+		[0] = PWRSTS_ON,	/* hwa_mem */
+		[1] = PWRSTS_ON,	/* sl2_mem */
+		[2] = PWRSTS_ON,	/* tcm1_mem */
+		[3] = PWRSTS_ON,	/* tcm2_mem */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -75,7 +75,7 @@
 	.name		  = "ipu_pwrdm",
 	.prcm_offs	  = DRA7XX_PRM_IPU_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts		  = PWRSTS_OFF_ON,
 	.pwrsts_logic_ret = PWRSTS_OFF,
 	.banks		  = 2,
 	.pwrsts_mem_ret	= {
@@ -83,8 +83,8 @@
 		[1] = PWRSTS_OFF_RET,	/* periphmem */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* aessmem */
-		[1] = PWRSTS_OFF_RET,	/* periphmem */
+		[0] = PWRSTS_ON,	/* aessmem */
+		[1] = PWRSTS_ON,	/* periphmem */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -94,14 +94,14 @@
 	.name		  = "dss_pwrdm",
 	.prcm_offs	  = DRA7XX_PRM_DSS_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts		  = PWRSTS_OFF_ON,
 	.pwrsts_logic_ret = PWRSTS_OFF,
 	.banks		  = 1,
 	.pwrsts_mem_ret	= {
 		[0] = PWRSTS_OFF_RET,	/* dss_mem */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* dss_mem */
+		[0] = PWRSTS_ON,	/* dss_mem */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -112,15 +112,15 @@
 	.prcm_offs	  = DRA7XX_PRM_L4PER_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.pwrsts_logic_ret = PWRSTS_RET,
 	.banks		  = 2,
 	.pwrsts_mem_ret	= {
 		[0] = PWRSTS_OFF_RET,	/* nonretained_bank */
 		[1] = PWRSTS_OFF_RET,	/* retained_bank */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* nonretained_bank */
-		[1] = PWRSTS_OFF_RET,	/* retained_bank */
+		[0] = PWRSTS_ON,	/* nonretained_bank */
+		[1] = PWRSTS_ON,	/* retained_bank */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -136,7 +136,7 @@
 		[0] = PWRSTS_OFF_RET,	/* gpu_mem */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* gpu_mem */
+		[0] = PWRSTS_ON,	/* gpu_mem */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -160,7 +160,7 @@
 	.name		  = "core_pwrdm",
 	.prcm_offs	  = DRA7XX_PRM_CORE_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
-	.pwrsts		  = PWRSTS_INA_ON,
+	.pwrsts		  = PWRSTS_ON,
 	.pwrsts_logic_ret = PWRSTS_RET,
 	.banks		  = 5,
 	.pwrsts_mem_ret	= {
@@ -171,11 +171,11 @@
 		[4] = PWRSTS_OFF_RET,	/* ipu_unicache */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* core_nret_bank */
-		[1] = PWRSTS_OFF_RET,	/* core_ocmram */
-		[2] = PWRSTS_OFF_RET,	/* core_other_bank */
-		[3] = PWRSTS_OFF_RET,	/* ipu_l2ram */
-		[4] = PWRSTS_OFF_RET,	/* ipu_unicache */
+		[0] = PWRSTS_ON,	/* core_nret_bank */
+		[1] = PWRSTS_ON,	/* core_ocmram */
+		[2] = PWRSTS_ON,	/* core_other_bank */
+		[3] = PWRSTS_ON,	/* ipu_l2ram */
+		[4] = PWRSTS_ON,	/* ipu_unicache */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -225,14 +225,14 @@
 	.name		  = "vpe_pwrdm",
 	.prcm_offs	  = DRA7XX_PRM_VPE_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF,
 	.banks		  = 1,
 	.pwrsts_mem_ret	= {
 		[0] = PWRSTS_OFF_RET,	/* vpe_bank */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* vpe_bank */
+		[0] = PWRSTS_ON,	/* vpe_bank */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -250,8 +250,8 @@
 		[1] = PWRSTS_RET,	/* mpu_ram */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* mpu_l2 */
-		[1] = PWRSTS_OFF_RET,	/* mpu_ram */
+		[0] = PWRSTS_ON,	/* mpu_l2 */
+		[1] = PWRSTS_ON,	/* mpu_ram */
 	},
 };
 
@@ -261,7 +261,7 @@
 	.prcm_offs	  = DRA7XX_PRM_L3INIT_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.pwrsts_logic_ret = PWRSTS_RET,
 	.banks		  = 3,
 	.pwrsts_mem_ret	= {
 		[0] = PWRSTS_OFF_RET,	/* gmac_bank */
@@ -269,9 +269,9 @@
 		[2] = PWRSTS_OFF_RET,	/* l3init_bank2 */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* gmac_bank */
-		[1] = PWRSTS_OFF_RET,	/* l3init_bank1 */
-		[2] = PWRSTS_OFF_RET,	/* l3init_bank2 */
+		[0] = PWRSTS_ON,	/* gmac_bank */
+		[1] = PWRSTS_ON,	/* l3init_bank1 */
+		[2] = PWRSTS_ON,	/* l3init_bank2 */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -287,7 +287,7 @@
 		[0] = PWRSTS_OFF_RET,	/* eve3_bank */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* eve3_bank */
+		[0] = PWRSTS_ON,	/* eve3_bank */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -303,7 +303,7 @@
 		[0] = PWRSTS_OFF_RET,	/* emu_bank */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* emu_bank */
+		[0] = PWRSTS_ON,	/* emu_bank */
 	},
 };
 
@@ -320,9 +320,9 @@
 		[2] = PWRSTS_OFF_RET,	/* dsp2_l2 */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* dsp2_edma */
-		[1] = PWRSTS_OFF_RET,	/* dsp2_l1 */
-		[2] = PWRSTS_OFF_RET,	/* dsp2_l2 */
+		[0] = PWRSTS_ON,	/* dsp2_edma */
+		[1] = PWRSTS_ON,	/* dsp2_l1 */
+		[2] = PWRSTS_ON,	/* dsp2_l2 */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -340,9 +340,9 @@
 		[2] = PWRSTS_OFF_RET,	/* dsp1_l2 */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* dsp1_edma */
-		[1] = PWRSTS_OFF_RET,	/* dsp1_l1 */
-		[2] = PWRSTS_OFF_RET,	/* dsp1_l2 */
+		[0] = PWRSTS_ON,	/* dsp1_edma */
+		[1] = PWRSTS_ON,	/* dsp1_l1 */
+		[2] = PWRSTS_ON,	/* dsp1_l2 */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -358,7 +358,7 @@
 		[0] = PWRSTS_OFF_RET,	/* vip_bank */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* vip_bank */
+		[0] = PWRSTS_ON,	/* vip_bank */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -374,7 +374,7 @@
 		[0] = PWRSTS_OFF_RET,	/* eve4_bank */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* eve4_bank */
+		[0] = PWRSTS_ON,	/* eve4_bank */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -390,7 +390,7 @@
 		[0] = PWRSTS_OFF_RET,	/* eve2_bank */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* eve2_bank */
+		[0] = PWRSTS_ON,	/* eve2_bank */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
@@ -406,7 +406,7 @@
 		[0] = PWRSTS_OFF_RET,	/* eve1_bank */
 	},
 	.pwrsts_mem_on	= {
-		[0] = PWRSTS_OFF_RET,	/* eve1_bank */
+		[0] = PWRSTS_ON,	/* eve1_bank */
 	},
 	.flags		  = PWRDM_HAS_LOWPOWERSTATECHANGE,
 };
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index 70df8f6..2aa01c2 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -39,82 +39,10 @@
 #include <linux/of.h>
 
 /*
- * Test if multicore OMAP support is needed
+ * OMAP2+ is always defined as ARCH_MULTIPLATFORM in Kconfig
  */
 #undef MULTI_OMAP2
-#undef OMAP_NAME
-
-#ifdef CONFIG_ARCH_MULTIPLATFORM
 #define MULTI_OMAP2
-#endif
-#ifdef CONFIG_SOC_OMAP2420
-# ifdef OMAP_NAME
-#  undef  MULTI_OMAP2
-#  define MULTI_OMAP2
-# else
-#  define OMAP_NAME omap2420
-# endif
-#endif
-#ifdef CONFIG_SOC_OMAP2430
-# ifdef OMAP_NAME
-#  undef  MULTI_OMAP2
-#  define MULTI_OMAP2
-# else
-#  define OMAP_NAME omap2430
-# endif
-#endif
-#ifdef CONFIG_ARCH_OMAP3
-# ifdef OMAP_NAME
-#  undef  MULTI_OMAP2
-#  define MULTI_OMAP2
-# else
-#  define OMAP_NAME omap3
-# endif
-#endif
-#ifdef CONFIG_ARCH_OMAP4
-# ifdef OMAP_NAME
-#  undef  MULTI_OMAP2
-#  define MULTI_OMAP2
-# else
-#  define OMAP_NAME omap4
-# endif
-#endif
-
-#ifdef CONFIG_SOC_OMAP5
-# ifdef OMAP_NAME
-#  undef  MULTI_OMAP2
-#  define MULTI_OMAP2
-# else
-#  define OMAP_NAME omap5
-# endif
-#endif
-
-#ifdef CONFIG_SOC_AM33XX
-# ifdef OMAP_NAME
-#  undef  MULTI_OMAP2
-#  define MULTI_OMAP2
-# else
-#  define OMAP_NAME am33xx
-# endif
-#endif
-
-#ifdef CONFIG_SOC_AM43XX
-# ifdef OMAP_NAME
-#  undef  MULTI_OMAP2
-#  define MULTI_OMAP2
-# else
-#  define OMAP_NAME am43xx
-# endif
-#endif
-
-#ifdef CONFIG_SOC_DRA7XX
-# ifdef OMAP_NAME
-#  undef MULTI_OMAP2
-#  define MULTI_OMAP2
-# else
-#  define OMAP_NAME DRA7XX
-# endif
-#endif
 
 /*
  * Omap device type i.e. EMU/HS/TST/GP/BAD
@@ -242,11 +170,6 @@
 IS_DRA_SUBCLASS(75x, 0x75)
 IS_DRA_SUBCLASS(72x, 0x72)
 
-#define soc_is_omap24xx()		0
-#define soc_is_omap242x()		0
-#define soc_is_omap243x()		0
-#define soc_is_omap34xx()		0
-#define soc_is_omap343x()		0
 #define soc_is_ti81xx()			0
 #define soc_is_ti816x()			0
 #define soc_is_ti814x()			0
@@ -265,46 +188,27 @@
 #define soc_is_dra74x()			0
 #define soc_is_dra72x()			0
 
-#if defined(MULTI_OMAP2)
-# if defined(CONFIG_ARCH_OMAP2)
-#  undef  soc_is_omap24xx
-#  define soc_is_omap24xx()		is_omap24xx()
-# endif
-# if defined (CONFIG_SOC_OMAP2420)
-#  undef  soc_is_omap242x
-#  define soc_is_omap242x()		is_omap242x()
-# endif
-# if defined (CONFIG_SOC_OMAP2430)
-#  undef  soc_is_omap243x
-#  define soc_is_omap243x()		is_omap243x()
-# endif
-# if defined(CONFIG_ARCH_OMAP3)
-#  undef  soc_is_omap34xx
-#  undef  soc_is_omap343x
-#  define soc_is_omap34xx()		is_omap34xx()
-#  define soc_is_omap343x()		is_omap343x()
-# endif
+#if defined(CONFIG_ARCH_OMAP2)
+# define soc_is_omap24xx()		is_omap24xx()
 #else
-# if defined(CONFIG_ARCH_OMAP2)
-#  undef  soc_is_omap24xx
-#  define soc_is_omap24xx()		1
-# endif
-# if defined(CONFIG_SOC_OMAP2420)
-#  undef  soc_is_omap242x
-#  define soc_is_omap242x()		1
-# endif
-# if defined(CONFIG_SOC_OMAP2430)
-#  undef  soc_is_omap243x
-#  define soc_is_omap243x()		1
-# endif
-# if defined(CONFIG_ARCH_OMAP3)
-#  undef  soc_is_omap34xx
-#  define soc_is_omap34xx()		1
-# endif
-# if defined(CONFIG_SOC_OMAP3430)
-#  undef  soc_is_omap343x
-#  define soc_is_omap343x()		1
-# endif
+# define soc_is_omap24xx()		0
+#endif
+#if defined(CONFIG_SOC_OMAP2420)
+# define soc_is_omap242x()		is_omap242x()
+#else
+# define soc_is_omap242x()		0
+#endif
+#if defined(CONFIG_SOC_OMAP2430)
+# define soc_is_omap243x()		is_omap243x()
+#else
+# define soc_is_omap243x()		0
+#endif
+#if defined(CONFIG_ARCH_OMAP3)
+# define soc_is_omap34xx()		is_omap34xx()
+# define soc_is_omap343x()		is_omap343x()
+#else
+# define soc_is_omap34xx()		0
+# define soc_is_omap343x()		0
 #endif
 
 /*
@@ -339,7 +243,6 @@
 #define soc_is_omap5430()		0
 
 /* These are needed for the common code */
-#ifdef CONFIG_ARCH_OMAP2PLUS
 #define soc_is_omap7xx()		0
 #define soc_is_omap15xx()		0
 #define soc_is_omap16xx()		0
@@ -350,7 +253,6 @@
 #define soc_is_omap1710()		0
 #define cpu_class_is_omap1()		0
 #define cpu_class_is_omap2()		1
-#endif
 
 #if defined(CONFIG_ARCH_OMAP2)
 # undef  soc_is_omap2420
@@ -489,6 +391,7 @@
 #define DRA752_REV_ES2_0	(DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8))
 #define DRA722_REV_ES1_0	(DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
 #define DRA722_REV_ES1_0	(DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
+#define DRA722_REV_ES2_0	(DRA7XX_CLASS | (0x22 << 16) | (0x20 << 8))
 
 void omap2xxx_check_revision(void);
 void omap3xxx_check_revision(void);
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 70c3366..058994e 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -66,8 +66,7 @@
 
 void __init clk_init(void)
 {
-	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
-				       orion5x_tclk);
+	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, 0, orion5x_tclk);
 
 	orion_clkdev_init(tclk);
 }
diff --git a/arch/arm/mach-oxnas/Kconfig b/arch/arm/mach-oxnas/Kconfig
new file mode 100644
index 0000000..4fff3c7
--- /dev/null
+++ b/arch/arm/mach-oxnas/Kconfig
@@ -0,0 +1,24 @@
+menuconfig ARCH_OXNAS
+	bool "Oxford Semiconductor OXNAS Family SoCs"
+	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_HAS_RESET_CONTROLLER
+	select PINCTRL
+	depends on ARCH_MULTI_V5
+	help
+	  Support for the OXNAS SoC family developed by Oxford Semiconductor.
+
+if ARCH_OXNAS
+
+config MACH_OX810SE
+	bool "Support OX810SE Based Products"
+	select ARM_TIMER_SP804
+	select COMMON_CLK_OXNAS
+	select CPU_ARM926T
+	select MFD_SYSCON
+	select PINCTRL_OXNAS
+	select RESET_OXNAS
+	select VERSATILE_FPGA_IRQ
+	help
+	  Include support for Oxford Semiconductor OX810SE SoC based products.
+
+endif
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 913a319..fffb697 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -1235,5 +1235,6 @@
 void __init pxa2xx_set_dmac_info(int nb_channels, int nb_requestors)
 {
 	pxa_dma_pdata.dma_channels = nb_channels;
+	pxa_dma_pdata.nb_requestors = nb_requestors;
 	pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
 }
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 5a941bd..e216433 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -385,10 +385,6 @@
 	{ },
 };
 
-static struct property_set raumfeld_rotary_property_set = {
-	.properties = raumfeld_rotary_properties,
-};
-
 static struct platform_device rotary_encoder_device = {
 	.name		= "rotary-encoder",
 	.id		= 0,
@@ -1063,8 +1059,8 @@
 	pxa3xx_mfp_config(ARRAY_AND_SIZE(raumfeld_controller_pin_config));
 
 	gpiod_add_lookup_table(&raumfeld_rotary_gpios_table);
-	device_add_property_set(&rotary_encoder_device.dev,
-				&raumfeld_rotary_property_set);
+	device_add_properties(&rotary_encoder_device.dev,
+			      raumfeld_rotary_properties);
 	platform_device_register(&rotary_encoder_device);
 
 	spi_register_board_info(ARRAY_AND_SIZE(controller_spi_devices));
@@ -1103,8 +1099,8 @@
 	platform_device_register(&smc91x_device);
 
 	gpiod_add_lookup_table(&raumfeld_rotary_gpios_table);
-	device_add_property_set(&rotary_encoder_device.dev,
-				&raumfeld_rotary_property_set);
+	device_add_properties(&rotary_encoder_device.dev,
+			      raumfeld_rotary_properties);
 	platform_device_register(&rotary_encoder_device);
 
 	raumfeld_audio_init();
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index b9f0757..be1cec5 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -248,6 +248,7 @@
 	},
 };
 
+#ifdef CONFIG_CACHE_L2X0
 static struct resource pmu_resources[] = {
 	[0] = {
 		.start		= IRQ_PBX_PMU_CPU0,
@@ -277,6 +278,7 @@
 	.num_resources		= ARRAY_SIZE(pmu_resources),
 	.resource		= pmu_resources,
 };
+#endif
 
 static void __init gic_init_irq(void)
 {
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
index d42a07e..4d827a0 100644
--- a/arch/arm/mach-rockchip/platsmp.c
+++ b/arch/arm/mach-rockchip/platsmp.c
@@ -65,7 +65,7 @@
 	if (dev)
 		np = dev->of_node;
 	else
-		np = of_get_cpu_node(cpu, 0);
+		np = of_get_cpu_node(cpu, NULL);
 
 	return of_reset_control_get(np, NULL);
 }
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index 3f07cc5..beb71da 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -74,7 +74,6 @@
 {
 	rockchip_suspend_init();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-	platform_device_register_simple("cpufreq-dt", 0, NULL, 0);
 }
 
 static const char * const rockchip_board_dt_compat[] = {
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig
index c6f6ed1..36e3c79 100644
--- a/arch/arm/mach-sa1100/Kconfig
+++ b/arch/arm/mach-sa1100/Kconfig
@@ -61,10 +61,7 @@
 	select MFD_IPAQ_MICRO
 	help
 	  Say Y here if you intend to run this kernel on the Compaq iPAQ
-	  H3100 handheld computer.  Information about this machine and the
-	  Linux port to this machine can be found at:
-
-	  <http://www.handhelds.org/Compaq/index.html#iPAQ_H3100>
+	  H3100 handheld computer.
 
 config SA1100_H3600
 	bool "Compaq iPAQ H3600/H3700"
@@ -73,10 +70,7 @@
 	select MFD_IPAQ_MICRO
 	help
 	  Say Y here if you intend to run this kernel on the Compaq iPAQ
-	  H3600 handheld computer.  Information about this machine and the
-	  Linux port to this machine can be found at:
-
-	  <http://www.handhelds.org/Compaq/index.html#iPAQ_H3600>
+	  H3600 and H3700 handheld computers.
 
 config SA1100_BADGE4
 	bool "HP Labs BadgePAD 4"
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index f2bc5c3..fe4ccb5 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -4,11 +4,6 @@
 config ARCH_SHMOBILE_MULTI
 	bool
 
-config PM_RCAR
-	bool
-	select PM
-	select PM_GENERIC_DOMAINS
-
 config PM_RMOBILE
 	bool
 	select PM
@@ -16,13 +11,15 @@
 
 config ARCH_RCAR_GEN1
 	bool
-	select PM_RCAR
+	select PM
+	select PM_GENERIC_DOMAINS
 	select RENESAS_INTC_IRQPIN
 	select SYS_SUPPORTS_SH_TMU
 
 config ARCH_RCAR_GEN2
 	bool
-	select PM_RCAR
+	select PM
+	select PM_GENERIC_DOMAINS
 	select RENESAS_IRQC
 	select SYS_SUPPORTS_SH_CMT
 	select PCI_DOMAINS if PCI
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index a65c80ac..fc95f7b 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -38,8 +38,6 @@
 
 # PM objects
 obj-$(CONFIG_SUSPEND)		+= suspend.o
-obj-$(CONFIG_CPU_FREQ)		+= cpufreq.o
-obj-$(CONFIG_PM_RCAR)		+= pm-rcar.o
 obj-$(CONFIG_PM_RMOBILE)	+= pm-rmobile.o
 obj-$(CONFIG_ARCH_RCAR_GEN2)	+= pm-rcar-gen2.o
 
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 5464b7a..3b562d8 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -25,16 +25,9 @@
 static inline void shmobile_smp_apmu_suspend_init(void) { }
 #endif
 
-#ifdef CONFIG_CPU_FREQ
-int shmobile_cpufreq_init(void);
-#else
-static inline int shmobile_cpufreq_init(void) { return 0; }
-#endif
-
 static inline void __init shmobile_init_late(void)
 {
 	shmobile_suspend_init();
-	shmobile_cpufreq_init();
 }
 
 #endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/cpufreq.c b/arch/arm/mach-shmobile/cpufreq.c
deleted file mode 100644
index 634d701..0000000
--- a/arch/arm/mach-shmobile/cpufreq.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * CPUFreq support code for SH-Mobile ARM
- *
- *  Copyright (C) 2014 Gaku Inami
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/platform_device.h>
-
-#include "common.h"
-
-int __init shmobile_cpufreq_init(void)
-{
-	platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-	return 0;
-}
diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c
index 14c42a1b..4174cbc 100644
--- a/arch/arm/mach-shmobile/pm-r8a7779.c
+++ b/arch/arm/mach-shmobile/pm-r8a7779.c
@@ -9,9 +9,10 @@
  * for more details.
  */
 
+#include <linux/soc/renesas/rcar-sysc.h>
+
 #include <asm/io.h>
 
-#include "pm-rcar.h"
 #include "r8a7779.h"
 
 /* SYSC */
diff --git a/arch/arm/mach-shmobile/pm-rcar-gen2.c b/arch/arm/mach-shmobile/pm-rcar-gen2.c
index 6815781..691ac16 100644
--- a/arch/arm/mach-shmobile/pm-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/pm-rcar-gen2.c
@@ -13,9 +13,9 @@
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/smp.h>
+#include <linux/soc/renesas/rcar-sysc.h>
 #include <asm/io.h>
 #include "common.h"
-#include "pm-rcar.h"
 #include "rcar-gen2.h"
 
 /* RST */
diff --git a/arch/arm/mach-shmobile/pm-rcar.c b/arch/arm/mach-shmobile/pm-rcar.c
deleted file mode 100644
index 0af05d2..0000000
--- a/arch/arm/mach-shmobile/pm-rcar.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * R-Car SYSC Power management support
- *
- * Copyright (C) 2014  Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include "pm-rcar.h"
-
-/* SYSC Common */
-#define SYSCSR			0x00	/* SYSC Status Register */
-#define SYSCISR			0x04	/* Interrupt Status Register */
-#define SYSCISCR		0x08	/* Interrupt Status Clear Register */
-#define SYSCIER			0x0c	/* Interrupt Enable Register */
-#define SYSCIMR			0x10	/* Interrupt Mask Register */
-
-/* SYSC Status Register */
-#define SYSCSR_PONENB		1	/* Ready for power resume requests */
-#define SYSCSR_POFFENB		0	/* Ready for power shutoff requests */
-
-/*
- * Power Control Register Offsets inside the register block for each domain
- * Note: The "CR" registers for ARM cores exist on H1 only
- *       Use WFI to power off, CPG/APMU to resume ARM cores on R-Car Gen2
- */
-#define PWRSR_OFFS		0x00	/* Power Status Register */
-#define PWROFFCR_OFFS		0x04	/* Power Shutoff Control Register */
-#define PWROFFSR_OFFS		0x08	/* Power Shutoff Status Register */
-#define PWRONCR_OFFS		0x0c	/* Power Resume Control Register */
-#define PWRONSR_OFFS		0x10	/* Power Resume Status Register */
-#define PWRER_OFFS		0x14	/* Power Shutoff/Resume Error */
-
-
-#define SYSCSR_RETRIES		100
-#define SYSCSR_DELAY_US		1
-
-#define PWRER_RETRIES		100
-#define PWRER_DELAY_US		1
-
-#define SYSCISR_RETRIES		1000
-#define SYSCISR_DELAY_US	1
-
-static void __iomem *rcar_sysc_base;
-static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */
-
-static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
-{
-	unsigned int sr_bit, reg_offs;
-	int k;
-
-	if (on) {
-		sr_bit = SYSCSR_PONENB;
-		reg_offs = PWRONCR_OFFS;
-	} else {
-		sr_bit = SYSCSR_POFFENB;
-		reg_offs = PWROFFCR_OFFS;
-	}
-
-	/* Wait until SYSC is ready to accept a power request */
-	for (k = 0; k < SYSCSR_RETRIES; k++) {
-		if (ioread32(rcar_sysc_base + SYSCSR) & BIT(sr_bit))
-			break;
-		udelay(SYSCSR_DELAY_US);
-	}
-
-	if (k == SYSCSR_RETRIES)
-		return -EAGAIN;
-
-	/* Submit power shutoff or power resume request */
-	iowrite32(BIT(sysc_ch->chan_bit),
-		  rcar_sysc_base + sysc_ch->chan_offs + reg_offs);
-
-	return 0;
-}
-
-static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
-{
-	unsigned int isr_mask = BIT(sysc_ch->isr_bit);
-	unsigned int chan_mask = BIT(sysc_ch->chan_bit);
-	unsigned int status;
-	unsigned long flags;
-	int ret = 0;
-	int k;
-
-	spin_lock_irqsave(&rcar_sysc_lock, flags);
-
-	iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
-
-	/* Submit power shutoff or resume request until it was accepted */
-	for (k = 0; k < PWRER_RETRIES; k++) {
-		ret = rcar_sysc_pwr_on_off(sysc_ch, on);
-		if (ret)
-			goto out;
-
-		status = ioread32(rcar_sysc_base +
-				  sysc_ch->chan_offs + PWRER_OFFS);
-		if (!(status & chan_mask))
-			break;
-
-		udelay(PWRER_DELAY_US);
-	}
-
-	if (k == PWRER_RETRIES) {
-		ret = -EIO;
-		goto out;
-	}
-
-	/* Wait until the power shutoff or resume request has completed * */
-	for (k = 0; k < SYSCISR_RETRIES; k++) {
-		if (ioread32(rcar_sysc_base + SYSCISR) & isr_mask)
-			break;
-		udelay(SYSCISR_DELAY_US);
-	}
-
-	if (k == SYSCISR_RETRIES)
-		ret = -EIO;
-
-	iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
-
- out:
-	spin_unlock_irqrestore(&rcar_sysc_lock, flags);
-
-	pr_debug("sysc power domain %d: %08x -> %d\n",
-		 sysc_ch->isr_bit, ioread32(rcar_sysc_base + SYSCISR), ret);
-	return ret;
-}
-
-int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch)
-{
-	return rcar_sysc_power(sysc_ch, false);
-}
-
-int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch)
-{
-	return rcar_sysc_power(sysc_ch, true);
-}
-
-bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch)
-{
-	unsigned int st;
-
-	st = ioread32(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS);
-	if (st & BIT(sysc_ch->chan_bit))
-		return true;
-
-	return false;
-}
-
-void __iomem *rcar_sysc_init(phys_addr_t base)
-{
-	rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE);
-	if (!rcar_sysc_base)
-		panic("unable to ioremap R-Car SYSC hardware block\n");
-
-	return rcar_sysc_base;
-}
diff --git a/arch/arm/mach-shmobile/pm-rcar.h b/arch/arm/mach-shmobile/pm-rcar.h
deleted file mode 100644
index 1b901db4..0000000
--- a/arch/arm/mach-shmobile/pm-rcar.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef PM_RCAR_H
-#define PM_RCAR_H
-
-struct rcar_sysc_ch {
-	u16 chan_offs;
-	u8 chan_bit;
-	u8 isr_bit;
-};
-
-int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch);
-int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch);
-bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch);
-void __iomem *rcar_sysc_init(phys_addr_t base);
-
-#endif /* PM_RCAR_H */
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index f5c31fb..c6951ee 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -19,13 +19,13 @@
 #include <linux/spinlock.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <linux/soc/renesas/rcar-sysc.h>
 
 #include <asm/cacheflush.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 
 #include "common.h"
-#include "pm-rcar.h"
 #include "r8a7779.h"
 
 #define AVECR IOMEM(0xfe700040)
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
index f6426c6..28f26d5 100644
--- a/arch/arm/mach-shmobile/smp-r8a7790.c
+++ b/arch/arm/mach-shmobile/smp-r8a7790.c
@@ -17,12 +17,12 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/io.h>
+#include <linux/soc/renesas/rcar-sysc.h>
 
 #include <asm/smp_plat.h>
 
 #include "common.h"
 #include "platsmp-apmu.h"
-#include "pm-rcar.h"
 #include "rcar-gen2.h"
 #include "r8a7790.h"
 
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index ad008e4..6196a63 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -20,29 +20,9 @@
 
 #include "common.h"
 
-static void __init shmobile_setup_delay_hz(unsigned int max_cpu_core_hz,
-					   unsigned int mult, unsigned int div)
-{
-	/* calculate a worst-case loops-per-jiffy value
-	 * based on maximum cpu core hz setting and the
-	 * __delay() implementation in arch/arm/lib/delay.S
-	 *
-	 * this will result in a longer delay than expected
-	 * when the cpu core runs on lower frequencies.
-	 */
-
-	unsigned int value = HZ * div / mult;
-
-	if (!preset_lpj)
-		preset_lpj = max_cpu_core_hz / value;
-}
-
 void __init shmobile_init_delay(void)
 {
 	struct device_node *np, *cpus;
-	bool is_a7_a8_a9 = false;
-	bool is_a15 = false;
-	bool has_arch_timer = false;
 	u32 max_freq = 0;
 
 	cpus = of_find_node_by_path("/cpus");
@@ -52,19 +32,16 @@
 	for_each_child_of_node(cpus, np) {
 		u32 freq;
 
+		if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER) &&
+		    (of_device_is_compatible(np, "arm,cortex-a7") ||
+		     of_device_is_compatible(np, "arm,cortex-a15"))) {
+			of_node_put(np);
+			of_node_put(cpus);
+			return;
+		}
+
 		if (!of_property_read_u32(np, "clock-frequency", &freq))
 			max_freq = max(max_freq, freq);
-
-		if (of_device_is_compatible(np, "arm,cortex-a8") ||
-		    of_device_is_compatible(np, "arm,cortex-a9")) {
-			is_a7_a8_a9 = true;
-		} else if (of_device_is_compatible(np, "arm,cortex-a7")) {
-			is_a7_a8_a9 = true;
-			has_arch_timer = true;
-		} else if (of_device_is_compatible(np, "arm,cortex-a15")) {
-			is_a15 = true;
-			has_arch_timer = true;
-		}
 	}
 
 	of_node_put(cpus);
@@ -72,10 +49,15 @@
 	if (!max_freq)
 		return;
 
-	if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
-		if (is_a7_a8_a9)
-			shmobile_setup_delay_hz(max_freq, 1, 3);
-		else if (is_a15)
-			shmobile_setup_delay_hz(max_freq, 2, 4);
-	}
+	/*
+	 * Calculate a worst-case loops-per-jiffy value
+	 * based on maximum cpu core hz setting and the
+	 * __delay() implementation in arch/arm/lib/delay.S.
+	 *
+	 * This will result in a longer delay than expected
+	 * when the cpu core runs on lower frequencies.
+	 */
+
+	if (!preset_lpj)
+		preset_lpj = max_freq / HZ;
 }
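
As a worked example of the simplified formula (numbers chosen for illustration only): with HZ=100 and a maximum clock-frequency property of 1,500,000,000 (a 1.5 GHz core), preset_lpj becomes 1,500,000,000 / 100 = 15,000,000 loops per jiffy. Running the core below that frequency then simply makes the resulting delays longer than requested, as the comment above notes.
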
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index 575195b..65e1817 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -38,6 +38,8 @@
 extern void socfpga_sysmgr_init(void);
 void socfpga_init_l2_ecc(void);
 void socfpga_init_ocram_ecc(void);
+void socfpga_init_arria10_l2_ecc(void);
+void socfpga_init_arria10_ocram_ecc(void);
 
 extern void __iomem *sys_manager_base_addr;
 extern void __iomem *rst_manager_base_addr;
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
index 5d94b7a..c160fa3 100644
--- a/arch/arm/mach-socfpga/headsmp.S
+++ b/arch/arm/mach-socfpga/headsmp.S
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 
 	.arch	armv7-a
+	.arm
 
 ENTRY(secondary_trampoline)
 	/* CPU1 will always fetch from 0x0 when it is brought out of reset.
diff --git a/arch/arm/mach-socfpga/l2_cache.c b/arch/arm/mach-socfpga/l2_cache.c
index e3907ab..4267c95f 100644
--- a/arch/arm/mach-socfpga/l2_cache.c
+++ b/arch/arm/mach-socfpga/l2_cache.c
@@ -17,6 +17,20 @@
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
 
+#include "core.h"
+
+/* A10 System Manager L2 ECC Control register */
+#define A10_MPU_CTRL_L2_ECC_OFST          0x0
+#define A10_MPU_CTRL_L2_ECC_EN            BIT(0)
+
+/* A10 System Manager Global IRQ Mask register */
+#define A10_SYSMGR_ECC_INTMASK_CLR_OFST   0x98
+#define A10_SYSMGR_ECC_INTMASK_CLR_L2     BIT(0)
+
+/* A10 System Manager L2 ECC IRQ Clear register */
+#define A10_SYSMGR_MPU_CLEAR_L2_ECC_OFST  0xA8
+#define A10_SYSMGR_MPU_CLEAR_L2_ECC       (BIT(31) | BIT(15))
+
 void socfpga_init_l2_ecc(void)
 {
 	struct device_node *np;
@@ -39,3 +53,38 @@
 	writel(0x01, mapped_l2_edac_addr);
 	iounmap(mapped_l2_edac_addr);
 }
+
+void socfpga_init_arria10_l2_ecc(void)
+{
+	struct device_node *np;
+	void __iomem *mapped_l2_edac_addr;
+
+	/* Find the L2 EDAC device tree node */
+	np = of_find_compatible_node(NULL, NULL, "altr,socfpga-a10-l2-ecc");
+	if (!np) {
+		pr_err("Unable to find socfpga-a10-l2-ecc in dtb\n");
+		return;
+	}
+
+	mapped_l2_edac_addr = of_iomap(np, 0);
+	of_node_put(np);
+	if (!mapped_l2_edac_addr) {
+		pr_err("Unable to find L2 ECC mapping in dtb\n");
+		return;
+	}
+
+	if (!sys_manager_base_addr) {
+		pr_err("System Mananger not mapped for L2 ECC\n");
+		goto exit;
+	}
+	/* Clear any pending IRQs */
+	writel(A10_SYSMGR_MPU_CLEAR_L2_ECC, (sys_manager_base_addr +
+	       A10_SYSMGR_MPU_CLEAR_L2_ECC_OFST));
+	/* Enable ECC */
+	writel(A10_SYSMGR_ECC_INTMASK_CLR_L2, sys_manager_base_addr +
+	       A10_SYSMGR_ECC_INTMASK_CLR_OFST);
+	writel(A10_MPU_CTRL_L2_ECC_EN, mapped_l2_edac_addr +
+	       A10_MPU_CTRL_L2_ECC_OFST);
+exit:
+	iounmap(mapped_l2_edac_addr);
+}
diff --git a/arch/arm/mach-socfpga/ocram.c b/arch/arm/mach-socfpga/ocram.c
index 60ec643..10d6732 100644
--- a/arch/arm/mach-socfpga/ocram.c
+++ b/arch/arm/mach-socfpga/ocram.c
@@ -13,12 +13,15 @@
  * You should have received a copy of the GNU General Public License along with
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
+#include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/genalloc.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 
+#include "core.h"
+
 #define ALTR_OCRAM_CLEAR_ECC          0x00000018
 #define ALTR_OCRAM_ECC_EN             0x00000019
 
@@ -47,3 +50,133 @@
 
 	iounmap(mapped_ocr_edac_addr);
 }
+
+/* Arria10 OCRAM Section */
+#define ALTR_A10_ECC_CTRL_OFST          0x08
+#define ALTR_A10_OCRAM_ECC_EN_CTL       (BIT(1) | BIT(0))
+#define ALTR_A10_ECC_INITA              BIT(16)
+
+#define ALTR_A10_ECC_INITSTAT_OFST      0x0C
+#define ALTR_A10_ECC_INITCOMPLETEA      BIT(0)
+#define ALTR_A10_ECC_INITCOMPLETEB      BIT(8)
+
+#define ALTR_A10_ECC_ERRINTEN_OFST      0x10
+#define ALTR_A10_ECC_SERRINTEN          BIT(0)
+
+#define ALTR_A10_ECC_INTSTAT_OFST       0x20
+#define ALTR_A10_ECC_SERRPENA           BIT(0)
+#define ALTR_A10_ECC_DERRPENA           BIT(8)
+#define ALTR_A10_ECC_ERRPENA_MASK       (ALTR_A10_ECC_SERRPENA | \
+					 ALTR_A10_ECC_DERRPENA)
+/* ECC Manager Defines */
+#define A10_SYSMGR_ECC_INTMASK_SET_OFST   0x94
+#define A10_SYSMGR_ECC_INTMASK_CLR_OFST   0x98
+#define A10_SYSMGR_ECC_INTMASK_OCRAM      BIT(1)
+
+#define ALTR_A10_ECC_INIT_WATCHDOG_10US   10000
+
+static inline void ecc_set_bits(u32 bit_mask, void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr);
+
+	value |= bit_mask;
+	writel(value, ioaddr);
+}
+
+static inline void ecc_clear_bits(u32 bit_mask, void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr);
+
+	value &= ~bit_mask;
+	writel(value, ioaddr);
+}
+
+static inline int ecc_test_bits(u32 bit_mask, void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr);
+
+	return (value & bit_mask) ? 1 : 0;
+}
+
+/*
+ * This function uses the memory initialization block in the Arria10 ECC
+ * controller to initialize/clear the entire memory data and ECC data.
+ */
+static int altr_init_memory_port(void __iomem *ioaddr)
+{
+	int limit = ALTR_A10_ECC_INIT_WATCHDOG_10US;
+
+	ecc_set_bits(ALTR_A10_ECC_INITA, (ioaddr + ALTR_A10_ECC_CTRL_OFST));
+	while (limit--) {
+		if (ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA,
+				  (ioaddr + ALTR_A10_ECC_INITSTAT_OFST)))
+			break;
+		udelay(1);
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	/* Clear any pending ECC interrupts */
+	writel(ALTR_A10_ECC_ERRPENA_MASK,
+	       (ioaddr + ALTR_A10_ECC_INTSTAT_OFST));
+
+	return 0;
+}
+
+void socfpga_init_arria10_ocram_ecc(void)
+{
+	struct device_node *np;
+	int ret = 0;
+	void __iomem *ecc_block_base;
+
+	if (!sys_manager_base_addr) {
+		pr_err("SOCFPGA: sys-mgr is not initialized\n");
+		return;
+	}
+
+	/* Find the OCRAM EDAC device tree node */
+	np = of_find_compatible_node(NULL, NULL, "altr,socfpga-a10-ocram-ecc");
+	if (!np) {
+		pr_err("Unable to find socfpga-a10-ocram-ecc\n");
+		return;
+	}
+
+	/* Map the ECC Block */
+	ecc_block_base = of_iomap(np, 0);
+	of_node_put(np);
+	if (!ecc_block_base) {
+		pr_err("Unable to map OCRAM ECC block\n");
+		return;
+	}
+
+	/* Disable ECC */
+	writel(ALTR_A10_OCRAM_ECC_EN_CTL,
+	       sys_manager_base_addr + A10_SYSMGR_ECC_INTMASK_SET_OFST);
+	ecc_clear_bits(ALTR_A10_ECC_SERRINTEN,
+		       (ecc_block_base + ALTR_A10_ECC_ERRINTEN_OFST));
+	ecc_clear_bits(ALTR_A10_OCRAM_ECC_EN_CTL,
+		       (ecc_block_base + ALTR_A10_ECC_CTRL_OFST));
+
+	/* Ensure all writes complete */
+	wmb();
+
+	/* Use HW initialization block to initialize memory for ECC */
+	ret = altr_init_memory_port(ecc_block_base);
+	if (ret) {
+		pr_err("ECC: cannot init OCRAM PORTA memory\n");
+		goto exit;
+	}
+
+	/* Enable ECC */
+	ecc_set_bits(ALTR_A10_OCRAM_ECC_EN_CTL,
+		     (ecc_block_base + ALTR_A10_ECC_CTRL_OFST));
+	ecc_set_bits(ALTR_A10_ECC_SERRINTEN,
+		     (ecc_block_base + ALTR_A10_ECC_ERRINTEN_OFST));
+	writel(ALTR_A10_OCRAM_ECC_EN_CTL,
+	       sys_manager_base_addr + A10_SYSMGR_ECC_INTMASK_CLR_OFST);
+
+	/* Ensure all writes complete */
+	wmb();
+exit:
+	iounmap(ecc_block_base);
+}
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index 7e0aad2..dde14f7 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -66,6 +66,16 @@
 		socfpga_init_ocram_ecc();
 }
 
+static void __init socfpga_arria10_init_irq(void)
+{
+	irqchip_init();
+	socfpga_sysmgr_init();
+	if (IS_ENABLED(CONFIG_EDAC_ALTERA_L2C))
+		socfpga_init_arria10_l2_ecc();
+	if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
+		socfpga_init_arria10_ocram_ecc();
+}
+
 static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd)
 {
 	u32 temp;
@@ -113,7 +123,7 @@
 DT_MACHINE_START(SOCFPGA_A10, "Altera SOCFPGA Arria10")
 	.l2c_aux_val	= 0,
 	.l2c_aux_mask	= ~0,
-	.init_irq	= socfpga_init_irq,
+	.init_irq	= socfpga_arria10_init_irq,
 	.restart	= socfpga_arria10_restart,
 	.dt_compat	= altera_a10_dt_match,
 MACHINE_END
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index a196d14..6f1af29 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -18,11 +18,10 @@
 	select PL310_ERRATA_769419 if CACHE_L2X0
 	select RESET_CONTROLLER
 	help
-	  Include support for STiH41x SOCs like STiH415/416 using the device tree
-	  for discovery
-	  More information at Documentation/arm/STiH41x and
-	  at Documentation/devicetree
-
+	  Include support for STMicroelectronics' STiH415/416, STiH407/10 and
+	  STiH418 family SoCs using the Device Tree for discovery.  More
+	  information can be found in Documentation/arm/sti/ and
+	  Documentation/devicetree.
 
 if ARCH_STI
 
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 3c15619..95dca8c 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -17,11 +17,6 @@
 
 #include <asm/mach/arch.h>
 
-static void __init sunxi_dt_cpufreq_init(void)
-{
-	platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-}
-
 static const char * const sunxi_board_dt_compat[] = {
 	"allwinner,sun4i-a10",
 	"allwinner,sun5i-a10s",
@@ -32,7 +27,6 @@
 
 DT_MACHINE_START(SUNXI_DT, "Allwinner sun4i/sun5i Families")
 	.dt_compat	= sunxi_board_dt_compat,
-	.init_late	= sunxi_dt_cpufreq_init,
 MACHINE_END
 
 static const char * const sun6i_board_dt_compat[] = {
@@ -53,7 +47,6 @@
 DT_MACHINE_START(SUN6I_DT, "Allwinner sun6i (A31) Family")
 	.init_time	= sun6i_timer_init,
 	.dt_compat	= sun6i_board_dt_compat,
-	.init_late	= sunxi_dt_cpufreq_init,
 MACHINE_END
 
 static const char * const sun7i_board_dt_compat[] = {
@@ -63,7 +56,6 @@
 
 DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
 	.dt_compat	= sun7i_board_dt_compat,
-	.init_late	= sunxi_dt_cpufreq_init,
 MACHINE_END
 
 static const char * const sun8i_board_dt_compat[] = {
@@ -77,7 +69,6 @@
 DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
 	.init_time	= sun6i_timer_init,
 	.dt_compat	= sun8i_board_dt_compat,
-	.init_late	= sunxi_dt_cpufreq_init,
 MACHINE_END
 
 static const char * const sun9i_board_dt_compat[] = {
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index 52db8bf..7478f6f 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -29,10 +29,6 @@
 	{ },
 };
 
-static struct property_set __initdata wifi_rfkill_pset = {
-	.properties = wifi_rfkill_prop,
-};
-
 static struct platform_device wifi_rfkill_device = {
 	.name	= "rfkill_gpio",
 	.id	= -1,
@@ -49,7 +45,7 @@
 
 void __init tegra_paz00_wifikill_init(void)
 {
-	platform_device_add_properties(&wifi_rfkill_device, &wifi_rfkill_pset);
+	platform_device_add_properties(&wifi_rfkill_device, wifi_rfkill_prop);
 	gpiod_add_lookup_table(&wifi_gpio_lookup);
 	platform_device_register(&wifi_rfkill_device);
 }
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index f3f61db..75620ae 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -108,19 +108,9 @@
 	 * be un-gated by un-toggling the power gate register
 	 * manually.
 	 */
-	if (!tegra_pmc_cpu_is_powered(cpu)) {
-		ret = tegra_pmc_cpu_power_on(cpu);
-		if (ret)
-			return ret;
-
-		/* Wait for the power to come up. */
-		timeout = jiffies + msecs_to_jiffies(100);
-		while (!tegra_pmc_cpu_is_powered(cpu)) {
-			if (time_after(jiffies, timeout))
-				return -ETIMEDOUT;
-			udelay(10);
-		}
-	}
+	ret = tegra_pmc_cpu_power_on(cpu);
+	if (ret)
+		return ret;
 
 remove_clamps:
 	/* CPU partition is powered. Enable the CPU clock. */
diff --git a/arch/arm/mach-uniphier/platsmp.c b/arch/arm/mach-uniphier/platsmp.c
index 6914135..e802ca8 100644
--- a/arch/arm/mach-uniphier/platsmp.c
+++ b/arch/arm/mach-uniphier/platsmp.c
@@ -99,16 +99,16 @@
 	int ret;
 
 	np = of_find_compatible_node(NULL, NULL, "socionext,uniphier-smpctrl");
-	of_node_put(np);
 	ret = of_address_to_resource(np, 0, &res);
+	of_node_put(np);
 	if (!ret) {
 		rom_rsv2_phys = res.start + UNIPHIER_SMPCTRL_ROM_RSV2;
 	} else {
 		/* try old binding too */
 		np = of_find_compatible_node(NULL, NULL,
 					     "socionext,uniphier-system-bus-controller");
-		of_node_put(np);
 		ret = of_address_to_resource(np, 1, &res);
+		of_node_put(np);
 		if (ret) {
 			pr_err("failed to get resource of SMP control\n");
 			return ret;
@@ -120,7 +120,7 @@
 	if (ret)
 		return ret;
 
-	uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, sizeof(SZ_4));
+	uniphier_smp_rom_boot_rsv2 = ioremap(rom_rsv2_phys, SZ_4);
 	if (!uniphier_smp_rom_boot_rsv2) {
 		pr_err("failed to map ROM_BOOT_RSV2 register\n");
 		return -ENOMEM;
diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c
index dff1c05..d643b92 100644
--- a/arch/arm/mach-versatile/versatile_dt.c
+++ b/arch/arm/mach-versatile/versatile_dt.c
@@ -32,7 +32,6 @@
 #include <linux/amba/clcd.h>
 #include <linux/platform_data/video-clcd-versatile.h>
 #include <linux/amba/mmci.h>
-#include <linux/mtd/physmap.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -42,27 +41,15 @@
 #define __io_address(n)		((void __iomem __force *)IO_ADDRESS(n))
 
 /*
- * Memory definitions
- */
-#define VERSATILE_FLASH_BASE           0x34000000
-#define VERSATILE_FLASH_SIZE           SZ_64M
-
-/*
  * ------------------------------------------------------------------------
  *  Versatile Registers
  * ------------------------------------------------------------------------
  */
 #define VERSATILE_SYS_PCICTL_OFFSET           0x44
 #define VERSATILE_SYS_MCI_OFFSET              0x48
-#define VERSATILE_SYS_FLASH_OFFSET            0x4C
 #define VERSATILE_SYS_CLCD_OFFSET             0x50
 
 /*
- * VERSATILE_SYS_FLASH
- */
-#define VERSATILE_FLASHPROG_FLVPPEN	(1 << 0)	/* Enable writing to flash */
-
-/*
  * VERSATILE peripheral addresses
  */
 #define VERSATILE_MMCI0_BASE           0x10005000	/* MMC interface */
@@ -86,39 +73,6 @@
 static void __iomem *versatile_sys_base;
 static void __iomem *versatile_ib2_ctrl;
 
-static void versatile_flash_set_vpp(struct platform_device *pdev, int on)
-{
-	u32 val;
-
-	val = readl(versatile_sys_base + VERSATILE_SYS_FLASH_OFFSET);
-	if (on)
-		val |= VERSATILE_FLASHPROG_FLVPPEN;
-	else
-		val &= ~VERSATILE_FLASHPROG_FLVPPEN;
-	writel(val, versatile_sys_base + VERSATILE_SYS_FLASH_OFFSET);
-}
-
-static struct physmap_flash_data versatile_flash_data = {
-	.width			= 4,
-	.set_vpp		= versatile_flash_set_vpp,
-};
-
-static struct resource versatile_flash_resource = {
-	.start			= VERSATILE_FLASH_BASE,
-	.end			= VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE - 1,
-	.flags			= IORESOURCE_MEM,
-};
-
-struct platform_device versatile_flash_device = {
-	.name			= "physmap-flash",
-	.id			= 0,
-	.dev			= {
-		.platform_data	= &versatile_flash_data,
-	},
-	.num_resources		= 1,
-	.resource		= &versatile_flash_resource,
-};
-
 unsigned int mmc_status(struct device *dev)
 {
 	struct amba_device *adev = container_of(dev, struct amba_device, dev);
@@ -390,7 +344,6 @@
 
 	versatile_dt_pci_init();
 
-	platform_device_register(&versatile_flash_device);
 	of_platform_populate(NULL, of_default_bus_match_table,
 			     versatile_auxdata_lookup, NULL);
 }
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index f5c1006..73caae7 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -4,7 +4,7 @@
 ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := \
 	-I$(srctree)/arch/arm/plat-versatile/include
 
-obj-y					:= v2m.o
+obj-$(CONFIG_ARCH_VEXPRESS)		:= v2m.o
 obj-$(CONFIG_ARCH_VEXPRESS_DCSCB)	+= dcscb.o	dcscb_setup.o
 CFLAGS_dcscb.o				+= -march=armv7-a
 CFLAGS_REMOVE_dcscb.o			= -pg
@@ -15,3 +15,5 @@
 CFLAGS_REMOVE_tc2_pm.o			= -pg
 obj-$(CONFIG_SMP)			+= platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= hotplug.o
+
+obj-$(CONFIG_ARCH_MPS2)			+= v2m-mps2.o
diff --git a/arch/arm/mach-vexpress/Makefile.boot b/arch/arm/mach-vexpress/Makefile.boot
new file mode 100644
index 0000000..eacfc3f
--- /dev/null
+++ b/arch/arm/mach-vexpress/Makefile.boot
@@ -0,0 +1,3 @@
+# Empty file waiting for deletion once Makefile.boot isn't needed any more.
+# Patch waits for application at
+# http://www.arm.linux.org.uk/developer/patches/viewpatch.php?id=7889/1 .
diff --git a/arch/arm/mach-vexpress/v2m-mps2.c b/arch/arm/mach-vexpress/v2m-mps2.c
new file mode 100644
index 0000000..e7ad9c2
--- /dev/null
+++ b/arch/arm/mach-vexpress/v2m-mps2.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Vladimir Murzin <vladimir.murzin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <asm/mach/arch.h>
+
+static const char *const mps2_compat[] __initconst = {
+	"arm,mps2",
+	NULL
+};
+
+DT_MACHINE_START(MPS2DT, "MPS2 (Device Tree Support)")
+	.dt_compat = mps2_compat,
+MACHINE_END
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 860ffb6..da876d2 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -110,7 +110,6 @@
  */
 static void __init zynq_init_machine(void)
 {
-	struct platform_device_info devinfo = { .name = "cpufreq-dt", };
 	struct soc_device_attribute *soc_dev_attr;
 	struct soc_device *soc_dev;
 	struct device *parent = NULL;
@@ -145,7 +144,6 @@
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
 
 	platform_device_register(&zynq_cpuidle_device);
-	platform_device_register_full(&devinfo);
 }
 
 static void __init zynq_timer_init(void)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index deac58d..5c2ca06 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -762,7 +762,8 @@
 	if (!mask)
 		return NULL;
 
-	buf = kzalloc(sizeof(*buf), gfp);
+	buf = kzalloc(sizeof(*buf),
+		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
 	if (!buf)
 		return NULL;
 
@@ -2214,7 +2215,7 @@
 }
 
 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    struct iommu_ops *iommu)
+				    const struct iommu_ops *iommu)
 {
 	struct dma_iommu_mapping *mapping;
 
@@ -2252,7 +2253,7 @@
 #else
 
 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    struct iommu_ops *iommu)
+				    const struct iommu_ops *iommu)
 {
 	return false;
 }
@@ -2269,7 +2270,7 @@
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			struct iommu_ops *iommu, bool coherent)
+			const struct iommu_ops *iommu, bool coherent)
 {
 	struct dma_map_ops *dma_ops;
 
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index d0ba3551..3cced84 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -235,7 +235,7 @@
 	 */
 	if (mapping && cache_is_vipt_aliasing())
 		flush_pfn_alias(page_to_pfn(page),
-				page->index << PAGE_CACHE_SHIFT);
+				page->index << PAGE_SHIFT);
 }
 
 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@@ -250,7 +250,7 @@
 	 *   data in the current VM view associated with this page.
 	 * - aliasing VIPT: we only need to find one mapping of this page.
 	 */
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page->index;
 
 	flush_dcache_mmap_lock(mapping);
 	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 1dd1093..d5805e4 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -87,7 +87,6 @@
 /* MPU initialisation functions */
 void __init sanity_check_meminfo_mpu(void)
 {
-	int i;
 	phys_addr_t phys_offset = PHYS_OFFSET;
 	phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
 	struct memblock_region *reg;
@@ -110,11 +109,13 @@
 		} else {
 			/*
 			 * memblock auto merges contiguous blocks, remove
-			 * all blocks afterwards
+			 * all blocks afterwards in one go (we can't remove
+			 * blocks separately while iterating)
 			 */
 			pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
-				  &mem_start, &reg->base);
-			memblock_remove(reg->base, reg->size);
+				  &mem_end, &reg->base);
+			memblock_remove(reg->base, 0 - reg->base);
+			break;
 		}
 	}
 
@@ -144,7 +145,7 @@
 		pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
 				&specified_mem_size, &aligned_region_size);
 		memblock_remove(mem_start + aligned_region_size,
-				specified_mem_size - aligned_round_size);
+				specified_mem_size - aligned_region_size);
 
 		mem_end = mem_start + aligned_region_size;
 	}
@@ -261,7 +262,7 @@
 		return;
 
 	region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET,
-					ilog2(meminfo.bank[0].size),
+					ilog2(memblock.memory.regions[0].size),
 					MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
 	if (region_err) {
 		panic("MPU region initialization failure! %d", region_err);
@@ -285,7 +286,7 @@
 	 * some architectures which the DRAM is the exception vector to trap,
 	 * alloc_page breaks with error, although it is not NULL, but "0."
 	 */
-	memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
+	memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE);
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
 	 * There is no dedicated vector page on V7-M. So nothing needs to be
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0f8963a..6fcaac8 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -281,12 +281,12 @@
 	bl      v7_invalidate_l1
 	ldmia	r12, {r1-r6, lr}
 #ifdef CONFIG_SMP
+	orr	r10, r10, #(1 << 6)		@ Enable SMP/nAMP mode
 	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
-	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
-	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
-	orreq	r0, r0, #(1 << 6)		@ Enable SMP/nAMP mode
-	orreq	r0, r0, r10			@ Enable CPU-specific SMP bits
-	mcreq	p15, 0, r0, c1, c0, 1
+	ALT_UP(mov	r0, r10)		@ fake it for UP
+	orr	r10, r10, r0			@ Set required bits
+	teq	r10, r0				@ Were they already set?
+	mcrne	p15, 0, r10, c1, c0, 1		@ No, update register
 #endif
 	b	__v7_setup_cont
 
diff --git a/arch/arm/plat-samsung/include/plat/map-s5p.h b/arch/arm/plat-samsung/include/plat/map-s5p.h
index 4ec9a70..b63aeeb 100644
--- a/arch/arm/plat-samsung/include/plat/map-s5p.h
+++ b/arch/arm/plat-samsung/include/plat/map-s5p.h
@@ -18,7 +18,6 @@
 
 #define S5P_VA_DMC0		S3C_ADDR(0x02440000)
 #define S5P_VA_DMC1		S3C_ADDR(0x02480000)
-#define S5P_VA_SROMC		S3C_ADDR(0x024C0000)
 
 #define S5P_VA_COREPERI_BASE	S3C_ADDR(0x02800000)
 #define S5P_VA_COREPERI(x)	(S5P_VA_COREPERI_BASE + (x))
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 4f43622..76747d9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -11,6 +11,7 @@
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	select ARCH_WANT_FRAME_POINTERS
@@ -58,11 +59,14 @@
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_BPF_JIT
+	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select HAVE_ARM_SMCCC
+	select HAVE_EBPF_JIT
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_CMPXCHG_LOCAL
+	select HAVE_CONTEXT_TRACKING
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
@@ -76,6 +80,7 @@
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_MEMBLOCK
+	select HAVE_MEMBLOCK_NODE_MAP if NUMA
 	select HAVE_PATA_PLATFORM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
@@ -89,15 +94,13 @@
 	select NO_BOOTMEM
 	select OF
 	select OF_EARLY_FLATTREE
+	select OF_NUMA if NUMA && OF
 	select OF_RESERVED_MEM
 	select PERF_USE_VMALLOC
 	select POWER_RESET
 	select POWER_SUPPLY
-	select RTC_LIB
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
-	select HAVE_CONTEXT_TRACKING
-	select HAVE_ARM_SMCCC
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
@@ -546,10 +549,35 @@
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
 
+# Common NUMA Features
+config NUMA
+	bool "Numa Memory Allocation and Scheduler Support"
+	depends on SMP
+	help
+	  Enable NUMA (Non Uniform Memory Access) support.
+
+	  The kernel will try to allocate memory used by a CPU on the
+	  local memory of the CPU and add some more
+	  NUMA awareness to the kernel.
+
+config NODES_SHIFT
+	int "Maximum NUMA Nodes (as a power of 2)"
+	range 1 10
+	default "2"
+	depends on NEED_MULTIPLE_NODES
+	help
+	  Specify the maximum number of NUMA Nodes available on the target
+	  system.  Increases memory reserved to accommodate various tables.
+
+config USE_PERCPU_NUMA_NODE_ID
+	def_bool y
+	depends on NUMA
+
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
 
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+	depends on !HIBERNATION
 	def_bool y
 
 config ARCH_HAS_HOLES_MEMORYMODEL
@@ -578,9 +606,6 @@
 config ARCH_WANT_HUGE_PMD_SHARE
 	def_bool y if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
 
-config HAVE_ARCH_TRANSPARENT_HUGEPAGE
-	def_bool y
-
 config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
@@ -953,6 +978,14 @@
 
 source "kernel/power/Kconfig"
 
+config ARCH_HIBERNATION_POSSIBLE
+	def_bool y
+	depends on CPU_PM
+
+config ARCH_HIBERNATION_HEADER
+	def_bool y
+	depends on HIBERNATION
+
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
 
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 7e76845..710fde4 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -59,7 +59,7 @@
 	  If in doubt, say Y
 
 config DEBUG_ALIGN_RODATA
-	depends on DEBUG_RODATA && ARM64_4K_PAGES
+	depends on DEBUG_RODATA
 	bool "Align linker sections up to SECTION_SIZE"
 	help
 	  If this option is enabled, sections that may potentially be marked as
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index efa77c1..7ef1d05 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -2,6 +2,7 @@
 
 config ARCH_SUNXI
 	bool "Allwinner sunxi 64-bit SoC Family"
+	select GENERIC_IRQ_CHIP
 	help
 	  This enables support for Allwinner sunxi based SoCs like the A64.
 
@@ -43,8 +44,14 @@
 	help
 	  This enables support for the Freescale Layerscape SoC family.
 
+config ARCH_LG1K
+	bool "LG Electronics LG1K SoC Family"
+	help
+	  This enables support for LG Electronics LG1K SoC Family
+
 config ARCH_HISI
 	bool "Hisilicon SoC Family"
+	select ARM_TIMER_SP804
 	select HISILICON_IRQ_MBIGEN
 	help
 	  This enables support for Hisilicon ARMv8 SoC family
@@ -64,8 +71,8 @@
 
 config ARCH_MVEBU
 	bool "Marvell EBU SoC Family"
-	select ARMADA_AP806_CORE_CLK
-	select ARMADA_AP806_RING_CLK
+	select ARMADA_AP806_SYSCON
+	select ARMADA_CP110_SYSCON
 	select MVEBU_ODMI
 	help
 	  This enables support for Marvell EBU family, including:
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 330fae9..6e199c9 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -18,6 +18,7 @@
 dts-dirs += socionext
 dts-dirs += sprd
 dts-dirs += xilinx
+dts-dirs += lg
 
 subdir-y	:= $(dts-dirs)
 
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index eb672f3..47ec703 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -1,3 +1,6 @@
+dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-odroidc2.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-p200.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-p201.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-pro.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-meta.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-telos.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
new file mode 100644
index 0000000..7f2c674
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ * Copyright (c) 2016 BayLibre, Inc.
+ * Author: Kevin Hilman <khilman@kernel.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxbb.dtsi"
+
+/ {
+	compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb";
+	model = "Hardkernel ODROID-C2";
+
+	aliases {
+		serial0 = &uart_AO;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+};
+
+&uart_AO {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
new file mode 100644
index 0000000..6297907
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ * Copyright (c) 2016 BayLibre, Inc.
+ * Author: Kevin Hilman <khilman@kernel.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxbb-p20x.dtsi"
+
+/ {
+	compatible = "amlogic,p200", "amlogic,meson-gxbb";
+	model = "Amlogic Meson GXBB P200 Development Board";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts
new file mode 100644
index 0000000..39bb037
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ * Copyright (c) 2016 BayLibre, Inc.
+ * Author: Kevin Hilman <khilman@kernel.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxbb-p20x.dtsi"
+
+/ {
+	compatible = "amlogic,p201", "amlogic,meson-gxbb";
+	model = "Amlogic Meson GXBB P201 Development Board";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
new file mode 100644
index 0000000..bf7ff1d
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ * Copyright (c) 2016 BayLibre, Inc.
+ * Author: Kevin Hilman <khilman@kernel.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "meson-gxbb.dtsi"
+
+/ {
+	aliases {
+		serial0 = &uart_AO;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x40000000>;
+	};
+};
+
+/* This UART is brought out to the DB9 connector */
+&uart_AO {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-meta.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-meta.dts
index 399aff9..62fb496 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-meta.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-meta.dts
@@ -48,7 +48,7 @@
 	compatible = "tronsmart,vega-s95-meta", "tronsmart,vega-s95", "amlogic,meson-gxbb";
 	model = "Tronsmart Vega S95 Meta";
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x0 0x0 0x80000000>;
 	};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-pro.dts
index ac5a241..9a9663a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-pro.dts
@@ -48,7 +48,7 @@
 	compatible = "tronsmart,vega-s95-pro", "tronsmart,vega-s95", "amlogic,meson-gxbb";
 	model = "Tronsmart Vega S95 Pro";
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x0 0x0 0x40000000>;
 	};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-telos.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-telos.dts
index fff7bfa..2fe167b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-telos.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95-telos.dts
@@ -48,7 +48,7 @@
 	compatible = "tronsmart,vega-s95-telos", "tronsmart,vega-s95", "amlogic,meson-gxbb";
 	model = "Tronsmart Vega S95 Telos";
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x0 0x0 0x80000000>;
 	};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index c1fa266..012cdcc 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -45,6 +45,10 @@
 / {
 	compatible = "tronsmart,vega-s95", "amlogic,meson-gxbb";
 
+	aliases {
+		serial0 = &uart_AO;
+	};
+
 	chosen {
 		stdout-path = "serial0:115200n8";
 	};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index eaa0a45..832815d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -50,11 +50,6 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
-	aliases {
-		serial0 = &uart_AO;
-		serial1 = &uart_A;
-	};
-
 	cpus {
 		#address-cells = <0x2>;
 		#size-cells = <0x0>;
diff --git a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
index a055a5d..c569f76 100644
--- a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
@@ -543,7 +543,7 @@
 		};
 
 		sata1: sata@1a000000 {
-			compatible = "apm,xgene-ahci";
+			compatible = "apm,xgene-ahci-v2";
 			reg = <0x0 0x1a000000 0x0 0x1000>,
 			      <0x0 0x1f200000 0x0 0x1000>,
 			      <0x0 0x1f20d000 0x0 0x1000>,
@@ -553,7 +553,7 @@
 		};
 
 		sata2: sata@1a200000 {
-			compatible = "apm,xgene-ahci";
+			compatible = "apm,xgene-ahci-v2";
 			reg = <0x0 0x1a200000 0x0 0x1000>,
 			      <0x0 0x1f210000 0x0 0x1000>,
 			      <0x0 0x1f21d000 0x0 0x1000>,
@@ -563,7 +563,7 @@
 		};
 
 		sata3: sata@1a400000 {
-			compatible = "apm,xgene-ahci";
+			compatible = "apm,xgene-ahci-v2";
 			reg = <0x0 0x1a400000 0x0 0x1000>,
 			      <0x0 0x1f220000 0x0 0x1000>,
 			      <0x0 0x1f22d000 0x0 0x1000>,
@@ -653,6 +653,7 @@
 				     <0 113 4>,
 				     <0 114 4>,
 				     <0 115 4>;
+			channel = <12>;
 			port-id = <1>;
 			dma-coherent;
 			clocks = <&xge1clk 0>;
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index ae4a173..5147d76 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -993,6 +993,7 @@
 				     <0x0 0x65 0x4>,
 				     <0x0 0x66 0x4>,
 				     <0x0 0x67 0x4>;
+			channel = <0>;
 			dma-coherent;
 			clocks = <&xge0clk 0>;
 			/* mac address will be overwritten by the bootloader */
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 68ccc39..dee2386 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -272,3 +272,13 @@
 
 		/include/ "juno-motherboard.dtsi"
 	};
+
+	site2: tlx@60000000 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0x60000000 0x10000000>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0>;
+		interrupt-map = <0 0 &gic 0 0 0 168 IRQ_TYPE_LEVEL_HIGH>;
+	};
diff --git a/arch/arm64/boot/dts/broadcom/ns2-clock.dtsi b/arch/arm64/boot/dts/broadcom/ns2-clock.dtsi
new file mode 100644
index 0000000..99009fd
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/ns2-clock.dtsi
@@ -0,0 +1,105 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright (c) 2016 Broadcom.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom Corporation nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dt-bindings/clock/bcm-ns2.h>
+
+	osc: oscillator {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <25000000>;
+	};
+
+	lcpll_ddr: lcpll_ddr@6501d058 {
+		#clock-cells = <1>;
+		compatible = "brcm,ns2-lcpll-ddr";
+		reg = <0x6501d058 0x20>,
+		      <0x6501c020 0x4>,
+		      <0x6501d04c 0x4>;
+		clocks = <&osc>;
+		clock-output-names = "lcpll_ddr", "pcie_sata_usb",
+				     "ddr", "ddr_ch2_unused",
+				     "ddr_ch3_unused", "ddr_ch4_unused",
+				     "ddr_ch5_unused";
+	};
+
+	lcpll_ports: lcpll_ports@6501d078 {
+		#clock-cells = <1>;
+		compatible = "brcm,ns2-lcpll-ports";
+		reg = <0x6501d078 0x20>,
+		      <0x6501c020 0x4>,
+		      <0x6501d054 0x4>;
+		clocks = <&osc>;
+		clock-output-names = "lcpll_ports", "wan", "rgmii",
+				     "ports_ch2_unused",
+				     "ports_ch3_unused",
+				     "ports_ch4_unused",
+				     "ports_ch5_unused";
+	};
+
+	genpll_scr: genpll_scr@6501d098 {
+		#clock-cells = <1>;
+		compatible = "brcm,ns2-genpll-scr";
+		reg = <0x6501d098 0x32>,
+		      <0x6501c020 0x4>,
+		      <0x6501d044 0x4>;
+		clocks = <&osc>;
+		clock-output-names = "genpll_scr", "scr", "fs",
+				     "audio_ref", "scr_ch3_unused",
+				     "scr_ch4_unused", "scr_ch5_unused";
+	};
+
+	iprocmed: iprocmed {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&genpll_scr BCM_NS2_GENPLL_SCR_SCR_CLK>;
+		clock-div = <2>;
+		clock-mult = <1>;
+	};
+
+	iprocslow: iprocslow {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&genpll_scr BCM_NS2_GENPLL_SCR_SCR_CLK>;
+		clock-div = <4>;
+		clock-mult = <1>;
+	};
+
+	genpll_sw: genpll_sw@6501d0c4 {
+		#clock-cells = <1>;
+		compatible = "brcm,ns2-genpll-sw";
+		reg = <0x6501d0c4 0x32>,
+		      <0x6501c020 0x4>,
+		      <0x6501d044 0x4>;
+		clocks = <&osc>;
+		clock-output-names = "genpll_sw", "rpe", "250", "nic",
+				     "chimp", "port", "sdio";
+	};
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index ce0ab84..54ca40c 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -72,6 +72,51 @@
 	status = "ok";
 };
 
+&ssp0 {
+	status = "ok";
+
+	slic@0 {
+		compatible = "silabs,si3226x";
+		reg = <0>;
+		spi-max-frequency = <5000000>;
+		spi-cpha = <1>;
+		spi-cpol = <1>;
+		pl022,hierarchy = <0>;
+		pl022,interface = <0>;
+		pl022,slave-tx-disable = <0>;
+		pl022,com-mode = <0>;
+		pl022,rx-level-trig = <1>;
+		pl022,tx-level-trig = <1>;
+		pl022,ctrl-len = <11>;
+		pl022,wait-state = <0>;
+		pl022,duplex = <0>;
+	};
+};
+
+&ssp1 {
+	status = "ok";
+
+	at25@0 {
+		compatible = "atmel,at25";
+		reg = <0>;
+		spi-max-frequency = <5000000>;
+		at25,byte-len = <0x8000>;
+		at25,addr-mode = <2>;
+		at25,page-size = <64>;
+		spi-cpha = <1>;
+		spi-cpol = <1>;
+		pl022,hierarchy = <0>;
+		pl022,interface = <0>;
+		pl022,slave-tx-disable = <0>;
+		pl022,com-mode = <0>;
+		pl022,rx-level-trig = <1>;
+		pl022,tx-level-trig = <1>;
+		pl022,ctrl-len = <11>;
+		pl022,wait-state = <0>;
+		pl022,duplex = <0>;
+	};
+};
+
 &sdio0 {
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 6f81c9d..ec68ec1 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -1,7 +1,7 @@
 /*
  *  BSD LICENSE
  *
- *  Copyright(c) 2015 Broadcom Corporation.  All rights reserved.
+ *  Copyright (c) 2015 Broadcom.  All rights reserved.
  *
  *  Redistribution and use in source and binary forms, with or without
  *  modification, are permitted provided that the following conditions
@@ -33,8 +33,6 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/clock/bcm-ns2.h>
 
-/memreserve/ 0x84b00000 0x00000008;
-
 / {
 	compatible = "brcm,ns2";
 	interrupt-parent = <&gic>;
@@ -49,8 +47,7 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a57", "arm,armv8";
 			reg = <0 0>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0 0x84b00000>;
+			enable-method = "psci";
 			next-level-cache = <&CLUSTER0_L2>;
 		};
 
@@ -58,8 +55,7 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a57", "arm,armv8";
 			reg = <0 1>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0 0x84b00000>;
+			enable-method = "psci";
 			next-level-cache = <&CLUSTER0_L2>;
 		};
 
@@ -67,8 +63,7 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a57", "arm,armv8";
 			reg = <0 2>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0 0x84b00000>;
+			enable-method = "psci";
 			next-level-cache = <&CLUSTER0_L2>;
 		};
 
@@ -76,8 +71,7 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a57", "arm,armv8";
 			reg = <0 3>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0 0x84b00000>;
+			enable-method = "psci";
 			next-level-cache = <&CLUSTER0_L2>;
 		};
 
@@ -86,6 +80,11 @@
 		};
 	};
 
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
@@ -110,33 +109,6 @@
 				     <&A57_3>;
 	};
 
-	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-
-		osc: oscillator {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <25000000>;
-		};
-
-		iprocmed: iprocmed {
-			#clock-cells = <0>;
-			compatible = "fixed-factor-clock";
-			clocks = <&genpll_scr BCM_NS2_GENPLL_SCR_SCR_CLK>;
-			clock-div = <2>;
-			clock-mult = <1>;
-		};
-
-		iprocslow: iprocslow {
-			#clock-cells = <0>;
-			compatible = "fixed-factor-clock";
-			clocks = <&genpll_scr BCM_NS2_GENPLL_SCR_SCR_CLK>;
-			clock-div = <4>;
-			clock-mult = <1>;
-		};
-	};
-
 	pcie0: pcie@20020000 {
 		compatible = "brcm,iproc-pcie";
 		reg = <0 0x20020000 0 0x1000>;
@@ -217,6 +189,27 @@
 		#size-cells = <1>;
 		ranges = <0 0 0 0xffffffff>;
 
+		#include "ns2-clock.dtsi"
+
+		dma0: dma@61360000 {
+			compatible = "arm,pl330", "arm,primecell";
+			reg = <0x61360000 0x1000>;
+			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
+			#dma-cells = <1>;
+			#dma-channels = <8>;
+			#dma-requests = <32>;
+			clocks = <&iprocslow>;
+			clock-names = "apb_pclk";
+		};
+
 		smmu: mmu@64000000 {
 			compatible = "arm,mmu-500";
 			reg = <0x64000000 0x40000>;
@@ -258,68 +251,6 @@
 			mmu-masters;
 		};
 
-		lcpll_ddr: lcpll_ddr@6501d058 {
-			#clock-cells = <1>;
-			compatible = "brcm,ns2-lcpll-ddr";
-			reg = <0x6501d058 0x20>,
-			      <0x6501c020 0x4>,
-			      <0x6501d04c 0x4>;
-			clocks = <&osc>;
-			clock-output-names = "lcpll_ddr", "pcie_sata_usb",
-					     "ddr", "ddr_ch2_unused",
-					     "ddr_ch3_unused", "ddr_ch4_unused",
-					     "ddr_ch5_unused";
-		};
-
-		lcpll_ports: lcpll_ports@6501d078 {
-			#clock-cells = <1>;
-			compatible = "brcm,ns2-lcpll-ports";
-			reg = <0x6501d078 0x20>,
-			      <0x6501c020 0x4>,
-			      <0x6501d054 0x4>;
-			clocks = <&osc>;
-			clock-output-names = "lcpll_ports", "wan", "rgmii",
-					     "ports_ch2_unused",
-					     "ports_ch3_unused",
-					     "ports_ch4_unused",
-					     "ports_ch5_unused";
-		};
-
-		genpll_scr: genpll_scr@6501d098 {
-			#clock-cells = <1>;
-			compatible = "brcm,ns2-genpll-scr";
-			reg = <0x6501d098 0x32>,
-			      <0x6501c020 0x4>,
-			      <0x6501d044 0x4>;
-			clocks = <&osc>;
-			clock-output-names = "genpll_scr", "scr", "fs",
-					     "audio_ref", "scr_ch3_unused",
-					     "scr_ch4_unused", "scr_ch5_unused";
-		};
-
-		genpll_sw: genpll_sw@6501d0c4 {
-			#clock-cells = <1>;
-			compatible = "brcm,ns2-genpll-sw";
-			reg = <0x6501d0c4 0x32>,
-			      <0x6501c020 0x4>,
-			      <0x6501d044 0x4>;
-			clocks = <&osc>;
-			clock-output-names = "genpll_sw", "rpe", "250", "nic",
-					     "chimp", "port", "sdio";
-		};
-
-		crmu: crmu@65024000 {
-			compatible = "syscon";
-			reg = <0x65024000 0x100>;
-		};
-
-		reboot@65024000 {
-			compatible ="syscon-reboot";
-			regmap = <&crmu>;
-			offset = <0x90>;
-			mask = <0xfffffffd>;
-		};
-
 		gic: interrupt-controller@65210000 {
 			compatible = "arm,gic-400";
 			#interrupt-cells = <3>;
@@ -328,6 +259,8 @@
 			      <0x65220000 0x1000>,
 			      <0x65240000 0x2000>,
 			      <0x65260000 0x1000>;
+			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_RAW(0xf) |
+				      IRQ_TYPE_LEVEL_HIGH)>;
 		};
 
 		timer0: timer@66030000 {
@@ -408,6 +341,28 @@
 			status = "disabled";
 		};
 
+		ssp0: ssp@66180000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x66180000 0x1000>;
+			interrupts = <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&iprocslow>, <&iprocslow>;
+			clock-names = "spiclk", "apb_pclk";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		ssp1: ssp@66190000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x66190000 0x1000>;
+			interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&iprocslow>, <&iprocslow>;
+			clock-names = "spiclk", "apb_pclk";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
 		hwrng: hwrng@66220000 {
 			compatible = "brcm,iproc-rng200";
 			reg = <0x66220000 0x28>;
diff --git a/arch/arm64/boot/dts/broadcom/vulcan.dtsi b/arch/arm64/boot/dts/broadcom/vulcan.dtsi
index c49b5a8..34e11a9 100644
--- a/arch/arm64/boot/dts/broadcom/vulcan.dtsi
+++ b/arch/arm64/boot/dts/broadcom/vulcan.dtsi
@@ -86,7 +86,7 @@
 	};
 
 	pmu {
-		compatible = "arm,armv8-pmuv3";
+		compatible = "brcm,vulcan-pmu", "arm,armv8-pmuv3";
 		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>; /* PMU overflow */
 	};
 
@@ -108,12 +108,15 @@
 		reg = <0x0 0x30000000  0x0 0x10000000>;
 		reg-names = "PCI ECAM";
 
-			  /* IO 0x4000_0000 - 0x4001_0000 */
-		ranges = <0x01000000 0 0x40000000 0 0x40000000 0 0x00010000
-			  /* MEM 0x4800_0000 - 0x5000_0000 */
-			  0x02000000 0 0x48000000 0 0x48000000 0 0x08000000
-			  /* MEM64 pref 0x6_0000_0000 - 0x7_0000_0000 */
-			  0x43000000 6 0x00000000 6 0x00000000 1 0x00000000>;
+		/*
+		 * PCI ranges:
+		 *   IO		not supported
+		 *   MEM        0x4000_0000 - 0x6000_0000
+		 *   MEM64 pref 0x40_0000_0000 - 0x60_0000_0000
+		 */
+		ranges =
+		  <0x02000000    0 0x40000000    0 0x40000000    0 0x20000000
+		   0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
 		interrupt-map-mask = <0 0 0 7>;
 		interrupt-map =
 		      /* addr  pin  ic   icaddr  icintr */
diff --git a/arch/arm64/boot/dts/exynos/exynos7-tmu-sensor-conf.dtsi b/arch/arm64/boot/dts/exynos/exynos7-tmu-sensor-conf.dtsi
new file mode 100644
index 0000000..1d6dcf2
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos7-tmu-sensor-conf.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Device tree sources for Exynos7 TMU sensor configuration
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <9>;
+samsung,tmu_reference_voltage = <17>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <75>;
+samsung,tmu_min_efuse_value = <15>;
+samsung,tmu_max_efuse_value = <100>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <85>;
+samsung,tmu_default_temp_offset = <50>;
+samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm64/boot/dts/exynos/exynos7-trip-points.dtsi b/arch/arm64/boot/dts/exynos/exynos7-trip-points.dtsi
new file mode 100644
index 0000000..0623583
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos7-trip-points.dtsi
@@ -0,0 +1,54 @@
+/*
+ * Device tree sources for default Exynos7 thermal zone definition
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+trips {
+	cpu-alert-0 {
+		temperature = <75000>; /* millicelsius */
+		hysteresis = <10000>; /* millicelsius */
+		type = "passive";
+	};
+	cpu-alert-1 {
+		temperature = <80000>; /* millicelsius */
+		hysteresis = <10000>; /* millicelsius */
+		type = "passive";
+	};
+	cpu-alert-2 {
+		temperature = <85000>; /* millicelsius */
+		hysteresis = <10000>; /* millicelsius */
+		type = "passive";
+	};
+	cpu-alert-3 {
+		temperature = <90000>; /* millicelsius */
+		hysteresis = <10000>; /* millicelsius */
+		type = "passive";
+	};
+	cpu-alert-4 {
+		temperature = <95000>; /* millicelsius */
+		hysteresis = <10000>; /* millicelsius */
+		type = "passive";
+	};
+	cpu-alert-5 {
+		temperature = <100000>; /* millicelsius */
+		hysteresis = <10000>; /* millicelsius */
+		type = "passive";
+	};
+	cpu-alert-6 {
+		temperature = <110000>; /* millicelsius */
+		hysteresis = <10000>; /* millicelsius */
+		type = "passive";
+	};
+	cpu-crit-0 {
+		temperature = <115000>; /* millicelsius */
+		hysteresis = <0>; /* millicelsius */
+		type = "critical";
+	};
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
index 93108f1..ca663df 100644
--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
@@ -27,6 +27,7 @@
 		pinctrl6 = &pinctrl_fsys0;
 		pinctrl7 = &pinctrl_fsys1;
 		pinctrl8 = &pinctrl_bus1;
+		tmuctrl0 = &tmuctrl_0;
 	};
 
 	cpus {
@@ -95,6 +96,35 @@
 				<0x11006000 0x2000>;
 		};
 
+		amba {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			pdma0: pdma@10E10000 {
+				compatible = "arm,pl330", "arm,primecell";
+				reg = <0x10E10000 0x1000>;
+				interrupts = <0 225 0>;
+				clocks = <&clock_fsys0 ACLK_PDMA0>;
+				clock-names = "apb_pclk";
+				#dma-cells = <1>;
+				#dma-channels = <8>;
+				#dma-requests = <32>;
+			};
+
+			pdma1: pdma@10EB0000 {
+				compatible = "arm,pl330", "arm,primecell";
+				reg = <0x10EB0000 0x1000>;
+				interrupts = <0 226 0>;
+				clocks = <&clock_fsys0 ACLK_PDMA1>;
+				clock-names = "apb_pclk";
+				#dma-cells = <1>;
+				#dma-channels = <8>;
+				#dma-requests = <32>;
+			};
+		};
+
 		clock_topc: clock-controller@10570000 {
 			compatible = "samsung,exynos7-clock-topc";
 			reg = <0x10570000 0x10000>;
@@ -538,6 +568,25 @@
 			clocks = <&clock_peric0 PCLK_PWM>;
 			clock-names = "timers";
 		};
+
+		tmuctrl_0: tmu@10060000 {
+			compatible = "samsung,exynos7-tmu";
+			reg = <0x10060000 0x200>;
+			interrupts = <0 108 0>;
+			clocks = <&clock_peris PCLK_TMU>,
+				 <&clock_peris SCLK_TMU>;
+			clock-names = "tmu_apbif", "tmu_sclk";
+			#include "exynos7-tmu-sensor-conf.dtsi"
+		};
+
+		thermal-zones {
+			atlas_thermal: cluster0-thermal {
+				polling-delay-passive = <0>; /* milliseconds */
+				polling-delay = <0>; /* milliseconds */
+				thermal-sensors = <&tmuctrl_0>;
+				#include "exynos7-trip-points.dtsi"
+			};
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index f3c2516..1b7783d 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -1,7 +1,8 @@
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-qds.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
-dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb
  
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
new file mode 100644
index 0000000..9d3e9fe
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
@@ -0,0 +1,181 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1043A family SoC.
+ *
+ * Copyright 2014-2015, Freescale Semiconductor
+ *
+ * Mingkai Hu <Mingkai.hu@freescale.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+/include/ "fsl-ls1043a.dtsi"
+
+/ {
+	model = "LS1043A QDS Board";
+	compatible = "fsl,ls1043a-qds", "fsl,ls1043a";
+
+	aliases {
+		gpio0 = &gpio1;
+		gpio1 = &gpio2;
+		gpio2 = &gpio3;
+		gpio3 = &gpio4;
+		serial0 = &lpuart0;
+		serial1 = &lpuart1;
+		serial2 = &lpuart2;
+		serial3 = &lpuart3;
+		serial4 = &lpuart4;
+		serial5 = &lpuart5;
+	};
+};
+
+&duart0 {
+	status = "okay";
+};
+
+&duart1 {
+	status = "okay";
+};
+
+&ifc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	/* NOR, NAND Flashes and FPGA on board */
+	ranges = <0x0 0x0 0x0 0x60000000 0x08000000
+		  0x1 0x0 0x0 0x7e800000 0x00010000
+		  0x2 0x0 0x0 0x7fb00000 0x00000100>;
+	status = "okay";
+
+	nor@0,0 {
+		compatible = "cfi-flash";
+		reg = <0x0 0x0 0x8000000>;
+		bank-width = <2>;
+		device-width = <1>;
+	};
+
+	nand@1,0 {
+		compatible = "fsl,ifc-nand";
+		reg = <0x1 0x0 0x10000>;
+	};
+
+	fpga: board-control@2,0 {
+		compatible = "fsl,ls1043aqds-fpga", "fsl,fpga-qixis";
+		reg = <0x2 0x0 0x0000100>;
+	};
+};
+
+&i2c0 {
+	status = "okay";
+
+	pca9547@77 {
+		compatible = "nxp,pca9547";
+		reg = <0x77>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		i2c@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x0>;
+
+			rtc@68 {
+				compatible = "dallas,ds3232";
+				reg = <0x68>;
+				/* IRQ10_B */
+				interrupts = <0 150 0x4>;
+			};
+		};
+
+		i2c@2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x2>;
+
+			ina220@40 {
+				compatible = "ti,ina220";
+				reg = <0x40>;
+				shunt-resistor = <1000>;
+			};
+
+			ina220@41 {
+				compatible = "ti,ina220";
+				reg = <0x41>;
+				shunt-resistor = <1000>;
+			};
+		};
+
+		i2c@3 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x3>;
+
+			eeprom@56 {
+				compatible = "atmel,24c512";
+				reg = <0x56>;
+			};
+
+			eeprom@57 {
+				compatible = "atmel,24c512";
+				reg = <0x57>;
+			};
+
+			temp-sensor@4c {
+				compatible = "adi,adt7461a";
+				reg = <0x4c>;
+			};
+		};
+	};
+};
+
+&lpuart0 {
+	status = "okay";
+};
+
+&qspi {
+	bus-num = <0>;
+	status = "okay";
+
+	qflash0: s25fl128s@0 {
+		compatible = "spansion,m25p80";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <20000000>;
+		reg = <0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
index ce23557..f895fc0 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
@@ -107,6 +107,19 @@
 		};
 };
 
+&dspi0 {
+	bus-num = <0>;
+	status = "okay";
+
+	flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q128a13", "jedec,spi-nor";  /* 16MB */
+		reg = <0>;
+		spi-max-frequency = <1000000>; /* input clock */
+	};
+};
+
 &duart0 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index be72bf5..de0323b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -171,6 +171,20 @@
 			interrupts = <0 43 0x4>;
 		};
 
+		qspi: quadspi@1550000 {
+			compatible = "fsl,ls1043a-qspi", "fsl,ls1021a-qspi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x0 0x1550000 0x0 0x10000>,
+				<0x0 0x40000000 0x0 0x4000000>;
+			reg-names = "QuadSPI", "QuadSPI-memory";
+			interrupts = <0 99 0x4>;
+			clock-names = "qspi_en", "qspi";
+			clocks = <&clockgen 4 0>, <&clockgen 4 0>;
+			big-endian;
+			status = "disabled";
+		};
+
 		esdhc: esdhc@1560000 {
 			compatible = "fsl,ls1043a-esdhc", "fsl,esdhc";
 			reg = <0x0 0x1560000 0x0 0x10000>;
@@ -284,7 +298,7 @@
 		};
 
 		gpio1: gpio@2300000 {
-			compatible = "fsl,ls1043a-gpio";
+			compatible = "fsl,ls1043a-gpio", "fsl,qoriq-gpio";
 			reg = <0x0 0x2300000 0x0 0x10000>;
 			interrupts = <0 66 0x4>;
 			gpio-controller;
@@ -294,7 +308,7 @@
 		};
 
 		gpio2: gpio@2310000 {
-			compatible = "fsl,ls1043a-gpio";
+			compatible = "fsl,ls1043a-gpio", "fsl,qoriq-gpio";
 			reg = <0x0 0x2310000 0x0 0x10000>;
 			interrupts = <0 67 0x4>;
 			gpio-controller;
@@ -304,7 +318,7 @@
 		};
 
 		gpio3: gpio@2320000 {
-			compatible = "fsl,ls1043a-gpio";
+			compatible = "fsl,ls1043a-gpio", "fsl,qoriq-gpio";
 			reg = <0x0 0x2320000 0x0 0x10000>;
 			interrupts = <0 68 0x4>;
 			gpio-controller;
@@ -314,7 +328,7 @@
 		};
 
 		gpio4: gpio@2330000 {
-			compatible = "fsl,ls1043a-gpio";
+			compatible = "fsl,ls1043a-gpio", "fsl,qoriq-gpio";
 			reg = <0x0 0x2330000 0x0 0x10000>;
 			interrupts = <0 134 0x4>;
 			gpio-controller;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
index 4cb996d..e8801fa 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
@@ -178,7 +178,14 @@
 
 &qspi {
 	status = "okay";
-	qflash0: s25fl008k {
+	flash0: s25fl256s1@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "st,m25p80";
+		spi-max-frequency = <20000000>;
+		reg = <0>;
+	};
+	flash2: s25fl256s1@2 {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		compatible = "st,m25p80";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 9d746c6..3187c82 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -265,6 +265,104 @@
 			compatible = "fsl,qoriq-mc";
 			reg = <0x00000008 0x0c000000 0 0x40>,	 /* MC portal base */
 			      <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
+			msi-parent = <&its>;
+			#address-cells = <3>;
+			#size-cells = <1>;
+
+			/*
+			 * Region type 0x0 - MC portals
+			 * Region type 0x1 - QBMAN portals
+			 */
+			ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
+				  0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
+
+			/*
+			 * Define the maximum number of MACs present on the SoC.
+			 */
+			dpmacs {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				dpmac1: dpmac@1 {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0x1>;
+				};
+
+				dpmac2: dpmac@2 {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0x2>;
+				};
+
+				dpmac3: dpmac@3 {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0x3>;
+				};
+
+				dpmac4: dpmac@4 {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0x4>;
+				};
+
+				dpmac5: dpmac@5 {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0x5>;
+				};
+
+				dpmac6: dpmac@6 {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0x6>;
+				};
+
+				dpmac7: dpmac@7 {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0x7>;
+				};
+
+				dpmac8: dpmac@8 {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0x8>;
+				};
+
+				dpmac9: dpmac@9 {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0x9>;
+				};
+
+				dpmac10: dpmac@a {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0xa>;
+				};
+
+				dpmac11: dpmac@b {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0xb>;
+				};
+
+				dpmac12: dpmac@c {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0xc>;
+				};
+
+				dpmac13: dpmac@d {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0xd>;
+				};
+
+				dpmac14: dpmac@e {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0xe>;
+				};
+
+				dpmac15: dpmac@f {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0xf>;
+				};
+
+				dpmac16: dpmac@10 {
+					compatible = "fsl,qoriq-mc-dpmac";
+					reg = <0x10>;
+				};
+			};
 		};
 
 		smmu: iommu@5000000 {
@@ -318,7 +416,7 @@
 
 		dspi: dspi@2100000 {
 			status = "disabled";
-			compatible = "fsl,vf610-dspi";
+			compatible = "fsl,ls2080a-dspi", "fsl,ls2085a-dspi";
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x0 0x2100000 0x0 0x10000>;
@@ -342,7 +440,7 @@
 		};
 
 		gpio0: gpio@2300000 {
-			compatible = "fsl,qoriq-gpio";
+			compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
 			reg = <0x0 0x2300000 0x0 0x10000>;
 			interrupts = <0 36 0x4>; /* Level high type */
 			gpio-controller;
@@ -353,7 +451,7 @@
 		};
 
 		gpio1: gpio@2310000 {
-			compatible = "fsl,qoriq-gpio";
+			compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
 			reg = <0x0 0x2310000 0x0 0x10000>;
 			interrupts = <0 36 0x4>; /* Level high type */
 			gpio-controller;
@@ -364,7 +462,7 @@
 		};
 
 		gpio2: gpio@2320000 {
-			compatible = "fsl,qoriq-gpio";
+			compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
 			reg = <0x0 0x2320000 0x0 0x10000>;
 			interrupts = <0 37 0x4>; /* Level high type */
 			gpio-controller;
@@ -375,7 +473,7 @@
 		};
 
 		gpio3: gpio@2330000 {
-			compatible = "fsl,qoriq-gpio";
+			compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
 			reg = <0x0 0x2330000 0x0 0x10000>;
 			interrupts = <0 37 0x4>; /* Level high type */
 			gpio-controller;
@@ -444,7 +542,7 @@
 
 		qspi: quadspi@20c0000 {
 			status = "disabled";
-			compatible = "fsl,vf610-qspi";
+			compatible = "fsl,ls2080a-qspi", "fsl,ls1021a-qspi";
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x0 0x20c0000 0x0 0x10000>,
diff --git a/arch/arm64/boot/dts/hisilicon/Makefile b/arch/arm64/boot/dts/hisilicon/Makefile
index cd158b8..d5f43a0 100644
--- a/arch/arm64/boot/dts/hisilicon/Makefile
+++ b/arch/arm64/boot/dts/hisilicon/Makefile
@@ -1,4 +1,6 @@
-dtb-$(CONFIG_ARCH_HISI) += hi6220-hikey.dtb hip05-d02.dtb
+dtb-$(CONFIG_ARCH_HISI) += hi6220-hikey.dtb
+dtb-$(CONFIG_ARCH_HISI) += hip05-d02.dtb
+dtb-$(CONFIG_ARCH_HISI) += hip06-d03.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 8185251..e92a30c 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -6,11 +6,9 @@
  */
 
 /dts-v1/;
-
-/*Reserved 1MB memory for MCU*/
-/memreserve/ 0x05e00000 0x00100000;
-
 #include "hi6220.dtsi"
+#include "hikey-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 
 / {
 	model = "HiKey Development Board";
@@ -27,9 +25,201 @@
 		stdout-path = "serial3:115200n8";
 	};
 
+	/*
+	 * Reserve below regions from memory node:
+	 *
+	 *  0x05e0,0000 - 0x05ef,ffff: MCU firmware runtime usage
+	 *  0x06df,f000 - 0x06df,ffff: Mailbox message data
+	 *  0x0740,f000 - 0x0740,ffff: MCU firmware section
+	 *  0x3e00,0000 - 0x3fff,ffff: OP-TEE
+	 */
 	memory@0 {
 		device_type = "memory";
-		reg = <0x0 0x0 0x0 0x40000000>;
+		reg = <0x00000000 0x00000000 0x00000000 0x05e00000>,
+		      <0x00000000 0x05f00000 0x00000000 0x00eff000>,
+		      <0x00000000 0x06e00000 0x00000000 0x0060f000>,
+		      <0x00000000 0x07410000 0x00000000 0x36bf0000>;
+	};
+
+	soc {
+		spi0: spi@f7106000 {
+			status = "ok";
+		};
+
+		i2c0: i2c@f7100000 {
+			status = "ok";
+		};
+
+		i2c1: i2c@f7101000 {
+			status = "ok";
+		};
+
+		uart1: uart@f7111000 {
+			status = "ok";
+		};
+
+		uart2: uart@f7112000 {
+			status = "ok";
+		};
+
+		uart3: uart@f7113000 {
+			status = "ok";
+		};
+
+		dwmmc_2: dwmmc2@f723f000 {
+			ti,non-removable;
+			non-removable;
+			/* WL_EN */
+			vmmc-supply = <&wlan_en_reg>;
+
+			#address-cells = <0x1>;
+			#size-cells = <0x0>;
+			wlcore: wlcore@2 {
+				compatible = "ti,wl1835";
+				reg = <2>;	/* sdio func num */
+				/* WL_IRQ, WL_HOST_WAKE_GPIO1_3 */
+				interrupt-parent = <&gpio1>;
+				interrupts = <3 IRQ_TYPE_EDGE_RISING>;
+			};
+		};
+
+		wlan_en_reg: regulator@1 {
+			compatible = "regulator-fixed";
+			regulator-name = "wlan-en-regulator";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			/* WLAN_EN GPIO */
+			gpio = <&gpio0 5 0>;
+			/* WLAN card specific delay */
+			startup-delay-us = <70000>;
+			enable-active-high;
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		user_led4 {
+			label = "user_led4";
+			gpios = <&gpio4 0 0>; /* <&gpio_user_led_1>; */
+			linux,default-trigger = "heartbeat";
+		};
+
+		user_led3 {
+			label = "user_led3";
+			gpios = <&gpio4 1 0>; /* <&gpio_user_led_2>; */
+			linux,default-trigger = "mmc0";
+		};
+
+		user_led2 {
+			label = "user_led2";
+			gpios = <&gpio4 2 0>; /* <&gpio_user_led_3>; */
+			linux,default-trigger = "mmc1";
+		};
+
+		user_led1 {
+			label = "user_led1";
+			gpios = <&gpio4 3 0>; /* <&gpio_user_led_4>; */
+			linux,default-trigger = "cpu0";
+		};
+
+		wlan_active_led {
+			label = "wifi_active";
+			gpios = <&gpio3 5 0>; /* <&gpio_wlan_active_led>; */
+			linux,default-trigger = "phy0tx";
+			default-state = "off";
+		};
+
+		bt_active_led {
+			label = "bt_active";
+			gpios = <&gpio4 7 0>; /* <&gpio_bt_active_led>; */
+			linux,default-trigger = "hci0rx";
+			default-state = "off";
+		};
+	};
+
+	pmic: pmic@f8000000 {
+		compatible = "hisilicon,hi655x-pmic";
+		reg = <0x0 0xf8000000 0x0 0x1000>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+		pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+
+		regulators {
+			ldo2: LDO2 {
+				regulator-name = "LDO2_2V8";
+				regulator-min-microvolt = <2500000>;
+				regulator-max-microvolt = <3200000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			ldo7: LDO7 {
+				regulator-name = "LDO7_SDIO";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			ldo10: LDO10 {
+				regulator-name = "LDO10_2V85";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <360>;
+			};
+
+			ldo13: LDO13 {
+				regulator-name = "LDO13_1V8";
+				regulator-min-microvolt = <1600000>;
+				regulator-max-microvolt = <1950000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			ldo14: LDO14 {
+				regulator-name = "LDO14_2V8";
+				regulator-min-microvolt = <2500000>;
+				regulator-max-microvolt = <3200000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			ldo15: LDO15 {
+				regulator-name = "LDO15_1V8";
+				regulator-min-microvolt = <1600000>;
+				regulator-max-microvolt = <1950000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			ldo17: LDO17 {
+				regulator-name = "LDO17_2V5";
+				regulator-min-microvolt = <2500000>;
+				regulator-max-microvolt = <3200000>;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			ldo19: LDO19 {
+				regulator-name = "LDO19_3V0";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <360>;
+			};
+
+			ldo21: LDO21 {
+				regulator-name = "LDO21_1V8";
+				regulator-min-microvolt = <1650000>;
+				regulator-max-microvolt = <2000000>;
+				regulator-always-on;
+				regulator-enable-ramp-delay = <120>;
+			};
+
+			ldo22: LDO22 {
+				regulator-name = "LDO22_1V2";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-enable-ramp-delay = <120>;
+			};
+		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index ad1f1eb..189d215 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -6,6 +6,8 @@
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/clock/hi6220-clock.h>
+#include <dt-bindings/pinctrl/hisi.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
 	compatible = "hisilicon,hi6220";
@@ -53,11 +55,42 @@
 			};
 		};
 
+		idle-states {
+			entry-method = "psci";
+
+			CPU_SLEEP: cpu-sleep {
+				compatible = "arm,idle-state";
+				local-timer-stop;
+				arm,psci-suspend-param = <0x0010000>;
+				entry-latency-us = <700>;
+				exit-latency-us = <250>;
+				min-residency-us = <1000>;
+			};
+
+			CLUSTER_SLEEP: cluster-sleep {
+				compatible = "arm,idle-state";
+				local-timer-stop;
+				arm,psci-suspend-param = <0x1010000>;
+				entry-latency-us = <1000>;
+				exit-latency-us = <700>;
+				min-residency-us = <2700>;
+				wakeup-latency-us = <1500>;
+			};
+		};
+
 		cpu0: cpu@0 {
 			compatible = "arm,cortex-a53", "arm,armv8";
 			device_type = "cpu";
 			reg = <0x0 0x0>;
 			enable-method = "psci";
+			next-level-cache = <&CLUSTER0_L2>;
+			clocks = <&stub_clock 0>;
+			operating-points-v2 = <&cpu_opp_table>;
+			cooling-min-level = <4>;
+			cooling-max-level = <0>;
+			#cooling-cells = <2>; /* min followed by max */
+			cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+			dynamic-power-coefficient = <311>;
 		};
 
 		cpu1: cpu@1 {
@@ -65,6 +98,9 @@
 			device_type = "cpu";
 			reg = <0x0 0x1>;
 			enable-method = "psci";
+			next-level-cache = <&CLUSTER0_L2>;
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
 		};
 
 		cpu2: cpu@2 {
@@ -72,6 +108,9 @@
 			device_type = "cpu";
 			reg = <0x0 0x2>;
 			enable-method = "psci";
+			next-level-cache = <&CLUSTER0_L2>;
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
 		};
 
 		cpu3: cpu@3 {
@@ -79,6 +118,9 @@
 			device_type = "cpu";
 			reg = <0x0 0x3>;
 			enable-method = "psci";
+			next-level-cache = <&CLUSTER0_L2>;
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
 		};
 
 		cpu4: cpu@100 {
@@ -86,6 +128,9 @@
 			device_type = "cpu";
 			reg = <0x0 0x100>;
 			enable-method = "psci";
+			next-level-cache = <&CLUSTER1_L2>;
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
 		};
 
 		cpu5: cpu@101 {
@@ -93,6 +138,9 @@
 			device_type = "cpu";
 			reg = <0x0 0x101>;
 			enable-method = "psci";
+			next-level-cache = <&CLUSTER1_L2>;
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
 		};
 
 		cpu6: cpu@102 {
@@ -100,6 +148,9 @@
 			device_type = "cpu";
 			reg = <0x0 0x102>;
 			enable-method = "psci";
+			next-level-cache = <&CLUSTER1_L2>;
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
 		};
 
 		cpu7: cpu@103 {
@@ -107,6 +158,48 @@
 			device_type = "cpu";
 			reg = <0x0 0x103>;
 			enable-method = "psci";
+			next-level-cache = <&CLUSTER1_L2>;
+			operating-points-v2 = <&cpu_opp_table>;
+			cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+		};
+
+		CLUSTER0_L2: l2-cache0 {
+			compatible = "cache";
+		};
+
+		CLUSTER1_L2: l2-cache1 {
+			compatible = "cache";
+		};
+	};
+
+	cpu_opp_table: cpu_opp_table {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp00 {
+			opp-hz = /bits/ 64 <208000000>;
+			opp-microvolt = <1040000>;
+			clock-latency-ns = <500000>;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <432000000>;
+			opp-microvolt = <1040000>;
+			clock-latency-ns = <500000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <729000000>;
+			opp-microvolt = <1090000>;
+			clock-latency-ns = <500000>;
+		};
+		opp03 {
+			opp-hz = /bits/ 64 <960000000>;
+			opp-microvolt = <1180000>;
+			clock-latency-ns = <500000>;
+		};
+		opp04 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <1330000>;
+			clock-latency-ns = <500000>;
 		};
 	};
 
@@ -137,6 +230,11 @@
 		#size-cells = <2>;
 		ranges;
 
+		sram: sram@fff80000 {
+			compatible = "hisilicon,hi6220-sramctrl", "syscon";
+			reg = <0x0 0xfff80000 0x0 0x12000>;
+		};
+
 		ao_ctrl: ao_ctrl@f7800000 {
 			compatible = "hisilicon,hi6220-aoctrl", "syscon";
 			reg = <0x0 0xf7800000 0x0 0x2000>;
@@ -162,6 +260,14 @@
 			#clock-cells = <1>;
 		};
 
+		stub_clock: stub_clock {
+			compatible = "hisilicon,hi6220-stub-clk";
+			hisilicon,hi6220-clk-sram = <&sram>;
+			#clock-cells = <1>;
+			mbox-names = "mbox-tx";
+			mboxes = <&mailbox 1 0 11>;
+		};
+
 		uart0: uart@f8015000 {	/* console */
 			compatible = "arm,pl011", "arm,primecell";
 			reg = <0x0 0xf8015000 0x0 0x1000>;
@@ -178,6 +284,8 @@
 			clocks = <&sys_ctrl HI6220_UART1_PCLK>,
 				 <&sys_ctrl HI6220_UART1_PCLK>;
 			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart1_pmx_func &uart1_cfg_func1 &uart1_cfg_func2>;
 			status = "disabled";
 		};
 
@@ -188,6 +296,8 @@
 			clocks = <&sys_ctrl HI6220_UART2_PCLK>,
 				 <&sys_ctrl HI6220_UART2_PCLK>;
 			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart2_pmx_func &uart2_cfg_func>;
 			status = "disabled";
 		};
 
@@ -198,6 +308,9 @@
 			clocks = <&sys_ctrl HI6220_UART3_PCLK>,
 				 <&sys_ctrl HI6220_UART3_PCLK>;
 			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart3_pmx_func &uart3_cfg_func>;
+			status = "disabled";
 		};
 
 		uart4: uart@f7114000 {
@@ -207,7 +320,517 @@
 			clocks = <&sys_ctrl HI6220_UART4_PCLK>,
 				 <&sys_ctrl HI6220_UART4_PCLK>;
 			clock-names = "uartclk", "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart4_pmx_func &uart4_cfg_func>;
 			status = "disabled";
 		};
+
+		dual_timer0: timer@f8008000 {
+			compatible = "arm,sp804", "arm,primecell";
+			reg = <0x0 0xf8008000 0x0 0x1000>;
+			interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ao_ctrl HI6220_TIMER0_PCLK>,
+				 <&ao_ctrl HI6220_TIMER0_PCLK>,
+				 <&ao_ctrl HI6220_TIMER0_PCLK>;
+			clock-names = "timer1", "timer2", "apb_pclk";
+		};
+
+		pmx0: pinmux@f7010000 {
+			compatible = "pinctrl-single";
+			reg = <0x0 0xf7010000  0x0 0x27c>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			#gpio-range-cells = <3>;
+			pinctrl-single,register-width = <32>;
+			pinctrl-single,function-mask = <7>;
+			pinctrl-single,gpio-range = <
+				&range  80  8 MUX_M0 /* gpio  3: [0..7] */
+				&range  88  8 MUX_M0 /* gpio  4: [0..7] */
+				&range  96  8 MUX_M0 /* gpio  5: [0..7] */
+				&range 104  8 MUX_M0 /* gpio  6: [0..7] */
+				&range 112  8 MUX_M0 /* gpio  7: [0..7] */
+				&range 120  2 MUX_M0 /* gpio  8: [0..1] */
+				&range   2  6 MUX_M1 /* gpio  8: [2..7] */
+				&range   8  8 MUX_M1 /* gpio  9: [0..7] */
+				&range   0  1 MUX_M1 /* gpio 10: [0]    */
+				&range  16  7 MUX_M1 /* gpio 10: [1..7] */
+				&range  23  3 MUX_M1 /* gpio 11: [0..2] */
+				&range  28  5 MUX_M1 /* gpio 11: [3..7] */
+				&range  33  3 MUX_M1 /* gpio 12: [0..2] */
+				&range  43  5 MUX_M1 /* gpio 12: [3..7] */
+				&range  48  8 MUX_M1 /* gpio 13: [0..7] */
+				&range  56  8 MUX_M1 /* gpio 14: [0..7] */
+				&range  74  6 MUX_M1 /* gpio 15: [0..5] */
+				&range 122  1 MUX_M1 /* gpio 15: [6]    */
+				&range 126  1 MUX_M1 /* gpio 15: [7]    */
+				&range 127  8 MUX_M1 /* gpio 16: [0..7] */
+				&range 135  8 MUX_M1 /* gpio 17: [0..7] */
+				&range 143  8 MUX_M1 /* gpio 18: [0..7] */
+				&range 151  8 MUX_M1 /* gpio 19: [0..7] */
+			>;
+			range: gpio-range {
+				#pinctrl-single,gpio-range-cells = <3>;
+			};
+		};
+
+		pmx1: pinmux@f7010800 {
+			compatible = "pinconf-single";
+			reg = <0x0 0xf7010800 0x0 0x28c>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			pinctrl-single,register-width = <32>;
+		};
+
+		pmx2: pinmux@f8001800 {
+			compatible = "pinconf-single";
+			reg = <0x0 0xf8001800 0x0 0x78>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			pinctrl-single,register-width = <32>;
+		};
+
+		gpio0: gpio@f8011000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf8011000 0x0 0x1000>;
+			interrupts = <0 52 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio1: gpio@f8012000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf8012000 0x0 0x1000>;
+			interrupts = <0 53 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio2: gpio@f8013000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf8013000 0x0 0x1000>;
+			interrupts = <0 54 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio3: gpio@f8014000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf8014000 0x0 0x1000>;
+			interrupts = <0 55 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 80 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio4: gpio@f7020000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf7020000 0x0 0x1000>;
+			interrupts = <0 56 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 88 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio5: gpio@f7021000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf7021000 0x0 0x1000>;
+			interrupts = <0 57 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 96 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio6: gpio@f7022000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf7022000 0x0 0x1000>;
+			interrupts = <0 58 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 104 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio7: gpio@f7023000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf7023000 0x0 0x1000>;
+			interrupts = <0 59 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 112 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio8: gpio@f7024000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf7024000 0x0 0x1000>;
+			interrupts = <0 60 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 120 2 &pmx0 2 2 6>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio9: gpio@f7025000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf7025000 0x0 0x1000>;
+			interrupts = <0 61 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 8 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio10: gpio@f7026000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf7026000 0x0 0x1000>;
+			interrupts = <0 62 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 0 1 &pmx0 1 16 7>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio11: gpio@f7027000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf7027000 0x0 0x1000>;
+			interrupts = <0 63 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 23 3 &pmx0 3 28 5>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio12: gpio@f7028000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf7028000 0x0 0x1000>;
+			interrupts = <0 64 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 33 3 &pmx0 3 43 5>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio13: gpio@f7029000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf7029000 0x0 0x1000>;
+			interrupts = <0 65 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 48 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio14: gpio@f702a000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf702a000 0x0 0x1000>;
+			interrupts = <0 66 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 56 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio15: gpio@f702b000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf702b000 0x0 0x1000>;
+			interrupts = <0 67 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <
+				&pmx0 0 74 6
+				&pmx0 6 122 1
+				&pmx0 7 126 1
+			>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio16: gpio@f702c000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf702c000 0x0 0x1000>;
+			interrupts = <0 68 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 127 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio17: gpio@f702d000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf702d000 0x0 0x1000>;
+			interrupts = <0 69 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 135 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio18: gpio@f702e000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf702e000 0x0 0x1000>;
+			interrupts = <0 70 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 143 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		gpio19: gpio@f702f000 {
+			compatible = "arm,pl061", "arm,primecell";
+			reg = <0x0 0xf702f000 0x0 0x1000>;
+			interrupts = <0 71 0x4>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges = <&pmx0 0 151 8>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			clocks = <&ao_ctrl 2>;
+			clock-names = "apb_pclk";
+		};
+
+		spi0: spi@f7106000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x0 0xf7106000 0x0 0x1000>;
+			interrupts = <0 50 4>;
+			bus-id = <0>;
+			enable-dma = <0>;
+			clocks = <&sys_ctrl HI6220_SPI_CLK>;
+			clock-names = "apb_pclk";
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi0_pmx_func &spi0_cfg_func>;
+			num-cs = <1>;
+			cs-gpios = <&gpio6 2 0>;
+			status = "disabled";
+		};
+
+		i2c0: i2c@f7100000 {
+			compatible = "snps,designware-i2c";
+			reg = <0x0 0xf7100000 0x0 0x1000>;
+			interrupts = <0 44 4>;
+			clocks = <&sys_ctrl HI6220_I2C0_CLK>;
+			i2c-sda-hold-time-ns = <300>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c0_pmx_func &i2c0_cfg_func>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@f7101000 {
+			compatible = "snps,designware-i2c";
+			reg = <0x0 0xf7101000 0x0 0x1000>;
+			clocks = <&sys_ctrl HI6220_I2C1_CLK>;
+			interrupts = <0 45 4>;
+			i2c-sda-hold-time-ns = <300>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c1_pmx_func &i2c1_cfg_func>;
+			status = "disabled";
+		};
+
+		i2c2: i2c@f7102000 {
+			compatible = "snps,designware-i2c";
+			reg = <0x0 0xf7102000 0x0 0x1000>;
+			clocks = <&sys_ctrl HI6220_I2C2_CLK>;
+			interrupts = <0 46 4>;
+			i2c-sda-hold-time-ns = <300>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c2_pmx_func &i2c2_cfg_func>;
+			status = "disabled";
+		};
+
+		fixed_5v_hub: regulator@0 {
+			compatible = "regulator-fixed";
+			regulator-name = "fixed_5v_hub";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			regulator-boot-on;
+			gpio = <&gpio0 7 0>;
+			regulator-always-on;
+		};
+
+		usb_phy: usbphy {
+			compatible = "hisilicon,hi6220-usb-phy";
+			#phy-cells = <0>;
+			phy-supply = <&fixed_5v_hub>;
+			hisilicon,peripheral-syscon = <&sys_ctrl>;
+		};
+
+		usb: usb@f72c0000 {
+			compatible = "hisilicon,hi6220-usb";
+			reg = <0x0 0xf72c0000 0x0 0x40000>;
+			phys = <&usb_phy>;
+			phy-names = "usb2-phy";
+			clocks = <&sys_ctrl HI6220_USBOTG_HCLK>;
+			clock-names = "otg";
+			dr_mode = "otg";
+			g-use-dma;
+			g-rx-fifo-size = <512>;
+			g-np-tx-fifo-size = <128>;
+			g-tx-fifo-size = <128 128 128 128 128 128>;
+			interrupts = <0 77 0x4>;
+		};
+
+		mailbox: mailbox@f7510000 {
+			compatible = "hisilicon,hi6220-mbox";
+			reg = <0x0 0xf7510000 0x0 0x1000>, /* IPC_S */
+			      <0x0 0x06dff800 0x0 0x0800>; /* Mailbox buffer */
+			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+			#mbox-cells = <3>;
+		};
+
+		dwmmc_0: dwmmc0@f723d000 {
+			compatible = "hisilicon,hi6220-dw-mshc";
+			num-slots = <0x1>;
+			cap-mmc-highspeed;
+			non-removable;
+			reg = <0x0 0xf723d000 0x0 0x1000>;
+			interrupts = <0x0 0x48 0x4>;
+			clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
+			clock-names = "ciu", "biu";
+			bus-width = <0x8>;
+			vmmc-supply = <&ldo19>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&emmc_pmx_func &emmc_clk_cfg_func
+				     &emmc_cfg_func &emmc_rst_cfg_func>;
+		};
+
+		dwmmc_1: dwmmc1@f723e000 {
+			compatible = "hisilicon,hi6220-dw-mshc";
+			num-slots = <0x1>;
+			card-detect-delay = <200>;
+			hisilicon,peripheral-syscon = <&ao_ctrl>;
+			cap-sd-highspeed;
+			reg = <0x0 0xf723e000 0x0 0x1000>;
+			interrupts = <0x0 0x49 0x4>;
+			#address-cells = <0x1>;
+			#size-cells = <0x0>;
+			clocks = <&sys_ctrl 4>, <&sys_ctrl 3>;
+			clock-names = "ciu", "biu";
+			vqmmc-supply = <&ldo7>;
+			vmmc-supply = <&ldo10>;
+			bus-width = <0x4>;
+			disable-wp;
+			cd-gpios = <&gpio1 0 1>;
+			pinctrl-names = "default", "idle";
+			pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func &sd_cfg_func>;
+			pinctrl-1 = <&sd_pmx_idle &sd_clk_cfg_idle &sd_cfg_idle>;
+		};
+
+		dwmmc_2: dwmmc2@f723f000 {
+			compatible = "hisilicon,hi6220-dw-mshc";
+			num-slots = <0x1>;
+			reg = <0x0 0xf723f000 0x0 0x1000>;
+			interrupts = <0x0 0x4a 0x4>;
+			clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
+			clock-names = "ciu", "biu";
+			bus-width = <0x4>;
+			broken-cd;
+			pinctrl-names = "default", "idle";
+			pinctrl-0 = <&sdio_pmx_func &sdio_clk_cfg_func &sdio_cfg_func>;
+			pinctrl-1 = <&sdio_pmx_idle &sdio_clk_cfg_idle &sdio_cfg_idle>;
+		};
+
+		tsensor: tsensor@0,f7030700 {
+			compatible = "hisilicon,tsensor";
+			reg = <0x0 0xf7030700 0x0 0x1000>;
+			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&sys_ctrl 22>;
+			clock-names = "thermal_clk";
+			#thermal-sensor-cells = <1>;
+		};
+
+		thermal-zones {
+
+			cls0: cls0 {
+				polling-delay = <1000>;
+				polling-delay-passive = <100>;
+				sustainable-power = <3326>;
+
+				/* sensor ID */
+				thermal-sensors = <&tsensor 2>;
+
+				trips {
+					threshold: trip-point@0 {
+						temperature = <65000>;
+						hysteresis = <0>;
+						type = "passive";
+					};
+
+					target: trip-point@1 {
+						temperature = <75000>;
+						hysteresis = <0>;
+						type = "passive";
+					};
+				};
+
+				cooling-maps {
+					map0 {
+						trip = <&target>;
+						cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+					};
+				};
+			};
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi b/arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi
new file mode 100644
index 0000000..0916e84
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hikey-pinctrl.dtsi
@@ -0,0 +1,705 @@
+/*
+ * pinctrl dts file for Hisilicon HiKey development board
+ *
+ */
+#include <dt-bindings/pinctrl/hisi.h>
+
+/ {
+	soc {
+		pmx0: pinmux@f7010000 {
+			pinctrl-names = "default";
+			pinctrl-0 = <
+				&boot_sel_pmx_func
+				&hkadc_ssi_pmx_func
+				&codec_clk_pmx_func
+				&pwm_in_pmx_func
+				&bl_pwm_pmx_func
+				>;
+
+			boot_sel_pmx_func: boot_sel_pmx_func {
+				pinctrl-single,pins = <
+					0x0    MUX_M0	/* BOOT_SEL     (IOMG000) */
+				>;
+			};
+
+			emmc_pmx_func: emmc_pmx_func {
+				pinctrl-single,pins = <
+					0x100  MUX_M0	/* EMMC_CLK     (IOMG064) */
+					0x104  MUX_M0	/* EMMC_CMD     (IOMG065) */
+					0x108  MUX_M0	/* EMMC_DATA0   (IOMG066) */
+					0x10c  MUX_M0	/* EMMC_DATA1   (IOMG067) */
+					0x110  MUX_M0	/* EMMC_DATA2   (IOMG068) */
+					0x114  MUX_M0	/* EMMC_DATA3   (IOMG069) */
+					0x118  MUX_M0	/* EMMC_DATA4   (IOMG070) */
+					0x11c  MUX_M0	/* EMMC_DATA5   (IOMG071) */
+					0x120  MUX_M0	/* EMMC_DATA6   (IOMG072) */
+					0x124  MUX_M0	/* EMMC_DATA7   (IOMG073) */
+				>;
+			};
+
+			sd_pmx_func: sd_pmx_func {
+				pinctrl-single,pins = <
+					0xc    MUX_M0	/* SD_CLK       (IOMG003) */
+					0x10   MUX_M0	/* SD_CMD       (IOMG004) */
+					0x14   MUX_M0	/* SD_DATA0     (IOMG005) */
+					0x18   MUX_M0	/* SD_DATA1     (IOMG006) */
+					0x1c   MUX_M0	/* SD_DATA2     (IOMG007) */
+					0x20   MUX_M0	/* SD_DATA3     (IOMG008) */
+				>;
+			};
+			sd_pmx_idle: sd_pmx_idle {
+				pinctrl-single,pins = <
+					0xc    MUX_M1	/* SD_CLK       (IOMG003) */
+					0x10   MUX_M1	/* SD_CMD       (IOMG004) */
+					0x14   MUX_M1	/* SD_DATA0     (IOMG005) */
+					0x18   MUX_M1	/* SD_DATA1     (IOMG006) */
+					0x1c   MUX_M1	/* SD_DATA2     (IOMG007) */
+					0x20   MUX_M1	/* SD_DATA3     (IOMG008) */
+				>;
+			};
+
+			sdio_pmx_func: sdio_pmx_func {
+				pinctrl-single,pins = <
+					0x128  MUX_M0	/* SDIO_CLK     (IOMG074) */
+					0x12c  MUX_M0	/* SDIO_CMD     (IOMG075) */
+					0x130  MUX_M0	/* SDIO_DATA0   (IOMG076) */
+					0x134  MUX_M0	/* SDIO_DATA1   (IOMG077) */
+					0x138  MUX_M0	/* SDIO_DATA2   (IOMG078) */
+					0x13c  MUX_M0	/* SDIO_DATA3   (IOMG079) */
+				>;
+			};
+			sdio_pmx_idle: sdio_pmx_idle {
+				pinctrl-single,pins = <
+					0x128  MUX_M1	/* SDIO_CLK     (IOMG074) */
+					0x12c  MUX_M1	/* SDIO_CMD     (IOMG075) */
+					0x130  MUX_M1	/* SDIO_DATA0   (IOMG076) */
+					0x134  MUX_M1	/* SDIO_DATA1   (IOMG077) */
+					0x138  MUX_M1	/* SDIO_DATA2   (IOMG078) */
+					0x13c  MUX_M1	/* SDIO_DATA3   (IOMG079) */
+				>;
+			};
+
+			isp_pmx_func: isp_pmx_func {
+				pinctrl-single,pins = <
+					0x24   MUX_M0	/* ISP_PWDN0    (IOMG009) */
+					0x28   MUX_M0	/* ISP_PWDN1    (IOMG010) */
+					0x2c   MUX_M0	/* ISP_PWDN2    (IOMG011) */
+					0x30   MUX_M1	/* ISP_SHUTTER0 (IOMG012) */
+					0x34   MUX_M1	/* ISP_SHUTTER1 (IOMG013) */
+					0x38   MUX_M1	/* ISP_PWM      (IOMG014) */
+					0x3c   MUX_M0	/* ISP_CCLK0    (IOMG015) */
+					0x40   MUX_M0	/* ISP_CCLK1    (IOMG016) */
+					0x44   MUX_M0	/* ISP_RESETB0  (IOMG017) */
+					0x48   MUX_M0	/* ISP_RESETB1  (IOMG018) */
+					0x4c   MUX_M1	/* ISP_STROBE0  (IOMG019) */
+					0x50   MUX_M1	/* ISP_STROBE1  (IOMG020) */
+					0x54   MUX_M0	/* ISP_SDA0     (IOMG021) */
+					0x58   MUX_M0	/* ISP_SCL0     (IOMG022) */
+					0x5c   MUX_M0	/* ISP_SDA1     (IOMG023) */
+					0x60   MUX_M0	/* ISP_SCL1     (IOMG024) */
+				>;
+			};
+
+			hkadc_ssi_pmx_func: hkadc_ssi_pmx_func {
+				pinctrl-single,pins = <
+					0x68   MUX_M0	/* HKADC_SSI    (IOMG026) */
+				>;
+			};
+
+			codec_clk_pmx_func: codec_clk_pmx_func {
+				pinctrl-single,pins = <
+					0x6c   MUX_M0	/* CODEC_CLK    (IOMG027) */
+				>;
+			};
+
+			codec_pmx_func: codec_pmx_func {
+				pinctrl-single,pins = <
+					0x70   MUX_M1	/* DMIC_CLK     (IOMG028) */
+					0x74   MUX_M0	/* CODEC_SYNC   (IOMG029) */
+					0x78   MUX_M0	/* CODEC_DI     (IOMG030) */
+					0x7c   MUX_M0	/* CODEC_DO     (IOMG031) */
+				>;
+			};
+
+			fm_pmx_func: fm_pmx_func {
+				pinctrl-single,pins = <
+					0x80   MUX_M1	/* FM_XCLK      (IOMG032) */
+					0x84   MUX_M1	/* FM_XFS       (IOMG033) */
+					0x88   MUX_M1	/* FM_DI        (IOMG034) */
+					0x8c   MUX_M1	/* FM_DO        (IOMG035) */
+				>;
+			};
+
+			bt_pmx_func: bt_pmx_func {
+				pinctrl-single,pins = <
+					0x90   MUX_M0	/* BT_XCLK      (IOMG036) */
+					0x94   MUX_M0	/* BT_XFS       (IOMG037) */
+					0x98   MUX_M0	/* BT_DI        (IOMG038) */
+					0x9c   MUX_M0	/* BT_DO        (IOMG039) */
+				>;
+			};
+
+			pwm_in_pmx_func: pwm_in_pmx_func {
+				pinctrl-single,pins = <
+					0xb8   MUX_M1	/* PWM_IN       (IOMG046) */
+				>;
+			};
+
+			bl_pwm_pmx_func: bl_pwm_pmx_func {
+				pinctrl-single,pins = <
+					0xbc   MUX_M1	/* BL_PWM       (IOMG047) */
+				>;
+			};
+
+			uart0_pmx_func: uart0_pmx_func {
+				pinctrl-single,pins = <
+					0xc0   MUX_M0	/* UART0_RXD    (IOMG048) */
+					0xc4   MUX_M0	/* UART0_TXD    (IOMG049) */
+				>;
+			};
+
+			uart1_pmx_func: uart1_pmx_func {
+				pinctrl-single,pins = <
+					0xc8   MUX_M0	/* UART1_CTS_N  (IOMG050) */
+					0xcc   MUX_M0	/* UART1_RTS_N  (IOMG051) */
+					0xd0   MUX_M0	/* UART1_RXD    (IOMG052) */
+					0xd4   MUX_M0	/* UART1_TXD    (IOMG053) */
+				>;
+			};
+
+			uart2_pmx_func: uart2_pmx_func {
+				pinctrl-single,pins = <
+					0xd8   MUX_M0	/* UART2_CTS_N  (IOMG054) */
+					0xdc   MUX_M0	/* UART2_RTS_N  (IOMG055) */
+					0xe0   MUX_M0	/* UART2_RXD    (IOMG056) */
+					0xe4   MUX_M0	/* UART2_TXD    (IOMG057) */
+				>;
+			};
+
+			uart3_pmx_func: uart3_pmx_func {
+				pinctrl-single,pins = <
+					0x180  MUX_M1	/* UART3_CTS_N  (IOMG096) */
+					0x184  MUX_M1	/* UART3_RTS_N  (IOMG097) */
+					0x188  MUX_M1	/* UART3_RXD    (IOMG098) */
+					0x18c  MUX_M1	/* UART3_TXD    (IOMG099) */
+				>;
+			};
+
+			uart4_pmx_func: uart4_pmx_func {
+				pinctrl-single,pins = <
+					0x1d0  MUX_M1	/* UART4_CTS_N  (IOMG116) */
+					0x1d4  MUX_M1	/* UART4_RTS_N  (IOMG117) */
+					0x1d8  MUX_M1	/* UART4_RXD    (IOMG118) */
+					0x1dc  MUX_M1	/* UART4_TXD    (IOMG119) */
+				>;
+			};
+
+			uart5_pmx_func: uart5_pmx_func {
+				pinctrl-single,pins = <
+					0x1c8  MUX_M1	/* UART5_RXD    (IOMG114) */
+					0x1cc  MUX_M1	/* UART5_TXD    (IOMG115) */
+				>;
+			};
+
+			i2c0_pmx_func: i2c0_pmx_func {
+				pinctrl-single,pins = <
+					0xe8   MUX_M0	/* I2C0_SCL     (IOMG058) */
+					0xec   MUX_M0	/* I2C0_SDA     (IOMG059) */
+				>;
+			};
+
+			i2c1_pmx_func: i2c1_pmx_func {
+				pinctrl-single,pins = <
+					0xf0   MUX_M0	/* I2C1_SCL     (IOMG060) */
+					0xf4   MUX_M0	/* I2C1_SDA     (IOMG061) */
+				>;
+			};
+
+			i2c2_pmx_func: i2c2_pmx_func {
+				pinctrl-single,pins = <
+					0xf8   MUX_M0	/* I2C2_SCL     (IOMG062) */
+					0xfc   MUX_M0	/* I2C2_SDA     (IOMG063) */
+				>;
+			};
+
+			spi0_pmx_func: spi0_pmx_func {
+				pinctrl-single,pins = <
+					0x1a0  MUX_M1   /* SPI0_DI      (IOMG104) */
+					0x1a4  MUX_M1	/* SPI0_DO	(IOMG105) */
+					0x1a8  MUX_M1	/* SPI0_CS_N	(IOMG106) */
+					0x1ac  MUX_M1	/* SPI0_CLK	(IOMG107) */
+				>;
+			};
+		};
+
+		pmx1: pinmux@f7010800 {
+
+			pinctrl-names = "default";
+			pinctrl-0 = <
+				&boot_sel_cfg_func
+				&hkadc_ssi_cfg_func
+				&codec_clk_cfg_func
+				&pwm_in_cfg_func
+				&bl_pwm_cfg_func
+				>;
+
+			boot_sel_cfg_func: boot_sel_cfg_func {
+				pinctrl-single,pins = <
+					0x0    0x0	/* BOOT_SEL     (IOCFG000) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_UP   PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			hkadc_ssi_cfg_func: hkadc_ssi_cfg_func {
+				pinctrl-single,pins = <
+					0x6c   0x0	/* HKADC_SSI    (IOCFG027) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			emmc_clk_cfg_func: emmc_clk_cfg_func {
+				pinctrl-single,pins = <
+					0x104  0x0	/* EMMC_CLK     (IOCFG065) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_08MA DRIVE_MASK>;
+			};
+
+			emmc_cfg_func: emmc_cfg_func {
+				pinctrl-single,pins = <
+					0x108  0x0	/* EMMC_CMD     (IOCFG066) */
+					0x10c  0x0	/* EMMC_DATA0   (IOCFG067) */
+					0x110  0x0	/* EMMC_DATA1   (IOCFG068) */
+					0x114  0x0	/* EMMC_DATA2   (IOCFG069) */
+					0x118  0x0	/* EMMC_DATA3   (IOCFG070) */
+					0x11c  0x0	/* EMMC_DATA4   (IOCFG071) */
+					0x120  0x0	/* EMMC_DATA5   (IOCFG072) */
+					0x124  0x0	/* EMMC_DATA6   (IOCFG073) */
+					0x128  0x0	/* EMMC_DATA7   (IOCFG074) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_UP   PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+			};
+
+			emmc_rst_cfg_func: emmc_rst_cfg_func {
+				pinctrl-single,pins = <
+					0x12c  0x0	/* EMMC_RST_N   (IOCFG075) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+			};
+
+			sd_clk_cfg_func: sd_clk_cfg_func {
+				pinctrl-single,pins = <
+					0xc    0x0	/* SD_CLK       (IOCFG003) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_10MA DRIVE_MASK>;
+			};
+			sd_clk_cfg_idle: sd_clk_cfg_idle {
+				pinctrl-single,pins = <
+					0xc    0x0	/* SD_CLK       (IOCFG003) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			sd_cfg_func: sd_cfg_func {
+				pinctrl-single,pins = <
+					0x10   0x0	/* SD_CMD       (IOCFG004) */
+					0x14   0x0	/* SD_DATA0     (IOCFG005) */
+					0x18   0x0	/* SD_DATA1     (IOCFG006) */
+					0x1c   0x0	/* SD_DATA2     (IOCFG007) */
+					0x20   0x0	/* SD_DATA3     (IOCFG008) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_08MA DRIVE_MASK>;
+			};
+			sd_cfg_idle: sd_cfg_idle {
+				pinctrl-single,pins = <
+					0x10   0x0	/* SD_CMD       (IOCFG004) */
+					0x14   0x0	/* SD_DATA0     (IOCFG005) */
+					0x18   0x0	/* SD_DATA1     (IOCFG006) */
+					0x1c   0x0	/* SD_DATA2     (IOCFG007) */
+					0x20   0x0	/* SD_DATA3     (IOCFG008) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			sdio_clk_cfg_func: sdio_clk_cfg_func {
+				pinctrl-single,pins = <
+					0x134  0x0	/* SDIO_CLK     (IOCFG077) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_08MA DRIVE_MASK>;
+			};
+			sdio_clk_cfg_idle: sdio_clk_cfg_idle {
+				pinctrl-single,pins = <
+					0x134  0x0	/* SDIO_CLK     (IOCFG077) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			sdio_cfg_func: sdio_cfg_func {
+				pinctrl-single,pins = <
+					0x138  0x0	/* SDIO_CMD     (IOCFG078) */
+					0x13c  0x0	/* SDIO_DATA0   (IOCFG079) */
+					0x140  0x0	/* SDIO_DATA1   (IOCFG080) */
+					0x144  0x0	/* SDIO_DATA2   (IOCFG081) */
+					0x148  0x0	/* SDIO_DATA3   (IOCFG082) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_UP   PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+			};
+			sdio_cfg_idle: sdio_cfg_idle {
+				pinctrl-single,pins = <
+					0x138  0x0	/* SDIO_CMD     (IOCFG078) */
+					0x13c  0x0	/* SDIO_DATA0   (IOCFG079) */
+					0x140  0x0	/* SDIO_DATA1   (IOCFG080) */
+					0x144  0x0	/* SDIO_DATA2   (IOCFG081) */
+					0x148  0x0	/* SDIO_DATA3   (IOCFG082) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_UP   PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			isp_cfg_func1: isp_cfg_func1 {
+				pinctrl-single,pins = <
+					0x28   0x0	/* ISP_PWDN0    (IOCFG010) */
+					0x2c   0x0	/* ISP_PWDN1    (IOCFG011) */
+					0x30   0x0	/* ISP_PWDN2    (IOCFG012) */
+					0x34   0x0	/* ISP_SHUTTER0 (IOCFG013) */
+					0x38   0x0	/* ISP_SHUTTER1 (IOCFG014) */
+					0x3c   0x0	/* ISP_PWM      (IOCFG015) */
+					0x40   0x0	/* ISP_CCLK0    (IOCFG016) */
+					0x44   0x0	/* ISP_CCLK1    (IOCFG017) */
+					0x48   0x0	/* ISP_RESETB0  (IOCFG018) */
+					0x4c   0x0	/* ISP_RESETB1  (IOCFG019) */
+					0x50   0x0	/* ISP_STROBE0  (IOCFG020) */
+					0x58   0x0	/* ISP_SDA0     (IOCFG022) */
+					0x5c   0x0	/* ISP_SCL0     (IOCFG023) */
+					0x60   0x0	/* ISP_SDA1     (IOCFG024) */
+					0x64   0x0	/* ISP_SCL1     (IOCFG025) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+			isp_cfg_idle1: isp_cfg_idle1 {
+				pinctrl-single,pins = <
+					0x34   0x0	/* ISP_SHUTTER0 (IOCFG013) */
+					0x38   0x0	/* ISP_SHUTTER1 (IOCFG014) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			isp_cfg_func2: isp_cfg_func2 {
+				pinctrl-single,pins = <
+					0x54   0x0	/* ISP_STROBE1  (IOCFG021) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			codec_clk_cfg_func: codec_clk_cfg_func {
+				pinctrl-single,pins = <
+					0x70   0x0	/* CODEC_CLK    (IOCFG028) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+			};
+			codec_clk_cfg_idle: codec_clk_cfg_idle {
+				pinctrl-single,pins = <
+					0x70   0x0	/* CODEC_CLK    (IOCFG028) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			codec_cfg_func1: codec_cfg_func1 {
+				pinctrl-single,pins = <
+					0x74   0x0	/* DMIC_CLK     (IOCFG029) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			codec_cfg_func2: codec_cfg_func2 {
+				pinctrl-single,pins = <
+					0x78   0x0	/* CODEC_SYNC   (IOCFG030) */
+					0x7c   0x0	/* CODEC_DI     (IOCFG031) */
+					0x80   0x0	/* CODEC_DO     (IOCFG032) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+			};
+			codec_cfg_idle2: codec_cfg_idle2 {
+				pinctrl-single,pins = <
+					0x78   0x0	/* CODEC_SYNC   (IOCFG030) */
+					0x7c   0x0	/* CODEC_DI     (IOCFG031) */
+					0x80   0x0	/* CODEC_DO     (IOCFG032) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			fm_cfg_func: fm_cfg_func {
+				pinctrl-single,pins = <
+					0x84   0x0	/* FM_XCLK      (IOCFG033) */
+					0x88   0x0	/* FM_XFS       (IOCFG034) */
+					0x8c   0x0	/* FM_DI        (IOCFG035) */
+					0x90   0x0	/* FM_DO        (IOCFG036) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			bt_cfg_func: bt_cfg_func {
+				pinctrl-single,pins = <
+					0x94   0x0	/* BT_XCLK      (IOCFG037) */
+					0x98   0x0	/* BT_XFS       (IOCFG038) */
+					0x9c   0x0	/* BT_DI        (IOCFG039) */
+					0xa0   0x0	/* BT_DO        (IOCFG040) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+			bt_cfg_idle: bt_cfg_idle {
+				pinctrl-single,pins = <
+					0x94   0x0	/* BT_XCLK      (IOCFG037) */
+					0x98   0x0	/* BT_XFS       (IOCFG038) */
+					0x9c   0x0	/* BT_DI        (IOCFG039) */
+					0xa0   0x0	/* BT_DO        (IOCFG040) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			pwm_in_cfg_func: pwm_in_cfg_func {
+				pinctrl-single,pins = <
+					0xbc   0x0	/* PWM_IN       (IOCFG047) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			bl_pwm_cfg_func: bl_pwm_cfg_func {
+				pinctrl-single,pins = <
+					0xc0   0x0	/* BL_PWM       (IOCFG048) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			uart0_cfg_func1: uart0_cfg_func1 {
+				pinctrl-single,pins = <
+					0xc4   0x0	/* UART0_RXD    (IOCFG049) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_UP   PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			uart0_cfg_func2: uart0_cfg_func2 {
+				pinctrl-single,pins = <
+					0xc8   0x0	/* UART0_TXD    (IOCFG050) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_04MA DRIVE_MASK>;
+			};
+
+			uart1_cfg_func1: uart1_cfg_func1 {
+				pinctrl-single,pins = <
+					0xcc   0x0	/* UART1_CTS_N  (IOCFG051) */
+					0xd4   0x0	/* UART1_RXD    (IOCFG053) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_UP   PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			uart1_cfg_func2: uart1_cfg_func2 {
+				pinctrl-single,pins = <
+					0xd0   0x0	/* UART1_RTS_N  (IOCFG052) */
+					0xd8   0x0	/* UART1_TXD    (IOCFG054) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			uart2_cfg_func: uart2_cfg_func {
+				pinctrl-single,pins = <
+					0xdc   0x0	/* UART2_CTS_N  (IOCFG055) */
+					0xe0   0x0	/* UART2_RTS_N  (IOCFG056) */
+					0xe4   0x0	/* UART2_RXD    (IOCFG057) */
+					0xe8   0x0	/* UART2_TXD    (IOCFG058) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			uart3_cfg_func: uart3_cfg_func {
+				pinctrl-single,pins = <
+					0x190  0x0	/* UART3_CTS_N  (IOCFG100) */
+					0x194  0x0	/* UART3_RTS_N  (IOCFG101) */
+					0x198  0x0	/* UART3_RXD    (IOCFG102) */
+					0x19c  0x0	/* UART3_TXD    (IOCFG103) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			uart4_cfg_func: uart4_cfg_func {
+				pinctrl-single,pins = <
+					0x1e0  0x0	/* UART4_CTS_N  (IOCFG120) */
+					0x1e4  0x0	/* UART4_RTS_N  (IOCFG121) */
+					0x1e8  0x0	/* UART4_RXD    (IOCFG122) */
+					0x1ec  0x0	/* UART4_TXD    (IOCFG123) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			uart5_cfg_func: uart5_cfg_func {
+				pinctrl-single,pins = <
+					0x1d8  0x0	/* UART5_RXD    (IOCFG118) */
+					0x1dc  0x0	/* UART5_TXD    (IOCFG119) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DOWN PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			i2c0_cfg_func: i2c0_cfg_func {
+				pinctrl-single,pins = <
+					0xec   0x0	/* I2C0_SCL     (IOCFG059) */
+					0xf0   0x0	/* I2C0_SDA     (IOCFG060) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			i2c1_cfg_func: i2c1_cfg_func {
+				pinctrl-single,pins = <
+					0xf4   0x0	/* I2C1_SCL     (IOCFG061) */
+					0xf8   0x0	/* I2C1_SDA     (IOCFG062) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			i2c2_cfg_func: i2c2_cfg_func {
+				pinctrl-single,pins = <
+					0xfc   0x0	/* I2C2_SCL     (IOCFG063) */
+					0x100  0x0	/* I2C2_SDA     (IOCFG064) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			spi0_cfg_func: spi0_cfg_func {
+				pinctrl-single,pins = <
+					0x1b0  0x0	/* SPI0_DI	(IOCFG108) */
+					0x1b4  0x0	/* SPI0_DO	(IOCFG109) */
+					0x1b8  0x0	/* SPI0_CS_N	(IOCFG110) */
+					0x1bc  0x0	/* SPI0_CLK	(IOCFG111) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+		};
+
+		pmx2: pinmux@f8001800 {
+
+			pinctrl-names = "default";
+			pinctrl-0 = <
+				&rstout_n_cfg_func
+				>;
+
+			rstout_n_cfg_func: rstout_n_cfg_func {
+				pinctrl-single,pins = <
+					0x0    0x0	/* RSTOUT_N     (IOCFG000) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			pmu_peri_en_cfg_func: pmu_peri_en_cfg_func {
+				pinctrl-single,pins = <
+					0x4    0x0	/* PMU_PERI_EN  (IOCFG001) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			sysclk0_en_cfg_func: sysclk0_en_cfg_func {
+				pinctrl-single,pins = <
+					0x8    0x0	/* SYSCLK0_EN   (IOCFG002) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+
+			jtag_tdo_cfg_func: jtag_tdo_cfg_func {
+				pinctrl-single,pins = <
+					0xc    0x0	/* JTAG_TDO     (IOCFG003) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_08MA DRIVE_MASK>;
+			};
+
+			rf_reset_cfg_func: rf_reset_cfg_func {
+				pinctrl-single,pins = <
+					0x70   0x0	/* RF_RESET0    (IOCFG028) */
+					0x74   0x0	/* RF_RESET1    (IOCFG029) */
+				>;
+				pinctrl-single,bias-pulldown  = <PULL_DIS  PULL_DOWN PULL_DIS  PULL_DOWN>;
+				pinctrl-single,bias-pullup    = <PULL_DIS  PULL_UP   PULL_DIS  PULL_UP>;
+				pinctrl-single,drive-strength = <DRIVE1_02MA DRIVE_MASK>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hip05-d02.dts b/arch/arm64/boot/dts/hisilicon/hip05-d02.dts
index e9436c0..abba750 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05-d02.dts
+++ b/arch/arm64/boot/dts/hisilicon/hip05-d02.dts
@@ -52,3 +52,37 @@
 &peri_gpio0 {
 	status = "ok";
 };
+
+&lbc {
+	status = "ok";
+	#address-cells = <2>;
+	#size-cells = <1>;
+	ranges = <0 0 0x0 0x90000000 0x08000000>,
+		 <1 0 0x0 0x98000000 0x08000000>;
+
+	nor-flash@0,0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "numonyx,js28f00a", "cfi-flash";
+		reg = <0 0x0 0x08000000>;
+		bank-width = <2>;
+		/* These three partitions may not be used */
+		partition@0 {
+			label = "BIOS";
+			reg = <0x0 0x300000>;
+		};
+		partition@300000 {
+			label = "Linux";
+			reg = <0x300000 0xa00000>;
+		};
+		partition@1000000 {
+			label = "Rootfs";
+			reg = <0x01000000 0x02000000>;
+		};
+	};
+
+	cpld@1,0 {
+		compatible = "hisilicon,hip05-cpld";
+		reg = <1 0x0 0x100>;
+	};
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hip05.dtsi b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
index 6319ff3..bf322ed 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip05.dtsi
@@ -249,24 +249,28 @@
 		its_peri: interrupt-controller@8c000000 {
 			compatible = "arm,gic-v3-its";
 			msi-controller;
+			#msi-cells = <1>;
 			reg = <0x0 0x8c000000 0x0 0x40000>;
 		};
 
 		its_m3: interrupt-controller@a3000000 {
 			compatible = "arm,gic-v3-its";
 			msi-controller;
+			#msi-cells = <1>;
 			reg = <0x0 0xa3000000 0x0 0x40000>;
 		};
 
 		its_pcie: interrupt-controller@b7000000 {
 			compatible = "arm,gic-v3-its";
 			msi-controller;
+			#msi-cells = <1>;
 			reg = <0x0 0xb7000000 0x0 0x40000>;
 		};
 
 		its_dsa: interrupt-controller@c6000000 {
 			compatible = "arm,gic-v3-its";
 			msi-controller;
+			#msi-cells = <1>;
 			reg = <0x0 0xc6000000 0x0 0x40000>;
 		};
 	};
@@ -323,6 +327,12 @@
 			status = "disabled";
 		};
 
+		lbc: localbus@80380000 {
+			compatible = "hisilicon,hisi-localbus", "simple-bus";
+			reg = <0x0 0x80380000 0x0 0x10000>;
+			status = "disabled";
+		};
+
 		peri_gpio0: gpio@802e0000 {
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
index 933cba3..b6a130c 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
@@ -24,17 +24,19 @@
 	};
 
 	dsaf0: dsa@c7000000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
 		compatible = "hisilicon,hns-dsaf-v1";
 		mode = "6port-16rss";
 		interrupt-parent = <&mbigen_dsa>;
 
-		reg = <0x0 0xC0000000 0x0 0x420000
-		       0x0 0xC2000000 0x0 0x300000
-		       0x0 0xc5000000 0x0 0x890000
+		reg = <0x0 0xc5000000 0x0 0x890000
 		       0x0 0xc7000000 0x0 0x60000
 		       >;
 
-		phy-handle = <0 0 0 0 &soc0_phy0 &soc0_phy1 0 0>;
+		reg-names = "ppe-base","dsaf-base";
+		subctrl-syscon = <&dsaf_subctrl>;
+		reset-field-offset = <0>;
 		interrupts = <
 			/* [14] ge fifo err 8 / xge 6**/
 			149 0x4 150 0x4 151 0x4 152 0x4
@@ -122,12 +124,31 @@
 		buf-size = <4096>;
 		desc-num = <1024>;
 		dma-coherent;
+
+		port@0 {
+			reg = <0>;
+			serdes-syscon = <&serdes_ctrl0>;
+		};
+		port@1 {
+			reg = <1>;
+			serdes-syscon = <&serdes_ctrl0>;
+		};
+		port@4 {
+			reg = <4>;
+			phy-handle = <&soc0_phy0>;
+			serdes-syscon = <&serdes_ctrl1>;
+		};
+		port@5 {
+			reg = <5>;
+			phy-handle = <&soc0_phy1>;
+			serdes-syscon = <&serdes_ctrl1>;
+		};
 	};
 
 	eth0: ethernet@0{
 		compatible = "hisilicon,hns-nic-v1";
 		ae-handle = <&dsaf0>;
-		port-id = <0>;
+		port-idx-in-ae = <0>;
 		local-mac-address = [00 00 00 01 00 58];
 		status = "disabled";
 		dma-coherent;
@@ -135,56 +156,25 @@
 	eth1: ethernet@1{
 		compatible = "hisilicon,hns-nic-v1";
 		ae-handle = <&dsaf0>;
-		port-id = <1>;
+		port-idx-in-ae = <1>;
+		local-mac-address = [00 00 00 01 00 59];
 		status = "disabled";
 		dma-coherent;
 	};
-	eth2: ethernet@2{
+	eth2: ethernet@4{
 		compatible = "hisilicon,hns-nic-v1";
 		ae-handle = <&dsaf0>;
-		port-id = <2>;
+		port-idx-in-ae = <4>;
 		local-mac-address = [00 00 00 01 00 5a];
 		status = "disabled";
 		dma-coherent;
 	};
-	eth3: ethernet@3{
+	eth3: ethernet@5{
 		compatible = "hisilicon,hns-nic-v1";
 		ae-handle = <&dsaf0>;
-		port-id = <3>;
+		port-idx-in-ae = <5>;
 		local-mac-address = [00 00 00 01 00 5b];
 		status = "disabled";
 		dma-coherent;
 	};
-	eth4: ethernet@4{
-		compatible = "hisilicon,hns-nic-v1";
-		ae-handle = <&dsaf0>;
-		port-id = <4>;
-		local-mac-address = [00 00 00 01 00 5c];
-		status = "disabled";
-		dma-coherent;
-	};
-	eth5: ethernet@5{
-		compatible = "hisilicon,hns-nic-v1";
-		ae-handle = <&dsaf0>;
-		port-id = <5>;
-		local-mac-address = [00 00 00 01 00 5d];
-		status = "disabled";
-		dma-coherent;
-	};
-	eth6: ethernet@6{
-		compatible = "hisilicon,hns-nic-v1";
-		ae-handle = <&dsaf0>;
-		port-id = <6>;
-		local-mac-address = [00 00 00 01 00 5e];
-		status = "disabled";
-		dma-coherent;
-	};
-	eth7: ethernet@7{
-		compatible = "hisilicon,hns-nic-v1";
-		ae-handle = <&dsaf0>;
-		port-id = <7>;
-		local-mac-address = [00 00 00 01 00 5f];
-		status = "disabled";
-		dma-coherent;
-	};
 };
diff --git a/arch/arm64/boot/dts/hisilicon/hip06-d03.dts b/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
new file mode 100644
index 0000000..f3e5323
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
@@ -0,0 +1,34 @@
+/**
+ * dts file for Hisilicon D03 Development Board
+ *
+ * Copyright (C) 2016 Hisilicon Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/dts-v1/;
+
+#include "hip06.dtsi"
+
+/ {
+	model = "Hisilicon Hip06 D03 Development Board";
+	compatible = "hisilicon,hip06-d03";
+
+	memory@00000000 {
+		device_type = "memory";
+		reg = <0x0 0x00000000 0x0 0x40000000>;
+	};
+
+	chosen { };
+};
+
+&usb_ohci {
+	status = "ok";
+};
+
+&usb_ehci {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
new file mode 100644
index 0000000..5927bc4
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
@@ -0,0 +1,307 @@
+/**
+ * dts file for Hisilicon D03 Development Board
+ *
+ * Copyright (C) 2016 Hisilicon Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "hisilicon,hip06-d03";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&cpu0>;
+				};
+				core1 {
+					cpu = <&cpu1>;
+				};
+				core2 {
+					cpu = <&cpu2>;
+				};
+				core3 {
+					cpu = <&cpu3>;
+				};
+			};
+			cluster1 {
+				core0 {
+					cpu = <&cpu4>;
+				};
+				core1 {
+					cpu = <&cpu5>;
+				};
+				core2 {
+					cpu = <&cpu6>;
+				};
+				core3 {
+					cpu = <&cpu7>;
+				};
+			};
+			cluster2 {
+				core0 {
+					cpu = <&cpu8>;
+				};
+				core1 {
+					cpu = <&cpu9>;
+				};
+				core2 {
+					cpu = <&cpu10>;
+				};
+				core3 {
+					cpu = <&cpu11>;
+				};
+			};
+			cluster3 {
+				core0 {
+					cpu = <&cpu12>;
+				};
+				core1 {
+					cpu = <&cpu13>;
+				};
+				core2 {
+					cpu = <&cpu14>;
+				};
+				core3 {
+					cpu = <&cpu15>;
+				};
+			};
+		};
+
+		cpu0: cpu@10000 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10000>;
+			enable-method = "psci";
+			next-level-cache = <&cluster0_l2>;
+		};
+
+		cpu1: cpu@10001 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10001>;
+			enable-method = "psci";
+			next-level-cache = <&cluster0_l2>;
+		};
+
+		cpu2: cpu@10002 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10002>;
+			enable-method = "psci";
+			next-level-cache = <&cluster0_l2>;
+		};
+
+		cpu3: cpu@10003 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10003>;
+			enable-method = "psci";
+			next-level-cache = <&cluster0_l2>;
+		};
+
+		cpu4: cpu@10100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10100>;
+			enable-method = "psci";
+			next-level-cache = <&cluster1_l2>;
+		};
+
+		cpu5: cpu@10101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10101>;
+			enable-method = "psci";
+			next-level-cache = <&cluster1_l2>;
+		};
+
+		cpu6: cpu@10102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10102>;
+			enable-method = "psci";
+			next-level-cache = <&cluster1_l2>;
+		};
+
+		cpu7: cpu@10103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10103>;
+			enable-method = "psci";
+			next-level-cache = <&cluster1_l2>;
+		};
+
+		cpu8: cpu@10200 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10200>;
+			enable-method = "psci";
+			next-level-cache = <&cluster2_l2>;
+		};
+
+		cpu9: cpu@10201 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10201>;
+			enable-method = "psci";
+			next-level-cache = <&cluster2_l2>;
+		};
+
+		cpu10: cpu@10202 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10202>;
+			enable-method = "psci";
+			next-level-cache = <&cluster2_l2>;
+		};
+
+		cpu11: cpu@10203 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10203>;
+			enable-method = "psci";
+			next-level-cache = <&cluster2_l2>;
+		};
+
+		cpu12: cpu@10300 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10300>;
+			enable-method = "psci";
+			next-level-cache = <&cluster3_l2>;
+		};
+
+		cpu13: cpu@10301 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10301>;
+			enable-method = "psci";
+			next-level-cache = <&cluster3_l2>;
+		};
+
+		cpu14: cpu@10302 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10302>;
+			enable-method = "psci";
+			next-level-cache = <&cluster3_l2>;
+		};
+
+		cpu15: cpu@10303 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0x10303>;
+			enable-method = "psci";
+			next-level-cache = <&cluster3_l2>;
+		};
+
+		cluster0_l2: l2-cache0 {
+			compatible = "cache";
+		};
+
+		cluster1_l2: l2-cache1 {
+			compatible = "cache";
+		};
+
+		cluster2_l2: l2-cache2 {
+			compatible = "cache";
+		};
+
+		cluster3_l2: l2-cache3 {
+			compatible = "cache";
+		};
+	};
+
+	gic: interrupt-controller@4d000000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		interrupt-controller;
+		#redistributor-regions = <1>;
+		redistributor-stride = <0x0 0x30000>;
+		reg = <0x0 0x4d000000 0 0x10000>,	/* GICD */
+		      <0x0 0x4d100000 0 0x300000>,	/* GICR */
+		      <0x0 0xfe000000 0 0x10000>,	/* GICC */
+		      <0x0 0xfe010000 0 0x10000>,       /* GICH */
+		      <0x0 0xfe020000 0 0x10000>;       /* GICV */
+		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+		its_dsa: interrupt-controller@c6000000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			#msi-cells = <1>;
+			reg = <0x0 0xc6000000 0x0 0x40000>;
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	pmu {
+		compatible = "arm,cortex-a57-pmu";
+		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	mbigen_pcie@a0080000 {
+		compatible = "hisilicon,mbigen-v2";
+		reg = <0x0 0xa0080000 0x0 0x10000>;
+
+		mbigen_usb: intc_usb {
+			msi-parent = <&its_dsa 0x40080>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			num-pins = <2>;
+		};
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		usb_ohci: ohci@a7030000 {
+			compatible = "generic-ohci";
+			reg = <0x0 0xa7030000 0x0 0x10000>;
+			interrupt-parent = <&mbigen_usb>;
+			interrupts = <64 4>;
+			dma-coherent;
+			status = "disabled";
+		};
+
+		usb_ehci: ehci@a7020000 {
+			compatible = "generic-ehci";
+			reg = <0x0 0xa7020000 0x0 0x10000>;
+			interrupt-parent = <&mbigen_usb>;
+			interrupts = <65 4>;
+			dma-coherent;
+			status = "disabled";
+		};
+	};
+
+};
diff --git a/arch/arm64/boot/dts/lg/Makefile b/arch/arm64/boot/dts/lg/Makefile
new file mode 100644
index 0000000..b0cc649
--- /dev/null
+++ b/arch/arm64/boot/dts/lg/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_LG1K) += lg1312-ref.dtb
+
+always		:= $(dtb-y)
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/lg/lg1312-ref.dts b/arch/arm64/boot/dts/lg/lg1312-ref.dts
new file mode 100644
index 0000000..6d78d6b
--- /dev/null
+++ b/arch/arm64/boot/dts/lg/lg1312-ref.dts
@@ -0,0 +1,36 @@
+/*
+ * dts file for lg1312 Reference Board.
+ *
+ * Copyright (C) 2016, LG Electronics
+ */
+
+/dts-v1/;
+
+#include "lg1312.dtsi"
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <1>;
+
+	model = "LG Electronics, DTV SoC LG1312 Reference Board";
+	compatible = "lge,lg1312-ref", "lge,lg1312";
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+		serial2 = &uart2;
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x00000000 0x20000000>;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
new file mode 100644
index 0000000..3a4e9a2
--- /dev/null
+++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
@@ -0,0 +1,351 @@
+/*
+ * dts file for lg1312 SoC
+ *
+ * Copyright (C) 2016, LG Electronics
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	compatible = "lge,lg1312";
+	interrupt-parent = <&gic>;
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x0>;
+			next-level-cache = <&L2_0>;
+		};
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x1>;
+			enable-method = "psci";
+			next-level-cache = <&L2_0>;
+		};
+		cpu2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x2>;
+			enable-method = "psci";
+			next-level-cache = <&L2_0>;
+		};
+		cpu3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x3>;
+			enable-method = "psci";
+			next-level-cache = <&L2_0>;
+		};
+		L2_0: l2-cache0 {
+			compatible = "cache";
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-0.2", "arm,psci";
+		method = "smc";
+		cpu_suspend = <0x84000001>;
+		cpu_off = <0x84000002>;
+		cpu_on = <0x84000003>;
+	};
+
+	gic: interrupt-controller@c0001000 {
+		#interrupt-cells = <3>;
+		compatible = "arm,gic-400";
+		interrupt-controller;
+		reg = <0x0 0xc0001000 0x1000>,
+		      <0x0 0xc0002000 0x2000>,
+		      <0x0 0xc0004000 0x2000>,
+		      <0x0 0xc0006000 0x2000>;
+	};
+
+	pmu {
+		compatible = "arm,cortex-a53-pmu";
+		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>,
+				     <&cpu1>,
+				     <&cpu2>,
+				     <&cpu3>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0x0f) |
+			      IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14 (GIC_CPU_MASK_RAW(0x0f) |
+			      IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11 (GIC_CPU_MASK_RAW(0x0f) |
+			      IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10 (GIC_CPU_MASK_RAW(0x0f) |
+			      IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	clk_bus: clk_bus {
+		#clock-cells = <0>;
+
+		compatible = "fixed-clock";
+		clock-frequency = <198000000>;
+		clock-output-names = "BUSCLK";
+	};
+
+	soc {
+		#address-cells = <2>;
+		#size-cells = <1>;
+
+		compatible = "simple-bus";
+		interrupt-parent = <&gic>;
+		ranges;
+
+		eth0: ethernet@c1b00000 {
+			compatible = "cdns,gem";
+			reg = <0x0 0xc1b00000 0x1000>;
+			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_bus>, <&clk_bus>;
+			clock-names = "hclk", "pclk";
+			phy-mode = "rmii";
+			/* Filled in by boot */
+			mac-address = [ 00 00 00 00 00 00 ];
+		};
+	};
+
+	amba {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		#interrupt-cells = <3>;
+
+		compatible = "arm,amba-bus";
+		interrupt-parent = <&gic>;
+		ranges;
+
+		timers: timer@fd100000 {
+			compatible = "arm,sp804";
+			reg = <0x0 0xfd100000 0x1000>;
+			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+		};
+		wdog: watchdog@fd200000 {
+			compatible = "arm,sp805", "arm,primecell";
+			reg = <0x0 0xfd200000 0x1000>;
+			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+		};
+		uart0: serial@fe000000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x0 0xfe000000 0x1000>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		uart1: serial@fe100000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x0 0xfe100000 0x1000>;
+			interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		uart2: serial@fe200000 {
+			compatible = "arm,pl011", "arm,primecell";
+			reg = <0x0 0xfe200000 0x1000>;
+			interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		spi0: ssp@fe800000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x0 0xfe800000 0x1000>;
+			interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+		};
+		spi1: ssp@fe900000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x0 0xfe900000 0x1000>;
+			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+		};
+		dmac0: dma@c1128000 {
+			compatible = "arm,pl330", "arm,primecell";
+			reg = <0x0 0xc1128000 0x1000>;
+			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+		};
+		gpio0: gpio@fd400000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd400000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio1: gpio@fd410000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd410000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio2: gpio@fd420000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd420000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio3: gpio@fd430000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd430000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+		};
+		gpio4: gpio@fd440000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd440000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio5: gpio@fd450000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd450000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio6: gpio@fd460000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd460000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio7: gpio@fd470000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd470000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio8: gpio@fd480000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd480000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio9: gpio@fd490000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd490000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio10: gpio@fd4a0000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd4a0000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio11: gpio@fd4b0000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd4b0000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+		};
+		gpio12: gpio@fd4c0000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd4c0000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio13: gpio@fd4d0000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd4d0000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio14: gpio@fd4e0000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd4e0000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio15: gpio@fd4f0000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd4f0000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio16: gpio@fd500000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd500000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+			status="disabled";
+		};
+		gpio17: gpio@fd510000 {
+			#gpio-cells = <2>;
+			compatible = "arm,pl061", "arm,primecell";
+			gpio-controller;
+			reg = <0x0 0xfd510000 0x1000>;
+			clocks = <&clk_bus>;
+			clock-names = "apb_pclk";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
index 3590501..86110a6 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
@@ -60,27 +60,19 @@
 		device_type = "memory";
 		reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
 	};
-
-	soc {
-		internal-regs {
-			/*
-			* Exported on the micro USB connector CON32
-			* through an FTDI
-			*/
-			uart0: serial@12000 {
-				status = "okay";
-			};
-
-			/* CON31 */
-			usb3@58000 {
-				status = "okay";
-			};
-
-			/* CON3 */
-			sata@e0000 {
-			       status = "okay";
-			};
-		};
-	};
 };
 
+/* CON3 */
+&sata {
+	status = "okay";
+};
+
+/* Exported on the micro USB connector CON32 through an FTDI */
+&uart0 {
+	status = "okay";
+};
+
+/* CON31 */
+&usb3 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-372x.dtsi b/arch/arm64/boot/dts/marvell/armada-372x.dtsi
index f292a00..5120296 100644
--- a/arch/arm64/boot/dts/marvell/armada-372x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-372x.dtsi
@@ -59,5 +59,4 @@
 			enable-method = "psci";
 		};
 	};
-
 };
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index ba9df7f..9e2efb8 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -105,14 +105,28 @@
 				status = "disabled";
 			};
 
-			usb3@58000 {
-				compatible = "generic-xhci";
+			usb3: usb@58000 {
+				compatible = "marvell,armada3700-xhci",
+				"generic-xhci";
 				reg = <0x58000 0x4000>;
 				interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
 				status = "disabled";
 			};
 
-			sata@e0000 {
+			xor@60900 {
+				compatible = "marvell,armada-3700-xor";
+				reg = <0x60900 0x100
+				       0x60b00 0x100>;
+
+				xor10 {
+					interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+				};
+				xor11 {
+					interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+				};
+			};
+
+			sata: sata@e0000 {
 				compatible = "marvell,armada-3700-ahci";
 				reg = <0xe0000 0x2000>;
 				interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/marvell/armada-7020.dtsi b/arch/arm64/boot/dts/marvell/armada-7020.dtsi
index 5257575..975e733 100644
--- a/arch/arm64/boot/dts/marvell/armada-7020.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-7020.dtsi
@@ -46,6 +46,7 @@
  */
 
 #include "armada-ap806-dual.dtsi"
+#include "armada-cp110-master.dtsi"
 
 / {
 	model = "Marvell Armada 7020";
diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
index 064a251..070b589 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
@@ -51,42 +51,98 @@
 	compatible = "marvell,armada7040-db", "marvell,armada7040",
 		     "marvell,armada-ap806-quad", "marvell,armada-ap806";
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory@00000000 {
 		device_type = "memory";
 		reg = <0x0 0x0 0x0 0x80000000>;
 	};
+};
 
-	ap806 {
-		config-space {
-			spi@510600 {
-				status = "okay";
+&i2c0 {
+	status = "okay";
+	clock-frequency = <100000>;
+};
 
-				spi-flash@0 {
-					#address-cells = <1>;
-					#size-cells = <1>;
-					compatible = "n25q128a13";
-					reg = <0>; /* Chip select 0 */
-					spi-max-frequency = <10000000>;
+&spi0 {
+	status = "okay";
 
-					partition@0 {
-						label = "U-Boot";
-						reg = <0 0x200000>;
-					};
-					partition@400000 {
-						label = "Filesystem";
-						reg = <0x200000 0xce0000>;
-					};
-				};
+	spi-flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <10000000>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			partition@0 {
+				label = "U-Boot";
+				reg = <0 0x200000>;
 			};
-
-			i2c@511000 {
-				status = "okay";
-				clock-frequency = <100000>;
-			};
-
-			serial@512000 {
-				status = "okay";
+			partition@400000 {
+				label = "Filesystem";
+				reg = <0x200000 0xce0000>;
 			};
 		};
 	};
 };
+
+&uart0 {
+	status = "okay";
+};
+
+
+&cpm_pcie2 {
+	status = "okay";
+};
+
+&cpm_i2c0 {
+	status = "okay";
+	clock-frequency = <100000>;
+};
+
+&cpm_spi1 {
+	status = "okay";
+
+	spi-flash@0 {
+		#address-cells = <0x1>;
+		#size-cells = <0x1>;
+		compatible = "jedec,spi-nor";
+		reg = <0x0>;
+		spi-max-frequency = <20000000>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			partition@0 {
+				label = "U-Boot";
+				reg = <0x0 0x200000>;
+			};
+
+			partition@400000 {
+				label = "Filesystem";
+				reg = <0x200000 0xe00000>;
+			};
+		};
+	};
+};
+
+&cpm_sata0 {
+	status = "okay";
+};
+
+&cpm_usb3_0 {
+	status = "okay";
+};
+
+&cpm_usb3_1 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-7040.dtsi b/arch/arm64/boot/dts/marvell/armada-7040.dtsi
index 7a2de8b..78d995d 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-7040.dtsi
@@ -46,6 +46,7 @@
  */
 
 #include "armada-ap806-quad.dtsi"
+#include "armada-cp110-master.dtsi"
 
 / {
 	model = "Marvell Armada 7040";
diff --git a/arch/arm64/boot/dts/marvell/armada-8020.dtsi b/arch/arm64/boot/dts/marvell/armada-8020.dtsi
index 73d69d9..3753c1c 100644
--- a/arch/arm64/boot/dts/marvell/armada-8020.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8020.dtsi
@@ -46,6 +46,7 @@
  */
 
 #include "armada-ap806-dual.dtsi"
+#include "armada-cp110-master.dtsi"
 
 / {
 	model = "Marvell Armada 8020";
diff --git a/arch/arm64/boot/dts/marvell/armada-8040.dtsi b/arch/arm64/boot/dts/marvell/armada-8040.dtsi
index a1406a4..8bd0d8f 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040.dtsi
@@ -46,6 +46,7 @@
  */
 
 #include "armada-ap806-quad.dtsi"
+#include "armada-cp110-master.dtsi"
 
 / {
 	model = "Marvell Armada 8040";
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
index f25c5c1..95a1ff6 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
@@ -68,4 +68,3 @@
 		};
 	};
 };
-
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
index baa7d9a..ba43a43 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
@@ -79,6 +79,4 @@
 			enable-method = "psci";
 		};
 	};
-
 };
-
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 556a92b..20d256b 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -54,12 +54,16 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
 	psci {
 		compatible = "arm,psci-0.2";
 		method = "smc";
 	};
 
-
 	ap806 {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -136,7 +140,7 @@
 				marvell,spi-base = <128>, <136>, <144>, <152>;
 			};
 
-			xor0@400000 {
+			xor@400000 {
 				compatible = "marvell,mv-xor-v2";
 				reg = <0x400000 0x1000>,
 				      <0x410000 0x1000>;
@@ -144,7 +148,7 @@
 				dma-coherent;
 			};
 
-			xor1@420000 {
+			xor@420000 {
 				compatible = "marvell,mv-xor-v2";
 				reg = <0x420000 0x1000>,
 				      <0x430000 0x1000>;
@@ -152,7 +156,7 @@
 				dma-coherent;
 			};
 
-			xor2@440000 {
+			xor@440000 {
 				compatible = "marvell,mv-xor-v2";
 				reg = <0x440000 0x1000>,
 				      <0x450000 0x1000>;
@@ -160,7 +164,7 @@
 				dma-coherent;
 			};
 
-			xor3@460000 {
+			xor@460000 {
 				compatible = "marvell,mv-xor-v2";
 				reg = <0x460000 0x1000>,
 				      <0x470000 0x1000>;
@@ -175,63 +179,51 @@
 				#size-cells = <0>;
 				cell-index = <0>;
 				interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
-				clocks = <&ringclk 2>;
+				clocks = <&ap_syscon 3>;
 				status = "disabled";
 			};
 
 			i2c0: i2c@511000 {
-				compatible = "marvell,mv64xxx-i2c";
+				compatible = "marvell,mv78230-i2c";
 				reg = <0x511000 0x20>;
 				#address-cells = <1>;
 				#size-cells = <0>;
 				interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 				timeout-ms = <1000>;
-				clocks = <&ringclk 2>;
+				clocks = <&ap_syscon 3>;
 				status = "disabled";
 			};
 
-			serial@512000 {
+			uart0: serial@512000 {
 				compatible = "snps,dw-apb-uart";
 				reg = <0x512000 0x100>;
 				reg-shift = <2>;
 				interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 				reg-io-width = <1>;
-				clocks = <&ringclk 2>;
+				clocks = <&ap_syscon 3>;
 				status = "disabled";
 			};
 
-			serial@512100 {
+			uart1: serial@512100 {
 				compatible = "snps,dw-apb-uart";
 				reg = <0x512100 0x100>;
 				reg-shift = <2>;
 				interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
 				reg-io-width = <1>;
-				clocks = <&ringclk 2>;
+				clocks = <&ap_syscon 3>;
 				status = "disabled";
 
 			};
 
-			dfx-server@6f8000 {
-				compatible = "simple-mfd", "syscon";
-				reg = <0x6f8000 0x70000>;
-
-				coreclk: clk@204 {
-					compatible = "marvell,armada-ap806-core-clock";
-					#clock-cells = <1>;
-					clock-output-names = "ddr", "ring", "cpu";
-				};
-
-				ringclk: clk@250 {
-					compatible = "marvell,armada-ap806-ring-clock";
-					#clock-cells = <1>;
-					clock-output-names = "ring-0", "ring-2",
-							     "ring-3", "ring-4",
-							     "ring-5";
-					clocks = <&coreclk 1>;
-				};
+			ap_syscon: system-controller@6f4000 {
+				compatible = "marvell,ap806-system-controller",
+					     "syscon";
+				#clock-cells = <1>;
+				clock-output-names = "ap-cpu-cluster-0",
+						     "ap-cpu-cluster-1",
+						     "ap-fixed", "ap-mss";
+				reg = <0x6f4000 0x1000>;
 			};
 		};
 	};
-
 };
-
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
new file mode 100644
index 0000000..367138b
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2016 Marvell Technology Group Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Device Tree file for Marvell Armada CP110 Master.
+ */
+
+/ {
+	cp110-master {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		compatible = "simple-bus";
+		interrupt-parent = <&gic>;
+		ranges;
+
+		config-space {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "simple-bus";
+			interrupt-parent = <&gic>;
+			ranges = <0x0 0x0 0xf2000000 0x2000000>;
+
+			cpm_syscon0: system-controller@440000 {
+				compatible = "marvell,cp110-system-controller0",
+					     "syscon";
+				reg = <0x440000 0x1000>;
+				#clock-cells = <2>;
+				core-clock-output-names =
+					"cpm-apll", "cpm-ppv2-core", "cpm-eip",
+					"cpm-core", "cpm-nand-core";
+				gate-clock-output-names =
+					"cpm-audio", "cpm-communit", "cpm-nand",
+					"cpm-ppv2", "cpm-sdio", "cpm-mg-domain",
+					"cpm-mg-core", "cpm-xor1", "cpm-xor0",
+					"cpm-gop-dp", "none", "cpm-pcie_x10",
+					"cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor",
+					"cpm-sata", "cpm-sata-usb", "cpm-main",
+					"cpm-sd-mmc", "none", "none",
+					"cpm-slow-io", "cpm-usb3h0", "cpm-usb3h1",
+					"cpm-usb3dev", "cpm-eip150", "cpm-eip197";
+			};
+
+			cpm_sata0: sata@540000 {
+				compatible = "marvell,armada-8k-ahci";
+				reg = <0x540000 0x30000>;
+				interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_syscon0 1 15>;
+				status = "disabled";
+			};
+
+			cpm_usb3_0: usb3@500000 {
+				compatible = "marvell,armada-8k-xhci",
+					     "generic-xhci";
+				reg = <0x500000 0x4000>;
+				dma-coherent;
+				interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_syscon0 1 22>;
+				status = "disabled";
+			};
+
+			cpm_usb3_1: usb3@510000 {
+				compatible = "marvell,armada-8k-xhci",
+					     "generic-xhci";
+				reg = <0x510000 0x4000>;
+				dma-coherent;
+				interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_syscon0 1 23>;
+				status = "disabled";
+			};
+
+			cpm_spi0: spi@700600 {
+				compatible = "marvell,armada-380-spi";
+				reg = <0x700600 0x50>;
+				#address-cells = <0x1>;
+				#size-cells = <0x0>;
+				cell-index = <1>;
+				clocks = <&cpm_syscon0 0 3>;
+				status = "disabled";
+			};
+
+			cpm_spi1: spi@700680 {
+				compatible = "marvell,armada-380-spi";
+				reg = <0x700680 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <2>;
+				clocks = <&cpm_syscon0 1 21>;
+				status = "disabled";
+			};
+
+			cpm_i2c0: i2c@701000 {
+				compatible = "marvell,mv78230-i2c";
+				reg = <0x701000 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_syscon0 1 21>;
+				status = "disabled";
+			};
+
+			cpm_i2c1: i2c@701100 {
+				compatible = "marvell,mv78230-i2c";
+				reg = <0x701100 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_syscon0 1 21>;
+				status = "disabled";
+			};
+		};
+
+		cpm_pcie0: pcie@f2600000 {
+			compatible = "marvell,armada8k-pcie", "snps,dw-pcie";
+			reg = <0 0xf2600000 0 0x10000>,
+			      <0 0xf6f00000 0 0x80000>;
+			reg-names = "ctrl", "config";
+			#address-cells = <3>;
+			#size-cells = <2>;
+			#interrupt-cells = <1>;
+			device_type = "pci";
+			dma-coherent;
+
+			bus-range = <0 0xff>;
+			ranges =
+				/* downstream I/O */
+				<0x81000000 0 0xf9000000 0  0xf9000000 0 0x10000
+				/* non-prefetchable memory */
+				0x82000000 0 0xf6000000 0  0xf6000000 0 0xf00000>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+			num-lanes = <1>;
+			clocks = <&cpm_syscon0 1 13>;
+			status = "disabled";
+		};
+
+		cpm_pcie1: pcie@f2620000 {
+			compatible = "marvell,armada8k-pcie", "snps,dw-pcie";
+			reg = <0 0xf2620000 0 0x10000>,
+			      <0 0xf7f00000 0 0x80000>;
+			reg-names = "ctrl", "config";
+			#address-cells = <3>;
+			#size-cells = <2>;
+			#interrupt-cells = <1>;
+			device_type = "pci";
+			dma-coherent;
+
+			bus-range = <0 0xff>;
+			ranges =
+				/* downstream I/O */
+				<0x81000000 0 0xf9010000 0  0xf9010000 0 0x10000
+				/* non-prefetchable memory */
+				0x82000000 0 0xf7000000 0  0xf7000000 0 0xf00000>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+
+			num-lanes = <1>;
+			clocks = <&cpm_syscon0 1 11>;
+			status = "disabled";
+		};
+
+		cpm_pcie2: pcie@f2640000 {
+			compatible = "marvell,armada8k-pcie", "snps,dw-pcie";
+			reg = <0 0xf2640000 0 0x10000>,
+			      <0 0xf8f00000 0 0x80000>;
+			reg-names = "ctrl", "config";
+			#address-cells = <3>;
+			#size-cells = <2>;
+			#interrupt-cells = <1>;
+			device_type = "pci";
+			dma-coherent;
+
+			bus-range = <0 0xff>;
+			ranges =
+				/* downstream I/O */
+				<0x81000000 0 0xf9020000 0  0xf9020000 0 0x10000
+				/* non-prefetchable memory */
+				0x82000000 0 0xf8000000 0  0xf8000000 0 0xf00000>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+
+			num-lanes = <1>;
+			clocks = <&cpm_syscon0 1 12>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index eab7efc..05f89c4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -125,6 +125,49 @@
 		clock-output-names = "cpum_ck";
 	};
 
+	thermal-zones {
+		cpu_thermal: cpu_thermal {
+			polling-delay-passive = <1000>; /* milliseconds */
+			polling-delay = <1000>; /* milliseconds */
+
+			thermal-sensors = <&thermal>;
+			sustainable-power = <1500>; /* milliwatts */
+
+			trips {
+				threshold: trip-point@0 {
+					temperature = <68000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				target: trip-point@1 {
+					temperature = <85000>;
+					hysteresis = <2000>;
+					type = "passive";
+				};
+
+				cpu_crit: cpu_crit@0 {
+					temperature = <115000>;
+					hysteresis = <2000>;
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map@0 {
+					trip = <&target>;
+					cooling-device = <&cpu0 0 0>;
+					contribution = <1024>;
+				};
+				map@1 {
+					trip = <&target>;
+					cooling-device = <&cpu2 0 0>;
+					contribution = <2048>;
+				};
+			};
+		};
+	};
+
 	timer {
 		compatible = "arm,armv8-timer";
 		interrupt-parent = <&gic>;
@@ -313,6 +356,11 @@
 				(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 		};
 
+		auxadc: auxadc@11001000 {
+			compatible = "mediatek,mt8173-auxadc";
+			reg = <0 0x11001000 0 0x1000>;
+		};
+
 		uart0: serial@11002000 {
 			compatible = "mediatek,mt8173-uart",
 				     "mediatek,mt6577-uart";
@@ -414,6 +462,18 @@
 			status = "disabled";
 		};
 
+		thermal: thermal@1100b000 {
+			#thermal-sensor-cells = <0>;
+			compatible = "mediatek,mt8173-thermal";
+			reg = <0 0x1100b000 0 0x1000>;
+			interrupts = <0 70 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&pericfg CLK_PERI_THERM>, <&pericfg CLK_PERI_AUXADC>;
+			clock-names = "therm", "auxadc";
+			resets = <&pericfg MT8173_PERI_THERM_SW_RST>;
+			mediatek,auxadc = <&auxadc>;
+			mediatek,apmixedsys = <&apmixedsys>;
+		};
+
 		nor_flash: spi@1100d000 {
 			compatible = "mediatek,mt8173-nor";
 			reg = <0 0x1100d000 0 0xe0>;
diff --git a/arch/arm64/boot/dts/nvidia/Makefile b/arch/arm64/boot/dts/nvidia/Makefile
index a7e865d..0f7cdf3 100644
--- a/arch/arm64/boot/dts/nvidia/Makefile
+++ b/arch/arm64/boot/dts/nvidia/Makefile
@@ -2,6 +2,7 @@
 dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-0000.dtb
 dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-2180.dtb
 dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2571.dtb
+dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-smaug.dtb
 
 always		:= $(dtb-y)
 clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
index 62f33fc..759af96 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
@@ -8,19 +8,22 @@
 	compatible = "nvidia,norrin", "nvidia,tegra132", "nvidia,tegra124";
 
 	aliases {
-		rtc0 = "/i2c@0,7000d000/as3722@40";
-		rtc1 = "/rtc@0,7000e000";
+		rtc0 = "/i2c@7000d000/as3722@40";
+		rtc1 = "/rtc@7000e000";
+		serial0 = &uarta;
 	};
 
-	chosen { };
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
 
 	memory {
 		device_type = "memory";
 		reg = <0x0 0x80000000 0x0 0x80000000>;
 	};
 
-	host1x@0,50000000 {
-		hdmi@0,54280000 {
+	host1x@50000000 {
+		hdmi@54280000 {
 			status = "disabled";
 
 			vdd-supply = <&vdd_3v3_hdmi>;
@@ -32,26 +35,26 @@
 				<&gpio TEGRA_GPIO(N, 7) GPIO_ACTIVE_HIGH>;
 		};
 
-		sor@0,54540000 {
+		sor@54540000 {
 			status = "okay";
 
 			nvidia,dpaux = <&dpaux>;
 			nvidia,panel = <&panel>;
 		};
 
-		dpaux: dpaux@0,545c0000 {
+		dpaux: dpaux@545c0000 {
 			vdd-supply = <&vdd_3v3_panel>;
 			status = "okay";
 		};
 	};
 
-	gpu@0,57000000 {
+	gpu@57000000 {
 		status = "okay";
 
 		vdd-supply = <&vdd_gpu>;
 	};
 
-	pinmux@0,70000868 {
+	pinmux@70000868 {
 		pinctrl-names = "default";
 		pinctrl-0 = <&pinmux_default>;
 
@@ -523,21 +526,21 @@
 		};
 	};
 
-	serial@0,70006000 {
+	serial@70006000 {
 		status = "okay";
 	};
 
-	pwm: pwm@0,7000a000 {
+	pwm: pwm@7000a000 {
 		status = "okay";
 	};
 
 	/* HDMI DDC */
-	hdmi_ddc: i2c@0,7000c700 {
+	hdmi_ddc: i2c@7000c700 {
 		status = "okay";
 		clock-frequency = <100000>;
 	};
 
-	i2c@0,7000d000 {
+	i2c@7000d000 {
 		status = "okay";
 		clock-frequency = <400000>;
 
@@ -744,7 +747,7 @@
 		};
 	};
 
-	spi@0,7000d400 {
+	spi@7000d400 {
 		status = "okay";
 
 		ec: cros-ec@0 {
@@ -876,7 +879,7 @@
 		};
 	};
 
-	pmc@0,7000e400 {
+	pmc@7000e400 {
 		nvidia,invert-interrupt;
 		nvidia,suspend-mode = <0>;
 		#wake-cells = <3>;
@@ -890,12 +893,12 @@
 	};
 
 	/* WIFI/BT module */
-	sdhci@0,700b0000 {
+	sdhci@700b0000 {
 		status = "disabled";
 	};
 
 	/* external SD/MMC */
-	sdhci@0,700b0400 {
+	sdhci@700b0400 {
 		cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
 		power-gpios = <&gpio TEGRA_GPIO(R, 0) GPIO_ACTIVE_HIGH>;
 		wp-gpios = <&gpio TEGRA_GPIO(Q, 4) GPIO_ACTIVE_HIGH>;
@@ -905,35 +908,35 @@
 	};
 
 	/* EMMC 4.51 */
-	sdhci@0,700b0600 {
+	sdhci@700b0600 {
 		status = "okay";
 		bus-width = <8>;
 		non-removable;
 	};
 
-	usb@0,7d000000 {
+	usb@7d000000 {
 		status = "okay";
 	};
 
-	usb-phy@0,7d000000 {
+	usb-phy@7d000000 {
 		status = "okay";
 		vbus-supply = <&vdd_usb1_vbus>;
 	};
 
-	usb@0,7d004000 {
+	usb@7d004000 {
 		status = "okay";
 	};
 
-	usb-phy@0,7d004000 {
+	usb-phy@7d004000 {
 		status = "okay";
 		vbus-supply = <&vdd_run_cam>;
 	};
 
-	usb@0,7d008000 {
+	usb@7d008000 {
 		status = "okay";
 	};
 
-	usb-phy@0,7d008000 {
+	usb-phy@7d008000 {
 		status = "okay";
 		vbus-supply = <&vdd_usb3_vbus>;
 	};
@@ -973,7 +976,7 @@
 			linux,input-type = <5>;
 			linux,code = <0>;
 			debounce-interval = <1>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		power {
@@ -981,7 +984,7 @@
 			gpios = <&gpio TEGRA_GPIO(Q, 0) GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
 			debounce-interval = <10>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
index 6e28e41..2013f89 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
@@ -11,7 +11,7 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
-	pcie-controller@0,01003000 {
+	pcie-controller@01003000 {
 		compatible = "nvidia,tegra124-pcie";
 		device_type = "pci";
 		reg = <0x0 0x01003000 0x0 0x00000800   /* PADS registers */
@@ -77,7 +77,7 @@
 		};
 	};
 
-	host1x@0,50000000 {
+	host1x@50000000 {
 		compatible = "nvidia,tegra124-host1x", "simple-bus";
 		reg = <0x0 0x50000000 0x0 0x00034000>;
 		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>, /* syncpt */
@@ -92,7 +92,7 @@
 
 		ranges = <0 0x54000000 0 0x54000000 0 0x01000000>;
 
-		dc@0,54200000 {
+		dc@54200000 {
 			compatible = "nvidia,tegra124-dc";
 			reg = <0x0 0x54200000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
@@ -107,7 +107,7 @@
 			nvidia,head = <0>;
 		};
 
-		dc@0,54240000 {
+		dc@54240000 {
 			compatible = "nvidia,tegra124-dc";
 			reg = <0x0 0x54240000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
@@ -122,7 +122,7 @@
 			nvidia,head = <1>;
 		};
 
-		hdmi@0,54280000 {
+		hdmi@54280000 {
 			compatible = "nvidia,tegra124-hdmi";
 			reg = <0x0 0x54280000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
@@ -134,7 +134,7 @@
 			status = "disabled";
 		};
 
-		sor@0,54540000 {
+		sor@54540000 {
 			compatible = "nvidia,tegra124-sor";
 			reg = <0x0 0x54540000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
@@ -148,7 +148,7 @@
 			status = "disabled";
 		};
 
-		dpaux: dpaux@0,545c0000 {
+		dpaux: dpaux@545c0000 {
 			compatible = "nvidia,tegra124-dpaux";
 			reg = <0x0 0x545c0000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
@@ -161,7 +161,7 @@
 		};
 	};
 
-	gic: interrupt-controller@0,50041000 {
+	gic: interrupt-controller@50041000 {
 		compatible = "arm,cortex-a15-gic";
 		#interrupt-cells = <3>;
 		interrupt-controller;
@@ -174,7 +174,7 @@
 		interrupt-parent = <&gic>;
 	};
 
-	gpu@0,57000000 {
+	gpu@57000000 {
 		compatible = "nvidia,gk20a";
 		reg = <0x0 0x57000000 0x0 0x01000000>,
 		      <0x0 0x58000000 0x0 0x01000000>;
@@ -201,7 +201,7 @@
 		interrupt-parent = <&gic>;
 	};
 
-	timer@0,60005000 {
+	timer@60005000 {
 		compatible = "nvidia,tegra124-timer", "nvidia,tegra20-timer";
 		reg = <0x0 0x60005000 0x0 0x400>;
 		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
@@ -214,7 +214,7 @@
 		clock-names = "timer";
 	};
 
-	tegra_car: clock@0,60006000 {
+	tegra_car: clock@60006000 {
 		compatible = "nvidia,tegra132-car";
 		reg = <0x0 0x60006000 0x0 0x1000>;
 		#clock-cells = <1>;
@@ -222,12 +222,12 @@
 		nvidia,external-memory-controller = <&emc>;
 	};
 
-	flow-controller@0,60007000 {
+	flow-controller@60007000 {
 		compatible = "nvidia,tegra124-flowctrl";
 		reg = <0x0 0x60007000 0x0 0x1000>;
 	};
 
-	actmon@0,6000c800 {
+	actmon@6000c800 {
 		compatible = "nvidia,tegra124-actmon";
 		reg = <0x0 0x6000c800 0x0 0x400>;
 		interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
@@ -238,7 +238,7 @@
 		reset-names = "actmon";
 	};
 
-	gpio: gpio@0,6000d000 {
+	gpio: gpio@6000d000 {
 		compatible = "nvidia,tegra124-gpio", "nvidia,tegra30-gpio";
 		reg = <0x0 0x6000d000 0x0 0x1000>;
 		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
@@ -255,7 +255,7 @@
 		interrupt-controller;
 	};
 
-	apbdma: dma@0,60020000 {
+	apbdma: dma@60020000 {
 		compatible = "nvidia,tegra124-apbdma", "nvidia,tegra148-apbdma";
 		reg = <0x0 0x60020000 0x0 0x1400>;
 		interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
@@ -297,13 +297,13 @@
 		#dma-cells = <1>;
 	};
 
-	apbmisc@0,70000800 {
+	apbmisc@70000800 {
 		compatible = "nvidia,tegra124-apbmisc", "nvidia,tegra20-apbmisc";
 		reg = <0x0 0x70000800 0x0 0x64>,   /* Chip revision */
 		      <0x0 0x7000e864 0x0 0x04>;   /* Strapping options */
 	};
 
-	pinmux: pinmux@0,70000868 {
+	pinmux: pinmux@70000868 {
 		compatible = "nvidia,tegra124-pinmux";
 		reg = <0x0 0x70000868 0x0 0x164>, /* Pad control registers */
 		      <0x0 0x70003000 0x0 0x434>, /* Mux registers */
@@ -315,10 +315,10 @@
 	 * driver and APB DMA based serial driver for higher baudrate
 	 * and performance. To enable the 8250 based driver, the compatible
 	 * is "nvidia,tegra124-uart", "nvidia,tegra20-uart" and to enable
-	 * the APB DMA based serial driver, the comptible is
+	 * the APB DMA based serial driver, the compatible is
 	 * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
 	 */
-	uarta: serial@0,70006000 {
+	uarta: serial@70006000 {
 		compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006000 0x0 0x40>;
 		reg-shift = <2>;
@@ -332,7 +332,7 @@
 		status = "disabled";
 	};
 
-	uartb: serial@0,70006040 {
+	uartb: serial@70006040 {
 		compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006040 0x0 0x40>;
 		reg-shift = <2>;
@@ -346,7 +346,7 @@
 		status = "disabled";
 	};
 
-	uartc: serial@0,70006200 {
+	uartc: serial@70006200 {
 		compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006200 0x0 0x40>;
 		reg-shift = <2>;
@@ -360,7 +360,7 @@
 		status = "disabled";
 	};
 
-	uartd: serial@0,70006300 {
+	uartd: serial@70006300 {
 		compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006300 0x0 0x40>;
 		reg-shift = <2>;
@@ -374,7 +374,7 @@
 		status = "disabled";
 	};
 
-	pwm: pwm@0,7000a000 {
+	pwm: pwm@7000a000 {
 		compatible = "nvidia,tegra124-pwm", "nvidia,tegra20-pwm";
 		reg = <0x0 0x7000a000 0x0 0x100>;
 		#pwm-cells = <2>;
@@ -385,7 +385,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000c000 {
+	i2c@7000c000 {
 		compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000c000 0x0 0x100>;
 		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
@@ -400,7 +400,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000c400 {
+	i2c@7000c400 {
 		compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000c400 0x0 0x100>;
 		interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
@@ -415,7 +415,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000c500 {
+	i2c@7000c500 {
 		compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000c500 0x0 0x100>;
 		interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
@@ -430,7 +430,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000c700 {
+	i2c@7000c700 {
 		compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000c700 0x0 0x100>;
 		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
@@ -445,7 +445,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000d000 {
+	i2c@7000d000 {
 		compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000d000 0x0 0x100>;
 		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
@@ -460,7 +460,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000d100 {
+	i2c@7000d100 {
 		compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000d100 0x0 0x100>;
 		interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
@@ -475,7 +475,7 @@
 		status = "disabled";
 	};
 
-	spi@0,7000d400 {
+	spi@7000d400 {
 		compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
 		reg = <0x0 0x7000d400 0x0 0x200>;
 		interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
@@ -490,7 +490,7 @@
 		status = "disabled";
 	};
 
-	spi@0,7000d600 {
+	spi@7000d600 {
 		compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
 		reg = <0x0 0x7000d600 0x0 0x200>;
 		interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
@@ -505,7 +505,7 @@
 		status = "disabled";
 	};
 
-	spi@0,7000d800 {
+	spi@7000d800 {
 		compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
 		reg = <0x0 0x7000d800 0x0 0x200>;
 		interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
@@ -520,7 +520,7 @@
 		status = "disabled";
 	};
 
-	spi@0,7000da00 {
+	spi@7000da00 {
 		compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
 		reg = <0x0 0x7000da00 0x0 0x200>;
 		interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
@@ -535,7 +535,7 @@
 		status = "disabled";
 	};
 
-	spi@0,7000dc00 {
+	spi@7000dc00 {
 		compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
 		reg = <0x0 0x7000dc00 0x0 0x200>;
 		interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
@@ -550,7 +550,7 @@
 		status = "disabled";
 	};
 
-	spi@0,7000de00 {
+	spi@7000de00 {
 		compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
 		reg = <0x0 0x7000de00 0x0 0x200>;
 		interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
@@ -565,7 +565,7 @@
 		status = "disabled";
 	};
 
-	rtc@0,7000e000 {
+	rtc@7000e000 {
 		compatible = "nvidia,tegra124-rtc", "nvidia,tegra20-rtc";
 		reg = <0x0 0x7000e000 0x0 0x100>;
 		interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
@@ -573,14 +573,14 @@
 		clock-names = "rtc";
 	};
 
-	pmc@0,7000e400 {
+	pmc@7000e400 {
 		compatible = "nvidia,tegra124-pmc";
 		reg = <0x0 0x7000e400 0x0 0x400>;
 		clocks = <&tegra_car TEGRA124_CLK_PCLK>, <&clk32k_in>;
 		clock-names = "pclk", "clk32k_in";
 	};
 
-	fuse@0,7000f800 {
+	fuse@7000f800 {
 		compatible = "nvidia,tegra124-efuse";
 		reg = <0x0 0x7000f800 0x0 0x400>;
 		clocks = <&tegra_car TEGRA124_CLK_FUSE>;
@@ -589,7 +589,7 @@
 		reset-names = "fuse";
 	};
 
-	mc: memory-controller@0,70019000 {
+	mc: memory-controller@70019000 {
 		compatible = "nvidia,tegra132-mc";
 		reg = <0x0 0x70019000 0x0 0x1000>;
 		clocks = <&tegra_car TEGRA124_CLK_MC>;
@@ -600,14 +600,14 @@
 		#iommu-cells = <1>;
 	};
 
-	emc: emc@0,7001b000 {
+	emc: emc@7001b000 {
 		compatible = "nvidia,tegra132-emc", "nvidia,tegra124-emc";
 		reg = <0x0 0x7001b000 0x0 0x1000>;
 
 		nvidia,memory-controller = <&mc>;
 	};
 
-	sata@0,70020000 {
+	sata@70020000 {
 		compatible = "nvidia,tegra124-ahci";
 		reg = <0x0 0x70027000 0x0 0x2000>, /* AHCI */
 		      <0x0 0x70020000 0x0 0x7000>; /* SATA */
@@ -626,7 +626,7 @@
 		status = "disabled";
 	};
 
-	hda@0,70030000 {
+	hda@70030000 {
 		compatible = "nvidia,tegra132-hda", "nvidia,tegra124-hda",
 			     "nvidia,tegra30-hda";
 		reg = <0x0 0x70030000 0x0 0x10000>;
@@ -642,7 +642,7 @@
 		status = "disabled";
 	};
 
-	padctl: padctl@0,7009f000 {
+	padctl: padctl@7009f000 {
 		compatible = "nvidia,tegra132-xusb-padctl",
 			     "nvidia,tegra124-xusb-padctl";
 		reg = <0x0 0x7009f000 0x0 0x1000>;
@@ -682,7 +682,7 @@
 		};
 	};
 
-	sdhci@0,700b0000 {
+	sdhci@700b0000 {
 		compatible = "nvidia,tegra124-sdhci";
 		reg = <0x0 0x700b0000 0x0 0x200>;
 		interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
@@ -693,7 +693,7 @@
 		status = "disabled";
 	};
 
-	sdhci@0,700b0200 {
+	sdhci@700b0200 {
 		compatible = "nvidia,tegra124-sdhci";
 		reg = <0x0 0x700b0200 0x0 0x200>;
 		interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
@@ -704,7 +704,7 @@
 		status = "disabled";
 	};
 
-	sdhci@0,700b0400 {
+	sdhci@700b0400 {
 		compatible = "nvidia,tegra124-sdhci";
 		reg = <0x0 0x700b0400 0x0 0x200>;
 		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
@@ -715,7 +715,7 @@
 		status = "disabled";
 	};
 
-	sdhci@0,700b0600 {
+	sdhci@700b0600 {
 		compatible = "nvidia,tegra124-sdhci";
 		reg = <0x0 0x700b0600 0x0 0x200>;
 		interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
@@ -726,7 +726,7 @@
 		status = "disabled";
 	};
 
-	soctherm: thermal-sensor@0,700e2000 {
+	soctherm: thermal-sensor@700e2000 {
 		compatible = "nvidia,tegra124-soctherm";
 		reg = <0x0 0x700e2000 0x0 0x1000>;
 		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
@@ -738,7 +738,7 @@
 		#thermal-sensor-cells = <1>;
 	};
 
-	ahub@0,70300000 {
+	ahub@70300000 {
 		compatible = "nvidia,tegra124-ahub";
 		reg = <0x0 0x70300000 0x0 0x200>,
 		      <0x0 0x70300800 0x0 0x800>,
@@ -790,7 +790,7 @@
 		#address-cells = <2>;
 		#size-cells = <2>;
 
-		tegra_i2s0: i2s@0,70301000 {
+		tegra_i2s0: i2s@70301000 {
 			compatible = "nvidia,tegra124-i2s";
 			reg = <0x0 0x70301000 0x0 0x100>;
 			nvidia,ahub-cif-ids = <4 4>;
@@ -801,7 +801,7 @@
 			status = "disabled";
 		};
 
-		tegra_i2s1: i2s@0,70301100 {
+		tegra_i2s1: i2s@70301100 {
 			compatible = "nvidia,tegra124-i2s";
 			reg = <0x0 0x70301100 0x0 0x100>;
 			nvidia,ahub-cif-ids = <5 5>;
@@ -812,7 +812,7 @@
 			status = "disabled";
 		};
 
-		tegra_i2s2: i2s@0,70301200 {
+		tegra_i2s2: i2s@70301200 {
 			compatible = "nvidia,tegra124-i2s";
 			reg = <0x0 0x70301200 0x0 0x100>;
 			nvidia,ahub-cif-ids = <6 6>;
@@ -823,7 +823,7 @@
 			status = "disabled";
 		};
 
-		tegra_i2s3: i2s@0,70301300 {
+		tegra_i2s3: i2s@70301300 {
 			compatible = "nvidia,tegra124-i2s";
 			reg = <0x0 0x70301300 0x0 0x100>;
 			nvidia,ahub-cif-ids = <7 7>;
@@ -834,7 +834,7 @@
 			status = "disabled";
 		};
 
-		tegra_i2s4: i2s@0,70301400 {
+		tegra_i2s4: i2s@70301400 {
 			compatible = "nvidia,tegra124-i2s";
 			reg = <0x0 0x70301400 0x0 0x100>;
 			nvidia,ahub-cif-ids = <8 8>;
@@ -846,7 +846,7 @@
 		};
 	};
 
-	usb@0,7d000000 {
+	usb@7d000000 {
 		compatible = "nvidia,tegra124-ehci", "nvidia,tegra30-ehci", "usb-ehci";
 		reg = <0x0 0x7d000000 0x0 0x4000>;
 		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
@@ -859,7 +859,7 @@
 		status = "disabled";
 	};
 
-	phy1: usb-phy@0,7d000000 {
+	phy1: usb-phy@7d000000 {
 		compatible = "nvidia,tegra124-usb-phy", "nvidia,tegra30-usb-phy";
 		reg = <0x0 0x7d000000 0x0 0x4000>,
 		      <0x0 0x7d000000 0x0 0x4000>;
@@ -884,7 +884,7 @@
 		status = "disabled";
 	};
 
-	usb@0,7d004000 {
+	usb@7d004000 {
 		compatible = "nvidia,tegra124-ehci", "nvidia,tegra30-ehci", "usb-ehci";
 		reg = <0x0 0x7d004000 0x0 0x4000>;
 		interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
@@ -897,7 +897,7 @@
 		status = "disabled";
 	};
 
-	phy2: usb-phy@0,7d004000 {
+	phy2: usb-phy@7d004000 {
 		compatible = "nvidia,tegra124-usb-phy", "nvidia,tegra30-usb-phy";
 		reg = <0x0 0x7d004000 0x0 0x4000>,
 		      <0x0 0x7d000000 0x0 0x4000>;
@@ -921,7 +921,7 @@
 		status = "disabled";
 	};
 
-	usb@0,7d008000 {
+	usb@7d008000 {
 		compatible = "nvidia,tegra124-ehci", "nvidia,tegra30-ehci", "usb-ehci";
 		reg = <0x0 0x7d008000 0x0 0x4000>;
 		interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
@@ -934,7 +934,7 @@
 		status = "disabled";
 	};
 
-	phy3: usb-phy@0,7d008000 {
+	phy3: usb-phy@7d008000 {
 		compatible = "nvidia,tegra124-usb-phy", "nvidia,tegra30-usb-phy";
 		reg = <0x0 0x7d008000 0x0 0x4000>,
 		      <0x0 0x7d000000 0x0 0x4000>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index 2b7f889..316c92c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -5,7 +5,7 @@
 	compatible = "nvidia,p2180", "nvidia,tegra210";
 
 	aliases {
-		rtc1 = "/rtc@0,7000e000";
+		rtc1 = "/rtc@7000e000";
 		serial0 = &uarta;
 	};
 
@@ -15,16 +15,16 @@
 	};
 
 	/* debug port */
-	serial@0,70006000 {
+	serial@70006000 {
 		status = "okay";
 	};
 
-	pmc@0,7000e400 {
+	pmc@7000e400 {
 		nvidia,invert-interrupt;
 	};
 
 	/* eMMC */
-	sdhci@0,700b0600 {
+	sdhci@700b0600 {
 		status = "okay";
 		bus-width = <8>;
 		non-removable;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2530.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2530.dtsi
index ece0dec..0ec9257 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2530.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2530.dtsi
@@ -5,31 +5,35 @@
 	compatible = "nvidia,p2530", "nvidia,tegra210";
 
 	aliases {
-		rtc1 = "/rtc@0,7000e000";
+		rtc1 = "/rtc@7000e000";
 		serial0 = &uarta;
 	};
 
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
 	memory {
 		device_type = "memory";
 		reg = <0x0 0x80000000 0x0 0xc0000000>;
 	};
 
 	/* debug port */
-	serial@0,70006000 {
+	serial@70006000 {
 		status = "okay";
 	};
 
-	i2c@0,7000d000 {
+	i2c@7000d000 {
 		status = "okay";
 		clock-frequency = <400000>;
 	};
 
-	pmc@0,7000e400 {
+	pmc@7000e400 {
 		nvidia,invert-interrupt;
 	};
 
 	/* eMMC */
-	sdhci@0,700b0600 {
+	sdhci@700b0600 {
 		status = "okay";
 		bus-width = <8>;
 		non-removable;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2571.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2571.dts
index 58d27dd..576957a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2571.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2571.dts
@@ -7,7 +7,7 @@
 	model = "NVIDIA Tegra210 P2571 reference design";
 	compatible = "nvidia,p2571", "nvidia,tegra210";
 
-	pinmux: pinmux@0,700008d4 {
+	pinmux: pinmux@700008d4 {
 		pinctrl-names = "boot";
 		pinctrl-0 = <&state_boot>;
 
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2595.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2595.dtsi
index f3f9139..e008e33 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2595.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2595.dtsi
@@ -2,7 +2,7 @@
 	model = "NVIDIA Tegra210 P2595 I/O board";
 	compatible = "nvidia,p2595", "nvidia,tegra210";
 
-	pinmux: pinmux@0,700008d4 {
+	pinmux: pinmux@700008d4 {
 		pinctrl-names = "boot";
 		pinctrl-0 = <&state_boot>;
 
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index be3eccb..a2480c0 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1,8 +1,10 @@
+#include <dt-bindings/input/input.h>
+
 / {
 	model = "NVIDIA Tegra210 P2597 I/O board";
 	compatible = "nvidia,p2597", "nvidia,tegra210";
 
-	pinmux: pinmux@0,700008d4 {
+	pinmux: pinmux@700008d4 {
 		pinctrl-names = "boot";
 		pinctrl-0 = <&state_boot>;
 
@@ -1260,11 +1262,35 @@
 	};
 
 	/* MMC/SD */
-	sdhci@0,700b0000 {
+	sdhci@700b0000 {
 		status = "okay";
 		bus-width = <4>;
 		no-1-8-v;
 
 		cd-gpios = <&gpio TEGRA_GPIO(Z, 1) GPIO_ACTIVE_LOW>;
 	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+
+		power {
+			label = "Power";
+			gpios = <&gpio TEGRA_GPIO(X, 5) GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_POWER>;
+			wakeup-source;
+		};
+
+		volume_down {
+			label = "Volume Down";
+			gpios = <&gpio TEGRA_GPIO(Y, 0) GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_VOLUMEDOWN>;
+		};
+
+		volume_up {
+			label = "Volume Up";
+			gpios = <&gpio TEGRA_GPIO(X, 6) GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_VOLUMEUP>;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
new file mode 100644
index 0000000..4d89f4e
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
@@ -0,0 +1,1424 @@
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/pinctrl-tegra.h>
+
+#include "tegra210.dtsi"
+
+/ {
+	model = "Google Pixel C";
+	compatible = "google,smaug-rev8", "google,smaug-rev7",
+		     "google,smaug-rev6", "google,smaug-rev5",
+		     "google,smaug-rev4", "google,smaug-rev3",
+		     "google,smaug-rev1", "google,smaug", "nvidia,tegra210";
+
+	aliases {
+		serial0 = &uarta;
+	};
+
+	chosen {
+		bootargs = "earlycon";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x80000000 0x0 0xc0000000>;
+	};
+
+	pinmux: pinmux@700008d4 {
+		pinctrl-names = "boot";
+		pinctrl-0 = <&state_boot>;
+
+		state_boot: pinmux {
+			pex_l0_rst_n_pa0 {
+				nvidia,pins = "pex_l0_rst_n_pa0";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			pex_l0_clkreq_n_pa1 {
+				nvidia,pins = "pex_l0_clkreq_n_pa1";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			pex_wake_n_pa2 {
+				nvidia,pins = "pex_wake_n_pa2";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			pex_l1_rst_n_pa3 {
+				nvidia,pins = "pex_l1_rst_n_pa3";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			pex_l1_clkreq_n_pa4 {
+				nvidia,pins = "pex_l1_clkreq_n_pa4";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			sata_led_active_pa5 {
+				nvidia,pins = "sata_led_active_pa5";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pa6 {
+				nvidia,pins = "pa6";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap1_fs_pb0 {
+				nvidia,pins = "dap1_fs_pb0";
+				nvidia,function = "i2s1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap1_din_pb1 {
+				nvidia,pins = "dap1_din_pb1";
+				nvidia,function = "i2s1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap1_dout_pb2 {
+				nvidia,pins = "dap1_dout_pb2";
+				nvidia,function = "i2s1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap1_sclk_pb3 {
+				nvidia,pins = "dap1_sclk_pb3";
+				nvidia,function = "i2s1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi2_mosi_pb4 {
+				nvidia,pins = "spi2_mosi_pb4";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi2_miso_pb5 {
+				nvidia,pins = "spi2_miso_pb5";
+				nvidia,function = "rsvd2";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi2_sck_pb6 {
+				nvidia,pins = "spi2_sck_pb6";
+				nvidia,function = "rsvd2";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi2_cs0_pb7 {
+				nvidia,pins = "spi2_cs0_pb7";
+				nvidia,function = "rsvd2";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi1_mosi_pc0 {
+				nvidia,pins = "spi1_mosi_pc0";
+				nvidia,function = "spi1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi1_miso_pc1 {
+				nvidia,pins = "spi1_miso_pc1";
+				nvidia,function = "spi1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi1_sck_pc2 {
+				nvidia,pins = "spi1_sck_pc2";
+				nvidia,function = "spi1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi1_cs0_pc3 {
+				nvidia,pins = "spi1_cs0_pc3";
+				nvidia,function = "spi1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi1_cs1_pc4 {
+				nvidia,pins = "spi1_cs1_pc4";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi4_sck_pc5 {
+				nvidia,pins = "spi4_sck_pc5";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi4_cs0_pc6 {
+				nvidia,pins = "spi4_cs0_pc6";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi4_mosi_pc7 {
+				nvidia,pins = "spi4_mosi_pc7";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spi4_miso_pd0 {
+				nvidia,pins = "spi4_miso_pd0";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart3_tx_pd1 {
+				nvidia,pins = "uart3_tx_pd1";
+				nvidia,function = "uartc";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart3_rx_pd2 {
+				nvidia,pins = "uart3_rx_pd2";
+				nvidia,function = "uartc";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart3_rts_pd3 {
+				nvidia,pins = "uart3_rts_pd3";
+				nvidia,function = "uartc";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart3_cts_pd4 {
+				nvidia,pins = "uart3_cts_pd4";
+				nvidia,function = "uartc";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dmic1_clk_pe0 {
+				nvidia,pins = "dmic1_clk_pe0";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dmic1_dat_pe1 {
+				nvidia,pins = "dmic1_dat_pe1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dmic2_clk_pe2 {
+				nvidia,pins = "dmic2_clk_pe2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dmic2_dat_pe3 {
+				nvidia,pins = "dmic2_dat_pe3";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dmic3_clk_pe4 {
+				nvidia,pins = "dmic3_clk_pe4";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dmic3_dat_pe5 {
+				nvidia,pins = "dmic3_dat_pe5";
+				nvidia,function = "rsvd2";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pe6 {
+				nvidia,pins = "pe6";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pe7 {
+				nvidia,pins = "pe7";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			gen3_i2c_scl_pf0 {
+				nvidia,pins = "gen3_i2c_scl_pf0";
+				nvidia,function = "i2c3";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			gen3_i2c_sda_pf1 {
+				nvidia,pins = "gen3_i2c_sda_pf1";
+				nvidia,function = "i2c3";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			uart2_tx_pg0 {
+				nvidia,pins = "uart2_tx_pg0";
+				nvidia,function = "uartb";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart2_rx_pg1 {
+				nvidia,pins = "uart2_rx_pg1";
+				nvidia,function = "uartb";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart2_rts_pg2 {
+				nvidia,pins = "uart2_rts_pg2";
+				nvidia,function = "rsvd2";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart2_cts_pg3 {
+				nvidia,pins = "uart2_cts_pg3";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			wifi_en_ph0 {
+				nvidia,pins = "wifi_en_ph0";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			wifi_rst_ph1 {
+				nvidia,pins = "wifi_rst_ph1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			wifi_wake_ap_ph2 {
+				nvidia,pins = "wifi_wake_ap_ph2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			ap_wake_bt_ph3 {
+				nvidia,pins = "ap_wake_bt_ph3";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			bt_rst_ph4 {
+				nvidia,pins = "bt_rst_ph4";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			bt_wake_ap_ph5 {
+				nvidia,pins = "bt_wake_ap_ph5";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			ph6 {
+				nvidia,pins = "ph6";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			ap_wake_nfc_ph7 {
+				nvidia,pins = "ap_wake_nfc_ph7";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			nfc_en_pi0 {
+				nvidia,pins = "nfc_en_pi0";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			nfc_int_pi1 {
+				nvidia,pins = "nfc_int_pi1";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			gps_en_pi2 {
+				nvidia,pins = "gps_en_pi2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			gps_rst_pi3 {
+				nvidia,pins = "gps_rst_pi3";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart4_tx_pi4 {
+				nvidia,pins = "uart4_tx_pi4";
+				nvidia,function = "uartd";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart4_rx_pi5 {
+				nvidia,pins = "uart4_rx_pi5";
+				nvidia,function = "uartd";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart4_rts_pi6 {
+				nvidia,pins = "uart4_rts_pi6";
+				nvidia,function = "uartd";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart4_cts_pi7 {
+				nvidia,pins = "uart4_cts_pi7";
+				nvidia,function = "uartd";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			gen1_i2c_sda_pj0 {
+				nvidia,pins = "gen1_i2c_sda_pj0";
+				nvidia,function = "i2c1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			gen1_i2c_scl_pj1 {
+				nvidia,pins = "gen1_i2c_scl_pj1";
+				nvidia,function = "i2c1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			gen2_i2c_scl_pj2 {
+				nvidia,pins = "gen2_i2c_scl_pj2";
+				nvidia,function = "i2c2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+			};
+			gen2_i2c_sda_pj3 {
+				nvidia,pins = "gen2_i2c_sda_pj3";
+				nvidia,function = "i2c2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_ENABLE>;
+			};
+			dap4_fs_pj4 {
+				nvidia,pins = "dap4_fs_pj4";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap4_din_pj5 {
+				nvidia,pins = "dap4_din_pj5";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap4_dout_pj6 {
+				nvidia,pins = "dap4_dout_pj6";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap4_sclk_pj7 {
+				nvidia,pins = "dap4_sclk_pj7";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pk0 {
+				nvidia,pins = "pk0";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pk1 {
+				nvidia,pins = "pk1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pk2 {
+				nvidia,pins = "pk2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pk3 {
+				nvidia,pins = "pk3";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pk4 {
+				nvidia,pins = "pk4";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pk5 {
+				nvidia,pins = "pk5";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pk6 {
+				nvidia,pins = "pk6";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pk7 {
+				nvidia,pins = "pk7";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pl0 {
+				nvidia,pins = "pl0";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pl1 {
+				nvidia,pins = "pl1";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc1_clk_pm0 {
+				nvidia,pins = "sdmmc1_clk_pm0";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc1_cmd_pm1 {
+				nvidia,pins = "sdmmc1_cmd_pm1";
+				nvidia,function = "rsvd2";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc1_dat3_pm2 {
+				nvidia,pins = "sdmmc1_dat3_pm2";
+				nvidia,function = "rsvd2";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc1_dat2_pm3 {
+				nvidia,pins = "sdmmc1_dat2_pm3";
+				nvidia,function = "rsvd2";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc1_dat1_pm4 {
+				nvidia,pins = "sdmmc1_dat1_pm4";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc1_dat0_pm5 {
+				nvidia,pins = "sdmmc1_dat0_pm5";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc3_clk_pp0 {
+				nvidia,pins = "sdmmc3_clk_pp0";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc3_cmd_pp1 {
+				nvidia,pins = "sdmmc3_cmd_pp1";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc3_dat3_pp2 {
+				nvidia,pins = "sdmmc3_dat3_pp2";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc3_dat2_pp3 {
+				nvidia,pins = "sdmmc3_dat2_pp3";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc3_dat1_pp4 {
+				nvidia,pins = "sdmmc3_dat1_pp4";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			sdmmc3_dat0_pp5 {
+				nvidia,pins = "sdmmc3_dat0_pp5";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			cam1_mclk_ps0 {
+				nvidia,pins = "cam1_mclk_ps0";
+				nvidia,function = "extperiph3";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			cam2_mclk_ps1 {
+				nvidia,pins = "cam2_mclk_ps1";
+				nvidia,function = "extperiph3";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			cam_i2c_scl_ps2 {
+				nvidia,pins = "cam_i2c_scl_ps2";
+				nvidia,function = "i2cvi";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			cam_i2c_sda_ps3 {
+				nvidia,pins = "cam_i2c_sda_ps3";
+				nvidia,function = "i2cvi";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			cam_rst_ps4 {
+				nvidia,pins = "cam_rst_ps4";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			cam_af_en_ps5 {
+				nvidia,pins = "cam_af_en_ps5";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			cam_flash_en_ps6 {
+				nvidia,pins = "cam_flash_en_ps6";
+				nvidia,function = "rsvd2";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			cam1_pwdn_ps7 {
+				nvidia,pins = "cam1_pwdn_ps7";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			cam2_pwdn_pt0 {
+				nvidia,pins = "cam2_pwdn_pt0";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			cam1_strobe_pt1 {
+				nvidia,pins = "cam1_strobe_pt1";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart1_tx_pu0 {
+				nvidia,pins = "uart1_tx_pu0";
+				nvidia,function = "uarta";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart1_rx_pu1 {
+				nvidia,pins = "uart1_rx_pu1";
+				nvidia,function = "uarta";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart1_rts_pu2 {
+				nvidia,pins = "uart1_rts_pu2";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			uart1_cts_pu3 {
+				nvidia,pins = "uart1_cts_pu3";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			lcd_bl_pwm_pv0 {
+				nvidia,pins = "lcd_bl_pwm_pv0";
+				nvidia,function = "rsvd3";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			lcd_bl_en_pv1 {
+				nvidia,pins = "lcd_bl_en_pv1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			lcd_rst_pv2 {
+				nvidia,pins = "lcd_rst_pv2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			lcd_gpio1_pv3 {
+				nvidia,pins = "lcd_gpio1_pv3";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			lcd_gpio2_pv4 {
+				nvidia,pins = "lcd_gpio2_pv4";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			ap_ready_pv5 {
+				nvidia,pins = "ap_ready_pv5";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			touch_rst_pv6 {
+				nvidia,pins = "touch_rst_pv6";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			touch_clk_pv7 {
+				nvidia,pins = "touch_clk_pv7";
+				nvidia,function = "touch";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			modem_wake_ap_px0 {
+				nvidia,pins = "modem_wake_ap_px0";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			touch_int_px1 {
+				nvidia,pins = "touch_int_px1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			motion_int_px2 {
+				nvidia,pins = "motion_int_px2";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			als_prox_int_px3 {
+				nvidia,pins = "als_prox_int_px3";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			temp_alert_px4 {
+				nvidia,pins = "temp_alert_px4";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			button_power_on_px5 {
+				nvidia,pins = "button_power_on_px5";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			button_vol_up_px6 {
+				nvidia,pins = "button_vol_up_px6";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			button_vol_down_px7 {
+				nvidia,pins = "button_vol_down_px7";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			button_slide_sw_py0 {
+				nvidia,pins = "button_slide_sw_py0";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			button_home_py1 {
+				nvidia,pins = "button_home_py1";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			lcd_te_py2 {
+				nvidia,pins = "lcd_te_py2";
+				nvidia,function = "displaya";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pwr_i2c_scl_py3 {
+				nvidia,pins = "pwr_i2c_scl_py3";
+				nvidia,function = "i2cpmu";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			pwr_i2c_sda_py4 {
+				nvidia,pins = "pwr_i2c_sda_py4";
+				nvidia,function = "i2cpmu";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			clk_32k_out_py5 {
+				nvidia,pins = "clk_32k_out_py5";
+				nvidia,function = "soc";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pz0 {
+				nvidia,pins = "pz0";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pz1 {
+				nvidia,pins = "pz1";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pz2 {
+				nvidia,pins = "pz2";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pz3 {
+				nvidia,pins = "pz3";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pz4 {
+				nvidia,pins = "pz4";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pz5 {
+				nvidia,pins = "pz5";
+				nvidia,function = "soc";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap2_fs_paa0 {
+				nvidia,pins = "dap2_fs_paa0";
+				nvidia,function = "i2s2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap2_sclk_paa1 {
+				nvidia,pins = "dap2_sclk_paa1";
+				nvidia,function = "i2s2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap2_din_paa2 {
+				nvidia,pins = "dap2_din_paa2";
+				nvidia,function = "i2s2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dap2_dout_paa3 {
+				nvidia,pins = "dap2_dout_paa3";
+				nvidia,function = "i2s2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			aud_mclk_pbb0 {
+				nvidia,pins = "aud_mclk_pbb0";
+				nvidia,function = "aud";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dvfs_pwm_pbb1 {
+				nvidia,pins = "dvfs_pwm_pbb1";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			dvfs_clk_pbb2 {
+				nvidia,pins = "dvfs_clk_pbb2";
+				nvidia,function = "rsvd0";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			gpio_x1_aud_pbb3 {
+				nvidia,pins = "gpio_x1_aud_pbb3";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			gpio_x3_aud_pbb4 {
+				nvidia,pins = "gpio_x3_aud_pbb4";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			hdmi_cec_pcc0 {
+				nvidia,pins = "hdmi_cec_pcc0";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			hdmi_int_dp_hpd_pcc1 {
+				nvidia,pins = "hdmi_int_dp_hpd_pcc1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			spdif_out_pcc2 {
+				nvidia,pins = "spdif_out_pcc2";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			spdif_in_pcc3 {
+				nvidia,pins = "spdif_in_pcc3";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			usb_vbus_en0_pcc4 {
+				nvidia,pins = "usb_vbus_en0_pcc4";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			usb_vbus_en1_pcc5 {
+				nvidia,pins = "usb_vbus_en1_pcc5";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			dp_hpd0_pcc6 {
+				nvidia,pins = "dp_hpd0_pcc6";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pcc7 {
+				nvidia,pins = "pcc7";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+				nvidia,io-hv = <TEGRA_PIN_DISABLE>;
+			};
+			spi2_cs1_pdd0 {
+				nvidia,pins = "spi2_cs1_pdd0";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			qspi_sck_pee0 {
+				nvidia,pins = "qspi_sck_pee0";
+				nvidia,function = "qspi";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			qspi_cs_n_pee1 {
+				nvidia,pins = "qspi_cs_n_pee1";
+				nvidia,function = "qspi";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			qspi_io0_pee2 {
+				nvidia,pins = "qspi_io0_pee2";
+				nvidia,function = "qspi";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			qspi_io1_pee3 {
+				nvidia,pins = "qspi_io1_pee3";
+				nvidia,function = "qspi";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			qspi_io2_pee4 {
+				nvidia,pins = "qspi_io2_pee4";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			qspi_io3_pee5 {
+				nvidia,pins = "qspi_io3_pee5";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			core_pwr_req {
+				nvidia,pins = "core_pwr_req";
+				nvidia,function = "core";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			cpu_pwr_req {
+				nvidia,pins = "cpu_pwr_req";
+				nvidia,function = "cpu";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			pwr_int_n {
+				nvidia,pins = "pwr_int_n";
+				nvidia,function = "pmi";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			clk_32k_in {
+				nvidia,pins = "clk_32k_in";
+				nvidia,function = "clk";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			jtag_rtck {
+				nvidia,pins = "jtag_rtck";
+				nvidia,function = "jtag";
+				nvidia,pull = <TEGRA_PIN_PULL_UP>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			clk_req {
+				nvidia,pins = "clk_req";
+				nvidia,function = "rsvd1";
+				nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+				nvidia,tristate = <TEGRA_PIN_ENABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+			shutdown {
+				nvidia,pins = "shutdown";
+				nvidia,function = "shutdown";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+				nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+			};
+		};
+	};
+
+	serial@70006000 {
+		status = "okay";
+	};
+
+	i2c@7000c400 {
+		status = "okay";
+		clock-frequency = <1000000>;
+
+		ec@1e {
+			compatible = "google,cros-ec-i2c";
+			reg = <0x1e>;
+			interrupt-parent = <&gpio>;
+			interrupts = <TEGRA_GPIO(Z, 1) IRQ_TYPE_LEVEL_LOW>;
+			wakeup-source;
+
+			ec_i2c_0: i2c-tunnel {
+				compatible = "google,cros-ec-i2c-tunnel";
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				google,remote-bus = <0>;
+
+				battery: bq27742@55 {
+					compatible = "ti,bq27742";
+					reg = <0x55>;
+					battery-name = "battery";
+				};
+			};
+		};
+	};
+
+	pmc@7000e400 {
+		nvidia,invert-interrupt;
+		nvidia,suspend-mode = <0>;
+		nvidia,cpu-pwr-good-time = <0>;
+		nvidia,cpu-pwr-off-time = <0>;
+		nvidia,core-pwr-good-time = <12000 6000>;
+		nvidia,core-pwr-off-time = <39053>;
+		nvidia,core-power-req-active-high;
+		nvidia,sys-clock-req-active-high;
+		status = "okay";
+	};
+
+	sdhci@700b0600 {
+		bus-width = <8>;
+		non-removable;
+		status = "okay";
+	};
+
+	clocks {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		clk32k_in: clock@0 {
+			compatible = "fixed-clock";
+			reg = <0>;
+			#clock-cells = <0>;
+			clock-frequency = <32768>;
+		};
+	};
+
+	cpus {
+		cpu@0 {
+			enable-method = "psci";
+		};
+
+		cpu@1 {
+			enable-method = "psci";
+		};
+
+		cpu@2 {
+			enable-method = "psci";
+		};
+
+		cpu@3 {
+			enable-method = "psci";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		gpio-keys,name = "gpio-keys";
+
+		power {
+			label = "Power";
+			gpios = <&gpio TEGRA_GPIO(X, 5) GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_POWER>;
+			debounce-interval = <30>;
+			wakeup-source;
+		};
+
+		lid {
+			label = "Lid";
+			gpios = <&gpio TEGRA_GPIO(B, 4) GPIO_ACTIVE_LOW>;
+			linux,input-type = <EV_SW>;
+			linux,code = <SW_LID>;
+			wakeup-source;
+		};
+
+		tablet_mode {
+			label = "Tablet Mode";
+			gpios = <&gpio TEGRA_GPIO(Z, 2) GPIO_ACTIVE_HIGH>;
+			linux,input-type = <EV_SW>;
+			linux,code = <SW_TABLET_MODE>;
+			wakeup-source;
+		};
+
+		volume_down {
+			label = "Volume Down";
+			gpios = <&gpio TEGRA_GPIO(X, 7) GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_VOLUMEDOWN>;
+		};
+
+		volume_up {
+			label = "Volume Up";
+			gpios = <&gpio TEGRA_GPIO(M, 4) GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_VOLUMEUP>;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 23b0630..76fe31f 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -10,7 +10,7 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
-	host1x@0,50000000 {
+	host1x@50000000 {
 		compatible = "nvidia,tegra210-host1x", "simple-bus";
 		reg = <0x0 0x50000000 0x0 0x00034000>;
 		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>, /* syncpt */
@@ -25,7 +25,7 @@
 
 		ranges = <0x0 0x54000000 0x0 0x54000000 0x0 0x01000000>;
 
-		dpaux1: dpaux@0,54040000 {
+		dpaux1: dpaux@54040000 {
 			compatible = "nvidia,tegra210-dpaux";
 			reg = <0x0 0x54040000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
@@ -37,19 +37,19 @@
 			status = "disabled";
 		};
 
-		vi@0,54080000 {
+		vi@54080000 {
 			compatible = "nvidia,tegra210-vi";
 			reg = <0x0 0x54080000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
 
-		tsec@0,54100000 {
+		tsec@54100000 {
 			compatible = "nvidia,tegra210-tsec";
 			reg = <0x0 0x54100000 0x0 0x00040000>;
 		};
 
-		dc@0,54200000 {
+		dc@54200000 {
 			compatible = "nvidia,tegra210-dc";
 			reg = <0x0 0x54200000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
@@ -64,7 +64,7 @@
 			nvidia,head = <0>;
 		};
 
-		dc@0,54240000 {
+		dc@54240000 {
 			compatible = "nvidia,tegra210-dc";
 			reg = <0x0 0x54240000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
@@ -79,7 +79,7 @@
 			nvidia,head = <1>;
 		};
 
-		dsi@0,54300000 {
+		dsi@54300000 {
 			compatible = "nvidia,tegra210-dsi";
 			reg = <0x0 0x54300000 0x0 0x00040000>;
 			clocks = <&tegra_car TEGRA210_CLK_DSIA>,
@@ -96,19 +96,19 @@
 			#size-cells = <0>;
 		};
 
-		vic@0,54340000 {
+		vic@54340000 {
 			compatible = "nvidia,tegra210-vic";
 			reg = <0x0 0x54340000 0x0 0x00040000>;
 			status = "disabled";
 		};
 
-		nvjpg@0,54380000 {
+		nvjpg@54380000 {
 			compatible = "nvidia,tegra210-nvjpg";
 			reg = <0x0 0x54380000 0x0 0x00040000>;
 			status = "disabled";
 		};
 
-		dsi@0,54400000 {
+		dsi@54400000 {
 			compatible = "nvidia,tegra210-dsi";
 			reg = <0x0 0x54400000 0x0 0x00040000>;
 			clocks = <&tegra_car TEGRA210_CLK_DSIB>,
@@ -125,25 +125,25 @@
 			#size-cells = <0>;
 		};
 
-		nvdec@0,54480000 {
+		nvdec@54480000 {
 			compatible = "nvidia,tegra210-nvdec";
 			reg = <0x0 0x54480000 0x0 0x00040000>;
 			status = "disabled";
 		};
 
-		nvenc@0,544c0000 {
+		nvenc@544c0000 {
 			compatible = "nvidia,tegra210-nvenc";
 			reg = <0x0 0x544c0000 0x0 0x00040000>;
 			status = "disabled";
 		};
 
-		tsec@0,54500000 {
+		tsec@54500000 {
 			compatible = "nvidia,tegra210-tsec";
 			reg = <0x0 0x54500000 0x0 0x00040000>;
 			status = "disabled";
 		};
 
-		sor@0,54540000 {
+		sor@54540000 {
 			compatible = "nvidia,tegra210-sor";
 			reg = <0x0 0x54540000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
@@ -157,7 +157,7 @@
 			status = "disabled";
 		};
 
-		sor@0,54580000 {
+		sor@54580000 {
 			compatible = "nvidia,tegra210-sor1";
 			reg = <0x0 0x54580000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
@@ -171,7 +171,7 @@
 			status = "disabled";
 		};
 
-		dpaux: dpaux@0,545c0000 {
+		dpaux: dpaux@545c0000 {
 			compatible = "nvidia,tegra124-dpaux";
 			reg = <0x0 0x545c0000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
@@ -183,21 +183,21 @@
 			status = "disabled";
 		};
 
-		isp@0,54600000 {
+		isp@54600000 {
 			compatible = "nvidia,tegra210-isp";
 			reg = <0x0 0x54600000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
 
-		isp@0,54680000 {
+		isp@54680000 {
 			compatible = "nvidia,tegra210-isp";
 			reg = <0x0 0x54680000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
 
-		i2c@0,546c0000 {
+		i2c@546c0000 {
 			compatible = "nvidia,tegra210-i2c-vi";
 			reg = <0x0 0x546c0000 0x0 0x00040000>;
 			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
@@ -205,7 +205,7 @@
 		};
 	};
 
-	gic: interrupt-controller@0,50041000 {
+	gic: interrupt-controller@50041000 {
 		compatible = "arm,gic-400";
 		#interrupt-cells = <3>;
 		interrupt-controller;
@@ -218,7 +218,7 @@
 		interrupt-parent = <&gic>;
 	};
 
-	gpu@0,57000000 {
+	gpu@57000000 {
 		compatible = "nvidia,gm20b";
 		reg = <0x0 0x57000000 0x0 0x01000000>,
 		      <0x0 0x58000000 0x0 0x01000000>;
@@ -226,14 +226,18 @@
 			     <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "stall", "nonstall";
 		clocks = <&tegra_car TEGRA210_CLK_GPU>,
-			 <&tegra_car TEGRA210_CLK_PLL_P_OUT5>;
-		clock-names = "gpu", "pwr";
+			 <&tegra_car TEGRA210_CLK_PLL_P_OUT5>,
+			 <&tegra_car TEGRA210_CLK_PLL_G_REF>;
+		clock-names = "gpu", "pwr", "ref";
 		resets = <&tegra_car 184>;
 		reset-names = "gpu";
+
+		iommus = <&mc TEGRA_SWGROUP_GPU>;
+
 		status = "disabled";
 	};
 
-	lic: interrupt-controller@0,60004000 {
+	lic: interrupt-controller@60004000 {
 		compatible = "nvidia,tegra210-ictlr";
 		reg = <0x0 0x60004000 0x0 0x40>, /* primary controller */
 		      <0x0 0x60004100 0x0 0x40>, /* secondary controller */
@@ -246,7 +250,7 @@
 		interrupt-parent = <&gic>;
 	};
 
-	timer@0,60005000 {
+	timer@60005000 {
 		compatible = "nvidia,tegra210-timer", "nvidia,tegra20-timer";
 		reg = <0x0 0x60005000 0x0 0x400>;
 		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
@@ -259,19 +263,19 @@
 		clock-names = "timer";
 	};
 
-	tegra_car: clock@0,60006000 {
+	tegra_car: clock@60006000 {
 		compatible = "nvidia,tegra210-car";
 		reg = <0x0 0x60006000 0x0 0x1000>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
 
-	flow-controller@0,60007000 {
+	flow-controller@60007000 {
 		compatible = "nvidia,tegra210-flowctrl";
 		reg = <0x0 0x60007000 0x0 0x1000>;
 	};
 
-	gpio: gpio@0,6000d000 {
+	gpio: gpio@6000d000 {
 		compatible = "nvidia,tegra210-gpio", "nvidia,tegra124-gpio", "nvidia,tegra30-gpio";
 		reg = <0x0 0x6000d000 0x0 0x1000>;
 		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
@@ -288,7 +292,7 @@
 		interrupt-controller;
 	};
 
-	apbdma: dma@0,60020000 {
+	apbdma: dma@60020000 {
 		compatible = "nvidia,tegra210-apbdma", "nvidia,tegra148-apbdma";
 		reg = <0x0 0x60020000 0x0 0x1400>;
 		interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
@@ -330,13 +334,13 @@
 		#dma-cells = <1>;
 	};
 
-	apbmisc@0,70000800 {
+	apbmisc@70000800 {
 		compatible = "nvidia,tegra210-apbmisc", "nvidia,tegra20-apbmisc";
 		reg = <0x0 0x70000800 0x0 0x64>,   /* Chip revision */
 		      <0x0 0x7000e864 0x0 0x04>;   /* Strapping options */
 	};
 
-	pinmux: pinmux@0,700008d4 {
+	pinmux: pinmux@700008d4 {
 		compatible = "nvidia,tegra210-pinmux";
 		reg = <0x0 0x700008d4 0x0 0x29c>, /* Pad control registers */
 		      <0x0 0x70003000 0x0 0x294>; /* Mux registers */
@@ -347,10 +351,10 @@
 	 * driver and APB DMA based serial driver for higher baudrate
 	 * and performance. To enable the 8250 based driver, the compatible
 	 * is "nvidia,tegra124-uart", "nvidia,tegra20-uart" and to enable
-	 * the APB DMA based serial driver, the comptible is
+	 * the APB DMA based serial driver, the compatible is
 	 * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
 	 */
-	uarta: serial@0,70006000 {
+	uarta: serial@70006000 {
 		compatible = "nvidia,tegra210-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006000 0x0 0x40>;
 		reg-shift = <2>;
@@ -364,7 +368,7 @@
 		status = "disabled";
 	};
 
-	uartb: serial@0,70006040 {
+	uartb: serial@70006040 {
 		compatible = "nvidia,tegra210-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006040 0x0 0x40>;
 		reg-shift = <2>;
@@ -378,7 +382,7 @@
 		status = "disabled";
 	};
 
-	uartc: serial@0,70006200 {
+	uartc: serial@70006200 {
 		compatible = "nvidia,tegra210-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006200 0x0 0x40>;
 		reg-shift = <2>;
@@ -392,7 +396,7 @@
 		status = "disabled";
 	};
 
-	uartd: serial@0,70006300 {
+	uartd: serial@70006300 {
 		compatible = "nvidia,tegra210-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006300 0x0 0x40>;
 		reg-shift = <2>;
@@ -406,7 +410,7 @@
 		status = "disabled";
 	};
 
-	pwm: pwm@0,7000a000 {
+	pwm: pwm@7000a000 {
 		compatible = "nvidia,tegra210-pwm", "nvidia,tegra20-pwm";
 		reg = <0x0 0x7000a000 0x0 0x100>;
 		#pwm-cells = <2>;
@@ -417,7 +421,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000c000 {
+	i2c@7000c000 {
 		compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000c000 0x0 0x100>;
 		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
@@ -432,7 +436,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000c400 {
+	i2c@7000c400 {
 		compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000c400 0x0 0x100>;
 		interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
@@ -447,7 +451,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000c500 {
+	i2c@7000c500 {
 		compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000c500 0x0 0x100>;
 		interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
@@ -462,7 +466,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000c700 {
+	i2c@7000c700 {
 		compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000c700 0x0 0x100>;
 		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
@@ -477,7 +481,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000d000 {
+	i2c@7000d000 {
 		compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000d000 0x0 0x100>;
 		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
@@ -492,7 +496,7 @@
 		status = "disabled";
 	};
 
-	i2c@0,7000d100 {
+	i2c@7000d100 {
 		compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c";
 		reg = <0x0 0x7000d100 0x0 0x100>;
 		interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
@@ -507,7 +511,7 @@
 		status = "disabled";
 	};
 
-	spi@0,7000d400 {
+	spi@7000d400 {
 		compatible = "nvidia,tegra210-spi", "nvidia,tegra114-spi";
 		reg = <0x0 0x7000d400 0x0 0x200>;
 		interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
@@ -522,7 +526,7 @@
 		status = "disabled";
 	};
 
-	spi@0,7000d600 {
+	spi@7000d600 {
 		compatible = "nvidia,tegra210-spi", "nvidia,tegra114-spi";
 		reg = <0x0 0x7000d600 0x0 0x200>;
 		interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
@@ -537,7 +541,7 @@
 		status = "disabled";
 	};
 
-	spi@0,7000d800 {
+	spi@7000d800 {
 		compatible = "nvidia,tegra210-spi", "nvidia,tegra114-spi";
 		reg = <0x0 0x7000d800 0x0 0x200>;
 		interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
@@ -552,7 +556,7 @@
 		status = "disabled";
 	};
 
-	spi@0,7000da00 {
+	spi@7000da00 {
 		compatible = "nvidia,tegra210-spi", "nvidia,tegra114-spi";
 		reg = <0x0 0x7000da00 0x0 0x200>;
 		interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
@@ -567,7 +571,7 @@
 		status = "disabled";
 	};
 
-	rtc@0,7000e000 {
+	rtc@7000e000 {
 		compatible = "nvidia,tegra210-rtc", "nvidia,tegra20-rtc";
 		reg = <0x0 0x7000e000 0x0 0x100>;
 		interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
@@ -575,16 +579,14 @@
 		clock-names = "rtc";
 	};
 
-	pmc: pmc@0,7000e400 {
+	pmc: pmc@7000e400 {
 		compatible = "nvidia,tegra210-pmc";
 		reg = <0x0 0x7000e400 0x0 0x400>;
 		clocks = <&tegra_car TEGRA210_CLK_PCLK>, <&clk32k_in>;
 		clock-names = "pclk", "clk32k_in";
-
-		#power-domain-cells = <1>;
 	};
 
-	fuse@0,7000f800 {
+	fuse@7000f800 {
 		compatible = "nvidia,tegra210-efuse";
 		reg = <0x0 0x7000f800 0x0 0x400>;
 		clocks = <&tegra_car TEGRA210_CLK_FUSE>;
@@ -593,7 +595,7 @@
 		reset-names = "fuse";
 	};
 
-	mc: memory-controller@0,70019000 {
+	mc: memory-controller@70019000 {
 		compatible = "nvidia,tegra210-mc";
 		reg = <0x0 0x70019000 0x0 0x1000>;
 		clocks = <&tegra_car TEGRA210_CLK_MC>;
@@ -604,7 +606,7 @@
 		#iommu-cells = <1>;
 	};
 
-	hda@0,70030000 {
+	hda@70030000 {
 		compatible = "nvidia,tegra210-hda", "nvidia,tegra30-hda";
 		reg = <0x0 0x70030000 0x0 0x10000>;
 		interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
@@ -619,7 +621,7 @@
 		status = "disabled";
 	};
 
-	sdhci@0,700b0000 {
+	sdhci@700b0000 {
 		compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci";
 		reg = <0x0 0x700b0000 0x0 0x200>;
 		interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
@@ -630,7 +632,7 @@
 		status = "disabled";
 	};
 
-	sdhci@0,700b0200 {
+	sdhci@700b0200 {
 		compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci";
 		reg = <0x0 0x700b0200 0x0 0x200>;
 		interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
@@ -641,7 +643,7 @@
 		status = "disabled";
 	};
 
-	sdhci@0,700b0400 {
+	sdhci@700b0400 {
 		compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci";
 		reg = <0x0 0x700b0400 0x0 0x200>;
 		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
@@ -652,7 +654,7 @@
 		status = "disabled";
 	};
 
-	sdhci@0,700b0600 {
+	sdhci@700b0600 {
 		compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci";
 		reg = <0x0 0x700b0600 0x0 0x200>;
 		interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
@@ -663,7 +665,7 @@
 		status = "disabled";
 	};
 
-	mipi: mipi@0,700e3000 {
+	mipi: mipi@700e3000 {
 		compatible = "nvidia,tegra210-mipi";
 		reg = <0x0 0x700e3000 0x0 0x100>;
 		clocks = <&tegra_car TEGRA210_CLK_MIPI_CAL>;
@@ -671,7 +673,7 @@
 		#nvidia,mipi-calibrate-cells = <1>;
 	};
 
-	spi@0,70410000 {
+	spi@70410000 {
 		compatible = "nvidia,tegra210-qspi";
 		reg = <0x0 0x70410000 0x0 0x1000>;
 		interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
@@ -686,7 +688,7 @@
 		status = "disabled";
 	};
 
-	usb@0,7d000000 {
+	usb@7d000000 {
 		compatible = "nvidia,tegra210-ehci", "nvidia,tegra30-ehci", "usb-ehci";
 		reg = <0x0 0x7d000000 0x0 0x4000>;
 		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
@@ -699,7 +701,7 @@
 		status = "disabled";
 	};
 
-	phy1: usb-phy@0,7d000000 {
+	phy1: usb-phy@7d000000 {
 		compatible = "nvidia,tegra210-usb-phy", "nvidia,tegra30-usb-phy";
 		reg = <0x0 0x7d000000 0x0 0x4000>,
 		      <0x0 0x7d000000 0x0 0x4000>;
@@ -724,7 +726,7 @@
 		status = "disabled";
 	};
 
-	usb@0,7d004000 {
+	usb@7d004000 {
 		compatible = "nvidia,tegra210-ehci", "nvidia,tegra30-ehci", "usb-ehci";
 		reg = <0x0 0x7d004000 0x0 0x4000>;
 		interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
@@ -737,7 +739,7 @@
 		status = "disabled";
 	};
 
-	phy2: usb-phy@0,7d004000 {
+	phy2: usb-phy@7d004000 {
 		compatible = "nvidia,tegra210-usb-phy", "nvidia,tegra30-usb-phy";
 		reg = <0x0 0x7d004000 0x0 0x4000>,
 		      <0x0 0x7d000000 0x0 0x4000>;
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
index b992b1a..9f561c9 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
@@ -141,62 +141,66 @@
 	clock-frequency = <16666666>;
 };
 
+&extalr_clk {
+	clock-frequency = <32768>;
+};
+
 &pfc {
 	pinctrl-0 = <&scif_clk_pins>;
 	pinctrl-names = "default";
 
 	scif1_pins: scif1 {
-		renesas,groups = "scif1_data_a", "scif1_ctrl";
-		renesas,function = "scif1";
+		groups = "scif1_data_a", "scif1_ctrl";
+		function = "scif1";
 	};
 	scif2_pins: scif2 {
-		renesas,groups = "scif2_data_a";
-		renesas,function = "scif2";
+		groups = "scif2_data_a";
+		function = "scif2";
 	};
 	scif_clk_pins: scif_clk {
-		renesas,groups = "scif_clk_a";
-		renesas,function = "scif_clk";
+		groups = "scif_clk_a";
+		function = "scif_clk";
 	};
 
 	i2c2_pins: i2c2 {
-		renesas,groups = "i2c2_a";
-		renesas,function = "i2c2";
+		groups = "i2c2_a";
+		function = "i2c2";
 	};
 
 	avb_pins: avb {
-		renesas,groups = "avb_mdc";
-		renesas,function = "avb";
+		groups = "avb_mdc";
+		function = "avb";
 	};
 
 	sdhi0_pins: sd0 {
-		renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
 	};
 
 	sdhi3_pins: sd3 {
-		renesas,groups = "sdhi3_data4", "sdhi3_ctrl";
-		renesas,function = "sdhi3";
+		groups = "sdhi3_data4", "sdhi3_ctrl";
+		function = "sdhi3";
 	};
 
 	sound_pins: sound {
-		renesas,groups = "ssi01239_ctrl", "ssi0_data", "ssi1_data_a";
-		renesas,function = "ssi";
+		groups = "ssi01239_ctrl", "ssi0_data", "ssi1_data_a";
+		function = "ssi";
 	};
 
 	sound_clk_pins: sound_clk {
-		renesas,groups = "audio_clk_a_a", "audio_clk_b_a", "audio_clk_c_a",
-				 "audio_clkout_a", "audio_clkout3_a";
-		renesas,function = "audio_clk";
+		groups = "audio_clk_a_a", "audio_clk_b_a", "audio_clk_c_a",
+			 "audio_clkout_a", "audio_clkout3_a";
+		function = "audio_clk";
 	};
 
 	usb1_pins: usb1 {
-		renesas,groups = "usb1";
-		renesas,function = "usb1";
+		groups = "usb1";
+		function = "usb1";
 	};
 
 	usb2_pins: usb2 {
-		renesas,groups = "usb2";
-		renesas,function = "usb2";
+		groups = "usb2";
+		function = "usb2";
 	};
 };
 
@@ -388,3 +392,16 @@
 &ohci2 {
 	status = "okay";
 };
+
+&pcie_bus_clk {
+	clock-frequency = <100000000>;
+	status = "okay";
+};
+
+&pciec0 {
+	status = "okay";
+};
+
+&pciec1 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index a7315eb..7cb2d72 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -115,12 +115,25 @@
 		clock-frequency = <0>;
 	};
 
+	/* External CAN clock - to be overridden by boards that provide it */
+	can_clk: can {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+	};
+
 	/* External SCIF clock - to be overridden by boards that provide it */
 	scif_clk: scif {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
 		clock-frequency = <0>;
-		status = "disabled";
+	};
+
+	/* External PCIe clock - can be overridden by the board */
+	pcie_bus_clk: pcie_bus {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
 	};
 
 	soc {
@@ -515,6 +528,36 @@
 			#size-cells = <0>;
 		};
 
+		can0: can@e6c30000 {
+			compatible = "renesas,can-r8a7795",
+				     "renesas,rcar-gen3-can";
+			reg = <0 0xe6c30000 0 0x1000>;
+			interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 916>,
+			       <&cpg CPG_CORE R8A7795_CLK_CANFD>,
+			       <&can_clk>;
+			clock-names = "clkp1", "clkp2", "can_clk";
+			assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
+			assigned-clock-rates = <40000000>;
+			power-domains = <&cpg>;
+			status = "disabled";
+		};
+
+		can1: can@e6c38000 {
+			compatible = "renesas,can-r8a7795",
+				     "renesas,rcar-gen3-can";
+			reg = <0 0xe6c38000 0 0x1000>;
+			interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 915>,
+			       <&cpg CPG_CORE R8A7795_CLK_CANFD>,
+			       <&can_clk>;
+			clock-names = "clkp1", "clkp2", "can_clk";
+			assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
+			assigned-clock-rates = <40000000>;
+			power-domains = <&cpg>;
+			status = "disabled";
+		};
+
 		hscif0: serial@e6540000 {
 			compatible = "renesas,hscif-r8a7795",
 				     "renesas,rcar-gen3-hscif",
@@ -944,7 +987,7 @@
 		};
 
 		xhci0: usb@ee000000 {
-			compatible = "renesas,xhci-r8a7795";
+			compatible = "renesas,xhci-r8a7795", "renesas,rcar-gen3-xhci";
 			reg = <0 0xee000000 0 0xc00>;
 			interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 328>;
@@ -953,7 +996,7 @@
 		};
 
 		xhci1: usb@ee040000 {
-			compatible = "renesas,xhci-r8a7795";
+			compatible = "renesas,xhci-r8a7795", "renesas,rcar-gen3-xhci";
 			reg = <0 0xee040000 0 0xc00>;
 			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cpg CPG_MOD 327>;
@@ -1118,5 +1161,54 @@
 			power-domains = <&cpg>;
 			status = "disabled";
 		};
+		pciec0: pcie@fe000000 {
+			compatible = "renesas,pcie-r8a7795";
+			reg = <0 0xfe000000 0 0x80000>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			bus-range = <0x00 0xff>;
+			device_type = "pci";
+			ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000
+				0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000
+				0x02000000 0 0x30000000 0 0x30000000 0 0x08000000
+				0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+			/* Map all possible DDR as inbound ranges */
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
+			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
+			clock-names = "pcie", "pcie_bus";
+			power-domains = <&cpg>;
+			status = "disabled";
+		};
+
+		pciec1: pcie@ee800000 {
+			compatible = "renesas,pcie-r8a7795";
+			reg = <0 0xee800000 0 0x80000>;
+			#address-cells = <3>;
+			#size-cells = <2>;
+			bus-range = <0x00 0xff>;
+			device_type = "pci";
+			ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000
+				0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000
+				0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000
+				0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
+			/* Map all possible DDR as inbound ranges */
+			dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>;
+			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+			#interrupt-cells = <1>;
+			interrupt-map-mask = <0 0 0 0>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 318>, <&pcie_bus_clk>;
+			clock-names = "pcie", "pcie_bus";
+			power-domains = <&cpg>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index e3f0b5f..7037a16 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -1,5 +1,7 @@
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-evb-act8846.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-geekbox.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-r88.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-evb.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
index 6e27b22..fff8b19 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
@@ -40,6 +40,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <dt-bindings/input/input.h>
 #include <dt-bindings/pwm/pwm.h>
 #include "rk3368.dtsi"
 
@@ -105,16 +106,14 @@
 
 	keys: gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pwr_key>;
 
-		button@0 {
+		power {
 			wakeup-source;
 			gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
 			label = "GPIO Power";
-			linux,code = <116>;
+			linux,code = <KEY_POWER>;
 		};
 	};
 
@@ -152,7 +151,6 @@
 };
 
 &emmc {
-	broken-cd;
 	bus-width = <8>;
 	cap-mmc-highspeed;
 	disable-wp;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
new file mode 100644
index 0000000..46cdddf
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3368.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "GeekBox";
+	compatible = "geekbuying,geekbox", "rockchip,rk3368";
+
+	chosen {
+		stdout-path = "serial2:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+
+	ext_gmac: gmac-clk {
+		compatible = "fixed-clock";
+		clock-frequency = <125000000>;
+		clock-output-names = "ext_gmac";
+		#clock-cells = <0>;
+	};
+
+	ir: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&gpio3 30 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ir_int>;
+	};
+
+	keys: gpio-keys {
+		compatible = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwr_key>;
+
+		power {
+			gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+			label = "GPIO Power";
+			linux,code = <KEY_POWER>;
+			wakeup-source;
+		};
+	};
+
+	leds: gpio-leds {
+		compatible = "gpio-leds";
+
+		blue {
+			gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
+			label = "geekbox:blue:led";
+			default-state = "on";
+		};
+
+		red {
+			gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
+			label = "geekbox:red:led";
+			default-state = "off";
+		};
+	};
+
+	vcc_sys: vcc-sys-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_sys";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+};
+
+&emmc {
+	status = "okay";
+	bus-width = <8>;
+	cap-mmc-highspeed;
+	clock-frequency = <150000000>;
+	disable-wp;
+	keep-power-in-suspend;
+	non-removable;
+	num-slots = <1>;
+	vmmc-supply = <&vcc_io>;
+	vqmmc-supply = <&vcc18_flash>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_bus8>;
+};
+
+&gmac {
+	status = "okay";
+	phy-supply = <&vcc_lan>;
+	phy-mode = "rgmii";
+	clock_in_out = "input";
+	assigned-clocks = <&cru SCLK_MAC>;
+	assigned-clock-parents = <&ext_gmac>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>;
+	tx_delay = <0x30>;
+	rx_delay = <0x10>;
+};
+
+&i2c0 {
+	status = "okay";
+
+	rk808: pmic@1b {
+		compatible = "rockchip,rk808";
+		reg = <0x1b>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pmic_int>, <&pmic_sleep>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+		rockchip,system-power-controller;
+		vcc1-supply = <&vcc_sys>;
+		vcc2-supply = <&vcc_sys>;
+		vcc3-supply = <&vcc_sys>;
+		vcc4-supply = <&vcc_sys>;
+		vcc6-supply = <&vcc_sys>;
+		vcc7-supply = <&vcc_sys>;
+		vcc8-supply = <&vcc_io>;
+		vcc9-supply = <&vcc_sys>;
+		vcc10-supply = <&vcc_sys>;
+		vcc11-supply = <&vcc_sys>;
+		vcc12-supply = <&vcc_io>;
+		clock-output-names = "xin32k", "rk808-clkout2";
+		#clock-cells = <1>;
+
+		regulators {
+			vdd_cpu: DCDC_REG1 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-name = "vdd_cpu";
+			};
+
+			vdd_log: DCDC_REG2 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-name = "vdd_log";
+			};
+
+			vcc_ddr: DCDC_REG3 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-name = "vcc_ddr";
+			};
+
+			vcc_io: DCDC_REG4 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "vcc_io";
+			};
+
+			vcc18_flash: LDO_REG1 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-name = "vcc18_flash";
+			};
+
+			vcc33_lcd: LDO_REG2 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "vcc33_lcd";
+			};
+
+			vdd_10: LDO_REG3 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-name = "vdd_10";
+			};
+
+			vcca_18: LDO_REG4 {
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-name = "vcca_18";
+			};
+
+			vccio_sd: LDO_REG5 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "vccio_sd";
+			};
+
+			vdd10_lcd: LDO_REG6 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-name = "vdd10_lcd";
+			};
+
+			vcc_18: LDO_REG7 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-name = "vcc_18";
+			};
+
+			vcc18_lcd: LDO_REG8 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-name = "vcc18_lcd";
+			};
+
+			vcc_sd: SWITCH_REG1 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-name = "vcc_sd";
+			};
+
+			vcc_lan: SWITCH_REG2 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-name = "vcc_lan";
+			};
+		};
+	};
+};
+
+&pinctrl {
+	ir {
+		ir_int: ir-int {
+			rockchip,pins = <3 30 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	keys {
+		pwr_key: pwr-key {
+			rockchip,pins = <0 2 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	pmic {
+		pmic_sleep: pmic-sleep {
+			rockchip,pins = <0 0 RK_FUNC_2 &pcfg_pull_none>;
+		};
+
+		pmic_int: pmic-int {
+			rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+};
+
+&tsadc {
+	status = "okay";
+	rockchip,hw-tshut-mode = <0>; /* CRU */
+	rockchip,hw-tshut-polarity = <1>; /* high */
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&usb_host0_ehci {
+	status = "okay";
+};
+
+&usb_otg {
+	status = "okay";
+};
+
+&wdt {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
index 1f2b642..b56b720 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -42,6 +42,7 @@
 
 /dts-v1/;
 #include "rk3368.dtsi"
+#include <dt-bindings/input/input.h>
 
 / {
 	model = "Rockchip R88";
@@ -65,16 +66,14 @@
 
 	keys: gpio-keys {
 		compatible = "gpio-keys";
-		#address-cells = <1>;
-		#size-cells = <0>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&pwr_key>;
 
-		button@0 {
+		power {
 			wakeup-source;
 			gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
 			label = "GPIO Power";
-			linux,code = <116>;
+			linux,code = <KEY_POWER>;
 		};
 	};
 
@@ -185,7 +184,6 @@
 };
 
 &emmc {
-	broken-cd;
 	bus-width = <8>;
 	cap-mmc-highspeed;
 	disable-wp;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-thermal.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-thermal.dtsi
deleted file mode 100644
index a10010f..0000000
--- a/arch/arm64/boot/dts/rockchip/rk3368-thermal.dtsi
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Device Tree Source for RK3368 SoC thermal
- *
- * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
- * Caesar Wang <wxt@rock-chips.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <dt-bindings/thermal/thermal.h>
-
-cpu_thermal: cpu_thermal {
-	polling-delay-passive = <100>; /* milliseconds */
-	polling-delay = <5000>; /* milliseconds */
-
-	thermal-sensors = <&tsadc 0>;
-
-	trips {
-		cpu_alert0: cpu_alert0 {
-			temperature = <75000>; /* millicelsius */
-			hysteresis = <2000>; /* millicelsius */
-			type = "passive";
-		};
-		cpu_alert1: cpu_alert1 {
-			temperature = <80000>; /* millicelsius */
-			hysteresis = <2000>; /* millicelsius */
-			type = "passive";
-		};
-		cpu_crit: cpu_crit {
-			temperature = <95000>; /* millicelsius */
-			hysteresis = <2000>; /* millicelsius */
-			type = "critical";
-		};
-	};
-
-	cooling-maps {
-		map0 {
-			trip = <&cpu_alert0>;
-			cooling-device =
-				<&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
-		};
-		map1 {
-			trip = <&cpu_alert1>;
-			cooling-device =
-				<&cpu_l0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
-		};
-	};
-};
-
-gpu_thermal: gpu_thermal {
-	polling-delay-passive = <100>; /* milliseconds */
-	polling-delay = <5000>; /* milliseconds */
-
-	thermal-sensors = <&tsadc 1>;
-
-	trips {
-		gpu_alert0: gpu_alert0 {
-			temperature = <80000>; /* millicelsius */
-			hysteresis = <2000>; /* millicelsius */
-			type = "passive";
-		};
-		gpu_crit: gpu_crit {
-			temperature = <1150000>; /* millicelsius */
-			hysteresis = <2000>; /* millicelsius */
-			type = "critical";
-		};
-	};
-
-	cooling-maps {
-		map0 {
-			trip = <&gpu_alert0>;
-			cooling-device =
-				<&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
-		};
-	};
-};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 49d1191..8b4a7c9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -413,7 +413,71 @@
 	};
 
 	thermal-zones {
-		#include "rk3368-thermal.dtsi"
+		cpu {
+			polling-delay-passive = <100>; /* milliseconds */
+			polling-delay = <5000>; /* milliseconds */
+
+			thermal-sensors = <&tsadc 0>;
+
+			trips {
+				cpu_alert0: cpu_alert0 {
+					temperature = <75000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "passive";
+				};
+				cpu_alert1: cpu_alert1 {
+					temperature = <80000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "passive";
+				};
+				cpu_crit: cpu_crit {
+					temperature = <95000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert0>;
+					cooling-device =
+					<&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+				map1 {
+					trip = <&cpu_alert1>;
+					cooling-device =
+					<&cpu_l0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		gpu {
+			polling-delay-passive = <100>; /* milliseconds */
+			polling-delay = <5000>; /* milliseconds */
+
+			thermal-sensors = <&tsadc 1>;
+
+			trips {
+				gpu_alert0: gpu_alert0 {
+					temperature = <80000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "passive";
+				};
+				gpu_crit: gpu_crit {
+					temperature = <115000>; /* millicelsius */
+					hysteresis = <2000>; /* millicelsius */
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&gpu_alert0>;
+					cooling-device =
+					<&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
 	};
 
 	tsadc: tsadc@ff280000 {
@@ -555,6 +619,18 @@
 		status = "disabled";
 	};
 
+	mbox: mbox@ff6b0000 {
+		compatible = "rockchip,rk3368-mailbox";
+		reg = <0x0 0xff6b0000 0x0 0x1000>;
+		interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru PCLK_MAILBOX>;
+		clock-names = "pclk_mailbox";
+		#mbox-cells = <1>;
+	};
+
 	pmugrf: syscon@ff738000 {
 		compatible = "rockchip,rk3368-pmugrf", "syscon";
 		reg = <0x0 0xff738000 0x0 0x1000>;
@@ -926,11 +1002,11 @@
 
 		tsadc {
 			otp_gpio: otp-gpio {
-				rockchip,pins = <0 10 RK_FUNC_GPIO &pcfg_pull_none>;
+				rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_none>;
 			};
 
 			otp_out: otp-out {
-				rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>;
+				rockchip,pins = <0 3 RK_FUNC_1 &pcfg_pull_none>;
 			};
 		};
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
new file mode 100644
index 0000000..1a3eb14
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include <dt-bindings/pwm/pwm.h>
+#include "rk3399.dtsi"
+
+/ {
+	model = "Rockchip RK3399 Evaluation Board";
+	compatible = "rockchip,rk3399-evb", "rockchip,rk3399",
+		     "google,rk3399evb-rev2";
+
+	vdd_center: vdd-center {
+		compatible = "pwm-regulator";
+		pwms = <&pwm3 0 25000 0>;
+		regulator-name = "vdd_center";
+		regulator-min-microvolt = <800000>;
+		regulator-max-microvolt = <1400000>;
+		regulator-always-on;
+		regulator-boot-on;
+		status = "okay";
+	};
+
+	vcc3v3_sys: vcc3v3-sys {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc3v3_sys";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vcc_phy: vcc-phy-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_phy";
+		regulator-always-on;
+		regulator-boot-on;
+	};
+};
+
+&pwm0 {
+	status = "okay";
+};
+
+&pwm2 {
+	status = "okay";
+};
+
+&pwm3 {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&usb_host0_ehci {
+	status = "okay";
+};
+
+&usb_host0_ohci {
+	status = "okay";
+};
+
+&usb_host1_ehci {
+	status = "okay";
+};
+
+&usb_host1_ohci {
+	status = "okay";
+};
+
+&pinctrl {
+	pmic {
+		pmic_int_l: pmic-int-l {
+			rockchip,pins =
+				<1 21 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+
+		pmic_dvs2: pmic-dvs2 {
+			rockchip,pins =
+				<1 18 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
new file mode 100644
index 0000000..46f325a
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -0,0 +1,1013 @@
+/*
+ * Copyright (c) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/clock/rk3399-cru.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+/ {
+	compatible = "rockchip,rk3399";
+
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+		serial2 = &uart2;
+		serial3 = &uart3;
+		serial4 = &uart4;
+	};
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&cpu_l0>;
+				};
+				core1 {
+					cpu = <&cpu_l1>;
+				};
+				core2 {
+					cpu = <&cpu_l2>;
+				};
+				core3 {
+					cpu = <&cpu_l3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&cpu_b0>;
+				};
+				core1 {
+					cpu = <&cpu_b1>;
+				};
+			};
+		};
+
+		cpu_l0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			#cooling-cells = <2>; /* min followed by max */
+			clocks = <&cru ARMCLKL>;
+		};
+
+		cpu_l1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x1>;
+			enable-method = "psci";
+			clocks = <&cru ARMCLKL>;
+		};
+
+		cpu_l2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x2>;
+			enable-method = "psci";
+			clocks = <&cru ARMCLKL>;
+		};
+
+		cpu_l3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x3>;
+			enable-method = "psci";
+			clocks = <&cru ARMCLKL>;
+		};
+
+		cpu_b0: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x0 0x100>;
+			enable-method = "psci";
+			#cooling-cells = <2>; /* min followed by max */
+			clocks = <&cru ARMCLKB>;
+		};
+
+		cpu_b1: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x0 0x101>;
+			enable-method = "psci";
+			clocks = <&cru ARMCLKB>;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	xin24m: xin24m {
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+		clock-output-names = "xin24m";
+		#clock-cells = <0>;
+	};
+
+	amba {
+		compatible = "arm,amba-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		dmac_bus: dma-controller@ff6d0000 {
+			compatible = "arm,pl330", "arm,primecell";
+			reg = <0x0 0xff6d0000 0x0 0x4000>;
+			interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+			#dma-cells = <1>;
+			clocks = <&cru ACLK_DMAC0_PERILP>;
+			clock-names = "apb_pclk";
+		};
+
+		dmac_peri: dma-controller@ff6e0000 {
+			compatible = "arm,pl330", "arm,primecell";
+			reg = <0x0 0xff6e0000 0x0 0x4000>;
+			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+			#dma-cells = <1>;
+			clocks = <&cru ACLK_DMAC1_PERILP>;
+			clock-names = "apb_pclk";
+		};
+	};
+
+	sdio0: dwmmc@fe310000 {
+		compatible = "rockchip,rk3399-dw-mshc",
+			     "rockchip,rk3288-dw-mshc";
+		reg = <0x0 0xfe310000 0x0 0x4000>;
+		interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+		clock-freq-min-max = <400000 150000000>;
+		clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
+			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+		fifo-depth = <0x100>;
+		status = "disabled";
+	};
+
+	sdmmc: dwmmc@fe320000 {
+		compatible = "rockchip,rk3399-dw-mshc",
+			     "rockchip,rk3288-dw-mshc";
+		reg = <0x0 0xfe320000 0x0 0x4000>;
+		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+		clock-freq-min-max = <400000 150000000>;
+		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+		fifo-depth = <0x100>;
+		status = "disabled";
+	};
+
+	usb_host0_ehci: usb@fe380000 {
+		compatible = "generic-ehci";
+		reg = <0x0 0xfe380000 0x0 0x20000>;
+		interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>;
+		clock-names = "hclk_host0", "hclk_host0_arb";
+		status = "disabled";
+	};
+
+	usb_host0_ohci: usb@fe3a0000 {
+		compatible = "generic-ohci";
+		reg = <0x0 0xfe3a0000 0x0 0x20000>;
+		interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST0>, <&cru HCLK_HOST0_ARB>;
+		clock-names = "hclk_host0", "hclk_host0_arb";
+		status = "disabled";
+	};
+
+	usb_host1_ehci: usb@fe3c0000 {
+		compatible = "generic-ehci";
+		reg = <0x0 0xfe3c0000 0x0 0x20000>;
+		interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>;
+		clock-names = "hclk_host1", "hclk_host1_arb";
+		status = "disabled";
+	};
+
+	usb_host1_ohci: usb@fe3e0000 {
+		compatible = "generic-ohci";
+		reg = <0x0 0xfe3e0000 0x0 0x20000>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST1>, <&cru HCLK_HOST1_ARB>;
+		clock-names = "hclk_host1", "hclk_host1_arb";
+		status = "disabled";
+	};
+
+	gic: interrupt-controller@fee00000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		interrupt-controller;
+
+		reg = <0x0 0xfee00000 0 0x10000>, /* GICD */
+		      <0x0 0xfef00000 0 0xc0000>, /* GICR */
+		      <0x0 0xfff00000 0 0x10000>, /* GICC */
+		      <0x0 0xfff10000 0 0x10000>, /* GICH */
+		      <0x0 0xfff20000 0 0x10000>; /* GICV */
+		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+		its: interrupt-controller@fee20000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			reg = <0x0 0xfee20000 0x0 0x20000>;
+		};
+	};
+
+	uart0: serial@ff180000 {
+		compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
+		reg = <0x0 0xff180000 0x0 0x100>;
+		clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
+		clock-names = "baudclk", "apb_pclk";
+		interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&uart0_xfer>;
+		status = "disabled";
+	};
+
+	uart1: serial@ff190000 {
+		compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
+		reg = <0x0 0xff190000 0x0 0x100>;
+		clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+		clock-names = "baudclk", "apb_pclk";
+		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&uart1_xfer>;
+		status = "disabled";
+	};
+
+	uart2: serial@ff1a0000 {
+		compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
+		reg = <0x0 0xff1a0000 0x0 0x100>;
+		clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+		clock-names = "baudclk", "apb_pclk";
+		interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&uart2c_xfer>;
+		status = "disabled";
+	};
+
+	uart3: serial@ff1b0000 {
+		compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
+		reg = <0x0 0xff1b0000 0x0 0x100>;
+		clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
+		clock-names = "baudclk", "apb_pclk";
+		interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&uart3_xfer>;
+		status = "disabled";
+	};
+
+	spi0: spi@ff1c0000 {
+		compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+		reg = <0x0 0xff1c0000 0x0 0x1000>;
+		clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>;
+		clock-names = "spiclk", "apb_pclk";
+		interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	spi1: spi@ff1d0000 {
+		compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+		reg = <0x0 0xff1d0000 0x0 0x1000>;
+		clocks = <&cru SCLK_SPI1>, <&cru PCLK_SPI1>;
+		clock-names = "spiclk", "apb_pclk";
+		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&spi1_clk &spi1_tx &spi1_rx &spi1_cs0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	spi2: spi@ff1e0000 {
+		compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+		reg = <0x0 0xff1e0000 0x0 0x1000>;
+		clocks = <&cru SCLK_SPI2>, <&cru PCLK_SPI2>;
+		clock-names = "spiclk", "apb_pclk";
+		interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&spi2_clk &spi2_tx &spi2_rx &spi2_cs0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	spi4: spi@ff1f0000 {
+		compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+		reg = <0x0 0xff1f0000 0x0 0x1000>;
+		clocks = <&cru SCLK_SPI4>, <&cru PCLK_SPI4>;
+		clock-names = "spiclk", "apb_pclk";
+		interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&spi4_clk &spi4_tx &spi4_rx &spi4_cs0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	spi5: spi@ff200000 {
+		compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+		reg = <0x0 0xff200000 0x0 0x1000>;
+		clocks = <&cru SCLK_SPI5>, <&cru PCLK_SPI5>;
+		clock-names = "spiclk", "apb_pclk";
+		interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&spi5_clk &spi5_tx &spi5_rx &spi5_cs0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	pmugrf: syscon@ff320000 {
+		compatible = "rockchip,rk3399-pmugrf", "syscon";
+		reg = <0x0 0xff320000 0x0 0x1000>;
+	};
+
+	spi3: spi@ff350000 {
+		compatible = "rockchip,rk3399-spi", "rockchip,rk3066-spi";
+		reg = <0x0 0xff350000 0x0 0x1000>;
+		clocks = <&pmucru SCLK_SPI3_PMU>, <&pmucru PCLK_SPI3_PMU>;
+		clock-names = "spiclk", "apb_pclk";
+		interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&spi3_clk &spi3_tx &spi3_rx &spi3_cs0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	uart4: serial@ff370000 {
+		compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
+		reg = <0x0 0xff370000 0x0 0x100>;
+		clocks = <&pmucru SCLK_UART4_PMU>, <&pmucru PCLK_UART4_PMU>;
+		clock-names = "baudclk", "apb_pclk";
+		interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&uart4_xfer>;
+		status = "disabled";
+	};
+
+	pwm0: pwm@ff420000 {
+		compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
+		reg = <0x0 0xff420000 0x0 0x10>;
+		#pwm-cells = <3>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwm0_pin>;
+		clocks = <&pmucru PCLK_RKPWM_PMU>;
+		clock-names = "pwm";
+		status = "disabled";
+	};
+
+	pwm1: pwm@ff420010 {
+		compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
+		reg = <0x0 0xff420010 0x0 0x10>;
+		#pwm-cells = <3>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwm1_pin>;
+		clocks = <&pmucru PCLK_RKPWM_PMU>;
+		clock-names = "pwm";
+		status = "disabled";
+	};
+
+	pwm2: pwm@ff420020 {
+		compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
+		reg = <0x0 0xff420020 0x0 0x10>;
+		#pwm-cells = <3>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwm2_pin>;
+		clocks = <&pmucru PCLK_RKPWM_PMU>;
+		clock-names = "pwm";
+		status = "disabled";
+	};
+
+	pwm3: pwm@ff420030 {
+		compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
+		reg = <0x0 0xff420030 0x0 0x10>;
+		#pwm-cells = <3>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwm3a_pin>;
+		clocks = <&pmucru PCLK_RKPWM_PMU>;
+		clock-names = "pwm";
+		status = "disabled";
+	};
+
+	pmucru: pmu-clock-controller@ff750000 {
+		compatible = "rockchip,rk3399-pmucru";
+		reg = <0x0 0xff750000 0x0 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+		assigned-clocks = <&pmucru PLL_PPLL>;
+		assigned-clock-rates = <676000000>;
+	};
+
+	cru: clock-controller@ff760000 {
+		compatible = "rockchip,rk3399-cru";
+		reg = <0x0 0xff760000 0x0 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	grf: syscon@ff770000 {
+		compatible = "rockchip,rk3399-grf", "syscon";
+		reg = <0x0 0xff770000 0x0 0x10000>;
+	};
+
+	watchdog@ff840000 {
+		compatible = "snps,dw-wdt";
+		reg = <0x0 0xff840000 0x0 0x100>;
+		clocks = <&cru PCLK_WDT>;
+		interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	spdif: spdif@ff870000 {
+		compatible = "rockchip,rk3399-spdif";
+		reg = <0x0 0xff870000 0x0 0x1000>;
+		interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+		dmas = <&dmac_bus 7>;
+		dma-names = "tx";
+		clock-names = "mclk", "hclk";
+		clocks = <&cru SCLK_SPDIF_8CH>, <&cru HCLK_SPDIF>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&spdif_bus>;
+		status = "disabled";
+	};
+
+	i2s0: i2s@ff880000 {
+		compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
+		reg = <0x0 0xff880000 0x0 0x1000>;
+		rockchip,grf = <&grf>;
+		interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+		dmas = <&dmac_bus 0>, <&dmac_bus 1>;
+		dma-names = "tx", "rx";
+		clock-names = "i2s_clk", "i2s_hclk";
+		clocks = <&cru SCLK_I2S0_8CH>, <&cru HCLK_I2S0_8CH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2s0_8ch_bus>;
+		status = "disabled";
+	};
+
+	i2s1: i2s@ff890000 {
+		compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
+		reg = <0x0 0xff890000 0x0 0x1000>;
+		interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+		dmas = <&dmac_bus 2>, <&dmac_bus 3>;
+		dma-names = "tx", "rx";
+		clock-names = "i2s_clk", "i2s_hclk";
+		clocks = <&cru SCLK_I2S1_8CH>, <&cru HCLK_I2S1_8CH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2s1_2ch_bus>;
+		status = "disabled";
+	};
+
+	i2s2: i2s@ff8a0000 {
+		compatible = "rockchip,rk3399-i2s", "rockchip,rk3066-i2s";
+		reg = <0x0 0xff8a0000 0x0 0x1000>;
+		interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+		dmas = <&dmac_bus 4>, <&dmac_bus 5>;
+		dma-names = "tx", "rx";
+		clock-names = "i2s_clk", "i2s_hclk";
+		clocks = <&cru SCLK_I2S2_8CH>, <&cru HCLK_I2S2_8CH>;
+		status = "disabled";
+	};
+
+	pinctrl: pinctrl {
+		compatible = "rockchip,rk3399-pinctrl";
+		rockchip,grf = <&grf>;
+		rockchip,pmu = <&pmugrf>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		gpio0: gpio0@ff720000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x0 0xff720000 0x0 0x100>;
+			clocks = <&pmucru PCLK_GPIO0_PMU>;
+			interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+
+			gpio-controller;
+			#gpio-cells = <0x2>;
+
+			interrupt-controller;
+			#interrupt-cells = <0x2>;
+		};
+
+		gpio1: gpio1@ff730000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x0 0xff730000 0x0 0x100>;
+			clocks = <&pmucru PCLK_GPIO1_PMU>;
+			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+
+			gpio-controller;
+			#gpio-cells = <0x2>;
+
+			interrupt-controller;
+			#interrupt-cells = <0x2>;
+		};
+
+		gpio2: gpio2@ff780000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x0 0xff780000 0x0 0x100>;
+			clocks = <&cru PCLK_GPIO2>;
+			interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+
+			gpio-controller;
+			#gpio-cells = <0x2>;
+
+			interrupt-controller;
+			#interrupt-cells = <0x2>;
+		};
+
+		gpio3: gpio3@ff788000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x0 0xff788000 0x0 0x100>;
+			clocks = <&cru PCLK_GPIO3>;
+			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+
+			gpio-controller;
+			#gpio-cells = <0x2>;
+
+			interrupt-controller;
+			#interrupt-cells = <0x2>;
+		};
+
+		gpio4: gpio4@ff790000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x0 0xff790000 0x0 0x100>;
+			clocks = <&cru PCLK_GPIO4>;
+			interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+
+			gpio-controller;
+			#gpio-cells = <0x2>;
+
+			interrupt-controller;
+			#interrupt-cells = <0x2>;
+		};
+
+		pcfg_pull_up: pcfg-pull-up {
+			bias-pull-up;
+		};
+
+		pcfg_pull_down: pcfg-pull-down {
+			bias-pull-down;
+		};
+
+		pcfg_pull_none: pcfg-pull-none {
+			bias-disable;
+		};
+
+		pcfg_pull_none_12ma: pcfg-pull-none-12ma {
+			bias-disable;
+			drive-strength = <12>;
+		};
+
+		pcfg_pull_up_8ma: pcfg-pull-up-8ma {
+			bias-pull-up;
+			drive-strength = <8>;
+		};
+
+		pcfg_pull_down_4ma: pcfg-pull-down-4ma {
+			bias-pull-down;
+			drive-strength = <4>;
+		};
+
+		pcfg_pull_up_2ma: pcfg-pull-up-2ma {
+			bias-pull-up;
+			drive-strength = <2>;
+		};
+
+		pcfg_pull_down_12ma: pcfg-pull-down-12ma {
+			bias-pull-down;
+			drive-strength = <12>;
+		};
+
+		pcfg_pull_none_13ma: pcfg-pull-none-13ma {
+			bias-disable;
+			drive-strength = <13>;
+		};
+
+		i2c0 {
+			i2c0_xfer: i2c0-xfer {
+				rockchip,pins =
+					<1 15 RK_FUNC_2 &pcfg_pull_none>,
+					<1 16 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		i2c1 {
+			i2c1_xfer: i2c1-xfer {
+				rockchip,pins =
+					<4 2 RK_FUNC_1 &pcfg_pull_none>,
+					<4 1 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2c2 {
+			i2c2_xfer: i2c2-xfer {
+				rockchip,pins =
+					<2 1 RK_FUNC_2 &pcfg_pull_none_12ma>,
+					<2 0 RK_FUNC_2 &pcfg_pull_none_12ma>;
+			};
+		};
+
+		i2c3 {
+			i2c3_xfer: i2c3-xfer {
+				rockchip,pins =
+					<4 17 RK_FUNC_1 &pcfg_pull_none>,
+					<4 16 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2c4 {
+			i2c4_xfer: i2c4-xfer {
+				rockchip,pins =
+					<1 12 RK_FUNC_1 &pcfg_pull_none>,
+					<1 11 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2c5 {
+			i2c5_xfer: i2c5-xfer {
+				rockchip,pins =
+					<3 11 RK_FUNC_2 &pcfg_pull_none>,
+					<3 10 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		i2c6 {
+			i2c6_xfer: i2c6-xfer {
+				rockchip,pins =
+					<2 10 RK_FUNC_2 &pcfg_pull_none>,
+					<2 9 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		i2c7 {
+			i2c7_xfer: i2c7-xfer {
+				rockchip,pins =
+					<2 8 RK_FUNC_2 &pcfg_pull_none>,
+					<2 7 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		i2c8 {
+			i2c8_xfer: i2c8-xfer {
+				rockchip,pins =
+					<1 21 RK_FUNC_1 &pcfg_pull_none>,
+					<1 20 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2s0 {
+			i2s0_8ch_bus: i2s0-8ch-bus {
+				rockchip,pins =
+					<3 24 RK_FUNC_1 &pcfg_pull_none>,
+					<3 25 RK_FUNC_1 &pcfg_pull_none>,
+					<3 26 RK_FUNC_1 &pcfg_pull_none>,
+					<3 27 RK_FUNC_1 &pcfg_pull_none>,
+					<3 28 RK_FUNC_1 &pcfg_pull_none>,
+					<3 29 RK_FUNC_1 &pcfg_pull_none>,
+					<3 30 RK_FUNC_1 &pcfg_pull_none>,
+					<3 31 RK_FUNC_1 &pcfg_pull_none>,
+					<4 0 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2s1 {
+			i2s1_2ch_bus: i2s1-2ch-bus {
+				rockchip,pins =
+					<4 3 RK_FUNC_1 &pcfg_pull_none>,
+					<4 4 RK_FUNC_1 &pcfg_pull_none>,
+					<4 5 RK_FUNC_1 &pcfg_pull_none>,
+					<4 6 RK_FUNC_1 &pcfg_pull_none>,
+					<4 7 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		spdif {
+			spdif_bus: spdif-bus {
+				rockchip,pins =
+					<4 21 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		spi0 {
+			spi0_clk: spi0-clk {
+				rockchip,pins =
+					<3 6 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi0_cs0: spi0-cs0 {
+				rockchip,pins =
+					<3 7 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi0_cs1: spi0-cs1 {
+				rockchip,pins =
+					<3 8 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi0_tx: spi0-tx {
+				rockchip,pins =
+					<3 5 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi0_rx: spi0-rx {
+				rockchip,pins =
+					<3 4 RK_FUNC_2 &pcfg_pull_up>;
+			};
+		};
+
+		spi1 {
+			spi1_clk: spi1-clk {
+				rockchip,pins =
+					<1 9 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi1_cs0: spi1-cs0 {
+				rockchip,pins =
+					<1 10 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi1_rx: spi1-rx {
+				rockchip,pins =
+					<1 7 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi1_tx: spi1-tx {
+				rockchip,pins =
+					<1 8 RK_FUNC_2 &pcfg_pull_up>;
+			};
+		};
+
+		spi2 {
+			spi2_clk: spi2-clk {
+				rockchip,pins =
+					<2 11 RK_FUNC_1 &pcfg_pull_up>;
+			};
+			spi2_cs0: spi2-cs0 {
+				rockchip,pins =
+					<2 12 RK_FUNC_1 &pcfg_pull_up>;
+			};
+			spi2_rx: spi2-rx {
+				rockchip,pins =
+					<2 9 RK_FUNC_1 &pcfg_pull_up>;
+			};
+			spi2_tx: spi2-tx {
+				rockchip,pins =
+					<2 10 RK_FUNC_1 &pcfg_pull_up>;
+			};
+		};
+
+		spi3 {
+			spi3_clk: spi3-clk {
+				rockchip,pins =
+					<1 17 RK_FUNC_1 &pcfg_pull_up>;
+			};
+			spi3_cs0: spi3-cs0 {
+				rockchip,pins =
+					<1 18 RK_FUNC_1 &pcfg_pull_up>;
+			};
+			spi3_rx: spi3-rx {
+				rockchip,pins =
+					<1 15 RK_FUNC_1 &pcfg_pull_up>;
+			};
+			spi3_tx: spi3-tx {
+				rockchip,pins =
+					<1 16 RK_FUNC_1 &pcfg_pull_up>;
+			};
+		};
+
+		spi4 {
+			spi4_clk: spi4-clk {
+				rockchip,pins =
+					<3 2 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi4_cs0: spi4-cs0 {
+				rockchip,pins =
+					<3 3 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi4_rx: spi4-rx {
+				rockchip,pins =
+					<3 0 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi4_tx: spi4-tx {
+				rockchip,pins =
+					<3 1 RK_FUNC_2 &pcfg_pull_up>;
+			};
+		};
+
+		spi5 {
+			spi5_clk: spi5-clk {
+				rockchip,pins =
+					<2 22 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi5_cs0: spi5-cs0 {
+				rockchip,pins =
+					<2 23 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi5_rx: spi5-rx {
+				rockchip,pins =
+					<2 20 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi5_tx: spi5-tx {
+				rockchip,pins =
+					<2 21 RK_FUNC_2 &pcfg_pull_up>;
+			};
+		};
+
+		uart0 {
+			uart0_xfer: uart0-xfer {
+				rockchip,pins =
+					<2 16 RK_FUNC_1 &pcfg_pull_up>,
+					<2 17 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			uart0_cts: uart0-cts {
+				rockchip,pins =
+					<2 18 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			uart0_rts: uart0-rts {
+				rockchip,pins =
+					<2 19 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		uart1 {
+			uart1_xfer: uart1-xfer {
+				rockchip,pins =
+					<3 12 RK_FUNC_2 &pcfg_pull_up>,
+					<3 13 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		uart2a {
+			uart2a_xfer: uart2a-xfer {
+				rockchip,pins =
+					<4 8 RK_FUNC_2 &pcfg_pull_up>,
+					<4 9 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		uart2b {
+			uart2b_xfer: uart2b-xfer {
+				rockchip,pins =
+					<4 16 RK_FUNC_2 &pcfg_pull_up>,
+					<4 17 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		uart2c {
+			uart2c_xfer: uart2c-xfer {
+				rockchip,pins =
+					<4 19 RK_FUNC_1 &pcfg_pull_up>,
+					<4 20 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		uart3 {
+			uart3_xfer: uart3-xfer {
+				rockchip,pins =
+					<3 14 RK_FUNC_2 &pcfg_pull_up>,
+					<3 15 RK_FUNC_2 &pcfg_pull_none>;
+			};
+
+			uart3_cts: uart3-cts {
+				rockchip,pins =
+					<3 18 RK_FUNC_2 &pcfg_pull_none>;
+			};
+
+			uart3_rts: uart3-rts {
+				rockchip,pins =
+					<3 19 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		uart4 {
+			uart4_xfer: uart4-xfer {
+				rockchip,pins =
+					<1 7 RK_FUNC_1 &pcfg_pull_up>,
+					<1 8 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		uarthdcp {
+			uarthdcp_xfer: uarthdcp-xfer {
+				rockchip,pins =
+					<4 21 RK_FUNC_2 &pcfg_pull_up>,
+					<4 22 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		pwm0 {
+			pwm0_pin: pwm0-pin {
+				rockchip,pins =
+					<4 18 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			vop0_pwm_pin: vop0-pwm-pin {
+				rockchip,pins =
+					<4 18 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		pwm1 {
+			pwm1_pin: pwm1-pin {
+				rockchip,pins =
+					<4 22 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			vop1_pwm_pin: vop1-pwm-pin {
+				rockchip,pins =
+					<4 18 RK_FUNC_3 &pcfg_pull_none>;
+			};
+		};
+
+		pwm2 {
+			pwm2_pin: pwm2-pin {
+				rockchip,pins =
+					<1 19 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		pwm3a {
+			pwm3a_pin: pwm3a-pin {
+				rockchip,pins =
+					<0 6 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		pwm3b {
+			pwm3b_pin: pwm3b-pin {
+				rockchip,pins =
+					<1 14 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
index 727ae5f..2adad8c 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
@@ -44,6 +44,7 @@
 
 /dts-v1/;
 /include/ "uniphier-ph1-ld20.dtsi"
+/include/ "uniphier-ref-daughter.dtsi"
 /include/ "uniphier-support-card.dtsi"
 
 / {
@@ -70,7 +71,6 @@
 		i2c3 = &i2c3;
 		i2c4 = &i2c4;
 		i2c5 = &i2c5;
-		i2c6 = &i2c6;
 	};
 };
 
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
index e682a3f..9532880 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
@@ -106,6 +106,12 @@
 	};
 
 	clocks {
+		refclk: ref {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+
 		uart_clk: uart_clk {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
@@ -201,15 +207,12 @@
 
 		i2c2: i2c@58782000 {
 			compatible = "socionext,uniphier-fi2c";
-			status = "disabled";
 			reg = <0x58782000 0x80>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			interrupts = <0 43 4>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_i2c2>;
 			clocks = <&i2c_clk>;
-			clock-frequency = <100000>;
+			clock-frequency = <400000>;
 		};
 
 		i2c3: i2c@58783000 {
@@ -227,12 +230,15 @@
 
 		i2c4: i2c@58784000 {
 			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
 			reg = <0x58784000 0x80>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			interrupts = <0 45 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c4>;
 			clocks = <&i2c_clk>;
-			clock-frequency = <400000>;
+			clock-frequency = <100000>;
 		};
 
 		i2c5: i2c@58785000 {
@@ -245,16 +251,6 @@
 			clock-frequency = <400000>;
 		};
 
-		i2c6: i2c@58786000 {
-			compatible = "socionext,uniphier-fi2c";
-			reg = <0x58786000 0x80>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			interrupts = <0 26 4>;
-			clocks = <&i2c_clk>;
-			clock-frequency = <400000>;
-		};
-
 		system_bus: system-bus@58c00000 {
 			compatible = "socionext,uniphier-system-bus";
 			status = "disabled";
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ref-daughter.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ref-daughter.dtsi
new file mode 120000
index 0000000..4685a8d
--- /dev/null
+++ b/arch/arm64/boot/dts/socionext/uniphier-ref-daughter.dtsi
@@ -0,0 +1 @@
+../../../../arm/boot/dts/uniphier-ref-daughter.dtsi
\ No newline at end of file
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f705051..8917150 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,7 +1,6 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -36,8 +35,10 @@
 CONFIG_ARCH_BERLIN=y
 CONFIG_ARCH_EXYNOS=y
 CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
 CONFIG_ARCH_MVEBU=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_ROCKCHIP=y
@@ -56,23 +57,26 @@
 CONFIG_PCI=y
 CONFIG_PCI_MSI=y
 CONFIG_PCI_IOV=y
-CONFIG_PCI_RCAR_GEN2_PCIE=y
+CONFIG_PCIE_RCAR=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_PCI_LAYERSCAPE=y
 CONFIG_PCI_HISI=y
 CONFIG_PCIE_QCOM=y
+CONFIG_ARM64_VA_BITS_48=y
 CONFIG_SCHED_MC=y
 CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
 CONFIG_XEN=y
-CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -80,16 +84,21 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 CONFIG_BPF_JIT=y
-# CONFIG_WIRELESS is not set
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=m
 CONFIG_NET_9P=y
 CONFIG_NET_9P_VIRTIO=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DMA_CMA=y
+CONFIG_MTD=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
 # CONFIG_SCSI_PROC_FS is not set
@@ -101,7 +110,9 @@
 CONFIG_AHCI_CEVA=y
 CONFIG_AHCI_MVEBU=y
 CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
 CONFIG_SATA_RCAR=y
+CONFIG_SATA_SIL24=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
@@ -117,7 +128,18 @@
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
 CONFIG_MICREL_PHY=y
-# CONFIG_WLAN is not set
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_SERIO_SERPORT is not set
@@ -137,6 +159,8 @@
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=11
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_XILINX_PS_UART=y
@@ -144,16 +168,23 @@
 CONFIG_SERIAL_MVEBU_UART=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_IMX=y
 CONFIG_I2C_MV64XXX=y
 CONFIG_I2C_QUP=y
+CONFIG_I2C_TEGRA=y
 CONFIG_I2C_UNIPHIER_F=y
 CONFIG_I2C_RCAR=y
 CONFIG_SPI=y
+CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
+CONFIG_PINCTRL_SINGLE=y
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_SYSFS=y
@@ -164,14 +195,17 @@
 CONFIG_POWER_RESET_MSM=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
-# CONFIG_HWMON is not set
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_INA2XX=m
 CONFIG_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_HI655X_PMIC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_HI655X=y
 CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_QCOM_SPMI=y
 CONFIG_REGULATOR_S2MPS11=y
@@ -189,13 +223,14 @@
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_XHCI_RCAR=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_MSM=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC2=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
@@ -205,32 +240,36 @@
 CONFIG_USB_ULPI=y
 CONFIG_USB_GADGET=y
 CONFIG_MMC=y
-CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SPI=y
-CONFIG_MMC_SUNXI=y
 CONFIG_MMC_DW=y
 CONFIG_MMC_DW_EXYNOS=y
-CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_SUNXI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_SYSCON=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
 CONFIG_RTC_DRV_EFI=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_XGENE=y
 CONFIG_DMADEVICES=y
-CONFIG_QCOM_BAM_DMA=y
+CONFIG_PL330_DMA=y
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_QCOM_BAM_DMA=y
 CONFIG_RCAR_DMAC=y
 CONFIG_VFIO=y
 CONFIG_VFIO_PCI=y
@@ -239,20 +278,28 @@
 CONFIG_VIRTIO_MMIO=y
 CONFIG_XEN_GNTDEV=y
 CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_SCPI=y
 CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_CLK_QORIQ=y
 CONFIG_COMMON_CLK_QCOM=y
 CONFIG_MSM_GCC_8916=y
 CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_ARM_MHU=y
+CONFIG_HI6220_MBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMD=y
 CONFIG_QCOM_SMD_RPM=y
 CONFIG_ARCH_TEGRA_132_SOC=y
 CONFIG_ARCH_TEGRA_210_SOC=y
-CONFIG_HISILICON_IRQ_MBIGEN=y
 CONFIG_EXTCON_USB_GPIO=y
+CONFIG_COMMON_RESET_HI6220=y
 CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_XGENE=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_ACPI=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_FANOTIFY=y
@@ -264,6 +311,7 @@
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
 CONFIG_EFIVAR_FS=y
 CONFIG_SQUASHFS=y
 CONFIG_NFS_FS=y
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 70f7b9e..10b017c 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -1,5 +1,5 @@
 /*
- * Based on arch/arm/include/asm/assembler.h
+ * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
  *
  * Copyright (C) 1996-2000 Russell King
  * Copyright (C) 2012 ARM Ltd.
@@ -23,22 +23,13 @@
 #ifndef __ASM_ASSEMBLER_H
 #define __ASM_ASSEMBLER_H
 
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
 /*
- * Stack pushing/popping (register pairs only). Equivalent to store decrement
- * before, load increment after.
- */
-	.macro	push, xreg1, xreg2
-	stp	\xreg1, \xreg2, [sp, #-16]!
-	.endm
-
-	.macro	pop, xreg1, xreg2
-	ldp	\xreg1, \xreg2, [sp], #16
-	.endm
-
-/*
  * Enable and disable interrupts.
  */
 	.macro	disable_irq
@@ -212,6 +203,102 @@
 	.endm
 
 /*
+ * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
+ */
+	.macro	vma_vm_mm, rd, rn
+	ldr	\rd, [\rn, #VMA_VM_MM]
+	.endm
+
+/*
+ * mmid - get context id from mm pointer (mm->context.id)
+ */
+	.macro	mmid, rd, rn
+	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+	.endm
+
+/*
+ * dcache_line_size - get the minimum D-cache line size from the CTR register.
+ */
+	.macro	dcache_line_size, reg, tmp
+	mrs	\tmp, ctr_el0			// read CTR
+	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
+	mov	\reg, #4			// bytes per word
+	lsl	\reg, \reg, \tmp		// actual cache line size
+	.endm
+
+/*
+ * icache_line_size - get the minimum I-cache line size from the CTR register.
+ */
+	.macro	icache_line_size, reg, tmp
+	mrs	\tmp, ctr_el0			// read CTR
+	and	\tmp, \tmp, #0xf		// cache line size encoding
+	mov	\reg, #4			// bytes per word
+	lsl	\reg, \reg, \tmp		// actual cache line size
+	.endm
+
+/*
+ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ */
+	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
+#ifndef CONFIG_ARM64_VA_BITS_48
+	ldr_l	\tmpreg, idmap_t0sz
+	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+#endif
+	.endm
+
+/*
+ * Macro to perform a data cache maintenance for the interval
+ * [kaddr, kaddr + size)
+ *
+ * 	op:		operation passed to dc instruction
+ * 	domain:		domain used in dsb instruction
+ * 	kaddr:		starting virtual address of the region
+ * 	size:		size of the region
+ * 	Corrupts:	kaddr, size, tmp1, tmp2
+ */
+	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+	dcache_line_size \tmp1, \tmp2
+	add	\size, \kaddr, \size
+	sub	\tmp2, \tmp1, #1
+	bic	\kaddr, \kaddr, \tmp2
+9998:	dc	\op, \kaddr
+	add	\kaddr, \kaddr, \tmp1
+	cmp	\kaddr, \size
+	b.lo	9998b
+	dsb	\domain
+	.endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+	.macro	reset_pmuserenr_el0, tmpreg
+	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	\tmpreg, \tmpreg, #8, #4
+	cmp	\tmpreg, #1			// Skip if no PMU present
+	b.lt	9000f
+	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+9000:
+	.endm
+
+/*
+ * copy_page - copy src to dest using temp registers t1-t8
+ */
+	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
+9998:	ldp	\t1, \t2, [\src]
+	ldp	\t3, \t4, [\src, #16]
+	ldp	\t5, \t6, [\src, #32]
+	ldp	\t7, \t8, [\src, #48]
+	add	\src, \src, #64
+	stnp	\t1, \t2, [\dest]
+	stnp	\t3, \t4, [\dest, #16]
+	stnp	\t5, \t6, [\dest, #32]
+	stnp	\t7, \t8, [\dest, #48]
+	add	\dest, \dest, #64
+	tst	\src, #(PAGE_SIZE - 1)
+	b.ne	9998b
+	.endm
+
+/*
  * Annotate a function as position independent, i.e., safe to be called before
  * the kernel virtual mapping is activated.
  */
@@ -233,4 +320,24 @@
 	.long	\sym\()_hi32
 	.endm
 
+	/*
+	 * mov_q - move an immediate constant into a 64-bit register using
+	 *         between 2 and 4 movz/movk instructions (depending on the
+	 *         magnitude and sign of the operand)
+	 */
+	.macro	mov_q, reg, val
+	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
+	movz	\reg, :abs_g1_s:\val
+	.else
+	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
+	movz	\reg, :abs_g2_s:\val
+	.else
+	movz	\reg, :abs_g3:\val
+	movk	\reg, :abs_g2_nc:\val
+	.endif
+	movk	\reg, :abs_g1_nc:\val
+	.endif
+	movk	\reg, :abs_g0_nc:\val
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index b9b6494..224efe7 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -35,8 +35,9 @@
 #define ARM64_ALT_PAN_NOT_UAO			10
 #define ARM64_HAS_VIRT_HOST_EXTN		11
 #define ARM64_WORKAROUND_CAVIUM_27456		12
+#define ARM64_HAS_32BIT_EL0			13
 
-#define ARM64_NCAPS				13
+#define ARM64_NCAPS				14
 
 #ifndef __ASSEMBLY__
 
@@ -77,10 +78,17 @@
 	struct arm64_ftr_bits	*ftr_bits;
 };
 
+/* scope of capability check */
+enum {
+	SCOPE_SYSTEM,
+	SCOPE_LOCAL_CPU,
+};
+
 struct arm64_cpu_capabilities {
 	const char *desc;
 	u16 capability;
-	bool (*matches)(const struct arm64_cpu_capabilities *);
+	int def_scope;			/* default scope */
+	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
 	void (*enable)(void *);		/* Called on all active CPUs */
 	union {
 		struct {	/* To be used for erratum handling only */
@@ -101,6 +109,8 @@
 
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 
+bool this_cpu_has_cap(unsigned int cap);
+
 static inline bool cpu_have_feature(unsigned int num)
 {
 	return elf_hwcap & (1UL << num);
@@ -170,12 +180,20 @@
 		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
 }
 
+static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
+{
+	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
+
+	return val == ID_AA64PFR0_EL0_32BIT_64BIT;
+}
+
 void __init setup_cpu_features(void);
 
 void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 			    const char *info);
 void check_local_cpu_errata(void);
 
+void verify_local_cpu_errata(void);
 void verify_local_cpu_capabilities(void);
 
 u64 read_system_reg(u32 id);
@@ -185,6 +203,11 @@
 	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
 }
 
+static inline bool system_supports_32bit_el0(void)
+{
+	return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+}
+
 static inline bool system_supports_mixed_endian_el0(void)
 {
 	return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index ba437f0..7dbea6c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -48,7 +48,7 @@
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			struct iommu_ops *iommu, bool coherent);
+			const struct iommu_ops *iommu, bool coherent);
 #define arch_setup_dma_ops	arch_setup_dma_ops
 
 #ifdef CONFIG_IOMMU_DMA
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 8e88a69..622db3c 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -4,6 +4,7 @@
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/neon.h>
+#include <asm/ptrace.h>
 #include <asm/tlbflush.h>
 
 #ifdef CONFIG_EFI
@@ -14,32 +15,29 @@
 
 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 
-#define efi_call_virt(f, ...)						\
+#define efi_set_mapping_permissions	efi_create_mapping
+
+#define arch_efi_call_virt_setup()					\
 ({									\
-	efi_##f##_t *__f;						\
-	efi_status_t __s;						\
-									\
 	kernel_neon_begin();						\
 	efi_virtmap_load();						\
-	__f = efi.systab->runtime->f;					\
-	__s = __f(__VA_ARGS__);						\
-	efi_virtmap_unload();						\
-	kernel_neon_end();						\
-	__s;								\
 })
 
-#define __efi_call_virt(f, ...)						\
+#define arch_efi_call_virt(f, args...)					\
 ({									\
 	efi_##f##_t *__f;						\
-									\
-	kernel_neon_begin();						\
-	efi_virtmap_load();						\
 	__f = efi.systab->runtime->f;					\
-	__f(__VA_ARGS__);						\
+	__f(args);							\
+})
+
+#define arch_efi_call_virt_teardown()					\
+({									\
 	efi_virtmap_unload();						\
 	kernel_neon_end();						\
 })
 
+#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+
 /* arch specific definitions used by the stub code */
 
 /*
@@ -50,7 +48,16 @@
 #define EFI_FDT_ALIGN	SZ_2M   /* used by allocate_new_fdt_and_exit_boot() */
 #define MAX_FDT_OFFSET	SZ_512M
 
-#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+#define efi_call_early(f, ...)		sys_table_arg->boottime->f(__VA_ARGS__)
+#define __efi_call_early(f, ...)	f(__VA_ARGS__)
+#define efi_is_64bit()			(true)
+
+#define alloc_screen_info(x...)		&screen_info
+#define free_screen_info(x...)
+
+static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+{
+}
 
 #define EFI_ALLOC_ALIGN		SZ_64K
 
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 24ed037..7a09c48 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -177,7 +177,8 @@
 
 /* AArch32 EABI. */
 #define EF_ARM_EABI_MASK		0xff000000
-#define compat_elf_check_arch(x)	(((x)->e_machine == EM_ARM) && \
+#define compat_elf_check_arch(x)	(system_supports_32bit_el0() && \
+					 ((x)->e_machine == EM_ARM) && \
 					 ((x)->e_flags & EF_ARM_EABI_MASK))
 
 #define compat_start_thread		compat_start_thread
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 5c6375d..7e51d1b 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -19,6 +19,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/sparsemem.h>
 
 /*
  * The linear mapping and the start of memory are both 2M aligned (per
@@ -86,10 +87,24 @@
  * (64k granule), or a multiple that can be mapped using contiguous bits
  * in the page tables: 32 * PMD_SIZE (16k granule)
  */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define ARM64_MEMSTART_ALIGN	SZ_512M
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT		(PMD_SHIFT + 5)
 #else
-#define ARM64_MEMSTART_ALIGN	SZ_1G
+#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
+#endif
+
+/*
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * has a direct correspondence, and needs to appear sufficiently aligned
+ * in the virtual address space.
+ */
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
+#else
+#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
 #endif
 
 #endif	/* __ASM_KERNEL_PGTABLE_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 0e391db..2cdb6b5 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -84,47 +84,43 @@
 #define HCR_INT_OVERRIDE   (HCR_FMO | HCR_IMO)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
-/* Hyp System Control Register (SCTLR_EL2) bits */
-#define SCTLR_EL2_EE	(1 << 25)
-#define SCTLR_EL2_WXN	(1 << 19)
-#define SCTLR_EL2_I	(1 << 12)
-#define SCTLR_EL2_SA	(1 << 3)
-#define SCTLR_EL2_C	(1 << 2)
-#define SCTLR_EL2_A	(1 << 1)
-#define SCTLR_EL2_M	1
-#define SCTLR_EL2_FLAGS	(SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C |	\
-			 SCTLR_EL2_SA | SCTLR_EL2_I)
-
 /* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1	((1 << 31) | (1 << 23))
-#define TCR_EL2_TBI	(1 << 20)
-#define TCR_EL2_PS	(7 << 16)
-#define TCR_EL2_PS_40B	(2 << 16)
-#define TCR_EL2_TG0	(1 << 14)
-#define TCR_EL2_SH0	(3 << 12)
-#define TCR_EL2_ORGN0	(3 << 10)
-#define TCR_EL2_IRGN0	(3 << 8)
-#define TCR_EL2_T0SZ	0x3f
-#define TCR_EL2_MASK	(TCR_EL2_TG0 | TCR_EL2_SH0 | \
-			 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
+#define TCR_EL2_RES1		((1 << 31) | (1 << 23))
+#define TCR_EL2_TBI		(1 << 20)
+#define TCR_EL2_PS_SHIFT	16
+#define TCR_EL2_PS_MASK		(7 << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_PS_40B		(2 << TCR_EL2_PS_SHIFT)
+#define TCR_EL2_TG0_MASK	TCR_TG0_MASK
+#define TCR_EL2_SH0_MASK	TCR_SH0_MASK
+#define TCR_EL2_ORGN0_MASK	TCR_ORGN0_MASK
+#define TCR_EL2_IRGN0_MASK	TCR_IRGN0_MASK
+#define TCR_EL2_T0SZ_MASK	0x3f
+#define TCR_EL2_MASK	(TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
+			 TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
 
 /* VTCR_EL2 Registers bits */
 #define VTCR_EL2_RES1		(1 << 31)
-#define VTCR_EL2_PS_MASK	(7 << 16)
-#define VTCR_EL2_TG0_MASK	(1 << 14)
-#define VTCR_EL2_TG0_4K		(0 << 14)
-#define VTCR_EL2_TG0_64K	(1 << 14)
-#define VTCR_EL2_SH0_MASK	(3 << 12)
-#define VTCR_EL2_SH0_INNER	(3 << 12)
-#define VTCR_EL2_ORGN0_MASK	(3 << 10)
-#define VTCR_EL2_ORGN0_WBWA	(1 << 10)
-#define VTCR_EL2_IRGN0_MASK	(3 << 8)
-#define VTCR_EL2_IRGN0_WBWA	(1 << 8)
-#define VTCR_EL2_SL0_MASK	(3 << 6)
-#define VTCR_EL2_SL0_LVL1	(1 << 6)
+#define VTCR_EL2_HD		(1 << 22)
+#define VTCR_EL2_HA		(1 << 21)
+#define VTCR_EL2_PS_MASK	TCR_EL2_PS_MASK
+#define VTCR_EL2_TG0_MASK	TCR_TG0_MASK
+#define VTCR_EL2_TG0_4K		TCR_TG0_4K
+#define VTCR_EL2_TG0_16K	TCR_TG0_16K
+#define VTCR_EL2_TG0_64K	TCR_TG0_64K
+#define VTCR_EL2_SH0_MASK	TCR_SH0_MASK
+#define VTCR_EL2_SH0_INNER	TCR_SH0_INNER
+#define VTCR_EL2_ORGN0_MASK	TCR_ORGN0_MASK
+#define VTCR_EL2_ORGN0_WBWA	TCR_ORGN0_WBWA
+#define VTCR_EL2_IRGN0_MASK	TCR_IRGN0_MASK
+#define VTCR_EL2_IRGN0_WBWA	TCR_IRGN0_WBWA
+#define VTCR_EL2_SL0_SHIFT	6
+#define VTCR_EL2_SL0_MASK	(3 << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_LVL1	(1 << VTCR_EL2_SL0_SHIFT)
 #define VTCR_EL2_T0SZ_MASK	0x3f
 #define VTCR_EL2_T0SZ_40B	24
-#define VTCR_EL2_VS		19
+#define VTCR_EL2_VS_SHIFT	19
+#define VTCR_EL2_VS_8BIT	(0 << VTCR_EL2_VS_SHIFT)
+#define VTCR_EL2_VS_16BIT	(1 << VTCR_EL2_VS_SHIFT)
 
 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be
@@ -135,37 +131,45 @@
  * (see hyp-init.S).
  *
  * Note that when using 4K pages, we concatenate two first level page tables
- * together.
+ * together. With 16K pages, we concatenate 16 first level page tables.
  *
  * The magic numbers used for VTTBR_X in this patch can be found in Tables
  * D4-23 and D4-25 in ARM DDI 0487A.b.
  */
+
+#define VTCR_EL2_T0SZ_IPA	VTCR_EL2_T0SZ_40B
+#define VTCR_EL2_COMMON_BITS	(VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
+				 VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1)
+
 #ifdef CONFIG_ARM64_64K_PAGES
 /*
  * Stage2 translation configuration:
- * 40bits input  (T0SZ = 24)
  * 64kB pages (TG0 = 1)
  * 2 level page tables (SL = 1)
  */
-#define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
-				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-				 VTCR_EL2_RES1)
-#define VTTBR_X		(38 - VTCR_EL2_T0SZ_40B)
-#else
+#define VTCR_EL2_TGRAN_FLAGS		(VTCR_EL2_TG0_64K | VTCR_EL2_SL0_LVL1)
+#define VTTBR_X_TGRAN_MAGIC		38
+#elif defined(CONFIG_ARM64_16K_PAGES)
 /*
  * Stage2 translation configuration:
- * 40bits input  (T0SZ = 24)
+ * 16kB pages (TG0 = 2)
+ * 2 level page tables (SL = 1)
+ */
+#define VTCR_EL2_TGRAN_FLAGS		(VTCR_EL2_TG0_16K | VTCR_EL2_SL0_LVL1)
+#define VTTBR_X_TGRAN_MAGIC		42
+#else	/* 4K */
+/*
+ * Stage2 translation configuration:
  * 4kB pages (TG0 = 0)
  * 3 level page tables (SL = 1)
  */
-#define VTCR_EL2_FLAGS		(VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
-				 VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
-				 VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
-				 VTCR_EL2_RES1)
-#define VTTBR_X		(37 - VTCR_EL2_T0SZ_40B)
+#define VTCR_EL2_TGRAN_FLAGS		(VTCR_EL2_TG0_4K | VTCR_EL2_SL0_LVL1)
+#define VTTBR_X_TGRAN_MAGIC		37
 #endif
 
+#define VTCR_EL2_FLAGS			(VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
+#define VTTBR_X				(VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
+
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
 #define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
 #define VTTBR_VMID_SHIFT  (UL(48))
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index eb7490d..7561f63 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -22,6 +22,8 @@
 
 #define ARM_EXCEPTION_IRQ	  0
 #define ARM_EXCEPTION_TRAP	  1
+/* The hyp-stub will return this for any kvm_call_hyp() call */
+#define ARM_EXCEPTION_HYP_GONE	  2
 
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT	0
 #define KVM_ARM64_DEBUG_DIRTY		(1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
@@ -40,6 +42,7 @@
 
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
+extern char __kvm_hyp_reset[];
 
 extern char __kvm_hyp_vector[];
 
@@ -54,7 +57,7 @@
 
 extern u32 __kvm_get_mdcr_el2(void);
 
-extern void __init_stage2_translation(void);
+extern u32 __init_stage2_translation(void);
 
 #endif
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 227ed47..e63d23b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -27,7 +27,6 @@
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
-#include <asm/kvm_perf_event.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
@@ -47,6 +46,8 @@
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
+unsigned long kvm_hyp_reset_entry(void);
+void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
 
 struct kvm_arch {
 	/* The VMID generation used for the virt. memory system */
@@ -294,6 +295,7 @@
 struct kvm_vcpu_stat {
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
+	u32 halt_poll_invalid;
 	u32 halt_wakeup;
 	u32 hvc_exit_stat;
 	u64 wfe_exit_stat;
@@ -353,11 +355,22 @@
 		       hyp_stack_ptr, vector_ptr);
 }
 
-static inline void kvm_arch_hardware_disable(void) {}
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+					phys_addr_t phys_idmap_start)
+{
+	/*
+	 * Call reset code, and switch back to stub hyp vectors.
+	 * Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation.
+	 */
+	__kvm_call_hyp((void *)kvm_hyp_reset_entry(),
+		       boot_pgd_ptr, phys_idmap_start);
+}
+
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
@@ -370,11 +383,12 @@
 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
 
-/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */
-
 static inline void __cpu_init_stage2(void)
 {
-	kvm_call_hyp(__init_stage2_translation);
+	u32 parange = kvm_call_hyp(__init_stage2_translation);
+
+	WARN_ONCE(parange < 40,
+		  "PARange is %d bits, unsupported configuration!", parange);
 }
 
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index a46b019..44eaff7 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -21,7 +21,6 @@
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_mmu.h>
-#include <asm/kvm_perf_event.h>
 #include <asm/sysreg.h>
 
 #define __hyp_text __section(.hyp.text) notrace
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 22732a5..f05ac27 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -45,18 +45,6 @@
  */
 #define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
 
-/*
- * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
- * levels in addition to the PGD and potentially the PUD which are
- * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
- * tables use one level of tables less than the kernel.
- */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define KVM_MMU_CACHE_MIN_PAGES	1
-#else
-#define KVM_MMU_CACHE_MIN_PAGES	2
-#endif
-
 #ifdef __ASSEMBLY__
 
 #include <asm/alternative.h>
@@ -91,6 +79,8 @@
 #define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
 #define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
 
+#include <asm/stage2_pgtable.h>
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
@@ -109,6 +99,7 @@
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_mmu_get_boot_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
@@ -121,19 +112,32 @@
 static inline void kvm_clean_pte(pte_t *pte) {}
 static inline void kvm_clean_pte_entry(pte_t *pte) {}
 
-static inline void kvm_set_s2pte_writable(pte_t *pte)
+static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
 {
-	pte_val(*pte) |= PTE_S2_RDWR;
+	pte_val(pte) |= PTE_S2_RDWR;
+	return pte;
 }
 
-static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
 {
-	pmd_val(*pmd) |= PMD_S2_RDWR;
+	pmd_val(pmd) |= PMD_S2_RDWR;
+	return pmd;
 }
 
 static inline void kvm_set_s2pte_readonly(pte_t *pte)
 {
-	pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
+	pteval_t pteval;
+	unsigned long tmp;
+
+	asm volatile("//	kvm_set_s2pte_readonly\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
+	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
+	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
 }
 
 static inline bool kvm_s2pte_readonly(pte_t *pte)
@@ -143,69 +147,12 @@
 
 static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
 {
-	pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
+	kvm_set_s2pte_readonly((pte_t *)pmd);
 }
 
 static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 {
-	return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
-}
-
-
-#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
-#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
-#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)
-
-/*
- * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can address
- * the entire IPA input range with a single pgd entry, and we would only need
- * one pgd entry.  Note that in this case, the pgd is actually not used by
- * the MMU for Stage-2 translations, but is merely a fake pgd used as a data
- * structure for the kernel pgtable macros to work.
- */
-#if PGDIR_SHIFT > KVM_PHYS_SHIFT
-#define PTRS_PER_S2_PGD_SHIFT	0
-#else
-#define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
-#endif
-#define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
-
-#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
-
-/*
- * If we are concatenating first level stage-2 page tables, we would have less
- * than or equal to 16 pointers in the fake PGD, because that's what the
- * architecture allows.  In this case, (4 - CONFIG_PGTABLE_LEVELS)
- * represents the first level for the host, and we add 1 to go to the next
- * level (which uses contatenation) for the stage-2 tables.
- */
-#if PTRS_PER_S2_PGD <= 16
-#define KVM_PREALLOC_LEVEL	(4 - CONFIG_PGTABLE_LEVELS + 1)
-#else
-#define KVM_PREALLOC_LEVEL	(0)
-#endif
-
-static inline void *kvm_get_hwpgd(struct kvm *kvm)
-{
-	pgd_t *pgd = kvm->arch.pgd;
-	pud_t *pud;
-
-	if (KVM_PREALLOC_LEVEL == 0)
-		return pgd;
-
-	pud = pud_offset(pgd, 0);
-	if (KVM_PREALLOC_LEVEL == 1)
-		return pud;
-
-	BUG_ON(KVM_PREALLOC_LEVEL != 2);
-	return pmd_offset(pud, 0);
-}
-
-static inline unsigned int kvm_get_hwpgd_size(void)
-{
-	if (KVM_PREALLOC_LEVEL > 0)
-		return PTRS_PER_S2_PGD * PAGE_SIZE;
-	return PTRS_PER_S2_PGD * sizeof(pgd_t);
+	return kvm_s2pte_readonly((pte_t *)pmd);
 }
 
 static inline bool kvm_page_empty(void *ptr)
@@ -214,23 +161,20 @@
 	return page_count(ptr_page) == 1;
 }
 
-#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
 
 #ifdef __PAGETABLE_PMD_FOLDED
-#define kvm_pmd_table_empty(kvm, pmdp) (0)
+#define hyp_pmd_table_empty(pmdp) (0)
 #else
-#define kvm_pmd_table_empty(kvm, pmdp) \
-	(kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
+#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
 #endif
 
 #ifdef __PAGETABLE_PUD_FOLDED
-#define kvm_pud_table_empty(kvm, pudp) (0)
+#define hyp_pud_table_empty(pudp) (0)
 #else
-#define kvm_pud_table_empty(kvm, pudp) \
-	(kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
+#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
 #endif
 
-
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h
deleted file mode 100644
index c18fdeb..0000000
--- a/arch/arm64/include/asm/kvm_perf_event.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ASM_KVM_PERF_EVENT_H
-#define __ASM_KVM_PERF_EVENT_H
-
-#define	ARMV8_PMU_MAX_COUNTERS	32
-#define	ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMU_PMCR_E	(1 << 0) /* Enable all counters */
-#define ARMV8_PMU_PMCR_P	(1 << 1) /* Reset all counters */
-#define ARMV8_PMU_PMCR_C	(1 << 2) /* Cycle counter reset */
-#define ARMV8_PMU_PMCR_D	(1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMU_PMCR_X	(1 << 4) /* Export to ETM */
-#define ARMV8_PMU_PMCR_DP	(1 << 5) /* Disable CCNT if non-invasive debug*/
-/* Determines which bit of PMCCNTR_EL0 generates an overflow */
-#define ARMV8_PMU_PMCR_LC	(1 << 6)
-#define	ARMV8_PMU_PMCR_N_SHIFT	11	 /* Number of counters supported */
-#define	ARMV8_PMU_PMCR_N_MASK	0x1f
-#define	ARMV8_PMU_PMCR_MASK	0x7f	 /* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define	ARMV8_PMU_OVSR_MASK		0xffffffff	/* Mask for writable bits */
-#define	ARMV8_PMU_OVERFLOWED_MASK	ARMV8_PMU_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define	ARMV8_PMU_EVTYPE_MASK	0xc80003ff	/* Mask for writable bits */
-#define	ARMV8_PMU_EVTYPE_EVENT	0x3ff		/* Mask for EVENT bits */
-
-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR	0	/* Software increment event */
-
-/*
- * Event filters for PMUv3
- */
-#define	ARMV8_PMU_EXCLUDE_EL1	(1 << 31)
-#define	ARMV8_PMU_EXCLUDE_EL0	(1 << 30)
-#define	ARMV8_PMU_INCLUDE_EL2	(1 << 27)
-
-/*
- * PMUSERENR: user enable reg
- */
-#define ARMV8_PMU_USERENR_MASK	0xf		/* Mask for writable bits */
-#define ARMV8_PMU_USERENR_EN	(1 << 0) /* PMU regs can be accessed at EL0 */
-#define ARMV8_PMU_USERENR_SW	(1 << 1) /* PMSWINC can be written at EL0 */
-#define ARMV8_PMU_USERENR_CR	(1 << 2) /* Cycle counter can be read at EL0 */
-#define ARMV8_PMU_USERENR_ER	(1 << 3) /* Event counter can be read at EL0 */
-
-#endif
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 12f8a00..72a3025 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -40,6 +40,21 @@
 #define PCI_IO_SIZE		SZ_16M
 
 /*
+ * Log2 of the upper bound of the size of a struct page. Used for sizing
+ * the vmemmap region only, does not affect actual memory footprint.
+ * We don't use sizeof(struct page) directly since taking its size here
+ * requires its definition to be available at this point in the inclusion
+ * chain, and it may not be a power of 2 in the first place.
+ */
+#define STRUCT_PAGE_MAX_SHIFT	6
+
+/*
+ * VMEMMAP_SIZE - allows the whole linear region to be covered by
+ *                a struct page array
+ */
+#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
+
+/*
  * PAGE_OFFSET - the virtual address of the start of the kernel image (top
  *		 (VA_BITS - 1))
  * VA_BITS - the maximum number of bits for virtual addresses.
@@ -54,7 +69,8 @@
 #define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
 #define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
 #define MODULES_VSIZE		(SZ_128M)
-#define PCI_IO_END		(PAGE_OFFSET - SZ_2M)
+#define VMEMMAP_START		(PAGE_OFFSET - VMEMMAP_SIZE)
+#define PCI_IO_END		(VMEMMAP_START - SZ_2M)
 #define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
 #define TASK_SIZE_64		(UL(1) << VA_BITS)
@@ -71,6 +87,9 @@
 
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
 
+#define KERNEL_START      _text
+#define KERNEL_END        _end
+
 /*
  * The size of the KASAN shadow region. This should be 1/8th of the
  * size of the entire kernel virtual address space.
@@ -192,9 +211,19 @@
  */
 #define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
 
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define	virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#else
+#define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
+#define __page_to_voff(kaddr)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
+#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
+
+#define virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
+					   + PHYS_OFFSET) >> PAGE_SHIFT)
+#endif
 #endif
 
 #include <asm-generic/memory_model.h>
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 990124a..97b1d8f 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -29,6 +29,7 @@
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
 extern void paging_init(void);
+extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/mmzone.h b/arch/arm64/include/asm/mmzone.h
new file mode 100644
index 0000000..a0de9e6
--- /dev/null
+++ b/arch/arm64/include/asm/mmzone.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_MMZONE_H
+#define __ASM_MMZONE_H
+
+#ifdef CONFIG_NUMA
+
+#include <asm/numa.h>
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid)		(node_data[(nid)])
+
+#endif /* CONFIG_NUMA */
+#endif /* __ASM_MMZONE_H */
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
new file mode 100644
index 0000000..e9b4f29
--- /dev/null
+++ b/arch/arm64/include/asm/numa.h
@@ -0,0 +1,45 @@
+#ifndef __ASM_NUMA_H
+#define __ASM_NUMA_H
+
+#include <asm/topology.h>
+
+#ifdef CONFIG_NUMA
+
+/* currently, arm64 implements flat NUMA topology */
+#define parent_node(node)	(node)
+
+int __node_distance(int from, int to);
+#define node_distance(a, b) __node_distance(a, b)
+
+extern nodemask_t numa_nodes_parsed __initdata;
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+void numa_clear_node(unsigned int cpu);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+	return node_to_cpumask_map[node];
+}
+#endif
+
+void __init arm64_numa_init(void);
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_free_distance(void);
+void __init early_map_cpu_to_node(unsigned int cpu, int nid);
+void numa_store_cpu_info(unsigned int cpu);
+
+#else	/* CONFIG_NUMA */
+
+static inline void numa_store_cpu_info(unsigned int cpu) { }
+static inline void arm64_numa_init(void) { }
+static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
+
+#endif	/* CONFIG_NUMA */
+
+#endif	/* __ASM_NUMA_H */
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
index 4e603ea..123f45d 100644
--- a/arch/arm64/include/asm/opcodes.h
+++ b/arch/arm64/include/asm/opcodes.h
@@ -1 +1,5 @@
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
+#endif
+
 #include <../../arm/include/asm/opcodes.h>
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index ae615b9..17b45f7 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -19,6 +19,8 @@
 #ifndef __ASM_PAGE_H
 #define __ASM_PAGE_H
 
+#include <linux/const.h>
+
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together  */
 #ifdef CONFIG_ARM64_64K_PAGES
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 7bd3cdb..2065f46 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,53 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
+#define	ARMV8_PMU_MAX_COUNTERS	32
+#define	ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMU_PMCR_E	(1 << 0) /* Enable all counters */
+#define ARMV8_PMU_PMCR_P	(1 << 1) /* Reset all counters */
+#define ARMV8_PMU_PMCR_C	(1 << 2) /* Cycle counter reset */
+#define ARMV8_PMU_PMCR_D	(1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMU_PMCR_X	(1 << 4) /* Export to ETM */
+#define ARMV8_PMU_PMCR_DP	(1 << 5) /* Disable CCNT if non-invasive debug */
+#define ARMV8_PMU_PMCR_LC	(1 << 6) /* Overflow on 64 bit cycle counter */
+#define	ARMV8_PMU_PMCR_N_SHIFT	11	 /* Number of counters supported */
+#define	ARMV8_PMU_PMCR_N_MASK	0x1f
+#define	ARMV8_PMU_PMCR_MASK	0x7f	 /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define	ARMV8_PMU_OVSR_MASK		0xffffffff	/* Mask for writable bits */
+#define	ARMV8_PMU_OVERFLOWED_MASK	ARMV8_PMU_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define	ARMV8_PMU_EVTYPE_MASK	0xc800ffff	/* Mask for writable bits */
+#define	ARMV8_PMU_EVTYPE_EVENT	0xffff		/* Mask for EVENT bits */
+
+#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR	0	/* Software increment event */
+
+/*
+ * Event filters for PMUv3
+ */
+#define	ARMV8_PMU_EXCLUDE_EL1	(1 << 31)
+#define	ARMV8_PMU_EXCLUDE_EL0	(1 << 30)
+#define	ARMV8_PMU_INCLUDE_EL2	(1 << 27)
+
+/*
+ * PMUSERENR: user enable reg
+ */
+#define ARMV8_PMU_USERENR_MASK	0xf		/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_EN	(1 << 0) /* PMU regs can be accessed at EL0 */
+#define ARMV8_PMU_USERENR_SW	(1 << 1) /* PMSWINC can be written at EL0 */
+#define ARMV8_PMU_USERENR_CR	(1 << 2) /* Cycle counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_ER	(1 << 3) /* Event counter can be read at EL0 */
+
 #ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5c25b83..2813748 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -133,7 +133,6 @@
  * Section
  */
 #define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
-#define PMD_SECT_PROT_NONE	(_AT(pmdval_t, 1) << 58)
 #define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
 #define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
 #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
@@ -208,23 +207,69 @@
 #define TCR_T1SZ(x)		((UL(64) - (x)) << TCR_T1SZ_OFFSET)
 #define TCR_TxSZ(x)		(TCR_T0SZ(x) | TCR_T1SZ(x))
 #define TCR_TxSZ_WIDTH		6
-#define TCR_IRGN_NC		((UL(0) << 8) | (UL(0) << 24))
-#define TCR_IRGN_WBWA		((UL(1) << 8) | (UL(1) << 24))
-#define TCR_IRGN_WT		((UL(2) << 8) | (UL(2) << 24))
-#define TCR_IRGN_WBnWA		((UL(3) << 8) | (UL(3) << 24))
-#define TCR_IRGN_MASK		((UL(3) << 8) | (UL(3) << 24))
-#define TCR_ORGN_NC		((UL(0) << 10) | (UL(0) << 26))
-#define TCR_ORGN_WBWA		((UL(1) << 10) | (UL(1) << 26))
-#define TCR_ORGN_WT		((UL(2) << 10) | (UL(2) << 26))
-#define TCR_ORGN_WBnWA		((UL(3) << 10) | (UL(3) << 26))
-#define TCR_ORGN_MASK		((UL(3) << 10) | (UL(3) << 26))
-#define TCR_SHARED		((UL(3) << 12) | (UL(3) << 28))
-#define TCR_TG0_4K		(UL(0) << 14)
-#define TCR_TG0_64K		(UL(1) << 14)
-#define TCR_TG0_16K		(UL(2) << 14)
-#define TCR_TG1_16K		(UL(1) << 30)
-#define TCR_TG1_4K		(UL(2) << 30)
-#define TCR_TG1_64K		(UL(3) << 30)
+
+#define TCR_IRGN0_SHIFT		8
+#define TCR_IRGN0_MASK		(UL(3) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_NC		(UL(0) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WBWA		(UL(1) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WT		(UL(2) << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_WBnWA		(UL(3) << TCR_IRGN0_SHIFT)
+
+#define TCR_IRGN1_SHIFT		24
+#define TCR_IRGN1_MASK		(UL(3) << TCR_IRGN1_SHIFT)
+#define TCR_IRGN1_NC		(UL(0) << TCR_IRGN1_SHIFT)
+#define TCR_IRGN1_WBWA		(UL(1) << TCR_IRGN1_SHIFT)
+#define TCR_IRGN1_WT		(UL(2) << TCR_IRGN1_SHIFT)
+#define TCR_IRGN1_WBnWA		(UL(3) << TCR_IRGN1_SHIFT)
+
+#define TCR_IRGN_NC		(TCR_IRGN0_NC | TCR_IRGN1_NC)
+#define TCR_IRGN_WBWA		(TCR_IRGN0_WBWA | TCR_IRGN1_WBWA)
+#define TCR_IRGN_WT		(TCR_IRGN0_WT | TCR_IRGN1_WT)
+#define TCR_IRGN_WBnWA		(TCR_IRGN0_WBnWA | TCR_IRGN1_WBnWA)
+#define TCR_IRGN_MASK		(TCR_IRGN0_MASK | TCR_IRGN1_MASK)
+
+
+#define TCR_ORGN0_SHIFT		10
+#define TCR_ORGN0_MASK		(UL(3) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_NC		(UL(0) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WBWA		(UL(1) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WT		(UL(2) << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_WBnWA		(UL(3) << TCR_ORGN0_SHIFT)
+
+#define TCR_ORGN1_SHIFT		26
+#define TCR_ORGN1_MASK		(UL(3) << TCR_ORGN1_SHIFT)
+#define TCR_ORGN1_NC		(UL(0) << TCR_ORGN1_SHIFT)
+#define TCR_ORGN1_WBWA		(UL(1) << TCR_ORGN1_SHIFT)
+#define TCR_ORGN1_WT		(UL(2) << TCR_ORGN1_SHIFT)
+#define TCR_ORGN1_WBnWA		(UL(3) << TCR_ORGN1_SHIFT)
+
+#define TCR_ORGN_NC		(TCR_ORGN0_NC | TCR_ORGN1_NC)
+#define TCR_ORGN_WBWA		(TCR_ORGN0_WBWA | TCR_ORGN1_WBWA)
+#define TCR_ORGN_WT		(TCR_ORGN0_WT | TCR_ORGN1_WT)
+#define TCR_ORGN_WBnWA		(TCR_ORGN0_WBnWA | TCR_ORGN1_WBnWA)
+#define TCR_ORGN_MASK		(TCR_ORGN0_MASK | TCR_ORGN1_MASK)
+
+#define TCR_SH0_SHIFT		12
+#define TCR_SH0_MASK		(UL(3) << TCR_SH0_SHIFT)
+#define TCR_SH0_INNER		(UL(3) << TCR_SH0_SHIFT)
+
+#define TCR_SH1_SHIFT		28
+#define TCR_SH1_MASK		(UL(3) << TCR_SH1_SHIFT)
+#define TCR_SH1_INNER		(UL(3) << TCR_SH1_SHIFT)
+#define TCR_SHARED		(TCR_SH0_INNER | TCR_SH1_INNER)
+
+#define TCR_TG0_SHIFT		14
+#define TCR_TG0_MASK		(UL(3) << TCR_TG0_SHIFT)
+#define TCR_TG0_4K		(UL(0) << TCR_TG0_SHIFT)
+#define TCR_TG0_64K		(UL(1) << TCR_TG0_SHIFT)
+#define TCR_TG0_16K		(UL(2) << TCR_TG0_SHIFT)
+
+#define TCR_TG1_SHIFT		30
+#define TCR_TG1_MASK		(UL(3) << TCR_TG1_SHIFT)
+#define TCR_TG1_16K		(UL(1) << TCR_TG1_SHIFT)
+#define TCR_TG1_4K		(UL(2) << TCR_TG1_SHIFT)
+#define TCR_TG1_64K		(UL(3) << TCR_TG1_SHIFT)
+
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
 #define TCR_HA			(UL(1) << 39)
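For illustration, the split per-TTBR definitions compose back to exactly the values they replace, e.g.:

/*
 * Example, derived from the defines above (shown for illustration only):
 *
 *   TCR_IRGN_WBWA = TCR_IRGN0_WBWA | TCR_IRGN1_WBWA
 *                 = (UL(1) << 8)   | (UL(1) << 24)
 *
 * which is bit-for-bit identical to the old combined definition, so existing
 * users of TCR_IRGN_*, TCR_ORGN_*, TCR_SHARED and TCR_TG* are unaffected.
 */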
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index 2b1bd7e..69b2fd4 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -27,10 +27,6 @@
 typedef u64 pudval_t;
 typedef u64 pgdval_t;
 
-#undef STRICT_MM_TYPECHECKS
-
-#ifdef STRICT_MM_TYPECHECKS
-
 /*
  * These are used to make use of C type-checking..
  */
@@ -58,34 +54,6 @@
 #define pgprot_val(x)	((x).pgprot)
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
-#else	/* !STRICT_MM_TYPECHECKS */
-
-typedef pteval_t pte_t;
-#define pte_val(x)	(x)
-#define __pte(x)	(x)
-
-#if CONFIG_PGTABLE_LEVELS > 2
-typedef pmdval_t pmd_t;
-#define pmd_val(x)	(x)
-#define __pmd(x)	(x)
-#endif
-
-#if CONFIG_PGTABLE_LEVELS > 3
-typedef pudval_t pud_t;
-#define pud_val(x)	(x)
-#define __pud(x)	(x)
-#endif
-
-typedef pgdval_t pgd_t;
-#define pgd_val(x)	(x)
-#define __pgd(x)	(x)
-
-typedef pteval_t pgprot_t;
-#define pgprot_val(x)	(x)
-#define __pgprot(x)	(x)
-
-#endif /* STRICT_MM_TYPECHECKS */
-
 #if CONFIG_PGTABLE_LEVELS == 2
 #include <asm-generic/pgtable-nopmd.h>
 #elif CONFIG_PGTABLE_LEVELS == 3
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 989fef1..1910bf47 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -24,22 +24,16 @@
 #include <asm/pgtable-prot.h>
 
 /*
- * VMALLOC and SPARSEMEM_VMEMMAP ranges.
+ * VMALLOC range.
  *
- * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
- *	(rounded up to PUD_SIZE).
  * VMALLOC_START: beginning of the kernel vmalloc space
- * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
- *	fixed mappings and modules
+ * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
+ *	and fixed mappings
  */
-#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
-
 #define VMALLOC_START		(MODULES_END)
 #define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
-#define VMEMMAP_START		(VMALLOC_END + SZ_64K)
-#define vmemmap			((struct page *)VMEMMAP_START - \
-				 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
+#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
 
 #define FIRST_USER_ADDRESS	0UL
 
@@ -58,7 +52,7 @@
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
+#define ZERO_PAGE(vaddr)	pfn_to_page(PHYS_PFN(__pa(empty_zero_page)))
 
 #define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
 
@@ -272,6 +266,21 @@
 	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * See the comment in include/asm-generic/pgtable.h
+ */
+static inline int pte_protnone(pte_t pte)
+{
+	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+	return pte_protnone(pmd_pte(pmd));
+}
+#endif
+
 /*
  * THP definitions.
  */
@@ -280,15 +289,18 @@
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
 #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
 #define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
-#define pmd_mkclean(pmd)       pte_pmd(pte_mkclean(pmd_pte(pmd)))
+#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
+#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
+
+#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
@@ -327,9 +339,8 @@
 				     unsigned long size, pgprot_t vma_prot);
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
-#define pmd_present(pmd)	(pmd_val(pmd))
 
-#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))
+#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))
 
 #define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 				 PMD_TYPE_TABLE)
@@ -394,7 +405,7 @@
 #define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
 
 #define pud_none(pud)		(!pud_val(pud))
-#define pud_bad(pud)		(!(pud_val(pud) & 2))
+#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
 #define pud_present(pud)	(pud_val(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
@@ -526,18 +537,31 @@
 }
 
 #ifdef CONFIG_ARM64_HW_AFDBM
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pte_t *ptep,
+				 pte_t entry, int dirty);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+					unsigned long address, pmd_t *pmdp,
+					pmd_t entry, int dirty)
+{
+	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
+}
+#endif
+
 /*
  * Atomic pte/pmd modifications.
  */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
-					    unsigned long address,
-					    pte_t *ptep)
+static inline int __ptep_test_and_clear_young(pte_t *ptep)
 {
 	pteval_t pteval;
 	unsigned int tmp, res;
 
-	asm volatile("//	ptep_test_and_clear_young\n"
+	asm volatile("//	__ptep_test_and_clear_young\n"
 	"	prfm	pstl1strm, %2\n"
 	"1:	ldxr	%0, %2\n"
 	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
@@ -550,6 +574,13 @@
 	return res;
 }
 
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pte_t *ptep)
+{
+	return __ptep_test_and_clear_young(ptep);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
@@ -578,9 +609,9 @@
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
-				       unsigned long address, pmd_t *pmdp)
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+					    unsigned long address, pmd_t *pmdp)
 {
 	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
 }
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 817a067..433e504 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -113,6 +113,17 @@
 	dsb(ishst);
 }
 
+/*
+ * The calling secondary CPU has detected a serious configuration mismatch,
+ * which calls for a kernel panic. Update the boot status and park the calling
+ * CPU.
+ */
+static inline void cpu_panic_kernel(void)
+{
+	update_cpu_boot_status(CPU_PANIC_KERNEL);
+	cpu_park_loop();
+}
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/stage2_pgtable-nopmd.h b/arch/arm64/include/asm/stage2_pgtable-nopmd.h
new file mode 100644
index 0000000..2656a0f
--- /dev/null
+++ b/arch/arm64/include/asm/stage2_pgtable-nopmd.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_S2_PGTABLE_NOPMD_H_
+#define __ARM64_S2_PGTABLE_NOPMD_H_
+
+#include <asm/stage2_pgtable-nopud.h>
+
+#define __S2_PGTABLE_PMD_FOLDED
+
+#define S2_PMD_SHIFT		S2_PUD_SHIFT
+#define S2_PTRS_PER_PMD		1
+#define S2_PMD_SIZE		(1UL << S2_PMD_SHIFT)
+#define S2_PMD_MASK		(~(S2_PMD_SIZE-1))
+
+#define stage2_pud_none(pud)			(0)
+#define stage2_pud_present(pud)			(1)
+#define stage2_pud_clear(pud)			do { } while (0)
+#define stage2_pud_populate(pud, pmd)		do { } while (0)
+#define stage2_pmd_offset(pud, address)		((pmd_t *)(pud))
+
+#define stage2_pmd_free(pmd)			do { } while (0)
+
+#define stage2_pmd_addr_end(addr, end)		(end)
+
+#define stage2_pud_huge(pud)			(0)
+#define stage2_pmd_table_empty(pmdp)		(0)
+
+#endif
diff --git a/arch/arm64/include/asm/stage2_pgtable-nopud.h b/arch/arm64/include/asm/stage2_pgtable-nopud.h
new file mode 100644
index 0000000..5ee87b5
--- /dev/null
+++ b/arch/arm64/include/asm/stage2_pgtable-nopud.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_S2_PGTABLE_NOPUD_H_
+#define __ARM64_S2_PGTABLE_NOPUD_H_
+
+#define __S2_PGTABLE_PUD_FOLDED
+
+#define S2_PUD_SHIFT		S2_PGDIR_SHIFT
+#define S2_PTRS_PER_PUD		1
+#define S2_PUD_SIZE		(_AC(1, UL) << S2_PUD_SHIFT)
+#define S2_PUD_MASK		(~(S2_PUD_SIZE-1))
+
+#define stage2_pgd_none(pgd)			(0)
+#define stage2_pgd_present(pgd)			(1)
+#define stage2_pgd_clear(pgd)			do { } while (0)
+#define stage2_pgd_populate(pgd, pud)	do { } while (0)
+
+#define stage2_pud_offset(pgd, address)		((pud_t *)(pgd))
+
+#define stage2_pud_free(x)			do { } while (0)
+
+#define stage2_pud_addr_end(addr, end)		(end)
+#define stage2_pud_table_empty(pmdp)		(0)
+
+#endif
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
new file mode 100644
index 0000000..8b68099
--- /dev/null
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ *
+ * stage2 page table helpers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_S2_PGTABLE_H_
+#define __ARM64_S2_PGTABLE_H_
+
+#include <asm/pgtable.h>
+
+/*
+ * The hardware supports concatenation of up to 16 tables at stage2 entry level
+ * and we use the feature whenever possible.
+ *
+ * Now, the minimum number of bits resolved at any level is (PAGE_SHIFT - 3).
+ * On arm64, the smallest PAGE_SIZE supported is 4k, which means
+ *             (PAGE_SHIFT - 3) > 4 holds for all page sizes.
+ * This implies that the total number of page table levels expected by the
+ * hardware at stage2 is actually the number of levels required for
+ * (KVM_PHYS_SHIFT - 4) in normal translations (e.g., stage1), since we cannot
+ * have another level in the range (KVM_PHYS_SHIFT, KVM_PHYS_SHIFT - 4).
+ */
+#define STAGE2_PGTABLE_LEVELS		ARM64_HW_PGTABLE_LEVELS(KVM_PHYS_SHIFT - 4)
+
+/*
+ * With all the supported VA_BITs and 40bit guest IPA, the following condition
+ * is always true:
+ *
+ *       STAGE2_PGTABLE_LEVELS <= CONFIG_PGTABLE_LEVELS
+ *
+ * We base our stage-2 page table walker helpers on this assumption and
+ * fall back to using the host version of the helper wherever possible.
+ * i.e., if a particular level is not folded (e.g., PUD) at stage2, we fall back
+ * to using the host version, since it is guaranteed not to be folded at the host.
+ *
+ * If the condition breaks in the future, we can rearrange the host level
+ * definitions and reuse them for stage2. Till then...
+ */
+#if STAGE2_PGTABLE_LEVELS > CONFIG_PGTABLE_LEVELS
+#error "Unsupported combination of guest IPA and host VA_BITS."
+#endif
+
+/* S2_PGDIR_SHIFT is the size mapped by top-level stage2 entry */
+#define S2_PGDIR_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS)
+#define S2_PGDIR_SIZE			(_AC(1, UL) << S2_PGDIR_SHIFT)
+#define S2_PGDIR_MASK			(~(S2_PGDIR_SIZE - 1))
+
+/*
+ * The number of PTRS across all concatenated stage2 tables is given by the
+ * number of bits resolved at the initial level.
+ */
+#define PTRS_PER_S2_PGD			(1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT))
+
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
+ * levels in addition to the PGD.
+ */
+#define KVM_MMU_CACHE_MIN_PAGES		(STAGE2_PGTABLE_LEVELS - 1)
+
+
+#if STAGE2_PGTABLE_LEVELS > 3
+
+#define S2_PUD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
+#define S2_PUD_SIZE			(_AC(1, UL) << S2_PUD_SHIFT)
+#define S2_PUD_MASK			(~(S2_PUD_SIZE - 1))
+
+#define stage2_pgd_none(pgd)				pgd_none(pgd)
+#define stage2_pgd_clear(pgd)				pgd_clear(pgd)
+#define stage2_pgd_present(pgd)				pgd_present(pgd)
+#define stage2_pgd_populate(pgd, pud)			pgd_populate(NULL, pgd, pud)
+#define stage2_pud_offset(pgd, address)			pud_offset(pgd, address)
+#define stage2_pud_free(pud)				pud_free(NULL, pud)
+
+#define stage2_pud_table_empty(pudp)			kvm_page_empty(pudp)
+
+static inline phys_addr_t stage2_pud_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;
+
+	return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+#endif		/* STAGE2_PGTABLE_LEVELS > 3 */
+
+
+#if STAGE2_PGTABLE_LEVELS > 2
+
+#define S2_PMD_SHIFT			ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
+#define S2_PMD_SIZE			(_AC(1, UL) << S2_PMD_SHIFT)
+#define S2_PMD_MASK			(~(S2_PMD_SIZE - 1))
+
+#define stage2_pud_none(pud)				pud_none(pud)
+#define stage2_pud_clear(pud)				pud_clear(pud)
+#define stage2_pud_present(pud)				pud_present(pud)
+#define stage2_pud_populate(pud, pmd)			pud_populate(NULL, pud, pmd)
+#define stage2_pmd_offset(pud, address)			pmd_offset(pud, address)
+#define stage2_pmd_free(pmd)				pmd_free(NULL, pmd)
+
+#define stage2_pud_huge(pud)				pud_huge(pud)
+#define stage2_pmd_table_empty(pmdp)			kvm_page_empty(pmdp)
+
+static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;
+
+	return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+#endif		/* STAGE2_PGTABLE_LEVELS > 2 */
+
+#define stage2_pte_table_empty(ptep)			kvm_page_empty(ptep)
+
+#if STAGE2_PGTABLE_LEVELS == 2
+#include <asm/stage2_pgtable-nopmd.h>
+#elif STAGE2_PGTABLE_LEVELS == 3
+#include <asm/stage2_pgtable-nopud.h>
+#endif
+
+
+#define stage2_pgd_index(addr)				(((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+
+static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK;
+
+	return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+#endif	/* __ARM64_S2_PGTABLE_H_ */
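A worked example of the level arithmetic described in the header comment above, assuming 4K pages and the usual 40-bit guest IPA (KVM_PHYS_SHIFT = 40); the concrete numbers are illustrative only:

/*
 * Assuming PAGE_SHIFT = 12 and KVM_PHYS_SHIFT = 40:
 *
 *   bits resolved per level   = PAGE_SHIFT - 3             = 9
 *   concatenating up to 16 tables at the entry level absorbs 4 bits, so
 *   STAGE2_PGTABLE_LEVELS     = levels for (40 - 4) bits
 *                             = ceil((36 - 12) / 9)        = 3
 *   S2_PGDIR_SHIFT            = 12 + (3 - 1) * 9           = 30  (1GB per entry)
 *   PTRS_PER_S2_PGD           = 1 << (40 - 30)             = 1024 entries
 *                               (two concatenated 4K tables at the entry level)
 *   KVM_MMU_CACHE_MIN_PAGES   = 3 - 1                      = 2
 */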
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 59a5b0f1..024d623 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,8 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-#define NR_CTX_REGS 11
+#define NR_CTX_REGS 10
+#define NR_CALLEE_SAVED_REGS 12
 
 /*
  * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
@@ -16,11 +17,34 @@
 	u64 sp;
 } __aligned(16);
 
-struct sleep_save_sp {
-	phys_addr_t *save_ptr_stash;
-	phys_addr_t save_ptr_stash_phys;
+/*
+ * Memory to save the cpu state is allocated on the stack by
+ * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
+ * This data must survive until cpu_resume() is called.
+ *
+ * This struct describes the size and the layout of the saved cpu state.
+ * The layout of the callee_saved_regs is defined by the implementation
+ * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
+ * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
+ * returns, and the data would be subsequently corrupted by the call to the
+ * finisher.
+ */
+struct sleep_stack_data {
+	struct cpu_suspend_ctx	system_regs;
+	unsigned long		callee_saved_regs[NR_CALLEE_SAVED_REGS];
 };
 
+extern unsigned long *sleep_save_stash;
+
 extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
+int __cpu_suspend_enter(struct sleep_stack_data *state);
+void __cpu_suspend_exit(void);
+void _cpu_resume(void);
+
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+
 #endif
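A minimal sketch of the calling pattern the new structure is designed for (hypothetical and simplified, not the actual arch/arm64 suspend code; it assumes the convention that __cpu_suspend_enter() returns non-zero on the way down and zero after waking up through cpu_resume()):

static int example_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct sleep_stack_data state;	/* survives on this caller's stack */
	int ret = 0;

	if (__cpu_suspend_enter(&state)) {
		/* Still on the suspend path: run the finisher (may power off). */
		ret = fn(arg);
	} else {
		/* Woken up through cpu_resume(): restore CPU-local state. */
		__cpu_suspend_exit();
	}
	return ret;
}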
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1a78d6e..751e901 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -86,10 +86,21 @@
 #define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
 				     (!!x)<<8 | 0x1f)
 
-/* SCTLR_EL1 */
-#define SCTLR_EL1_CP15BEN	(0x1 << 5)
-#define SCTLR_EL1_SED		(0x1 << 8)
-#define SCTLR_EL1_SPAN		(0x1 << 23)
+/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_EE    (1 << 25)
+#define SCTLR_ELx_I	(1 << 12)
+#define SCTLR_ELx_SA	(1 << 3)
+#define SCTLR_ELx_C	(1 << 2)
+#define SCTLR_ELx_A	(1 << 1)
+#define SCTLR_ELx_M	1
+
+#define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+			 SCTLR_ELx_SA | SCTLR_ELx_I)
+
+/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_SPAN		(1 << 23)
+#define SCTLR_EL1_SED		(1 << 8)
+#define SCTLR_EL1_CP15BEN	(1 << 5)
 
 
 /* id_aa64isar0 */
@@ -115,6 +126,7 @@
 #define ID_AA64PFR0_ASIMD_SUPPORTED	0x0
 #define ID_AA64PFR0_EL1_64BIT_ONLY	0x1
 #define ID_AA64PFR0_EL0_64BIT_ONLY	0x1
+#define ID_AA64PFR0_EL0_32BIT_64BIT	0x2
 
 /* id_aa64mmfr0 */
 #define ID_AA64MMFR0_TGRAN4_SHIFT	28
@@ -141,8 +153,15 @@
 #define ID_AA64MMFR1_VMIDBITS_SHIFT	4
 #define ID_AA64MMFR1_HADBS_SHIFT	0
 
+#define ID_AA64MMFR1_VMIDBITS_8		0
+#define ID_AA64MMFR1_VMIDBITS_16	2
+
 /* id_aa64mmfr2 */
+#define ID_AA64MMFR2_LVA_SHIFT		16
+#define ID_AA64MMFR2_IESB_SHIFT		12
+#define ID_AA64MMFR2_LSM_SHIFT		8
 #define ID_AA64MMFR2_UAO_SHIFT		4
+#define ID_AA64MMFR2_CNP_SHIFT		0
 
 /* id_aa64dfr0 */
 #define ID_AA64DFR0_CTX_CMPS_SHIFT	28
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index a3e9d6f..8b57339 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -22,6 +22,16 @@
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
+#ifdef CONFIG_NUMA
+
+struct pci_bus;
+int pcibus_to_node(struct pci_bus *bus);
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_of_node(pcibus_to_node(bus)))
+
+#endif /* CONFIG_NUMA */
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 9f22dd6..dcbcf8d 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,6 +18,22 @@
 #ifndef __ASM__VIRT_H
 #define __ASM__VIRT_H
 
+/*
+ * The arm64 hcall implementation uses x0 to specify the hcall type. A value
+ * less than 0xfff indicates a special hcall, such as get/set vector.
+ * Any other value is used as a pointer to the function to call.
+ */
+
+/* HVC_GET_VECTORS - Return the value of the vbar_el2 register. */
+#define HVC_GET_VECTORS 0
+
+/*
+ * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
+ *
+ * @x1: Physical address of the new vector table.
+ */
+#define HVC_SET_VECTORS 1
+
 #define BOOT_CPU_MODE_EL1	(0xe11)
 #define BOOT_CPU_MODE_EL2	(0xe12)
 
@@ -60,6 +76,12 @@
 	return el == CurrentEL_EL2;
 }
 
+#ifdef CONFIG_ARM64_VHE
+extern void verify_cpu_run_el(void);
+#else
+static inline void verify_cpu_run_el(void) {}
+#endif
+
 /* The section containing the hypervisor text */
 extern char __hyp_text_start[];
 extern char __hyp_text_end[];
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 3793003..2173149 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -45,6 +45,7 @@
 arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
 arm64-obj-$(CONFIG_PARAVIRT)		+= paravirt.o
 arm64-obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
+arm64-obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
 
 obj-y					+= $(arm64-obj-y) vdso/
 obj-m					+= $(arm64-obj-m)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index d1ce8e2..3e4f1a4 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -42,6 +42,7 @@
 EXPORT_SYMBOL(acpi_pci_disabled);
 
 static bool param_acpi_off __initdata;
+static bool param_acpi_on __initdata;
 static bool param_acpi_force __initdata;
 
 static int __init parse_acpi(char *arg)
@@ -52,6 +53,8 @@
 	/* "acpi=off" disables both ACPI table parsing and interpreter */
 	if (strcmp(arg, "off") == 0)
 		param_acpi_off = true;
+	else if (strcmp(arg, "on") == 0) /* prefer ACPI over DT */
+		param_acpi_on = true;
 	else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
 		param_acpi_force = true;
 	else
@@ -66,12 +69,24 @@
 				       void *data)
 {
 	/*
-	 * Return 1 as soon as we encounter a node at depth 1 that is
-	 * not the /chosen node.
+	 * Ignore anything not directly under the root node; we'll
+	 * catch its parent instead.
 	 */
-	if (depth == 1 && (strcmp(uname, "chosen") != 0))
-		return 1;
-	return 0;
+	if (depth != 1)
+		return 0;
+
+	if (strcmp(uname, "chosen") == 0)
+		return 0;
+
+	if (strcmp(uname, "hypervisor") == 0 &&
+	    of_flat_dt_is_compatible(node, "xen,xen"))
+		return 0;
+
+	/*
+	 * This node at depth 1 is neither a chosen node nor a xen node,
+	 * which we do not expect.
+	 */
+	return 1;
 }
 
 /*
@@ -184,11 +199,13 @@
 	/*
 	 * Enable ACPI instead of device tree unless
 	 * - ACPI has been disabled explicitly (acpi=off), or
-	 * - the device tree is not empty (it has more than just a /chosen node)
-	 *   and ACPI has not been force enabled (acpi=force)
+	 * - the device tree is not empty (it has more than just a /chosen node,
+	 *   and, when running on Xen, a /hypervisor node)
+	 *   and ACPI has not been [force] enabled (acpi=on|force)
 	 */
 	if (param_acpi_off ||
-	    (!param_acpi_force && of_scan_flat_dt(dt_scan_depth1_nodes, NULL)))
+	    (!param_acpi_on && !param_acpi_force &&
+	     of_scan_flat_dt(dt_scan_depth1_nodes, NULL)))
 		return;
 
 	/*
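In summary, the command-line handling above now selects the hardware description roughly as follows (an illustrative summary, not part of the patch):

	acpi=off     ->  always use the device tree
	acpi=on      ->  prefer ACPI over DT
	acpi=force   ->  force ACPI to be enabled
	(no option)  ->  use DT unless it is effectively empty, i.e. contains only
	                 /chosen (plus a "xen,xen"-compatible /hypervisor node on Xen)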
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 3ae6b31..f8e5d47 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -22,6 +22,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/kvm_host.h>
+#include <linux/suspend.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/smp_plat.h>
@@ -119,11 +120,14 @@
   DEFINE(CPU_CTX_SP,		offsetof(struct cpu_suspend_ctx, sp));
   DEFINE(MPIDR_HASH_MASK,	offsetof(struct mpidr_hash, mask));
   DEFINE(MPIDR_HASH_SHIFTS,	offsetof(struct mpidr_hash, shift_aff));
-  DEFINE(SLEEP_SAVE_SP_SZ,	sizeof(struct sleep_save_sp));
-  DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
-  DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
+  DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS,	offsetof(struct sleep_stack_data, system_regs));
+  DEFINE(SLEEP_STACK_DATA_CALLEE_REGS,	offsetof(struct sleep_stack_data, callee_saved_regs));
 #endif
   DEFINE(ARM_SMCCC_RES_X0_OFFS,	offsetof(struct arm_smccc_res, a0));
   DEFINE(ARM_SMCCC_RES_X2_OFFS,	offsetof(struct arm_smccc_res, a2));
+  BLANK();
+  DEFINE(HIBERN_PBE_ORIG,	offsetof(struct pbe, orig_address));
+  DEFINE(HIBERN_PBE_ADDR,	offsetof(struct pbe, address));
+  DEFINE(HIBERN_PBE_NEXT,	offsetof(struct pbe, next));
   return 0;
 }
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 06afd04..d427894 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -22,14 +22,16 @@
 #include <asm/cpufeature.h>
 
 static bool __maybe_unused
-is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
 {
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
 				       entry->midr_range_min,
 				       entry->midr_range_max);
 }
 
 #define MIDR_RANGE(model, min, max) \
+	.def_scope = SCOPE_LOCAL_CPU, \
 	.matches = is_affected_midr_range, \
 	.midr_model = model, \
 	.midr_range_min = min, \
@@ -101,6 +103,26 @@
 	}
 };
 
+/*
+ * The CPU errata workarounds are detected and applied at boot time, and the
+ * related information is freed soon after. If a new CPU requires a workaround
+ * that was not detected at boot, fail this CPU.
+ */
+void verify_local_cpu_errata(void)
+{
+	const struct arm64_cpu_capabilities *caps = arm64_errata;
+
+	for (; caps->matches; caps++)
+		if (!cpus_have_cap(caps->capability) &&
+			caps->matches(caps, SCOPE_LOCAL_CPU)) {
+			pr_crit("CPU%d: Requires work around for %s, not detected"
+					" at boot time\n",
+				smp_processor_id(),
+				caps->desc ? : "an erratum");
+			cpu_die_early();
+		}
+}
+
 void check_local_cpu_errata(void)
 {
 	update_cpu_capabilities(arm64_errata, "enabling workaround for");
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 943f514..811773d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -71,7 +71,8 @@
 
 /* meta feature for alternatives */
 static bool __maybe_unused
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
+
 
 static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
@@ -130,7 +131,11 @@
 };
 
 static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
@@ -435,22 +440,26 @@
 	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
 	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
 	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
-	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
-	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
-	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
-	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
-	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
-	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
-	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
-	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
-	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
-	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
-	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
-	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
-	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
-	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
-	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
-	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+	}
+
 }
 
 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -555,47 +564,51 @@
 				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
 
 	/*
-	 * If we have AArch32, we care about 32-bit features for compat. These
-	 * registers should be RES0 otherwise.
+	 * If we have AArch32, we care about 32-bit features for compat.
+	 * If the system doesn't support AArch32, don't update them.
 	 */
-	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
+	if (id_aa64pfr0_32bit_el0(read_system_reg(SYS_ID_AA64PFR0_EL1)) &&
+		id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+
+		taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
 					info->reg_id_dfr0, boot->reg_id_dfr0);
-	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
 					info->reg_id_isar0, boot->reg_id_isar0);
-	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
 					info->reg_id_isar1, boot->reg_id_isar1);
-	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
 					info->reg_id_isar2, boot->reg_id_isar2);
-	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
 					info->reg_id_isar3, boot->reg_id_isar3);
-	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
 					info->reg_id_isar4, boot->reg_id_isar4);
-	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
 					info->reg_id_isar5, boot->reg_id_isar5);
 
-	/*
-	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
-	 * ACTLR formats could differ across CPUs and therefore would have to
-	 * be trapped for virtualization anyway.
-	 */
-	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
+		/*
+		 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
+		 * ACTLR formats could differ across CPUs and therefore would have to
+		 * be trapped for virtualization anyway.
+		 */
+		taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
 					info->reg_id_mmfr0, boot->reg_id_mmfr0);
-	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
 					info->reg_id_mmfr1, boot->reg_id_mmfr1);
-	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
 					info->reg_id_mmfr2, boot->reg_id_mmfr2);
-	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
 					info->reg_id_mmfr3, boot->reg_id_mmfr3);
-	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
 					info->reg_id_pfr0, boot->reg_id_pfr0);
-	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
 					info->reg_id_pfr1, boot->reg_id_pfr1);
-	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
 					info->reg_mvfr0, boot->reg_mvfr0);
-	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
 					info->reg_mvfr1, boot->reg_mvfr1);
-	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
+		taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
 					info->reg_mvfr2, boot->reg_mvfr2);
+	}
 
 	/*
 	 * Mismatched CPU features are a recipe for disaster. Don't even
@@ -614,254 +627,9 @@
 	return regp->sys_val;
 }
 
-#include <linux/irqchip/arm-gic-v3.h>
-
-static bool
-feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
-{
-	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
-
-	return val >= entry->min_field_value;
-}
-
-static bool
-has_cpuid_feature(const struct arm64_cpu_capabilities *entry)
-{
-	u64 val;
-
-	val = read_system_reg(entry->sys_reg);
-	return feature_matches(val, entry);
-}
-
-static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
-{
-	bool has_sre;
-
-	if (!has_cpuid_feature(entry))
-		return false;
-
-	has_sre = gic_enable_sre();
-	if (!has_sre)
-		pr_warn_once("%s present but disabled by higher exception level\n",
-			     entry->desc);
-
-	return has_sre;
-}
-
-static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
-{
-	u32 midr = read_cpuid_id();
-	u32 rv_min, rv_max;
-
-	/* Cavium ThunderX pass 1.x and 2.x */
-	rv_min = 0;
-	rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
-
-	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
-}
-
-static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
-{
-	return is_kernel_in_hyp_mode();
-}
-
-static const struct arm64_cpu_capabilities arm64_features[] = {
-	{
-		.desc = "GIC system register CPU interface",
-		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
-		.matches = has_useable_gicv3_cpuif,
-		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.field_pos = ID_AA64PFR0_GIC_SHIFT,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 1,
-	},
-#ifdef CONFIG_ARM64_PAN
-	{
-		.desc = "Privileged Access Never",
-		.capability = ARM64_HAS_PAN,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64MMFR1_EL1,
-		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 1,
-		.enable = cpu_enable_pan,
-	},
-#endif /* CONFIG_ARM64_PAN */
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
-	{
-		.desc = "LSE atomic instructions",
-		.capability = ARM64_HAS_LSE_ATOMICS,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64ISAR0_EL1,
-		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 2,
-	},
-#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
-	{
-		.desc = "Software prefetching using PRFM",
-		.capability = ARM64_HAS_NO_HW_PREFETCH,
-		.matches = has_no_hw_prefetch,
-	},
-#ifdef CONFIG_ARM64_UAO
-	{
-		.desc = "User Access Override",
-		.capability = ARM64_HAS_UAO,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64MMFR2_EL1,
-		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
-		.min_field_value = 1,
-		.enable = cpu_enable_uao,
-	},
-#endif /* CONFIG_ARM64_UAO */
-#ifdef CONFIG_ARM64_PAN
-	{
-		.capability = ARM64_ALT_PAN_NOT_UAO,
-		.matches = cpufeature_pan_not_uao,
-	},
-#endif /* CONFIG_ARM64_PAN */
-	{
-		.desc = "Virtualization Host Extensions",
-		.capability = ARM64_HAS_VIRT_HOST_EXTN,
-		.matches = runs_at_el2,
-	},
-	{},
-};
-
-#define HWCAP_CAP(reg, field, s, min_value, type, cap)	\
-	{							\
-		.desc = #cap,					\
-		.matches = has_cpuid_feature,			\
-		.sys_reg = reg,					\
-		.field_pos = field,				\
-		.sign = s,					\
-		.min_field_value = min_value,			\
-		.hwcap_type = type,				\
-		.hwcap = cap,					\
-	}
-
-static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
-	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
-#ifdef CONFIG_COMPAT
-	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
-	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
-	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
-	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
-	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
-#endif
-	{},
-};
-
-static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
-{
-	switch (cap->hwcap_type) {
-	case CAP_HWCAP:
-		elf_hwcap |= cap->hwcap;
-		break;
-#ifdef CONFIG_COMPAT
-	case CAP_COMPAT_HWCAP:
-		compat_elf_hwcap |= (u32)cap->hwcap;
-		break;
-	case CAP_COMPAT_HWCAP2:
-		compat_elf_hwcap2 |= (u32)cap->hwcap;
-		break;
-#endif
-	default:
-		WARN_ON(1);
-		break;
-	}
-}
-
-/* Check if we have a particular HWCAP enabled */
-static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
-{
-	bool rc;
-
-	switch (cap->hwcap_type) {
-	case CAP_HWCAP:
-		rc = (elf_hwcap & cap->hwcap) != 0;
-		break;
-#ifdef CONFIG_COMPAT
-	case CAP_COMPAT_HWCAP:
-		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
-		break;
-	case CAP_COMPAT_HWCAP2:
-		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
-		break;
-#endif
-	default:
-		WARN_ON(1);
-		rc = false;
-	}
-
-	return rc;
-}
-
-static void __init setup_cpu_hwcaps(void)
-{
-	int i;
-	const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;
-
-	for (i = 0; hwcaps[i].matches; i++)
-		if (hwcaps[i].matches(&hwcaps[i]))
-			cap_set_hwcap(&hwcaps[i]);
-}
-
-void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
-			    const char *info)
-{
-	int i;
-
-	for (i = 0; caps[i].matches; i++) {
-		if (!caps[i].matches(&caps[i]))
-			continue;
-
-		if (!cpus_have_cap(caps[i].capability) && caps[i].desc)
-			pr_info("%s %s\n", info, caps[i].desc);
-		cpus_set_cap(caps[i].capability);
-	}
-}
-
-/*
- * Run through the enabled capabilities and enable() it on all active
- * CPUs
- */
-static void __init
-enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
-{
-	int i;
-
-	for (i = 0; caps[i].matches; i++)
-		if (caps[i].enable && cpus_have_cap(caps[i].capability))
-			on_each_cpu(caps[i].enable, NULL, true);
-}
-
-/*
- * Flag to indicate if we have computed the system wide
- * capabilities based on the boot time active CPUs. This
- * will be used to determine if a new booting CPU should
- * go through the verification process to make sure that it
- * supports the system capabilities, without using a hotplug
- * notifier.
- */
-static bool sys_caps_initialised;
-
-static inline void set_sys_caps_initialised(void)
-{
-	sys_caps_initialised = true;
-}
-
 /*
  * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
+ * Read the system register on the current CPU
  */
 static u64 __raw_read_system_reg(u32 sys_id)
 {
@@ -902,15 +670,314 @@
 	}
 }
 
+#include <linux/irqchip/arm-gic-v3.h>
+
+static bool
+feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
+{
+	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
+
+	return val >= entry->min_field_value;
+}
+
+static bool
+has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 val;
+
+	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+	if (scope == SCOPE_SYSTEM)
+		val = read_system_reg(entry->sys_reg);
+	else
+		val = __raw_read_system_reg(entry->sys_reg);
+
+	return feature_matches(val, entry);
+}
+
+static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	bool has_sre;
+
+	if (!has_cpuid_feature(entry, scope))
+		return false;
+
+	has_sre = gic_enable_sre();
+	if (!has_sre)
+		pr_warn_once("%s present but disabled by higher exception level\n",
+			     entry->desc);
+
+	return has_sre;
+}
+
+static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+	u32 midr = read_cpuid_id();
+	u32 rv_min, rv_max;
+
+	/* Cavium ThunderX pass 1.x and 2.x */
+	rv_min = 0;
+	rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
+
+	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
+}
+
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+	return is_kernel_in_hyp_mode();
+}
+
+static const struct arm64_cpu_capabilities arm64_features[] = {
+	{
+		.desc = "GIC system register CPU interface",
+		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = has_useable_gicv3_cpuif,
+		.sys_reg = SYS_ID_AA64PFR0_EL1,
+		.field_pos = ID_AA64PFR0_GIC_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+	},
+#ifdef CONFIG_ARM64_PAN
+	{
+		.desc = "Privileged Access Never",
+		.capability = ARM64_HAS_PAN,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64MMFR1_EL1,
+		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+		.enable = cpu_enable_pan,
+	},
+#endif /* CONFIG_ARM64_PAN */
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+	{
+		.desc = "LSE atomic instructions",
+		.capability = ARM64_HAS_LSE_ATOMICS,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64ISAR0_EL1,
+		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 2,
+	},
+#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+	{
+		.desc = "Software prefetching using PRFM",
+		.capability = ARM64_HAS_NO_HW_PREFETCH,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = has_no_hw_prefetch,
+	},
+#ifdef CONFIG_ARM64_UAO
+	{
+		.desc = "User Access Override",
+		.capability = ARM64_HAS_UAO,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64MMFR2_EL1,
+		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
+		.min_field_value = 1,
+		.enable = cpu_enable_uao,
+	},
+#endif /* CONFIG_ARM64_UAO */
+#ifdef CONFIG_ARM64_PAN
+	{
+		.capability = ARM64_ALT_PAN_NOT_UAO,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = cpufeature_pan_not_uao,
+	},
+#endif /* CONFIG_ARM64_PAN */
+	{
+		.desc = "Virtualization Host Extensions",
+		.capability = ARM64_HAS_VIRT_HOST_EXTN,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = runs_at_el2,
+	},
+	{
+		.desc = "32-bit EL0 Support",
+		.capability = ARM64_HAS_32BIT_EL0,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64PFR0_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64PFR0_EL0_SHIFT,
+		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
+	},
+	{},
+};
+
+#define HWCAP_CAP(reg, field, s, min_value, type, cap)	\
+	{							\
+		.desc = #cap,					\
+		.def_scope = SCOPE_SYSTEM,			\
+		.matches = has_cpuid_feature,			\
+		.sys_reg = reg,					\
+		.field_pos = field,				\
+		.sign = s,					\
+		.min_field_value = min_value,			\
+		.hwcap_type = type,				\
+		.hwcap = cap,					\
+	}
+
+static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
+	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
+	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
+	{},
+};
+
+static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
+#ifdef CONFIG_COMPAT
+	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
+	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
+	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
+	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
+	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
+#endif
+	{},
+};
+
+static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
+{
+	switch (cap->hwcap_type) {
+	case CAP_HWCAP:
+		elf_hwcap |= cap->hwcap;
+		break;
+#ifdef CONFIG_COMPAT
+	case CAP_COMPAT_HWCAP:
+		compat_elf_hwcap |= (u32)cap->hwcap;
+		break;
+	case CAP_COMPAT_HWCAP2:
+		compat_elf_hwcap2 |= (u32)cap->hwcap;
+		break;
+#endif
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+/* Check if we have a particular HWCAP enabled */
+static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
+{
+	bool rc;
+
+	switch (cap->hwcap_type) {
+	case CAP_HWCAP:
+		rc = (elf_hwcap & cap->hwcap) != 0;
+		break;
+#ifdef CONFIG_COMPAT
+	case CAP_COMPAT_HWCAP:
+		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
+		break;
+	case CAP_COMPAT_HWCAP2:
+		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
+		break;
+#endif
+	default:
+		WARN_ON(1);
+		rc = false;
+	}
+
+	return rc;
+}
+
+static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
+{
+	for (; hwcaps->matches; hwcaps++)
+		if (hwcaps->matches(hwcaps, hwcaps->def_scope))
+			cap_set_elf_hwcap(hwcaps);
+}
+
+void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+			    const char *info)
+{
+	for (; caps->matches; caps++) {
+		if (!caps->matches(caps, caps->def_scope))
+			continue;
+
+		if (!cpus_have_cap(caps->capability) && caps->desc)
+			pr_info("%s %s\n", info, caps->desc);
+		cpus_set_cap(caps->capability);
+	}
+}
+
+/*
+ * Run through the enabled capabilities and enable() it on all active
+ * CPUs
+ */
+static void __init
+enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+{
+	for (; caps->matches; caps++)
+		if (caps->enable && cpus_have_cap(caps->capability))
+			on_each_cpu(caps->enable, NULL, true);
+}
+
+/*
+ * Flag to indicate if we have computed the system wide
+ * capabilities based on the boot time active CPUs. This
+ * will be used to determine if a new booting CPU should
+ * go through the verification process to make sure that it
+ * supports the system capabilities, without using a hotplug
+ * notifier.
+ */
+static bool sys_caps_initialised;
+
+static inline void set_sys_caps_initialised(void)
+{
+	sys_caps_initialised = true;
+}
+
 /*
  * Check for CPU features that are used in early boot
  * based on the Boot CPU value.
  */
 static void check_early_cpu_features(void)
 {
+	verify_cpu_run_el();
 	verify_cpu_asid_bits();
 }
 
+static void
+verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
+{
+
+	for (; caps->matches; caps++)
+		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
+			pr_crit("CPU%d: missing HWCAP: %s\n",
+					smp_processor_id(), caps->desc);
+			cpu_die_early();
+		}
+}
+
+static void
+verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
+{
+	for (; caps->matches; caps++) {
+		if (!cpus_have_cap(caps->capability))
+			continue;
+		/*
+		 * If the new CPU misses an advertised feature, we cannot proceed
+		 * further; park the CPU.
+		 */
+		if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
+			pr_crit("CPU%d: missing feature: %s\n",
+					smp_processor_id(), caps->desc);
+			cpu_die_early();
+		}
+		if (caps->enable)
+			caps->enable(NULL);
+	}
+}
+
 /*
  * Run through the enabled system capabilities and enable() it on this CPU.
  * The capabilities were decided based on the available CPUs at the boot time.
@@ -921,8 +988,6 @@
  */
 void verify_local_cpu_capabilities(void)
 {
-	int i;
-	const struct arm64_cpu_capabilities *caps;
 
 	check_early_cpu_features();
 
@@ -933,32 +998,11 @@
 	if (!sys_caps_initialised)
 		return;
 
-	caps = arm64_features;
-	for (i = 0; caps[i].matches; i++) {
-		if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
-			continue;
-		/*
-		 * If the new CPU misses an advertised feature, we cannot proceed
-		 * further, park the cpu.
-		 */
-		if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) {
-			pr_crit("CPU%d: missing feature: %s\n",
-					smp_processor_id(), caps[i].desc);
-			cpu_die_early();
-		}
-		if (caps[i].enable)
-			caps[i].enable(NULL);
-	}
-
-	for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
-		if (!cpus_have_hwcap(&caps[i]))
-			continue;
-		if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) {
-			pr_crit("CPU%d: missing HWCAP: %s\n",
-					smp_processor_id(), caps[i].desc);
-			cpu_die_early();
-		}
-	}
+	verify_local_cpu_errata();
+	verify_local_cpu_features(arm64_features);
+	verify_local_elf_hwcaps(arm64_elf_hwcaps);
+	if (system_supports_32bit_el0())
+		verify_local_elf_hwcaps(compat_elf_hwcaps);
 }
 
 static void __init setup_feature_capabilities(void)
@@ -967,6 +1011,24 @@
 	enable_cpu_capabilities(arm64_features);
 }
 
+/*
+ * Check if the current CPU has a given feature capability.
+ * Should be called from non-preemptible context.
+ */
+bool this_cpu_has_cap(unsigned int cap)
+{
+	const struct arm64_cpu_capabilities *caps;
+
+	if (WARN_ON(preemptible()))
+		return false;
+
+	for (caps = arm64_features; caps->desc; caps++)
+		if (caps->capability == cap && caps->matches)
+			return caps->matches(caps, SCOPE_LOCAL_CPU);
+
+	return false;
+}
+
 void __init setup_cpu_features(void)
 {
 	u32 cwg;
@@ -974,7 +1036,10 @@
 
 	/* Set the CPU feature capabilities */
 	setup_feature_capabilities();
-	setup_cpu_hwcaps();
+	setup_elf_hwcaps(arm64_elf_hwcaps);
+
+	if (system_supports_32bit_el0())
+		setup_elf_hwcaps(compat_elf_hwcaps);
 
 	/* Advertise that we have computed the system capabilities */
 	set_sys_caps_initialised();
@@ -993,7 +1058,7 @@
 }
 
 static bool __maybe_unused
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
 {
 	return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
 }
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 9047cab6..e11857f 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -19,7 +19,8 @@
 {
 	int ret = -EOPNOTSUPP;
 
-	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
+	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_suspend &&
+			cpu_ops[cpu]->cpu_init_idle)
 		ret = cpu_ops[cpu]->cpu_init_idle(cpu);
 
 	return ret;
@@ -36,11 +37,5 @@
 {
 	int cpu = smp_processor_id();
 
-	/*
-	 * If cpu_ops have not been registered or suspend
-	 * has not been initialized, cpu_suspend call fails early.
-	 */
-	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
-		return -EOPNOTSUPP;
 	return cpu_ops[cpu]->cpu_suspend(index);
 }
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 84c8684..3808470 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -87,7 +87,8 @@
 	"idivt",
 	"vfpd32",
 	"lpae",
-	"evtstrm"
+	"evtstrm",
+	NULL
 };
 
 static const char *const compat_hwcap2_str[] = {
@@ -216,23 +217,26 @@
 	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
 	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
 
-	info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
-	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
-	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
-	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
-	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
-	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
-	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
-	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
-	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
-	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
-	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
-	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
-	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+	/* Update the 32bit ID registers only if AArch32 is implemented */
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+		info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+		info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+		info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+		info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+		info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+		info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+		info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+		info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+		info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+		info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+		info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
 
-	info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
-	info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
-	info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+	}
 
 	cpuinfo_detect_icache_policy(info);
 
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index c45f296..4fbf3c5 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -135,9 +135,8 @@
 static int os_lock_notify(struct notifier_block *self,
 				    unsigned long action, void *data)
 {
-	int cpu = (unsigned long)data;
 	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
-		smp_call_function_single(cpu, clear_os_lock, NULL, 1);
+		clear_os_lock(NULL);
 	return NOTIFY_OK;
 }
 
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index cae3112..e88c064 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -62,7 +62,7 @@
 	 */
 	mov	x20, x0		// DTB address
 	ldr	x0, [sp, #16]	// relocated _text address
-	movz	x21, #:abs_g0:stext_offset
+	ldr	w21, =stext_offset
 	add	x21, x0, x21
 
 	/*
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index b6abc85..78f5248 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -17,22 +17,51 @@
 
 #include <asm/efi.h>
 
+/*
+ * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+ * executable; everything else can be mapped with the XN bits
+ * set. Also take the new (optional) RO/XP bits into account.
+ */
+static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
+{
+	u64 attr = md->attribute;
+	u32 type = md->type;
+
+	if (type == EFI_MEMORY_MAPPED_IO)
+		return PROT_DEVICE_nGnRE;
+
+	if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
+		      "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
+		/*
+		 * If the region is not aligned to the page size of the OS, we
+		 * cannot use strict permissions, since that would also affect
+		 * the mapping attributes of the adjacent regions.
+		 */
+		return pgprot_val(PAGE_KERNEL_EXEC);
+
+	/* R-- */
+	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
+	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
+		return pgprot_val(PAGE_KERNEL_RO);
+
+	/* R-X */
+	if (attr & EFI_MEMORY_RO)
+		return pgprot_val(PAGE_KERNEL_ROX);
+
+	/* RW- */
+	if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
+		return pgprot_val(PAGE_KERNEL);
+
+	/* RWX */
+	return pgprot_val(PAGE_KERNEL_EXEC);
+}
+
+/* we will fill this structure from the stub, so don't put it in .bss */
+struct screen_info screen_info __section(.data);
+
 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 {
-	pteval_t prot_val;
-
-	/*
-	 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
-	 * executable, everything else can be mapped with the XN bits
-	 * set.
-	 */
-	if ((md->attribute & EFI_MEMORY_WB) == 0)
-		prot_val = PROT_DEVICE_nGnRE;
-	else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
-		 !PAGE_ALIGNED(md->phys_addr))
-		prot_val = pgprot_val(PAGE_KERNEL_EXEC);
-	else
-		prot_val = pgprot_val(PAGE_KERNEL);
+	pteval_t prot_val = create_mapping_protection(md);
 
 	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
 			   md->num_pages << EFI_PAGE_SHIFT,
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 4203d5f..2c6e598 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -25,6 +25,7 @@
 #include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
+#include <asm/boot.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
@@ -51,9 +52,6 @@
 #error TEXT_OFFSET must be less than 2MB
 #endif
 
-#define KERNEL_START	_text
-#define KERNEL_END	_end
-
 /*
  * Kernel startup entry point.
  * ---------------------------
@@ -102,8 +100,6 @@
 #endif
 
 #ifdef CONFIG_EFI
-	.globl	__efistub_stext_offset
-	.set	__efistub_stext_offset, stext - _head
 	.align 3
 pe_header:
 	.ascii	"PE"
@@ -123,11 +119,11 @@
 	.short	0x20b				// PE32+ format
 	.byte	0x02				// MajorLinkerVersion
 	.byte	0x14				// MinorLinkerVersion
-	.long	_end - stext			// SizeOfCode
+	.long	_end - efi_header_end		// SizeOfCode
 	.long	0				// SizeOfInitializedData
 	.long	0				// SizeOfUninitializedData
 	.long	__efistub_entry - _head		// AddressOfEntryPoint
-	.long	__efistub_stext_offset		// BaseOfCode
+	.long	efi_header_end - _head		// BaseOfCode
 
 extra_header_fields:
 	.quad	0				// ImageBase
@@ -144,7 +140,7 @@
 	.long	_end - _head			// SizeOfImage
 
 	// Everything before the kernel image is considered part of the header
-	.long	__efistub_stext_offset		// SizeOfHeaders
+	.long	efi_header_end - _head		// SizeOfHeaders
 	.long	0				// CheckSum
 	.short	0xa				// Subsystem (EFI application)
 	.short	0				// DllCharacteristics
@@ -188,10 +184,10 @@
 	.byte	0
 	.byte	0
 	.byte	0        		// end of 0 padding of section name
-	.long	_end - stext		// VirtualSize
-	.long	__efistub_stext_offset	// VirtualAddress
-	.long	_edata - stext		// SizeOfRawData
-	.long	__efistub_stext_offset	// PointerToRawData
+	.long	_end - efi_header_end	// VirtualSize
+	.long	efi_header_end - _head	// VirtualAddress
+	.long	_edata - efi_header_end	// SizeOfRawData
+	.long	efi_header_end - _head	// PointerToRawData
 
 	.long	0		// PointerToRelocations (0 for executables)
 	.long	0		// PointerToLineNumbers (0 for executables)
@@ -200,20 +196,23 @@
 	.long	0xe0500020	// Characteristics (section flags)
 
 	/*
-	 * EFI will load stext onwards at the 4k section alignment
+	 * EFI will load .text onwards at the 4k section alignment
 	 * described in the PE/COFF header. To ensure that instruction
 	 * sequences using an adrp and a :lo12: immediate will function
-	 * correctly at this alignment, we must ensure that stext is
+	 * correctly at this alignment, we must ensure that .text is
 	 * placed at a 4k boundary in the Image to begin with.
 	 */
 	.align 12
+efi_header_end:
 #endif
 
+	__INIT
+
 ENTRY(stext)
 	bl	preserve_boot_args
 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
-	mov	x23, xzr			// KASLR offset, defaults to 0
 	adrp	x24, __PHYS_OFFSET
+	and	x23, x24, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
 	bl	set_cpu_boot_mode_flag
 	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
 	/*
@@ -222,13 +221,11 @@
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
-	ldr	x27, 0f				// address to jump to after
+	bl	__cpu_setup			// initialise processor
+	adr_l	x27, __primary_switch		// address to jump to after
 						// MMU has been enabled
-	adr_l	lr, __enable_mmu		// return (PIC) address
-	b	__cpu_setup			// initialise processor
+	b	__enable_mmu
 ENDPROC(stext)
-	.align	3
-0:	.quad	__mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
 
 /*
  * Preserve the arguments passed by the bootloader in x0 .. x3
@@ -338,7 +335,7 @@
 	cmp	x0, x6
 	b.lo	1b
 
-	ldr	x7, =SWAPPER_MM_MMUFLAGS
+	mov	x7, SWAPPER_MM_MMUFLAGS
 
 	/*
 	 * Create the identity mapping.
@@ -394,12 +391,13 @@
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
 	mov	x0, x26				// swapper_pg_dir
-	ldr	x5, =KIMAGE_VADDR
+	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
 	add	x5, x5, x23			// add KASLR displacement
 	create_pgd_entry x0, x5, x3, x6
-	ldr	w6, kernel_img_size
-	add	x6, x6, x5
-	mov	x3, x24				// phys offset
+	adrp	x6, _end			// runtime __pa(_end)
+	adrp	x3, _text			// runtime __pa(_text)
+	sub	x6, x6, x3			// _end - _text
+	add	x6, x6, x5			// runtime __va(_end)
 	create_block_map x0, x7, x3, x5, x6
 
 	/*
@@ -414,16 +412,13 @@
 
 	ret	x28
 ENDPROC(__create_page_tables)
-
-kernel_img_size:
-	.long	_end - (_head - TEXT_OFFSET)
 	.ltorg
 
 /*
  * The following fragment of code is executed with the MMU enabled.
  */
 	.set	initial_sp, init_thread_union + THREAD_START_SP
-__mmap_switched:
+__primary_switched:
 	mov	x28, lr				// preserve LR
 	adr_l	x8, vectors			// load VBAR_EL1 with virtual
 	msr	vbar_el1, x8			// vector table address
@@ -437,44 +432,6 @@
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
-#ifdef CONFIG_RELOCATABLE
-
-	/*
-	 * Iterate over each entry in the relocation table, and apply the
-	 * relocations in place.
-	 */
-	adr_l	x8, __dynsym_start		// start of symbol table
-	adr_l	x9, __reloc_start		// start of reloc table
-	adr_l	x10, __reloc_end		// end of reloc table
-
-0:	cmp	x9, x10
-	b.hs	2f
-	ldp	x11, x12, [x9], #24
-	ldr	x13, [x9, #-8]
-	cmp	w12, #R_AARCH64_RELATIVE
-	b.ne	1f
-	add	x13, x13, x23			// relocate
-	str	x13, [x11, x23]
-	b	0b
-
-1:	cmp	w12, #R_AARCH64_ABS64
-	b.ne	0b
-	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
-	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
-	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
-	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
-	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
-	add	x14, x15, x23			// relocate
-	csel	x15, x14, x15, ne
-	add	x15, x13, x15
-	str	x15, [x11, x23]
-	b	0b
-
-2:	adr_l	x8, kimage_vaddr		// make relocated kimage_vaddr
-	dc	cvac, x8			// value visible to secondaries
-	dsb	sy				// with MMU off
-#endif
-
 	adr_l	sp, initial_sp, x4
 	mov	x4, sp
 	and	x4, x4, #~(THREAD_SIZE - 1)
@@ -490,17 +447,19 @@
 	bl	kasan_early_init
 #endif
 #ifdef CONFIG_RANDOMIZE_BASE
-	cbnz	x23, 0f				// already running randomized?
+	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
+	b.ne	0f
 	mov	x0, x21				// pass FDT address in x0
+	mov	x1, x23				// pass modulo offset in x1
 	bl	kaslr_early_init		// parse FDT for KASLR options
 	cbz	x0, 0f				// KASLR disabled? just proceed
-	mov	x23, x0				// record KASLR offset
+	orr	x23, x23, x0			// record KASLR offset
 	ret	x28				// we must enable KASLR, return
 						// to __enable_mmu()
 0:
 #endif
 	b	start_kernel
-ENDPROC(__mmap_switched)
+ENDPROC(__primary_switched)
 
 /*
  * end early head section, begin head code that is also used for
@@ -588,6 +547,15 @@
 	msr	vpidr_el2, x0
 	msr	vmpidr_el2, x1
 
+	/*
+	 * When VHE is not in use, early init of EL2 and EL1 needs to be
+	 * done here.
+	 * When VHE _is_ in use, EL1 will not be used in the host and
+	 * requires no configuration, and all non-hyp-specific EL2 setup
+	 * will be done via the _EL1 system register aliases in __cpu_setup.
+	 */
+	cbnz	x2, 1f
+
 	/* sctlr_el1 */
 	mov	x0, #0x0800			// Set/clear RES{1,0} bits
 CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
@@ -597,6 +565,7 @@
 	/* Coprocessor traps. */
 	mov	x0, #0x33ff
 	msr	cptr_el2, x0			// Disable copro. traps to EL2
+1:
 
 #ifdef CONFIG_COMPAT
 	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
@@ -640,7 +609,7 @@
  * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
  * in x20. See arch/arm64/include/asm/virt.h for more info.
  */
-ENTRY(set_cpu_boot_mode_flag)
+set_cpu_boot_mode_flag:
 	adr_l	x1, __boot_cpu_mode
 	cmp	w20, #BOOT_CPU_MODE_EL2
 	b.ne	1f
@@ -673,7 +642,7 @@
 	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
 	bl	set_cpu_boot_mode_flag
 	mrs	x0, mpidr_el1
-	ldr     x1, =MPIDR_HWID_BITMASK
+	mov_q	x1, MPIDR_HWID_BITMASK
 	and	x0, x0, x1
 	adr_l	x3, secondary_holding_pen_release
 pen:	ldr	x4, [x3]
@@ -693,7 +662,7 @@
 	b	secondary_startup
 ENDPROC(secondary_entry)
 
-ENTRY(secondary_startup)
+secondary_startup:
 	/*
 	 * Common entry point for secondary CPUs.
 	 */
@@ -701,14 +670,11 @@
 	adrp	x26, swapper_pg_dir
 	bl	__cpu_setup			// initialise processor
 
-	ldr	x8, kimage_vaddr
-	ldr	w9, 0f
-	sub	x27, x8, w9, sxtw		// address to jump to after enabling the MMU
+	adr_l	x27, __secondary_switch		// address to jump to after enabling the MMU
 	b	__enable_mmu
 ENDPROC(secondary_startup)
-0:	.long	(_text - TEXT_OFFSET) - __secondary_switched
 
-ENTRY(__secondary_switched)
+__secondary_switched:
 	adr_l	x5, vectors
 	msr	vbar_el1, x5
 	isb
@@ -734,7 +700,8 @@
 
 	.macro	update_early_cpu_boot_status status, tmp1, tmp2
 	mov	\tmp2, #\status
-	str_l	\tmp2, __early_cpu_boot_status, \tmp1
+	adr_l	\tmp1, __early_cpu_boot_status
+	str	\tmp2, [\tmp1]
 	dmb	sy
 	dc	ivac, \tmp1			// Invalidate potentially stale cache line
 	.endm
@@ -757,7 +724,7 @@
  * If it isn't, park the CPU
  */
 	.section	".idmap.text", "ax"
-__enable_mmu:
+ENTRY(__enable_mmu)
 	mrs	x22, sctlr_el1			// preserve old SCTLR_EL1 value
 	mrs	x1, ID_AA64MMFR0_EL1
 	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
@@ -795,7 +762,6 @@
 	ic	iallu				// flush instructions fetched
 	dsb	nsh				// via old mapping
 	isb
-	add	x27, x27, x23			// relocated __mmap_switched
 #endif
 	br	x27
 ENDPROC(__enable_mmu)
@@ -808,3 +774,53 @@
 	wfi
 	b 1b
 ENDPROC(__no_granule_support)
+
+__primary_switch:
+#ifdef CONFIG_RELOCATABLE
+	/*
+	 * Iterate over each entry in the relocation table, and apply the
+	 * relocations in place.
+	 */
+	ldr	w8, =__dynsym_offset		// offset to symbol table
+	ldr	w9, =__rela_offset		// offset to reloc table
+	ldr	w10, =__rela_size		// size of reloc table
+
+	mov_q	x11, KIMAGE_VADDR		// default virtual offset
+	add	x11, x11, x23			// actual virtual offset
+	add	x8, x8, x11			// __va(.dynsym)
+	add	x9, x9, x11			// __va(.rela)
+	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)
+
+0:	cmp	x9, x10
+	b.hs	2f
+	ldp	x11, x12, [x9], #24
+	ldr	x13, [x9, #-8]
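+	// x11 = r_offset, x12 = r_info, x13 = r_addend (Elf64_Rela is 24 bytes)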
+	cmp	w12, #R_AARCH64_RELATIVE
+	b.ne	1f
+	add	x13, x13, x23			// relocate
+	str	x13, [x11, x23]
+	b	0b
+
+1:	cmp	w12, #R_AARCH64_ABS64
+	b.ne	0b
+	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
+	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
+	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
+	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
+	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
+	add	x14, x15, x23			// relocate
+	csel	x15, x14, x15, ne
+	add	x15, x13, x15
+	str	x15, [x11, x23]
+	b	0b
+
+2:
+#endif
+	ldr	x8, =__primary_switched
+	br	x8
+ENDPROC(__primary_switch)
+
+__secondary_switch:
+	ldr	x8, =__secondary_switched
+	br	x8
+ENDPROC(__secondary_switch)
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
new file mode 100644
index 0000000..46f29b6
--- /dev/null
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -0,0 +1,176 @@
+/*
+ * Hibernate low-level support
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ * Author:	James Morse <james.morse@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/linkage.h>
+#include <linux/errno.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/cputype.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/virt.h>
+
+/*
+ * To prevent the possibility of old and new partial table walks being visible
+ * in the tlb, switch the ttbr to a zero page when we invalidate the old
+ * records. See D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i.
+ * Even switching to our copied tables will cause a changed output address at
+ * each stage of the walk.
+ */
+.macro break_before_make_ttbr_switch zero_page, page_table
+	msr	ttbr1_el1, \zero_page
+	isb
+	tlbi	vmalle1is
+	dsb	ish
+	msr	ttbr1_el1, \page_table
+	isb
+.endm
+
+
+/*
+ * Resume from hibernate
+ *
+ * Loads temporary page tables then restores the memory image.
+ * Finally branches to cpu_resume() to restore the state saved by
+ * swsusp_arch_suspend().
+ *
+ * Because this code has to be copied to a 'safe' page, it can't call out to
+ * other functions by PC-relative address. Also remember that it may be
+ * mid-way through over-writing other functions. For this reason it contains
+ * code from flush_icache_range() and uses the copy_page() macro.
+ *
+ * This 'safe' page is mapped via ttbr0, and executed from there. This function
+ * switches to a copy of the linear map in ttbr1, performs the restore, then
+ * switches ttbr1 to the original kernel's swapper_pg_dir.
+ *
+ * All of memory gets written to, including code. We need to clean the kernel
+ * text to the Point of Coherence (PoC) before secondary cores can be booted.
+ * Because the kernel modules and executable pages mapped to user space are
+ * also written as data, we clean all pages we touch to the Point of
+ * Unification (PoU).
+ *
+ * x0: physical address of temporary page tables
+ * x1: physical address of swapper page tables
+ * x2: address of cpu_resume
+ * x3: linear map address of restore_pblist in the current kernel
+ * x4: physical address of __hyp_stub_vectors, or 0
+ * x5: physical address of a  zero page that remains zero after resume
+ */
+.pushsection    ".hibernate_exit.text", "ax"
+ENTRY(swsusp_arch_suspend_exit)
+	/*
+	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
+	 * with a break-before-make via the zero page
+	 */
+	break_before_make_ttbr_switch	x5, x0
+
+	mov	x21, x1
+	mov	x30, x2
+	mov	x24, x4
+	mov	x25, x5
+
+	/* walk the restore_pblist and use copy_page() to over-write memory */
+	mov	x19, x3
+
+1:	ldr	x10, [x19, #HIBERN_PBE_ORIG]
+	mov	x0, x10
+	ldr	x1, [x19, #HIBERN_PBE_ADDR]
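+	/* x0: destination (the page's original location), x1: source (saved copy) */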
+
+	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
+
+	add	x1, x10, #PAGE_SIZE
+	/* Clean the copied page to PoU - based on flush_icache_range() */
+	dcache_line_size x2, x3
+	sub	x3, x2, #1
+	bic	x4, x10, x3
+2:	dc	cvau, x4	/* clean D line / unified line */
+	add	x4, x4, x2
+	cmp	x4, x1
+	b.lo	2b
+
+	ldr	x19, [x19, #HIBERN_PBE_NEXT]
+	cbnz	x19, 1b
+	dsb	ish		/* wait for PoU cleaning to finish */
+
+	/* switch to the restored kernels page tables */
+	break_before_make_ttbr_switch	x25, x21
+
+	ic	ialluis
+	dsb	ish
+	isb
+
+	cbz	x24, 3f		/* Do we need to re-initialise EL2? */
+	hvc	#0
+3:	ret
+
+	.ltorg
+ENDPROC(swsusp_arch_suspend_exit)
+
+/*
+ * Restore the hyp stub.
+ * This must be done before the hibernate page is unmapped by _cpu_resume(),
+ * but happens before any of the hyp-stub's code is cleaned to PoC.
+ *
+ * x24: The physical address of __hyp_stub_vectors
+ */
+el1_sync:
+	msr	vbar_el2, x24
+	eret
+ENDPROC(el1_sync)
+
+.macro invalid_vector	label
+\label:
+	b \label
+ENDPROC(\label)
+.endm
+
+	invalid_vector	el2_sync_invalid
+	invalid_vector	el2_irq_invalid
+	invalid_vector	el2_fiq_invalid
+	invalid_vector	el2_error_invalid
+	invalid_vector	el1_sync_invalid
+	invalid_vector	el1_irq_invalid
+	invalid_vector	el1_fiq_invalid
+	invalid_vector	el1_error_invalid
+
+/* el2 vectors - switch el2 here while we restore the memory image. */
+	.align 11
+ENTRY(hibernate_el2_vectors)
+	ventry	el2_sync_invalid		// Synchronous EL2t
+	ventry	el2_irq_invalid			// IRQ EL2t
+	ventry	el2_fiq_invalid			// FIQ EL2t
+	ventry	el2_error_invalid		// Error EL2t
+
+	ventry	el2_sync_invalid		// Synchronous EL2h
+	ventry	el2_irq_invalid			// IRQ EL2h
+	ventry	el2_fiq_invalid			// FIQ EL2h
+	ventry	el2_error_invalid		// Error EL2h
+
+	ventry	el1_sync			// Synchronous 64-bit EL1
+	ventry	el1_irq_invalid			// IRQ 64-bit EL1
+	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
+	ventry	el1_error_invalid		// Error 64-bit EL1
+
+	ventry	el1_sync_invalid		// Synchronous 32-bit EL1
+	ventry	el1_irq_invalid			// IRQ 32-bit EL1
+	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
+	ventry	el1_error_invalid		// Error 32-bit EL1
+END(hibernate_el2_vectors)
+
+.popsection
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
new file mode 100644
index 0000000..f8df75d
--- /dev/null
+++ b/arch/arm64/kernel/hibernate.c
@@ -0,0 +1,487 @@
+/*
+ * Hibernate support specific for ARM64
+ *
+ * Derived from work on ARM hibernation support by:
+ *
+ * Ubuntu project, hibernation support for mach-dove
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ *  https://lkml.org/lkml/2010/6/18/4
+ *  https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ *  https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(x) "hibernate: " x
+#include <linux/kvm_host.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/pm.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/irqflags.h>
+#include <asm/memory.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/sections.h>
+#include <asm/suspend.h>
+#include <asm/virt.h>
+
+/*
+ * Hibernate core relies on this value being 0 on resume, and marks it
+ * __nosavedata assuming it will keep the resume kernel's '0' value. This
+ * doesn't happen with KASLR.
+ *
+ * defined as "__visible int in_suspend __nosavedata" in
+ * kernel/power/hibernate.c
+ */
+extern int in_suspend;
+
+/* Find a symbol's alias in the linear map */
+#define LMADDR(x)	phys_to_virt(virt_to_phys(x))
+
+/* Do we need to reset el2? */
+#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
+
+/*
+ * Start/end of the hibernate exit code; this must be copied to a 'safe'
+ * location in memory, and executed from there.
+ */
+extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
+
+/* temporary el2 vectors in the __hibernate_exit_text section. */
+extern char hibernate_el2_vectors[];
+
+/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
+extern char __hyp_stub_vectors[];
+
+/*
+ * Values that must not change over hibernate/resume. We put the build number
+ * and date in here so that we guarantee not to resume with a different
+ * kernel.
+ */
+struct arch_hibernate_hdr_invariants {
+	char		uts_version[__NEW_UTS_LEN + 1];
+};
+
+/* These values need to be known across a hibernate/restore. */
+static struct arch_hibernate_hdr {
+	struct arch_hibernate_hdr_invariants invariants;
+
+	/* These are needed to find the relocated kernel if built with kaslr */
+	phys_addr_t	ttbr1_el1;
+	void		(*reenter_kernel)(void);
+
+	/*
+	 * We need to know where the __hyp_stub_vectors are after restore to
+	 * re-configure el2.
+	 */
+	phys_addr_t	__hyp_stub_vectors;
+} resume_hdr;
+
+static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
+{
+	memset(i, 0, sizeof(*i));
+	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
+}
+
+int pfn_is_nosave(unsigned long pfn)
+{
+	unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
+	unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
+
+	return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
+}
+
+void notrace save_processor_state(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+}
+
+void notrace restore_processor_state(void)
+{
+}
+
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+	struct arch_hibernate_hdr *hdr = addr;
+
+	if (max_size < sizeof(*hdr))
+		return -EOVERFLOW;
+
+	arch_hdr_invariants(&hdr->invariants);
+	hdr->ttbr1_el1		= virt_to_phys(swapper_pg_dir);
+	hdr->reenter_kernel	= _cpu_resume;
+
+	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
+	if (el2_reset_needed())
+		hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
+	else
+		hdr->__hyp_stub_vectors = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(arch_hibernation_header_save);
+
+int arch_hibernation_header_restore(void *addr)
+{
+	struct arch_hibernate_hdr_invariants invariants;
+	struct arch_hibernate_hdr *hdr = addr;
+
+	arch_hdr_invariants(&invariants);
+	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
+		pr_crit("Hibernate image not generated by this kernel!\n");
+		return -EINVAL;
+	}
+
+	resume_hdr = *hdr;
+
+	return 0;
+}
+EXPORT_SYMBOL(arch_hibernation_header_restore);
+
+/*
+ * Copies length bytes, starting at src_start, into a new page,
+ * performs cache maintenance, then maps it at the specified low
+ * address as executable.
+ *
+ * This is used by hibernate to copy the code it needs to execute when
+ * overwriting the kernel text. This function generates a new set of page
+ * tables, which it loads into ttbr0.
+ *
+ * Length is provided as we probably only want 4K of data, even on a 64K
+ * page system.
+ */
+static int create_safe_exec_page(void *src_start, size_t length,
+				 unsigned long dst_addr,
+				 phys_addr_t *phys_dst_addr,
+				 void *(*allocator)(gfp_t mask),
+				 gfp_t mask)
+{
+	int rc = 0;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long dst = (unsigned long)allocator(mask);
+
+	if (!dst) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	memcpy((void *)dst, src_start, length);
+	flush_icache_range(dst, dst + length);
+
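+	/* Build a minimal pgd->pud->pmd->pte chain mapping dst_addr to the copy */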
+	pgd = pgd_offset_raw(allocator(mask), dst_addr);
+	if (pgd_none(*pgd)) {
+		pud = allocator(mask);
+		if (!pud) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		pgd_populate(&init_mm, pgd, pud);
+	}
+
+	pud = pud_offset(pgd, dst_addr);
+	if (pud_none(*pud)) {
+		pmd = allocator(mask);
+		if (!pmd) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		pud_populate(&init_mm, pud, pmd);
+	}
+
+	pmd = pmd_offset(pud, dst_addr);
+	if (pmd_none(*pmd)) {
+		pte = allocator(mask);
+		if (!pte) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		pmd_populate_kernel(&init_mm, pmd, pte);
+	}
+
+	pte = pte_offset_kernel(pmd, dst_addr);
+	set_pte(pte, __pte(virt_to_phys((void *)dst) |
+			 pgprot_val(PAGE_KERNEL_EXEC)));
+
+	/* Load our new page tables */
+	asm volatile("msr	ttbr0_el1, %0;"
+		     "isb;"
+		     "tlbi	vmalle1is;"
+		     "dsb	ish;"
+		     "isb" : : "r"(virt_to_phys(pgd)));
+
+	*phys_dst_addr = virt_to_phys((void *)dst);
+
+out:
+	return rc;
+}
+
+
+int swsusp_arch_suspend(void)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct sleep_stack_data state;
+
+	local_dbg_save(flags);
+
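+	/* __cpu_suspend_enter() returns non-zero when suspending, zero on resume */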
+	if (__cpu_suspend_enter(&state)) {
+		ret = swsusp_save();
+	} else {
+		/* Clean kernel to PoC for secondary core startup */
+		__flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START);
+
+		/*
+		 * Tell the hibernation core that we've just restored
+		 * the memory
+		 */
+		in_suspend = 0;
+
+		__cpu_suspend_exit();
+	}
+
+	local_dbg_restore(flags);
+
+	return ret;
+}
+
+static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
+		    unsigned long end)
+{
+	pte_t *src_pte;
+	pte_t *dst_pte;
+	unsigned long addr = start;
+
+	dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
+	if (!dst_pte)
+		return -ENOMEM;
+	pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
+	dst_pte = pte_offset_kernel(dst_pmd, start);
+
+	src_pte = pte_offset_kernel(src_pmd, start);
+	do {
+		if (!pte_none(*src_pte))
+			/*
+			 * Resume will overwrite areas that may be marked
+			 * read only (code, rodata). Clear the RDONLY bit from
+			 * the temporary mappings we use during restore.
+			 */
+			set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY));
+	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+
+	return 0;
+}
+
+static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
+		    unsigned long end)
+{
+	pmd_t *src_pmd;
+	pmd_t *dst_pmd;
+	unsigned long next;
+	unsigned long addr = start;
+
+	if (pud_none(*dst_pud)) {
+		dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pmd)
+			return -ENOMEM;
+		pud_populate(&init_mm, dst_pud, dst_pmd);
+	}
+	dst_pmd = pmd_offset(dst_pud, start);
+
+	src_pmd = pmd_offset(src_pud, start);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none(*src_pmd))
+			continue;
+		if (pmd_table(*src_pmd)) {
+			if (copy_pte(dst_pmd, src_pmd, addr, next))
+				return -ENOMEM;
+		} else {
+			set_pmd(dst_pmd,
+				__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
+		}
+	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
+
+	return 0;
+}
+
+static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
+		    unsigned long end)
+{
+	pud_t *dst_pud;
+	pud_t *src_pud;
+	unsigned long next;
+	unsigned long addr = start;
+
+	if (pgd_none(*dst_pgd)) {
+		dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pud)
+			return -ENOMEM;
+		pgd_populate(&init_mm, dst_pgd, dst_pud);
+	}
+	dst_pud = pud_offset(dst_pgd, start);
+
+	src_pud = pud_offset(src_pgd, start);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none(*src_pud))
+			continue;
+		if (pud_table(*(src_pud))) {
+			if (copy_pmd(dst_pud, src_pud, addr, next))
+				return -ENOMEM;
+		} else {
+			set_pud(dst_pud,
+				__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
+		}
+	} while (dst_pud++, src_pud++, addr = next, addr != end);
+
+	return 0;
+}
+
+static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
+			    unsigned long end)
+{
+	unsigned long next;
+	unsigned long addr = start;
+	pgd_t *src_pgd = pgd_offset_k(start);
+
+	dst_pgd = pgd_offset_raw(dst_pgd, start);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(*src_pgd))
+			continue;
+		if (copy_pud(dst_pgd, src_pgd, addr, next))
+			return -ENOMEM;
+	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
+
+	return 0;
+}
+
+/*
+ * Set up, then resume from the hibernate image using swsusp_arch_suspend_exit().
+ *
+ * Memory allocated by get_safe_page() will be dealt with by the hibernate code;
+ * we don't need to free it here.
+ */
+int swsusp_arch_resume(void)
+{
+	int rc = 0;
+	void *zero_page;
+	size_t exit_size;
+	pgd_t *tmp_pg_dir;
+	void *lm_restore_pblist;
+	phys_addr_t phys_hibernate_exit;
+	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
+					  void *, phys_addr_t, phys_addr_t);
+
+	/*
+	 * Locate the exit code in the bottom-but-one page, so that *NULL
+	 * still has disastrous effects.
+	 */
+	hibernate_exit = (void *)PAGE_SIZE;
+	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
+	/*
+	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
+	 * a new set of ttbr0 page tables and load them.
+	 */
+	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
+				   (unsigned long)hibernate_exit,
+				   &phys_hibernate_exit,
+				   (void *)get_safe_page, GFP_ATOMIC);
+	if (rc) {
+		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
+		goto out;
+	}
+
+	/*
+	 * The hibernate exit text contains a set of el2 vectors that will
+	 * be executed at el2 with the mmu off in order to reload the hyp-stub.
+	 */
+	__flush_dcache_area(hibernate_exit, exit_size);
+
+	/*
+	 * Restoring the memory image will overwrite the ttbr1 page tables.
+	 * Create a second copy of just the linear map, and use this when
+	 * restoring.
+	 */
+	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!tmp_pg_dir) {
+		pr_err("Failed to allocate memory for temporary page tables.\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+	if (rc)
+		goto out;
+
+	/*
+	 * Since we only copied the linear map, we need to find restore_pblist's
+	 * linear map address.
+	 */
+	lm_restore_pblist = LMADDR(restore_pblist);
+
+	/*
+	 * KASLR will cause the el2 vectors to be in a different location in
+	 * the resumed kernel. Load hibernate's temporary copy into el2.
+	 *
+	 * We can skip this step if we booted at EL1, or are running with VHE.
+	 */
+	if (el2_reset_needed()) {
+		phys_addr_t el2_vectors = phys_hibernate_exit;  /* base */
+		el2_vectors += hibernate_el2_vectors -
+			       __hibernate_exit_text_start;     /* offset */
+
+		__hyp_set_vectors(el2_vectors);
+	}
+
+	/*
+	 * We need a zero page that is zero before & after resume in order
+	 * to break-before-make on the ttbr1 page tables.
+	 */
+	zero_page = (void *)get_safe_page(GFP_ATOMIC);
+	if (!zero_page) {
+		pr_err("Failed to allocate zero page.\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
+		       resume_hdr.reenter_kernel, lm_restore_pblist,
+		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
+
+out:
+	return rc;
+}
+
+static int check_boot_cpu_online_pm_callback(struct notifier_block *nb,
+					     unsigned long action, void *ptr)
+{
+	if (action == PM_HIBERNATION_PREPARE &&
+	     cpumask_first(cpu_online_mask) != 0) {
+		pr_warn("CPU0 is offline.\n");
+		return notifier_from_errno(-ENODEV);
+	}
+
+	return NOTIFY_OK;
+}
+
+static int __init check_boot_cpu_online_init(void)
+{
+	/*
+	 * Set this pm_notifier callback with a lower priority than
+	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
+	 * called earlier to disable cpu hotplug before the cpu online check.
+	 */
+	pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX);
+
+	return 0;
+}
+core_initcall(check_boot_cpu_online_init);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index b45c95d..ce21aa8 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -616,7 +616,7 @@
 		perf_bp_event(bp, regs);
 
 		/* Do we need to handle the stepping? */
-		if (!bp->overflow_handler)
+		if (is_default_overflow_handler(bp))
 			step = 1;
 unlock:
 		rcu_read_unlock();
@@ -712,7 +712,7 @@
 		perf_bp_event(wp, regs);
 
 		/* Do we need to handle the stepping? */
-		if (!wp->overflow_handler)
+		if (is_default_overflow_handler(wp))
 			step = 1;
 
 unlock:
@@ -886,9 +886,11 @@
 						unsigned long action,
 						void *hcpu)
 {
-	int cpu = (long)hcpu;
-	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
-		smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
+	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) {
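+		/* CPU_ONLINE callbacks run on the hotplugged CPU, so reset breakpoints locally */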
+		local_irq_disable();
+		hw_breakpoint_reset(NULL);
+		local_irq_enable();
+	}
 	return NOTIFY_OK;
 }
 
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index a272f33..8727f44 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -22,6 +22,8 @@
 #include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 
@@ -53,15 +55,26 @@
 	.align 11
 
 el1_sync:
-	mrs	x1, esr_el2
-	lsr	x1, x1, #26
-	cmp	x1, #0x16
-	b.ne	2f				// Not an HVC trap
-	cbz	x0, 1f
-	msr	vbar_el2, x0			// Set vbar_el2
-	b	2f
-1:	mrs	x0, vbar_el2			// Return vbar_el2
-2:	eret
+	mrs	x30, esr_el2
+	lsr	x30, x30, #ESR_ELx_EC_SHIFT
+
+	cmp	x30, #ESR_ELx_EC_HVC64
+	b.ne	9f				// Not an HVC trap
+
+	cmp	x0, #HVC_GET_VECTORS
+	b.ne	1f
+	mrs	x0, vbar_el2
+	b	9f
+
+1:	cmp	x0, #HVC_SET_VECTORS
+	b.ne	2f
+	msr	vbar_el2, x1
+	b	9f
+
+	/* Someone called kvm_call_hyp() against the hyp-stub... */
+2:	mov     x0, #ARM_EXCEPTION_HYP_GONE
+
+9:	eret
 ENDPROC(el1_sync)
 
 .macro invalid_vector	label
@@ -101,10 +114,18 @@
  */
 
 ENTRY(__hyp_get_vectors)
-	mov	x0, xzr
-	// fall through
-ENTRY(__hyp_set_vectors)
+	str	lr, [sp, #-16]!
+	mov	x0, #HVC_GET_VECTORS
 	hvc	#0
+	ldr	lr, [sp], #16
 	ret
 ENDPROC(__hyp_get_vectors)
+
+ENTRY(__hyp_set_vectors)
+	str	lr, [sp, #-16]!
+	mov	x1, x0
+	mov	x0, #HVC_SET_VECTORS
+	hvc	#0
+	ldr	lr, [sp], #16
+	ret
 ENDPROC(__hyp_set_vectors)
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 5e360ce..c7fcb23 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -73,6 +73,8 @@
 
 #ifdef CONFIG_EFI
 
+__efistub_stext_offset = stext - _text;
+
 /*
  * Prevent the symbol aliases below from being emitted into the kallsyms
  * table, by forcing them to be absolute symbols (which are conveniently
@@ -112,6 +114,7 @@
 __efistub__text			= KALLSYMS_HIDE(_text);
 __efistub__end			= KALLSYMS_HIDE(_end);
 __efistub__edata		= KALLSYMS_HIDE(_edata);
+__efistub_screen_info		= KALLSYMS_HIDE(screen_info);
 
 #endif
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 7371455..368c082 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -96,7 +96,7 @@
 	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
 		page = vmalloc_to_page(addr);
 	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
-		page = virt_to_page(addr);
+		page = pfn_to_page(PHYS_PFN(__pa(addr)));
 	else
 		return addr;
 
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 5829839..b054691 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -74,7 +74,7 @@
  * containing function pointers) to be reinitialized, and zero-initialized
  * .bss variables will be reset to 0.
  */
-u64 __init kaslr_early_init(u64 dt_phys)
+u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 {
 	void *fdt;
 	u64 seed, offset, mask, module_range;
@@ -132,8 +132,8 @@
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
 	 * happens, increase the KASLR offset by the size of the kernel image.
 	 */
-	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
+	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
 		offset = (offset + (u64)(_end - _text)) & mask;
 
 	if (IS_ENABLED(CONFIG_KASAN))
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index c72de66..3c4e308 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -74,6 +74,16 @@
 	return -ENXIO;
 }
 
+#ifdef CONFIG_NUMA
+
+int pcibus_to_node(struct pci_bus *bus)
+{
+	return dev_to_node(&bus->dev);
+}
+EXPORT_SYMBOL(pcibus_to_node);
+
+#endif
+
 #ifdef CONFIG_ACPI
 /* Root bridge scanning */
 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index ff46654..32c3c6e 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -122,7 +122,7 @@
 
 		tail = (struct frame_tail __user *)regs->regs[29];
 
-		while (entry->nr < PERF_MAX_STACK_DEPTH &&
+		while (entry->nr < sysctl_perf_event_max_stack &&
 		       tail && !((unsigned long)tail & 0xf))
 			tail = user_backtrace(tail, entry);
 	} else {
@@ -132,7 +132,7 @@
 
 		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
 
-		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+		while ((entry->nr < sysctl_perf_event_max_stack) &&
 			tail && !((unsigned long)tail & 0x3))
 			tail = compat_user_backtrace(tail, entry);
 #endif
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 767c4f6..838ccf1 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -20,6 +20,8 @@
  */
 
 #include <asm/irq_regs.h>
+#include <asm/perf_event.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 #include <linux/of.h>
@@ -32,43 +34,43 @@
  */
 
 /* Required events. */
-#define ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR			0x00
-#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL			0x03
-#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS			0x04
-#define ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED			0x10
-#define ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES			0x11
-#define ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED			0x12
+#define ARMV8_PMUV3_PERFCTR_SW_INCR				0x00
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL			0x03
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE				0x04
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED				0x10
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES				0x11
+#define ARMV8_PMUV3_PERFCTR_BR_PRED				0x12
 
 /* At least one of the following is required. */
-#define ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED			0x08
-#define ARMV8_PMUV3_PERFCTR_OP_SPEC				0x1B
+#define ARMV8_PMUV3_PERFCTR_INST_RETIRED			0x08
+#define ARMV8_PMUV3_PERFCTR_INST_SPEC				0x1B
 
 /* Common architectural events. */
-#define ARMV8_PMUV3_PERFCTR_MEM_READ				0x06
-#define ARMV8_PMUV3_PERFCTR_MEM_WRITE				0x07
+#define ARMV8_PMUV3_PERFCTR_LD_RETIRED				0x06
+#define ARMV8_PMUV3_PERFCTR_ST_RETIRED				0x07
 #define ARMV8_PMUV3_PERFCTR_EXC_TAKEN				0x09
-#define ARMV8_PMUV3_PERFCTR_EXC_EXECUTED			0x0A
-#define ARMV8_PMUV3_PERFCTR_CID_WRITE				0x0B
-#define ARMV8_PMUV3_PERFCTR_PC_WRITE				0x0C
-#define ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH			0x0D
-#define ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN			0x0E
-#define ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS		0x0F
-#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE				0x1C
+#define ARMV8_PMUV3_PERFCTR_EXC_RETURN				0x0A
+#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED			0x0B
+#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED			0x0C
+#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED			0x0D
+#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED			0x0E
+#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED		0x0F
+#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED			0x1C
 #define ARMV8_PMUV3_PERFCTR_CHAIN				0x1E
 #define ARMV8_PMUV3_PERFCTR_BR_RETIRED				0x21
 
 /* Common microarchitectural events. */
-#define ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL			0x01
-#define ARMV8_PMUV3_PERFCTR_ITLB_REFILL				0x02
-#define ARMV8_PMUV3_PERFCTR_DTLB_REFILL				0x05
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL			0x01
+#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL			0x02
+#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL			0x05
 #define ARMV8_PMUV3_PERFCTR_MEM_ACCESS				0x13
-#define ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS			0x14
-#define ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB			0x15
-#define ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS			0x16
-#define ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL			0x17
-#define ARMV8_PMUV3_PERFCTR_L2_CACHE_WB				0x18
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE				0x14
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB			0x15
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE				0x16
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL			0x17
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB			0x18
 #define ARMV8_PMUV3_PERFCTR_BUS_ACCESS				0x19
-#define ARMV8_PMUV3_PERFCTR_MEM_ERROR				0x1A
+#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR			0x1A
 #define ARMV8_PMUV3_PERFCTR_BUS_CYCLES				0x1D
 #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE			0x1F
 #define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE			0x20
@@ -84,89 +86,182 @@
 #define ARMV8_PMUV3_PERFCTR_L3D_CACHE				0x2B
 #define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB			0x2C
 #define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL			0x2D
-#define ARMV8_PMUV3_PERFCTR_L21_TLB_REFILL			0x2E
+#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL			0x2E
 #define ARMV8_PMUV3_PERFCTR_L2D_TLB				0x2F
-#define ARMV8_PMUV3_PERFCTR_L21_TLB				0x30
+#define ARMV8_PMUV3_PERFCTR_L2I_TLB				0x30
 
-/* ARMv8 implementation defined event types. */
-#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_LD		0x40
-#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_ST		0x41
-#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_LD		0x42
-#define ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_ST		0x43
-#define ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_LD			0x4C
-#define ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_ST			0x4D
-#define ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_LD			0x4E
-#define ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_ST			0x4F
+/* ARMv8 recommended implementation defined event types */
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD			0x40
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR			0x41
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD		0x42
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR		0x43
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER		0x44
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER		0x45
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM		0x46
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN			0x47
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL			0x48
+
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD			0x4C
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR			0x4D
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD				0x4E
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR				0x4F
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD			0x50
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR			0x51
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD		0x52
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR		0x53
+
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM		0x56
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN			0x57
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL			0x58
+
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD			0x5C
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR			0x5D
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD				0x5E
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR				0x5F
+
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD			0x60
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR			0x61
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED			0x62
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED		0x63
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL			0x64
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH			0x65
+
+#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD			0x66
+#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR			0x67
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC			0x68
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC			0x69
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC		0x6A
+
+#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC				0x6C
+#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC			0x6D
+#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC			0x6E
+#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC				0x6F
+#define ARMV8_IMPDEF_PERFCTR_LD_SPEC				0x70
+#define ARMV8_IMPDEF_PERFCTR_ST_SPEC				0x71
+#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC				0x72
+#define ARMV8_IMPDEF_PERFCTR_DP_SPEC				0x73
+#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC				0x74
+#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC				0x75
+#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC			0x76
+#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC			0x77
+#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC			0x78
+#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC			0x79
+#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC			0x7A
+
+#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC				0x7C
+#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC				0x7D
+#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC				0x7E
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF				0x81
+#define ARMV8_IMPDEF_PERFCTR_EXC_SVC				0x82
+#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT				0x83
+#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT				0x84
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ				0x86
+#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ				0x87
+#define ARMV8_IMPDEF_PERFCTR_EXC_SMC				0x88
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_HVC				0x8A
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT			0x8B
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT			0x8C
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER			0x8D
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ			0x8E
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ			0x8F
+#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC				0x90
+#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC				0x91
+
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD			0xA0
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR			0xA1
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD		0xA2
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR		0xA3
+
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM		0xA6
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN			0xA7
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL			0xA8
 
 /* ARMv8 Cortex-A53 specific event types. */
-#define ARMV8_A53_PERFCTR_PREFETCH_LINEFILL			0xC2
+#define ARMV8_A53_PERFCTR_PREF_LINEFILL				0xC2
 
 /* ARMv8 Cavium ThunderX specific event types. */
-#define ARMV8_THUNDER_PERFCTR_L1_DCACHE_MISS_ST			0xE9
-#define ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_ACCESS		0xEA
-#define ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_MISS		0xEB
-#define ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_ACCESS		0xEC
-#define ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_MISS		0xED
+#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST			0xE9
+#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS		0xEA
+#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS		0xEB
+#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS		0xEC
+#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS		0xED
 
 /* PMUv3 HW events mapping. */
 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
 	PERF_MAP_ALL_UNSUPPORTED,
-	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
-	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
-	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
 };
 
 /* ARM Cortex-A53 HW events mapping. */
 static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
 	PERF_MAP_ALL_UNSUPPORTED,
-	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
-	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
-	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE,
-	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
 };
 
 /* ARM Cortex-A57 and Cortex-A72 events mapping. */
 static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
 	PERF_MAP_ALL_UNSUPPORTED,
-	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
-	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
-	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
 };
 
 static const unsigned armv8_thunder_perf_map[PERF_COUNT_HW_MAX] = {
 	PERF_MAP_ALL_UNSUPPORTED,
-	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
-	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
-	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE,
-	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
 };
 
+/* Broadcom Vulcan events mapping */
+static const unsigned armv8_vulcan_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_BR_RETIRED,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
+};
+
 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 						[PERF_COUNT_HW_CACHE_OP_MAX]
 						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 
-	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
 
-	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
 };
 
 static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -174,21 +269,21 @@
 					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 
-	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
+	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,
 
-	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
-	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
 
-	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
 
-	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
 };
 
 static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -196,23 +291,23 @@
 					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 
-	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_LD,
-	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_LD,
-	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_ST,
-	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_ST,
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
 
-	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
-	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
 
-	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_LD,
-	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_ST,
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
 
-	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
 
-	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
 };
 
 static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -220,67 +315,108 @@
 						   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 
-	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_LD,
-	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_REFILL_LD,
-	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1_DCACHE_ACCESS_ST,
-	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1_DCACHE_MISS_ST,
-	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_ACCESS,
-	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1_DCACHE_PREF_MISS,
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
+	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
+	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,
 
-	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
-	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
-	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_ACCESS,
-	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1_ICACHE_PREF_MISS,
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
+	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
+	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,
 
-	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_LD,
-	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_LD,
-	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_DTLB_ACCESS_ST,
-	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_DTLB_REFILL_ST,
+	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
 
-	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
 
-	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
 };
 
+static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					      [PERF_COUNT_HW_CACHE_OP_MAX]
+					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
+
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
+	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB,
+
+	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+
+	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
+	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
+};
+
+static ssize_t
+armv8pmu_events_sysfs_show(struct device *dev,
+			   struct device_attribute *attr, char *page)
+{
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+	return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
+}
+
 #define ARMV8_EVENT_ATTR_RESOLVE(m) #m
 #define ARMV8_EVENT_ATTR(name, config) \
-	PMU_EVENT_ATTR_STRING(name, armv8_event_attr_##name, \
-			      "event=" ARMV8_EVENT_ATTR_RESOLVE(config))
+	PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
+		       config, armv8pmu_events_sysfs_show)
 
-ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR);
-ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL);
-ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_ITLB_REFILL);
-ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL);
-ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS);
-ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_DTLB_REFILL);
-ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_MEM_READ);
-ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_MEM_WRITE);
-ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED);
+ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
+ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
+ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
+ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
+ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
+ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
+ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
+ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
+ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
 ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
-ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_EXECUTED);
-ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE);
-ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE);
-ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH);
-ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN);
-ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS);
-ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED);
-ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES);
-ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED);
+ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
+ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
+ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
+ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
+ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
+ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
+ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
+ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
+ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
 ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
-ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS);
-ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB);
-ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS);
-ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL);
-ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2_CACHE_WB);
+ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
+ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
+ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
+ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
+ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
 ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
-ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEM_ERROR);
-ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC);
-ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE);
+ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
+ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
+ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
 ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
-ARMV8_EVENT_ATTR(chain, ARMV8_PMUV3_PERFCTR_CHAIN);
+/* Don't expose the chain event in /sys, since it's useless in isolation */
 ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
 ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
 ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
@@ -296,9 +432,9 @@
 ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
 ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
 ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
-ARMV8_EVENT_ATTR(l21_tlb_refill, ARMV8_PMUV3_PERFCTR_L21_TLB_REFILL);
+ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
 ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
-ARMV8_EVENT_ATTR(l21_tlb, ARMV8_PMUV3_PERFCTR_L21_TLB);
+ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
 
 static struct attribute *armv8_pmuv3_event_attrs[] = {
 	&armv8_event_attr_sw_incr.attr.attr,
@@ -331,7 +467,6 @@
 	&armv8_event_attr_inst_spec.attr.attr,
 	&armv8_event_attr_ttbr_write_retired.attr.attr,
 	&armv8_event_attr_bus_cycles.attr.attr,
-	&armv8_event_attr_chain.attr.attr,
 	&armv8_event_attr_l1d_cache_allocate.attr.attr,
 	&armv8_event_attr_l2d_cache_allocate.attr.attr,
 	&armv8_event_attr_br_retired.attr.attr,
@@ -347,15 +482,33 @@
 	&armv8_event_attr_l3d_cache.attr.attr,
 	&armv8_event_attr_l3d_cache_wb.attr.attr,
 	&armv8_event_attr_l2d_tlb_refill.attr.attr,
-	&armv8_event_attr_l21_tlb_refill.attr.attr,
+	&armv8_event_attr_l2i_tlb_refill.attr.attr,
 	&armv8_event_attr_l2d_tlb.attr.attr,
-	&armv8_event_attr_l21_tlb.attr.attr,
+	&armv8_event_attr_l2i_tlb.attr.attr,
 	NULL,
 };
 
+static umode_t
+armv8pmu_event_attr_is_visible(struct kobject *kobj,
+			       struct attribute *attr, int unused)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct pmu *pmu = dev_get_drvdata(dev);
+	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
+
+	if (test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
+		return attr->mode;
+
+	return 0;
+}
+
 static struct attribute_group armv8_pmuv3_events_attr_group = {
 	.name = "events",
 	.attrs = armv8_pmuv3_event_attrs,
+	.is_visible = armv8pmu_event_attr_is_visible,
 };
 
 PMU_FORMAT_ATTR(event, "config:0-9");
@@ -384,9 +537,6 @@
 #define	ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
 	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 
-#define	ARMV8_MAX_COUNTERS	32
-#define	ARMV8_COUNTER_MASK	(ARMV8_MAX_COUNTERS - 1)
-
 /*
  * ARMv8 low level PMU access
  */
@@ -395,58 +545,23 @@
  * Perf Event to low level counters mapping
  */
 #define	ARMV8_IDX_TO_COUNTER(x)	\
-	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMCR_E		(1 << 0) /* Enable all counters */
-#define ARMV8_PMCR_P		(1 << 1) /* Reset all counters */
-#define ARMV8_PMCR_C		(1 << 2) /* Cycle counter reset */
-#define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
-#define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
-#define ARMV8_PMCR_LC		(1 << 6) /* Overflow on 64 bit cycle counter */
-#define	ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
-#define	ARMV8_PMCR_N_MASK	0x1f
-#define	ARMV8_PMCR_MASK		0x7f	 /* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define	ARMV8_OVSR_MASK		0xffffffff	/* Mask for writable bits */
-#define	ARMV8_OVERFLOWED_MASK	ARMV8_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define	ARMV8_EVTYPE_MASK	0xc800ffff	/* Mask for writable bits */
-#define	ARMV8_EVTYPE_EVENT	0xffff		/* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv3
- */
-#define	ARMV8_EXCLUDE_EL1	(1 << 31)
-#define	ARMV8_EXCLUDE_EL0	(1 << 30)
-#define	ARMV8_INCLUDE_EL2	(1 << 27)
+	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
 
 static inline u32 armv8pmu_pmcr_read(void)
 {
-	u32 val;
-	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
-	return val;
+	return read_sysreg(pmcr_el0);
 }
 
 static inline void armv8pmu_pmcr_write(u32 val)
 {
-	val &= ARMV8_PMCR_MASK;
+	val &= ARMV8_PMU_PMCR_MASK;
 	isb();
-	asm volatile("msr pmcr_el0, %0" :: "r" (val));
+	write_sysreg(val, pmcr_el0);
 }
 
 static inline int armv8pmu_has_overflowed(u32 pmovsr)
 {
-	return pmovsr & ARMV8_OVERFLOWED_MASK;
+	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
 }
 
 static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
@@ -463,7 +578,7 @@
 static inline int armv8pmu_select_counter(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
+	write_sysreg(counter, pmselr_el0);
 	isb();
 
 	return idx;
@@ -480,9 +595,9 @@
 		pr_err("CPU%u reading wrong counter %d\n",
 			smp_processor_id(), idx);
 	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
-		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
+		value = read_sysreg(pmccntr_el0);
 	else if (armv8pmu_select_counter(idx) == idx)
-		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));
+		value = read_sysreg(pmxevcntr_el0);
 
 	return value;
 }
@@ -504,47 +619,47 @@
 		 */
 		u64 value64 = 0xffffffff00000000ULL | value;
 
-		asm volatile("msr pmccntr_el0, %0" :: "r" (value64));
+		write_sysreg(value64, pmccntr_el0);
 	} else if (armv8pmu_select_counter(idx) == idx)
-		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
+		write_sysreg(value, pmxevcntr_el0);
 }
 
 static inline void armv8pmu_write_evtype(int idx, u32 val)
 {
 	if (armv8pmu_select_counter(idx) == idx) {
-		val &= ARMV8_EVTYPE_MASK;
-		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
+		val &= ARMV8_PMU_EVTYPE_MASK;
+		write_sysreg(val, pmxevtyper_el0);
 	}
 }
 
 static inline int armv8pmu_enable_counter(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
+	write_sysreg(BIT(counter), pmcntenset_el0);
 	return idx;
 }
 
 static inline int armv8pmu_disable_counter(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
+	write_sysreg(BIT(counter), pmcntenclr_el0);
 	return idx;
 }
 
 static inline int armv8pmu_enable_intens(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
+	write_sysreg(BIT(counter), pmintenset_el1);
 	return idx;
 }
 
 static inline int armv8pmu_disable_intens(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
+	write_sysreg(BIT(counter), pmintenclr_el1);
 	isb();
 	/* Clear the overflow flag in case an interrupt is pending. */
-	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
+	write_sysreg(BIT(counter), pmovsclr_el0);
 	isb();
 
 	return idx;
@@ -555,11 +670,11 @@
 	u32 value;
 
 	/* Read */
-	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
+	value = read_sysreg(pmovsclr_el0);
 
 	/* Write to clear flags */
-	value &= ARMV8_OVSR_MASK;
-	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
+	value &= ARMV8_PMU_OVSR_MASK;
+	write_sysreg(value, pmovsclr_el0);
 
 	return value;
 }
@@ -696,7 +811,7 @@
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
-	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
+	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
@@ -707,7 +822,7 @@
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
-	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
+	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
@@ -717,10 +832,10 @@
 	int idx;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;
+	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
 
 	/* Always place a cycle counter into the cycle counter. */
-	if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
+	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
 		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
@@ -754,11 +869,11 @@
 	    attr->exclude_kernel != attr->exclude_hv)
 		return -EINVAL;
 	if (attr->exclude_user)
-		config_base |= ARMV8_EXCLUDE_EL0;
+		config_base |= ARMV8_PMU_EXCLUDE_EL0;
 	if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
-		config_base |= ARMV8_EXCLUDE_EL1;
+		config_base |= ARMV8_PMU_EXCLUDE_EL1;
 	if (!attr->exclude_hv)
-		config_base |= ARMV8_INCLUDE_EL2;
+		config_base |= ARMV8_PMU_INCLUDE_EL2;
 
 	/*
 	 * Install the filter into config_base as this is used to
@@ -784,53 +899,70 @@
 	 * Initialize & Reset PMNC. Request overflow interrupt for
 	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
 	 */
-	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC);
+	armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
+			    ARMV8_PMU_PMCR_LC);
 }
 
 static int armv8_pmuv3_map_event(struct perf_event *event)
 {
 	return armpmu_map_event(event, &armv8_pmuv3_perf_map,
 				&armv8_pmuv3_perf_cache_map,
-				ARMV8_EVTYPE_EVENT);
+				ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static int armv8_a53_map_event(struct perf_event *event)
 {
 	return armpmu_map_event(event, &armv8_a53_perf_map,
 				&armv8_a53_perf_cache_map,
-				ARMV8_EVTYPE_EVENT);
+				ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static int armv8_a57_map_event(struct perf_event *event)
 {
 	return armpmu_map_event(event, &armv8_a57_perf_map,
 				&armv8_a57_perf_cache_map,
-				ARMV8_EVTYPE_EVENT);
+				ARMV8_PMU_EVTYPE_EVENT);
 }
 
 static int armv8_thunder_map_event(struct perf_event *event)
 {
 	return armpmu_map_event(event, &armv8_thunder_perf_map,
 				&armv8_thunder_perf_cache_map,
-				ARMV8_EVTYPE_EVENT);
+				ARMV8_PMU_EVTYPE_EVENT);
 }
 
-static void armv8pmu_read_num_pmnc_events(void *info)
+static int armv8_vulcan_map_event(struct perf_event *event)
 {
-	int *nb_cnt = info;
+	return armpmu_map_event(event, &armv8_vulcan_perf_map,
+				&armv8_vulcan_perf_cache_map,
+				ARMV8_PMU_EVTYPE_EVENT);
+}
+
+static void __armv8pmu_probe_pmu(void *info)
+{
+	struct arm_pmu *cpu_pmu = info;
+	u32 pmceid[2];
 
 	/* Read the nb of CNTx counters supported from PMNC */
-	*nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
+	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
+		& ARMV8_PMU_PMCR_N_MASK;
 
 	/* Add the CPU cycles counter */
-	*nb_cnt += 1;
+	cpu_pmu->num_events += 1;
+
+	pmceid[0] = read_sysreg(pmceid0_el0);
+	pmceid[1] = read_sysreg(pmceid1_el0);
+
+	bitmap_from_u32array(cpu_pmu->pmceid_bitmap,
+			     ARMV8_PMUV3_MAX_COMMON_EVENTS, pmceid,
+			     ARRAY_SIZE(pmceid));
 }
 
-static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
+static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
 {
-	return smp_call_function_any(&arm_pmu->supported_cpus,
-				    armv8pmu_read_num_pmnc_events,
-				    &arm_pmu->num_events, 1);
+	return smp_call_function_any(&cpu_pmu->supported_cpus,
+				    __armv8pmu_probe_pmu,
+				    cpu_pmu, 1);
 }
 
 static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
@@ -853,7 +985,8 @@
 	armv8_pmu_init(cpu_pmu);
 	cpu_pmu->name			= "armv8_pmuv3";
 	cpu_pmu->map_event		= armv8_pmuv3_map_event;
-	return armv8pmu_probe_num_events(cpu_pmu);
+	cpu_pmu->pmu.attr_groups	= armv8_pmuv3_attr_groups;
+	return armv8pmu_probe_pmu(cpu_pmu);
 }
 
 static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
@@ -862,7 +995,7 @@
 	cpu_pmu->name			= "armv8_cortex_a53";
 	cpu_pmu->map_event		= armv8_a53_map_event;
 	cpu_pmu->pmu.attr_groups	= armv8_pmuv3_attr_groups;
-	return armv8pmu_probe_num_events(cpu_pmu);
+	return armv8pmu_probe_pmu(cpu_pmu);
 }
 
 static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
@@ -871,7 +1004,7 @@
 	cpu_pmu->name			= "armv8_cortex_a57";
 	cpu_pmu->map_event		= armv8_a57_map_event;
 	cpu_pmu->pmu.attr_groups	= armv8_pmuv3_attr_groups;
-	return armv8pmu_probe_num_events(cpu_pmu);
+	return armv8pmu_probe_pmu(cpu_pmu);
 }
 
 static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
@@ -880,7 +1013,7 @@
 	cpu_pmu->name			= "armv8_cortex_a72";
 	cpu_pmu->map_event		= armv8_a57_map_event;
 	cpu_pmu->pmu.attr_groups	= armv8_pmuv3_attr_groups;
-	return armv8pmu_probe_num_events(cpu_pmu);
+	return armv8pmu_probe_pmu(cpu_pmu);
 }
 
 static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
@@ -889,7 +1022,16 @@
 	cpu_pmu->name			= "armv8_cavium_thunder";
 	cpu_pmu->map_event		= armv8_thunder_map_event;
 	cpu_pmu->pmu.attr_groups	= armv8_pmuv3_attr_groups;
-	return armv8pmu_probe_num_events(cpu_pmu);
+	return armv8pmu_probe_pmu(cpu_pmu);
+}
+
+static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv8_pmu_init(cpu_pmu);
+	cpu_pmu->name			= "armv8_brcm_vulcan";
+	cpu_pmu->map_event		= armv8_vulcan_map_event;
+	cpu_pmu->pmu.attr_groups	= armv8_pmuv3_attr_groups;
+	return armv8pmu_probe_pmu(cpu_pmu);
 }
 
 static const struct of_device_id armv8_pmu_of_device_ids[] = {
@@ -898,6 +1040,7 @@
 	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
 	{.compatible = "arm,cortex-a72-pmu",	.data = armv8_a72_pmu_init},
 	{.compatible = "cavium,thunder-pmu",	.data = armv8_thunder_pmu_init},
+	{.compatible = "brcm,vulcan-pmu",	.data = armv8_vulcan_pmu_init},
 	{},
 };
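
The perf_event.c rework above does two related things: the raw event numbers move to the architectural ARMV8_PMUV3_PERFCTR_*/ARMV8_IMPDEF_PERFCTR_* names, and the sysfs "events" group gains an is_visible() hook so only events the CPU advertises in PMCEID0_EL0/PMCEID1_EL0 are exposed. Below is a minimal standalone sketch of that filtering idea; it is an illustration only, not kernel code — the 64-event limit matches the PMUv3 common-event space, everything else is an assumption made for the example.

/* sketch.c - PMCEID-style common-event filtering, illustration only */
#include <stdbool.h>
#include <stdint.h>

#define PMUV3_MAX_COMMON_EVENTS	0x40	/* common events are 0x00..0x3F */

/*
 * pmceid[0] mirrors PMCEID0_EL0 (events 0x00..0x1F),
 * pmceid[1] mirrors PMCEID1_EL0 (events 0x20..0x3F).
 */
static bool pmu_event_supported(const uint32_t pmceid[2], unsigned int event)
{
	if (event >= PMUV3_MAX_COMMON_EVENTS)
		return false;	/* IMPLEMENTATION DEFINED or unknown event */

	return (pmceid[event / 32] >> (event % 32)) & 1;
}

With the filter in place, tools can rely on the sysfs names and expect only implemented events to appear, e.g. something like "perf stat -e armv8_pmuv3/l1d_cache/" on a PMUv3 system.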
 
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 8062482..48eea68 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -265,9 +265,6 @@
 		if (stack_start) {
 			if (is_compat_thread(task_thread_info(p)))
 				childregs->compat_sp = stack_start;
-			/* 16-byte aligned stack mandatory on AArch64 */
-			else if (stack_start & 15)
-				return -EINVAL;
 			else
 				childregs->sp = stack_start;
 		}
@@ -382,13 +379,14 @@
 	return sp & ~0xf;
 }
 
-static unsigned long randomize_base(unsigned long base)
-{
-	unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;
-	return randomize_range(base, range_end, 0) ? : base;
-}
-
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	return randomize_base(mm->brk);
+	unsigned long range_end = mm->brk;
+
+	if (is_compat_task())
+		range_end += 0x02000000;
+	else
+		range_end += 0x40000000;
+
+	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 9dc6776..3279def 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -53,6 +53,7 @@
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/kasan.h>
+#include <asm/numa.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -175,7 +176,6 @@
 	 */
 	if (mpidr_hash_size() > 4 * num_possible_cpus())
 		pr_warn("Large number of MPIDR hash buckets detected\n");
-	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
 }
 
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
@@ -224,69 +224,6 @@
 	}
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-/*
- * Relocate initrd if it is not completely within the linear mapping.
- * This would be the case if mem= cuts out all or part of it.
- */
-static void __init relocate_initrd(void)
-{
-	phys_addr_t orig_start = __virt_to_phys(initrd_start);
-	phys_addr_t orig_end = __virt_to_phys(initrd_end);
-	phys_addr_t ram_end = memblock_end_of_DRAM();
-	phys_addr_t new_start;
-	unsigned long size, to_free = 0;
-	void *dest;
-
-	if (orig_end <= ram_end)
-		return;
-
-	/*
-	 * Any of the original initrd which overlaps the linear map should
-	 * be freed after relocating.
-	 */
-	if (orig_start < ram_end)
-		to_free = ram_end - orig_start;
-
-	size = orig_end - orig_start;
-	if (!size)
-		return;
-
-	/* initrd needs to be relocated completely inside linear mapping */
-	new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
-					   size, PAGE_SIZE);
-	if (!new_start)
-		panic("Cannot relocate initrd of size %ld\n", size);
-	memblock_reserve(new_start, size);
-
-	initrd_start = __phys_to_virt(new_start);
-	initrd_end   = initrd_start + size;
-
-	pr_info("Moving initrd from [%llx-%llx] to [%llx-%llx]\n",
-		orig_start, orig_start + size - 1,
-		new_start, new_start + size - 1);
-
-	dest = (void *)initrd_start;
-
-	if (to_free) {
-		memcpy(dest, (void *)__phys_to_virt(orig_start), to_free);
-		dest += to_free;
-	}
-
-	copy_from_early_mem(dest, orig_start + to_free, size - to_free);
-
-	if (to_free) {
-		pr_info("Freeing original RAMDISK from [%llx-%llx]\n",
-			orig_start, orig_start + to_free - 1);
-		memblock_free(orig_start, to_free);
-	}
-}
-#else
-static inline void __init relocate_initrd(void)
-{
-}
-#endif
-
 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 void __init setup_arch(char **cmdline_p)
@@ -327,7 +264,11 @@
 	acpi_boot_table_init();
 
 	paging_init();
-	relocate_initrd();
+
+	if (acpi_disabled)
+		unflatten_device_tree();
+
+	bootmem_init();
 
 	kasan_init();
 
@@ -335,12 +276,11 @@
 
 	early_ioremap_reset();
 
-	if (acpi_disabled) {
-		unflatten_device_tree();
+	if (acpi_disabled)
 		psci_dt_init();
-	} else {
+	else
 		psci_acpi_init();
-	}
+
 	xen_early_init();
 
 	cpu_read_bootcpu_ops();
@@ -379,6 +319,9 @@
 {
 	int i;
 
+	for_each_online_node(i)
+		register_one_node(i);
+
 	for_each_possible_cpu(i) {
 		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
 		cpu->hotpluggable = 1;
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index fd10eb6..9a3aec9 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -49,39 +49,32 @@
 	orr	\dst, \dst, \mask		// dst|=(aff3>>rs3)
 	.endm
 /*
- * Save CPU state for a suspend and execute the suspend finisher.
- * On success it will return 0 through cpu_resume - ie through a CPU
- * soft/hard reboot from the reset vector.
- * On failure it returns the suspend finisher return value or force
- * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
- * is not allowed to return, if it does this must be considered failure).
- * It saves callee registers, and allocates space on the kernel stack
- * to save the CPU specific registers + some other data for resume.
+ * Save CPU state in the provided sleep_stack_data area, and publish its
+ * location for cpu_resume()'s use in sleep_save_stash.
  *
- *  x0 = suspend finisher argument
- *  x1 = suspend finisher function pointer
+ * cpu_resume() will restore this saved state, and return. Because the
+ * link-register is saved and restored, it will appear to return from this
+ * function. So that the caller can tell the suspend/resume paths apart,
+ * __cpu_suspend_enter() will always return a non-zero value, whereas the
+ * path through cpu_resume() will return 0.
+ *
+ *  x0 = struct sleep_stack_data area
  */
 ENTRY(__cpu_suspend_enter)
-	stp	x29, lr, [sp, #-96]!
-	stp	x19, x20, [sp,#16]
-	stp	x21, x22, [sp,#32]
-	stp	x23, x24, [sp,#48]
-	stp	x25, x26, [sp,#64]
-	stp	x27, x28, [sp,#80]
-	/*
-	 * Stash suspend finisher and its argument in x20 and x19
-	 */
-	mov	x19, x0
-	mov	x20, x1
+	stp	x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
+	stp	x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
+	stp	x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
+	stp	x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48]
+	stp	x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64]
+	stp	x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80]
+
+	/* save the sp in cpu_suspend_ctx */
 	mov	x2, sp
-	sub	sp, sp, #CPU_SUSPEND_SZ	// allocate cpu_suspend_ctx
-	mov	x0, sp
-	/*
-	 * x0 now points to struct cpu_suspend_ctx allocated on the stack
-	 */
-	str	x2, [x0, #CPU_CTX_SP]
-	ldr	x1, =sleep_save_sp
-	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
+	str	x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
+
+	/* find the mpidr_hash */
+	ldr	x1, =sleep_save_stash
+	ldr	x1, [x1]
 	mrs	x7, mpidr_el1
 	ldr	x9, =mpidr_hash
 	ldr	x10, [x9, #MPIDR_HASH_MASK]
@@ -93,74 +86,28 @@
 	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
 	add	x1, x1, x8, lsl #3
-	bl	__cpu_suspend_save
-	/*
-	 * Grab suspend finisher in x20 and its argument in x19
-	 */
-	mov	x0, x19
-	mov	x1, x20
-	/*
-	 * We are ready for power down, fire off the suspend finisher
-	 * in x1, with argument in x0
-	 */
-	blr	x1
-        /*
-	 * Never gets here, unless suspend finisher fails.
-	 * Successful cpu_suspend should return from cpu_resume, returning
-	 * through this code path is considered an error
-	 * If the return value is set to 0 force x0 = -EOPNOTSUPP
-	 * to make sure a proper error condition is propagated
-	 */
-	cmp	x0, #0
-	mov	x3, #-EOPNOTSUPP
-	csel	x0, x3, x0, eq
-	add	sp, sp, #CPU_SUSPEND_SZ	// rewind stack pointer
-	ldp	x19, x20, [sp, #16]
-	ldp	x21, x22, [sp, #32]
-	ldp	x23, x24, [sp, #48]
-	ldp	x25, x26, [sp, #64]
-	ldp	x27, x28, [sp, #80]
-	ldp	x29, lr, [sp], #96
+
+	str	x0, [x1]
+	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
+	stp	x29, lr, [sp, #-16]!
+	bl	cpu_do_suspend
+	ldp	x29, lr, [sp], #16
+	mov	x0, #1
 	ret
 ENDPROC(__cpu_suspend_enter)
 	.ltorg
 
-/*
- * x0 must contain the sctlr value retrieved from restored context
- */
-	.pushsection	".idmap.text", "ax"
-ENTRY(cpu_resume_mmu)
-	ldr	x3, =cpu_resume_after_mmu
-	msr	sctlr_el1, x0		// restore sctlr_el1
-	isb
-	/*
-	 * Invalidate the local I-cache so that any instructions fetched
-	 * speculatively from the PoC are discarded, since they may have
-	 * been dynamically patched at the PoU.
-	 */
-	ic	iallu
-	dsb	nsh
-	isb
-	br	x3			// global jump to virtual address
-ENDPROC(cpu_resume_mmu)
-	.popsection
-cpu_resume_after_mmu:
-#ifdef CONFIG_KASAN
-	mov	x0, sp
-	bl	kasan_unpoison_remaining_stack
-#endif
-	mov	x0, #0			// return zero on success
-	ldp	x19, x20, [sp, #16]
-	ldp	x21, x22, [sp, #32]
-	ldp	x23, x24, [sp, #48]
-	ldp	x25, x26, [sp, #64]
-	ldp	x27, x28, [sp, #80]
-	ldp	x29, lr, [sp], #96
-	ret
-ENDPROC(cpu_resume_after_mmu)
-
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
+	/* enable the MMU early - so we can access sleep_save_stash by va */
+	adr_l	lr, __enable_mmu	/* __cpu_setup will return here */
+	ldr	x27, =_cpu_resume	/* __enable_mmu will branch here */
+	adrp	x25, idmap_pg_dir
+	adrp	x26, swapper_pg_dir
+	b	__cpu_setup
+ENDPROC(cpu_resume)
+
+ENTRY(_cpu_resume)
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash
 	add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -170,20 +117,32 @@
 	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
         /* x7 contains hash index, let's use it to grab context pointer */
-	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
+	ldr_l	x0, sleep_save_stash
 	ldr	x0, [x0, x7, lsl #3]
+	add	x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
+	add	x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
 	/* load sp from context */
 	ldr	x2, [x0, #CPU_CTX_SP]
-	/* load physical address of identity map page table in x1 */
-	adrp	x1, idmap_pg_dir
 	mov	sp, x2
 	/* save thread_info */
 	and	x2, x2, #~(THREAD_SIZE - 1)
 	msr	sp_el0, x2
 	/*
-	 * cpu_do_resume expects x0 to contain context physical address
-	 * pointer and x1 to contain physical address of 1:1 page tables
+	 * cpu_do_resume expects x0 to contain context address pointer
 	 */
-	bl	cpu_do_resume		// PC relative jump, MMU off
-	b	cpu_resume_mmu		// Resume MMU, never returns
-ENDPROC(cpu_resume)
+	bl	cpu_do_resume
+
+#ifdef CONFIG_KASAN
+	mov	x0, sp
+	bl	kasan_unpoison_remaining_stack
+#endif
+
+	ldp	x19, x20, [x29, #16]
+	ldp	x21, x22, [x29, #32]
+	ldp	x23, x24, [x29, #48]
+	ldp	x25, x26, [x29, #64]
+	ldp	x27, x28, [x29, #80]
+	ldp	x29, lr, [x29]
+	mov	x0, #0
+	ret
+ENDPROC(_cpu_resume)
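
The comment block at the top of __cpu_suspend_enter() describes a setjmp()/longjmp()-style contract: the call appears to return twice, once on the suspend path and once more when cpu_resume() restores the saved registers. The polarity is inverted relative to setjmp() (here the first return is non-zero and the resumed return is 0), but the userspace analogy below may help; it is an illustration only, nothing in it is kernel code.

#include <setjmp.h>
#include <stdio.h>

static jmp_buf resume_ctx;

static void fake_cpu_resume(void)
{
	longjmp(resume_ctx, 1);		/* plays the role of cpu_resume() */
}

int main(void)
{
	if (setjmp(resume_ctx) == 0) {
		/* "suspend" path: state saved, normally power down here */
		printf("state saved, suspending\n");
		fake_cpu_resume();
	} else {
		/* "resume" path: saved context restored, execution continues here */
		printf("resumed\n");
	}
	return 0;
}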
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b2d5f4e..678e084 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -45,6 +45,7 @@
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/mmu_context.h>
+#include <asm/numa.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
@@ -75,6 +76,43 @@
 	IPI_WAKEUP
 };
 
+#ifdef CONFIG_ARM64_VHE
+
+/* Whether the boot CPU is running in HYP mode or not */
+static bool boot_cpu_hyp_mode;
+
+static inline void save_boot_cpu_run_el(void)
+{
+	boot_cpu_hyp_mode = is_kernel_in_hyp_mode();
+}
+
+static inline bool is_boot_cpu_in_hyp_mode(void)
+{
+	return boot_cpu_hyp_mode;
+}
+
+/*
+ * Verify that a secondary CPU is running the kernel at the same
+ * EL as that of the boot CPU.
+ */
+void verify_cpu_run_el(void)
+{
+	bool in_el2 = is_kernel_in_hyp_mode();
+	bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode();
+
+	if (in_el2 ^ boot_cpu_el2) {
+		pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n",
+					smp_processor_id(),
+					in_el2 ? 2 : 1,
+					boot_cpu_el2 ? 2 : 1);
+		cpu_panic_kernel();
+	}
+}
+
+#else
+static inline void save_boot_cpu_run_el(void) {}
+#endif
+
 #ifdef CONFIG_HOTPLUG_CPU
 static int op_cpu_kill(unsigned int cpu);
 #else
@@ -166,6 +204,7 @@
 static void smp_store_cpu_info(unsigned int cpuid)
 {
 	store_cpu_topology(cpuid);
+	numa_store_cpu_info(cpuid);
 }
 
 /*
@@ -225,8 +264,6 @@
 	pr_info("CPU%u: Booted secondary processor [%08x]\n",
 					 cpu, read_cpuid_id());
 	update_cpu_boot_status(CPU_BOOT_SUCCESS);
-	/* Make sure the status update is visible before we complete */
-	smp_wmb();
 	set_cpu_online(cpu, true);
 	complete(&cpu_running);
 
@@ -401,6 +438,7 @@
 void __init smp_prepare_boot_cpu(void)
 {
 	cpuinfo_store_boot_cpu();
+	save_boot_cpu_run_el();
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 }
 
@@ -595,6 +633,8 @@
 
 		pr_debug("cpu logical map 0x%llx\n", hwid);
 		cpu_logical_map(cpu_count) = hwid;
+
+		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
 next:
 		cpu_count++;
 	}
@@ -647,33 +687,18 @@
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	int err;
-	unsigned int cpu, ncores = num_possible_cpus();
+	unsigned int cpu;
 
 	init_cpu_topology();
 
 	smp_store_cpu_info(smp_processor_id());
 
 	/*
-	 * are we trying to boot more cores than exist?
-	 */
-	if (max_cpus > ncores)
-		max_cpus = ncores;
-
-	/* Don't bother if we're effectively UP */
-	if (max_cpus <= 1)
-		return;
-
-	/*
 	 * Initialise the present map (which describes the set of CPUs
 	 * actually populated at the present time) and release the
 	 * secondaries from the bootloader.
-	 *
-	 * Make sure we online at most (max_cpus - 1) additional CPUs.
 	 */
-	max_cpus--;
 	for_each_possible_cpu(cpu) {
-		if (max_cpus == 0)
-			break;
 
 		if (cpu == smp_processor_id())
 			continue;
@@ -686,7 +711,6 @@
 			continue;
 
 		set_cpu_present(cpu, true);
-		max_cpus--;
 	}
 }
 
@@ -763,21 +787,11 @@
 }
 #endif
 
-static DEFINE_RAW_SPINLOCK(stop_lock);
-
 /*
  * ipi_cpu_stop - handle IPI from smp_send_stop()
  */
 static void ipi_cpu_stop(unsigned int cpu)
 {
-	if (system_state == SYSTEM_BOOTING ||
-	    system_state == SYSTEM_RUNNING) {
-		raw_spin_lock(&stop_lock);
-		pr_crit("CPU%u: stopping\n", cpu);
-		dump_stack();
-		raw_spin_unlock(&stop_lock);
-	}
-
 	set_cpu_online(cpu, false);
 
 	local_irq_disable();
@@ -872,6 +886,9 @@
 		cpumask_copy(&mask, cpu_online_mask);
 		cpumask_clear_cpu(smp_processor_id(), &mask);
 
+		if (system_state == SYSTEM_BOOTING ||
+		    system_state == SYSTEM_RUNNING)
+			pr_crit("SMP: stopping secondary CPUs\n");
 		smp_cross_call(&mask, IPI_CPU_STOP);
 	}
 
@@ -881,7 +898,8 @@
 		udelay(1);
 
 	if (num_online_cpus() > 1)
-		pr_warning("SMP: failed to stop secondary CPUs\n");
+		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
+			   cpumask_pr_args(cpu_online_mask));
 }
 
 /*
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index aef3605..18a71bc 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -52,6 +52,7 @@
 static int smp_spin_table_cpu_init(unsigned int cpu)
 {
 	struct device_node *dn;
+	int ret;
 
 	dn = of_get_cpu_node(cpu, NULL);
 	if (!dn)
@@ -60,15 +61,15 @@
 	/*
 	 * Determine the address from which the CPU is polling.
 	 */
-	if (of_property_read_u64(dn, "cpu-release-addr",
-				 &cpu_release_addr[cpu])) {
+	ret = of_property_read_u64(dn, "cpu-release-addr",
+				   &cpu_release_addr[cpu]);
+	if (ret)
 		pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
 		       cpu);
 
-		return -1;
-	}
+	of_node_put(dn);
 
-	return 0;
+	return ret;
 }
 
 static int smp_spin_table_cpu_prepare(unsigned int cpu)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 6605539..b616e36 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -10,30 +10,11 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
 /*
- * This is called by __cpu_suspend_enter() to save the state, and do whatever
- * flushing is required to ensure that when the CPU goes to sleep we have
- * the necessary data available when the caches are not searched.
- *
- * ptr: CPU context virtual address
- * save_ptr: address of the location where the context physical address
- *           must be saved
+ * This is allocated by cpu_suspend_init(), and used to store a pointer to
+ * the 'struct sleep_stack_data' that contains a particular CPU's state.
  */
-void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
-				phys_addr_t *save_ptr)
-{
-	*save_ptr = virt_to_phys(ptr);
-
-	cpu_do_suspend(ptr);
-	/*
-	 * Only flush the context that must be retrieved with the MMU
-	 * off. VA primitives ensure the flush is applied to all
-	 * cache levels so context is pushed to DRAM.
-	 */
-	__flush_dcache_area(ptr, sizeof(*ptr));
-	__flush_dcache_area(save_ptr, sizeof(*save_ptr));
-}
+unsigned long *sleep_save_stash;
 
 /*
  * This hook is provided so that cpu_suspend code can restore HW
@@ -51,6 +32,30 @@
 	hw_breakpoint_restore = hw_bp_restore;
 }
 
+void notrace __cpu_suspend_exit(void)
+{
+	/*
+	 * We are resuming from reset with the idmap active in TTBR0_EL1.
+	 * We must uninstall the idmap and restore the expected MMU
+	 * state before we can possibly return to userspace.
+	 */
+	cpu_uninstall_idmap();
+
+	/*
+	 * Restore per-cpu offset before any kernel
+	 * subsystem relying on it has a chance to run.
+	 */
+	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+
+	/*
+	 * Restore HW breakpoint registers to sane values
+	 * before debug exceptions are possibly reenabled
+	 * through local_dbg_restore.
+	 */
+	if (hw_breakpoint_restore)
+		hw_breakpoint_restore(NULL);
+}
+
 /*
  * cpu_suspend
  *
@@ -60,8 +65,9 @@
  */
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-	int ret;
+	int ret = 0;
 	unsigned long flags;
+	struct sleep_stack_data state;
 
 	/*
 	 * From this point debug exceptions are disabled to prevent
@@ -77,34 +83,21 @@
 	 */
 	pause_graph_tracing();
 
-	/*
-	 * mm context saved on the stack, it will be restored when
-	 * the cpu comes out of reset through the identity mapped
-	 * page tables, so that the thread address space is properly
-	 * set-up on function return.
-	 */
-	ret = __cpu_suspend_enter(arg, fn);
-	if (ret == 0) {
-		/*
-		 * We are resuming from reset with the idmap active in TTBR0_EL1.
-		 * We must uninstall the idmap and restore the expected MMU
-		 * state before we can possibly return to userspace.
-		 */
-		cpu_uninstall_idmap();
+	if (__cpu_suspend_enter(&state)) {
+		/* Call the suspend finisher */
+		ret = fn(arg);
 
 		/*
-		 * Restore per-cpu offset before any kernel
-		 * subsystem relying on it has a chance to run.
+		 * Never gets here unless the suspend finisher fails.
+		 * Successful cpu_suspend() should return from cpu_resume();
+		 * returning through this code path is considered an error.
+		 * If the return value is set to 0, force ret = -EOPNOTSUPP
+		 * to make sure a proper error condition is propagated.
 		 */
-		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
-		/*
-		 * Restore HW breakpoint registers to sane values
-		 * before debug exceptions are possibly reenabled
-		 * through local_dbg_restore.
-		 */
-		if (hw_breakpoint_restore)
-			hw_breakpoint_restore(NULL);
+		if (!ret)
+			ret = -EOPNOTSUPP;
+	} else {
+		__cpu_suspend_exit();
 	}
 
 	unpause_graph_tracing();
@@ -119,22 +112,15 @@
 	return ret;
 }
 
-struct sleep_save_sp sleep_save_sp;
-
 static int __init cpu_suspend_init(void)
 {
-	void *ctx_ptr;
-
 	/* ctx_ptr is an array of physical addresses */
-	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
+	sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
+				   GFP_KERNEL);
 
-	if (WARN_ON(!ctx_ptr))
+	if (WARN_ON(!sleep_save_stash))
 		return -ENOMEM;
 
-	sleep_save_sp.save_ptr_stash = ctx_ptr;
-	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
-	__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
-
 	return 0;
 }
 early_initcall(cpu_suspend_init);
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 75151aa..26fe8ea 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -25,6 +25,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/syscalls.h>
+#include <asm/cpufeature.h>
 
 asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
 			 unsigned long prot, unsigned long flags,
@@ -36,11 +37,20 @@
 	return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
 }
 
+SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
+{
+	if (personality(personality) == PER_LINUX32 &&
+		!system_supports_32bit_el0())
+		return -EINVAL;
+	return sys_personality(personality);
+}
+
 /*
  * Wrappers to pass the pt_regs argument.
  */
 asmlinkage long sys_rt_sigreturn_wrapper(void);
 #define sys_rt_sigreturn	sys_rt_sigreturn_wrapper
+#define sys_personality		sys_arm64_personality
 
 #undef __SYSCALL
 #define __SYSCALL(nr, sym)	[nr] = sym,
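
The new arm64_personality() wrapper above rejects PER_LINUX32 when the CPU cannot execute 32-bit EL0 code, instead of letting the request silently succeed. From userspace the visible effect is an EINVAL return; the probe below is a userspace illustration only and assumes nothing beyond the standard personality(2) interface.

#include <errno.h>
#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
	int old = personality(PER_LINUX32);

	if (old == -1 && errno == EINVAL) {
		printf("PER_LINUX32 rejected: no 32-bit EL0 support\n");
		return 1;
	}

	personality(old);	/* restore the previous persona */
	printf("PER_LINUX32 accepted\n");
	return 0;
}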
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 97bc68f..64fc030 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -131,11 +131,11 @@
 		return -ENOMEM;
 
 	/* Grab the vDSO data page. */
-	vdso_pagelist[0] = virt_to_page(vdso_data);
+	vdso_pagelist[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));
 
 	/* Grab the vDSO code pages. */
 	for (i = 0; i < vdso_pages; i++)
-		vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+		vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);
 
 	/* Populate the special mapping structures */
 	vdso_spec[0] = (struct vm_special_mapping) {
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 5a1939a..435e820 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -46,6 +46,16 @@
 	*(.idmap.text)					\
 	VMLINUX_SYMBOL(__idmap_text_end) = .;
 
+#ifdef CONFIG_HIBERNATION
+#define HIBERNATE_TEXT					\
+	. = ALIGN(SZ_4K);				\
+	VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
+	*(.hibernate_exit.text)				\
+	VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
+#else
+#define HIBERNATE_TEXT
+#endif
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -63,14 +73,19 @@
 #endif
 
 #if defined(CONFIG_DEBUG_ALIGN_RODATA)
-#define ALIGN_DEBUG_RO			. = ALIGN(1<<SECTION_SHIFT);
-#define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
-#elif defined(CONFIG_DEBUG_RODATA)
-#define ALIGN_DEBUG_RO			. = ALIGN(1<<PAGE_SHIFT);
-#define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
+/*
+ *  4 KB granule:   1 level 2 entry
+ * 16 KB granule: 128 level 3 entries, with contiguous bit
+ * 64 KB granule:  32 level 3 entries, with contiguous bit
+ */
+#define SEGMENT_ALIGN			SZ_2M
 #else
-#define ALIGN_DEBUG_RO
-#define ALIGN_DEBUG_RO_MIN(min)		. = ALIGN(min);
+/*
+ *  4 KB granule:  16 level 3 entries, with contiguous bit
+ * 16 KB granule:   4 level 3 entries, without contiguous bit
+ * 64 KB granule:   1 level 3 entry
+ */
+#define SEGMENT_ALIGN			SZ_64K
 #endif
 
 SECTIONS
@@ -96,7 +111,6 @@
 		_text = .;
 		HEAD_TEXT
 	}
-	ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			__exception_text_start = .;
@@ -109,18 +123,19 @@
 			LOCK_TEXT
 			HYPERVISOR_TEXT
 			IDMAP_TEXT
+			HIBERNATE_TEXT
 			*(.fixup)
 			*(.gnu.warning)
 		. = ALIGN(16);
 		*(.got)			/* Global offset table		*/
 	}
 
-	ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+	. = ALIGN(SEGMENT_ALIGN);
 	RO_DATA(PAGE_SIZE)		/* everything from this point to */
 	EXCEPTION_TABLE(8)		/* _etext will be marked RO NX   */
 	NOTES
 
-	ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+	. = ALIGN(SEGMENT_ALIGN);
 	_etext = .;			/* End of text and rodata section */
 	__init_begin = .;
 
@@ -154,12 +169,9 @@
 		*(.altinstr_replacement)
 	}
 	.rela : ALIGN(8) {
-		__reloc_start = .;
 		*(.rela .rela*)
-		__reloc_end = .;
 	}
 	.dynsym : ALIGN(8) {
-		__dynsym_start = .;
 		*(.dynsym)
 	}
 	.dynstr : {
@@ -169,7 +181,11 @@
 		*(.hash)
 	}
 
-	. = ALIGN(PAGE_SIZE);
+	__rela_offset	= ADDR(.rela) - KIMAGE_VADDR;
+	__rela_size	= SIZEOF(.rela);
+	__dynsym_offset	= ADDR(.dynsym) - KIMAGE_VADDR;
+
+	. = ALIGN(SEGMENT_ALIGN);
 	__init_end = .;
 
 	_data = .;
@@ -201,6 +217,10 @@
 	"HYP init code too big or misaligned")
 ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
 	"ID map text too big or misaligned")
+#ifdef CONFIG_HIBERNATION
+ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
+	<= SZ_4K, "Hibernate exit text too big or misaligned")
+#endif
 
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index de7450d..aa2e34e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -22,7 +22,6 @@
 config KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
 	depends on OF
-	depends on !ARM64_16K_PAGES
 	select MMU_NOTIFIER
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index eba89e4..3246c4a 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -186,6 +186,13 @@
 		exit_handler = kvm_get_exit_handler(vcpu);
 
 		return exit_handler(vcpu, run);
+	case ARM_EXCEPTION_HYP_GONE:
+		/*
+		 * EL2 has been reset to the hyp-stub. This happens when a guest
+		 * is pre-empted by kvm_reboot()'s shutdown call.
+		 */
+		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		return 0;
 	default:
 		kvm_pr_unimpl("Unsupported exception type: %d",
 			      exception_index);
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 7d8747c..a873a6d 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -21,6 +21,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/pgtable-hwdef.h>
+#include <asm/sysreg.h>
 
 	.text
 	.pushsection	.hyp.idmap.text, "ax"
@@ -103,8 +104,8 @@
 	dsb	sy
 
 	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_EL2_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_EL2_FLAGS
+	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
+	ldr	x5, =SCTLR_ELx_FLAGS
 	orr	x4, x4, x5
 	msr	sctlr_el2, x4
 	isb
@@ -138,6 +139,49 @@
 	eret
 ENDPROC(__kvm_hyp_init)
 
+	/*
+	 * Reset kvm back to the hyp stub. This is the trampoline dance in
+	 * reverse. If kvm used an extended idmap, __extended_idmap_trampoline
+	 * calls this code directly in the idmap. In this case switching to the
+	 * boot tables is a no-op.
+	 *
+	 * x0: HYP boot pgd
+	 * x1: HYP phys_idmap_start
+	 */
+ENTRY(__kvm_hyp_reset)
+	/* We're in trampoline code in VA, switch back to boot page tables */
+	msr	ttbr0_el2, x0
+	isb
+
+	/* Ensure the PA branch doesn't find a stale tlb entry or stale code. */
+	ic	iallu
+	tlbi	alle2
+	dsb	sy
+	isb
+
+	/* Branch into PA space */
+	adr	x0, 1f
+	bfi	x1, x0, #0, #PAGE_SHIFT
+	br	x1
+
+	/* We're now in idmap, disable MMU */
+1:	mrs	x0, sctlr_el2
+	ldr	x1, =SCTLR_ELx_FLAGS
+1:	mrs	x0, sctlr_el2
+	ldr	x1, =SCTLR_ELx_FLAGS
+	bic	x0, x0, x1		// Clear SCTLR_ELx.M and the other flag bits
+	msr	sctlr_el2, x0
+	isb
+
+	/* Invalidate the old TLBs */
+	tlbi	alle2
+	dsb	sy
+
+	/* Install stub vectors */
+	adr_l	x0, __hyp_stub_vectors
+	msr	vbar_el2, x0
+
+	eret
+ENDPROC(__kvm_hyp_reset)
+
 	.ltorg
 
 	.popsection
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 48f19a3..7ce9315 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -35,16 +35,21 @@
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
  * passed in x0.
  *
- * A function pointer with a value of 0 has a special meaning, and is
- * used to implement __hyp_get_vectors in the same way as in
+ * A function pointer with a value less than 0xfff has a special meaning,
+ * and is used to implement __hyp_get_vectors in the same way as in
  * arch/arm64/kernel/hyp_stub.S.
+ * HVC behaves as a 'bl' call and will clobber lr.
  */
 ENTRY(__kvm_call_hyp)
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN	
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+	str     lr, [sp, #-16]!
 	hvc	#0
+	ldr     lr, [sp], #16
 	ret
 alternative_else
 	b	__vhe_hyp_call
 	nop
+	nop
+	nop
 alternative_endif
 ENDPROC(__kvm_call_hyp)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ce9e5e5..70254a6 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -164,3 +164,22 @@
 
 	eret
 ENDPROC(__fpsimd_guest_restore)
+
+/*
+ * When using the extended idmap, we don't have a trampoline page we can use
+ * while we switch page tables during __kvm_hyp_reset. Accessing the idmap
+ * directly would be ideal, but if we're using the extended idmap then the
+ * idmap is located above HYP_PAGE_OFFSET, and the address will be masked by
+ * kvm_call_hyp using kern_hyp_va.
+ *
+ * x0: HYP boot pgd
+ * x1: HYP phys_idmap_start
+ */
+ENTRY(__extended_idmap_trampoline)
+	mov	x4, x1
+	adr_l	x3, __kvm_hyp_reset
+
+	/* insert __kvm_hyp_reset()'s offset into phys_idmap_start */
+	bfi	x4, x3, #0, #PAGE_SHIFT
+	br	x4
+ENDPROC(__extended_idmap_trampoline)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 3488894..2d87f36 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -42,19 +42,17 @@
 	 * Shuffle the parameters before calling the function
 	 * pointed to in x0. Assumes parameters in x[1,2,3].
 	 */
-	sub	sp, sp, #16
-	str	lr, [sp]
 	mov	lr, x0
 	mov	x0, x1
 	mov	x1, x2
 	mov	x2, x3
 	blr	lr
-	ldr	lr, [sp]
-	add	sp, sp, #16
 .endm
 
 ENTRY(__vhe_hyp_call)
+	str	lr, [sp, #-16]!
 	do_el2_call
+	ldr	lr, [sp], #16
 	/*
 	 * We used to rely on having an exception return to get
 	 * an implicit isb. In the E2H case, we don't have it anymore.
@@ -84,8 +82,8 @@
 	/* Here, we're pretty sure the host called HVC. */
 	restore_x0_to_x3
 
-	/* Check for __hyp_get_vectors */
-	cbnz	x0, 1f
+	cmp	x0, #HVC_GET_VECTORS
+	b.ne	1f
 	mrs	x0, vbar_el2
 	b	2f
 
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
index bfc54fd..b81f409 100644
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -20,9 +20,10 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 
-void __hyp_text __init_stage2_translation(void)
+u32 __hyp_text __init_stage2_translation(void)
 {
 	u64 val = VTCR_EL2_FLAGS;
+	u64 parange;
 	u64 tmp;
 
 	/*
@@ -30,14 +31,58 @@
 	 * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
 	 * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
 	 */
-	val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
+	parange = read_sysreg(id_aa64mmfr0_el1) & 7;
+	val |= parange << 16;
+
+	/* Compute the actual PARange... */
+	switch (parange) {
+	case 0:
+		parange = 32;
+		break;
+	case 1:
+		parange = 36;
+		break;
+	case 2:
+		parange = 40;
+		break;
+	case 3:
+		parange = 42;
+		break;
+	case 4:
+		parange = 44;
+		break;
+	case 5:
+	default:
+		parange = 48;
+		break;
+	}
+
+	/*
+	 * ... and clamp it to 40 bits, unless we have some braindead
+	 * HW that implements less than that. In all cases, we'll
+	 * return that value for the rest of the kernel to decide what
+	 * to do.
+	 */
+	val |= 64 - (parange > 40 ? 40 : parange);
+
+	/*
+	 * Check the availability of Hardware Access Flag / Dirty Bit
+	 * Management in ID_AA64MMFR1_EL1 and enable the feature in VTCR_EL2.
+	 */
+	tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_HADBS_SHIFT) & 0xf;
+	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) && tmp)
+		val |= VTCR_EL2_HA;
 
 	/*
 	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
 	 * bit in VTCR_EL2.
 	 */
-	tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf;
-	val |= (tmp == 2) ? VTCR_EL2_VS : 0;
+	tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
+	val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
+			VTCR_EL2_VS_16BIT :
+			VTCR_EL2_VS_8BIT;
 
 	write_sysreg(val, vtcr_el2);
+
+	return parange;
 }
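
For context, the reworked __init_stage2_translation() above decodes the 4-bit
PARange field of ID_AA64MMFR0_EL1 into a physical-address width, clamps the
stage-2 input size to 40 bits for the T0SZ field, and returns the unclamped
width to the caller. The following is only an illustrative user-space sketch of
that mapping (not part of the patch; the encodings are taken from the switch
statement above):

	#include <stdio.h>

	static unsigned int parange_to_bits(unsigned int field)
	{
		/* encodings 0..5 -> 32/36/40/42/44/48 PA bits */
		static const unsigned int bits[] = { 32, 36, 40, 42, 44, 48 };

		return field < 6 ? bits[field] : 48;
	}

	int main(void)
	{
		unsigned int field;

		for (field = 0; field < 6; field++) {
			unsigned int pa  = parange_to_bits(field);
			unsigned int ipa = pa > 40 ? 40 : pa;	/* stage-2 clamp */

			printf("PARange %u -> %u PA bits, VTCR T0SZ %u\n",
			       field, pa, 64 - ipa);
		}
		return 0;
	}

The 64 - IPA value is what the "val |= 64 - ..." line above folds into VTCR_EL2.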
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 9677bf0..b1ad730 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -29,7 +29,9 @@
 #include <asm/cputype.h>
 #include <asm/ptrace.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
 
 /*
  * ARMv8 Reset Values
@@ -130,3 +132,31 @@
 	/* Reset timer */
 	return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
 }
+
+extern char __hyp_idmap_text_start[];
+
+unsigned long kvm_hyp_reset_entry(void)
+{
+	if (!__kvm_cpu_uses_extended_idmap()) {
+		unsigned long offset;
+
+		/*
+		 * Find the address of __kvm_hyp_reset() in the trampoline page.
+		 * This is present in the running page tables, and the boot page
+		 * tables, so we call the code here to start the trampoline
+		 * dance in reverse.
+		 */
+		offset = (unsigned long)__kvm_hyp_reset
+			 - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK);
+
+		return TRAMPOLINE_VA + offset;
+	} else {
+		/*
+		 * KVM is running with merged page tables, which don't have the
+		 * trampoline page mapped. We know the idmap is still mapped,
+		 * but can't be called into directly. Use
+		 * __extended_idmap_trampoline to do the call.
+		 */
+		return (unsigned long)kvm_ksym_ref(__extended_idmap_trampoline);
+	}
+}
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 57f57fd..54bb209 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,6 +4,7 @@
 				   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_ARM64_PTDUMP)	+= dump.o
+obj-$(CONFIG_NUMA)		+= numa.o
 
 obj-$(CONFIG_KASAN)		+= kasan_init.o
 KASAN_SANITIZE_kasan_init.o	:= n
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 6df0706..50ff9ba 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -24,8 +24,6 @@
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
 
-#include "proc-macros.S"
-
 /*
  *	flush_icache_range(start,end)
  *
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index c90c3c5..b7b3978 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -75,8 +75,7 @@
 		 */
 		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
 				smp_processor_id(), asid, asid_bits);
-		update_cpu_boot_status(CPU_PANIC_KERNEL);
-		cpu_park_loop();
+		cpu_panic_kernel();
 	}
 }
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index a6e757c..c566ec8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -562,8 +562,8 @@
 		struct page **pages;
 		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 
-		pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
-					flush_page);
+		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
+					handle, flush_page);
 		if (!pages)
 			return NULL;
 
@@ -804,57 +804,24 @@
 static LIST_HEAD(iommu_dma_masters);
 static DEFINE_MUTEX(iommu_dma_notifier_lock);
 
-/*
- * Temporarily "borrow" a domain feature flag to to tell if we had to resort
- * to creating our own domain here, in case we need to clean it up again.
- */
-#define __IOMMU_DOMAIN_FAKE_DEFAULT		(1U << 31)
-
 static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
 			   u64 dma_base, u64 size)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 
 	/*
-	 * Best case: The device is either part of a group which was
-	 * already attached to a domain in a previous call, or it's
-	 * been put in a default DMA domain by the IOMMU core.
+	 * If the IOMMU driver has the DMA domain support that we require,
+	 * then the IOMMU core will have already configured a group for this
+	 * device, and allocated the default domain for that group.
 	 */
-	if (!domain) {
-		/*
-		 * Urgh. The IOMMU core isn't going to do default domains
-		 * for non-PCI devices anyway, until it has some means of
-		 * abstracting the entirely implementation-specific
-		 * sideband data/SoC topology/unicorn dust that may or
-		 * may not differentiate upstream masters.
-		 * So until then, HORRIBLE HACKS!
-		 */
-		domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
-		if (!domain)
-			goto out_no_domain;
-
-		domain->ops = ops;
-		domain->type = IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_FAKE_DEFAULT;
-
-		if (iommu_attach_device(domain, dev))
-			goto out_put_domain;
+	if (!domain || iommu_dma_init_domain(domain, dma_base, size)) {
+		pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+			dev_name(dev));
+		return false;
 	}
 
-	if (iommu_dma_init_domain(domain, dma_base, size))
-		goto out_detach;
-
 	dev->archdata.dma_ops = &iommu_dma_ops;
 	return true;
-
-out_detach:
-	iommu_detach_device(domain, dev);
-out_put_domain:
-	if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
-		iommu_domain_free(domain);
-out_no_domain:
-	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
-		dev_name(dev));
-	return false;
 }
 
 static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
@@ -933,6 +900,10 @@
 		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
 	if (!ret)
 		ret = register_iommu_dma_ops_notifier(&amba_bustype);
+#ifdef CONFIG_PCI
+	if (!ret)
+		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
+#endif
 
 	/* handle devices queued before this arch_initcall */
 	if (!ret)
@@ -967,11 +938,8 @@
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 
-	if (domain) {
+	if (WARN_ON(domain))
 		iommu_detach_device(domain, dev);
-		if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
-			iommu_domain_free(domain);
-	}
 
 	dev->archdata.dma_ops = NULL;
 }
@@ -979,13 +947,13 @@
 #else
 
 static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				  struct iommu_ops *iommu)
+				  const struct iommu_ops *iommu)
 { }
 
 #endif  /* CONFIG_IOMMU_DMA */
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			struct iommu_ops *iommu, bool coherent)
+			const struct iommu_ops *iommu, bool coherent)
 {
 	if (!dev->archdata.dma_ops)
 		dev->archdata.dma_ops = &swiotlb_dma_ops;
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index f9271cb..8404190 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -23,6 +23,7 @@
 #include <linux/seq_file.h>
 
 #include <asm/fixmap.h>
+#include <asm/kasan.h>
 #include <asm/memory.h>
 #include <asm/pgtable.h>
 #include <asm/pgtable-hwdef.h>
@@ -32,37 +33,25 @@
 	const char *name;
 };
 
-enum address_markers_idx {
-	MODULES_START_NR = 0,
-	MODULES_END_NR,
-	VMALLOC_START_NR,
-	VMALLOC_END_NR,
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-	VMEMMAP_START_NR,
-	VMEMMAP_END_NR,
+static const struct addr_marker address_markers[] = {
+#ifdef CONFIG_KASAN
+	{ KASAN_SHADOW_START,		"Kasan shadow start" },
+	{ KASAN_SHADOW_END,		"Kasan shadow end" },
 #endif
-	FIXADDR_START_NR,
-	FIXADDR_END_NR,
-	PCI_START_NR,
-	PCI_END_NR,
-	KERNEL_SPACE_NR,
-};
-
-static struct addr_marker address_markers[] = {
-	{ MODULES_VADDR,	"Modules start" },
-	{ MODULES_END,		"Modules end" },
-	{ VMALLOC_START,	"vmalloc() Area" },
-	{ VMALLOC_END,		"vmalloc() End" },
+	{ MODULES_VADDR,		"Modules start" },
+	{ MODULES_END,			"Modules end" },
+	{ VMALLOC_START,		"vmalloc() Area" },
+	{ VMALLOC_END,			"vmalloc() End" },
+	{ FIXADDR_START,		"Fixmap start" },
+	{ FIXADDR_TOP,			"Fixmap end" },
+	{ PCI_IO_START,			"PCI I/O start" },
+	{ PCI_IO_END,			"PCI I/O end" },
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-	{ 0,			"vmemmap start" },
-	{ 0,			"vmemmap end" },
+	{ VMEMMAP_START,		"vmemmap start" },
+	{ VMEMMAP_START + VMEMMAP_SIZE,	"vmemmap end" },
 #endif
-	{ FIXADDR_START,	"Fixmap start" },
-	{ FIXADDR_TOP,		"Fixmap end" },
-	{ PCI_IO_START,		"PCI I/O start" },
-	{ PCI_IO_END,		"PCI I/O end" },
-	{ PAGE_OFFSET,		"Linear Mapping" },
-	{ -1,			NULL },
+	{ PAGE_OFFSET,			"Linear Mapping" },
+	{ -1,				NULL },
 };
 
 /*
@@ -347,13 +336,6 @@
 			for (j = 0; j < pg_level[i].num; j++)
 				pg_level[i].mask |= pg_level[i].bits[j].mask;
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-	address_markers[VMEMMAP_START_NR].start_address =
-				(unsigned long)virt_to_page(PAGE_OFFSET);
-	address_markers[VMEMMAP_END_NR].start_address =
-				(unsigned long)virt_to_page(high_memory);
-#endif
-
 	pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
 				 &ptdump_fops);
 	return pe ? 0 : -ENOMEM;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 95df28b..5954881 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -81,6 +81,56 @@
 	printk("\n");
 }
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+/*
+ * This function sets the access flags (dirty, accessed), as well as write
+ * permission, and only to a more permissive setting.
+ *
+ * It needs to cope with hardware update of the accessed/dirty state by other
+ * agents in the system and can safely skip the __sync_icache_dcache() call as,
+ * like set_pte_at(), the PTE is never changed from no-exec to exec here.
+ *
+ * Returns whether or not the PTE actually changed.
+ */
+int ptep_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pte_t *ptep,
+			  pte_t entry, int dirty)
+{
+	pteval_t old_pteval;
+	unsigned int tmp;
+
+	if (pte_same(*ptep, entry))
+		return 0;
+
+	/* only preserve the access flags and write permission */
+	pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
+
+	/*
+	 * PTE_RDONLY is cleared by default in the asm below, so set it back
+	 * if necessary (read-only or clean PTE).
+	 */
+	if (!pte_write(entry) || !dirty)
+		pte_val(entry) |= PTE_RDONLY;
+
+	/*
+	 * Setting the flags must be done atomically to avoid racing with the
+	 * hardware update of the access/dirty state.
+	 */
+	asm volatile("//	ptep_set_access_flags\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	and	%0, %0, %3		// clear PTE_RDONLY\n"
+	"	orr	%0, %0, %4		// set flags\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
+	: "L" (~PTE_RDONLY), "r" (pte_val(entry)));
+
+	flush_tlb_fix_spurious_fault(vma, address);
+	return 1;
+}
+#endif
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -212,10 +262,6 @@
 	tsk = current;
 	mm  = tsk->mm;
 
-	/* Enable interrupts if they were enabled in the parent context. */
-	if (interrupts_enabled(regs))
-		local_irq_enable();
-
 	/*
 	 * If we're in an interrupt or have no user context, we must not take
 	 * the fault.
@@ -555,20 +601,33 @@
 {
 	const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
 	struct siginfo info;
+	int rv;
 
-	if (!inf->fn(addr, esr, regs))
-		return 1;
+	/*
+	 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+	 * already disabled to preserve the last enabled/disabled addresses.
+	 */
+	if (interrupts_enabled(regs))
+		trace_hardirqs_off();
 
-	pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
-		 inf->name, esr, addr);
+	if (!inf->fn(addr, esr, regs)) {
+		rv = 1;
+	} else {
+		pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
+			 inf->name, esr, addr);
 
-	info.si_signo = inf->sig;
-	info.si_errno = 0;
-	info.si_code  = inf->code;
-	info.si_addr  = (void __user *)addr;
-	arm64_notify_die("", regs, &info, 0);
+		info.si_signo = inf->sig;
+		info.si_errno = 0;
+		info.si_code  = inf->code;
+		info.si_addr  = (void __user *)addr;
+		arm64_notify_die("", regs, &info, 0);
+		rv = 0;
+	}
 
-	return 0;
+	if (interrupts_enabled(regs))
+		trace_hardirqs_on();
+
+	return rv;
 }
 
 #ifdef CONFIG_ARM64_PAN
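
The ptep_set_access_flags() implementation added above uses an ldxr/stxr
exclusive loop so that software updates of the access/dirty/write bits cannot
race with a hardware (DBM) update of the same PTE. Expressed as a C11
compare-exchange retry loop, the semantics are roughly as follows (an
illustrative sketch only, with an assumed PTE_RDONLY bit position, not kernel
code):

	#include <stdatomic.h>
	#include <stdint.h>

	#define PTE_RDONLY	(1ULL << 7)	/* assumed for the sketch */

	static void set_access_flags(_Atomic uint64_t *ptep, uint64_t entry)
	{
		uint64_t old = atomic_load(ptep);
		uint64_t newval;

		do {
			/* clear RDONLY, then OR in the more permissive flags */
			newval = (old & ~PTE_RDONLY) | entry;
			/* retry if hardware touched access/dirty bits meanwhile */
		} while (!atomic_compare_exchange_weak(ptep, &old, newval));
	}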
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ea989d8..d45f862 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -40,6 +40,7 @@
 #include <asm/kasan.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
+#include <asm/numa.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -86,6 +87,21 @@
 	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
 }
 
+#ifdef CONFIG_NUMA
+
+static void __init zone_sizes_init(unsigned long min, unsigned long max)
+{
+	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};
+
+	if (IS_ENABLED(CONFIG_ZONE_DMA))
+		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
+	max_zone_pfns[ZONE_NORMAL] = max;
+
+	free_area_init_nodes(max_zone_pfns);
+}
+
+#else
+
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	struct memblock_region *reg;
@@ -126,6 +142,8 @@
 	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
+#endif /* CONFIG_NUMA */
+
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
@@ -142,10 +160,15 @@
 static void __init arm64_memory_present(void)
 {
 	struct memblock_region *reg;
+	int nid = 0;
 
-	for_each_memblock(memory, reg)
-		memory_present(0, memblock_region_memory_base_pfn(reg),
-			       memblock_region_memory_end_pfn(reg));
+	for_each_memblock(memory, reg) {
+#ifdef CONFIG_NUMA
+		nid = reg->nid;
+#endif
+		memory_present(nid, memblock_region_memory_base_pfn(reg),
+				memblock_region_memory_end_pfn(reg));
+	}
 }
 #endif
 
@@ -190,8 +213,12 @@
 	 */
 	memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
 			ULLONG_MAX);
-	if (memblock_end_of_DRAM() > linear_region_size)
-		memblock_remove(0, memblock_end_of_DRAM() - linear_region_size);
+	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
+		/* ensure that memstart_addr remains sufficiently aligned */
+		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
+					 ARM64_MEMSTART_ALIGN);
+		memblock_remove(0, memstart_addr);
+	}
 
 	/*
 	 * Apply the memory limit if it was set. Since the kernel may be loaded
@@ -203,6 +230,35 @@
 		memblock_add(__pa(_text), (u64)(_end - _text));
 	}
 
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
+		/*
+		 * Add back the memory we just removed if it results in the
+		 * initrd to become inaccessible via the linear mapping.
+		 * Otherwise, this is a no-op
+		 */
+		u64 base = initrd_start & PAGE_MASK;
+		u64 size = PAGE_ALIGN(initrd_end) - base;
+
+		/*
+		 * We can only add back the initrd memory if we don't end up
+		 * with more memory than we can address via the linear mapping.
+		 * It is up to the bootloader to position the kernel and the
+		 * initrd reasonably close to each other (i.e., within 32 GB of
+		 * each other) so that all granule/#levels combinations can
+		 * always access both.
+		 */
+		if (WARN(base < memblock_start_of_DRAM() ||
+			 base + size > memblock_start_of_DRAM() +
+				       linear_region_size,
+			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
+			initrd_start = 0;
+		} else {
+			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
+			memblock_add(base, size);
+			memblock_reserve(base, size);
+		}
+	}
+
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
 		extern u16 memstart_offset_seed;
 		u64 range = linear_region_size -
@@ -245,7 +301,6 @@
 	dma_contiguous_reserve(arm64_dma_phys_limit);
 
 	memblock_allow_resize();
-	memblock_dump_all();
 }
 
 void __init bootmem_init(void)
@@ -257,6 +312,9 @@
 
 	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);
 
+	max_pfn = max_low_pfn = max;
+
+	arm64_numa_init();
 	/*
 	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
 	 * done after the fixed reservations.
@@ -267,7 +325,7 @@
 	zone_sizes_init(min, max);
 
 	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
-	max_pfn = max_low_pfn = max;
+	memblock_dump_all();
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
@@ -371,26 +429,27 @@
 		MLM(MODULES_VADDR, MODULES_END));
 	pr_cont("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
 		MLG(VMALLOC_START, VMALLOC_END));
-	pr_cont("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-		"    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-		"      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-		"      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
-		MLK_ROUNDUP(_text, __start_rodata),
-		MLK_ROUNDUP(__start_rodata, _etext),
-		MLK_ROUNDUP(__init_begin, __init_end),
+	pr_cont("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+		MLK_ROUNDUP(_text, __start_rodata));
+	pr_cont("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+		MLK_ROUNDUP(__start_rodata, _etext));
+	pr_cont("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+		MLK_ROUNDUP(__init_begin, __init_end));
+	pr_cont("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		MLK_ROUNDUP(_sdata, _edata));
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-	pr_cont("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
-		"              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
-		MLG(VMEMMAP_START,
-		    VMEMMAP_START + VMEMMAP_SIZE),
-		MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
-		    (unsigned long)virt_to_page(high_memory)));
-#endif
+	pr_cont("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+		MLK_ROUNDUP(__bss_start, __bss_stop));
 	pr_cont("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
 		MLK(FIXADDR_START, FIXADDR_TOP));
 	pr_cont("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 		MLM(PCI_IO_START, PCI_IO_END));
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	pr_cont("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
+		MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
+	pr_cont("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
+		MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
+		    (unsigned long)virt_to_page(high_memory)));
+#endif
 	pr_cont("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 		MLM(__phys_to_virt(memblock_start_of_DRAM()),
 		    (unsigned long)high_memory));
@@ -407,6 +466,12 @@
 	BUILD_BUG_ON(TASK_SIZE_32			> TASK_SIZE_64);
 #endif
 
+	/*
+	 * Make sure we chose the upper bound of sizeof(struct page)
+	 * correctly.
+	 */
+	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+
 	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
@@ -419,7 +484,8 @@
 
 void free_initmem(void)
 {
-	free_initmem_default(0);
+	free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
+			   0, "unused kernel");
 	fixup_init();
 }
 
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index ef47d99..71fe989 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1,3 +1,2 @@
-extern void __init bootmem_init(void);
 
 void fixup_init(void);
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 232f787..01c1717 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -95,8 +95,6 @@
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
-EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
-
 
 /*
  * You really shouldn't be using read() or write() on /dev/mem.  This might go
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f3e5c74..0f85a46 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -385,7 +385,7 @@
 
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
-	unsigned long kernel_start = __pa(_stext);
+	unsigned long kernel_start = __pa(_text);
 	unsigned long kernel_end = __pa(_etext);
 
 	/*
@@ -417,7 +417,7 @@
 				     early_pgtable_alloc);
 
 	/*
-	 * Map the linear alias of the [_stext, _etext) interval as
+	 * Map the linear alias of the [_text, _etext) interval as
 	 * read-only/non-executable. This makes the contents of the
 	 * region accessible to subsystems such as hibernate, but
 	 * protects it from inadvertent modification or execution.
@@ -449,8 +449,8 @@
 {
 	unsigned long section_size;
 
-	section_size = (unsigned long)__start_rodata - (unsigned long)_stext;
-	create_mapping_late(__pa(_stext), (unsigned long)_stext,
+	section_size = (unsigned long)__start_rodata - (unsigned long)_text;
+	create_mapping_late(__pa(_text), (unsigned long)_text,
 			    section_size, PAGE_KERNEL_ROX);
 	/*
 	 * mark .rodata as read only. Use _etext rather than __end_rodata to
@@ -471,8 +471,8 @@
 	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
 }
 
-static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
-				    pgprot_t prot, struct vm_struct *vma)
+static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
+				      pgprot_t prot, struct vm_struct *vma)
 {
 	phys_addr_t pa_start = __pa(va_start);
 	unsigned long size = va_end - va_start;
@@ -499,11 +499,11 @@
 {
 	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;
 
-	map_kernel_chunk(pgd, _stext, __start_rodata, PAGE_KERNEL_EXEC, &vmlinux_text);
-	map_kernel_chunk(pgd, __start_rodata, _etext, PAGE_KERNEL, &vmlinux_rodata);
-	map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
-			 &vmlinux_init);
-	map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
+	map_kernel_segment(pgd, _text, __start_rodata, PAGE_KERNEL_EXEC, &vmlinux_text);
+	map_kernel_segment(pgd, __start_rodata, _etext, PAGE_KERNEL, &vmlinux_rodata);
+	map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
+			   &vmlinux_init);
+	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
 
 	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
 		/*
@@ -564,8 +564,6 @@
 	 */
 	memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
 		      SWAPPER_DIR_SIZE - PAGE_SIZE);
-
-	bootmem_init();
 }
 
 /*
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
new file mode 100644
index 0000000..98dc104
--- /dev/null
+++ b/arch/arm64/mm/numa.c
@@ -0,0 +1,396 @@
+/*
+ * NUMA support, based on the x86 implementation.
+ *
+ * Copyright (C) 2015 Cavium Inc.
+ * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+nodemask_t numa_nodes_parsed __initdata;
+static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
+
+static int numa_distance_cnt;
+static u8 *numa_distance;
+static int numa_off;
+
+static __init int numa_parse_early_param(char *opt)
+{
+	if (!opt)
+		return -EINVAL;
+	if (!strncmp(opt, "off", 3)) {
+		pr_info("%s\n", "NUMA turned off");
+		numa_off = 1;
+	}
+	return 0;
+}
+early_param("numa", numa_parse_early_param);
+
+cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+EXPORT_SYMBOL(node_to_cpumask_map);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+
+/*
+ * Returns a pointer to the bitmask of CPUs on Node 'node'.
+ */
+const struct cpumask *cpumask_of_node(int node)
+{
+	if (WARN_ON(node >= nr_node_ids))
+		return cpu_none_mask;
+
+	if (WARN_ON(node_to_cpumask_map[node] == NULL))
+		return cpu_online_mask;
+
+	return node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(cpumask_of_node);
+
+#endif
+
+static void map_cpu_to_node(unsigned int cpu, int nid)
+{
+	set_cpu_numa_node(cpu, nid);
+	if (nid >= 0)
+		cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+}
+
+void numa_clear_node(unsigned int cpu)
+{
+	int nid = cpu_to_node(cpu);
+
+	if (nid >= 0)
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
+	set_cpu_numa_node(cpu, NUMA_NO_NODE);
+}
+
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: cpumask_of_node() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
+ */
+static void __init setup_node_to_cpumask_map(void)
+{
+	unsigned int cpu;
+	int node;
+
+	/* setup nr_node_ids if not done yet */
+	if (nr_node_ids == MAX_NUMNODES)
+		setup_nr_node_ids();
+
+	/* allocate and clear the mapping */
+	for (node = 0; node < nr_node_ids; node++) {
+		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
+		cpumask_clear(node_to_cpumask_map[node]);
+	}
+
+	for_each_possible_cpu(cpu)
+		set_cpu_numa_node(cpu, NUMA_NO_NODE);
+
+	/* cpumask_of_node() will now work */
+	pr_debug("NUMA: Node to cpumask map for %d nodes\n", nr_node_ids);
+}
+
+/*
+ *  Set the cpu to node and mem mapping
+ */
+void numa_store_cpu_info(unsigned int cpu)
+{
+	map_cpu_to_node(cpu, numa_off ? 0 : cpu_to_node_map[cpu]);
+}
+
+void __init early_map_cpu_to_node(unsigned int cpu, int nid)
+{
+	/* fallback to node 0 */
+	if (nid < 0 || nid >= MAX_NUMNODES)
+		nid = 0;
+
+	cpu_to_node_map[cpu] = nid;
+}
+
+/**
+ * numa_add_memblk - Set node id to memblk
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @size:  Size of the new memblk
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 size)
+{
+	int ret;
+
+	ret = memblock_set_node(start, size, &memblock.memory, nid);
+	if (ret < 0) {
+		pr_err("NUMA: memblock [0x%llx - 0x%llx] failed to add on node %d\n",
+			start, (start + size - 1), nid);
+		return ret;
+	}
+
+	node_set(nid, numa_nodes_parsed);
+	pr_info("NUMA: Adding memblock [0x%llx - 0x%llx] on node %d\n",
+			start, (start + size - 1), nid);
+	return ret;
+}
+
+/**
+ * Initialize NODE_DATA for a node from node-local memory
+ */
+static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
+{
+	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
+	u64 nd_pa;
+	void *nd;
+	int tnid;
+
+	pr_info("NUMA: Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
+			nid, start_pfn << PAGE_SHIFT,
+			(end_pfn << PAGE_SHIFT) - 1);
+
+	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+	nd = __va(nd_pa);
+
+	/* report and initialize */
+	pr_info("NUMA: NODE_DATA [mem %#010Lx-%#010Lx]\n",
+		nd_pa, nd_pa + nd_size - 1);
+	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
+	if (tnid != nid)
+		pr_info("NUMA: NODE_DATA(%d) on node %d\n", nid, tnid);
+
+	node_data[nid] = nd;
+	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
+	NODE_DATA(nid)->node_id = nid;
+	NODE_DATA(nid)->node_start_pfn = start_pfn;
+	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+/**
+ * numa_free_distance
+ *
+ * The current table is freed.
+ */
+void __init numa_free_distance(void)
+{
+	size_t size;
+
+	if (!numa_distance)
+		return;
+
+	size = numa_distance_cnt * numa_distance_cnt *
+		sizeof(numa_distance[0]);
+
+	memblock_free(__pa(numa_distance), size);
+	numa_distance_cnt = 0;
+	numa_distance = NULL;
+}
+
+/**
+ * numa_alloc_distance
+ *
+ * Create a new NUMA distance table.
+ */
+static int __init numa_alloc_distance(void)
+{
+	size_t size;
+	u64 phys;
+	int i, j;
+
+	size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
+	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn),
+				      size, PAGE_SIZE);
+	if (WARN_ON(!phys))
+		return -ENOMEM;
+
+	memblock_reserve(phys, size);
+
+	numa_distance = __va(phys);
+	numa_distance_cnt = nr_node_ids;
+
+	/* fill with the default distances */
+	for (i = 0; i < numa_distance_cnt; i++)
+		for (j = 0; j < numa_distance_cnt; j++)
+			numa_distance[i * numa_distance_cnt + j] = i == j ?
+				LOCAL_DISTANCE : REMOTE_DISTANCE;
+
+	pr_debug("NUMA: Initialized distance table, cnt=%d\n",
+			numa_distance_cnt);
+
+	return 0;
+}
+
+/**
+ * numa_set_distance - Set inter node NUMA distance from node to node.
+ * @from: the 'from' node to set distance
+ * @to: the 'to'  node to set distance
+ * @distance: NUMA distance
+ *
+ * Set the distance from node @from to @to to @distance.
+ * If the distance table doesn't exist, a warning is printed.
+ *
+ * If @from or @to is higher than the highest known node or lower than zero
+ * or @distance doesn't make sense, the call is ignored.
+ *
+ */
+void __init numa_set_distance(int from, int to, int distance)
+{
+	if (!numa_distance) {
+		pr_warn_once("NUMA: Warning: distance table not allocated yet\n");
+		return;
+	}
+
+	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
+			from < 0 || to < 0) {
+		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
+			    from, to, distance);
+		return;
+	}
+
+	if ((u8)distance != distance ||
+	    (from == to && distance != LOCAL_DISTANCE)) {
+		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+			     from, to, distance);
+		return;
+	}
+
+	numa_distance[from * numa_distance_cnt + to] = distance;
+}
+
+/**
+ * Return the NUMA distance from node @from to node @to
+ */
+int __node_distance(int from, int to)
+{
+	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
+		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
+	return numa_distance[from * numa_distance_cnt + to];
+}
+EXPORT_SYMBOL(__node_distance);
+
+static int __init numa_register_nodes(void)
+{
+	int nid;
+	struct memblock_region *mblk;
+
+	/* Check that valid nid is set to memblks */
+	for_each_memblock(memory, mblk)
+		if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) {
+			pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+				mblk->nid, mblk->base,
+				mblk->base + mblk->size - 1);
+			return -EINVAL;
+		}
+
+	/* Finally register nodes. */
+	for_each_node_mask(nid, numa_nodes_parsed) {
+		unsigned long start_pfn, end_pfn;
+
+		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+		setup_node_data(nid, start_pfn, end_pfn);
+		node_set_online(nid);
+	}
+
+	/* Derive the possible node map from the parsed nodes */
+	node_possible_map = numa_nodes_parsed;
+
+	return 0;
+}
+
+static int __init numa_init(int (*init_func)(void))
+{
+	int ret;
+
+	nodes_clear(numa_nodes_parsed);
+	nodes_clear(node_possible_map);
+	nodes_clear(node_online_map);
+	numa_free_distance();
+
+	ret = numa_alloc_distance();
+	if (ret < 0)
+		return ret;
+
+	ret = init_func();
+	if (ret < 0)
+		return ret;
+
+	if (nodes_empty(numa_nodes_parsed))
+		return -EINVAL;
+
+	ret = numa_register_nodes();
+	if (ret < 0)
+		return ret;
+
+	setup_node_to_cpumask_map();
+
+	/* init boot processor */
+	cpu_to_node_map[0] = 0;
+	map_cpu_to_node(0, 0);
+
+	return 0;
+}
+
+/**
+ * dummy_numa_init - Fallback dummy NUMA init
+ *
+ * Used if there's no underlying NUMA architecture, NUMA initialization
+ * fails, or NUMA is disabled on the command line.
+ *
+ * Must online at least one node (node 0) and add memory blocks that cover all
+ * allowed memory. It is unlikely that this function fails.
+ */
+static int __init dummy_numa_init(void)
+{
+	int ret;
+	struct memblock_region *mblk;
+
+	pr_info("%s\n", "No NUMA configuration found");
+	pr_info("NUMA: Faking a node at [mem %#018Lx-%#018Lx]\n",
+	       0LLU, PFN_PHYS(max_pfn) - 1);
+
+	for_each_memblock(memory, mblk) {
+		ret = numa_add_memblk(0, mblk->base, mblk->size);
+		if (!ret)
+			continue;
+
+		pr_err("NUMA init failed\n");
+		return ret;
+	}
+
+	numa_off = 1;
+	return 0;
+}
+
+/**
+ * arm64_numa_init - Initialize NUMA
+ *
+ * Try each configured NUMA initialization method until one succeeds.  The
+ * last fallback is a dummy single-node config encompassing the whole memory.
+ */
+void __init arm64_numa_init(void)
+{
+	if (!numa_off) {
+		if (!numa_init(of_numa_init))
+			return;
+	}
+
+	numa_init(dummy_numa_init);
+}
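
The new numa.c keeps inter-node distances in a flat cnt x cnt byte array:
LOCAL_DISTANCE on the diagonal, REMOTE_DISTANCE elsewhere, indexed as
from * cnt + to by numa_set_distance() and __node_distance(). A self-contained
sketch of that layout, using the generic default distance values (illustrative
only, not part of the patch):

	#include <stdio.h>

	#define LOCAL_DISTANCE	10
	#define REMOTE_DISTANCE	20
	#define NR_NODES	4

	static unsigned char distance[NR_NODES * NR_NODES];

	static void init_distance(void)
	{
		int i, j;

		for (i = 0; i < NR_NODES; i++)
			for (j = 0; j < NR_NODES; j++)
				distance[i * NR_NODES + j] =
					(i == j) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	}

	static int node_distance(int from, int to)
	{
		if (from >= NR_NODES || to >= NR_NODES)
			return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
		return distance[from * NR_NODES + to];
	}

	int main(void)
	{
		init_distance();
		distance[0 * NR_NODES + 1] = 15;	/* e.g. from a DT distance map */
		printf("d(0,1)=%d d(1,1)=%d\n",
		       node_distance(0, 1), node_distance(1, 1));
		return 0;
	}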
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
deleted file mode 100644
index e6a30e1..0000000
--- a/arch/arm64/mm/proc-macros.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Based on arch/arm/mm/proc-macros.S
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-
-/*
- * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
- */
-	.macro	vma_vm_mm, rd, rn
-	ldr	\rd, [\rn, #VMA_VM_MM]
-	.endm
-
-/*
- * mmid - get context id from mm pointer (mm->context.id)
- */
-	.macro	mmid, rd, rn
-	ldr	\rd, [\rn, #MM_CONTEXT_ID]
-	.endm
-
-/*
- * dcache_line_size - get the minimum D-cache line size from the CTR register.
- */
-	.macro	dcache_line_size, reg, tmp
-	mrs	\tmp, ctr_el0			// read CTR
-	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
-	mov	\reg, #4			// bytes per word
-	lsl	\reg, \reg, \tmp		// actual cache line size
-	.endm
-
-/*
- * icache_line_size - get the minimum I-cache line size from the CTR register.
- */
-	.macro	icache_line_size, reg, tmp
-	mrs	\tmp, ctr_el0			// read CTR
-	and	\tmp, \tmp, #0xf		// cache line size encoding
-	mov	\reg, #4			// bytes per word
-	lsl	\reg, \reg, \tmp		// actual cache line size
-	.endm
-
-/*
- * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
- */
-	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
-#ifndef CONFIG_ARM64_VA_BITS_48
-	ldr_l	\tmpreg, idmap_t0sz
-	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
-#endif
-	.endm
-
-/*
- * Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
- *
- * 	op:		operation passed to dc instruction
- * 	domain:		domain used in dsb instruciton
- * 	kaddr:		starting virtual address of the region
- * 	size:		size of the region
- * 	Corrupts: 	kaddr, size, tmp1, tmp2
- */
-	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
-	dcache_line_size \tmp1, \tmp2
-	add	\size, \kaddr, \size
-	sub	\tmp2, \tmp1, #1
-	bic	\kaddr, \kaddr, \tmp2
-9998:	dc	\op, \kaddr
-	add	\kaddr, \kaddr, \tmp1
-	cmp	\kaddr, \size
-	b.lo	9998b
-	dsb	\domain
-	.endm
-
-/*
- * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
- */
-	.macro	reset_pmuserenr_el0, tmpreg
-	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
-	sbfx	\tmpreg, \tmpreg, #8, #4
-	cmp	\tmpreg, #1			// Skip if no PMU present
-	b.lt	9000f
-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
-9000:
-	.endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 543f519..c431787 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -23,13 +23,11 @@
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/hwcap.h>
-#include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
 
-#include "proc-macros.S"
-
 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
 #elif defined(CONFIG_ARM64_16K_PAGES)
@@ -66,62 +64,50 @@
 	mrs	x2, tpidr_el0
 	mrs	x3, tpidrro_el0
 	mrs	x4, contextidr_el1
-	mrs	x5, mair_el1
-	mrs	x6, cpacr_el1
-	mrs	x7, ttbr1_el1
-	mrs	x8, tcr_el1
-	mrs	x9, vbar_el1
-	mrs	x10, mdscr_el1
-	mrs	x11, oslsr_el1
-	mrs	x12, sctlr_el1
+	mrs	x5, cpacr_el1
+	mrs	x6, tcr_el1
+	mrs	x7, vbar_el1
+	mrs	x8, mdscr_el1
+	mrs	x9, oslsr_el1
+	mrs	x10, sctlr_el1
 	stp	x2, x3, [x0]
-	stp	x4, x5, [x0, #16]
-	stp	x6, x7, [x0, #32]
-	stp	x8, x9, [x0, #48]
-	stp	x10, x11, [x0, #64]
-	str	x12, [x0, #80]
+	stp	x4, xzr, [x0, #16]
+	stp	x5, x6, [x0, #32]
+	stp	x7, x8, [x0, #48]
+	stp	x9, x10, [x0, #64]
 	ret
 ENDPROC(cpu_do_suspend)
 
 /**
  * cpu_do_resume - restore CPU register context
  *
- * x0: Physical address of context pointer
- * x1: ttbr0_el1 to be restored
- *
- * Returns:
- *	sctlr_el1 value in x0
+ * x0: Address of context pointer
  */
 ENTRY(cpu_do_resume)
-	/*
-	 * Invalidate local tlb entries before turning on MMU
-	 */
-	tlbi	vmalle1
 	ldp	x2, x3, [x0]
 	ldp	x4, x5, [x0, #16]
-	ldp	x6, x7, [x0, #32]
-	ldp	x8, x9, [x0, #48]
-	ldp	x10, x11, [x0, #64]
-	ldr	x12, [x0, #80]
+	ldp	x6, x8, [x0, #32]
+	ldp	x9, x10, [x0, #48]
+	ldp	x11, x12, [x0, #64]
 	msr	tpidr_el0, x2
 	msr	tpidrro_el0, x3
 	msr	contextidr_el1, x4
-	msr	mair_el1, x5
 	msr	cpacr_el1, x6
-	msr	ttbr0_el1, x1
-	msr	ttbr1_el1, x7
-	tcr_set_idmap_t0sz x8, x7
+
+	/* Don't change t0sz here, mask those bits when restoring */
+	mrs	x5, tcr_el1
+	bfi	x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
 	msr	mdscr_el1, x10
+	msr	sctlr_el1, x12
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
 	 */
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
 	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
-	mov	x0, x12
-	dsb	nsh		// Make sure local tlb invalidation completed
 	isb
 	ret
 ENDPROC(cpu_do_resume)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index a34420a..49ba37e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -31,8 +31,8 @@
 
 int bpf_jit_enable __read_mostly;
 
-#define TMP_REG_1 (MAX_BPF_REG + 0)
-#define TMP_REG_2 (MAX_BPF_REG + 1)
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -51,15 +51,16 @@
 	[BPF_REG_9] = A64_R(22),
 	/* read-only frame pointer to access stack */
 	[BPF_REG_FP] = A64_R(25),
-	/* temporary register for internal BPF JIT */
-	[TMP_REG_1] = A64_R(23),
-	[TMP_REG_2] = A64_R(24),
+	/* temporary registers for internal BPF JIT */
+	[TMP_REG_1] = A64_R(10),
+	[TMP_REG_2] = A64_R(11),
+	/* temporary register for blinding constants */
+	[BPF_REG_AX] = A64_R(9),
 };
 
 struct jit_ctx {
 	const struct bpf_prog *prog;
 	int idx;
-	int tmp_used;
 	int epilogue_offset;
 	int *offset;
 	u32 *image;
@@ -152,8 +153,6 @@
 	const u8 r8 = bpf2a64[BPF_REG_8];
 	const u8 r9 = bpf2a64[BPF_REG_9];
 	const u8 fp = bpf2a64[BPF_REG_FP];
-	const u8 tmp1 = bpf2a64[TMP_REG_1];
-	const u8 tmp2 = bpf2a64[TMP_REG_2];
 
 	/*
 	 * BPF prog stack layout
@@ -165,7 +164,7 @@
 	 *                        | ... | callee saved registers
 	 *                        +-----+
 	 *                        |     | x25/x26
-	 * BPF fp register => -80:+-----+ <= (BPF_FP)
+	 * BPF fp register => -64:+-----+ <= (BPF_FP)
 	 *                        |     |
 	 *                        | ... | BPF prog stack
 	 *                        |     |
@@ -187,8 +186,6 @@
 	/* Save callee-saved register */
 	emit(A64_PUSH(r6, r7, A64_SP), ctx);
 	emit(A64_PUSH(r8, r9, A64_SP), ctx);
-	if (ctx->tmp_used)
-		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
 
 	/* Save fp (x25) and x26. SP requires 16 bytes alignment */
 	emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
@@ -208,8 +205,6 @@
 	const u8 r8 = bpf2a64[BPF_REG_8];
 	const u8 r9 = bpf2a64[BPF_REG_9];
 	const u8 fp = bpf2a64[BPF_REG_FP];
-	const u8 tmp1 = bpf2a64[TMP_REG_1];
-	const u8 tmp2 = bpf2a64[TMP_REG_2];
 
 	/* We're done with BPF stack */
 	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
@@ -218,8 +213,6 @@
 	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
 
 	/* Restore callee-saved register */
-	if (ctx->tmp_used)
-		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
 	emit(A64_POP(r8, r9, A64_SP), ctx);
 	emit(A64_POP(r6, r7, A64_SP), ctx);
 
@@ -315,7 +308,6 @@
 			emit(A64_UDIV(is64, dst, dst, src), ctx);
 			break;
 		case BPF_MOD:
-			ctx->tmp_used = 1;
 			emit(A64_UDIV(is64, tmp, dst, src), ctx);
 			emit(A64_MUL(is64, tmp, tmp, src), ctx);
 			emit(A64_SUB(is64, dst, dst, tmp), ctx);
@@ -388,49 +380,41 @@
 	/* dst = dst OP imm */
 	case BPF_ALU | BPF_ADD | BPF_K:
 	case BPF_ALU64 | BPF_ADD | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_ADD(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_SUB | BPF_K:
 	case BPF_ALU64 | BPF_SUB | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_SUB(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_AND | BPF_K:
 	case BPF_ALU64 | BPF_AND | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_AND(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_OR | BPF_K:
 	case BPF_ALU64 | BPF_OR | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_ORR(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_XOR | BPF_K:
 	case BPF_ALU64 | BPF_XOR | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_EOR(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_MUL | BPF_K:
 	case BPF_ALU64 | BPF_MUL | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_MUL(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_DIV | BPF_K:
 	case BPF_ALU64 | BPF_DIV | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp, imm, ctx);
 		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
 		break;
 	case BPF_ALU | BPF_MOD | BPF_K:
 	case BPF_ALU64 | BPF_MOD | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(is64, tmp2, imm, ctx);
 		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
 		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
@@ -476,6 +460,7 @@
 		case BPF_JGE:
 			jmp_cond = A64_COND_CS;
 			break;
+		case BPF_JSET:
 		case BPF_JNE:
 			jmp_cond = A64_COND_NE;
 			break;
@@ -500,12 +485,10 @@
 	case BPF_JMP | BPF_JNE | BPF_K:
 	case BPF_JMP | BPF_JSGT | BPF_K:
 	case BPF_JMP | BPF_JSGE | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(1, tmp, imm, ctx);
 		emit(A64_CMP(1, dst, tmp), ctx);
 		goto emit_cond_jmp;
 	case BPF_JMP | BPF_JSET | BPF_K:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(1, tmp, imm, ctx);
 		emit(A64_TST(1, dst, tmp), ctx);
 		goto emit_cond_jmp;
@@ -515,7 +498,6 @@
 		const u8 r0 = bpf2a64[BPF_REG_0];
 		const u64 func = (u64)__bpf_call_base + imm;
 
-		ctx->tmp_used = 1;
 		emit_a64_mov_i64(tmp, func, ctx);
 		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
 		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
@@ -561,7 +543,6 @@
 	case BPF_LDX | BPF_MEM | BPF_H:
 	case BPF_LDX | BPF_MEM | BPF_B:
 	case BPF_LDX | BPF_MEM | BPF_DW:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(1, tmp, off, ctx);
 		switch (BPF_SIZE(code)) {
 		case BPF_W:
@@ -585,7 +566,6 @@
 	case BPF_ST | BPF_MEM | BPF_B:
 	case BPF_ST | BPF_MEM | BPF_DW:
 		/* Load imm to a register then store it */
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(1, tmp2, off, ctx);
 		emit_a64_mov_i(1, tmp, imm, ctx);
 		switch (BPF_SIZE(code)) {
@@ -609,7 +589,6 @@
 	case BPF_STX | BPF_MEM | BPF_H:
 	case BPF_STX | BPF_MEM | BPF_B:
 	case BPF_STX | BPF_MEM | BPF_DW:
-		ctx->tmp_used = 1;
 		emit_a64_mov_i(1, tmp, off, ctx);
 		switch (BPF_SIZE(code)) {
 		case BPF_W:
@@ -761,31 +740,45 @@
 	/* Nothing to do here. We support Internal BPF. */
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	struct bpf_prog *tmp, *orig_prog = prog;
 	struct bpf_binary_header *header;
+	bool tmp_blinded = false;
 	struct jit_ctx ctx;
 	int image_size;
 	u8 *image_ptr;
 
 	if (!bpf_jit_enable)
-		return;
+		return orig_prog;
 
-	if (!prog || !prog->len)
-		return;
+	tmp = bpf_jit_blind_constants(prog);
+	/* If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter.
+	 */
+	if (IS_ERR(tmp))
+		return orig_prog;
+	if (tmp != prog) {
+		tmp_blinded = true;
+		prog = tmp;
+	}
 
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
 	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
-	if (ctx.offset == NULL)
-		return;
+	if (ctx.offset == NULL) {
+		prog = orig_prog;
+		goto out;
+	}
 
 	/* 1. Initial fake pass to compute ctx->idx. */
 
-	/* Fake pass to fill in ctx->offset and ctx->tmp_used. */
-	if (build_body(&ctx))
-		goto out;
+	/* Fake pass to fill in ctx->offset. */
+	if (build_body(&ctx)) {
+		prog = orig_prog;
+		goto out_off;
+	}
 
 	build_prologue(&ctx);
 
@@ -796,8 +789,10 @@
 	image_size = sizeof(u32) * ctx.idx;
 	header = bpf_jit_binary_alloc(image_size, &image_ptr,
 				      sizeof(u32), jit_fill_hole);
-	if (header == NULL)
-		goto out;
+	if (header == NULL) {
+		prog = orig_prog;
+		goto out_off;
+	}
 
 	/* 2. Now, the actual pass. */
 
@@ -808,7 +803,8 @@
 
 	if (build_body(&ctx)) {
 		bpf_jit_binary_free(header);
-		goto out;
+		prog = orig_prog;
+		goto out_off;
 	}
 
 	build_epilogue(&ctx);
@@ -816,7 +812,8 @@
 	/* 3. Extra pass to validate JITed code. */
 	if (validate_code(&ctx)) {
 		bpf_jit_binary_free(header);
-		goto out;
+		prog = orig_prog;
+		goto out_off;
 	}
 
 	/* And we're done. */
@@ -828,8 +825,14 @@
 	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
-out:
+
+out_off:
 	kfree(ctx.offset);
+out:
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(prog, prog == orig_prog ?
+					   tmp : orig_prog);
+	return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *prog)
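
The bpf_int_jit_compile() rework above changes the JIT to return a program
pointer: on any failure it hands back the original program, so execution falls
back to the interpreter, and if a constant-blinded copy was made, whichever
copy is not returned gets released. Reduced to a stand-alone sketch with
placeholder names (not the kernel API):

	#include <stdio.h>

	struct prog { const char *name; };

	/* stubs standing in for bpf_jit_blind_constants() and friends */
	static struct prog *blind_constants(struct prog *p) { return p; }
	static int do_jit(struct prog *p) { (void)p; return -1; }	/* fail */
	static void release_other(struct prog *keep, struct prog *drop)
	{ (void)keep; (void)drop; }

	static struct prog *jit_compile(struct prog *prog)
	{
		struct prog *orig_prog = prog, *tmp;
		int blinded = 0;

		tmp = blind_constants(prog);
		if (!tmp)
			return orig_prog;	/* blinding failed: interpret */
		if (tmp != prog) {
			blinded = 1;
			prog = tmp;		/* JIT the blinded copy */
		}

		if (do_jit(prog))
			prog = orig_prog;	/* JIT failed: interpret */

		if (blinded)
			release_other(prog, prog == orig_prog ? tmp : orig_prog);
		return prog;
	}

	int main(void)
	{
		struct prog p = { "test" };

		printf("%s\n", jit_compile(&p) == &p ? "interpreted" : "jited");
		return 0;
	}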
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index b6878eb..18b8877 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -74,7 +74,7 @@
 	select SUBARCH_AVR32B
 	select MMU
 	select PERFORMANCE_COUNTERS
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select GENERIC_ALLOCATOR
 	select HAVE_FB_ATMEL
 
diff --git a/arch/avr32/include/asm/addrspace.h b/arch/avr32/include/asm/addrspace.h
index 3667948..5a47a79 100644
--- a/arch/avr32/include/asm/addrspace.h
+++ b/arch/avr32/include/asm/addrspace.h
@@ -1,5 +1,5 @@
 /*
- * Defitions for the address spaces of the AVR32 CPUs. Heavily based on
+ * Definitions for the address spaces of the AVR32 CPUs. Heavily based on
  * include/asm-sh/addrspace.h
  *
  * Copyright (C) 2004-2006 Atmel Corporation
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index bf445aa..00d6dcc 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1365,8 +1365,8 @@
 	slave->dma_dev = &dw_dmac0_device.dev;
 	slave->src_id = 0;
 	slave->dst_id = 1;
-	slave->src_master = 1;
-	slave->dst_master = 0;
+	slave->m_master = 1;
+	slave->p_master = 0;
 
 	data->dma_slave = slave;
 	data->dma_filter = at32_mci_dma_filter;
@@ -2061,16 +2061,16 @@
 	if (flags & AC97C_CAPTURE) {
 		rx_dws->dma_dev = &dw_dmac0_device.dev;
 		rx_dws->src_id = 3;
-		rx_dws->src_master = 0;
-		rx_dws->dst_master = 1;
+		rx_dws->m_master = 0;
+		rx_dws->p_master = 1;
 	}
 
 	/* Check if DMA slave interface for playback should be configured. */
 	if (flags & AC97C_PLAYBACK) {
 		tx_dws->dma_dev = &dw_dmac0_device.dev;
 		tx_dws->dst_id = 4;
-		tx_dws->src_master = 0;
-		tx_dws->dst_master = 1;
+		tx_dws->m_master = 0;
+		tx_dws->p_master = 1;
 	}
 
 	if (platform_device_add_data(pdev, data,
@@ -2141,8 +2141,8 @@
 
 	dws->dma_dev = &dw_dmac0_device.dev;
 	dws->dst_id = 2;
-	dws->src_master = 0;
-	dws->dst_master = 1;
+	dws->m_master = 0;
+	dws->p_master = 1;
 
 	if (platform_device_add_data(pdev, data,
 				sizeof(struct atmel_abdac_pdata)))
diff --git a/arch/blackfin/lib/udivsi3.S b/arch/blackfin/lib/udivsi3.S
index 748a6a2..90bfa80 100644
--- a/arch/blackfin/lib/udivsi3.S
+++ b/arch/blackfin/lib/udivsi3.S
@@ -154,7 +154,7 @@
        CC = R7 < 0;               /* Check quotient(AQ) */
                                   /* If AQ==0, we'll sub divisor */
        IF CC R5 = R1;             /* and if AQ==1, we'll add it. */
-       R3 = R3 + R5;              /* Add/sub divsor to partial remainder */
+       R3 = R3 + R5;              /* Add/sub divisor to partial remainder */
        R7 = R3 ^ R1;              /* Generate next quotient bit */
 
        R5 = R7 >> 31;             /* Get AQ */
diff --git a/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h b/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h
index 35caa7b..3933e91 100644
--- a/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h
+++ b/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h
@@ -2689,7 +2689,7 @@
 #define L2CTL0_STAT                 0xFFCA3010         /* L2CTL0 L2 Status Register */
 #define L2CTL0_RPCR                 0xFFCA3014         /* L2CTL0 L2 Read Priority Count Register */
 #define L2CTL0_WPCR                 0xFFCA3018         /* L2CTL0 L2 Write Priority Count Register */
-#define L2CTL0_RFA                  0xFFCA3024         /* L2CTL0 L2 Refresh Address Regsiter */
+#define L2CTL0_RFA                  0xFFCA3024         /* L2CTL0 L2 Refresh Address Register */
 #define L2CTL0_ERRADDR0             0xFFCA3040         /* L2CTL0 L2 Bank 0 ECC Error Address Register */
 #define L2CTL0_ERRADDR1             0xFFCA3044         /* L2CTL0 L2 Bank 1 ECC Error Address Register */
 #define L2CTL0_ERRADDR2             0xFFCA3048         /* L2CTL0 L2 Bank 2 ECC Error Address Register */
diff --git a/arch/c6x/include/asm/clock.h b/arch/c6x/include/asm/clock.h
index bcf42b2..e2f818a 100644
--- a/arch/c6x/include/asm/clock.h
+++ b/arch/c6x/include/asm/clock.h
@@ -101,7 +101,7 @@
 #define CLK_PLL			BIT(2) /* PLL-derived clock */
 #define PRE_PLL			BIT(3) /* source is before PLL mult/div */
 #define FIXED_DIV_PLL		BIT(4) /* fixed divisor from PLL */
-#define FIXED_RATE_PLL		BIT(5) /* fixed ouput rate PLL */
+#define FIXED_RATE_PLL		BIT(5) /* fixed output rate PLL */
 
 #define MAX_PLL_SYSCLKS 16
 
diff --git a/arch/c6x/platforms/cache.c b/arch/c6x/platforms/cache.c
index 46fd2d5..ec3c887 100644
--- a/arch/c6x/platforms/cache.c
+++ b/arch/c6x/platforms/cache.c
@@ -145,7 +145,7 @@
 		spin_lock_irqsave(&cache_lock, flags);
 
 		/*
-		 * If another cache operation is occuring
+		 * If another cache operation is occurring
 		 */
 		if (unlikely(imcr_get(wc_reg))) {
 			spin_unlock_irqrestore(&cache_lock, flags);
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index e086f9e..99bda1b 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -61,7 +61,7 @@
 	select CLONE_BACKWARDS2
 	select OLD_SIGSUSPEND
 	select OLD_SIGACTION
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select IRQ_DOMAIN if ETRAX_ARCH_V32
 	select OF if ETRAX_ARCH_V32
 	select OF_EARLY_FLATTREE if ETRAX_ARCH_V32
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index a4bbdfd..60d57c5 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -212,7 +212,7 @@
 /*
  * Probe each chip select individually for flash chips. If there are chips on
  * both cse0 and cse1, the mtd_info structs will be concatenated to one struct
- * so that MTD partitions can cross chip boundries.
+ * so that MTD partitions can cross chip boundaries.
  *
  * The only known restriction to how you can mount your chips is that each
  * chip select must hold similar flash chips. But you need external hardware
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index c6309a1..bd10d3b 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -246,7 +246,7 @@
 /*
  * Probe each chip select individually for flash chips. If there are chips on
  * both cse0 and cse1, the mtd_info structs will be concatenated to one struct
- * so that MTD partitions can cross chip boundries.
+ * so that MTD partitions can cross chip boundaries.
  *
  * The only known restriction to how you can mount your chips is that each
  * chip select must hold similar flash chips. But you need external hardware
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 617645d..2081d8b 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -525,7 +525,7 @@
 	return 0;
 }
 
-/* Map the ouput length of the transform to operation output starting on the inject index. */
+/* Map the output length of the transform to operation output starting on the inject index. */
 static int create_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
 {
 	int                        err = 0;
diff --git a/arch/cris/arch-v32/mach-a3/dram_init.S b/arch/cris/arch-v32/mach-a3/dram_init.S
index ec8648b..5c4f24d 100644
--- a/arch/cris/arch-v32/mach-a3/dram_init.S
+++ b/arch/cris/arch-v32/mach-a3/dram_init.S
@@ -11,7 +11,7 @@
  */
 
 /* Just to be certain the config file is included, we include it here
- * explicitely instead of depending on it being included in the file that
+ * explicitly instead of depending on it being included in the file that
  * uses this code.
  */
 
diff --git a/arch/cris/arch-v32/mach-fs/dram_init.S b/arch/cris/arch-v32/mach-fs/dram_init.S
index 6fbad33..d3ce2eb 100644
--- a/arch/cris/arch-v32/mach-fs/dram_init.S
+++ b/arch/cris/arch-v32/mach-fs/dram_init.S
@@ -11,7 +11,7 @@
  */
 
 /* Just to be certain the config file is included, we include it here
- * explicitely instead of depending on it being included in the file that
+ * explicitly instead of depending on it being included in the file that
  * uses this code.
  */
 
diff --git a/arch/hexagon/include/asm/hexagon_vm.h b/arch/hexagon/include/asm/hexagon_vm.h
index 1f6918b..e8990c9a 100644
--- a/arch/hexagon/include/asm/hexagon_vm.h
+++ b/arch/hexagon/include/asm/hexagon_vm.h
@@ -237,7 +237,7 @@
 
 /*
  * The initial program gets to find a system environment descriptor
- * on its stack when it begins exection. The first word is a version
+ * on its stack when it begins execution. The first word is a version
  * code to indicate what is there.  Zero means nothing more.
  */
 
diff --git a/arch/hexagon/include/asm/vm_mmu.h b/arch/hexagon/include/asm/vm_mmu.h
index 096537d..6fc29d9 100644
--- a/arch/hexagon/include/asm/vm_mmu.h
+++ b/arch/hexagon/include/asm/vm_mmu.h
@@ -78,7 +78,7 @@
 #define	__HEXAGON_C_WB_L2	0x7	/* Write-back, with L2 */
 
 /*
- * This can be overriden, but we're defaulting to the most aggressive
+ * This can be overridden, but we're defaulting to the most aggressive
  * cache policy, the better to find bugs sooner.
  */
 
diff --git a/arch/hexagon/kernel/kgdb.c b/arch/hexagon/kernel/kgdb.c
index 038580c..62dece3 100644
--- a/arch/hexagon/kernel/kgdb.c
+++ b/arch/hexagon/kernel/kgdb.c
@@ -236,9 +236,9 @@
 };
 
 /**
- * kgdb_arch_init - Perform any architecture specific initalization.
+ * kgdb_arch_init - Perform any architecture specific initialization.
  *
- * This function will handle the initalization of any architecture
+ * This function will handle the initialization of any architecture
  * specific callbacks.
  */
 int kgdb_arch_init(void)
diff --git a/arch/hexagon/kernel/vm_ops.S b/arch/hexagon/kernel/vm_ops.S
index 9fb77b3..58f2b92 100644
--- a/arch/hexagon/kernel/vm_ops.S
+++ b/arch/hexagon/kernel/vm_ops.S
@@ -26,7 +26,7 @@
  * could be, and perhaps some day will be, handled as in-line
  * macros, but for tracing/debugging it's handy to have
  * a single point of invocation for each of them.
- * Conveniently, they take paramters and return values
+ * Conveniently, they take parameters and return values
  * consistent with the ABI calling convention.
  */
 
diff --git a/arch/hexagon/lib/memcpy.S b/arch/hexagon/lib/memcpy.S
index 81c561c..a46093a 100644
--- a/arch/hexagon/lib/memcpy.S
+++ b/arch/hexagon/lib/memcpy.S
@@ -39,7 +39,7 @@
  *   DJH 10/14/09 Version 1.3 added special loop for aligned case, was
  *                            overreading bloated codesize back up to 892
  *   DJH  4/20/10 Version 1.4 fixed Ldword_loop_epilog loop to prevent loads
- *                            occuring if only 1 left outstanding, fixes bug
+ *                            occurring if only 1 left outstanding, fixes bug
  *                            # 3888, corrected for all alignments. Peeled off
  *                            1 32byte chunk from kernel loop and extended 8byte
  *                            loop at end to solve all combinations and prevent
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 105c93b..1d12129 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_IA64_IOMMU_H
 #define _ASM_IA64_IOMMU_H 1
 
-#define cpu_has_x2apic 0
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
 
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index ce11247..8b23e07 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -49,8 +49,8 @@
 /*
  * lock for writing
  */
-static inline void
-__down_write (struct rw_semaphore *sem)
+static inline long
+___down_write (struct rw_semaphore *sem)
 {
 	long old, new;
 
@@ -59,10 +59,26 @@
 		new = old + RWSEM_ACTIVE_WRITE_BIAS;
 	} while (cmpxchg_acq(&sem->count, old, new) != old);
 
-	if (old != 0)
+	return old;
+}
+
+static inline void
+__down_write (struct rw_semaphore *sem)
+{
+	if (___down_write(sem))
 		rwsem_down_write_failed(sem);
 }
 
+static inline int
+__down_write_killable (struct rw_semaphore *sem)
+{
+	if (___down_write(sem))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+
+	return 0;
+}
+
 /*
  * unlock after reading
  */
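
The ia64 rwsem change above splits the old write-lock fast path into ___down_write(), which returns the previous count so that both __down_write() and the new __down_write_killable() can share it: a non-zero old count means the lock was contended and the slow path must run, and the killable slow path reports a fatal signal as -EINTR. Below is a minimal usage sketch of the resulting API, assuming the generic down_write_killable() wrapper that this series builds on top of __down_write_killable(); my_object and its fields are hypothetical names, not part of this patch.

#include <linux/rwsem.h>
#include <linux/errno.h>

struct my_object {
	struct rw_semaphore sem;	/* protects value */
	int value;
};

/* Take the write lock, but give up if the task receives a fatal signal. */
static int my_object_update(struct my_object *obj, int value)
{
	if (down_write_killable(&obj->sem))
		return -EINTR;		/* interrupted before the lock was taken */

	obj->value = value;		/* modify the object under the write lock */
	up_write(&obj->sem);
	return 0;
}
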
diff --git a/arch/ia64/include/asm/sn/ioc3.h b/arch/ia64/include/asm/sn/ioc3.h
index 95ed6cc..6eaa3cc 100644
--- a/arch/ia64/include/asm/sn/ioc3.h
+++ b/arch/ia64/include/asm/sn/ioc3.h
@@ -131,7 +131,7 @@
 #define SSCR_PAUSE_STATE	0x40000000	/* set when PAUSE takes effect*/
 #define SSCR_RESET		0x80000000	/* reset DMA channels */
 
-/* all producer/comsumer pointers are the same bitfield */
+/* all producer/consumer pointers are the same bitfield */
 #define PROD_CONS_PTR_4K	0x00000ff8	/* for 4K buffers */
 #define PROD_CONS_PTR_1K	0x000003f8	/* for 1K buffers */
 #define PROD_CONS_PTR_OFF	3
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h
index ecb8a49..8a1ec13 100644
--- a/arch/ia64/include/asm/sn/shubio.h
+++ b/arch/ia64/include/asm/sn/shubio.h
@@ -1385,7 +1385,7 @@
  * respones are captured until IXSS[VALID] is cleared by setting the    *
  * appropriate bit in IECLR. Every time a spurious read response is     *
  * detected, the SPUR_RD bit of the PRB corresponding to the incoming   *
- * message's SIDN field is set. This always happens, regarless of       *
+ * message's SIDN field is set. This always happens, regardless of      *
  * whether a header is captured. The programmer should check            *
  * IXSM[SIDN] to determine which widget sent the spurious response,     *
  * because there may be more than one SPUR_RD bit set in the PRB        *
@@ -2997,7 +2997,7 @@
 /*
  * Values for field imsgtype
  */
-#define IIO_ICRB_IMSGT_XTALK    0	/* Incoming Meessage from Xtalk */
+#define IIO_ICRB_IMSGT_XTALK    0	/* Incoming message from Xtalk */
 #define IIO_ICRB_IMSGT_BTE      1	/* Incoming message from BTE    */
 #define IIO_ICRB_IMSGT_SN1NET   2	/* Incoming message from SN1 net */
 #define IIO_ICRB_IMSGT_CRB      3	/* Incoming message from CRB ???  */
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 300dac3..3b7a60e 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -531,8 +531,6 @@
 	       efi.systab->hdr.revision >> 16,
 	       efi.systab->hdr.revision & 0xffff, vendor);
 
-	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
-
 	palo_phys      = EFI_INVALID_TABLE_ADDR;
 
 	if (efi_config_init(arch_tables) != 0)
@@ -966,7 +964,7 @@
 /*
  * Look for the first granule aligned memory descriptor memory
  * that is big enough to hold EFI memory map. Make sure this
- * descriptor is atleast granule sized so it does not get trimmed
+ * descriptor is at least granule sized so it does not get trimmed
  */
 struct kern_memdesc *
 find_memmap_space (void)
diff --git a/arch/ia64/lib/idiv32.S b/arch/ia64/lib/idiv32.S
index 2ac28bf..c91b5b0 100644
--- a/arch/ia64/lib/idiv32.S
+++ b/arch/ia64/lib/idiv32.S
@@ -11,7 +11,7 @@
  *
  * For more details on the theory behind these algorithms, see "IA-64
  * and Elementary Functions" by Peter Markstein; HP Professional Books
- * (http://www.hp.com/go/retailbooks/)
+ * (http://www.goodreads.com/book/show/2019887.Ia_64_and_Elementary_Functions)
  */
 
 #include <asm/asmmacro.h>
diff --git a/arch/ia64/lib/idiv64.S b/arch/ia64/lib/idiv64.S
index f69bd2b..627573c 100644
--- a/arch/ia64/lib/idiv64.S
+++ b/arch/ia64/lib/idiv64.S
@@ -11,7 +11,7 @@
  *
  * For more details on the theory behind these algorithms, see "IA-64
  * and Elementary Functions" by Peter Markstein; HP Professional Books
- * (http://www.hp.com/go/retailbooks/)
+ * (http://www.goodreads.com/book/show/2019887.Ia_64_and_Elementary_Functions)
  */
 
 #include <asm/asmmacro.h>
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 0dfcf12..c1beb5a 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -22,11 +22,11 @@
 
 config COLDFIRE
 	bool "Coldfire CPU family support"
-	select ARCH_REQUIRE_GPIOLIB
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select CPU_HAS_NO_BITFIELDS
 	select CPU_HAS_NO_MULDIV64
 	select GENERIC_CSUM
+	select GPIOLIB
 	select HAVE_CLK
 
 endchoice
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index cf12a17..f7984f4 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -15,7 +15,7 @@
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/module.h>
-#include <linux/mc146818rtc.h>	/* For struct rtc_time and ioctls, etc */
+#include <linux/rtc.h>	/* For struct rtc_time and ioctls, etc */
 #include <linux/bcd.h>
 #include <asm/bvme6000hw.h>
 
diff --git a/arch/m68k/coldfire/gpio.c b/arch/m68k/coldfire/gpio.c
index 8832083..b515809 100644
--- a/arch/m68k/coldfire/gpio.c
+++ b/arch/m68k/coldfire/gpio.c
@@ -158,11 +158,6 @@
 		return -EINVAL;
 }
 
-static struct bus_type mcfgpio_subsys = {
-	.name		= "gpio",
-	.dev_name	= "gpio",
-};
-
 static struct gpio_chip mcfgpio_chip = {
 	.label			= "mcfgpio",
 	.request		= mcfgpio_request,
@@ -178,8 +173,7 @@
 
 static int __init mcfgpio_sysinit(void)
 {
-	gpiochip_add_data(&mcfgpio_chip, NULL);
-	return subsys_system_register(&mcfgpio_subsys, NULL);
+	return gpiochip_add_data(&mcfgpio_chip, NULL);
 }
 
 core_initcall(mcfgpio_sysinit);
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index d1fc479..3ee6976 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-amiga"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -64,7 +63,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -285,7 +283,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -359,6 +359,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -452,6 +453,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -468,6 +470,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -549,6 +552,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -557,7 +561,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -565,12 +568,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -594,7 +594,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 9bfe8be..e96787f 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-apollo"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -341,6 +341,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -411,6 +412,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -427,6 +429,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -508,6 +511,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -516,7 +520,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -524,12 +527,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -553,7 +553,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index ebdcfae..083fe6b 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-atari"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -350,6 +350,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -432,6 +433,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -448,6 +450,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -529,6 +532,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -537,7 +541,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -545,12 +548,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -574,7 +574,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 8acc65e..475130c 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-bvme6000"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -340,6 +340,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 0c6a3d5..4339658 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-hp300"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -62,7 +61,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -283,7 +281,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -341,6 +341,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -413,6 +414,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -429,6 +431,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -510,6 +513,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -518,7 +522,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -526,12 +529,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -555,7 +555,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 12a8a6c..831cc8c 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mac"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -61,7 +60,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -285,7 +283,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -357,6 +357,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -435,6 +436,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -451,6 +453,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -532,6 +535,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -540,7 +544,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -548,12 +551,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -577,7 +577,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 64ff2dc..6377afe 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-multi"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -71,7 +70,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -295,7 +293,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -390,6 +390,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -515,6 +516,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -531,6 +533,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -612,6 +615,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -620,7 +624,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -628,12 +631,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -657,7 +657,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 07fc6ab..4304b3d 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mvme147"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -59,7 +58,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -280,7 +278,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -339,6 +339,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 69903de..074bda4 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-mvme16x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -340,6 +340,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -403,6 +404,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -419,6 +421,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -500,6 +503,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -508,7 +512,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -516,12 +519,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -545,7 +545,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index bd84016..07b9fa8 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-q40"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -60,7 +59,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -281,7 +279,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -346,6 +346,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -426,6 +427,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -442,6 +444,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -523,6 +526,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -531,7 +535,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -539,12 +542,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -568,7 +568,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 5f9fb3a..36e6fae 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-sun3"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -57,7 +56,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -278,7 +276,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -337,6 +337,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -405,6 +406,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -421,6 +423,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -502,6 +505,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -509,7 +513,6 @@
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -517,12 +520,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -546,7 +546,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 5d1c674..903acf9 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -1,7 +1,6 @@
 CONFIG_LOCALVERSION="-sun3x"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=16
@@ -57,7 +56,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-# CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_IPV6=m
@@ -278,7 +276,9 @@
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_AF_KCM=m
 # CONFIG_WIRELESS is not set
+CONFIG_NET_DEVLINK=m
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -337,6 +337,7 @@
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -405,6 +406,7 @@
 CONFIG_XFS_FS=m
 CONFIG_OCFS2_FS=m
 # CONFIG_OCFS2_DEBUG_MASKLOG is not set
+CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
@@ -421,6 +423,7 @@
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
+CONFIG_ORANGEFS_FS=m
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_ECRYPT_FS_MESSAGING=y
@@ -502,6 +505,7 @@
 CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
@@ -510,7 +514,6 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
-CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
@@ -518,12 +521,9 @@
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
@@ -547,7 +547,6 @@
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index bafaff6..a857d82 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		377
+#define NR_syscalls		379
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 0ca7296..9fe674bf 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -382,5 +382,7 @@
 #define __NR_membarrier		374
 #define __NR_mlock2		375
 #define __NR_copy_file_range	376
+#define __NR_preadv2		377
+#define __NR_pwritev2		378
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 8bb9426..d6fd6d9 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -397,3 +397,5 @@
 	.long sys_membarrier
 	.long sys_mlock2		/* 375 */
 	.long sys_copy_file_range
+	.long sys_preadv2
+	.long sys_pwritev2
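
Taken together, the three m68k hunks above wire up the new preadv2/pwritev2 system calls: NR_syscalls grows to 379, __NR_preadv2 and __NR_pwritev2 become 377 and 378 in the uapi header, and the syscall table gains the two entries. A short userspace sketch follows, using the generic preadv2() wrapper from <sys/uio.h> (a glibc extension requiring _GNU_SOURCE); it is not m68k-specific, and the file path is only an example.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char hdr[16], body[64];
	struct iovec iov[2] = {
		{ .iov_base = hdr,  .iov_len = sizeof(hdr)  },
		{ .iov_base = body, .iov_len = sizeof(body) },
	};
	int fd = open("/etc/hostname", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	/* Scatter-read into both buffers starting at offset 0; 0 means no RWF_* flags. */
	n = preadv2(fd, iov, 2, 0, 0);
	if (n >= 0)
		printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}
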
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index 1755e2f..1cdc732 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -14,7 +14,7 @@
 #include <linux/fcntl.h>
 #include <linux/init.h>
 #include <linux/poll.h>
-#include <linux/mc146818rtc.h>	/* For struct rtc_time and ioctls, etc */
+#include <linux/rtc.h>	/* For struct rtc_time and ioctls, etc */
 #include <linux/bcd.h>
 #include <asm/mvme16xhw.h>
 
diff --git a/arch/metag/Kconfig.soc b/arch/metag/Kconfig.soc
index 973640f..50f979c 100644
--- a/arch/metag/Kconfig.soc
+++ b/arch/metag/Kconfig.soc
@@ -16,7 +16,6 @@
 
 config SOC_TZ1090
 	bool "Toumaz Xenif TZ1090 SoC (Comet)"
-	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select IMGPDC_IRQ
 	select METAG_LNKGET_AROUND_CACHE
 	select METAG_META21
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index a625818..88fa25f 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -61,7 +61,7 @@
 		"	CMPT	%0, #HI(0x02000000)\n"			\
 		"	BNZ 1b\n"					\
 		: "=&d" (temp), "=&da" (result)				\
-		: "da" (&v->counter), "bd" (i)				\
+		: "da" (&v->counter), "br" (i)				\
 		: "cc");						\
 									\
 	smp_mb();							\
diff --git a/arch/metag/include/asm/metag_regs.h b/arch/metag/include/asm/metag_regs.h
index acf4b8e..40c3f67 100644
--- a/arch/metag/include/asm/metag_regs.h
+++ b/arch/metag/include/asm/metag_regs.h
@@ -1165,7 +1165,7 @@
 #define TXSTATUS_IPTOGGLE_BIT           0x80000000 /* Prev PToggle of TXPRIVEXT */
 #define TXSTATUS_ISTATE_BIT             0x40000000 /* IState bit */
 #define TXSTATUS_IWAIT_BIT              0x20000000 /* wait indefinitely in decision step*/
-#define TXSTATUS_IEXCEPT_BIT            0x10000000 /* Indicate an exception occured */
+#define TXSTATUS_IEXCEPT_BIT            0x10000000 /* Indicate an exception occurred */
 #define TXSTATUS_IRPCOUNT_BITS          0x0E000000 /* Number of 'dirty' date entries*/
 #define TXSTATUS_IRPCOUNT_S             25
 #define TXSTATUS_IRQSTAT_BITS           0x0000F000 /* IRQEnc bits, trigger or interrupts */
diff --git a/arch/metag/include/asm/tbx.h b/arch/metag/include/asm/tbx.h
index 703b9cb..5cd2a6c 100644
--- a/arch/metag/include/asm/tbx.h
+++ b/arch/metag/include/asm/tbx.h
@@ -668,7 +668,7 @@
    
    State.Sig.TrigMask will indicate the bits set within TXMASKI at
           the time of the handler call that have all been cleared to prevent
-          nested interrupt occuring immediately.
+          nested interrupt occurring immediately.
    
    State.Sig.SaveMask is a bit-mask which will be set to Zero when a trigger
           occurs at background level and TBICTX_CRIT_BIT and optionally
@@ -1083,7 +1083,7 @@
 /* This routine causes the TBICTX structure specified in State.Sig.pCtx to
    be restored. This implies that execution will not return to the caller.
    The State.Sig.TrigMask field will be restored during the context switch
-   such that any immediately occuring interrupts occur in the context of the
+   such that any immediately occurring interrupts occur in the context of the
    newly specified task. The State.Sig.SaveMask parameter is ignored. */
 void __TBIASyncResume( TBIRES State );
 
@@ -1305,7 +1305,7 @@
 
 /* 
  * Calculate linear PC value from real PC and Minim mode control, the LSB of
- * the result returned indicates if address compression has occured.
+ * the result returned indicates if address compression has occurred.
  */
 #ifndef __ASSEMBLY__
 #define METAG_LINPC( PCVal )                                              (\
diff --git a/arch/metag/kernel/ftrace.c b/arch/metag/kernel/ftrace.c
index ac8c039..f7b23d3 100644
--- a/arch/metag/kernel/ftrace.c
+++ b/arch/metag/kernel/ftrace.c
@@ -115,7 +115,6 @@
 	return ftrace_modify_code(ip, old, new);
 }
 
-/* run from kstop_machine */
 int __init ftrace_dyn_arch_init(void)
 {
 	return 0;
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 2478ec6..33a365f 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -618,6 +618,8 @@
 
 	/* Check for a core internal or performance channel event. */
 	if (tmp) {
+		/* PERF_ICORE/PERF_CHAN only exist since Meta2 */
+#ifdef METAC_2_1
 		void *perf_addr;
 
 		/*
@@ -640,6 +642,7 @@
 
 		if (perf_addr)
 			metag_out32((config & 0x0f), perf_addr);
+#endif
 
 		/*
 		 * Now we use the high nibble as the performance event to
diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c
index 3156334..252abc1 100644
--- a/arch/metag/kernel/perf_callchain.c
+++ b/arch/metag/kernel/perf_callchain.c
@@ -65,7 +65,7 @@
 
 	--frame;
 
-	while ((entry->nr < PERF_MAX_STACK_DEPTH) && frame)
+	while ((entry->nr < sysctl_perf_event_max_stack) && frame)
 		frame = user_backtrace(frame, entry);
 }
 
diff --git a/arch/metag/tbx/tbipcx.S b/arch/metag/tbx/tbipcx.S
index de0626f..163c79a 100644
--- a/arch/metag/tbx/tbipcx.S
+++ b/arch/metag/tbx/tbipcx.S
@@ -15,7 +15,7 @@
 #include <asm/tbx.h>
 
 /* BEGIN HACK */
-/* define these for now while doing inital conversion to GAS 
+/* define these for now while doing initial conversion to GAS
    will fix properly later */
 
 /* Signal identifiers always have the TBID_SIGNAL_BIT set and contain the
diff --git a/arch/metag/tbx/tbisoft.S b/arch/metag/tbx/tbisoft.S
index 0346fe8..b04f50d 100644
--- a/arch/metag/tbx/tbisoft.S
+++ b/arch/metag/tbx/tbisoft.S
@@ -56,7 +56,7 @@
 /*
  * TBIRES __TBISwitch( TBIRES Switch, PTBICTX *rpSaveCtx )
  *
- * Software syncronous context switch between soft threads, save only the
+ * Software synchronous context switch between soft threads, save only the
  * registers which are actually valid on call entry.
  *
  *	A0FrP, D0RtP, D0.5, D0.6, D0.7      - Saved on stack
@@ -76,7 +76,7 @@
 	SETL	[A0StP+#8++],D0FrT,D1RtP
 /*
  * Save current frame state - we save all regs because we don't want
- * uninitialised crap in the TBICTX structure that the asyncronous resumption
+ * uninitialised crap in the TBICTX structure that the asynchronous resumption
  * of a thread will restore.
  */
 	MOVT	D1Re0,#HI($LSwitchExit)		/* ASync resume point here */
@@ -117,7 +117,7 @@
  * This routine causes the TBICTX structure specified in State.Sig.pCtx to
  * be restored. This implies that execution will not return to the caller.
  * The State.Sig.TrigMask field will be ored into TXMASKI during the
- * context switch such that any immediately occuring interrupts occur in
+ * context switch such that any immediately occurring interrupts occur in
  * the context of the newly specified task. The State.Sig.SaveMask parameter
  * is ignored.
  */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2018c2b..5663f41 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -15,7 +15,7 @@
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_BPF_JIT if !CPU_MICROMIPS
+	select HAVE_CBPF_JIT if !CPU_MICROMIPS
 	select HAVE_FUNCTION_TRACER
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -62,6 +62,7 @@
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select GENERIC_TIME_VSYSCALL
 	select ARCH_CLOCKSOURCE_DATA
+	select HANDLE_DOMAIN_IRQ
 
 menu "Machine selection"
 
@@ -79,7 +80,7 @@
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_APM_EMULATION
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select SYS_SUPPORTS_ZBOOT
 	select COMMON_CLK
 
@@ -98,7 +99,7 @@
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_MIPS16
 	select SYS_SUPPORTS_ZBOOT_UART16550
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select VLYNQ
 	select HAVE_CLK
 	help
@@ -122,11 +123,11 @@
 config ATH79
 	bool "Atheros AR71XX/AR724X/AR913X based boards"
 	select ARCH_HAS_RESET_CONTROLLER
-	select ARCH_REQUIRE_GPIOLIB
 	select BOOT_RAW
 	select CEVT_R4K
 	select CSRC_R4K
 	select DMA_NONCOHERENT
+	select GPIOLIB
 	select HAVE_CLK
 	select COMMON_CLK
 	select CLKDEV_LOOKUP
@@ -137,7 +138,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_MIPS16
-	select SYS_SUPPORTS_ZBOOT
+	select SYS_SUPPORTS_ZBOOT_UART_PROM
 	select USE_OF
 	help
 	  Support for the Atheros AR71XX/AR724X/AR913X SoCs.
@@ -170,7 +171,6 @@
 	select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
 	select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
 	select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
-	select ARCH_WANT_OPTIONAL_GPIOLIB
 	help
 	  Build a generic DT-based kernel image that boots on select
 	  BCM33xx cable modem chips, BCM63xx DSL chips, and BCM7xxx set-top
@@ -179,7 +179,6 @@
 
 config BCM47XX
 	bool "Broadcom BCM47XX based boards"
-	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select BOOT_RAW
 	select CEVT_R4K
 	select CSRC_R4K
@@ -196,6 +195,7 @@
 	select GPIOLIB
 	select LEDS_GPIO_REGISTER
 	select BCM47XX_NVRAM
+	select BCM47XX_SPROM
 	help
 	 Support for BCM47XX based boards
 
@@ -211,7 +211,7 @@
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
 	select SWAP_IO_SPACE
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select HAVE_CLK
 	select MIPS_L1_CACHE_SHIFT_4
 	help
@@ -305,7 +305,7 @@
 	select SYS_SUPPORTS_ZBOOT_UART16550
 	select DMA_NONCOHERENT
 	select IRQ_MIPS_CPU
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select COMMON_CLK
 	select GENERIC_IRQ_CHIP
 	select BUILTIN_DTB
@@ -325,7 +325,7 @@
 	select SYS_SUPPORTS_MIPS16
 	select SYS_SUPPORTS_MULTITHREADING
 	select SYS_HAS_EARLY_PRINTK
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select SWAP_IO_SPACE
 	select BOOT_RAW
 	select CLKDEV_LOOKUP
@@ -377,7 +377,6 @@
 
 config MACH_PISTACHIO
 	bool "IMG Pistachio SoC based boards"
-	select ARCH_REQUIRE_GPIOLIB
 	select BOOT_ELF32
 	select BOOT_RAW
 	select CEVT_R4K
@@ -385,6 +384,7 @@
 	select COMMON_CLK
 	select CSRC_R4K
 	select DMA_MAYBE_COHERENT
+	select GPIOLIB
 	select IRQ_MIPS_CPU
 	select LIBFDT
 	select MFD_SYSCON
@@ -406,13 +406,13 @@
 
 config MACH_XILFPGA
 	bool "MIPSfpga Xilinx based boards"
-	select ARCH_REQUIRE_GPIOLIB
 	select BOOT_ELF32
 	select BOOT_RAW
 	select BUILTIN_DTB
 	select CEVT_R4K
 	select COMMON_CLK
 	select CSRC_R4K
+	select GPIOLIB
 	select IRQ_MIPS_CPU
 	select LIBFDT
 	select MIPS_CPU_SCACHE
@@ -473,6 +473,7 @@
 	select SYS_SUPPORTS_MULTITHREADING
 	select SYS_SUPPORTS_SMARTMIPS
 	select SYS_SUPPORTS_ZBOOT
+	select SYS_SUPPORTS_RELOCATABLE
 	select USE_OF
 	select ZONE_DMA32 if 64BIT
 	select BUILTIN_DTB
@@ -507,6 +508,7 @@
 	select MIPS_MSC
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
+	select SYS_HAS_CPU_MIPS32_R6
 	select SYS_HAS_CPU_MIPS64_R1
 	select SYS_HAS_EARLY_PRINTK
 	select SYS_SUPPORTS_32BIT_KERNEL
@@ -516,6 +518,7 @@
 	select SYS_SUPPORTS_SMARTMIPS
 	select SYS_SUPPORTS_MICROMIPS
 	select SYS_SUPPORTS_MIPS16
+	select SYS_SUPPORTS_RELOCATABLE
 	select USB_EHCI_BIG_ENDIAN_DESC
 	select USB_EHCI_BIG_ENDIAN_MMIO
 	select USE_OF
@@ -536,7 +539,7 @@
 	select CSRC_R4K
 	select SYS_HAS_CPU_VR41XX
 	select SYS_SUPPORTS_MIPS16
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 
 config NXP_STB220
 	bool "NXP STB220 board"
@@ -856,7 +859,7 @@
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SWAP_IO_SPACE
 	select BOOT_RAW
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select MIPS_L1_CACHE_SHIFT_4
 	help
 	  Support the Mikrotik(tm) RouterBoard 532 series,
@@ -879,7 +882,7 @@
 	select HW_HAS_PCI
 	select ZONE_DMA32
 	select HOLES_IN_ZONE
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select LIBFDT
 	select USE_OF
 	select ARCH_SPARSEMEM_ENABLE
@@ -937,7 +940,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select ARCH_PHYS_ADDR_T_64BIT
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_HIGHMEM
@@ -1077,7 +1080,7 @@
 	def_bool CSRC_R4K || CLKSRC_MIPS_GIC
 
 config GPIO_TXX9
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	bool
 
 config FW_CFE
@@ -1155,6 +1158,13 @@
 config HOLES_IN_ZONE
 	bool
 
+config SYS_SUPPORTS_RELOCATABLE
+	bool
+	help
+	  Selected if the platform supports relocating the kernel.
+	  The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
+	  to allow access to the command line and entropy sources.
+
 #
 # Endianness selection.  Sufficiently obscure so many users don't know what to
 # answer,so we try hard to limit the available choices.  Also the use of a
@@ -1342,11 +1352,30 @@
 	select CPU_SUPPORTS_HUGEPAGES
 	select WEAK_ORDERING
 	select WEAK_REORDERING_BEYOND_LLSC
-	select ARCH_REQUIRE_GPIOLIB
+	select MIPS_PGD_C0_CONTEXT
+	select GPIOLIB
 	help
 		The Loongson 3 processor implements the MIPS64R2 instruction
 		set with many extensions.
 
+config LOONGSON3_ENHANCEMENT
+	bool "New Loongson 3 CPU Enhancements"
+	default n
+	select CPU_MIPSR2
+	select CPU_HAS_PREFETCH
+	depends on CPU_LOONGSON3
+	help
+	  New Loongson 3 CPU (since Loongson-3A R2, as opposed to Loongson-3A
+	  R1, Loongson-3B R1 and Loongson-3B R2) has many enhancements, such as
+	  FTLB, L1-VCache, EI/DI/Wait/Prefetch instruction, DSP/DSPv2 ASE, User
+	  Local register, Read-Inhibit/Execute-Inhibit, SFB (Store Fill Buffer),
+	  Fast TLB refill support, etc.
+
+	  This option enables those enhancements which are not probed at run
+	  time. If you want a generic kernel to run on all Loongson 3 machines,
+	  please say 'N' here. If you want a high-performance kernel to run on
+	  new Loongson 3 machines only, please say 'Y' here.
+
 config CPU_LOONGSON2E
 	bool "Loongson 2E"
 	depends on SYS_HAS_CPU_LOONGSON2E
@@ -1362,7 +1391,7 @@
 	bool "Loongson 2F"
 	depends on SYS_HAS_CPU_LOONGSON2F
 	select CPU_LOONGSON2
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	help
 	  The Loongson 2F processor implements the MIPS III instruction set
 	  with many extensions.
@@ -1375,6 +1404,8 @@
 	bool "Loongson 1B"
 	depends on SYS_HAS_CPU_LOONGSON1B
 	select CPU_LOONGSON1
+	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select LEDS_GPIO_REGISTER
 	help
 	  The Loongson 1B is a 32-bit SoC, which implements the MIPS32
 	  release 2 instruction set.
@@ -1673,6 +1704,7 @@
 	select CPU_HAS_PREFETCH
 	select CPU_MIPSR2
 	select CPU_SUPPORTS_HUGEPAGES
+	select MIPS_ASID_BITS_VARIABLE
 	help
 	  Netlogic Microsystems XLP processors.
 endchoice
@@ -1798,6 +1830,7 @@
 	select MIPS_L1_CACHE_SHIFT_6
 	select SYS_SUPPORTS_SMP
 	select SYS_SUPPORTS_HOTPLUG_CPU
+	select CPU_HAS_RIXI
 
 config CPU_BMIPS5000
 	bool
@@ -1805,10 +1838,12 @@
 	select MIPS_L1_CACHE_SHIFT_7
 	select SYS_SUPPORTS_SMP
 	select SYS_SUPPORTS_HOTPLUG_CPU
+	select CPU_HAS_RIXI
 
 config SYS_HAS_CPU_LOONGSON3
 	bool
 	select CPU_SUPPORTS_CPUFREQ
+	select CPU_HAS_RIXI
 
 config SYS_HAS_CPU_LOONGSON2E
 	bool
@@ -1961,11 +1996,15 @@
 config CPU_MIPSR2
 	bool
 	default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+	select CPU_HAS_RIXI
 	select MIPS_SPRAM
 
 config CPU_MIPSR6
 	bool
 	default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+	select CPU_HAS_RIXI
+	select HAVE_ARCH_BITREVERSE
+	select MIPS_ASID_BITS_VARIABLE
 	select MIPS_SPRAM
 
 config EVA
@@ -1999,7 +2038,7 @@
 #
 config HARDWARE_WATCHPOINTS
        bool
-       default y if CPU_MIPSR1 || CPU_MIPSR2
+       default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
 
 menu "Kernel type"
 
@@ -2042,6 +2081,16 @@
 	  emulation when determining guest CPU Frequency. Instead, the guest's
 	  timer frequency is specified directly.
 
+config MIPS_VA_BITS_48
+	bool "48 bits virtual memory"
+	depends on 64BIT
+	help
+	  Support at least 48 bits of application virtual memory.
+	  Default is 40 bits or less, depending on the CPU.
+	  This option results in a small memory overhead for page tables.
+	  This option is only supported with 16k and 64k page sizes.
+	  If unsure, say N.
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
@@ -2049,6 +2098,7 @@
 config PAGE_SIZE_4KB
 	bool "4kB"
 	depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
+	depends on !MIPS_VA_BITS_48
 	help
 	 This option select the standard 4kB Linux page size.  On some
 	 R3000-family processors this is the only available page size.  Using
@@ -2058,6 +2108,7 @@
 config PAGE_SIZE_8KB
 	bool "8kB"
 	depends on CPU_R8000 || CPU_CAVIUM_OCTEON
+	depends on !MIPS_VA_BITS_48
 	help
 	  Using 8kB page size will result in higher performance kernel at
 	  the price of higher memory consumption.  This option is available
@@ -2076,6 +2127,7 @@
 config PAGE_SIZE_32KB
 	bool "32kB"
 	depends on CPU_CAVIUM_OCTEON
+	depends on !MIPS_VA_BITS_48
 	help
 	  Using 32kB page size will result in higher performance kernel at
 	  the price of higher memory consumption.  This option is available
@@ -2280,7 +2332,7 @@
 
 config MIPS_CPS
 	bool "MIPS Coherent Processing System support"
-	depends on SYS_SUPPORTS_MIPS_CPS && !CPU_MIPSR6
+	depends on SYS_SUPPORTS_MIPS_CPS
 	select MIPS_CM
 	select MIPS_CPC
 	select MIPS_CPS_PM if HOTPLUG_CPU
@@ -2371,6 +2423,9 @@
 config XKS01
 	bool
 
+config CPU_HAS_RIXI
+	bool
+
 #
 # Vectored interrupt mode is an R2 feature
 #
@@ -2401,6 +2456,21 @@
 config CPU_R4400_WORKAROUNDS
 	bool
 
+config MIPS_ASID_SHIFT
+	int
+	default 6 if CPU_R3000 || CPU_TX39XX
+	default 4 if CPU_R8000
+	default 0
+
+config MIPS_ASID_BITS
+	int
+	default 0 if MIPS_ASID_BITS_VARIABLE
+	default 6 if CPU_R3000 || CPU_TX39XX
+	default 8
+
+config MIPS_ASID_BITS_VARIABLE
+	bool
+
 #
 # - Highmem only makes sense for the 32-bit kernel.
 # - The current highmem code will only work properly on physically indexed
@@ -2470,6 +2540,61 @@
 config SYS_SUPPORTS_NUMA
 	bool
 
+config RELOCATABLE
+	bool "Relocatable kernel"
+	depends on SYS_SUPPORTS_RELOCATABLE && (CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_MIPS32_R6 || CPU_MIPS64_R6)
+	help
+	  This builds a kernel image that retains relocation information
+	  so it can be loaded someplace besides the default 1MB.
+	  The relocations make the kernel binary about 15% larger,
+	  but are discarded at runtime.
+
+config RELOCATION_TABLE_SIZE
+	hex "Relocation table size"
+	depends on RELOCATABLE
+	range 0x0 0x01000000
+	default "0x00100000"
+	---help---
+	  A table of relocation data will be appended to the kernel binary
+	  and parsed at boot to fix up the relocated kernel.
+
+	  This option allows the amount of space reserved for the table to be
+	  adjusted, although the default of 1Mb should be ok in most cases.
+	  adjusted, although the default of 1MB should be OK in most cases.
+
+	  The build will fail and a valid size will be suggested if this is too small.
+	  If unsure, leave at the default value.
+
+config RANDOMIZE_BASE
+	bool "Randomize the address of the kernel image"
+	depends on RELOCATABLE
+	---help---
+	   Randomizes the physical and virtual address at which the
+	   kernel image is loaded, as a security feature that
+	   deters exploit attempts relying on knowledge of the location
+	   of kernel internals.
+
+	   Entropy is generated using any coprocessor 0 registers available.
+
+	   The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
+
+	   If unsure, say N.
+
+config RANDOMIZE_BASE_MAX_OFFSET
+	hex "Maximum kASLR offset" if EXPERT
+	depends on RANDOMIZE_BASE
+	range 0x0 0x40000000 if EVA || 64BIT
+	range 0x0 0x08000000
+	default "0x01000000"
+	---help---
+	  When kASLR is active, this provides the maximum offset that will
+	  be applied to the kernel image. It should be set according to the
+	  amount of physical RAM available in the target system minus
+	  PHYSICAL_START and must be a power of 2.
+
+	  This is limited by the size of KSEG0: 256MB on 32-bit, or 1GB with
+	  EVA or 64-bit. The default is 16MB.
+
 config NODES_SHIFT
 	int
 	default "6"
@@ -2477,7 +2602,7 @@
 
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON3)
+	depends on PERF_EVENTS && !OPROFILE && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON3)
 	default y
 	help
 	  Enable hardware performance counter support for perf events. If
@@ -2810,6 +2935,10 @@
 
 	config MIPS_CMDLINE_FROM_BOOTLOADER
 		bool "Bootloader kernel arguments if available"
+
+	config MIPS_CMDLINE_BUILTIN_EXTEND
+		depends on CMDLINE_BOOL
+		bool "Extend builtin kernel arguments with bootloader arguments"
 endchoice
 
 endmenu
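
The RANDOMIZE_BASE_MAX_OFFSET entry above combines several constraints: the value must be a power of 2, be sized to the available RAM minus PHYSICAL_START, and stay within the KSEG0-derived range. A minimal, hypothetical C sketch of that validation follows, using the limits from the range lines of the Kconfig entry; the helper name and standalone form are illustrative only and not part of the patch.

#include <stdbool.h>
#include <stdint.h>

/* limits mirror the "range" lines above: 1GB with EVA/64-bit, 128MB otherwise */
static bool kaslr_max_offset_valid(uint32_t max_offset, bool eva_or_64bit)
{
	uint32_t limit = eva_or_64bit ? 0x40000000u : 0x08000000u;

	if (max_offset == 0 || max_offset > limit)
		return false;

	/* power-of-2 test: exactly one bit set */
	return (max_offset & (max_offset - 1)) == 0;
}
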
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index e78d60d..efd7a9d 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -12,6 +12,9 @@
 # for "archclean" cleaning up for this architecture.
 #
 
+archscripts: scripts_basic
+	$(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs
+
 KBUILD_DEFCONFIG := ip22_defconfig
 
 #
@@ -93,6 +96,10 @@
 KBUILD_AFLAGS_MODULE		+= -mlong-calls
 KBUILD_CFLAGS_MODULE		+= -mlong-calls
 
+ifeq ($(CONFIG_RELOCATABLE),y)
+LDFLAGS_vmlinux			+= --emit-relocs
+endif
+
 #
 # pass -msoft-float to GAS if it supports it.  However on newer binutils
 # (specifically newer than 2.24.51.20140728) we then also need to explicitly
@@ -193,6 +200,8 @@
 toolchain-msa				:= $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
 cflags-$(toolchain-msa)			+= -DTOOLCHAIN_SUPPORTS_MSA
 endif
+toolchain-virt				:= $(call cc-option-yn,$(mips-cflags) -mvirt)
+cflags-$(toolchain-virt)		+= -DTOOLCHAIN_SUPPORTS_VIRT
 
 cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER)	+= -mcompact-branches=never
 cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL)	+= -mcompact-branches=optimal
@@ -310,6 +319,10 @@
 		$(bootvars-y) $@
 endif
 
+CMD_RELOCS = arch/mips/boot/tools/relocs
+quiet_cmd_relocs = RELOCS  $<
+      cmd_relocs = $(CMD_RELOCS) $<
+
 #
 # Some machines like the Indy need 32-bit ELF binaries for booting purposes.
 # Other need ECOFF, so we build a 32-bit ELF binary for them which we then
@@ -318,6 +331,11 @@
 quiet_cmd_32 = OBJCOPY $@
 	cmd_32 = $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
 vmlinux.32: vmlinux
+ifeq ($(CONFIG_RELOCATABLE)$(CONFIG_64BIT),yy)
+# Currently, objcopy fails to handle the relocations in the ELF64 binary,
+# so the relocs tool must be run here to remove them first.
+	$(call cmd,relocs)
+endif
 	$(call cmd,32)
 
 #
@@ -333,6 +351,9 @@
 
 # boot
 $(boot-y): $(vmlinux-32) FORCE
+ifeq ($(CONFIG_RELOCATABLE)$(CONFIG_32BIT),yy)
+	$(call cmd,relocs)
+endif
 	$(Q)$(MAKE) $(build)=arch/mips/boot VMLINUX=$(vmlinux-32) \
 		$(bootvars-y) arch/mips/boot/$@
 
@@ -385,6 +406,7 @@
 archclean:
 	$(Q)$(MAKE) $(clean)=arch/mips/boot
 	$(Q)$(MAKE) $(clean)=arch/mips/boot/compressed
+	$(Q)$(MAKE) $(clean)=arch/mips/boot/tools
 	$(Q)$(MAKE) $(clean)=arch/mips/lasat
 
 define archhelp
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index 7fa2488..88b4d6a 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -20,7 +20,7 @@
 
 config MIPS_DB1XXX
 	bool "Alchemy DB1XXX / PB1XXX boards"
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select HW_HAS_PCI
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index bd34f40..7ba7ea0 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -1043,8 +1043,7 @@
 
 	/* Root of the Alchemy clock tree: external 12MHz crystal osc */
 	c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
-					   CLK_IS_ROOT,
-					   ALCHEMY_ROOTCLK_RATE);
+					   0, ALCHEMY_ROOTCLK_RATE);
 	ERRCK(c)
 
 	/* CPU core clock */
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 745695d..f2f264b 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -261,7 +261,7 @@
 	au1x_dma_chan_t *cp;
 
 	/*
-	 * We do the intialization on the first channel allocation.
+	 * We do the initialization on the first channel allocation.
 	 * We have to wait because of the interrupt handler initialization
 	 * which can't be done successfully during board set up.
 	 */
@@ -964,7 +964,7 @@
 	dp->dscr_source1 = dscr->dscr_source1;
 	dp->dscr_cmd1 = dscr->dscr_cmd1;
 	nbytes = dscr->dscr_cmd1;
-	/* Allow the caller to specifiy if an interrupt is generated */
+	/* Allow the caller to specify if an interrupt is generated */
 	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
 	ctp->chan_ptr->ddma_dbell = 0;
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index bdeed9d..433c4b9 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -503,15 +503,15 @@
 	if (board == BCSR_WHOAMI_DB1500) {
 		c0 = AU1500_GPIO2_INT;
 		c1 = AU1500_GPIO5_INT;
-		d0 = AU1500_GPIO0_INT;
-		d1 = AU1500_GPIO3_INT;
+		d0 = 0;	/* GPIO number, NOT irq! */
+		d1 = 3; /* GPIO number, NOT irq! */
 		s0 = AU1500_GPIO1_INT;
 		s1 = AU1500_GPIO4_INT;
 	} else if (board == BCSR_WHOAMI_DB1100) {
 		c0 = AU1100_GPIO2_INT;
 		c1 = AU1100_GPIO5_INT;
-		d0 = AU1100_GPIO0_INT;
-		d1 = AU1100_GPIO3_INT;
+		d0 = 0; /* GPIO number, NOT irq! */
+		d1 = 3; /* GPIO number, NOT irq! */
 		s0 = AU1100_GPIO1_INT;
 		s1 = AU1100_GPIO4_INT;
 
@@ -545,15 +545,15 @@
 	} else if (board == BCSR_WHOAMI_DB1000) {
 		c0 = AU1000_GPIO2_INT;
 		c1 = AU1000_GPIO5_INT;
-		d0 = AU1000_GPIO0_INT;
-		d1 = AU1000_GPIO3_INT;
+		d0 = 0; /* GPIO number, NOT irq! */
+		d1 = 3; /* GPIO number, NOT irq! */
 		s0 = AU1000_GPIO1_INT;
 		s1 = AU1000_GPIO4_INT;
 		platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
 	} else if ((board == BCSR_WHOAMI_PB1500) ||
 		   (board == BCSR_WHOAMI_PB1500R2)) {
 		c0 = AU1500_GPIO203_INT;
-		d0 = AU1500_GPIO201_INT;
+		d0 = 1; /* GPIO number, NOT irq! */
 		s0 = AU1500_GPIO202_INT;
 		twosocks = 0;
 		flashsize = 64;
@@ -566,7 +566,7 @@
 		 */
 	} else if (board == BCSR_WHOAMI_PB1100) {
 		c0 = AU1100_GPIO11_INT;
-		d0 = AU1100_GPIO9_INT;
+		d0 = 9; /* GPIO number, NOT irq! */
 		s0 = AU1100_GPIO10_INT;
 		twosocks = 0;
 		flashsize = 64;
@@ -583,7 +583,6 @@
 	} else
 		return 0; /* unknown board, no further dev setup to do */
 
-	irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
 	irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
 	irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
 
@@ -597,7 +596,6 @@
 		c0, d0, /*s0*/0, 0, 0);
 
 	if (twosocks) {
-		irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
 		irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
 		irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
 
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index b518f02..1c01d6e 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -514,7 +514,7 @@
 		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x000400000 - 1,
 		AU1000_PCMCIA_IO_PHYS_ADDR,
 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x000010000 - 1,
-		AU1550_GPIO3_INT, AU1550_GPIO0_INT,
+		AU1550_GPIO3_INT, 0,
 		/*AU1550_GPIO21_INT*/0, 0, 0);
 
 	db1x_register_pcmcia_socket(
@@ -524,7 +524,7 @@
 		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x004400000 - 1,
 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004000000,
 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004010000 - 1,
-		AU1550_GPIO5_INT, AU1550_GPIO1_INT,
+		AU1550_GPIO5_INT, 1,
 		/*AU1550_GPIO22_INT*/0, 0, 1);
 
 	platform_device_register(&db1550_nand_dev);
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
index 13c04cf..dfc6020 100644
--- a/arch/mips/ath79/Kconfig
+++ b/arch/mips/ath79/Kconfig
@@ -71,18 +71,6 @@
 	  Say 'Y' here if you want your kernel to support the
 	  Ubiquiti Networks XM (rev 1.0) board.
 
-choice
-	prompt "Build a DTB in the kernel"
-	optional
-	help
-	  Select a devicetree that should be built into the kernel.
-
-	config DTB_TL_WR1043ND_V1
-		bool "TL-WR1043ND Version 1"
-		select BUILTIN_DTB
-		select SOC_AR913X
-endchoice
-
 endmenu
 
 config SOC_AR71XX
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index eb5117c..2e73784 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -18,18 +18,21 @@
 #include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/ath79-clk.h>
 
 #include <asm/div64.h>
 
 #include <asm/mach-ath79/ath79.h>
 #include <asm/mach-ath79/ar71xx_regs.h>
 #include "common.h"
+#include "machtypes.h"
 
 #define AR71XX_BASE_FREQ	40000000
-#define AR724X_BASE_FREQ	5000000
-#define AR913X_BASE_FREQ	5000000
+#define AR724X_BASE_FREQ	40000000
 
-static struct clk *clks[3];
+static struct clk *clks[ATH79_CLK_END];
 static struct clk_onecell_data clk_data = {
 	.clks = clks,
 	.clk_num = ARRAY_SIZE(clks),
@@ -41,7 +44,7 @@
 	struct clk *clk;
 	int err;
 
-	clk = clk_register_fixed_rate(NULL, id, NULL, CLK_IS_ROOT, rate);
+	clk = clk_register_fixed_rate(NULL, id, NULL, 0, rate);
 	if (!clk)
 		panic("failed to allocate %s clock structure", id);
 
@@ -79,92 +82,123 @@
 	ahb_rate = cpu_rate / div;
 
 	ath79_add_sys_clkdev("ref", ref_rate);
-	clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
-	clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
-	clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+	clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
+	clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
+	clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
 
 	clk_add_alias("wdt", NULL, "ahb", NULL);
 	clk_add_alias("uart", NULL, "ahb", NULL);
 }
 
+static struct clk * __init ath79_reg_ffclk(const char *name,
+		const char *parent_name, unsigned int mult, unsigned int div)
+{
+	struct clk *clk;
+
+	clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
+	if (!clk)
+		panic("failed to allocate %s clock structure", name);
+
+	return clk;
+}
+
+static void __init ar724x_clk_init(struct clk *ref_clk, void __iomem *pll_base)
+{
+	u32 pll;
+	u32 mult, div, ddr_div, ahb_div;
+
+	pll = __raw_readl(pll_base + AR724X_PLL_REG_CPU_CONFIG);
+
+	mult = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
+	div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK) * 2;
+
+	ddr_div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1;
+	ahb_div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2;
+
+	clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref", mult, div);
+	clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref", mult, div * ddr_div);
+	clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref", mult, div * ahb_div);
+}
+
 static void __init ar724x_clocks_init(void)
 {
-	unsigned long ref_rate;
-	unsigned long cpu_rate;
-	unsigned long ddr_rate;
-	unsigned long ahb_rate;
-	u32 pll;
-	u32 freq;
-	u32 div;
+	struct clk *ref_clk;
 
-	ref_rate = AR724X_BASE_FREQ;
-	pll = ath79_pll_rr(AR724X_PLL_REG_CPU_CONFIG);
+	ref_clk = ath79_add_sys_clkdev("ref", AR724X_BASE_FREQ);
 
-	div = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
-	freq = div * ref_rate;
+	ar724x_clk_init(ref_clk, ath79_pll_base);
 
-	div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK);
-	freq *= div;
-
-	cpu_rate = freq;
-
-	div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1;
-	ddr_rate = freq / div;
-
-	div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2;
-	ahb_rate = cpu_rate / div;
-
-	ath79_add_sys_clkdev("ref", ref_rate);
-	clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
-	clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
-	clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+	/* just to keep plat_time_init() from arch/mips/ath79/setup.c happy */
+	clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL);
+	clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL);
+	clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL);
 
 	clk_add_alias("wdt", NULL, "ahb", NULL);
 	clk_add_alias("uart", NULL, "ahb", NULL);
 }
 
-static void __init ar913x_clocks_init(void)
+static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base)
 {
-	unsigned long ref_rate;
-	unsigned long cpu_rate;
-	unsigned long ddr_rate;
-	unsigned long ahb_rate;
-	u32 pll;
-	u32 freq;
-	u32 div;
+	u32 clock_ctrl;
+	u32 ref_div;
+	u32 ninit_mul;
+	u32 out_div;
 
-	ref_rate = AR913X_BASE_FREQ;
-	pll = ath79_pll_rr(AR913X_PLL_REG_CPU_CONFIG);
+	u32 cpu_div;
+	u32 ddr_div;
+	u32 ahb_div;
 
-	div = ((pll >> AR913X_PLL_FB_SHIFT) & AR913X_PLL_FB_MASK);
-	freq = div * ref_rate;
+	clock_ctrl = __raw_readl(pll_base + AR933X_PLL_CLOCK_CTRL_REG);
+	if (clock_ctrl & AR933X_PLL_CLOCK_CTRL_BYPASS) {
+		ref_div = 1;
+		ninit_mul = 1;
+		out_div = 1;
 
-	cpu_rate = freq;
+		cpu_div = 1;
+		ddr_div = 1;
+		ahb_div = 1;
+	} else {
+		u32 cpu_config;
+		u32 t;
 
-	div = ((pll >> AR913X_DDR_DIV_SHIFT) & AR913X_DDR_DIV_MASK) + 1;
-	ddr_rate = freq / div;
+		cpu_config = __raw_readl(pll_base + AR933X_PLL_CPU_CONFIG_REG);
 
-	div = (((pll >> AR913X_AHB_DIV_SHIFT) & AR913X_AHB_DIV_MASK) + 1) * 2;
-	ahb_rate = cpu_rate / div;
+		t = (cpu_config >> AR933X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+		    AR933X_PLL_CPU_CONFIG_REFDIV_MASK;
+		ref_div = t;
 
-	ath79_add_sys_clkdev("ref", ref_rate);
-	clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
-	clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
-	clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+		ninit_mul = (cpu_config >> AR933X_PLL_CPU_CONFIG_NINT_SHIFT) &
+		    AR933X_PLL_CPU_CONFIG_NINT_MASK;
 
-	clk_add_alias("wdt", NULL, "ahb", NULL);
-	clk_add_alias("uart", NULL, "ahb", NULL);
+		t = (cpu_config >> AR933X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+		    AR933X_PLL_CPU_CONFIG_OUTDIV_MASK;
+		if (t == 0)
+			t = 1;
+
+		out_div = (1 << t);
+
+		cpu_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_CPU_DIV_SHIFT) &
+		     AR933X_PLL_CLOCK_CTRL_CPU_DIV_MASK) + 1;
+
+		ddr_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_DDR_DIV_SHIFT) &
+		      AR933X_PLL_CLOCK_CTRL_DDR_DIV_MASK) + 1;
+
+		ahb_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT) &
+		     AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK) + 1;
+	}
+
+	clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref",
+					ninit_mul, ref_div * out_div * cpu_div);
+	clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref",
+					ninit_mul, ref_div * out_div * ddr_div);
+	clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref",
+					ninit_mul, ref_div * out_div * ahb_div);
 }
 
 static void __init ar933x_clocks_init(void)
 {
+	struct clk *ref_clk;
 	unsigned long ref_rate;
-	unsigned long cpu_rate;
-	unsigned long ddr_rate;
-	unsigned long ahb_rate;
-	u32 clock_ctrl;
-	u32 cpu_config;
-	u32 freq;
 	u32 t;
 
 	t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
@@ -173,46 +207,14 @@
 	else
 		ref_rate = (25 * 1000 * 1000);
 
-	clock_ctrl = ath79_pll_rr(AR933X_PLL_CLOCK_CTRL_REG);
-	if (clock_ctrl & AR933X_PLL_CLOCK_CTRL_BYPASS) {
-		cpu_rate = ref_rate;
-		ahb_rate = ref_rate;
-		ddr_rate = ref_rate;
-	} else {
-		cpu_config = ath79_pll_rr(AR933X_PLL_CPU_CONFIG_REG);
+	ref_clk = ath79_add_sys_clkdev("ref", ref_rate);
 
-		t = (cpu_config >> AR933X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
-		    AR933X_PLL_CPU_CONFIG_REFDIV_MASK;
-		freq = ref_rate / t;
+	ar9330_clk_init(ref_clk, ath79_pll_base);
 
-		t = (cpu_config >> AR933X_PLL_CPU_CONFIG_NINT_SHIFT) &
-		    AR933X_PLL_CPU_CONFIG_NINT_MASK;
-		freq *= t;
-
-		t = (cpu_config >> AR933X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
-		    AR933X_PLL_CPU_CONFIG_OUTDIV_MASK;
-		if (t == 0)
-			t = 1;
-
-		freq >>= t;
-
-		t = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_CPU_DIV_SHIFT) &
-		     AR933X_PLL_CLOCK_CTRL_CPU_DIV_MASK) + 1;
-		cpu_rate = freq / t;
-
-		t = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_DDR_DIV_SHIFT) &
-		      AR933X_PLL_CLOCK_CTRL_DDR_DIV_MASK) + 1;
-		ddr_rate = freq / t;
-
-		t = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT) &
-		     AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK) + 1;
-		ahb_rate = freq / t;
-	}
-
-	ath79_add_sys_clkdev("ref", ref_rate);
-	clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
-	clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
-	clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+	/* just to keep plat_time_init() from arch/mips/ath79/setup.c happy */
+	clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL);
+	clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL);
+	clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL);
 
 	clk_add_alias("wdt", NULL, "ahb", NULL);
 	clk_add_alias("uart", NULL, "ref", NULL);
@@ -344,9 +346,9 @@
 		ahb_rate = cpu_pll / (postdiv + 1);
 
 	ath79_add_sys_clkdev("ref", ref_rate);
-	clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
-	clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
-	clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+	clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
+	clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
+	clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
 
 	clk_add_alias("wdt", NULL, "ref", NULL);
 	clk_add_alias("uart", NULL, "ref", NULL);
@@ -431,9 +433,9 @@
 		ahb_rate = cpu_pll / (postdiv + 1);
 
 	ath79_add_sys_clkdev("ref", ref_rate);
-	clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
-	clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
-	clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+	clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
+	clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
+	clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
 
 	clk_add_alias("wdt", NULL, "ref", NULL);
 	clk_add_alias("uart", NULL, "ref", NULL);
@@ -443,10 +445,8 @@
 {
 	if (soc_is_ar71xx())
 		ar71xx_clocks_init();
-	else if (soc_is_ar724x())
+	else if (soc_is_ar724x() || soc_is_ar913x())
 		ar724x_clocks_init();
-	else if (soc_is_ar913x())
-		ar913x_clocks_init();
 	else if (soc_is_ar933x())
 		ar933x_clocks_init();
 	else if (soc_is_ar934x())
@@ -455,8 +455,6 @@
 		qca955x_clocks_init();
 	else
 		BUG();
-
-	of_clk_init(NULL);
 }
 
 unsigned long __init
@@ -483,8 +481,49 @@
 
 CLK_OF_DECLARE(ar7100, "qca,ar7100-pll", ath79_clocks_init_dt);
 CLK_OF_DECLARE(ar7240, "qca,ar7240-pll", ath79_clocks_init_dt);
-CLK_OF_DECLARE(ar9130, "qca,ar9130-pll", ath79_clocks_init_dt);
-CLK_OF_DECLARE(ar9330, "qca,ar9330-pll", ath79_clocks_init_dt);
 CLK_OF_DECLARE(ar9340, "qca,ar9340-pll", ath79_clocks_init_dt);
 CLK_OF_DECLARE(ar9550, "qca,qca9550-pll", ath79_clocks_init_dt);
+
+static void __init ath79_clocks_init_dt_ng(struct device_node *np)
+{
+	struct clk *ref_clk;
+	void __iomem *pll_base;
+	const char *dnfn = of_node_full_name(np);
+
+	ref_clk = of_clk_get(np, 0);
+	if (IS_ERR(ref_clk)) {
+		pr_err("%s: of_clk_get failed\n", dnfn);
+		goto err;
+	}
+
+	pll_base = of_iomap(np, 0);
+	if (!pll_base) {
+		pr_err("%s: can't map pll registers\n", dnfn);
+		goto err_clk;
+	}
+
+	if (of_device_is_compatible(np, "qca,ar9130-pll"))
+		ar724x_clk_init(ref_clk, pll_base);
+	else if (of_device_is_compatible(np, "qca,ar9330-pll"))
+		ar9330_clk_init(ref_clk, pll_base);
+	else {
+		pr_err("%s: could not find any appropriate clk_init()\n", dnfn);
+		goto err_clk;
+	}
+
+	if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) {
+		pr_err("%s: could not register clk provider\n", dnfn);
+		goto err_clk;
+	}
+
+	return;
+
+err_clk:
+	clk_put(ref_clk);
+
+err:
+	return;
+}
+CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt_ng);
+CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt_ng);
 #endif
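
The ar724x_clk_init() hunk above replaces open-coded rate computation with fixed-factor clocks, so the resulting rates reduce to simple ratios of the reference clock. A small, hypothetical sketch of that arithmetic follows, assuming mult, div, ddr_div and ahb_div were extracted from the CPU PLL register exactly as in the patch; the function is illustrative only and not part of the driver.

#include <stdio.h>

static void ar724x_rates_sketch(unsigned long ref_rate,
				unsigned int mult, unsigned int div,
				unsigned int ddr_div, unsigned int ahb_div)
{
	/* the same ratios the registered fixed-factor clocks encode */
	unsigned long long cpu_rate = (unsigned long long)ref_rate * mult / div;
	unsigned long long ddr_rate = (unsigned long long)ref_rate * mult / (div * ddr_div);
	unsigned long long ahb_rate = (unsigned long long)ref_rate * mult / (div * ahb_div);

	printf("cpu %llu ddr %llu ahb %llu\n", cpu_rate, ddr_rate, ahb_rate);
}
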
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 3cedd1f..d071a3a 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -46,12 +46,12 @@
 {
 	ath79_ddr_base = ioremap_nocache(AR71XX_DDR_CTRL_BASE,
 					 AR71XX_DDR_CTRL_SIZE);
-	if (soc_is_ar71xx() || soc_is_ar934x()) {
-		ath79_ddr_wb_flush_base = ath79_ddr_base + 0x9c;
-		ath79_ddr_pci_win_base = ath79_ddr_base + 0x7c;
-	} else {
+	if (soc_is_ar913x() || soc_is_ar724x() || soc_is_ar933x()) {
 		ath79_ddr_wb_flush_base = ath79_ddr_base + 0x7c;
 		ath79_ddr_pci_win_base = 0;
+	} else {
+		ath79_ddr_wb_flush_base = ath79_ddr_base + 0x9c;
+		ath79_ddr_pci_win_base = ath79_ddr_base + 0x7c;
 	}
 }
 EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
@@ -76,14 +76,14 @@
 {
 	BUG_ON(!ath79_ddr_pci_win_base);
 
-	__raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0);
-	__raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 1);
-	__raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 2);
-	__raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 3);
-	__raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 4);
-	__raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 5);
-	__raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 6);
-	__raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 7);
+	__raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0);
+	__raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4);
+	__raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8);
+	__raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc);
+	__raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10);
+	__raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14);
+	__raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18);
+	__raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c);
 }
 EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows);
 
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index be451ee4a..7adab18 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -17,6 +17,7 @@
 #include <linux/bootmem.h>
 #include <linux/err.h>
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/of_platform.h>
 #include <linux/of_fdt.h>
 
@@ -203,26 +204,57 @@
 	fdt_start = fw_getenvl("fdt_start");
 	if (fdt_start)
 		__dt_setup_arch((void *)KSEG0ADDR(fdt_start));
-#ifdef CONFIG_BUILTIN_DTB
-	else
-		__dt_setup_arch(__dtb_start);
-#endif
+	else if (fw_arg0 == -2)
+		__dt_setup_arch((void *)KSEG0ADDR(fw_arg1));
 
-	ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
-					   AR71XX_RESET_SIZE);
-	ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
-					 AR71XX_PLL_SIZE);
-	ath79_detect_sys_type();
-	ath79_ddr_ctrl_init();
+	if (mips_machtype != ATH79_MACH_GENERIC_OF) {
+		ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
+						   AR71XX_RESET_SIZE);
+		ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
+						 AR71XX_PLL_SIZE);
+		ath79_detect_sys_type();
+		ath79_ddr_ctrl_init();
 
-	if (mips_machtype != ATH79_MACH_GENERIC_OF)
 		detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
 
-	_machine_restart = ath79_restart;
+		/* OF machines should use the reset driver */
+		_machine_restart = ath79_restart;
+	}
+
 	_machine_halt = ath79_halt;
 	pm_power_off = ath79_halt;
 }
 
+static void __init ath79_of_plat_time_init(void)
+{
+	struct device_node *np;
+	struct clk *clk;
+	unsigned long cpu_clk_rate;
+
+	of_clk_init(NULL);
+
+	np = of_get_cpu_node(0, NULL);
+	if (!np) {
+		pr_err("Failed to get CPU node\n");
+		return;
+	}
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
+		return;
+	}
+
+	cpu_clk_rate = clk_get_rate(clk);
+
+	pr_info("CPU clock: %lu.%03lu MHz\n",
+		cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000);
+
+	mips_hpt_frequency = cpu_clk_rate / 2;
+
+	clk_put(clk);
+}
+
 void __init plat_time_init(void)
 {
 	unsigned long cpu_clk_rate;
@@ -230,6 +262,11 @@
 	unsigned long ddr_clk_rate;
 	unsigned long ref_clk_rate;
 
+	if (IS_ENABLED(CONFIG_OF) && mips_machtype == ATH79_MACH_GENERIC_OF) {
+		ath79_of_plat_time_init();
+		return;
+	}
+
 	ath79_clocks_init();
 
 	cpu_clk_rate = ath79_get_sys_clk_rate("cpu");
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index 66bea4e..6d86150 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -3,5 +3,5 @@
 # under Linux.
 #
 
-obj-y				+= irq.o prom.o serial.o setup.o time.o sprom.o
+obj-y				+= irq.o prom.o serial.o setup.o time.o
 obj-y				+= board.o buttons.o leds.o workarounds.o
diff --git a/arch/mips/bcm47xx/bcm47xx_private.h b/arch/mips/bcm47xx/bcm47xx_private.h
index 41796be..0367ac7 100644
--- a/arch/mips/bcm47xx/bcm47xx_private.h
+++ b/arch/mips/bcm47xx/bcm47xx_private.h
@@ -10,9 +10,6 @@
 /* prom.c */
 void __init bcm47xx_prom_highmem_init(void);
 
-/* sprom.c */
-void bcm47xx_sprom_register_fallbacks(void);
-
 /* buttons.c */
 int __init bcm47xx_buttons_register(void);
 
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index c807e32..6054d49 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -28,6 +28,7 @@
 
 #include "bcm47xx_private.h"
 
+#include <linux/bcm47xx_sprom.h>
 #include <linux/export.h>
 #include <linux/types.h>
 #include <linux/ethtool.h>
@@ -151,7 +152,6 @@
 		pr_info("Using bcma bus\n");
 #ifdef CONFIG_BCM47XX_BCMA
 		bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
-		bcm47xx_sprom_register_fallbacks();
 		bcm47xx_register_bcma();
 		bcm47xx_set_system_type(bcm47xx_bus.bcma.bus.chipinfo.id);
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
deleted file mode 100644
index 959c145..0000000
--- a/arch/mips/bcm47xx/sprom.c
+++ /dev/null
@@ -1,724 +0,0 @@
-/*
- *  Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
- *  Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
- *  Copyright (C) 2006 Michael Buesch <m@bues.ch>
- *  Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
- *  Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <bcm47xx.h>
-#include <linux/if_ether.h>
-#include <linux/etherdevice.h>
-
-static void create_key(const char *prefix, const char *postfix,
-		       const char *name, char *buf, int len)
-{
-	if (prefix && postfix)
-		snprintf(buf, len, "%s%s%s", prefix, name, postfix);
-	else if (prefix)
-		snprintf(buf, len, "%s%s", prefix, name);
-	else if (postfix)
-		snprintf(buf, len, "%s%s", name, postfix);
-	else
-		snprintf(buf, len, "%s", name);
-}
-
-static int get_nvram_var(const char *prefix, const char *postfix,
-			 const char *name, char *buf, int len, bool fallback)
-{
-	char key[40];
-	int err;
-
-	create_key(prefix, postfix, name, key, sizeof(key));
-
-	err = bcm47xx_nvram_getenv(key, buf, len);
-	if (fallback && err == -ENOENT && prefix) {
-		create_key(NULL, postfix, name, key, sizeof(key));
-		err = bcm47xx_nvram_getenv(key, buf, len);
-	}
-	return err;
-}
-
-#define NVRAM_READ_VAL(type)						\
-static void nvram_read_ ## type(const char *prefix,			\
-				const char *postfix, const char *name,	\
-				type *val, type allset, bool fallback)	\
-{									\
-	char buf[100];							\
-	int err;							\
-	type var;							\
-									\
-	err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf),	\
-			    fallback);					\
-	if (err < 0)							\
-		return;							\
-	err = kstrto ## type(strim(buf), 0, &var);			\
-	if (err) {							\
-		pr_warn("can not parse nvram name %s%s%s with value %s got %i\n",	\
-			prefix, name, postfix, buf, err);		\
-		return;							\
-	}								\
-	if (allset && var == allset)					\
-		return;							\
-	*val = var;							\
-}
-
-NVRAM_READ_VAL(u8)
-NVRAM_READ_VAL(s8)
-NVRAM_READ_VAL(u16)
-NVRAM_READ_VAL(u32)
-
-#undef NVRAM_READ_VAL
-
-static void nvram_read_u32_2(const char *prefix, const char *name,
-			     u16 *val_lo, u16 *val_hi, bool fallback)
-{
-	char buf[100];
-	int err;
-	u32 val;
-
-	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
-	if (err < 0)
-		return;
-	err = kstrtou32(strim(buf), 0, &val);
-	if (err) {
-		pr_warn("can not parse nvram name %s%s with value %s got %i\n",
-			prefix, name, buf, err);
-		return;
-	}
-	*val_lo = (val & 0x0000FFFFU);
-	*val_hi = (val & 0xFFFF0000U) >> 16;
-}
-
-static void nvram_read_leddc(const char *prefix, const char *name,
-			     u8 *leddc_on_time, u8 *leddc_off_time,
-			     bool fallback)
-{
-	char buf[100];
-	int err;
-	u32 val;
-
-	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
-	if (err < 0)
-		return;
-	err = kstrtou32(strim(buf), 0, &val);
-	if (err) {
-		pr_warn("can not parse nvram name %s%s with value %s got %i\n",
-			prefix, name, buf, err);
-		return;
-	}
-
-	if (val == 0xffff || val == 0xffffffff)
-		return;
-
-	*leddc_on_time = val & 0xff;
-	*leddc_off_time = (val >> 16) & 0xff;
-}
-
-static void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
-{
-	if (strchr(buf, ':'))
-		sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
-			&macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
-			&macaddr[5]);
-	else if (strchr(buf, '-'))
-		sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
-			&macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
-			&macaddr[5]);
-	else
-		pr_warn("Can not parse mac address: %s\n", buf);
-}
-
-static void nvram_read_macaddr(const char *prefix, const char *name,
-			       u8 val[6], bool fallback)
-{
-	char buf[100];
-	int err;
-
-	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
-	if (err < 0)
-		return;
-
-	bcm47xx_nvram_parse_macaddr(buf, val);
-}
-
-static void nvram_read_alpha2(const char *prefix, const char *name,
-			     char val[2], bool fallback)
-{
-	char buf[10];
-	int err;
-
-	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
-	if (err < 0)
-		return;
-	if (buf[0] == '0')
-		return;
-	if (strlen(buf) > 2) {
-		pr_warn("alpha2 is too long %s\n", buf);
-		return;
-	}
-	memcpy(val, buf, 2);
-}
-
-/* This is one-function-only macro, it uses local "sprom" variable! */
-#define ENTRY(_revmask, _type, _prefix, _name, _val, _allset, _fallback) \
-	if (_revmask & BIT(sprom->revision)) \
-		nvram_read_ ## _type(_prefix, NULL, _name, &sprom->_val, \
-				     _allset, _fallback)
-/*
- * Special version of filling function that can be safely called for any SPROM
- * revision. For every NVRAM to SPROM mapping it contains bitmask of revisions
- * for which the mapping is valid.
- * It obviously requires some hexadecimal/bitmasks knowledge, but allows
- * writing cleaner code (easy revisions handling).
- * Note that while SPROM revision 0 was never used, we still keep BIT(0)
- * reserved for it, just to keep numbering sane.
- */
-static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
-				    const char *prefix, bool fallback)
-{
-	const char *pre = prefix;
-	bool fb = fallback;
-
-	/* Broadcom extracts it for rev 8+ but it was found on 2 and 4 too */
-	ENTRY(0xfffffffe, u16, pre, "devid", dev_id, 0, fallback);
-
-	ENTRY(0xfffffffe, u16, pre, "boardrev", board_rev, 0, true);
-	ENTRY(0xfffffffe, u32, pre, "boardflags", boardflags, 0, fb);
-	ENTRY(0xfffffff0, u32, pre, "boardflags2", boardflags2, 0, fb);
-	ENTRY(0xfffff800, u32, pre, "boardflags3", boardflags3, 0, fb);
-	ENTRY(0x00000002, u16, pre, "boardflags", boardflags_lo, 0, fb);
-	ENTRY(0xfffffffc, u16, pre, "boardtype", board_type, 0, true);
-	ENTRY(0xfffffffe, u16, pre, "boardnum", board_num, 0, fb);
-	ENTRY(0x00000002, u8, pre, "cc", country_code, 0, fb);
-	ENTRY(0xfffffff8, u8, pre, "regrev", regrev, 0, fb);
-
-	ENTRY(0xfffffffe, u8, pre, "ledbh0", gpio0, 0xff, fb);
-	ENTRY(0xfffffffe, u8, pre, "ledbh1", gpio1, 0xff, fb);
-	ENTRY(0xfffffffe, u8, pre, "ledbh2", gpio2, 0xff, fb);
-	ENTRY(0xfffffffe, u8, pre, "ledbh3", gpio3, 0xff, fb);
-
-	ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb);
-	ENTRY(0x0000070e, u16, pre, "pa0b1", pa0b1, 0, fb);
-	ENTRY(0x0000070e, u16, pre, "pa0b2", pa0b2, 0, fb);
-	ENTRY(0x0000070e, u8, pre, "pa0itssit", itssi_bg, 0, fb);
-	ENTRY(0x0000070e, u8, pre, "pa0maxpwr", maxpwr_bg, 0, fb);
-
-	ENTRY(0x0000070c, u8, pre, "opo", opo, 0, fb);
-	ENTRY(0xfffffffe, u8, pre, "aa2g", ant_available_bg, 0, fb);
-	ENTRY(0xfffffffe, u8, pre, "aa5g", ant_available_a, 0, fb);
-	ENTRY(0x000007fe, s8, pre, "ag0", antenna_gain.a0, 0, fb);
-	ENTRY(0x000007fe, s8, pre, "ag1", antenna_gain.a1, 0, fb);
-	ENTRY(0x000007f0, s8, pre, "ag2", antenna_gain.a2, 0, fb);
-	ENTRY(0x000007f0, s8, pre, "ag3", antenna_gain.a3, 0, fb);
-
-	ENTRY(0x0000070e, u16, pre, "pa1b0", pa1b0, 0, fb);
-	ENTRY(0x0000070e, u16, pre, "pa1b1", pa1b1, 0, fb);
-	ENTRY(0x0000070e, u16, pre, "pa1b2", pa1b2, 0, fb);
-	ENTRY(0x0000070c, u16, pre, "pa1lob0", pa1lob0, 0, fb);
-	ENTRY(0x0000070c, u16, pre, "pa1lob1", pa1lob1, 0, fb);
-	ENTRY(0x0000070c, u16, pre, "pa1lob2", pa1lob2, 0, fb);
-	ENTRY(0x0000070c, u16, pre, "pa1hib0", pa1hib0, 0, fb);
-	ENTRY(0x0000070c, u16, pre, "pa1hib1", pa1hib1, 0, fb);
-	ENTRY(0x0000070c, u16, pre, "pa1hib2", pa1hib2, 0, fb);
-	ENTRY(0x0000070e, u8, pre, "pa1itssit", itssi_a, 0, fb);
-	ENTRY(0x0000070e, u8, pre, "pa1maxpwr", maxpwr_a, 0, fb);
-	ENTRY(0x0000070c, u8, pre, "pa1lomaxpwr", maxpwr_al, 0, fb);
-	ENTRY(0x0000070c, u8, pre, "pa1himaxpwr", maxpwr_ah, 0, fb);
-
-	ENTRY(0x00000708, u8, pre, "bxa2g", bxa2g, 0, fb);
-	ENTRY(0x00000708, u8, pre, "rssisav2g", rssisav2g, 0, fb);
-	ENTRY(0x00000708, u8, pre, "rssismc2g", rssismc2g, 0, fb);
-	ENTRY(0x00000708, u8, pre, "rssismf2g", rssismf2g, 0, fb);
-	ENTRY(0x00000708, u8, pre, "bxa5g", bxa5g, 0, fb);
-	ENTRY(0x00000708, u8, pre, "rssisav5g", rssisav5g, 0, fb);
-	ENTRY(0x00000708, u8, pre, "rssismc5g", rssismc5g, 0, fb);
-	ENTRY(0x00000708, u8, pre, "rssismf5g", rssismf5g, 0, fb);
-	ENTRY(0x00000708, u8, pre, "tri2g", tri2g, 0, fb);
-	ENTRY(0x00000708, u8, pre, "tri5g", tri5g, 0, fb);
-	ENTRY(0x00000708, u8, pre, "tri5gl", tri5gl, 0, fb);
-	ENTRY(0x00000708, u8, pre, "tri5gh", tri5gh, 0, fb);
-	ENTRY(0x00000708, s8, pre, "rxpo2g", rxpo2g, 0, fb);
-	ENTRY(0x00000708, s8, pre, "rxpo5g", rxpo5g, 0, fb);
-	ENTRY(0xfffffff0, u8, pre, "txchain", txchain, 0xf, fb);
-	ENTRY(0xfffffff0, u8, pre, "rxchain", rxchain, 0xf, fb);
-	ENTRY(0xfffffff0, u8, pre, "antswitch", antswitch, 0xff, fb);
-	ENTRY(0x00000700, u8, pre, "tssipos2g", fem.ghz2.tssipos, 0, fb);
-	ENTRY(0x00000700, u8, pre, "extpagain2g", fem.ghz2.extpa_gain, 0, fb);
-	ENTRY(0x00000700, u8, pre, "pdetrange2g", fem.ghz2.pdet_range, 0, fb);
-	ENTRY(0x00000700, u8, pre, "triso2g", fem.ghz2.tr_iso, 0, fb);
-	ENTRY(0x00000700, u8, pre, "antswctl2g", fem.ghz2.antswlut, 0, fb);
-	ENTRY(0x00000700, u8, pre, "tssipos5g", fem.ghz5.tssipos, 0, fb);
-	ENTRY(0x00000700, u8, pre, "extpagain5g", fem.ghz5.extpa_gain, 0, fb);
-	ENTRY(0x00000700, u8, pre, "pdetrange5g", fem.ghz5.pdet_range, 0, fb);
-	ENTRY(0x00000700, u8, pre, "triso5g", fem.ghz5.tr_iso, 0, fb);
-	ENTRY(0x00000700, u8, pre, "antswctl5g", fem.ghz5.antswlut, 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid2ga0", txpid2g[0], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid2ga1", txpid2g[1], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid2ga2", txpid2g[2], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid2ga3", txpid2g[3], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5ga0", txpid5g[0], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5ga1", txpid5g[1], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5ga2", txpid5g[2], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5ga3", txpid5g[3], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5gla0", txpid5gl[0], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5gla1", txpid5gl[1], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5gla2", txpid5gl[2], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5gla3", txpid5gl[3], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5gha0", txpid5gh[0], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5gha1", txpid5gh[1], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5gha2", txpid5gh[2], 0, fb);
-	ENTRY(0x000000f0, u8, pre, "txpid5gha3", txpid5gh[3], 0, fb);
-
-	ENTRY(0xffffff00, u8, pre, "tempthresh", tempthresh, 0, fb);
-	ENTRY(0xffffff00, u8, pre, "tempoffset", tempoffset, 0, fb);
-	ENTRY(0xffffff00, u16, pre, "rawtempsense", rawtempsense, 0, fb);
-	ENTRY(0xffffff00, u8, pre, "measpower", measpower, 0, fb);
-	ENTRY(0xffffff00, u8, pre, "tempsense_slope", tempsense_slope, 0, fb);
-	ENTRY(0xffffff00, u8, pre, "tempcorrx", tempcorrx, 0, fb);
-	ENTRY(0xffffff00, u8, pre, "tempsense_option", tempsense_option, 0, fb);
-	ENTRY(0x00000700, u8, pre, "freqoffset_corr", freqoffset_corr, 0, fb);
-	ENTRY(0x00000700, u8, pre, "iqcal_swp_dis", iqcal_swp_dis, 0, fb);
-	ENTRY(0x00000700, u8, pre, "hw_iqcal_en", hw_iqcal_en, 0, fb);
-	ENTRY(0x00000700, u8, pre, "elna2g", elna2g, 0, fb);
-	ENTRY(0x00000700, u8, pre, "elna5g", elna5g, 0, fb);
-	ENTRY(0xffffff00, u8, pre, "phycal_tempdelta", phycal_tempdelta, 0, fb);
-	ENTRY(0xffffff00, u8, pre, "temps_period", temps_period, 0, fb);
-	ENTRY(0xffffff00, u8, pre, "temps_hysteresis", temps_hysteresis, 0, fb);
-	ENTRY(0xffffff00, u8, pre, "measpower1", measpower1, 0, fb);
-	ENTRY(0xffffff00, u8, pre, "measpower2", measpower2, 0, fb);
-
-	ENTRY(0x000001f0, u16, pre, "cck2gpo", cck2gpo, 0, fb);
-	ENTRY(0x000001f0, u32, pre, "ofdm2gpo", ofdm2gpo, 0, fb);
-	ENTRY(0x000001f0, u32, pre, "ofdm5gpo", ofdm5gpo, 0, fb);
-	ENTRY(0x000001f0, u32, pre, "ofdm5glpo", ofdm5glpo, 0, fb);
-	ENTRY(0x000001f0, u32, pre, "ofdm5ghpo", ofdm5ghpo, 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs2gpo0", mcs2gpo[0], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs2gpo1", mcs2gpo[1], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs2gpo2", mcs2gpo[2], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs2gpo3", mcs2gpo[3], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs2gpo4", mcs2gpo[4], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs2gpo5", mcs2gpo[5], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs2gpo6", mcs2gpo[6], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs2gpo7", mcs2gpo[7], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5gpo0", mcs5gpo[0], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5gpo1", mcs5gpo[1], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5gpo2", mcs5gpo[2], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5gpo3", mcs5gpo[3], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5gpo4", mcs5gpo[4], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5gpo5", mcs5gpo[5], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5gpo6", mcs5gpo[6], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5gpo7", mcs5gpo[7], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5glpo0", mcs5glpo[0], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5glpo1", mcs5glpo[1], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5glpo2", mcs5glpo[2], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5glpo3", mcs5glpo[3], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5glpo4", mcs5glpo[4], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5glpo5", mcs5glpo[5], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5glpo6", mcs5glpo[6], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5glpo7", mcs5glpo[7], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5ghpo0", mcs5ghpo[0], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5ghpo1", mcs5ghpo[1], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5ghpo2", mcs5ghpo[2], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5ghpo3", mcs5ghpo[3], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5ghpo4", mcs5ghpo[4], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5ghpo5", mcs5ghpo[5], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5ghpo6", mcs5ghpo[6], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "mcs5ghpo7", mcs5ghpo[7], 0, fb);
-	ENTRY(0x000001f0, u16, pre, "cddpo", cddpo, 0, fb);
-	ENTRY(0x000001f0, u16, pre, "stbcpo", stbcpo, 0, fb);
-	ENTRY(0x000001f0, u16, pre, "bw40po", bw40po, 0, fb);
-	ENTRY(0x000001f0, u16, pre, "bwduppo", bwduppo, 0, fb);
-
-	ENTRY(0xfffffe00, u16, pre, "cckbw202gpo", cckbw202gpo, 0, fb);
-	ENTRY(0xfffffe00, u16, pre, "cckbw20ul2gpo", cckbw20ul2gpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "legofdmbw202gpo", legofdmbw202gpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "legofdmbw20ul2gpo", legofdmbw20ul2gpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "legofdmbw205glpo", legofdmbw205glpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "legofdmbw20ul5glpo", legofdmbw20ul5glpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "legofdmbw205gmpo", legofdmbw205gmpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "legofdmbw20ul5gmpo", legofdmbw20ul5gmpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "legofdmbw205ghpo", legofdmbw205ghpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "legofdmbw20ul5ghpo", legofdmbw20ul5ghpo, 0, fb);
-	ENTRY(0xfffffe00, u32, pre, "mcsbw202gpo", mcsbw202gpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "mcsbw20ul2gpo", mcsbw20ul2gpo, 0, fb);
-	ENTRY(0xfffffe00, u32, pre, "mcsbw402gpo", mcsbw402gpo, 0, fb);
-	ENTRY(0xfffffe00, u32, pre, "mcsbw205glpo", mcsbw205glpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "mcsbw20ul5glpo", mcsbw20ul5glpo, 0, fb);
-	ENTRY(0xfffffe00, u32, pre, "mcsbw405glpo", mcsbw405glpo, 0, fb);
-	ENTRY(0xfffffe00, u32, pre, "mcsbw205gmpo", mcsbw205gmpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "mcsbw20ul5gmpo", mcsbw20ul5gmpo, 0, fb);
-	ENTRY(0xfffffe00, u32, pre, "mcsbw405gmpo", mcsbw405gmpo, 0, fb);
-	ENTRY(0xfffffe00, u32, pre, "mcsbw205ghpo", mcsbw205ghpo, 0, fb);
-	ENTRY(0x00000600, u32, pre, "mcsbw20ul5ghpo", mcsbw20ul5ghpo, 0, fb);
-	ENTRY(0xfffffe00, u32, pre, "mcsbw405ghpo", mcsbw405ghpo, 0, fb);
-	ENTRY(0x00000600, u16, pre, "mcs32po", mcs32po, 0, fb);
-	ENTRY(0x00000600, u16, pre, "legofdm40duppo", legofdm40duppo, 0, fb);
-	ENTRY(0x00000700, u8, pre, "pcieingress_war", pcieingress_war, 0, fb);
-
-	/* TODO: rev 11 support */
-	ENTRY(0x00000700, u8, pre, "rxgainerr2ga0", rxgainerr2ga[0], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr2ga1", rxgainerr2ga[1], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr2ga2", rxgainerr2ga[2], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gla0", rxgainerr5gla[0], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gla1", rxgainerr5gla[1], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gla2", rxgainerr5gla[2], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gma0", rxgainerr5gma[0], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gma1", rxgainerr5gma[1], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gma2", rxgainerr5gma[2], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gha0", rxgainerr5gha[0], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gha1", rxgainerr5gha[1], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gha2", rxgainerr5gha[2], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gua0", rxgainerr5gua[0], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gua1", rxgainerr5gua[1], 0, fb);
-	ENTRY(0x00000700, u8, pre, "rxgainerr5gua2", rxgainerr5gua[2], 0, fb);
-
-	ENTRY(0xfffffe00, u8, pre, "sar2g", sar2g, 0, fb);
-	ENTRY(0xfffffe00, u8, pre, "sar5g", sar5g, 0, fb);
-
-	/* TODO: rev 11 support */
-	ENTRY(0x00000700, u8, pre, "noiselvl2ga0", noiselvl2ga[0], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl2ga1", noiselvl2ga[1], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl2ga2", noiselvl2ga[2], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gla0", noiselvl5gla[0], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gla1", noiselvl5gla[1], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gla2", noiselvl5gla[2], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gma0", noiselvl5gma[0], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gma1", noiselvl5gma[1], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gma2", noiselvl5gma[2], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gha0", noiselvl5gha[0], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gha1", noiselvl5gha[1], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gha2", noiselvl5gha[2], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gua0", noiselvl5gua[0], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
-	ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
-}
-#undef ENTRY /* It's specififc, uses local variable, don't use it (again). */
-
-static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
-					  const char *prefix, bool fallback)
-{
-	char postfix[2];
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
-		struct ssb_sprom_core_pwr_info *pwr_info;
-
-		pwr_info = &sprom->core_pwr_info[i];
-
-		snprintf(postfix, sizeof(postfix), "%i", i);
-		nvram_read_u8(prefix, postfix, "maxp2ga",
-			      &pwr_info->maxpwr_2g, 0, fallback);
-		nvram_read_u8(prefix, postfix, "itt2ga",
-			      &pwr_info->itssi_2g, 0, fallback);
-		nvram_read_u8(prefix, postfix, "itt5ga",
-			      &pwr_info->itssi_5g, 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa2gw0a",
-			       &pwr_info->pa_2g[0], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa2gw1a",
-			       &pwr_info->pa_2g[1], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa2gw2a",
-			       &pwr_info->pa_2g[2], 0, fallback);
-		nvram_read_u8(prefix, postfix, "maxp5ga",
-			      &pwr_info->maxpwr_5g, 0, fallback);
-		nvram_read_u8(prefix, postfix, "maxp5gha",
-			      &pwr_info->maxpwr_5gh, 0, fallback);
-		nvram_read_u8(prefix, postfix, "maxp5gla",
-			      &pwr_info->maxpwr_5gl, 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5gw0a",
-			       &pwr_info->pa_5g[0], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5gw1a",
-			       &pwr_info->pa_5g[1], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5gw2a",
-			       &pwr_info->pa_5g[2], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5glw0a",
-			       &pwr_info->pa_5gl[0], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5glw1a",
-			       &pwr_info->pa_5gl[1], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5glw2a",
-			       &pwr_info->pa_5gl[2], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5ghw0a",
-			       &pwr_info->pa_5gh[0], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5ghw1a",
-			       &pwr_info->pa_5gh[1], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5ghw2a",
-			       &pwr_info->pa_5gh[2], 0, fallback);
-	}
-}
-
-static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
-					const char *prefix, bool fallback)
-{
-	char postfix[2];
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
-		struct ssb_sprom_core_pwr_info *pwr_info;
-
-		pwr_info = &sprom->core_pwr_info[i];
-
-		snprintf(postfix, sizeof(postfix), "%i", i);
-		nvram_read_u16(prefix, postfix, "pa2gw3a",
-			       &pwr_info->pa_2g[3], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5gw3a",
-			       &pwr_info->pa_5g[3], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5glw3a",
-			       &pwr_info->pa_5gl[3], 0, fallback);
-		nvram_read_u16(prefix, postfix, "pa5ghw3a",
-			       &pwr_info->pa_5gh[3], 0, fallback);
-	}
-}
-
-static bool bcm47xx_is_valid_mac(u8 *mac)
-{
-	return mac && !(mac[0] == 0x00 && mac[1] == 0x90 && mac[2] == 0x4c);
-}
-
-static int bcm47xx_increase_mac_addr(u8 *mac, u8 num)
-{
-	u8 *oui = mac + ETH_ALEN/2 - 1;
-	u8 *p = mac + ETH_ALEN - 1;
-
-	do {
-		(*p) += num;
-		if (*p > num)
-			break;
-		p--;
-		num = 1;
-	} while (p != oui);
-
-	if (p == oui) {
-		pr_err("unable to fetch mac address\n");
-		return -ENOENT;
-	}
-	return 0;
-}
-
-static int mac_addr_used = 2;
-
-static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
-					const char *prefix, bool fallback)
-{
-	bool fb = fallback;
-
-	nvram_read_macaddr(prefix, "et0macaddr", sprom->et0mac, fallback);
-	nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
-		      fallback);
-	nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
-		      fallback);
-
-	nvram_read_macaddr(prefix, "et1macaddr", sprom->et1mac, fallback);
-	nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
-		      fallback);
-	nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
-		      fallback);
-
-	nvram_read_macaddr(prefix, "et2macaddr", sprom->et2mac, fb);
-	nvram_read_u8(prefix, NULL, "et2mdcport", &sprom->et2mdcport, 0, fb);
-	nvram_read_u8(prefix, NULL, "et2phyaddr", &sprom->et2phyaddr, 0, fb);
-
-	nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback);
-	nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback);
-
-	/* The address prefix 00:90:4C is used by Broadcom in their initial
-	 * configuration. When a mac address with the prefix 00:90:4C is used
-	 * all devices from the same series are sharing the same mac address.
-	 * To prevent mac address collisions we replace them with a mac address
-	 * based on the base address.
-	 */
-	if (!bcm47xx_is_valid_mac(sprom->il0mac)) {
-		u8 mac[6];
-
-		nvram_read_macaddr(NULL, "et0macaddr", mac, false);
-		if (bcm47xx_is_valid_mac(mac)) {
-			int err = bcm47xx_increase_mac_addr(mac, mac_addr_used);
-
-			if (!err) {
-				ether_addr_copy(sprom->il0mac, mac);
-				mac_addr_used++;
-			}
-		}
-	}
-}
-
-static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
-				    bool fallback)
-{
-	nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
-			 &sprom->boardflags_hi, fallback);
-	nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
-			 &sprom->boardflags2_hi, fallback);
-}
-
-void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
-			bool fallback)
-{
-	bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback);
-	bcm47xx_fill_board_data(sprom, prefix, fallback);
-
-	nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
-
-	/* Entries requiring custom functions */
-	nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback);
-	if (sprom->revision >= 3)
-		nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
-				 &sprom->leddc_off_time, fallback);
-
-	switch (sprom->revision) {
-	case 4:
-	case 5:
-		bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
-		bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
-		break;
-	case 8:
-	case 9:
-		bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
-		break;
-	}
-
-	bcm47xx_sprom_fill_auto(sprom, prefix, fallback);
-}
-
-#if defined(CONFIG_BCM47XX_SSB)
-static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
-{
-	char prefix[10];
-
-	switch (bus->bustype) {
-	case SSB_BUSTYPE_SSB:
-		bcm47xx_fill_sprom(out, NULL, false);
-		return 0;
-	case SSB_BUSTYPE_PCI:
-		memset(out, 0, sizeof(struct ssb_sprom));
-		snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
-			 bus->host_pci->bus->number + 1,
-			 PCI_SLOT(bus->host_pci->devfn));
-		bcm47xx_fill_sprom(out, prefix, false);
-		return 0;
-	default:
-		pr_warn("Unable to fill SPROM for given bustype.\n");
-		return -EINVAL;
-	}
-}
-#endif
-
-#if defined(CONFIG_BCM47XX_BCMA)
-/*
- * Having many NVRAM entries for PCI devices led to repeating prefixes like
- * pci/1/1/ all the time and wasting flash space. So at some point Broadcom
- * decided to introduce prefixes like 0: 1: 2: etc.
- * If we find e.g. devpath0=pci/2/1 or devpath0=pci/2/1/ we should use 0:
- * instead of pci/2/1/.
- */
-static void bcm47xx_sprom_apply_prefix_alias(char *prefix, size_t prefix_size)
-{
-	size_t prefix_len = strlen(prefix);
-	size_t short_len = prefix_len - 1;
-	char nvram_var[10];
-	char buf[20];
-	int i;
-
-	/* Passed prefix has to end with a slash */
-	if (prefix_len <= 0 || prefix[prefix_len - 1] != '/')
-		return;
-
-	for (i = 0; i < 3; i++) {
-		if (snprintf(nvram_var, sizeof(nvram_var), "devpath%d", i) <= 0)
-			continue;
-		if (bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf)) < 0)
-			continue;
-		if (!strcmp(buf, prefix) ||
-		    (short_len && strlen(buf) == short_len && !strncmp(buf, prefix, short_len))) {
-			snprintf(prefix, prefix_size, "%d:", i);
-			return;
-		}
-	}
-}
-
-static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
-{
-	struct bcma_boardinfo *binfo = &bus->boardinfo;
-	struct bcma_device *core;
-	char buf[10];
-	char *prefix;
-	bool fallback = false;
-
-	switch (bus->hosttype) {
-	case BCMA_HOSTTYPE_PCI:
-		memset(out, 0, sizeof(struct ssb_sprom));
-		/* On BCM47XX all PCI buses share the same domain */
-		if (config_enabled(CONFIG_BCM47XX))
-			snprintf(buf, sizeof(buf), "pci/%u/%u/",
-				 bus->host_pci->bus->number + 1,
-				 PCI_SLOT(bus->host_pci->devfn));
-		else
-			snprintf(buf, sizeof(buf), "pci/%u/%u/",
-				 pci_domain_nr(bus->host_pci->bus) + 1,
-				 bus->host_pci->bus->number);
-		bcm47xx_sprom_apply_prefix_alias(buf, sizeof(buf));
-		prefix = buf;
-		break;
-	case BCMA_HOSTTYPE_SOC:
-		memset(out, 0, sizeof(struct ssb_sprom));
-		core = bcma_find_core(bus, BCMA_CORE_80211);
-		if (core) {
-			snprintf(buf, sizeof(buf), "sb/%u/",
-				 core->core_index);
-			prefix = buf;
-			fallback = true;
-		} else {
-			prefix = NULL;
-		}
-		break;
-	default:
-		pr_warn("Unable to fill SPROM for given bustype.\n");
-		return -EINVAL;
-	}
-
-	nvram_read_u16(prefix, NULL, "boardvendor", &binfo->vendor, 0, true);
-	if (!binfo->vendor)
-		binfo->vendor = SSB_BOARDVENDOR_BCM;
-	nvram_read_u16(prefix, NULL, "boardtype", &binfo->type, 0, true);
-
-	bcm47xx_fill_sprom(out, prefix, fallback);
-
-	return 0;
-}
-#endif
-
-/*
- * On bcm47xx we need to register SPROM fallback handler very early, so we can't
- * use anything like platform device / driver for this.
- */
-void bcm47xx_sprom_register_fallbacks(void)
-{
-#if defined(CONFIG_BCM47XX_SSB)
-	if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
-		pr_warn("Failed to registered ssb SPROM handler\n");
-#endif
-
-#if defined(CONFIG_BCM47XX_BCMA)
-	if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
-		pr_warn("Failed to registered bcma SPROM handler\n");
-#endif
-}
diff --git a/arch/mips/bmips/Kconfig b/arch/mips/bmips/Kconfig
index e2c4fd6..264328d 100644
--- a/arch/mips/bmips/Kconfig
+++ b/arch/mips/bmips/Kconfig
@@ -21,6 +21,10 @@
 	bool "BCM93384WVG Viper CPU (EXPERIMENTAL)"
 	select BUILTIN_DTB
 
+config DT_BCM96358NB4SER
+	bool "BCM96358NB4SER"
+	select BUILTIN_DTB
+
 config DT_BCM96368MVWG
 	bool "BCM96368MVWG"
 	select BUILTIN_DTB
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 3553528..f146d12 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -95,6 +95,15 @@
 		bcm63xx_fixup_cpu1();
 }
 
+static void bcm6358_quirks(void)
+{
+	/*
+	 * BCM6358 needs special handling for its shared TLB, so
+	 * disable SMP for now
+	 */
+	bmips_smp_enabled = 0;
+}
+
 static void bcm6368_quirks(void)
 {
 	bcm63xx_fixup_cpu1();
@@ -104,13 +113,16 @@
 	{ "brcm,bcm3384-viper",		&bcm3384_viper_quirks		},
 	{ "brcm,bcm33843-viper",	&bcm3384_viper_quirks		},
 	{ "brcm,bcm6328",		&bcm6328_quirks			},
+	{ "brcm,bcm6358",		&bcm6358_quirks			},
 	{ "brcm,bcm6368",		&bcm6368_quirks			},
 	{ "brcm,bcm63168",		&bcm6368_quirks			},
+	{ "brcm,bcm63268",		&bcm6368_quirks			},
 	{ },
 };
 
 void __init prom_init(void)
 {
+	bmips_cpu_setup();
 	register_bmips_smp_ops();
 }
 
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 4eff1ef..90aca95 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -37,12 +37,18 @@
 vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
 vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
 vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY)		   += $(obj)/uart-alchemy.o
+vmlinuzobjs-$(CONFIG_ATH79)			   += $(obj)/uart-ath79.o
 endif
 
-vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
+extra-y += uart-ath79.c
+$(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c
+	$(call cmd,shipped)
 
-$(obj)/ashldi3.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
-$(obj)/ashldi3.c: $(srctree)/arch/mips/lib/ashldi3.c
+vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
+
+extra-y += ashldi3.c bswapsi.c
+$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
+$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c
 	$(call cmd,shipped)
 
 targets := $(notdir $(vmlinuzobjs-y))
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
index eabeb60..fda9d38 100644
--- a/arch/mips/boot/dts/brcm/Makefile
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -1,5 +1,6 @@
 dtb-$(CONFIG_DT_BCM93384WVG)		+= bcm93384wvg.dtb
 dtb-$(CONFIG_DT_BCM93384WVG_VIPER)	+= bcm93384wvg_viper.dtb
+dtb-$(CONFIG_DT_BCM96358NB4SER)		+= bcm96358nb4ser.dtb
 dtb-$(CONFIG_DT_BCM96368MVWG)		+= bcm96368mvwg.dtb
 dtb-$(CONFIG_DT_BCM9EJTAGPRB)		+= bcm9ejtagprb.dtb
 dtb-$(CONFIG_DT_BCM97125CBMB)		+= bcm97125cbmb.dtb
@@ -14,6 +15,7 @@
 dtb-$(CONFIG_DT_NONE)			+= \
 						bcm93384wvg.dtb		\
 						bcm93384wvg_viper.dtb	\
+						bcm96358nb4ser.dtb	\
 						bcm96368mvwg.dtb	\
 						bcm9ejtagprb.dtb	\
 						bcm97125cbmb.dtb	\
diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi
index 9d19236..5633b9d 100644
--- a/arch/mips/boot/dts/brcm/bcm6328.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi
@@ -23,7 +23,7 @@
 	};
 
 	clocks {
-		periph_clk: periph_clk {
+		periph_clk: periph-clk {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <50000000>;
@@ -31,11 +31,11 @@
 	};
 
 	aliases {
-		leds0 = &leds0;
-		uart0 = &uart0;
+		serial0 = &uart0;
+		serial1 = &uart1;
 	};
 
-	cpu_intc: cpu_intc {
+	cpu_intc: interrupt-controller {
 		#address-cells = <0>;
 		compatible = "mti,cpu-interrupt-controller";
 
@@ -50,16 +50,16 @@
 		compatible = "simple-bus";
 		ranges;
 
-		periph_intc: periph_intc@10000020 {
-			compatible = "brcm,bcm3380-l2-intc";
-			reg = <0x10000024 0x4 0x1000002c 0x4>,
-			      <0x10000020 0x4 0x10000028 0x4>;
+		periph_intc: interrupt-controller@10000020 {
+			compatible = "brcm,bcm6345-l1-intc";
+			reg = <0x10000020 0x10>,
+			      <0x10000030 0x10>;
 
 			interrupt-controller;
 			#interrupt-cells = <1>;
 
 			interrupt-parent = <&cpu_intc>;
-			interrupts = <2>;
+			interrupts = <2>, <3>;
 		};
 
 		uart0: serial@10000100 {
@@ -71,13 +71,22 @@
 			status = "disabled";
 		};
 
-		timer: timer@10000040 {
+		uart1: serial@10000120 {
+			compatible = "brcm,bcm6345-uart";
+			reg = <0x10000120 0x18>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <39>;
+			clocks = <&periph_clk>;
+			status = "disabled";
+		};
+
+		timer: syscon@10000040 {
 			compatible = "syscon";
 			reg = <0x10000040 0x2c>;
 			native-endian;
 		};
 
-		reboot {
+		reboot: syscon-reboot@10000068 {
 			compatible = "syscon-reboot";
 			regmap = <&timer>;
 			offset = <0x28>;
@@ -91,5 +100,24 @@
 			reg = <0x10000800 0x24>;
 			status = "disabled";
 		};
+
+		ehci: usb@10002500 {
+			compatible = "brcm,bcm6328-ehci", "generic-ehci";
+			reg = <0x10002500 0x100>;
+			big-endian;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <42>;
+			status = "disabled";
+		};
+
+		ohci: usb@10002600 {
+			compatible = "brcm,bcm6328-ohci", "generic-ohci";
+			reg = <0x10002600 0x100>;
+			big-endian;
+			no-big-frame-no;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <41>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
new file mode 100644
index 0000000..f9d8d39
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
@@ -0,0 +1,130 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "brcm,bcm6358";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mips-hpt-frequency = <150000000>;
+
+		cpu@0 {
+			compatible = "brcm,bmips4350";
+			device_type = "cpu";
+			reg = <0>;
+		};
+
+		cpu@1 {
+			compatible = "brcm,bmips4350";
+			device_type = "cpu";
+			reg = <1>;
+		};
+	};
+
+	clocks {
+		periph_clk: periph-clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <50000000>;
+		};
+	};
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
+	cpu_intc: interrupt-controller {
+		#address-cells = <0>;
+		compatible = "mti,cpu-interrupt-controller";
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};
+
+	ubus {
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		compatible = "simple-bus";
+		ranges;
+
+		periph_cntl: syscon@fffe0000 {
+			compatible = "syscon";
+			reg = <0xfffe0000 0xc>;
+			native-endian;
+		};
+
+		reboot: syscon-reboot@fffe0008 {
+			compatible = "syscon-reboot";
+			regmap = <&periph_cntl>;
+			offset = <0x8>;
+			mask = <0x1>;
+		};
+
+		periph_intc: interrupt-controller@fffe000c {
+			compatible = "brcm,bcm6345-l1-intc";
+			reg = <0xfffe000c 0x8>,
+			      <0xfffe0038 0x8>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&cpu_intc>;
+			interrupts = <2>, <3>;
+		};
+
+		leds0: led-controller@fffe00d0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "brcm,bcm6358-leds";
+			reg = <0xfffe00d0 0x8>;
+
+			status = "disabled";
+		};
+
+		uart0: serial@fffe0100 {
+			compatible = "brcm,bcm6345-uart";
+			reg = <0xfffe0100 0x18>;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <2>;
+
+			clocks = <&periph_clk>;
+
+			status = "disabled";
+		};
+
+		uart1: serial@fffe0120 {
+			compatible = "brcm,bcm6345-uart";
+			reg = <0xfffe0120 0x18>;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <3>;
+
+			clocks = <&periph_clk>;
+
+			status = "disabled";
+		};
+
+		ehci: usb@fffe1300 {
+			compatible = "brcm,bcm6358-ehci", "generic-ehci";
+			reg = <0xfffe1300 0x100>;
+			big-endian;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <10>;
+			status = "disabled";
+		};
+
+		ohci: usb@fffe1400 {
+			compatible = "brcm,bcm6358-ohci", "generic-ohci";
+			reg = <0xfffe1400 0x100>;
+			big-endian;
+			no-big-frame-no;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <5>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
index 1f6b9b5..d0e3a70 100644
--- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
@@ -20,11 +20,10 @@
 			device_type = "cpu";
 			reg = <1>;
 		};
-
 	};
 
 	clocks {
-		periph_clk: periph_clk {
+		periph_clk: periph-clk {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <50000000>;
@@ -32,11 +31,11 @@
 	};
 
 	aliases {
-		leds0 = &leds0;
-		uart0 = &uart0;
+		serial0 = &uart0;
+		serial1 = &uart1;
 	};
 
-	cpu_intc: cpu_intc {
+	cpu_intc: interrupt-controller {
 		#address-cells = <0>;
 		compatible = "mti,cpu-interrupt-controller";
 
@@ -64,16 +63,16 @@
 			mask = <0x1>;
 		};
 
-		periph_intc: periph_intc@10000020 {
-			compatible = "brcm,bcm3380-l2-intc";
-			reg = <0x10000024 0x4 0x1000002c 0x4>,
-			      <0x10000020 0x4 0x10000028 0x4>;
+		periph_intc: interrupt-controller@10000020 {
+			compatible = "brcm,bcm6345-l1-intc";
+			reg = <0x10000020 0x10>,
+			      <0x10000030 0x10>;
 
 			interrupt-controller;
 			#interrupt-cells = <1>;
 
 			interrupt-parent = <&cpu_intc>;
-			interrupts = <2>;
+			interrupts = <2>, <3>;
 		};
 
 		leds0: led-controller@100000d0 {
@@ -93,7 +92,16 @@
 			status = "disabled";
 		};
 
-		ehci0: usb@10001500 {
+		uart1: serial@10000120 {
+			compatible = "brcm,bcm6345-uart";
+			reg = <0x10000120 0x18>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <3>;
+			clocks = <&periph_clk>;
+			status = "disabled";
+		};
+
+		ehci: usb@10001500 {
 			compatible = "brcm,bcm6368-ehci", "generic-ehci";
 			reg = <0x10001500 0x100>;
 			big-endian;
@@ -102,7 +110,7 @@
 			status = "disabled";
 		};
 
-		ohci0: usb@10001600 {
+		ohci: usb@10001600 {
 			compatible = "brcm,bcm6368-ohci", "generic-ohci";
 			reg = <0x10001600 0x100>;
 			big-endian;
diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi
index 3ae1605..550e1d9 100644
--- a/arch/mips/boot/dts/brcm/bcm7125.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi
@@ -85,14 +85,15 @@
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406780 0x8>;
 
-			brcm,int-map-mask = <0x44>;
+			brcm,int-map-mask = <0x44>, <0xf000000>;
 			brcm,int-fwd-mask = <0x70000>;
 
 			interrupt-controller;
 			#interrupt-cells = <1>;
 
 			interrupt-parent = <&periph_intc>;
-			interrupts = <18>;
+			interrupts = <18>, <19>;
+			interrupt-names = "upg_main", "upg_bsc";
 		};
 
 		sun_top_ctrl: syscon@404000 {
@@ -118,6 +119,70 @@
 			status = "disabled";
 		};
 
+		uart1: serial@406b40 {
+			compatible = "ns16550a";
+			reg = <0x406b40 0x20>;
+			reg-io-width = <0x4>;
+			reg-shift = <0x2>;
+			native-endian;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <64>;
+			clocks = <&uart_clk>;
+			status = "disabled";
+		};
+
+		uart2: serial@406b80 {
+			compatible = "ns16550a";
+			reg = <0x406b80 0x20>;
+			reg-io-width = <0x4>;
+			reg-shift = <0x2>;
+			native-endian;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <65>;
+			clocks = <&uart_clk>;
+			status = "disabled";
+		};
+
+		bsca: i2c@406200 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406200 0x58>;
+		      interrupts = <24>;
+		      interrupt-names = "upg_bsca";
+		      status = "disabled";
+		};
+
+		bscb: i2c@406280 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406280 0x58>;
+		      interrupts = <25>;
+		      interrupt-names = "upg_bscb";
+		      status = "disabled";
+		};
+
+		bscc: i2c@406300 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406300 0x58>;
+		      interrupts = <26>;
+		      interrupt-names = "upg_bscc";
+		      status = "disabled";
+		};
+
+		bscd: i2c@406380 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406380 0x58>;
+		      interrupts = <27>;
+		      interrupt-names = "upg_bscd";
+		      status = "disabled";
+		};
+
 		ehci0: usb@488300 {
 			compatible = "brcm,bcm7125-ehci", "generic-ehci";
 			reg = <0x488300 0x100>;
diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi
index be79919..ec95906 100644
--- a/arch/mips/boot/dts/brcm/bcm7346.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi
@@ -24,8 +24,6 @@
 
 	aliases {
 		uart0 = &uart0;
-		uart1 = &uart1;
-		uart2 = &uart2;
 	};
 
 	cpu_intc: cpu_intc {
@@ -323,8 +321,6 @@
 			interrupts = <40>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			brcm,broken-ncq;
-			brcm,broken-phy;
 			status = "disabled";
 
 			sata0: sata-port@0 {
@@ -338,7 +334,7 @@
 			};
 		};
 
-		sata_phy: sata-phy@1800000 {
+		sata_phy: sata-phy@180100 {
 			compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
 			reg = <0x180100 0x0eff>;
 			reg-names = "phy";
diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi
index 060805b..ca57fb5 100644
--- a/arch/mips/boot/dts/brcm/bcm7358.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi
@@ -18,8 +18,6 @@
 
 	aliases {
 		uart0 = &uart0;
-		uart1 = &uart1;
-		uart2 = &uart2;
 	};
 
 	cpu_intc: cpu_intc {
diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi
index bcdb09b..1c0c3d4 100644
--- a/arch/mips/boot/dts/brcm/bcm7360.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi
@@ -18,8 +18,6 @@
 
 	aliases {
 		uart0 = &uart0;
-		uart1 = &uart1;
-		uart2 = &uart2;
 	};
 
 	cpu_intc: cpu_intc {
@@ -241,5 +239,45 @@
 			interrupts = <66>;
 			status = "disabled";
 		};
+
+		sata: sata@181000 {
+			compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
+			reg-names = "ahci", "top-ctrl";
+			reg = <0x181000 0xa9c>, <0x180020 0x1c>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <86>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+
+			sata0: sata-port@0 {
+				reg = <0>;
+				phys = <&sata_phy0>;
+			};
+
+			sata1: sata-port@1 {
+				reg = <1>;
+				phys = <&sata_phy1>;
+			};
+		};
+
+		sata_phy: sata-phy@180100 {
+			compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
+			reg = <0x180100 0x0eff>;
+			reg-names = "phy";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+
+			sata_phy0: sata-phy@0 {
+				reg = <0>;
+				#phy-cells = <0>;
+			};
+
+			sata_phy1: sata-phy@1 {
+				reg = <1>;
+				#phy-cells = <0>;
+			};
+		};
 	};
 };
diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi
index d3b1b76..6b4713a 100644
--- a/arch/mips/boot/dts/brcm/bcm7362.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi
@@ -24,8 +24,6 @@
 
 	aliases {
 		uart0 = &uart0;
-		uart1 = &uart1;
-		uart2 = &uart2;
 	};
 
 	cpu_intc: cpu_intc {
@@ -246,8 +244,6 @@
 			interrupts = <86>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			brcm,broken-ncq;
-			brcm,broken-phy;
 			status = "disabled";
 
 			sata0: sata-port@0 {
@@ -261,7 +257,7 @@
 			};
 		};
 
-		sata_phy: sata-phy@1800000 {
+		sata_phy: sata-phy@180100 {
 			compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
 			reg = <0x180100 0x0eff>;
 			reg-names = "phy";
diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi
index 3302a1b..0586bf6 100644
--- a/arch/mips/boot/dts/brcm/bcm7420.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi
@@ -86,14 +86,15 @@
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406780 0x8>;
 
-			brcm,int-map-mask = <0x44>;
+			brcm,int-map-mask = <0x44>, <0x1f000000>;
 			brcm,int-fwd-mask = <0x70000>;
 
 			interrupt-controller;
 			#interrupt-cells = <1>;
 
 			interrupt-parent = <&periph_intc>;
-			interrupts = <18>;
+			interrupts = <18>, <19>;
+			interrupt-names = "upg_main", "upg_bsc";
 		};
 
 		sun_top_ctrl: syscon@404000 {
@@ -118,6 +119,78 @@
 			status = "disabled";
 		};
 
+		uart1: serial@406b40 {
+			compatible = "ns16550a";
+			reg = <0x406b40 0x20>;
+			reg-io-width = <0x4>;
+			reg-shift = <0x2>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <64>;
+			clocks = <&uart_clk>;
+			status = "disabled";
+		};
+
+		uart2: serial@406b80 {
+			compatible = "ns16550a";
+			reg = <0x406b80 0x20>;
+			reg-io-width = <0x4>;
+			reg-shift = <0x2>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <65>;
+			clocks = <&uart_clk>;
+			status = "disabled";
+		};
+
+		bsca: i2c@406200 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406200 0x58>;
+		      interrupts = <24>;
+		      interrupt-names = "upg_bsca";
+		      status = "disabled";
+		};
+
+		bscb: i2c@406280 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406280 0x58>;
+		      interrupts = <25>;
+		      interrupt-names = "upg_bscb";
+		      status = "disabled";
+		};
+
+		bscc: i2c@406300 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406300 0x58>;
+		      interrupts = <26>;
+		      interrupt-names = "upg_bscc";
+		      status = "disabled";
+		};
+
+		bscd: i2c@406380 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406380 0x58>;
+		      interrupts = <27>;
+		      interrupt-names = "upg_bscd";
+		      status = "disabled";
+		};
+
+		bsce: i2c@406800 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406800 0x58>;
+		      interrupts = <28>;
+		      interrupt-names = "upg_bsce";
+		      status = "disabled";
+		};
+
 		enet0: ethernet@468000 {
 			phy-mode = "internal";
 			phy-handle = <&phy1>;
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
index 15b27aa..c1c15ed 100644
--- a/arch/mips/boot/dts/brcm/bcm7425.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -87,14 +87,32 @@
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406780 0x8>;
 
-			brcm,int-map-mask = <0x44>;
+			brcm,int-map-mask = <0x44>, <0x7000000>;
 			brcm,int-fwd-mask = <0x70000>;
 
 			interrupt-controller;
 			#interrupt-cells = <1>;
 
 			interrupt-parent = <&periph_intc>;
-			interrupts = <55>;
+			interrupts = <55>, <53>;
+			interrupt-names = "upg_main", "upg_bsc";
+		};
+
+		upg_aon_irq0_intc: upg_aon_irq0_intc@409480 {
+			compatible = "brcm,bcm7120-l2-intc";
+			reg = <0x409480 0x8>;
+
+			brcm,int-map-mask = <0x40>, <0x18000000>, <0x100000>;
+			brcm,int-fwd-mask = <0>;
+			brcm,irq-can-wake;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <56>, <54>, <59>;
+			interrupt-names = "upg_main_aon", "upg_bsc_aon",
+					  "upg_spi";
 		};
 
 		sun_top_ctrl: syscon@404000 {
@@ -119,6 +137,78 @@
 			status = "disabled";
 		};
 
+		uart1: serial@406b40 {
+			compatible = "ns16550a";
+			reg = <0x406b40 0x20>;
+			reg-io-width = <0x4>;
+			reg-shift = <0x2>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <62>;
+			clocks = <&uart_clk>;
+			status = "disabled";
+		};
+
+		uart2: serial@406b80 {
+			compatible = "ns16550a";
+			reg = <0x406b80 0x20>;
+			reg-io-width = <0x4>;
+			reg-shift = <0x2>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <63>;
+			clocks = <&uart_clk>;
+			status = "disabled";
+		};
+
+		bsca: i2c@409180 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_aon_irq0_intc>;
+		      reg = <0x409180 0x58>;
+		      interrupts = <27>;
+		      interrupt-names = "upg_bsca";
+		      status = "disabled";
+		};
+
+		bscb: i2c@409400 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_aon_irq0_intc>;
+		      reg = <0x409400 0x58>;
+		      interrupts = <28>;
+		      interrupt-names = "upg_bscb";
+		      status = "disabled";
+		};
+
+		bscc: i2c@406200 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406200 0x58>;
+		      interrupts = <24>;
+		      interrupt-names = "upg_bscc";
+		      status = "disabled";
+		};
+
+		bscd: i2c@406280 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406280 0x58>;
+		      interrupts = <25>;
+		      interrupt-names = "upg_bscd";
+		      status = "disabled";
+		};
+
+		bsce: i2c@406300 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406300 0x58>;
+		      interrupts = <26>;
+		      interrupt-names = "upg_bsce";
+		      status = "disabled";
+		};
+
 		enet0: ethernet@b80000 {
 			phy-mode = "internal";
 			phy-handle = <&phy1>;
@@ -227,11 +317,9 @@
 			reg-names = "ahci", "top-ctrl";
 			reg = <0x181000 0xa9c>, <0x180020 0x1c>;
 			interrupt-parent = <&periph_intc>;
-			interrupts = <40>;
+			interrupts = <41>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			brcm,broken-ncq;
-			brcm,broken-phy;
 			status = "disabled";
 
 			sata0: sata-port@0 {
@@ -245,7 +333,7 @@
 			};
 		};
 
-		sata_phy: sata-phy@1800000 {
+		sata_phy: sata-phy@180100 {
 			compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
 			reg = <0x180100 0x0eff>;
 			reg-names = "phy";
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index adb33e3..a874d3a 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -7,7 +7,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		mips-hpt-frequency = <163125000>;
+		mips-hpt-frequency = <175625000>;
 
 		cpu@0 {
 			compatible = "brcm,bmips5200";
@@ -63,13 +63,14 @@
 
 		periph_intc: periph_intc@41b500 {
 			compatible = "brcm,bcm7038-l1-intc";
-			reg = <0x41b500 0x40>, <0x41b600 0x40>;
+			reg = <0x41b500 0x40>, <0x41b600 0x40>,
+				<0x41b700 0x40>, <0x41b800 0x40>;
 
 			interrupt-controller;
 			#interrupt-cells = <1>;
 
 			interrupt-parent = <&cpu_intc>;
-			interrupts = <2>, <3>;
+			interrupts = <2>, <3>, <2>, <3>;
 		};
 
 		sun_l2_intc: sun_l2_intc@403000 {
@@ -82,7 +83,7 @@
 		};
 
 		gisb-arb@400000 {
-			compatible = "brcm,bcm7400-gisb-arb";
+			compatible = "brcm,bcm7435-gisb-arb";
 			reg = <0x400000 0xdc>;
 			native-endian;
 			interrupt-parent = <&sun_l2_intc>;
@@ -101,14 +102,32 @@
 			compatible = "brcm,bcm7120-l2-intc";
 			reg = <0x406780 0x8>;
 
-			brcm,int-map-mask = <0x44>;
+			brcm,int-map-mask = <0x44>, <0x7000000>;
 			brcm,int-fwd-mask = <0x70000>;
 
 			interrupt-controller;
 			#interrupt-cells = <1>;
 
 			interrupt-parent = <&periph_intc>;
-			interrupts = <60>;
+			interrupts = <60>, <58>;
+			interrupt-names = "upg_main", "upg_bsc";
+		};
+
+		upg_aon_irq0_intc: upg_aon_irq0_intc@409480 {
+			compatible = "brcm,bcm7120-l2-intc";
+			reg = <0x409480 0x8>;
+
+			brcm,int-map-mask = <0x40>, <0x18000000>, <0x100000>;
+			brcm,int-fwd-mask = <0>;
+			brcm,irq-can-wake;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&periph_intc>;
+			interrupts = <61>, <59>, <64>;
+			interrupt-names = "upg_main_aon", "upg_bsc_aon",
+					  "upg_spi";
 		};
 
 		sun_top_ctrl: syscon@404000 {
@@ -133,6 +152,78 @@
 			status = "disabled";
 		};
 
+		uart1: serial@406b40 {
+			compatible = "ns16550a";
+			reg = <0x406b40 0x20>;
+			reg-io-width = <0x4>;
+			reg-shift = <0x2>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <67>;
+			clocks = <&uart_clk>;
+			status = "disabled";
+		};
+
+		uart2: serial@406b80 {
+			compatible = "ns16550a";
+			reg = <0x406b80 0x20>;
+			reg-io-width = <0x4>;
+			reg-shift = <0x2>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <68>;
+			clocks = <&uart_clk>;
+			status = "disabled";
+		};
+
+		bsca: i2c@406300 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406300 0x58>;
+		      interrupts = <26>;
+		      interrupt-names = "upg_bsca";
+		      status = "disabled";
+		};
+
+		bscb: i2c@409400 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_aon_irq0_intc>;
+		      reg = <0x409400 0x58>;
+		      interrupts = <28>;
+		      interrupt-names = "upg_bscb";
+		      status = "disabled";
+		};
+
+		bscc: i2c@406200 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406200 0x58>;
+		      interrupts = <24>;
+		      interrupt-names = "upg_bscc";
+		      status = "disabled";
+		};
+
+		bscd: i2c@406280 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_irq0_intc>;
+		      reg = <0x406280 0x58>;
+		      interrupts = <25>;
+		      interrupt-names = "upg_bscd";
+		      status = "disabled";
+		};
+
+		bsce: i2c@409180 {
+		      clock-frequency = <390000>;
+		      compatible = "brcm,brcmstb-i2c";
+		      interrupt-parent = <&upg_aon_irq0_intc>;
+		      reg = <0x409180 0x58>;
+		      interrupts = <27>;
+		      interrupt-names = "upg_bsce";
+		      status = "disabled";
+		};
+
 		enet0: ethernet@b80000 {
 			phy-mode = "internal";
 			phy-handle = <&phy1>;
@@ -235,5 +326,45 @@
 			interrupts = <78>;
 			status = "disabled";
 		};
+
+		sata: sata@181000 {
+			compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
+			reg-names = "ahci", "top-ctrl";
+			reg = <0x181000 0xa9c>, <0x180020 0x1c>;
+			interrupt-parent = <&periph_intc>;
+			interrupts = <45>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+
+			sata0: sata-port@0 {
+				reg = <0>;
+				phys = <&sata_phy0>;
+			};
+
+			sata1: sata-port@1 {
+				reg = <1>;
+				phys = <&sata_phy1>;
+			};
+		};
+
+		sata_phy: sata-phy@180100 {
+			compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
+			reg = <0x180100 0x0eff>;
+			reg-names = "phy";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+
+			sata_phy0: sata-phy@0 {
+				reg = <0>;
+				#phy-cells = <0>;
+			};
+
+			sata_phy1: sata-phy@1 {
+				reg = <1>;
+				#phy-cells = <0>;
+			};
+		};
 	};
 };
diff --git a/arch/mips/boot/dts/brcm/bcm96358nb4ser.dts b/arch/mips/boot/dts/brcm/bcm96358nb4ser.dts
new file mode 100644
index 0000000..f412117
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm96358nb4ser.dts
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/include/ "bcm6358.dtsi"
+
+/ {
+	compatible = "sfr,nb4-ser", "brcm,bcm6358";
+	model = "SFR Neufbox 4 (Sercomm)";
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x00000000 0x02000000>;
+	};
+
+	chosen {
+		stdout-path = &uart0;
+	};
+};
+
+&leds0 {
+	status = "ok";
+
+	led@0 {
+		reg = <0>;
+		active-low;
+		label = "nb4-ser:white:alarm";
+	};
+	led@2 {
+		reg = <2>;
+		active-low;
+		label = "nb4-ser:white:tv";
+	};
+	led@3 {
+		reg = <3>;
+		active-low;
+		label = "nb4-ser:white:tel";
+	};
+	led@4 {
+		reg = <4>;
+		active-low;
+		label = "nb4-ser:white:adsl";
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
index 0e890c2..8c71c68 100644
--- a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
+++ b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
@@ -22,10 +22,10 @@
 };
 
 /* FIXME: need to set up USB_CTRL registers first */
-&ehci0 {
+&ehci {
 	status = "disabled";
 };
 
-&ohci0 {
+&ohci {
 	status = "disabled";
 };
diff --git a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
index e046b11..f2449d1 100644
--- a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
@@ -21,6 +21,30 @@
 	status = "okay";
 };
 
+&uart1 {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&bsca {
+	status = "okay";
+};
+
+&bscb {
+	status = "okay";
+};
+
+&bscc {
+	status = "okay";
+};
+
+&bscd {
+	status = "okay";
+};
+
 /* FIXME: USB is wonky; disable it for now */
 &ehci0 {
 	status = "disabled";
diff --git a/arch/mips/boot/dts/brcm/bcm97360svmb.dts b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
index d48462e..73124be 100644
--- a/arch/mips/boot/dts/brcm/bcm97360svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
@@ -56,3 +56,11 @@
 &ohci0 {
 	status = "okay";
 };
+
+&sata {
+	status = "okay";
+};
+
+&sata_phy {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97420c.dts b/arch/mips/boot/dts/brcm/bcm97420c.dts
index 67fe1f3..600d57a 100644
--- a/arch/mips/boot/dts/brcm/bcm97420c.dts
+++ b/arch/mips/boot/dts/brcm/bcm97420c.dts
@@ -23,6 +23,34 @@
 	status = "okay";
 };
 
+&uart1 {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&bsca {
+	status = "okay";
+};
+
+&bscb {
+	status = "okay";
+};
+
+&bscc {
+	status = "okay";
+};
+
+&bscd {
+	status = "okay";
+};
+
+&bsce {
+	status = "okay";
+};
+
 /* FIXME: MAC driver comes up but cannot attach to PHY */
 &enet0 {
 	status = "disabled";
diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
index 689c68a..119c714 100644
--- a/arch/mips/boot/dts/brcm/bcm97425svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
@@ -23,6 +23,34 @@
 	status = "okay";
 };
 
+&uart1 {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&bsca {
+	status = "okay";
+};
+
+&bscb {
+	status = "okay";
+};
+
+&bscc {
+	status = "okay";
+};
+
+&bscd {
+	status = "okay";
+};
+
+&bsce {
+	status = "okay";
+};
+
 &enet0 {
 	status = "okay";
 };
diff --git a/arch/mips/boot/dts/brcm/bcm97435svmb.dts b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
index 1df0881..43e3ba2 100644
--- a/arch/mips/boot/dts/brcm/bcm97435svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
@@ -14,7 +14,7 @@
 	};
 
 	chosen {
-		bootargs = "console=ttyS0,115200 maxcpus=1";
+		bootargs = "console=ttyS0,115200";
 		stdout-path = &uart0;
 	};
 };
@@ -23,6 +23,34 @@
 	status = "okay";
 };
 
+&uart1 {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&bsca {
+	status = "okay";
+};
+
+&bscb {
+	status = "okay";
+};
+
+&bscc {
+	status = "okay";
+};
+
+&bscd {
+	status = "okay";
+};
+
+&bsce {
+	status = "okay";
+};
+
 &enet0 {
 	status = "okay";
 };
@@ -58,3 +86,11 @@
 &ohci3 {
 	status = "okay";
 };
+
+&sata {
+	status = "okay";
+};
+
+&sata_phy {
+	status = "okay";
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
new file mode 100644
index 0000000..d6bc994
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
@@ -0,0 +1,78 @@
+/*
+ * Device tree source for D-Link DSR-1000N.
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/include/ "octeon_3xxx.dtsi"
+
+/ {
+	model = "dlink,dsr-1000n";
+
+	soc@0 {
+		smi0: mdio@1180000001800 {
+			phy8: ethernet-phy@8 {
+				reg = <8>;
+				compatible = "ethernet-phy-ieee802.3-c22";
+			};
+		};
+
+		pip: pip@11800a0000000 {
+			interface@0 {
+				ethernet@0 {
+					fixed-link {
+						speed = <1000>;
+						full-duplex;
+					};
+				};
+				ethernet@1 {
+					fixed-link {
+						speed = <1000>;
+						full-duplex;
+					};
+				};
+				ethernet@2 {
+					phy-handle = <&phy8>;
+				};
+			};
+		};
+
+		twsi0: i2c@1180000001000 {
+			rtc@68 {
+				compatible = "dallas,ds1337";
+				reg = <0x68>;
+			};
+		};
+
+		uart0: serial@1180000000800 {
+			clock-frequency = <500000000>;
+		};
+
+		usbn: usbn@1180068000000 {
+			refclk-frequency = <12000000>;
+			refclk-type = "crystal";
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		usb1 {
+			label = "usb1";
+			gpios = <&gpio 9 1>; /* Active low */
+		};
+
+		usb2 {
+			label = "usb2";
+			gpios = <&gpio 10 1>; /* Active low */
+		};
+	};
+
+	aliases {
+		pip = &pip;
+	};
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
index 9c48e05..de61f02 100644
--- a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
@@ -1,4 +1,3 @@
-/dts-v1/;
 /*
  * OCTEON 3XXX, 5XXX, 63XX device tree skeleton.
  *
@@ -6,56 +5,12 @@
  * use.	 Because of this, it contains a super-set of the available
  * devices and properties.
  */
+
+/include/ "octeon_3xxx.dtsi"
+
 / {
-	compatible = "cavium,octeon-3860";
-	#address-cells = <2>;
-	#size-cells = <2>;
-	interrupt-parent = <&ciu>;
-
 	soc@0 {
-		compatible = "simple-bus";
-		#address-cells = <2>;
-		#size-cells = <2>;
-		ranges; /* Direct mapping */
-
-		ciu: interrupt-controller@1070000000000 {
-			compatible = "cavium,octeon-3860-ciu";
-			interrupt-controller;
-			/* Interrupts are specified by two parts:
-			 * 1) Controller register (0 or 1)
-			 * 2) Bit within the register (0..63)
-			 */
-			#interrupt-cells = <2>;
-			reg = <0x10700 0x00000000 0x0 0x7000>;
-		};
-
-		gpio: gpio-controller@1070000000800 {
-			#gpio-cells = <2>;
-			compatible = "cavium,octeon-3860-gpio";
-			reg = <0x10700 0x00000800 0x0 0x100>;
-			gpio-controller;
-			/* Interrupts are specified by two parts:
-			 * 1) GPIO pin number (0..15)
-			 * 2) Triggering (1 - edge rising
-			 *		  2 - edge falling
-			 *		  4 - level active high
-			 *		  8 - level active low)
-			 */
-			interrupt-controller;
-			#interrupt-cells = <2>;
-			/* The GPIO pins connect to 16 consecutive CIU bits */
-			interrupts = <0 16>, <0 17>, <0 18>, <0 19>,
-				     <0 20>, <0 21>, <0 22>, <0 23>,
-				     <0 24>, <0 25>, <0 26>, <0 27>,
-				     <0 28>, <0 29>, <0 30>, <0 31>;
-		};
-
 		smi0: mdio@1180000001800 {
-			compatible = "cavium,octeon-3860-mdio";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0x11800 0x00001800 0x0 0x40>;
-
 			phy0: ethernet-phy@0 {
 				compatible = "marvell,88e1118";
 				marvell,reg-init =
@@ -220,35 +175,16 @@
 		};
 
 		pip: pip@11800a0000000 {
-			compatible = "cavium,octeon-3860-pip";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			reg = <0x11800 0xa0000000 0x0 0x2000>;
-
 			interface@0 {
-				compatible = "cavium,octeon-3860-pip-interface";
-				#address-cells = <1>;
-				#size-cells = <0>;
-				reg = <0>; /* interface */
-
 				ethernet@0 {
-					compatible = "cavium,octeon-3860-pip-port";
-					reg = <0x0>; /* Port */
-					local-mac-address = [ 00 00 00 00 00 00 ];
 					phy-handle = <&phy2>;
 					cavium,alt-phy-handle = <&phy100>;
 				};
 				ethernet@1 {
-					compatible = "cavium,octeon-3860-pip-port";
-					reg = <0x1>; /* Port */
-					local-mac-address = [ 00 00 00 00 00 00 ];
 					phy-handle = <&phy3>;
 					cavium,alt-phy-handle = <&phy101>;
 				};
 				ethernet@2 {
-					compatible = "cavium,octeon-3860-pip-port";
-					reg = <0x2>; /* Port */
-					local-mac-address = [ 00 00 00 00 00 00 ];
 					phy-handle = <&phy4>;
 					cavium,alt-phy-handle = <&phy102>;
 				};
@@ -322,11 +258,6 @@
 			};
 
 			interface@1 {
-				compatible = "cavium,octeon-3860-pip-interface";
-				#address-cells = <1>;
-				#size-cells = <0>;
-				reg = <1>; /* interface */
-
 				ethernet@0 {
 					compatible = "cavium,octeon-3860-pip-port";
 					reg = <0x0>; /* Port */
@@ -355,13 +286,6 @@
 		};
 
 		twsi0: i2c@1180000001000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "cavium,octeon-3860-twsi";
-			reg = <0x11800 0x00001000 0x0 0x200>;
-			interrupts = <0 45>;
-			clock-frequency = <100000>;
-
 			rtc@68 {
 				compatible = "dallas,ds1337";
 				reg = <0x68>;
@@ -381,15 +305,6 @@
 			clock-frequency = <100000>;
 		};
 
-		uart0: serial@1180000000800 {
-			compatible = "cavium,octeon-3860-uart","ns16550";
-			reg = <0x11800 0x00000800 0x0 0x400>;
-			clock-frequency = <0>;
-			current-speed = <115200>;
-			reg-shift = <3>;
-			interrupts = <0 34>;
-		};
-
 		uart1: serial@1180000000c00 {
 			compatible = "cavium,octeon-3860-uart","ns16550";
 			reg = <0x11800 0x00000c00 0x0 0x400>;
@@ -409,98 +324,6 @@
 		};
 
 		bootbus: bootbus@1180000000000 {
-			compatible = "cavium,octeon-3860-bootbus";
-			reg = <0x11800 0x00000000 0x0 0x200>;
-			/* The chip select number and offset */
-			#address-cells = <2>;
-			/* The size of the chip select region */
-			#size-cells = <1>;
-			ranges = <0 0  0x0 0x1f400000  0xc00000>,
-				 <1 0  0x10000 0x30000000  0>,
-				 <2 0  0x10000 0x40000000  0>,
-				 <3 0  0x10000 0x50000000  0>,
-				 <4 0  0x0 0x1d020000  0x10000>,
-				 <5 0  0x0 0x1d040000  0x10000>,
-				 <6 0  0x0 0x1d050000  0x10000>,
-				 <7 0  0x10000 0x90000000  0>;
-
-			cavium,cs-config@0 {
-				compatible = "cavium,octeon-3860-bootbus-config";
-				cavium,cs-index = <0>;
-				cavium,t-adr  = <20>;
-				cavium,t-ce   = <60>;
-				cavium,t-oe   = <60>;
-				cavium,t-we   = <45>;
-				cavium,t-rd-hld = <35>;
-				cavium,t-wr-hld = <45>;
-				cavium,t-pause	= <0>;
-				cavium,t-wait	= <0>;
-				cavium,t-page	= <35>;
-				cavium,t-rd-dly = <0>;
-
-				cavium,pages	 = <0>;
-				cavium,bus-width = <8>;
-			};
-			cavium,cs-config@4 {
-				compatible = "cavium,octeon-3860-bootbus-config";
-				cavium,cs-index = <4>;
-				cavium,t-adr  = <320>;
-				cavium,t-ce   = <320>;
-				cavium,t-oe   = <320>;
-				cavium,t-we   = <320>;
-				cavium,t-rd-hld = <320>;
-				cavium,t-wr-hld = <320>;
-				cavium,t-pause	= <320>;
-				cavium,t-wait	= <320>;
-				cavium,t-page	= <320>;
-				cavium,t-rd-dly = <0>;
-
-				cavium,pages	 = <0>;
-				cavium,bus-width = <8>;
-			};
-			cavium,cs-config@5 {
-				compatible = "cavium,octeon-3860-bootbus-config";
-				cavium,cs-index = <5>;
-				cavium,t-adr  = <5>;
-				cavium,t-ce   = <300>;
-				cavium,t-oe   = <125>;
-				cavium,t-we   = <150>;
-				cavium,t-rd-hld = <100>;
-				cavium,t-wr-hld = <30>;
-				cavium,t-pause	= <0>;
-				cavium,t-wait	= <30>;
-				cavium,t-page	= <320>;
-				cavium,t-rd-dly = <0>;
-
-				cavium,pages	 = <0>;
-				cavium,bus-width = <16>;
-			};
-			cavium,cs-config@6 {
-				compatible = "cavium,octeon-3860-bootbus-config";
-				cavium,cs-index = <6>;
-				cavium,t-adr  = <5>;
-				cavium,t-ce   = <300>;
-				cavium,t-oe   = <270>;
-				cavium,t-we   = <150>;
-				cavium,t-rd-hld = <100>;
-				cavium,t-wr-hld = <70>;
-				cavium,t-pause	= <0>;
-				cavium,t-wait	= <0>;
-				cavium,t-page	= <320>;
-				cavium,t-rd-dly = <0>;
-
-				cavium,pages	 = <0>;
-				cavium,wait-mode;
-				cavium,bus-width = <16>;
-			};
-
-			flash0: nor@0,0 {
-				compatible = "cfi-flash";
-				reg = <0 0 0x800000>;
-				#address-cells = <1>;
-				#size-cells = <1>;
-			};
-
 			led0: led-display@4,0 {
 				compatible = "avago,hdsp-253x";
 				reg = <4 0x20 0x20>, <4 0 0x20>;
@@ -515,17 +338,6 @@
 			};
 		};
 
-		dma0: dma-engine@1180000000100 {
-			compatible = "cavium,octeon-5750-bootbus-dma";
-			reg = <0x11800 0x00000100 0x0 0x8>;
-			interrupts = <0 63>;
-		};
-		dma1: dma-engine@1180000000108 {
-			compatible = "cavium,octeon-5750-bootbus-dma";
-			reg = <0x11800 0x00000108 0x0 0x8>;
-			interrupts = <0 63>;
-		};
-
 		uctl: uctl@118006f000000 {
 			compatible = "cavium,octeon-6335-uctl";
 			reg = <0x11800 0x6f000000 0x0 0x100>;
@@ -552,21 +364,10 @@
 		};
 
 		usbn: usbn@1180068000000 {
-			compatible = "cavium,octeon-5750-usbn";
-			reg = <0x11800 0x68000000 0x0 0x1000>;
-			ranges; /* Direct mapping */
-			#address-cells = <2>;
-			#size-cells = <2>;
 			/* 12MHz, 24MHz and 48MHz allowed */
 			refclk-frequency = <12000000>;
 			/* Either "crystal" or "external" */
 			refclk-type = "crystal";
-
-			usbc@16f0010000000 {
-				compatible = "cavium,octeon-5750-usbc";
-				reg = <0x16f00 0x10000000 0x0 0x80000>;
-				interrupts = <0 56>;
-			};
 		};
 	};
 
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi
new file mode 100644
index 0000000..5302148
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi
@@ -0,0 +1,231 @@
+/* OCTEON 3XXX DTS common parts. */
+
+/dts-v1/;
+
+/ {
+	compatible = "cavium,octeon-3860";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&ciu>;
+
+	soc@0 {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges; /* Direct mapping */
+
+		ciu: interrupt-controller@1070000000000 {
+			compatible = "cavium,octeon-3860-ciu";
+			interrupt-controller;
+			/* Interrupts are specified by two parts:
+			 * 1) Controller register (0 or 1)
+			 * 2) Bit within the register (0..63)
+			 */
+			#interrupt-cells = <2>;
+			reg = <0x10700 0x00000000 0x0 0x7000>;
+		};
+
+		gpio: gpio-controller@1070000000800 {
+			#gpio-cells = <2>;
+			compatible = "cavium,octeon-3860-gpio";
+			reg = <0x10700 0x00000800 0x0 0x100>;
+			gpio-controller;
+			/* Interrupts are specified by two parts:
+			 * 1) GPIO pin number (0..15)
+			 * 2) Triggering (1 - edge rising
+			 *		  2 - edge falling
+			 *		  4 - level active high
+			 *		  8 - level active low)
+			 */
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			/* The GPIO pins connect to 16 consecutive CIU bits */
+			interrupts = <0 16>, <0 17>, <0 18>, <0 19>,
+				     <0 20>, <0 21>, <0 22>, <0 23>,
+				     <0 24>, <0 25>, <0 26>, <0 27>,
+				     <0 28>, <0 29>, <0 30>, <0 31>;
+		};
+
+		smi0: mdio@1180000001800 {
+			compatible = "cavium,octeon-3860-mdio";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x11800 0x00001800 0x0 0x40>;
+		};
+
+		pip: pip@11800a0000000 {
+			compatible = "cavium,octeon-3860-pip";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x11800 0xa0000000 0x0 0x2000>;
+
+			interface@0 {
+				compatible = "cavium,octeon-3860-pip-interface";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <0>; /* interface */
+
+				ethernet@0 {
+					compatible = "cavium,octeon-3860-pip-port";
+					reg = <0x0>; /* Port */
+					local-mac-address = [ 00 00 00 00 00 00 ];
+				};
+				ethernet@1 {
+					compatible = "cavium,octeon-3860-pip-port";
+					reg = <0x1>; /* Port */
+					local-mac-address = [ 00 00 00 00 00 00 ];
+				};
+				ethernet@2 {
+					compatible = "cavium,octeon-3860-pip-port";
+					reg = <0x2>; /* Port */
+					local-mac-address = [ 00 00 00 00 00 00 ];
+				};
+			};
+
+			interface@1 {
+				compatible = "cavium,octeon-3860-pip-interface";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <1>; /* interface */
+			};
+		};
+
+		twsi0: i2c@1180000001000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "cavium,octeon-3860-twsi";
+			reg = <0x11800 0x00001000 0x0 0x200>;
+			interrupts = <0 45>;
+			clock-frequency = <100000>;
+		};
+
+		uart0: serial@1180000000800 {
+			compatible = "cavium,octeon-3860-uart","ns16550";
+			reg = <0x11800 0x00000800 0x0 0x400>;
+			clock-frequency = <0>;
+			current-speed = <115200>;
+			reg-shift = <3>;
+			interrupts = <0 34>;
+		};
+
+		bootbus: bootbus@1180000000000 {
+			compatible = "cavium,octeon-3860-bootbus";
+			reg = <0x11800 0x00000000 0x0 0x200>;
+			/* The chip select number and offset */
+			#address-cells = <2>;
+			/* The size of the chip select region */
+			#size-cells = <1>;
+			ranges = <0 0  0x0 0x1f400000  0xc00000>,
+				 <1 0  0x10000 0x30000000  0>,
+				 <2 0  0x10000 0x40000000  0>,
+				 <3 0  0x10000 0x50000000  0>,
+				 <4 0  0x0 0x1d020000  0x10000>,
+				 <5 0  0x0 0x1d040000  0x10000>,
+				 <6 0  0x0 0x1d050000  0x10000>,
+				 <7 0  0x10000 0x90000000  0>;
+
+			cavium,cs-config@0 {
+				compatible = "cavium,octeon-3860-bootbus-config";
+				cavium,cs-index = <0>;
+				cavium,t-adr  = <20>;
+				cavium,t-ce   = <60>;
+				cavium,t-oe   = <60>;
+				cavium,t-we   = <45>;
+				cavium,t-rd-hld = <35>;
+				cavium,t-wr-hld = <45>;
+				cavium,t-pause	= <0>;
+				cavium,t-wait	= <0>;
+				cavium,t-page	= <35>;
+				cavium,t-rd-dly = <0>;
+
+				cavium,pages	 = <0>;
+				cavium,bus-width = <8>;
+			};
+			cavium,cs-config@4 {
+				compatible = "cavium,octeon-3860-bootbus-config";
+				cavium,cs-index = <4>;
+				cavium,t-adr  = <320>;
+				cavium,t-ce   = <320>;
+				cavium,t-oe   = <320>;
+				cavium,t-we   = <320>;
+				cavium,t-rd-hld = <320>;
+				cavium,t-wr-hld = <320>;
+				cavium,t-pause	= <320>;
+				cavium,t-wait	= <320>;
+				cavium,t-page	= <320>;
+				cavium,t-rd-dly = <0>;
+
+				cavium,pages	 = <0>;
+				cavium,bus-width = <8>;
+			};
+			cavium,cs-config@5 {
+				compatible = "cavium,octeon-3860-bootbus-config";
+				cavium,cs-index = <5>;
+				cavium,t-adr  = <5>;
+				cavium,t-ce   = <300>;
+				cavium,t-oe   = <125>;
+				cavium,t-we   = <150>;
+				cavium,t-rd-hld = <100>;
+				cavium,t-wr-hld = <30>;
+				cavium,t-pause	= <0>;
+				cavium,t-wait	= <30>;
+				cavium,t-page	= <320>;
+				cavium,t-rd-dly = <0>;
+
+				cavium,pages	 = <0>;
+				cavium,bus-width = <16>;
+			};
+			cavium,cs-config@6 {
+				compatible = "cavium,octeon-3860-bootbus-config";
+				cavium,cs-index = <6>;
+				cavium,t-adr  = <5>;
+				cavium,t-ce   = <300>;
+				cavium,t-oe   = <270>;
+				cavium,t-we   = <150>;
+				cavium,t-rd-hld = <100>;
+				cavium,t-wr-hld = <70>;
+				cavium,t-pause	= <0>;
+				cavium,t-wait	= <0>;
+				cavium,t-page	= <320>;
+				cavium,t-rd-dly = <0>;
+
+				cavium,pages	 = <0>;
+				cavium,wait-mode;
+				cavium,bus-width = <16>;
+			};
+
+			flash0: nor@0,0 {
+				compatible = "cfi-flash";
+				reg = <0 0 0x800000>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+			};
+		};
+
+		dma0: dma-engine@1180000000100 {
+			compatible = "cavium,octeon-5750-bootbus-dma";
+			reg = <0x11800 0x00000100 0x0 0x8>;
+			interrupts = <0 63>;
+		};
+
+		dma1: dma-engine@1180000000108 {
+			compatible = "cavium,octeon-5750-bootbus-dma";
+			reg = <0x11800 0x00000108 0x0 0x8>;
+			interrupts = <0 63>;
+		};
+
+		usbn: usbn@1180068000000 {
+			compatible = "cavium,octeon-5750-usbn";
+			reg = <0x11800 0x68000000 0x0 0x1000>;
+			ranges; /* Direct mapping */
+			#address-cells = <2>;
+			#size-cells = <2>;
+
+			usbc@16f0010000000 {
+				compatible = "cavium,octeon-5750-usbc";
+				reg = <0x16f00 0x10000000 0x0 0x80000>;
+				interrupts = <0 56>;
+			};
+		};
+	};
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts
new file mode 100644
index 0000000..243e5dc
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts
@@ -0,0 +1,59 @@
+/*
+ * Device tree source for EdgeRouter Lite.
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/include/ "octeon_3xxx.dtsi"
+
+/ {
+	model = "ubnt,e100";
+
+	soc@0 {
+		smi0: mdio@1180000001800 {
+			phy5: ethernet-phy@5 {
+				reg = <5>;
+				compatible = "ethernet-phy-ieee802.3-c22";
+			};
+			phy6: ethernet-phy@6 {
+				reg = <6>;
+				compatible = "ethernet-phy-ieee802.3-c22";
+			};
+			phy7: ethernet-phy@7 {
+				reg = <7>;
+				compatible = "ethernet-phy-ieee802.3-c22";
+			};
+		};
+
+		pip: pip@11800a0000000 {
+			interface@0 {
+				ethernet@0 {
+					phy-handle = <&phy7>;
+				};
+				ethernet@1 {
+					phy-handle = <&phy6>;
+				};
+				ethernet@2 {
+					phy-handle = <&phy5>;
+				};
+			};
+		};
+
+		uart0: serial@1180000000800 {
+			clock-frequency = <500000000>;
+		};
+
+		usbn: usbn@1180068000000 {
+			refclk-frequency = <12000000>;
+			refclk-type = "crystal";
+		};
+	};
+
+	aliases {
+		pip = &pip;
+	};
+};
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 8b2437c..4a9c8f2 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -65,4 +65,18 @@
 		clocks = <&ext>, <&cgu JZ4740_CLK_UART1>;
 		clock-names = "baud", "module";
 	};
+
+	uhc: uhc@13030000 {
+		compatible = "ingenic,jz4740-ohci", "generic-ohci";
+		reg = <0x13030000 0x1000>;
+
+		clocks = <&cgu JZ4740_CLK_UHC>;
+		assigned-clocks = <&cgu JZ4740_CLK_UHC>;
+		assigned-clock-rates = <48000000>;
+
+		interrupt-parent = <&intc>;
+		interrupts = <3>;
+
+		status = "disabled";
+	};
 };
diff --git a/arch/mips/boot/dts/lantiq/easy50712.dts b/arch/mips/boot/dts/lantiq/easy50712.dts
index 143b8a3..b599625 100644
--- a/arch/mips/boot/dts/lantiq/easy50712.dts
+++ b/arch/mips/boot/dts/lantiq/easy50712.dts
@@ -52,7 +52,7 @@
 		};
 
 		gpio: pinmux@E100B10 {
-			compatible = "lantiq,pinctrl-xway";
+			compatible = "lantiq,danube-pinctrl";
 			pinctrl-names = "default";
 			pinctrl-0 = <&state_default>;
 
diff --git a/arch/mips/boot/dts/pic32/pic32mzda-clk.dtsi b/arch/mips/boot/dts/pic32/pic32mzda-clk.dtsi
deleted file mode 100644
index ef13350..0000000
--- a/arch/mips/boot/dts/pic32/pic32mzda-clk.dtsi
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Device Tree Source for PIC32MZDA clock data
- *
- * Purna Chandra Mandal <purna.mandal@microchip.com>
- * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
- *
- * Licensed under GPLv2 or later.
- */
-
-/* all fixed rate clocks */
-
-/ {
-	POSC:posc_clk { /* On-chip primary oscillator */
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <24000000>;
-	};
-
-	FRC:frc_clk { /* internal FRC oscillator */
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <8000000>;
-	};
-
-	BFRC:bfrc_clk { /* internal backup FRC oscillator */
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <8000000>;
-	};
-
-	LPRC:lprc_clk { /* internal low-power FRC oscillator */
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <32000>;
-	};
-
-	/* UPLL provides clock to USBCORE */
-	UPLL:usb_phy_clk {
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <24000000>;
-		clock-output-names = "usbphy_clk";
-	};
-
-	TxCKI:txcki_clk { /* external clock input on TxCLKI pin */
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <4000000>;
-		status = "disabled";
-	};
-
-	/* external clock input on REFCLKIx pin */
-	REFIx:refix_clk {
-		#clock-cells = <0>;
-		compatible = "fixed-clock";
-		clock-frequency = <24000000>;
-		status = "disabled";
-	};
-
-	/* PIC32 specific clks */
-	pic32_clktree {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		reg = <0x1f801200 0x200>;
-		compatible = "microchip,pic32mzda-clk";
-		ranges = <0 0x1f801200 0x200>;
-
-		/* secondary oscillator; external input on SOSCI pin */
-		SOSC:sosc_clk@0 {
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-sosc";
-			clock-frequency = <32768>;
-			reg = <0x000 0x10>,   /* enable reg */
-			      <0x1d0 0x10>; /* status reg */
-			microchip,bit-mask = <0x02>; /* enable mask */
-			microchip,status-bit-mask = <0x10>; /* status-mask*/
-		};
-
-		FRCDIV:frcdiv_clk {
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-frcdivclk";
-			clocks = <&FRC>;
-			clock-output-names = "frcdiv_clk";
-		};
-
-		/* System PLL clock */
-		SYSPLL:spll_clk@020 {
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-syspll";
-			reg = <0x020 0x10>, /* SPLL register */
-			      <0x1d0 0x10>; /* CLKSTAT register */
-			clocks = <&POSC>, <&FRC>;
-			clock-output-names = "sys_pll";
-			microchip,status-bit-mask = <0x80>; /* SPLLRDY */
-		};
-
-		/* system clock; mux with postdiv & slew */
-		SYSCLK:sys_clk@1c0 {
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-sysclk-v2";
-			reg = <0x1c0 0x04>; /* SLEWCON */
-			clocks = <&FRCDIV>, <&SYSPLL>, <&POSC>, <&SOSC>,
-				 <&LPRC>, <&FRCDIV>;
-			microchip,clock-indices = <0>, <1>, <2>, <4>,
-						  <5>, <7>;
-			clock-output-names = "sys_clk";
-		};
-
-		/* Peripheral bus1 clock */
-		PBCLK1:pb1_clk@140 {
-			reg = <0x140 0x10>;
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-pbclk";
-			clocks = <&SYSCLK>;
-			clock-output-names = "pb1_clk";
-			/* used by system modules, not gateable */
-			microchip,ignore-unused;
-		};
-
-		/* Peripheral bus2 clock */
-		PBCLK2:pb2_clk@150 {
-			reg = <0x150 0x10>;
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-pbclk";
-			clocks = <&SYSCLK>;
-			clock-output-names = "pb2_clk";
-			/* avoid gating even if unused */
-			microchip,ignore-unused;
-		};
-
-		/* Peripheral bus3 clock */
-		PBCLK3:pb3_clk@160 {
-			reg = <0x160 0x10>;
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-pbclk";
-			clocks = <&SYSCLK>;
-			clock-output-names = "pb3_clk";
-		};
-
-		/* Peripheral bus4 clock(I/O ports, GPIO) */
-		PBCLK4:pb4_clk@170 {
-			reg = <0x170 0x10>;
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-pbclk";
-			clocks = <&SYSCLK>;
-			clock-output-names = "pb4_clk";
-		};
-
-		/* Peripheral bus clock */
-		PBCLK5:pb5_clk@180 {
-			reg = <0x180 0x10>;
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-pbclk";
-			clocks = <&SYSCLK>;
-			clock-output-names = "pb5_clk";
-		};
-
-		/* Peripheral Bus6 clock; */
-		PBCLK6:pb6_clk@190 {
-			reg = <0x190 0x10>;
-			compatible = "microchip,pic32mzda-pbclk";
-			clocks = <&SYSCLK>;
-			#clock-cells = <0>;
-		};
-
-		/* Peripheral bus7 clock */
-		PBCLK7:pb7_clk@1a0 {
-			reg = <0x1a0 0x10>;
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-pbclk";
-			/* CPU is driven by this clock; so named */
-			clock-output-names = "cpu_clk";
-			clocks = <&SYSCLK>;
-		};
-
-		/* Reference Oscillator clock for SPI/I2S */
-		REFCLKO1:refo1_clk@80 {
-			reg = <0x080 0x20>;
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-refoclk";
-			clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
-				 <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
-			microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
-						  <5>, <7>, <8>, <9>;
-			clock-output-names = "refo1_clk";
-		};
-
-		/* Reference Oscillator clock for SQI */
-		REFCLKO2:refo2_clk@a0 {
-			reg = <0x0a0 0x20>;
-			#clock-cells = <0>;
-			compatible = "microchip,pic32mzda-refoclk";
-			clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
-				 <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
-			microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
-						  <5>, <7>, <8>, <9>;
-			clock-output-names = "refo2_clk";
-		};
-
-		/* Reference Oscillator clock, ADC */
-		REFCLKO3:refo3_clk@c0 {
-			reg = <0x0c0 0x20>;
-			compatible = "microchip,pic32mzda-refoclk";
-			clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
-				 <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
-			microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
-						  <5>, <7>, <8>, <9>;
-			#clock-cells = <0>;
-			clock-output-names = "refo3_clk";
-		};
-
-		/* Reference Oscillator clock */
-		REFCLKO4:refo4_clk@e0 {
-			reg = <0x0e0 0x20>;
-			compatible = "microchip,pic32mzda-refoclk";
-			clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
-				 <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
-			microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
-						  <5>, <7>, <8>, <9>;
-			#clock-cells = <0>;
-			clock-output-names = "refo4_clk";
-		};
-
-		/* Reference Oscillator clock, LCD */
-		REFCLKO5:refo5_clk@100 {
-			reg = <0x100 0x20>;
-			compatible = "microchip,pic32mzda-refoclk";
-			clocks = <&SYSCLK>,<&PBCLK1>,<&POSC>,<&FRC>,<&LPRC>,
-				 <&SOSC>,<&SYSPLL>,<&REFIx>,<&BFRC>;
-			microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
-						  <5>, <7>, <8>, <9>;
-			#clock-cells = <0>;
-			clock-output-names = "refo5_clk";
-		};
-	};
-};
diff --git a/arch/mips/boot/dts/pic32/pic32mzda.dtsi b/arch/mips/boot/dts/pic32/pic32mzda.dtsi
index ad9e3318..5353a63 100644
--- a/arch/mips/boot/dts/pic32/pic32mzda.dtsi
+++ b/arch/mips/boot/dts/pic32/pic32mzda.dtsi
@@ -6,11 +6,9 @@
  * published by the Free Software Foundation.
  *
  */
-
+#include <dt-bindings/clock/microchip,pic32-clock.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
-#include "pic32mzda-clk.dtsi"
-
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
@@ -50,6 +48,29 @@
 		interrupts = <0 IRQ_TYPE_EDGE_RISING>;
 	};
 
+	/* external clock input on TxCLKI pin */
+	txcki: txcki_clk {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <4000000>;
+		status = "disabled";
+	};
+
+	/* external input on REFCLKIx pin */
+	refix: refix_clk {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+		status = "disabled";
+	};
+
+	rootclk: clock-controller@1f801200 {
+		compatible = "microchip,pic32mzda-clk";
+		reg = <0x1f801200 0x200>;
+		#clock-cells = <1>;
+		microchip,pic32mzda-sosc;
+	};
+
 	evic: interrupt-controller@1f810000 {
 		compatible = "microchip,pic32mzda-evic";
 		interrupt-controller;
@@ -63,7 +84,7 @@
 		#size-cells = <1>;
 		compatible = "microchip,pic32mzda-pinctrl";
 		reg = <0x1f801400 0x400>;
-		clocks = <&PBCLK1>;
+		clocks = <&rootclk PB1CLK>;
 	};
 
 	/* PORTA */
@@ -75,7 +96,7 @@
 		gpio-controller;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		clocks = <&PBCLK4>;
+		clocks = <&rootclk PB4CLK>;
 		microchip,gpio-bank = <0>;
 		gpio-ranges = <&pic32_pinctrl 0 0 16>;
 	};
@@ -89,7 +110,7 @@
 		gpio-controller;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		clocks = <&PBCLK4>;
+		clocks = <&rootclk PB4CLK>;
 		microchip,gpio-bank = <1>;
 		gpio-ranges = <&pic32_pinctrl 0 16 16>;
 	};
@@ -103,7 +124,7 @@
 		gpio-controller;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		clocks = <&PBCLK4>;
+		clocks = <&rootclk PB4CLK>;
 		microchip,gpio-bank = <2>;
 		gpio-ranges = <&pic32_pinctrl 0 32 16>;
 	};
@@ -117,7 +138,7 @@
 		gpio-controller;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		clocks = <&PBCLK4>;
+		clocks = <&rootclk PB4CLK>;
 		microchip,gpio-bank = <3>;
 		gpio-ranges = <&pic32_pinctrl 0 48 16>;
 	};
@@ -131,7 +152,7 @@
 		gpio-controller;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		clocks = <&PBCLK4>;
+		clocks = <&rootclk PB4CLK>;
 		microchip,gpio-bank = <4>;
 		gpio-ranges = <&pic32_pinctrl 0 64 16>;
 	};
@@ -145,7 +166,7 @@
 		gpio-controller;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		clocks = <&PBCLK4>;
+		clocks = <&rootclk PB4CLK>;
 		microchip,gpio-bank = <5>;
 		gpio-ranges = <&pic32_pinctrl 0 80 16>;
 	};
@@ -159,7 +180,7 @@
 		gpio-controller;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		clocks = <&PBCLK4>;
+		clocks = <&rootclk PB4CLK>;
 		microchip,gpio-bank = <6>;
 		gpio-ranges = <&pic32_pinctrl 0 96 16>;
 	};
@@ -173,7 +194,7 @@
 		gpio-controller;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		clocks = <&PBCLK4>;
+		clocks = <&rootclk PB4CLK>;
 		microchip,gpio-bank = <7>;
 		gpio-ranges = <&pic32_pinctrl 0 112 16>;
 	};
@@ -189,7 +210,7 @@
 		gpio-controller;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		clocks = <&PBCLK4>;
+		clocks = <&rootclk PB4CLK>;
 		microchip,gpio-bank = <8>;
 		gpio-ranges = <&pic32_pinctrl 0 128 16>;
 	};
@@ -203,7 +224,7 @@
 		gpio-controller;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		clocks = <&PBCLK4>;
+		clocks = <&rootclk PB4CLK>;
 		microchip,gpio-bank = <9>;
 		gpio-ranges = <&pic32_pinctrl 0 144 16>;
 	};
@@ -212,7 +233,7 @@
 		compatible = "microchip,pic32mzda-sdhci";
 		reg = <0x1f8ec000 0x100>;
 		interrupts = <191 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&REFCLKO4>, <&PBCLK5>;
+		clocks = <&rootclk REF4CLK>, <&rootclk PB5CLK>;
 		clock-names = "base_clk", "sys_clk";
 		bus-width = <4>;
 		cap-sd-highspeed;
@@ -225,7 +246,7 @@
 		interrupts = <112 IRQ_TYPE_LEVEL_HIGH>,
 			<113 IRQ_TYPE_LEVEL_HIGH>,
 			<114 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&PBCLK2>;
+		clocks = <&rootclk PB2CLK>;
 		status = "disabled";
 	};
 
@@ -235,7 +256,7 @@
 		interrupts = <145 IRQ_TYPE_LEVEL_HIGH>,
 			<146 IRQ_TYPE_LEVEL_HIGH>,
 			<147 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&PBCLK2>;
+		clocks = <&rootclk PB2CLK>;
 		status = "disabled";
 	};
 
@@ -245,7 +266,7 @@
 		interrupts = <157 IRQ_TYPE_LEVEL_HIGH>,
 			<158 IRQ_TYPE_LEVEL_HIGH>,
 			<159 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&PBCLK2>;
+		clocks = <&rootclk PB2CLK>;
 		status = "disabled";
 	};
 
@@ -255,7 +276,7 @@
 		interrupts = <170 IRQ_TYPE_LEVEL_HIGH>,
 			<171 IRQ_TYPE_LEVEL_HIGH>,
 			<172 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&PBCLK2>;
+		clocks = <&rootclk PB2CLK>;
 		status = "disabled";
 	};
 
@@ -265,7 +286,7 @@
 		interrupts = <179 IRQ_TYPE_LEVEL_HIGH>,
 			<180 IRQ_TYPE_LEVEL_HIGH>,
 			<181 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&PBCLK2>;
+		clocks = <&rootclk PB2CLK>;
 		status = "disabled";
 	};
 
@@ -275,7 +296,7 @@
 		interrupts = <188 IRQ_TYPE_LEVEL_HIGH>,
 			<189 IRQ_TYPE_LEVEL_HIGH>,
 			<190 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&PBCLK2>;
+		clocks = <&rootclk PB2CLK>;
 		status = "disabled";
 	};
 };
diff --git a/arch/mips/boot/dts/pic32/pic32mzda_sk.dts b/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
index 5d434a5..fc74010 100644
--- a/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
+++ b/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
@@ -95,8 +95,9 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_sdhc1>;
 	status = "okay";
-	assigned-clocks = <&REFCLKO2>,<&REFCLKO4>,<&REFCLKO5>;
-	assigned-clock-rates = <50000000>,<25000000>,<40000000>;
+	assigned-clocks = <&rootclk REF2CLK>, <&rootclk REF4CLK>,
+		<&rootclk REF5CLK>;
+	assigned-clock-rates = <50000000>, <25000000>, <40000000>;
 };
 
 &pic32_pinctrl {
diff --git a/arch/mips/boot/dts/qca/Makefile b/arch/mips/boot/dts/qca/Makefile
index 2d61455d..63a9ddf 100644
--- a/arch/mips/boot/dts/qca/Makefile
+++ b/arch/mips/boot/dts/qca/Makefile
@@ -1,8 +1,9 @@
 # All DTBs
 dtb-$(CONFIG_ATH79)			+= ar9132_tl_wr1043nd_v1.dtb
-
-# Select a DTB to build in the kernel
-obj-$(CONFIG_DTB_TL_WR1043ND_V1)	+= ar9132_tl_wr1043nd_v1.dtb.o
+dtb-$(CONFIG_ATH79)			+= ar9331_dpt_module.dtb
+dtb-$(CONFIG_ATH79)			+= ar9331_dragino_ms14.dtb
+dtb-$(CONFIG_ATH79)			+= ar9331_omega.dtb
+dtb-$(CONFIG_ATH79)			+= ar9331_tl_mr3020.dtb
 
 # Force kbuild to make empty built-in.o if necessary
 obj-				+= dummy.o
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index 3ad4ba9..302f0a8 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -1,3 +1,5 @@
+#include <dt-bindings/clock/ath79-clk.h>
+
 / {
 	compatible = "qca,ar9132";
 
@@ -11,6 +13,7 @@
 		cpu@0 {
 			device_type = "cpu";
 			compatible = "mips,mips24Kc";
+			clocks = <&pll ATH79_CLK_CPU>;
 			reg = <0>;
 		};
 	};
@@ -52,12 +55,12 @@
 				#qca,ddr-wb-channel-cells = <1>;
 			};
 
-			uart@18020000 {
+			uart: uart@18020000 {
 				compatible = "ns8250";
 				reg = <0x18020000 0x20>;
 				interrupts = <3>;
 
-				clocks = <&pll 2>;
+				clocks = <&pll ATH79_CLK_AHB>;
 				clock-names = "uart";
 
 				reg-io-width = <4>;
@@ -83,7 +86,7 @@
 			};
 
 			pll: pll-controller@18050000 {
-				compatible = "qca,ar9132-ppl",
+				compatible = "qca,ar9132-pll",
 						"qca,ar9130-pll";
 				reg = <0x18050000 0x20>;
 
@@ -94,13 +97,13 @@
 				clock-output-names = "cpu", "ddr", "ahb";
 			};
 
-			wdt@18060008 {
+			wdt: wdt@18060008 {
 				compatible = "qca,ar7130-wdt";
 				reg = <0x18060008 0x8>;
 
 				interrupts = <4>;
 
-				clocks = <&pll 2>;
+				clocks = <&pll ATH79_CLK_AHB>;
 				clock-names = "wdt";
 			};
 
@@ -125,7 +128,7 @@
 			};
 		};
 
-		usb@1b000100 {
+		usb: usb@1b000100 {
 			compatible = "qca,ar7100-ehci", "generic-ehci";
 			reg = <0x1b000100 0x100>;
 
@@ -140,11 +143,11 @@
 			status = "disabled";
 		};
 
-		spi@1f000000 {
+		spi: spi@1f000000 {
 			compatible = "qca,ar9132-spi", "qca,ar7100-spi";
 			reg = <0x1f000000 0x10>;
 
-			clocks = <&pll 2>;
+			clocks = <&pll ATH79_CLK_AHB>;
 			clock-names = "ahb";
 
 			status = "disabled";
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
index e535ee3..3c3b7ce 100644
--- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -9,70 +9,17 @@
 	compatible = "tplink,tl-wr1043nd-v1", "qca,ar9132";
 	model = "TP-Link TL-WR1043ND Version 1";
 
-	alias {
-		serial0 = "/ahb/apb/uart@18020000";
-	};
-
 	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x2000000>;
 	};
 
-	extosc: oscillator {
+	extosc: ref {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
 		clock-frequency = <40000000>;
 	};
 
-	ahb {
-		apb {
-			uart@18020000 {
-				status = "okay";
-			};
-
-			pll-controller@18050000 {
-				clocks = <&extosc>;
-			};
-		};
-
-		usb@1b000100 {
-			status = "okay";
-		};
-
-		spi@1f000000 {
-			status = "okay";
-			num-cs = <1>;
-
-			flash@0 {
-				#address-cells = <1>;
-				#size-cells = <1>;
-				compatible = "s25sl064a";
-				reg = <0>;
-				spi-max-frequency = <25000000>;
-
-				partition@0 {
-					label = "u-boot";
-					reg = <0x000000 0x020000>;
-				};
-
-				partition@1 {
-					label = "firmware";
-					reg = <0x020000 0x7D0000>;
-				};
-
-				partition@2 {
-					label = "art";
-					reg = <0x7F0000 0x010000>;
-					read-only;
-				};
-			};
-		};
-	};
-
-	usb-phy {
-		status = "okay";
-	};
-
 	gpio-keys {
 		compatible = "gpio-keys-polled";
 		#address-cells = <1>;
@@ -118,3 +65,48 @@
 		};
 	};
 };
+
+&uart {
+	status = "okay";
+};
+
+&pll {
+	clocks = <&extosc>;
+};
+
+&usb {
+	status = "okay";
+};
+
+&usb_phy {
+	status = "okay";
+};
+
+&spi {
+	status = "okay";
+	num-cs = <1>;
+
+	flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "s25sl064a";
+		reg = <0>;
+		spi-max-frequency = <25000000>;
+
+		partition@0 {
+			label = "u-boot";
+			reg = <0x000000 0x020000>;
+		};
+
+		partition@1 {
+			label = "firmware";
+			reg = <0x020000 0x7D0000>;
+		};
+
+		partition@2 {
+			label = "art";
+			reg = <0x7F0000 0x010000>;
+			read-only;
+		};
+	};
+};
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
new file mode 100644
index 0000000..cf47ed4
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -0,0 +1,155 @@
+#include <dt-bindings/clock/ath79-clk.h>
+
+/ {
+	compatible = "qca,ar9331";
+
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "mips,mips24Kc";
+			clocks = <&pll ATH79_CLK_CPU>;
+			reg = <0>;
+		};
+	};
+
+	cpuintc: interrupt-controller {
+		compatible = "qca,ar7100-cpu-intc";
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+
+		qca,ddr-wb-channel-interrupts = <2>, <3>;
+		qca,ddr-wb-channels = <&ddr_ctrl 3>, <&ddr_ctrl 2>;
+	};
+
+	ref: ref {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+	};
+
+	ahb {
+		compatible = "simple-bus";
+		ranges;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		interrupt-parent = <&cpuintc>;
+
+		apb {
+			compatible = "simple-bus";
+			ranges;
+
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			interrupt-parent = <&miscintc>;
+
+			ddr_ctrl: memory-controller@18000000 {
+				compatible = "qca,ar7240-ddr-controller";
+				reg = <0x18000000 0x100>;
+
+				#qca,ddr-wb-channel-cells = <1>;
+			};
+
+			uart: uart@18020000 {
+				compatible = "qca,ar9330-uart";
+				reg = <0x18020000 0x14>;
+
+				interrupts = <3>;
+
+				clocks = <&ref>;
+				clock-names = "uart";
+
+				status = "disabled";
+			};
+
+			gpio: gpio@18040000 {
+				compatible = "qca,ar7100-gpio";
+				reg = <0x18040000 0x34>;
+				interrupts = <2>;
+
+				ngpios = <30>;
+
+				gpio-controller;
+				#gpio-cells = <2>;
+
+				interrupt-controller;
+				#interrupt-cells = <2>;
+
+				status = "disabled";
+			};
+
+			pll: pll-controller@18050000 {
+				compatible = "qca,ar9330-pll";
+				reg = <0x18050000 0x100>;
+
+				clocks = <&ref>;
+				clock-names = "ref";
+
+				#clock-cells = <1>;
+			};
+
+			miscintc: interrupt-controller@18060010 {
+				compatible = "qca,ar7240-misc-intc";
+				reg = <0x18060010 0x4>;
+
+				interrupt-parent = <&cpuintc>;
+				interrupts = <6>;
+
+				interrupt-controller;
+				#interrupt-cells = <1>;
+			};
+
+			rst: reset-controller@1806001c {
+				compatible = "qca,ar7100-reset";
+				reg = <0x1806001c 0x4>;
+
+				#reset-cells = <1>;
+			};
+		};
+
+		usb: usb@1b000100 {
+			compatible = "chipidea,usb2";
+			reg = <0x1b000000 0x200>;
+
+			interrupts = <3>;
+			resets = <&rst 5>;
+
+			phy-names = "usb-phy";
+			phys = <&usb_phy>;
+
+			status = "disabled";
+		};
+
+		spi: spi@1f000000 {
+			compatible = "qca,ar7100-spi";
+			reg = <0x1f000000 0x10>;
+
+			clocks = <&pll ATH79_CLK_AHB>;
+			clock-names = "ahb";
+
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			status = "disabled";
+		};
+	};
+
+	usb_phy: usb-phy {
+		compatible = "qca,ar7100-usb-phy";
+
+		reset-names = "usb-phy", "usb-suspend-override";
+		resets = <&rst 4>, <&rst 3>;
+
+		#phy-cells = <0>;
+
+		status = "disabled";
+	};
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
new file mode 100644
index 0000000..98e7450
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
@@ -0,0 +1,78 @@
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+	model = "DPTechnics DPT-Module";
+	compatible = "dptechnics,dpt-module";
+
+	aliases {
+		serial0 = &uart;
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x4000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		system {
+			label = "dpt-module:green:system";
+			gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+	};
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		poll-interval = <100>;
+
+		button@0 {
+			label = "reset";
+			linux,code = <KEY_RESTART>;
+			gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&ref {
+	clock-frequency = <25000000>;
+};
+
+&uart {
+	status = "okay";
+};
+
+&gpio {
+	status = "okay";
+};
+
+&usb {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usb_phy {
+	status = "okay";
+};
+
+&spi {
+	num-chipselects = <1>;
+	status = "okay";
+
+	/* Winbond 25Q128FVSG SPI flash */
+	spiflash: w25q128@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "winbond,w25q128", "jedec,spi-nor";
+		spi-max-frequency = <104000000>;
+		reg = <0>;
+	};
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
new file mode 100644
index 0000000..56f8320
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
@@ -0,0 +1,102 @@
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+	model = "Dragino MS14 (Dragino 2)";
+	compatible = "dragino,ms14";
+
+	aliases {
+		serial0 = &uart;
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x4000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		wlan {
+			label = "dragino2:red:wlan";
+			gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		lan {
+			label = "dragino2:red:lan";
+			gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+
+		wan {
+			label = "dragino2:red:wan";
+			gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+
+		system {
+			label = "dragino2:red:system";
+			gpios = <&gpio 28 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+	};
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		poll-interval = <100>;
+
+		button@0 {
+			label = "jumpstart";
+			linux,code = <KEY_WPS_BUTTON>;
+			gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+		};
+
+		button@1 {
+			label = "reset";
+			linux,code = <KEY_RESTART>;
+			gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&ref {
+	clock-frequency = <25000000>;
+};
+
+&uart {
+	status = "okay";
+};
+
+&gpio {
+	status = "okay";
+};
+
+&usb {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usb_phy {
+	status = "okay";
+};
+
+&spi {
+	num-chipselects = <1>;
+	status = "okay";
+
+	/* Winbond 25Q128BVFG SPI flash */
+	spiflash: w25q128@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "winbond,w25q128", "jedec,spi-nor";
+		spi-max-frequency = <104000000>;
+		reg = <0>;
+	};
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_omega.dts b/arch/mips/boot/dts/qca/ar9331_omega.dts
new file mode 100644
index 0000000..b2be3b0
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_omega.dts
@@ -0,0 +1,78 @@
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+	model = "Onion Omega";
+	compatible = "onion,omega";
+
+	aliases {
+		serial0 = &uart;
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x4000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		system {
+			label = "onion:amber:system";
+			gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+	};
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		poll-interval = <100>;
+
+		button@0 {
+			label = "reset";
+			linux,code = <KEY_RESTART>;
+			gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
+
+&ref {
+	clock-frequency = <25000000>;
+};
+
+&uart {
+	status = "okay";
+};
+
+&gpio {
+	status = "okay";
+};
+
+&usb {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usb_phy {
+	status = "okay";
+};
+
+&spi {
+	num-chipselects = <1>;
+	status = "okay";
+
+	/* Winbond 25Q128FVSG SPI flash */
+	spiflash: w25q128@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "winbond,w25q128", "jedec,spi-nor";
+		spi-max-frequency = <104000000>;
+		reg = <0>;
+	};
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
new file mode 100644
index 0000000..919cf3b
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
@@ -0,0 +1,118 @@
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+	model = "TP-Link TL-MR3020";
+	compatible = "tplink,tl-mr3020";
+
+	aliases {
+		serial0 = &uart;
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x2000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		wlan {
+			label = "tp-link:green:wlan";
+			gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		lan {
+			label = "tp-link:green:lan";
+			gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+
+		wps {
+			label = "tp-link:green:wps";
+			gpios = <&gpio 26 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+
+		led3g {
+			label = "tp-link:green:3g";
+			gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+			default-state = "off";
+		};
+	};
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		poll-interval = <100>;
+
+		button@0 {
+			label = "wps";
+			linux,code = <KEY_WPS_BUTTON>;
+			gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
+		};
+
+		button@1 {
+			label = "sw1";
+			linux,code = <BTN_0>;
+			gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
+		};
+
+		button@2 {
+			label = "sw2";
+			linux,code = <BTN_1>;
+			gpios = <&gpio 20 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	reg_usb_vbus: reg_usb_vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio 8 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+};
+
+&ref {
+	clock-frequency = <25000000>;
+};
+
+&uart {
+	status = "okay";
+};
+
+&gpio {
+	status = "okay";
+};
+
+&usb {
+	dr_mode = "host";
+	vbus-supply = <&reg_usb_vbus>;
+	status = "okay";
+};
+
+&usb_phy {
+	status = "okay";
+};
+
+&spi {
+	num-chipselects = <1>;
+	status = "okay";
+
+	/* Spansion S25FL032PIF SPI flash */
+	spiflash: s25sl032p@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "spansion,s25sl032p", "jedec,spi-nor";
+		spi-max-frequency = <104000000>;
+		reg = <0>;
+	};
+};
diff --git a/arch/mips/boot/tools/.gitignore b/arch/mips/boot/tools/.gitignore
new file mode 100644
index 0000000..be0ed06
--- /dev/null
+++ b/arch/mips/boot/tools/.gitignore
@@ -0,0 +1 @@
+relocs
diff --git a/arch/mips/boot/tools/Makefile b/arch/mips/boot/tools/Makefile
new file mode 100644
index 0000000..d232a68
--- /dev/null
+++ b/arch/mips/boot/tools/Makefile
@@ -0,0 +1,8 @@
+
+hostprogs-y	+= relocs
+relocs-objs	+= relocs_32.o
+relocs-objs	+= relocs_64.o
+relocs-objs	+= relocs_main.o
+PHONY += relocs
+relocs: $(obj)/relocs
+	@:
diff --git a/arch/mips/boot/tools/relocs.c b/arch/mips/boot/tools/relocs.c
new file mode 100644
index 0000000..b9cbf78
--- /dev/null
+++ b/arch/mips/boot/tools/relocs.c
@@ -0,0 +1,680 @@
+/* This is included from relocs_32/64.c */
+
+#define ElfW(type)		_ElfW(ELF_BITS, type)
+#define _ElfW(bits, type)	__ElfW(bits, type)
+#define __ElfW(bits, type)	Elf##bits##_##type
+
+#define Elf_Addr		ElfW(Addr)
+#define Elf_Ehdr		ElfW(Ehdr)
+#define Elf_Phdr		ElfW(Phdr)
+#define Elf_Shdr		ElfW(Shdr)
+#define Elf_Sym			ElfW(Sym)
+
+static Elf_Ehdr ehdr;
+
+struct relocs {
+	uint32_t	*offset;
+	unsigned long	count;
+	unsigned long	size;
+};
+
+static struct relocs relocs;
+
+struct section {
+	Elf_Shdr       shdr;
+	struct section *link;
+	Elf_Sym        *symtab;
+	Elf_Rel        *reltab;
+	char           *strtab;
+	long           shdr_offset;
+};
+static struct section *secs;
+
+static const char * const regex_sym_kernel = {
+/* Symbols matching this regex should never be relocated */
+	"^(__crc_)",
+};
+
+static regex_t sym_regex_c;
+
+static int regex_skip_reloc(const char *sym_name)
+{
+	return !regexec(&sym_regex_c, sym_name, 0, NULL, 0);
+}
+
+static void regex_init(void)
+{
+	char errbuf[128];
+	int err;
+
+	err = regcomp(&sym_regex_c, regex_sym_kernel,
+			REG_EXTENDED|REG_NOSUB);
+
+	if (err) {
+		regerror(err, &sym_regex_c, errbuf, sizeof(errbuf));
+		die("%s", errbuf);
+	}
+}
+
+static const char *rel_type(unsigned type)
+{
+	static const char * const type_name[] = {
+#define REL_TYPE(X)[X] = #X
+		REL_TYPE(R_MIPS_NONE),
+		REL_TYPE(R_MIPS_16),
+		REL_TYPE(R_MIPS_32),
+		REL_TYPE(R_MIPS_REL32),
+		REL_TYPE(R_MIPS_26),
+		REL_TYPE(R_MIPS_HI16),
+		REL_TYPE(R_MIPS_LO16),
+		REL_TYPE(R_MIPS_GPREL16),
+		REL_TYPE(R_MIPS_LITERAL),
+		REL_TYPE(R_MIPS_GOT16),
+		REL_TYPE(R_MIPS_PC16),
+		REL_TYPE(R_MIPS_CALL16),
+		REL_TYPE(R_MIPS_GPREL32),
+		REL_TYPE(R_MIPS_64),
+		REL_TYPE(R_MIPS_HIGHER),
+		REL_TYPE(R_MIPS_HIGHEST),
+		REL_TYPE(R_MIPS_PC21_S2),
+		REL_TYPE(R_MIPS_PC26_S2),
+#undef REL_TYPE
+	};
+	const char *name = "unknown rel type name";
+
+	if (type < ARRAY_SIZE(type_name) && type_name[type])
+		name = type_name[type];
+	return name;
+}
+
+static const char *sec_name(unsigned shndx)
+{
+	const char *sec_strtab;
+	const char *name;
+
+	sec_strtab = secs[ehdr.e_shstrndx].strtab;
+	if (shndx < ehdr.e_shnum)
+		name = sec_strtab + secs[shndx].shdr.sh_name;
+	else if (shndx == SHN_ABS)
+		name = "ABSOLUTE";
+	else if (shndx == SHN_COMMON)
+		name = "COMMON";
+	else
+		name = "<noname>";
+	return name;
+}
+
+static struct section *sec_lookup(const char *secname)
+{
+	int i;
+
+	for (i = 0; i < ehdr.e_shnum; i++)
+		if (strcmp(secname, sec_name(i)) == 0)
+			return &secs[i];
+
+	return NULL;
+}
+
+static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
+{
+	const char *name;
+
+	if (sym->st_name)
+		name = sym_strtab + sym->st_name;
+	else
+		name = sec_name(sym->st_shndx);
+	return name;
+}
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define le16_to_cpu(val) (val)
+#define le32_to_cpu(val) (val)
+#define le64_to_cpu(val) (val)
+#define be16_to_cpu(val) bswap_16(val)
+#define be32_to_cpu(val) bswap_32(val)
+#define be64_to_cpu(val) bswap_64(val)
+
+#define cpu_to_le16(val) (val)
+#define cpu_to_le32(val) (val)
+#define cpu_to_le64(val) (val)
+#define cpu_to_be16(val) bswap_16(val)
+#define cpu_to_be32(val) bswap_32(val)
+#define cpu_to_be64(val) bswap_64(val)
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+#define le16_to_cpu(val) bswap_16(val)
+#define le32_to_cpu(val) bswap_32(val)
+#define le64_to_cpu(val) bswap_64(val)
+#define be16_to_cpu(val) (val)
+#define be32_to_cpu(val) (val)
+#define be64_to_cpu(val) (val)
+
+#define cpu_to_le16(val) bswap_16(val)
+#define cpu_to_le32(val) bswap_32(val)
+#define cpu_to_le64(val) bswap_64(val)
+#define cpu_to_be16(val) (val)
+#define cpu_to_be32(val) (val)
+#define cpu_to_be64(val) (val)
+#endif
+
+static uint16_t elf16_to_cpu(uint16_t val)
+{
+	if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+		return le16_to_cpu(val);
+	else
+		return be16_to_cpu(val);
+}
+
+static uint32_t elf32_to_cpu(uint32_t val)
+{
+	if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+		return le32_to_cpu(val);
+	else
+		return be32_to_cpu(val);
+}
+
+static uint32_t cpu_to_elf32(uint32_t val)
+{
+	if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+		return cpu_to_le32(val);
+	else
+		return cpu_to_be32(val);
+}
+
+#define elf_half_to_cpu(x)	elf16_to_cpu(x)
+#define elf_word_to_cpu(x)	elf32_to_cpu(x)
+
+#if ELF_BITS == 64
+static uint64_t elf64_to_cpu(uint64_t val)
+{
+	if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+		return le64_to_cpu(val);
+	else
+		return be64_to_cpu(val);
+}
+#define elf_addr_to_cpu(x)	elf64_to_cpu(x)
+#define elf_off_to_cpu(x)	elf64_to_cpu(x)
+#define elf_xword_to_cpu(x)	elf64_to_cpu(x)
+#else
+#define elf_addr_to_cpu(x)	elf32_to_cpu(x)
+#define elf_off_to_cpu(x)	elf32_to_cpu(x)
+#define elf_xword_to_cpu(x)	elf32_to_cpu(x)
+#endif
+
+static void read_ehdr(FILE *fp)
+{
+	if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+		die("Cannot read ELF header: %s\n", strerror(errno));
+
+	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0)
+		die("No ELF magic\n");
+
+	if (ehdr.e_ident[EI_CLASS] != ELF_CLASS)
+		die("Not a %d bit executable\n", ELF_BITS);
+
+	if ((ehdr.e_ident[EI_DATA] != ELFDATA2LSB) &&
+	    (ehdr.e_ident[EI_DATA] != ELFDATA2MSB))
+		die("Unknown ELF Endianness\n");
+
+	if (ehdr.e_ident[EI_VERSION] != EV_CURRENT)
+		die("Unknown ELF version\n");
+
+	/* Convert the fields to native endian */
+	ehdr.e_type      = elf_half_to_cpu(ehdr.e_type);
+	ehdr.e_machine   = elf_half_to_cpu(ehdr.e_machine);
+	ehdr.e_version   = elf_word_to_cpu(ehdr.e_version);
+	ehdr.e_entry     = elf_addr_to_cpu(ehdr.e_entry);
+	ehdr.e_phoff     = elf_off_to_cpu(ehdr.e_phoff);
+	ehdr.e_shoff     = elf_off_to_cpu(ehdr.e_shoff);
+	ehdr.e_flags     = elf_word_to_cpu(ehdr.e_flags);
+	ehdr.e_ehsize    = elf_half_to_cpu(ehdr.e_ehsize);
+	ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize);
+	ehdr.e_phnum     = elf_half_to_cpu(ehdr.e_phnum);
+	ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize);
+	ehdr.e_shnum     = elf_half_to_cpu(ehdr.e_shnum);
+	ehdr.e_shstrndx  = elf_half_to_cpu(ehdr.e_shstrndx);
+
+	if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN))
+		die("Unsupported ELF header type\n");
+
+	if (ehdr.e_machine != ELF_MACHINE)
+		die("Not for %s\n", ELF_MACHINE_NAME);
+
+	if (ehdr.e_version != EV_CURRENT)
+		die("Unknown ELF version\n");
+
+	if (ehdr.e_ehsize != sizeof(Elf_Ehdr))
+		die("Bad Elf header size\n");
+
+	if (ehdr.e_phentsize != sizeof(Elf_Phdr))
+		die("Bad program header entry\n");
+
+	if (ehdr.e_shentsize != sizeof(Elf_Shdr))
+		die("Bad section header entry\n");
+
+	if (ehdr.e_shstrndx >= ehdr.e_shnum)
+		die("String table index out of bounds\n");
+}
+
+static void read_shdrs(FILE *fp)
+{
+	int i;
+	Elf_Shdr shdr;
+
+	secs = calloc(ehdr.e_shnum, sizeof(struct section));
+	if (!secs)
+		die("Unable to allocate %d section headers\n", ehdr.e_shnum);
+
+	if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
+		die("Seek to %d failed: %s\n", ehdr.e_shoff, strerror(errno));
+
+	for (i = 0; i < ehdr.e_shnum; i++) {
+		struct section *sec = &secs[i];
+
+		sec->shdr_offset = ftell(fp);
+		if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
+			die("Cannot read ELF section headers %d/%d: %s\n",
+			    i, ehdr.e_shnum, strerror(errno));
+		sec->shdr.sh_name      = elf_word_to_cpu(shdr.sh_name);
+		sec->shdr.sh_type      = elf_word_to_cpu(shdr.sh_type);
+		sec->shdr.sh_flags     = elf_xword_to_cpu(shdr.sh_flags);
+		sec->shdr.sh_addr      = elf_addr_to_cpu(shdr.sh_addr);
+		sec->shdr.sh_offset    = elf_off_to_cpu(shdr.sh_offset);
+		sec->shdr.sh_size      = elf_xword_to_cpu(shdr.sh_size);
+		sec->shdr.sh_link      = elf_word_to_cpu(shdr.sh_link);
+		sec->shdr.sh_info      = elf_word_to_cpu(shdr.sh_info);
+		sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign);
+		sec->shdr.sh_entsize   = elf_xword_to_cpu(shdr.sh_entsize);
+		if (sec->shdr.sh_link < ehdr.e_shnum)
+			sec->link = &secs[sec->shdr.sh_link];
+	}
+}
+
+static void read_strtabs(FILE *fp)
+{
+	int i;
+
+	for (i = 0; i < ehdr.e_shnum; i++) {
+		struct section *sec = &secs[i];
+
+		if (sec->shdr.sh_type != SHT_STRTAB)
+			continue;
+
+		sec->strtab = malloc(sec->shdr.sh_size);
+		if (!sec->strtab)
+			die("malloc of %d bytes for strtab failed\n",
+			    sec->shdr.sh_size);
+
+		if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+			die("Seek to %d failed: %s\n",
+			    sec->shdr.sh_offset, strerror(errno));
+
+		if (fread(sec->strtab, 1, sec->shdr.sh_size, fp) !=
+		    sec->shdr.sh_size)
+			die("Cannot read symbol table: %s\n", strerror(errno));
+	}
+}
+
+static void read_symtabs(FILE *fp)
+{
+	int i, j;
+
+	for (i = 0; i < ehdr.e_shnum; i++) {
+		struct section *sec = &secs[i];
+		if (sec->shdr.sh_type != SHT_SYMTAB)
+			continue;
+
+		sec->symtab = malloc(sec->shdr.sh_size);
+		if (!sec->symtab)
+			die("malloc of %d bytes for symtab failed\n",
+			    sec->shdr.sh_size);
+
+		if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+			die("Seek to %d failed: %s\n",
+			    sec->shdr.sh_offset, strerror(errno));
+
+		if (fread(sec->symtab, 1, sec->shdr.sh_size, fp) !=
+		    sec->shdr.sh_size)
+			die("Cannot read symbol table: %s\n", strerror(errno));
+
+		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) {
+			Elf_Sym *sym = &sec->symtab[j];
+
+			sym->st_name  = elf_word_to_cpu(sym->st_name);
+			sym->st_value = elf_addr_to_cpu(sym->st_value);
+			sym->st_size  = elf_xword_to_cpu(sym->st_size);
+			sym->st_shndx = elf_half_to_cpu(sym->st_shndx);
+		}
+	}
+}
+
+static void read_relocs(FILE *fp)
+{
+	static unsigned long base = 0;
+	int i, j;
+
+	if (!base) {
+		struct section *sec = sec_lookup(".text");
+
+		if (!sec)
+			die("Could not find .text section\n");
+
+		base = sec->shdr.sh_addr;
+	}
+
+	for (i = 0; i < ehdr.e_shnum; i++) {
+		struct section *sec = &secs[i];
+
+		if (sec->shdr.sh_type != SHT_REL_TYPE)
+			continue;
+
+		sec->reltab = malloc(sec->shdr.sh_size);
+		if (!sec->reltab)
+			die("malloc of %d bytes for relocs failed\n",
+			    sec->shdr.sh_size);
+
+		if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+			die("Seek to %d failed: %s\n",
+			    sec->shdr.sh_offset, strerror(errno));
+
+		if (fread(sec->reltab, 1, sec->shdr.sh_size, fp) !=
+		    sec->shdr.sh_size)
+			die("Cannot read symbol table: %s\n", strerror(errno));
+
+		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+			Elf_Rel *rel = &sec->reltab[j];
+
+			rel->r_offset = elf_addr_to_cpu(rel->r_offset);
+			/* Set offset into kernel image */
+			rel->r_offset -= base;
+#if (ELF_BITS == 32)
+			rel->r_info   = elf_xword_to_cpu(rel->r_info);
+#else
+			/* Convert MIPS64 RELA format - only the symbol
+			 * index needs converting to native endianness
+			 */
+			rel->r_info   = rel->r_info;
+			ELF_R_SYM(rel->r_info) = elf32_to_cpu(ELF_R_SYM(rel->r_info));
+#endif
+#if (SHT_REL_TYPE == SHT_RELA)
+			rel->r_addend = elf_xword_to_cpu(rel->r_addend);
+#endif
+		}
+	}
+}
+
+static void remove_relocs(FILE *fp)
+{
+	int i;
+	Elf_Shdr shdr;
+
+	for (i = 0; i < ehdr.e_shnum; i++) {
+		struct section *sec = &secs[i];
+
+		if (sec->shdr.sh_type != SHT_REL_TYPE)
+			continue;
+
+		if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0)
+			die("Seek to %d failed: %s\n",
+			    sec->shdr_offset, strerror(errno));
+
+		if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
+			die("Cannot read ELF section headers %d/%d: %s\n",
+			    i, ehdr.e_shnum, strerror(errno));
+
+		/* Set relocation section size to 0, effectively removing it.
+		 * This is necessary due to lack of support for relocations
+		 * in objcopy when creating 32bit elf from 64bit elf.
+		 */
+		shdr.sh_size = 0;
+
+		if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0)
+			die("Seek to %d failed: %s\n",
+			    sec->shdr_offset, strerror(errno));
+
+		if (fwrite(&shdr, sizeof(shdr), 1, fp) != 1)
+			die("Cannot write ELF section headers %d/%d: %s\n",
+			    i, ehdr.e_shnum, strerror(errno));
+	}
+}
+
+static void add_reloc(struct relocs *r, uint32_t offset, unsigned type)
+{
+	/* Relocation representation in binary table:
+	 * |76543210|76543210|76543210|76543210|
+	 * |  Type  |  offset from _text >> 2  |
+	 */
+	offset >>= 2;
+	if (offset > 0x00FFFFFF)
+		die("Kernel image exceeds maximum size for relocation!\n");
+
+	offset = (offset & 0x00FFFFFF) | ((type & 0xFF) << 24);
+
+	if (r->count == r->size) {
+		unsigned long newsize = r->size + 50000;
+		void *mem = realloc(r->offset, newsize * sizeof(r->offset[0]));
+
+		if (!mem)
+			die("realloc failed\n");
+
+		r->offset = mem;
+		r->size = newsize;
+	}
+	r->offset[r->count++] = offset;
+}
+
+static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
+			Elf_Sym *sym, const char *symname))
+{
+	int i;
+
+	/* Walk through the relocations */
+	for (i = 0; i < ehdr.e_shnum; i++) {
+		char *sym_strtab;
+		Elf_Sym *sh_symtab;
+		struct section *sec_applies, *sec_symtab;
+		int j;
+		struct section *sec = &secs[i];
+
+		if (sec->shdr.sh_type != SHT_REL_TYPE)
+			continue;
+
+		sec_symtab  = sec->link;
+		sec_applies = &secs[sec->shdr.sh_info];
+		if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
+			continue;
+
+		sh_symtab = sec_symtab->symtab;
+		sym_strtab = sec_symtab->link->strtab;
+		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+			Elf_Rel *rel = &sec->reltab[j];
+			Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
+			const char *symname = sym_name(sym_strtab, sym);
+
+			process(sec, rel, sym, symname);
+		}
+	}
+}
+
+static int do_reloc(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
+		      const char *symname)
+{
+	unsigned r_type = ELF_R_TYPE(rel->r_info);
+	unsigned bind = ELF_ST_BIND(sym->st_info);
+
+	if ((bind == STB_WEAK) && (sym->st_value == 0)) {
+		/* Don't relocate weak symbols without a target */
+		return 0;
+	}
+
+	if (regex_skip_reloc(symname))
+		return 0;
+
+	switch (r_type) {
+	case R_MIPS_NONE:
+	case R_MIPS_REL32:
+	case R_MIPS_PC16:
+	case R_MIPS_PC21_S2:
+	case R_MIPS_PC26_S2:
+		/*
+		 * NONE can be ignored and PC relative relocations don't
+		 * need to be adjusted.
+		 */
+	case R_MIPS_HIGHEST:
+	case R_MIPS_HIGHER:
+		/* We support relocating within the same 4Gb segment only,
+		 * thus leaving the top 32bits unchanged
+		 */
+	case R_MIPS_LO16:
+		/* We support relocating by 64k jumps only
+		 * thus leaving the bottom 16bits unchanged
+		 */
+		break;
+
+	case R_MIPS_64:
+	case R_MIPS_32:
+	case R_MIPS_26:
+	case R_MIPS_HI16:
+		add_reloc(&relocs, rel->r_offset, r_type);
+		break;
+
+	default:
+		die("Unsupported relocation type: %s (%d)\n",
+		    rel_type(r_type), r_type);
+		break;
+	}
+
+	return 0;
+}
+
+static int write_reloc_as_bin(uint32_t v, FILE *f)
+{
+	unsigned char buf[4];
+
+	v = cpu_to_elf32(v);
+
+	memcpy(buf, &v, sizeof(uint32_t));
+	return fwrite(buf, 1, 4, f);
+}
+
+static int write_reloc_as_text(uint32_t v, FILE *f)
+{
+	int res;
+
+	res = fprintf(f, "\t.long 0x%08"PRIx32"\n", v);
+	if (res < 0)
+		return res;
+	else
+		return sizeof(uint32_t);
+}
+
+static void emit_relocs(int as_text, int as_bin, FILE *outf)
+{
+	int i;
+	int (*write_reloc)(uint32_t, FILE *) = write_reloc_as_bin;
+	int size = 0;
+	int size_reserved;
+	struct section *sec_reloc;
+
+	sec_reloc = sec_lookup(".data.reloc");
+	if (!sec_reloc)
+		die("Could not find relocation section\n");
+
+	size_reserved = sec_reloc->shdr.sh_size;
+
+	/* Collect up the relocations */
+	walk_relocs(do_reloc);
+
+	/* Print the relocations */
+	if (as_text) {
+		/* Print the relocations in a form suitable that
+		 * gas will like.
+		 */
+		printf(".section \".data.reloc\",\"a\"\n");
+		printf(".balign 4\n");
+		/* Output text to stdout */
+		write_reloc = write_reloc_as_text;
+		outf = stdout;
+	} else if (as_bin) {
+		/* Output raw binary to stdout */
+		outf = stdout;
+	} else {
+		/* Seek to offset of the relocation section.
+		* Each relocation is then written into the
+		* vmlinux kernel image.
+		*/
+		if (fseek(outf, sec_reloc->shdr.sh_offset, SEEK_SET) < 0) {
+			die("Seek to %d failed: %s\n",
+				sec_reloc->shdr.sh_offset, strerror(errno));
+		}
+	}
+
+	for (i = 0; i < relocs.count; i++)
+		size += write_reloc(relocs.offset[i], outf);
+
+	/* Print a stop, but only if we've actually written some relocs */
+	if (size)
+		size += write_reloc(0, outf);
+
+	if (size > size_reserved)
+		/* Die, but suggest a value for CONFIG_RELOCATION_TABLE_SIZE
+		 * which will fix this problem and allow a bit of headroom
+		 * if more kernel features are enabled
+		 */
+		die("Relocations overflow available space!\n" \
+		    "Please adjust CONFIG_RELOCATION_TABLE_SIZE " \
+		    "to at least 0x%08x\n", (size + 0x1000) & ~0xFFF);
+}
+
+/*
+ * As an aid to debugging problems with different linkers
+ * print summary information about the relocs.
+ * Since different linkers tend to emit the sections in
+ * different orders we use the section names in the output.
+ */
+static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
+				const char *symname)
+{
+	printf("%16s  0x%08x  %16s  %40s  %16s\n",
+		sec_name(sec->shdr.sh_info),
+		(unsigned int)rel->r_offset,
+		rel_type(ELF_R_TYPE(rel->r_info)),
+		symname,
+		sec_name(sym->st_shndx));
+	return 0;
+}
+
+static void print_reloc_info(void)
+{
+	printf("%16s  %10s  %16s  %40s  %16s\n",
+		"reloc section",
+		"offset",
+		"reloc type",
+		"symbol",
+		"symbol section");
+	walk_relocs(do_reloc_info);
+}
+
+#if ELF_BITS == 64
+# define process process_64
+#else
+# define process process_32
+#endif
+
+void process(FILE *fp, int as_text, int as_bin,
+	     int show_reloc_info, int keep_relocs)
+{
+	regex_init();
+	read_ehdr(fp);
+	read_shdrs(fp);
+	read_strtabs(fp);
+	read_symtabs(fp);
+	read_relocs(fp);
+	if (show_reloc_info) {
+		print_reloc_info();
+		return;
+	}
+	emit_relocs(as_text, as_bin, fp);
+	if (!keep_relocs)
+		remove_relocs(fp);
+}
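
For reference, the binary table emitted by write_reloc_as_bin() packs each
relocation into a single 32-bit word as described in the add_reloc() comment
above: the relocation type occupies the top 8 bits and the offset from _text,
shifted right by two, the low 24 bits, with a zero word terminating the
table. A minimal decode sketch (the helper name below is illustrative, not
part of the patch):

	#include <stdint.h>

	/* Illustrative only: unpack one entry of the relocation table.
	 * Layout: | type (8 bits) | (offset from _text) >> 2 (24 bits) |
	 * A zero entry marks the end of the table.
	 */
	static void decode_reloc_entry(uint32_t entry, unsigned int *type,
				       uint32_t *offset)
	{
		*type   = entry >> 24;
		*offset = (entry & 0x00FFFFFF) << 2; /* byte offset from _text */
	}
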
diff --git a/arch/mips/boot/tools/relocs.h b/arch/mips/boot/tools/relocs.h
new file mode 100644
index 0000000..3cf676f
--- /dev/null
+++ b/arch/mips/boot/tools/relocs.h
@@ -0,0 +1,45 @@
+#ifndef RELOCS_H
+#define RELOCS_H
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <elf.h>
+#include <byteswap.h>
+#define USE_BSD
+#include <endian.h>
+#include <regex.h>
+
+void die(char *fmt, ...);
+
+/*
+ * Introduced for MIPSr6
+ */
+#ifndef R_MIPS_PC21_S2
+#define R_MIPS_PC21_S2		60
+#endif
+
+#ifndef R_MIPS_PC26_S2
+#define R_MIPS_PC26_S2		61
+#endif
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+enum symtype {
+	S_ABS,
+	S_REL,
+	S_SEG,
+	S_LIN,
+	S_NSYMTYPES
+};
+
+void process_32(FILE *fp, int as_text, int as_bin,
+		int show_reloc_info, int keep_relocs);
+void process_64(FILE *fp, int as_text, int as_bin,
+		int show_reloc_info, int keep_relocs);
+#endif /* RELOCS_H */
diff --git a/arch/mips/boot/tools/relocs_32.c b/arch/mips/boot/tools/relocs_32.c
new file mode 100644
index 0000000..915bdc0
--- /dev/null
+++ b/arch/mips/boot/tools/relocs_32.c
@@ -0,0 +1,17 @@
+#include "relocs.h"
+
+#define ELF_BITS 32
+
+#define ELF_MACHINE		EM_MIPS
+#define ELF_MACHINE_NAME	"MIPS"
+#define SHT_REL_TYPE		SHT_REL
+#define Elf_Rel			ElfW(Rel)
+
+#define ELF_CLASS		ELFCLASS32
+#define ELF_R_SYM(val)		ELF32_R_SYM(val)
+#define ELF_R_TYPE(val)		ELF32_R_TYPE(val)
+#define ELF_ST_TYPE(o)		ELF32_ST_TYPE(o)
+#define ELF_ST_BIND(o)		ELF32_ST_BIND(o)
+#define ELF_ST_VISIBILITY(o)	ELF32_ST_VISIBILITY(o)
+
+#include "relocs.c"
diff --git a/arch/mips/boot/tools/relocs_64.c b/arch/mips/boot/tools/relocs_64.c
new file mode 100644
index 0000000..b671b5e
--- /dev/null
+++ b/arch/mips/boot/tools/relocs_64.c
@@ -0,0 +1,27 @@
+#include "relocs.h"
+
+#define ELF_BITS 64
+
+#define ELF_MACHINE             EM_MIPS
+#define ELF_MACHINE_NAME        "MIPS64"
+#define SHT_REL_TYPE            SHT_RELA
+#define Elf_Rel                 Elf64_Rela
+
+typedef uint8_t Elf64_Byte;
+
+typedef struct {
+	Elf64_Word r_sym;	/* Symbol index.  */
+	Elf64_Byte r_ssym;	/* Special symbol.  */
+	Elf64_Byte r_type3;	/* Third relocation.  */
+	Elf64_Byte r_type2;	/* Second relocation.  */
+	Elf64_Byte r_type;	/* First relocation.  */
+} Elf64_Mips_Rela;
+
+#define ELF_CLASS               ELFCLASS64
+#define ELF_R_SYM(val)          (((Elf64_Mips_Rela *)(&val))->r_sym)
+#define ELF_R_TYPE(val)         (((Elf64_Mips_Rela *)(&val))->r_type)
+#define ELF_ST_TYPE(o)          ELF64_ST_TYPE(o)
+#define ELF_ST_BIND(o)          ELF64_ST_BIND(o)
+#define ELF_ST_VISIBILITY(o)    ELF64_ST_VISIBILITY(o)
+
+#include "relocs.c"
diff --git a/arch/mips/boot/tools/relocs_main.c b/arch/mips/boot/tools/relocs_main.c
new file mode 100644
index 0000000..d8fe234
--- /dev/null
+++ b/arch/mips/boot/tools/relocs_main.c
@@ -0,0 +1,84 @@
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <endian.h>
+#include <elf.h>
+
+#include "relocs.h"
+
+void die(char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vfprintf(stderr, fmt, ap);
+	va_end(ap);
+	exit(1);
+}
+
+static void usage(void)
+{
+	die("relocs [--reloc-info|--text|--bin|--keep] vmlinux\n");
+}
+
+int main(int argc, char **argv)
+{
+	int show_reloc_info, as_text, as_bin, keep_relocs;
+	const char *fname;
+	FILE *fp;
+	int i;
+	unsigned char e_ident[EI_NIDENT];
+
+	show_reloc_info = 0;
+	as_text = 0;
+	as_bin = 0;
+	keep_relocs = 0;
+	fname = NULL;
+	for (i = 1; i < argc; i++) {
+		char *arg = argv[i];
+
+		if (*arg == '-') {
+			if (strcmp(arg, "--reloc-info") == 0) {
+				show_reloc_info = 1;
+				continue;
+			}
+			if (strcmp(arg, "--text") == 0) {
+				as_text = 1;
+				continue;
+			}
+			if (strcmp(arg, "--bin") == 0) {
+				as_bin = 1;
+				continue;
+			}
+			if (strcmp(arg, "--keep") == 0) {
+				keep_relocs = 1;
+				continue;
+			}
+		} else if (!fname) {
+			fname = arg;
+			continue;
+		}
+		usage();
+	}
+	if (!fname)
+		usage();
+
+	fp = fopen(fname, "r+");
+	if (!fp)
+		die("Cannot open %s: %s\n", fname, strerror(errno));
+
+	if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT)
+		die("Cannot read %s: %s", fname, strerror(errno));
+
+	rewind(fp);
+	if (e_ident[EI_CLASS] == ELFCLASS64)
+		process_64(fp, as_text,  as_bin, show_reloc_info, keep_relocs);
+	else
+		process_32(fp, as_text, as_bin, show_reloc_info, keep_relocs);
+	fclose(fp);
+	return 0;
+}
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index 1882e64..23c2344 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -19,6 +19,7 @@
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>
 #include <asm/octeon/cvmx-rst-defs.h>
+#include <asm/octeon/cvmx-fpa-defs.h>
 
 static u64 f;
 static u64 rdiv;
@@ -65,9 +66,13 @@
  */
 void octeon_init_cvmcount(void)
 {
+	u64 clk_reg;
 	unsigned long flags;
 	unsigned loops = 2;
 
+	clk_reg = octeon_has_feature(OCTEON_FEATURE_FPA3) ?
+		CVMX_FPA_CLK_COUNT : CVMX_IPD_CLK_COUNT;
+
 	/* Clobber loops so GCC will not unroll the following while loop. */
 	asm("" : "+r" (loops));
 
@@ -77,18 +82,18 @@
 	 * which should give more deterministic timing.
 	 */
 	while (loops--) {
-		u64 ipd_clk_count = cvmx_read_csr(CVMX_IPD_CLK_COUNT);
+		u64 clk_count = cvmx_read_csr(clk_reg);
 		if (rdiv != 0) {
-			ipd_clk_count *= rdiv;
+			clk_count *= rdiv;
 			if (f != 0) {
 				asm("dmultu\t%[cnt],%[f]\n\t"
 				    "mfhi\t%[cnt]"
-				    : [cnt] "+r" (ipd_clk_count)
+				    : [cnt] "+r" (clk_count)
 				    : [f] "r" (f)
 				    : "hi", "lo");
 			}
 		}
-		write_c0_cvmcount(ipd_clk_count);
+		write_c0_cvmcount(clk_count);
 	}
 	local_irq_restore(flags);
 }
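
The dmultu/mfhi pair above computes the upper 64 bits of the 128-bit product
clk_count * f, i.e. it scales the raw counter by a 64-bit fixed-point factor.
A C-only sketch of the same arithmetic, using GCC's unsigned __int128 (which
the in-kernel asm avoids) and the kernel's u64 type; the helper name is
illustrative:

	/* Illustrative equivalent of "dmultu cnt,f; mfhi cnt": multiply the
	 * clock count by the 64-bit scale factor and keep the high 64 bits
	 * of the 128-bit product.
	 */
	static inline u64 scale_clk_count(u64 clk_count, u64 f)
	{
		return (u64)(((unsigned __int128)clk_count * f) >> 64);
	}
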
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 376701f..ff26d02 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -87,6 +87,8 @@
 		return 9;
 	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
 		return 4;
+	if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
+		return 5;
 	else
 		return 3;
 }
@@ -260,6 +262,41 @@
 }
 
 /**
+ * @INTERNAL
+ * Return interface mode for CN7XXX.
+ */
+static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
+{
+	union cvmx_gmxx_inf_mode mode;
+
+	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+	switch (interface) {
+	case 0:
+	case 1:
+		switch (mode.cn68xx.mode) {
+		case 0:
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+		case 1:
+		case 2:
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		case 3:
+			return CVMX_HELPER_INTERFACE_MODE_XAUI;
+		default:
+			return CVMX_HELPER_INTERFACE_MODE_SGMII;
+		}
+	case 2:
+		return CVMX_HELPER_INTERFACE_MODE_NPI;
+	case 3:
+		return CVMX_HELPER_INTERFACE_MODE_LOOP;
+	case 4:
+		return CVMX_HELPER_INTERFACE_MODE_RGMII;
+	default:
+		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+}
+
+/**
  * Get the operating mode of an interface. Depending on the Octeon
  * chip and configuration, this function returns an enumeration
  * of the type of packet I/O supported by an interface.
@@ -278,6 +315,12 @@
 		return CVMX_HELPER_INTERFACE_MODE_DISABLED;
 
 	/*
+	 * OCTEON III models
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
+		return __cvmx_get_mode_cn7xxx(interface);
+
+	/*
 	 * Octeon II models
 	 */
 	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
index e59d1b7..2f415d9 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
@@ -68,7 +68,7 @@
 		gmx_rx_int_en.s.pause_drp = 1;
 		/* Skipping gmx_rx_int_en.s.reserved_16_18 */
 		/*gmx_rx_int_en.s.ifgerr = 1; */
-		/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -89,7 +89,7 @@
 		/*gmx_rx_int_en.s.phy_spd = 1; */
 		/*gmx_rx_int_en.s.phy_link = 1; */
 		/*gmx_rx_int_en.s.ifgerr = 1; */
-		/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -112,7 +112,7 @@
 		/*gmx_rx_int_en.s.phy_spd = 1; */
 		/*gmx_rx_int_en.s.phy_link = 1; */
 		/*gmx_rx_int_en.s.ifgerr = 1; */
-		/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -134,7 +134,7 @@
 		/*gmx_rx_int_en.s.phy_spd = 1; */
 		/*gmx_rx_int_en.s.phy_link = 1; */
 		/*gmx_rx_int_en.s.ifgerr = 1; */
-		/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -156,7 +156,7 @@
 		/*gmx_rx_int_en.s.phy_spd = 1; */
 		/*gmx_rx_int_en.s.phy_link = 1; */
 		/*gmx_rx_int_en.s.ifgerr = 1; */
-		/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -179,7 +179,7 @@
 		/*gmx_rx_int_en.s.phy_spd = 1; */
 		/*gmx_rx_int_en.s.phy_link = 1; */
 		/*gmx_rx_int_en.s.ifgerr = 1; */
-		/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -209,7 +209,7 @@
 		gmx_rx_int_en.s.pause_drp = 1;
 		/* Skipping gmx_rx_int_en.s.reserved_16_18 */
 		/*gmx_rx_int_en.s.ifgerr = 1; */
-		/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
 		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
 		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
 		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index 87be167..676fab5 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -189,7 +189,7 @@
 	/*
 	 * Set the size of the PKO command buffers to an odd number of
 	 * 64bit words. This allows the normal two word send to stay
-	 * aligned and never span a comamnd word buffer.
+	 * aligned and never span a command word buffer.
 	 */
 	config.u64 = 0;
 	config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
index 3d17fac..cc1b1d2 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -32,86 +32,22 @@
 #include <linux/module.h>
 
 #include <asm/octeon/cvmx.h>
-#include <asm/octeon/cvmx-spinlock.h>
 #include <asm/octeon/cvmx-sysinfo.h>
 
-/**
+/*
  * This structure defines the private state maintained by sysinfo module.
- *
  */
-static struct {
-	struct cvmx_sysinfo sysinfo;	   /* system information */
-	cvmx_spinlock_t lock;	   /* mutex spinlock */
-
-} state = {
-	.lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER
-};
-
+static struct cvmx_sysinfo sysinfo;	   /* system information */
 
 /*
- * Global variables that define the min/max of the memory region set
- * up for 32 bit userspace access.
- */
-uint64_t linux_mem32_min;
-uint64_t linux_mem32_max;
-uint64_t linux_mem32_wired;
-uint64_t linux_mem32_offset;
-
-/**
- * This function returns the application information as obtained
+ * Returns the application information as obtained
  * by the bootloader.  This provides the core mask of the cores
  * running the same application image, as well as the physical
  * memory regions available to the core.
- *
- * Returns  Pointer to the boot information structure
- *
  */
 struct cvmx_sysinfo *cvmx_sysinfo_get(void)
 {
-	return &(state.sysinfo);
+	return &sysinfo;
 }
 EXPORT_SYMBOL(cvmx_sysinfo_get);
 
-/**
- * This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.)	to configure the minimal fields that
- * are required to use simple executive files directly.
- *
- * Locking (if required) must be handled outside of this
- * function
- *
- * @phy_mem_desc_ptr:
- *		     Pointer to global physical memory descriptor
- *		     (bootmem descriptor) @board_type: Octeon board
- *		     type enumeration
- *
- * @board_rev_major:
- *		     Board major revision
- * @board_rev_minor:
- *		     Board minor revision
- * @cpu_clock_hz:
- *		     CPU clock freqency in hertz
- *
- * Returns 0: Failure
- *	   1: success
- */
-int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
-				    uint16_t board_type,
-				    uint8_t board_rev_major,
-				    uint8_t board_rev_minor,
-				    uint32_t cpu_clock_hz)
-{
-
-	/* The sysinfo structure was already initialized */
-	if (state.sysinfo.board_type)
-		return 0;
-
-	memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo));
-	state.sysinfo.phy_mem_desc_ptr = phy_mem_desc_ptr;
-	state.sysinfo.board_type = board_type;
-	state.sysinfo.board_rev_major = board_rev_major;
-	state.sysinfo.board_rev_minor = board_rev_minor;
-	state.sysinfo.cpu_clock_hz = cpu_clock_hz;
-
-	return 1;
-}
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
index b2104bd..d08a2bc 100644
--- a/arch/mips/cavium-octeon/executive/octeon-model.c
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -71,11 +71,11 @@
 	uint32_t fuse_data = 0;
 
 	fus3.u64 = 0;
-	if (!OCTEON_IS_MODEL(OCTEON_CN6XXX))
+	if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
 		fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
 	fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
 	fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
-	num_cores = cvmx_pop(cvmx_read_csr(CVMX_CIU_FUSE));
+	num_cores = cvmx_octeon_num_cores();
 
 	/* Make sure the non existent devices look disabled */
 	switch ((chip_id >> 8) & 0xff) {
@@ -121,6 +121,15 @@
 	 * later.
 	 */
 	switch (num_cores) {
+	case 48:
+		core_model = "90";
+		break;
+	case 44:
+		core_model = "88";
+		break;
+	case 40:
+		core_model = "85";
+		break;
 	case 32:
 		core_model = "80";
 		break;
@@ -297,7 +306,7 @@
 				if (fus_dat3.s.nozip)
 					suffix = "SCP";
 
-				if (fus_dat3.s.bar2_en)
+				if (fus_dat3.cn56xx.bar2_en)
 					suffix = "NSPB2";
 			}
 			if (fus3.cn56xx.crip_1024k)
@@ -369,6 +378,73 @@
 		else
 			suffix = "AAP";
 		break;
+	case 0x94:		/* CNF71XX */
+		family = "F71";
+		if (fus_dat3.cnf71xx.nozip)
+			suffix = "SCP";
+		else
+			suffix = "AAP";
+		break;
+	case 0x95:		/* CN78XX */
+		if (num_cores == 6)	/* Other core counts match generic */
+			core_model = "35";
+		if (OCTEON_IS_MODEL(OCTEON_CN76XX))
+			family = "76";
+		else
+			family = "78";
+		if (fus_dat3.cn78xx.l2c_crip == 2)
+			family = "77";
+		if (fus_dat3.cn78xx.nozip
+		    && fus_dat3.cn78xx.nodfa_dte
+		    && fus_dat3.cn78xx.nohna_dte) {
+			if (fus_dat3.cn78xx.nozip &&
+				!fus_dat2.cn78xx.raid_en &&
+				fus_dat3.cn78xx.nohna_dte) {
+				suffix = "CP";
+			} else {
+				suffix = "SCP";
+			}
+		} else if (fus_dat2.cn78xx.raid_en == 0)
+			suffix = "HCP";
+		else
+			suffix = "AAP";
+		break;
+	case 0x96:		/* CN70XX */
+		family = "70";
+		if (cvmx_read_csr(CVMX_MIO_FUS_PDF) & (0x1ULL << 32))
+			family = "71";
+		if (fus_dat2.cn70xx.nocrypto)
+			suffix = "CP";
+		else if (fus_dat3.cn70xx.nodfa_dte)
+			suffix = "SCP";
+		else
+			suffix = "AAP";
+		break;
+	case 0x97:		/* CN73XX */
+		if (num_cores == 6)	/* Other core counts match generic */
+			core_model = "35";
+		family = "73";
+		if (fus_dat3.cn73xx.l2c_crip == 2)
+			family = "72";
+		if (fus_dat3.cn73xx.nozip
+				&& fus_dat3.cn73xx.nodfa_dte
+				&& fus_dat3.cn73xx.nohna_dte) {
+			if (!fus_dat2.cn73xx.raid_en)
+				suffix = "CP";
+			else
+				suffix = "SCP";
+		} else
+			suffix = "AAP";
+		break;
+	case 0x98:		/* CN75XX */
+		family = "F75";
+		if (fus_dat3.cn78xx.nozip
+		    && fus_dat3.cn78xx.nodfa_dte
+		    && fus_dat3.cn78xx.nohna_dte)
+			suffix = "SCP";
+		else
+			suffix = "AAP";
+		break;
 	default:
 		family = "XX";
 		core_model = "XX";
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 4f9eb05..368eb49 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2014 Cavium, Inc.
+ * Copyright (C) 2004-2016 Cavium, Inc.
  */
 
 #include <linux/of_address.h>
@@ -19,16 +19,53 @@
 
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-ciu2-defs.h>
+#include <asm/octeon/cvmx-ciu3-defs.h>
 
 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
 static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
+static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);
+
+static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
+static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);
+#define CIU3_MBOX_PER_CORE 10
+
+/*
+ * The 8 most significant bits of the intsn identify the interrupt major block.
+ * Each major block might use its own interrupt domain. Thus 256 domains are
+ * needed.
+ */
+#define MAX_CIU3_DOMAINS		256
+
+typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);
+
+/* Information for each ciu3 in the system */
+struct octeon_ciu3_info {
+	u64			ciu3_addr;
+	int			node;
+	struct irq_domain	*domain[MAX_CIU3_DOMAINS];
+	octeon_ciu3_intsn2hw_t	intsn2hw[MAX_CIU3_DOMAINS];
+};
+
+/* Each ciu3 in the system uses its own data (one ciu3 per node) */
+static struct octeon_ciu3_info	*octeon_ciu3_info_per_node[4];
 
 struct octeon_irq_ciu_domain_data {
 	int num_sum;  /* number of sum registers (2 or 3). */
 };
 
-static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
+/* Register offsets from ciu3_addr */
+#define CIU3_CONST		0x220
+#define CIU3_IDT_CTL(_idt)	((_idt) * 8 + 0x110000)
+#define CIU3_IDT_PP(_idt, _idx)	((_idt) * 32 + (_idx) * 8 + 0x120000)
+#define CIU3_IDT_IO(_idt)	((_idt) * 8 + 0x130000)
+#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
+#define CIU3_DEST_IO_INT(_io)	((_io) * 8 + 0x210000)
+#define CIU3_ISC_CTL(_intsn)	((_intsn) * 8 + 0x80000000)
+#define CIU3_ISC_W1C(_intsn)	((_intsn) * 8 + 0x90000000)
+#define CIU3_ISC_W1S(_intsn)	((_intsn) * 8 + 0xa0000000)
+
+static __read_mostly int octeon_irq_ciu_to_irq[8][64];
 
 struct octeon_ciu_chip_data {
 	union {
@@ -39,10 +76,11 @@
 		struct {		/* only used for ciu/ciu2 */
 			u8 line;
 			u8 bit;
-			u8 gpio_line;
 		};
 	};
+	int gpio_line;
 	int current_cpu;	/* Next CPU expected to take this irq */
+	int ciu_node; /* NUMA node number of the CIU */
 };
 
 struct octeon_core_chip_data {
@@ -626,6 +664,18 @@
 	}
 }
 
+static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
+{
+	irqd_set_trigger_type(data, t);
+
+	if (t & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(data, handle_edge_irq);
+	else
+		irq_set_handler_locked(data, handle_level_irq);
+
+	return IRQ_SET_MASK_OK;
+}
+
 static void octeon_irq_gpio_setup(struct irq_data *data)
 {
 	union cvmx_gpio_bit_cfgx cfg;
@@ -663,7 +713,7 @@
 	irqd_set_trigger_type(data, t);
 	octeon_irq_gpio_setup(data);
 
-	if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
+	if (t & IRQ_TYPE_EDGE_BOTH)
 		irq_set_handler_locked(data, handle_edge_irq);
 	else
 		irq_set_handler_locked(data, handle_level_irq);
@@ -863,6 +913,16 @@
 }
 #endif
 
+static unsigned int edge_startup(struct irq_data *data)
+{
+	/* ack any pending edge-irq at startup, so there is
+	 * an _edge_ to fire on when the event reappears.
+	 */
+	data->chip->irq_ack(data);
+	data->chip->irq_enable(data);
+	return 0;
+}
+
 /*
  * Newer octeon chips have support for lockless CIU operation.
  */
@@ -1158,16 +1218,6 @@
 static struct irq_chip *octeon_irq_ciu_chip_edge;
 static struct irq_chip *octeon_irq_gpio_chip;
 
-static bool octeon_irq_virq_in_range(unsigned int virq)
-{
-	/* We cannot let it overflow the mapping array. */
-	if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
-		return true;
-
-	WARN_ONCE(true, "virq out of range %u.\n", virq);
-	return false;
-}
-
 static int octeon_irq_ciu_map(struct irq_domain *d,
 			      unsigned int virq, irq_hw_number_t hw)
 {
@@ -1176,13 +1226,6 @@
 	unsigned int bit = hw & 63;
 	struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
-	if (!octeon_irq_virq_in_range(virq))
-		return -EINVAL;
-
-	/* Don't map irq if it is reserved for GPIO. */
-	if (line == 0 && bit >= 16 && bit <32)
-		return 0;
-
 	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 
@@ -1215,9 +1258,6 @@
 	unsigned int line, bit;
 	int r;
 
-	if (!octeon_irq_virq_in_range(virq))
-		return -EINVAL;
-
 	line = (hw + gpiod->base_hwirq) >> 6;
 	bit = (hw + gpiod->base_hwirq) & 63;
 	if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
@@ -1899,9 +1939,6 @@
 	unsigned int line = hw >> 6;
 	unsigned int bit = hw & 63;
 
-	if (!octeon_irq_virq_in_range(virq))
-		return -EINVAL;
-
 	/*
 	 * Don't map irq if it is reserved for GPIO.
 	 * (Line 7 are the GPIO lines.)
@@ -2294,10 +2331,598 @@
 	return 0;
 }
 
+int octeon_irq_ciu3_xlat(struct irq_domain *d,
+			 struct device_node *node,
+			 const u32 *intspec,
+			 unsigned int intsize,
+			 unsigned long *out_hwirq,
+			 unsigned int *out_type)
+{
+	struct octeon_ciu3_info *ciu3_info = d->host_data;
+	unsigned int hwirq, type, intsn_major;
+	union cvmx_ciu3_iscx_ctl isc;
+
+	if (intsize < 2)
+		return -EINVAL;
+	hwirq = intspec[0];
+	type = intspec[1];
+
+	if (hwirq >= (1 << 20))
+		return -EINVAL;
+
+	intsn_major = hwirq >> 12;
+	switch (intsn_major) {
+	case 0x04: /* Software handled separately. */
+		return -EINVAL;
+	default:
+		break;
+	}
+
+	isc.u64 =  cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
+	if (!isc.s.imp)
+		return -EINVAL;
+
+	switch (type) {
+	case 4: /* official value for level triggering. */
+		*out_type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	case 0: /* unofficial value, but we might as well let it work. */
+	case 1: /* official value for edge triggering. */
+		*out_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	default: /* Nothing else is acceptable. */
+		return -EINVAL;
+	}
+
+	*out_hwirq = hwirq;
+
+	return 0;
+}
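A hedged sketch of the two-cell specifier decode above, minus the CIU3_ISC_CTL implemented-bit check (that needs hardware access); the intsn values are made up:

#include <stdio.h>

static int decode(unsigned int intsn, unsigned int type)
{
	if (intsn >= (1 << 20))
		return -1;		/* outside the 20-bit intsn space */
	if ((intsn >> 12) == 0x04)
		return -1;		/* software/mailbox block, handled separately */
	if (type == 4)
		return 'L';		/* level high */
	if (type == 0 || type == 1)
		return 'E';		/* rising edge */
	return -1;
}

int main(void)
{
	printf("%c %c %d\n", decode(0x08001, 4), decode(0x08001, 1),
	       decode(0x04002, 4));	/* prints: L E -1 */
	return 0;
}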
+
+void octeon_irq_ciu3_enable(struct irq_data *data)
+{
+	int cpu;
+	union cvmx_ciu3_iscx_ctl isc_ctl;
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	u64 isc_ctl_addr;
+
+	struct octeon_ciu_chip_data *cd;
+
+	cpu = next_cpu_for_irq(data);
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.en = 1;
+	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+
+	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+	isc_ctl.u64 = 0;
+	isc_ctl.s.en = 1;
+	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
+	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+	cvmx_read_csr(isc_ctl_addr);
+}
+
+void octeon_irq_ciu3_disable(struct irq_data *data)
+{
+	u64 isc_ctl_addr;
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.en = 1;
+
+	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+	cvmx_write_csr(isc_ctl_addr, 0);
+	cvmx_read_csr(isc_ctl_addr);
+}
+
+void octeon_irq_ciu3_ack(struct irq_data *data)
+{
+	u64 isc_w1c_addr;
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	struct octeon_ciu_chip_data *cd;
+	u32 trigger_type = irqd_get_trigger_type(data);
+
+	/*
+	 * We use a single irq_chip, so we have to do nothing to ack a
+	 * level interrupt.
+	 */
+	if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
+		return;
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.raw = 1;
+
+	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+	cvmx_read_csr(isc_w1c_addr);
+}
+
+void octeon_irq_ciu3_mask(struct irq_data *data)
+{
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	u64 isc_w1c_addr;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.en = 1;
+
+	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+	cvmx_read_csr(isc_w1c_addr);
+}
+
+void octeon_irq_ciu3_mask_ack(struct irq_data *data)
+{
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	u64 isc_w1c_addr;
+	struct octeon_ciu_chip_data *cd;
+	u32 trigger_type = irqd_get_trigger_type(data);
+
+	cd = irq_data_get_irq_chip_data(data);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.en = 1;
+
+	/*
+	 * We use a single irq_chip, so only ack an edge (!level)
+	 * interrupt.
+	 */
+	if (trigger_type & IRQ_TYPE_EDGE_BOTH)
+		isc_w1c.s.raw = 1;
+
+	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+	cvmx_read_csr(isc_w1c_addr);
+}
+
+#ifdef CONFIG_SMP
+int octeon_irq_ciu3_set_affinity(struct irq_data *data,
+				 const struct cpumask *dest, bool force)
+{
+	union cvmx_ciu3_iscx_ctl isc_ctl;
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	u64 isc_ctl_addr;
+	int cpu;
+	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+	if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
+		return -EINVAL;
+
+	if (!enable_one)
+		return IRQ_SET_MASK_OK;
+
+	cd = irq_data_get_irq_chip_data(data);
+	cpu = cpumask_first(dest);
+	if (cpu >= nr_cpu_ids)
+		cpu = smp_processor_id();
+	cd->current_cpu = cpu;
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.en = 1;
+	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+
+	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+	isc_ctl.u64 = 0;
+	isc_ctl.s.en = 1;
+	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
+	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+	cvmx_read_csr(isc_ctl_addr);
+
+	return IRQ_SET_MASK_OK;
+}
+#endif
+
+static struct irq_chip octeon_irq_chip_ciu3 = {
+	.name = "CIU3",
+	.irq_startup = edge_startup,
+	.irq_enable = octeon_irq_ciu3_enable,
+	.irq_disable = octeon_irq_ciu3_disable,
+	.irq_ack = octeon_irq_ciu3_ack,
+	.irq_mask = octeon_irq_ciu3_mask,
+	.irq_mask_ack = octeon_irq_ciu3_mask_ack,
+	.irq_unmask = octeon_irq_ciu3_enable,
+	.irq_set_type = octeon_irq_ciu_set_type,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = octeon_irq_ciu3_set_affinity,
+	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
+			 irq_hw_number_t hw, struct irq_chip *chip)
+{
+	struct octeon_ciu3_info *ciu3_info = d->host_data;
+	struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
+						       ciu3_info->node);
+	if (!cd)
+		return -ENOMEM;
+	cd->intsn = hw;
+	cd->current_cpu = -1;
+	cd->ciu3_addr = ciu3_info->ciu3_addr;
+	cd->ciu_node = ciu3_info->node;
+	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
+	irq_set_chip_data(virq, cd);
+
+	return 0;
+}
+
+static int octeon_irq_ciu3_map(struct irq_domain *d,
+			       unsigned int virq, irq_hw_number_t hw)
+{
+	return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
+}
+
+static struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
+	.map = octeon_irq_ciu3_map,
+	.unmap = octeon_irq_free_cd,
+	.xlate = octeon_irq_ciu3_xlat,
+};
+
+static void octeon_irq_ciu3_ip2(void)
+{
+	union cvmx_ciu3_destx_pp_int dest_pp_int;
+	struct octeon_ciu3_info *ciu3_info;
+	u64 ciu3_addr;
+
+	ciu3_info = __this_cpu_read(octeon_ciu3_info);
+	ciu3_addr = ciu3_info->ciu3_addr;
+
+	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));
+
+	if (likely(dest_pp_int.s.intr)) {
+		irq_hw_number_t intsn = dest_pp_int.s.intsn;
+		irq_hw_number_t hw;
+		struct irq_domain *domain;
+		/* Get the domain to use from the major block */
+		int block = intsn >> 12;
+		int ret;
+
+		domain = ciu3_info->domain[block];
+		if (ciu3_info->intsn2hw[block])
+			hw = ciu3_info->intsn2hw[block](domain, intsn);
+		else
+			hw = intsn;
+
+		ret = handle_domain_irq(domain, hw, NULL);
+		if (ret < 0) {
+			union cvmx_ciu3_iscx_w1c isc_w1c;
+			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
+
+			isc_w1c.u64 = 0;
+			isc_w1c.s.en = 1;
+			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+			cvmx_read_csr(isc_w1c_addr);
+			spurious_interrupt();
+		}
+	} else {
+		spurious_interrupt();
+	}
+}
+
+/*
+ * 10 mbox per core starting from zero.
+ * Base mbox is core * 10
+ */
+static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
+{
+	/* SW (mbox) are 0x04 in bits 12..19 */
+	return 0x04000 + CIU3_MBOX_PER_CORE * core;
+}
+
+static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
+{
+	return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
+}
+
+static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
+{
+	int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;
+
+	return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
+}
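Assuming CIU3_MBOX_PER_CORE is 10 (per the comment above), a quick standalone check of the core/mailbox to intsn mapping:

#include <stdio.h>

#define MBOX_PER_CORE 10	/* assumed value of CIU3_MBOX_PER_CORE */

int main(void)
{
	int core = 5, mbox = 2;	/* hypothetical local core and mailbox slot */
	unsigned int intsn = 0x04000 + MBOX_PER_CORE * core + mbox;

	printf("core %d mbox %d -> intsn %#x\n", core, mbox, intsn);	/* 0x4034 */
	return 0;
}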
+
+static void octeon_irq_ciu3_mbox(void)
+{
+	union cvmx_ciu3_destx_pp_int dest_pp_int;
+	struct octeon_ciu3_info *ciu3_info;
+	u64 ciu3_addr;
+	int core = cvmx_get_local_core_num();
+
+	ciu3_info = __this_cpu_read(octeon_ciu3_info);
+	ciu3_addr = ciu3_info->ciu3_addr;
+
+	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));
+
+	if (likely(dest_pp_int.s.intr)) {
+		irq_hw_number_t intsn = dest_pp_int.s.intsn;
+		int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);
+
+		if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
+			do_IRQ(mbox + OCTEON_IRQ_MBOX0);
+		} else {
+			union cvmx_ciu3_iscx_w1c isc_w1c;
+			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
+
+			isc_w1c.u64 = 0;
+			isc_w1c.s.en = 1;
+			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+			cvmx_read_csr(isc_w1c_addr);
+			spurious_interrupt();
+		}
+	} else {
+		spurious_interrupt();
+	}
+}
+
+void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
+{
+	struct octeon_ciu3_info *ciu3_info;
+	unsigned int intsn;
+	union cvmx_ciu3_iscx_w1s isc_w1s;
+	u64 isc_w1s_addr;
+
+	if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
+		return;
+
+	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
+	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
+	isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);
+
+	isc_w1s.u64 = 0;
+	isc_w1s.s.raw = 1;
+
+	cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
+	cvmx_read_csr(isc_w1s_addr);
+}
+
+static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
+{
+	struct octeon_ciu3_info *ciu3_info;
+	unsigned int intsn;
+	u64 isc_ctl_addr, isc_w1c_addr;
+	union cvmx_ciu3_iscx_ctl isc_ctl;
+	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
+	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
+	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
+	isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);
+
+	isc_ctl.u64 = 0;
+	isc_ctl.s.en = 1;
+
+	cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
+	cvmx_write_csr(isc_ctl_addr, 0);
+	if (en) {
+		unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
+
+		isc_ctl.u64 = 0;
+		isc_ctl.s.en = 1;
+		isc_ctl.s.idt = idt;
+		cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+	}
+	cvmx_read_csr(isc_ctl_addr);
+}
+
+static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
+{
+	int cpu;
+	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
+
+	for_each_online_cpu(cpu)
+		octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
+}
+
+static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
+{
+	int cpu;
+	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
+
+	for_each_online_cpu(cpu)
+		octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
+}
+
+static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
+{
+	struct octeon_ciu3_info *ciu3_info;
+	unsigned int intsn;
+	u64 isc_w1c_addr;
+	union cvmx_ciu3_iscx_w1c isc_w1c;
+	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+	intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);
+
+	isc_w1c.u64 = 0;
+	isc_w1c.s.raw = 1;
+
+	ciu3_info = __this_cpu_read(octeon_ciu3_info);
+	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
+	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+	cvmx_read_csr(isc_w1c_addr);
+}
+
+static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
+{
+	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
+}
+
+static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
+{
+	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
+}
+
+static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
+{
+	u64 b = ciu3_info->ciu3_addr;
+	int idt_ip2, idt_ip3, idt_ip4;
+	int unused_idt2;
+	int core = cvmx_get_local_core_num();
+	int i;
+
+	__this_cpu_write(octeon_ciu3_info, ciu3_info);
+
+	/*
+	 * 4 idt per core starting from 1 because zero is reserved.
+	 * Base idt per core is 4 * core + 1
+	 */
+	idt_ip2 = core * 4 + 1;
+	idt_ip3 = core * 4 + 2;
+	idt_ip4 = core * 4 + 3;
+	unused_idt2 = core * 4 + 4;
+	__this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
+	__this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);
+
+	/* ip2 interrupts for this CPU */
+	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
+	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
+	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);
+
+	/* ip3 interrupts for this CPU */
+	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
+	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
+	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);
+
+	/* ip4 interrupts for this CPU */
+	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
+	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
+	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);
+
+	cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
+	cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
+	cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);
+
+	for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
+		unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);
+
+		cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
+		cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
+	}
+
+	return 0;
+}
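A minimal check of the per-core IDT numbering laid out in the comment above (IDT 0 reserved, four IDTs per core: ip2, ip3, ip4 plus one spare):

#include <stdio.h>

int main(void)
{
	int core;

	for (core = 0; core < 3; core++)	/* a few example cores */
		printf("core %d: ip2=%d ip3=%d ip4=%d spare=%d\n", core,
		       core * 4 + 1, core * 4 + 2, core * 4 + 3, core * 4 + 4);
	return 0;
}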
+
+static void octeon_irq_setup_secondary_ciu3(void)
+{
+	struct octeon_ciu3_info *ciu3_info;
+
+	ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
+	octeon_irq_ciu3_alloc_resources(ciu3_info);
+	irq_cpu_online();
+
+	/* Enable the CIU lines */
+	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+	if (octeon_irq_use_ip4)
+		set_c0_status(STATUSF_IP4);
+	else
+		clear_c0_status(STATUSF_IP4);
+}
+
+static struct irq_chip octeon_irq_chip_ciu3_mbox = {
+	.name = "CIU3-M",
+	.irq_enable = octeon_irq_ciu3_mbox_enable,
+	.irq_disable = octeon_irq_ciu3_mbox_disable,
+	.irq_ack = octeon_irq_ciu3_mbox_ack,
+
+	.irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
+	.irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
+	.flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
+				       struct device_node *parent)
+{
+	int i;
+	int node;
+	struct irq_domain *domain;
+	struct octeon_ciu3_info *ciu3_info;
+	const __be32 *zero_addr;
+	u64 base_addr;
+	union cvmx_ciu3_const consts;
+
+	node = 0; /* of_node_to_nid(ciu_node); */
+	ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);
+
+	if (!ciu3_info)
+		return -ENOMEM;
+
+	zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
+	if (WARN_ON(!zero_addr))
+		return -EINVAL;
+
+	base_addr = of_translate_address(ciu_node, zero_addr);
+	base_addr = (u64)phys_to_virt(base_addr);
+
+	ciu3_info->ciu3_addr = base_addr;
+	ciu3_info->node = node;
+
+	consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);
+
+	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;
+
+	octeon_irq_ip2 = octeon_irq_ciu3_ip2;
+	octeon_irq_ip3 = octeon_irq_ciu3_mbox;
+	octeon_irq_ip4 = octeon_irq_ip4_mask;
+
+	if (node == cvmx_get_node_num()) {
+		/* Mips internal */
+		octeon_irq_init_core();
+
+		/* Only do per CPU things if it is the CIU of the boot node. */
+		i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
+		WARN_ON(i < 0);
+
+		for (i = 0; i < 8; i++)
+			irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
+						 &octeon_irq_chip_ciu3_mbox, handle_percpu_irq);
+	}
+
+	/*
+	 * Initialize all domains to use the default domain. Specific major
+	 * blocks will overwrite the default domain as needed.
+	 */
+	domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
+				     ciu3_info);
+	for (i = 0; i < MAX_CIU3_DOMAINS; i++)
+		ciu3_info->domain[i] = domain;
+
+	octeon_ciu3_info_per_node[node] = ciu3_info;
+
+	if (node == cvmx_get_node_num()) {
+		/* Only do per CPU things if it is the CIU of the boot node. */
+		octeon_irq_ciu3_alloc_resources(ciu3_info);
+		if (node == 0)
+			irq_set_default_host(domain);
+
+		octeon_irq_use_ip4 = false;
+		/* Enable the CIU lines */
+		set_c0_status(STATUSF_IP2 | STATUSF_IP3);
+		clear_c0_status(STATUSF_IP4);
+	}
+
+	return 0;
+}
+
 static struct of_device_id ciu_types[] __initdata = {
 	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
 	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
 	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
+	{.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
 	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
 	{}
 };
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index d113c8d..7aeafed 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -13,6 +13,7 @@
 #include <linux/i2c.h>
 #include <linux/usb.h>
 #include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -525,10 +526,17 @@
 
 static void __init octeon_fdt_set_mac_addr(int n, u64 *pmac)
 {
+	const u8 *old_mac;
+	int old_len;
 	u8 new_mac[6];
 	u64 mac = *pmac;
 	int r;
 
+	old_mac = fdt_getprop(initial_boot_params, n, "local-mac-address",
+			      &old_len);
+	if (!old_mac || old_len != 6 || is_valid_ether_addr(old_mac))
+		return;
+
 	new_mac[0] = (mac >> 40) & 0xff;
 	new_mac[1] = (mac >> 32) & 0xff;
 	new_mac[2] = (mac >> 24) & 0xff;
@@ -560,7 +568,7 @@
 	fdt_nop_node(initial_boot_params, node);
 }
 
-static void __init octeon_fdt_pip_port(int iface, int i, int p, int max, u64 *pmac)
+static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
 {
 	char name_buffer[20];
 	int eth;
@@ -583,10 +591,9 @@
 
 	phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
 	octeon_fdt_set_phy(eth, phy_addr);
-	octeon_fdt_set_mac_addr(eth, pmac);
 }
 
-static void __init octeon_fdt_pip_iface(int pip, int idx, u64 *pmac)
+static void __init octeon_fdt_pip_iface(int pip, int idx)
 {
 	char name_buffer[20];
 	int iface;
@@ -602,7 +609,73 @@
 		count = cvmx_helper_ports_on_interface(idx);
 
 	for (p = 0; p < 16; p++)
-		octeon_fdt_pip_port(iface, idx, p, count - 1, pmac);
+		octeon_fdt_pip_port(iface, idx, p, count - 1);
+}
+
+void __init octeon_fill_mac_addresses(void)
+{
+	const char *alias_prop;
+	char name_buffer[20];
+	u64 mac_addr_base;
+	int aliases;
+	int pip;
+	int i;
+
+	aliases = fdt_path_offset(initial_boot_params, "/aliases");
+	if (aliases < 0)
+		return;
+
+	mac_addr_base =
+		((octeon_bootinfo->mac_addr_base[0] & 0xffull)) << 40 |
+		((octeon_bootinfo->mac_addr_base[1] & 0xffull)) << 32 |
+		((octeon_bootinfo->mac_addr_base[2] & 0xffull)) << 24 |
+		((octeon_bootinfo->mac_addr_base[3] & 0xffull)) << 16 |
+		((octeon_bootinfo->mac_addr_base[4] & 0xffull)) << 8 |
+		 (octeon_bootinfo->mac_addr_base[5] & 0xffull);
+
+	for (i = 0; i < 2; i++) {
+		int mgmt;
+
+		snprintf(name_buffer, sizeof(name_buffer), "mix%d", i);
+		alias_prop = fdt_getprop(initial_boot_params, aliases,
+					 name_buffer, NULL);
+		if (!alias_prop)
+			continue;
+		mgmt = fdt_path_offset(initial_boot_params, alias_prop);
+		if (mgmt < 0)
+			continue;
+		octeon_fdt_set_mac_addr(mgmt, &mac_addr_base);
+	}
+
+	alias_prop = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
+	if (!alias_prop)
+		return;
+
+	pip = fdt_path_offset(initial_boot_params, alias_prop);
+	if (pip < 0)
+		return;
+
+	for (i = 0; i <= 4; i++) {
+		int iface;
+		int p;
+
+		snprintf(name_buffer, sizeof(name_buffer), "interface@%d", i);
+		iface = fdt_subnode_offset(initial_boot_params, pip,
+					   name_buffer);
+		if (iface < 0)
+			continue;
+		for (p = 0; p < 16; p++) {
+			int eth;
+
+			snprintf(name_buffer, sizeof(name_buffer),
+				 "ethernet@%x", p);
+			eth = fdt_subnode_offset(initial_boot_params, iface,
+						 name_buffer);
+			if (eth < 0)
+				continue;
+			octeon_fdt_set_mac_addr(eth, &mac_addr_base);
+		}
+	}
 }
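The mac_addr_base assembly above packs six bootinfo bytes into one u64, most significant byte first; a standalone sketch with made-up byte values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t base[6] = { 0x00, 0x0f, 0xb7, 0x10, 0x20, 0x30 };	/* hypothetical */
	uint64_t mac = 0;
	int i;

	for (i = 0; i < 6; i++)
		mac = (mac << 8) | base[i];

	printf("base MAC %012llx\n", (unsigned long long)mac);	/* 000fb7102030 */
	return 0;
}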
 
 int __init octeon_prune_device_tree(void)
@@ -612,7 +685,6 @@
 	const char *alias_prop;
 	char name_buffer[20];
 	int aliases;
-	u64 mac_addr_base;
 
 	if (fdt_check_header(initial_boot_params))
 		panic("Corrupt Device Tree.");
@@ -623,15 +695,6 @@
 		return -EINVAL;
 	}
 
-
-	mac_addr_base =
-		((octeon_bootinfo->mac_addr_base[0] & 0xffull)) << 40 |
-		((octeon_bootinfo->mac_addr_base[1] & 0xffull)) << 32 |
-		((octeon_bootinfo->mac_addr_base[2] & 0xffull)) << 24 |
-		((octeon_bootinfo->mac_addr_base[3] & 0xffull)) << 16 |
-		((octeon_bootinfo->mac_addr_base[4] & 0xffull)) << 8 |
-		(octeon_bootinfo->mac_addr_base[5] & 0xffull);
-
 	if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
 		max_port = 2;
 	else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
@@ -660,7 +723,6 @@
 			} else {
 				int phy_addr = cvmx_helper_board_get_mii_address(CVMX_HELPER_BOARD_MGMT_IPD_PORT + i);
 				octeon_fdt_set_phy(mgmt, phy_addr);
-				octeon_fdt_set_mac_addr(mgmt, &mac_addr_base);
 			}
 		}
 	}
@@ -670,7 +732,7 @@
 		int pip = fdt_path_offset(initial_boot_params, pip_path);
 		if (pip	 >= 0)
 			for (i = 0; i <= 4; i++)
-				octeon_fdt_pip_iface(pip, i, &mac_addr_base);
+				octeon_fdt_pip_iface(pip, i);
 	}
 
 	/* I2C */
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index cd7101f..64f852b 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -43,8 +43,6 @@
 #include <asm/octeon/cvmx-mio-defs.h>
 #include <asm/octeon/cvmx-rst-defs.h>
 
-extern struct plat_smp_ops octeon_smp_ops;
-
 #ifdef CONFIG_PCI
 extern void pci_console_init(const char *arg);
 #endif
@@ -466,15 +464,25 @@
 
 static char __read_mostly octeon_system_type[80];
 
-static int __init init_octeon_system_type(void)
+static void __init init_octeon_system_type(void)
 {
-	snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
-		cvmx_board_type_to_string(octeon_bootinfo->board_type),
-		octeon_model_get_string(read_c0_prid()));
+	char const *board_type;
 
-	return 0;
+	board_type = cvmx_board_type_to_string(octeon_bootinfo->board_type);
+	if (board_type == NULL) {
+		struct device_node *root;
+		int ret;
+
+		root = of_find_node_by_path("/");
+		ret = of_property_read_string(root, "model", &board_type);
+		of_node_put(root);
+		if (ret)
+			board_type = "Unsupported Board";
+	}
+
+	snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
+		 board_type, octeon_model_get_string(read_c0_prid()));
 }
-early_initcall(init_octeon_system_type);
 
 /**
  * Return a string representing the system type
@@ -492,8 +500,6 @@
 void octeon_user_io_init(void)
 {
 	union octeon_cvmemctl cvmmemctl;
-	union cvmx_iob_fau_timeout fau_timeout;
-	union cvmx_pow_nw_tim nm_tim;
 
 	/* Get the current settings for CP0_CVMMEMCTL_REG */
 	cvmmemctl.u64 = read_c0_cvmmemctl();
@@ -595,17 +601,27 @@
 			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
 			  CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
 
-	/* Set a default for the hardware timeouts */
-	fau_timeout.u64 = 0;
-	fau_timeout.s.tout_val = 0xfff;
-	/* Disable tagwait FAU timeout */
-	fau_timeout.s.tout_enb = 0;
-	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
+	if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+		union cvmx_iob_fau_timeout fau_timeout;
 
-	nm_tim.u64 = 0;
-	/* 4096 cycles */
-	nm_tim.s.nw_tim = 3;
-	cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
+		/* Set a default for the hardware timeouts */
+		fau_timeout.u64 = 0;
+		fau_timeout.s.tout_val = 0xfff;
+		/* Disable tagwait FAU timeout */
+		fau_timeout.s.tout_enb = 0;
+		cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
+	}
+
+	if ((!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
+	     !OCTEON_IS_MODEL(OCTEON_CN7XXX)) ||
+	    OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+		union cvmx_pow_nw_tim nm_tim;
+
+		nm_tim.u64 = 0;
+		/* 4096 cycles */
+		nm_tim.s.nw_tim = 3;
+		cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
+	}
 
 	write_octeon_c0_icacheerr(0);
 	write_c0_derraddr1(0);
@@ -637,9 +653,22 @@
 	sysinfo = cvmx_sysinfo_get();
 	memset(sysinfo, 0, sizeof(*sysinfo));
 	sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
-	sysinfo->phy_mem_desc_ptr =
-		cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
-	sysinfo->core_mask = octeon_bootinfo->core_mask;
+	sysinfo->phy_mem_desc_addr = (u64)phys_to_virt(octeon_bootinfo->phy_mem_desc_addr);
+
+	if ((octeon_bootinfo->major_version > 1) ||
+	    (octeon_bootinfo->major_version == 1 &&
+	     octeon_bootinfo->minor_version >= 4))
+		cvmx_coremask_copy(&sysinfo->core_mask,
+				   &octeon_bootinfo->ext_core_mask);
+	else
+		cvmx_coremask_set64(&sysinfo->core_mask,
+				    octeon_bootinfo->core_mask);
+
+	/* Some broken u-boot pass garbage in upper bits, clear them out */
+	if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
+		for (i = 512; i < 1024; i++)
+			cvmx_coremask_clear_core(&sysinfo->core_mask, i);
+
 	sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
 	sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
 	sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
@@ -867,7 +896,7 @@
 #endif
 
 	octeon_user_io_init();
-	register_smp_ops(&octeon_smp_ops);
+	octeon_setup_smp();
 }
 
 /* Exclude a single page from the regions obtained in plat_mem_setup. */
@@ -1079,6 +1108,7 @@
 	}
 }
 
+void __init octeon_fill_mac_addresses(void);
 int octeon_prune_device_tree(void);
 
 extern const char __appended_dtb;
@@ -1088,11 +1118,13 @@
 {
 	const void *fdt;
 	bool do_prune;
+	bool fill_mac;
 
 #ifdef CONFIG_MIPS_ELF_APPENDED_DTB
 	if (!fdt_check_header(&__appended_dtb)) {
 		fdt = &__appended_dtb;
 		do_prune = false;
+		fill_mac = true;
 		pr_info("Using appended Device Tree.\n");
 	} else
 #endif
@@ -1101,13 +1133,16 @@
 		if (fdt_check_header(fdt))
 			panic("Corrupt Device Tree passed to kernel.");
 		do_prune = false;
+		fill_mac = false;
 		pr_info("Using passed Device Tree.\n");
 	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
 		fdt = &__dtb_octeon_68xx_begin;
 		do_prune = true;
+		fill_mac = true;
 	} else {
 		fdt = &__dtb_octeon_3xxx_begin;
 		do_prune = true;
+		fill_mac = true;
 	}
 
 	initial_boot_params = (void *)fdt;
@@ -1116,7 +1151,10 @@
 		octeon_prune_device_tree();
 		pr_info("Using internal Device Tree.\n");
 	}
+	if (fill_mac)
+		octeon_fill_mac_addresses();
 	unflatten_and_copy_device_tree();
+	init_octeon_system_type();
 }
 
 static int __initdata disable_octeon_edac_p;
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index b7fa9ae..dff88aa 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -30,25 +30,55 @@
 EXPORT_SYMBOL(octeon_bootloader_entry_addr);
 #endif
 
+static void octeon_icache_flush(void)
+{
+	asm volatile ("synci 0($0)\n");
+}
+
+static void (*octeon_message_functions[8])(void) = {
+	scheduler_ipi,
+	generic_smp_call_function_interrupt,
+	octeon_icache_flush,
+};
+
 static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
 {
-	const int coreid = cvmx_get_core_num();
-	uint64_t action;
+	u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
+	u64 action;
+	int i;
 
-	/* Load the mailbox register to figure out what we're supposed to do */
-	action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;
+	/*
+	 * Make sure the function array initialization remains
+	 * correct.
+	 */
+	BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
+	BUILD_BUG_ON(SMP_CALL_FUNCTION       != (1 << 1));
+	BUILD_BUG_ON(SMP_ICACHE_FLUSH        != (1 << 2));
+
+	/*
+	 * Load the mailbox register to figure out what we're supposed
+	 * to do.
+	 */
+	action = cvmx_read_csr(mbox_clrx);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		action &= 0xff;
+	else
+		action &= 0xffff;
 
 	/* Clear the mailbox to clear the interrupt */
-	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
+	cvmx_write_csr(mbox_clrx, action);
 
-	if (action & SMP_CALL_FUNCTION)
-		generic_smp_call_function_interrupt();
-	if (action & SMP_RESCHEDULE_YOURSELF)
-		scheduler_ipi();
+	for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
+		if (action & 1) {
+			void (*fn)(void) = octeon_message_functions[i];
 
-	/* Check if we've been told to flush the icache */
-	if (action & SMP_ICACHE_FLUSH)
-		asm volatile ("synci 0($0)\n");
+			if (fn)
+				fn();
+		}
+		action >>= 1;
+		i++;
+	}
 	return IRQ_HANDLED;
 }
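The rework above replaces open-coded SMP_* checks with a bit-indexed table of handlers; a user-space sketch of the same walk, with stand-in handlers:

#include <stdio.h>

static void resched(void)      { puts("resched"); }
static void call_func(void)    { puts("call function"); }
static void icache_flush(void) { puts("icache flush"); }

static void (*handlers[8])(void) = { resched, call_func, icache_flush };

int main(void)
{
	unsigned long action = (1 << 0) | (1 << 2);	/* hypothetical pending bits */
	int i;

	for (i = 0; i < 8 && action; i++, action >>= 1)
		if ((action & 1) && handlers[i])
			handlers[i]();		/* prints: resched, icache flush */
	return 0;
}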
 
@@ -97,13 +127,15 @@
 #endif
 }
 
-static void octeon_smp_setup(void)
+static void __init octeon_smp_setup(void)
 {
 	const int coreid = cvmx_get_core_num();
 	int cpus;
 	int id;
-	int core_mask = octeon_get_boot_coremask();
+	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
+
 #ifdef CONFIG_HOTPLUG_CPU
+	int core_mask = octeon_get_boot_coremask();
 	unsigned int num_cores = cvmx_octeon_num_cores();
 #endif
 
@@ -119,7 +151,7 @@
 	/* The present CPUs get the lowest CPU numbers. */
 	cpus = 1;
 	for (id = 0; id < NR_CPUS; id++) {
-		if ((id != coreid) && (core_mask & (1 << id))) {
+		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
 			set_cpu_possible(cpus, true);
 			set_cpu_present(cpus, true);
 			__cpu_number_map[id] = cpus;
@@ -196,7 +228,7 @@
  * Callout to firmware before smp_init
  *
  */
-void octeon_prepare_cpus(unsigned int max_cpus)
+static void __init octeon_prepare_cpus(unsigned int max_cpus)
 {
 	/*
 	 * Only the low order mailbox bits are used for IPIs, leave
@@ -242,7 +274,7 @@
 	cpumask_clear_cpu(cpu, &cpu_callin_map);
 	octeon_fixup_irqs();
 
-	flush_cache_all();
+	__flush_cache_all();
 	local_flush_tlb_all();
 
 	return 0;
@@ -331,7 +363,7 @@
 	}
 
 	if (!(avail_coremask & (1 << coreid))) {
-		/* core not available, assume, that catched by simple-executive */
+		/* core not available, assume, that caught by simple-executive */
 		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
 		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
 	}
@@ -388,3 +420,92 @@
 	.cpu_die		= octeon_cpu_die,
 #endif
 };
+
+static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
+{
+	scheduler_ipi();
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
+{
+	generic_smp_call_function_interrupt();
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
+{
+	octeon_icache_flush();
+	return IRQ_HANDLED;
+}
+
+/*
+ * Callout to firmware before smp_init
+ */
+static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
+{
+	if (request_irq(OCTEON_IRQ_MBOX0 + 0,
+			octeon_78xx_reched_interrupt,
+			IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
+			octeon_78xx_reched_interrupt)) {
+		panic("Cannot request_irq for SchedulerIPI");
+	}
+	if (request_irq(OCTEON_IRQ_MBOX0 + 1,
+			octeon_78xx_call_function_interrupt,
+			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
+			octeon_78xx_call_function_interrupt)) {
+		panic("Cannot request_irq for SMP-Call");
+	}
+	if (request_irq(OCTEON_IRQ_MBOX0 + 2,
+			octeon_78xx_icache_flush_interrupt,
+			IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
+			octeon_78xx_icache_flush_interrupt)) {
+		panic("Cannot request_irq for ICache-Flush");
+	}
+}
+
+static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		if (action & 1)
+			octeon_ciu3_mbox_send(cpu, i);
+		action >>= 1;
+	}
+}
+
+static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
+				      unsigned int action)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, mask)
+		octeon_78xx_send_ipi_single(cpu, action);
+}
+
+static struct plat_smp_ops octeon_78xx_smp_ops = {
+	.send_ipi_single	= octeon_78xx_send_ipi_single,
+	.send_ipi_mask		= octeon_78xx_send_ipi_mask,
+	.init_secondary		= octeon_init_secondary,
+	.smp_finish		= octeon_smp_finish,
+	.boot_secondary		= octeon_boot_secondary,
+	.smp_setup		= octeon_smp_setup,
+	.prepare_cpus		= octeon_78xx_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable		= octeon_cpu_disable,
+	.cpu_die		= octeon_cpu_die,
+#endif
+};
+
+void __init octeon_setup_smp(void)
+{
+	struct plat_smp_ops *ops;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CIU3))
+		ops = &octeon_78xx_smp_ops;
+	else
+		ops = &octeon_smp_ops;
+
+	register_smp_ops(ops);
+}
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 0db4eb3..fad8e96 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -23,7 +23,6 @@
 CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
 CONFIG_SYN_COOKIES=y
 CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
 CONFIG_IPV6_MROUTE=y
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
index 3fec264..5599a9f 100644
--- a/arch/mips/configs/bcm63xx_defconfig
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -44,6 +44,7 @@
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_MTD=y
+CONFIG_MTD_BCM63XX_PARTS=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index e070dac..d20b09d 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -62,7 +62,6 @@
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 # CONFIG_INET_LRO is not set
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/mips/configs/bmips_be_defconfig b/arch/mips/configs/bmips_be_defconfig
index 24dcb90..acf7785 100644
--- a/arch/mips/configs/bmips_be_defconfig
+++ b/arch/mips/configs/bmips_be_defconfig
@@ -36,6 +36,7 @@
 CONFIG_PRINTK_TIME=y
 CONFIG_BRCMSTB_GISB_ARB=y
 CONFIG_MTD=y
+CONFIG_MTD_BCM63XX_PARTS=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index e57058d..dcac308 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -119,14 +119,16 @@
 CONFIG_SPI_OCTEON=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y
-CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
-CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_HCD_PLATFORM=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_OHCI_HCD_PLATFORM=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_DS1307=y
 CONFIG_STAGING=y
 CONFIG_OCTEON_ETHERNET=y
+CONFIG_OCTEON_USB=m
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -152,6 +154,9 @@
 CONFIG_SECURITY_NETWORK=y
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MD5_OCTEON=y
+CONFIG_CRYPTO_SHA1_OCTEON=m
+CONFIG_CRYPTO_SHA256_OCTEON=m
+CONFIG_CRYPTO_SHA512_OCTEON=m
 CONFIG_CRYPTO_DES=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index 4e36b6e..43e0ba2 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -17,13 +17,12 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
-CONFIG_MEMCG_KMEM=y
 CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -52,6 +51,11 @@
 # CONFIG_ALLOW_DEV_COREDUMP is not set
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_MTD=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_JZ4780=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_FASTMAP=y
 CONFIG_NETDEVICES=y
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
@@ -103,7 +107,7 @@
 # CONFIG_PROC_PAGE_MONITOR is not set
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
-# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_UBIFS_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS=y
 CONFIG_NLS_CODEPAGE_437=y
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index ebc011c..2b6cb41 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -30,7 +30,6 @@
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 6ba9ce9..5d83ff7 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -48,7 +48,6 @@
 CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 77e9f50..2b74aee 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -43,7 +43,6 @@
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_TCP_MD5SIG=y
 CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index a5e85e1..3019fce 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -34,7 +34,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index d1f198b..5da76e0 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -71,7 +71,6 @@
 CONFIG_TCP_CONG_BIC=y
 CONFIG_DEFAULT_BIC=y
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_TUNNEL=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
diff --git a/arch/mips/configs/loongson1b_defconfig b/arch/mips/configs/loongson1b_defconfig
new file mode 100644
index 0000000..c442f27
--- /dev/null
+++ b/arch/mips/configs/loongson1b_defconfig
@@ -0,0 +1,125 @@
+CONFIG_MACH_LOONGSON32=y
+CONFIG_PREEMPT=y
+# CONFIG_SECCOMP is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_XZ=y
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_NAMESPACES=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_LOONGSON1=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=8
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_LOONGSON1=y
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_HID_GENERIC=m
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_LOONGSON1=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_ATIME_SUPPORT=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+# CONFIG_EARLY_PRINTK is not set
+# CONFIG_CRYPTO_ECHAINIV is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/ls1b_defconfig b/arch/mips/configs/ls1b_defconfig
deleted file mode 100644
index 1b2cc1f..0000000
--- a/arch/mips/configs/ls1b_defconfig
+++ /dev/null
@@ -1,110 +0,0 @@
-CONFIG_MACH_LOONGSON32=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_NAMESPACES=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_EXPERT=y
-CONFIG_PERF_EVENTS=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-# CONFIG_SUSPEND is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_SCSI=m
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=m
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CHELSIO is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-CONFIG_STMMAC_ETH=y
-CONFIG_STMMAC_DA=y
-# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_WLAN is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_LEGACY_PTY_COUNT=8
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB_HID=m
-CONFIG_HID_GENERIC=m
-CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-CONFIG_USB_STORAGE=m
-CONFIG_USB_SERIAL=m
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_LOONGSON1=y
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_DNOTIFY is not set
-CONFIG_VFAT_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_ISO8859_1=m
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_FTRACE is not set
-# CONFIG_EARLY_PRINTK is not set
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 9b6926d..f3f6005 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -51,7 +51,6 @@
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index b3d1d37..b496c25 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -95,7 +95,6 @@
 CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_TCP_MD5SIG=y
 CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index 3d8016d..8e99ad8 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -75,7 +75,6 @@
 CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_TCP_MD5SIG=y
 CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 82db4e3..c2b4e3f 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -37,7 +37,6 @@
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_INET6_AH=m
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 8c6f508..d7b9918 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -5,7 +5,7 @@
  * Written by Ralf Baechle and Andreas Busse, modified for DECstation
  * support by Paul Antoine and Harald Koerfgen.
  *
- * completly rewritten:
+ * completely rewritten:
  * Copyright (C) 1998 Harald Koerfgen
  *
  * Rewritten extensively for controller-driven IRQ support
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index a0b8943..1c3bf9f 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -60,6 +60,7 @@
 int dec_tc_bus;
 
 DEFINE_SPINLOCK(ioasic_ssr_lock);
+EXPORT_SYMBOL(ioasic_ssr_lock);
 
 volatile u32 *ioasic_base;
 
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 5537b94..0d75b5a 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -9,7 +9,7 @@
  * PROM library functions for acquiring/using memory descriptors given to us
  * from the ARCS firmware.  This is only used when CONFIG_ARC_MEMORY is set
  * because on some machines like SGI IP27 the ARC memory configuration data
- * completly bogus and alternate easier to use mechanisms are available.
+ * completely bogus and alternate easier to use mechanisms are available.
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index c7fe4d0..9740066 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,5 +1,6 @@
 # MIPS headers
 generic-$(CONFIG_GENERIC_CSUM) += checksum.h
+generic-y += clkdev.h
 generic-y += cputime.h
 generic-y += current.h
 generic-y += dma-contiguous.h
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 867f924..6741673 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -235,6 +235,7 @@
 	.macro	ld_b	wd, off, base
 	.set	push
 	.set	mips32r2
+	.set	fp=64
 	.set	msa
 	ld.b	$w\wd, \off(\base)
 	.set	pop
@@ -243,6 +244,7 @@
 	.macro	ld_h	wd, off, base
 	.set	push
 	.set	mips32r2
+	.set	fp=64
 	.set	msa
 	ld.h	$w\wd, \off(\base)
 	.set	pop
@@ -251,6 +253,7 @@
 	.macro	ld_w	wd, off, base
 	.set	push
 	.set	mips32r2
+	.set	fp=64
 	.set	msa
 	ld.w	$w\wd, \off(\base)
 	.set	pop
@@ -268,6 +271,7 @@
 	.macro	st_b	wd, off, base
 	.set	push
 	.set	mips32r2
+	.set	fp=64
 	.set	msa
 	st.b	$w\wd, \off(\base)
 	.set	pop
@@ -276,6 +280,7 @@
 	.macro	st_h	wd, off, base
 	.set	push
 	.set	mips32r2
+	.set	fp=64
 	.set	msa
 	st.h	$w\wd, \off(\base)
 	.set	pop
@@ -284,6 +289,7 @@
 	.macro	st_w	wd, off, base
 	.set	push
 	.set	mips32r2
+	.set	fp=64
 	.set	msa
 	st.w	$w\wd, \off(\base)
 	.set	pop
@@ -298,21 +304,21 @@
 	.set	pop
 	.endm
 
-	.macro	copy_u_w	ws, n
+	.macro	copy_s_w	ws, n
 	.set	push
 	.set	mips32r2
 	.set	fp=64
 	.set	msa
-	copy_u.w $1, $w\ws[\n]
+	copy_s.w $1, $w\ws[\n]
 	.set	pop
 	.endm
 
-	.macro	copy_u_d	ws, n
+	.macro	copy_s_d	ws, n
 	.set	push
 	.set	mips64r2
 	.set	fp=64
 	.set	msa
-	copy_u.d $1, $w\ws[\n]
+	copy_s.d $1, $w\ws[\n]
 	.set	pop
 	.endm
 
@@ -346,8 +352,8 @@
 #define STH_MSA_INSN		0x5800081f
 #define STW_MSA_INSN		0x5800082f
 #define STD_MSA_INSN		0x5800083f
-#define COPY_UW_MSA_INSN	0x58f00056
-#define COPY_UD_MSA_INSN	0x58f80056
+#define COPY_SW_MSA_INSN	0x58b00056
+#define COPY_SD_MSA_INSN	0x58b80056
 #define INSERT_W_MSA_INSN	0x59300816
 #define INSERT_D_MSA_INSN	0x59380816
 #else
@@ -361,8 +367,8 @@
 #define STH_MSA_INSN		0x78000825
 #define STW_MSA_INSN		0x78000826
 #define STD_MSA_INSN		0x78000827
-#define COPY_UW_MSA_INSN	0x78f00059
-#define COPY_UD_MSA_INSN	0x78f80059
+#define COPY_SW_MSA_INSN	0x78b00059
+#define COPY_SD_MSA_INSN	0x78b80059
 #define INSERT_W_MSA_INSN	0x79300819
 #define INSERT_D_MSA_INSN	0x79380819
 #endif
@@ -393,7 +399,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	addu	$1, \base, \off
+	PTR_ADDU $1, \base, \off
 	.word	LDB_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -402,7 +408,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	addu	$1, \base, \off
+	PTR_ADDU $1, \base, \off
 	.word	LDH_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -411,7 +417,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	addu	$1, \base, \off
+	PTR_ADDU $1, \base, \off
 	.word	LDW_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -420,7 +426,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	addu	$1, \base, \off
+	PTR_ADDU $1, \base, \off
 	.word	LDD_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -429,7 +435,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	addu	$1, \base, \off
+	PTR_ADDU $1, \base, \off
 	.word	STB_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -438,7 +444,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	addu	$1, \base, \off
+	PTR_ADDU $1, \base, \off
 	.word	STH_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -447,7 +453,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	addu	$1, \base, \off
+	PTR_ADDU $1, \base, \off
 	.word	STW_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -456,26 +462,26 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	addu	$1, \base, \off
+	PTR_ADDU $1, \base, \off
 	.word	STD_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
 
-	.macro	copy_u_w	ws, n
+	.macro	copy_s_w	ws, n
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
 	.insn
-	.word	COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
+	.word	COPY_SW_MSA_INSN | (\n << 16) | (\ws << 11)
 	.set	pop
 	.endm
 
-	.macro	copy_u_d	ws, n
+	.macro	copy_s_d	ws, n
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
 	.insn
-	.word	COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
+	.word	COPY_SD_MSA_INSN | (\n << 16) | (\ws << 11)
 	.set	pop
 	.endm
 
@@ -496,41 +502,52 @@
 	.endm
 #endif
 
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+#define FPR_BASE_OFFS	THREAD_FPR0
+#define FPR_BASE	$1
+#else
+#define FPR_BASE_OFFS	0
+#define FPR_BASE	\thread
+#endif
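A small sanity check of the rebased addressing introduced above: indexing from FPR_BASE with offsets reduced by FPR_BASE_OFFS reaches the same bytes as the original thread-relative offsets (the numbers below are hypothetical):

#include <stdio.h>

#define THREAD_FPR0	1000	/* hypothetical struct offsets */
#define THREAD_FPR5	1080
#define FPR_BASE_OFFS	THREAD_FPR0

int main(void)
{
	unsigned long thread = 0x40000;			/* hypothetical thread pointer */
	unsigned long fpr_base = thread + FPR_BASE_OFFS;	/* what PTR_ADDU computes */

	/* prints 1: both expressions name the same address */
	printf("%d\n", fpr_base + (THREAD_FPR5 - FPR_BASE_OFFS) == thread + THREAD_FPR5);
	return 0;
}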
+
 	.macro	msa_save_all	thread
-	st_d	0, THREAD_FPR0, \thread
-	st_d	1, THREAD_FPR1, \thread
-	st_d	2, THREAD_FPR2, \thread
-	st_d	3, THREAD_FPR3, \thread
-	st_d	4, THREAD_FPR4, \thread
-	st_d	5, THREAD_FPR5, \thread
-	st_d	6, THREAD_FPR6, \thread
-	st_d	7, THREAD_FPR7, \thread
-	st_d	8, THREAD_FPR8, \thread
-	st_d	9, THREAD_FPR9, \thread
-	st_d	10, THREAD_FPR10, \thread
-	st_d	11, THREAD_FPR11, \thread
-	st_d	12, THREAD_FPR12, \thread
-	st_d	13, THREAD_FPR13, \thread
-	st_d	14, THREAD_FPR14, \thread
-	st_d	15, THREAD_FPR15, \thread
-	st_d	16, THREAD_FPR16, \thread
-	st_d	17, THREAD_FPR17, \thread
-	st_d	18, THREAD_FPR18, \thread
-	st_d	19, THREAD_FPR19, \thread
-	st_d	20, THREAD_FPR20, \thread
-	st_d	21, THREAD_FPR21, \thread
-	st_d	22, THREAD_FPR22, \thread
-	st_d	23, THREAD_FPR23, \thread
-	st_d	24, THREAD_FPR24, \thread
-	st_d	25, THREAD_FPR25, \thread
-	st_d	26, THREAD_FPR26, \thread
-	st_d	27, THREAD_FPR27, \thread
-	st_d	28, THREAD_FPR28, \thread
-	st_d	29, THREAD_FPR29, \thread
-	st_d	30, THREAD_FPR30, \thread
-	st_d	31, THREAD_FPR31, \thread
 	.set	push
 	.set	noat
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+	PTR_ADDU FPR_BASE, \thread, FPR_BASE_OFFS
+#endif
+	st_d	 0, THREAD_FPR0  - FPR_BASE_OFFS, FPR_BASE
+	st_d	 1, THREAD_FPR1  - FPR_BASE_OFFS, FPR_BASE
+	st_d	 2, THREAD_FPR2  - FPR_BASE_OFFS, FPR_BASE
+	st_d	 3, THREAD_FPR3  - FPR_BASE_OFFS, FPR_BASE
+	st_d	 4, THREAD_FPR4  - FPR_BASE_OFFS, FPR_BASE
+	st_d	 5, THREAD_FPR5  - FPR_BASE_OFFS, FPR_BASE
+	st_d	 6, THREAD_FPR6  - FPR_BASE_OFFS, FPR_BASE
+	st_d	 7, THREAD_FPR7  - FPR_BASE_OFFS, FPR_BASE
+	st_d	 8, THREAD_FPR8  - FPR_BASE_OFFS, FPR_BASE
+	st_d	 9, THREAD_FPR9  - FPR_BASE_OFFS, FPR_BASE
+	st_d	10, THREAD_FPR10 - FPR_BASE_OFFS, FPR_BASE
+	st_d	11, THREAD_FPR11 - FPR_BASE_OFFS, FPR_BASE
+	st_d	12, THREAD_FPR12 - FPR_BASE_OFFS, FPR_BASE
+	st_d	13, THREAD_FPR13 - FPR_BASE_OFFS, FPR_BASE
+	st_d	14, THREAD_FPR14 - FPR_BASE_OFFS, FPR_BASE
+	st_d	15, THREAD_FPR15 - FPR_BASE_OFFS, FPR_BASE
+	st_d	16, THREAD_FPR16 - FPR_BASE_OFFS, FPR_BASE
+	st_d	17, THREAD_FPR17 - FPR_BASE_OFFS, FPR_BASE
+	st_d	18, THREAD_FPR18 - FPR_BASE_OFFS, FPR_BASE
+	st_d	19, THREAD_FPR19 - FPR_BASE_OFFS, FPR_BASE
+	st_d	20, THREAD_FPR20 - FPR_BASE_OFFS, FPR_BASE
+	st_d	21, THREAD_FPR21 - FPR_BASE_OFFS, FPR_BASE
+	st_d	22, THREAD_FPR22 - FPR_BASE_OFFS, FPR_BASE
+	st_d	23, THREAD_FPR23 - FPR_BASE_OFFS, FPR_BASE
+	st_d	24, THREAD_FPR24 - FPR_BASE_OFFS, FPR_BASE
+	st_d	25, THREAD_FPR25 - FPR_BASE_OFFS, FPR_BASE
+	st_d	26, THREAD_FPR26 - FPR_BASE_OFFS, FPR_BASE
+	st_d	27, THREAD_FPR27 - FPR_BASE_OFFS, FPR_BASE
+	st_d	28, THREAD_FPR28 - FPR_BASE_OFFS, FPR_BASE
+	st_d	29, THREAD_FPR29 - FPR_BASE_OFFS, FPR_BASE
+	st_d	30, THREAD_FPR30 - FPR_BASE_OFFS, FPR_BASE
+	st_d	31, THREAD_FPR31 - FPR_BASE_OFFS, FPR_BASE
 	SET_HARDFLOAT
 	_cfcmsa	$1, MSA_CSR
 	sw	$1, THREAD_MSA_CSR(\thread)
@@ -543,41 +560,47 @@
 	SET_HARDFLOAT
 	lw	$1, THREAD_MSA_CSR(\thread)
 	_ctcmsa	MSA_CSR, $1
-	.set	pop
-	ld_d	0, THREAD_FPR0, \thread
-	ld_d	1, THREAD_FPR1, \thread
-	ld_d	2, THREAD_FPR2, \thread
-	ld_d	3, THREAD_FPR3, \thread
-	ld_d	4, THREAD_FPR4, \thread
-	ld_d	5, THREAD_FPR5, \thread
-	ld_d	6, THREAD_FPR6, \thread
-	ld_d	7, THREAD_FPR7, \thread
-	ld_d	8, THREAD_FPR8, \thread
-	ld_d	9, THREAD_FPR9, \thread
-	ld_d	10, THREAD_FPR10, \thread
-	ld_d	11, THREAD_FPR11, \thread
-	ld_d	12, THREAD_FPR12, \thread
-	ld_d	13, THREAD_FPR13, \thread
-	ld_d	14, THREAD_FPR14, \thread
-	ld_d	15, THREAD_FPR15, \thread
-	ld_d	16, THREAD_FPR16, \thread
-	ld_d	17, THREAD_FPR17, \thread
-	ld_d	18, THREAD_FPR18, \thread
-	ld_d	19, THREAD_FPR19, \thread
-	ld_d	20, THREAD_FPR20, \thread
-	ld_d	21, THREAD_FPR21, \thread
-	ld_d	22, THREAD_FPR22, \thread
-	ld_d	23, THREAD_FPR23, \thread
-	ld_d	24, THREAD_FPR24, \thread
-	ld_d	25, THREAD_FPR25, \thread
-	ld_d	26, THREAD_FPR26, \thread
-	ld_d	27, THREAD_FPR27, \thread
-	ld_d	28, THREAD_FPR28, \thread
-	ld_d	29, THREAD_FPR29, \thread
-	ld_d	30, THREAD_FPR30, \thread
-	ld_d	31, THREAD_FPR31, \thread
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+	PTR_ADDU FPR_BASE, \thread, FPR_BASE_OFFS
+#endif
+	ld_d	 0, THREAD_FPR0  - FPR_BASE_OFFS, FPR_BASE
+	ld_d	 1, THREAD_FPR1  - FPR_BASE_OFFS, FPR_BASE
+	ld_d	 2, THREAD_FPR2  - FPR_BASE_OFFS, FPR_BASE
+	ld_d	 3, THREAD_FPR3  - FPR_BASE_OFFS, FPR_BASE
+	ld_d	 4, THREAD_FPR4  - FPR_BASE_OFFS, FPR_BASE
+	ld_d	 5, THREAD_FPR5  - FPR_BASE_OFFS, FPR_BASE
+	ld_d	 6, THREAD_FPR6  - FPR_BASE_OFFS, FPR_BASE
+	ld_d	 7, THREAD_FPR7  - FPR_BASE_OFFS, FPR_BASE
+	ld_d	 8, THREAD_FPR8  - FPR_BASE_OFFS, FPR_BASE
+	ld_d	 9, THREAD_FPR9  - FPR_BASE_OFFS, FPR_BASE
+	ld_d	10, THREAD_FPR10 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	11, THREAD_FPR11 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	12, THREAD_FPR12 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	13, THREAD_FPR13 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	14, THREAD_FPR14 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	15, THREAD_FPR15 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	16, THREAD_FPR16 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	17, THREAD_FPR17 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	18, THREAD_FPR18 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	19, THREAD_FPR19 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	20, THREAD_FPR20 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	21, THREAD_FPR21 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	22, THREAD_FPR22 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	23, THREAD_FPR23 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	24, THREAD_FPR24 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	25, THREAD_FPR25 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	26, THREAD_FPR26 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	27, THREAD_FPR27 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	28, THREAD_FPR28 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	29, THREAD_FPR29 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	30, THREAD_FPR30 - FPR_BASE_OFFS, FPR_BASE
+	ld_d	31, THREAD_FPR31 - FPR_BASE_OFFS, FPR_BASE
+	.set pop
 	.endm
 
+#undef FPR_BASE_OFFS
+#undef FPR_BASE
+
 	.macro	msa_init_upper wd
 #ifdef CONFIG_64BIT
 	insert_d \wd, 1
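
A note on the FPR_BASE rebasing above: when the toolchain supports MSA, st_d/ld_d expand to the MSA st.d/ld.d vector instructions, whose immediate offset field is much smaller than that of ordinary loads and stores, so the raw THREAD_FPRn offsets into the thread structure can fall out of reach. Moving a base register up to the FP save area and addressing each register as THREAD_FPRn - FPR_BASE_OFFS keeps every immediate small. A standalone sketch of that arithmetic follows; the offsets, the 16-byte stride and the +/-512 window are assumptions for illustration, not values taken from the kernel or the MSA manual.

	/*
	 * Hedged sketch: why rebasing the FPR offsets helps.  FPR_AREA_OFFS,
	 * the 16-byte stride and the +/-512 immediate window are assumptions.
	 */
	#include <stdio.h>

	#define FPR_AREA_OFFS	0x900			/* assumed offset of FPR0 in the thread structure */
	#define THREAD_FPR(n)	(FPR_AREA_OFFS + (n) * 16L)

	static int fits(long imm)			/* assumed reach of the st.d/ld.d immediate */
	{
		return imm >= -512 && imm <= 511;
	}

	int main(void)
	{
		for (int n = 0; n < 32; n += 31) {
			long raw     = THREAD_FPR(n);			/* offset from \thread  */
			long rebased = THREAD_FPR(n) - FPR_AREA_OFFS;	/* offset from FPR_BASE */

			printf("FPR%-2d: raw %ld fits=%d, rebased %ld fits=%d\n",
			       n, raw, fits(raw), rebased, fits(rebased));
		}
		return 0;
	}
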
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index ce9666c..fa57cef 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -19,25 +19,10 @@
 #include <asm/byteorder.h>		/* sigh ... */
 #include <asm/compiler.h>
 #include <asm/cpu-features.h>
+#include <asm/llsc.h>
 #include <asm/sgidefs.h>
 #include <asm/war.h>
 
-#if _MIPS_SZLONG == 32
-#define SZLONG_LOG 5
-#define SZLONG_MASK 31UL
-#define __LL		"ll	"
-#define __SC		"sc	"
-#define __INS		"ins	"
-#define __EXT		"ext	"
-#elif _MIPS_SZLONG == 64
-#define SZLONG_LOG 6
-#define SZLONG_MASK 63UL
-#define __LL		"lld	"
-#define __SC		"scd	"
-#define __INS		"dins	 "
-#define __EXT		"dext	 "
-#endif
-
 /*
  * These are the "slower" versions of the functions and are in bitops.c.
  * These functions call raw_local_irq_{save,restore}().
diff --git a/arch/mips/include/asm/bitrev.h b/arch/mips/include/asm/bitrev.h
new file mode 100644
index 0000000..bc739a4
--- /dev/null
+++ b/arch/mips/include/asm/bitrev.h
@@ -0,0 +1,30 @@
+#ifndef __MIPS_ASM_BITREV_H__
+#define __MIPS_ASM_BITREV_H__
+
+#include <linux/swab.h>
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+	u32 ret;
+
+	asm("bitswap	%0, %1" : "=r"(ret) : "r"(__swab32(x)));
+	return ret;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+	u16 ret;
+
+	asm("bitswap	%0, %1" : "=r"(ret) : "r"(__swab16(x)));
+	return ret;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+	u8 ret;
+
+	asm("bitswap	%0, %1" : "=r"(ret) : "r"(x));
+	return ret;
+}
+
+#endif /* __MIPS_ASM_BITREV_H__ */
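
The new bitrev helpers build on the MIPS R6 bitswap instruction, which reverses the bit order within each byte of a register; applied to a byte-swapped value this yields a full-width bit reversal. For reference, a portable plain-C version that should produce the same result as __arch_bitrev32() (userspace only, no kernel headers needed):

	/*
	 * Hedged sketch: a portable reference for what __arch_bitrev32()
	 * computes -- bitswap reverses the bits within each byte, so applied
	 * to a byte-swapped value it gives a full 32-bit reversal.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t bitrev32_ref(uint32_t x)
	{
		uint32_t r = 0;

		for (int i = 0; i < 32; i++)
			r |= ((x >> i) & 1u) << (31 - i);
		return r;
	}

	int main(void)
	{
		printf("%08x -> %08x\n", 0x12345678u, bitrev32_ref(0x12345678u));	/* 1e6a2c48 */
		printf("%08x -> %08x\n", 0x80000001u, bitrev32_ref(0x80000001u));	/* 80000001 */
		return 0;
	}
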
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index 6d25ad3..a92aee7 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -88,6 +88,7 @@
 
 extern void bmips_ebase_setup(void);
 extern asmlinkage void plat_wired_tlb_setup(void);
+extern void bmips_cpu_setup(void);
 
 static inline unsigned long bmips_read_zscm_reg(unsigned int offset)
 {
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index b603804..9f67033 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -144,4 +144,22 @@
 
 #endif /* CONFIG_SWIOTLB */
 
+#ifdef CONFIG_USE_OF
+/**
+ * plat_get_fdt() - Return a pointer to the platform's device tree blob
+ *
+ * This function provides a platform independent API to get a pointer to the
+ * flattened device tree blob. The interface between bootloader and kernel
+ * is not consistent across platforms so it is necessary to provide this
+ * API such that common startup code can locate the FDT.
+ *
+ * This is used by the KASLR code to get command line arguments and random
+ * seed from the device tree. Any platform wishing to use KASLR should
+ * provide this API and select SYS_SUPPORTS_RELOCATABLE.
+ *
+ * Return: Pointer to the flattened device tree blob.
+ */
+extern void *plat_get_fdt(void);
+#endif /* CONFIG_USE_OF */
+
 #endif /* _ASM_BOOTINFO_H */
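
plat_get_fdt() is a new per-platform hook, so a minimal sketch of what an implementation might look like follows -- assuming the platform records the bootloader-provided DTB pointer during early setup; the variable name below is hypothetical.

	/*
	 * Hedged sketch of a platform-side plat_get_fdt(); boot_fdt_ptr is a
	 * hypothetical variable the platform would fill in from its
	 * bootloader handover before common startup code runs.
	 */
	#include <asm/bootinfo.h>

	static void *boot_fdt_ptr;

	void *plat_get_fdt(void)
	{
		return boot_fdt_ptr;
	}
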
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 723229f..34ed22e 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -51,7 +51,6 @@
 	unsigned long start, unsigned long end);
 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
 extern void __flush_dcache_page(struct page *page);
-extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 static inline void flush_dcache_page(struct page *page)
@@ -77,11 +76,6 @@
 static inline void flush_icache_page(struct vm_area_struct *vma,
 	struct page *page)
 {
-	if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
-	    Page_dcache_dirty(page)) {
-		__flush_icache_page(vma, page);
-		ClearPageDcacheDirty(page);
-	}
 }
 
 extern void (*flush_icache_range)(unsigned long start, unsigned long end);
@@ -132,6 +126,7 @@
 static inline void flush_kernel_dcache_page(struct page *page)
 {
 	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
+	flush_dcache_page(page);
 }
 
 /*
diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h
index c3212ff..8031fbc 100644
--- a/arch/mips/include/asm/cacheops.h
+++ b/arch/mips/include/asm/cacheops.h
@@ -21,6 +21,7 @@
 #define Cache_I				0x00
 #define Cache_D				0x01
 #define Cache_T				0x02
+#define Cache_V				0x02 /* Loongson-3 */
 #define Cache_S				0x03
 
 #define Index_Writeback_Inv		0x00
@@ -107,4 +108,9 @@
  */
 #define Hit_Invalidate_I_Loongson2	(Cache_I | 0x00)
 
+/*
+ * Loongson3-specific cacheops
+ */
+#define Index_Writeback_Inv_V		(Cache_V | Index_Writeback_Inv)
+
 #endif	/* __ASM_CACHEOPS_H */
diff --git a/arch/mips/include/asm/clkdev.h b/arch/mips/include/asm/clkdev.h
deleted file mode 100644
index 1b3ad7b..0000000
--- a/arch/mips/include/asm/clkdev.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- *  based on arch/arm/include/asm/clkdev.h
- *
- *  Copyright (C) 2008 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Helper for the clk API to assist looking up a struct clk.
- */
-#ifndef __ASM_CLKDEV_H
-#define __ASM_CLKDEV_H
-
-#include <linux/slab.h>
-
-#ifndef CONFIG_COMMON_CLK
-#define __clk_get(clk)	({ 1; })
-#define __clk_put(clk)	do { } while (0)
-#endif
-
-static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
-{
-	return kzalloc(size, GFP_KERNEL);
-}
-
-#endif
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index eeec8c8..e6f19fc 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -35,6 +35,9 @@
 #ifndef cpu_has_htw
 #define cpu_has_htw		(cpu_data[0].options & MIPS_CPU_HTW)
 #endif
+#ifndef cpu_has_ldpte
+#define cpu_has_ldpte		(cpu_data[0].options & MIPS_CPU_LDPTE)
+#endif
 #ifndef cpu_has_rixiex
 #define cpu_has_rixiex		(cpu_data[0].options & MIPS_CPU_RIXIEX)
 #endif
@@ -117,6 +120,21 @@
 #ifndef kernel_uses_llsc
 #define kernel_uses_llsc	cpu_has_llsc
 #endif
+#ifndef cpu_has_guestctl0ext
+#define cpu_has_guestctl0ext	(cpu_data[0].options & MIPS_CPU_GUESTCTL0EXT)
+#endif
+#ifndef cpu_has_guestctl1
+#define cpu_has_guestctl1	(cpu_data[0].options & MIPS_CPU_GUESTCTL1)
+#endif
+#ifndef cpu_has_guestctl2
+#define cpu_has_guestctl2	(cpu_data[0].options & MIPS_CPU_GUESTCTL2)
+#endif
+#ifndef cpu_has_guestid
+#define cpu_has_guestid		(cpu_data[0].options & MIPS_CPU_GUESTID)
+#endif
+#ifndef cpu_has_drg
+#define cpu_has_drg		(cpu_data[0].options & MIPS_CPU_DRG)
+#endif
 #ifndef cpu_has_mips16
 #define cpu_has_mips16		(cpu_data[0].ases & MIPS_ASE_MIPS16)
 #endif
@@ -142,8 +160,14 @@
 # endif
 #endif
 
+#ifndef cpu_has_lpa
+#define cpu_has_lpa		(cpu_data[0].options & MIPS_CPU_LPA)
+#endif
+#ifndef cpu_has_mvh
+#define cpu_has_mvh		(cpu_data[0].options & MIPS_CPU_MVH)
+#endif
 #ifndef cpu_has_xpa
-#define cpu_has_xpa		(cpu_data[0].options & MIPS_CPU_XPA)
+#define cpu_has_xpa		(cpu_has_lpa && cpu_has_mvh)
 #endif
 #ifndef cpu_has_vtag_icache
 #define cpu_has_vtag_icache	(cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
@@ -307,10 +331,18 @@
 #define cpu_has_dsp2		(cpu_data[0].ases & MIPS_ASE_DSP2P)
 #endif
 
+#ifndef cpu_has_dsp3
+#define cpu_has_dsp3		(cpu_data[0].ases & MIPS_ASE_DSP3)
+#endif
+
 #ifndef cpu_has_mipsmt
 #define cpu_has_mipsmt		(cpu_data[0].ases & MIPS_ASE_MIPSMT)
 #endif
 
+#ifndef cpu_has_vp
+#define cpu_has_vp		(cpu_data[0].options & MIPS_CPU_VP)
+#endif
+
 #ifndef cpu_has_userlocal
 #define cpu_has_userlocal	(cpu_data[0].options & MIPS_CPU_ULRI)
 #endif
@@ -421,4 +453,107 @@
 #define cpu_has_nan_2008	(cpu_data[0].options & MIPS_CPU_NAN_2008)
 #endif
 
+#ifndef cpu_has_ebase_wg
+# define cpu_has_ebase_wg	(cpu_data[0].options & MIPS_CPU_EBASE_WG)
+#endif
+
+#ifndef cpu_has_badinstr
+# define cpu_has_badinstr	(cpu_data[0].options & MIPS_CPU_BADINSTR)
+#endif
+
+#ifndef cpu_has_badinstrp
+# define cpu_has_badinstrp	(cpu_data[0].options & MIPS_CPU_BADINSTRP)
+#endif
+
+#ifndef cpu_has_contextconfig
+# define cpu_has_contextconfig	(cpu_data[0].options & MIPS_CPU_CTXTC)
+#endif
+
+#ifndef cpu_has_perf
+# define cpu_has_perf		(cpu_data[0].options & MIPS_CPU_PERF)
+#endif
+
+/*
+ * Guest capabilities
+ */
+#ifndef cpu_guest_has_conf1
+#define cpu_guest_has_conf1	(cpu_data[0].guest.conf & (1 << 1))
+#endif
+#ifndef cpu_guest_has_conf2
+#define cpu_guest_has_conf2	(cpu_data[0].guest.conf & (1 << 2))
+#endif
+#ifndef cpu_guest_has_conf3
+#define cpu_guest_has_conf3	(cpu_data[0].guest.conf & (1 << 3))
+#endif
+#ifndef cpu_guest_has_conf4
+#define cpu_guest_has_conf4	(cpu_data[0].guest.conf & (1 << 4))
+#endif
+#ifndef cpu_guest_has_conf5
+#define cpu_guest_has_conf5	(cpu_data[0].guest.conf & (1 << 5))
+#endif
+#ifndef cpu_guest_has_conf6
+#define cpu_guest_has_conf6	(cpu_data[0].guest.conf & (1 << 6))
+#endif
+#ifndef cpu_guest_has_conf7
+#define cpu_guest_has_conf7	(cpu_data[0].guest.conf & (1 << 7))
+#endif
+#ifndef cpu_guest_has_fpu
+#define cpu_guest_has_fpu	(cpu_data[0].guest.options & MIPS_CPU_FPU)
+#endif
+#ifndef cpu_guest_has_watch
+#define cpu_guest_has_watch	(cpu_data[0].guest.options & MIPS_CPU_WATCH)
+#endif
+#ifndef cpu_guest_has_contextconfig
+#define cpu_guest_has_contextconfig (cpu_data[0].guest.options & MIPS_CPU_CTXTC)
+#endif
+#ifndef cpu_guest_has_segments
+#define cpu_guest_has_segments	(cpu_data[0].guest.options & MIPS_CPU_SEGMENTS)
+#endif
+#ifndef cpu_guest_has_badinstr
+#define cpu_guest_has_badinstr	(cpu_data[0].guest.options & MIPS_CPU_BADINSTR)
+#endif
+#ifndef cpu_guest_has_badinstrp
+#define cpu_guest_has_badinstrp	(cpu_data[0].guest.options & MIPS_CPU_BADINSTRP)
+#endif
+#ifndef cpu_guest_has_htw
+#define cpu_guest_has_htw	(cpu_data[0].guest.options & MIPS_CPU_HTW)
+#endif
+#ifndef cpu_guest_has_msa
+#define cpu_guest_has_msa	(cpu_data[0].guest.ases & MIPS_ASE_MSA)
+#endif
+#ifndef cpu_guest_has_kscr
+#define cpu_guest_has_kscr(n)	(cpu_data[0].guest.kscratch_mask & (1u << (n)))
+#endif
+#ifndef cpu_guest_has_rw_llb
+#define cpu_guest_has_rw_llb	(cpu_has_mips_r6 || (cpu_data[0].guest.options & MIPS_CPU_RW_LLB))
+#endif
+#ifndef cpu_guest_has_perf
+#define cpu_guest_has_perf	(cpu_data[0].guest.options & MIPS_CPU_PERF)
+#endif
+#ifndef cpu_guest_has_maar
+#define cpu_guest_has_maar	(cpu_data[0].guest.options & MIPS_CPU_MAAR)
+#endif
+
+/*
+ * Guest dynamic capabilities
+ */
+#ifndef cpu_guest_has_dyn_fpu
+#define cpu_guest_has_dyn_fpu	(cpu_data[0].guest.options_dyn & MIPS_CPU_FPU)
+#endif
+#ifndef cpu_guest_has_dyn_watch
+#define cpu_guest_has_dyn_watch	(cpu_data[0].guest.options_dyn & MIPS_CPU_WATCH)
+#endif
+#ifndef cpu_guest_has_dyn_contextconfig
+#define cpu_guest_has_dyn_contextconfig (cpu_data[0].guest.options_dyn & MIPS_CPU_CTXTC)
+#endif
+#ifndef cpu_guest_has_dyn_perf
+#define cpu_guest_has_dyn_perf	(cpu_data[0].guest.options_dyn & MIPS_CPU_PERF)
+#endif
+#ifndef cpu_guest_has_dyn_msa
+#define cpu_guest_has_dyn_msa	(cpu_data[0].guest.ases_dyn & MIPS_ASE_MSA)
+#endif
+#ifndef cpu_guest_has_dyn_maar
+#define cpu_guest_has_dyn_maar	(cpu_data[0].guest.options_dyn & MIPS_CPU_MAAR)
+#endif
+
 #endif /* __ASM_CPU_FEATURES_H */
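
The cpu_guest_has_* macros mirror the host-side feature tests: platforms may override them, otherwise they consult the probed guest.options/guest.ases/guest.conf fields. A hedged sketch of the usual consumer-side pattern (the function name and fallback are illustrative, not from this series):

	/*
	 * Hedged sketch: feature-gated setup as it would typically consume
	 * the new guest capability macros.  setup_guest_htw() and its error
	 * path are hypothetical.
	 */
	#include <linux/errno.h>
	#include <asm/cpu-features.h>

	static int setup_guest_htw(void)
	{
		if (!cpu_guest_has_htw)
			return -ENODEV;	/* caller would fall back to software page-table walks */

		/* ... program the guest PWBase/PWField/PWSize registers here ... */
		return 0;
	}
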
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index e7dc785..edbe273 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -28,6 +28,15 @@
 	unsigned char flags;	/* Flags describing cache properties */
 };
 
+struct guest_info {
+	unsigned long		ases;
+	unsigned long		ases_dyn;
+	unsigned long long	options;
+	unsigned long long	options_dyn;
+	u8			conf;
+	u8			kscratch_mask;
+};
+
 /*
  * Flag definitions
  */
@@ -40,6 +49,9 @@
 
 struct cpuinfo_mips {
 	unsigned long		asid_cache;
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+	unsigned long		asid_mask;
+#endif
 
 	/*
 	 * Capability and feature descriptor structure for MIPS CPU
@@ -60,6 +72,7 @@
 	int			tlbsizeftlbways;
 	struct cache_desc	icache; /* Primary I-cache */
 	struct cache_desc	dcache; /* Primary D or combined I/D cache */
+	struct cache_desc	vcache; /* Victim cache, between pcache and scache */
 	struct cache_desc	scache; /* Secondary cache */
 	struct cache_desc	tcache; /* Tertiary/split secondary cache */
 	int			srsets; /* Shadow register sets */
@@ -68,7 +81,7 @@
 #ifdef CONFIG_64BIT
 	int			vmbits; /* Virtual memory size in bits */
 #endif
-#ifdef CONFIG_MIPS_MT_SMP
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
 	/*
 	 * There is not necessarily a 1:1 mapping of VPE num to CPU number
 	 * in particular on multi-core systems.
@@ -91,6 +104,11 @@
 	 * htw_start/htw_stop calls
 	 */
 	unsigned int		htw_seq;
+
+	/* VZ & Guest features */
+	struct guest_info	guest;
+	unsigned int		gtoffset_mask;
+	unsigned int		guestid_mask;
 } __attribute__((aligned(SMP_CACHE_BYTES)));
 
 extern struct cpuinfo_mips cpu_data[];
@@ -102,7 +120,7 @@
 extern void cpu_report(void);
 
 extern const char *__cpu_name[];
-#define cpu_name_string()	__cpu_name[smp_processor_id()]
+#define cpu_name_string()	__cpu_name[raw_smp_processor_id()]
 
 struct seq_file;
 struct notifier_block;
@@ -125,10 +143,31 @@
 	unsigned long n;
 };
 
-#ifdef CONFIG_MIPS_MT_SMP
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
 # define cpu_vpe_id(cpuinfo)	((cpuinfo)->vpe_id)
 #else
 # define cpu_vpe_id(cpuinfo)	({ (void)cpuinfo; 0; })
 #endif
 
+static inline unsigned long cpu_asid_inc(void)
+{
+	return 1 << CONFIG_MIPS_ASID_SHIFT;
+}
+
+static inline unsigned long cpu_asid_mask(struct cpuinfo_mips *cpuinfo)
+{
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+	return cpuinfo->asid_mask;
+#endif
+	return ((1 << CONFIG_MIPS_ASID_BITS) - 1) << CONFIG_MIPS_ASID_SHIFT;
+}
+
+static inline void set_cpu_asid_mask(struct cpuinfo_mips *cpuinfo,
+				     unsigned long asid_mask)
+{
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+	cpuinfo->asid_mask = asid_mask;
+#endif
+}
+
 #endif /* __ASM_CPU_INFO_H */
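
With the ASID width now potentially probed at runtime (CONFIG_MIPS_ASID_BITS_VARIABLE), callers are expected to go through cpu_asid_mask() and cpu_asid_inc() rather than a fixed ASID_MASK. A short hedged sketch of the usual masking step; the helper and its parameters are illustrative, not an existing kernel function:

	/*
	 * Hedged sketch: extracting the hardware ASID bits from a per-mm
	 * context value with the new accessor.
	 */
	static inline unsigned long hw_asid(unsigned long mm_ctx, unsigned int cpu)
	{
		return mm_ctx & cpu_asid_mask(&cpu_data[cpu]);
	}
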
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index abee2bf..fbe1881 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -77,8 +77,13 @@
 	 */
 #endif
 
+#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R6
+	case CPU_M6250:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R6
 	case CPU_I6400:
+	case CPU_P6600:
 #endif
 
 #ifdef CONFIG_SYS_HAS_CPU_R3000
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index a97ca97..f672df8 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -42,6 +42,7 @@
 #define PRID_COMP_LEXRA		0x0b0000
 #define PRID_COMP_NETLOGIC	0x0c0000
 #define PRID_COMP_CAVIUM	0x0d0000
+#define PRID_COMP_LOONGSON	0x140000
 #define PRID_COMP_INGENIC_D0	0xd00000	/* JZ4740, JZ4750 */
 #define PRID_COMP_INGENIC_D1	0xd10000	/* JZ4770, JZ4775 */
 #define PRID_COMP_INGENIC_E1	0xe10000	/* JZ4780 */
@@ -118,9 +119,11 @@
 #define PRID_IMP_INTERAPTIV_MP	0xa100
 #define PRID_IMP_PROAPTIV_UP	0xa200
 #define PRID_IMP_PROAPTIV_MP	0xa300
+#define PRID_IMP_P6600		0xa400
 #define PRID_IMP_M5150		0xa700
 #define PRID_IMP_P5600		0xa800
 #define PRID_IMP_I6400		0xa900
+#define PRID_IMP_M6250		0xab00
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -169,6 +172,8 @@
 #define PRID_IMP_CAVIUM_CNF71XX 0x9400
 #define PRID_IMP_CAVIUM_CN78XX 0x9500
 #define PRID_IMP_CAVIUM_CN70XX 0x9600
+#define PRID_IMP_CAVIUM_CN73XX 0x9700
+#define PRID_IMP_CAVIUM_CNF75XX 0x9800
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_INGENIC_*
@@ -237,9 +242,10 @@
 #define PRID_REV_LOONGSON1B	0x0020
 #define PRID_REV_LOONGSON2E	0x0002
 #define PRID_REV_LOONGSON2F	0x0003
-#define PRID_REV_LOONGSON3A	0x0005
+#define PRID_REV_LOONGSON3A_R1	0x0005
 #define PRID_REV_LOONGSON3B_R1	0x0006
 #define PRID_REV_LOONGSON3B_R2	0x0007
+#define PRID_REV_LOONGSON3A_R2	0x0008
 
 /*
  * Older processors used to encode processor version and revision in two
@@ -307,8 +313,8 @@
 	CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
 	CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
 	CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
-	CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,
-	CPU_I6400,
+	CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K,
+	CPU_M5150, CPU_I6400, CPU_P6600, CPU_M6250,
 
 	/*
 	 * MIPS64 class processors
@@ -346,48 +352,68 @@
 	MIPS_CPU_ISA_M64R6)
 
 /*
+ * Private version of BIT_ULL() to escape include file recursion hell.
+ * We soon will have to switch to another mechanism that will work with
+ * more than 64 bits anyway.
+ */
+#define MBIT_ULL(bit)		(1ULL << (bit))
+
+/*
  * CPU Option encodings
  */
-#define MIPS_CPU_TLB		0x00000001ull /* CPU has TLB */
-#define MIPS_CPU_4KEX		0x00000002ull /* "R4K" exception model */
-#define MIPS_CPU_3K_CACHE	0x00000004ull /* R3000-style caches */
-#define MIPS_CPU_4K_CACHE	0x00000008ull /* R4000-style caches */
-#define MIPS_CPU_TX39_CACHE	0x00000010ull /* TX3900-style caches */
-#define MIPS_CPU_FPU		0x00000020ull /* CPU has FPU */
-#define MIPS_CPU_32FPR		0x00000040ull /* 32 dbl. prec. FP registers */
-#define MIPS_CPU_COUNTER	0x00000080ull /* Cycle count/compare */
-#define MIPS_CPU_WATCH		0x00000100ull /* watchpoint registers */
-#define MIPS_CPU_DIVEC		0x00000200ull /* dedicated interrupt vector */
-#define MIPS_CPU_VCE		0x00000400ull /* virt. coherence conflict possible */
-#define MIPS_CPU_CACHE_CDEX_P	0x00000800ull /* Create_Dirty_Exclusive CACHE op */
-#define MIPS_CPU_CACHE_CDEX_S	0x00001000ull /* ... same for seconary cache ... */
-#define MIPS_CPU_MCHECK		0x00002000ull /* Machine check exception */
-#define MIPS_CPU_EJTAG		0x00004000ull /* EJTAG exception */
-#define MIPS_CPU_NOFPUEX	0x00008000ull /* no FPU exception */
-#define MIPS_CPU_LLSC		0x00010000ull /* CPU has ll/sc instructions */
-#define MIPS_CPU_INCLUSIVE_CACHES	0x00020000ull /* P-cache subset enforced */
-#define MIPS_CPU_PREFETCH	0x00040000ull /* CPU has usable prefetch */
-#define MIPS_CPU_VINT		0x00080000ull /* CPU supports MIPSR2 vectored interrupts */
-#define MIPS_CPU_VEIC		0x00100000ull /* CPU supports MIPSR2 external interrupt controller mode */
-#define MIPS_CPU_ULRI		0x00200000ull /* CPU has ULRI feature */
-#define MIPS_CPU_PCI		0x00400000ull /* CPU has Perf Ctr Int indicator */
-#define MIPS_CPU_RIXI		0x00800000ull /* CPU has TLB Read/eXec Inhibit */
-#define MIPS_CPU_MICROMIPS	0x01000000ull /* CPU has microMIPS capability */
-#define MIPS_CPU_TLBINV		0x02000000ull /* CPU supports TLBINV/F */
-#define MIPS_CPU_SEGMENTS	0x04000000ull /* CPU supports Segmentation Control registers */
-#define MIPS_CPU_EVA		0x80000000ull /* CPU supports Enhanced Virtual Addressing */
-#define MIPS_CPU_HTW		0x100000000ull /* CPU support Hardware Page Table Walker */
-#define MIPS_CPU_RIXIEX		0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
-#define MIPS_CPU_MAAR		0x400000000ull /* MAAR(I) registers are present */
-#define MIPS_CPU_FRE		0x800000000ull /* FRE & UFE bits implemented */
-#define MIPS_CPU_RW_LLB		0x1000000000ull /* LLADDR/LLB writes are allowed */
-#define MIPS_CPU_XPA		0x2000000000ull /* CPU supports Extended Physical Addressing */
-#define MIPS_CPU_CDMM		0x4000000000ull	/* CPU has Common Device Memory Map */
-#define MIPS_CPU_BP_GHIST	0x8000000000ull /* R12K+ Branch Prediction Global History */
-#define MIPS_CPU_SP		0x10000000000ull /* Small (1KB) page support */
-#define MIPS_CPU_FTLB		0x20000000000ull /* CPU has Fixed-page-size TLB */
-#define MIPS_CPU_NAN_LEGACY	0x40000000000ull /* Legacy NaN implemented */
-#define MIPS_CPU_NAN_2008	0x80000000000ull /* 2008 NaN implemented */
+#define MIPS_CPU_TLB		MBIT_ULL( 0)	/* CPU has TLB */
+#define MIPS_CPU_4KEX		MBIT_ULL( 1)	/* "R4K" exception model */
+#define MIPS_CPU_3K_CACHE	MBIT_ULL( 2)	/* R3000-style caches */
+#define MIPS_CPU_4K_CACHE	MBIT_ULL( 3)	/* R4000-style caches */
+#define MIPS_CPU_TX39_CACHE	MBIT_ULL( 4)	/* TX3900-style caches */
+#define MIPS_CPU_FPU		MBIT_ULL( 5)	/* CPU has FPU */
+#define MIPS_CPU_32FPR		MBIT_ULL( 6)	/* 32 dbl. prec. FP registers */
+#define MIPS_CPU_COUNTER	MBIT_ULL( 7)	/* Cycle count/compare */
+#define MIPS_CPU_WATCH		MBIT_ULL( 8)	/* watchpoint registers */
+#define MIPS_CPU_DIVEC		MBIT_ULL( 9)	/* dedicated interrupt vector */
+#define MIPS_CPU_VCE		MBIT_ULL(10)	/* virt. coherence conflict possible */
+#define MIPS_CPU_CACHE_CDEX_P	MBIT_ULL(11)	/* Create_Dirty_Exclusive CACHE op */
+#define MIPS_CPU_CACHE_CDEX_S	MBIT_ULL(12)	/* ... same for secondary cache ... */
+#define MIPS_CPU_MCHECK		MBIT_ULL(13)	/* Machine check exception */
+#define MIPS_CPU_EJTAG		MBIT_ULL(14)	/* EJTAG exception */
+#define MIPS_CPU_NOFPUEX	MBIT_ULL(15)	/* no FPU exception */
+#define MIPS_CPU_LLSC		MBIT_ULL(16)	/* CPU has ll/sc instructions */
+#define MIPS_CPU_INCLUSIVE_CACHES	MBIT_ULL(17)	/* P-cache subset enforced */
+#define MIPS_CPU_PREFETCH	MBIT_ULL(18)	/* CPU has usable prefetch */
+#define MIPS_CPU_VINT		MBIT_ULL(19)	/* CPU supports MIPSR2 vectored interrupts */
+#define MIPS_CPU_VEIC		MBIT_ULL(20)	/* CPU supports MIPSR2 external interrupt controller mode */
+#define MIPS_CPU_ULRI		MBIT_ULL(21)	/* CPU has ULRI feature */
+#define MIPS_CPU_PCI		MBIT_ULL(22)	/* CPU has Perf Ctr Int indicator */
+#define MIPS_CPU_RIXI		MBIT_ULL(23)	/* CPU has TLB Read/eXec Inhibit */
+#define MIPS_CPU_MICROMIPS	MBIT_ULL(24)	/* CPU has microMIPS capability */
+#define MIPS_CPU_TLBINV		MBIT_ULL(25)	/* CPU supports TLBINV/F */
+#define MIPS_CPU_SEGMENTS	MBIT_ULL(26)	/* CPU supports Segmentation Control registers */
+#define MIPS_CPU_EVA		MBIT_ULL(27)	/* CPU supports Enhanced Virtual Addressing */
+#define MIPS_CPU_HTW		MBIT_ULL(28)	/* CPU supports Hardware Page Table Walker */
+#define MIPS_CPU_RIXIEX		MBIT_ULL(29)	/* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
+#define MIPS_CPU_MAAR		MBIT_ULL(30)	/* MAAR(I) registers are present */
+#define MIPS_CPU_FRE		MBIT_ULL(31)	/* FRE & UFE bits implemented */
+#define MIPS_CPU_RW_LLB		MBIT_ULL(32)	/* LLADDR/LLB writes are allowed */
+#define MIPS_CPU_LPA		MBIT_ULL(33)	/* CPU supports Large Physical Addressing */
+#define MIPS_CPU_CDMM		MBIT_ULL(34)	/* CPU has Common Device Memory Map */
+#define MIPS_CPU_BP_GHIST	MBIT_ULL(35)	/* R12K+ Branch Prediction Global History */
+#define MIPS_CPU_SP		MBIT_ULL(36)	/* Small (1KB) page support */
+#define MIPS_CPU_FTLB		MBIT_ULL(37)	/* CPU has Fixed-page-size TLB */
+#define MIPS_CPU_NAN_LEGACY	MBIT_ULL(38)	/* Legacy NaN implemented */
+#define MIPS_CPU_NAN_2008	MBIT_ULL(39)	/* 2008 NaN implemented */
+#define MIPS_CPU_VP		MBIT_ULL(40)	/* MIPSr6 Virtual Processors (multi-threading) */
+#define MIPS_CPU_LDPTE		MBIT_ULL(41)	/* CPU has ldpte/lddir instructions */
+#define MIPS_CPU_MVH		MBIT_ULL(42)	/* CPU supports MFHC0/MTHC0 */
+#define MIPS_CPU_EBASE_WG	MBIT_ULL(43)	/* CPU has EBase.WG */
+#define MIPS_CPU_BADINSTR	MBIT_ULL(44)	/* CPU has BadInstr register */
+#define MIPS_CPU_BADINSTRP	MBIT_ULL(45)	/* CPU has BadInstrP register */
+#define MIPS_CPU_CTXTC		MBIT_ULL(46)	/* CPU has [X]ConfigContext registers */
+#define MIPS_CPU_PERF		MBIT_ULL(47)	/* CPU has MIPS performance counters */
+#define MIPS_CPU_GUESTCTL0EXT	MBIT_ULL(48)	/* CPU has VZ GuestCtl0Ext register */
+#define MIPS_CPU_GUESTCTL1	MBIT_ULL(49)	/* CPU has VZ GuestCtl1 register */
+#define MIPS_CPU_GUESTCTL2	MBIT_ULL(50)	/* CPU has VZ GuestCtl2 register */
+#define MIPS_CPU_GUESTID	MBIT_ULL(51)	/* CPU uses VZ ASE GuestID feature */
+#define MIPS_CPU_DRG		MBIT_ULL(52)	/* CPU has VZ Direct Root to Guest (DRG) */
 
 /*
  * CPU ASE encodings
@@ -401,5 +427,6 @@
 #define MIPS_ASE_DSP2P		0x00000040 /* Signal Processing ASE Rev 2 */
 #define MIPS_ASE_VZ		0x00000080 /* Virtualization ASE */
 #define MIPS_ASE_MSA		0x00000100 /* MIPS SIMD Architecture */
+#define MIPS_ASE_DSP3		0x00000200 /* Signal Processing ASE Rev 3 */
 
 #endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index e090fc3..f5f4571 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -111,6 +111,11 @@
 #define R_MIPS_CALLHI16		30
 #define R_MIPS_CALLLO16		31
 /*
+ * Introduced for MIPSr6.
+ */
+#define R_MIPS_PC21_S2		60
+#define R_MIPS_PC26_S2		61
+/*
  * This range is reserved for vendor specific relocations.
  */
 #define R_MIPS_LOVENDOR		100
@@ -170,16 +175,14 @@
 #define SHF_MIPS_NAMES		0x02000000
 #define SHF_MIPS_NODUPES	0x01000000
 
-#ifndef ELF_ARCH
-/* ELF register definitions */
-#define ELF_NGREG	45
-#define ELF_NFPREG	33
-
-typedef unsigned long elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+#define MIPS_ABI_FP_ANY		0	/* FP ABI doesn't matter */
+#define MIPS_ABI_FP_DOUBLE	1	/* -mdouble-float */
+#define MIPS_ABI_FP_SINGLE	2	/* -msingle-float */
+#define MIPS_ABI_FP_SOFT	3	/* -msoft-float */
+#define MIPS_ABI_FP_OLD_64	4	/* -mips32r2 -mfp64 */
+#define MIPS_ABI_FP_XX		5	/* -mfpxx */
+#define MIPS_ABI_FP_64		6	/* -mips32r2 -mfp64 */
+#define MIPS_ABI_FP_64A		7	/* -mips32r2 -mfp64 -mno-odd-spreg */
 
 struct mips_elf_abiflags_v0 {
 	uint16_t version;	/* Version of flags structure */
@@ -196,51 +199,22 @@
 	uint32_t flags2;
 };
 
-#define MIPS_ABI_FP_ANY		0	/* FP ABI doesn't matter */
-#define MIPS_ABI_FP_DOUBLE	1	/* -mdouble-float */
-#define MIPS_ABI_FP_SINGLE	2	/* -msingle-float */
-#define MIPS_ABI_FP_SOFT	3	/* -msoft-float */
-#define MIPS_ABI_FP_OLD_64	4	/* -mips32r2 -mfp64 */
-#define MIPS_ABI_FP_XX		5	/* -mfpxx */
-#define MIPS_ABI_FP_64		6	/* -mips32r2 -mfp64 */
-#define MIPS_ABI_FP_64A		7	/* -mips32r2 -mfp64 -mno-odd-spreg */
+#ifndef ELF_ARCH
+/* ELF register definitions */
+#define ELF_NGREG	45
+#define ELF_NFPREG	33
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 #ifdef CONFIG_32BIT
-
-/*
- * In order to be sure that we don't attempt to execute an O32 binary which
- * requires 64 bit FP (FR=1) on a system which does not support it we refuse
- * to execute any binary which has bits specified by the following macro set
- * in its ELF header flags.
- */
-#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
-# define __MIPS_O32_FP64_MUST_BE_ZERO	0
-#else
-# define __MIPS_O32_FP64_MUST_BE_ZERO	EF_MIPS_FP64
-#endif
-
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
-#define elf_check_arch(hdr)						\
-({									\
-	int __res = 1;							\
-	struct elfhdr *__h = (hdr);					\
-									\
-	if (!mips_elf_check_machine(__h))				\
-		__res = 0;						\
-	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
-		__res = 0;						\
-	if ((__h->e_flags & EF_MIPS_ABI2) != 0)				\
-		__res = 0;						\
-	if (((__h->e_flags & EF_MIPS_ABI) != 0) &&			\
-	    ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32))		\
-		__res = 0;						\
-	if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO)		\
-		__res = 0;						\
-									\
-	__res;								\
-})
+#define elf_check_arch elfo32_check_arch
 
 /*
  * These are used to set parameters in the core dumps.
@@ -253,18 +227,7 @@
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
-#define elf_check_arch(hdr)						\
-({									\
-	int __res = 1;							\
-	struct elfhdr *__h = (hdr);					\
-									\
-	if (!mips_elf_check_machine(__h))				\
-		__res = 0;						\
-	if (__h->e_ident[EI_CLASS] != ELFCLASS64)			\
-		__res = 0;						\
-									\
-	__res;								\
-})
+#define elf_check_arch elfn64_check_arch
 
 /*
  * These are used to set parameters in the core dumps.
@@ -285,11 +248,81 @@
 
 #endif /* !defined(ELF_ARCH) */
 
+/*
+ * In order to be sure that we don't attempt to execute an O32 binary which
+ * requires 64 bit FP (FR=1) on a system which does not support it we refuse
+ * to execute any binary which has bits specified by the following macro set
+ * in its ELF header flags.
+ */
+#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
+# define __MIPS_O32_FP64_MUST_BE_ZERO	0
+#else
+# define __MIPS_O32_FP64_MUST_BE_ZERO	EF_MIPS_FP64
+#endif
+
 #define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS)
 
 #define vmcore_elf32_check_arch mips_elf_check_machine
 #define vmcore_elf64_check_arch mips_elf_check_machine
 
+/*
+ * Return non-zero if HDR identifies an o32 ELF binary.
+ */
+#define elfo32_check_arch(hdr)						\
+({									\
+	int __res = 1;							\
+	struct elfhdr *__h = (hdr);					\
+									\
+	if (!mips_elf_check_machine(__h))				\
+		__res = 0;						\
+	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
+		__res = 0;						\
+	if ((__h->e_flags & EF_MIPS_ABI2) != 0)				\
+		__res = 0;						\
+	if (((__h->e_flags & EF_MIPS_ABI) != 0) &&			\
+	    ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32))		\
+		__res = 0;						\
+	if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO)		\
+		__res = 0;						\
+									\
+	__res;								\
+})
+
+/*
+ * Return non-zero if HDR identifies an n64 ELF binary.
+ */
+#define elfn64_check_arch(hdr)						\
+({									\
+	int __res = 1;							\
+	struct elfhdr *__h = (hdr);					\
+									\
+	if (!mips_elf_check_machine(__h))				\
+		__res = 0;						\
+	if (__h->e_ident[EI_CLASS] != ELFCLASS64)			\
+		__res = 0;						\
+									\
+	__res;								\
+})
+
+/*
+ * Return non-zero if HDR identifies an n32 ELF binary.
+ */
+#define elfn32_check_arch(hdr)						\
+({									\
+	int __res = 1;							\
+	struct elfhdr *__h = (hdr);					\
+									\
+	if (!mips_elf_check_machine(__h))				\
+		__res = 0;						\
+	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
+		__res = 0;						\
+	if (((__h->e_flags & EF_MIPS_ABI2) == 0) ||			\
+	    ((__h->e_flags & EF_MIPS_ABI) != 0))			\
+		__res = 0;						\
+									\
+	__res;								\
+})
+
 struct mips_abi;
 
 extern struct mips_abi mips_abi;
@@ -300,17 +333,16 @@
 
 #define SET_PERSONALITY2(ex, state)					\
 do {									\
-	if (personality(current->personality) != PER_LINUX)		\
-		set_personality(PER_LINUX);				\
-									\
 	clear_thread_flag(TIF_HYBRID_FPREGS);				\
 	set_thread_flag(TIF_32BIT_FPREGS);				\
 									\
-	mips_set_personality_fp(state);					\
-									\
 	current->thread.abi = &mips_abi;				\
 									\
+	mips_set_personality_fp(state);					\
 	mips_set_personality_nan(state);				\
+									\
+	if (personality(current->personality) != PER_LINUX)		\
+		set_personality(PER_LINUX);				\
 } while (0)
 
 #endif /* CONFIG_32BIT */
@@ -321,6 +353,7 @@
 #define __SET_PERSONALITY32_N32()					\
 	do {								\
 		set_thread_flag(TIF_32BIT_ADDR);			\
+									\
 		current->thread.abi = &mips_abi_n32;			\
 	} while (0)
 #else
@@ -336,9 +369,9 @@
 		clear_thread_flag(TIF_HYBRID_FPREGS);			\
 		set_thread_flag(TIF_32BIT_FPREGS);			\
 									\
-		mips_set_personality_fp(state);				\
-									\
 		current->thread.abi = &mips_abi_32;			\
+									\
+		mips_set_personality_fp(state);				\
 	} while (0)
 #else
 #define __SET_PERSONALITY32_O32(ex, state)				\
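
The elfo32/elfn32/elfn64 helpers key mainly off EI_CLASS and the EF_MIPS_ABI2 flag: o32 is 32-bit without ABI2, n32 is 32-bit with ABI2, n64 is 64-bit. The standalone classifier below mirrors that core logic for illustration only; it omits the FP64 and vendor-ABI refinements, and the EF_MIPS_ABI2 fallback value is an assumption.

	/*
	 * Hedged sketch: a standalone classifier mirroring the core of the
	 * elfo32/elfn32/elfn64 checks above.
	 */
	#include <elf.h>
	#include <stdio.h>

	#ifndef EF_MIPS_ABI2
	#define EF_MIPS_ABI2	0x00000020	/* assumed value */
	#endif

	static const char *mips_abi_of(const Elf32_Ehdr *h)
	{
		if (h->e_machine != EM_MIPS)
			return "not MIPS";
		if (h->e_ident[EI_CLASS] == ELFCLASS64)
			return "n64";
		if (h->e_flags & EF_MIPS_ABI2)
			return "n32";
		return "o32";
	}

	int main(void)
	{
		Elf32_Ehdr h = { .e_machine = EM_MIPS, .e_flags = EF_MIPS_ABI2 };

		h.e_ident[EI_CLASS] = ELFCLASS32;
		printf("%s\n", mips_abi_of(&h));	/* prints "n32" */
		return 0;
	}
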
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 7b99efd..dbb1eb6 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -22,7 +22,8 @@
 /*
  * TLB hazards
  */
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && \
+	!defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_LOONGSON3_ENHANCEMENT)
 
 /*
  * MIPSR2 defines ehb for hazard avoidance
@@ -155,8 +156,8 @@
 } while (0)
 
 #elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
-	defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
-	defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
+	defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_LOONGSON3_ENHANCEMENT) || \
+	defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
 
 /*
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 01880b3..64f2500 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -19,8 +19,10 @@
 
 #ifdef __KERNEL__
 
+#include <linux/bug.h>
 #include <linux/interrupt.h>
 #include <linux/uaccess.h>
+#include <asm/cpu-features.h>
 #include <asm/kmap_types.h>
 
 /* undef for production */
@@ -50,7 +52,7 @@
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 
-#define flush_cache_kmaps()	flush_cache_all()
+#define flush_cache_kmaps()	BUG_ON(cpu_has_dc_aliases)
 
 extern void kmap_init(void);
 
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 2b4dc7a..ecabc00 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -304,10 +304,10 @@
 #undef __IS_KSEG1
 }
 
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-#define war_octeon_io_reorder_wmb()		wmb()
+#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
+#define war_io_reorder_wmb()		wmb()
 #else
-#define war_octeon_io_reorder_wmb()		do { } while (0)
+#define war_io_reorder_wmb()		do { } while (0)
 #endif
 
 #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
@@ -318,7 +318,7 @@
 	volatile type *__mem;						\
 	type __val;							\
 									\
-	war_octeon_io_reorder_wmb();					\
+	war_io_reorder_wmb();					\
 									\
 	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
 									\
@@ -387,7 +387,7 @@
 	volatile type *__addr;						\
 	type __val;							\
 									\
-	war_octeon_io_reorder_wmb();					\
+	war_io_reorder_wmb();					\
 									\
 	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
 									\
diff --git a/arch/mips/include/asm/irq_regs.h b/arch/mips/include/asm/irq_regs.h
index 33bd2a0..8c48d6d 100644
--- a/arch/mips/include/asm/irq_regs.h
+++ b/arch/mips/include/asm/irq_regs.h
@@ -18,4 +18,14 @@
 	return current_thread_info()->regs;
 }
 
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+	struct pt_regs *old_regs;
+
+	old_regs = get_irq_regs();
+	current_thread_info()->regs = new_regs;
+
+	return old_regs;
+}
+
 #endif /* __ASM_IRQ_REGS_H */
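
MIPS keeps the interrupt-time pt_regs pointer in thread_info rather than a per-CPU variable, and this change adds the matching set_irq_regs() so dispatch code can save and restore it. The conventional pattern around an interrupt entry point would look roughly like this; the handler and dispatcher names are hypothetical:

	/*
	 * Hedged sketch of the conventional set_irq_regs() usage on
	 * interrupt entry; dispatch_one_irq() stands in for the platform's
	 * real dispatcher.
	 */
	static void handle_irq_with_regs(struct pt_regs *regs)
	{
		struct pt_regs *old_regs = set_irq_regs(regs);

		dispatch_one_irq();	/* handlers may call get_irq_regs() */
		set_irq_regs(old_regs);	/* restore for nested interrupts    */
	}
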
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 65c351e..9d3610b 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -41,7 +41,12 @@
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
+#if defined(CONFIG_CPU_LOONGSON3)
+	"	mfc0	%[flags], $12					\n"
+	"	di							\n"
+#else
 	"	di	%[flags]					\n"
+#endif
 	"	andi	%[flags], 1					\n"
 	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index f6b1279..6733ac5 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -122,6 +122,7 @@
 	u32 flush_dcache_exits;
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
+	u32 halt_poll_invalid;
 	u32 halt_wakeup;
 };
 
@@ -311,17 +312,18 @@
 #define MIPS3_PG_FRAME		0x3fffffc0
 
 #define VPN2_MASK		0xffffe000
+#define KVM_ENTRYHI_ASID	MIPS_ENTRYHI_ASID
 #define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) &&		\
 				 ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x)		((x).tlb_hi & ASID_MASK)
+#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
 #define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))		\
 				 ? ((x).tlb_lo1 & MIPS3_PG_V)		\
 				 : ((x).tlb_lo0 & MIPS3_PG_V))
 #define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
 				 ((y) & VPN2_MASK & ~(x).tlb_mask))
 #define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
-				 TLB_ASID(x) == ((y) & ASID_MASK))
+				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
 
 struct kvm_mips_tlb {
 	long tlb_mask;
@@ -747,7 +749,7 @@
 
 uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
 void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
 void kvm_mips_init_count(struct kvm_vcpu *vcpu);
 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
@@ -812,5 +814,6 @@
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #endif /* __MIPS_KVM_HOST_H__ */
diff --git a/arch/mips/include/asm/llsc.h b/arch/mips/include/asm/llsc.h
new file mode 100644
index 0000000..c6d17d1
--- /dev/null
+++ b/arch/mips/include/asm/llsc.h
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Macros for 32/64-bit neutral inline assembler
+ */
+
+#ifndef __ASM_LLSC_H
+#define __ASM_LLSC_H
+
+#if _MIPS_SZLONG == 32
+#define SZLONG_LOG 5
+#define SZLONG_MASK 31UL
+#define __LL		"ll	"
+#define __SC		"sc	"
+#define __INS		"ins	"
+#define __EXT		"ext	"
+#elif _MIPS_SZLONG == 64
+#define SZLONG_LOG 6
+#define SZLONG_MASK 63UL
+#define __LL		"lld	"
+#define __SC		"scd	"
+#define __INS		"dins	"
+#define __EXT		"dext	"
+#endif
+
+#endif /* __ASM_LLSC_H  */
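
The __LL/__SC (and __INS/__EXT) strings let size-neutral inline assembly be written once and expand to ll/sc on 32-bit kernels and lld/scd on 64-bit ones. A simplified, hedged sketch of the usual retry loop built from them -- real users add memory barriers, the R10000_LLSC_WAR variant and tighter operand constraints:

	/*
	 * Hedged sketch: a minimal ll/sc retry loop spliced together from the
	 * size-neutral __LL/__SC strings.  Simplified: no barriers, plain "m"
	 * constraints, no R10000 workaround.
	 */
	#include <asm/llsc.h>

	static inline void set_bits_atomic(unsigned long mask, volatile unsigned long *p)
	{
		unsigned long tmp;

		__asm__ __volatile__(
		"1:	" __LL	"%0, %1		\n"	/* load-linked the word        */
		"	or	%0, %2		\n"	/* merge in the requested bits */
		"	" __SC	"%0, %1		\n"	/* try to store it back        */
		"	beqz	%0, 1b		\n"	/* lost the reservation: retry */
		: "=&r" (tmp), "+m" (*p)
		: "r" (mask));
	}
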
diff --git a/arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h
new file mode 100644
index 0000000..fa0583e
--- /dev/null
+++ b/arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_MACH_BMIPS_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_BMIPS_CPU_FEATURE_OVERRIDES_H
+
+/* Invariants across all BMIPS processors */
+#define cpu_has_vtag_icache		0
+#define cpu_icache_snoops_remote_store	1
+
+/* Processor ISA compatibility is MIPS32R1 */
+#define cpu_has_mips32r1		1
+#define cpu_has_mips32r2		0
+#define cpu_has_mips64r1		0
+#define cpu_has_mips64r2		0
+
+#endif /* __ASM_MACH_BMIPS_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-bmips/ioremap.h b/arch/mips/include/asm/mach-bmips/ioremap.h
new file mode 100644
index 0000000..29c7a7b
--- /dev/null
+++ b/arch/mips/include/asm/mach-bmips/ioremap.h
@@ -0,0 +1,33 @@
+#ifndef __ASM_MACH_BMIPS_IOREMAP_H
+#define __ASM_MACH_BMIPS_IOREMAP_H
+
+#include <linux/types.h>
+
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
+{
+	return phys_addr;
+}
+
+static inline int is_bmips_internal_registers(phys_addr_t offset)
+{
+	if (offset >= 0xfff80000)
+		return 1;
+
+	return 0;
+}
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+					 unsigned long flags)
+{
+	if (is_bmips_internal_registers(offset))
+		return (void __iomem *)offset;
+
+	return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+	return is_bmips_internal_registers((unsigned long)addr);
+}
+
+#endif /* __ASM_MACH_BMIPS_IOREMAP_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index cf92fe7..c4873e8 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -141,7 +141,7 @@
 .endm
 
 /*
- * Do SMP slave processor setup necessary before we can savely execute C code.
+ * Do SMP slave processor setup necessary before we can safely execute C code.
  */
 	.macro	smp_slave_setup
 	.endm
diff --git a/arch/mips/include/asm/mach-generic/kernel-entry-init.h b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
index 13b0751..a229297 100644
--- a/arch/mips/include/asm/mach-generic/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
@@ -16,7 +16,7 @@
 	.endm
 
 /*
- * Do SMP slave processor setup necessary before we can savely execute C code.
+ * Do SMP slave processor setup necessary before we can safely execute C code.
  */
 	.macro	smp_slave_setup
 	.endm
diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h
index cf4384b..b0b7261 100644
--- a/arch/mips/include/asm/mach-ip27/irq.h
+++ b/arch/mips/include/asm/mach-ip27/irq.h
@@ -11,7 +11,7 @@
 #define __ASM_MACH_IP27_IRQ_H
 
 /*
- * A hardwired interrupt number is completly stupid for this system - a
+ * A hardwired interrupt number is completely stupid for this system - a
  * large configuration might have thousands if not tenthousands of
  * interrupts.
  */
diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
index b087cb8..f992c1d 100644
--- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
@@ -81,7 +81,7 @@
 	.endm
 
 /*
- * Do SMP slave processor setup necessary before we can savely execute C code.
+ * Do SMP slave processor setup necessary before we can safely execute C code.
  */
 	.macro	smp_slave_setup
 	GET_NASID_ASM	t1
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index bf8c3e1..7c7708a 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -27,7 +27,7 @@
 
 /*
  Usually a driver for a SoC component has to request several gpio pins and
- configure them as funcion pins.
+ configure them as function pins.
  jz_gpio_bulk_request can be used to ease this process.
  Usually one would do something like:
 
diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h
index 32cfbe6..073b8bf 100644
--- a/arch/mips/include/asm/mach-jz4740/platform.h
+++ b/arch/mips/include/asm/mach-jz4740/platform.h
@@ -19,7 +19,6 @@
 
 #include <linux/platform_device.h>
 
-extern struct platform_device jz4740_usb_ohci_device;
 extern struct platform_device jz4740_udc_device;
 extern struct platform_device jz4740_udc_xceiv_device;
 extern struct platform_device jz4740_mmc_device;
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
index 98d6a2f..7023883 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -3,7 +3,7 @@
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation.
  *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #ifndef _LTQ_FALCON_H__
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
index 4e5ae65..8064d7a 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 #ifndef _LANTIQ_H__
 #define _LANTIQ_H__
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
index e23bf7c..17d2fdc 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #ifndef _LANTIQ_PLATFORM_H__
diff --git a/arch/mips/include/asm/mach-lantiq/xway/irq.h b/arch/mips/include/asm/mach-lantiq/xway/irq.h
index a1471d2..83e5f03 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/irq.h
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #ifndef __LANTIQ_IRQ_H
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index 5eadfe5..1410763 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #ifndef _LANTIQ_XWAY_IRQ_H__
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index dd6005b..f873107 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #ifndef _LTQ_XWAY_H__
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
index 5f8693d..4901833 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
@@ -12,7 +12,7 @@
  *   along with this program; if not, write to the Free Software
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
  *
- *   Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ *   Copyright (C) 2011 John Crispin <john@phrozen.org>
  */
 
 #ifndef LTQ_DMA_H__
diff --git a/arch/mips/include/asm/mach-loongson32/cpufreq.h b/arch/mips/include/asm/mach-loongson32/cpufreq.h
index 6843fa1..2f1ecb0 100644
--- a/arch/mips/include/asm/mach-loongson32/cpufreq.h
+++ b/arch/mips/include/asm/mach-loongson32/cpufreq.h
@@ -9,7 +9,6 @@
  * option) any later version.
  */
 
-
 #ifndef __ASM_MACH_LOONGSON32_CPUFREQ_H
 #define __ASM_MACH_LOONGSON32_CPUFREQ_H
 
diff --git a/arch/mips/include/asm/mach-loongson32/dma.h b/arch/mips/include/asm/mach-loongson32/dma.h
new file mode 100644
index 0000000..ad1dec7
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/dma.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 DMA platform support.
+ *
+ * This program is free software; you can redistribute	it and/or modify it
+ * under  the terms of	the GNU General	 Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_DMA_H
+#define __ASM_MACH_LOONGSON32_DMA_H
+
+#define LS1X_DMA_CHANNEL0	0
+#define LS1X_DMA_CHANNEL1	1
+#define LS1X_DMA_CHANNEL2	2
+
+struct plat_ls1x_dma {
+	int nr_channels;
+};
+
+extern struct plat_ls1x_dma ls1b_dma_pdata;
+
+#endif /* __ASM_MACH_LOONGSON32_DMA_H */
diff --git a/arch/mips/include/asm/mach-loongson32/irq.h b/arch/mips/include/asm/mach-loongson32/irq.h
index 0d35b99..c1c7441 100644
--- a/arch/mips/include/asm/mach-loongson32/irq.h
+++ b/arch/mips/include/asm/mach-loongson32/irq.h
@@ -9,7 +9,6 @@
  * option) any later version.
  */
 
-
 #ifndef __ASM_MACH_LOONGSON32_IRQ_H
 #define __ASM_MACH_LOONGSON32_IRQ_H
 
diff --git a/arch/mips/include/asm/mach-loongson32/loongson1.h b/arch/mips/include/asm/mach-loongson32/loongson1.h
index 12aa129..978f6df 100644
--- a/arch/mips/include/asm/mach-loongson32/loongson1.h
+++ b/arch/mips/include/asm/mach-loongson32/loongson1.h
@@ -9,7 +9,6 @@
  * option) any later version.
  */
 
-
 #ifndef __ASM_MACH_LOONGSON32_LOONGSON1_H
 #define __ASM_MACH_LOONGSON32_LOONGSON1_H
 
@@ -18,6 +17,9 @@
 /* Loongson 1 Register Bases */
 #define LS1X_MUX_BASE			0x1fd00420
 #define LS1X_INTC_BASE			0x1fd01040
+#define LS1X_GPIO0_BASE			0x1fd010c0
+#define LS1X_GPIO1_BASE			0x1fd010c4
+#define LS1X_DMAC_BASE			0x1fd01160
 #define LS1X_EHCI_BASE			0x1fe00000
 #define LS1X_OHCI_BASE			0x1fe08000
 #define LS1X_GMAC0_BASE			0x1fe10000
diff --git a/arch/mips/include/asm/mach-loongson32/nand.h b/arch/mips/include/asm/mach-loongson32/nand.h
new file mode 100644
index 0000000..e274912
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/nand.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 NAND platform support.
+ *
+ * This program is free software; you can redistribute	it and/or modify it
+ * under  the terms of	the GNU General	 Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_NAND_H
+#define __ASM_MACH_LOONGSON32_NAND_H
+
+#include <linux/dmaengine.h>
+#include <linux/mtd/partitions.h>
+
+struct plat_ls1x_nand {
+	struct mtd_partition *parts;
+	unsigned int nr_parts;
+
+	int hold_cycle;
+	int wait_cycle;
+};
+
+extern struct plat_ls1x_nand ls1b_nand_pdata;
+
+bool ls1x_dma_filter_fn(struct dma_chan *chan, void *param);
+
+#endif /* __ASM_MACH_LOONGSON32_NAND_H */
diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h
index c32f03f..672531a 100644
--- a/arch/mips/include/asm/mach-loongson32/platform.h
+++ b/arch/mips/include/asm/mach-loongson32/platform.h
@@ -7,20 +7,28 @@
  * option) any later version.
  */
 
-
 #ifndef __ASM_MACH_LOONGSON32_PLATFORM_H
 #define __ASM_MACH_LOONGSON32_PLATFORM_H
 
 #include <linux/platform_device.h>
 
+#include <dma.h>
+#include <nand.h>
+
 extern struct platform_device ls1x_uart_pdev;
 extern struct platform_device ls1x_cpufreq_pdev;
+extern struct platform_device ls1x_dma_pdev;
 extern struct platform_device ls1x_eth0_pdev;
 extern struct platform_device ls1x_eth1_pdev;
 extern struct platform_device ls1x_ehci_pdev;
+extern struct platform_device ls1x_gpio0_pdev;
+extern struct platform_device ls1x_gpio1_pdev;
+extern struct platform_device ls1x_nand_pdev;
 extern struct platform_device ls1x_rtc_pdev;
 
-extern void __init ls1x_clk_init(void);
-extern void __init ls1x_serial_setup(struct platform_device *pdev);
+void __init ls1x_clk_init(void);
+void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata);
+void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata);
+void __init ls1x_serial_set_uartclk(struct platform_device *pdev);
 
 #endif /* __ASM_MACH_LOONGSON32_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-clk.h b/arch/mips/include/asm/mach-loongson32/regs-clk.h
index 1f5a715..4d56fc3 100644
--- a/arch/mips/include/asm/mach-loongson32/regs-clk.h
+++ b/arch/mips/include/asm/mach-loongson32/regs-clk.h
@@ -19,18 +19,18 @@
 #define LS1X_CLK_PLL_DIV		LS1X_CLK_REG(0x4)
 
 /* Clock PLL Divisor Register Bits */
-#define DIV_DC_EN			(0x1 << 31)
-#define DIV_DC_RST			(0x1 << 30)
-#define DIV_CPU_EN			(0x1 << 25)
-#define DIV_CPU_RST			(0x1 << 24)
-#define DIV_DDR_EN			(0x1 << 19)
-#define DIV_DDR_RST			(0x1 << 18)
-#define RST_DC_EN			(0x1 << 5)
-#define RST_DC				(0x1 << 4)
-#define RST_DDR_EN			(0x1 << 3)
-#define RST_DDR				(0x1 << 2)
-#define RST_CPU_EN			(0x1 << 1)
-#define RST_CPU				0x1
+#define DIV_DC_EN			BIT(31)
+#define DIV_DC_RST			BIT(30)
+#define DIV_CPU_EN			BIT(25)
+#define DIV_CPU_RST			BIT(24)
+#define DIV_DDR_EN			BIT(19)
+#define DIV_DDR_RST			BIT(18)
+#define RST_DC_EN			BIT(5)
+#define RST_DC				BIT(4)
+#define RST_DDR_EN			BIT(3)
+#define RST_DDR				BIT(2)
+#define RST_CPU_EN			BIT(1)
+#define RST_CPU				BIT(0)
 
 #define DIV_DC_SHIFT			26
 #define DIV_CPU_SHIFT			20
diff --git a/arch/mips/include/asm/mach-loongson32/regs-mux.h b/arch/mips/include/asm/mach-loongson32/regs-mux.h
index 8302d92..7c394f9 100644
--- a/arch/mips/include/asm/mach-loongson32/regs-mux.h
+++ b/arch/mips/include/asm/mach-loongson32/regs-mux.h
@@ -19,49 +19,49 @@
 #define LS1X_MUX_CTRL1			LS1X_MUX_REG(0x4)
 
 /* MUX CTRL0 Register Bits */
-#define UART0_USE_PWM23			(0x1 << 28)
-#define UART0_USE_PWM01			(0x1 << 27)
-#define UART1_USE_LCD0_5_6_11		(0x1 << 26)
-#define I2C2_USE_CAN1			(0x1 << 25)
-#define I2C1_USE_CAN0			(0x1 << 24)
-#define NAND3_USE_UART5			(0x1 << 23)
-#define NAND3_USE_UART4			(0x1 << 22)
-#define NAND3_USE_UART1_DAT		(0x1 << 21)
-#define NAND3_USE_UART1_CTS		(0x1 << 20)
-#define NAND3_USE_PWM23			(0x1 << 19)
-#define NAND3_USE_PWM01			(0x1 << 18)
-#define NAND2_USE_UART5			(0x1 << 17)
-#define NAND2_USE_UART4			(0x1 << 16)
-#define NAND2_USE_UART1_DAT		(0x1 << 15)
-#define NAND2_USE_UART1_CTS		(0x1 << 14)
-#define NAND2_USE_PWM23			(0x1 << 13)
-#define NAND2_USE_PWM01			(0x1 << 12)
-#define NAND1_USE_UART5			(0x1 << 11)
-#define NAND1_USE_UART4			(0x1 << 10)
-#define NAND1_USE_UART1_DAT		(0x1 << 9)
-#define NAND1_USE_UART1_CTS		(0x1 << 8)
-#define NAND1_USE_PWM23			(0x1 << 7)
-#define NAND1_USE_PWM01			(0x1 << 6)
-#define GMAC1_USE_UART1			(0x1 << 4)
-#define GMAC1_USE_UART0			(0x1 << 3)
-#define LCD_USE_UART0_DAT		(0x1 << 2)
-#define LCD_USE_UART15			(0x1 << 1)
-#define LCD_USE_UART0			0x1
+#define UART0_USE_PWM23			BIT(28)
+#define UART0_USE_PWM01			BIT(27)
+#define UART1_USE_LCD0_5_6_11		BIT(26)
+#define I2C2_USE_CAN1			BIT(25)
+#define I2C1_USE_CAN0			BIT(24)
+#define NAND3_USE_UART5			BIT(23)
+#define NAND3_USE_UART4			BIT(22)
+#define NAND3_USE_UART1_DAT		BIT(21)
+#define NAND3_USE_UART1_CTS		BIT(20)
+#define NAND3_USE_PWM23			BIT(19)
+#define NAND3_USE_PWM01			BIT(18)
+#define NAND2_USE_UART5			BIT(17)
+#define NAND2_USE_UART4			BIT(16)
+#define NAND2_USE_UART1_DAT		BIT(15)
+#define NAND2_USE_UART1_CTS		BIT(14)
+#define NAND2_USE_PWM23			BIT(13)
+#define NAND2_USE_PWM01			BIT(12)
+#define NAND1_USE_UART5			BIT(11)
+#define NAND1_USE_UART4			BIT(10)
+#define NAND1_USE_UART1_DAT		BIT(9)
+#define NAND1_USE_UART1_CTS		BIT(8)
+#define NAND1_USE_PWM23			BIT(7)
+#define NAND1_USE_PWM01			BIT(6)
+#define GMAC1_USE_UART1			BIT(4)
+#define GMAC1_USE_UART0			BIT(3)
+#define LCD_USE_UART0_DAT		BIT(2)
+#define LCD_USE_UART15			BIT(1)
+#define LCD_USE_UART0			BIT(0)
 
 /* MUX CTRL1 Register Bits */
-#define USB_RESET			(0x1 << 31)
-#define SPI1_CS_USE_PWM01		(0x1 << 24)
-#define SPI1_USE_CAN			(0x1 << 23)
-#define DISABLE_DDR_CONFSPACE		(0x1 << 20)
-#define DDR32TO16EN			(0x1 << 16)
-#define GMAC1_SHUT			(0x1 << 13)
-#define GMAC0_SHUT			(0x1 << 12)
-#define USB_SHUT			(0x1 << 11)
-#define UART1_3_USE_CAN1		(0x1 << 5)
-#define UART1_2_USE_CAN0		(0x1 << 4)
-#define GMAC1_USE_TXCLK			(0x1 << 3)
-#define GMAC0_USE_TXCLK			(0x1 << 2)
-#define GMAC1_USE_PWM23			(0x1 << 1)
-#define GMAC0_USE_PWM01			0x1
+#define USB_RESET			BIT(31)
+#define SPI1_CS_USE_PWM01		BIT(24)
+#define SPI1_USE_CAN			BIT(23)
+#define DISABLE_DDR_CONFSPACE		BIT(20)
+#define DDR32TO16EN			BIT(16)
+#define GMAC1_SHUT			BIT(13)
+#define GMAC0_SHUT			BIT(12)
+#define USB_SHUT			BIT(11)
+#define UART1_3_USE_CAN1		BIT(5)
+#define UART1_2_USE_CAN0		BIT(4)
+#define GMAC1_USE_TXCLK			BIT(3)
+#define GMAC0_USE_TXCLK			BIT(2)
+#define GMAC1_USE_PWM23			BIT(1)
+#define GMAC0_USE_PWM01			BIT(0)
 
 #endif /* __ASM_MACH_LOONGSON32_REGS_MUX_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-pwm.h b/arch/mips/include/asm/mach-loongson32/regs-pwm.h
index 69f174e..4119600 100644
--- a/arch/mips/include/asm/mach-loongson32/regs-pwm.h
+++ b/arch/mips/include/asm/mach-loongson32/regs-pwm.h
@@ -19,11 +19,11 @@
 #define PWM_CTRL		0xc
 
 /* PWM Control Register Bits */
-#define CNT_RST			(0x1 << 7)
-#define INT_SR			(0x1 << 6)
-#define INT_EN			(0x1 << 5)
-#define PWM_SINGLE		(0x1 << 4)
-#define PWM_OE			(0x1 << 3)
-#define CNT_EN			0x1
+#define CNT_RST			BIT(7)
+#define INT_SR			BIT(6)
+#define INT_EN			BIT(5)
+#define PWM_SINGLE		BIT(4)
+#define PWM_OE			BIT(3)
+#define CNT_EN			BIT(0)
 
 #endif /* __ASM_MACH_LOONGSON32_REGS_PWM_H */
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index 98963c2..89328a3d 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -16,11 +16,6 @@
 #ifndef __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H
 #define __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H
 
-#define cpu_dcache_line_size()	32
-#define cpu_icache_line_size()	32
-#define cpu_scache_line_size()	32
-
-
 #define cpu_has_32fpr		1
 #define cpu_has_3k_cache	0
 #define cpu_has_4k_cache	1
@@ -31,24 +26,17 @@
 #define cpu_has_counter		1
 #define cpu_has_dc_aliases	(PAGE_SIZE < 0x4000)
 #define cpu_has_divec		0
-#define cpu_has_dsp		0
-#define cpu_has_dsp2		0
 #define cpu_has_ejtag		0
-#define cpu_has_ic_fills_f_dc	0
 #define cpu_has_inclusive_pcaches	1
 #define cpu_has_llsc		1
 #define cpu_has_mcheck		0
 #define cpu_has_mdmx		0
 #define cpu_has_mips16		0
-#define cpu_has_mips32r2	0
 #define cpu_has_mips3d		0
-#define cpu_has_mips64r2	0
 #define cpu_has_mipsmt		0
-#define cpu_has_prefetch	0
 #define cpu_has_smartmips	0
 #define cpu_has_tlb		1
 #define cpu_has_tx39_cache	0
-#define cpu_has_userlocal	0
 #define cpu_has_vce		0
 #define cpu_has_veic		0
 #define cpu_has_vint		0
@@ -56,6 +44,10 @@
 #define cpu_has_watch		1
 #define cpu_has_local_ebase	0
 
-#define cpu_has_wsbh		IS_ENABLED(CONFIG_CPU_LOONGSON3)
+#ifdef CONFIG_CPU_LOONGSON3
+#define cpu_has_wsbh		1
+#define cpu_has_ic_fills_f_dc	1
+#define cpu_hwrena_impl_bits	0xc0000000
+#endif
 
 #endif /* __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index 3f2f84f..8393bc54 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -23,8 +23,15 @@
 	or	t0, (0x1 << 7)
 	mtc0	t0, $16, 3
 	/* Set ELPA on LOONGSON3 pagegrain */
-	li	t0, (0x1 << 29)
+	mfc0	t0, $5, 1
+	or	t0, (0x1 << 29)
 	mtc0	t0, $5, 1
+#ifdef CONFIG_LOONGSON3_ENHANCEMENT
+	/* Enable STFill Buffer */
+	mfc0	t0, $16, 6
+	or	t0, 0x100
+	mtc0	t0, $16, 6
+#endif
 	_ehb
 	.set	pop
 #endif
@@ -42,8 +49,15 @@
 	or	t0, (0x1 << 7)
 	mtc0	t0, $16, 3
 	/* Set ELPA on LOONGSON3 pagegrain */
-	li	t0, (0x1 << 29)
+	mfc0	t0, $5, 1
+	or	t0, (0x1 << 29)
 	mtc0	t0, $5, 1
+#ifdef CONFIG_LOONGSON3_ENHANCEMENT
+	/* Enable STFill Buffer */
+	mfc0	t0, $16, 6
+	or	t0, 0x100
+	mtc0	t0, $16, 6
+#endif
 	_ehb
 	.set	pop
 #endif
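The kernel-entry-init change replaces a bare "li" (which overwrote the whole PageGrain register) with an mfc0/or/mtc0 read-modify-write, so only the ELPA bit is touched. The same operation expressed with the C accessors from <asm/mipsregs.h>, as a hedged sketch (loongson_enable_elpa() is a hypothetical name; back_to_back_c0_hazard() from <asm/hazards.h> stands in for the trailing _ehb):

	static inline void loongson_enable_elpa(void)
	{
		/* Preserve the other PageGrain bits, then set ELPA (bit 29). */
		write_c0_pagegrain(read_c0_pagegrain() | (1 << 29));
		back_to_back_c0_hazard();
	}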
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
index 455d406..a73350b 100644
--- a/arch/mips/include/asm/mach-ralink/mt7620.h
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #ifndef _MT7620_REGS_H_
@@ -72,6 +72,7 @@
 #define SYSCFG0_DRAM_TYPE_SDRAM		0
 #define SYSCFG0_DRAM_TYPE_DDR1		1
 #define SYSCFG0_DRAM_TYPE_DDR2		2
+#define SYSCFG0_DRAM_TYPE_UNKNOWN	3
 
 #define SYSCFG0_DRAM_TYPE_DDR2_MT7628	0
 #define SYSCFG0_DRAM_TYPE_DDR1_MT7628	1
diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h
index 610b61e..a672e06 100644
--- a/arch/mips/include/asm/mach-ralink/mt7621.h
+++ b/arch/mips/include/asm/mach-ralink/mt7621.h
@@ -3,7 +3,7 @@
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation.
  *
- * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
  */
 
 #ifndef _MT7621_REGS_H_
diff --git a/arch/mips/include/asm/mach-ralink/pinmux.h b/arch/mips/include/asm/mach-ralink/pinmux.h
index be106cb..ba8ac33 100644
--- a/arch/mips/include/asm/mach-ralink/pinmux.h
+++ b/arch/mips/include/asm/mach-ralink/pinmux.h
@@ -3,7 +3,7 @@
  *  it under the terms of the GNU General Public License version 2 as
  *  published by the Free Software Foundation.
  *
- *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2012 John Crispin <john@phrozen.org>
  */
 
 #ifndef _RT288X_PINMUX_H__
diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
index 4c9fba6..9df1a53 100644
--- a/arch/mips/include/asm/mach-ralink/ralink_regs.h
+++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
@@ -1,7 +1,7 @@
 /*
  *  Ralink SoC register definitions
  *
- *  Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2013 John Crispin <john@phrozen.org>
  *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
index 03ad716..25ae104 100644
--- a/arch/mips/include/asm/mach-ralink/rt288x.h
+++ b/arch/mips/include/asm/mach-ralink/rt288x.h
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #ifndef _RT288X_REGS_H_
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
index 2eea793..ac2d65c 100644
--- a/arch/mips/include/asm/mach-ralink/rt305x.h
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #ifndef _RT305X_REGS_H_
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index b196825..9411a4c 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -28,7 +28,7 @@
  * This function returns the physical base address of the Coherence Manager
  * global control block, or 0 if no Coherence Manager is present. It provides
  * a default implementation which reads the CMGCRBase register where available,
- * and may be overriden by platforms which determine this address in a
+ * and may be overridden by platforms which determine this address in a
  * different way by defining a function with the same prototype except for the
  * name mips_cm_phys_base (without underscores).
  */
@@ -208,6 +208,7 @@
 BUILD_CM_RW(sys_config2,	MIPS_CM_GCB_OFS + 0x150)
 BUILD_CM_RW(l2_pft_control,	MIPS_CM_GCB_OFS + 0x300)
 BUILD_CM_RW(l2_pft_control_b,	MIPS_CM_GCB_OFS + 0x308)
+BUILD_CM_RW(bev_base,		MIPS_CM_GCB_OFS + 0x680)
 
 /* Core Local & Core Other register accessor functions */
 BUILD_CM_Cx_RW(reset_release,	0x00)
@@ -290,8 +291,8 @@
 #define CM_GCR_GIC_BASE_GICEN_MSK		(_ULCAST_(0x1) << 0)
 
 /* GCR_CPC_BASE register fields */
-#define CM_GCR_CPC_BASE_CPCBASE_SHF		17
-#define CM_GCR_CPC_BASE_CPCBASE_MSK		(_ULCAST_(0x7fff) << 17)
+#define CM_GCR_CPC_BASE_CPCBASE_SHF		15
+#define CM_GCR_CPC_BASE_CPCBASE_MSK		(_ULCAST_(0x1ffff) << 15)
 #define CM_GCR_CPC_BASE_CPCEN_SHF		0
 #define CM_GCR_CPC_BASE_CPCEN_MSK		(_ULCAST_(0x1) << 0)
 
@@ -461,7 +462,10 @@
 	if (mips_cm_revision() >= CM_REV_CM3)
 		return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
 
-	return smp_num_siblings;
+	if (config_enabled(CONFIG_SMP))
+		return smp_num_siblings;
+
+	return 1;
 }
 
 /**
@@ -505,7 +509,7 @@
 
 #else /* !CONFIG_MIPS_CM */
 
-static inline void mips_cm_lock_other(unsigned int core) { }
+static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { }
 static inline void mips_cm_unlock_other(void) { }
 
 #endif /* !CONFIG_MIPS_CM */
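The !CONFIG_MIPS_CM stub now matches the two-argument form of mips_cm_lock_other(), which selects both a core and a VP before core-other register accesses. A hedged sketch of the expected calling pattern (probe_other_core() is illustrative, not part of the patch):

	static void probe_other_core(unsigned int core, unsigned int vp)
	{
		mips_cm_lock_other(core, vp);	/* redirect GCR core-other accesses */
		/* ... read or write GCR_Cx_* registers of the selected core ... */
		mips_cm_unlock_other();
	}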
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index e09035239..8c519f9 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -106,6 +106,9 @@
 BUILD_CPC_Cx_RW(cmd,		0x00)
 BUILD_CPC_Cx_RW(stat_conf,	0x08)
 BUILD_CPC_Cx_RW(other,		0x10)
+BUILD_CPC_Cx_RW(vp_stop,	0x20)
+BUILD_CPC_Cx_RW(vp_run,		0x28)
+BUILD_CPC_Cx_RW(vp_running,	0x30)
 
 /* CPC_Cx_CMD register fields */
 #define CPC_Cx_CMD_SHF				0
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
index 1f6ea83..20621e1 100644
--- a/arch/mips/include/asm/mips-r2-to-r6-emul.h
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -79,7 +79,7 @@
 };
 
 
-extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
 			  const char *str);
 
 #ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 3ad19ad..25d0157 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -55,8 +55,14 @@
 #define CP0_BADINSTR $8, 1
 #define CP0_COUNT $9
 #define CP0_ENTRYHI $10
+#define CP0_GUESTCTL1 $10, 4
+#define CP0_GUESTCTL2 $10, 5
+#define CP0_GUESTCTL3 $10, 6
 #define CP0_COMPARE $11
+#define CP0_GUESTCTL0EXT $11, 4
 #define CP0_STATUS $12
+#define CP0_GUESTCTL0 $12, 6
+#define CP0_GTOFFSET $12, 7
 #define CP0_CAUSE $13
 #define CP0_EPC $14
 #define CP0_PRID $15
@@ -229,6 +235,8 @@
 
 /* MIPS32/64 EntryHI bit definitions */
 #define MIPS_ENTRYHI_EHINV	(_ULCAST_(1) << 10)
+#define MIPS_ENTRYHI_ASIDX	(_ULCAST_(0x3) << 8)
+#define MIPS_ENTRYHI_ASID	(_ULCAST_(0xff) << 0)
 
 /*
  * R4x00 interrupt enable / cause bits
@@ -390,6 +398,8 @@
 #define	 CAUSEF_IP7		(_ULCAST_(1)   << 15)
 #define CAUSEB_FDCI		21
 #define CAUSEF_FDCI		(_ULCAST_(1)   << 21)
+#define CAUSEB_WP		22
+#define CAUSEF_WP		(_ULCAST_(1)   << 22)
 #define CAUSEB_IV		23
 #define CAUSEF_IV		(_ULCAST_(1)   << 23)
 #define CAUSEB_PCI		26
@@ -611,7 +621,8 @@
 #define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14)
 #define MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT	(_ULCAST_(2) << 14)
 #define MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT	(_ULCAST_(3) << 14)
-#define MIPS_CONF4_KSCREXIST	(_ULCAST_(255) << 16)
+#define MIPS_CONF4_KSCREXIST_SHIFT	(16)
+#define MIPS_CONF4_KSCREXIST	(_ULCAST_(255) << MIPS_CONF4_KSCREXIST_SHIFT)
 #define MIPS_CONF4_VTLBSIZEEXT_SHIFT	(24)
 #define MIPS_CONF4_VTLBSIZEEXT	(_ULCAST_(15) << MIPS_CONF4_VTLBSIZEEXT_SHIFT)
 #define MIPS_CONF4_AE		(_ULCAST_(1) << 28)
@@ -623,6 +634,7 @@
 #define MIPS_CONF5_MRP		(_ULCAST_(1) << 3)
 #define MIPS_CONF5_LLB		(_ULCAST_(1) << 4)
 #define MIPS_CONF5_MVH		(_ULCAST_(1) << 5)
+#define MIPS_CONF5_VP		(_ULCAST_(1) << 7)
 #define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
 #define MIPS_CONF5_MSAEN	(_ULCAST_(1) << 27)
@@ -633,6 +645,8 @@
 #define MIPS_CONF6_SYND		(_ULCAST_(1) << 13)
 /* proAptiv FTLB on/off bit */
 #define MIPS_CONF6_FTLBEN	(_ULCAST_(1) << 15)
+/* Loongson-3 FTLB on/off bit */
+#define MIPS_CONF6_FTLBDIS	(_ULCAST_(1) << 22)
 /* FTLB probability bits */
 #define MIPS_CONF6_FTLBP_SHIFT	(16)
 
@@ -645,12 +659,38 @@
 /* FTLB probability bits for R6 */
 #define MIPS_CONF7_FTLBP_SHIFT	(18)
 
+/* WatchLo* register definitions */
+#define MIPS_WATCHLO_IRW	(_ULCAST_(0x7) << 0)
+
+/* WatchHi* register definitions */
+#define MIPS_WATCHHI_M		(_ULCAST_(1) << 31)
+#define MIPS_WATCHHI_G		(_ULCAST_(1) << 30)
+#define MIPS_WATCHHI_WM		(_ULCAST_(0x3) << 28)
+#define MIPS_WATCHHI_WM_R_RVA	(_ULCAST_(0) << 28)
+#define MIPS_WATCHHI_WM_R_GPA	(_ULCAST_(1) << 28)
+#define MIPS_WATCHHI_WM_G_GVA	(_ULCAST_(2) << 28)
+#define MIPS_WATCHHI_EAS	(_ULCAST_(0x3) << 24)
+#define MIPS_WATCHHI_ASID	(_ULCAST_(0xff) << 16)
+#define MIPS_WATCHHI_MASK	(_ULCAST_(0x1ff) << 3)
+#define MIPS_WATCHHI_I		(_ULCAST_(1) << 2)
+#define MIPS_WATCHHI_R		(_ULCAST_(1) << 1)
+#define MIPS_WATCHHI_W		(_ULCAST_(1) << 0)
+#define MIPS_WATCHHI_IRW	(_ULCAST_(0x7) << 0)
+
 /* MAAR bit definitions */
 #define MIPS_MAAR_ADDR		((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
 #define MIPS_MAAR_ADDR_SHIFT	12
 #define MIPS_MAAR_S		(_ULCAST_(1) << 1)
 #define MIPS_MAAR_V		(_ULCAST_(1) << 0)
 
+/* EBase bit definitions */
+#define MIPS_EBASE_CPUNUM_SHIFT	0
+#define MIPS_EBASE_CPUNUM	(_ULCAST_(0x3ff) << 0)
+#define MIPS_EBASE_WG_SHIFT	11
+#define MIPS_EBASE_WG		(_ULCAST_(1) << 11)
+#define MIPS_EBASE_BASE_SHIFT	12
+#define MIPS_EBASE_BASE		(~_ULCAST_((1 << MIPS_EBASE_BASE_SHIFT) - 1))
+
 /* CMGCRBase bit definitions */
 #define MIPS_CMGCRB_BASE	11
 #define MIPS_CMGCRF_BASE	(~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
@@ -706,6 +746,94 @@
 #define MIPS_PWCTL_PSN_SHIFT	0
 #define MIPS_PWCTL_PSN_MASK	0x0000003f
 
+/* GuestCtl0 fields */
+#define MIPS_GCTL0_GM_SHIFT	31
+#define MIPS_GCTL0_GM		(_ULCAST_(1) << MIPS_GCTL0_GM_SHIFT)
+#define MIPS_GCTL0_RI_SHIFT	30
+#define MIPS_GCTL0_RI		(_ULCAST_(1) << MIPS_GCTL0_RI_SHIFT)
+#define MIPS_GCTL0_MC_SHIFT	29
+#define MIPS_GCTL0_MC		(_ULCAST_(1) << MIPS_GCTL0_MC_SHIFT)
+#define MIPS_GCTL0_CP0_SHIFT	28
+#define MIPS_GCTL0_CP0		(_ULCAST_(1) << MIPS_GCTL0_CP0_SHIFT)
+#define MIPS_GCTL0_AT_SHIFT	26
+#define MIPS_GCTL0_AT		(_ULCAST_(0x3) << MIPS_GCTL0_AT_SHIFT)
+#define MIPS_GCTL0_GT_SHIFT	25
+#define MIPS_GCTL0_GT		(_ULCAST_(1) << MIPS_GCTL0_GT_SHIFT)
+#define MIPS_GCTL0_CG_SHIFT	24
+#define MIPS_GCTL0_CG		(_ULCAST_(1) << MIPS_GCTL0_CG_SHIFT)
+#define MIPS_GCTL0_CF_SHIFT	23
+#define MIPS_GCTL0_CF		(_ULCAST_(1) << MIPS_GCTL0_CF_SHIFT)
+#define MIPS_GCTL0_G1_SHIFT	22
+#define MIPS_GCTL0_G1		(_ULCAST_(1) << MIPS_GCTL0_G1_SHIFT)
+#define MIPS_GCTL0_G0E_SHIFT	19
+#define MIPS_GCTL0_G0E		(_ULCAST_(1) << MIPS_GCTL0_G0E_SHIFT)
+#define MIPS_GCTL0_PT_SHIFT	18
+#define MIPS_GCTL0_PT		(_ULCAST_(1) << MIPS_GCTL0_PT_SHIFT)
+#define MIPS_GCTL0_RAD_SHIFT	9
+#define MIPS_GCTL0_RAD		(_ULCAST_(1) << MIPS_GCTL0_RAD_SHIFT)
+#define MIPS_GCTL0_DRG_SHIFT	8
+#define MIPS_GCTL0_DRG		(_ULCAST_(1) << MIPS_GCTL0_DRG_SHIFT)
+#define MIPS_GCTL0_G2_SHIFT	7
+#define MIPS_GCTL0_G2		(_ULCAST_(1) << MIPS_GCTL0_G2_SHIFT)
+#define MIPS_GCTL0_GEXC_SHIFT	2
+#define MIPS_GCTL0_GEXC		(_ULCAST_(0x1f) << MIPS_GCTL0_GEXC_SHIFT)
+#define MIPS_GCTL0_SFC2_SHIFT	1
+#define MIPS_GCTL0_SFC2		(_ULCAST_(1) << MIPS_GCTL0_SFC2_SHIFT)
+#define MIPS_GCTL0_SFC1_SHIFT	0
+#define MIPS_GCTL0_SFC1		(_ULCAST_(1) << MIPS_GCTL0_SFC1_SHIFT)
+
+/* GuestCtl0.AT Guest address translation control */
+#define MIPS_GCTL0_AT_ROOT	1  /* Guest MMU under Root control */
+#define MIPS_GCTL0_AT_GUEST	3  /* Guest MMU under Guest control */
+
+/* GuestCtl0.GExcCode Hypervisor exception cause codes */
+#define MIPS_GCTL0_GEXC_GPSI	0  /* Guest Privileged Sensitive Instruction */
+#define MIPS_GCTL0_GEXC_GSFC	1  /* Guest Software Field Change */
+#define MIPS_GCTL0_GEXC_HC	2  /* Hypercall */
+#define MIPS_GCTL0_GEXC_GRR	3  /* Guest Reserved Instruction Redirect */
+#define MIPS_GCTL0_GEXC_GVA	8  /* Guest Virtual Address available */
+#define MIPS_GCTL0_GEXC_GHFC	9  /* Guest Hardware Field Change */
+#define MIPS_GCTL0_GEXC_GPA	10 /* Guest Physical Address available */
+
+/* GuestCtl0Ext fields */
+#define MIPS_GCTL0EXT_RPW_SHIFT	8
+#define MIPS_GCTL0EXT_RPW	(_ULCAST_(0x3) << MIPS_GCTL0EXT_RPW_SHIFT)
+#define MIPS_GCTL0EXT_NCC_SHIFT	6
+#define MIPS_GCTL0EXT_NCC	(_ULCAST_(0x3) << MIPS_GCTL0EXT_NCC_SHIFT)
+#define MIPS_GCTL0EXT_CGI_SHIFT	4
+#define MIPS_GCTL0EXT_CGI	(_ULCAST_(1) << MIPS_GCTL0EXT_CGI_SHIFT)
+#define MIPS_GCTL0EXT_FCD_SHIFT	3
+#define MIPS_GCTL0EXT_FCD	(_ULCAST_(1) << MIPS_GCTL0EXT_FCD_SHIFT)
+#define MIPS_GCTL0EXT_OG_SHIFT	2
+#define MIPS_GCTL0EXT_OG	(_ULCAST_(1) << MIPS_GCTL0EXT_OG_SHIFT)
+#define MIPS_GCTL0EXT_BG_SHIFT	1
+#define MIPS_GCTL0EXT_BG	(_ULCAST_(1) << MIPS_GCTL0EXT_BG_SHIFT)
+#define MIPS_GCTL0EXT_MG_SHIFT	0
+#define MIPS_GCTL0EXT_MG	(_ULCAST_(1) << MIPS_GCTL0EXT_MG_SHIFT)
+
+/* GuestCtl0Ext.RPW Root page walk configuration */
+#define MIPS_GCTL0EXT_RPW_BOTH	0  /* Root PW for GPA->RPA and RVA->RPA */
+#define MIPS_GCTL0EXT_RPW_GPA	2  /* Root PW for GPA->RPA */
+#define MIPS_GCTL0EXT_RPW_RVA	3  /* Root PW for RVA->RPA */
+
+/* GuestCtl0Ext.NCC Nested cache coherency attributes */
+#define MIPS_GCTL0EXT_NCC_IND	0  /* Guest CCA independent of Root CCA */
+#define MIPS_GCTL0EXT_NCC_MOD	1  /* Guest CCA modified by Root CCA */
+
+/* GuestCtl1 fields */
+#define MIPS_GCTL1_ID_SHIFT	0
+#define MIPS_GCTL1_ID_WIDTH	8
+#define MIPS_GCTL1_ID		(_ULCAST_(0xff) << MIPS_GCTL1_ID_SHIFT)
+#define MIPS_GCTL1_RID_SHIFT	16
+#define MIPS_GCTL1_RID_WIDTH	8
+#define MIPS_GCTL1_RID		(_ULCAST_(0xff) << MIPS_GCTL1_RID_SHIFT)
+#define MIPS_GCTL1_EID_SHIFT	24
+#define MIPS_GCTL1_EID_WIDTH	8
+#define MIPS_GCTL1_EID		(_ULCAST_(0xff) << MIPS_GCTL1_EID_SHIFT)
+
+/* GuestID reserved for root context */
+#define MIPS_GCTL1_ROOT_GUESTID	0
+
 /* CDMMBase register bit definitions */
 #define MIPS_CDMMBASE_SIZE_SHIFT 0
 #define MIPS_CDMMBASE_SIZE	(_ULCAST_(511) << MIPS_CDMMBASE_SIZE_SHIFT)
@@ -757,6 +885,15 @@
 /* Disable Branch Return Cache */
 #define R10K_DIAG_D_BRC		(_ULCAST_(1) << 22)
 
+/* Flush ITLB */
+#define LOONGSON_DIAG_ITLB	(_ULCAST_(1) << 2)
+/* Flush DTLB */
+#define LOONGSON_DIAG_DTLB	(_ULCAST_(1) << 3)
+/* Flush VTLB */
+#define LOONGSON_DIAG_VTLB	(_ULCAST_(1) << 12)
+/* Flush FTLB */
+#define LOONGSON_DIAG_FTLB	(_ULCAST_(1) << 13)
+
 /*
  * Coprocessor 1 (FPU) register names
  */
@@ -1186,9 +1323,15 @@
 #define read_c0_context()	__read_ulong_c0_register($4, 0)
 #define write_c0_context(val)	__write_ulong_c0_register($4, 0, val)
 
+#define read_c0_contextconfig()		__read_32bit_c0_register($4, 1)
+#define write_c0_contextconfig(val)	__write_32bit_c0_register($4, 1, val)
+
 #define read_c0_userlocal()	__read_ulong_c0_register($4, 2)
 #define write_c0_userlocal(val) __write_ulong_c0_register($4, 2, val)
 
+#define read_c0_xcontextconfig()	__read_ulong_c0_register($4, 3)
+#define write_c0_xcontextconfig(val)	__write_ulong_c0_register($4, 3, val)
+
 #define read_c0_pagemask()	__read_32bit_c0_register($5, 0)
 #define write_c0_pagemask(val)	__write_32bit_c0_register($5, 0, val)
 
@@ -1206,6 +1349,9 @@
 #define read_c0_badvaddr()	__read_ulong_c0_register($8, 0)
 #define write_c0_badvaddr(val)	__write_ulong_c0_register($8, 0, val)
 
+#define read_c0_badinstr()	__read_32bit_c0_register($8, 1)
+#define read_c0_badinstrp()	__read_32bit_c0_register($8, 2)
+
 #define read_c0_count()		__read_32bit_c0_register($9, 0)
 #define write_c0_count(val)	__write_32bit_c0_register($9, 0, val)
 
@@ -1218,9 +1364,21 @@
 #define read_c0_entryhi()	__read_ulong_c0_register($10, 0)
 #define write_c0_entryhi(val)	__write_ulong_c0_register($10, 0, val)
 
+#define read_c0_guestctl1()	__read_32bit_c0_register($10, 4)
+#define write_c0_guestctl1(val)	__write_32bit_c0_register($10, 4, val)
+
+#define read_c0_guestctl2()	__read_32bit_c0_register($10, 5)
+#define write_c0_guestctl2(val)	__write_32bit_c0_register($10, 5, val)
+
+#define read_c0_guestctl3()	__read_32bit_c0_register($10, 6)
+#define write_c0_guestctl3(val)	__write_32bit_c0_register($10, 6, val)
+
 #define read_c0_compare()	__read_32bit_c0_register($11, 0)
 #define write_c0_compare(val)	__write_32bit_c0_register($11, 0, val)
 
+#define read_c0_guestctl0ext()	__read_32bit_c0_register($11, 4)
+#define write_c0_guestctl0ext(val) __write_32bit_c0_register($11, 4, val)
+
 #define read_c0_compare2()	__read_32bit_c0_register($11, 6) /* pnx8550 */
 #define write_c0_compare2(val)	__write_32bit_c0_register($11, 6, val)
 
@@ -1231,6 +1389,12 @@
 
 #define write_c0_status(val)	__write_32bit_c0_register($12, 0, val)
 
+#define read_c0_guestctl0()	__read_32bit_c0_register($12, 6)
+#define write_c0_guestctl0(val)	__write_32bit_c0_register($12, 6, val)
+
+#define read_c0_gtoffset()	__read_32bit_c0_register($12, 7)
+#define write_c0_gtoffset(val)	__write_32bit_c0_register($12, 7, val)
+
 #define read_c0_cause()		__read_32bit_c0_register($13, 0)
 #define write_c0_cause(val)	__write_32bit_c0_register($13, 0, val)
 
@@ -1416,6 +1580,9 @@
 #define read_c0_ebase()		__read_32bit_c0_register($15, 1)
 #define write_c0_ebase(val)	__write_32bit_c0_register($15, 1, val)
 
+#define read_c0_ebase_64()	__read_64bit_c0_register($15, 1)
+#define write_c0_ebase_64(val)	__write_64bit_c0_register($15, 1, val)
+
 #define read_c0_cdmmbase()	__read_ulong_c0_register($15, 2)
 #define write_c0_cdmmbase(val)	__write_ulong_c0_register($15, 2, val)
 
@@ -1442,6 +1609,12 @@
 #define read_c0_pwctl()		__read_32bit_c0_register($6, 6)
 #define write_c0_pwctl(val)	__write_32bit_c0_register($6, 6, val)
 
+#define read_c0_pgd()		__read_64bit_c0_register($9, 7)
+#define write_c0_pgd(val)	__write_64bit_c0_register($9, 7, val)
+
+#define read_c0_kpgd()		__read_64bit_c0_register($31, 7)
+#define write_c0_kpgd(val)	__write_64bit_c0_register($31, 7, val)
+
 /* Cavium OCTEON (cnMIPS) */
 #define read_c0_cvmcount()	__read_ulong_c0_register($9, 6)
 #define write_c0_cvmcount(val)	__write_ulong_c0_register($9, 6, val)
@@ -1507,6 +1680,317 @@
 #define write_c0_brcm_sleepcount(val)	__write_32bit_c0_register($22, 7, val)
 
 /*
+ * Macros to access the guest system control coprocessor
+ */
+
+#ifdef TOOLCHAIN_SUPPORTS_VIRT
+
+#define __read_32bit_gc0_register(source, sel)				\
+({ int __res;								\
+	__asm__ __volatile__(						\
+		".set\tpush\n\t"					\
+		".set\tmips32r2\n\t"					\
+		".set\tvirt\n\t"					\
+		"mfgc0\t%0, $%1, %2\n\t"				\
+		".set\tpop"						\
+		: "=r" (__res)						\
+		: "i" (source), "i" (sel));				\
+	__res;								\
+})
+
+#define __read_64bit_gc0_register(source, sel)				\
+({ unsigned long long __res;						\
+	__asm__ __volatile__(						\
+		".set\tpush\n\t"					\
+		".set\tmips64r2\n\t"					\
+		".set\tvirt\n\t"					\
+		"dmfgc0\t%0, $%1, %2\n\t"			\
+		".set\tpop"						\
+		: "=r" (__res)						\
+		: "i" (source), "i" (sel));				\
+	__res;								\
+})
+
+#define __write_32bit_gc0_register(register, sel, value)		\
+do {									\
+	__asm__ __volatile__(						\
+		".set\tpush\n\t"					\
+		".set\tmips32r2\n\t"					\
+		".set\tvirt\n\t"					\
+		"mtgc0\t%z0, $%1, %2\n\t"				\
+		".set\tpop"						\
+		: : "Jr" ((unsigned int)(value)),			\
+		    "i" (register), "i" (sel));				\
+} while (0)
+
+#define __write_64bit_gc0_register(register, sel, value)		\
+do {									\
+	__asm__ __volatile__(						\
+		".set\tpush\n\t"					\
+		".set\tmips64r2\n\t"					\
+		".set\tvirt\n\t"					\
+		"dmtgc0\t%z0, $%1, %2\n\t"				\
+		".set\tpop"						\
+		: : "Jr" (value),					\
+		    "i" (register), "i" (sel));				\
+} while (0)
+
+#else	/* TOOLCHAIN_SUPPORTS_VIRT */
+
+#define __read_32bit_gc0_register(source, sel)				\
+({ int __res;								\
+	__asm__ __volatile__(						\
+		".set\tpush\n\t"					\
+		".set\tnoat\n\t"					\
+		"# mfgc0\t$1, $%1, %2\n\t"				\
+		".word\t(0x40610000 | %1 << 11 | %2)\n\t"		\
+		"move\t%0, $1\n\t"					\
+		".set\tpop"						\
+		: "=r" (__res)						\
+		: "i" (source), "i" (sel));				\
+	__res;								\
+})
+
+#define __read_64bit_gc0_register(source, sel)				\
+({ unsigned long long __res;						\
+	__asm__ __volatile__(						\
+		".set\tpush\n\t"					\
+		".set\tnoat\n\t"					\
+		"# dmfgc0\t$1, $%1, %2\n\t"				\
+		".word\t(0x40610100 | %1 << 11 | %2)\n\t"		\
+		"move\t%0, $1\n\t"					\
+		".set\tpop"						\
+		: "=r" (__res)						\
+		: "i" (source), "i" (sel));				\
+	__res;								\
+})
+
+#define __write_32bit_gc0_register(register, sel, value)		\
+do {									\
+	__asm__ __volatile__(						\
+		".set\tpush\n\t"					\
+		".set\tnoat\n\t"					\
+		"move\t$1, %0\n\t"					\
+		"# mtgc0\t$1, $%1, %2\n\t"				\
+		".word\t(0x40610200 | %1 << 11 | %2)\n\t"		\
+		".set\tpop"						\
+		: : "Jr" ((unsigned int)(value)),			\
+		    "i" (register), "i" (sel));				\
+} while (0)
+
+#define __write_64bit_gc0_register(register, sel, value)		\
+do {									\
+	__asm__ __volatile__(						\
+		".set\tpush\n\t"					\
+		".set\tnoat\n\t"					\
+		"move\t$1, %0\n\t"					\
+		"# dmtgc0\t$1, $%1, %2\n\t"				\
+		".word\t(0x40610300 | %1 << 11 | %2)\n\t"		\
+		".set\tpop"						\
+		: : "Jr" (value),					\
+		    "i" (register), "i" (sel));				\
+} while (0)
+
+#endif	/* !TOOLCHAIN_SUPPORTS_VIRT */
+
+#define __read_ulong_gc0_register(reg, sel)				\
+	((sizeof(unsigned long) == 4) ?					\
+	(unsigned long) __read_32bit_gc0_register(reg, sel) :		\
+	(unsigned long) __read_64bit_gc0_register(reg, sel))
+
+#define __write_ulong_gc0_register(reg, sel, val)			\
+do {									\
+	if (sizeof(unsigned long) == 4)					\
+		__write_32bit_gc0_register(reg, sel, val);		\
+	else								\
+		__write_64bit_gc0_register(reg, sel, val);		\
+} while (0)
+
+#define read_gc0_index()		__read_32bit_gc0_register(0, 0)
+#define write_gc0_index(val)		__write_32bit_gc0_register(0, 0, val)
+
+#define read_gc0_entrylo0()		__read_ulong_gc0_register(2, 0)
+#define write_gc0_entrylo0(val)		__write_ulong_gc0_register(2, 0, val)
+
+#define read_gc0_entrylo1()		__read_ulong_gc0_register(3, 0)
+#define write_gc0_entrylo1(val)		__write_ulong_gc0_register(3, 0, val)
+
+#define read_gc0_context()		__read_ulong_gc0_register(4, 0)
+#define write_gc0_context(val)		__write_ulong_gc0_register(4, 0, val)
+
+#define read_gc0_contextconfig()	__read_32bit_gc0_register(4, 1)
+#define write_gc0_contextconfig(val)	__write_32bit_gc0_register(4, 1, val)
+
+#define read_gc0_userlocal()		__read_ulong_gc0_register(4, 2)
+#define write_gc0_userlocal(val)	__write_ulong_gc0_register(4, 2, val)
+
+#define read_gc0_xcontextconfig()	__read_ulong_gc0_register(4, 3)
+#define write_gc0_xcontextconfig(val)	__write_ulong_gc0_register(4, 3, val)
+
+#define read_gc0_pagemask()		__read_32bit_gc0_register(5, 0)
+#define write_gc0_pagemask(val)		__write_32bit_gc0_register(5, 0, val)
+
+#define read_gc0_pagegrain()		__read_32bit_gc0_register(5, 1)
+#define write_gc0_pagegrain(val)	__write_32bit_gc0_register(5, 1, val)
+
+#define read_gc0_segctl0()		__read_ulong_gc0_register(5, 2)
+#define write_gc0_segctl0(val)		__write_ulong_gc0_register(5, 2, val)
+
+#define read_gc0_segctl1()		__read_ulong_gc0_register(5, 3)
+#define write_gc0_segctl1(val)		__write_ulong_gc0_register(5, 3, val)
+
+#define read_gc0_segctl2()		__read_ulong_gc0_register(5, 4)
+#define write_gc0_segctl2(val)		__write_ulong_gc0_register(5, 4, val)
+
+#define read_gc0_pwbase()		__read_ulong_gc0_register(5, 5)
+#define write_gc0_pwbase(val)		__write_ulong_gc0_register(5, 5, val)
+
+#define read_gc0_pwfield()		__read_ulong_gc0_register(5, 6)
+#define write_gc0_pwfield(val)		__write_ulong_gc0_register(5, 6, val)
+
+#define read_gc0_pwsize()		__read_ulong_gc0_register(5, 7)
+#define write_gc0_pwsize(val)		__write_ulong_gc0_register(5, 7, val)
+
+#define read_gc0_wired()		__read_32bit_gc0_register(6, 0)
+#define write_gc0_wired(val)		__write_32bit_gc0_register(6, 0, val)
+
+#define read_gc0_pwctl()		__read_32bit_gc0_register(6, 6)
+#define write_gc0_pwctl(val)		__write_32bit_gc0_register(6, 6, val)
+
+#define read_gc0_hwrena()		__read_32bit_gc0_register(7, 0)
+#define write_gc0_hwrena(val)		__write_32bit_gc0_register(7, 0, val)
+
+#define read_gc0_badvaddr()		__read_ulong_gc0_register(8, 0)
+#define write_gc0_badvaddr(val)		__write_ulong_gc0_register(8, 0, val)
+
+#define read_gc0_badinstr()		__read_32bit_gc0_register(8, 1)
+#define write_gc0_badinstr(val)		__write_32bit_gc0_register(8, 1, val)
+
+#define read_gc0_badinstrp()		__read_32bit_gc0_register(8, 2)
+#define write_gc0_badinstrp(val)	__write_32bit_gc0_register(8, 2, val)
+
+#define read_gc0_count()		__read_32bit_gc0_register(9, 0)
+
+#define read_gc0_entryhi()		__read_ulong_gc0_register(10, 0)
+#define write_gc0_entryhi(val)		__write_ulong_gc0_register(10, 0, val)
+
+#define read_gc0_compare()		__read_32bit_gc0_register(11, 0)
+#define write_gc0_compare(val)		__write_32bit_gc0_register(11, 0, val)
+
+#define read_gc0_status()		__read_32bit_gc0_register(12, 0)
+#define write_gc0_status(val)		__write_32bit_gc0_register(12, 0, val)
+
+#define read_gc0_intctl()		__read_32bit_gc0_register(12, 1)
+#define write_gc0_intctl(val)		__write_32bit_gc0_register(12, 1, val)
+
+#define read_gc0_cause()		__read_32bit_gc0_register(13, 0)
+#define write_gc0_cause(val)		__write_32bit_gc0_register(13, 0, val)
+
+#define read_gc0_epc()			__read_ulong_gc0_register(14, 0)
+#define write_gc0_epc(val)		__write_ulong_gc0_register(14, 0, val)
+
+#define read_gc0_ebase()		__read_32bit_gc0_register(15, 1)
+#define write_gc0_ebase(val)		__write_32bit_gc0_register(15, 1, val)
+
+#define read_gc0_ebase_64()		__read_64bit_gc0_register(15, 1)
+#define write_gc0_ebase_64(val)		__write_64bit_gc0_register(15, 1, val)
+
+#define read_gc0_config()		__read_32bit_gc0_register(16, 0)
+#define read_gc0_config1()		__read_32bit_gc0_register(16, 1)
+#define read_gc0_config2()		__read_32bit_gc0_register(16, 2)
+#define read_gc0_config3()		__read_32bit_gc0_register(16, 3)
+#define read_gc0_config4()		__read_32bit_gc0_register(16, 4)
+#define read_gc0_config5()		__read_32bit_gc0_register(16, 5)
+#define read_gc0_config6()		__read_32bit_gc0_register(16, 6)
+#define read_gc0_config7()		__read_32bit_gc0_register(16, 7)
+#define write_gc0_config(val)		__write_32bit_gc0_register(16, 0, val)
+#define write_gc0_config1(val)		__write_32bit_gc0_register(16, 1, val)
+#define write_gc0_config2(val)		__write_32bit_gc0_register(16, 2, val)
+#define write_gc0_config3(val)		__write_32bit_gc0_register(16, 3, val)
+#define write_gc0_config4(val)		__write_32bit_gc0_register(16, 4, val)
+#define write_gc0_config5(val)		__write_32bit_gc0_register(16, 5, val)
+#define write_gc0_config6(val)		__write_32bit_gc0_register(16, 6, val)
+#define write_gc0_config7(val)		__write_32bit_gc0_register(16, 7, val)
+
+#define read_gc0_watchlo0()		__read_ulong_gc0_register(18, 0)
+#define read_gc0_watchlo1()		__read_ulong_gc0_register(18, 1)
+#define read_gc0_watchlo2()		__read_ulong_gc0_register(18, 2)
+#define read_gc0_watchlo3()		__read_ulong_gc0_register(18, 3)
+#define read_gc0_watchlo4()		__read_ulong_gc0_register(18, 4)
+#define read_gc0_watchlo5()		__read_ulong_gc0_register(18, 5)
+#define read_gc0_watchlo6()		__read_ulong_gc0_register(18, 6)
+#define read_gc0_watchlo7()		__read_ulong_gc0_register(18, 7)
+#define write_gc0_watchlo0(val)		__write_ulong_gc0_register(18, 0, val)
+#define write_gc0_watchlo1(val)		__write_ulong_gc0_register(18, 1, val)
+#define write_gc0_watchlo2(val)		__write_ulong_gc0_register(18, 2, val)
+#define write_gc0_watchlo3(val)		__write_ulong_gc0_register(18, 3, val)
+#define write_gc0_watchlo4(val)		__write_ulong_gc0_register(18, 4, val)
+#define write_gc0_watchlo5(val)		__write_ulong_gc0_register(18, 5, val)
+#define write_gc0_watchlo6(val)		__write_ulong_gc0_register(18, 6, val)
+#define write_gc0_watchlo7(val)		__write_ulong_gc0_register(18, 7, val)
+
+#define read_gc0_watchhi0()		__read_32bit_gc0_register(19, 0)
+#define read_gc0_watchhi1()		__read_32bit_gc0_register(19, 1)
+#define read_gc0_watchhi2()		__read_32bit_gc0_register(19, 2)
+#define read_gc0_watchhi3()		__read_32bit_gc0_register(19, 3)
+#define read_gc0_watchhi4()		__read_32bit_gc0_register(19, 4)
+#define read_gc0_watchhi5()		__read_32bit_gc0_register(19, 5)
+#define read_gc0_watchhi6()		__read_32bit_gc0_register(19, 6)
+#define read_gc0_watchhi7()		__read_32bit_gc0_register(19, 7)
+#define write_gc0_watchhi0(val)		__write_32bit_gc0_register(19, 0, val)
+#define write_gc0_watchhi1(val)		__write_32bit_gc0_register(19, 1, val)
+#define write_gc0_watchhi2(val)		__write_32bit_gc0_register(19, 2, val)
+#define write_gc0_watchhi3(val)		__write_32bit_gc0_register(19, 3, val)
+#define write_gc0_watchhi4(val)		__write_32bit_gc0_register(19, 4, val)
+#define write_gc0_watchhi5(val)		__write_32bit_gc0_register(19, 5, val)
+#define write_gc0_watchhi6(val)		__write_32bit_gc0_register(19, 6, val)
+#define write_gc0_watchhi7(val)		__write_32bit_gc0_register(19, 7, val)
+
+#define read_gc0_xcontext()		__read_ulong_gc0_register(20, 0)
+#define write_gc0_xcontext(val)		__write_ulong_gc0_register(20, 0, val)
+
+#define read_gc0_perfctrl0()		__read_32bit_gc0_register(25, 0)
+#define write_gc0_perfctrl0(val)	__write_32bit_gc0_register(25, 0, val)
+#define read_gc0_perfcntr0()		__read_32bit_gc0_register(25, 1)
+#define write_gc0_perfcntr0(val)	__write_32bit_gc0_register(25, 1, val)
+#define read_gc0_perfcntr0_64()		__read_64bit_gc0_register(25, 1)
+#define write_gc0_perfcntr0_64(val)	__write_64bit_gc0_register(25, 1, val)
+#define read_gc0_perfctrl1()		__read_32bit_gc0_register(25, 2)
+#define write_gc0_perfctrl1(val)	__write_32bit_gc0_register(25, 2, val)
+#define read_gc0_perfcntr1()		__read_32bit_gc0_register(25, 3)
+#define write_gc0_perfcntr1(val)	__write_32bit_gc0_register(25, 3, val)
+#define read_gc0_perfcntr1_64()		__read_64bit_gc0_register(25, 3)
+#define write_gc0_perfcntr1_64(val)	__write_64bit_gc0_register(25, 3, val)
+#define read_gc0_perfctrl2()		__read_32bit_gc0_register(25, 4)
+#define write_gc0_perfctrl2(val)	__write_32bit_gc0_register(25, 4, val)
+#define read_gc0_perfcntr2()		__read_32bit_gc0_register(25, 5)
+#define write_gc0_perfcntr2(val)	__write_32bit_gc0_register(25, 5, val)
+#define read_gc0_perfcntr2_64()		__read_64bit_gc0_register(25, 5)
+#define write_gc0_perfcntr2_64(val)	__write_64bit_gc0_register(25, 5, val)
+#define read_gc0_perfctrl3()		__read_32bit_gc0_register(25, 6)
+#define write_gc0_perfctrl3(val)	__write_32bit_gc0_register(25, 6, val)
+#define read_gc0_perfcntr3()		__read_32bit_gc0_register(25, 7)
+#define write_gc0_perfcntr3(val)	__write_32bit_gc0_register(25, 7, val)
+#define read_gc0_perfcntr3_64()		__read_64bit_gc0_register(25, 7)
+#define write_gc0_perfcntr3_64(val)	__write_64bit_gc0_register(25, 7, val)
+
+#define read_gc0_errorepc()		__read_ulong_gc0_register(30, 0)
+#define write_gc0_errorepc(val)		__write_ulong_gc0_register(30, 0, val)
+
+#define read_gc0_kscratch1()		__read_ulong_gc0_register(31, 2)
+#define read_gc0_kscratch2()		__read_ulong_gc0_register(31, 3)
+#define read_gc0_kscratch3()		__read_ulong_gc0_register(31, 4)
+#define read_gc0_kscratch4()		__read_ulong_gc0_register(31, 5)
+#define read_gc0_kscratch5()		__read_ulong_gc0_register(31, 6)
+#define read_gc0_kscratch6()		__read_ulong_gc0_register(31, 7)
+#define write_gc0_kscratch1(val)	__write_ulong_gc0_register(31, 2, val)
+#define write_gc0_kscratch2(val)	__write_ulong_gc0_register(31, 3, val)
+#define write_gc0_kscratch3(val)	__write_ulong_gc0_register(31, 4, val)
+#define write_gc0_kscratch4(val)	__write_ulong_gc0_register(31, 5, val)
+#define write_gc0_kscratch5(val)	__write_ulong_gc0_register(31, 6, val)
+#define write_gc0_kscratch6(val)	__write_ulong_gc0_register(31, 7, val)
+
+/*
  * Macros to access the floating point coprocessor control registers
  */
 #define _read_32bit_cp1_register(source, gas_hardfloat)			\
@@ -2001,47 +2485,159 @@
 		".set reorder");
 }
 
+#ifdef TOOLCHAIN_SUPPORTS_VIRT
+
 /*
- * Manipulate bits in a c0 register.
+ * Guest TLB operations.
+ *
+ * It is the responsibility of the caller to take care of any TLB hazards.
  */
-#define __BUILD_SET_C0(name)					\
+static inline void guest_tlb_probe(void)
+{
+	__asm__ __volatile__(
+		".set push\n\t"
+		".set noreorder\n\t"
+		".set virt\n\t"
+		"tlbgp\n\t"
+		".set pop");
+}
+
+static inline void guest_tlb_read(void)
+{
+	__asm__ __volatile__(
+		".set push\n\t"
+		".set noreorder\n\t"
+		".set virt\n\t"
+		"tlbgr\n\t"
+		".set pop");
+}
+
+static inline void guest_tlb_write_indexed(void)
+{
+	__asm__ __volatile__(
+		".set push\n\t"
+		".set noreorder\n\t"
+		".set virt\n\t"
+		"tlbgwi\n\t"
+		".set pop");
+}
+
+static inline void guest_tlb_write_random(void)
+{
+	__asm__ __volatile__(
+		".set push\n\t"
+		".set noreorder\n\t"
+		".set virt\n\t"
+		"tlbgwr\n\t"
+		".set pop");
+}
+
+/*
+ * Guest TLB Invalidate Flush
+ */
+static inline void guest_tlbinvf(void)
+{
+	__asm__ __volatile__(
+		".set push\n\t"
+		".set noreorder\n\t"
+		".set virt\n\t"
+		"tlbginvf\n\t"
+		".set pop");
+}
+
+#else	/* TOOLCHAIN_SUPPORTS_VIRT */
+
+/*
+ * Guest TLB operations.
+ *
+ * It is the responsibility of the caller to take care of any TLB hazards.
+ */
+static inline void guest_tlb_probe(void)
+{
+	__asm__ __volatile__(
+		"# tlbgp\n\t"
+		".word 0x42000010");
+}
+
+static inline void guest_tlb_read(void)
+{
+	__asm__ __volatile__(
+		"# tlbgr\n\t"
+		".word 0x42000009");
+}
+
+static inline void guest_tlb_write_indexed(void)
+{
+	__asm__ __volatile__(
+		"# tlbgwi\n\t"
+		".word 0x4200000a");
+}
+
+static inline void guest_tlb_write_random(void)
+{
+	__asm__ __volatile__(
+		"# tlbgwr\n\t"
+		".word 0x4200000e");
+}
+
+/*
+ * Guest TLB Invalidate Flush
+ */
+static inline void guest_tlbinvf(void)
+{
+	__asm__ __volatile__(
+		"# tlbginvf\n\t"
+		".word 0x4200000c");
+}
+
+#endif	/* !TOOLCHAIN_SUPPORTS_VIRT */
+
+/*
+ * Manipulate bits in a register.
+ */
+#define __BUILD_SET_COMMON(name)				\
 static inline unsigned int					\
-set_c0_##name(unsigned int set)					\
+set_##name(unsigned int set)					\
 {								\
 	unsigned int res, new;					\
 								\
-	res = read_c0_##name();					\
+	res = read_##name();					\
 	new = res | set;					\
-	write_c0_##name(new);					\
+	write_##name(new);					\
 								\
 	return res;						\
 }								\
 								\
 static inline unsigned int					\
-clear_c0_##name(unsigned int clear)				\
+clear_##name(unsigned int clear)				\
 {								\
 	unsigned int res, new;					\
 								\
-	res = read_c0_##name();					\
+	res = read_##name();					\
 	new = res & ~clear;					\
-	write_c0_##name(new);					\
+	write_##name(new);					\
 								\
 	return res;						\
 }								\
 								\
 static inline unsigned int					\
-change_c0_##name(unsigned int change, unsigned int val)		\
+change_##name(unsigned int change, unsigned int val)		\
 {								\
 	unsigned int res, new;					\
 								\
-	res = read_c0_##name();					\
+	res = read_##name();					\
 	new = res & ~change;					\
 	new |= (val & change);					\
-	write_c0_##name(new);					\
+	write_##name(new);					\
 								\
 	return res;						\
 }
 
+/*
+ * Manipulate bits in a c0 register.
+ */
+#define __BUILD_SET_C0(name)	__BUILD_SET_COMMON(c0_##name)
+
 __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
@@ -2050,6 +2646,11 @@
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
 __BUILD_SET_C0(pagegrain)
+__BUILD_SET_C0(guestctl0)
+__BUILD_SET_C0(guestctl0ext)
+__BUILD_SET_C0(guestctl1)
+__BUILD_SET_C0(guestctl2)
+__BUILD_SET_C0(guestctl3)
 __BUILD_SET_C0(brcm_config_0)
 __BUILD_SET_C0(brcm_bus_pll)
 __BUILD_SET_C0(brcm_reset)
@@ -2059,12 +2660,21 @@
 __BUILD_SET_C0(brcm_mode)
 
 /*
+ * Manipulate bits in a guest c0 register.
+ */
+#define __BUILD_SET_GC0(name)	__BUILD_SET_COMMON(gc0_##name)
+
+__BUILD_SET_GC0(status)
+__BUILD_SET_GC0(cause)
+__BUILD_SET_GC0(ebase)
+
+/*
  * Return low 10 bits of ebase.
  * Note that under KVM (MIPSVZ) this returns vcpu id.
  */
 static inline unsigned int get_ebase_cpunum(void)
 {
-	return read_c0_ebase() & 0x3ff;
+	return read_c0_ebase() & MIPS_EBASE_CPUNUM;
 }
 
 #endif /* !__ASSEMBLY__ */
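Beyond the raw mfgc0/mtgc0 accessors (with .word fallbacks for toolchains lacking the virt ASE), the refactored __BUILD_SET_COMMON gives guest CP0 registers the same set_*/clear_*/change_* helpers that root registers already had. A hedged sketch of what __BUILD_SET_GC0(status) expands to and how it might be used (guest_enable_fpu() is a hypothetical caller; ST0_CU1 is the Status.CU1 mask already defined in this header):

	/* Roughly the generated helper: */
	static inline unsigned int set_gc0_status(unsigned int set)
	{
		unsigned int res = read_gc0_status();

		write_gc0_status(res | set);
		return res;		/* previous value, as with set_c0_status() */
	}

	/* Hypothetical use: grant the guest access to its FPU. */
	static inline void guest_enable_fpu(void)
	{
		set_gc0_status(ST0_CU1);
	}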
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 45914b5..fc57e13 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -65,37 +65,32 @@
 	back_to_back_c0_hazard();					\
 	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-
-#define ASID_INC	0x40
-#define ASID_MASK	0xfc0
-
-#elif defined(CONFIG_CPU_R8000)
-
-#define ASID_INC	0x10
-#define ASID_MASK	0xff0
-
-#else /* FIXME: not correct for R6000 */
-
-#define ASID_INC	0x1
-#define ASID_MASK	0xff
-
-#endif
-
-#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
-#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
 
 /*
  *  All unused by hardware upper bits will be considered
  *  as a software asid extension.
  */
-#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
-#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
+static unsigned long asid_version_mask(unsigned int cpu)
+{
+	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
+
+	return ~(asid_mask | (asid_mask - 1));
+}
+
+static unsigned long asid_first_version(unsigned int cpu)
+{
+	return ~asid_version_mask(cpu) + 1;
+}
+
+#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
+#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
+#define cpu_asid(cpu, mm) \
+	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
 
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
@@ -104,7 +99,7 @@
 	extern void kvm_local_flush_tlb_all(void);
 	unsigned long asid = asid_cache(cpu);
 
-	if (! ((asid += ASID_INC) & ASID_MASK) ) {
+	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 #ifdef CONFIG_KVM
@@ -113,7 +108,7 @@
 		local_flush_tlb_all();	/* start new asid cycle */
 #endif
 		if (!asid)		/* fix version if needed */
-			asid = ASID_FIRST_VERSION;
+			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
@@ -145,7 +140,7 @@
 
 	htw_stop();
 	/* Check if our ASID is of an older version and thus invalid */
-	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
+	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
 		get_new_mmu_context(next, cpu);
 	write_c0_entryhi(cpu_asid(cpu, next));
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
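The mmu_context.h change turns the compile-time ASID_MASK/ASID_VERSION_MASK constants into per-CPU calculations based on cpu_asid_mask(). A worked example, assuming the two mask values from the old table:

	/*
	 * cpu_asid_mask() == 0xff   (classic 8-bit ASID):
	 *   asid_version_mask() = ~(0xff  | 0xfe)  = ~0xff
	 *   asid_first_version() = ~(~0xff) + 1    = 0x100
	 *
	 * cpu_asid_mask() == 0xfc0  (R3000-style, ASID in bits 11:6):
	 *   asid_version_mask() = ~(0xfc0 | 0xfbf) = ~0xfff
	 *   asid_first_version() =                    0x1000
	 *
	 * The bits above the hardware ASID field still act as the software
	 * "version", exactly as ASID_VERSION_MASK/ASID_FIRST_VERSION did.
	 */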
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index bbb85fe..6e4effa 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -147,6 +147,19 @@
 		_restore_msa(t);
 }
 
+static inline void init_msa_upper(void)
+{
+	/*
+	 * Check cpu_has_msa only if it's a constant. This will allow the
+	 * compiler to optimise out code for CPUs without MSA without adding
+	 * an extra redundant check for CPUs with MSA.
+	 */
+	if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
+		return;
+
+	_init_msa_upper();
+}
+
 #ifdef TOOLCHAIN_SUPPORTS_MSA
 
 #define __BUILD_MSA_CTL_REG(name, cs)				\
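init_msa_upper() uses __builtin_constant_p() so that platforms which hard-wire cpu_has_msa to 0 in their cpu-feature-overrides.h pay nothing for the call, while runtime-detected CPUs get no extra branch. The same pattern in isolation, with hypothetical names (cpu_has_foo, _init_foo):

	static inline void init_foo(void)
	{
		/* Folds to nothing when cpu_has_foo is a compile-time 0;
		 * adds no redundant test when it is a runtime value. */
		if (__builtin_constant_p(cpu_has_foo) && !cpu_has_foo)
			return;

		_init_foo();
	}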
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index d92cf59..6278776 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -32,6 +32,8 @@
 #ifndef __CVMX_BOOTINFO_H__
 #define __CVMX_BOOTINFO_H__
 
+#include "cvmx-coremask.h"
+
 /*
  * Current major and minor versions of the CVMX bootinfo block that is
  * passed from the bootloader to the application.  This is versioned
@@ -39,7 +41,7 @@
  * versions.
  */
 #define CVMX_BOOTINFO_MAJ_VER 1
-#define CVMX_BOOTINFO_MIN_VER 3
+#define CVMX_BOOTINFO_MIN_VER 4
 
 #if (CVMX_BOOTINFO_MAJ_VER == 1)
 #define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
@@ -124,6 +126,13 @@
 	 */
 	uint64_t fdt_addr;
 #endif
+#if (CVMX_BOOTINFO_MIN_VER >= 4)
+	/*
+	 * Coremask used for processors with more than 32 cores
+	 * or with OCI.  This replaces core_mask.
+	 */
+	struct cvmx_coremask ext_core_mask;
+#endif
 #else				/* __BIG_ENDIAN */
 	/*
 	 * Little-Endian: When the CPU mode is switched to
@@ -177,6 +186,9 @@
 #if (CVMX_BOOTINFO_MIN_VER >= 3)
 	uint64_t fdt_addr;
 #endif
+#if (CVMX_BOOTINFO_MIN_VER >= 4)
+	struct cvmx_coremask ext_core_mask;
+#endif
 #endif
 };
 
@@ -388,7 +400,7 @@
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901)
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
 	}
-	return "Unsupported Board";
+	return NULL;
 }
 
 #define ENUM_CHIP_TYPE_CASE(x) \
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu3-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu3-defs.h
new file mode 100644
index 0000000..547f778
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ciu3-defs.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2003-2016 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#ifndef __CVMX_CIU3_DEFS_H__
+#define __CVMX_CIU3_DEFS_H__
+
+#define CVMX_CIU3_FUSE CVMX_ADD_IO_SEG(0x00010100000001A0ull)
+#define CVMX_CIU3_BIST CVMX_ADD_IO_SEG(0x00010100000001C0ull)
+#define CVMX_CIU3_CONST CVMX_ADD_IO_SEG(0x0001010000000220ull)
+#define CVMX_CIU3_CTL CVMX_ADD_IO_SEG(0x00010100000000E0ull)
+#define CVMX_CIU3_DESTX_IO_INT(offset) (CVMX_ADD_IO_SEG(0x0001010000210000ull) + ((offset) & 7) * 8)
+#define CVMX_CIU3_DESTX_PP_INT(offset) (CVMX_ADD_IO_SEG(0x0001010000200000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_GSTOP CVMX_ADD_IO_SEG(0x0001010000000140ull)
+#define CVMX_CIU3_IDTX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001010000110000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_IDTX_IO(offset) (CVMX_ADD_IO_SEG(0x0001010000130000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_IDTX_PPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001010000120000ull) + ((block_id) & 255) * 0x20ull)
+#define CVMX_CIU3_INTR_RAM_ECC_CTL CVMX_ADD_IO_SEG(0x0001010000000260ull)
+#define CVMX_CIU3_INTR_RAM_ECC_ST CVMX_ADD_IO_SEG(0x0001010000000280ull)
+#define CVMX_CIU3_INTR_READY CVMX_ADD_IO_SEG(0x00010100000002A0ull)
+#define CVMX_CIU3_INTR_SLOWDOWN CVMX_ADD_IO_SEG(0x0001010000000240ull)
+#define CVMX_CIU3_ISCX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001010080000000ull) + ((offset) & 1048575) * 8)
+#define CVMX_CIU3_ISCX_W1C(offset) (CVMX_ADD_IO_SEG(0x0001010090000000ull) + ((offset) & 1048575) * 8)
+#define CVMX_CIU3_ISCX_W1S(offset) (CVMX_ADD_IO_SEG(0x00010100A0000000ull) + ((offset) & 1048575) * 8)
+#define CVMX_CIU3_NMI CVMX_ADD_IO_SEG(0x0001010000000160ull)
+#define CVMX_CIU3_SISCX(offset) (CVMX_ADD_IO_SEG(0x0001010000220000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001010000010000ull) + ((offset) & 15) * 8)
+
+union cvmx_ciu3_bist {
+	uint64_t u64;
+	struct cvmx_ciu3_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_9_63                : 55;
+	uint64_t bist                         : 9;
+#else
+	uint64_t bist                         : 9;
+	uint64_t reserved_9_63                : 55;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_const {
+	uint64_t u64;
+	struct cvmx_ciu3_const_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t dests_io                     : 16;
+	uint64_t pintsn                       : 16;
+	uint64_t dests_pp                     : 16;
+	uint64_t idt                          : 16;
+#else
+	uint64_t idt                          : 16;
+	uint64_t dests_pp                     : 16;
+	uint64_t pintsn                       : 16;
+	uint64_t dests_io                     : 16;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_ctl {
+	uint64_t u64;
+	struct cvmx_ciu3_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_5_63                : 59;
+	uint64_t mcd_sel                      : 2;
+	uint64_t iscmem_le                    : 1;
+	uint64_t seq_dis                      : 1;
+	uint64_t cclk_dis                     : 1;
+#else
+	uint64_t cclk_dis                     : 1;
+	uint64_t seq_dis                      : 1;
+	uint64_t iscmem_le                    : 1;
+	uint64_t mcd_sel                      : 2;
+	uint64_t reserved_5_63                : 59;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_destx_io_int {
+	uint64_t u64;
+	struct cvmx_ciu3_destx_io_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_52_63               : 12;
+	uint64_t intsn                        : 20;
+	uint64_t reserved_10_31               : 22;
+	uint64_t intidt                       : 8;
+	uint64_t newint                       : 1;
+	uint64_t intr                         : 1;
+#else
+	uint64_t intr                         : 1;
+	uint64_t newint                       : 1;
+	uint64_t intidt                       : 8;
+	uint64_t reserved_10_31               : 22;
+	uint64_t intsn                        : 20;
+	uint64_t reserved_52_63               : 12;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_destx_pp_int {
+	uint64_t u64;
+	struct cvmx_ciu3_destx_pp_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_52_63               : 12;
+	uint64_t intsn                        : 20;
+	uint64_t reserved_10_31               : 22;
+	uint64_t intidt                       : 8;
+	uint64_t newint                       : 1;
+	uint64_t intr                         : 1;
+#else
+	uint64_t intr                         : 1;
+	uint64_t newint                       : 1;
+	uint64_t intidt                       : 8;
+	uint64_t reserved_10_31               : 22;
+	uint64_t intsn                        : 20;
+	uint64_t reserved_52_63               : 12;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_gstop {
+	uint64_t u64;
+	struct cvmx_ciu3_gstop_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_1_63                : 63;
+	uint64_t gstop                        : 1;
+#else
+	uint64_t gstop                        : 1;
+	uint64_t reserved_1_63                : 63;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_idtx_ctl {
+	uint64_t u64;
+	struct cvmx_ciu3_idtx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_52_63               : 12;
+	uint64_t intsn                        : 20;
+	uint64_t reserved_4_31                : 28;
+	uint64_t intr                         : 1;
+	uint64_t newint                       : 1;
+	uint64_t ip_num                       : 2;
+#else
+	uint64_t ip_num                       : 2;
+	uint64_t newint                       : 1;
+	uint64_t intr                         : 1;
+	uint64_t reserved_4_31                : 28;
+	uint64_t intsn                        : 20;
+	uint64_t reserved_52_63               : 12;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_idtx_io {
+	uint64_t u64;
+	struct cvmx_ciu3_idtx_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_5_63                : 59;
+	uint64_t io                           : 5;
+#else
+	uint64_t io                           : 5;
+	uint64_t reserved_5_63                : 59;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_idtx_ppx {
+	uint64_t u64;
+	struct cvmx_ciu3_idtx_ppx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_48_63               : 16;
+	uint64_t pp                           : 48;
+#else
+	uint64_t pp                           : 48;
+	uint64_t reserved_48_63               : 16;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_intr_ram_ecc_ctl {
+	uint64_t u64;
+	struct cvmx_ciu3_intr_ram_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_3_63                : 61;
+	uint64_t flip_synd                    : 2;
+	uint64_t ecc_ena                      : 1;
+#else
+	uint64_t ecc_ena                      : 1;
+	uint64_t flip_synd                    : 2;
+	uint64_t reserved_3_63                : 61;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_intr_ram_ecc_st {
+	uint64_t u64;
+	struct cvmx_ciu3_intr_ram_ecc_st_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_52_63               : 12;
+	uint64_t addr                         : 20;
+	uint64_t reserved_6_31                : 26;
+	uint64_t sisc_dbe                     : 1;
+	uint64_t sisc_sbe                     : 1;
+	uint64_t idt_dbe                      : 1;
+	uint64_t idt_sbe                      : 1;
+	uint64_t isc_dbe                      : 1;
+	uint64_t isc_sbe                      : 1;
+#else
+	uint64_t isc_sbe                      : 1;
+	uint64_t isc_dbe                      : 1;
+	uint64_t idt_sbe                      : 1;
+	uint64_t idt_dbe                      : 1;
+	uint64_t sisc_sbe                     : 1;
+	uint64_t sisc_dbe                     : 1;
+	uint64_t reserved_6_31                : 26;
+	uint64_t addr                         : 20;
+	uint64_t reserved_52_63               : 12;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_intr_ready {
+	uint64_t u64;
+	struct cvmx_ciu3_intr_ready_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_46_63               : 18;
+	uint64_t index                        : 14;
+	uint64_t reserved_1_31                : 31;
+	uint64_t ready                        : 1;
+#else
+	uint64_t ready                        : 1;
+	uint64_t reserved_1_31                : 31;
+	uint64_t index                        : 14;
+	uint64_t reserved_46_63               : 18;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_intr_slowdown {
+	uint64_t u64;
+	struct cvmx_ciu3_intr_slowdown_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_3_63                : 61;
+	uint64_t ctl                          : 3;
+#else
+	uint64_t ctl                          : 3;
+	uint64_t reserved_3_63                : 61;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_iscx_ctl {
+	uint64_t u64;
+	struct cvmx_ciu3_iscx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_24_63               : 40;
+	uint64_t idt                          : 8;
+	uint64_t imp                          : 1;
+	uint64_t reserved_2_14                : 13;
+	uint64_t en                           : 1;
+	uint64_t raw                          : 1;
+#else
+	uint64_t raw                          : 1;
+	uint64_t en                           : 1;
+	uint64_t reserved_2_14                : 13;
+	uint64_t imp                          : 1;
+	uint64_t idt                          : 8;
+	uint64_t reserved_24_63               : 40;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_iscx_w1c {
+	uint64_t u64;
+	struct cvmx_ciu3_iscx_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_2_63                : 62;
+	uint64_t en                           : 1;
+	uint64_t raw                          : 1;
+#else
+	uint64_t raw                          : 1;
+	uint64_t en                           : 1;
+	uint64_t reserved_2_63                : 62;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_iscx_w1s {
+	uint64_t u64;
+	struct cvmx_ciu3_iscx_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_2_63                : 62;
+	uint64_t en                           : 1;
+	uint64_t raw                          : 1;
+#else
+	uint64_t raw                          : 1;
+	uint64_t en                           : 1;
+	uint64_t reserved_2_63                : 62;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_nmi {
+	uint64_t u64;
+	struct cvmx_ciu3_nmi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_48_63               : 16;
+	uint64_t nmi                          : 48;
+#else
+	uint64_t nmi                          : 48;
+	uint64_t reserved_48_63               : 16;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_siscx {
+	uint64_t u64;
+	struct cvmx_ciu3_siscx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t en                           : 64;
+#else
+	uint64_t en                           : 64;
+#endif
+	} s;
+};
+
+union cvmx_ciu3_timx {
+	uint64_t u64;
+	struct cvmx_ciu3_timx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint64_t reserved_37_63               : 27;
+	uint64_t one_shot                     : 1;
+	uint64_t len                          : 36;
+#else
+	uint64_t len                          : 36;
+	uint64_t one_shot                     : 1;
+	uint64_t reserved_37_63               : 27;
+#endif
+	} s;
+};
+
+#endif
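Each CIU3 register is described as a union of a raw 64-bit value and a bitfield struct, with the field order duplicated for big- and little-endian layouts so that access by name is endian-safe. A hedged sketch of typical use (the cvmx_read_csr()/cvmx_write_csr() accessors are assumed from the existing Octeon support, not defined in this header):

	static void ciu3_ctl_example(void)
	{
		union cvmx_ciu3_ctl ctl;

		ctl.u64 = cvmx_read_csr(CVMX_CIU3_CTL);	/* whole 64-bit CSR */
		ctl.s.seq_dis = 0;			/* one field, by name */
		cvmx_write_csr(CVMX_CIU3_CTL, ctl.u64);
	}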
diff --git a/arch/mips/include/asm/octeon/cvmx-config.h b/arch/mips/include/asm/octeon/cvmx-config.h
index f7dd17d..f4f1996 100644
--- a/arch/mips/include/asm/octeon/cvmx-config.h
+++ b/arch/mips/include/asm/octeon/cvmx-config.h
@@ -33,7 +33,7 @@
 /* Packet buffers */
 #define CVMX_FPA_PACKET_POOL		    (0)
 #define CVMX_FPA_PACKET_POOL_SIZE	    CVMX_FPA_POOL_0_SIZE
-/* Work queue entrys */
+/* Work queue entries */
 #define CVMX_FPA_WQE_POOL		    (1)
 #define CVMX_FPA_WQE_POOL_SIZE		    CVMX_FPA_POOL_1_SIZE
 /* PKO queue command buffers */
diff --git a/arch/mips/include/asm/octeon/cvmx-coremask.h b/arch/mips/include/asm/octeon/cvmx-coremask.h
new file mode 100644
index 0000000..097dc09
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-coremask.h
@@ -0,0 +1,89 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2016  Cavium Inc. (support@cavium.com).
+ *
+ */
+
+/*
+ * Module to support operations on a bitmap of cores. A coremask can be used to
+ * select a specific core, a group of cores, or all available cores, for
+ * initialization and differentiation of roles within a single shared binary
+ * executable image.
+ *
+ * The core numbers used in this file are the same values as those found in
+ * the COP0_EBASE register and the rdhwr 0 instruction.
+ *
+ * For the CN78XX and other multi-node environments the core numbers are not
+ * contiguous.  The core numbers for the CN78XX are as follows:
+ *
+ * Node 0:	Cores 0 - 47
+ * Node 1:	Cores 128 - 175
+ * Node 2:	Cores 256 - 303
+ * Node 3:	Cores 384 - 431
+ *
+ */
+
+#ifndef __CVMX_COREMASK_H__
+#define __CVMX_COREMASK_H__
+
+#define CVMX_MIPS_MAX_CORES 1024
+/* bits per holder */
+#define CVMX_COREMASK_ELTSZ 64
+
+/* cvmx_coremask_t's size in u64 */
+#define CVMX_COREMASK_BMPSZ (CVMX_MIPS_MAX_CORES / CVMX_COREMASK_ELTSZ)
+
+
+/* cvmx_coremask_t */
+struct cvmx_coremask {
+	u64 coremask_bitmap[CVMX_COREMASK_BMPSZ];
+};
+
+/*
+ * Is ``core'' set in the coremask?
+ */
+static inline bool cvmx_coremask_is_core_set(const struct cvmx_coremask *pcm,
+					    int core)
+{
+	int n, i;
+
+	n = core % CVMX_COREMASK_ELTSZ;
+	i = core / CVMX_COREMASK_ELTSZ;
+
+	return (pcm->coremask_bitmap[i] & ((u64)1 << n)) != 0;
+}
+
+/*
+ * Make a copy of a coremask
+ */
+static inline void cvmx_coremask_copy(struct cvmx_coremask *dest,
+				      const struct cvmx_coremask *src)
+{
+	memcpy(dest, src, sizeof(*dest));
+}
+
+/*
+ * Set the lower 64-bit of the coremask.
+ */
+static inline void cvmx_coremask_set64(struct cvmx_coremask *pcm,
+				       uint64_t coremask_64)
+{
+	pcm->coremask_bitmap[0] = coremask_64;
+}
+
+/*
+ * Clear ``core'' from the coremask.
+ */
+static inline void cvmx_coremask_clear_core(struct cvmx_coremask *pcm, int core)
+{
+	int n, i;
+
+	n = core % CVMX_COREMASK_ELTSZ;
+	i = core / CVMX_COREMASK_ELTSZ;
+	pcm->coremask_bitmap[i] &= ~(1ull << n);
+}
+
+#endif /* __CVMX_COREMASK_H__ */
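
As a rough illustration of the bitmap arithmetic above, and of the non-contiguous CN78XX numbering described in the header comment, the following standalone program re-implements the same word/bit split in user space (the helper names echo the header, but this is a sketch rather than the kernel code) and sets core 130, i.e. node 1, local core 2:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_CORES 1024
    #define ELTSZ     64                     /* bits per uint64_t holder */
    #define BMPSZ     (MAX_CORES / ELTSZ)    /* 16 words */

    struct coremask {
        uint64_t bitmap[BMPSZ];
    };

    static void coremask_set_core(struct coremask *cm, int core)
    {
        cm->bitmap[core / ELTSZ] |= (uint64_t)1 << (core % ELTSZ);
    }

    static bool coremask_is_core_set(const struct coremask *cm, int core)
    {
        return (cm->bitmap[core / ELTSZ] >> (core % ELTSZ)) & 1;
    }

    int main(void)
    {
        struct coremask cm;

        memset(&cm, 0, sizeof(cm));
        coremask_set_core(&cm, 130);         /* CN78XX node 1, local core 2 */

        /* Core 130 lands in word 130 / 64 = 2, bit 130 % 64 = 2. */
        printf("word 2 = 0x%016llx, core 130 set: %d\n",
               (unsigned long long)cm.bitmap[2],
               coremask_is_core_set(&cm, 130));
        return 0;
    }
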
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa-defs.h b/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
index 1d79e3c..887ff8e 100644
--- a/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
@@ -66,6 +66,7 @@
 #define CVMX_FPA_WART_CTL (CVMX_ADD_IO_SEG(0x00011800280000D8ull))
 #define CVMX_FPA_WART_STATUS (CVMX_ADD_IO_SEG(0x00011800280000E0ull))
 #define CVMX_FPA_WQE_THRESHOLD (CVMX_ADD_IO_SEG(0x0001180028000468ull))
+#define CVMX_FPA_CLK_COUNT (CVMX_ADD_IO_SEG(0x00012800000000F0ull))
 
 union cvmx_fpa_addr_range_error {
 	uint64_t u64;
diff --git a/arch/mips/include/asm/octeon/cvmx-mio-defs.h b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
index bb0ae33..5196c04 100644
--- a/arch/mips/include/asm/octeon/cvmx-mio-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
@@ -1481,7 +1481,9 @@
 	uint64_t u64;
 	struct cvmx_mio_fus_dat2_s {
 #ifdef __BIG_ENDIAN_BITFIELD
-		uint64_t reserved_48_63:16;
+		uint64_t reserved_59_63:5;
+		uint64_t run_platform:3;
+		uint64_t gbl_pwr_throttle:8;
 		uint64_t fus118:1;
 		uint64_t rom_info:10;
 		uint64_t power_limit:2;
@@ -1513,7 +1515,9 @@
 		uint64_t power_limit:2;
 		uint64_t rom_info:10;
 		uint64_t fus118:1;
-		uint64_t reserved_48_63:16;
+		uint64_t gbl_pwr_throttle:8;
+		uint64_t run_platform:3;
+		uint64_t reserved_59_63:5;
 #endif
 	} s;
 	struct cvmx_mio_fus_dat2_cn30xx {
@@ -1837,50 +1841,192 @@
 #endif
 	} cn68xx;
 	struct cvmx_mio_fus_dat2_cn68xx cn68xxp1;
+	struct cvmx_mio_fus_dat2_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_48_63:16;
+		uint64_t fus118:1;
+		uint64_t rom_info:10;
+		uint64_t power_limit:2;
+		uint64_t dorm_crypto:1;
+		uint64_t fus318:1;
+		uint64_t raid_en:1;
+		uint64_t reserved_31_29:3;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t reserved_25_24:2;
+		uint64_t chip_id:8;
+		uint64_t reserved_15_0:16;
+#else
+		uint64_t reserved_15_0:16;
+		uint64_t chip_id:8;
+		uint64_t reserved_25_24:2;
+		uint64_t nocrypto:1;
+		uint64_t nomul:1;
+		uint64_t nodfa_cp2:1;
+		uint64_t reserved_31_29:3;
+		uint64_t raid_en:1;
+		uint64_t fus318:1;
+		uint64_t dorm_crypto:1;
+		uint64_t power_limit:2;
+		uint64_t rom_info:10;
+		uint64_t fus118:1;
+		uint64_t reserved_48_63:16;
+#endif
+	} cn70xx;
+	struct cvmx_mio_fus_dat2_cn70xx cn70xxp1;
+	struct cvmx_mio_fus_dat2_cn73xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_59_63:5;
+		uint64_t run_platform:3;
+		uint64_t gbl_pwr_throttle:8;
+		uint64_t fus118:1;
+		uint64_t rom_info:10;
+		uint64_t power_limit:2;
+		uint64_t dorm_crypto:1;
+		uint64_t fus318:1;
+		uint64_t raid_en:1;
+		uint64_t reserved_31_29:3;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t reserved_25_24:2;
+		uint64_t chip_id:8;
+		uint64_t reserved_15_0:16;
+#else
+		uint64_t reserved_15_0:16;
+		uint64_t chip_id:8;
+		uint64_t reserved_25_24:2;
+		uint64_t nocrypto:1;
+		uint64_t nomul:1;
+		uint64_t nodfa_cp2:1;
+		uint64_t reserved_31_29:3;
+		uint64_t raid_en:1;
+		uint64_t fus318:1;
+		uint64_t dorm_crypto:1;
+		uint64_t power_limit:2;
+		uint64_t rom_info:10;
+		uint64_t fus118:1;
+		uint64_t gbl_pwr_throttle:8;
+		uint64_t run_platform:3;
+		uint64_t reserved_59_63:5;
+#endif
+	} cn73xx;
+	struct cvmx_mio_fus_dat2_cn78xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_59_63:5;
+		uint64_t run_platform:3;
+		uint64_t reserved_48_55:8;
+		uint64_t fus118:1;
+		uint64_t rom_info:10;
+		uint64_t power_limit:2;
+		uint64_t dorm_crypto:1;
+		uint64_t fus318:1;
+		uint64_t raid_en:1;
+		uint64_t reserved_31_29:3;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t reserved_25_24:2;
+		uint64_t chip_id:8;
+		uint64_t reserved_0_15:16;
+#else
+		uint64_t reserved_0_15:16;
+		uint64_t chip_id:8;
+		uint64_t reserved_25_24:2;
+		uint64_t nocrypto:1;
+		uint64_t nomul:1;
+		uint64_t nodfa_cp2:1;
+		uint64_t reserved_31_29:3;
+		uint64_t raid_en:1;
+		uint64_t fus318:1;
+		uint64_t dorm_crypto:1;
+		uint64_t power_limit:2;
+		uint64_t rom_info:10;
+		uint64_t fus118:1;
+		uint64_t reserved_48_55:8;
+		uint64_t run_platform:3;
+		uint64_t reserved_59_63:5;
+#endif
+	} cn78xx;
+	struct cvmx_mio_fus_dat2_cn78xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_59_63:5;
+		uint64_t run_platform:3;
+		uint64_t gbl_pwr_throttle:8;
+		uint64_t fus118:1;
+		uint64_t rom_info:10;
+		uint64_t power_limit:2;
+		uint64_t dorm_crypto:1;
+		uint64_t fus318:1;
+		uint64_t raid_en:1;
+		uint64_t reserved_31_29:3;
+		uint64_t nodfa_cp2:1;
+		uint64_t nomul:1;
+		uint64_t nocrypto:1;
+		uint64_t reserved_25_24:2;
+		uint64_t chip_id:8;
+		uint64_t reserved_0_15:16;
+#else
+		uint64_t reserved_0_15:16;
+		uint64_t chip_id:8;
+		uint64_t reserved_25_24:2;
+		uint64_t nocrypto:1;
+		uint64_t nomul:1;
+		uint64_t nodfa_cp2:1;
+		uint64_t reserved_31_29:3;
+		uint64_t raid_en:1;
+		uint64_t fus318:1;
+		uint64_t dorm_crypto:1;
+		uint64_t power_limit:2;
+		uint64_t rom_info:10;
+		uint64_t fus118:1;
+		uint64_t gbl_pwr_throttle:8;
+		uint64_t run_platform:3;
+		uint64_t reserved_59_63:5;
+#endif
+	} cn78xxp2;
 	struct cvmx_mio_fus_dat2_cn61xx cnf71xx;
+	struct cvmx_mio_fus_dat2_cn73xx cnf75xx;
 };
 
 union cvmx_mio_fus_dat3 {
 	uint64_t u64;
 	struct cvmx_mio_fus_dat3_s {
 #ifdef __BIG_ENDIAN_BITFIELD
-		uint64_t reserved_58_63:6;
+		uint64_t ema0:6;
 		uint64_t pll_ctl:10;
 		uint64_t dfa_info_dte:3;
 		uint64_t dfa_info_clm:4;
-		uint64_t reserved_40_40:1;
-		uint64_t ema:2;
+		uint64_t pll_alt_matrix:1;
+		uint64_t reserved_38_39:2;
 		uint64_t efus_lck_rsv:1;
 		uint64_t efus_lck_man:1;
 		uint64_t pll_half_dis:1;
 		uint64_t l2c_crip:3;
-		uint64_t pll_div4:1;
-		uint64_t reserved_29_30:2;
-		uint64_t bar2_en:1;
+		uint64_t reserved_28_31:4;
 		uint64_t efus_lck:1;
 		uint64_t efus_ign:1;
 		uint64_t nozip:1;
 		uint64_t nodfa_dte:1;
-		uint64_t icache:24;
+		uint64_t reserved_0_23:24;
 #else
-		uint64_t icache:24;
+		uint64_t reserved_0_23:24;
 		uint64_t nodfa_dte:1;
 		uint64_t nozip:1;
 		uint64_t efus_ign:1;
 		uint64_t efus_lck:1;
-		uint64_t bar2_en:1;
-		uint64_t reserved_29_30:2;
-		uint64_t pll_div4:1;
+		uint64_t reserved_28_31:4;
 		uint64_t l2c_crip:3;
 		uint64_t pll_half_dis:1;
 		uint64_t efus_lck_man:1;
 		uint64_t efus_lck_rsv:1;
-		uint64_t ema:2;
-		uint64_t reserved_40_40:1;
+		uint64_t reserved_38_39:2;
+		uint64_t pll_alt_matrix:1;
 		uint64_t dfa_info_clm:4;
 		uint64_t dfa_info_dte:3;
 		uint64_t pll_ctl:10;
-		uint64_t reserved_58_63:6;
+		uint64_t ema0:6;
 #endif
 	} s;
 	struct cvmx_mio_fus_dat3_cn30xx {
@@ -2022,7 +2168,239 @@
 	struct cvmx_mio_fus_dat3_cn61xx cn66xx;
 	struct cvmx_mio_fus_dat3_cn61xx cn68xx;
 	struct cvmx_mio_fus_dat3_cn61xx cn68xxp1;
+	struct cvmx_mio_fus_dat3_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t ema0:6;
+		uint64_t pll_ctl:10;
+		uint64_t dfa_info_dte:3;
+		uint64_t dfa_info_clm:4;
+		uint64_t pll_alt_matrix:1;
+		uint64_t pll_bwadj_denom:2;
+		uint64_t efus_lck_rsv:1;
+		uint64_t efus_lck_man:1;
+		uint64_t pll_half_dis:1;
+		uint64_t l2c_crip:3;
+		uint64_t use_int_refclk:1;
+		uint64_t zip_info:2;
+		uint64_t bar2_sz_conf:1;
+		uint64_t efus_lck:1;
+		uint64_t efus_ign:1;
+		uint64_t nozip:1;
+		uint64_t nodfa_dte:1;
+		uint64_t ema1:6;
+		uint64_t reserved_0_17:18;
+#else
+		uint64_t reserved_0_17:18;
+		uint64_t ema1:6;
+		uint64_t nodfa_dte:1;
+		uint64_t nozip:1;
+		uint64_t efus_ign:1;
+		uint64_t efus_lck:1;
+		uint64_t bar2_sz_conf:1;
+		uint64_t zip_info:2;
+		uint64_t use_int_refclk:1;
+		uint64_t l2c_crip:3;
+		uint64_t pll_half_dis:1;
+		uint64_t efus_lck_man:1;
+		uint64_t efus_lck_rsv:1;
+		uint64_t pll_bwadj_denom:2;
+		uint64_t pll_alt_matrix:1;
+		uint64_t dfa_info_clm:4;
+		uint64_t dfa_info_dte:3;
+		uint64_t pll_ctl:10;
+		uint64_t ema0:6;
+#endif
+	} cn70xx;
+	struct cvmx_mio_fus_dat3_cn70xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t ema0:6;
+		uint64_t pll_ctl:10;
+		uint64_t dfa_info_dte:3;
+		uint64_t dfa_info_clm:4;
+		uint64_t reserved_38_40:3;
+		uint64_t efus_lck_rsv:1;
+		uint64_t efus_lck_man:1;
+		uint64_t pll_half_dis:1;
+		uint64_t l2c_crip:3;
+		uint64_t reserved_31_31:1;
+		uint64_t zip_info:2;
+		uint64_t bar2_sz_conf:1;
+		uint64_t efus_lck:1;
+		uint64_t efus_ign:1;
+		uint64_t nozip:1;
+		uint64_t nodfa_dte:1;
+		uint64_t ema1:6;
+		uint64_t reserved_0_17:18;
+#else
+		uint64_t reserved_0_17:18;
+		uint64_t ema1:6;
+		uint64_t nodfa_dte:1;
+		uint64_t nozip:1;
+		uint64_t efus_ign:1;
+		uint64_t efus_lck:1;
+		uint64_t bar2_sz_conf:1;
+		uint64_t zip_info:2;
+		uint64_t reserved_31_31:1;
+		uint64_t l2c_crip:3;
+		uint64_t pll_half_dis:1;
+		uint64_t efus_lck_man:1;
+		uint64_t efus_lck_rsv:1;
+		uint64_t reserved_38_40:3;
+		uint64_t dfa_info_clm:4;
+		uint64_t dfa_info_dte:3;
+		uint64_t pll_ctl:10;
+		uint64_t ema0:6;
+#endif
+	} cn70xxp1;
+	struct cvmx_mio_fus_dat3_cn73xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t ema0:6;
+		uint64_t pll_ctl:10;
+		uint64_t dfa_info_dte:3;
+		uint64_t dfa_info_clm:4;
+		uint64_t pll_alt_matrix:1;
+		uint64_t pll_bwadj_denom:2;
+		uint64_t efus_lck_rsv:1;
+		uint64_t efus_lck_man:1;
+		uint64_t pll_half_dis:1;
+		uint64_t l2c_crip:3;
+		uint64_t use_int_refclk:1;
+		uint64_t zip_info:2;
+		uint64_t bar2_sz_conf:1;
+		uint64_t efus_lck:1;
+		uint64_t efus_ign:1;
+		uint64_t nozip:1;
+		uint64_t nodfa_dte:1;
+		uint64_t ema1:6;
+		uint64_t nohna_dte:1;
+		uint64_t hna_info_dte:3;
+		uint64_t hna_info_clm:4;
+		uint64_t reserved_9_9:1;
+		uint64_t core_pll_mul:5;
+		uint64_t pnr_pll_mul:4;
+#else
+		uint64_t pnr_pll_mul:4;
+		uint64_t core_pll_mul:5;
+		uint64_t reserved_9_9:1;
+		uint64_t hna_info_clm:4;
+		uint64_t hna_info_dte:3;
+		uint64_t nohna_dte:1;
+		uint64_t ema1:6;
+		uint64_t nodfa_dte:1;
+		uint64_t nozip:1;
+		uint64_t efus_ign:1;
+		uint64_t efus_lck:1;
+		uint64_t bar2_sz_conf:1;
+		uint64_t zip_info:2;
+		uint64_t use_int_refclk:1;
+		uint64_t l2c_crip:3;
+		uint64_t pll_half_dis:1;
+		uint64_t efus_lck_man:1;
+		uint64_t efus_lck_rsv:1;
+		uint64_t pll_bwadj_denom:2;
+		uint64_t pll_alt_matrix:1;
+		uint64_t dfa_info_clm:4;
+		uint64_t dfa_info_dte:3;
+		uint64_t pll_ctl:10;
+		uint64_t ema0:6;
+#endif
+	} cn73xx;
+	struct cvmx_mio_fus_dat3_cn78xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t ema0:6;
+		uint64_t pll_ctl:10;
+		uint64_t dfa_info_dte:3;
+		uint64_t dfa_info_clm:4;
+		uint64_t reserved_38_40:3;
+		uint64_t efus_lck_rsv:1;
+		uint64_t efus_lck_man:1;
+		uint64_t pll_half_dis:1;
+		uint64_t l2c_crip:3;
+		uint64_t reserved_31_31:1;
+		uint64_t zip_info:2;
+		uint64_t bar2_sz_conf:1;
+		uint64_t efus_lck:1;
+		uint64_t efus_ign:1;
+		uint64_t nozip:1;
+		uint64_t nodfa_dte:1;
+		uint64_t ema1:6;
+		uint64_t nohna_dte:1;
+		uint64_t hna_info_dte:3;
+		uint64_t hna_info_clm:4;
+		uint64_t reserved_0_9:10;
+#else
+		uint64_t reserved_0_9:10;
+		uint64_t hna_info_clm:4;
+		uint64_t hna_info_dte:3;
+		uint64_t nohna_dte:1;
+		uint64_t ema1:6;
+		uint64_t nodfa_dte:1;
+		uint64_t nozip:1;
+		uint64_t efus_ign:1;
+		uint64_t efus_lck:1;
+		uint64_t bar2_sz_conf:1;
+		uint64_t zip_info:2;
+		uint64_t reserved_31_31:1;
+		uint64_t l2c_crip:3;
+		uint64_t pll_half_dis:1;
+		uint64_t efus_lck_man:1;
+		uint64_t efus_lck_rsv:1;
+		uint64_t reserved_38_40:3;
+		uint64_t dfa_info_clm:4;
+		uint64_t dfa_info_dte:3;
+		uint64_t pll_ctl:10;
+		uint64_t ema0:6;
+#endif
+	} cn78xx;
+	struct cvmx_mio_fus_dat3_cn73xx cn78xxp2;
 	struct cvmx_mio_fus_dat3_cn61xx cnf71xx;
+	struct cvmx_mio_fus_dat3_cnf75xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t ema0:6;
+		uint64_t pll_ctl:10;
+		uint64_t dfa_info_dte:3;
+		uint64_t dfa_info_clm:4;
+		uint64_t pll_alt_matrix:1;
+		uint64_t pll_bwadj_denom:2;
+		uint64_t efus_lck_rsv:1;
+		uint64_t efus_lck_man:1;
+		uint64_t pll_half_dis:1;
+		uint64_t l2c_crip:3;
+		uint64_t use_int_refclk:1;
+		uint64_t zip_info:2;
+		uint64_t bar2_sz_conf:1;
+		uint64_t efus_lck:1;
+		uint64_t efus_ign:1;
+		uint64_t nozip:1;
+		uint64_t nodfa_dte:1;
+		uint64_t ema1:6;
+		uint64_t reserved_9_17:9;
+		uint64_t core_pll_mul:5;
+		uint64_t pnr_pll_mul:4;
+#else
+		uint64_t pnr_pll_mul:4;
+		uint64_t core_pll_mul:5;
+		uint64_t reserved_9_17:9;
+		uint64_t ema1:6;
+		uint64_t nodfa_dte:1;
+		uint64_t nozip:1;
+		uint64_t efus_ign:1;
+		uint64_t efus_lck:1;
+		uint64_t bar2_sz_conf:1;
+		uint64_t zip_info:2;
+		uint64_t use_int_refclk:1;
+		uint64_t l2c_crip:3;
+		uint64_t pll_half_dis:1;
+		uint64_t efus_lck_man:1;
+		uint64_t efus_lck_rsv:1;
+		uint64_t pll_bwadj_denom:2;
+		uint64_t pll_alt_matrix:1;
+		uint64_t dfa_info_clm:4;
+		uint64_t dfa_info_dte:3;
+		uint64_t pll_ctl:10;
+		uint64_t ema0:6;
+#endif
+	} cnf75xx;
 };
 
 union cvmx_mio_fus_ema {
diff --git a/arch/mips/include/asm/octeon/cvmx-sysinfo.h b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
index 2131197..c6c3ee3 100644
--- a/arch/mips/include/asm/octeon/cvmx-sysinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2016 Cavium, Inc.
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -32,6 +32,8 @@
 #ifndef __CVMX_SYSINFO_H__
 #define __CVMX_SYSINFO_H__
 
+#include "cvmx-coremask.h"
+
 #define OCTEON_SERIAL_LEN 20
 /**
  * Structure describing application specific information.
@@ -50,8 +52,7 @@
 	uint64_t system_dram_size;
 
 	/* ptr to memory descriptor block */
-	void *phy_mem_desc_ptr;
-
+	uint64_t phy_mem_desc_addr;
 
 	/* Application image specific variables */
 	/* stack top address (virtual) */
@@ -63,7 +64,7 @@
 	/* heap size in bytes */
 	uint32_t heap_size;
 	/* coremask defining cores running application */
-	uint32_t core_mask;
+	struct cvmx_coremask core_mask;
 	/* Deprecated, use cvmx_coremask_first_core() to select init core */
 	uint32_t init_core;
 
@@ -121,32 +122,4 @@
 
 extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
 
-/**
- * This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.)	to configure the minimal fields that
- * are required to use simple executive files directly.
- *
- * Locking (if required) must be handled outside of this
- * function
- *
- * @phy_mem_desc_ptr: Pointer to global physical memory descriptor
- *		     (bootmem descriptor) @board_type: Octeon board
- *		     type enumeration
- *
- * @board_rev_major:
- *		     Board major revision
- * @board_rev_minor:
- *		     Board minor revision
- * @cpu_clock_hz:
- *		     CPU clock freqency in hertz
- *
- * Returns 0: Failure
- *	   1: success
- */
-extern int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
-					   uint16_t board_type,
-					   uint8_t board_rev_major,
-					   uint8_t board_rev_minor,
-					   uint32_t cpu_clock_hz);
-
 #endif /* __CVMX_SYSINFO_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 19e139c..2530e87 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -57,6 +57,7 @@
 #include <asm/octeon/cvmx-sysinfo.h>
 
 #include <asm/octeon/cvmx-ciu-defs.h>
+#include <asm/octeon/cvmx-ciu3-defs.h>
 #include <asm/octeon/cvmx-gpio-defs.h>
 #include <asm/octeon/cvmx-iob-defs.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
@@ -189,7 +190,7 @@
 static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
 {
 	if (sizeof(void *) == 8) {
-		/* Just set the top bit, avoiding any TLB uglyness */
+		/* Just set the top bit, avoiding any TLB ugliness */
 		return CASTPTR(void,
 			       CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
 					    physical_address));
@@ -341,6 +342,21 @@
 	return core_num;
 }
 
+/* Maximum # of bits to define core in node */
+#define CVMX_NODE_NO_SHIFT	7
+#define CVMX_NODE_MASK		0x3
+static inline unsigned int cvmx_get_node_num(void)
+{
+	unsigned int core_num = cvmx_get_core_num();
+
+	return (core_num >> CVMX_NODE_NO_SHIFT) & CVMX_NODE_MASK;
+}
+
+static inline unsigned int cvmx_get_local_core_num(void)
+{
+	return cvmx_get_core_num() & ((1 << CVMX_NODE_NO_SHIFT) - 1);
+}
+
 /**
  * Returns the number of bits set in the provided value.
  * Simple wrapper for POP instruction.
@@ -448,8 +464,15 @@
 /* Return the number of cores available in the chip */
 static inline uint32_t cvmx_octeon_num_cores(void)
 {
-	uint32_t ciu_fuse = (uint32_t) cvmx_read_csr(CVMX_CIU_FUSE) & 0xffff;
-	return cvmx_pop(ciu_fuse);
+	u64 ciu_fuse_reg;
+	u64 ciu_fuse;
+
+	if (OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))
+		ciu_fuse_reg = CVMX_CIU3_FUSE;
+	else
+		ciu_fuse_reg = CVMX_CIU_FUSE;
+	ciu_fuse = cvmx_read_csr(ciu_fuse_reg);
+	return cvmx_dpop(ciu_fuse);
 }
 
 #endif /*  __CVMX_H__  */
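
The new cvmx_get_node_num()/cvmx_get_local_core_num() helpers split the EBASE core number exactly as the cvmx-coremask.h comment describes: seven low bits of per-node core number with two node bits above them, and cvmx_octeon_num_cores() now pop-counts the full 64-bit fuse value. A standalone sketch of both calculations (the core number and fuse mask are illustrative, and __builtin_popcountll merely stands in for cvmx_dpop):

    #include <stdint.h>
    #include <stdio.h>

    #define NODE_NO_SHIFT 7     /* bits of per-node core number */
    #define NODE_MASK     0x3   /* up to 4 nodes */

    int main(void)
    {
        unsigned int core  = 130;    /* EBASE core number, e.g. on CN78XX */
        unsigned int node  = (core >> NODE_NO_SHIFT) & NODE_MASK;
        unsigned int local = core & ((1 << NODE_NO_SHIFT) - 1);

        /* 130 -> node 1, local core 2 (node 1 spans cores 128-175). */
        printf("core %u -> node %u, local core %u\n", core, node, local);

        /* Counting available cores from a fuse mask, as the CIU3 path does. */
        uint64_t ciu_fuse = 0xffffffffffffull;    /* 48 cores fused on */
        printf("cores present: %d\n", __builtin_popcountll(ciu_fuse));
        return 0;
    }
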
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index 3ed10a8..a19ca3b 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -81,6 +81,10 @@
 	OCTEON_FEATURE_HFA,
 	OCTEON_FEATURE_DFM,
 	OCTEON_FEATURE_CIU2,
+	OCTEON_FEATURE_CIU3,
+	/* Octeon has FPA first seen on 78XX */
+	OCTEON_FEATURE_FPA3,
+	OCTEON_FEATURE_FAU,
 	OCTEON_MAX_FEATURE
 };
 
@@ -110,7 +114,7 @@
  * Returns Non zero if the feature exists. Zero if the feature does not
  *	   exist.
  */
-static inline int octeon_has_feature(enum octeon_feature feature)
+static inline bool octeon_has_feature(enum octeon_feature feature)
 {
 	switch (feature) {
 	case OCTEON_FEATURE_SAAD:
@@ -122,7 +126,7 @@
 			fus_2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
 			return !fus_2.s.nocrypto && !fus_2.s.nomul && fus_2.s.dorm_crypto;
 		} else {
-			return 0;
+			return false;
 		}
 
 	case OCTEON_FEATURE_PCIE:
@@ -190,11 +194,20 @@
 
 	case OCTEON_FEATURE_CIU2:
 		return OCTEON_IS_MODEL(OCTEON_CN68XX);
+	case OCTEON_FEATURE_CIU3:
+	case OCTEON_FEATURE_FPA3:
+		return OCTEON_IS_MODEL(OCTEON_CN78XX)
+			|| OCTEON_IS_MODEL(OCTEON_CNF75XX)
+			|| OCTEON_IS_MODEL(OCTEON_CN73XX);
+	case OCTEON_FEATURE_FAU:
+		return !(OCTEON_IS_MODEL(OCTEON_CN78XX)
+			 || OCTEON_IS_MODEL(OCTEON_CNF75XX)
+			 || OCTEON_IS_MODEL(OCTEON_CN73XX));
 
 	default:
 		break;
 	}
-	return 0;
+	return false;
 }
 
 #endif /* __OCTEON_FEATURE_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index 92b377e..6c68517 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -74,7 +74,12 @@
  * CN7XXX models with new revision encoding
  */
 
+#define OCTEON_CNF75XX_PASS1_0	0x000d9800
+#define OCTEON_CNF75XX		(OCTEON_CNF75XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF75XX_PASS1_X	(OCTEON_CNF75XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
 #define OCTEON_CN73XX_PASS1_0	0x000d9700
+#define OCTEON_CN73XX_PASS1_1	0x000d9701
 #define OCTEON_CN73XX		(OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN73XX_PASS1_X	(OCTEON_CN73XX_PASS1_0 | \
 				 OM_IGNORE_MINOR_REVISION)
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index de9f74e..07c0516 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -299,6 +299,31 @@
 	cvmx_read64_uint32(address ^ 4);
 }
 
+#ifdef CONFIG_SMP
+void octeon_setup_smp(void);
+#else
+static inline void octeon_setup_smp(void) {}
+#endif
+
+struct irq_domain;
+struct device_node;
+struct irq_data;
+struct irq_chip;
+void octeon_ciu3_mbox_send(int cpu, unsigned int mbox);
+int octeon_irq_ciu3_xlat(struct irq_domain *d,
+			 struct device_node *node,
+			 const u32 *intspec,
+			 unsigned int intsize,
+			 unsigned long *out_hwirq,
+			 unsigned int *out_type);
+void octeon_irq_ciu3_enable(struct irq_data *data);
+void octeon_irq_ciu3_disable(struct irq_data *data);
+void octeon_irq_ciu3_ack(struct irq_data *data);
+void octeon_irq_ciu3_mask(struct irq_data *data);
+void octeon_irq_ciu3_mask_ack(struct irq_data *data);
+int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
+			 irq_hw_number_t hw, struct irq_chip *chip);
+
 /* Octeon multiplier save/restore routines from octeon_switch.S */
 void octeon_mult_save(void);
 void octeon_mult_restore(void);
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 8c16fb7..86b239d 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -43,8 +43,6 @@
 	   and XFree86. Eventually will be removed. */
 	unsigned int need_domain_info;
 
-	int iommu;
-
 	/* Optional access methods for reading/writing the bus number
 	   of the PCI controller */
 	int (*get_busno)(void);
@@ -106,11 +104,11 @@
 struct pci_dev;
 
 /*
- * The PCI address space does equal the physical memory address space.	The
- * networking and block device layers use this boolean for bounce buffer
- * decisions.  This is set if any hose does not have an IOMMU.
+ * The PCI address space does equal the physical memory address space.
+ * The networking and block device layers use this boolean for bounce
+ * buffer decisions.
  */
-extern unsigned int PCI_DMA_BUS_IS_PHYS;
+#define PCI_DMA_BUS_IS_PHYS     (1)
 
 #ifdef CONFIG_PCI_DOMAINS
 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 8d7a63b..3206245 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -269,16 +269,16 @@
 	union {
 		u32		cmd_word;
 		struct {
-			u32	didn:4,		/* Destination ID */
-				sidn:4,		/* Source ID	  */
-				pactyp:4,	/* Packet type	  */
-				tnum:5,		/* Trans Number	  */
-				coh:1,		/* Coh Transacti  */
-				ds:2,		/* Data size	  */
-				gbr:1,		/* GBR enable	  */
-				vbpm:1,		/* VBPM message	  */
+			u32	didn:4,		/* Destination ID  */
+				sidn:4,		/* Source ID	   */
+				pactyp:4,	/* Packet type	   */
+				tnum:5,		/* Trans Number	   */
+				coh:1,		/* Coh Transaction */
+				ds:2,		/* Data size	   */
+				gbr:1,		/* GBR enable	   */
+				vbpm:1,		/* VBPM message	   */
 				error:1,	/* Error occurred  */
-				barr:1,		/* Barrier op	  */
+				barr:1,		/* Barrier op	   */
 				rsvd:8;
 		} berr_st;
 	} berr_un;
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 832e216..d21f3da 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -103,8 +103,8 @@
 	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
 }
 
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
+#if defined(CONFIG_XPA)
+
 #define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
 static inline pte_t
 pfn_pte(unsigned long pfn, pgprot_t prot)
@@ -118,9 +118,21 @@
 	return pte;
 }
 
-#else
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
+#define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+	pte_t pte;
+
+	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
+	pte.pte_low = pgprot_val(prot);
+
+	return pte;
+}
+
+#else
 
 #ifdef CONFIG_CPU_VR41XX
 #define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
@@ -131,6 +143,8 @@
 #endif
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
 
+#define pte_page(x)		pfn_to_page(pte_pfn(x))
+
 #define __pgd_offset(address)	pgd_index(address)
 #define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 #define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
@@ -166,7 +180,7 @@
 
 #else
 
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 
 /* Swap entries must have VALID and GLOBAL bits cleared. */
 #define __swp_type(x)			(((x).val >> 4) & 0x1f)
@@ -175,6 +189,15 @@
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
 #define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })
 
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/* Swap entries must have VALID and GLOBAL bits cleared. */
+#define __swp_type(x)			(((x).val >> 2) & 0x1f)
+#define __swp_offset(x)			 ((x).val >> 7)
+#define __swp_entry(type, offset)	((swp_entry_t)  { ((type) << 2) | ((offset) << 7) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })
+
 #else
 /*
  * Constraints:
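
For the non-XPA 36-bit-physical MIPS32 configuration, the new pfn_pte()/pte_pfn() pair above keeps the PFN in pte_high, shifted above six low attribute bits. A userspace round-trip of just that packing (plain integers rather than the kernel's pte_t, with made-up pfn/attribute values):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the 36-bit-phys layout: pte_high = (pfn << 6) | (prot & 0x3f). */
    static uint32_t pack_pte_high(uint32_t pfn, uint32_t prot)
    {
        return (pfn << 6) | (prot & 0x3f);
    }

    static uint32_t unpack_pfn(uint32_t pte_high)
    {
        return pte_high >> 6;
    }

    int main(void)
    {
        uint32_t pfn  = 0x123456;   /* a physical page frame, 4K pages */
        uint32_t prot = 0x2b;       /* illustrative low protection bits */
        uint32_t high = pack_pte_high(pfn, prot);

        printf("pte_high = 0x%08x, pfn back = 0x%06x\n", high, unpack_pfn(high));
        return 0;
    }
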
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index cf661a2..514cbc0 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,7 +17,7 @@
 #include <asm/cachectl.h>
 #include <asm/fixmap.h>
 
-#ifdef CONFIG_PAGE_SIZE_64KB
+#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
 #include <asm-generic/pgtable-nopmd.h>
 #else
 #include <asm-generic/pgtable-nopud.h>
@@ -90,7 +90,11 @@
 #define PTE_ORDER		0
 #endif
 #ifdef CONFIG_PAGE_SIZE_16KB
-#define PGD_ORDER		0
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define PGD_ORDER               1
+#else
+#define PGD_ORDER               0
+#endif
 #define PUD_ORDER		aieeee_attempt_to_allocate_pud
 #define PMD_ORDER		0
 #define PTE_ORDER		0
@@ -104,7 +108,11 @@
 #ifdef CONFIG_PAGE_SIZE_64KB
 #define PGD_ORDER		0
 #define PUD_ORDER		aieeee_attempt_to_allocate_pud
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define PMD_ORDER		0
+#else
 #define PMD_ORDER		aieeee_attempt_to_allocate_pmd
+#endif
 #define PTE_ORDER		0
 #endif
 
@@ -114,11 +122,7 @@
 #endif
 #define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
 
-#if PGDIR_SIZE >= TASK_SIZE64
-#define USER_PTRS_PER_PGD	(1)
-#else
-#define USER_PTRS_PER_PGD	(TASK_SIZE64 / PGDIR_SIZE)
-#endif
+#define USER_PTRS_PER_PGD       ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
 #define FIRST_USER_ADDRESS	0UL
 
 /*
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 97b3138..f88a48c 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -32,149 +32,132 @@
  * unpredictable things.  The code (when it is written) to deal with
  * this problem will be in the update_mmu_cache() code for the r4k.
  */
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 
 /*
- * The following bits are implemented by the TLB hardware
+ * Page table bit offsets used for 64 bit physical addressing on
+ * MIPS32r5 with XPA.
  */
-#define _PAGE_NO_EXEC_SHIFT	0
-#define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
-#define _PAGE_NO_READ_SHIFT	(_PAGE_NO_EXEC_SHIFT + 1)
-#define _PAGE_NO_READ		(1 << _PAGE_NO_READ_SHIFT)
-#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
-#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
-#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
-#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
-#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
-#define _CACHE_MASK		(7 << _CACHE_SHIFT)
+enum pgtable_bits {
+	/* Used by TLB hardware (placed in EntryLo*) */
+	_PAGE_NO_EXEC_SHIFT,
+	_PAGE_NO_READ_SHIFT,
+	_PAGE_GLOBAL_SHIFT,
+	_PAGE_VALID_SHIFT,
+	_PAGE_DIRTY_SHIFT,
+	_CACHE_SHIFT,
 
-/*
- * The following bits are implemented in software
- */
-#define _PAGE_PRESENT_SHIFT	(24)
-#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
-#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
-#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
-#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)
-
-#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+	/* Used only by software (masked out before writing EntryLo*) */
+	_PAGE_PRESENT_SHIFT = 24,
+	_PAGE_WRITE_SHIFT,
+	_PAGE_ACCESSED_SHIFT,
+	_PAGE_MODIFIED_SHIFT,
+};
 
 /*
  * Bits for extended EntryLo0/EntryLo1 registers
  */
 #define _PFNX_MASK		0xffffff
 
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/*
+ * Page table bit offsets used for 36 bit physical addressing on MIPS32,
+ * for example with Alchemy or Netlogic XLP/XLR.
+ */
+enum pgtable_bits {
+	/* Used by TLB hardware (placed in EntryLo*) */
+	_PAGE_GLOBAL_SHIFT,
+	_PAGE_VALID_SHIFT,
+	_PAGE_DIRTY_SHIFT,
+	_CACHE_SHIFT,
+
+	/* Used only by software (masked out before writing EntryLo*) */
+	_PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,
+	_PAGE_NO_READ_SHIFT,
+	_PAGE_WRITE_SHIFT,
+	_PAGE_ACCESSED_SHIFT,
+	_PAGE_MODIFIED_SHIFT,
+};
+
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
-/*
- * The following bits are implemented in software
- */
-#define _PAGE_PRESENT_SHIFT	(0)
-#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
-#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
-#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
-#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)
+/* Page table bits used for r3k systems */
+enum pgtable_bits {
+	/* Used only by software (writes to EntryLo ignored) */
+	_PAGE_PRESENT_SHIFT,
+	_PAGE_NO_READ_SHIFT,
+	_PAGE_WRITE_SHIFT,
+	_PAGE_ACCESSED_SHIFT,
+	_PAGE_MODIFIED_SHIFT,
 
-/*
- * The following bits are implemented by the TLB hardware
- */
-#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 4)
-#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
-#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
-#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
-#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_UNCACHED_SHIFT	(_PAGE_DIRTY_SHIFT + 1)
-#define _CACHE_UNCACHED		(1 << _CACHE_UNCACHED_SHIFT)
-#define _CACHE_MASK		_CACHE_UNCACHED
-
-#define _PFN_SHIFT		PAGE_SHIFT
+	/* Used by TLB hardware (placed in EntryLo) */
+	_PAGE_GLOBAL_SHIFT = 8,
+	_PAGE_VALID_SHIFT,
+	_PAGE_DIRTY_SHIFT,
+	_CACHE_UNCACHED_SHIFT,
+};
 
 #else
-/*
- * Below are the "Normal" R4K cases
- */
 
-/*
- * The following bits are implemented in software
- */
-#define _PAGE_PRESENT_SHIFT	0
-#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
-/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-#define _PAGE_WRITE_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
-#else
-#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
-#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
+/* Page table bits used for r4k systems */
+enum pgtable_bits {
+	/* Used only by software (masked out before writing EntryLo*) */
+	_PAGE_PRESENT_SHIFT,
+#if !defined(CONFIG_CPU_HAS_RIXI)
+	_PAGE_NO_READ_SHIFT,
 #endif
-#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
-#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
-#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)
-
+	_PAGE_WRITE_SHIFT,
+	_PAGE_ACCESSED_SHIFT,
+	_PAGE_MODIFIED_SHIFT,
 #if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
-/* Huge TLB page */
-#define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
-#define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
-#endif	/* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
-
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-/* XI - page cannot be executed */
-#ifdef _PAGE_HUGE_SHIFT
-#define _PAGE_NO_EXEC_SHIFT	(_PAGE_HUGE_SHIFT + 1)
-#else
-#define _PAGE_NO_EXEC_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
+	_PAGE_HUGE_SHIFT,
 #endif
-#define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
 
-/* RI - page cannot be read */
-#define _PAGE_READ_SHIFT	(_PAGE_NO_EXEC_SHIFT + 1)
-#define _PAGE_READ		(cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
-#define _PAGE_NO_READ_SHIFT	_PAGE_READ_SHIFT
-#define _PAGE_NO_READ		(cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
-#endif	/* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */
-
-#if defined(_PAGE_NO_READ_SHIFT)
-#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
-#elif defined(_PAGE_HUGE_SHIFT)
-#define _PAGE_GLOBAL_SHIFT	(_PAGE_HUGE_SHIFT + 1)
-#else
-#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
+	/* Used by TLB hardware (placed in EntryLo*) */
+#if defined(CONFIG_CPU_HAS_RIXI)
+	_PAGE_NO_EXEC_SHIFT,
+	_PAGE_NO_READ_SHIFT,
 #endif
-#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-
-#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
-#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
-#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
-#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
-#define _CACHE_MASK		(7 << _CACHE_SHIFT)
-
-#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+	_PAGE_GLOBAL_SHIFT,
+	_PAGE_VALID_SHIFT,
+	_PAGE_DIRTY_SHIFT,
+	_CACHE_SHIFT,
+};
 
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
 
+/* Used only by software */
+#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)
+#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+# define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
+#endif
+
+/* Used by TLB hardware (placed in EntryLo*) */
+#if defined(CONFIG_XPA)
+# define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
+#elif defined(CONFIG_CPU_HAS_RIXI)
+# define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
+#endif
+#define _PAGE_NO_READ		(1 << _PAGE_NO_READ_SHIFT)
+#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+# define _CACHE_UNCACHED	(1 << _CACHE_UNCACHED_SHIFT)
+# define _CACHE_MASK		_CACHE_UNCACHED
+# define _PFN_SHIFT		PAGE_SHIFT
+#else
+# define _CACHE_MASK		(7 << _CACHE_SHIFT)
+# define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+#endif
+
 #ifndef _PAGE_NO_EXEC
 #define _PAGE_NO_EXEC		0
 #endif
-#ifndef _PAGE_NO_READ
-#define _PAGE_NO_READ		0
-#endif
 
 #define _PAGE_SILENT_READ	_PAGE_VALID
 #define _PAGE_SILENT_WRITE	_PAGE_DIRTY
@@ -191,14 +174,13 @@
  */
 
 
-#ifndef __ASSEMBLY__
 /*
  * pte_to_entrylo converts a page table entry (PTE) into a Mips
  * entrylo0/1 value.
  */
 static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 {
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+#ifdef CONFIG_CPU_HAS_RIXI
 	if (cpu_has_rixi) {
 		int sa;
 #ifdef CONFIG_32BIT
@@ -218,7 +200,6 @@
 
 	return pte_val >> _PAGE_GLOBAL_SHIFT;
 }
-#endif
 
 /*
  * Cache attributes
@@ -274,7 +255,7 @@
 #define _CACHE_UNCACHED_ACCELERATED	(7<<_CACHE_SHIFT)
 #endif
 
-#define __READABLE	(_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
+#define __READABLE	(_PAGE_SILENT_READ | _PAGE_ACCESSED)
 #define __WRITEABLE	(_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
 
 #define _PAGE_CHG_MASK	(_PAGE_ACCESSED | _PAGE_MODIFIED |	\
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9a4fe01..e07a105 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -23,18 +23,19 @@
 struct mm_struct;
 struct vm_area_struct;
 
-#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
+				 _CACHE_CACHABLE_NONCOHERENT)
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
 				 _page_cachable_default)
-#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
+#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
 				 _page_cachable_default)
-#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | \
 				 _page_cachable_default)
 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 				 _PAGE_GLOBAL | _page_cachable_default)
 #define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
 				 _page_cachable_default)
 #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
 			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
@@ -127,10 +128,19 @@
 	}								\
 } while(0)
 
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval);
+
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
-#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
+#ifdef CONFIG_XPA
+# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
+#else
+# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+#endif
+
 #define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
+#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
@@ -138,17 +148,23 @@
 	smp_wmb();
 	ptep->pte_low = pte.pte_low;
 
+#ifdef CONFIG_XPA
 	if (pte.pte_high & _PAGE_GLOBAL) {
+#else
+	if (pte.pte_low & _PAGE_GLOBAL) {
+#endif
 		pte_t *buddy = ptep_buddy(ptep);
 		/*
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
-		if (pte_none(*buddy))
+		if (pte_none(*buddy)) {
+			if (!config_enabled(CONFIG_XPA))
+				buddy->pte_low |= _PAGE_GLOBAL;
 			buddy->pte_high |= _PAGE_GLOBAL;
+		}
 	}
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -156,8 +172,13 @@
 
 	htw_stop();
 	/* Preserve global status for the pair */
-	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
-		null.pte_high = _PAGE_GLOBAL;
+	if (config_enabled(CONFIG_XPA)) {
+		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
+			null.pte_high = _PAGE_GLOBAL;
+	} else {
+		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
+			null.pte_low = null.pte_high = _PAGE_GLOBAL;
+	}
 
 	set_pte_at(mm, addr, ptep, null);
 	htw_start();
@@ -166,6 +187,7 @@
 
 #define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
 #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
+#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
 
 /*
  * Certain architectures need to do special things when pte's
@@ -187,30 +209,42 @@
 		 * For SMP, multiple CPUs can race, so we need to do
 		 * this atomically.
 		 */
-#ifdef CONFIG_64BIT
-#define LL_INSN "lld"
-#define SC_INSN "scd"
-#else /* CONFIG_32BIT */
-#define LL_INSN "ll"
-#define SC_INSN "sc"
-#endif
 		unsigned long page_global = _PAGE_GLOBAL;
 		unsigned long tmp;
 
-		__asm__ __volatile__ (
-			"	.set	push\n"
-			"	.set	noreorder\n"
-			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
-			"	bnez	%[tmp], 2f\n"
-			"	 or	%[tmp], %[tmp], %[global]\n"
-			"	" SC_INSN "	%[tmp], %[buddy]\n"
-			"	beqz	%[tmp], 1b\n"
-			"	 nop\n"
-			"2:\n"
-			"	.set pop"
-			: [buddy] "+m" (buddy->pte),
-			  [tmp] "=&r" (tmp)
+		if (kernel_uses_llsc && R10000_LLSC_WAR) {
+			__asm__ __volatile__ (
+			"	.set	arch=r4000			\n"
+			"	.set	push				\n"
+			"	.set	noreorder			\n"
+			"1:"	__LL	"%[tmp], %[buddy]		\n"
+			"	bnez	%[tmp], 2f			\n"
+			"	 or	%[tmp], %[tmp], %[global]	\n"
+				__SC	"%[tmp], %[buddy]		\n"
+			"	beqzl	%[tmp], 1b			\n"
+			"	nop					\n"
+			"2:						\n"
+			"	.set	pop				\n"
+			"	.set	mips0				\n"
+			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
 			: [global] "r" (page_global));
+		} else if (kernel_uses_llsc) {
+			__asm__ __volatile__ (
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
+			"	.set	push				\n"
+			"	.set	noreorder			\n"
+			"1:"	__LL	"%[tmp], %[buddy]		\n"
+			"	bnez	%[tmp], 2f			\n"
+			"	 or	%[tmp], %[tmp], %[global]	\n"
+				__SC	"%[tmp], %[buddy]		\n"
+			"	beqz	%[tmp], 1b			\n"
+			"	nop					\n"
+			"2:						\n"
+			"	.set	pop				\n"
+			"	.set	mips0				\n"
+			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
+			: [global] "r" (page_global));
+		}
 #else /* !CONFIG_SMP */
 		if (pte_none(*buddy))
 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
@@ -218,7 +252,6 @@
 	}
 #endif
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -234,6 +267,22 @@
 }
 #endif
 
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	extern void __update_cache(unsigned long address, pte_t pte);
+
+	if (!pte_present(pteval))
+		goto cache_sync_done;
+
+	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
+		goto cache_sync_done;
+
+	__update_cache(addr, pteval);
+cache_sync_done:
+	set_pte(ptep, pteval);
+}
+
 /*
  * (pmds are folded into puds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
@@ -270,6 +319,8 @@
 static inline pte_t pte_wrprotect(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_WRITE;
+	if (!config_enabled(CONFIG_XPA))
+		pte.pte_low &= ~_PAGE_SILENT_WRITE;
 	pte.pte_high &= ~_PAGE_SILENT_WRITE;
 	return pte;
 }
@@ -277,6 +328,8 @@
 static inline pte_t pte_mkclean(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_MODIFIED;
+	if (!config_enabled(CONFIG_XPA))
+		pte.pte_low &= ~_PAGE_SILENT_WRITE;
 	pte.pte_high &= ~_PAGE_SILENT_WRITE;
 	return pte;
 }
@@ -284,6 +337,8 @@
 static inline pte_t pte_mkold(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_ACCESSED;
+	if (!config_enabled(CONFIG_XPA))
+		pte.pte_low &= ~_PAGE_SILENT_READ;
 	pte.pte_high &= ~_PAGE_SILENT_READ;
 	return pte;
 }
@@ -291,24 +346,33 @@
 static inline pte_t pte_mkwrite(pte_t pte)
 {
 	pte.pte_low |= _PAGE_WRITE;
-	if (pte.pte_low & _PAGE_MODIFIED)
+	if (pte.pte_low & _PAGE_MODIFIED) {
+		if (!config_enabled(CONFIG_XPA))
+			pte.pte_low |= _PAGE_SILENT_WRITE;
 		pte.pte_high |= _PAGE_SILENT_WRITE;
+	}
 	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
 	pte.pte_low |= _PAGE_MODIFIED;
-	if (pte.pte_low & _PAGE_WRITE)
+	if (pte.pte_low & _PAGE_WRITE) {
+		if (!config_enabled(CONFIG_XPA))
+			pte.pte_low |= _PAGE_SILENT_WRITE;
 		pte.pte_high |= _PAGE_SILENT_WRITE;
+	}
 	return pte;
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
 	pte.pte_low |= _PAGE_ACCESSED;
-	if (pte.pte_low & _PAGE_READ)
+	if (!(pte.pte_low & _PAGE_NO_READ)) {
+		if (!config_enabled(CONFIG_XPA))
+			pte.pte_low |= _PAGE_SILENT_READ;
 		pte.pte_high |= _PAGE_SILENT_READ;
+	}
 	return pte;
 }
 #else
@@ -353,13 +417,8 @@
 static inline pte_t pte_mkyoung(pte_t pte)
 {
 	pte_val(pte) |= _PAGE_ACCESSED;
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	if (!(pte_val(pte) & _PAGE_NO_READ))
 		pte_val(pte) |= _PAGE_SILENT_READ;
-	else
-#endif
-	if (pte_val(pte) & _PAGE_READ)
-		pte_val(pte) |= _PAGE_SILENT_READ;
 	return pte;
 }
 
@@ -411,7 +470,7 @@
  */
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
@@ -420,6 +479,15 @@
 	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
 	return pte;
 }
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte.pte_low  &= _PAGE_CHG_MASK;
+	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
+	pte.pte_low  |= pgprot_val(newprot);
+	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
+	return pte;
+}
 #else
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
@@ -430,15 +498,12 @@
 
 extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
 	pte_t pte);
-extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
-	pte_t pte);
 
 static inline void update_mmu_cache(struct vm_area_struct *vma,
 	unsigned long address, pte_t *ptep)
 {
 	pte_t pte = *ptep;
 	__update_tlb(vma, address, pte);
-	__update_cache(vma, address, pte);
 }
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
@@ -542,13 +607,8 @@
 {
 	pmd_val(pmd) |= _PAGE_ACCESSED;
 
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	if (!(pmd_val(pmd) & _PAGE_NO_READ))
 		pmd_val(pmd) |= _PAGE_SILENT_READ;
-	else
-#endif
-	if (pmd_val(pmd) & _PAGE_READ)
-		pmd_val(pmd) |= _PAGE_SILENT_READ;
 
 	return pmd;
 }
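
The set_pte_at() added above moves the cache writeback out of update_mmu_cache(): __update_cache() now runs only when a present mapping is installed and the backing page frame actually changes. A simplified userspace sketch of that decision alone (the fake_* types and helpers are stand-ins for illustration, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fake_pte { uint64_t pfn; bool present; };

    /* Stand-in for __update_cache(): here it only reports that it ran. */
    static void fake_update_cache(uint64_t addr)
    {
        printf("flush for %#llx\n", (unsigned long long)addr);
    }

    /* Mirrors the set_pte_at() decision: flush only when a present mapping
     * is installed and its page frame differs from the old mapping's. */
    static void fake_set_pte_at(uint64_t addr, struct fake_pte *slot,
                                struct fake_pte new)
    {
        if (new.present && !(slot->present && slot->pfn == new.pfn))
            fake_update_cache(addr);
        *slot = new;    /* stand-in for set_pte() */
    }

    int main(void)
    {
        struct fake_pte pte = { .pfn = 0x100, .present = true };

        fake_set_pte_at(0x400000, &pte, (struct fake_pte){ 0x100, true }); /* same pfn: no flush */
        fake_set_pte_at(0x400000, &pte, (struct fake_pte){ 0x200, true }); /* new pfn: flush */
        fake_set_pte_at(0x400000, &pte, (struct fake_pte){ 0, false });    /* not present: no flush */
        return 0;
    }
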
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 041153f..7e78b62 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -63,7 +63,11 @@
  * 8192EB ...
  */
 #define TASK_SIZE32	0x7fff8000UL
-#define TASK_SIZE64	0x10000000000UL
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define TASK_SIZE64     (0x1UL << ((cpu_data[0].vmbits>48)?48:cpu_data[0].vmbits))
+#else
+#define TASK_SIZE64     0x10000000000UL
+#endif
 #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
 #define STACK_TOP_MAX	TASK_SIZE64
 
@@ -355,6 +359,10 @@
  */
 extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp);
 
+static inline void flush_thread(void)
+{
+}
+
 unsigned long get_wchan(struct task_struct *p);
 
 #define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index 1d8a2e2..684fb3a 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -2,27 +2,32 @@
 
 #include <linux/unistd.h>
 
-/*
- * Kludge alert:
- *
- * The generic seccomp code currently allows only a single compat ABI.	Until
- * this is fixed we priorize O32 as the compat ABI over N32.
- */
-#ifdef CONFIG_MIPS32_O32
+#ifdef CONFIG_COMPAT
+static inline const int *get_compat_mode1_syscalls(void)
+{
+	static const int syscalls_O32[] = {
+		__NR_O32_Linux + 3, __NR_O32_Linux + 4,
+		__NR_O32_Linux + 1, __NR_O32_Linux + 193,
+		0, /* null terminated */
+	};
+	static const int syscalls_N32[] = {
+		__NR_N32_Linux + 0, __NR_N32_Linux + 1,
+		__NR_N32_Linux + 58, __NR_N32_Linux + 211,
+		0, /* null terminated */
+	};
 
-#define __NR_seccomp_read_32		4003
-#define __NR_seccomp_write_32		4004
-#define __NR_seccomp_exit_32		4001
-#define __NR_seccomp_sigreturn_32	4193	/* rt_sigreturn */
+	if (config_enabled(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
+		return syscalls_O32;
 
-#elif defined(CONFIG_MIPS32_N32)
+	if (config_enabled(CONFIG_MIPS32_N32))
+		return syscalls_N32;
 
-#define __NR_seccomp_read_32		6000
-#define __NR_seccomp_write_32		6001
-#define __NR_seccomp_exit_32		6058
-#define __NR_seccomp_sigreturn_32	6211	/* rt_sigreturn */
+	BUG();
+}
 
-#endif /* CONFIG_MIPS32_O32 */
+#define get_compat_mode1_syscalls get_compat_mode1_syscalls
+
+#endif /* CONFIG_COMPAT */
 
 #include <asm-generic/seccomp.h>
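
get_compat_mode1_syscalls() hands the generic seccomp code a zero-terminated list of the four syscalls a strict-mode compat task may still issue (read, write, exit and rt_sigreturn in the ABI's own numbering). A sketch of how such a list is consumed; the numbers below are the O32 ones from the table above, and the walker is a simplification of the generic mode-1 check rather than the kernel's implementation:

    #include <stdbool.h>
    #include <stdio.h>

    /* O32 strict-mode whitelist: read, write, exit, rt_sigreturn; 0-terminated. */
    static const int mode1_syscalls_o32[] = { 4003, 4004, 4001, 4193, 0 };

    /* Simplified walk over a zero-terminated allowed-syscall list. */
    static bool mode1_allowed(const int *allowed, int nr)
    {
        for (; *allowed; allowed++)
            if (*allowed == nr)
                return true;
        return false;
    }

    int main(void)
    {
        printf("write (4004): %s\n",
               mode1_allowed(mode1_syscalls_o32, 4004) ? "allowed" : "killed");
        printf("open  (4005): %s\n",
               mode1_allowed(mode1_syscalls_o32, 4005) ? "allowed" : "killed");
        return 0;
    }
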
 
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
index 59920b3..4a9c990 100644
--- a/arch/mips/include/asm/sgi/hpc3.h
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -147,7 +147,7 @@
 #define HPC3_EPCFG_P1	 0x000f /* Cycles to spend in P1 state for PIO */
 #define HPC3_EPCFG_P2	 0x00f0 /* Cycles to spend in P2 state for PIO */
 #define HPC3_EPCFG_P3	 0x0f00 /* Cycles to spend in P3 state for PIO */
-#define HPC3_EPCFG_TST	 0x1000 /* Diagnistic ram test feature bit */
+#define HPC3_EPCFG_TST	 0x1000 /* Diagnostic ram test feature bit */
 
 	u32 _unused2[0x1000/4 - 8];	/* padding */
 
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
index 26ddfff..105a947 100644
--- a/arch/mips/include/asm/sgiarcs.h
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -144,7 +144,7 @@
 struct linux_vdirent {
 	ULONG namelen;
 	unsigned char attr;
-	char fname[32]; /* XXX imperical, should be a define */
+	char fname[32]; /* XXX empirical, should be a define */
 };
 
 /* Other stuff for files. */
@@ -179,7 +179,7 @@
 	enum linux_devtypes   dtype;
 	unsigned long	      namelen;
 	unsigned char	      attr;
-	char		      name[32]; /* XXX imperical, should be define */
+	char		      name[32]; /* XXX empirical, should be define */
 };
 
 /* This describes the vector containing function pointers to the ARC
diff --git a/arch/mips/include/asm/sibyte/bcm1480_regs.h b/arch/mips/include/asm/sibyte/bcm1480_regs.h
index ec0dacf..32a8483 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_regs.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_regs.h
@@ -415,8 +415,8 @@
 					(cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
 #define A_BCM1480_IMR_ALIAS_MAILBOX_REGISTER(cpu, reg) (A_BCM1480_IMR_ALIAS_MAILBOX(cpu)+(reg))
 
-#define R_BCM1480_IMR_ALIAS_MAILBOX_0		0x0000		/* 0x0x0 */
-#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET	0x0008		/* 0x0x8 */
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0		0x0000
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET	0x0008
 
 /*
  * these macros work together to build the address of a mailbox
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 003e273..2292373 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -11,11 +11,17 @@
 
 #include <uapi/asm/signal.h>
 
+#ifdef CONFIG_MIPS32_COMPAT
+extern struct mips_abi mips_abi_32;
 
-#ifdef CONFIG_TRAD_SIGNALS
-#define sig_uses_siginfo(ka)	((ka)->sa.sa_flags & SA_SIGINFO)
+#define sig_uses_siginfo(ka, abi)                               \
+	((abi != &mips_abi_32) ? 1 :                            \
+		((ka)->sa.sa_flags & SA_SIGINFO))
 #else
-#define sig_uses_siginfo(ka)	(1)
+#define sig_uses_siginfo(ka, abi)                               \
+	(config_enabled(CONFIG_64BIT) ? 1 :                     \
+		(config_enabled(CONFIG_TRAD_SIGNALS) ?          \
+			((ka)->sa.sa_flags & SA_SIGINFO) : 1) )
 #endif
 
 #include <asm/sigcontext.h>
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
index 326c16e..2ae1f61 100644
--- a/arch/mips/include/asm/smp-cps.h
+++ b/arch/mips/include/asm/smp-cps.h
@@ -29,7 +29,7 @@
 extern void mips_cps_core_entry(void);
 extern void mips_cps_core_init(void);
 
-extern struct vpe_boot_config *mips_cps_boot_vpes(void);
+extern void mips_cps_boot_vpes(struct core_boot_config *cfg, unsigned vpe);
 
 extern void mips_cps_pm_save(void);
 extern void mips_cps_pm_restore(void);
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
index e33f036..feb3851 100644
--- a/arch/mips/include/asm/sn/ioc3.h
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -355,7 +355,7 @@
 #define SSCR_PAUSE_STATE 0x40000000	/* sets when PAUSE takes effect */
 #define SSCR_RESET	0x80000000	/* reset DMA channels */
 
-/* all producer/comsumer pointers are the same bitfield */
+/* all producer/consumer pointers are the same bitfield */
 #define PROD_CONS_PTR_4K 0x00000ff8	/* for 4K buffers */
 #define PROD_CONS_PTR_1K 0x000003f8	/* for 1K buffers */
 #define PROD_CONS_PTR_OFF 3
diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h
index 5998b13..57ece90 100644
--- a/arch/mips/include/asm/sn/sn0/hubio.h
+++ b/arch/mips/include/asm/sn/sn0/hubio.h
@@ -628,7 +628,7 @@
 /*
  * Values for field imsgtype
  */
-#define IIO_ICRB_IMSGT_XTALK	0	/* Incoming Meessage from Xtalk */
+#define IIO_ICRB_IMSGT_XTALK	0	/* Incoming Message from Xtalk */
 #define IIO_ICRB_IMSGT_BTE	1	/* Incoming message from BTE	*/
 #define IIO_ICRB_IMSGT_SN0NET	2	/* Incoming message from SN0 net */
 #define IIO_ICRB_IMSGT_CRB	3	/* Incoming message from CRB ???  */
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 28b5d84a..ebb5c0f 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -105,7 +105,7 @@
 	__clear_software_ll_bit();					\
 	if (cpu_has_userlocal)						\
 		write_c0_userlocal(task_thread_info(next)->tp_value);	\
-	__restore_watch();						\
+	__restore_watch(next);						\
 	(last) = resume(prev, next, task_thread_info(next));		\
 } while (0)
 
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 095ecaf..7f109d4 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -95,7 +95,7 @@
 }
 
 /*
- * Is a address valid? This does a straighforward calculation rather
+ * Is a address valid? This does a straightforward calculation rather
  * than tests.
  *
  * Address valid if:
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index fc1cdd2..b6ecfee 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -171,7 +171,8 @@
 Ip_u3u1u2(_xor);
 Ip_u2u1u3(_xori);
 Ip_u2u1(_yield);
-
+Ip_u1u2(_ldpte);
+Ip_u2u1u3(_lddir);
 
 /* Handle labels. */
 struct uasm_label {
diff --git a/arch/mips/include/asm/watch.h b/arch/mips/include/asm/watch.h
index 20126ec..6ffe3ea 100644
--- a/arch/mips/include/asm/watch.h
+++ b/arch/mips/include/asm/watch.h
@@ -12,21 +12,21 @@
 
 #include <asm/mipsregs.h>
 
-void mips_install_watch_registers(void);
+void mips_install_watch_registers(struct task_struct *t);
 void mips_read_watch_registers(void);
 void mips_clear_watch_registers(void);
 void mips_probe_watch_registers(struct cpuinfo_mips *c);
 
 #ifdef CONFIG_HARDWARE_WATCHPOINTS
-#define __restore_watch() do {						\
+#define __restore_watch(task) do {					\
 	if (unlikely(test_bit(TIF_LOAD_WATCH,				\
-			      &current_thread_info()->flags))) {	\
-		mips_install_watch_registers();				\
+			      &task_thread_info(task)->flags))) {	\
+		mips_install_watch_registers(task);			\
 	}								\
 } while (0)
 
 #else
-#define __restore_watch() do {} while (0)
+#define __restore_watch(task) do {} while (0)
 #endif
 
 #endif /* _ASM_WATCH_H */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index ddea53e..8051f9a 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -167,6 +167,7 @@
 	fceill_op    =	0x0a, ffloorl_op   =  0x0b,
 	fround_op    =	0x0c, ftrunc_op	   =  0x0d,
 	fceil_op     =	0x0e, ffloor_op	   =  0x0f,
+	fsel_op      =  0x10,
 	fmovc_op     =	0x11, fmovz_op	   =  0x12,
 	fmovn_op     =	0x13, fseleqz_op   =  0x14,
 	frecip_op    =  0x15, frsqrt_op    =  0x16,
@@ -204,6 +205,16 @@
 };
 
 /*
+ * func field for page table walker (Loongson-3).
+ */
+enum ptw_func {
+	lwdir_op = 0x00,
+	lwpte_op = 0x01,
+	lddir_op = 0x02,
+	ldpte_op = 0x03,
+};
+
+/*
  * func field for special3 lx opcodes (Cavium Octeon).
  */
 enum lx_func {
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index cc49dc2..8069cf7 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -28,7 +28,7 @@
 
 #define __ARCH_SIGSYS
 
-#include <uapi/asm-generic/siginfo.h>
+#include <asm-generic/siginfo.h>
 
 /* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
 typedef struct siginfo {
@@ -42,13 +42,13 @@
 
 		/* kill() */
 		struct {
-			pid_t _pid;		/* sender's pid */
+			__kernel_pid_t _pid;	/* sender's pid */
 			__ARCH_SI_UID_T _uid;	/* sender's uid */
 		} _kill;
 
 		/* POSIX.1b timers */
 		struct {
-			timer_t _tid;		/* timer id */
+			__kernel_timer_t _tid;	/* timer id */
 			int _overrun;		/* overrun count */
 			char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
 			sigval_t _sigval;	/* same as below */
@@ -57,26 +57,26 @@
 
 		/* POSIX.1b signals */
 		struct {
-			pid_t _pid;		/* sender's pid */
+			__kernel_pid_t _pid;	/* sender's pid */
 			__ARCH_SI_UID_T _uid;	/* sender's uid */
 			sigval_t _sigval;
 		} _rt;
 
 		/* SIGCHLD */
 		struct {
-			pid_t _pid;		/* which child */
+			__kernel_pid_t _pid;	/* which child */
 			__ARCH_SI_UID_T _uid;	/* sender's uid */
 			int _status;		/* exit code */
-			clock_t _utime;
-			clock_t _stime;
+			__kernel_clock_t _utime;
+			__kernel_clock_t _stime;
 		} _sigchld;
 
 		/* IRIX SIGCHLD */
 		struct {
-			pid_t _pid;		/* which child */
-			clock_t _utime;
+			__kernel_pid_t _pid;	/* which child */
+			__kernel_clock_t _utime;
 			int _status;		/* exit code */
-			clock_t _stime;
+			__kernel_clock_t _stime;
 		} _irix_sigchld;
 
 		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
@@ -123,6 +123,4 @@
 #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */
 #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */
 
-#include <asm-generic/siginfo.h>
-
 #endif /* _UAPI_ASM_SIGINFO_H */
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 3129795..24ad815 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -381,16 +381,18 @@
 #define __NR_membarrier			(__NR_Linux + 358)
 #define __NR_mlock2			(__NR_Linux + 359)
 #define __NR_copy_file_range		(__NR_Linux + 360)
+#define __NR_preadv2			(__NR_Linux + 361)
+#define __NR_pwritev2			(__NR_Linux + 362)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		360
+#define __NR_Linux_syscalls		362
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		360
+#define __NR_O32_Linux_syscalls		362
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -719,16 +721,18 @@
 #define __NR_membarrier			(__NR_Linux + 318)
 #define __NR_mlock2			(__NR_Linux + 319)
 #define __NR_copy_file_range		(__NR_Linux + 320)
+#define __NR_preadv2			(__NR_Linux + 321)
+#define __NR_pwritev2			(__NR_Linux + 322)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		320
+#define __NR_Linux_syscalls		322
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		320
+#define __NR_64_Linux_syscalls		322
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1061,15 +1065,17 @@
 #define __NR_membarrier			(__NR_Linux + 322)
 #define __NR_mlock2			(__NR_Linux + 323)
 #define __NR_copy_file_range		(__NR_Linux + 324)
+#define __NR_preadv2			(__NR_Linux + 325)
+#define __NR_pwritev2			(__NR_Linux + 326)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls		324
+#define __NR_Linux_syscalls		326
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		324
+#define __NR_N32_Linux_syscalls		326
 
 #endif /* _UAPI_ASM_UNISTD_H */
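The new preadv2/pwritev2 entries above are offsets from each ABI's __NR_Linux base, which this header sets to 4000 (o32), 5000 (64-bit) and 6000 (N32), so the absolute syscall numbers come out as 4361/4362, 5321/5322 and 6325/6326. A throwaway user-space check of that arithmetic, illustrative only:

	#include <stdio.h>

	int main(void)
	{
		/* base + offset, taken from the definitions in this header */
		printf("o32 preadv2 = %d, pwritev2 = %d\n", 4000 + 361, 4000 + 362);
		printf("n64 preadv2 = %d, pwritev2 = %d\n", 5000 + 321, 5000 + 322);
		printf("n32 preadv2 = %d, pwritev2 = %d\n", 6000 + 325, 6000 + 326);
		return 0;
	}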
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 934b15b..4e3f9b7a 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -39,8 +39,6 @@
 
 #include "clock.h"
 
-static bool is_avt2;
-
 /* GPIOs */
 #define QI_LB60_GPIO_SD_CD		JZ_GPIO_PORTD(0)
 #define QI_LB60_GPIO_SD_VCC_EN_N	JZ_GPIO_PORTD(2)
@@ -367,43 +365,12 @@
 	.power_active_low	= 1,
 };
 
-/* OHCI */
-static struct regulator_consumer_supply avt2_usb_regulator_consumer =
-	REGULATOR_SUPPLY("vbus", "jz4740-ohci");
-
-static struct regulator_init_data avt2_usb_regulator_init_data = {
-	.num_consumer_supplies = 1,
-	.consumer_supplies = &avt2_usb_regulator_consumer,
-	.constraints = {
-		.name = "USB power",
-		.min_uV = 5000000,
-		.max_uV = 5000000,
-		.valid_modes_mask = REGULATOR_MODE_NORMAL,
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-static struct fixed_voltage_config avt2_usb_regulator_data = {
-	.supply_name = "USB power",
-	.microvolts = 5000000,
-	.gpio = JZ_GPIO_PORTB(17),
-	.init_data = &avt2_usb_regulator_init_data,
-};
-
-static struct platform_device avt2_usb_regulator_device = {
-	.name = "reg-fixed-voltage",
-	.id = -1,
-	.dev = {
-		.platform_data = &avt2_usb_regulator_data,
-	}
-};
-
+/* beeper */
 static struct pwm_lookup qi_lb60_pwm_lookup[] = {
 	PWM_LOOKUP("jz4740-pwm", 4, "pwm-beeper", NULL, 0,
 		   PWM_POLARITY_NORMAL),
 };
 
-/* beeper */
 static struct platform_device qi_lb60_pwm_beeper = {
 	.name = "pwm-beeper",
 	.id = -1,
@@ -487,11 +454,6 @@
 	spi_register_board_info(qi_lb60_spi_board_info,
 				ARRAY_SIZE(qi_lb60_spi_board_info));
 
-	if (is_avt2) {
-		platform_device_register(&avt2_usb_regulator_device);
-		platform_device_register(&jz4740_usb_ohci_device);
-	}
-
 	pwm_add_table(qi_lb60_pwm_lookup, ARRAY_SIZE(qi_lb60_pwm_lookup));
 
 	return platform_add_devices(jz_platform_devices,
@@ -499,19 +461,9 @@
 
 }
 
-static __init int board_avt2(char *str)
-{
-	qi_lb60_mmc_pdata.card_detect_active_low = 1;
-	is_avt2 = true;
-
-	return 1;
-}
-__setup("avt2", board_avt2);
-
 static int __init qi_lb60_board_setup(void)
 {
-	printk(KERN_INFO "Qi Hardware JZ4740 QI %s setup\n",
-		is_avt2 ? "AVT2" : "LB60");
+	printk(KERN_INFO "Qi Hardware JZ4740 QI LB60 setup\n");
 
 	board_gpio_setup();
 
diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c
index e8a463b..2f1dab3 100644
--- a/arch/mips/jz4740/platform.c
+++ b/arch/mips/jz4740/platform.c
@@ -32,31 +32,6 @@
 
 #include "clock.h"
 
-/* OHCI controller */
-static struct resource jz4740_usb_ohci_resources[] = {
-	{
-		.start	= JZ4740_UHC_BASE_ADDR,
-		.end	= JZ4740_UHC_BASE_ADDR + 0x1000 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	{
-		.start	= JZ4740_IRQ_UHC,
-		.end	= JZ4740_IRQ_UHC,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-struct platform_device jz4740_usb_ohci_device = {
-	.name		= "jz4740-ohci",
-	.id		= -1,
-	.dev = {
-		.dma_mask = &jz4740_usb_ohci_device.dev.coherent_dma_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-	},
-	.num_resources	= ARRAY_SIZE(jz4740_usb_ohci_resources),
-	.resource	= jz4740_usb_ohci_resources,
-};
-
 /* USB Device Controller */
 struct platform_device jz4740_udc_xceiv_device = {
 	.name = "usb_phy_generic",
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index b0988fd..e6053d0 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -44,7 +44,7 @@
 
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SMP_UP)		+= smp-up.o
-obj-$(CONFIG_CPU_BMIPS)		+= smp-bmips.o bmips_vec.o
+obj-$(CONFIG_CPU_BMIPS)		+= smp-bmips.o bmips_vec.o bmips_5xxx_init.o
 
 obj-$(CONFIG_MIPS_MT)		+= mips-mt.o
 obj-$(CONFIG_MIPS_MT_FPAFF)	+= mips-mt-fpaff.o
@@ -83,6 +83,8 @@
 
 obj-$(CONFIG_GPIO_TXX9)		+= gpio_txx9.o
 
+obj-$(CONFIG_RELOCATABLE)	+= relocate.o
+
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 154e203..1ea973b 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/kbuild.h>
 #include <linux/suspend.h>
+#include <asm/cpu-info.h>
 #include <asm/pm.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -338,6 +339,15 @@
 }
 #endif
 
+void output_cpuinfo_defines(void)
+{
+	COMMENT(" MIPS cpuinfo offsets. ");
+	DEFINE(CPUINFO_SIZE, sizeof(struct cpuinfo_mips));
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+	OFFSET(CPUINFO_ASID_MASK, cpuinfo_mips, asid_mask);
+#endif
+}
+
 void output_kvm_defines(void)
 {
 	COMMENT(" KVM/MIPS Specfic offsets. ");
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 1b992c6..58ad63d 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -30,21 +30,7 @@
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
-#define elf_check_arch(hdr)						\
-({									\
-	int __res = 1;							\
-	struct elfhdr *__h = (hdr);					\
-									\
-	if (!mips_elf_check_machine(__h))				\
-		__res = 0;						\
-	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
-		__res = 0;						\
-	if (((__h->e_flags & EF_MIPS_ABI2) == 0) ||			\
-	    ((__h->e_flags & EF_MIPS_ABI) != 0))			\
-		__res = 0;						\
-									\
-	__res;								\
-})
+#define elf_check_arch elfn32_check_arch
 
 #define TASK32_SIZE		0x7fff8000UL
 #undef ELF_ET_DYN_BASE
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index abd3aff..49fb881 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -28,39 +28,9 @@
 typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 /*
- * In order to be sure that we don't attempt to execute an O32 binary which
- * requires 64 bit FP (FR=1) on a system which does not support it we refuse
- * to execute any binary which has bits specified by the following macro set
- * in its ELF header flags.
- */
-#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
-# define __MIPS_O32_FP64_MUST_BE_ZERO	0
-#else
-# define __MIPS_O32_FP64_MUST_BE_ZERO	EF_MIPS_FP64
-#endif
-
-/*
  * This is used to ensure we don't load something for the wrong architecture.
  */
-#define elf_check_arch(hdr)						\
-({									\
-	int __res = 1;							\
-	struct elfhdr *__h = (hdr);					\
-									\
-	if (!mips_elf_check_machine(__h))				\
-		__res = 0;						\
-	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
-		__res = 0;						\
-	if ((__h->e_flags & EF_MIPS_ABI2) != 0)				\
-		__res = 0;						\
-	if (((__h->e_flags & EF_MIPS_ABI) != 0) &&			\
-	    ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32))		\
-		__res = 0;						\
-	if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO)		\
-		__res = 0;						\
-									\
-	__res;								\
-})
+#define elf_check_arch elfo32_check_arch
 
 #ifdef CONFIG_KVM_GUEST
 #define TASK32_SIZE		0x3fff8000UL
diff --git a/arch/mips/kernel/bmips_5xxx_init.S b/arch/mips/kernel/bmips_5xxx_init.S
new file mode 100644
index 0000000..adaa82e
--- /dev/null
+++ b/arch/mips/kernel/bmips_5xxx_init.S
@@ -0,0 +1,753 @@
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011-2012 by Broadcom Corporation
+ *
+ * Init for bmips 5000.
+ * Used to init the second core in dual-core 5000s.
+ */
+
+#include <linux/init.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+#include <asm/hazards.h>
+#include <asm/bmips.h>
+
+#ifdef CONFIG_CPU_BMIPS5000
+
+
+#define cacheop(kva, size, linesize, op) 	\
+	.set noreorder			;	\
+	addu		t1, kva, size	;	\
+	subu		t2, linesize, 1	;	\
+	not		t2		;	\
+	and		t0, kva, t2	;	\
+	addiu		t1, t1, -1	;	\
+	and		t1, t2		;	\
+9:	cache		op, 0(t0)	;	\
+	bne		t0, t1, 9b	;	\
+	 addu		t0, linesize	;	\
+	.set reorder			;
+
+
+
+#define	IS_SHIFT	22
+#define	IL_SHIFT	19
+#define	IA_SHIFT	16
+#define	DS_SHIFT	13
+#define	DL_SHIFT	10
+#define	DA_SHIFT	 7
+#define	IS_MASK		 7
+#define	IL_MASK		 7
+#define	IA_MASK		 7
+#define	DS_MASK		 7
+#define	DL_MASK		 7
+#define	DA_MASK		 7
+#define	ICE_MASK	0x80000000
+#define	DCE_MASK	0x40000000
+
+#define CP0_BRCM_CONFIG0	$22, 0
+#define CP0_BRCM_MODE		$22, 1
+#define	CP0_CONFIG_K0_MASK	7
+
+#define CP0_ICACHE_TAG_LO	$28
+#define CP0_ICACHE_DATA_LO	$28, 1
+#define CP0_DCACHE_TAG_LO	$28, 2
+#define CP0_D_SEC_CACHE_DATA_LO	$28, 3
+#define CP0_ICACHE_TAG_HI	$29
+#define CP0_ICACHE_DATA_HI	$29, 1
+#define CP0_DCACHE_TAG_HI	$29, 2
+
+#define CP0_BRCM_MODE_Luc_MASK		(1 << 11)
+#define	CP0_BRCM_CONFIG0_CWF_MASK	(1 << 20)
+#define	CP0_BRCM_CONFIG0_TSE_MASK	(1 << 19)
+#define CP0_BRCM_MODE_SET_MASK		(1 << 7)
+#define CP0_BRCM_MODE_ClkRATIO_MASK	(7 << 4)
+#define CP0_BRCM_MODE_BrPRED_MASK 	(3 << 24)
+#define CP0_BRCM_MODE_BrPRED_SHIFT	24
+#define CP0_BRCM_MODE_BrHIST_MASK 	(0x1f << 20)
+#define CP0_BRCM_MODE_BrHIST_SHIFT	20
+
+/* ZSC L2 Cache Register Access Register Definitions */
+#define BRCM_ZSC_ALL_REGS_SELECT		0x7 << 24
+
+#define BRCM_ZSC_CONFIG_REG			0 << 3
+#define BRCM_ZSC_REQ_BUFFER_REG			2 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG0		4 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG1		6 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG2		8 << 3
+
+#define BRCM_ZSC_SCB0_ADDR_MAPPING_REG0		0xa << 3
+#define BRCM_ZSC_SCB0_ADDR_MAPPING_REG1		0xc << 3
+
+#define BRCM_ZSC_SCB1_ADDR_MAPPING_REG0		0xe << 3
+#define BRCM_ZSC_SCB1_ADDR_MAPPING_REG1		0x10 << 3
+
+#define BRCM_ZSC_CONFIG_LMB1En			1 << (15)
+#define BRCM_ZSC_CONFIG_LMB0En			1 << (14)
+
+/* branch prediction values */
+
+#define BRCM_BrPRED_ALL_TAKEN		(0x0)
+#define BRCM_BrPRED_ALL_NOT_TAKEN	(0x1)
+#define BRCM_BrPRED_BHT_ENABLE		(0x2)
+#define BRCM_BrPRED_PREDICT_BACKWARD	(0x3)
+
+
+
+.align 2
+/*
+ * Function: 	size_i_cache
+ * Arguments: 	None
+ * Returns:	v0 = i cache size, v1 = I cache line size
+ * Description: compute the I-cache size and I-cache line size
+ * Trashes:	v0, v1, a0, t0
+ *
+ *	pseudo code:
+ *
+ */
+
+LEAF(size_i_cache)
+	.set	noreorder
+
+	mfc0	a0, CP0_CONFIG, 1
+	move	t0, a0
+
+	/*
+	 * Determine sets per way: IS
+	 *
+	 * This field contains the number of sets (i.e., indices) per way of
+	 * the instruction cache:
+	 * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k
+	 * vi) 0x5 - 0x7: Reserved.
+	 */
+
+	srl	a0, a0, IS_SHIFT
+	and	a0, a0, IS_MASK
+
+	/* sets per way = (64<<IS) */
+
+	li	v0, 0x40
+	sllv	v0, v0, a0
+
+	/*
+	 * Determine line size
+	 *
+	 * This field contains the line size of the instruction cache:
+	 * i) 0x0: No I-cache present, ii) 0x3: 16 bytes, iii) 0x4: 32 bytes,
+	 * iv) 0x5: 64 bytes, v) the rest: Reserved.
+	 */
+
+	move	a0, t0
+
+	srl	a0, a0, IL_SHIFT
+	and	a0, a0, IL_MASK
+
+	beqz	a0, no_i_cache
+	nop
+
+	/* line size = 2 ^ (IL+1) */
+
+	addi	a0, a0, 1
+	li	v1, 1
+	sll	v1, v1, a0
+
+	/* v0 now has sets per way; multiply it by the line size
+	 * to get the set size
+	 */
+
+	sll	v0, v0, a0
+
+	/*
+	 * Determine set associativity
+	 *
+	 * This field contains the set associativity of the instruction cache.
+	 * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3:
+	 * 4-way, v) 0x4 - 0x7: Reserved.
+	 */
+
+	move	a0, t0
+
+	srl	a0, a0, IA_SHIFT
+	and	a0, a0, IA_MASK
+	addi	a0, a0, 0x1
+
+	/* v0 has the set size; multiply it by the
+	 * set associativity to get the cache size
+	 */
+
+	multu	v0, a0	/*multu is interlocked, so no need to insert nops */
+	mflo	v0
+	b	1f
+	nop
+
+no_i_cache:
+	move	v0, zero
+	move	v1, zero
+1:
+	jr	ra
+	nop
+	.set	reorder
+
+END(size_i_cache)
+
+/*
+ * Function: 	size_d_cache
+ * Arguments: 	None
+ * Returns:	v0 = d cache size, v1 = d cache line size
+ * Description: compute the D-cache size and D-cache line size.
+ * Trashes:	v0, v1, a0, t0
+ *
+ */
+
+LEAF(size_d_cache)
+	.set	noreorder
+
+	mfc0	a0, CP0_CONFIG, 1
+	move	t0, a0
+
+	/*
+	 * Determine sets per way: DS
+	 *
+	 * This field contains the number of sets (i.e., indices) per way of
+	 * the data cache:
+	 * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k
+	 * vi) 0x5 - 0x7: Reserved.
+	 */
+
+	srl	a0, a0, DS_SHIFT
+	and	a0, a0, DS_MASK
+
+	/* sets per way = (64<<DS) */
+
+	li	v0, 0x40
+	sllv	v0, v0, a0
+
+	/*
+	 * Determine line size
+	 *
+	 * This field contains the line size of the data cache:
+	 * i) 0x0: No D-cache present, ii) 0x3: 16 bytes, iii) 0x4: 32 bytes,
+	 * iv) 0x5: 64 bytes, v) the rest: Reserved.
+	 */
+	move	a0, t0
+
+	srl	a0, a0, DL_SHIFT
+	and	a0, a0, DL_MASK
+
+	beqz	a0, no_d_cache
+	nop
+
+	/* line size = 2 ^ (DL+1) */
+
+	addi	a0, a0, 1
+	li	v1, 1
+	sll	v1, v1, a0
+
+	/* v0 now has sets per way; multiply it by the line size
+	 * to get the set size
+	 */
+
+	sll	v0, v0, a0
+
+	/* determine set associativity
+	 *
+	 * This field contains the set associativity of the data cache.
+	 * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3:
+	 * 4-way, v) 0x4 - 0x7: Reserved.
+	 */
+
+	move	a0, t0
+
+	srl	a0, a0, DA_SHIFT
+	and	a0, a0, DA_MASK
+	addi	a0, a0, 0x1
+
+	/* v0 has the set size; multiply it by the
+	 * set associativity to get the cache size
+	 */
+
+	multu	v0, a0	/*multu is interlocked, so no need to insert nops */
+	mflo	v0
+
+	b	1f
+	nop
+
+no_d_cache:
+	move	v0, zero
+	move	v1, zero
+1:
+	jr	ra
+	nop
+	.set	reorder
+
+END(size_d_cache)
+
+
+/*
+ * Function: enable_ID
+ * Arguments: 	None
+ * Returns:	None
+ * Description: Enable I and D caches, initialize I and D-caches, also set
+ *		hardware delay for d-cache (TP0).
+ * Trashes:	t0
+ *
+ */
+	.global	enable_ID
+	.ent	enable_ID
+	.set	noreorder
+enable_ID:
+	mfc0	t0, CP0_BRCM_CONFIG0
+	or	t0, t0, (ICE_MASK | DCE_MASK)
+	mtc0	t0, CP0_BRCM_CONFIG0
+	jr	ra
+	nop
+
+	.end	enable_ID
+	.set	reorder
+
+
+/*
+ * Function: l1_init
+ * Arguments: 	None
+ * Returns:	None
+ * Description: Enable I and D caches, and initialize I and D-caches
+ * Trashes:	a0, v0, v1, t0, t1, t2, t8
+ *
+ */
+	.globl	l1_init
+	.ent	l1_init
+	.set	noreorder
+l1_init:
+
+	/* save return address */
+	move	t8, ra
+
+
+	/* initialize I and D cache Data and Tag registers.  */
+	mtc0	zero, CP0_ICACHE_TAG_LO
+	mtc0	zero, CP0_ICACHE_TAG_HI
+	mtc0	zero, CP0_ICACHE_DATA_LO
+	mtc0	zero, CP0_ICACHE_DATA_HI
+	mtc0	zero, CP0_DCACHE_TAG_LO
+	mtc0	zero, CP0_DCACHE_TAG_HI
+
+	/* Enable Caches before Clearing. If the caches are disabled
+	 * then the cache operations to clear the cache will be ignored
+	 */
+
+	jal	enable_ID
+	nop
+
+	jal	size_i_cache	/* v0 = i-cache size, v1 = i-cache line size */
+	nop
+
+	/* run uncached in kseg 1 */
+	la	k0, 1f
+	lui	k1, 0x2000
+	or	k0, k1, k0
+	jr	k0
+	nop
+1:
+
+	/*
+	 * set K0 cache mode
+	 */
+
+	mfc0	t0, CP0_CONFIG
+	and	t0, t0, ~CP0_CONFIG_K0_MASK
+	or	t0, t0, 3	/* Write Back mode */
+	mtc0	t0, CP0_CONFIG
+
+	/*
+	 * Initialize instruction cache.
+	 */
+
+	li	a0, KSEG0
+	cacheop(a0, v0, v1, Index_Store_Tag_I)
+
+	/*
+	 * Now we can run from I-$, kseg 0
+	 */
+	la	k0, 1f
+	lui	k1, 0x2000
+	or	k0, k1, k0
+	xor	k0, k1, k0
+	jr	k0
+	nop
+1:
+	/*
+	 * Initialize data cache.
+	 */
+
+	jal	size_d_cache	/* v0 = d-cache size, v1 = d-cache line size */
+	nop
+
+
+	li	a0, KSEG0
+	cacheop(a0, v0, v1, Index_Store_Tag_D)
+
+	jr	t8
+	nop
+
+	.end 	l1_init
+	.set	reorder
+
+
+/*
+ * Function: 	set_other_config
+ * Arguments:	none
+ * Returns:	None
+ * Description: initialize the remaining configuration to defaults.
+ * Trashes:	t0, t1
+ *
+ *	pseudo code:
+ *
+ */
+LEAF(set_other_config)
+	.set noreorder
+
+	/* enable Bus error for I-fetch */
+	mfc0	t0, CP0_CACHEERR, 0
+	li	t1, 0x4
+	or	t0, t1
+	mtc0	t0, CP0_CACHEERR, 0
+
+	/* enable Bus error for Load */
+	mfc0	t0, CP0_CACHEERR, 1
+	li	t1, 0x4
+	or	t0, t1
+	mtc0	t0, CP0_CACHEERR, 1
+
+	/* enable Bus Error for Store */
+	mfc0	t0, CP0_CACHEERR, 2
+	li	t1, 0x4
+	or	t0, t1
+	mtc0	t0, CP0_CACHEERR, 2
+
+	jr	ra
+	nop
+	.set reorder
+END(set_other_config)
+
+/*
+ * Function: 	set_branch_pred
+ * Arguments:	none
+ * Returns:	None
+ * Description:
+ * Trashes:	t0, t1
+ *
+ *	pseudo code:
+ *
+ */
+
+LEAF(set_branch_pred)
+	.set noreorder
+	mfc0	t0, CP0_BRCM_MODE
+	li	t1, ~(CP0_BRCM_MODE_BrPRED_MASK | CP0_BRCM_MODE_BrHIST_MASK )
+	and	t0, t0, t1
+
+	/* enable Branch prediction */
+	li	t1, BRCM_BrPRED_BHT_ENABLE
+	sll	t1, CP0_BRCM_MODE_BrPRED_SHIFT
+	or	t0, t0, t1
+
+	/* set history count to 8 */
+	li	t1, 8
+	sll	t1, CP0_BRCM_MODE_BrHIST_SHIFT
+	or	t0, t0, t1
+
+	mtc0	t0, CP0_BRCM_MODE
+	jr	ra
+	nop
+	.set	reorder
+END(set_branch_pred)
+
+
+/*
+ * Function: 	set_luc
+ * Arguments:	None
+ * Returns:	None
+ * Description: set link uncached (Luc)
+ * Trashes:	t0, t1
+ *
+ */
+LEAF(set_luc)
+	.set noreorder
+	mfc0	t0, CP0_BRCM_MODE
+	li	t1, ~(CP0_BRCM_MODE_Luc_MASK)
+	and	t0, t0, t1
+
+	/* set Luc */
+	ori	t0, t0, CP0_BRCM_MODE_Luc_MASK
+
+	mtc0	t0, CP0_BRCM_MODE
+	jr	ra
+	nop
+	.set	reorder
+END(set_luc)
+
+/*
+ * Function: 	set_cwf_tse
+ * Arguments:	None
+ * Returns:	None
+ * Description: set the CWF and TSE bits
+ * Trashes:	t0, t1
+ *
+ */
+LEAF(set_cwf_tse)
+	.set noreorder
+	mfc0	t0, CP0_BRCM_CONFIG0
+	li	t1, (CP0_BRCM_CONFIG0_CWF_MASK | CP0_BRCM_CONFIG0_TSE_MASK)
+	or	t0, t0, t1
+
+	mtc0	t0, CP0_BRCM_CONFIG0
+	jr	ra
+	nop
+	.set	reorder
+END(set_cwf_tse)
+
+/*
+ * Function: 	set_clock_ratio
+ * Arguments:	set clock ratio specified by a0
+ * Returns:	None
+ * Description:
+ * Trashes:	v0, v1, a0, a1
+ *
+ *	pseudo code:
+ *
+ */
+LEAF(set_clock_ratio)
+	.set noreorder
+
+	mfc0	t0, CP0_BRCM_MODE
+	li	t1, ~(CP0_BRCM_MODE_SET_MASK | CP0_BRCM_MODE_ClkRATIO_MASK)
+	and	t0, t0, t1
+	li	t1, CP0_BRCM_MODE_SET_MASK
+	or	t0, t0, t1
+	or	t0, t0, a0
+	mtc0	t0, CP0_BRCM_MODE
+	jr	ra
+	nop
+	.set	reorder
+END(set_clock_ratio)
+/*
+ * Function: set_zephyr
+ * Arguments:	None
+ * Returns:	None
+ * Description: Set any zephyr bits
+ * Trashes:	t0 & t1
+ *
+ */
+LEAF(set_zephyr)
+	.set	noreorder
+
+	/* enable read/write of CP0 #22 sel. 8 */
+	li	t0, 0x5a455048
+	.word	0x4088b00f	/* mtc0    t0, $22, 15 */
+
+	.word	0x4008b008	/* mfc0    t0, $22, 8 */
+	li	t1, 0x09008000	/* turn off pref, jtb */
+	or	t0, t0, t1
+	.word	0x4088b008	/* mtc0    t0, $22, 8 */
+	sync
+
+	/* disable read/write of CP0 #22 sel 8 */
+	li	t0, 0x0
+	.word	0x4088b00f	/* mtc0    t0, $22, 15 */
+
+
+	jr	ra
+	nop
+	.set reorder
+
+END(set_zephyr)
+
+
+/*
+ * Function:	set_llmb
+ * Arguments:	a0=0 disables llmb, a0=1 enables llmb
+ * Returns:	None
+ * Description:
+ * Trashes:	t0, t1, t2
+ *
+ *      pseudo code:
+ *
+ */
+LEAF(set_llmb)
+	.set noreorder
+
+	li	t2, 0x90000000 | BRCM_ZSC_ALL_REGS_SELECT | BRCM_ZSC_CONFIG_REG
+	sync
+	cache	0x7, 0x0(t2)
+	sync
+	mfc0	t0, CP0_D_SEC_CACHE_DATA_LO
+	li	t1, ~(BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En)
+	and	t0, t0, t1
+
+	beqz	a0, svlmb
+	nop
+
+enable_lmb:
+	li	t1, (BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En)
+	or	t0, t0, t1
+
+svlmb:
+	mtc0	t0, CP0_D_SEC_CACHE_DATA_LO
+	sync
+	cache	0xb, 0x0(t2)
+	sync
+
+	jr	ra
+	nop
+	.set reorder
+
+END(set_llmb)
+/*
+ * Function: 	core_init
+ * Arguments:	none
+ * Returns:	None
+ * Description: initialize core related configuration
+ * Trashes:	v0,v1,a0,a1,t8
+ *
+ *	pseudo code:
+ *
+ */
+	.globl	core_init
+	.ent	core_init
+	.set	noreorder
+core_init:
+	move	t8, ra
+
+	/* set Zephyr bits. */
+	bal	set_zephyr
+	nop
+
+#if ENABLE_FPU==1
+	/* initialize the Floating point unit (both TPs) */
+	bal	init_fpu
+	nop
+#endif
+
+	/* set low latency memory bus */
+	li	a0, 1
+	bal	set_llmb
+	nop
+
+	/* set branch prediction (TP0 only) */
+	bal	set_branch_pred
+	nop
+
+	/* set link uncached */
+	bal	set_luc
+	nop
+
+	/* set CWF and TSE */
+	bal	set_cwf_tse
+	nop
+
+	/*
+	 * set the clock ratio by writing 1 to 'set'
+	 * and 0 to ClkRatio (TP0 only)
+	 */
+	li	a0, 0
+	bal	set_clock_ratio
+	nop
+
+	/* set other configuration to defaults */
+	bal	set_other_config
+	nop
+
+	move	ra, t8
+	jr	ra
+	nop
+
+	.set reorder
+	.end	core_init
+
+/*
+ * Function: 	clear_jump_target_buffer
+ * Arguments:	None
+ * Returns:	None
+ * Description:
+ * Trashes:	t0, t1, t2
+ *
+ */
+#define RESET_CALL_RETURN_STACK_THIS_THREAD		(0x06<<16)
+#define RESET_JUMP_TARGET_BUFFER_THIS_THREAD		(0x04<<16)
+#define JTB_CS_CNTL_MASK				(0xFF<<16)
+
+	.globl	clear_jump_target_buffer
+	.ent	clear_jump_target_buffer
+	.set	noreorder
+clear_jump_target_buffer:
+
+	mfc0	t0, $22, 2
+	nop
+	nop
+
+	li	t1, ~JTB_CS_CNTL_MASK
+	and	t0, t0, t1
+	li	t2, RESET_CALL_RETURN_STACK_THIS_THREAD
+	or	t0, t0, t2
+	mtc0	t0, $22, 2
+	nop
+	nop
+
+	and	t0, t0, t1
+	li	t2, RESET_JUMP_TARGET_BUFFER_THIS_THREAD
+	or	t0, t0, t2
+	mtc0	t0, $22, 2
+	nop
+	nop
+	jr	ra
+	nop
+
+	.end	clear_jump_target_buffer
+	.set	reorder
+/*
+ * Function: 	bmips_5xxx_init
+ * Arguments: 	None
+ * Returns:	None
+ * Description: Initialize the L1 caches, core configuration and jump target buffer
+ * Trashes:	v0, v1, t0, t1, t2, t5, t7, t8
+ *
+ */
+	.globl	bmips_5xxx_init
+	.ent	bmips_5xxx_init
+	.set	noreorder
+bmips_5xxx_init:
+
+	/* save return address and A0 */
+	move	t7, ra
+	move	t5, a0
+
+	jal	l1_init
+	nop
+
+	jal	core_init
+	nop
+
+	jal	clear_jump_target_buffer
+	nop
+
+	mtc0	zero, CP0_CAUSE
+
+	move 	a0, t5
+	jr	t7
+	nop
+
+	.end 	bmips_5xxx_init
+	.set	reorder
+
+
+#endif
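size_i_cache/size_d_cache above decode CP0 Config1: sets per way = 64 << IS, line size = 2^(IL+1) bytes, associativity = IA + 1, and the total cache size is the product of the three (likewise with DS/DL/DA for the D-cache). The same arithmetic as a stand-alone C sketch; the Config1 value below is constructed purely for illustration:

	#include <stdio.h>

	/* field positions match the IS/IL/IA constants defined above */
	#define CFG_FIELD(cfg, shift)	(((cfg) >> (shift)) & 7)

	int main(void)
	{
		unsigned int config1 = 0x00a30000;	/* IS=2, IL=4, IA=3 (example) */
		unsigned int is = CFG_FIELD(config1, 22);
		unsigned int il = CFG_FIELD(config1, 19);
		unsigned int ia = CFG_FIELD(config1, 16);
		unsigned int sets, line, ways;

		if (!il) {
			printf("no I-cache\n");
			return 0;
		}
		sets = 64u << is;		/* sets per way */
		line = 1u << (il + 1);		/* line size in bytes */
		ways = ia + 1;			/* set associativity */
		printf("I-cache: %u bytes (%u sets x %u bytes x %u ways)\n",
		       sets * line * ways, sets, line, ways);
		return 0;
	}

With the example value this prints a 32768-byte, 4-way cache with 32-byte lines, matching what the assembly would compute for the same Config1 contents.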
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index 8649507..921a5fa 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -88,12 +88,13 @@
 	li	k1, (1 << 19)
 	mfc0	k0, CP0_STATUS
 	and	k0, k1
-	beqz	k0, bmips_smp_entry
+	beqz	k0, soft_reset
 
 #if defined(CONFIG_CPU_BMIPS5000)
 	mfc0	k0, CP0_PRID
 	li	k1, PRID_IMP_BMIPS5000
-	andi	k0, 0xff00
+	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
+	andi	k0, PRID_IMP_BMIPS5000
 	bne	k0, k1, 1f
 
 	/* if we're not on core 0, this must be the SMP boot signal */
@@ -125,13 +126,48 @@
 	.set	arch=r4000
 	eret
 
+#ifdef CONFIG_SMP
+soft_reset:
+
+#if defined(CONFIG_CPU_BMIPS5000)
+	mfc0	k0, CP0_PRID
+	andi	k0, 0xff00
+	li	k1, PRID_IMP_BMIPS5200
+	bne	k0, k1, bmips_smp_entry
+
+        /* if running on TP 1, jump to bmips_smp_entry */
+        mfc0    k0, $22
+        li      k1, (1 << 24)
+        and     k1, k0
+        bnez    k1, bmips_smp_entry
+        nop
+
+        /*
+         * Running on TP0; this cannot be core 0 (the boot core).
+         * Check for a soft reset, which indicates a warm boot.
+         */
+        mfc0    k0, $12
+        li      k1, (1 << 20)
+        and     k0, k1
+        beqz    k0, bmips_smp_entry
+
+        /*
+         * Warm boot.
+         * Cache init is only done on TP0
+         */
+        la      k0, bmips_5xxx_init
+        jalr    k0
+        nop
+
+        b       bmips_smp_entry
+        nop
+#endif
+
 /***********************************************************************
  * CPU1 reset vector (used for the initial boot only)
  * This is still part of bmips_reset_nmi_vec().
  ***********************************************************************/
 
-#ifdef CONFIG_SMP
-
 bmips_smp_entry:
 
 	/* set up CP0 STATUS; enable FPU */
@@ -166,10 +202,12 @@
 2:
 #endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
 #if defined(CONFIG_CPU_BMIPS5000)
-	/* set exception vector base */
+	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
 	li	k1, PRID_IMP_BMIPS5000
+	andi	k0, PRID_IMP_BMIPS5000
 	bne	k0, k1, 3f
 
+	/* set exception vector base */
 	la	k0, ebase
 	lw	k0, 0(k0)
 	mtc0	k0, $15, 1
@@ -263,6 +301,8 @@
 #endif /* CONFIG_CPU_BMIPS4380 */
 #if defined(CONFIG_CPU_BMIPS5000)
 	li	t1, PRID_IMP_BMIPS5000
+	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
+	andi	t2, PRID_IMP_BMIPS5000
 	bne	t2, t1, 2f
 
 	mfc0	t0, $22, 5
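The repeated "mask with PRID_IMP_BMIPS5000 to cover both variants" comments work because the BMIPS5200 implementation code has all of the BMIPS5000 bits set, so and-ing the PRID's implementation field with PRID_IMP_BMIPS5000 maps both parts onto the same value. A quick sketch of that bit trick, assuming the conventional 0x5a00/0x5b00 implementation values (see asm/cpu.h for the authoritative definitions):

	#include <stdio.h>

	#define PRID_IMP_BMIPS5000	0x5a00	/* assumed value */
	#define PRID_IMP_BMIPS5200	0x5b00	/* assumed value */

	int main(void)
	{
		unsigned int prids[] = { PRID_IMP_BMIPS5000, PRID_IMP_BMIPS5200 };
		unsigned int i;

		for (i = 0; i < 2; i++) {
			unsigned int masked = prids[i] & PRID_IMP_BMIPS5000;
			/* both variants compare equal to PRID_IMP_BMIPS5000 */
			printf("0x%04x & 0x%04x = 0x%04x\n",
			       prids[i], PRID_IMP_BMIPS5000, masked);
		}
		return 0;
	}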
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index d8f9b35..ceca6cc 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -688,21 +688,9 @@
 			}
 			lose_fpu(1);    /* Save FPU state for the emulator. */
 			reg = insn.i_format.rt;
-			bit = 0;
-			switch (insn.i_format.rs) {
-			case bc1eqz_op:
-				/* Test bit 0 */
-				if (get_fpr32(&current->thread.fpu.fpr[reg], 0)
-				    & 0x1)
-					bit = 1;
-				break;
-			case bc1nez_op:
-				/* Test bit 0 */
-				if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0)
-				      & 0x1))
-					bit = 1;
-				break;
-			}
+			bit = get_fpr32(&current->thread.fpu.fpr[reg], 0) & 0x1;
+			if (insn.i_format.rs == bc1eqz_op)
+				bit = !bit;
 			own_fpu(1);
 			if (bit)
 				epc = epc + 4 +
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 8dfe6a6..e4c21bb 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -28,6 +28,83 @@
 	return res;
 }
 
+/**
+ * calculate_min_delta() - Calculate a good minimum delta for mips_next_event().
+ *
+ * Running under virtualisation can introduce overhead into mips_next_event() in
+ * the form of hypervisor emulation of CP0_Count/CP0_Compare registers,
+ * potentially with an unnatural frequency, which makes a fixed min_delta_ns
+ * value inappropriate as it may be too small.
+ *
+ * It can also introduce occasional latency from the guest being descheduled.
+ *
+ * This function calculates a good minimum delta based roughly on the 75th
+ * percentile of the time taken to do the mips_next_event() sequence, in order
+ * to handle potentially higher overhead while also eliminating outliers due to
+ * unpredictable hypervisor latency (which can be handled by retries).
+ *
+ * Return:	An appropriate minimum delta for the clock event device.
+ */
+static unsigned int calculate_min_delta(void)
+{
+	unsigned int cnt, i, j, k, l;
+	unsigned int buf1[4], buf2[3];
+	unsigned int min_delta;
+
+	/*
+	 * Calculate the median of 5 75th percentiles of 5 samples of how long
+	 * it takes to set CP0_Compare = CP0_Count + delta.
+	 */
+	for (i = 0; i < 5; ++i) {
+		for (j = 0; j < 5; ++j) {
+			/*
+			 * This is like the code in mips_next_event(), and
+			 * directly measures the borderline "safe" delta.
+			 */
+			cnt = read_c0_count();
+			write_c0_compare(cnt);
+			cnt = read_c0_count() - cnt;
+
+			/* Sorted insert into buf1 */
+			for (k = 0; k < j; ++k) {
+				if (cnt < buf1[k]) {
+					l = min_t(unsigned int,
+						  j, ARRAY_SIZE(buf1) - 1);
+					for (; l > k; --l)
+						buf1[l] = buf1[l - 1];
+					break;
+				}
+			}
+			if (k < ARRAY_SIZE(buf1))
+				buf1[k] = cnt;
+		}
+
+		/* Sorted insert of 75th percentile into buf2 */
+		for (k = 0; k < i; ++k) {
+			if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
+				l = min_t(unsigned int,
+					  i, ARRAY_SIZE(buf2) - 1);
+				for (; l > k; --l)
+					buf2[l] = buf2[l - 1];
+				break;
+			}
+		}
+		if (k < ARRAY_SIZE(buf2))
+			buf2[k] = buf1[ARRAY_SIZE(buf1) - 1];
+	}
+
+	/* Use 2 * median of 75th percentiles */
+	min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2;
+
+	/* Don't go too low */
+	if (min_delta < 0x300)
+		min_delta = 0x300;
+
+	pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n",
+		 __func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta);
+	return min_delta;
+}
+
 DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 int cp0_timer_irq_installed;
 
@@ -177,7 +254,7 @@
 {
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *cd;
-	unsigned int irq;
+	unsigned int irq, min_delta;
 
 	if (!cpu_has_counter || !mips_hpt_frequency)
 		return -ENXIO;
@@ -203,7 +280,8 @@
 
 	/* Calculate the min / max delta */
 	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
-	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
+	min_delta		= calculate_min_delta();
+	cd->min_delta_ns	= clockevent_delta2ns(min_delta, cd);
 
 	cd->rating		= 300;
 	cd->irq			= irq;
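calculate_min_delta() above keeps the four smallest of each batch of five timing samples (so buf1[3] approximates the 75th percentile), repeats that five times, keeps the three smallest of those percentiles (so buf2[2] is their median), doubles the result and clamps it to at least 0x300. A self-contained user-space model of the same selection logic, using made-up sample values since CP0_Count/CP0_Compare cannot be read from user space:

	#include <stdio.h>

	/* sorted insert that keeps only the 'cap' smallest values seen so far */
	static void sorted_insert(unsigned int *buf, unsigned int n_seen,
				  unsigned int cap, unsigned int val)
	{
		unsigned int used = n_seen < cap ? n_seen : cap;
		unsigned int k, l;

		for (k = 0; k < used; k++)
			if (val < buf[k])
				break;
		if (k >= cap)
			return;				/* larger than everything kept */
		for (l = (n_seen < cap ? n_seen : cap - 1); l > k; l--)
			buf[l] = buf[l - 1];
		buf[k] = val;
	}

	int main(void)
	{
		/* synthetic timing samples; the large ones model hypervisor outliers */
		unsigned int samples[5][5] = {
			{ 40, 900, 40, 41, 39 },
			{ 38, 42, 40, 2000, 41 },
			{ 45, 39, 40, 41, 40 },
			{ 40, 40, 39, 41, 300 },
			{ 41, 40, 42, 40, 39 },
		};
		unsigned int buf1[4], buf2[3];
		unsigned int i, j, min_delta;

		for (i = 0; i < 5; i++) {
			for (j = 0; j < 5; j++)
				sorted_insert(buf1, j, 4, samples[i][j]);
			/* buf1[3] = 4th smallest of 5 samples: ~75th percentile */
			sorted_insert(buf2, i, 3, buf1[3]);
		}
		/* buf2[2] = 3rd smallest of 5 percentiles: their median */
		min_delta = buf2[2] * 2;
		if (min_delta < 0x300)
			min_delta = 0x300;		/* same floor as the kernel */
		printf("min_delta = %#x\n", min_delta);
		return 0;
	}

Note how the outliers (300, 900, 2000) never reach buf2: each is dropped as the largest value of its batch, which is exactly the property the kernel relies on to ignore occasional hypervisor latency spikes.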
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index ac81edd..51b98dc 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -18,9 +18,12 @@
 #include <asm/mipsmtregs.h>
 #include <asm/pm.h>
 
+#define GCR_CPC_BASE_OFS	0x0088
 #define GCR_CL_COHERENCE_OFS	0x2008
 #define GCR_CL_ID_OFS		0x2028
 
+#define CPC_CL_VC_RUN_OFS	0x2028
+
 .extern mips_cm_base
 
 .set noreorder
@@ -60,6 +63,37 @@
 	 nop
 	.endm
 
+	/*
+	 * Set dest to non-zero if the core supports MIPSr6 multithreading
+	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
+	 * branch to nomt.
+	 */
+	.macro	has_vp	dest, nomt
+	mfc0	\dest, CP0_CONFIG, 1
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 2
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 3
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 4
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 5
+	andi	\dest, \dest, MIPS_CONF5_VP
+	beqz	\dest, \nomt
+	 nop
+	.endm
+
+	/* Calculate an uncached address for the CM GCRs */
+	.macro	cmgcrb	dest
+	.set	push
+	.set	noat
+	MFC0	$1, CP0_CMGCRBASE
+	PTR_SLL	$1, $1, 4
+	PTR_LI	\dest, UNCAC_BASE
+	PTR_ADDU \dest, \dest, $1
+	.set	pop
+	.endm
+
 .section .text.cps-vec
 .balign 0x1000
 
@@ -90,120 +124,64 @@
 	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
 	mtc0	t0, CP0_STATUS
 
-	/*
-	 * Clear the bits used to index the caches. Note that the architecture
-	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
-	 * be valid for all MIPS32 CPUs, even those for which said writes are
-	 * unnecessary.
-	 */
-	mtc0	zero, CP0_TAGLO, 0
-	mtc0	zero, CP0_TAGHI, 0
-	mtc0	zero, CP0_TAGLO, 2
-	mtc0	zero, CP0_TAGHI, 2
-	ehb
-
-	/* Primary cache configuration is indicated by Config1 */
-	mfc0	v0, CP0_CONFIG, 1
-
-	/* Detect I-cache line size */
-	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
-	beqz	t0, icache_done
-	 li	t1, 2
-	sllv	t0, t1, t0
-
-	/* Detect I-cache size */
-	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
-	xori	t2, t1, 0x7
-	beqz	t2, 1f
-	 li	t3, 32
-	addiu	t1, t1, 1
-	sllv	t1, t3, t1
-1:	/* At this point t1 == I-cache sets per way */
-	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
-	addiu	t2, t2, 1
-	mul	t1, t1, t0
-	mul	t1, t1, t2
-
-	li	a0, CKSEG0
-	PTR_ADD	a1, a0, t1
-1:	cache	Index_Store_Tag_I, 0(a0)
-	PTR_ADD	a0, a0, t0
-	bne	a0, a1, 1b
+	/* Skip cache & coherence setup if we're already coherent */
+	cmgcrb	v1
+	lw	s7, GCR_CL_COHERENCE_OFS(v1)
+	bnez	s7, 1f
 	 nop
-icache_done:
 
-	/* Detect D-cache line size */
-	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
-	beqz	t0, dcache_done
-	 li	t1, 2
-	sllv	t0, t1, t0
-
-	/* Detect D-cache size */
-	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
-	xori	t2, t1, 0x7
-	beqz	t2, 1f
-	 li	t3, 32
-	addiu	t1, t1, 1
-	sllv	t1, t3, t1
-1:	/* At this point t1 == D-cache sets per way */
-	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
-	addiu	t2, t2, 1
-	mul	t1, t1, t0
-	mul	t1, t1, t2
-
-	li	a0, CKSEG0
-	PTR_ADDU a1, a0, t1
-	PTR_SUBU a1, a1, t0
-1:	cache	Index_Store_Tag_D, 0(a0)
-	bne	a0, a1, 1b
-	 PTR_ADD a0, a0, t0
-dcache_done:
-
-	/* Set Kseg0 CCA to that in s0 */
-	mfc0	t0, CP0_CONFIG
-	ori	t0, 0x7
-	xori	t0, 0x7
-	or	t0, t0, s0
-	mtc0	t0, CP0_CONFIG
-	ehb
-
-	/* Calculate an uncached address for the CM GCRs */
-	MFC0	v1, CP0_CMGCRBASE
-	PTR_SLL	v1, v1, 4
-	PTR_LI	t0, UNCAC_BASE
-	PTR_ADDU v1, v1, t0
+	/* Initialize the L1 caches */
+	jal	mips_cps_cache_init
+	 nop
 
 	/* Enter the coherent domain */
 	li	t0, 0xff
 	sw	t0, GCR_CL_COHERENCE_OFS(v1)
 	ehb
 
+	/* Set Kseg0 CCA to that in s0 */
+1:	mfc0	t0, CP0_CONFIG
+	ori	t0, 0x7
+	xori	t0, 0x7
+	or	t0, t0, s0
+	mtc0	t0, CP0_CONFIG
+	ehb
+
 	/* Jump to kseg0 */
 	PTR_LA	t0, 1f
 	jr	t0
 	 nop
 
 	/*
-	 * We're up, cached & coherent. Perform any further required core-level
-	 * initialisation.
+	 * We're up, cached & coherent. Perform any EVA initialization necessary
+	 * before we access memory.
 	 */
-1:	jal	mips_cps_core_init
+1:	eva_init
+
+	/* Retrieve boot configuration pointers */
+	jal	mips_cps_get_bootcfg
 	 nop
 
-	/* Do any EVA initialization if necessary */
-	eva_init
+	/* Skip core-level init if we started up coherent */
+	bnez	s7, 1f
+	 nop
+
+	/* Perform any further required core-level initialisation */
+	jal	mips_cps_core_init
+	 nop
 
 	/*
 	 * Boot any other VPEs within this core that should be online, and
 	 * deactivate this VPE if it should be offline.
 	 */
+	move	a1, t9
 	jal	mips_cps_boot_vpes
-	 nop
+	 move	a0, v0
 
 	/* Off we go! */
-	PTR_L	t1, VPEBOOTCFG_PC(v0)
-	PTR_L	gp, VPEBOOTCFG_GP(v0)
-	PTR_L	sp, VPEBOOTCFG_SP(v0)
+1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
+	PTR_L	gp, VPEBOOTCFG_GP(v1)
+	PTR_L	sp, VPEBOOTCFG_SP(v1)
 	jr	t1
 	 nop
 	END(mips_cps_core_entry)
@@ -245,7 +223,6 @@
 
 .org 0x480
 LEAF(excep_ejtag)
-	DUMP_EXCEP("EJTAG")
 	PTR_LA	k0, ejtag_debug_handler
 	jr	k0
 	 nop
@@ -323,22 +300,35 @@
 	 nop
 	END(mips_cps_core_init)
 
-LEAF(mips_cps_boot_vpes)
-	/* Retrieve CM base address */
-	PTR_LA	t0, mips_cm_base
-	PTR_L	t0, 0(t0)
-
+/**
+ * mips_cps_get_bootcfg() - retrieve boot configuration pointers
+ *
+ * Returns: pointer to struct core_boot_config in v0, pointer to
+ *          struct vpe_boot_config in v1, VPE ID in t9
+ */
+LEAF(mips_cps_get_bootcfg)
 	/* Calculate a pointer to this cores struct core_boot_config */
+	cmgcrb	t0
 	lw	t0, GCR_CL_ID_OFS(t0)
 	li	t1, COREBOOTCFG_SIZE
 	mul	t0, t0, t1
 	PTR_LA	t1, mips_cps_core_bootcfg
 	PTR_L	t1, 0(t1)
-	PTR_ADDU t0, t0, t1
+	PTR_ADDU v0, t0, t1
 
 	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
 	li	t9, 0
-#ifdef CONFIG_MIPS_MT_SMP
+#if defined(CONFIG_CPU_MIPSR6)
+	has_vp	ta2, 1f
+
+	/*
+	 * Assume non-contiguous numbering. Perhaps some day we'll need
+	 * to handle contiguous VP numbering, but no such systems yet
+	 * exist.
+	 */
+	mfc0	t9, $3, 1
+	andi	t9, t9, 0xff
+#elif defined(CONFIG_MIPS_MT_SMP)
 	has_mt	ta2, 1f
 
 	/* Find the number of VPEs present in the core */
@@ -362,22 +352,43 @@
 
 1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
 	li	t1, VPEBOOTCFG_SIZE
-	mul	v0, t9, t1
-	PTR_L	ta3, COREBOOTCFG_VPECONFIG(t0)
-	PTR_ADDU v0, v0, ta3
+	mul	v1, t9, t1
+	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
+	PTR_ADDU v1, v1, ta3
 
-#ifdef CONFIG_MIPS_MT_SMP
-
-	/* If the core doesn't support MT then return */
-	bnez	ta2, 1f
-	 nop
 	jr	ra
 	 nop
+	END(mips_cps_get_bootcfg)
+
+LEAF(mips_cps_boot_vpes)
+	PTR_L	ta2, COREBOOTCFG_VPEMASK(a0)
+	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)
+
+#if defined(CONFIG_CPU_MIPSR6)
+
+	has_vp	t0, 5f
+
+	/* Find base address of CPC */
+	cmgcrb	t3
+	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
+	PTR_LI	t2, ~0x7fff
+	and	t1, t1, t2
+	PTR_LI	t2, UNCAC_BASE
+	PTR_ADD	t1, t1, t2
+
+	/* Set VC_RUN to the VPE mask */
+	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)
+	ehb
+
+#elif defined(CONFIG_MIPS_MT)
 
 	.set	push
 	.set	mt
 
-1:	/* Enter VPE configuration state */
+	/* If the core doesn't support MT then return */
+	has_mt	t0, 5f
+
+	/* Enter VPE configuration state */
 	dvpe
 	PTR_LA	t1, 1f
 	jr.hb	t1
@@ -388,7 +399,6 @@
 	ehb
 
 	/* Loop through each VPE */
-	PTR_L	ta2, COREBOOTCFG_VPEMASK(t0)
 	move	t8, ta2
 	li	ta1, 0
 
@@ -465,7 +475,7 @@
 
 	/* Check whether this VPE is meant to be running */
 	li	t0, 1
-	sll	t0, t0, t9
+	sll	t0, t0, a1
 	and	t0, t0, t8
 	bnez	t0, 2f
 	 nop
@@ -482,10 +492,84 @@
 #endif /* CONFIG_MIPS_MT_SMP */
 
 	/* Return */
-	jr	ra
+5:	jr	ra
 	 nop
 	END(mips_cps_boot_vpes)
 
+LEAF(mips_cps_cache_init)
+	/*
+	 * Clear the bits used to index the caches. Note that the architecture
+	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
+	 * be valid for all MIPS32 CPUs, even those for which said writes are
+	 * unnecessary.
+	 */
+	mtc0	zero, CP0_TAGLO, 0
+	mtc0	zero, CP0_TAGHI, 0
+	mtc0	zero, CP0_TAGLO, 2
+	mtc0	zero, CP0_TAGHI, 2
+	ehb
+
+	/* Primary cache configuration is indicated by Config1 */
+	mfc0	v0, CP0_CONFIG, 1
+
+	/* Detect I-cache line size */
+	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
+	beqz	t0, icache_done
+	 li	t1, 2
+	sllv	t0, t1, t0
+
+	/* Detect I-cache size */
+	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
+	xori	t2, t1, 0x7
+	beqz	t2, 1f
+	 li	t3, 32
+	addiu	t1, t1, 1
+	sllv	t1, t3, t1
+1:	/* At this point t1 == I-cache sets per way */
+	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+	addiu	t2, t2, 1
+	mul	t1, t1, t0
+	mul	t1, t1, t2
+
+	li	a0, CKSEG0
+	PTR_ADD	a1, a0, t1
+1:	cache	Index_Store_Tag_I, 0(a0)
+	PTR_ADD	a0, a0, t0
+	bne	a0, a1, 1b
+	 nop
+icache_done:
+
+	/* Detect D-cache line size */
+	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
+	beqz	t0, dcache_done
+	 li	t1, 2
+	sllv	t0, t1, t0
+
+	/* Detect D-cache size */
+	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
+	xori	t2, t1, 0x7
+	beqz	t2, 1f
+	 li	t3, 32
+	addiu	t1, t1, 1
+	sllv	t1, t3, t1
+1:	/* At this point t1 == D-cache sets per way */
+	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+	addiu	t2, t2, 1
+	mul	t1, t1, t0
+	mul	t1, t1, t2
+
+	li	a0, CKSEG0
+	PTR_ADDU a1, a0, t1
+	PTR_SUBU a1, a1, t0
+1:	cache	Index_Store_Tag_D, 0(a0)
+	bne	a0, a1, 1b
+	 PTR_ADD a0, a0, t0
+dcache_done:
+
+	jr	ra
+	 nop
+	END(mips_cps_cache_init)
+
 #if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
 
 	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b725b71..5ac5c3e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -539,6 +539,7 @@
 	switch (c->cputype) {
 	case CPU_PROAPTIV:
 	case CPU_P5600:
+	case CPU_P6600:
 		/* proAptiv & related cores use Config6 to enable the FTLB */
 		config = read_c0_config6();
 		/* Clear the old probability value */
@@ -561,6 +562,19 @@
 		write_c0_config7(config | (calculate_ftlb_probability(c)
 					   << MIPS_CONF7_FTLBP_SHIFT));
 		break;
+	case CPU_LOONGSON3:
+		/* Flush ITLB, DTLB, VTLB and FTLB */
+		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
+			      LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
+		/* Loongson-3 cores use Config6 to enable the FTLB */
+		config = read_c0_config6();
+		if (enable)
+			/* Enable FTLB */
+			write_c0_config6(config & ~MIPS_CONF6_FTLBDIS);
+		else
+			/* Disable FTLB */
+			write_c0_config6(config | MIPS_CONF6_FTLBDIS);
+		break;
 	default:
 		return 1;
 	}
@@ -634,6 +648,8 @@
 
 	if (config1 & MIPS_CONF1_MD)
 		c->ases |= MIPS_ASE_MDMX;
+	if (config1 & MIPS_CONF1_PC)
+		c->options |= MIPS_CPU_PERF;
 	if (config1 & MIPS_CONF1_WR)
 		c->options |= MIPS_CPU_WATCH;
 	if (config1 & MIPS_CONF1_CA)
@@ -673,18 +689,25 @@
 
 	if (config3 & MIPS_CONF3_SM) {
 		c->ases |= MIPS_ASE_SMARTMIPS;
-		c->options |= MIPS_CPU_RIXI;
+		c->options |= MIPS_CPU_RIXI | MIPS_CPU_CTXTC;
 	}
 	if (config3 & MIPS_CONF3_RXI)
 		c->options |= MIPS_CPU_RIXI;
+	if (config3 & MIPS_CONF3_CTXTC)
+		c->options |= MIPS_CPU_CTXTC;
 	if (config3 & MIPS_CONF3_DSP)
 		c->ases |= MIPS_ASE_DSP;
-	if (config3 & MIPS_CONF3_DSP2P)
+	if (config3 & MIPS_CONF3_DSP2P) {
 		c->ases |= MIPS_ASE_DSP2P;
+		if (cpu_has_mips_r6)
+			c->ases |= MIPS_ASE_DSP3;
+	}
 	if (config3 & MIPS_CONF3_VINT)
 		c->options |= MIPS_CPU_VINT;
 	if (config3 & MIPS_CONF3_VEIC)
 		c->options |= MIPS_CPU_VEIC;
+	if (config3 & MIPS_CONF3_LPA)
+		c->options |= MIPS_CPU_LPA;
 	if (config3 & MIPS_CONF3_MT)
 		c->ases |= MIPS_ASE_MIPSMT;
 	if (config3 & MIPS_CONF3_ULRI)
@@ -695,6 +718,10 @@
 		c->ases |= MIPS_ASE_VZ;
 	if (config3 & MIPS_CONF3_SC)
 		c->options |= MIPS_CPU_SEGMENTS;
+	if (config3 & MIPS_CONF3_BI)
+		c->options |= MIPS_CPU_BADINSTR;
+	if (config3 & MIPS_CONF3_BP)
+		c->options |= MIPS_CPU_BADINSTRP;
 	if (config3 & MIPS_CONF3_MSA)
 		c->ases |= MIPS_ASE_MSA;
 	if (config3 & MIPS_CONF3_PW) {
@@ -715,6 +742,7 @@
 	unsigned int newcf4;
 	unsigned int mmuextdef;
 	unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE;
+	unsigned long asid_mask;
 
 	config4 = read_c0_config4();
 
@@ -773,7 +801,20 @@
 		}
 	}
 
-	c->kscratch_mask = (config4 >> 16) & 0xff;
+	c->kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST)
+				>> MIPS_CONF4_KSCREXIST_SHIFT;
+
+	asid_mask = MIPS_ENTRYHI_ASID;
+	if (config4 & MIPS_CONF4_AE)
+		asid_mask |= MIPS_ENTRYHI_ASIDX;
+	set_cpu_asid_mask(c, asid_mask);
+
+	/*
+	 * Warn if the computed ASID mask doesn't match the mask the kernel
+	 * is built for. This may indicate either a serious problem or an
+	 * easy optimisation opportunity, but either way should be addressed.
+	 */
+	WARN_ON(asid_mask != cpu_asid_mask(c));
 
 	return config4 & MIPS_CONF_M;
 }
@@ -796,6 +837,8 @@
 	if (config5 & MIPS_CONF5_MVH)
 		c->options |= MIPS_CPU_XPA;
 #endif
+	if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
+		c->options |= MIPS_CPU_VP;
 
 	return config5 & MIPS_CONF_M;
 }
@@ -826,17 +869,43 @@
 	if (ok)
 		ok = decode_config5(c);
 
-	mips_probe_watch_registers(c);
+	/* Probe the EBase.WG bit */
+	if (cpu_has_mips_r2_r6) {
+		u64 ebase;
+		unsigned int status;
 
-	if (cpu_has_rixi) {
-		/* Enable the RIXI exceptions */
-		set_c0_pagegrain(PG_IEC);
-		back_to_back_c0_hazard();
-		/* Verify the IEC bit is set */
-		if (read_c0_pagegrain() & PG_IEC)
-			c->options |= MIPS_CPU_RIXIEX;
+		/* {read,write}_c0_ebase_64() may be UNDEFINED prior to r6 */
+		ebase = cpu_has_mips64r6 ? read_c0_ebase_64()
+					 : (s32)read_c0_ebase();
+		if (ebase & MIPS_EBASE_WG) {
+			/* WG bit already set, we can avoid the clumsy probe */
+			c->options |= MIPS_CPU_EBASE_WG;
+		} else {
+			/* It's UNDEFINED to change EBase while BEV=0 */
+			status = read_c0_status();
+			write_c0_status(status | ST0_BEV);
+			irq_enable_hazard();
+			/*
+			 * On pre-r6 cores, this may well clobber the upper bits
+			 * of EBase. This is hard to avoid without potentially
+			 * hitting UNDEFINED dm*c0 behaviour if EBase is 32-bit.
+			 */
+			if (cpu_has_mips64r6)
+				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+			else
+				write_c0_ebase(ebase | MIPS_EBASE_WG);
+			back_to_back_c0_hazard();
+			/* Restore BEV */
+			write_c0_status(status);
+			if (read_c0_ebase() & MIPS_EBASE_WG) {
+				c->options |= MIPS_CPU_EBASE_WG;
+				write_c0_ebase(ebase);
+			}
+		}
 	}
 
+	mips_probe_watch_registers(c);
+
 #ifndef CONFIG_MIPS_CPS
 	if (cpu_has_mips_r2_r6) {
 		c->core = get_ebase_cpunum();
@@ -846,6 +915,235 @@
 #endif
 }
 
+/*
+ * Probe for certain guest capabilities by writing config bits and reading back.
+ * Finally write back the original value.
+ */
+#define probe_gc0_config(name, maxconf, bits)				\
+do {									\
+	unsigned int tmp;						\
+	tmp = read_gc0_##name();					\
+	write_gc0_##name(tmp | (bits));					\
+	back_to_back_c0_hazard();					\
+	maxconf = read_gc0_##name();					\
+	write_gc0_##name(tmp);						\
+} while (0)
+
+/*
+ * Probe for dynamic guest capabilities by changing certain config bits and
+ * reading back to see if they change. Finally write back the original value.
+ */
+#define probe_gc0_config_dyn(name, maxconf, dynconf, bits)		\
+do {									\
+	maxconf = read_gc0_##name();					\
+	write_gc0_##name(maxconf ^ (bits));				\
+	back_to_back_c0_hazard();					\
+	dynconf = maxconf ^ read_gc0_##name();				\
+	write_gc0_##name(maxconf);					\
+	maxconf |= dynconf;						\
+} while (0)
+
+static inline unsigned int decode_guest_config0(struct cpuinfo_mips *c)
+{
+	unsigned int config0;
+
+	probe_gc0_config(config, config0, MIPS_CONF_M);
+
+	if (config0 & MIPS_CONF_M)
+		c->guest.conf |= BIT(1);
+	return config0 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config1(struct cpuinfo_mips *c)
+{
+	unsigned int config1, config1_dyn;
+
+	probe_gc0_config_dyn(config1, config1, config1_dyn,
+			     MIPS_CONF_M | MIPS_CONF1_PC | MIPS_CONF1_WR |
+			     MIPS_CONF1_FP);
+
+	if (config1 & MIPS_CONF1_FP)
+		c->guest.options |= MIPS_CPU_FPU;
+	if (config1_dyn & MIPS_CONF1_FP)
+		c->guest.options_dyn |= MIPS_CPU_FPU;
+
+	if (config1 & MIPS_CONF1_WR)
+		c->guest.options |= MIPS_CPU_WATCH;
+	if (config1_dyn & MIPS_CONF1_WR)
+		c->guest.options_dyn |= MIPS_CPU_WATCH;
+
+	if (config1 & MIPS_CONF1_PC)
+		c->guest.options |= MIPS_CPU_PERF;
+	if (config1_dyn & MIPS_CONF1_PC)
+		c->guest.options_dyn |= MIPS_CPU_PERF;
+
+	if (config1 & MIPS_CONF_M)
+		c->guest.conf |= BIT(2);
+	return config1 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config2(struct cpuinfo_mips *c)
+{
+	unsigned int config2;
+
+	probe_gc0_config(config2, config2, MIPS_CONF_M);
+
+	if (config2 & MIPS_CONF_M)
+		c->guest.conf |= BIT(3);
+	return config2 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
+{
+	unsigned int config3, config3_dyn;
+
+	probe_gc0_config_dyn(config3, config3, config3_dyn,
+			     MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_CTXTC);
+
+	if (config3 & MIPS_CONF3_CTXTC)
+		c->guest.options |= MIPS_CPU_CTXTC;
+	if (config3_dyn & MIPS_CONF3_CTXTC)
+		c->guest.options_dyn |= MIPS_CPU_CTXTC;
+
+	if (config3 & MIPS_CONF3_PW)
+		c->guest.options |= MIPS_CPU_HTW;
+
+	if (config3 & MIPS_CONF3_SC)
+		c->guest.options |= MIPS_CPU_SEGMENTS;
+
+	if (config3 & MIPS_CONF3_BI)
+		c->guest.options |= MIPS_CPU_BADINSTR;
+	if (config3 & MIPS_CONF3_BP)
+		c->guest.options |= MIPS_CPU_BADINSTRP;
+
+	if (config3 & MIPS_CONF3_MSA)
+		c->guest.ases |= MIPS_ASE_MSA;
+	if (config3_dyn & MIPS_CONF3_MSA)
+		c->guest.ases_dyn |= MIPS_ASE_MSA;
+
+	if (config3 & MIPS_CONF_M)
+		c->guest.conf |= BIT(4);
+	return config3 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config4(struct cpuinfo_mips *c)
+{
+	unsigned int config4;
+
+	probe_gc0_config(config4, config4,
+			 MIPS_CONF_M | MIPS_CONF4_KSCREXIST);
+
+	c->guest.kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST)
+				>> MIPS_CONF4_KSCREXIST_SHIFT;
+
+	if (config4 & MIPS_CONF_M)
+		c->guest.conf |= BIT(5);
+	return config4 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
+{
+	unsigned int config5, config5_dyn;
+
+	probe_gc0_config_dyn(config5, config5, config5_dyn,
+			 MIPS_CONF_M | MIPS_CONF5_MRP);
+
+	if (config5 & MIPS_CONF5_MRP)
+		c->guest.options |= MIPS_CPU_MAAR;
+	if (config5_dyn & MIPS_CONF5_MRP)
+		c->guest.options_dyn |= MIPS_CPU_MAAR;
+
+	if (config5 & MIPS_CONF5_LLB)
+		c->guest.options |= MIPS_CPU_RW_LLB;
+
+	if (config5 & MIPS_CONF_M)
+		c->guest.conf |= BIT(6);
+	return config5 & MIPS_CONF_M;
+}
+
+static inline void decode_guest_configs(struct cpuinfo_mips *c)
+{
+	unsigned int ok;
+
+	ok = decode_guest_config0(c);
+	if (ok)
+		ok = decode_guest_config1(c);
+	if (ok)
+		ok = decode_guest_config2(c);
+	if (ok)
+		ok = decode_guest_config3(c);
+	if (ok)
+		ok = decode_guest_config4(c);
+	if (ok)
+		decode_guest_config5(c);
+}
+
+static inline void cpu_probe_guestctl0(struct cpuinfo_mips *c)
+{
+	unsigned int guestctl0, temp;
+
+	guestctl0 = read_c0_guestctl0();
+
+	if (guestctl0 & MIPS_GCTL0_G0E)
+		c->options |= MIPS_CPU_GUESTCTL0EXT;
+	if (guestctl0 & MIPS_GCTL0_G1)
+		c->options |= MIPS_CPU_GUESTCTL1;
+	if (guestctl0 & MIPS_GCTL0_G2)
+		c->options |= MIPS_CPU_GUESTCTL2;
+	if (!(guestctl0 & MIPS_GCTL0_RAD)) {
+		c->options |= MIPS_CPU_GUESTID;
+
+		/*
+		 * Probe for Direct Root to Guest (DRG). Set GuestCtl1.RID = 0
+		 * first, otherwise all data accesses will be fully virtualised
+		 * as if they were performed by guest mode.
+		 */
+		write_c0_guestctl1(0);
+		tlbw_use_hazard();
+
+		write_c0_guestctl0(guestctl0 | MIPS_GCTL0_DRG);
+		back_to_back_c0_hazard();
+		temp = read_c0_guestctl0();
+
+		if (temp & MIPS_GCTL0_DRG) {
+			write_c0_guestctl0(guestctl0);
+			c->options |= MIPS_CPU_DRG;
+		}
+	}
+}
+
+static inline void cpu_probe_guestctl1(struct cpuinfo_mips *c)
+{
+	if (cpu_has_guestid) {
+		/* determine the number of bits of GuestID available */
+		write_c0_guestctl1(MIPS_GCTL1_ID);
+		back_to_back_c0_hazard();
+		c->guestid_mask = (read_c0_guestctl1() & MIPS_GCTL1_ID)
+						>> MIPS_GCTL1_ID_SHIFT;
+		write_c0_guestctl1(0);
+	}
+}
+
+static inline void cpu_probe_gtoffset(struct cpuinfo_mips *c)
+{
+	/* determine the number of bits of GTOffset available */
+	write_c0_gtoffset(0xffffffff);
+	back_to_back_c0_hazard();
+	c->gtoffset_mask = read_c0_gtoffset();
+	write_c0_gtoffset(0);
+}
+
+static inline void cpu_probe_vz(struct cpuinfo_mips *c)
+{
+	cpu_probe_guestctl0(c);
+	if (cpu_has_guestctl1)
+		cpu_probe_guestctl1(c);
+
+	cpu_probe_gtoffset(c);
+
+	decode_guest_configs(c);
+}
+
 #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
 		| MIPS_CPU_COUNTER)
 
@@ -1172,7 +1470,7 @@
 			set_isa(c, MIPS_CPU_ISA_III);
 			c->fpu_msk31 |= FPU_CSR_CONDX;
 			break;
-		case PRID_REV_LOONGSON3A:
+		case PRID_REV_LOONGSON3A_R1:
 			c->cputype = CPU_LOONGSON3;
 			__cpu_name[cpu] = "ICT Loongson-3";
 			set_elf_platform(cpu, "loongson3a");
@@ -1314,6 +1612,10 @@
 		c->cputype = CPU_P5600;
 		__cpu_name[cpu] = "MIPS P5600";
 		break;
+	case PRID_IMP_P6600:
+		c->cputype = CPU_P6600;
+		__cpu_name[cpu] = "MIPS P6600";
+		break;
 	case PRID_IMP_I6400:
 		c->cputype = CPU_I6400;
 		__cpu_name[cpu] = "MIPS I6400";
@@ -1322,6 +1624,10 @@
 		c->cputype = CPU_M5150;
 		__cpu_name[cpu] = "MIPS M5150";
 		break;
+	case PRID_IMP_M6250:
+		c->cputype = CPU_M6250;
+		__cpu_name[cpu] = "MIPS M6250";
+		break;
 	}
 
 	decode_configs(c);
@@ -1435,6 +1741,7 @@
 			c->cputype = CPU_BMIPS4380;
 			__cpu_name[cpu] = "Broadcom BMIPS4380";
 			set_elf_platform(cpu, "bmips4380");
+			c->options |= MIPS_CPU_RIXI;
 		} else {
 			c->cputype = CPU_BMIPS4350;
 			__cpu_name[cpu] = "Broadcom BMIPS4350";
@@ -1445,9 +1752,12 @@
 	case PRID_IMP_BMIPS5000:
 	case PRID_IMP_BMIPS5200:
 		c->cputype = CPU_BMIPS5000;
-		__cpu_name[cpu] = "Broadcom BMIPS5000";
+		if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_BMIPS5200)
+			__cpu_name[cpu] = "Broadcom BMIPS5200";
+		else
+			__cpu_name[cpu] = "Broadcom BMIPS5000";
 		set_elf_platform(cpu, "bmips5000");
-		c->options |= MIPS_CPU_ULRI;
+		c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI;
 		break;
 	}
 }
@@ -1481,6 +1791,8 @@
 		set_elf_platform(cpu, "octeon2");
 		break;
 	case PRID_IMP_CAVIUM_CN70XX:
+	case PRID_IMP_CAVIUM_CN73XX:
+	case PRID_IMP_CAVIUM_CNF75XX:
 	case PRID_IMP_CAVIUM_CN78XX:
 		c->cputype = CPU_CAVIUM_OCTEON3;
 		__cpu_name[cpu] = "Cavium Octeon III";
@@ -1493,6 +1805,29 @@
 	}
 }
 
+static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+{
+	switch (c->processor_id & PRID_IMP_MASK) {
+	case PRID_IMP_LOONGSON_64:  /* Loongson-2/3 */
+		switch (c->processor_id & PRID_REV_MASK) {
+		case PRID_REV_LOONGSON3A_R2:
+			c->cputype = CPU_LOONGSON3;
+			__cpu_name[cpu] = "ICT Loongson-3";
+			set_elf_platform(cpu, "loongson3a");
+			set_isa(c, MIPS_CPU_ISA_M64R2);
+			break;
+		}
+
+		decode_configs(c);
+		c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+		break;
+	default:
+		panic("Unknown Loongson Processor ID!");
+		break;
+	}
+}
+
 static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
 {
 	decode_configs(c);
@@ -1640,6 +1975,9 @@
 	case PRID_COMP_CAVIUM:
 		cpu_probe_cavium(c, cpu);
 		break;
+	case PRID_COMP_LOONGSON:
+		cpu_probe_loongson(c, cpu);
+		break;
 	case PRID_COMP_INGENIC_D0:
 	case PRID_COMP_INGENIC_D1:
 	case PRID_COMP_INGENIC_E1:
@@ -1660,6 +1998,15 @@
 	 */
 	BUG_ON(current_cpu_type() != c->cputype);
 
+	if (cpu_has_rixi) {
+		/* Enable the RIXI exceptions */
+		set_c0_pagegrain(PG_IEC);
+		back_to_back_c0_hazard();
+		/* Verify the IEC bit is set */
+		if (read_c0_pagegrain() & PG_IEC)
+			c->options |= MIPS_CPU_RIXIEX;
+	}
+
 	if (mips_fpu_disabled)
 		c->options &= ~MIPS_CPU_FPU;
 
@@ -1699,6 +2046,9 @@
 		elf_hwcap |= HWCAP_MIPS_MSA;
 	}
 
+	if (cpu_has_vz)
+		cpu_probe_vz(c);
+
 	cpu_probe_vmbits(c);
 
 #ifdef CONFIG_64BIT
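The probe_gc0_config()/probe_gc0_config_dyn() macros added above follow a common CP0 probing pattern: read the register, write candidate bits (or their complement), read back to see which bits actually stuck or toggled, then restore the original value. A user-space model of the dynamic variant, with a plain variable standing in for the guest config register and made-up bit values:

	#include <stdio.h>

	static unsigned int fake_reg = 0x80000407;	/* example register contents */
	static unsigned int writable = 0x00000401;	/* bits the "hardware" lets us flip */

	static unsigned int read_reg(void)
	{
		return fake_reg;
	}

	static void write_reg(unsigned int val)
	{
		/* only the writable bits take effect, as on real hardware */
		fake_reg = (fake_reg & ~writable) | (val & writable);
	}

	int main(void)
	{
		unsigned int bits = 0x00010403;		/* candidate feature bits */
		unsigned int maxconf, dynconf;

		maxconf = read_reg();
		write_reg(maxconf ^ bits);		/* try to toggle them */
		dynconf = maxconf ^ read_reg();		/* which ones moved? */
		write_reg(maxconf);			/* restore the original value */
		maxconf |= dynconf;

		printf("maxconf = %#x, dynamically writable = %#x\n", maxconf, dynconf);
		return 0;
	}

In the kernel, the same maxconf/dynconf split is what separates c->guest.options from c->guest.options_dyn in the decode_guest_config*() helpers.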
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index d434d5d..610f0f3 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -14,12 +14,22 @@
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 
 #ifdef CONFIG_SMP
-static void crash_shutdown_secondary(void *ignore)
+static void crash_shutdown_secondary(void *passed_regs)
 {
-	struct pt_regs *regs;
+	struct pt_regs *regs = passed_regs;
 	int cpu = smp_processor_id();
 
-	regs = task_pt_regs(current);
+	/*
+	 * If we are passed registers, use those.  Otherwise get the
+	 * regs from the last interrupt, which should be correct, as
+	 * we are in an interrupt.  But if the regs are not there,
+	 * pull them from the top of the stack.  They are probably
+	 * wrong, but we need something to keep from crashing again.
+	 */
+	if (!regs)
+		regs = get_irq_regs();
+	if (!regs)
+		regs = task_pt_regs(current);
 
 	if (!cpu_online(cpu))
 		return;
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index baa7b6f..17326a9 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -130,7 +130,7 @@
 	/* end of rollback region (the region size must be power of two) */
 1:
 	jr	ra
-	nop
+	 nop
 	.set	pop
 	END(__r4k_wait)
 
@@ -172,7 +172,7 @@
 	mfc0	k0, CP0_EPC
 	.set	noreorder
 	j	k0
-	rfe
+	 rfe
 #else
 	and	k0, ST0_IE
 	bnez	k0, 1f
@@ -189,7 +189,7 @@
 	LONG_L	s0, TI_REGS($28)
 	LONG_S	sp, TI_REGS($28)
 	PTR_LA	ra, ret_from_irq
-	PTR_LA  v0, plat_irq_dispatch
+	PTR_LA	v0, plat_irq_dispatch
 	jr	v0
 #ifdef CONFIG_CPU_MICROMIPS
 	nop
@@ -292,7 +292,7 @@
 	MFC0	k0, CP0_DESAVE
 	.set	mips32
 	deret
-	.set pop
+	.set	pop
 	END(ejtag_debug_handler)
 
 /*
@@ -329,10 +329,10 @@
 	 * Clear BEV - required for page fault exception handler to work
 	 */
 	mfc0	k0, CP0_STATUS
-	ori     k0, k0, ST0_EXL
+	ori	k0, k0, ST0_EXL
 	li	k1, ~(ST0_BEV | ST0_ERL)
-	and     k0, k0, k1
-	mtc0    k0, CP0_STATUS
+	and	k0, k0, k1
+	mtc0	k0, CP0_STATUS
 	_ehb
 	SAVE_ALL
 	move	a0, sp
@@ -396,7 +396,7 @@
 
 	.macro	__BUILD_count exception
 	LONG_L	t0,exception_count_\exception
-	LONG_ADDIU t0, 1
+	LONG_ADDIU	t0, 1
 	LONG_S	t0,exception_count_\exception
 	.comm	exception_count\exception, 8, 8
 	.endm
@@ -455,10 +455,10 @@
 	.set	noreorder
 	/* check if TLB contains a entry for EPC */
 	MFC0	k1, CP0_ENTRYHI
-	andi	k1, 0xff	/* ASID_MASK */
+	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
 	MFC0	k0, CP0_EPC
-	PTR_SRL k0, _PAGE_SHIFT + 1
-	PTR_SLL k0, _PAGE_SHIFT + 1
+	PTR_SRL	k0, _PAGE_SHIFT + 1
+	PTR_SLL	k0, _PAGE_SHIFT + 1
 	or	k1, k0
 	MTC0	k1, CP0_ENTRYHI
 	mtc0_tlbw_hazard
@@ -478,27 +478,27 @@
 	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
 	MFC0	k1, CP0_EPC
 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
-	and     k0, k1, 1
-	beqz    k0, 1f
-	xor     k1, k0
-	lhu     k0, (k1)
-	lhu     k1, 2(k1)
-	ins     k1, k0, 16, 16
-	lui     k0, 0x007d
-	b       docheck
-	ori     k0, 0x6b3c
+	and	k0, k1, 1
+	beqz	k0, 1f
+	 xor	k1, k0
+	lhu	k0, (k1)
+	lhu	k1, 2(k1)
+	ins	k1, k0, 16, 16
+	lui	k0, 0x007d
+	b	docheck
+	 ori	k0, 0x6b3c
 1:
-	lui     k0, 0x7c03
-	lw      k1, (k1)
-	ori     k0, 0xe83b
+	lui	k0, 0x7c03
+	lw	k1, (k1)
+	ori	k0, 0xe83b
 #else
-	andi    k0, k1, 1
-	bnez    k0, handle_ri
-	lui     k0, 0x7c03
-	lw      k1, (k1)
-	ori     k0, 0xe83b
+	andi	k0, k1, 1
+	bnez	k0, handle_ri
+	 lui	k0, 0x7c03
+	lw	k1, (k1)
+	ori	k0, 0xe83b
 #endif
-	.set    reorder
+	.set	reorder
 docheck:
 	bne	k0, k1, handle_ri	/* if not ours */
 
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 4e4cc5b..56e8fed 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -21,7 +21,6 @@
 #include <asm/asmmacro.h>
 #include <asm/irqflags.h>
 #include <asm/regdef.h>
-#include <asm/pgtable-bits.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
 
@@ -132,7 +131,27 @@
 	set_saved_sp	sp, t0, t1
 	PTR_SUBU	sp, 4 * SZREG		# init stack pointer
 
+#ifdef CONFIG_RELOCATABLE
+	/* Copy kernel and apply the relocations */
+	jal		relocate_kernel
+
+	/* Repoint the sp into the new kernel image */
+	PTR_LI		sp, _THREAD_SIZE - 32 - PT_SIZE
+	PTR_ADDU	sp, $28
+	set_saved_sp	sp, t0, t1
+	PTR_SUBU	sp, 4 * SZREG		# init stack pointer
+
+	/*
+	 * relocate_kernel returns the entry point either
+	 * in the relocated kernel or the original if for
+	 * some reason relocation failed - jump there now
+	 * with instruction hazard barrier because of the
+	 * newly sync'd icache.
+	 */
+	jr.hb		v0
+#else
 	j		start_kernel
+#endif
 	END(kernel_entry)
 
 #ifdef CONFIG_SMP
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 46794d6..60ab4c4 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -181,6 +181,11 @@
 	case CPU_XLP:
 		cpu_wait = r4k_wait;
 		break;
+	case CPU_LOONGSON3:
+		if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
+			cpu_wait = r4k_wait;
+		break;
+
 	case CPU_BMIPS5000:
 		cpu_wait = r4k_wait_irqoff;
 		break;
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 1448c1f..760217b 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -24,7 +24,7 @@
 	"0x04", "cpc", "0x06", "0x07"
 };
 
-/* CM3 Tag ECC transation type */
+/* CM3 Tag ECC transaction type */
 static char *cm3_tr[16] = {
 	[0x0] = "ReqNoData",
 	[0x1] = "0x1",
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 1f5aac7..625ee77 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -28,6 +28,7 @@
 #include <asm/inst.h>
 #include <asm/mips-r2-to-r6-emul.h>
 #include <asm/local.h>
+#include <asm/mipsregs.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
@@ -940,42 +941,42 @@
 		switch (rt) {
 		case tgei_op:
 			if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
-				do_trap_or_bp(regs, 0, "TGEI");
+				do_trap_or_bp(regs, 0, 0, "TGEI");
 
 			MIPS_R2_STATS(traps);
 
 			break;
 		case tgeiu_op:
 			if (regs->regs[rs] >= MIPSInst_UIMM(inst))
-				do_trap_or_bp(regs, 0, "TGEIU");
+				do_trap_or_bp(regs, 0, 0, "TGEIU");
 
 			MIPS_R2_STATS(traps);
 
 			break;
 		case tlti_op:
 			if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
-				do_trap_or_bp(regs, 0, "TLTI");
+				do_trap_or_bp(regs, 0, 0, "TLTI");
 
 			MIPS_R2_STATS(traps);
 
 			break;
 		case tltiu_op:
 			if (regs->regs[rs] < MIPSInst_UIMM(inst))
-				do_trap_or_bp(regs, 0, "TLTIU");
+				do_trap_or_bp(regs, 0, 0, "TLTIU");
 
 			MIPS_R2_STATS(traps);
 
 			break;
 		case teqi_op:
 			if (regs->regs[rs] == MIPSInst_SIMM(inst))
-				do_trap_or_bp(regs, 0, "TEQI");
+				do_trap_or_bp(regs, 0, 0, "TEQI");
 
 			MIPS_R2_STATS(traps);
 
 			break;
 		case tnei_op:
 			if (regs->regs[rs] != MIPSInst_SIMM(inst))
-				do_trap_or_bp(regs, 0, "TNEI");
+				do_trap_or_bp(regs, 0, 0, "TNEI");
 
 			MIPS_R2_STATS(traps);
 
@@ -1251,10 +1252,10 @@
 			"	j	10b\n"
 			"	.previous\n"
 			"	.section	__ex_table,\"a\"\n"
-			"	.word	1b,8b\n"
-			"	.word	2b,8b\n"
-			"	.word	3b,8b\n"
-			"	.word	4b,8b\n"
+			STR(PTR) " 1b,8b\n"
+			STR(PTR) " 2b,8b\n"
+			STR(PTR) " 3b,8b\n"
+			STR(PTR) " 4b,8b\n"
 			"	.previous\n"
 			"	.set	pop\n"
 			: "+&r"(rt), "=&r"(rs),
@@ -1326,10 +1327,10 @@
 			"	j	10b\n"
 			"       .previous\n"
 			"	.section	__ex_table,\"a\"\n"
-			"	.word	1b,8b\n"
-			"	.word	2b,8b\n"
-			"	.word	3b,8b\n"
-			"	.word	4b,8b\n"
+			STR(PTR) " 1b,8b\n"
+			STR(PTR) " 2b,8b\n"
+			STR(PTR) " 3b,8b\n"
+			STR(PTR) " 4b,8b\n"
 			"	.previous\n"
 			"	.set	pop\n"
 			: "+&r"(rt), "=&r"(rs),
@@ -1397,10 +1398,10 @@
 			"	j	9b\n"
 			"	.previous\n"
 			"	.section        __ex_table,\"a\"\n"
-			"	.word	1b,8b\n"
-			"	.word	2b,8b\n"
-			"	.word	3b,8b\n"
-			"	.word	4b,8b\n"
+			STR(PTR) " 1b,8b\n"
+			STR(PTR) " 2b,8b\n"
+			STR(PTR) " 3b,8b\n"
+			STR(PTR) " 4b,8b\n"
 			"	.previous\n"
 			"	.set	pop\n"
 			: "+&r"(rt), "=&r"(rs),
@@ -1467,10 +1468,10 @@
 			"	j	9b\n"
 			"	.previous\n"
 			"	.section        __ex_table,\"a\"\n"
-			"	.word	1b,8b\n"
-			"	.word	2b,8b\n"
-			"	.word	3b,8b\n"
-			"	.word	4b,8b\n"
+			STR(PTR) " 1b,8b\n"
+			STR(PTR) " 2b,8b\n"
+			STR(PTR) " 3b,8b\n"
+			STR(PTR) " 4b,8b\n"
 			"	.previous\n"
 			"	.set	pop\n"
 			: "+&r"(rt), "=&r"(rs),
@@ -1582,14 +1583,14 @@
 			"	j	9b\n"
 			"	.previous\n"
 			"	.section        __ex_table,\"a\"\n"
-			"	.word	1b,8b\n"
-			"	.word	2b,8b\n"
-			"	.word	3b,8b\n"
-			"	.word	4b,8b\n"
-			"	.word	5b,8b\n"
-			"	.word	6b,8b\n"
-			"	.word	7b,8b\n"
-			"	.word	0b,8b\n"
+			STR(PTR) " 1b,8b\n"
+			STR(PTR) " 2b,8b\n"
+			STR(PTR) " 3b,8b\n"
+			STR(PTR) " 4b,8b\n"
+			STR(PTR) " 5b,8b\n"
+			STR(PTR) " 6b,8b\n"
+			STR(PTR) " 7b,8b\n"
+			STR(PTR) " 0b,8b\n"
 			"	.previous\n"
 			"	.set	pop\n"
 			: "+&r"(rt), "=&r"(rs),
@@ -1701,14 +1702,14 @@
 			"	j      9b\n"
 			"	.previous\n"
 			"	.section        __ex_table,\"a\"\n"
-			"	.word  1b,8b\n"
-			"	.word  2b,8b\n"
-			"	.word  3b,8b\n"
-			"	.word  4b,8b\n"
-			"	.word  5b,8b\n"
-			"	.word  6b,8b\n"
-			"	.word  7b,8b\n"
-			"	.word  0b,8b\n"
+			STR(PTR) " 1b,8b\n"
+			STR(PTR) " 2b,8b\n"
+			STR(PTR) " 3b,8b\n"
+			STR(PTR) " 4b,8b\n"
+			STR(PTR) " 5b,8b\n"
+			STR(PTR) " 6b,8b\n"
+			STR(PTR) " 7b,8b\n"
+			STR(PTR) " 0b,8b\n"
 			"	.previous\n"
 			"	.set    pop\n"
 			: "+&r"(rt), "=&r"(rs),
@@ -1820,14 +1821,14 @@
 			"	j	9b\n"
 			"	.previous\n"
 			"	.section        __ex_table,\"a\"\n"
-			"	.word	1b,8b\n"
-			"	.word	2b,8b\n"
-			"	.word	3b,8b\n"
-			"	.word	4b,8b\n"
-			"	.word	5b,8b\n"
-			"	.word	6b,8b\n"
-			"	.word	7b,8b\n"
-			"	.word	0b,8b\n"
+			STR(PTR) " 1b,8b\n"
+			STR(PTR) " 2b,8b\n"
+			STR(PTR) " 3b,8b\n"
+			STR(PTR) " 4b,8b\n"
+			STR(PTR) " 5b,8b\n"
+			STR(PTR) " 6b,8b\n"
+			STR(PTR) " 7b,8b\n"
+			STR(PTR) " 0b,8b\n"
 			"	.previous\n"
 			"	.set	pop\n"
 			: "+&r"(rt), "=&r"(rs),
@@ -1938,14 +1939,14 @@
 			"       j	9b\n"
 			"       .previous\n"
 			"       .section        __ex_table,\"a\"\n"
-			"       .word	1b,8b\n"
-			"       .word	2b,8b\n"
-			"       .word	3b,8b\n"
-			"       .word	4b,8b\n"
-			"       .word	5b,8b\n"
-			"       .word	6b,8b\n"
-			"       .word	7b,8b\n"
-			"       .word	0b,8b\n"
+			STR(PTR) " 1b,8b\n"
+			STR(PTR) " 2b,8b\n"
+			STR(PTR) " 3b,8b\n"
+			STR(PTR) " 4b,8b\n"
+			STR(PTR) " 5b,8b\n"
+			STR(PTR) " 6b,8b\n"
+			STR(PTR) " 7b,8b\n"
+			STR(PTR) " 0b,8b\n"
 			"       .previous\n"
 			"       .set	pop\n"
 			: "+&r"(rt), "=&r"(rs),
@@ -2000,7 +2001,7 @@
 			"j	2b\n"
 			".previous\n"
 			".section        __ex_table,\"a\"\n"
-			".word  1b, 3b\n"
+			STR(PTR) " 1b,3b\n"
 			".previous\n"
 			: "=&r"(res), "+&r"(err)
 			: "r"(vaddr), "i"(SIGSEGV)
@@ -2058,7 +2059,7 @@
 			"j	2b\n"
 			".previous\n"
 			".section        __ex_table,\"a\"\n"
-			".word	1b, 3b\n"
+			STR(PTR) " 1b,3b\n"
 			".previous\n"
 			: "+&r"(res), "+&r"(err)
 			: "r"(vaddr), "i"(SIGSEGV));
@@ -2119,7 +2120,7 @@
 			"j	2b\n"
 			".previous\n"
 			".section        __ex_table,\"a\"\n"
-			".word  1b, 3b\n"
+			STR(PTR) " 1b,3b\n"
 			".previous\n"
 			: "=&r"(res), "+&r"(err)
 			: "r"(vaddr), "i"(SIGSEGV)
@@ -2182,7 +2183,7 @@
 			"j	2b\n"
 			".previous\n"
 			".section        __ex_table,\"a\"\n"
-			".word	1b, 3b\n"
+			STR(PTR) " 1b,3b\n"
 			".previous\n"
 			: "+&r"(res), "+&r"(err)
 			: "r"(vaddr), "i"(SIGSEGV));
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
index 2b70723..7811688 100644
--- a/arch/mips/kernel/module-rela.c
+++ b/arch/mips/kernel/module-rela.c
@@ -16,6 +16,7 @@
  *  Copyright (C) 2001 Rusty Russell.
  *  Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
  *  Copyright (C) 2005 Thiemo Seufer
+ *  Copyright (C) 2015 Imagination Technologies Ltd.
  */
 
 #include <linux/elf.h>
@@ -35,15 +36,13 @@
 static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
 {
 	if (v % 4) {
-		pr_err("module %s: dangerous R_MIPS_26 RELArelocation\n",
+		pr_err("module %s: dangerous R_MIPS_26 RELA relocation\n",
 		       me->name);
 		return -ENOEXEC;
 	}
 
 	if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
-		printk(KERN_ERR
-		       "module %s: relocation overflow\n",
-		       me->name);
+		pr_err("module %s: relocation overflow\n", me->name);
 		return -ENOEXEC;
 	}
 
@@ -67,6 +66,48 @@
 	return 0;
 }
 
+static int apply_r_mips_pc_rela(struct module *me, u32 *location, Elf_Addr v,
+				unsigned bits)
+{
+	unsigned long mask = GENMASK(bits - 1, 0);
+	unsigned long se_bits;
+	long offset;
+
+	if (v % 4) {
+		pr_err("module %s: dangerous R_MIPS_PC%u RELA relocation\n",
+		       me->name, bits);
+		return -ENOEXEC;
+	}
+
+	offset = ((long)v - (long)location) >> 2;
+
+	/* check the sign bit onwards are identical - ie. we didn't overflow */
+	se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0;
+	if ((offset & ~mask) != (se_bits & ~mask)) {
+		pr_err("module %s: relocation overflow\n", me->name);
+		return -ENOEXEC;
+	}
+
+	*location = (*location & ~mask) | (offset & mask);
+
+	return 0;
+}
+
+static int apply_r_mips_pc16_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+	return apply_r_mips_pc_rela(me, location, v, 16);
+}
+
+static int apply_r_mips_pc21_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+	return apply_r_mips_pc_rela(me, location, v, 21);
+}
+
+static int apply_r_mips_pc26_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+	return apply_r_mips_pc_rela(me, location, v, 26);
+}
+
 static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
 {
 	*(Elf_Addr *)location = v;
@@ -99,9 +140,12 @@
 	[R_MIPS_26]		= apply_r_mips_26_rela,
 	[R_MIPS_HI16]		= apply_r_mips_hi16_rela,
 	[R_MIPS_LO16]		= apply_r_mips_lo16_rela,
+	[R_MIPS_PC16]		= apply_r_mips_pc16_rela,
 	[R_MIPS_64]		= apply_r_mips_64_rela,
 	[R_MIPS_HIGHER]		= apply_r_mips_higher_rela,
-	[R_MIPS_HIGHEST]	= apply_r_mips_highest_rela
+	[R_MIPS_HIGHEST]	= apply_r_mips_highest_rela,
+	[R_MIPS_PC21_S2]	= apply_r_mips_pc21_rela,
+	[R_MIPS_PC26_S2]	= apply_r_mips_pc26_rela,
 };
 
 int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
@@ -109,9 +153,10 @@
 		       struct module *me)
 {
 	Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+	int (*handler)(struct module *me, u32 *location, Elf_Addr v);
 	Elf_Sym *sym;
 	u32 *location;
-	unsigned int i;
+	unsigned int i, type;
 	Elf_Addr v;
 	int res;
 
@@ -125,18 +170,30 @@
 		/* This is the symbol it is referring to */
 		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
 			+ ELF_MIPS_R_SYM(rel[i]);
-		if (IS_ERR_VALUE(sym->st_value)) {
+		if (sym->st_value >= -MAX_ERRNO) {
 			/* Ignore unresolved weak symbol */
 			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
 				continue;
-			printk(KERN_WARNING "%s: Unknown symbol %s\n",
+			pr_warn("%s: Unknown symbol %s\n",
 			       me->name, strtab + sym->st_name);
 			return -ENOENT;
 		}
 
-		v = sym->st_value + rel[i].r_addend;
+		type = ELF_MIPS_R_TYPE(rel[i]);
 
-		res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
+		if (type < ARRAY_SIZE(reloc_handlers_rela))
+			handler = reloc_handlers_rela[type];
+		else
+			handler = NULL;
+
+		if (!handler) {
+			pr_err("%s: Unknown relocation type %u\n",
+			       me->name, type);
+			return -EINVAL;
+		}
+
+		v = sym->st_value + rel[i].r_addend;
+		res = handler(me, location, v);
 		if (res)
 			return res;
 	}
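
The new apply_r_mips_pc_rela() helpers above clamp PC-relative displacements to the instruction's immediate field: the branch offset is computed in words, and every bit from the field's sign bit upwards must equal that sign bit, otherwise the relocation has overflowed. A minimal user-space sketch of the same test, with a hypothetical fits_signed_field() helper standing in for the kernel code:

#include <stdbool.h>
#include <stdio.h>

/*
 * True if 'offset' fits a signed immediate of 'bits' bits: every bit
 * from the sign bit upwards must be identical, i.e. no overflow.
 */
static bool fits_signed_field(long offset, unsigned int bits)
{
	unsigned long mask = (1UL << bits) - 1;		/* GENMASK(bits - 1, 0) */
	unsigned long se = (offset & (1UL << (bits - 1))) ? ~0UL : 0;

	return ((unsigned long)offset & ~mask) == (se & ~mask);
}

int main(void)
{
	/* R_MIPS_PC16: 16-bit signed word offset */
	printf("%d\n", fits_signed_field(0x7fff, 16));	/* 1: fits */
	printf("%d\n", fits_signed_field(-0x8000, 16));	/* 1: fits */
	printf("%d\n", fits_signed_field(0x8000, 16));	/* 0: overflow */
	return 0;
}

Compiled with a hosted C compiler this prints 1, 1, 0 for the three R_MIPS_PC16 cases, matching the -ENOEXEC path taken by the handler on the last one.
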
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 1833f51..79850e3 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -73,8 +73,7 @@
 	}
 
 	if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
-		printk(KERN_ERR
-		       "module %s: relocation overflow\n",
+		pr_err("module %s: relocation overflow\n",
 		       me->name);
 		return -ENOEXEC;
 	}
@@ -183,13 +182,62 @@
 	return -ENOEXEC;
 }
 
+static int apply_r_mips_pc_rel(struct module *me, u32 *location, Elf_Addr v,
+			       unsigned bits)
+{
+	unsigned long mask = GENMASK(bits - 1, 0);
+	unsigned long se_bits;
+	long offset;
+
+	if (v % 4) {
+		pr_err("module %s: dangerous R_MIPS_PC%u REL relocation\n",
+		       me->name, bits);
+		return -ENOEXEC;
+	}
+
+	/* retrieve & sign extend implicit addend */
+	offset = *location & mask;
+	offset |= (offset & BIT(bits - 1)) ? ~mask : 0;
+
+	offset += ((long)v - (long)location) >> 2;
+
+	/* check the sign bit onwards are identical - ie. we didn't overflow */
+	se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0;
+	if ((offset & ~mask) != (se_bits & ~mask)) {
+		pr_err("module %s: relocation overflow\n", me->name);
+		return -ENOEXEC;
+	}
+
+	*location = (*location & ~mask) | (offset & mask);
+
+	return 0;
+}
+
+static int apply_r_mips_pc16_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+	return apply_r_mips_pc_rel(me, location, v, 16);
+}
+
+static int apply_r_mips_pc21_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+	return apply_r_mips_pc_rel(me, location, v, 21);
+}
+
+static int apply_r_mips_pc26_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+	return apply_r_mips_pc_rel(me, location, v, 26);
+}
+
 static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
 				Elf_Addr v) = {
 	[R_MIPS_NONE]		= apply_r_mips_none,
 	[R_MIPS_32]		= apply_r_mips_32_rel,
 	[R_MIPS_26]		= apply_r_mips_26_rel,
 	[R_MIPS_HI16]		= apply_r_mips_hi16_rel,
-	[R_MIPS_LO16]		= apply_r_mips_lo16_rel
+	[R_MIPS_LO16]		= apply_r_mips_lo16_rel,
+	[R_MIPS_PC16]		= apply_r_mips_pc16_rel,
+	[R_MIPS_PC21_S2]	= apply_r_mips_pc21_rel,
+	[R_MIPS_PC26_S2]	= apply_r_mips_pc26_rel,
 };
 
 int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
@@ -197,9 +245,10 @@
 		   struct module *me)
 {
 	Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
+	int (*handler)(struct module *me, u32 *location, Elf_Addr v);
 	Elf_Sym *sym;
 	u32 *location;
-	unsigned int i;
+	unsigned int i, type;
 	Elf_Addr v;
 	int res;
 
@@ -214,18 +263,30 @@
 		/* This is the symbol it is referring to */
 		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
 			+ ELF_MIPS_R_SYM(rel[i]);
-		if (IS_ERR_VALUE(sym->st_value)) {
+		if (sym->st_value >= -MAX_ERRNO) {
 			/* Ignore unresolved weak symbol */
 			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
 				continue;
-			printk(KERN_WARNING "%s: Unknown symbol %s\n",
-			       me->name, strtab + sym->st_name);
+			pr_warn("%s: Unknown symbol %s\n",
+				me->name, strtab + sym->st_name);
 			return -ENOENT;
 		}
 
-		v = sym->st_value;
+		type = ELF_MIPS_R_TYPE(rel[i]);
 
-		res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
+		if (type < ARRAY_SIZE(reloc_handlers_rel))
+			handler = reloc_handlers_rel[type];
+		else
+			handler = NULL;
+
+		if (!handler) {
+			pr_err("%s: Unknown relocation type %u\n",
+			       me->name, type);
+			return -EINVAL;
+		}
+
+		v = sym->st_value;
+		res = handler(me, location, v);
 		if (res)
 			return res;
 	}
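
The REL flavour in module.c has one extra step compared with the RELA version: the addend is not carried in the relocation record but is already encoded in the instruction word, so it has to be extracted and sign-extended before the new displacement is added. A small sketch of that extraction, again using a hypothetical helper rather than the kernel's apply_r_mips_pc_rel():

#include <stdint.h>
#include <stdio.h>

/*
 * Recover the signed word offset already encoded in the low 'bits'
 * bits of an instruction word (the REL-style implicit addend).
 */
static long sign_extend_field(uint32_t insn, unsigned int bits)
{
	unsigned long mask = (1UL << bits) - 1;
	long offset = insn & mask;

	if (offset & (1UL << (bits - 1)))
		offset |= ~mask;	/* propagate the sign bit */

	return offset;
}

int main(void)
{
	printf("%ld\n", sign_extend_field(0x1000ffff, 16));	/* -1 */
	printf("%ld\n", sign_extend_field(0x10000010, 16));	/* 16 */
	return 0;
}
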
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index c1cf9c6..5021c54 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -35,7 +35,7 @@
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
 			perf_callchain_store(entry, addr);
-			if (entry->nr >= PERF_MAX_STACK_DEPTH)
+			if (entry->nr >= sysctl_perf_event_max_stack)
 				break;
 		}
 	}
@@ -59,7 +59,7 @@
 	}
 	do {
 		perf_callchain_store(entry, pc);
-		if (entry->nr >= PERF_MAX_STACK_DEPTH)
+		if (entry->nr >= sysctl_perf_event_max_stack)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index d7b8dd4..d3ba9f4 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -101,8 +101,6 @@
 
 static struct mips_pmu mipspmu;
 
-#define M_CONFIG1_PC	(1 << 4)
-
 #define M_PERFCTL_EXL			(1	<<  0)
 #define M_PERFCTL_KERNEL		(1	<<  1)
 #define M_PERFCTL_SUPERVISOR		(1	<<  2)
@@ -530,7 +528,7 @@
 
 /*
  * MIPS performance counters can be per-TC. The control registers can
- * not be directly accessed accross CPUs. Hence if we want to do global
+ * not be directly accessed across CPUs. Hence if we want to do global
  * control, we need cross CPU calls. on_each_cpu() can help us, but we
  * can not make sure this function is called with interrupts enabled. So
  * here we pause local counters and then grab a rwlock and leave the
@@ -754,7 +752,7 @@
 
 static int __n_counters(void)
 {
-	if (!(read_c0_config1() & M_CONFIG1_PC))
+	if (!cpu_has_perf)
 		return 0;
 	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
 		return 1;
@@ -825,6 +823,16 @@
 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
 };
 
+static const struct mips_perf_event i6400_event_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
+	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
+	/* These only count dcache, not icache */
+	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
+	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
+	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
+};
+
 static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
@@ -1015,6 +1023,46 @@
 },
 };
 
+static const struct mips_perf_event i6400_cache_map
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
+		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
+		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
+		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
+	},
+},
+[C(DTLB)] = {
+	/* Can't distinguish read & write */
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
+		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
+		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
+	},
+},
+[C(BPU)] = {
+	/* Conditional branches / mispredicted */
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
+		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
+	},
+},
+};
+
 static const struct mips_perf_event loongson3_cache_map
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -1556,6 +1604,7 @@
 #endif
 		break;
 	case CPU_P5600:
+	case CPU_P6600:
 	case CPU_I6400:
 		/* 8-bit event numbers */
 		raw_id = config & 0x1ff;
@@ -1718,11 +1767,16 @@
 		mipspmu.general_event_map = &mipsxxcore_event_map2;
 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
 		break;
-	case CPU_I6400:
-		mipspmu.name = "mips/I6400";
+	case CPU_P6600:
+		mipspmu.name = "mips/P6600";
 		mipspmu.general_event_map = &mipsxxcore_event_map2;
 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
 		break;
+	case CPU_I6400:
+		mipspmu.name = "mips/I6400";
+		mipspmu.general_event_map = &i6400_event_map;
+		mipspmu.cache_event_map = &i6400_cache_map;
+		break;
 	case CPU_1004K:
 		mipspmu.name = "mips/1004K";
 		mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index f63a289..adda3ff 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -224,11 +224,18 @@
 	uasm_build_label(pl, *pp, lbl);
 
 	/* Generate the cache ops */
-	for (i = 0; i < unroll_lines; i++)
-		uasm_i_cache(pp, op, i * cache->linesz, t0);
+	for (i = 0; i < unroll_lines; i++) {
+		if (cpu_has_mips_r6) {
+			uasm_i_cache(pp, op, 0, t0);
+			uasm_i_addiu(pp, t0, t0, cache->linesz);
+		} else {
+			uasm_i_cache(pp, op, i * cache->linesz, t0);
+		}
+	}
 
-	/* Update the base address */
-	uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
+	if (!cpu_has_mips_r6)
+		/* Update the base address */
+		uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
 
 	/* Loop if we haven't reached the end address yet */
 	uasm_il_bne(pp, pr, t0, t1, lbl);
@@ -472,7 +479,7 @@
 	/*
 	 * Disable all but self interventions. The load from COHCTL is defined
 	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
-	 * resulting from the preceeding store is complete.
+	 * resulting from the preceding store is complete.
 	 */
 	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
 	uasm_i_sw(&p, t0, 0, r_pcohctl);
diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c
index fefdf39..dc81489 100644
--- a/arch/mips/kernel/pm.c
+++ b/arch/mips/kernel/pm.c
@@ -56,7 +56,7 @@
 		write_c0_userlocal(current_thread_info()->tp_value);
 
 	/* Restore watch registers */
-	__restore_watch();
+	__restore_watch(current);
 }
 
 /**
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 298b2b7..97dc01b 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -114,6 +114,7 @@
 	if (cpu_has_smartmips)	seq_printf(m, "%s", " smartmips");
 	if (cpu_has_dsp)	seq_printf(m, "%s", " dsp");
 	if (cpu_has_dsp2)	seq_printf(m, "%s", " dsp2");
+	if (cpu_has_dsp3)	seq_printf(m, "%s", " dsp3");
 	if (cpu_has_mipsmt)	seq_printf(m, "%s", " mt");
 	if (cpu_has_mmips)	seq_printf(m, "%s", " micromips");
 	if (cpu_has_vz)		seq_printf(m, "%s", " vz");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eddd5fd..a6b3dc5 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -77,10 +77,6 @@
 {
 }
 
-void flush_thread(void)
-{
-}
-
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	/*
@@ -455,7 +451,7 @@
 		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
 			regs = (struct pt_regs *)*sp;
 			pc = regs->cp0_epc;
-			if (__kernel_text_address(pc)) {
+			if (!user_mode(regs) && __kernel_text_address(pc)) {
 				*sp = regs->regs[29];
 				*ra = regs->regs[31];
 				return pc;
@@ -580,11 +576,19 @@
 	return value;
 }
 
+static void prepare_for_fp_mode_switch(void *info)
+{
+	struct mm_struct *mm = info;
+
+	if (current->mm == mm)
+		lose_fpu(1);
+}
+
 int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 {
 	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
-	unsigned long switch_count;
 	struct task_struct *t;
+	int max_users;
 
 	/* Check the value is valid */
 	if (value & ~known_bits)
@@ -601,6 +605,9 @@
 	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
 		return -EOPNOTSUPP;
 
+	/* Proceed with the mode switch */
+	preempt_disable();
+
 	/* Save FP & vector context, then disable FPU & MSA */
 	if (task->signal == current->signal)
 		lose_fpu(1);
@@ -610,31 +617,17 @@
 	smp_mb__after_atomic();
 
 	/*
-	 * If there are multiple online CPUs then wait until all threads whose
-	 * FP mode is about to change have been context switched. This approach
-	 * allows us to only worry about whether an FP mode switch is in
-	 * progress when FP is first used in a tasks time slice. Pretty much all
-	 * of the mode switch overhead can thus be confined to cases where mode
-	 * switches are actually occuring. That is, to here. However for the
-	 * thread performing the mode switch it may take a while...
+	 * If there are multiple online CPUs then force any which are running
+	 * threads in this process to lose their FPU context, which they can't
+	 * regain until fp_mode_switching is cleared later.
 	 */
 	if (num_online_cpus() > 1) {
-		spin_lock_irq(&task->sighand->siglock);
+		/* No need to send an IPI for the local CPU */
+		max_users = (task->mm == current->mm) ? 1 : 0;
 
-		for_each_thread(task, t) {
-			if (t == current)
-				continue;
-
-			switch_count = t->nvcsw + t->nivcsw;
-
-			do {
-				spin_unlock_irq(&task->sighand->siglock);
-				cond_resched();
-				spin_lock_irq(&task->sighand->siglock);
-			} while ((t->nvcsw + t->nivcsw) == switch_count);
-		}
-
-		spin_unlock_irq(&task->sighand->siglock);
+		if (atomic_read(&current->mm->mm_users) > max_users)
+			smp_call_function(prepare_for_fp_mode_switch,
+					  (void *)current->mm, 1);
 	}
 
 	/*
@@ -659,6 +652,7 @@
 
 	/* Allow threads to use FP again */
 	atomic_set(&task->mm->context.fp_mode_switching, 0);
+	preempt_enable();
 
 	return 0;
 }
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index a5279b2..0dcf691 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -57,8 +57,7 @@
 	/* Begin with data registers set to all 1s... */
 	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
 
-	/* ...and FCSR zeroed */
-	target->thread.fpu.fcr31 = 0;
+	/* FCSR has been preset by `mips_set_personality_nan'.  */
 
 	/*
 	 * Record that the target has "used" math, such that the context
@@ -80,6 +79,22 @@
 }
 
 /*
+ * Poke at FCSR according to its mask.  Don't set the cause bits as
+ * this is currently not handled correctly in FP context restoration
+ * and will cause an oops if a corresponding enable bit is set.
+ */
+static void ptrace_setfcr31(struct task_struct *child, u32 value)
+{
+	u32 fcr31;
+	u32 mask;
+
+	value &= ~FPU_CSR_ALL_X;
+	fcr31 = child->thread.fpu.fcr31;
+	mask = boot_cpu_data.fpu_msk31;
+	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
+}
+
+/*
  * Read a general register set.	 We always use the 64-bit format, even
  * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
  * Registers are sign extended to fill the available space.
@@ -159,9 +174,7 @@
 {
 	union fpureg *fregs;
 	u64 fpr_val;
-	u32 fcr31;
 	u32 value;
-	u32 mask;
 	int i;
 
 	if (!access_ok(VERIFY_READ, data, 33 * 8))
@@ -176,9 +189,7 @@
 	}
 
 	__get_user(value, data + 64);
-	fcr31 = child->thread.fpu.fcr31;
-	mask = boot_cpu_data.fpu_msk31;
-	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
+	ptrace_setfcr31(child, value);
 
 	/* FIR may not be written.  */
 
@@ -210,7 +221,8 @@
 	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 		__put_user(child->thread.watch.mips3264.watchlo[i],
 			   &addr->WATCH_STYLE.watchlo[i]);
-		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
+		__put_user(child->thread.watch.mips3264.watchhi[i] &
+				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
 			   &addr->WATCH_STYLE.watchhi[i]);
 		__put_user(boot_cpu_data.watch_reg_masks[i],
 			   &addr->WATCH_STYLE.watch_masks[i]);
@@ -252,12 +264,12 @@
 		}
 #endif
 		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
-		if (ht[i] & ~0xff8)
+		if (ht[i] & ~MIPS_WATCHHI_MASK)
 			return -EINVAL;
 	}
 	/* Install them. */
 	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
-		if (lt[i] & 7)
+		if (lt[i] & MIPS_WATCHLO_IRW)
 			watch_active = 1;
 		child->thread.watch.mips3264.watchlo[i] = lt[i];
 		/* Set the G bit. */
@@ -805,7 +817,7 @@
 			break;
 #endif
 		case FPC_CSR:
-			child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
+			ptrace_setfcr31(child, data);
 			break;
 		case DSP_BASE ... DSP_BASE + 5: {
 			dspreg_t *dregs;
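
The new ptrace_setfcr31() above folds a user-supplied FCSR value into the saved one: the FP cause bits are stripped (setting them could oops on context restore, as the comment notes) and any bits flagged read-only by fpu_msk31 are kept from the old value. A stand-alone sketch of that merge, with an assumed value standing in for the kernel's cause-bit mask:

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-in for the kernel's FPU_CSR_ALL_X (FCSR cause bits). */
#define FCSR_CAUSE_BITS	0x0003f000

/*
 * Merge a user-supplied FCSR value: drop the cause bits, keep the bits
 * reported read-only by the CPU (ro_mask, i.e. fpu_msk31) from the old
 * value, and accept everything else from the new one.
 */
static uint32_t merge_fcr31(uint32_t old, uint32_t value, uint32_t ro_mask)
{
	value &= ~FCSR_CAUSE_BITS;
	return (value & ~ro_mask) | (old & ro_mask);
}

int main(void)
{
	uint32_t old = 0x00c00000, ro_mask = 0x00c00000;

	printf("%#x\n", merge_fcr31(old, 0x0103f083, ro_mask));	/* 0x1c00083 */
	return 0;
}
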
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 17732f8..56d86b0 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -244,17 +244,17 @@
 	.set	push
 	.set	noat
 #ifdef CONFIG_64BIT
-	copy_u_d \wr, 1
+	copy_s_d \wr, 1
 	EX sd	$1, \off(\base)
 #elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-	copy_u_w \wr, 2
+	copy_s_w \wr, 2
 	EX sw	$1, \off(\base)
-	copy_u_w \wr, 3
+	copy_s_w \wr, 3
 	EX sw	$1, (\off+4)(\base)
 #else /* CONFIG_CPU_BIG_ENDIAN */
-	copy_u_w \wr, 2
+	copy_s_w \wr, 2
 	EX sw	$1, (\off+4)(\base)
-	copy_u_w \wr, 3
+	copy_s_w \wr, 3
 	EX sw	$1, \off(\base)
 #endif
 	.set	pop
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 92cd051..2f0a3b2 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -15,7 +15,6 @@
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
 #include <asm/asm-offsets.h>
-#include <asm/pgtable-bits.h>
 #include <asm/regdef.h>
 #include <asm/stackframe.h>
 #include <asm/thread_info.h>
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
new file mode 100644
index 0000000..ca1cc30
--- /dev/null
+++ b/arch/mips/kernel/relocate.c
@@ -0,0 +1,386 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Support for Kernel relocation at boot time
+ *
+ * Copyright (C) 2015, Imagination Technologies Ltd.
+ * Authors: Matt Redfearn (matt.redfearn@imgtec.com)
+ */
+#include <asm/bootinfo.h>
+#include <asm/cacheflush.h>
+#include <asm/fw/fw.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/timex.h>
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+#include <linux/sched.h>
+#include <linux/start_kernel.h>
+#include <linux/string.h>
+#include <linux/printk.h>
+
+#define RELOCATED(x) ((void *)((long)x + offset))
+
+extern u32 _relocation_start[];	/* End kernel image / start relocation table */
+extern u32 _relocation_end[];	/* End relocation table */
+
+extern long __start___ex_table;	/* Start exception table */
+extern long __stop___ex_table;	/* End exception table */
+
+static inline u32 __init get_synci_step(void)
+{
+	u32 res;
+
+	__asm__("rdhwr  %0, $1" : "=r" (res));
+
+	return res;
+}
+
+static void __init sync_icache(void *kbase, unsigned long kernel_length)
+{
+	void *kend = kbase + kernel_length;
+	u32 step = get_synci_step();
+
+	do {
+		__asm__ __volatile__(
+			"synci  0(%0)"
+			: /* no output */
+			: "r" (kbase));
+
+		kbase += step;
+	} while (kbase < kend);
+
+	/* Completion barrier */
+	__sync();
+}
+
+static int __init apply_r_mips_64_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+	*(u64 *)loc_new += offset;
+
+	return 0;
+}
+
+static int __init apply_r_mips_32_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+	*loc_new += offset;
+
+	return 0;
+}
+
+static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+	unsigned long target_addr = (*loc_orig) & 0x03ffffff;
+
+	if (offset % 4) {
+		pr_err("Dangerous R_MIPS_26 REL relocation\n");
+		return -ENOEXEC;
+	}
+
+	/* Original target address */
+	target_addr <<= 2;
+	target_addr += (unsigned long)loc_orig & ~0x03ffffff;
+
+	/* Get the new target address */
+	target_addr += offset;
+
+	if ((target_addr & 0xf0000000) != ((unsigned long)loc_new & 0xf0000000)) {
+		pr_err("R_MIPS_26 REL relocation overflow\n");
+		return -ENOEXEC;
+	}
+
+	target_addr -= (unsigned long)loc_new & ~0x03ffffff;
+	target_addr >>= 2;
+
+	*loc_new = (*loc_new & ~0x03ffffff) | (target_addr & 0x03ffffff);
+
+	return 0;
+}
+
+
+static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+	unsigned long insn = *loc_orig;
+	unsigned long target = (insn & 0xffff) << 16; /* high 16bits of target */
+
+	target += offset;
+
+	*loc_new = (insn & ~0xffff) | ((target >> 16) & 0xffff);
+	return 0;
+}
+
+static int (*reloc_handlers_rel[]) (u32 *, u32 *, long) __initdata = {
+	[R_MIPS_64]		= apply_r_mips_64_rel,
+	[R_MIPS_32]		= apply_r_mips_32_rel,
+	[R_MIPS_26]		= apply_r_mips_26_rel,
+	[R_MIPS_HI16]		= apply_r_mips_hi16_rel,
+};
+
+int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
+{
+	u32 *r;
+	u32 *loc_orig;
+	u32 *loc_new;
+	int type;
+	int res;
+
+	for (r = _relocation_start; r < _relocation_end; r++) {
+		/* Sentinel for last relocation */
+		if (*r == 0)
+			break;
+
+		type = (*r >> 24) & 0xff;
+		loc_orig = (void *)(kbase_old + ((*r & 0x00ffffff) << 2));
+		loc_new = RELOCATED(loc_orig);
+
+		if (reloc_handlers_rel[type] == NULL) {
+			/* Unsupported relocation */
+			pr_err("Unhandled relocation type %d at 0x%pK\n",
+			       type, loc_orig);
+			return -ENOEXEC;
+		}
+
+		res = reloc_handlers_rel[type](loc_orig, loc_new, offset);
+		if (res)
+			return res;
+	}
+
+	return 0;
+}
+
+/*
+ * The exception table is filled in by the relocs tool after vmlinux is linked.
+ * It must be relocated separately since there will not be any relocation
+ * information for it filled in by the linker.
+ */
+static int __init relocate_exception_table(long offset)
+{
+	unsigned long *etable_start, *etable_end, *e;
+
+	etable_start = RELOCATED(&__start___ex_table);
+	etable_end = RELOCATED(&__stop___ex_table);
+
+	for (e = etable_start; e < etable_end; e++)
+		*e += offset;
+
+	return 0;
+}
+
+#ifdef CONFIG_RANDOMIZE_BASE
+
+static inline __init unsigned long rotate_xor(unsigned long hash,
+					      const void *area, size_t size)
+{
+	size_t i;
+	unsigned long *ptr = (unsigned long *)area;
+
+	for (i = 0; i < size / sizeof(hash); i++) {
+		/* Rotate by odd number of bits and XOR. */
+		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+		hash ^= ptr[i];
+	}
+
+	return hash;
+}
+
+static inline __init unsigned long get_random_boot(void)
+{
+	unsigned long entropy = random_get_entropy();
+	unsigned long hash = 0;
+
+	/* Attempt to create a simple but unpredictable starting entropy. */
+	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+	/* Add in any runtime entropy we can get */
+	hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+#if defined(CONFIG_USE_OF)
+	/* Get any additional entropy passed in device tree */
+	{
+		int node, len;
+		u64 *prop;
+
+		node = fdt_path_offset(initial_boot_params, "/chosen");
+		if (node >= 0) {
+			prop = fdt_getprop_w(initial_boot_params, node,
+					     "kaslr-seed", &len);
+			if (prop && (len == sizeof(u64)))
+				hash = rotate_xor(hash, prop, sizeof(*prop));
+		}
+	}
+#endif /* CONFIG_USE_OF */
+
+	return hash;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+	char *str;
+
+#if defined(CONFIG_CMDLINE_BOOL)
+	const char *builtin_cmdline = CONFIG_CMDLINE;
+
+	str = strstr(builtin_cmdline, "nokaslr");
+	if (str == builtin_cmdline ||
+	    (str > builtin_cmdline && *(str - 1) == ' '))
+		return true;
+#endif
+	str = strstr(arcs_cmdline, "nokaslr");
+	if (str == arcs_cmdline || (str > arcs_cmdline && *(str - 1) == ' '))
+		return true;
+
+	return false;
+}
+
+static inline void __init *determine_relocation_address(void)
+{
+	/* Choose a new address for the kernel */
+	unsigned long kernel_length;
+	void *dest = &_text;
+	unsigned long offset;
+
+	if (kaslr_disabled())
+		return dest;
+
+	kernel_length = (long)_end - (long)(&_text);
+
+	offset = get_random_boot() << 16;
+	offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+	if (offset < kernel_length)
+		offset += ALIGN(kernel_length, 0xffff);
+
+	return RELOCATED(dest);
+}
+
+#else
+
+static inline void __init *determine_relocation_address(void)
+{
+	/*
+	 * Choose a new address for the kernel
+	 * For now we'll hard code the destination
+	 */
+	return (void *)0xffffffff81000000;
+}
+
+#endif
+
+static inline int __init relocation_addr_valid(void *loc_new)
+{
+	if ((unsigned long)loc_new & 0x0000ffff) {
+		/* Inappropriately aligned new location */
+		return 0;
+	}
+	if ((unsigned long)loc_new < (unsigned long)&_end) {
+		/* New location overlaps original kernel */
+		return 0;
+	}
+	return 1;
+}
+
+void *__init relocate_kernel(void)
+{
+	void *loc_new;
+	unsigned long kernel_length;
+	unsigned long bss_length;
+	long offset = 0;
+	int res = 1;
+	/* Default to original kernel entry point */
+	void *kernel_entry = start_kernel;
+
+	/* Get the command line */
+	fw_init_cmdline();
+#if defined(CONFIG_USE_OF)
+	/* Deal with the device tree */
+	early_init_dt_scan(plat_get_fdt());
+	if (boot_command_line[0]) {
+		/* Boot command line was passed in device tree */
+		strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+	}
+#endif /* CONFIG_USE_OF */
+
+	kernel_length = (long)(&_relocation_start) - (long)(&_text);
+	bss_length = (long)&__bss_stop - (long)&__bss_start;
+
+	loc_new = determine_relocation_address();
+
+	/* Sanity check relocation address */
+	if (relocation_addr_valid(loc_new))
+		offset = (unsigned long)loc_new - (unsigned long)(&_text);
+
+	/* Reset the command line now so we don't end up with a duplicate */
+	arcs_cmdline[0] = '\0';
+
+	if (offset) {
+		/* Copy the kernel to its new location */
+		memcpy(loc_new, &_text, kernel_length);
+
+		/* Perform relocations on the new kernel */
+		res = do_relocations(&_text, loc_new, offset);
+		if (res < 0)
+			goto out;
+
+		/* Sync the caches ready for execution of new kernel */
+		sync_icache(loc_new, kernel_length);
+
+		res = relocate_exception_table(offset);
+		if (res < 0)
+			goto out;
+
+		/*
+		 * The original .bss has already been cleared, and some
+		 * variables such as command line parameters have already
+		 * been stored to it, so make a copy in the new location.
+		 */
+		memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length);
+
+		/* The current thread is now within the relocated image */
+		__current_thread_info = RELOCATED(&init_thread_union);
+
+		/* Return the new kernel's entry point */
+		kernel_entry = RELOCATED(start_kernel);
+	}
+out:
+	return kernel_entry;
+}
+
+/*
+ * Show relocation information on panic.
+ */
+void show_kernel_relocation(const char *level)
+{
+	unsigned long offset;
+
+	offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
+
+	if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
+		printk(level);
+		pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
+		pr_cont(" .text @ 0x%pK\n", _text);
+		pr_cont(" .data @ 0x%pK\n", _sdata);
+		pr_cont(" .bss  @ 0x%pK\n", __bss_start);
+	}
+}
+
+static int kernel_location_notifier_fn(struct notifier_block *self,
+				       unsigned long v, void *p)
+{
+	show_kernel_relocation(KERN_EMERG);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block kernel_location_notifier = {
+	.notifier_call = kernel_location_notifier_fn
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &kernel_location_notifier);
+	return 0;
+}
+__initcall(register_kernel_offset_dumper);
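
With CONFIG_RANDOMIZE_BASE the relocation offset is derived from get_random_boot(), which folds whatever bytes are available this early (the kernel banner, the cycle counter, an optional kaslr-seed device tree property) into one word with rotate_xor(). A stand-alone sketch of that mixing step, assuming nothing beyond hosted C:

#include <stdio.h>
#include <string.h>

/*
 * Fold an area of memory into the running hash: rotate right by 7 bits
 * and XOR in one word at a time, as rotate_xor() above does.
 */
static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	const unsigned long *ptr = area;
	size_t i;

	for (i = 0; i < size / sizeof(hash); i++) {
		hash = (hash << (sizeof(hash) * 8 - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}
	return hash;
}

int main(void)
{
	const char banner[] = "Linux version 4.x (example banner text)";
	unsigned long buf[8] = { 0 };
	unsigned long hash = 0;

	/* Copy into an aligned buffer; the kernel hashes linux_banner in place. */
	memcpy(buf, banner, strlen(banner) < sizeof(buf) ?
			    strlen(banner) : sizeof(buf));
	hash = rotate_xor(hash, buf, sizeof(buf));
	printf("seed: %#lx\n", hash);
	return 0;
}

The result is not cryptographically strong entropy; it only has to make the load address unpredictable enough before any real random number source is up.
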
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a563174..c8e43e0 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -35,7 +35,6 @@
 
 	lw	t1, PT_EPC(sp)		# skip syscall on return
 
-	subu	v0, v0, __NR_O32_Linux	# check syscall number
 	addiu	t1, 4			# skip to next instruction
 	sw	t1, PT_EPC(sp)
 
@@ -89,6 +88,7 @@
 	and	t0, t1
 	bnez	t0, syscall_trace_entry # -> yes
 syscall_common:
+	subu	v0, v0, __NR_O32_Linux	# check syscall number
 	sltiu	t0, v0, __NR_O32_Linux_syscalls + 1
 	beqz	t0, illegal_syscall
 
@@ -118,24 +118,23 @@
 
 syscall_trace_entry:
 	SAVE_STATIC
-	move	s0, v0
 	move	a0, sp
 
 	/*
 	 * syscall number is in v0 unless we called syscall(__NR_###)
 	 * where the real syscall number is in a0
 	 */
-	addiu	a1, v0,  __NR_O32_Linux
-	bnez	v0, 1f /* __NR_syscall at offset 0 */
+	move	a1, v0
+	subu	t2, v0,  __NR_O32_Linux
+	bnez	t2, 1f /* __NR_syscall at offset 0 */
 	lw	a1, PT_R4(sp)
 
 1:	jal	syscall_trace_enter
 
 	bltz	v0, 1f			# seccomp failed? Skip syscall
 
-	move	v0, s0			# restore syscall
-
 	RESTORE_STATIC
+	lw	v0, PT_R2(sp)		# Restore syscall (maybe modified)
 	lw	a0, PT_R4(sp)		# Restore argument registers
 	lw	a1, PT_R5(sp)
 	lw	a2, PT_R6(sp)
@@ -596,3 +595,5 @@
 	PTR	sys_membarrier
 	PTR	sys_mlock2
 	PTR	sys_copy_file_range		/* 4360 */
+	PTR	sys_preadv2
+	PTR	sys_pwritev2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 2b2dc14..e6ede12 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -82,15 +82,14 @@
 
 syscall_trace_entry:
 	SAVE_STATIC
-	move	s0, v0
 	move	a0, sp
 	move	a1, v0
 	jal	syscall_trace_enter
 
 	bltz	v0, 1f			# seccomp failed? Skip syscall
 
-	move	v0, s0
 	RESTORE_STATIC
+	ld	v0, PT_R2(sp)		# Restore syscall (maybe modified)
 	ld	a0, PT_R4(sp)		# Restore argument registers
 	ld	a1, PT_R5(sp)
 	ld	a2, PT_R6(sp)
@@ -434,4 +433,6 @@
 	PTR	sys_membarrier
 	PTR	sys_mlock2
 	PTR	sys_copy_file_range		/* 5320 */
+	PTR	sys_preadv2
+	PTR	sys_pwritev2
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2bf5c85..9c0b387 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -42,9 +42,6 @@
 #endif
 	beqz	t0, not_n32_scall
 
-	dsll	t0, v0, 3		# offset into table
-	ld	t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0)
-
 	sd	a3, PT_R26(sp)		# save a3 for syscall restarting
 
 	li	t1, _TIF_WORK_SYSCALL_ENTRY
@@ -53,6 +50,9 @@
 	bnez	t0, n32_syscall_trace_entry
 
 syscall_common:
+	dsll	t0, v0, 3		# offset into table
+	ld	t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0)
+
 	jalr	t2			# Do The Real Thing (TM)
 
 	li	t0, -EMAXERRNO - 1	# error?
@@ -71,21 +71,25 @@
 
 n32_syscall_trace_entry:
 	SAVE_STATIC
-	move	s0, t2
 	move	a0, sp
 	move	a1, v0
 	jal	syscall_trace_enter
 
 	bltz	v0, 1f			# seccomp failed? Skip syscall
 
-	move	t2, s0
 	RESTORE_STATIC
+	ld	v0, PT_R2(sp)		# Restore syscall (maybe modified)
 	ld	a0, PT_R4(sp)		# Restore argument registers
 	ld	a1, PT_R5(sp)
 	ld	a2, PT_R6(sp)
 	ld	a3, PT_R7(sp)
 	ld	a4, PT_R8(sp)
 	ld	a5, PT_R9(sp)
+
+	dsubu	t2, v0, __NR_N32_Linux	# check (new) syscall number
+	sltiu   t0, t2, __NR_N32_Linux_syscalls + 1
+	beqz	t0, not_n32_scall
+
 	j	syscall_common
 
 1:	j	syscall_exit
@@ -424,4 +428,6 @@
 	PTR	sys_membarrier
 	PTR	sys_mlock2
 	PTR	sys_copy_file_range
+	PTR	compat_sys_preadv2		/* 6325 */
+	PTR	compat_sys_pwritev2
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index c5b759e..f4f28b1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -52,9 +52,6 @@
 	sll	a2, a2, 0
 	sll	a3, a3, 0
 
-	dsll	t0, v0, 3		# offset into table
-	ld	t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
-
 	sd	a3, PT_R26(sp)		# save a3 for syscall restarting
 
 	/*
@@ -88,6 +85,9 @@
 	bnez	t0, trace_a_syscall
 
 syscall_common:
+	dsll	t0, v0, 3		# offset into table
+	ld	t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
+
 	jalr	t2			# Do The Real Thing (TM)
 
 	li	t0, -EMAXERRNO - 1	# error?
@@ -112,7 +112,6 @@
 	sd	a6, PT_R10(sp)
 	sd	a7, PT_R11(sp)		# For indirect syscalls
 
-	move	s0, t2			# Save syscall pointer
 	move	a0, sp
 	/*
 	 * absolute syscall number is in v0 unless we called syscall(__NR_###)
@@ -133,8 +132,8 @@
 
 	bltz	v0, 1f			# seccomp failed? Skip syscall
 
-	move	t2, s0
 	RESTORE_STATIC
+	ld	v0, PT_R2(sp)		# Restore syscall (maybe modified)
 	ld	a0, PT_R4(sp)		# Restore argument registers
 	ld	a1, PT_R5(sp)
 	ld	a2, PT_R6(sp)
@@ -143,6 +142,11 @@
 	ld	a5, PT_R9(sp)
 	ld	a6, PT_R10(sp)
 	ld	a7, PT_R11(sp)		# For indirect syscalls
+
+	dsubu	t0, v0, __NR_O32_Linux	# check (new) syscall number
+	sltiu	t0, t0, __NR_O32_Linux_syscalls + 1
+	beqz	t0, not_o32_scall
+
 	j	syscall_common
 
 1:	j	syscall_exit
@@ -579,4 +583,6 @@
 	PTR	sys_membarrier
 	PTR	sys_mlock2
 	PTR	sys_copy_file_range		/* 4360 */
+	PTR	compat_sys_preadv2
+	PTR	compat_sys_pwritev2
 	.size	sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 4f60734..ef408a0 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -26,6 +26,7 @@
 #include <linux/sizes.h>
 #include <linux/device.h>
 #include <linux/dma-contiguous.h>
+#include <linux/decompress/generic.h>
 
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
@@ -52,13 +53,6 @@
 #endif
 
 /*
- * Despite it's name this variable is even if we don't have PCI
- */
-unsigned int PCI_DMA_BUS_IS_PHYS;
-
-EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
-
-/*
  * Setup information
  *
  * These are initialized so they are in the .data section
@@ -250,6 +244,35 @@
 	return 0;
 }
 
+/* In some conditions (e.g. big endian bootloader with a little endian
+   kernel), the initrd might appear byte swapped.  Try to detect this and
+   byte swap it if needed.  */
+static void __init maybe_bswap_initrd(void)
+{
+#if defined(CONFIG_CPU_CAVIUM_OCTEON)
+	u64 buf;
+
+	/* Check for CPIO signature */
+	if (!memcmp((void *)initrd_start, "070701", 6))
+		return;
+
+	/* Check for compressed initrd */
+	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
+		return;
+
+	/* Try again with a byte swapped header */
+	buf = swab64p((u64 *)initrd_start);
+	if (!memcmp(&buf, "070701", 6) ||
+	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
+		unsigned long i;
+
+		pr_info("Byteswapped initrd detected\n");
+		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
+			swab64s((u64 *)i);
+	}
+#endif
+}
+
 static void __init finalize_initrd(void)
 {
 	unsigned long size = initrd_end - initrd_start;
@@ -263,6 +286,8 @@
 		goto disable;
 	}
 
+	maybe_bswap_initrd();
+
 	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
 	initrd_below_start_ok = 1;
 
@@ -469,6 +494,29 @@
 	 */
 	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);
 
+#ifdef CONFIG_RELOCATABLE
+	/*
+	 * The kernel reserves all memory below its _end symbol as bootmem,
+	 * but the kernel may now be at a much higher address. The memory
+	 * between the original and new locations may be returned to the system.
+	 */
+	if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
+		unsigned long offset;
+		extern void show_kernel_relocation(const char *level);
+
+		offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
+		free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
+
+#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
+		/*
+		 * This information is necessary when debugging the kernel,
+		 * but is a security vulnerability otherwise!
+		 */
+		show_kernel_relocation(KERN_INFO);
+#endif
+	}
+#endif
+
 	/*
 	 * Reserve initrd memory if needed.
 	 */
@@ -624,6 +672,8 @@
 #define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
 #define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
 #define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
+#define BUILTIN_EXTEND_WITH_PROM	\
+	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
 
 static void __init arch_mem_init(char **cmdline_p)
 {
@@ -657,15 +707,23 @@
 		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
 
 	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
-		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+		if (boot_command_line[0])
+			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
 		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
 	}
 
 #if defined(CONFIG_CMDLINE_BOOL)
 	if (builtin_cmdline[0]) {
-		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+		if (boot_command_line[0])
+			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
 		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 	}
+
+	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
+		if (boot_command_line[0])
+			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
+	}
 #endif
 #endif
 	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
@@ -706,6 +764,9 @@
 	for_each_memblock(reserved, reg)
 		if (reg->size != 0)
 			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+
+	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
+			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
 }
 
 static void __init resource_init(void)
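
The new maybe_bswap_initrd() decides whether the initrd was handed over byte-swapped by testing its first eight bytes twice: once as-is against the newc cpio magic or a known compressor header, and once after a 64-bit byte swap. A reduced user-space sketch covering only the cpio case, with __builtin_bswap64 standing in for swab64p():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Return true if the first eight bytes look like a newc cpio archive,
 * either directly or after a 64-bit byte swap; *swapped tells the
 * caller whether the whole image still needs swabbing.
 */
static bool is_cpio_maybe_swapped(const void *start, bool *swapped)
{
	uint64_t first;

	memcpy(&first, start, sizeof(first));

	if (!memcmp(&first, "070701", 6)) {
		*swapped = false;
		return true;
	}

	first = __builtin_bswap64(first);	/* stands in for swab64p() */
	if (!memcmp(&first, "070701", 6)) {
		*swapped = true;
		return true;
	}

	return false;
}

int main(void)
{
	/* Example newc header start ("07070100") with its 64-bit word byte-reversed */
	const unsigned char hdr[8] = { '0', '0', '1', '0', '7', '0', '7', '0' };
	bool swapped;

	if (is_cpio_maybe_swapped(hdr, &swapped))
		printf("cpio archive, swapped=%d\n", swapped);
	return 0;
}

When the swapped form matches, the kernel walks the whole initrd in 8-byte steps and swabs it in place before reserving it.
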
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index bf792e2..ab04229 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -195,6 +195,9 @@
 	unsigned int csr;
 	int i, err;
 
+	if (!config_enabled(CONFIG_CPU_HAS_MSA))
+		return SIGSYS;
+
 	if (size != sizeof(*msa))
 		return -EINVAL;
 
@@ -398,8 +401,8 @@
 	}
 
 fp_done:
-	if (used & USED_EXTCONTEXT)
-		err |= restore_extcontext(sc_to_extcontext(sc));
+	if (!err && (used & USED_EXTCONTEXT))
+		err = restore_extcontext(sc_to_extcontext(sc));
 
 	return err ?: sig;
 }
@@ -798,7 +801,7 @@
 		regs->regs[0] = 0;		/* Don't deal with this again.	*/
 	}
 
-	if (sig_uses_siginfo(&ksig->ka))
+	if (sig_uses_siginfo(&ksig->ka, abi))
 		ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
 					  ksig, regs, oldset);
 	else
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 4909639..78c8349 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -227,6 +227,12 @@
 			err |= __put_user(from->si_uid, &to->si_uid);
 			err |= __put_user(from->si_int, &to->si_int);
 			break;
+		case __SI_SYS >> 16:
+			err |= __copy_to_user(&to->si_call_addr, &from->si_call_addr,
+					      sizeof(compat_uptr_t));
+			err |= __put_user(from->si_syscall, &to->si_syscall);
+			err |= __put_user(from->si_arch, &to->si_arch);
+			break;
 		}
 	}
 	return err;
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 78cf8c2..e02addc 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -243,6 +243,7 @@
 		break;
 	case CPU_BMIPS5000:
 		write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
+		current_cpu_data.core = (read_c0_brcm_config() >> 25) & 3;
 		break;
 	}
 }
@@ -565,3 +566,90 @@
 	 * once the wired entries are present.
 	 */
 }
+
+void __init bmips_cpu_setup(void)
+{
+	void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
+	u32 __maybe_unused cfg;
+
+	switch (current_cpu_type()) {
+	case CPU_BMIPS3300:
+		/* Set BIU to async mode */
+		set_c0_brcm_bus_pll(BIT(22));
+		__sync();
+
+		/* put the BIU back in sync mode */
+		clear_c0_brcm_bus_pll(BIT(22));
+
+		/* clear BHTD to enable branch history table */
+		clear_c0_brcm_reset(BIT(16));
+
+		/* Flush and enable RAC */
+		cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+		__raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG);
+		__raw_readl(cbr + BMIPS_RAC_CONFIG);
+
+		cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+		__raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG);
+		__raw_readl(cbr + BMIPS_RAC_CONFIG);
+
+		cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
+		__raw_writel(cfg | 0x0fff0000, cbr + BMIPS_RAC_ADDRESS_RANGE);
+		__raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
+		break;
+
+	case CPU_BMIPS4380:
+		/* CBG workaround for early BMIPS4380 CPUs */
+		switch (read_c0_prid()) {
+		case 0x2a040:
+		case 0x2a042:
+		case 0x2a044:
+		case 0x2a060:
+			cfg = __raw_readl(cbr + BMIPS_L2_CONFIG);
+			__raw_writel(cfg & ~0x07000000, cbr + BMIPS_L2_CONFIG);
+			__raw_readl(cbr + BMIPS_L2_CONFIG);
+		}
+
+		/* clear BHTD to enable branch history table */
+		clear_c0_brcm_config_0(BIT(21));
+
+		/* XI/ROTR enable */
+		set_c0_brcm_config_0(BIT(23));
+		set_c0_brcm_cmt_ctrl(BIT(15));
+		break;
+
+	case CPU_BMIPS5000:
+		/* enable RDHWR, BRDHWR */
+		set_c0_brcm_config(BIT(17) | BIT(21));
+
+		/* Disable JTB */
+		__asm__ __volatile__(
+		"	.set	noreorder\n"
+		"	li	$8, 0x5a455048\n"
+		"	.word	0x4088b00f\n"	/* mtc0	t0, $22, 15 */
+		"	.word	0x4008b008\n"	/* mfc0	t0, $22, 8 */
+		"	li	$9, 0x00008000\n"
+		"	or	$8, $8, $9\n"
+		"	.word	0x4088b008\n"	/* mtc0	t0, $22, 8 */
+		"	sync\n"
+		"	li	$8, 0x0\n"
+		"	.word	0x4088b00f\n"	/* mtc0	t0, $22, 15 */
+		"	.set	reorder\n"
+		: : : "$8", "$9");
+
+		/* XI enable */
+		set_c0_brcm_config(BIT(27));
+
+		/* enable MIPS32R2 ROR instruction for XI TLB handlers */
+		__asm__ __volatile__(
+		"	li	$8, 0x5a455048\n"
+		"	.word	0x4088b00f\n"	/* mtc0 $8, $22, 15 */
+		"	nop; nop; nop\n"
+		"	.word	0x4008b008\n"	/* mfc0 $8, $22, 8 */
+		"	lui	$9, 0x0100\n"
+		"	or	$8, $9\n"
+		"	.word	0x4088b008\n"	/* mtc0 $8, $22, 8 */
+		: : : "$8", "$9");
+		break;
+	}
+}
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 253e140..1061bd2 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -27,15 +27,27 @@
 #include <asm/time.h>
 #include <asm/uasm.h>
 
+static bool threads_disabled;
 static DECLARE_BITMAP(core_power, NR_CPUS);
 
 struct core_boot_config *mips_cps_core_bootcfg;
 
+static int __init setup_nothreads(char *s)
+{
+	threads_disabled = true;
+	return 0;
+}
+early_param("nothreads", setup_nothreads);
+
 static unsigned core_vpe_count(unsigned core)
 {
 	unsigned cfg;
 
-	if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+	if (threads_disabled)
+		return 1;
+
+	if ((!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+		&& (!config_enabled(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
 		return 1;
 
 	mips_cm_lock_other(core, 0);
@@ -47,11 +59,12 @@
 static void __init cps_smp_setup(void)
 {
 	unsigned int ncores, nvpes, core_vpes;
+	unsigned long core_entry;
 	int c, v;
 
 	/* Detect & record VPE topology */
 	ncores = mips_cm_numcores();
-	pr_info("VPE topology ");
+	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
 	for (c = nvpes = 0; c < ncores; c++) {
 		core_vpes = core_vpe_count(c);
 		pr_cont("%c%u", c ? ',' : '{', core_vpes);
@@ -62,7 +75,7 @@
 
 		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
 			cpu_data[nvpes + v].core = c;
-#ifdef CONFIG_MIPS_MT_SMP
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
 			cpu_data[nvpes + v].vpe_id = v;
 #endif
 		}
@@ -91,6 +104,11 @@
 	/* Make core 0 coherent with everything */
 	write_gcr_cl_coherence(0xff);
 
+	if (mips_cm_revision() >= CM_REV_CM3) {
+		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+		write_gcr_bev_base(core_entry);
+	}
+
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
@@ -213,6 +231,18 @@
 	if (mips_cpc_present()) {
 		/* Reset the core */
 		mips_cpc_lock_other(core);
+
+		if (mips_cm_revision() >= CM_REV_CM3) {
+			/* Run VP0 following the reset */
+			write_cpc_co_vp_run(0x1);
+
+			/*
+			 * Ensure that the VP_RUN register is written before the
+			 * core leaves reset.
+			 */
+			wmb();
+		}
+
 		write_cpc_co_cmd(CPC_Cx_CMD_RESET);
 
 		timeout = 100;
@@ -250,7 +280,10 @@
 
 static void remote_vpe_boot(void *dummy)
 {
-	mips_cps_boot_vpes();
+	unsigned core = current_cpu_data.core;
+	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+
+	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
 }
 
 static void cps_boot_secondary(int cpu, struct task_struct *idle)
@@ -259,6 +292,7 @@
 	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
 	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
 	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
+	unsigned long core_entry;
 	unsigned int remote;
 	int err;
 
@@ -276,6 +310,13 @@
 		goto out;
 	}
 
+	if (cpu_has_vp) {
+		mips_cm_lock_other(core, vpe_id);
+		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+		write_gcr_co_reset_base(core_entry);
+		mips_cm_unlock_other();
+	}
+
 	if (core != current_cpu_data.core) {
 		/* Boot a VPE on another powered up core */
 		for (remote = 0; remote < NR_CPUS; remote++) {
@@ -293,10 +334,10 @@
 		goto out;
 	}
 
-	BUG_ON(!cpu_has_mipsmt);
+	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);
 
 	/* Boot a VPE on this core */
-	mips_cps_boot_vpes();
+	mips_cps_boot_vpes(core_cfg, vpe_id);
 out:
 	preempt_enable();
 }
@@ -307,6 +348,17 @@
 	if (cpu_has_mipsmt)
 		dmt();
 
+	if (mips_cm_revision() >= CM_REV_CM3) {
+		unsigned ident = gic_read_local_vp_id();
+
+		/*
+		 * Ensure that our calculation of the VP ID matches up with
+		 * what the GIC reports, otherwise we'll have configured
+		 * interrupts incorrectly.
+		 */
+		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
+	}
+
 	change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
 				 STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
 }
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 37708d9..f9d01e9 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -254,7 +254,17 @@
 	if (node && !ipidomain)
 		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
 
-	BUG_ON(!ipidomain);
+	/*
+	 * There are systems which only use IPI domains some of the time,
+	 * depending upon configuration we don't know until runtime. An
+	 * example is Malta where we may compile in support for GIC & the
+	 * MT ASE, but run on a system which has multiple VPEs in a single
+	 * core and doesn't include a GIC. Until all IPI implementations
+	 * have been converted to use IPI domains the best we can do here
+	 * is to return & hope some other code sets up the IPIs.
+	 */
+	if (!ipidomain)
+		return 0;
 
 	call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
 	BUG_ON(!call_virq);
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 8489c88..d6e6cf7 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -210,6 +210,7 @@
 	case CPU_P5600:
 	case CPU_QEMU_GENERIC:
 	case CPU_I6400:
+	case CPU_P6600:
 		config0 = read_c0_config();
 		/* FIXME: addresses are Malta specific */
 		if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bf14da9..4a1712b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -56,6 +56,7 @@
 #include <asm/pgtable.h>
 #include <asm/ptrace.h>
 #include <asm/sections.h>
+#include <asm/siginfo.h>
 #include <asm/tlbdebug.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
@@ -144,7 +145,7 @@
 	if (!task)
 		task = current;
 
-	if (raw_show_trace || !__kernel_text_address(pc)) {
+	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
 		show_raw_backtrace(sp);
 		return;
 	}
@@ -398,11 +399,8 @@
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 
-	if (panic_on_oops) {
-		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
-		ssleep(5);
+	if (panic_on_oops)
 		panic("Fatal exception");
-	}
 
 	if (regs && kexec_should_crash(current))
 		crash_kexec(regs);
@@ -871,7 +869,7 @@
 	exception_exit(prev_state);
 }
 
-void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
 	const char *str)
 {
 	siginfo_t info = { 0 };
@@ -928,7 +926,13 @@
 	default:
 		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 		die_if_kernel(b, regs);
-		force_sig(SIGTRAP, current);
+		if (si_code) {
+			info.si_signo = SIGTRAP;
+			info.si_code = si_code;
+			force_sig_info(SIGTRAP, &info, current);
+		} else {
+			force_sig(SIGTRAP, current);
+		}
 	}
 }
 
@@ -1012,7 +1016,7 @@
 		break;
 	}
 
-	do_trap_or_bp(regs, bcode, "Break");
+	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
 
 out:
 	set_fs(seg);
@@ -1054,7 +1058,7 @@
 			tcode = (opcode >> 6) & ((1 << 10) - 1);
 	}
 
-	do_trap_or_bp(regs, tcode, "Trap");
+	do_trap_or_bp(regs, tcode, 0, "Trap");
 
 out:
 	set_fs(seg);
@@ -1115,19 +1119,7 @@
 	if (unlikely(compute_return_epc(regs) < 0))
 		goto out;
 
-	if (get_isa16_mode(regs->cp0_epc)) {
-		unsigned short mmop[2] = { 0 };
-
-		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
-			status = SIGSEGV;
-		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
-			status = SIGSEGV;
-		opcode = mmop[0];
-		opcode = (opcode << 16) | mmop[1];
-
-		if (status < 0)
-			status = simulate_rdhwr_mm(regs, opcode);
-	} else {
+	if (!get_isa16_mode(regs->cp0_epc)) {
 		if (unlikely(get_user(opcode, epc) < 0))
 			status = SIGSEGV;
 
@@ -1142,6 +1134,18 @@
 
 		if (status < 0)
 			status = simulate_fp(regs, opcode, old_epc, old31);
+	} else if (cpu_has_mmips) {
+		unsigned short mmop[2] = { 0 };
+
+		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
+			status = SIGSEGV;
+		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
+			status = SIGSEGV;
+		opcode = mmop[0];
+		opcode = (opcode << 16) | mmop[1];
+
+		if (status < 0)
+			status = simulate_rdhwr_mm(regs, opcode);
 	}
 
 	if (status < 0)
@@ -1242,7 +1246,7 @@
 		err = init_fpu();
 		if (msa && !err) {
 			enable_msa();
-			_init_msa_upper();
+			init_msa_upper();
 			set_thread_flag(TIF_USEDMSA);
 			set_thread_flag(TIF_MSA_CTX_LIVE);
 		}
@@ -1305,7 +1309,7 @@
 	 */
 	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
 	if (!prior_msa && was_fpu_owner) {
-		_init_msa_upper();
+		init_msa_upper();
 
 		goto out;
 	}
@@ -1322,7 +1326,7 @@
 		 * of each vector register such that it cannot see data left
 		 * behind by another task.
 		 */
-		_init_msa_upper();
+		init_msa_upper();
 	} else {
 		/* We need to restore the vector context. */
 		restore_msa(current);
@@ -1349,7 +1353,6 @@
 	unsigned long fcr31;
 	unsigned int cpid;
 	int status, err;
-	unsigned long __maybe_unused flags;
 	int sig;
 
 	prev_state = exception_enter();
@@ -1492,17 +1495,15 @@
  */
 asmlinkage void do_watch(struct pt_regs *regs)
 {
+	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
 	enum ctx_state prev_state;
-	u32 cause;
 
 	prev_state = exception_enter();
 	/*
 	 * Clear WP (bit 22) bit of cause register so we don't loop
 	 * forever.
 	 */
-	cause = read_c0_cause();
-	cause &= ~(1 << 22);
-	write_c0_cause(cause);
+	clear_c0_cause(CAUSEF_WP);
 
 	/*
 	 * If the current thread has the watch registers loaded, save
@@ -1512,7 +1513,7 @@
 	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
 		mips_read_watch_registers();
 		local_irq_enable();
-		force_sig(SIGTRAP, current);
+		force_sig_info(SIGTRAP, &info, current);
 	} else {
 		mips_clear_watch_registers();
 		local_irq_enable();
@@ -1639,6 +1640,7 @@
 	case CPU_P5600:
 	case CPU_QEMU_GENERIC:
 	case CPU_I6400:
+	case CPU_P6600:
 		{
 #define ERRCTL_PE	0x80000000
 #define ERRCTL_L2P	0x00800000
@@ -1769,7 +1771,8 @@
 
 	/* For the moment, report the problem and hang. */
 	if ((cpu_has_mips_r2_r6) &&
-	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
+	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
+	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
 		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
 		       read_c0_ecc());
 		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
@@ -2111,6 +2114,13 @@
 	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
 	 */
 	if (cpu_has_mips_r2_r6) {
+		/*
+		 * We shouldn't trust that a secondary core has a sane EBASE register,
+		 * so use the one calculated by the boot CPU.
+		 */
+		if (!is_boot_cpu)
+			write_c0_ebase(ebase);
+
 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2126,7 +2136,7 @@
 	}
 
 	if (!cpu_data[cpu].asid_cache)
-		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+		cpu_data[cpu].asid_cache = asid_first_version(cpu);
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -2214,7 +2224,7 @@
 
 	/*
 	 * Copy the generic exception handlers to their final destination.
-	 * This will be overriden later as suitable for a particular
+	 * This will be overridden later as suitable for a particular
 	 * configuration.
 	 */
 	set_handler(0x180, &except_vec3_generic, 0x80);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 490cea5..28b3af7 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -885,7 +885,7 @@
 {
 	union mips_instruction insn;
 	unsigned long value;
-	unsigned int res;
+	unsigned int res, preempted;
 	unsigned long origpc;
 	unsigned long orig31;
 	void __user *fault_addr = NULL;
@@ -1191,6 +1191,7 @@
 	case ldc1_op:
 	case swc1_op:
 	case sdc1_op:
+	case cop1x_op:
 		die_if_kernel("Unaligned FP access in kernel code", regs);
 		BUG_ON(!used_math());
 
@@ -1226,27 +1227,36 @@
 			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
 				goto sigbus;
 
-			/*
-			 * Disable preemption to avoid a race between copying
-			 * state from userland, migrating to another CPU and
-			 * updating the hardware vector register below.
-			 */
-			preempt_disable();
+			do {
+				/*
+				 * If we have live MSA context, keep track of
+				 * whether we get preempted in order to avoid
+				 * the register context we load being clobbered
+				 * by the live context as it's saved during
+				 * preemption. If we don't have live context
+				 * then it can't be saved to clobber the value
+				 * we load.
+				 */
+				preempted = test_thread_flag(TIF_USEDMSA);
 
-			res = __copy_from_user_inatomic(fpr, addr,
-							sizeof(*fpr));
-			if (res)
-				goto fault;
+				res = __copy_from_user_inatomic(fpr, addr,
+								sizeof(*fpr));
+				if (res)
+					goto fault;
 
-			/*
-			 * Update the hardware register if it is in use by the
-			 * task in this quantum, in order to avoid having to
-			 * save & restore the whole vector context.
-			 */
-			if (test_thread_flag(TIF_USEDMSA))
-				write_msa_wr(wd, fpr, df);
-
-			preempt_enable();
+				/*
+				 * Update the hardware register if it is in use
+				 * by the task in this quantum, in order to
+				 * avoid having to save & restore the whole
+				 * vector context.
+				 */
+				preempt_disable();
+				if (test_thread_flag(TIF_USEDMSA)) {
+					write_msa_wr(wd, fpr, df);
+					preempted = 0;
+				}
+				preempt_enable();
+			} while (preempted);
 			break;
 
 		case msa_st_op:
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 54d653e..a82c178 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -136,6 +136,27 @@
 #ifdef CONFIG_SMP
 	PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
 #endif
+
+#ifdef CONFIG_RELOCATABLE
+	. = ALIGN(4);
+
+	.data.reloc : {
+		_relocation_start = .;
+		/*
+		 * Space for relocation table
+		 * This needs to be filled so that the
+		 * relocs tool can overwrite the content.
+		 * An invalid value is left at the start of the
+		 * section to abort relocation if the table
+		 * has not been filled in.
+		 */
+		LONG(0xFFFFFFFF);
+		FILL(0);
+		. += CONFIG_RELOCATION_TABLE_SIZE - 4;
+		_relocation_end = .;
+	}
+#endif
+
 #ifdef CONFIG_MIPS_RAW_APPENDED_DTB
 	__appended_dtb = .;
 	/* leave space for appended DTB */
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
index 2a03abb..19fcab7 100644
--- a/arch/mips/kernel/watch.c
+++ b/arch/mips/kernel/watch.c
@@ -15,10 +15,9 @@
  * Install the watch registers for the current thread.	A maximum of
  * four registers are installed although the machine may have more.
  */
-void mips_install_watch_registers(void)
+void mips_install_watch_registers(struct task_struct *t)
 {
-	struct mips3264_watch_reg_state *watches =
-		&current->thread.watch.mips3264;
+	struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264;
 	switch (current_cpu_data.watch_reg_use_cnt) {
 	default:
 		BUG();
@@ -26,16 +25,20 @@
 		write_c0_watchlo3(watches->watchlo[3]);
 		/* Write 1 to the I, R, and W bits to clear them, and
 		   1 to G so all ASIDs are trapped. */
-		write_c0_watchhi3(0x40000007 | watches->watchhi[3]);
+		write_c0_watchhi3(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW |
+				  watches->watchhi[3]);
 	case 3:
 		write_c0_watchlo2(watches->watchlo[2]);
-		write_c0_watchhi2(0x40000007 | watches->watchhi[2]);
+		write_c0_watchhi2(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW |
+				  watches->watchhi[2]);
 	case 2:
 		write_c0_watchlo1(watches->watchlo[1]);
-		write_c0_watchhi1(0x40000007 | watches->watchhi[1]);
+		write_c0_watchhi1(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW |
+				  watches->watchhi[1]);
 	case 1:
 		write_c0_watchlo0(watches->watchlo[0]);
-		write_c0_watchhi0(0x40000007 | watches->watchhi[0]);
+		write_c0_watchhi0(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW |
+				  watches->watchhi[0]);
 	}
 }
 
@@ -52,22 +55,26 @@
 	default:
 		BUG();
 	case 4:
-		watches->watchhi[3] = (read_c0_watchhi3() & 0x0fff);
+		watches->watchhi[3] = (read_c0_watchhi3() &
+				       (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW));
 	case 3:
-		watches->watchhi[2] = (read_c0_watchhi2() & 0x0fff);
+		watches->watchhi[2] = (read_c0_watchhi2() &
+				       (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW));
 	case 2:
-		watches->watchhi[1] = (read_c0_watchhi1() & 0x0fff);
+		watches->watchhi[1] = (read_c0_watchhi1() &
+				       (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW));
 	case 1:
-		watches->watchhi[0] = (read_c0_watchhi0() & 0x0fff);
+		watches->watchhi[0] = (read_c0_watchhi0() &
+				       (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW));
 	}
 	if (current_cpu_data.watch_reg_use_cnt == 1 &&
-	    (watches->watchhi[0] & 7) == 0) {
+	    (watches->watchhi[0] & MIPS_WATCHHI_IRW) == 0) {
 		/* Pathological case of release 1 architecture that
 		 * doesn't set the condition bits.  We assume that
 		 * since we got here, the watch condition was met and
 		 * signal that the conditions requested in watchlo
 		 * were met.  */
-		watches->watchhi[0] |= (watches->watchlo[0] & 7);
+		watches->watchhi[0] |= (watches->watchlo[0] & MIPS_WATCHHI_IRW);
 	}
  }
 
@@ -110,86 +117,86 @@
 	 * Check which of the I,R and W bits are supported, then
 	 * disable the register.
 	 */
-	write_c0_watchlo0(7);
+	write_c0_watchlo0(MIPS_WATCHLO_IRW);
 	back_to_back_c0_hazard();
 	t = read_c0_watchlo0();
 	write_c0_watchlo0(0);
-	c->watch_reg_masks[0] = t & 7;
+	c->watch_reg_masks[0] = t & MIPS_WATCHLO_IRW;
 
 	/* Write the mask bits and read them back to determine which
 	 * can be used. */
 	c->watch_reg_count = 1;
 	c->watch_reg_use_cnt = 1;
 	t = read_c0_watchhi0();
-	write_c0_watchhi0(t | 0xff8);
+	write_c0_watchhi0(t | MIPS_WATCHHI_MASK);
 	back_to_back_c0_hazard();
 	t = read_c0_watchhi0();
-	c->watch_reg_masks[0] |= (t & 0xff8);
-	if ((t & 0x80000000) == 0)
+	c->watch_reg_masks[0] |= (t & MIPS_WATCHHI_MASK);
+	if ((t & MIPS_WATCHHI_M) == 0)
 		return;
 
-	write_c0_watchlo1(7);
+	write_c0_watchlo1(MIPS_WATCHLO_IRW);
 	back_to_back_c0_hazard();
 	t = read_c0_watchlo1();
 	write_c0_watchlo1(0);
-	c->watch_reg_masks[1] = t & 7;
+	c->watch_reg_masks[1] = t & MIPS_WATCHLO_IRW;
 
 	c->watch_reg_count = 2;
 	c->watch_reg_use_cnt = 2;
 	t = read_c0_watchhi1();
-	write_c0_watchhi1(t | 0xff8);
+	write_c0_watchhi1(t | MIPS_WATCHHI_MASK);
 	back_to_back_c0_hazard();
 	t = read_c0_watchhi1();
-	c->watch_reg_masks[1] |= (t & 0xff8);
-	if ((t & 0x80000000) == 0)
+	c->watch_reg_masks[1] |= (t & MIPS_WATCHHI_MASK);
+	if ((t & MIPS_WATCHHI_M) == 0)
 		return;
 
-	write_c0_watchlo2(7);
+	write_c0_watchlo2(MIPS_WATCHLO_IRW);
 	back_to_back_c0_hazard();
 	t = read_c0_watchlo2();
 	write_c0_watchlo2(0);
-	c->watch_reg_masks[2] = t & 7;
+	c->watch_reg_masks[2] = t & MIPS_WATCHLO_IRW;
 
 	c->watch_reg_count = 3;
 	c->watch_reg_use_cnt = 3;
 	t = read_c0_watchhi2();
-	write_c0_watchhi2(t | 0xff8);
+	write_c0_watchhi2(t | MIPS_WATCHHI_MASK);
 	back_to_back_c0_hazard();
 	t = read_c0_watchhi2();
-	c->watch_reg_masks[2] |= (t & 0xff8);
-	if ((t & 0x80000000) == 0)
+	c->watch_reg_masks[2] |= (t & MIPS_WATCHHI_MASK);
+	if ((t & MIPS_WATCHHI_M) == 0)
 		return;
 
-	write_c0_watchlo3(7);
+	write_c0_watchlo3(MIPS_WATCHLO_IRW);
 	back_to_back_c0_hazard();
 	t = read_c0_watchlo3();
 	write_c0_watchlo3(0);
-	c->watch_reg_masks[3] = t & 7;
+	c->watch_reg_masks[3] = t & MIPS_WATCHLO_IRW;
 
 	c->watch_reg_count = 4;
 	c->watch_reg_use_cnt = 4;
 	t = read_c0_watchhi3();
-	write_c0_watchhi3(t | 0xff8);
+	write_c0_watchhi3(t | MIPS_WATCHHI_MASK);
 	back_to_back_c0_hazard();
 	t = read_c0_watchhi3();
-	c->watch_reg_masks[3] |= (t & 0xff8);
-	if ((t & 0x80000000) == 0)
+	c->watch_reg_masks[3] |= (t & MIPS_WATCHHI_MASK);
+	if ((t & MIPS_WATCHHI_M) == 0)
 		return;
 
 	/* We use at most 4, but probe and report up to 8. */
 	c->watch_reg_count = 5;
 	t = read_c0_watchhi4();
-	if ((t & 0x80000000) == 0)
+	if ((t & MIPS_WATCHHI_M) == 0)
 		return;
 
 	c->watch_reg_count = 6;
 	t = read_c0_watchhi5();
-	if ((t & 0x80000000) == 0)
+	if ((t & MIPS_WATCHHI_M) == 0)
 		return;
 
 	c->watch_reg_count = 7;
 	t = read_c0_watchhi6();
-	if ((t & 0x80000000) == 0)
+	if ((t & MIPS_WATCHHI_M) == 0)
 		return;
 
 	c->watch_reg_count = 8;
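Review note on the watch.c conversion above: the magic numbers are replaced
one-for-one by named WatchHi/WatchLo bits. For reference, a stand-alone
sketch with the values inferred from the constants they replace (the macro
names match the patch; the listing itself is illustrative only):

    /* Illustrative only: WatchHi/WatchLo bits behind the old constants. */
    #include <stdio.h>

    #define MIPS_WATCHHI_M     (1u << 31)   /* was 0x80000000: another watch register follows */
    #define MIPS_WATCHHI_G     (1u << 30)   /* was the 0x40000000 part of 0x40000007: match any ASID */
    #define MIPS_WATCHHI_MASK  0x00000ff8u  /* was 0xff8: address mask bits */
    #define MIPS_WATCHHI_IRW   0x00000007u  /* was 0x7: I, R and W condition bits */
    #define MIPS_WATCHLO_IRW   0x00000007u  /* was 7 in the WatchLo probes */

    int main(void)
    {
            printf("%#x\n", MIPS_WATCHHI_G | MIPS_WATCHHI_IRW);     /* 0x40000007 */
            printf("%#x\n", MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW);  /* 0xfff, the old save mask */
            return 0;
    }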
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index b37954c..396df6e 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -302,12 +302,31 @@
  */
 static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
 {
-	ktime_t expires;
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	ktime_t expires, threshold;
+	uint32_t count, compare;
 	int running;
 
-	/* Is the hrtimer pending? */
+	/* Calculate the biased and scaled guest CP0_Count */
+	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+	compare = kvm_read_c0_guest_compare(cop0);
+
+	/*
+	 * Find whether CP0_Count has reached the closest timer interrupt. If
+	 * not, we shouldn't inject it.
+	 */
+	if ((int32_t)(count - compare) < 0)
+		return count;
+
+	/*
+	 * The CP0_Count we're going to return has already reached the closest
+	 * timer interrupt. Quickly check if it really is a new interrupt by
+	 * looking at whether the interval until the hrtimer expiry time is
+	 * less than 1/4 of the timer period.
+	 */
 	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
-	if (ktime_compare(now, expires) >= 0) {
+	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
+	if (ktime_before(expires, threshold)) {
 		/*
 		 * Cancel it while we handle it so there's no chance of
 		 * interference with the timeout handler.
@@ -329,8 +348,7 @@
 		}
 	}
 
-	/* Return the biased and scaled guest CP0_Count */
-	return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+	return count;
 }
 
 /**
@@ -420,32 +438,6 @@
 }
 
 /**
- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
- * @vcpu:	Virtual CPU.
- *
- * Recalculates and updates the expiry time of the hrtimer. This can be used
- * after timer parameters have been altered which do not depend on the time that
- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
- * kvm_mips_resume_hrtimer() are used directly).
- *
- * It is guaranteed that no timer interrupts will be lost in the process.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
-{
-	ktime_t now;
-	uint32_t count;
-
-	/*
-	 * freeze_hrtimer takes care of a timer interrupts <= count, and
-	 * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
-	 */
-	now = kvm_mips_freeze_hrtimer(vcpu, &count);
-	kvm_mips_resume_hrtimer(vcpu, now, count);
-}
-
-/**
  * kvm_mips_write_count() - Modify the count and update timer.
  * @vcpu:	Virtual CPU.
  * @count:	Guest CP0_Count value to set.
@@ -540,23 +532,42 @@
  * kvm_mips_write_compare() - Modify compare and update timer.
  * @vcpu:	Virtual CPU.
  * @compare:	New CP0_Compare value.
+ * @ack:	Whether to acknowledge timer interrupt.
  *
  * Update CP0_Compare to a new value and update the timeout.
+ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
+ * any pending timer interrupt is preserved.
  */
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int dc;
+	u32 old_compare = kvm_read_c0_guest_compare(cop0);
+	ktime_t now;
+	uint32_t count;
 
 	/* if unchanged, must just be an ack */
-	if (kvm_read_c0_guest_compare(cop0) == compare)
+	if (old_compare == compare) {
+		if (!ack)
+			return;
+		kvm_mips_callbacks->dequeue_timer_int(vcpu);
+		kvm_write_c0_guest_compare(cop0, compare);
 		return;
+	}
 
-	/* Update compare */
+	/* freeze_hrtimer() takes care of timer interrupts <= count */
+	dc = kvm_mips_count_disabled(vcpu);
+	if (!dc)
+		now = kvm_mips_freeze_hrtimer(vcpu, &count);
+
+	if (ack)
+		kvm_mips_callbacks->dequeue_timer_int(vcpu);
+
 	kvm_write_c0_guest_compare(cop0, compare);
 
-	/* Update timeout if count enabled */
-	if (!kvm_mips_count_disabled(vcpu))
-		kvm_mips_update_hrtimer(vcpu);
+	/* resume_hrtimer() takes care of timer interrupts > count */
+	if (!dc)
+		kvm_mips_resume_hrtimer(vcpu, now, count);
 }
 
 /**
@@ -1068,15 +1079,15 @@
 					kvm_read_c0_guest_ebase(cop0));
 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
 				uint32_t nasid =
-					vcpu->arch.gprs[rt] & ASID_MASK;
+					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
 				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
 				    ((kvm_read_c0_guest_entryhi(cop0) &
-				      ASID_MASK) != nasid)) {
+				      KVM_ENTRYHI_ASID) != nasid)) {
 					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
 						kvm_read_c0_guest_entryhi(cop0)
-						& ASID_MASK,
+						& KVM_ENTRYHI_ASID,
 						vcpu->arch.gprs[rt]
-						& ASID_MASK);
+						& KVM_ENTRYHI_ASID);
 
 					/* Blow away the shadow host TLBs */
 					kvm_mips_flush_host_tlb(1);
@@ -1095,9 +1106,9 @@
 
 				/* If we are writing to COMPARE */
 				/* Clear pending timer interrupt, if any */
-				kvm_mips_callbacks->dequeue_timer_int(vcpu);
 				kvm_mips_write_compare(vcpu,
-						       vcpu->arch.gprs[rt]);
+						       vcpu->arch.gprs[rt],
+						       true);
 			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
 				unsigned int old_val, val, change;
 
@@ -1620,7 +1631,7 @@
 		 */
 		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
 						  (kvm_read_c0_guest_entryhi
-						   (cop0) & ASID_MASK));
+						   (cop0) & KVM_ENTRYHI_ASID));
 
 		if (index < 0) {
 			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
@@ -1786,7 +1797,7 @@
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	unsigned long entryhi = (vcpu->arch.  host_cp0_badvaddr & VPN2_MASK) |
-				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1833,7 +1844,7 @@
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	unsigned long entryhi =
 		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+		(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1878,7 +1889,7 @@
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1922,7 +1933,7 @@
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+		(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
 		/* save old pc */
@@ -1967,7 +1978,7 @@
 #ifdef DEBUG
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 	int index;
 
 	/* If address not in the guest TLB, then we are in trouble */
@@ -1994,7 +2005,7 @@
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
 
 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
@@ -2569,7 +2580,8 @@
 	 */
 	index = kvm_mips_guest_tlb_lookup(vcpu,
 		      (va & VPN2_MASK) |
-		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
+		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
+		       KVM_ENTRYHI_ASID));
 	if (index < 0) {
 		if (exccode == EXCCODE_TLBL) {
 			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
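Review note on kvm_mips_read_count_running() above: CP0_Count is a 32-bit
counter that wraps, so "(int32_t)(count - compare) < 0" is the wrap-safe way
to ask whether Count has reached Compare. A stand-alone demonstration of that
arithmetic (values chosen purely for illustration):

    /* Illustrative only: wrap-safe "has Count reached Compare?" check. */
    #include <stdint.h>
    #include <stdio.h>

    static int count_reached_compare(uint32_t count, uint32_t compare)
    {
            /* the signed view of the modular difference handles wrap-around */
            return (int32_t)(count - compare) >= 0;
    }

    int main(void)
    {
            uint32_t compare = 0xfffffff0u;  /* just below the wrap point */

            printf("%d\n", count_reached_compare(0xffffffe0u, compare)); /* 0: not yet */
            printf("%d\n", count_reached_compare(0xfffffff5u, compare)); /* 1: reached */
            printf("%d\n", count_reached_compare(0x00000010u, compare)); /* 1: reached, after wrap */
            return 0;
    }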
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 81687ab..3ef0300 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -32,7 +32,6 @@
     EXPORT(x);
 
 /* Overload, Danger Will Robinson!! */
-#define PT_HOST_ASID        PT_BVADDR
 #define PT_HOST_USERLOCAL   PT_EPC
 
 #define CP0_DDATA_LO        $28,3
@@ -49,45 +48,18 @@
  * a1: vcpu
  */
 	.set	noreorder
-	.set	noat
 
 FEXPORT(__kvm_mips_vcpu_run)
 	/* k0/k1 not being used in host kernel context */
 	INT_ADDIU k1, sp, -PT_SIZE
-	LONG_S	$0, PT_R0(k1)
-	LONG_S	$1, PT_R1(k1)
-	LONG_S	$2, PT_R2(k1)
-	LONG_S	$3, PT_R3(k1)
-
-	LONG_S	$4, PT_R4(k1)
-	LONG_S	$5, PT_R5(k1)
-	LONG_S	$6, PT_R6(k1)
-	LONG_S	$7, PT_R7(k1)
-
-	LONG_S	$8,  PT_R8(k1)
-	LONG_S	$9,  PT_R9(k1)
-	LONG_S	$10, PT_R10(k1)
-	LONG_S	$11, PT_R11(k1)
-	LONG_S	$12, PT_R12(k1)
-	LONG_S	$13, PT_R13(k1)
-	LONG_S	$14, PT_R14(k1)
-	LONG_S	$15, PT_R15(k1)
 	LONG_S	$16, PT_R16(k1)
 	LONG_S	$17, PT_R17(k1)
-
 	LONG_S	$18, PT_R18(k1)
 	LONG_S	$19, PT_R19(k1)
 	LONG_S	$20, PT_R20(k1)
 	LONG_S	$21, PT_R21(k1)
 	LONG_S	$22, PT_R22(k1)
 	LONG_S	$23, PT_R23(k1)
-	LONG_S	$24, PT_R24(k1)
-	LONG_S	$25, PT_R25(k1)
-
-	/*
-	 * XXXKYMA k0/k1 not saved, not being used if we got here through
-	 * an ioctl()
-	 */
 
 	LONG_S	$28, PT_R28(k1)
 	LONG_S	$29, PT_R29(k1)
@@ -104,11 +76,6 @@
 	mfc0	v0, CP0_STATUS
 	LONG_S	v0, PT_STATUS(k1)
 
-	/* Save host ASID, shove it into the BVADDR location */
-	mfc0	v1, CP0_ENTRYHI
-	andi	v1, 0xff
-	LONG_S	v1, PT_HOST_ASID(k1)
-
 	/* Save DDATA_LO, will be used to store pointer to vcpu */
 	mfc0	v1, CP0_DDATA_LO
 	LONG_S	v1, PT_HOST_USERLOCAL(k1)
@@ -170,13 +137,21 @@
 	INT_SLL	t2, t2, 2                   /* x4 */
 	REG_ADDU t3, t1, t2
 	LONG_L	k0, (t3)
-	andi	k0, k0, 0xff
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+	li	t3, CPUINFO_SIZE/4
+	mul	t2, t2, t3		/* x sizeof(struct cpuinfo_mips)/4 */
+	LONG_L	t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
+	and	k0, k0, t2
+#else
+	andi	k0, k0, MIPS_ENTRYHI_ASID
+#endif
 	mtc0	k0, CP0_ENTRYHI
 	ehb
 
 	/* Disable RDHWR access */
 	mtc0	zero, CP0_HWRENA
 
+	.set	noat
 	/* Now load up the Guest Context from VCPU */
 	LONG_L	$1, VCPU_R1(k1)
 	LONG_L	$2, VCPU_R2(k1)
@@ -288,6 +263,8 @@
 	LONG_S	$30, VCPU_R30(k1)
 	LONG_S	$31, VCPU_R31(k1)
 
+	.set at
+
 	/* We need to save hi/lo and restore them on the way out */
 	mfhi	t0
 	LONG_S	t0, VCPU_HI(k1)
@@ -339,9 +316,7 @@
 	/* load up the host EBASE */
 	mfc0	v0, CP0_STATUS
 
-	.set	at
 	or	k0, v0, ST0_BEV
-	.set	noat
 
 	mtc0	k0, CP0_STATUS
 	ehb
@@ -353,7 +328,6 @@
 	 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
 	 * trigger FPE for pending exceptions.
 	 */
-	.set	at
 	and	v1, v0, ST0_CU1
 	beqz	v1, 1f
 	 nop
@@ -363,7 +337,6 @@
 	sw	t0, VCPU_FCR31(k1)
 	ctc1	zero,fcr31
 	.set	pop
-	.set	noat
 1:
 
 #ifdef CONFIG_CPU_HAS_MSA
@@ -386,10 +359,8 @@
 #endif
 
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
-	.set	at
 	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
 	or	v0, v0, ST0_CU0
-	.set	noat
 	mtc0	v0, CP0_STATUS
 	ehb
 
@@ -456,18 +427,14 @@
 
 	/* Switch EBASE back to the one used by KVM */
 	mfc0	v1, CP0_STATUS
-	.set	at
 	or	k0, v1, ST0_BEV
-	.set	noat
 	mtc0	k0, CP0_STATUS
 	ehb
 	mtc0	t0, CP0_EBASE
 
 	/* Setup status register for running guest in UM */
-	.set	at
 	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
 	and	v1, v1, ~(ST0_CU0 | ST0_MX)
-	.set	noat
 	mtc0	v1, CP0_STATUS
 	ehb
 
@@ -489,13 +456,21 @@
 	INT_SLL	t2, t2, 2		/* x4 */
 	REG_ADDU t3, t1, t2
 	LONG_L	k0, (t3)
-	andi	k0, k0, 0xff
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+	li	t3, CPUINFO_SIZE/4
+	mul	t2, t2, t3		/* x sizeof(struct cpuinfo_mips)/4 */
+	LONG_L	t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
+	and	k0, k0, t2
+#else
+	andi	k0, k0, MIPS_ENTRYHI_ASID
+#endif
 	mtc0	k0, CP0_ENTRYHI
 	ehb
 
 	/* Disable RDHWR access */
 	mtc0	zero, CP0_HWRENA
 
+	.set	noat
 	/* load the guest context from VCPU and return */
 	LONG_L	$0, VCPU_R0(k1)
 	LONG_L	$1, VCPU_R1(k1)
@@ -541,6 +516,7 @@
 	LONG_L	k1, VCPU_R27(k1)
 
 	eret
+	.set	at
 
 __kvm_mips_return_to_host:
 	/* EBASE is already pointing to Linux */
@@ -551,16 +527,6 @@
 	LONG_L	k0, PT_HOST_USERLOCAL(k1)
 	mtc0	k0, CP0_DDATA_LO
 
-	/* Restore host ASID */
-	LONG_L	k0, PT_HOST_ASID(sp)
-	andi	k0, 0xff
-	mtc0	k0,CP0_ENTRYHI
-	ehb
-
-	/* Load context saved on the host stack */
-	LONG_L	$0, PT_R0(k1)
-	LONG_L	$1, PT_R1(k1)
-
 	/*
 	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
 	 * to recover the err code
@@ -568,19 +534,7 @@
 	INT_SRA	k0, v0, 2
 	move	$2, k0
 
-	LONG_L	$3, PT_R3(k1)
-	LONG_L	$4, PT_R4(k1)
-	LONG_L	$5, PT_R5(k1)
-	LONG_L	$6, PT_R6(k1)
-	LONG_L	$7, PT_R7(k1)
-	LONG_L	$8, PT_R8(k1)
-	LONG_L	$9, PT_R9(k1)
-	LONG_L	$10, PT_R10(k1)
-	LONG_L	$11, PT_R11(k1)
-	LONG_L	$12, PT_R12(k1)
-	LONG_L	$13, PT_R13(k1)
-	LONG_L	$14, PT_R14(k1)
-	LONG_L	$15, PT_R15(k1)
+	/* Load context saved on the host stack */
 	LONG_L	$16, PT_R16(k1)
 	LONG_L	$17, PT_R17(k1)
 	LONG_L	$18, PT_R18(k1)
@@ -589,10 +543,6 @@
 	LONG_L	$21, PT_R21(k1)
 	LONG_L	$22, PT_R22(k1)
 	LONG_L	$23, PT_R23(k1)
-	LONG_L	$24, PT_R24(k1)
-	LONG_L	$25, PT_R25(k1)
-
-	/* Host k0/k1 were not saved */
 
 	LONG_L	$28, PT_R28(k1)
 	LONG_L	$29, PT_R29(k1)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 70ef1a4..dc052fb 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -56,6 +56,7 @@
 	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
+	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
 	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
 	{NULL}
 };
@@ -1079,7 +1080,8 @@
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 	case KVM_CAP_MIPS_FPU:
-		r = !!cpu_has_fpu;
+		/* We don't handle systems with inconsistent cpu_has_fpu */
+		r = !!raw_cpu_has_fpu;
 		break;
 	case KVM_CAP_MIPS_MSA:
 		/*
@@ -1555,8 +1557,10 @@
 
 		/* Disable MSA & FPU */
 		disable_msa();
-		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
 			clear_c0_status(ST0_CU1 | ST0_FR);
+			disable_fpu_hazard();
+		}
 		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
 	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
 		set_c0_status(ST0_CU1);
@@ -1567,6 +1571,7 @@
 
 		/* Disable FPU */
 		clear_c0_status(ST0_CU1 | ST0_FR);
+		disable_fpu_hazard();
 	}
 	preempt_enable();
 }
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index a08c439..ed021ae 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -49,12 +49,18 @@
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_kernel_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_user_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
 inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
@@ -78,7 +84,8 @@
 	old_pagemask = read_c0_pagemask();
 
 	kvm_info("HOST TLBs:\n");
-	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
+		 cpu_asid_mask(&current_cpu_data));
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -268,6 +275,7 @@
 	int even;
 	struct kvm *kvm = vcpu->kvm;
 	const int flush_dcache_mask = 0;
+	int ret;
 
 	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
 		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
@@ -299,14 +307,18 @@
 		pfn1 = kvm->arch.guest_pmap[gfn];
 	}
 
-	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
 	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
 		   (1 << 2) | (0x1 << 1);
 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
 		   (1 << 2) | (0x1 << 1);
 
-	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-				       flush_dcache_mask);
+	preempt_disable();
+	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+				      flush_dcache_mask);
+	preempt_enable();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
 
@@ -361,6 +373,7 @@
 	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
 	struct kvm *kvm = vcpu->kvm;
 	kvm_pfn_t pfn0, pfn1;
+	int ret;
 
 	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
 		pfn0 = 0;
@@ -387,9 +400,6 @@
 		*hpa1 = pfn1 << PAGE_SHIFT;
 
 	/* Get attributes from the Guest TLB */
-	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
-					       kvm_mips_get_kernel_asid(vcpu) :
-					       kvm_mips_get_user_asid(vcpu));
 	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
 		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
@@ -398,8 +408,15 @@
 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
 		  tlb->tlb_lo0, tlb->tlb_lo1);
 
-	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-				       tlb->tlb_mask);
+	preempt_disable();
+	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+					       kvm_mips_get_kernel_asid(vcpu) :
+					       kvm_mips_get_user_asid(vcpu));
+	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+				      tlb->tlb_mask);
+	preempt_enable();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
 
@@ -564,15 +581,15 @@
 {
 	unsigned long asid = asid_cache(cpu);
 
-	asid += ASID_INC;
-	if (!(asid & ASID_MASK)) {
+	asid += cpu_asid_inc();
+	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 
 		kvm_local_flush_tlb_all();      /* start new asid cycle */
 
 		if (!asid)      /* fix version if needed */
-			asid = ASID_FIRST_VERSION;
+			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
@@ -627,17 +644,18 @@
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 	unsigned long flags;
 	int newasid = 0;
 
 	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
 
-	/* Alocate new kernel and user ASIDs if needed */
+	/* Allocate new kernel and user ASIDs if needed */
 
 	local_irq_save(flags);
 
 	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
-							ASID_VERSION_MASK) {
+						asid_version_mask(cpu)) {
 		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
 		vcpu->arch.guest_kernel_asid[cpu] =
 		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
@@ -672,7 +690,7 @@
 		 */
 		if (current->flags & PF_VCPU) {
 			write_c0_entryhi(vcpu->arch.
-					 preempt_entryhi & ASID_MASK);
+					 preempt_entryhi & asid_mask);
 			ehb();
 		}
 	} else {
@@ -687,11 +705,11 @@
 			if (KVM_GUEST_KERNEL_MODE(vcpu))
 				write_c0_entryhi(vcpu->arch.
 						 guest_kernel_asid[cpu] &
-						 ASID_MASK);
+						 asid_mask);
 			else
 				write_c0_entryhi(vcpu->arch.
 						 guest_user_asid[cpu] &
-						 ASID_MASK);
+						 asid_mask);
 			ehb();
 		}
 	}
@@ -721,7 +739,7 @@
 	kvm_mips_callbacks->vcpu_get_regs(vcpu);
 
 	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     ASID_VERSION_MASK)) {
+	     asid_version_mask(cpu))) {
 		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
 			  cpu_context(cpu, current->mm));
 		drop_mmu_context(current->mm, cpu);
@@ -748,7 +766,8 @@
 			inst = *(opc);
 		} else {
 			vpn2 = (unsigned long) opc & VPN2_MASK;
-			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
+			asid = kvm_read_c0_guest_entryhi(cop0) &
+						KVM_ENTRYHI_ASID;
 			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
 			if (index < 0) {
 				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
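Review note on the kvm_get_new_mmu_context() hunk above: the hardware ASID
lives in the low bits of asid_cache and the bits above the (now per-CPU) mask
act as a generation counter, so exhausting the ASID space flushes the TLB and
starts a new generation. A simplified user-space model of that allocator
follows (the 8-bit mask and the flush stub are assumptions for illustration):

    /* Illustrative model of versioned ASID allocation. */
    #include <stdio.h>

    #define ASID_MASK           0xffUL            /* assumed 8-bit ASID space */
    #define ASID_INC            1UL
    #define ASID_FIRST_VERSION  (ASID_MASK + 1)

    static unsigned long asid_cache = ASID_FIRST_VERSION;

    static void flush_tlb_all(void)
    {
            printf("  (ASID space exhausted: flush TLB, new generation)\n");
    }

    static unsigned long get_new_asid(void)
    {
            unsigned long asid = asid_cache + ASID_INC;

            if (!(asid & ASID_MASK)) {      /* ran off the end of the ASID space */
                    flush_tlb_all();
                    if (!asid)              /* the version counter itself wrapped */
                            asid = ASID_FIRST_VERSION;
            }
            return asid_cache = asid;
    }

    int main(void)
    {
            asid_cache = ASID_FIRST_VERSION + ASID_MASK - 1;  /* nearly exhausted */

            for (int i = 0; i < 3; i++) {
                    unsigned long a = get_new_asid();

                    printf("asid %#lx = version %#lx + hw asid %#lx\n",
                           a, a & ~ASID_MASK, a & ASID_MASK);
            }
            return 0;
    }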
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index ad98800..6ba0faf 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -500,12 +500,13 @@
 	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
 
 	/*
-	 * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5)
+	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
 	 */
 	kvm_write_c0_guest_intctl(cop0, 0xFC000000);
 
 	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
-	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
+				       (vcpu_id & MIPS_EBASE_CPUNUM));
 
 	return 0;
 }
@@ -546,7 +547,7 @@
 		kvm_mips_write_count(vcpu, v);
 		break;
 	case KVM_REG_MIPS_CP0_COMPARE:
-		kvm_mips_write_compare(vcpu, v);
+		kvm_mips_write_compare(vcpu, v, false);
 		break;
 	case KVM_REG_MIPS_CP0_CAUSE:
 		/*
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
index e10d333..177769d 100644
--- a/arch/mips/lantiq/Kconfig
+++ b/arch/mips/lantiq/Kconfig
@@ -25,7 +25,17 @@
 endchoice
 
 choice
-	prompt "Devicetree"
+	prompt "Built-in device tree"
+	help
+	  Legacy bootloaders do not pass a DTB pointer to the kernel, so
+	  if a "wrapper" is not being used, the kernel will need to include
+	  a device tree that matches the target board.
+
+	  The builtin DTB will only be used if the firmware does not supply
+	  a valid DTB.
+
+config LANTIQ_DT_NONE
+	bool "None"
 
 config DT_EASY50712
 	bool "Easy50712"
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile
index 690257a..2718652 100644
--- a/arch/mips/lantiq/Makefile
+++ b/arch/mips/lantiq/Makefile
@@ -1,4 +1,4 @@
-# Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+# Copyright (C) 2010 John Crispin <john@phrozen.org>
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License version 2 as published
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index a0706fd..149f051 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -4,7 +4,7 @@
  *  by the Free Software Foundation.
  *
  * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 #include <linux/io.h>
 #include <linux/export.h>
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
index 7376ce8..e806e04 100644
--- a/arch/mips/lantiq/clk.h
+++ b/arch/mips/lantiq/clk.h
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #ifndef _LTQ_CLK_H__
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
index 9b28d09..44bccae 100644
--- a/arch/mips/lantiq/early_printk.c
+++ b/arch/mips/lantiq/early_printk.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #include <linux/cpu.h>
diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c
index aa94979..75315c0 100644
--- a/arch/mips/lantiq/falcon/prom.c
+++ b/arch/mips/lantiq/falcon/prom.c
@@ -4,7 +4,7 @@
  * by the Free Software Foundation.
  *
  * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
  */
 
 #include <linux/kernel.h>
diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c
index 5682482..7a535d7 100644
--- a/arch/mips/lantiq/falcon/reset.c
+++ b/arch/mips/lantiq/falcon/reset.c
@@ -4,7 +4,7 @@
  * by the Free Software Foundation.
  *
  * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
  */
 
 #include <linux/init.h>
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
index 7edcd49..2a1b302 100644
--- a/arch/mips/lantiq/falcon/sysctrl.c
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -4,7 +4,7 @@
  * by the Free Software Foundation.
  *
  * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
- * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2011 John Crispin <john@phrozen.org>
  */
 
 #include <linux/ioport.h>
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 2e7f60c..ff17669e 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
  * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
  */
 
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 297bcaa..5f693ac7 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #include <linux/export.h>
@@ -65,6 +65,8 @@
 
 void __init plat_mem_setup(void)
 {
+	void *dtb;
+
 	ioport_resource.start = IOPORT_RESOURCE_START;
 	ioport_resource.end = IOPORT_RESOURCE_END;
 	iomem_resource.start = IOMEM_RESOURCE_START;
@@ -72,11 +74,18 @@
 
 	set_io_port_base((unsigned long) KSEG1);
 
+	if (fw_arg0 == -2) /* UHI interface */
+		dtb = (void *)fw_arg1;
+	else if (__dtb_start != __dtb_end)
+		dtb = (void *)__dtb_start;
+	else
+		panic("no dtb found");
+
 	/*
-	 * Load the builtin devicetree. This causes the chosen node to be
+	 * Load the devicetree. This causes the chosen node to be
 	 * parsed resulting in our memory appearing
 	 */
-	__dt_setup_arch(__dtb_start);
+	__dt_setup_arch(dtb);
 }
 
 void __init device_tree_init(void)
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
index bfd2d58..4b6576c 100644
--- a/arch/mips/lantiq/prom.h
+++ b/arch/mips/lantiq/prom.h
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #ifndef _LTQ_PROM_H__
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
index 07f6d5b..41fc30d 100644
--- a/arch/mips/lantiq/xway/clk.c
+++ b/arch/mips/lantiq/xway/clk.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  *  Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
  */
 
diff --git a/arch/mips/lantiq/xway/dcdc.c b/arch/mips/lantiq/xway/dcdc.c
index ae8e930..08f7aba 100644
--- a/arch/mips/lantiq/xway/dcdc.c
+++ b/arch/mips/lantiq/xway/dcdc.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2012 John Crispin <john@phrozen.org>
  *  Copyright (C) 2010 Sameer Ahmad, Lantiq GmbH
  */
 
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 34a116e..cef8117 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -12,7 +12,7 @@
  *   along with this program; if not, write to the Free Software
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
  *
- *   Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ *   Copyright (C) 2011 John Crispin <john@phrozen.org>
  */
 
 #include <linux/init.h>
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
index f1492b2..0f1bbea 100644
--- a/arch/mips/lantiq/xway/gptu.c
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2012 John Crispin <john@phrozen.org>
  *  Copyright (C) 2012 Lantiq GmbH
  */
 
diff --git a/arch/mips/lantiq/xway/prom.c b/arch/mips/lantiq/xway/prom.c
index 8f6e02f..9475b25 100644
--- a/arch/mips/lantiq/xway/prom.c
+++ b/arch/mips/lantiq/xway/prom.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  *  Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
  */
 
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index bc29bb3..83fd65d 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  *  Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
  */
 
@@ -258,7 +258,7 @@
 	return ltq_deassert_device(rcdev, id);
 }
 
-static struct reset_control_ops reset_ops = {
+static const struct reset_control_ops reset_ops = {
 	.reset = ltq_reset_device,
 	.assert = ltq_assert_device,
 	.deassert = ltq_deassert_device,
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 80554e8..236193b 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2011-2012 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2011-2012 John Crispin <john@phrozen.org>
  *  Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
  */
 
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
index d001bc3..4625495 100644
--- a/arch/mips/lantiq/xway/vmmc.c
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2012 John Crispin <john@phrozen.org>
  */
 
 #include <linux/module.h>
diff --git a/arch/mips/lantiq/xway/xrx200_phy_fw.c b/arch/mips/lantiq/xway/xrx200_phy_fw.c
index 199094a..71e518c 100644
--- a/arch/mips/lantiq/xway/xrx200_phy_fw.c
+++ b/arch/mips/lantiq/xway/xrx200_phy_fw.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2012 John Crispin <john@phrozen.org>
  */
 
 #include <linux/delay.h>
@@ -112,6 +112,6 @@
 
 module_platform_driver(xway_phy_driver);
 
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
 MODULE_DESCRIPTION("Lantiq XRX200 PHY Firmware Loader");
 MODULE_LICENSE("GPL");
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 92a3731..0f80b93 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -19,6 +19,8 @@
 
 	pr_info("Index    : %0x\n", read_c0_index());
 	pr_info("PageMask : %0x\n", read_c0_pagemask());
+	if (cpu_has_guestid)
+		pr_info("GuestCtl1: %0x\n", read_c0_guestctl1());
 	pr_info("EntryHi  : %0*lx\n", field, read_c0_entryhi());
 	pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
 	pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
@@ -72,7 +74,10 @@
 {
 	unsigned long s_entryhi, entryhi, asid;
 	unsigned long long entrylo0, entrylo1, pa;
-	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
+	unsigned int s_index, s_pagemask, s_guestctl1 = 0;
+	unsigned int pagemask, guestctl1 = 0, c0, c1, i;
+	unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
+	int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
 #ifdef CONFIG_32BIT
 	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
 	int pwidth = xpa ? 11 : 8;
@@ -86,7 +91,9 @@
 	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
-	asid = s_entryhi & 0xff;
+	asid = s_entryhi & asidmask;
+	if (cpu_has_guestid)
+		s_guestctl1 = read_c0_guestctl1();
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i);
@@ -97,6 +104,8 @@
 		entryhi	 = read_c0_entryhi();
 		entrylo0 = read_c0_entrylo0();
 		entrylo1 = read_c0_entrylo1();
+		if (cpu_has_guestid)
+			guestctl1 = read_c0_guestctl1();
 
 		/* EHINV bit marks entire entry as invalid */
 		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
@@ -115,7 +124,7 @@
 		 * due to duplicate TLB entry.
 		 */
 		if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
-		    (entryhi & 0xff) != asid)
+		    (entryhi & asidmask) != asid)
 			continue;
 
 		/*
@@ -126,15 +135,19 @@
 		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
 		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
 
-		printk("va=%0*lx asid=%02lx\n",
+		printk("va=%0*lx asid=%0*lx",
 		       vwidth, (entryhi & ~0x1fffUL),
-		       entryhi & 0xff);
+		       asidwidth, entryhi & asidmask);
+		if (cpu_has_guestid)
+			printk(" gid=%02lx",
+			       (guestctl1 & MIPS_GCTL1_RID)
+					>> MIPS_GCTL1_RID_SHIFT);
 		/* RI/XI are in awkward places, so mask them off separately */
 		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
 		if (xpa)
 			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
 		pa = (pa << 6) & PAGE_MASK;
-		printk("\t[");
+		printk("\n\t[");
 		if (cpu_has_rixi)
 			printk("ri=%d xi=%d ",
 			       (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
@@ -164,6 +177,8 @@
 	write_c0_entryhi(s_entryhi);
 	write_c0_index(s_index);
 	write_c0_pagemask(s_pagemask);
+	if (cpu_has_guestid)
+		write_c0_guestctl1(s_guestctl1);
 }
 
 void dump_tlb_all(void)
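Review note on the dump_tlb() change above: the printed ASID width is now
derived from the runtime mask as DIV_ROUND_UP(ilog2(asidmask) + 1, 4), i.e.
one hex digit per four bits of mask. A quick stand-alone check of that
arithmetic, using plain C stand-ins for the kernel helpers:

    /* Illustrative only: hex-digit width for a runtime ASID mask. */
    #include <stdio.h>

    static int ilog2_ul(unsigned long v)    /* index of the highest set bit */
    {
            int bit = -1;

            while (v) {
                    v >>= 1;
                    bit++;
            }
            return bit;
    }

    static int asid_width(unsigned long mask)
    {
            return (ilog2_ul(mask) + 1 + 3) / 4;    /* DIV_ROUND_UP(bits, 4) */
    }

    int main(void)
    {
            printf("%d\n", asid_width(0xffUL));     /* 8-bit ASID  -> 2 hex digits */
            printf("%d\n", asid_width(0x3ffUL));    /* 10-bit ASID -> 3 hex digits */
            return 0;
    }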
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 8f0019a..18a1ccd 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -228,10 +228,12 @@
 	.hidden __memset
 	.endif
 
+#ifdef CONFIG_CPU_MIPSR6
 .Lbyte_fixup\@:
 	PTR_SUBU	a2, $0, t0
 	jr		ra
 	 PTR_ADDIU	a2, 1
+#endif /* CONFIG_CPU_MIPSR6 */
 
 .Lfirst_fixup\@:
 	jr	ra
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index cfcbb52..744f4a7 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -29,9 +29,10 @@
 {
 	int	i;
 	unsigned int asid;
-	unsigned long entryhi, entrylo0;
+	unsigned long entryhi, entrylo0, asid_mask;
 
-	asid = read_c0_entryhi() & ASID_MASK;
+	asid_mask = cpu_asid_mask(&current_cpu_data);
+	asid = read_c0_entryhi() & asid_mask;
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i<<8);
@@ -46,7 +47,7 @@
 		/* Unused entries have a virtual address of KSEG0.  */
 		if ((entryhi & PAGE_MASK) != KSEG0 &&
 		    (entrylo0 & R3K_ENTRYLO_G ||
-		     (entryhi & ASID_MASK) == asid)) {
+		     (entryhi & asid_mask) == asid)) {
 			/*
 			 * Only print entries in use
 			 */
@@ -55,7 +56,7 @@
 			printk("va=%08lx asid=%08lx"
 			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
 			       entryhi & PAGE_MASK,
-			       entryhi & ASID_MASK,
+			       entryhi & asid_mask,
 			       entrylo0 & PAGE_MASK,
 			       (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
 			       (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index ddf1d4c..f2c714d 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ * Copyright (c) 2011-2016 Zhang, Keguang <keguang.zhang@gmail.com>
  *
  * This program is free software; you can redistribute	it and/or modify it
  * under  the terms of	the GNU General	 Public License as published by the
@@ -10,14 +10,17 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
+#include <linux/mtd/partitions.h>
+#include <linux/sizes.h>
 #include <linux/phy.h>
 #include <linux/serial_8250.h>
 #include <linux/stmmac.h>
 #include <linux/usb/ehci_pdriver.h>
-#include <asm-generic/sizes.h>
 
-#include <cpufreq.h>
 #include <loongson1.h>
+#include <cpufreq.h>
+#include <dma.h>
+#include <nand.h>
 
 /* 8250/16550 compatible UART */
 #define LS1X_UART(_id)						\
@@ -45,7 +48,7 @@
 	},
 };
 
-void __init ls1x_serial_setup(struct platform_device *pdev)
+void __init ls1x_serial_set_uartclk(struct platform_device *pdev)
 {
 	struct clk *clk;
 	struct plat_serial8250_port *p;
@@ -77,6 +80,42 @@
 	},
 };
 
+/* DMA */
+static struct resource ls1x_dma_resources[] = {
+	[0] = {
+		.start = LS1X_DMAC_BASE,
+		.end = LS1X_DMAC_BASE + SZ_4 - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = LS1X_DMA0_IRQ,
+		.end = LS1X_DMA0_IRQ,
+		.flags = IORESOURCE_IRQ,
+	},
+	[2] = {
+		.start = LS1X_DMA1_IRQ,
+		.end = LS1X_DMA1_IRQ,
+		.flags = IORESOURCE_IRQ,
+	},
+	[3] = {
+		.start = LS1X_DMA2_IRQ,
+		.end = LS1X_DMA2_IRQ,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+struct platform_device ls1x_dma_pdev = {
+	.name		= "ls1x-dma",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(ls1x_dma_resources),
+	.resource	= ls1x_dma_resources,
+};
+
+void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata)
+{
+	ls1x_dma_pdev.dev.platform_data = pdata;
+}
+
 /* Synopsys Ethernet GMAC */
 static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
 	.phy_mask	= 0,
@@ -198,6 +237,64 @@
 	},
 };
 
+/* GPIO */
+static struct resource ls1x_gpio0_resources[] = {
+	[0] = {
+		.start	= LS1X_GPIO0_BASE,
+		.end	= LS1X_GPIO0_BASE + SZ_4 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+struct platform_device ls1x_gpio0_pdev = {
+	.name		= "ls1x-gpio",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(ls1x_gpio0_resources),
+	.resource	= ls1x_gpio0_resources,
+};
+
+static struct resource ls1x_gpio1_resources[] = {
+	[0] = {
+		.start	= LS1X_GPIO1_BASE,
+		.end	= LS1X_GPIO1_BASE + SZ_4 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+struct platform_device ls1x_gpio1_pdev = {
+	.name		= "ls1x-gpio",
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(ls1x_gpio1_resources),
+	.resource	= ls1x_gpio1_resources,
+};
+
+/* NAND Flash */
+static struct resource ls1x_nand_resources[] = {
+	[0] = {
+		.start	= LS1X_NAND_BASE,
+		.end	= LS1X_NAND_BASE + SZ_32 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		/* DMA channel 0 is dedicated to NAND */
+		.start	= LS1X_DMA_CHANNEL0,
+		.end	= LS1X_DMA_CHANNEL0,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device ls1x_nand_pdev = {
+	.name		= "ls1x-nand",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(ls1x_nand_resources),
+	.resource	= ls1x_nand_resources,
+};
+
+void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata)
+{
+	ls1x_nand_pdev.dev.platform_data = pdata;
+}
+
 /* USB EHCI */
 static u64 ls1x_ehci_dmamask = DMA_BIT_MASK(32);
 
diff --git a/arch/mips/loongson32/common/reset.c b/arch/mips/loongson32/common/reset.c
index c41e4ca..8a1d9cc 100644
--- a/arch/mips/loongson32/common/reset.c
+++ b/arch/mips/loongson32/common/reset.c
@@ -9,12 +9,13 @@
 
 #include <linux/io.h>
 #include <linux/pm.h>
+#include <linux/sizes.h>
 #include <asm/idle.h>
 #include <asm/reboot.h>
 
 #include <loongson1.h>
 
-static void __iomem *wdt_base;
+static void __iomem *wdt_reg_base;
 
 static void ls1x_halt(void)
 {
@@ -26,9 +27,9 @@
 
 static void ls1x_restart(char *command)
 {
-	__raw_writel(0x1, wdt_base + WDT_EN);
-	__raw_writel(0x1, wdt_base + WDT_TIMER);
-	__raw_writel(0x1, wdt_base + WDT_SET);
+	__raw_writel(0x1, wdt_reg_base + WDT_EN);
+	__raw_writel(0x1, wdt_reg_base + WDT_TIMER);
+	__raw_writel(0x1, wdt_reg_base + WDT_SET);
 
 	ls1x_halt();
 }
@@ -40,8 +41,8 @@
 
 static int __init ls1x_reboot_setup(void)
 {
-	wdt_base = ioremap_nocache(LS1X_WDT_BASE, 0x0f);
-	if (!wdt_base)
+	wdt_reg_base = ioremap_nocache(LS1X_WDT_BASE, (SZ_4 + SZ_8));
+	if (!wdt_reg_base)
 		panic("Failed to remap watchdog registers");
 
 	_machine_restart = ls1x_restart;
diff --git a/arch/mips/loongson32/common/time.c b/arch/mips/loongson32/common/time.c
index 0996b02..ff224f0 100644
--- a/arch/mips/loongson32/common/time.c
+++ b/arch/mips/loongson32/common/time.c
@@ -9,6 +9,7 @@
 
 #include <linux/clk.h>
 #include <linux/interrupt.h>
+#include <linux/sizes.h>
 #include <asm/time.h>
 
 #include <loongson1.h>
@@ -35,25 +36,25 @@
 
 DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
 
-static void __iomem *timer_base;
+static void __iomem *timer_reg_base;
 static uint32_t ls1x_jiffies_per_tick;
 
 static inline void ls1x_pwmtimer_set_period(uint32_t period)
 {
-	__raw_writel(period, timer_base + PWM_HRC);
-	__raw_writel(period, timer_base + PWM_LRC);
+	__raw_writel(period, timer_reg_base + PWM_HRC);
+	__raw_writel(period, timer_reg_base + PWM_LRC);
 }
 
 static inline void ls1x_pwmtimer_restart(void)
 {
-	__raw_writel(0x0, timer_base + PWM_CNT);
-	__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+	__raw_writel(0x0, timer_reg_base + PWM_CNT);
+	__raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
 }
 
 void __init ls1x_pwmtimer_init(void)
 {
-	timer_base = ioremap(LS1X_TIMER_BASE, 0xf);
-	if (!timer_base)
+	timer_reg_base = ioremap_nocache(LS1X_TIMER_BASE, SZ_16);
+	if (!timer_reg_base)
 		panic("Failed to remap timer registers");
 
 	ls1x_jiffies_per_tick = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ);
@@ -86,7 +87,7 @@
 	 */
 	jifs = jiffies;
 	/* read the count */
-	count = __raw_readl(timer_base + PWM_CNT);
+	count = __raw_readl(timer_reg_base + PWM_CNT);
 
 	/*
 	 * It's possible for count to appear to go the wrong way for this
@@ -131,7 +132,7 @@
 	raw_spin_lock(&ls1x_timer_lock);
 	ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
 	ls1x_pwmtimer_restart();
-	__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+	__raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
 	raw_spin_unlock(&ls1x_timer_lock);
 
 	return 0;
@@ -140,7 +141,7 @@
 static int ls1x_clockevent_tick_resume(struct clock_event_device *cd)
 {
 	raw_spin_lock(&ls1x_timer_lock);
-	__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+	__raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
 	raw_spin_unlock(&ls1x_timer_lock);
 
 	return 0;
@@ -149,8 +150,8 @@
 static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *cd)
 {
 	raw_spin_lock(&ls1x_timer_lock);
-	__raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
-		     timer_base + PWM_CTRL);
+	__raw_writel(__raw_readl(timer_reg_base + PWM_CTRL) & ~CNT_EN,
+		     timer_reg_base + PWM_CTRL);
 	raw_spin_unlock(&ls1x_timer_lock);
 
 	return 0;
@@ -220,7 +221,7 @@
 
 #ifdef CONFIG_CEVT_CSRC_LS1X
 	/* setup LS1X PWM timer */
-	clk = clk_get(NULL, "ls1x_pwmtimer");
+	clk = clk_get(NULL, "ls1x-pwmtimer");
 	if (IS_ERR(clk))
 		panic("unable to get timer clock, err=%ld", PTR_ERR(clk));
 
diff --git a/arch/mips/loongson32/ls1b/board.c b/arch/mips/loongson32/ls1b/board.c
index 58daeea..38a1d40 100644
--- a/arch/mips/loongson32/ls1b/board.c
+++ b/arch/mips/loongson32/ls1b/board.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ * Copyright (c) 2011-2016 Zhang, Keguang <keguang.zhang@gmail.com>
  *
  * This program is free software; you can redistribute	it and/or modify it
  * under  the terms of	the GNU General	 Public License as published by the
@@ -7,26 +7,83 @@
  * option) any later version.
  */
 
+#include <linux/leds.h>
+#include <linux/mtd/partitions.h>
+#include <linux/sizes.h>
+
+#include <loongson1.h>
+#include <dma.h>
+#include <nand.h>
 #include <platform.h>
 
+struct plat_ls1x_dma ls1x_dma_pdata = {
+	.nr_channels	= 3,
+};
+
+static struct mtd_partition ls1x_nand_parts[] = {
+	{
+		.name        = "kernel",
+		.offset      = 0,
+		.size        = SZ_16M,
+	},
+	{
+		.name        = "rootfs",
+		.offset      = MTDPART_OFS_APPEND,
+		.size        = MTDPART_SIZ_FULL,
+	},
+};
+
+struct plat_ls1x_nand ls1x_nand_pdata = {
+	.parts		= ls1x_nand_parts,
+	.nr_parts	= ARRAY_SIZE(ls1x_nand_parts),
+	.hold_cycle	= 0x2,
+	.wait_cycle	= 0xc,
+};
+
+static const struct gpio_led ls1x_gpio_leds[] __initconst = {
+	{
+		.name			= "LED9",
+		.default_trigger	= "heartbeat",
+		.gpio			= 38,
+		.active_low		= 1,
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	}, {
+		.name			= "LED6",
+		.default_trigger	= "nand-disk",
+		.gpio			= 39,
+		.active_low		= 1,
+		.default_state		= LEDS_GPIO_DEFSTATE_OFF,
+	},
+};
+
+static const struct gpio_led_platform_data ls1x_led_pdata __initconst = {
+	.num_leds	= ARRAY_SIZE(ls1x_gpio_leds),
+	.leds		= ls1x_gpio_leds,
+};
+
 static struct platform_device *ls1b_platform_devices[] __initdata = {
 	&ls1x_uart_pdev,
 	&ls1x_cpufreq_pdev,
+	&ls1x_dma_pdev,
 	&ls1x_eth0_pdev,
 	&ls1x_eth1_pdev,
 	&ls1x_ehci_pdev,
+	&ls1x_gpio0_pdev,
+	&ls1x_gpio1_pdev,
+	&ls1x_nand_pdev,
 	&ls1x_rtc_pdev,
 };
 
 static int __init ls1b_platform_init(void)
 {
-	int err;
+	ls1x_serial_set_uartclk(&ls1x_uart_pdev);
+	ls1x_dma_set_platdata(&ls1x_dma_pdata);
+	ls1x_nand_set_platdata(&ls1x_nand_pdata);
 
-	ls1x_serial_setup(&ls1x_uart_pdev);
+	gpio_led_register_device(-1, &ls1x_led_pdata);
 
-	err = platform_add_devices(ls1b_platform_devices,
+	return platform_add_devices(ls1b_platform_devices,
 				   ARRAY_SIZE(ls1b_platform_devices));
-	return err;
 }
 
 arch_initcall(ls1b_platform_init);
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index 85d8089..0fce460 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -31,7 +31,7 @@
 # can't easily be used safely within the kbuild framework.
 #
 ifeq ($(call cc-ifversion, -ge, 0409, y), y)
-  ifeq ($(call ld-ifversion, -ge, 22500000, y), y)
+  ifeq ($(call ld-ifversion, -ge, 225000000, y), y)
     cflags-$(CONFIG_CPU_LOONGSON3)  += \
       $(call cc-option,-march=loongson3a -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
   else
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index d6d07ad..57d590a 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -105,6 +105,10 @@
 		loongson_chiptemp[1] = 0x900010001fe0019c;
 		loongson_chiptemp[2] = 0x900020001fe0019c;
 		loongson_chiptemp[3] = 0x900030001fe0019c;
+		loongson_freqctrl[0] = 0x900000001fe001d0;
+		loongson_freqctrl[1] = 0x900010001fe001d0;
+		loongson_freqctrl[2] = 0x900020001fe001d0;
+		loongson_freqctrl[3] = 0x900030001fe001d0;
 		loongson_sysconf.ht_control_base = 0x90000EFDFB000000;
 		loongson_sysconf.workarounds = WORKAROUND_CPUFREQ;
 	} else if (ecpu->cputype == Loongson_3B) {
@@ -187,7 +191,8 @@
 		case PRID_REV_LOONGSON2F:
 			cpu_clock_freq = 797000000;
 			break;
-		case PRID_REV_LOONGSON3A:
+		case PRID_REV_LOONGSON3A_R1:
+		case PRID_REV_LOONGSON3A_R2:
 			cpu_clock_freq = 900000000;
 			break;
 		case PRID_REV_LOONGSON3B_R1:
diff --git a/arch/mips/loongson64/loongson-3/Makefile b/arch/mips/loongson64/loongson-3/Makefile
index 622fead..44bc148 100644
--- a/arch/mips/loongson64/loongson-3/Makefile
+++ b/arch/mips/loongson64/loongson-3/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for Loongson-3 family machines
 #
-obj-y			+= irq.o cop2-ex.o platform.o
+obj-y			+= irq.o cop2-ex.o platform.o acpi_init.o
 
 obj-$(CONFIG_SMP)	+= smp.o
 
diff --git a/drivers/platform/mips/acpi_init.c b/arch/mips/loongson64/loongson-3/acpi_init.c
similarity index 100%
rename from drivers/platform/mips/acpi_init.c
rename to arch/mips/loongson64/loongson-3/acpi_init.c
diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c
index 0f75b6b..8e76490 100644
--- a/arch/mips/loongson64/loongson-3/irq.c
+++ b/arch/mips/loongson64/loongson-3/irq.c
@@ -24,19 +24,21 @@
 	}
 }
 
+#define UNUSED_IPS (CAUSEF_IP5 | CAUSEF_IP4 | CAUSEF_IP1 | CAUSEF_IP0)
+
 void mach_irq_dispatch(unsigned int pending)
 {
 	if (pending & CAUSEF_IP7)
 		do_IRQ(LOONGSON_TIMER_IRQ);
 #if defined(CONFIG_SMP)
-	else if (pending & CAUSEF_IP6)
+	if (pending & CAUSEF_IP6)
 		loongson3_ipi_interrupt(NULL);
 #endif
-	else if (pending & CAUSEF_IP3)
+	if (pending & CAUSEF_IP3)
 		ht_irqdispatch();
-	else if (pending & CAUSEF_IP2)
+	if (pending & CAUSEF_IP2)
 		do_IRQ(LOONGSON_UART_IRQ);
-	else {
+	if (pending & UNUSED_IPS) {
 		pr_err("%s : spurious interrupt\n", __func__);
 		spurious_interrupt();
 	}
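
The rewritten mach_irq_dispatch() drops the else-if ladder so that every pending cause bit is serviced in a single pass, and only the bits collected in UNUSED_IPS are treated as spurious. A self-contained sketch of the same pattern (printf stands in for the real handlers named in the hunk):

#include <stdio.h>

#define IP0 (1u << 0)
#define IP1 (1u << 1)
#define IP2 (1u << 2)
#define IP3 (1u << 3)
#define IP4 (1u << 4)
#define IP5 (1u << 5)
#define IP6 (1u << 6)
#define IP7 (1u << 7)
#define UNUSED_IPS (IP5 | IP4 | IP1 | IP0)

/* Service every pending source in one dispatch instead of only the first. */
static void dispatch(unsigned int pending)
{
	if (pending & IP7)
		printf("timer\n");
	if (pending & IP6)
		printf("IPI\n");
	if (pending & IP3)
		printf("HT\n");
	if (pending & IP2)
		printf("UART\n");
	if (pending & UNUSED_IPS)
		printf("spurious\n");
}

int main(void)
{
	dispatch(IP7 | IP2);	/* timer and UART handled in one call */
	return 0;
}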
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 6f9e010..282c5a8 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -213,10 +213,10 @@
 		BOOTMEM_DEFAULT);
 
 	if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) {
-		/* Reserve 0xff800000~0xffffffff for RS780E integrated GPU */
+		/* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
 		reserve_bootmem_node(NODE_DATA(node),
-				(node_addrspace_offset | 0xff800000),
-				8 << 20, BOOTMEM_DEFAULT);
+				(node_addrspace_offset | 0xfe000000),
+				32 << 20, BOOTMEM_DEFAULT);
 	}
 
 	sparse_memory_present_with_active_regions(node);
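
The reservation grows from 8 MB at 0xff800000 to 32 MB at 0xfe000000; the new length matches the commented range exactly, which a quick standalone check confirms:

#include <assert.h>

int main(void)
{
	unsigned long start = 0xfe000000UL;
	unsigned long end   = 0xffffffffUL;	/* inclusive */

	/* 0xfe000000..0xffffffff spans 0x02000000 bytes = 32 MiB = 32 << 20. */
	assert(end - start + 1 == (32UL << 20));
	return 0;
}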
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 509832a9..e59759a 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -421,7 +421,6 @@
 	local_irq_save(flags);
 	fixup_irqs();
 	local_irq_restore(flags);
-	flush_cache_all();
 	local_flush_tlb_all();
 
 	return 0;
@@ -440,7 +439,7 @@
  * flush all L1 entries at first. Then, another core (usually Core 0) can
  * safely disable the clock of the target core. loongson3_play_dead() is
  * called via CKSEG1 (uncached and unmmaped) */
-static void loongson3a_play_dead(int *state_addr)
+static void loongson3a_r1_play_dead(int *state_addr)
 {
 	register int val;
 	register long cpuid, core, node, count;
@@ -502,6 +501,89 @@
 		: "a1");
 }
 
+static void loongson3a_r2_play_dead(int *state_addr)
+{
+	register int val;
+	register long cpuid, core, node, count;
+	register void *addr, *base, *initfunc;
+
+	__asm__ __volatile__(
+		"   .set push                     \n"
+		"   .set noreorder                \n"
+		"   li %[addr], 0x80000000        \n" /* KSEG0 */
+		"1: cache 0, 0(%[addr])           \n" /* flush L1 ICache */
+		"   cache 0, 1(%[addr])           \n"
+		"   cache 0, 2(%[addr])           \n"
+		"   cache 0, 3(%[addr])           \n"
+		"   cache 1, 0(%[addr])           \n" /* flush L1 DCache */
+		"   cache 1, 1(%[addr])           \n"
+		"   cache 1, 2(%[addr])           \n"
+		"   cache 1, 3(%[addr])           \n"
+		"   addiu %[sets], %[sets], -1    \n"
+		"   bnez  %[sets], 1b             \n"
+		"   addiu %[addr], %[addr], 0x40  \n"
+		"   li %[addr], 0x80000000        \n" /* KSEG0 */
+		"2: cache 2, 0(%[addr])           \n" /* flush L1 VCache */
+		"   cache 2, 1(%[addr])           \n"
+		"   cache 2, 2(%[addr])           \n"
+		"   cache 2, 3(%[addr])           \n"
+		"   cache 2, 4(%[addr])           \n"
+		"   cache 2, 5(%[addr])           \n"
+		"   cache 2, 6(%[addr])           \n"
+		"   cache 2, 7(%[addr])           \n"
+		"   cache 2, 8(%[addr])           \n"
+		"   cache 2, 9(%[addr])           \n"
+		"   cache 2, 10(%[addr])          \n"
+		"   cache 2, 11(%[addr])          \n"
+		"   cache 2, 12(%[addr])          \n"
+		"   cache 2, 13(%[addr])          \n"
+		"   cache 2, 14(%[addr])          \n"
+		"   cache 2, 15(%[addr])          \n"
+		"   addiu %[vsets], %[vsets], -1  \n"
+		"   bnez  %[vsets], 2b            \n"
+		"   addiu %[addr], %[addr], 0x40  \n"
+		"   li    %[val], 0x7             \n" /* *state_addr = CPU_DEAD; */
+		"   sw    %[val], (%[state_addr]) \n"
+		"   sync                          \n"
+		"   cache 21, (%[state_addr])     \n" /* flush entry of *state_addr */
+		"   .set pop                      \n"
+		: [addr] "=&r" (addr), [val] "=&r" (val)
+		: [state_addr] "r" (state_addr),
+		  [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
+		  [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
+
+	__asm__ __volatile__(
+		"   .set push                         \n"
+		"   .set noreorder                    \n"
+		"   .set mips64                       \n"
+		"   mfc0  %[cpuid], $15, 1            \n"
+		"   andi  %[cpuid], 0x3ff             \n"
+		"   dli   %[base], 0x900000003ff01000 \n"
+		"   andi  %[core], %[cpuid], 0x3      \n"
+		"   sll   %[core], 8                  \n" /* get core id */
+		"   or    %[base], %[base], %[core]   \n"
+		"   andi  %[node], %[cpuid], 0xc      \n"
+		"   dsll  %[node], 42                 \n" /* get node id */
+		"   or    %[base], %[base], %[node]   \n"
+		"1: li    %[count], 0x100             \n" /* wait for init loop */
+		"2: bnez  %[count], 2b                \n" /* limit mailbox access */
+		"   addiu %[count], -1                \n"
+		"   ld    %[initfunc], 0x20(%[base])  \n" /* get PC via mailbox */
+		"   beqz  %[initfunc], 1b             \n"
+		"   nop                               \n"
+		"   ld    $sp, 0x28(%[base])          \n" /* get SP via mailbox */
+		"   ld    $gp, 0x30(%[base])          \n" /* get GP via mailbox */
+		"   ld    $a1, 0x38(%[base])          \n"
+		"   jr    %[initfunc]                 \n" /* jump to initial PC */
+		"   nop                               \n"
+		"   .set pop                          \n"
+		: [core] "=&r" (core), [node] "=&r" (node),
+		  [base] "=&r" (base), [cpuid] "=&r" (cpuid),
+		  [count] "=&r" (count), [initfunc] "=&r" (initfunc)
+		: /* No Input */
+		: "a1");
+}
+
 static void loongson3b_play_dead(int *state_addr)
 {
 	register int val;
@@ -573,13 +655,18 @@
 	void (*play_dead_at_ckseg1)(int *);
 
 	idle_task_exit();
-	switch (loongson_sysconf.cputype) {
-	case Loongson_3A:
+	switch (read_c0_prid() & PRID_REV_MASK) {
+	case PRID_REV_LOONGSON3A_R1:
 	default:
 		play_dead_at_ckseg1 =
-			(void *)CKSEG1ADDR((unsigned long)loongson3a_play_dead);
+			(void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
 		break;
-	case Loongson_3B:
+	case PRID_REV_LOONGSON3A_R2:
+		play_dead_at_ckseg1 =
+			(void *)CKSEG1ADDR((unsigned long)loongson3a_r2_play_dead);
+		break;
+	case PRID_REV_LOONGSON3B_R1:
+	case PRID_REV_LOONGSON3B_R2:
 		play_dead_at_ckseg1 =
 			(void *)CKSEG1ADDR((unsigned long)loongson3b_play_dead);
 		break;
@@ -594,9 +681,9 @@
 	uint64_t core_id = cpu_data[cpu].core;
 	uint64_t package_id = cpu_data[cpu].package;
 
-	if (loongson_sysconf.cputype == Loongson_3A) {
+	if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
 		LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id));
-	} else if (loongson_sysconf.cputype == Loongson_3B) {
+	} else {
 		if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
 			LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
 	}
@@ -607,9 +694,9 @@
 	uint64_t core_id = cpu_data[cpu].core;
 	uint64_t package_id = cpu_data[cpu].package;
 
-	if (loongson_sysconf.cputype == Loongson_3A) {
+	if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
 		LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id);
-	} else if (loongson_sysconf.cputype == Loongson_3B) {
+	} else {
 		if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
 			LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
 	}
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
index a19641d..e9bbc2a 100644
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -4,9 +4,9 @@
 
 obj-y	+= cp1emu.o ieee754dp.o ieee754sp.o ieee754.o \
 	   dp_div.o dp_mul.o dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o \
-	   dp_tint.o dp_fint.o dp_maddf.o dp_msubf.o dp_2008class.o dp_fmin.o dp_fmax.o \
+	   dp_tint.o dp_fint.o dp_maddf.o dp_2008class.o dp_fmin.o dp_fmax.o \
 	   sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_simple.o \
-	   sp_tint.o sp_fint.o sp_maddf.o sp_msubf.o sp_2008class.o sp_fmin.o sp_fmax.o \
+	   sp_tint.o sp_fint.o sp_maddf.o sp_2008class.o sp_fmin.o sp_fmax.o \
 	   dsemul.o
 
 lib-y	+= ieee754d.o \
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index cdfd44f..d96e912b 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -445,9 +445,11 @@
 	case spec_op:
 		switch (insn.r_format.func) {
 		case jalr_op:
-			regs->regs[insn.r_format.rd] =
-				regs->cp0_epc + dec_insn.pc_inc +
-				dec_insn.next_pc_inc;
+			if (insn.r_format.rd != 0) {
+				regs->regs[insn.r_format.rd] =
+					regs->cp0_epc + dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
+			}
 			/* Fall through */
 		case jr_op:
 			/* For R6, JR already emulated in jalr_op */
@@ -973,9 +975,10 @@
 		struct mm_decoded_insn dec_insn, void *__user *fault_addr)
 {
 	unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
-	unsigned int cond, cbit;
+	unsigned int cond, cbit, bit0;
 	mips_instruction ir;
 	int likely, pc_inc;
+	union fpureg *fpr;
 	u32 __user *wva;
 	u64 __user *dva;
 	u32 wval;
@@ -1187,14 +1190,14 @@
 				return SIGILL;
 
 			cond = likely = 0;
+			fpr = &current->thread.fpu.fpr[MIPSInst_RT(ir)];
+			bit0 = get_fpr32(fpr, 0) & 0x1;
 			switch (MIPSInst_RS(ir)) {
 			case bc1eqz_op:
-				if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
-				    cond = 1;
+				cond = bit0 == 0;
 				break;
 			case bc1nez_op:
-				if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
-				    cond = 1;
+				cond = bit0 != 0;
 				break;
 			}
 			goto branch_common;
@@ -1674,7 +1677,7 @@
 			union ieee754sp(*b) (union ieee754sp, union ieee754sp);
 			union ieee754sp(*u) (union ieee754sp);
 		} handler;
-		union ieee754sp fs, ft;
+		union ieee754sp fd, fs, ft;
 
 		switch (MIPSInst_FUNC(ir)) {
 			/* binary ops */
@@ -1945,6 +1948,17 @@
 			rfmt = w_fmt;
 			goto copcsr;
 
+		case fsel_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(fd, MIPSInst_FD(ir));
+			if (fd.bits & 0x1)
+				SPFROMREG(rv.s, MIPSInst_FT(ir));
+			else
+				SPFROMREG(rv.s, MIPSInst_FS(ir));
+			break;
+
 		case fcvtl_op:
 			if (!cpu_has_mips_3_4_5_64_r2_r6)
 				return SIGILL;
@@ -1993,7 +2007,7 @@
 	}
 
 	case d_fmt: {
-		union ieee754dp fs, ft;
+		union ieee754dp fd, fs, ft;
 		union {
 			union ieee754dp(*b) (union ieee754dp, union ieee754dp);
 			union ieee754dp(*u) (union ieee754dp);
@@ -2243,6 +2257,17 @@
 			rfmt = w_fmt;
 			goto copcsr;
 
+		case fsel_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(fd, MIPSInst_FD(ir));
+			if (fd.bits & 0x1)
+				DPFROMREG(rv.d, MIPSInst_FT(ir));
+			else
+				DPFROMREG(rv.d, MIPSInst_FS(ir));
+			break;
+
 		case fcvtl_op:
 			if (!cpu_has_mips_3_4_5_64_r2_r6)
 				return SIGILL;
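
Per the added fsel_op cases, FSEL.fmt selects between the two source operands on bit 0 of the destination register's current value: if fd's low bit is set the result is ft, otherwise fs. A hedged userspace sketch of that selection rule (not the emulator code itself):

#include <stdio.h>
#include <stdint.h>

/* FSEL-style select: bit 0 of the current fd value picks ft (1) or fs (0). */
static double fsel(uint64_t fd_bits, double fs, double ft)
{
	return (fd_bits & 0x1) ? ft : fs;
}

int main(void)
{
	printf("%g\n", fsel(0, 1.5, 2.5));	/* prints 1.5 (fs) */
	printf("%g\n", fsel(1, 1.5, 2.5));	/* prints 2.5 (ft) */
	return 0;
}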
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index 119eda9..4a2d03c 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -14,8 +14,12 @@
 
 #include "ieee754dp.h"
 
-union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
-				union ieee754dp y)
+enum maddf_flags {
+	maddf_negate_product	= 1 << 0,
+};
+
+static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+				 union ieee754dp y, enum maddf_flags flags)
 {
 	int re;
 	int rs;
@@ -32,16 +36,15 @@
 
 	COMPXDP;
 	COMPYDP;
-
-	u64 zm; int ze; int zs __maybe_unused; int zc;
+	COMPZDP;
 
 	EXPLODEXDP;
 	EXPLODEYDP;
-	EXPLODEDP(z, zc, zs, ze, zm)
+	EXPLODEZDP;
 
 	FLUSHXDP;
 	FLUSHYDP;
-	FLUSHDP(z, zc, zs, ze, zm);
+	FLUSHZDP;
 
 	ieee754_clearcx();
 
@@ -50,7 +53,7 @@
 		ieee754_setcx(IEEE754_INVALID_OPERATION);
 		return ieee754dp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
-		DPDNORMx(zm, ze);
+		DPDNORMZ;
 	/* QNAN is handled separately below */
 	}
 
@@ -154,13 +157,15 @@
 
 	re = xe + ye;
 	rs = xs ^ ys;
+	if (flags & maddf_negate_product)
+		rs ^= 1;
 
 	/* shunt to top of word */
 	xm <<= 64 - (DP_FBITS + 1);
 	ym <<= 64 - (DP_FBITS + 1);
 
 	/*
-	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
+	 * Multiply 64 bits xm, ym to give high 64 bits rm with stickiness.
 	 */
 
 	/* 32 * 32 => 64 */
@@ -198,7 +203,7 @@
 	if ((s64) rm < 0) {
 		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
 		     ((rm << (DP_FBITS + 1 + 3)) != 0);
-			re++;
+		re++;
 	} else {
 		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
 		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
@@ -263,3 +268,15 @@
 
 	return ieee754dp_format(zs, ze, zm);
 }
+
+union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	return _dp_maddf(z, x, y, 0);
+}
+
+union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	return _dp_maddf(z, x, y, maddf_negate_product);
+}
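
With maddf_negate_product, MSUBF.fmt (fd = fd - fs x ft, per the header of the deleted dp_msubf.c below) reduces to MADDF with the product's sign flipped before accumulation. The same identity can be sanity-checked outside the emulator with the C99 fma(), which computes x*y + z with a single rounding (the values below are exact in binary, so no rounding issues arise):

#include <assert.h>
#include <math.h>

int main(void)
{
	double z = 3.0, x = 2.0, y = 0.5;

	/* msubf(z, x, y) = z - x*y = maddf(z, -x, y). */
	assert(fma(-x, y, z) == z - x * y);
	assert(fma(-x, y, z) == 2.0);
	return 0;
}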
diff --git a/arch/mips/math-emu/dp_msubf.c b/arch/mips/math-emu/dp_msubf.c
deleted file mode 100644
index 1224126..0000000
--- a/arch/mips/math-emu/dp_msubf.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * IEEE754 floating point arithmetic
- * double precision: MSUB.f (Fused Multiply Subtract)
- * MSUBF.fmt: FPR[fd] = FPR[fd] - (FPR[fs] x FPR[ft])
- *
- * MIPS floating point support
- * Copyright (C) 2015 Imagination Technologies, Ltd.
- * Author: Markos Chandras <markos.chandras@imgtec.com>
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the
- *  Free Software Foundation; version 2 of the License.
- */
-
-#include "ieee754dp.h"
-
-union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
-				union ieee754dp y)
-{
-	int re;
-	int rs;
-	u64 rm;
-	unsigned lxm;
-	unsigned hxm;
-	unsigned lym;
-	unsigned hym;
-	u64 lrm;
-	u64 hrm;
-	u64 t;
-	u64 at;
-	int s;
-
-	COMPXDP;
-	COMPYDP;
-
-	u64 zm; int ze; int zs __maybe_unused; int zc;
-
-	EXPLODEXDP;
-	EXPLODEYDP;
-	EXPLODEDP(z, zc, zs, ze, zm)
-
-	FLUSHXDP;
-	FLUSHYDP;
-	FLUSHDP(z, zc, zs, ze, zm);
-
-	ieee754_clearcx();
-
-	switch (zc) {
-	case IEEE754_CLASS_SNAN:
-		ieee754_setcx(IEEE754_INVALID_OPERATION);
-		return ieee754dp_nanxcpt(z);
-	case IEEE754_CLASS_DNORM:
-		DPDNORMx(zm, ze);
-	/* QNAN is handled separately below */
-	}
-
-	switch (CLPAIR(xc, yc)) {
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
-		return ieee754dp_nanxcpt(y);
-
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
-		return ieee754dp_nanxcpt(x);
-
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
-		return y;
-
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
-		return x;
-
-
-	/*
-	 * Infinity handling
-	 */
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
-		if (zc == IEEE754_CLASS_QNAN)
-			return z;
-		ieee754_setcx(IEEE754_INVALID_OPERATION);
-		return ieee754dp_indef();
-
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
-		if (zc == IEEE754_CLASS_QNAN)
-			return z;
-		return ieee754dp_inf(xs ^ ys);
-
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
-		if (zc == IEEE754_CLASS_INF)
-			return ieee754dp_inf(zs);
-		/* Multiplication is 0 so just return z */
-		return z;
-
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
-		DPDNORMX;
-
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
-		if (zc == IEEE754_CLASS_QNAN)
-			return z;
-		else if (zc == IEEE754_CLASS_INF)
-			return ieee754dp_inf(zs);
-		DPDNORMY;
-		break;
-
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
-		if (zc == IEEE754_CLASS_QNAN)
-			return z;
-		else if (zc == IEEE754_CLASS_INF)
-			return ieee754dp_inf(zs);
-		DPDNORMX;
-		break;
-
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
-		if (zc == IEEE754_CLASS_QNAN)
-			return z;
-		else if (zc == IEEE754_CLASS_INF)
-			return ieee754dp_inf(zs);
-		/* fall through to real computations */
-	}
-
-	/* Finally get to do some computation */
-
-	/*
-	 * Do the multiplication bit first
-	 *
-	 * rm = xm * ym, re = xe + ye basically
-	 *
-	 * At this point xm and ym should have been normalized.
-	 */
-	assert(xm & DP_HIDDEN_BIT);
-	assert(ym & DP_HIDDEN_BIT);
-
-	re = xe + ye;
-	rs = xs ^ ys;
-
-	/* shunt to top of word */
-	xm <<= 64 - (DP_FBITS + 1);
-	ym <<= 64 - (DP_FBITS + 1);
-
-	/*
-	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
-	 */
-
-	/* 32 * 32 => 64 */
-#define DPXMULT(x, y)	((u64)(x) * (u64)y)
-
-	lxm = xm;
-	hxm = xm >> 32;
-	lym = ym;
-	hym = ym >> 32;
-
-	lrm = DPXMULT(lxm, lym);
-	hrm = DPXMULT(hxm, hym);
-
-	t = DPXMULT(lxm, hym);
-
-	at = lrm + (t << 32);
-	hrm += at < lrm;
-	lrm = at;
-
-	hrm = hrm + (t >> 32);
-
-	t = DPXMULT(hxm, lym);
-
-	at = lrm + (t << 32);
-	hrm += at < lrm;
-	lrm = at;
-
-	hrm = hrm + (t >> 32);
-
-	rm = hrm | (lrm != 0);
-
-	/*
-	 * Sticky shift down to normal rounding precision.
-	 */
-	if ((s64) rm < 0) {
-		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
-		     ((rm << (DP_FBITS + 1 + 3)) != 0);
-			re++;
-	} else {
-		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
-		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
-	}
-	assert(rm & (DP_HIDDEN_BIT << 3));
-
-	/* And now the subtraction */
-
-	/* flip sign of r and handle as add */
-	rs ^= 1;
-
-	assert(zm & DP_HIDDEN_BIT);
-
-	/*
-	 * Provide guard,round and stick bit space.
-	 */
-	zm <<= 3;
-
-	if (ze > re) {
-		/*
-		 * Have to shift y fraction right to align.
-		 */
-		s = ze - re;
-		rm = XDPSRS(rm, s);
-		re += s;
-	} else if (re > ze) {
-		/*
-		 * Have to shift x fraction right to align.
-		 */
-		s = re - ze;
-		zm = XDPSRS(zm, s);
-		ze += s;
-	}
-	assert(ze == re);
-	assert(ze <= DP_EMAX);
-
-	if (zs == rs) {
-		/*
-		 * Generate 28 bit result of adding two 27 bit numbers
-		 * leaving result in xm, xs and xe.
-		 */
-		zm = zm + rm;
-
-		if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
-			zm = XDPSRS1(zm);
-			ze++;
-		}
-	} else {
-		if (zm >= rm) {
-			zm = zm - rm;
-		} else {
-			zm = rm - zm;
-			zs = rs;
-		}
-		if (zm == 0)
-			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
-
-		/*
-		 * Normalize to rounding precision.
-		 */
-		while ((zm >> (DP_FBITS + 3)) == 0) {
-			zm <<= 1;
-			ze--;
-		}
-	}
-
-	return ieee754dp_format(zs, ze, zm);
-}
diff --git a/arch/mips/math-emu/dp_mul.c b/arch/mips/math-emu/dp_mul.c
index d0901f0..87d0b44 100644
--- a/arch/mips/math-emu/dp_mul.c
+++ b/arch/mips/math-emu/dp_mul.c
@@ -125,7 +125,7 @@
 	ym <<= 64 - (DP_FBITS + 1);
 
 	/*
-	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
+	 * Multiply 64 bits xm, ym to give high 64 bits rm with stickiness.
 	 */
 
 	/* 32 * 32 => 64 */
@@ -163,7 +163,7 @@
 	if ((s64) rm < 0) {
 		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
 		     ((rm << (DP_FBITS + 1 + 3)) != 0);
-			re++;
+		re++;
 	} else {
 		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
 		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 46b964d..d4ceacd 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -60,7 +60,7 @@
 			unsigned int rs;
 			s32 v;
 
-			rs = (((insn.mm_a_format.rs + 0x1e) & 0xf) + 2);
+			rs = (((insn.mm_a_format.rs + 0xe) & 0xf) + 2);
 			v = regs->cp0_epc & ~3;
 			v += insn.mm_a_format.simmediate << 2;
 			regs->regs[rs] = (long)v;
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index ad3c734..465a034 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -54,10 +54,13 @@
 	assert(ieee754dp_issnan(r));
 
 	ieee754_setcx(IEEE754_INVALID_OPERATION);
-	if (ieee754_csr.nan2008)
+	if (ieee754_csr.nan2008) {
 		DPMANT(r) |= DP_MBIT(DP_FBITS - 1);
-	else
-		r = ieee754dp_indef();
+	} else {
+		DPMANT(r) &= ~DP_MBIT(DP_FBITS - 1);
+		if (!ieee754dp_isnan(r))
+			DPMANT(r) |= DP_MBIT(DP_FBITS - 2);
+	}
 
 	return r;
 }
@@ -97,7 +100,7 @@
 {
 	assert(xm);		/* we don't gen exact zeros (probably should) */
 
-	assert((xm >> (DP_FBITS + 1 + 3)) == 0);	/* no execess */
+	assert((xm >> (DP_FBITS + 1 + 3)) == 0);	/* no excess */
 	assert(xm & (DP_HIDDEN_BIT << 3));
 
 	if (xe < DP_EMIN) {
@@ -165,7 +168,7 @@
 	/* strip grs bits */
 	xm >>= 3;
 
-	assert((xm >> (DP_FBITS + 1)) == 0);	/* no execess */
+	assert((xm >> (DP_FBITS + 1)) == 0);	/* no excess */
 	assert(xe >= DP_EMIN);
 
 	if (xe > DP_EMAX) {
@@ -198,7 +201,7 @@
 			ieee754_setcx(IEEE754_UNDERFLOW);
 		return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
 	} else {
-		assert((xm >> (DP_FBITS + 1)) == 0);	/* no execess */
+		assert((xm >> (DP_FBITS + 1)) == 0);	/* no excess */
 		assert(xm & DP_HIDDEN_BIT);
 
 		return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
diff --git a/arch/mips/math-emu/ieee754dp.h b/arch/mips/math-emu/ieee754dp.h
index e2babd9..9ba0230 100644
--- a/arch/mips/math-emu/ieee754dp.h
+++ b/arch/mips/math-emu/ieee754dp.h
@@ -60,6 +60,7 @@
 	while ((m >> DP_FBITS) == 0) { m <<= 1; e--; }
 #define DPDNORMX	DPDNORMx(xm, xe)
 #define DPDNORMY	DPDNORMx(ym, ye)
+#define DPDNORMZ	DPDNORMx(zm, ze)
 
 static inline union ieee754dp builddp(int s, int bx, u64 m)
 {
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index ed7bb27..8bc2f69 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -55,6 +55,9 @@
 #define COMPYSP \
 	unsigned ym; int ye; int ys; int yc
 
+#define COMPZSP \
+	unsigned zm; int ze; int zs; int zc
+
 #define EXPLODESP(v, vc, vs, ve, vm)					\
 {									\
 	vs = SPSIGN(v);							\
@@ -81,6 +84,7 @@
 }
 #define EXPLODEXSP EXPLODESP(x, xc, xs, xe, xm)
 #define EXPLODEYSP EXPLODESP(y, yc, ys, ye, ym)
+#define EXPLODEZSP EXPLODESP(z, zc, zs, ze, zm)
 
 
 #define COMPXDP \
@@ -89,6 +93,9 @@
 #define COMPYDP \
 	u64 ym; int ye; int ys; int yc
 
+#define COMPZDP \
+	u64 zm; int ze; int zs; int zc
+
 #define EXPLODEDP(v, vc, vs, ve, vm)					\
 {									\
 	vm = DPMANT(v);							\
@@ -115,6 +122,7 @@
 }
 #define EXPLODEXDP EXPLODEDP(x, xc, xs, xe, xm)
 #define EXPLODEYDP EXPLODEDP(y, yc, ys, ye, ym)
+#define EXPLODEZDP EXPLODEDP(z, zc, zs, ze, zm)
 
 #define FLUSHDP(v, vc, vs, ve, vm)					\
 	if (vc==IEEE754_CLASS_DNORM) {					\
@@ -140,7 +148,9 @@
 
 #define FLUSHXDP FLUSHDP(x, xc, xs, xe, xm)
 #define FLUSHYDP FLUSHDP(y, yc, ys, ye, ym)
+#define FLUSHZDP FLUSHDP(z, zc, zs, ze, zm)
 #define FLUSHXSP FLUSHSP(x, xc, xs, xe, xm)
 #define FLUSHYSP FLUSHSP(y, yc, ys, ye, ym)
+#define FLUSHZSP FLUSHSP(z, zc, zs, ze, zm)
 
 #endif /* __IEEE754INT_H  */
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index def00ff..260e6896 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -54,10 +54,13 @@
 	assert(ieee754sp_issnan(r));
 
 	ieee754_setcx(IEEE754_INVALID_OPERATION);
-	if (ieee754_csr.nan2008)
+	if (ieee754_csr.nan2008) {
 		SPMANT(r) |= SP_MBIT(SP_FBITS - 1);
-	else
-		r = ieee754sp_indef();
+	} else {
+		SPMANT(r) &= ~SP_MBIT(SP_FBITS - 1);
+		if (!ieee754sp_isnan(r))
+			SPMANT(r) |= SP_MBIT(SP_FBITS - 2);
+	}
 
 	return r;
 }
@@ -97,7 +100,7 @@
 {
 	assert(xm);		/* we don't gen exact zeros (probably should) */
 
-	assert((xm >> (SP_FBITS + 1 + 3)) == 0);	/* no execess */
+	assert((xm >> (SP_FBITS + 1 + 3)) == 0);	/* no excess */
 	assert(xm & (SP_HIDDEN_BIT << 3));
 
 	if (xe < SP_EMIN) {
@@ -138,7 +141,8 @@
 		} else {
 			/* sticky right shift es bits
 			 */
-			SPXSRSXn(es);
+			xm = XSPSRS(xm, es);
+			xe += es;
 			assert((xm & (SP_HIDDEN_BIT << 3)) == 0);
 			assert(xe == SP_EMIN);
 		}
@@ -163,7 +167,7 @@
 	/* strip grs bits */
 	xm >>= 3;
 
-	assert((xm >> (SP_FBITS + 1)) == 0);	/* no execess */
+	assert((xm >> (SP_FBITS + 1)) == 0);	/* no excess */
 	assert(xe >= SP_EMIN);
 
 	if (xe > SP_EMAX) {
@@ -196,7 +200,7 @@
 			ieee754_setcx(IEEE754_UNDERFLOW);
 		return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm);
 	} else {
-		assert((xm >> (SP_FBITS + 1)) == 0);	/* no execess */
+		assert((xm >> (SP_FBITS + 1)) == 0);	/* no excess */
 		assert(xm & SP_HIDDEN_BIT);
 
 		return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
index 374a3f0..8476067 100644
--- a/arch/mips/math-emu/ieee754sp.h
+++ b/arch/mips/math-emu/ieee754sp.h
@@ -46,25 +46,24 @@
 }
 
 /* 3bit extended single precision sticky right shift */
-#define SPXSRSXn(rs)							\
-	(xe += rs,							\
-	 xm = (rs > (SP_FBITS+3))?1:((xm) >> (rs)) | ((xm) << (32-(rs)) != 0))
+#define XSPSRS(v, rs)						\
+	((rs > (SP_FBITS+3))?1:((v) >> (rs)) | ((v) << (32-(rs)) != 0))
+
+#define XSPSRS1(m) \
+	((m >> 1) | (m & 1))
 
 #define SPXSRSX1() \
-	(xe++, (xm = (xm >> 1) | (xm & 1)))
-
-#define SPXSRSYn(rs)								\
-	(ye+=rs,								\
-	 ym = (rs > (SP_FBITS+3))?1:((ym) >> (rs)) | ((ym) << (32-(rs)) != 0))
+	(xe++, (xm = XSPSRS1(xm)))
 
 #define SPXSRSY1() \
-	(ye++, (ym = (ym >> 1) | (ym & 1)))
+	(ye++, (ym = XSPSRS1(ym)))
 
 /* convert denormal to normalized with extended exponent */
 #define SPDNORMx(m,e) \
 	while ((m >> SP_FBITS) == 0) { m <<= 1; e--; }
 #define SPDNORMX	SPDNORMx(xm, xe)
 #define SPDNORMY	SPDNORMx(ym, ye)
+#define SPDNORMZ	SPDNORMx(zm, ze)
 
 static inline union ieee754sp buildsp(int s, int bx, unsigned m)
 {
diff --git a/arch/mips/math-emu/sp_add.c b/arch/mips/math-emu/sp_add.c
index f1c87b0..c55c0c0 100644
--- a/arch/mips/math-emu/sp_add.c
+++ b/arch/mips/math-emu/sp_add.c
@@ -132,13 +132,15 @@
 		 * Have to shift y fraction right to align.
 		 */
 		s = xe - ye;
-		SPXSRSYn(s);
+		ym = XSPSRS(ym, s);
+		ye += s;
 	} else if (ye > xe) {
 		/*
 		 * Have to shift x fraction right to align.
 		 */
 		s = ye - xe;
-		SPXSRSXn(s);
+		xm = XSPSRS(xm, s);
+		xe += s;
 	}
 	assert(xe == ye);
 	assert(xe <= SP_EMAX);
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index dd1dd83..a8cd8b4 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -14,8 +14,12 @@
 
 #include "ieee754sp.h"
 
-union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
-				union ieee754sp y)
+enum maddf_flags {
+	maddf_negate_product	= 1 << 0,
+};
+
+static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+				 union ieee754sp y, enum maddf_flags flags)
 {
 	int re;
 	int rs;
@@ -32,15 +36,15 @@
 
 	COMPXSP;
 	COMPYSP;
-	u32 zm; int ze; int zs __maybe_unused; int zc;
+	COMPZSP;
 
 	EXPLODEXSP;
 	EXPLODEYSP;
-	EXPLODESP(z, zc, zs, ze, zm)
+	EXPLODEZSP;
 
 	FLUSHXSP;
 	FLUSHYSP;
-	FLUSHSP(z, zc, zs, ze, zm);
+	FLUSHZSP;
 
 	ieee754_clearcx();
 
@@ -49,7 +53,7 @@
 		ieee754_setcx(IEEE754_INVALID_OPERATION);
 		return ieee754sp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
-		SPDNORMx(zm, ze);
+		SPDNORMZ;
 	/* QNAN is handled separately below */
 	}
 
@@ -154,6 +158,8 @@
 
 	re = xe + ye;
 	rs = xs ^ ys;
+	if (flags & maddf_negate_product)
+		rs ^= 1;
 
 	/* shunt to top of word */
 	xm <<= 32 - (SP_FBITS + 1);
@@ -208,16 +214,18 @@
 
 	if (ze > re) {
 		/*
-		 * Have to shift y fraction right to align.
+		 * Have to shift r fraction right to align.
 		 */
 		s = ze - re;
-		SPXSRSYn(s);
+		rm = XSPSRS(rm, s);
+		re += s;
 	} else if (re > ze) {
 		/*
-		 * Have to shift x fraction right to align.
+		 * Have to shift z fraction right to align.
 		 */
 		s = re - ze;
-		SPXSRSYn(s);
+		zm = XSPSRS(zm, s);
+		ze += s;
 	}
 	assert(ze == re);
 	assert(ze <= SP_EMAX);
@@ -230,7 +238,8 @@
 		zm = zm + rm;
 
 		if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
-			SPXSRSX1();
+			zm = XSPSRS1(zm);
+			ze++;
 		}
 	} else {
 		if (zm >= rm) {
@@ -253,3 +262,15 @@
 	}
 	return ieee754sp_format(zs, ze, zm);
 }
+
+union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	return _sp_maddf(z, x, y, 0);
+}
+
+union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	return _sp_maddf(z, x, y, maddf_negate_product);
+}
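
The XSPSRS() macro that replaces SPXSRSXn()/SPXSRSYn() here and in sp_add.c/sp_sub.c is a sticky right shift: any bits shifted out are collapsed into the least-significant bit of the result, so later rounding can still tell that non-zero precision was discarded. A standalone illustration (valid for shift counts 1..31; the kernel macro separately saturates larger shifts to 1):

#include <assert.h>
#include <stdint.h>

/* Sticky right shift: OR in 1 when any of the discarded bits was set. */
static uint32_t sticky_shift_right(uint32_t v, unsigned int s)
{
	return (v >> s) | ((v << (32 - s)) != 0);
}

int main(void)
{
	/* 0b1011 >> 2 discards "11", so the sticky bit keeps the result odd. */
	assert(sticky_shift_right(0xb, 2) == 0x3);
	/* 0b1000 >> 2 discards nothing, so this is a plain shift. */
	assert(sticky_shift_right(0x8, 2) == 0x2);
	return 0;
}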
diff --git a/arch/mips/math-emu/sp_msubf.c b/arch/mips/math-emu/sp_msubf.c
deleted file mode 100644
index 81c38b980..0000000
--- a/arch/mips/math-emu/sp_msubf.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * IEEE754 floating point arithmetic
- * single precision: MSUB.f (Fused Multiply Subtract)
- * MSUBF.fmt: FPR[fd] = FPR[fd] - (FPR[fs] x FPR[ft])
- *
- * MIPS floating point support
- * Copyright (C) 2015 Imagination Technologies, Ltd.
- * Author: Markos Chandras <markos.chandras@imgtec.com>
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the
- *  Free Software Foundation; version 2 of the License.
- */
-
-#include "ieee754sp.h"
-
-union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
-				union ieee754sp y)
-{
-	int re;
-	int rs;
-	unsigned rm;
-	unsigned short lxm;
-	unsigned short hxm;
-	unsigned short lym;
-	unsigned short hym;
-	unsigned lrm;
-	unsigned hrm;
-	unsigned t;
-	unsigned at;
-	int s;
-
-	COMPXSP;
-	COMPYSP;
-	u32 zm; int ze; int zs __maybe_unused; int zc;
-
-	EXPLODEXSP;
-	EXPLODEYSP;
-	EXPLODESP(z, zc, zs, ze, zm)
-
-	FLUSHXSP;
-	FLUSHYSP;
-	FLUSHSP(z, zc, zs, ze, zm);
-
-	ieee754_clearcx();
-
-	switch (zc) {
-	case IEEE754_CLASS_SNAN:
-		ieee754_setcx(IEEE754_INVALID_OPERATION);
-		return ieee754sp_nanxcpt(z);
-	case IEEE754_CLASS_DNORM:
-		SPDNORMx(zm, ze);
-	/* QNAN is handled separately below */
-	}
-
-	switch (CLPAIR(xc, yc)) {
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
-		return ieee754sp_nanxcpt(y);
-
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
-		return ieee754sp_nanxcpt(x);
-
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
-		return y;
-
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
-		return x;
-
-	/*
-	 * Infinity handling
-	 */
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
-		if (zc == IEEE754_CLASS_QNAN)
-			return z;
-		ieee754_setcx(IEEE754_INVALID_OPERATION);
-		return ieee754sp_indef();
-
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
-		if (zc == IEEE754_CLASS_QNAN)
-			return z;
-		return ieee754sp_inf(xs ^ ys);
-
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
-		if (zc == IEEE754_CLASS_INF)
-			return ieee754sp_inf(zs);
-		/* Multiplication is 0 so just return z */
-		return z;
-
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
-		SPDNORMX;
-
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
-		if (zc == IEEE754_CLASS_QNAN)
-			return z;
-		else if (zc == IEEE754_CLASS_INF)
-			return ieee754sp_inf(zs);
-		SPDNORMY;
-		break;
-
-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
-		if (zc == IEEE754_CLASS_QNAN)
-			return z;
-		else if (zc == IEEE754_CLASS_INF)
-			return ieee754sp_inf(zs);
-		SPDNORMX;
-		break;
-
-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
-		if (zc == IEEE754_CLASS_QNAN)
-			return z;
-		else if (zc == IEEE754_CLASS_INF)
-			return ieee754sp_inf(zs);
-		/* fall through to real compuation */
-	}
-
-	/* Finally get to do some computation */
-
-	/*
-	 * Do the multiplication bit first
-	 *
-	 * rm = xm * ym, re = xe + ye basically
-	 *
-	 * At this point xm and ym should have been normalized.
-	 */
-
-	/* rm = xm * ym, re = xe+ye basically */
-	assert(xm & SP_HIDDEN_BIT);
-	assert(ym & SP_HIDDEN_BIT);
-
-	re = xe + ye;
-	rs = xs ^ ys;
-
-	/* shunt to top of word */
-	xm <<= 32 - (SP_FBITS + 1);
-	ym <<= 32 - (SP_FBITS + 1);
-
-	/*
-	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
-	 */
-	lxm = xm & 0xffff;
-	hxm = xm >> 16;
-	lym = ym & 0xffff;
-	hym = ym >> 16;
-
-	lrm = lxm * lym;	/* 16 * 16 => 32 */
-	hrm = hxm * hym;	/* 16 * 16 => 32 */
-
-	t = lxm * hym; /* 16 * 16 => 32 */
-	at = lrm + (t << 16);
-	hrm += at < lrm;
-	lrm = at;
-	hrm = hrm + (t >> 16);
-
-	t = hxm * lym; /* 16 * 16 => 32 */
-	at = lrm + (t << 16);
-	hrm += at < lrm;
-	lrm = at;
-	hrm = hrm + (t >> 16);
-
-	rm = hrm | (lrm != 0);
-
-	/*
-	 * Sticky shift down to normal rounding precision.
-	 */
-	if ((int) rm < 0) {
-		rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
-		    ((rm << (SP_FBITS + 1 + 3)) != 0);
-		re++;
-	} else {
-		rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
-		     ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
-	}
-	assert(rm & (SP_HIDDEN_BIT << 3));
-
-	/* And now the subtraction */
-
-	/* Flip sign of r and handle as add */
-	rs ^= 1;
-
-	assert(zm & SP_HIDDEN_BIT);
-
-	/*
-	 * Provide guard,round and stick bit space.
-	 */
-	zm <<= 3;
-
-	if (ze > re) {
-		/*
-		 * Have to shift y fraction right to align.
-		 */
-		s = ze - re;
-		SPXSRSYn(s);
-	} else if (re > ze) {
-		/*
-		 * Have to shift x fraction right to align.
-		 */
-		s = re - ze;
-		SPXSRSYn(s);
-	}
-	assert(ze == re);
-	assert(ze <= SP_EMAX);
-
-	if (zs == rs) {
-		/*
-		 * Generate 28 bit result of adding two 27 bit numbers
-		 * leaving result in zm, zs and ze.
-		 */
-		zm = zm + rm;
-
-		if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
-			SPXSRSX1(); /* shift preserving sticky */
-		}
-	} else {
-		if (zm >= rm) {
-			zm = zm - rm;
-		} else {
-			zm = rm - zm;
-			zs = rs;
-		}
-		if (zm == 0)
-			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
-
-		/*
-		 * Normalize in extended single precision
-		 */
-		while ((zm >> (SP_MBITS + 3)) == 0) {
-			zm <<= 1;
-			ze--;
-		}
-
-	}
-	return ieee754sp_format(zs, ze, zm);
-}
diff --git a/arch/mips/math-emu/sp_sub.c b/arch/mips/math-emu/sp_sub.c
index ec5f937..dc998ed 100644
--- a/arch/mips/math-emu/sp_sub.c
+++ b/arch/mips/math-emu/sp_sub.c
@@ -134,13 +134,15 @@
 		 * have to shift y fraction right to align
 		 */
 		s = xe - ye;
-		SPXSRSYn(s);
+		ym = XSPSRS(ym, s);
+		ye += s;
 	} else if (ye > xe) {
 		/*
 		 * have to shift x fraction right to align
 		 */
 		s = ye - xe;
-		SPXSRSXn(s);
+		xm = XSPSRS(xm, s);
+		xe += s;
 	}
 	assert(xe == ye);
 	assert(xe <= SP_EMAX);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index caac3d7..ef7f925 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -77,6 +77,7 @@
  */
 static unsigned long icache_size __read_mostly;
 static unsigned long dcache_size __read_mostly;
+static unsigned long vcache_size __read_mostly;
 static unsigned long scache_size __read_mostly;
 
 /*
@@ -447,6 +448,11 @@
 		r4k_blast_scache();
 		break;
 
+	case CPU_BMIPS5000:
+		r4k_blast_scache();
+		__sync();
+		break;
+
 	default:
 		r4k_blast_dcache();
 		r4k_blast_icache();
@@ -492,7 +498,14 @@
 	if (!(has_valid_asid(vma->vm_mm)))
 		return;
 
-	r4k_blast_dcache();
+	/*
+	 * If dcache can alias, we must blast it since mapping is changing.
+	 * If executable, we must ensure any dirty lines are written back far
+	 * enough to be visible to icache.
+	 */
+	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
+		r4k_blast_dcache();
+	/* If executable, blast stale lines from icache */
 	if (exec)
 		r4k_blast_icache();
 }
@@ -502,7 +515,7 @@
 {
 	int exec = vma->vm_flags & VM_EXEC;
 
-	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
+	if (cpu_has_dc_aliases || exec)
 		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
 }
 
@@ -1148,6 +1161,8 @@
 					  c->dcache.ways *
 					  c->dcache.linesz;
 		c->dcache.waybit = 0;
+		if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
+			c->options |= MIPS_CPU_PREFETCH;
 		break;
 
 	case CPU_CAVIUM_OCTEON3:
@@ -1278,6 +1293,8 @@
 	case CPU_M5150:
 	case CPU_QEMU_GENERIC:
 	case CPU_I6400:
+	case CPU_P6600:
+	case CPU_M6250:
 		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
 		    (c->icache.waysize > PAGE_SIZE))
 			c->icache.flags |= MIPS_CACHE_ALIASES;
@@ -1304,9 +1321,16 @@
 		break;
 
 	case CPU_ALCHEMY:
+	case CPU_I6400:
 		c->icache.flags |= MIPS_CACHE_IC_F_DC;
 		break;
 
+	case CPU_BMIPS5000:
+		c->icache.flags |= MIPS_CACHE_IC_F_DC;
+		/* Cache aliases are handled in hardware; allow HIGHMEM */
+		c->dcache.flags &= ~MIPS_CACHE_ALIASES;
+		break;
+
 	case CPU_LOONGSON2:
 		/*
 		 * LOONGSON2 has 4 way icache, but when using indexed cache op,
@@ -1328,6 +1352,31 @@
 	       c->dcache.linesz);
 }
 
+static void probe_vcache(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+	unsigned int config2, lsize;
+
+	if (current_cpu_type() != CPU_LOONGSON3)
+		return;
+
+	config2 = read_c0_config2();
+	if ((lsize = ((config2 >> 20) & 15)))
+		c->vcache.linesz = 2 << lsize;
+	else
+		c->vcache.linesz = lsize;
+
+	c->vcache.sets = 64 << ((config2 >> 24) & 15);
+	c->vcache.ways = 1 + ((config2 >> 16) & 15);
+
+	vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
+
+	c->vcache.waybit = 0;
+
+	pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
+		vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
+}
+
 /*
  * If you even _breathe_ on this function, look at the gcc output and make sure
  * it does not pop things on and off the stack for the cache sizing loop that
@@ -1650,6 +1699,7 @@
 	struct cpuinfo_mips *c = &current_cpu_data;
 
 	probe_pcache();
+	probe_vcache();
 	setup_scache();
 
 	r4k_blast_dcache_page_setup();
@@ -1671,7 +1721,7 @@
 	 * This code supports virtually indexed processors and will be
 	 * unnecessarily inefficient on physically indexed processors.
 	 */
-	if (c->dcache.linesz)
+	if (c->dcache.linesz && cpu_has_dc_aliases)
 		shm_align_mask = max_t( unsigned long,
 					c->dcache.sets * c->dcache.linesz - 1,
 					PAGE_SIZE - 1);
@@ -1744,12 +1794,24 @@
 		flush_icache_range = (void *)b5k_instruction_hazard;
 		local_flush_icache_range = (void *)b5k_instruction_hazard;
 
-		/* Cache aliases are handled in hardware; allow HIGHMEM */
-		current_cpu_data.dcache.flags &= ~MIPS_CACHE_ALIASES;
 
 		/* Optimization: an L2 flush implicitly flushes the L1 */
 		current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
 		break;
+	case CPU_LOONGSON3:
+		/* Loongson-3 maintains cache coherency by hardware */
+		__flush_cache_all	= cache_noop;
+		__flush_cache_vmap	= cache_noop;
+		__flush_cache_vunmap	= cache_noop;
+		__flush_kernel_vmap_range = (void *)cache_noop;
+		flush_cache_mm		= (void *)cache_noop;
+		flush_cache_page	= (void *)cache_noop;
+		flush_cache_range	= (void *)cache_noop;
+		flush_cache_sigtramp	= (void *)cache_noop;
+		flush_icache_all	= (void *)cache_noop;
+		flush_data_cache_page	= (void *)cache_noop;
+		local_flush_data_cache_page	= (void *)cache_noop;
+		break;
 	}
 }
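
probe_vcache() derives the victim-cache geometry from CP0 Config2 fields: line size 2 << lsize, sets 64 << sets_field, ways 1 + ways_field. A standalone decode of a made-up Config2 value (0x025f0000 is purely illustrative, not a documented Loongson-3 setting):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int config2 = 0x025f0000;	/* hypothetical value */
	unsigned int lsize   = (config2 >> 20) & 15;
	unsigned int linesz  = lsize ? 2u << lsize : 0;
	unsigned int sets    = 64u << ((config2 >> 24) & 15);
	unsigned int ways    = 1 + ((config2 >> 16) & 15);

	/* 256 sets * 16 ways * 64-byte lines = 256 KiB. */
	assert(sets == 256 && ways == 16 && linesz == 64);
	printf("vcache: %u KiB\n", (sets * ways * linesz) >> 10);
	return 0;
}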
 
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 3f159ca..bf04c6c 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -16,6 +16,7 @@
 #include <linux/mm.h>
 
 #include <asm/cacheflush.h>
+#include <asm/highmem.h>
 #include <asm/processor.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
@@ -83,8 +84,6 @@
 	struct address_space *mapping = page_mapping(page);
 	unsigned long addr;
 
-	if (PageHighMem(page))
-		return;
 	if (mapping && !mapping_mapped(mapping)) {
 		SetPageDcacheDirty(page);
 		return;
@@ -95,8 +94,15 @@
 	 * case is for exec env/arg pages and those are %99 certainly going to
 	 * get faulted into the tlb (and thus flushed) anyways.
 	 */
-	addr = (unsigned long) page_address(page);
+	if (PageHighMem(page))
+		addr = (unsigned long)kmap_atomic(page);
+	else
+		addr = (unsigned long)page_address(page);
+
 	flush_data_cache_page(addr);
+
+	if (PageHighMem(page))
+		__kunmap_atomic((void *)addr);
 }
 
 EXPORT_SYMBOL(__flush_dcache_page);
@@ -119,33 +125,28 @@
 
 EXPORT_SYMBOL(__flush_anon_page);
 
-void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	unsigned long addr;
-
-	if (PageHighMem(page))
-		return;
-
-	addr = (unsigned long) page_address(page);
-	flush_data_cache_page(addr);
-}
-EXPORT_SYMBOL_GPL(__flush_icache_page);
-
-void __update_cache(struct vm_area_struct *vma, unsigned long address,
-	pte_t pte)
+void __update_cache(unsigned long address, pte_t pte)
 {
 	struct page *page;
 	unsigned long pfn, addr;
-	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
 
 	pfn = pte_pfn(pte);
 	if (unlikely(!pfn_valid(pfn)))
 		return;
 	page = pfn_to_page(pfn);
-	if (page_mapping(page) && Page_dcache_dirty(page)) {
-		addr = (unsigned long) page_address(page);
+	if (Page_dcache_dirty(page)) {
+		if (PageHighMem(page))
+			addr = (unsigned long)kmap_atomic(page);
+		else
+			addr = (unsigned long)page_address(page);
+
 		if (exec || pages_do_alias(addr, address & PAGE_MASK))
 			flush_data_cache_page(addr);
+
+		if (PageHighMem(page))
+			__kunmap_atomic((void *)addr);
+
 		ClearPageDcacheDirty(page);
 	}
 }
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 730d394..cb557d2 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -88,19 +88,20 @@
 	else
 #endif
 #if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
-	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+	     if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
 			dma_flag = __GFP_DMA;
 	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
 			dma_flag = __GFP_DMA32;
 	else
 #endif
 #if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
-	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+	     if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
 		dma_flag = __GFP_DMA32;
 	else
 #endif
 #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
-	     if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
+	     if (dev == NULL ||
+		 dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
 		dma_flag = __GFP_DMA;
 	else
 #endif
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 7e5fa09..9b58eb5 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -98,8 +98,10 @@
 	idx += in_interrupt() ? FIX_N_COLOURS : 0;
 	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
 	pte = mk_pte(page, prot);
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
 	entrylo = pte_to_entrylo(pte.pte_high);
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+	entrylo = pte.pte_high;
 #else
 	entrylo = pte_to_entrylo(pte_val(pte));
 #endif
@@ -110,9 +112,11 @@
 	write_c0_entrylo0(entrylo);
 	write_c0_entrylo1(entrylo);
 #ifdef CONFIG_XPA
-	entrylo = (pte.pte_low & _PFNX_MASK);
-	writex_c0_entrylo0(entrylo);
-	writex_c0_entrylo1(entrylo);
+	if (cpu_has_xpa) {
+		entrylo = (pte.pte_low & _PFNX_MASK);
+		writex_c0_entrylo0(entrylo);
+		writex_c0_entrylo1(entrylo);
+	}
 #endif
 	tlbidx = read_c0_wired();
 	write_c0_wired(tlbidx + 1);
@@ -196,7 +200,7 @@
 		if (cpu_has_dc_aliases)
 			SetPageDcacheDirty(page);
 	}
-	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
+	if (vma->vm_flags & VM_EXEC)
 		flush_cache_page(vma, vaddr, page_to_pfn(page));
 }
 
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 885d73f..c41953c 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -188,6 +188,15 @@
 			}
 			break;
 
+		case CPU_LOONGSON3:
+			/* Loongson-3 only supports Pref_Load/Pref_Store. */
+			pref_bias_clear_store = 128;
+			pref_bias_copy_load = 128;
+			pref_bias_copy_store = 128;
+			pref_src_mode = Pref_Load;
+			pref_dst_mode = Pref_Store;
+			break;
+
 		default:
 			pref_bias_clear_store = 128;
 			pref_bias_copy_load = 256;
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index dc7c5a5..026cb59 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -158,7 +158,7 @@
 	return 1;
 }
 
-/* XXX Check with wje if the Indy caches can differenciate between
+/* XXX Check with wje if the Indy caches can differentiate between
    writeback + invalidate and just invalidate.	*/
 static struct bcache_ops indy_sc_ops = {
 	.bc_enable = indy_sc_enable,
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 91dec32..286a4d5 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -141,6 +141,7 @@
 	case CPU_P5600:
 	case CPU_BMIPS5000:
 	case CPU_QEMU_GENERIC:
+	case CPU_P6600:
 		if (config2 & (1 << 12))
 			return 0;
 	}
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index b4f366f..1290b99 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -43,7 +43,7 @@
 {
 	unsigned long old_ctx;
 
-	old_ctx = read_c0_entryhi() & ASID_MASK;
+	old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
 	write_c0_entrylo0(0);
 	while (entry < current_cpu_data.tlbsize) {
 		write_c0_index(entry << 8);
@@ -81,6 +81,7 @@
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			   unsigned long end)
 {
+	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
 	struct mm_struct *mm = vma->vm_mm;
 	int cpu = smp_processor_id();
 
@@ -89,13 +90,13 @@
 
 #ifdef DEBUG_TLB
 		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-			cpu_context(cpu, mm) & ASID_MASK, start, end);
+			cpu_context(cpu, mm) & asid_mask, start, end);
 #endif
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size <= current_cpu_data.tlbsize) {
-			int oldpid = read_c0_entryhi() & ASID_MASK;
-			int newpid = cpu_context(cpu, mm) & ASID_MASK;
+			int oldpid = read_c0_entryhi() & asid_mask;
+			int newpid = cpu_context(cpu, mm) & asid_mask;
 
 			start &= PAGE_MASK;
 			end += PAGE_SIZE - 1;
@@ -159,6 +160,7 @@
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
 	int cpu = smp_processor_id();
 
 	if (cpu_context(cpu, vma->vm_mm) != 0) {
@@ -168,10 +170,10 @@
 #ifdef DEBUG_TLB
 		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-		newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+		newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
 		page &= PAGE_MASK;
 		local_irq_save(flags);
-		oldpid = read_c0_entryhi() & ASID_MASK;
+		oldpid = read_c0_entryhi() & asid_mask;
 		write_c0_entryhi(page | newpid);
 		BARRIER;
 		tlb_probe();
@@ -190,6 +192,7 @@
 
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
+	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
 	unsigned long flags;
 	int idx, pid;
 
@@ -199,10 +202,10 @@
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = read_c0_entryhi() & asid_mask;
 
 #ifdef DEBUG_TLB
-	if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+	if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
 		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
 		       (cpu_context(cpu, vma->vm_mm)), pid);
 	}
@@ -228,6 +231,7 @@
 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 		     unsigned long entryhi, unsigned long pagemask)
 {
+	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
 	unsigned long flags;
 	unsigned long old_ctx;
 	static unsigned long wired = 0;
@@ -243,7 +247,7 @@
 
 		local_irq_save(flags);
 		/* Save old context and create impossible VPN2 value */
-		old_ctx = read_c0_entryhi() & ASID_MASK;
+		old_ctx = read_c0_entryhi() & asid_mask;
 		old_pagemask = read_c0_pagemask();
 		w = read_c0_wired();
 		write_c0_wired(w + 1);
@@ -266,7 +270,7 @@
 #endif
 
 		local_irq_save(flags);
-		old_ctx = read_c0_entryhi() & ASID_MASK;
+		old_ctx = read_c0_entryhi() & asid_mask;
 		write_c0_entrylo0(entrylo0);
 		write_c0_entryhi(entryhi);
 		write_c0_index(wired);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 5037d58..5a5c7fe 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -19,6 +19,7 @@
 #include <asm/cpu.h>
 #include <asm/cpu-type.h>
 #include <asm/bootinfo.h>
+#include <asm/hazards.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/tlb.h>
@@ -27,25 +28,28 @@
 extern void build_tlb_refill_handler(void);
 
 /*
- * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
- * unfortunately, itlb is not totally transparent to software.
+ * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
+ * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
+ * itlb/dtlb are not totally transparent to software.
  */
-static inline void flush_itlb(void)
+static inline void flush_micro_tlb(void)
 {
 	switch (current_cpu_type()) {
 	case CPU_LOONGSON2:
+		write_c0_diag(LOONGSON_DIAG_ITLB);
+		break;
 	case CPU_LOONGSON3:
-		write_c0_diag(4);
+		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
 		break;
 	default:
 		break;
 	}
 }
 
-static inline void flush_itlb_vm(struct vm_area_struct *vma)
+static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & VM_EXEC)
-		flush_itlb();
+		flush_micro_tlb();
 }
 
 void local_flush_tlb_all(void)
@@ -92,7 +96,7 @@
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
 	htw_start();
-	flush_itlb();
+	flush_micro_tlb();
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(local_flush_tlb_all);
@@ -158,7 +162,7 @@
 		} else {
 			drop_mmu_context(mm, cpu);
 		}
-		flush_itlb();
+		flush_micro_tlb();
 		local_irq_restore(flags);
 	}
 }
@@ -204,7 +208,7 @@
 	} else {
 		local_flush_tlb_all();
 	}
-	flush_itlb();
+	flush_micro_tlb();
 	local_irq_restore(flags);
 }
 
@@ -239,7 +243,7 @@
 	finish:
 		write_c0_entryhi(oldpid);
 		htw_start();
-		flush_itlb_vm(vma);
+		flush_micro_tlb_vm(vma);
 		local_irq_restore(flags);
 	}
 }
@@ -273,7 +277,7 @@
 	}
 	write_c0_entryhi(oldpid);
 	htw_start();
-	flush_itlb();
+	flush_micro_tlb();
 	local_irq_restore(flags);
 }
 
@@ -300,7 +304,7 @@
 	local_irq_save(flags);
 
 	htw_stop();
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
 	address &= (PAGE_MASK << 1);
 	write_c0_entryhi(address | pid);
 	pgdp = pgd_offset(vma->vm_mm, address);
@@ -335,10 +339,12 @@
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #ifdef CONFIG_XPA
 		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
-		writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+		if (cpu_has_xpa)
+			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
 		ptep++;
 		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
-		writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+		if (cpu_has_xpa)
+			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
 #else
 		write_c0_entrylo0(ptep->pte_high);
 		ptep++;
@@ -356,7 +362,7 @@
 	}
 	tlbw_use_hazard();
 	htw_start();
-	flush_itlb_vm(vma);
+	flush_micro_tlb_vm(vma);
 	local_irq_restore(flags);
 }
 
@@ -486,6 +492,10 @@
 	 *     be set to fixed-size pages.
 	 */
 	write_c0_pagemask(PM_DEFAULT_MASK);
+	back_to_back_c0_hazard();
+	if (read_c0_pagemask() != PM_DEFAULT_MASK)
+		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
+
 	write_c0_wired(0);
 	if (current_cpu_type() == CPU_R10000 ||
 	    current_cpu_type() == CPU_R12000 ||
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 138a2ec..e86e2e5 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -194,7 +194,7 @@
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
 
 	local_irq_save(flags);
 	address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 5a04b6f..274da90 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -12,7 +12,7 @@
  * Copyright (C) 2011  MIPS Technologies, Inc.
  *
  * ... and the days got worse and worse and now you see
- * I've gone completly out of my mind.
+ * I've gone completely out of my mind.
  *
  * They're coming to take me a away haha
  * they're coming to take me a away hoho hihi haha
@@ -234,20 +234,16 @@
 	pr_debug("\n");
 
 	pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
-	pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
+	pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
 	pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
 	pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
 	pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
 #endif
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-	if (cpu_has_rixi) {
 #ifdef _PAGE_NO_EXEC_SHIFT
+	if (cpu_has_rixi)
 		pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
-		pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
-#endif
-	}
 #endif
 	pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
 	pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
@@ -284,7 +280,12 @@
 #define C0_ENTRYLO1	3, 0
 #define C0_CONTEXT	4, 0
 #define C0_PAGEMASK	5, 0
+#define C0_PWBASE	5, 5
+#define C0_PWFIELD	5, 6
+#define C0_PWSIZE	5, 7
+#define C0_PWCTL	6, 6
 #define C0_BADVADDR	8, 0
+#define C0_PGD		9, 7
 #define C0_ENTRYHI	10, 0
 #define C0_EPC		14, 0
 #define C0_XCONTEXT	20, 0
@@ -630,6 +631,11 @@
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							unsigned int reg)
 {
+	if (_PAGE_GLOBAL_SHIFT == 0) {
+		/* pte_t is already in EntryLo format */
+		return;
+	}
+
 	if (cpu_has_rixi && _PAGE_NO_EXEC) {
 		if (fill_includes_sw_bits) {
 			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -808,7 +814,10 @@
 
 	if (pgd_reg != -1) {
 		/* pgd is in pgd_reg */
-		UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
+		if (cpu_has_ldpte)
+			UASM_i_MFC0(p, ptr, C0_PWBASE);
+		else
+			UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
 	} else {
 #if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
 		/*
@@ -1007,39 +1016,40 @@
 
 static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
-	/*
-	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
-	 * Kernel is a special case. Only a few CPUs use it.
-	 */
-	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
-		int pte_off_even = sizeof(pte_t) / 2;
-		int pte_off_odd = pte_off_even + sizeof(pte_t);
-#ifdef CONFIG_XPA
-		const int scratch = 1; /* Our extra working register */
+	int pte_off_even = 0;
+	int pte_off_odd = sizeof(pte_t);
 
-		uasm_i_addu(p, scratch, 0, ptep);
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+	/* The low 32 bits of EntryLo is stored in pte_high */
+	pte_off_even += offsetof(pte_t, pte_high);
+	pte_off_odd += offsetof(pte_t, pte_high);
 #endif
+
+	if (config_enabled(CONFIG_XPA)) {
 		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
-		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
-		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
-		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
-#ifdef CONFIG_XPA
-		uasm_i_lw(p, tmp, 0, scratch);
-		uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
-		uasm_i_lui(p, scratch, 0xff);
-		uasm_i_ori(p, scratch, scratch, 0xffff);
-		uasm_i_and(p, tmp, scratch, tmp);
-		uasm_i_and(p, ptep, scratch, ptep);
-		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
-		uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
-#endif
+
+		if (cpu_has_xpa && !mips_xpa_disabled) {
+			uasm_i_lw(p, tmp, 0, ptep);
+			uasm_i_ext(p, tmp, tmp, 0, 24);
+			uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		}
+
+		uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
+		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
+
+		if (cpu_has_xpa && !mips_xpa_disabled) {
+			uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
+			uasm_i_ext(p, tmp, tmp, 0, 24);
+			uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+		}
 		return;
 	}
 
-	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
-	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
+	UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
+	UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
 	build_convert_pte_to_entrylo(p, tmp);
@@ -1421,6 +1431,108 @@
 	dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
 }
 
+static void setup_pw(void)
+{
+	unsigned long pgd_i, pgd_w;
+#ifndef __PAGETABLE_PMD_FOLDED
+	unsigned long pmd_i, pmd_w;
+#endif
+	unsigned long pt_i, pt_w;
+	unsigned long pte_i, pte_w;
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+	unsigned long psn;
+
+	psn = ilog2(_PAGE_HUGE);     /* bit used to indicate huge page */
+#endif
+	pgd_i = PGDIR_SHIFT;  /* 1st level PGD */
+#ifndef __PAGETABLE_PMD_FOLDED
+	pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
+
+	pmd_i = PMD_SHIFT;    /* 2nd level PMD */
+	pmd_w = PMD_SHIFT - PAGE_SHIFT;
+#else
+	pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
+#endif
+
+	pt_i  = PAGE_SHIFT;    /* 3rd level PTE */
+	pt_w  = PAGE_SHIFT - 3;
+
+	pte_i = ilog2(_PAGE_GLOBAL);
+	pte_w = 0;
+
+#ifndef __PAGETABLE_PMD_FOLDED
+	write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
+	write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
+#else
+	write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
+	write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
+#endif
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+	write_c0_pwctl(1 << 6 | psn);
+#endif
+	write_c0_kpgd(swapper_pg_dir);
+	kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
+}
+
+static void build_loongson3_tlb_refill_handler(void)
+{
+	u32 *p = tlb_handler;
+	struct uasm_label *l = labels;
+	struct uasm_reloc *r = relocs;
+
+	memset(labels, 0, sizeof(labels));
+	memset(relocs, 0, sizeof(relocs));
+	memset(tlb_handler, 0, sizeof(tlb_handler));
+
+	if (check_for_high_segbits) {
+		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+		uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+		uasm_il_beqz(&p, &r, K1, label_vmalloc);
+		uasm_i_nop(&p);
+
+		uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
+		uasm_i_nop(&p);
+		uasm_l_vmalloc(&l, p);
+	}
+
+	uasm_i_dmfc0(&p, K1, C0_PGD);
+
+	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
+#endif
+	uasm_i_ldpte(&p, K1, 0);      /* even */
+	uasm_i_ldpte(&p, K1, 1);      /* odd */
+	uasm_i_tlbwr(&p);
+
+	/* restore page mask */
+	if (PM_DEFAULT_MASK >> 16) {
+		uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
+		uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
+		uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+	} else if (PM_DEFAULT_MASK) {
+		uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
+		uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+	} else {
+		uasm_i_mtc0(&p, 0, C0_PAGEMASK);
+	}
+
+	uasm_i_eret(&p);
+
+	if (check_for_high_segbits) {
+		uasm_l_large_segbits_fault(&l, p);
+		UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
+		uasm_i_jr(&p, K1);
+		uasm_i_nop(&p);
+	}
+
+	uasm_resolve_relocs(relocs, labels);
+	memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
+	local_flush_icache_range(ebase + 0x80, ebase + 0x100);
+	dump_handler("loongson3_tlb_refill", (u32 *)(ebase + 0x80), 32);
+}
+
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
@@ -1468,7 +1580,10 @@
 	} else {
 		/* PGD in c0_KScratch */
 		uasm_i_jr(&p, 31);
-		UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+		if (cpu_has_ldpte)
+			UASM_i_MTC0(&p, a0, C0_PWBASE);
+		else
+			UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
 	}
 #else
 #ifdef CONFIG_SMP
@@ -1523,19 +1638,19 @@
 
 static void
 iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
-	unsigned int mode)
+	unsigned int mode, unsigned int scratch)
 {
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
 	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
+	unsigned int swmode = mode & ~hwmode;
 
-	if (!cpu_has_64bits) {
-		const int scratch = 1; /* Our extra working register */
-
-		uasm_i_lui(p, scratch, (mode >> 16));
+	if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
+		uasm_i_lui(p, scratch, swmode >> 16);
 		uasm_i_or(p, pte, pte, scratch);
-	} else
-#endif
-	uasm_i_ori(p, pte, pte, mode);
+		BUG_ON(swmode & 0xffff);
+	} else {
+		uasm_i_ori(p, pte, pte, mode);
+	}
+
 #ifdef CONFIG_SMP
 # ifdef CONFIG_PHYS_ADDR_T_64BIT
 	if (cpu_has_64bits)
@@ -1554,6 +1669,7 @@
 		/* no uasm_i_nop needed */
 		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
 		uasm_i_ori(p, pte, pte, hwmode);
+		BUG_ON(hwmode & ~0xffff);
 		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
 		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
 		/* no uasm_i_nop needed */
@@ -1575,6 +1691,7 @@
 	if (!cpu_has_64bits) {
 		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
 		uasm_i_ori(p, pte, pte, hwmode);
+		BUG_ON(hwmode & ~0xffff);
 		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
 		uasm_i_lw(p, pte, 0, ptr);
 	}
@@ -1615,9 +1732,8 @@
 			cur = t;
 		}
 		uasm_i_andi(p, t, cur,
-			(_PAGE_PRESENT | _PAGE_READ) >> _PAGE_PRESENT_SHIFT);
-		uasm_i_xori(p, t, t,
-			(_PAGE_PRESENT | _PAGE_READ) >> _PAGE_PRESENT_SHIFT);
+			(_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
+		uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
 		uasm_il_bnez(p, r, t, lid);
 		if (pte == t)
 			/* You lose the SMP race :-(*/
@@ -1628,11 +1744,11 @@
 /* Make PTE valid, store result in PTR. */
 static void
 build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
-		 unsigned int ptr)
+		 unsigned int ptr, unsigned int scratch)
 {
 	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
 
-	iPTE_SW(p, r, pte, ptr, mode);
+	iPTE_SW(p, r, pte, ptr, mode, scratch);
 }
 
 /*
@@ -1668,12 +1784,12 @@
  */
 static void
 build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
-		 unsigned int ptr)
+		 unsigned int ptr, unsigned int scratch)
 {
 	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
 			     | _PAGE_DIRTY);
 
-	iPTE_SW(p, r, pte, ptr, mode);
+	iPTE_SW(p, r, pte, ptr, mode, scratch);
 }
 
 /*
@@ -1778,7 +1894,7 @@
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
 	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
 	uasm_i_nop(&p); /* load delay */
-	build_make_valid(&p, &r, K0, K1);
+	build_make_valid(&p, &r, K0, K1, -1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
 
 	uasm_l_nopage_tlbl(&l, p);
@@ -1809,7 +1925,7 @@
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
 	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
 	uasm_i_nop(&p); /* load delay */
-	build_make_write(&p, &r, K0, K1);
+	build_make_write(&p, &r, K0, K1, -1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
 
 	uasm_l_nopage_tlbs(&l, p);
@@ -1840,7 +1956,7 @@
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
 	build_pte_modifiable(&p, &r, K0, K1,  -1, label_nopage_tlbm);
 	uasm_i_nop(&p); /* load delay */
-	build_make_write(&p, &r, K0, K1);
+	build_make_write(&p, &r, K0, K1, -1);
 	build_r3000_pte_reload_tlbwi(&p, K0, K1);
 
 	uasm_l_nopage_tlbm(&l, p);
@@ -2008,7 +2124,7 @@
 		}
 		uasm_l_tlbl_goaround1(&l, p);
 	}
-	build_make_valid(&p, &r, wr.r1, wr.r2);
+	build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2122,7 +2238,7 @@
 	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
-	build_make_write(&p, &r, wr.r1, wr.r2);
+	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2178,7 +2294,7 @@
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 	/* Present and writable bits set, set accessed and dirty bits. */
-	build_make_write(&p, &r, wr.r1, wr.r2);
+	build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2311,9 +2427,7 @@
 	if (CONFIG_PGTABLE_LEVELS >= 3)
 		pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
 
-	/* If XPA has been enabled, PTEs are 64-bit in size. */
-	if (config_enabled(CONFIG_64BITS) || (read_c0_pagegrain() & PG_ELPA))
-		pwsize |= 1;
+	pwsize |= ilog2(sizeof(pte_t)/4) << MIPS_PWSIZE_PTEW_SHIFT;
 
 	write_c0_pwsize(pwsize);
 
@@ -2394,6 +2508,9 @@
 	 */
 	static int run_once = 0;
 
+	if (config_enabled(CONFIG_XPA) && !cpu_has_rixi)
+		panic("Kernels supporting XPA currently require CPUs with RIXI");
+
 	output_pgtable_bits_defines();
 	check_pabits();
 
@@ -2437,13 +2554,18 @@
 		break;
 
 	default:
+		if (cpu_has_ldpte)
+			setup_pw();
+
 		if (!run_once) {
 			scratch_reg = allocate_kscratch();
 			build_setup_pgd();
 			build_r4000_tlb_load_handler();
 			build_r4000_tlb_store_handler();
 			build_r4000_tlb_modify_handler();
-			if (!cpu_has_local_ebase)
+			if (cpu_has_ldpte)
+				build_loongson3_tlb_refill_handler();
+			else if (!cpu_has_local_ebase)
 				build_r4000_tlb_refill_handler();
 			flush_tlb_handlers();
 			run_once++;
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index b4a83789..9c2220a 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -153,6 +153,8 @@
 	{ insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
 	{ insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
 	{ insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD },
+	{ insn_ldpte, M(lwc2_op, 0, 0, 0, ldpte_op, mult_op), RS | RD },
+	{ insn_lddir, M(lwc2_op, 0, 0, 0, lddir_op, mult_op), RS | RT | RD },
 	{ insn_invalid, 0, 0 }
 };
 
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 319051c..ad718de 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -60,6 +60,7 @@
 	insn_sltiu, insn_sltu, insn_sra, insn_srl, insn_srlv, insn_subu,
 	insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi,
 	insn_tlbwr, insn_wait, insn_wsbh, insn_xor, insn_xori, insn_yield,
+	insn_lddir, insn_ldpte,
 };
 
 struct insn {
@@ -335,6 +336,8 @@
 I_u1u2s3(_bbit1);
 I_u3u1u2(_lwx)
 I_u3u1u2(_ldx)
+I_u1u2(_ldpte)
+I_u2u1u3(_lddir)
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #include <asm/octeon/octeon.h>
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 4740c82..33d5ff5 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -248,10 +248,15 @@
 #endif
 }
 
+void __init *plat_get_fdt(void)
+{
+	return (void *)__dtb_start;
+}
+
 void __init plat_mem_setup(void)
 {
 	unsigned int i;
-	void *fdt = __dtb_start;
+	void *fdt = plat_get_fdt();
 
 	fdt = malta_dt_shim(fdt);
 	__dt_setup_arch(fdt);
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index b7bf721..7407da0 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -21,6 +21,7 @@
 #include <linux/i8253.h>
 #include <linux/init.h>
 #include <linux/kernel_stat.h>
+#include <linux/math64.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
@@ -72,6 +73,8 @@
 {
 	unsigned long flags;
 	unsigned int count, start;
+	unsigned char secs1, secs2, ctrl;
+	int secs;
 	cycle_t giccount = 0, gicstart = 0;
 
 #if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
@@ -81,32 +84,51 @@
 
 	local_irq_save(flags);
 
-	/* Start counter exactly on falling edge of update flag. */
-	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
-	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
-
-	/* Initialize counters. */
-	start = read_c0_count();
-	if (gic_present) {
+	if (gic_present)
 		gic_start_count();
-		gicstart = gic_read_count();
-	}
 
-	/* Read counter exactly on falling edge of update flag. */
+	/*
+	 * Read counters exactly on rising edge of update flag.
+	 * This helps get an accurate reading under virtualisation.
+	 */
 	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
 	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
+	start = read_c0_count();
+	if (gic_present)
+		gicstart = gic_read_count();
 
+	/* Wait for falling edge before reading RTC. */
+	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
+	secs1 = CMOS_READ(RTC_SECONDS);
+
+	/* Read counters again exactly on rising edge of update flag. */
+	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
 	count = read_c0_count();
 	if (gic_present)
 		giccount = gic_read_count();
 
+	/* Wait for falling edge before reading RTC again. */
+	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
+	secs2 = CMOS_READ(RTC_SECONDS);
+
+	ctrl = CMOS_READ(RTC_CONTROL);
+
 	local_irq_restore(flags);
 
+	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+		secs1 = bcd2bin(secs1);
+		secs2 = bcd2bin(secs2);
+	}
+	secs = secs2 - secs1;
+	if (secs < 1)
+		secs += 60;
+
 	count -= start;
+	count /= secs;
 	mips_hpt_frequency = count;
 
 	if (gic_present) {
-		giccount -= gicstart;
+		giccount = div_u64(giccount - gicstart, secs);
 		gic_frequency = giccount;
 	}
 }
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index e43f480..9f2f9b2 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -83,6 +83,11 @@
 	}
 }
 
+void __init *plat_get_fdt(void)
+{
+	return (void *)__dtb_start;
+}
+
 void __init plat_mem_setup(void)
 {
 	/* allow command line/bootloader env to override memory size in DT */
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
index edbab9b..c474981 100644
--- a/arch/mips/netlogic/common/reset.S
+++ b/arch/mips/netlogic/common/reset.S
@@ -50,7 +50,6 @@
 #include <asm/netlogic/xlp-hal/sys.h>
 #include <asm/netlogic/xlp-hal/cpucontrol.h>
 
-#define CP0_EBASE	$15
 #define SYS_CPU_COHERENT_BASE	CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
 			XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
 			SYS_CPU_NONCOHERENT_MODE * 4
@@ -92,7 +91,7 @@
  * registers. On XLPII CPUs, usual cache instructions work.
  */
 .macro	xlp_flush_l1_dcache
-	mfc0	t0, CP0_EBASE, 0
+	mfc0	t0, CP0_PRID
 	andi	t0, t0, PRID_IMP_MASK
 	slt	t1, t0, 0x1200
 	beqz	t1, 15f
@@ -171,7 +170,7 @@
 	nop
 
 1:	/* Entry point on core wakeup */
-	mfc0	t0, CP0_EBASE, 0	/* processor ID */
+	mfc0	t0, CP0_PRID		/* processor ID */
 	andi	t0, PRID_IMP_MASK
 	li	t1, 0x1500		/* XLP 9xx */
 	beq	t0, t1, 2f		/* does not need to set coherent */
@@ -182,8 +181,8 @@
 	nop
 
 	/* set bit in SYS coherent register for the core */
-	mfc0	t0, CP0_EBASE, 1
-	mfc0	t1, CP0_EBASE, 1
+	mfc0	t0, CP0_EBASE
+	mfc0	t1, CP0_EBASE
 	srl	t1, 5
 	andi	t1, 0x3			/* t1 <- node */
 	li	t2, 0x40000
@@ -232,7 +231,7 @@
 
 	 * NOTE: All GPR contents are lost after the mtcr above!
 	 */
-	mfc0	v0, CP0_EBASE, 1
+	mfc0	v0, CP0_EBASE
 	andi	v0, 0x3ff		/* v0 <- node/core */
 
 	/*
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index 805355b..f0cc4c9 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -48,8 +48,6 @@
 #include <asm/netlogic/xlp-hal/sys.h>
 #include <asm/netlogic/xlp-hal/cpucontrol.h>
 
-#define CP0_EBASE	$15
-
 	.set	noreorder
 	.set	noat
 	.set	arch=xlr		/* for mfcr/mtcr, XLR is sufficient */
@@ -86,7 +84,7 @@
 	PTR_L	gp, 0(t1)
 
 	/* a0 has the processor id */
-	mfc0	a0, CP0_EBASE, 1
+	mfc0	a0, CP0_EBASE
 	andi	a0, 0x3ff		/* a0 <- node/core */
 	PTR_LA	t0, nlm_early_init_secondary
 	jalr	t0
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 80ec929..25ee694 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -58,7 +58,7 @@
 		nodep->coremask = 1;	/* node 0, boot cpu */
 	nodep->sysbase = nlm_get_sys_regbase(node);
 	nodep->picbase = nlm_get_pic_regbase(node);
-	nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+	nodep->ebase = read_c0_ebase() & MIPS_EBASE_BASE;
 	if (cpu_is_xlp9xx())
 		nodep->socbus = xlp9xx_get_socbus(node);
 	else
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index d118b9a..72ceddc 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -168,7 +168,7 @@
 
 	nodep = nlm_current_node();
 	nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
-	nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+	nodep->ebase = read_c0_ebase() & MIPS_EBASE_BASE;
 	spin_lock_init(&nodep->piclock);
 }
 
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 3c9ec3d..2f33992 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -77,7 +77,7 @@
 	struct op_mips_model *lmodel = NULL;
 	int res;
 
-	switch (current_cpu_type()) {
+	switch (boot_cpu_type()) {
 	case CPU_5KC:
 	case CPU_M14KC:
 	case CPU_M14KEC:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 8f988a6..45cb274 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -269,11 +269,9 @@
 	return handled;
 }
 
-#define M_CONFIG1_PC	(1 << 4)
-
 static inline int __n_counters(void)
 {
-	if (!(read_c0_config1() & M_CONFIG1_PC))
+	if (!cpu_has_perf)
 		return 0;
 	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
 		return 1;
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
index c2ce41e..2b5427d 100644
--- a/arch/mips/pci/fixup-lantiq.c
+++ b/arch/mips/pci/fixup-lantiq.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2012 John Crispin <john@phrozen.org>
  */
 
 #include <linux/of_irq.h>
diff --git a/arch/mips/pci/ops-lantiq.c b/arch/mips/pci/ops-lantiq.c
index e5738ee..f51e108 100644
--- a/arch/mips/pci/ops-lantiq.c
+++ b/arch/mips/pci/ops-lantiq.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #include <linux/types.h>
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index 2895263..c8994c1 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -76,7 +76,7 @@
 	unsigned long old_ctx;
 
 	/* Save old context and create impossible VPN2 value */
-	old_ctx = read_c0_entryhi() & 0xff;
+	old_ctx = read_c0_entryhi() & MIPS_ENTRYHI_ASID;
 	old_pagemask = read_c0_pagemask();
 	write_c0_index(entry);
 	write_c0_pagemask(pagemask);
diff --git a/arch/mips/pci/pci-ip32.c b/arch/mips/pci/pci-ip32.c
index b1e061f..7ae89d0 100644
--- a/arch/mips/pci/pci-ip32.c
+++ b/arch/mips/pci/pci-ip32.c
@@ -116,7 +116,6 @@
 	.pci_ops	= &mace_pci_ops,
 	.mem_resource	= &mace_pci_mem_resource,
 	.io_resource	= &mace_pci_io_resource,
-	.iommu		= 0,
 	.mem_offset	= MACE_PCI_MEM_OFFSET,
 	.io_offset	= 0,
 	.io_map_base	= CKSEG1ADDR(MACEPCI_LOW_IO),
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 6a15dbd..b9deab1 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #include <linux/types.h>
diff --git a/arch/mips/pci/pci-lantiq.h b/arch/mips/pci/pci-lantiq.h
index 66bf6cd..0cc7125 100644
--- a/arch/mips/pci/pci-lantiq.h
+++ b/arch/mips/pci/pci-lantiq.h
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- *  Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2010 John Crispin <john@phrozen.org>
  */
 
 #ifndef _LTQ_PCI_H__
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index 1ae932c..6ce8162 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -2,7 +2,7 @@
  *  Ralink MT7620A SoC PCI support
  *
  *  Copyright (C) 2007-2013 Bruce Chang (Mediatek)
- *  Copyright (C) 2013-2016 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2013-2016 John Crispin <john@phrozen.org>
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms of the GNU General Public License version 2 as published
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index a245cad..f2a1050 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -1,7 +1,7 @@
 /*
  *  Ralink RT288x SoC PCI register definitions
  *
- *  Copyright (C) 2009 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2009 John Crispin <john@phrozen.org>
  *  Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
  *
  *  Parts of this file are based on Ralink's 2.6.21 BSP
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index b8a0bf5..f1b11f0 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -83,9 +83,6 @@
 	LIST_HEAD(resources);
 	struct pci_bus *bus;
 
-	if (!hose->iommu)
-		PCI_DMA_BUS_IS_PHYS = 1;
-
 	if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY))
 		next_busno = (*hose->get_busno)();
 
diff --git a/arch/mips/pic32/Kconfig b/arch/mips/pic32/Kconfig
index 1985971..527d37d 100644
--- a/arch/mips/pic32/Kconfig
+++ b/arch/mips/pic32/Kconfig
@@ -14,7 +14,7 @@
 	select SYS_HAS_EARLY_PRINTK
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
-	select ARCH_REQUIRE_GPIOLIB
+	select GPIOLIB
 	select COMMON_CLK
 	select CLKDEV_LOOKUP
 	select LIBFDT
diff --git a/arch/mips/pic32/pic32mzda/time.c b/arch/mips/pic32/pic32mzda/time.c
index ca6a62b..62a0a78 100644
--- a/arch/mips/pic32/pic32mzda/time.c
+++ b/arch/mips/pic32/pic32mzda/time.c
@@ -11,13 +11,12 @@
  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  *  for more details.
  */
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <linux/init.h>
+#include <linux/irqdomain.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
-#include <linux/irqdomain.h>
 
 #include <asm/time.h>
 
@@ -58,16 +57,12 @@
 
 void __init plat_time_init(void)
 {
-	struct clk *clk;
+	unsigned long rate = pic32_get_pbclk(7);
 
 	of_clk_init(NULL);
-	clk = clk_get_sys("cpu_clk", NULL);
-	if (IS_ERR(clk))
-		panic("unable to get CPU clock, err=%ld", PTR_ERR(clk));
 
-	clk_prepare_enable(clk);
-	pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
-	mips_hpt_frequency = clk_get_rate(clk) / 2;
+	pr_info("CPU Clock: %ldMHz\n", rate / 1000000);
+	mips_hpt_frequency = rate / 2;
 
 	clocksource_probe();
 }
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index 96ba2cc..956c92e 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -2,6 +2,7 @@
  * Pistachio platform setup
  *
  * Copyright (C) 2014 Google, Inc.
+ * Copyright (C) 2016 Imagination Technologies
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -9,6 +10,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/of_address.h>
 #include <linux/of_fdt.h>
@@ -24,9 +26,38 @@
 #include <asm/smp-ops.h>
 #include <asm/traps.h>
 
+/*
+ * Core revision register decoding
+ * Bits 23 to 20: Major rev
+ * Bits 15 to 8: Minor rev
+ * Bits 7 to 0: Maintenance rev
+ */
+#define PISTACHIO_CORE_REV_REG	0xB81483D0
+#define PISTACHIO_CORE_REV_A1	0x00100006
+#define PISTACHIO_CORE_REV_B0	0x00100106
+
 const char *get_system_type(void)
 {
-	return "IMG Pistachio SoC";
+	u32 core_rev;
+	const char *sys_type;
+
+	core_rev = __raw_readl((const void *)PISTACHIO_CORE_REV_REG);
+
+	switch (core_rev) {
+	case PISTACHIO_CORE_REV_B0:
+		sys_type = "IMG Pistachio SoC (B0)";
+		break;
+
+	case PISTACHIO_CORE_REV_A1:
+		sys_type = "IMG Pistachio SoC (A1)";
+		break;
+
+	default:
+		sys_type = "IMG Pistachio SoC";
+		break;
+	}
+
+	return sys_type;
 }
 
 static void __init plat_setup_iocoherency(void)
@@ -109,6 +140,8 @@
 	mips_cm_probe();
 	mips_cpc_probe();
 	register_cps_smp_ops();
+
+	pr_info("SoC Type: %s\n", get_system_type());
 }
 
 void __init prom_free_prom_memory(void)
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 9d293b3..a63b736 100644
--- a/arch/mips/pmcs-msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -118,7 +118,7 @@
 	/* No chip-specific reset code, just jump to the ROM reset vector */
 	set_c0_status(ST0_BEV | ST0_ERL);
 	change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
-	flush_cache_all();
+	__flush_cache_all();
 	write_c0_wired(0);
 
 	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
diff --git a/arch/mips/pnx833x/common/setup.c b/arch/mips/pnx833x/common/setup.c
index 99b4d94..8a7443b 100644
--- a/arch/mips/pnx833x/common/setup.c
+++ b/arch/mips/pnx833x/common/setup.c
@@ -38,9 +38,6 @@
 
 int __init plat_mem_setup(void)
 {
-	/* fake pci bus to avoid bounce buffers */
-	PCI_DMA_BUS_IS_PHYS = 1;
-
 	/* set mips clock to 320MHz */
 #if defined(CONFIG_SOC_PNX8335)
 	PNX8335_WRITEFIELD(0x17, CLOCK_PLL_CPU_CTL, FREQ);
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
index 0d1795a..fe34715 100644
--- a/arch/mips/ralink/Makefile
+++ b/arch/mips/ralink/Makefile
@@ -4,7 +4,7 @@
 # Makefile for the Ralink common stuff
 #
 # Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
-# Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+# Copyright (C) 2013 John Crispin <john@phrozen.org>
 
 obj-y := prom.o of.o reset.o
 
diff --git a/arch/mips/ralink/bootrom.c b/arch/mips/ralink/bootrom.c
index 5403468..e1fa597 100644
--- a/arch/mips/ralink/bootrom.c
+++ b/arch/mips/ralink/bootrom.c
@@ -3,7 +3,7 @@
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation.
  *
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/debugfs.h>
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index e46f91f..3ad0b07 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2013 by John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 by John Crispin <john@phrozen.org>
  */
 
 #include <linux/clockchips.h>
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
index 25c4a61..ebaa7cc 100644
--- a/arch/mips/ralink/clk.c
+++ b/arch/mips/ralink/clk.c
@@ -4,7 +4,7 @@
  *  by the Free Software Foundation.
  *
  *  Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
- *  Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/kernel.h>
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
index 8e7d8e6..b8245d0 100644
--- a/arch/mips/ralink/common.h
+++ b/arch/mips/ralink/common.h
@@ -3,7 +3,7 @@
  *  under the terms of the GNU General Public License version 2 as published
  *  by the Free Software Foundation.
  *
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #ifndef _RALINK_COMMON_H__
diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
index e10d10b..765d5ba 100644
--- a/arch/mips/ralink/ill_acc.c
+++ b/arch/mips/ralink/ill_acc.c
@@ -3,7 +3,7 @@
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation.
  *
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/interrupt.h>
diff --git a/arch/mips/ralink/irq-gic.c b/arch/mips/ralink/irq-gic.c
index 50d6c55..2058280 100644
--- a/arch/mips/ralink/irq-gic.c
+++ b/arch/mips/ralink/irq-gic.c
@@ -4,7 +4,7 @@
  * by the Free Software Foundation.
  *
  * Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
- * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
  */
 
 #include <linux/init.h>
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 4cf77f3..4911c14 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -4,7 +4,7 @@
  * by the Free Software Foundation.
  *
  * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/io.h>
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 0d3d1a9..88b82fe 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/kernel.h>
@@ -581,11 +581,14 @@
 		(rev & CHIP_REV_ECO_MASK));
 
 	cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
-	if (is_mt76x8())
+	if (is_mt76x8()) {
 		dram_type = cfg0 & DRAM_TYPE_MT7628_MASK;
-	else
+	} else {
 		dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) &
 			    SYSCFG0_DRAM_TYPE_MASK;
+		if (dram_type == SYSCFG0_DRAM_TYPE_UNKNOWN)
+			dram_type = SYSCFG0_DRAM_TYPE_SDRAM;
+	}
 
 	soc_info->mem_base = MT7620_DRAM_BASE;
 	if (is_mt76x8())
diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
index e9b9fa3..a45bbbe 100644
--- a/arch/mips/ralink/mt7621.c
+++ b/arch/mips/ralink/mt7621.c
@@ -4,7 +4,7 @@
  * by the Free Software Foundation.
  *
  * Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
- * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
  */
 
 #include <linux/kernel.h>
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index f9eda5d..0aa67a2 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/io.h>
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index 39a9142f..5a73c5e 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -5,7 +5,7 @@
  *
  *  Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2010 Joonas Lahtinen <joonas.lahtinen@gmail.com>
- *  Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ *  Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/string.h>
diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
index ee117c4..64543d6 100644
--- a/arch/mips/ralink/reset.c
+++ b/arch/mips/ralink/reset.c
@@ -5,7 +5,7 @@
  *
  * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
  * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/pm.h>
@@ -61,7 +61,7 @@
 	return ralink_deassert_device(rcdev, id);
 }
 
-static struct reset_control_ops reset_ops = {
+static const struct reset_control_ops reset_ops = {
 	.reset = ralink_reset_device,
 	.assert = ralink_assert_device,
 	.deassert = ralink_deassert_device,
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index 3c84166..285796e 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/kernel.h>
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index d7c4ba4..c8a28c4b 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
  * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/kernel.h>
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index fafec94..4cef916 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
  */
 
 #include <linux/kernel.h>
diff --git a/arch/mips/ralink/timer-gic.c b/arch/mips/ralink/timer-gic.c
index 5b4f186..069771d 100644
--- a/arch/mips/ralink/timer-gic.c
+++ b/arch/mips/ralink/timer-gic.c
@@ -4,7 +4,7 @@
  * by the Free Software Foundation.
  *
  * Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
- * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
  */
 
 #include <linux/init.h>
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index 82c72a1..b0343ff 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -3,7 +3,7 @@
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation.
  *
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
 */
 
 #include <linux/module.h>
@@ -180,5 +180,5 @@
 module_platform_driver(rt_timer_driver);
 
 MODULE_DESCRIPTION("Ralink RT2880 timer");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org");
+MODULE_AUTHOR("John Crispin <john@phrozen.org");
 MODULE_LICENSE("GPL");
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 8d0eb26..f1f8829 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -7,7 +7,7 @@
  * Copyright (C) 2000 by Silicon Graphics, Inc.
  * Copyright (C) 2004 by Christoph Hellwig
  *
- * On SGI IP27 the ARC memory configuration data is completly bogus but
+ * On SGI IP27 the ARC memory configuration data is completely bogus but
  * alternate easier to use mechanisms are available.
  */
 #include <linux/init.h>
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index cb9a095..707b884 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -143,7 +143,8 @@
 config SIBYTE_BUS_WATCHER
 	bool "Support for Bus Watcher statistics"
 	depends on SIBYTE_SB1xxx_SOC && \
-		(SIBYTE_BCM112X || SIBYTE_SB1250)
+		(SIBYTE_BCM112X || SIBYTE_SB1250 || \
+		 SIBYTE_BCM1x55 || SIBYTE_BCM1x80)
 	help
 	  Handle and keep statistics on the bus error interrupts (COR_ECC,
 	  BAD_ECC, IO_BUS).
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index ee3617c..b369509 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -50,13 +50,17 @@
       cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
                    -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
 
+# Strip rule for the raw .so files
+$(obj)/%.so.raw: OBJCOPYFLAGS := -S
+$(obj)/%.so.raw: $(obj)/%.so.dbg.raw FORCE
+	$(call if_changed,objcopy)
+
 hostprogs-y := genvdso
 
 quiet_cmd_genvdso = GENVDSO $@
 define cmd_genvdso
-	cp $< $(<:%.dbg=%) && \
-	$(OBJCOPY) -S $< $(<:%.dbg=%) && \
-	$(obj)/genvdso $< $(<:%.dbg=%) $@ $(VDSO_NAME)
+	$(foreach file,$(filter %.raw,$^),cp $(file) $(file:%.raw=%) &&) \
+	$(obj)/genvdso $(<:%.raw=%) $(<:%.dbg.raw=%) $@ $(VDSO_NAME)
 endef
 
 #
@@ -66,7 +70,10 @@
 native-abi := $(filter -mabi=%,$(KBUILD_CFLAGS))
 
 targets += $(obj-vdso-y)
-targets += vdso.lds vdso.so.dbg vdso.so vdso-image.c
+targets += vdso.lds
+targets += vdso.so.dbg.raw vdso.so.raw
+targets += vdso.so.dbg vdso.so
+targets += vdso-image.c
 
 obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
 
@@ -75,10 +82,11 @@
 
 $(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi)
 
-$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+$(obj)/vdso.so.dbg.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
 	$(call if_changed,vdsold)
 
-$(obj)/vdso-image.c: $(obj)/vdso.so.dbg $(obj)/genvdso FORCE
+$(obj)/vdso-image.c: $(obj)/vdso.so.dbg.raw $(obj)/vdso.so.raw \
+                     $(obj)/genvdso FORCE
 	$(call if_changed,genvdso)
 
 obj-y += vdso-image.o
@@ -89,7 +97,10 @@
 
 # Define these outside the ifdef to ensure they are picked up by clean.
 targets += $(obj-vdso-y:%.o=%-o32.o)
-targets += vdso-o32.lds vdso-o32.so.dbg vdso-o32.so vdso-o32-image.c
+targets += vdso-o32.lds
+targets += vdso-o32.so.dbg.raw vdso-o32.so.raw
+targets += vdso-o32.so.dbg vdso-o32.so
+targets += vdso-o32-image.c
 
 ifdef CONFIG_MIPS32_O32
 
@@ -109,11 +120,12 @@
 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
 	$(call if_changed_dep,cpp_lds_S)
 
-$(obj)/vdso-o32.so.dbg: $(obj)/vdso-o32.lds $(obj-vdso-o32) FORCE
+$(obj)/vdso-o32.so.dbg.raw: $(obj)/vdso-o32.lds $(obj-vdso-o32) FORCE
 	$(call if_changed,vdsold)
 
 $(obj)/vdso-o32-image.c: VDSO_NAME := o32
-$(obj)/vdso-o32-image.c: $(obj)/vdso-o32.so.dbg $(obj)/genvdso FORCE
+$(obj)/vdso-o32-image.c: $(obj)/vdso-o32.so.dbg.raw $(obj)/vdso-o32.so.raw \
+                         $(obj)/genvdso FORCE
 	$(call if_changed,genvdso)
 
 obj-y += vdso-o32-image.o
@@ -125,7 +137,10 @@
 #
 
 targets += $(obj-vdso-y:%.o=%-n32.o)
-targets += vdso-n32.lds vdso-n32.so.dbg vdso-n32.so vdso-n32-image.c
+targets += vdso-n32.lds
+targets += vdso-n32.so.dbg.raw vdso-n32.so.raw
+targets += vdso-n32.so.dbg vdso-n32.so
+targets += vdso-n32-image.c
 
 ifdef CONFIG_MIPS32_N32
 
@@ -145,11 +160,12 @@
 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
 	$(call if_changed_dep,cpp_lds_S)
 
-$(obj)/vdso-n32.so.dbg: $(obj)/vdso-n32.lds $(obj-vdso-n32) FORCE
+$(obj)/vdso-n32.so.dbg.raw: $(obj)/vdso-n32.lds $(obj-vdso-n32) FORCE
 	$(call if_changed,vdsold)
 
 $(obj)/vdso-n32-image.c: VDSO_NAME := n32
-$(obj)/vdso-n32-image.c: $(obj)/vdso-n32.so.dbg $(obj)/genvdso FORCE
+$(obj)/vdso-n32-image.c: $(obj)/vdso-n32.so.dbg.raw $(obj)/vdso-n32.so.raw \
+                         $(obj)/genvdso FORCE
 	$(call if_changed,genvdso)
 
 obj-y += vdso-n32-image.o
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index d7f7558..39a0db3 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -73,7 +73,7 @@
 	default:
 		set_c0_status(ST0_BEV | ST0_ERL);
 		change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
-		flush_cache_all();
+		__flush_cache_all();
 		write_c0_wired(0);
 		__asm__("jr	%0"::"r"(0xbfc00000));
 		break;
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 4375554..87ca653 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -1,6 +1,5 @@
 config NIOS2
 	def_bool y
-	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select CLKSRC_OF
 	select GENERIC_ATOMIC64
 	select GENERIC_CLOCKEVENTS
diff --git a/arch/nios2/include/asm/io.h b/arch/nios2/include/asm/io.h
index c5a62da..ce072ba 100644
--- a/arch/nios2/include/asm/io.h
+++ b/arch/nios2/include/asm/io.h
@@ -50,7 +50,6 @@
 
 /* Pages to physical address... */
 #define page_to_phys(page)	virt_to_phys(page_to_virt(page))
-#define page_to_bus(page)	page_to_virt(page)
 
 /* Macros used for converting between virtual and physical mappings. */
 #define phys_to_virt(vaddr)	\
diff --git a/arch/nios2/include/asm/page.h b/arch/nios2/include/asm/page.h
index 4b32d6f..c1683f5 100644
--- a/arch/nios2/include/asm/page.h
+++ b/arch/nios2/include/asm/page.h
@@ -84,7 +84,7 @@
 	((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
 
 #define page_to_virt(page)	\
-	((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+	((void *)(((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
 
 # define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 # define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET &&	\
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index a213e8c..298393c 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -209,7 +209,7 @@
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pteval)
 {
-	unsigned long paddr = page_to_virt(pte_page(pteval));
+	unsigned long paddr = (unsigned long)page_to_virt(pte_page(pteval));
 
 	flush_dcache_range(paddr, paddr + PAGE_SIZE);
 	set_pte(ptep, pteval);
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 718dd19..367c542 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -97,8 +97,7 @@
 		return 0;
 #endif
 
-	*addr64 = fdt_translate_address((const void *)initial_boot_params,
-		node);
+	*addr64 = of_flat_dt_translate_address(node);
 
 	return *addr64 == OF_BAD_ADDR ? 0 : 1;
 }
diff --git a/arch/nios2/lib/memset.c b/arch/nios2/lib/memset.c
index c2cfcb1..2fcefe7 100644
--- a/arch/nios2/lib/memset.c
+++ b/arch/nios2/lib/memset.c
@@ -68,7 +68,7 @@
 		  "=r" (charcnt),	/* %1  Output */
 		  "=r" (dwordcnt),	/* %2  Output */
 		  "=r" (fill8reg),	/* %3  Output */
-		  "=r" (wrkrega)	/* %4  Output */
+		  "=&r" (wrkrega)	/* %4  Output only */
 		: "r" (c),		/* %5  Input */
 		  "0" (s),		/* %0  Input/Output */
 		  "1" (count)		/* %1  Input/Output */
diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h
index e613d36..35bcb7c 100644
--- a/arch/openrisc/include/asm/page.h
+++ b/arch/openrisc/include/asm/page.h
@@ -81,8 +81,6 @@
 
 #define virt_to_page(addr) \
 	(mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
-#define page_to_virt(page) \
-	((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
 
 #define page_to_phys(page)      ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
 
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 989fa14..88cfaa8 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -4,8 +4,8 @@
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select HAVE_IDE
 	select HAVE_OPROFILE
-	select HAVE_FUNCTION_TRACER if 64BIT
-	select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
+	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_GRAPH_TRACER
 	select ARCH_WANT_FRAME_POINTERS
 	select RTC_CLASS
 	select RTC_DRV_GENERIC
@@ -30,6 +30,7 @@
 	select TTY # Needed for pdc_cons.c
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_SECCOMP_FILTER
 	select ARCH_NO_COHERENT_DMA_MMAP
 
 	help
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index bc989e5..68b7cbd 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -2,9 +2,13 @@
 
 source "lib/Kconfig.debug"
 
+config TRACE_IRQFLAGS_SUPPORT
+	def_bool y
+
 config DEBUG_RODATA
        bool "Write protect kernel read-only data structures"
        depends on DEBUG_KERNEL
+       default y
        help
          Mark the kernel read-only data as write-protected in the pagetables,
          in order to catch accidental (and incorrect) writes to such const
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 965a099..75cb451 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -62,9 +62,7 @@
 
 # Without this, "ld -r" results in .text sections that are too big
 # (> 0x40000) for branches to reach stubs.
-ifndef CONFIG_FUNCTION_TRACER
-  cflags-y	+= -ffunction-sections
-endif
+cflags-y	+= -ffunction-sections
 
 # Use long jumps instead of long branches (needed if your linker fails to
 # link a too big vmlinux executable). Not enabled for building modules.
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 0448a2c..3387307 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -183,6 +183,13 @@
 			int _band;      /* POLL_IN, POLL_OUT, POLL_MSG */
 			int _fd;
 		} _sigpoll;
+
+		/* SIGSYS */
+		struct {
+			compat_uptr_t _call_addr; /* calling user insn */
+			int _syscall;	/* triggering system call number */
+			compat_uint_t _arch;	/* AUDIT_ARCH_* of syscall */
+		} _sigsys;
 	} _sifields;
 } compat_siginfo_t;
 
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 544ed8e..24cd81d 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -4,23 +4,7 @@
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 
-/*
- * Stack of return addresses for functions of a thread.
- * Used in struct thread_info
- */
-struct ftrace_ret_stack {
-	unsigned long ret;
-	unsigned long func;
-	unsigned long long calltime;
-};
-
-/*
- * Primary handler of a function return.
- * It relays on ftrace_return_to_handler.
- * Defined in entry.S
- */
-extern void return_to_handler(void);
-
+#define MCOUNT_INSN_SIZE 4
 
 extern unsigned long return_address(unsigned int);
 
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
index a5eba95..637ce8d 100644
--- a/arch/parisc/include/asm/syscall.h
+++ b/arch/parisc/include/asm/syscall.h
@@ -39,6 +39,19 @@
 	}
 }
 
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	regs->gr[28] = error ? error : val;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	/* do nothing */
+}
+
 static inline int syscall_get_arch(void)
 {
 	int arch = AUDIT_ARCH_PARISC;
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index d4dd6e5..7955e43 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -44,20 +44,18 @@
 #define LDD_USER(ptr)		BUILD_BUG()
 #define STD_KERNEL(x, ptr)	__put_kernel_asm64(x, ptr)
 #define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
-#define ASM_WORD_INSN		".word\t"
 #else
 #define LDD_KERNEL(ptr)		__get_kernel_asm("ldd", ptr)
 #define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
 #define STD_KERNEL(x, ptr)	__put_kernel_asm("std", x, ptr)
 #define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
-#define ASM_WORD_INSN		".dword\t"
 #endif
 
 /*
- * The exception table contains two values: the first is an address
- * for an instruction that is allowed to fault, and the second is
- * the address to the fixup routine. Even on a 64bit kernel we could
- * use a 32bit (unsigned int) address here.
+ * The exception table contains two values: the first is the relative offset to
+ * the address of the instruction that is allowed to fault, and the second is
+ * the relative offset to the address of the fixup routine. Since relative
+ * addresses are used, 32bit values are sufficient even on 64bit kernel.
  */
 
 #define ARCH_HAS_RELATIVE_EXTABLE
@@ -77,6 +75,7 @@
  */
 struct exception_data {
 	unsigned long fault_ip;
+	unsigned long fault_gp;
 	unsigned long fault_space;
 	unsigned long fault_addr;
 };
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index ff87b46..69a1118 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -15,11 +15,7 @@
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_cache.o = -pg
-CFLAGS_REMOVE_irq.o = -pg
-CFLAGS_REMOVE_pacache.o = -pg
 CFLAGS_REMOVE_perf.o = -pg
-CFLAGS_REMOVE_traps.o = -pg
-CFLAGS_REMOVE_unaligned.o = -pg
 CFLAGS_REMOVE_unwind.o = -pg
 endif
 
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index d2f6257..78d30d2 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -299,6 +299,7 @@
 #endif
 	BLANK();
 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
+	DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
 	DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
 	BLANK();
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 91c2a39..6700127 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -319,7 +319,7 @@
 	if (!mapping)
 		return;
 
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = page->index;
 
 	/* We have carefully arranged in arch_get_unmapped_area() that
 	 * *any* mappings of a file are always congruently mapped (whether
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 623496c..39127d3 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1970,43 +1970,98 @@
 	b	intr_restore
 	copy	%r25,%r16
 
-	.import schedule,code
 syscall_do_resched:
-	BL	schedule,%r2
+	load32	syscall_check_resched,%r2 /* if resched, we start over again */
+	load32	schedule,%r19
+	bv	%r0(%r19)		/* jumps to schedule() */
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #else
 	nop
 #endif
-	b	syscall_check_resched	/* if resched, we start over again */
-	nop
 ENDPROC(syscall_exit)
 
 
 #ifdef CONFIG_FUNCTION_TRACER
+
 	.import ftrace_function_trampoline,code
-ENTRY(_mcount)
-	copy	%r3, %arg2
+	.align L1_CACHE_BYTES
+	.globl mcount
+	.type  mcount, @function
+ENTRY(mcount)
+_mcount:
+	.export _mcount,data
+	.proc
+	.callinfo caller,frame=0
+	.entry
+	/*
+	 * The 64bit mcount() function pointer needs 4 dwords, of which the
+	 * first two are free.  We optimize it here and put 2 instructions for
+	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
+	 * have all on one L1 cacheline.
+	 */
 	b	ftrace_function_trampoline
-	nop
-ENDPROC(_mcount)
-
-ENTRY(return_to_handler)
-	load32	return_trampoline, %rp
-	copy	%ret0, %arg0
-	copy	%ret1, %arg1
-	b	ftrace_return_to_handler
-	nop
-return_trampoline:
-	copy	%ret0, %rp
-	copy	%r23, %ret0
-	copy	%r24, %ret1
-
-.globl ftrace_stub
+	copy	%r3, %arg2	/* caller original %sp */
 ftrace_stub:
+	.globl ftrace_stub
+        .type  ftrace_stub, @function
+#ifdef CONFIG_64BIT
+	bve	(%rp)
+#else
 	bv	%r0(%rp)
+#endif
 	nop
+#ifdef CONFIG_64BIT
+	.dword mcount
+	.dword 0 /* code in head.S puts value of global gp here */
+#endif
+	.exit
+	.procend
+ENDPROC(mcount)
+
+	.align 8
+	.globl return_to_handler
+	.type  return_to_handler, @function
+ENTRY(return_to_handler)
+	.proc
+	.callinfo caller,frame=FRAME_SIZE
+	.entry
+	.export parisc_return_to_handler,data
+parisc_return_to_handler:
+	copy %r3,%r1
+	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
+	copy %sp,%r3
+	STREGM %r1,FRAME_SIZE(%sp)
+	STREG %ret0,8(%r3)
+	STREG %ret1,16(%r3)
+
+#ifdef CONFIG_64BIT
+	loadgp
+#endif
+
+	/* call ftrace_return_to_handler(0) */
+#ifdef CONFIG_64BIT
+	ldo -16(%sp),%ret1		/* Reference param save area */
+#endif
+	BL ftrace_return_to_handler,%r2
+	ldi 0,%r26
+	copy %ret0,%rp
+
+	/* restore original return values */
+	LDREG 8(%r3),%ret0
+	LDREG 16(%r3),%ret1
+
+	/* return from function */
+#ifdef CONFIG_64BIT
+	bve	(%rp)
+#else
+	bv	%r0(%rp)
+#endif
+	LDREGM -FRAME_SIZE(%sp),%r3
+	.exit
+	.procend
 ENDPROC(return_to_handler)
+
 #endif	/* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_IRQSTACKS
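
The mcount() layout above leans on the 64-bit parisc convention that an
external function "pointer" is a descriptor rather than a bare code address.
A rough picture of the four doublewords, assuming the offsets the patch uses
(head.S writes the global pointer at offset 0x18); the struct name is
illustrative only:

struct parisc64_mcount_fptr {
	unsigned long insns[2];	/* reused for the four instructions of mcount/ftrace_stub */
	unsigned long addr;	/* 0x10: code address, the ".dword mcount" above */
	unsigned long gp;	/* 0x18: global pointer, filled in by head.S at boot */
};
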
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 559d400..b13f9ec 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -1,6 +1,6 @@
 /*
  * Code for tracing calls in Linux kernel.
- * Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
  *
  * based on code for x86 which is:
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
@@ -13,104 +13,21 @@
 #include <linux/init.h>
 #include <linux/ftrace.h>
 
+#include <asm/assembly.h>
 #include <asm/sections.h>
 #include <asm/ftrace.h>
 
 
-
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-				unsigned long func, int *depth)
-{
-	int index;
-
-	if (!current->ret_stack)
-		return -EBUSY;
-
-	/* The return trace stack is full */
-	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-		atomic_inc(&current->trace_overrun);
-		return -EBUSY;
-	}
-
-	index = ++current->curr_ret_stack;
-	barrier();
-	current->ret_stack[index].ret = ret;
-	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
-	*depth = index;
-
-	return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
-{
-	int index;
-
-	index = current->curr_ret_stack;
-
-	if (unlikely(index < 0)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic, otherwise we have no where to go */
-		*ret = (unsigned long)
-			dereference_function_descriptor(&panic);
-		return;
-	}
-
-	*ret = current->ret_stack[index].ret;
-	trace->func = current->ret_stack[index].func;
-	trace->calltime = current->ret_stack[index].calltime;
-	trace->overrun = atomic_read(&current->trace_overrun);
-	trace->depth = index;
-	barrier();
-	current->curr_ret_stack--;
-
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(unsigned long retval0,
-				       unsigned long retval1)
-{
-	struct ftrace_graph_ret trace;
-	unsigned long ret;
-
-	pop_return_trace(&trace, &ret);
-	trace.rettime = local_clock();
-	ftrace_graph_return(&trace);
-
-	if (unlikely(!ret)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		/* Might as well panic. What else to do? */
-		ret = (unsigned long)
-			dereference_function_descriptor(&panic);
-	}
-
-	/* HACK: we hand over the old functions' return values
-	   in %r23 and %r24. Assembly in entry.S will take care
-	   and move those to their final registers %ret0 and %ret1 */
-	asm( "copy %0, %%r23 \n\t"
-	     "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );
-
-	return ret;
-}
-
 /*
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+static void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 {
 	unsigned long old;
-	unsigned long long calltime;
 	struct ftrace_graph_ent trace;
+	extern int parisc_return_to_handler;
 
 	if (unlikely(ftrace_graph_is_dead()))
 		return;
@@ -119,64 +36,47 @@
 		return;
 
 	old = *parent;
-	*parent = (unsigned long)
-		  dereference_function_descriptor(&return_to_handler);
-
-	if (unlikely(!__kernel_text_address(old))) {
-		ftrace_graph_stop();
-		*parent = old;
-		WARN_ON(1);
-		return;
-	}
-
-	calltime = local_clock();
-
-	if (push_return_trace(old, calltime,
-				self_addr, &trace.depth) == -EBUSY) {
-		*parent = old;
-		return;
-	}
 
 	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
 
 	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace)) {
-		current->curr_ret_stack--;
-		*parent = old;
-	}
-}
+	if (!ftrace_graph_entry(&trace))
+		return;
 
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+				     0) == -EBUSY)
+		return;
+
+	/* activate parisc_return_to_handler() as return point */
+	*parent = (unsigned long) &parisc_return_to_handler;
+}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
-
-void ftrace_function_trampoline(unsigned long parent,
+void notrace ftrace_function_trampoline(unsigned long parent,
 				unsigned long self_addr,
 				unsigned long org_sp_gr3)
 {
-	extern ftrace_func_t ftrace_trace_function;
+	extern ftrace_func_t ftrace_trace_function;  /* depends on CONFIG_DYNAMIC_FTRACE */
+	extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
 
 	if (ftrace_trace_function != ftrace_stub) {
-		ftrace_trace_function(parent, self_addr);
+		/* struct ftrace_ops *op, struct pt_regs *regs); */
+		ftrace_trace_function(parent, self_addr, NULL, NULL);
 		return;
 	}
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	if (ftrace_graph_entry && ftrace_graph_return) {
-		unsigned long sp;
+	if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
+		ftrace_graph_entry != ftrace_graph_entry_stub) {
 		unsigned long *parent_rp;
 
-                asm volatile ("copy %%r30, %0" : "=r"(sp));
-		/* sanity check: is stack pointer which we got from
-		   assembler function in entry.S in a reasonable
-		   range compared to current stack pointer? */
-		if ((sp - org_sp_gr3) > 0x400)
-			return;
-
 		/* calculate pointer to %rp in stack */
-		parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
+		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
 		/* sanity check: parent_rp should hold parent */
 		if (*parent_rp != parent)
 			return;
-		
+
 		prepare_ftrace_return(parent_rp, self_addr);
 		return;
 	}
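
The simplified trampoline no longer re-reads %sp with inline assembly; it
trusts the stack pointer handed over from entry.S and derives the caller's
return-address slot from it.  A sketch of that lookup, assuming RP_OFFSET from
<asm/assembly.h> and a hypothetical helper name:

static unsigned long *locate_parent_rp(unsigned long org_sp_gr3,
				       unsigned long expected_parent)
{
	unsigned long *parent_rp = (unsigned long *)(org_sp_gr3 - RP_OFFSET);

	/* frame layout not as expected: refuse to redirect the return */
	if (*parent_rp != expected_parent)
		return NULL;
	return parent_rp;
}
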
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 75aa0db..bbbe360 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -129,6 +129,15 @@
 	/* And the stack pointer too */
 	ldo             THREAD_SZ_ALGN(%r6),%sp
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
+	.import _mcount,data
+	/* initialize mcount FPTR */
+	/* Get the global data pointer */
+	loadgp
+	load32		PA(_mcount), %r10
+	std		%dp,0x18(%r10)
+#endif
+
 #ifdef CONFIG_SMP
 	/* Set the smp rendezvous address into page zero.
 	** It would be safer to do this in init_smp_config() but
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index b9d75d9..a0ecdb4 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -660,6 +660,10 @@
 			}
 			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
 			break;
+		case R_PARISC_PCREL32:
+			/* 32-bit PC relative address */
+			*loc = val - dot - 8 + addend;
+			break;
 
 		default:
 			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
@@ -788,6 +792,10 @@
 			CHECK_RELOC(val, 22);
 			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
 			break;
+		case R_PARISC_PCREL32:
+			/* 32-bit PC relative address */
+			*loc = val - dot - 8 + addend;
+			break;
 		case R_PARISC_DIR64:
 			/* 64-bit effective address */
 			*loc64 = val + addend;
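
Both hunks apply the new R_PARISC_PCREL32 relocation with the same arithmetic.
As a standalone sketch, assuming (as the surrounding cases suggest) that the
constant 8 is the usual parisc instruction-pointer bias:

static inline u32 pcrel32_value(unsigned long val, unsigned long dot, long addend)
{
	/* val = symbol value, dot = address of the word being patched */
	return (u32)(val - dot - 8 + addend);
}
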
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 568b2c6..3cad8aa 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,11 +47,11 @@
 EXPORT_SYMBOL(lclear_user);
 EXPORT_SYMBOL(lstrnlen_user);
 
-/* Global fixups */
-extern void fixup_get_user_skip_1(void);
-extern void fixup_get_user_skip_2(void);
-extern void fixup_put_user_skip_1(void);
-extern void fixup_put_user_skip_2(void);
+/* Global fixups - defined as int to avoid creation of function pointers */
+extern int fixup_get_user_skip_1;
+extern int fixup_get_user_skip_2;
+extern int fixup_put_user_skip_1;
+extern int fixup_put_user_skip_2;
 EXPORT_SYMBOL(fixup_get_user_skip_1);
 EXPORT_SYMBOL(fixup_get_user_skip_2);
 EXPORT_SYMBOL(fixup_put_user_skip_1);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index ce0b2b4..8fb81a3 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -270,7 +270,8 @@
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
 	/* Do the secure computing check first. */
-	secure_computing_strict(regs->gr[20]);
+	if (secure_computing() == -1)
+		return -1;
 
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
 	    tracehook_report_syscall_entry(regs)) {
@@ -296,7 +297,11 @@
 			regs->gr[23] & 0xffffffff);
 
 out:
-	return regs->gr[20];
+	/*
+	 * Sign extend the syscall number to 64bit since it may have been
+	 * modified by a compat ptrace call
+	 */
+	return (int) ((u32) regs->gr[20]);
 }
 
 void do_syscall_trace_exit(struct pt_regs *regs)
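
The double cast in the return statement is plain C arithmetic: truncating to
u32 and converting through int makes the value sign-extend when it is widened
back to the 64-bit return type, so a -1 written into gr[20] by a 32-bit tracer
is still seen as -1 by the 64-bit syscall path.  A minimal illustration with a
hypothetical helper:

static inline long syscall_nr_from_gr20(unsigned long gr20)
{
	return (int)((u32)gr20);	/* 0xffffffff becomes -1, not 4294967295 */
}
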
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 984abbe..c342b2e 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -371,6 +371,11 @@
 			val = (compat_int_t)from->si_int;
 			err |= __put_user(val, &to->si_int);
 			break;
+		case __SI_SYS >> 16:
+			err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr);
+			err |= __put_user(from->si_syscall, &to->si_syscall);
+			err |= __put_user(from->si_arch, &to->si_arch);
+			break;
 		}
 	}
 	return err;
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index fbafa0d..57b4836 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -329,6 +329,7 @@
 
 	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
 	LDREG	TI_TASK(%r1), %r1
+	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return value */
 	LDREG   TASK_PT_GR26(%r1), %r26		/* Restore the users args */
 	LDREG   TASK_PT_GR25(%r1), %r25
 	LDREG   TASK_PT_GR24(%r1), %r24
@@ -342,7 +343,8 @@
 	stw     %r21, -56(%r30)                 /* 6th argument */
 #endif
 
-	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
+	cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
+	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
 	b,n	.Ltracesys_nosys
 
 	LDREGX  %r20(%r19), %r19
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 16e0735..97d6b20 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -795,6 +795,9 @@
 
 	    if (fault_space == 0 && !faulthandler_disabled())
 	    {
+		/* Clean up and return if in exception table. */
+		if (fixup_exception(regs))
+			return;
 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
 		parisc_terminate("Kernel Fault", regs, code, fault_address);
 	    }
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index 536ef66..1052b74 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -26,6 +26,7 @@
 
 #ifdef CONFIG_SMP
 	.macro  get_fault_ip t1 t2
+	loadgp
 	addil LT%__per_cpu_offset,%r27
 	LDREG RT%__per_cpu_offset(%r1),\t1
 	/* t2 = smp_processor_id() */
@@ -40,14 +41,19 @@
 	LDREG RT%exception_data(%r1),\t1
 	/* t1 = this_cpu_ptr(&exception_data) */
 	add,l \t1,\t2,\t1
+	/* %r27 = t1->fault_gp - restore gp */
+	LDREG EXCDATA_GP(\t1), %r27
 	/* t1 = t1->fault_ip */
 	LDREG EXCDATA_IP(\t1), \t1
 	.endm
 #else
 	.macro  get_fault_ip t1 t2
+	loadgp
 	/* t1 = this_cpu_ptr(&exception_data) */
 	addil LT%exception_data,%r27
 	LDREG RT%exception_data(%r1),\t2
+	/* %r27 = t2->fault_gp - restore gp */
+	LDREG EXCDATA_GP(\t2), %r27
 	/* t1 = t2->fault_ip */
 	LDREG EXCDATA_IP(\t2), \t1
 	.endm
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 26fac9c..16dbe81 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -145,6 +145,7 @@
 		struct exception_data *d;
 		d = this_cpu_ptr(&exception_data);
 		d->fault_ip = regs->iaoq[0];
+		d->fault_gp = regs->gr[27];
 		d->fault_space = regs->isr;
 		d->fault_addr = regs->ior;
 
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3c07d6b..6b3e7c6 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -22,7 +22,7 @@
 #include <linux/swap.h>
 #include <linux/unistd.h>
 #include <linux/nodemask.h>	/* for node_online_map */
-#include <linux/pagemap.h>	/* for release_pages and page_cache_release */
+#include <linux/pagemap.h>	/* for release_pages */
 #include <linux/compat.h>
 
 #include <asm/pgalloc.h>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 7cd32c0..a18a0dc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -126,7 +126,7 @@
 	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_BPF_JIT
+	select HAVE_CBPF_JIT
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_GCOV_PROFILE_ALL
@@ -160,6 +160,7 @@
 	select HAVE_ARCH_SECCOMP_FILTER
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -1107,3 +1108,5 @@
 	bool
 
 source "arch/powerpc/kvm/Kconfig"
+
+source "kernel/livepatch/Kconfig"
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d7b3431..ec35af3 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -40,6 +40,9 @@
 #define KVM_MAX_VCORES		NR_CPUS
 #define KVM_USER_MEM_SLOTS	512
 
+#include <asm/cputhreads.h>
+#define KVM_MAX_VCPU_ID                (threads_per_subcore * KVM_MAX_VCORES)
+
 #ifdef CONFIG_KVM_MMIO
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #endif
@@ -113,6 +116,7 @@
 	u32 ext_intr_exits;
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
+	u32 halt_poll_invalid;
 	u32 halt_wakeup;
 	u32 dbell_exits;
 	u32 gdbell_exits;
@@ -724,5 +728,6 @@
 static inline void kvm_arch_exit(void) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
new file mode 100644
index 0000000..a402f7f
--- /dev/null
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -0,0 +1,62 @@
+/*
+ * livepatch.h - powerpc-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2015-2016, SUSE, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_POWERPC_LIVEPATCH_H
+#define _ASM_POWERPC_LIVEPATCH_H
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+	return 0;
+}
+
+static inline int klp_write_module_reloc(struct module *mod, unsigned long
+		type, unsigned long loc, unsigned long value)
+{
+	/* This requires infrastructure changes; we need the loadinfos. */
+	return -ENOSYS;
+}
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->nip = ip;
+}
+
+#define klp_get_ftrace_location klp_get_ftrace_location
+static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+	/*
+	 * Live patch works only with -mprofile-kernel on PPC. In this case,
+	 * the ftrace location is always within the first 16 bytes.
+	 */
+	return ftrace_location_range(faddr, faddr + 16);
+}
+
+static inline void klp_init_thread_info(struct thread_info *ti)
+{
+	/* + 1 to account for STACK_END_MAGIC */
+	ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
+}
+#else
+static void klp_init_thread_info(struct thread_info *ti) { }
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _ASM_POWERPC_LIVEPATCH_H */
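
klp_init_thread_info() parks the livepatch stack pointer just past the
thread_info structure and the STACK_END_MAGIC canary word that follows it.
The pointer arithmetic unpacked, as a sketch with a hypothetical helper name:

static inline unsigned long *lp_stack_base(struct thread_info *ti)
{
	/* the first word after struct thread_info holds STACK_END_MAGIC */
	unsigned long *after_ti = (unsigned long *)(ti + 1);

	return after_ti + 1;	/* first usable slot of the livepatch stack */
}
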
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8ab8a1a..009fab1 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -246,7 +246,7 @@
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
 	/* VSR status */
-	int		used_vsr;	/* set if process has used altivec */
+	int		used_vsr;	/* set if process has used VSX */
 #endif /* CONFIG_VSX */
 #ifdef CONFIG_SPE
 	unsigned long	evr[32];	/* upper 32-bits of SPE regs */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3fa9df7..2fc5d4d 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -384,3 +384,5 @@
 SYSCALL(ni_syscall)
 SYSCALL(mlock2)
 SYSCALL(copy_file_range)
+COMPAT_SYS_SPU(preadv2)
+COMPAT_SYS_SPU(pwritev2)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 7efee4a..8febc3f 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -43,7 +43,9 @@
 	int		preempt_count;		/* 0 => preemptable,
 						   <0 => BUG */
 	unsigned long	local_flags;		/* private flags for thread */
-
+#ifdef CONFIG_LIVEPATCH
+	unsigned long *livepatch_sp;
+#endif
 	/* low level flags - has atomic operations done on it */
 	unsigned long	flags ____cacheline_aligned_in_smp;
 };
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1f2594d..cf12c58 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		380
+#define NR_syscalls		382
 
 #define __NR__exit __NR_exit
 
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index e4396a7..4afe66a 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -82,7 +82,7 @@
 	    "andc	%1,%1,%2\n\t"
 	    "popcntd	%0,%1"
 		: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
-		: "r" (bits));
+		: "b" (bits));
 
 	return leading_zero_bits;
 }
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 8dde199..f63c96c 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -31,6 +31,7 @@
 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
 					0x00000040
 
+/* Reserved - do not use		0x00000004 */
 #define PPC_FEATURE_TRUE_LE		0x00000002
 #define PPC_FEATURE_PPC_LE		0x00000001
 
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 940290d..e9f5f41 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -390,5 +390,7 @@
 #define __NR_membarrier		365
 #define __NR_mlock2		378
 #define __NR_copy_file_range	379
+#define __NR_preadv2		380
+#define __NR_pwritev2		381
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0d0183d..c9370d4 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -86,6 +86,10 @@
 	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_LIVEPATCH
+	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
+#endif
+
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9916d15..39a79c8 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -20,6 +20,7 @@
 
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <linux/magic.h>
 #include <asm/unistd.h>
 #include <asm/processor.h>
 #include <asm/page.h>
@@ -1248,6 +1249,9 @@
 	addi	r3,r3,function_trace_op@toc@l
 	ld	r5,0(r3)
 
+#ifdef CONFIG_LIVEPATCH
+	mr	r14,r7		/* remember old NIP */
+#endif
 	/* Calculate ip from nip-4 into r3 for call below */
 	subi    r3, r7, MCOUNT_INSN_SIZE
 
@@ -1272,6 +1276,9 @@
 	/* Load ctr with the possibly modified NIP */
 	ld	r3, _NIP(r1)
 	mtctr	r3
+#ifdef CONFIG_LIVEPATCH
+	cmpd	r14,r3		/* has NIP been altered? */
+#endif
 
 	/* Restore gprs */
 	REST_8GPRS(0,r1)
@@ -1289,6 +1296,11 @@
 	ld	r0, LRSAVE(r1)
 	mtlr	r0
 
+#ifdef CONFIG_LIVEPATCH
+	/* Based on the cmpd above, if the NIP was altered, handle livepatch */
+	bne-	livepatch_handler
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	stdu	r1, -112(r1)
 .globl ftrace_graph_call
@@ -1305,6 +1317,91 @@
 
 _GLOBAL(ftrace_stub)
 	blr
+
+#ifdef CONFIG_LIVEPATCH
+	/*
+	 * This function runs in the mcount context, between two functions. As
+	 * such it can only clobber registers which are volatile and used in
+	 * function linkage.
+	 *
+	 * We get here when a function A calls another function B, but B has
+	 * been live patched with a new function C.
+	 *
+	 * On entry:
+	 *  - we have no stack frame and can not allocate one
+	 *  - LR points back to the original caller (in A)
+	 *  - CTR holds the new NIP in C
+	 *  - r0 & r12 are free
+	 *
+	 * r0 can't be used as the base register for a DS-form load or store, so
+	 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+	 */
+livepatch_handler:
+	CURRENT_THREAD_INFO(r12, r1)
+
+	/* Save stack pointer into r0 */
+	mr	r0, r1
+
+	/* Allocate 3 x 8 bytes */
+	ld	r1, TI_livepatch_sp(r12)
+	addi	r1, r1, 24
+	std	r1, TI_livepatch_sp(r12)
+
+	/* Save toc & real LR on livepatch stack */
+	std	r2,  -24(r1)
+	mflr	r12
+	std	r12, -16(r1)
+
+	/* Store stack end marker */
+	lis     r12, STACK_END_MAGIC@h
+	ori     r12, r12, STACK_END_MAGIC@l
+	std	r12, -8(r1)
+
+	/* Restore real stack pointer */
+	mr	r1, r0
+
+	/* Put ctr in r12 for global entry and branch there */
+	mfctr	r12
+	bctrl
+
+	/*
+	 * Now we are returning from the patched function to the original
+	 * caller A. We are free to use r0 and r12, and we can use r2 until we
+	 * restore it.
+	 */
+
+	CURRENT_THREAD_INFO(r12, r1)
+
+	/* Save stack pointer into r0 */
+	mr	r0, r1
+
+	ld	r1, TI_livepatch_sp(r12)
+
+	/* Check stack marker hasn't been trashed */
+	lis     r2,  STACK_END_MAGIC@h
+	ori     r2,  r2, STACK_END_MAGIC@l
+	ld	r12, -8(r1)
+1:	tdne	r12, r2
+	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
+
+	/* Restore LR & toc from livepatch stack */
+	ld	r12, -16(r1)
+	mtlr	r12
+	ld	r2,  -24(r1)
+
+	/* Pop livepatch stack frame */
+	CURRENT_THREAD_INFO(r12, r0)
+	subi	r1, r1, 24
+	std	r1, TI_livepatch_sp(r12)
+
+	/* Restore real stack pointer */
+	mr	r1, r0
+
+	/* Return to original caller of live patched function */
+	blr
+#endif
+
+
 #else
 _GLOBAL_TOC(_mcount)
 	/* Taken from output of objdump from lib64/glibc */
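
The livepatch_handler added above parks three doublewords per nested
live-patched call on the per-thread livepatch stack.  With a hypothetical
struct name, the 24-byte frame it pushes on entry and verifies on the way back
looks like this:

struct klp_shadow_frame {
	unsigned long toc;	/* caller's r2, restored after the patched call */
	unsigned long lr;	/* real return address into the original caller */
	unsigned long magic;	/* STACK_END_MAGIC, checked before popping */
};
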
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 290559d..3cb46a3 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,6 +66,7 @@
 #include <asm/udbg.h>
 #include <asm/smp.h>
 #include <asm/debug.h>
+#include <asm/livepatch.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
@@ -607,10 +608,12 @@
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
+		klp_init_thread_info(tp);
 
 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
+		klp_init_thread_info(tp);
 	}
 }
 
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index b2eb468..671fd51 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -284,7 +284,7 @@
 			printk("%s    Effective address: %016llx\n",
 			       level, evt->u.ue_error.effective_address);
 		if (evt->u.ue_error.physical_address_provided)
-			printk("%s      Physial address: %016llx\n",
+			printk("%s      Physical address: %016llx\n",
 			       level, evt->u.ue_error.physical_address);
 		break;
 	case MCE_ERROR_TYPE_SLB:
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 612df30..2a9280b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -55,6 +55,8 @@
 #include <asm/firmware.h>
 #endif
 #include <asm/code-patching.h>
+#include <asm/livepatch.h>
+
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
@@ -983,7 +985,7 @@
 static inline void save_sprs(struct thread_struct *t)
 {
 #ifdef CONFIG_ALTIVEC
-	if (cpu_has_feature(cpu_has_feature(CPU_FTR_ALTIVEC)))
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 		t->vrsave = mfspr(SPRN_VRSAVE);
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -1400,13 +1402,15 @@
 	extern void ret_from_kernel_thread(void);
 	void (*f)(void);
 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+	struct thread_info *ti = task_thread_info(p);
+
+	klp_init_thread_info(ti);
 
 	/* Copy registers */
 	sp -= sizeof(struct pt_regs);
 	childregs = (struct pt_regs *) sp;
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		/* kernel thread */
-		struct thread_info *ti = (void *)task_stack_page(p);
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
 		/* function */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 7030b03..a15fe1d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -148,23 +148,25 @@
 	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
 	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
 	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
+	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
 	unsigned char	pabyte;		/* byte number in ibm,pa-features */
 	unsigned char	pabit;		/* bit number (big-endian) */
 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
 } ibm_pa_features[] __initdata = {
-	{0, 0, PPC_FEATURE_HAS_MMU,	0, 0, 0},
-	{0, 0, PPC_FEATURE_HAS_FPU,	0, 1, 0},
-	{CPU_FTR_CTRL, 0, 0,		0, 3, 0},
-	{CPU_FTR_NOEXECUTE, 0, 0,	0, 6, 0},
-	{CPU_FTR_NODSISRALIGN, 0, 0,	1, 1, 1},
-	{0, MMU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+	{0, 0, PPC_FEATURE_HAS_MMU, 0,		0, 0, 0},
+	{0, 0, PPC_FEATURE_HAS_FPU, 0,		0, 1, 0},
+	{CPU_FTR_CTRL, 0, 0, 0,			0, 3, 0},
+	{CPU_FTR_NOEXECUTE, 0, 0, 0,		0, 6, 0},
+	{CPU_FTR_NODSISRALIGN, 0, 0, 0,		1, 1, 1},
+	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0,		1, 2, 0},
+	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
 	/*
-	 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
-	 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
-	 * which is 0 if the kernel doesn't support TM.
+	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+	 * we don't want to turn on TM here, so we use the *_COMP versions
+	 * which are 0 if the kernel doesn't support TM.
 	 */
-	{CPU_FTR_TM_COMP, 0, 0,		22, 0, 0},
+	{CPU_FTR_TM_COMP, 0, 0,
+	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@
 		if (bit ^ fp->invert) {
 			cur_cpu_spec->cpu_features |= fp->cpu_features;
 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
 			cur_cpu_spec->mmu_features |= fp->mmu_features;
 		} else {
 			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
 			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
 			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
 		}
 	}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f98be83..96d4a2b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -69,6 +69,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/hugetlb.h>
 #include <asm/epapr_hcalls.h>
+#include <asm/livepatch.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -667,16 +668,16 @@
 	limit = min(safe_stack_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
-		unsigned long sp;
-		sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-		sp += THREAD_SIZE;
-		paca[i].emergency_sp = __va(sp);
+		struct thread_info *ti;
+		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		klp_init_thread_info(ti);
+		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for machine check exception handling. */
-		sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-		sp += THREAD_SIZE;
-		paca[i].mc_emergency_sp = __va(sp);
+		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		klp_init_thread_info(ti);
+		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 	}
 }
@@ -700,6 +701,8 @@
 	if (ppc_md.panic)
 		setup_panic();
 
+	klp_init_thread_info(&init_thread_info);
+
 	init_mm.start_code = (unsigned long)_stext;
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8cac1eb..55c924b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -565,7 +565,7 @@
 		smp_ops->give_timebase();
 
 	/* Wait until cpu puts itself in the online & active maps */
-	while (!cpu_online(cpu) || !cpu_active(cpu))
+	while (!cpu_online(cpu))
 		cpu_relax();
 
 	return 0;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b34220d..47018fc 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -54,6 +54,7 @@
 	{ "queue_intr",  VCPU_STAT(queue_intr) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
+	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "pf_storage",  VCPU_STAT(pf_storage) },
 	{ "sp_storage",  VCPU_STAT(sp_storage) },
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 84fb4fc..9324355 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -27,6 +27,7 @@
 #include <linux/export.h>
 #include <linux/fs.h>
 #include <linux/anon_inodes.h>
+#include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/spinlock.h>
 #include <linux/page-flags.h>
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 95bceca..8129b0d 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -882,6 +882,24 @@
 }
 #endif
 
+static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		u64 msr = kvmppc_get_msr(vcpu);
+
+		kvmppc_set_msr(vcpu, msr | MSR_SE);
+	}
+}
+
+static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		u64 msr = kvmppc_get_msr(vcpu);
+
+		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
+	}
+}
+
 int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			  unsigned int exit_nr)
 {
@@ -1207,10 +1225,18 @@
 		break;
 #endif
 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
-	case BOOK3S_INTERRUPT_TRACE:
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		r = RESUME_GUEST;
 		break;
+	case BOOK3S_INTERRUPT_TRACE:
+		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+			run->exit_reason = KVM_EXIT_DEBUG;
+			r = RESUME_HOST;
+		} else {
+			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+			r = RESUME_GUEST;
+		}
+		break;
 	default:
 	{
 		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
@@ -1479,6 +1505,8 @@
 		goto out;
 	}
 
+	kvmppc_setup_debug(vcpu);
+
 	/*
 	 * Interrupts could be timers for the guest which we have to inject
 	 * again, so let's postpone them until we're in the guest and if we
@@ -1501,6 +1529,8 @@
 
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
+	kvmppc_clear_debug(vcpu);
+
 	/* No need for kvm_guest_exit. It's done in handle_exit.
 	   We also get here with interrupts enabled. */
 
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 46871d5..a75ba38 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -92,7 +92,7 @@
 	 * we are the only setter, thus concurrent access is undefined
 	 * to begin with.
 	 */
-	if (level == 1 || level == KVM_INTERRUPT_SET_LEVEL)
+	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
 		state->asserted = 1;
 	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
 		state->asserted = 0;
@@ -280,7 +280,7 @@
 	if (!success)
 		goto bail;
 
-	XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
+	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
 		 icp->server_num,
 		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
 		 old.need_resend, old.out_ee);
@@ -336,7 +336,7 @@
 	union kvmppc_icp_state old_state, new_state;
 	bool success;
 
-	XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
+	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
 		 icp->server_num);
 
 	do {
@@ -1174,9 +1174,11 @@
 			prio = irqp->saved_priority;
 		}
 		val |= prio << KVM_XICS_PRIORITY_SHIFT;
-		if (irqp->asserted)
-			val |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
-		else if (irqp->masked_pending || irqp->resend)
+		if (irqp->lsi) {
+			val |= KVM_XICS_LEVEL_SENSITIVE;
+			if (irqp->asserted)
+				val |= KVM_XICS_PENDING;
+		} else if (irqp->masked_pending || irqp->resend)
 			val |= KVM_XICS_PENDING;
 		ret = 0;
 	}
@@ -1228,9 +1230,13 @@
 	irqp->priority = prio;
 	irqp->resend = 0;
 	irqp->masked_pending = 0;
+	irqp->lsi = 0;
 	irqp->asserted = 0;
-	if ((val & KVM_XICS_PENDING) && (val & KVM_XICS_LEVEL_SENSITIVE))
-		irqp->asserted = 1;
+	if (val & KVM_XICS_LEVEL_SENSITIVE) {
+		irqp->lsi = 1;
+		if (val & KVM_XICS_PENDING)
+			irqp->asserted = 1;
+	}
 	irqp->exists = 1;
 	arch_spin_unlock(&ics->lock);
 	local_irq_restore(flags);
@@ -1249,11 +1255,10 @@
 	return ics_deliver_irq(xics, irq, level);
 }
 
-int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
-		int irq_source_id, int level, bool line_status)
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
+			      struct kvm *kvm, int irq_source_id,
+			      int level, bool line_status)
 {
-	if (!level)
-		return -1;
 	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
 			   level, line_status);
 }
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index 56ea44f..a46b954 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -39,6 +39,7 @@
 	u8  saved_priority;
 	u8  resend;
 	u8  masked_pending;
+	u8  lsi;		/* level-sensitive interrupt */
 	u8  asserted; /* Only for LSI */
 	u8  exists;
 };
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4d66f44..4afae69 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -64,6 +64,7 @@
 	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
+	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "doorbell", VCPU_STAT(dbell_exits) },
 	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6a68730..02416fe 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -800,9 +800,9 @@
 	}
 }
 
-int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
-		       unsigned int rt, unsigned int bytes,
-		       int is_default_endian)
+static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				unsigned int rt, unsigned int bytes,
+				int is_default_endian, int sign_extend)
 {
 	int idx, ret;
 	bool host_swabbed;
@@ -827,7 +827,7 @@
 	vcpu->arch.mmio_host_swabbed = host_swabbed;
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_is_write = 0;
-	vcpu->arch.mmio_sign_extend = 0;
+	vcpu->arch.mmio_sign_extend = sign_extend;
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
@@ -844,6 +844,13 @@
 
 	return EMULATE_DO_MMIO;
 }
+
+int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+		       unsigned int rt, unsigned int bytes,
+		       int is_default_endian)
+{
+	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
+}
 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
 
 /* Same as above, but sign extends */
@@ -851,12 +858,7 @@
 			unsigned int rt, unsigned int bytes,
 			int is_default_endian)
 {
-	int r;
-
-	vcpu->arch.mmio_sign_extend = 1;
-	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
-
-	return r;
+	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
 }
 
 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6dd272b..d991b9e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -413,13 +413,13 @@
 {
 	struct hugepd_freelist **batchp;
 
-	batchp = this_cpu_ptr(&hugepd_freelist_cur);
+	batchp = &get_cpu_var(hugepd_freelist_cur);
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
 	    cpumask_equal(mm_cpumask(tlb->mm),
 			  cpumask_of(smp_processor_id()))) {
 		kmem_cache_free(hugepte_cache, hugepte);
-        put_cpu_var(hugepd_freelist_cur);
+		put_cpu_var(hugepd_freelist_cur);
 		return;
 	}
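
The fix above matters because a bare this_cpu_ptr() gives no protection
against the task migrating to another CPU before the pointer is used, while
get_cpu_var() disables preemption until the matching put_cpu_var().  The safe
pattern in general form, using a hypothetical per-CPU variable:

#include <linux/percpu.h>

DEFINE_PER_CPU(int, example_counter);

static void bump_example_counter(void)
{
	int *p = &get_cpu_var(example_counter);	/* preemption disabled from here */

	(*p)++;
	put_cpu_var(example_counter);		/* preemption enabled again */
}
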
 
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index e04a675..22d9015 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -247,7 +247,7 @@
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+	while (entry->nr < sysctl_perf_event_max_stack) {
 		fp = (unsigned long __user *) sp;
 		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
 			return;
@@ -453,7 +453,7 @@
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+	while (entry->nr < sysctl_perf_event_max_stack) {
 		fp = (unsigned int __user *) (unsigned long) sp;
 		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
 			return;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 3048e34..22645a7 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -278,14 +278,9 @@
  * GPIOLIB hooks
  */
 #if defined(CONFIG_GPIOLIB)
-static inline struct mpc52xx_gpt_priv *gc_to_mpc52xx_gpt(struct gpio_chip *gc)
-{
-	return container_of(gc, struct mpc52xx_gpt_priv, gc);
-}
-
 static int mpc52xx_gpt_gpio_get(struct gpio_chip *gc, unsigned int gpio)
 {
-	struct mpc52xx_gpt_priv *gpt = gc_to_mpc52xx_gpt(gc);
+	struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc);
 
 	return (in_be32(&gpt->regs->status) >> 8) & 1;
 }
@@ -293,7 +288,7 @@
 static void
 mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v)
 {
-	struct mpc52xx_gpt_priv *gpt = gc_to_mpc52xx_gpt(gc);
+	struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc);
 	unsigned long flags;
 	u32 r;
 
@@ -307,7 +302,7 @@
 
 static int mpc52xx_gpt_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
 {
-	struct mpc52xx_gpt_priv *gpt = gc_to_mpc52xx_gpt(gc);
+	struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc);
 	unsigned long flags;
 
 	dev_dbg(gpt->dev, "%s: gpio:%d\n", __func__, gpio);
@@ -354,9 +349,9 @@
 	clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK,
 			MPC52xx_GPT_MODE_MS_GPIO);
 
-	rc = gpiochip_add(&gpt->gc);
+	rc = gpiochip_add_data(&gpt->gc, gpt);
 	if (rc)
-		dev_err(gpt->dev, "gpiochip_add() failed; rc=%i\n", rc);
+		dev_err(gpt->dev, "gpiochip_add_data() failed; rc=%i\n", rc);
 
 	dev_dbg(gpt->dev, "%s() complete.\n", __func__);
 }
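
This and the following GPIO driver conversions all replace container_of()
back-casts with the gpiolib data pointer: the driver hands its private
structure to gpiochip_add_data() at registration time and fetches it back with
gpiochip_get_data() in the callbacks.  A condensed sketch of the pattern
(driver structure, register layout and names are hypothetical):

#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/bitops.h>

struct my_gpio {
	struct gpio_chip gc;
	void __iomem *regs;
};

static int my_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
	struct my_gpio *priv = gpiochip_get_data(gc);	/* was container_of() */

	return !!(in_be32(priv->regs) & BIT(31 - offset));
}

static int my_gpio_register(struct my_gpio *priv)
{
	priv->gc.get = my_gpio_get;
	return gpiochip_add_data(&priv->gc, priv);	/* priv becomes the chip data */
}
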
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 15e8021..dbcd030 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -16,7 +16,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <linux/slab.h>
@@ -99,7 +99,7 @@
 
 static void mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
-	struct mcu *mcu = container_of(gc, struct mcu, gc);
+	struct mcu *mcu = gpiochip_get_data(gc);
 	u8 bit = 1 << (4 + gpio);
 
 	mutex_lock(&mcu->lock);
@@ -136,7 +136,7 @@
 	gc->direction_output = mcu_gpio_dir_out;
 	gc->of_node = np;
 
-	return gpiochip_add(gc);
+	return gpiochip_add_data(gc, mcu);
 }
 
 static int mcu_gpiochip_remove(struct mcu *mcu)
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index be6212d..84fb984 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -137,6 +137,7 @@
 	char *name;
 	char fullname[80], *buf;
 	struct elf_note en;
+	size_t skip;
 
 	buf = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!buf)
@@ -171,8 +172,8 @@
 	if (rc < 0)
 		goto out;
 
-	if (!dump_skip(cprm,
-		       roundup(cprm->written - total + sz, 4) - cprm->written))
+	skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
+	if (!dump_skip(cprm, skip))
 		goto Eio;
 out:
 	free_page((unsigned long)buf);
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index dfa8638..5be15cf 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -238,7 +238,7 @@
 	.release	= spufs_dir_close,
 	.llseek		= dcache_dir_lseek,
 	.read		= generic_read_dir,
-	.iterate	= dcache_readdir,
+	.iterate_shared	= dcache_readdir,
 	.fsync		= noop_fsync,
 };
 EXPORT_SYMBOL_GPL(spufs_context_fops);
@@ -732,8 +732,8 @@
 		return -ENOMEM;
 
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = SPUFS_MAGIC;
 	sb->s_op = &s_ops;
 	sb->s_fs_info = info;
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 8ed6536..6c11099 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -532,15 +532,9 @@
 	u16 cpdata;
 };
 
-static inline struct cpm1_gpio16_chip *
-to_cpm1_gpio16_chip(struct of_mm_gpio_chip *mm_gc)
-{
-	return container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
-}
-
 static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc);
+	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
 	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
 
 	cpm1_gc->cpdata = in_be16(&iop->dat);
@@ -560,7 +554,7 @@
 static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask,
 	int value)
 {
-	struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc);
+	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
 	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
 
 	if (value)
@@ -574,7 +568,7 @@
 static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc);
+	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
 	unsigned long flags;
 	u16 pin_mask = 1 << (15 - gpio);
 
@@ -588,7 +582,7 @@
 static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc);
+	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
 	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
 	unsigned long flags;
 	u16 pin_mask = 1 << (15 - gpio);
@@ -606,7 +600,7 @@
 static int cpm1_gpio16_dir_in(struct gpio_chip *gc, unsigned int gpio)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc);
+	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
 	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
 	unsigned long flags;
 	u16 pin_mask = 1 << (15 - gpio);
@@ -642,7 +636,7 @@
 	gc->get = cpm1_gpio16_get;
 	gc->set = cpm1_gpio16_set;
 
-	return of_mm_gpiochip_add(np, mm_gc);
+	return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
 }
 
 struct cpm1_gpio32_chip {
@@ -653,15 +647,9 @@
 	u32 cpdata;
 };
 
-static inline struct cpm1_gpio32_chip *
-to_cpm1_gpio32_chip(struct of_mm_gpio_chip *mm_gc)
-{
-	return container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
-}
-
 static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc);
+	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
 	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
 
 	cpm1_gc->cpdata = in_be32(&iop->dat);
@@ -681,7 +669,7 @@
 static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
 	int value)
 {
-	struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc);
+	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
 	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
 
 	if (value)
@@ -695,7 +683,7 @@
 static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc);
+	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
 	unsigned long flags;
 	u32 pin_mask = 1 << (31 - gpio);
 
@@ -709,7 +697,7 @@
 static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc);
+	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
 	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
 	unsigned long flags;
 	u32 pin_mask = 1 << (31 - gpio);
@@ -727,7 +715,7 @@
 static int cpm1_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc);
+	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
 	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
 	unsigned long flags;
 	u32 pin_mask = 1 << (31 - gpio);
@@ -763,7 +751,7 @@
 	gc->get = cpm1_gpio32_get;
 	gc->set = cpm1_gpio32_set;
 
-	return of_mm_gpiochip_add(np, mm_gc);
+	return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
 }
 
 static int cpm_init_par_io(void)
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 9d32465..0ac12e5 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -80,15 +80,9 @@
 	u32 cpdata;
 };
 
-static inline struct cpm2_gpio32_chip *
-to_cpm2_gpio32_chip(struct of_mm_gpio_chip *mm_gc)
-{
-	return container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
-}
-
 static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
+	struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc);
 	struct cpm2_ioports __iomem *iop = mm_gc->regs;
 
 	cpm2_gc->cpdata = in_be32(&iop->dat);
@@ -108,7 +102,7 @@
 static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
 	int value)
 {
-	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
+	struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc);
 	struct cpm2_ioports __iomem *iop = mm_gc->regs;
 
 	if (value)
@@ -122,7 +116,7 @@
 static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
+	struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc);
 	unsigned long flags;
 	u32 pin_mask = 1 << (31 - gpio);
 
@@ -136,7 +130,7 @@
 static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
+	struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc);
 	struct cpm2_ioports __iomem *iop = mm_gc->regs;
 	unsigned long flags;
 	u32 pin_mask = 1 << (31 - gpio);
@@ -154,7 +148,7 @@
 static int cpm2_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
+	struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc);
 	struct cpm2_ioports __iomem *iop = mm_gc->regs;
 	unsigned long flags;
 	u32 pin_mask = 1 << (31 - gpio);
@@ -190,6 +184,6 @@
 	gc->get = cpm2_gpio32_get;
 	gc->set = cpm2_gpio32_set;
 
-	return of_mm_gpiochip_add(np, mm_gc);
+	return of_mm_gpiochip_add_data(np, mm_gc, cpm2_gc);
 }
 #endif /* CONFIG_CPM2 || CONFIG_8xx_GPIO */
diff --git a/arch/powerpc/sysdev/ppc4xx_gpio.c b/arch/powerpc/sysdev/ppc4xx_gpio.c
index d7a7ef13..5382d04 100644
--- a/arch/powerpc/sysdev/ppc4xx_gpio.c
+++ b/arch/powerpc/sysdev/ppc4xx_gpio.c
@@ -27,7 +27,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 
@@ -67,12 +67,6 @@
  * There are a maximum of 32 gpios in each gpio controller.
  */
 
-static inline struct ppc4xx_gpio_chip *
-to_ppc4xx_gpiochip(struct of_mm_gpio_chip *mm_gc)
-{
-	return container_of(mm_gc, struct ppc4xx_gpio_chip, mm_gc);
-}
-
 static int ppc4xx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
@@ -96,8 +90,7 @@
 static void
 ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
-	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
+	struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc);
 	unsigned long flags;
 
 	spin_lock_irqsave(&chip->lock, flags);
@@ -112,7 +105,7 @@
 static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
+	struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc);
 	struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
 	unsigned long flags;
 
@@ -142,7 +135,7 @@
 ppc4xx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
+	struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc);
 	struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
 	unsigned long flags;
 
@@ -200,7 +193,7 @@
 		gc->get = ppc4xx_gpio_get;
 		gc->set = ppc4xx_gpio_set;
 
-		ret = of_mm_gpiochip_add(np, mm_gc);
+		ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc);
 		if (ret)
 			goto err;
 		continue;
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
index 56ce8ca..ef470b4 100644
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -19,7 +19,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/slab.h>
 #include <asm/prom.h>
 #include "simple_gpio.h"
@@ -32,11 +32,6 @@
 	u8 data;
 };
 
-static struct u8_gpio_chip *to_u8_gpio_chip(struct of_mm_gpio_chip *mm_gc)
-{
-	return container_of(mm_gc, struct u8_gpio_chip, mm_gc);
-}
-
 static u8 u8_pin2mask(unsigned int pin)
 {
 	return 1 << (8 - 1 - pin);
@@ -52,7 +47,7 @@
 static void u8_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);
+	struct u8_gpio_chip *u8_gc = gpiochip_get_data(gc);
 	unsigned long flags;
 
 	spin_lock_irqsave(&u8_gc->lock, flags);
@@ -80,7 +75,7 @@
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);
+	struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
 
 	u8_gc->data = in_8(mm_gc->regs);
 }
@@ -108,7 +103,7 @@
 	gc->get = u8_gpio_get;
 	gc->set = u8_gpio_set;
 
-	ret = of_mm_gpiochip_add(np, mm_gc);
+	ret = of_mm_gpiochip_add_data(np, mm_gc, u8_gc);
 	if (ret)
 		goto err;
 	return 0;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b9df8d1..de0fcc0 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -4,6 +4,9 @@
 config ZONE_DMA
 	def_bool y
 
+config CPU_BIG_ENDIAN
+	def_bool y
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
@@ -59,6 +62,9 @@
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
 
+config DEBUG_RODATA
+	def_bool y
+
 config S390
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -101,6 +107,7 @@
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select ARCH_WANTS_PROT_NUMA_PROT_NONE
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
@@ -120,7 +127,7 @@
 	select HAVE_ARCH_SOFT_DIRTY
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
-	select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
+	select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_DEBUG_KMEMLEAK
@@ -204,7 +211,7 @@
 
 choice
 	prompt "Processor type"
-	default MARCH_Z900
+	default MARCH_Z196
 
 config MARCH_Z900
 	bool "IBM zSeries model z800 and z900"
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 48e1a2d..7554a8b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -28,7 +28,7 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <crypto/xts.h>
-#include "crypt_s390.h"
+#include <asm/cpacf.h>
 
 #define AES_KEYLEN_128		1
 #define AES_KEYLEN_192		2
@@ -145,16 +145,16 @@
 
 	switch (sctx->key_len) {
 	case 16:
-		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
-			      AES_BLOCK_SIZE);
+		cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
+			 AES_BLOCK_SIZE);
 		break;
 	case 24:
-		crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
-			      AES_BLOCK_SIZE);
+		cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
+			 AES_BLOCK_SIZE);
 		break;
 	case 32:
-		crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
-			      AES_BLOCK_SIZE);
+		cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
+			 AES_BLOCK_SIZE);
 		break;
 	}
 }
@@ -170,16 +170,16 @@
 
 	switch (sctx->key_len) {
 	case 16:
-		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
-			      AES_BLOCK_SIZE);
+		cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
+			 AES_BLOCK_SIZE);
 		break;
 	case 24:
-		crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
-			      AES_BLOCK_SIZE);
+		cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
+			 AES_BLOCK_SIZE);
 		break;
 	case 32:
-		crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
-			      AES_BLOCK_SIZE);
+		cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
+			 AES_BLOCK_SIZE);
 		break;
 	}
 }
@@ -212,7 +212,7 @@
 static struct crypto_alg aes_alg = {
 	.cra_name		=	"aes",
 	.cra_driver_name	=	"aes-s390",
-	.cra_priority		=	CRYPT_S390_PRIORITY,
+	.cra_priority		=	300,
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -298,16 +298,16 @@
 
 	switch (key_len) {
 	case 16:
-		sctx->enc = KM_AES_128_ENCRYPT;
-		sctx->dec = KM_AES_128_DECRYPT;
+		sctx->enc = CPACF_KM_AES_128_ENC;
+		sctx->dec = CPACF_KM_AES_128_DEC;
 		break;
 	case 24:
-		sctx->enc = KM_AES_192_ENCRYPT;
-		sctx->dec = KM_AES_192_DECRYPT;
+		sctx->enc = CPACF_KM_AES_192_ENC;
+		sctx->dec = CPACF_KM_AES_192_DEC;
 		break;
 	case 32:
-		sctx->enc = KM_AES_256_ENCRYPT;
-		sctx->dec = KM_AES_256_DECRYPT;
+		sctx->enc = CPACF_KM_AES_256_ENC;
+		sctx->dec = CPACF_KM_AES_256_DEC;
 		break;
 	}
 
@@ -326,7 +326,7 @@
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_km(func, param, out, in, n);
+		ret = cpacf_km(func, param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
@@ -393,7 +393,7 @@
 static struct crypto_alg ecb_aes_alg = {
 	.cra_name		=	"ecb(aes)",
 	.cra_driver_name	=	"ecb-aes-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: aes + ecb */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -427,16 +427,16 @@
 
 	switch (key_len) {
 	case 16:
-		sctx->enc = KMC_AES_128_ENCRYPT;
-		sctx->dec = KMC_AES_128_DECRYPT;
+		sctx->enc = CPACF_KMC_AES_128_ENC;
+		sctx->dec = CPACF_KMC_AES_128_DEC;
 		break;
 	case 24:
-		sctx->enc = KMC_AES_192_ENCRYPT;
-		sctx->dec = KMC_AES_192_DECRYPT;
+		sctx->enc = CPACF_KMC_AES_192_ENC;
+		sctx->dec = CPACF_KMC_AES_192_DEC;
 		break;
 	case 32:
-		sctx->enc = KMC_AES_256_ENCRYPT;
-		sctx->dec = KMC_AES_256_DECRYPT;
+		sctx->enc = CPACF_KMC_AES_256_ENC;
+		sctx->dec = CPACF_KMC_AES_256_DEC;
 		break;
 	}
 
@@ -465,7 +465,7 @@
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, &param, out, in, n);
+		ret = cpacf_kmc(func, &param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
@@ -509,7 +509,7 @@
 static struct crypto_alg cbc_aes_alg = {
 	.cra_name		=	"cbc(aes)",
 	.cra_driver_name	=	"cbc-aes-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: aes + cbc */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -596,8 +596,8 @@
 
 	switch (key_len) {
 	case 32:
-		xts_ctx->enc = KM_XTS_128_ENCRYPT;
-		xts_ctx->dec = KM_XTS_128_DECRYPT;
+		xts_ctx->enc = CPACF_KM_XTS_128_ENC;
+		xts_ctx->dec = CPACF_KM_XTS_128_DEC;
 		memcpy(xts_ctx->key + 16, in_key, 16);
 		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
 		break;
@@ -607,8 +607,8 @@
 		xts_fallback_setkey(tfm, in_key, key_len);
 		break;
 	case 64:
-		xts_ctx->enc = KM_XTS_256_ENCRYPT;
-		xts_ctx->dec = KM_XTS_256_DECRYPT;
+		xts_ctx->enc = CPACF_KM_XTS_256_ENC;
+		xts_ctx->dec = CPACF_KM_XTS_256_DEC;
 		memcpy(xts_ctx->key, in_key, 32);
 		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
 		break;
@@ -643,7 +643,8 @@
 	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
 	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
 	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
-	ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
+	/* remove decipher modifier bit from 'func' and call PCC */
+	ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
 	if (ret < 0)
 		return -EIO;
 
@@ -655,7 +656,7 @@
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 
-		ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
+		ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
@@ -721,7 +722,7 @@
 static struct crypto_alg xts_aes_alg = {
 	.cra_name		=	"xts(aes)",
 	.cra_driver_name	=	"xts-aes-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: aes + xts */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
 					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		=	AES_BLOCK_SIZE,
@@ -751,16 +752,16 @@
 
 	switch (key_len) {
 	case 16:
-		sctx->enc = KMCTR_AES_128_ENCRYPT;
-		sctx->dec = KMCTR_AES_128_DECRYPT;
+		sctx->enc = CPACF_KMCTR_AES_128_ENC;
+		sctx->dec = CPACF_KMCTR_AES_128_DEC;
 		break;
 	case 24:
-		sctx->enc = KMCTR_AES_192_ENCRYPT;
-		sctx->dec = KMCTR_AES_192_DECRYPT;
+		sctx->enc = CPACF_KMCTR_AES_192_ENC;
+		sctx->dec = CPACF_KMCTR_AES_192_DEC;
 		break;
 	case 32:
-		sctx->enc = KMCTR_AES_256_ENCRYPT;
-		sctx->dec = KMCTR_AES_256_DECRYPT;
+		sctx->enc = CPACF_KMCTR_AES_256_ENC;
+		sctx->dec = CPACF_KMCTR_AES_256_DEC;
 		break;
 	}
 
@@ -804,8 +805,7 @@
 				n = __ctrblk_init(ctrptr, nbytes);
 			else
 				n = AES_BLOCK_SIZE;
-			ret = crypt_s390_kmctr(func, sctx->key, out, in,
-					       n, ctrptr);
+			ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
 			if (ret < 0 || ret != n) {
 				if (ctrptr == ctrblk)
 					spin_unlock(&ctrblk_lock);
@@ -837,8 +837,8 @@
 	if (nbytes) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
-		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
-				       AES_BLOCK_SIZE, ctrbuf);
+		ret = cpacf_kmctr(func, sctx->key, buf, in,
+				  AES_BLOCK_SIZE, ctrbuf);
 		if (ret < 0 || ret != AES_BLOCK_SIZE)
 			return -EIO;
 		memcpy(out, buf, nbytes);
@@ -875,7 +875,7 @@
 static struct crypto_alg ctr_aes_alg = {
 	.cra_name		=	"ctr(aes)",
 	.cra_driver_name	=	"ctr-aes-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: aes + ctr */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	1,
 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
@@ -899,11 +899,11 @@
 {
 	int ret;
 
-	if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
+	if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
 		keylen_flag |= AES_KEYLEN_128;
-	if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
+	if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
 		keylen_flag |= AES_KEYLEN_192;
-	if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
+	if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
 		keylen_flag |= AES_KEYLEN_256;
 
 	if (!keylen_flag)
@@ -926,22 +926,17 @@
 	if (ret)
 		goto cbc_aes_err;
 
-	if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
-			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
-	    crypt_s390_func_available(KM_XTS_256_ENCRYPT,
-			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+	if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
+	    cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
 		ret = crypto_register_alg(&xts_aes_alg);
 		if (ret)
 			goto xts_aes_err;
 		xts_aes_alg_reg = 1;
 	}
 
-	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
-				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
-	    crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
-				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
-	    crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
-				CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+	if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
+	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
+	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
 		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
 		if (!ctrblk) {
 			ret = -ENOMEM;
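
Two things recur throughout the aes_s390.c conversion above: availability is now probed with cpacf_query() on the instruction itself (defined in the new asm/cpacf.h added further down) instead of per-MSA facility masks, and each decrypt function code is the corresponding encrypt code with the 0x80 decipher modifier set, which is why the XTS path calls PCC with func & 0x7f. A condensed, hedged sketch of the gating pattern, not a drop-in replacement for the driver code:

	#include <asm/cpacf.h>

	/* Sketch: register the XTS combo only if both key sizes are backed by
	 * KM, mirroring the init logic above.  Note that, e.g.,
	 * CPACF_KM_AES_128_ENC = 0x12 and CPACF_KM_AES_128_DEC = 0x92. */
	static int xts_km_available(void)
	{
		return cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
		       cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC);
	}
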
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
deleted file mode 100644
index d9c4c31..0000000
--- a/arch/s390/crypto/crypt_s390.h
+++ /dev/null
@@ -1,493 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support for s390 cryptographic instructions.
- *
- *   Copyright IBM Corp. 2003, 2015
- *   Author(s): Thomas Spatzier
- *		Jan Glauber (jan.glauber@de.ibm.com)
- *		Harald Freudenberger (freude@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#ifndef _CRYPTO_ARCH_S390_CRYPT_S390_H
-#define _CRYPTO_ARCH_S390_CRYPT_S390_H
-
-#include <asm/errno.h>
-#include <asm/facility.h>
-
-#define CRYPT_S390_OP_MASK 0xFF00
-#define CRYPT_S390_FUNC_MASK 0x00FF
-
-#define CRYPT_S390_PRIORITY 300
-#define CRYPT_S390_COMPOSITE_PRIORITY 400
-
-#define CRYPT_S390_MSA	0x1
-#define CRYPT_S390_MSA3	0x2
-#define CRYPT_S390_MSA4	0x4
-#define CRYPT_S390_MSA5	0x8
-
-/* s390 cryptographic operations */
-enum crypt_s390_operations {
-	CRYPT_S390_KM	 = 0x0100,
-	CRYPT_S390_KMC	 = 0x0200,
-	CRYPT_S390_KIMD  = 0x0300,
-	CRYPT_S390_KLMD  = 0x0400,
-	CRYPT_S390_KMAC  = 0x0500,
-	CRYPT_S390_KMCTR = 0x0600,
-	CRYPT_S390_PPNO  = 0x0700
-};
-
-/*
- * function codes for KM (CIPHER MESSAGE) instruction
- * 0x80 is the decipher modifier bit
- */
-enum crypt_s390_km_func {
-	KM_QUERY	    = CRYPT_S390_KM | 0x0,
-	KM_DEA_ENCRYPT      = CRYPT_S390_KM | 0x1,
-	KM_DEA_DECRYPT      = CRYPT_S390_KM | 0x1 | 0x80,
-	KM_TDEA_128_ENCRYPT = CRYPT_S390_KM | 0x2,
-	KM_TDEA_128_DECRYPT = CRYPT_S390_KM | 0x2 | 0x80,
-	KM_TDEA_192_ENCRYPT = CRYPT_S390_KM | 0x3,
-	KM_TDEA_192_DECRYPT = CRYPT_S390_KM | 0x3 | 0x80,
-	KM_AES_128_ENCRYPT  = CRYPT_S390_KM | 0x12,
-	KM_AES_128_DECRYPT  = CRYPT_S390_KM | 0x12 | 0x80,
-	KM_AES_192_ENCRYPT  = CRYPT_S390_KM | 0x13,
-	KM_AES_192_DECRYPT  = CRYPT_S390_KM | 0x13 | 0x80,
-	KM_AES_256_ENCRYPT  = CRYPT_S390_KM | 0x14,
-	KM_AES_256_DECRYPT  = CRYPT_S390_KM | 0x14 | 0x80,
-	KM_XTS_128_ENCRYPT  = CRYPT_S390_KM | 0x32,
-	KM_XTS_128_DECRYPT  = CRYPT_S390_KM | 0x32 | 0x80,
-	KM_XTS_256_ENCRYPT  = CRYPT_S390_KM | 0x34,
-	KM_XTS_256_DECRYPT  = CRYPT_S390_KM | 0x34 | 0x80,
-};
-
-/*
- * function codes for KMC (CIPHER MESSAGE WITH CHAINING)
- * instruction
- */
-enum crypt_s390_kmc_func {
-	KMC_QUERY            = CRYPT_S390_KMC | 0x0,
-	KMC_DEA_ENCRYPT      = CRYPT_S390_KMC | 0x1,
-	KMC_DEA_DECRYPT      = CRYPT_S390_KMC | 0x1 | 0x80,
-	KMC_TDEA_128_ENCRYPT = CRYPT_S390_KMC | 0x2,
-	KMC_TDEA_128_DECRYPT = CRYPT_S390_KMC | 0x2 | 0x80,
-	KMC_TDEA_192_ENCRYPT = CRYPT_S390_KMC | 0x3,
-	KMC_TDEA_192_DECRYPT = CRYPT_S390_KMC | 0x3 | 0x80,
-	KMC_AES_128_ENCRYPT  = CRYPT_S390_KMC | 0x12,
-	KMC_AES_128_DECRYPT  = CRYPT_S390_KMC | 0x12 | 0x80,
-	KMC_AES_192_ENCRYPT  = CRYPT_S390_KMC | 0x13,
-	KMC_AES_192_DECRYPT  = CRYPT_S390_KMC | 0x13 | 0x80,
-	KMC_AES_256_ENCRYPT  = CRYPT_S390_KMC | 0x14,
-	KMC_AES_256_DECRYPT  = CRYPT_S390_KMC | 0x14 | 0x80,
-	KMC_PRNG	     = CRYPT_S390_KMC | 0x43,
-};
-
-/*
- * function codes for KMCTR (CIPHER MESSAGE WITH COUNTER)
- * instruction
- */
-enum crypt_s390_kmctr_func {
-	KMCTR_QUERY            = CRYPT_S390_KMCTR | 0x0,
-	KMCTR_DEA_ENCRYPT      = CRYPT_S390_KMCTR | 0x1,
-	KMCTR_DEA_DECRYPT      = CRYPT_S390_KMCTR | 0x1 | 0x80,
-	KMCTR_TDEA_128_ENCRYPT = CRYPT_S390_KMCTR | 0x2,
-	KMCTR_TDEA_128_DECRYPT = CRYPT_S390_KMCTR | 0x2 | 0x80,
-	KMCTR_TDEA_192_ENCRYPT = CRYPT_S390_KMCTR | 0x3,
-	KMCTR_TDEA_192_DECRYPT = CRYPT_S390_KMCTR | 0x3 | 0x80,
-	KMCTR_AES_128_ENCRYPT  = CRYPT_S390_KMCTR | 0x12,
-	KMCTR_AES_128_DECRYPT  = CRYPT_S390_KMCTR | 0x12 | 0x80,
-	KMCTR_AES_192_ENCRYPT  = CRYPT_S390_KMCTR | 0x13,
-	KMCTR_AES_192_DECRYPT  = CRYPT_S390_KMCTR | 0x13 | 0x80,
-	KMCTR_AES_256_ENCRYPT  = CRYPT_S390_KMCTR | 0x14,
-	KMCTR_AES_256_DECRYPT  = CRYPT_S390_KMCTR | 0x14 | 0x80,
-};
-
-/*
- * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
- * instruction
- */
-enum crypt_s390_kimd_func {
-	KIMD_QUERY   = CRYPT_S390_KIMD | 0,
-	KIMD_SHA_1   = CRYPT_S390_KIMD | 1,
-	KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
-	KIMD_SHA_512 = CRYPT_S390_KIMD | 3,
-	KIMD_GHASH   = CRYPT_S390_KIMD | 65,
-};
-
-/*
- * function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
- * instruction
- */
-enum crypt_s390_klmd_func {
-	KLMD_QUERY   = CRYPT_S390_KLMD | 0,
-	KLMD_SHA_1   = CRYPT_S390_KLMD | 1,
-	KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
-	KLMD_SHA_512 = CRYPT_S390_KLMD | 3,
-};
-
-/*
- * function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
- * instruction
- */
-enum crypt_s390_kmac_func {
-	KMAC_QUERY    = CRYPT_S390_KMAC | 0,
-	KMAC_DEA      = CRYPT_S390_KMAC | 1,
-	KMAC_TDEA_128 = CRYPT_S390_KMAC | 2,
-	KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
-};
-
-/*
- * function codes for PPNO (PERFORM PSEUDORANDOM NUMBER
- * OPERATION) instruction
- */
-enum crypt_s390_ppno_func {
-	PPNO_QUERY	      = CRYPT_S390_PPNO | 0,
-	PPNO_SHA512_DRNG_GEN  = CRYPT_S390_PPNO | 3,
-	PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83
-};
-
-/**
- * crypt_s390_km:
- * @func: the function code passed to KM; see crypt_s390_km_func
- * @param: address of parameter block; see POP for details on each func
- * @dest: address of destination memory area
- * @src: address of source memory area
- * @src_len: length of src operand in bytes
- *
- * Executes the KM (CIPHER MESSAGE) operation of the CPU.
- *
- * Returns -1 for failure, 0 for the query func, number of processed
- * bytes for encryption/decryption funcs
- */
-static inline int crypt_s390_km(long func, void *param,
-				u8 *dest, const u8 *src, long src_len)
-{
-	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
-	register void *__param asm("1") = param;
-	register const u8 *__src asm("2") = src;
-	register long __src_len asm("3") = src_len;
-	register u8 *__dest asm("4") = dest;
-	int ret;
-
-	asm volatile(
-		"0:	.insn	rre,0xb92e0000,%3,%1\n" /* KM opcode */
-		"1:	brc	1,0b\n" /* handle partial completion */
-		"	la	%0,0\n"
-		"2:\n"
-		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
-		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
-	if (ret < 0)
-		return ret;
-	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
-}
-
-/**
- * crypt_s390_kmc:
- * @func: the function code passed to KM; see crypt_s390_kmc_func
- * @param: address of parameter block; see POP for details on each func
- * @dest: address of destination memory area
- * @src: address of source memory area
- * @src_len: length of src operand in bytes
- *
- * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
- *
- * Returns -1 for failure, 0 for the query func, number of processed
- * bytes for encryption/decryption funcs
- */
-static inline int crypt_s390_kmc(long func, void *param,
-				 u8 *dest, const u8 *src, long src_len)
-{
-	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
-	register void *__param asm("1") = param;
-	register const u8 *__src asm("2") = src;
-	register long __src_len asm("3") = src_len;
-	register u8 *__dest asm("4") = dest;
-	int ret;
-
-	asm volatile(
-		"0:	.insn	rre,0xb92f0000,%3,%1\n" /* KMC opcode */
-		"1:	brc	1,0b\n" /* handle partial completion */
-		"	la	%0,0\n"
-		"2:\n"
-		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
-		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
-	if (ret < 0)
-		return ret;
-	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
-}
-
-/**
- * crypt_s390_kimd:
- * @func: the function code passed to KM; see crypt_s390_kimd_func
- * @param: address of parameter block; see POP for details on each func
- * @src: address of source memory area
- * @src_len: length of src operand in bytes
- *
- * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
- * of the CPU.
- *
- * Returns -1 for failure, 0 for the query func, number of processed
- * bytes for digest funcs
- */
-static inline int crypt_s390_kimd(long func, void *param,
-				  const u8 *src, long src_len)
-{
-	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
-	register void *__param asm("1") = param;
-	register const u8 *__src asm("2") = src;
-	register long __src_len asm("3") = src_len;
-	int ret;
-
-	asm volatile(
-		"0:	.insn	rre,0xb93e0000,%1,%1\n" /* KIMD opcode */
-		"1:	brc	1,0b\n" /* handle partial completion */
-		"	la	%0,0\n"
-		"2:\n"
-		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "=d" (ret), "+a" (__src), "+d" (__src_len)
-		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
-	if (ret < 0)
-		return ret;
-	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
-}
-
-/**
- * crypt_s390_klmd:
- * @func: the function code passed to KM; see crypt_s390_klmd_func
- * @param: address of parameter block; see POP for details on each func
- * @src: address of source memory area
- * @src_len: length of src operand in bytes
- *
- * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
- *
- * Returns -1 for failure, 0 for the query func, number of processed
- * bytes for digest funcs
- */
-static inline int crypt_s390_klmd(long func, void *param,
-				  const u8 *src, long src_len)
-{
-	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
-	register void *__param asm("1") = param;
-	register const u8 *__src asm("2") = src;
-	register long __src_len asm("3") = src_len;
-	int ret;
-
-	asm volatile(
-		"0:	.insn	rre,0xb93f0000,%1,%1\n" /* KLMD opcode */
-		"1:	brc	1,0b\n" /* handle partial completion */
-		"	la	%0,0\n"
-		"2:\n"
-		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "=d" (ret), "+a" (__src), "+d" (__src_len)
-		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
-	if (ret < 0)
-		return ret;
-	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
-}
-
-/**
- * crypt_s390_kmac:
- * @func: the function code passed to KM; see crypt_s390_klmd_func
- * @param: address of parameter block; see POP for details on each func
- * @src: address of source memory area
- * @src_len: length of src operand in bytes
- *
- * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
- * of the CPU.
- *
- * Returns -1 for failure, 0 for the query func, number of processed
- * bytes for digest funcs
- */
-static inline int crypt_s390_kmac(long func, void *param,
-				  const u8 *src, long src_len)
-{
-	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
-	register void *__param asm("1") = param;
-	register const u8 *__src asm("2") = src;
-	register long __src_len asm("3") = src_len;
-	int ret;
-
-	asm volatile(
-		"0:	.insn	rre,0xb91e0000,%1,%1\n" /* KLAC opcode */
-		"1:	brc	1,0b\n" /* handle partial completion */
-		"	la	%0,0\n"
-		"2:\n"
-		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "=d" (ret), "+a" (__src), "+d" (__src_len)
-		: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
-	if (ret < 0)
-		return ret;
-	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
-}
-
-/**
- * crypt_s390_kmctr:
- * @func: the function code passed to KMCTR; see crypt_s390_kmctr_func
- * @param: address of parameter block; see POP for details on each func
- * @dest: address of destination memory area
- * @src: address of source memory area
- * @src_len: length of src operand in bytes
- * @counter: address of counter value
- *
- * Executes the KMCTR (CIPHER MESSAGE WITH COUNTER) operation of the CPU.
- *
- * Returns -1 for failure, 0 for the query func, number of processed
- * bytes for encryption/decryption funcs
- */
-static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
-				 const u8 *src, long src_len, u8 *counter)
-{
-	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
-	register void *__param asm("1") = param;
-	register const u8 *__src asm("2") = src;
-	register long __src_len asm("3") = src_len;
-	register u8 *__dest asm("4") = dest;
-	register u8 *__ctr asm("6") = counter;
-	int ret = -1;
-
-	asm volatile(
-		"0:	.insn	rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */
-		"1:	brc	1,0b\n" /* handle partial completion */
-		"	la	%0,0\n"
-		"2:\n"
-		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
-		  "+a" (__ctr)
-		: "d" (__func), "a" (__param) : "cc", "memory");
-	if (ret < 0)
-		return ret;
-	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
-}
-
-/**
- * crypt_s390_ppno:
- * @func: the function code passed to PPNO; see crypt_s390_ppno_func
- * @param: address of parameter block; see POP for details on each func
- * @dest: address of destination memory area
- * @dest_len: size of destination memory area in bytes
- * @seed: address of seed data
- * @seed_len: size of seed data in bytes
- *
- * Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
- * operation of the CPU.
- *
- * Returns -1 for failure, 0 for the query func, number of random
- * bytes stored in dest buffer for generate function
- */
-static inline int crypt_s390_ppno(long func, void *param,
-				  u8 *dest, long dest_len,
-				  const u8 *seed, long seed_len)
-{
-	register long  __func	  asm("0") = func & CRYPT_S390_FUNC_MASK;
-	register void *__param	  asm("1") = param;    /* param block (240 bytes) */
-	register u8   *__dest	  asm("2") = dest;     /* buf for recv random bytes */
-	register long  __dest_len asm("3") = dest_len; /* requested random bytes */
-	register const u8 *__seed asm("4") = seed;     /* buf with seed data */
-	register long  __seed_len asm("5") = seed_len; /* bytes in seed buf */
-	int ret = -1;
-
-	asm volatile (
-		"0:	.insn	rre,0xb93c0000,%1,%5\n"	/* PPNO opcode */
-		"1:	brc	1,0b\n"	  /* handle partial completion */
-		"	la	%0,0\n"
-		"2:\n"
-		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "+d" (ret), "+a"(__dest), "+d"(__dest_len)
-		: "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len)
-		: "cc", "memory");
-	if (ret < 0)
-		return ret;
-	return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0;
-}
-
-/**
- * crypt_s390_func_available:
- * @func: the function code of the specific function; 0 if op in general
- *
- * Tests if a specific crypto function is implemented on the machine.
- *
- * Returns 1 if func available; 0 if func or op in general not available
- */
-static inline int crypt_s390_func_available(int func,
-					    unsigned int facility_mask)
-{
-	unsigned char status[16];
-	int ret;
-
-	if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
-		return 0;
-	if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
-		return 0;
-	if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
-		return 0;
-	if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57))
-		return 0;
-
-	switch (func & CRYPT_S390_OP_MASK) {
-	case CRYPT_S390_KM:
-		ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
-		break;
-	case CRYPT_S390_KMC:
-		ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
-		break;
-	case CRYPT_S390_KIMD:
-		ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
-		break;
-	case CRYPT_S390_KLMD:
-		ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
-		break;
-	case CRYPT_S390_KMAC:
-		ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
-		break;
-	case CRYPT_S390_KMCTR:
-		ret = crypt_s390_kmctr(KMCTR_QUERY, &status,
-				       NULL, NULL, 0, NULL);
-		break;
-	case CRYPT_S390_PPNO:
-		ret = crypt_s390_ppno(PPNO_QUERY, &status,
-				      NULL, 0, NULL, 0);
-		break;
-	default:
-		return 0;
-	}
-	if (ret < 0)
-		return 0;
-	func &= CRYPT_S390_FUNC_MASK;
-	func &= 0x7f;		/* mask modifier bit */
-	return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
-}
-
-/**
- * crypt_s390_pcc:
- * @func: the function code passed to KM; see crypt_s390_km_func
- * @param: address of parameter block; see POP for details on each func
- *
- * Executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION) operation of the CPU.
- *
- * Returns -1 for failure, 0 for success.
- */
-static inline int crypt_s390_pcc(long func, void *param)
-{
-	register long __func asm("0") = func & 0x7f; /* encrypt or decrypt */
-	register void *__param asm("1") = param;
-	int ret = -1;
-
-	asm volatile(
-		"0:	.insn	rre,0xb92c0000,0,0\n" /* PCC opcode */
-		"1:	brc	1,0b\n" /* handle partial completion */
-		"	la	%0,0\n"
-		"2:\n"
-		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "+d" (ret)
-		: "d" (__func), "a" (__param) : "cc", "memory");
-	return ret;
-}
-
-#endif	/* _CRYPTO_ARCH_S390_CRYPT_S390_H */
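
The query mechanism itself carries over: both the removed crypt_s390_func_available() above and the cpacf_query() helper introduced later in this patch read a 128-bit status block from the query function and test one bit per function code, and the per-caller facility masks of the old interface are folded into the new helper. A worked example of the bit arithmetic, using a function code defined elsewhere in this patch:

	/* CPACF_KM_AES_128_ENC = 0x12:
	 *   byte index = 0x12 >> 3          = 2
	 *   bit mask   = 0x80 >> (0x12 & 7) = 0x80 >> 2 = 0x20
	 * so the function is available iff (status[2] & 0x20) != 0
	 */
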
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index fba1c10..697e71a 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -20,8 +20,7 @@
 #include <linux/crypto.h>
 #include <crypto/algapi.h>
 #include <crypto/des.h>
-
-#include "crypt_s390.h"
+#include <asm/cpacf.h>
 
 #define DES3_KEY_SIZE	(3 * DES_KEY_SIZE)
 
@@ -54,20 +53,20 @@
 {
 	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypt_s390_km(KM_DEA_ENCRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
+	cpacf_km(CPACF_KM_DEA_ENC, ctx->key, out, in, DES_BLOCK_SIZE);
 }
 
 static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypt_s390_km(KM_DEA_DECRYPT, ctx->key, out, in, DES_BLOCK_SIZE);
+	cpacf_km(CPACF_KM_DEA_DEC, ctx->key, out, in, DES_BLOCK_SIZE);
 }
 
 static struct crypto_alg des_alg = {
 	.cra_name		=	"des",
 	.cra_driver_name	=	"des-s390",
-	.cra_priority		=	CRYPT_S390_PRIORITY,
+	.cra_priority		=	300,
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		=	DES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct s390_des_ctx),
@@ -95,7 +94,7 @@
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_km(func, key, out, in, n);
+		ret = cpacf_km(func, key, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
@@ -128,7 +127,7 @@
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, &param, out, in, n);
+		ret = cpacf_kmc(func, &param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
@@ -149,7 +148,7 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk);
+	return ecb_desall_crypt(desc, CPACF_KM_DEA_ENC, ctx->key, &walk);
 }
 
 static int ecb_des_decrypt(struct blkcipher_desc *desc,
@@ -160,13 +159,13 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk);
+	return ecb_desall_crypt(desc, CPACF_KM_DEA_DEC, ctx->key, &walk);
 }
 
 static struct crypto_alg ecb_des_alg = {
 	.cra_name		=	"ecb(des)",
 	.cra_driver_name	=	"ecb-des-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: des + ecb */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	DES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct s390_des_ctx),
@@ -190,7 +189,7 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
+	return cbc_desall_crypt(desc, CPACF_KMC_DEA_ENC, &walk);
 }
 
 static int cbc_des_decrypt(struct blkcipher_desc *desc,
@@ -200,13 +199,13 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
+	return cbc_desall_crypt(desc, CPACF_KMC_DEA_DEC, &walk);
 }
 
 static struct crypto_alg cbc_des_alg = {
 	.cra_name		=	"cbc(des)",
 	.cra_driver_name	=	"cbc-des-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: des + cbc */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	DES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct s390_des_ctx),
@@ -258,20 +257,20 @@
 {
 	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypt_s390_km(KM_TDEA_192_ENCRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
+	cpacf_km(CPACF_KM_TDEA_192_ENC, ctx->key, dst, src, DES_BLOCK_SIZE);
 }
 
 static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	crypt_s390_km(KM_TDEA_192_DECRYPT, ctx->key, dst, src, DES_BLOCK_SIZE);
+	cpacf_km(CPACF_KM_TDEA_192_DEC, ctx->key, dst, src, DES_BLOCK_SIZE);
 }
 
 static struct crypto_alg des3_alg = {
 	.cra_name		=	"des3_ede",
 	.cra_driver_name	=	"des3_ede-s390",
-	.cra_priority		=	CRYPT_S390_PRIORITY,
+	.cra_priority		=	300,
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		=	DES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct s390_des_ctx),
@@ -295,7 +294,7 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk);
+	return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_ENC, ctx->key, &walk);
 }
 
 static int ecb_des3_decrypt(struct blkcipher_desc *desc,
@@ -306,13 +305,13 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk);
+	return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_DEC, ctx->key, &walk);
 }
 
 static struct crypto_alg ecb_des3_alg = {
 	.cra_name		=	"ecb(des3_ede)",
 	.cra_driver_name	=	"ecb-des3_ede-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: des3 + ecb */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	DES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct s390_des_ctx),
@@ -336,7 +335,7 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
+	return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_ENC, &walk);
 }
 
 static int cbc_des3_decrypt(struct blkcipher_desc *desc,
@@ -346,13 +345,13 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
+	return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_DEC, &walk);
 }
 
 static struct crypto_alg cbc_des3_alg = {
 	.cra_name		=	"cbc(des3_ede)",
 	.cra_driver_name	=	"cbc-des3_ede-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: des3 + cbc */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	DES_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct s390_des_ctx),
@@ -407,8 +406,7 @@
 				n = __ctrblk_init(ctrptr, nbytes);
 			else
 				n = DES_BLOCK_SIZE;
-			ret = crypt_s390_kmctr(func, ctx->key, out, in,
-					       n, ctrptr);
+			ret = cpacf_kmctr(func, ctx->key, out, in, n, ctrptr);
 			if (ret < 0 || ret != n) {
 				if (ctrptr == ctrblk)
 					spin_unlock(&ctrblk_lock);
@@ -438,8 +436,8 @@
 	if (nbytes) {
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
-		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
-				       DES_BLOCK_SIZE, ctrbuf);
+		ret = cpacf_kmctr(func, ctx->key, buf, in,
+				  DES_BLOCK_SIZE, ctrbuf);
 		if (ret < 0 || ret != DES_BLOCK_SIZE)
 			return -EIO;
 		memcpy(out, buf, nbytes);
@@ -458,7 +456,7 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk);
+	return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_ENC, ctx, &walk);
 }
 
 static int ctr_des_decrypt(struct blkcipher_desc *desc,
@@ -469,13 +467,13 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk);
+	return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_DEC, ctx, &walk);
 }
 
 static struct crypto_alg ctr_des_alg = {
 	.cra_name		=	"ctr(des)",
 	.cra_driver_name	=	"ctr-des-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: des + ctr */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	1,
 	.cra_ctxsize		=	sizeof(struct s390_des_ctx),
@@ -501,7 +499,7 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk);
+	return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_ENC, ctx, &walk);
 }
 
 static int ctr_des3_decrypt(struct blkcipher_desc *desc,
@@ -512,13 +510,13 @@
 	struct blkcipher_walk walk;
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk);
+	return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_DEC, ctx, &walk);
 }
 
 static struct crypto_alg ctr_des3_alg = {
 	.cra_name		=	"ctr(des3_ede)",
 	.cra_driver_name	=	"ctr-des3_ede-s390",
-	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
+	.cra_priority		=	400,	/* combo: des3 + ctr */
 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize		=	1,
 	.cra_ctxsize		=	sizeof(struct s390_des_ctx),
@@ -540,8 +538,8 @@
 {
 	int ret;
 
-	if (!crypt_s390_func_available(KM_DEA_ENCRYPT, CRYPT_S390_MSA) ||
-	    !crypt_s390_func_available(KM_TDEA_192_ENCRYPT, CRYPT_S390_MSA))
+	if (!cpacf_query(CPACF_KM, CPACF_KM_DEA_ENC) ||
+	    !cpacf_query(CPACF_KM, CPACF_KM_TDEA_192_ENC))
 		return -EOPNOTSUPP;
 
 	ret = crypto_register_alg(&des_alg);
@@ -563,10 +561,8 @@
 	if (ret)
 		goto cbc_des3_err;
 
-	if (crypt_s390_func_available(KMCTR_DEA_ENCRYPT,
-			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
-	    crypt_s390_func_available(KMCTR_TDEA_192_ENCRYPT,
-			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
+	if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_DEA_ENC) &&
+	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_TDEA_192_ENC)) {
 		ret = crypto_register_alg(&ctr_des_alg);
 		if (ret)
 			goto ctr_des_err;
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 26e14ef..ab68de7 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -10,8 +10,7 @@
 #include <crypto/internal/hash.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
-
-#include "crypt_s390.h"
+#include <asm/cpacf.h>
 
 #define GHASH_BLOCK_SIZE	16
 #define GHASH_DIGEST_SIZE	16
@@ -72,8 +71,8 @@
 		src += n;
 
 		if (!dctx->bytes) {
-			ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
-					      GHASH_BLOCK_SIZE);
+			ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
+					 GHASH_BLOCK_SIZE);
 			if (ret != GHASH_BLOCK_SIZE)
 				return -EIO;
 		}
@@ -81,7 +80,7 @@
 
 	n = srclen & ~(GHASH_BLOCK_SIZE - 1);
 	if (n) {
-		ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
+		ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
 		if (ret != n)
 			return -EIO;
 		src += n;
@@ -106,7 +105,7 @@
 
 		memset(pos, 0, dctx->bytes);
 
-		ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
+		ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
 		if (ret != GHASH_BLOCK_SIZE)
 			return -EIO;
 
@@ -137,7 +136,7 @@
 	.base		= {
 		.cra_name		= "ghash",
 		.cra_driver_name	= "ghash-s390",
-		.cra_priority		= CRYPT_S390_PRIORITY,
+		.cra_priority		= 300,
 		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= GHASH_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct ghash_ctx),
@@ -147,8 +146,7 @@
 
 static int __init ghash_mod_init(void)
 {
-	if (!crypt_s390_func_available(KIMD_GHASH,
-				       CRYPT_S390_MSA | CRYPT_S390_MSA4))
+	if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_GHASH))
 		return -EOPNOTSUPP;
 
 	return crypto_register_shash(&ghash_alg);
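
One detail worth keeping in mind when reading the ghash_update() hunks above: KIMD consumes whole 16-byte blocks only, so partial input is buffered and just multiples of GHASH_BLOCK_SIZE reach the instruction. A small sketch of that invariant, reusing the driver's context type and names from the hunks (assumptions, not new API):

	/* sketch: feed only whole blocks to KIMD-GHASH, return bytes consumed */
	static int ghash_feed(struct ghash_desc_ctx *dctx, const u8 *src,
			      unsigned int srclen)
	{
		unsigned int n = srclen & ~(GHASH_BLOCK_SIZE - 1);

		if (n && cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n) != n)
			return -EIO;
		return n;	/* caller buffers the remaining srclen - n bytes */
	}
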
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index b8045b9..41527b1 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -23,8 +23,7 @@
 #include <asm/debug.h>
 #include <asm/uaccess.h>
 #include <asm/timex.h>
-
-#include "crypt_s390.h"
+#include <asm/cpacf.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("IBM Corporation");
@@ -136,8 +135,8 @@
 		else
 			h = ebuf;
 		/* generate sha256 from this page */
-		if (crypt_s390_kimd(KIMD_SHA_256, h,
-				    pg, PAGE_SIZE) != PAGE_SIZE) {
+		if (cpacf_kimd(CPACF_KIMD_SHA_256, h,
+			       pg, PAGE_SIZE) != PAGE_SIZE) {
 			prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
 			ret = -EIO;
 			goto out;
@@ -164,9 +163,9 @@
 	int ret;
 
 	for (i = 0; i < 16; i++) {
-		ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
-				     (char *)entropy, (char *)entropy,
-				     sizeof(entropy));
+		ret = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
+				(char *)entropy, (char *)entropy,
+				sizeof(entropy));
 		BUG_ON(ret < 0 || ret != sizeof(entropy));
 		memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
 	}
@@ -311,9 +310,8 @@
 	memset(&ws, 0, sizeof(ws));
 
 	/* initial seed */
-	ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
-			      &ws, NULL, 0,
-			      seed, sizeof(seed));
+	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED, &ws, NULL, 0,
+			 seed, sizeof(seed));
 	if (ret < 0) {
 		pr_err("The prng self test seed operation for the "
 		       "SHA-512 mode failed with rc=%d\n", ret);
@@ -331,18 +329,16 @@
 	}
 
 	/* generate random bytes */
-	ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
-			      &ws, buf, sizeof(buf),
-			      NULL, 0);
+	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+			 &ws, buf, sizeof(buf), NULL, 0);
 	if (ret < 0) {
 		pr_err("The prng self test generate operation for "
 		       "the SHA-512 mode failed with rc=%d\n", ret);
 		prng_errorflag = PRNG_SELFTEST_FAILED;
 		return -EIO;
 	}
-	ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
-			      &ws, buf, sizeof(buf),
-			      NULL, 0);
+	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+			 &ws, buf, sizeof(buf), NULL, 0);
 	if (ret < 0) {
 		pr_err("The prng self test generate operation for "
 		       "the SHA-512 mode failed with rc=%d\n", ret);
@@ -396,9 +392,8 @@
 	get_tod_clock_ext(seed + 48);
 
 	/* initial seed of the ppno drng */
-	ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
-			      &prng_data->ppnows, NULL, 0,
-			      seed, sizeof(seed));
+	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+			 &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
 	if (ret < 0) {
 		prng_errorflag = PRNG_SEED_FAILED;
 		ret = -EIO;
@@ -409,11 +404,9 @@
 	   bytes for the FIPS 140-2 Conditional Self Test */
 	if (fips_enabled) {
 		prng_data->prev = prng_data->buf + prng_chunk_size;
-		ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
-				      &prng_data->ppnows,
-				      prng_data->prev,
-				      prng_chunk_size,
-				      NULL, 0);
+		ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+				 &prng_data->ppnows,
+				 prng_data->prev, prng_chunk_size, NULL, 0);
 		if (ret < 0 || ret != prng_chunk_size) {
 			prng_errorflag = PRNG_GEN_FAILED;
 			ret = -EIO;
@@ -447,9 +440,8 @@
 		return ret;
 
 	/* do a reseed of the ppno drng with this bytestring */
-	ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
-			      &prng_data->ppnows, NULL, 0,
-			      seed, sizeof(seed));
+	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+			 &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
 	if (ret) {
 		prng_errorflag = PRNG_RESEED_FAILED;
 		return -EIO;
@@ -471,9 +463,8 @@
 	}
 
 	/* PPNO generate */
-	ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
-			      &prng_data->ppnows, buf, nbytes,
-			      NULL, 0);
+	ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+			 &prng_data->ppnows, buf, nbytes, NULL, 0);
 	if (ret < 0 || ret != nbytes) {
 		prng_errorflag = PRNG_GEN_FAILED;
 		return -EIO;
@@ -555,8 +546,8 @@
 		 * Note: you can still get strict X9.17 conformity by setting
 		 * prng_chunk_size to 8 bytes.
 		*/
-		tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
-				     prng_data->buf, prng_data->buf, n);
+		tmp = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
+				prng_data->buf, prng_data->buf, n);
 		if (tmp < 0 || tmp != n) {
 			ret = -EIO;
 			break;
@@ -669,11 +660,13 @@
 static struct miscdevice prng_sha512_dev = {
 	.name	= "prandom",
 	.minor	= MISC_DYNAMIC_MINOR,
+	.mode	= 0644,
 	.fops	= &prng_sha512_fops,
 };
 static struct miscdevice prng_tdes_dev = {
 	.name	= "prandom",
 	.minor	= MISC_DYNAMIC_MINOR,
+	.mode	= 0644,
 	.fops	= &prng_tdes_fops,
 };
 
@@ -813,14 +806,13 @@
 	int ret;
 
 	/* check if the CPU has a PRNG */
-	if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
+	if (!cpacf_query(CPACF_KMC, CPACF_KMC_PRNG))
 		return -EOPNOTSUPP;
 
 	/* choose prng mode */
 	if (prng_mode != PRNG_MODE_TDES) {
 		/* check for MSA5 support for PPNO operations */
-		if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN,
-					       CRYPT_S390_MSA5)) {
+		if (!cpacf_query(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
 			if (prng_mode == PRNG_MODE_SHA512) {
 				pr_err("The prng module cannot "
 				       "start in SHA-512 mode\n");
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 9208ead..5fbf91b 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -28,8 +28,8 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <crypto/sha.h>
+#include <asm/cpacf.h>
 
-#include "crypt_s390.h"
 #include "sha.h"
 
 static int sha1_init(struct shash_desc *desc)
@@ -42,7 +42,7 @@
 	sctx->state[3] = SHA1_H3;
 	sctx->state[4] = SHA1_H4;
 	sctx->count = 0;
-	sctx->func = KIMD_SHA_1;
+	sctx->func = CPACF_KIMD_SHA_1;
 
 	return 0;
 }
@@ -66,7 +66,7 @@
 	sctx->count = ictx->count;
 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
 	memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
-	sctx->func = KIMD_SHA_1;
+	sctx->func = CPACF_KIMD_SHA_1;
 	return 0;
 }
 
@@ -82,7 +82,7 @@
 	.base		=	{
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"sha1-s390",
-		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_priority	=	300,
 		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
@@ -91,7 +91,7 @@
 
 static int __init sha1_s390_init(void)
 {
-	if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA))
+	if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_1))
 		return -EOPNOTSUPP;
 	return crypto_register_shash(&alg);
 }
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 667888f..10aac0b 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -18,8 +18,8 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <crypto/sha.h>
+#include <asm/cpacf.h>
 
-#include "crypt_s390.h"
 #include "sha.h"
 
 static int sha256_init(struct shash_desc *desc)
@@ -35,7 +35,7 @@
 	sctx->state[6] = SHA256_H6;
 	sctx->state[7] = SHA256_H7;
 	sctx->count = 0;
-	sctx->func = KIMD_SHA_256;
+	sctx->func = CPACF_KIMD_SHA_256;
 
 	return 0;
 }
@@ -59,7 +59,7 @@
 	sctx->count = ictx->count;
 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
-	sctx->func = KIMD_SHA_256;
+	sctx->func = CPACF_KIMD_SHA_256;
 	return 0;
 }
 
@@ -75,7 +75,7 @@
 	.base		=	{
 		.cra_name	=	"sha256",
 		.cra_driver_name=	"sha256-s390",
-		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_priority	=	300,
 		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
@@ -95,7 +95,7 @@
 	sctx->state[6] = SHA224_H6;
 	sctx->state[7] = SHA224_H7;
 	sctx->count = 0;
-	sctx->func = KIMD_SHA_256;
+	sctx->func = CPACF_KIMD_SHA_256;
 
 	return 0;
 }
@@ -112,7 +112,7 @@
 	.base		=	{
 		.cra_name	=	"sha224",
 		.cra_driver_name=	"sha224-s390",
-		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_priority	=	300,
 		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
@@ -123,7 +123,7 @@
 {
 	int ret;
 
-	if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
+	if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_256))
 		return -EOPNOTSUPP;
 	ret = crypto_register_shash(&sha256_alg);
 	if (ret < 0)
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 2ba66b1..ea85757 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -19,9 +19,9 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
+#include <asm/cpacf.h>
 
 #include "sha.h"
-#include "crypt_s390.h"
 
 static int sha512_init(struct shash_desc *desc)
 {
@@ -36,7 +36,7 @@
 	*(__u64 *)&ctx->state[12] = 0x1f83d9abfb41bd6bULL;
 	*(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL;
 	ctx->count = 0;
-	ctx->func = KIMD_SHA_512;
+	ctx->func = CPACF_KIMD_SHA_512;
 
 	return 0;
 }
@@ -64,7 +64,7 @@
 
 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
-	sctx->func = KIMD_SHA_512;
+	sctx->func = CPACF_KIMD_SHA_512;
 	return 0;
 }
 
@@ -80,7 +80,7 @@
 	.base		=	{
 		.cra_name	=	"sha512",
 		.cra_driver_name=	"sha512-s390",
-		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_priority	=	300,
 		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
@@ -102,7 +102,7 @@
 	*(__u64 *)&ctx->state[12] = 0xdb0c2e0d64f98fa7ULL;
 	*(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL;
 	ctx->count = 0;
-	ctx->func = KIMD_SHA_512;
+	ctx->func = CPACF_KIMD_SHA_512;
 
 	return 0;
 }
@@ -119,7 +119,7 @@
 	.base		=	{
 		.cra_name	=	"sha384",
 		.cra_driver_name=	"sha384-s390",
-		.cra_priority	=	CRYPT_S390_PRIORITY,
+		.cra_priority	=	300,
 		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	SHA384_BLOCK_SIZE,
 		.cra_ctxsize	=	sizeof(struct s390_sha_ctx),
@@ -133,7 +133,7 @@
 {
 	int ret;
 
-	if (!crypt_s390_func_available(KIMD_SHA_512, CRYPT_S390_MSA))
+	if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_512))
 		return -EOPNOTSUPP;
 	if ((ret = crypto_register_shash(&sha512_alg)) < 0)
 		goto out;
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index 8620b0e..8e90816 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -15,8 +15,8 @@
 
 #include <crypto/internal/hash.h>
 #include <linux/module.h>
+#include <asm/cpacf.h>
 #include "sha.h"
-#include "crypt_s390.h"
 
 int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 {
@@ -35,7 +35,7 @@
 	/* process one stored block */
 	if (index) {
 		memcpy(ctx->buf + index, data, bsize - index);
-		ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
+		ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
 		if (ret != bsize)
 			return -EIO;
 		data += bsize - index;
@@ -45,8 +45,8 @@
 
 	/* process as many blocks as possible */
 	if (len >= bsize) {
-		ret = crypt_s390_kimd(ctx->func, ctx->state, data,
-				      len & ~(bsize - 1));
+		ret = cpacf_kimd(ctx->func, ctx->state, data,
+				 len & ~(bsize - 1));
 		if (ret != (len & ~(bsize - 1)))
 			return -EIO;
 		data += ret;
@@ -89,7 +89,7 @@
 	bits = ctx->count * 8;
 	memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
 
-	ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
+	ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
 	if (ret != end)
 		return -EIO;
 
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 0f3da2c..255c7ee 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -278,8 +278,8 @@
 	sbi->uid = current_uid();
 	sbi->gid = current_gid();
 	sb->s_fs_info = sbi;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = HYPFS_MAGIC;
 	sb->s_op = &hypfs_s_ops;
 	if (hypfs_parse_options(data, sb))
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 4d7ccac..22da3b3 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -15,4 +15,7 @@
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
+/* Read-only memory is marked before mark_rodata_ro() is called. */
+#define __ro_after_init __read_mostly
+
 #endif
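
The hunk above wires up the new __ro_after_init annotation for s390; as its comment notes, read-only memory is marked before mark_rodata_ro() runs there, so the attribute falls back to __read_mostly. The intended use is a variable written only during boot and treated as read-only afterwards; a minimal, illustrative sketch (the variable and initcall are made up):

	#include <linux/init.h>
	#include <linux/cache.h>

	static unsigned long boot_limit __ro_after_init;

	static int __init boot_limit_setup(void)
	{
		boot_limit = 42;	/* last legitimate write */
		return 0;
	}
	early_initcall(boot_limit_setup);
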
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
new file mode 100644
index 0000000..1a82cf2
--- /dev/null
+++ b/arch/s390/include/asm/cpacf.h
@@ -0,0 +1,410 @@
+/*
+ * CP Assist for Cryptographic Functions (CPACF)
+ *
+ * Copyright IBM Corp. 2003, 2016
+ * Author(s): Thomas Spatzier
+ *	      Jan Glauber
+ *	      Harald Freudenberger (freude@de.ibm.com)
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#ifndef _ASM_S390_CPACF_H
+#define _ASM_S390_CPACF_H
+
+#include <asm/facility.h>
+
+/*
+ * Instruction opcodes for the CPACF instructions
+ */
+#define CPACF_KMAC		0xb91e		/* MSA	*/
+#define CPACF_KM		0xb92e		/* MSA	*/
+#define CPACF_KMC		0xb92f		/* MSA	*/
+#define CPACF_KIMD		0xb93e		/* MSA	*/
+#define CPACF_KLMD		0xb93f		/* MSA	*/
+#define CPACF_PCC		0xb92c		/* MSA4 */
+#define CPACF_KMCTR		0xb92d		/* MSA4 */
+#define CPACF_PPNO		0xb93c		/* MSA5 */
+
+/*
+ * Function codes for the KM (CIPHER MESSAGE)
+ * instruction (0x80 is the decipher modifier bit)
+ */
+#define CPACF_KM_QUERY		0x00
+#define CPACF_KM_DEA_ENC	0x01
+#define CPACF_KM_DEA_DEC	0x81
+#define CPACF_KM_TDEA_128_ENC	0x02
+#define CPACF_KM_TDEA_128_DEC	0x82
+#define CPACF_KM_TDEA_192_ENC	0x03
+#define CPACF_KM_TDEA_192_DEC	0x83
+#define CPACF_KM_AES_128_ENC	0x12
+#define CPACF_KM_AES_128_DEC	0x92
+#define CPACF_KM_AES_192_ENC	0x13
+#define CPACF_KM_AES_192_DEC	0x93
+#define CPACF_KM_AES_256_ENC	0x14
+#define CPACF_KM_AES_256_DEC	0x94
+#define CPACF_KM_XTS_128_ENC	0x32
+#define CPACF_KM_XTS_128_DEC	0xb2
+#define CPACF_KM_XTS_256_ENC	0x34
+#define CPACF_KM_XTS_256_DEC	0xb4
+
+/*
+ * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
+ * instruction (0x80 is the decipher modifier bit)
+ */
+#define CPACF_KMC_QUERY		0x00
+#define CPACF_KMC_DEA_ENC	0x01
+#define CPACF_KMC_DEA_DEC	0x81
+#define CPACF_KMC_TDEA_128_ENC	0x02
+#define CPACF_KMC_TDEA_128_DEC	0x82
+#define CPACF_KMC_TDEA_192_ENC	0x03
+#define CPACF_KMC_TDEA_192_DEC	0x83
+#define CPACF_KMC_AES_128_ENC	0x12
+#define CPACF_KMC_AES_128_DEC	0x92
+#define CPACF_KMC_AES_192_ENC	0x13
+#define CPACF_KMC_AES_192_DEC	0x93
+#define CPACF_KMC_AES_256_ENC	0x14
+#define CPACF_KMC_AES_256_DEC	0x94
+#define CPACF_KMC_PRNG		0x43
+
+/*
+ * Function codes for the KMCTR (CIPHER MESSAGE WITH COUNTER)
+ * instruction (0x80 is the decipher modifier bit)
+ */
+#define CPACF_KMCTR_QUERY	 0x00
+#define CPACF_KMCTR_DEA_ENC	 0x01
+#define CPACF_KMCTR_DEA_DEC	 0x81
+#define CPACF_KMCTR_TDEA_128_ENC 0x02
+#define CPACF_KMCTR_TDEA_128_DEC 0x82
+#define CPACF_KMCTR_TDEA_192_ENC 0x03
+#define CPACF_KMCTR_TDEA_192_DEC 0x83
+#define CPACF_KMCTR_AES_128_ENC	 0x12
+#define CPACF_KMCTR_AES_128_DEC	 0x92
+#define CPACF_KMCTR_AES_192_ENC	 0x13
+#define CPACF_KMCTR_AES_192_DEC	 0x93
+#define CPACF_KMCTR_AES_256_ENC	 0x14
+#define CPACF_KMCTR_AES_256_DEC	 0x94
+
+/*
+ * Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
+ * instruction
+ */
+#define CPACF_KIMD_QUERY	0x00
+#define CPACF_KIMD_SHA_1	0x01
+#define CPACF_KIMD_SHA_256	0x02
+#define CPACF_KIMD_SHA_512	0x03
+#define CPACF_KIMD_GHASH	0x41
+
+/*
+ * Function codes for the KLMD (COMPUTE LAST MESSAGE DIGEST)
+ * instruction
+ */
+#define CPACF_KLMD_QUERY	0x00
+#define CPACF_KLMD_SHA_1	0x01
+#define CPACF_KLMD_SHA_256	0x02
+#define CPACF_KLMD_SHA_512	0x03
+
+/*
+ * Function codes for the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction
+ */
+#define CPACF_KMAC_QUERY	0x00
+#define CPACF_KMAC_DEA		0x01
+#define CPACF_KMAC_TDEA_128	0x02
+#define CPACF_KMAC_TDEA_192	0x03
+
+/*
+ * Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
+ * instruction
+ */
+#define CPACF_PPNO_QUERY		0x00
+#define CPACF_PPNO_SHA512_DRNG_GEN	0x03
+#define CPACF_PPNO_SHA512_DRNG_SEED	0x83
+
+/**
+ * cpacf_query() - check if a specific CPACF function is available
+ * @opcode: the opcode of the crypto instruction
+ * @func: the function code to test for
+ *
+ * Executes the query function for the given crypto instruction @opcode
+ * and checks if @func is available
+ *
+ * Returns 1 if @func is available for @opcode, 0 otherwise
+ */
+static inline void __cpacf_query(unsigned int opcode, unsigned char *status)
+{
+	typedef struct { unsigned char _[16]; } status_type;
+	register unsigned long r0 asm("0") = 0;	/* query function */
+	register unsigned long r1 asm("1") = (unsigned long) status;
+
+	asm volatile(
+		/* Parameter registers are ignored, but may not be 0 */
+		"0:	.insn	rrf,%[opc] << 16,2,2,2,0\n"
+		"	brc	1,0b\n"	/* handle partial completion */
+		: "=m" (*(status_type *) status)
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (opcode)
+		: "cc");
+}
+
+static inline int cpacf_query(unsigned int opcode, unsigned int func)
+{
+	unsigned char status[16];
+
+	switch (opcode) {
+	case CPACF_KMAC:
+	case CPACF_KM:
+	case CPACF_KMC:
+	case CPACF_KIMD:
+	case CPACF_KLMD:
+		if (!test_facility(17))	/* check for MSA */
+			return 0;
+		break;
+	case CPACF_PCC:
+	case CPACF_KMCTR:
+		if (!test_facility(77))	/* check for MSA4 */
+			return 0;
+		break;
+	case CPACF_PPNO:
+		if (!test_facility(57))	/* check for MSA5 */
+			return 0;
+		break;
+	default:
+		BUG();
+	}
+	__cpacf_query(opcode, status);
+	return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
+}
+
+/**
+ * cpacf_km() - executes the KM (CIPHER MESSAGE) instruction
+ * @func: the function code passed to KM; see CPACF_KM_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for
+ * encryption/decryption funcs
+ */
+static inline int cpacf_km(long func, void *param,
+			   u8 *dest, const u8 *src, long src_len)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+	register unsigned long r2 asm("2") = (unsigned long) src;
+	register unsigned long r3 asm("3") = (unsigned long) src_len;
+	register unsigned long r4 asm("4") = (unsigned long) dest;
+
+	asm volatile(
+		"0:	.insn	rre,%[opc] << 16,%[dst],%[src]\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KM)
+		: "cc", "memory");
+
+	return src_len - r3;
+}
+
+/**
+ * cpacf_kmc() - executes the KMC (CIPHER MESSAGE WITH CHAINING) instruction
+ * @func: the function code passed to KMC; see CPACF_KMC_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for
+ * encryption/decryption funcs
+ */
+static inline int cpacf_kmc(long func, void *param,
+			    u8 *dest, const u8 *src, long src_len)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+	register unsigned long r2 asm("2") = (unsigned long) src;
+	register unsigned long r3 asm("3") = (unsigned long) src_len;
+	register unsigned long r4 asm("4") = (unsigned long) dest;
+
+	asm volatile(
+		"0:	.insn	rre,%[opc] << 16,%[dst],%[src]\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMC)
+		: "cc", "memory");
+
+	return src_len - r3;
+}
+
+/**
+ * cpacf_kimd() - executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
+ *		  instruction
+ * @func: the function code passed to KIMD; see CPACF_KIMD_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for digest funcs
+ */
+static inline int cpacf_kimd(long func, void *param,
+			     const u8 *src, long src_len)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+	register unsigned long r2 asm("2") = (unsigned long) src;
+	register unsigned long r3 asm("3") = (unsigned long) src_len;
+
+	asm volatile(
+		"0:	.insn	rre,%[opc] << 16,0,%[src]\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [src] "+a" (r2), [len] "+d" (r3)
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KIMD)
+		: "cc", "memory");
+
+	return src_len - r3;
+}
+
+/**
+ * cpacf_klmd() - executes the KLMD (COMPUTE LAST MESSAGE DIGEST) instruction
+ * @func: the function code passed to KLMD; see CPACF_KLMD_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for digest funcs
+ */
+static inline int cpacf_klmd(long func, void *param,
+			     const u8 *src, long src_len)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+	register unsigned long r2 asm("2") = (unsigned long) src;
+	register unsigned long r3 asm("3") = (unsigned long) src_len;
+
+	asm volatile(
+		"0:	.insn	rre,%[opc] << 16,0,%[src]\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [src] "+a" (r2), [len] "+d" (r3)
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KLMD)
+		: "cc", "memory");
+
+	return src_len - r3;
+}
+
+/**
+ * cpacf_kmac() - executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ *		  instruction
+ * @func: the function code passed to KMAC; see CPACF_KMAC_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ *
+ * Returns 0 for the query func, number of processed bytes for digest funcs
+ */
+static inline int cpacf_kmac(long func, void *param,
+			     const u8 *src, long src_len)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+	register unsigned long r2 asm("2") = (unsigned long) src;
+	register unsigned long r3 asm("3") = (unsigned long) src_len;
+
+	asm volatile(
+		"0:	.insn	rre,%[opc] << 16,0,%[src]\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [src] "+a" (r2), [len] "+d" (r3)
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMAC)
+		: "cc", "memory");
+
+	return src_len - r3;
+}
+
+/**
+ * cpacf_kmctr() - executes the KMCTR (CIPHER MESSAGE WITH COUNTER) instruction
+ * @func: the function code passed to KMCTR; see CPACF_KMCTR_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @counter: address of counter value
+ *
+ * Returns 0 for the query func, number of processed bytes for
+ * encryption/decryption funcs
+ */
+static inline int cpacf_kmctr(long func, void *param, u8 *dest,
+			      const u8 *src, long src_len, u8 *counter)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+	register unsigned long r2 asm("2") = (unsigned long) src;
+	register unsigned long r3 asm("3") = (unsigned long) src_len;
+	register unsigned long r4 asm("4") = (unsigned long) dest;
+	register unsigned long r6 asm("6") = (unsigned long) counter;
+
+	asm volatile(
+		"0:	.insn	rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [src] "+a" (r2), [len] "+d" (r3),
+		  [dst] "+a" (r4), [ctr] "+a" (r6)
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMCTR)
+		: "cc", "memory");
+
+	return src_len - r3;
+}
+
+/**
+ * cpacf_ppno() - executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
+ *		  instruction
+ * @func: the function code passed to PPNO; see CPACF_PPNO_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @dest_len: size of destination memory area in bytes
+ * @seed: address of seed data
+ * @seed_len: size of seed data in bytes
+ *
+ * Returns 0 for the query func, number of random bytes stored in
+ * dest buffer for generate function
+ */
+static inline int cpacf_ppno(long func, void *param,
+			     u8 *dest, long dest_len,
+			     const u8 *seed, long seed_len)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+	register unsigned long r2 asm("2") = (unsigned long) dest;
+	register unsigned long r3 asm("3") = (unsigned long) dest_len;
+	register unsigned long r4 asm("4") = (unsigned long) seed;
+	register unsigned long r5 asm("5") = (unsigned long) seed_len;
+
+	asm volatile (
+		"0:	.insn	rre,%[opc] << 16,%[dst],%[seed]\n"
+		"	brc	1,0b\n"	  /* handle partial completion */
+		: [dst] "+a" (r2), [dlen] "+d" (r3)
+		: [fc] "d" (r0), [pba] "a" (r1),
+		  [seed] "a" (r4), [slen] "d" (r5), [opc] "i" (CPACF_PPNO)
+		: "cc", "memory");
+
+	return dest_len - r3;
+}
+
+/**
+ * cpacf_pcc() - executes the PCC (PERFORM CRYPTOGRAPHIC COMPUTATION)
+ *		 instruction
+ * @func: the function code passed to PCC; see CPACF_KM_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ *
+ * Returns 0.
+ */
+static inline int cpacf_pcc(long func, void *param)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+
+	asm volatile(
+		"0:	.insn	rre,%[opc] << 16,0,0\n" /* PCC opcode */
+		"	brc	1,0b\n" /* handle partial completion */
+		:
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCC)
+		: "cc", "memory");
+
+	return 0;
+}
+
+#endif	/* _ASM_S390_CPACF_H */
diff --git a/arch/s390/include/asm/fpu/types.h b/arch/s390/include/asm/fpu/types.h
index 14a8b0c..fe937c9 100644
--- a/arch/s390/include/asm/fpu/types.h
+++ b/arch/s390/include/asm/fpu/types.h
@@ -11,11 +11,13 @@
 #include <asm/sigcontext.h>
 
 struct fpu {
-	__u32 fpc;			/* Floating-point control */
+	__u32 fpc;		/* Floating-point control */
+	void *regs;		/* Pointer to the current save area */
 	union {
-		void *regs;
-		freg_t *fprs;		/* Floating-point register save area */
-		__vector128 *vxrs;	/* Vector register save area */
+		/* Floating-point register save area */
+		freg_t fprs[__NUM_FPRS];
+		/* Vector register save area */
+		__vector128 vxrs[__NUM_VXRS];
 	};
 };
 
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 836c562..64053d9 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -12,7 +12,9 @@
 
 #ifndef __ASSEMBLY__
 
-#define ftrace_return_address(n) __builtin_return_address(n)
+unsigned long return_address(int depth);
+
+#define ftrace_return_address(n) return_address(n)
 
 void _mcount(void);
 void ftrace_caller(void);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 6da41fa..37b9017 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -38,7 +38,7 @@
  */
 #define KVM_NR_IRQCHIPS 1
 #define KVM_IRQCHIP_NUM_PINS 4096
-#define KVM_HALT_POLL_NS_DEFAULT 0
+#define KVM_HALT_POLL_NS_DEFAULT 80000
 
 /* s390-specific vcpu->requests bit members */
 #define KVM_REQ_ENABLE_IBS         8
@@ -247,6 +247,7 @@
 	u32 exit_instruction;
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
+	u32 halt_poll_invalid;
 	u32 halt_wakeup;
 	u32 instruction_lctl;
 	u32 instruction_lctlg;
@@ -544,10 +545,6 @@
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer    ckc_timer;
 	struct kvm_s390_pgm_info pgm;
-	union  {
-		struct cpuid	cpu_id;
-		u64		stidp_data;
-	};
 	struct gmap *gmap;
 	struct kvm_guestdbg_info_arch guestdbg;
 	unsigned long pfault_token;
@@ -605,7 +602,7 @@
 	__u64 fac_mask[S390_ARCH_FAC_LIST_SIZE_U64];
 	/* facility list requested by guest (in dma page) */
 	__u64 *fac_list;
-	struct cpuid cpu_id;
+	u64 cpuid;
 	unsigned short ibc;
 };
 
@@ -700,4 +697,6 @@
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
 
+void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu);
+
 #endif
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index d5427c7..2c12137 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -24,13 +24,6 @@
 	return 0;
 }
 
-static inline int klp_write_module_reloc(struct module *mod, unsigned long
-		type, unsigned long loc, unsigned long value)
-{
-	/* not supported yet */
-	return -ENOSYS;
-}
-
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {
 	regs->psw.addr = ip;
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index d29ad95..081b2ad 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,7 +11,7 @@
 	spinlock_t list_lock;
 	struct list_head pgtable_list;
 	struct list_head gmap_list;
-	unsigned long asce_bits;
+	unsigned long asce;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
 	/* The mmu context allocates 4K page tables. */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d321469..c837b79 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -26,12 +26,28 @@
 	mm->context.has_pgste = 0;
 	mm->context.use_skey = 0;
 #endif
-	if (mm->context.asce_limit == 0) {
+	switch (mm->context.asce_limit) {
+	case 1UL << 42:
+		/*
+		 * forked 3-level task, fall through to set new asce with new
+		 * mm->pgd
+		 */
+	case 0:
 		/* context created by exec, set asce limit to 4TB */
-		mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-			_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
 		mm->context.asce_limit = STACK_TOP_MAX;
-	} else if (mm->context.asce_limit == (1UL << 31)) {
+		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+		break;
+	case 1UL << 53:
+		/* forked 4-level task, set new asce with new mm->pgd */
+		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+		break;
+	case 1UL << 31:
+		/* forked 2-level compat task, set new asce with new mm->pgd */
+		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+		/* pgd_alloc() did not increase mm->nr_pmds */
 		mm_inc_nr_pmds(mm);
 	}
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
@@ -42,7 +58,7 @@
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+	S390_lowcore.user_asce = mm->context.asce;
 	if (current->thread.mm_segment.ar4)
 		__ctl_load(S390_lowcore.user_asce, 7, 7);
 	set_cpu_flag(CIF_ASCE);
@@ -71,7 +87,7 @@
 {
 	int cpu = smp_processor_id();
 
-	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
+	S390_lowcore.user_asce = next->context.asce;
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index b6bfa16..0da91c4 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -31,20 +31,42 @@
 #define ZPCI_FC_BLOCKED			0x20
 #define ZPCI_FC_DMA_ENABLED		0x10
 
+#define ZPCI_FMB_DMA_COUNTER_VALID	(1 << 23)
+
+struct zpci_fmb_fmt0 {
+	u64 dma_rbytes;
+	u64 dma_wbytes;
+};
+
+struct zpci_fmb_fmt1 {
+	u64 rx_bytes;
+	u64 rx_packets;
+	u64 tx_bytes;
+	u64 tx_packets;
+};
+
+struct zpci_fmb_fmt2 {
+	u64 consumed_work_units;
+	u64 max_work_units;
+};
+
 struct zpci_fmb {
-	u32 format	:  8;
-	u32 dma_valid	:  1;
-	u32		: 23;
+	u32 format	: 8;
+	u32 fmt_ind	: 24;
 	u32 samples;
 	u64 last_update;
-	/* hardware counters */
+	/* common counters */
 	u64 ld_ops;
 	u64 st_ops;
 	u64 stb_ops;
 	u64 rpcit_ops;
-	u64 dma_rbytes;
-	u64 dma_wbytes;
-} __packed __aligned(64);
+	/* format specific counters */
+	union {
+		struct zpci_fmb_fmt0 fmt0;
+		struct zpci_fmb_fmt1 fmt1;
+		struct zpci_fmb_fmt2 fmt2;
+	};
+} __packed __aligned(128);
 
 enum zpci_state {
 	ZPCI_FN_STATE_RESERVED,
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 9b3d9b6..da34cb6 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -52,8 +52,8 @@
 	return _REGION2_ENTRY_EMPTY;
 }
 
-int crst_table_upgrade(struct mm_struct *, unsigned long limit);
-void crst_table_downgrade(struct mm_struct *, unsigned long limit);
+int crst_table_upgrade(struct mm_struct *);
+void crst_table_downgrade(struct mm_struct *);
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d6fd22e..9d4d311 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -105,7 +105,6 @@
  * Thread structure
  */
 struct thread_struct {
-	struct fpu fpu;			/* FP and VX register save area */
 	unsigned int  acrs[NUM_ACRS];
         unsigned long ksp;              /* kernel stack pointer             */
 	mm_segment_t mm_segment;
@@ -120,6 +119,11 @@
 	/* cpu runtime instrumentation */
 	struct runtime_instr_cb *ri_cb;
 	unsigned char trap_tdb[256];	/* Transaction abort diagnose block */
+	/*
+	 * Warning: 'fpu' is dynamically-sized. It *MUST* be at
+	 * the end.
+	 */
+	struct fpu fpu;			/* FP and VX register save area */
 };
 
 /* Flag to disable transactions. */
@@ -155,10 +159,9 @@
 
 #define ARCH_MIN_TASKALIGN	8
 
-extern __vector128 init_task_fpu_regs[__NUM_VXRS];
 #define INIT_THREAD {							\
 	.ksp = sizeof(init_stack) + (unsigned long) &init_stack,	\
-	.fpu.regs = (void *)&init_task_fpu_regs,			\
+	.fpu.regs = (void *) init_task.thread.fpu.fprs,			\
 }
 
 /*
@@ -175,7 +178,7 @@
 	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_BA;			\
 	regs->psw.addr	= new_psw;					\
 	regs->gprs[15]	= new_stackp;					\
-	crst_table_downgrade(current->mm, 1UL << 31);			\
+	crst_table_downgrade(current->mm);				\
 	execve_tail();							\
 } while (0)
 
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index fead491..c75e447 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -90,7 +90,7 @@
 /*
  * lock for writing
  */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline long ___down_write(struct rw_semaphore *sem)
 {
 	signed long old, new, tmp;
 
@@ -104,13 +104,23 @@
 		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
 		: "Q" (sem->count), "m" (tmp)
 		: "cc", "memory");
-	if (old != 0)
-		rwsem_down_write_failed(sem);
+
+	return old;
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
 {
-	__down_write_nested(sem, 0);
+	if (___down_write(sem))
+		rwsem_down_write_failed(sem);
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+	if (___down_write(sem))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+
+	return 0;
 }
 
 /*
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index bab456b..e4f6f73 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -69,9 +69,22 @@
 	unsigned int max_cores;
 	unsigned long hsa_size;
 	unsigned long facilities;
+	unsigned int hmfai;
 };
 extern struct sclp_info sclp;
 
+struct zpci_report_error_header {
+	u8 version;	/* Interface version byte */
+	u8 action;	/* Action qualifier byte
+			 * 1: Deconfigure and repair action requested
+			 *	(OpenCrypto Problem Call Home)
+			 * 2: Informational Report
+			 *	(OpenCrypto Successful Diagnostics Execution)
+			 */
+	u16 length;	/* Length of Subsequent Data (up to 4K - SCLP header) */
+	u8 data[0];	/* Subsequent Data passed verbatim to SCLP ET 24 */
+} __packed;
+
 int sclp_get_core_info(struct sclp_core_info *info);
 int sclp_core_configure(u8 core);
 int sclp_core_deconfigure(u8 core);
@@ -83,6 +96,7 @@
 void sclp_get_ipl_info(struct sclp_ipl_info *info);
 int sclp_pci_configure(u32 fid);
 int sclp_pci_deconfigure(u32 fid);
+int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid);
 int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
 int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
 void sclp_early_detect(void);
diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h
index 781a9cf..e10f833 100644
--- a/arch/s390/include/asm/seccomp.h
+++ b/arch/s390/include/asm/seccomp.h
@@ -13,4 +13,6 @@
 #define __NR_seccomp_exit_32	__NR_exit
 #define __NR_seccomp_sigreturn_32 __NR_sigreturn
 
+#include <asm-generic/seccomp.h>
+
 #endif	/* _ASM_S390_SECCOMP_H */
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index ec60cf7..1c8f33f 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -27,6 +27,7 @@
 
 /* SIGP cpu status bits */
 
+#define SIGP_STATUS_INVALID_ORDER	0x00000002UL
 #define SIGP_STATUS_CHECK_STOP		0x00000010UL
 #define SIGP_STATUS_STOPPED		0x00000040UL
 #define SIGP_STATUS_EXT_CALL_PENDING	0x00000080UL
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 2fffc2c..f15c039 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -62,6 +62,7 @@
 }
 
 void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
 #define THREAD_SIZE_ORDER THREAD_ORDER
 
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index ca148f7..a2e6ef3 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -110,8 +110,7 @@
 static inline void __tlb_flush_kernel(void)
 {
 	if (MACHINE_HAS_IDTE)
-		__tlb_flush_idte((unsigned long) init_mm.pgd |
-				 init_mm.context.asce_bits);
+		__tlb_flush_idte(init_mm.context.asce);
 	else
 		__tlb_flush_global();
 }
@@ -133,8 +132,7 @@
 static inline void __tlb_flush_kernel(void)
 {
 	if (MACHINE_HAS_TLB_LC)
-		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
-				       init_mm.context.asce_bits);
+		__tlb_flush_idte_local(init_mm.context.asce);
 	else
 		__tlb_flush_local();
 }
@@ -148,8 +146,7 @@
 	 * only ran on the local cpu.
 	 */
 	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
-		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
-				 mm->context.asce_bits);
+		__tlb_flush_asce(mm, mm->context.asce);
 	else
 		__tlb_flush_full(mm);
 }
diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
index 5812a3b..1340311 100644
--- a/arch/s390/include/uapi/asm/dasd.h
+++ b/arch/s390/include/uapi/asm/dasd.h
@@ -187,6 +187,36 @@
 #define DASD_FMT_INT_INVAL  4 /* invalidate tracks */
 #define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */
 
+/*
+ * struct format_check_t
+ * represents all data necessary to evaluate the format of
+ * different tracks of a dasd
+ */
+typedef struct format_check_t {
+	/* Input */
+	struct format_data_t expect;
+
+	/* Output */
+	unsigned int result;		/* Error indication (DASD_FMT_ERR_*) */
+	unsigned int unit;		/* Track that is in error */
+	unsigned int rec;		/* Record that is in error */
+	unsigned int num_records;	/* Records in the track in error */
+	unsigned int blksize;		/* Blocksize of first record in error */
+	unsigned int key_length;	/* Key length of first record in error */
+} format_check_t;
+
+/* Values returned in format_check_t when a format error is detected: */
+/* Too few records were found on a single track */
+#define DASD_FMT_ERR_TOO_FEW_RECORDS	1
+/* Too many records were found on a single track */
+#define DASD_FMT_ERR_TOO_MANY_RECORDS	2
+/* Blocksize/data-length of a record was wrong */
+#define DASD_FMT_ERR_BLKSIZE		3
+/* A record ID is defined by cylinder, head, and record number (CHR). */
+/* On mismatch, this error is set */
+#define DASD_FMT_ERR_RECORD_ID		4
+/* If key-length was != 0 */
+#define DASD_FMT_ERR_KEY_LENGTH		5
 
 /* 
  * struct attrib_data_t
@@ -288,6 +318,8 @@
 
 /* Get Sense Path Group ID (SNID) data */
 #define BIODASDSNID    _IOWR(DASD_IOCTL_LETTER, 1, struct dasd_snid_ioctl_data)
+/* Check device format according to format_check_t */
+#define BIODASDCHECKFMT _IOWR(DASD_IOCTL_LETTER, 2, format_check_t)
 
 #define BIODASDSYMMIO  _IOWR(DASD_IOCTL_LETTER, 240, dasd_symmio_parms_t)
 
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 347fe5a..3b8e99e 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -25,6 +25,7 @@
 #define KVM_DEV_FLIC_APF_DISABLE_WAIT	5
 #define KVM_DEV_FLIC_ADAPTER_REGISTER	6
 #define KVM_DEV_FLIC_ADAPTER_MODIFY	7
+#define KVM_DEV_FLIC_CLEAR_IO_IRQ	8
 /*
  * We can have up to 4*64k pending subchannels + 8 adapter interrupts,
  * as well as up  to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index 5dbaa72..8fb5d4a 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -16,14 +16,19 @@
 	{ 0x01, "SIGP sense" },					\
 	{ 0x02, "SIGP external call" },				\
 	{ 0x03, "SIGP emergency signal" },			\
+	{ 0x04, "SIGP start" },					\
 	{ 0x05, "SIGP stop" },					\
 	{ 0x06, "SIGP restart" },				\
 	{ 0x09, "SIGP stop and store status" },			\
 	{ 0x0b, "SIGP initial cpu reset" },			\
+	{ 0x0c, "SIGP cpu reset" },				\
 	{ 0x0d, "SIGP set prefix" },				\
 	{ 0x0e, "SIGP store status at address" },		\
 	{ 0x12, "SIGP set architecture" },			\
-	{ 0x15, "SIGP sense running" }
+	{ 0x13, "SIGP conditional emergency signal" },		\
+	{ 0x15, "SIGP sense running" },				\
+	{ 0x16, "SIGP set multithreading"},			\
+	{ 0x17, "SIGP store additional status ait address"}
 
 #define icpt_prog_codes						\
 	{ 0x0001, "Prog Operation" },				\
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index ab3aa68..4384bc7 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -311,7 +311,9 @@
 #define __NR_shutdown		373
 #define __NR_mlock2		374
 #define __NR_copy_file_range	375
-#define NR_syscalls 376
+#define __NR_preadv2		376
+#define __NR_pwritev2		377
+#define NR_syscalls 378
 
 /* 
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 8ba3243..77a84bd 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -72,7 +72,6 @@
 
 	if (!test_facility(34))
 		return;
-	get_online_cpus();
 	this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
 	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
 		cache = this_cpu_ci->info_list + idx;
@@ -86,7 +85,6 @@
 		seq_printf(m, "associativity=%d", cache->ways_of_associativity);
 		seq_puts(m, "\n");
 	}
-	put_online_cpus();
 }
 
 static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 3986c9f..29df8484 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -173,7 +173,7 @@
 /*
  * Copy memory of the old, dumped system to a user space virtual address
  */
-int copy_oldmem_user(void __user *dst, void *src, size_t count)
+static int copy_oldmem_user(void __user *dst, void *src, size_t count)
 {
 	unsigned long from, len;
 	int rc;
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 1b6081c..69f9908 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -89,6 +89,30 @@
 }
 EXPORT_SYMBOL_GPL(dump_trace);
 
+struct return_address_data {
+	unsigned long address;
+	int depth;
+};
+
+static int __return_address(void *data, unsigned long address)
+{
+	struct return_address_data *rd = data;
+
+	if (rd->depth--)
+		return 0;
+	rd->address = address;
+	return 1;
+}
+
+unsigned long return_address(int depth)
+{
+	struct return_address_data rd = { .depth = depth + 2 };
+
+	dump_trace(__return_address, &rd, NULL, current_stack_pointer());
+	return rd.address;
+}
+EXPORT_SYMBOL_GPL(return_address);
+
 static int show_address(void *data, unsigned long address)
 {
 	printk("([<%016lx>] %pSR)\n", address, (void *)address);
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index b7019ab..bedd2f5 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -1,6 +1,7 @@
 #ifndef _ENTRY_H
 #define _ENTRY_H
 
+#include <linux/percpu.h>
 #include <linux/types.h>
 #include <linux/signal.h>
 #include <asm/ptrace.h>
@@ -75,4 +76,7 @@
 long sys_s390_runtime_instr(int command, int signum);
 long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
 long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
+
+DECLARE_PER_CPU(u64, mt_cycles[8]);
+
 #endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 7873e17..fbc0789 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -51,6 +51,10 @@
 
 void module_arch_freeing_init(struct module *mod)
 {
+	if (is_livepatch_module(mod) &&
+	    mod->state == MODULE_STATE_LIVE)
+		return;
+
 	vfree(mod->arch.syminfo);
 	mod->arch.syminfo = NULL;
 }
@@ -425,7 +429,5 @@
 		    struct module *me)
 {
 	jump_label_apply_nops(me);
-	vfree(me->arch.syminfo);
-	me->arch.syminfo = NULL;
 	return 0;
 }
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 58bf457..59215c5 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -665,17 +665,21 @@
 static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
 			      void *hcpu)
 {
-	unsigned int cpu = (long) hcpu;
 	int flags;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
 		flags = PMC_INIT;
-		smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
+		local_irq_disable();
+		setup_pmc_cpu(&flags);
+		local_irq_enable();
 		break;
 	case CPU_DOWN_PREPARE:
 		flags = PMC_RELEASE;
-		smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
+		local_irq_disable();
+		setup_pmc_cpu(&flags);
+		local_irq_enable();
 		break;
 	default:
 		break;
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 1a43474..a8e8321 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1510,7 +1510,6 @@
 static int cpumf_pmu_notifier(struct notifier_block *self,
 			      unsigned long action, void *hcpu)
 {
-	unsigned int cpu = (long) hcpu;
 	int flags;
 
 	/* Ignore the notification if no events are scheduled on the PMU.
@@ -1521,13 +1520,17 @@
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
+	case CPU_DOWN_FAILED:
 		flags = PMC_INIT;
-		smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
+		local_irq_disable();
+		setup_pmc_cpu(&flags);
+		local_irq_enable();
 		break;
 	case CPU_DOWN_PREPARE:
 		flags = PMC_RELEASE;
-		smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
+		local_irq_disable();
+		setup_pmc_cpu(&flags);
+		local_irq_enable();
 		break;
 	default:
 		break;
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 2bba7df..481d7a83 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -7,6 +7,7 @@
  *		 Denis Joseph Barrow,
  */
 
+#include <linux/elf-randomize.h>
 #include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
@@ -37,9 +38,6 @@
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
-/* FPU save area for the init task */
-__vector128 init_task_fpu_regs[__NUM_VXRS] __init_task_data;
-
 /*
  * Return saved PC of a blocked thread. used in kernel/sched.
  * resume in entry.S does not create a new stack frame, it
@@ -85,35 +83,19 @@
 
 void arch_release_task_struct(struct task_struct *tsk)
 {
-	/* Free either the floating-point or the vector register save area */
-	kfree(tsk->thread.fpu.regs);
 }
 
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-	size_t fpu_regs_size;
-
-	*dst = *src;
-
-	/*
-	 * If the vector extension is available, it is enabled for all tasks,
-	 * and, thus, the FPU register save area must be allocated accordingly.
-	 */
-	fpu_regs_size = MACHINE_HAS_VX ? sizeof(__vector128) * __NUM_VXRS
-				       : sizeof(freg_t) * __NUM_FPRS;
-	dst->thread.fpu.regs = kzalloc(fpu_regs_size, GFP_KERNEL|__GFP_REPEAT);
-	if (!dst->thread.fpu.regs)
-		return -ENOMEM;
-
 	/*
 	 * Save the floating-point or vector register state of the current
 	 * task and set the CIF_FPU flag to lazy restore the FPU register
 	 * state when returning to user space.
 	 */
 	save_fpu_regs();
-	dst->thread.fpu.fpc = current->thread.fpu.fpc;
-	memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size);
 
+	memcpy(dst, src, arch_task_struct_size);
+	dst->thread.fpu.regs = dst->thread.fpu.fprs;
 	return 0;
 }
 
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 647128d..de74510 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -6,6 +6,7 @@
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/cpufeature.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
@@ -84,7 +85,6 @@
 		seq_puts(m, "\n");
 		show_cacheinfo(m);
 	}
-	get_online_cpus();
 	if (cpu_online(n)) {
 		struct cpuid *id = &per_cpu(cpu_id, n);
 		seq_printf(m, "processor %li: "
@@ -93,23 +93,31 @@
 			   "machine = %04X\n",
 			   n, id->version, id->ident, id->machine);
 	}
-	put_online_cpus();
 	return 0;
 }
 
+static inline void *c_update(loff_t *pos)
+{
+	if (*pos)
+		*pos = cpumask_next(*pos - 1, cpu_online_mask);
+	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
+}
+
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-	return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
+	get_online_cpus();
+	return c_update(pos);
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	++*pos;
-	return c_start(m, pos);
+	return c_update(pos);
 }
 
 static void c_stop(struct seq_file *m, void *v)
 {
+	put_online_cpus();
 }
 
 const struct seq_operations cpuinfo_op = {
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d3f9688..f319391 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -809,6 +809,22 @@
 }
 
 /*
+ * Find the correct size for the task_struct. This depends on
+ * the size of the struct fpu at the end of the thread_struct
+ * which is embedded in the task_struct.
+ */
+static void __init setup_task_size(void)
+{
+	int task_size = sizeof(struct task_struct);
+
+	if (!MACHINE_HAS_VX) {
+		task_size -= sizeof(__vector128) * __NUM_VXRS;
+		task_size += sizeof(freg_t) * __NUM_FPRS;
+	}
+	arch_task_struct_size = task_size;
+}
+
+/*
  * Setup function called from init/main.c just after the banner
  * was printed.
  */
@@ -846,6 +862,7 @@
 
 	os_info_init();
 	setup_ipl();
+	setup_task_size();
 
 	/* Do some memory reservations *before* memory is added to memblock */
 	reserve_memory_end();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 40a6b4f..7b89a75 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -832,7 +832,7 @@
 	pcpu_attach_task(pcpu, tidle);
 	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
 	/* Wait until cpu puts itself in the online & active maps */
-	while (!cpu_online(cpu) || !cpu_active(cpu))
+	while (!cpu_online(cpu))
 		cpu_relax();
 	return 0;
 }
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 293d8b9..9b59e62 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -384,3 +384,5 @@
 SYSCALL(sys_shutdown,sys_shutdown)
 SYSCALL(sys_mlock2,compat_sys_mlock2)
 SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
+SYSCALL(sys_preadv2,compat_sys_preadv2)
+SYSCALL(sys_pwritev2,compat_sys_pwritev2)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index dafc44f..856e30d 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -18,6 +18,8 @@
 #include <asm/cpu_mf.h>
 #include <asm/smp.h>
 
+#include "entry.h"
+
 static void virt_timer_expire(void);
 
 static LIST_HEAD(virt_timer_list);
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 5ea5af3..b190023 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -28,6 +28,7 @@
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQFD
 	select HAVE_KVM_IRQ_ROUTING
+	select HAVE_KVM_INVALID_WAKEUPS
 	select SRCU
 	select KVM_VFIO
 	---help---
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 84efc2b..5a80af7 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -977,6 +977,11 @@
 
 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 {
+	/*
+	 * We cannot move this into the if, as the CPU might be already
+	 * in kvm_vcpu_block without having the waitqueue set (polling)
+	 */
+	vcpu->valid_wakeup = true;
 	if (swait_active(&vcpu->wq)) {
 		/*
 		 * The vcpu gave up the cpu voluntarily, mark it as a good
@@ -2034,6 +2039,27 @@
 	return ret;
 }
 
+static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
+
+{
+	const u64 isc_mask = 0xffUL << 24; /* all iscs set */
+	u32 schid;
+
+	if (attr->flags)
+		return -EINVAL;
+	if (attr->attr != sizeof(schid))
+		return -EINVAL;
+	if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
+		return -EFAULT;
+	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
+	/*
+	 * If userspace is conforming to the architecture, we can have at most
+	 * one pending I/O interrupt per subchannel, so this is effectively a
+	 * clear all.
+	 */
+	return 0;
+}
+
 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 {
 	int r = 0;
@@ -2067,6 +2093,9 @@
 	case KVM_DEV_FLIC_ADAPTER_MODIFY:
 		r = modify_io_adapter(dev, attr);
 		break;
+	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
+		r = clear_io_irq(dev->kvm, attr);
+		break;
 	default:
 		r = -EINVAL;
 	}
@@ -2074,6 +2103,23 @@
 	return r;
 }
 
+static int flic_has_attr(struct kvm_device *dev,
+			     struct kvm_device_attr *attr)
+{
+	switch (attr->group) {
+	case KVM_DEV_FLIC_GET_ALL_IRQS:
+	case KVM_DEV_FLIC_ENQUEUE:
+	case KVM_DEV_FLIC_CLEAR_IRQS:
+	case KVM_DEV_FLIC_APF_ENABLE:
+	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
+	case KVM_DEV_FLIC_ADAPTER_REGISTER:
+	case KVM_DEV_FLIC_ADAPTER_MODIFY:
+	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
+		return 0;
+	}
+	return -ENXIO;
+}
+
 static int flic_create(struct kvm_device *dev, u32 type)
 {
 	if (!dev)
@@ -2095,6 +2141,7 @@
 	.name = "kvm-flic",
 	.get_attr = flic_get_attr,
 	.set_attr = flic_set_attr,
+	.has_attr = flic_has_attr,
 	.create = flic_create,
 	.destroy = flic_destroy,
 };
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 668c087..6d8ec3a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -65,6 +65,7 @@
 	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
+	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
 	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -118,9 +119,9 @@
 };
 
 /* upper facilities limit for kvm */
-unsigned long kvm_s390_fac_list_mask[] = {
-	0xffe6fffbfcfdfc40UL,
-	0x005e800000000000UL,
+unsigned long kvm_s390_fac_list_mask[16] = {
+	0xffe6000000000000UL,
+	0x005e000000000000UL,
 };
 
 unsigned long kvm_s390_fac_list_mask_size(void)
@@ -638,6 +639,7 @@
 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	struct kvm_s390_vm_cpu_processor *proc;
+	u16 lowest_ibc, unblocked_ibc;
 	int ret = 0;
 
 	mutex_lock(&kvm->lock);
@@ -652,9 +654,17 @@
 	}
 	if (!copy_from_user(proc, (void __user *)attr->addr,
 			    sizeof(*proc))) {
-		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
-		       sizeof(struct cpuid));
-		kvm->arch.model.ibc = proc->ibc;
+		kvm->arch.model.cpuid = proc->cpuid;
+		lowest_ibc = sclp.ibc >> 16 & 0xfff;
+		unblocked_ibc = sclp.ibc & 0xfff;
+		if (lowest_ibc) {
+			if (proc->ibc > unblocked_ibc)
+				kvm->arch.model.ibc = unblocked_ibc;
+			else if (proc->ibc < lowest_ibc)
+				kvm->arch.model.ibc = lowest_ibc;
+			else
+				kvm->arch.model.ibc = proc->ibc;
+		}
 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
 		       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	} else
@@ -687,7 +697,7 @@
 		ret = -ENOMEM;
 		goto out;
 	}
-	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
+	proc->cpuid = kvm->arch.model.cpuid;
 	proc->ibc = kvm->arch.model.ibc;
 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
@@ -1081,10 +1091,13 @@
 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
 }
 
-static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
+static u64 kvm_s390_get_initial_cpuid(void)
 {
-	get_cpu_id(cpu_id);
-	cpu_id->version = 0xff;
+	struct cpuid cpuid;
+
+	get_cpu_id(&cpuid);
+	cpuid.version = 0xff;
+	return *((u64 *) &cpuid);
 }
 
 static void kvm_s390_crypto_init(struct kvm *kvm)
@@ -1175,7 +1188,7 @@
 	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 
-	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
+	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
 
 	kvm_s390_crypto_init(kvm);
@@ -1624,7 +1637,6 @@
 {
 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
 
-	vcpu->arch.cpu_id = model->cpu_id;
 	vcpu->arch.sie_block->ibc = model->ibc;
 	if (test_kvm_facility(vcpu->kvm, 7))
 		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
@@ -1645,11 +1657,14 @@
 
 	kvm_s390_vcpu_setup_model(vcpu);
 
-	vcpu->arch.sie_block->ecb   = 6;
+	vcpu->arch.sie_block->ecb = 0x02;
+	if (test_kvm_facility(vcpu->kvm, 9))
+		vcpu->arch.sie_block->ecb |= 0x04;
 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
 		vcpu->arch.sie_block->ecb |= 0x10;
 
-	vcpu->arch.sie_block->ecb2  = 8;
+	if (test_kvm_facility(vcpu->kvm, 8))
+		vcpu->arch.sie_block->ecb2 |= 0x08;
 	vcpu->arch.sie_block->eca   = 0xC1002000U;
 	if (sclp.has_siif)
 		vcpu->arch.sie_block->eca |= 1;
@@ -2971,13 +2986,31 @@
 	return;
 }
 
+static inline unsigned long nonhyp_mask(int i)
+{
+	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
+
+	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
+}
+
+void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
+{
+	vcpu->valid_wakeup = false;
+}
+
 static int __init kvm_s390_init(void)
 {
+	int i;
+
 	if (!sclp.has_sief2) {
 		pr_info("SIE not available\n");
 		return -ENODEV;
 	}
 
+	for (i = 0; i < 16; i++)
+		kvm_s390_fac_list_mask[i] |=
+			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
+
 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 }
 
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0a1591d..95916fa 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -439,7 +439,7 @@
 
 static int handle_stidp(struct kvm_vcpu *vcpu)
 {
-	u64 stidp_data = vcpu->arch.stidp_data;
+	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
 	u64 operand2;
 	int rc;
 	ar_t ar;
@@ -670,8 +670,9 @@
 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	/* Only provide non-quiescing support if the host supports it */
-	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
+	/* Only provide non-quiescing support if enabled for the guest */
+	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
+	    !test_kvm_facility(vcpu->kvm, 14))
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	/* No support for conditional-SSKE */
@@ -744,7 +745,7 @@
 {
 	/* entries expected to be 1FF */
 	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
-	unsigned long *cbrlo, cbrle;
+	unsigned long *cbrlo;
 	struct gmap *gmap;
 	int i;
 
@@ -765,17 +766,9 @@
 	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
 	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
 	down_read(&gmap->mm->mmap_sem);
-	for (i = 0; i < entries; ++i) {
-		cbrle = cbrlo[i];
-		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
-			/* invalid entry */
-			break;
-		/* try to free backing */
-		__gmap_zap(gmap, cbrle);
-	}
+	for (i = 0; i < entries; ++i)
+		__gmap_zap(gmap, cbrlo[i]);
 	up_read(&gmap->mm->mmap_sem);
-	if (i < entries)
-		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 	return 0;
 }
 
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 77c22d6..28ea0ca 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -240,6 +240,12 @@
 	struct kvm_s390_local_interrupt *li;
 	int rc;
 
+	if (!test_kvm_facility(vcpu->kvm, 9)) {
+		*reg &= 0xffffffff00000000UL;
+		*reg |= SIGP_STATUS_INVALID_ORDER;
+		return SIGP_CC_STATUS_STORED;
+	}
+
 	li = &dst_vcpu->arch.local_int;
 	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
 		/* running */
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index d4549c9..e5f50a7 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -105,6 +105,7 @@
 			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 				return;
 			local_irq_restore(flags);
+			continue;
 		}
 		/* Check if the lock owner is running. */
 		if (first_diag && cpu_is_preempted(~owner)) {
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index cce577f..7a31440 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -631,6 +631,29 @@
 static DEFINE_SPINLOCK(pfault_lock);
 static LIST_HEAD(pfault_list);
 
+#define PF_COMPLETE	0x0080
+
+/*
+ * The mechanism of our pfault code: if Linux is running as a guest, runs a
+ * user space process, and the user space process accesses a page that the
+ * host has paged out, we get a pfault interrupt.
+ *
+ * This allows us, within the guest, to schedule a different process. Without
+ * this mechanism the host would have to suspend the whole virtual cpu until
+ * the page has been paged in.
+ *
+ * So when we get such an interrupt then we set the state of the current task
+ * to uninterruptible and also set the need_resched flag. Both happen within
+ * interrupt context(!). If we later on want to return to user space we
+ * recognize the need_resched flag and then call schedule().  It's not very
+ * obvious how this works...
+ *
+ * Of course we have a lot of additional fun with the completion interrupt (->
+ * host signals that a page of a process has been paged in and the process can
+ * continue to run). This interrupt can arrive on any cpu and, since we have
+ * virtual cpus, actually appear before the interrupt that signals that a page
+ * is missing.
+ */
 static void pfault_interrupt(struct ext_code ext_code,
 			     unsigned int param32, unsigned long param64)
 {
@@ -639,10 +662,9 @@
 	pid_t pid;
 
 	/*
-	 * Get the external interruption subcode & pfault
-	 * initial/completion signal bit. VM stores this 
-	 * in the 'cpu address' field associated with the
-         * external interrupt. 
+	 * Get the external interruption subcode & pfault initial/completion
+	 * signal bit. VM stores this in the 'cpu address' field associated
+	 * with the external interrupt.
 	 */
 	subcode = ext_code.subcode;
 	if ((subcode & 0xff00) != __SUBCODE_MASK)
@@ -658,7 +680,7 @@
 	if (!tsk)
 		return;
 	spin_lock(&pfault_lock);
-	if (subcode & 0x0080) {
+	if (subcode & PF_COMPLETE) {
 		/* signal bit is set -> a page has been swapped in by VM */
 		if (tsk->thread.pfault_wait == 1) {
 			/* Initial interrupt was faster than the completion
@@ -687,8 +709,7 @@
 			goto out;
 		if (tsk->thread.pfault_wait == 1) {
 			/* Already on the list with a reference: put to sleep */
-			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-			set_tsk_need_resched(tsk);
+			goto block;
 		} else if (tsk->thread.pfault_wait == -1) {
 			/* Completion interrupt was faster than the initial
 			 * interrupt (pfault_wait == -1). Set pfault_wait
@@ -703,7 +724,11 @@
 			get_task_struct(tsk);
 			tsk->thread.pfault_wait = 1;
 			list_add(&tsk->thread.list, &pfault_list);
-			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+block:
+			/* Since this must be a userspace fault, there
+			 * is no kernel task state to trample. Rely on the
+			 * return to userspace schedule() to block. */
+			__set_current_state(TASK_UNINTERRUPTIBLE);
 			set_tsk_need_resched(tsk);
 		}
 	}
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 69247b4..cace818 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -23,7 +23,7 @@
 /**
  * gmap_alloc - allocate a guest address space
  * @mm: pointer to the parent mm_struct
- * @limit: maximum size of the gmap address space
+ * @limit: maximum address of the gmap address space
  *
  * Returns a guest address space structure.
  */
@@ -292,7 +292,7 @@
 	if ((from | to | len) & (PMD_SIZE - 1))
 		return -EINVAL;
 	if (len == 0 || from + len < from || to + len < to ||
-	    from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
+	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
 		return -EINVAL;
 
 	flush = 0;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 49a1c84..a8a6765 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -20,9 +20,9 @@
 static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
+	struct page *head, *page;
 	unsigned long mask;
 	pte_t *ptep, pte;
-	struct page *page;
 
 	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
 
@@ -37,12 +37,14 @@
 			return 0;
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
-		if (!page_cache_get_speculative(page))
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
 			return 0;
 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-			put_page(page);
+			put_page(head);
 			return 0;
 		}
+		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 		pages[*nr] = page;
 		(*nr)++;
 
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 73e2903..2489b2e 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -89,7 +89,8 @@
 		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 		pgd_type = _REGION3_ENTRY_EMPTY;
 	}
-	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+	S390_lowcore.kernel_asce = init_mm.context.asce;
 	clear_table((unsigned long *) init_mm.pgd, pgd_type,
 		    sizeof(unsigned long)*2048);
 	vmem_map_init();
@@ -108,6 +109,13 @@
 	free_area_init_nodes(max_zone_pfns);
 }
 
+void mark_rodata_ro(void)
+{
+	/* Text and rodata are already protected. Nothing to do here. */
+	pr_info("Write protecting the kernel read-only data: %luk\n",
+		((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
+}
+
 void __init mem_init(void)
 {
 	if (MACHINE_HAS_TLB_LC)
@@ -126,9 +134,6 @@
 	setup_zero_pages();	/* Setup zeroed pages. */
 
 	mem_init_print_info(NULL);
-	printk("Write protected kernel read-only data: %#lx - %#lx\n",
-	       (unsigned long)&_stext,
-	       PFN_ALIGN((unsigned long)&_eshared) - 1);
 }
 
 void free_initmem(void)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 45c4daa..eb9df28 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -22,6 +22,7 @@
  * Started by Ingo Molnar <mingo@elte.hu>
  */
 
+#include <linux/elf-randomize.h>
 #include <linux/personality.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
@@ -174,7 +175,7 @@
 	if (!(flags & MAP_FIXED))
 		addr = 0;
 	if ((addr + len) >= TASK_SIZE)
-		return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
+		return crst_table_upgrade(current->mm);
 	return 0;
 }
 
@@ -191,7 +192,7 @@
 		return area;
 	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
 		/* Upgrade the page table to 4 levels and retry. */
-		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+		rc = crst_table_upgrade(mm);
 		if (rc)
 			return (unsigned long) rc;
 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +214,7 @@
 		return area;
 	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
 		/* Upgrade the page table to 4 levels and retry. */
-		rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+		rc = crst_table_upgrade(mm);
 		if (rc)
 			return (unsigned long) rc;
 		area = arch_get_unmapped_area_topdown(filp, addr, len,
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index f6c3de2..e8b5962 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -76,81 +76,52 @@
 	__tlb_flush_local();
 }
 
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
+int crst_table_upgrade(struct mm_struct *mm)
 {
 	unsigned long *table, *pgd;
-	unsigned long entry;
-	int flush;
 
-	BUG_ON(limit > TASK_MAX_SIZE);
-	flush = 0;
-repeat:
+	/* upgrade should only happen from 3 to 4 levels */
+	BUG_ON(mm->context.asce_limit != (1UL << 42));
+
 	table = crst_table_alloc(mm);
 	if (!table)
 		return -ENOMEM;
+
 	spin_lock_bh(&mm->page_table_lock);
-	if (mm->context.asce_limit < limit) {
-		pgd = (unsigned long *) mm->pgd;
-		if (mm->context.asce_limit <= (1UL << 31)) {
-			entry = _REGION3_ENTRY_EMPTY;
-			mm->context.asce_limit = 1UL << 42;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION3;
-		} else {
-			entry = _REGION2_ENTRY_EMPTY;
-			mm->context.asce_limit = 1UL << 53;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION2;
-		}
-		crst_table_init(table, entry);
-		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
-		mm->pgd = (pgd_t *) table;
-		mm->task_size = mm->context.asce_limit;
-		table = NULL;
-		flush = 1;
-	}
+	pgd = (unsigned long *) mm->pgd;
+	crst_table_init(table, _REGION2_ENTRY_EMPTY);
+	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+	mm->pgd = (pgd_t *) table;
+	mm->context.asce_limit = 1UL << 53;
+	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+	mm->task_size = mm->context.asce_limit;
 	spin_unlock_bh(&mm->page_table_lock);
-	if (table)
-		crst_table_free(mm, table);
-	if (mm->context.asce_limit < limit)
-		goto repeat;
-	if (flush)
-		on_each_cpu(__crst_table_upgrade, mm, 0);
+
+	on_each_cpu(__crst_table_upgrade, mm, 0);
 	return 0;
 }
 
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+void crst_table_downgrade(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 
+	/* downgrade should only happen from 3 to 2 levels (compat only) */
+	BUG_ON(mm->context.asce_limit != (1UL << 42));
+
 	if (current->active_mm == mm) {
 		clear_user_asce();
 		__tlb_flush_mm(mm);
 	}
-	while (mm->context.asce_limit > limit) {
-		pgd = mm->pgd;
-		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
-		case _REGION_ENTRY_TYPE_R2:
-			mm->context.asce_limit = 1UL << 42;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_REGION3;
-			break;
-		case _REGION_ENTRY_TYPE_R3:
-			mm->context.asce_limit = 1UL << 31;
-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
-						_ASCE_USER_BITS |
-						_ASCE_TYPE_SEGMENT;
-			break;
-		default:
-			BUG();
-		}
-		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
-		mm->task_size = mm->context.asce_limit;
-		crst_table_free(mm, (unsigned long *) pgd);
-	}
+
+	pgd = mm->pgd;
+	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+	mm->context.asce_limit = 1UL << 31;
+	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+	mm->task_size = mm->context.asce_limit;
+	crst_table_free(mm, (unsigned long *) pgd);
+
 	if (current->active_mm == mm)
 		set_user_asce(mm);
 }
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index d27fccba..d48cf25 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -56,7 +56,7 @@
 	return pmd;
 }
 
-static pte_t __ref *vmem_pte_alloc(unsigned long address)
+static pte_t __ref *vmem_pte_alloc(void)
 {
 	pte_t *pte;
 
@@ -121,7 +121,7 @@
 			continue;
 		}
 		if (pmd_none(*pm_dir)) {
-			pt_dir = vmem_pte_alloc(address);
+			pt_dir = vmem_pte_alloc();
 			if (!pt_dir)
 				goto out;
 			pmd_populate(&init_mm, pm_dir, pt_dir);
@@ -233,7 +233,7 @@
 				address = (address + PMD_SIZE) & PMD_MASK;
 				continue;
 			}
-			pt_dir = vmem_pte_alloc(address);
+			pt_dir = vmem_pte_alloc();
 			if (!pt_dir)
 				goto out;
 			pmd_populate(&init_mm, pm_dir, pt_dir);
@@ -370,7 +370,7 @@
 	ro_end = (unsigned long)&_eshared & PAGE_MASK;
 	for_each_memblock(memory, reg) {
 		start = reg->base;
-		end = reg->base + reg->size - 1;
+		end = reg->base + reg->size;
 		if (start >= ro_end || end <= ro_start)
 			vmem_add_mem(start, end - start, 0);
 		else if (start >= ro_start && end <= ro_end)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 3c0bfc1..9133b0e 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -54,16 +54,17 @@
 #define SEEN_FUNC	16	/* calls C functions */
 #define SEEN_TAIL_CALL	32	/* code uses tail calls */
 #define SEEN_SKB_CHANGE	64	/* code changes skb data */
+#define SEEN_REG_AX	128	/* code uses constant blinding */
 #define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
  * s390 registers
  */
-#define REG_W0		(__MAX_BPF_REG+0)	/* Work register 1 (even) */
-#define REG_W1		(__MAX_BPF_REG+1)	/* Work register 2 (odd) */
-#define REG_SKB_DATA	(__MAX_BPF_REG+2)	/* SKB data register */
-#define REG_L		(__MAX_BPF_REG+3)	/* Literal pool register */
-#define REG_15		(__MAX_BPF_REG+4)	/* Register 15 */
+#define REG_W0		(MAX_BPF_JIT_REG + 0)	/* Work register 1 (even) */
+#define REG_W1		(MAX_BPF_JIT_REG + 1)	/* Work register 2 (odd) */
+#define REG_SKB_DATA	(MAX_BPF_JIT_REG + 2)	/* SKB data register */
+#define REG_L		(MAX_BPF_JIT_REG + 3)	/* Literal pool register */
+#define REG_15		(MAX_BPF_JIT_REG + 4)	/* Register 15 */
 #define REG_0		REG_W0			/* Register 0 */
 #define REG_1		REG_W1			/* Register 1 */
 #define REG_2		BPF_REG_1		/* Register 2 */
@@ -88,6 +89,8 @@
 	[BPF_REG_9]	= 10,
 	/* BPF stack pointer */
 	[BPF_REG_FP]	= 13,
+	/* Register for blinding (shared with REG_SKB_DATA) */
+	[BPF_REG_AX]	= 12,
 	/* SKB data pointer */
 	[REG_SKB_DATA]	= 12,
 	/* Work registers for s390x backend */
@@ -385,7 +388,7 @@
 /*
  * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
  * we store the SKB header length on the stack and the SKB data
- * pointer in REG_SKB_DATA.
+ * pointer in REG_SKB_DATA if BPF_REG_AX is not used.
  */
 static void emit_load_skb_data_hlen(struct bpf_jit *jit)
 {
@@ -397,9 +400,10 @@
 		   offsetof(struct sk_buff, data_len));
 	/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
 	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
-	/* lg %skb_data,data_off(%b1) */
-	EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
-		      BPF_REG_1, offsetof(struct sk_buff, data));
+	if (!(jit->seen & SEEN_REG_AX))
+		/* lg %skb_data,data_off(%b1) */
+		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+			      BPF_REG_1, offsetof(struct sk_buff, data));
 }
 
 /*
@@ -487,6 +491,8 @@
 	s32 imm = insn->imm;
 	s16 off = insn->off;
 
+	if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
+		jit->seen |= SEEN_REG_AX;
 	switch (insn->code) {
 	/*
 	 * BPF_MOV
@@ -1188,7 +1194,7 @@
 		/*
 		 * Implicit input:
 		 *  BPF_REG_6	 (R7) : skb pointer
-		 *  REG_SKB_DATA (R12): skb data pointer
+		 *  REG_SKB_DATA (R12): skb data pointer (if no BPF_REG_AX)
 		 *
 		 * Calculated input:
 		 *  BPF_REG_2	 (R3) : offset of byte(s) to fetch in skb
@@ -1209,6 +1215,11 @@
 			/* agfr %b2,%src (%src is s32 here) */
 			EMIT4(0xb9180000, BPF_REG_2, src_reg);
 
+		/* Reload REG_SKB_DATA if BPF_REG_AX is used */
+		if (jit->seen & SEEN_REG_AX)
+			/* lg %skb_data,data_off(%b6) */
+			EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+				      BPF_REG_6, offsetof(struct sk_buff, data));
 		/* basr %b5,%w1 (%b5 is call saved) */
 		EMIT2(0x0d00, BPF_REG_5, REG_W1);
 
@@ -1262,37 +1273,62 @@
 /*
  * Compile eBPF program "fp"
  */
-void bpf_int_jit_compile(struct bpf_prog *fp)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
+	struct bpf_prog *tmp, *orig_fp = fp;
 	struct bpf_binary_header *header;
+	bool tmp_blinded = false;
 	struct bpf_jit jit;
 	int pass;
 
 	if (!bpf_jit_enable)
-		return;
+		return orig_fp;
+
+	tmp = bpf_jit_blind_constants(fp);
+	/*
+	 * If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter.
+	 */
+	if (IS_ERR(tmp))
+		return orig_fp;
+	if (tmp != fp) {
+		tmp_blinded = true;
+		fp = tmp;
+	}
+
 	memset(&jit, 0, sizeof(jit));
 	jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
-	if (jit.addrs == NULL)
-		return;
+	if (jit.addrs == NULL) {
+		fp = orig_fp;
+		goto out;
+	}
 	/*
 	 * Three initial passes:
 	 *   - 1/2: Determine clobbered registers
 	 *   - 3:   Calculate program size and addrs array
 	 */
 	for (pass = 1; pass <= 3; pass++) {
-		if (bpf_jit_prog(&jit, fp))
+		if (bpf_jit_prog(&jit, fp)) {
+			fp = orig_fp;
 			goto free_addrs;
+		}
 	}
 	/*
 	 * Final pass: Allocate and generate program
 	 */
-	if (jit.size >= BPF_SIZE_MAX)
+	if (jit.size >= BPF_SIZE_MAX) {
+		fp = orig_fp;
 		goto free_addrs;
+	}
 	header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
-	if (!header)
+	if (!header) {
+		fp = orig_fp;
 		goto free_addrs;
-	if (bpf_jit_prog(&jit, fp))
+	}
+	if (bpf_jit_prog(&jit, fp)) {
+		fp = orig_fp;
 		goto free_addrs;
+	}
 	if (bpf_jit_enable > 1) {
 		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
 		if (jit.prg_buf)
@@ -1305,6 +1341,11 @@
 	}
 free_addrs:
 	kfree(jit.addrs);
+out:
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(fp, fp == orig_fp ?
+					   tmp : orig_fp);
+	return fp;
 }
 
 /*
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 21591dd..1a4512c 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -176,8 +176,7 @@
 		rc = clp_store_query_pci_fn(zdev, &rrb->response);
 		if (rc)
 			goto out;
-		if (rrb->response.pfgid)
-			rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
+		rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
 	} else {
 		zpci_err("Q PCI FN:\n");
 		zpci_err_clp(rrb->response.hdr.rsp, rc);
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index c555de3..38993b1 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -1,5 +1,5 @@
 /*
- *  Copyright IBM Corp. 2012
+ *  Copyright IBM Corp. 2012,2015
  *
  *  Author(s):
  *    Jan Glauber <jang@linux.vnet.ibm.com>
@@ -23,22 +23,45 @@
 debug_info_t *pci_debug_err_id;
 EXPORT_SYMBOL_GPL(pci_debug_err_id);
 
-static char *pci_perf_names[] = {
-	/* hardware counters */
+static char *pci_common_names[] = {
 	"Load operations",
 	"Store operations",
 	"Store block operations",
 	"Refresh operations",
+};
+
+static char *pci_fmt0_names[] = {
 	"DMA read bytes",
 	"DMA write bytes",
 };
 
+static char *pci_fmt1_names[] = {
+	"Received bytes",
+	"Received packets",
+	"Transmitted bytes",
+	"Transmitted packets",
+};
+
+static char *pci_fmt2_names[] = {
+	"Consumed work units",
+	"Maximum work units",
+};
+
 static char *pci_sw_names[] = {
 	"Allocated pages",
 	"Mapped pages",
 	"Unmapped pages",
 };
 
+static void pci_fmb_show(struct seq_file *m, char *name[], int length,
+			 u64 *data)
+{
+	int i;
+
+	for (i = 0; i < length; i++, data++)
+		seq_printf(m, "%26s:\t%llu\n", name[i], *data);
+}
+
 static void pci_sw_counter_show(struct seq_file *m)
 {
 	struct zpci_dev *zdev = m->private;
@@ -53,8 +76,6 @@
 static int pci_perf_show(struct seq_file *m, void *v)
 {
 	struct zpci_dev *zdev = m->private;
-	u64 *stat;
-	int i;
 
 	if (!zdev)
 		return 0;
@@ -72,15 +93,27 @@
 	seq_printf(m, "Samples: %u\n", zdev->fmb->samples);
 	seq_printf(m, "Last update TOD: %Lx\n", zdev->fmb->last_update);
 
-	/* hardware counters */
-	stat = (u64 *) &zdev->fmb->ld_ops;
-	for (i = 0; i < 4; i++)
-		seq_printf(m, "%26s:\t%llu\n",
-			   pci_perf_names[i], *(stat + i));
-	if (zdev->fmb->dma_valid)
-		for (i = 4; i < 6; i++)
-			seq_printf(m, "%26s:\t%llu\n",
-				   pci_perf_names[i], *(stat + i));
+	pci_fmb_show(m, pci_common_names, ARRAY_SIZE(pci_common_names),
+		     &zdev->fmb->ld_ops);
+
+	switch (zdev->fmb->format) {
+	case 0:
+		if (!(zdev->fmb->fmt_ind & ZPCI_FMB_DMA_COUNTER_VALID))
+			break;
+		pci_fmb_show(m, pci_fmt0_names, ARRAY_SIZE(pci_fmt0_names),
+			     &zdev->fmb->fmt0.dma_rbytes);
+		break;
+	case 1:
+		pci_fmb_show(m, pci_fmt1_names, ARRAY_SIZE(pci_fmt1_names),
+			     &zdev->fmb->fmt1.rx_bytes);
+		break;
+	case 2:
+		pci_fmb_show(m, pci_fmt2_names, ARRAY_SIZE(pci_fmt2_names),
+			     &zdev->fmb->fmt2.consumed_work_units);
+		break;
+	default:
+		seq_puts(m, "Unknown format\n");
+	}
 
 	pci_sw_counter_show(m);
 	mutex_unlock(&zdev->lock);
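
The pci_debug.c change replaces two open-coded loops with pci_fmb_show(), a helper that walks a block of u64 counters and prints one named line per entry, then selects the name table from the measurement-block format. A sketch of the same helper shape for any equally sized name/value pair; the counter names and values below are illustrative only:

#include <linux/seq_file.h>
#include <linux/kernel.h>

static void show_counters(struct seq_file *m, const char *const names[],
                          int n, const u64 *val)
{
    int i;

    /* one "name: value" line per counter, same layout as pci_fmb_show() */
    for (i = 0; i < n; i++)
        seq_printf(m, "%26s:\t%llu\n", names[i], val[i]);
}

static int demo_show(struct seq_file *m, void *v)
{
    static const char *const names[] = { "Load operations", "Store operations" };
    u64 vals[ARRAY_SIZE(names)] = { 1, 2 };    /* illustrative values only */

    show_counters(m, names, ARRAY_SIZE(names), vals);
    return 0;
}
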
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index e595e89..1ea8c07 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -457,7 +457,7 @@
 	zdev->dma_table = dma_alloc_cpu_table();
 	if (!zdev->dma_table) {
 		rc = -ENOMEM;
-		goto out_clean;
+		goto out;
 	}
 
 	/*
@@ -477,18 +477,22 @@
 	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
 	if (!zdev->iommu_bitmap) {
 		rc = -ENOMEM;
-		goto out_reg;
+		goto free_dma_table;
 	}
 
 	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 				(u64) zdev->dma_table);
 	if (rc)
-		goto out_reg;
-	return 0;
+		goto free_bitmap;
 
-out_reg:
+	return 0;
+free_bitmap:
+	vfree(zdev->iommu_bitmap);
+	zdev->iommu_bitmap = NULL;
+free_dma_table:
 	dma_free_cpu_table(zdev->dma_table);
-out_clean:
+	zdev->dma_table = NULL;
+out:
 	return rc;
 }
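
The pci_dma.c hunk fixes the error path so that every resource set up before a failure is released again, in reverse order, with the stale pointers reset to NULL. The same unwind-with-gotos shape in a self-contained sketch; dev_state, register_hw() and the allocation sizes are hypothetical stand-ins:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

struct dev_state {        /* hypothetical per-device state */
    void *table;
    void *bitmap;
};

static int register_hw(struct dev_state *st)
{
    return 0;             /* stands in for the real registration call */
}

static int init_device_sketch(struct dev_state *st)
{
    int rc;

    st->table = kzalloc(PAGE_SIZE, GFP_KERNEL);
    if (!st->table) {
        rc = -ENOMEM;
        goto out;
    }
    st->bitmap = vzalloc(PAGE_SIZE);
    if (!st->bitmap) {
        rc = -ENOMEM;
        goto free_table;
    }
    rc = register_hw(st);
    if (rc)
        goto free_bitmap;
    return 0;

free_bitmap:              /* unwind strictly in reverse order of setup */
    vfree(st->bitmap);
    st->bitmap = NULL;
free_table:
    kfree(st->table);
    st->table = NULL;
out:
    return rc;
}
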
 
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index f37a580..ed484dc 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -12,6 +12,8 @@
 #include <linux/stat.h>
 #include <linux/pci.h>
 
+#include <asm/sclp.h>
+
 #define zpci_attr(name, fmt, member)					\
 static ssize_t name##_show(struct device *dev,				\
 			   struct device_attribute *attr, char *buf)	\
@@ -77,8 +79,29 @@
 				       sizeof(zdev->util_str));
 }
 static BIN_ATTR_RO(util_string, CLP_UTIL_STR_LEN);
+
+static ssize_t report_error_write(struct file *filp, struct kobject *kobj,
+				  struct bin_attribute *attr, char *buf,
+				  loff_t off, size_t count)
+{
+	struct zpci_report_error_header *report = (void *) buf;
+	struct device *dev = kobj_to_dev(kobj);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct zpci_dev *zdev = to_zpci(pdev);
+	int ret;
+
+	if (off || (count < sizeof(*report)))
+		return -EINVAL;
+
+	ret = sclp_pci_report(report, zdev->fh, zdev->fid);
+
+	return ret ? ret : count;
+}
+static BIN_ATTR(report_error, S_IWUSR, NULL, report_error_write, PAGE_SIZE);
+
 static struct bin_attribute *zpci_bin_attrs[] = {
 	&bin_attr_util_string,
+	&bin_attr_report_error,
 	NULL,
 };
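
pci_sysfs.c adds a write-only binary attribute, report_error, whose handler rejects offset or short writes and forwards the buffer to firmware through sclp_pci_report(). A hedged sketch of that attribute shape, with an invented payload struct and a log line in place of the firmware call:

#include <linux/sysfs.h>
#include <linux/printk.h>
#include <linux/stat.h>
#include <linux/errno.h>

struct demo_report {      /* hypothetical fixed-size payload */
    u32 code;
    u32 data;
};

static ssize_t demo_write(struct file *filp, struct kobject *kobj,
                          struct bin_attribute *attr, char *buf,
                          loff_t off, size_t count)
{
    struct demo_report *report = (void *)buf;

    /* require the whole structure in a single write from offset 0 */
    if (off || count < sizeof(*report))
        return -EINVAL;

    pr_info("report: code=%u data=%u\n", report->code, report->data);
    return count;         /* consume the write on success */
}
static BIN_ATTR(demo_report, S_IWUSR, NULL, demo_write, PAGE_SIZE);
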
 
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 324599b..0104c81 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -20,7 +20,6 @@
 #include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/sh_eth.h>
 #include <linux/sh_intc.h>
 #include <linux/usb/renesas_usbhs.h>
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 62c3b81..de8393c 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -15,7 +15,6 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/mmc/host.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/sh_flctl.h>
 #include <linux/mfd/tmio.h>
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index a9c0c07..6d61279 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -13,7 +13,6 @@
 #include <linux/platform_device.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mfd/tmio.h>
 #include <linux/gpio.h>
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 6bd9230..5deb2d8 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -11,7 +11,6 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/mmc/host.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/onenand.h>
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 7a04da3..5de60a7 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -13,7 +13,6 @@
 #include <linux/input.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/mmc/host.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mtd/nand.h>
diff --git a/arch/sh/boards/mach-sdk7786/gpio.c b/arch/sh/boards/mach-sdk7786/gpio.c
index f71ce09..4799701 100644
--- a/arch/sh/boards/mach-sdk7786/gpio.c
+++ b/arch/sh/boards/mach-sdk7786/gpio.c
@@ -9,7 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
@@ -44,6 +44,6 @@
 
 static int __init usrgpir_gpio_setup(void)
 {
-	return gpiochip_add(&usrgpir_gpio_chip);
+	return gpiochip_add_data(&usrgpir_gpio_chip, NULL);
 }
 device_initcall(usrgpir_gpio_setup);
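
This gpio.c change (and the mach-x3proto one further down) moves from gpiochip_add() to gpiochip_add_data(), whose extra argument is driver-private data (NULL for these boards); callbacks can later fetch that pointer with gpiochip_get_data(). A sketch of how a driver that does carry per-chip state would use the pair; the my_gpio type and register layout are invented:

#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/io.h>

struct my_gpio {          /* hypothetical per-chip driver state */
    struct gpio_chip chip;
    void __iomem *regs;
};

static int my_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
    struct my_gpio *mg = gpiochip_get_data(chip);    /* data passed at add time */

    return !!(readl(mg->regs) & BIT(offset));
}

static int my_gpio_register_sketch(struct my_gpio *mg)
{
    mg->chip.get = my_gpio_get;
    /* the second argument becomes the value gpiochip_get_data() returns */
    return gpiochip_add_data(&mg->chip, mg);
}
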
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index e0e1df1..f1fecd3 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -15,7 +15,6 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/mmc/host.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mtd/physmap.h>
 #include <linux/delay.h>
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c
index 1fb2cbe..cea88b0 100644
--- a/arch/sh/boards/mach-x3proto/gpio.c
+++ b/arch/sh/boards/mach-x3proto/gpio.c
@@ -13,7 +13,7 @@
 
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
@@ -107,7 +107,7 @@
 	if (unlikely(ilsel < 0))
 		return ilsel;
 
-	ret = gpiochip_add(&x3proto_gpio_chip);
+	ret = gpiochip_add_data(&x3proto_gpio_chip, NULL);
 	if (unlikely(ret))
 		goto err_gpio;
 
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index a319745..751c337 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -26,6 +26,7 @@
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h
deleted file mode 100644
index edab572..0000000
--- a/arch/sh/include/asm/rwsem.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
- * in lib/rwsem.c.
- */
-
-#ifndef _ASM_SH_RWSEM_H
-#define _ASM_SH_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
-		smp_wmb();
-	else
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg(&sem->count, tmp,
-				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			smp_wmb();
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-				(atomic_t *)(&sem->count));
-	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
-		smp_wmb();
-	else
-		rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-		      RWSEM_ACTIVE_WRITE_BIAS);
-	smp_wmb();
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	smp_wmb();
-	tmp = atomic_dec_return((atomic_t *)(&sem->count));
-	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	smp_wmb();
-	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-			      (atomic_t *)(&sem->count)) < 0)
-		rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-	atomic_add(delta, (atomic_t *)(&sem->count));
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	smp_wmb();
-	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
-	if (tmp < 0)
-		rwsem_downgrade_wake(sem);
-}
-
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
-{
-	__down_write(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	smp_mb();
-	return atomic_add_return(delta, (atomic_t *)(&sem->count));
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_SH_RWSEM_H */
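
With arch/sh/include/asm/rwsem.h gone, sh picks up the generic rw-semaphore header through the "generic-y += rwsem.h" line added to its Kbuild above (sparc gets the same pairing further down). Under the usual generic-y convention, not stated in this diff itself, kbuild then emits a one-line wrapper into the arch's generated include directory, roughly:

/* arch/sh/include/generated/asm/rwsem.h, emitted by kbuild (sketch) */
#include <asm-generic/rwsem.h>
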
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 1baf0ba..c9f8bbd 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -34,11 +34,6 @@
 DECLARE_PER_CPU(int, cpu_state);
 
 void smp_message_recv(unsigned int msg);
-void smp_timer_broadcast(const struct cpumask *mask);
-
-void local_timer_interrupt(void);
-void local_timer_setup(unsigned int cpu);
-void local_timer_stop(unsigned int cpu);
 
 void arch_send_call_function_single_ipi(int cpu);
 void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index b0a282d..358e3f5 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -17,7 +17,7 @@
 
 #define mc_capable()    (1)
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 4a29880..839612c 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -73,8 +73,6 @@
 {
 	int i;
 
-	local_timer_setup(0);
-
 	BUILD_BUG_ON(SMP_MSG_NR >= 8);
 
 	for (i = 0; i < SMP_MSG_NR; i++)
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 772caff..c82912a 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -21,7 +21,7 @@
 cpumask_t cpu_core_map[NR_CPUS];
 EXPORT_SYMBOL(cpu_core_map);
 
-static cpumask_t cpu_coregroup_map(unsigned int cpu)
+static cpumask_t cpu_coregroup_map(int cpu)
 {
 	/*
 	 * Presently all SH-X3 SMP cores are multi-cores, so just keep it
@@ -30,7 +30,7 @@
 	return *cpu_possible_mask;
 }
 
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	return &cpu_core_map[cpu];
 }
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 57ffaf2..db0a26c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -21,7 +21,6 @@
 	select HAVE_ARCH_KGDB if !SMP || SPARC64
 	select HAVE_ARCH_TRACEHOOK
 	select SYSCTL_EXCEPTION_TRACE
-	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select RTC_CLASS
 	select RTC_DRV_M48T59
@@ -32,7 +31,7 @@
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_PCI_IOMAP
 	select HAVE_NMI_WATCHDOG if SPARC64
-	select HAVE_BPF_JIT
+	select HAVE_CBPF_JIT
 	select HAVE_DEBUG_BUGVERBOSE
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_CLOCKEVENTS
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index fb23fd6..c74d370 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -24,7 +24,6 @@
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 04920ab..3583d67 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -48,7 +48,6 @@
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index e9286188..6024c26 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -16,6 +16,7 @@
 generic-y += module.h
 generic-y += mutex.h
 generic-y += preempt.h
+generic-y += rwsem.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += types.h
diff --git a/arch/sparc/include/asm/compat_signal.h b/arch/sparc/include/asm/compat_signal.h
index 9ed1f12..4b027b1 100644
--- a/arch/sparc/include/asm/compat_signal.h
+++ b/arch/sparc/include/asm/compat_signal.h
@@ -6,17 +6,17 @@
 
 #ifdef CONFIG_COMPAT
 struct __new_sigaction32 {
-	unsigned		sa_handler;
+	unsigned int		sa_handler;
 	unsigned int    	sa_flags;
-	unsigned		sa_restorer;     /* not used by Linux/SPARC yet */
+	unsigned int		sa_restorer;     /* not used by Linux/SPARC yet */
 	compat_sigset_t 	sa_mask;
 };
 
 struct __old_sigaction32 {
-	unsigned		sa_handler;
+	unsigned int		sa_handler;
 	compat_old_sigset_t  	sa_mask;
 	unsigned int    	sa_flags;
-	unsigned		sa_restorer;     /* not used by Linux/SPARC yet */
+	unsigned int		sa_restorer;     /* not used by Linux/SPARC yet */
 };
 #endif
 
diff --git a/arch/sparc/include/asm/obio.h b/arch/sparc/include/asm/obio.h
index 910c1d9..426ad75 100644
--- a/arch/sparc/include/asm/obio.h
+++ b/arch/sparc/include/asm/obio.h
@@ -117,9 +117,9 @@
 			      "i" (ASI_M_CTL));
 }
 
-static inline unsigned bw_get_prof_limit(int cpu)
+static inline unsigned int bw_get_prof_limit(int cpu)
 {
-	unsigned limit;
+	unsigned int limit;
 	
 	__asm__ __volatile__ ("lda [%1] %2, %0" :
 			      "=r" (limit) :
@@ -128,7 +128,7 @@
 	return limit;
 }
 
-static inline void bw_set_prof_limit(int cpu, unsigned limit)
+static inline void bw_set_prof_limit(int cpu, unsigned int limit)
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 			      "r" (limit),
@@ -136,9 +136,9 @@
 			      "i" (ASI_M_CTL));
 }
 
-static inline unsigned bw_get_ctrl(int cpu)
+static inline unsigned int bw_get_ctrl(int cpu)
 {
-	unsigned ctrl;
+	unsigned int ctrl;
 	
 	__asm__ __volatile__ ("lda [%1] %2, %0" :
 			      "=r" (ctrl) :
@@ -147,7 +147,7 @@
 	return ctrl;
 }
 
-static inline void bw_set_ctrl(int cpu, unsigned ctrl)
+static inline void bw_set_ctrl(int cpu, unsigned int ctrl)
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 			      "r" (ctrl),
@@ -155,9 +155,9 @@
 			      "i" (ASI_M_CTL));
 }
 
-static inline unsigned cc_get_ipen(void)
+static inline unsigned int cc_get_ipen(void)
 {
-	unsigned pending;
+	unsigned int pending;
 	
 	__asm__ __volatile__ ("lduha [%1] %2, %0" :
 			      "=r" (pending) :
@@ -166,7 +166,7 @@
 	return pending;
 }
 
-static inline void cc_set_iclr(unsigned clear)
+static inline void cc_set_iclr(unsigned int clear)
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 			      "r" (clear),
@@ -174,9 +174,9 @@
 			      "i" (ASI_M_MXCC));
 }
 
-static inline unsigned cc_get_imsk(void)
+static inline unsigned int cc_get_imsk(void)
 {
-	unsigned mask;
+	unsigned int mask;
 	
 	__asm__ __volatile__ ("lduha [%1] %2, %0" :
 			      "=r" (mask) :
@@ -185,7 +185,7 @@
 	return mask;
 }
 
-static inline void cc_set_imsk(unsigned mask)
+static inline void cc_set_imsk(unsigned int mask)
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 			      "r" (mask),
@@ -193,9 +193,9 @@
 			      "i" (ASI_M_MXCC));
 }
 
-static inline unsigned cc_get_imsk_other(int cpuid)
+static inline unsigned int cc_get_imsk_other(int cpuid)
 {
-	unsigned mask;
+	unsigned int mask;
 	
 	__asm__ __volatile__ ("lduha [%1] %2, %0" :
 			      "=r" (mask) :
@@ -204,7 +204,7 @@
 	return mask;
 }
 
-static inline void cc_set_imsk_other(int cpuid, unsigned mask)
+static inline void cc_set_imsk_other(int cpuid, unsigned int mask)
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 			      "r" (mask),
@@ -212,7 +212,7 @@
 			      "i" (ASI_M_CTL));
 }
 
-static inline void cc_set_igen(unsigned gen)
+static inline void cc_set_igen(unsigned int gen)
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 			      "r" (gen),
diff --git a/arch/sparc/include/asm/openprom.h b/arch/sparc/include/asm/openprom.h
index 47eaafa..63374c4 100644
--- a/arch/sparc/include/asm/openprom.h
+++ b/arch/sparc/include/asm/openprom.h
@@ -29,12 +29,12 @@
 /* V2 and later prom device operations. */
 struct linux_dev_v2_funcs {
 	phandle (*v2_inst2pkg)(int d);	/* Convert ihandle to phandle */
-	char * (*v2_dumb_mem_alloc)(char *va, unsigned sz);
-	void (*v2_dumb_mem_free)(char *va, unsigned sz);
+	char * (*v2_dumb_mem_alloc)(char *va, unsigned int sz);
+	void (*v2_dumb_mem_free)(char *va, unsigned int sz);
 
 	/* To map devices into virtual I/O space. */
-	char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz);
-	void (*v2_dumb_munmap)(char *virta, unsigned size);
+	char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned int paddr, unsigned int sz);
+	void (*v2_dumb_munmap)(char *virta, unsigned int size);
 
 	int (*v2_dev_open)(char *devpath);
 	void (*v2_dev_close)(int d);
@@ -50,7 +50,7 @@
 struct linux_mlist_v0 {
 	struct linux_mlist_v0 *theres_more;
 	unsigned int start_adr;
-	unsigned num_bytes;
+	unsigned int num_bytes;
 };
 
 struct linux_mem_v0 {
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 7a38d6a..f089cfa 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -218,7 +218,7 @@
 extern pgprot_t PAGE_COPY;
 extern pgprot_t PAGE_SHARED;
 
-/* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */
+/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
 extern unsigned long _PAGE_IE;
 extern unsigned long _PAGE_E;
 extern unsigned long _PAGE_CACHE;
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 6924bde..ce2595c 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -201,7 +201,7 @@
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
 /* Please see the commentary in asm/backoff.h for a description of
- * what these instructions are doing and how they have been choosen.
+ * what these instructions are doing and how they have been chosen.
  * To make a long story short, we are trying to yield the current cpu
  * strand during busy loops.
  */
diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h
deleted file mode 100644
index 069bf4d..0000000
--- a/arch/sparc/include/asm/rwsem.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * rwsem.h: R/W semaphores implemented using CAS
- *
- * Written by David S. Miller (davem@redhat.com), 2001.
- * Derived from asm-i386/rwsem.h
- */
-#ifndef _SPARC64_RWSEM_H
-#define _SPARC64_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000L
-#define RWSEM_ACTIVE_BIAS		0x00000001L
-#define RWSEM_ACTIVE_MASK		0xffffffffL
-#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	while ((tmp = sem->count) >= 0L) {
-		if (tmp == cmpxchg(&sem->count, tmp,
-				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
-{
-	long tmp;
-
-	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-				  (atomic64_t *)(&sem->count));
-	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-		rwsem_down_write_failed(sem);
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	__down_write_nested(sem, 0);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-		      RWSEM_ACTIVE_WRITE_BIAS);
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
-	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-					 (atomic64_t *)(&sem->count)) < 0L))
-		rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-	atomic64_add(delta, (atomic64_t *)(&sem->count));
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
-	if (tmp < 0L)
-		rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _SPARC64_RWSEM_H */
diff --git a/arch/sparc/include/asm/sigcontext.h b/arch/sparc/include/asm/sigcontext.h
index fc2df1e..f4eb630 100644
--- a/arch/sparc/include/asm/sigcontext.h
+++ b/arch/sparc/include/asm/sigcontext.h
@@ -25,7 +25,7 @@
 	int sigc_oswins;       /* outstanding windows */
 
 	/* stack ptrs for each regwin buf */
-	unsigned sigc_spbuf[__SUNOS_MAXWIN];
+	unsigned int sigc_spbuf[__SUNOS_MAXWIN];
 
 	/* Windows to restore after signal */
 	struct reg_window32 sigc_wbuf[__SUNOS_MAXWIN];
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 56f9338..1d8321c 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -48,6 +48,7 @@
 #define SUN4V_CHIP_SPARC_M6	0x06
 #define SUN4V_CHIP_SPARC_M7	0x07
 #define SUN4V_CHIP_SPARC64X	0x8a
+#define SUN4V_CHIP_SPARC_SN	0x8b
 #define SUN4V_CHIP_UNKNOWN	0xff
 
 #ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index ecb49cf..c6a155c 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -149,7 +149,7 @@
 	 * page size in question.  So for PMD mappings (which fall on
 	 * bit 23, for 8MB per PMD) we must propagate bit 22 for a
 	 * 4MB huge page.  For huge PUDs (which fall on bit 33, for
-	 * 8GB per PUD), we have to accomodate 256MB and 2GB huge
+	 * 8GB per PUD), we have to accommodate 256MB and 2GB huge
 	 * pages.  So for those we propagate bits 32 to 28.
 	 */
 #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL)	\
diff --git a/arch/sparc/include/uapi/asm/stat.h b/arch/sparc/include/uapi/asm/stat.h
index a232e9e..2f0583a 100644
--- a/arch/sparc/include/uapi/asm/stat.h
+++ b/arch/sparc/include/uapi/asm/stat.h
@@ -6,13 +6,13 @@
 #if defined(__sparc__) && defined(__arch64__)
 /* 64 bit sparc */
 struct stat {
-	unsigned   st_dev;
+	unsigned int st_dev;
 	ino_t   st_ino;
 	mode_t  st_mode;
 	short   st_nlink;
 	uid_t   st_uid;
 	gid_t   st_gid;
-	unsigned   st_rdev;
+	unsigned int st_rdev;
 	off_t   st_size;
 	time_t  st_atime;
 	time_t  st_mtime;
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index b6de8b1..36eee81 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -423,8 +423,10 @@
 #define __NR_setsockopt		355
 #define __NR_mlock2		356
 #define __NR_copy_file_range	357
+#define __NR_preadv2		358
+#define __NR_pwritev2		359
 
-#define NR_syscalls		358
+#define NR_syscalls		360
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK	0x00000001
diff --git a/arch/sparc/kernel/audit.c b/arch/sparc/kernel/audit.c
index 24361b4..2585c1e 100644
--- a/arch/sparc/kernel/audit.c
+++ b/arch/sparc/kernel/audit.c
@@ -5,27 +5,27 @@
 
 #include "kernel.h"
 
-static unsigned dir_class[] = {
+static unsigned int dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
 ~0U
 };
 
-static unsigned read_class[] = {
+static unsigned int read_class[] = {
 #include <asm-generic/audit_read.h>
 ~0U
 };
 
-static unsigned write_class[] = {
+static unsigned int write_class[] = {
 #include <asm-generic/audit_write.h>
 ~0U
 };
 
-static unsigned chattr_class[] = {
+static unsigned int chattr_class[] = {
 #include <asm-generic/audit_change_attr.h>
 ~0U
 };
 
-static unsigned signal_class[] = {
+static unsigned int signal_class[] = {
 #include <asm-generic/audit_signal.h>
 ~0U
 };
@@ -39,7 +39,7 @@
 	return 0;
 }
 
-int audit_classify_syscall(int abi, unsigned syscall)
+int audit_classify_syscall(int abi, unsigned int syscall)
 {
 #ifdef CONFIG_COMPAT
 	if (abi == AUDIT_ARCH_SPARC)
diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S
index 4ee1ad4..655628de 100644
--- a/arch/sparc/kernel/cherrs.S
+++ b/arch/sparc/kernel/cherrs.S
@@ -214,8 +214,7 @@
 	subcc		%g1, %g2, %g1		! Next cacheline
 	bge,pt		%icc, 1b
 	 nop
-	ba,pt		%xcc, dcpe_icpe_tl1_common
-	 nop
+	ba,a,pt		%xcc, dcpe_icpe_tl1_common
 
 do_dcpe_tl1_fatal:
 	sethi		%hi(1f), %g7
@@ -224,8 +223,7 @@
 	mov		0x2, %o0
 	call		cheetah_plus_parity_error
 	 add		%sp, PTREGS_OFF, %o1
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		do_dcpe_tl1,.-do_dcpe_tl1
 
 	.globl		do_icpe_tl1
@@ -259,8 +257,7 @@
 	subcc		%g1, %g2, %g1
 	bge,pt		%icc, 1b
 	 nop
-	ba,pt		%xcc, dcpe_icpe_tl1_common
-	 nop
+	ba,a,pt		%xcc, dcpe_icpe_tl1_common
 
 do_icpe_tl1_fatal:
 	sethi		%hi(1f), %g7
@@ -269,8 +266,7 @@
 	mov		0x3, %o0
 	call		cheetah_plus_parity_error
 	 add		%sp, PTREGS_OFF, %o1
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		do_icpe_tl1,.-do_icpe_tl1
 	
 	.type		dcpe_icpe_tl1_common,#function
@@ -456,7 +452,7 @@
 	 cmp		%g2, 0x63
 	be		c_cee
 	 nop
-	ba,pt		%xcc, c_deferred
+	ba,a,pt		%xcc, c_deferred
 	.size		__cheetah_log_error,.-__cheetah_log_error
 
 	/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
diff --git a/arch/sparc/kernel/compat_audit.c b/arch/sparc/kernel/compat_audit.c
index 7062263..e5611cd 100644
--- a/arch/sparc/kernel/compat_audit.c
+++ b/arch/sparc/kernel/compat_audit.c
@@ -2,32 +2,32 @@
 #include <asm/unistd.h>
 #include "kernel.h"
 
-unsigned sparc32_dir_class[] = {
+unsigned int sparc32_dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
 ~0U
 };
 
-unsigned sparc32_chattr_class[] = {
+unsigned int sparc32_chattr_class[] = {
 #include <asm-generic/audit_change_attr.h>
 ~0U
 };
 
-unsigned sparc32_write_class[] = {
+unsigned int sparc32_write_class[] = {
 #include <asm-generic/audit_write.h>
 ~0U
 };
 
-unsigned sparc32_read_class[] = {
+unsigned int sparc32_read_class[] = {
 #include <asm-generic/audit_read.h>
 ~0U
 };
 
-unsigned sparc32_signal_class[] = {
+unsigned int sparc32_signal_class[] = {
 #include <asm-generic/audit_signal.h>
 ~0U
 };
 
-int sparc32_classify_syscall(unsigned syscall)
+int sparc32_classify_syscall(unsigned int syscall)
 {
 	switch(syscall) {
 	case __NR_open:
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index dfad8b1..493e023 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -506,6 +506,12 @@
 		sparc_pmu_type = "sparc-m7";
 		break;
 
+	case SUN4V_CHIP_SPARC_SN:
+		sparc_cpu_type = "SPARC-SN";
+		sparc_fpu_type = "SPARC-SN integrated FPU";
+		sparc_pmu_type = "sparc-sn";
+		break;
+
 	case SUN4V_CHIP_SPARC64X:
 		sparc_cpu_type = "SPARC64-X";
 		sparc_fpu_type = "SPARC64-X integrated FPU";
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index e69ec0e..45c820e 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -328,6 +328,7 @@
 	case SUN4V_CHIP_NIAGARA5:
 	case SUN4V_CHIP_SPARC_M6:
 	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_SN:
 	case SUN4V_CHIP_SPARC64X:
 		rover_inc_table = niagara_iterate_method;
 		break;
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index a83707c..51aa6e8 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1255,7 +1255,7 @@
 kuw_patch1_7win:	sll	%o3, 6, %o3
 
 	/* No matter how much overhead this routine has in the worst
-	 * case scenerio, it is several times better than taking the
+	 * case scenario, it is several times better than taking the
 	 * traps with the old method of just doing flush_user_windows().
 	 */
 kill_user_windows:
diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S
index a686482..336d275 100644
--- a/arch/sparc/kernel/fpu_traps.S
+++ b/arch/sparc/kernel/fpu_traps.S
@@ -100,8 +100,8 @@
 	fmuld		%f0, %f2, %f26
 	faddd		%f0, %f2, %f28
 	fmuld		%f0, %f2, %f30
-	b,pt		%xcc, fpdis_exit
-	 nop
+	ba,a,pt		%xcc, fpdis_exit
+
 2:	andcc		%g5, FPRS_DU, %g0
 	bne,pt		%icc, 3f
 	 fzero		%f32
@@ -144,8 +144,8 @@
 	fmuld		%f32, %f34, %f58
 	faddd		%f32, %f34, %f60
 	fmuld		%f32, %f34, %f62
-	ba,pt		%xcc, fpdis_exit
-	 nop
+	ba,a,pt		%xcc, fpdis_exit
+
 3:	mov		SECONDARY_CONTEXT, %g3
 	add		%g6, TI_FPREGS, %g1
 
@@ -197,8 +197,7 @@
 fp_other_bounce:
 	call		do_fpother
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		fp_other_bounce,.-fp_other_bounce
 
 	.align		32
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index cd1f592..a076b42 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -414,6 +414,8 @@
 	cmp	%g2, 'T'
 	be,pt	%xcc, 70f
 	 cmp	%g2, 'M'
+	be,pt	%xcc, 70f
+	 cmp	%g2, 'S'
 	bne,pn	%xcc, 49f
 	 nop
 
@@ -433,6 +435,9 @@
 	cmp	%g2, '7'
 	be,pt	%xcc, 5f
 	 mov	SUN4V_CHIP_SPARC_M7, %g4
+	cmp	%g2, 'N'
+	be,pt	%xcc, 5f
+	 mov	SUN4V_CHIP_SPARC_SN, %g4
 	ba,pt	%xcc, 49f
 	 nop
 
@@ -461,9 +466,8 @@
 	subcc	%g3, 1, %g3
 	bne,pt	%xcc, 41b
 	add	%g1, 1, %g1
-	mov	SUN4V_CHIP_SPARC64X, %g4
 	ba,pt	%xcc, 5f
-	nop
+	 mov	SUN4V_CHIP_SPARC64X, %g4
 
 49:
 	mov	SUN4V_CHIP_UNKNOWN, %g4
@@ -548,8 +552,7 @@
 	stxa		%g0, [%g7] ASI_DMMU
 	membar	#Sync
 
-	ba,pt		%xcc, sun4u_continue
-	 nop
+	ba,a,pt		%xcc, sun4u_continue
 
 sun4v_init:
 	/* Set ctx 0 */
@@ -560,14 +563,12 @@
 	mov		SECONDARY_CONTEXT, %g7
 	stxa		%g0, [%g7] ASI_MMU
 	membar		#Sync
-	ba,pt		%xcc, niagara_tlb_fixup
-	 nop
+	ba,a,pt		%xcc, niagara_tlb_fixup
 
 sun4u_continue:
 	BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
 
-	ba,pt	%xcc, spitfire_tlb_fixup
-	 nop
+	ba,a,pt	%xcc, spitfire_tlb_fixup
 
 niagara_tlb_fixup:
 	mov	3, %g2		/* Set TLB type to hypervisor. */
@@ -597,6 +598,9 @@
 	cmp	%g1, SUN4V_CHIP_SPARC_M7
 	be,pt	%xcc, niagara4_patch
 	 nop
+	cmp	%g1, SUN4V_CHIP_SPARC_SN
+	be,pt	%xcc, niagara4_patch
+	 nop
 
 	call	generic_patch_copyops
 	 nop
@@ -639,8 +643,7 @@
 	call	hypervisor_patch_cachetlbops
 	 nop
 
-	ba,pt	%xcc, tlb_fixup_done
-	 nop
+	ba,a,pt	%xcc, tlb_fixup_done
 
 cheetah_tlb_fixup:
 	mov	2, %g2		/* Set TLB type to cheetah+. */
@@ -659,8 +662,7 @@
 	call	cheetah_patch_cachetlbops
 	 nop
 
-	ba,pt	%xcc, tlb_fixup_done
-	 nop
+	ba,a,pt	%xcc, tlb_fixup_done
 
 spitfire_tlb_fixup:
 	/* Set TLB type to spitfire. */
@@ -774,8 +776,7 @@
 	call	%o1
 	 add	%sp, (2047 + 128), %o0
 
-	ba,pt	%xcc, 2f
-	 nop
+	ba,a,pt	%xcc, 2f
 
 1:	sethi	%hi(sparc64_ttable_tl0), %o0
 	set	prom_set_trap_table_name, %g2
@@ -814,8 +815,7 @@
 
 	BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
 
-	ba,pt	%xcc, 2f
-	 nop
+	ba,a,pt	%xcc, 2f
 
 	/* Disable STICK_INT interrupts. */
 1:
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 28fed53..ffd5ff4 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -131,7 +131,7 @@
 EXPORT_SYMBOL(ioremap);
 
 /*
- * Comlimentary to ioremap().
+ * Complementary to ioremap().
  */
 void iounmap(volatile void __iomem *virtual)
 {
@@ -233,7 +233,7 @@
 }
 
 /*
- * Comlimentary to _sparc_ioremap().
+ * Complementary to _sparc_ioremap().
  */
 static void _sparc_free_io(struct resource *res)
 {
@@ -532,7 +532,7 @@
 }
 
 /* Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scather-gather version of the
+ * mode for DMA.  This is the scatter-gather version of the
  * above pci_map_single interface.  Here the scatter gather list
  * elements are each tagged with the appropriate dma address
  * and length.  They are obtained via sg_dma_{address,length}(SG).
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index e7f652b..5057ec2 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -54,12 +54,12 @@
 asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);
 
 /* compat_audit.c */
-extern unsigned sparc32_dir_class[];
-extern unsigned sparc32_chattr_class[];
-extern unsigned sparc32_write_class[];
-extern unsigned sparc32_read_class[];
-extern unsigned sparc32_signal_class[];
-int sparc32_classify_syscall(unsigned syscall);
+extern unsigned int sparc32_dir_class[];
+extern unsigned int sparc32_chattr_class[];
+extern unsigned int sparc32_write_class[];
+extern unsigned int sparc32_read_class[];
+extern unsigned int sparc32_signal_class[];
+int sparc32_classify_syscall(unsigned int syscall);
 #endif
 
 #ifdef CONFIG_SPARC32
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 42efcf8..33cd171 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -203,7 +203,7 @@
 
 /*
  * Build a LEON IRQ for the edge triggered LEON IRQ controller:
- *  Edge (normal) IRQ           - handle_simple_irq, ack=DONT-CARE, never ack
+ *  Edge (normal) IRQ           - handle_simple_irq, ack=DON'T-CARE, never ack
  *  Level IRQ (PCI|Level-GPIO)  - handle_fasteoi_irq, ack=1, ack after ISR
  *  Per-CPU Edge                - handle_percpu_irq, ack=0
  */
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
index 753b4f0..34b4933 100644
--- a/arch/sparc/kernel/misctrap.S
+++ b/arch/sparc/kernel/misctrap.S
@@ -18,8 +18,7 @@
 109:	or		%g7, %lo(109b), %g7
 	call		do_privact
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		__do_privact,.-__do_privact
 
 	.type		do_mna,#function
@@ -46,8 +45,7 @@
 	mov		%l5, %o2
 	call		mem_address_unaligned
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		do_mna,.-do_mna
 
 	.type		do_lddfmna,#function
@@ -65,8 +63,7 @@
 	mov		%l5, %o2
 	call		handle_lddfmna
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		do_lddfmna,.-do_lddfmna
 
 	.type		do_stdfmna,#function
@@ -84,8 +81,7 @@
 	mov		%l5, %o2
 	call		handle_stdfmna
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		do_stdfmna,.-do_stdfmna
 
 	.type		breakpoint_trap,#function
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index badf095..c2b202d 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -245,6 +245,18 @@
 	}
 }
 
+static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
+				  void *stc, void *host_controller,
+				  struct platform_device  *op,
+				  int numa_node)
+{
+	sd->iommu = iommu;
+	sd->stc = stc;
+	sd->host_controller = host_controller;
+	sd->op = op;
+	sd->numa_node = numa_node;
+}
+
 static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 					 struct device_node *node,
 					 struct pci_bus *bus, int devfn)
@@ -259,13 +271,10 @@
 	if (!dev)
 		return NULL;
 
+	op = of_find_device_by_node(node);
 	sd = &dev->dev.archdata;
-	sd->iommu = pbm->iommu;
-	sd->stc = &pbm->stc;
-	sd->host_controller = pbm;
-	sd->op = op = of_find_device_by_node(node);
-	sd->numa_node = pbm->numa_node;
-
+	pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
+			      pbm->numa_node);
 	sd = &op->dev.archdata;
 	sd->iommu = pbm->iommu;
 	sd->stc = &pbm->stc;
@@ -994,6 +1003,27 @@
 	/* No special bus mastering setup handling */
 }
 
+#ifdef CONFIG_PCI_IOV
+int pcibios_add_device(struct pci_dev *dev)
+{
+	struct pci_dev *pdev;
+
+	/* Add sriov arch specific initialization here.
+	 * Copy dev_archdata from PF to VF
+	 */
+	if (dev->is_virtfn) {
+		struct dev_archdata *psd;
+
+		pdev = dev->physfn;
+		psd = &pdev->dev.archdata;
+		pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
+				      psd->stc, psd->host_controller, NULL,
+				      psd->numa_node);
+	}
+	return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
 static int __init pcibios_init(void)
 {
 	pci_dfl_cache_line_size = 64 >> 2;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6596f66..a4b8b5a 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1756,7 +1756,7 @@
 			}
 		}
 #endif
-	} while (entry->nr < PERF_MAX_STACK_DEPTH);
+	} while (entry->nr < sysctl_perf_event_max_stack);
 }
 
 static inline int
@@ -1790,7 +1790,7 @@
 		pc = sf.callers_pc;
 		ufp = (unsigned long)sf.fp + STACK_BIAS;
 		perf_callchain_store(entry, pc);
-	} while (entry->nr < PERF_MAX_STACK_DEPTH);
+	} while (entry->nr < sysctl_perf_event_max_stack);
 }
 
 static void perf_callchain_user_32(struct perf_callchain_entry *entry,
@@ -1822,7 +1822,7 @@
 			ufp = (unsigned long)sf.fp;
 		}
 		perf_callchain_store(entry, pc);
-	} while (entry->nr < PERF_MAX_STACK_DEPTH);
+	} while (entry->nr < sysctl_perf_event_max_stack);
 }
 
 void
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 46a5964..c16ef1a 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -103,7 +103,7 @@
 	mm_segment_t old_fs;
 	
 	__asm__ __volatile__ ("flushw");
-	rw = compat_ptr((unsigned)regs->u_regs[14]);
+	rw = compat_ptr((unsigned int)regs->u_regs[14]);
 	old_fs = get_fs();
 	set_fs (USER_DS);
 	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index baef495..69d75ff 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -109,7 +109,7 @@
 unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */
 
 static void
-prom_console_write(struct console *con, const char *s, unsigned n)
+prom_console_write(struct console *con, const char *s, unsigned int n)
 {
 	prom_write(s, n);
 }
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index f3185e2..599f120 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -77,7 +77,7 @@
 };
 
 static void
-prom_console_write(struct console *con, const char *s, unsigned n)
+prom_console_write(struct console *con, const char *s, unsigned int n)
 {
 	prom_write(s, n);
 }
@@ -285,7 +285,8 @@
 
 	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
 				&__sun4v_2insn_patch_end);
-	if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
+	if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+	    sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
 		sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
 					 &__sun_m7_2insn_patch_end);
 
@@ -524,6 +525,7 @@
 		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
 		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
 		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
 		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
 			cap |= HWCAP_SPARC_BLKINIT;
 		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
@@ -532,6 +534,7 @@
 		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
 		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
 		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
 		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
 			cap |= HWCAP_SPARC_N2;
 	}
@@ -561,6 +564,7 @@
 			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
 			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
 			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
 			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
 				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
 					AV_SPARC_ASI_BLK_INIT |
@@ -570,6 +574,7 @@
 			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
 			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
 			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
 			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
 				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
 					AV_SPARC_FMAF);
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 4eed773..3c25241 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -144,7 +144,7 @@
 	compat_uptr_t fpu_save;
 	compat_uptr_t rwin_save;
 	unsigned int psr;
-	unsigned pc, npc;
+	unsigned int pc, npc;
 	sigset_t set;
 	compat_sigset_t seta;
 	int err, i;
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
index c357e40..4a73009 100644
--- a/arch/sparc/kernel/spiterrs.S
+++ b/arch/sparc/kernel/spiterrs.S
@@ -85,8 +85,7 @@
 	ba,pt		%xcc, etraptl1
 	 rd		%pc, %g7
 
-	ba,pt		%xcc, 2f
-	 nop
+	ba,a,pt		%xcc, 2f
 
 1:	ba,pt		%xcc, etrap_irq
 	 rd		%pc, %g7
@@ -100,8 +99,7 @@
 	mov		%l5, %o2
 	call		spitfire_access_error
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		__spitfire_access_error,.-__spitfire_access_error
 
 	/* This is the trap handler entry point for ECC correctable
@@ -179,8 +177,7 @@
 	mov		%l5, %o2
 	call		spitfire_data_access_exception_tl1
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		__spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
 
 	.type		__spitfire_data_access_exception,#function
@@ -200,8 +197,7 @@
 	mov		%l5, %o2
 	call		spitfire_data_access_exception
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		__spitfire_data_access_exception,.-__spitfire_data_access_exception
 
 	.type		__spitfire_insn_access_exception_tl1,#function
@@ -220,8 +216,7 @@
 	mov		%l5, %o2
 	call		spitfire_insn_access_exception_tl1
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		__spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
 
 	.type		__spitfire_insn_access_exception,#function
@@ -240,6 +235,5 @@
 	mov		%l5, %o2
 	call		spitfire_insn_access_exception
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 	.size		__spitfire_insn_access_exception,.-__spitfire_insn_access_exception
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index b489e97..fe8b8ee 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -337,10 +337,10 @@
 		switch (call) {
 		case SEMOP:
 			err = sys_semtimedop(first, ptr,
-					     (unsigned)second, NULL);
+					     (unsigned int)second, NULL);
 			goto out;
 		case SEMTIMEDOP:
-			err = sys_semtimedop(first, ptr, (unsigned)second,
+			err = sys_semtimedop(first, ptr, (unsigned int)second,
 				(const struct timespec __user *)
 					     (unsigned long) fifth);
 			goto out;
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 7f41d40..fa8e21a 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -1,4 +1,4 @@
-/* sysfs.c: Toplogy sysfs support code for sparc64.
+/* sysfs.c: Topology sysfs support code for sparc64.
  *
  * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
  */
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 6c3dd6c..eac7f0d 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -88,4 +88,4 @@
 /*340*/	.long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 /*345*/	.long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/	.long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/	.long sys_setsockopt, sys_mlock2, sys_copy_file_range
+/*355*/	.long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 12b524c..b0f17ff 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -89,7 +89,7 @@
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 	.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/	.word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-	.word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
+	.word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
 
 #endif /* CONFIG_COMPAT */
 
@@ -170,4 +170,4 @@
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 	.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/	.word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-	.word sys_setsockopt, sys_mlock2, sys_copy_file_range
+	.word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index d89e97b..9aacb91 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -209,8 +209,8 @@
 	if (size == 16) {
 		size = 8;
 		zero = (((long)(reg_num ?
-		        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
-			(unsigned)fetch_reg(reg_num + 1, regs);
+		        (unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) |
+			(unsigned int)fetch_reg(reg_num + 1, regs);
 	} else if (reg_num) {
 		src_val_p = fetch_reg_addr(reg_num, regs);
 	}
diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S
index b7f0f3f..c731e80 100644
--- a/arch/sparc/kernel/utrap.S
+++ b/arch/sparc/kernel/utrap.S
@@ -11,8 +11,7 @@
 	mov		%l4, %o1
         call		bad_trap
 	 add		%sp, PTREGS_OFF, %o0
-	ba,pt		%xcc, rtrap
-	 nop
+	ba,a,pt		%xcc, rtrap
 
 invoke_utrap:
 	sllx		%g3, 3, %g3
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index cb5789c..f6bb857 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -45,6 +45,14 @@
 	return NULL;
 }
 
+static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
+{
+	const struct vio_dev *vio_dev = to_vio_dev(dev);
+
+	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat);
+	return 0;
+}
+
 static int vio_bus_match(struct device *dev, struct device_driver *drv)
 {
 	struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -105,15 +113,25 @@
 	return sprintf(buf, "%s\n", vdev->type);
 }
 
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	const struct vio_dev *vdev = to_vio_dev(dev);
+
+	return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
+}
+
 static struct device_attribute vio_dev_attrs[] = {
 	__ATTR_RO(devspec),
 	__ATTR_RO(type),
+	__ATTR_RO(modalias),
 	__ATTR_NULL
 };
 
 static struct bus_type vio_bus_type = {
 	.name		= "vio",
 	.dev_attrs	= vio_dev_attrs,
+	.uevent         = vio_hotplug,
 	.match		= vio_bus_match,
 	.probe		= vio_device_probe,
 	.remove		= vio_device_remove,
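
The vio bus now reports a modalias of the form vio:T<type>S<compat> both via uevent and a sysfs attribute, which is what lets udev autoload the matching driver. On the driver side the alias typically comes from a device-id table; a sketch with an illustrative id entry:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct vio_device_id demo_vio_match[] = {
    { .type = "vdc-port", .compat = "" },    /* illustrative type string */
    {},
};
/* emits "vio:T...S..." aliases that match the uevent MODALIAS above */
MODULE_DEVICE_TABLE(vio, demo_vio_match);
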
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index aadd321..7d02b1f 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -33,6 +33,10 @@
 jiffies = jiffies_64;
 #endif
 
+#ifdef CONFIG_SPARC64
+ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
+#endif
+
 SECTIONS
 {
 #ifdef CONFIG_SPARC64
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
index 1e67ce9..855019a 100644
--- a/arch/sparc/kernel/winfixup.S
+++ b/arch/sparc/kernel/winfixup.S
@@ -32,8 +32,7 @@
 	 rd	%pc, %g7
 	call	do_sparc64_fault
 	 add	%sp, PTREGS_OFF, %o0
-	ba,pt	%xcc, rtrap
-	 nop
+	ba,a,pt	%xcc, rtrap
 
 	/* Be very careful about usage of the trap globals here.
 	 * You cannot touch %g5 as that has the fault information.
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index c399e7b..b6c559c 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -303,10 +303,10 @@
 		fixup = search_extables_range(regs->pc, &g2);
 		/* Values below 10 are reserved for other things */
 		if (fixup > 10) {
-			extern const unsigned __memset_start[];
-			extern const unsigned __memset_end[];
-			extern const unsigned __csum_partial_copy_start[];
-			extern const unsigned __csum_partial_copy_end[];
+			extern const unsigned int __memset_start[];
+			extern const unsigned int __memset_end[];
+			extern const unsigned int __csum_partial_copy_start[];
+			extern const unsigned int __csum_partial_copy_end[];
 
 #ifdef DEBUG_EXCEPTIONS
 			printk("Exception: PC<%08lx> faddr<%08lx>\n",
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1cfe6aa..09e8388 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1769,6 +1769,7 @@
 			max_phys_bits = 47;
 			break;
 		case SUN4V_CHIP_SPARC_M7:
+		case SUN4V_CHIP_SPARC_SN:
 		default:
 			/* M7 and later support 52-bit virtual addresses.  */
 			sparc64_va_hole_top =    0xfff8000000000000UL;
@@ -1986,6 +1987,7 @@
 	 */
 	switch (sun4v_chip_type) {
 	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_SN:
 		pagecv_flag = 0x00;
 		break;
 	default:
@@ -2138,6 +2140,7 @@
 	 */
 	switch (sun4v_chip_type) {
 	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_SN:
 		page_cache4v_flag = _PAGE_CP_4V;
 		break;
 	default:
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 3e6e05a..a6d9204 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -351,7 +351,7 @@
  *
  * Sometimes we need to emit a branch earlier in the code
  * sequence.  And in these situations we adjust "destination"
- * to accomodate this difference.  For example, if we needed
+ * to accommodate this difference.  For example, if we needed
  * to emit a branch (and it's delay slot) right before the
  * final instruction emitted for a BPF opcode, we'd use
  * "destination + 4" instead of just plain "destination" above.
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 3f3dfb8..7189055 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -221,8 +221,7 @@
 CONFIG_TUN=y
 CONFIG_VETH=m
 CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123=y
+CONFIG_NET_DSA_MV88E6XXX=y
 CONFIG_SKY2=y
 CONFIG_PTP_1588_CLOCK_TILEGX=y
 # CONFIG_WLAN is not set
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index ef9e27e..dc85468 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -340,8 +340,7 @@
 CONFIG_TUN=y
 CONFIG_VETH=m
 CONFIG_NET_DSA_MV88E6060=y
-CONFIG_NET_DSA_MV88E6131=y
-CONFIG_NET_DSA_MV88E6123=y
+CONFIG_NET_DSA_MV88E6XXX=y
 # CONFIG_NET_VENDOR_3COM is not set
 CONFIG_E1000E=y
 # CONFIG_WLAN is not set
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h
index c97e416..ff7f50f 100644
--- a/arch/tile/include/hv/drv_mpipe_intf.h
+++ b/arch/tile/include/hv/drv_mpipe_intf.h
@@ -211,7 +211,7 @@
  *  request shared data permission on the same link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
- *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
  */
 #define GXIO_MPIPE_LINK_DATA               0x00000001UL
@@ -219,7 +219,7 @@
 /** Do not request data permission on the specified link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
- *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
  */
 #define GXIO_MPIPE_LINK_NO_DATA            0x00000002UL
@@ -230,7 +230,7 @@
  *  data permission on it, this open will fail.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
- *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
  */
 #define GXIO_MPIPE_LINK_EXCL_DATA          0x00000004UL
@@ -241,7 +241,7 @@
  *  permission on the same link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
- *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
  */
 #define GXIO_MPIPE_LINK_STATS              0x00000008UL
@@ -249,7 +249,7 @@
 /** Do not request stats permission on the specified link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
- *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
  */
 #define GXIO_MPIPE_LINK_NO_STATS           0x00000010UL
@@ -267,7 +267,7 @@
  *  reset by other statistics programs.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
- *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
  */
 #define GXIO_MPIPE_LINK_EXCL_STATS         0x00000020UL
@@ -278,7 +278,7 @@
  *  permission on the same link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
- *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
  */
 #define GXIO_MPIPE_LINK_CTL                0x00000040UL
@@ -286,7 +286,7 @@
 /** Do not request control permission on the specified link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
- *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
  */
 #define GXIO_MPIPE_LINK_NO_CTL             0x00000080UL
@@ -301,7 +301,7 @@
  *  it prevents programs like mpipe-link from configuring the link.
  *
  *  No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
- *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open()
+ *  or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
  */
 #define GXIO_MPIPE_LINK_EXCL_CTL           0x00000100UL
@@ -311,7 +311,7 @@
  *  change the desired state of the link when it is closed or the process
  *  exits.  No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
- *  ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open()
+ *  ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
  */
 #define GXIO_MPIPE_LINK_AUTO_UP            0x00000200UL
@@ -322,7 +322,7 @@
  *  open, set the desired state of the link to down.  No more than one of
  *  ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
  *  ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
- *  specifed in a gxio_mpipe_link_open() call.  If none are specified,
+ *  specified in a gxio_mpipe_link_open() call.  If none are specified,
  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
  */
 #define GXIO_MPIPE_LINK_AUTO_UPDOWN        0x00000400UL
@@ -332,7 +332,7 @@
  *  process has the link open, set the desired state of the link to down.
  *  No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
- *  ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open()
+ *  ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
  *  call.  If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
  */
 #define GXIO_MPIPE_LINK_AUTO_DOWN          0x00000800UL
@@ -342,7 +342,7 @@
  *  closed or the process exits.  No more than one of
  *  ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
  *  ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
- *  specifed in a gxio_mpipe_link_open() call.  If none are specified,
+ *  specified in a gxio_mpipe_link_open() call.  If none are specified,
  *  ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
  */
 #define GXIO_MPIPE_LINK_AUTO_NONE          0x00001000UL
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
index a506c2c..9247d6b 100644
--- a/arch/tile/kernel/kgdb.c
+++ b/arch/tile/kernel/kgdb.c
@@ -126,15 +126,15 @@
 sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 {
 	struct pt_regs *thread_regs;
+	const int NGPRS = TREG_LAST_GPR + 1;
 
 	if (task == NULL)
 		return;
 
-	/* Initialize to zero. */
-	memset(gdb_regs, 0, NUMREGBYTES);
-
 	thread_regs = task_pt_regs(task);
-	memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long));
+	memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long));
+	memset(&gdb_regs[NGPRS], 0,
+	       (TILEGX_PC_REGNUM - NGPRS) * sizeof(unsigned long));
 	gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
 	gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
 }
@@ -433,9 +433,9 @@
 struct kgdb_arch arch_kgdb_ops;
 
 /*
- * kgdb_arch_init - Perform any architecture specific initalization.
+ * kgdb_arch_init - Perform any architecture specific initialization.
  *
- * This function will handle the initalization of any architecture
+ * This function will handle the initialization of any architecture
  * specific callbacks.
  */
 int kgdb_arch_init(void)
@@ -447,9 +447,9 @@
 }
 
 /*
- * kgdb_arch_exit - Perform any architecture specific uninitalization.
+ * kgdb_arch_exit - Perform any architecture specific uninitialization.
  *
- * This function will handle the uninitalization of any architecture
+ * This function will handle the uninitialization of any architecture
  * specific callbacks, for dynamic registration and unregistration.
  */
 void kgdb_arch_exit(void)
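
[Not part of the patch] The kgdb hunk above replaces "memset everything, then copy TREG_LAST_GPR registers" with "copy all TREG_LAST_GPR + 1 general-purpose registers, then zero only the slots between the GPRs and the PC" -- the old code stopped one register short and cleared the whole buffer redundantly. A minimal, self-contained sketch of the same copy-then-zero pattern (the constants are illustrative stand-ins, not the tile values):

    #include <string.h>

    #define NGPRS      56   /* illustrative: number of general-purpose registers   */
    #define PC_REGNUM  64   /* illustrative: index of the PC slot in the gdb array */

    /* Copy the first NGPRS slots from src, zero the slots between NGPRS and PC. */
    static void copy_then_zero(unsigned long *dst, const unsigned long *src)
    {
            memcpy(dst, src, NGPRS * sizeof(unsigned long));
            memset(&dst[NGPRS], 0, (PC_REGNUM - NGPRS) * sizeof(unsigned long));
            /* The PC and fault-number slots are then filled in explicitly. */
    }
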
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 4c017d0..aa2b44c 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -1326,7 +1326,7 @@
 
 
 /*
- * See tile_cfg_read() for relevent comments.
+ * See tile_cfg_read() for relevant comments.
  * Note that "val" is the value to write, not a pointer to that value.
  */
 static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 9ef669d..2cd5b68 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -223,7 +223,7 @@
 	if (len == skb->len) {
 		dev->stats.tx_packets++;
 		dev->stats.tx_bytes += skb->len;
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 		netif_start_queue(dev);
 
 		/* this is normally done in the interrupt when tx finishes */
@@ -252,7 +252,7 @@
 
 static void uml_net_tx_timeout(struct net_device *dev)
 {
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	netif_wake_queue(dev);
 }
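
[Not part of the patch] Both hunks in net_kern.c swap the open-coded "dev->trans_start = jiffies" for the netif_trans_update() helper, part of a tree-wide conversion that moves the "last transmit" timestamp bookkeeping out of individual drivers. A hedged sketch of how a driver transmit path typically uses it (the surrounding function and error handling are illustrative only):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* ... hand the frame to the hardware, free or queue the skb ... */

            dev->stats.tx_packets++;
            dev->stats.tx_bytes += skb->len;
            netif_trans_update(dev);        /* record transmit time for the watchdog */

            return NETDEV_TX_OK;
    }
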
 
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 39ba207..17e96dc 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -862,7 +862,7 @@
 		goto out;
 	}
 	ubd_dev->queue->queuedata = ubd_dev;
-	blk_queue_flush(ubd_dev->queue, REQ_FLUSH);
+	blk_queue_write_cache(ubd_dev->queue, true, false);
 
 	blk_queue_max_segments(ubd_dev->queue, MAX_SG);
 	err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
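
[Not part of the patch] The ubd hunk converts blk_queue_flush(q, REQ_FLUSH) to blk_queue_write_cache(q, true, false): the queue advertises a volatile write-back cache (so flush requests are issued) but no FUA support. A minimal hedged usage sketch:

    #include <linux/blkdev.h>

    static void example_setup_queue(struct request_queue *q)
    {
            /* Second argument: write-back cache present; third: FUA writes supported. */
            blk_queue_write_cache(q, true, false);
    }
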
diff --git a/arch/unicore32/kernel/gpio.c b/arch/unicore32/kernel/gpio.c
index 5ab2379..49347a0 100644
--- a/arch/unicore32/kernel/gpio.c
+++ b/arch/unicore32/kernel/gpio.c
@@ -14,6 +14,8 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/gpio/driver.h>
+/* FIXME: needed for gpio_set_value() - convert to use descriptors or hogs */
 #include <linux/gpio.h>
 #include <mach/hardware.h>
 
@@ -118,5 +120,5 @@
  *	gpio_set_value(GPO_SET_V2, 1);
  */
 #endif
-	gpiochip_add(&puv3_gpio_chip);
+	gpiochip_add_data(&puv3_gpio_chip, NULL);
 }
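
[Not part of the patch] gpiochip_add() becomes gpiochip_add_data(chip, NULL); the second argument is an opaque per-chip pointer that callbacks can later retrieve with gpiochip_get_data(). The unicore32 driver passes NULL, but a sketch of a driver that does use the data pointer looks roughly like this (names and register layout are illustrative):

    #include <linux/gpio/driver.h>
    #include <linux/io.h>
    #include <linux/bitops.h>

    struct example_gpio {
            struct gpio_chip chip;
            void __iomem *regs;
    };

    static int example_get(struct gpio_chip *chip, unsigned int offset)
    {
            struct example_gpio *eg = gpiochip_get_data(chip);

            return !!(readl(eg->regs) & BIT(offset));
    }

    static int example_register(struct example_gpio *eg)
    {
            eg->chip.get = example_get;
            /* ... label, base, ngpio, remaining callbacks ... */
            return gpiochip_add_data(&eg->chip, eg);
    }
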
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2dc18605..ace79d2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -91,7 +91,7 @@
 	select HAVE_ARCH_SOFT_DIRTY		if X86_64
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
-	select HAVE_BPF_JIT			if X86_64
+	select HAVE_EBPF_JIT			if X86_64
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_CMPXCHG_LOCAL
@@ -164,10 +164,6 @@
 	def_bool y
 	depends on KPROBES || PERF_EVENTS || UPROBES
 
-config PERF_EVENTS_INTEL_UNCORE
-	def_bool y
-	depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
-
 config OUTPUT_FORMAT
 	string
 	default "elf32-i386" if X86_32
@@ -1046,6 +1042,8 @@
 	def_bool y
 	depends on X86_MCE_INTEL
 
+source "arch/x86/events/Kconfig"
+
 config X86_LEGACY_VM86
 	bool "Legacy VM86 support"
 	default n
@@ -1210,15 +1208,6 @@
 	def_bool y
 	depends on MICROCODE
 
-config PERF_EVENTS_AMD_POWER
-	depends on PERF_EVENTS && CPU_SUP_AMD
-	tristate "AMD Processor Power Reporting Mechanism"
-	---help---
-	  Provide power reporting mechanism support for AMD processors.
-	  Currently, it leverages X86_FEATURE_ACC_POWER
-	  (CPUID Fn8000_0007_EDX[12]) interface to calculate the
-	  average power consumption on Family 15h processors.
-
 config X86_MSR
 	tristate "/dev/cpu/*/msr - Model-specific register support"
 	---help---
@@ -1932,54 +1921,38 @@
 	  (CONFIG_PHYSICAL_START) is used as the minimum location.
 
 config RANDOMIZE_BASE
-	bool "Randomize the address of the kernel image"
+	bool "Randomize the address of the kernel image (KASLR)"
 	depends on RELOCATABLE
 	default n
 	---help---
-	   Randomizes the physical and virtual address at which the
-	   kernel image is decompressed, as a security feature that
-	   deters exploit attempts relying on knowledge of the location
-	   of kernel internals.
+	  In support of Kernel Address Space Layout Randomization (KASLR),
+	  this randomizes the physical address at which the kernel image
+	  is decompressed and the virtual address where the kernel
+	  image is mapped, as a security feature that deters exploit
+	  attempts relying on knowledge of the location of kernel
+	  code internals.
 
-	   Entropy is generated using the RDRAND instruction if it is
-	   supported. If RDTSC is supported, it is used as well. If
-	   neither RDRAND nor RDTSC are supported, then randomness is
-	   read from the i8254 timer.
+	  The kernel physical and virtual address can be randomized
+	  from 16MB up to 1GB on 64-bit and 512MB on 32-bit. (Note that
+	  using RANDOMIZE_BASE reduces the memory space available to
+	  kernel modules from 1.5GB to 1GB.)
 
-	   The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET,
-	   and aligned according to PHYSICAL_ALIGN. Since the kernel is
-	   built using 2GiB addressing, and PHYSICAL_ALGIN must be at a
-	   minimum of 2MiB, only 10 bits of entropy is theoretically
-	   possible. At best, due to page table layouts, 64-bit can use
-	   9 bits of entropy and 32-bit uses 8 bits.
+	  Entropy is generated using the RDRAND instruction if it is
+	  supported. If RDTSC is supported, its value is mixed into
+	  the entropy pool as well. If neither RDRAND nor RDTSC are
+	  supported, then entropy is read from the i8254 timer.
 
-	   If unsure, say N.
+	  Since the kernel is built using 2GB addressing, and
+	  PHYSICAL_ALIGN must be at a minimum of 2MB, only 10 bits of
+	  entropy is theoretically possible. Currently, with the
+	  default value for PHYSICAL_ALIGN and due to page table
+	  layouts, 64-bit uses 9 bits of entropy and 32-bit uses 8 bits.
 
-config RANDOMIZE_BASE_MAX_OFFSET
-	hex "Maximum kASLR offset allowed" if EXPERT
-	depends on RANDOMIZE_BASE
-	range 0x0 0x20000000 if X86_32
-	default "0x20000000" if X86_32
-	range 0x0 0x40000000 if X86_64
-	default "0x40000000" if X86_64
-	---help---
-	  The lesser of RANDOMIZE_BASE_MAX_OFFSET and available physical
-	  memory is used to determine the maximal offset in bytes that will
-	  be applied to the kernel when kernel Address Space Layout
-	  Randomization (kASLR) is active. This must be a multiple of
-	  PHYSICAL_ALIGN.
+	  If CONFIG_HIBERNATE is also enabled, KASLR is disabled at boot
+	  time. To enable it, boot with "kaslr" on the kernel command
+	  line (which will also disable hibernation).
 
-	  On 32-bit this is limited to 512MiB by page table layouts. The
-	  default is 512MiB.
-
-	  On 64-bit this is limited by how the kernel fixmap page table is
-	  positioned, so this cannot be larger than 1GiB currently. Without
-	  RANDOMIZE_BASE, there is a 512MiB to 1.5GiB split between kernel
-	  and modules. When RANDOMIZE_BASE_MAX_OFFSET is above 512MiB, the
-	  modules area will shrink to compensate, up to the current maximum
-	  1GiB to 1GiB split. The default is 1GiB.
-
-	  If unsure, leave at the default value.
+	  If unsure, say N.
 
 # Relocation on x86 needs some additional build support
 config X86_NEED_RELOCS
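
[Not part of the patch] The entropy figures in the new RANDOMIZE_BASE help text follow directly from the alignment and the per-architecture ranges: with 2 MB alignment, the 2 GB addressing window holds 1024 positions (10 bits), the 1 GB 64-bit range holds 512 (9 bits), and the 512 MB 32-bit range holds 256 (8 bits). A small illustrative calculation (the constants are stand-ins for PHYSICAL_ALIGN and the per-arch limits, not read from the config):

    #include <stdio.h>

    int main(void)
    {
            const unsigned long align    = 2UL << 20;       /* PHYSICAL_ALIGN: 2 MB       */
            const unsigned long window   = 2UL << 30;       /* 2 GB kernel addressing     */
            const unsigned long range_64 = 1UL << 30;       /* 64-bit randomization range */
            const unsigned long range_32 = 512UL << 20;     /* 32-bit randomization range */

            printf("theoretical: %2d bits\n", __builtin_ctzl(window / align));
            printf("64-bit:      %2d bits\n", __builtin_ctzl(range_64 / align));
            printf("32-bit:      %2d bits\n", __builtin_ctzl(range_32 / align));
            return 0;
    }
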
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 4086abc..6fce7f0 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -208,7 +208,8 @@
 
 head-y := arch/x86/kernel/head_$(BITS).o
 head-y += arch/x86/kernel/head$(BITS).o
-head-y += arch/x86/kernel/head.o
+head-y += arch/x86/kernel/ebda.o
+head-y += arch/x86/kernel/platform-quirks.o
 
 libs-y  += arch/x86/lib/
 
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index b1ef9e4..700a9c6 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -86,16 +86,7 @@
 
 SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
 
-sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
-
-quiet_cmd_voffset = VOFFSET $@
-      cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
-
-targets += voffset.h
-$(obj)/voffset.h: vmlinux FORCE
-	$(call if_changed,voffset)
-
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 
 quiet_cmd_zoffset = ZOFFSET $@
       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
@@ -106,7 +97,7 @@
 
 
 AFLAGS_header.o += -I$(obj)
-$(obj)/header.o: $(obj)/voffset.h $(obj)/zoffset.h
+$(obj)/header.o: $(obj)/zoffset.h
 
 LDFLAGS_setup.elf	:= -T
 $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 6915ff2..cfdd8c3 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -26,7 +26,7 @@
 	vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
-KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
+KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small
@@ -40,17 +40,44 @@
 UBSAN_SANITIZE :=n
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
+ifeq ($(CONFIG_RELOCATABLE),y)
+# If kernel is relocatable, build compressed kernel as PIE.
+ifeq ($(CONFIG_X86_32),y)
+LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
+else
+# To build 64-bit compressed kernel as PIE, we disable relocation
+# overflow check to avoid relocation overflow error with a new linker
+# command-line option, -z noreloc-overflow.
+LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
+	&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
+endif
+endif
 LDFLAGS_vmlinux := -T
 
 hostprogs-y	:= mkpiggy
 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
 
+sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
+
+quiet_cmd_voffset = VOFFSET $@
+      cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
+
+targets += ../voffset.h
+
+$(obj)/../voffset.h: vmlinux FORCE
+	$(call if_changed,voffset)
+
+$(obj)/misc.o: $(obj)/../voffset.h
+
 vmlinux-objs-y := $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
-	$(obj)/string.o $(obj)/cmdline.o \
+	$(obj)/string.o $(obj)/cmdline.o $(obj)/error.o \
 	$(obj)/piggy.o $(obj)/cpuflags.o
 
 vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o
-vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/aslr.o
+vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
+ifdef CONFIG_X86_64
+	vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o
+endif
 
 $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
 
@@ -97,10 +124,8 @@
 suffix-$(CONFIG_KERNEL_LZO) 	:= lzo
 suffix-$(CONFIG_KERNEL_LZ4) 	:= lz4
 
-RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
-	     $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
 quiet_cmd_mkpiggy = MKPIGGY $@
-      cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
+      cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false )
 
 targets += piggy.S
 $(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
deleted file mode 100644
index 6a9b96b..0000000
--- a/arch/x86/boot/compressed/aslr.c
+++ /dev/null
@@ -1,339 +0,0 @@
-#include "misc.h"
-
-#include <asm/msr.h>
-#include <asm/archrandom.h>
-#include <asm/e820.h>
-
-#include <generated/compile.h>
-#include <linux/module.h>
-#include <linux/uts.h>
-#include <linux/utsname.h>
-#include <generated/utsrelease.h>
-
-/* Simplified build-specific string for starting entropy. */
-static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
-		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
-
-#define I8254_PORT_CONTROL	0x43
-#define I8254_PORT_COUNTER0	0x40
-#define I8254_CMD_READBACK	0xC0
-#define I8254_SELECT_COUNTER0	0x02
-#define I8254_STATUS_NOTREADY	0x40
-static inline u16 i8254(void)
-{
-	u16 status, timer;
-
-	do {
-		outb(I8254_PORT_CONTROL,
-		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
-		status = inb(I8254_PORT_COUNTER0);
-		timer  = inb(I8254_PORT_COUNTER0);
-		timer |= inb(I8254_PORT_COUNTER0) << 8;
-	} while (status & I8254_STATUS_NOTREADY);
-
-	return timer;
-}
-
-static unsigned long rotate_xor(unsigned long hash, const void *area,
-				size_t size)
-{
-	size_t i;
-	unsigned long *ptr = (unsigned long *)area;
-
-	for (i = 0; i < size / sizeof(hash); i++) {
-		/* Rotate by odd number of bits and XOR. */
-		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
-		hash ^= ptr[i];
-	}
-
-	return hash;
-}
-
-/* Attempt to create a simple but unpredictable starting entropy. */
-static unsigned long get_random_boot(void)
-{
-	unsigned long hash = 0;
-
-	hash = rotate_xor(hash, build_str, sizeof(build_str));
-	hash = rotate_xor(hash, real_mode, sizeof(*real_mode));
-
-	return hash;
-}
-
-static unsigned long get_random_long(void)
-{
-#ifdef CONFIG_X86_64
-	const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
-#else
-	const unsigned long mix_const = 0x3f39e593UL;
-#endif
-	unsigned long raw, random = get_random_boot();
-	bool use_i8254 = true;
-
-	debug_putstr("KASLR using");
-
-	if (has_cpuflag(X86_FEATURE_RDRAND)) {
-		debug_putstr(" RDRAND");
-		if (rdrand_long(&raw)) {
-			random ^= raw;
-			use_i8254 = false;
-		}
-	}
-
-	if (has_cpuflag(X86_FEATURE_TSC)) {
-		debug_putstr(" RDTSC");
-		raw = rdtsc();
-
-		random ^= raw;
-		use_i8254 = false;
-	}
-
-	if (use_i8254) {
-		debug_putstr(" i8254");
-		random ^= i8254();
-	}
-
-	/* Circular multiply for better bit diffusion */
-	asm("mul %3"
-	    : "=a" (random), "=d" (raw)
-	    : "a" (random), "rm" (mix_const));
-	random += raw;
-
-	debug_putstr("...\n");
-
-	return random;
-}
-
-struct mem_vector {
-	unsigned long start;
-	unsigned long size;
-};
-
-#define MEM_AVOID_MAX 5
-static struct mem_vector mem_avoid[MEM_AVOID_MAX];
-
-static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
-{
-	/* Item at least partially before region. */
-	if (item->start < region->start)
-		return false;
-	/* Item at least partially after region. */
-	if (item->start + item->size > region->start + region->size)
-		return false;
-	return true;
-}
-
-static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
-{
-	/* Item one is entirely before item two. */
-	if (one->start + one->size <= two->start)
-		return false;
-	/* Item one is entirely after item two. */
-	if (one->start >= two->start + two->size)
-		return false;
-	return true;
-}
-
-static void mem_avoid_init(unsigned long input, unsigned long input_size,
-			   unsigned long output, unsigned long output_size)
-{
-	u64 initrd_start, initrd_size;
-	u64 cmd_line, cmd_line_size;
-	unsigned long unsafe, unsafe_len;
-	char *ptr;
-
-	/*
-	 * Avoid the region that is unsafe to overlap during
-	 * decompression (see calculations at top of misc.c).
-	 */
-	unsafe_len = (output_size >> 12) + 32768 + 18;
-	unsafe = (unsigned long)input + input_size - unsafe_len;
-	mem_avoid[0].start = unsafe;
-	mem_avoid[0].size = unsafe_len;
-
-	/* Avoid initrd. */
-	initrd_start  = (u64)real_mode->ext_ramdisk_image << 32;
-	initrd_start |= real_mode->hdr.ramdisk_image;
-	initrd_size  = (u64)real_mode->ext_ramdisk_size << 32;
-	initrd_size |= real_mode->hdr.ramdisk_size;
-	mem_avoid[1].start = initrd_start;
-	mem_avoid[1].size = initrd_size;
-
-	/* Avoid kernel command line. */
-	cmd_line  = (u64)real_mode->ext_cmd_line_ptr << 32;
-	cmd_line |= real_mode->hdr.cmd_line_ptr;
-	/* Calculate size of cmd_line. */
-	ptr = (char *)(unsigned long)cmd_line;
-	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
-		;
-	mem_avoid[2].start = cmd_line;
-	mem_avoid[2].size = cmd_line_size;
-
-	/* Avoid heap memory. */
-	mem_avoid[3].start = (unsigned long)free_mem_ptr;
-	mem_avoid[3].size = BOOT_HEAP_SIZE;
-
-	/* Avoid stack memory. */
-	mem_avoid[4].start = (unsigned long)free_mem_end_ptr;
-	mem_avoid[4].size = BOOT_STACK_SIZE;
-}
-
-/* Does this memory vector overlap a known avoided area? */
-static bool mem_avoid_overlap(struct mem_vector *img)
-{
-	int i;
-	struct setup_data *ptr;
-
-	for (i = 0; i < MEM_AVOID_MAX; i++) {
-		if (mem_overlaps(img, &mem_avoid[i]))
-			return true;
-	}
-
-	/* Avoid all entries in the setup_data linked list. */
-	ptr = (struct setup_data *)(unsigned long)real_mode->hdr.setup_data;
-	while (ptr) {
-		struct mem_vector avoid;
-
-		avoid.start = (unsigned long)ptr;
-		avoid.size = sizeof(*ptr) + ptr->len;
-
-		if (mem_overlaps(img, &avoid))
-			return true;
-
-		ptr = (struct setup_data *)(unsigned long)ptr->next;
-	}
-
-	return false;
-}
-
-static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
-			   CONFIG_PHYSICAL_ALIGN];
-static unsigned long slot_max;
-
-static void slots_append(unsigned long addr)
-{
-	/* Overflowing the slots list should be impossible. */
-	if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
-			CONFIG_PHYSICAL_ALIGN)
-		return;
-
-	slots[slot_max++] = addr;
-}
-
-static unsigned long slots_fetch_random(void)
-{
-	/* Handle case of no slots stored. */
-	if (slot_max == 0)
-		return 0;
-
-	return slots[get_random_long() % slot_max];
-}
-
-static void process_e820_entry(struct e820entry *entry,
-			       unsigned long minimum,
-			       unsigned long image_size)
-{
-	struct mem_vector region, img;
-
-	/* Skip non-RAM entries. */
-	if (entry->type != E820_RAM)
-		return;
-
-	/* Ignore entries entirely above our maximum. */
-	if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
-		return;
-
-	/* Ignore entries entirely below our minimum. */
-	if (entry->addr + entry->size < minimum)
-		return;
-
-	region.start = entry->addr;
-	region.size = entry->size;
-
-	/* Potentially raise address to minimum location. */
-	if (region.start < minimum)
-		region.start = minimum;
-
-	/* Potentially raise address to meet alignment requirements. */
-	region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
-
-	/* Did we raise the address above the bounds of this e820 region? */
-	if (region.start > entry->addr + entry->size)
-		return;
-
-	/* Reduce size by any delta from the original address. */
-	region.size -= region.start - entry->addr;
-
-	/* Reduce maximum size to fit end of image within maximum limit. */
-	if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
-		region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start;
-
-	/* Walk each aligned slot and check for avoided areas. */
-	for (img.start = region.start, img.size = image_size ;
-	     mem_contains(&region, &img) ;
-	     img.start += CONFIG_PHYSICAL_ALIGN) {
-		if (mem_avoid_overlap(&img))
-			continue;
-		slots_append(img.start);
-	}
-}
-
-static unsigned long find_random_addr(unsigned long minimum,
-				      unsigned long size)
-{
-	int i;
-	unsigned long addr;
-
-	/* Make sure minimum is aligned. */
-	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
-
-	/* Verify potential e820 positions, appending to slots list. */
-	for (i = 0; i < real_mode->e820_entries; i++) {
-		process_e820_entry(&real_mode->e820_map[i], minimum, size);
-	}
-
-	return slots_fetch_random();
-}
-
-unsigned char *choose_kernel_location(struct boot_params *boot_params,
-				      unsigned char *input,
-				      unsigned long input_size,
-				      unsigned char *output,
-				      unsigned long output_size)
-{
-	unsigned long choice = (unsigned long)output;
-	unsigned long random;
-
-#ifdef CONFIG_HIBERNATION
-	if (!cmdline_find_option_bool("kaslr")) {
-		debug_putstr("KASLR disabled by default...\n");
-		goto out;
-	}
-#else
-	if (cmdline_find_option_bool("nokaslr")) {
-		debug_putstr("KASLR disabled by cmdline...\n");
-		goto out;
-	}
-#endif
-
-	boot_params->hdr.loadflags |= KASLR_FLAG;
-
-	/* Record the various known unsafe memory ranges. */
-	mem_avoid_init((unsigned long)input, input_size,
-		       (unsigned long)output, output_size);
-
-	/* Walk e820 and find a random address. */
-	random = find_random_addr(choice, output_size);
-	if (!random) {
-		debug_putstr("KASLR could not find suitable E820 region...\n");
-		goto out;
-	}
-
-	/* Always enforce the minimum. */
-	if (random < choice)
-		goto out;
-
-	choice = random;
-out:
-	return (unsigned char *)choice;
-}
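
[Not part of the patch] The deleted aslr.c (reintroduced below as kaslr.c) seeds its entropy pool by folding the build string and the boot parameters through rotate_xor() -- rotate the running hash by an odd bit count, XOR in the next word -- and then diffuses the result with a widening multiply, adding the high half back into the low half. A hedged, host-side model of those two steps, restricted to the 32-bit constant since plain C has no portable 128-bit product (not kernel code):

    #include <stdint.h>
    #include <stddef.h>

    /* Rotate-and-XOR folding, as in the removed aslr.c / new kaslr.c. */
    static uint32_t rotate_xor32(uint32_t hash, const void *area, size_t size)
    {
            const uint32_t *ptr = area;
            size_t i;

            for (i = 0; i < size / sizeof(hash); i++) {
                    hash = (hash << (32 - 7)) | (hash >> 7);   /* rotate by an odd count */
                    hash ^= ptr[i];
            }
            return hash;
    }

    /* The "circular multiply" diffusion step, modelling the 32-bit mul path. */
    static uint32_t diffuse32(uint32_t v)
    {
            const uint32_t mix_const = 0x3f39e593UL;   /* constant from the patch */
            uint64_t prod = (uint64_t)v * mix_const;

            return (uint32_t)prod + (uint32_t)(prod >> 32);   /* low half + high half */
    }
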
diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c
index b68e303..73ccf63 100644
--- a/arch/x86/boot/compressed/cmdline.c
+++ b/arch/x86/boot/compressed/cmdline.c
@@ -15,9 +15,9 @@
 #include "../cmdline.c"
 static unsigned long get_cmd_line_ptr(void)
 {
-	unsigned long cmd_line_ptr = real_mode->hdr.cmd_line_ptr;
+	unsigned long cmd_line_ptr = boot_params->hdr.cmd_line_ptr;
 
-	cmd_line_ptr |= (u64)real_mode->ext_cmd_line_ptr << 32;
+	cmd_line_ptr |= (u64)boot_params->ext_cmd_line_ptr << 32;
 
 	return cmd_line_ptr;
 }
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 583d539..52fef60 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -571,312 +571,6 @@
 	efi_call_early(free_pool, pci_handle);
 }
 
-static void
-setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
-		 struct efi_pixel_bitmask pixel_info, int pixel_format)
-{
-	if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
-		si->lfb_depth = 32;
-		si->lfb_linelength = pixels_per_scan_line * 4;
-		si->red_size = 8;
-		si->red_pos = 0;
-		si->green_size = 8;
-		si->green_pos = 8;
-		si->blue_size = 8;
-		si->blue_pos = 16;
-		si->rsvd_size = 8;
-		si->rsvd_pos = 24;
-	} else if (pixel_format == PIXEL_BGR_RESERVED_8BIT_PER_COLOR) {
-		si->lfb_depth = 32;
-		si->lfb_linelength = pixels_per_scan_line * 4;
-		si->red_size = 8;
-		si->red_pos = 16;
-		si->green_size = 8;
-		si->green_pos = 8;
-		si->blue_size = 8;
-		si->blue_pos = 0;
-		si->rsvd_size = 8;
-		si->rsvd_pos = 24;
-	} else if (pixel_format == PIXEL_BIT_MASK) {
-		find_bits(pixel_info.red_mask, &si->red_pos, &si->red_size);
-		find_bits(pixel_info.green_mask, &si->green_pos,
-			  &si->green_size);
-		find_bits(pixel_info.blue_mask, &si->blue_pos, &si->blue_size);
-		find_bits(pixel_info.reserved_mask, &si->rsvd_pos,
-			  &si->rsvd_size);
-		si->lfb_depth = si->red_size + si->green_size +
-			si->blue_size + si->rsvd_size;
-		si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
-	} else {
-		si->lfb_depth = 4;
-		si->lfb_linelength = si->lfb_width / 2;
-		si->red_size = 0;
-		si->red_pos = 0;
-		si->green_size = 0;
-		si->green_pos = 0;
-		si->blue_size = 0;
-		si->blue_pos = 0;
-		si->rsvd_size = 0;
-		si->rsvd_pos = 0;
-	}
-}
-
-static efi_status_t
-__gop_query32(struct efi_graphics_output_protocol_32 *gop32,
-	      struct efi_graphics_output_mode_info **info,
-	      unsigned long *size, u64 *fb_base)
-{
-	struct efi_graphics_output_protocol_mode_32 *mode;
-	efi_status_t status;
-	unsigned long m;
-
-	m = gop32->mode;
-	mode = (struct efi_graphics_output_protocol_mode_32 *)m;
-
-	status = efi_early->call(gop32->query_mode, gop32,
-				 mode->mode, size, info);
-	if (status != EFI_SUCCESS)
-		return status;
-
-	*fb_base = mode->frame_buffer_base;
-	return status;
-}
-
-static efi_status_t
-setup_gop32(struct screen_info *si, efi_guid_t *proto,
-	    unsigned long size, void **gop_handle)
-{
-	struct efi_graphics_output_protocol_32 *gop32, *first_gop;
-	unsigned long nr_gops;
-	u16 width, height;
-	u32 pixels_per_scan_line;
-	u32 ext_lfb_base;
-	u64 fb_base;
-	struct efi_pixel_bitmask pixel_info;
-	int pixel_format;
-	efi_status_t status;
-	u32 *handles = (u32 *)(unsigned long)gop_handle;
-	int i;
-
-	first_gop = NULL;
-	gop32 = NULL;
-
-	nr_gops = size / sizeof(u32);
-	for (i = 0; i < nr_gops; i++) {
-		struct efi_graphics_output_mode_info *info = NULL;
-		efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
-		bool conout_found = false;
-		void *dummy = NULL;
-		u32 h = handles[i];
-		u64 current_fb_base;
-
-		status = efi_call_early(handle_protocol, h,
-					proto, (void **)&gop32);
-		if (status != EFI_SUCCESS)
-			continue;
-
-		status = efi_call_early(handle_protocol, h,
-					&conout_proto, &dummy);
-		if (status == EFI_SUCCESS)
-			conout_found = true;
-
-		status = __gop_query32(gop32, &info, &size, &current_fb_base);
-		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
-			/*
-			 * Systems that use the UEFI Console Splitter may
-			 * provide multiple GOP devices, not all of which are
-			 * backed by real hardware. The workaround is to search
-			 * for a GOP implementing the ConOut protocol, and if
-			 * one isn't found, to just fall back to the first GOP.
-			 */
-			width = info->horizontal_resolution;
-			height = info->vertical_resolution;
-			pixel_format = info->pixel_format;
-			pixel_info = info->pixel_information;
-			pixels_per_scan_line = info->pixels_per_scan_line;
-			fb_base = current_fb_base;
-
-			/*
-			 * Once we've found a GOP supporting ConOut,
-			 * don't bother looking any further.
-			 */
-			first_gop = gop32;
-			if (conout_found)
-				break;
-		}
-	}
-
-	/* Did we find any GOPs? */
-	if (!first_gop)
-		goto out;
-
-	/* EFI framebuffer */
-	si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
-	si->lfb_width = width;
-	si->lfb_height = height;
-	si->lfb_base = fb_base;
-
-	ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
-	if (ext_lfb_base) {
-		si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
-		si->ext_lfb_base = ext_lfb_base;
-	}
-
-	si->pages = 1;
-
-	setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
-
-	si->lfb_size = si->lfb_linelength * si->lfb_height;
-
-	si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
-	return status;
-}
-
-static efi_status_t
-__gop_query64(struct efi_graphics_output_protocol_64 *gop64,
-	      struct efi_graphics_output_mode_info **info,
-	      unsigned long *size, u64 *fb_base)
-{
-	struct efi_graphics_output_protocol_mode_64 *mode;
-	efi_status_t status;
-	unsigned long m;
-
-	m = gop64->mode;
-	mode = (struct efi_graphics_output_protocol_mode_64 *)m;
-
-	status = efi_early->call(gop64->query_mode, gop64,
-				 mode->mode, size, info);
-	if (status != EFI_SUCCESS)
-		return status;
-
-	*fb_base = mode->frame_buffer_base;
-	return status;
-}
-
-static efi_status_t
-setup_gop64(struct screen_info *si, efi_guid_t *proto,
-	    unsigned long size, void **gop_handle)
-{
-	struct efi_graphics_output_protocol_64 *gop64, *first_gop;
-	unsigned long nr_gops;
-	u16 width, height;
-	u32 pixels_per_scan_line;
-	u32 ext_lfb_base;
-	u64 fb_base;
-	struct efi_pixel_bitmask pixel_info;
-	int pixel_format;
-	efi_status_t status;
-	u64 *handles = (u64 *)(unsigned long)gop_handle;
-	int i;
-
-	first_gop = NULL;
-	gop64 = NULL;
-
-	nr_gops = size / sizeof(u64);
-	for (i = 0; i < nr_gops; i++) {
-		struct efi_graphics_output_mode_info *info = NULL;
-		efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
-		bool conout_found = false;
-		void *dummy = NULL;
-		u64 h = handles[i];
-		u64 current_fb_base;
-
-		status = efi_call_early(handle_protocol, h,
-					proto, (void **)&gop64);
-		if (status != EFI_SUCCESS)
-			continue;
-
-		status = efi_call_early(handle_protocol, h,
-					&conout_proto, &dummy);
-		if (status == EFI_SUCCESS)
-			conout_found = true;
-
-		status = __gop_query64(gop64, &info, &size, &current_fb_base);
-		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
-			/*
-			 * Systems that use the UEFI Console Splitter may
-			 * provide multiple GOP devices, not all of which are
-			 * backed by real hardware. The workaround is to search
-			 * for a GOP implementing the ConOut protocol, and if
-			 * one isn't found, to just fall back to the first GOP.
-			 */
-			width = info->horizontal_resolution;
-			height = info->vertical_resolution;
-			pixel_format = info->pixel_format;
-			pixel_info = info->pixel_information;
-			pixels_per_scan_line = info->pixels_per_scan_line;
-			fb_base = current_fb_base;
-
-			/*
-			 * Once we've found a GOP supporting ConOut,
-			 * don't bother looking any further.
-			 */
-			first_gop = gop64;
-			if (conout_found)
-				break;
-		}
-	}
-
-	/* Did we find any GOPs? */
-	if (!first_gop)
-		goto out;
-
-	/* EFI framebuffer */
-	si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
-	si->lfb_width = width;
-	si->lfb_height = height;
-	si->lfb_base = fb_base;
-
-	ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
-	if (ext_lfb_base) {
-		si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
-		si->ext_lfb_base = ext_lfb_base;
-	}
-
-	si->pages = 1;
-
-	setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
-
-	si->lfb_size = si->lfb_linelength * si->lfb_height;
-
-	si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
-	return status;
-}
-
-/*
- * See if we have Graphics Output Protocol
- */
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
-			      unsigned long size)
-{
-	efi_status_t status;
-	void **gop_handle = NULL;
-
-	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-				size, (void **)&gop_handle);
-	if (status != EFI_SUCCESS)
-		return status;
-
-	status = efi_call_early(locate_handle,
-				EFI_LOCATE_BY_PROTOCOL,
-				proto, NULL, &size, gop_handle);
-	if (status != EFI_SUCCESS)
-		goto free_handle;
-
-	if (efi_early->is64)
-		status = setup_gop64(si, proto, size, gop_handle);
-	else
-		status = setup_gop32(si, proto, size, gop_handle);
-
-free_handle:
-	efi_call_early(free_pool, gop_handle);
-	return status;
-}
-
 static efi_status_t
 setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
 {
@@ -1038,7 +732,7 @@
 				EFI_LOCATE_BY_PROTOCOL,
 				&graphics_proto, NULL, &size, gop_handle);
 	if (status == EFI_BUFFER_TOO_SMALL)
-		status = setup_gop(si, &graphics_proto, size);
+		status = efi_setup_gop(NULL, si, &graphics_proto, size);
 
 	if (status != EFI_SUCCESS) {
 		size = 0;
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h
index d487e72..c0223f1 100644
--- a/arch/x86/boot/compressed/eboot.h
+++ b/arch/x86/boot/compressed/eboot.h
@@ -11,80 +11,6 @@
 
 #define DESC_TYPE_CODE_DATA	(1 << 0)
 
-#define EFI_CONSOLE_OUT_DEVICE_GUID    \
-	EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, \
-		  0x3f, 0xc1, 0x4d)
-
-#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR		0
-#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR		1
-#define PIXEL_BIT_MASK					2
-#define PIXEL_BLT_ONLY					3
-#define PIXEL_FORMAT_MAX				4
-
-struct efi_pixel_bitmask {
-	u32 red_mask;
-	u32 green_mask;
-	u32 blue_mask;
-	u32 reserved_mask;
-};
-
-struct efi_graphics_output_mode_info {
-	u32 version;
-	u32 horizontal_resolution;
-	u32 vertical_resolution;
-	int pixel_format;
-	struct efi_pixel_bitmask pixel_information;
-	u32 pixels_per_scan_line;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_32 {
-	u32 max_mode;
-	u32 mode;
-	u32 info;
-	u32 size_of_info;
-	u64 frame_buffer_base;
-	u32 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_64 {
-	u32 max_mode;
-	u32 mode;
-	u64 info;
-	u64 size_of_info;
-	u64 frame_buffer_base;
-	u64 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode {
-	u32 max_mode;
-	u32 mode;
-	unsigned long info;
-	unsigned long size_of_info;
-	u64 frame_buffer_base;
-	unsigned long frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_32 {
-	u32 query_mode;
-	u32 set_mode;
-	u32 blt;
-	u32 mode;
-};
-
-struct efi_graphics_output_protocol_64 {
-	u64 query_mode;
-	u64 set_mode;
-	u64 blt;
-	u64 mode;
-};
-
-struct efi_graphics_output_protocol {
-	void *query_mode;
-	unsigned long set_mode;
-	unsigned long blt;
-	struct efi_graphics_output_protocol_mode *mode;
-};
-
 struct efi_uga_draw_protocol_32 {
 	u32 get_mode;
 	u32 set_mode;
diff --git a/arch/x86/boot/compressed/error.c b/arch/x86/boot/compressed/error.c
new file mode 100644
index 0000000..6248740
--- /dev/null
+++ b/arch/x86/boot/compressed/error.c
@@ -0,0 +1,22 @@
+/*
+ * Callers outside of misc.c need access to the error reporting routines,
+ * but the *_putstr() functions need to stay in misc.c because of how
+ * memcpy() and memmove() are defined for the compressed boot environment.
+ */
+#include "misc.h"
+
+void warn(char *m)
+{
+	error_putstr("\n\n");
+	error_putstr(m);
+	error_putstr("\n\n");
+}
+
+void error(char *m)
+{
+	warn(m);
+	error_putstr(" -- System halted");
+
+	while (1)
+		asm("hlt");
+}
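
[Not part of the patch] error.c exists so that code outside misc.c -- notably the new kaslr.c -- can report problems without pulling in misc.c's string machinery; callers just include error.h. A minimal hedged usage sketch (the messages and checks are illustrative):

    #include "error.h"

    static void example_check(unsigned long addr, unsigned long limit)
    {
            if (addr >= limit)
                    error("Destination address too large");   /* halts, does not return */
            else if (addr == 0)
                    warn("Suspicious zero destination address");
    }
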
diff --git a/arch/x86/boot/compressed/error.h b/arch/x86/boot/compressed/error.h
new file mode 100644
index 0000000..2e59dac
--- /dev/null
+++ b/arch/x86/boot/compressed/error.h
@@ -0,0 +1,7 @@
+#ifndef BOOT_COMPRESSED_ERROR_H
+#define BOOT_COMPRESSED_ERROR_H
+
+void warn(char *m);
+void error(char *m);
+
+#endif /* BOOT_COMPRESSED_ERROR_H */
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 8ef964d..1038524 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -31,6 +31,34 @@
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 
+/*
+ * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
+ * relocation to get the symbol address in PIC.  When the compressed x86
+ * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
+ * relocations to their fixed symbol addresses.  However, when the
+ * compressed x86 kernel is loaded at a different address, it leads
+ * to the following load failure:
+ *
+ *   Failed to allocate space for phdrs
+ *
+ * during the decompression stage.
+ *
+ * If the compressed x86 kernel is relocatable at run-time, it should be
+ * compiled with -fPIE, instead of -fPIC, if possible and should be built as
+ * Position Independent Executable (PIE) so that linker won't optimize
+ * R_386_GOT32X relocation to its fixed symbol address.  Older
+ * linkers generate R_386_32 relocations against locally defined symbols,
+ * _bss, _ebss, _got and _egot, in PIE.  It isn't wrong, just less
+ * optimal than R_386_RELATIVE.  But the x86 kernel fails to properly handle
+ * R_386_32 relocations when relocating the kernel.  To generate
+ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
+ * hidden:
+ */
+	.hidden _bss
+	.hidden _ebss
+	.hidden _got
+	.hidden _egot
+
 	__HEAD
 ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
@@ -148,7 +176,9 @@
 1:
 
 	/* Target address to relocate to for decompression */
-	addl	$z_extract_offset, %ebx
+	movl    BP_init_size(%esi), %eax
+	subl    $_end, %eax
+	addl    %eax, %ebx
 
 	/* Set up the stack */
 	leal	boot_stack_end(%ebx), %esp
@@ -205,24 +235,28 @@
 2:
 
 /*
- * Do the decompression, and jump to the new kernel..
+ * Do the extraction, and jump to the new kernel..
  */
-				/* push arguments for decompress_kernel: */
-	pushl	$z_run_size	/* size of kernel with .bss and .brk */
+				/* push arguments for extract_kernel: */
 	pushl	$z_output_len	/* decompressed length, end of relocs */
-	leal	z_extract_offset_negative(%ebx), %ebp
+
+	movl    BP_init_size(%esi), %eax
+	subl    $_end, %eax
+	movl    %ebx, %ebp
+	subl    %eax, %ebp
 	pushl	%ebp		/* output address */
+
 	pushl	$z_input_len	/* input_len */
 	leal	input_data(%ebx), %eax
 	pushl	%eax		/* input_data */
 	leal	boot_heap(%ebx), %eax
 	pushl	%eax		/* heap area */
 	pushl	%esi		/* real mode pointer */
-	call	decompress_kernel /* returns kernel location in %eax */
-	addl	$28, %esp
+	call	extract_kernel	/* returns kernel location in %eax */
+	addl	$24, %esp
 
 /*
- * Jump to the decompressed kernel.
+ * Jump to the extracted kernel.
  */
 	xorl	%ebx, %ebx
 	jmp	*%eax
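
[Not part of the patch] With z_extract_offset gone, the entry code now derives its relocation target at run time: the extraction buffer is init_size bytes (read from boot_params), the compressed image -- whose linked end is _end -- is copied so that it finishes at the top of that buffer, and the extracted kernel is written starting at the bottom. A C-like restatement of that address math, hedged, with names standing in for the assembler symbols (not kernel code):

    /* Where the compressed image (ZO) relocates itself: top of the init_size buffer. */
    unsigned long zo_copy_dest(unsigned long load_addr, unsigned long init_size,
                               unsigned long zo_size)
    {
            return load_addr + (init_size - zo_size);          /* new %ebx / %rbx */
    }

    /* Where extract_kernel() writes the uncompressed kernel: bottom of the buffer. */
    unsigned long vo_output_addr(unsigned long copy_dest, unsigned long init_size,
                                 unsigned long zo_size)
    {
            return copy_dest - (init_size - zo_size);          /* i.e. back to load_addr */
    }
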
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index b0c0d16..0d80a7a 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -33,6 +33,14 @@
 #include <asm/asm-offsets.h>
 #include <asm/bootparam.h>
 
+/*
+ * Locally defined symbols should be marked hidden:
+ */
+	.hidden _bss
+	.hidden _ebss
+	.hidden _got
+	.hidden _egot
+
 	__HEAD
 	.code32
 ENTRY(startup_32)
@@ -102,7 +110,9 @@
 1:
 
 	/* Target address to relocate to for decompression */
-	addl	$z_extract_offset, %ebx
+	movl	BP_init_size(%esi), %eax
+	subl	$_end, %eax
+	addl	%eax, %ebx
 
 /*
  * Prepare for entering 64 bit mode
@@ -124,7 +134,7 @@
 	/* Initialize Page tables to 0 */
 	leal	pgtable(%ebx), %edi
 	xorl	%eax, %eax
-	movl	$((4096*6)/4), %ecx
+	movl	$(BOOT_INIT_PGT_SIZE/4), %ecx
 	rep	stosl
 
 	/* Build Level 4 */
@@ -330,7 +340,9 @@
 1:
 
 	/* Target address to relocate to for decompression */
-	leaq	z_extract_offset(%rbp), %rbx
+	movl	BP_init_size(%rsi), %ebx
+	subl	$_end, %ebx
+	addq	%rbp, %rbx
 
 	/* Set up the stack */
 	leaq	boot_stack_end(%rbx), %rsp
@@ -400,19 +412,16 @@
 2:
 	
 /*
- * Do the decompression, and jump to the new kernel..
+ * Do the extraction, and jump to the new kernel..
  */
 	pushq	%rsi			/* Save the real mode argument */
-	movq	$z_run_size, %r9	/* size of kernel with .bss and .brk */
-	pushq	%r9
 	movq	%rsi, %rdi		/* real mode address */
 	leaq	boot_heap(%rip), %rsi	/* malloc area for uncompression */
 	leaq	input_data(%rip), %rdx  /* input_data */
 	movl	$z_input_len, %ecx	/* input_len */
 	movq	%rbp, %r8		/* output target address */
 	movq	$z_output_len, %r9	/* decompressed length, end of relocs */
-	call	decompress_kernel	/* returns kernel location in %rax */
-	popq	%r9
+	call	extract_kernel		/* returns kernel location in %rax */
 	popq	%rsi
 
 /*
@@ -477,4 +486,4 @@
 	.section ".pgtable","a",@nobits
 	.balign 4096
 pgtable:
-	.fill 6*4096, 1, 0
+	.fill BOOT_PGT_SIZE, 1, 0
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
new file mode 100644
index 0000000..cfeb025
--- /dev/null
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -0,0 +1,510 @@
+/*
+ * kaslr.c
+ *
+ * This contains the routines needed to generate a reasonable level of
+ * entropy to choose a randomized kernel base address offset in support
+ * of Kernel Address Space Layout Randomization (KASLR). Additionally
+ * handles walking the physical memory maps (and tracking memory regions
+ * to avoid) in order to select a physical memory location that can
+ * contain the entire properly aligned running kernel image.
+ *
+ */
+#include "misc.h"
+#include "error.h"
+
+#include <asm/msr.h>
+#include <asm/archrandom.h>
+#include <asm/e820.h>
+
+#include <generated/compile.h>
+#include <linux/module.h>
+#include <linux/uts.h>
+#include <linux/utsname.h>
+#include <generated/utsrelease.h>
+
+/* Simplified build-specific string for starting entropy. */
+static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
+
+#define I8254_PORT_CONTROL	0x43
+#define I8254_PORT_COUNTER0	0x40
+#define I8254_CMD_READBACK	0xC0
+#define I8254_SELECT_COUNTER0	0x02
+#define I8254_STATUS_NOTREADY	0x40
+static inline u16 i8254(void)
+{
+	u16 status, timer;
+
+	do {
+		outb(I8254_PORT_CONTROL,
+		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+		status = inb(I8254_PORT_COUNTER0);
+		timer  = inb(I8254_PORT_COUNTER0);
+		timer |= inb(I8254_PORT_COUNTER0) << 8;
+	} while (status & I8254_STATUS_NOTREADY);
+
+	return timer;
+}
+
+static unsigned long rotate_xor(unsigned long hash, const void *area,
+				size_t size)
+{
+	size_t i;
+	unsigned long *ptr = (unsigned long *)area;
+
+	for (i = 0; i < size / sizeof(hash); i++) {
+		/* Rotate by odd number of bits and XOR. */
+		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+		hash ^= ptr[i];
+	}
+
+	return hash;
+}
+
+/* Attempt to create a simple but unpredictable starting entropy. */
+static unsigned long get_random_boot(void)
+{
+	unsigned long hash = 0;
+
+	hash = rotate_xor(hash, build_str, sizeof(build_str));
+	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));
+
+	return hash;
+}
+
+static unsigned long get_random_long(const char *purpose)
+{
+#ifdef CONFIG_X86_64
+	const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
+#else
+	const unsigned long mix_const = 0x3f39e593UL;
+#endif
+	unsigned long raw, random = get_random_boot();
+	bool use_i8254 = true;
+
+	debug_putstr(purpose);
+	debug_putstr(" KASLR using");
+
+	if (has_cpuflag(X86_FEATURE_RDRAND)) {
+		debug_putstr(" RDRAND");
+		if (rdrand_long(&raw)) {
+			random ^= raw;
+			use_i8254 = false;
+		}
+	}
+
+	if (has_cpuflag(X86_FEATURE_TSC)) {
+		debug_putstr(" RDTSC");
+		raw = rdtsc();
+
+		random ^= raw;
+		use_i8254 = false;
+	}
+
+	if (use_i8254) {
+		debug_putstr(" i8254");
+		random ^= i8254();
+	}
+
+	/* Circular multiply for better bit diffusion */
+	asm("mul %3"
+	    : "=a" (random), "=d" (raw)
+	    : "a" (random), "rm" (mix_const));
+	random += raw;
+
+	debug_putstr("...\n");
+
+	return random;
+}
+
+struct mem_vector {
+	unsigned long start;
+	unsigned long size;
+};
+
+enum mem_avoid_index {
+	MEM_AVOID_ZO_RANGE = 0,
+	MEM_AVOID_INITRD,
+	MEM_AVOID_CMDLINE,
+	MEM_AVOID_BOOTPARAMS,
+	MEM_AVOID_MAX,
+};
+
+static struct mem_vector mem_avoid[MEM_AVOID_MAX];
+
+static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
+{
+	/* Item at least partially before region. */
+	if (item->start < region->start)
+		return false;
+	/* Item at least partially after region. */
+	if (item->start + item->size > region->start + region->size)
+		return false;
+	return true;
+}
+
+static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
+{
+	/* Item one is entirely before item two. */
+	if (one->start + one->size <= two->start)
+		return false;
+	/* Item one is entirely after item two. */
+	if (one->start >= two->start + two->size)
+		return false;
+	return true;
+}
+
+/*
+ * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
+ * The mem_avoid array is used to store the ranges that need to be avoided
+ * when KASLR searches for an appropriate random address. We must avoid any
+ * regions that are unsafe to overlap with during decompression, and other
+ * things like the initrd, cmdline and boot_params. This comment seeks to
+ * explain mem_avoid as clearly as possible since incorrect mem_avoid
+ * memory ranges lead to really hard to debug boot failures.
+ *
+ * The initrd, cmdline, and boot_params are trivial to identify for
+ * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
+ * MEM_AVOID_BOOTPARAMS respectively below.
+ *
+ * What is not obvious how to avoid is the range of memory that is used
+ * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
+ * the compressed kernel (ZO) and its run space, which is used to extract
+ * the uncompressed kernel (VO) and relocs.
+ *
+ * ZO's full run size sits against the end of the decompression buffer, so
+ * we can calculate where text, data, bss, etc of ZO are positioned more
+ * easily.
+ *
+ * For additional background, the decompression calculations can be found
+ * in header.S, and the memory diagram is based on the one found in misc.c.
+ *
+ * The following conditions are already enforced by the image layouts and
+ * associated code:
+ *  - input + input_size >= output + output_size
+ *  - kernel_total_size <= init_size
+ *  - kernel_total_size <= output_size (see Note below)
+ *  - output + init_size >= output + output_size
+ *
+ * (Note that kernel_total_size and output_size have no fundamental
+ * relationship, but output_size is passed to choose_random_location
+ * as a maximum of the two. The diagram is showing a case where
+ * kernel_total_size is larger than output_size, but this case is
+ * handled by bumping output_size.)
+ *
+ * The above conditions can be illustrated by a diagram:
+ *
+ * 0   output            input            input+input_size    output+init_size
+ * |     |                 |                             |             |
+ * |     |                 |                             |             |
+ * |-----|--------|--------|--------------|-----------|--|-------------|
+ *                |                       |           |
+ *                |                       |           |
+ * output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
+ *
+ * [output, output+init_size) is the entire memory range used for
+ * extracting the compressed image.
+ *
+ * [output, output+kernel_total_size) is the range needed for the
+ * uncompressed kernel (VO) and its run size (bss, brk, etc).
+ *
+ * [output, output+output_size) is VO plus relocs (i.e. the entire
+ * uncompressed payload contained by ZO). This is the area of the buffer
+ * written to during decompression.
+ *
+ * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
+ * range of the copied ZO and decompression code. (i.e. the range
+ * covered backwards of size ZO_INIT_SIZE, starting from output+init_size.)
+ *
+ * [input, input+input_size) is the original copied compressed image (ZO)
+ * (i.e. it does not include its run size). This range must be avoided
+ * because it contains the data used for decompression.
+ *
+ * [input+input_size, output+init_size) is [_text, _end) for ZO. This
+ * range includes ZO's heap and stack, and must be avoided since it
+ * performs the decompression.
+ *
+ * Since the above two ranges need to be avoided and they are adjacent,
+ * they can be merged, resulting in: [input, output+init_size) which
+ * becomes the MEM_AVOID_ZO_RANGE below.
+ */
+static void mem_avoid_init(unsigned long input, unsigned long input_size,
+			   unsigned long output)
+{
+	unsigned long init_size = boot_params->hdr.init_size;
+	u64 initrd_start, initrd_size;
+	u64 cmd_line, cmd_line_size;
+	char *ptr;
+
+	/*
+	 * Avoid the region that is unsafe to overlap during
+	 * decompression.
+	 */
+	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
+	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
+	add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
+			 mem_avoid[MEM_AVOID_ZO_RANGE].size);
+
+	/* Avoid initrd. */
+	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
+	initrd_start |= boot_params->hdr.ramdisk_image;
+	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
+	initrd_size |= boot_params->hdr.ramdisk_size;
+	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
+	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
+	/* No need to set mapping for initrd, it will be handled in VO. */
+
+	/* Avoid kernel command line. */
+	cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
+	cmd_line |= boot_params->hdr.cmd_line_ptr;
+	/* Calculate size of cmd_line. */
+	ptr = (char *)(unsigned long)cmd_line;
+	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
+		;
+	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
+	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
+	add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
+			 mem_avoid[MEM_AVOID_CMDLINE].size);
+
+	/* Avoid boot parameters. */
+	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
+	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
+	add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
+			 mem_avoid[MEM_AVOID_BOOTPARAMS].size);
+
+	/* We don't need to set a mapping for setup_data. */
+
+#ifdef CONFIG_X86_VERBOSE_BOOTUP
+	/* Make sure video RAM can be used. */
+	add_identity_map(0, PMD_SIZE);
+#endif
+}
+
+/*
+ * Does this memory vector overlap a known avoided area? If so, record the
+ * overlap region with the lowest address.
+ */
+static bool mem_avoid_overlap(struct mem_vector *img,
+			      struct mem_vector *overlap)
+{
+	int i;
+	struct setup_data *ptr;
+	unsigned long earliest = img->start + img->size;
+	bool is_overlapping = false;
+
+	for (i = 0; i < MEM_AVOID_MAX; i++) {
+		if (mem_overlaps(img, &mem_avoid[i]) &&
+		    mem_avoid[i].start < earliest) {
+			*overlap = mem_avoid[i];
+			is_overlapping = true;
+		}
+	}
+
+	/* Avoid all entries in the setup_data linked list. */
+	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
+	while (ptr) {
+		struct mem_vector avoid;
+
+		avoid.start = (unsigned long)ptr;
+		avoid.size = sizeof(*ptr) + ptr->len;
+
+		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
+			*overlap = avoid;
+			is_overlapping = true;
+		}
+
+		ptr = (struct setup_data *)(unsigned long)ptr->next;
+	}
+
+	return is_overlapping;
+}
+
+static unsigned long slots[KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN];
+
+struct slot_area {
+	unsigned long addr;
+	int num;
+};
+
+#define MAX_SLOT_AREA 100
+
+static struct slot_area slot_areas[MAX_SLOT_AREA];
+
+static unsigned long slot_max;
+
+static unsigned long slot_area_index;
+
+static void store_slot_info(struct mem_vector *region, unsigned long image_size)
+{
+	struct slot_area slot_area;
+
+	if (slot_area_index == MAX_SLOT_AREA)
+		return;
+
+	slot_area.addr = region->start;
+	slot_area.num = (region->size - image_size) /
+			CONFIG_PHYSICAL_ALIGN + 1;
+
+	if (slot_area.num > 0) {
+		slot_areas[slot_area_index++] = slot_area;
+		slot_max += slot_area.num;
+	}
+}
+
+static void slots_append(unsigned long addr)
+{
+	/* Overflowing the slots list should be impossible. */
+	if (slot_max >= KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN)
+		return;
+
+	slots[slot_max++] = addr;
+}
+
+static unsigned long slots_fetch_random(void)
+{
+	/* Handle case of no slots stored. */
+	if (slot_max == 0)
+		return 0;
+
+	return slots[get_random_long("Physical") % slot_max];
+}
+
+static void process_e820_entry(struct e820entry *entry,
+			       unsigned long minimum,
+			       unsigned long image_size)
+{
+	struct mem_vector region, img, overlap;
+
+	/* Skip non-RAM entries. */
+	if (entry->type != E820_RAM)
+		return;
+
+	/* Ignore entries entirely above our maximum. */
+	if (entry->addr >= KERNEL_IMAGE_SIZE)
+		return;
+
+	/* Ignore entries entirely below our minimum. */
+	if (entry->addr + entry->size < minimum)
+		return;
+
+	region.start = entry->addr;
+	region.size = entry->size;
+
+	/* Potentially raise address to minimum location. */
+	if (region.start < minimum)
+		region.start = minimum;
+
+	/* Potentially raise address to meet alignment requirements. */
+	region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
+
+	/* Did we raise the address above the bounds of this e820 region? */
+	if (region.start > entry->addr + entry->size)
+		return;
+
+	/* Reduce size by any delta from the original address. */
+	region.size -= region.start - entry->addr;
+
+	/* Reduce maximum size to fit end of image within maximum limit. */
+	if (region.start + region.size > KERNEL_IMAGE_SIZE)
+		region.size = KERNEL_IMAGE_SIZE - region.start;
+
+	/* Walk each aligned slot and check for avoided areas. */
+	for (img.start = region.start, img.size = image_size ;
+	     mem_contains(&region, &img) ;
+	     img.start += CONFIG_PHYSICAL_ALIGN) {
+		if (mem_avoid_overlap(&img, &overlap))
+			continue;
+		slots_append(img.start);
+	}
+}
+
+static unsigned long find_random_phys_addr(unsigned long minimum,
+					   unsigned long image_size)
+{
+	int i;
+	unsigned long addr;
+
+	/* Make sure minimum is aligned. */
+	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+
+	/* Verify potential e820 positions, appending to slots list. */
+	for (i = 0; i < boot_params->e820_entries; i++) {
+		process_e820_entry(&boot_params->e820_map[i], minimum,
+				   image_size);
+	}
+
+	return slots_fetch_random();
+}
+
+static unsigned long find_random_virt_addr(unsigned long minimum,
+					   unsigned long image_size)
+{
+	unsigned long slots, random_addr;
+
+	/* Make sure minimum is aligned. */
+	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+	/* Align image_size for easy slot calculations. */
+	image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);
+
+	/*
+	 * There are how many CONFIG_PHYSICAL_ALIGN-sized slots
+	 * that can hold image_size within the range of minimum to
+	 * KERNEL_IMAGE_SIZE?
+	 */
+	slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
+		 CONFIG_PHYSICAL_ALIGN + 1;
+
+	random_addr = get_random_long("Virtual") % slots;
+
+	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
+}
+
+/*
+ * Since this function examines addresses much more numerically,
+ * it takes the input and output pointers as 'unsigned long'.
+ */
+unsigned char *choose_random_location(unsigned long input,
+				      unsigned long input_size,
+				      unsigned long output,
+				      unsigned long output_size)
+{
+	unsigned long choice = output;
+	unsigned long random_addr;
+
+#ifdef CONFIG_HIBERNATION
+	if (!cmdline_find_option_bool("kaslr")) {
+		warn("KASLR disabled: 'kaslr' not on cmdline (hibernation selected).");
+		goto out;
+	}
+#else
+	if (cmdline_find_option_bool("nokaslr")) {
+		warn("KASLR disabled: 'nokaslr' on cmdline.");
+		goto out;
+	}
+#endif
+
+	boot_params->hdr.loadflags |= KASLR_FLAG;
+
+	/* Record the various known unsafe memory ranges. */
+	mem_avoid_init(input, input_size, output);
+
+	/* Walk e820 and find a random address. */
+	random_addr = find_random_phys_addr(output, output_size);
+	if (!random_addr) {
+		warn("KASLR disabled: could not find suitable E820 region!");
+		goto out;
+	}
+
+	/* Always enforce the minimum. */
+	if (random_addr < choice)
+		goto out;
+
+	choice = random_addr;
+
+	add_identity_map(choice, output_size);
+
+	/* This actually loads the identity pagetable on x86_64. */
+	finalize_identity_maps();
+out:
+	return (unsigned char *)choice;
+}
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 79dac17..f14db4e 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -1,8 +1,10 @@
 /*
  * misc.c
  *
- * This is a collection of several routines from gzip-1.0.3
- * adapted for Linux.
+ * This is a collection of several routines used to extract the kernel
+ * image: KASLR relocation, decompression, ELF parsing, and relocation
+ * processing. Additionally included are the screen and serial
+ * output functions and related debugging support functions.
  *
  * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
  * puts by Nick Holloway 1993, better puts by Martin Mares 1995
@@ -10,111 +12,37 @@
  */
 
 #include "misc.h"
+#include "error.h"
 #include "../string.h"
-
-/* WARNING!!
- * This code is compiled with -fPIC and it is relocated dynamically
- * at run time, but no relocation processing is performed.
- * This means that it is not safe to place pointers in static structures.
- */
+#include "../voffset.h"
 
 /*
- * Getting to provable safe in place decompression is hard.
- * Worst case behaviours need to be analyzed.
- * Background information:
- *
- * The file layout is:
- *    magic[2]
- *    method[1]
- *    flags[1]
- *    timestamp[4]
- *    extraflags[1]
- *    os[1]
- *    compressed data blocks[N]
- *    crc[4] orig_len[4]
- *
- * resulting in 18 bytes of non compressed data overhead.
- *
- * Files divided into blocks
- * 1 bit (last block flag)
- * 2 bits (block type)
- *
- * 1 block occurs every 32K -1 bytes or when there 50% compression
- * has been achieved. The smallest block type encoding is always used.
- *
- * stored:
- *    32 bits length in bytes.
- *
- * fixed:
- *    magic fixed tree.
- *    symbols.
- *
- * dynamic:
- *    dynamic tree encoding.
- *    symbols.
- *
- *
- * The buffer for decompression in place is the length of the
- * uncompressed data, plus a small amount extra to keep the algorithm safe.
- * The compressed data is placed at the end of the buffer.  The output
- * pointer is placed at the start of the buffer and the input pointer
- * is placed where the compressed data starts.  Problems will occur
- * when the output pointer overruns the input pointer.
- *
- * The output pointer can only overrun the input pointer if the input
- * pointer is moving faster than the output pointer.  A condition only
- * triggered by data whose compressed form is larger than the uncompressed
- * form.
- *
- * The worst case at the block level is a growth of the compressed data
- * of 5 bytes per 32767 bytes.
- *
- * The worst case internal to a compressed block is very hard to figure.
- * The worst case can at least be boundined by having one bit that represents
- * 32764 bytes and then all of the rest of the bytes representing the very
- * very last byte.
- *
- * All of which is enough to compute an amount of extra data that is required
- * to be safe.  To avoid problems at the block level allocating 5 extra bytes
- * per 32767 bytes of data is sufficient.  To avoind problems internal to a
- * block adding an extra 32767 bytes (the worst case uncompressed block size)
- * is sufficient, to ensure that in the worst case the decompressed data for
- * block will stop the byte before the compressed data for a block begins.
- * To avoid problems with the compressed data's meta information an extra 18
- * bytes are needed.  Leading to the formula:
- *
- * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
- *
- * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
- * Adding 32768 instead of 32767 just makes for round numbers.
- * Adding the decompressor_size is necessary as it musht live after all
- * of the data as well.  Last I measured the decompressor is about 14K.
- * 10K of actual data and 4K of bss.
- *
+ * WARNING!!
+ * This code is compiled with -fPIC and it is relocated dynamically at
+ * run time, but no relocation processing is performed. This means that
+ * it is not safe to place pointers in static structures.
  */
 
-/*
- * gzip declarations
- */
+/* Macros used by the included decompressor code below. */
 #define STATIC		static
 
-#undef memcpy
-
 /*
- * Use a normal definition of memset() from string.c. There are already
+ * Use normal definitions of mem*() from string.c. There are already
  * included header files which expect a definition of memset() and by
  * the time we define memset macro, it is too late.
  */
+#undef memcpy
 #undef memset
 #define memzero(s, n)	memset((s), 0, (n))
+#define memmove		memmove
 
-
-static void error(char *m);
+/* Functions used by the included decompressor code below. */
+void *memmove(void *dest, const void *src, size_t n);
 
 /*
  * This is set up by the setup-routine at boot-time
  */
-struct boot_params *real_mode;		/* Pointer to real-mode data */
+struct boot_params *boot_params;
 
 memptr free_mem_ptr;
 memptr free_mem_end_ptr;
@@ -146,12 +74,16 @@
 #ifdef CONFIG_KERNEL_LZ4
 #include "../../../../lib/decompress_unlz4.c"
 #endif
+/*
+ * NOTE: When adding a new decompressor, please update the analysis in
+ * ../header.S.
+ */
 
 static void scroll(void)
 {
 	int i;
 
-	memcpy(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
+	memmove(vidmem, vidmem + cols * 2, (lines - 1) * cols * 2);
 	for (i = (lines - 1) * cols * 2; i < lines * cols * 2; i += 2)
 		vidmem[i] = ' ';
 }
@@ -184,12 +116,12 @@
 		}
 	}
 
-	if (real_mode->screen_info.orig_video_mode == 0 &&
+	if (boot_params->screen_info.orig_video_mode == 0 &&
 	    lines == 0 && cols == 0)
 		return;
 
-	x = real_mode->screen_info.orig_x;
-	y = real_mode->screen_info.orig_y;
+	x = boot_params->screen_info.orig_x;
+	y = boot_params->screen_info.orig_y;
 
 	while ((c = *s++) != '\0') {
 		if (c == '\n') {
@@ -210,8 +142,8 @@
 		}
 	}
 
-	real_mode->screen_info.orig_x = x;
-	real_mode->screen_info.orig_y = y;
+	boot_params->screen_info.orig_x = x;
+	boot_params->screen_info.orig_y = y;
 
 	pos = (x + cols * y) * 2;	/* Update cursor position */
 	outb(14, vidport);
@@ -237,23 +169,13 @@
 	}
 }
 
-static void error(char *x)
-{
-	error_putstr("\n\n");
-	error_putstr(x);
-	error_putstr("\n\n -- System halted");
-
-	while (1)
-		asm("hlt");
-}
-
 #if CONFIG_X86_NEED_RELOCS
 static void handle_relocations(void *output, unsigned long output_len)
 {
 	int *reloc;
 	unsigned long delta, map, ptr;
 	unsigned long min_addr = (unsigned long)output;
-	unsigned long max_addr = min_addr + output_len;
+	unsigned long max_addr = min_addr + (VO___bss_start - VO__text);
 
 	/*
 	 * Calculate the delta between where vmlinux was linked to load
@@ -295,7 +217,7 @@
 	 * So we work backwards from the end of the decompressed image.
 	 */
 	for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) {
-		int extended = *reloc;
+		long extended = *reloc;
 		extended += map;
 
 		ptr = (unsigned long)extended;
@@ -372,9 +294,7 @@
 #else
 			dest = (void *)(phdr->p_paddr);
 #endif
-			memcpy(dest,
-			       output + phdr->p_offset,
-			       phdr->p_filesz);
+			memmove(dest, output + phdr->p_offset, phdr->p_filesz);
 			break;
 		default: /* Ignore other PT_* */ break;
 		}
@@ -383,23 +303,41 @@
 	free(phdrs);
 }
 
-asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
+/*
+ * The compressed kernel image (ZO) has been moved so that its position
+ * is against the end of the buffer used to hold the uncompressed kernel
+ * image (VO) and the execution environment (.bss, .brk), which makes sure
+ * there is room to do the in-place decompression. (See header.S for the
+ * calculations.)
+ *
+ *                             |-----compressed kernel image------|
+ *                             V                                  V
+ * 0                       extract_offset                      +INIT_SIZE
+ * |-----------|---------------|-------------------------|--------|
+ *             |               |                         |        |
+ *           VO__text      startup_32 of ZO          VO__end    ZO__end
+ *             ^                                         ^
+ *             |-------uncompressed kernel image---------|
+ *
+ */
+asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 				  unsigned char *input_data,
 				  unsigned long input_len,
 				  unsigned char *output,
-				  unsigned long output_len,
-				  unsigned long run_size)
+				  unsigned long output_len)
 {
+	const unsigned long kernel_total_size = VO__end - VO__text;
 	unsigned char *output_orig = output;
 
-	real_mode = rmode;
+	/* Retain x86 boot parameters pointer passed from startup_32/64. */
+	boot_params = rmode;
 
-	/* Clear it for solely in-kernel use */
-	real_mode->hdr.loadflags &= ~KASLR_FLAG;
+	/* Clear flags intended for solely in-kernel use. */
+	boot_params->hdr.loadflags &= ~KASLR_FLAG;
 
-	sanitize_boot_params(real_mode);
+	sanitize_boot_params(boot_params);
 
-	if (real_mode->screen_info.orig_video_mode == 7) {
+	if (boot_params->screen_info.orig_video_mode == 7) {
 		vidmem = (char *) 0xb0000;
 		vidport = 0x3b4;
 	} else {
@@ -407,11 +345,11 @@
 		vidport = 0x3d4;
 	}
 
-	lines = real_mode->screen_info.orig_video_lines;
-	cols = real_mode->screen_info.orig_video_cols;
+	lines = boot_params->screen_info.orig_video_lines;
+	cols = boot_params->screen_info.orig_video_cols;
 
 	console_init();
-	debug_putstr("early console in decompress_kernel\n");
+	debug_putstr("early console in extract_kernel\n");
 
 	free_mem_ptr     = heap;	/* Heap */
 	free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
@@ -421,16 +359,16 @@
 	debug_putaddr(input_len);
 	debug_putaddr(output);
 	debug_putaddr(output_len);
-	debug_putaddr(run_size);
+	debug_putaddr(kernel_total_size);
 
 	/*
 	 * The memory hole needed for the kernel is the larger of either
 	 * the entire decompressed kernel plus relocation table, or the
 	 * entire decompressed kernel plus .bss and .brk sections.
 	 */
-	output = choose_kernel_location(real_mode, input_data, input_len, output,
-					output_len > run_size ? output_len
-							      : run_size);
+	output = choose_random_location((unsigned long)input_data, input_len,
+					(unsigned long)output,
+					max(output_len, kernel_total_size));
 
 	/* Validate memory location choices. */
 	if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
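
For the memory-hole comment just above: kernel_total_size is
VO__end - VO__text (the decompressed image plus .bss and .brk), output_len
covers the decompressed image plus its relocation table, and the larger of
the two is the region choose_random_location() has to place. A toy
illustration with invented sizes:

#include <stdio.h>

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* Invented numbers, for illustration only. */
	unsigned long output_len        = 28UL << 20;	/* image + relocs  */
	unsigned long kernel_total_size = 33UL << 20;	/* image + bss/brk */

	/* The hole size passed to choose_random_location() above. */
	printf("memory hole: %lu MiB\n",
	       max_ul(output_len, kernel_total_size) >> 20);	/* 33 MiB */
	return 0;
}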
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 3783dc3..b6fec1f 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -32,7 +32,7 @@
 /* misc.c */
 extern memptr free_mem_ptr;
 extern memptr free_mem_end_ptr;
-extern struct boot_params *real_mode;		/* Pointer to real-mode data */
+extern struct boot_params *boot_params;
 void __putstr(const char *s);
 void __puthex(unsigned long value);
 #define error_putstr(__x)  __putstr(__x)
@@ -66,26 +66,35 @@
 
 
 #if CONFIG_RANDOMIZE_BASE
-/* aslr.c */
-unsigned char *choose_kernel_location(struct boot_params *boot_params,
-				      unsigned char *input,
+/* kaslr.c */
+unsigned char *choose_random_location(unsigned long input_ptr,
 				      unsigned long input_size,
-				      unsigned char *output,
+				      unsigned long output_ptr,
 				      unsigned long output_size);
 /* cpuflags.c */
 bool has_cpuflag(int flag);
 #else
 static inline
-unsigned char *choose_kernel_location(struct boot_params *boot_params,
-				      unsigned char *input,
+unsigned char *choose_random_location(unsigned long input_ptr,
 				      unsigned long input_size,
-				      unsigned char *output,
+				      unsigned long output_ptr,
 				      unsigned long output_size)
 {
-	return output;
+	return (unsigned char *)output_ptr;
 }
 #endif
 
+#ifdef CONFIG_X86_64
+void add_identity_map(unsigned long start, unsigned long size);
+void finalize_identity_maps(void);
+extern unsigned char _pgtable[];
+#else
+static inline void add_identity_map(unsigned long start, unsigned long size)
+{ }
+static inline void finalize_identity_maps(void)
+{ }
+#endif
+
 #ifdef CONFIG_EARLY_PRINTK
 /* early_serial_console.c */
 extern int early_serial_base;
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index d8222f2..72bad2c 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -18,11 +18,10 @@
  *
  *  H. Peter Anvin <hpa@linux.intel.com>
  *
- * ----------------------------------------------------------------------- */
-
-/*
- * Compute the desired load offset from a compressed program; outputs
- * a small assembly wrapper with the appropriate symbols defined.
+ * -----------------------------------------------------------------------
+ *
+ * Outputs a small assembly wrapper with the appropriate symbols defined.
+ *
  */
 
 #include <stdlib.h>
@@ -35,14 +34,11 @@
 {
 	uint32_t olen;
 	long ilen;
-	unsigned long offs;
-	unsigned long run_size;
 	FILE *f = NULL;
 	int retval = 1;
 
-	if (argc < 3) {
-		fprintf(stderr, "Usage: %s compressed_file run_size\n",
-				argv[0]);
+	if (argc < 2) {
+		fprintf(stderr, "Usage: %s compressed_file\n", argv[0]);
 		goto bail;
 	}
 
@@ -67,29 +63,11 @@
 	ilen = ftell(f);
 	olen = get_unaligned_le32(&olen);
 
-	/*
-	 * Now we have the input (compressed) and output (uncompressed)
-	 * sizes, compute the necessary decompression offset...
-	 */
-
-	offs = (olen > ilen) ? olen - ilen : 0;
-	offs += olen >> 12;	/* Add 8 bytes for each 32K block */
-	offs += 64*1024 + 128;	/* Add 64K + 128 bytes slack */
-	offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
-	run_size = atoi(argv[2]);
-
 	printf(".section \".rodata..compressed\",\"a\",@progbits\n");
 	printf(".globl z_input_len\n");
 	printf("z_input_len = %lu\n", ilen);
 	printf(".globl z_output_len\n");
 	printf("z_output_len = %lu\n", (unsigned long)olen);
-	printf(".globl z_extract_offset\n");
-	printf("z_extract_offset = 0x%lx\n", offs);
-	/* z_extract_offset_negative allows simplification of head_32.S */
-	printf(".globl z_extract_offset_negative\n");
-	printf("z_extract_offset_negative = -0x%lx\n", offs);
-	printf(".globl z_run_size\n");
-	printf("z_run_size = %lu\n", run_size);
 
 	printf(".globl input_data, input_data_end\n");
 	printf("input_data:\n");
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
new file mode 100644
index 0000000..34b95df
--- /dev/null
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -0,0 +1,129 @@
+/*
+ * This code is used on x86_64 to create page table identity mappings on
+ * demand by building up a new set of page tables (or appending to the
+ * existing ones), and then switching over to them when ready.
+ */
+
+/*
+ * Since we're dealing with identity mappings, physical and virtual
+ * addresses are the same, so override these defines which are ultimately
+ * used by the headers in misc.h.
+ */
+#define __pa(x)  ((unsigned long)(x))
+#define __va(x)  ((void *)((unsigned long)(x)))
+
+#include "misc.h"
+
+/* These actually do the work of building the kernel identity maps. */
+#include <asm/init.h>
+#include <asm/pgtable.h>
+#include "../../mm/ident_map.c"
+
+/* Used by pgtable.h asm code to force instruction serialization. */
+unsigned long __force_order;
+
+/* Used to track our page table allocation area. */
+struct alloc_pgt_data {
+	unsigned char *pgt_buf;
+	unsigned long pgt_buf_size;
+	unsigned long pgt_buf_offset;
+};
+
+/*
+ * Allocates space for a page table entry, using struct alloc_pgt_data
+ * above. Besides the local callers, this is used as the allocation
+ * callback in mapping_info below.
+ */
+static void *alloc_pgt_page(void *context)
+{
+	struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
+	unsigned char *entry;
+
+	/* Validate there is space available for a new page. */
+	if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
+		debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
+		debug_putaddr(pages->pgt_buf_offset);
+		debug_putaddr(pages->pgt_buf_size);
+		return NULL;
+	}
+
+	entry = pages->pgt_buf + pages->pgt_buf_offset;
+	pages->pgt_buf_offset += PAGE_SIZE;
+
+	return entry;
+}
+
+/* Used to track our allocated page tables. */
+static struct alloc_pgt_data pgt_data;
+
+/* The top level page table entry pointer. */
+static unsigned long level4p;
+
+/* Locates and clears a region for a new top level page table. */
+static void prepare_level4(void)
+{
+	/*
+	 * It should be impossible for this not to already be true,
+	 * but since calling this a second time would rewind the other
+	 * counters, let's just make sure this is reset too.
+	 */
+	pgt_data.pgt_buf_offset = 0;
+
+	/*
+	 * If we came here via startup_32(), cr3 will be _pgtable already
+	 * and we must append to the existing area instead of entirely
+	 * overwriting it.
+	 */
+	level4p = read_cr3();
+	if (level4p == (unsigned long)_pgtable) {
+		debug_putstr("booted via startup_32()\n");
+		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
+		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
+		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
+	} else {
+		debug_putstr("booted via startup_64()\n");
+		pgt_data.pgt_buf = _pgtable;
+		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
+		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
+		level4p = (unsigned long)alloc_pgt_page(&pgt_data);
+	}
+}
+
+/*
+ * Adds the specified range to what will become the new identity mappings.
+ * Once all ranges have been added, the new mapping is activated by calling
+ * finalize_identity_maps() below.
+ */
+void add_identity_map(unsigned long start, unsigned long size)
+{
+	struct x86_mapping_info mapping_info = {
+		.alloc_pgt_page	= alloc_pgt_page,
+		.context	= &pgt_data,
+		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
+	};
+	unsigned long end = start + size;
+
+	/* Make sure we have a top level page table ready to use. */
+	if (!level4p)
+		prepare_level4();
+
+	/* Align boundary to 2M. */
+	start = round_down(start, PMD_SIZE);
+	end = round_up(end, PMD_SIZE);
+	if (start >= end)
+		return;
+
+	/* Build the mapping. */
+	kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p,
+				  start, end);
+}
+
+/*
+ * This switches the page tables to the new level4 that has been built
+ * via calls to add_identity_map() above. If booted via startup_32(),
+ * this is effectively a no-op.
+ */
+void finalize_identity_maps(void)
+{
+	write_cr3(level4p);
+}
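
Putting this file together with the kaslr.c changes earlier in the patch,
the intended usage is simply: add every region the decompressor will touch
with add_identity_map(), then switch page tables once with
finalize_identity_maps(). A minimal outline of that sequence (this mirrors
what choose_random_location() already does; it is not additional in-tree
code):

/* Outline only: the call sequence used for the randomized output region. */
static void map_chosen_output(unsigned long choice, unsigned long output_size)
{
	/* Build (or extend) identity mappings covering the output region. */
	add_identity_map(choice, output_size);

	/* Load the resulting top-level page table via CR3 (x86_64 only). */
	finalize_identity_maps();
}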
diff --git a/arch/x86/boot/compressed/string.c b/arch/x86/boot/compressed/string.c
index 00e788b..cea140c 100644
--- a/arch/x86/boot/compressed/string.c
+++ b/arch/x86/boot/compressed/string.c
@@ -1,7 +1,16 @@
+/*
+ * This provides an optimized implementation of memcpy, and a simplified
+ * implementation of memset and memmove. These are used here because the
+ * standard kernel runtime versions are not yet available and we don't
+ * trust the gcc built-in implementations as they may do unexpected things
+ * (e.g. FPU ops) in the minimal decompression stub execution environment.
+ */
+#include "error.h"
+
 #include "../string.c"
 
 #ifdef CONFIG_X86_32
-void *memcpy(void *dest, const void *src, size_t n)
+static void *__memcpy(void *dest, const void *src, size_t n)
 {
 	int d0, d1, d2;
 	asm volatile(
@@ -15,7 +24,7 @@
 	return dest;
 }
 #else
-void *memcpy(void *dest, const void *src, size_t n)
+static void *__memcpy(void *dest, const void *src, size_t n)
 {
 	long d0, d1, d2;
 	asm volatile(
@@ -39,3 +48,27 @@
 		ss[i] = c;
 	return s;
 }
+
+void *memmove(void *dest, const void *src, size_t n)
+{
+	unsigned char *d = dest;
+	const unsigned char *s = src;
+
+	if (d <= s || d - s >= n)
+		return __memcpy(dest, src, n);
+
+	while (n-- > 0)
+		d[n] = s[n];
+
+	return dest;
+}
+
+/* Detect and warn about potential overlaps, but handle them with memmove. */
+void *memcpy(void *dest, const void *src, size_t n)
+{
+	if (dest > src && dest - src < n) {
+		warn("Avoiding potentially unsafe overlapping memcpy()!");
+		return memmove(dest, src, n);
+	}
+	return __memcpy(dest, src, n);
+}
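
The overlap test in the new memcpy() is the usual one: a forward copy is
only unsafe when dest sits above src by fewer than n bytes, because the
copy would then clobber bytes it has not yet read. A small user-space
sketch of the same predicate, independent of the boot stub:

#include <stdio.h>
#include <string.h>

/* Same check as the boot-stub memcpy() above (user-space sketch). */
static int copy_would_overlap(const char *dest, const char *src, size_t n)
{
	return dest > src && (size_t)(dest - src) < n;
}

int main(void)
{
	char buf[16] = "abcdefgh";

	/* dest overlaps the tail of src: a forward memcpy would corrupt it. */
	printf("overlap: %d\n", copy_would_overlap(buf + 2, buf, 8));	/* 1 */

	/* Disjoint ranges: a plain forward copy is fine. */
	printf("overlap: %d\n", copy_would_overlap(buf + 8, buf, 8));	/* 0 */

	memmove(buf + 2, buf, 8);	/* the safe fallback the stub uses */
	printf("%s\n", buf);		/* prints "ababcdefgh" */
	return 0;
}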
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index 34d047c..e24e0a0 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -70,5 +70,6 @@
 		_epgtable = . ;
 	}
 #endif
+	. = ALIGN(PAGE_SIZE);	/* keep ZO size page aligned */
 	_end = .;
 }
diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c
index 45a0768..f0b8d6d 100644
--- a/arch/x86/boot/early_serial_console.c
+++ b/arch/x86/boot/early_serial_console.c
@@ -1,3 +1,7 @@
+/*
+ * Serial port routines for use during early boot reporting. This code is
+ * included from both the compressed kernel and the regular kernel.
+ */
 #include "boot.h"
 
 #define DEFAULT_SERIAL_PORT 0x3f8 /* ttyS0 */
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 6236b9e..3dd5be3 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -440,13 +440,116 @@
 
 pref_address:		.quad LOAD_PHYSICAL_ADDR	# preferred load addr
 
-#define ZO_INIT_SIZE	(ZO__end - ZO_startup_32 + ZO_z_extract_offset)
+#
+# Getting to provably safe in-place decompression is hard. Worst case
+# behaviours need to be analyzed. Here let's take the decompression of
+# a gzip-compressed kernel as an example, to illustrate it:
+#
+# The file layout of gzip compressed kernel is:
+#
+#    magic[2]
+#    method[1]
+#    flags[1]
+#    timestamp[4]
+#    extraflags[1]
+#    os[1]
+#    compressed data blocks[N]
+#    crc[4] orig_len[4]
+#
+# ... resulting in 18 bytes of uncompressed data overhead.
+#
+# (For more information, please refer to RFC 1951 and RFC 1952.)
+#
+# Files divided into blocks
+# 1 bit (last block flag)
+# 2 bits (block type)
+#
+# A new block occurs every 32K - 1 bytes, or when 50% compression
+# has been achieved. The smallest block type encoding is always used.
+#
+# stored:
+#    32 bits length in bytes.
+#
+# fixed:
+#    magic fixed tree.
+#    symbols.
+#
+# dynamic:
+#    dynamic tree encoding.
+#    symbols.
+#
+#
+# The buffer for decompression in place is the length of the uncompressed
+# data, plus a small amount extra to keep the algorithm safe. The
+# compressed data is placed at the end of the buffer.  The output pointer
+# is placed at the start of the buffer and the input pointer is placed
+# where the compressed data starts. Problems will occur when the output
+# pointer overruns the input pointer.
+#
+# The output pointer can only overrun the input pointer if the input
+# pointer is moving faster than the output pointer.  A condition only
+# triggered by data whose compressed form is larger than the uncompressed
+# form.
+#
+# The worst case at the block level is a growth of the compressed data
+# of 5 bytes per 32767 bytes.
+#
+# The worst case internal to a compressed block is very hard to figure.
+# The worst case can at least be bounded by having one bit that represents
+# 32764 bytes and then all of the rest of the bytes representing the very
+# very last byte.
+#
+# All of which is enough to compute an amount of extra data that is required
+# to be safe.  To avoid problems at the block level allocating 5 extra bytes
+# per 32767 bytes of data is sufficient.  To avoid problems internal to a
+# block adding an extra 32767 bytes (the worst case uncompressed block size)
+# is sufficient, to ensure that in the worst case the decompressed data for
+# block will stop the byte before the compressed data for a block begins.
+# To avoid problems with the compressed data's meta information an extra 18
+# bytes are needed.  Leading to the formula:
+#
+# extra_bytes = (uncompressed_size >> 12) + 32768 + 18
+#
+# Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
+# Adding 32768 instead of 32767 just makes for round numbers.
+#
+# The above analysis is for decompressing a gzip-compressed kernel only.
+# Up to now, 6 different decompressors are supported altogether. Among them,
+# xz stores data in chunks and has a maximum chunk size of 64K. Hence the
+# safety margin should be updated to cover all decompressors so that we
+# don't need to deal with each of them separately. Please check the
+# description in lib/decompressor_xxx.c for specific information.
+#
+# extra_bytes = (uncompressed_size >> 12) + 65536 + 128
+
+#define ZO_z_extra_bytes	((ZO_z_output_len >> 12) + 65536 + 128)
+#if ZO_z_output_len > ZO_z_input_len
+# define ZO_z_extract_offset	(ZO_z_output_len + ZO_z_extra_bytes - \
+				 ZO_z_input_len)
+#else
+# define ZO_z_extract_offset	ZO_z_extra_bytes
+#endif
+
+/*
+ * The extract_offset has to be bigger than the ZO head section. Otherwise,
+ * when the head code is running to move ZO to the end of the buffer, it
+ * will overwrite the head code itself.
+ */
+#if (ZO__ehead - ZO_startup_32) > ZO_z_extract_offset
+# define ZO_z_min_extract_offset ((ZO__ehead - ZO_startup_32 + 4095) & ~4095)
+#else
+# define ZO_z_min_extract_offset ((ZO_z_extract_offset + 4095) & ~4095)
+#endif
+
+#define ZO_INIT_SIZE	(ZO__end - ZO_startup_32 + ZO_z_min_extract_offset)
+
 #define VO_INIT_SIZE	(VO__end - VO__text)
 #if ZO_INIT_SIZE > VO_INIT_SIZE
-#define INIT_SIZE ZO_INIT_SIZE
+# define INIT_SIZE ZO_INIT_SIZE
 #else
-#define INIT_SIZE VO_INIT_SIZE
+# define INIT_SIZE VO_INIT_SIZE
 #endif
+
 init_size:		.long INIT_SIZE		# kernel initialization size
 handover_offset:	.long 0			# Filled in by build.c
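
As a worked example of the formulas above, the following stand-alone
program mirrors the preprocessor arithmetic with invented sizes (a 7 MiB
compressed ZO, a 26 MiB uncompressed output, a 4 KiB ZO head section); the
real values come from the generated voffset.h/zoffset.h symbols at build
time:

#include <stdio.h>

int main(void)
{
	/* Invented inputs; the build derives the real ones from the images. */
	unsigned long z_input_len  =  7UL << 20;	/* compressed ZO   */
	unsigned long z_output_len = 26UL << 20;	/* uncompressed VO */
	unsigned long zo_head      = 4096;	/* ZO__ehead - ZO_startup_32 */

	unsigned long extra = (z_output_len >> 12) + 65536 + 128;
	unsigned long extract_offset;

	/* Same #if logic as above. */
	if (z_output_len > z_input_len)
		extract_offset = z_output_len + extra - z_input_len;
	else
		extract_offset = extra;

	/* The offset must also clear the ZO head code, rounded to 4K. */
	if (zo_head > extract_offset)
		extract_offset = zo_head;
	extract_offset = (extract_offset + 4095) & ~4095UL;

	printf("extra_bytes    = %lu\n", extra);
	printf("extract_offset = %lu (0x%lx)\n", extract_offset, extract_offset);
	return 0;
}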
 
diff --git a/arch/x86/configs/kvm_guest.config b/arch/x86/configs/kvm_guest.config
index f9affcc..9906505 100644
--- a/arch/x86/configs/kvm_guest.config
+++ b/arch/x86/configs/kvm_guest.config
@@ -26,3 +26,6 @@
 CONFIG_9P_FS=y
 CONFIG_NET_9P=y
 CONFIG_NET_9P_VIRTIO=y
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_VIRTIO_INPUT=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 4f404a6..0c8d796 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -173,6 +173,7 @@
 CONFIG_NET_TULIP=y
 CONFIG_E100=y
 CONFIG_E1000=y
+CONFIG_E1000E=y
 CONFIG_SKY2=y
 CONFIG_FORCEDETH=y
 CONFIG_8139TOO=y
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 064c7e2..5b7fa14 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1477,7 +1477,7 @@
 	}
 	aesni_ctr_enc_tfm = aesni_ctr_enc;
 #ifdef CONFIG_AS_AVX
-	if (cpu_has_avx) {
+	if (boot_cpu_has(X86_FEATURE_AVX)) {
 		/* optimize performance of ctr mode encryption transform */
 		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
 		pr_info("AES CTR mode by8 optimization enabled\n");
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index d844569..60907c1 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -562,7 +562,10 @@
 {
 	const char *feature_name;
 
-	if (!cpu_has_avx2 || !cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+	if (!boot_cpu_has(X86_FEATURE_AVX) ||
+	    !boot_cpu_has(X86_FEATURE_AVX2) ||
+	    !boot_cpu_has(X86_FEATURE_AES) ||
+	    !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 		pr_info("AVX2 or AES-NI instructions are not detected.\n");
 		return -ENODEV;
 	}
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 93d8f29..d96429d 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -554,7 +554,9 @@
 {
 	const char *feature_name;
 
-	if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+	if (!boot_cpu_has(X86_FEATURE_AVX) ||
+	    !boot_cpu_has(X86_FEATURE_AES) ||
+	    !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 		pr_info("AVX or AES-NI instructions are not detected.\n");
 		return -ENODEV;
 	}
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 8baaff5..2d5c2e0b 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -129,7 +129,8 @@
 		return -ENODEV;
 
 #ifdef CONFIG_AS_AVX2
-	chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
+	chacha20_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
+			    boot_cpu_has(X86_FEATURE_AVX2) &&
 			    cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
 #endif
 	return crypto_register_alg(&alg);
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 4264a3d..e32142b 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -179,11 +179,12 @@
 
 static int __init poly1305_simd_mod_init(void)
 {
-	if (!cpu_has_xmm2)
+	if (!boot_cpu_has(X86_FEATURE_XMM2))
 		return -ENODEV;
 
 #ifdef CONFIG_AS_AVX2
-	poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
+	poly1305_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
+			    boot_cpu_has(X86_FEATURE_AVX2) &&
 			    cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
 	alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
 	if (poly1305_use_avx2)
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 6d19834..870f6d8 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -538,7 +538,7 @@
 {
 	const char *feature_name;
 
-	if (!cpu_has_avx2 || !cpu_has_osxsave) {
+	if (!boot_cpu_has(X86_FEATURE_AVX2) || !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 		pr_info("AVX2 instructions are not detected.\n");
 		return -ENODEV;
 	}
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 8943407..644f97a 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -600,7 +600,7 @@
 
 static int __init serpent_sse2_init(void)
 {
-	if (!cpu_has_xmm2) {
+	if (!boot_cpu_has(X86_FEATURE_XMM2)) {
 		printk(KERN_INFO "SSE2 instructions are not detected.\n");
 		return -ENODEV;
 	}
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a8a0224..9c5af33 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -102,14 +102,14 @@
 static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state);
 static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state);
 
-inline void sha1_init_digest(uint32_t *digest)
+static inline void sha1_init_digest(uint32_t *digest)
 {
 	static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
 					SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
 	memcpy(digest, initial_digest, sizeof(initial_digest));
 }
 
-inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
+static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
 			 uint32_t total_len)
 {
 	uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);
@@ -453,10 +453,10 @@
 
 			req = cast_mcryptd_ctx_to_req(req_ctx);
 			if (irqs_disabled())
-				rctx->complete(&req->base, ret);
+				req_ctx->complete(&req->base, ret);
 			else {
 				local_bh_disable();
-				rctx->complete(&req->base, ret);
+				req_ctx->complete(&req->base, ret);
 				local_bh_enable();
 			}
 		}
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index dd14616..1024e37 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -166,7 +166,7 @@
 static bool avx_usable(void)
 {
 	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
-		if (cpu_has_avx)
+		if (boot_cpu_has(X86_FEATURE_AVX))
 			pr_info("AVX detected but unusable.\n");
 		return false;
 	}
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 5f4d608..3ae0f43 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -201,7 +201,7 @@
 static bool avx_usable(void)
 {
 	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
-		if (cpu_has_avx)
+		if (boot_cpu_has(X86_FEATURE_AVX))
 			pr_info("AVX detected but unusable.\n");
 		return false;
 	}
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 34e5083..0b17c83 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -151,7 +151,7 @@
 static bool avx_usable(void)
 {
 	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
-		if (cpu_has_avx)
+		if (boot_cpu_has(X86_FEATURE_AVX))
 			pr_info("AVX detected but unusable.\n");
 		return false;
 	}
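
The crypto hunks above are all the same mechanical conversion: the removed
cpu_has_avx/cpu_has_avx2/cpu_has_aes/cpu_has_xmm2/cpu_has_osxsave macros
become boot_cpu_has(X86_FEATURE_*) checks, with cpu_has_xfeatures() still
deciding whether the OS actually saves the SSE/YMM state. A condensed
sketch of the resulting init-time pattern, not tied to any single driver
above:

/* Sketch only: the generic shape of the converted init-time checks. */
static int __init example_avx2_init(void)
{
	const char *feature_name;

	/* CPUID-level feature bits, via the boot CPU capability array. */
	if (!boot_cpu_has(X86_FEATURE_AVX) ||
	    !boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_OSXSAVE))
		return -ENODEV;

	/* The OS must also be saving/restoring the SSE and YMM state. */
	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
			       &feature_name)) {
		pr_info("CPU feature '%s' is not supported.\n", feature_name);
		return -ENODEV;
	}

	return 0;
}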
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index e79d93d..ec138e5 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -191,7 +191,7 @@
 
 long syscall_trace_enter(struct pt_regs *regs)
 {
-	u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
+	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
 	unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);
 
 	if (phase1_result == 0)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 10868aa..983e5d3 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -207,10 +207,7 @@
 ENTRY(ret_from_fork)
 	pushl	%eax
 	call	schedule_tail
-	GET_THREAD_INFO(%ebp)
 	popl	%eax
-	pushl	$0x0202				# Reset kernel eflags
-	popfl
 
 	/* When we fork, we trace the syscall return in the child, too. */
 	movl    %esp, %eax
@@ -221,10 +218,7 @@
 ENTRY(ret_from_kernel_thread)
 	pushl	%eax
 	call	schedule_tail
-	GET_THREAD_INFO(%ebp)
 	popl	%eax
-	pushl	$0x0202				# Reset kernel eflags
-	popfl
 	movl	PT_EBP(%esp), %eax
 	call	*PT_EBX(%esp)
 	movl	$0, PT_EAX(%esp)
@@ -251,7 +245,6 @@
 ret_from_exception:
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
-	GET_THREAD_INFO(%ebp)
 #ifdef CONFIG_VM86
 	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
 	movb	PT_CS(%esp), %al
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 858b555..9ee0da1 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -372,9 +372,6 @@
 ENTRY(ret_from_fork)
 	LOCK ; btr $TIF_FORK, TI_flags(%r8)
 
-	pushq	$0x0002
-	popfq					/* reset kernel eflags */
-
 	call	schedule_tail			/* rdi: 'prev' task parameter */
 
 	testb	$3, CS(%rsp)			/* from kernel_thread? */
@@ -781,19 +778,25 @@
 	pushfq
 	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 	SWAPGS
-gs_change:
+.Lgs_change:
 	movl	%edi, %gs
-2:	mfence					/* workaround */
+2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 	SWAPGS
 	popfq
 	ret
 END(native_load_gs_index)
 
-	_ASM_EXTABLE(gs_change, bad_gs)
+	_ASM_EXTABLE(.Lgs_change, bad_gs)
 	.section .fixup, "ax"
 	/* running with kernelgs */
 bad_gs:
 	SWAPGS					/* switch back to user gs */
+.macro ZAP_GS
+	/* This can't be a string because the preprocessor needs to see it. */
+	movl $__USER_DS, %eax
+	movl %eax, %gs
+.endm
+	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
 	xorl	%eax, %eax
 	movl	%eax, %gs
 	jmp	2b
@@ -1019,13 +1022,13 @@
 	movl	%ecx, %eax			/* zero extend */
 	cmpq	%rax, RIP+8(%rsp)
 	je	.Lbstep_iret
-	cmpq	$gs_change, RIP+8(%rsp)
+	cmpq	$.Lgs_change, RIP+8(%rsp)
 	jne	.Lerror_entry_done
 
 	/*
-	 * hack: gs_change can fail with user gsbase.  If this happens, fix up
+	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
 	 * gsbase and proceed.  We'll fix up the exception and land in
-	 * gs_change's error handler with kernel gsbase.
+	 * .Lgs_change's error handler with kernel gsbase.
 	 */
 	jmp	.Lerror_entry_from_usermode_swapgs
 
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 847f2f0..e1721da 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -72,24 +72,23 @@
 	pushfq				/* pt_regs->flags (except IF = 0) */
 	orl	$X86_EFLAGS_IF, (%rsp)	/* Fix saved flags */
 	pushq	$__USER32_CS		/* pt_regs->cs */
-	xorq    %r8,%r8
-	pushq	%r8			/* pt_regs->ip = 0 (placeholder) */
+	pushq	$0			/* pt_regs->ip = 0 (placeholder) */
 	pushq	%rax			/* pt_regs->orig_ax */
 	pushq	%rdi			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
 	pushq	%rdx			/* pt_regs->dx */
 	pushq	%rcx			/* pt_regs->cx */
 	pushq	$-ENOSYS		/* pt_regs->ax */
-	pushq   %r8                     /* pt_regs->r8  = 0 */
-	pushq   %r8                     /* pt_regs->r9  = 0 */
-	pushq   %r8                     /* pt_regs->r10 = 0 */
-	pushq   %r8                     /* pt_regs->r11 = 0 */
+	pushq   $0			/* pt_regs->r8  = 0 */
+	pushq   $0			/* pt_regs->r9  = 0 */
+	pushq   $0			/* pt_regs->r10 = 0 */
+	pushq   $0			/* pt_regs->r11 = 0 */
 	pushq   %rbx                    /* pt_regs->rbx */
 	pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
-	pushq   %r8                     /* pt_regs->r12 = 0 */
-	pushq   %r8                     /* pt_regs->r13 = 0 */
-	pushq   %r8                     /* pt_regs->r14 = 0 */
-	pushq   %r8                     /* pt_regs->r15 = 0 */
+	pushq   $0			/* pt_regs->r12 = 0 */
+	pushq   $0			/* pt_regs->r13 = 0 */
+	pushq   $0			/* pt_regs->r14 = 0 */
+	pushq   $0			/* pt_regs->r15 = 0 */
 	cld
 
 	/*
@@ -205,17 +204,16 @@
 	pushq	%rdx			/* pt_regs->dx */
 	pushq	%rbp			/* pt_regs->cx (stashed in bp) */
 	pushq	$-ENOSYS		/* pt_regs->ax */
-	xorq    %r8,%r8
-	pushq   %r8                     /* pt_regs->r8  = 0 */
-	pushq   %r8                     /* pt_regs->r9  = 0 */
-	pushq   %r8                     /* pt_regs->r10 = 0 */
-	pushq   %r8                     /* pt_regs->r11 = 0 */
+	pushq   $0			/* pt_regs->r8  = 0 */
+	pushq   $0			/* pt_regs->r9  = 0 */
+	pushq   $0			/* pt_regs->r10 = 0 */
+	pushq   $0			/* pt_regs->r11 = 0 */
 	pushq   %rbx                    /* pt_regs->rbx */
 	pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
-	pushq   %r8                     /* pt_regs->r12 = 0 */
-	pushq   %r8                     /* pt_regs->r13 = 0 */
-	pushq   %r8                     /* pt_regs->r14 = 0 */
-	pushq   %r8                     /* pt_regs->r15 = 0 */
+	pushq   $0			/* pt_regs->r12 = 0 */
+	pushq   $0			/* pt_regs->r13 = 0 */
+	pushq   $0			/* pt_regs->r14 = 0 */
+	pushq   $0			/* pt_regs->r15 = 0 */
 
 	/*
 	 * User mode is traced as though IRQs are on, and SYSENTER
@@ -316,11 +314,10 @@
 	pushq	%rdx			/* pt_regs->dx */
 	pushq	%rcx			/* pt_regs->cx */
 	pushq	$-ENOSYS		/* pt_regs->ax */
-	xorq    %r8,%r8
-	pushq   %r8                     /* pt_regs->r8  = 0 */
-	pushq   %r8                     /* pt_regs->r9  = 0 */
-	pushq   %r8                     /* pt_regs->r10 = 0 */
-	pushq   %r8                     /* pt_regs->r11 = 0 */
+	pushq   $0			/* pt_regs->r8  = 0 */
+	pushq   $0			/* pt_regs->r9  = 0 */
+	pushq   $0			/* pt_regs->r10 = 0 */
+	pushq   $0			/* pt_regs->r11 = 0 */
 	pushq   %rbx                    /* pt_regs->rbx */
 	pushq   %rbp                    /* pt_regs->rbp */
 	pushq   %r12                    /* pt_regs->r12 */
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index b30dd81..4cddd17 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -384,5 +384,5 @@
 375	i386	membarrier		sys_membarrier
 376	i386	mlock2			sys_mlock2
 377	i386	copy_file_range		sys_copy_file_range
-378	i386	preadv2			sys_preadv2
-379	i386	pwritev2		sys_pwritev2
+378	i386	preadv2			sys_preadv2			compat_sys_preadv2
+379	i386	pwritev2		sys_pwritev2			compat_sys_pwritev2
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index cac6d17..555263e 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -374,3 +374,5 @@
 543	x32	io_setup		compat_sys_io_setup
 544	x32	io_submit		compat_sys_io_submit
 545	x32	execveat		compat_sys_execveat/ptregs
+534	x32	preadv2			compat_sys_preadv2
+535	x32	pwritev2		compat_sys_pwritev2
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 03c3eb7..2f02d23 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -13,7 +13,6 @@
 
 #include <uapi/linux/time.h>
 #include <asm/vgtod.h>
-#include <asm/hpet.h>
 #include <asm/vvar.h>
 #include <asm/unistd.h>
 #include <asm/msr.h>
@@ -28,16 +27,6 @@
 extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
 extern time_t __vdso_time(time_t *t);
 
-#ifdef CONFIG_HPET_TIMER
-extern u8 hpet_page
-	__attribute__((visibility("hidden")));
-
-static notrace cycle_t vread_hpet(void)
-{
-	return *(const volatile u32 *)(&hpet_page + HPET_COUNTER);
-}
-#endif
-
 #ifdef CONFIG_PARAVIRT_CLOCK
 extern u8 pvclock_page
 	__attribute__((visibility("hidden")));
@@ -195,10 +184,6 @@
 
 	if (gtod->vclock_mode == VCLOCK_TSC)
 		cycles = vread_tsc();
-#ifdef CONFIG_HPET_TIMER
-	else if (gtod->vclock_mode == VCLOCK_HPET)
-		cycles = vread_hpet();
-#endif
 #ifdef CONFIG_PARAVIRT_CLOCK
 	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
 		cycles = vread_pvclock(mode);
diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S
index 4158acc..a708aa9 100644
--- a/arch/x86/entry/vdso/vdso-layout.lds.S
+++ b/arch/x86/entry/vdso/vdso-layout.lds.S
@@ -25,7 +25,7 @@
 	 * segment.
 	 */
 
-	vvar_start = . - 3 * PAGE_SIZE;
+	vvar_start = . - 2 * PAGE_SIZE;
 	vvar_page = vvar_start;
 
 	/* Place all vvars at the offsets in asm/vvar.h. */
@@ -35,8 +35,7 @@
 #undef __VVAR_KERNEL_LDS
 #undef EMIT_VVAR
 
-	hpet_page = vvar_start + PAGE_SIZE;
-	pvclock_page = vvar_start + 2 * PAGE_SIZE;
+	pvclock_page = vvar_start + PAGE_SIZE;
 
 	. = SIZEOF_HEADERS;
 
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 10f7045..b3cf813 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -18,7 +18,6 @@
 #include <asm/vdso.h>
 #include <asm/vvar.h>
 #include <asm/page.h>
-#include <asm/hpet.h>
 #include <asm/desc.h>
 #include <asm/cpufeature.h>
 
@@ -129,16 +128,6 @@
 	if (sym_offset == image->sym_vvar_page) {
 		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
 				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
-	} else if (sym_offset == image->sym_hpet_page) {
-#ifdef CONFIG_HPET_TIMER
-		if (hpet_address && vclock_was_used(VCLOCK_HPET)) {
-			ret = vm_insert_pfn_prot(
-				vma,
-				(unsigned long)vmf->virtual_address,
-				hpet_address >> PAGE_SHIFT,
-				pgprot_noncached(PAGE_READONLY));
-		}
-#endif
 	} else if (sym_offset == image->sym_pvclock_page) {
 		struct pvclock_vsyscall_time_info *pvti =
 			pvclock_pvti_cpu0_va();
diff --git a/arch/x86/events/Kconfig b/arch/x86/events/Kconfig
new file mode 100644
index 0000000..98397db
--- /dev/null
+++ b/arch/x86/events/Kconfig
@@ -0,0 +1,36 @@
+menu "Performance monitoring"
+
+config PERF_EVENTS_INTEL_UNCORE
+	tristate "Intel uncore performance events"
+	depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
+	default y
+	---help---
+	Include support for Intel uncore performance events. These are
+	available on NehalemEX and more modern processors.
+
+config PERF_EVENTS_INTEL_RAPL
+	tristate "Intel rapl performance events"
+	depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
+	default y
+	---help---
+	Include support for Intel rapl performance events for power
+	monitoring on modern processors.
+
+config PERF_EVENTS_INTEL_CSTATE
+	tristate "Intel cstate performance events"
+	depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
+	default y
+	---help---
+	Include support for Intel cstate performance events for power
+	monitoring on modern processors.
+
+config PERF_EVENTS_AMD_POWER
+	depends on PERF_EVENTS && CPU_SUP_AMD
+	tristate "AMD Processor Power Reporting Mechanism"
+	---help---
+	  Provide power reporting mechanism support for AMD processors.
+	  Currently, it leverages the X86_FEATURE_ACC_POWER
+	  (CPUID Fn8000_0007_EDX[12]) interface to calculate the
+	  average power consumption on Family 15h processors.
+
+endmenu
diff --git a/arch/x86/events/Makefile b/arch/x86/events/Makefile
index f59618a..1d392c3 100644
--- a/arch/x86/events/Makefile
+++ b/arch/x86/events/Makefile
@@ -6,9 +6,6 @@
 ifdef CONFIG_AMD_IOMMU
 obj-$(CONFIG_CPU_SUP_AMD)               += amd/iommu.o
 endif
-obj-$(CONFIG_CPU_SUP_INTEL)		+= intel/core.o intel/bts.o intel/cqm.o
-obj-$(CONFIG_CPU_SUP_INTEL)		+= intel/cstate.o intel/ds.o intel/knc.o 
-obj-$(CONFIG_CPU_SUP_INTEL)		+= intel/lbr.o intel/p4.o intel/p6.o intel/pt.o
-obj-$(CONFIG_CPU_SUP_INTEL)		+= intel/rapl.o msr.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE)	+= intel/uncore.o intel/uncore_nhmex.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE)	+= intel/uncore_snb.o intel/uncore_snbep.o
+
+obj-$(CONFIG_CPU_SUP_INTEL)		+= msr.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= intel/
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 049ada8d..bd3e842 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -115,7 +115,7 @@
 /*
  * AMD Performance Monitor K7 and later.
  */
-static const u64 amd_perfmon_event_map[] =
+static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
   [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
   [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
@@ -369,7 +369,7 @@
 
 	WARN_ON_ONCE(cpuc->amd_nb);
 
-	if (boot_cpu_data.x86_max_cores < 2)
+	if (!x86_pmu.amd_nb_constraints)
 		return NOTIFY_OK;
 
 	cpuc->amd_nb = amd_alloc_nb(cpu);
@@ -388,7 +388,7 @@
 
 	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
-	if (boot_cpu_data.x86_max_cores < 2)
+	if (!x86_pmu.amd_nb_constraints)
 		return;
 
 	nb_id = amd_get_nb_id(cpu);
@@ -414,7 +414,7 @@
 {
 	struct cpu_hw_events *cpuhw;
 
-	if (boot_cpu_data.x86_max_cores < 2)
+	if (!x86_pmu.amd_nb_constraints)
 		return;
 
 	cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -648,6 +648,8 @@
 	.cpu_prepare		= amd_pmu_cpu_prepare,
 	.cpu_starting		= amd_pmu_cpu_starting,
 	.cpu_dead		= amd_pmu_cpu_dead,
+
+	.amd_nb_constraints	= 1,
 };
 
 static int __init amd_core_pmu_init(void)
@@ -674,6 +676,11 @@
 	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
 	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
 	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
+	/*
+	 * AMD Core perfctr has separate MSRs for the NB events, see
+	 * the amd/uncore.c driver.
+	 */
+	x86_pmu.amd_nb_constraints = 0;
 
 	pr_cont("core perfctr, ");
 	return 0;
@@ -693,6 +700,14 @@
 	if (ret)
 		return ret;
 
+	if (num_possible_cpus() == 1) {
+		/*
+		 * No point in allocating data structures to serialize
+		 * against other CPUs, when there is only the one CPU.
+		 */
+		x86_pmu.amd_nb_constraints = 0;
+	}
+
 	/* Events are common for all AMDs */
 	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
 	       sizeof(hw_cache_event_ids));
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 3ea25c3..feb90f6 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -28,10 +28,46 @@
 #define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
 #define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
 
+
+/*
+ * IBS states:
+ *
+ * ENABLED; tracks the pmu::add(), pmu::del() state: when set, the counter
+ * is taken and any further add()s must fail.
+ *
+ * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
+ * complicated by the fact that the IBS hardware can send late NMIs (ie. after
+ * we've cleared the EN bit).
+ *
+ * In order to consume these late NMIs we have the STOPPED state, any NMI that
+ * happens after we've cleared the EN state will clear this bit and report the
+ * NMI handled (this is fundamentally racy in the face of multiple NMI sources;
+ * someone else can consume our bit and our NMI will go unhandled).
+ *
+ * And since we cannot set/clear this separate bit together with the EN bit,
+ * there are races; if we cleared STARTED early, an NMI could land in
+ * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
+ * could happen if the period is small enough), and consume our STOPPED bit
+ * and trigger streams of unhandled NMIs.
+ *
+ * If, however, we clear STARTED late, an NMI can hit between clearing the
+ * EN bit and clearing STARTED, still see STARTED set and process the event.
+ * If that event has the VALID bit clear, we bail properly, but this
+ * is not a given. With VALID set we can end up calling pmu::stop() again
+ * (the throttle logic) and trigger the WARNs in there.
+ *
+ * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
+ * nesting, and clear STARTED late, so that we have a well defined state over
+ * the clearing of the EN bit.
+ *
+ * XXX: we could probably be using !atomic bitops for all this.
+ */
+
 enum ibs_states {
 	IBS_ENABLED	= 0,
 	IBS_STARTED	= 1,
 	IBS_STOPPING	= 2,
+	IBS_STOPPED	= 3,
 
 	IBS_MAX_STATES,
 };
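
The comment above boils down to a strict ordering: pmu::stop() latches
STOPPING first (so a nested stop bails out), sets STOPPED before clearing
the hardware EN bit (so a late NMI has something to consume), and clears
STARTED last; the NMI path then consumes STOPPED at most once. A condensed
sketch of that ordering, mirroring the hunks below rather than replacing
them:

/* Sketch of the ordering only; see the real perf_ibs_stop()/NMI handler. */
static void ibs_stop_sketch(unsigned long *state)
{
	if (test_and_set_bit(IBS_STOPPING, state))
		return;				/* already stopping */

	set_bit(IBS_STOPPED, state);		/* visible before EN is cleared */
	/* ... clear the EN bit in the IBS control MSR here ... */
	clear_bit(IBS_STARTED, state);		/* cleared last, after EN */
}

static int ibs_nmi_sketch(unsigned long *state)
{
	if (!test_bit(IBS_STARTED, state))
		/* Consume at most one late NMI generated after EN was cleared. */
		return test_and_clear_bit(IBS_STOPPED, state);

	/* ... normal sample processing ... */
	return 1;
}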
@@ -377,11 +413,10 @@
 
 	perf_ibs_set_period(perf_ibs, hwc, &period);
 	/*
-	 * Set STARTED before enabling the hardware, such that
-	 * a subsequent NMI must observe it. Then clear STOPPING
-	 * such that we don't consume NMIs by accident.
+	 * Set STARTED before enabling the hardware, such that a subsequent NMI
+	 * must observe it.
 	 */
-	set_bit(IBS_STARTED, pcpu->state);
+	set_bit(IBS_STARTED,    pcpu->state);
 	clear_bit(IBS_STOPPING, pcpu->state);
 	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
 
@@ -396,6 +431,9 @@
 	u64 config;
 	int stopping;
 
+	if (test_and_set_bit(IBS_STOPPING, pcpu->state))
+		return;
+
 	stopping = test_bit(IBS_STARTED, pcpu->state);
 
 	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
@@ -405,12 +443,12 @@
 
 	if (stopping) {
 		/*
-		 * Set STOPPING before disabling the hardware, such that it
+		 * Set STOPPED before disabling the hardware, such that it
 		 * must be visible to NMIs the moment we clear the EN bit,
 		 * at which point we can generate an !VALID sample which
 		 * we need to consume.
 		 */
-		set_bit(IBS_STOPPING, pcpu->state);
+		set_bit(IBS_STOPPED, pcpu->state);
 		perf_ibs_disable_event(perf_ibs, hwc, config);
 		/*
 		 * Clear STARTED after disabling the hardware; if it were
@@ -556,7 +594,7 @@
 		 * with samples that even have the valid bit cleared.
 		 * Mark all this NMIs as handled.
 		 */
-		if (test_and_clear_bit(IBS_STOPPING, pcpu->state))
+		if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
 			return 1;
 
 		return 0;
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index 40625ca..6011a57 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -474,6 +474,7 @@
 
 static struct perf_amd_iommu __perf_iommu = {
 	.pmu = {
+		.task_ctx_nr    = perf_invalid_context,
 		.event_init	= perf_iommu_event_init,
 		.add		= perf_iommu_add,
 		.del		= perf_iommu_del,
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 3db9569..98ac573 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -263,6 +263,7 @@
 };
 
 static struct pmu amd_nb_pmu = {
+	.task_ctx_nr	= perf_invalid_context,
 	.attr_groups	= amd_uncore_attr_groups,
 	.name		= "amd_nb",
 	.event_init	= amd_uncore_event_init,
@@ -274,6 +275,7 @@
 };
 
 static struct pmu amd_l2_pmu = {
+	.task_ctx_nr	= perf_invalid_context,
 	.attr_groups	= amd_uncore_attr_groups,
 	.name		= "amd_l2",
 	.event_init	= amd_uncore_event_init,
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 041e442..73a75aa 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -360,6 +360,9 @@
 {
 	int i;
 
+	if (x86_pmu.lbr_pt_coexist)
+		return 0;
+
 	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
 		mutex_lock(&pmc_reserve_mutex);
 		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
@@ -380,6 +383,9 @@
 
 void x86_del_exclusive(unsigned int what)
 {
+	if (x86_pmu.lbr_pt_coexist)
+		return;
+
 	atomic_dec(&x86_pmu.lbr_exclusive[what]);
 	atomic_dec(&active_events);
 }
@@ -1518,7 +1524,7 @@
 
 static void __init pmu_check_apic(void)
 {
-	if (cpu_has_apic)
+	if (boot_cpu_has(X86_FEATURE_APIC))
 		return;
 
 	x86_pmu.apic = 0;
@@ -2177,7 +2183,7 @@
 	 * cap_user_time_zero doesn't make sense when we're using a different
 	 * time base for the records.
 	 */
-	if (event->clock == &local_clock) {
+	if (!event->attr.use_clockid) {
 		userpg->cap_user_time_zero = 1;
 		userpg->time_zero = data->cyc2ns_offset;
 	}
@@ -2277,7 +2283,7 @@
 
 	fp = compat_ptr(ss_base + regs->bp);
 	pagefault_disable();
-	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+	while (entry->nr < sysctl_perf_event_max_stack) {
 		unsigned long bytes;
 		frame.next_frame     = 0;
 		frame.return_address = 0;
@@ -2337,7 +2343,7 @@
 		return;
 
 	pagefault_disable();
-	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+	while (entry->nr < sysctl_perf_event_max_stack) {
 		unsigned long bytes;
 		frame.next_frame	     = NULL;
 		frame.return_address = 0;
diff --git a/arch/x86/events/intel/Makefile b/arch/x86/events/intel/Makefile
new file mode 100644
index 0000000..3660b2c
--- /dev/null
+++ b/arch/x86/events/intel/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_CPU_SUP_INTEL)		+= core.o bts.o cqm.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= ds.o knc.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= lbr.o p4.o p6.o pt.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)	+= intel-rapl.o
+intel-rapl-objs				:= rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE)	+= intel-uncore.o
+intel-uncore-objs			:= uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE)	+= intel-cstate.o
+intel-cstate-objs			:= cstate.o
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index b99dc92..0a6e393 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -171,18 +171,6 @@
 	memset(page_address(phys->page) + index, 0, phys->size - index);
 }
 
-static bool bts_buffer_is_full(struct bts_buffer *buf, struct bts_ctx *bts)
-{
-	if (buf->snapshot)
-		return false;
-
-	if (local_read(&buf->data_size) >= bts->handle.size ||
-	    bts->handle.size - local_read(&buf->data_size) < BTS_RECORD_SIZE)
-		return true;
-
-	return false;
-}
-
 static void bts_update(struct bts_ctx *bts)
 {
 	int cpu = raw_smp_processor_id();
@@ -213,18 +201,15 @@
 	}
 }
 
+static int
+bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
+
 static void __bts_event_start(struct perf_event *event)
 {
 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
 	struct bts_buffer *buf = perf_get_aux(&bts->handle);
 	u64 config = 0;
 
-	if (!buf || bts_buffer_is_full(buf, bts))
-		return;
-
-	event->hw.itrace_started = 1;
-	event->hw.state = 0;
-
 	if (!buf->snapshot)
 		config |= ARCH_PERFMON_EVENTSEL_INT;
 	if (!event->attr.exclude_kernel)
@@ -241,16 +226,41 @@
 	wmb();
 
 	intel_pmu_enable_bts(config);
+
 }
 
 static void bts_event_start(struct perf_event *event, int flags)
 {
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+	struct bts_buffer *buf;
+
+	buf = perf_aux_output_begin(&bts->handle, event);
+	if (!buf)
+		goto fail_stop;
+
+	if (bts_buffer_reset(buf, &bts->handle))
+		goto fail_end_stop;
+
+	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
+	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
+	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;
+
+	event->hw.itrace_started = 1;
+	event->hw.state = 0;
 
 	__bts_event_start(event);
 
 	/* PMI handler: this counter is running and likely generating PMIs */
 	ACCESS_ONCE(bts->started) = 1;
+
+	return;
+
+fail_end_stop:
+	perf_aux_output_end(&bts->handle, 0, false);
+
+fail_stop:
+	event->hw.state = PERF_HES_STOPPED;
 }
 
 static void __bts_event_stop(struct perf_event *event)
@@ -269,15 +279,32 @@
 
 static void bts_event_stop(struct perf_event *event, int flags)
 {
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+	struct bts_buffer *buf = perf_get_aux(&bts->handle);
 
 	/* PMI handler: don't restart this counter */
 	ACCESS_ONCE(bts->started) = 0;
 
 	__bts_event_stop(event);
 
-	if (flags & PERF_EF_UPDATE)
+	if (flags & PERF_EF_UPDATE) {
 		bts_update(bts);
+
+		if (buf) {
+			if (buf->snapshot)
+				bts->handle.head =
+					local_xchg(&buf->data_size,
+						   buf->nr_pages << PAGE_SHIFT);
+			perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
+					    !!local_xchg(&buf->lost, 0));
+		}
+
+		cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
+		cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
+		cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
+		cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
+	}
 }
 
 void intel_bts_enable_local(void)
@@ -417,34 +444,14 @@
 
 static void bts_event_del(struct perf_event *event, int mode)
 {
-	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
-	struct bts_buffer *buf = perf_get_aux(&bts->handle);
-
 	bts_event_stop(event, PERF_EF_UPDATE);
-
-	if (buf) {
-		if (buf->snapshot)
-			bts->handle.head =
-				local_xchg(&buf->data_size,
-					   buf->nr_pages << PAGE_SHIFT);
-		perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
-				    !!local_xchg(&buf->lost, 0));
-	}
-
-	cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
-	cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
-	cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
-	cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
 }
 
 static int bts_event_add(struct perf_event *event, int mode)
 {
-	struct bts_buffer *buf;
 	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int ret = -EBUSY;
 
 	event->hw.state = PERF_HES_STOPPED;
 
@@ -454,26 +461,10 @@
 	if (bts->handle.event)
 		return -EBUSY;
 
-	buf = perf_aux_output_begin(&bts->handle, event);
-	if (!buf)
-		return -EINVAL;
-
-	ret = bts_buffer_reset(buf, &bts->handle);
-	if (ret) {
-		perf_aux_output_end(&bts->handle, 0, false);
-		return ret;
-	}
-
-	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
-	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
-	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;
-
 	if (mode & PERF_EF_START) {
 		bts_event_start(event, 0);
-		if (hwc->state & PERF_HES_STOPPED) {
-			bts_event_del(event, 0);
-			return -EBUSY;
-		}
+		if (hwc->state & PERF_HES_STOPPED)
+			return -EINVAL;
 	}
 
 	return 0;
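
The bts.c hunks above move the AUX buffer setup out of bts_event_add() and into bts_event_start(), with the matching teardown done from bts_event_stop(); the fail_end_stop/fail_stop labels unwind in reverse acquisition order. A minimal userspace sketch of that unwind pattern follows -- the stand-ins for the perf AUX calls are hypothetical, not kernel APIs:

#include <stdio.h>

/* Hypothetical stand-ins for perf_aux_output_begin()/bts_buffer_reset(). */
static void *acquire_aux(void)    { return "buf"; }  /* NULL would mean failure */
static int   reset_buf(void *buf) { (void)buf; return -1; }  /* fail on purpose */
static void  release_aux(void)    { puts("perf_aux_output_end(handle, 0, false)"); }

static int start_event(void)
{
	void *buf = acquire_aux();

	if (!buf)
		goto fail_stop;
	if (reset_buf(buf))
		goto fail_end_stop;

	puts("event running");
	return 0;

fail_end_stop:
	release_aux();			/* undo acquire_aux() */
fail_stop:
	puts("hw.state = PERF_HES_STOPPED");
	return -1;
}

int main(void)
{
	return start_event() ? 1 : 0;
}
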
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 68fa55b..7c66695 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1465,6 +1465,140 @@
  },
 };
 
+static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
+	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
+	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
+	EVENT_EXTRA_END
+};
+
+#define GLM_DEMAND_DATA_RD		BIT_ULL(0)
+#define GLM_DEMAND_RFO			BIT_ULL(1)
+#define GLM_ANY_RESPONSE		BIT_ULL(16)
+#define GLM_SNP_NONE_OR_MISS		BIT_ULL(33)
+#define GLM_DEMAND_READ			GLM_DEMAND_DATA_RD
+#define GLM_DEMAND_WRITE		GLM_DEMAND_RFO
+#define GLM_DEMAND_PREFETCH		(SNB_PF_DATA_RD|SNB_PF_RFO)
+#define GLM_LLC_ACCESS			GLM_ANY_RESPONSE
+#define GLM_SNP_ANY			(GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
+#define GLM_LLC_MISS			(GLM_SNP_ANY|SNB_NON_DRAM)
+
+static __initconst const u64 glm_hw_cache_event_ids
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
+			[C(RESULT_MISS)]	= 0x0,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
+			[C(RESULT_MISS)]	= 0x0,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x0380,	/* ICACHE.ACCESSES */
+			[C(RESULT_MISS)]	= 0x0280,	/* ICACHE.MISSES */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+			[C(RESULT_MISS)]	= 0x1b7,	/* OFFCORE_RESPONSE */
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
+			[C(RESULT_MISS)]	= 0x0,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
+			[C(RESULT_MISS)]	= 0x0,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= 0x0,
+			[C(RESULT_MISS)]	= 0x0,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x00c0,	/* INST_RETIRED.ANY_P */
+			[C(RESULT_MISS)]	= 0x0481,	/* ITLB.MISS */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= 0x00c4,	/* BR_INST_RETIRED.ALL_BRANCHES */
+			[C(RESULT_MISS)]	= 0x00c5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= -1,
+			[C(RESULT_MISS)]	= -1,
+		},
+	},
+};
+
+static __initconst const u64 glm_hw_cache_extra_regs
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
+						  GLM_LLC_ACCESS,
+			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
+						  GLM_LLC_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
+						  GLM_LLC_ACCESS,
+			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
+						  GLM_LLC_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= GLM_DEMAND_PREFETCH|
+						  GLM_LLC_ACCESS,
+			[C(RESULT_MISS)]	= GLM_DEMAND_PREFETCH|
+						  GLM_LLC_MISS,
+		},
+	},
+};
+
 #define KNL_OT_L2_HITE		BIT_ULL(19) /* Other Tile L2 Hit */
 #define KNL_OT_L2_HITF		BIT_ULL(20) /* Other Tile L2 Hit */
 #define KNL_MCDRAM_LOCAL	BIT_ULL(21)
@@ -3447,7 +3581,7 @@
 		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
 		       sizeof(hw_cache_extra_regs));
 
-		intel_pmu_lbr_init_atom();
+		intel_pmu_lbr_init_slm();
 
 		x86_pmu.event_constraints = intel_slm_event_constraints;
 		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
@@ -3456,6 +3590,30 @@
 		pr_cont("Silvermont events, ");
 		break;
 
+	case 92: /* 14nm Atom "Goldmont" */
+	case 95: /* 14nm Atom "Goldmont Denverton" */
+		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
+		       sizeof(hw_cache_event_ids));
+		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
+		       sizeof(hw_cache_extra_regs));
+
+		intel_pmu_lbr_init_skl();
+
+		x86_pmu.event_constraints = intel_slm_event_constraints;
+		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
+		x86_pmu.extra_regs = intel_glm_extra_regs;
+		/*
+		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
+		 * for precise cycles.
+		 * :pp is identical to :ppp
+		 */
+		x86_pmu.pebs_aliases = NULL;
+		x86_pmu.pebs_prec_dist = true;
+		x86_pmu.lbr_pt_coexist = true;
+		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+		pr_cont("Goldmont events, ");
+		break;
+
 	case 37: /* 32nm Westmere    */
 	case 44: /* 32nm Westmere-EP */
 	case 47: /* 32nm Westmere-EX */
@@ -3637,8 +3795,11 @@
 		pr_cont("Knights Landing events, ");
 		break;
 
+	case 142: /* 14nm Kabylake Mobile */
+	case 158: /* 14nm Kabylake Desktop */
 	case 78: /* 14nm Skylake Mobile */
 	case 94: /* 14nm Skylake Desktop */
+	case 85: /* 14nm Skylake Server */
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -3705,7 +3866,7 @@
 				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
 			}
 			c->idxmsk64 &=
-				~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
+				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
 			c->weight = hweight64(c->idxmsk64);
 		}
 	}
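
Besides the Goldmont tables, the last core.c hunk above widens the fixed-counter mask computation from ~0UL to ~0ULL. INTEL_PMC_IDX_FIXED is 32, so on a 32-bit build the shift count meets or exceeds the width of unsigned long and the result is undefined; keeping the constant 64 bits wide avoids that. A standalone sketch, with three fixed counters as an example value:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	unsigned int idx_fixed = 32;	/* mirrors INTEL_PMC_IDX_FIXED */
	unsigned int num_fixed = 3;	/* example num_counters_fixed */

	/* bits 0..34 set: all general purpose plus the three fixed counters */
	uint64_t mask = ~(~0ULL << (idx_fixed + num_fixed));

	printf("idxmsk64 &= %#" PRIx64 "\n", mask);	/* 0x7ffffffff */
	return 0;
}
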
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 7946c42..9ba4e41 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -91,6 +91,8 @@
 #include <asm/cpu_device_id.h>
 #include "../perf_event.h"
 
+MODULE_LICENSE("GPL");
+
 #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
 static ssize_t __cstate_##_var##_show(struct kobject *kobj,	\
 				struct kobj_attribute *attr,	\
@@ -106,22 +108,27 @@
 				       struct device_attribute *attr,
 				       char *buf);
 
+/* Model -> events mapping */
+struct cstate_model {
+	unsigned long		core_events;
+	unsigned long		pkg_events;
+	unsigned long		quirks;
+};
+
+/* Quirk flags */
+#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
+
 struct perf_cstate_msr {
 	u64	msr;
 	struct	perf_pmu_events_attr *attr;
-	bool	(*test)(int idx);
 };
 
 
 /* cstate_core PMU */
-
 static struct pmu cstate_core_pmu;
 static bool has_cstate_core;
 
-enum perf_cstate_core_id {
-	/*
-	 * cstate_core events
-	 */
+enum perf_cstate_core_events {
 	PERF_CSTATE_CORE_C1_RES = 0,
 	PERF_CSTATE_CORE_C3_RES,
 	PERF_CSTATE_CORE_C6_RES,
@@ -130,69 +137,16 @@
 	PERF_CSTATE_CORE_EVENT_MAX,
 };
 
-bool test_core(int idx)
-{
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-	    boot_cpu_data.x86 != 6)
-		return false;
-
-	switch (boot_cpu_data.x86_model) {
-	case 30: /* 45nm Nehalem    */
-	case 26: /* 45nm Nehalem-EP */
-	case 46: /* 45nm Nehalem-EX */
-
-	case 37: /* 32nm Westmere    */
-	case 44: /* 32nm Westmere-EP */
-	case 47: /* 32nm Westmere-EX */
-		if (idx == PERF_CSTATE_CORE_C3_RES ||
-		    idx == PERF_CSTATE_CORE_C6_RES)
-			return true;
-		break;
-	case 42: /* 32nm SandyBridge         */
-	case 45: /* 32nm SandyBridge-E/EN/EP */
-
-	case 58: /* 22nm IvyBridge       */
-	case 62: /* 22nm IvyBridge-EP/EX */
-
-	case 60: /* 22nm Haswell Core */
-	case 63: /* 22nm Haswell Server */
-	case 69: /* 22nm Haswell ULT */
-	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
-
-	case 61: /* 14nm Broadwell Core-M */
-	case 86: /* 14nm Broadwell Xeon D */
-	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-	case 79: /* 14nm Broadwell Server */
-
-	case 78: /* 14nm Skylake Mobile */
-	case 94: /* 14nm Skylake Desktop */
-		if (idx == PERF_CSTATE_CORE_C3_RES ||
-		    idx == PERF_CSTATE_CORE_C6_RES ||
-		    idx == PERF_CSTATE_CORE_C7_RES)
-			return true;
-		break;
-	case 55: /* 22nm Atom "Silvermont"                */
-	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
-	case 76: /* 14nm Atom "Airmont"                   */
-		if (idx == PERF_CSTATE_CORE_C1_RES ||
-		    idx == PERF_CSTATE_CORE_C6_RES)
-			return true;
-		break;
-	}
-
-	return false;
-}
-
 PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
 PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
 PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
 PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");
 
 static struct perf_cstate_msr core_msr[] = {
-	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1,	test_core, },
-	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3, test_core, },
-	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6, test_core, },
-	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7,	test_core, },
+	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1 },
+	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3 },
+	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6 },
+	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7 },
 };
 
 static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
@@ -234,18 +188,11 @@
 	NULL,
 };
 
-/* cstate_core PMU end */
-
-
 /* cstate_pkg PMU */
-
 static struct pmu cstate_pkg_pmu;
 static bool has_cstate_pkg;
 
-enum perf_cstate_pkg_id {
-	/*
-	 * cstate_pkg events
-	 */
+enum perf_cstate_pkg_events {
 	PERF_CSTATE_PKG_C2_RES = 0,
 	PERF_CSTATE_PKG_C3_RES,
 	PERF_CSTATE_PKG_C6_RES,
@@ -257,69 +204,6 @@
 	PERF_CSTATE_PKG_EVENT_MAX,
 };
 
-bool test_pkg(int idx)
-{
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-	    boot_cpu_data.x86 != 6)
-		return false;
-
-	switch (boot_cpu_data.x86_model) {
-	case 30: /* 45nm Nehalem    */
-	case 26: /* 45nm Nehalem-EP */
-	case 46: /* 45nm Nehalem-EX */
-
-	case 37: /* 32nm Westmere    */
-	case 44: /* 32nm Westmere-EP */
-	case 47: /* 32nm Westmere-EX */
-		if (idx == PERF_CSTATE_CORE_C3_RES ||
-		    idx == PERF_CSTATE_CORE_C6_RES ||
-		    idx == PERF_CSTATE_CORE_C7_RES)
-			return true;
-		break;
-	case 42: /* 32nm SandyBridge         */
-	case 45: /* 32nm SandyBridge-E/EN/EP */
-
-	case 58: /* 22nm IvyBridge       */
-	case 62: /* 22nm IvyBridge-EP/EX */
-
-	case 60: /* 22nm Haswell Core */
-	case 63: /* 22nm Haswell Server */
-	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
-
-	case 61: /* 14nm Broadwell Core-M */
-	case 86: /* 14nm Broadwell Xeon D */
-	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-	case 79: /* 14nm Broadwell Server */
-
-	case 78: /* 14nm Skylake Mobile */
-	case 94: /* 14nm Skylake Desktop */
-		if (idx == PERF_CSTATE_PKG_C2_RES ||
-		    idx == PERF_CSTATE_PKG_C3_RES ||
-		    idx == PERF_CSTATE_PKG_C6_RES ||
-		    idx == PERF_CSTATE_PKG_C7_RES)
-			return true;
-		break;
-	case 55: /* 22nm Atom "Silvermont"                */
-	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
-	case 76: /* 14nm Atom "Airmont"                   */
-		if (idx == PERF_CSTATE_CORE_C6_RES)
-			return true;
-		break;
-	case 69: /* 22nm Haswell ULT */
-		if (idx == PERF_CSTATE_PKG_C2_RES ||
-		    idx == PERF_CSTATE_PKG_C3_RES ||
-		    idx == PERF_CSTATE_PKG_C6_RES ||
-		    idx == PERF_CSTATE_PKG_C7_RES ||
-		    idx == PERF_CSTATE_PKG_C8_RES ||
-		    idx == PERF_CSTATE_PKG_C9_RES ||
-		    idx == PERF_CSTATE_PKG_C10_RES)
-			return true;
-		break;
-	}
-
-	return false;
-}
-
 PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
 PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
 PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
@@ -329,13 +213,13 @@
 PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");
 
 static struct perf_cstate_msr pkg_msr[] = {
-	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2,	test_pkg, },
-	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3,	test_pkg, },
-	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6,	test_pkg, },
-	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7,	test_pkg, },
-	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8,	test_pkg, },
-	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9,	test_pkg, },
-	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10,	test_pkg, },
+	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2 },
+	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3 },
+	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6 },
+	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7 },
+	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8 },
+	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9 },
+	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10 },
 };
 
 static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
@@ -366,8 +250,6 @@
 	NULL,
 };
 
-/* cstate_pkg PMU end*/
-
 static ssize_t cstate_get_attr_cpumask(struct device *dev,
 				       struct device_attribute *attr,
 				       char *buf)
@@ -385,7 +267,7 @@
 static int cstate_pmu_event_init(struct perf_event *event)
 {
 	u64 cfg = event->attr.config;
-	int ret = 0;
+	int cpu;
 
 	if (event->attr.type != event->pmu->type)
 		return -ENOENT;
@@ -400,26 +282,36 @@
 	    event->attr.sample_period) /* no sampling */
 		return -EINVAL;
 
+	if (event->cpu < 0)
+		return -EINVAL;
+
 	if (event->pmu == &cstate_core_pmu) {
 		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
 			return -EINVAL;
 		if (!core_msr[cfg].attr)
 			return -EINVAL;
 		event->hw.event_base = core_msr[cfg].msr;
+		cpu = cpumask_any_and(&cstate_core_cpu_mask,
+				      topology_sibling_cpumask(event->cpu));
 	} else if (event->pmu == &cstate_pkg_pmu) {
 		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
 			return -EINVAL;
 		if (!pkg_msr[cfg].attr)
 			return -EINVAL;
 		event->hw.event_base = pkg_msr[cfg].msr;
-	} else
+		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
+				      topology_core_cpumask(event->cpu));
+	} else {
 		return -ENOENT;
+	}
 
-	/* must be done before validate_group */
+	if (cpu >= nr_cpu_ids)
+		return -ENODEV;
+
+	event->cpu = cpu;
 	event->hw.config = cfg;
 	event->hw.idx = -1;
-
-	return ret;
+	return 0;
 }
 
 static inline u64 cstate_pmu_read_counter(struct perf_event *event)
@@ -469,172 +361,91 @@
 	return 0;
 }
 
+/*
+ * Check if the exiting CPU is the designated reader. If so, migrate
+ * the events when there is a valid target available.
+ */
 static void cstate_cpu_exit(int cpu)
 {
-	int i, id, target;
+	unsigned int target;
 
-	/* cpu exit for cstate core */
-	if (has_cstate_core) {
-		id = topology_core_id(cpu);
-		target = -1;
+	if (has_cstate_core &&
+	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {
 
-		for_each_online_cpu(i) {
-			if (i == cpu)
-				continue;
-			if (id == topology_core_id(i)) {
-				target = i;
-				break;
-			}
-		}
-		if (cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask) && target >= 0)
+		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
+		/* Migrate events if there is a valid target */
+		if (target < nr_cpu_ids) {
 			cpumask_set_cpu(target, &cstate_core_cpu_mask);
-		WARN_ON(cpumask_empty(&cstate_core_cpu_mask));
-		if (target >= 0)
 			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
+		}
 	}
 
-	/* cpu exit for cstate pkg */
-	if (has_cstate_pkg) {
-		id = topology_physical_package_id(cpu);
-		target = -1;
+	if (has_cstate_pkg &&
+	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {
 
-		for_each_online_cpu(i) {
-			if (i == cpu)
-				continue;
-			if (id == topology_physical_package_id(i)) {
-				target = i;
-				break;
-			}
-		}
-		if (cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask) && target >= 0)
+		target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
+		/* Migrate events if there is a valid target */
+		if (target < nr_cpu_ids) {
 			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
-		WARN_ON(cpumask_empty(&cstate_pkg_cpu_mask));
-		if (target >= 0)
 			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
+		}
 	}
 }
 
 static void cstate_cpu_init(int cpu)
 {
-	int i, id;
+	unsigned int target;
 
-	/* cpu init for cstate core */
-	if (has_cstate_core) {
-		id = topology_core_id(cpu);
-		for_each_cpu(i, &cstate_core_cpu_mask) {
-			if (id == topology_core_id(i))
-				break;
-		}
-		if (i >= nr_cpu_ids)
-			cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
-	}
+	/*
+	 * If this is the first online thread of that core, set it in
+	 * the core cpu mask as the designated reader.
+	 */
+	target = cpumask_any_and(&cstate_core_cpu_mask,
+				 topology_sibling_cpumask(cpu));
 
-	/* cpu init for cstate pkg */
-	if (has_cstate_pkg) {
-		id = topology_physical_package_id(cpu);
-		for_each_cpu(i, &cstate_pkg_cpu_mask) {
-			if (id == topology_physical_package_id(i))
-				break;
-		}
-		if (i >= nr_cpu_ids)
-			cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
-	}
+	if (has_cstate_core && target >= nr_cpu_ids)
+		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
+
+	/*
+	 * If this is the first online thread of that package, set it
+	 * in the package cpu mask as the designated reader.
+	 */
+	target = cpumask_any_and(&cstate_pkg_cpu_mask,
+				 topology_core_cpumask(cpu));
+	if (has_cstate_pkg && target >= nr_cpu_ids)
+		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
 }
 
 static int cstate_cpu_notifier(struct notifier_block *self,
-				  unsigned long action, void *hcpu)
+			       unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		break;
 	case CPU_STARTING:
 		cstate_cpu_init(cpu);
 		break;
-	case CPU_UP_CANCELED:
-	case CPU_DYING:
-		break;
-	case CPU_ONLINE:
-	case CPU_DEAD:
-		break;
 	case CPU_DOWN_PREPARE:
 		cstate_cpu_exit(cpu);
 		break;
 	default:
 		break;
 	}
-
 	return NOTIFY_OK;
 }
 
-/*
- * Probe the cstate events and insert the available one into sysfs attrs
- * Return false if there is no available events.
- */
-static bool cstate_probe_msr(struct perf_cstate_msr *msr,
-			     struct attribute	**events_attrs,
-			     int max_event_nr)
-{
-	int i, j = 0;
-	u64 val;
-
-	/* Probe the cstate events. */
-	for (i = 0; i < max_event_nr; i++) {
-		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
-			msr[i].attr = NULL;
-	}
-
-	/* List remaining events in the sysfs attrs. */
-	for (i = 0; i < max_event_nr; i++) {
-		if (msr[i].attr)
-			events_attrs[j++] = &msr[i].attr->attr.attr;
-	}
-	events_attrs[j] = NULL;
-
-	return (j > 0) ? true : false;
-}
-
-static int __init cstate_init(void)
-{
-	/* SLM has different MSR for PKG C6 */
-	switch (boot_cpu_data.x86_model) {
-	case 55:
-	case 76:
-	case 77:
-		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
-	}
-
-	if (cstate_probe_msr(core_msr, core_events_attrs, PERF_CSTATE_CORE_EVENT_MAX))
-		has_cstate_core = true;
-
-	if (cstate_probe_msr(pkg_msr, pkg_events_attrs, PERF_CSTATE_PKG_EVENT_MAX))
-		has_cstate_pkg = true;
-
-	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
-}
-
-static void __init cstate_cpumask_init(void)
-{
-	int cpu;
-
-	cpu_notifier_register_begin();
-
-	for_each_online_cpu(cpu)
-		cstate_cpu_init(cpu);
-
-	__perf_cpu_notifier(cstate_cpu_notifier);
-
-	cpu_notifier_register_done();
-}
+static struct notifier_block cstate_cpu_nb = {
+	.notifier_call	= cstate_cpu_notifier,
+	.priority       = CPU_PRI_PERF + 1,
+};
 
 static struct pmu cstate_core_pmu = {
 	.attr_groups	= core_attr_groups,
 	.name		= "cstate_core",
 	.task_ctx_nr	= perf_invalid_context,
 	.event_init	= cstate_pmu_event_init,
-	.add		= cstate_pmu_event_add, /* must have */
-	.del		= cstate_pmu_event_del, /* must have */
+	.add		= cstate_pmu_event_add,
+	.del		= cstate_pmu_event_del,
 	.start		= cstate_pmu_event_start,
 	.stop		= cstate_pmu_event_stop,
 	.read		= cstate_pmu_event_update,
@@ -646,49 +457,203 @@
 	.name		= "cstate_pkg",
 	.task_ctx_nr	= perf_invalid_context,
 	.event_init	= cstate_pmu_event_init,
-	.add		= cstate_pmu_event_add, /* must have */
-	.del		= cstate_pmu_event_del, /* must have */
+	.add		= cstate_pmu_event_add,
+	.del		= cstate_pmu_event_del,
 	.start		= cstate_pmu_event_start,
 	.stop		= cstate_pmu_event_stop,
 	.read		= cstate_pmu_event_update,
 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 };
 
-static void __init cstate_pmus_register(void)
+static const struct cstate_model nhm_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
+				  BIT(PERF_CSTATE_PKG_C6_RES) |
+				  BIT(PERF_CSTATE_PKG_C7_RES),
+};
+
+static const struct cstate_model snb_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES) |
+				  BIT(PERF_CSTATE_CORE_C7_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
+				  BIT(PERF_CSTATE_PKG_C3_RES) |
+				  BIT(PERF_CSTATE_PKG_C6_RES) |
+				  BIT(PERF_CSTATE_PKG_C7_RES),
+};
+
+static const struct cstate_model hswult_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES) |
+				  BIT(PERF_CSTATE_CORE_C7_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
+				  BIT(PERF_CSTATE_PKG_C3_RES) |
+				  BIT(PERF_CSTATE_PKG_C6_RES) |
+				  BIT(PERF_CSTATE_PKG_C7_RES) |
+				  BIT(PERF_CSTATE_PKG_C8_RES) |
+				  BIT(PERF_CSTATE_PKG_C9_RES) |
+				  BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
+static const struct cstate_model slm_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES),
+
+	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
+	.quirks			= SLM_PKG_C6_USE_C7_MSR,
+};
+
+#define X86_CSTATES_MODEL(model, states)				\
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
+
+static const struct x86_cpu_id intel_cstates_match[] __initconst = {
+	X86_CSTATES_MODEL(30, nhm_cstates),    /* 45nm Nehalem              */
+	X86_CSTATES_MODEL(26, nhm_cstates),    /* 45nm Nehalem-EP           */
+	X86_CSTATES_MODEL(46, nhm_cstates),    /* 45nm Nehalem-EX           */
+
+	X86_CSTATES_MODEL(37, nhm_cstates),    /* 32nm Westmere             */
+	X86_CSTATES_MODEL(44, nhm_cstates),    /* 32nm Westmere-EP          */
+	X86_CSTATES_MODEL(47, nhm_cstates),    /* 32nm Westmere-EX          */
+
+	X86_CSTATES_MODEL(42, snb_cstates),    /* 32nm SandyBridge          */
+	X86_CSTATES_MODEL(45, snb_cstates),    /* 32nm SandyBridge-E/EN/EP  */
+
+	X86_CSTATES_MODEL(58, snb_cstates),    /* 22nm IvyBridge            */
+	X86_CSTATES_MODEL(62, snb_cstates),    /* 22nm IvyBridge-EP/EX      */
+
+	X86_CSTATES_MODEL(60, snb_cstates),    /* 22nm Haswell Core         */
+	X86_CSTATES_MODEL(63, snb_cstates),    /* 22nm Haswell Server       */
+	X86_CSTATES_MODEL(70, snb_cstates),    /* 22nm Haswell + GT3e       */
+
+	X86_CSTATES_MODEL(69, hswult_cstates), /* 22nm Haswell ULT          */
+
+	X86_CSTATES_MODEL(55, slm_cstates),    /* 22nm Atom Silvermont      */
+	X86_CSTATES_MODEL(77, slm_cstates),    /* 22nm Atom Avoton/Rangely  */
+	X86_CSTATES_MODEL(76, slm_cstates),    /* 22nm Atom Airmont         */
+
+	X86_CSTATES_MODEL(61, snb_cstates),    /* 14nm Broadwell Core-M     */
+	X86_CSTATES_MODEL(86, snb_cstates),    /* 14nm Broadwell Xeon D     */
+	X86_CSTATES_MODEL(71, snb_cstates),    /* 14nm Broadwell + GT3e     */
+	X86_CSTATES_MODEL(79, snb_cstates),    /* 14nm Broadwell Server     */
+
+	X86_CSTATES_MODEL(78, snb_cstates),    /* 14nm Skylake Mobile       */
+	X86_CSTATES_MODEL(94, snb_cstates),    /* 14nm Skylake Desktop      */
+	{ },
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
+
+/*
+ * Probe the cstate events and insert the available ones into the sysfs attrs.
+ * Return false if there are no available events.
+ */
+static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
+                                   struct perf_cstate_msr *msr,
+                                   struct attribute **attrs)
 {
-	int err;
+	bool found = false;
+	unsigned int bit;
+	u64 val;
+
+	for (bit = 0; bit < max; bit++) {
+		if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
+			*attrs++ = &msr[bit].attr->attr.attr;
+			found = true;
+		} else {
+			msr[bit].attr = NULL;
+		}
+	}
+	*attrs = NULL;
+
+	return found;
+}
+
+static int __init cstate_probe(const struct cstate_model *cm)
+{
+	/* SLM has different MSR for PKG C6 */
+	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
+		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
+
+	has_cstate_core = cstate_probe_msr(cm->core_events,
+					   PERF_CSTATE_CORE_EVENT_MAX,
+					   core_msr, core_events_attrs);
+
+	has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
+					  PERF_CSTATE_PKG_EVENT_MAX,
+					  pkg_msr, pkg_events_attrs);
+
+	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
+}
+
+static inline void cstate_cleanup(void)
+{
+	if (has_cstate_core)
+		perf_pmu_unregister(&cstate_core_pmu);
+
+	if (has_cstate_pkg)
+		perf_pmu_unregister(&cstate_pkg_pmu);
+}
+
+static int __init cstate_init(void)
+{
+	int cpu, err;
+
+	cpu_notifier_register_begin();
+	for_each_online_cpu(cpu)
+		cstate_cpu_init(cpu);
 
 	if (has_cstate_core) {
 		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
-		if (WARN_ON(err))
-			pr_info("Failed to register PMU %s error %d\n",
-				cstate_core_pmu.name, err);
+		if (err) {
+			has_cstate_core = false;
+			pr_info("Failed to register cstate core pmu\n");
+			goto out;
+		}
 	}
 
 	if (has_cstate_pkg) {
 		err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
-		if (WARN_ON(err))
-			pr_info("Failed to register PMU %s error %d\n",
-				cstate_pkg_pmu.name, err);
+		if (err) {
+			has_cstate_pkg = false;
+			pr_info("Failed to register cstate pkg pmu\n");
+			cstate_cleanup();
+			goto out;
+		}
 	}
+	__register_cpu_notifier(&cstate_cpu_nb);
+out:
+	cpu_notifier_register_done();
+	return err;
 }
 
 static int __init cstate_pmu_init(void)
 {
+	const struct x86_cpu_id *id;
 	int err;
 
-	if (cpu_has_hypervisor)
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return -ENODEV;
 
-	err = cstate_init();
+	id = x86_match_cpu(intel_cstates_match);
+	if (!id)
+		return -ENODEV;
+
+	err = cstate_probe((const struct cstate_model *) id->driver_data);
 	if (err)
 		return err;
 
-	cstate_cpumask_init();
-
-	cstate_pmus_register();
-
-	return 0;
+	return cstate_init();
 }
+module_init(cstate_pmu_init);
 
-device_initcall(cstate_pmu_init);
+static void __exit cstate_pmu_exit(void)
+{
+	cpu_notifier_register_begin();
+	__unregister_cpu_notifier(&cstate_cpu_nb);
+	cstate_cleanup();
+	cpu_notifier_register_done();
+}
+module_exit(cstate_pmu_exit);
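
The cstate.c rework above replaces the hard-coded test_core()/test_pkg() model switches with a cstate_model bitmask attached to each x86_cpu_id entry, which cstate_probe_msr() then walks. A compilable userspace analogue of that probe loop; the rdmsr stub and the chosen model are illustrative only:

#include <stdio.h>
#include <stdbool.h>

enum { CORE_C1 = 0, CORE_C3, CORE_C6, CORE_C7, CORE_EVENT_MAX };

struct cstate_model { unsigned long core_events; };

/* Matches the slm_cstates entry above: C1 and C6 core residency only. */
static const struct cstate_model slm = {
	.core_events = (1UL << CORE_C1) | (1UL << CORE_C6),
};

/* Stub for rdmsrl_safe(): pretend every supported MSR reads back fine. */
static bool rdmsr_ok(int event) { (void)event; return true; }

int main(void)
{
	static const char * const names[CORE_EVENT_MAX] = {
		"c1-residency", "c3-residency", "c6-residency", "c7-residency",
	};
	int bit;

	for (bit = 0; bit < CORE_EVENT_MAX; bit++)
		if ((slm.core_events & (1UL << bit)) && rdmsr_ok(bit))
			printf("exposing %s\n", names[bit]);
	return 0;
}
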
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8584b90..7ce9f3f 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -645,6 +645,12 @@
 	EVENT_CONSTRAINT_END
 };
 
+struct event_constraint intel_glm_pebs_event_constraints[] = {
+	/* Allow all events as PEBS with no flags */
+	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
+	EVENT_CONSTRAINT_END
+};
+
 struct event_constraint intel_nehalem_pebs_event_constraints[] = {
 	INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 6c3b7c1..9e2b40c 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -14,7 +14,8 @@
 	LBR_FORMAT_EIP_FLAGS	= 0x03,
 	LBR_FORMAT_EIP_FLAGS2	= 0x04,
 	LBR_FORMAT_INFO		= 0x05,
-	LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_INFO,
+	LBR_FORMAT_TIME		= 0x06,
+	LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_TIME,
 };
 
 static enum {
@@ -63,7 +64,7 @@
 
 #define LBR_PLM (LBR_KERNEL | LBR_USER)
 
-#define LBR_SEL_MASK	0x1ff	/* valid bits in LBR_SELECT */
+#define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
 #define LBR_NOT_SUPP	-1	/* LBR filter not supported */
 #define LBR_IGN		0	/* ignored */
 
@@ -464,6 +465,16 @@
 			abort = !!(info & LBR_INFO_ABORT);
 			cycles = (info & LBR_INFO_CYCLES);
 		}
+
+		if (lbr_format == LBR_FORMAT_TIME) {
+			mis = !!(from & LBR_FROM_FLAG_MISPRED);
+			pred = !mis;
+			skip = 1;
+			cycles = ((to >> 48) & LBR_INFO_CYCLES);
+
+			to = (u64)((((s64)to) << 16) >> 16);
+		}
+
 		if (lbr_flags & LBR_EIP_FLAGS) {
 			mis = !!(from & LBR_FROM_FLAG_MISPRED);
 			pred = !mis;
@@ -610,8 +621,10 @@
 	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
 	 * in suppress mode. So LBR_SELECT should be set to
 	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
+	 * But the 10th bit LBR_CALL_STACK does not operate
+	 * in suppress mode.
 	 */
-	reg->config = mask ^ x86_pmu.lbr_sel_mask;
+	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
 
 	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
 	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
@@ -1047,6 +1060,24 @@
 	pr_cont("8-deep LBR, ");
 }
 
+/* slm */
+void __init intel_pmu_lbr_init_slm(void)
+{
+	x86_pmu.lbr_nr	   = 8;
+	x86_pmu.lbr_tos    = MSR_LBR_TOS;
+	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
+	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
+
+	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
+	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
+
+	/*
+	 * SW branch filter usage:
+	 * - compensate for lack of HW filter
+	 */
+	pr_cont("8-deep LBR, ");
+}
+
 /* Knights Landing */
 void intel_pmu_lbr_init_knl(void)
 {
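
For the new LBR_FORMAT_TIME branch above, the cycle count travels in bits 63:48 of the TO address and the remaining 48 bits have to be sign-extended back to a canonical pointer. A standalone sketch with a made-up raw value; like the kernel code, it relies on arithmetic right shifts of signed integers:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	/* cycles = 18 in bits 63:48, 48-bit kernel-style address below them */
	uint64_t to = (0x12ULL << 48) | 0xffff81234567ULL;

	unsigned int cycles = (to >> 48) & 0xffff;	/* LBR_INFO_CYCLES mask */

	/* shift the 48-bit address up, then arithmetic-shift it back down */
	to = (uint64_t)(((int64_t)to << 16) >> 16);

	printf("cycles=%u to=%#" PRIx64 "\n", cycles, to);
	/* prints: cycles=18 to=0xffffffff81234567 */
	return 0;
}
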
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 6af7cf7..04bb5fb 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -67,11 +67,13 @@
 	PT_CAP(max_subleaf,		0, CR_EAX, 0xffffffff),
 	PT_CAP(cr3_filtering,		0, CR_EBX, BIT(0)),
 	PT_CAP(psb_cyc,			0, CR_EBX, BIT(1)),
+	PT_CAP(ip_filtering,		0, CR_EBX, BIT(2)),
 	PT_CAP(mtc,			0, CR_EBX, BIT(3)),
 	PT_CAP(topa_output,		0, CR_ECX, BIT(0)),
 	PT_CAP(topa_multiple_entries,	0, CR_ECX, BIT(1)),
 	PT_CAP(single_range_output,	0, CR_ECX, BIT(2)),
 	PT_CAP(payloads_lip,		0, CR_ECX, BIT(31)),
+	PT_CAP(num_address_ranges,	1, CR_EAX, 0x3),
 	PT_CAP(mtc_periods,		1, CR_EAX, 0xffff0000),
 	PT_CAP(cycle_thresholds,	1, CR_EBX, 0xffff),
 	PT_CAP(psb_periods,		1, CR_EBX, 0xffff0000),
@@ -125,9 +127,46 @@
 	.attrs	= pt_formats_attr,
 };
 
+static ssize_t
+pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
+		    char *page)
+{
+	struct perf_pmu_events_attr *pmu_attr =
+		container_of(attr, struct perf_pmu_events_attr, attr);
+
+	switch (pmu_attr->id) {
+	case 0:
+		return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
+	case 1:
+		return sprintf(page, "%u:%u\n",
+			       pt_pmu.tsc_art_num,
+			       pt_pmu.tsc_art_den);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
+	       pt_timing_attr_show);
+PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
+	       pt_timing_attr_show);
+
+static struct attribute *pt_timing_attr[] = {
+	&timing_attr_max_nonturbo_ratio.attr.attr,
+	&timing_attr_tsc_art_ratio.attr.attr,
+	NULL,
+};
+
+static struct attribute_group pt_timing_group = {
+	.attrs	= pt_timing_attr,
+};
+
 static const struct attribute_group *pt_attr_groups[] = {
 	&pt_cap_group,
 	&pt_format_group,
+	&pt_timing_group,
 	NULL,
 };
 
@@ -136,9 +175,38 @@
 	struct dev_ext_attribute *de_attrs;
 	struct attribute **attrs;
 	size_t size;
+	u64 reg;
 	int ret;
 	long i;
 
+	rdmsrl(MSR_PLATFORM_INFO, reg);
+	pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;
+
+	/*
+	 * If available, read the TSC to core crystal clock ratio;
+	 * otherwise a zero numerator stands for "not enumerated",
+	 * as per the SDM.
+	 */
+	if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
+		u32 eax, ebx, ecx, edx;
+
+		cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);
+
+		pt_pmu.tsc_art_num = ebx;
+		pt_pmu.tsc_art_den = eax;
+	}
+
+	if (boot_cpu_has(X86_FEATURE_VMX)) {
+		/*
+		 * Intel SDM, 36.5 "Tracing post-VMXON" says that
+		 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
+		 * post-VMXON.
+		 */
+		rdmsrl(MSR_IA32_VMX_MISC, reg);
+		if (reg & BIT(14))
+			pt_pmu.vmx = true;
+	}
+
 	attrs = NULL;
 
 	for (i = 0; i < PT_CPUID_LEAVES; i++) {
@@ -251,6 +319,75 @@
  * These all are cpu affine and operate on a local PT
  */
 
+/* Address ranges and their corresponding msr configuration registers */
+static const struct pt_address_range {
+	unsigned long	msr_a;
+	unsigned long	msr_b;
+	unsigned int	reg_off;
+} pt_address_ranges[] = {
+	{
+		.msr_a	 = MSR_IA32_RTIT_ADDR0_A,
+		.msr_b	 = MSR_IA32_RTIT_ADDR0_B,
+		.reg_off = RTIT_CTL_ADDR0_OFFSET,
+	},
+	{
+		.msr_a	 = MSR_IA32_RTIT_ADDR1_A,
+		.msr_b	 = MSR_IA32_RTIT_ADDR1_B,
+		.reg_off = RTIT_CTL_ADDR1_OFFSET,
+	},
+	{
+		.msr_a	 = MSR_IA32_RTIT_ADDR2_A,
+		.msr_b	 = MSR_IA32_RTIT_ADDR2_B,
+		.reg_off = RTIT_CTL_ADDR2_OFFSET,
+	},
+	{
+		.msr_a	 = MSR_IA32_RTIT_ADDR3_A,
+		.msr_b	 = MSR_IA32_RTIT_ADDR3_B,
+		.reg_off = RTIT_CTL_ADDR3_OFFSET,
+	}
+};
+
+static u64 pt_config_filters(struct perf_event *event)
+{
+	struct pt_filters *filters = event->hw.addr_filters;
+	struct pt *pt = this_cpu_ptr(&pt_ctx);
+	unsigned int range = 0;
+	u64 rtit_ctl = 0;
+
+	if (!filters)
+		return 0;
+
+	perf_event_addr_filters_sync(event);
+
+	for (range = 0; range < filters->nr_filters; range++) {
+		struct pt_filter *filter = &filters->filter[range];
+
+		/*
+		 * Note, if the range has zero start/end addresses due
+		 * to its dynamic object not being loaded yet, we just
+		 * go ahead and program zeroed range, which will simply
+		 * produce no data. Note^2: if executable code at 0x0
+		 * is a concern, we can set up an "invalid" configuration
+		 * such as msr_b < msr_a.
+		 */
+
+		/* avoid redundant msr writes */
+		if (pt->filters.filter[range].msr_a != filter->msr_a) {
+			wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
+			pt->filters.filter[range].msr_a = filter->msr_a;
+		}
+
+		if (pt->filters.filter[range].msr_b != filter->msr_b) {
+			wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
+			pt->filters.filter[range].msr_b = filter->msr_b;
+		}
+
+		rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
+	}
+
+	return rtit_ctl;
+}
+
 static void pt_config(struct perf_event *event)
 {
 	u64 reg;
@@ -260,7 +397,8 @@
 		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
 	}
 
-	reg = RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;
+	reg = pt_config_filters(event);
+	reg |= RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;
 
 	if (!event->attr.exclude_kernel)
 		reg |= RTIT_CTL_OS;
@@ -269,20 +407,23 @@
 
 	reg |= (event->attr.config & PT_CONFIG_MASK);
 
+	event->hw.config = reg;
 	wrmsrl(MSR_IA32_RTIT_CTL, reg);
 }
 
-static void pt_config_start(bool start)
+static void pt_config_stop(struct perf_event *event)
 {
-	u64 ctl;
+	u64 ctl = READ_ONCE(event->hw.config);
 
-	rdmsrl(MSR_IA32_RTIT_CTL, ctl);
-	if (start)
-		ctl |= RTIT_CTL_TRACEEN;
-	else
-		ctl &= ~RTIT_CTL_TRACEEN;
+	/* may be already stopped by a PMI */
+	if (!(ctl & RTIT_CTL_TRACEEN))
+		return;
+
+	ctl &= ~RTIT_CTL_TRACEEN;
 	wrmsrl(MSR_IA32_RTIT_CTL, ctl);
 
+	WRITE_ONCE(event->hw.config, ctl);
+
 	/*
 	 * A wrmsr that disables trace generation serializes other PT
 	 * registers and causes all data packets to be written to memory,
@@ -291,8 +432,7 @@
 	 * The below WMB, separating data store and aux_head store matches
 	 * the consumer's RMB that separates aux_head load and data load.
 	 */
-	if (!start)
-		wmb();
+	wmb();
 }
 
 static void pt_config_buffer(void *buf, unsigned int topa_idx,
@@ -695,6 +835,7 @@
 
 	/* clear STOP and INT from current entry */
 	buf->topa_index[buf->stop_pos]->stop = 0;
+	buf->topa_index[buf->stop_pos]->intr = 0;
 	buf->topa_index[buf->intr_pos]->intr = 0;
 
 	/* how many pages till the STOP marker */
@@ -719,6 +860,7 @@
 	buf->intr_pos = idx;
 
 	buf->topa_index[buf->stop_pos]->stop = 1;
+	buf->topa_index[buf->stop_pos]->intr = 1;
 	buf->topa_index[buf->intr_pos]->intr = 1;
 
 	return 0;
@@ -905,24 +1047,80 @@
 	kfree(buf);
 }
 
-/**
- * pt_buffer_is_full() - check if the buffer is full
- * @buf:	PT buffer.
- * @pt:		Per-cpu pt handle.
- *
- * If the user hasn't read data from the output region that aux_head
- * points to, the buffer is considered full: the user needs to read at
- * least this region and update aux_tail to point past it.
- */
-static bool pt_buffer_is_full(struct pt_buffer *buf, struct pt *pt)
+static int pt_addr_filters_init(struct perf_event *event)
 {
-	if (buf->snapshot)
-		return false;
+	struct pt_filters *filters;
+	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
 
-	if (local_read(&buf->data_size) >= pt->handle.size)
-		return true;
+	if (!pt_cap_get(PT_CAP_num_address_ranges))
+		return 0;
 
-	return false;
+	filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
+	if (!filters)
+		return -ENOMEM;
+
+	if (event->parent)
+		memcpy(filters, event->parent->hw.addr_filters,
+		       sizeof(*filters));
+
+	event->hw.addr_filters = filters;
+
+	return 0;
+}
+
+static void pt_addr_filters_fini(struct perf_event *event)
+{
+	kfree(event->hw.addr_filters);
+	event->hw.addr_filters = NULL;
+}
+
+static int pt_event_addr_filters_validate(struct list_head *filters)
+{
+	struct perf_addr_filter *filter;
+	int range = 0;
+
+	list_for_each_entry(filter, filters, entry) {
+		/* PT doesn't support single address triggers */
+		if (!filter->range)
+			return -EOPNOTSUPP;
+
+		if (!filter->inode && !kernel_ip(filter->offset))
+			return -EINVAL;
+
+		if (++range > pt_cap_get(PT_CAP_num_address_ranges))
+			return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static void pt_event_addr_filters_sync(struct perf_event *event)
+{
+	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
+	unsigned long msr_a, msr_b, *offs = event->addr_filters_offs;
+	struct pt_filters *filters = event->hw.addr_filters;
+	struct perf_addr_filter *filter;
+	int range = 0;
+
+	if (!filters)
+		return;
+
+	list_for_each_entry(filter, &head->list, entry) {
+		if (filter->inode && !offs[range]) {
+			msr_a = msr_b = 0;
+		} else {
+			/* apply the offset */
+			msr_a = filter->offset + offs[range];
+			msr_b = filter->size + msr_a;
+		}
+
+		filters->filter[range].msr_a  = msr_a;
+		filters->filter[range].msr_b  = msr_b;
+		filters->filter[range].config = filter->filter ? 1 : 2;
+		range++;
+	}
+
+	filters->nr_filters = range;
 }
 
 /**
@@ -939,14 +1137,20 @@
 	 * after PT has been disabled by pt_event_stop(). Make sure we don't
 	 * do anything (particularly, re-enable) for this event here.
 	 */
-	if (!ACCESS_ONCE(pt->handle_nmi))
+	if (!READ_ONCE(pt->handle_nmi))
 		return;
 
-	pt_config_start(false);
+	/*
+	 * If VMX is on and PT does not support it, don't touch anything.
+	 */
+	if (READ_ONCE(pt->vmx_on))
+		return;
 
 	if (!event)
 		return;
 
+	pt_config_stop(event);
+
 	buf = perf_get_aux(&pt->handle);
 	if (!buf)
 		return;
@@ -983,26 +1187,71 @@
 	}
 }
 
+void intel_pt_handle_vmx(int on)
+{
+	struct pt *pt = this_cpu_ptr(&pt_ctx);
+	struct perf_event *event;
+	unsigned long flags;
+
+	/* PT plays nice with VMX, do nothing */
+	if (pt_pmu.vmx)
+		return;
+
+	/*
+	 * VMXON will clear RTIT_CTL.TraceEn; we need to make
+	 * sure to not try to set it while VMX is on. Disable
+	 * interrupts to avoid racing with pmu callbacks;
+	 * concurrent PMI should be handled fine.
+	 */
+	local_irq_save(flags);
+	WRITE_ONCE(pt->vmx_on, on);
+
+	if (on) {
+		/* prevent pt_config_stop() from writing RTIT_CTL */
+		event = pt->handle.event;
+		if (event)
+			event->hw.config = 0;
+	}
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
+
 /*
  * PMU callbacks
  */
 
 static void pt_event_start(struct perf_event *event, int mode)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	struct pt *pt = this_cpu_ptr(&pt_ctx);
-	struct pt_buffer *buf = perf_get_aux(&pt->handle);
+	struct pt_buffer *buf;
 
-	if (!buf || pt_buffer_is_full(buf, pt)) {
-		event->hw.state = PERF_HES_STOPPED;
+	if (READ_ONCE(pt->vmx_on))
 		return;
+
+	buf = perf_aux_output_begin(&pt->handle, event);
+	if (!buf)
+		goto fail_stop;
+
+	pt_buffer_reset_offsets(buf, pt->handle.head);
+	if (!buf->snapshot) {
+		if (pt_buffer_reset_markers(buf, &pt->handle))
+			goto fail_end_stop;
 	}
 
-	ACCESS_ONCE(pt->handle_nmi) = 1;
-	event->hw.state = 0;
+	WRITE_ONCE(pt->handle_nmi, 1);
+	hwc->state = 0;
 
 	pt_config_buffer(buf->cur->table, buf->cur_idx,
 			 buf->output_off);
 	pt_config(event);
+
+	return;
+
+fail_end_stop:
+	perf_aux_output_end(&pt->handle, 0, true);
+fail_stop:
+	hwc->state = PERF_HES_STOPPED;
 }
 
 static void pt_event_stop(struct perf_event *event, int mode)
@@ -1013,8 +1262,9 @@
 	 * Protect against the PMI racing with disabling wrmsr,
 	 * see comment in intel_pt_interrupt().
 	 */
-	ACCESS_ONCE(pt->handle_nmi) = 0;
-	pt_config_start(false);
+	WRITE_ONCE(pt->handle_nmi, 0);
+
+	pt_config_stop(event);
 
 	if (event->hw.state == PERF_HES_STOPPED)
 		return;
@@ -1035,19 +1285,7 @@
 		pt_handle_status(pt);
 
 		pt_update_head(pt);
-	}
-}
 
-static void pt_event_del(struct perf_event *event, int mode)
-{
-	struct pt *pt = this_cpu_ptr(&pt_ctx);
-	struct pt_buffer *buf;
-
-	pt_event_stop(event, PERF_EF_UPDATE);
-
-	buf = perf_get_aux(&pt->handle);
-
-	if (buf) {
 		if (buf->snapshot)
 			pt->handle.head =
 				local_xchg(&buf->data_size,
@@ -1057,9 +1295,13 @@
 	}
 }
 
+static void pt_event_del(struct perf_event *event, int mode)
+{
+	pt_event_stop(event, PERF_EF_UPDATE);
+}
+
 static int pt_event_add(struct perf_event *event, int mode)
 {
-	struct pt_buffer *buf;
 	struct pt *pt = this_cpu_ptr(&pt_ctx);
 	struct hw_perf_event *hwc = &event->hw;
 	int ret = -EBUSY;
@@ -1067,34 +1309,18 @@
 	if (pt->handle.event)
 		goto fail;
 
-	buf = perf_aux_output_begin(&pt->handle, event);
-	ret = -EINVAL;
-	if (!buf)
-		goto fail_stop;
-
-	pt_buffer_reset_offsets(buf, pt->handle.head);
-	if (!buf->snapshot) {
-		ret = pt_buffer_reset_markers(buf, &pt->handle);
-		if (ret)
-			goto fail_end_stop;
-	}
-
 	if (mode & PERF_EF_START) {
 		pt_event_start(event, 0);
-		ret = -EBUSY;
+		ret = -EINVAL;
 		if (hwc->state == PERF_HES_STOPPED)
-			goto fail_end_stop;
+			goto fail;
 	} else {
 		hwc->state = PERF_HES_STOPPED;
 	}
 
-	return 0;
-
-fail_end_stop:
-	perf_aux_output_end(&pt->handle, 0, true);
-fail_stop:
-	hwc->state = PERF_HES_STOPPED;
+	ret = 0;
 fail:
+
 	return ret;
 }
 
@@ -1104,6 +1330,7 @@
 
 static void pt_event_destroy(struct perf_event *event)
 {
+	pt_addr_filters_fini(event);
 	x86_del_exclusive(x86_lbr_exclusive_pt);
 }
 
@@ -1118,6 +1345,11 @@
 	if (x86_add_exclusive(x86_lbr_exclusive_pt))
 		return -EBUSY;
 
+	if (pt_addr_filters_init(event)) {
+		x86_del_exclusive(x86_lbr_exclusive_pt);
+		return -ENOMEM;
+	}
+
 	event->destroy = pt_event_destroy;
 
 	return 0;
@@ -1137,7 +1369,7 @@
 
 	BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);
 
-	if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_INTEL_PT))
+	if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
 		return -ENODEV;
 
 	get_online_cpus();
@@ -1171,16 +1403,21 @@
 			PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
 
 	pt_pmu.pmu.capabilities	|= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
-	pt_pmu.pmu.attr_groups	= pt_attr_groups;
-	pt_pmu.pmu.task_ctx_nr	= perf_sw_context;
-	pt_pmu.pmu.event_init	= pt_event_init;
-	pt_pmu.pmu.add		= pt_event_add;
-	pt_pmu.pmu.del		= pt_event_del;
-	pt_pmu.pmu.start	= pt_event_start;
-	pt_pmu.pmu.stop		= pt_event_stop;
-	pt_pmu.pmu.read		= pt_event_read;
-	pt_pmu.pmu.setup_aux	= pt_buffer_setup_aux;
-	pt_pmu.pmu.free_aux	= pt_buffer_free_aux;
+	pt_pmu.pmu.attr_groups		 = pt_attr_groups;
+	pt_pmu.pmu.task_ctx_nr		 = perf_sw_context;
+	pt_pmu.pmu.event_init		 = pt_event_init;
+	pt_pmu.pmu.add			 = pt_event_add;
+	pt_pmu.pmu.del			 = pt_event_del;
+	pt_pmu.pmu.start		 = pt_event_start;
+	pt_pmu.pmu.stop			 = pt_event_stop;
+	pt_pmu.pmu.read			 = pt_event_read;
+	pt_pmu.pmu.setup_aux		 = pt_buffer_setup_aux;
+	pt_pmu.pmu.free_aux		 = pt_buffer_free_aux;
+	pt_pmu.pmu.addr_filters_sync     = pt_event_addr_filters_sync;
+	pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
+	pt_pmu.pmu.nr_addr_filters       =
+		pt_cap_get(PT_CAP_num_address_ranges);
+
 	ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);
 
 	return ret;
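
The address-filter support added to pt.c turns each perf address filter into a pair of RTIT_ADDRn_{A,B} MSR values plus a 4-bit config field OR-ed into RTIT_CTL at the per-range offset. A rough sketch of that computation for range 0, using made-up offsets and the bit position from the pt.h hunk below; the config encoding (1 = filter, 2 = TraceStop) follows what pt_event_addr_filters_sync() writes:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint64_t obj_load = 0x7f0000000000ULL;	/* where the DSO got mapped (example) */
	uint64_t offset   = 0x1000;		/* filter start, as a file offset */
	uint64_t size     = 0x2000;		/* filter length */
	unsigned int config = 1;		/* 1 = trace only inside the range */

	uint64_t msr_a = offset + obj_load;	/* goes to RTIT_ADDR0_A */
	uint64_t msr_b = msr_a + size;		/* goes to RTIT_ADDR0_B */
	uint64_t rtit_ctl = (uint64_t)config << 32;	/* RTIT_CTL_ADDR0_OFFSET */

	printf("ADDR0_A=%#" PRIx64 " ADDR0_B=%#" PRIx64 " RTIT_CTL|=%#" PRIx64 "\n",
	       msr_a, msr_b, rtit_ctl);
	return 0;
}
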
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 336878a..efffa4a 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -20,6 +20,40 @@
 #define __INTEL_PT_H__
 
 /*
+ * PT MSR bit definitions
+ */
+#define RTIT_CTL_TRACEEN		BIT(0)
+#define RTIT_CTL_CYCLEACC		BIT(1)
+#define RTIT_CTL_OS			BIT(2)
+#define RTIT_CTL_USR			BIT(3)
+#define RTIT_CTL_CR3EN			BIT(7)
+#define RTIT_CTL_TOPA			BIT(8)
+#define RTIT_CTL_MTC_EN			BIT(9)
+#define RTIT_CTL_TSC_EN			BIT(10)
+#define RTIT_CTL_DISRETC		BIT(11)
+#define RTIT_CTL_BRANCH_EN		BIT(13)
+#define RTIT_CTL_MTC_RANGE_OFFSET	14
+#define RTIT_CTL_MTC_RANGE		(0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
+#define RTIT_CTL_CYC_THRESH_OFFSET	19
+#define RTIT_CTL_CYC_THRESH		(0x0full << RTIT_CTL_CYC_THRESH_OFFSET)
+#define RTIT_CTL_PSB_FREQ_OFFSET	24
+#define RTIT_CTL_PSB_FREQ      		(0x0full << RTIT_CTL_PSB_FREQ_OFFSET)
+#define RTIT_CTL_ADDR0_OFFSET		32
+#define RTIT_CTL_ADDR0      		(0x0full << RTIT_CTL_ADDR0_OFFSET)
+#define RTIT_CTL_ADDR1_OFFSET		36
+#define RTIT_CTL_ADDR1      		(0x0full << RTIT_CTL_ADDR1_OFFSET)
+#define RTIT_CTL_ADDR2_OFFSET		40
+#define RTIT_CTL_ADDR2      		(0x0full << RTIT_CTL_ADDR2_OFFSET)
+#define RTIT_CTL_ADDR3_OFFSET		44
+#define RTIT_CTL_ADDR3      		(0x0full << RTIT_CTL_ADDR3_OFFSET)
+#define RTIT_STATUS_FILTEREN		BIT(0)
+#define RTIT_STATUS_CONTEXTEN		BIT(1)
+#define RTIT_STATUS_TRIGGEREN		BIT(2)
+#define RTIT_STATUS_BUFFOVF		BIT(3)
+#define RTIT_STATUS_ERROR		BIT(4)
+#define RTIT_STATUS_STOPPED		BIT(5)
+
+/*
  * Single-entry ToPA: when this close to region boundary, switch
  * buffers to avoid losing data.
  */
@@ -48,15 +82,20 @@
 #define PT_CPUID_LEAVES		2
 #define PT_CPUID_REGS_NUM	4 /* number of regsters (eax, ebx, ecx, edx) */
 
+/* TSC to Core Crystal Clock Ratio */
+#define CPUID_TSC_LEAF		0x15
+
 enum pt_capabilities {
 	PT_CAP_max_subleaf = 0,
 	PT_CAP_cr3_filtering,
 	PT_CAP_psb_cyc,
+	PT_CAP_ip_filtering,
 	PT_CAP_mtc,
 	PT_CAP_topa_output,
 	PT_CAP_topa_multiple_entries,
 	PT_CAP_single_range_output,
 	PT_CAP_payloads_lip,
+	PT_CAP_num_address_ranges,
 	PT_CAP_mtc_periods,
 	PT_CAP_cycle_thresholds,
 	PT_CAP_psb_periods,
@@ -65,6 +104,10 @@
 struct pt_pmu {
 	struct pmu		pmu;
 	u32			caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
+	bool			vmx;
+	unsigned long		max_nonturbo_ratio;
+	unsigned int		tsc_art_num;
+	unsigned int		tsc_art_den;
 };
 
 /**
@@ -103,14 +146,42 @@
 	struct topa_entry	*topa_index[0];
 };
 
+#define PT_FILTERS_NUM	4
+
+/**
+ * struct pt_filter - IP range filter configuration
+ * @msr_a:	range start, goes to RTIT_ADDRn_A
+ * @msr_b:	range end, goes to RTIT_ADDRn_B
+ * @config:	4-bit field in RTIT_CTL
+ */
+struct pt_filter {
+	unsigned long	msr_a;
+	unsigned long	msr_b;
+	unsigned long	config;
+};
+
+/**
+ * struct pt_filters - IP range filtering context
+ * @filter:	filters defined for this context
+ * @nr_filters:	number of defined filters in the @filter array
+ */
+struct pt_filters {
+	struct pt_filter	filter[PT_FILTERS_NUM];
+	unsigned int		nr_filters;
+};
+
 /**
  * struct pt - per-cpu pt context
  * @handle:	perf output handle
+ * @filters:		last configured filters
  * @handle_nmi:	do handle PT PMI on this cpu, there's an active event
+ * @vmx_on:	1 if VMX is ON on this cpu
  */
 struct pt {
 	struct perf_output_handle handle;
+	struct pt_filters	filters;
 	int			handle_nmi;
+	int			vmx_on;
 };
 
 #endif /* __INTEL_PT_H__ */
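
pt.h now also carries tsc_art_num/tsc_art_den, the CPUID leaf 0x15 TSC to core crystal clock ratio that pt.c exports through the tsc_art_ratio attribute. A sketch of how a consumer might turn that ratio into a TSC frequency; the CPUID values are example numbers and the 24 MHz crystal is an assumption (the leaf does not enumerate the crystal rate on these parts):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Example CPUID.15H values: EBX is the numerator, EAX the denominator. */
	uint32_t tsc_art_num = 216, tsc_art_den = 2;

	/* Assumed nominal core crystal clock for Skylake client parts. */
	double crystal_hz = 24000000.0;

	double tsc_hz = crystal_hz * tsc_art_num / tsc_art_den;
	printf("TSC ~ %.0f MHz\n", tsc_hz / 1e6);	/* 2592 MHz with these numbers */
	return 0;
}
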
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 70c93f9..99c4bab 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -27,10 +27,14 @@
  *	  event: rapl_energy_dram
  *    perf code: 0x3
  *
- * dram counter: consumption of the builtin-gpu domain (client only)
+ * gpu counter: consumption of the builtin-gpu domain (client only)
  *	  event: rapl_energy_gpu
  *    perf code: 0x4
  *
+ *  psys counter: consumption of the builtin-psys domain (client only)
+ *	  event: rapl_energy_psys
+ *    perf code: 0x5
+ *
  * We manage those counters as free running (read-only). They may be
  * use simultaneously by other tools, such as turbostat.
  *
@@ -53,6 +57,8 @@
 #include <asm/cpu_device_id.h>
 #include "../perf_event.h"
 
+MODULE_LICENSE("GPL");
+
 /*
  * RAPL energy status counters
  */
@@ -64,13 +70,16 @@
 #define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
 #define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
 #define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
+#define RAPL_IDX_PSYS_NRG_STAT	4	/* psys */
+#define INTEL_RAPL_PSYS		0x5	/* pseudo-encoding */
 
-#define NR_RAPL_DOMAINS         0x4
+#define NR_RAPL_DOMAINS         0x5
 static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
 	"pp0-core",
 	"package",
 	"dram",
 	"pp1-gpu",
+	"psys",
 };
 
 /* Clients have PP0, PKG */
@@ -89,6 +98,13 @@
 			 1<<RAPL_IDX_RAM_NRG_STAT|\
 			 1<<RAPL_IDX_PP1_NRG_STAT)
 
+/* SKL clients have PP0, PKG, RAM, PP1, PSYS */
+#define RAPL_IDX_SKL_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
+			  1<<RAPL_IDX_PKG_NRG_STAT|\
+			  1<<RAPL_IDX_RAM_NRG_STAT|\
+			  1<<RAPL_IDX_PP1_NRG_STAT|\
+			  1<<RAPL_IDX_PSYS_NRG_STAT)
+
 /* Knights Landing has PKG, RAM */
 #define RAPL_IDX_KNL	(1<<RAPL_IDX_PKG_NRG_STAT|\
 			 1<<RAPL_IDX_RAM_NRG_STAT)
@@ -360,6 +376,10 @@
 		bit = RAPL_IDX_PP1_NRG_STAT;
 		msr = MSR_PP1_ENERGY_STATUS;
 		break;
+	case INTEL_RAPL_PSYS:
+		bit = RAPL_IDX_PSYS_NRG_STAT;
+		msr = MSR_PLATFORM_ENERGY_STATUS;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -414,11 +434,13 @@
 RAPL_EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
 RAPL_EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
 RAPL_EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
+RAPL_EVENT_ATTR_STR(energy-psys,   rapl_psys, "event=0x05");
 
 RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
 RAPL_EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
 RAPL_EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
 RAPL_EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-psys.unit,   rapl_psys_unit, "Joules");
 
 /*
  * we compute in 0.23 nJ increments regardless of MSR
@@ -427,6 +449,7 @@
 RAPL_EVENT_ATTR_STR(energy-pkg.scale,     rapl_pkg_scale, "2.3283064365386962890625e-10");
 RAPL_EVENT_ATTR_STR(energy-ram.scale,     rapl_ram_scale, "2.3283064365386962890625e-10");
 RAPL_EVENT_ATTR_STR(energy-gpu.scale,     rapl_gpu_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-psys.scale,   rapl_psys_scale, "2.3283064365386962890625e-10");
 
 static struct attribute *rapl_events_srv_attr[] = {
 	EVENT_PTR(rapl_cores),
@@ -476,6 +499,27 @@
 	NULL,
 };
 
+static struct attribute *rapl_events_skl_attr[] = {
+	EVENT_PTR(rapl_cores),
+	EVENT_PTR(rapl_pkg),
+	EVENT_PTR(rapl_gpu),
+	EVENT_PTR(rapl_ram),
+	EVENT_PTR(rapl_psys),
+
+	EVENT_PTR(rapl_cores_unit),
+	EVENT_PTR(rapl_pkg_unit),
+	EVENT_PTR(rapl_gpu_unit),
+	EVENT_PTR(rapl_ram_unit),
+	EVENT_PTR(rapl_psys_unit),
+
+	EVENT_PTR(rapl_cores_scale),
+	EVENT_PTR(rapl_pkg_scale),
+	EVENT_PTR(rapl_gpu_scale),
+	EVENT_PTR(rapl_ram_scale),
+	EVENT_PTR(rapl_psys_scale),
+	NULL,
+};
+
 static struct attribute *rapl_events_knl_attr[] = {
 	EVENT_PTR(rapl_pkg),
 	EVENT_PTR(rapl_ram),
@@ -592,6 +636,11 @@
 	return NOTIFY_OK;
 }
 
+static struct notifier_block rapl_cpu_nb = {
+	.notifier_call	= rapl_cpu_notifier,
+	.priority       = CPU_PRI_PERF + 1,
+};
+
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;
@@ -660,7 +709,7 @@
 	return 0;
 }
 
-static void __init cleanup_rapl_pmus(void)
+static void cleanup_rapl_pmus(void)
 {
 	int i;
 
@@ -691,51 +740,92 @@
 	return 0;
 }
 
-static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
-	[0] = { .vendor = X86_VENDOR_INTEL, .family = 6 },
-	[1] = {},
+#define X86_RAPL_MODEL_MATCH(model, init)	\
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
+
+struct intel_rapl_init_fun {
+	bool apply_quirk;
+	int cntr_mask;
+	struct attribute **attrs;
 };
 
+static const struct intel_rapl_init_fun snb_rapl_init __initconst = {
+	.apply_quirk = false,
+	.cntr_mask = RAPL_IDX_CLN,
+	.attrs = rapl_events_cln_attr,
+};
+
+static const struct intel_rapl_init_fun hsx_rapl_init __initconst = {
+	.apply_quirk = true,
+	.cntr_mask = RAPL_IDX_SRV,
+	.attrs = rapl_events_srv_attr,
+};
+
+static const struct intel_rapl_init_fun hsw_rapl_init __initconst = {
+	.apply_quirk = false,
+	.cntr_mask = RAPL_IDX_HSW,
+	.attrs = rapl_events_hsw_attr,
+};
+
+static const struct intel_rapl_init_fun snbep_rapl_init __initconst = {
+	.apply_quirk = false,
+	.cntr_mask = RAPL_IDX_SRV,
+	.attrs = rapl_events_srv_attr,
+};
+
+static const struct intel_rapl_init_fun knl_rapl_init __initconst = {
+	.apply_quirk = true,
+	.cntr_mask = RAPL_IDX_KNL,
+	.attrs = rapl_events_knl_attr,
+};
+
+static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
+	.apply_quirk = false,
+	.cntr_mask = RAPL_IDX_SKL_CLN,
+	.attrs = rapl_events_skl_attr,
+};
+
+static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
+	X86_RAPL_MODEL_MATCH(42, snb_rapl_init),	/* Sandy Bridge */
+	X86_RAPL_MODEL_MATCH(45, snbep_rapl_init),	/* Sandy Bridge-EP */
+
+	X86_RAPL_MODEL_MATCH(58, snb_rapl_init),	/* Ivy Bridge */
+	X86_RAPL_MODEL_MATCH(62, snbep_rapl_init),	/* IvyTown */
+
+	X86_RAPL_MODEL_MATCH(60, hsw_rapl_init),	/* Haswell */
+	X86_RAPL_MODEL_MATCH(63, hsx_rapl_init),	/* Haswell-Server */
+	X86_RAPL_MODEL_MATCH(69, hsw_rapl_init),	/* Haswell-Celeron */
+	X86_RAPL_MODEL_MATCH(70, hsw_rapl_init),	/* Haswell GT3e */
+
+	X86_RAPL_MODEL_MATCH(61, hsw_rapl_init),	/* Broadwell */
+	X86_RAPL_MODEL_MATCH(71, hsw_rapl_init),	/* Broadwell-H */
+	X86_RAPL_MODEL_MATCH(79, hsx_rapl_init),	/* Broadwell-Server */
+	X86_RAPL_MODEL_MATCH(86, hsx_rapl_init),	/* Broadwell Xeon D */
+
+	X86_RAPL_MODEL_MATCH(87, knl_rapl_init),	/* Knights Landing */
+
+	X86_RAPL_MODEL_MATCH(78, skl_rapl_init),	/* Skylake */
+	X86_RAPL_MODEL_MATCH(94, skl_rapl_init),	/* Skylake H/S */
+	{},
+};
+
+MODULE_DEVICE_TABLE(x86cpu, rapl_cpu_match);
+
 static int __init rapl_pmu_init(void)
 {
-	bool apply_quirk = false;
+	const struct x86_cpu_id *id;
+	struct intel_rapl_init_fun *rapl_init;
+	bool apply_quirk;
 	int ret;
 
-	if (!x86_match_cpu(rapl_cpu_match))
+	id = x86_match_cpu(rapl_cpu_match);
+	if (!id)
 		return -ENODEV;
 
-	switch (boot_cpu_data.x86_model) {
-	case 42: /* Sandy Bridge */
-	case 58: /* Ivy Bridge */
-		rapl_cntr_mask = RAPL_IDX_CLN;
-		rapl_pmu_events_group.attrs = rapl_events_cln_attr;
-		break;
-	case 63: /* Haswell-Server */
-	case 79: /* Broadwell-Server */
-		apply_quirk = true;
-		rapl_cntr_mask = RAPL_IDX_SRV;
-		rapl_pmu_events_group.attrs = rapl_events_srv_attr;
-		break;
-	case 60: /* Haswell */
-	case 69: /* Haswell-Celeron */
-	case 61: /* Broadwell */
-	case 71: /* Broadwell-H */
-		rapl_cntr_mask = RAPL_IDX_HSW;
-		rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
-		break;
-	case 45: /* Sandy Bridge-EP */
-	case 62: /* IvyTown */
-		rapl_cntr_mask = RAPL_IDX_SRV;
-		rapl_pmu_events_group.attrs = rapl_events_srv_attr;
-		break;
-	case 87: /* Knights Landing */
-		apply_quirk = true;
-		rapl_cntr_mask = RAPL_IDX_KNL;
-		rapl_pmu_events_group.attrs = rapl_events_knl_attr;
-		break;
-	default:
-		return -ENODEV;
-	}
+	rapl_init = (struct intel_rapl_init_fun *)id->driver_data;
+	apply_quirk = rapl_init->apply_quirk;
+	rapl_cntr_mask = rapl_init->cntr_mask;
+	rapl_pmu_events_group.attrs = rapl_init->attrs;
 
 	ret = rapl_check_hw_unit(apply_quirk);
 	if (ret)
@@ -755,7 +845,7 @@
 	if (ret)
 		goto out;
 
-	__perf_cpu_notifier(rapl_cpu_notifier);
+	__register_cpu_notifier(&rapl_cpu_nb);
 	cpu_notifier_register_done();
 	rapl_advertise();
 	return 0;
@@ -766,4 +856,14 @@
 	cpu_notifier_register_done();
 	return ret;
 }
-device_initcall(rapl_pmu_init);
+module_init(rapl_pmu_init);
+
+static void __exit intel_rapl_exit(void)
+{
+	cpu_notifier_register_begin();
+	__unregister_cpu_notifier(&rapl_cpu_nb);
+	perf_pmu_unregister(&rapl_pmus->pmu);
+	cleanup_rapl_pmus();
+	cpu_notifier_register_done();
+}
+module_exit(intel_rapl_exit);
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 7012d18..16c1789 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1,3 +1,4 @@
+#include <asm/cpu_device_id.h>
 #include "uncore.h"
 
 static struct intel_uncore_type *empty_uncore[] = { NULL, };
@@ -21,6 +22,8 @@
 struct event_constraint uncore_constraint_empty =
 	EVENT_CONSTRAINT(0, 0, 0);
 
+MODULE_LICENSE("GPL");
+
 static int uncore_pcibus_to_physid(struct pci_bus *bus)
 {
 	struct pci2phy_map *map;
@@ -754,7 +757,7 @@
 	pmu->registered = false;
 }
 
-static void __init __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
+static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
 {
 	struct intel_uncore_pmu *pmu = type->pmus;
 	struct intel_uncore_box *box;
@@ -770,7 +773,7 @@
 	}
 }
 
-static void __init uncore_exit_boxes(void *dummy)
+static void uncore_exit_boxes(void *dummy)
 {
 	struct intel_uncore_type **types;
 
@@ -787,7 +790,7 @@
 	kfree(pmu->boxes);
 }
 
-static void __init uncore_type_exit(struct intel_uncore_type *type)
+static void uncore_type_exit(struct intel_uncore_type *type)
 {
 	struct intel_uncore_pmu *pmu = type->pmus;
 	int i;
@@ -804,7 +807,7 @@
 	type->events_group = NULL;
 }
 
-static void __init uncore_types_exit(struct intel_uncore_type **types)
+static void uncore_types_exit(struct intel_uncore_type **types)
 {
 	for (; *types; types++)
 		uncore_type_exit(*types);
@@ -989,46 +992,6 @@
 	size_t size;
 	int ret;
 
-	switch (boot_cpu_data.x86_model) {
-	case 45: /* Sandy Bridge-EP */
-		ret = snbep_uncore_pci_init();
-		break;
-	case 62: /* Ivy Bridge-EP */
-		ret = ivbep_uncore_pci_init();
-		break;
-	case 63: /* Haswell-EP */
-		ret = hswep_uncore_pci_init();
-		break;
-	case 79: /* BDX-EP */
-	case 86: /* BDX-DE */
-		ret = bdx_uncore_pci_init();
-		break;
-	case 42: /* Sandy Bridge */
-		ret = snb_uncore_pci_init();
-		break;
-	case 58: /* Ivy Bridge */
-		ret = ivb_uncore_pci_init();
-		break;
-	case 60: /* Haswell */
-	case 69: /* Haswell Celeron */
-		ret = hsw_uncore_pci_init();
-		break;
-	case 61: /* Broadwell */
-		ret = bdw_uncore_pci_init();
-		break;
-	case 87: /* Knights Landing */
-		ret = knl_uncore_pci_init();
-		break;
-	case 94: /* SkyLake */
-		ret = skl_uncore_pci_init();
-		break;
-	default:
-		return -ENODEV;
-	}
-
-	if (ret)
-		return ret;
-
 	size = max_packages * sizeof(struct pci_extra_dev);
 	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
 	if (!uncore_extra_pci_dev) {
@@ -1060,7 +1023,7 @@
 	return ret;
 }
 
-static void __init uncore_pci_exit(void)
+static void uncore_pci_exit(void)
 {
 	if (pcidrv_registered) {
 		pcidrv_registered = false;
@@ -1287,46 +1250,6 @@
 {
 	int ret;
 
-	switch (boot_cpu_data.x86_model) {
-	case 26: /* Nehalem */
-	case 30:
-	case 37: /* Westmere */
-	case 44:
-		nhm_uncore_cpu_init();
-		break;
-	case 42: /* Sandy Bridge */
-	case 58: /* Ivy Bridge */
-	case 60: /* Haswell */
-	case 69: /* Haswell */
-	case 70: /* Haswell */
-	case 61: /* Broadwell */
-	case 71: /* Broadwell */
-		snb_uncore_cpu_init();
-		break;
-	case 45: /* Sandy Bridge-EP */
-		snbep_uncore_cpu_init();
-		break;
-	case 46: /* Nehalem-EX */
-	case 47: /* Westmere-EX aka. Xeon E7 */
-		nhmex_uncore_cpu_init();
-		break;
-	case 62: /* Ivy Bridge-EP */
-		ivbep_uncore_cpu_init();
-		break;
-	case 63: /* Haswell-EP */
-		hswep_uncore_cpu_init();
-		break;
-	case 79: /* BDX-EP */
-	case 86: /* BDX-DE */
-		bdx_uncore_cpu_init();
-		break;
-	case 87: /* Knights Landing */
-		knl_uncore_cpu_init();
-		break;
-	default:
-		return -ENODEV;
-	}
-
 	ret = uncore_types_init(uncore_msr_uncores, true);
 	if (ret)
 		goto err;
@@ -1376,20 +1299,123 @@
 	return 0;
 }
 
+#define X86_UNCORE_MODEL_MATCH(model, init)	\
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
+
+struct intel_uncore_init_fun {
+	void	(*cpu_init)(void);
+	int	(*pci_init)(void);
+};
+
+static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
+	.cpu_init = nhm_uncore_cpu_init,
+};
+
+static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
+	.cpu_init = snb_uncore_cpu_init,
+	.pci_init = snb_uncore_pci_init,
+};
+
+static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
+	.cpu_init = snb_uncore_cpu_init,
+	.pci_init = ivb_uncore_pci_init,
+};
+
+static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
+	.cpu_init = snb_uncore_cpu_init,
+	.pci_init = hsw_uncore_pci_init,
+};
+
+static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
+	.cpu_init = snb_uncore_cpu_init,
+	.pci_init = bdw_uncore_pci_init,
+};
+
+static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
+	.cpu_init = snbep_uncore_cpu_init,
+	.pci_init = snbep_uncore_pci_init,
+};
+
+static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
+	.cpu_init = nhmex_uncore_cpu_init,
+};
+
+static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
+	.cpu_init = ivbep_uncore_cpu_init,
+	.pci_init = ivbep_uncore_pci_init,
+};
+
+static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
+	.cpu_init = hswep_uncore_cpu_init,
+	.pci_init = hswep_uncore_pci_init,
+};
+
+static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
+	.cpu_init = bdx_uncore_cpu_init,
+	.pci_init = bdx_uncore_pci_init,
+};
+
+static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
+	.cpu_init = knl_uncore_cpu_init,
+	.pci_init = knl_uncore_pci_init,
+};
+
+static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
+	.pci_init = skl_uncore_pci_init,
+};
+
+static const struct x86_cpu_id intel_uncore_match[] __initconst = {
+	X86_UNCORE_MODEL_MATCH(26, nhm_uncore_init),	/* Nehalem */
+	X86_UNCORE_MODEL_MATCH(30, nhm_uncore_init),
+	X86_UNCORE_MODEL_MATCH(37, nhm_uncore_init),	/* Westmere */
+	X86_UNCORE_MODEL_MATCH(44, nhm_uncore_init),
+	X86_UNCORE_MODEL_MATCH(42, snb_uncore_init),	/* Sandy Bridge */
+	X86_UNCORE_MODEL_MATCH(58, ivb_uncore_init),	/* Ivy Bridge */
+	X86_UNCORE_MODEL_MATCH(60, hsw_uncore_init),	/* Haswell */
+	X86_UNCORE_MODEL_MATCH(69, hsw_uncore_init),	/* Haswell Celeron */
+	X86_UNCORE_MODEL_MATCH(70, hsw_uncore_init),	/* Haswell */
+	X86_UNCORE_MODEL_MATCH(61, bdw_uncore_init),	/* Broadwell */
+	X86_UNCORE_MODEL_MATCH(71, bdw_uncore_init),	/* Broadwell */
+	X86_UNCORE_MODEL_MATCH(45, snbep_uncore_init),	/* Sandy Bridge-EP */
+	X86_UNCORE_MODEL_MATCH(46, nhmex_uncore_init),	/* Nehalem-EX */
+	X86_UNCORE_MODEL_MATCH(47, nhmex_uncore_init),	/* Westmere-EX aka. Xeon E7 */
+	X86_UNCORE_MODEL_MATCH(62, ivbep_uncore_init),	/* Ivy Bridge-EP */
+	X86_UNCORE_MODEL_MATCH(63, hswep_uncore_init),	/* Haswell-EP */
+	X86_UNCORE_MODEL_MATCH(79, bdx_uncore_init),	/* BDX-EP */
+	X86_UNCORE_MODEL_MATCH(86, bdx_uncore_init),	/* BDX-DE */
+	X86_UNCORE_MODEL_MATCH(87, knl_uncore_init),	/* Knights Landing */
+	X86_UNCORE_MODEL_MATCH(94, skl_uncore_init),	/* SkyLake */
+	{},
+};
+
+MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
+
 static int __init intel_uncore_init(void)
 {
-	int pret, cret, ret;
+	const struct x86_cpu_id *id;
+	struct intel_uncore_init_fun *uncore_init;
+	int pret = 0, cret = 0, ret;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+	id = x86_match_cpu(intel_uncore_match);
+	if (!id)
 		return -ENODEV;
 
-	if (cpu_has_hypervisor)
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return -ENODEV;
 
 	max_packages = topology_max_packages();
 
-	pret = uncore_pci_init();
-	cret = uncore_cpu_init();
+	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
+	if (uncore_init->pci_init) {
+		pret = uncore_init->pci_init();
+		if (!pret)
+			pret = uncore_pci_init();
+	}
+
+	if (uncore_init->cpu_init) {
+		uncore_init->cpu_init();
+		cret = uncore_cpu_init();
+	}
 
 	if (cret && pret)
 		return -ENODEV;
@@ -1409,4 +1435,14 @@
 	cpu_notifier_register_done();
 	return ret;
 }
-device_initcall(intel_uncore_init);
+module_init(intel_uncore_init);
+
+static void __exit intel_uncore_exit(void)
+{
+	cpu_notifier_register_begin();
+	__unregister_cpu_notifier(&uncore_cpu_nb);
+	uncore_types_exit(uncore_msr_uncores);
+	uncore_pci_exit();
+	cpu_notifier_register_done();
+}
+module_exit(intel_uncore_exit);
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index ab2bcaa..b262586 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -219,6 +219,9 @@
 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)
 
 /* KNL EDC/MC UCLK */
 #define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
@@ -1902,6 +1905,10 @@
 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
 			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
 		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
+
+		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
+		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
+		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
 		reg1->idx = idx;
 	}
 	return 0;
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index ec863b9..85ef3c2 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -6,6 +6,8 @@
 	PERF_MSR_MPERF			= 2,
 	PERF_MSR_PPERF			= 3,
 	PERF_MSR_SMI			= 4,
+	PERF_MSR_PTSC			= 5,
+	PERF_MSR_IRPERF			= 6,
 
 	PERF_MSR_EVENT_MAX,
 };
@@ -15,6 +17,16 @@
 	return boot_cpu_has(X86_FEATURE_APERFMPERF);
 }
 
+static bool test_ptsc(int idx)
+{
+	return boot_cpu_has(X86_FEATURE_PTSC);
+}
+
+static bool test_irperf(int idx)
+{
+	return boot_cpu_has(X86_FEATURE_IRPERF);
+}
+
 static bool test_intel(int idx)
 {
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
@@ -69,18 +81,22 @@
 	bool	(*test)(int idx);
 };
 
-PMU_EVENT_ATTR_STRING(tsc,   evattr_tsc,   "event=0x00");
-PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01");
-PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
-PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
-PMU_EVENT_ATTR_STRING(smi,   evattr_smi,   "event=0x04");
+PMU_EVENT_ATTR_STRING(tsc,    evattr_tsc,    "event=0x00");
+PMU_EVENT_ATTR_STRING(aperf,  evattr_aperf,  "event=0x01");
+PMU_EVENT_ATTR_STRING(mperf,  evattr_mperf,  "event=0x02");
+PMU_EVENT_ATTR_STRING(pperf,  evattr_pperf,  "event=0x03");
+PMU_EVENT_ATTR_STRING(smi,    evattr_smi,    "event=0x04");
+PMU_EVENT_ATTR_STRING(ptsc,   evattr_ptsc,   "event=0x05");
+PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");
 
 static struct perf_msr msr[] = {
-	[PERF_MSR_TSC]   = { 0,			&evattr_tsc,	NULL,		 },
-	[PERF_MSR_APERF] = { MSR_IA32_APERF,	&evattr_aperf,	test_aperfmperf, },
-	[PERF_MSR_MPERF] = { MSR_IA32_MPERF,	&evattr_mperf,	test_aperfmperf, },
-	[PERF_MSR_PPERF] = { MSR_PPERF,		&evattr_pperf,	test_intel,	 },
-	[PERF_MSR_SMI]   = { MSR_SMI_COUNT,	&evattr_smi,	test_intel,	 },
+	[PERF_MSR_TSC]    = { 0,		&evattr_tsc,	NULL,		 },
+	[PERF_MSR_APERF]  = { MSR_IA32_APERF,	&evattr_aperf,	test_aperfmperf, },
+	[PERF_MSR_MPERF]  = { MSR_IA32_MPERF,	&evattr_mperf,	test_aperfmperf, },
+	[PERF_MSR_PPERF]  = { MSR_PPERF,	&evattr_pperf,	test_intel,	 },
+	[PERF_MSR_SMI]    = { MSR_SMI_COUNT,	&evattr_smi,	test_intel,	 },
+	[PERF_MSR_PTSC]   = { MSR_F15H_PTSC,	&evattr_ptsc,	test_ptsc,	 },
+	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF,	&evattr_irperf,	test_irperf,	 },
 };
 
 static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
@@ -166,7 +182,7 @@
 	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
 		delta = sign_extend64(delta, 31);
 
-	local64_add(now - prev, &event->count);
+	local64_add(delta, &event->count);
 }
 
 static void msr_event_start(struct perf_event *event, int flags)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ba6ef18..8bd764d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -601,6 +601,7 @@
 	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
 	const int	*lbr_sel_map;		   /* lbr_select mappings */
 	bool		lbr_double_abort;	   /* duplicated lbr aborts */
+	bool		lbr_pt_coexist;		   /* LBR may coexist with PT */
 
 	/*
 	 * Intel PT/LBR/BTS are exclusive
@@ -608,6 +609,11 @@
 	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];
 
 	/*
+	 * AMD bits
+	 */
+	unsigned int	amd_nb_constraints : 1;
+
+	/*
 	 * Extra registers for events
 	 */
 	struct extra_reg *extra_regs;
@@ -795,6 +801,9 @@
 
 struct attribute **merge_attr(struct attribute **a, struct attribute **b);
 
+ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
+			  char *page);
+
 #ifdef CONFIG_CPU_SUP_AMD
 
 int amd_pmu_init(void);
@@ -851,6 +860,8 @@
 
 extern struct event_constraint intel_slm_pebs_event_constraints[];
 
+extern struct event_constraint intel_glm_pebs_event_constraints[];
+
 extern struct event_constraint intel_nehalem_pebs_event_constraints[];
 
 extern struct event_constraint intel_westmere_pebs_event_constraints[];
@@ -899,6 +910,8 @@
 
 void intel_pmu_lbr_init_atom(void);
 
+void intel_pmu_lbr_init_slm(void);
+
 void intel_pmu_lbr_init_snb(void);
 
 void intel_pmu_lbr_init_hsw(void);
@@ -925,9 +938,6 @@
 
 int knc_pmu_init(void);
 
-ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
-			  char *page);
-
 static inline int is_ht_workaround_enabled(void)
 {
 	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 0552884..2f29f4e 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -357,7 +357,7 @@
 		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
 
 		/* Create the ucontext.  */
-		if (cpu_has_xsave)
+		if (boot_cpu_has(X86_FEATURE_XSAVE))
 			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
 		else
 			put_user_ex(0, &frame->uc.uc_flags);
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 99afb66..e77a644 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -1,11 +1,12 @@
 #ifndef _ASM_X86_ALTERNATIVE_H
 #define _ASM_X86_ALTERNATIVE_H
 
+#ifndef __ASSEMBLY__
+
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
 #include <asm/asm.h>
-#include <asm/ptrace.h>
 
 /*
  * Alternative inline assembly for SMP.
@@ -233,36 +234,6 @@
  */
 #define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
 
-struct paravirt_patch_site;
-#ifdef CONFIG_PARAVIRT
-void apply_paravirt(struct paravirt_patch_site *start,
-		    struct paravirt_patch_site *end);
-#else
-static inline void apply_paravirt(struct paravirt_patch_site *start,
-				  struct paravirt_patch_site *end)
-{}
-#define __parainstructions	NULL
-#define __parainstructions_end	NULL
-#endif
-
-extern void *text_poke_early(void *addr, const void *opcode, size_t len);
-
-/*
- * Clear and restore the kernel write-protection flag on the local CPU.
- * Allows the kernel to edit read-only pages.
- * Side-effect: any interrupt handler running between save and restore will have
- * the ability to write to read-only pages.
- *
- * Warning:
- * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
- * no thread can be preempted in the instructions being modified (no iret to an
- * invalid instruction possible) or if the instructions are changed from a
- * consistent state to another consistent state atomically.
- * On the local CPU you need to be protected again NMI or MCE handlers seeing an
- * inconsistent instruction while you patch.
- */
-extern void *text_poke(void *addr, const void *opcode, size_t len);
-extern int poke_int3_handler(struct pt_regs *regs);
-extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+#endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 98f25bb..bc27611 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -239,10 +239,10 @@
 extern void x2apic_setup(void);
 static inline int x2apic_enabled(void)
 {
-	return cpu_has_x2apic && apic_is_x2apic_enabled();
+	return boot_cpu_has(X86_FEATURE_X2APIC) && apic_is_x2apic_enabled();
 }
 
-#define x2apic_supported()	(cpu_has_x2apic)
+#define x2apic_supported()	(boot_cpu_has(X86_FEATURE_X2APIC))
 #else /* !CONFIG_X86_X2APIC */
 static inline void check_x2apic(void) { }
 static inline void x2apic_setup(void) { }
diff --git a/arch/x86/include/asm/bios_ebda.h b/arch/x86/include/asm/bios_ebda.h
index aa6a317..2b00c77 100644
--- a/arch/x86/include/asm/bios_ebda.h
+++ b/arch/x86/include/asm/bios_ebda.h
@@ -17,27 +17,6 @@
 	return address;	/* 0 means none */
 }
 
-/*
- * Return the sanitized length of the EBDA in bytes, if it exists.
- */
-static inline unsigned int get_bios_ebda_length(void)
-{
-	unsigned int address;
-	unsigned int length;
-
-	address = get_bios_ebda();
-	if (!address)
-		return 0;
-
-	/* EBDA length is byte 0 of the EBDA (stored in KiB) */
-	length = *(unsigned char *)phys_to_virt(address);
-	length <<= 10;
-
-	/* Trim the length if it extends beyond 640KiB */
-	length = min_t(unsigned int, (640 * 1024) - address, length);
-	return length;
-}
-
 void reserve_ebda_region(void);
 
 #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 6b8d6e8..abd06b1 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -12,29 +12,46 @@
 
 /* Minimum kernel alignment, as a power of two */
 #ifdef CONFIG_X86_64
-#define MIN_KERNEL_ALIGN_LG2	PMD_SHIFT
+# define MIN_KERNEL_ALIGN_LG2	PMD_SHIFT
 #else
-#define MIN_KERNEL_ALIGN_LG2	(PAGE_SHIFT + THREAD_SIZE_ORDER)
+# define MIN_KERNEL_ALIGN_LG2	(PAGE_SHIFT + THREAD_SIZE_ORDER)
 #endif
 #define MIN_KERNEL_ALIGN	(_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
 
 #if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \
 	(CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN)
-#error "Invalid value for CONFIG_PHYSICAL_ALIGN"
+# error "Invalid value for CONFIG_PHYSICAL_ALIGN"
 #endif
 
 #ifdef CONFIG_KERNEL_BZIP2
-#define BOOT_HEAP_SIZE             0x400000
+# define BOOT_HEAP_SIZE		0x400000
 #else /* !CONFIG_KERNEL_BZIP2 */
-
-#define BOOT_HEAP_SIZE	0x10000
-
-#endif /* !CONFIG_KERNEL_BZIP2 */
+# define BOOT_HEAP_SIZE		 0x10000
+#endif
 
 #ifdef CONFIG_X86_64
-#define BOOT_STACK_SIZE	0x4000
-#else
-#define BOOT_STACK_SIZE	0x1000
+# define BOOT_STACK_SIZE	0x4000
+
+# define BOOT_INIT_PGT_SIZE	(6*4096)
+# ifdef CONFIG_RANDOMIZE_BASE
+/*
+ * Assuming all mappings cross the 512GB boundary:
+ * 1 page for level4
+ * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel
+ * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP).
+ * Total is 19 pages.
+ */
+#  ifdef CONFIG_X86_VERBOSE_BOOTUP
+#   define BOOT_PGT_SIZE	(19*4096)
+#  else /* !CONFIG_X86_VERBOSE_BOOTUP */
+#   define BOOT_PGT_SIZE	(17*4096)
+#  endif
+# else /* !CONFIG_RANDOMIZE_BASE */
+#  define BOOT_PGT_SIZE		BOOT_INIT_PGT_SIZE
+# endif
+
+#else /* !CONFIG_X86_64 */
+# define BOOT_STACK_SIZE	0x1000
 #endif
 
 #endif /* _ASM_X86_BOOT_H */
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
index d194266..eae33c7 100644
--- a/arch/x86/include/asm/clocksource.h
+++ b/arch/x86/include/asm/clocksource.h
@@ -3,11 +3,10 @@
 #ifndef _ASM_X86_CLOCKSOURCE_H
 #define _ASM_X86_CLOCKSOURCE_H
 
-#define VCLOCK_NONE	0  /* No vDSO clock available.	*/
-#define VCLOCK_TSC	1  /* vDSO should use vread_tsc.	*/
-#define VCLOCK_HPET	2  /* vDSO should use vread_hpet.	*/
-#define VCLOCK_PVCLOCK	3 /* vDSO should use vread_pvclock. */
-#define VCLOCK_MAX	3
+#define VCLOCK_NONE	0	/* No vDSO clock available.		*/
+#define VCLOCK_TSC	1	/* vDSO should use vread_tsc.		*/
+#define VCLOCK_PVCLOCK	2	/* vDSO should use vread_pvclock.	*/
+#define VCLOCK_MAX	2
 
 struct arch_clocksource_data {
 	int vclock_mode;
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index ebb102e..5a3b2c1 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -307,7 +307,7 @@
 	return (void __user *)round_down(sp - len, 16);
 }
 
-static inline bool is_x32_task(void)
+static inline bool in_x32_syscall(void)
 {
 #ifdef CONFIG_X86_X32_ABI
 	if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
@@ -318,7 +318,7 @@
 
 static inline bool in_compat_syscall(void)
 {
-	return is_ia32_task() || is_x32_task();
+	return in_ia32_syscall() || in_x32_syscall();
 }
 #define in_compat_syscall in_compat_syscall	/* override the generic impl */
 
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 3636ec0..25ebb54 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -27,6 +27,7 @@
 	CPUID_6_EAX,
 	CPUID_8000_000A_EDX,
 	CPUID_7_ECX,
+	CPUID_8000_0007_EBX,
 };
 
 #ifdef CONFIG_X86_FEATURE_NAMES
@@ -118,31 +119,6 @@
 	set_bit(bit, (unsigned long *)cpu_caps_set);	\
 } while (0)
 
-#define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
-#define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
-#define cpu_has_pge		boot_cpu_has(X86_FEATURE_PGE)
-#define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
-#define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)
-#define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
-#define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
-#define cpu_has_aes		boot_cpu_has(X86_FEATURE_AES)
-#define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
-#define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
-#define cpu_has_arch_perfmon	boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
-#define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
-#define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
-#define cpu_has_xsaves		boot_cpu_has(X86_FEATURE_XSAVES)
-#define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
-#define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
-/*
- * Do not add any more of those clumsy macros - use static_cpu_has() for
- * fast paths and boot_cpu_has() otherwise!
- */
-
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 8f9afef..4a41348 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -12,7 +12,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS	17	/* N 32-bit words worth of info */
+#define NCAPINTS	18	/* N 32-bit words worth of info */
 #define NBUGINTS	1	/* N 32-bit bug flags */
 
 /*
@@ -177,6 +177,7 @@
 #define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
 #define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
 #define X86_FEATURE_BPEXT	(6*32+26) /* data breakpoint extension */
+#define X86_FEATURE_PTSC	( 6*32+27) /* performance time-stamp counter */
 #define X86_FEATURE_PERFCTR_L2	( 6*32+28) /* L2 performance counter extensions */
 #define X86_FEATURE_MWAITX	( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
 
@@ -250,6 +251,7 @@
 
 /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
 #define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
+#define X86_FEATURE_IRPERF	(13*32+1) /* Instructions Retired Count */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
 #define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
@@ -280,6 +282,11 @@
 #define X86_FEATURE_PKU		(16*32+ 3) /* Protection Keys for Userspace */
 #define X86_FEATURE_OSPKE	(16*32+ 4) /* OS Protection Keys Enable */
 
+/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
+#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
+#define X86_FEATURE_SUCCOR	(17*32+1) /* Uncorrectable error containment and recovery */
+#define X86_FEATURE_SMCA	(17*32+3) /* Scalable MCA */
+
 /*
  * BUG word(s)
  */
@@ -294,6 +301,9 @@
 #define X86_BUG_FXSAVE_LEAK	X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_BUG_CLFLUSH_MONITOR	X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
 #define X86_BUG_SYSRET_SS_ATTRS	X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#define X86_BUG_NULL_SEG	X86_BUG(9) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE	X86_BUG(10) /* SWAPGS without input dep on GS */
+
 
 #ifdef CONFIG_X86_32
 /*
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 53748c4..78d1e74 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -3,6 +3,7 @@
 
 #include <asm/fpu/api.h>
 #include <asm/pgtable.h>
+#include <asm/processor-flags.h>
 #include <asm/tlb.h>
 
 /*
@@ -28,33 +29,22 @@
 
 #define MAX_CMDLINE_ADDRESS	UINT_MAX
 
+#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF
+
 #ifdef CONFIG_X86_32
 
-
 extern unsigned long asmlinkage efi_call_phys(void *, ...);
 
+#define arch_efi_call_virt_setup()	kernel_fpu_begin()
+#define arch_efi_call_virt_teardown()	kernel_fpu_end()
+
 /*
  * Wrap all the virtual calls in a way that forces the parameters on the stack.
  */
-
-/* Use this macro if your virtual returns a non-void value */
-#define efi_call_virt(f, args...) \
+#define arch_efi_call_virt(f, args...)					\
 ({									\
-	efi_status_t __s;						\
-	kernel_fpu_begin();						\
-	__s = ((efi_##f##_t __attribute__((regparm(0)))*)		\
-		efi.systab->runtime->f)(args);				\
-	kernel_fpu_end();						\
-	__s;								\
-})
-
-/* Use this macro if your virtual call does not return any value */
-#define __efi_call_virt(f, args...) \
-({									\
-	kernel_fpu_begin();						\
 	((efi_##f##_t __attribute__((regparm(0)))*)			\
 		efi.systab->runtime->f)(args);				\
-	kernel_fpu_end();						\
 })
 
 #define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)
@@ -78,10 +68,8 @@
 	u64	phys_stack;
 } __packed;
 
-#define efi_call_virt(f, ...)						\
+#define arch_efi_call_virt_setup()					\
 ({									\
-	efi_status_t __s;						\
-									\
 	efi_sync_low_kernel_mappings();					\
 	preempt_disable();						\
 	__kernel_fpu_begin();						\
@@ -91,9 +79,13 @@
 		write_cr3((unsigned long)efi_scratch.efi_pgt);		\
 		__flush_tlb_all();					\
 	}								\
-									\
-	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
-									\
+})
+
+#define arch_efi_call_virt(f, args...)					\
+	efi_call((void *)efi.systab->runtime->f, args)			\
+
+#define arch_efi_call_virt_teardown()					\
+({									\
 	if (efi_scratch.use_pgd) {					\
 		write_cr3(efi_scratch.prev_cr3);			\
 		__flush_tlb_all();					\
@@ -101,15 +93,8 @@
 									\
 	__kernel_fpu_end();						\
 	preempt_enable();						\
-	__s;								\
 })
 
-/*
- * All X86_64 virt calls return non-void values. Thus, use non-void call for
- * virt calls that would be void on X86_32.
- */
-#define __efi_call_virt(f, args...) efi_call_virt(f, args)
-
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
 					u32 type, u64 attribute);
 
@@ -180,6 +165,8 @@
 extern struct console early_efi_console;
 extern void parse_efi_setup(u64 phys_addr, u32 data_len);
 
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
 #ifdef CONFIG_EFI_MIXED
 extern void efi_thunk_runtime_setup(void);
 extern efi_status_t efi_thunk_set_virtual_address_map(
@@ -225,6 +212,11 @@
 #define efi_call_early(f, ...)						\
 	__efi_early()->call(__efi_early()->f, __VA_ARGS__);
 
+#define __efi_call_early(f, ...)					\
+	__efi_early()->call((unsigned long)f, __VA_ARGS__);
+
+#define efi_is_64bit()		__efi_early()->is64
+
 extern bool efi_reboot_required(void);
 
 #else
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 15340e3..fea7724 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -176,7 +176,7 @@
 	regs->si = regs->di = regs->bp = 0;
 	regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
 	regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
-	t->fs = t->gs = 0;
+	t->fsbase = t->gsbase = 0;
 	t->fsindex = t->gsindex = 0;
 	t->ds = t->es = ds;
 }
@@ -226,8 +226,8 @@
 	(pr_reg)[18] = (regs)->flags;				\
 	(pr_reg)[19] = (regs)->sp;				\
 	(pr_reg)[20] = (regs)->ss;				\
-	(pr_reg)[21] = current->thread.fs;			\
-	(pr_reg)[22] = current->thread.gs;			\
+	(pr_reg)[21] = current->thread.fsbase;			\
+	(pr_reg)[22] = current->thread.gsbase;			\
 	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;	\
 	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;	\
 	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;	\
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index f8a29d2..3a10616 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -4,6 +4,7 @@
 #include <asm/page.h>
 #include <asm-generic/hugetlb.h>
 
+#define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
 					 unsigned long addr,
diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
index d0afb05..f706041 100644
--- a/arch/x86/include/asm/irq_work.h
+++ b/arch/x86/include/asm/irq_work.h
@@ -5,7 +5,7 @@
 
 static inline bool arch_irq_work_has_interrupt(void)
 {
-	return cpu_has_apic;
+	return boot_cpu_has(X86_FEATURE_APIC);
 }
 
 #endif /* _ASM_IRQ_WORK_H */
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index 332f98c..22a8537 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -6,6 +6,8 @@
  * Copyright (C) 2008 Wind River Systems, Inc.
  */
 
+#include <asm/ptrace.h>
+
 /*
  * BUFMAX defines the maximum number of characters in inbound/outbound
  * buffers at least NUMREGBYTES*2 are needed for register packets
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f62a9f37..e0fbe7e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -43,7 +43,7 @@
 
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
-#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_HALT_POLL_NS_DEFAULT 400000
 
 #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
 
@@ -562,7 +562,6 @@
 	struct {
 		u64 msr_val;
 		u64 last_steal;
-		u64 accum_steal;
 		struct gfn_to_hva_cache stime;
 		struct kvm_steal_time steal;
 	} st;
@@ -774,6 +773,11 @@
 	u8 nr_reserved_ioapic_pins;
 
 	bool disabled_lapic_found;
+
+	/* Struct members for AVIC */
+	u32 ldr_mode;
+	struct page *avic_logical_id_table_page;
+	struct page *avic_physical_id_table_page;
 };
 
 struct kvm_vm_stat {
@@ -804,6 +808,7 @@
 	u32 halt_exits;
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
+	u32 halt_poll_invalid;
 	u32 halt_wakeup;
 	u32 request_irq_exits;
 	u32 irq_exits;
@@ -848,6 +853,9 @@
 	bool (*cpu_has_high_real_mode_segbase)(void);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
+	int (*vm_init)(struct kvm *kvm);
+	void (*vm_destroy)(struct kvm *kvm);
+
 	/* Create, but do not attach this VCPU */
 	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
@@ -914,7 +922,7 @@
 	bool (*get_enable_apicv)(void);
 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
-	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
+	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
@@ -990,8 +998,13 @@
 	 */
 	int (*pre_block)(struct kvm_vcpu *vcpu);
 	void (*post_block)(struct kvm_vcpu *vcpu);
+
+	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
+	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
+
 	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
 			      uint32_t guest_irq, bool set);
+	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
@@ -1341,7 +1354,18 @@
 void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
 		     struct kvm_lapic_irq *irq);
 
-static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+	if (kvm_x86_ops->vcpu_blocking)
+		kvm_x86_ops->vcpu_blocking(vcpu);
+}
+
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+	if (kvm_x86_ops->vcpu_unblocking)
+		kvm_x86_ops->vcpu_unblocking(vcpu);
+}
+
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 79327e9..0ccb26d 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -8,40 +8,6 @@
 
 #ifdef CONFIG_X86_32
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
-
-/*
- * Make sure the compiler doesn't do anything stupid with the
- * arguments on the stack - they are owned by the *caller*, not
- * the callee. This just fools gcc into not spilling into them,
- * and keeps it from doing tailcall recursion and/or using the
- * stack slots for temporaries, since they are live and "used"
- * all the way to the end of the function.
- *
- * NOTE! On x86-64, all the arguments are in registers, so this
- * only matters on a 32-bit kernel.
- */
-#define asmlinkage_protect(n, ret, args...) \
-	__asmlinkage_protect##n(ret, ##args)
-#define __asmlinkage_protect_n(ret, args...) \
-	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
-#define __asmlinkage_protect0(ret) \
-	__asmlinkage_protect_n(ret)
-#define __asmlinkage_protect1(ret, arg1) \
-	__asmlinkage_protect_n(ret, "m" (arg1))
-#define __asmlinkage_protect2(ret, arg1, arg2) \
-	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
-#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
-	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
-#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
-	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
-			      "m" (arg4))
-#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
-	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
-			      "m" (arg4), "m" (arg5))
-#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
-	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
-			      "m" (arg4), "m" (arg5), "m" (arg6))
-
 #endif /* CONFIG_X86_32 */
 
 #ifdef __ASSEMBLY__
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 7e68f95..a7f9181 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -32,8 +32,6 @@
 #endif
 	return 0;
 }
-int klp_write_module_reloc(struct module *mod, unsigned long type,
-			   unsigned long loc, unsigned long value);
 
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 92b6f65..8bf766e 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -104,13 +104,23 @@
 #define MCE_LOG_SIGNATURE	"MACHINECHECK"
 
 /* AMD Scalable MCA */
+#define MSR_AMD64_SMCA_MC0_CTL		0xc0002000
+#define MSR_AMD64_SMCA_MC0_STATUS	0xc0002001
+#define MSR_AMD64_SMCA_MC0_ADDR		0xc0002002
 #define MSR_AMD64_SMCA_MC0_MISC0	0xc0002003
 #define MSR_AMD64_SMCA_MC0_CONFIG	0xc0002004
 #define MSR_AMD64_SMCA_MC0_IPID		0xc0002005
+#define MSR_AMD64_SMCA_MC0_DESTAT	0xc0002008
+#define MSR_AMD64_SMCA_MC0_DEADDR	0xc0002009
 #define MSR_AMD64_SMCA_MC0_MISC1	0xc000200a
+#define MSR_AMD64_SMCA_MCx_CTL(x)	(MSR_AMD64_SMCA_MC0_CTL + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_STATUS(x)	(MSR_AMD64_SMCA_MC0_STATUS + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_ADDR(x)	(MSR_AMD64_SMCA_MC0_ADDR + 0x10*(x))
 #define MSR_AMD64_SMCA_MCx_MISC(x)	(MSR_AMD64_SMCA_MC0_MISC0 + 0x10*(x))
 #define MSR_AMD64_SMCA_MCx_CONFIG(x)	(MSR_AMD64_SMCA_MC0_CONFIG + 0x10*(x))
 #define MSR_AMD64_SMCA_MCx_IPID(x)	(MSR_AMD64_SMCA_MC0_IPID + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_DESTAT(x)	(MSR_AMD64_SMCA_MC0_DESTAT + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_DEADDR(x)	(MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x))
 #define MSR_AMD64_SMCA_MCx_MISCy(x, y)	((MSR_AMD64_SMCA_MC0_MISC1 + y) + (0x10*(x)))
 
 /*
@@ -168,9 +178,18 @@
 
 	      __reserved_0	: 61;
 };
+
+struct mca_msr_regs {
+	u32 (*ctl)	(int bank);
+	u32 (*status)	(int bank);
+	u32 (*addr)	(int bank);
+	u32 (*misc)	(int bank);
+};
+
 extern struct mce_vendor_flags mce_flags;
 
 extern struct mca_config mca_cfg;
+extern struct mca_msr_regs msr_ops;
 extern void mce_register_decode_chain(struct notifier_block *nb);
 extern void mce_unregister_decode_chain(struct notifier_block *nb);
 
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8428002..39634819 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -115,103 +115,12 @@
 	destroy_context_ldt(mm);
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
-{
-	unsigned cpu = smp_processor_id();
+extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+		      struct task_struct *tsk);
 
-	if (likely(prev != next)) {
-#ifdef CONFIG_SMP
-		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		this_cpu_write(cpu_tlbstate.active_mm, next);
-#endif
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-
-		/*
-		 * Re-load page tables.
-		 *
-		 * This logic has an ordering constraint:
-		 *
-		 *  CPU 0: Write to a PTE for 'next'
-		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
-		 *  CPU 1: set bit 1 in next's mm_cpumask
-		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
-		 *
-		 * We need to prevent an outcome in which CPU 1 observes
-		 * the new PTE value and CPU 0 observes bit 1 clear in
-		 * mm_cpumask.  (If that occurs, then the IPI will never
-		 * be sent, and CPU 0's TLB will contain a stale entry.)
-		 *
-		 * The bad outcome can occur if either CPU's load is
-		 * reordered before that CPU's store, so both CPUs must
-		 * execute full barriers to prevent this from happening.
-		 *
-		 * Thus, switch_mm needs a full barrier between the
-		 * store to mm_cpumask and any operation that could load
-		 * from next->pgd.  TLB fills are special and can happen
-		 * due to instruction fetches or for no reason at all,
-		 * and neither LOCK nor MFENCE orders them.
-		 * Fortunately, load_cr3() is serializing and gives the
-		 * ordering guarantee we need.
-		 *
-		 */
-		load_cr3(next->pgd);
-
-		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-
-		/* Stop flush ipis for the previous mm */
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
-
-		/* Load per-mm CR4 state */
-		load_mm_cr4(next);
-
-#ifdef CONFIG_MODIFY_LDT_SYSCALL
-		/*
-		 * Load the LDT, if the LDT is different.
-		 *
-		 * It's possible that prev->context.ldt doesn't match
-		 * the LDT register.  This can happen if leave_mm(prev)
-		 * was called and then modify_ldt changed
-		 * prev->context.ldt but suppressed an IPI to this CPU.
-		 * In this case, prev->context.ldt != NULL, because we
-		 * never set context.ldt to NULL while the mm still
-		 * exists.  That means that next->context.ldt !=
-		 * prev->context.ldt, because mms never share an LDT.
-		 */
-		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_mm_ldt(next);
-#endif
-	}
-#ifdef CONFIG_SMP
-	  else {
-		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
-
-		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
-			/*
-			 * On established mms, the mm_cpumask is only changed
-			 * from irq context, from ptep_clear_flush() while in
-			 * lazy tlb mode, and here. Irqs are blocked during
-			 * schedule, protecting us from simultaneous changes.
-			 */
-			cpumask_set_cpu(cpu, mm_cpumask(next));
-
-			/*
-			 * We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must reload CR3
-			 * to make sure to use no freed page tables.
-			 *
-			 * As above, load_cr3() is serializing and orders TLB
-			 * fills with respect to the mm_cpumask write.
-			 */
-			load_cr3(next->pgd);
-			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-			load_mm_cr4(next);
-			load_mm_ldt(next);
-		}
-	}
-#endif
-}
+extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+			       struct task_struct *tsk);
+#define switch_mm_irqs_off switch_mm_irqs_off
 
 #define activate_mm(prev, next)			\
 do {						\
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 2da46ac..5a73a9c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -89,27 +89,16 @@
 #define MSR_PEBS_LD_LAT_THRESHOLD	0x000003f6
 
 #define MSR_IA32_RTIT_CTL		0x00000570
-#define RTIT_CTL_TRACEEN		BIT(0)
-#define RTIT_CTL_CYCLEACC		BIT(1)
-#define RTIT_CTL_OS			BIT(2)
-#define RTIT_CTL_USR			BIT(3)
-#define RTIT_CTL_CR3EN			BIT(7)
-#define RTIT_CTL_TOPA			BIT(8)
-#define RTIT_CTL_MTC_EN			BIT(9)
-#define RTIT_CTL_TSC_EN			BIT(10)
-#define RTIT_CTL_DISRETC		BIT(11)
-#define RTIT_CTL_BRANCH_EN		BIT(13)
-#define RTIT_CTL_MTC_RANGE_OFFSET	14
-#define RTIT_CTL_MTC_RANGE		(0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
-#define RTIT_CTL_CYC_THRESH_OFFSET	19
-#define RTIT_CTL_CYC_THRESH		(0x0full << RTIT_CTL_CYC_THRESH_OFFSET)
-#define RTIT_CTL_PSB_FREQ_OFFSET	24
-#define RTIT_CTL_PSB_FREQ      		(0x0full << RTIT_CTL_PSB_FREQ_OFFSET)
 #define MSR_IA32_RTIT_STATUS		0x00000571
-#define RTIT_STATUS_CONTEXTEN		BIT(1)
-#define RTIT_STATUS_TRIGGEREN		BIT(2)
-#define RTIT_STATUS_ERROR		BIT(4)
-#define RTIT_STATUS_STOPPED		BIT(5)
+#define MSR_IA32_RTIT_ADDR0_A		0x00000580
+#define MSR_IA32_RTIT_ADDR0_B		0x00000581
+#define MSR_IA32_RTIT_ADDR1_A		0x00000582
+#define MSR_IA32_RTIT_ADDR1_B		0x00000583
+#define MSR_IA32_RTIT_ADDR2_A		0x00000584
+#define MSR_IA32_RTIT_ADDR2_B		0x00000585
+#define MSR_IA32_RTIT_ADDR3_A		0x00000586
+#define MSR_IA32_RTIT_ADDR3_B		0x00000587
 #define MSR_IA32_RTIT_CR3_MATCH		0x00000572
 #define MSR_IA32_RTIT_OUTPUT_BASE	0x00000560
 #define MSR_IA32_RTIT_OUTPUT_MASK	0x00000561
@@ -167,6 +156,14 @@
 #define MSR_PKG_C9_RESIDENCY		0x00000631
 #define MSR_PKG_C10_RESIDENCY		0x00000632
 
+/* Interrupt Response Limit */
+#define MSR_PKGC3_IRTL			0x0000060a
+#define MSR_PKGC6_IRTL			0x0000060b
+#define MSR_PKGC7_IRTL			0x0000060c
+#define MSR_PKGC8_IRTL			0x00000633
+#define MSR_PKGC9_IRTL			0x00000634
+#define MSR_PKGC10_IRTL			0x00000635
+
 /* Run Time Average Power Limiting (RAPL) Interface */
 
 #define MSR_RAPL_POWER_UNIT		0x00000606
@@ -190,12 +187,15 @@
 #define MSR_PP1_ENERGY_STATUS		0x00000641
 #define MSR_PP1_POLICY			0x00000642
 
+/* Config TDP MSRs */
 #define MSR_CONFIG_TDP_NOMINAL		0x00000648
 #define MSR_CONFIG_TDP_LEVEL_1		0x00000649
 #define MSR_CONFIG_TDP_LEVEL_2		0x0000064A
 #define MSR_CONFIG_TDP_CONTROL		0x0000064B
 #define MSR_TURBO_ACTIVATION_RATIO	0x0000064C
 
+#define MSR_PLATFORM_ENERGY_STATUS	0x0000064D
+
 #define MSR_PKG_WEIGHTED_CORE_C0_RES	0x00000658
 #define MSR_PKG_ANY_CORE_C0_RES		0x00000659
 #define MSR_PKG_ANY_GFXE_C0_RES		0x0000065A
@@ -210,13 +210,6 @@
 #define MSR_GFX_PERF_LIMIT_REASONS	0x000006B0
 #define MSR_RING_PERF_LIMIT_REASONS	0x000006B1
 
-/* Config TDP MSRs */
-#define MSR_CONFIG_TDP_NOMINAL		0x00000648
-#define MSR_CONFIG_TDP_LEVEL1		0x00000649
-#define MSR_CONFIG_TDP_LEVEL2		0x0000064A
-#define MSR_CONFIG_TDP_CONTROL		0x0000064B
-#define MSR_TURBO_ACTIVATION_RATIO	0x0000064C
-
 /* Hardware P state interface */
 #define MSR_PPERF			0x0000064e
 #define MSR_PERF_LIMIT_REASONS		0x0000064f
@@ -313,6 +306,9 @@
 #define MSR_AMD64_IBSOPDATA4		0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX	8 /* includes MSR_AMD64_IBSBRTARGET */
 
+/* Fam 17h MSRs */
+#define MSR_F17H_IRPERF			0xc00000e9
+
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL		0xc0010230
 #define MSR_F16H_L2I_PERF_CTR		0xc0010231
@@ -326,6 +322,7 @@
 #define MSR_F15H_PERF_CTR		0xc0010201
 #define MSR_F15H_NB_PERF_CTL		0xc0010240
 #define MSR_F15H_NB_PERF_CTR		0xc0010241
+#define MSR_F15H_PTSC			0xc0010280
 #define MSR_F15H_IC_CFG			0xc0011021
 
 /* Fam 10h MSRs */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 7a79ee2..7dc1d8f 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -84,7 +84,10 @@
 {
 	DECLARE_ARGS(val, low, high);
 
-	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+	asm volatile("1: rdmsr\n"
+		     "2:\n"
+		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
+		     : EAX_EDX_RET(val, low, high) : "c" (msr));
 	if (msr_tracepoint_active(__tracepoint_read_msr))
 		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
 	return EAX_EDX_VAL(val, low, high);
@@ -98,7 +101,10 @@
 	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
 		     "1:\n\t"
 		     ".section .fixup,\"ax\"\n\t"
-		     "3:  mov %[fault],%[err] ; jmp 1b\n\t"
+		     "3: mov %[fault],%[err]\n\t"
+		     "xorl %%eax, %%eax\n\t"
+		     "xorl %%edx, %%edx\n\t"
+		     "jmp 1b\n\t"
 		     ".previous\n\t"
 		     _ASM_EXTABLE(2b, 3b)
 		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
@@ -108,10 +114,14 @@
 	return EAX_EDX_VAL(val, low, high);
 }
 
-static inline void native_write_msr(unsigned int msr,
-				    unsigned low, unsigned high)
+/* Can be uninlined because referenced by paravirt */
+notrace static inline void native_write_msr(unsigned int msr,
+					    unsigned low, unsigned high)
 {
-	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+	asm volatile("1: wrmsr\n"
+		     "2:\n"
+		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
+		     : : "c" (msr), "a"(low), "d" (high) : "memory");
 	if (msr_tracepoint_active(__tracepoint_read_msr))
 		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index b94f6f6..dbff145 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -24,6 +24,7 @@
 #define _ASM_X86_MTRR_H
 
 #include <uapi/asm/mtrr.h>
+#include <asm/pat.h>
 
 
 /*
@@ -83,9 +84,12 @@
 static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 {
 }
+static inline void mtrr_bp_init(void)
+{
+	pat_disable("MTRRs disabled, skipping PAT initialization too.");
+}
 
 #define mtrr_ap_init() do {} while (0)
-#define mtrr_bp_init() do {} while (0)
 #define set_mtrr_aps_delayed_init() do {} while (0)
 #define mtrr_aps_init() do {} while (0)
 #define mtrr_bp_restore() do {} while (0)
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 802dde3..cf8f619 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -37,7 +37,10 @@
 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
+#ifndef __pa
 #define __pa(x)		__phys_addr((unsigned long)(x))
+#endif
+
 #define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))
 /* __pa_symbol should be used for C visible symbols.
    This seems to be the official gcc blessed way to do such arithmetic. */
@@ -51,7 +54,9 @@
 #define __pa_symbol(x) \
 	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
 
+#ifndef __va
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
+#endif
 
 #define __boot_va(x)		__va(x)
 #define __boot_pa(x)		__pa(x)
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 4928cf0..d5c2f8b 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -47,12 +47,10 @@
  * are fully set up. If kernel ASLR is configured, it can extend the
  * kernel page table mapping, reducing the size of the modules area.
  */
-#define KERNEL_IMAGE_SIZE_DEFAULT      (512 * 1024 * 1024)
-#if defined(CONFIG_RANDOMIZE_BASE) && \
-	CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT
-#define KERNEL_IMAGE_SIZE   CONFIG_RANDOMIZE_BASE_MAX_OFFSET
+#if defined(CONFIG_RANDOMIZE_BASE)
+#define KERNEL_IMAGE_SIZE	(1024 * 1024 * 1024)
 #else
-#define KERNEL_IMAGE_SIZE      KERNEL_IMAGE_SIZE_DEFAULT
+#define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
 #endif
 
 #endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 601f1b8..2970d22 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -15,17 +15,6 @@
 #include <linux/cpumask.h>
 #include <asm/frame.h>
 
-static inline int paravirt_enabled(void)
-{
-	return pv_info.paravirt_enabled;
-}
-
-static inline int paravirt_has_feature(unsigned int feature)
-{
-	WARN_ON_ONCE(!pv_info.paravirt_enabled);
-	return (pv_info.features & feature);
-}
-
 static inline void load_sp0(struct tss_struct *tss,
 			     struct thread_struct *thread)
 {
@@ -130,21 +119,31 @@
 
 #define get_kernel_rpl()  (pv_info.kernel_rpl)
 
-static inline u64 paravirt_read_msr(unsigned msr, int *err)
+static inline u64 paravirt_read_msr(unsigned msr)
 {
-	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
+	return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
 }
 
-static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
+static inline void paravirt_write_msr(unsigned msr,
+				      unsigned low, unsigned high)
 {
-	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
+	return PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
 }
 
-/* These should all do BUG_ON(_err), but our headers are too tangled. */
+static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
+{
+	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
+}
+
+static inline int paravirt_write_msr_safe(unsigned msr,
+					  unsigned low, unsigned high)
+{
+	return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
+}
+
 #define rdmsr(msr, val1, val2)			\
 do {						\
-	int _err;				\
-	u64 _l = paravirt_read_msr(msr, &_err);	\
+	u64 _l = paravirt_read_msr(msr);	\
 	val1 = (u32)_l;				\
 	val2 = _l >> 32;			\
 } while (0)
@@ -156,8 +155,7 @@
 
 #define rdmsrl(msr, val)			\
 do {						\
-	int _err;				\
-	val = paravirt_read_msr(msr, &_err);	\
+	val = paravirt_read_msr(msr);		\
 } while (0)
 
 static inline void wrmsrl(unsigned msr, u64 val)
@@ -165,23 +163,23 @@
 	wrmsr(msr, (u32)val, (u32)(val>>32));
 }
 
-#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)
+#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr, a, b)			\
-({						\
-	int _err;				\
-	u64 _l = paravirt_read_msr(msr, &_err);	\
-	(*a) = (u32)_l;				\
-	(*b) = _l >> 32;			\
-	_err;					\
+#define rdmsr_safe(msr, a, b)				\
+({							\
+	int _err;					\
+	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
+	(*a) = (u32)_l;					\
+	(*b) = _l >> 32;				\
+	_err;						\
 })
 
 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 {
 	int err;
 
-	*p = paravirt_read_msr(msr, &err);
+	*p = paravirt_read_msr_safe(msr, &err);
 	return err;
 }
 
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index e8c2326..7fa9e77 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -69,15 +69,9 @@
 	u16 extra_user_64bit_cs;  /* __USER_CS if none */
 #endif
 
-	int paravirt_enabled;
-	unsigned int features;	  /* valid only if paravirt_enabled is set */
 	const char *name;
 };
 
-#define paravirt_has(x) paravirt_has_feature(PV_SUPPORTED_##x)
-/* Supported features */
-#define PV_SUPPORTED_RTC        (1<<0)
-
 struct pv_init_ops {
 	/*
 	 * Patch may replace one of the defined code sequences with
@@ -155,10 +149,16 @@
 	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
 		      unsigned int *ecx, unsigned int *edx);
 
-	/* MSR, PMC and TSR operations.
-	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
-	u64 (*read_msr)(unsigned int msr, int *err);
-	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+	/* Unsafe MSR operations.  These will warn or panic on failure. */
+	u64 (*read_msr)(unsigned int msr);
+	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+
+	/*
+	 * Safe MSR operations.
+	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
+	 */
+	u64 (*read_msr_safe)(unsigned int msr, int *err);
+	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);
 
 	u64 (*read_pmc)(int counter);
 
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index ca6c228..0b1ff4c 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -5,8 +5,8 @@
 #include <asm/pgtable_types.h>
 
 bool pat_enabled(void);
+void pat_disable(const char *reason);
 extern void pat_init(void);
-void pat_init_cache_modes(u64);
 
 extern int reserve_memtype(u64 start, u64 end,
 		enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 5a2ed3e..f353061 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -285,6 +285,10 @@
 static inline void perf_check_microcode(void) { }
 #endif
 
+#ifdef CONFIG_CPU_SUP_INTEL
+ extern void intel_pt_handle_vmx(int on);
+#endif
+
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
  extern void amd_pmu_enable_virt(void);
  extern void amd_pmu_disable_virt(void);
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 97f3242..f86491a 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -183,7 +183,7 @@
 
 static inline int has_transparent_hugepage(void)
 {
-	return cpu_has_pse;
+	return boot_cpu_has(X86_FEATURE_PSE);
 }
 
 #ifdef __HAVE_ARCH_PTE_DEVMAP
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index bf8b35d..fbc5e92 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -47,6 +47,15 @@
 		BUG();
 }
 
+static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
+		size_t n)
+{
+	if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
+		return memcpy_mcsafe(dst, (void __force *) src, n);
+	memcpy(dst, (void __force *) src, n);
+	return 0;
+}
+
 /**
  * arch_wmb_pmem - synchronize writes to persistent memory
  *
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 983738a..62c6cc3 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -132,8 +132,6 @@
 	u16			logical_proc_id;
 	/* Core id: */
 	u16			cpu_core_id;
-	/* Compute unit id */
-	u8			compute_unit_id;
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
@@ -390,9 +388,16 @@
 	unsigned long		ip;
 #endif
 #ifdef CONFIG_X86_64
-	unsigned long		fs;
+	unsigned long		fsbase;
+	unsigned long		gsbase;
+#else
+	/*
+	 * XXX: this could presumably be unsigned short.  Alternatively,
+	 * 32-bit kernels could be taught to use fsindex instead.
+	 */
+	unsigned long fs;
+	unsigned long gs;
 #endif
-	unsigned long		gs;
 
 	/* Save middle states of ptrace breakpoints */
 	struct perf_event	*ptrace_bps[HBP_NUM];
@@ -475,8 +480,6 @@
 #include <asm/paravirt.h>
 #else
 #define __cpuid			native_cpuid
-#define paravirt_enabled()	0
-#define paravirt_has(x) 	0
 
 static inline void load_sp0(struct tss_struct *tss,
 			    struct thread_struct *thread)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index ceec86eb..453744c 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -99,26 +99,36 @@
 /*
  * lock for writing
  */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
-{
-	long tmp;
-	asm volatile("# beginning down_write\n\t"
-		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
-		     /* adds 0xffff0001, returns the old value */
-		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
-		     /* was the active mask 0 before? */
-		     "  jz        1f\n"
-		     "  call call_rwsem_down_write_failed\n"
-		     "1:\n"
-		     "# ending down_write"
-		     : "+m" (sem->count), "=d" (tmp)
-		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS)
-		     : "memory", "cc");
-}
+#define ____down_write(sem, slow_path)			\
+({							\
+	long tmp;					\
+	struct rw_semaphore* ret;			\
+	asm volatile("# beginning down_write\n\t"	\
+		     LOCK_PREFIX "  xadd      %1,(%3)\n\t"	\
+		     /* adds 0xffff0001, returns the old value */ \
+		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
+		     /* was the active mask 0 before? */\
+		     "  jz        1f\n"			\
+		     "  call " slow_path "\n"		\
+		     "1:\n"				\
+		     "# ending down_write"		\
+		     : "+m" (sem->count), "=d" (tmp), "=a" (ret)	\
+		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
+		     : "memory", "cc");			\
+	ret;						\
+})
 
 static inline void __down_write(struct rw_semaphore *sem)
 {
-	__down_write_nested(sem, 0);
+	____down_write(sem, "call_rwsem_down_write_failed");
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+	if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
+		return -EINTR;
+
+	return 0;
 }
 
 /*
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 7d5a192..1549caa0 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_SEGMENT_H
 
 #include <linux/const.h>
+#include <asm/alternative.h>
 
 /*
  * Constructor for a conventional segment GDT (or LDT) entry.
@@ -207,13 +208,6 @@
 #define __USER_CS			(GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
 #define __PER_CPU_SEG			(GDT_ENTRY_PER_CPU*8 + 3)
 
-/* TLS indexes for 64-bit - hardcoded in arch_prctl(): */
-#define FS_TLS				0
-#define GS_TLS				1
-
-#define GS_TLS_SEL			((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
-#define FS_TLS_SEL			((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
-
 #endif
 
 #ifndef CONFIG_PARAVIRT
@@ -249,10 +243,13 @@
 #endif
 
 /*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
+ * Load a segment. Fall back on loading the zero segment if something goes
+ * wrong.  This variant assumes that loading zero fully clears the segment.
+ * This is always the case on Intel CPUs and, even on 64-bit AMD CPUs, any
+ * failure to fully clear the cached descriptor is only observable for
+ * FS and GS.
  */
-#define loadsegment(seg, value)						\
+#define __loadsegment_simple(seg, value)				\
 do {									\
 	unsigned short __val = (value);					\
 									\
@@ -269,6 +266,38 @@
 		     : "+r" (__val) : : "memory");			\
 } while (0)
 
+#define __loadsegment_ss(value) __loadsegment_simple(ss, (value))
+#define __loadsegment_ds(value) __loadsegment_simple(ds, (value))
+#define __loadsegment_es(value) __loadsegment_simple(es, (value))
+
+#ifdef CONFIG_X86_32
+
+/*
+ * On 32-bit systems, the hidden parts of FS and GS are unobservable if
+ * the selector is NULL, so there's no funny business here.
+ */
+#define __loadsegment_fs(value) __loadsegment_simple(fs, (value))
+#define __loadsegment_gs(value) __loadsegment_simple(gs, (value))
+
+#else
+
+static inline void __loadsegment_fs(unsigned short value)
+{
+	asm volatile("						\n"
+		     "1:	movw %0, %%fs			\n"
+		     "2:					\n"
+
+		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_clear_fs)
+
+		     : : "rm" (value) : "memory");
+}
+
+/* __loadsegment_gs is intentionally undefined.  Use load_gs_index instead. */
+
+#endif
+
+#define loadsegment(seg, value) __loadsegment_ ## seg (value)
+
 /*
  * Save a segment register away:
  */
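
A minimal usage sketch of the new per-register dispatch, using the savesegment()
helper introduced right above and example selector values; this is illustrative
only and not part of the patch:

	static void demo_reload_segments(void)
	{
		unsigned short sel;

		savesegment(ds, sel);	/* read the current %ds selector */
		loadsegment(ds, sel);	/* expands to __loadsegment_ds() */
		loadsegment(fs, 0);	/* on 64-bit: __loadsegment_fs(), where a
					 * faulting load is fixed up by
					 * ex_handler_clear_fs */
	}
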
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 11af24e..ac1d5da 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -6,6 +6,7 @@
 #define COMMAND_LINE_SIZE 2048
 
 #include <linux/linkage.h>
+#include <asm/page_types.h>
 
 #ifdef __i386__
 
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 20a3de5..66b0573 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -155,6 +155,7 @@
 	wbinvd();
 	return 0;
 }
+#define smp_num_siblings	1
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus;
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 6136d99..d0fe23e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -78,7 +78,8 @@
 	u32 exit_int_info;
 	u32 exit_int_info_err;
 	u64 nested_ctl;
-	u8 reserved_4[16];
+	u64 avic_vapic_bar;
+	u8 reserved_4[8];
 	u32 event_inj;
 	u32 event_inj_err;
 	u64 nested_cr3;
@@ -88,7 +89,11 @@
 	u64 next_rip;
 	u8 insn_len;
 	u8 insn_bytes[15];
-	u8 reserved_6[800];
+	u64 avic_backing_page;	/* Offset 0xe0 */
+	u8 reserved_6[8];	/* Offset 0xe8 */
+	u64 avic_logical_id;	/* Offset 0xf0 */
+	u64 avic_physical_id;	/* Offset 0xf8 */
+	u8 reserved_7[768];
 };
 
 
@@ -111,6 +116,9 @@
 #define V_INTR_MASKING_SHIFT 24
 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
 
+#define AVIC_ENABLE_SHIFT 31
+#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
+
 #define SVM_INTERRUPT_SHADOW_MASK 1
 
 #define SVM_IOIO_STR_SHIFT 2
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b..8f321a1 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -39,8 +39,7 @@
 	 */								\
 	unsigned long ebx, ecx, edx, esi, edi;				\
 									\
-	asm volatile("pushfl\n\t"		/* save    flags */	\
-		     "pushl %%ebp\n\t"		/* save    EBP   */	\
+	asm volatile("pushl %%ebp\n\t"		/* save    EBP   */	\
 		     "movl %%esp,%[prev_sp]\n\t"	/* save    ESP   */ \
 		     "movl %[next_sp],%%esp\n\t"	/* restore ESP   */ \
 		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
@@ -49,7 +48,6 @@
 		     "jmp __switch_to\n"	/* regparm call  */	\
 		     "1:\t"						\
 		     "popl %%ebp\n\t"		/* restore EBP   */	\
-		     "popfl\n"			/* restore flags */	\
 									\
 		     /* output parameters */				\
 		     : [prev_sp] "=m" (prev->thread.sp),		\
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
new file mode 100644
index 0000000..9039506
--- /dev/null
+++ b/arch/x86/include/asm/text-patching.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_X86_TEXT_PATCHING_H
+#define _ASM_X86_TEXT_PATCHING_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <asm/ptrace.h>
+
+struct paravirt_patch_site;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch_site *start,
+		    struct paravirt_patch_site *end);
+#else
+static inline void apply_paravirt(struct paravirt_patch_site *start,
+				  struct paravirt_patch_site *end)
+{}
+#define __parainstructions	NULL
+#define __parainstructions_end	NULL
+#endif
+
+extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+
+/*
+ * Clear and restore the kernel write-protection flag on the local CPU.
+ * Allows the kernel to edit read-only pages.
+ * Side-effect: any interrupt handler running between save and restore will have
+ * the ability to write to read-only pages.
+ *
+ * Warning:
+ * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
+ * no thread can be preempted in the instructions being modified (no iret to an
+ * invalid instruction possible) or if the instructions are changed from a
+ * consistent state to another consistent state atomically.
+ * On the local CPU you need to be protected against NMI or MCE handlers seeing an
+ * inconsistent instruction while you patch.
+ */
+extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern int poke_int3_handler(struct pt_regs *regs);
+extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+
+#endif /* _ASM_X86_TEXT_PATCHING_H */
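
A minimal sketch of a text_poke() caller honouring the rules spelled out in the
comment above; the includes and text_mutex serialization are assumptions here,
not part of this header:

	#include <linux/memory.h>	/* text_mutex (assumed) */
	#include <linux/mutex.h>
	#include <asm/text-patching.h>

	static void demo_poke_nop(void *addr)
	{
		static const u8 nop = 0x90;	/* single-byte NOP opcode */

		mutex_lock(&text_mutex);
		text_poke(addr, &nop, sizeof(nop));
		mutex_unlock(&text_mutex);
	}
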
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 8286669..30c133a 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -255,7 +255,7 @@
 	return true;
 }
 
-static inline bool is_ia32_task(void)
+static inline bool in_ia32_syscall(void)
 {
 #ifdef CONFIG_X86_32
 	return true;
@@ -276,11 +276,9 @@
  */
 #define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
 
-#endif	/* !__ASSEMBLY__ */
-
-#ifndef __ASSEMBLY__
 extern void arch_task_cache_init(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 extern void arch_release_task_struct(struct task_struct *tsk);
-#endif
+#endif	/* !__ASSEMBLY__ */
+
 #endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c24b422..4e5be94 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -181,7 +181,7 @@
 
 static inline void __flush_tlb_all(void)
 {
-	if (cpu_has_pge)
+	if (static_cpu_has(X86_FEATURE_PGE))
 		__flush_tlb_global();
 	else
 		__flush_tlb();
@@ -319,12 +319,6 @@
 
 #endif	/* SMP */
 
-/* Not inlined due to inc_irq_stat not being defined yet */
-#define flush_tlb_local() {		\
-	inc_irq_stat(irq_tlb_count);	\
-	local_flush_tlb();		\
-}
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end)	\
 	native_flush_tlb_others(mask, mm, start, end)
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 174c421..7428697 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -22,7 +22,7 @@
 static inline cycles_t get_cycles(void)
 {
 #ifndef CONFIG_X86_TSC
-	if (!cpu_has_tsc)
+	if (!boot_cpu_has(X86_FEATURE_TSC))
 		return 0;
 #endif
 
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a969ae6..12f9653 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -108,9 +108,17 @@
 
 #define ARCH_HAS_RELATIVE_EXTABLE
 
+#define swap_ex_entry_fixup(a, b, tmp, delta)			\
+	do {							\
+		(a)->fixup = (b)->fixup + (delta);		\
+		(b)->fixup = (tmp).fixup - (delta);		\
+		(a)->handler = (b)->handler + (delta);		\
+		(b)->handler = (tmp).handler - (delta);		\
+	} while (0)
+
 extern int fixup_exception(struct pt_regs *regs, int trapnr);
 extern bool ex_has_fault_handler(unsigned long ip);
-extern int early_fixup_exception(unsigned long *ip);
+extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
 
 /*
  * These are the main single-value transfer routines.  They automatically
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 71605c7..c852590 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -51,15 +51,66 @@
 	BIOS_STATUS_UNAVAIL		= -EBUSY
 };
 
+/* Address map parameters */
+struct uv_gam_parameters {
+	u64	mmr_base;
+	u64	gru_base;
+	u8	mmr_shift;	/* Convert PNode to MMR space offset */
+	u8	gru_shift;	/* Convert PNode to GRU space offset */
+	u8	gpa_shift;	/* Size of offset field in GRU phys addr */
+	u8	unused1;
+};
+
+/* UV_TABLE_GAM_RANGE_ENTRY values */
+#define UV_GAM_RANGE_TYPE_UNUSED	0 /* End of table */
+#define UV_GAM_RANGE_TYPE_RAM		1 /* Normal RAM */
+#define UV_GAM_RANGE_TYPE_NVRAM		2 /* Non-volatile memory */
+#define UV_GAM_RANGE_TYPE_NV_WINDOW	3 /* NVMDIMM block window */
+#define UV_GAM_RANGE_TYPE_NV_MAILBOX	4 /* NVMDIMM mailbox */
+#define UV_GAM_RANGE_TYPE_HOLE		5 /* Unused address range */
+#define UV_GAM_RANGE_TYPE_MAX		6
+
+/* The structure stores PA bits 56:26, for 64MB granularity */
+#define UV_GAM_RANGE_SHFT		26		/* 64MB */
+
+struct uv_gam_range_entry {
+	char	type;		/* Entry type: GAM_RANGE_TYPE_UNUSED, etc. */
+	char	unused1;
+	u16	nasid;		/* HNasid */
+	u16	sockid;		/* Socket ID, high bits of APIC ID */
+	u16	pnode;		/* Index to MMR and GRU spaces */
+	u32	pxm;		/* ACPI proximity domain number */
+	u32	limit;		/* PA bits 56:26 (UV_GAM_RANGE_SHFT) */
+};
+
+#define	UV_SYSTAB_SIG			"UVST"
+#define	UV_SYSTAB_VERSION_1		1	/* UV1/2/3 BIOS version */
+#define	UV_SYSTAB_VERSION_UV4		0x400	/* UV4 BIOS base version */
+#define	UV_SYSTAB_VERSION_UV4_1		0x401	/* + gpa_shift */
+#define	UV_SYSTAB_VERSION_UV4_2		0x402	/* + TYPE_NVRAM/WINDOW/MBOX */
+#define	UV_SYSTAB_VERSION_UV4_LATEST	UV_SYSTAB_VERSION_UV4_2
+
+#define	UV_SYSTAB_TYPE_UNUSED		0	/* End of table (offset == 0) */
+#define	UV_SYSTAB_TYPE_GAM_PARAMS	1	/* GAM PARAM conversions */
+#define	UV_SYSTAB_TYPE_GAM_RNG_TBL	2	/* GAM entry table */
+#define	UV_SYSTAB_TYPE_MAX		3
+
 /*
  * The UV system table describes specific firmware
  * capabilities available to the Linux kernel at runtime.
  */
 struct uv_systab {
-	char signature[4];	/* must be "UVST" */
+	char signature[4];	/* must be UV_SYSTAB_SIG */
 	u32 revision;		/* distinguish different firmware revs */
 	u64 function;		/* BIOS runtime callback function ptr */
+	u32 size;		/* systab size (starting with _VERSION_UV4) */
+	struct {
+		u32 type:8;	/* type of entry */
+		u32 offset:24;	/* byte offset from struct start to entry */
+	} entry[1];		/* additional entries follow */
 };
+extern struct uv_systab *uv_systab;
+/* (... end of definitions from UV BIOS ...) */
 
 enum {
 	BIOS_FREQ_BASE_PLATFORM = 0,
@@ -99,7 +150,11 @@
 extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);
 extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus);
 
+#ifdef CONFIG_EFI
 extern void uv_bios_init(void);
+#else
+void uv_bios_init(void) { }
+#endif
 
 extern unsigned long sn_rtc_cycles_per_second;
 extern int uv_type;
@@ -107,7 +162,7 @@
 extern long sn_coherency_id;
 extern long sn_region_size;
 extern long system_serial_number;
-#define partition_coherence_id()	(sn_coherency_id)
+#define uv_partition_coherence_id()	(sn_coherency_id)
 
 extern struct kobject *sgi_uv_kobj;	/* /sys/firmware/sgi_uv */
 
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index fc808b8..cc44d92 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -598,7 +598,7 @@
 	int			timeout_tries;
 	int			ipi_attempts;
 	int			conseccompletes;
-	short			nobau;
+	bool			nobau;
 	short			baudisabled;
 	short			cpu;
 	short			osnode;
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index ea707478..097b80c 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -16,9 +16,11 @@
 #include <linux/percpu.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <linux/topology.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
 #include <asm/uv/uv_mmrs.h>
+#include <asm/uv/bios.h>
 #include <asm/irq_vectors.h>
 #include <asm/io_apic.h>
 
@@ -103,7 +105,6 @@
  *	      processor APICID register.
  */
 
-
 /*
  * Maximum number of bricks in all partitions and in all coherency domains.
  * This is the total number of bricks accessible in the numalink fabric. It
@@ -127,6 +128,7 @@
  */
 #define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)
 
+/* System Controller Interface Reg info */
 struct uv_scir_s {
 	struct timer_list timer;
 	unsigned long	offset;
@@ -137,71 +139,173 @@
 	unsigned char	enabled;
 };
 
+/* GAM (globally addressed memory) range table */
+struct uv_gam_range_s {
+	u32	limit;		/* PA bits 56:26 (GAM_RANGE_SHFT) */
+	u16	nasid;		/* node's global physical address */
+	s8	base;		/* entry index of node's base addr */
+	u8	reserved;
+};
+
 /*
  * The following defines attributes of the HUB chip. These attributes are
- * frequently referenced and are kept in the per-cpu data areas of each cpu.
- * They are kept together in a struct to minimize cache misses.
+ * frequently referenced and are kept in a common per hub struct.
+ * After setup, the struct is read only, so it should be readily
+ * available in the L3 cache on the cpu socket for the node.
  */
 struct uv_hub_info_s {
 	unsigned long		global_mmr_base;
+	unsigned long		global_mmr_shift;
 	unsigned long		gpa_mask;
-	unsigned int		gnode_extra;
+	unsigned short		*socket_to_node;
+	unsigned short		*socket_to_pnode;
+	unsigned short		*pnode_to_socket;
+	struct uv_gam_range_s	*gr_table;
+	unsigned short		min_socket;
+	unsigned short		min_pnode;
+	unsigned char		m_val;
+	unsigned char		n_val;
+	unsigned char		gr_table_len;
 	unsigned char		hub_revision;
 	unsigned char		apic_pnode_shift;
+	unsigned char		gpa_shift;
 	unsigned char		m_shift;
 	unsigned char		n_lshift;
+	unsigned int		gnode_extra;
 	unsigned long		gnode_upper;
 	unsigned long		lowmem_remap_top;
 	unsigned long		lowmem_remap_base;
+	unsigned long		global_gru_base;
+	unsigned long		global_gru_shift;
 	unsigned short		pnode;
 	unsigned short		pnode_mask;
 	unsigned short		coherency_domain_number;
 	unsigned short		numa_blade_id;
-	unsigned char		blade_processor_id;
-	unsigned char		m_val;
-	unsigned char		n_val;
-	struct uv_scir_s	scir;
+	unsigned short		nr_possible_cpus;
+	unsigned short		nr_online_cpus;
+	short			memory_nid;
 };
 
-DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-#define uv_hub_info		this_cpu_ptr(&__uv_hub_info)
-#define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
+/* CPU specific info with a pointer to the hub common info struct */
+struct uv_cpu_info_s {
+	void			*p_uv_hub_info;
+	unsigned char		blade_cpu_id;
+	struct uv_scir_s	scir;
+};
+DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);
+
+#define uv_cpu_info		this_cpu_ptr(&__uv_cpu_info)
+#define uv_cpu_info_per(cpu)	(&per_cpu(__uv_cpu_info, cpu))
+
+#define	uv_scir_info		(&uv_cpu_info->scir)
+#define	uv_cpu_scir_info(cpu)	(&uv_cpu_info_per(cpu)->scir)
+
+/* Node specific hub common info struct */
+extern void **__uv_hub_info_list;
+static inline struct uv_hub_info_s *uv_hub_info_list(int node)
+{
+	return (struct uv_hub_info_s *)__uv_hub_info_list[node];
+}
+
+static inline struct uv_hub_info_s *_uv_hub_info(void)
+{
+	return (struct uv_hub_info_s *)uv_cpu_info->p_uv_hub_info;
+}
+#define	uv_hub_info	_uv_hub_info()
+
+static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
+{
+	return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
+}
+
+#define	UV_HUB_INFO_VERSION	0x7150
+extern int uv_hub_info_version(void);
+static inline int uv_hub_info_check(int version)
+{
+	if (uv_hub_info_version() == version)
+		return 0;
+
+	pr_crit("UV: uv_hub_info version(%x) mismatch, expecting(%x)\n",
+		uv_hub_info_version(), version);
+
+	BUG();	/* Catastrophic - cannot continue on unknown UV system */
+}
+#define	_uv_hub_info_check()	uv_hub_info_check(UV_HUB_INFO_VERSION)
 
 /*
- * Hub revisions less than UV2_HUB_REVISION_BASE are UV1 hubs. All UV2
- * hubs have revision numbers greater than or equal to UV2_HUB_REVISION_BASE.
+ * HUB revision ranges for each UV HUB architecture.
  * This is a software convention - NOT the hardware revision numbers in
  * the hub chip.
  */
 #define UV1_HUB_REVISION_BASE		1
 #define UV2_HUB_REVISION_BASE		3
 #define UV3_HUB_REVISION_BASE		5
+#define UV4_HUB_REVISION_BASE		7
 
+#ifdef	UV1_HUB_IS_SUPPORTED
 static inline int is_uv1_hub(void)
 {
 	return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
 }
+#else
+static inline int is_uv1_hub(void)
+{
+	return 0;
+}
+#endif
 
+#ifdef	UV2_HUB_IS_SUPPORTED
 static inline int is_uv2_hub(void)
 {
 	return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
 		(uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
 }
+#else
+static inline int is_uv2_hub(void)
+{
+	return 0;
+}
+#endif
 
+#ifdef	UV3_HUB_IS_SUPPORTED
 static inline int is_uv3_hub(void)
 {
-	return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE;
+	return ((uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE) &&
+		(uv_hub_info->hub_revision < UV4_HUB_REVISION_BASE));
+}
+#else
+static inline int is_uv3_hub(void)
+{
+	return 0;
+}
+#endif
+
+#ifdef	UV4_HUB_IS_SUPPORTED
+static inline int is_uv4_hub(void)
+{
+	return uv_hub_info->hub_revision >= UV4_HUB_REVISION_BASE;
+}
+#else
+static inline int is_uv4_hub(void)
+{
+	return 0;
+}
+#endif
+
+static inline int is_uvx_hub(void)
+{
+	if (uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE)
+		return uv_hub_info->hub_revision;
+
+	return 0;
 }
 
 static inline int is_uv_hub(void)
 {
+#ifdef	UV1_HUB_IS_SUPPORTED
 	return uv_hub_info->hub_revision;
-}
-
-/* code common to uv2 and uv3 only */
-static inline int is_uvx_hub(void)
-{
-	return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
+#endif
+	return is_uvx_hub();
 }
 
 union uvh_apicid {
@@ -243,24 +347,42 @@
 #define UV3_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
 #define UV3_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)
 
-#define UV_LOCAL_MMR_BASE		(is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
-					(is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
-							UV3_LOCAL_MMR_BASE))
-#define UV_GLOBAL_MMR32_BASE		(is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :\
-					(is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :\
-							UV3_GLOBAL_MMR32_BASE))
-#define UV_LOCAL_MMR_SIZE		(is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
-					(is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
-							UV3_LOCAL_MMR_SIZE))
-#define UV_GLOBAL_MMR32_SIZE		(is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
-					(is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :\
-							UV3_GLOBAL_MMR32_SIZE))
+#define UV4_LOCAL_MMR_BASE		0xfa000000UL
+#define UV4_GLOBAL_MMR32_BASE		0xfc000000UL
+#define UV4_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
+#define UV4_GLOBAL_MMR32_SIZE		(16UL * 1024 * 1024)
+
+#define UV_LOCAL_MMR_BASE		(				\
+					is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
+					is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
+					is_uv3_hub() ? UV3_LOCAL_MMR_BASE : \
+					/*is_uv4_hub*/ UV4_LOCAL_MMR_BASE)
+
+#define UV_GLOBAL_MMR32_BASE		(				\
+					is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE : \
+					is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE : \
+					is_uv3_hub() ? UV3_GLOBAL_MMR32_BASE : \
+					/*is_uv4_hub*/ UV4_GLOBAL_MMR32_BASE)
+
+#define UV_LOCAL_MMR_SIZE		(				\
+					is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
+					is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
+					is_uv3_hub() ? UV3_LOCAL_MMR_SIZE : \
+					/*is_uv4_hub*/ UV4_LOCAL_MMR_SIZE)
+
+#define UV_GLOBAL_MMR32_SIZE		(				\
+					is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE : \
+					is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE : \
+					is_uv3_hub() ? UV3_GLOBAL_MMR32_SIZE : \
+					/*is_uv4_hub*/ UV4_GLOBAL_MMR32_SIZE)
+
 #define UV_GLOBAL_MMR64_BASE		(uv_hub_info->global_mmr_base)
 
 #define UV_GLOBAL_GRU_MMR_BASE		0x4000000
 
 #define UV_GLOBAL_MMR32_PNODE_SHIFT	15
-#define UV_GLOBAL_MMR64_PNODE_SHIFT	26
+#define _UV_GLOBAL_MMR64_PNODE_SHIFT	26
+#define UV_GLOBAL_MMR64_PNODE_SHIFT	(uv_hub_info->global_mmr_shift)
 
 #define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
 
@@ -307,18 +429,74 @@
  *	      between socket virtual and socket physical addresses.
  */
 
+/* global bits offset - number of local address bits in gpa for this UV arch */
+static inline unsigned int uv_gpa_shift(void)
+{
+	return uv_hub_info->gpa_shift;
+}
+#define	_uv_gpa_shift
+
+/* Find node that has the address range that contains global address  */
+static inline struct uv_gam_range_s *uv_gam_range(unsigned long pa)
+{
+	struct uv_gam_range_s *gr = uv_hub_info->gr_table;
+	unsigned long pal = (pa & uv_hub_info->gpa_mask) >> UV_GAM_RANGE_SHFT;
+	int i, num = uv_hub_info->gr_table_len;
+
+	if (gr) {
+		for (i = 0; i < num; i++, gr++) {
+			if (pal < gr->limit)
+				return gr;
+		}
+	}
+	pr_crit("UV: GAM Range for 0x%lx not found at %p!\n", pa, gr);
+	BUG();
+}
+
+/* Return base address of node that contains global address  */
+static inline unsigned long uv_gam_range_base(unsigned long pa)
+{
+	struct uv_gam_range_s *gr = uv_gam_range(pa);
+	int base = gr->base;
+
+	if (base < 0)
+		return 0UL;
+
+	return uv_hub_info->gr_table[base].limit;
+}
+
+/* socket phys RAM --> UV global NASID (UV4+) */
+static inline unsigned long uv_soc_phys_ram_to_nasid(unsigned long paddr)
+{
+	return uv_gam_range(paddr)->nasid;
+}
+#define	_uv_soc_phys_ram_to_nasid
+
+/* socket virtual --> UV global NASID (UV4+) */
+static inline unsigned long uv_gpa_nasid(void *v)
+{
+	return uv_soc_phys_ram_to_nasid(__pa(v));
+}
+
 /* socket phys RAM --> UV global physical address */
 static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
 {
+	unsigned int m_val = uv_hub_info->m_val;
+
 	if (paddr < uv_hub_info->lowmem_remap_top)
 		paddr |= uv_hub_info->lowmem_remap_base;
 	paddr |= uv_hub_info->gnode_upper;
-	paddr = ((paddr << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
-		((paddr >> uv_hub_info->m_val) << uv_hub_info->n_lshift);
+	if (m_val)
+		paddr = ((paddr << uv_hub_info->m_shift)
+						>> uv_hub_info->m_shift) |
+			((paddr >> uv_hub_info->m_val)
+						<< uv_hub_info->n_lshift);
+	else
+		paddr |= uv_soc_phys_ram_to_nasid(paddr)
+						<< uv_hub_info->gpa_shift;
 	return paddr;
 }
 
-
 /* socket virtual --> UV global physical address */
 static inline unsigned long uv_gpa(void *v)
 {
@@ -338,54 +516,89 @@
 	unsigned long paddr;
 	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
 	unsigned long remap_top =  uv_hub_info->lowmem_remap_top;
+	unsigned int m_val = uv_hub_info->m_val;
 
-	gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
-		((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
+	if (m_val)
+		gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
+			((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
+
 	paddr = gpa & uv_hub_info->gpa_mask;
 	if (paddr >= remap_base && paddr < remap_base + remap_top)
 		paddr -= remap_base;
 	return paddr;
 }
 
-
-/* gpa -> pnode */
+/* gpa -> gnode */
 static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
 {
-	return gpa >> uv_hub_info->n_lshift;
+	unsigned int n_lshift = uv_hub_info->n_lshift;
+
+	if (n_lshift)
+		return gpa >> n_lshift;
+
+	return uv_gam_range(gpa)->nasid >> 1;
 }
 
 /* gpa -> pnode */
 static inline int uv_gpa_to_pnode(unsigned long gpa)
 {
-	unsigned long n_mask = (1UL << uv_hub_info->n_val) - 1;
-
-	return uv_gpa_to_gnode(gpa) & n_mask;
+	return uv_gpa_to_gnode(gpa) & uv_hub_info->pnode_mask;
 }
 
-/* gpa -> node offset*/
+/* gpa -> node offset */
 static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
 {
-	return (gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift;
+	unsigned int m_shift = uv_hub_info->m_shift;
+
+	if (m_shift)
+		return (gpa << m_shift) >> m_shift;
+
+	return (gpa & uv_hub_info->gpa_mask) - uv_gam_range_base(gpa);
+}
+
+/* Convert socket to node */
+static inline int _uv_socket_to_node(int socket, unsigned short *s2nid)
+{
+	return s2nid ? s2nid[socket - uv_hub_info->min_socket] : socket;
+}
+
+static inline int uv_socket_to_node(int socket)
+{
+	return _uv_socket_to_node(socket, uv_hub_info->socket_to_node);
 }
 
 /* pnode, offset --> socket virtual */
 static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
 {
-	return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
+	unsigned int m_val = uv_hub_info->m_val;
+	unsigned long base;
+	unsigned short sockid, node, *p2s;
+
+	if (m_val)
+		return __va(((unsigned long)pnode << m_val) | offset);
+
+	p2s = uv_hub_info->pnode_to_socket;
+	sockid = p2s ? p2s[pnode - uv_hub_info->min_pnode] : pnode;
+	node = uv_socket_to_node(sockid);
+
+	/* limit address of previous socket is our base, except node 0 is 0 */
+	if (!node)
+		return __va((unsigned long)offset);
+
+	base = (unsigned long)(uv_hub_info->gr_table[node - 1].limit);
+	return __va(base << UV_GAM_RANGE_SHFT | offset);
 }
 
-
-/*
- * Extract a PNODE from an APICID (full apicid, not processor subset)
- */
+/* Extract/Convert a PNODE from an APICID (full apicid, not processor subset) */
 static inline int uv_apicid_to_pnode(int apicid)
 {
-	return (apicid >> uv_hub_info->apic_pnode_shift);
+	int pnode = apicid >> uv_hub_info->apic_pnode_shift;
+	unsigned short *s2pn = uv_hub_info->socket_to_pnode;
+
+	return s2pn ? s2pn[pnode - uv_hub_info->min_socket] : pnode;
 }
 
-/*
- * Convert an apicid to the socket number on the blade
- */
+/* Convert an apicid to the socket number on the blade */
 static inline int uv_apicid_to_socket(int apicid)
 {
 	if (is_uv1_hub())
@@ -434,16 +647,6 @@
 	return readq(uv_global_mmr64_address(pnode, offset));
 }
 
-/*
- * Global MMR space addresses when referenced by the GRU. (GRU does
- * NOT use socket addressing).
- */
-static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
-{
-	return UV_GLOBAL_GRU_MMR_BASE | offset |
-		((unsigned long)pnode << uv_hub_info->m_val);
-}
-
 static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
 {
 	writeb(val, uv_global_mmr64_address(pnode, offset));
@@ -483,27 +686,23 @@
 	writeb(val, uv_local_mmr_address(offset));
 }
 
-/*
- * Structures and definitions for converting between cpu, node, pnode, and blade
- * numbers.
- */
-struct uv_blade_info {
-	unsigned short	nr_possible_cpus;
-	unsigned short	nr_online_cpus;
-	unsigned short	pnode;
-	short		memory_nid;
-	spinlock_t	nmi_lock;	/* obsolete, see uv_hub_nmi */
-	unsigned long	nmi_count;	/* obsolete, see uv_hub_nmi */
-};
-extern struct uv_blade_info *uv_blade_info;
-extern short *uv_node_to_blade;
-extern short *uv_cpu_to_blade;
-extern short uv_possible_blades;
-
 /* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
 static inline int uv_blade_processor_id(void)
 {
-	return uv_hub_info->blade_processor_id;
+	return uv_cpu_info->blade_cpu_id;
+}
+
+/* Blade-local cpu number of cpu N. Numbered 0 .. <# cpus on the blade> */
+static inline int uv_cpu_blade_processor_id(int cpu)
+{
+	return uv_cpu_info_per(cpu)->blade_cpu_id;
+}
+#define _uv_cpu_blade_processor_id 1	/* indicate function available */
+
+/* Blade number to Node number (UV1..UV4 is 1:1) */
+static inline int uv_blade_to_node(int blade)
+{
+	return blade;
 }
 
 /* Blade number of current cpu. Numbered 0 .. <#blades -1> */
@@ -512,55 +711,60 @@
 	return uv_hub_info->numa_blade_id;
 }
 
+/*
+ * Convert linux node number to the UV blade number.
+ * .. Currently for UV1 thru UV4 the node and the blade are identical.
+ * .. If this changes then you MUST check references to this function!
+ */
+static inline int uv_node_to_blade_id(int nid)
+{
+	return nid;
+}
+
 /* Convert a cpu number to the UV blade number */
 static inline int uv_cpu_to_blade_id(int cpu)
 {
-	return uv_cpu_to_blade[cpu];
-}
-
-/* Convert linux node number to the UV blade number */
-static inline int uv_node_to_blade_id(int nid)
-{
-	return uv_node_to_blade[nid];
+	return uv_node_to_blade_id(cpu_to_node(cpu));
 }
 
 /* Convert a blade id to the PNODE of the blade */
 static inline int uv_blade_to_pnode(int bid)
 {
-	return uv_blade_info[bid].pnode;
+	return uv_hub_info_list(uv_blade_to_node(bid))->pnode;
 }
 
 /* Nid of memory node on blade. -1 if no blade-local memory */
 static inline int uv_blade_to_memory_nid(int bid)
 {
-	return uv_blade_info[bid].memory_nid;
+	return uv_hub_info_list(uv_blade_to_node(bid))->memory_nid;
 }
 
 /* Determine the number of possible cpus on a blade */
 static inline int uv_blade_nr_possible_cpus(int bid)
 {
-	return uv_blade_info[bid].nr_possible_cpus;
+	return uv_hub_info_list(uv_blade_to_node(bid))->nr_possible_cpus;
 }
 
 /* Determine the number of online cpus on a blade */
 static inline int uv_blade_nr_online_cpus(int bid)
 {
-	return uv_blade_info[bid].nr_online_cpus;
+	return uv_hub_info_list(uv_blade_to_node(bid))->nr_online_cpus;
 }
 
 /* Convert a cpu id to the PNODE of the blade containing the cpu */
 static inline int uv_cpu_to_pnode(int cpu)
 {
-	return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode;
+	return uv_cpu_hub_info(cpu)->pnode;
 }
 
 /* Convert a linux node number to the PNODE of the blade */
 static inline int uv_node_to_pnode(int nid)
 {
-	return uv_blade_info[uv_node_to_blade_id(nid)].pnode;
+	return uv_hub_info_list(nid)->pnode;
 }
 
 /* Maximum possible number of blades */
+extern short uv_possible_blades;
 static inline int uv_num_possible_blades(void)
 {
 	return uv_possible_blades;
@@ -578,9 +782,7 @@
 /* Newer SMM NMI handler, not present in all systems */
 #define UVH_NMI_MMRX		UVH_EVENT_OCCURRED0
 #define UVH_NMI_MMRX_CLEAR	UVH_EVENT_OCCURRED0_ALIAS
-#define UVH_NMI_MMRX_SHIFT	(is_uv1_hub() ? \
-					UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :\
-					UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
+#define UVH_NMI_MMRX_SHIFT	UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT
 #define	UVH_NMI_MMRX_TYPE	"EXTIO_INT0"
 
 /* Non-zero indicates newer SMM NMI handler present */
@@ -622,9 +824,9 @@
 /* Update SCIR state */
 static inline void uv_set_scir_bits(unsigned char value)
 {
-	if (uv_hub_info->scir.state != value) {
-		uv_hub_info->scir.state = value;
-		uv_write_local_mmr8(uv_hub_info->scir.offset, value);
+	if (uv_scir_info->state != value) {
+		uv_scir_info->state = value;
+		uv_write_local_mmr8(uv_scir_info->offset, value);
 	}
 }
 
@@ -635,10 +837,10 @@
 
 static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
 {
-	if (uv_cpu_hub_info(cpu)->scir.state != value) {
+	if (uv_cpu_scir_info(cpu)->state != value) {
 		uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
-				uv_cpu_hub_info(cpu)->scir.offset, value);
-		uv_cpu_hub_info(cpu)->scir.state = value;
+				uv_cpu_scir_info(cpu)->offset, value);
+		uv_cpu_scir_info(cpu)->state = value;
 	}
 }
 
@@ -666,10 +868,7 @@
 
 /*
  * Get the minimum revision number of the hub chips within the partition.
- *     1 - UV1 rev 1.0 initial silicon
- *     2 - UV1 rev 2.0 production silicon
- *     3 - UV2 rev 1.0 initial silicon
- *     5 - UV3 rev 1.0 initial silicon
+ * (See UVx_HUB_REVISION_BASE above for specific values.)
  */
 static inline int uv_get_min_hub_revision_id(void)
 {
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index ddd8db6..548d684 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,7 +5,7 @@
  *
  * SGI UV MMR definitions
  *
- * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2016 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_X86_UV_UV_MMRS_H
@@ -18,10 +18,11 @@
  * grouped by architecture types.
  *
  * UVH  - definitions common to all UV hub types.
- * UVXH - definitions common to all UV eXtended hub types (currently 2 & 3).
+ * UVXH - definitions common to all UV eXtended hub types (currently 2, 3, 4).
  * UV1H - definitions specific to UV type 1 hub.
  * UV2H - definitions specific to UV type 2 hub.
  * UV3H - definitions specific to UV type 3 hub.
+ * UV4H - definitions specific to UV type 4 hub.
  *
  * So in general, MMR addresses and structures are identical on all hub types.
  * These MMRs are identified as:
@@ -32,19 +33,25 @@
  *		} s;
  *	};
  *
- * If the MMR exists on all hub types but have different addresses:
+ * If the MMR exists on all hub types but has different addresses,
+ * use a conditional operator to define the value at runtime.
  *	#define UV1Hxxx	a
  *	#define UV2Hxxx	b
  *	#define UV3Hxxx	c
+ *	#define UV4Hxxx	d
  *	#define UVHxxx	(is_uv1_hub() ? UV1Hxxx :
  *			(is_uv2_hub() ? UV2Hxxx :
- *					UV3Hxxx))
+ *			(is_uv3_hub() ? UV3Hxxx :
+ *					UV4Hxxx))
  *
- * If the MMR exists on all hub types > 1 but have different addresses:
+ * If the MMR exists on all hub types > 1 but have different addresses, the
+ * variation using "UVX" as the prefix exists.
  *	#define UV2Hxxx	b
  *	#define UV3Hxxx	c
- *	#define UVXHxxx (is_uv2_hub() ? UV2Hxxx :
- *					UV3Hxxx))
+ *	#define UV4Hxxx	d
+ *	#define UVXHxxx	(is_uv2_hub() ? UV2Hxxx :
+ *			(is_uv3_hub() ? UV3Hxxx :
+ *					UV4Hxxx))
  *
  *	union uvh_xxx {
  *		unsigned long       v;
@@ -56,6 +63,8 @@
  *		} s2;
  *		struct uv3h_xxx_s {	 # Full UV3 definition (*)
  *		} s3;
+ *		struct uv4h_xxx_s {	 # Full UV4 definition (*)
+ *		} s4;
  *	};
  *		(* - if present and different than the common struct)
  *
@@ -73,7 +82,7 @@
  *		} sn;
  *	};
  *
- * (GEN Flags: mflags_opt= undefs=0 UV23=UVXH)
+ * (GEN Flags: mflags_opt= undefs=function UV234=UVXH)
  */
 
 #define UV_MMR_ENABLE		(1UL << 63)
@@ -83,20 +92,36 @@
 #define UV2_HUB_PART_NUMBER_X	0x1111
 #define UV3_HUB_PART_NUMBER	0x9578
 #define UV3_HUB_PART_NUMBER_X	0x4321
+#define UV4_HUB_PART_NUMBER	0x99a1
 
 /* Compat: Indicate which UV Hubs are supported. */
+#define UV1_HUB_IS_SUPPORTED	1
 #define UV2_HUB_IS_SUPPORTED	1
 #define UV3_HUB_IS_SUPPORTED	1
+#define UV4_HUB_IS_SUPPORTED	1
+
+/* Error function to catch undefined references */
+extern unsigned long uv_undefined(char *str);
 
 /* ========================================================================= */
 /*                          UVH_BAU_DATA_BROADCAST                           */
 /* ========================================================================= */
 #define UVH_BAU_DATA_BROADCAST 0x61688UL
-#define UVH_BAU_DATA_BROADCAST_32 0x440
+
+#define UV1H_BAU_DATA_BROADCAST_32 0x440
+#define UV2H_BAU_DATA_BROADCAST_32 0x440
+#define UV3H_BAU_DATA_BROADCAST_32 0x440
+#define UV4H_BAU_DATA_BROADCAST_32 0x360
+#define UVH_BAU_DATA_BROADCAST_32 (					\
+	is_uv1_hub() ? UV1H_BAU_DATA_BROADCAST_32 :			\
+	is_uv2_hub() ? UV2H_BAU_DATA_BROADCAST_32 :			\
+	is_uv3_hub() ? UV3H_BAU_DATA_BROADCAST_32 :			\
+	/*is_uv4_hub*/ UV4H_BAU_DATA_BROADCAST_32)
 
 #define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT		0
 #define UVH_BAU_DATA_BROADCAST_ENABLE_MASK		0x0000000000000001UL
 
+
 union uvh_bau_data_broadcast_u {
 	unsigned long	v;
 	struct uvh_bau_data_broadcast_s {
@@ -109,7 +134,16 @@
 /*                           UVH_BAU_DATA_CONFIG                             */
 /* ========================================================================= */
 #define UVH_BAU_DATA_CONFIG 0x61680UL
-#define UVH_BAU_DATA_CONFIG_32 0x438
+
+#define UV1H_BAU_DATA_CONFIG_32 0x438
+#define UV2H_BAU_DATA_CONFIG_32 0x438
+#define UV3H_BAU_DATA_CONFIG_32 0x438
+#define UV4H_BAU_DATA_CONFIG_32 0x358
+#define UVH_BAU_DATA_CONFIG_32 (					\
+	is_uv1_hub() ? UV1H_BAU_DATA_CONFIG_32 :			\
+	is_uv2_hub() ? UV2H_BAU_DATA_CONFIG_32 :			\
+	is_uv3_hub() ? UV3H_BAU_DATA_CONFIG_32 :			\
+	/*is_uv4_hub*/ UV4H_BAU_DATA_CONFIG_32)
 
 #define UVH_BAU_DATA_CONFIG_VECTOR_SHFT			0
 #define UVH_BAU_DATA_CONFIG_DM_SHFT			8
@@ -128,6 +162,7 @@
 #define UVH_BAU_DATA_CONFIG_M_MASK			0x0000000000010000UL
 #define UVH_BAU_DATA_CONFIG_APIC_ID_MASK		0xffffffff00000000UL
 
+
 union uvh_bau_data_config_u {
 	unsigned long	v;
 	struct uvh_bau_data_config_s {
@@ -266,7 +301,6 @@
 #define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK		0x0080000000000000UL
 #define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK	0x0100000000000000UL
 
-#define UVXH_EVENT_OCCURRED0_QP_HCERR_SHFT		1
 #define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT		2
 #define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT		3
 #define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT		4
@@ -275,55 +309,11 @@
 #define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT		7
 #define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT		8
 #define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT		9
-#define UVXH_EVENT_OCCURRED0_QP_AOERR0_SHFT		10
 #define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT		12
 #define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT		13
 #define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT		14
 #define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT		15
 #define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT		16
-#define UVXH_EVENT_OCCURRED0_RT_AOERR0_SHFT		17
-#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_SHFT		18
-#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_SHFT		19
-#define UVXH_EVENT_OCCURRED0_LB_AOERR1_SHFT		20
-#define UVXH_EVENT_OCCURRED0_QP_AOERR1_SHFT		21
-#define UVXH_EVENT_OCCURRED0_RH_AOERR1_SHFT		22
-#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_SHFT		23
-#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_SHFT		24
-#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_SHFT		25
-#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_SHFT		26
-#define UVXH_EVENT_OCCURRED0_XB_AOERR1_SHFT		27
-#define UVXH_EVENT_OCCURRED0_RT_AOERR1_SHFT		28
-#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_SHFT		29
-#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_SHFT		30
-#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT	31
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT		32
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT		33
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT		34
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT		35
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT		36
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT		37
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT		38
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT		39
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT		40
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT		41
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT		42
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT		43
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT		44
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT		45
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT		46
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT		47
-#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_SHFT		48
-#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_SHFT		49
-#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT		50
-#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT		51
-#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT	52
-#define UVXH_EVENT_OCCURRED0_IPI_INT_SHFT		53
-#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT		54
-#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_SHFT		55
-#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_SHFT		56
-#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_SHFT		57
-#define UVXH_EVENT_OCCURRED0_PROFILE_INT_SHFT		58
-#define UVXH_EVENT_OCCURRED0_QP_HCERR_MASK		0x0000000000000002UL
 #define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK		0x0000000000000004UL
 #define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK		0x0000000000000008UL
 #define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK		0x0000000000000010UL
@@ -332,54 +322,294 @@
 #define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK		0x0000000000000080UL
 #define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK		0x0000000000000100UL
 #define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK		0x0000000000000200UL
-#define UVXH_EVENT_OCCURRED0_QP_AOERR0_MASK		0x0000000000000400UL
 #define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK		0x0000000000001000UL
 #define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK		0x0000000000002000UL
 #define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK		0x0000000000004000UL
 #define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK		0x0000000000008000UL
 #define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK		0x0000000000010000UL
-#define UVXH_EVENT_OCCURRED0_RT_AOERR0_MASK		0x0000000000020000UL
-#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_MASK		0x0000000000040000UL
-#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_MASK		0x0000000000080000UL
-#define UVXH_EVENT_OCCURRED0_LB_AOERR1_MASK		0x0000000000100000UL
-#define UVXH_EVENT_OCCURRED0_QP_AOERR1_MASK		0x0000000000200000UL
-#define UVXH_EVENT_OCCURRED0_RH_AOERR1_MASK		0x0000000000400000UL
-#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_MASK		0x0000000000800000UL
-#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_MASK		0x0000000001000000UL
-#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_MASK		0x0000000002000000UL
-#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_MASK		0x0000000004000000UL
-#define UVXH_EVENT_OCCURRED0_XB_AOERR1_MASK		0x0000000008000000UL
-#define UVXH_EVENT_OCCURRED0_RT_AOERR1_MASK		0x0000000010000000UL
-#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_MASK		0x0000000020000000UL
-#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_MASK		0x0000000040000000UL
-#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK	0x0000000080000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK		0x0000000100000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK		0x0000000200000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK		0x0000000400000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK		0x0000000800000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK		0x0000001000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK		0x0000002000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK		0x0000004000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK		0x0000008000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK		0x0000010000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK		0x0000020000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK		0x0000040000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK		0x0000080000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK		0x0000100000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK		0x0000200000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK		0x0000400000000000UL
-#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK		0x0000800000000000UL
-#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_MASK		0x0001000000000000UL
-#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_MASK		0x0002000000000000UL
-#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_MASK		0x0004000000000000UL
-#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_MASK		0x0008000000000000UL
-#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK	0x0010000000000000UL
-#define UVXH_EVENT_OCCURRED0_IPI_INT_MASK		0x0020000000000000UL
-#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_MASK		0x0040000000000000UL
-#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_MASK		0x0080000000000000UL
-#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_MASK		0x0100000000000000UL
-#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_MASK		0x0200000000000000UL
-#define UVXH_EVENT_OCCURRED0_PROFILE_INT_MASK		0x0400000000000000UL
+
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT		1
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT		10
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT		17
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT		18
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT		19
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT		20
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT		21
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT		22
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT		23
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT		24
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT		25
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT		26
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT		27
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT		28
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT		29
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT		30
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT	31
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT		32
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT		33
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT		34
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT		35
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT		36
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT		37
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT		38
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT		39
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT		40
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT		41
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT		42
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT		43
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT		44
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT		45
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT		46
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT		47
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT		48
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT		49
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT		50
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT		51
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT	52
+#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT		53
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT		54
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT		55
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT		56
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT		57
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT		58
+#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK		0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK		0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK		0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK		0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK		0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK		0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK		0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK		0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK		0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK		0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK		0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK		0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK		0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK		0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK		0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK		0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK	0x0000000080000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK		0x0000000100000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK		0x0000000200000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK		0x0000000400000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK		0x0000000800000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK		0x0000001000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK		0x0000002000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK		0x0000004000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK		0x0000008000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK		0x0000010000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK		0x0000020000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK		0x0000040000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK		0x0000080000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK		0x0000100000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK		0x0000200000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK		0x0000400000000000UL
+#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK		0x0000800000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK		0x0001000000000000UL
+#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK		0x0002000000000000UL
+#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK		0x0004000000000000UL
+#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK		0x0008000000000000UL
+#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK	0x0010000000000000UL
+#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK		0x0020000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK		0x0040000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK		0x0080000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK		0x0100000000000000UL
+#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK		0x0200000000000000UL
+#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK		0x0400000000000000UL
+
+#define UV3H_EVENT_OCCURRED0_QP_HCERR_SHFT		1
+#define UV3H_EVENT_OCCURRED0_QP_AOERR0_SHFT		10
+#define UV3H_EVENT_OCCURRED0_RT_AOERR0_SHFT		17
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_SHFT		18
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_SHFT		19
+#define UV3H_EVENT_OCCURRED0_LB_AOERR1_SHFT		20
+#define UV3H_EVENT_OCCURRED0_QP_AOERR1_SHFT		21
+#define UV3H_EVENT_OCCURRED0_RH_AOERR1_SHFT		22
+#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_SHFT		23
+#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_SHFT		24
+#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_SHFT		25
+#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_SHFT		26
+#define UV3H_EVENT_OCCURRED0_XB_AOERR1_SHFT		27
+#define UV3H_EVENT_OCCURRED0_RT_AOERR1_SHFT		28
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_SHFT		29
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_SHFT		30
+#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT	31
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT		32
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT		33
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT		34
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT		35
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT		36
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT		37
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT		38
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT		39
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT		40
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT		41
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT		42
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT		43
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT		44
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT		45
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT		46
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT		47
+#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_SHFT		48
+#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_SHFT		49
+#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT		50
+#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT		51
+#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT	52
+#define UV3H_EVENT_OCCURRED0_IPI_INT_SHFT		53
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT		54
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_SHFT		55
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_SHFT		56
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_SHFT		57
+#define UV3H_EVENT_OCCURRED0_PROFILE_INT_SHFT		58
+#define UV3H_EVENT_OCCURRED0_QP_HCERR_MASK		0x0000000000000002UL
+#define UV3H_EVENT_OCCURRED0_QP_AOERR0_MASK		0x0000000000000400UL
+#define UV3H_EVENT_OCCURRED0_RT_AOERR0_MASK		0x0000000000020000UL
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR0_MASK		0x0000000000040000UL
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR0_MASK		0x0000000000080000UL
+#define UV3H_EVENT_OCCURRED0_LB_AOERR1_MASK		0x0000000000100000UL
+#define UV3H_EVENT_OCCURRED0_QP_AOERR1_MASK		0x0000000000200000UL
+#define UV3H_EVENT_OCCURRED0_RH_AOERR1_MASK		0x0000000000400000UL
+#define UV3H_EVENT_OCCURRED0_LH0_AOERR1_MASK		0x0000000000800000UL
+#define UV3H_EVENT_OCCURRED0_LH1_AOERR1_MASK		0x0000000001000000UL
+#define UV3H_EVENT_OCCURRED0_GR0_AOERR1_MASK		0x0000000002000000UL
+#define UV3H_EVENT_OCCURRED0_GR1_AOERR1_MASK		0x0000000004000000UL
+#define UV3H_EVENT_OCCURRED0_XB_AOERR1_MASK		0x0000000008000000UL
+#define UV3H_EVENT_OCCURRED0_RT_AOERR1_MASK		0x0000000010000000UL
+#define UV3H_EVENT_OCCURRED0_NI0_AOERR1_MASK		0x0000000020000000UL
+#define UV3H_EVENT_OCCURRED0_NI1_AOERR1_MASK		0x0000000040000000UL
+#define UV3H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK	0x0000000080000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK		0x0000000100000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK		0x0000000200000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK		0x0000000400000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK		0x0000000800000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK		0x0000001000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK		0x0000002000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK		0x0000004000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK		0x0000008000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK		0x0000010000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK		0x0000020000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK		0x0000040000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK		0x0000080000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK		0x0000100000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK		0x0000200000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK		0x0000400000000000UL
+#define UV3H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK		0x0000800000000000UL
+#define UV3H_EVENT_OCCURRED0_L1_NMI_INT_MASK		0x0001000000000000UL
+#define UV3H_EVENT_OCCURRED0_STOP_CLOCK_MASK		0x0002000000000000UL
+#define UV3H_EVENT_OCCURRED0_ASIC_TO_L1_MASK		0x0004000000000000UL
+#define UV3H_EVENT_OCCURRED0_L1_TO_ASIC_MASK		0x0008000000000000UL
+#define UV3H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK	0x0010000000000000UL
+#define UV3H_EVENT_OCCURRED0_IPI_INT_MASK		0x0020000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT0_MASK		0x0040000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT1_MASK		0x0080000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT2_MASK		0x0100000000000000UL
+#define UV3H_EVENT_OCCURRED0_EXTIO_INT3_MASK		0x0200000000000000UL
+#define UV3H_EVENT_OCCURRED0_PROFILE_INT_MASK		0x0400000000000000UL
+
+#define UV4H_EVENT_OCCURRED0_KT_HCERR_SHFT		1
+#define UV4H_EVENT_OCCURRED0_KT_AOERR0_SHFT		10
+#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_SHFT		17
+#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_SHFT		18
+#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_SHFT		19
+#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_SHFT		20
+#define UV4H_EVENT_OCCURRED0_NI0_AOERR0_SHFT		21
+#define UV4H_EVENT_OCCURRED0_NI1_AOERR0_SHFT		22
+#define UV4H_EVENT_OCCURRED0_LB_AOERR1_SHFT		23
+#define UV4H_EVENT_OCCURRED0_KT_AOERR1_SHFT		24
+#define UV4H_EVENT_OCCURRED0_RH_AOERR1_SHFT		25
+#define UV4H_EVENT_OCCURRED0_LH0_AOERR1_SHFT		26
+#define UV4H_EVENT_OCCURRED0_LH1_AOERR1_SHFT		27
+#define UV4H_EVENT_OCCURRED0_GR0_AOERR1_SHFT		28
+#define UV4H_EVENT_OCCURRED0_GR1_AOERR1_SHFT		29
+#define UV4H_EVENT_OCCURRED0_XB_AOERR1_SHFT		30
+#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_SHFT		31
+#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_SHFT		32
+#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_SHFT		33
+#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_SHFT		34
+#define UV4H_EVENT_OCCURRED0_NI0_AOERR1_SHFT		35
+#define UV4H_EVENT_OCCURRED0_NI1_AOERR1_SHFT		36
+#define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT	37
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT		38
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT		39
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT		40
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT		41
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT		42
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT		43
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT		44
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT		45
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT		46
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT		47
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT		48
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT		49
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT		50
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT		51
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT		52
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT		53
+#define UV4H_EVENT_OCCURRED0_L1_NMI_INT_SHFT		54
+#define UV4H_EVENT_OCCURRED0_STOP_CLOCK_SHFT		55
+#define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT		56
+#define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT		57
+#define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT	58
+#define UV4H_EVENT_OCCURRED0_IPI_INT_SHFT		59
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT		60
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT1_SHFT		61
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT2_SHFT		62
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_SHFT		63
+#define UV4H_EVENT_OCCURRED0_KT_HCERR_MASK		0x0000000000000002UL
+#define UV4H_EVENT_OCCURRED0_KT_AOERR0_MASK		0x0000000000000400UL
+#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR0_MASK		0x0000000000020000UL
+#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR0_MASK		0x0000000000040000UL
+#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR0_MASK		0x0000000000080000UL
+#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR0_MASK		0x0000000000100000UL
+#define UV4H_EVENT_OCCURRED0_NI0_AOERR0_MASK		0x0000000000200000UL
+#define UV4H_EVENT_OCCURRED0_NI1_AOERR0_MASK		0x0000000000400000UL
+#define UV4H_EVENT_OCCURRED0_LB_AOERR1_MASK		0x0000000000800000UL
+#define UV4H_EVENT_OCCURRED0_KT_AOERR1_MASK		0x0000000001000000UL
+#define UV4H_EVENT_OCCURRED0_RH_AOERR1_MASK		0x0000000002000000UL
+#define UV4H_EVENT_OCCURRED0_LH0_AOERR1_MASK		0x0000000004000000UL
+#define UV4H_EVENT_OCCURRED0_LH1_AOERR1_MASK		0x0000000008000000UL
+#define UV4H_EVENT_OCCURRED0_GR0_AOERR1_MASK		0x0000000010000000UL
+#define UV4H_EVENT_OCCURRED0_GR1_AOERR1_MASK		0x0000000020000000UL
+#define UV4H_EVENT_OCCURRED0_XB_AOERR1_MASK		0x0000000040000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ0_AOERR1_MASK		0x0000000080000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ1_AOERR1_MASK		0x0000000100000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ2_AOERR1_MASK		0x0000000200000000UL
+#define UV4H_EVENT_OCCURRED0_RTQ3_AOERR1_MASK		0x0000000400000000UL
+#define UV4H_EVENT_OCCURRED0_NI0_AOERR1_MASK		0x0000000800000000UL
+#define UV4H_EVENT_OCCURRED0_NI1_AOERR1_MASK		0x0000001000000000UL
+#define UV4H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK	0x0000002000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK		0x0000004000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK		0x0000008000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK		0x0000010000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK		0x0000020000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK		0x0000040000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK		0x0000080000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK		0x0000100000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK		0x0000200000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK		0x0000400000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK		0x0000800000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK		0x0001000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK		0x0002000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK		0x0004000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK		0x0008000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK		0x0010000000000000UL
+#define UV4H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK		0x0020000000000000UL
+#define UV4H_EVENT_OCCURRED0_L1_NMI_INT_MASK		0x0040000000000000UL
+#define UV4H_EVENT_OCCURRED0_STOP_CLOCK_MASK		0x0080000000000000UL
+#define UV4H_EVENT_OCCURRED0_ASIC_TO_L1_MASK		0x0100000000000000UL
+#define UV4H_EVENT_OCCURRED0_L1_TO_ASIC_MASK		0x0200000000000000UL
+#define UV4H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK	0x0400000000000000UL
+#define UV4H_EVENT_OCCURRED0_IPI_INT_MASK		0x0800000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT0_MASK		0x1000000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT1_MASK		0x2000000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT2_MASK		0x4000000000000000UL
+#define UV4H_EVENT_OCCURRED0_EXTIO_INT3_MASK		0x8000000000000000UL
+
+#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT (				\
+	is_uv1_hub() ? UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :		\
+	is_uv2_hub() ? UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :		\
+	is_uv3_hub() ? UV3H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :		\
+	/*is_uv4_hub*/ UV4H_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
 
 union uvh_event_occurred0_u {
 	unsigned long	v;
@@ -391,7 +621,7 @@
 	} s;
 	struct uvxh_event_occurred0_s {
 		unsigned long	lb_hcerr:1;			/* RW */
-		unsigned long	qp_hcerr:1;			/* RW */
+		unsigned long	rsvd_1:1;
 		unsigned long	rh_hcerr:1;			/* RW */
 		unsigned long	lh0_hcerr:1;			/* RW */
 		unsigned long	lh1_hcerr:1;			/* RW */
@@ -400,25 +630,51 @@
 		unsigned long	ni0_hcerr:1;			/* RW */
 		unsigned long	ni1_hcerr:1;			/* RW */
 		unsigned long	lb_aoerr0:1;			/* RW */
-		unsigned long	qp_aoerr0:1;			/* RW */
+		unsigned long	rsvd_10:1;
 		unsigned long	rh_aoerr0:1;			/* RW */
 		unsigned long	lh0_aoerr0:1;			/* RW */
 		unsigned long	lh1_aoerr0:1;			/* RW */
 		unsigned long	gr0_aoerr0:1;			/* RW */
 		unsigned long	gr1_aoerr0:1;			/* RW */
 		unsigned long	xb_aoerr0:1;			/* RW */
-		unsigned long	rt_aoerr0:1;			/* RW */
+		unsigned long	rsvd_17_63:47;
+	} sx;
+	struct uv4h_event_occurred0_s {
+		unsigned long	lb_hcerr:1;			/* RW */
+		unsigned long	kt_hcerr:1;			/* RW */
+		unsigned long	rh_hcerr:1;			/* RW */
+		unsigned long	lh0_hcerr:1;			/* RW */
+		unsigned long	lh1_hcerr:1;			/* RW */
+		unsigned long	gr0_hcerr:1;			/* RW */
+		unsigned long	gr1_hcerr:1;			/* RW */
+		unsigned long	ni0_hcerr:1;			/* RW */
+		unsigned long	ni1_hcerr:1;			/* RW */
+		unsigned long	lb_aoerr0:1;			/* RW */
+		unsigned long	kt_aoerr0:1;			/* RW */
+		unsigned long	rh_aoerr0:1;			/* RW */
+		unsigned long	lh0_aoerr0:1;			/* RW */
+		unsigned long	lh1_aoerr0:1;			/* RW */
+		unsigned long	gr0_aoerr0:1;			/* RW */
+		unsigned long	gr1_aoerr0:1;			/* RW */
+		unsigned long	xb_aoerr0:1;			/* RW */
+		unsigned long	rtq0_aoerr0:1;			/* RW */
+		unsigned long	rtq1_aoerr0:1;			/* RW */
+		unsigned long	rtq2_aoerr0:1;			/* RW */
+		unsigned long	rtq3_aoerr0:1;			/* RW */
 		unsigned long	ni0_aoerr0:1;			/* RW */
 		unsigned long	ni1_aoerr0:1;			/* RW */
 		unsigned long	lb_aoerr1:1;			/* RW */
-		unsigned long	qp_aoerr1:1;			/* RW */
+		unsigned long	kt_aoerr1:1;			/* RW */
 		unsigned long	rh_aoerr1:1;			/* RW */
 		unsigned long	lh0_aoerr1:1;			/* RW */
 		unsigned long	lh1_aoerr1:1;			/* RW */
 		unsigned long	gr0_aoerr1:1;			/* RW */
 		unsigned long	gr1_aoerr1:1;			/* RW */
 		unsigned long	xb_aoerr1:1;			/* RW */
-		unsigned long	rt_aoerr1:1;			/* RW */
+		unsigned long	rtq0_aoerr1:1;			/* RW */
+		unsigned long	rtq1_aoerr1:1;			/* RW */
+		unsigned long	rtq2_aoerr1:1;			/* RW */
+		unsigned long	rtq3_aoerr1:1;			/* RW */
 		unsigned long	ni0_aoerr1:1;			/* RW */
 		unsigned long	ni1_aoerr1:1;			/* RW */
 		unsigned long	system_shutdown_int:1;		/* RW */
@@ -448,9 +704,7 @@
 		unsigned long	extio_int1:1;			/* RW */
 		unsigned long	extio_int2:1;			/* RW */
 		unsigned long	extio_int3:1;			/* RW */
-		unsigned long	profile_int:1;			/* RW */
-		unsigned long	rsvd_59_63:5;
-	} sx;
+	} s4;
 };
 
 /* ========================================================================= */
@@ -464,11 +718,21 @@
 /*                         UVH_EXTIO_INT0_BROADCAST                          */
 /* ========================================================================= */
 #define UVH_EXTIO_INT0_BROADCAST 0x61448UL
-#define UVH_EXTIO_INT0_BROADCAST_32 0x3f0
+
+#define UV1H_EXTIO_INT0_BROADCAST_32 0x3f0
+#define UV2H_EXTIO_INT0_BROADCAST_32 0x3f0
+#define UV3H_EXTIO_INT0_BROADCAST_32 0x3f0
+#define UV4H_EXTIO_INT0_BROADCAST_32 0x310
+#define UVH_EXTIO_INT0_BROADCAST_32 (					\
+	is_uv1_hub() ? UV1H_EXTIO_INT0_BROADCAST_32 :			\
+	is_uv2_hub() ? UV2H_EXTIO_INT0_BROADCAST_32 :			\
+	is_uv3_hub() ? UV3H_EXTIO_INT0_BROADCAST_32 :			\
+	/*is_uv4_hub*/ UV4H_EXTIO_INT0_BROADCAST_32)
 
 #define UVH_EXTIO_INT0_BROADCAST_ENABLE_SHFT		0
 #define UVH_EXTIO_INT0_BROADCAST_ENABLE_MASK		0x0000000000000001UL
 
+
 union uvh_extio_int0_broadcast_u {
 	unsigned long	v;
 	struct uvh_extio_int0_broadcast_s {
@@ -499,6 +763,7 @@
 #define UVH_GR0_TLB_INT0_CONFIG_M_MASK			0x0000000000010000UL
 #define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK		0xffffffff00000000UL
 
+
 union uvh_gr0_tlb_int0_config_u {
 	unsigned long	v;
 	struct uvh_gr0_tlb_int0_config_s {
@@ -537,6 +802,7 @@
 #define UVH_GR0_TLB_INT1_CONFIG_M_MASK			0x0000000000010000UL
 #define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK		0xffffffff00000000UL
 
+
 union uvh_gr0_tlb_int1_config_u {
 	unsigned long	v;
 	struct uvh_gr0_tlb_int1_config_s {
@@ -559,19 +825,18 @@
 #define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL
 #define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
 #define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL
-#define UVH_GR0_TLB_MMR_CONTROL						\
-		(is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL :		\
-		(is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL :		\
-				UV3H_GR0_TLB_MMR_CONTROL))
+#define UV4H_GR0_TLB_MMR_CONTROL 0x601080UL
+#define UVH_GR0_TLB_MMR_CONTROL (					\
+	is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL :			\
+	is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL :			\
+	is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL :			\
+	/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL)
 
 #define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT		0
-#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT		12
 #define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT	16
 #define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT	20
 #define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT		30
 #define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT		31
-#define UVH_GR0_TLB_MMR_CONTROL_INDEX_MASK		0x0000000000000fffUL
-#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK		0x0000000000003000UL
 #define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK	0x0000000000010000UL
 #define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK	0x0000000000100000UL
 #define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK		0x0000000040000000UL
@@ -601,14 +866,11 @@
 #define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK	0x1000000000000000UL
 
 #define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT		0
-#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT		12
 #define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT	16
 #define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT	20
 #define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT		30
 #define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT		31
 #define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT	32
-#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_MASK		0x0000000000000fffUL
-#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK		0x0000000000003000UL
 #define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK	0x0000000000010000UL
 #define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK	0x0000000000100000UL
 #define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK		0x0000000040000000UL
@@ -651,12 +913,45 @@
 #define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK		0x0000000080000000UL
 #define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK	0x0000000100000000UL
 
+#define UV4H_GR0_TLB_MMR_CONTROL_INDEX_SHFT		0
+#define UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT		13
+#define UV4H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT	16
+#define UV4H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT	20
+#define UV4H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT		21
+#define UV4H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT		30
+#define UV4H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT		31
+#define UV4H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT	32
+#define UV4H_GR0_TLB_MMR_CONTROL_PAGE_SIZE_SHFT		59
+#define UV4H_GR0_TLB_MMR_CONTROL_INDEX_MASK		0x0000000000001fffUL
+#define UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK		0x0000000000006000UL
+#define UV4H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK	0x0000000000010000UL
+#define UV4H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK	0x0000000000100000UL
+#define UV4H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK		0x0000000000200000UL
+#define UV4H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK		0x0000000040000000UL
+#define UV4H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK		0x0000000080000000UL
+#define UV4H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK	0x0000000100000000UL
+#define UV4H_GR0_TLB_MMR_CONTROL_PAGE_SIZE_MASK		0xf800000000000000UL
+
+#define UVH_GR0_TLB_MMR_CONTROL_INDEX_MASK (				\
+	is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL_INDEX_MASK :		\
+	is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK :		\
+	is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK :		\
+	/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_INDEX_MASK)
+#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK (				\
+	is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK :		\
+	is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK :		\
+	is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK :		\
+	/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK)
+#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT (				\
+	is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT :		\
+	is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT :		\
+	is_uv3_hub() ? UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT :		\
+	/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT)
+
 union uvh_gr0_tlb_mmr_control_u {
 	unsigned long	v;
 	struct uvh_gr0_tlb_mmr_control_s {
-		unsigned long	index:12;			/* RW */
-		unsigned long	mem_sel:2;			/* RW */
-		unsigned long	rsvd_14_15:2;
+		unsigned long	rsvd_0_15:16;
 		unsigned long	auto_valid_en:1;		/* RW */
 		unsigned long	rsvd_17_19:3;
 		unsigned long	mmr_hash_index_en:1;		/* RW */
@@ -690,9 +985,7 @@
 		unsigned long	rsvd_61_63:3;
 	} s1;
 	struct uvxh_gr0_tlb_mmr_control_s {
-		unsigned long	index:12;			/* RW */
-		unsigned long	mem_sel:2;			/* RW */
-		unsigned long	rsvd_14_15:2;
+		unsigned long	rsvd_0_15:16;
 		unsigned long	auto_valid_en:1;		/* RW */
 		unsigned long	rsvd_17_19:3;
 		unsigned long	mmr_hash_index_en:1;		/* RW */
@@ -703,8 +996,7 @@
 		unsigned long	rsvd_33_47:15;
 		unsigned long	rsvd_48:1;
 		unsigned long	rsvd_49_51:3;
-		unsigned long	rsvd_52:1;
-		unsigned long	rsvd_53_63:11;
+		unsigned long	rsvd_52_63:12;
 	} sx;
 	struct uv2h_gr0_tlb_mmr_control_s {
 		unsigned long	index:12;			/* RW */
@@ -741,6 +1033,24 @@
 		unsigned long	undef_52:1;			/* Undefined */
 		unsigned long	rsvd_53_63:11;
 	} s3;
+	struct uv4h_gr0_tlb_mmr_control_s {
+		unsigned long	index:13;			/* RW */
+		unsigned long	mem_sel:2;			/* RW */
+		unsigned long	rsvd_15:1;
+		unsigned long	auto_valid_en:1;		/* RW */
+		unsigned long	rsvd_17_19:3;
+		unsigned long	mmr_hash_index_en:1;		/* RW */
+		unsigned long	ecc_sel:1;			/* RW */
+		unsigned long	rsvd_22_29:8;
+		unsigned long	mmr_write:1;			/* WP */
+		unsigned long	mmr_read:1;			/* WP */
+		unsigned long	mmr_op_done:1;			/* RW */
+		unsigned long	rsvd_33_47:15;
+		unsigned long	undef_48:1;			/* Undefined */
+		unsigned long	rsvd_49_51:3;
+		unsigned long	rsvd_52_58:7;
+		unsigned long	page_size:5;			/* RW */
+	} s4;
 };
 
 /* ========================================================================= */
@@ -749,19 +1059,14 @@
 #define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL
 #define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
 #define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
-#define UVH_GR0_TLB_MMR_READ_DATA_HI					\
-		(is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_HI :		\
-		(is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI :		\
-				UV3H_GR0_TLB_MMR_READ_DATA_HI))
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI 0x6010a0UL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI (					\
+	is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_HI :			\
+	is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI :			\
+	is_uv3_hub() ? UV3H_GR0_TLB_MMR_READ_DATA_HI :			\
+	/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_READ_DATA_HI)
 
 #define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
-#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT		41
-#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT		43
-#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT	44
-#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK		0x000001ffffffffffUL
-#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK		0x0000060000000000UL
-#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK		0x0000080000000000UL
-#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK	0x0000100000000000UL
 
 #define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
 #define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT		41
@@ -773,13 +1078,6 @@
 #define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK	0x0000100000000000UL
 
 #define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
-#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT		41
-#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT	43
-#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT	44
-#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK		0x000001ffffffffffUL
-#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK		0x0000060000000000UL
-#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK	0x0000080000000000UL
-#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK	0x0000100000000000UL
 
 #define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
 #define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT		41
@@ -803,15 +1101,24 @@
 #define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK	0x0000200000000000UL
 #define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK	0xff80000000000000UL
 
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PNID_SHFT		34
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT		49
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT	51
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT	52
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT	53
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT	55
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK		0x00000003ffffffffUL
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_PNID_MASK		0x0001fffc00000000UL
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK		0x0006000000000000UL
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK	0x0008000000000000UL
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK	0x0010000000000000UL
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK	0x0020000000000000UL
+#define UV4H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK	0xff80000000000000UL
+
+
 union uvh_gr0_tlb_mmr_read_data_hi_u {
 	unsigned long	v;
-	struct uvh_gr0_tlb_mmr_read_data_hi_s {
-		unsigned long	pfn:41;				/* RO */
-		unsigned long	gaa:2;				/* RO */
-		unsigned long	dirty:1;			/* RO */
-		unsigned long	larger:1;			/* RO */
-		unsigned long	rsvd_45_63:19;
-	} s;
 	struct uv1h_gr0_tlb_mmr_read_data_hi_s {
 		unsigned long	pfn:41;				/* RO */
 		unsigned long	gaa:2;				/* RO */
@@ -819,13 +1126,6 @@
 		unsigned long	larger:1;			/* RO */
 		unsigned long	rsvd_45_63:19;
 	} s1;
-	struct uvxh_gr0_tlb_mmr_read_data_hi_s {
-		unsigned long	pfn:41;				/* RO */
-		unsigned long	gaa:2;				/* RO */
-		unsigned long	dirty:1;			/* RO */
-		unsigned long	larger:1;			/* RO */
-		unsigned long	rsvd_45_63:19;
-	} sx;
 	struct uv2h_gr0_tlb_mmr_read_data_hi_s {
 		unsigned long	pfn:41;				/* RO */
 		unsigned long	gaa:2;				/* RO */
@@ -842,6 +1142,16 @@
 		unsigned long	undef_46_54:9;			/* Undefined */
 		unsigned long	way_ecc:9;			/* RO */
 	} s3;
+	struct uv4h_gr0_tlb_mmr_read_data_hi_s {
+		unsigned long	pfn:34;				/* RO */
+		unsigned long	pnid:15;			/* RO */
+		unsigned long	gaa:2;				/* RO */
+		unsigned long	dirty:1;			/* RO */
+		unsigned long	larger:1;			/* RO */
+		unsigned long	aa_ext:1;			/* RO */
+		unsigned long	undef_54:1;			/* Undefined */
+		unsigned long	way_ecc:9;			/* RO */
+	} s4;
 };
 
 /* ========================================================================= */
@@ -850,10 +1160,12 @@
 #define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL
 #define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
 #define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
-#define UVH_GR0_TLB_MMR_READ_DATA_LO					\
-		(is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_LO :		\
-		(is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO :		\
-				UV3H_GR0_TLB_MMR_READ_DATA_LO))
+#define UV4H_GR0_TLB_MMR_READ_DATA_LO 0x6010a8UL
+#define UVH_GR0_TLB_MMR_READ_DATA_LO (					\
+	is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_LO :			\
+	is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO :			\
+	is_uv3_hub() ? UV3H_GR0_TLB_MMR_READ_DATA_LO :			\
+	/*is_uv4_hub*/ UV4H_GR0_TLB_MMR_READ_DATA_LO)
 
 #define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT		0
 #define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT		39
@@ -890,6 +1202,14 @@
 #define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK		0x7fffff8000000000UL
 #define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK	0x8000000000000000UL
 
+#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT		0
+#define UV4H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT		39
+#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT	63
+#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK		0x0000007fffffffffUL
+#define UV4H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK		0x7fffff8000000000UL
+#define UV4H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK	0x8000000000000000UL
+
+
 union uvh_gr0_tlb_mmr_read_data_lo_u {
 	unsigned long	v;
 	struct uvh_gr0_tlb_mmr_read_data_lo_s {
@@ -917,12 +1237,25 @@
 		unsigned long	asid:24;			/* RO */
 		unsigned long	valid:1;			/* RO */
 	} s3;
+	struct uv4h_gr0_tlb_mmr_read_data_lo_s {
+		unsigned long	vpn:39;				/* RO */
+		unsigned long	asid:24;			/* RO */
+		unsigned long	valid:1;			/* RO */
+	} s4;
 };
 
 /* ========================================================================= */
 /*                         UVH_GR1_TLB_INT0_CONFIG                           */
 /* ========================================================================= */
-#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+#define UV1H_GR1_TLB_INT0_CONFIG 0x61f00UL
+#define UV2H_GR1_TLB_INT0_CONFIG 0x61f00UL
+#define UV3H_GR1_TLB_INT0_CONFIG 0x61f00UL
+#define UV4H_GR1_TLB_INT0_CONFIG 0x62100UL
+#define UVH_GR1_TLB_INT0_CONFIG (					\
+	is_uv1_hub() ? UV1H_GR1_TLB_INT0_CONFIG :			\
+	is_uv2_hub() ? UV2H_GR1_TLB_INT0_CONFIG :			\
+	is_uv3_hub() ? UV3H_GR1_TLB_INT0_CONFIG :			\
+	/*is_uv4_hub*/ UV4H_GR1_TLB_INT0_CONFIG)
 
 #define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT		0
 #define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT			8
@@ -941,6 +1274,7 @@
 #define UVH_GR1_TLB_INT0_CONFIG_M_MASK			0x0000000000010000UL
 #define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK		0xffffffff00000000UL
 
+
 union uvh_gr1_tlb_int0_config_u {
 	unsigned long	v;
 	struct uvh_gr1_tlb_int0_config_s {
@@ -960,7 +1294,15 @@
 /* ========================================================================= */
 /*                         UVH_GR1_TLB_INT1_CONFIG                           */
 /* ========================================================================= */
-#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+#define UV1H_GR1_TLB_INT1_CONFIG 0x61f40UL
+#define UV2H_GR1_TLB_INT1_CONFIG 0x61f40UL
+#define UV3H_GR1_TLB_INT1_CONFIG 0x61f40UL
+#define UV4H_GR1_TLB_INT1_CONFIG 0x62140UL
+#define UVH_GR1_TLB_INT1_CONFIG (					\
+	is_uv1_hub() ? UV1H_GR1_TLB_INT1_CONFIG :			\
+	is_uv2_hub() ? UV2H_GR1_TLB_INT1_CONFIG :			\
+	is_uv3_hub() ? UV3H_GR1_TLB_INT1_CONFIG :			\
+	/*is_uv4_hub*/ UV4H_GR1_TLB_INT1_CONFIG)
 
 #define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT		0
 #define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT			8
@@ -979,6 +1321,7 @@
 #define UVH_GR1_TLB_INT1_CONFIG_M_MASK			0x0000000000010000UL
 #define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK		0xffffffff00000000UL
 
+
 union uvh_gr1_tlb_int1_config_u {
 	unsigned long	v;
 	struct uvh_gr1_tlb_int1_config_s {
@@ -1001,19 +1344,18 @@
 #define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL
 #define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
 #define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL
-#define UVH_GR1_TLB_MMR_CONTROL						\
-		(is_uv1_hub() ? UV1H_GR1_TLB_MMR_CONTROL :		\
-		(is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL :		\
-				UV3H_GR1_TLB_MMR_CONTROL))
+#define UV4H_GR1_TLB_MMR_CONTROL 0x701080UL
+#define UVH_GR1_TLB_MMR_CONTROL (					\
+	is_uv1_hub() ? UV1H_GR1_TLB_MMR_CONTROL :			\
+	is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL :			\
+	is_uv3_hub() ? UV3H_GR1_TLB_MMR_CONTROL :			\
+	/*is_uv4_hub*/ UV4H_GR1_TLB_MMR_CONTROL)
 
 #define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT		0
-#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT		12
 #define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT	16
 #define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT	20
 #define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT		30
 #define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT		31
-#define UVH_GR1_TLB_MMR_CONTROL_INDEX_MASK		0x0000000000000fffUL
-#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK		0x0000000000003000UL
 #define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK	0x0000000000010000UL
 #define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK	0x0000000000100000UL
 #define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK		0x0000000040000000UL
@@ -1043,14 +1385,11 @@
 #define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK	0x1000000000000000UL
 
 #define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT		0
-#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT		12
 #define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT	16
 #define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT	20
 #define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT		30
 #define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT		31
 #define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT	32
-#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_MASK		0x0000000000000fffUL
-#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK		0x0000000000003000UL
 #define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK	0x0000000000010000UL
 #define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK	0x0000000000100000UL
 #define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK		0x0000000040000000UL
@@ -1093,12 +1432,30 @@
 #define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK		0x0000000080000000UL
 #define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK	0x0000000100000000UL
 
+#define UV4H_GR1_TLB_MMR_CONTROL_INDEX_SHFT		0
+#define UV4H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT		13
+#define UV4H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT	16
+#define UV4H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT	20
+#define UV4H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT		21
+#define UV4H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT		30
+#define UV4H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT		31
+#define UV4H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT	32
+#define UV4H_GR1_TLB_MMR_CONTROL_PAGE_SIZE_SHFT		59
+#define UV4H_GR1_TLB_MMR_CONTROL_INDEX_MASK		0x0000000000001fffUL
+#define UV4H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK		0x0000000000006000UL
+#define UV4H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK	0x0000000000010000UL
+#define UV4H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK	0x0000000000100000UL
+#define UV4H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK		0x0000000000200000UL
+#define UV4H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK		0x0000000040000000UL
+#define UV4H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK		0x0000000080000000UL
+#define UV4H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK	0x0000000100000000UL
+#define UV4H_GR1_TLB_MMR_CONTROL_PAGE_SIZE_MASK		0xf800000000000000UL
+
+
 union uvh_gr1_tlb_mmr_control_u {
 	unsigned long	v;
 	struct uvh_gr1_tlb_mmr_control_s {
-		unsigned long	index:12;			/* RW */
-		unsigned long	mem_sel:2;			/* RW */
-		unsigned long	rsvd_14_15:2;
+		unsigned long	rsvd_0_15:16;
 		unsigned long	auto_valid_en:1;		/* RW */
 		unsigned long	rsvd_17_19:3;
 		unsigned long	mmr_hash_index_en:1;		/* RW */
@@ -1132,9 +1489,7 @@
 		unsigned long	rsvd_61_63:3;
 	} s1;
 	struct uvxh_gr1_tlb_mmr_control_s {
-		unsigned long	index:12;			/* RW */
-		unsigned long	mem_sel:2;			/* RW */
-		unsigned long	rsvd_14_15:2;
+		unsigned long	rsvd_0_15:16;
 		unsigned long	auto_valid_en:1;		/* RW */
 		unsigned long	rsvd_17_19:3;
 		unsigned long	mmr_hash_index_en:1;		/* RW */
@@ -1145,8 +1500,7 @@
 		unsigned long	rsvd_33_47:15;
 		unsigned long	rsvd_48:1;
 		unsigned long	rsvd_49_51:3;
-		unsigned long	rsvd_52:1;
-		unsigned long	rsvd_53_63:11;
+		unsigned long	rsvd_52_63:12;
 	} sx;
 	struct uv2h_gr1_tlb_mmr_control_s {
 		unsigned long	index:12;			/* RW */
@@ -1183,6 +1537,24 @@
 		unsigned long	undef_52:1;			/* Undefined */
 		unsigned long	rsvd_53_63:11;
 	} s3;
+	struct uv4h_gr1_tlb_mmr_control_s {
+		unsigned long	index:13;			/* RW */
+		unsigned long	mem_sel:2;			/* RW */
+		unsigned long	rsvd_15:1;
+		unsigned long	auto_valid_en:1;		/* RW */
+		unsigned long	rsvd_17_19:3;
+		unsigned long	mmr_hash_index_en:1;		/* RW */
+		unsigned long	ecc_sel:1;			/* RW */
+		unsigned long	rsvd_22_29:8;
+		unsigned long	mmr_write:1;			/* WP */
+		unsigned long	mmr_read:1;			/* WP */
+		unsigned long	mmr_op_done:1;			/* RW */
+		unsigned long	rsvd_33_47:15;
+		unsigned long	undef_48:1;			/* Undefined */
+		unsigned long	rsvd_49_51:3;
+		unsigned long	rsvd_52_58:7;
+		unsigned long	page_size:5;			/* RW */
+	} s4;
 };
 
 /* ========================================================================= */
@@ -1191,19 +1563,14 @@
 #define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL
 #define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
 #define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
-#define UVH_GR1_TLB_MMR_READ_DATA_HI					\
-		(is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_HI :		\
-		(is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI :		\
-				UV3H_GR1_TLB_MMR_READ_DATA_HI))
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI 0x7010a0UL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI (					\
+	is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_HI :			\
+	is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI :			\
+	is_uv3_hub() ? UV3H_GR1_TLB_MMR_READ_DATA_HI :			\
+	/*is_uv4_hub*/ UV4H_GR1_TLB_MMR_READ_DATA_HI)
 
 #define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
-#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT		41
-#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT		43
-#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT	44
-#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK		0x000001ffffffffffUL
-#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK		0x0000060000000000UL
-#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK		0x0000080000000000UL
-#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK	0x0000100000000000UL
 
 #define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
 #define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT		41
@@ -1215,13 +1582,6 @@
 #define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK	0x0000100000000000UL
 
 #define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
-#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT		41
-#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT	43
-#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT	44
-#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK		0x000001ffffffffffUL
-#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK		0x0000060000000000UL
-#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK	0x0000080000000000UL
-#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK	0x0000100000000000UL
 
 #define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
 #define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT		41
@@ -1245,15 +1605,24 @@
 #define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK	0x0000200000000000UL
 #define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK	0xff80000000000000UL
 
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT		0
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PNID_SHFT		34
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT		49
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT	51
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT	52
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT	53
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT	55
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK		0x00000003ffffffffUL
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_PNID_MASK		0x0001fffc00000000UL
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK		0x0006000000000000UL
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK	0x0008000000000000UL
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK	0x0010000000000000UL
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK	0x0020000000000000UL
+#define UV4H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK	0xff80000000000000UL
+
+
 union uvh_gr1_tlb_mmr_read_data_hi_u {
 	unsigned long	v;
-	struct uvh_gr1_tlb_mmr_read_data_hi_s {
-		unsigned long	pfn:41;				/* RO */
-		unsigned long	gaa:2;				/* RO */
-		unsigned long	dirty:1;			/* RO */
-		unsigned long	larger:1;			/* RO */
-		unsigned long	rsvd_45_63:19;
-	} s;
 	struct uv1h_gr1_tlb_mmr_read_data_hi_s {
 		unsigned long	pfn:41;				/* RO */
 		unsigned long	gaa:2;				/* RO */
@@ -1261,13 +1630,6 @@
 		unsigned long	larger:1;			/* RO */
 		unsigned long	rsvd_45_63:19;
 	} s1;
-	struct uvxh_gr1_tlb_mmr_read_data_hi_s {
-		unsigned long	pfn:41;				/* RO */
-		unsigned long	gaa:2;				/* RO */
-		unsigned long	dirty:1;			/* RO */
-		unsigned long	larger:1;			/* RO */
-		unsigned long	rsvd_45_63:19;
-	} sx;
 	struct uv2h_gr1_tlb_mmr_read_data_hi_s {
 		unsigned long	pfn:41;				/* RO */
 		unsigned long	gaa:2;				/* RO */
@@ -1284,6 +1646,16 @@
 		unsigned long	undef_46_54:9;			/* Undefined */
 		unsigned long	way_ecc:9;			/* RO */
 	} s3;
+	struct uv4h_gr1_tlb_mmr_read_data_hi_s {
+		unsigned long	pfn:34;				/* RO */
+		unsigned long	pnid:15;			/* RO */
+		unsigned long	gaa:2;				/* RO */
+		unsigned long	dirty:1;			/* RO */
+		unsigned long	larger:1;			/* RO */
+		unsigned long	aa_ext:1;			/* RO */
+		unsigned long	undef_54:1;			/* Undefined */
+		unsigned long	way_ecc:9;			/* RO */
+	} s4;
 };
 
 /* ========================================================================= */
@@ -1292,10 +1664,12 @@
 #define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL
 #define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
 #define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
-#define UVH_GR1_TLB_MMR_READ_DATA_LO					\
-		(is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_LO :		\
-		(is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO :		\
-				UV3H_GR1_TLB_MMR_READ_DATA_LO))
+#define UV4H_GR1_TLB_MMR_READ_DATA_LO 0x7010a8UL
+#define UVH_GR1_TLB_MMR_READ_DATA_LO (					\
+	is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_LO :			\
+	is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO :			\
+	is_uv3_hub() ? UV3H_GR1_TLB_MMR_READ_DATA_LO :			\
+	/*is_uv4_hub*/ UV4H_GR1_TLB_MMR_READ_DATA_LO)
 
 #define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT		0
 #define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT		39
@@ -1332,6 +1706,14 @@
 #define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK		0x7fffff8000000000UL
 #define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK	0x8000000000000000UL
 
+#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT		0
+#define UV4H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT		39
+#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT	63
+#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK		0x0000007fffffffffUL
+#define UV4H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK		0x7fffff8000000000UL
+#define UV4H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK	0x8000000000000000UL
+
+
 union uvh_gr1_tlb_mmr_read_data_lo_u {
 	unsigned long	v;
 	struct uvh_gr1_tlb_mmr_read_data_lo_s {
@@ -1359,6 +1741,11 @@
 		unsigned long	asid:24;			/* RO */
 		unsigned long	valid:1;			/* RO */
 	} s3;
+	struct uv4h_gr1_tlb_mmr_read_data_lo_s {
+		unsigned long	vpn:39;				/* RO */
+		unsigned long	asid:24;			/* RO */
+		unsigned long	valid:1;			/* RO */
+	} s4;
 };
 
 /* ========================================================================= */
@@ -1369,6 +1756,7 @@
 #define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT		0
 #define UVH_INT_CMPB_REAL_TIME_CMPB_MASK		0x00ffffffffffffffUL
 
+
 union uvh_int_cmpb_u {
 	unsigned long	v;
 	struct uvh_int_cmpb_s {
@@ -1382,12 +1770,14 @@
 /* ========================================================================= */
 #define UVH_INT_CMPC 0x22100UL
 
+
 #define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT		0
 #define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK		0x00ffffffffffffffUL
 
 #define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT		0
 #define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK		0x00ffffffffffffffUL
 
+
 union uvh_int_cmpc_u {
 	unsigned long	v;
 	struct uvh_int_cmpc_s {
@@ -1401,12 +1791,14 @@
 /* ========================================================================= */
 #define UVH_INT_CMPD 0x22180UL
 
+
 #define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT		0
 #define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK		0x00ffffffffffffffUL
 
 #define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT		0
 #define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK		0x00ffffffffffffffUL
 
+
 union uvh_int_cmpd_u {
 	unsigned long	v;
 	struct uvh_int_cmpd_s {
@@ -1419,7 +1811,16 @@
 /*                               UVH_IPI_INT                                 */
 /* ========================================================================= */
 #define UVH_IPI_INT 0x60500UL
-#define UVH_IPI_INT_32 0x348
+
+#define UV1H_IPI_INT_32 0x348
+#define UV2H_IPI_INT_32 0x348
+#define UV3H_IPI_INT_32 0x348
+#define UV4H_IPI_INT_32 0x268
+#define UVH_IPI_INT_32 (						\
+	is_uv1_hub() ? UV1H_IPI_INT_32 :				\
+	is_uv2_hub() ? UV2H_IPI_INT_32 :				\
+	is_uv3_hub() ? UV3H_IPI_INT_32 :				\
+	/*is_uv4_hub*/ UV4H_IPI_INT_32)
 
 #define UVH_IPI_INT_VECTOR_SHFT				0
 #define UVH_IPI_INT_DELIVERY_MODE_SHFT			8
@@ -1432,6 +1833,7 @@
 #define UVH_IPI_INT_APIC_ID_MASK			0x0000ffffffff0000UL
 #define UVH_IPI_INT_SEND_MASK				0x8000000000000000UL
 
+
 union uvh_ipi_int_u {
 	unsigned long	v;
 	struct uvh_ipi_int_s {
@@ -1448,103 +1850,269 @@
 /* ========================================================================= */
 /*                   UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST                     */
 /* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
+#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST")
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST (				\
+	is_uv1_hub() ? UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST :		\
+	is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST :		\
+	is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST)
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
 
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
+
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
+
+
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
+
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
+
 
 union uvh_lb_bau_intd_payload_queue_first_u {
 	unsigned long	v;
-	struct uvh_lb_bau_intd_payload_queue_first_s {
+	struct uv1h_lb_bau_intd_payload_queue_first_s {
 		unsigned long	rsvd_0_3:4;
 		unsigned long	address:39;			/* RW */
 		unsigned long	rsvd_43_48:6;
 		unsigned long	node_id:14;			/* RW */
 		unsigned long	rsvd_63:1;
-	} s;
+	} s1;
+	struct uv2h_lb_bau_intd_payload_queue_first_s {
+		unsigned long	rsvd_0_3:4;
+		unsigned long	address:39;			/* RW */
+		unsigned long	rsvd_43_48:6;
+		unsigned long	node_id:14;			/* RW */
+		unsigned long	rsvd_63:1;
+	} s2;
+	struct uv3h_lb_bau_intd_payload_queue_first_s {
+		unsigned long	rsvd_0_3:4;
+		unsigned long	address:39;			/* RW */
+		unsigned long	rsvd_43_48:6;
+		unsigned long	node_id:14;			/* RW */
+		unsigned long	rsvd_63:1;
+	} s3;
 };
 
 /* ========================================================================= */
 /*                    UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST                     */
 /* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
+#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST")
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST (				\
+	is_uv1_hub() ? UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST :		\
+	is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST :		\
+	is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST)
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
 
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT	4
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK	0x000007fffffffff0UL
+
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
+
+
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
+
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
+
 
 union uvh_lb_bau_intd_payload_queue_last_u {
 	unsigned long	v;
-	struct uvh_lb_bau_intd_payload_queue_last_s {
+	struct uv1h_lb_bau_intd_payload_queue_last_s {
 		unsigned long	rsvd_0_3:4;
 		unsigned long	address:39;			/* RW */
 		unsigned long	rsvd_43_63:21;
-	} s;
+	} s1;
+	struct uv2h_lb_bau_intd_payload_queue_last_s {
+		unsigned long	rsvd_0_3:4;
+		unsigned long	address:39;			/* RW */
+		unsigned long	rsvd_43_63:21;
+	} s2;
+	struct uv3h_lb_bau_intd_payload_queue_last_s {
+		unsigned long	rsvd_0_3:4;
+		unsigned long	address:39;			/* RW */
+		unsigned long	rsvd_43_63:21;
+	} s3;
 };
 
 /* ========================================================================= */
 /*                    UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL                     */
 /* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
+#define UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL uv_undefined("UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL")
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL (				\
+	is_uv1_hub() ? UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL :		\
+	is_uv2_hub() ? UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL :		\
+	is_uv3_hub() ? UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL)
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
 
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT	4
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK	0x000007fffffffff0UL
+
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
+#define UV1H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
+
+
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
+#define UV2H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
+
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
+#define UV3H_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
+
 
 union uvh_lb_bau_intd_payload_queue_tail_u {
 	unsigned long	v;
-	struct uvh_lb_bau_intd_payload_queue_tail_s {
+	struct uv1h_lb_bau_intd_payload_queue_tail_s {
 		unsigned long	rsvd_0_3:4;
 		unsigned long	address:39;			/* RW */
 		unsigned long	rsvd_43_63:21;
-	} s;
+	} s1;
+	struct uv2h_lb_bau_intd_payload_queue_tail_s {
+		unsigned long	rsvd_0_3:4;
+		unsigned long	address:39;			/* RW */
+		unsigned long	rsvd_43_63:21;
+	} s2;
+	struct uv3h_lb_bau_intd_payload_queue_tail_s {
+		unsigned long	rsvd_0_3:4;
+		unsigned long	address:39;			/* RW */
+		unsigned long	rsvd_43_63:21;
+	} s3;
 };
 
 /* ========================================================================= */
 /*                   UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE                    */
 /* ========================================================================= */
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE uv_undefined("UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE")
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE (				\
+	is_uv1_hub() ? UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE :		\
+	is_uv2_hub() ? UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE :		\
+	is_uv3_hub() ? UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE)
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
 
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
+
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
+
+
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
+
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
+
 
 union uvh_lb_bau_intd_software_acknowledge_u {
 	unsigned long	v;
-	struct uvh_lb_bau_intd_software_acknowledge_s {
+	struct uv1h_lb_bau_intd_software_acknowledge_s {
 		unsigned long	pending_0:1;			/* RW, W1C */
 		unsigned long	pending_1:1;			/* RW, W1C */
 		unsigned long	pending_2:1;			/* RW, W1C */
@@ -1562,27 +2130,84 @@
 		unsigned long	timeout_6:1;			/* RW, W1C */
 		unsigned long	timeout_7:1;			/* RW, W1C */
 		unsigned long	rsvd_16_63:48;
-	} s;
+	} s1;
+	struct uv2h_lb_bau_intd_software_acknowledge_s {
+		unsigned long	pending_0:1;			/* RW */
+		unsigned long	pending_1:1;			/* RW */
+		unsigned long	pending_2:1;			/* RW */
+		unsigned long	pending_3:1;			/* RW */
+		unsigned long	pending_4:1;			/* RW */
+		unsigned long	pending_5:1;			/* RW */
+		unsigned long	pending_6:1;			/* RW */
+		unsigned long	pending_7:1;			/* RW */
+		unsigned long	timeout_0:1;			/* RW */
+		unsigned long	timeout_1:1;			/* RW */
+		unsigned long	timeout_2:1;			/* RW */
+		unsigned long	timeout_3:1;			/* RW */
+		unsigned long	timeout_4:1;			/* RW */
+		unsigned long	timeout_5:1;			/* RW */
+		unsigned long	timeout_6:1;			/* RW */
+		unsigned long	timeout_7:1;			/* RW */
+		unsigned long	rsvd_16_63:48;
+	} s2;
+	struct uv3h_lb_bau_intd_software_acknowledge_s {
+		unsigned long	pending_0:1;			/* RW */
+		unsigned long	pending_1:1;			/* RW */
+		unsigned long	pending_2:1;			/* RW */
+		unsigned long	pending_3:1;			/* RW */
+		unsigned long	pending_4:1;			/* RW */
+		unsigned long	pending_5:1;			/* RW */
+		unsigned long	pending_6:1;			/* RW */
+		unsigned long	pending_7:1;			/* RW */
+		unsigned long	timeout_0:1;			/* RW */
+		unsigned long	timeout_1:1;			/* RW */
+		unsigned long	timeout_2:1;			/* RW */
+		unsigned long	timeout_3:1;			/* RW */
+		unsigned long	timeout_4:1;			/* RW */
+		unsigned long	timeout_5:1;			/* RW */
+		unsigned long	timeout_6:1;			/* RW */
+		unsigned long	timeout_7:1;			/* RW */
+		unsigned long	rsvd_16_63:48;
+	} s3;
 };
 
 /* ========================================================================= */
 /*                UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS                 */
 /* ========================================================================= */
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
+#define UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
+#define UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
+#define UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
+#define UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS uv_undefined("UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS")
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS (			\
+	is_uv1_hub() ? UV1H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS :	\
+	is_uv2_hub() ? UV2H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS :	\
+	is_uv3_hub() ? UV3H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS :	\
+	/*is_uv4_hub*/ UV4H_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS)
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
 
 
 /* ========================================================================= */
 /*                         UVH_LB_BAU_MISC_CONTROL                           */
 /* ========================================================================= */
-#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
 #define UV1H_LB_BAU_MISC_CONTROL 0x320170UL
 #define UV2H_LB_BAU_MISC_CONTROL 0x320170UL
 #define UV3H_LB_BAU_MISC_CONTROL 0x320170UL
-#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
-#define UV1H_LB_BAU_MISC_CONTROL_32 0x320170UL
-#define UV2H_LB_BAU_MISC_CONTROL_32 0x320170UL
-#define UV3H_LB_BAU_MISC_CONTROL_32 0x320170UL
+#define UV4H_LB_BAU_MISC_CONTROL 0xc8170UL
+#define UVH_LB_BAU_MISC_CONTROL (					\
+	is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL :			\
+	is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL :			\
+	is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL :			\
+	/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL)
+
+#define UV1H_LB_BAU_MISC_CONTROL_32 0xa10
+#define UV2H_LB_BAU_MISC_CONTROL_32 0xa10
+#define UV3H_LB_BAU_MISC_CONTROL_32 0xa10
+#define UV4H_LB_BAU_MISC_CONTROL_32 0xa18
+#define UVH_LB_BAU_MISC_CONTROL_32 (					\
+	is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL_32 :			\
+	is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_32 :			\
+	is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_32 :			\
+	/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_32)
 
 #define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT	0
 #define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT		8
@@ -1590,8 +2215,6 @@
 #define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT	10
 #define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
 #define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
-#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
 #define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
 #define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
 #define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
@@ -1606,8 +2229,6 @@
 #define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK	0x0000000000000400UL
 #define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
 #define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
-#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
 #define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
 #define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
 #define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
@@ -1656,8 +2277,6 @@
 #define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT	10
 #define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
 #define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
-#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
 #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
 #define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
 #define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
@@ -1679,8 +2298,6 @@
 #define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK	0x0000000000000400UL
 #define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
 #define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
-#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
-#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
 #define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
 #define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
 #define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
@@ -1797,6 +2414,88 @@
 #define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
 #define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK		0xffff000000000000UL
 
+#define UV4H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT	0
+#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT		8
+#define UV4H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT	9
+#define UV4H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT	10
+#define UV4H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV4H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_15_19_SHFT	15
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV4H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV4H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT	30
+#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UV4H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
+#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_37_SHFT	37
+#define UV4H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
+#define UV4H_LB_BAU_MISC_CONTROL_ADDRESS_INTERLEAVE_SELECT_SHFT 46
+#define UV4H_LB_BAU_MISC_CONTROL_FUN_SHFT		48
+#define UV4H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK	0x00000000000000ffUL
+#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK		0x0000000000000100UL
+#define UV4H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK	0x0000000000000200UL
+#define UV4H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK	0x0000000000000400UL
+#define UV4H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV4H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_15_19_MASK	0x00000000000f8000UL
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV4H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV4H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK	0x0000000040000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_RESERVED_37_MASK	0x0000002000000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_ADDRESS_INTERLEAVE_SELECT_MASK 0x0000400000000000UL
+#define UV4H_LB_BAU_MISC_CONTROL_FUN_MASK		0xffff000000000000UL
+
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK	\
+	uv_undefined("UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK")
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK (	\
+	is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \
+	is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \
+	is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK : \
+	/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK)
+#define UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT	\
+	uv_undefined("UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT")
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT (	\
+	is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \
+	is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \
+	is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT : \
+	/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT)
+#define UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK	\
+	uv_undefined("UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK")
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK (	\
+	is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \
+	is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \
+	is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK : \
+	/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK)
+#define UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT	\
+	uv_undefined("UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT")
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT (	\
+	is_uv1_hub() ? UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \
+	is_uv2_hub() ? UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \
+	is_uv3_hub() ? UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT : \
+	/*is_uv4_hub*/ UV4H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT)
+
 union uvh_lb_bau_misc_control_u {
 	unsigned long	v;
 	struct uvh_lb_bau_misc_control_s {
@@ -1806,8 +2505,7 @@
 		unsigned long	force_lock_nop:1;		/* RW */
 		unsigned long	qpi_agent_presence_vector:3;	/* RW */
 		unsigned long	descriptor_fetch_mode:1;	/* RW */
-		unsigned long	enable_intd_soft_ack_mode:1;	/* RW */
-		unsigned long	intd_soft_ack_timeout_period:4;	/* RW */
+		unsigned long	rsvd_15_19:5;
 		unsigned long	enable_dual_mapping_mode:1;	/* RW */
 		unsigned long	vga_io_port_decode_enable:1;	/* RW */
 		unsigned long	vga_io_port_16_bit_decode:1;	/* RW */
@@ -1844,8 +2542,7 @@
 		unsigned long	force_lock_nop:1;		/* RW */
 		unsigned long	qpi_agent_presence_vector:3;	/* RW */
 		unsigned long	descriptor_fetch_mode:1;	/* RW */
-		unsigned long	enable_intd_soft_ack_mode:1;	/* RW */
-		unsigned long	intd_soft_ack_timeout_period:4;	/* RW */
+		unsigned long	rsvd_15_19:5;
 		unsigned long	enable_dual_mapping_mode:1;	/* RW */
 		unsigned long	vga_io_port_decode_enable:1;	/* RW */
 		unsigned long	vga_io_port_16_bit_decode:1;	/* RW */
@@ -1918,13 +2615,59 @@
 		unsigned long	rsvd_46_47:2;
 		unsigned long	fun:16;				/* RW */
 	} s3;
+	struct uv4h_lb_bau_misc_control_s {
+		unsigned long	rejection_delay:8;		/* RW */
+		unsigned long	apic_mode:1;			/* RW */
+		unsigned long	force_broadcast:1;		/* RW */
+		unsigned long	force_lock_nop:1;		/* RW */
+		unsigned long	qpi_agent_presence_vector:3;	/* RW */
+		unsigned long	descriptor_fetch_mode:1;	/* RW */
+		unsigned long	rsvd_15_19:5;
+		unsigned long	enable_dual_mapping_mode:1;	/* RW */
+		unsigned long	vga_io_port_decode_enable:1;	/* RW */
+		unsigned long	vga_io_port_16_bit_decode:1;	/* RW */
+		unsigned long	suppress_dest_registration:1;	/* RW */
+		unsigned long	programmed_initial_priority:3;	/* RW */
+		unsigned long	use_incoming_priority:1;	/* RW */
+		unsigned long	enable_programmed_initial_priority:1;/* RW */
+		unsigned long	enable_automatic_apic_mode_selection:1;/* RW */
+		unsigned long	apic_mode_status:1;		/* RO */
+		unsigned long	suppress_interrupts_to_self:1;	/* RW */
+		unsigned long	enable_lock_based_system_flush:1;/* RW */
+		unsigned long	enable_extended_sb_status:1;	/* RW */
+		unsigned long	suppress_int_prio_udt_to_self:1;/* RW */
+		unsigned long	use_legacy_descriptor_formats:1;/* RW */
+		unsigned long	suppress_quiesce_msgs_to_qpi:1;	/* RW */
+		unsigned long	rsvd_37:1;
+		unsigned long	thread_kill_timebase:8;		/* RW */
+		unsigned long	address_interleave_select:1;	/* RW */
+		unsigned long	rsvd_47:1;
+		unsigned long	fun:16;				/* RW */
+	} s4;
 };
 
 /* ========================================================================= */
 /*                     UVH_LB_BAU_SB_ACTIVATION_CONTROL                      */
 /* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
+#define UV1H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
+#define UV2H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
+#define UV3H_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
+#define UV4H_LB_BAU_SB_ACTIVATION_CONTROL 0xc8020UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL (				\
+	is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_CONTROL :		\
+	is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_CONTROL :		\
+	is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_CONTROL :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_CONTROL)
+
+#define UV1H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
+#define UV2H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
+#define UV3H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
+#define UV4H_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9c8
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 (				\
+	is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_CONTROL_32 :		\
+	is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_CONTROL_32 :		\
+	is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_CONTROL_32 :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_CONTROL_32)
 
 #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT	0
 #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT	62
@@ -1933,6 +2676,7 @@
 #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK	0x4000000000000000UL
 #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK	0x8000000000000000UL
 
+
 union uvh_lb_bau_sb_activation_control_u {
 	unsigned long	v;
 	struct uvh_lb_bau_sb_activation_control_s {
@@ -1946,12 +2690,30 @@
 /* ========================================================================= */
 /*                    UVH_LB_BAU_SB_ACTIVATION_STATUS_0                      */
 /* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
+#define UV1H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
+#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_0 0xc8030UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 (				\
+	is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_STATUS_0 :		\
+	is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_0 :		\
+	is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_0 :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_0)
+
+#define UV1H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
+#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9d0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 (				\
+	is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_STATUS_0_32 :		\
+	is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_0_32 :		\
+	is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_0_32 :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_0_32)
 
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT	0
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK	0xffffffffffffffffUL
 
+
 union uvh_lb_bau_sb_activation_status_0_u {
 	unsigned long	v;
 	struct uvh_lb_bau_sb_activation_status_0_s {
@@ -1962,12 +2724,30 @@
 /* ========================================================================= */
 /*                    UVH_LB_BAU_SB_ACTIVATION_STATUS_1                      */
 /* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
+#define UV1H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
+#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_1 0xc8040UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 (				\
+	is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_STATUS_1 :		\
+	is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_1 :		\
+	is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_1 :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_1)
+
+#define UV1H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
+#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9d8
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 (				\
+	is_uv1_hub() ? UV1H_LB_BAU_SB_ACTIVATION_STATUS_1_32 :		\
+	is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_1_32 :		\
+	is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_1_32 :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_1_32)
 
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT	0
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK	0xffffffffffffffffUL
 
+
 union uvh_lb_bau_sb_activation_status_1_u {
 	unsigned long	v;
 	struct uvh_lb_bau_sb_activation_status_1_s {
@@ -1978,23 +2758,55 @@
 /* ========================================================================= */
 /*                      UVH_LB_BAU_SB_DESCRIPTOR_BASE                        */
 /* ========================================================================= */
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
+#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
+#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
+#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
+#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE 0xc8010UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE (					\
+	is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE :			\
+	is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE :			\
+	is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE :			\
+	/*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE)
+
+#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
+#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
+#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
+#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9c0
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 (				\
+	is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE_32 :		\
+	is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_32 :		\
+	is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_32 :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32)
 
 #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT	12
 #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT	49
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK	0x000007fffffff000UL
 #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK	0x7ffe000000000000UL
 
+#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+
+
+#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+
+#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+
+#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x00003ffffffff000UL
+
+
 union uvh_lb_bau_sb_descriptor_base_u {
 	unsigned long	v;
 	struct uvh_lb_bau_sb_descriptor_base_s {
 		unsigned long	rsvd_0_11:12;
-		unsigned long	page_address:31;		/* RW */
-		unsigned long	rsvd_43_48:6;
+		unsigned long	rsvd_12_48:37;
 		unsigned long	node_id:14;			/* RW */
 		unsigned long	rsvd_63:1;
 	} s;
+	struct uv4h_lb_bau_sb_descriptor_base_s {
+		unsigned long	rsvd_0_11:12;
+		unsigned long	page_address:34;		/* RW */
+		unsigned long	rsvd_46_48:3;
+		unsigned long	node_id:14;			/* RW */
+		unsigned long	rsvd_63:1;
+	} s4;
 };
 
 /* ========================================================================= */
@@ -2004,6 +2816,7 @@
 #define UV1H_NODE_ID 0x0UL
 #define UV2H_NODE_ID 0x0UL
 #define UV3H_NODE_ID 0x0UL
+#define UV4H_NODE_ID 0x0UL
 
 #define UVH_NODE_ID_FORCE1_SHFT				0
 #define UVH_NODE_ID_MANUFACTURER_SHFT			1
@@ -2080,6 +2893,26 @@
 #define UV3H_NODE_ID_NODES_PER_BIT_MASK			0x01fc000000000000UL
 #define UV3H_NODE_ID_NI_PORT_MASK			0x3e00000000000000UL
 
+#define UV4H_NODE_ID_FORCE1_SHFT			0
+#define UV4H_NODE_ID_MANUFACTURER_SHFT			1
+#define UV4H_NODE_ID_PART_NUMBER_SHFT			12
+#define UV4H_NODE_ID_REVISION_SHFT			28
+#define UV4H_NODE_ID_NODE_ID_SHFT			32
+#define UV4H_NODE_ID_ROUTER_SELECT_SHFT			48
+#define UV4H_NODE_ID_RESERVED_2_SHFT			49
+#define UV4H_NODE_ID_NODES_PER_BIT_SHFT			50
+#define UV4H_NODE_ID_NI_PORT_SHFT			57
+#define UV4H_NODE_ID_FORCE1_MASK			0x0000000000000001UL
+#define UV4H_NODE_ID_MANUFACTURER_MASK			0x0000000000000ffeUL
+#define UV4H_NODE_ID_PART_NUMBER_MASK			0x000000000ffff000UL
+#define UV4H_NODE_ID_REVISION_MASK			0x00000000f0000000UL
+#define UV4H_NODE_ID_NODE_ID_MASK			0x00007fff00000000UL
+#define UV4H_NODE_ID_ROUTER_SELECT_MASK			0x0001000000000000UL
+#define UV4H_NODE_ID_RESERVED_2_MASK			0x0002000000000000UL
+#define UV4H_NODE_ID_NODES_PER_BIT_MASK			0x01fc000000000000UL
+#define UV4H_NODE_ID_NI_PORT_MASK			0x3e00000000000000UL
+
+
 union uvh_node_id_u {
 	unsigned long	v;
 	struct uvh_node_id_s {
@@ -2137,17 +2970,40 @@
 		unsigned long	ni_port:5;			/* RO */
 		unsigned long	rsvd_62_63:2;
 	} s3;
+	struct uv4h_node_id_s {
+		unsigned long	force1:1;			/* RO */
+		unsigned long	manufacturer:11;		/* RO */
+		unsigned long	part_number:16;			/* RO */
+		unsigned long	revision:4;			/* RO */
+		unsigned long	node_id:15;			/* RW */
+		unsigned long	rsvd_47:1;
+		unsigned long	router_select:1;		/* RO */
+		unsigned long	rsvd_49:1;
+		unsigned long	nodes_per_bit:7;		/* RO */
+		unsigned long	ni_port:5;			/* RO */
+		unsigned long	rsvd_62_63:2;
+	} s4;
 };
 
 /* ========================================================================= */
 /*                          UVH_NODE_PRESENT_TABLE                           */
 /* ========================================================================= */
 #define UVH_NODE_PRESENT_TABLE 0x1400UL
-#define UVH_NODE_PRESENT_TABLE_DEPTH 16
+
+#define UV1H_NODE_PRESENT_TABLE_DEPTH 16
+#define UV2H_NODE_PRESENT_TABLE_DEPTH 16
+#define UV3H_NODE_PRESENT_TABLE_DEPTH 16
+#define UV4H_NODE_PRESENT_TABLE_DEPTH 4
+#define UVH_NODE_PRESENT_TABLE_DEPTH (					\
+	is_uv1_hub() ? UV1H_NODE_PRESENT_TABLE_DEPTH :			\
+	is_uv2_hub() ? UV2H_NODE_PRESENT_TABLE_DEPTH :			\
+	is_uv3_hub() ? UV3H_NODE_PRESENT_TABLE_DEPTH :			\
+	/*is_uv4_hub*/ UV4H_NODE_PRESENT_TABLE_DEPTH)
 
 #define UVH_NODE_PRESENT_TABLE_NODES_SHFT		0
 #define UVH_NODE_PRESENT_TABLE_NODES_MASK		0xffffffffffffffffUL
 
+
 union uvh_node_present_table_u {
 	unsigned long	v;
 	struct uvh_node_present_table_s {
@@ -2158,7 +3014,15 @@
 /* ========================================================================= */
 /*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x4800c8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR (			\
+	is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR :	\
+	is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR :	\
+	is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR :	\
+	/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR)
 
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
@@ -2167,6 +3031,7 @@
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
 
+
 union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
 	unsigned long	v;
 	struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
@@ -2182,7 +3047,15 @@
 /* ========================================================================= */
 /*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x4800d8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR (			\
+	is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR :	\
+	is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR :	\
+	is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR :	\
+	/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR)
 
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
@@ -2191,6 +3064,7 @@
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
 
+
 union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
 	unsigned long	v;
 	struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
@@ -2206,7 +3080,15 @@
 /* ========================================================================= */
 /*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x4800e8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR (			\
+	is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR :	\
+	is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR :	\
+	is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR :	\
+	/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR)
 
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
@@ -2215,6 +3097,7 @@
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
 
+
 union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
 	unsigned long	v;
 	struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
@@ -2230,11 +3113,20 @@
 /* ========================================================================= */
 /*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x4800d0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR (			\
+	is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR :	\
+	is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR :	\
+	is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR :	\
+	/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR)
 
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
 
+
 union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
 	unsigned long	v;
 	struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
@@ -2247,11 +3139,20 @@
 /* ========================================================================= */
 /*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x4800e0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR (			\
+	is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR :	\
+	is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR :	\
+	is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR :	\
+	/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR)
 
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
 
+
 union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
 	unsigned long	v;
 	struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
@@ -2264,11 +3165,20 @@
 /* ========================================================================= */
 /*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x4800f0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR (			\
+	is_uv1_hub() ? UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR :	\
+	is_uv2_hub() ? UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR :	\
+	is_uv3_hub() ? UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR :	\
+	/*is_uv4_hub*/ UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR)
 
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
 
+
 union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
 	unsigned long	v;
 	struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
@@ -2281,14 +3191,17 @@
 /* ========================================================================= */
 /*                          UVH_RH_GAM_CONFIG_MMR                            */
 /* ========================================================================= */
-#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
 #define UV1H_RH_GAM_CONFIG_MMR 0x1600000UL
 #define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL
 #define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV4H_RH_GAM_CONFIG_MMR 0x480000UL
+#define UVH_RH_GAM_CONFIG_MMR (						\
+	is_uv1_hub() ? UV1H_RH_GAM_CONFIG_MMR :				\
+	is_uv2_hub() ? UV2H_RH_GAM_CONFIG_MMR :				\
+	is_uv3_hub() ? UV3H_RH_GAM_CONFIG_MMR :				\
+	/*is_uv4_hub*/ UV4H_RH_GAM_CONFIG_MMR)
 
-#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT		0
 #define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT		6
-#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK		0x000000000000003fUL
 #define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK		0x00000000000003c0UL
 
 #define UV1H_RH_GAM_CONFIG_MMR_M_SKT_SHFT		0
@@ -2298,9 +3211,7 @@
 #define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK		0x00000000000003c0UL
 #define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK		0x0000000000001000UL
 
-#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_SHFT		0
 #define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT		6
-#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_MASK		0x000000000000003fUL
 #define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK		0x00000000000003c0UL
 
 #define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT		0
@@ -2313,10 +3224,14 @@
 #define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK		0x000000000000003fUL
 #define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK		0x00000000000003c0UL
 
+#define UV4H_RH_GAM_CONFIG_MMR_N_SKT_SHFT		6
+#define UV4H_RH_GAM_CONFIG_MMR_N_SKT_MASK		0x00000000000003c0UL
+
+
 union uvh_rh_gam_config_mmr_u {
 	unsigned long	v;
 	struct uvh_rh_gam_config_mmr_s {
-		unsigned long	m_skt:6;			/* RW */
+		unsigned long	rsvd_0_5:6;
 		unsigned long	n_skt:4;			/* RW */
 		unsigned long	rsvd_10_63:54;
 	} s;
@@ -2328,7 +3243,7 @@
 		unsigned long	rsvd_13_63:51;
 	} s1;
 	struct uvxh_rh_gam_config_mmr_s {
-		unsigned long	m_skt:6;			/* RW */
+		unsigned long	rsvd_0_5:6;
 		unsigned long	n_skt:4;			/* RW */
 		unsigned long	rsvd_10_63:54;
 	} sx;
@@ -2342,20 +3257,28 @@
 		unsigned long	n_skt:4;			/* RW */
 		unsigned long	rsvd_10_63:54;
 	} s3;
+	struct uv4h_rh_gam_config_mmr_s {
+		unsigned long	rsvd_0_5:6;
+		unsigned long	n_skt:4;			/* RW */
+		unsigned long	rsvd_10_63:54;
+	} s4;
 };
 
 /* ========================================================================= */
 /*                    UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR                      */
 /* ========================================================================= */
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
 #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
 #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
 #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x480010UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR (				\
+	is_uv1_hub() ? UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR :		\
+	is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR :		\
+	is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR :		\
+	/*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR)
 
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT	28
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT	52
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT	63
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK	0x00003ffff0000000UL
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK	0x00f0000000000000UL
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK	0x8000000000000000UL
 
@@ -2368,10 +3291,8 @@
 #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK	0x00f0000000000000UL
 #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK	0x8000000000000000UL
 
-#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT	28
 #define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT	52
 #define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT	63
-#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK	0x00003ffff0000000UL
 #define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK	0x00f0000000000000UL
 #define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK	0x8000000000000000UL
 
@@ -2391,12 +3312,28 @@
 #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK	0x4000000000000000UL
 #define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK	0x8000000000000000UL
 
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT	26
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT	52
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT	63
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK	0x00003ffffc000000UL
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK	0x00f0000000000000UL
+#define UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK	0x8000000000000000UL
+
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK (			\
+	is_uv1_hub() ? UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK :	\
+	is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK :	\
+	is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK :	\
+	/*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK)
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT (			\
+	is_uv1_hub() ? UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT :	\
+	is_uv2_hub() ? UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT :	\
+	is_uv3_hub() ? UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT :	\
+	/*is_uv4_hub*/ UV4H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT)
+
 union uvh_rh_gam_gru_overlay_config_mmr_u {
 	unsigned long	v;
 	struct uvh_rh_gam_gru_overlay_config_mmr_s {
-		unsigned long	rsvd_0_27:28;
-		unsigned long	base:18;			/* RW */
-		unsigned long	rsvd_46_51:6;
+		unsigned long	rsvd_0_51:52;
 		unsigned long	n_gru:4;			/* RW */
 		unsigned long	rsvd_56_62:7;
 		unsigned long	enable:1;			/* RW */
@@ -2412,8 +3349,7 @@
 		unsigned long	enable:1;			/* RW */
 	} s1;
 	struct uvxh_rh_gam_gru_overlay_config_mmr_s {
-		unsigned long	rsvd_0_27:28;
-		unsigned long	base:18;			/* RW */
+		unsigned long	rsvd_0_45:46;
 		unsigned long	rsvd_46_51:6;
 		unsigned long	n_gru:4;			/* RW */
 		unsigned long	rsvd_56_62:7;
@@ -2436,6 +3372,15 @@
 		unsigned long	mode:1;				/* RW */
 		unsigned long	enable:1;			/* RW */
 	} s3;
+	struct uv4h_rh_gam_gru_overlay_config_mmr_s {
+		unsigned long	rsvd_0_24:25;
+		unsigned long	undef_25:1;			/* Undefined */
+		unsigned long	base:20;			/* RW */
+		unsigned long	rsvd_46_51:6;
+		unsigned long	n_gru:4;			/* RW */
+		unsigned long	rsvd_56_62:7;
+		unsigned long	enable:1;			/* RW */
+	} s4;
 };
 
 /* ========================================================================= */
@@ -2443,6 +3388,14 @@
 /* ========================================================================= */
 #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
 #define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR uv_undefined("UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR")
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR uv_undefined("UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR")
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR (				\
+	is_uv1_hub() ? UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR :		\
+	is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR :		\
+	is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR :		\
+	/*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR)
+
 
 #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT	30
 #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT	46
@@ -2453,6 +3406,7 @@
 #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK	0x00f0000000000000UL
 #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
 
+
 #define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT	27
 #define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT	46
 #define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT	52
@@ -2462,6 +3416,7 @@
 #define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK	0x00f0000000000000UL
 #define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
 
+
 union uvh_rh_gam_mmioh_overlay_config_mmr_u {
 	unsigned long	v;
 	struct uv1h_rh_gam_mmioh_overlay_config_mmr_s {
@@ -2485,10 +3440,15 @@
 /* ========================================================================= */
 /*                    UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR                      */
 /* ========================================================================= */
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
 #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
 #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
 #define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x480028UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR (				\
+	is_uv1_hub() ? UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR :		\
+	is_uv2_hub() ? UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR :		\
+	is_uv3_hub() ? UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR :		\
+	/*is_uv4_hub*/ UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR)
 
 #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT	26
 #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT	63
@@ -2517,6 +3477,12 @@
 #define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK	0x00003ffffc000000UL
 #define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK	0x8000000000000000UL
 
+#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT	26
+#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT	63
+#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK	0x00003ffffc000000UL
+#define UV4H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK	0x8000000000000000UL
+
+
 union uvh_rh_gam_mmr_overlay_config_mmr_u {
 	unsigned long	v;
 	struct uvh_rh_gam_mmr_overlay_config_mmr_s {
@@ -2550,16 +3516,31 @@
 		unsigned long	rsvd_46_62:17;
 		unsigned long	enable:1;			/* RW */
 	} s3;
+	struct uv4h_rh_gam_mmr_overlay_config_mmr_s {
+		unsigned long	rsvd_0_25:26;
+		unsigned long	base:20;			/* RW */
+		unsigned long	rsvd_46_62:17;
+		unsigned long	enable:1;			/* RW */
+	} s4;
 };
 
 /* ========================================================================= */
 /*                                 UVH_RTC                                   */
 /* ========================================================================= */
-#define UVH_RTC 0x340000UL
+#define UV1H_RTC 0x340000UL
+#define UV2H_RTC 0x340000UL
+#define UV3H_RTC 0x340000UL
+#define UV4H_RTC 0xe0000UL
+#define UVH_RTC (							\
+	is_uv1_hub() ? UV1H_RTC :					\
+	is_uv2_hub() ? UV2H_RTC :					\
+	is_uv3_hub() ? UV3H_RTC :					\
+	/*is_uv4_hub*/ UV4H_RTC)
 
 #define UVH_RTC_REAL_TIME_CLOCK_SHFT			0
 #define UVH_RTC_REAL_TIME_CLOCK_MASK			0x00ffffffffffffffUL
 
+
 union uvh_rtc_u {
 	unsigned long	v;
 	struct uvh_rtc_s {
@@ -2590,6 +3571,7 @@
 #define UVH_RTC1_INT_CONFIG_M_MASK			0x0000000000010000UL
 #define UVH_RTC1_INT_CONFIG_APIC_ID_MASK		0xffffffff00000000UL
 
+
 union uvh_rtc1_int_config_u {
 	unsigned long	v;
 	struct uvh_rtc1_int_config_s {
@@ -2609,12 +3591,30 @@
 /* ========================================================================= */
 /*                               UVH_SCRATCH5                                */
 /* ========================================================================= */
-#define UVH_SCRATCH5 0x2d0200UL
-#define UVH_SCRATCH5_32 0x778
+#define UV1H_SCRATCH5 0x2d0200UL
+#define UV2H_SCRATCH5 0x2d0200UL
+#define UV3H_SCRATCH5 0x2d0200UL
+#define UV4H_SCRATCH5 0xb0200UL
+#define UVH_SCRATCH5 (							\
+	is_uv1_hub() ? UV1H_SCRATCH5 :					\
+	is_uv2_hub() ? UV2H_SCRATCH5 :					\
+	is_uv3_hub() ? UV3H_SCRATCH5 :					\
+	/*is_uv4_hub*/ UV4H_SCRATCH5)
+
+#define UV1H_SCRATCH5_32 0x778
+#define UV2H_SCRATCH5_32 0x778
+#define UV3H_SCRATCH5_32 0x778
+#define UV4H_SCRATCH5_32 0x798
+#define UVH_SCRATCH5_32 (						\
+	is_uv1_hub() ? UV1H_SCRATCH5_32 :				\
+	is_uv2_hub() ? UV2H_SCRATCH5_32 :				\
+	is_uv3_hub() ? UV3H_SCRATCH5_32 :				\
+	/*is_uv4_hub*/ UV4H_SCRATCH5_32)
 
 #define UVH_SCRATCH5_SCRATCH5_SHFT			0
 #define UVH_SCRATCH5_SCRATCH5_MASK			0xffffffffffffffffUL
 
+
 union uvh_scratch5_u {
 	unsigned long	v;
 	struct uvh_scratch5_s {
@@ -2625,14 +3625,39 @@
 /* ========================================================================= */
 /*                            UVH_SCRATCH5_ALIAS                             */
 /* ========================================================================= */
-#define UVH_SCRATCH5_ALIAS 0x2d0208UL
-#define UVH_SCRATCH5_ALIAS_32 0x780
+#define UV1H_SCRATCH5_ALIAS 0x2d0208UL
+#define UV2H_SCRATCH5_ALIAS 0x2d0208UL
+#define UV3H_SCRATCH5_ALIAS 0x2d0208UL
+#define UV4H_SCRATCH5_ALIAS 0xb0208UL
+#define UVH_SCRATCH5_ALIAS (						\
+	is_uv1_hub() ? UV1H_SCRATCH5_ALIAS :				\
+	is_uv2_hub() ? UV2H_SCRATCH5_ALIAS :				\
+	is_uv3_hub() ? UV3H_SCRATCH5_ALIAS :				\
+	/*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS)
+
+#define UV1H_SCRATCH5_ALIAS_32 0x780
+#define UV2H_SCRATCH5_ALIAS_32 0x780
+#define UV3H_SCRATCH5_ALIAS_32 0x780
+#define UV4H_SCRATCH5_ALIAS_32 0x7a0
+#define UVH_SCRATCH5_ALIAS_32 (						\
+	is_uv1_hub() ? UV1H_SCRATCH5_ALIAS_32 :				\
+	is_uv2_hub() ? UV2H_SCRATCH5_ALIAS_32 :				\
+	is_uv3_hub() ? UV3H_SCRATCH5_ALIAS_32 :				\
+	/*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS_32)
 
 
 /* ========================================================================= */
 /*                           UVH_SCRATCH5_ALIAS_2                            */
 /* ========================================================================= */
-#define UVH_SCRATCH5_ALIAS_2 0x2d0210UL
+#define UV1H_SCRATCH5_ALIAS_2 0x2d0210UL
+#define UV2H_SCRATCH5_ALIAS_2 0x2d0210UL
+#define UV3H_SCRATCH5_ALIAS_2 0x2d0210UL
+#define UV4H_SCRATCH5_ALIAS_2 0xb0210UL
+#define UVH_SCRATCH5_ALIAS_2 (						\
+	is_uv1_hub() ? UV1H_SCRATCH5_ALIAS_2 :				\
+	is_uv2_hub() ? UV2H_SCRATCH5_ALIAS_2 :				\
+	is_uv3_hub() ? UV3H_SCRATCH5_ALIAS_2 :				\
+	/*is_uv4_hub*/ UV4H_SCRATCH5_ALIAS_2)
 #define UVH_SCRATCH5_ALIAS_2_32 0x788
 
 
@@ -2640,76 +3665,255 @@
 /*                          UVXH_EVENT_OCCURRED2                             */
 /* ========================================================================= */
 #define UVXH_EVENT_OCCURRED2 0x70100UL
-#define UVXH_EVENT_OCCURRED2_32 0xb68
 
-#define UVXH_EVENT_OCCURRED2_RTC_0_SHFT			0
-#define UVXH_EVENT_OCCURRED2_RTC_1_SHFT			1
-#define UVXH_EVENT_OCCURRED2_RTC_2_SHFT			2
-#define UVXH_EVENT_OCCURRED2_RTC_3_SHFT			3
-#define UVXH_EVENT_OCCURRED2_RTC_4_SHFT			4
-#define UVXH_EVENT_OCCURRED2_RTC_5_SHFT			5
-#define UVXH_EVENT_OCCURRED2_RTC_6_SHFT			6
-#define UVXH_EVENT_OCCURRED2_RTC_7_SHFT			7
-#define UVXH_EVENT_OCCURRED2_RTC_8_SHFT			8
-#define UVXH_EVENT_OCCURRED2_RTC_9_SHFT			9
-#define UVXH_EVENT_OCCURRED2_RTC_10_SHFT		10
-#define UVXH_EVENT_OCCURRED2_RTC_11_SHFT		11
-#define UVXH_EVENT_OCCURRED2_RTC_12_SHFT		12
-#define UVXH_EVENT_OCCURRED2_RTC_13_SHFT		13
-#define UVXH_EVENT_OCCURRED2_RTC_14_SHFT		14
-#define UVXH_EVENT_OCCURRED2_RTC_15_SHFT		15
-#define UVXH_EVENT_OCCURRED2_RTC_16_SHFT		16
-#define UVXH_EVENT_OCCURRED2_RTC_17_SHFT		17
-#define UVXH_EVENT_OCCURRED2_RTC_18_SHFT		18
-#define UVXH_EVENT_OCCURRED2_RTC_19_SHFT		19
-#define UVXH_EVENT_OCCURRED2_RTC_20_SHFT		20
-#define UVXH_EVENT_OCCURRED2_RTC_21_SHFT		21
-#define UVXH_EVENT_OCCURRED2_RTC_22_SHFT		22
-#define UVXH_EVENT_OCCURRED2_RTC_23_SHFT		23
-#define UVXH_EVENT_OCCURRED2_RTC_24_SHFT		24
-#define UVXH_EVENT_OCCURRED2_RTC_25_SHFT		25
-#define UVXH_EVENT_OCCURRED2_RTC_26_SHFT		26
-#define UVXH_EVENT_OCCURRED2_RTC_27_SHFT		27
-#define UVXH_EVENT_OCCURRED2_RTC_28_SHFT		28
-#define UVXH_EVENT_OCCURRED2_RTC_29_SHFT		29
-#define UVXH_EVENT_OCCURRED2_RTC_30_SHFT		30
-#define UVXH_EVENT_OCCURRED2_RTC_31_SHFT		31
-#define UVXH_EVENT_OCCURRED2_RTC_0_MASK			0x0000000000000001UL
-#define UVXH_EVENT_OCCURRED2_RTC_1_MASK			0x0000000000000002UL
-#define UVXH_EVENT_OCCURRED2_RTC_2_MASK			0x0000000000000004UL
-#define UVXH_EVENT_OCCURRED2_RTC_3_MASK			0x0000000000000008UL
-#define UVXH_EVENT_OCCURRED2_RTC_4_MASK			0x0000000000000010UL
-#define UVXH_EVENT_OCCURRED2_RTC_5_MASK			0x0000000000000020UL
-#define UVXH_EVENT_OCCURRED2_RTC_6_MASK			0x0000000000000040UL
-#define UVXH_EVENT_OCCURRED2_RTC_7_MASK			0x0000000000000080UL
-#define UVXH_EVENT_OCCURRED2_RTC_8_MASK			0x0000000000000100UL
-#define UVXH_EVENT_OCCURRED2_RTC_9_MASK			0x0000000000000200UL
-#define UVXH_EVENT_OCCURRED2_RTC_10_MASK		0x0000000000000400UL
-#define UVXH_EVENT_OCCURRED2_RTC_11_MASK		0x0000000000000800UL
-#define UVXH_EVENT_OCCURRED2_RTC_12_MASK		0x0000000000001000UL
-#define UVXH_EVENT_OCCURRED2_RTC_13_MASK		0x0000000000002000UL
-#define UVXH_EVENT_OCCURRED2_RTC_14_MASK		0x0000000000004000UL
-#define UVXH_EVENT_OCCURRED2_RTC_15_MASK		0x0000000000008000UL
-#define UVXH_EVENT_OCCURRED2_RTC_16_MASK		0x0000000000010000UL
-#define UVXH_EVENT_OCCURRED2_RTC_17_MASK		0x0000000000020000UL
-#define UVXH_EVENT_OCCURRED2_RTC_18_MASK		0x0000000000040000UL
-#define UVXH_EVENT_OCCURRED2_RTC_19_MASK		0x0000000000080000UL
-#define UVXH_EVENT_OCCURRED2_RTC_20_MASK		0x0000000000100000UL
-#define UVXH_EVENT_OCCURRED2_RTC_21_MASK		0x0000000000200000UL
-#define UVXH_EVENT_OCCURRED2_RTC_22_MASK		0x0000000000400000UL
-#define UVXH_EVENT_OCCURRED2_RTC_23_MASK		0x0000000000800000UL
-#define UVXH_EVENT_OCCURRED2_RTC_24_MASK		0x0000000001000000UL
-#define UVXH_EVENT_OCCURRED2_RTC_25_MASK		0x0000000002000000UL
-#define UVXH_EVENT_OCCURRED2_RTC_26_MASK		0x0000000004000000UL
-#define UVXH_EVENT_OCCURRED2_RTC_27_MASK		0x0000000008000000UL
-#define UVXH_EVENT_OCCURRED2_RTC_28_MASK		0x0000000010000000UL
-#define UVXH_EVENT_OCCURRED2_RTC_29_MASK		0x0000000020000000UL
-#define UVXH_EVENT_OCCURRED2_RTC_30_MASK		0x0000000040000000UL
-#define UVXH_EVENT_OCCURRED2_RTC_31_MASK		0x0000000080000000UL
+#define UV2H_EVENT_OCCURRED2_32 0xb68
+#define UV3H_EVENT_OCCURRED2_32 0xb68
+#define UV4H_EVENT_OCCURRED2_32 0x608
+#define UVH_EVENT_OCCURRED2_32 (					\
+	is_uv2_hub() ? UV2H_EVENT_OCCURRED2_32 :			\
+	is_uv3_hub() ? UV3H_EVENT_OCCURRED2_32 :			\
+	/*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_32)
 
-union uvxh_event_occurred2_u {
+
+#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT			0
+#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT			1
+#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT			2
+#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT			3
+#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT			4
+#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT			5
+#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT			6
+#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT			7
+#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT			8
+#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT			9
+#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT		10
+#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT		11
+#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT		12
+#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT		13
+#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT		14
+#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT		15
+#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT		16
+#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT		17
+#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT		18
+#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT		19
+#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT		20
+#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT		21
+#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT		22
+#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT		23
+#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT		24
+#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT		25
+#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT		26
+#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT		27
+#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT		28
+#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT		29
+#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT		30
+#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT		31
+#define UV2H_EVENT_OCCURRED2_RTC_0_MASK			0x0000000000000001UL
+#define UV2H_EVENT_OCCURRED2_RTC_1_MASK			0x0000000000000002UL
+#define UV2H_EVENT_OCCURRED2_RTC_2_MASK			0x0000000000000004UL
+#define UV2H_EVENT_OCCURRED2_RTC_3_MASK			0x0000000000000008UL
+#define UV2H_EVENT_OCCURRED2_RTC_4_MASK			0x0000000000000010UL
+#define UV2H_EVENT_OCCURRED2_RTC_5_MASK			0x0000000000000020UL
+#define UV2H_EVENT_OCCURRED2_RTC_6_MASK			0x0000000000000040UL
+#define UV2H_EVENT_OCCURRED2_RTC_7_MASK			0x0000000000000080UL
+#define UV2H_EVENT_OCCURRED2_RTC_8_MASK			0x0000000000000100UL
+#define UV2H_EVENT_OCCURRED2_RTC_9_MASK			0x0000000000000200UL
+#define UV2H_EVENT_OCCURRED2_RTC_10_MASK		0x0000000000000400UL
+#define UV2H_EVENT_OCCURRED2_RTC_11_MASK		0x0000000000000800UL
+#define UV2H_EVENT_OCCURRED2_RTC_12_MASK		0x0000000000001000UL
+#define UV2H_EVENT_OCCURRED2_RTC_13_MASK		0x0000000000002000UL
+#define UV2H_EVENT_OCCURRED2_RTC_14_MASK		0x0000000000004000UL
+#define UV2H_EVENT_OCCURRED2_RTC_15_MASK		0x0000000000008000UL
+#define UV2H_EVENT_OCCURRED2_RTC_16_MASK		0x0000000000010000UL
+#define UV2H_EVENT_OCCURRED2_RTC_17_MASK		0x0000000000020000UL
+#define UV2H_EVENT_OCCURRED2_RTC_18_MASK		0x0000000000040000UL
+#define UV2H_EVENT_OCCURRED2_RTC_19_MASK		0x0000000000080000UL
+#define UV2H_EVENT_OCCURRED2_RTC_20_MASK		0x0000000000100000UL
+#define UV2H_EVENT_OCCURRED2_RTC_21_MASK		0x0000000000200000UL
+#define UV2H_EVENT_OCCURRED2_RTC_22_MASK		0x0000000000400000UL
+#define UV2H_EVENT_OCCURRED2_RTC_23_MASK		0x0000000000800000UL
+#define UV2H_EVENT_OCCURRED2_RTC_24_MASK		0x0000000001000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_25_MASK		0x0000000002000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_26_MASK		0x0000000004000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_27_MASK		0x0000000008000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_28_MASK		0x0000000010000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_29_MASK		0x0000000020000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_30_MASK		0x0000000040000000UL
+#define UV2H_EVENT_OCCURRED2_RTC_31_MASK		0x0000000080000000UL
+
+#define UV3H_EVENT_OCCURRED2_RTC_0_SHFT			0
+#define UV3H_EVENT_OCCURRED2_RTC_1_SHFT			1
+#define UV3H_EVENT_OCCURRED2_RTC_2_SHFT			2
+#define UV3H_EVENT_OCCURRED2_RTC_3_SHFT			3
+#define UV3H_EVENT_OCCURRED2_RTC_4_SHFT			4
+#define UV3H_EVENT_OCCURRED2_RTC_5_SHFT			5
+#define UV3H_EVENT_OCCURRED2_RTC_6_SHFT			6
+#define UV3H_EVENT_OCCURRED2_RTC_7_SHFT			7
+#define UV3H_EVENT_OCCURRED2_RTC_8_SHFT			8
+#define UV3H_EVENT_OCCURRED2_RTC_9_SHFT			9
+#define UV3H_EVENT_OCCURRED2_RTC_10_SHFT		10
+#define UV3H_EVENT_OCCURRED2_RTC_11_SHFT		11
+#define UV3H_EVENT_OCCURRED2_RTC_12_SHFT		12
+#define UV3H_EVENT_OCCURRED2_RTC_13_SHFT		13
+#define UV3H_EVENT_OCCURRED2_RTC_14_SHFT		14
+#define UV3H_EVENT_OCCURRED2_RTC_15_SHFT		15
+#define UV3H_EVENT_OCCURRED2_RTC_16_SHFT		16
+#define UV3H_EVENT_OCCURRED2_RTC_17_SHFT		17
+#define UV3H_EVENT_OCCURRED2_RTC_18_SHFT		18
+#define UV3H_EVENT_OCCURRED2_RTC_19_SHFT		19
+#define UV3H_EVENT_OCCURRED2_RTC_20_SHFT		20
+#define UV3H_EVENT_OCCURRED2_RTC_21_SHFT		21
+#define UV3H_EVENT_OCCURRED2_RTC_22_SHFT		22
+#define UV3H_EVENT_OCCURRED2_RTC_23_SHFT		23
+#define UV3H_EVENT_OCCURRED2_RTC_24_SHFT		24
+#define UV3H_EVENT_OCCURRED2_RTC_25_SHFT		25
+#define UV3H_EVENT_OCCURRED2_RTC_26_SHFT		26
+#define UV3H_EVENT_OCCURRED2_RTC_27_SHFT		27
+#define UV3H_EVENT_OCCURRED2_RTC_28_SHFT		28
+#define UV3H_EVENT_OCCURRED2_RTC_29_SHFT		29
+#define UV3H_EVENT_OCCURRED2_RTC_30_SHFT		30
+#define UV3H_EVENT_OCCURRED2_RTC_31_SHFT		31
+#define UV3H_EVENT_OCCURRED2_RTC_0_MASK			0x0000000000000001UL
+#define UV3H_EVENT_OCCURRED2_RTC_1_MASK			0x0000000000000002UL
+#define UV3H_EVENT_OCCURRED2_RTC_2_MASK			0x0000000000000004UL
+#define UV3H_EVENT_OCCURRED2_RTC_3_MASK			0x0000000000000008UL
+#define UV3H_EVENT_OCCURRED2_RTC_4_MASK			0x0000000000000010UL
+#define UV3H_EVENT_OCCURRED2_RTC_5_MASK			0x0000000000000020UL
+#define UV3H_EVENT_OCCURRED2_RTC_6_MASK			0x0000000000000040UL
+#define UV3H_EVENT_OCCURRED2_RTC_7_MASK			0x0000000000000080UL
+#define UV3H_EVENT_OCCURRED2_RTC_8_MASK			0x0000000000000100UL
+#define UV3H_EVENT_OCCURRED2_RTC_9_MASK			0x0000000000000200UL
+#define UV3H_EVENT_OCCURRED2_RTC_10_MASK		0x0000000000000400UL
+#define UV3H_EVENT_OCCURRED2_RTC_11_MASK		0x0000000000000800UL
+#define UV3H_EVENT_OCCURRED2_RTC_12_MASK		0x0000000000001000UL
+#define UV3H_EVENT_OCCURRED2_RTC_13_MASK		0x0000000000002000UL
+#define UV3H_EVENT_OCCURRED2_RTC_14_MASK		0x0000000000004000UL
+#define UV3H_EVENT_OCCURRED2_RTC_15_MASK		0x0000000000008000UL
+#define UV3H_EVENT_OCCURRED2_RTC_16_MASK		0x0000000000010000UL
+#define UV3H_EVENT_OCCURRED2_RTC_17_MASK		0x0000000000020000UL
+#define UV3H_EVENT_OCCURRED2_RTC_18_MASK		0x0000000000040000UL
+#define UV3H_EVENT_OCCURRED2_RTC_19_MASK		0x0000000000080000UL
+#define UV3H_EVENT_OCCURRED2_RTC_20_MASK		0x0000000000100000UL
+#define UV3H_EVENT_OCCURRED2_RTC_21_MASK		0x0000000000200000UL
+#define UV3H_EVENT_OCCURRED2_RTC_22_MASK		0x0000000000400000UL
+#define UV3H_EVENT_OCCURRED2_RTC_23_MASK		0x0000000000800000UL
+#define UV3H_EVENT_OCCURRED2_RTC_24_MASK		0x0000000001000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_25_MASK		0x0000000002000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_26_MASK		0x0000000004000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_27_MASK		0x0000000008000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_28_MASK		0x0000000010000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_29_MASK		0x0000000020000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_30_MASK		0x0000000040000000UL
+#define UV3H_EVENT_OCCURRED2_RTC_31_MASK		0x0000000080000000UL
+
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_SHFT 0
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_SHFT 1
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_SHFT 2
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_SHFT 3
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_SHFT 4
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_SHFT 5
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_SHFT 6
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_SHFT 7
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_SHFT 8
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_SHFT 9
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_SHFT 10
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_SHFT 11
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_SHFT 12
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_SHFT 13
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_SHFT 14
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_SHFT 15
+#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_SHFT	16
+#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_SHFT	17
+#define UV4H_EVENT_OCCURRED2_RTC_0_SHFT			18
+#define UV4H_EVENT_OCCURRED2_RTC_1_SHFT			19
+#define UV4H_EVENT_OCCURRED2_RTC_2_SHFT			20
+#define UV4H_EVENT_OCCURRED2_RTC_3_SHFT			21
+#define UV4H_EVENT_OCCURRED2_RTC_4_SHFT			22
+#define UV4H_EVENT_OCCURRED2_RTC_5_SHFT			23
+#define UV4H_EVENT_OCCURRED2_RTC_6_SHFT			24
+#define UV4H_EVENT_OCCURRED2_RTC_7_SHFT			25
+#define UV4H_EVENT_OCCURRED2_RTC_8_SHFT			26
+#define UV4H_EVENT_OCCURRED2_RTC_9_SHFT			27
+#define UV4H_EVENT_OCCURRED2_RTC_10_SHFT		28
+#define UV4H_EVENT_OCCURRED2_RTC_11_SHFT		29
+#define UV4H_EVENT_OCCURRED2_RTC_12_SHFT		30
+#define UV4H_EVENT_OCCURRED2_RTC_13_SHFT		31
+#define UV4H_EVENT_OCCURRED2_RTC_14_SHFT		32
+#define UV4H_EVENT_OCCURRED2_RTC_15_SHFT		33
+#define UV4H_EVENT_OCCURRED2_RTC_16_SHFT		34
+#define UV4H_EVENT_OCCURRED2_RTC_17_SHFT		35
+#define UV4H_EVENT_OCCURRED2_RTC_18_SHFT		36
+#define UV4H_EVENT_OCCURRED2_RTC_19_SHFT		37
+#define UV4H_EVENT_OCCURRED2_RTC_20_SHFT		38
+#define UV4H_EVENT_OCCURRED2_RTC_21_SHFT		39
+#define UV4H_EVENT_OCCURRED2_RTC_22_SHFT		40
+#define UV4H_EVENT_OCCURRED2_RTC_23_SHFT		41
+#define UV4H_EVENT_OCCURRED2_RTC_24_SHFT		42
+#define UV4H_EVENT_OCCURRED2_RTC_25_SHFT		43
+#define UV4H_EVENT_OCCURRED2_RTC_26_SHFT		44
+#define UV4H_EVENT_OCCURRED2_RTC_27_SHFT		45
+#define UV4H_EVENT_OCCURRED2_RTC_28_SHFT		46
+#define UV4H_EVENT_OCCURRED2_RTC_29_SHFT		47
+#define UV4H_EVENT_OCCURRED2_RTC_30_SHFT		48
+#define UV4H_EVENT_OCCURRED2_RTC_31_SHFT		49
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT0_MASK 0x0000000000000001UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT1_MASK 0x0000000000000002UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT2_MASK 0x0000000000000004UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT3_MASK 0x0000000000000008UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT4_MASK 0x0000000000000010UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT5_MASK 0x0000000000000020UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT6_MASK 0x0000000000000040UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT7_MASK 0x0000000000000080UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT8_MASK 0x0000000000000100UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT9_MASK 0x0000000000000200UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT10_MASK 0x0000000000000400UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT11_MASK 0x0000000000000800UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT12_MASK 0x0000000000001000UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT13_MASK 0x0000000000002000UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT14_MASK 0x0000000000004000UL
+#define UV4H_EVENT_OCCURRED2_MESSAGE_ACCELERATOR_INT15_MASK 0x0000000000008000UL
+#define UV4H_EVENT_OCCURRED2_RTC_INTERVAL_INT_MASK	0x0000000000010000UL
+#define UV4H_EVENT_OCCURRED2_BAU_DASHBOARD_INT_MASK	0x0000000000020000UL
+#define UV4H_EVENT_OCCURRED2_RTC_0_MASK			0x0000000000040000UL
+#define UV4H_EVENT_OCCURRED2_RTC_1_MASK			0x0000000000080000UL
+#define UV4H_EVENT_OCCURRED2_RTC_2_MASK			0x0000000000100000UL
+#define UV4H_EVENT_OCCURRED2_RTC_3_MASK			0x0000000000200000UL
+#define UV4H_EVENT_OCCURRED2_RTC_4_MASK			0x0000000000400000UL
+#define UV4H_EVENT_OCCURRED2_RTC_5_MASK			0x0000000000800000UL
+#define UV4H_EVENT_OCCURRED2_RTC_6_MASK			0x0000000001000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_7_MASK			0x0000000002000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_8_MASK			0x0000000004000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_9_MASK			0x0000000008000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_10_MASK		0x0000000010000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_11_MASK		0x0000000020000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_12_MASK		0x0000000040000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_13_MASK		0x0000000080000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_14_MASK		0x0000000100000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_15_MASK		0x0000000200000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_16_MASK		0x0000000400000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_17_MASK		0x0000000800000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_18_MASK		0x0000001000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_19_MASK		0x0000002000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_20_MASK		0x0000004000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_21_MASK		0x0000008000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_22_MASK		0x0000010000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_23_MASK		0x0000020000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_24_MASK		0x0000040000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_25_MASK		0x0000080000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_26_MASK		0x0000100000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_27_MASK		0x0000200000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_28_MASK		0x0000400000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_29_MASK		0x0000800000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_30_MASK		0x0001000000000000UL
+#define UV4H_EVENT_OCCURRED2_RTC_31_MASK		0x0002000000000000UL
+
+#define UVXH_EVENT_OCCURRED2_RTC_1_MASK (				\
+	is_uv2_hub() ? UV2H_EVENT_OCCURRED2_RTC_1_MASK :		\
+	is_uv3_hub() ? UV3H_EVENT_OCCURRED2_RTC_1_MASK :		\
+	/*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_RTC_1_MASK)
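The three-way selector above is the pattern used throughout this header: the UVXH_/UVH_ name expands, at run time, to whichever hub-specific value matches the detected hardware. A minimal hedged sketch of a caller follows; it assumes the UVXH_EVENT_OCCURRED2 address macro defined earlier in this header, and the helper function itself is hypothetical, not part of this patch.

	/* Sketch only: test whether RTC 1 raised an event on the local hub.
	 * uv_read_local_mmr() is the existing MMR accessor; the mask macro
	 * resolves to the UV2, UV3 or UV4 value for the hub detected at boot.
	 */
	static inline bool uv_rtc1_event_pending(void)
	{
		return uv_read_local_mmr(UVXH_EVENT_OCCURRED2) &
		       UVXH_EVENT_OCCURRED2_RTC_1_MASK;
	}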
+
+union uvh_event_occurred2_u {
 	unsigned long	v;
-	struct uvxh_event_occurred2_s {
+	struct uv2h_event_occurred2_s {
 		unsigned long	rtc_0:1;			/* RW */
 		unsigned long	rtc_1:1;			/* RW */
 		unsigned long	rtc_2:1;			/* RW */
@@ -2743,25 +3947,129 @@
 		unsigned long	rtc_30:1;			/* RW */
 		unsigned long	rtc_31:1;			/* RW */
 		unsigned long	rsvd_32_63:32;
-	} sx;
+	} s2;
+	struct uv3h_event_occurred2_s {
+		unsigned long	rtc_0:1;			/* RW */
+		unsigned long	rtc_1:1;			/* RW */
+		unsigned long	rtc_2:1;			/* RW */
+		unsigned long	rtc_3:1;			/* RW */
+		unsigned long	rtc_4:1;			/* RW */
+		unsigned long	rtc_5:1;			/* RW */
+		unsigned long	rtc_6:1;			/* RW */
+		unsigned long	rtc_7:1;			/* RW */
+		unsigned long	rtc_8:1;			/* RW */
+		unsigned long	rtc_9:1;			/* RW */
+		unsigned long	rtc_10:1;			/* RW */
+		unsigned long	rtc_11:1;			/* RW */
+		unsigned long	rtc_12:1;			/* RW */
+		unsigned long	rtc_13:1;			/* RW */
+		unsigned long	rtc_14:1;			/* RW */
+		unsigned long	rtc_15:1;			/* RW */
+		unsigned long	rtc_16:1;			/* RW */
+		unsigned long	rtc_17:1;			/* RW */
+		unsigned long	rtc_18:1;			/* RW */
+		unsigned long	rtc_19:1;			/* RW */
+		unsigned long	rtc_20:1;			/* RW */
+		unsigned long	rtc_21:1;			/* RW */
+		unsigned long	rtc_22:1;			/* RW */
+		unsigned long	rtc_23:1;			/* RW */
+		unsigned long	rtc_24:1;			/* RW */
+		unsigned long	rtc_25:1;			/* RW */
+		unsigned long	rtc_26:1;			/* RW */
+		unsigned long	rtc_27:1;			/* RW */
+		unsigned long	rtc_28:1;			/* RW */
+		unsigned long	rtc_29:1;			/* RW */
+		unsigned long	rtc_30:1;			/* RW */
+		unsigned long	rtc_31:1;			/* RW */
+		unsigned long	rsvd_32_63:32;
+	} s3;
+	struct uv4h_event_occurred2_s {
+		unsigned long	message_accelerator_int0:1;	/* RW */
+		unsigned long	message_accelerator_int1:1;	/* RW */
+		unsigned long	message_accelerator_int2:1;	/* RW */
+		unsigned long	message_accelerator_int3:1;	/* RW */
+		unsigned long	message_accelerator_int4:1;	/* RW */
+		unsigned long	message_accelerator_int5:1;	/* RW */
+		unsigned long	message_accelerator_int6:1;	/* RW */
+		unsigned long	message_accelerator_int7:1;	/* RW */
+		unsigned long	message_accelerator_int8:1;	/* RW */
+		unsigned long	message_accelerator_int9:1;	/* RW */
+		unsigned long	message_accelerator_int10:1;	/* RW */
+		unsigned long	message_accelerator_int11:1;	/* RW */
+		unsigned long	message_accelerator_int12:1;	/* RW */
+		unsigned long	message_accelerator_int13:1;	/* RW */
+		unsigned long	message_accelerator_int14:1;	/* RW */
+		unsigned long	message_accelerator_int15:1;	/* RW */
+		unsigned long	rtc_interval_int:1;		/* RW */
+		unsigned long	bau_dashboard_int:1;		/* RW */
+		unsigned long	rtc_0:1;			/* RW */
+		unsigned long	rtc_1:1;			/* RW */
+		unsigned long	rtc_2:1;			/* RW */
+		unsigned long	rtc_3:1;			/* RW */
+		unsigned long	rtc_4:1;			/* RW */
+		unsigned long	rtc_5:1;			/* RW */
+		unsigned long	rtc_6:1;			/* RW */
+		unsigned long	rtc_7:1;			/* RW */
+		unsigned long	rtc_8:1;			/* RW */
+		unsigned long	rtc_9:1;			/* RW */
+		unsigned long	rtc_10:1;			/* RW */
+		unsigned long	rtc_11:1;			/* RW */
+		unsigned long	rtc_12:1;			/* RW */
+		unsigned long	rtc_13:1;			/* RW */
+		unsigned long	rtc_14:1;			/* RW */
+		unsigned long	rtc_15:1;			/* RW */
+		unsigned long	rtc_16:1;			/* RW */
+		unsigned long	rtc_17:1;			/* RW */
+		unsigned long	rtc_18:1;			/* RW */
+		unsigned long	rtc_19:1;			/* RW */
+		unsigned long	rtc_20:1;			/* RW */
+		unsigned long	rtc_21:1;			/* RW */
+		unsigned long	rtc_22:1;			/* RW */
+		unsigned long	rtc_23:1;			/* RW */
+		unsigned long	rtc_24:1;			/* RW */
+		unsigned long	rtc_25:1;			/* RW */
+		unsigned long	rtc_26:1;			/* RW */
+		unsigned long	rtc_27:1;			/* RW */
+		unsigned long	rtc_28:1;			/* RW */
+		unsigned long	rtc_29:1;			/* RW */
+		unsigned long	rtc_30:1;			/* RW */
+		unsigned long	rtc_31:1;			/* RW */
+		unsigned long	rsvd_50_63:14;
+	} s4;
 };
 
 /* ========================================================================= */
 /*                       UVXH_EVENT_OCCURRED2_ALIAS                          */
 /* ========================================================================= */
 #define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL
-#define UVXH_EVENT_OCCURRED2_ALIAS_32 0xb70
+
+#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70
+#define UV3H_EVENT_OCCURRED2_ALIAS_32 0xb70
+#define UV4H_EVENT_OCCURRED2_ALIAS_32 0x610
+#define UVH_EVENT_OCCURRED2_ALIAS_32 (					\
+	is_uv2_hub() ? UV2H_EVENT_OCCURRED2_ALIAS_32 :			\
+	is_uv3_hub() ? UV3H_EVENT_OCCURRED2_ALIAS_32 :			\
+	/*is_uv4_hub*/ UV4H_EVENT_OCCURRED2_ALIAS_32)
 
 
 /* ========================================================================= */
 /*                   UVXH_LB_BAU_SB_ACTIVATION_STATUS_2                      */
 /* ========================================================================= */
-#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
 #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
 #define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
-#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
-#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
+#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2 0xc8130UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_2 (				\
+	is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 :		\
+	is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_2)
+
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0xa10
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_2_32 (				\
+	is_uv2_hub() ? UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 :		\
+	is_uv3_hub() ? UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 :		\
+	/*is_uv4_hub*/ UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_32)
 
 #define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
 #define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
@@ -2772,6 +4080,10 @@
 #define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
 #define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
 
+#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UV4H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+
+
 union uvxh_lb_bau_sb_activation_status_2_u {
 	unsigned long	v;
 	struct uvxh_lb_bau_sb_activation_status_2_s {
@@ -2783,6 +4095,9 @@
 	struct uv3h_lb_bau_sb_activation_status_2_s {
 		unsigned long	aux_error:64;			/* RW */
 	} s3;
+	struct uv4h_lb_bau_sb_activation_status_2_s {
+		unsigned long	aux_error:64;			/* RW */
+	} s4;
 };
 
 /* ========================================================================= */
@@ -2823,26 +4138,6 @@
 };
 
 /* ========================================================================= */
-/*                          UV3H_GR1_GAM_GR_CONFIG                           */
-/* ========================================================================= */
-#define UV3H_GR1_GAM_GR_CONFIG				0x1000028UL
-
-#define UV3H_GR1_GAM_GR_CONFIG_M_SKT_SHFT		0
-#define UV3H_GR1_GAM_GR_CONFIG_SUBSPACE_SHFT		10
-#define UV3H_GR1_GAM_GR_CONFIG_M_SKT_MASK		0x000000000000003fUL
-#define UV3H_GR1_GAM_GR_CONFIG_SUBSPACE_MASK		0x0000000000000400UL
-
-union uv3h_gr1_gam_gr_config_u {
-	unsigned long	v;
-	struct uv3h_gr1_gam_gr_config_s {
-		unsigned long	m_skt:6;			/* RW */
-		unsigned long	undef_6_9:4;			/* Undefined */
-		unsigned long	subspace:1;			/* RW */
-		unsigned long	reserved:53;
-	} s3;
-};
-
-/* ========================================================================= */
 /*                   UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR                   */
 /* ========================================================================= */
 #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR		0x1603000UL
@@ -2924,5 +4219,67 @@
 	} s3;
 };
 
+/* ========================================================================= */
+/*                       UV4H_LB_PROC_INTD_QUEUE_FIRST                       */
+/* ========================================================================= */
+#define UV4H_LB_PROC_INTD_QUEUE_FIRST			0xa4100UL
+
+#define UV4H_LB_PROC_INTD_QUEUE_FIRST_FIRST_PAYLOAD_ADDRESS_SHFT 6
+#define UV4H_LB_PROC_INTD_QUEUE_FIRST_FIRST_PAYLOAD_ADDRESS_MASK 0x00003fffffffffc0UL
+
+union uv4h_lb_proc_intd_queue_first_u {
+	unsigned long	v;
+	struct uv4h_lb_proc_intd_queue_first_s {
+		unsigned long	undef_0_5:6;			/* Undefined */
+		unsigned long	first_payload_address:40;	/* RW */
+	} s4;
+};
+
+/* ========================================================================= */
+/*                       UV4H_LB_PROC_INTD_QUEUE_LAST                        */
+/* ========================================================================= */
+#define UV4H_LB_PROC_INTD_QUEUE_LAST			0xa4108UL
+
+#define UV4H_LB_PROC_INTD_QUEUE_LAST_LAST_PAYLOAD_ADDRESS_SHFT 5
+#define UV4H_LB_PROC_INTD_QUEUE_LAST_LAST_PAYLOAD_ADDRESS_MASK 0x00003fffffffffe0UL
+
+union uv4h_lb_proc_intd_queue_last_u {
+	unsigned long	v;
+	struct uv4h_lb_proc_intd_queue_last_s {
+		unsigned long	undef_0_4:5;			/* Undefined */
+		unsigned long	last_payload_address:41;	/* RW */
+	} s4;
+};
+
+/* ========================================================================= */
+/*                     UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR                      */
+/* ========================================================================= */
+#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR		0xa4118UL
+
+#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR_SOFT_ACK_PENDING_FLAGS_SHFT 0
+#define UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR_SOFT_ACK_PENDING_FLAGS_MASK 0x00000000000000ffUL
+
+union uv4h_lb_proc_intd_soft_ack_clear_u {
+	unsigned long	v;
+	struct uv4h_lb_proc_intd_soft_ack_clear_s {
+		unsigned long	soft_ack_pending_flags:8;	/* WP */
+	} s4;
+};
+
+/* ========================================================================= */
+/*                    UV4H_LB_PROC_INTD_SOFT_ACK_PENDING                     */
+/* ========================================================================= */
+#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING		0xa4110UL
+
+#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING_SOFT_ACK_FLAGS_SHFT 0
+#define UV4H_LB_PROC_INTD_SOFT_ACK_PENDING_SOFT_ACK_FLAGS_MASK 0x00000000000000ffUL
+
+union uv4h_lb_proc_intd_soft_ack_pending_u {
+	unsigned long	v;
+	struct uv4h_lb_proc_intd_soft_ack_pending_s {
+		unsigned long	soft_ack_flags:8;		/* RW */
+	} s4;
+};
+
 
 #endif /* _ASM_X86_UV_UV_MMRS_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 1ae89a2..4dcdf74 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -142,6 +142,44 @@
 struct timespec;
 
 /**
+ * struct x86_legacy_devices - legacy x86 devices
+ *
+ * @pnpbios: this platform can have a PNPBIOS. If this is disabled the platform
+ * 	is known to never have a PNPBIOS.
+ *
+ * These are devices known to require LPC or ISA bus. The definition of legacy
+ * devices adheres to the ACPI 5.2.9.3 IA-PC Boot Architecture flag
+ * ACPI_FADT_LEGACY_DEVICES. These devices consist of user visible devices on
+ * the LPC or ISA bus. User visible devices are devices that have end-user
+ * accessible connectors (for example, LPT parallel port). Legacy devices on
+ * the LPC bus include, for example, serial and parallel ports, a PS/2 keyboard
+ * and mouse, and the floppy disk controller. A system that lacks all known
+ * legacy devices can assume all devices can be detected exclusively via
+ * standard device enumeration mechanisms including the ACPI namespace.
+ *
+ * A system which does not have ACPI_FADT_LEGACY_DEVICES enabled must not
+ * have any of the legacy devices enumerated below present.
+ */
+struct x86_legacy_devices {
+	int pnpbios;
+};
+
+/**
+ * struct x86_legacy_features - legacy x86 features
+ *
+ * @rtc: this device has a CMOS real-time clock present
+ * @ebda_search: it's safe to search for the EBDA signature in the hardware's
+ * 	low RAM
+ * @devices: legacy x86 devices, refer to struct x86_legacy_devices
+ * 	documentation for further details.
+ */
+struct x86_legacy_features {
+	int rtc;
+	int ebda_search;
+	struct x86_legacy_devices devices;
+};
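A hedged sketch of how these flags are meant to be consumed follows; the probe functions below are illustrative only, while x86_platform.legacy.rtc and x86_platform.legacy.devices.pnpbios are the fields added by this patch.

	/* Sketch: consumers check the legacy flags before touching legacy hardware. */
	static int __init example_rtc_setup(void)
	{
		if (!x86_platform.legacy.rtc)
			return -ENODEV;	/* FADT said: no CMOS RTC, don't register it */
		/* ... register the rtc_cmos platform device as before ... */
		return 0;
	}

	static int __init example_pnpbios_probe(void)
	{
		if (!x86_platform.legacy.devices.pnpbios)
			return -ENODEV;	/* platform declared no legacy devices */
		/* ... continue with the normal PNPBIOS signature scan ... */
		return 0;
	}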
+
+/**
  * struct x86_platform_ops - platform specific runtime functions
  * @calibrate_tsc:		calibrate TSC
  * @get_wallclock:		get time from HW clock like RTC etc.
@@ -152,6 +190,14 @@
  * @save_sched_clock_state:	save state for sched_clock() on suspend
  * @restore_sched_clock_state:	restore state for sched_clock() on resume
  * @apic_post_init:		adjust apic if needed
+ * @legacy:			legacy features
+ * @set_legacy_features:	override legacy features. Use of this callback
+ * 				is highly discouraged. You should only need
+ * 				this if your hardware platform requires further
+ * 				custom fine tuning far beyond what may be
+ * 				possible in x86_early_init_platform_quirks() by
+ * 				only using the current x86_hardware_subarch
+ * 				semantics.
  */
 struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
@@ -165,6 +211,8 @@
 	void (*save_sched_clock_state)(void);
 	void (*restore_sched_clock_state)(void);
 	void (*apic_post_init)(void);
+	struct x86_legacy_features legacy;
+	void (*set_legacy_features)(void);
 };
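As the kernel-doc above warns, set_legacy_features is a last-resort hook; a hedged sketch of what installing one could look like is below (everything named example_* is hypothetical and not part of this patch).

	/* Sketch only: a platform that must force a legacy flag back on could
	 * install a callback; nothing in this patch requires doing so. */
	static void example_set_legacy_features(void)
	{
		x86_platform.legacy.rtc = 1;	/* this board really has a CMOS RTC */
	}

	/* ... in that platform's early setup code ... */
	x86_platform.set_legacy_features = example_set_legacy_features;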
 
 struct pci_dev;
@@ -186,6 +234,8 @@
 extern struct x86_platform_ops x86_platform;
 extern struct x86_msi_ops x86_msi;
 extern struct x86_io_apic_ops x86_io_apic_ops;
+
+extern void x86_early_init_platform_quirks(void);
 extern void x86_init_noop(void);
 extern void x86_init_uint_noop(unsigned int unused);
 
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index c54beb4..635eac5 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -550,7 +550,7 @@
 #define XOR_TRY_TEMPLATES				\
 do {							\
 	AVX_XOR_SPEED;					\
-	if (cpu_has_xmm) {				\
+	if (boot_cpu_has(X86_FEATURE_XMM)) {				\
 		xor_speed(&xor_block_pIII_sse);		\
 		xor_speed(&xor_block_sse_pf64);		\
 	} else if (boot_cpu_has(X86_FEATURE_MMX)) {	\
diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h
index 7c0a517..22a7b18 100644
--- a/arch/x86/include/asm/xor_avx.h
+++ b/arch/x86/include/asm/xor_avx.h
@@ -167,12 +167,12 @@
 
 #define AVX_XOR_SPEED \
 do { \
-	if (cpu_has_avx && cpu_has_osxsave) \
+	if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_OSXSAVE)) \
 		xor_speed(&xor_block_avx); \
 } while (0)
 
 #define AVX_SELECT(FASTEST) \
-	(cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
+	(boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_OSXSAVE) ? &xor_block_avx : FASTEST)
 
 #else
 
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 3292543..c18ce67 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -157,7 +157,46 @@
 	__u8  _pad9[276];				/* 0xeec */
 } __attribute__((packed));
 
-enum {
+/**
+ * enum x86_hardware_subarch - x86 hardware subarchitecture
+ *
+ * The x86 hardware_subarch and hardware_subarch_data were added as of the x86
+ * boot protocol 2.07 to help distinguish and support custom x86 boot
+ * sequences. This enum represents accepted values for the x86
+ * hardware_subarch.  When a custom x86 boot sequence (not X86_SUBARCH_PC) does
+ * not have, or simply *cannot* make use of, natural stubs like BIOS or EFI, the
+ * hardware_subarch can be used on the Linux entry path to revector to a
+ * subarchitecture stub when needed. This subarchitecture stub can be used to
+ * set up Linux boot parameters or for special care to account for nonstandard
+ * handling of page tables.
+ *
+ * This enum should only ever be used by x86 code, and the code that uses
+ * it should be well contained and compartmentalized.
+ *
+ * KVM and Xen HVM do not have a subarch as these are expected to follow
+ * standard x86 boot entries. If there is a genuine need for a "hypervisor"
+ * type, that should be considered separately in the future. Future guest types
+ * should seriously consider working with standard x86 boot stubs such as
+ * the BIOS or EFI boot stubs.
+ *
+ * WARNING: this enum is only used for legacy hacks, for platform features that
+ *	    are not easily enumerated or discoverable. You should not ever use
+ *	    this for new features.
+ *
+ * @X86_SUBARCH_PC: Should be used if the hardware is enumerable using standard
+ *	PC mechanisms (PCI, ACPI) and doesn't need a special boot flow.
+ * @X86_SUBARCH_LGUEST: Used for x86 hypervisor demo, lguest
+ * @X86_SUBARCH_XEN: Used for Xen guest types which follow the PV boot path,
+ * 	which start at the asm startup_xen() entry point and later jump to the C
+ * 	xen_start_kernel() entry point. Both domU and dom0 types of guests are
+ * 	currently supported through this PV boot path.
+ * @X86_SUBARCH_INTEL_MID: Used for Intel MID (Mobile Internet Device) platform
+ *	systems which do not have the PCI legacy interfaces.
+ * @X86_SUBARCH_CE4100: Used for the Intel CE media processor (CE4100) SoC
+ * 	for set-top boxes and media devices; the use of a subarch for CE4100
+ * 	is more of a hack...
+ */
+enum x86_hardware_subarch {
 	X86_SUBARCH_PC = 0,
 	X86_SUBARCH_LGUEST,
 	X86_SUBARCH_XEN,
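A hedged sketch of how early boot code can key off this value: the function below is illustrative (the real consumer added in this series is x86_early_init_platform_quirks()), and boot_params.hdr.hardware_subarch is the existing boot-protocol field.

	/* Sketch: derive platform quirks from the boot-protocol subarch value. */
	static void __init example_platform_quirks(void)
	{
		switch (boot_params.hdr.hardware_subarch) {
		case X86_SUBARCH_PC:
			break;				/* standard PC: keep legacy defaults */
		case X86_SUBARCH_XEN:
		case X86_SUBARCH_LGUEST:
			x86_platform.legacy.devices.pnpbios = 0;
			x86_platform.legacy.rtc = 0;	/* paravirt guests: no CMOS RTC */
			break;
		}
	}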
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index cd54147..739c0c5 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -216,9 +216,9 @@
 	__u32 padding[3];
 };
 
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		BIT(0)
-#define KVM_CPUID_FLAG_STATEFUL_FUNC		BIT(1)
-#define KVM_CPUID_FLAG_STATE_READ_NEXT		BIT(2)
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		(1 << 0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC		(1 << 1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT		(1 << 2)
 
 /* for KVM_SET_CPUID2 */
 struct kvm_cpuid2 {
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 8a4add8..b9e9bb2 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -73,6 +73,8 @@
 #define SVM_EXIT_MWAIT_COND    0x08c
 #define SVM_EXIT_XSETBV        0x08d
 #define SVM_EXIT_NPF           0x400
+#define SVM_EXIT_AVIC_INCOMPLETE_IPI		0x401
+#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS	0x402
 
 #define SVM_EXIT_ERR           -1
 
@@ -107,8 +109,10 @@
 	{ SVM_EXIT_SMI,         "smi" }, \
 	{ SVM_EXIT_INIT,        "init" }, \
 	{ SVM_EXIT_VINTR,       "vintr" }, \
+	{ SVM_EXIT_CR0_SEL_WRITE, "cr0_sel_write" }, \
 	{ SVM_EXIT_CPUID,       "cpuid" }, \
 	{ SVM_EXIT_INVD,        "invd" }, \
+	{ SVM_EXIT_PAUSE,       "pause" }, \
 	{ SVM_EXIT_HLT,         "hlt" }, \
 	{ SVM_EXIT_INVLPG,      "invlpg" }, \
 	{ SVM_EXIT_INVLPGA,     "invlpga" }, \
@@ -127,7 +131,10 @@
 	{ SVM_EXIT_MONITOR,     "monitor" }, \
 	{ SVM_EXIT_MWAIT,       "mwait" }, \
 	{ SVM_EXIT_XSETBV,      "xsetbv" }, \
-	{ SVM_EXIT_NPF,         "npf" }
+	{ SVM_EXIT_NPF,         "npf" }, \
+	{ SVM_EXIT_RSM,         "rsm" }, \
+	{ SVM_EXIT_AVIC_INCOMPLETE_IPI,		"avic_incomplete_ipi" }, \
+	{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS,   "avic_unaccelerated_access" }
 
 
 #endif /* _UAPI__SVM_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 616ebd2..0503f5b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -2,7 +2,11 @@
 # Makefile for the linux kernel.
 #
 
-extra-y                := head_$(BITS).o head$(BITS).o head.o vmlinux.lds
+extra-y	:= head_$(BITS).o
+extra-y	+= head$(BITS).o
+extra-y	+= ebda.o
+extra-y	+= platform-quirks.o
+extra-y	+= vmlinux.lds
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
@@ -79,7 +83,6 @@
 obj-y				+= apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
-obj-$(CONFIG_LIVEPATCH)		+= livepatch.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 obj-$(CONFIG_X86_TSC)		+= trace_clock.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8c2f1ef..9414f84 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -136,7 +136,7 @@
 {
 	struct acpi_table_madt *madt = NULL;
 
-	if (!cpu_has_apic)
+	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return -EINVAL;
 
 	madt = (struct acpi_table_madt *)table;
@@ -445,7 +445,6 @@
 		polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 
 	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
-	acpi_penalize_sci_irq(bus_irq, trigger, polarity);
 
 	/*
 	 * stash over-ride to indicate we've been here
@@ -913,6 +912,15 @@
 
 static int __init acpi_parse_fadt(struct acpi_table_header *table)
 {
+	if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_LEGACY_DEVICES)) {
+		pr_debug("ACPI: no legacy devices present\n");
+		x86_platform.legacy.devices.pnpbios = 0;
+	}
+
+	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
+		pr_debug("ACPI: not registering RTC platform device\n");
+		x86_platform.legacy.rtc = 0;
+	}
 
 #ifdef CONFIG_X86_PM_TIMER
 	/* detect the location of the ACPI PM Timer */
@@ -951,7 +959,7 @@
 {
 	int count;
 
-	if (!cpu_has_apic)
+	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return -ENODEV;
 
 	/*
@@ -979,7 +987,7 @@
 	int ret;
 	struct acpi_subtable_proc madt_proc[2];
 
-	if (!cpu_has_apic)
+	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return -ENODEV;
 
 	/*
@@ -1125,7 +1133,7 @@
 	if (acpi_disabled || acpi_noirq)
 		return -ENODEV;
 
-	if (!cpu_has_apic)
+	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return -ENODEV;
 
 	/*
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 25f9093..5cb272a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -11,6 +11,7 @@
 #include <linux/stop_machine.h>
 #include <linux/slab.h>
 #include <linux/kdebug.h>
+#include <asm/text-patching.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 29fa475..a147e67 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -170,15 +170,13 @@
 {
 	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
 	unsigned int mask;
-	int cuid;
 
 	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
 		return 0;
 
 	pci_read_config_dword(link, 0x1d4, &mask);
 
-	cuid = cpu_data(cpu).compute_unit_id;
-	return (mask >> (4 * cuid)) & 0xf;
+	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
 }
 
 int amd_set_subcaches(int cpu, unsigned long mask)
@@ -204,7 +202,7 @@
 		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
 	}
 
-	cuid = cpu_data(cpu).compute_unit_id;
+	cuid = cpu_data(cpu).cpu_core_id;
 	mask <<= 4 * cuid;
 	mask |= (0xf ^ (1 << cuid)) << 26;
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index d356987..60078a6 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -607,7 +607,7 @@
 	long tapic = apic_read(APIC_TMCCT);
 	unsigned long pm = acpi_pm_read_early();
 
-	if (cpu_has_tsc)
+	if (boot_cpu_has(X86_FEATURE_TSC))
 		tsc = rdtsc();
 
 	switch (lapic_cal_loops++) {
@@ -668,7 +668,7 @@
 	*delta = (long)res;
 
 	/* Correct the tsc counter value */
-	if (cpu_has_tsc) {
+	if (boot_cpu_has(X86_FEATURE_TSC)) {
 		res = (((u64)(*deltatsc)) * pm_100ms);
 		do_div(res, deltapm);
 		apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
@@ -760,7 +760,7 @@
 	apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
 		    lapic_timer_frequency);
 
-	if (cpu_has_tsc) {
+	if (boot_cpu_has(X86_FEATURE_TSC)) {
 		apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
 			    "%ld.%04ld MHz.\n",
 			    (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
@@ -1085,7 +1085,7 @@
 {
 	unsigned long flags;
 
-	if (!cpu_has_apic && !apic_from_smp_config())
+	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
 		return;
 
 	local_irq_save(flags);
@@ -1134,7 +1134,7 @@
 	 * Don't do the setup now if we have a SMP BIOS as the
 	 * through-I/O-APIC virtual wire mode might be active.
 	 */
-	if (smp_found_config || !cpu_has_apic)
+	if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
 		return;
 
 	/*
@@ -1227,7 +1227,7 @@
 	unsigned long long tsc = 0, ntsc;
 	long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
-	if (cpu_has_tsc)
+	if (boot_cpu_has(X86_FEATURE_TSC))
 		tsc = rdtsc();
 
 	if (disable_apic) {
@@ -1311,7 +1311,7 @@
 			break;
 		}
 		if (queued) {
-			if (cpu_has_tsc && cpu_khz) {
+			if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
 				ntsc = rdtsc();
 				max_loops = (cpu_khz << 10) - (ntsc - tsc);
 			} else
@@ -1445,7 +1445,7 @@
 {
 	u64 msr;
 
-	if (!cpu_has_apic)
+	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return;
 
 	rdmsrl(MSR_IA32_APICBASE, msr);
@@ -1561,7 +1561,7 @@
 		pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
 		x2apic_mode = 1;
 		x2apic_state = X2APIC_ON;
-	} else if (!cpu_has_x2apic) {
+	} else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
 		x2apic_state = X2APIC_DISABLED;
 	}
 }
@@ -1632,7 +1632,7 @@
  */
 static int __init detect_init_APIC(void)
 {
-	if (!cpu_has_apic) {
+	if (!boot_cpu_has(X86_FEATURE_APIC)) {
 		pr_info("No local APIC present\n");
 		return -1;
 	}
@@ -1711,14 +1711,14 @@
 		goto no_apic;
 	case X86_VENDOR_INTEL:
 		if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
-		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
+		    (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
 			break;
 		goto no_apic;
 	default:
 		goto no_apic;
 	}
 
-	if (!cpu_has_apic) {
+	if (!boot_cpu_has(X86_FEATURE_APIC)) {
 		/*
 		 * Over-ride BIOS and try to enable the local APIC only if
 		 * "lapic" specified.
@@ -2233,19 +2233,19 @@
 		return -1;
 	}
 #ifdef CONFIG_X86_64
-	if (!cpu_has_apic) {
+	if (!boot_cpu_has(X86_FEATURE_APIC)) {
 		disable_apic = 1;
 		pr_info("Apic disabled by BIOS\n");
 		return -1;
 	}
 #else
-	if (!smp_found_config && !cpu_has_apic)
+	if (!smp_found_config && !boot_cpu_has(X86_FEATURE_APIC))
 		return -1;
 
 	/*
 	 * Complain if the BIOS pretends there is one.
 	 */
-	if (!cpu_has_apic &&
+	if (!boot_cpu_has(X86_FEATURE_APIC) &&
 	    APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 		pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
 			boot_cpu_physical_apicid);
@@ -2426,7 +2426,7 @@
 static int __init init_lapic_sysfs(void)
 {
 	/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
-	if (cpu_has_apic)
+	if (boot_cpu_has(X86_FEATURE_APIC))
 		register_syscore_ops(&lapic_syscore_ops);
 
 	return 0;
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 331a7a0..13d19ed 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -100,13 +100,13 @@
 
 static u32 noop_apic_read(u32 reg)
 {
-	WARN_ON_ONCE((cpu_has_apic && !disable_apic));
+	WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
 	return 0;
 }
 
 static void noop_apic_write(u32 reg, u32 v)
 {
-	WARN_ON_ONCE(cpu_has_apic && !disable_apic);
+	WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
 }
 
 struct apic apic_noop = {
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index fdb0fbf..84e33ff 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1454,7 +1454,7 @@
 		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
 	}
 
-	if (cpu_has_apic || apic_from_smp_config())
+	if (boot_cpu_has(X86_FEATURE_APIC) || apic_from_smp_config())
 		disconnect_bsp_APIC(ioapic_i8259.pin != -1);
 }
 
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 28bde88..2a0f225 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -230,7 +230,7 @@
 {
 	int apicid, cpuid;
 
-	if (!cpu_has_apic)
+	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return 0;
 
 	apicid = hard_smp_processor_id();
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index ad59d70..a5e400a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -256,7 +256,8 @@
 	struct irq_desc *desc;
 	int cpu, vector;
 
-	BUG_ON(!data->cfg.vector);
+	if (!data->cfg.vector)
+		return;
 
 	vector = data->cfg.vector;
 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
@@ -943,7 +944,7 @@
 	print_PIC();
 
 	/* don't print out if apic is not there */
-	if (!cpu_has_apic && !apic_from_smp_config())
+	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
 		return 0;
 
 	print_local_APICs(show_lapic);
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 8f4942e..2900315 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -48,12 +48,35 @@
 static u64 gru_dist_base, gru_first_node_paddr = -1LL, gru_last_node_paddr;
 static u64 gru_dist_lmask, gru_dist_umask;
 static union uvh_apicid uvh_apicid;
+
+/* info derived from CPUID */
+static struct {
+	unsigned int apicid_shift;
+	unsigned int apicid_mask;
+	unsigned int socketid_shift;	/* aka pnode_shift for UV1/2/3 */
+	unsigned int pnode_mask;
+	unsigned int gpa_shift;
+} uv_cpuid;
+
 int uv_min_hub_revision_id;
 EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
 unsigned int uv_apicid_hibits;
 EXPORT_SYMBOL_GPL(uv_apicid_hibits);
 
 static struct apic apic_x2apic_uv_x;
+static struct uv_hub_info_s uv_hub_info_node0;
+
+/* Set this to use hardware error handler instead of kernel panic */
+static int disable_uv_undefined_panic = 1;
+unsigned long uv_undefined(char *str)
+{
+	if (likely(!disable_uv_undefined_panic))
+		panic("UV: error: undefined MMR: %s\n", str);
+	else
+		pr_crit("UV: error: undefined MMR: %s\n", str);
+	return ~0ul;	/* cause a machine fault  */
+}
+EXPORT_SYMBOL(uv_undefined);
 
 static unsigned long __init uv_early_read_mmr(unsigned long addr)
 {
@@ -108,21 +131,71 @@
 	case UV3_HUB_PART_NUMBER_X:
 		uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
 		break;
+	case UV4_HUB_PART_NUMBER:
+		uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1;
+		break;
 	}
 
 	uv_hub_info->hub_revision = uv_min_hub_revision_id;
-	pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
+	uv_cpuid.pnode_mask = (1 << m_n_config.s.n_skt) - 1;
+	pnode = (node_id.s.node_id >> 1) & uv_cpuid.pnode_mask;
+	uv_cpuid.gpa_shift = 46;	/* default unless changed */
+
+	pr_info("UV: rev:%d part#:%x nodeid:%04x n_skt:%d pnmsk:%x pn:%x\n",
+		node_id.s.revision, node_id.s.part_number, node_id.s.node_id,
+		m_n_config.s.n_skt, uv_cpuid.pnode_mask, pnode);
 	return pnode;
 }
 
-static void __init early_get_apic_pnode_shift(void)
+/* [copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */
+#define SMT_LEVEL	0	/* leaf 0xb SMT level */
+#define INVALID_TYPE	0	/* leaf 0xb sub-leaf types */
+#define SMT_TYPE	1
+#define CORE_TYPE	2
+#define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
+#define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)
+
+static void set_x2apic_bits(void)
 {
-	uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
-	if (!uvh_apicid.v)
-		/*
-		 * Old bios, use default value
-		 */
-		uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
+	unsigned int eax, ebx, ecx, edx, sub_index;
+	unsigned int sid_shift;
+
+	cpuid(0, &eax, &ebx, &ecx, &edx);
+	if (eax < 0xb) {
+		pr_info("UV: CPU does not have CPUID.11\n");
+		return;
+	}
+	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) {
+		pr_info("UV: CPUID.11 not implemented\n");
+		return;
+	}
+	sid_shift = BITS_SHIFT_NEXT_LEVEL(eax);
+	sub_index = 1;
+	do {
+		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
+		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
+			sid_shift = BITS_SHIFT_NEXT_LEVEL(eax);
+			break;
+		}
+		sub_index++;
+	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
+	uv_cpuid.apicid_shift = 0;
+	uv_cpuid.apicid_mask = (~(-1 << sid_shift));
+	uv_cpuid.socketid_shift = sid_shift;
+}
+
+static void __init early_get_apic_socketid_shift(void)
+{
+	if (is_uv2_hub() || is_uv3_hub())
+		uvh_apicid.v = uv_early_read_mmr(UVH_APICID);
+
+	set_x2apic_bits();
+
+	pr_info("UV: apicid_shift:%d apicid_mask:0x%x\n",
+		uv_cpuid.apicid_shift, uv_cpuid.apicid_mask);
+	pr_info("UV: socketid_shift:%d pnode_mask:0x%x\n",
+		uv_cpuid.socketid_shift, uv_cpuid.pnode_mask);
 }
 
 /*
@@ -150,13 +223,18 @@
 	if (strncmp(oem_id, "SGI", 3) != 0)
 		return 0;
 
+	/* Setup early hub type field in uv_hub_info for Node 0 */
+	uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
+
 	/*
 	 * Determine UV arch type.
 	 *   SGI: UV100/1000
 	 *   SGI2: UV2000/3000
 	 *   SGI3: UV300 (truncated to 4 chars because of different varieties)
+	 *   SGI4: UV400 (truncated to 4 chars because of different varieties)
 	 */
 	uv_hub_info->hub_revision =
+		!strncmp(oem_id, "SGI4", 4) ? UV4_HUB_REVISION_BASE :
 		!strncmp(oem_id, "SGI3", 4) ? UV3_HUB_REVISION_BASE :
 		!strcmp(oem_id, "SGI2") ? UV2_HUB_REVISION_BASE :
 		!strcmp(oem_id, "SGI") ? UV1_HUB_REVISION_BASE : 0;
@@ -165,7 +243,7 @@
 		goto badbios;
 
 	pnodeid = early_get_pnodeid();
-	early_get_apic_pnode_shift();
+	early_get_apic_socketid_shift();
 	x86_platform.is_untracked_pat_range =  uv_is_untracked_pat_range;
 	x86_platform.nmi_init = uv_nmi_init;
 
@@ -211,17 +289,11 @@
 }
 EXPORT_SYMBOL_GPL(is_uv_system);
 
-DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
+void **__uv_hub_info_list;
+EXPORT_SYMBOL_GPL(__uv_hub_info_list);
 
-struct uv_blade_info *uv_blade_info;
-EXPORT_SYMBOL_GPL(uv_blade_info);
-
-short *uv_node_to_blade;
-EXPORT_SYMBOL_GPL(uv_node_to_blade);
-
-short *uv_cpu_to_blade;
-EXPORT_SYMBOL_GPL(uv_cpu_to_blade);
+DEFINE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);
+EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_info);
 
 short uv_possible_blades;
 EXPORT_SYMBOL_GPL(uv_possible_blades);
@@ -229,6 +301,115 @@
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
+/* the following values are used for the per node hub info struct */
+static __initdata unsigned short *_node_to_pnode;
+static __initdata unsigned short _min_socket, _max_socket;
+static __initdata unsigned short _min_pnode, _max_pnode, _gr_table_len;
+static __initdata struct uv_gam_range_entry *uv_gre_table;
+static __initdata struct uv_gam_parameters *uv_gp_table;
+static __initdata unsigned short *_socket_to_node;
+static __initdata unsigned short *_socket_to_pnode;
+static __initdata unsigned short *_pnode_to_socket;
+static __initdata struct uv_gam_range_s *_gr_table;
+#define	SOCK_EMPTY	((unsigned short)~0)
+
+extern int uv_hub_info_version(void)
+{
+	return UV_HUB_INFO_VERSION;
+}
+EXPORT_SYMBOL(uv_hub_info_version);
+
+/* Build GAM range lookup table */
+static __init void build_uv_gr_table(void)
+{
+	struct uv_gam_range_entry *gre = uv_gre_table;
+	struct uv_gam_range_s *grt;
+	unsigned long last_limit = 0, ram_limit = 0;
+	int bytes, i, sid, lsid = -1;
+
+	if (!gre)
+		return;
+
+	bytes = _gr_table_len * sizeof(struct uv_gam_range_s);
+	grt = kzalloc(bytes, GFP_KERNEL);
+	BUG_ON(!grt);
+	_gr_table = grt;
+
+	for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
+		if (gre->type == UV_GAM_RANGE_TYPE_HOLE) {
+			if (!ram_limit) {   /* mark hole between ram/non-ram */
+				ram_limit = last_limit;
+				last_limit = gre->limit;
+				lsid++;
+				continue;
+			}
+			last_limit = gre->limit;
+			pr_info("UV: extra hole in GAM RE table @%d\n",
+				(int)(gre - uv_gre_table));
+			continue;
+		}
+		if (_max_socket < gre->sockid) {
+			pr_err("UV: GAM table sockid(%d) too large(>%d) @%d\n",
+				gre->sockid, _max_socket,
+				(int)(gre - uv_gre_table));
+			continue;
+		}
+		sid = gre->sockid - _min_socket;
+		if (lsid < sid) {		/* new range */
+			grt = &_gr_table[sid];
+			grt->base = lsid;
+			grt->nasid = gre->nasid;
+			grt->limit = last_limit = gre->limit;
+			lsid = sid;
+			continue;
+		}
+		if (lsid == sid && !ram_limit) {	/* update range */
+			if (grt->limit == last_limit) {	/* .. if contiguous */
+				grt->limit = last_limit = gre->limit;
+				continue;
+			}
+		}
+		if (!ram_limit) {		/* non-contiguous ram range */
+			grt++;
+			grt->base = sid - 1;
+			grt->nasid = gre->nasid;
+			grt->limit = last_limit = gre->limit;
+			continue;
+		}
+		grt++;				/* non-contiguous/non-ram */
+		grt->base = grt - _gr_table;	/* base is this entry */
+		grt->nasid = gre->nasid;
+		grt->limit = last_limit = gre->limit;
+		lsid++;
+	}
+
+	/* shorten table if possible */
+	grt++;
+	i = grt - _gr_table;
+	if (i < _gr_table_len) {
+		void *ret;
+
+		bytes = i * sizeof(struct uv_gam_range_s);
+		ret = krealloc(_gr_table, bytes, GFP_KERNEL);
+		if (ret) {
+			_gr_table = ret;
+			_gr_table_len = i;
+		}
+	}
+
+	/* display resultant gam range table */
+	for (i = 0, grt = _gr_table; i < _gr_table_len; i++, grt++) {
+		int gb = grt->base;
+		unsigned long start = gb < 0 ?  0 :
+			(unsigned long)_gr_table[gb].limit << UV_GAM_RANGE_SHFT;
+		unsigned long end =
+			(unsigned long)grt->limit << UV_GAM_RANGE_SHFT;
+
+		pr_info("UV: GAM Range %2d %04x 0x%013lx-0x%013lx (%d)\n",
+			i, grt->nasid, start, end, gb);
+	}
+}
+
 static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 	unsigned long val;
@@ -355,7 +536,6 @@
 
 static unsigned int uv_read_apic_id(void)
 {
-
 	return x2apic_get_apic_id(apic_read(APIC_ID));
 }
 
@@ -430,58 +610,38 @@
 	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
 }
 
-/*
- * Called on boot cpu.
- */
-static __init int boot_pnode_to_blade(int pnode)
-{
-	int blade;
-
-	for (blade = 0; blade < uv_num_possible_blades(); blade++)
-		if (pnode == uv_blade_info[blade].pnode)
-			return blade;
-	BUG();
-}
-
-struct redir_addr {
-	unsigned long redirect;
-	unsigned long alias;
-};
-
+#define	UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH	3
 #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
 
-static __initdata struct redir_addr redir_addrs[] = {
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
-};
-
-static unsigned char get_n_lshift(int m_val)
-{
-	union uv3h_gr0_gam_gr_config_u m_gr_config;
-
-	if (is_uv1_hub())
-		return m_val;
-
-	if (is_uv2_hub())
-		return m_val == 40 ? 40 : 39;
-
-	m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
-	return m_gr_config.s3.m_skt;
-}
-
 static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 {
 	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
 	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
+	unsigned long m_redirect;
+	unsigned long m_overlay;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
-		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
+	for (i = 0; i < UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH; i++) {
+		switch (i) {
+		case 0:
+			m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR;
+			m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR;
+			break;
+		case 1:
+			m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR;
+			m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR;
+			break;
+		case 2:
+			m_redirect = UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR;
+			m_overlay = UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR;
+			break;
+		}
+		alias.v = uv_read_local_mmr(m_overlay);
 		if (alias.s.enable && alias.s.base == 0) {
 			*size = (1UL << alias.s.m_alias);
-			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
-			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
+			redirect.v = uv_read_local_mmr(m_redirect);
+			*base = (unsigned long)redirect.s.dest_base
+							<< DEST_SHIFT;
 			return;
 		}
 	}
@@ -544,6 +704,8 @@
 {
 	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
 	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
+	unsigned long mask = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK;
+	unsigned long base;
 
 	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
 	if (!gru.s.enable) {
@@ -555,8 +717,9 @@
 		map_gru_distributed(gru.v);
 		return;
 	}
-	map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
-	gru_start_paddr = ((u64)gru.s.base << shift);
+	base = (gru.v & mask) >> shift;
+	map_high("GRU", base, shift, shift, max_pnode, map_wb);
+	gru_start_paddr = ((u64)base << shift);
 	gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
 }
 
@@ -595,6 +758,7 @@
 	},
 };
 
+/* UV3 & UV4 have identical MMIOH overlay configs */
 static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
 {
 	union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
@@ -674,7 +838,7 @@
 	unsigned long mmr, base;
 	int shift, enable, m_io, n_io;
 
-	if (is_uv3_hub()) {
+	if (is_uv3_hub() || is_uv4_hub()) {
 		/* Map both MMIOH Regions */
 		map_mmioh_high_uv3(0, min_pnode, max_pnode);
 		map_mmioh_high_uv3(1, min_pnode, max_pnode);
@@ -739,8 +903,8 @@
  */
 static void uv_heartbeat(unsigned long ignored)
 {
-	struct timer_list *timer = &uv_hub_info->scir.timer;
-	unsigned char bits = uv_hub_info->scir.state;
+	struct timer_list *timer = &uv_scir_info->timer;
+	unsigned char bits = uv_scir_info->state;
 
 	/* flip heartbeat bit */
 	bits ^= SCIR_CPU_HEARTBEAT;
@@ -760,14 +924,14 @@
 
 static void uv_heartbeat_enable(int cpu)
 {
-	while (!uv_cpu_hub_info(cpu)->scir.enabled) {
-		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
+	while (!uv_cpu_scir_info(cpu)->enabled) {
+		struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
 
 		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
 		setup_timer(timer, uv_heartbeat, cpu);
 		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
 		add_timer_on(timer, cpu);
-		uv_cpu_hub_info(cpu)->scir.enabled = 1;
+		uv_cpu_scir_info(cpu)->enabled = 1;
 
 		/* also ensure that boot cpu is enabled */
 		cpu = 0;
@@ -777,9 +941,9 @@
 #ifdef CONFIG_HOTPLUG_CPU
 static void uv_heartbeat_disable(int cpu)
 {
-	if (uv_cpu_hub_info(cpu)->scir.enabled) {
-		uv_cpu_hub_info(cpu)->scir.enabled = 0;
-		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
+	if (uv_cpu_scir_info(cpu)->enabled) {
+		uv_cpu_scir_info(cpu)->enabled = 0;
+		del_timer(&uv_cpu_scir_info(cpu)->timer);
 	}
 	uv_set_cpu_scir_bits(cpu, 0xff);
 }
@@ -862,28 +1026,368 @@
 void uv_cpu_init(void)
 {
 	/* CPU 0 initialization will be done via uv_system_init. */
-	if (!uv_blade_info)
+	if (smp_processor_id() == 0)
 		return;
 
-	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
+	uv_hub_info->nr_online_cpus++;
 
 	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
 		set_x2apic_extra_bits(uv_hub_info->pnode);
 }
 
+struct mn {
+	unsigned char	m_val;
+	unsigned char	n_val;
+	unsigned char	m_shift;
+	unsigned char	n_lshift;
+};
+
+static void get_mn(struct mn *mnp)
+{
+	union uvh_rh_gam_config_mmr_u m_n_config;
+	union uv3h_gr0_gam_gr_config_u m_gr_config;
+
+	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
+	mnp->n_val = m_n_config.s.n_skt;
+	if (is_uv4_hub()) {
+		mnp->m_val = 0;
+		mnp->n_lshift = 0;
+	} else if (is_uv3_hub()) {
+		mnp->m_val = m_n_config.s3.m_skt;
+		m_gr_config.v = uv_read_local_mmr(UV3H_GR0_GAM_GR_CONFIG);
+		mnp->n_lshift = m_gr_config.s3.m_skt;
+	} else if (is_uv2_hub()) {
+		mnp->m_val = m_n_config.s2.m_skt;
+		mnp->n_lshift = mnp->m_val == 40 ? 40 : 39;
+	} else if (is_uv1_hub()) {
+		mnp->m_val = m_n_config.s1.m_skt;
+		mnp->n_lshift = mnp->m_val;
+	}
+	mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
+}
+
+void __init uv_init_hub_info(struct uv_hub_info_s *hub_info)
+{
+	struct mn mn = {0};	/* avoid uninitialized warnings */
+	union uvh_node_id_u node_id;
+
+	get_mn(&mn);
+	hub_info->m_val = mn.m_val;
+	hub_info->n_val = mn.n_val;
+	hub_info->m_shift = mn.m_shift;
+	hub_info->n_lshift = mn.n_lshift ? mn.n_lshift : 0;
+
+	hub_info->hub_revision = uv_hub_info->hub_revision;
+	hub_info->pnode_mask = uv_cpuid.pnode_mask;
+	hub_info->min_pnode = _min_pnode;
+	hub_info->min_socket = _min_socket;
+	hub_info->pnode_to_socket = _pnode_to_socket;
+	hub_info->socket_to_node = _socket_to_node;
+	hub_info->socket_to_pnode = _socket_to_pnode;
+	hub_info->gr_table_len = _gr_table_len;
+	hub_info->gr_table = _gr_table;
+	hub_info->gpa_mask = mn.m_val ?
+		(1UL << (mn.m_val + mn.n_val)) - 1 :
+		(1UL << uv_cpuid.gpa_shift) - 1;
+
+	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+	hub_info->gnode_extra =
+		(node_id.s.node_id & ~((1 << mn.n_val) - 1)) >> 1;
+
+	hub_info->gnode_upper =
+		((unsigned long)hub_info->gnode_extra << mn.m_val);
+
+	if (uv_gp_table) {
+		hub_info->global_mmr_base = uv_gp_table->mmr_base;
+		hub_info->global_mmr_shift = uv_gp_table->mmr_shift;
+		hub_info->global_gru_base = uv_gp_table->gru_base;
+		hub_info->global_gru_shift = uv_gp_table->gru_shift;
+		hub_info->gpa_shift = uv_gp_table->gpa_shift;
+		hub_info->gpa_mask = (1UL << hub_info->gpa_shift) - 1;
+	} else {
+		hub_info->global_mmr_base =
+			uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
+					~UV_MMR_ENABLE;
+		hub_info->global_mmr_shift = _UV_GLOBAL_MMR64_PNODE_SHIFT;
+	}
+
+	get_lowmem_redirect(
+		&hub_info->lowmem_remap_base, &hub_info->lowmem_remap_top);
+
+	hub_info->apic_pnode_shift = uv_cpuid.socketid_shift;
+
+	/* show system specific info */
+	pr_info("UV: N:%d M:%d m_shift:%d n_lshift:%d\n",
+		hub_info->n_val, hub_info->m_val,
+		hub_info->m_shift, hub_info->n_lshift);
+
+	pr_info("UV: gpa_mask/shift:0x%lx/%d pnode_mask:0x%x apic_pns:%d\n",
+		hub_info->gpa_mask, hub_info->gpa_shift,
+		hub_info->pnode_mask, hub_info->apic_pnode_shift);
+
+	pr_info("UV: mmr_base/shift:0x%lx/%ld gru_base/shift:0x%lx/%ld\n",
+		hub_info->global_mmr_base, hub_info->global_mmr_shift,
+		hub_info->global_gru_base, hub_info->global_gru_shift);
+
+	pr_info("UV: gnode_upper:0x%lx gnode_extra:0x%x\n",
+		hub_info->gnode_upper, hub_info->gnode_extra);
+}
+
+static void __init decode_gam_params(unsigned long ptr)
+{
+	uv_gp_table = (struct uv_gam_parameters *)ptr;
+
+	pr_info("UV: GAM Params...\n");
+	pr_info("UV: mmr_base/shift:0x%llx/%d gru_base/shift:0x%llx/%d gpa_shift:%d\n",
+		uv_gp_table->mmr_base, uv_gp_table->mmr_shift,
+		uv_gp_table->gru_base, uv_gp_table->gru_shift,
+		uv_gp_table->gpa_shift);
+}
+
+static void __init decode_gam_rng_tbl(unsigned long ptr)
+{
+	struct uv_gam_range_entry *gre = (struct uv_gam_range_entry *)ptr;
+	unsigned long lgre = 0;
+	int index = 0;
+	int sock_min = 999999, pnode_min = 99999;
+	int sock_max = -1, pnode_max = -1;
+
+	uv_gre_table = gre;
+	for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
+		if (!index) {
+			pr_info("UV: GAM Range Table...\n");
+			pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s %3s\n",
+				"Range", "", "Size", "Type", "NASID",
+				"SID", "PN", "PXM");
+		}
+		pr_info(
+		"UV: %2d: 0x%014lx-0x%014lx %5luG %3d   %04x  %02x %02x %3d\n",
+			index++,
+			(unsigned long)lgre << UV_GAM_RANGE_SHFT,
+			(unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
+			((unsigned long)(gre->limit - lgre)) >>
+				(30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
+			gre->type, gre->nasid, gre->sockid,
+			gre->pnode, gre->pxm);
+
+		lgre = gre->limit;
+		if (sock_min > gre->sockid)
+			sock_min = gre->sockid;
+		if (sock_max < gre->sockid)
+			sock_max = gre->sockid;
+		if (pnode_min > gre->pnode)
+			pnode_min = gre->pnode;
+		if (pnode_max < gre->pnode)
+			pnode_max = gre->pnode;
+	}
+	_min_socket = sock_min;
+	_max_socket = sock_max;
+	_min_pnode = pnode_min;
+	_max_pnode = pnode_max;
+	_gr_table_len = index;
+	pr_info(
+	"UV: GRT: %d entries, sockets(min:%x,max:%x) pnodes(min:%x,max:%x)\n",
+		index, _min_socket, _max_socket, _min_pnode, _max_pnode);
+}
+
+static void __init decode_uv_systab(void)
+{
+	struct uv_systab *st;
+	int i;
+
+	st = uv_systab;
+	if ((!st || st->revision < UV_SYSTAB_VERSION_UV4) && !is_uv4_hub())
+		return;
+	if (st->revision != UV_SYSTAB_VERSION_UV4_LATEST) {
+		pr_crit(
+		"UV: BIOS UVsystab version(%x) mismatch, expecting(%x)\n",
+			st->revision, UV_SYSTAB_VERSION_UV4_LATEST);
+		BUG();
+	}
+
+	for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) {
+		unsigned long ptr = st->entry[i].offset;
+
+		if (!ptr)
+			continue;
+
+		ptr = ptr + (unsigned long)st;
+
+		switch (st->entry[i].type) {
+		case UV_SYSTAB_TYPE_GAM_PARAMS:
+			decode_gam_params(ptr);
+			break;
+
+		case UV_SYSTAB_TYPE_GAM_RNG_TBL:
+			decode_gam_rng_tbl(ptr);
+			break;
+		}
+	}
+}
+
+/*
+ * Setup physical blade translations from UVH_NODE_PRESENT_TABLE
+ * .. NB: UVH_NODE_PRESENT_TABLE is going away,
+ * .. being replaced by GAM Range Table
+ */
+static __init void boot_init_possible_blades(struct uv_hub_info_s *hub_info)
+{
+	int i, uv_pb = 0;
+
+	pr_info("UV: NODE_PRESENT_DEPTH = %d\n", UVH_NODE_PRESENT_TABLE_DEPTH);
+	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
+		unsigned long np;
+
+		np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
+		if (np)
+			pr_info("UV: NODE_PRESENT(%d) = 0x%016lx\n", i, np);
+
+		uv_pb += hweight64(np);
+	}
+	if (uv_possible_blades != uv_pb)
+		uv_possible_blades = uv_pb;
+}
+
+static void __init build_socket_tables(void)
+{
+	struct uv_gam_range_entry *gre = uv_gre_table;
+	int num, nump;
+	int cpu, i, lnid;
+	int minsock = _min_socket;
+	int maxsock = _max_socket;
+	int minpnode = _min_pnode;
+	int maxpnode = _max_pnode;
+	size_t bytes;
+
+	if (!gre) {
+		if (is_uv1_hub() || is_uv2_hub() || is_uv3_hub()) {
+			pr_info("UV: No UVsystab socket table, ignoring\n");
+			return;		/* not required */
+		}
+		pr_crit(
+		"UV: Error: UVsystab address translations not available!\n");
+		BUG();
+	}
+
+	/* build socket id -> node id, pnode */
+	num = maxsock - minsock + 1;
+	bytes = num * sizeof(_socket_to_node[0]);
+	_socket_to_node = kmalloc(bytes, GFP_KERNEL);
+	_socket_to_pnode = kmalloc(bytes, GFP_KERNEL);
+
+	nump = maxpnode - minpnode + 1;
+	bytes = nump * sizeof(_pnode_to_socket[0]);
+	_pnode_to_socket = kmalloc(bytes, GFP_KERNEL);
+	BUG_ON(!_socket_to_node || !_socket_to_pnode || !_pnode_to_socket);
+
+	for (i = 0; i < num; i++)
+		_socket_to_node[i] = _socket_to_pnode[i] = SOCK_EMPTY;
+
+	for (i = 0; i < nump; i++)
+		_pnode_to_socket[i] = SOCK_EMPTY;
+
+	/* fill in pnode/node/addr conversion list values */
+	pr_info("UV: GAM Building socket/pnode/pxm conversion tables\n");
+	for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
+		if (gre->type == UV_GAM_RANGE_TYPE_HOLE)
+			continue;
+		i = gre->sockid - minsock;
+		if (_socket_to_pnode[i] != SOCK_EMPTY)
+			continue;	/* duplicate */
+		_socket_to_pnode[i] = gre->pnode;
+		_socket_to_node[i] = gre->pxm;
+
+		i = gre->pnode - minpnode;
+		_pnode_to_socket[i] = gre->sockid;
+
+		pr_info(
+		"UV: sid:%02x type:%d nasid:%04x pn:%02x pxm:%2d pn2s:%2x\n",
+			gre->sockid, gre->type, gre->nasid,
+			_socket_to_pnode[gre->sockid - minsock],
+			_socket_to_node[gre->sockid - minsock],
+			_pnode_to_socket[gre->pnode - minpnode]);
+	}
+
+	/* check socket -> node values */
+	lnid = -1;
+	for_each_present_cpu(cpu) {
+		int nid = cpu_to_node(cpu);
+		int apicid, sockid;
+
+		if (lnid == nid)
+			continue;
+		lnid = nid;
+		apicid = per_cpu(x86_cpu_to_apicid, cpu);
+		sockid = apicid >> uv_cpuid.socketid_shift;
+		i = sockid - minsock;
+
+		if (nid != _socket_to_node[i]) {
+			pr_warn(
+			"UV: %02x: type:%d socket:%02x PXM:%02x != node:%2d\n",
+				i, sockid, gre->type, _socket_to_node[i], nid);
+			_socket_to_node[i] = nid;
+		}
+	}
+
+	/* Setup physical blade to pnode translation from GAM Range Table */
+	bytes = num_possible_nodes() * sizeof(_node_to_pnode[0]);
+	_node_to_pnode = kmalloc(bytes, GFP_KERNEL);
+	BUG_ON(!_node_to_pnode);
+
+	for (lnid = 0; lnid < num_possible_nodes(); lnid++) {
+		unsigned short sockid;
+
+		for (sockid = minsock; sockid <= maxsock; sockid++) {
+			if (lnid == _socket_to_node[sockid - minsock]) {
+				_node_to_pnode[lnid] =
+					_socket_to_pnode[sockid - minsock];
+				break;
+			}
+		}
+		if (sockid > maxsock) {
+			pr_err("UV: socket for node %d not found!\n", lnid);
+			BUG();
+		}
+	}
+
+	/*
+	 * If socket id == pnode or socket id == node for all nodes,
+	 *   the system runs faster by removing the corresponding conversion table.
+	 */
+	pr_info("UV: Checking socket->node/pnode for identity maps\n");
+	if (minsock == 0) {
+		for (i = 0; i < num; i++)
+			if (_socket_to_node[i] == SOCK_EMPTY ||
+				i != _socket_to_node[i])
+				break;
+		if (i >= num) {
+			kfree(_socket_to_node);
+			_socket_to_node = NULL;
+			pr_info("UV: 1:1 socket_to_node table removed\n");
+		}
+	}
+	if (minsock == minpnode) {
+		for (i = 0; i < num; i++)
+			if (_socket_to_pnode[i] != SOCK_EMPTY &&
+				_socket_to_pnode[i] != i + minpnode)
+				break;
+		if (i >= num) {
+			kfree(_socket_to_pnode);
+			_socket_to_pnode = NULL;
+			pr_info("UV: 1:1 socket_to_pnode table removed\n");
+		}
+	}
+}
+
 void __init uv_system_init(void)
 {
-	union uvh_rh_gam_config_mmr_u  m_n_config;
-	union uvh_node_id_u node_id;
-	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
-	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
-	int gnode_extra, min_pnode = 999999, max_pnode = -1;
-	unsigned long mmr_base, present, paddr;
-	unsigned short pnode_mask;
-	unsigned char n_lshift;
-	char *hub = (is_uv1_hub() ? "UV100/1000" :
-		    (is_uv2_hub() ? "UV2000/3000" :
-		    (is_uv3_hub() ? "UV300" : NULL)));
+	struct uv_hub_info_s hub_info = {0};
+	int bytes, cpu, nodeid;
+	unsigned short min_pnode = 9999, max_pnode = 0;
+	char *hub = is_uv4_hub() ? "UV400" :
+		    is_uv3_hub() ? "UV300" :
+		    is_uv2_hub() ? "UV2000/3000" :
+		    is_uv1_hub() ? "UV100/1000" : NULL;
 
 	if (!hub) {
 		pr_err("UV: Unknown/unsupported UV hub\n");
@@ -891,128 +1395,106 @@
 	}
 	pr_info("UV: Found %s hub\n", hub);
 
-	/* We now only need to map the MMRs on UV1 */
-	if (is_uv1_hub())
-		map_low_mmrs();
+	map_low_mmrs();
 
-	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
-	m_val = m_n_config.s.m_skt;
-	n_val = m_n_config.s.n_skt;
-	pnode_mask = (1 << n_val) - 1;
-	n_lshift = get_n_lshift(m_val);
-	mmr_base =
-	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
-	    ~UV_MMR_ENABLE;
-
-	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
-	gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
-	gnode_upper = ((unsigned long)gnode_extra  << m_val);
-	pr_info("UV: N:%d M:%d pnode_mask:0x%x gnode_upper/extra:0x%lx/0x%x n_lshift 0x%x\n",
-			n_val, m_val, pnode_mask, gnode_upper, gnode_extra,
-			n_lshift);
-
-	pr_info("UV: global MMR base 0x%lx\n", mmr_base);
-
-	for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
-		uv_possible_blades +=
-		  hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
+	uv_bios_init();			/* get uv_systab for decoding */
+	decode_uv_systab();
+	build_socket_tables();
+	build_uv_gr_table();
+	uv_init_hub_info(&hub_info);
+	uv_possible_blades = num_possible_nodes();
+	if (!_node_to_pnode)
+		boot_init_possible_blades(&hub_info);
 
 	/* uv_num_possible_blades() is really the hub count */
-	pr_info("UV: Found %d blades, %d hubs\n",
-			is_uv1_hub() ? uv_num_possible_blades() :
-			(uv_num_possible_blades() + 1) / 2,
-			uv_num_possible_blades());
+	pr_info("UV: Found %d hubs, %d nodes, %d cpus\n",
+			uv_num_possible_blades(),
+			num_possible_nodes(),
+			num_possible_cpus());
 
-	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-	uv_blade_info = kzalloc(bytes, GFP_KERNEL);
-	BUG_ON(!uv_blade_info);
-
-	for (blade = 0; blade < uv_num_possible_blades(); blade++)
-		uv_blade_info[blade].memory_nid = -1;
-
-	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
-
-	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
-	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
-	BUG_ON(!uv_node_to_blade);
-	memset(uv_node_to_blade, 255, bytes);
-
-	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
-	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
-	BUG_ON(!uv_cpu_to_blade);
-	memset(uv_cpu_to_blade, 255, bytes);
-
-	blade = 0;
-	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
-		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
-		for (j = 0; j < 64; j++) {
-			if (!test_bit(j, &present))
-				continue;
-			pnode = (i * 64 + j) & pnode_mask;
-			uv_blade_info[blade].pnode = pnode;
-			uv_blade_info[blade].nr_possible_cpus = 0;
-			uv_blade_info[blade].nr_online_cpus = 0;
-			spin_lock_init(&uv_blade_info[blade].nmi_lock);
-			min_pnode = min(pnode, min_pnode);
-			max_pnode = max(pnode, max_pnode);
-			blade++;
-		}
-	}
-
-	uv_bios_init();
 	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
 			    &sn_region_size, &system_serial_number);
+	hub_info.coherency_domain_number = sn_coherency_id;
 	uv_rtc_init();
 
-	for_each_present_cpu(cpu) {
+	bytes = sizeof(void *) * uv_num_possible_blades();
+	__uv_hub_info_list = kzalloc(bytes, GFP_KERNEL);
+	BUG_ON(!__uv_hub_info_list);
+
+	bytes = sizeof(struct uv_hub_info_s);
+	for_each_node(nodeid) {
+		struct uv_hub_info_s *new_hub;
+
+		if (__uv_hub_info_list[nodeid]) {
+			pr_err("UV: Node %d UV HUB already initialized!?\n",
+				nodeid);
+			BUG();
+		}
+
+		/* Allocate new per hub info list */
+		new_hub = (nodeid == 0) ?
+			&uv_hub_info_node0 :
+			kzalloc_node(bytes, GFP_KERNEL, nodeid);
+		BUG_ON(!new_hub);
+		__uv_hub_info_list[nodeid] = new_hub;
+		new_hub = uv_hub_info_list(nodeid);
+		BUG_ON(!new_hub);
+		*new_hub = hub_info;
+
+		/* Use information from GAM table if available */
+		if (_node_to_pnode)
+			new_hub->pnode = _node_to_pnode[nodeid];
+		else	/* Fill in during cpu loop */
+			new_hub->pnode = 0xffff;
+		new_hub->numa_blade_id = uv_node_to_blade_id(nodeid);
+		new_hub->memory_nid = -1;
+		new_hub->nr_possible_cpus = 0;
+		new_hub->nr_online_cpus = 0;
+	}
+
+	/* Initialize per cpu info */
+	for_each_possible_cpu(cpu) {
 		int apicid = per_cpu(x86_cpu_to_apicid, cpu);
+		int numa_node_id;
+		unsigned short pnode;
 
-		nid = cpu_to_node(cpu);
-		/*
-		 * apic_pnode_shift must be set before calling uv_apicid_to_pnode();
-		 */
-		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
-		uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
-		uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;
-
-		uv_cpu_hub_info(cpu)->m_shift = 64 - m_val;
-		uv_cpu_hub_info(cpu)->n_lshift = n_lshift;
-
+		nodeid = cpu_to_node(cpu);
+		numa_node_id = numa_cpu_node(cpu);
 		pnode = uv_apicid_to_pnode(apicid);
-		blade = boot_pnode_to_blade(pnode);
-		lcpu = uv_blade_info[blade].nr_possible_cpus;
-		uv_blade_info[blade].nr_possible_cpus++;
 
-		/* Any node on the blade, else will contain -1. */
-		uv_blade_info[blade].memory_nid = nid;
-
-		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
-		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
-		uv_cpu_hub_info(cpu)->m_val = m_val;
-		uv_cpu_hub_info(cpu)->n_val = n_val;
-		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
-		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
-		uv_cpu_hub_info(cpu)->pnode = pnode;
-		uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
-		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
-		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
-		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
-		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
-		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
-		uv_node_to_blade[nid] = blade;
-		uv_cpu_to_blade[cpu] = blade;
+		uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid);
+		uv_cpu_info_per(cpu)->blade_cpu_id =
+			uv_cpu_hub_info(cpu)->nr_possible_cpus++;
+		if (uv_cpu_hub_info(cpu)->memory_nid == -1)
+			uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu);
+		if (nodeid != numa_node_id &&	/* init memoryless node */
+		    uv_hub_info_list(numa_node_id)->pnode == 0xffff)
+			uv_hub_info_list(numa_node_id)->pnode = pnode;
+		else if (uv_cpu_hub_info(cpu)->pnode == 0xffff)
+			uv_cpu_hub_info(cpu)->pnode = pnode;
+		uv_cpu_scir_info(cpu)->offset = uv_scir_offset(apicid);
 	}
 
-	/* Add blade/pnode info for nodes without cpus */
-	for_each_online_node(nid) {
-		if (uv_node_to_blade[nid] >= 0)
-			continue;
-		paddr = node_start_pfn(nid) << PAGE_SHIFT;
-		pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
-		blade = boot_pnode_to_blade(pnode);
-		uv_node_to_blade[nid] = blade;
+	for_each_node(nodeid) {
+		unsigned short pnode = uv_hub_info_list(nodeid)->pnode;
+
+		/* Add pnode info for pre-GAM list nodes without cpus */
+		if (pnode == 0xffff) {
+			unsigned long paddr;
+
+			paddr = node_start_pfn(nodeid) << PAGE_SHIFT;
+			pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
+			uv_hub_info_list(nodeid)->pnode = pnode;
+		}
+		min_pnode = min(pnode, min_pnode);
+		max_pnode = max(pnode, max_pnode);
+		pr_info("UV: UVHUB node:%2d pn:%02x nrcpus:%d\n",
+			nodeid,
+			uv_hub_info_list(nodeid)->pnode,
+			uv_hub_info_list(nodeid)->nr_possible_cpus);
 	}
 
+	pr_info("UV: min_pnode:%02x max_pnode:%02x\n", min_pnode, max_pnode);
 	map_gru_high(max_pnode);
 	map_mmr_high(max_pnode);
 	map_mmioh_high(min_pnode, max_pnode);
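The identity-map pruning at the end of build_socket_tables() above is easiest to see in isolation: if every populated entry maps socket i back to node i, the table carries no information and can be freed. A minimal standalone sketch of that check, using made-up table contents and a stand-in SOCK_EMPTY value:

	#include <stdio.h>

	#define SOCK_EMPTY	(-1)

	/* Return 1 when every entry maps index i to value i, i.e. the table is redundant. */
	static int is_identity_map(const int *socket_to_node, int num)
	{
		int i;

		for (i = 0; i < num; i++)
			if (socket_to_node[i] == SOCK_EMPTY || socket_to_node[i] != i)
				return 0;
		return 1;
	}

	int main(void)
	{
		int tbl[] = { 0, 1, 2, 3 };	/* hypothetical 1:1 socket->node mapping */

		if (is_identity_map(tbl, 4))
			printf("1:1 socket_to_node table can be removed\n");
		return 0;
	}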
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 9307f18..c7364bd 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -2267,7 +2267,7 @@
 
 	dmi_check_system(apm_dmi_table);
 
-	if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) {
+	if (apm_info.bios.version == 0 || machine_is_olpc()) {
 		printk(KERN_INFO "apm: BIOS not found.\n");
 		return -ENODEV;
 	}
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 5c04246..674134e 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -80,6 +80,7 @@
 	OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
 	OFFSET(BP_version, boot_params, hdr.version);
 	OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
+	OFFSET(BP_init_size, boot_params, hdr.init_size);
 	OFFSET(BP_pref_address, boot_params, hdr.pref_address);
 	OFFSET(BP_code32_start, boot_params, hdr.code32_start);
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 6e47e3a..c343a54 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -300,7 +300,6 @@
 #ifdef CONFIG_SMP
 static void amd_get_topology(struct cpuinfo_x86 *c)
 {
-	u32 cores_per_cu = 1;
 	u8 node_id;
 	int cpu = smp_processor_id();
 
@@ -313,8 +312,8 @@
 
 		/* get compute unit information */
 		smp_num_siblings = ((ebx >> 8) & 3) + 1;
-		c->compute_unit_id = ebx & 0xff;
-		cores_per_cu += ((ebx >> 8) & 3);
+		c->x86_max_cores /= smp_num_siblings;
+		c->cpu_core_id = ebx & 0xff;
 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 		u64 value;
 
@@ -325,19 +324,16 @@
 
 	/* fixup multi-node processor information */
 	if (nodes_per_socket > 1) {
-		u32 cores_per_node;
 		u32 cus_per_node;
 
 		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
-		cores_per_node = c->x86_max_cores / nodes_per_socket;
-		cus_per_node = cores_per_node / cores_per_cu;
+		cus_per_node = c->x86_max_cores / nodes_per_socket;
 
 		/* store NodeID, use llc_shared_map to store sibling info */
 		per_cpu(cpu_llc_id, cpu) = node_id;
 
 		/* core id has to be in the [0 .. cores_per_node - 1] range */
-		c->cpu_core_id %= cores_per_node;
-		c->compute_unit_id %= cus_per_node;
+		c->cpu_core_id %= cus_per_node;
 	}
 }
 #endif
@@ -569,14 +565,17 @@
 	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
 	 * after 16h.
 	 */
-	if (cpu_has_apic && c->x86 > 0x16) {
-		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
-	} else if (cpu_has_apic && c->x86 >= 0xf) {
-		/* check CPU config space for extended APIC ID */
-		unsigned int val;
-		val = read_pci_config(0, 24, 0, 0x68);
-		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
+	if (boot_cpu_has(X86_FEATURE_APIC)) {
+		if (c->x86 > 0x16)
 			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
+		else if (c->x86 >= 0xf) {
+			/* check CPU config space for extended APIC ID */
+			unsigned int val;
+
+			val = read_pci_config(0, 24, 0, 0x68);
+			if ((val >> 17 & 0x3) == 0x3)
+				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
+		}
 	}
 #endif
 
@@ -632,6 +631,7 @@
 	 */
 	msr_set_bit(MSR_K7_HWCR, 6);
 #endif
+	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
 }
 
 static void init_amd_gh(struct cpuinfo_x86 *c)
@@ -750,7 +750,7 @@
 	if (c->x86 >= 0xf)
 		set_cpu_cap(c, X86_FEATURE_K8);
 
-	if (cpu_has_xmm2) {
+	if (cpu_has(c, X86_FEATURE_XMM2)) {
 		/* MFENCE stops RDTSC speculation */
 		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 	}
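The reworked amd_get_topology() above derives the core id from the compute-unit information instead of counting cores per compute unit separately. A sketch of the arithmetic with hypothetical topology-leaf values (not the kernel code path, just the same calculation):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ebx = 0x00000105;		/* hypothetical topology-leaf EBX */
		unsigned int x86_max_cores = 16;	/* hypothetical, before adjustment */
		unsigned int nodes_per_socket = 2;	/* hypothetical */

		unsigned int smp_num_siblings = ((ebx >> 8) & 3) + 1;	/* threads per compute unit */
		unsigned int cpu_core_id = ebx & 0xff;			/* compute unit id */

		x86_max_cores /= smp_num_siblings;	/* cores -> compute units */

		if (nodes_per_socket > 1) {
			unsigned int cus_per_node = x86_max_cores / nodes_per_socket;

			cpu_core_id %= cus_per_node;	/* keep id in [0 .. cus_per_node - 1] */
		}

		printf("siblings=%u core_id=%u max_cores=%u\n",
		       smp_num_siblings, cpu_core_id, x86_max_cores);
		return 0;
	}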
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8394b3d..6ef6ed9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -430,7 +430,7 @@
 #ifdef CONFIG_X86_32
 	loadsegment(fs, __KERNEL_PERCPU);
 #else
-	loadsegment(gs, 0);
+	__loadsegment_simple(gs, 0);
 	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
 	load_stack_canary_segment();
@@ -717,6 +717,13 @@
 		}
 	}
 
+	if (c->extended_cpuid_level >= 0x80000007) {
+		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
+
+		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
+		c->x86_power = edx;
+	}
+
 	if (c->extended_cpuid_level >= 0x80000008) {
 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
 
@@ -729,9 +736,6 @@
 		c->x86_phys_bits = 36;
 #endif
 
-	if (c->extended_cpuid_level >= 0x80000007)
-		c->x86_power = cpuid_edx(0x80000007);
-
 	if (c->extended_cpuid_level >= 0x8000000a)
 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 
@@ -862,30 +866,34 @@
 #else
 	set_cpu_cap(c, X86_FEATURE_NOPL);
 #endif
+}
 
+static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
 	/*
-	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
-	 * systems that run Linux at CPL > 0 may or may not have the
-	 * issue, but, even if they have the issue, there's absolutely
-	 * nothing we can do about it because we can't use the real IRET
-	 * instruction.
+	 * Empirically, writing zero to a segment selector on AMD does
+	 * not clear the base, whereas writing zero to a segment
+	 * selector on Intel does clear the base.  Intel's behavior
+	 * allows slightly faster context switches in the common case
+	 * where GS is unused by the prev and next threads.
 	 *
-	 * NB: For the time being, only 32-bit kernels support
-	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
-	 * whether to apply espfix using paravirt hooks.  If any
-	 * non-paravirt system ever shows up that does *not* have the
-	 * ESPFIX issue, we can change this.
+	 * Since neither vendor documents this anywhere that I can see,
+	 * detect it directly instead of hardcoding the choice by
+	 * vendor.
+	 *
+	 * I've designated AMD's behavior as the "bug" because it's
+	 * counterintuitive and less friendly.
 	 */
-#ifdef CONFIG_X86_32
-#ifdef CONFIG_PARAVIRT
-	do {
-		extern void native_iret(void);
-		if (pv_cpu_ops.iret == native_iret)
-			set_cpu_bug(c, X86_BUG_ESPFIX);
-	} while (0);
-#else
-	set_cpu_bug(c, X86_BUG_ESPFIX);
-#endif
+
+	unsigned long old_base, tmp;
+	rdmsrl(MSR_FS_BASE, old_base);
+	wrmsrl(MSR_FS_BASE, 1);
+	loadsegment(fs, 0);
+	rdmsrl(MSR_FS_BASE, tmp);
+	if (tmp != 0)
+		set_cpu_bug(c, X86_BUG_NULL_SEG);
+	wrmsrl(MSR_FS_BASE, old_base);
 #endif
 }
 
@@ -921,6 +929,33 @@
 	get_model_name(c); /* Default name */
 
 	detect_nopl(c);
+
+	detect_null_seg_behavior(c);
+
+	/*
+	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
+	 * systems that run Linux at CPL > 0 may or may not have the
+	 * issue, but, even if they have the issue, there's absolutely
+	 * nothing we can do about it because we can't use the real IRET
+	 * instruction.
+	 *
+	 * NB: For the time being, only 32-bit kernels support
+	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
+	 * whether to apply espfix using paravirt hooks.  If any
+	 * non-paravirt system ever shows up that does *not* have the
+	 * ESPFIX issue, we can change this.
+	 */
+#ifdef CONFIG_X86_32
+# ifdef CONFIG_PARAVIRT
+	do {
+		extern void native_iret(void);
+		if (pv_cpu_ops.iret == native_iret)
+			set_cpu_bug(c, X86_BUG_ESPFIX);
+	} while (0);
+# else
+	set_cpu_bug(c, X86_BUG_ESPFIX);
+# endif
+#endif
 }
 
 static void x86_init_cache_qos(struct cpuinfo_x86 *c)
@@ -1076,12 +1111,12 @@
 	struct tss_struct *tss;
 	int cpu;
 
+	if (!boot_cpu_has(X86_FEATURE_SEP))
+		return;
+
 	cpu = get_cpu();
 	tss = &per_cpu(cpu_tss, cpu);
 
-	if (!boot_cpu_has(X86_FEATURE_SEP))
-		goto out;
-
 	/*
 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
 	 * see the big comment in struct x86_hw_tss's definition.
@@ -1096,7 +1131,6 @@
 
 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 
-out:
 	put_cpu();
 }
 #endif
@@ -1528,7 +1562,7 @@
 	pr_info("Initializing CPU#%d\n", cpu);
 
 	if (cpu_feature_enabled(X86_FEATURE_VME) ||
-	    cpu_has_tsc ||
+	    boot_cpu_has(X86_FEATURE_TSC) ||
 	    boot_cpu_has(X86_FEATURE_DE))
 		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 6adef9c..bd9dcd6 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -333,7 +333,7 @@
 		switch (dir0_lsn) {
 		case 0xd:  /* either a 486SLC or DLC w/o DEVID */
 			dir0_msn = 0;
-			p = Cx486_name[(cpu_has_fpu ? 1 : 0)];
+			p = Cx486_name[!!boot_cpu_has(X86_FEATURE_FPU)];
 			break;
 
 		case 0xe:  /* a 486S A step */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1f7fdb9..8dae51f 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -152,9 +152,9 @@
 	 *  the TLB when any changes are made to any of the page table entries.
 	 *  The operating system must reload CR3 to cause the TLB to be flushed"
 	 *
-	 * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
-	 * be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
-	 * to be modified
+	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
+	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
+	 * to be modified.
 	 */
 	if (c->x86 == 5 && c->x86_model == 9) {
 		pr_info("Disabling PGE capability bit\n");
@@ -233,7 +233,7 @@
 	 * The Quark is also family 5, but does not have the same bug.
 	 */
 	clear_cpu_bug(c, X86_BUG_F00F);
-	if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) {
+	if (c->x86 == 5 && c->x86_model < 9) {
 		static int f00f_workaround_enabled;
 
 		set_cpu_bug(c, X86_BUG_F00F);
@@ -281,7 +281,7 @@
 	 * integrated APIC (see 11AP erratum in "Pentium Processor
 	 * Specification Update").
 	 */
-	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
+	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
 	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
 		set_cpu_bug(c, X86_BUG_11AP);
 
@@ -336,7 +336,7 @@
 {
 	unsigned int eax, ebx, ecx, edx;
 
-	if (c->cpuid_level < 4)
+	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
 		return 1;
 
 	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
@@ -456,7 +456,7 @@
 			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
-	if (cpu_has_xmm2)
+	if (cpu_has(c, X86_FEATURE_XMM2))
 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 
 	if (boot_cpu_has(X86_FEATURE_DS)) {
@@ -468,7 +468,7 @@
 			set_cpu_cap(c, X86_FEATURE_PEBS);
 	}
 
-	if (c->x86 == 6 && cpu_has_clflush &&
+	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
 	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
 		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 0a85010..93d824e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -26,10 +26,56 @@
 static LLIST_HEAD(mce_event_llist);
 static char gen_pool_buf[MCE_POOLSZ];
 
+/*
+ * Compare the record "t" with each of the records on list "l" to see if
+ * an equivalent one is present in the list.
+ */
+static bool is_duplicate_mce_record(struct mce_evt_llist *t, struct mce_evt_llist *l)
+{
+	struct mce_evt_llist *node;
+	struct mce *m1, *m2;
+
+	m1 = &t->mce;
+
+	llist_for_each_entry(node, &l->llnode, llnode) {
+		m2 = &node->mce;
+
+		if (!mce_cmp(m1, m2))
+			return true;
+	}
+	return false;
+}
+
+/*
+ * The system has panicked - we'd like to peruse the list of MCE records
+ * that have been queued, but not seen by anyone yet.  The list is in
+ * reverse time order, so we need to reverse it. While doing that we can
+ * also drop duplicate records (these were logged because some banks are
+ * shared between cores or by all threads on a socket).
+ */
+struct llist_node *mce_gen_pool_prepare_records(void)
+{
+	struct llist_node *head;
+	LLIST_HEAD(new_head);
+	struct mce_evt_llist *node, *t;
+
+	head = llist_del_all(&mce_event_llist);
+	if (!head)
+		return NULL;
+
+	/* squeeze out duplicates while reversing order */
+	llist_for_each_entry_safe(node, t, head, llnode) {
+		if (!is_duplicate_mce_record(node, t))
+			llist_add(&node->llnode, &new_head);
+	}
+
+	return new_head.first;
+}
+
 void mce_gen_pool_process(void)
 {
 	struct llist_node *head;
-	struct mce_evt_llist *node;
+	struct mce_evt_llist *node, *tmp;
 	struct mce *mce;
 
 	head = llist_del_all(&mce_event_llist);
@@ -37,7 +83,7 @@
 		return;
 
 	head = llist_reverse_order(head);
-	llist_for_each_entry(node, head, llnode) {
+	llist_for_each_entry_safe(node, tmp, head, llnode) {
 		mce = &node->mce;
 		atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
 		gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 547720e..cd74a3f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -35,6 +35,7 @@
 bool mce_gen_pool_empty(void);
 int mce_gen_pool_add(struct mce *mce);
 int mce_gen_pool_init(void);
+struct llist_node *mce_gen_pool_prepare_records(void);
 
 extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp);
 struct dentry *mce_get_debugfs_dir(void);
@@ -81,3 +82,17 @@
 #endif
 
 void mce_inject_log(struct mce *m);
+
+/*
+ * We consider records to be equivalent if bank+status+addr+misc all match.
+ * This is only used when the system is going down because of a fatal error
+ * to avoid cluttering the console log with essentially repeated information.
+ * In normal processing all errors seen are logged.
+ */
+static inline bool mce_cmp(struct mce *m1, struct mce *m2)
+{
+	return m1->bank != m2->bank ||
+		m1->status != m2->status ||
+		m1->addr != m2->addr ||
+		m1->misc != m2->misc;
+}
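Note that mce_cmp() above follows memcmp()-style semantics: it returns true when the two records differ, which is why is_duplicate_mce_record() in the mce-genpool.c hunk above negates its result. A minimal sketch of the same comparison outside the kernel, with made-up record values:

	#include <stdio.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct rec { uint64_t bank, status, addr, misc; };

	/* memcmp()-like: true means the two records differ */
	static bool rec_cmp(const struct rec *a, const struct rec *b)
	{
		return a->bank != b->bank || a->status != b->status ||
		       a->addr != b->addr || a->misc != b->misc;
	}

	int main(void)
	{
		struct rec a = { 4, 0xb200000000000000ULL, 0, 0 };	/* hypothetical values */
		struct rec b = a;					/* duplicate of a */

		printf("%s\n", rec_cmp(&a, &b) ? "different" : "duplicate");
		return 0;
	}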
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 5119766..631356c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -204,6 +204,33 @@
 	return IN_KERNEL;
 }
 
+static int mce_severity_amd_smca(struct mce *m, int err_ctx)
+{
+	u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
+	u32 low, high;
+
+	/*
+	 * We need to look at the following bits:
+	 * - "succor" bit (data poisoning support), and
+	 * - TCC bit (Task Context Corrupt)
+	 * in MCi_STATUS to determine error severity.
+	 */
+	if (!mce_flags.succor)
+		return MCE_PANIC_SEVERITY;
+
+	if (rdmsr_safe(addr, &low, &high))
+		return MCE_PANIC_SEVERITY;
+
+	/* TCC (Task context corrupt). If set and if IN_KERNEL, panic. */
+	if ((low & MCI_CONFIG_MCAX) &&
+	    (m->status & MCI_STATUS_TCC) &&
+	    (err_ctx == IN_KERNEL))
+		return MCE_PANIC_SEVERITY;
+
+	 /* ...otherwise invoke hwpoison handler. */
+	return MCE_AR_SEVERITY;
+}
+
 /*
  * See AMD Error Scope Hierarchy table in a newer BKDG. For example
  * 49125_15h_Models_30h-3Fh_BKDG.pdf, section "RAS Features"
@@ -225,6 +252,9 @@
 		 * to at least kill process to prolong system operation.
 		 */
 		if (mce_flags.overflow_recov) {
+			if (mce_flags.smca)
+				return mce_severity_amd_smca(m, ctx);
+
 			/* software can try to contain */
 			if (!(m->mcgstatus & MCG_STATUS_RIPV) && (ctx == IN_KERNEL))
 				return MCE_PANIC_SEVERITY;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index f0c921b..92e5e37 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -161,7 +161,6 @@
 	if (!mce_gen_pool_add(mce))
 		irq_work_queue(&mce_irq_work);
 
-	mce->finished = 0;
 	wmb();
 	for (;;) {
 		entry = mce_log_get_idx_check(mcelog.next);
@@ -194,7 +193,6 @@
 	mcelog.entry[entry].finished = 1;
 	wmb();
 
-	mce->finished = 1;
 	set_bit(0, &mce_need_notify);
 }
 
@@ -224,6 +222,53 @@
 }
 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
 
+static inline u32 ctl_reg(int bank)
+{
+	return MSR_IA32_MCx_CTL(bank);
+}
+
+static inline u32 status_reg(int bank)
+{
+	return MSR_IA32_MCx_STATUS(bank);
+}
+
+static inline u32 addr_reg(int bank)
+{
+	return MSR_IA32_MCx_ADDR(bank);
+}
+
+static inline u32 misc_reg(int bank)
+{
+	return MSR_IA32_MCx_MISC(bank);
+}
+
+static inline u32 smca_ctl_reg(int bank)
+{
+	return MSR_AMD64_SMCA_MCx_CTL(bank);
+}
+
+static inline u32 smca_status_reg(int bank)
+{
+	return MSR_AMD64_SMCA_MCx_STATUS(bank);
+}
+
+static inline u32 smca_addr_reg(int bank)
+{
+	return MSR_AMD64_SMCA_MCx_ADDR(bank);
+}
+
+static inline u32 smca_misc_reg(int bank)
+{
+	return MSR_AMD64_SMCA_MCx_MISC(bank);
+}
+
+struct mca_msr_regs msr_ops = {
+	.ctl	= ctl_reg,
+	.status	= status_reg,
+	.addr	= addr_reg,
+	.misc	= misc_reg
+};
+
 static void print_mce(struct mce *m)
 {
 	int ret = 0;
@@ -290,7 +335,9 @@
 
 static void mce_panic(const char *msg, struct mce *final, char *exp)
 {
-	int i, apei_err = 0;
+	int apei_err = 0;
+	struct llist_node *pending;
+	struct mce_evt_llist *l;
 
 	if (!fake_panic) {
 		/*
@@ -307,11 +354,10 @@
 		if (atomic_inc_return(&mce_fake_panicked) > 1)
 			return;
 	}
+	pending = mce_gen_pool_prepare_records();
 	/* First print corrected ones that are still unlogged */
-	for (i = 0; i < MCE_LOG_LEN; i++) {
-		struct mce *m = &mcelog.entry[i];
-		if (!(m->status & MCI_STATUS_VAL))
-			continue;
+	llist_for_each_entry(l, pending, llnode) {
+		struct mce *m = &l->mce;
 		if (!(m->status & MCI_STATUS_UC)) {
 			print_mce(m);
 			if (!apei_err)
@@ -319,13 +365,11 @@
 		}
 	}
 	/* Now print uncorrected but with the final one last */
-	for (i = 0; i < MCE_LOG_LEN; i++) {
-		struct mce *m = &mcelog.entry[i];
-		if (!(m->status & MCI_STATUS_VAL))
-			continue;
+	llist_for_each_entry(l, pending, llnode) {
+		struct mce *m = &l->mce;
 		if (!(m->status & MCI_STATUS_UC))
 			continue;
-		if (!final || memcmp(m, final, sizeof(struct mce))) {
+		if (!final || mce_cmp(m, final)) {
 			print_mce(m);
 			if (!apei_err)
 				apei_err = apei_write_mce(m);
@@ -356,11 +400,11 @@
 
 	if (msr == mca_cfg.rip_msr)
 		return offsetof(struct mce, ip);
-	if (msr == MSR_IA32_MCx_STATUS(bank))
+	if (msr == msr_ops.status(bank))
 		return offsetof(struct mce, status);
-	if (msr == MSR_IA32_MCx_ADDR(bank))
+	if (msr == msr_ops.addr(bank))
 		return offsetof(struct mce, addr);
-	if (msr == MSR_IA32_MCx_MISC(bank))
+	if (msr == msr_ops.misc(bank))
 		return offsetof(struct mce, misc);
 	if (msr == MSR_IA32_MCG_STATUS)
 		return offsetof(struct mce, mcgstatus);
@@ -523,9 +567,9 @@
 static void mce_read_aux(struct mce *m, int i)
 {
 	if (m->status & MCI_STATUS_MISCV)
-		m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
+		m->misc = mce_rdmsrl(msr_ops.misc(i));
 	if (m->status & MCI_STATUS_ADDRV) {
-		m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
+		m->addr = mce_rdmsrl(msr_ops.addr(i));
 
 		/*
 		 * Mask the reported address by the reported granularity.
@@ -607,7 +651,7 @@
 		m.tsc = 0;
 
 		barrier();
-		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+		m.status = mce_rdmsrl(msr_ops.status(i));
 		if (!(m.status & MCI_STATUS_VAL))
 			continue;
 
@@ -654,7 +698,7 @@
 		/*
 		 * Clear state for this bank.
 		 */
-		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
+		mce_wrmsrl(msr_ops.status(i), 0);
 	}
 
 	/*
@@ -679,7 +723,7 @@
 	char *tmp;
 
 	for (i = 0; i < mca_cfg.banks; i++) {
-		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+		m->status = mce_rdmsrl(msr_ops.status(i));
 		if (m->status & MCI_STATUS_VAL) {
 			__set_bit(i, validp);
 			if (quirk_no_way_out)
@@ -830,9 +874,9 @@
 
 	atomic_add(*no_way_out, &global_nwo);
 	/*
-	 * global_nwo should be updated before mce_callin
+	 * Rely on the implied barrier below, such that global_nwo
+	 * is updated before mce_callin.
 	 */
-	smp_wmb();
 	order = atomic_inc_return(&mce_callin);
 
 	/*
@@ -957,7 +1001,7 @@
 
 	for (i = 0; i < mca_cfg.banks; i++) {
 		if (test_bit(i, toclear))
-			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
+			mce_wrmsrl(msr_ops.status(i), 0);
 	}
 }
 
@@ -994,11 +1038,12 @@
 	int i;
 	int worst = 0;
 	int severity;
+
 	/*
 	 * Establish sequential order between the CPUs entering the machine
 	 * check handler.
 	 */
-	int order;
+	int order = -1;
 	/*
 	 * If no_way_out gets set, there is no safe way to recover from this
 	 * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
@@ -1012,7 +1057,12 @@
 	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
 	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
 	char *msg = "Unknown";
-	int lmce = 0;
+
+	/*
+	 * MCEs are always local on AMD. On Intel, MCG_STATUS_LMCES determines
+	 * whether the MCE is local to this CPU.
+	 */
+	int lmce = 1;
 
 	/* If this CPU is offline, just bail out. */
 	if (cpu_is_offline(smp_processor_id())) {
@@ -1051,19 +1101,20 @@
 		kill_it = 1;
 
 	/*
-	 * Check if this MCE is signaled to only this logical processor
+	 * Check if this MCE is signaled to only this logical processor,
+	 * on Intel only.
 	 */
-	if (m.mcgstatus & MCG_STATUS_LMCES)
-		lmce = 1;
-	else {
-		/*
-		 * Go through all the banks in exclusion of the other CPUs.
-		 * This way we don't report duplicated events on shared banks
-		 * because the first one to see it will clear it.
-		 * If this is a Local MCE, then no need to perform rendezvous.
-		 */
+	if (m.cpuvendor == X86_VENDOR_INTEL)
+		lmce = m.mcgstatus & MCG_STATUS_LMCES;
+
+	/*
+	 * Go through all banks in exclusion of the other CPUs. This way we
+	 * don't report duplicated events on shared banks because the first one
+	 * to see it will clear it. If this is a Local MCE, then no need to
+	 * perform rendezvous.
+	 */
+	if (!lmce)
 		order = mce_start(&no_way_out);
-	}
 
 	for (i = 0; i < cfg->banks; i++) {
 		__clear_bit(i, toclear);
@@ -1076,7 +1127,7 @@
 		m.addr = 0;
 		m.bank = i;
 
-		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+		m.status = mce_rdmsrl(msr_ops.status(i));
 		if ((m.status & MCI_STATUS_VAL) == 0)
 			continue;
 
@@ -1420,7 +1471,6 @@
 	enum mcp_flags m_fl = 0;
 	mce_banks_t all_banks;
 	u64 cap;
-	int i;
 
 	if (!mca_cfg.bootlog)
 		m_fl = MCP_DONTLOG;
@@ -1436,14 +1486,19 @@
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
 	if (cap & MCG_CTL_P)
 		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
+}
+
+static void __mcheck_cpu_init_clear_banks(void)
+{
+	int i;
 
 	for (i = 0; i < mca_cfg.banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
 
 		if (!b->init)
 			continue;
-		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
-		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
+		wrmsrl(msr_ops.ctl(i), b->ctl);
+		wrmsrl(msr_ops.status(i), 0);
 	}
 }
 
@@ -1495,7 +1550,7 @@
 			 */
 			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
 		}
-		if (c->x86 <= 17 && cfg->bootlog < 0) {
+		if (c->x86 < 17 && cfg->bootlog < 0) {
 			/*
 			 * Lots of broken BIOS around that don't clear them
 			 * by default and leave crap in there. Don't log:
@@ -1628,11 +1683,19 @@
 		break;
 
 	case X86_VENDOR_AMD: {
-		u32 ebx = cpuid_ebx(0x80000007);
+		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
+		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
+		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
 
-		mce_flags.overflow_recov = !!(ebx & BIT(0));
-		mce_flags.succor	 = !!(ebx & BIT(1));
-		mce_flags.smca		 = !!(ebx & BIT(3));
+		/*
+		 * Install proper ops for Scalable MCA enabled processors
+		 */
+		if (mce_flags.smca) {
+			msr_ops.ctl	= smca_ctl_reg;
+			msr_ops.status	= smca_status_reg;
+			msr_ops.addr	= smca_addr_reg;
+			msr_ops.misc	= smca_misc_reg;
+		}
 		mce_amd_feature_init(c);
 
 		break;
@@ -1717,6 +1780,7 @@
 
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(c);
+	__mcheck_cpu_init_clear_banks();
 	__mcheck_cpu_init_timer();
 }
 
@@ -2082,7 +2146,7 @@
 		struct mce_bank *b = &mce_banks[i];
 
 		if (b->init)
-			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
+			wrmsrl(msr_ops.ctl(i), 0);
 	}
 	return;
 }
@@ -2121,6 +2185,7 @@
 {
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
+	__mcheck_cpu_init_clear_banks();
 }
 
 static struct syscore_ops mce_syscore_ops = {
@@ -2138,6 +2203,7 @@
 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 	__mcheck_cpu_init_generic();
+	__mcheck_cpu_init_clear_banks();
 	__mcheck_cpu_init_timer();
 }
 
@@ -2413,7 +2479,7 @@
 		struct mce_bank *b = &mce_banks[i];
 
 		if (b->init)
-			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+			wrmsrl(msr_ops.ctl(i), b->ctl);
 	}
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9d656fd..10b0661 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -54,14 +54,6 @@
 /* Threshold LVT offset is at MSR0xC0000410[15:12] */
 #define SMCA_THR_LVT_OFF	0xF000
 
-/*
- * OS is required to set the MCAX bit to acknowledge that it is now using the
- * new MSR ranges and new registers under each bank. It also means that the OS
- * will configure deferred errors in the new MCx_CONFIG register. If the bit is
- * not set, uncorrectable errors will cause a system panic.
- */
-#define SMCA_MCAX_EN_OFF	0x1
-
 static const char * const th_names[] = {
 	"load_store",
 	"insn_fetch",
@@ -333,7 +325,7 @@
 	/* Fall back to method we used for older processors: */
 	switch (block) {
 	case 0:
-		addr = MSR_IA32_MCx_MISC(bank);
+		addr = msr_ops.misc(bank);
 		break;
 	case 1:
 		offset = ((low & MASK_BLKPTR_LO) >> 21);
@@ -351,6 +343,7 @@
 			int offset, u32 misc_high)
 {
 	unsigned int cpu = smp_processor_id();
+	u32 smca_low, smca_high, smca_addr;
 	struct threshold_block b;
 	int new;
 
@@ -369,24 +362,49 @@
 
 	b.interrupt_enable = 1;
 
-	if (mce_flags.smca) {
-		u32 smca_low, smca_high;
-		u32 smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);
-
-		if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
-			smca_high |= SMCA_MCAX_EN_OFF;
-			wrmsr(smca_addr, smca_low, smca_high);
-		}
-
-		/* Gather LVT offset for thresholding: */
-		if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
-			goto out;
-
-		new = (smca_low & SMCA_THR_LVT_OFF) >> 12;
-	} else {
+	if (!mce_flags.smca) {
 		new = (misc_high & MASK_LVTOFF_HI) >> 20;
+		goto set_offset;
 	}
 
+	smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);
+
+	if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
+		/*
+		 * OS is required to set the MCAX bit to acknowledge that it is
+		 * now using the new MSR ranges and new registers under each
+		 * bank. It also means that the OS will configure deferred
+		 * errors in the new MCx_CONFIG register. If the bit is not set,
+		 * uncorrectable errors will cause a system panic.
+		 *
+		 * MCA_CONFIG[MCAX] is bit 32 (bit 0 in the high portion of the MSR).
+		 */
+		smca_high |= BIT(0);
+
+		/*
+		 * SMCA logs Deferred Error information in MCA_DE{STAT,ADDR}
+		 * registers with the option of additionally logging to
+		 * MCA_{STATUS,ADDR} if MCA_CONFIG[LogDeferredInMcaStat] is set.
+		 *
+		 * This bit is usually set by BIOS to retain the old behavior
+		 * for OSes that don't use the new registers. Linux supports the
+		 * new registers so let's disable that additional logging here.
+		 *
+		 * MCA_CONFIG[LogDeferredInMcaStat] is bit 34 (bit 2 in the high
+		 * portion of the MSR).
+		 */
+		smca_high &= ~BIT(2);
+
+		wrmsr(smca_addr, smca_low, smca_high);
+	}
+
+	/* Gather LVT offset for thresholding: */
+	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
+		goto out;
+
+	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;
+
+set_offset:
 	offset = setup_APIC_mce_threshold(offset, new);
 
 	if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
@@ -430,12 +448,23 @@
 		deferred_error_interrupt_enable(c);
 }
 
-static void __log_error(unsigned int bank, bool threshold_err, u64 misc)
+static void
+__log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
 {
+	u32 msr_status = msr_ops.status(bank);
+	u32 msr_addr = msr_ops.addr(bank);
 	struct mce m;
 	u64 status;
 
-	rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
+	WARN_ON_ONCE(deferred_err && threshold_err);
+
+	if (deferred_err && mce_flags.smca) {
+		msr_status = MSR_AMD64_SMCA_MCx_DESTAT(bank);
+		msr_addr = MSR_AMD64_SMCA_MCx_DEADDR(bank);
+	}
+
+	rdmsrl(msr_status, status);
+
 	if (!(status & MCI_STATUS_VAL))
 		return;
 
@@ -448,10 +477,11 @@
 		m.misc = misc;
 
 	if (m.status & MCI_STATUS_ADDRV)
-		rdmsrl(MSR_IA32_MCx_ADDR(bank), m.addr);
+		rdmsrl(msr_addr, m.addr);
 
 	mce_log(&m);
-	wrmsrl(MSR_IA32_MCx_STATUS(bank), 0);
+
+	wrmsrl(msr_status, 0);
 }
 
 static inline void __smp_deferred_error_interrupt(void)
@@ -479,17 +509,21 @@
 /* APIC interrupt handler for deferred errors */
 static void amd_deferred_error_interrupt(void)
 {
-	u64 status;
 	unsigned int bank;
+	u32 msr_status;
+	u64 status;
 
 	for (bank = 0; bank < mca_cfg.banks; ++bank) {
-		rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
+		msr_status = (mce_flags.smca) ? MSR_AMD64_SMCA_MCx_DESTAT(bank)
+					      : msr_ops.status(bank);
+
+		rdmsrl(msr_status, status);
 
 		if (!(status & MCI_STATUS_VAL) ||
 		    !(status & MCI_STATUS_DEFERRED))
 			continue;
 
-		__log_error(bank, false, 0);
+		__log_error(bank, true, false, 0);
 		break;
 	}
 }
@@ -544,7 +578,7 @@
 	return;
 
 log:
-	__log_error(bank, true, ((u64)high << 32) | low);
+	__log_error(bank, false, true, ((u64)high << 32) | low);
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 1e8bb6c..1defb8e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -84,7 +84,7 @@
 	 */
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return 0;
-	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
+	if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
 		return 0;
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
 	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 0b445c2..6b9dc4d 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -384,6 +384,9 @@
 {
 	__u64 msr_val;
 
+	if (static_cpu_has(X86_FEATURE_HWP))
+		wrmsrl_safe(MSR_HWP_STATUS, 0);
+
 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 
 	/* Check for violation of core thermal thresholds*/
@@ -447,7 +450,7 @@
 /* Thermal monitoring depends on APIC, ACPI and clock modulation */
 static int intel_thermal_supported(struct cpuinfo_x86 *c)
 {
-	if (!cpu_has_apic)
+	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return 0;
 	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
 		return 0;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index cbb3cf0..65cbbcd 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -422,7 +422,7 @@
 		data_size = get_datasize(mc_saved_header);
 		date = mc_saved_header->date;
 
-		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, toal size=0x%x, date = %04x-%02x-%02x\n",
+		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
 			 i, sig, pf, rev, total_size,
 			 date & 0xffff,
 			 date >> 24,
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 4e7c693..10c11b4 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -152,6 +152,11 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static unsigned char hv_get_nmi_reason(void)
+{
+	return 0;
+}
+
 static void __init ms_hyperv_init_platform(void)
 {
 	/*
@@ -191,6 +196,13 @@
 	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
 #endif
 	mark_tsc_unstable("running on Hyper-V");
+
+	/*
+	 * Generation 2 instances don't support reading the NMI status from
+	 * port 0x61.
+	 */
+	if (efi_enabled(EFI_BOOT))
+		x86_platform.get_nmi_reason = hv_get_nmi_reason;
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index f8c81ba..b1086f7 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -137,7 +137,7 @@
 	u32 cr0;
 
 	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
-	if (cpu_has_pge) {
+	if (boot_cpu_has(X86_FEATURE_PGE)) {
 		cr4 = __read_cr4();
 		__write_cr4(cr4 & ~X86_CR4_PGE);
 	}
@@ -170,7 +170,7 @@
 	write_cr0(read_cr0() & ~X86_CR0_CD);
 
 	/* Restore value of CR4 */
-	if (cpu_has_pge)
+	if (boot_cpu_has(X86_FEATURE_PGE))
 		__write_cr4(cr4);
 }
 
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 19f5736..16e37a25 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -444,11 +444,24 @@
 		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
 }
 
+/* PAT setup for BP. We need to go through sync steps here */
+void __init mtrr_bp_pat_init(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	prepare_set();
+
+	pat_init();
+
+	post_set();
+	local_irq_restore(flags);
+}
+
 /* Grab all of the MTRR state for this CPU into *state */
 bool __init get_mtrr_state(void)
 {
 	struct mtrr_var_range *vrs;
-	unsigned long flags;
 	unsigned lo, dummy;
 	unsigned int i;
 
@@ -481,15 +494,6 @@
 
 	mtrr_state_set = 1;
 
-	/* PAT setup for BP. We need to go through sync steps here */
-	local_irq_save(flags);
-	prepare_set();
-
-	pat_init();
-
-	post_set();
-	local_irq_restore(flags);
-
 	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
 }
 
@@ -741,7 +745,7 @@
 	wbinvd();
 
 	/* Save value of CR4 and clear Page Global Enable (bit 7) */
-	if (cpu_has_pge) {
+	if (boot_cpu_has(X86_FEATURE_PGE)) {
 		cr4 = __read_cr4();
 		__write_cr4(cr4 & ~X86_CR4_PGE);
 	}
@@ -771,7 +775,7 @@
 	write_cr0(read_cr0() & ~X86_CR0_CD);
 
 	/* Restore value of CR4 */
-	if (cpu_has_pge)
+	if (boot_cpu_has(X86_FEATURE_PGE))
 		__write_cr4(cr4);
 	raw_spin_unlock(&set_atomicity_lock);
 }
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 10f8d47..7d393ec 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -752,6 +752,9 @@
 			/* BIOS may override */
 			__mtrr_enabled = get_mtrr_state();
 
+			if (mtrr_enabled())
+				mtrr_bp_pat_init();
+
 			if (mtrr_cleanup(phys_addr)) {
 				changed_by_mtrr_cleanup = 1;
 				mtrr_if->set_all();
@@ -759,8 +762,16 @@
 		}
 	}
 
-	if (!mtrr_enabled())
+	if (!mtrr_enabled()) {
 		pr_info("MTRR: Disabled\n");
+
+		/*
+		 * PAT initialization relies on MTRR's rendezvous handler.
+		 * Skip PAT init until the handler can initialize both
+		 * features independently.
+		 */
+		pat_disable("MTRRs disabled, skipping PAT initialization too.");
+	}
 }
 
 void mtrr_ap_init(void)
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 951884d..6c7ced0 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -52,6 +52,7 @@
 void fill_mtrr_var_range(unsigned int index,
 		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
 bool get_mtrr_state(void);
+void mtrr_bp_pat_init(void);
 
 extern void set_mtrr_ops(const struct mtrr_ops *ops);
 
diff --git a/arch/x86/kernel/cpu/powerflags.c b/arch/x86/kernel/cpu/powerflags.c
index 31f0f33..1dd8294 100644
--- a/arch/x86/kernel/cpu/powerflags.c
+++ b/arch/x86/kernel/cpu/powerflags.c
@@ -18,4 +18,6 @@
 	"",	/* tsc invariant mapped to constant_tsc */
 	"cpb",  /* core performance boost */
 	"eff_freq_ro", /* Readonly aperf/mperf */
+	"proc_feedback", /* processor feedback interface */
+	"acc_power", /* accumulated power mechanism */
 };
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 364e583..8cac429 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -94,7 +94,7 @@
  */
 static uint32_t __init vmware_platform(void)
 {
-	if (cpu_has_hypervisor) {
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		unsigned int eax;
 		unsigned int hyper_vendor_id[3];
 
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 1f4acd6..3fe45f8 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -151,7 +151,7 @@
 		return;
 
 	/* Did the boot loader setup the local APIC ? */
-	if (!cpu_has_apic) {
+	if (!boot_cpu_has(X86_FEATURE_APIC)) {
 		if (apic_force_enable(r.start))
 			return;
 	}
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 8efa57a..2bb25c3 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -260,19 +260,12 @@
 	unsigned long sp;
 #endif
 	printk(KERN_DEFAULT
-	       "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
-	printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
-	printk("SMP ");
-#endif
-	if (debug_pagealloc_enabled())
-		printk("DEBUG_PAGEALLOC ");
-#ifdef CONFIG_KASAN
-	printk("KASAN");
-#endif
-	printk("\n");
+	       "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter,
+	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT"         : "",
+	       IS_ENABLED(CONFIG_SMP)     ? " SMP"             : "",
+	       debug_pagealloc_enabled()  ? " DEBUG_PAGEALLOC" : "",
+	       IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "");
+
 	if (notify_die(DIE_OOPS, str, regs, err,
 			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
 		return 1;
diff --git a/arch/x86/kernel/ebda.c b/arch/x86/kernel/ebda.c
new file mode 100644
index 0000000..afe65df
--- /dev/null
+++ b/arch/x86/kernel/ebda.c
@@ -0,0 +1,71 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+
+#include <asm/setup.h>
+#include <asm/bios_ebda.h>
+
+/*
+ * The BIOS places the EBDA/XBDA at the top of conventional
+ * memory, and usually decreases the reported amount of
+ * conventional memory (int 0x12) too. This also contains a
+ * workaround for Dell systems that neglect to reserve EBDA.
+ * The same workaround also avoids a problem with the AMD768MPX
+ * chipset: reserve a page before VGA to prevent PCI prefetch
+ * into it (errata #56). Usually the page is reserved anyways,
+ * unless you have no PS/2 mouse plugged in.
+ *
+ * This function is deliberately very conservative.  Losing
+ * memory in the bottom megabyte is rarely a problem, as long
+ * as we have enough memory to install the trampoline.  Using
+ * memory that is in use by the BIOS or by some DMA device
+ * the BIOS didn't shut down *is* a big problem.
+ */
+
+#define BIOS_LOWMEM_KILOBYTES	0x413
+#define LOWMEM_CAP		0x9f000U	/* Absolute maximum */
+#define INSANE_CUTOFF		0x20000U	/* Less than this = insane */
+
+void __init reserve_ebda_region(void)
+{
+	unsigned int lowmem, ebda_addr;
+
+	/*
+	 * To determine the position of the EBDA and the
+	 * end of conventional memory, we need to look at
+	 * the BIOS data area. In a paravirtual environment
+	 * that area is absent. We'll just have to assume
+	 * that the paravirt case can handle memory setup
+	 * correctly, without our help.
+	 */
+	if (!x86_platform.legacy.ebda_search)
+		return;
+
+	/* end of low (conventional) memory */
+	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
+	lowmem <<= 10;
+
+	/* start of EBDA area */
+	ebda_addr = get_bios_ebda();
+
+	/*
+	 * Note: some old Dells seem to need 4k EBDA without
+	 * reporting so, so just consider the memory above 0x9f000
+	 * to be off limits (bugzilla 2990).
+	 */
+
+	/* If the EBDA address is below 128K, assume it is bogus */
+	if (ebda_addr < INSANE_CUTOFF)
+		ebda_addr = LOWMEM_CAP;
+
+	/* If lowmem is less than 128K, assume it is bogus */
+	if (lowmem < INSANE_CUTOFF)
+		lowmem = LOWMEM_CAP;
+
+	/* Use the lower of the lowmem and EBDA markers as the cutoff */
+	lowmem = min(lowmem, ebda_addr);
+	lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
+
+	/* reserve all memory between lowmem and the 1MB mark */
+	memblock_reserve(lowmem, 0x100000 - lowmem);
+}
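reserve_ebda_region() in the new ebda.c boils down to taking the minimum of three markers (BIOS-reported low memory, the EBDA start, and the absolute cap) and reserving everything from there up to 1 MB. A worked example with hypothetical BIOS values:

	#include <stdio.h>

	#define LOWMEM_CAP	0x9f000U	/* absolute maximum */
	#define INSANE_CUTOFF	0x20000U	/* less than this = insane */

	int main(void)
	{
		unsigned int lowmem = 639U << 10;	/* hypothetical int 0x12 report: 639 KB */
		unsigned int ebda_addr = 0x9f800;	/* hypothetical EBDA start */

		if (ebda_addr < INSANE_CUTOFF)
			ebda_addr = LOWMEM_CAP;
		if (lowmem < INSANE_CUTOFF)
			lowmem = LOWMEM_CAP;

		lowmem = lowmem < ebda_addr ? lowmem : ebda_addr;
		lowmem = lowmem < LOWMEM_CAP ? lowmem : LOWMEM_CAP;

		/* the kernel would reserve [lowmem, 1 MB) via memblock_reserve() */
		printf("reserve [0x%x, 0x100000)\n", lowmem);
		return 0;
	}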
diff --git a/arch/x86/kernel/fpu/bugs.c b/arch/x86/kernel/fpu/bugs.c
index dd9ca9b6..aad34aa 100644
--- a/arch/x86/kernel/fpu/bugs.c
+++ b/arch/x86/kernel/fpu/bugs.c
@@ -21,11 +21,15 @@
  * We should really only care about bugs here
  * anyway. Not features.
  */
-static void __init check_fpu(void)
+void __init fpu__init_check_bugs(void)
 {
 	u32 cr0_saved;
 	s32 fdiv_bug;
 
+	/* kernel_fpu_begin/end() relies on patched alternative instructions. */
+	if (!boot_cpu_has(X86_FEATURE_FPU))
+		return;
+
 	/* We might have CR0::TS set already, clear it: */
 	cr0_saved = read_cr0();
 	write_cr0(cr0_saved & ~X86_CR0_TS);
@@ -59,13 +63,3 @@
 		pr_warn("Hmm, FPU with FDIV bug\n");
 	}
 }
-
-void __init fpu__init_check_bugs(void)
-{
-	/*
-	 * kernel_fpu_begin/end() in check_fpu() relies on the patched
-	 * alternative instructions.
-	 */
-	if (cpu_has_fpu)
-		check_fpu();
-}
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 8e37cc8..9702754 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -217,14 +217,14 @@
 
 void fpstate_init(union fpregs_state *state)
 {
-	if (!cpu_has_fpu) {
+	if (!static_cpu_has(X86_FEATURE_FPU)) {
 		fpstate_init_soft(&state->soft);
 		return;
 	}
 
 	memset(state, 0, xstate_size);
 
-	if (cpu_has_fxsr)
+	if (static_cpu_has(X86_FEATURE_FXSR))
 		fpstate_init_fxstate(&state->fxsave);
 	else
 		fpstate_init_fstate(&state->fsave);
@@ -237,7 +237,7 @@
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
-	if (!src_fpu->fpstate_active || !cpu_has_fpu)
+	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
 		return 0;
 
 	WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -506,33 +506,6 @@
  * x87 math exception handling:
  */
 
-static inline unsigned short get_fpu_cwd(struct fpu *fpu)
-{
-	if (cpu_has_fxsr) {
-		return fpu->state.fxsave.cwd;
-	} else {
-		return (unsigned short)fpu->state.fsave.cwd;
-	}
-}
-
-static inline unsigned short get_fpu_swd(struct fpu *fpu)
-{
-	if (cpu_has_fxsr) {
-		return fpu->state.fxsave.swd;
-	} else {
-		return (unsigned short)fpu->state.fsave.swd;
-	}
-}
-
-static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
-{
-	if (cpu_has_xmm) {
-		return fpu->state.fxsave.mxcsr;
-	} else {
-		return MXCSR_DEFAULT;
-	}
-}
-
 int fpu__exception_code(struct fpu *fpu, int trap_nr)
 {
 	int err;
@@ -547,10 +520,15 @@
 		 * so if this combination doesn't produce any single exception,
 		 * then we have a bad program that isn't synchronizing its FPU usage
 		 * and it will suffer the consequences since we won't be able to
-		 * fully reproduce the context of the exception
+		 * fully reproduce the context of the exception.
 		 */
-		cwd = get_fpu_cwd(fpu);
-		swd = get_fpu_swd(fpu);
+		if (boot_cpu_has(X86_FEATURE_FXSR)) {
+			cwd = fpu->state.fxsave.cwd;
+			swd = fpu->state.fxsave.swd;
+		} else {
+			cwd = (unsigned short)fpu->state.fsave.cwd;
+			swd = (unsigned short)fpu->state.fsave.swd;
+		}
 
 		err = swd & ~cwd;
 	} else {
@@ -560,7 +538,11 @@
 		 * unmasked exception was caught we must mask the exception mask bits
 		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
 		 */
-		unsigned short mxcsr = get_fpu_mxcsr(fpu);
+		unsigned short mxcsr = MXCSR_DEFAULT;
+
+		if (boot_cpu_has(X86_FEATURE_XMM))
+			mxcsr = fpu->state.fxsave.mxcsr;
+
 		err = ~(mxcsr >> 7) & mxcsr;
 	}
 
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 54c86ff..aacfd7a 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -29,22 +29,22 @@
 	unsigned long cr0;
 	unsigned long cr4_mask = 0;
 
-	if (cpu_has_fxsr)
+	if (boot_cpu_has(X86_FEATURE_FXSR))
 		cr4_mask |= X86_CR4_OSFXSR;
-	if (cpu_has_xmm)
+	if (boot_cpu_has(X86_FEATURE_XMM))
 		cr4_mask |= X86_CR4_OSXMMEXCPT;
 	if (cr4_mask)
 		cr4_set_bits(cr4_mask);
 
 	cr0 = read_cr0();
 	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
-	if (!cpu_has_fpu)
+	if (!boot_cpu_has(X86_FEATURE_FPU))
 		cr0 |= X86_CR0_EM;
 	write_cr0(cr0);
 
 	/* Flush out any pending x87 state: */
 #ifdef CONFIG_MATH_EMULATION
-	if (!cpu_has_fpu)
+	if (!boot_cpu_has(X86_FEATURE_FPU))
 		fpstate_init_soft(&current->thread.fpu.state.soft);
 	else
 #endif
@@ -89,7 +89,7 @@
 	}
 
 #ifndef CONFIG_MATH_EMULATION
-	if (!cpu_has_fpu) {
+	if (!boot_cpu_has(X86_FEATURE_FPU)) {
 		pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
 		for (;;)
 			asm volatile("hlt");
@@ -106,7 +106,7 @@
 {
 	unsigned int mask = 0;
 
-	if (cpu_has_fxsr) {
+	if (boot_cpu_has(X86_FEATURE_FXSR)) {
 		/* Static because GCC does not get 16-byte stack alignment right: */
 		static struct fxregs_state fxregs __initdata;
 
@@ -212,7 +212,7 @@
 	 * fpu__init_system_xstate().
 	 */
 
-	if (!cpu_has_fpu) {
+	if (!boot_cpu_has(X86_FEATURE_FPU)) {
 		/*
 		 * Disable xsave as we do not support it if i387
 		 * emulation is enabled.
@@ -221,7 +221,7 @@
 		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
 		xstate_size = sizeof(struct swregs_state);
 	} else {
-		if (cpu_has_fxsr)
+		if (boot_cpu_has(X86_FEATURE_FXSR))
 			xstate_size = sizeof(struct fxregs_state);
 		else
 			xstate_size = sizeof(struct fregs_state);
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index 8bd1c00..81422df 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -21,7 +21,10 @@
 {
 	struct fpu *target_fpu = &target->thread.fpu;
 
-	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
+	if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active)
+		return regset->n;
+	else
+		return 0;
 }
 
 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
@@ -30,7 +33,7 @@
 {
 	struct fpu *fpu = &target->thread.fpu;
 
-	if (!cpu_has_fxsr)
+	if (!boot_cpu_has(X86_FEATURE_FXSR))
 		return -ENODEV;
 
 	fpu__activate_fpstate_read(fpu);
@@ -47,7 +50,7 @@
 	struct fpu *fpu = &target->thread.fpu;
 	int ret;
 
-	if (!cpu_has_fxsr)
+	if (!boot_cpu_has(X86_FEATURE_FXSR))
 		return -ENODEV;
 
 	fpu__activate_fpstate_write(fpu);
@@ -65,7 +68,7 @@
 	 * update the header bits in the xsave header, indicating the
 	 * presence of FP and SSE state.
 	 */
-	if (cpu_has_xsave)
+	if (boot_cpu_has(X86_FEATURE_XSAVE))
 		fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
 
 	return ret;
@@ -79,7 +82,7 @@
 	struct xregs_state *xsave;
 	int ret;
 
-	if (!cpu_has_xsave)
+	if (!boot_cpu_has(X86_FEATURE_XSAVE))
 		return -ENODEV;
 
 	fpu__activate_fpstate_read(fpu);
@@ -108,7 +111,7 @@
 	struct xregs_state *xsave;
 	int ret;
 
-	if (!cpu_has_xsave)
+	if (!boot_cpu_has(X86_FEATURE_XSAVE))
 		return -ENODEV;
 
 	fpu__activate_fpstate_write(fpu);
@@ -275,10 +278,10 @@
 
 	fpu__activate_fpstate_read(fpu);
 
-	if (!static_cpu_has(X86_FEATURE_FPU))
+	if (!boot_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
-	if (!cpu_has_fxsr)
+	if (!boot_cpu_has(X86_FEATURE_FXSR))
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 					   &fpu->state.fsave, 0,
 					   -1);
@@ -306,10 +309,10 @@
 	fpu__activate_fpstate_write(fpu);
 	fpstate_sanitize_xstate(fpu);
 
-	if (!static_cpu_has(X86_FEATURE_FPU))
+	if (!boot_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
-	if (!cpu_has_fxsr)
+	if (!boot_cpu_has(X86_FEATURE_FXSR))
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					  &fpu->state.fsave, 0,
 					  -1);
@@ -325,7 +328,7 @@
 	 * update the header bit in the xsave header, indicating the
 	 * presence of FP.
 	 */
-	if (cpu_has_xsave)
+	if (boot_cpu_has(X86_FEATURE_XSAVE))
 		fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;
 	return ret;
 }
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index b48ef35..4ea2a59 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -190,7 +190,7 @@
  */
 void fpu__init_cpu_xstate(void)
 {
-	if (!cpu_has_xsave || !xfeatures_mask)
+	if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask)
 		return;
 
 	cr4_set_bits(X86_CR4_OSXSAVE);
@@ -280,7 +280,7 @@
 	xstate_comp_offsets[0] = 0;
 	xstate_comp_offsets[1] = offsetof(struct fxregs_state, xmm_space);
 
-	if (!cpu_has_xsaves) {
+	if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
 		for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
 			if (xfeature_enabled(i)) {
 				xstate_comp_offsets[i] = xstate_offsets[i];
@@ -316,13 +316,13 @@
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
 
-	if (!cpu_has_xsave)
+	if (!boot_cpu_has(X86_FEATURE_XSAVE))
 		return;
 
 	setup_xstate_features();
 	print_xstate_features();
 
-	if (cpu_has_xsaves) {
+	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
 		init_fpstate.xsave.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
 		init_fpstate.xsave.header.xfeatures = xfeatures_mask;
 	}
@@ -417,7 +417,7 @@
  */
 static int using_compacted_format(void)
 {
-	return cpu_has_xsaves;
+	return boot_cpu_has(X86_FEATURE_XSAVES);
 }
 
 static void __xstate_dump_leaves(void)
@@ -549,7 +549,7 @@
 	unsigned int eax, ebx, ecx, edx;
 	unsigned int calculated_xstate_size;
 
-	if (!cpu_has_xsaves) {
+	if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
 		/*
 		 * - CPUID function 0DH, sub-function 0:
 		 *    EBX enumerates the size (in bytes) required by
@@ -630,7 +630,7 @@
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
 
-	if (!cpu_has_xsave) {
+	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
 		pr_info("x86/fpu: Legacy x87 FPU detected.\n");
 		return;
 	}
@@ -667,7 +667,7 @@
 	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
 		xfeatures_mask,
 		xstate_size,
-		cpu_has_xsaves ? "compacted" : "standard");
+		boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
 }
 
 /*
@@ -678,7 +678,7 @@
 	/*
 	 * Restore XCR0 on xsave capable CPUs:
 	 */
-	if (cpu_has_xsave)
+	if (boot_cpu_has(X86_FEATURE_XSAVE))
 		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
 }
 
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
deleted file mode 100644
index 992f442..0000000
--- a/arch/x86/kernel/head.c
+++ /dev/null
@@ -1,71 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/memblock.h>
-
-#include <asm/setup.h>
-#include <asm/bios_ebda.h>
-
-/*
- * The BIOS places the EBDA/XBDA at the top of conventional
- * memory, and usually decreases the reported amount of
- * conventional memory (int 0x12) too. This also contains a
- * workaround for Dell systems that neglect to reserve EBDA.
- * The same workaround also avoids a problem with the AMD768MPX
- * chipset: reserve a page before VGA to prevent PCI prefetch
- * into it (errata #56). Usually the page is reserved anyways,
- * unless you have no PS/2 mouse plugged in.
- *
- * This functions is deliberately very conservative.  Losing
- * memory in the bottom megabyte is rarely a problem, as long
- * as we have enough memory to install the trampoline.  Using
- * memory that is in use by the BIOS or by some DMA device
- * the BIOS didn't shut down *is* a big problem.
- */
-
-#define BIOS_LOWMEM_KILOBYTES	0x413
-#define LOWMEM_CAP		0x9f000U	/* Absolute maximum */
-#define INSANE_CUTOFF		0x20000U	/* Less than this = insane */
-
-void __init reserve_ebda_region(void)
-{
-	unsigned int lowmem, ebda_addr;
-
-	/*
-	 * To determine the position of the EBDA and the
-	 * end of conventional memory, we need to look at
-	 * the BIOS data area. In a paravirtual environment
-	 * that area is absent. We'll just have to assume
-	 * that the paravirt case can handle memory setup
-	 * correctly, without our help.
-	 */
-	if (paravirt_enabled())
-		return;
-
-	/* end of low (conventional) memory */
-	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
-	lowmem <<= 10;
-
-	/* start of EBDA area */
-	ebda_addr = get_bios_ebda();
-
-	/*
-	 * Note: some old Dells seem to need 4k EBDA without
-	 * reporting so, so just consider the memory above 0x9f000
-	 * to be off limits (bugzilla 2990).
-	 */
-
-	/* If the EBDA address is below 128K, assume it is bogus */
-	if (ebda_addr < INSANE_CUTOFF)
-		ebda_addr = LOWMEM_CAP;
-
-	/* If lowmem is less than 128K, assume it is bogus */
-	if (lowmem < INSANE_CUTOFF)
-		lowmem = LOWMEM_CAP;
-
-	/* Use the lower of the lowmem and EBDA markers as the cutoff */
-	lowmem = min(lowmem, ebda_addr);
-	lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
-
-	/* reserve all memory between lowmem and the 1MB mark */
-	memblock_reserve(lowmem, 0x100000 - lowmem);
-}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 2911ef3..d784bb5 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -34,6 +34,8 @@
 	cr4_init_shadow();
 	sanitize_boot_params(&boot_params);
 
+	x86_early_init_platform_quirks();
+
 	/* Call the subarch specific early setup function */
 	switch (boot_params.hdr.hardware_subarch) {
 	case X86_SUBARCH_INTEL_MID:
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1f4422d..b72fb0b 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -182,6 +182,7 @@
 	if (!boot_params.hdr.version)
 		copy_bootdata(__va(real_mode_data));
 
+	x86_early_init_platform_quirks();
 	reserve_ebda_region();
 
 	switch (boot_params.hdr.hardware_subarch) {
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 54cdbd2..6f8902b 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -389,12 +389,6 @@
 	/* Make changes effective */
 	wrmsr
 
-	/*
-	 * And make sure that all the mappings we set up have NX set from
-	 * the beginning.
-	 */
-	orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4)
-
 enable_paging:
 
 /*
@@ -561,62 +555,53 @@
 	 */
 	cld
 
-	cmpl $2,(%esp)		# X86_TRAP_NMI
-	je .Lis_nmi		# Ignore NMI
-
-	cmpl $2,%ss:early_recursion_flag
-	je hlt_loop
 	incl %ss:early_recursion_flag
 
-	push %eax		# 16(%esp)
-	push %ecx		# 12(%esp)
-	push %edx		#  8(%esp)
-	push %ds		#  4(%esp)
-	push %es		#  0(%esp)
-	movl $(__KERNEL_DS),%eax
-	movl %eax,%ds
-	movl %eax,%es
+	/* The vector number is in pt_regs->gs */
 
-	cmpl $(__KERNEL_CS),32(%esp)
-	jne 10f
+	cld
+	pushl	%fs		/* pt_regs->fs */
+	movw	$0, 2(%esp)	/* clear high bits (some CPUs leave garbage) */
+	pushl	%es		/* pt_regs->es */
+	movw	$0, 2(%esp)	/* clear high bits (some CPUs leave garbage) */
+	pushl	%ds		/* pt_regs->ds */
+	movw	$0, 2(%esp)	/* clear high bits (some CPUs leave garbage) */
+	pushl	%eax		/* pt_regs->ax */
+	pushl	%ebp		/* pt_regs->bp */
+	pushl	%edi		/* pt_regs->di */
+	pushl	%esi		/* pt_regs->si */
+	pushl	%edx		/* pt_regs->dx */
+	pushl	%ecx		/* pt_regs->cx */
+	pushl	%ebx		/* pt_regs->bx */
 
-	leal 28(%esp),%eax	# Pointer to %eip
-	call early_fixup_exception
-	andl %eax,%eax
-	jnz ex_entry		/* found an exception entry */
+	/* Fix up DS and ES */
+	movl	$(__KERNEL_DS), %ecx
+	movl	%ecx, %ds
+	movl	%ecx, %es
 
-10:
-#ifdef CONFIG_PRINTK
-	xorl %eax,%eax
-	movw %ax,2(%esp)	/* clean up the segment values on some cpus */
-	movw %ax,6(%esp)
-	movw %ax,34(%esp)
-	leal  40(%esp),%eax
-	pushl %eax		/* %esp before the exception */
-	pushl %ebx
-	pushl %ebp
-	pushl %esi
-	pushl %edi
-	movl %cr2,%eax
-	pushl %eax
-	pushl (20+6*4)(%esp)	/* trapno */
-	pushl $fault_msg
-	call printk
-#endif
-	call dump_stack
-hlt_loop:
-	hlt
-	jmp hlt_loop
+	/* Load the vector number into EDX */
+	movl	PT_GS(%esp), %edx
 
-ex_entry:
-	pop %es
-	pop %ds
-	pop %edx
-	pop %ecx
-	pop %eax
-	decl %ss:early_recursion_flag
-.Lis_nmi:
-	addl $8,%esp		/* drop vector number and error code */
+	/* Load GS into pt_regs->gs and clear high bits */
+	movw	%gs, PT_GS(%esp)
+	movw	$0, PT_GS+2(%esp)
+
+	movl	%esp, %eax	/* args are pt_regs (EAX), trapnr (EDX) */
+	call	early_fixup_exception
+
+	popl	%ebx		/* pt_regs->bx */
+	popl	%ecx		/* pt_regs->cx */
+	popl	%edx		/* pt_regs->dx */
+	popl	%esi		/* pt_regs->si */
+	popl	%edi		/* pt_regs->di */
+	popl	%ebp		/* pt_regs->bp */
+	popl	%eax		/* pt_regs->ax */
+	popl	%ds		/* pt_regs->ds */
+	popl	%es		/* pt_regs->es */
+	popl	%fs		/* pt_regs->fs */
+	popl	%gs		/* pt_regs->gs */
+	decl	%ss:early_recursion_flag
+	addl	$4, %esp	/* pop pt_regs->orig_ax */
 	iret
 ENDPROC(early_idt_handler_common)
 
@@ -653,10 +638,14 @@
 	popl %eax
 #endif
 	iret
+
+hlt_loop:
+	hlt
+	jmp hlt_loop
 ENDPROC(ignore_int)
 __INITDATA
 	.align 4
-early_recursion_flag:
+GLOBAL(early_recursion_flag)
 	.long 0
 
 __REFDATA
@@ -721,19 +710,6 @@
 int_msg:
 	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
-fault_msg:
-/* fault info: */
-	.ascii "BUG: Int %d: CR2 %p\n"
-/* regs pushed in early_idt_handler: */
-	.ascii "     EDI %p  ESI %p  EBP %p  EBX %p\n"
-	.ascii "     ESP %p   ES %p   DS %p\n"
-	.ascii "     EDX %p  ECX %p  EAX %p\n"
-/* fault frame: */
-	.ascii "     vec %p  err %p  EIP %p   CS %p  flg %p\n"
-	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
-	.ascii "       %p %p %p %p %p %p %p %p\n"
-	.asciz "       %p %p %p %p %p %p %p %p\n"
-
 #include "../../x86/xen/xen-head.S"
 
 /*
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 22fbf9d..5df831e 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -20,6 +20,7 @@
 #include <asm/processor-flags.h>
 #include <asm/percpu.h>
 #include <asm/nops.h>
+#include "../entry/calling.h"
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -64,6 +65,14 @@
 	 * tables and then reload them.
 	 */
 
+	/*
+	 * Setup stack for verify_cpu(). "-8" because stack_start is defined
+	 * this way, see below. Our best guess is a NULL ptr for stack
+	 * termination heuristics and we don't want to break anything which
+	 * might depend on it (kgdb, ...).
+	 */
+	leaq	(__end_init_task - 8)(%rip), %rsp
+
 	/* Sanitize CPU configuration */
 	call verify_cpu
 
@@ -350,90 +359,48 @@
 	 */
 	cld
 
-	cmpl $2,(%rsp)		# X86_TRAP_NMI
-	je .Lis_nmi		# Ignore NMI
-
-	cmpl $2,early_recursion_flag(%rip)
-	jz  1f
 	incl early_recursion_flag(%rip)
 
-	pushq %rax		# 64(%rsp)
-	pushq %rcx		# 56(%rsp)
-	pushq %rdx		# 48(%rsp)
-	pushq %rsi		# 40(%rsp)
-	pushq %rdi		# 32(%rsp)
-	pushq %r8		# 24(%rsp)
-	pushq %r9		# 16(%rsp)
-	pushq %r10		#  8(%rsp)
-	pushq %r11		#  0(%rsp)
+	/* The vector number is currently in the pt_regs->di slot. */
+	pushq %rsi				/* pt_regs->si */
+	movq 8(%rsp), %rsi			/* RSI = vector number */
+	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
+	pushq %rdx				/* pt_regs->dx */
+	pushq %rcx				/* pt_regs->cx */
+	pushq %rax				/* pt_regs->ax */
+	pushq %r8				/* pt_regs->r8 */
+	pushq %r9				/* pt_regs->r9 */
+	pushq %r10				/* pt_regs->r10 */
+	pushq %r11				/* pt_regs->r11 */
+	pushq %rbx				/* pt_regs->bx */
+	pushq %rbp				/* pt_regs->bp */
+	pushq %r12				/* pt_regs->r12 */
+	pushq %r13				/* pt_regs->r13 */
+	pushq %r14				/* pt_regs->r14 */
+	pushq %r15				/* pt_regs->r15 */
 
-	cmpl $__KERNEL_CS,96(%rsp)
-	jne 11f
-
-	cmpl $14,72(%rsp)	# Page fault?
+	cmpq $14,%rsi		/* Page fault? */
 	jnz 10f
-	GET_CR2_INTO(%rdi)	# can clobber any volatile register if pv
+	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
 	call early_make_pgtable
 	andl %eax,%eax
-	jz 20f			# All good
+	jz 20f			/* All good */
 
 10:
-	leaq 88(%rsp),%rdi	# Pointer to %rip
+	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
 	call early_fixup_exception
-	andl %eax,%eax
-	jnz 20f			# Found an exception entry
 
-11:
-#ifdef CONFIG_EARLY_PRINTK
-	GET_CR2_INTO(%r9)	# can clobber any volatile register if pv
-	movl 80(%rsp),%r8d	# error code
-	movl 72(%rsp),%esi	# vector number
-	movl 96(%rsp),%edx	# %cs
-	movq 88(%rsp),%rcx	# %rip
-	xorl %eax,%eax
-	leaq early_idt_msg(%rip),%rdi
-	call early_printk
-	cmpl $2,early_recursion_flag(%rip)
-	jz  1f
-	call dump_stack
-#ifdef CONFIG_KALLSYMS	
-	leaq early_idt_ripmsg(%rip),%rdi
-	movq 40(%rsp),%rsi	# %rip again
-	call __print_symbol
-#endif
-#endif /* EARLY_PRINTK */
-1:	hlt
-	jmp 1b
-
-20:	# Exception table entry found or page table generated
-	popq %r11
-	popq %r10
-	popq %r9
-	popq %r8
-	popq %rdi
-	popq %rsi
-	popq %rdx
-	popq %rcx
-	popq %rax
+20:
 	decl early_recursion_flag(%rip)
-.Lis_nmi:
-	addq $16,%rsp		# drop vector number and error code
-	INTERRUPT_RETURN
+	jmp restore_regs_and_iret
 ENDPROC(early_idt_handler_common)
 
 	__INITDATA
 
 	.balign 4
-early_recursion_flag:
+GLOBAL(early_recursion_flag)
 	.long 0
 
-#ifdef CONFIG_EARLY_PRINTK
-early_idt_msg:
-	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
-early_idt_ripmsg:
-	.asciz "RIP %s\n"
-#endif /* CONFIG_EARLY_PRINTK */
-
 #define NEXT_PAGE(name) \
 	.balign	PAGE_SIZE; \
 GLOBAL(name)
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index a1f0e4a..f112af7 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -54,7 +54,7 @@
 	char				name[10];
 };
 
-inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev)
+static inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev)
 {
 	return container_of(evtdev, struct hpet_dev, evt);
 }
@@ -773,7 +773,6 @@
 	.mask		= HPET_MASK,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 	.resume		= hpet_resume_counter,
-	.archdata	= { .vclock_mode = VCLOCK_HPET },
 };
 
 static int hpet_clocksource_register(void)
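
EVT_TO_HPET_DEV(), made static above, is a plain container_of() accessor:
given a pointer to the embedded clock_event_device it recovers the enclosing
hpet_dev. A self-contained sketch of that pattern, using illustrative
structure names rather than the real kernel ones:

    #include <stddef.h>
    #include <stdio.h>

    struct clock_evt { int period; };

    struct hpet_like_dev {
        char name[10];
        struct clock_evt evt;          /* embedded member */
    };

    /* Simplified container_of(): step back from a member pointer to the
     * structure that embeds it. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct hpet_like_dev dev = { "timer0", { 42 } };
        struct clock_evt *e = &dev.evt;
        struct hpet_like_dev *d = container_of(e, struct hpet_like_dev, evt);

        printf("%s period=%d\n", d->name, d->evt.period);
        return 0;
    }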
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index e565e0e..fc25f69 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -13,6 +13,7 @@
 #include <linux/cpu.h>
 #include <asm/kprobes.h>
 #include <asm/alternative.h>
+#include <asm/text-patching.h>
 
 #ifdef HAVE_JUMP_LABEL
 
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 2af478e..f2356bd 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -19,8 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/efi.h>
-#include <linux/verify_pefile.h>
-#include <keys/system_keyring.h>
+#include <linux/verification.h>
 
 #include <asm/bootparam.h>
 #include <asm/setup.h>
@@ -529,18 +528,9 @@
 #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
 static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
 {
-	bool trusted;
-	int ret;
-
-	ret = verify_pefile_signature(kernel, kernel_len,
-				      system_trusted_keyring,
-				      VERIFYING_KEXEC_PE_SIGNATURE,
-				      &trusted);
-	if (ret < 0)
-		return ret;
-	if (!trusted)
-		return -EKEYREJECTED;
-	return 0;
+	return verify_pefile_signature(kernel, kernel_len,
+				       NULL,
+				       VERIFYING_KEXEC_PE_SIGNATURE);
 }
 #endif
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 2da6ee9..04cde52 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -45,6 +45,7 @@
 #include <linux/uaccess.h>
 #include <linux/memory.h>
 
+#include <asm/text-patching.h>
 #include <asm/debugreg.h>
 #include <asm/apicdef.h>
 #include <asm/apic.h>
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index ae703ac..38cf7a7 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -51,6 +51,7 @@
 #include <linux/ftrace.h>
 #include <linux/frame.h>
 
+#include <asm/text-patching.h>
 #include <asm/cacheflush.h>
 #include <asm/desc.h>
 #include <asm/pgtable.h>
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 7b3b9d1..4425f59 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -29,6 +29,7 @@
 #include <linux/kallsyms.h>
 #include <linux/ftrace.h>
 
+#include <asm/text-patching.h>
 #include <asm/cacheflush.h>
 #include <asm/desc.h>
 #include <asm/pgtable.h>
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8079508..eea2a6f 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -285,14 +285,6 @@
 {
 	pv_info.name = "KVM";
 
-	/*
-	 * KVM isn't paravirt in the sense of paravirt_enabled.  A KVM
-	 * guest kernel works like a bare metal kernel with additional
-	 * features, and paravirt_enabled is about features that are
-	 * missing.
-	 */
-	pv_info.paravirt_enabled = 0;
-
 	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
 		pv_cpu_ops.io_delay = kvm_io_delay;
 
@@ -522,7 +514,7 @@
 	if (boot_cpu_data.cpuid_level < 0)
 		return 0;	/* So we don't blow up on old processors */
 
-	if (cpu_has_hypervisor)
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
 
 	return 0;
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
deleted file mode 100644
index 92fc1a5..0000000
--- a/arch/x86/kernel/livepatch.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * livepatch.c - x86-specific Kernel Live Patching Core
- *
- * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
- * Copyright (C) 2014 SUSE
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/uaccess.h>
-#include <asm/elf.h>
-#include <asm/livepatch.h>
-
-/**
- * klp_write_module_reloc() - write a relocation in a module
- * @mod:	module in which the section to be modified is found
- * @type:	ELF relocation type (see asm/elf.h)
- * @loc:	address that the relocation should be written to
- * @value:	relocation value (sym address + addend)
- *
- * This function writes a relocation to the specified location for
- * a particular module.
- */
-int klp_write_module_reloc(struct module *mod, unsigned long type,
-			   unsigned long loc, unsigned long value)
-{
-	size_t size = 4;
-	unsigned long val;
-	unsigned long core = (unsigned long)mod->core_layout.base;
-	unsigned long core_size = mod->core_layout.size;
-
-	switch (type) {
-	case R_X86_64_NONE:
-		return 0;
-	case R_X86_64_64:
-		val = value;
-		size = 8;
-		break;
-	case R_X86_64_32:
-		val = (u32)value;
-		break;
-	case R_X86_64_32S:
-		val = (s32)value;
-		break;
-	case R_X86_64_PC32:
-		val = (u32)(value - loc);
-		break;
-	default:
-		/* unsupported relocation type */
-		return -EINVAL;
-	}
-
-	if (loc < core || loc >= core + core_size)
-		/* loc does not point to any symbol inside the module */
-		return -EINVAL;
-
-	return probe_kernel_write((void *)loc, &val, size);
-}
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 005c03e..477ae80 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -31,6 +31,7 @@
 #include <linux/jump_label.h>
 #include <linux/random.h>
 
+#include <asm/text-patching.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f08ac28..7b3b3f2 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -294,7 +294,6 @@
 
 struct pv_info pv_info = {
 	.name = "bare hardware",
-	.paravirt_enabled = 0,
 	.kernel_rpl = 0,
 	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */
 
@@ -339,8 +338,10 @@
 	.write_cr8 = native_write_cr8,
 #endif
 	.wbinvd = native_wbinvd,
-	.read_msr = native_read_msr_safe,
-	.write_msr = native_write_msr_safe,
+	.read_msr = native_read_msr,
+	.write_msr = native_write_msr,
+	.read_msr_safe = native_read_msr_safe,
+	.write_msr_safe = native_write_msr_safe,
 	.read_pmc = native_read_pmc,
 	.load_tr_desc = native_load_tr_desc,
 	.set_ldt = native_set_ldt,
diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
index 35ccf75..f712dfd 100644
--- a/arch/x86/kernel/pci-iommu_table.c
+++ b/arch/x86/kernel/pci-iommu_table.c
@@ -72,7 +72,7 @@
 	}
 }
 #else
-inline void check_iommu_entries(struct iommu_table_entry *start,
+void __init check_iommu_entries(struct iommu_table_entry *start,
 				       struct iommu_table_entry *finish)
 {
 }
diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
new file mode 100644
index 0000000..b2f8a33
--- /dev/null
+++ b/arch/x86/kernel/platform-quirks.c
@@ -0,0 +1,35 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/setup.h>
+#include <asm/bios_ebda.h>
+
+void __init x86_early_init_platform_quirks(void)
+{
+	x86_platform.legacy.rtc = 1;
+	x86_platform.legacy.ebda_search = 0;
+	x86_platform.legacy.devices.pnpbios = 1;
+
+	switch (boot_params.hdr.hardware_subarch) {
+	case X86_SUBARCH_PC:
+		x86_platform.legacy.ebda_search = 1;
+		break;
+	case X86_SUBARCH_XEN:
+	case X86_SUBARCH_LGUEST:
+	case X86_SUBARCH_INTEL_MID:
+	case X86_SUBARCH_CE4100:
+		x86_platform.legacy.devices.pnpbios = 0;
+		x86_platform.legacy.rtc = 0;
+		break;
+	}
+
+	if (x86_platform.set_legacy_features)
+		x86_platform.set_legacy_features();
+}
+
+#if defined(CONFIG_PNPBIOS)
+bool __init arch_pnpbios_disabled(void)
+{
+	return x86_platform.legacy.devices.pnpbios == 0;
+}
+#endif
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6cbab31..6b16c36 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -136,25 +136,6 @@
 	}
 }
 
-static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
-{
-	struct user_desc ud = {
-		.base_addr = addr,
-		.limit = 0xfffff,
-		.seg_32bit = 1,
-		.limit_in_pages = 1,
-		.useable = 1,
-	};
-	struct desc_struct *desc = t->thread.tls_array;
-	desc += tls;
-	fill_ldt(desc, &ud);
-}
-
-static inline u32 read_32bit_tls(struct task_struct *t, int tls)
-{
-	return get_desc_base(&t->thread.tls_array[tls]);
-}
-
 int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 		unsigned long arg, struct task_struct *p, unsigned long tls)
 {
@@ -169,9 +150,9 @@
 	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
-	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
+	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
 	savesegment(fs, p->thread.fsindex);
-	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
+	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
 	savesegment(es, p->thread.es);
 	savesegment(ds, p->thread.ds);
 	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
@@ -210,7 +191,7 @@
 	 */
 	if (clone_flags & CLONE_SETTLS) {
 #ifdef CONFIG_IA32_EMULATION
-		if (is_ia32_task())
+		if (in_ia32_syscall())
 			err = do_set_thread_area(p, -1,
 				(struct user_desc __user *)tls, 0);
 		else
@@ -282,7 +263,7 @@
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
-	unsigned fsindex, gsindex;
+	unsigned prev_fsindex, prev_gsindex;
 	fpu_switch_t fpu_switch;
 
 	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
@@ -292,8 +273,8 @@
 	 *
 	 * (e.g. xen_load_tls())
 	 */
-	savesegment(fs, fsindex);
-	savesegment(gs, gsindex);
+	savesegment(fs, prev_fsindex);
+	savesegment(gs, prev_gsindex);
 
 	/*
 	 * Load TLS before restoring any segments so that segment loads
@@ -336,66 +317,104 @@
 	 * Switch FS and GS.
 	 *
 	 * These are even more complicated than DS and ES: they have
-	 * 64-bit bases are that controlled by arch_prctl.  Those bases
-	 * only differ from the values in the GDT or LDT if the selector
-	 * is 0.
+	 * 64-bit bases that are controlled by arch_prctl.  The bases
+	 * don't necessarily match the selectors, as user code can do
+	 * any number of things to cause them to be inconsistent.
 	 *
-	 * Loading the segment register resets the hidden base part of
-	 * the register to 0 or the value from the GDT / LDT.  If the
-	 * next base address zero, writing 0 to the segment register is
-	 * much faster than using wrmsr to explicitly zero the base.
+	 * We don't promise to preserve the bases if the selectors are
+	 * nonzero.  We also don't promise to preserve the base if the
+	 * selector is zero and the base doesn't match whatever was
+	 * most recently passed to ARCH_SET_FS/GS.  (If/when the
+	 * FSGSBASE instructions are enabled, we'll need to offer
+	 * stronger guarantees.)
 	 *
-	 * The thread_struct.fs and thread_struct.gs values are 0
-	 * if the fs and gs bases respectively are not overridden
-	 * from the values implied by fsindex and gsindex.  They
-	 * are nonzero, and store the nonzero base addresses, if
-	 * the bases are overridden.
-	 *
-	 * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
-	 * be impossible.
-	 *
-	 * Therefore we need to reload the segment registers if either
-	 * the old or new selector is nonzero, and we need to override
-	 * the base address if next thread expects it to be overridden.
-	 *
-	 * This code is unnecessarily slow in the case where the old and
-	 * new indexes are zero and the new base is nonzero -- it will
-	 * unnecessarily write 0 to the selector before writing the new
-	 * base address.
-	 *
-	 * Note: This all depends on arch_prctl being the only way that
-	 * user code can override the segment base.  Once wrfsbase and
-	 * wrgsbase are enabled, most of this code will need to change.
+	 * As an invariant,
+	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
+	 * impossible.
 	 */
-	if (unlikely(fsindex | next->fsindex | prev->fs)) {
+	if (next->fsindex) {
+		/* Loading a nonzero value into FS sets the index and base. */
 		loadsegment(fs, next->fsindex);
-
-		/*
-		 * If user code wrote a nonzero value to FS, then it also
-		 * cleared the overridden base address.
-		 *
-		 * XXX: if user code wrote 0 to FS and cleared the base
-		 * address itself, we won't notice and we'll incorrectly
-		 * restore the prior base address next time we reschdule
-		 * the process.
-		 */
-		if (fsindex)
-			prev->fs = 0;
+	} else {
+		if (next->fsbase) {
+			/* Next index is zero but next base is nonzero. */
+			if (prev_fsindex)
+				loadsegment(fs, 0);
+			wrmsrl(MSR_FS_BASE, next->fsbase);
+		} else {
+			/* Next base and index are both zero. */
+			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
+				/*
+				 * We don't know the previous base and can't
+				 * find out without RDMSR.  Forcibly clear it.
+				 */
+				loadsegment(fs, __USER_DS);
+				loadsegment(fs, 0);
+			} else {
+				/*
+				 * If the previous index is zero and ARCH_SET_FS
+				 * didn't change the base, then the base is
+				 * also zero and we don't need to do anything.
+				 */
+				if (prev->fsbase || prev_fsindex)
+					loadsegment(fs, 0);
+			}
+		}
 	}
-	if (next->fs)
-		wrmsrl(MSR_FS_BASE, next->fs);
-	prev->fsindex = fsindex;
+	/*
+	 * Save the old state and preserve the invariant.
+	 * NB: if prev_fsindex == 0, then we can't reliably learn the base
+	 * without RDMSR because Intel user code can zero it without telling
+	 * us and AMD user code can program any 32-bit value without telling
+	 * us.
+	 */
+	if (prev_fsindex)
+		prev->fsbase = 0;
+	prev->fsindex = prev_fsindex;
 
-	if (unlikely(gsindex | next->gsindex | prev->gs)) {
+	if (next->gsindex) {
+		/* Loading a nonzero value into GS sets the index and base. */
 		load_gs_index(next->gsindex);
-
-		/* This works (and fails) the same way as fsindex above. */
-		if (gsindex)
-			prev->gs = 0;
+	} else {
+		if (next->gsbase) {
+			/* Next index is zero but next base is nonzero. */
+			if (prev_gsindex)
+				load_gs_index(0);
+			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
+		} else {
+			/* Next base and index are both zero. */
+			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
+				/*
+				 * We don't know the previous base and can't
+				 * find out without RDMSR.  Forcibly clear it.
+				 *
+				 * This contains a pointless SWAPGS pair.
+				 * Fixing it would involve an explicit check
+				 * for Xen or a new pvop.
+				 */
+				load_gs_index(__USER_DS);
+				load_gs_index(0);
+			} else {
+				/*
+				 * If the previous index is zero and ARCH_SET_GS
+				 * didn't change the base, then the base is
+				 * also zero and we don't need to do anything.
+				 */
+				if (prev->gsbase || prev_gsindex)
+					load_gs_index(0);
+			}
+		}
 	}
-	if (next->gs)
-		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
-	prev->gsindex = gsindex;
+	/*
+	 * Save the old state and preserve the invariant.
+	 * NB: if prev_gsindex == 0, then we can't reliably learn the base
+	 * without RDMSR because Intel user code can zero it without telling
+	 * us and AMD user code can program any 32-bit value without telling
+	 * us.
+	 */
+	if (prev_gsindex)
+		prev->gsbase = 0;
+	prev->gsindex = prev_gsindex;
 
 	switch_fpu_finish(next_fpu, fpu_switch);
 
@@ -516,23 +535,11 @@
 		if (addr >= TASK_SIZE_OF(task))
 			return -EPERM;
 		cpu = get_cpu();
-		/* handle small bases via the GDT because that's faster to
-		   switch. */
-		if (addr <= 0xffffffff) {
-			set_32bit_tls(task, GS_TLS, addr);
-			if (doit) {
-				load_TLS(&task->thread, cpu);
-				load_gs_index(GS_TLS_SEL);
-			}
-			task->thread.gsindex = GS_TLS_SEL;
-			task->thread.gs = 0;
-		} else {
-			task->thread.gsindex = 0;
-			task->thread.gs = addr;
-			if (doit) {
-				load_gs_index(0);
-				ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
-			}
+		task->thread.gsindex = 0;
+		task->thread.gsbase = addr;
+		if (doit) {
+			load_gs_index(0);
+			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
 		}
 		put_cpu();
 		break;
@@ -542,52 +549,30 @@
 		if (addr >= TASK_SIZE_OF(task))
 			return -EPERM;
 		cpu = get_cpu();
-		/* handle small bases via the GDT because that's faster to
-		   switch. */
-		if (addr <= 0xffffffff) {
-			set_32bit_tls(task, FS_TLS, addr);
-			if (doit) {
-				load_TLS(&task->thread, cpu);
-				loadsegment(fs, FS_TLS_SEL);
-			}
-			task->thread.fsindex = FS_TLS_SEL;
-			task->thread.fs = 0;
-		} else {
-			task->thread.fsindex = 0;
-			task->thread.fs = addr;
-			if (doit) {
-				/* set the selector to 0 to not confuse
-				   __switch_to */
-				loadsegment(fs, 0);
-				ret = wrmsrl_safe(MSR_FS_BASE, addr);
-			}
+		task->thread.fsindex = 0;
+		task->thread.fsbase = addr;
+		if (doit) {
+			/* set the selector to 0 to not confuse __switch_to */
+			loadsegment(fs, 0);
+			ret = wrmsrl_safe(MSR_FS_BASE, addr);
 		}
 		put_cpu();
 		break;
 	case ARCH_GET_FS: {
 		unsigned long base;
-		if (task->thread.fsindex == FS_TLS_SEL)
-			base = read_32bit_tls(task, FS_TLS);
-		else if (doit)
+		if (doit)
 			rdmsrl(MSR_FS_BASE, base);
 		else
-			base = task->thread.fs;
+			base = task->thread.fsbase;
 		ret = put_user(base, (unsigned long __user *)addr);
 		break;
 	}
 	case ARCH_GET_GS: {
 		unsigned long base;
-		unsigned gsindex;
-		if (task->thread.gsindex == GS_TLS_SEL)
-			base = read_32bit_tls(task, GS_TLS);
-		else if (doit) {
-			savesegment(gs, gsindex);
-			if (gsindex)
-				rdmsrl(MSR_KERNEL_GS_BASE, base);
-			else
-				base = task->thread.gs;
-		} else
-			base = task->thread.gs;
+		if (doit)
+			rdmsrl(MSR_KERNEL_GS_BASE, base);
+		else
+			base = task->thread.gsbase;
 		ret = put_user(base, (unsigned long __user *)addr);
 		break;
 	}
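
The rewritten FS/GS switching above is easiest to reason about from the
user-space side: arch_prctl(ARCH_SET_FS/GS, base) programs a base while the
selector stays zero, which is the "index zero, nonzero base" case the new
code handles explicitly. A minimal x86-64 sketch (glibc provides no
arch_prctl() wrapper, so the calls go through syscall()):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/prctl.h>          /* ARCH_GET_FS, ARCH_SET_GS, ... */

    static unsigned long scratch[4]; /* arbitrary user memory for the GS base */

    int main(void)
    {
        unsigned long fsbase = 0, gsbase = 0;

        syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase);
        syscall(SYS_arch_prctl, ARCH_GET_GS, &gsbase);
        printf("fsbase=%#lx gsbase=%#lx\n", fsbase, gsbase);

        /* Sets MSR_KERNEL_GS_BASE while the GS selector stays 0. */
        syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)scratch);
        return 0;
    }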
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 32e9d9c..e60ef91 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -303,29 +303,11 @@
 
 	switch (offset) {
 	case offsetof(struct user_regs_struct,fs):
-		/*
-		 * If this is setting fs as for normal 64-bit use but
-		 * setting fs_base has implicitly changed it, leave it.
-		 */
-		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
-		     task->thread.fs != 0) ||
-		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
-		     task->thread.fs == 0))
-			break;
 		task->thread.fsindex = value;
 		if (task == current)
 			loadsegment(fs, task->thread.fsindex);
 		break;
 	case offsetof(struct user_regs_struct,gs):
-		/*
-		 * If this is setting gs as for normal 64-bit use but
-		 * setting gs_base has implicitly changed it, leave it.
-		 */
-		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
-		     task->thread.gs != 0) ||
-		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
-		     task->thread.gs == 0))
-			break;
 		task->thread.gsindex = value;
 		if (task == current)
 			load_gs_index(task->thread.gsindex);
@@ -417,7 +399,7 @@
 		 * to set either thread.fs or thread.fsindex and the
 		 * corresponding GDT slot.
 		 */
-		if (child->thread.fs != value)
+		if (child->thread.fsbase != value)
 			return do_arch_prctl(child, ARCH_SET_FS, value);
 		return 0;
 	case offsetof(struct user_regs_struct,gs_base):
@@ -426,7 +408,7 @@
 		 */
 		if (value >= TASK_SIZE_OF(child))
 			return -EIO;
-		if (child->thread.gs != value)
+		if (child->thread.gsbase != value)
 			return do_arch_prctl(child, ARCH_SET_GS, value);
 		return 0;
 #endif
@@ -453,31 +435,17 @@
 #ifdef CONFIG_X86_64
 	case offsetof(struct user_regs_struct, fs_base): {
 		/*
-		 * do_arch_prctl may have used a GDT slot instead of
-		 * the MSR.  To userland, it appears the same either
-		 * way, except the %fs segment selector might not be 0.
+		 * XXX: This will not behave as expected if called on
+		 * current or if fsindex != 0.
 		 */
-		unsigned int seg = task->thread.fsindex;
-		if (task->thread.fs != 0)
-			return task->thread.fs;
-		if (task == current)
-			asm("movl %%fs,%0" : "=r" (seg));
-		if (seg != FS_TLS_SEL)
-			return 0;
-		return get_desc_base(&task->thread.tls_array[FS_TLS]);
+		return task->thread.fsbase;
 	}
 	case offsetof(struct user_regs_struct, gs_base): {
 		/*
-		 * Exactly the same here as the %fs handling above.
+		 * XXX: This will not behave as expected if called on
+		 * current or if gsindex != 0.
 		 */
-		unsigned int seg = task->thread.gsindex;
-		if (task->thread.gs != 0)
-			return task->thread.gs;
-		if (task == current)
-			asm("movl %%gs,%0" : "=r" (seg));
-		if (seg != GS_TLS_SEL)
-			return 0;
-		return get_desc_base(&task->thread.tls_array[GS_TLS]);
+		return task->thread.gsbase;
 	}
 #endif
 	}
@@ -1266,7 +1234,7 @@
 			compat_ulong_t caddr, compat_ulong_t cdata)
 {
 #ifdef CONFIG_X86_X32_ABI
-	if (!is_ia32_task())
+	if (!in_ia32_syscall())
 		return x32_arch_ptrace(child, request, caddr, cdata);
 #endif
 #ifdef CONFIG_IA32_EMULATION
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index ab0adc0..a9b31eb 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -535,6 +535,15 @@
 	mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0;
 	*((unsigned short *)__va(0x472)) = mode;
 
+	/*
+	 * If an EFI capsule has been registered with the firmware then
+	 * override the reboot= parameter.
+	 */
+	if (efi_capsule_pending(NULL)) {
+		pr_info("EFI capsule is pending, forcing EFI reboot.\n");
+		reboot_type = BOOT_EFI;
+	}
+
 	for (;;) {
 		/* Could also try the reset bit in the Hammer NB */
 		switch (reboot_type) {
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 4af8d06..eceaa08 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -14,6 +14,7 @@
 #include <asm/time.h>
 #include <asm/intel-mid.h>
 #include <asm/rtc.h>
+#include <asm/setup.h>
 
 #ifdef CONFIG_X86_32
 /*
@@ -185,22 +186,7 @@
 		}
 	}
 #endif
-	if (of_have_populated_dt())
-		return 0;
-
-	/* Intel MID platforms don't have ioport rtc */
-	if (intel_mid_identify_cpu())
-		return -ENODEV;
-
-#ifdef CONFIG_ACPI
-	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
-		/* This warning can likely go away again in a year or two. */
-		pr_info("ACPI: not registering RTC platform device\n");
-		return -ENODEV;
-	}
-#endif
-
-	if (paravirt_enabled() && !paravirt_has(RTC))
+	if (!x86_platform.legacy.rtc)
 		return -ENODEV;
 
 	platform_device_register(&rtc_device);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 2367ae0..c4e7b39 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -398,6 +398,11 @@
 
 	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 }
+
+static void __init early_initrd_acpi_init(void)
+{
+	early_acpi_table_init((void *)initrd_start, initrd_end - initrd_start);
+}
 #else
 static void __init early_reserve_initrd(void)
 {
@@ -405,6 +410,9 @@
 static void __init reserve_initrd(void)
 {
 }
+static void __init early_initrd_acpi_init(void)
+{
+}
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 static void __init parse_setup_data(void)
@@ -1138,9 +1146,7 @@
 
 	reserve_initrd();
 
-#if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD)
-	acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
-#endif
+	early_initrd_acpi_init();
 
 	vsmp_init();
 
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 548ddf7..22cc2f9 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -248,18 +248,17 @@
 	if (config_enabled(CONFIG_X86_64))
 		sp -= 128;
 
-	if (!onsigstack) {
-		/* This is the X/Open sanctioned signal stack switching.  */
-		if (ka->sa.sa_flags & SA_ONSTACK) {
-			if (current->sas_ss_size)
-				sp = current->sas_ss_sp + current->sas_ss_size;
-		} else if (config_enabled(CONFIG_X86_32) &&
-			   (regs->ss & 0xffff) != __USER_DS &&
-			   !(ka->sa.sa_flags & SA_RESTORER) &&
-			   ka->sa.sa_restorer) {
-				/* This is the legacy signal stack switching. */
-				sp = (unsigned long) ka->sa.sa_restorer;
-		}
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		if (sas_ss_flags(sp) == 0)
+			sp = current->sas_ss_sp + current->sas_ss_size;
+	} else if (config_enabled(CONFIG_X86_32) &&
+		   !onsigstack &&
+		   (regs->ss & 0xffff) != __USER_DS &&
+		   !(ka->sa.sa_flags & SA_RESTORER) &&
+		   ka->sa.sa_restorer) {
+		/* This is the legacy signal stack switching. */
+		sp = (unsigned long) ka->sa.sa_restorer;
 	}
 
 	if (fpu->fpstate_active) {
@@ -391,7 +390,7 @@
 		put_user_ex(&frame->uc, &frame->puc);
 
 		/* Create the ucontext.  */
-		if (cpu_has_xsave)
+		if (boot_cpu_has(X86_FEATURE_XSAVE))
 			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
 		else
 			put_user_ex(0, &frame->uc.uc_flags);
@@ -442,7 +441,7 @@
 {
 	unsigned long flags;
 
-	if (cpu_has_xsave)
+	if (boot_cpu_has(X86_FEATURE_XSAVE))
 		flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
 	else
 		flags = UC_SIGCONTEXT_SS;
@@ -762,7 +761,7 @@
 static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_64
-	if (is_ia32_task())
+	if (in_ia32_syscall())
 		return __NR_ia32_restart_syscall;
 #endif
 #ifdef CONFIG_X86_X32_ABI
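
The get_sigframe() change above concerns handlers registered with
SA_ONSTACK: the switch to the alternate stack now follows sas_ss_flags(sp)
rather than a blanket "already on the signal stack" check. For context, a
minimal user-space setup of such a handler (illustrative only):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void handler(int sig)
    {
        int probe;
        /* &probe lies inside the alternate stack, not the main stack. */
        printf("signal %d handled near %p\n", sig, (void *)&probe);
    }

    int main(void)
    {
        stack_t ss = { 0 };
        struct sigaction sa = { 0 };

        ss.ss_size = SIGSTKSZ;
        ss.ss_sp = malloc(ss.ss_size);
        if (!ss.ss_sp || sigaltstack(&ss, NULL))
            return 1;

        sa.sa_handler = handler;
        sa.sa_flags = SA_ONSTACK;       /* request delivery on the alt stack */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        printf("alternate stack at %p\n", ss.ss_sp);
        raise(SIGUSR1);
        return 0;
    }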
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index b2c99f8..fafe8b9 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -332,6 +332,11 @@
 	 * primary cores.
 	 */
 	ncpus = boot_cpu_data.x86_max_cores;
+	if (!ncpus) {
+		pr_warn("x86_max_cores == zero !?!?");
+		ncpus = 1;
+	}
+
 	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
 
 	/*
@@ -422,7 +427,7 @@
 
 		if (c->phys_proc_id == o->phys_proc_id &&
 		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
-		    c->compute_unit_id == o->compute_unit_id)
+		    c->cpu_core_id == o->cpu_core_id)
 			return topology_sane(c, o, "smt");
 
 	} else if (c->phys_proc_id == o->phys_proc_id &&
@@ -1231,7 +1236,7 @@
 	 * If we couldn't find a local APIC, then get out of here now!
 	 */
 	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
-	    !cpu_has_apic) {
+	    !boot_cpu_has(X86_FEATURE_APIC)) {
 		if (!disable_apic) {
 			pr_err("BIOS bug, local APIC #%d not detected!...\n",
 				boot_cpu_physical_apicid);
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index b285d4e..623965e 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -68,6 +68,21 @@
 	[M_UNKNOWN] = { NULL, 0, 0, 0, 0, OVERRIDE_NONE }
 };
 
+void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+{
+	int i;
+
+	for (i = 0; i < M_UNKNOWN; i++) {
+		if (efifb_dmi_list[i].base != 0 &&
+		    !strcmp(opt, efifb_dmi_list[i].optname)) {
+			si->lfb_base = efifb_dmi_list[i].base;
+			si->lfb_linelength = efifb_dmi_list[i].stride;
+			si->lfb_width = efifb_dmi_list[i].width;
+			si->lfb_height = efifb_dmi_list[i].height;
+		}
+	}
+}
+
 #define choose_value(dmivalue, fwvalue, field, flags) ({	\
 		typeof(fwvalue) _ret_ = fwvalue;		\
 		if ((flags) & (field))				\
@@ -106,14 +121,24 @@
 					continue;
 				for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 					resource_size_t start, end;
+					unsigned long flags;
+
+					flags = pci_resource_flags(dev, i);
+					if (!(flags & IORESOURCE_MEM))
+						continue;
+
+					if (flags & IORESOURCE_UNSET)
+						continue;
+
+					if (pci_resource_len(dev, i) == 0)
+						continue;
 
 					start = pci_resource_start(dev, i);
-					if (start == 0)
-						break;
 					end = pci_resource_end(dev, i);
 					if (screen_info.lfb_base >= start &&
 					    screen_info.lfb_base < end) {
 						found_bar = 1;
+						break;
 					}
 				}
 			}
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index e72a07f..9b0185f 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -74,12 +74,6 @@
 		return;
 	}
 
-	/* only a natively booted kernel should be using TXT */
-	if (paravirt_enabled()) {
-		pr_warning("non-0 tboot_addr but pv_ops is enabled\n");
-		return;
-	}
-
 	/* Map and check for tboot UUID. */
 	set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr);
 	tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE);
diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c
index ab40954..f386bad 100644
--- a/arch/x86/kernel/tce_64.c
+++ b/arch/x86/kernel/tce_64.c
@@ -40,7 +40,7 @@
 static inline void flush_tce(void* tceaddr)
 {
 	/* a single tce can't cross a cache line */
-	if (cpu_has_clflush)
+	if (boot_cpu_has(X86_FEATURE_CLFLUSH))
 		clflush(tceaddr);
 	else
 		wbinvd();
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 7fc5e84..9692a5e 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -114,6 +114,7 @@
 		       int can_allocate)
 {
 	struct user_desc info;
+	unsigned short __maybe_unused sel, modified_sel;
 
 	if (copy_from_user(&info, u_info, sizeof(info)))
 		return -EFAULT;
@@ -141,6 +142,47 @@
 
 	set_tls_desc(p, idx, &info, 1);
 
+	/*
+	 * If DS, ES, FS, or GS points to the modified segment, forcibly
+	 * refresh it.  Only needed on x86_64 because x86_32 reloads them
+	 * on return to user mode.
+	 */
+	modified_sel = (idx << 3) | 3;
+
+	if (p == current) {
+#ifdef CONFIG_X86_64
+		savesegment(ds, sel);
+		if (sel == modified_sel)
+			loadsegment(ds, sel);
+
+		savesegment(es, sel);
+		if (sel == modified_sel)
+			loadsegment(es, sel);
+
+		savesegment(fs, sel);
+		if (sel == modified_sel)
+			loadsegment(fs, sel);
+
+		savesegment(gs, sel);
+		if (sel == modified_sel)
+			load_gs_index(sel);
+#endif
+
+#ifdef CONFIG_X86_32_LAZY_GS
+		savesegment(gs, sel);
+		if (sel == modified_sel)
+			loadsegment(gs, sel);
+#endif
+	} else {
+#ifdef CONFIG_X86_64
+		if (p->thread.fsindex == modified_sel)
+			p->thread.fsbase = info.base_addr;
+
+		if (p->thread.gsindex == modified_sel)
+			p->thread.gsbase = info.base_addr;
+#endif
+	}
+
 	return 0;
 }
 
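The tls.c change above rebuilds the user-visible selector for the modified
GDT slot as (idx << 3) | 3, i.e. the index in bits 15:3, TI=0 (GDT) and
RPL=3. A tiny decoder showing that encoding (the index 12 stands in for the
first TLS slot and is illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned short sel = (12 << 3) | 3;

        printf("selector %#x: index=%u ti=%u rpl=%u\n",
               sel, sel >> 3, (sel >> 2) & 1, sel & 3);
        return 0;
    }
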
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 06cbe25..d159048 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -51,6 +51,7 @@
 #include <asm/processor.h>
 #include <asm/debugreg.h>
 #include <linux/atomic.h>
+#include <asm/text-patching.h>
 #include <asm/ftrace.h>
 #include <asm/traps.h>
 #include <asm/desc.h>
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c9c4c7c..38ba6de 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -36,7 +36,7 @@
 
 /* native_sched_clock() is called before tsc_init(), so
    we must start with the TSC soft disabled to prevent
-   erroneous rdtsc usage on !cpu_has_tsc processors */
+   erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
 static int __read_mostly tsc_disabled = -1;
 
 static DEFINE_STATIC_KEY_FALSE(__use_tsc);
@@ -834,15 +834,15 @@
 #ifndef CONFIG_SMP
 	unsigned long cpu_khz_old = cpu_khz;
 
-	if (cpu_has_tsc) {
-		tsc_khz = x86_platform.calibrate_tsc();
-		cpu_khz = tsc_khz;
-		cpu_data(0).loops_per_jiffy =
-			cpufreq_scale(cpu_data(0).loops_per_jiffy,
-					cpu_khz_old, cpu_khz);
-		return 0;
-	} else
+	if (!boot_cpu_has(X86_FEATURE_TSC))
 		return -ENODEV;
+
+	tsc_khz = x86_platform.calibrate_tsc();
+	cpu_khz = tsc_khz;
+	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
+						    cpu_khz_old, cpu_khz);
+
+	return 0;
 #else
 	return -ENODEV;
 #endif
@@ -922,9 +922,6 @@
 	struct cpufreq_freqs *freq = data;
 	unsigned long *lpj;
 
-	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
-		return 0;
-
 	lpj = &boot_cpu_data.loops_per_jiffy;
 #ifdef CONFIG_SMP
 	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
@@ -954,9 +951,9 @@
 	.notifier_call  = time_cpufreq_notifier
 };
 
-static int __init cpufreq_tsc(void)
+static int __init cpufreq_register_tsc_scaling(void)
 {
-	if (!cpu_has_tsc)
+	if (!boot_cpu_has(X86_FEATURE_TSC))
 		return 0;
 	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 		return 0;
@@ -965,7 +962,7 @@
 	return 0;
 }
 
-core_initcall(cpufreq_tsc);
+core_initcall(cpufreq_register_tsc_scaling);
 
 #endif /* CONFIG_CPU_FREQ */
 
@@ -1081,7 +1078,7 @@
  */
 int unsynchronized_tsc(void)
 {
-	if (!cpu_has_tsc || tsc_unstable)
+	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
 		return 1;
 
 #ifdef CONFIG_SMP
@@ -1205,7 +1202,7 @@
 
 static int __init init_tsc_clocksource(void)
 {
-	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
+	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
 		return 0;
 
 	if (tsc_clocksource_reliable)
@@ -1242,7 +1239,7 @@
 	u64 lpj;
 	int cpu;
 
-	if (!cpu_has_tsc) {
+	if (!boot_cpu_has(X86_FEATURE_TSC)) {
 		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
 		return;
 	}
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 92ae6ac..6aa0f4d 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -92,7 +92,7 @@
 
 	if (freq_desc_tables[cpu_index].msr_plat) {
 		rdmsr(MSR_PLATFORM_INFO, lo, hi);
-		ratio = (lo >> 8) & 0x1f;
+		ratio = (lo >> 8) & 0xff;
 	} else {
 		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
 		ratio = (hi >> 8) & 0x1f;
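
The tsc_msr.c hunk widens the mask applied to MSR_PLATFORM_INFO bits 15:8,
which hold the maximum non-turbo ratio; with the old 5-bit mask any ratio
above 31 was silently truncated. A toy calculation showing the effect (the
100 MHz reference clock is illustrative; the real value comes from
freq_desc_tables[]):

    #include <stdio.h>

    int main(void)
    {
        unsigned int lo = 0x2a00;       /* MSR_PLATFORM_INFO low word, ratio 42 */
        unsigned int fsb_khz = 100000;  /* illustrative 100 MHz reference clock */

        unsigned int old_ratio = (lo >> 8) & 0x1f;   /* 42 & 0x1f == 10 */
        unsigned int new_ratio = (lo >> 8) & 0xff;   /* 42 */

        printf("old mask: %u MHz\n", old_ratio * fsb_khz / 1000);  /* 1000 */
        printf("new mask: %u MHz\n", new_ratio * fsb_khz / 1000);  /* 4200 */
        return 0;
    }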
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index bf4db6e..6c1ff31 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -516,7 +516,7 @@
 
 static inline int sizeof_long(void)
 {
-	return is_ia32_task() ? 4 : 8;
+	return in_ia32_syscall() ? 4 : 8;
 }
 
 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
@@ -578,7 +578,7 @@
 	riprel_post_xol(auprobe, regs);
 }
 
-static struct uprobe_xol_ops default_xol_ops = {
+static const struct uprobe_xol_ops default_xol_ops = {
 	.pre_xol  = default_pre_xol_op,
 	.post_xol = default_post_xol_op,
 	.abort	  = default_abort_op,
@@ -695,7 +695,7 @@
 		0, insn->immediate.nbytes);
 }
 
-static struct uprobe_xol_ops branch_xol_ops = {
+static const struct uprobe_xol_ops branch_xol_ops = {
 	.emulate  = branch_emulate_op,
 	.post_xol = branch_post_xol_op,
 };
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 4c941f8..9297a00 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -334,7 +334,7 @@
 		__brk_limit = .;
 	}
 
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
 	_end = .;
 
         STABS_DEBUG
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8efb839..769af90 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -75,7 +75,7 @@
 		return 0;
 
 	/* Update OSXSAVE bit */
-	if (cpu_has_xsave && best->function == 0x1) {
+	if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
 		best->ecx &= ~F(OSXSAVE);
 		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
 			best->ecx |= F(OSXSAVE);
@@ -534,6 +534,7 @@
 			do_cpuid_1_ent(&entry[i], function, idx);
 			if (idx == 1) {
 				entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
+				cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
 				entry[i].ebx = 0;
 				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
 					entry[i].ebx =
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0f62943..a2f24af 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5110,13 +5110,17 @@
 
 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
 {
+	register void *__sp asm(_ASM_SP);
 	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
+
 	if (!(ctxt->d & ByteOp))
 		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
+
 	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
 	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
-	      [fastop]"+S"(fop)
+	      [fastop]"+S"(fop), "+r"(__sp)
 	    : "c"(ctxt->src2.val));
+
 	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
 	if (!fop) /* exception is returned in fop variable */
 		return emulate_de(ctxt);
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 5ff3485..01bd7b7 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1116,6 +1116,11 @@
 		break;
 	case HVCALL_POST_MESSAGE:
 	case HVCALL_SIGNAL_EVENT:
+		/* don't bother userspace if it has no way to handle it */
+		if (!vcpu_to_synic(vcpu)->active) {
+			res = HV_STATUS_INVALID_HYPERCALL_CODE;
+			break;
+		}
 		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
 		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
 		vcpu->run->hyperv.u.hcall.input = param;
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 9db47090..5f42d03 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -443,7 +443,7 @@
 		spin_lock(&ioapic->lock);
 
 		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
-		    kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
+		    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
 			continue;
 
 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c
index a22a488..3069281 100644
--- a/arch/x86/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -254,7 +254,7 @@
 	    !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
 		printk(KERN_WARNING "%s: No interrupt remapping support,"
 		       " disallowing device assignment."
-		       " Re-enble with \"allow_unsafe_assigned_interrupts=1\""
+		       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
 		       " module option.\n", __func__);
 		iommu_domain_free(kvm->arch.iommu_domain);
 		kvm->arch.iommu_domain = NULL;
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 54ead79..dfb4c64 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -382,9 +382,6 @@
 	u32 i, nr_ioapic_pins;
 	int idx;
 
-	/* kvm->irq_routing must be read after clearing
-	 * KVM_SCAN_IOAPIC. */
-	smp_mb();
 	idx = srcu_read_lock(&kvm->irq_srcu);
 	table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
 	nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 443d2a5..bbb5b28 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -59,9 +59,8 @@
 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
 #define apic_debug(fmt, arg...)
 
-#define APIC_LVT_NUM			6
 /* 14 is the version for Xeon and Pentium 8.4.8*/
-#define APIC_VERSION			(0x14UL | ((APIC_LVT_NUM - 1) << 16))
+#define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
 #define LAPIC_MMIO_LENGTH		(1 << 12)
 /* followed define is not in apicdef.h */
 #define APIC_SHORT_MASK			0xc0000
@@ -73,14 +72,6 @@
 #define APIC_BROADCAST			0xFF
 #define X2APIC_BROADCAST		0xFFFFFFFFul
 
-#define VEC_POS(v) ((v) & (32 - 1))
-#define REG_POS(v) (((v) >> 5) << 4)
-
-static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
-{
-	*((u32 *) (apic->regs + reg_off)) = val;
-}
-
 static inline int apic_test_vector(int vec, void *bitmap)
 {
 	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -94,11 +85,6 @@
 		apic_test_vector(vector, apic->regs + APIC_IRR);
 }
 
-static inline void apic_set_vector(int vec, void *bitmap)
-{
-	set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
-}
-
 static inline void apic_clear_vector(int vec, void *bitmap)
 {
 	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -173,7 +159,7 @@
 			continue;
 
 		aid = kvm_apic_id(apic);
-		ldr = kvm_apic_get_reg(apic, APIC_LDR);
+		ldr = kvm_lapic_get_reg(apic, APIC_LDR);
 
 		if (aid < ARRAY_SIZE(new->phys_map))
 			new->phys_map[aid] = apic;
@@ -182,7 +168,7 @@
 			new->mode |= KVM_APIC_MODE_X2APIC;
 		} else if (ldr) {
 			ldr = GET_APIC_LOGICAL_ID(ldr);
-			if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
+			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
 				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
 			else
 				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
@@ -212,7 +198,7 @@
 {
 	bool enabled = val & APIC_SPIV_APIC_ENABLED;
 
-	apic_set_reg(apic, APIC_SPIV, val);
+	kvm_lapic_set_reg(apic, APIC_SPIV, val);
 
 	if (enabled != apic->sw_enabled) {
 		apic->sw_enabled = enabled;
@@ -226,13 +212,13 @@
 
 static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
 {
-	apic_set_reg(apic, APIC_ID, id << 24);
+	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
 	recalculate_apic_map(apic->vcpu->kvm);
 }
 
 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 {
-	apic_set_reg(apic, APIC_LDR, id);
+	kvm_lapic_set_reg(apic, APIC_LDR, id);
 	recalculate_apic_map(apic->vcpu->kvm);
 }
 
@@ -240,19 +226,19 @@
 {
 	u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
 
-	apic_set_reg(apic, APIC_ID, id << 24);
-	apic_set_reg(apic, APIC_LDR, ldr);
+	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
+	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
 	recalculate_apic_map(apic->vcpu->kvm);
 }
 
 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
 {
-	return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
+	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
 }
 
 static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
 {
-	return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
+	return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
 }
 
 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
@@ -287,10 +273,10 @@
 	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
 	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
 		v |= APIC_LVR_DIRECTED_EOI;
-	apic_set_reg(apic, APIC_LVR, v);
+	kvm_lapic_set_reg(apic, APIC_LVR, v);
 }
 
-static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
+static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
 	LVT_MASK ,      /* part LVTT mask, timer mode mask added at runtime */
 	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
 	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
@@ -349,16 +335,6 @@
 }
 EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
 
-static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
-{
-	apic_set_vector(vec, apic->regs + APIC_IRR);
-	/*
-	 * irr_pending must be true if any interrupt is pending; set it after
-	 * APIC_IRR to avoid race with apic_clear_irr
-	 */
-	apic->irr_pending = true;
-}
-
 static inline int apic_search_irr(struct kvm_lapic *apic)
 {
 	return find_highest_vector(apic->regs + APIC_IRR);
@@ -416,7 +392,7 @@
 	 * just set SVI.
 	 */
 	if (unlikely(vcpu->arch.apicv_active))
-		kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
+		kvm_x86_ops->hwapic_isr_update(vcpu, vec);
 	else {
 		++apic->isr_count;
 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -464,7 +440,7 @@
 	 * and must be left alone.
 	 */
 	if (unlikely(vcpu->arch.apicv_active))
-		kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
+		kvm_x86_ops->hwapic_isr_update(vcpu,
 					       apic_find_highest_isr(apic));
 	else {
 		--apic->isr_count;
@@ -549,8 +525,8 @@
 	u32 tpr, isrv, ppr, old_ppr;
 	int isr;
 
-	old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
-	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
+	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
+	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
 	isr = apic_find_highest_isr(apic);
 	isrv = (isr != -1) ? isr : 0;
 
@@ -563,7 +539,7 @@
 		   apic, ppr, isr, isrv);
 
 	if (old_ppr != ppr) {
-		apic_set_reg(apic, APIC_PROCPRI, ppr);
+		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
 		if (ppr < old_ppr)
 			kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 	}
@@ -571,7 +547,7 @@
 
 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
 {
-	apic_set_reg(apic, APIC_TASKPRI, tpr);
+	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
 	apic_update_ppr(apic);
 }
 
@@ -601,7 +577,7 @@
 	if (kvm_apic_broadcast(apic, mda))
 		return true;
 
-	logical_id = kvm_apic_get_reg(apic, APIC_LDR);
+	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
 
 	if (apic_x2apic_mode(apic))
 		return ((logical_id >> 16) == (mda >> 16))
@@ -610,7 +586,7 @@
 	logical_id = GET_APIC_LOGICAL_ID(logical_id);
 	mda = GET_APIC_DEST_FIELD(mda);
 
-	switch (kvm_apic_get_reg(apic, APIC_DFR)) {
+	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
 	case APIC_DFR_FLAT:
 		return (logical_id & mda) != 0;
 	case APIC_DFR_CLUSTER:
@@ -618,7 +594,7 @@
 		       && (logical_id & mda & 0xf) != 0;
 	default:
 		apic_debug("Bad DFR vcpu %d: %08x\n",
-			   apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
+			   apic->vcpu->vcpu_id, kvm_lapic_get_reg(apic, APIC_DFR));
 		return false;
 	}
 }
@@ -668,6 +644,7 @@
 		return false;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
 
 int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
 		       const unsigned long *bitmap, u32 bitmap_size)
@@ -921,7 +898,7 @@
 
 		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
 			if (trig_mode)
-				apic_set_vector(vector, apic->regs + APIC_TMR);
+				kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
 			else
 				apic_clear_vector(vector, apic->regs + APIC_TMR);
 		}
@@ -929,7 +906,7 @@
 		if (vcpu->arch.apicv_active)
 			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
 		else {
-			apic_set_irr(vector, apic);
+			kvm_lapic_set_irr(vector, apic);
 
 			kvm_make_request(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
@@ -1073,8 +1050,8 @@
 
 static void apic_send_ipi(struct kvm_lapic *apic)
 {
-	u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
-	u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2);
+	u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR);
+	u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2);
 	struct kvm_lapic_irq irq;
 
 	irq.vector = icr_low & APIC_VECTOR_MASK;
@@ -1111,7 +1088,7 @@
 	ASSERT(apic != NULL);
 
 	/* if initial count is 0, current count should also be 0 */
-	if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
+	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
 		apic->lapic_timer.period == 0)
 		return 0;
 
@@ -1168,13 +1145,13 @@
 		break;
 	case APIC_PROCPRI:
 		apic_update_ppr(apic);
-		val = kvm_apic_get_reg(apic, offset);
+		val = kvm_lapic_get_reg(apic, offset);
 		break;
 	case APIC_TASKPRI:
 		report_tpr_access(apic, false);
 		/* fall thru */
 	default:
-		val = kvm_apic_get_reg(apic, offset);
+		val = kvm_lapic_get_reg(apic, offset);
 		break;
 	}
 
@@ -1186,7 +1163,7 @@
 	return container_of(dev, struct kvm_lapic, dev);
 }
 
-static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
+int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 		void *data)
 {
 	unsigned char alignment = offset & 0xf;
@@ -1223,6 +1200,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
 
 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
 {
@@ -1240,7 +1218,7 @@
 	if (!apic_mmio_in_range(apic, address))
 		return -EOPNOTSUPP;
 
-	apic_reg_read(apic, offset, len, data);
+	kvm_lapic_reg_read(apic, offset, len, data);
 
 	return 0;
 }
@@ -1249,7 +1227,7 @@
 {
 	u32 tmp1, tmp2, tdcr;
 
-	tdcr = kvm_apic_get_reg(apic, APIC_TDCR);
+	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
 	tmp1 = tdcr & 0xf;
 	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
 	apic->divide_count = 0x1 << (tmp2 & 0x7);
@@ -1260,7 +1238,7 @@
 
 static void apic_update_lvtt(struct kvm_lapic *apic)
 {
-	u32 timer_mode = kvm_apic_get_reg(apic, APIC_LVTT) &
+	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
 			apic->lapic_timer.timer_mode_mask;
 
 	if (apic->lapic_timer.timer_mode != timer_mode) {
@@ -1296,7 +1274,7 @@
 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
-	u32 reg = kvm_apic_get_reg(apic, APIC_LVTT);
+	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
 
 	if (kvm_apic_hw_enabled(apic)) {
 		int vec = reg & APIC_VECTOR_MASK;
@@ -1344,7 +1322,7 @@
 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
 		/* lapic timer in oneshot or periodic mode */
 		now = apic->lapic_timer.timer.base->get_time();
-		apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT)
+		apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
 			    * APIC_BUS_CYCLE_NS * apic->divide_count;
 
 		if (!apic->lapic_timer.period)
@@ -1369,14 +1347,14 @@
 
 		hrtimer_start(&apic->lapic_timer.timer,
 			      ktime_add_ns(now, apic->lapic_timer.period),
-			      HRTIMER_MODE_ABS);
+			      HRTIMER_MODE_ABS_PINNED);
 
 		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
 			   PRIx64 ", "
 			   "timer initial count 0x%x, period %lldns, "
 			   "expire @ 0x%016" PRIx64 ".\n", __func__,
 			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
-			   kvm_apic_get_reg(apic, APIC_TMICT),
+			   kvm_lapic_get_reg(apic, APIC_TMICT),
 			   apic->lapic_timer.period,
 			   ktime_to_ns(ktime_add_ns(now,
 					apic->lapic_timer.period)));
@@ -1402,7 +1380,7 @@
 			expire = ktime_add_ns(now, ns);
 			expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
 			hrtimer_start(&apic->lapic_timer.timer,
-				      expire, HRTIMER_MODE_ABS);
+				      expire, HRTIMER_MODE_ABS_PINNED);
 		} else
 			apic_timer_expired(apic);
 
@@ -1425,7 +1403,7 @@
 	}
 }
 
-static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
+int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 {
 	int ret = 0;
 
@@ -1457,7 +1435,7 @@
 
 	case APIC_DFR:
 		if (!apic_x2apic_mode(apic)) {
-			apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
+			kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
 			recalculate_apic_map(apic->vcpu->kvm);
 		} else
 			ret = 1;
@@ -1465,17 +1443,17 @@
 
 	case APIC_SPIV: {
 		u32 mask = 0x3ff;
-		if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
+		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
 			mask |= APIC_SPIV_DIRECTED_EOI;
 		apic_set_spiv(apic, val & mask);
 		if (!(val & APIC_SPIV_APIC_ENABLED)) {
 			int i;
 			u32 lvt_val;
 
-			for (i = 0; i < APIC_LVT_NUM; i++) {
-				lvt_val = kvm_apic_get_reg(apic,
+			for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
+				lvt_val = kvm_lapic_get_reg(apic,
 						       APIC_LVTT + 0x10 * i);
-				apic_set_reg(apic, APIC_LVTT + 0x10 * i,
+				kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
 					     lvt_val | APIC_LVT_MASKED);
 			}
 			apic_update_lvtt(apic);
@@ -1486,14 +1464,14 @@
 	}
 	case APIC_ICR:
 		/* No delay here, so we always clear the pending bit */
-		apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
+		kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
 		apic_send_ipi(apic);
 		break;
 
 	case APIC_ICR2:
 		if (!apic_x2apic_mode(apic))
 			val &= 0xff000000;
-		apic_set_reg(apic, APIC_ICR2, val);
+		kvm_lapic_set_reg(apic, APIC_ICR2, val);
 		break;
 
 	case APIC_LVT0:
@@ -1507,7 +1485,7 @@
 			val |= APIC_LVT_MASKED;
 
 		val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
-		apic_set_reg(apic, reg, val);
+		kvm_lapic_set_reg(apic, reg, val);
 
 		break;
 
@@ -1515,7 +1493,7 @@
 		if (!kvm_apic_sw_enabled(apic))
 			val |= APIC_LVT_MASKED;
 		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
-		apic_set_reg(apic, APIC_LVTT, val);
+		kvm_lapic_set_reg(apic, APIC_LVTT, val);
 		apic_update_lvtt(apic);
 		break;
 
@@ -1524,14 +1502,14 @@
 			break;
 
 		hrtimer_cancel(&apic->lapic_timer.timer);
-		apic_set_reg(apic, APIC_TMICT, val);
+		kvm_lapic_set_reg(apic, APIC_TMICT, val);
 		start_apic_timer(apic);
 		break;
 
 	case APIC_TDCR:
 		if (val & 4)
 			apic_debug("KVM_WRITE:TDCR %x\n", val);
-		apic_set_reg(apic, APIC_TDCR, val);
+		kvm_lapic_set_reg(apic, APIC_TDCR, val);
 		update_divide_count(apic);
 		break;
 
@@ -1544,7 +1522,7 @@
 
 	case APIC_SELF_IPI:
 		if (apic_x2apic_mode(apic)) {
-			apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
+			kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
 		} else
 			ret = 1;
 		break;
@@ -1556,6 +1534,7 @@
 		apic_debug("Local APIC Write to read-only register %x\n", reg);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
 
 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
 			    gpa_t address, int len, const void *data)
@@ -1585,14 +1564,14 @@
 		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
 			   "0x%x\n", __func__, offset, len, val);
 
-	apic_reg_write(apic, offset & 0xff0, val);
+	kvm_lapic_reg_write(apic, offset & 0xff0, val);
 
 	return 0;
 }
 
 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
 {
-	apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
+	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
 
@@ -1604,10 +1583,10 @@
 	/* hw has done the conditional check and inst decode */
 	offset &= 0xff0;
 
-	apic_reg_read(vcpu->arch.apic, offset, 4, &val);
+	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
 
 	/* TODO: optimize to just emulate side effect w/o one more write */
-	apic_reg_write(vcpu->arch.apic, offset, val);
+	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
 }
 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
 
@@ -1667,14 +1646,14 @@
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
-		     | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
+		     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
 }
 
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
 {
 	u64 tpr;
 
-	tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
+	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
 
 	return (tpr & 0xf0) >> 4;
 }
@@ -1740,28 +1719,28 @@
 		kvm_apic_set_id(apic, vcpu->vcpu_id);
 	kvm_apic_set_version(apic->vcpu);
 
-	for (i = 0; i < APIC_LVT_NUM; i++)
-		apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
+	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
+		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
 	apic_update_lvtt(apic);
 	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
-		apic_set_reg(apic, APIC_LVT0,
+		kvm_lapic_set_reg(apic, APIC_LVT0,
 			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
-	apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
+	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
 
-	apic_set_reg(apic, APIC_DFR, 0xffffffffU);
+	kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
 	apic_set_spiv(apic, 0xff);
-	apic_set_reg(apic, APIC_TASKPRI, 0);
+	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
 	if (!apic_x2apic_mode(apic))
 		kvm_apic_set_ldr(apic, 0);
-	apic_set_reg(apic, APIC_ESR, 0);
-	apic_set_reg(apic, APIC_ICR, 0);
-	apic_set_reg(apic, APIC_ICR2, 0);
-	apic_set_reg(apic, APIC_TDCR, 0);
-	apic_set_reg(apic, APIC_TMICT, 0);
+	kvm_lapic_set_reg(apic, APIC_ESR, 0);
+	kvm_lapic_set_reg(apic, APIC_ICR, 0);
+	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
+	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
+	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
 	for (i = 0; i < 8; i++) {
-		apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
-		apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
-		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
+		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
+		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
+		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
 	}
 	apic->irr_pending = vcpu->arch.apicv_active;
 	apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
@@ -1806,7 +1785,7 @@
 
 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
 {
-	u32 reg = kvm_apic_get_reg(apic, lvt_type);
+	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
 	int vector, mode, trig_mode;
 
 	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
@@ -1868,7 +1847,7 @@
 	apic->vcpu = vcpu;
 
 	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
-		     HRTIMER_MODE_ABS);
+		     HRTIMER_MODE_ABS_PINNED);
 	apic->lapic_timer.timer.function = apic_timer_fn;
 
 	/*
@@ -1901,14 +1880,14 @@
 	apic_update_ppr(apic);
 	highest_irr = apic_find_highest_irr(apic);
 	if ((highest_irr == -1) ||
-	    ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
+	    ((highest_irr & 0xF0) <= kvm_lapic_get_reg(apic, APIC_PROCPRI)))
 		return -1;
 	return highest_irr;
 }
 
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 {
-	u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0);
+	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
 	int r = 0;
 
 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
@@ -1974,7 +1953,7 @@
 	apic_update_ppr(apic);
 	hrtimer_cancel(&apic->lapic_timer.timer);
 	apic_update_lvtt(apic);
-	apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
+	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;
@@ -1982,9 +1961,11 @@
 				1 : count_vectors(apic->regs + APIC_ISR);
 	apic->highest_isr_cache = -1;
 	if (vcpu->arch.apicv_active) {
+		if (kvm_x86_ops->apicv_post_state_restore)
+			kvm_x86_ops->apicv_post_state_restore(vcpu);
 		kvm_x86_ops->hwapic_irr_update(vcpu,
 				apic_find_highest_irr(apic));
-		kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
+		kvm_x86_ops->hwapic_isr_update(vcpu,
 				apic_find_highest_isr(apic));
 	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -2003,7 +1984,7 @@
 
 	timer = &vcpu->arch.apic->lapic_timer.timer;
 	if (hrtimer_cancel(timer))
-		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*
@@ -2097,7 +2078,7 @@
 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
 		return;
 
-	tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff;
+	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
 	max_irr = apic_find_highest_irr(apic);
 	if (max_irr < 0)
 		max_irr = 0;
@@ -2139,8 +2120,8 @@
 
 	/* if this is ICR write vector before command */
 	if (reg == APIC_ICR)
-		apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
-	return apic_reg_write(apic, reg, (u32)data);
+		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
+	return kvm_lapic_reg_write(apic, reg, (u32)data);
 }
 
 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
@@ -2157,10 +2138,10 @@
 		return 1;
 	}
 
-	if (apic_reg_read(apic, reg, 4, &low))
+	if (kvm_lapic_reg_read(apic, reg, 4, &low))
 		return 1;
 	if (reg == APIC_ICR)
-		apic_reg_read(apic, APIC_ICR2, 4, &high);
+		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
 
 	*data = (((u64)high) << 32) | low;
 
@@ -2176,8 +2157,8 @@
 
 	/* if this is ICR write vector before command */
 	if (reg == APIC_ICR)
-		apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
-	return apic_reg_write(apic, reg, (u32)data);
+		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
+	return kvm_lapic_reg_write(apic, reg, (u32)data);
 }
 
 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
@@ -2188,10 +2169,10 @@
 	if (!lapic_in_kernel(vcpu))
 		return 1;
 
-	if (apic_reg_read(apic, reg, 4, &low))
+	if (kvm_lapic_reg_read(apic, reg, 4, &low))
 		return 1;
 	if (reg == APIC_ICR)
-		apic_reg_read(apic, APIC_ICR2, 4, &high);
+		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
 
 	*data = (((u64)high) << 32) | low;
 
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index f71183e..891c6da 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -7,6 +7,10 @@
 
 #define KVM_APIC_INIT		0
 #define KVM_APIC_SIPI		1
+#define KVM_APIC_LVT_NUM	6
+
+#define KVM_APIC_SHORT_MASK	0xc0000
+#define KVM_APIC_DEST_MASK	0x800
 
 struct kvm_timer {
 	struct hrtimer timer;
@@ -59,6 +63,11 @@
 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
 u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
 void kvm_apic_set_version(struct kvm_vcpu *vcpu);
+int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val);
+int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
+		       void *data);
+bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+			   int short_hand, unsigned int dest, int dest_mode);
 
 void __kvm_apic_update_irr(u32 *pir, void *regs);
 void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
@@ -99,9 +108,32 @@
 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
 void kvm_lapic_init(void);
 
-static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
+#define VEC_POS(v) ((v) & (32 - 1))
+#define REG_POS(v) (((v) >> 5) << 4)
+
+static inline void kvm_lapic_set_vector(int vec, void *bitmap)
 {
-	        return *((u32 *) (apic->regs + reg_off));
+	set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+}
+
+static inline void kvm_lapic_set_irr(int vec, struct kvm_lapic *apic)
+{
+	kvm_lapic_set_vector(vec, apic->regs + APIC_IRR);
+	/*
+	 * irr_pending must be true if any interrupt is pending; set it after
+	 * APIC_IRR to avoid race with apic_clear_irr
+	 */
+	apic->irr_pending = true;
+}
+
+static inline u32 kvm_lapic_get_reg(struct kvm_lapic *apic, int reg_off)
+{
+	return *((u32 *) (apic->regs + reg_off));
+}
+
+static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
+{
+	*((u32 *) (apic->regs + reg_off)) = val;
 }
 
 extern struct static_key kvm_no_apic_vcpu;
@@ -169,7 +201,7 @@
 
 static inline int kvm_apic_id(struct kvm_lapic *apic)
 {
-	return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
+	return (kvm_lapic_get_reg(apic, APIC_ID) >> 24) & 0xff;
 }
 
 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70e95d0..24e8001 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -557,8 +557,15 @@
 	      !is_writable_pte(new_spte))
 		ret = true;
 
-	if (!shadow_accessed_mask)
+	if (!shadow_accessed_mask) {
+		/*
+		 * We don't set page dirty when dropping non-writable spte.
+		 * So do it now if the new spte is becoming non-writable.
+		 */
+		if (ret)
+			kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 		return ret;
+	}
 
 	/*
 	 * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@
 
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
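+	/*
+	 * Without a hardware dirty bit (shadow_dirty_mask == 0), a writable
+	 * spte may have been dirtied, so use writability as the indicator.
+	 */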
+	if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
+					    PT_WRITABLE_MASK))
 		kvm_set_pfn_dirty(pfn);
 	return 1;
 }
@@ -1901,18 +1909,17 @@
  * since it has been deleted from active_mmu_pages but still can be found
  * at hash list.
  *
- * for_each_gfn_indirect_valid_sp has skipped that kind of page and
- * kvm_mmu_get_page(), the only user of for_each_gfn_sp(), has skipped
- * all the obsolete pages.
+ * for_each_gfn_valid_sp() has skipped that kind of page.
  */
-#define for_each_gfn_sp(_kvm, _sp, _gfn)				\
+#define for_each_gfn_valid_sp(_kvm, _sp, _gfn)				\
 	hlist_for_each_entry(_sp,					\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
-		if ((_sp)->gfn != (_gfn)) {} else
+		if ((_sp)->gfn != (_gfn) || is_obsolete_sp((_kvm), (_sp)) \
+			|| (_sp)->role.invalid) {} else
 
 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
-	for_each_gfn_sp(_kvm, _sp, _gfn)				\
-		if ((_sp)->role.direct || (_sp)->role.invalid) {} else
+	for_each_gfn_valid_sp(_kvm, _sp, _gfn)				\
+		if ((_sp)->role.direct) {} else
 
 /* @sp->gfn should be write-protected at the call site */
 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
@@ -1953,6 +1960,11 @@
 static void mmu_audit_disable(void) { }
 #endif
 
+static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
+}
+
 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
 {
@@ -2097,11 +2109,6 @@
 	__clear_sp_write_flooding_count(sp);
 }
 
-static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
-}
-
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gfn_t gfn,
 					     gva_t gaddr,
@@ -2128,10 +2135,7 @@
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_sp(vcpu->kvm, sp, gfn) {
-		if (is_obsolete_sp(vcpu->kvm, sp))
-			continue;
-
+	for_each_gfn_valid_sp(vcpu->kvm, sp, gfn) {
 		if (!need_sync && sp->unsync)
 			need_sync = true;
 
@@ -2815,7 +2819,7 @@
 	 */
 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
 	    level == PT_PAGE_TABLE_LEVEL &&
-	    PageTransCompound(pfn_to_page(pfn)) &&
+	    PageTransCompoundMap(pfn_to_page(pfn)) &&
 	    !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
 		unsigned long mask;
 		/*
@@ -3836,7 +3840,8 @@
 		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 					boot_cpu_data.x86_phys_bits,
 					context->shadow_root_level, false,
-					cpu_has_gbpages, true, true);
+					boot_cpu_has(X86_FEATURE_GBPAGES),
+					true, true);
 	else
 		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
 					    boot_cpu_data.x86_phys_bits,
@@ -4777,7 +4782,7 @@
 		 */
 		if (sp->role.direct &&
 			!kvm_is_reserved_pfn(pfn) &&
-			PageTransCompound(pfn_to_page(pfn))) {
+			PageTransCompoundMap(pfn_to_page(pfn))) {
 			drop_spte(kvm, sptep);
 			need_tlb_flush = 1;
 			goto restart;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index b70df72..66b33b9 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -173,10 +173,9 @@
 	int index = (pfec >> 1) +
 		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
 	bool fault = (mmu->permissions[index] >> pte_access) & 1;
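+	/* Permission faults are only reported on present translations. */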
+	u32 errcode = PFERR_PRESENT_MASK;
 
 	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
-	pfec |= PFERR_PRESENT_MASK;
-
 	if (unlikely(mmu->pkru_mask)) {
 		u32 pkru_bits, offset;
 
@@ -189,15 +188,15 @@
 		pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
 
 		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
-		offset = pfec - 1 +
+		offset = (pfec & ~1) +
 			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
 
 		pkru_bits &= mmu->pkru_mask >> offset;
-		pfec |= -pkru_bits & PFERR_PK_MASK;
+		errcode |= -pkru_bits & PFERR_PK_MASK;
 		fault |= (pkru_bits != 0);
 	}
 
-	return -(uint32_t)fault & pfec;
+	return -(u32)fault & errcode;
 }
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 3f8c732..c146f3c 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -44,8 +44,6 @@
 	case MSR_MTRRdefType:
 	case MSR_IA32_CR_PAT:
 		return true;
-	case 0x2f8:
-		return true;
 	}
 	return false;
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1d971c7..bc019f7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -360,7 +360,7 @@
 			goto error;
 
 		if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
-			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
+			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
 			goto error;
 		}
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 31346a3..2214214 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -14,6 +14,9 @@
  * the COPYING file in the top-level directory.
  *
  */
+
+#define pr_fmt(fmt) "SVM: " fmt
+
 #include <linux/kvm_host.h>
 
 #include "irq.h"
@@ -32,6 +35,7 @@
 #include <linux/trace_events.h>
 #include <linux/slab.h>
 
+#include <asm/apic.h>
 #include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
@@ -68,6 +72,8 @@
 #define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
 #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
 
+#define SVM_AVIC_DOORBELL	0xc001011b
+
 #define NESTED_EXIT_HOST	0	/* Exit handled on host level */
 #define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
 #define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
@@ -78,6 +84,18 @@
 #define TSC_RATIO_MIN		0x0000000000000001ULL
 #define TSC_RATIO_MAX		0x000000ffffffffffULL
 
+#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)
+
+/*
+ * 0xff is broadcast, so the max index allowed for physical APIC ID
+ * table is 0xfe.  APIC IDs above 0xff are reserved.
+ */
+#define AVIC_MAX_PHYSICAL_ID_COUNT	255
+
+#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
+#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
+#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF
+
 static bool erratum_383_found __read_mostly;
 
 static const u32 host_save_user_msrs[] = {
@@ -162,8 +180,21 @@
 
 	/* cached guest cpuid flags for faster access */
 	bool nrips_enabled	: 1;
+
+	u32 ldr_reg;
+	struct page *avic_backing_page;
+	u64 *avic_physical_id_cache;
+	bool avic_is_running;
 };
 
+#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)
+
+#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
+#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
+#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
+#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)
+
 static DEFINE_PER_CPU(u64, current_tsc_ratio);
 #define TSC_RATIO_DEFAULT	0x0100000000ULL
 
@@ -205,6 +236,10 @@
 static int nested = true;
 module_param(nested, int, S_IRUGO);
 
+/* enable / disable AVIC */
+static int avic;
+module_param(avic, int, S_IRUGO);
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
@@ -228,12 +263,18 @@
 	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
 	VMCB_CR2,        /* CR2 only */
 	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
+	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
+			  * AVIC PHYSICAL_TABLE pointer,
+			  * AVIC LOGICAL_TABLE pointer
+			  */
 	VMCB_DIRTY_MAX,
 };
 
 /* TPR and CR2 are always written before VMRUN */
 #define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
 
+#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL
+
 static inline void mark_all_dirty(struct vmcb *vmcb)
 {
 	vmcb->control.clean = 0;
@@ -255,6 +296,23 @@
 	return container_of(vcpu, struct vcpu_svm, vcpu);
 }
 
+static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
+{
+	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
+	mark_dirty(svm->vmcb, VMCB_AVIC);
+}
+
+static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u64 *entry = svm->avic_physical_id_cache;
+
+	if (!entry)
+		return false;
+
+	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
+}
+
 static void recalc_intercepts(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *c, *h;
@@ -923,6 +981,12 @@
 	} else
 		kvm_disable_tdp();
 
+	if (avic && (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)))
+		avic = false;
+
+	if (avic)
+		pr_info("AVIC enabled\n");
+
 	return 0;
 
 err:
@@ -1000,6 +1064,22 @@
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
+static void avic_init_vmcb(struct vcpu_svm *svm)
+{
+	struct vmcb *vmcb = svm->vmcb;
+	struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
+	phys_addr_t bpa = page_to_phys(svm->avic_backing_page);
+	phys_addr_t lpa = page_to_phys(vm_data->avic_logical_id_table_page);
+	phys_addr_t ppa = page_to_phys(vm_data->avic_physical_id_table_page);
+
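+	/* Point the VMCB at the per-vcpu backing page and the per-VM ID tables. */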
+	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
+	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
+	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
+	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
+	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+	svm->vcpu.arch.apicv_active = true;
+}
+
 static void init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1014,7 +1094,8 @@
 	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
 	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
 	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
-	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+	if (!kvm_vcpu_apicv_active(&svm->vcpu))
+		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 
 	set_dr_intercepts(svm);
 
@@ -1110,9 +1191,197 @@
 		set_intercept(svm, INTERCEPT_PAUSE);
 	}
 
+	if (avic)
+		avic_init_vmcb(svm);
+
 	mark_all_dirty(svm->vmcb);
 
 	enable_gif(svm);
+
+}
+
+static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, int index)
+{
+	u64 *avic_physical_id_table;
+	struct kvm_arch *vm_data = &vcpu->kvm->arch;
+
+	if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
+		return NULL;
+
+	avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);
+
+	return &avic_physical_id_table[index];
+}
+
+/*
+ * Note:
+ * AVIC hardware walks the nested page table to check permissions,
+ * but does not use the SPA address specified in the leaf page
+ * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
+ * field of the VMCB. Therefore, we set up the
+ * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
+ */
+static int avic_init_access_page(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	int ret;
+
+	if (kvm->arch.apic_access_page_done)
+		return 0;
+
+	ret = x86_set_memory_region(kvm,
+				    APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+				    APIC_DEFAULT_PHYS_BASE,
+				    PAGE_SIZE);
+	if (ret)
+		return ret;
+
+	kvm->arch.apic_access_page_done = true;
+	return 0;
+}
+
+static int avic_init_backing_page(struct kvm_vcpu *vcpu)
+{
+	int ret;
+	u64 *entry, new_entry;
+	int id = vcpu->vcpu_id;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	ret = avic_init_access_page(vcpu);
+	if (ret)
+		return ret;
+
+	if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
+		return -EINVAL;
+
+	if (!svm->vcpu.arch.apic->regs)
+		return -EINVAL;
+
+	svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
+
+	/* Set the AVIC backing page address in the physical APIC ID table */
+	entry = avic_get_physical_id_entry(vcpu, id);
+	if (!entry)
+		return -EINVAL;
+
+	new_entry = READ_ONCE(*entry);
+	new_entry = (page_to_phys(svm->avic_backing_page) &
+		     AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
+		     AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
+	WRITE_ONCE(*entry, new_entry);
+
+	svm->avic_physical_id_cache = entry;
+
+	return 0;
+}
+
+static void avic_vm_destroy(struct kvm *kvm)
+{
+	struct kvm_arch *vm_data = &kvm->arch;
+
+	if (vm_data->avic_logical_id_table_page)
+		__free_page(vm_data->avic_logical_id_table_page);
+	if (vm_data->avic_physical_id_table_page)
+		__free_page(vm_data->avic_physical_id_table_page);
+}
+
+static int avic_vm_init(struct kvm *kvm)
+{
+	int err = -ENOMEM;
+	struct kvm_arch *vm_data = &kvm->arch;
+	struct page *p_page;
+	struct page *l_page;
+
+	if (!avic)
+		return 0;
+
+	/* Allocating physical APIC ID table (4KB) */
+	p_page = alloc_page(GFP_KERNEL);
+	if (!p_page)
+		goto free_avic;
+
+	vm_data->avic_physical_id_table_page = p_page;
+	clear_page(page_address(p_page));
+
+	/* Allocating logical APIC ID table (4KB) */
+	l_page = alloc_page(GFP_KERNEL);
+	if (!l_page)
+		goto free_avic;
+
+	vm_data->avic_logical_id_table_page = l_page;
+	clear_page(page_address(l_page));
+
+	return 0;
+
+free_avic:
+	avic_vm_destroy(kvm);
+	return err;
+}
+
+/*
+ * This function is called during VCPU halt/unhalt.
+ */
+static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
+{
+	u64 entry;
+	int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!kvm_vcpu_apicv_active(vcpu))
+		return;
+
+	svm->avic_is_running = is_run;
+
+	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
+	if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
+		return;
+
+	entry = READ_ONCE(*(svm->avic_physical_id_cache));
+	WARN_ON(is_run == !!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK));
+
+	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+	if (is_run)
+		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+}
+
+static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	u64 entry;
+	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
+	int h_physical_id = __default_cpu_present_to_apicid(cpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!kvm_vcpu_apicv_active(vcpu))
+		return;
+
+	if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
+		return;
+
+	entry = READ_ONCE(*(svm->avic_physical_id_cache));
+	WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
+
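+	/* Record the host physical APIC ID this vcpu is now running on. */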
+	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
+	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
+
+	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+	if (svm->avic_is_running)
+		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+
+	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+}
+
+static void avic_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	u64 entry;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!kvm_vcpu_apicv_active(vcpu))
+		return;
+
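+	/* Clear the is_running bit while the vcpu is not loaded on any cpu. */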
+	entry = READ_ONCE(*(svm->avic_physical_id_cache));
+	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
 }
 
 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -1131,6 +1400,9 @@
 
 	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
 	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
+
+	if (kvm_vcpu_apicv_active(vcpu) && !init_event)
+		avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
 }
 
 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
@@ -1169,6 +1441,17 @@
 	if (!hsave_page)
 		goto free_page3;
 
+	if (avic) {
+		err = avic_init_backing_page(&svm->vcpu);
+		if (err)
+			goto free_page4;
+	}
+
+	/* We initialize this flag to true to make sure that the is_running
+	 * bit will be set the first time the vcpu is loaded.
+	 */
+	svm->avic_is_running = true;
+
 	svm->nested.hsave = page_address(hsave_page);
 
 	svm->msrpm = page_address(msrpm_pages);
@@ -1187,6 +1470,8 @@
 
 	return &svm->vcpu;
 
+free_page4:
+	__free_page(hsave_page);
 free_page3:
 	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
 free_page2:
@@ -1243,6 +1528,8 @@
 	/* This assumes that the kernel never uses MSR_TSC_AUX */
 	if (static_cpu_has(X86_FEATURE_RDTSCP))
 		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
+
+	avic_vcpu_load(vcpu, cpu);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
@@ -1250,11 +1537,13 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 	int i;
 
+	avic_vcpu_put(vcpu);
+
 	++vcpu->stat.host_state_reload;
 	kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
 	loadsegment(fs, svm->host.fs);
-	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
 	load_gs_index(svm->host.gs);
 #else
 #ifdef CONFIG_X86_32_LAZY_GS
@@ -1265,6 +1554,16 @@
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
 
+static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+	avic_set_running(vcpu, false);
+}
+
+static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+	avic_set_running(vcpu, true);
+}
+
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
 	return to_svm(vcpu)->vmcb->save.rflags;
@@ -2673,10 +2972,11 @@
 	disable_gif(svm);
 
 	/* After a CLGI no interrupts should come */
-	svm_clear_vintr(svm);
-	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
-
-	mark_dirty(svm->vmcb, VMCB_INTR);
+	if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
+		svm_clear_vintr(svm);
+		svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
+		mark_dirty(svm->vmcb, VMCB_INTR);
+	}
 
 	return 1;
 }
@@ -3212,6 +3512,10 @@
 	case MSR_VM_IGNNE:
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
+	case MSR_IA32_APICBASE:
+		if (kvm_vcpu_apicv_active(vcpu))
+			avic_update_vapic_bar(to_svm(vcpu), data);
+		/* Fall through */
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
@@ -3281,6 +3585,278 @@
 	return nop_interception(svm);
 }
 
+enum avic_ipi_failure_cause {
+	AVIC_IPI_FAILURE_INVALID_INT_TYPE,
+	AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
+	AVIC_IPI_FAILURE_INVALID_TARGET,
+	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+};
+
+static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
+{
+	u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
+	u32 icrl = svm->vmcb->control.exit_info_1;
+	u32 id = svm->vmcb->control.exit_info_2 >> 32;
+	u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
+	struct kvm_lapic *apic = svm->vcpu.arch.apic;
+
+	trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
+
+	switch (id) {
+	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
+		/*
+		 * AVIC hardware handles the generation of
+		 * IPIs when the specified Message Type is Fixed
+		 * (also known as fixed delivery mode) and
+		 * the Trigger Mode is edge-triggered. The hardware
+		 * also supports self and broadcast delivery modes
+		 * specified via the Destination Shorthand (DSH)
+		 * field of the ICRL. Logical and physical APIC ID
+		 * formats are supported. All other IPI types cause
+		 * a #VMEXIT, which needs to be emulated.
+		 */
+		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+		break;
+	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
+		int i;
+		struct kvm_vcpu *vcpu;
+		struct kvm *kvm = svm->vcpu.kvm;
+		struct kvm_lapic *apic = svm->vcpu.arch.apic;
+
+		/*
+		 * At this point, we expect that the AVIC HW has already
+		 * set the appropriate IRR bits on the valid target
+		 * vcpus. So, we just need to kick the appropriate vcpu.
+		 */
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			bool m = kvm_apic_match_dest(vcpu, apic,
+						     icrl & KVM_APIC_SHORT_MASK,
+						     GET_APIC_DEST_FIELD(icrh),
+						     icrl & KVM_APIC_DEST_MASK);
+
+			if (m && !avic_vcpu_is_running(vcpu))
+				kvm_vcpu_wake_up(vcpu);
+		}
+		break;
+	}
+	case AVIC_IPI_FAILURE_INVALID_TARGET:
+		break;
+	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
+		WARN_ONCE(1, "Invalid backing page\n");
+		break;
+	default:
+		pr_err("Unknown IPI interception\n");
+	}
+
+	return 1;
+}
+
+static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
+{
+	struct kvm_arch *vm_data = &vcpu->kvm->arch;
+	int index;
+	u32 *logical_apic_id_table;
+	int dlid = GET_APIC_LOGICAL_ID(ldr);
+
+	if (!dlid)
+		return NULL;
+
+	if (flat) { /* flat */
+		index = ffs(dlid) - 1;
+		if (index > 7)
+			return NULL;
+	} else { /* cluster */
+		int cluster = (dlid & 0xf0) >> 4;
+		int apic = ffs(dlid & 0x0f) - 1;
+
+		if ((apic < 0) || (apic > 7) ||
+		    (cluster >= 0xf))
+			return NULL;
+		index = (cluster << 2) + apic;
+	}
+
+	logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);
+
+	return &logical_apic_id_table[index];
+}
+
+static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
+			  bool valid)
+{
+	bool flat;
+	u32 *entry, new_entry;
+
+	flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
+	entry = avic_get_logical_id_entry(vcpu, ldr, flat);
+	if (!entry)
+		return -EINVAL;
+
+	new_entry = READ_ONCE(*entry);
+	new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
+	new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
+	if (valid)
+		new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
+	else
+		new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
+	WRITE_ONCE(*entry, new_entry);
+
+	return 0;
+}
+
+static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
+{
+	int ret;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
+
+	if (!ldr)
+		return 1;
+
+	ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
+	if (ret && svm->ldr_reg) {
+		avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
+		svm->ldr_reg = 0;
+	} else {
+		svm->ldr_reg = ldr;
+	}
+	return ret;
+}
+
+static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
+{
+	u64 *old, *new;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
+	u32 id = (apic_id_reg >> 24) & 0xff;
+
+	if (vcpu->vcpu_id == id)
+		return 0;
+
+	old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
+	new = avic_get_physical_id_entry(vcpu, id);
+	if (!new || !old)
+		return 1;
+
+	/* We need to move physical_id_entry to new offset */
+	*new = *old;
+	*old = 0ULL;
+	to_svm(vcpu)->avic_physical_id_cache = new;
+
+	/*
+	 * Also update the guest physical APIC ID in the logical
+	 * APIC ID table entry if the LDR has already been set up.
+	 */
+	if (svm->ldr_reg)
+		avic_handle_ldr_update(vcpu);
+
+	return 0;
+}
+
+static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_arch *vm_data = &vcpu->kvm->arch;
+	u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
+	u32 mod = (dfr >> 28) & 0xf;
+
+	/*
+	 * We assume that all local APICs are using the same type.
+	 * If this changes, we need to flush the AVIC logical
+	 * APIC ID table.
+	 */
+	if (vm_data->ldr_mode == mod)
+		return 0;
+
+	clear_page(page_address(vm_data->avic_logical_id_table_page));
+	vm_data->ldr_mode = mod;
+
+	if (svm->ldr_reg)
+		avic_handle_ldr_update(vcpu);
+	return 0;
+}
+
+static int avic_unaccel_trap_write(struct vcpu_svm *svm)
+{
+	struct kvm_lapic *apic = svm->vcpu.arch.apic;
+	u32 offset = svm->vmcb->control.exit_info_1 &
+				AVIC_UNACCEL_ACCESS_OFFSET_MASK;
+
+	switch (offset) {
+	case APIC_ID:
+		if (avic_handle_apic_id_update(&svm->vcpu))
+			return 0;
+		break;
+	case APIC_LDR:
+		if (avic_handle_ldr_update(&svm->vcpu))
+			return 0;
+		break;
+	case APIC_DFR:
+		avic_handle_dfr_update(&svm->vcpu);
+		break;
+	default:
+		break;
+	}
+
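+	/*
+	 * For a trap, the register in the backing page has already been
+	 * updated by hardware; replay the write only to emulate its
+	 * side effects.
+	 */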
+	kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
+
+	return 1;
+}
+
+static bool is_avic_unaccelerated_access_trap(u32 offset)
+{
+	bool ret = false;
+
+	switch (offset) {
+	case APIC_ID:
+	case APIC_EOI:
+	case APIC_RRR:
+	case APIC_LDR:
+	case APIC_DFR:
+	case APIC_SPIV:
+	case APIC_ESR:
+	case APIC_ICR:
+	case APIC_LVTT:
+	case APIC_LVTTHMR:
+	case APIC_LVTPC:
+	case APIC_LVT0:
+	case APIC_LVT1:
+	case APIC_LVTERR:
+	case APIC_TMICT:
+	case APIC_TDCR:
+		ret = true;
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
+{
+	int ret = 0;
+	u32 offset = svm->vmcb->control.exit_info_1 &
+		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
+	u32 vector = svm->vmcb->control.exit_info_2 &
+		     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
+	bool write = (svm->vmcb->control.exit_info_1 >> 32) &
+		     AVIC_UNACCEL_ACCESS_WRITE_MASK;
+	bool trap = is_avic_unaccelerated_access_trap(offset);
+
+	trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
+					    trap, write, vector);
+	if (trap) {
+		/* Handling Trap */
+		WARN_ONCE(!write, "svm: Handling trap read.\n");
+		ret = avic_unaccel_trap_write(svm);
+	} else {
+		/* Handling Fault */
+		ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
+	}
+
+	return ret;
+}
+
 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_CR0]			= cr_interception,
 	[SVM_EXIT_READ_CR3]			= cr_interception,
@@ -3344,6 +3920,8 @@
 	[SVM_EXIT_XSETBV]			= xsetbv_interception,
 	[SVM_EXIT_NPF]				= pf_interception,
 	[SVM_EXIT_RSM]                          = emulate_on_interception,
+	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
+	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS]	= avic_unaccelerated_access_interception,
 };
 
 static void dump_vmcb(struct kvm_vcpu *vcpu)
@@ -3375,10 +3953,14 @@
 	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
 	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
 	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
+	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
 	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
 	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
 	pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
 	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
+	pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
+	pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
+	pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
 	pr_err("VMCB State Save Area:\n");
 	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
 	       "es:",
@@ -3562,6 +4144,7 @@
 {
 	struct vmcb_control_area *control;
 
+	/* The following fields are ignored when AVIC is enabled */
 	control = &svm->vmcb->control;
 	control->int_vector = irq;
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
@@ -3583,11 +4166,17 @@
 		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
 }
 
+static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
+{
+	return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
+}
+
 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (svm_nested_virtualize_tpr(vcpu) ||
+	    kvm_vcpu_apicv_active(vcpu))
 		return;
 
 	clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
@@ -3606,11 +4195,28 @@
 
 static bool svm_get_enable_apicv(void)
 {
-	return false;
+	return avic;
 }
 
+static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
+{
+}
+
+static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
+{
+}
+
+/* Note: Currently only used by Hyper-V. */
 static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *vmcb = svm->vmcb;
+
+	if (!avic)
+		return;
+
+	vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
+	mark_dirty(vmcb, VMCB_INTR);
 }
 
 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
@@ -3623,6 +4229,18 @@
 	return;
 }
 
+static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+{
+	kvm_lapic_set_irr(vec, vcpu->arch.apic);
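+	/* Order the IRR update before the is_running check below. */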
+	smp_mb__after_atomic();
+
+	if (avic_vcpu_is_running(vcpu))
+		wrmsrl(SVM_AVIC_DOORBELL,
+		       __default_cpu_present_to_apicid(vcpu->cpu));
+	else
+		kvm_vcpu_wake_up(vcpu);
+}
+
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -3677,6 +4295,9 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	if (kvm_vcpu_apicv_active(vcpu))
+		return;
+
 	/*
 	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
 	 * 1, because that's a separate STGI/VMRUN intercept.  The next time we
@@ -3728,7 +4349,7 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (svm_nested_virtualize_tpr(vcpu))
 		return;
 
 	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
@@ -3742,7 +4363,8 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 cr8;
 
-	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (svm_nested_virtualize_tpr(vcpu) ||
+	    kvm_vcpu_apicv_active(vcpu))
 		return;
 
 	cr8 = kvm_get_cr8(vcpu);
@@ -4045,14 +4667,26 @@
 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_cpuid_entry2 *entry;
 
 	/* Update nrips enabled cache */
 	svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
+
+	if (!kvm_vcpu_apicv_active(vcpu))
+		return;
+
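+	/* AVIC does not support guest use of x2APIC, so hide it from CPUID. */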
+	entry = kvm_find_cpuid_entry(vcpu, 1, 0);
+	if (entry)
+		entry->ecx &= ~bit(X86_FEATURE_X2APIC);
 }
 
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
+	case 0x1:
+		if (avic)
+			entry->ecx &= ~bit(X86_FEATURE_X2APIC);
+		break;
 	case 0x80000001:
 		if (nested)
 			entry->ecx |= (1 << 2); /* Set SVM bit */
@@ -4307,6 +4941,15 @@
 {
 }
 
+static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
+{
+	if (avic_handle_apic_id_update(vcpu) != 0)
+		return;
+	if (avic_handle_dfr_update(vcpu) != 0)
+		return;
+	avic_handle_ldr_update(vcpu);
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -4322,9 +4965,14 @@
 	.vcpu_free = svm_free_vcpu,
 	.vcpu_reset = svm_vcpu_reset,
 
+	.vm_init = avic_vm_init,
+	.vm_destroy = avic_vm_destroy,
+
 	.prepare_guest_switch = svm_prepare_guest_switch,
 	.vcpu_load = svm_vcpu_load,
 	.vcpu_put = svm_vcpu_put,
+	.vcpu_blocking = svm_vcpu_blocking,
+	.vcpu_unblocking = svm_vcpu_unblocking,
 
 	.update_bp_intercept = update_bp_intercept,
 	.get_msr = svm_get_msr,
@@ -4382,6 +5030,9 @@
 	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
 	.sync_pir_to_irr = svm_sync_pir_to_irr,
+	.hwapic_irr_update = svm_hwapic_irr_update,
+	.hwapic_isr_update = svm_hwapic_isr_update,
+	.apicv_post_state_restore = avic_post_state_restore,
 
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
@@ -4415,6 +5066,7 @@
 	.sched_in = svm_sched_in,
 
 	.pmu_ops = &amd_pmu_ops,
+	.deliver_posted_interrupt = svm_deliver_avic_intr,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 2f1ea2f..8de9250 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -809,8 +809,7 @@
 
 #define host_clocks					\
 	{VCLOCK_NONE, "none"},				\
-	{VCLOCK_TSC,  "tsc"},				\
-	{VCLOCK_HPET, "hpet"}				\
+	{VCLOCK_TSC,  "tsc"}				\
 
 TRACE_EVENT(kvm_update_master_clock,
 	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
@@ -1292,6 +1291,63 @@
 		  __entry->vcpu_id, __entry->timer_index)
 );
 
+/*
+ * Tracepoint for AMD AVIC
+ */
+TRACE_EVENT(kvm_avic_incomplete_ipi,
+	    TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
+	    TP_ARGS(vcpu, icrh, icrl, id, index),
+
+	TP_STRUCT__entry(
+		__field(u32, vcpu)
+		__field(u32, icrh)
+		__field(u32, icrl)
+		__field(u32, id)
+		__field(u32, index)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu = vcpu;
+		__entry->icrh = icrh;
+		__entry->icrl = icrl;
+		__entry->id = id;
+		__entry->index = index;
+	),
+
+	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u\n",
+		  __entry->vcpu, __entry->icrh, __entry->icrl,
+		  __entry->id, __entry->index)
+);
+
+TRACE_EVENT(kvm_avic_unaccelerated_access,
+	    TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
+	    TP_ARGS(vcpu, offset, ft, rw, vec),
+
+	TP_STRUCT__entry(
+		__field(u32, vcpu)
+		__field(u32, offset)
+		__field(bool, ft)
+		__field(bool, rw)
+		__field(u32, vec)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu = vcpu;
+		__entry->offset = offset;
+		__entry->ft = ft;
+		__entry->rw = rw;
+		__entry->vec = vec;
+	),
+
+	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x\n",
+		  __entry->vcpu,
+		  __entry->offset,
+		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
+		  __entry->ft ? "trap" : "fault",
+		  __entry->rw ? "write" : "read",
+		  __entry->vec)
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ee1c8a9..e605d1e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3103,6 +3103,8 @@
 
 static void kvm_cpu_vmxon(u64 addr)
 {
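+	/* Notify the Intel PT driver that we are entering VMX operation. */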
+	intel_pt_handle_vmx(1);
+
 	asm volatile (ASM_VMX_VMXON_RAX
 			: : "a"(&addr), "m"(addr)
 			: "memory", "cc");
@@ -3172,6 +3174,8 @@
 static void kvm_cpu_vmxoff(void)
 {
 	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
+
+	intel_pt_handle_vmx(0);
 }
 
 static void hardware_disable(void)
@@ -3386,7 +3390,7 @@
 		}
 	}
 
-	if (cpu_has_xsaves)
+	if (boot_cpu_has(X86_FEATURE_XSAVES))
 		rdmsrl(MSR_IA32_XSS, host_xss);
 
 	return 0;
@@ -5046,8 +5050,8 @@
 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
 	cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
-	vmx_set_cr0(vcpu, cr0); /* enter rmode */
 	vmx->vcpu.arch.cr0 = cr0;
+	vmx_set_cr0(vcpu, cr0); /* enter rmode */
 	vmx_set_cr4(vcpu, 0);
 	vmx_set_efer(vcpu, 0);
 	vmx_fpu_activate(vcpu);
@@ -8314,19 +8318,19 @@
 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
 }
 
-static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
+static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
 {
 	u16 status;
 	u8 old;
 
-	if (isr == -1)
-		isr = 0;
+	if (max_isr == -1)
+		max_isr = 0;
 
 	status = vmcs_read16(GUEST_INTR_STATUS);
 	old = status >> 8;
-	if (isr != old) {
+	if (max_isr != old) {
 		status &= 0xff;
-		status |= isr << 8;
+		status |= max_isr << 8;
 		vmcs_write16(GUEST_INTR_STATUS, status);
 	}
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 742d0f7..c805cf4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -161,6 +161,7 @@
 	{ "halt_exits", VCPU_STAT(halt_exits) },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
 	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
+	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ "hypercalls", VCPU_STAT(hypercalls) },
 	{ "request_irq", VCPU_STAT(request_irq_exits) },
@@ -700,7 +701,6 @@
 		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
 			return 1;
 	}
-	kvm_put_guest_xcr0(vcpu);
 	vcpu->arch.xcr0 = xcr0;
 
 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -2003,22 +2003,8 @@
 	vcpu->arch.pv_time_enabled = false;
 }
 
-static void accumulate_steal_time(struct kvm_vcpu *vcpu)
-{
-	u64 delta;
-
-	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
-		return;
-
-	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
-	vcpu->arch.st.last_steal = current->sched_info.run_delay;
-	vcpu->arch.st.accum_steal = delta;
-}
-
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
-	accumulate_steal_time(vcpu);
-
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
@@ -2026,9 +2012,26 @@
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
 		return;
 
-	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
-	vcpu->arch.st.steal.version += 2;
-	vcpu->arch.st.accum_steal = 0;
+	if (vcpu->arch.st.steal.version & 1)
+		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
+
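+	/*
+	 * Bump to an odd version before updating the steal time fields,
+	 * seqlock-style, so the guest can detect an in-progress update;
+	 * the final increment below makes the version even again.
+	 */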
+	vcpu->arch.st.steal.version += 1;
+
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+
+	smp_wmb();
+
+	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+		vcpu->arch.st.last_steal;
+	vcpu->arch.st.last_steal = current->sched_info.run_delay;
+
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+
+	smp_wmb();
+
+	vcpu->arch.st.steal.version += 1;
 
 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
@@ -2612,7 +2615,7 @@
 		r = KVM_MAX_MCE_BANKS;
 		break;
 	case KVM_CAP_XCRS:
-		r = cpu_has_xsave;
+		r = boot_cpu_has(X86_FEATURE_XSAVE);
 		break;
 	case KVM_CAP_TSC_CONTROL:
 		r = kvm_has_tsc_control;
@@ -3095,7 +3098,7 @@
 
 	/* Set XSTATE_BV and possibly XCOMP_BV.  */
 	xsave->header.xfeatures = xstate_bv;
-	if (cpu_has_xsaves)
+	if (boot_cpu_has(X86_FEATURE_XSAVES))
 		xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
 
 	/*
@@ -3122,7 +3125,7 @@
 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
 					 struct kvm_xsave *guest_xsave)
 {
-	if (cpu_has_xsave) {
+	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
 		memset(guest_xsave, 0, sizeof(struct kvm_xsave));
 		fill_xsave((u8 *) guest_xsave->region, vcpu);
 	} else {
@@ -3140,7 +3143,7 @@
 	u64 xstate_bv =
 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
 
-	if (cpu_has_xsave) {
+	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
 		/*
 		 * Here we allow setting states that are not present in
 		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
@@ -3161,7 +3164,7 @@
 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
 					struct kvm_xcrs *guest_xcrs)
 {
-	if (!cpu_has_xsave) {
+	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
 		guest_xcrs->nr_xcrs = 0;
 		return;
 	}
@@ -3177,7 +3180,7 @@
 {
 	int i, r = 0;
 
-	if (!cpu_has_xsave)
+	if (!boot_cpu_has(X86_FEATURE_XSAVE))
 		return -EINVAL;
 
 	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
@@ -5866,7 +5869,7 @@
 
 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
 
-	if (cpu_has_xsave)
+	if (boot_cpu_has(X86_FEATURE_XSAVE))
 		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 
 	kvm_lapic_init();
@@ -6095,12 +6098,10 @@
 	}
 
 	/* try to inject new event if pending */
-	if (vcpu->arch.nmi_pending) {
-		if (kvm_x86_ops->nmi_allowed(vcpu)) {
-			--vcpu->arch.nmi_pending;
-			vcpu->arch.nmi_injected = true;
-			kvm_x86_ops->set_nmi(vcpu);
-		}
+	if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+		--vcpu->arch.nmi_pending;
+		vcpu->arch.nmi_injected = true;
+		kvm_x86_ops->set_nmi(vcpu);
 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
 		/*
 		 * Because interrupts can be injected asynchronously, we are
@@ -6569,10 +6570,12 @@
 		if (inject_pending_event(vcpu, req_int_win) != 0)
 			req_immediate_exit = true;
 		/* enable NMI/IRQ window open exits if needed */
-		else if (vcpu->arch.nmi_pending)
-			kvm_x86_ops->enable_nmi_window(vcpu);
-		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-			kvm_x86_ops->enable_irq_window(vcpu);
+		else {
+			if (vcpu->arch.nmi_pending)
+				kvm_x86_ops->enable_nmi_window(vcpu);
+			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+				kvm_x86_ops->enable_irq_window(vcpu);
+		}
 
 		if (kvm_lapic_enabled(vcpu)) {
 			update_cr8_intercept(vcpu);
@@ -6590,8 +6593,6 @@
 	kvm_x86_ops->prepare_guest_switch(vcpu);
 	if (vcpu->fpu_active)
 		kvm_load_guest_fpu(vcpu);
-	kvm_load_guest_xcr0(vcpu);
-
 	vcpu->mode = IN_GUEST_MODE;
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6618,6 +6619,8 @@
 		goto cancel_injection;
 	}
 
+	kvm_load_guest_xcr0(vcpu);
+
 	if (req_immediate_exit)
 		smp_send_reschedule(vcpu->cpu);
 
@@ -6667,6 +6670,8 @@
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
 
+	kvm_put_guest_xcr0(vcpu);
+
 	/* Interrupt is enabled by handle_external_intr() */
 	kvm_x86_ops->handle_external_intr(vcpu);
 
@@ -7292,7 +7297,7 @@
 static void fx_init(struct kvm_vcpu *vcpu)
 {
 	fpstate_init(&vcpu->arch.guest_fpu.state);
-	if (cpu_has_xsaves)
+	if (boot_cpu_has(X86_FEATURE_XSAVES))
 		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
 			host_xcr0 | XSTATE_COMPACTION_ENABLED;
 
@@ -7314,7 +7319,6 @@
 	 * and assume host would use all available bits.
 	 * Guest xcr0 would be loaded later.
 	 */
-	kvm_put_guest_xcr0(vcpu);
 	vcpu->guest_fpu_loaded = 1;
 	__kernel_fpu_begin();
 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7323,8 +7327,6 @@
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	kvm_put_guest_xcr0(vcpu);
-
 	if (!vcpu->guest_fpu_loaded) {
 		vcpu->fpu_counter = 0;
 		return;
@@ -7754,6 +7756,9 @@
 	kvm_page_track_init(kvm);
 	kvm_mmu_init_vm(kvm);
 
+	if (kvm_x86_ops->vm_init)
+		return kvm_x86_ops->vm_init(kvm);
+
 	return 0;
 }
 
@@ -7875,6 +7880,8 @@
 		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
 		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
 	}
+	if (kvm_x86_ops->vm_destroy)
+		kvm_x86_ops->vm_destroy(kvm);
 	kvm_iommu_unmap_guest(kvm);
 	kfree(kvm->arch.vpic);
 	kfree(kvm->arch.vioapic);
@@ -8357,19 +8364,21 @@
 }
 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
 
+bool kvm_arch_has_irq_bypass(void)
+{
+	return kvm_x86_ops->update_pi_irte != NULL;
+}
+
 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
 				      struct irq_bypass_producer *prod)
 {
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(cons, struct kvm_kernel_irqfd, consumer);
 
-	if (kvm_x86_ops->update_pi_irte) {
-		irqfd->producer = prod;
-		return kvm_x86_ops->update_pi_irte(irqfd->kvm,
-				prod->irq, irqfd->gsi, 1);
-	}
+	irqfd->producer = prod;
 
-	return -EINVAL;
+	return kvm_x86_ops->update_pi_irte(irqfd->kvm,
+					   prod->irq, irqfd->gsi, 1);
 }
 
 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
@@ -8379,11 +8388,6 @@
 	struct kvm_kernel_irqfd *irqfd =
 		container_of(cons, struct kvm_kernel_irqfd, consumer);
 
-	if (!kvm_x86_ops->update_pi_irte) {
-		WARN_ON(irqfd->producer != NULL);
-		return;
-	}
-
 	WARN_ON(irqfd->producer != prod);
 	irqfd->producer = NULL;
 
@@ -8431,3 +8435,5 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
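The new vm_init()/vm_destroy() callbacks give a vendor module a per-VM setup and teardown hook. A minimal sketch of how a backend might wire them up, assuming only the kvm_x86_ops member names visible in the hunks above; the svm_* bodies are hypothetical placeholders, not code from this series:

/* Hypothetical backend hooks; only .vm_init/.vm_destroy are taken from
 * the change above, everything else is illustrative. */
static int svm_vm_init(struct kvm *kvm)
{
	/* allocate per-VM state (e.g. AVIC tables) here; 0 means success */
	return 0;
}

static void svm_vm_destroy(struct kvm *kvm)
{
	/* free whatever svm_vm_init() allocated */
}

static struct kvm_x86_ops svm_x86_ops = {
	/* ... existing callbacks omitted ... */
	.vm_init	= svm_vm_init,
	.vm_destroy	= svm_vm_destroy,
};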
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index fd57d3a..3847e73 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1408,13 +1408,10 @@
 {
 	/* We're under lguest. */
 	pv_info.name = "lguest";
-	/* Paravirt is enabled. */
-	pv_info.paravirt_enabled = 1;
 	/* We're running at privilege level 1, not 0 as normal. */
 	pv_info.kernel_rpl = 1;
 	/* Everyone except Xen runs with this set. */
 	pv_info.shared_kernel_pmd = 1;
-	pv_info.features = 0;
 
 	/*
 	 * We set up all the lguest overrides for sensitive operations.  These
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index be110ef..bf2c607 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -29,8 +29,10 @@
  * there is contention on the semaphore.
  *
  * %eax contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax whish is either a return
- * value or just clobbered..
+ * registers (%eax, %edx and %ecx) except %eax which is either a return
+ * value or just gets clobbered. The same is true for %edx, so make sure
+ * GCC reloads it after the slow path by making it hold a temporary; for
+ * an example, see ____down_write().
  */
 
 #define save_common_regs \
@@ -106,6 +108,16 @@
 	ret
 ENDPROC(call_rwsem_down_write_failed)
 
+ENTRY(call_rwsem_down_write_failed_killable)
+	FRAME_BEGIN
+	save_common_regs
+	movq %rax,%rdi
+	call rwsem_down_write_failed_killable
+	restore_common_regs
+	FRAME_END
+	ret
+ENDPROC(call_rwsem_down_write_failed_killable)
+
 ENTRY(call_rwsem_wake)
 	FRAME_BEGIN
 	/* do nothing if still outstanding active readers */
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 91d93b9..b559d92 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -612,7 +612,7 @@
 {
 	stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if (n > 64 && cpu_has_xmm2)
+	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
 		n = __copy_user_zeroing_intel_nocache(to, from, n);
 	else
 		__copy_user_zeroing(to, from, n);
@@ -629,7 +629,7 @@
 {
 	stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
-	if (n > 64 && cpu_has_xmm2)
+	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
 		n = __copy_user_intel_nocache(to, from, n);
 	else
 		__copy_user(to, from, n);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index f989132..62c0043 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -2,7 +2,7 @@
 KCOV_INSTRUMENT_tlb.o	:= n
 
 obj-y	:=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-	    pat.o pgtable.o physaddr.o gup.o setup_nx.o
+	    pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
@@ -12,7 +12,6 @@
 CFLAGS_fault.o := -I$(src)/../include/asm/trace
 
 obj-$(CONFIG_X86_PAT)		+= pat_rbtree.o
-obj-$(CONFIG_SMP)		+= tlb.o
 
 obj-$(CONFIG_X86_32)		+= pgtable_32.o iomap_32.o
 
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 82447b3..4bb53b8 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <asm/uaccess.h>
+#include <asm/traps.h>
 
 typedef bool (*ex_handler_t)(const struct exception_table_entry *,
 			    struct pt_regs *, int);
@@ -42,6 +43,43 @@
 }
 EXPORT_SYMBOL(ex_handler_ext);
 
+bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
+			     struct pt_regs *regs, int trapnr)
+{
+	WARN_ONCE(1, "unchecked MSR access error: RDMSR from 0x%x\n",
+		  (unsigned int)regs->cx);
+
+	/* Pretend that the read succeeded and returned 0. */
+	regs->ip = ex_fixup_addr(fixup);
+	regs->ax = 0;
+	regs->dx = 0;
+	return true;
+}
+EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
+
+bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
+			     struct pt_regs *regs, int trapnr)
+{
+	WARN_ONCE(1, "unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x)\n",
+		  (unsigned int)regs->cx,
+		  (unsigned int)regs->dx, (unsigned int)regs->ax);
+
+	/* Pretend that the write succeeded. */
+	regs->ip = ex_fixup_addr(fixup);
+	return true;
+}
+EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
+
+bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
+			 struct pt_regs *regs, int trapnr)
+{
+	if (static_cpu_has(X86_BUG_NULL_SEG))
+		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
+	asm volatile ("mov %0, %%fs" : : "rm" (0));
+	return ex_handler_default(fixup, regs, trapnr);
+}
+EXPORT_SYMBOL(ex_handler_clear_fs);
+
 bool ex_has_fault_handler(unsigned long ip)
 {
 	const struct exception_table_entry *e;
@@ -82,24 +120,46 @@
 	return handler(e, regs, trapnr);
 }
 
+extern unsigned int early_recursion_flag;
+
 /* Restricted version used during very early boot */
-int __init early_fixup_exception(unsigned long *ip)
+void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
 {
-	const struct exception_table_entry *e;
-	unsigned long new_ip;
-	ex_handler_t handler;
+	/* Ignore early NMIs. */
+	if (trapnr == X86_TRAP_NMI)
+		return;
 
-	e = search_exception_tables(*ip);
-	if (!e)
-		return 0;
+	if (early_recursion_flag > 2)
+		goto halt_loop;
 
-	new_ip  = ex_fixup_addr(e);
-	handler = ex_fixup_handler(e);
+	if (regs->cs != __KERNEL_CS)
+		goto fail;
 
-	/* special handling not supported during early boot */
-	if (handler != ex_handler_default)
-		return 0;
+	/*
+	 * The full exception fixup machinery is available as soon as
+	 * the early IDT is loaded.  This means that it is the
+	 * responsibility of extable users to either function correctly
+	 * when handlers are invoked early or to simply avoid causing
+	 * exceptions before they're ready to handle them.
+	 *
+	 * This is better than filtering which handlers can be used,
+	 * because refusing to call a handler here is guaranteed to
+	 * result in a hard-to-debug panic.
+	 *
+	 * Keep in mind that not all vectors actually get here.  Early
+	 * page faults, for example, are special.
+	 */
+	if (fixup_exception(regs, trapnr))
+		return;
 
-	*ip = new_ip;
-	return 1;
+fail:
+	early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
+		     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
+		     regs->orig_ax, read_cr2());
+
+	show_regs(regs);
+
+halt_loop:
+	while (true)
+		halt();
 }
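The three handlers added above only take effect when an instruction's exception-table entry names them. A minimal sketch of the intended wiring for an MSR read, assuming the _ASM_EXTABLE_HANDLE() helper from <asm/asm.h> takes (from-label, to-label, handler) as in this kernel; this is illustrative only, not a hunk from this series:

/*
 * Illustrative only: a raw RDMSR whose #GP is routed to
 * ex_handler_rdmsr_unsafe(), so an access to an unimplemented MSR warns
 * once and pretends the read returned 0 instead of oopsing.
 */
static inline u64 rdmsr_warn_on_fault(unsigned int msr)
{
	unsigned int low, high;

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
		     : "=a" (low), "=d" (high)
		     : "c" (msr));

	return low | ((u64)high << 32);
}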
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 740d7ac..14a9505 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -162,7 +162,7 @@
 	unsigned long ps = memparse(opt, &opt);
 	if (ps == PMD_SIZE) {
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
-	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
+	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	} else {
 		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
@@ -177,7 +177,7 @@
 static __init int gigantic_pages_init(void)
 {
 	/* With compaction or CMA we can allocate gigantic pages at runtime */
-	if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
+	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	return 0;
 }
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
new file mode 100644
index 0000000..ec21796
--- /dev/null
+++ b/arch/x86/mm/ident_map.c
@@ -0,0 +1,79 @@
+/*
+ * Helper routines for building identity mapping page tables. This is
+ * included by both the compressed kernel and the regular kernel.
+ */
+
+static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+			   unsigned long addr, unsigned long end)
+{
+	addr &= PMD_MASK;
+	for (; addr < end; addr += PMD_SIZE) {
+		pmd_t *pmd = pmd_page + pmd_index(addr);
+
+		if (!pmd_present(*pmd))
+			set_pmd(pmd, __pmd(addr | pmd_flag));
+	}
+}
+
+static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
+			  unsigned long addr, unsigned long end)
+{
+	unsigned long next;
+
+	for (; addr < end; addr = next) {
+		pud_t *pud = pud_page + pud_index(addr);
+		pmd_t *pmd;
+
+		next = (addr & PUD_MASK) + PUD_SIZE;
+		if (next > end)
+			next = end;
+
+		if (pud_present(*pud)) {
+			pmd = pmd_offset(pud, 0);
+			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			continue;
+		}
+		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
+		if (!pmd)
+			return -ENOMEM;
+		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+	}
+
+	return 0;
+}
+
+int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
+			      unsigned long addr, unsigned long end)
+{
+	unsigned long next;
+	int result;
+	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
+
+	for (; addr < end; addr = next) {
+		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+		pud_t *pud;
+
+		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
+		if (next > end)
+			next = end;
+
+		if (pgd_present(*pgd)) {
+			pud = pud_offset(pgd, 0);
+			result = ident_pud_init(info, pud, addr, next);
+			if (result)
+				return result;
+			continue;
+		}
+
+		pud = (pud_t *)info->alloc_pgt_page(info->context);
+		if (!pud)
+			return -ENOMEM;
+		result = ident_pud_init(info, pud, addr, next);
+		if (result)
+			return result;
+		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
+	}
+
+	return 0;
+}
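kernel_ident_mapping_init() is parameterized entirely through struct x86_mapping_info, which is what lets the compressed and the regular kernel share this file. A minimal usage sketch, assuming only the four fields the code above dereferences (alloc_pgt_page, context, pmd_flag, kernel_mapping); the allocator and the flag choice are illustrative:

/* Hypothetical caller: identity-map the first 1 GiB with 2M pages. */
static void *example_alloc_pgt_page(void *context)
{
	/* any zeroed, page-aligned page works as a page-table page */
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static int example_ident_map_1g(pgd_t *pgd)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= example_alloc_pgt_page,
		.context	= NULL,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.kernel_mapping	= false,	/* 1:1, not kernel-offset */
	};

	return kernel_ident_mapping_init(&info, pgd, 0, 1UL << 30);
}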
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 9d56f27..372aad2 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -157,23 +157,23 @@
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	if (cpu_has_pse && !debug_pagealloc_enabled())
+	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
 		page_size_mask |= 1 << PG_LEVEL_2M;
 #endif
 
 	/* Enable PSE if available */
-	if (cpu_has_pse)
+	if (boot_cpu_has(X86_FEATURE_PSE))
 		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
 	/* Enable PGE if available */
-	if (cpu_has_pge) {
+	if (boot_cpu_has(X86_FEATURE_PGE)) {
 		cr4_set_bits_and_update_boot(X86_CR4_PGE);
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	} else
 		__supported_pte_mask &= ~_PAGE_GLOBAL;
 
 	/* Enable 1 GB linear kernel mappings if available: */
-	if (direct_gbpages && cpu_has_gbpages) {
+	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
 		printk(KERN_INFO "Using GB pages for direct mapping\n");
 		page_size_mask |= 1 << PG_LEVEL_1G;
 	} else {
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index bd7a9b9..84df150 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -284,7 +284,7 @@
 	 */
 	mapping_iter = 1;
 
-	if (!cpu_has_pse)
+	if (!boot_cpu_has(X86_FEATURE_PSE))
 		use_pse = 0;
 
 repeat:
@@ -804,9 +804,6 @@
 	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
 #undef high_memory
 #undef __FIXADDR_TOP
-#ifdef CONFIG_RANDOMIZE_BASE
-	BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE);
-#endif
 
 #ifdef CONFIG_HIGHMEM
 	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 214afda..bce2e5d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -58,79 +58,7 @@
 
 #include "mm_internal.h"
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
-			   unsigned long addr, unsigned long end)
-{
-	addr &= PMD_MASK;
-	for (; addr < end; addr += PMD_SIZE) {
-		pmd_t *pmd = pmd_page + pmd_index(addr);
-
-		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
-	}
-}
-static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
-			  unsigned long addr, unsigned long end)
-{
-	unsigned long next;
-
-	for (; addr < end; addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
-		pmd_t *pmd;
-
-		next = (addr & PUD_MASK) + PUD_SIZE;
-		if (next > end)
-			next = end;
-
-		if (pud_present(*pud)) {
-			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
-			continue;
-		}
-		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
-		if (!pmd)
-			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
-	}
-
-	return 0;
-}
-
-int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-			      unsigned long addr, unsigned long end)
-{
-	unsigned long next;
-	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
-
-	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
-		pud_t *pud;
-
-		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
-		if (next > end)
-			next = end;
-
-		if (pgd_present(*pgd)) {
-			pud = pud_offset(pgd, 0);
-			result = ident_pud_init(info, pud, addr, next);
-			if (result)
-				return result;
-			continue;
-		}
-
-		pud = (pud_t *)info->alloc_pgt_page(info->context);
-		if (!pud)
-			return -ENOMEM;
-		result = ident_pud_init(info, pud, addr, next);
-		if (result)
-			return result;
-		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
-	}
-
-	return 0;
-}
+#include "ident_map.c"
 
 /*
  * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
@@ -1295,7 +1223,7 @@
 	struct vmem_altmap *altmap = to_vmem_altmap(start);
 	int err;
 
-	if (cpu_has_pse)
+	if (boot_cpu_has(X86_FEATURE_PSE))
 		err = vmemmap_populate_hugepages(start, end, node, altmap);
 	else if (altmap) {
 		pr_err_once("%s: no cpu support for altmap allocations\n",
@@ -1338,7 +1266,7 @@
 		}
 		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
 
-		if (!cpu_has_pse) {
+		if (!boot_cpu_has(X86_FEATURE_PSE)) {
 			next = (addr + PAGE_SIZE) & PAGE_MASK;
 			pmd = pmd_offset(pud, addr);
 			if (pmd_none(*pmd))
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0d8d53d..f089491 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -378,7 +378,7 @@
 int __init arch_ioremap_pud_supported(void)
 {
 #ifdef CONFIG_X86_64
-	return cpu_has_gbpages;
+	return boot_cpu_has(X86_FEATURE_GBPAGES);
 #else
 	return 0;
 #endif
@@ -386,7 +386,7 @@
 
 int __init arch_ioremap_pmd_supported(void)
 {
-	return cpu_has_pse;
+	return boot_cpu_has(X86_FEATURE_PSE);
 }
 
 /*
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 01be9ec..7a1f7bb 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1055,7 +1055,7 @@
 	/*
 	 * Map everything starting from the Gb boundary, possibly with 1G pages
 	 */
-	while (cpu_has_gbpages && end - start >= PUD_SIZE) {
+	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
 		set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
 				   massage_pgprot(pud_pgprot)));
 
@@ -1125,8 +1125,14 @@
 static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
 			       int primary)
 {
-	if (cpa->pgd)
+	if (cpa->pgd) {
+		/*
+		 * Right now, we only execute this code path when mapping
+		 * the EFI virtual memory map regions; no other users
+		 * provide a ->pgd value. This may change in the future.
+		 */
 		return populate_pgd(cpa, vaddr);
+	}
 
 	/*
 	 * Ignore all non primary paths.
@@ -1460,7 +1466,7 @@
 	 * error case we fall back to cpa_flush_all (which uses
 	 * WBINVD):
 	 */
-	if (!ret && cpu_has_clflush) {
+	if (!ret && boot_cpu_has(X86_FEATURE_CLFLUSH)) {
 		if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
 			cpa_flush_array(addr, numpages, cache,
 					cpa.flags, pages);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index faec01e..fb0604f 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -40,11 +40,22 @@
 static bool boot_cpu_done;
 
 static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
+static void init_cache_modes(void);
 
-static inline void pat_disable(const char *reason)
+void pat_disable(const char *reason)
 {
+	if (!__pat_enabled)
+		return;
+
+	if (boot_cpu_done) {
+		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
+		return;
+	}
+
 	__pat_enabled = 0;
 	pr_info("x86/PAT: %s\n", reason);
+
+	init_cache_modes();
 }
 
 static int __init nopat(char *str)
@@ -181,7 +192,7 @@
  * configuration.
  * Using lower indices is preferred, so we start with highest index.
  */
-void pat_init_cache_modes(u64 pat)
+static void __init_cache_modes(u64 pat)
 {
 	enum page_cache_mode cache;
 	char pat_msg[33];
@@ -202,14 +213,11 @@
 {
 	u64 tmp_pat;
 
-	if (!cpu_has_pat) {
+	if (!boot_cpu_has(X86_FEATURE_PAT)) {
 		pat_disable("PAT not supported by CPU.");
 		return;
 	}
 
-	if (!pat_enabled())
-		goto done;
-
 	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
 	if (!tmp_pat) {
 		pat_disable("PAT MSR is 0, disabled.");
@@ -218,16 +226,12 @@
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 
-done:
-	pat_init_cache_modes(pat);
+	__init_cache_modes(pat);
 }
 
 static void pat_ap_init(u64 pat)
 {
-	if (!pat_enabled())
-		return;
-
-	if (!cpu_has_pat) {
+	if (!boot_cpu_has(X86_FEATURE_PAT)) {
 		/*
 		 * If this happens we are on a secondary CPU, but switched to
 		 * PAT on the boot CPU. We have no way to undo PAT.
@@ -238,18 +242,32 @@
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-void pat_init(void)
+static void init_cache_modes(void)
 {
-	u64 pat;
-	struct cpuinfo_x86 *c = &boot_cpu_data;
+	u64 pat = 0;
+	static int init_cm_done;
 
-	if (!pat_enabled()) {
+	if (init_cm_done)
+		return;
+
+	if (boot_cpu_has(X86_FEATURE_PAT)) {
+		/*
+		 * CPU supports PAT. Set PAT table to be consistent with
+		 * PAT MSR. This case supports "nopat" boot option, and
+		 * virtual machine environments which support PAT without
+		 * MTRRs. In particular, Xen has a unique PAT MSR setup.
+		 *
+		 * If the PAT MSR reads back as 0, it is considered invalid
+		 * and the no-PAT setup is emulated instead.
+		 */
+		rdmsrl(MSR_IA32_CR_PAT, pat);
+	}
+
+	if (!pat) {
 		/*
 		 * No PAT. Emulate the PAT table that corresponds to the two
-		 * cache bits, PWT (Write Through) and PCD (Cache Disable). This
-		 * setup is the same as the BIOS default setup when the system
-		 * has PAT but the "nopat" boot option has been specified. This
-		 * emulated PAT table is used when MSR_IA32_CR_PAT returns 0.
+		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
+		 * This setup is also the same as the BIOS default setup.
 		 *
 		 * PTE encoding:
 		 *
@@ -266,10 +284,36 @@
 		 */
 		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
 		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
+	}
 
-	} else if ((c->x86_vendor == X86_VENDOR_INTEL) &&
-		   (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
-		    ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
+	__init_cache_modes(pat);
+
+	init_cm_done = 1;
+}
+
+/**
+ * pat_init - Initialize PAT MSR and PAT table
+ *
+ * This function initializes PAT MSR and PAT table with an OS-defined value
+ * to enable additional cache attributes, WC and WT.
+ *
+ * This function must be called on all CPUs using the specific sequence of
+ * operations defined in Intel SDM. mtrr_rendezvous_handler() provides this
+ * procedure for PAT.
+ */
+void pat_init(void)
+{
+	u64 pat;
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	if (!pat_enabled()) {
+		init_cache_modes();
+		return;
+	}
+
+	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
+	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
+	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
 		/*
 		 * PAT support with the lower four entries. Intel Pentium 2,
 		 * 3, M, and 4 are affected by PAT errata, which makes the
@@ -734,25 +778,6 @@
 	if (file->f_flags & O_DSYNC)
 		pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
-#ifdef CONFIG_X86_32
-	/*
-	 * On the PPro and successors, the MTRRs are used to set
-	 * memory types for physical addresses outside main memory,
-	 * so blindly setting UC or PWT on those pages is wrong.
-	 * For Pentiums and earlier, the surround logic should disable
-	 * caching for the high addresses through the KEN pin, but
-	 * we maintain the tradition of paranoia in this code.
-	 */
-	if (!pat_enabled() &&
-	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
-	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
-	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
-	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
-	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-		pcm = _PAGE_CACHE_MODE_UC;
-	}
-#endif
-
 	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
 			     cachemode2protval(pcm));
 	return 1;
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 8bea847..f65a33f 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -32,8 +32,9 @@
 
 void x86_configure_nx(void)
 {
-	/* If disable_nx is set, clear NX on all new mappings going forward. */
-	if (disable_nx)
+	if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
+		__supported_pte_mask |= _PAGE_NX;
+	else
 		__supported_pte_mask &= ~_PAGE_NX;
 }
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 8f4cc3d..5643fd0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,6 +28,8 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+#ifdef CONFIG_SMP
+
 struct flush_tlb_info {
 	struct mm_struct *flush_mm;
 	unsigned long flush_start;
@@ -57,6 +59,118 @@
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
+#endif /* CONFIG_SMP */
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	       struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+
+void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+			struct task_struct *tsk)
+{
+	unsigned cpu = smp_processor_id();
+
+	if (likely(prev != next)) {
+#ifdef CONFIG_SMP
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		this_cpu_write(cpu_tlbstate.active_mm, next);
+#endif
+		cpumask_set_cpu(cpu, mm_cpumask(next));
+
+		/*
+		 * Re-load page tables.
+		 *
+		 * This logic has an ordering constraint:
+		 *
+		 *  CPU 0: Write to a PTE for 'next'
+		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
+		 *  CPU 1: set bit 1 in next's mm_cpumask
+		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
+		 *
+		 * We need to prevent an outcome in which CPU 1 observes
+		 * the new PTE value and CPU 0 observes bit 1 clear in
+		 * mm_cpumask.  (If that occurs, then the IPI will never
+		 * be sent, and CPU 0's TLB will contain a stale entry.)
+		 *
+		 * The bad outcome can occur if either CPU's load is
+		 * reordered before that CPU's store, so both CPUs must
+		 * execute full barriers to prevent this from happening.
+		 *
+		 * Thus, switch_mm needs a full barrier between the
+		 * store to mm_cpumask and any operation that could load
+		 * from next->pgd.  TLB fills are special and can happen
+		 * due to instruction fetches or for no reason at all,
+		 * and neither LOCK nor MFENCE orders them.
+		 * Fortunately, load_cr3() is serializing and gives the
+		 * ordering guarantee we need.
+		 *
+		 */
+		load_cr3(next->pgd);
+
+		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+
+		/* Stop flush ipis for the previous mm */
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
+		/* Load per-mm CR4 state */
+		load_mm_cr4(next);
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+		/*
+		 * Load the LDT, if the LDT is different.
+		 *
+		 * It's possible that prev->context.ldt doesn't match
+		 * the LDT register.  This can happen if leave_mm(prev)
+		 * was called and then modify_ldt changed
+		 * prev->context.ldt but suppressed an IPI to this CPU.
+		 * In this case, prev->context.ldt != NULL, because we
+		 * never set context.ldt to NULL while the mm still
+		 * exists.  That means that next->context.ldt !=
+		 * prev->context.ldt, because mms never share an LDT.
+		 */
+		if (unlikely(prev->context.ldt != next->context.ldt))
+			load_mm_ldt(next);
+#endif
+	}
+#ifdef CONFIG_SMP
+	  else {
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
+
+		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+			/*
+			 * On established mms, the mm_cpumask is only changed
+			 * from irq context, from ptep_clear_flush() while in
+			 * lazy tlb mode, and here. Irqs are blocked during
+			 * schedule, protecting us from simultaneous changes.
+			 */
+			cpumask_set_cpu(cpu, mm_cpumask(next));
+
+			/*
+			 * We were in lazy tlb mode and leave_mm disabled
+			 * tlb flush IPI delivery. We must reload CR3
+			 * to make sure to use no freed page tables.
+			 *
+			 * As above, load_cr3() is serializing and orders TLB
+			 * fills with respect to the mm_cpumask write.
+			 */
+			load_cr3(next->pgd);
+			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+			load_mm_cr4(next);
+			load_mm_ldt(next);
+		}
+	}
+#endif
+}
+
+#ifdef CONFIG_SMP
+
 /*
  * The flush IPI assumes that a thread switch happens in this order:
  * [cpu0: the cpu that switches]
@@ -104,10 +218,8 @@
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
-	if (!f->flush_end)
-		f->flush_end = f->flush_start + PAGE_SIZE;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
@@ -135,12 +247,20 @@
 				 unsigned long end)
 {
 	struct flush_tlb_info info;
+
+	if (end == 0)
+		end = start + PAGE_SIZE;
 	info.flush_mm = mm;
 	info.flush_start = start;
 	info.flush_end = end;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
-	trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
+	if (end == TLB_FLUSH_ALL)
+		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
+	else
+		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
+				(end - start) >> PAGE_SHIFT);
+
 	if (is_uv_system()) {
 		unsigned int cpu;
 
@@ -347,3 +467,5 @@
 	return 0;
 }
 late_initcall(create_tlb_single_page_flush_ceiling);
+
+#endif /* CONFIG_SMP */
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4286f36..fe04a04 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -110,11 +110,16 @@
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 /* pick a register outside of BPF range for JIT internal work */
-#define AUX_REG (MAX_BPF_REG + 1)
+#define AUX_REG (MAX_BPF_JIT_REG + 1)
 
-/* the following table maps BPF registers to x64 registers.
- * x64 register r12 is unused, since if used as base address register
- * in load/store instructions, it always needs an extra byte of encoding
+/* The following table maps BPF registers to x64 registers.
+ *
+ * x64 register r12 is unused, since if used as base address
+ * register in load/store instructions, it always needs an
+ * extra byte of encoding and is callee saved.
+ *
+ *  r9 caches skb->len - skb->data_len
+ * r10 caches skb->data, and is used for blinding (if enabled)
  */
 static const int reg2hex[] = {
 	[BPF_REG_0] = 0,  /* rax */
@@ -128,6 +133,7 @@
 	[BPF_REG_8] = 6,  /* r14 callee saved */
 	[BPF_REG_9] = 7,  /* r15 callee saved */
 	[BPF_REG_FP] = 5, /* rbp readonly */
+	[BPF_REG_AX] = 2, /* r10 temp register */
 	[AUX_REG] = 3,    /* r11 temp register */
 };
 
@@ -141,7 +147,8 @@
 			     BIT(AUX_REG) |
 			     BIT(BPF_REG_7) |
 			     BIT(BPF_REG_8) |
-			     BIT(BPF_REG_9));
+			     BIT(BPF_REG_9) |
+			     BIT(BPF_REG_AX));
 }
 
 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
@@ -182,6 +189,7 @@
 struct jit_context {
 	int cleanup_addr; /* epilogue code offset */
 	bool seen_ld_abs;
+	bool seen_ax_reg;
 };
 
 /* maximum number of bytes emitted while JITing one eBPF insn */
@@ -345,6 +353,7 @@
 	struct bpf_insn *insn = bpf_prog->insnsi;
 	int insn_cnt = bpf_prog->len;
 	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+	bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
 	bool seen_exit = false;
 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
 	int i, cnt = 0;
@@ -367,6 +376,9 @@
 		int ilen;
 		u8 *func;
 
+		if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
+			ctx->seen_ax_reg = seen_ax_reg = true;
+
 		switch (insn->code) {
 			/* ALU */
 		case BPF_ALU | BPF_ADD | BPF_X:
@@ -1002,6 +1014,10 @@
 			 * sk_load_* helpers also use %r10 and %r9d.
 			 * See bpf_jit.S
 			 */
+			if (seen_ax_reg)
+				/* r10 = skb->data, mov %r10, off32(%rbx) */
+				EMIT3_off32(0x4c, 0x8b, 0x93,
+					    offsetof(struct sk_buff, data));
 			EMIT1_off32(0xE8, jmp_offset); /* call */
 			break;
 
@@ -1073,25 +1089,37 @@
 {
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_binary_header *header = NULL;
+	struct bpf_prog *tmp, *orig_prog = prog;
 	int proglen, oldproglen = 0;
 	struct jit_context ctx = {};
+	bool tmp_blinded = false;
 	u8 *image = NULL;
 	int *addrs;
 	int pass;
 	int i;
 
 	if (!bpf_jit_enable)
-		return;
+		return orig_prog;
 
-	if (!prog || !prog->len)
-		return;
+	tmp = bpf_jit_blind_constants(prog);
+	/* If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter.
+	 */
+	if (IS_ERR(tmp))
+		return orig_prog;
+	if (tmp != prog) {
+		tmp_blinded = true;
+		prog = tmp;
+	}
 
 	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
-	if (!addrs)
-		return;
+	if (!addrs) {
+		prog = orig_prog;
+		goto out;
+	}
 
 	/* Before first pass, make a rough estimation of addrs[]
 	 * each bpf instruction is translated to less than 64 bytes
@@ -1113,21 +1141,25 @@
 			image = NULL;
 			if (header)
 				bpf_jit_binary_free(header);
-			goto out;
+			prog = orig_prog;
+			goto out_addrs;
 		}
 		if (image) {
 			if (proglen != oldproglen) {
 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
 				       proglen, oldproglen);
-				goto out;
+				prog = orig_prog;
+				goto out_addrs;
 			}
 			break;
 		}
 		if (proglen == oldproglen) {
 			header = bpf_jit_binary_alloc(proglen, &image,
 						      1, jit_fill_hole);
-			if (!header)
-				goto out;
+			if (!header) {
+				prog = orig_prog;
+				goto out_addrs;
+			}
 		}
 		oldproglen = proglen;
 	}
@@ -1141,8 +1173,14 @@
 		prog->bpf_func = (void *)image;
 		prog->jited = 1;
 	}
-out:
+
+out_addrs:
 	kfree(addrs);
+out:
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(prog, prog == orig_prog ?
+					   tmp : orig_prog);
+	return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *fp)
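With this change bpf_int_jit_compile() always hands back a program: the blinded clone when constant blinding and JITing succeeded, or the original program (with prog->jited left clear) on any failure, so the core falls back to the interpreter. A hedged sketch of the caller-side contract; the wrapper name is made up for illustration and is not the actual bpf core call site:

/* Illustrative wrapper only. */
static struct bpf_prog *jit_or_fall_back(struct bpf_prog *prog)
{
	prog = bpf_int_jit_compile(prog);
	if (!prog->jited)
		pr_debug("bpf_jit: image not generated, using interpreter\n");

	return prog;
}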
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 0e07e09..28c0412 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -636,7 +636,7 @@
 	__u8 cpu_model = boot_cpu_data.x86_model;
 	struct op_x86_model_spec *spec = &op_ppro_spec;	/* default */
 
-	if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon)
+	if (force_cpu_type == arch_perfmon && boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
 		return 0;
 
 	/*
@@ -700,7 +700,7 @@
 	char *cpu_type = NULL;
 	int ret = 0;
 
-	if (!cpu_has_apic)
+	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return -ENODEV;
 
 	if (force_cpu_type == timer)
@@ -761,7 +761,7 @@
 		if (cpu_type)
 			break;
 
-		if (!cpu_has_arch_perfmon)
+		if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
 			return -ENODEV;
 
 		/* use arch perfmon as fallback */
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index d90528e..350f709 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -75,7 +75,7 @@
 	u64 val;
 	int i;
 
-	if (cpu_has_arch_perfmon) {
+	if (boot_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
 		union cpuid10_eax eax;
 		eax.full = cpuid_eax(0xa);
 
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 3cd6983..b2a4e2a 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -396,7 +396,6 @@
 		return -ENODEV;
 
 	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
-	acpi_irq_penalty_init();
 	pcibios_enable_irq = acpi_pci_irq_enable;
 	pcibios_disable_irq = acpi_pci_irq_disable;
 	x86_init.pci.init_irq = x86_init_noop;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 381a43c..8196054 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -516,7 +516,7 @@
 
 int __init pcibios_init(void)
 {
-	if (!raw_pci_ops) {
+	if (!raw_pci_ops && !raw_pci_ext_ops) {
 		printk(KERN_WARNING "PCI: System does not support PCI\n");
 		return 0;
 	}
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index b7de192..837ea36 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -552,9 +552,16 @@
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
 
+/*
+ * Broadwell EP Home Agent BARs erroneously return non-zero values when read.
+ *
+ * See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
+ * entry BDF2.
+ */
 static void pci_bdwep_bar(struct pci_dev *dev)
 {
 	dev->non_compliant_bars = 1;
 }
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index beac4df..4bd08b0 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -445,7 +445,7 @@
 		uint32_t eax = cpuid_eax(xen_cpuid_base() + 4);
 
 		if (((eax & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode) ||
-		    ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && cpu_has_apic))
+		    ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && boot_cpu_has(X86_FEATURE_APIC)))
 			return;
 	}
 
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index a243381..6a2f569 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -43,40 +43,40 @@
 		return;
 
 	if (bgrt_tab->header.length < sizeof(*bgrt_tab)) {
-		pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n",
+		pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
 		       bgrt_tab->header.length, sizeof(*bgrt_tab));
 		return;
 	}
 	if (bgrt_tab->version != 1) {
-		pr_err("Ignoring BGRT: invalid version %u (expected 1)\n",
+		pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n",
 		       bgrt_tab->version);
 		return;
 	}
 	if (bgrt_tab->status & 0xfe) {
-		pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n",
+		pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
 		       bgrt_tab->status);
 		return;
 	}
 	if (bgrt_tab->image_type != 0) {
-		pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
+		pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
 		       bgrt_tab->image_type);
 		return;
 	}
 	if (!bgrt_tab->image_address) {
-		pr_err("Ignoring BGRT: null image address\n");
+		pr_notice("Ignoring BGRT: null image address\n");
 		return;
 	}
 
 	image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
 	if (!image) {
-		pr_err("Ignoring BGRT: failed to map image header memory\n");
+		pr_notice("Ignoring BGRT: failed to map image header memory\n");
 		return;
 	}
 
 	memcpy(&bmp_header, image, sizeof(bmp_header));
 	memunmap(image);
 	if (bmp_header.id != 0x4d42) {
-		pr_err("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
+		pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
 			bmp_header.id);
 		return;
 	}
@@ -84,14 +84,14 @@
 
 	bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
 	if (!bgrt_image) {
-		pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
+		pr_notice("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
 		       bgrt_image_size);
 		return;
 	}
 
 	image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
 	if (!image) {
-		pr_err("Ignoring BGRT: failed to map image memory\n");
+		pr_notice("Ignoring BGRT: failed to map image memory\n");
 		kfree(bgrt_image);
 		bgrt_image = NULL;
 		return;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 994a7df8..f93545e 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -54,10 +54,6 @@
 #include <asm/rtc.h>
 #include <asm/uv/uv.h>
 
-#define EFI_DEBUG
-
-struct efi_memory_map memmap;
-
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
@@ -119,11 +115,10 @@
 
 void __init efi_find_mirror(void)
 {
-	void *p;
+	efi_memory_desc_t *md;
 	u64 mirror_size = 0, total_size = 0;
 
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-		efi_memory_desc_t *md = p;
+	for_each_efi_memory_desc(md) {
 		unsigned long long start = md->phys_addr;
 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
 
@@ -146,10 +141,9 @@
 
 static void __init do_add_efi_memmap(void)
 {
-	void *p;
+	efi_memory_desc_t *md;
 
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-		efi_memory_desc_t *md = p;
+	for_each_efi_memory_desc(md) {
 		unsigned long long start = md->phys_addr;
 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
 		int e820_type;
@@ -209,47 +203,47 @@
 #else
 	pmap = (e->efi_memmap |	((__u64)e->efi_memmap_hi << 32));
 #endif
-	memmap.phys_map		= pmap;
-	memmap.nr_map		= e->efi_memmap_size /
+	efi.memmap.phys_map	= pmap;
+	efi.memmap.nr_map	= e->efi_memmap_size /
 				  e->efi_memdesc_size;
-	memmap.desc_size	= e->efi_memdesc_size;
-	memmap.desc_version	= e->efi_memdesc_version;
+	efi.memmap.desc_size	= e->efi_memdesc_size;
+	efi.memmap.desc_version	= e->efi_memdesc_version;
 
-	memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
+	WARN(efi.memmap.desc_version != 1,
+	     "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+	     efi.memmap.desc_version);
 
-	efi.memmap = &memmap;
+	memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
 
 	return 0;
 }
 
 void __init efi_print_memmap(void)
 {
-#ifdef EFI_DEBUG
 	efi_memory_desc_t *md;
-	void *p;
-	int i;
+	int i = 0;
 
-	for (p = memmap.map, i = 0;
-	     p < memmap.map_end;
-	     p += memmap.desc_size, i++) {
+	for_each_efi_memory_desc(md) {
 		char buf[64];
 
-		md = p;
 		pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
-			i, efi_md_typeattr_format(buf, sizeof(buf), md),
+			i++, efi_md_typeattr_format(buf, sizeof(buf), md),
 			md->phys_addr,
 			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
 			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
 	}
-#endif  /*  EFI_DEBUG  */
 }
 
 void __init efi_unmap_memmap(void)
 {
+	unsigned long size;
+
 	clear_bit(EFI_MEMMAP, &efi.flags);
-	if (memmap.map) {
-		early_memunmap(memmap.map, memmap.nr_map * memmap.desc_size);
-		memmap.map = NULL;
+
+	size = efi.memmap.nr_map * efi.memmap.desc_size;
+	if (efi.memmap.map) {
+		early_memunmap(efi.memmap.map, size);
+		efi.memmap.map = NULL;
 	}
 }
 
@@ -352,8 +346,6 @@
 		       efi.systab->hdr.revision >> 16,
 		       efi.systab->hdr.revision & 0xffff);
 
-	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
-
 	return 0;
 }
 
@@ -440,17 +432,22 @@
 
 static int __init efi_memmap_init(void)
 {
+	unsigned long addr, size;
+
 	if (efi_enabled(EFI_PARAVIRT))
 		return 0;
 
 	/* Map the EFI memory map */
-	memmap.map = early_memremap((unsigned long)memmap.phys_map,
-				   memmap.nr_map * memmap.desc_size);
-	if (memmap.map == NULL) {
+	size = efi.memmap.nr_map * efi.memmap.desc_size;
+	addr = (unsigned long)efi.memmap.phys_map;
+
+	efi.memmap.map = early_memremap(addr, size);
+	if (efi.memmap.map == NULL) {
 		pr_err("Could not map the memory map!\n");
 		return -ENOMEM;
 	}
-	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
+
+	efi.memmap.map_end = efi.memmap.map + size;
 
 	if (add_efi_memmap)
 		do_add_efi_memmap();
@@ -552,12 +549,9 @@
 void __init runtime_code_page_mkexec(void)
 {
 	efi_memory_desc_t *md;
-	void *p;
 
 	/* Make EFI runtime service code area executable */
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-		md = p;
-
+	for_each_efi_memory_desc(md) {
 		if (md->type != EFI_RUNTIME_SERVICES_CODE)
 			continue;
 
@@ -604,12 +598,10 @@
 /* Merge contiguous regions of the same type and attribute */
 static void __init efi_merge_regions(void)
 {
-	void *p;
 	efi_memory_desc_t *md, *prev_md = NULL;
 
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+	for_each_efi_memory_desc(md) {
 		u64 prev_size;
-		md = p;
 
 		if (!prev_md) {
 			prev_md = md;
@@ -651,30 +643,31 @@
 static void __init save_runtime_map(void)
 {
 #ifdef CONFIG_KEXEC_CORE
+	unsigned long desc_size;
 	efi_memory_desc_t *md;
-	void *tmp, *p, *q = NULL;
+	void *tmp, *q = NULL;
 	int count = 0;
 
 	if (efi_enabled(EFI_OLD_MEMMAP))
 		return;
 
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-		md = p;
+	desc_size = efi.memmap.desc_size;
 
+	for_each_efi_memory_desc(md) {
 		if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
 		    (md->type == EFI_BOOT_SERVICES_CODE) ||
 		    (md->type == EFI_BOOT_SERVICES_DATA))
 			continue;
-		tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL);
+		tmp = krealloc(q, (count + 1) * desc_size, GFP_KERNEL);
 		if (!tmp)
 			goto out;
 		q = tmp;
 
-		memcpy(q + count * memmap.desc_size, md, memmap.desc_size);
+		memcpy(q + count * desc_size, md, desc_size);
 		count++;
 	}
 
-	efi_runtime_map_setup(q, count, memmap.desc_size);
+	efi_runtime_map_setup(q, count, desc_size);
 	return;
 
 out:
@@ -714,10 +707,10 @@
 {
 	/* Initial call */
 	if (!entry)
-		return memmap.map_end - memmap.desc_size;
+		return efi.memmap.map_end - efi.memmap.desc_size;
 
-	entry -= memmap.desc_size;
-	if (entry < memmap.map)
+	entry -= efi.memmap.desc_size;
+	if (entry < efi.memmap.map)
 		return NULL;
 
 	return entry;
@@ -759,10 +752,10 @@
 
 	/* Initial call */
 	if (!entry)
-		return memmap.map;
+		return efi.memmap.map;
 
-	entry += memmap.desc_size;
-	if (entry >= memmap.map_end)
+	entry += efi.memmap.desc_size;
+	if (entry >= efi.memmap.map_end)
 		return NULL;
 
 	return entry;
@@ -776,8 +769,11 @@
 {
 	void *p, *new_memmap = NULL;
 	unsigned long left = 0;
+	unsigned long desc_size;
 	efi_memory_desc_t *md;
 
+	desc_size = efi.memmap.desc_size;
+
 	p = NULL;
 	while ((p = efi_map_next_entry(p))) {
 		md = p;
@@ -792,7 +788,7 @@
 		efi_map_region(md);
 		get_systab_virt_addr(md);
 
-		if (left < memmap.desc_size) {
+		if (left < desc_size) {
 			new_memmap = realloc_pages(new_memmap, *pg_shift);
 			if (!new_memmap)
 				return NULL;
@@ -801,10 +797,9 @@
 			(*pg_shift)++;
 		}
 
-		memcpy(new_memmap + (*count * memmap.desc_size), md,
-		       memmap.desc_size);
+		memcpy(new_memmap + (*count * desc_size), md, desc_size);
 
-		left -= memmap.desc_size;
+		left -= desc_size;
 		(*count)++;
 	}
 
@@ -816,7 +811,6 @@
 #ifdef CONFIG_KEXEC_CORE
 	efi_memory_desc_t *md;
 	unsigned int num_pages;
-	void *p;
 
 	efi.systab = NULL;
 
@@ -840,8 +834,7 @@
 	* Map efi regions which were passed via setup_data. The virt_addr is a
 	* fixed addr which was used in first kernel of a kexec boot.
 	*/
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-		md = p;
+	for_each_efi_memory_desc(md) {
 		efi_map_region_fixed(md); /* FIXME: add error handling */
 		get_systab_virt_addr(md);
 	}
@@ -850,10 +843,10 @@
 
 	BUG_ON(!efi.systab);
 
-	num_pages = ALIGN(memmap.nr_map * memmap.desc_size, PAGE_SIZE);
+	num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
 	num_pages >>= PAGE_SHIFT;
 
-	if (efi_setup_page_tables(memmap.phys_map, num_pages)) {
+	if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
 		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 		return;
 	}
@@ -937,16 +930,16 @@
 
 	if (efi_is_native()) {
 		status = phys_efi_set_virtual_address_map(
-				memmap.desc_size * count,
-				memmap.desc_size,
-				memmap.desc_version,
+				efi.memmap.desc_size * count,
+				efi.memmap.desc_size,
+				efi.memmap.desc_version,
 				(efi_memory_desc_t *)__pa(new_memmap));
 	} else {
 		status = efi_thunk_set_virtual_address_map(
 				efi_phys.set_virtual_address_map,
-				memmap.desc_size * count,
-				memmap.desc_size,
-				memmap.desc_version,
+				efi.memmap.desc_size * count,
+				efi.memmap.desc_size,
+				efi.memmap.desc_version,
 				(efi_memory_desc_t *)__pa(new_memmap));
 	}
 
@@ -1011,13 +1004,11 @@
 u32 efi_mem_type(unsigned long phys_addr)
 {
 	efi_memory_desc_t *md;
-	void *p;
 
 	if (!efi_enabled(EFI_MEMMAP))
 		return 0;
 
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-		md = p;
+	for_each_efi_memory_desc(md) {
 		if ((md->phys_addr <= phys_addr) &&
 		    (phys_addr < (md->phys_addr +
 				  (md->num_pages << EFI_PAGE_SHIFT))))
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 49e4dd4..6e7242b 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -55,14 +55,12 @@
 static void __init early_code_mapping_set_exec(int executable)
 {
 	efi_memory_desc_t *md;
-	void *p;
 
 	if (!(__supported_pte_mask & _PAGE_NX))
 		return;
 
 	/* Make EFI service code area executable */
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-		md = p;
+	for_each_efi_memory_desc(md) {
 		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
 		    md->type == EFI_BOOT_SERVICES_CODE)
 			efi_set_executable(md, executable);
@@ -253,7 +251,7 @@
 	 * Map all of RAM so that we can access arguments in the 1:1
 	 * mapping when making EFI runtime calls.
 	 */
-	for_each_efi_memory_desc(&memmap, md) {
+	for_each_efi_memory_desc(md) {
 		if (md->type != EFI_CONVENTIONAL_MEMORY &&
 		    md->type != EFI_LOADER_DATA &&
 		    md->type != EFI_LOADER_CODE)
@@ -398,7 +396,6 @@
 	unsigned long pfn;
 	pgd_t *pgd = efi_pgd;
 	efi_memory_desc_t *md;
-	void *p;
 
 	if (efi_enabled(EFI_OLD_MEMMAP)) {
 		if (__supported_pte_mask & _PAGE_NX)
@@ -409,9 +406,8 @@
 	if (!efi_enabled(EFI_NX_PE_DATA))
 		return;
 
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+	for_each_efi_memory_desc(md) {
 		unsigned long pf = 0;
-		md = p;
 
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
 			continue;
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index ab50ada..4480c06 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -195,10 +195,9 @@
 */
 void __init efi_reserve_boot_services(void)
 {
-	void *p;
+	efi_memory_desc_t *md;
 
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-		efi_memory_desc_t *md = p;
+	for_each_efi_memory_desc(md) {
 		u64 start = md->phys_addr;
 		u64 size = md->num_pages << EFI_PAGE_SHIFT;
 		bool already_reserved;
@@ -250,10 +249,9 @@
 
 void __init efi_free_boot_services(void)
 {
-	void *p;
+	efi_memory_desc_t *md;
 
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-		efi_memory_desc_t *md = p;
+	for_each_efi_memory_desc(md) {
 		unsigned long long start = md->phys_addr;
 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
 
@@ -373,5 +371,5 @@
 
 bool efi_poweroff_required(void)
 {
-	return !!acpi_gbl_reduced_hardware;
+	return acpi_gbl_reduced_hardware || acpi_no_s5;
 }
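All of the open-coded "for (p = memmap.map; ...)" walks in efi.c, efi_64.c and quirks.c above become for_each_efi_memory_desc() over efi.memmap. A small illustrative helper in the same style, using only identifiers that appear in these hunks:

/* Illustrative only: total conventional memory reported by the EFI map. */
static u64 __init efi_conventional_bytes(void)
{
	efi_memory_desc_t *md;
	u64 bytes = 0;

	for_each_efi_memory_desc(md) {
		if (md->type == EFI_CONVENTIONAL_MEMORY)
			bytes += md->num_pages << EFI_PAGE_SHIFT;
	}

	return bytes;
}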
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 1584cbe..815fec6 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -21,19 +21,20 @@
 
 #include <linux/efi.h>
 #include <linux/export.h>
+#include <linux/slab.h>
 #include <asm/efi.h>
 #include <linux/io.h>
 #include <asm/uv/bios.h>
 #include <asm/uv/uv_hub.h>
 
-static struct uv_systab uv_systab;
+struct uv_systab *uv_systab;
 
 s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
 {
-	struct uv_systab *tab = &uv_systab;
+	struct uv_systab *tab = uv_systab;
 	s64 ret;
 
-	if (!tab->function)
+	if (!tab || !tab->function)
 		/*
 		 * BIOS does not support UV systab
 		 */
@@ -183,34 +184,31 @@
 }
 EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target);
 
-
 #ifdef CONFIG_EFI
 void uv_bios_init(void)
 {
-	struct uv_systab *tab;
-
-	if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
-	    (efi.uv_systab == (unsigned long)NULL)) {
-		printk(KERN_CRIT "No EFI UV System Table.\n");
-		uv_systab.function = (unsigned long)NULL;
+	uv_systab = NULL;
+	if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab) {
+		pr_crit("UV: UVsystab: missing\n");
 		return;
 	}
 
-	tab = (struct uv_systab *)ioremap(efi.uv_systab,
-					sizeof(struct uv_systab));
-	if (strncmp(tab->signature, "UVST", 4) != 0)
-		printk(KERN_ERR "bad signature in UV system table!");
+	uv_systab = ioremap(efi.uv_systab, sizeof(struct uv_systab));
+	if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) {
+		pr_err("UV: UVsystab: bad signature!\n");
+		iounmap(uv_systab);
+		return;
+	}
 
-	/*
-	 * Copy table to permanent spot for later use.
-	 */
-	memcpy(&uv_systab, tab, sizeof(struct uv_systab));
-	iounmap(tab);
-
-	printk(KERN_INFO "EFI UV System Table Revision %d\n",
-					uv_systab.revision);
+	if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
+		int size = uv_systab->size;	/* read before unmapping */
+
+		iounmap(uv_systab);
+		uv_systab = ioremap(efi.uv_systab, size);
+		if (!uv_systab) {
+			pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
+			return;
+		}
+	}
+	pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
 }
-#else	/* !CONFIG_EFI */
-
-void uv_bios_init(void) { }
 #endif
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 3b6ec42..fdb4d42 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -37,7 +37,7 @@
 };
 
 static int timeout_us;
-static int nobau;
+static bool nobau = true;
 static int nobau_perm;
 static cycles_t congested_cycles;
 
@@ -106,13 +106,28 @@
 	"enable:   number times use of the BAU was re-enabled"
 };
 
-static int __init
-setup_nobau(char *arg)
+static int __init setup_bau(char *arg)
 {
-	nobau = 1;
+	int result;
+
+	if (!arg)
+		return -EINVAL;
+
+	result = strtobool(arg, &nobau);
+	if (result)
+		return result;
+
+	/* we need to flip the logic here, so that bau=y sets nobau to false */
+	nobau = !nobau;
+
+	if (!nobau)
+		pr_info("UV BAU Enabled\n");
+	else
+		pr_info("UV BAU Disabled\n");
+
 	return 0;
 }
-early_param("nobau", setup_nobau);
+early_param("bau", setup_bau);
 
 /* base pnode in this partition */
 static int uv_base_pnode __read_mostly;
@@ -131,10 +146,10 @@
 		pr_info("BAU not initialized; cannot be turned on\n");
 		return;
 	}
-	nobau = 0;
+	nobau = false;
 	for_each_present_cpu(cpu) {
 		bcp = &per_cpu(bau_control, cpu);
-		bcp->nobau = 0;
+		bcp->nobau = false;
 	}
 	pr_info("BAU turned on\n");
 	return;
@@ -146,10 +161,10 @@
 	int cpu;
 	struct bau_control *bcp;
 
-	nobau = 1;
+	nobau = true;
 	for_each_present_cpu(cpu) {
 		bcp = &per_cpu(bau_control, cpu);
-		bcp->nobau = 1;
+		bcp->nobau = true;
 	}
 	pr_info("BAU turned off\n");
 	return;
@@ -1886,7 +1901,7 @@
 		bcp = &per_cpu(bau_control, cpu);
 		bcp->baudisabled		= 0;
 		if (nobau)
-			bcp->nobau		= 1;
+			bcp->nobau		= true;
 		bcp->statp			= &per_cpu(ptcstats, cpu);
 		/* time interval to catch a hardware stay-busy bug */
 		bcp->timeout_interval		= usec_2_cycles(2*timeout_us);
@@ -2025,7 +2040,8 @@
 			return 1;
 		}
 		bcp->uvhub_master = *hmasterp;
-		bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+		bcp->uvhub_cpu = uv_cpu_blade_processor_id(cpu);
+
 		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
 			printk(KERN_EMERG "%d cpus per uvhub invalid\n",
 				bcp->uvhub_cpu);
diff --git a/arch/x86/platform/uv/uv_sysfs.c b/arch/x86/platform/uv/uv_sysfs.c
index 5d4ba30..e9da9eb 100644
--- a/arch/x86/platform/uv/uv_sysfs.c
+++ b/arch/x86/platform/uv/uv_sysfs.c
@@ -34,7 +34,7 @@
 static ssize_t coherence_id_show(struct kobject *kobj,
 			struct kobj_attribute *attr, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%ld\n", partition_coherence_id());
+	return snprintf(buf, PAGE_SIZE, "%ld\n", uv_partition_coherence_id());
 }
 
 static struct kobj_attribute partition_id_attr =
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 2b158a9..b333fc4 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -165,7 +165,7 @@
 	for_each_present_cpu(cpu) {
 		int nid = cpu_to_node(cpu);
 		int bid = uv_cpu_to_blade_id(cpu);
-		int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+		int bcpu = uv_cpu_blade_processor_id(cpu);
 		struct uv_rtc_timer_head *head = blade_info[bid];
 
 		if (!head) {
@@ -226,7 +226,7 @@
 	int pnode = uv_cpu_to_pnode(cpu);
 	int bid = uv_cpu_to_blade_id(cpu);
 	struct uv_rtc_timer_head *head = blade_info[bid];
-	int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+	int bcpu = uv_cpu_blade_processor_id(cpu);
 	u64 *t = &head->cpu[bcpu].expires;
 	unsigned long flags;
 	int next_cpu;
@@ -262,7 +262,7 @@
 	int pnode = uv_cpu_to_pnode(cpu);
 	int bid = uv_cpu_to_blade_id(cpu);
 	struct uv_rtc_timer_head *head = blade_info[bid];
-	int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+	int bcpu = uv_cpu_blade_processor_id(cpu);
 	u64 *t = &head->cpu[bcpu].expires;
 	unsigned long flags;
 	int rc = 0;
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index 291226b..9f14bd3 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -106,7 +106,7 @@
 			 * normal page tables.
 			 * NOTE: We can mark everything as executable here
 			 */
-			if (cpu_has_pse) {
+			if (boot_cpu_has(X86_FEATURE_PSE)) {
 				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
 				pfn += PTRS_PER_PTE;
 			} else {
diff --git a/arch/x86/ras/Kconfig b/arch/x86/ras/Kconfig
index df280da..d957d5f 100644
--- a/arch/x86/ras/Kconfig
+++ b/arch/x86/ras/Kconfig
@@ -1,4 +1,4 @@
-config AMD_MCE_INJ
+config MCE_AMD_INJ
 	tristate "Simple MCE injection interface for AMD processors"
 	depends on RAS && EDAC_DECODE_MCE && DEBUG_FS && AMD_NB
 	default n
diff --git a/arch/x86/ras/Makefile b/arch/x86/ras/Makefile
index dd2c98b..5f94546 100644
--- a/arch/x86/ras/Makefile
+++ b/arch/x86/ras/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_AMD_MCE_INJ)		+= mce_amd_inj.o
+obj-$(CONFIG_MCE_AMD_INJ)		+= mce_amd_inj.o
 
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c
index 55d38cf..e69f470 100644
--- a/arch/x86/ras/mce_amd_inj.c
+++ b/arch/x86/ras/mce_amd_inj.c
@@ -20,6 +20,7 @@
 #include <linux/pci.h>
 
 #include <asm/mce.h>
+#include <asm/smp.h>
 #include <asm/amd_nb.h>
 #include <asm/irq_vectors.h>
 
@@ -206,7 +207,7 @@
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 	u32 cores_per_node;
 
-	cores_per_node = c->x86_max_cores / amd_get_nodes_per_socket();
+	cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket();
 
 	return cores_per_node * node_id;
 }
@@ -289,14 +290,33 @@
 	wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS,
 		     (u32)mcg_status, (u32)(mcg_status >> 32));
 
-	wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b),
-		     (u32)i_mce.status, (u32)(i_mce.status >> 32));
+	if (boot_cpu_has(X86_FEATURE_SMCA)) {
+		if (inj_type == DFR_INT_INJ) {
+			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_DESTAT(b),
+				     (u32)i_mce.status, (u32)(i_mce.status >> 32));
 
-	wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b),
-		     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
+			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_DEADDR(b),
+				     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
+		} else {
+			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_STATUS(b),
+				     (u32)i_mce.status, (u32)(i_mce.status >> 32));
 
-	wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b),
-		     (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
+			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_ADDR(b),
+				     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
+		}
+
+		wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(b),
+			     (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
+	} else {
+		wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b),
+			     (u32)i_mce.status, (u32)(i_mce.status >> 32));
+
+		wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b),
+			     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
+
+		wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b),
+			     (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
+	}
 
 	toggle_hw_mce_inject(cpu, false);
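Aside: the injection path now dispatches on X86_FEATURE_SMCA — on Scalable-MCA parts the status/addr/misc values go to the MSR_AMD64_SMCA_MCx_* register space, and deferred-error injections land in the DESTAT/DEADDR pair. A hedged sketch of just the status-register selection, pulled out of context (the helper name is invented for illustration):

    #include <asm/cpufeature.h>
    #include <asm/msr-index.h>

    /* Choose the MCA status MSR for bank b, mirroring the branch added above. */
    static u32 pick_status_msr(unsigned int b, bool deferred_error)
    {
            if (boot_cpu_has(X86_FEATURE_SMCA))
                    return deferred_error ? MSR_AMD64_SMCA_MCx_DESTAT(b)
                                          : MSR_AMD64_SMCA_MCx_STATUS(b);
            return MSR_IA32_MCx_STATUS(b);
    }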
 
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh
deleted file mode 100644
index 1a4c17b..0000000
--- a/arch/x86/tools/calc_run_size.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh
-#
-# Calculate the amount of space needed to run the kernel, including room for
-# the .bss and .brk sections.
-#
-# Usage:
-# objdump -h a.out | sh calc_run_size.sh
-
-NUM='\([0-9a-fA-F]*[ \t]*\)'
-OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
-if [ -z "$OUT" ] ; then
-	echo "Never found .bss or .brk file offset" >&2
-	exit 1
-fi
-
-OUT=$(echo ${OUT# })
-sizeA=$(printf "%d" 0x${OUT%% *})
-OUT=${OUT#* }
-offsetA=$(printf "%d" 0x${OUT%% *})
-OUT=${OUT#* }
-sizeB=$(printf "%d" 0x${OUT%% *})
-OUT=${OUT#* }
-offsetB=$(printf "%d" 0x${OUT%% *})
-
-run_size=$(( $offsetA + $sizeA + $sizeB ))
-
-# BFD linker shows the same file offset in ELF.
-if [ "$offsetA" -ne "$offsetB" ] ; then
-	# Gold linker shows them as consecutive.
-	endB=$(( $offsetB + $sizeB ))
-	if [ "$endB" != "$run_size" ] ; then
-		printf "sizeA: 0x%x\n" $sizeA >&2
-		printf "offsetA: 0x%x\n" $offsetA >&2
-		printf "sizeB: 0x%x\n" $sizeB >&2
-		printf "offsetB: 0x%x\n" $offsetB >&2
-		echo ".bss and .brk are non-contiguous" >&2
-		exit 1
-	fi
-fi
-
-printf "%d\n" $run_size
-exit 0
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index abf4901..db52a7f 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -66,7 +66,7 @@
 
 	ret = HYPERVISOR_platform_op(&op);
 	if (ret)
-		return 0;
+		op.u.pcpu_info.apic_id = BAD_APICID;
 
 	return op.u.pcpu_info.apic_id << 24;
 }
@@ -142,6 +142,14 @@
 {
 }
 
+static int xen_cpu_present_to_apicid(int cpu)
+{
+	if (cpu_present(cpu))
+		return xen_get_apic_id(xen_apic_read(APIC_ID));
+	else
+		return BAD_APICID;
+}
+
 static struct apic xen_pv_apic = {
 	.name 				= "Xen PV",
 	.probe 				= xen_apic_probe_pv,
@@ -162,7 +170,7 @@
 
 	.ioapic_phys_id_map		= default_ioapic_phys_id_map, /* Used on 32-bit */
 	.setup_apic_routing		= NULL,
-	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
+	.cpu_present_to_apicid		= xen_cpu_present_to_apicid,
 	.apicid_to_cpu_present		= physid_set_mask_of_physid, /* Used on 32-bit */
 	.check_phys_apicid_present	= default_check_phys_apicid_present, /* smp_sanity_check needs it */
 	.phys_pkg_id			= xen_phys_pkg_id, /* detect_ht */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 880862c..760789a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -75,7 +75,6 @@
 #include <asm/mach_traps.h>
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
-#include <asm/pat.h>
 #include <asm/cpu.h>
 
 #ifdef CONFIG_ACPI
@@ -1093,6 +1092,26 @@
 	return ret;
 }
 
+static u64 xen_read_msr(unsigned int msr)
+{
+	/*
+	 * This will silently swallow a #GP from RDMSR.  It may be worth
+	 * changing that.
+	 */
+	int err;
+
+	return xen_read_msr_safe(msr, &err);
+}
+
+static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
+{
+	/*
+	 * This will silently swallow a #GP from WRMSR.  It may be worth
+	 * changing that.
+	 */
+	xen_write_msr_safe(msr, low, high);
+}
+
 void xen_setup_shared_info(void)
 {
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1187,13 +1206,11 @@
 }
 
 static const struct pv_info xen_info __initconst = {
-	.paravirt_enabled = 1,
 	.shared_kernel_pmd = 0,
 
 #ifdef CONFIG_X86_64
 	.extra_user_64bit_cs = FLAT_USER_CS64,
 #endif
-	.features = 0,
 	.name = "Xen",
 };
 
@@ -1223,8 +1240,11 @@
 
 	.wbinvd = native_wbinvd,
 
-	.read_msr = xen_read_msr_safe,
-	.write_msr = xen_write_msr_safe,
+	.read_msr = xen_read_msr,
+	.write_msr = xen_write_msr,
+
+	.read_msr_safe = xen_read_msr_safe,
+	.write_msr_safe = xen_write_msr_safe,
 
 	.read_pmc = xen_read_pmc,
 
@@ -1469,10 +1489,10 @@
 	 * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
 	 * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu__init_cpu().
 	*/
-	if (cpu_has_pse)
+	if (boot_cpu_has(X86_FEATURE_PSE))
 		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
-	if (cpu_has_pge)
+	if (boot_cpu_has(X86_FEATURE_PGE))
 		cr4_set_bits_and_update_boot(X86_CR4_PGE);
 }
 
@@ -1506,12 +1526,16 @@
 }
 #endif    /* CONFIG_XEN_PVH */
 
+static void __init xen_dom0_set_legacy_features(void)
+{
+	x86_platform.legacy.rtc = 1;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
 	struct physdev_set_iopl set_iopl;
 	unsigned long initrd_start = 0;
-	u64 pat;
 	int rc;
 
 	if (!xen_start_info)
@@ -1527,8 +1551,6 @@
 
 	/* Install Xen paravirt ops */
 	pv_info = xen_info;
-	if (xen_initial_domain())
-		pv_info.features |= PV_SUPPORTED_RTC;
 	pv_init_ops = xen_init_ops;
 	if (!xen_pvh_domain()) {
 		pv_cpu_ops = xen_cpu_ops;
@@ -1618,13 +1640,6 @@
 				   xen_start_info->nr_pages);
 	xen_reserve_special_pages();
 
-	/*
-	 * Modify the cache mode translation tables to match Xen's PAT
-	 * configuration.
-	 */
-	rdmsrl(MSR_IA32_CR_PAT, pat);
-	pat_init_cache_modes(pat);
-
 	/* keep using Xen gdt for now; no urgent need to change it */
 
 #ifdef CONFIG_X86_32
@@ -1670,6 +1685,7 @@
 	boot_params.hdr.ramdisk_image = initrd_start;
 	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
 	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
+	boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;
 
 	if (!xen_initial_domain()) {
 		add_preferred_console("xenboot", 0, NULL);
@@ -1687,6 +1703,8 @@
 			.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
 		};
 
+		x86_platform.set_legacy_features =
+				xen_dom0_set_legacy_features;
 		xen_init_vga(info, xen_start_info->console.dom0.info_size);
 		xen_start_info->console.domU.mfn = 0;
 		xen_start_info->console.domU.evtchn = 0;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3c6d17f..719cf29 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -545,6 +545,8 @@
 	 * data back is to call:
 	 */
 	tick_nohz_idle_enter();
+
+	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 9e2ba5c..f42e78d 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -27,6 +27,12 @@
 
 static void xen_qlock_kick(int cpu)
 {
+	int irq = per_cpu(lock_kicker_irq, cpu);
+
+	/* Don't kick if the target's kicker interrupt is not initialized. */
+	if (irq == -1)
+		return;
+
 	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
 }
 
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index e832d3e..85257af 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -5,7 +5,6 @@
 	def_bool y
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_IPC_PARSE_VERSION
-	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index b56855a..28cf4c5 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -22,6 +22,7 @@
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += sections.h
 generic-y += siginfo.h
 generic-y += statfs.h
diff --git a/arch/xtensa/include/asm/rwsem.h b/arch/xtensa/include/asm/rwsem.h
deleted file mode 100644
index 249619e..0000000
--- a/arch/xtensa/include/asm/rwsem.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * include/asm-xtensa/rwsem.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Largely copied from include/asm-ppc/rwsem.h
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_RWSEM_H
-#define _XTENSA_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
-		smp_wmb();
-	else
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg(&sem->count, tmp,
-				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			smp_wmb();
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-				(atomic_t *)(&sem->count));
-	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
-		smp_wmb();
-	else
-		rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-		      RWSEM_ACTIVE_WRITE_BIAS);
-	smp_wmb();
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	smp_wmb();
-	tmp = atomic_sub_return(1,(atomic_t *)(&sem->count));
-	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	smp_wmb();
-	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-			      (atomic_t *)(&sem->count)) < 0)
-		rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-	atomic_add(delta, (atomic_t *)(&sem->count));
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	smp_wmb();
-	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
-	if (tmp < 0)
-		rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	smp_mb();
-	return atomic_add_return(delta, (atomic_t *)(&sem->count));
-}
-
-#endif	/* _XTENSA_RWSEM_H */
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index 54f0118..a6b00b3 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -332,14 +332,14 @@
 void perf_callchain_kernel(struct perf_callchain_entry *entry,
 			   struct pt_regs *regs)
 {
-	xtensa_backtrace_kernel(regs, PERF_MAX_STACK_DEPTH,
+	xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack,
 				callchain_trace, NULL, entry);
 }
 
 void perf_callchain_user(struct perf_callchain_entry *entry,
 			 struct pt_regs *regs)
 {
-	xtensa_backtrace_user(regs, PERF_MAX_STACK_DEPTH,
+	xtensa_backtrace_user(regs, sysctl_perf_event_max_stack,
 			      callchain_trace, entry);
 }
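Aside: the fixed PERF_MAX_STACK_DEPTH bound is replaced here by sysctl_perf_event_max_stack, the limit userspace can tune through /proc/sys/kernel/perf_event_max_stack. A small sketch of how a backtrace helper might honor the runtime limit (illustrative only, helper name invented):

    #include <linux/perf_event.h>
    #include <linux/kernel.h>

    /* Clamp a requested callchain depth to the runtime-tunable maximum. */
    static unsigned int callchain_depth(unsigned int wanted)
    {
            return min_t(unsigned int, wanted, sysctl_perf_event_max_stack);
    }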
 
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 976a385..66a5d15 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -428,7 +428,7 @@
 	if (len == skb->len) {
 		lp->stats.tx_packets++;
 		lp->stats.tx_bytes += skb->len;
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 		netif_start_queue(dev);
 
 		/* this is normally done in the interrupt when tx finishes */
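Aside: this one-liner is part of a tree-wide conversion in which drivers stop writing dev->trans_start directly and call netif_trans_update() instead, which records the transmit timestamp on the queue. A minimal sketch of the driver-side pattern (function name is illustrative):

    #include <linux/netdevice.h>

    /* Record that a transmit completed and let the stack queue more packets. */
    static void example_tx_complete(struct net_device *dev)
    {
            netif_trans_update(dev);        /* replaces dev->trans_start = jiffies */
            netif_wake_queue(dev);
    }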
diff --git a/block/bio.c b/block/bio.c
index f124a0a..0e4aa42 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -311,17 +311,6 @@
 	bio_endio(__bio_chain_endio(bio));
 }
 
-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
-	bio_set_flag(bio, BIO_CHAIN);
-	smp_mb__before_atomic();
-	atomic_inc(&bio->__bi_remaining);
-}
-
 /**
  * bio_chain - chain bio completions
  * @bio: the target bio
@@ -1339,7 +1328,7 @@
 		 * release the pages we didn't map into the bio, if any
 		 */
 		while (j < page_limit)
-			page_cache_release(pages[j++]);
+			put_page(pages[j++]);
 	}
 
 	kfree(pages);
@@ -1365,7 +1354,7 @@
 	for (j = 0; j < nr_pages; j++) {
 		if (!pages[j])
 			break;
-		page_cache_release(pages[j]);
+		put_page(pages[j]);
 	}
  out:
 	kfree(pages);
@@ -1385,7 +1374,7 @@
 		if (bio_data_dir(bio) == READ)
 			set_page_dirty_lock(bvec->bv_page);
 
-		page_cache_release(bvec->bv_page);
+		put_page(bvec->bv_page);
 	}
 
 	bio_put(bio);
@@ -1615,8 +1604,8 @@
  * the BIO and the offending pages and re-dirty the pages in process context.
  *
  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
- * here on.  It will run one page_cache_release() against each page and will
- * run one bio_put() against the BIO.
+ * here on.  It will run one put_page() against each page and will run one
+ * bio_put() against the BIO.
  */
 
 static void bio_dirty_fn(struct work_struct *work);
@@ -1658,7 +1647,7 @@
 		struct page *page = bvec->bv_page;
 
 		if (PageDirty(page) || PageCompound(page)) {
-			page_cache_release(page);
+			put_page(page);
 			bvec->bv_page = NULL;
 		} else {
 			nr_clean_pages++;
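Aside: the bio.c hunks above are part of the page-cache cleanup that removed page_cache_release(); dropping a reference on a bvec page is now a plain put_page(). Minimal illustration (helper name invented):

    #include <linux/mm.h>
    #include <linux/blk_types.h>

    /* Release the page behind a bio_vec, if any. */
    static void drop_bvec_page(struct bio_vec *bv)
    {
            if (bv->bv_page) {
                    put_page(bv->bv_page);
                    bv->bv_page = NULL;
            }
    }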
diff --git a/block/blk-core.c b/block/blk-core.c
index 827f8ba..2475b1c7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -706,7 +706,7 @@
 		goto fail_id;
 
 	q->backing_dev_info.ra_pages =
-			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
 	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
 	q->backing_dev_info.name = "block";
 	q->node = node_id;
@@ -1523,6 +1523,7 @@
  * blk_add_request_payload - add a payload to a request
  * @rq: request to update
  * @page: page backing the payload
+ * @offset: offset in page
  * @len: length of the payload.
  *
  * This allows to later add a payload to an already submitted request by
@@ -1533,12 +1534,12 @@
  * discard requests should ever use it.
  */
 void blk_add_request_payload(struct request *rq, struct page *page,
-		unsigned int len)
+		int offset, unsigned int len)
 {
 	struct bio *bio = rq->bio;
 
 	bio->bi_io_vec->bv_page = page;
-	bio->bi_io_vec->bv_offset = 0;
+	bio->bi_io_vec->bv_offset = offset;
 	bio->bi_io_vec->bv_len = len;
 
 	bio->bi_iter.bi_size = len;
@@ -1963,7 +1964,8 @@
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9c423e5..b1c91d2 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,17 +95,18 @@
 static bool blk_kick_flush(struct request_queue *q,
 			   struct blk_flush_queue *fq);
 
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 {
 	unsigned int policy = 0;
 
 	if (blk_rq_sectors(rq))
 		policy |= REQ_FSEQ_DATA;
 
-	if (fflags & REQ_FLUSH) {
+	if (fflags & (1UL << QUEUE_FLAG_WC)) {
 		if (rq->cmd_flags & REQ_FLUSH)
 			policy |= REQ_FSEQ_PREFLUSH;
-		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+		    (rq->cmd_flags & REQ_FUA))
 			policy |= REQ_FSEQ_POSTFLUSH;
 	}
 	return policy;
@@ -384,7 +385,7 @@
 void blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	unsigned int fflags = q->flush_flags;	/* may change, cache */
+	unsigned long fflags = q->queue_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
@@ -393,7 +394,7 @@
 	 * REQ_FLUSH and FUA for the driver.
 	 */
 	rq->cmd_flags &= ~REQ_FLUSH;
-	if (!(fflags & REQ_FUA))
+	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
 		rq->cmd_flags &= ~REQ_FUA;
 
 	/*
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9ebf653..23d7f30 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,23 +9,81 @@
 
 #include "blk.h"
 
-struct bio_batch {
-	atomic_t		done;
-	int			error;
-	struct completion	*wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+		gfp_t gfp)
 {
-	struct bio_batch *bb = bio->bi_private;
+	struct bio *new = bio_alloc(gfp, nr_pages);
 
-	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
-		bb->error = bio->bi_error;
-	if (atomic_dec_and_test(&bb->done))
-		complete(bb->wait);
-	bio_put(bio);
+	if (bio) {
+		bio_chain(bio, new);
+		submit_bio(rw, bio);
+	}
+
+	return new;
 }
 
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+	struct bio *bio = *biop;
+	unsigned int granularity;
+	int alignment;
+
+	if (!q)
+		return -ENXIO;
+	if (!blk_queue_discard(q))
+		return -EOPNOTSUPP;
+	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+		return -EOPNOTSUPP;
+
+	/* Zero-sector (unknown) and one-sector granularities are the same.  */
+	granularity = max(q->limits.discard_granularity >> 9, 1U);
+	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
+	while (nr_sects) {
+		unsigned int req_sects;
+		sector_t end_sect, tmp;
+
+		/* Make sure bi_size doesn't overflow */
+		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
+
+		/*
+		 * If splitting a request, and the next starting sector would be
+		 * misaligned, stop the discard at the previous aligned sector.
+		 */
+		end_sect = sector + req_sects;
+		tmp = end_sect;
+		if (req_sects < nr_sects &&
+		    sector_div(tmp, granularity) != alignment) {
+			end_sect = end_sect - alignment;
+			sector_div(end_sect, granularity);
+			end_sect = end_sect * granularity + alignment;
+			req_sects = end_sect - sector;
+		}
+
+		bio = next_bio(bio, type, 1, gfp_mask);
+		bio->bi_iter.bi_sector = sector;
+		bio->bi_bdev = bdev;
+
+		bio->bi_iter.bi_size = req_sects << 9;
+		nr_sects -= req_sects;
+		sector = end_sect;
+
+		/*
+		 * We can loop for a long time in here, if someone does
+		 * full device discards (like mkfs). Be nice and allow
+		 * us to schedule out to avoid softlocking if preempt
+		 * is disabled.
+		 */
+		cond_resched();
+	}
+
+	*biop = bio;
+	return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
+
 /**
  * blkdev_issue_discard - queue a discard
  * @bdev:	blockdev to issue discard for
@@ -40,92 +98,24 @@
 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
-	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int granularity;
-	int alignment;
-	struct bio_batch bb;
-	struct bio *bio;
-	int ret = 0;
+	struct bio *bio = NULL;
 	struct blk_plug plug;
+	int ret;
 
-	if (!q)
-		return -ENXIO;
-
-	if (!blk_queue_discard(q))
-		return -EOPNOTSUPP;
-
-	/* Zero-sector (unknown) and one-sector granularities are the same.  */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
-	if (flags & BLKDEV_DISCARD_SECURE) {
-		if (!blk_queue_secdiscard(q))
-			return -EOPNOTSUPP;
+	if (flags & BLKDEV_DISCARD_SECURE)
 		type |= REQ_SECURE;
-	}
-
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
 
 	blk_start_plug(&plug);
-	while (nr_sects) {
-		unsigned int req_sects;
-		sector_t end_sect, tmp;
-
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		/* Make sure bi_size doesn't overflow */
-		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
-
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
-		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
-
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
-		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
-
-		bio->bi_iter.bi_size = req_sects << 9;
-		nr_sects -= req_sects;
-		sector = end_sect;
-
-		atomic_inc(&bb.done);
-		submit_bio(type, bio);
-
-		/*
-		 * We can loop for a long time in here, if someone does
-		 * full device discards (like mkfs). Be nice and allow
-		 * us to schedule out to avoid softlocking if preempt
-		 * is disabled.
-		 */
-		cond_resched();
+	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+			&bio);
+	if (!ret && bio) {
+		ret = submit_bio_wait(type, bio);
+		if (ret == -EOPNOTSUPP)
+			ret = 0;
 	}
 	blk_finish_plug(&plug);
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
@@ -145,11 +135,9 @@
 			    sector_t nr_sects, gfp_t gfp_mask,
 			    struct page *page)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	unsigned int max_write_same_sectors;
-	struct bio_batch bb;
-	struct bio *bio;
+	struct bio *bio = NULL;
 	int ret = 0;
 
 	if (!q)
@@ -158,21 +146,10 @@
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
 	while (nr_sects) {
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
-		bio->bi_private = &bb;
 		bio->bi_vcnt = 1;
 		bio->bi_io_vec->bv_page = page;
 		bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +163,11 @@
 			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
-
-		atomic_inc(&bb.done);
-		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
 
@@ -216,28 +186,15 @@
 				  sector_t nr_sects, gfp_t gfp_mask)
 {
 	int ret;
-	struct bio *bio;
-	struct bio_batch bb;
+	struct bio *bio = NULL;
 	unsigned int sz;
-	DECLARE_COMPLETION_ONSTACK(wait);
 
-	atomic_set(&bb.done, 1);
-	bb.error = 0;
-	bb.wait = &wait;
-
-	ret = 0;
 	while (nr_sects != 0) {
-		bio = bio_alloc(gfp_mask,
-				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
-
+		bio = next_bio(bio, WRITE,
+				min(nr_sects, (sector_t)BIO_MAX_PAGES),
+				gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev   = bdev;
-		bio->bi_end_io = bio_batch_end_io;
-		bio->bi_private = &bb;
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +204,11 @@
 			if (ret < (sz << 9))
 				break;
 		}
-		ret = 0;
-		atomic_inc(&bb.done);
-		submit_bio(WRITE, bio);
 	}
 
-	/* Wait for bios in-flight */
-	if (!atomic_dec_and_test(&bb.done))
-		wait_for_completion_io(&wait);
-
-	if (bb.error)
-		return bb.error;
-	return ret;
+	if (bio)
+		return submit_bio_wait(WRITE, bio);
+	return 0;
 }
 
 /**
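Aside: the blk-lib.c rewrite above replaces the bio_batch completion scheme with chained bios — next_bio() links each new bio to its predecessor via bio_chain() and submits the predecessor, so the caller only waits on the last bio in the chain. A hedged sketch of a caller using the new split interface, mirroring blkdev_issue_discard() above (names follow the code in this patch; error handling is abbreviated):

    #include <linux/blkdev.h>

    /* Build a discard chain for a range and wait for the tail bio. */
    static int discard_range(struct block_device *bdev, sector_t sector,
                             sector_t nr_sects)
    {
            int type = REQ_WRITE | REQ_DISCARD;
            struct bio *bio = NULL;
            int ret;

            ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
                                         type, &bio);
            if (!ret && bio)
                    ret = submit_bio_wait(type, bio);   /* waits on the whole chain */
            return ret;
    }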
diff --git a/block/blk-map.c b/block/blk-map.c
index a54f054..b9f88b7 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -9,24 +9,6 @@
 
 #include "blk.h"
 
-static bool iovec_gap_to_prv(struct request_queue *q,
-			     struct iovec *prv, struct iovec *cur)
-{
-	unsigned long prev_end;
-
-	if (!queue_virt_boundary(q))
-		return false;
-
-	if (prv->iov_base == NULL && prv->iov_len == 0)
-		/* prv is not set - don't check */
-		return false;
-
-	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
-
-	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
-		prev_end & queue_virt_boundary(q));
-}
-
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio)
 {
@@ -125,31 +107,18 @@
 			struct rq_map_data *map_data,
 			const struct iov_iter *iter, gfp_t gfp_mask)
 {
-	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
-	bool copy = (q->dma_pad_mask & iter->count) || map_data;
+	bool copy = false;
+	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
 	struct bio *bio = NULL;
 	struct iov_iter i;
 	int ret;
 
-	if (!iter || !iter->count)
-		return -EINVAL;
-
-	iov_for_each(iov, i, *iter) {
-		unsigned long uaddr = (unsigned long) iov.iov_base;
-
-		if (!iov.iov_len)
-			return -EINVAL;
-
-		/*
-		 * Keep going so we check length of all segments
-		 */
-		if ((uaddr & queue_dma_alignment(q)) ||
-		    iovec_gap_to_prv(q, &prv, &iov))
-			copy = true;
-
-		prv.iov_base = iov.iov_base;
-		prv.iov_len = iov.iov_len;
-	}
+	if (map_data)
+		copy = true;
+	else if (iov_iter_alignment(iter) & align)
+		copy = true;
+	else if (queue_virt_boundary(q))
+		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
 	i = *iter;
 	do {
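Aside: blk_rq_map_user_iov() no longer walks the iovecs by hand; the decision to bounce comes down to three checks — map_data present, the iterator's alignment against the DMA padding/alignment mask, or an inter-segment gap violating the queue's virt boundary. The same decision isolated into a sketch (hypothetical helper):

    #include <linux/blkdev.h>
    #include <linux/uio.h>

    /* True if the user buffers cannot be mapped directly and must be copied. */
    static bool needs_bounce(struct request_queue *q, struct rq_map_data *map_data,
                             const struct iov_iter *iter)
    {
            unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);

            if (map_data)
                    return true;
            if (iov_iter_alignment(iter) & align)
                    return true;
            if (queue_virt_boundary(q))
                    return (queue_virt_boundary(q) &
                            iov_iter_gap_alignment(iter)) != 0;
            return false;
    }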
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index abdbb47..56a0c37 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -464,15 +464,26 @@
 	}
 }
 
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
-		void *priv)
+static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+		busy_tag_iter_fn *fn, void *priv)
 {
 	if (tags->nr_reserved_tags)
 		bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
 	bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
 			false);
 }
-EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
+
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv)
+{
+	int i;
+
+	for (i = 0; i < tagset->nr_hw_queues; i++) {
+		if (tagset->tags && tagset->tags[i])
+			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
+	}
+}
+EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
 
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv)
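Aside: blk_mq_all_tag_busy_iter() becomes static; drivers that want to walk in-flight requests now go through blk_mq_tagset_busy_iter(), which covers every hardware queue of a tag set. A hypothetical driver-side use, e.g. counting outstanding requests during error recovery (names invented for illustration):

    #include <linux/blk-mq.h>

    static void count_busy(struct request *rq, void *data, bool reserved)
    {
            unsigned int *busy = data;

            (*busy)++;
    }

    static unsigned int busy_requests(struct blk_mq_tag_set *set)
    {
            unsigned int busy = 0;

            blk_mq_tagset_busy_iter(set, count_busy, &busy);
            return busy;
    }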
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1699baf..7df9c92 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1122,8 +1122,7 @@
 {
 	init_request_from_bio(rq, bio);
 
-	if (blk_do_io_stat(rq))
-		blk_account_io_start(rq, 1);
+	blk_account_io_start(rq, 1);
 }
 
 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
@@ -1496,7 +1495,7 @@
 		int to_do;
 		void *p;
 
-		while (left < order_to_size(this_order - 1) && this_order)
+		while (this_order && left < order_to_size(this_order - 1))
 			this_order--;
 
 		do {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c7bb666..f679ae1 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -239,8 +239,8 @@
 	struct queue_limits *limits = &q->limits;
 	unsigned int max_sectors;
 
-	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
-		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+	if ((max_hw_sectors << 9) < PAGE_SIZE) {
+		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
 		       __func__, max_hw_sectors);
 	}
@@ -329,8 +329,8 @@
  **/
 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
-	if (max_size < PAGE_CACHE_SIZE) {
-		max_size = PAGE_CACHE_SIZE;
+	if (max_size < PAGE_SIZE) {
+		max_size = PAGE_SIZE;
 		printk(KERN_INFO "%s: set to minimum %d\n",
 		       __func__, max_size);
 	}
@@ -760,8 +760,8 @@
  **/
 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
-	if (mask < PAGE_CACHE_SIZE - 1) {
-		mask = PAGE_CACHE_SIZE - 1;
+	if (mask < PAGE_SIZE - 1) {
+		mask = PAGE_SIZE - 1;
 		printk(KERN_INFO "%s: set to minimum %lx\n",
 		       __func__, mask);
 	}
@@ -820,32 +820,40 @@
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-/**
- * blk_queue_flush - configure queue's cache flush capability
- * @q:		the request queue for the device
- * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
- *
- * Tell block layer cache flush capability of @q.  If it supports
- * flushing, REQ_FLUSH should be set.  If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
- */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
-{
-	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
-	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
-		flush &= ~REQ_FUA;
-
-	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush);
-
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-	q->flush_not_queueable = !queueable;
+	spin_lock_irq(q->queue_lock);
+	if (queueable)
+		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	else
+		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
+/**
+ * blk_queue_write_cache - configure queue's write cache
+ * @q:		the request queue for the device
+ * @wc:		write back cache on or off
+ * @fua:	device supports FUA writes, if true
+ *
+ * Tell the block layer about the write cache of @q.
+ */
+void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
+{
+	spin_lock_irq(q->queue_lock);
+	if (wc)
+		queue_flag_set(QUEUE_FLAG_WC, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	if (fua)
+		queue_flag_set(QUEUE_FLAG_FUA, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_FUA, q);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_write_cache);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
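Aside: blk_queue_flush() is removed; drivers now describe their volatile write cache with blk_queue_write_cache(), and the flush machinery tests QUEUE_FLAG_WC/QUEUE_FLAG_FUA instead of q->flush_flags (see the blk-flush.c hunk earlier). One plausible mapping from the old calls to the new one, sketched for illustration:

    #include <linux/blkdev.h>

    /*
     * Old:  blk_queue_flush(q, 0);                      New:  blk_queue_write_cache(q, false, false);
     * Old:  blk_queue_flush(q, REQ_FLUSH);              New:  blk_queue_write_cache(q, true, false);
     * Old:  blk_queue_flush(q, REQ_FLUSH | REQ_FUA);    New:  blk_queue_write_cache(q, true, true);
     */
    static void example_setup_queue(struct request_queue *q)
    {
            /* Device has a writeback cache and supports FUA writes. */
            blk_queue_write_cache(q, true, true);
    }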
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index dd937630..9920596 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -76,7 +76,7 @@
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
 	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
-					(PAGE_CACHE_SHIFT - 10);
+					(PAGE_SHIFT - 10);
 
 	return queue_var_show(ra_kb, (page));
 }
@@ -90,7 +90,7 @@
 	if (ret < 0)
 		return ret;
 
-	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 
 	return ret;
 }
@@ -117,7 +117,7 @@
 	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
 
-	return queue_var_show(PAGE_CACHE_SIZE, (page));
+	return queue_var_show(PAGE_SIZE, (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -198,7 +198,7 @@
 {
 	unsigned long max_sectors_kb,
 		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
-			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+			page_kb = 1 << (PAGE_SHIFT - 10);
 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
 	if (ret < 0)
@@ -347,6 +347,38 @@
 	return ret;
 }
 
+static ssize_t queue_wc_show(struct request_queue *q, char *page)
+{
+	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+		return sprintf(page, "write back\n");
+
+	return sprintf(page, "write through\n");
+}
+
+static ssize_t queue_wc_store(struct request_queue *q, const char *page,
+			      size_t count)
+{
+	int set = -1;
+
+	if (!strncmp(page, "write back", 10))
+		set = 1;
+	else if (!strncmp(page, "write through", 13) ||
+		 !strncmp(page, "none", 4))
+		set = 0;
+
+	if (set == -1)
+		return -EINVAL;
+
+	spin_lock_irq(q->queue_lock);
+	if (set)
+		queue_flag_set(QUEUE_FLAG_WC, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_WC, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return count;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -478,6 +510,12 @@
 	.store = queue_poll_store,
 };
 
+static struct queue_sysfs_entry queue_wc_entry = {
+	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_wc_show,
+	.store = queue_wc_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -503,6 +541,7 @@
 	&queue_iostats_entry.attr,
 	&queue_random_entry.attr,
 	&queue_poll_entry.attr,
+	&queue_wc_entry.attr,
 	NULL,
 };
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2149a1d..47a3e54 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -211,15 +211,14 @@
  *
  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
  * throtl_grp; otherwise, just "throtl".
- *
- * TODO: this should be made a function and name formatting should happen
- * after testing whether blktrace is enabled.
  */
 #define throtl_log(sq, fmt, args...)	do {				\
 	struct throtl_grp *__tg = sq_to_tg((sq));			\
 	struct throtl_data *__td = sq_to_td((sq));			\
 									\
 	(void)__td;							\
+	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
+		break;							\
 	if ((__tg)) {							\
 		char __pbuf[128];					\
 									\
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e3c591d..4a34978 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -4075,7 +4075,7 @@
 		 * idle timer unplug to continue working.
 		 */
 		if (cfq_cfqq_wait_request(cfqq)) {
-			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+			if (blk_rq_bytes(rq) > PAGE_SIZE ||
 			    cfqd->busy_queues > 1) {
 				cfq_del_timer(cfqd, cfqq);
 				cfq_clear_cfqq_wait_request(cfqq);
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f678c73..556826a 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -710,7 +710,7 @@
 			return -EINVAL;
 		bdi = blk_get_backing_dev_info(bdev);
 		return compat_put_long(arg,
-				       (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+				       (bdi->ra_pages * PAGE_SIZE) / 512);
 	case BLKROGET: /* compatible */
 		return compat_put_int(arg, bdev_read_only(bdev) != 0);
 	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@@ -729,7 +729,7 @@
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
 		bdi = blk_get_backing_dev_info(bdev);
-		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+		bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
 	case BLKGETSIZE:
 		size = i_size_read(bdev->bd_inode);
diff --git a/block/ioctl.c b/block/ioctl.c
index d8996bb..4ff1f92 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -550,7 +550,7 @@
 		if (!arg)
 			return -EINVAL;
 		bdi = blk_get_backing_dev_info(bdev);
-		return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+		return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
 	case BLKROGET:
 		return put_int(arg, bdev_read_only(bdev) != 0);
 	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@@ -578,7 +578,7 @@
 		if(!capable(CAP_SYS_ADMIN))
 			return -EACCES;
 		bdi = blk_get_backing_dev_info(bdev);
-		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+		bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
 	case BLKBSZSET:
 		return blkdev_bszset(bdev, mode, argp);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 5d87019..d7eb77e 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -361,15 +361,20 @@
 			goto out_del;
 	}
 
+	err = hd_ref_init(p);
+	if (err) {
+		if (flags & ADDPART_FLAG_WHOLEDISK)
+			goto out_remove_file;
+		goto out_del;
+	}
+
 	/* everything is up and running, commence */
 	rcu_assign_pointer(ptbl->part[partno], p);
 
 	/* suppress uevent if the disk suppresses it */
 	if (!dev_get_uevent_suppress(ddev))
 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
-
-	if (!hd_ref_init(p))
-		return p;
+	return p;
 
 out_free_info:
 	free_part_info(p);
@@ -378,6 +383,8 @@
 out_free:
 	kfree(p);
 	return ERR_PTR(err);
+out_remove_file:
+	device_remove_file(pdev, &dev_attr_whole_disk);
 out_del:
 	kobject_put(p->holder_dir);
 	device_del(pdev);
@@ -566,8 +573,8 @@
 {
 	struct address_space *mapping = bdev->bd_inode->i_mapping;
 
-	return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
-			NULL);
+	return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
+				 NULL);
 }
 
 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
@@ -584,9 +591,9 @@
 		if (PageError(page))
 			goto fail;
 		p->v = page;
-		return (unsigned char *)page_address(page) +  ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
+		return (unsigned char *)page_address(page) +  ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
 fail:
-		page_cache_release(page);
+		put_page(page);
 	}
 	p->v = NULL;
 	return NULL;
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 26cb624..bcd86e5 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -430,7 +430,7 @@
 	}
 	/* Check that sizeof_partition_entry has the correct value */
 	if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
-		pr_debug("GUID Partitition Entry Size check failed.\n");
+		pr_debug("GUID Partition Entry Size check failed.\n");
 		goto fail;
 	}
 
@@ -443,7 +443,7 @@
 			le32_to_cpu((*gpt)->sizeof_partition_entry));
 
 	if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
-		pr_debug("GUID Partitition Entry Array CRC check failed.\n");
+		pr_debug("GUID Partition Entry Array CRC check failed.\n");
 		goto fail_ptes;
 	}
 
diff --git a/certs/Kconfig b/certs/Kconfig
index f0f8a44..fc5955f 100644
--- a/certs/Kconfig
+++ b/certs/Kconfig
@@ -17,6 +17,7 @@
 config SYSTEM_TRUSTED_KEYRING
 	bool "Provide system-wide ring of trusted keys"
 	depends on KEYS
+	depends on ASYMMETRIC_KEY_TYPE
 	help
 	  Provide a system keyring to which trusted keys can be added.  Keys in
 	  the keyring are considered to be trusted.  Keys may be added at will
@@ -55,4 +56,12 @@
 	  This is the number of bytes reserved in the kernel image for a
 	  certificate to be inserted.
 
+config SECONDARY_TRUSTED_KEYRING
+	bool "Provide a keyring to which extra trustable keys may be added"
+	depends on SYSTEM_TRUSTED_KEYRING
+	help
+	  If set, provide a keyring to which extra keys may be added, provided
+	  those keys are not blacklisted and are vouched for by a key built
+	  into the kernel or already in the secondary trusted keyring.
+
 endmenu
diff --git a/certs/system_keyring.c b/certs/system_keyring.c
index f418032..50979d6 100644
--- a/certs/system_keyring.c
+++ b/certs/system_keyring.c
@@ -18,29 +18,88 @@
 #include <keys/system_keyring.h>
 #include <crypto/pkcs7.h>
 
-struct key *system_trusted_keyring;
-EXPORT_SYMBOL_GPL(system_trusted_keyring);
+static struct key *builtin_trusted_keys;
+#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+static struct key *secondary_trusted_keys;
+#endif
 
 extern __initconst const u8 system_certificate_list[];
 extern __initconst const unsigned long system_certificate_list_size;
 
+/**
+ * restrict_link_by_builtin_trusted - Restrict keyring addition by built-in CA
+ *
+ * Restrict the addition of keys into a keyring based on the key-to-be-added
+ * being vouched for by a key in the built in system keyring.
+ */
+int restrict_link_by_builtin_trusted(struct key *keyring,
+				     const struct key_type *type,
+				     const union key_payload *payload)
+{
+	return restrict_link_by_signature(builtin_trusted_keys, type, payload);
+}
+
+#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+/**
+ * restrict_link_by_builtin_and_secondary_trusted - Restrict keyring
+ *   addition by both builtin and secondary keyrings
+ *
+ * Restrict the addition of keys into a keyring based on the key-to-be-added
+ * being vouched for by a key in either the built-in or the secondary system
+ * keyrings.
+ */
+int restrict_link_by_builtin_and_secondary_trusted(
+	struct key *keyring,
+	const struct key_type *type,
+	const union key_payload *payload)
+{
+	/* If we have a secondary trusted keyring, then that contains a link
+	 * through to the builtin keyring and the search will follow that link.
+	 */
+	if (type == &key_type_keyring &&
+	    keyring == secondary_trusted_keys &&
+	    payload == &builtin_trusted_keys->payload)
+		/* Allow the builtin keyring to be added to the secondary */
+		return 0;
+
+	return restrict_link_by_signature(secondary_trusted_keys, type, payload);
+}
+#endif
+
 /*
- * Load the compiled-in keys
+ * Create the trusted keyrings
  */
 static __init int system_trusted_keyring_init(void)
 {
-	pr_notice("Initialise system trusted keyring\n");
+	pr_notice("Initialise system trusted keyrings\n");
 
-	system_trusted_keyring =
-		keyring_alloc(".system_keyring",
+	builtin_trusted_keys =
+		keyring_alloc(".builtin_trusted_keys",
 			      KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
 			      ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
 			      KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH),
-			      KEY_ALLOC_NOT_IN_QUOTA, NULL);
-	if (IS_ERR(system_trusted_keyring))
-		panic("Can't allocate system trusted keyring\n");
+			      KEY_ALLOC_NOT_IN_QUOTA,
+			      NULL, NULL);
+	if (IS_ERR(builtin_trusted_keys))
+		panic("Can't allocate builtin trusted keyring\n");
 
-	set_bit(KEY_FLAG_TRUSTED_ONLY, &system_trusted_keyring->flags);
+#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+	secondary_trusted_keys =
+		keyring_alloc(".secondary_trusted_keys",
+			      KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+			      ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+			       KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH |
+			       KEY_USR_WRITE),
+			      KEY_ALLOC_NOT_IN_QUOTA,
+			      restrict_link_by_builtin_and_secondary_trusted,
+			      NULL);
+	if (IS_ERR(secondary_trusted_keys))
+		panic("Can't allocate secondary trusted keyring\n");
+
+	if (key_link(secondary_trusted_keys, builtin_trusted_keys) < 0)
+		panic("Can't link trusted keyrings\n");
+#endif
+
 	return 0;
 }
 
@@ -76,7 +135,7 @@
 		if (plen > end - p)
 			goto dodgy_cert;
 
-		key = key_create_or_update(make_key_ref(system_trusted_keyring, 1),
+		key = key_create_or_update(make_key_ref(builtin_trusted_keys, 1),
 					   "asymmetric",
 					   NULL,
 					   p,
@@ -84,8 +143,8 @@
 					   ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
 					   KEY_USR_VIEW | KEY_USR_READ),
 					   KEY_ALLOC_NOT_IN_QUOTA |
-					   KEY_ALLOC_TRUSTED |
-					   KEY_ALLOC_BUILT_IN);
+					   KEY_ALLOC_BUILT_IN |
+					   KEY_ALLOC_BYPASS_RESTRICTION);
 		if (IS_ERR(key)) {
 			pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
 			       PTR_ERR(key));
@@ -108,19 +167,27 @@
 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
 
 /**
- * Verify a PKCS#7-based signature on system data.
- * @data: The data to be verified.
+ * verify_pkcs7_signature - Verify a PKCS#7-based signature on system data.
+ * @data: The data to be verified (NULL if expecting internal data).
  * @len: Size of @data.
  * @raw_pkcs7: The PKCS#7 message that is the signature.
  * @pkcs7_len: The size of @raw_pkcs7.
+ * @trusted_keys: Trusted keys to use (NULL for builtin trusted keys only,
+ *					(void *)1UL for all trusted keys).
  * @usage: The use to which the key is being put.
+ * @view_content: Callback to gain access to content.
+ * @ctx: Context for callback.
  */
-int system_verify_data(const void *data, unsigned long len,
-		       const void *raw_pkcs7, size_t pkcs7_len,
-		       enum key_being_used_for usage)
+int verify_pkcs7_signature(const void *data, size_t len,
+			   const void *raw_pkcs7, size_t pkcs7_len,
+			   struct key *trusted_keys,
+			   enum key_being_used_for usage,
+			   int (*view_content)(void *ctx,
+					       const void *data, size_t len,
+					       size_t asn1hdrlen),
+			   void *ctx)
 {
 	struct pkcs7_message *pkcs7;
-	bool trusted;
 	int ret;
 
 	pkcs7 = pkcs7_parse_message(raw_pkcs7, pkcs7_len);
@@ -128,7 +195,7 @@
 		return PTR_ERR(pkcs7);
 
 	/* The data should be detached - so we need to supply it. */
-	if (pkcs7_supply_detached_data(pkcs7, data, len) < 0) {
+	if (data && pkcs7_supply_detached_data(pkcs7, data, len) < 0) {
 		pr_err("PKCS#7 signature with non-detached data\n");
 		ret = -EBADMSG;
 		goto error;
@@ -138,13 +205,33 @@
 	if (ret < 0)
 		goto error;
 
-	ret = pkcs7_validate_trust(pkcs7, system_trusted_keyring, &trusted);
-	if (ret < 0)
+	if (!trusted_keys) {
+		trusted_keys = builtin_trusted_keys;
+	} else if (trusted_keys == (void *)1UL) {
+#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+		trusted_keys = secondary_trusted_keys;
+#else
+		trusted_keys = builtin_trusted_keys;
+#endif
+	}
+	ret = pkcs7_validate_trust(pkcs7, trusted_keys);
+	if (ret < 0) {
+		if (ret == -ENOKEY)
+			pr_err("PKCS#7 signature not signed with a trusted key\n");
 		goto error;
+	}
 
-	if (!trusted) {
-		pr_err("PKCS#7 signature not signed with a trusted key\n");
-		ret = -ENOKEY;
+	if (view_content) {
+		size_t asn1hdrlen;
+
+		ret = pkcs7_get_content_data(pkcs7, &data, &len, &asn1hdrlen);
+		if (ret < 0) {
+			if (ret == -ENODATA)
+				pr_devel("PKCS#7 message does not contain data\n");
+			goto error;
+		}
+
+		ret = view_content(ctx, data, len, asn1hdrlen);
 	}
 
 error:
@@ -152,6 +239,6 @@
 	pr_devel("<==%s() = %d\n", __func__, ret);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(system_verify_data);
+EXPORT_SYMBOL_GPL(verify_pkcs7_signature);
 
 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
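Aside: system_verify_data() is renamed and generalized into verify_pkcs7_signature(): the caller can select a keyring (NULL means the builtin keyring, (void *)1UL means builtin plus secondary) and optionally receive the embedded content through a callback. A hedged sketch of the simplest detached-signature check against the builtin keys; the caller name is hypothetical and the declaration is assumed to live in <linux/verification.h>:

    #include <linux/verification.h>

    /* Verify a detached PKCS#7 signature over 'blob' using builtin trusted keys. */
    static int check_blob_signature(const void *blob, size_t blob_len,
                                    const void *pkcs7, size_t pkcs7_len)
    {
            return verify_pkcs7_signature(blob, blob_len, pkcs7, pkcs7_len,
                                          NULL,   /* builtin keyring only */
                                          VERIFYING_UNSPECIFIED_SIGNATURE,
                                          NULL, NULL);
    }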
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 93a1fdc..1d33beb 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -96,6 +96,7 @@
 config CRYPTO_RSA
 	tristate "RSA algorithm"
 	select CRYPTO_AKCIPHER
+	select CRYPTO_MANAGER
 	select MPILIB
 	select ASN1
 	help
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 5fc1f17..3887a98 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -69,8 +69,9 @@
 	struct scatterlist *sg;
 
 	sg = walk->sg;
-	walk->pg = sg_page(sg);
 	walk->offset = sg->offset;
+	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+	walk->offset = offset_in_page(walk->offset);
 	walk->entrylen = sg->length;
 
 	if (walk->entrylen > walk->total)
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 147069c..80a0f1a 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -13,7 +13,7 @@
  * any later version.
  */
 
-#include <crypto/aead.h>
+#include <crypto/internal/aead.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/if_alg.h>
 #include <linux/init.h>
@@ -29,15 +29,24 @@
 	struct scatterlist sg[ALG_MAX_PAGES];
 };
 
+struct aead_async_rsgl {
+	struct af_alg_sgl sgl;
+	struct list_head list;
+};
+
+struct aead_async_req {
+	struct scatterlist *tsgl;
+	struct aead_async_rsgl first_rsgl;
+	struct list_head list;
+	struct kiocb *iocb;
+	unsigned int tsgls;
+	char iv[];
+};
+
 struct aead_ctx {
 	struct aead_sg_list tsgl;
-	/*
-	 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
-	 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
-	 * pages
-	 */
-#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
-	struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
+	struct aead_async_rsgl first_rsgl;
+	struct list_head list;
 
 	void *iv;
 
@@ -75,6 +84,17 @@
 	return ctx->used >= ctx->aead_assoclen + as;
 }
 
+static void aead_reset_ctx(struct aead_ctx *ctx)
+{
+	struct aead_sg_list *sgl = &ctx->tsgl;
+
+	sg_init_table(sgl->sg, ALG_MAX_PAGES);
+	sgl->cur = 0;
+	ctx->used = 0;
+	ctx->more = 0;
+	ctx->merge = 0;
+}
+
 static void aead_put_sgl(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
@@ -90,11 +110,7 @@
 		put_page(sg_page(sg + i));
 		sg_assign_page(sg + i, NULL);
 	}
-	sg_init_table(sg, ALG_MAX_PAGES);
-	sgl->cur = 0;
-	ctx->used = 0;
-	ctx->more = 0;
-	ctx->merge = 0;
+	aead_reset_ctx(ctx);
 }
 
 static void aead_wmem_wakeup(struct sock *sk)
@@ -349,23 +365,188 @@
 	return err ?: size;
 }
 
-static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags)
+#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
+		((char *)req + sizeof(struct aead_request) + \
+		 crypto_aead_reqsize(tfm))
+
+ #define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
+	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
+	sizeof(struct aead_request)
+
+static void aead_async_cb(struct crypto_async_request *_req, int err)
+{
+	struct sock *sk = _req->data;
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
+	struct aead_request *req = aead_request_cast(_req);
+	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+	struct scatterlist *sg = areq->tsgl;
+	struct aead_async_rsgl *rsgl;
+	struct kiocb *iocb = areq->iocb;
+	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
+
+	list_for_each_entry(rsgl, &areq->list, list) {
+		af_alg_free_sg(&rsgl->sgl);
+		if (rsgl != &areq->first_rsgl)
+			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+	}
+
+	for (i = 0; i < areq->tsgls; i++)
+		put_page(sg_page(sg + i));
+
+	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
+	sock_kfree_s(sk, req, reqlen);
+	__sock_put(sk);
+	iocb->ki_complete(iocb, err, err);
+}
+
+static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
+			      int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
+	struct aead_async_req *areq;
+	struct aead_request *req = NULL;
+	struct aead_sg_list *sgl = &ctx->tsgl;
+	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
+	unsigned int as = crypto_aead_authsize(tfm);
+	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
+	int err = -ENOMEM;
+	unsigned long used;
+	size_t outlen;
+	size_t usedpages = 0;
+
+	lock_sock(sk);
+	if (ctx->more) {
+		err = aead_wait_for_data(sk, flags);
+		if (err)
+			goto unlock;
+	}
+
+	used = ctx->used;
+	outlen = used;
+
+	if (!aead_sufficient_data(ctx))
+		goto unlock;
+
+	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
+	if (unlikely(!req))
+		goto unlock;
+
+	areq = GET_ASYM_REQ(req, tfm);
+	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
+	INIT_LIST_HEAD(&areq->list);
+	areq->iocb = msg->msg_iocb;
+	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
+	aead_request_set_tfm(req, tfm);
+	aead_request_set_ad(req, ctx->aead_assoclen);
+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  aead_async_cb, sk);
+	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
+
+	/* take over all tx sgls from ctx */
+	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
+				  GFP_KERNEL);
+	if (unlikely(!areq->tsgl))
+		goto free;
+
+	sg_init_table(areq->tsgl, sgl->cur);
+	for (i = 0; i < sgl->cur; i++)
+		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
+			    sgl->sg[i].length, sgl->sg[i].offset);
+
+	areq->tsgls = sgl->cur;
+
+	/* create rx sgls */
+	while (iov_iter_count(&msg->msg_iter)) {
+		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
+				      (outlen - usedpages));
+
+		if (list_empty(&areq->list)) {
+			rsgl = &areq->first_rsgl;
+
+		} else {
+			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
+			if (unlikely(!rsgl)) {
+				err = -ENOMEM;
+				goto free;
+			}
+		}
+		rsgl->sgl.npages = 0;
+		list_add_tail(&rsgl->list, &areq->list);
+
+		/* make one iovec available as scatterlist */
+		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+		if (err < 0)
+			goto free;
+
+		usedpages += err;
+
+		/* chain the new scatterlist with previous one */
+		if (last_rsgl)
+			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+
+		last_rsgl = rsgl;
+
+		/* we do not need more iovecs as we have sufficient memory */
+		if (outlen <= usedpages)
+			break;
+
+		iov_iter_advance(&msg->msg_iter, err);
+	}
+	err = -EINVAL;
+	/* ensure output buffer is sufficiently large */
+	if (usedpages < outlen)
+		goto free;
+
+	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
+			       areq->iv);
+	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+	if (err) {
+		if (err == -EINPROGRESS) {
+			sock_hold(sk);
+			err = -EIOCBQUEUED;
+			aead_reset_ctx(ctx);
+			goto unlock;
+		} else if (err == -EBADMSG) {
+			aead_put_sgl(sk);
+		}
+		goto free;
+	}
+	aead_put_sgl(sk);
+
+free:
+	list_for_each_entry(rsgl, &areq->list, list) {
+		af_alg_free_sg(&rsgl->sgl);
+		if (rsgl != &areq->first_rsgl)
+			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+	}
+	if (areq->tsgl)
+		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
+	if (req)
+		sock_kfree_s(sk, req, reqlen);
+unlock:
+	aead_wmem_wakeup(sk);
+	release_sock(sk);
+	return err ? err : outlen;
+}
+
+static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_ctx *ctx = ask->private;
 	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
 	struct aead_sg_list *sgl = &ctx->tsgl;
-	unsigned int i = 0;
+	struct aead_async_rsgl *last_rsgl = NULL;
+	struct aead_async_rsgl *rsgl, *tmp;
 	int err = -EINVAL;
 	unsigned long used = 0;
 	size_t outlen = 0;
 	size_t usedpages = 0;
-	unsigned int cnt = 0;
-
-	/* Limit number of IOV blocks to be accessed below */
-	if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
-		return -ENOMSG;
 
 	lock_sock(sk);
 
@@ -417,21 +598,33 @@
 		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
 				      (outlen - usedpages));
 
+		if (list_empty(&ctx->list)) {
+			rsgl = &ctx->first_rsgl;
+		} else {
+			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
+			if (unlikely(!rsgl)) {
+				err = -ENOMEM;
+				goto unlock;
+			}
+		}
+		rsgl->sgl.npages = 0;
+		list_add_tail(&rsgl->list, &ctx->list);
+
 		/* make one iovec available as scatterlist */
-		err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
-				     seglen);
+		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
 		if (err < 0)
 			goto unlock;
 		usedpages += err;
 		/* chain the new scatterlist with previous one */
-		if (cnt)
-			af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
+		if (last_rsgl)
+			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+
+		last_rsgl = rsgl;
 
 		/* we do not need more iovecs as we have sufficient memory */
 		if (outlen <= usedpages)
 			break;
 		iov_iter_advance(&msg->msg_iter, err);
-		cnt++;
 	}
 
 	err = -EINVAL;
@@ -440,8 +633,7 @@
 		goto unlock;
 
 	sg_mark_end(sgl->sg + sgl->cur - 1);
-
-	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg,
+	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
 			       used, ctx->iv);
 	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
 
@@ -454,23 +646,35 @@
 		/* EBADMSG implies a valid cipher operation took place */
 		if (err == -EBADMSG)
 			aead_put_sgl(sk);
+
 		goto unlock;
 	}
 
 	aead_put_sgl(sk);
-
 	err = 0;
 
 unlock:
-	for (i = 0; i < cnt; i++)
-		af_alg_free_sg(&ctx->rsgl[i]);
-
+	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
+		af_alg_free_sg(&rsgl->sgl);
+		if (rsgl != &ctx->first_rsgl)
+			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+		list_del(&rsgl->list);
+	}
+	INIT_LIST_HEAD(&ctx->list);
 	aead_wmem_wakeup(sk);
 	release_sock(sk);
 
 	return err ? err : outlen;
 }
 
+static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
+			int flags)
+{
+	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
+		aead_recvmsg_async(sock, msg, flags) :
+		aead_recvmsg_sync(sock, msg, flags);
+}
+
 static unsigned int aead_poll(struct file *file, struct socket *sock,
 			      poll_table *wait)
 {
@@ -540,6 +744,7 @@
 	unsigned int ivlen = crypto_aead_ivsize(
 				crypto_aead_reqtfm(&ctx->aead_req));
 
+	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
 	aead_put_sgl(sk);
 	sock_kzfree_s(sk, ctx->iv, ivlen);
 	sock_kfree_s(sk, ctx, ctx->len);
@@ -574,6 +779,7 @@
 	ctx->aead_assoclen = 0;
 	af_alg_init_completion(&ctx->completion);
 	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
+	INIT_LIST_HEAD(&ctx->list);
 
 	ask->private = ctx;
 
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 91a7e04..e28e912 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -1,5 +1,5 @@
 menuconfig ASYMMETRIC_KEY_TYPE
-	tristate "Asymmetric (public-key cryptographic) key type"
+	bool "Asymmetric (public-key cryptographic) key type"
 	depends on KEYS
 	help
 	  This option provides support for a key type that holds the data for
@@ -40,8 +40,7 @@
 
 config PKCS7_TEST_KEY
 	tristate "PKCS#7 testing key type"
-	depends on PKCS7_MESSAGE_PARSER
-	select SYSTEM_TRUSTED_KEYRING
+	depends on SYSTEM_DATA_VERIFICATION
 	help
 	  This option provides a type of key that can be loaded up from a
 	  PKCS#7 message - provided the message is signed by a trusted key.  If
@@ -54,6 +53,7 @@
 config SIGNED_PE_FILE_VERIFICATION
 	bool "Support for PE file signature verification"
 	depends on PKCS7_MESSAGE_PARSER=y
+	depends on SYSTEM_DATA_VERIFICATION
 	select ASN1
 	select OID_REGISTRY
 	help
diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
index f904862..6516855 100644
--- a/crypto/asymmetric_keys/Makefile
+++ b/crypto/asymmetric_keys/Makefile
@@ -4,7 +4,10 @@
 
 obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys.o
 
-asymmetric_keys-y := asymmetric_type.o signature.o
+asymmetric_keys-y := \
+	asymmetric_type.o \
+	restrict.o \
+	signature.o
 
 obj-$(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE) += public_key.o
 
diff --git a/crypto/asymmetric_keys/asymmetric_keys.h b/crypto/asymmetric_keys/asymmetric_keys.h
index 1d450b5..ca8e9ac 100644
--- a/crypto/asymmetric_keys/asymmetric_keys.h
+++ b/crypto/asymmetric_keys/asymmetric_keys.h
@@ -9,6 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#include <keys/asymmetric-type.h>
+
 extern struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id);
 
 extern int __asymmetric_key_hex_to_key_id(const char *id,
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index 9f2165b..6600181 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -35,6 +35,95 @@
 static DECLARE_RWSEM(asymmetric_key_parsers_sem);
 
 /**
+ * find_asymmetric_key - Find a key by ID.
+ * @keyring: The keys to search.
+ * @id_0: The first ID to look for or NULL.
+ * @id_1: The second ID to look for or NULL.
+ * @partial: Use partial match if true, exact if false.
+ *
+ * Find a key in the given keyring by identifier.  The preferred identifier is
+ * the id_0 and the fallback identifier is the id_1.  If both are given, the
+ * lookup is by the former, but the latter must also match.
+ */
+struct key *find_asymmetric_key(struct key *keyring,
+				const struct asymmetric_key_id *id_0,
+				const struct asymmetric_key_id *id_1,
+				bool partial)
+{
+	struct key *key;
+	key_ref_t ref;
+	const char *lookup;
+	char *req, *p;
+	int len;
+
+	if (id_0) {
+		lookup = id_0->data;
+		len = id_0->len;
+	} else {
+		lookup = id_1->data;
+		len = id_1->len;
+	}
+
+	/* Construct an identifier "id:<keyid>". */
+	p = req = kmalloc(2 + 1 + len * 2 + 1, GFP_KERNEL);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	if (partial) {
+		*p++ = 'i';
+		*p++ = 'd';
+	} else {
+		*p++ = 'e';
+		*p++ = 'x';
+	}
+	*p++ = ':';
+	p = bin2hex(p, lookup, len);
+	*p = 0;
+
+	pr_debug("Look up: \"%s\"\n", req);
+
+	ref = keyring_search(make_key_ref(keyring, 1),
+			     &key_type_asymmetric, req);
+	if (IS_ERR(ref))
+		pr_debug("Request for key '%s' err %ld\n", req, PTR_ERR(ref));
+	kfree(req);
+
+	if (IS_ERR(ref)) {
+		switch (PTR_ERR(ref)) {
+			/* Hide some search errors */
+		case -EACCES:
+		case -ENOTDIR:
+		case -EAGAIN:
+			return ERR_PTR(-ENOKEY);
+		default:
+			return ERR_CAST(ref);
+		}
+	}
+
+	key = key_ref_to_ptr(ref);
+	if (id_0 && id_1) {
+		const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
+
+		if (!kids->id[1]) {
+			pr_debug("First ID matches, but second is missing\n");
+			goto reject;
+		}
+		if (!asymmetric_key_id_same(id_1, kids->id[1])) {
+			pr_debug("First ID matches, but second does not\n");
+			goto reject;
+		}
+	}
+
+	pr_devel("<==%s() = 0 [%x]\n", __func__, key_serial(key));
+	return key;
+
+reject:
+	key_put(key);
+	return ERR_PTR(-EKEYREJECTED);
+}
+EXPORT_SYMBOL_GPL(find_asymmetric_key);
+
+/**
  * asymmetric_key_generate_id: Construct an asymmetric key ID
  * @val_1: First binary blob
  * @len_1: Length of first binary blob
@@ -331,7 +420,8 @@
 	pr_devel("==>%s()\n", __func__);
 
 	if (subtype) {
-		subtype->destroy(prep->payload.data[asym_crypto]);
+		subtype->destroy(prep->payload.data[asym_crypto],
+				 prep->payload.data[asym_auth]);
 		module_put(subtype->owner);
 	}
 	asymmetric_key_free_kids(kids);
@@ -346,13 +436,15 @@
 	struct asymmetric_key_subtype *subtype = asymmetric_key_subtype(key);
 	struct asymmetric_key_ids *kids = key->payload.data[asym_key_ids];
 	void *data = key->payload.data[asym_crypto];
+	void *auth = key->payload.data[asym_auth];
 
 	key->payload.data[asym_crypto] = NULL;
 	key->payload.data[asym_subtype] = NULL;
 	key->payload.data[asym_key_ids] = NULL;
+	key->payload.data[asym_auth] = NULL;
 
 	if (subtype) {
-		subtype->destroy(data);
+		subtype->destroy(data, auth);
 		module_put(subtype->owner);
 	}
 
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 3242cbf..6a76d5c 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -21,19 +21,13 @@
 /*
  * Parse a Microsoft Individual Code Signing blob
  */
-int mscode_parse(struct pefile_context *ctx)
+int mscode_parse(void *_ctx, const void *content_data, size_t data_len,
+		 size_t asn1hdrlen)
 {
-	const void *content_data;
-	size_t data_len;
-	int ret;
+	struct pefile_context *ctx = _ctx;
 
-	ret = pkcs7_get_content_data(ctx->pkcs7, &content_data, &data_len, 1);
-
-	if (ret) {
-		pr_debug("PKCS#7 message does not contain data\n");
-		return ret;
-	}
-
+	content_data -= asn1hdrlen;
+	data_len += asn1hdrlen;
 	pr_devel("Data: %zu [%*ph]\n", data_len, (unsigned)(data_len),
 		 content_data);
 
@@ -129,7 +123,6 @@
 {
 	struct pefile_context *ctx = context;
 
-	ctx->digest = value;
-	ctx->digest_len = vlen;
-	return 0;
+	ctx->digest = kmemdup(value, vlen, GFP_KERNEL);
+	return ctx->digest ? 0 : -ENOMEM;
 }
diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
index e2d0edb..3b92523 100644
--- a/crypto/asymmetric_keys/pkcs7_key_type.c
+++ b/crypto/asymmetric_keys/pkcs7_key_type.c
@@ -13,12 +13,9 @@
 #include <linux/key.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/verification.h>
 #include <linux/key-type.h>
-#include <keys/asymmetric-type.h>
-#include <crypto/pkcs7.h>
 #include <keys/user-type.h>
-#include <keys/system_keyring.h>
-#include "pkcs7_parser.h"
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("PKCS#7 testing key type");
@@ -29,57 +26,44 @@
 		 "Usage to specify when verifying the PKCS#7 message");
 
 /*
+ * Retrieve the PKCS#7 message content.
+ */
+static int pkcs7_view_content(void *ctx, const void *data, size_t len,
+			      size_t asn1hdrlen)
+{
+	struct key_preparsed_payload *prep = ctx;
+	const void *saved_prep_data;
+	size_t saved_prep_datalen;
+	int ret;
+
+	saved_prep_data = prep->data;
+	saved_prep_datalen = prep->datalen;
+	prep->data = data;
+	prep->datalen = len;
+
+	ret = user_preparse(prep);
+
+	prep->data = saved_prep_data;
+	prep->datalen = saved_prep_datalen;
+	return ret;
+}
+
+/*
  * Preparse a PKCS#7 wrapped and validated data blob.
  */
 static int pkcs7_preparse(struct key_preparsed_payload *prep)
 {
 	enum key_being_used_for usage = pkcs7_usage;
-	struct pkcs7_message *pkcs7;
-	const void *data, *saved_prep_data;
-	size_t datalen, saved_prep_datalen;
-	bool trusted;
-	int ret;
-
-	kenter("");
 
 	if (usage >= NR__KEY_BEING_USED_FOR) {
 		pr_err("Invalid usage type %d\n", usage);
 		return -EINVAL;
 	}
 
-	saved_prep_data = prep->data;
-	saved_prep_datalen = prep->datalen;
-	pkcs7 = pkcs7_parse_message(saved_prep_data, saved_prep_datalen);
-	if (IS_ERR(pkcs7)) {
-		ret = PTR_ERR(pkcs7);
-		goto error;
-	}
-
-	ret = pkcs7_verify(pkcs7, usage);
-	if (ret < 0)
-		goto error_free;
-
-	ret = pkcs7_validate_trust(pkcs7, system_trusted_keyring, &trusted);
-	if (ret < 0)
-		goto error_free;
-	if (!trusted)
-		pr_warn("PKCS#7 message doesn't chain back to a trusted key\n");
-
-	ret = pkcs7_get_content_data(pkcs7, &data, &datalen, false);
-	if (ret < 0)
-		goto error_free;
-
-	prep->data = data;
-	prep->datalen = datalen;
-	ret = user_preparse(prep);
-	prep->data = saved_prep_data;
-	prep->datalen = saved_prep_datalen;
-
-error_free:
-	pkcs7_free_message(pkcs7);
-error:
-	kleave(" = %d", ret);
-	return ret;
+	return verify_pkcs7_signature(NULL, 0,
+				      prep->data, prep->datalen,
+				      NULL, usage,
+				      pkcs7_view_content, prep);
 }
 
 /*
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 40de03f..af4cd86 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -44,9 +44,7 @@
 static void pkcs7_free_signed_info(struct pkcs7_signed_info *sinfo)
 {
 	if (sinfo) {
-		kfree(sinfo->sig.s);
-		kfree(sinfo->sig.digest);
-		kfree(sinfo->signing_cert_id);
+		public_key_signature_free(sinfo->sig);
 		kfree(sinfo);
 	}
 }
@@ -125,6 +123,10 @@
 	ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL);
 	if (!ctx->sinfo)
 		goto out_no_sinfo;
+	ctx->sinfo->sig = kzalloc(sizeof(struct public_key_signature),
+				  GFP_KERNEL);
+	if (!ctx->sinfo->sig)
+		goto out_no_sig;
 
 	ctx->data = (unsigned long)data;
 	ctx->ppcerts = &ctx->certs;
@@ -150,6 +152,7 @@
 		ctx->certs = cert->next;
 		x509_free_certificate(cert);
 	}
+out_no_sig:
 	pkcs7_free_signed_info(ctx->sinfo);
 out_no_sinfo:
 	pkcs7_free_message(ctx->msg);
@@ -165,24 +168,25 @@
  * @pkcs7: The preparsed PKCS#7 message to access
  * @_data: Place to return a pointer to the data
  * @_data_len: Place to return the data length
- * @want_wrapper: True if the ASN.1 object header should be included in the data
+ * @_headerlen: Size of ASN.1 header not included in _data
  *
- * Get access to the data content of the PKCS#7 message, including, optionally,
- * the header of the ASN.1 object that contains it.  Returns -ENODATA if the
- * data object was missing from the message.
+ * Get access to the data content of the PKCS#7 message.  The size of the
+ * header of the ASN.1 object that contains it is also provided and can be used
+ * to adjust *_data and *_data_len to get the entire object.
+ *
+ * Returns -ENODATA if the data object was missing from the message.
  */
 int pkcs7_get_content_data(const struct pkcs7_message *pkcs7,
 			   const void **_data, size_t *_data_len,
-			   bool want_wrapper)
+			   size_t *_headerlen)
 {
-	size_t wrapper;
-
 	if (!pkcs7->data)
 		return -ENODATA;
 
-	wrapper = want_wrapper ? pkcs7->data_hdrlen : 0;
-	*_data = pkcs7->data - wrapper;
-	*_data_len = pkcs7->data_len + wrapper;
+	*_data = pkcs7->data;
+	*_data_len = pkcs7->data_len;
+	if (_headerlen)
+		*_headerlen = pkcs7->data_hdrlen;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(pkcs7_get_content_data);
@@ -218,25 +222,26 @@
 
 	switch (ctx->last_oid) {
 	case OID_md4:
-		ctx->sinfo->sig.hash_algo = "md4";
+		ctx->sinfo->sig->hash_algo = "md4";
 		break;
 	case OID_md5:
-		ctx->sinfo->sig.hash_algo = "md5";
+		ctx->sinfo->sig->hash_algo = "md5";
 		break;
 	case OID_sha1:
-		ctx->sinfo->sig.hash_algo = "sha1";
+		ctx->sinfo->sig->hash_algo = "sha1";
 		break;
 	case OID_sha256:
-		ctx->sinfo->sig.hash_algo = "sha256";
+		ctx->sinfo->sig->hash_algo = "sha256";
 		break;
 	case OID_sha384:
-		ctx->sinfo->sig.hash_algo = "sha384";
+		ctx->sinfo->sig->hash_algo = "sha384";
 		break;
 	case OID_sha512:
-		ctx->sinfo->sig.hash_algo = "sha512";
+		ctx->sinfo->sig->hash_algo = "sha512";
 		break;
 	case OID_sha224:
-		ctx->sinfo->sig.hash_algo = "sha224";
+		ctx->sinfo->sig->hash_algo = "sha224";
+		break;
 	default:
 		printk("Unsupported digest algo: %u\n", ctx->last_oid);
 		return -ENOPKG;
@@ -255,7 +260,7 @@
 
 	switch (ctx->last_oid) {
 	case OID_rsaEncryption:
-		ctx->sinfo->sig.pkey_algo = "rsa";
+		ctx->sinfo->sig->pkey_algo = "rsa";
 		break;
 	default:
 		printk("Unsupported pkey algo: %u\n", ctx->last_oid);
@@ -615,11 +620,11 @@
 {
 	struct pkcs7_parse_context *ctx = context;
 
-	ctx->sinfo->sig.s = kmemdup(value, vlen, GFP_KERNEL);
-	if (!ctx->sinfo->sig.s)
+	ctx->sinfo->sig->s = kmemdup(value, vlen, GFP_KERNEL);
+	if (!ctx->sinfo->sig->s)
 		return -ENOMEM;
 
-	ctx->sinfo->sig.s_size = vlen;
+	ctx->sinfo->sig->s_size = vlen;
 	return 0;
 }
 
@@ -655,12 +660,16 @@
 
 	pr_devel("SINFO KID: %u [%*phN]\n", kid->len, kid->len, kid->data);
 
-	sinfo->signing_cert_id = kid;
+	sinfo->sig->auth_ids[0] = kid;
 	sinfo->index = ++ctx->sinfo_index;
 	*ctx->ppsinfo = sinfo;
 	ctx->ppsinfo = &sinfo->next;
 	ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL);
 	if (!ctx->sinfo)
 		return -ENOMEM;
+	ctx->sinfo->sig = kzalloc(sizeof(struct public_key_signature),
+				  GFP_KERNEL);
+	if (!ctx->sinfo->sig)
+		return -ENOMEM;
 	return 0;
 }
diff --git a/crypto/asymmetric_keys/pkcs7_parser.h b/crypto/asymmetric_keys/pkcs7_parser.h
index a66b19e..f4e8107 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.h
+++ b/crypto/asymmetric_keys/pkcs7_parser.h
@@ -22,7 +22,6 @@
 	struct pkcs7_signed_info *next;
 	struct x509_certificate *signer; /* Signing certificate (in msg->certs) */
 	unsigned	index;
-	bool		trusted;
 	bool		unsupported_crypto;	/* T if not usable due to missing crypto */
 
 	/* Message digest - the digest of the Content Data (or NULL) */
@@ -41,19 +40,17 @@
 #define	sinfo_has_ms_statement_type	5
 	time64_t	signing_time;
 
-	/* Issuing cert serial number and issuer's name [PKCS#7 or CMS ver 1]
-	 * or issuing cert's SKID [CMS ver 3].
-	 */
-	struct asymmetric_key_id *signing_cert_id;
-
 	/* Message signature.
 	 *
 	 * This contains the generated digest of _either_ the Content Data or
 	 * the Authenticated Attributes [RFC2315 9.3].  If the latter, one of
 	 * the attributes contains the digest of the Content Data within
 	 * it.
+	 *
+	 * This also contains the issuing cert serial number and issuer's name
+	 * [PKCS#7 or CMS ver 1] or issuing cert's SKID [CMS ver 3].
 	 */
-	struct public_key_signature sig;
+	struct public_key_signature *sig;
 };
 
 struct pkcs7_message {
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index 3bbdcc7..f6a009d 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -27,10 +27,9 @@
 				    struct pkcs7_signed_info *sinfo,
 				    struct key *trust_keyring)
 {
-	struct public_key_signature *sig = &sinfo->sig;
+	struct public_key_signature *sig = sinfo->sig;
 	struct x509_certificate *x509, *last = NULL, *p;
 	struct key *key;
-	bool trusted;
 	int ret;
 
 	kenter(",%u,", sinfo->index);
@@ -42,10 +41,8 @@
 
 	for (x509 = sinfo->signer; x509; x509 = x509->signer) {
 		if (x509->seen) {
-			if (x509->verified) {
-				trusted = x509->trusted;
+			if (x509->verified)
 				goto verified;
-			}
 			kleave(" = -ENOKEY [cached]");
 			return -ENOKEY;
 		}
@@ -54,9 +51,8 @@
 		/* Look to see if this certificate is present in the trusted
 		 * keys.
 		 */
-		key = x509_request_asymmetric_key(trust_keyring,
-						  x509->id, x509->skid,
-						  false);
+		key = find_asymmetric_key(trust_keyring,
+					  x509->id, x509->skid, false);
 		if (!IS_ERR(key)) {
 			/* One of the X.509 certificates in the PKCS#7 message
 			 * is apparently the same as one we already trust.
@@ -80,17 +76,17 @@
 
 		might_sleep();
 		last = x509;
-		sig = &last->sig;
+		sig = last->sig;
 	}
 
 	/* No match - see if the root certificate has a signer amongst the
 	 * trusted keys.
 	 */
-	if (last && (last->akid_id || last->akid_skid)) {
-		key = x509_request_asymmetric_key(trust_keyring,
-						  last->akid_id,
-						  last->akid_skid,
-						  false);
+	if (last && (last->sig->auth_ids[0] || last->sig->auth_ids[1])) {
+		key = find_asymmetric_key(trust_keyring,
+					  last->sig->auth_ids[0],
+					  last->sig->auth_ids[1],
+					  false);
 		if (!IS_ERR(key)) {
 			x509 = last;
 			pr_devel("sinfo %u: Root cert %u signer is key %x\n",
@@ -104,10 +100,8 @@
 	/* As a last resort, see if we have a trusted public key that matches
 	 * the signed info directly.
 	 */
-	key = x509_request_asymmetric_key(trust_keyring,
-					  sinfo->signing_cert_id,
-					  NULL,
-					  false);
+	key = find_asymmetric_key(trust_keyring,
+				  sinfo->sig->auth_ids[0], NULL, false);
 	if (!IS_ERR(key)) {
 		pr_devel("sinfo %u: Direct signer is key %x\n",
 			 sinfo->index, key_serial(key));
@@ -122,7 +116,6 @@
 
 matched:
 	ret = verify_signature(key, sig);
-	trusted = test_bit(KEY_FLAG_TRUSTED, &key->flags);
 	key_put(key);
 	if (ret < 0) {
 		if (ret == -ENOMEM)
@@ -134,12 +127,9 @@
 verified:
 	if (x509) {
 		x509->verified = true;
-		for (p = sinfo->signer; p != x509; p = p->signer) {
+		for (p = sinfo->signer; p != x509; p = p->signer)
 			p->verified = true;
-			p->trusted = trusted;
-		}
 	}
-	sinfo->trusted = trusted;
 	kleave(" = 0");
 	return 0;
 }
@@ -148,7 +138,6 @@
  * pkcs7_validate_trust - Validate PKCS#7 trust chain
  * @pkcs7: The PKCS#7 certificate to validate
  * @trust_keyring: Signing certificates to use as starting points
- * @_trusted: Set to true if trustworth, false otherwise
  *
  * Validate that the certificate chain inside the PKCS#7 message intersects
  * keys we already know and trust.
@@ -170,8 +159,7 @@
  * May also return -ENOMEM.
  */
 int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
-			 struct key *trust_keyring,
-			 bool *_trusted)
+			 struct key *trust_keyring)
 {
 	struct pkcs7_signed_info *sinfo;
 	struct x509_certificate *p;
@@ -191,7 +179,6 @@
 				cached_ret = -ENOPKG;
 			continue;
 		case 0:
-			*_trusted |= sinfo->trusted;
 			cached_ret = 0;
 			continue;
 		default:
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 50be2a1..44b746e 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -25,34 +25,36 @@
 static int pkcs7_digest(struct pkcs7_message *pkcs7,
 			struct pkcs7_signed_info *sinfo)
 {
+	struct public_key_signature *sig = sinfo->sig;
 	struct crypto_shash *tfm;
 	struct shash_desc *desc;
-	size_t digest_size, desc_size;
-	void *digest;
+	size_t desc_size;
 	int ret;
 
-	kenter(",%u,%s", sinfo->index, sinfo->sig.hash_algo);
+	kenter(",%u,%s", sinfo->index, sinfo->sig->hash_algo);
 
-	if (!sinfo->sig.hash_algo)
+	if (!sinfo->sig->hash_algo)
 		return -ENOPKG;
 
 	/* Allocate the hashing algorithm we're going to need and find out how
 	 * big the hash operational data will be.
 	 */
-	tfm = crypto_alloc_shash(sinfo->sig.hash_algo, 0, 0);
+	tfm = crypto_alloc_shash(sinfo->sig->hash_algo, 0, 0);
 	if (IS_ERR(tfm))
 		return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
 
 	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
-	sinfo->sig.digest_size = digest_size = crypto_shash_digestsize(tfm);
+	sig->digest_size = crypto_shash_digestsize(tfm);
 
 	ret = -ENOMEM;
-	digest = kzalloc(ALIGN(digest_size, __alignof__(*desc)) + desc_size,
-			 GFP_KERNEL);
-	if (!digest)
+	sig->digest = kmalloc(sig->digest_size, GFP_KERNEL);
+	if (!sig->digest)
 		goto error_no_desc;
 
-	desc = PTR_ALIGN(digest + digest_size, __alignof__(*desc));
+	desc = kzalloc(desc_size, GFP_KERNEL);
+	if (!desc)
+		goto error_no_desc;
+
 	desc->tfm   = tfm;
 	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
@@ -60,10 +62,11 @@
 	ret = crypto_shash_init(desc);
 	if (ret < 0)
 		goto error;
-	ret = crypto_shash_finup(desc, pkcs7->data, pkcs7->data_len, digest);
+	ret = crypto_shash_finup(desc, pkcs7->data, pkcs7->data_len,
+				 sig->digest);
 	if (ret < 0)
 		goto error;
-	pr_devel("MsgDigest = [%*ph]\n", 8, digest);
+	pr_devel("MsgDigest = [%*ph]\n", 8, sig->digest);
 
 	/* However, if there are authenticated attributes, there must be a
 	 * message digest attribute amongst them which corresponds to the
@@ -78,14 +81,15 @@
 			goto error;
 		}
 
-		if (sinfo->msgdigest_len != sinfo->sig.digest_size) {
+		if (sinfo->msgdigest_len != sig->digest_size) {
 			pr_debug("Sig %u: Invalid digest size (%u)\n",
 				 sinfo->index, sinfo->msgdigest_len);
 			ret = -EBADMSG;
 			goto error;
 		}
 
-		if (memcmp(digest, sinfo->msgdigest, sinfo->msgdigest_len) != 0) {
+		if (memcmp(sig->digest, sinfo->msgdigest,
+			   sinfo->msgdigest_len) != 0) {
 			pr_debug("Sig %u: Message digest doesn't match\n",
 				 sinfo->index);
 			ret = -EKEYREJECTED;
@@ -97,7 +101,7 @@
 		 * convert the attributes from a CONT.0 into a SET before we
 		 * hash it.
 		 */
-		memset(digest, 0, sinfo->sig.digest_size);
+		memset(sig->digest, 0, sig->digest_size);
 
 		ret = crypto_shash_init(desc);
 		if (ret < 0)
@@ -107,17 +111,14 @@
 		if (ret < 0)
 			goto error;
 		ret = crypto_shash_finup(desc, sinfo->authattrs,
-					 sinfo->authattrs_len, digest);
+					 sinfo->authattrs_len, sig->digest);
 		if (ret < 0)
 			goto error;
-		pr_devel("AADigest = [%*ph]\n", 8, digest);
+		pr_devel("AADigest = [%*ph]\n", 8, sig->digest);
 	}
 
-	sinfo->sig.digest = digest;
-	digest = NULL;
-
 error:
-	kfree(digest);
+	kfree(desc);
 error_no_desc:
 	crypto_free_shash(tfm);
 	kleave(" = %d", ret);
@@ -144,12 +145,12 @@
 		 * PKCS#7 message - but I can't be 100% sure of that.  It's
 		 * possible this will need element-by-element comparison.
 		 */
-		if (!asymmetric_key_id_same(x509->id, sinfo->signing_cert_id))
+		if (!asymmetric_key_id_same(x509->id, sinfo->sig->auth_ids[0]))
 			continue;
 		pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
 			 sinfo->index, certix);
 
-		if (x509->pub->pkey_algo != sinfo->sig.pkey_algo) {
+		if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) {
 			pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
 				sinfo->index);
 			continue;
@@ -164,7 +165,7 @@
 	 */
 	pr_debug("Sig %u: Issuing X.509 cert not found (#%*phN)\n",
 		 sinfo->index,
-		 sinfo->signing_cert_id->len, sinfo->signing_cert_id->data);
+		 sinfo->sig->auth_ids[0]->len, sinfo->sig->auth_ids[0]->data);
 	return 0;
 }
 
@@ -174,6 +175,7 @@
 static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
 				  struct pkcs7_signed_info *sinfo)
 {
+	struct public_key_signature *sig;
 	struct x509_certificate *x509 = sinfo->signer, *p;
 	struct asymmetric_key_id *auth;
 	int ret;
@@ -188,34 +190,26 @@
 			 x509->subject,
 			 x509->raw_serial_size, x509->raw_serial);
 		x509->seen = true;
-		ret = x509_get_sig_params(x509);
-		if (ret < 0)
-			goto maybe_missing_crypto_in_x509;
+		if (x509->unsupported_key)
+			goto unsupported_crypto_in_x509;
 
 		pr_debug("- issuer %s\n", x509->issuer);
-		if (x509->akid_id)
+		sig = x509->sig;
+		if (sig->auth_ids[0])
 			pr_debug("- authkeyid.id %*phN\n",
-				 x509->akid_id->len, x509->akid_id->data);
-		if (x509->akid_skid)
+				 sig->auth_ids[0]->len, sig->auth_ids[0]->data);
+		if (sig->auth_ids[1])
 			pr_debug("- authkeyid.skid %*phN\n",
-				 x509->akid_skid->len, x509->akid_skid->data);
+				 sig->auth_ids[1]->len, sig->auth_ids[1]->data);
 
-		if ((!x509->akid_id && !x509->akid_skid) ||
-		    strcmp(x509->subject, x509->issuer) == 0) {
+		if (x509->self_signed) {
 			/* If there's no authority certificate specified, then
 			 * the certificate must be self-signed and is the root
 			 * of the chain.  Likewise if the cert is its own
 			 * authority.
 			 */
-			pr_debug("- no auth?\n");
-			if (x509->raw_subject_size != x509->raw_issuer_size ||
-			    memcmp(x509->raw_subject, x509->raw_issuer,
-				   x509->raw_issuer_size) != 0)
-				return 0;
-
-			ret = x509_check_signature(x509->pub, x509);
-			if (ret < 0)
-				goto maybe_missing_crypto_in_x509;
+			if (x509->unsupported_sig)
+				goto unsupported_crypto_in_x509;
 			x509->signer = x509;
 			pr_debug("- self-signed\n");
 			return 0;
@@ -224,7 +218,7 @@
 		/* Look through the X.509 certificates in the PKCS#7 message's
 		 * list to see if the next one is there.
 		 */
-		auth = x509->akid_id;
+		auth = sig->auth_ids[0];
 		if (auth) {
 			pr_debug("- want %*phN\n", auth->len, auth->data);
 			for (p = pkcs7->certs; p; p = p->next) {
@@ -234,7 +228,7 @@
 					goto found_issuer_check_skid;
 			}
 		} else {
-			auth = x509->akid_skid;
+			auth = sig->auth_ids[1];
 			pr_debug("- want %*phN\n", auth->len, auth->data);
 			for (p = pkcs7->certs; p; p = p->next) {
 				if (!p->skid)
@@ -254,8 +248,8 @@
 		/* We matched issuer + serialNumber, but if there's an
 		 * authKeyId.keyId, that must match the CA subjKeyId also.
 		 */
-		if (x509->akid_skid &&
-		    !asymmetric_key_id_same(p->skid, x509->akid_skid)) {
+		if (sig->auth_ids[1] &&
+		    !asymmetric_key_id_same(p->skid, sig->auth_ids[1])) {
 			pr_warn("Sig %u: X.509 chain contains auth-skid nonmatch (%u->%u)\n",
 				sinfo->index, x509->index, p->index);
 			return -EKEYREJECTED;
@@ -267,7 +261,7 @@
 				sinfo->index);
 			return 0;
 		}
-		ret = x509_check_signature(p->pub, x509);
+		ret = public_key_verify_signature(p->pub, p->sig);
 		if (ret < 0)
 			return ret;
 		x509->signer = p;
@@ -279,16 +273,14 @@
 		might_sleep();
 	}
 
-maybe_missing_crypto_in_x509:
+unsupported_crypto_in_x509:
 	/* Just prune the certificate chain at this point if we lack some
 	 * crypto module to go further.  Note, however, we don't want to set
-	 * sinfo->missing_crypto as the signed info block may still be
+	 * sinfo->unsupported_crypto as the signed info block may still be
 	 * validatable against an X.509 cert lower in the chain that we have a
 	 * trusted copy of.
 	 */
-	if (ret == -ENOPKG)
-		return 0;
-	return ret;
+	return 0;
 }
 
 /*
@@ -332,7 +324,7 @@
 	}
 
 	/* Verify the PKCS#7 binary against the key */
-	ret = public_key_verify_signature(sinfo->signer->pub, &sinfo->sig);
+	ret = public_key_verify_signature(sinfo->signer->pub, sinfo->sig);
 	if (ret < 0)
 		return ret;
 
@@ -375,9 +367,8 @@
 		 enum key_being_used_for usage)
 {
 	struct pkcs7_signed_info *sinfo;
-	struct x509_certificate *x509;
 	int enopkg = -ENOPKG;
-	int ret, n;
+	int ret;
 
 	kenter("");
 
@@ -419,12 +410,6 @@
 		return -EINVAL;
 	}
 
-	for (n = 0, x509 = pkcs7->certs; x509; x509 = x509->next, n++) {
-		ret = x509_get_sig_params(x509);
-		if (ret < 0)
-			return ret;
-	}
-
 	for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
 		ret = pkcs7_verify_one(pkcs7, sinfo);
 		if (ret < 0) {
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 0f8b264..fd76b5f 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -39,15 +39,23 @@
 /*
  * Destroy a public key algorithm key.
  */
-void public_key_destroy(void *payload)
+void public_key_free(struct public_key *key)
 {
-	struct public_key *key = payload;
-
-	if (key)
+	if (key) {
 		kfree(key->key);
-	kfree(key);
+		kfree(key);
+	}
 }
-EXPORT_SYMBOL_GPL(public_key_destroy);
+EXPORT_SYMBOL_GPL(public_key_free);
+
+/*
+ * Destroy a public key algorithm key.
+ */
+static void public_key_destroy(void *payload0, void *payload3)
+{
+	public_key_free(payload0);
+	public_key_signature_free(payload3);
+}
 
 struct public_key_completion {
 	struct completion completion;
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
new file mode 100644
index 0000000..ac4bddf
--- /dev/null
+++ b/crypto/asymmetric_keys/restrict.c
@@ -0,0 +1,108 @@
+/* Instantiate a public key crypto key from an X.509 Certificate
+ *
+ * Copyright (C) 2012, 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "ASYM: "fmt
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <crypto/public_key.h>
+#include "asymmetric_keys.h"
+
+static bool use_builtin_keys;
+static struct asymmetric_key_id *ca_keyid;
+
+#ifndef MODULE
+static struct {
+	struct asymmetric_key_id id;
+	unsigned char data[10];
+} cakey;
+
+static int __init ca_keys_setup(char *str)
+{
+	if (!str)		/* default system keyring */
+		return 1;
+
+	if (strncmp(str, "id:", 3) == 0) {
+		struct asymmetric_key_id *p = &cakey.id;
+		size_t hexlen = (strlen(str) - 3) / 2;
+		int ret;
+
+		if (hexlen == 0 || hexlen > sizeof(cakey.data)) {
+			pr_err("Missing or invalid ca_keys id\n");
+			return 1;
+		}
+
+		ret = __asymmetric_key_hex_to_key_id(str + 3, p, hexlen);
+		if (ret < 0)
+			pr_err("Unparsable ca_keys id hex string\n");
+		else
+			ca_keyid = p;	/* owner key 'id:xxxxxx' */
+	} else if (strcmp(str, "builtin") == 0) {
+		use_builtin_keys = true;
+	}
+
+	return 1;
+}
+__setup("ca_keys=", ca_keys_setup);
+#endif
+
+/**
+ * restrict_link_by_signature - Restrict additions to a ring of public keys
+ * @trust_keyring: A ring of keys that can be used to vouch for the new cert.
+ * @type: The type of key being added.
+ * @payload: The payload of the new key.
+ *
+ * Check the new certificate against the ones in the trust keyring.  If one of
+ * those is the signing key and validates the new certificate, then mark the
+ * new certificate as being trusted.
+ *
+ * Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a
+ * matching parent certificate in the trusted list, -EKEYREJECTED if the
+ * signature check fails or the key is blacklisted and some other error if
+ * there is a matching certificate but the signature check cannot be performed.
+ */
+int restrict_link_by_signature(struct key *trust_keyring,
+			       const struct key_type *type,
+			       const union key_payload *payload)
+{
+	const struct public_key_signature *sig;
+	struct key *key;
+	int ret;
+
+	pr_devel("==>%s()\n", __func__);
+
+	if (!trust_keyring)
+		return -ENOKEY;
+
+	if (type != &key_type_asymmetric)
+		return -EOPNOTSUPP;
+
+	sig = payload->data[asym_auth];
+	if (!sig->auth_ids[0] && !sig->auth_ids[1])
+		return 0;
+
+	if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid))
+		return -EPERM;
+
+	/* See if we have a key that signed this one. */
+	key = find_asymmetric_key(trust_keyring,
+				  sig->auth_ids[0], sig->auth_ids[1],
+				  false);
+	if (IS_ERR(key))
+		return -ENOKEY;
+
+	if (use_builtin_keys && !test_bit(KEY_FLAG_BUILTIN, &key->flags))
+		ret = -ENOKEY;
+	else
+		ret = verify_signature(key, sig);
+	key_put(key);
+	return ret;
+}
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 004d5fc..11b7ba17 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -15,9 +15,27 @@
 #include <keys/asymmetric-subtype.h>
 #include <linux/export.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 #include <crypto/public_key.h>
 #include "asymmetric_keys.h"
 
+/*
+ * Destroy a public key signature.
+ */
+void public_key_signature_free(struct public_key_signature *sig)
+{
+	int i;
+
+	if (sig) {
+		for (i = 0; i < ARRAY_SIZE(sig->auth_ids); i++)
+			kfree(sig->auth_ids[i]);
+		kfree(sig->s);
+		kfree(sig->digest);
+		kfree(sig);
+	}
+}
+EXPORT_SYMBOL_GPL(public_key_signature_free);
+
 /**
  * verify_signature - Initiate the use of an asymmetric key to verify a signature
  * @key: The asymmetric key to verify against
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 7e8c233..672a94c 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -16,7 +16,7 @@
 #include <linux/err.h>
 #include <linux/pe.h>
 #include <linux/asn1.h>
-#include <crypto/pkcs7.h>
+#include <linux/verification.h>
 #include <crypto/hash.h>
 #include "verify_pefile.h"
 
@@ -392,9 +392,8 @@
  * verify_pefile_signature - Verify the signature on a PE binary image
  * @pebuf: Buffer containing the PE binary image
  * @pelen: Length of the binary image
- * @trust_keyring: Signing certificates to use as starting points
+ * @trusted_keys: Signing certificate(s) to use as starting points
  * @usage: The use to which the key is being put.
- * @_trusted: Set to true if trustworth, false otherwise
  *
  * Validate that the certificate chain inside the PKCS#7 message inside the PE
  * binary image intersects keys we already know and trust.
@@ -418,14 +417,10 @@
  * May also return -ENOMEM.
  */
 int verify_pefile_signature(const void *pebuf, unsigned pelen,
-			    struct key *trusted_keyring,
-			    enum key_being_used_for usage,
-			    bool *_trusted)
+			    struct key *trusted_keys,
+			    enum key_being_used_for usage)
 {
-	struct pkcs7_message *pkcs7;
 	struct pefile_context ctx;
-	const void *data;
-	size_t datalen;
 	int ret;
 
 	kenter("");
@@ -439,19 +434,10 @@
 	if (ret < 0)
 		return ret;
 
-	pkcs7 = pkcs7_parse_message(pebuf + ctx.sig_offset, ctx.sig_len);
-	if (IS_ERR(pkcs7))
-		return PTR_ERR(pkcs7);
-	ctx.pkcs7 = pkcs7;
-
-	ret = pkcs7_get_content_data(ctx.pkcs7, &data, &datalen, false);
-	if (ret < 0 || datalen == 0) {
-		pr_devel("PKCS#7 message does not contain data\n");
-		ret = -EBADMSG;
-		goto error;
-	}
-
-	ret = mscode_parse(&ctx);
+	ret = verify_pkcs7_signature(NULL, 0,
+				     pebuf + ctx.sig_offset, ctx.sig_len,
+				     trusted_keys, usage,
+				     mscode_parse, &ctx);
 	if (ret < 0)
 		goto error;
 
@@ -462,16 +448,8 @@
 	 * contents.
 	 */
 	ret = pefile_digest_pe(pebuf, pelen, &ctx);
-	if (ret < 0)
-		goto error;
-
-	ret = pkcs7_verify(pkcs7, usage);
-	if (ret < 0)
-		goto error;
-
-	ret = pkcs7_validate_trust(pkcs7, trusted_keyring, _trusted);
 
 error:
-	pkcs7_free_message(ctx.pkcs7);
+	kfree(ctx.digest);
 	return ret;
 }
diff --git a/crypto/asymmetric_keys/verify_pefile.h b/crypto/asymmetric_keys/verify_pefile.h
index a133eb8..cd4d209 100644
--- a/crypto/asymmetric_keys/verify_pefile.h
+++ b/crypto/asymmetric_keys/verify_pefile.h
@@ -9,7 +9,6 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#include <linux/verify_pefile.h>
 #include <crypto/pkcs7.h>
 #include <crypto/hash_info.h>
 
@@ -23,7 +22,6 @@
 	unsigned	sig_offset;
 	unsigned	sig_len;
 	const struct section_header *secs;
-	struct pkcs7_message *pkcs7;
 
 	/* PKCS#7 MS Individual Code Signing content */
 	const void	*digest;		/* Digest */
@@ -39,4 +37,5 @@
 /*
  * mscode_parser.c
  */
-extern int mscode_parse(struct pefile_context *ctx);
+extern int mscode_parse(void *_ctx, const void *content_data, size_t data_len,
+			size_t asn1hdrlen);
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 4a29bac..865f46e 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -47,15 +47,12 @@
 void x509_free_certificate(struct x509_certificate *cert)
 {
 	if (cert) {
-		public_key_destroy(cert->pub);
+		public_key_free(cert->pub);
+		public_key_signature_free(cert->sig);
 		kfree(cert->issuer);
 		kfree(cert->subject);
 		kfree(cert->id);
 		kfree(cert->skid);
-		kfree(cert->akid_id);
-		kfree(cert->akid_skid);
-		kfree(cert->sig.digest);
-		kfree(cert->sig.s);
 		kfree(cert);
 	}
 }
@@ -78,6 +75,9 @@
 	cert->pub = kzalloc(sizeof(struct public_key), GFP_KERNEL);
 	if (!cert->pub)
 		goto error_no_ctx;
+	cert->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL);
+	if (!cert->sig)
+		goto error_no_ctx;
 	ctx = kzalloc(sizeof(struct x509_parse_context), GFP_KERNEL);
 	if (!ctx)
 		goto error_no_ctx;
@@ -108,6 +108,11 @@
 
 	cert->pub->keylen = ctx->key_size;
 
+	/* Grab the signature bits */
+	ret = x509_get_sig_params(cert);
+	if (ret < 0)
+		goto error_decode;
+
 	/* Generate cert issuer + serial number key ID */
 	kid = asymmetric_key_generate_id(cert->raw_serial,
 					 cert->raw_serial_size,
@@ -119,6 +124,11 @@
 	}
 	cert->id = kid;
 
+	/* Detect self-signed certificates */
+	ret = x509_check_for_self_signed(cert);
+	if (ret < 0)
+		goto error_decode;
+
 	kfree(ctx);
 	return cert;
 
@@ -188,33 +198,33 @@
 		return -ENOPKG; /* Unsupported combination */
 
 	case OID_md4WithRSAEncryption:
-		ctx->cert->sig.hash_algo = "md4";
-		ctx->cert->sig.pkey_algo = "rsa";
+		ctx->cert->sig->hash_algo = "md4";
+		ctx->cert->sig->pkey_algo = "rsa";
 		break;
 
 	case OID_sha1WithRSAEncryption:
-		ctx->cert->sig.hash_algo = "sha1";
-		ctx->cert->sig.pkey_algo = "rsa";
+		ctx->cert->sig->hash_algo = "sha1";
+		ctx->cert->sig->pkey_algo = "rsa";
 		break;
 
 	case OID_sha256WithRSAEncryption:
-		ctx->cert->sig.hash_algo = "sha256";
-		ctx->cert->sig.pkey_algo = "rsa";
+		ctx->cert->sig->hash_algo = "sha256";
+		ctx->cert->sig->pkey_algo = "rsa";
 		break;
 
 	case OID_sha384WithRSAEncryption:
-		ctx->cert->sig.hash_algo = "sha384";
-		ctx->cert->sig.pkey_algo = "rsa";
+		ctx->cert->sig->hash_algo = "sha384";
+		ctx->cert->sig->pkey_algo = "rsa";
 		break;
 
 	case OID_sha512WithRSAEncryption:
-		ctx->cert->sig.hash_algo = "sha512";
-		ctx->cert->sig.pkey_algo = "rsa";
+		ctx->cert->sig->hash_algo = "sha512";
+		ctx->cert->sig->pkey_algo = "rsa";
 		break;
 
 	case OID_sha224WithRSAEncryption:
-		ctx->cert->sig.hash_algo = "sha224";
-		ctx->cert->sig.pkey_algo = "rsa";
+		ctx->cert->sig->hash_algo = "sha224";
+		ctx->cert->sig->pkey_algo = "rsa";
 		break;
 	}
 
@@ -572,14 +582,14 @@
 
 	pr_debug("AKID: keyid: %*phN\n", (int)vlen, value);
 
-	if (ctx->cert->akid_skid)
+	if (ctx->cert->sig->auth_ids[1])
 		return 0;
 
 	kid = asymmetric_key_generate_id(value, vlen, "", 0);
 	if (IS_ERR(kid))
 		return PTR_ERR(kid);
 	pr_debug("authkeyid %*phN\n", kid->len, kid->data);
-	ctx->cert->akid_skid = kid;
+	ctx->cert->sig->auth_ids[1] = kid;
 	return 0;
 }
 
@@ -611,7 +621,7 @@
 
 	pr_debug("AKID: serial: %*phN\n", (int)vlen, value);
 
-	if (!ctx->akid_raw_issuer || ctx->cert->akid_id)
+	if (!ctx->akid_raw_issuer || ctx->cert->sig->auth_ids[0])
 		return 0;
 
 	kid = asymmetric_key_generate_id(value,
@@ -622,6 +632,6 @@
 		return PTR_ERR(kid);
 
 	pr_debug("authkeyid %*phN\n", kid->len, kid->data);
-	ctx->cert->akid_id = kid;
+	ctx->cert->sig->auth_ids[0] = kid;
 	return 0;
 }
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index dbeed60..05eef1c 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -17,13 +17,11 @@
 	struct x509_certificate *next;
 	struct x509_certificate *signer;	/* Certificate that signed this one */
 	struct public_key *pub;			/* Public key details */
-	struct public_key_signature sig;	/* Signature parameters */
+	struct public_key_signature *sig;	/* Signature parameters */
 	char		*issuer;		/* Name of certificate issuer */
 	char		*subject;		/* Name of certificate subject */
 	struct asymmetric_key_id *id;		/* Issuer + Serial number */
 	struct asymmetric_key_id *skid;		/* Subject + subjectKeyId (optional) */
-	struct asymmetric_key_id *akid_id;	/* CA AuthKeyId matching ->id (optional) */
-	struct asymmetric_key_id *akid_skid;	/* CA AuthKeyId matching ->skid (optional) */
 	time64_t	valid_from;
 	time64_t	valid_to;
 	const void	*tbs;			/* Signed data */
@@ -41,8 +39,9 @@
 	unsigned	index;
 	bool		seen;			/* Infinite recursion prevention */
 	bool		verified;
-	bool		trusted;
-	bool		unsupported_crypto;	/* T if can't be verified due to missing crypto */
+	bool		self_signed;		/* T if self-signed (check unsupported_sig too) */
+	bool		unsupported_key;	/* T if key uses unsupported crypto */
+	bool		unsupported_sig;	/* T if signature uses unsupported crypto */
 };
 
 /*
@@ -58,5 +57,4 @@
  * x509_public_key.c
  */
 extern int x509_get_sig_params(struct x509_certificate *cert);
-extern int x509_check_signature(const struct public_key *pub,
-				struct x509_certificate *cert);
+extern int x509_check_for_self_signed(struct x509_certificate *cert);
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 733c046..fb73229 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -20,256 +20,133 @@
 #include "asymmetric_keys.h"
 #include "x509_parser.h"
 
-static bool use_builtin_keys;
-static struct asymmetric_key_id *ca_keyid;
-
-#ifndef MODULE
-static struct {
-	struct asymmetric_key_id id;
-	unsigned char data[10];
-} cakey;
-
-static int __init ca_keys_setup(char *str)
-{
-	if (!str)		/* default system keyring */
-		return 1;
-
-	if (strncmp(str, "id:", 3) == 0) {
-		struct asymmetric_key_id *p = &cakey.id;
-		size_t hexlen = (strlen(str) - 3) / 2;
-		int ret;
-
-		if (hexlen == 0 || hexlen > sizeof(cakey.data)) {
-			pr_err("Missing or invalid ca_keys id\n");
-			return 1;
-		}
-
-		ret = __asymmetric_key_hex_to_key_id(str + 3, p, hexlen);
-		if (ret < 0)
-			pr_err("Unparsable ca_keys id hex string\n");
-		else
-			ca_keyid = p;	/* owner key 'id:xxxxxx' */
-	} else if (strcmp(str, "builtin") == 0) {
-		use_builtin_keys = true;
-	}
-
-	return 1;
-}
-__setup("ca_keys=", ca_keys_setup);
-#endif
-
-/**
- * x509_request_asymmetric_key - Request a key by X.509 certificate params.
- * @keyring: The keys to search.
- * @id: The issuer & serialNumber to look for or NULL.
- * @skid: The subjectKeyIdentifier to look for or NULL.
- * @partial: Use partial match if true, exact if false.
- *
- * Find a key in the given keyring by identifier.  The preferred identifier is
- * the issuer + serialNumber and the fallback identifier is the
- * subjectKeyIdentifier.  If both are given, the lookup is by the former, but
- * the latter must also match.
- */
-struct key *x509_request_asymmetric_key(struct key *keyring,
-					const struct asymmetric_key_id *id,
-					const struct asymmetric_key_id *skid,
-					bool partial)
-{
-	struct key *key;
-	key_ref_t ref;
-	const char *lookup;
-	char *req, *p;
-	int len;
-
-	if (id) {
-		lookup = id->data;
-		len = id->len;
-	} else {
-		lookup = skid->data;
-		len = skid->len;
-	}
-	
-	/* Construct an identifier "id:<keyid>". */
-	p = req = kmalloc(2 + 1 + len * 2 + 1, GFP_KERNEL);
-	if (!req)
-		return ERR_PTR(-ENOMEM);
-
-	if (partial) {
-		*p++ = 'i';
-		*p++ = 'd';
-	} else {
-		*p++ = 'e';
-		*p++ = 'x';
-	}
-	*p++ = ':';
-	p = bin2hex(p, lookup, len);
-	*p = 0;
-
-	pr_debug("Look up: \"%s\"\n", req);
-
-	ref = keyring_search(make_key_ref(keyring, 1),
-			     &key_type_asymmetric, req);
-	if (IS_ERR(ref))
-		pr_debug("Request for key '%s' err %ld\n", req, PTR_ERR(ref));
-	kfree(req);
-
-	if (IS_ERR(ref)) {
-		switch (PTR_ERR(ref)) {
-			/* Hide some search errors */
-		case -EACCES:
-		case -ENOTDIR:
-		case -EAGAIN:
-			return ERR_PTR(-ENOKEY);
-		default:
-			return ERR_CAST(ref);
-		}
-	}
-
-	key = key_ref_to_ptr(ref);
-	if (id && skid) {
-		const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
-		if (!kids->id[1]) {
-			pr_debug("issuer+serial match, but expected SKID missing\n");
-			goto reject;
-		}
-		if (!asymmetric_key_id_same(skid, kids->id[1])) {
-			pr_debug("issuer+serial match, but SKID does not\n");
-			goto reject;
-		}
-	}
-	
-	pr_devel("<==%s() = 0 [%x]\n", __func__, key_serial(key));
-	return key;
-
-reject:
-	key_put(key);
-	return ERR_PTR(-EKEYREJECTED);
-}
-EXPORT_SYMBOL_GPL(x509_request_asymmetric_key);
-
 /*
  * Set up the signature parameters in an X.509 certificate.  This involves
  * digesting the signed data and extracting the signature.
  */
 int x509_get_sig_params(struct x509_certificate *cert)
 {
+	struct public_key_signature *sig = cert->sig;
 	struct crypto_shash *tfm;
 	struct shash_desc *desc;
-	size_t digest_size, desc_size;
-	void *digest;
+	size_t desc_size;
 	int ret;
 
 	pr_devel("==>%s()\n", __func__);
 
-	if (cert->unsupported_crypto)
-		return -ENOPKG;
-	if (cert->sig.s)
-		return 0;
+	if (!cert->pub->pkey_algo)
+		cert->unsupported_key = true;
 
-	cert->sig.s = kmemdup(cert->raw_sig, cert->raw_sig_size,
-			      GFP_KERNEL);
-	if (!cert->sig.s)
+	if (!sig->pkey_algo)
+		cert->unsupported_sig = true;
+
+	/* We check the hash if we can - even if we can't then verify it */
+	if (!sig->hash_algo) {
+		cert->unsupported_sig = true;
+		return 0;
+	}
+
+	sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL);
+	if (!sig->s)
 		return -ENOMEM;
 
-	cert->sig.s_size = cert->raw_sig_size;
+	sig->s_size = cert->raw_sig_size;
 
 	/* Allocate the hashing algorithm we're going to need and find out how
 	 * big the hash operational data will be.
 	 */
-	tfm = crypto_alloc_shash(cert->sig.hash_algo, 0, 0);
+	tfm = crypto_alloc_shash(sig->hash_algo, 0, 0);
 	if (IS_ERR(tfm)) {
 		if (PTR_ERR(tfm) == -ENOENT) {
-			cert->unsupported_crypto = true;
-			return -ENOPKG;
+			cert->unsupported_sig = true;
+			return 0;
 		}
 		return PTR_ERR(tfm);
 	}
 
 	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
-	digest_size = crypto_shash_digestsize(tfm);
+	sig->digest_size = crypto_shash_digestsize(tfm);
 
-	/* We allocate the hash operational data storage on the end of the
-	 * digest storage space.
-	 */
 	ret = -ENOMEM;
-	digest = kzalloc(ALIGN(digest_size, __alignof__(*desc)) + desc_size,
-			 GFP_KERNEL);
-	if (!digest)
+	sig->digest = kmalloc(sig->digest_size, GFP_KERNEL);
+	if (!sig->digest)
 		goto error;
 
-	cert->sig.digest = digest;
-	cert->sig.digest_size = digest_size;
+	desc = kzalloc(desc_size, GFP_KERNEL);
+	if (!desc)
+		goto error;
 
-	desc = PTR_ALIGN(digest + digest_size, __alignof__(*desc));
 	desc->tfm = tfm;
 	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	ret = crypto_shash_init(desc);
 	if (ret < 0)
-		goto error;
+		goto error_2;
 	might_sleep();
-	ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, digest);
+	ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest);
+
+error_2:
+	kfree(desc);
 error:
 	crypto_free_shash(tfm);
 	pr_devel("<==%s() = %d\n", __func__, ret);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(x509_get_sig_params);
 
 /*
- * Check the signature on a certificate using the provided public key
+ * Check for self-signedness in an X.509 cert and if found, check the signature
+ * immediately if we can.
  */
-int x509_check_signature(const struct public_key *pub,
-			 struct x509_certificate *cert)
+int x509_check_for_self_signed(struct x509_certificate *cert)
 {
-	int ret;
+	int ret = 0;
 
 	pr_devel("==>%s()\n", __func__);
 
-	ret = x509_get_sig_params(cert);
-	if (ret < 0)
-		return ret;
+	if (cert->raw_subject_size != cert->raw_issuer_size ||
+	    memcmp(cert->raw_subject, cert->raw_issuer,
+		   cert->raw_issuer_size) != 0)
+		goto not_self_signed;
 
-	ret = public_key_verify_signature(pub, &cert->sig);
-	if (ret == -ENOPKG)
-		cert->unsupported_crypto = true;
-	pr_debug("Cert Verification: %d\n", ret);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(x509_check_signature);
+	if (cert->sig->auth_ids[0] || cert->sig->auth_ids[1]) {
+		/* If the AKID is present it may have one or two parts.  If
+		 * both are supplied, both must match.
+		 */
+		bool a = asymmetric_key_id_same(cert->skid, cert->sig->auth_ids[1]);
+		bool b = asymmetric_key_id_same(cert->id, cert->sig->auth_ids[0]);
 
-/*
- * Check the new certificate against the ones in the trust keyring.  If one of
- * those is the signing key and validates the new certificate, then mark the
- * new certificate as being trusted.
- *
- * Return 0 if the new certificate was successfully validated, 1 if we couldn't
- * find a matching parent certificate in the trusted list and an error if there
- * is a matching certificate but the signature check fails.
- */
-static int x509_validate_trust(struct x509_certificate *cert,
-			       struct key *trust_keyring)
-{
-	struct key *key;
-	int ret = 1;
+		if (!a && !b)
+			goto not_self_signed;
 
-	if (!trust_keyring)
-		return -EOPNOTSUPP;
-
-	if (ca_keyid && !asymmetric_key_id_partial(cert->akid_skid, ca_keyid))
-		return -EPERM;
-
-	key = x509_request_asymmetric_key(trust_keyring,
-					  cert->akid_id, cert->akid_skid,
-					  false);
-	if (!IS_ERR(key))  {
-		if (!use_builtin_keys
-		    || test_bit(KEY_FLAG_BUILTIN, &key->flags))
-			ret = x509_check_signature(key->payload.data[asym_crypto],
-						   cert);
-		key_put(key);
+		ret = -EKEYREJECTED;
+		if (((a && !b) || (b && !a)) &&
+		    cert->sig->auth_ids[0] && cert->sig->auth_ids[1])
+			goto out;
 	}
+
+	ret = -EKEYREJECTED;
+	if (cert->pub->pkey_algo != cert->sig->pkey_algo)
+		goto out;
+
+	ret = public_key_verify_signature(cert->pub, cert->sig);
+	if (ret < 0) {
+		if (ret == -ENOPKG) {
+			cert->unsupported_sig = true;
+			ret = 0;
+		}
+		goto out;
+	}
+
+	pr_devel("Cert Self-signature verified");
+	cert->self_signed = true;
+
+out:
+	pr_devel("<==%s() = %d\n", __func__, ret);
 	return ret;
+
+not_self_signed:
+	pr_devel("<==%s() = 0 [not]\n", __func__);
+	return 0;
 }
 
 /*
@@ -291,34 +168,22 @@
 	pr_devel("Cert Issuer: %s\n", cert->issuer);
 	pr_devel("Cert Subject: %s\n", cert->subject);
 
-	if (!cert->pub->pkey_algo ||
-	    !cert->sig.pkey_algo ||
-	    !cert->sig.hash_algo) {
+	if (cert->unsupported_key) {
 		ret = -ENOPKG;
 		goto error_free_cert;
 	}
 
 	pr_devel("Cert Key Algo: %s\n", cert->pub->pkey_algo);
 	pr_devel("Cert Valid period: %lld-%lld\n", cert->valid_from, cert->valid_to);
-	pr_devel("Cert Signature: %s + %s\n",
-		 cert->sig.pkey_algo,
-		 cert->sig.hash_algo);
 
 	cert->pub->id_type = "X509";
 
-	/* Check the signature on the key if it appears to be self-signed */
-	if ((!cert->akid_skid && !cert->akid_id) ||
-	    asymmetric_key_id_same(cert->skid, cert->akid_skid) ||
-	    asymmetric_key_id_same(cert->id, cert->akid_id)) {
-		ret = x509_check_signature(cert->pub, cert); /* self-signed */
-		if (ret < 0)
-			goto error_free_cert;
-	} else if (!prep->trusted) {
-		ret = x509_validate_trust(cert, get_system_trusted_keyring());
-		if (ret)
-			ret = x509_validate_trust(cert, get_ima_mok_keyring());
-		if (!ret)
-			prep->trusted = 1;
+	if (cert->unsupported_sig) {
+		public_key_signature_free(cert->sig);
+		cert->sig = NULL;
+	} else {
+		pr_devel("Cert Signature: %s + %s\n",
+			 cert->sig->pkey_algo, cert->sig->hash_algo);
 	}
 
 	/* Propose a description */
@@ -353,6 +218,7 @@
 	prep->payload.data[asym_subtype] = &public_key_subtype;
 	prep->payload.data[asym_key_ids] = kids;
 	prep->payload.data[asym_crypto] = cert->pub;
+	prep->payload.data[asym_auth] = cert->sig;
 	prep->description = desc;
 	prep->quotalen = 100;
 
@@ -360,6 +226,7 @@
 	cert->pub = NULL;
 	cert->id = NULL;
 	cert->skid = NULL;
+	cert->sig = NULL;
 	desc = NULL;
 	ret = 0;
 
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 1b86310..0a3538f 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -592,8 +592,10 @@
  ******************************************************************/
 
 #if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
-static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
-			   unsigned char *outval, const struct list_head *in);
+static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
+			   const struct list_head *in);
+static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
+				  const unsigned char *key);
 static int drbg_init_hash_kernel(struct drbg_state *drbg);
 static int drbg_fini_hash_kernel(struct drbg_state *drbg);
 #endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
@@ -619,9 +621,11 @@
 	LIST_HEAD(seedlist);
 	LIST_HEAD(vdatalist);
 
-	if (!reseed)
+	if (!reseed) {
 		/* 10.1.2.3 step 2 -- memset(0) of C is implicit with kzalloc */
 		memset(drbg->V, 1, drbg_statelen(drbg));
+		drbg_kcapi_hmacsetkey(drbg, drbg->C);
+	}
 
 	drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg));
 	list_add_tail(&seed1.list, &seedlist);
@@ -641,12 +645,13 @@
 			prefix = DRBG_PREFIX1;
 		/* 10.1.2.2 step 1 and 4 -- concatenation and HMAC for key */
 		seed2.buf = &prefix;
-		ret = drbg_kcapi_hash(drbg, drbg->C, drbg->C, &seedlist);
+		ret = drbg_kcapi_hash(drbg, drbg->C, &seedlist);
 		if (ret)
 			return ret;
+		drbg_kcapi_hmacsetkey(drbg, drbg->C);
 
 		/* 10.1.2.2 step 2 and 5 -- HMAC for V */
-		ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &vdatalist);
+		ret = drbg_kcapi_hash(drbg, drbg->V, &vdatalist);
 		if (ret)
 			return ret;
 
@@ -681,7 +686,7 @@
 	while (len < buflen) {
 		unsigned int outlen = 0;
 		/* 10.1.2.5 step 4.1 */
-		ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &datalist);
+		ret = drbg_kcapi_hash(drbg, drbg->V, &datalist);
 		if (ret)
 			return ret;
 		outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
@@ -796,7 +801,7 @@
 	while (len < outlen) {
 		short blocklen = 0;
 		/* 10.4.1 step 4.1 */
-		ret = drbg_kcapi_hash(drbg, NULL, tmp, entropylist);
+		ret = drbg_kcapi_hash(drbg, tmp, entropylist);
 		if (ret)
 			goto out;
 		/* 10.4.1 step 4.2 */
@@ -874,7 +879,7 @@
 	list_add_tail(&data1.list, &datalist);
 	list_add_tail(&data2.list, &datalist);
 	list_splice_tail(addtl, &datalist);
-	ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist);
+	ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist);
 	if (ret)
 		goto out;
 
@@ -907,7 +912,7 @@
 	while (len < buflen) {
 		unsigned int outlen = 0;
 		/* 10.1.1.4 step hashgen 4.1 */
-		ret = drbg_kcapi_hash(drbg, NULL, dst, &datalist);
+		ret = drbg_kcapi_hash(drbg, dst, &datalist);
 		if (ret) {
 			len = ret;
 			goto out;
@@ -956,7 +961,7 @@
 	list_add_tail(&data1.list, &datalist);
 	drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
 	list_add_tail(&data2.list, &datalist);
-	ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist);
+	ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist);
 	if (ret) {
 		len = ret;
 		goto out;
@@ -1600,14 +1605,20 @@
 	return 0;
 }
 
-static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
-			   unsigned char *outval, const struct list_head *in)
+static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
+				  const unsigned char *key)
+{
+	struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+
+	crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
+}
+
+static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
+			   const struct list_head *in)
 {
 	struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
 	struct drbg_string *input = NULL;
 
-	if (key)
-		crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
 	crypto_shash_init(&sdesc->shash);
 	list_for_each_entry(input, in, list)
 		crypto_shash_update(&sdesc->shash, input->buf, input->len);
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 4b3e925..c3f3dd9 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -32,7 +32,7 @@
 	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
-				    GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+				    GFP_KERNEL | __GFP_NOWARN);
 	if (!ctx->lzo_comp_mem)
 		ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
 	if (!ctx->lzo_comp_mem)
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 1cea67d..ead8dc0 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -387,16 +387,16 @@
 	req_ctx->child_req.src = req->src;
 	req_ctx->child_req.src_len = req->src_len;
 	req_ctx->child_req.dst = req_ctx->out_sg;
-	req_ctx->child_req.dst_len = ctx->key_size - 1;
+	req_ctx->child_req.dst_len = ctx->key_size;
 
-	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+	req_ctx->out_buf = kmalloc(ctx->key_size,
 			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 			GFP_KERNEL : GFP_ATOMIC);
 	if (!req_ctx->out_buf)
 		return -ENOMEM;
 
 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-			ctx->key_size - 1, NULL);
+			    ctx->key_size, NULL);
 
 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -595,16 +595,16 @@
 	req_ctx->child_req.src = req->src;
 	req_ctx->child_req.src_len = req->src_len;
 	req_ctx->child_req.dst = req_ctx->out_sg;
-	req_ctx->child_req.dst_len = ctx->key_size - 1;
+	req_ctx->child_req.dst_len = ctx->key_size;
 
-	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+	req_ctx->out_buf = kmalloc(ctx->key_size,
 			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 			GFP_KERNEL : GFP_ATOMIC);
 	if (!req_ctx->out_buf)
 		return -ENOMEM;
 
 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-			ctx->key_size - 1, NULL);
+			    ctx->key_size, NULL);
 
 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index b86883a..c727fb0 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -35,6 +35,10 @@
 
 #include "internal.h"
 
+static bool notests;
+module_param(notests, bool, 0644);
+MODULE_PARM_DESC(notests, "disable crypto self-tests");
+
 #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
 
 /* a perfect nop */
@@ -1776,6 +1780,7 @@
 static int do_test_rsa(struct crypto_akcipher *tfm,
 		       struct akcipher_testvec *vecs)
 {
+	char *xbuf[XBUFSIZE];
 	struct akcipher_request *req;
 	void *outbuf_enc = NULL;
 	void *outbuf_dec = NULL;
@@ -1784,9 +1789,12 @@
 	int err = -ENOMEM;
 	struct scatterlist src, dst, src_tab[2];
 
+	if (testmgr_alloc_buf(xbuf))
+		return err;
+
 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req)
-		return err;
+		goto free_xbuf;
 
 	init_completion(&result.completion);
 
@@ -1804,9 +1812,14 @@
 	if (!outbuf_enc)
 		goto free_req;
 
+	if (WARN_ON(vecs->m_size > PAGE_SIZE))
+		goto free_all;
+
+	memcpy(xbuf[0], vecs->m, vecs->m_size);
+
 	sg_init_table(src_tab, 2);
-	sg_set_buf(&src_tab[0], vecs->m, 8);
-	sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
+	sg_set_buf(&src_tab[0], xbuf[0], 8);
+	sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
 	sg_init_one(&dst, outbuf_enc, out_len_max);
 	akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
 				   out_len_max);
@@ -1825,7 +1838,7 @@
 		goto free_all;
 	}
 	/* verify that encrypted message is equal to expected */
-	if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
+	if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
 		pr_err("alg: rsa: encrypt test failed. Invalid output\n");
 		err = -EINVAL;
 		goto free_all;
@@ -1840,7 +1853,13 @@
 		err = -ENOMEM;
 		goto free_all;
 	}
-	sg_init_one(&src, vecs->c, vecs->c_size);
+
+	if (WARN_ON(vecs->c_size > PAGE_SIZE))
+		goto free_all;
+
+	memcpy(xbuf[0], vecs->c, vecs->c_size);
+
+	sg_init_one(&src, xbuf[0], vecs->c_size);
 	sg_init_one(&dst, outbuf_dec, out_len_max);
 	init_completion(&result.completion);
 	akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
@@ -1867,6 +1886,8 @@
 	kfree(outbuf_enc);
 free_req:
 	akcipher_request_free(req);
+free_xbuf:
+	testmgr_free_buf(xbuf);
 	return err;
 }
 
@@ -3868,6 +3889,11 @@
 	int j;
 	int rc;
 
+	if (!fips_enabled && notests) {
+		printk_once(KERN_INFO "alg: self-tests disabled\n");
+		return 0;
+	}
+
 	alg_test_descs_check_order();
 
 	if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 82b96ee..b7e2e77 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -5,10 +5,10 @@
 menuconfig ACPI
 	bool "ACPI (Advanced Configuration and Power Interface) Support"
 	depends on !IA64_HP_SIM
-	depends on IA64 || X86 || (ARM64 && EXPERT)
+	depends on IA64 || X86 || ARM64
 	depends on PCI
 	select PNP
-	default y
+	default y if (IA64 || X86)
 	help
 	  Advanced Configuration and Power Interface (ACPI) support for 
 	  Linux requires an ACPI-compliant platform (hardware/firmware),
@@ -311,12 +311,12 @@
 	bool
 	default ACPI_CUSTOM_DSDT_FILE != ""
 
-config ACPI_INITRD_TABLE_OVERRIDE
-	bool "ACPI tables override via initrd"
+config ACPI_TABLE_UPGRADE
+	bool "Allow upgrading ACPI tables via initrd"
 	depends on BLK_DEV_INITRD && X86
-	default n
+	default y
 	help
-	  This option provides functionality to override arbitrary ACPI tables
+	  This option provides functionality to upgrade arbitrary ACPI tables
 	  via initrd. No functional change if no ACPI tables are passed via
 	  initrd, therefore it's safe to say Y.
 	  See Documentation/acpi/initrd_table_override.txt for details
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index edeb2d1..251ce85 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -18,7 +18,7 @@
 					acpica/
 
 # All the builtin files are in the "acpi." module_param namespace.
-acpi-y				+= osl.o utils.o reboot.o
+acpi-y				+= osi.o osl.o utils.o reboot.o
 acpi-y				+= nvs.o
 
 # Power management related files
@@ -47,6 +47,7 @@
 acpi-y				+= int340x_thermal.o
 acpi-y				+= power.o
 acpi-y				+= event.o
+acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
 acpi-y				+= sysfs.o
 acpi-y				+= property.o
 acpi-$(CONFIG_X86)		+= acpi_cmos_rtc.o
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
index 2a61b54..7f77c07 100644
--- a/drivers/acpi/acpi_amba.c
+++ b/drivers/acpi/acpi_amba.c
@@ -35,8 +35,7 @@
 	if (amba_dummy_clk)
 		return;
 
-	amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL,
-						CLK_IS_ROOT, 0);
+	amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0);
 	clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL);
 }
 
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index f245bf3..1daf9c4 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -62,8 +62,7 @@
 	if (dev_desc->fixed_clk_rate) {
 		clk = clk_register_fixed_rate(&pdata->adev->dev,
 					dev_name(&pdata->adev->dev),
-					NULL, CLK_IS_ROOT,
-					dev_desc->fixed_clk_rate);
+					NULL, 0, dev_desc->fixed_clk_rate);
 		clk_register_clkdev(clk, NULL, dev_name(&pdata->adev->dev));
 		pdata->clk = clk;
 	}
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index b5e54f2..0d92d0f 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -491,6 +491,58 @@
 }
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
+#ifdef CONFIG_X86
+static bool acpi_hwp_native_thermal_lvt_set;
+static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
+							  u32 lvl,
+							  void *context,
+							  void **rv)
+{
+	u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
+	u32 capbuf[2];
+	struct acpi_osc_context osc_context = {
+		.uuid_str = sb_uuid_str,
+		.rev = 1,
+		.cap.length = 8,
+		.cap.pointer = capbuf,
+	};
+
+	if (acpi_hwp_native_thermal_lvt_set)
+		return AE_CTRL_TERMINATE;
+
+	capbuf[0] = 0x0000;
+	capbuf[1] = 0x1000; /* set bit 12 */
+
+	if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
+		if (osc_context.ret.pointer && osc_context.ret.length > 1) {
+			u32 *capbuf_ret = osc_context.ret.pointer;
+
+			if (capbuf_ret[1] & 0x1000) {
+				acpi_handle_info(handle,
+					"_OSC native thermal LVT Acked\n");
+				acpi_hwp_native_thermal_lvt_set = true;
+			}
+		}
+		kfree(osc_context.ret.pointer);
+	}
+
+	return AE_OK;
+}
+
+void __init acpi_early_processor_osc(void)
+{
+	if (boot_cpu_has(X86_FEATURE_HWP)) {
+		acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+				    ACPI_UINT32_MAX,
+				    acpi_hwp_native_thermal_lvt_osc,
+				    NULL, NULL, NULL);
+		acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
+				 acpi_hwp_native_thermal_lvt_osc,
+				 NULL, NULL);
+	}
+}
+#endif
+
 /*
  * The following ACPI IDs are known to be suitable for representing as
  * processor devices.
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 4361bc9..3d5b8a0 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -191,19 +191,6 @@
 	u8 _DDC:1;		/* Return the EDID for this device */
 };
 
-struct acpi_video_brightness_flags {
-	u8 _BCL_no_ac_battery_levels:1;	/* no AC/Battery levels in _BCL */
-	u8 _BCL_reversed:1;		/* _BCL package is in a reversed order */
-	u8 _BQC_use_index:1;		/* _BQC returns an index value */
-};
-
-struct acpi_video_device_brightness {
-	int curr;
-	int count;
-	int *levels;
-	struct acpi_video_brightness_flags flags;
-};
-
 struct acpi_video_device {
 	unsigned long device_id;
 	struct acpi_video_device_flags flags;
@@ -325,7 +312,7 @@
  */
 
 static int
-acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
+acpi_video_device_lcd_query_levels(acpi_handle handle,
 				   union acpi_object **levels)
 {
 	int status;
@@ -335,7 +322,7 @@
 
 	*levels = NULL;
 
-	status = acpi_evaluate_object(device->dev->handle, "_BCL", NULL, &buffer);
+	status = acpi_evaluate_object(handle, "_BCL", NULL, &buffer);
 	if (!ACPI_SUCCESS(status))
 		return status;
 	obj = (union acpi_object *)buffer.pointer;
@@ -766,36 +753,28 @@
 	return 0;
 }
 
-
-/*
- *  Arg:
- *	device	: video output device (LCD, CRT, ..)
- *
- *  Return Value:
- *	Maximum brightness level
- *
- *  Allocate and initialize device->brightness.
- */
-
-static int
-acpi_video_init_brightness(struct acpi_video_device *device)
+int acpi_video_get_levels(struct acpi_device *device,
+			  struct acpi_video_device_brightness **dev_br)
 {
 	union acpi_object *obj = NULL;
 	int i, max_level = 0, count = 0, level_ac_battery = 0;
-	unsigned long long level, level_old;
 	union acpi_object *o;
 	struct acpi_video_device_brightness *br = NULL;
-	int result = -EINVAL;
+	int result = 0;
 	u32 value;
 
-	if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
+	if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device->handle,
+								&obj))) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
 						"LCD brightness level\n"));
+		result = -ENODEV;
 		goto out;
 	}
 
-	if (obj->package.count < 2)
+	if (obj->package.count < 2) {
+		result = -EINVAL;
 		goto out;
+	}
 
 	br = kzalloc(sizeof(*br), GFP_KERNEL);
 	if (!br) {
@@ -861,6 +840,38 @@
 			    "Found unordered _BCL package"));
 
 	br->count = count;
+	*dev_br = br;
+
+out:
+	kfree(obj);
+	return result;
+out_free:
+	kfree(br);
+	goto out;
+}
+EXPORT_SYMBOL(acpi_video_get_levels);
+
+/*
+ *  Arg:
+ *	device	: video output device (LCD, CRT, ..)
+ *
+ *  Return Value:
+ *	Maximum brightness level
+ *
+ *  Allocate and initialize device->brightness.
+ */
+
+static int
+acpi_video_init_brightness(struct acpi_video_device *device)
+{
+	int i, max_level = 0;
+	unsigned long long level, level_old;
+	struct acpi_video_device_brightness *br = NULL;
+	int result = -EINVAL;
+
+	result = acpi_video_get_levels(device->dev, &br);
+	if (result)
+		return result;
 	device->brightness = br;
 
 	/* _BQC uses INDEX while _BCL uses VALUE in some laptops */
@@ -903,17 +914,13 @@
 		goto out_free_levels;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "found %d brightness levels\n", count - 2));
-	kfree(obj);
-	return result;
+			  "found %d brightness levels\n", br->count - 2));
+	return 0;
 
 out_free_levels:
 	kfree(br->levels);
-out_free:
 	kfree(br);
-out:
 	device->brightness = NULL;
-	kfree(obj);
 	return result;
 }
 
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index f682374..227bb7b 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -43,6 +43,7 @@
 	evxfregn.o
 
 acpi-y +=		\
+	exconcat.o	\
 	exconfig.o	\
 	exconvrt.o	\
 	excreate.o	\
@@ -149,6 +150,7 @@
 acpi-y +=		\
 	utaddress.o	\
 	utalloc.o	\
+	utascii.o	\
 	utbuffer.o	\
 	utcopy.o	\
 	utexcep.o	\
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 993af9e..f6404ea 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -53,7 +53,7 @@
 #define ACPI_DEBUG_BUFFER_SIZE  0x4000	/* 16K buffer for return objects */
 
 struct acpi_db_command_info {
-	char *name;		/* Command Name */
+	const char *name;	/* Command Name */
 	u8 min_args;		/* Minimum arguments required */
 };
 
@@ -64,7 +64,7 @@
 };
 
 struct acpi_db_argument_info {
-	char *name;		/* Argument Name */
+	const char *name;	/* Argument Name */
 };
 
 struct acpi_db_execute_walk {
@@ -196,7 +196,7 @@
 							     acpi_walk_state
 							     *walk_state))
 
- acpi_status acpi_db_display_all_methods(char *display_count_arg);
+acpi_status acpi_db_display_all_methods(char *display_count_arg);
 
 void acpi_db_display_arguments(void);
 
@@ -220,7 +220,7 @@
  * dbexec - debugger control method execution
  */
 void
-acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags);
+acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags);
 
 void
 acpi_db_create_execution_threads(char *num_threads_arg,
@@ -271,7 +271,7 @@
 acpi_status acpi_db_user_commands(void);
 
 char *acpi_db_get_next_token(char *string,
-			     char **next, acpi_object_type * return_type);
+			     char **next, acpi_object_type *return_type);
 
 /*
  * dbobject
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 010cf81..77af91c 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -72,6 +72,7 @@
 ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
 			    acpi_ev_acquire_global_lock(u16 timeout))
 ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
+
 acpi_status acpi_ev_remove_global_lock_handler(void);
 
 /*
@@ -198,8 +199,6 @@
 acpi_ev_detach_region(union acpi_operand_object *region_obj,
 		      u8 acpi_ns_is_locked);
 
-void acpi_ev_associate_reg_method(union acpi_operand_object *region_obj);
-
 void
 acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
 			    acpi_adr_space_type space_id, u32 function);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 51b073b..fded776 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -187,6 +187,8 @@
 extern const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS];
 extern const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS];
 extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
+extern const char acpi_gbl_lower_hex_digits[];
+extern const char acpi_gbl_upper_hex_digits[];
 extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
 
 #ifdef ACPI_DBG_TRACK_ALLOCATIONS
@@ -361,6 +363,15 @@
 
 #endif				/* ACPI_DEBUGGER */
 
+#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
+
+ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]);
+ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
+ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
+ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
+
+#endif
+
 /*****************************************************************************
  *
  * Application globals
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index bae1a35..7ead235 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -67,7 +67,7 @@
 typedef const struct acpi_exdump_info {
 	u8 opcode;
 	u8 offset;
-	char *name;
+	const char *name;
 
 } acpi_exdump_info;
 
@@ -370,7 +370,7 @@
 acpi_status
 acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
 			 union acpi_operand_object *operand,
-			 acpi_object_type * return_type,
+			 acpi_object_type *return_type,
 			 union acpi_operand_object **return_desc);
 
 /*
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 9562a10..13331d7 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -278,7 +278,7 @@
 };
 
 typedef
-acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state);
+acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
 
 /*
  * Bitmapped ACPI types. Used internally only
@@ -395,11 +395,12 @@
 
 /* Return object auto-repair info */
 
-typedef acpi_status(*acpi_object_converter) (struct acpi_namespace_node * scope,
-					     union acpi_operand_object
-					     *original_object,
-					     union acpi_operand_object
-					     **converted_object);
+typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
+					      scope,
+					      union acpi_operand_object *
+					      original_object,
+					      union acpi_operand_object **
+					      converted_object);
 
 struct acpi_simple_repair_info {
 	char name[ACPI_NAME_SIZE];
@@ -539,10 +540,10 @@
 	struct acpi_namespace_node *gpe_device;
 };
 
-typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
-					 gpe_xrupt_info,
-					 struct acpi_gpe_block_info *gpe_block,
-					 void *context);
+typedef acpi_status (*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
+					  gpe_xrupt_info,
+					  struct acpi_gpe_block_info *
+					  gpe_block, void *context);
 
 /* Information about each particular fixed event */
 
@@ -657,10 +658,11 @@
 };
 
 typedef
-acpi_status(*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
-				    union acpi_parse_object ** out_op);
+acpi_status (*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
+				     union acpi_parse_object ** out_op);
 
-typedef acpi_status(*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
+typedef
+acpi_status (*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
 
 /* Global handlers for AML Notifies */
 
@@ -700,7 +702,8 @@
  *
  ****************************************************************************/
 
-typedef acpi_status(*acpi_execute_op) (struct acpi_walk_state * walk_state);
+typedef
+acpi_status (*acpi_execute_op) (struct acpi_walk_state * walk_state);
 
 /* Address Range info block */
 
@@ -853,24 +856,24 @@
 
 /* Parse object flags */
 
-#define ACPI_PARSEOP_GENERIC            0x01
-#define ACPI_PARSEOP_NAMED              0x02
-#define ACPI_PARSEOP_DEFERRED           0x04
-#define ACPI_PARSEOP_BYTELIST           0x08
-#define ACPI_PARSEOP_IN_STACK           0x10
-#define ACPI_PARSEOP_TARGET             0x20
-#define ACPI_PARSEOP_IN_CACHE           0x80
+#define ACPI_PARSEOP_GENERIC                0x01
+#define ACPI_PARSEOP_NAMED_OBJECT           0x02
+#define ACPI_PARSEOP_DEFERRED               0x04
+#define ACPI_PARSEOP_BYTELIST               0x08
+#define ACPI_PARSEOP_IN_STACK               0x10
+#define ACPI_PARSEOP_TARGET                 0x20
+#define ACPI_PARSEOP_IN_CACHE               0x80
 
 /* Parse object disasm_flags */
 
-#define ACPI_PARSEOP_IGNORE             0x01
-#define ACPI_PARSEOP_PARAMLIST          0x02
-#define ACPI_PARSEOP_EMPTY_TERMLIST     0x04
-#define ACPI_PARSEOP_PREDEF_CHECKED     0x08
-#define ACPI_PARSEOP_CLOSING_PAREN      0x10
-#define ACPI_PARSEOP_COMPOUND           0x20
-#define ACPI_PARSEOP_ASSIGNMENT         0x40
-#define ACPI_PARSEOP_ELSEIF             0x80
+#define ACPI_PARSEOP_IGNORE                 0x01
+#define ACPI_PARSEOP_PARAMETER_LIST         0x02
+#define ACPI_PARSEOP_EMPTY_TERMLIST         0x04
+#define ACPI_PARSEOP_PREDEFINED_CHECKED     0x08
+#define ACPI_PARSEOP_CLOSING_PAREN          0x10
+#define ACPI_PARSEOP_COMPOUND_ASSIGNMENT    0x20
+#define ACPI_PARSEOP_ASSIGNMENT             0x40
+#define ACPI_PARSEOP_ELSEIF                 0x80
 
 /*****************************************************************************
  *
@@ -1096,6 +1099,7 @@
 #define ACPI_EXT_ORIGIN_FROM_FILE           0x02	/* External came from a file */
 #define ACPI_EXT_INTERNAL_PATH_ALLOCATED    0x04	/* Deallocate internal path on completion */
 #define ACPI_EXT_EXTERNAL_EMITTED           0x08	/* External() statement has been emitted */
+#define ACPI_EXT_ORIGIN_FROM_OPCODE         0x10	/* External came from an External() opcode */
 
 struct acpi_external_file {
 	char *path;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 411c18b..a3b9543 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -260,14 +260,31 @@
 
 #define ACPI_IS_MISALIGNED(value)           (((acpi_size) value) & (sizeof(acpi_size)-1))
 
+/* Generic (power-of-two) rounding */
+
+#define ACPI_IS_ALIGNED(a, s)               (((a) & ((s) - 1)) == 0)
+#define ACPI_IS_POWER_OF_TWO(a)             ACPI_IS_ALIGNED(a, a)
+
 /*
  * Bitmask creation
  * Bit positions start at zero.
  * MASK_BITS_ABOVE creates a mask starting AT the position and above
  * MASK_BITS_BELOW creates a mask starting one bit BELOW the position
+ * MASK_BITS_ABOVE/BELOW accepts a bit offset to create a mask
+ * MASK_BITS_ABOVE/BELOW_32/64 accepts a bit width to create a mask
+ * Note: The ACPI_INTEGER_BIT_SIZE check is used to bypass compiler
+ * differences with the shift operator
  */
 #define ACPI_MASK_BITS_ABOVE(position)      (~((ACPI_UINT64_MAX) << ((u32) (position))))
 #define ACPI_MASK_BITS_BELOW(position)      ((ACPI_UINT64_MAX) << ((u32) (position)))
+#define ACPI_MASK_BITS_ABOVE_32(width)      ((u32) ACPI_MASK_BITS_ABOVE(width))
+#define ACPI_MASK_BITS_BELOW_32(width)      ((u32) ACPI_MASK_BITS_BELOW(width))
+#define ACPI_MASK_BITS_ABOVE_64(width)      ((width) == ACPI_INTEGER_BIT_SIZE ? \
+												ACPI_UINT64_MAX : \
+												ACPI_MASK_BITS_ABOVE(width))
+#define ACPI_MASK_BITS_BELOW_64(width)      ((width) == ACPI_INTEGER_BIT_SIZE ? \
+												(u64) 0 : \
+												ACPI_MASK_BITS_BELOW(width))
 
 /* Bitfields within ACPI registers */
 
@@ -283,10 +300,10 @@
 /* Generic bitfield macros and masks */
 
 #define ACPI_GET_BITS(source_ptr, position, mask) \
-	((*source_ptr >> position) & mask)
+	((*(source_ptr) >> (position)) & (mask))
 
 #define ACPI_SET_BITS(target_ptr, position, mask, value) \
-	(*target_ptr |= ((value & mask) << position))
+	(*(target_ptr) |= (((value) & (mask)) << (position)))
 
 #define ACPI_1BIT_MASK      0x00000001
 #define ACPI_2BIT_MASK      0x00000003
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 022d69c..f33a4ba 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -206,9 +206,10 @@
 void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level);
 
 void
-acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component);
+acpi_ns_dump_pathname(acpi_handle handle,
+		      const char *msg, u32 level, u32 component);
 
-void acpi_ns_print_pathname(u32 num_segments, char *pathname);
+void acpi_ns_print_pathname(u32 num_segments, const char *pathname);
 
 acpi_status
 acpi_ns_dump_one_object(acpi_handle obj_handle,
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 7da639d..fc30577 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -139,7 +139,7 @@
  */
 const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode);
 
-char *acpi_ps_get_opcode_name(u16 opcode);
+const char *acpi_ps_get_opcode_name(u16 opcode);
 
 u8 acpi_ps_get_argument_count(u32 op_type);
 
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 5faeab4..888440b 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -129,7 +129,8 @@
 	ACPI_PTYPE2_REV_FIXED = 9,
 	ACPI_PTYPE2_FIX_VAR = 10,
 	ACPI_PTYPE2_VAR_VAR = 11,
-	ACPI_PTYPE2_UUID_PAIR = 12
+	ACPI_PTYPE2_UUID_PAIR = 12,
+	ACPI_PTYPE_CUSTOM = 13
 };
 
 /* Support macros for users of the predefined info table */
@@ -340,7 +341,7 @@
 
 	{{"_BIX", METHOD_0ARGS,
 	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (16 Int),(4 Str) */
-	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,
+	PACKAGE_INFO(ACPI_PTYPE_CUSTOM, ACPI_RTYPE_INTEGER, 16,
 		     ACPI_RTYPE_STRING, 4, 0),
 
 	{{"_BLT",
@@ -523,6 +524,9 @@
 	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (4 Int) */
 	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
 
+	{{"_FIT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},	/* ACPI 6.0 */
+
 	{{"_FIX", METHOD_0ARGS,
 	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Ints) */
 	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
@@ -1053,6 +1057,12 @@
 	  METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING |
 			 ACPI_RTYPE_BUFFER)}},
 
+	{{"_WPC", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},	/* ACPI 6.1 */
+
+	{{"_WPP", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},	/* ACPI 6.1 */
+
 	PACKAGE_INFO(0, 0, 0, 0, 0, 0)	/* Table terminator */
 };
 #else
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 5dd58be..63da1e3 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -124,7 +124,7 @@
 typedef const struct acpi_rsdump_info {
 	u8 opcode;
 	u8 offset;
-	char *name;
+	const char *name;
 	const char **pointer;
 
 } acpi_rsdump_info;
@@ -209,7 +209,7 @@
 
 acpi_status
 acpi_rs_get_method_data(acpi_handle handle,
-			char *path, struct acpi_buffer *ret_buffer);
+			const char *path, struct acpi_buffer *ret_buffer);
 
 acpi_status
 acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
@@ -223,16 +223,16 @@
  * rscalc
  */
 acpi_status
-acpi_rs_get_list_length(u8 * aml_buffer,
-			u32 aml_buffer_length, acpi_size * size_needed);
+acpi_rs_get_list_length(u8 *aml_buffer,
+			u32 aml_buffer_length, acpi_size *size_needed);
 
 acpi_status
 acpi_rs_get_aml_length(struct acpi_resource *resource_list,
-		       acpi_size resource_list_size, acpi_size * size_needed);
+		       acpi_size resource_list_size, acpi_size *size_needed);
 
 acpi_status
 acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
-				     acpi_size * buffer_size_needed);
+				     acpi_size *buffer_size_needed);
 
 acpi_status
 acpi_rs_convert_aml_to_resources(u8 * aml,
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index b3b386e..6235642 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -184,7 +184,7 @@
 	/* The first 3 elements are passed by the caller to acpi_ns_evaluate */
 
 	struct acpi_namespace_node *prefix_node;	/* Input: starting node */
-	char *relative_pathname;	/* Input: path relative to prefix_node */
+	const char *relative_pathname;	/* Input: path relative to prefix_node */
 	union acpi_operand_object **parameters;	/* Input: argument list */
 
 	struct acpi_namespace_node *node;	/* Resolved node (prefix_node:relative_pathname) */
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 848ad3a..cd5a135 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -161,8 +161,6 @@
 
 acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
 
-u8 acpi_is_valid_signature(char *signature);
-
 /*
  * tbxfload
  */
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index e43ab6f..a7dbb2b 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -136,16 +136,16 @@
 #define ACPI_SMALL_VARIABLE_LENGTH      3
 
 typedef
-acpi_status(*acpi_walk_aml_callback) (u8 *aml,
-				      u32 length,
-				      u32 offset,
-				      u8 resource_index, void **context);
+acpi_status (*acpi_walk_aml_callback) (u8 *aml,
+				       u32 length,
+				       u32 offset,
+				       u8 resource_index, void **context);
 
 typedef
-acpi_status(*acpi_pkg_callback) (u8 object_type,
-				 union acpi_operand_object *source_object,
-				 union acpi_generic_state * state,
-				 void *context);
+acpi_status (*acpi_pkg_callback) (u8 object_type,
+				  union acpi_operand_object * source_object,
+				  union acpi_generic_state * state,
+				  void *context);
 
 struct acpi_pkg_info {
 	u8 *free_space;
@@ -167,6 +167,15 @@
 #define DB_QWORD_DISPLAY    8
 
 /*
+ * utascii - ASCII utilities
+ */
+u8 acpi_ut_valid_nameseg(char *signature);
+
+u8 acpi_ut_valid_name_char(char character, u32 position);
+
+void acpi_ut_check_and_repair_ascii(u8 *name, char *repaired_name, u32 count);
+
+/*
  * utnonansi - Non-ANSI C library functions
  */
 void acpi_ut_strupr(char *src_string);
@@ -175,7 +184,14 @@
 
 int acpi_ut_stricmp(char *string1, char *string2);
 
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
+acpi_status
+acpi_ut_strtoul64(char *string,
+		  u32 base, u32 max_integer_byte_width, u64 *ret_integer);
+
+/* Values for max_integer_byte_width above */
+
+#define ACPI_MAX32_BYTE_WIDTH       4
+#define ACPI_MAX64_BYTE_WIDTH       8
 
 /*
  * utglobal - Global data structures and procedures
@@ -266,7 +282,8 @@
 void
 acpi_ut_trace_ptr(u32 line_number,
 		  const char *function_name,
-		  const char *module_name, u32 component_id, void *pointer);
+		  const char *module_name,
+		  u32 component_id, const void *pointer);
 
 void
 acpi_ut_trace_u32(u32 line_number,
@@ -276,7 +293,8 @@
 void
 acpi_ut_trace_str(u32 line_number,
 		  const char *function_name,
-		  const char *module_name, u32 component_id, char *string);
+		  const char *module_name,
+		  u32 component_id, const char *string);
 
 void
 acpi_ut_exit(u32 line_number,
@@ -335,12 +353,12 @@
  */
 acpi_status
 acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
-			char *path,
+			const char *path,
 			u32 expected_return_btypes,
 			union acpi_operand_object **return_desc);
 
 acpi_status
-acpi_ut_evaluate_numeric_object(char *object_name,
+acpi_ut_evaluate_numeric_object(const char *object_name,
 				struct acpi_namespace_node *device_node,
 				u64 *value);
 
@@ -415,7 +433,7 @@
 union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size);
 
 acpi_status
-acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
+acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size *obj_length);
 
 /*
  * utosi - Support for the _OSI predefined control method
@@ -526,15 +544,15 @@
 void
 acpi_ut_display_init_pathname(u8 type,
 			      struct acpi_namespace_node *obj_handle,
-			      char *path);
+			      const char *path);
 #endif
 
 /*
  * utownerid - Support for Table/Method Owner IDs
  */
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id);
 
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
+void acpi_ut_release_owner_id(acpi_owner_id *owner_id);
 
 /*
  * utresrc
@@ -570,10 +588,6 @@
 void ut_convert_backslashes(char *pathname);
 #endif
 
-u8 acpi_ut_valid_acpi_name(char *name);
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position);
-
 void acpi_ut_repair_name(char *name);
 
 #if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
@@ -628,7 +642,7 @@
 void acpi_ut_dump_allocations(u32 component, const char *module);
 
 acpi_status
-acpi_ut_create_list(char *list_name,
+acpi_ut_create_list(const char *list_name,
 		    u16 object_size, struct acpi_memory_list **return_cache);
 
 #endif				/* ACPI_DBG_TRACK_ALLOCATIONS */
diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c
index 772178c..62bd446 100644
--- a/drivers/acpi/acpica/dbcmds.c
+++ b/drivers/acpi/acpica/dbcmds.c
@@ -738,9 +738,9 @@
 	original_aml = return_buffer.pointer;
 
 	acpi_dm_compare_aml_resources(original_aml->buffer.pointer,
-				      (acpi_rsdesc_size) original_aml->buffer.
+				      (acpi_rsdesc_size)original_aml->buffer.
 				      length, new_aml.pointer,
-				      (acpi_rsdesc_size) new_aml.length);
+				      (acpi_rsdesc_size)new_aml.length);
 
 	/* Cleanup and exit */
 
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 68f4e0f4..7cd07b2 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -194,7 +194,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_db_convert_to_package(char *string, union acpi_object * object)
+acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object)
 {
 	char *this;
 	char *next;
@@ -252,7 +252,7 @@
 
 acpi_status
 acpi_db_convert_to_object(acpi_object_type type,
-			  char *string, union acpi_object * object)
+			  char *string, union acpi_object *object)
 {
 	acpi_status status = AE_OK;
 
@@ -277,7 +277,9 @@
 	default:
 
 		object->type = ACPI_TYPE_INTEGER;
-		status = acpi_ut_strtoul64(string, 16, &object->integer.value);
+		status =
+		    acpi_ut_strtoul64(string, 16, acpi_gbl_integer_byte_width,
+				      &object->integer.value);
 		break;
 	}
 
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index c814855..12df291 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -361,7 +361,7 @@
  ******************************************************************************/
 
 void
-acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags)
+acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags)
 {
 	acpi_status status;
 	struct acpi_buffer return_obj;
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 417c02a..7cd5d2e 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -57,12 +57,12 @@
 
 static u32 acpi_db_match_command(char *user_command);
 
-static void acpi_db_display_command_info(char *command, u8 display_all);
+static void acpi_db_display_command_info(const char *command, u8 display_all);
 
 static void acpi_db_display_help(char *command);
 
 static u8
-acpi_db_match_command_help(char *command,
+acpi_db_match_command_help(const char *command,
 			   const struct acpi_db_command_help *help);
 
 /*
@@ -348,7 +348,7 @@
  ******************************************************************************/
 
 static u8
-acpi_db_match_command_help(char *command,
+acpi_db_match_command_help(const char *command,
 			   const struct acpi_db_command_help *help)
 {
 	char *invocation = help->invocation;
@@ -402,7 +402,7 @@
  *
  ******************************************************************************/
 
-static void acpi_db_display_command_info(char *command, u8 display_all)
+static void acpi_db_display_command_info(const char *command, u8 display_all)
 {
 	const struct acpi_db_command_help *next;
 	u8 matched;
@@ -466,7 +466,7 @@
  ******************************************************************************/
 
 char *acpi_db_get_next_token(char *string,
-			     char **next, acpi_object_type * return_type)
+			     char **next, acpi_object_type *return_type)
 {
 	char *start;
 	u32 depth;
@@ -656,8 +656,9 @@
 	}
 
 	for (i = CMD_FIRST_VALID; acpi_gbl_db_commands[i].name; i++) {
-		if (strstr(acpi_gbl_db_commands[i].name, user_command) ==
-		    acpi_gbl_db_commands[i].name) {
+		if (strstr
+		    (ACPI_CAST_PTR(char, acpi_gbl_db_commands[i].name),
+		     user_command) == acpi_gbl_db_commands[i].name) {
 			return (i);
 		}
 	}
@@ -683,8 +684,8 @@
 
 acpi_status
 acpi_db_command_dispatch(char *input_buffer,
-			 struct acpi_walk_state * walk_state,
-			 union acpi_parse_object * op)
+			 struct acpi_walk_state *walk_state,
+			 union acpi_parse_object *op)
 {
 	u32 temp;
 	u32 command_index;
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 3c23b5a..8667f14 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -285,7 +285,7 @@
 	u32 max_depth = ACPI_UINT32_MAX;
 	acpi_owner_id owner_id;
 
-	owner_id = (acpi_owner_id) strtoul(owner_arg, NULL, 0);
+	owner_id = (acpi_owner_id)strtoul(owner_arg, NULL, 0);
 
 	/* Now we can check for the depth argument */
 
@@ -709,7 +709,7 @@
 		return (AE_OK);
 	}
 
-	if (!acpi_ut_valid_acpi_name(node->name.ascii)) {
+	if (!acpi_ut_valid_nameseg(node->name.ascii)) {
 		acpi_os_printf("Invalid AcpiName for Node %p\n", node);
 		return (AE_OK);
 	}
diff --git a/drivers/acpi/acpica/dbutils.c b/drivers/acpi/acpica/dbutils.c
index b37a2c7..ae80106 100644
--- a/drivers/acpi/acpica/dbutils.c
+++ b/drivers/acpi/acpica/dbutils.c
@@ -56,8 +56,6 @@
 void acpi_db_dump_buffer(u32 address);
 #endif
 
-static char *gbl_hex_to_ascii = "0123456789ABCDEF";
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_db_match_argument
@@ -82,8 +80,9 @@
 	}
 
 	for (i = 0; arguments[i].name; i++) {
-		if (strstr(arguments[i].name, user_argument) ==
-		    arguments[i].name) {
+		if (strstr(ACPI_CAST_PTR(char, arguments[i].name),
+			   ACPI_CAST_PTR(char,
+					 user_argument)) == arguments[i].name) {
 			return (i);
 		}
 	}
@@ -339,7 +338,7 @@
 	buffer[8] = '\0';
 
 	for (i = 7; i >= 0; i--) {
-		buffer[i] = gbl_hex_to_ascii[value & 0x0F];
+		buffer[i] = acpi_gbl_upper_hex_digits[value & 0x0F];
 		value = value >> 4;
 	}
 }
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index e94e0d8..124db23 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -162,8 +162,8 @@
  ******************************************************************************/
 
 acpi_status
-acpi_db_single_step(struct acpi_walk_state * walk_state,
-		    union acpi_parse_object * op, u32 opcode_class)
+acpi_db_single_step(struct acpi_walk_state *walk_state,
+		    union acpi_parse_object *op, u32 opcode_class)
 {
 	union acpi_parse_object *next;
 	acpi_status status = AE_OK;
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index c9a663f..4ddcbf1 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -163,8 +163,8 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
-			    union acpi_parse_object * op)
+acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
+			    union acpi_parse_object *op)
 {
 	acpi_status status = AE_OK;
 	union acpi_generic_state *control_state;
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 5aa1c5f..f1e6dcc 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -188,7 +188,7 @@
 
 acpi_status
 acpi_ds_initialize_objects(u32 table_index,
-			   struct acpi_namespace_node * start_node)
+			   struct acpi_namespace_node *start_node)
 {
 	acpi_status status;
 	struct acpi_init_walk_info info;
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 1982310..47c7b52 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -209,7 +209,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
+acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
 {
 	u32 aml_offset;
 
@@ -428,6 +428,9 @@
 				obj_desc->method.mutex->mutex.
 				    original_sync_level =
 				    obj_desc->method.mutex->mutex.sync_level;
+
+				obj_desc->method.mutex->mutex.thread_id =
+				    acpi_os_get_thread_id();
 			}
 		}
 
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 8ca9416..f393de9 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -569,7 +569,7 @@
 					/* TBD: May only be temporary */
 
 					obj_desc =
-					    acpi_ut_create_string_object((acpi_size) name_length);
+					    acpi_ut_create_string_object((acpi_size)name_length);
 
 					strncpy(obj_desc->string.pointer,
 						name_string, name_length);
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index d1cedcf..fd34040 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -137,8 +137,8 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
-		       union acpi_parse_object ** out_op)
+acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
+		       union acpi_parse_object **out_op)
 {
 	union acpi_parse_object *op;
 	struct acpi_namespace_node *node;
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 0bac6e1..762db3f 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -490,8 +490,8 @@
 
 			status =
 			    acpi_ds_create_index_field(op,
-						       (acpi_handle) arg->
-						       common.node, walk_state);
+						       (acpi_handle)arg->common.
+						       node, walk_state);
 			break;
 
 		case AML_BANK_FIELD_OP:
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 3a26ddb..e333869 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -143,8 +143,8 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ds_result_push(union acpi_operand_object * object,
-		    struct acpi_walk_state * walk_state)
+acpi_ds_result_push(union acpi_operand_object *object,
+		    struct acpi_walk_state *walk_state)
 {
 	union acpi_generic_state *state;
 	acpi_status status;
@@ -307,7 +307,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
+acpi_ds_obj_stack_push(void *object, struct acpi_walk_state *walk_state)
 {
 	ACPI_FUNCTION_NAME(ds_obj_stack_push);
 
@@ -354,7 +354,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
+acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state)
 {
 	u32 i;
 
@@ -411,7 +411,7 @@
 		return;
 	}
 
-	for (i = (s32) pop_count - 1; i >= 0; i--) {
+	for (i = (s32)pop_count - 1; i >= 0; i--) {
 		if (walk_state->num_operands == 0) {
 			return;
 		}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index b47e62aaf..4b4949c 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -440,7 +440,7 @@
 
 				gpe_event_info =
 				    &gpe_block->
-				    event_info[((acpi_size) i *
+				    event_info[((acpi_size)i *
 						ACPI_GPE_REGISTER_WIDTH) + j];
 				gpe_number =
 				    j + gpe_register_info->base_gpe_number;
@@ -652,7 +652,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info * gpe_event_info)
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
 {
 	acpi_status status;
 
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 447fa1c..d54014c 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -211,7 +211,7 @@
 
 	/* Allocate the GPE register information block */
 
-	gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
+	gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->
 						 register_count *
 						 sizeof(struct
 							acpi_gpe_register_info));
@@ -225,7 +225,7 @@
 	 * Allocate the GPE event_info block. There are eight distinct GPEs
 	 * per register. Initialization to zeros is sufficient.
 	 */
-	gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
+	gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->gpe_count *
 					      sizeof(struct
 						     acpi_gpe_event_info));
 	if (!gpe_event_info) {
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 66c4b5b..3f150d5 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -163,7 +163,7 @@
 
 acpi_status
 acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
-			    struct acpi_gpe_xrupt_info ** gpe_xrupt_block)
+			    struct acpi_gpe_xrupt_info **gpe_xrupt_block)
 {
 	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
 	struct acpi_gpe_xrupt_info *gpe_xrupt;
@@ -320,7 +320,7 @@
 		/* Now look at the individual GPEs in this byte register */
 
 		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
-			gpe_event_info = &gpe_block->event_info[((acpi_size) i *
+			gpe_event_info = &gpe_block->event_info[((acpi_size)i *
 								 ACPI_GPE_REGISTER_WIDTH)
 								+ j];
 
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 0f6be89..24768ca 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -359,7 +359,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ev_install_space_handler(struct acpi_namespace_node * node,
+acpi_ev_install_space_handler(struct acpi_namespace_node *node,
 			      acpi_adr_space_type space_id,
 			      acpi_adr_space_handler handler,
 			      acpi_adr_space_setup setup, void *context)
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index c67d78c..f51d43a 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -99,8 +99,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
-			     u32 notify_value)
+acpi_ev_queue_notify_request(struct acpi_namespace_node *node, u32 notify_value)
 {
 	union acpi_operand_object *obj_desc;
 	union acpi_operand_object *handler_list_head = NULL;
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 63924d1..4c6f795 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -526,52 +526,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ev_associate_reg_method
- *
- * PARAMETERS:  region_obj          - Region object
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Find and associate _REG method to a region
- *
- ******************************************************************************/
-
-void acpi_ev_associate_reg_method(union acpi_operand_object *region_obj)
-{
-	acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
-	struct acpi_namespace_node *method_node;
-	struct acpi_namespace_node *node;
-	union acpi_operand_object *region_obj2;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ev_associate_reg_method);
-
-	region_obj2 = acpi_ns_get_secondary_object(region_obj);
-	if (!region_obj2) {
-		return_VOID;
-	}
-
-	node = region_obj->region.node->parent;
-
-	/* Find any "_REG" method associated with this region definition */
-
-	status =
-	    acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
-				     &method_node);
-	if (ACPI_SUCCESS(status)) {
-		/*
-		 * The _REG method is optional and there can be only one per region
-		 * definition. This will be executed when the handler is attached
-		 * or removed
-		 */
-		region_obj2->extra.method_REG = method_node;
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ev_execute_reg_method
  *
  * PARAMETERS:  region_obj          - Region object
@@ -589,18 +543,42 @@
 	struct acpi_evaluate_info *info;
 	union acpi_operand_object *args[3];
 	union acpi_operand_object *region_obj2;
+	const acpi_name *reg_name_ptr =
+	    ACPI_CAST_PTR(acpi_name, METHOD_NAME__REG);
+	struct acpi_namespace_node *method_node;
+	struct acpi_namespace_node *node;
 	acpi_status status;
 
 	ACPI_FUNCTION_TRACE(ev_execute_reg_method);
 
+	if (!acpi_gbl_namespace_initialized ||
+	    region_obj->region.handler == NULL) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	region_obj2 = acpi_ns_get_secondary_object(region_obj);
 	if (!region_obj2) {
 		return_ACPI_STATUS(AE_NOT_EXIST);
 	}
 
-	if (region_obj2->extra.method_REG == NULL ||
-	    region_obj->region.handler == NULL ||
-	    !acpi_gbl_namespace_initialized) {
+	/*
+	 * Find any "_REG" method associated with this region definition.
+	 * The method should always be updated as this function may be
+	 * invoked after a namespace change.
+	 */
+	node = region_obj->region.node->parent;
+	status =
+	    acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
+				     &method_node);
+	if (ACPI_SUCCESS(status)) {
+		/*
+		 * The _REG method is optional and there can be only one per
+		 * region definition. This will be executed when the handler is
+		 * attached or removed.
+		 */
+		region_obj2->extra.method_REG = method_node;
+	}
+	if (region_obj2->extra.method_REG == NULL) {
 		return_ACPI_STATUS(AE_OK);
 	}
 
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index fda869c..b6ea9c0 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -227,7 +227,7 @@
 
 				/* Install a handler for this PCI root bridge */
 
-				status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
+				status = acpi_install_address_space_handler((acpi_handle)pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
 				if (ACPI_FAILURE(status)) {
 					if (status == AE_SAME_HANDLER) {
 						/*
@@ -518,7 +518,6 @@
 		return_ACPI_STATUS(AE_OK);
 	}
 
-	acpi_ev_associate_reg_method(region_obj);
 	region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED;
 
 	node = region_obj->region.node->parent;
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 9045671..17cfef7 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -917,7 +917,7 @@
  *              the FADT-defined gpe blocks. Otherwise, the GPE block device.
  *
  ******************************************************************************/
-acpi_status acpi_get_gpe_device(u32 index, acpi_handle * gpe_device)
+acpi_status acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
 {
 	struct acpi_gpe_device_info info;
 	acpi_status status;
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
new file mode 100644
index 0000000..2423fe0
--- /dev/null
+++ b/drivers/acpi/acpica/exconcat.c
@@ -0,0 +1,439 @@
+/******************************************************************************
+ *
+ * Module Name: exconcat - Concatenate-type AML operators
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2016, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlresrc.h"
+
+#define _COMPONENT          ACPI_EXECUTER
+ACPI_MODULE_NAME("exconcat")
+
+/* Local Prototypes */
+static acpi_status
+acpi_ex_convert_to_object_type_string(union acpi_operand_object *obj_desc,
+				      union acpi_operand_object **result_desc);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_do_concatenate
+ *
+ * PARAMETERS:  operand0            - First source object
+ *              operand1            - Second source object
+ *              actual_return_desc  - Where to place the return object
+ *              walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Concatenate two objects with the ACPI-defined conversion
+ *              rules as necessary.
+ * NOTE:
+ * Per the ACPI spec (up to 6.1), Concatenate only supports Integer,
+ * String, and Buffer objects. However, we support all objects here
+ * as an extension. This improves the usefulness of both Concatenate
+ * and the Printf/Fprintf macros. The extension returns a string
+ * describing the object type for the other objects.
+ * 02/2016.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_do_concatenate(union acpi_operand_object *operand0,
+		       union acpi_operand_object *operand1,
+		       union acpi_operand_object **actual_return_desc,
+		       struct acpi_walk_state *walk_state)
+{
+	union acpi_operand_object *local_operand0 = operand0;
+	union acpi_operand_object *local_operand1 = operand1;
+	union acpi_operand_object *temp_operand1 = NULL;
+	union acpi_operand_object *return_desc;
+	char *buffer;
+	acpi_object_type operand0_type;
+	acpi_object_type operand1_type;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ex_do_concatenate);
+
+	/* Operand 0 preprocessing */
+
+	switch (operand0->common.type) {
+	case ACPI_TYPE_INTEGER:
+	case ACPI_TYPE_STRING:
+	case ACPI_TYPE_BUFFER:
+
+		operand0_type = operand0->common.type;
+		break;
+
+	default:
+
+		/* For all other types, get the "object type" string */
+
+		status =
+		    acpi_ex_convert_to_object_type_string(operand0,
+							  &local_operand0);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		operand0_type = ACPI_TYPE_STRING;
+		break;
+	}
+
+	/* Operand 1 preprocessing */
+
+	switch (operand1->common.type) {
+	case ACPI_TYPE_INTEGER:
+	case ACPI_TYPE_STRING:
+	case ACPI_TYPE_BUFFER:
+
+		operand1_type = operand1->common.type;
+		break;
+
+	default:
+
+		/* For all other types, get the "object type" string */
+
+		status =
+		    acpi_ex_convert_to_object_type_string(operand1,
+							  &local_operand1);
+		if (ACPI_FAILURE(status)) {
+			goto cleanup;
+		}
+
+		operand1_type = ACPI_TYPE_STRING;
+		break;
+	}
+
+	/*
+	 * Convert the second operand if necessary. The first operand (0)
+	 * determines the type of the second operand (1) (See the Data Types
+	 * section of the ACPI specification). Both object types are
+	 * guaranteed to be either Integer/String/Buffer by the operand
+	 * resolution mechanism.
+	 */
+	switch (operand0_type) {
+	case ACPI_TYPE_INTEGER:
+
+		status =
+		    acpi_ex_convert_to_integer(local_operand1, &temp_operand1,
+					       16);
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		status =
+		    acpi_ex_convert_to_buffer(local_operand1, &temp_operand1);
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		switch (operand1_type) {
+		case ACPI_TYPE_INTEGER:
+		case ACPI_TYPE_STRING:
+		case ACPI_TYPE_BUFFER:
+
+			/* Other types have already been converted to string */
+
+			status =
+			    acpi_ex_convert_to_string(local_operand1,
+						      &temp_operand1,
+						      ACPI_IMPLICIT_CONVERT_HEX);
+			break;
+
+		default:
+
+			status = AE_OK;
+			break;
+		}
+		break;
+
+	default:
+
+		ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
+			    operand0->common.type));
+		status = AE_AML_INTERNAL;
+	}
+
+	if (ACPI_FAILURE(status)) {
+		goto cleanup;
+	}
+
+	/* Take care with any newly created operand objects */
+
+	if ((local_operand1 != operand1) && (local_operand1 != temp_operand1)) {
+		acpi_ut_remove_reference(local_operand1);
+	}
+
+	local_operand1 = temp_operand1;
+
+	/*
+	 * Both operands are now known to be the same object type
+	 * (Both are Integer, String, or Buffer), and we can now perform
+	 * the concatenation.
+	 *
+	 * There are three cases to handle, as per the ACPI spec:
+	 *
+	 * 1) Two Integers concatenated to produce a new Buffer
+	 * 2) Two Strings concatenated to produce a new String
+	 * 3) Two Buffers concatenated to produce a new Buffer
+	 */
+	switch (operand0_type) {
+	case ACPI_TYPE_INTEGER:
+
+		/* Result of two Integers is a Buffer */
+		/* Need enough buffer space for two integers */
+
+		return_desc = acpi_ut_create_buffer_object((acpi_size)
+							   ACPI_MUL_2
+							   (acpi_gbl_integer_byte_width));
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		buffer = (char *)return_desc->buffer.pointer;
+
+		/* Copy the first integer, LSB first */
+
+		memcpy(buffer, &operand0->integer.value,
+		       acpi_gbl_integer_byte_width);
+
+		/* Copy the second integer (LSB first) after the first */
+
+		memcpy(buffer + acpi_gbl_integer_byte_width,
+		       &local_operand1->integer.value,
+		       acpi_gbl_integer_byte_width);
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		/* Result of two Strings is a String */
+
+		return_desc = acpi_ut_create_string_object(((acpi_size)
+							    local_operand0->
+							    string.length +
+							    local_operand1->
+							    string.length));
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		buffer = return_desc->string.pointer;
+
+		/* Concatenate the strings */
+
+		strcpy(buffer, local_operand0->string.pointer);
+		strcat(buffer, local_operand1->string.pointer);
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		/* Result of two Buffers is a Buffer */
+
+		return_desc = acpi_ut_create_buffer_object(((acpi_size)
+							    operand0->buffer.
+							    length +
+							    local_operand1->
+							    buffer.length));
+		if (!return_desc) {
+			status = AE_NO_MEMORY;
+			goto cleanup;
+		}
+
+		buffer = (char *)return_desc->buffer.pointer;
+
+		/* Concatenate the buffers */
+
+		memcpy(buffer, operand0->buffer.pointer,
+		       operand0->buffer.length);
+		memcpy(buffer + operand0->buffer.length,
+		       local_operand1->buffer.pointer,
+		       local_operand1->buffer.length);
+		break;
+
+	default:
+
+		/* Invalid object type, should not happen here */
+
+		ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
+			    operand0->common.type));
+		status = AE_AML_INTERNAL;
+		goto cleanup;
+	}
+
+	*actual_return_desc = return_desc;
+
+cleanup:
+	if (local_operand0 != operand0) {
+		acpi_ut_remove_reference(local_operand0);
+	}
+
+	if (local_operand1 != operand1) {
+		acpi_ut_remove_reference(local_operand1);
+	}
+
+	return_ACPI_STATUS(status);
+}
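
The Integer branch above lays both operand values out LSB-first in a buffer sized at twice the integer byte width. A minimal standalone sketch of that layout (plain C, independent of the ACPICA object types; the fixed 8-byte width and the little-endian host are assumptions standing in for acpi_gbl_integer_byte_width and the interpreter's environment):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t operand0 = 0x1122334455667788ULL;
	uint64_t operand1 = 0xAABBCCDDEEFF0011ULL;
	uint8_t result[2 * sizeof(uint64_t)];	/* 2 * integer byte width */
	size_t i;

	/* First integer: on a little-endian host this copy is LSB first */
	memcpy(result, &operand0, sizeof(operand0));

	/* Second integer (LSB first) immediately after the first */
	memcpy(result + sizeof(operand0), &operand1, sizeof(operand1));

	for (i = 0; i < sizeof(result); i++)
		printf("%02X ", result[i]);
	printf("\n");	/* 88 77 66 55 44 33 22 11 11 00 FF EE DD CC BB AA */
	return 0;
}
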
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_convert_to_object_type_string
+ *
+ * PARAMETERS:  obj_desc            - Object to be converted
+ *              return_desc         - Where to place the return object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert an object of arbitrary type to a string object that
+ *              contains the object's type name ("[<Type> Object]"). Used
+ *              for the concatenate operator.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_convert_to_object_type_string(union acpi_operand_object *obj_desc,
+				      union acpi_operand_object **result_desc)
+{
+	union acpi_operand_object *return_desc;
+	const char *type_string;
+
+	type_string = acpi_ut_get_type_name(obj_desc->common.type);
+
+	return_desc = acpi_ut_create_string_object(((acpi_size)strlen(type_string) + 9));	/* 9 For "[ Object]" */
+	if (!return_desc) {
+		return (AE_NO_MEMORY);
+	}
+
+	strcpy(return_desc->string.pointer, "[");
+	strcat(return_desc->string.pointer, type_string);
+	strcat(return_desc->string.pointer, " Object]");
+
+	*result_desc = return_desc;
+	return (AE_OK);
+}
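
A small sketch of what the helper above builds and why 9 extra bytes are enough: one byte for "[" plus eight for " Object]", with the string-object constructor assumed to add room for the terminating NUL. Plain C with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "[<type> Object]"; the extra 9 bytes cover "[" plus " Object]". */
static char *type_to_object_string(const char *type_name)
{
	size_t len = strlen(type_name) + 9;
	char *buf = calloc(1, len + 1);	/* + 1 for the NUL terminator */

	if (!buf)
		return NULL;

	strcpy(buf, "[");
	strcat(buf, type_name);
	strcat(buf, " Object]");
	return buf;
}

int main(void)
{
	char *s = type_to_object_string("Mutex");

	if (s) {
		puts(s);	/* prints: [Mutex Object] */
		free(s);
	}
	return 0;
}
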
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_concat_template
+ *
+ * PARAMETERS:  operand0            - First source object
+ *              operand1            - Second source object
+ *              actual_return_desc  - Where to place the return object
+ *              walk_state          - Current walk state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Concatenate two resource templates
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_concat_template(union acpi_operand_object *operand0,
+			union acpi_operand_object *operand1,
+			union acpi_operand_object **actual_return_desc,
+			struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	union acpi_operand_object *return_desc;
+	u8 *new_buf;
+	u8 *end_tag;
+	acpi_size length0;
+	acpi_size length1;
+	acpi_size new_length;
+
+	ACPI_FUNCTION_TRACE(ex_concat_template);
+
+	/*
+	 * Find the end_tag descriptor in each resource template.
+	 * Note1: returned pointers point TO the end_tag, not past it.
+	 * Note2: zero-length buffers are allowed; treated like one end_tag
+	 */
+
+	/* Get the length of the first resource template */
+
+	status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);
+
+	/* Get the length of the second resource template */
+
+	status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);
+
+	/* Combine both lengths, minimum size will be 2 for end_tag */
+
+	new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);
+
+	/* Create a new buffer object for the result (with one end_tag) */
+
+	return_desc = acpi_ut_create_buffer_object(new_length);
+	if (!return_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/*
+	 * Copy the templates to the new buffer, 0 first, then 1 follows. One
+	 * end_tag descriptor is copied from Operand1.
+	 */
+	new_buf = return_desc->buffer.pointer;
+	memcpy(new_buf, operand0->buffer.pointer, length0);
+	memcpy(new_buf + length0, operand1->buffer.pointer, length1);
+
+	/* Insert end_tag and set the checksum to zero, meaning "ignore checksum" */
+
+	new_buf[new_length - 1] = 0;
+	new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;
+
+	/* Return the completed resource template */
+
+	*actual_return_desc = return_desc;
+	return_ACPI_STATUS(AE_OK);
+}
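
For illustration, a self-contained sketch of the same template-joining idea: both bodies (lengths excluding their end tags) are copied back to back and a single two-byte end tag is appended, descriptor byte 0x79 (small end tag, length 1) followed by a zero checksum meaning "ignore checksum". The example descriptor bytes below are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Join two resource-template bodies and terminate with one end tag. */
static size_t concat_templates(const uint8_t *t0, size_t len0,
			       const uint8_t *t1, size_t len1, uint8_t *out)
{
	size_t new_len = len0 + len1 + 2;

	memcpy(out, t0, len0);
	memcpy(out + len0, t1, len1);
	out[new_len - 2] = 0x79;	/* small end tag descriptor */
	out[new_len - 1] = 0x00;	/* checksum: 0 = ignore */
	return new_len;
}

int main(void)
{
	const uint8_t tmpl0[] = { 0x22, 0x20, 0x00 };	/* illustrative bytes */
	const uint8_t tmpl1[] = { 0x47, 0x01, 0x60, 0x00, 0x60, 0x00, 0x01, 0x01 };
	uint8_t out[32];
	size_t i, n;

	n = concat_templates(tmpl0, sizeof(tmpl0), tmpl1, sizeof(tmpl1), out);
	for (i = 0; i < n; i++)
		printf("%02X ", out[i]);
	printf("\n");
	return 0;
}
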
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index f741613..a1d177d 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -118,7 +118,9 @@
 	/* Execute any module-level code that was found in the table */
 
 	acpi_ex_exit_interpreter();
-	acpi_ns_exec_module_code_list();
+	if (acpi_gbl_group_module_level_code) {
+		acpi_ns_exec_module_code_list();
+	}
 	acpi_ex_enter_interpreter();
 
 	/*
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 0b9f2c1..b7e9b3d 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -124,7 +124,9 @@
 		 * of ACPI 3.0) is that the to_integer() operator allows both decimal
 		 * and hexadecimal strings (hex prefixed with "0x").
 		 */
-		status = acpi_ut_strtoul64((char *)pointer, flags, &result);
+		status = acpi_ut_strtoul64((char *)pointer, flags,
+					   acpi_gbl_integer_byte_width,
+					   &result);
 		if (ACPI_FAILURE(status)) {
 			return_ACPI_STATUS(status);
 		}
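
The extra argument threads the table's integer width into the conversion. A rough sketch of the implied behaviour, assuming decimal or "0x"-prefixed hex input and truncation to 32 bits when the integer width is 4 bytes; this is a reading of the interface change, not the real acpi_ut_strtoul64():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Convert a string to an integer, then clamp to the table's integer width. */
static uint64_t str_to_acpi_integer(const char *s, unsigned int integer_byte_width)
{
	int base = (strncmp(s, "0x", 2) == 0 || strncmp(s, "0X", 2) == 0) ? 16 : 10;
	uint64_t value = strtoull(s, NULL, base);

	if (integer_byte_width == 4)
		value &= 0xFFFFFFFFULL;	/* 32-bit integers (DSDT revision 1) */
	return value;
}

int main(void)
{
	printf("%llX\n", (unsigned long long)str_to_acpi_integer("0x123456789A", 8)); /* 123456789A */
	printf("%llX\n", (unsigned long long)str_to_acpi_integer("0x123456789A", 4)); /* 3456789A */
	return 0;
}
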
@@ -439,7 +441,7 @@
 		 * Need enough space for one ASCII integer (plus null terminator)
 		 */
 		return_desc =
-		    acpi_ut_create_string_object((acpi_size) string_length);
+		    acpi_ut_create_string_object((acpi_size)string_length);
 		if (!return_desc) {
 			return_ACPI_STATUS(AE_NO_MEMORY);
 		}
@@ -518,7 +520,7 @@
 		}
 
 		return_desc =
-		    acpi_ut_create_string_object((acpi_size) string_length);
+		    acpi_ut_create_string_object((acpi_size)string_length);
 		if (!return_desc) {
 			return_ACPI_STATUS(AE_NO_MEMORY);
 		}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index bea9612..613ba6e 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -394,7 +394,7 @@
 	obj_desc->processor.proc_id = (u8) operand[1]->integer.value;
 	obj_desc->processor.length = (u8) operand[3]->integer.value;
 	obj_desc->processor.address =
-	    (acpi_io_address) operand[2]->integer.value;
+	    (acpi_io_address)operand[2]->integer.value;
 
 	/* Install the processor object in the parent Node */
 
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index ee30974..fce6b2e 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -55,9 +55,9 @@
  */
 #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
 /* Local prototypes */
-static void acpi_ex_out_string(char *title, char *value);
+static void acpi_ex_out_string(const char *title, const char *value);
 
-static void acpi_ex_out_pointer(char *title, void *value);
+static void acpi_ex_out_pointer(const char *title, const void *value);
 
 static void
 acpi_ex_dump_object(union acpi_operand_object *obj_desc,
@@ -365,8 +365,7 @@
 		    struct acpi_exdump_info *info)
 {
 	u8 *target;
-	char *name;
-	const char *reference_name;
+	const char *name;
 	u8 count;
 	union acpi_operand_object *start;
 	union acpi_operand_object *data = NULL;
@@ -459,9 +458,9 @@
 
 		case ACPI_EXD_REFERENCE:
 
-			reference_name = acpi_ut_get_reference_name(obj_desc);
 			acpi_ex_out_string("Class Name",
-					   ACPI_CAST_PTR(char, reference_name));
+					   acpi_ut_get_reference_name
+					   (obj_desc));
 			acpi_ex_dump_reference_obj(obj_desc);
 			break;
 
@@ -934,12 +933,12 @@
  *
  ******************************************************************************/
 
-static void acpi_ex_out_string(char *title, char *value)
+static void acpi_ex_out_string(const char *title, const char *value)
 {
 	acpi_os_printf("%20s : %s\n", title, value);
 }
 
-static void acpi_ex_out_pointer(char *title, void *value)
+static void acpi_ex_out_pointer(const char *title, const void *value)
 {
 	acpi_os_printf("%20s : %p\n", title, value);
 }
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index d5d8020..d7d3ee3 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -126,7 +126,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
+acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
 			     union acpi_operand_object *obj_desc,
 			     union acpi_operand_object **ret_buffer_desc)
 {
@@ -233,7 +233,7 @@
 	 * Note: Field.length is in bits.
 	 */
 	length =
-	    (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
+	    (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
 
 	if (length > acpi_gbl_integer_byte_width) {
 
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f0c5ed0..ee76d29 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -164,7 +164,7 @@
 			if (ACPI_ROUND_UP(rgn_desc->region.length,
 					  obj_desc->common_field.
 					  access_byte_width) >=
-			    ((acpi_size) obj_desc->common_field.
+			    ((acpi_size)obj_desc->common_field.
 			     base_byte_offset +
 			     obj_desc->common_field.access_byte_width +
 			     field_datum_byte_offset)) {
@@ -897,17 +897,9 @@
 
 	access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
 
-	/*
-	 * Create the bitmasks used for bit insertion.
-	 * Note: This if/else is used to bypass compiler differences with the
-	 * shift operator
-	 */
-	if (access_bit_width == ACPI_INTEGER_BIT_SIZE) {
-		width_mask = ACPI_UINT64_MAX;
-	} else {
-		width_mask = ACPI_MASK_BITS_ABOVE(access_bit_width);
-	}
+	/* Create the bitmasks used for bit insertion */
 
+	width_mask = ACPI_MASK_BITS_ABOVE_64(access_bit_width);
 	mask = width_mask &
 	    ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
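
The replaced if/else existed because shifting a 64-bit value by 64 is undefined in C. A sketch of a mask helper that keeps the low 'width' bits while handling the full-width case explicitly, which is the behaviour the ACPI_MASK_BITS_ABOVE_64 macro is assumed to provide:

#include <stdint.h>
#include <stdio.h>

/* Keep the low 'width' bits; a shift by 64 on a 64-bit type is undefined,
 * so the full-width case is handled explicitly. */
static uint64_t mask_bits_above_64(unsigned int width)
{
	return (width >= 64) ? UINT64_MAX : ((1ULL << width) - 1);
}

int main(void)
{
	printf("%016llX\n", (unsigned long long)mask_bits_above_64(8));	/* 00000000000000FF */
	printf("%016llX\n", (unsigned long long)mask_bits_above_64(32));	/* 00000000FFFFFFFF */
	printf("%016llX\n", (unsigned long long)mask_bits_above_64(64));	/* FFFFFFFFFFFFFFFF */
	return 0;
}
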
 
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index db30ae4..4f7e667 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -45,7 +45,6 @@
 #include "accommon.h"
 #include "acinterp.h"
 #include "amlcode.h"
-#include "amlresrc.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exmisc")
@@ -140,295 +139,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ex_concat_template
- *
- * PARAMETERS:  operand0            - First source object
- *              operand1            - Second source object
- *              actual_return_desc  - Where to place the return object
- *              walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Concatenate two resource templates
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_concat_template(union acpi_operand_object *operand0,
-			union acpi_operand_object *operand1,
-			union acpi_operand_object **actual_return_desc,
-			struct acpi_walk_state *walk_state)
-{
-	acpi_status status;
-	union acpi_operand_object *return_desc;
-	u8 *new_buf;
-	u8 *end_tag;
-	acpi_size length0;
-	acpi_size length1;
-	acpi_size new_length;
-
-	ACPI_FUNCTION_TRACE(ex_concat_template);
-
-	/*
-	 * Find the end_tag descriptor in each resource template.
-	 * Note1: returned pointers point TO the end_tag, not past it.
-	 * Note2: zero-length buffers are allowed; treated like one end_tag
-	 */
-
-	/* Get the length of the first resource template */
-
-	status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);
-
-	/* Get the length of the second resource template */
-
-	status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);
-
-	/* Combine both lengths, minimum size will be 2 for end_tag */
-
-	new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);
-
-	/* Create a new buffer object for the result (with one end_tag) */
-
-	return_desc = acpi_ut_create_buffer_object(new_length);
-	if (!return_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/*
-	 * Copy the templates to the new buffer, 0 first, then 1 follows. One
-	 * end_tag descriptor is copied from Operand1.
-	 */
-	new_buf = return_desc->buffer.pointer;
-	memcpy(new_buf, operand0->buffer.pointer, length0);
-	memcpy(new_buf + length0, operand1->buffer.pointer, length1);
-
-	/* Insert end_tag and set the checksum to zero, means "ignore checksum" */
-
-	new_buf[new_length - 1] = 0;
-	new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;
-
-	/* Return the completed resource template */
-
-	*actual_return_desc = return_desc;
-	return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_do_concatenate
- *
- * PARAMETERS:  operand0            - First source object
- *              operand1            - Second source object
- *              actual_return_desc  - Where to place the return object
- *              walk_state          - Current walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Concatenate two objects OF THE SAME TYPE.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_do_concatenate(union acpi_operand_object *operand0,
-		       union acpi_operand_object *operand1,
-		       union acpi_operand_object **actual_return_desc,
-		       struct acpi_walk_state *walk_state)
-{
-	union acpi_operand_object *local_operand1 = operand1;
-	union acpi_operand_object *return_desc;
-	char *new_buf;
-	const char *type_string;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(ex_do_concatenate);
-
-	/*
-	 * Convert the second operand if necessary. The first operand
-	 * determines the type of the second operand, (See the Data Types
-	 * section of the ACPI specification.)  Both object types are
-	 * guaranteed to be either Integer/String/Buffer by the operand
-	 * resolution mechanism.
-	 */
-	switch (operand0->common.type) {
-	case ACPI_TYPE_INTEGER:
-
-		status =
-		    acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
-		break;
-
-	case ACPI_TYPE_STRING:
-		/*
-		 * Per the ACPI spec, Concatenate only supports int/str/buf.
-		 * However, we support all objects here as an extension.
-		 * This improves the usefulness of the Printf() macro.
-		 * 12/2015.
-		 */
-		switch (operand1->common.type) {
-		case ACPI_TYPE_INTEGER:
-		case ACPI_TYPE_STRING:
-		case ACPI_TYPE_BUFFER:
-
-			status =
-			    acpi_ex_convert_to_string(operand1, &local_operand1,
-						      ACPI_IMPLICIT_CONVERT_HEX);
-			break;
-
-		default:
-			/*
-			 * Just emit a string containing the object type.
-			 */
-			type_string =
-			    acpi_ut_get_type_name(operand1->common.type);
-
-			local_operand1 = acpi_ut_create_string_object(((acpi_size) strlen(type_string) + 9));	/* 9 For "[Object]" */
-			if (!local_operand1) {
-				status = AE_NO_MEMORY;
-				goto cleanup;
-			}
-
-			strcpy(local_operand1->string.pointer, "[");
-			strcat(local_operand1->string.pointer, type_string);
-			strcat(local_operand1->string.pointer, " Object]");
-			status = AE_OK;
-			break;
-		}
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
-		break;
-
-	default:
-
-		ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
-			    operand0->common.type));
-		status = AE_AML_INTERNAL;
-	}
-
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/*
-	 * Both operands are now known to be the same object type
-	 * (Both are Integer, String, or Buffer), and we can now perform the
-	 * concatenation.
-	 */
-
-	/*
-	 * There are three cases to handle:
-	 *
-	 * 1) Two Integers concatenated to produce a new Buffer
-	 * 2) Two Strings concatenated to produce a new String
-	 * 3) Two Buffers concatenated to produce a new Buffer
-	 */
-	switch (operand0->common.type) {
-	case ACPI_TYPE_INTEGER:
-
-		/* Result of two Integers is a Buffer */
-		/* Need enough buffer space for two integers */
-
-		return_desc = acpi_ut_create_buffer_object((acpi_size)
-							   ACPI_MUL_2
-							   (acpi_gbl_integer_byte_width));
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		new_buf = (char *)return_desc->buffer.pointer;
-
-		/* Copy the first integer, LSB first */
-
-		memcpy(new_buf, &operand0->integer.value,
-		       acpi_gbl_integer_byte_width);
-
-		/* Copy the second integer (LSB first) after the first */
-
-		memcpy(new_buf + acpi_gbl_integer_byte_width,
-		       &local_operand1->integer.value,
-		       acpi_gbl_integer_byte_width);
-		break;
-
-	case ACPI_TYPE_STRING:
-
-		/* Result of two Strings is a String */
-
-		return_desc = acpi_ut_create_string_object(((acpi_size)
-							    operand0->string.
-							    length +
-							    local_operand1->
-							    string.length));
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		new_buf = return_desc->string.pointer;
-
-		/* Concatenate the strings */
-
-		strcpy(new_buf, operand0->string.pointer);
-		strcat(new_buf, local_operand1->string.pointer);
-		break;
-
-	case ACPI_TYPE_BUFFER:
-
-		/* Result of two Buffers is a Buffer */
-
-		return_desc = acpi_ut_create_buffer_object(((acpi_size)
-							    operand0->buffer.
-							    length +
-							    local_operand1->
-							    buffer.length));
-		if (!return_desc) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		new_buf = (char *)return_desc->buffer.pointer;
-
-		/* Concatenate the buffers */
-
-		memcpy(new_buf, operand0->buffer.pointer,
-		       operand0->buffer.length);
-		memcpy(new_buf + operand0->buffer.length,
-		       local_operand1->buffer.pointer,
-		       local_operand1->buffer.length);
-		break;
-
-	default:
-
-		/* Invalid object type, should not happen here */
-
-		ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
-			    operand0->common.type));
-		status = AE_AML_INTERNAL;
-		goto cleanup;
-	}
-
-	*actual_return_desc = return_desc;
-
-cleanup:
-	if (local_operand1 != operand1) {
-		acpi_ut_remove_reference(local_operand1);
-	}
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ex_do_math_op
  *
  * PARAMETERS:  opcode              - AML opcode
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 27c11ab..3d6af93 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -178,7 +178,7 @@
 
 	for (index = 0;
 	     (index < ACPI_NAME_SIZE)
-	     && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
+	     && (acpi_ut_valid_name_char(*aml_address, 0)); index++) {
 		char_buf[index] = *aml_address++;
 		ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
 	}
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 5aa21c4..69e4e26 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -184,7 +184,7 @@
 		/* Get the Integer values from the objects */
 
 		index = operand[1]->integer.value;
-		length = (acpi_size) operand[2]->integer.value;
+		length = (acpi_size)operand[2]->integer.value;
 
 		/*
 		 * If the index is beyond the length of the String/Buffer, or if the
@@ -198,8 +198,8 @@
 
 		else if ((index + length) > operand[0]->string.length) {
 			length =
-			    (acpi_size) operand[0]->string.length -
-			    (acpi_size) index;
+			    (acpi_size)operand[0]->string.length -
+			    (acpi_size)index;
 		}
 
 		/* Strings always have a sub-pointer, not so for buffers */
@@ -209,7 +209,7 @@
 
 			/* Always allocate a new buffer for the String */
 
-			buffer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
+			buffer = ACPI_ALLOCATE_ZEROED((acpi_size)length + 1);
 			if (!buffer) {
 				status = AE_NO_MEMORY;
 				goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index e2b6348..786d53b 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -207,7 +207,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
+acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state *walk_state)
 {
 	union acpi_operand_object **operand = &walk_state->operands[0];
 	union acpi_operand_object *return_desc = NULL;
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 076074d..31b381c 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -325,15 +325,15 @@
 	switch (function) {
 	case ACPI_READ:
 
-		status = acpi_hw_read_port((acpi_io_address) address,
+		status = acpi_hw_read_port((acpi_io_address)address,
 					   &value32, bit_width);
 		*value = value32;
 		break;
 
 	case ACPI_WRITE:
 
-		status = acpi_hw_write_port((acpi_io_address) address,
-					    (u32) * value, bit_width);
+		status = acpi_hw_write_port((acpi_io_address)address,
+					    (u32)*value, bit_width);
 		break;
 
 	default:
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index c1e8bfb..a183cb7 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -93,7 +93,7 @@
 	 */
 	node = *object_ptr;
 	source_desc = acpi_ns_get_attached_object(node);
-	entry_type = acpi_ns_get_type((acpi_handle) node);
+	entry_type = acpi_ns_get_type((acpi_handle)node);
 
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p SourceDesc=%p [%s]\n",
 			  node, source_desc,
@@ -106,7 +106,7 @@
 
 		node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
 		source_desc = acpi_ns_get_attached_object(node);
-		entry_type = acpi_ns_get_type((acpi_handle) node);
+		entry_type = acpi_ns_get_type((acpi_handle)node);
 		*object_ptr = node;
 	}
 
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index fedacf1..e1d3878 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -334,7 +334,7 @@
 acpi_status
 acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
 			 union acpi_operand_object *operand,
-			 acpi_object_type * return_type,
+			 acpi_object_type *return_type,
 			 union acpi_operand_object **return_desc)
 {
 	union acpi_operand_object *obj_desc = ACPI_CAST_PTR(void, operand);
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index cc2c26c..27b41fd 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -131,8 +131,8 @@
 
 acpi_status
 acpi_ex_resolve_operands(u16 opcode,
-			 union acpi_operand_object ** stack_ptr,
-			 struct acpi_walk_state * walk_state)
+			 union acpi_operand_object **stack_ptr,
+			 struct acpi_walk_state *walk_state)
 {
 	union acpi_operand_object *obj_desc;
 	acpi_status status = AE_OK;
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 28b7248..1dab827 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -188,7 +188,7 @@
 		 * Clear old string and copy in the new one
 		 */
 		memset(target_desc->string.pointer, 0,
-		       (acpi_size) target_desc->string.length + 1);
+		       (acpi_size)target_desc->string.length + 1);
 		memcpy(target_desc->string.pointer, buffer, length);
 	} else {
 		/*
@@ -204,7 +204,7 @@
 		}
 
 		target_desc->string.pointer =
-		    ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
+		    ACPI_ALLOCATE_ZEROED((acpi_size)length + 1);
 
 		if (!target_desc->string.pointer) {
 			return_ACPI_STATUS(AE_NO_MEMORY);
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 4d44bc1..425f133 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -301,8 +301,8 @@
  *
  * FUNCTION:    acpi_ex_eisa_id_to_string
  *
- * PARAMETERS:  compressed_id   - EISAID to be converted
- *              out_string      - Where to put the converted string (8 bytes)
+ * PARAMETERS:  out_string      - Where to put the converted string (8 bytes)
+ *              compressed_id   - EISAID to be converted
  *
  * RETURN:      None
  *
@@ -354,7 +354,7 @@
  *                                possible 64-bit integer.
  *              value           - Value to be converted
  *
- * RETURN:      None, string
+ * RETURN:      Converted string in out_string
  *
  * DESCRIPTION: Convert a 64-bit integer to decimal string representation.
  *              Assumes string buffer is large enough to hold the string. The
@@ -384,9 +384,9 @@
  * FUNCTION:    acpi_ex_pci_cls_to_string
  *
  * PARAMETERS:  out_string      - Where to put the converted string (7 bytes)
- * PARAMETERS:  class_code      - PCI class code to be converted (3 bytes)
+ *              class_code      - PCI class code to be converted (3 bytes)
  *
- * RETURN:      None
+ * RETURN:      Converted string in out_string
  *
  * DESCRIPTION: Convert 3-bytes PCI class code to string representation.
  *              Return buffer must be large enough to hold the string. The
@@ -417,7 +417,7 @@
  *
  * PARAMETERS:  space_id            - ID to be validated
  *
- * RETURN:      TRUE if valid/supported ID.
+ * RETURN:      TRUE if space_id is a valid/supported ID.
  *
  * DESCRIPTION: Validate an operation region space_ID.
  *
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 1c4f451..bdecd5e 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -166,7 +166,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
+acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info)
 {
 	struct acpi_gpe_register_info *gpe_register_info;
 	acpi_status status;
@@ -206,7 +206,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
+acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
 		       acpi_event_status *event_status)
 {
 	u32 in_byte;
@@ -391,7 +391,7 @@
 
 acpi_status
 acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-				 struct acpi_gpe_block_info * gpe_block,
+				 struct acpi_gpe_block_info *gpe_block,
 				 void *context)
 {
 	u32 i;
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 5ba0498..0f18dbc 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -51,6 +51,10 @@
 
 #if (!ACPI_REDUCED_HARDWARE)
 /* Local Prototypes */
+static u8
+acpi_hw_get_access_bit_width(struct acpi_generic_address *reg,
+			     u8 max_bit_width);
+
 static acpi_status
 acpi_hw_read_multiple(u32 *value,
 		      struct acpi_generic_address *register_a,
@@ -65,6 +69,48 @@
 
 /******************************************************************************
  *
+ * FUNCTION:    acpi_hw_get_access_bit_width
+ *
+ * PARAMETERS:  reg                 - GAS register structure
+ *              max_bit_width       - Max bit_width supported (32 or 64)
+ *
+ * RETURN:      Access bit width for the register
+ *
+ * DESCRIPTION: Obtain optimal access bit width
+ *
+ ******************************************************************************/
+
+static u8
+acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
+{
+	u64 address;
+
+	if (!reg->access_width) {
+		/*
+		 * Detect old register descriptors where only the bit_width field
+		 * makes sense. The target address is copied to handle possible
+		 * alignment issues.
+		 */
+		ACPI_MOVE_64_TO_64(&address, &reg->address);
+		if (!reg->bit_offset && reg->bit_width &&
+		    ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
+		    ACPI_IS_ALIGNED(reg->bit_width, 8) &&
+		    ACPI_IS_ALIGNED(address, reg->bit_width)) {
+			return (reg->bit_width);
+		} else {
+			if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+				return (32);
+			} else {
+				return (max_bit_width);
+			}
+		}
+	} else {
+		return (1 << (reg->access_width + 2));
+	}
+}
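
The GAS access_size encoding (1 = byte, 2 = word, 3 = dword, 4 = qword) maps to bits through 1 << (access_width + 2), as in the new function. A trivial worked example:

#include <stdio.h>

/* access_size 1..4 -> 8, 16, 32, 64 bits */
static unsigned int access_width_to_bits(unsigned int access_width)
{
	return 1u << (access_width + 2);
}

int main(void)
{
	unsigned int aw;

	for (aw = 1; aw <= 4; aw++)
		printf("access_width %u -> %u bits\n", aw, access_width_to_bits(aw));
	return 0;
}
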
+
+/******************************************************************************
+ *
  * FUNCTION:    acpi_hw_validate_register
  *
  * PARAMETERS:  reg                 - GAS register structure
@@ -83,6 +129,8 @@
 acpi_hw_validate_register(struct acpi_generic_address *reg,
 			  u8 max_bit_width, u64 *address)
 {
+	u8 bit_width;
+	u8 access_width;
 
 	/* Must have a valid pointer to a GAS structure */
 
@@ -109,23 +157,25 @@
 		return (AE_SUPPORT);
 	}
 
-	/* Validate the bit_width */
+	/* Validate the access_width */
 
-	if ((reg->bit_width != 8) &&
-	    (reg->bit_width != 16) &&
-	    (reg->bit_width != 32) && (reg->bit_width != max_bit_width)) {
+	if (reg->access_width > 4) {
 		ACPI_ERROR((AE_INFO,
-			    "Unsupported register bit width: 0x%X",
-			    reg->bit_width));
+			    "Unsupported register access width: 0x%X",
+			    reg->access_width));
 		return (AE_SUPPORT);
 	}
 
-	/* Validate the bit_offset. Just a warning for now. */
+	/* Validate the bit_width, convert access_width into number of bits */
 
-	if (reg->bit_offset != 0) {
+	access_width = acpi_hw_get_access_bit_width(reg, max_bit_width);
+	bit_width =
+	    ACPI_ROUND_UP(reg->bit_offset + reg->bit_width, access_width);
+	if (max_bit_width < bit_width) {
 		ACPI_WARNING((AE_INFO,
-			      "Unsupported register bit offset: 0x%X",
-			      reg->bit_offset));
+			      "Requested bit width 0x%X is smaller than register bit width 0x%X",
+			      max_bit_width, bit_width));
+		return (AE_SUPPORT);
 	}
 
 	return (AE_OK);
@@ -145,17 +195,19 @@
  *              64-bit values is not needed.
  *
  * LIMITATIONS: <These limitations also apply to acpi_hw_write>
- *      bit_width must be exactly 8, 16, or 32.
  *      space_ID must be system_memory or system_IO.
- *      bit_offset and access_width are currently ignored, as there has
- *          not been a need to implement these.
  *
  ******************************************************************************/
 
 acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
 {
 	u64 address;
+	u8 access_width;
+	u32 bit_width;
+	u8 bit_offset;
 	u64 value64;
+	u32 value32;
+	u8 index;
 	acpi_status status;
 
 	ACPI_FUNCTION_NAME(hw_read);
@@ -167,28 +219,75 @@
 		return (status);
 	}
 
-	/* Initialize entire 32-bit return value to zero */
-
+	/*
+	 * Initialize the entire 32-bit return value to zero and convert
+	 * access_width into a number of bits
+	 */
 	*value = 0;
+	access_width = acpi_hw_get_access_bit_width(reg, 32);
+	bit_width = reg->bit_offset + reg->bit_width;
+	bit_offset = reg->bit_offset;
 
 	/*
 	 * Two address spaces supported: Memory or IO. PCI_Config is
 	 * not supported here because the GAS structure is insufficient
 	 */
-	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
-		status = acpi_os_read_memory((acpi_physical_address)
-					     address, &value64, reg->bit_width);
+	index = 0;
+	while (bit_width) {
+		if (bit_offset >= access_width) {
+			value32 = 0;
+			bit_offset -= access_width;
+		} else {
+			if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+				status =
+				    acpi_os_read_memory((acpi_physical_address)
+							address +
+							index *
+							ACPI_DIV_8
+							(access_width),
+							&value64, access_width);
+				value32 = (u32)value64;
+			} else {	/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
 
-		*value = (u32)value64;
-	} else {		/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+				status = acpi_hw_read_port((acpi_io_address)
+							   address +
+							   index *
+							   ACPI_DIV_8
+							   (access_width),
+							   &value32,
+							   access_width);
+			}
 
-		status = acpi_hw_read_port((acpi_io_address)
-					   address, value, reg->bit_width);
+			/*
+			 * Use offset style bit masks because:
+			 * bit_offset < access_width/bit_width < access_width, and
+			 * access_width is ensured to be less than 32-bits by
+			 * acpi_hw_validate_register().
+			 */
+			if (bit_offset) {
+				value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
+				bit_offset = 0;
+			}
+			if (bit_width < access_width) {
+				value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
+			}
+		}
+
+		/*
+		 * Use offset style bit writes because "Index * AccessWidth" is
+		 * ensured to be less than 32-bits by acpi_hw_validate_register().
+		 */
+		ACPI_SET_BITS(value, index * access_width,
+			      ACPI_MASK_BITS_ABOVE_32(access_width), value32);
+
+		bit_width -=
+		    bit_width > access_width ? access_width : bit_width;
+		index++;
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_IO,
 			  "Read:  %8.8X width %2d from %8.8X%8.8X (%s)\n",
-			  *value, reg->bit_width, ACPI_FORMAT_UINT64(address),
+			  *value, access_width, ACPI_FORMAT_UINT64(address),
 			  acpi_ut_get_region_name(reg->space_id)));
 
 	return (status);
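
The reworked read loop above walks the register in access-width chunks, skips chunks entirely below bit_offset, masks the partial chunks, and ORs each chunk into the result at index * access_width. A self-contained sketch of the same assembly over a toy 8-bit backing store (names and sizes are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Toy backing store standing in for a memory-mapped register. */
static const uint8_t mmio[4] = { 0x78, 0x56, 0x34, 0x12 };

static uint32_t read_chunk(unsigned int byte_index)
{
	return mmio[byte_index];
}

/*
 * Assemble a field of 'bit_width' bits starting at 'bit_offset' (offset +
 * width assumed <= 32 here), reading the hardware in 8-bit chunks. The field
 * stays at its original bit position in the result, as in the reworked read.
 */
static uint32_t read_field(unsigned int bit_offset, unsigned int bit_width)
{
	const unsigned int access_width = 8;
	unsigned int remaining = bit_offset + bit_width;
	unsigned int index = 0;
	uint32_t value = 0;

	while (remaining) {
		uint32_t chunk = 0;

		if (bit_offset >= access_width) {
			bit_offset -= access_width;	/* chunk lies entirely below the field */
		} else {
			chunk = read_chunk(index);
			if (bit_offset) {
				chunk &= ~((1u << bit_offset) - 1);	/* clear bits below the field */
				bit_offset = 0;
			}
			if (remaining < access_width)
				chunk &= (1u << remaining) - 1;	/* clear bits above the field */
		}

		value |= chunk << (index * access_width);
		remaining -= (remaining > access_width) ? access_width : remaining;
		index++;
	}

	return value;
}

int main(void)
{
	printf("%08X\n", read_field(0, 32));	/* 12345678 */
	printf("%08X\n", read_field(8, 16));	/* 00345600 */
	return 0;
}
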
@@ -212,6 +311,12 @@
 acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
 {
 	u64 address;
+	u8 access_width;
+	u32 bit_width;
+	u8 bit_offset;
+	u64 value64;
+	u32 new_value32, old_value32;
+	u8 index;
 	acpi_status status;
 
 	ACPI_FUNCTION_NAME(hw_write);
@@ -223,23 +328,145 @@
 		return (status);
 	}
 
+	/* Convert access_width into a number of bits */
+
+	access_width = acpi_hw_get_access_bit_width(reg, 32);
+	bit_width = reg->bit_offset + reg->bit_width;
+	bit_offset = reg->bit_offset;
+
 	/*
 	 * Two address spaces supported: Memory or IO. PCI_Config is
 	 * not supported here because the GAS structure is insufficient
 	 */
-	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
-		status = acpi_os_write_memory((acpi_physical_address)
-					      address, (u64)value,
-					      reg->bit_width);
-	} else {		/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+	index = 0;
+	while (bit_width) {
+		/*
+		 * Use offset style bit reads because "Index * AccessWidth" is
+		 * ensured to be less than 32-bits by acpi_hw_validate_register().
+		 */
+		new_value32 = ACPI_GET_BITS(&value, index * access_width,
+					    ACPI_MASK_BITS_ABOVE_32
+					    (access_width));
 
-		status = acpi_hw_write_port((acpi_io_address)
-					    address, value, reg->bit_width);
+		if (bit_offset >= access_width) {
+			bit_offset -= access_width;
+		} else {
+			/*
+			 * Use offset style bit masks because access_width is ensured
+			 * to be less than 32-bits by acpi_hw_validate_register() and
+			 * bit_offset/bit_width is less than access_width here.
+			 */
+			if (bit_offset) {
+				new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
+			}
+			if (bit_width < access_width) {
+				new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
+			}
+
+			if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+				if (bit_offset || bit_width < access_width) {
+					/*
+					 * Read old values in order not to modify the bits that
+					 * are beyond the register bit_width/bit_offset setting.
+					 */
+					status =
+					    acpi_os_read_memory((acpi_physical_address)
+								address +
+								index *
+								ACPI_DIV_8
+								(access_width),
+								&value64,
+								access_width);
+					old_value32 = (u32)value64;
+
+					/*
+					 * Use offset style bit masks because access_width is
+					 * ensured to be less than 32-bits by
+					 * acpi_hw_validate_register() and bit_offset/bit_width is
+					 * less than access_width here.
+					 */
+					if (bit_offset) {
+						old_value32 &=
+						    ACPI_MASK_BITS_ABOVE
+						    (bit_offset);
+						bit_offset = 0;
+					}
+					if (bit_width < access_width) {
+						old_value32 &=
+						    ACPI_MASK_BITS_BELOW
+						    (bit_width);
+					}
+
+					new_value32 |= old_value32;
+				}
+
+				value64 = (u64)new_value32;
+				status =
+				    acpi_os_write_memory((acpi_physical_address)
+							 address +
+							 index *
+							 ACPI_DIV_8
+							 (access_width),
+							 value64, access_width);
+			} else {	/* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+				if (bit_offset || bit_width < access_width) {
+					/*
+					 * Read old values in order not to modify the bits that
+					 * are beyond the register bit_width/bit_offset setting.
+					 */
+					status =
+					    acpi_hw_read_port((acpi_io_address)
+							      address +
+							      index *
+							      ACPI_DIV_8
+							      (access_width),
+							      &old_value32,
+							      access_width);
+
+					/*
+					 * Use offset style bit masks because access_width is
+					 * ensured to be less than 32-bits by
+					 * acpi_hw_validate_register() and bit_offset/bit_width is
+					 * less than access_width here.
+					 */
+					if (bit_offset) {
+						old_value32 &=
+						    ACPI_MASK_BITS_ABOVE
+						    (bit_offset);
+						bit_offset = 0;
+					}
+					if (bit_width < access_width) {
+						old_value32 &=
+						    ACPI_MASK_BITS_BELOW
+						    (bit_width);
+					}
+
+					new_value32 |= old_value32;
+				}
+
+				status = acpi_hw_write_port((acpi_io_address)
+							    address +
+							    index *
+							    ACPI_DIV_8
+							    (access_width),
+							    new_value32,
+							    access_width);
+			}
+		}
+
+		/*
+		 * Index * access_width is ensured to be less than 32-bits by
+		 * acpi_hw_validate_register().
+		 */
+		bit_width -=
+		    bit_width > access_width ? access_width : bit_width;
+		index++;
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_IO,
 			  "Wrote: %8.8X width %2d   to %8.8X%8.8X (%s)\n",
-			  value, reg->bit_width, ACPI_FORMAT_UINT64(address),
+			  value, access_width, ACPI_FORMAT_UINT64(address),
 			  acpi_ut_get_region_name(reg->space_id)));
 
 	return (status);
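
The write path mirrors the read path, but partial chunks need a read-modify-write so bits outside the bit_offset/bit_width window are preserved. A standalone sketch of that merge over a toy 8-bit register file (illustrative only, not the driver code):

#include <stdint.h>
#include <stdio.h>

/* Toy 8-bit register file standing in for I/O ports. */
static uint8_t regs[4] = { 0xAA, 0xBB, 0xCC, 0xDD };

/*
 * Write 'bit_width' bits at 'bit_offset', using 8-bit accesses and preserving
 * neighbouring bits with a read-modify-write. 'value' carries the field at
 * its register position, matching what the read sketch returns.
 */
static void write_field(unsigned int bit_offset, unsigned int bit_width, uint32_t value)
{
	const unsigned int access_width = 8;
	unsigned int remaining = bit_offset + bit_width;
	unsigned int index = 0;

	while (remaining) {
		uint8_t new_chunk = (uint8_t)(value >> (index * access_width));

		if (bit_offset >= access_width) {
			bit_offset -= access_width;	/* chunk untouched by the field */
		} else {
			if (bit_offset || remaining < access_width) {
				uint8_t old = regs[index];

				/* Keep the old bits that lie outside the field */
				if (bit_offset) {
					old &= (1u << bit_offset) - 1;
					new_chunk &= ~((1u << bit_offset) - 1);
					bit_offset = 0;
				}
				if (remaining < access_width) {
					old &= ~((1u << remaining) - 1);
					new_chunk &= (1u << remaining) - 1;
				}
				new_chunk |= old;
			}
			regs[index] = new_chunk;
		}

		remaining -= (remaining > access_width) ? access_width : remaining;
		index++;
	}
}

int main(void)
{
	write_field(4, 8, 0x0F50);	/* field value 0xF5 at bits 4..11 */
	printf("%02X %02X %02X %02X\n", regs[0], regs[1], regs[2], regs[3]);
	/* 5A BF CC DD: low nibble of reg 0 and high nibble of reg 1 preserved */
	return 0;
}
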
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index a01ddb3..98c26ff 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -91,10 +91,9 @@
 		 * compatibility with other ACPI implementations that have allowed
 		 * BIOS code with bad register width values to go unnoticed.
 		 */
-		status =
-		    acpi_os_write_port((acpi_io_address) reset_reg->address,
-				       acpi_gbl_FADT.reset_value,
-				       ACPI_RESET_REGISTER_WIDTH);
+		status = acpi_os_write_port((acpi_io_address)reset_reg->address,
+					    acpi_gbl_FADT.reset_value,
+					    ACPI_RESET_REGISTER_WIDTH);
 	} else {
 		/* Write the reset value to the reset register */
 
@@ -504,9 +503,7 @@
 	 * Evaluate the \_Sx namespace object containing the register values
 	 * for this state
 	 */
-	info->relative_pathname = ACPI_CAST_PTR(char,
-						acpi_gbl_sleep_state_names
-						[sleep_state]);
+	info->relative_pathname = acpi_gbl_sleep_state_names[sleep_state];
 
 	status = acpi_ns_evaluate(info);
 	if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 697af81..426a630 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -107,9 +107,10 @@
 			continue;
 		}
 
-		status = acpi_ns_lookup(NULL, init_val->name, init_val->type,
-					ACPI_IMODE_LOAD_PASS2,
-					ACPI_NS_NO_UPSEARCH, NULL, &new_node);
+		status =
+		    acpi_ns_lookup(NULL, (char *)init_val->name, init_val->type,
+				   ACPI_IMODE_LOAD_PASS2, ACPI_NS_NO_UPSEARCH,
+				   NULL, &new_node);
 		if (ACPI_FAILURE(status)) {
 			ACPI_EXCEPTION((AE_INFO, status,
 					"Could not create predefined name %s",
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 878e8fb..c803bda 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -79,7 +79,8 @@
 		/* String-to-Integer conversion */
 
 		status = acpi_ut_strtoul64(original_object->string.pointer,
-					   ACPI_ANY_BASE, &value);
+					   ACPI_ANY_BASE,
+					   acpi_gbl_integer_byte_width, &value);
 		if (ACPI_FAILURE(status)) {
 			return (status);
 		}
@@ -317,7 +318,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ns_convert_to_unicode(struct acpi_namespace_node * scope,
+acpi_ns_convert_to_unicode(struct acpi_namespace_node *scope,
 			   union acpi_operand_object *original_object,
 			   union acpi_operand_object **return_object)
 {
@@ -384,7 +385,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ns_convert_to_resource(struct acpi_namespace_node * scope,
+acpi_ns_convert_to_resource(struct acpi_namespace_node *scope,
 			    union acpi_operand_object *original_object,
 			    union acpi_operand_object **return_object)
 {
@@ -463,7 +464,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ns_convert_to_reference(struct acpi_namespace_node * scope,
+acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
 			     union acpi_operand_object *original_object,
 			     union acpi_operand_object **return_object)
 {
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index af236e3..ce1f860 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -81,7 +81,7 @@
  *
  ******************************************************************************/
 
-void acpi_ns_print_pathname(u32 num_segments, char *pathname)
+void acpi_ns_print_pathname(u32 num_segments, const char *pathname)
 {
 	u32 i;
 
@@ -114,6 +114,9 @@
 	acpi_os_printf("]\n");
 }
 
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/* Not used at this time, perhaps later */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_dump_pathname
@@ -131,7 +134,8 @@
  ******************************************************************************/
 
 void
-acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
+acpi_ns_dump_pathname(acpi_handle handle,
+		      const char *msg, u32 level, u32 component)
 {
 
 	ACPI_FUNCTION_TRACE(ns_dump_pathname);
@@ -148,6 +152,7 @@
 	acpi_os_printf("\n");
 	return_VOID;
 }
+#endif
 
 /*******************************************************************************
  *
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index d4aa8b6..36643a8 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -140,6 +140,7 @@
 {
 	acpi_status status = AE_OK;
 	struct acpi_device_walk_info info;
+	acpi_handle handle;
 
 	ACPI_FUNCTION_TRACE(ns_initialize_devices);
 
@@ -190,6 +191,27 @@
 		if (ACPI_SUCCESS(status)) {
 			info.num_INI++;
 		}
+
+		/*
+		 * Execute \_SB._INI.
+		 * There appears to be a strict order requirement for \_SB._INI,
+		 * which should be evaluated before any _REG evaluations.
+		 */
+		status = acpi_get_handle(NULL, "\\_SB", &handle);
+		if (ACPI_SUCCESS(status)) {
+			memset(info.evaluate_info, 0,
+			       sizeof(struct acpi_evaluate_info));
+			info.evaluate_info->prefix_node = handle;
+			info.evaluate_info->relative_pathname =
+			    METHOD_NAME__INI;
+			info.evaluate_info->parameters = NULL;
+			info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+			status = acpi_ns_evaluate(info.evaluate_info);
+			if (ACPI_SUCCESS(status)) {
+				info.num_INI++;
+			}
+		}
 	}
 
 	/*
@@ -198,6 +220,12 @@
 	 * Note: Any objects accessed by the _REG methods will be automatically
 	 * initialized, even if they contain executable AML (see the call to
 	 * acpi_ns_initialize_objects below).
+	 *
+	 * Note: According to the ACPI specification, we do not actually need to
+	 * execute _REG for system_memory/system_io operation regions. However,
+	 * _REG must still be evaluated for PCI_Config operation regions on a
+	 * PCI root bus that does not contain a _BBN object, so this code is
+	 * kept here in order not to break things.
 	 */
 	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -592,33 +620,37 @@
 	 * Note: We know there is an _INI within this subtree, but it may not be
 	 * under this particular device, it may be lower in the branch.
 	 */
-	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
-			(ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
+	if (!ACPI_COMPARE_NAME(device_node->name.ascii, "_SB_") ||
+	    device_node->parent != acpi_gbl_root_node) {
+		ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+				(ACPI_TYPE_METHOD, device_node,
+				 METHOD_NAME__INI));
 
-	memset(info, 0, sizeof(struct acpi_evaluate_info));
-	info->prefix_node = device_node;
-	info->relative_pathname = METHOD_NAME__INI;
-	info->parameters = NULL;
-	info->flags = ACPI_IGNORE_RETURN_VALUE;
+		memset(info, 0, sizeof(struct acpi_evaluate_info));
+		info->prefix_node = device_node;
+		info->relative_pathname = METHOD_NAME__INI;
+		info->parameters = NULL;
+		info->flags = ACPI_IGNORE_RETURN_VALUE;
 
-	status = acpi_ns_evaluate(info);
-
-	if (ACPI_SUCCESS(status)) {
-		walk_info->num_INI++;
-	}
+		status = acpi_ns_evaluate(info);
+		if (ACPI_SUCCESS(status)) {
+			walk_info->num_INI++;
+		}
 #ifdef ACPI_DEBUG_OUTPUT
-	else if (status != AE_NOT_FOUND) {
+		else if (status != AE_NOT_FOUND) {
 
-		/* Ignore error and move on to next device */
+			/* Ignore error and move on to next device */
 
-		char *scope_name =
-		    acpi_ns_get_normalized_pathname(device_node, TRUE);
+			char *scope_name =
+			    acpi_ns_get_normalized_pathname(device_node, TRUE);
 
-		ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
-				scope_name));
-		ACPI_FREE(scope_name);
-	}
+			ACPI_EXCEPTION((AE_INFO, status,
+					"during %s._INI execution",
+					scope_name));
+			ACPI_FREE(scope_name);
+		}
 #endif
+	}
 
 	/* Ignore errors from above */
 
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 75cdb87..b5e2b0a 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -123,8 +123,8 @@
 		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 		acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list.
 						  tables[table_index].owner_id);
-		acpi_tb_release_owner_id(table_index);
 
+		acpi_tb_release_owner_id(table_index);
 		return_ACPI_STATUS(status);
 	}
 
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index eb6e1b8..f03dd41 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -113,7 +113,7 @@
 
 acpi_status
 acpi_ns_handle_to_pathname(acpi_handle target_handle,
-			   struct acpi_buffer * buffer, u8 no_trailing)
+			   struct acpi_buffer *buffer, u8 no_trailing)
 {
 	acpi_status status;
 	struct acpi_namespace_node *node;
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 051306f..cfa2bb7 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -399,7 +399,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ns_detach_data(struct acpi_namespace_node * node,
+acpi_ns_detach_data(struct acpi_namespace_node *node,
 		    acpi_object_handler handler)
 {
 	union acpi_operand_object *obj_desc;
@@ -444,7 +444,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ns_get_attached_data(struct acpi_namespace_node * node,
+acpi_ns_get_attached_data(struct acpi_namespace_node *node,
 			  acpi_object_handler handler, void **data)
 {
 	union acpi_operand_object *obj_desc;
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 9047f28..fbedc6e 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -62,6 +62,10 @@
 			       u32 count1,
 			       u8 type2, u32 count2, u32 start_index);
 
+static acpi_status
+acpi_ns_custom_package(struct acpi_evaluate_info *info,
+		       union acpi_operand_object **elements, u32 count);
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_check_package
@@ -135,6 +139,11 @@
 	 * PTYPE2 packages contain subpackages
 	 */
 	switch (package->ret_info.type) {
+	case ACPI_PTYPE_CUSTOM:
+
+		status = acpi_ns_custom_package(info, elements, count);
+		break;
+
 	case ACPI_PTYPE1_FIXED:
 		/*
 		 * The package count is fixed and there are no subpackages
@@ -179,6 +188,7 @@
 			if (ACPI_FAILURE(status)) {
 				return (status);
 			}
+
 			elements++;
 		}
 		break;
@@ -225,6 +235,7 @@
 					return (status);
 				}
 			}
+
 			elements++;
 		}
 		break;
@@ -569,11 +580,13 @@
 			if (sub_package->package.count < expected_count) {
 				goto package_too_small;
 			}
+
 			if (sub_package->package.count <
 			    package->ret_info.count1) {
 				expected_count = package->ret_info.count1;
 				goto package_too_small;
 			}
+
 			if (expected_count == 0) {
 				/*
 				 * Either the num_entries element was originally zero or it was
@@ -622,6 +635,83 @@
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_ns_custom_package
+ *
+ * PARAMETERS:  info                - Method execution information block
+ *              elements            - Pointer to the package elements array
+ *              count               - Element count for the package
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Check a returned package object for the correct count and
+ *              correct type of all sub-objects.
+ *
+ * NOTE: Currently used for the _BIX method only. When needed for two or more
+ * methods, probably a detect/dispatch mechanism will be required.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_custom_package(struct acpi_evaluate_info *info,
+		       union acpi_operand_object **elements, u32 count)
+{
+	u32 expected_count;
+	u32 version;
+	acpi_status status = AE_OK;
+
+	ACPI_FUNCTION_NAME(ns_custom_package);
+
+	/* Get version number, must be Integer */
+
+	if ((*elements)->common.type != ACPI_TYPE_INTEGER) {
+		ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+				      info->node_flags,
+				      "Return Package has invalid object type for version number"));
+		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+	}
+
+	version = (u32)(*elements)->integer.value;
+	expected_count = 21;	/* Version 1 */
+
+	if (version == 0) {
+		expected_count = 20;	/* Version 0 */
+	}
+
+	if (count < expected_count) {
+		ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+				      info->node_flags,
+				      "Return Package is too small - found %u elements, expected %u",
+				      count, expected_count));
+		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+	} else if (count > expected_count) {
+		ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+				  "%s: Return Package is larger than needed - "
+				  "found %u, expected %u\n",
+				  info->full_pathname, count, expected_count));
+	}
+
+	/* Validate all elements of the returned package */
+
+	status = acpi_ns_check_package_elements(info, elements,
+						ACPI_RTYPE_INTEGER, 16,
+						ACPI_RTYPE_STRING, 4, 0);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Version 1 has a single trailing integer */
+
+	if (version > 0) {
+		status = acpi_ns_check_package_elements(info, elements + 20,
+							ACPI_RTYPE_INTEGER, 1,
+							0, 0, 20);
+	}
+
+	return_ACPI_STATUS(status);
+}
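
For reference, the element counts the new check enforces for _BIX: the package is 16 integers followed by 4 strings (20 elements, the revision being the first integer) for revision 0, plus one trailing integer (21 elements) for revision 1. A tiny sketch of just the count check:

#include <stdio.h>

static unsigned int bix_expected_count(unsigned int revision)
{
	return (revision == 0) ? 20 : 21;
}

/* Too small is an error; larger than needed is only noted, as above. */
static int bix_count_ok(unsigned int revision, unsigned int count)
{
	unsigned int expected = bix_expected_count(revision);

	if (count < expected) {
		printf("_BIX too small: found %u, expected %u\n", count, expected);
		return 0;
	}
	if (count > expected)
		printf("_BIX larger than needed: found %u, expected %u\n", count, expected);
	return 1;
}

int main(void)
{
	bix_count_ok(0, 20);	/* ok */
	bix_count_ok(1, 20);	/* too small */
	bix_count_ok(1, 22);	/* ok, but larger than needed */
	return 0;
}
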
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_ns_check_package_elements
  *
  * PARAMETERS:  info            - Method execution information block
@@ -661,6 +751,7 @@
 		if (ACPI_FAILURE(status)) {
 			return (status);
 		}
+
 		this_element++;
 	}
 
@@ -671,6 +762,7 @@
 		if (ACPI_FAILURE(status)) {
 			return (status);
 		}
+
 		this_element++;
 	}
 
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 805e36d..9523d41 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -399,7 +399,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ns_repair_null_element(struct acpi_evaluate_info * info,
+acpi_ns_repair_null_element(struct acpi_evaluate_info *info,
 			    u32 expected_btypes,
 			    u32 package_index,
 			    union acpi_operand_object **return_object_ptr)
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 63edbbb..d533612 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -54,9 +54,9 @@
  * be repaired on a per-name basis.
  */
 typedef
-acpi_status(*acpi_repair_function) (struct acpi_evaluate_info * info,
-				    union acpi_operand_object
-				    **return_object_ptr);
+acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
+				     union acpi_operand_object **
+				     return_object_ptr);
 
 typedef struct acpi_repair_info {
 	char name[ACPI_NAME_SIZE];
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index c72cc62..784a30b 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -272,11 +272,11 @@
 			result = &internal_name[i];
 		} else if (num_segments == 2) {
 			internal_name[i] = AML_DUAL_NAME_PREFIX;
-			result = &internal_name[(acpi_size) i + 1];
+			result = &internal_name[(acpi_size)i + 1];
 		} else {
 			internal_name[i] = AML_MULTI_NAME_PREFIX_OP;
-			internal_name[(acpi_size) i + 1] = (char)num_segments;
-			result = &internal_name[(acpi_size) i + 2];
+			internal_name[(acpi_size)i + 1] = (char)num_segments;
+			result = &internal_name[(acpi_size)i + 2];
 		}
 	}
 
@@ -456,7 +456,7 @@
 
 			names_index = prefix_length + 2;
 			num_segments = (u8)
-			    internal_name[(acpi_size) prefix_length + 1];
+			    internal_name[(acpi_size)prefix_length + 1];
 			break;
 
 		case AML_DUAL_NAME_PREFIX:
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index a7deeaa..d2a9b4f 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -256,7 +256,7 @@
 		 * Allocate a new parameter block for the internal objects
 		 * Add 1 to count to allow for null terminated internal list
 		 */
-		info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size) info->
+		info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)info->
 							 param_count +
 							 1) * sizeof(void *));
 		if (!info->parameters) {
@@ -280,13 +280,12 @@
 		info->parameters[info->param_count] = NULL;
 	}
 
-#if 0
+#ifdef _FUTURE_FEATURE
 
 	/*
 	 * Begin incoming argument count analysis. Check for too few args
 	 * and too many args.
 	 */
-
 	switch (acpi_ns_get_type(info->node)) {
 	case ACPI_TYPE_METHOD:
 
@@ -370,68 +369,68 @@
 	 * If we are expecting a return value, and all went well above,
 	 * copy the return value to an external object.
 	 */
-	if (return_buffer) {
-		if (!info->return_object) {
-			return_buffer->length = 0;
+	if (!return_buffer) {
+		goto cleanup_return_object;
+	}
+
+	if (!info->return_object) {
+		return_buffer->length = 0;
+		goto cleanup;
+	}
+
+	if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
+	    ACPI_DESC_TYPE_NAMED) {
+		/*
+		 * If we received a NS Node as a return object, this means that
+		 * the object we are evaluating has nothing interesting to
+		 * return (such as a mutex, etc.)  We return an error because
+		 * these types are essentially unsupported by this interface.
+		 * We don't check up front because this makes it easier to add
+		 * support for various types at a later date if necessary.
+		 */
+		status = AE_TYPE;
+		info->return_object = NULL;	/* No need to delete a NS Node */
+		return_buffer->length = 0;
+	}
+
+	if (ACPI_FAILURE(status)) {
+		goto cleanup_return_object;
+	}
+
+	/* Dereference Index and ref_of references */
+
+	acpi_ns_resolve_references(info);
+
+	/* Get the size of the returned object */
+
+	status = acpi_ut_get_object_size(info->return_object,
+					 &buffer_space_needed);
+	if (ACPI_SUCCESS(status)) {
+
+		/* Validate/Allocate/Clear caller buffer */
+
+		status = acpi_ut_initialize_buffer(return_buffer,
+						   buffer_space_needed);
+		if (ACPI_FAILURE(status)) {
+			/*
+			 * Caller's buffer is too small or a new one can't
+			 * be allocated
+			 */
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					  "Needed buffer size %X, %s\n",
+					  (u32)buffer_space_needed,
+					  acpi_format_exception(status)));
 		} else {
-			if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
-			    ACPI_DESC_TYPE_NAMED) {
-				/*
-				 * If we received a NS Node as a return object, this means that
-				 * the object we are evaluating has nothing interesting to
-				 * return (such as a mutex, etc.)  We return an error because
-				 * these types are essentially unsupported by this interface.
-				 * We don't check up front because this makes it easier to add
-				 * support for various types at a later date if necessary.
-				 */
-				status = AE_TYPE;
-				info->return_object = NULL;	/* No need to delete a NS Node */
-				return_buffer->length = 0;
-			}
+			/* We have enough space for the object, build it */
 
-			if (ACPI_SUCCESS(status)) {
-
-				/* Dereference Index and ref_of references */
-
-				acpi_ns_resolve_references(info);
-
-				/* Get the size of the returned object */
-
-				status =
-				    acpi_ut_get_object_size(info->return_object,
-							    &buffer_space_needed);
-				if (ACPI_SUCCESS(status)) {
-
-					/* Validate/Allocate/Clear caller buffer */
-
-					status =
-					    acpi_ut_initialize_buffer
-					    (return_buffer,
-					     buffer_space_needed);
-					if (ACPI_FAILURE(status)) {
-						/*
-						 * Caller's buffer is too small or a new one can't
-						 * be allocated
-						 */
-						ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-								  "Needed buffer size %X, %s\n",
-								  (u32)
-								  buffer_space_needed,
-								  acpi_format_exception
-								  (status)));
-					} else {
-						/* We have enough space for the object, build it */
-
-						status =
-						    acpi_ut_copy_iobject_to_eobject
-						    (info->return_object,
-						     return_buffer);
-					}
-				}
-			}
+			status =
+			    acpi_ut_copy_iobject_to_eobject(info->return_object,
+							    return_buffer);
 		}
 	}
 
+cleanup_return_object:
+
 	if (info->return_object) {
 		/*
 		 * Delete the internal return object. NOTE: Interpreter must be
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 285b820..76a1bd4 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -78,7 +78,7 @@
 
 acpi_status
 acpi_get_handle(acpi_handle parent,
-		acpi_string pathname, acpi_handle * ret_handle)
+		acpi_string pathname, acpi_handle *ret_handle)
 {
 	acpi_status status;
 	struct acpi_namespace_node *node = NULL;
@@ -155,7 +155,7 @@
  *
  ******************************************************************************/
 acpi_status
-acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
+acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
 {
 	acpi_status status;
 	struct acpi_namespace_node *node;
@@ -448,7 +448,7 @@
 		/* Point past the CID PNP_DEVICE_ID array */
 
 		next_id_string +=
-		    ((acpi_size) cid_list->count *
+		    ((acpi_size)cid_list->count *
 		     sizeof(struct acpi_pnp_device_id));
 	}
 
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index c312cd4..32d372b 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -63,7 +63,7 @@
 * DESCRIPTION: This routine returns the type associated with a particular handle
  *
  ******************************************************************************/
-acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
+acpi_status acpi_get_type(acpi_handle handle, acpi_object_type *ret_type)
 {
 	struct acpi_namespace_node *node;
 	acpi_status status;
@@ -115,7 +115,7 @@
  *              Handle.
  *
  ******************************************************************************/
-acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
+acpi_status acpi_get_parent(acpi_handle handle, acpi_handle *ret_handle)
 {
 	struct acpi_namespace_node *node;
 	struct acpi_namespace_node *parent_node;
@@ -183,7 +183,7 @@
 acpi_status
 acpi_get_next_object(acpi_object_type type,
 		     acpi_handle parent,
-		     acpi_handle child, acpi_handle * ret_handle)
+		     acpi_handle child, acpi_handle *ret_handle)
 {
 	acpi_status status;
 	struct acpi_namespace_node *node;
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index d48cbed..c29c930 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -87,7 +87,7 @@
 	 * used to encode the package length, either 0,1,2, or 3
 	 */
 	byte_count = (aml[0] >> 6);
-	parser_state->aml += ((acpi_size) byte_count + 1);
+	parser_state->aml += ((acpi_size)byte_count + 1);
 
 	/* Get bytes 3, 2, 1 as needed */
 
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index cfd17a4..177b05b 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -158,7 +158,7 @@
  *
  ******************************************************************************/
 
-char *acpi_ps_get_opcode_name(u16 opcode)
+const char *acpi_ps_get_opcode_name(u16 opcode)
 {
 #if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
 
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 8038ed2..0a23897 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -130,8 +130,8 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
-			 union acpi_parse_object * op)
+acpi_ps_complete_this_op(struct acpi_walk_state *walk_state,
+			 union acpi_parse_object *op)
 {
 	union acpi_parse_object *prev;
 	union acpi_parse_object *next;
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index b28b0da..89cb4bf 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -128,7 +128,7 @@
 	if (op_info->flags & AML_DEFER) {
 		flags = ACPI_PARSEOP_DEFERRED;
 	} else if (op_info->flags & AML_NAMED) {
-		flags = ACPI_PARSEOP_NAMED;
+		flags = ACPI_PARSEOP_NAMED_OBJECT;
 	} else if (opcode == AML_INT_BYTELIST_OP) {
 		flags = ACPI_PARSEOP_BYTELIST;
 	}
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 04b37fc..cf30cd82 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -115,7 +115,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_ps_execute_method(struct acpi_evaluate_info * info)
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
 {
 	acpi_status status;
 	union acpi_parse_object *op;
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 2b1209d..f1e83ad 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -112,7 +112,7 @@
 	 * resource_source_index (1).
 	 */
 	if (resource_source->string_ptr) {
-		return ((acpi_rs_length) (resource_source->string_length + 1));
+		return ((acpi_rs_length)(resource_source->string_length + 1));
 	}
 
 	return (0);
@@ -188,7 +188,7 @@
 
 acpi_status
 acpi_rs_get_aml_length(struct acpi_resource *resource,
-		       acpi_size resource_list_size, acpi_size * size_needed)
+		       acpi_size resource_list_size, acpi_size *size_needed)
 {
 	acpi_size aml_size_needed = 0;
 	struct acpi_resource *resource_end;
@@ -278,11 +278,11 @@
 			 * 16-Bit Address Resource:
 			 * Add the size of the optional resource_source info
 			 */
-			total_size = (acpi_rs_length) (total_size +
-						       acpi_rs_struct_option_length
-						       (&resource->data.
-							address16.
-							resource_source));
+			total_size = (acpi_rs_length)(total_size +
+						      acpi_rs_struct_option_length
+						      (&resource->data.
+						       address16.
+						       resource_source));
 			break;
 
 		case ACPI_RESOURCE_TYPE_ADDRESS32:
@@ -290,11 +290,11 @@
 			 * 32-Bit Address Resource:
 			 * Add the size of the optional resource_source info
 			 */
-			total_size = (acpi_rs_length) (total_size +
-						       acpi_rs_struct_option_length
-						       (&resource->data.
-							address32.
-							resource_source));
+			total_size = (acpi_rs_length)(total_size +
+						      acpi_rs_struct_option_length
+						      (&resource->data.
+						       address32.
+						       resource_source));
 			break;
 
 		case ACPI_RESOURCE_TYPE_ADDRESS64:
@@ -302,11 +302,11 @@
 			 * 64-Bit Address Resource:
 			 * Add the size of the optional resource_source info
 			 */
-			total_size = (acpi_rs_length) (total_size +
-						       acpi_rs_struct_option_length
-						       (&resource->data.
-							address64.
-							resource_source));
+			total_size = (acpi_rs_length)(total_size +
+						      acpi_rs_struct_option_length
+						      (&resource->data.
+						       address64.
+						       resource_source));
 			break;
 
 		case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
@@ -315,28 +315,28 @@
 			 * Add the size of each additional optional interrupt beyond the
 			 * required 1 (4 bytes for each u32 interrupt number)
 			 */
-			total_size = (acpi_rs_length) (total_size +
-						       ((resource->data.
-							 extended_irq.
-							 interrupt_count -
-							 1) * 4) +
-						       /* Add the size of the optional resource_source info */
-						       acpi_rs_struct_option_length
-						       (&resource->data.
+			total_size = (acpi_rs_length)(total_size +
+						      ((resource->data.
 							extended_irq.
-							resource_source));
+							interrupt_count -
+							1) * 4) +
+						      /* Add the size of the optional resource_source info */
+						      acpi_rs_struct_option_length
+						      (&resource->data.
+						       extended_irq.
+						       resource_source));
 			break;
 
 		case ACPI_RESOURCE_TYPE_GPIO:
 
-			total_size = (acpi_rs_length) (total_size +
-						       (resource->data.gpio.
-							pin_table_length * 2) +
-						       resource->data.gpio.
-						       resource_source.
-						       string_length +
-						       resource->data.gpio.
-						       vendor_length);
+			total_size = (acpi_rs_length)(total_size +
+						      (resource->data.gpio.
+						       pin_table_length * 2) +
+						      resource->data.gpio.
+						      resource_source.
+						      string_length +
+						      resource->data.gpio.
+						      vendor_length);
 
 			break;
 
@@ -348,14 +348,14 @@
 								   common_serial_bus.
 								   type];
 
-			total_size = (acpi_rs_length) (total_size +
-						       resource->data.
-						       i2c_serial_bus.
-						       resource_source.
-						       string_length +
-						       resource->data.
-						       i2c_serial_bus.
-						       vendor_length);
+			total_size = (acpi_rs_length)(total_size +
+						      resource->data.
+						      i2c_serial_bus.
+						      resource_source.
+						      string_length +
+						      resource->data.
+						      i2c_serial_bus.
+						      vendor_length);
 
 			break;
 
@@ -397,8 +397,8 @@
  ******************************************************************************/
 
 acpi_status
-acpi_rs_get_list_length(u8 * aml_buffer,
-			u32 aml_buffer_length, acpi_size * size_needed)
+acpi_rs_get_list_length(u8 *aml_buffer,
+			u32 aml_buffer_length, acpi_size *size_needed)
 {
 	acpi_status status;
 	u8 *end_aml;
@@ -610,7 +610,7 @@
 
 acpi_status
 acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
-				     acpi_size * buffer_size_needed)
+				     acpi_size *buffer_size_needed)
 {
 	u32 number_of_elements;
 	acpi_size temp_size_needed = 0;
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 1297889..809b61c 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -347,7 +347,7 @@
 					   (u8 *) output_buffer->pointer);
 				path_buffer.pointer = user_prt->source;
 
-				status = acpi_ns_handle_to_pathname((acpi_handle) node, &path_buffer, FALSE);
+				status = acpi_ns_handle_to_pathname((acpi_handle)node, &path_buffer, FALSE);
 
 				/* +1 to include null terminator */
 
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 23a17c8..5ffdb56 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -52,17 +52,17 @@
  * All functions in this module are used by the AML Debugger only
  */
 /* Local prototypes */
-static void acpi_rs_out_string(char *title, char *value);
+static void acpi_rs_out_string(const char *title, const char *value);
 
-static void acpi_rs_out_integer8(char *title, u8 value);
+static void acpi_rs_out_integer8(const char *title, u8 value);
 
-static void acpi_rs_out_integer16(char *title, u16 value);
+static void acpi_rs_out_integer16(const char *title, u16 value);
 
-static void acpi_rs_out_integer32(char *title, u32 value);
+static void acpi_rs_out_integer32(const char *title, u32 value);
 
-static void acpi_rs_out_integer64(char *title, u64 value);
+static void acpi_rs_out_integer64(const char *title, u64 value);
 
-static void acpi_rs_out_title(char *title);
+static void acpi_rs_out_title(const char *title);
 
 static void acpi_rs_dump_byte_list(u16 length, u8 *data);
 
@@ -208,7 +208,7 @@
 {
 	u8 *target = NULL;
 	u8 *previous_target;
-	char *name;
+	const char *name;
 	u8 count;
 
 	/* First table entry must contain the table length (# of table entries) */
@@ -248,10 +248,8 @@
 		case ACPI_RSD_UINT8:
 
 			if (table->pointer) {
-				acpi_rs_out_string(name, ACPI_CAST_PTR(char,
-								       table->
-								       pointer
-								       [*target]));
+				acpi_rs_out_string(name,
+						   table->pointer[*target]);
 			} else {
 				acpi_rs_out_integer8(name, ACPI_GET8(target));
 			}
@@ -276,26 +274,20 @@
 
 		case ACPI_RSD_1BITFLAG:
 
-			acpi_rs_out_string(name, ACPI_CAST_PTR(char,
-							       table->
-							       pointer[*target &
-								       0x01]));
+			acpi_rs_out_string(name,
+					   table->pointer[*target & 0x01]);
 			break;
 
 		case ACPI_RSD_2BITFLAG:
 
-			acpi_rs_out_string(name, ACPI_CAST_PTR(char,
-							       table->
-							       pointer[*target &
-								       0x03]));
+			acpi_rs_out_string(name,
+					   table->pointer[*target & 0x03]);
 			break;
 
 		case ACPI_RSD_3BITFLAG:
 
-			acpi_rs_out_string(name, ACPI_CAST_PTR(char,
-							       table->
-							       pointer[*target &
-								       0x07]));
+			acpi_rs_out_string(name,
+					   table->pointer[*target & 0x07]);
 			break;
 
 		case ACPI_RSD_SHORTLIST:
@@ -481,7 +473,7 @@
  *
  ******************************************************************************/
 
-static void acpi_rs_out_string(char *title, char *value)
+static void acpi_rs_out_string(const char *title, const char *value)
 {
 
 	acpi_os_printf("%27s : %s", title, value);
@@ -491,30 +483,30 @@
 	acpi_os_printf("\n");
 }
 
-static void acpi_rs_out_integer8(char *title, u8 value)
+static void acpi_rs_out_integer8(const char *title, u8 value)
 {
 	acpi_os_printf("%27s : %2.2X\n", title, value);
 }
 
-static void acpi_rs_out_integer16(char *title, u16 value)
+static void acpi_rs_out_integer16(const char *title, u16 value)
 {
 
 	acpi_os_printf("%27s : %4.4X\n", title, value);
 }
 
-static void acpi_rs_out_integer32(char *title, u32 value)
+static void acpi_rs_out_integer32(const char *title, u32 value)
 {
 
 	acpi_os_printf("%27s : %8.8X\n", title, value);
 }
 
-static void acpi_rs_out_integer64(char *title, u64 value)
+static void acpi_rs_out_integer64(const char *title, u64 value)
 {
 
 	acpi_os_printf("%27s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
 }
 
-static void acpi_rs_out_title(char *title)
+static void acpi_rs_out_title(const char *title)
 {
 
 	acpi_os_printf("%27s : ", title);
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 5c34913..61e8f16 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -330,19 +330,20 @@
 	{ACPI_RSD_UINT8,    ACPI_RSD_OFFSET (common_serial_bus.type),           "Type",                     acpi_gbl_sbt_decode}, \
 	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer",      acpi_gbl_consume_decode}, \
 	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode),     "SlaveMode",                acpi_gbl_sm_decode}, \
+	{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.connection_sharing),"ConnectionSharing",     acpi_gbl_shr_decode}, \
 	{ACPI_RSD_UINT8,    ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId",         NULL}, \
 	{ACPI_RSD_UINT16,   ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength",         NULL}, \
 	{ACPI_RSD_SOURCE,   ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource",          NULL}, \
 	{ACPI_RSD_UINT16,   ACPI_RSD_OFFSET (common_serial_bus.vendor_length),  "VendorLength",             NULL}, \
 	{ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data),   "VendorData",               NULL},
 
-struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
+struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[11] = {
 	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
 	 "Common Serial Bus", NULL},
 	ACPI_RS_DUMP_COMMON_SERIAL_BUS
 };
 
-struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
+struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[14] = {
 	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
 	 "I2C Serial Bus", NULL},
 	ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
@@ -355,7 +356,7 @@
 	 "SlaveAddress", NULL},
 };
 
-struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
+struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[18] = {
 	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
 	 "Spi Serial Bus", NULL},
 	ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
@@ -376,7 +377,7 @@
 	 "ConnectionSpeed", NULL},
 };
 
-struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
+struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[20] = {
 	{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
 	 "Uart Serial Bus", NULL},
 	ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index ce3d0b7..25165ca 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -87,7 +87,7 @@
 		return_ACPI_STATUS(AE_BAD_PARAMETER);
 	}
 
-	if (((acpi_size) resource) & 0x3) {
+	if (((acpi_size)resource) & 0x3) {
 
 		/* Each internal resource struct is expected to be 32-bit aligned */
 
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index 8a01296..b82c061 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -151,7 +151,7 @@
  *
  ******************************************************************************/
 
-struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
+struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[17] = {
 	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
 	 ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
 	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
@@ -177,6 +177,11 @@
 	 AML_OFFSET(common_serial_bus.flags),
 	 1},
 
+	{ACPI_RSC_1BITFLAG,
+	 ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing),
+	 AML_OFFSET(common_serial_bus.flags),
+	 2},
+
 	{ACPI_RSC_MOVE8,
 	 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
 	 AML_OFFSET(common_serial_bus.type_revision_id),
@@ -237,7 +242,7 @@
  *
  ******************************************************************************/
 
-struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
+struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[21] = {
 	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
 	 ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
 	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
@@ -263,6 +268,11 @@
 	 AML_OFFSET(common_serial_bus.flags),
 	 1},
 
+	{ACPI_RSC_1BITFLAG,
+	 ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing),
+	 AML_OFFSET(common_serial_bus.flags),
+	 2},
+
 	{ACPI_RSC_MOVE8,
 	 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
 	 AML_OFFSET(common_serial_bus.type_revision_id),
@@ -339,7 +349,7 @@
  *
  ******************************************************************************/
 
-struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
+struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[23] = {
 	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
 	 ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
 	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
@@ -365,6 +375,11 @@
 	 AML_OFFSET(common_serial_bus.flags),
 	 1},
 
+	{ACPI_RSC_1BITFLAG,
+	 ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing),
+	 AML_OFFSET(common_serial_bus.flags),
+	 2},
+
 	{ACPI_RSC_MOVE8,
 	 ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
 	 AML_OFFSET(common_serial_bus.type_revision_id),
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index cf06e49..fa491c6 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -338,7 +338,7 @@
 	 * Note: Some resource descriptors will have an additional null, so
 	 * we add 1 to the minimum length.
 	 */
-	if (total_length > (acpi_rsdesc_size) (minimum_length + 1)) {
+	if (total_length > (acpi_rsdesc_size)(minimum_length + 1)) {
 
 		/* Get the resource_source_index */
 
@@ -377,7 +377,7 @@
 				   ACPI_CAST_PTR(char,
 						 &aml_resource_source[1]));
 
-		return ((acpi_rs_length) total_length);
+		return ((acpi_rs_length)total_length);
 	}
 
 	/* resource_source is not present */
@@ -406,9 +406,9 @@
  ******************************************************************************/
 
 acpi_rsdesc_size
-acpi_rs_set_resource_source(union aml_resource * aml,
+acpi_rs_set_resource_source(union aml_resource *aml,
 			    acpi_rs_length minimum_length,
-			    struct acpi_resource_source * resource_source)
+			    struct acpi_resource_source *resource_source)
 {
 	u8 *aml_resource_source;
 	acpi_rsdesc_size descriptor_length;
@@ -466,8 +466,8 @@
  ******************************************************************************/
 
 acpi_status
-acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
-			    struct acpi_buffer * ret_buffer)
+acpi_rs_get_prt_method_data(struct acpi_namespace_node *node,
+			    struct acpi_buffer *ret_buffer)
 {
 	union acpi_operand_object *obj_desc;
 	acpi_status status;
@@ -671,7 +671,7 @@
 
 acpi_status
 acpi_rs_get_method_data(acpi_handle handle,
-			char *path, struct acpi_buffer *ret_buffer)
+			const char *path, struct acpi_buffer *ret_buffer)
 {
 	union acpi_operand_object *obj_desc;
 	acpi_status status;
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 900933b..465ed81 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -433,8 +433,8 @@
 acpi_status
 acpi_get_vendor_resource(acpi_handle device_handle,
 			 char *name,
-			 struct acpi_vendor_uuid * uuid,
-			 struct acpi_buffer * ret_buffer)
+			 struct acpi_vendor_uuid *uuid,
+			 struct acpi_buffer *ret_buffer)
 {
 	struct acpi_vendor_walk_info info;
 	acpi_status status;
@@ -539,7 +539,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_walk_resource_buffer(struct acpi_buffer * buffer,
+acpi_walk_resource_buffer(struct acpi_buffer *buffer,
 			  acpi_walk_resource_callback user_function,
 			  void *context)
 {
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 7da79ce..1388a19 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -368,7 +368,7 @@
  *****************************************************************************/
 
 acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature)
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
 {
 	acpi_status status = AE_OK;
 
@@ -401,9 +401,9 @@
 			ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
 					"%4.4s 0x%8.8X%8.8X"
 					" Attempted table install failed",
-					acpi_ut_valid_acpi_name(table_desc->
-								signature.
-								ascii) ?
+					acpi_ut_valid_nameseg(table_desc->
+							      signature.
+							      ascii) ?
 					table_desc->signature.ascii : "????",
 					ACPI_FORMAT_UINT64(table_desc->
 							   address)));
@@ -454,7 +454,7 @@
 		table_count = acpi_gbl_root_table_list.current_table_count;
 	}
 
-	tables = ACPI_ALLOCATE_ZEROED(((acpi_size) table_count +
+	tables = ACPI_ALLOCATE_ZEROED(((acpi_size)table_count +
 				       ACPI_ROOT_TABLE_SIZE_INCREMENT) *
 				      sizeof(struct acpi_table_desc));
 	if (!tables) {
@@ -467,8 +467,7 @@
 
 	if (acpi_gbl_root_table_list.tables) {
 		memcpy(tables, acpi_gbl_root_table_list.tables,
-		       (acpi_size) table_count *
-		       sizeof(struct acpi_table_desc));
+		       (acpi_size)table_count * sizeof(struct acpi_table_desc));
 
 		if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
 			ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -701,7 +700,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id * owner_id)
+acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
 {
 	acpi_status status = AE_BAD_PARAMETER;
 
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index a79e4f3..6208069 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -53,7 +53,7 @@
 acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
 			     u8 space_id,
 			     u8 byte_width,
-			     u64 address, char *register_name, u8 flags);
+			     u64 address, const char *register_name, u8 flags);
 
 static void acpi_tb_convert_fadt(void);
 
@@ -65,7 +65,7 @@
 /* Table for conversion of FADT to common internal format and FADT validation */
 
 typedef struct acpi_fadt_info {
-	char *name;
+	const char *name;
 	u16 address64;
 	u16 address32;
 	u16 length;
@@ -192,7 +192,7 @@
 acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
 			     u8 space_id,
 			     u8 byte_width,
-			     u64 address, char *register_name, u8 flags)
+			     u64 address, const char *register_name, u8 flags)
 {
 	u8 bit_width;
 
@@ -344,7 +344,7 @@
 
 	/* Obtain the DSDT and FACS tables via their addresses within the FADT */
 
-	acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
+	acpi_tb_install_fixed_table((acpi_physical_address)acpi_gbl_FADT.Xdsdt,
 				    ACPI_SIG_DSDT, &acpi_gbl_dsdt_index);
 
 	/* If Hardware Reduced flag is set, there is no FACS */
@@ -385,14 +385,15 @@
 {
 	/*
 	 * Check if the FADT is larger than the largest table that we expect
-	 * (the ACPI 5.0 version). If so, truncate the table, and issue
-	 * a warning.
+	 * (typically the current ACPI specification version). If so, truncate
+	 * the table, and issue a warning.
 	 */
 	if (length > sizeof(struct acpi_table_fadt)) {
 		ACPI_BIOS_WARNING((AE_INFO,
-				   "FADT (revision %u) is longer than ACPI 5.0 version, "
+				   "FADT (revision %u) is longer than %s length, "
 				   "truncating length %u to %u",
-				   table->revision, length,
+				   table->revision, ACPI_FADT_CONFORMANCE,
+				   length,
 				   (u32)sizeof(struct acpi_table_fadt)));
 	}
 
@@ -467,7 +468,7 @@
 
 static void acpi_tb_convert_fadt(void)
 {
-	char *name;
+	const char *name;
 	struct acpi_generic_address *address64;
 	u32 address32;
 	u8 length;
@@ -646,9 +647,12 @@
 			if ((address64->address && !length) ||
 			    (!address64->address && length)) {
 				ACPI_BIOS_WARNING((AE_INFO,
-						   "Optional FADT field %s has zero address or length: "
-						   "0x%8.8X%8.8X/0x%X",
-						   name,
+						   "Optional FADT field %s has valid %s but zero %s: "
+						   "0x%8.8X%8.8X/0x%X", name,
+						   (length ? "Length" :
+						    "Address"),
+						   (length ? "Address" :
+						    "Length"),
 						   ACPI_FORMAT_UINT64
 						   (address64->address),
 						   length));
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index f2d0803..e348d61 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -76,7 +76,7 @@
 
 	/* Validate the input table signature */
 
-	if (!acpi_is_valid_signature(signature)) {
+	if (!acpi_ut_valid_nameseg(signature)) {
 		return_ACPI_STATUS(AE_BAD_SIGNATURE);
 	}
 
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 4dc6108..8b13052 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -299,9 +299,9 @@
 			ACPI_BIOS_ERROR((AE_INFO,
 					 "Table has invalid signature [%4.4s] (0x%8.8X), "
 					 "must be SSDT or OEMx",
-					 acpi_ut_valid_acpi_name(new_table_desc.
-								 signature.
-								 ascii) ?
+					 acpi_ut_valid_nameseg(new_table_desc.
+							       signature.
+							       ascii) ?
 					 new_table_desc.signature.
 					 ascii : "????",
 					 new_table_desc.signature.integer));
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 9240c76..e285539 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -231,7 +231,7 @@
 					   ACPI_FORMAT_UINT64(address64)));
 		}
 #endif
-		return ((acpi_physical_address) (address64));
+		return ((acpi_physical_address)(address64));
 	}
 }
 
@@ -287,12 +287,12 @@
 		 * the XSDT if the revision is > 1 and the XSDT pointer is present,
 		 * as per the ACPI specification.
 		 */
-		address = (acpi_physical_address) rsdp->xsdt_physical_address;
+		address = (acpi_physical_address)rsdp->xsdt_physical_address;
 		table_entry_size = ACPI_XSDT_ENTRY_SIZE;
 	} else {
 		/* Root table is an RSDT (32-bit physical addresses) */
 
-		address = (acpi_physical_address) rsdp->rsdt_physical_address;
+		address = (acpi_physical_address)rsdp->rsdt_physical_address;
 		table_entry_size = ACPI_RSDT_ENTRY_SIZE;
 	}
 
@@ -380,30 +380,3 @@
 	acpi_os_unmap_memory(table, length);
 	return_ACPI_STATUS(AE_OK);
 }
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_is_valid_signature
- *
- * PARAMETERS:  signature           - Sig string to be validated
- *
- * RETURN:      TRUE if signature is has 4 valid ACPI characters
- *
- * DESCRIPTION: Validate an ACPI table signature.
- *
- ******************************************************************************/
-
-u8 acpi_is_valid_signature(char *signature)
-{
-	u32 i;
-
-	/* Validate each character in the signature */
-
-	for (i = 0; i < ACPI_NAME_SIZE; i++) {
-		if (!acpi_ut_valid_acpi_char(signature[i], i)) {
-			return (FALSE);
-		}
-	}
-
-	return (TRUE);
-}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 326df65..3ecec93 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -99,7 +99,7 @@
  ******************************************************************************/
 
 acpi_status __init
-acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
+acpi_initialize_tables(struct acpi_table_desc *initial_table_array,
 		       u32 initial_table_count, u8 allow_resize)
 {
 	acpi_physical_address rsdp_address;
@@ -120,7 +120,7 @@
 		/* Root Table Array has been statically allocated by the host */
 
 		memset(initial_table_array, 0,
-		       (acpi_size) initial_table_count *
+		       (acpi_size)initial_table_count *
 		       sizeof(struct acpi_table_desc));
 
 		acpi_gbl_root_table_list.tables = initial_table_array;
@@ -352,7 +352,7 @@
  *
  ******************************************************************************/
 acpi_status
-acpi_get_table_by_index(u32 table_index, struct acpi_table_header ** table)
+acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
 {
 	acpi_status status;
 
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 3151968..ac71abc 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -82,7 +82,7 @@
 	 * their customized default region handlers.
 	 */
 	status = acpi_ev_install_region_handlers();
-	if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status,
 				"During Region initialization"));
 		return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index b9a78e4..adb6cfc 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -90,7 +90,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
+acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
 {
 
 	/*
@@ -142,7 +142,7 @@
  *
  ******************************************************************************/
 
-acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
+acpi_status __init acpi_find_root_pointer(acpi_physical_address *table_address)
 {
 	u8 *table_ptr;
 	u8 *mem_rover;
@@ -201,7 +201,7 @@
 			    (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
 
 			*table_address =
-			    (acpi_physical_address) physical_address;
+			    (acpi_physical_address)physical_address;
 			return_ACPI_STATUS(AE_OK);
 		}
 	}
@@ -234,7 +234,7 @@
 		    (ACPI_HI_RSDP_WINDOW_BASE +
 		     ACPI_PTR_DIFF(mem_rover, table_ptr));
 
-		*table_address = (acpi_physical_address) physical_address;
+		*table_address = (acpi_physical_address)physical_address;
 		return_ACPI_STATUS(AE_OK);
 	}
 
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 3dbdc3a..13324a2 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -231,7 +231,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
+acpi_status acpi_ut_validate_buffer(struct acpi_buffer *buffer)
 {
 
 	/* Obviously, the structure pointer must be valid */
@@ -272,8 +272,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
-			  acpi_size required_length)
+acpi_ut_initialize_buffer(struct acpi_buffer *buffer, acpi_size required_length)
 {
 	acpi_size input_buffer_length;
 
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
new file mode 100644
index 0000000..706c1f3
--- /dev/null
+++ b/drivers/acpi/acpica/utascii.c
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Module Name: utascii - Utility ascii functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2016, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_valid_nameseg
+ *
+ * PARAMETERS:  name            - The name or table signature to be examined.
+ *                                Four characters, does not have to be a
+ *                                NULL terminated string.
+ *
+ * RETURN:      TRUE if signature has 4 valid ACPI characters
+ *
+ * DESCRIPTION: Validate an ACPI table signature.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_nameseg(char *name)
+{
+	u32 i;
+
+	/* Validate each character in the signature */
+
+	for (i = 0; i < ACPI_NAME_SIZE; i++) {
+		if (!acpi_ut_valid_name_char(name[i], i)) {
+			return (FALSE);
+		}
+	}
+
+	return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_valid_name_char
+ *
+ * PARAMETERS:  char            - The character to be examined
+ *              position        - Byte position (0-3)
+ *
+ * RETURN:      TRUE if the character is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI character. Must be one of:
+ *              1) Upper case alpha
+ *              2) numeric
+ *              3) underscore
+ *
+ *              We allow a '!' as the last character because of the ASF! table
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_name_char(char character, u32 position)
+{
+
+	if (!((character >= 'A' && character <= 'Z') ||
+	      (character >= '0' && character <= '9') || (character == '_'))) {
+
+		/* Allow a '!' in the last position */
+
+		if (character == '!' && position == 3) {
+			return (TRUE);
+		}
+
+		return (FALSE);
+	}
+
+	return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_check_and_repair_ascii
+ *
+ * PARAMETERS:  name                - Ascii string
+ *              count               - Number of characters to check
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Ensure that the requested number of characters are printable
+ *              Ascii characters. Sets non-printable and null chars to <space>.
+ *
+ ******************************************************************************/
+
+void acpi_ut_check_and_repair_ascii(u8 *name, char *repaired_name, u32 count)
+{
+	u32 i;
+
+	for (i = 0; i < count; i++) {
+		repaired_name[i] = (char)name[i];
+
+		if (!name[i]) {
+			return;
+		}
+		if (!isprint(name[i])) {
+			repaired_name[i] = ' ';
+		}
+	}
+}
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 0cfb2b8..bd31faf 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -106,31 +106,31 @@
 			default:	/* Default is BYTE display */
 
 				acpi_os_printf("%02X ",
-					       buffer[(acpi_size) i + j]);
+					       buffer[(acpi_size)i + j]);
 				break;
 
 			case DB_WORD_DISPLAY:
 
 				ACPI_MOVE_16_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j]);
+						   &buffer[(acpi_size)i + j]);
 				acpi_os_printf("%04X ", temp32);
 				break;
 
 			case DB_DWORD_DISPLAY:
 
 				ACPI_MOVE_32_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j]);
+						   &buffer[(acpi_size)i + j]);
 				acpi_os_printf("%08X ", temp32);
 				break;
 
 			case DB_QWORD_DISPLAY:
 
 				ACPI_MOVE_32_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j]);
+						   &buffer[(acpi_size)i + j]);
 				acpi_os_printf("%08X", temp32);
 
 				ACPI_MOVE_32_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j +
+						   &buffer[(acpi_size)i + j +
 							   4]);
 				acpi_os_printf("%08X ", temp32);
 				break;
@@ -158,7 +158,7 @@
 				acpi_os_printf("// ");
 			}
 
-			buf_char = buffer[(acpi_size) i + j];
+			buf_char = buffer[(acpi_size)i + j];
 			if (isprint(buf_char)) {
 				acpi_os_printf("%c", buf_char);
 			} else {
@@ -274,31 +274,31 @@
 			default:	/* Default is BYTE display */
 
 				acpi_ut_file_printf(file, "%02X ",
-						    buffer[(acpi_size) i + j]);
+						    buffer[(acpi_size)i + j]);
 				break;
 
 			case DB_WORD_DISPLAY:
 
 				ACPI_MOVE_16_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j]);
+						   &buffer[(acpi_size)i + j]);
 				acpi_ut_file_printf(file, "%04X ", temp32);
 				break;
 
 			case DB_DWORD_DISPLAY:
 
 				ACPI_MOVE_32_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j]);
+						   &buffer[(acpi_size)i + j]);
 				acpi_ut_file_printf(file, "%08X ", temp32);
 				break;
 
 			case DB_QWORD_DISPLAY:
 
 				ACPI_MOVE_32_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j]);
+						   &buffer[(acpi_size)i + j]);
 				acpi_ut_file_printf(file, "%08X", temp32);
 
 				ACPI_MOVE_32_TO_32(&temp32,
-						   &buffer[(acpi_size) i + j +
+						   &buffer[(acpi_size)i + j +
 							   4]);
 				acpi_ut_file_printf(file, "%08X ", temp32);
 				break;
@@ -318,7 +318,7 @@
 				return;
 			}
 
-			buf_char = buffer[(acpi_size) i + j];
+			buf_char = buffer[(acpi_size)i + j];
 			if (isprint(buf_char)) {
 				acpi_ut_file_printf(file, "%c", buf_char);
 			} else {
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index f8e9978..3b8d23e 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -105,7 +105,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
+acpi_status acpi_os_purge_cache(struct acpi_memory_list *cache)
 {
 	void *next;
 	acpi_status status;
@@ -151,7 +151,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
+acpi_status acpi_os_delete_cache(struct acpi_memory_list *cache)
 {
 	acpi_status status;
 
@@ -184,8 +184,7 @@
  *
  ******************************************************************************/
 
-acpi_status
-acpi_os_release_object(struct acpi_memory_list * cache, void *object)
+acpi_status acpi_os_release_object(struct acpi_memory_list *cache, void *object)
 {
 	acpi_status status;
 
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 98d53e5..82f9714 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -53,7 +53,7 @@
 static acpi_status
 acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
 				union acpi_object *external_object,
-				u8 * data_space, acpi_size * buffer_space_used);
+				u8 *data_space, acpi_size *buffer_space_used);
 
 static acpi_status
 acpi_ut_copy_ielement_to_ielement(u8 object_type,
@@ -63,7 +63,7 @@
 
 static acpi_status
 acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
-				  u8 * buffer, acpi_size * space_used);
+				  u8 *buffer, acpi_size *space_used);
 
 static acpi_status
 acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj,
@@ -111,7 +111,7 @@
 static acpi_status
 acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
 				union acpi_object *external_object,
-				u8 * data_space, acpi_size * buffer_space_used)
+				u8 *data_space, acpi_size *buffer_space_used)
 {
 	acpi_status status = AE_OK;
 
@@ -151,7 +151,7 @@
 
 		memcpy((void *)data_space,
 		       (void *)internal_object->string.pointer,
-		       (acpi_size) internal_object->string.length + 1);
+		       (acpi_size)internal_object->string.length + 1);
 		break;
 
 	case ACPI_TYPE_BUFFER:
@@ -331,7 +331,7 @@
 
 static acpi_status
 acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
-				  u8 * buffer, acpi_size * space_used)
+				  u8 *buffer, acpi_size *space_used)
 {
 	union acpi_object *external_object;
 	acpi_status status;
@@ -362,7 +362,7 @@
 	 * Leave room for an array of ACPI_OBJECTS in the buffer
 	 * and move the free space past it
 	 */
-	info.length += (acpi_size) external_object->package.count *
+	info.length += (acpi_size)external_object->package.count *
 	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
 	info.free_space += external_object->package.count *
 	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
@@ -738,7 +738,7 @@
 		 */
 		if (source_desc->string.pointer) {
 			dest_desc->string.pointer =
-			    ACPI_ALLOCATE((acpi_size) source_desc->string.
+			    ACPI_ALLOCATE((acpi_size)source_desc->string.
 					  length + 1);
 			if (!dest_desc->string.pointer) {
 				return (AE_NO_MEMORY);
@@ -748,7 +748,7 @@
 
 			memcpy(dest_desc->string.pointer,
 			       source_desc->string.pointer,
-			       (acpi_size) source_desc->string.length + 1);
+			       (acpi_size)source_desc->string.length + 1);
 		}
 		break;
 
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 1cfc5f6..5744222 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -51,13 +51,9 @@
 ACPI_MODULE_NAME("utdebug")
 
 #ifdef ACPI_DEBUG_OUTPUT
-static acpi_thread_id acpi_gbl_prev_thread_id = (acpi_thread_id) 0xFFFFFFFF;
-static char *acpi_gbl_fn_entry_str = "----Entry";
-static char *acpi_gbl_fn_exit_str = "----Exit-";
-
-/* Local prototypes */
-
-static const char *acpi_ut_trim_function_name(const char *function_name);
+static acpi_thread_id acpi_gbl_previous_thread_id = (acpi_thread_id) 0xFFFFFFFF;
+static const char *acpi_gbl_function_entry_prefix = "----Entry";
+static const char *acpi_gbl_function_exit_prefix = "----Exit-";
 
 /*******************************************************************************
  *
@@ -178,14 +174,14 @@
 	 * Thread tracking and context switch notification
 	 */
 	thread_id = acpi_os_get_thread_id();
-	if (thread_id != acpi_gbl_prev_thread_id) {
+	if (thread_id != acpi_gbl_previous_thread_id) {
 		if (ACPI_LV_THREADS & acpi_dbg_level) {
 			acpi_os_printf
 			    ("\n**** Context Switch from TID %u to TID %u ****\n\n",
-			     (u32)acpi_gbl_prev_thread_id, (u32)thread_id);
+			     (u32)acpi_gbl_previous_thread_id, (u32)thread_id);
 		}
 
-		acpi_gbl_prev_thread_id = thread_id;
+		acpi_gbl_previous_thread_id = thread_id;
 		acpi_gbl_nesting_level = 0;
 	}
 
@@ -287,7 +283,8 @@
 	if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
 		acpi_debug_print(ACPI_LV_FUNCTIONS,
 				 line_number, function_name, module_name,
-				 component_id, "%s\n", acpi_gbl_fn_entry_str);
+				 component_id, "%s\n",
+				 acpi_gbl_function_entry_prefix);
 	}
 }
 
@@ -312,7 +309,8 @@
 void
 acpi_ut_trace_ptr(u32 line_number,
 		  const char *function_name,
-		  const char *module_name, u32 component_id, void *pointer)
+		  const char *module_name,
+		  u32 component_id, const void *pointer)
 {
 
 	acpi_gbl_nesting_level++;
@@ -323,8 +321,8 @@
 	if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
 		acpi_debug_print(ACPI_LV_FUNCTIONS,
 				 line_number, function_name, module_name,
-				 component_id, "%s %p\n", acpi_gbl_fn_entry_str,
-				 pointer);
+				 component_id, "%s %p\n",
+				 acpi_gbl_function_entry_prefix, pointer);
 	}
 }
 
@@ -348,7 +346,7 @@
 void
 acpi_ut_trace_str(u32 line_number,
 		  const char *function_name,
-		  const char *module_name, u32 component_id, char *string)
+		  const char *module_name, u32 component_id, const char *string)
 {
 
 	acpi_gbl_nesting_level++;
@@ -359,8 +357,8 @@
 	if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
 		acpi_debug_print(ACPI_LV_FUNCTIONS,
 				 line_number, function_name, module_name,
-				 component_id, "%s %s\n", acpi_gbl_fn_entry_str,
-				 string);
+				 component_id, "%s %s\n",
+				 acpi_gbl_function_entry_prefix, string);
 	}
 }
 
@@ -396,7 +394,7 @@
 		acpi_debug_print(ACPI_LV_FUNCTIONS,
 				 line_number, function_name, module_name,
 				 component_id, "%s %08X\n",
-				 acpi_gbl_fn_entry_str, integer);
+				 acpi_gbl_function_entry_prefix, integer);
 	}
 }
 
@@ -427,7 +425,8 @@
 	if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
 		acpi_debug_print(ACPI_LV_FUNCTIONS,
 				 line_number, function_name, module_name,
-				 component_id, "%s\n", acpi_gbl_fn_exit_str);
+				 component_id, "%s\n",
+				 acpi_gbl_function_exit_prefix);
 	}
 
 	if (acpi_gbl_nesting_level) {
@@ -467,14 +466,14 @@
 			acpi_debug_print(ACPI_LV_FUNCTIONS,
 					 line_number, function_name,
 					 module_name, component_id, "%s %s\n",
-					 acpi_gbl_fn_exit_str,
+					 acpi_gbl_function_exit_prefix,
 					 acpi_format_exception(status));
 		} else {
 			acpi_debug_print(ACPI_LV_FUNCTIONS,
 					 line_number, function_name,
 					 module_name, component_id,
 					 "%s ****Exception****: %s\n",
-					 acpi_gbl_fn_exit_str,
+					 acpi_gbl_function_exit_prefix,
 					 acpi_format_exception(status));
 		}
 	}
@@ -514,7 +513,7 @@
 		acpi_debug_print(ACPI_LV_FUNCTIONS,
 				 line_number, function_name, module_name,
 				 component_id, "%s %8.8X%8.8X\n",
-				 acpi_gbl_fn_exit_str,
+				 acpi_gbl_function_exit_prefix,
 				 ACPI_FORMAT_UINT64(value));
 	}
 
@@ -552,8 +551,8 @@
 	if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
 		acpi_debug_print(ACPI_LV_FUNCTIONS,
 				 line_number, function_name, module_name,
-				 component_id, "%s %p\n", acpi_gbl_fn_exit_str,
-				 ptr);
+				 component_id, "%s %p\n",
+				 acpi_gbl_function_exit_prefix, ptr);
 	}
 
 	if (acpi_gbl_nesting_level) {
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 6ba65b0..efd7988 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -446,7 +446,7 @@
 
 /* Names for Notify() values, used for debug output */
 
-static const char *acpi_gbl_generic_notify[ACPI_NOTIFY_MAX + 1] = {
+static const char *acpi_gbl_generic_notify[ACPI_GENERIC_NOTIFY_MAX + 1] = {
 	/* 00 */ "Bus Check",
 	/* 01 */ "Device Check",
 	/* 02 */ "Device Wake",
@@ -459,49 +459,53 @@
 	/* 09 */ "Device PLD Check",
 	/* 0A */ "Reserved",
 	/* 0B */ "System Locality Update",
-	/* 0C */ "Shutdown Request",
+					/* 0C */ "Shutdown Request",
+					/* Reserved in ACPI 6.0 */
 	/* 0D */ "System Resource Affinity Update"
 };
 
-static const char *acpi_gbl_device_notify[4] = {
+static const char *acpi_gbl_device_notify[5] = {
 	/* 80 */ "Status Change",
 	/* 81 */ "Information Change",
 	/* 82 */ "Device-Specific Change",
-	/* 83 */ "Device-Specific Change"
+	/* 83 */ "Device-Specific Change",
+	/* 84 */ "Reserved"
 };
 
-static const char *acpi_gbl_processor_notify[4] = {
+static const char *acpi_gbl_processor_notify[5] = {
 	/* 80 */ "Performance Capability Change",
 	/* 81 */ "C-State Change",
 	/* 82 */ "Throttling Capability Change",
-	/* 83 */ "Device-Specific Change"
+	/* 83 */ "Guaranteed Change",
+	/* 84 */ "Minimum Excursion"
 };
 
-static const char *acpi_gbl_thermal_notify[4] = {
+static const char *acpi_gbl_thermal_notify[5] = {
 	/* 80 */ "Thermal Status Change",
 	/* 81 */ "Thermal Trip Point Change",
 	/* 82 */ "Thermal Device List Change",
-	/* 83 */ "Thermal Relationship Change"
+	/* 83 */ "Thermal Relationship Change",
+	/* 84 */ "Reserved"
 };
 
 const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type)
 {
 
-	/* 00 - 0D are common to all object types */
+	/* 00 - 0D are "common to all object types" (from ACPI Spec) */
 
-	if (notify_value <= ACPI_NOTIFY_MAX) {
+	if (notify_value <= ACPI_GENERIC_NOTIFY_MAX) {
 		return (acpi_gbl_generic_notify[notify_value]);
 	}
 
-	/* 0D - 7F are reserved */
+	/* 0E - 7F are reserved */
 
 	if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
 		return ("Reserved");
 	}
 
-	/* 80 - 83 are per-object-type */
+	/* 80 - 84 are per-object-type */
 
-	if (notify_value <= 0x83) {
+	if (notify_value <= ACPI_SPECIFIC_NOTIFY_MAX) {
 		switch (type) {
 		case ACPI_TYPE_ANY:
 		case ACPI_TYPE_DEVICE:
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 17b9f3e..7bad13f 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -69,7 +69,7 @@
 
 acpi_status
 acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
-			char *path,
+			const char *path,
 			u32 expected_return_btypes,
 			union acpi_operand_object **return_desc)
 {
@@ -204,7 +204,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ut_evaluate_numeric_object(char *object_name,
+acpi_ut_evaluate_numeric_object(const char *object_name,
 				struct acpi_namespace_node *device_node,
 				u64 *value)
 {
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 48fffcf..dd3fd7f 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -80,6 +80,11 @@
 	"_S4D"
 };
 
+/* Hex-to-ascii */
+
+const char acpi_gbl_lower_hex_digits[] = "0123456789abcdef";
+const char acpi_gbl_upper_hex_digits[] = "0123456789ABCDEF";
+
 /*******************************************************************************
  *
  * Namespace globals
@@ -221,6 +226,49 @@
 };
 #endif				/* !ACPI_REDUCED_HARDWARE */
 
+#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
+
+/* to_pld macro: compile/disassemble strings */
+
+const char *acpi_gbl_pld_panel_list[] = {
+	"TOP",
+	"BOTTOM",
+	"LEFT",
+	"RIGHT",
+	"FRONT",
+	"BACK",
+	"UNKNOWN",
+	NULL
+};
+
+const char *acpi_gbl_pld_vertical_position_list[] = {
+	"UPPER",
+	"CENTER",
+	"LOWER",
+	NULL
+};
+
+const char *acpi_gbl_pld_horizontal_position_list[] = {
+	"LEFT",
+	"CENTER",
+	"RIGHT",
+	NULL
+};
+
+const char *acpi_gbl_pld_shape_list[] = {
+	"ROUND",
+	"OVAL",
+	"SQUARE",
+	"VERTICALRECTANGLE",
+	"HORIZONTALRECTANGLE",
+	"VERTICALTRAPEZOID",
+	"HORIZONTALTRAPEZOID",
+	"UNKNOWN",
+	"CHAMFERED",
+	NULL
+};
+#endif
+
 /* Public globals */
 
 ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 6fb4ec3..f7cd2d5 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -95,7 +95,7 @@
 
 	hid =
 	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
-				 (acpi_size) length);
+				 (acpi_size)length);
 	if (!hid) {
 		status = AE_NO_MEMORY;
 		goto cleanup;
@@ -173,7 +173,7 @@
 
 	uid =
 	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
-				 (acpi_size) length);
+				 (acpi_size)length);
 	if (!uid) {
 		status = AE_NO_MEMORY;
 		goto cleanup;
@@ -309,7 +309,7 @@
 	/* Area for CID strings starts after the CID PNP_DEVICE_ID array */
 
 	next_id_string = ACPI_CAST_PTR(char, cid_list->ids) +
-	    ((acpi_size) count * sizeof(struct acpi_pnp_device_id));
+	    ((acpi_size)count * sizeof(struct acpi_pnp_device_id));
 
 	/* Copy/convert the CIDs to the return buffer */
 
@@ -413,7 +413,7 @@
 
 	cls =
 	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
-				 (acpi_size) length);
+				 (acpi_size)length);
 	if (!cls) {
 		status = AE_NO_MEMORY;
 		goto cleanup;
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 6673720..2d6530e 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -236,8 +236,8 @@
 			}
 
 			remainder.full = remainder.full - dividend.full;
-			remainder.part.hi = (u32) - ((s32) remainder.part.hi);
-			remainder.part.lo = (u32) - ((s32) remainder.part.lo);
+			remainder.part.hi = (u32)-((s32)remainder.part.hi);
+			remainder.part.lo = (u32)-((s32)remainder.part.lo);
 
 			if (remainder.part.lo) {
 				remainder.part.hi--;
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index d938c27..389de3b 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -361,7 +361,7 @@
 void
 acpi_ut_display_init_pathname(u8 type,
 			      struct acpi_namespace_node *obj_handle,
-			      char *path)
+			      const char *path)
 {
 	acpi_status status;
 	struct acpi_buffer buffer;
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index d5c3adf..3465fe2 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -205,37 +205,41 @@
  *
  * FUNCTION:    acpi_ut_strtoul64
  *
- * PARAMETERS:  string          - Null terminated string
- *              base            - Radix of the string: 16 or ACPI_ANY_BASE;
- *                                ACPI_ANY_BASE means 'in behalf of to_integer'
- *              ret_integer     - Where the converted integer is returned
+ * PARAMETERS:  string                  - Null terminated string
+ *              base                    - Radix of the string: 16 or 10 or
+ *                                        ACPI_ANY_BASE
+ *              max_integer_byte_width  - Maximum allowable integer, in bytes:
+ *                                        4 or 8 (32 or 64 bits)
+ *              ret_integer             - Where the converted integer is
+ *                                        returned
  *
  * RETURN:      Status and Converted value
  *
  * DESCRIPTION: Convert a string into an unsigned value. Performs either a
- *              32-bit or 64-bit conversion, depending on the current mode
- *              of the interpreter.
+ *              32-bit or 64-bit conversion, depending on the input integer
+ *              size (often the current mode of the interpreter).
  *
- * NOTES:       acpi_gbl_integer_byte_width should be set to the proper width.
+ * NOTES:       Negative numbers are not supported, as they are not supported
+ *              by ACPI.
+ *
+ *              acpi_gbl_integer_byte_width should be set to the proper width.
  *              For the core ACPICA code, this width depends on the DSDT
- *              version. For iASL, the default byte width is always 8.
+ *              version. For iASL, the default byte width is always 8 for the
+ *              parser, but error checking is performed later to flag cases
+ *              where a 64-bit constant is defined in a 32-bit DSDT/SSDT.
  *
  *              Does not support Octal strings, not needed at this time.
  *
- *              There is an earlier version of the function after this one,
- *              below. It is slightly different than this one, and the two
- *              may eventually may need to be merged. (01/2016).
- *
  ******************************************************************************/
 
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
+acpi_status
+acpi_ut_strtoul64(char *string,
+		  u32 base, u32 max_integer_byte_width, u64 *ret_integer)
 {
 	u32 this_digit = 0;
 	u64 return_value = 0;
 	u64 quotient;
 	u64 dividend;
-	u32 to_integer_op = (base == ACPI_ANY_BASE);
-	u32 mode32 = (acpi_gbl_integer_byte_width == 4);
 	u8 valid_digits = 0;
 	u8 sign_of0x = 0;
 	u8 term = 0;
@@ -244,6 +248,7 @@
 
 	switch (base) {
 	case ACPI_ANY_BASE:
+	case 10:
 	case 16:
 
 		break;
@@ -265,9 +270,9 @@
 		string++;
 	}
 
-	if (to_integer_op) {
+	if (base == ACPI_ANY_BASE) {
 		/*
-		 * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
+		 * Base equal to ACPI_ANY_BASE means 'Either decimal or hex'.
 		 * We need to determine if it is decimal or hexadecimal.
 		 */
 		if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
@@ -284,7 +289,7 @@
 	/* Any string left? Check that '0x' is not followed by white space. */
 
 	if (!(*string) || isspace((int)*string) || *string == '\t') {
-		if (to_integer_op) {
+		if (base == ACPI_ANY_BASE) {
 			goto error_exit;
 		} else {
 			goto all_done;
@@ -292,10 +297,11 @@
 	}
 
 	/*
-	 * Perform a 32-bit or 64-bit conversion, depending upon the current
-	 * execution mode of the interpreter
+	 * Perform a 32-bit or 64-bit conversion, depending upon the input
+	 * byte width
 	 */
-	dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
+	dividend = (max_integer_byte_width <= ACPI_MAX32_BYTE_WIDTH) ?
+	    ACPI_UINT32_MAX : ACPI_UINT64_MAX;
 
 	/* Main loop: convert the string to a 32- or 64-bit integer */
 
@@ -323,7 +329,7 @@
 		}
 
 		if (term) {
-			if (to_integer_op) {
+			if (base == ACPI_ANY_BASE) {
 				goto error_exit;
 			} else {
 				break;
@@ -338,12 +344,13 @@
 
 		valid_digits++;
 
-		if (sign_of0x
-		    && ((valid_digits > 16)
-			|| ((valid_digits > 8) && mode32))) {
+		if (sign_of0x && ((valid_digits > 16) ||
+				  ((valid_digits > 8)
+				   && (max_integer_byte_width <=
+				       ACPI_MAX32_BYTE_WIDTH)))) {
 			/*
 			 * This is to_integer operation case.
-			 * No any restrictions for string-to-integer conversion,
+			 * No restrictions for string-to-integer conversion,
 			 * see ACPI spec.
 			 */
 			goto error_exit;
@@ -355,7 +362,7 @@
 					   &quotient, NULL);
 
 		if (return_value > quotient) {
-			if (to_integer_op) {
+			if (base == ACPI_ANY_BASE) {
 				goto error_exit;
 			} else {
 				break;
@@ -378,7 +385,8 @@
 	return_ACPI_STATUS(AE_OK);
 
 error_exit:
-	/* Base was set/validated above */
+
+	/* Base was set/validated above (10 or 16) */
 
 	if (base == 10) {
 		return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
@@ -388,8 +396,7 @@
 }
 
 #ifdef _OBSOLETE_FUNCTIONS
-/* TBD: use version in ACPICA main code base? */
-/* DONE: 01/2016 */
+/* Removed: 01/2016 */
 
 /*******************************************************************************
  *
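
As a quick illustration of the widened interface above, a minimal caller sketch
(the wrapper function and its "buffer" argument are hypothetical; acpi_ut_strtoul64(),
ACPI_ANY_BASE, ACPI_MAX32_BYTE_WIDTH and acpi_gbl_integer_byte_width are the symbols
used in the hunks):

    /* Sketch only -- not part of the patch; shows the new call signature. */
    static acpi_status example_convert(char *buffer, u64 *value)
    {
        acpi_status status;

        /* ToInteger-style: radix auto-detected, width taken from the DSDT */
        status = acpi_ut_strtoul64(buffer, ACPI_ANY_BASE,
                                   acpi_gbl_integer_byte_width, value);
        if (ACPI_FAILURE(status))
            return status;

        /* Explicit base-16 conversion, clamped to a 32-bit range */
        return acpi_ut_strtoul64(buffer, 16, ACPI_MAX32_BYTE_WIDTH, value);
    }
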
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index edad3f0..72b9a06 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -51,11 +51,11 @@
 /* Local prototypes */
 static acpi_status
 acpi_ut_get_simple_object_size(union acpi_operand_object *obj,
-			       acpi_size * obj_length);
+			       acpi_size *obj_length);
 
 static acpi_status
 acpi_ut_get_package_object_size(union acpi_operand_object *obj,
-				acpi_size * obj_length);
+				acpi_size *obj_length);
 
 static acpi_status
 acpi_ut_get_element_length(u8 object_type,
@@ -177,7 +177,7 @@
 	 * Create the element array. Count+1 allows the array to be null
 	 * terminated.
 	 */
-	package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
+	package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size)count +
 						 1) * sizeof(void *));
 	if (!package_elements) {
 		ACPI_FREE(package_desc);
@@ -454,7 +454,7 @@
 
 static acpi_status
 acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
-			       acpi_size * obj_length)
+			       acpi_size *obj_length)
 {
 	acpi_size length;
 	acpi_size size;
@@ -495,12 +495,12 @@
 	switch (internal_object->common.type) {
 	case ACPI_TYPE_STRING:
 
-		length += (acpi_size) internal_object->string.length + 1;
+		length += (acpi_size)internal_object->string.length + 1;
 		break;
 
 	case ACPI_TYPE_BUFFER:
 
-		length += (acpi_size) internal_object->buffer.length;
+		length += (acpi_size)internal_object->buffer.length;
 		break;
 
 	case ACPI_TYPE_INTEGER:
@@ -640,7 +640,7 @@
 
 static acpi_status
 acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
-				acpi_size * obj_length)
+				acpi_size *obj_length)
 {
 	acpi_status status;
 	struct acpi_pkg_info info;
@@ -665,7 +665,7 @@
 	 */
 	info.length +=
 	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)) *
-	    (acpi_size) info.num_packages;
+	    (acpi_size)info.num_packages;
 
 	/* Return the total package length */
 
@@ -689,7 +689,7 @@
 
 acpi_status
 acpi_ut_get_object_size(union acpi_operand_object *internal_object,
-			acpi_size * obj_length)
+			acpi_size *obj_length)
 {
 	acpi_status status;
 
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index b5cfe57..3f5fed6 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -150,7 +150,7 @@
 	     i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1);
 	     i++) {
 		acpi_default_supported_interfaces[i].next =
-		    &acpi_default_supported_interfaces[(acpi_size) i + 1];
+		    &acpi_default_supported_interfaces[(acpi_size)i + 1];
 	}
 
 	acpi_os_release_mutex(acpi_gbl_osi_mutex);
@@ -397,7 +397,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
 {
 	union acpi_operand_object *string_desc;
 	union acpi_operand_object *return_desc;
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 813520a..3cd573c 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -61,7 +61,7 @@
  *              when the method exits or the table is unloaded.
  *
  ******************************************************************************/
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id)
 {
 	u32 i;
 	u32 j;
@@ -122,7 +122,7 @@
 				 * permanently allocated (prevents +1 overflow)
 				 */
 				*owner_id =
-				    (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
+				    (acpi_owner_id)((k + 1) + ACPI_MUL_32(j));
 
 				ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
 						  "Allocated OwnerId: %2.2X\n",
@@ -167,7 +167,7 @@
  *
  ******************************************************************************/
 
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
+void acpi_ut_release_owner_id(acpi_owner_id *owner_id_ptr)
 {
 	acpi_owner_id owner_id = *owner_id_ptr;
 	acpi_status status;
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 8c218ad..dd084cf 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -67,11 +67,6 @@
 
 static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper);
 
-/* Module globals */
-
-static const char acpi_gbl_lower_hex_digits[] = "0123456789abcdef";
-static const char acpi_gbl_upper_hex_digits[] = "0123456789ABCDEF";
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_bound_string_length
@@ -269,9 +264,9 @@
 
 	sign = '\0';
 	if (type & ACPI_FORMAT_SIGN) {
-		if ((s64) number < 0) {
+		if ((s64)number < 0) {
 			sign = '-';
-			number = -(s64) number;
+			number = -(s64)number;
 			width--;
 		} else if (type & ACPI_FORMAT_SIGN_PLUS) {
 			sign = '+';
@@ -409,7 +404,7 @@
 		width = -1;
 		if (isdigit((int)*format)) {
 			format = acpi_ut_scan_number(format, &number);
-			width = (s32) number;
+			width = (s32)number;
 		} else if (*format == '*') {
 			++format;
 			width = va_arg(args, int);
@@ -426,7 +421,7 @@
 			++format;
 			if (isdigit((int)*format)) {
 				format = acpi_ut_scan_number(format, &number);
-				precision = (s32) number;
+				precision = (s32)number;
 			} else if (*format == '*') {
 				++format;
 				precision = va_arg(args, int);
@@ -555,17 +550,17 @@
 		if (qualifier == 'L') {
 			number = va_arg(args, u64);
 			if (type & ACPI_FORMAT_SIGN) {
-				number = (s64) number;
+				number = (s64)number;
 			}
 		} else if (qualifier == 'l') {
 			number = va_arg(args, unsigned long);
 			if (type & ACPI_FORMAT_SIGN) {
-				number = (s32) number;
+				number = (s32)number;
 			}
 		} else if (qualifier == 'h') {
 			number = (u16)va_arg(args, int);
 			if (type & ACPI_FORMAT_SIGN) {
-				number = (s16) number;
+				number = (s16)number;
 			}
 		} else {
 			number = va_arg(args, unsigned int);
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 0b00572..288913a 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -130,7 +130,7 @@
 			} else {
 				/* All others will be Hex escapes */
 
-				acpi_os_printf("\\x%2.2X", (s32) string[i]);
+				acpi_os_printf("\\x%2.2X", (s32)string[i]);
 			}
 			break;
 		}
@@ -145,73 +145,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ut_valid_acpi_char
- *
- * PARAMETERS:  char            - The character to be examined
- *              position        - Byte position (0-3)
- *
- * RETURN:      TRUE if the character is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI character. Must be one of:
- *              1) Upper case alpha
- *              2) numeric
- *              3) underscore
- *
- *              We allow a '!' as the last character because of the ASF! table
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position)
-{
-
-	if (!((character >= 'A' && character <= 'Z') ||
-	      (character >= '0' && character <= '9') || (character == '_'))) {
-
-		/* Allow a '!' in the last position */
-
-		if (character == '!' && position == 3) {
-			return (TRUE);
-		}
-
-		return (FALSE);
-	}
-
-	return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_valid_acpi_name
- *
- * PARAMETERS:  name            - The name to be examined. Does not have to
- *                                be NULL terminated string.
- *
- * RETURN:      TRUE if the name is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
- *              1) Upper case alpha
- *              2) numeric
- *              3) underscore
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_name(char *name)
-{
-	u32 i;
-
-	ACPI_FUNCTION_ENTRY();
-
-	for (i = 0; i < ACPI_NAME_SIZE; i++) {
-		if (!acpi_ut_valid_acpi_char(name[i], i)) {
-			return (FALSE);
-		}
-	}
-
-	return (TRUE);
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ut_repair_name
  *
  * PARAMETERS:  name            - The ACPI name to be repaired
@@ -253,7 +186,7 @@
 	/* Check each character in the name */
 
 	for (i = 0; i < ACPI_NAME_SIZE; i++) {
-		if (acpi_ut_valid_acpi_char(name[i], i)) {
+		if (acpi_ut_valid_name_char(name[i], i)) {
 			continue;
 		}
 
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 60c406a..0df07df 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -90,7 +90,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ut_create_list(char *list_name,
+acpi_ut_create_list(const char *list_name,
 		    u16 object_size, struct acpi_memory_list **return_cache)
 {
 	struct acpi_memory_list *cache;
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 68d4673..d9e6aac 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -127,7 +127,7 @@
  *              and the value of out_buffer is undefined.
  *
  ******************************************************************************/
-acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
+acpi_status acpi_get_system_info(struct acpi_buffer *out_buffer)
 {
 	struct acpi_system_info *info_ptr;
 	acpi_status status;
@@ -483,7 +483,7 @@
  ******************************************************************************/
 acpi_status
 acpi_decode_pld_buffer(u8 *in_buffer,
-		       acpi_size length, struct acpi_pld_info ** return_buffer)
+		       acpi_size length, struct acpi_pld_info **return_buffer)
 {
 	struct acpi_pld_info *pld_info;
 	u32 *buffer = ACPI_CAST_PTR(u32, in_buffer);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 96809cd..bdc67ba 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -3,7 +3,7 @@
  *
  *  Check to see if the given machine has a known bad ACPI BIOS
  *  or if the BIOS is too old.
- *  Check given machine against acpi_osi_dmi_table[].
+ *  Check given machine against acpi_rev_dmi_table[].
  *
  *  Copyright (C) 2004 Len Brown <len.brown@intel.com>
  *  Copyright (C) 2002 Andy Grover <andrew.grover@intel.com>
@@ -47,7 +47,7 @@
 	u32 is_critical_error;
 };
 
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
+static struct dmi_system_id acpi_rev_dmi_table[] __initdata;
 
 /*
  * POLICY: If *anything* doesn't work, put it on the blacklist.
@@ -128,36 +128,12 @@
 		}
 	}
 
-	dmi_check_system(acpi_osi_dmi_table);
+	(void)early_acpi_osi_init();
+	dmi_check_system(acpi_rev_dmi_table);
 
 	return blacklisted;
 }
 #ifdef CONFIG_DMI
-static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
-{
-	acpi_dmi_osi_linux(1, d);	/* enable */
-	return 0;
-}
-static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
-{
-	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
-	acpi_osi_setup("!Windows 2006");
-	acpi_osi_setup("!Windows 2006 SP1");
-	acpi_osi_setup("!Windows 2006 SP2");
-	return 0;
-}
-static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
-{
-	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
-	acpi_osi_setup("!Windows 2009");
-	return 0;
-}
-static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
-{
-	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
-	acpi_osi_setup("!Windows 2012");
-	return 0;
-}
 #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
 static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
 {
@@ -168,169 +144,7 @@
 }
 #endif
 
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
-	{
-	.callback = dmi_disable_osi_vista,
-	.ident = "Fujitsu Siemens",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
-		},
-	},
-	{
-	/*
-	 * There have a NVIF method in MSI GX723 DSDT need call by Nvidia
-	 * driver (e.g. nouveau) when user press brightness hotkey.
-	 * Currently, nouveau driver didn't do the job and it causes there
-	 * have a infinite while loop in DSDT when user press hotkey.
-	 * We add MSI GX723's dmi information to this table for workaround
-	 * this issue.
-	 * Will remove MSI GX723 from the table after nouveau grows support.
-	 */
-	.callback = dmi_disable_osi_vista,
-	.ident = "MSI GX723",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_vista,
-	.ident = "Sony VGN-NS10J_S",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_vista,
-	.ident = "Sony VGN-SR290J",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_vista,
-	.ident = "VGN-NS50B_L",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS50B_L"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_vista,
-	.ident = "VGN-SR19XN",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR19XN"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_vista,
-	.ident = "Toshiba Satellite L355",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-		     DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_win7,
-	.ident = "ASUS K50IJ",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_vista,
-	.ident = "Toshiba P305D",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_vista,
-	.ident = "Toshiba NB100",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
-		},
-	},
-
-	/*
-	 * The wireless hotkey does not work on those machines when
-	 * returning true for _OSI("Windows 2012")
-	 */
-	{
-	.callback = dmi_disable_osi_win8,
-	.ident = "Dell Inspiron 7737",
-	.matches = {
-		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		    DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_win8,
-	.ident = "Dell Inspiron 7537",
-	.matches = {
-		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		    DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_win8,
-	.ident = "Dell Inspiron 5437",
-	.matches = {
-		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		    DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_win8,
-	.ident = "Dell Inspiron 3437",
-	.matches = {
-		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		    DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_win8,
-	.ident = "Dell Vostro 3446",
-	.matches = {
-		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		    DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
-		},
-	},
-	{
-	.callback = dmi_disable_osi_win8,
-	.ident = "Dell Vostro 3546",
-	.matches = {
-		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-		    DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
-		},
-	},
-
-	/*
-	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
-	 * Linux ignores it, except for the machines enumerated below.
-	 */
-
-	/*
-	 * Without this this EEEpc exports a non working WMI interface, with
-	 * this it exports a working "good old" eeepc_laptop interface, fixing
-	 * both brightness control, and rfkill not working.
-	 */
-	{
-	.callback = dmi_enable_osi_linux,
-	.ident = "Asus EEE PC 1015PX",
-	.matches = {
-		     DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
-		     DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
-		},
-	},
-
+static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
 #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
 	/*
 	 * DELL XPS 13 (2015) switches sound between HDA and I2S
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 0e85678..31e8da6 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -925,11 +925,13 @@
 		goto error0;
 	}
 
-	status = acpi_load_tables();
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR PREFIX
-		       "Unable to load the System Description Tables\n");
-		goto error0;
+	if (acpi_gbl_group_module_level_code) {
+		status = acpi_load_tables();
+		if (ACPI_FAILURE(status)) {
+			printk(KERN_ERR PREFIX
+			       "Unable to load the System Description Tables\n");
+			goto error0;
+		}
 	}
 
 #ifdef CONFIG_X86
@@ -995,17 +997,10 @@
 
 	acpi_os_initialize1();
 
-	status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR PREFIX
-		       "Unable to start the ACPI Interpreter\n");
-		goto error1;
-	}
-
 	/*
 	 * ACPI 2.0 requires the EC driver to be loaded and work before
-	 * the EC device is found in the namespace (i.e. before acpi_initialize_objects()
-	 * is called).
+	 * the EC device is found in the namespace (i.e. before
+	 * acpi_load_tables() is called).
 	 *
 	 * This is accomplished by looking for the ECDT table, and getting
 	 * the EC parameters out of that.
@@ -1013,12 +1008,31 @@
 	status = acpi_ec_ecdt_probe();
 	/* Ignore result. Not having an ECDT is not fatal. */
 
+	if (!acpi_gbl_group_module_level_code) {
+		status = acpi_load_tables();
+		if (ACPI_FAILURE(status)) {
+			printk(KERN_ERR PREFIX
+			       "Unable to load the System Description Tables\n");
+			goto error1;
+		}
+	}
+
+	status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_ERR PREFIX
+		       "Unable to start the ACPI Interpreter\n");
+		goto error1;
+	}
+
 	status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
 	if (ACPI_FAILURE(status)) {
 		printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
 		goto error1;
 	}
 
+	/* Set capability bits for _OSC under processor scope */
+	acpi_early_processor_osc();
+
 	/*
 	 * _OSC method may exist in module level code,
 	 * so it must be run after ACPI_FULL_INITIALIZATION
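
Condensed from the two hunks above, the bring-up order that results when
acpi_gbl_group_module_level_code is not set (error handling omitted; this is a
reading aid assembled from the calls in the patch, not code copied from it):

    acpi_ec_ecdt_probe();                               /* ECDT-based boot EC */
    acpi_load_tables();                                 /* DSDT and SSDTs */
    acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);         /* start the interpreter */
    acpi_initialize_objects(ACPI_FULL_INITIALIZATION);  /* finish namespace object init */
    acpi_early_processor_osc();                         /* processor _OSC capability bits */
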
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index b9afb47d..7b2c48f 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -35,7 +35,7 @@
 	if (result)
 		return result;
 
-	result = sprintf(buf, "%s\n", (char*)path.pointer);
+	result = sprintf(buf, "%s\n", (char *)path.pointer);
 	kfree(path.pointer);
 	return result;
 }
@@ -333,7 +333,8 @@
 EXPORT_SYMBOL_GPL(acpi_device_modalias);
 
 static ssize_t
-acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
+acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
 	return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
 }
 static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
@@ -397,7 +398,8 @@
 static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
 
 static ssize_t
-acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
+acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
 
 	return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
@@ -467,12 +469,27 @@
 
 	status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
 	if (ACPI_FAILURE(status))
-		return -ENODEV;
+		return -EIO;
 
 	return sprintf(buf, "%llu\n", sun);
 }
 static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
 
+static ssize_t
+acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
+		     char *buf) {
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+	acpi_status status;
+	unsigned long long hrv;
+
+	status = acpi_evaluate_integer(acpi_dev->handle, "_HRV", NULL, &hrv);
+	if (ACPI_FAILURE(status))
+		return -EIO;
+
+	return sprintf(buf, "%llu\n", hrv);
+}
+static DEVICE_ATTR(hrv, 0444, acpi_device_hrv_show, NULL);
+
 static ssize_t status_show(struct device *dev, struct device_attribute *attr,
 				char *buf) {
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -481,7 +498,7 @@
 
 	status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
 	if (ACPI_FAILURE(status))
-		return -ENODEV;
+		return -EIO;
 
 	return sprintf(buf, "%llu\n", sta);
 }
@@ -541,16 +558,22 @@
 			goto end;
 	}
 
+	if (acpi_has_method(dev->handle, "_HRV")) {
+		result = device_create_file(&dev->dev, &dev_attr_hrv);
+		if (result)
+			goto end;
+	}
+
 	if (acpi_has_method(dev->handle, "_STA")) {
 		result = device_create_file(&dev->dev, &dev_attr_status);
 		if (result)
 			goto end;
 	}
 
-        /*
-         * If device has _EJ0, 'eject' file is created that is used to trigger
-         * hot-removal function from userland.
-         */
+	/*
+	 * If device has _EJ0, 'eject' file is created that is used to trigger
+	 * hot-removal function from userland.
+	 */
 	if (acpi_has_method(dev->handle, "_EJ0")) {
 		result = device_create_file(&dev->dev, &dev_attr_eject);
 		if (result)
@@ -604,6 +627,9 @@
 	if (acpi_has_method(dev->handle, "_SUN"))
 		device_remove_file(&dev->dev, &dev_attr_sun);
 
+	if (acpi_has_method(dev->handle, "_HRV"))
+		device_remove_file(&dev->dev, &dev_attr_hrv);
+
 	if (dev->pnp.unique_id)
 		device_remove_file(&dev->dev, &dev_attr_uid);
 	if (dev->pnp.type.bus_address)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index b420fb4..0e70181 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -105,8 +105,8 @@
 enum {
 	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
 	EC_FLAGS_QUERY_GUARDING,	/* Guard for SCI_EVT check */
-	EC_FLAGS_HANDLERS_INSTALLED,	/* Handlers for GPE and
-					 * OpReg are installed */
+	EC_FLAGS_GPE_HANDLER_INSTALLED,	/* GPE handler installed */
+	EC_FLAGS_EC_HANDLER_INSTALLED,	/* OpReg handler installed */
 	EC_FLAGS_STARTED,		/* Driver is started */
 	EC_FLAGS_STOPPED,		/* Driver is stopped */
 	EC_FLAGS_COMMAND_STORM,		/* GPE storms occurred to the
@@ -175,10 +175,9 @@
 struct acpi_ec *boot_ec, *first_ec;
 EXPORT_SYMBOL(first_ec);
 
-static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
-static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
 static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
+static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
 
 /* --------------------------------------------------------------------------
  *                           Logging/Debugging
@@ -367,7 +366,8 @@
 static void acpi_ec_submit_request(struct acpi_ec *ec)
 {
 	ec->reference_count++;
-	if (ec->reference_count == 1)
+	if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
+	    ec->reference_count == 1)
 		acpi_ec_enable_gpe(ec, true);
 }
 
@@ -376,7 +376,8 @@
 	bool flushed = false;
 
 	ec->reference_count--;
-	if (ec->reference_count == 0)
+	if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
+	    ec->reference_count == 0)
 		acpi_ec_disable_gpe(ec, true);
 	flushed = acpi_ec_flushed(ec);
 	if (flushed)
@@ -1287,52 +1288,64 @@
 {
 	acpi_status status;
 
-	if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
-		return 0;
-	status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
-				  ACPI_GPE_EDGE_TRIGGERED,
-				  &acpi_ec_gpe_handler, ec);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
-
 	acpi_ec_start(ec, false);
-	status = acpi_install_address_space_handler(ec->handle,
-						    ACPI_ADR_SPACE_EC,
-						    &acpi_ec_space_handler,
-						    NULL, ec);
-	if (ACPI_FAILURE(status)) {
-		if (status == AE_NOT_FOUND) {
-			/*
-			 * Maybe OS fails in evaluating the _REG object.
-			 * The AE_NOT_FOUND error will be ignored and OS
-			 * continue to initialize EC.
-			 */
-			pr_err("Fail in evaluating the _REG object"
-				" of EC device. Broken bios is suspected.\n");
-		} else {
-			acpi_ec_stop(ec, false);
-			acpi_remove_gpe_handler(NULL, ec->gpe,
-				&acpi_ec_gpe_handler);
-			return -ENODEV;
+
+	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+		status = acpi_install_address_space_handler(ec->handle,
+							    ACPI_ADR_SPACE_EC,
+							    &acpi_ec_space_handler,
+							    NULL, ec);
+		if (ACPI_FAILURE(status)) {
+			if (status == AE_NOT_FOUND) {
+				/*
+				 * Maybe OS fails in evaluating the _REG
+				 * object. The AE_NOT_FOUND error will be
+				 * ignored and the OS will continue to
+				 * initialize the EC.
+				 */
+				pr_err("Fail in evaluating the _REG object"
+					" of EC device. Broken bios is suspected.\n");
+			} else {
+				acpi_ec_stop(ec, false);
+				return -ENODEV;
+			}
+		}
+		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
+	}
+
+	if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
+		status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
+					  ACPI_GPE_EDGE_TRIGGERED,
+					  &acpi_ec_gpe_handler, ec);
+		/* This is not fatal as we can poll EC events */
+		if (ACPI_SUCCESS(status)) {
+			set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+			if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+			    ec->reference_count >= 1)
+				acpi_ec_enable_gpe(ec, true);
 		}
 	}
 
-	set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
 	return 0;
 }
 
 static void ec_remove_handlers(struct acpi_ec *ec)
 {
-	if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
-		return;
 	acpi_ec_stop(ec, false);
-	if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
-				ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
-		pr_err("failed to remove space handler\n");
-	if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
-				&acpi_ec_gpe_handler)))
-		pr_err("failed to remove gpe handler\n");
-	clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
+
+	if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+		if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
+					ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
+			pr_err("failed to remove space handler\n");
+		clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
+	}
+
+	if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
+		if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
+					&acpi_ec_gpe_handler)))
+			pr_err("failed to remove gpe handler\n");
+		clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+	}
 }
 
 static int acpi_ec_add(struct acpi_device *device)
@@ -1344,11 +1357,12 @@
 	strcpy(acpi_device_class(device), ACPI_EC_CLASS);
 
 	/* Check for boot EC */
-	if (boot_ec &&
-	    (boot_ec->handle == device->handle ||
-	     boot_ec->handle == ACPI_ROOT_OBJECT)) {
+	if (boot_ec) {
 		ec = boot_ec;
 		boot_ec = NULL;
+		ec_remove_handlers(ec);
+		if (first_ec == ec)
+			first_ec = NULL;
 	} else {
 		ec = make_acpi_ec();
 		if (!ec)
@@ -1434,7 +1448,7 @@
 
 int __init acpi_boot_ec_enable(void)
 {
-	if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
+	if (!boot_ec)
 		return 0;
 	if (!ec_install_handlers(boot_ec)) {
 		first_ec = boot_ec;
@@ -1448,20 +1462,6 @@
 	{"", 0},
 };
 
-/* Some BIOS do not survive early DSDT scan, skip it */
-static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
-{
-	EC_FLAGS_SKIP_DSDT_SCAN = 1;
-	return 0;
-}
-
-/* ASUStek often supplies us with broken ECDT, validate it */
-static int ec_validate_ecdt(const struct dmi_system_id *id)
-{
-	EC_FLAGS_VALIDATE_ECDT = 1;
-	return 0;
-}
-
 #if 0
 /*
  * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
@@ -1503,30 +1503,29 @@
 	return 0;
 }
 
+static int ec_correct_ecdt(const struct dmi_system_id *id)
+{
+	pr_debug("Detected system needing ECDT address correction.\n");
+	EC_FLAGS_CORRECT_ECDT = 1;
+	return 0;
+}
+
 static struct dmi_system_id ec_dmi_table[] __initdata = {
 	{
-	ec_skip_dsdt_scan, "Compal JFL92", {
-	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
-	DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
+	ec_correct_ecdt, "Asus L4R", {
+	DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
+	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
+	DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
 	{
-	ec_validate_ecdt, "MSI MS-171F", {
+	ec_correct_ecdt, "Asus M6R", {
+	DMI_MATCH(DMI_BIOS_VERSION, "0207"),
+	DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
+	DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
+	{
+	ec_correct_ecdt, "MSI MS-171F", {
 	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
 	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
 	{
-	ec_validate_ecdt, "ASUS hardware", {
-	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
-	{
-	ec_validate_ecdt, "ASUS hardware", {
-	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
-	{
-	ec_skip_dsdt_scan, "HP Folio 13", {
-	DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-	DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
-	{
-	ec_validate_ecdt, "ASUS hardware", {
-	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
-	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
-	{
 	ec_clear_on_resume, "Samsung hardware", {
 	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
 	{},
@@ -1534,8 +1533,8 @@
 
 int __init acpi_ec_ecdt_probe(void)
 {
+	int ret = 0;
 	acpi_status status;
-	struct acpi_ec *saved_ec = NULL;
 	struct acpi_table_ecdt *ecdt_ptr;
 
 	boot_ec = make_acpi_ec();
@@ -1547,67 +1546,45 @@
 	dmi_check_system(ec_dmi_table);
 	status = acpi_get_table(ACPI_SIG_ECDT, 1,
 				(struct acpi_table_header **)&ecdt_ptr);
-	if (ACPI_SUCCESS(status)) {
-		pr_info("EC description table is found, configuring boot EC\n");
+	if (ACPI_FAILURE(status)) {
+		ret = -ENODEV;
+		goto error;
+	}
+
+	if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
+		/*
+		 * Asus X50GL:
+		 * https://bugzilla.kernel.org/show_bug.cgi?id=11880
+		 */
+		ret = -ENODEV;
+		goto error;
+	}
+
+	pr_info("EC description table is found, configuring boot EC\n");
+	if (EC_FLAGS_CORRECT_ECDT) {
+		/*
+		 * Asus L4R, Asus M6R
+		 * https://bugzilla.kernel.org/show_bug.cgi?id=9399
+		 * MSI MS-171F
+		 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
+		 */
+		boot_ec->command_addr = ecdt_ptr->data.address;
+		boot_ec->data_addr = ecdt_ptr->control.address;
+	} else {
 		boot_ec->command_addr = ecdt_ptr->control.address;
 		boot_ec->data_addr = ecdt_ptr->data.address;
-		boot_ec->gpe = ecdt_ptr->gpe;
-		boot_ec->handle = ACPI_ROOT_OBJECT;
-		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
-				&boot_ec->handle);
-		/* Don't trust ECDT, which comes from ASUSTek */
-		if (!EC_FLAGS_VALIDATE_ECDT)
-			goto install;
-		saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
-		if (!saved_ec)
-			return -ENOMEM;
-	/* fall through */
 	}
-
-	if (EC_FLAGS_SKIP_DSDT_SCAN) {
-		kfree(saved_ec);
-		return -ENODEV;
-	}
-
-	/* This workaround is needed only on some broken machines,
-	 * which require early EC, but fail to provide ECDT */
-	pr_debug("Look up EC in DSDT\n");
-	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
-					boot_ec, NULL);
-	/* Check that acpi_get_devices actually find something */
-	if (ACPI_FAILURE(status) || !boot_ec->handle)
-		goto error;
-	if (saved_ec) {
-		/* try to find good ECDT from ASUSTek */
-		if (saved_ec->command_addr != boot_ec->command_addr ||
-		    saved_ec->data_addr != boot_ec->data_addr ||
-		    saved_ec->gpe != boot_ec->gpe ||
-		    saved_ec->handle != boot_ec->handle)
-			pr_info("ASUSTek keeps feeding us with broken "
-			"ECDT tables, which are very hard to workaround. "
-			"Trying to use DSDT EC info instead. Please send "
-			"output of acpidump to linux-acpi@vger.kernel.org\n");
-		kfree(saved_ec);
-		saved_ec = NULL;
-	} else {
-		/* We really need to limit this workaround, the only ASUS,
-		* which needs it, has fake EC._INI method, so use it as flag.
-		* Keep boot_ec struct as it will be needed soon.
-		*/
-		if (!dmi_name_in_vendors("ASUS") ||
-		    !acpi_has_method(boot_ec->handle, "_INI"))
-			return -ENODEV;
-	}
-install:
-	if (!ec_install_handlers(boot_ec)) {
+	boot_ec->gpe = ecdt_ptr->gpe;
+	boot_ec->handle = ACPI_ROOT_OBJECT;
+	ret = ec_install_handlers(boot_ec);
+	if (!ret)
 		first_ec = boot_ec;
-		return 0;
-	}
 error:
-	kfree(boot_ec);
-	kfree(saved_ec);
-	boot_ec = NULL;
-	return -ENODEV;
+	if (ret) {
+		kfree(boot_ec);
+		boot_ec = NULL;
+	}
+	return ret;
 }
 
 static int param_set_event_clearing(const char *val, struct kernel_param *kp)
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
new file mode 100644
index 0000000..46f0603
--- /dev/null
+++ b/drivers/acpi/evged.c
@@ -0,0 +1,154 @@
+/*
+ * Generic Event Device for ACPI.
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Generic Event Device allows platforms to handle interrupts in ACPI
+ * ASL statements. It follows very similar to  _EVT method approach
+ * from GPIO events. All interrupts are listed in _CRS and the handler
+ * is written in _EVT method. Here is an example.
+ *
+ * Device (GED0)
+ * {
+ *
+ *     Name (_HID, "ACPI0013")
+ *     Name (_UID, 0)
+ *     Method (_CRS, 0x0, Serialized)
+ *     {
+ *		Name (RBUF, ResourceTemplate ()
+ *		{
+ *		Interrupt(ResourceConsumer, Edge, ActiveHigh, Shared, , , )
+ *		{123}
+ *		}
+ *     })
+ *
+ *     Method (_EVT, 1) {
+ *             if (Lequal(123, Arg0))
+ *             {
+ *             }
+ *     }
+ * }
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+
+#define MODULE_NAME	"acpi-ged"
+
+struct acpi_ged_event {
+	struct list_head node;
+	struct device *dev;
+	unsigned int gsi;
+	unsigned int irq;
+	acpi_handle handle;
+};
+
+static irqreturn_t acpi_ged_irq_handler(int irq, void *data)
+{
+	struct acpi_ged_event *event = data;
+	acpi_status acpi_ret;
+
+	acpi_ret = acpi_execute_simple_method(event->handle, NULL, event->gsi);
+	if (ACPI_FAILURE(acpi_ret))
+		dev_err_once(event->dev, "IRQ method execution failed\n");
+
+	return IRQ_HANDLED;
+}
+
+static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
+					      void *context)
+{
+	struct acpi_ged_event *event;
+	unsigned int irq;
+	unsigned int gsi;
+	unsigned int irqflags = IRQF_ONESHOT;
+	struct device *dev = context;
+	acpi_handle handle = ACPI_HANDLE(dev);
+	acpi_handle evt_handle;
+	struct resource r;
+	struct acpi_resource_irq *p = &ares->data.irq;
+	struct acpi_resource_extended_irq *pext = &ares->data.extended_irq;
+
+	if (ares->type == ACPI_RESOURCE_TYPE_END_TAG)
+		return AE_OK;
+
+	if (!acpi_dev_resource_interrupt(ares, 0, &r)) {
+		dev_err(dev, "unable to parse IRQ resource\n");
+		return AE_ERROR;
+	}
+	if (ares->type == ACPI_RESOURCE_TYPE_IRQ)
+		gsi = p->interrupts[0];
+	else
+		gsi = pext->interrupts[0];
+
+	irq = r.start;
+
+	if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) {
+		dev_err(dev, "cannot locate _EVT method\n");
+		return AE_ERROR;
+	}
+
+	dev_info(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
+
+	event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
+	if (!event)
+		return AE_ERROR;
+
+	event->gsi = gsi;
+	event->dev = dev;
+	event->irq = irq;
+	event->handle = evt_handle;
+
+	if (r.flags & IORESOURCE_IRQ_SHAREABLE)
+		irqflags |= IRQF_SHARED;
+
+	if (devm_request_threaded_irq(dev, irq, NULL, acpi_ged_irq_handler,
+				      irqflags, "ACPI:Ged", event)) {
+		dev_err(dev, "failed to setup event handler for irq %u\n", irq);
+		return AE_ERROR;
+	}
+
+	return AE_OK;
+}
+
+static int ged_probe(struct platform_device *pdev)
+{
+	acpi_status acpi_ret;
+
+	acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
+				       acpi_ged_request_interrupt, &pdev->dev);
+	if (ACPI_FAILURE(acpi_ret)) {
+		dev_err(&pdev->dev, "unable to parse the _CRS record\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct acpi_device_id ged_acpi_ids[] = {
+	{"ACPI0013"},
+	{},
+};
+
+static struct platform_driver ged_driver = {
+	.probe = ged_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.acpi_match_table = ACPI_PTR(ged_acpi_ids),
+	},
+};
+builtin_platform_driver(ged_driver);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a37508e..9bb0773 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -20,7 +20,8 @@
 
 #define PREFIX "ACPI: "
 
-void acpi_initrd_initialize_tables(void);
+int early_acpi_osi_init(void);
+int acpi_osi_init(void);
 acpi_status acpi_os_initialize1(void);
 void init_acpi_device_notify(void);
 int acpi_scan_init(void);
@@ -145,6 +146,12 @@
 static inline void acpi_early_processor_set_pdc(void) {}
 #endif
 
+#ifdef CONFIG_X86
+void acpi_early_processor_osc(void);
+#else
+static inline void acpi_early_processor_osc(void) {}
+#endif
+
 /* --------------------------------------------------------------------------
                                   Embedded Controller
    -------------------------------------------------------------------------- */
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index d0f35e6..63cc9db 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -287,8 +287,11 @@
 					offset);
 			rc = -ENXIO;
 		}
-	} else
+	} else {
 		rc = 0;
+		if (cmd_rc)
+			*cmd_rc = xlat_status(buf, cmd);
+	}
 
  out:
 	ACPI_FREE(out_obj);
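
With this change a successful _DSM evaluation also propagates the command's own
translated status through the cmd_rc out-parameter seen in the hunk. A hedged
sketch of how a caller can then separate transport failures from command
failures (submit_cmd() is a purely illustrative stand-in for the real dispatcher):

    /* Illustrative only; submit_cmd() stands in for the real dispatcher. */
    static int check_cmd_status(unsigned int cmd, void *buf, unsigned int len)
    {
        int rc, cmd_rc;

        rc = submit_cmd(cmd, buf, len, &cmd_rc);
        if (rc < 0)
            return rc;      /* _DSM evaluation or transport failure */
        if (cmd_rc < 0)
            return cmd_rc;  /* firmware ran the command but reported an error */

        return 0;
    }
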
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 72b6e9e..d176e0e 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -327,10 +327,18 @@
 
 	/* SRAT: Static Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
-		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-				     acpi_parse_x2apic_affinity, 0);
-		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-				     acpi_parse_processor_affinity, 0);
+		struct acpi_subtable_proc srat_proc[2];
+
+		memset(srat_proc, 0, sizeof(srat_proc));
+		srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
+		srat_proc[0].handler = acpi_parse_processor_affinity;
+		srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
+		srat_proc[1].handler = acpi_parse_x2apic_affinity;
+
+		acpi_table_parse_entries_array(ACPI_SIG_SRAT,
+					sizeof(struct acpi_table_srat),
+					srat_proc, ARRAY_SIZE(srat_proc), 0);
+
 		cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
 					    acpi_parse_memory_affinity,
 					    NR_NODE_MEMBLKS);
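
Each .handler installed above follows the acpi_tbl_entry_handler signature. A
minimal sketch of such a handler (the example_ name and the body are
illustrative, not the kernel's actual SRAT handlers):

    static int __init example_parse_cpu_affinity(struct acpi_subtable_header *header,
                                                 const unsigned long end)
    {
        struct acpi_srat_cpu_affinity *pa =
            (struct acpi_srat_cpu_affinity *)header;

        if (!pa || pa->header.length < sizeof(*pa))
            return -EINVAL;

        /* Record the proximity-domain/APIC-ID mapping here. */
        return 0;
    }
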
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
new file mode 100644
index 0000000..849f9d2
--- /dev/null
+++ b/drivers/acpi/osi.c
@@ -0,0 +1,522 @@
+/*
+ *  osi.c - _OSI implementation
+ *
+ *  Copyright (C) 2016 Intel Corporation
+ *    Author: Lv Zheng <lv.zheng@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+/* Uncomment next line to get verbose printout */
+/* #define DEBUG */
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+
+#include "internal.h"
+
+
+#define OSI_STRING_LENGTH_MAX	64
+#define OSI_STRING_ENTRIES_MAX	16
+
+struct acpi_osi_entry {
+	char string[OSI_STRING_LENGTH_MAX];
+	bool enable;
+};
+
+static struct acpi_osi_config {
+	u8		default_disabling;
+	unsigned int	linux_enable:1;
+	unsigned int	linux_dmi:1;
+	unsigned int	linux_cmdline:1;
+	unsigned int	darwin_enable:1;
+	unsigned int	darwin_dmi:1;
+	unsigned int	darwin_cmdline:1;
+} osi_config;
+
+static struct acpi_osi_config osi_config;
+static struct acpi_osi_entry
+osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
+	{"Module Device", true},
+	{"Processor Device", true},
+	{"3.0 _SCP Extensions", true},
+	{"Processor Aggregator Device", true},
+};
+
+static u32 acpi_osi_handler(acpi_string interface, u32 supported)
+{
+	if (!strcmp("Linux", interface)) {
+		pr_notice_once(FW_BUG
+			"BIOS _OSI(Linux) query %s%s\n",
+			osi_config.linux_enable ? "honored" : "ignored",
+			osi_config.linux_cmdline ? " via cmdline" :
+			osi_config.linux_dmi ? " via DMI" : "");
+	}
+	if (!strcmp("Darwin", interface)) {
+		pr_notice_once(
+			"BIOS _OSI(Darwin) query %s%s\n",
+			osi_config.darwin_enable ? "honored" : "ignored",
+			osi_config.darwin_cmdline ? " via cmdline" :
+			osi_config.darwin_dmi ? " via DMI" : "");
+	}
+
+	return supported;
+}
+
+void __init acpi_osi_setup(char *str)
+{
+	struct acpi_osi_entry *osi;
+	bool enable = true;
+	int i;
+
+	if (!acpi_gbl_create_osi_method)
+		return;
+
+	if (str == NULL || *str == '\0') {
+		pr_info("_OSI method disabled\n");
+		acpi_gbl_create_osi_method = FALSE;
+		return;
+	}
+
+	if (*str == '!') {
+		str++;
+		if (*str == '\0') {
+			/* Do not override acpi_osi=!* */
+			if (!osi_config.default_disabling)
+				osi_config.default_disabling =
+					ACPI_DISABLE_ALL_VENDOR_STRINGS;
+			return;
+		} else if (*str == '*') {
+			osi_config.default_disabling = ACPI_DISABLE_ALL_STRINGS;
+			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+				osi = &osi_setup_entries[i];
+				osi->enable = false;
+			}
+			return;
+		} else if (*str == '!') {
+			osi_config.default_disabling = 0;
+			return;
+		}
+		enable = false;
+	}
+
+	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+		osi = &osi_setup_entries[i];
+		if (!strcmp(osi->string, str)) {
+			osi->enable = enable;
+			break;
+		} else if (osi->string[0] == '\0') {
+			osi->enable = enable;
+			strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
+			break;
+		}
+	}
+}
+
+static void __init __acpi_osi_setup_darwin(bool enable)
+{
+	osi_config.darwin_enable = !!enable;
+	if (enable) {
+		acpi_osi_setup("!");
+		acpi_osi_setup("Darwin");
+	} else {
+		acpi_osi_setup("!!");
+		acpi_osi_setup("!Darwin");
+	}
+}
+
+static void __init acpi_osi_setup_darwin(bool enable)
+{
+	/* Override acpi_osi_dmi_blacklisted() */
+	osi_config.darwin_dmi = 0;
+	osi_config.darwin_cmdline = 1;
+	__acpi_osi_setup_darwin(enable);
+}
+
+/*
+ * The story of _OSI(Linux)
+ *
+ * From pre-history through Linux-2.6.22, Linux responded TRUE upon a BIOS
+ * OSI(Linux) query.
+ *
+ * Unfortunately, reference BIOS writers got wind of this and put
+ * OSI(Linux) in their example code, quickly exposing this string as
+ * ill-conceived and opening the door to an un-bounded number of BIOS
+ * incompatibilities.
+ *
+ * For example, OSI(Linux) was used on resume to re-POST a video card on
+ * one system, because Linux at that time could not do a speedy restore in
+ * its native driver. But then upon gaining quick native restore
+ * capability, Linux has no way to tell the BIOS to skip the time-consuming
+ * POST -- putting Linux at a permanent performance disadvantage. On
+ * another system, the BIOS writer used OSI(Linux) to infer native OS
+ * support for IPMI!  On other systems, OSI(Linux) simply got in the way of
+ * Linux claiming to be compatible with other operating systems, exposing
+ * BIOS issues such as skipped device initialization.
+ *
+ * So "Linux" turned out to be a really poor chose of OSI string, and from
+ * Linux-2.6.23 onward we respond FALSE.
+ *
+ * BIOS writers should NOT query _OSI(Linux) on future systems. Linux will
+ * complain on the console when it sees it, and return FALSE. To get Linux
+ * to return TRUE for your system  will require a kernel source update to
+ * add a DMI entry, or boot with "acpi_osi=Linux"
+ */
+static void __init __acpi_osi_setup_linux(bool enable)
+{
+	osi_config.linux_enable = !!enable;
+	if (enable)
+		acpi_osi_setup("Linux");
+	else
+		acpi_osi_setup("!Linux");
+}
+
+static void __init acpi_osi_setup_linux(bool enable)
+{
+	/* Override acpi_osi_dmi_blacklisted() */
+	osi_config.linux_dmi = 0;
+	osi_config.linux_cmdline = 1;
+	__acpi_osi_setup_linux(enable);
+}
+
+/*
+ * Modify the list of "OS Interfaces" reported to BIOS via _OSI
+ *
+ * empty string disables _OSI
+ * string starting with '!' disables that string
+ * otherwise string is added to list, augmenting built-in strings
+ */
+static void __init acpi_osi_setup_late(void)
+{
+	struct acpi_osi_entry *osi;
+	char *str;
+	int i;
+	acpi_status status;
+
+	if (osi_config.default_disabling) {
+		status = acpi_update_interfaces(osi_config.default_disabling);
+		if (ACPI_SUCCESS(status))
+			pr_info("Disabled all _OSI OS vendors%s\n",
+				osi_config.default_disabling ==
+				ACPI_DISABLE_ALL_STRINGS ?
+				" and feature groups" : "");
+	}
+
+	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+		osi = &osi_setup_entries[i];
+		str = osi->string;
+		if (*str == '\0')
+			break;
+		if (osi->enable) {
+			status = acpi_install_interface(str);
+			if (ACPI_SUCCESS(status))
+				pr_info("Added _OSI(%s)\n", str);
+		} else {
+			status = acpi_remove_interface(str);
+			if (ACPI_SUCCESS(status))
+				pr_info("Deleted _OSI(%s)\n", str);
+		}
+	}
+}
+
+static int __init osi_setup(char *str)
+{
+	if (str && !strcmp("Linux", str))
+		acpi_osi_setup_linux(true);
+	else if (str && !strcmp("!Linux", str))
+		acpi_osi_setup_linux(false);
+	else if (str && !strcmp("Darwin", str))
+		acpi_osi_setup_darwin(true);
+	else if (str && !strcmp("!Darwin", str))
+		acpi_osi_setup_darwin(false);
+	else
+		acpi_osi_setup(str);
+
+	return 1;
+}
+__setup("acpi_osi=", osi_setup);
+
+bool acpi_osi_is_win8(void)
+{
+	return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
+}
+EXPORT_SYMBOL(acpi_osi_is_win8);
+
+static void __init acpi_osi_dmi_darwin(bool enable,
+				       const struct dmi_system_id *d)
+{
+	pr_notice("DMI detected to setup _OSI(\"Darwin\"): %s\n", d->ident);
+	osi_config.darwin_dmi = 1;
+	__acpi_osi_setup_darwin(enable);
+}
+
+void __init acpi_osi_dmi_linux(bool enable, const struct dmi_system_id *d)
+{
+	pr_notice("DMI detected to setup _OSI(\"Linux\"): %s\n", d->ident);
+	osi_config.linux_dmi = 1;
+	__acpi_osi_setup_linux(enable);
+}
+
+static int __init dmi_enable_osi_darwin(const struct dmi_system_id *d)
+{
+	acpi_osi_dmi_darwin(true, d);
+
+	return 0;
+}
+
+static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
+{
+	acpi_osi_dmi_linux(true, d);
+
+	return 0;
+}
+
+static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
+{
+	pr_notice("DMI detected: %s\n", d->ident);
+	acpi_osi_setup("!Windows 2006");
+	acpi_osi_setup("!Windows 2006 SP1");
+	acpi_osi_setup("!Windows 2006 SP2");
+
+	return 0;
+}
+
+static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+{
+	pr_notice("DMI detected: %s\n", d->ident);
+	acpi_osi_setup("!Windows 2009");
+
+	return 0;
+}
+
+static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
+{
+	pr_notice("DMI detected: %s\n", d->ident);
+	acpi_osi_setup("!Windows 2012");
+
+	return 0;
+}
+
+/*
+ * Linux default _OSI response behavior is determined by this DMI table.
+ *
+ * Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden
+ * by acpi_osi=!Linux/acpi_osi=!Darwin command line options.
+ */
+static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Fujitsu Siemens",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
+		},
+	},
+	{
+	/*
+	 * The MSI GX723 DSDT has an NVIF method that needs to be called by
+	 * the Nvidia driver (e.g. nouveau) when the user presses the
+	 * brightness hotkey. Currently nouveau does not do this, which
+	 * causes an infinite while loop in the DSDT when the hotkey is
+	 * pressed. We add the MSI GX723's DMI information to this table to
+	 * work around the issue.
+	 * Will remove MSI GX723 from the table after nouveau grows support.
+	 */
+	.callback = dmi_disable_osi_vista,
+	.ident = "MSI GX723",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Sony VGN-NS10J_S",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Sony VGN-SR290J",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "VGN-NS50B_L",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS50B_L"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "VGN-SR19XN",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR19XN"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Toshiba Satellite L355",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+		     DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win7,
+	.ident = "ASUS K50IJ",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Toshiba P305D",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Toshiba NB100",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
+		},
+	},
+
+	/*
+	 * The wireless hotkey does not work on those machines when
+	 * returning true for _OSI("Windows 2012")
+	 */
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "Dell Inspiron 7737",
+	.matches = {
+		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		    DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "Dell Inspiron 7537",
+	.matches = {
+		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		    DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "Dell Inspiron 5437",
+	.matches = {
+		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		    DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "Dell Inspiron 3437",
+	.matches = {
+		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		    DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "Dell Vostro 3446",
+	.matches = {
+		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		    DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
+		},
+	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "Dell Vostro 3546",
+	.matches = {
+		    DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		    DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
+		},
+	},
+
+	/*
+	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+	 * Linux ignores it, except for the machines enumerated below.
+	 */
+
+	/*
+	 * Without this the EEEpc exports a non-working WMI interface; with
+	 * this it exports the working "good old" eeepc_laptop interface,
+	 * fixing both brightness control and rfkill.
+	 */
+	{
+	.callback = dmi_enable_osi_linux,
+	.ident = "Asus EEE PC 1015PX",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
+		},
+	},
+
+	/*
+	 * Enable _OSI("Darwin") for all apple platforms.
+	 */
+	{
+	.callback = dmi_enable_osi_darwin,
+	.ident = "Apple hardware",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+		},
+	},
+	{
+	.callback = dmi_enable_osi_darwin,
+	.ident = "Apple hardware",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+		},
+	},
+	{}
+};
+
+static __init void acpi_osi_dmi_blacklisted(void)
+{
+	dmi_check_system(acpi_osi_dmi_table);
+}
+
+int __init early_acpi_osi_init(void)
+{
+	acpi_osi_dmi_blacklisted();
+
+	return 0;
+}
+
+int __init acpi_osi_init(void)
+{
+	acpi_install_interface_handler(acpi_osi_handler);
+	acpi_osi_setup_late();
+
+	return 0;
+}
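
The acpi_osi_dmi_table above follows the usual dmi_system_id pattern: an entry's
.callback runs only when every DMI_MATCH() pattern is found (as a substring) in
the corresponding firmware-reported DMI string, and the empty { } entry
terminates the table. Below is a minimal user-space sketch of that matching
logic, not kernel code; the vendor/product strings and the disable_osi_vista()
stand-in are invented for illustration.

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *ident;
	const char *vendor;		/* stands in for DMI_SYS_VENDOR */
	const char *product;		/* stands in for DMI_PRODUCT_NAME */
	int (*callback)(const struct quirk *q);
};

static int disable_osi_vista(const struct quirk *q)
{
	printf("%s: would call acpi_osi_setup(\"!Windows 2006\")\n", q->ident);
	return 0;
}

/* One invented entry plus the terminating empty entry. */
static const struct quirk quirks[] = {
	{ "Example VGN-XX11X", "Sony Corporation", "VGN-XX11X",
	  disable_osi_vista },
	{ NULL, NULL, NULL, NULL },
};

int main(void)
{
	/* Values the firmware would normally report through DMI. */
	const char *sys_vendor = "Sony Corporation";
	const char *product_name = "VGN-XX11X";
	const struct quirk *q;

	for (q = quirks; q->ident; q++)
		if (strstr(sys_vendor, q->vendor) &&
		    strstr(product_name, q->product))
			q->callback(q);
	return 0;
}
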
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 814d5f8..b108f13 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -56,10 +56,6 @@
 	struct work_struct work;
 };
 
-#ifdef CONFIG_ACPI_CUSTOM_DSDT
-#include CONFIG_ACPI_CUSTOM_DSDT_FILE
-#endif
-
 #ifdef ENABLE_DEBUGGER
 #include <linux/kdb.h>
 
@@ -96,72 +92,6 @@
 static LIST_HEAD(acpi_ioremaps);
 static DEFINE_MUTEX(acpi_ioremap_lock);
 
-static void __init acpi_osi_setup_late(void);
-
-/*
- * The story of _OSI(Linux)
- *
- * From pre-history through Linux-2.6.22,
- * Linux responded TRUE upon a BIOS OSI(Linux) query.
- *
- * Unfortunately, reference BIOS writers got wind of this
- * and put OSI(Linux) in their example code, quickly exposing
- * this string as ill-conceived and opening the door to
- * an un-bounded number of BIOS incompatibilities.
- *
- * For example, OSI(Linux) was used on resume to re-POST a
- * video card on one system, because Linux at that time
- * could not do a speedy restore in its native driver.
- * But then upon gaining quick native restore capability,
- * Linux has no way to tell the BIOS to skip the time-consuming
- * POST -- putting Linux at a permanent performance disadvantage.
- * On another system, the BIOS writer used OSI(Linux)
- * to infer native OS support for IPMI!  On other systems,
- * OSI(Linux) simply got in the way of Linux claiming to
- * be compatible with other operating systems, exposing
- * BIOS issues such as skipped device initialization.
- *
- * So "Linux" turned out to be a really poor chose of
- * OSI string, and from Linux-2.6.23 onward we respond FALSE.
- *
- * BIOS writers should NOT query _OSI(Linux) on future systems.
- * Linux will complain on the console when it sees it, and return FALSE.
- * To get Linux to return TRUE for your system  will require
- * a kernel source update to add a DMI entry,
- * or boot with "acpi_osi=Linux"
- */
-
-static struct osi_linux {
-	unsigned int	enable:1;
-	unsigned int	dmi:1;
-	unsigned int	cmdline:1;
-	unsigned int	default_disabling:1;
-} osi_linux = {0, 0, 0, 0};
-
-static u32 acpi_osi_handler(acpi_string interface, u32 supported)
-{
-	if (!strcmp("Linux", interface)) {
-
-		printk_once(KERN_NOTICE FW_BUG PREFIX
-			"BIOS _OSI(Linux) query %s%s\n",
-			osi_linux.enable ? "honored" : "ignored",
-			osi_linux.cmdline ? " via cmdline" :
-			osi_linux.dmi ? " via DMI" : "");
-	}
-
-	if (!strcmp("Darwin", interface)) {
-		/*
-		 * Apple firmware will behave poorly if it receives positive
-		 * answers to "Darwin" and any other OS. Respond positively
-		 * to Darwin and then disable all other vendor strings.
-		 */
-		acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
-		supported = ACPI_UINT32_MAX;
-	}
-
-	return supported;
-}
-
 static void __init acpi_request_region (struct acpi_generic_address *gas,
 	unsigned int length, char *desc)
 {
@@ -582,7 +512,7 @@
 
 acpi_status
 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
-			    char **new_val)
+			    acpi_string *new_val)
 {
 	if (!init_val || !new_val)
 		return AE_BAD_PARAMETER;
@@ -602,280 +532,6 @@
 	return AE_OK;
 }
 
-static void acpi_table_taint(struct acpi_table_header *table)
-{
-	pr_warn(PREFIX
-		"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
-		table->signature, table->oem_table_id);
-	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
-}
-
-#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
-#include <linux/earlycpio.h>
-#include <linux/memblock.h>
-
-static u64 acpi_tables_addr;
-static int all_tables_size;
-
-/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
-static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
-{
-	u8 sum = 0;
-	u8 *end = buffer + length;
-
-	while (buffer < end)
-		sum = (u8) (sum + *(buffer++));
-	return sum;
-}
-
-/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
-static const char * const table_sigs[] = {
-	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
-	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
-	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
-	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
-	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
-	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
-	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
-	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
-	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
-
-#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
-
-#define ACPI_OVERRIDE_TABLES 64
-static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
-static DECLARE_BITMAP(acpi_initrd_installed, ACPI_OVERRIDE_TABLES);
-
-#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
-
-void __init acpi_initrd_override(void *data, size_t size)
-{
-	int sig, no, table_nr = 0, total_offset = 0;
-	long offset = 0;
-	struct acpi_table_header *table;
-	char cpio_path[32] = "kernel/firmware/acpi/";
-	struct cpio_data file;
-
-	if (data == NULL || size == 0)
-		return;
-
-	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
-		file = find_cpio_data(cpio_path, data, size, &offset);
-		if (!file.data)
-			break;
-
-		data += offset;
-		size -= offset;
-
-		if (file.size < sizeof(struct acpi_table_header)) {
-			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
-				cpio_path, file.name);
-			continue;
-		}
-
-		table = file.data;
-
-		for (sig = 0; table_sigs[sig]; sig++)
-			if (!memcmp(table->signature, table_sigs[sig], 4))
-				break;
-
-		if (!table_sigs[sig]) {
-			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
-				cpio_path, file.name);
-			continue;
-		}
-		if (file.size != table->length) {
-			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
-				cpio_path, file.name);
-			continue;
-		}
-		if (acpi_table_checksum(file.data, table->length)) {
-			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
-				cpio_path, file.name);
-			continue;
-		}
-
-		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
-			table->signature, cpio_path, file.name, table->length);
-
-		all_tables_size += table->length;
-		acpi_initrd_files[table_nr].data = file.data;
-		acpi_initrd_files[table_nr].size = file.size;
-		table_nr++;
-	}
-	if (table_nr == 0)
-		return;
-
-	acpi_tables_addr =
-		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
-				       all_tables_size, PAGE_SIZE);
-	if (!acpi_tables_addr) {
-		WARN_ON(1);
-		return;
-	}
-	/*
-	 * Only calling e820_add_reserve does not work and the
-	 * tables are invalid (memory got used) later.
-	 * memblock_reserve works as expected and the tables won't get modified.
-	 * But it's not enough on X86 because ioremap will
-	 * complain later (used by acpi_os_map_memory) that the pages
-	 * that should get mapped are not marked "reserved".
-	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
-	 * works fine.
-	 */
-	memblock_reserve(acpi_tables_addr, all_tables_size);
-	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
-
-	/*
-	 * early_ioremap only can remap 256k one time. If we map all
-	 * tables one time, we will hit the limit. Need to map chunks
-	 * one by one during copying the same as that in relocate_initrd().
-	 */
-	for (no = 0; no < table_nr; no++) {
-		unsigned char *src_p = acpi_initrd_files[no].data;
-		phys_addr_t size = acpi_initrd_files[no].size;
-		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
-		phys_addr_t slop, clen;
-		char *dest_p;
-
-		total_offset += size;
-
-		while (size) {
-			slop = dest_addr & ~PAGE_MASK;
-			clen = size;
-			if (clen > MAP_CHUNK_SIZE - slop)
-				clen = MAP_CHUNK_SIZE - slop;
-			dest_p = early_ioremap(dest_addr & PAGE_MASK,
-						 clen + slop);
-			memcpy(dest_p + slop, src_p, clen);
-			early_iounmap(dest_p, clen + slop);
-			src_p += clen;
-			dest_addr += clen;
-			size -= clen;
-		}
-	}
-}
-
-acpi_status
-acpi_os_physical_table_override(struct acpi_table_header *existing_table,
-				acpi_physical_address *address, u32 *length)
-{
-	int table_offset = 0;
-	int table_index = 0;
-	struct acpi_table_header *table;
-	u32 table_length;
-
-	*length = 0;
-	*address = 0;
-	if (!acpi_tables_addr)
-		return AE_OK;
-
-	while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
-		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
-					   ACPI_HEADER_SIZE);
-		if (table_offset + table->length > all_tables_size) {
-			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
-			WARN_ON(1);
-			return AE_OK;
-		}
-
-		table_length = table->length;
-
-		/* Only override tables matched */
-		if (test_bit(table_index, acpi_initrd_installed) ||
-		    memcmp(existing_table->signature, table->signature, 4) ||
-		    memcmp(table->oem_table_id, existing_table->oem_table_id,
-			   ACPI_OEM_TABLE_ID_SIZE)) {
-			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
-			goto next_table;
-		}
-
-		*length = table_length;
-		*address = acpi_tables_addr + table_offset;
-		acpi_table_taint(existing_table);
-		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
-		set_bit(table_index, acpi_initrd_installed);
-		break;
-
-next_table:
-		table_offset += table_length;
-		table_index++;
-	}
-	return AE_OK;
-}
-
-void __init acpi_initrd_initialize_tables(void)
-{
-	int table_offset = 0;
-	int table_index = 0;
-	u32 table_length;
-	struct acpi_table_header *table;
-
-	if (!acpi_tables_addr)
-		return;
-
-	while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
-		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
-					   ACPI_HEADER_SIZE);
-		if (table_offset + table->length > all_tables_size) {
-			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
-			WARN_ON(1);
-			return;
-		}
-
-		table_length = table->length;
-
-		/* Skip RSDT/XSDT which should only be used for override */
-		if (test_bit(table_index, acpi_initrd_installed) ||
-		    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
-		    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
-			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
-			goto next_table;
-		}
-
-		acpi_table_taint(table);
-		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
-		acpi_install_table(acpi_tables_addr + table_offset, TRUE);
-		set_bit(table_index, acpi_initrd_installed);
-next_table:
-		table_offset += table_length;
-		table_index++;
-	}
-}
-#else
-acpi_status
-acpi_os_physical_table_override(struct acpi_table_header *existing_table,
-				acpi_physical_address *address,
-				u32 *table_length)
-{
-	*table_length = 0;
-	*address = 0;
-	return AE_OK;
-}
-
-void __init acpi_initrd_initialize_tables(void)
-{
-}
-#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
-
-acpi_status
-acpi_os_table_override(struct acpi_table_header *existing_table,
-		       struct acpi_table_header **new_table)
-{
-	if (!existing_table || !new_table)
-		return AE_BAD_PARAMETER;
-
-	*new_table = NULL;
-
-#ifdef CONFIG_ACPI_CUSTOM_DSDT
-	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
-		*new_table = (struct acpi_table_header *)AmlCode;
-#endif
-	if (*new_table != NULL)
-		acpi_table_taint(existing_table);
-	return AE_OK;
-}
-
 static irqreturn_t acpi_irq(int irq, void *dev_id)
 {
 	u32 handled;
@@ -1717,156 +1373,6 @@
 
 __setup("acpi_os_name=", acpi_os_name_setup);
 
-#define	OSI_STRING_LENGTH_MAX 64	/* arbitrary */
-#define	OSI_STRING_ENTRIES_MAX 16	/* arbitrary */
-
-struct osi_setup_entry {
-	char string[OSI_STRING_LENGTH_MAX];
-	bool enable;
-};
-
-static struct osi_setup_entry
-		osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
-	{"Module Device", true},
-	{"Processor Device", true},
-	{"3.0 _SCP Extensions", true},
-	{"Processor Aggregator Device", true},
-};
-
-void __init acpi_osi_setup(char *str)
-{
-	struct osi_setup_entry *osi;
-	bool enable = true;
-	int i;
-
-	if (!acpi_gbl_create_osi_method)
-		return;
-
-	if (str == NULL || *str == '\0') {
-		printk(KERN_INFO PREFIX "_OSI method disabled\n");
-		acpi_gbl_create_osi_method = FALSE;
-		return;
-	}
-
-	if (*str == '!') {
-		str++;
-		if (*str == '\0') {
-			osi_linux.default_disabling = 1;
-			return;
-		} else if (*str == '*') {
-			acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
-			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
-				osi = &osi_setup_entries[i];
-				osi->enable = false;
-			}
-			return;
-		}
-		enable = false;
-	}
-
-	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
-		osi = &osi_setup_entries[i];
-		if (!strcmp(osi->string, str)) {
-			osi->enable = enable;
-			break;
-		} else if (osi->string[0] == '\0') {
-			osi->enable = enable;
-			strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
-			break;
-		}
-	}
-}
-
-static void __init set_osi_linux(unsigned int enable)
-{
-	if (osi_linux.enable != enable)
-		osi_linux.enable = enable;
-
-	if (osi_linux.enable)
-		acpi_osi_setup("Linux");
-	else
-		acpi_osi_setup("!Linux");
-
-	return;
-}
-
-static void __init acpi_cmdline_osi_linux(unsigned int enable)
-{
-	osi_linux.cmdline = 1;	/* cmdline set the default and override DMI */
-	osi_linux.dmi = 0;
-	set_osi_linux(enable);
-
-	return;
-}
-
-void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
-{
-	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
-
-	if (enable == -1)
-		return;
-
-	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */
-	set_osi_linux(enable);
-
-	return;
-}
-
-/*
- * Modify the list of "OS Interfaces" reported to BIOS via _OSI
- *
- * empty string disables _OSI
- * string starting with '!' disables that string
- * otherwise string is added to list, augmenting built-in strings
- */
-static void __init acpi_osi_setup_late(void)
-{
-	struct osi_setup_entry *osi;
-	char *str;
-	int i;
-	acpi_status status;
-
-	if (osi_linux.default_disabling) {
-		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
-
-		if (ACPI_SUCCESS(status))
-			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
-	}
-
-	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
-		osi = &osi_setup_entries[i];
-		str = osi->string;
-
-		if (*str == '\0')
-			break;
-		if (osi->enable) {
-			status = acpi_install_interface(str);
-
-			if (ACPI_SUCCESS(status))
-				printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
-		} else {
-			status = acpi_remove_interface(str);
-
-			if (ACPI_SUCCESS(status))
-				printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
-		}
-	}
-}
-
-static int __init osi_setup(char *str)
-{
-	if (str && !strcmp("Linux", str))
-		acpi_cmdline_osi_linux(1);
-	else if (str && !strcmp("!Linux", str))
-		acpi_cmdline_osi_linux(0);
-	else
-		acpi_osi_setup(str);
-
-	return 1;
-}
-
-__setup("acpi_osi=", osi_setup);
-
 /*
  * Disable the auto-serialization of named objects creation methods.
  *
@@ -1986,12 +1492,6 @@
 }
 EXPORT_SYMBOL(acpi_resources_are_enforced);
 
-bool acpi_osi_is_win8(void)
-{
-	return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
-}
-EXPORT_SYMBOL(acpi_osi_is_win8);
-
 /*
  * Deallocate the memory for a spinlock.
  */
@@ -2157,8 +1657,7 @@
 	BUG_ON(!kacpid_wq);
 	BUG_ON(!kacpi_notify_wq);
 	BUG_ON(!kacpi_hotplug_wq);
-	acpi_install_interface_handler(acpi_osi_handler);
-	acpi_osi_setup_late();
+	acpi_osi_init();
 	return AE_OK;
 }
 
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index ededa90..8fc7323 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -36,6 +36,7 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
+#include <linux/irq.h>
 
 #include "internal.h"
 
@@ -437,17 +438,15 @@
  * enabled system.
  */
 
-#define ACPI_MAX_IRQS		256
-#define ACPI_MAX_ISA_IRQ	16
+#define ACPI_MAX_ISA_IRQS	16
 
-#define PIRQ_PENALTY_PCI_AVAILABLE	(0)
 #define PIRQ_PENALTY_PCI_POSSIBLE	(16*16)
 #define PIRQ_PENALTY_PCI_USING		(16*16*16)
 #define PIRQ_PENALTY_ISA_TYPICAL	(16*16*16*16)
 #define PIRQ_PENALTY_ISA_USED		(16*16*16*16*16)
 #define PIRQ_PENALTY_ISA_ALWAYS		(16*16*16*16*16*16)
 
-static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
+static int acpi_isa_irq_penalty[ACPI_MAX_ISA_IRQS] = {
 	PIRQ_PENALTY_ISA_ALWAYS,	/* IRQ0 timer */
 	PIRQ_PENALTY_ISA_ALWAYS,	/* IRQ1 keyboard */
 	PIRQ_PENALTY_ISA_ALWAYS,	/* IRQ2 cascade */
@@ -457,9 +456,9 @@
 	PIRQ_PENALTY_ISA_TYPICAL,	/* IRQ6 */
 	PIRQ_PENALTY_ISA_TYPICAL,	/* IRQ7 parallel, spurious */
 	PIRQ_PENALTY_ISA_TYPICAL,	/* IRQ8 rtc, sometimes */
-	PIRQ_PENALTY_PCI_AVAILABLE,	/* IRQ9  PCI, often acpi */
-	PIRQ_PENALTY_PCI_AVAILABLE,	/* IRQ10 PCI */
-	PIRQ_PENALTY_PCI_AVAILABLE,	/* IRQ11 PCI */
+	0,				/* IRQ9  PCI, often acpi */
+	0,				/* IRQ10 PCI */
+	0,				/* IRQ11 PCI */
 	PIRQ_PENALTY_ISA_USED,		/* IRQ12 mouse */
 	PIRQ_PENALTY_ISA_USED,		/* IRQ13 fpe, sometimes */
 	PIRQ_PENALTY_ISA_USED,		/* IRQ14 ide0 */
@@ -467,39 +466,58 @@
 	/* >IRQ15 */
 };
 
-int __init acpi_irq_penalty_init(void)
+static int acpi_irq_pci_sharing_penalty(int irq)
 {
 	struct acpi_pci_link *link;
-	int i;
+	int penalty = 0;
 
-	/*
-	 * Update penalties to facilitate IRQ balancing.
-	 */
 	list_for_each_entry(link, &acpi_link_list, list) {
-
 		/*
-		 * reflect the possible and active irqs in the penalty table --
-		 * useful for breaking ties.
+		 * If a link is active, penalize its IRQ heavily
+		 * so we try to choose a different IRQ.
 		 */
-		if (link->irq.possible_count) {
-			int penalty =
-			    PIRQ_PENALTY_PCI_POSSIBLE /
-			    link->irq.possible_count;
+		if (link->irq.active && link->irq.active == irq)
+			penalty += PIRQ_PENALTY_PCI_USING;
+		else {
+			int i;
 
-			for (i = 0; i < link->irq.possible_count; i++) {
-				if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
-					acpi_irq_penalty[link->irq.
-							 possible[i]] +=
-					    penalty;
-			}
-
-		} else if (link->irq.active) {
-			acpi_irq_penalty[link->irq.active] +=
-			    PIRQ_PENALTY_PCI_POSSIBLE;
+			/*
+			 * If a link is inactive, penalize the IRQs it
+			 * might use, but not as severely.
+			 */
+			for (i = 0; i < link->irq.possible_count; i++)
+				if (link->irq.possible[i] == irq)
+					penalty += PIRQ_PENALTY_PCI_POSSIBLE /
+						link->irq.possible_count;
 		}
 	}
 
-	return 0;
+	return penalty;
+}
+
+static int acpi_irq_get_penalty(int irq)
+{
+	int penalty = 0;
+
+	if (irq < ACPI_MAX_ISA_IRQS)
+		penalty += acpi_isa_irq_penalty[irq];
+
+	/*
+	 * Penalize the IRQ used by the ACPI SCI. If the SCI pin attributes
+	 * conflict with PCI IRQ attributes, mark the SCI as ISA_ALWAYS so it
+	 * won't be used for PCI IRQs.
+	 */
+	if (irq == acpi_gbl_FADT.sci_interrupt) {
+		u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
+
+		if (type != IRQ_TYPE_LEVEL_LOW)
+			penalty += PIRQ_PENALTY_ISA_ALWAYS;
+		else
+			penalty += PIRQ_PENALTY_PCI_USING;
+	}
+
+	penalty += acpi_irq_pci_sharing_penalty(irq);
+	return penalty;
 }
 
 static int acpi_irq_balance = -1;	/* 0: static, 1: balance */
@@ -547,12 +565,12 @@
 		 * the use of IRQs 9, 10, 11, and >15.
 		 */
 		for (i = (link->irq.possible_count - 1); i >= 0; i--) {
-			if (acpi_irq_penalty[irq] >
-			    acpi_irq_penalty[link->irq.possible[i]])
+			if (acpi_irq_get_penalty(irq) >
+			    acpi_irq_get_penalty(link->irq.possible[i]))
 				irq = link->irq.possible[i];
 		}
 	}
-	if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+	if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) {
 		printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
 			    "Try pci=noacpi or acpi=off\n",
 			    acpi_device_name(link->device),
@@ -568,7 +586,6 @@
 			    acpi_device_bid(link->device));
 		return -ENODEV;
 	} else {
-		acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
 		printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
 		       acpi_device_name(link->device),
 		       acpi_device_bid(link->device), link->irq.active);
@@ -778,7 +795,7 @@
 }
 
 /*
- * modify acpi_irq_penalty[] from cmdline
+ * modify acpi_isa_irq_penalty[] from cmdline
  */
 static int __init acpi_irq_penalty_update(char *str, int used)
 {
@@ -787,23 +804,24 @@
 	for (i = 0; i < 16; i++) {
 		int retval;
 		int irq;
+		int new_penalty;
 
 		retval = get_option(&str, &irq);
 
 		if (!retval)
 			break;	/* no number found */
 
-		if (irq < 0)
-			continue;
-
-		if (irq >= ARRAY_SIZE(acpi_irq_penalty))
+		/* see if this is an ISA IRQ */
+		if ((irq < 0) || (irq >= ACPI_MAX_ISA_IRQS))
 			continue;
 
 		if (used)
-			acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+			new_penalty = acpi_irq_get_penalty(irq) +
+					PIRQ_PENALTY_ISA_USED;
 		else
-			acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
+			new_penalty = 0;
 
+		acpi_isa_irq_penalty[irq] = new_penalty;
 		if (retval != 2)	/* no next number */
 			break;
 	}
@@ -819,34 +837,15 @@
  */
 void acpi_penalize_isa_irq(int irq, int active)
 {
-	if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
-		if (active)
-			acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
-		else
-			acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
-	}
+	if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
+		acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+		  (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
 }
 
 bool acpi_isa_irq_available(int irq)
 {
-	return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
-			    acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
-}
-
-/*
- * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
- * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
- * PCI IRQs.
- */
-void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
-{
-	if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
-		if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
-		    polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
-			acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
-		else
-			acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
-	}
+	return irq >= 0 && (irq >= ARRAY_SIZE(acpi_isa_irq_penalty) ||
+		    acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
 }
 
 /*
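
The pci_link.c changes above replace the statically maintained acpi_irq_penalty[]
array with on-demand computation: acpi_irq_get_penalty() adds the ISA base
penalty, an extra charge when the IRQ is the SCI with unsuitable trigger/polarity,
and acpi_irq_pci_sharing_penalty(), which charges PIRQ_PENALTY_PCI_USING for
every link actively routed to the IRQ and a share of PIRQ_PENALTY_PCI_POSSIBLE
for every inactive link that lists it. A small stand-alone model of that
arithmetic, using two invented links (not kernel code):

#include <stdio.h>

#define PIRQ_PENALTY_PCI_POSSIBLE	(16 * 16)
#define PIRQ_PENALTY_PCI_USING		(16 * 16 * 16)

struct link {
	int active;			/* IRQ currently routed, 0 if none */
	int possible[4];		/* candidate IRQs from _PRS */
	int possible_count;
};

/* Two invented links: one already routed to IRQ 11, one still inactive. */
static const struct link links[] = {
	{ .active = 11, .possible = { 10, 11 }, .possible_count = 2 },
	{ .active = 0,  .possible = { 9, 10, 11 }, .possible_count = 3 },
};

static int irq_penalty(int irq)
{
	int penalty = 0;
	unsigned int i;
	int j;

	for (i = 0; i < sizeof(links) / sizeof(links[0]); i++) {
		if (links[i].active && links[i].active == irq) {
			penalty += PIRQ_PENALTY_PCI_USING;
		} else {
			for (j = 0; j < links[i].possible_count; j++)
				if (links[i].possible[j] == irq)
					penalty += PIRQ_PENALTY_PCI_POSSIBLE /
						   links[i].possible_count;
		}
	}
	return penalty;
}

int main(void)
{
	int irq;

	for (irq = 9; irq <= 11; irq++)
		printf("IRQ %2d penalty %d\n", irq, irq_penalty(irq));
	/* IRQ 9 comes out cheapest, so a new link would avoid the busy IRQ 11. */
	return 0;
}
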
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 2a8b596..7a2e4d4 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -26,6 +26,11 @@
 #include "internal.h"
 #include "sleep.h"
 
+/*
+ * Some HW-full platforms do not have _S5, so they may need
+ * to leverage EFI power off for a shutdown.
+ */
+bool acpi_no_s5;
 static u8 sleep_states[ACPI_S_STATE_COUNT];
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
@@ -882,6 +887,8 @@
 		sleep_states[ACPI_STATE_S5] = 1;
 		pm_power_off_prepare = acpi_power_off_prepare;
 		pm_power_off = acpi_power_off;
+	} else {
+		acpi_no_s5 = true;
 	}
 
 	supported[0] = 0;
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 0243d37..4b3a9e2 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -555,23 +555,22 @@
 static int get_status(u32 index, acpi_event_status *status,
 		      acpi_handle *handle)
 {
-	int result = 0;
+	int result;
 
 	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
-		goto end;
+		return -EINVAL;
 
 	if (index < num_gpes) {
 		result = acpi_get_gpe_device(index, handle);
 		if (result) {
 			ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
 					"Invalid GPE 0x%x", index));
-			goto end;
+			return result;
 		}
 		result = acpi_get_gpe_status(*handle, index, status);
 	} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
 		result = acpi_get_event_status(index - num_gpes, status);
 
-end:
 	return result;
 }
 
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f49c024..a372f9e 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -32,8 +32,14 @@
 #include <linux/errno.h>
 #include <linux/acpi.h>
 #include <linux/bootmem.h>
+#include <linux/earlycpio.h>
+#include <linux/memblock.h>
 #include "internal.h"
 
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+#include CONFIG_ACPI_CUSTOM_DSDT_FILE
+#endif
+
 #define ACPI_MAX_TABLES		128
 
 static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
@@ -433,6 +439,314 @@
 	return;
 }
 
+static void acpi_table_taint(struct acpi_table_header *table)
+{
+	pr_warn("Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
+		table->signature, table->oem_table_id);
+	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+}
+
+#ifdef CONFIG_ACPI_TABLE_UPGRADE
+static u64 acpi_tables_addr;
+static int all_tables_size;
+
+/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
+static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
+{
+	u8 sum = 0;
+	u8 *end = buffer + length;
+
+	while (buffer < end)
+		sum = (u8) (sum + *(buffer++));
+	return sum;
+}
+
+/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
+static const char * const table_sigs[] = {
+	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
+	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
+	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
+	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
+	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
+	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
+	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
+	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
+	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
+
+#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
+
+#define NR_ACPI_INITRD_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[NR_ACPI_INITRD_TABLES];
+static DECLARE_BITMAP(acpi_initrd_installed, NR_ACPI_INITRD_TABLES);
+
+#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
+
+static void __init acpi_table_initrd_init(void *data, size_t size)
+{
+	int sig, no, table_nr = 0, total_offset = 0;
+	long offset = 0;
+	struct acpi_table_header *table;
+	char cpio_path[32] = "kernel/firmware/acpi/";
+	struct cpio_data file;
+
+	if (data == NULL || size == 0)
+		return;
+
+	for (no = 0; no < NR_ACPI_INITRD_TABLES; no++) {
+		file = find_cpio_data(cpio_path, data, size, &offset);
+		if (!file.data)
+			break;
+
+		data += offset;
+		size -= offset;
+
+		if (file.size < sizeof(struct acpi_table_header)) {
+			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
+				cpio_path, file.name);
+			continue;
+		}
+
+		table = file.data;
+
+		for (sig = 0; table_sigs[sig]; sig++)
+			if (!memcmp(table->signature, table_sigs[sig], 4))
+				break;
+
+		if (!table_sigs[sig]) {
+			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
+				cpio_path, file.name);
+			continue;
+		}
+		if (file.size != table->length) {
+			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
+				cpio_path, file.name);
+			continue;
+		}
+		if (acpi_table_checksum(file.data, table->length)) {
+			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
+				cpio_path, file.name);
+			continue;
+		}
+
+		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
+			table->signature, cpio_path, file.name, table->length);
+
+		all_tables_size += table->length;
+		acpi_initrd_files[table_nr].data = file.data;
+		acpi_initrd_files[table_nr].size = file.size;
+		table_nr++;
+	}
+	if (table_nr == 0)
+		return;
+
+	acpi_tables_addr =
+		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
+				       all_tables_size, PAGE_SIZE);
+	if (!acpi_tables_addr) {
+		WARN_ON(1);
+		return;
+	}
+	/*
+	 * Only calling e820_add_reserve does not work and the
+	 * tables are invalid (memory got used) later.
+	 * memblock_reserve works as expected and the tables won't get modified.
+	 * But it's not enough on X86 because ioremap will
+	 * complain later (used by acpi_os_map_memory) that the pages
+	 * that should get mapped are not marked "reserved".
+	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
+	 * work fine.
+	 */
+	memblock_reserve(acpi_tables_addr, all_tables_size);
+	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+
+	/*
+	 * early_ioremap can only remap 256 KB at a time. If we mapped all
+	 * the tables at once we would hit that limit, so map and copy the
+	 * chunks one by one, as relocate_initrd() does.
+	 */
+	for (no = 0; no < table_nr; no++) {
+		unsigned char *src_p = acpi_initrd_files[no].data;
+		phys_addr_t size = acpi_initrd_files[no].size;
+		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+		phys_addr_t slop, clen;
+		char *dest_p;
+
+		total_offset += size;
+
+		while (size) {
+			slop = dest_addr & ~PAGE_MASK;
+			clen = size;
+			if (clen > MAP_CHUNK_SIZE - slop)
+				clen = MAP_CHUNK_SIZE - slop;
+			dest_p = early_ioremap(dest_addr & PAGE_MASK,
+						 clen + slop);
+			memcpy(dest_p + slop, src_p, clen);
+			early_iounmap(dest_p, clen + slop);
+			src_p += clen;
+			dest_addr += clen;
+			size -= clen;
+		}
+	}
+}
+
+static acpi_status
+acpi_table_initrd_override(struct acpi_table_header *existing_table,
+			   acpi_physical_address *address, u32 *length)
+{
+	int table_offset = 0;
+	int table_index = 0;
+	struct acpi_table_header *table;
+	u32 table_length;
+
+	*length = 0;
+	*address = 0;
+	if (!acpi_tables_addr)
+		return AE_OK;
+
+	while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
+		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+					   ACPI_HEADER_SIZE);
+		if (table_offset + table->length > all_tables_size) {
+			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+			WARN_ON(1);
+			return AE_OK;
+		}
+
+		table_length = table->length;
+
+		/* Only override tables matched */
+		if (memcmp(existing_table->signature, table->signature, 4) ||
+		    memcmp(table->oem_id, existing_table->oem_id,
+			   ACPI_OEM_ID_SIZE) ||
+		    memcmp(table->oem_table_id, existing_table->oem_table_id,
+			   ACPI_OEM_TABLE_ID_SIZE)) {
+			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+			goto next_table;
+		}
+		/*
+		 * Mark the table to avoid being used in
+		 * acpi_table_initrd_scan() and check the revision.
+		 */
+		if (test_and_set_bit(table_index, acpi_initrd_installed) ||
+		    existing_table->oem_revision >= table->oem_revision) {
+			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+			goto next_table;
+		}
+
+		*length = table_length;
+		*address = acpi_tables_addr + table_offset;
+		pr_info("Table Upgrade: override [%4.4s-%6.6s-%8.8s]\n",
+			table->signature, table->oem_id,
+			table->oem_table_id);
+		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+		break;
+
+next_table:
+		table_offset += table_length;
+		table_index++;
+	}
+	return AE_OK;
+}
+
+static void __init acpi_table_initrd_scan(void)
+{
+	int table_offset = 0;
+	int table_index = 0;
+	u32 table_length;
+	struct acpi_table_header *table;
+
+	if (!acpi_tables_addr)
+		return;
+
+	while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
+		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+					   ACPI_HEADER_SIZE);
+		if (table_offset + table->length > all_tables_size) {
+			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+			WARN_ON(1);
+			return;
+		}
+
+		table_length = table->length;
+
+		/* Skip RSDT/XSDT which should only be used for override */
+		if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
+		    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
+			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+			goto next_table;
+		}
+		/*
+		 * Mark the table to avoid being used in
+		 * acpi_table_initrd_override(). Though this is not possible
+		 * because override is disabled in acpi_install_table().
+		 */
+		if (test_and_set_bit(table_index, acpi_initrd_installed)) {
+			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+			goto next_table;
+		}
+
+		pr_info("Table Upgrade: install [%4.4s-%6.6s-%8.8s]\n",
+			table->signature, table->oem_id,
+			table->oem_table_id);
+		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+		acpi_install_table(acpi_tables_addr + table_offset, TRUE);
+next_table:
+		table_offset += table_length;
+		table_index++;
+	}
+}
+#else
+static void __init acpi_table_initrd_init(void *data, size_t size)
+{
+}
+
+static acpi_status
+acpi_table_initrd_override(struct acpi_table_header *existing_table,
+			   acpi_physical_address *address,
+			   u32 *table_length)
+{
+	*table_length = 0;
+	*address = 0;
+	return AE_OK;
+}
+
+static void __init acpi_table_initrd_scan(void)
+{
+}
+#endif /* CONFIG_ACPI_TABLE_UPGRADE */
+
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+				acpi_physical_address *address,
+				u32 *table_length)
+{
+	return acpi_table_initrd_override(existing_table, address,
+					  table_length);
+}
+
+acpi_status
+acpi_os_table_override(struct acpi_table_header *existing_table,
+		       struct acpi_table_header **new_table)
+{
+	if (!existing_table || !new_table)
+		return AE_BAD_PARAMETER;
+
+	*new_table = NULL;
+
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
+		*new_table = (struct acpi_table_header *)AmlCode;
+#endif
+	if (*new_table != NULL)
+		acpi_table_taint(existing_table);
+	return AE_OK;
+}
+
+void __init early_acpi_table_init(void *data, size_t size)
+{
+	acpi_table_initrd_init(data, size);
+}
+
 /*
  * acpi_table_init()
  *
@@ -457,7 +771,7 @@
 	status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
 	if (ACPI_FAILURE(status))
 		return -EINVAL;
-	acpi_initrd_initialize_tables();
+	acpi_table_initrd_scan();
 
 	check_multiple_madt();
 	return 0;
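
acpi_table_initrd_init() above only accepts a cpio member as an upgrade table
when acpi_table_checksum() returns zero, i.e. when all bytes of the table,
including its checksum field, sum to zero modulo 256. A tiny stand-alone
illustration of that rule; the 8-byte pseudo-table is invented and is not a
real ACPI table.

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as acpi_table_checksum(): byte-wise sum modulo 256. */
static uint8_t checksum(const uint8_t *buf, uint32_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum = (uint8_t)(sum + *buf++);
	return sum;
}

int main(void)
{
	uint8_t table[8] = { 'F', 'A', 'K', 'E', 8, 0, 0, 0 };

	/* Patch the last byte so the whole buffer sums to zero. */
	table[7] = (uint8_t)(0x100 - checksum(table, sizeof(table)));
	printf("checksum after fixup: %u\n", checksum(table, sizeof(table)));
	return 0;
}
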
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 050673f..ac832bf 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -707,7 +707,7 @@
 EXPORT_SYMBOL(acpi_check_dsm);
 
 /**
- * acpi_dev_present - Detect presence of a given ACPI device in the system.
+ * acpi_dev_found - Detect presence of a given ACPI device in the namespace.
  * @hid: Hardware ID of the device.
  *
  * Return %true if the device was present at the moment of invocation.
@@ -719,7 +719,7 @@
  * instead). Calling from module_init() is fine (which is synonymous
  * with device_initcall()).
  */
-bool acpi_dev_present(const char *hid)
+bool acpi_dev_found(const char *hid)
 {
 	struct acpi_device_bus_id *acpi_device_bus_id;
 	bool found = false;
@@ -734,7 +734,7 @@
 
 	return found;
 }
-EXPORT_SYMBOL(acpi_dev_present);
+EXPORT_SYMBOL(acpi_dev_found);
 
 /*
  * acpi_backlight= handling, this is done here rather then in video_detect.c
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 1316ddd..3d13276 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -358,7 +358,7 @@
 	if (!(video_caps & ACPI_VIDEO_BACKLIGHT))
 		return acpi_backlight_vendor;
 
-	if (acpi_osi_is_win8() && backlight_device_registered(BACKLIGHT_RAW))
+	if (acpi_osi_is_win8() && backlight_device_get_by_type(BACKLIGHT_RAW))
 		return acpi_backlight_native;
 
 	return acpi_backlight_video;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 5083f85..cfa936a 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -202,6 +202,14 @@
 
 	  If unsure, say N.
 
+config SATA_AHCI_SEATTLE
+	tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
+	depends on ARCH_SEATTLE
+	help
+	  This option enables support for the AMD Seattle SATA host controller.
+
+	  If unsure, say N.
+
 config SATA_INIC162X
 	tristate "Initio 162x SATA support (Very Experimental)"
 	depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 1857952..0b2afb7 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -4,6 +4,7 @@
 # non-SFF interface
 obj-$(CONFIG_SATA_AHCI)		+= ahci.o libahci.o
 obj-$(CONFIG_SATA_ACARD_AHCI)	+= acard-ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_SEATTLE)	+= ahci_seattle.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_FSL)		+= sata_fsl.o
 obj-$(CONFIG_SATA_INIC162X)	+= sata_inic162x.o
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 4044233..62a04c8 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -51,6 +51,9 @@
 	if (rc)
 		return rc;
 
+	of_property_read_u32(dev->of_node,
+			     "ports-implemented", &hpriv->force_port_map);
+
 	if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
 		hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
 
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
new file mode 100644
index 0000000..6e702ab
--- /dev/null
+++ b/drivers/ata/ahci_seattle.c
@@ -0,0 +1,210 @@
+/*
+ * AMD Seattle AHCI SATA driver
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include "ahci.h"
+
+/* SGPIO Control Register definition
+ *
+ * Bit		Type		Description
+ * 31		RW		OD7.2 (activity)
+ * 30		RW		OD7.1 (locate)
+ * 29		RW		OD7.0 (fault)
+ * 28...8	RW		OD6.2...OD0.0 (3 bits per port, 1 bit per LED)
+ * 7		RO		SGPIO feature flag
+ * 6:4		RO		Reserved
+ * 3:0		RO		Number of ports (0 means no port supported)
+ */
+#define ACTIVITY_BIT_POS(x)		(8 + (3 * x))
+#define LOCATE_BIT_POS(x)		(ACTIVITY_BIT_POS(x) + 1)
+#define FAULT_BIT_POS(x)		(LOCATE_BIT_POS(x) + 1)
+
+#define ACTIVITY_MASK			0x00010000
+#define LOCATE_MASK			0x00080000
+#define FAULT_MASK			0x00400000
+
+#define DRV_NAME "ahci-seattle"
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+					    ssize_t size);
+
+struct seattle_plat_data {
+	void __iomem *sgpio_ctrl;
+};
+
+static struct ata_port_operations ahci_port_ops = {
+	.inherits		= &ahci_ops,
+};
+
+static const struct ata_port_info ahci_port_info = {
+	.flags		= AHCI_FLAG_COMMON,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_port_ops,
+};
+
+static struct ata_port_operations ahci_seattle_ops = {
+	.inherits		= &ahci_ops,
+	.transmit_led_message   = seattle_transmit_led_message,
+};
+
+static const struct ata_port_info ahci_port_seattle_info = {
+	.flags		= AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
+	.link_flags	= ATA_LFLAG_SW_ACTIVITY,
+	.pio_mask	= ATA_PIO4,
+	.udma_mask	= ATA_UDMA6,
+	.port_ops	= &ahci_seattle_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+	AHCI_SHT(DRV_NAME),
+};
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+					    ssize_t size)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ahci_port_priv *pp = ap->private_data;
+	struct seattle_plat_data *plat_data = hpriv->plat_data;
+	unsigned long flags;
+	int pmp;
+	struct ahci_em_priv *emp;
+	u32 val;
+
+	/* get the slot number from the message */
+	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+	if (pmp >= EM_MAX_SLOTS)
+		return -EINVAL;
+	emp = &pp->em_priv[pmp];
+
+	val = ioread32(plat_data->sgpio_ctrl);
+	if (state & ACTIVITY_MASK)
+		val |= 1 << ACTIVITY_BIT_POS((ap->port_no));
+	else
+		val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no)));
+
+	if (state & LOCATE_MASK)
+		val |= 1 << LOCATE_BIT_POS((ap->port_no));
+	else
+		val &= ~(1 << LOCATE_BIT_POS((ap->port_no)));
+
+	if (state & FAULT_MASK)
+		val |= 1 << FAULT_BIT_POS((ap->port_no));
+	else
+		val &= ~(1 << FAULT_BIT_POS((ap->port_no)));
+
+	iowrite32(val, plat_data->sgpio_ctrl);
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	/* save off new led state for port/slot */
+	emp->led_state = state;
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	return size;
+}
+
+static const struct ata_port_info *ahci_seattle_get_port_info(
+		struct platform_device *pdev, struct ahci_host_priv *hpriv)
+{
+	struct device *dev = &pdev->dev;
+	struct seattle_plat_data *plat_data;
+	u32 val;
+
+	plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
+	if (!plat_data)
+		return &ahci_port_info;
+
+	plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
+			      platform_get_resource(pdev, IORESOURCE_MEM, 1));
+	if (IS_ERR(plat_data->sgpio_ctrl))
+		return &ahci_port_info;
+
+	val = ioread32(plat_data->sgpio_ctrl);
+
+	if (!(val & 0xf))
+		return &ahci_port_info;
+
+	hpriv->em_loc = 0;
+	hpriv->em_buf_sz = 4;
+	hpriv->em_msg_type = EM_MSG_TYPE_LED;
+	hpriv->plat_data = plat_data;
+
+	dev_info(dev, "SGPIO LED control is enabled.\n");
+	return &ahci_port_seattle_info;
+}
+
+static int ahci_seattle_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct ahci_host_priv *hpriv;
+
+	hpriv = ahci_platform_get_resources(pdev);
+	if (IS_ERR(hpriv))
+		return PTR_ERR(hpriv);
+
+	rc = ahci_platform_enable_resources(hpriv);
+	if (rc)
+		return rc;
+
+	rc = ahci_platform_init_host(pdev, hpriv,
+				     ahci_seattle_get_port_info(pdev, hpriv),
+				     &ahci_platform_sht);
+	if (rc)
+		goto disable_resources;
+
+	return 0;
+disable_resources:
+	ahci_platform_disable_resources(hpriv);
+	return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+			 ahci_platform_resume);
+
+static const struct acpi_device_id ahci_acpi_match[] = {
+	{ "AMDI0600", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
+static struct platform_driver ahci_seattle_driver = {
+	.probe = ahci_seattle_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = DRV_NAME,
+		.acpi_match_table = ahci_acpi_match,
+		.pm = &ahci_pm_ops,
+	},
+};
+module_platform_driver(ahci_seattle_driver);
+
+MODULE_DESCRIPTION("Seattle AHCI SATA platform driver");
+MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
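
The SGPIO layout described in the header comment gives each port three
consecutive LED bits starting at bit 8, which is what the *_BIT_POS() macros
compute and what seattle_transmit_led_message() sets and clears in the control
register. A quick user-space check of those positions (illustration only, with
the macros re-declared locally):

#include <stdio.h>
#include <stdint.h>

#define ACTIVITY_BIT_POS(x)	(8 + (3 * (x)))
#define LOCATE_BIT_POS(x)	(ACTIVITY_BIT_POS(x) + 1)
#define FAULT_BIT_POS(x)	(LOCATE_BIT_POS(x) + 1)

int main(void)
{
	uint32_t val = 0;
	int port;

	for (port = 0; port < 8; port++)
		printf("port %d: activity=%2d locate=%2d fault=%2d\n", port,
		       ACTIVITY_BIT_POS(port), LOCATE_BIT_POS(port),
		       FAULT_BIT_POS(port));

	/* Turn the activity LED for port 2 on, then off again. */
	val |= 1u << ACTIVITY_BIT_POS(2);
	printf("port 2 activity set:     0x%08x\n", val);
	val &= ~(1u << ACTIVITY_BIT_POS(2));
	printf("port 2 activity cleared: 0x%08x\n", val);
	return 0;
}
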
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 3982054..71b0719 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -507,6 +507,7 @@
 		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
 			 port_map, hpriv->force_port_map);
 		port_map = hpriv->force_port_map;
+		hpriv->saved_port_map = port_map;
 	}
 
 	if (hpriv->mask_port_map) {
@@ -2549,8 +2550,8 @@
 
 	if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
 		if (hpriv->irq_handler)
-			dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set \
-				 and custom irq handler implemented\n");
+			dev_warn(host->dev,
+			         "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
 
 		rc = ahci_host_activate_multi_irqs(host, sht);
 	} else {
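
The dev_warn() rewrite above is more than a style fix: a backslash line
continuation inside a string literal keeps the next line's leading whitespace
in the message, so the old warning printed a run of tab characters in the
middle of the sentence. A two-line demonstration (strings shortened for the
example):

#include <stdio.h>

int main(void)
{
	const char *joined = "flag set \
			and custom irq handler implemented";
	const char *fixed = "flag set and custom irq handler implemented";

	printf("[%s]\n[%s]\n", joined, fixed);
	return 0;
}
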
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 55e257c..6f33ace 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -884,7 +884,7 @@
  *	@udma_mask: resulting udma_mask
  *
  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
- *	Any NULL distination masks will be ignored.
+ *	Any NULL destination masks will be ignored.
  */
 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
@@ -3399,7 +3399,7 @@
  *	EH context.
  *
  *	RETURNS:
- *	0 if @linke is ready before @deadline; otherwise, -errno.
+ *	0 if @link is ready before @deadline; otherwise, -errno.
  */
 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
 		   int (*check_ready)(struct ata_link *link))
@@ -3480,7 +3480,7 @@
  *	EH context.
  *
  *	RETURNS:
- *	0 if @linke is ready before @deadline; otherwise, -errno.
+ *	0 if @link is ready before @deadline; otherwise, -errno.
  */
 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
 				int (*check_ready)(struct ata_link *link))
@@ -3493,7 +3493,7 @@
 /**
  *	sata_link_debounce - debounce SATA phy status
  *	@link: ATA link to debounce SATA phy status for
- *	@params: timing parameters { interval, duratinon, timeout } in msec
+ *	@params: timing parameters { interval, duration, timeout } in msec
  *	@deadline: deadline jiffies for the operation
  *
  *	Make sure SStatus of @link reaches stable state, determined by
@@ -3563,7 +3563,7 @@
 /**
  *	sata_link_resume - resume SATA link
  *	@link: ATA link to resume SATA
- *	@params: timing parameters { interval, duratinon, timeout } in msec
+ *	@params: timing parameters { interval, duration, timeout } in msec
  *	@deadline: deadline jiffies for the operation
  *
  *	Resume SATA phy @link and debounce it.
@@ -3746,7 +3746,7 @@
 /**
  *	sata_link_hardreset - reset link via SATA phy reset
  *	@link: link to reset
- *	@timing: timing parameters { interval, duratinon, timeout } in msec
+ *	@timing: timing parameters { interval, duration, timeout } in msec
  *	@deadline: deadline jiffies for the operation
  *	@online: optional out parameter indicating link onlineness
  *	@check_ready: optional callback to check link readiness
@@ -4528,6 +4528,7 @@
 {
 	struct ata_taskfile tf;
 	unsigned int err_mask;
+	unsigned long timeout = 0;
 
 	/* set up set-features taskfile */
 	DPRINTK("set features - SATA features\n");
@@ -4539,7 +4540,10 @@
 	tf.protocol = ATA_PROT_NODATA;
 	tf.nsect = feature;
 
-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (enable == SETFEATURES_SPINUP)
+		timeout = ata_probe_timeout ?
+			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
@@ -6208,7 +6212,7 @@
  *
  *	After allocating an ATA host and initializing it, most libata
  *	LLDs perform three steps to activate the host - start host,
- *	request IRQ and register it.  This helper takes necessasry
+ *	request IRQ and register it.  This helper takes necessary
  *	arguments and performs the three steps in one go.
  *
  *	An invalid IRQ skips the IRQ registration and expects the host to
@@ -6261,7 +6265,7 @@
 }
 
 /**
- *	ata_port_detach - Detach ATA port in prepration of device removal
+ *	ata_port_detach - Detach ATA port in preparation of device removal
  *	@ap: ATA port to be detached
  *
  *	Detach all ATA devices and the associated SCSI devices of @ap;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 567859c..7bcc870 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1109,7 +1109,7 @@
  *	@rq: request to be checked
  *
  *	ATAPI commands which transfer variable length data to host
- *	might overflow due to application error or hardare bug.  This
+ *	might overflow due to application error or hardware bug.  This
  *	function checks whether overflow should be drained and ignored
  *	for @request.
  *
@@ -3439,14 +3439,11 @@
 {
 #ifdef ATA_DEBUG
 	struct scsi_device *scsidev = cmd->device;
-	u8 *scsicmd = cmd->cmnd;
 
-	DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+	DPRINTK("CDB (%u:%d,%d,%d) %9ph\n",
 		ap->print_id,
 		scsidev->channel, scsidev->id, scsidev->lun,
-		scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
-		scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
-		scsicmd[8]);
+		cmd->cmnd);
 #endif
 }
 
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index d7c7320..188f2f2 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -294,7 +294,7 @@
 
 static struct scsi_host_template pata_icside_sht = {
 	ATA_BASE_SHT(DRV_NAME),
-	.sg_tablesize		= SCSI_MAX_SG_CHAIN_SEGMENTS,
+	.sg_tablesize		= SG_MAX_SEGMENTS,
 	.dma_boundary		= IOMD_DMA_BOUNDARY,
 };
 
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 9020349..2cb6f7e 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -201,8 +201,8 @@
 static struct dw_dma_slave sata_dwc_dma_dws = {
 	.src_id = 0,
 	.dst_id = 0,
-	.src_master = 0,
-	.dst_master = 1,
+	.m_master = 1,
+	.p_master = 0,
 };
 
 /*
@@ -1248,7 +1248,7 @@
 	hsdev->dma->dev = &ofdev->dev;
 
 	/* Initialize AHB DMAC */
-	err = dw_dma_probe(hsdev->dma, NULL);
+	err = dw_dma_probe(hsdev->dma);
 	if (err)
 		goto error_dma_iomap;
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f437afa..6482d47 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -322,16 +322,16 @@
 /**
  * platform_device_add_properties - add built-in properties to a platform device
  * @pdev: platform device to add properties to
- * @pset: properties to add
+ * @properties: null terminated array of properties to add
  *
- * The function will take deep copy of the properties in @pset and attach
- * the copy to the platform device. The memory associated with properties
- * will be freed when the platform device is released.
+ * The function will take deep copy of @properties and attach the copy to the
+ * platform device. The memory associated with properties will be freed when the
+ * platform device is released.
  */
 int platform_device_add_properties(struct platform_device *pdev,
-				   const struct property_set *pset)
+				   struct property_entry *properties)
 {
-	return device_add_property_set(&pdev->dev, pset);
+	return device_add_properties(&pdev->dev, properties);
 }
 EXPORT_SYMBOL_GPL(platform_device_add_properties);
 
@@ -447,7 +447,7 @@
 				release_resource(r);
 		}
 
-		device_remove_property_set(&pdev->dev);
+		device_remove_properties(&pdev->dev);
 	}
 }
 EXPORT_SYMBOL_GPL(platform_device_del);
@@ -526,8 +526,9 @@
 	if (ret)
 		goto err;
 
-	if (pdevinfo->pset) {
-		ret = platform_device_add_properties(pdev, pdevinfo->pset);
+	if (pdevinfo->properties) {
+		ret = platform_device_add_properties(pdev,
+						     pdevinfo->properties);
 		if (ret)
 			goto err;
 	}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 0e64a1b..3657ac1 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -159,7 +159,7 @@
 
 	count = of_count_phandle_with_args(dev->of_node, "clocks",
 					   "#clock-cells");
-	if (count == 0)
+	if (count <= 0)
 		return -ENODEV;
 
 	clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 56705b5..de23b64 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -229,17 +229,6 @@
 	return ret;
 }
 
-static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
-	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
-}
-
-static int genpd_restore_dev(struct generic_pm_domain *genpd,
-			struct device *dev)
-{
-	return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
-}
-
 static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 				     unsigned long val, void *ptr)
 {
@@ -372,17 +361,63 @@
 }
 
 /**
- * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
+ * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_suspend(struct device *dev)
+{
+	int (*cb)(struct device *__dev);
+
+	if (dev->type && dev->type->pm)
+		cb = dev->type->pm->runtime_suspend;
+	else if (dev->class && dev->class->pm)
+		cb = dev->class->pm->runtime_suspend;
+	else if (dev->bus && dev->bus->pm)
+		cb = dev->bus->pm->runtime_suspend;
+	else
+		cb = NULL;
+
+	if (!cb && dev->driver && dev->driver->pm)
+		cb = dev->driver->pm->runtime_suspend;
+
+	return cb ? cb(dev) : 0;
+}
+
+/**
+ * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_resume(struct device *dev)
+{
+	int (*cb)(struct device *__dev);
+
+	if (dev->type && dev->type->pm)
+		cb = dev->type->pm->runtime_resume;
+	else if (dev->class && dev->class->pm)
+		cb = dev->class->pm->runtime_resume;
+	else if (dev->bus && dev->bus->pm)
+		cb = dev->bus->pm->runtime_resume;
+	else
+		cb = NULL;
+
+	if (!cb && dev->driver && dev->driver->pm)
+		cb = dev->driver->pm->runtime_resume;
+
+	return cb ? cb(dev) : 0;
+}
+
+/**
+ * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
  * @dev: Device to suspend.
  *
  * Carry out a runtime suspend of a device under the assumption that its
  * pm_domain field points to the domain member of an object of type
  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
  */
-static int pm_genpd_runtime_suspend(struct device *dev)
+static int genpd_runtime_suspend(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
-	bool (*stop_ok)(struct device *__dev);
+	bool (*suspend_ok)(struct device *__dev);
 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
 	bool runtime_pm = pm_runtime_enabled(dev);
 	ktime_t time_start;
@@ -401,21 +436,21 @@
 	 * runtime PM is disabled. Under these circumstances, we shall skip
 	 * validating/measuring the PM QoS latency.
 	 */
-	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
-	if (runtime_pm && stop_ok && !stop_ok(dev))
+	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
+	if (runtime_pm && suspend_ok && !suspend_ok(dev))
 		return -EBUSY;
 
 	/* Measure suspend latency. */
 	if (runtime_pm)
 		time_start = ktime_get();
 
-	ret = genpd_save_dev(genpd, dev);
+	ret = __genpd_runtime_suspend(dev);
 	if (ret)
 		return ret;
 
 	ret = genpd_stop_dev(genpd, dev);
 	if (ret) {
-		genpd_restore_dev(genpd, dev);
+		__genpd_runtime_resume(dev);
 		return ret;
 	}
 
@@ -446,14 +481,14 @@
 }
 
 /**
- * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
  * @dev: Device to resume.
  *
  * Carry out a runtime resume of a device under the assumption that its
  * pm_domain field points to the domain member of an object of type
  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
  */
-static int pm_genpd_runtime_resume(struct device *dev)
+static int genpd_runtime_resume(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
@@ -491,7 +526,7 @@
 	if (ret)
 		goto err_poweroff;
 
-	ret = genpd_restore_dev(genpd, dev);
+	ret = __genpd_runtime_resume(dev);
 	if (ret)
 		goto err_stop;
 
@@ -695,15 +730,6 @@
 	 * at this point and a system wakeup event should be reported if it's
 	 * set up to wake up the system from sleep states.
 	 */
-	pm_runtime_get_noresume(dev);
-	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
-		pm_wakeup_event(dev, 0);
-
-	if (pm_wakeup_pending()) {
-		pm_runtime_put(dev);
-		return -EBUSY;
-	}
-
 	if (resume_needed(dev, genpd))
 		pm_runtime_resume(dev);
 
@@ -716,10 +742,8 @@
 
 	mutex_unlock(&genpd->lock);
 
-	if (genpd->suspend_power_off) {
-		pm_runtime_put_noidle(dev);
+	if (genpd->suspend_power_off)
 		return 0;
-	}
 
 	/*
 	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
@@ -741,7 +765,6 @@
 		pm_runtime_enable(dev);
 	}
 
-	pm_runtime_put(dev);
 	return ret;
 }
 
@@ -1427,54 +1450,6 @@
 }
 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
 
-/* Default device callbacks for generic PM domains. */
-
-/**
- * pm_genpd_default_save_state - Default "save device state" for PM domains.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_save_state(struct device *dev)
-{
-	int (*cb)(struct device *__dev);
-
-	if (dev->type && dev->type->pm)
-		cb = dev->type->pm->runtime_suspend;
-	else if (dev->class && dev->class->pm)
-		cb = dev->class->pm->runtime_suspend;
-	else if (dev->bus && dev->bus->pm)
-		cb = dev->bus->pm->runtime_suspend;
-	else
-		cb = NULL;
-
-	if (!cb && dev->driver && dev->driver->pm)
-		cb = dev->driver->pm->runtime_suspend;
-
-	return cb ? cb(dev) : 0;
-}
-
-/**
- * pm_genpd_default_restore_state - Default PM domains "restore device state".
- * @dev: Device to handle.
- */
-static int pm_genpd_default_restore_state(struct device *dev)
-{
-	int (*cb)(struct device *__dev);
-
-	if (dev->type && dev->type->pm)
-		cb = dev->type->pm->runtime_resume;
-	else if (dev->class && dev->class->pm)
-		cb = dev->class->pm->runtime_resume;
-	else if (dev->bus && dev->bus->pm)
-		cb = dev->bus->pm->runtime_resume;
-	else
-		cb = NULL;
-
-	if (!cb && dev->driver && dev->driver->pm)
-		cb = dev->driver->pm->runtime_resume;
-
-	return cb ? cb(dev) : 0;
-}
-
 /**
  * pm_genpd_init - Initialize a generic I/O PM domain object.
  * @genpd: PM domain object to initialize.
@@ -1498,8 +1473,8 @@
 	genpd->device_count = 0;
 	genpd->max_off_time_ns = -1;
 	genpd->max_off_time_changed = true;
-	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
-	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
+	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
 	genpd->domain.ops.prepare = pm_genpd_prepare;
 	genpd->domain.ops.suspend = pm_genpd_suspend;
 	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
@@ -1520,8 +1495,6 @@
 	genpd->domain.ops.restore_early = pm_genpd_resume_early;
 	genpd->domain.ops.restore = pm_genpd_resume;
 	genpd->domain.ops.complete = pm_genpd_complete;
-	genpd->dev_ops.save_state = pm_genpd_default_save_state;
-	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
 
 	if (genpd->flags & GENPD_FLAG_PM_CLK) {
 		genpd->dev_ops.stop = pm_clk_suspend;
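
For illustration only (not part of the patch; the domain name and power callbacks below are assumptions), a platform power domain initialized through pm_genpd_init() now has genpd_runtime_suspend()/genpd_runtime_resume() installed for it directly, with no default save_state/restore_state callbacks left to fill in:

#include <linux/pm_domain.h>

/* Sketch only: a made-up platform power switch behind a genpd. */
static int my_pd_power_on(struct generic_pm_domain *pd)
{
	/* enable the (hypothetical) platform power switch */
	return 0;
}

static int my_pd_power_off(struct generic_pm_domain *pd)
{
	/* disable the (hypothetical) platform power switch */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name		= "my_pd",
	.power_on	= my_pd_power_on,
	.power_off	= my_pd_power_off,
};

static int __init my_pd_setup(void)
{
	/* runtime_suspend/resume ops are wired up by pm_genpd_init() itself */
	pm_genpd_init(&my_pd, &simple_qos_governor, true);
	return 0;
}
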
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 00a5436..2e0fce7 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -37,10 +37,10 @@
 }
 
 /**
- * default_stop_ok - Default PM domain governor routine for stopping devices.
+ * default_suspend_ok - Default PM domain governor routine to suspend devices.
  * @dev: Device to check.
  */
-static bool default_stop_ok(struct device *dev)
+static bool default_suspend_ok(struct device *dev)
 {
 	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
 	unsigned long flags;
@@ -51,13 +51,13 @@
 	spin_lock_irqsave(&dev->power.lock, flags);
 
 	if (!td->constraint_changed) {
-		bool ret = td->cached_stop_ok;
+		bool ret = td->cached_suspend_ok;
 
 		spin_unlock_irqrestore(&dev->power.lock, flags);
 		return ret;
 	}
 	td->constraint_changed = false;
-	td->cached_stop_ok = false;
+	td->cached_suspend_ok = false;
 	td->effective_constraint_ns = -1;
 	constraint_ns = __dev_pm_qos_read_value(dev);
 
@@ -83,13 +83,13 @@
 			return false;
 	}
 	td->effective_constraint_ns = constraint_ns;
-	td->cached_stop_ok = constraint_ns >= 0;
+	td->cached_suspend_ok = constraint_ns >= 0;
 
 	/*
 	 * The children have been suspended already, so we don't need to take
-	 * their stop latencies into account here.
+	 * their suspend latencies into account here.
 	 */
-	return td->cached_stop_ok;
+	return td->cached_suspend_ok;
 }
 
 /**
@@ -150,7 +150,7 @@
 		 */
 		td = &to_gpd_data(pdd)->td;
 		constraint_ns = td->effective_constraint_ns;
-		/* default_stop_ok() need not be called before us. */
+		/* default_suspend_ok() need not be called before us. */
 		if (constraint_ns < 0) {
 			constraint_ns = dev_pm_qos_read_value(pdd->dev);
 			constraint_ns *= NSEC_PER_USEC;
@@ -227,7 +227,7 @@
 }
 
 struct dev_power_governor simple_qos_governor = {
-	.stop_ok = default_stop_ok,
+	.suspend_ok = default_suspend_ok,
 	.power_down_ok = default_power_down_ok,
 };
 
@@ -236,5 +236,5 @@
  */
 struct dev_power_governor pm_domain_always_on_gov = {
 	.power_down_ok = always_on_power_down_ok,
-	.stop_ok = default_stop_ok,
+	.suspend_ok = default_suspend_ok,
 };
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 6e7c3cc..c81667d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1556,7 +1556,6 @@
 static int device_prepare(struct device *dev, pm_message_t state)
 {
 	int (*callback)(struct device *) = NULL;
-	char *info = NULL;
 	int ret = 0;
 
 	if (dev->power.syscore)
@@ -1579,24 +1578,17 @@
 		goto unlock;
 	}
 
-	if (dev->pm_domain) {
-		info = "preparing power domain ";
+	if (dev->pm_domain)
 		callback = dev->pm_domain->ops.prepare;
-	} else if (dev->type && dev->type->pm) {
-		info = "preparing type ";
+	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->prepare;
-	} else if (dev->class && dev->class->pm) {
-		info = "preparing class ";
+	else if (dev->class && dev->class->pm)
 		callback = dev->class->pm->prepare;
-	} else if (dev->bus && dev->bus->pm) {
-		info = "preparing bus ";
+	else if (dev->bus && dev->bus->pm)
 		callback = dev->bus->pm->prepare;
-	}
 
-	if (!callback && dev->driver && dev->driver->pm) {
-		info = "preparing driver ";
+	if (!callback && dev->driver && dev->driver->pm)
 		callback = dev->driver->pm->prepare;
-	}
 
 	if (callback)
 		ret = callback(dev);
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
index 19837ef..e70ceb4 100644
--- a/drivers/base/power/opp/Makefile
+++ b/drivers/base/power/opp/Makefile
@@ -1,3 +1,4 @@
 ccflags-$(CONFIG_DEBUG_DRIVER)	:= -DDEBUG
 obj-y				+= core.o cpu.o
+obj-$(CONFIG_OF)		+= of.o
 obj-$(CONFIG_DEBUG_FS)		+= debugfs.o
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 433b600..7c04c87 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -18,7 +18,6 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/device.h>
-#include <linux/of.h>
 #include <linux/export.h>
 #include <linux/regulator/consumer.h>
 
@@ -29,7 +28,7 @@
  * from here, with each opp_table containing the list of opps it supports in
  * various states of availability.
  */
-static LIST_HEAD(opp_tables);
+LIST_HEAD(opp_tables);
 /* Lock to allow exclusive modification to the device and opp lists */
 DEFINE_MUTEX(opp_table_lock);
 
@@ -53,26 +52,6 @@
 	return NULL;
 }
 
-static struct opp_table *_managed_opp(const struct device_node *np)
-{
-	struct opp_table *opp_table;
-
-	list_for_each_entry_rcu(opp_table, &opp_tables, node) {
-		if (opp_table->np == np) {
-			/*
-			 * Multiple devices can point to the same OPP table and
-			 * so will have same node-pointer, np.
-			 *
-			 * But the OPPs will be considered as shared only if the
-			 * OPP table contains a "opp-shared" property.
-			 */
-			return opp_table->shared_opp ? opp_table : NULL;
-		}
-	}
-
-	return NULL;
-}
-
 /**
  * _find_opp_table() - find opp_table struct using device pointer
  * @dev:	device pointer used to lookup OPP table
@@ -259,9 +238,6 @@
 	reg = opp_table->regulator;
 	if (IS_ERR(reg)) {
 		/* Regulator may not be required for device */
-		if (reg)
-			dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
-				PTR_ERR(reg));
 		rcu_read_unlock();
 		return 0;
 	}
@@ -760,7 +736,6 @@
 {
 	struct opp_table *opp_table;
 	struct opp_device *opp_dev;
-	struct device_node *np;
 	int ret;
 
 	/* Check for existing table for 'dev' first */
@@ -784,20 +759,7 @@
 		return NULL;
 	}
 
-	/*
-	 * Only required for backward compatibility with v1 bindings, but isn't
-	 * harmful for other cases. And so we do it unconditionally.
-	 */
-	np = of_node_get(dev->of_node);
-	if (np) {
-		u32 val;
-
-		if (!of_property_read_u32(np, "clock-latency", &val))
-			opp_table->clock_latency_ns_max = val;
-		of_property_read_u32(np, "voltage-tolerance",
-				     &opp_table->voltage_tolerance_v1);
-		of_node_put(np);
-	}
+	_of_init_opp_table(opp_table, dev);
 
 	/* Set regulator to a non-NULL error value */
 	opp_table->regulator = ERR_PTR(-ENXIO);
@@ -893,8 +855,8 @@
  * It is assumed that the caller holds required mutex for an RCU updater
  * strategy.
  */
-static void _opp_remove(struct opp_table *opp_table,
-			struct dev_pm_opp *opp, bool notify)
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
+		 bool notify)
 {
 	/*
 	 * Notify the changes in the availability of the operable
@@ -955,8 +917,8 @@
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
-static struct dev_pm_opp *_allocate_opp(struct device *dev,
-					struct opp_table **opp_table)
+struct dev_pm_opp *_allocate_opp(struct device *dev,
+				 struct opp_table **opp_table)
 {
 	struct dev_pm_opp *opp;
 
@@ -992,8 +954,8 @@
 	return true;
 }
 
-static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
-		    struct opp_table *opp_table)
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
+	     struct opp_table *opp_table)
 {
 	struct dev_pm_opp *opp;
 	struct list_head *head = &opp_table->opp_list;
@@ -1069,8 +1031,8 @@
  *		Duplicate OPPs (both freq and volt are same) and !opp->available
  * -ENOMEM	Memory allocation failure
  */
-static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
-		       bool dynamic)
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
+		bool dynamic)
 {
 	struct opp_table *opp_table;
 	struct dev_pm_opp *new_opp;
@@ -1115,83 +1077,6 @@
 	return ret;
 }
 
-/* TODO: Support multiple regulators */
-static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
-			      struct opp_table *opp_table)
-{
-	u32 microvolt[3] = {0};
-	u32 val;
-	int count, ret;
-	struct property *prop = NULL;
-	char name[NAME_MAX];
-
-	/* Search for "opp-microvolt-<name>" */
-	if (opp_table->prop_name) {
-		snprintf(name, sizeof(name), "opp-microvolt-%s",
-			 opp_table->prop_name);
-		prop = of_find_property(opp->np, name, NULL);
-	}
-
-	if (!prop) {
-		/* Search for "opp-microvolt" */
-		sprintf(name, "opp-microvolt");
-		prop = of_find_property(opp->np, name, NULL);
-
-		/* Missing property isn't a problem, but an invalid entry is */
-		if (!prop)
-			return 0;
-	}
-
-	count = of_property_count_u32_elems(opp->np, name);
-	if (count < 0) {
-		dev_err(dev, "%s: Invalid %s property (%d)\n",
-			__func__, name, count);
-		return count;
-	}
-
-	/* There can be one or three elements here */
-	if (count != 1 && count != 3) {
-		dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
-			__func__, name, count);
-		return -EINVAL;
-	}
-
-	ret = of_property_read_u32_array(opp->np, name, microvolt, count);
-	if (ret) {
-		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
-		return -EINVAL;
-	}
-
-	opp->u_volt = microvolt[0];
-
-	if (count == 1) {
-		opp->u_volt_min = opp->u_volt;
-		opp->u_volt_max = opp->u_volt;
-	} else {
-		opp->u_volt_min = microvolt[1];
-		opp->u_volt_max = microvolt[2];
-	}
-
-	/* Search for "opp-microamp-<name>" */
-	prop = NULL;
-	if (opp_table->prop_name) {
-		snprintf(name, sizeof(name), "opp-microamp-%s",
-			 opp_table->prop_name);
-		prop = of_find_property(opp->np, name, NULL);
-	}
-
-	if (!prop) {
-		/* Search for "opp-microamp" */
-		sprintf(name, "opp-microamp");
-		prop = of_find_property(opp->np, name, NULL);
-	}
-
-	if (prop && !of_property_read_u32(opp->np, name, &val))
-		opp->u_amp = val;
-
-	return 0;
-}
-
 /**
  * dev_pm_opp_set_supported_hw() - Set supported platforms
  * @dev: Device for which supported-hw has to be set.
@@ -1520,144 +1405,6 @@
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
 
-static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
-			      struct device_node *np)
-{
-	unsigned int count = opp_table->supported_hw_count;
-	u32 version;
-	int ret;
-
-	if (!opp_table->supported_hw)
-		return true;
-
-	while (count--) {
-		ret = of_property_read_u32_index(np, "opp-supported-hw", count,
-						 &version);
-		if (ret) {
-			dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
-				 __func__, count, ret);
-			return false;
-		}
-
-		/* Both of these are bitwise masks of the versions */
-		if (!(version & opp_table->supported_hw[count]))
-			return false;
-	}
-
-	return true;
-}
-
-/**
- * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
- * @dev:	device for which we do this operation
- * @np:		device node
- *
- * This function adds an opp definition to the opp table and returns status. The
- * opp can be controlled using dev_pm_opp_enable/disable functions and may be
- * removed by dev_pm_opp_remove.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
- * Return:
- * 0		On success OR
- *		Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST	Freq are same and volt are different OR
- *		Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM	Memory allocation failure
- * -EINVAL	Failed parsing the OPP node
- */
-static int _opp_add_static_v2(struct device *dev, struct device_node *np)
-{
-	struct opp_table *opp_table;
-	struct dev_pm_opp *new_opp;
-	u64 rate;
-	u32 val;
-	int ret;
-
-	/* Hold our table modification lock here */
-	mutex_lock(&opp_table_lock);
-
-	new_opp = _allocate_opp(dev, &opp_table);
-	if (!new_opp) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
-
-	ret = of_property_read_u64(np, "opp-hz", &rate);
-	if (ret < 0) {
-		dev_err(dev, "%s: opp-hz not found\n", __func__);
-		goto free_opp;
-	}
-
-	/* Check if the OPP supports hardware's hierarchy of versions or not */
-	if (!_opp_is_supported(dev, opp_table, np)) {
-		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
-		goto free_opp;
-	}
-
-	/*
-	 * Rate is defined as an unsigned long in clk API, and so casting
-	 * explicitly to its type. Must be fixed once rate is 64 bit
-	 * guaranteed in clk API.
-	 */
-	new_opp->rate = (unsigned long)rate;
-	new_opp->turbo = of_property_read_bool(np, "turbo-mode");
-
-	new_opp->np = np;
-	new_opp->dynamic = false;
-	new_opp->available = true;
-
-	if (!of_property_read_u32(np, "clock-latency-ns", &val))
-		new_opp->clock_latency_ns = val;
-
-	ret = opp_parse_supplies(new_opp, dev, opp_table);
-	if (ret)
-		goto free_opp;
-
-	ret = _opp_add(dev, new_opp, opp_table);
-	if (ret)
-		goto free_opp;
-
-	/* OPP to select on device suspend */
-	if (of_property_read_bool(np, "opp-suspend")) {
-		if (opp_table->suspend_opp) {
-			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
-				 __func__, opp_table->suspend_opp->rate,
-				 new_opp->rate);
-		} else {
-			new_opp->suspend = true;
-			opp_table->suspend_opp = new_opp;
-		}
-	}
-
-	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
-		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
-
-	mutex_unlock(&opp_table_lock);
-
-	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
-		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
-		 new_opp->u_volt_min, new_opp->u_volt_max,
-		 new_opp->clock_latency_ns);
-
-	/*
-	 * Notify the changes in the availability of the operable
-	 * frequency/voltage list.
-	 */
-	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
-	return 0;
-
-free_opp:
-	_opp_remove(opp_table, new_opp, false);
-unlock:
-	mutex_unlock(&opp_table_lock);
-	return ret;
-}
-
 /**
  * dev_pm_opp_add()  - Add an OPP table from a table definitions
  * @dev:	device for which we do this operation
@@ -1845,21 +1592,11 @@
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
 
-#ifdef CONFIG_OF
-/**
- * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
- *				  entries
- * @dev:	device pointer used to lookup OPP table.
- *
- * Free OPPs created using static entries present in DT.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function indirectly uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
+/*
+ * Free OPPs either created using static entries present in DT or even the
+ * dynamically added entries based on remove_all param.
  */
-void dev_pm_opp_of_remove_table(struct device *dev)
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
 {
 	struct opp_table *opp_table;
 	struct dev_pm_opp *opp, *tmp;
@@ -1884,7 +1621,7 @@
 	if (list_is_singular(&opp_table->dev_list)) {
 		/* Free static OPPs */
 		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
-			if (!opp->dynamic)
+			if (remove_all || !opp->dynamic)
 				_opp_remove(opp_table, opp, true);
 		}
 	} else {
@@ -1894,160 +1631,22 @@
 unlock:
 	mutex_unlock(&opp_table_lock);
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *_of_get_opp_desc_node(struct device *dev)
-{
-	/*
-	 * TODO: Support for multiple OPP tables.
-	 *
-	 * There should be only ONE phandle present in "operating-points-v2"
-	 * property.
-	 */
-
-	return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
-}
-
-/* Initializes OPP tables based on new bindings */
-static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
-{
-	struct device_node *np;
-	struct opp_table *opp_table;
-	int ret = 0, count = 0;
-
-	mutex_lock(&opp_table_lock);
-
-	opp_table = _managed_opp(opp_np);
-	if (opp_table) {
-		/* OPPs are already managed */
-		if (!_add_opp_dev(dev, opp_table))
-			ret = -ENOMEM;
-		mutex_unlock(&opp_table_lock);
-		return ret;
-	}
-	mutex_unlock(&opp_table_lock);
-
-	/* We have opp-table node now, iterate over it and add OPPs */
-	for_each_available_child_of_node(opp_np, np) {
-		count++;
-
-		ret = _opp_add_static_v2(dev, np);
-		if (ret) {
-			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
-				ret);
-			goto free_table;
-		}
-	}
-
-	/* There should be one of more OPP defined */
-	if (WARN_ON(!count))
-		return -ENOENT;
-
-	mutex_lock(&opp_table_lock);
-
-	opp_table = _find_opp_table(dev);
-	if (WARN_ON(IS_ERR(opp_table))) {
-		ret = PTR_ERR(opp_table);
-		mutex_unlock(&opp_table_lock);
-		goto free_table;
-	}
-
-	opp_table->np = opp_np;
-	opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
-
-	mutex_unlock(&opp_table_lock);
-
-	return 0;
-
-free_table:
-	dev_pm_opp_of_remove_table(dev);
-
-	return ret;
-}
-
-/* Initializes OPP tables based on old-deprecated bindings */
-static int _of_add_opp_table_v1(struct device *dev)
-{
-	const struct property *prop;
-	const __be32 *val;
-	int nr;
-
-	prop = of_find_property(dev->of_node, "operating-points", NULL);
-	if (!prop)
-		return -ENODEV;
-	if (!prop->value)
-		return -ENODATA;
-
-	/*
-	 * Each OPP is a set of tuples consisting of frequency and
-	 * voltage like <freq-kHz vol-uV>.
-	 */
-	nr = prop->length / sizeof(u32);
-	if (nr % 2) {
-		dev_err(dev, "%s: Invalid OPP table\n", __func__);
-		return -EINVAL;
-	}
-
-	val = prop->value;
-	while (nr) {
-		unsigned long freq = be32_to_cpup(val++) * 1000;
-		unsigned long volt = be32_to_cpup(val++);
-
-		if (_opp_add_v1(dev, freq, volt, false))
-			dev_warn(dev, "%s: Failed to add OPP %ld\n",
-				 __func__, freq);
-		nr -= 2;
-	}
-
-	return 0;
-}
 
 /**
- * dev_pm_opp_of_add_table() - Initialize opp table from device tree
+ * dev_pm_opp_remove_table() - Free all OPPs associated with the device
  * @dev:	device pointer used to lookup OPP table.
  *
- * Register the initial OPP table with the OPP library for given device.
+ * Free both OPPs created using static entries present in DT and the
+ * dynamically added entries.
  *
  * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
- *
- * Return:
- * 0		On success OR
- *		Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST	Freq are same and volt are different OR
- *		Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM	Memory allocation failure
- * -ENODEV	when 'operating-points' property is not found or is invalid data
- *		in device node.
- * -ENODATA	when empty 'operating-points' property is found
- * -EINVAL	when invalid entries are found in opp-v2 table
  */
-int dev_pm_opp_of_add_table(struct device *dev)
+void dev_pm_opp_remove_table(struct device *dev)
 {
-	struct device_node *opp_np;
-	int ret;
-
-	/*
-	 * OPPs have two version of bindings now. The older one is deprecated,
-	 * try for the new binding first.
-	 */
-	opp_np = _of_get_opp_desc_node(dev);
-	if (!opp_np) {
-		/*
-		 * Try old-deprecated bindings for backward compatibility with
-		 * older dtbs.
-		 */
-		return _of_add_opp_table_v1(dev);
-	}
-
-	ret = _of_add_opp_table_v2(dev, opp_np);
-	of_node_put(opp_np);
-
-	return ret;
+	_dev_pm_opp_remove_table(dev, true);
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
-#endif
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
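
A minimal usage sketch of the new dev_pm_opp_remove_table() helper (not from the patch; the function name, frequencies and voltages are invented), dropping every OPP of a device, dynamic or static, in one call:

#include <linux/pm_opp.h>

static int my_setup_opps(struct device *dev)
{
	int ret;

	/* dynamically register two OPPs: 500 MHz @ 1.0 V and 1 GHz @ 1.2 V */
	ret = dev_pm_opp_add(dev, 500000000, 1000000);
	if (ret)
		return ret;

	ret = dev_pm_opp_add(dev, 1000000000, 1200000);
	if (ret)
		/* removes both static (DT) and dynamic entries */
		dev_pm_opp_remove_table(dev);

	return ret;
}
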
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index ba2bdbd..83d6e7b 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -18,7 +18,6 @@
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/export.h>
-#include <linux/of.h>
 #include <linux/slab.h>
 
 #include "opp.h"
@@ -119,8 +118,66 @@
 EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
 #endif	/* CONFIG_CPU_FREQ */
 
-/* Required only for V1 bindings, as v2 can manage it from DT itself */
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
+{
+	struct device *cpu_dev;
+	int cpu;
+
+	WARN_ON(cpumask_empty(cpumask));
+
+	for_each_cpu(cpu, cpumask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__,
+			       cpu);
+			continue;
+		}
+
+		if (of)
+			dev_pm_opp_of_remove_table(cpu_dev);
+		else
+			dev_pm_opp_remove_table(cpu_dev);
+	}
+}
+
+/**
+ * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
+ * @cpumask:	cpumask for which OPP table needs to be removed
+ *
+ * This removes the OPP tables for CPUs present in the @cpumask.
+ * This should be used to remove all the OPP entries associated with
+ * the CPUs in @cpumask.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
+{
+	_dev_pm_opp_cpumask_remove_table(cpumask, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
+
+/**
+ * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs
+ * @cpu_dev:	CPU device for which we do this operation
+ * @cpumask:	cpumask of the CPUs which share the OPP table with @cpu_dev
+ *
+ * This marks OPP table of the @cpu_dev as shared by the CPUs present in
+ * @cpumask.
+ *
+ * Returns -ENODEV if OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
+				const struct cpumask *cpumask)
 {
 	struct opp_device *opp_dev;
 	struct opp_table *opp_table;
@@ -131,7 +188,7 @@
 
 	opp_table = _find_opp_table(cpu_dev);
 	if (IS_ERR(opp_table)) {
-		ret = -EINVAL;
+		ret = PTR_ERR(opp_table);
 		goto unlock;
 	}
 
@@ -152,6 +209,9 @@
 				__func__, cpu);
 			continue;
 		}
+
+		/* Mark the opp-table as shared by multiple CPUs now */
+		opp_table->shared_opp = true;
 	}
 unlock:
 	mutex_unlock(&opp_table_lock);
@@ -160,112 +220,47 @@
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
 
-#ifdef CONFIG_OF
-void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
-{
-	struct device *cpu_dev;
-	int cpu;
-
-	WARN_ON(cpumask_empty(cpumask));
-
-	for_each_cpu(cpu, cpumask) {
-		cpu_dev = get_cpu_device(cpu);
-		if (!cpu_dev) {
-			pr_err("%s: failed to get cpu%d device\n", __func__,
-			       cpu);
-			continue;
-		}
-
-		dev_pm_opp_of_remove_table(cpu_dev);
-	}
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
-
-int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
-{
-	struct device *cpu_dev;
-	int cpu, ret = 0;
-
-	WARN_ON(cpumask_empty(cpumask));
-
-	for_each_cpu(cpu, cpumask) {
-		cpu_dev = get_cpu_device(cpu);
-		if (!cpu_dev) {
-			pr_err("%s: failed to get cpu%d device\n", __func__,
-			       cpu);
-			continue;
-		}
-
-		ret = dev_pm_opp_of_add_table(cpu_dev);
-		if (ret) {
-			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
-			       __func__, cpu, ret);
-
-			/* Free all other OPPs */
-			dev_pm_opp_of_cpumask_remove_table(cpumask);
-			break;
-		}
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
-
-/*
- * Works only for OPP v2 bindings.
+/**
+ * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
+ * @cpu_dev:	CPU device for which we do this operation
+ * @cpumask:	cpumask to update with information of sharing CPUs
  *
- * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
+ *
+ * Returns -ENODEV if OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
  */
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
-	struct device_node *np, *tmp_np;
-	struct device *tcpu_dev;
-	int cpu, ret = 0;
+	struct opp_device *opp_dev;
+	struct opp_table *opp_table;
+	int ret = 0;
 
-	/* Get OPP descriptor node */
-	np = _of_get_opp_desc_node(cpu_dev);
-	if (!np) {
-		dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
-		return -ENOENT;
+	mutex_lock(&opp_table_lock);
+
+	opp_table = _find_opp_table(cpu_dev);
+	if (IS_ERR(opp_table)) {
+		ret = PTR_ERR(opp_table);
+		goto unlock;
 	}
 
-	cpumask_set_cpu(cpu_dev->id, cpumask);
+	cpumask_clear(cpumask);
 
-	/* OPPs are shared ? */
-	if (!of_property_read_bool(np, "opp-shared"))
-		goto put_cpu_node;
-
-	for_each_possible_cpu(cpu) {
-		if (cpu == cpu_dev->id)
-			continue;
-
-		tcpu_dev = get_cpu_device(cpu);
-		if (!tcpu_dev) {
-			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
-				__func__, cpu);
-			ret = -ENODEV;
-			goto put_cpu_node;
-		}
-
-		/* Get OPP descriptor node */
-		tmp_np = _of_get_opp_desc_node(tcpu_dev);
-		if (!tmp_np) {
-			dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
-				__func__);
-			ret = -ENOENT;
-			goto put_cpu_node;
-		}
-
-		/* CPUs are sharing opp node */
-		if (np == tmp_np)
-			cpumask_set_cpu(cpu, cpumask);
-
-		of_node_put(tmp_np);
+	if (opp_table->shared_opp) {
+		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+			cpumask_set_cpu(opp_dev->dev->id, cpumask);
+	} else {
+		cpumask_set_cpu(cpu_dev->id, cpumask);
 	}
 
-put_cpu_node:
-	of_node_put(np);
+unlock:
+	mutex_unlock(&opp_table_lock);
+
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
-#endif
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
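
A hedged sketch of how a cpufreq driver might pair the reworked sharing helpers (the init function and the use of policy->cpus are assumptions, not taken from the patch):

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int my_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* OPPs were added for policy->cpu; mark them shared with its siblings */
	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	if (ret)
		return ret;

	/* the sharing information can later be read back from the OPP core */
	return dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
}
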
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
new file mode 100644
index 0000000..94d2010
--- /dev/null
+++ b/drivers/base/power/opp/of.c
@@ -0,0 +1,591 @@
+/*
+ * Generic OPP OF helpers
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ *	Nishanth Menon
+ *	Romit Dasgupta
+ *	Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/export.h>
+
+#include "opp.h"
+
+static struct opp_table *_managed_opp(const struct device_node *np)
+{
+	struct opp_table *opp_table;
+
+	list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+		if (opp_table->np == np) {
+			/*
+			 * Multiple devices can point to the same OPP table and
+			 * so will have same node-pointer, np.
+			 *
+			 * But the OPPs will be considered as shared only if the
+			 * OPP table contains an "opp-shared" property.
+			 */
+			return opp_table->shared_opp ? opp_table : NULL;
+		}
+	}
+
+	return NULL;
+}
+
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
+{
+	struct device_node *np;
+
+	/*
+	 * Only required for backward compatibility with v1 bindings, but isn't
+	 * harmful for other cases. And so we do it unconditionally.
+	 */
+	np = of_node_get(dev->of_node);
+	if (np) {
+		u32 val;
+
+		if (!of_property_read_u32(np, "clock-latency", &val))
+			opp_table->clock_latency_ns_max = val;
+		of_property_read_u32(np, "voltage-tolerance",
+				     &opp_table->voltage_tolerance_v1);
+		of_node_put(np);
+	}
+}
+
+static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
+			      struct device_node *np)
+{
+	unsigned int count = opp_table->supported_hw_count;
+	u32 version;
+	int ret;
+
+	if (!opp_table->supported_hw)
+		return true;
+
+	while (count--) {
+		ret = of_property_read_u32_index(np, "opp-supported-hw", count,
+						 &version);
+		if (ret) {
+			dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+				 __func__, count, ret);
+			return false;
+		}
+
+		/* Both of these are bitwise masks of the versions */
+		if (!(version & opp_table->supported_hw[count]))
+			return false;
+	}
+
+	return true;
+}
+
+/* TODO: Support multiple regulators */
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+			      struct opp_table *opp_table)
+{
+	u32 microvolt[3] = {0};
+	u32 val;
+	int count, ret;
+	struct property *prop = NULL;
+	char name[NAME_MAX];
+
+	/* Search for "opp-microvolt-<name>" */
+	if (opp_table->prop_name) {
+		snprintf(name, sizeof(name), "opp-microvolt-%s",
+			 opp_table->prop_name);
+		prop = of_find_property(opp->np, name, NULL);
+	}
+
+	if (!prop) {
+		/* Search for "opp-microvolt" */
+		sprintf(name, "opp-microvolt");
+		prop = of_find_property(opp->np, name, NULL);
+
+		/* Missing property isn't a problem, but an invalid entry is */
+		if (!prop)
+			return 0;
+	}
+
+	count = of_property_count_u32_elems(opp->np, name);
+	if (count < 0) {
+		dev_err(dev, "%s: Invalid %s property (%d)\n",
+			__func__, name, count);
+		return count;
+	}
+
+	/* There can be one or three elements here */
+	if (count != 1 && count != 3) {
+		dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
+			__func__, name, count);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_array(opp->np, name, microvolt, count);
+	if (ret) {
+		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
+		return -EINVAL;
+	}
+
+	opp->u_volt = microvolt[0];
+
+	if (count == 1) {
+		opp->u_volt_min = opp->u_volt;
+		opp->u_volt_max = opp->u_volt;
+	} else {
+		opp->u_volt_min = microvolt[1];
+		opp->u_volt_max = microvolt[2];
+	}
+
+	/* Search for "opp-microamp-<name>" */
+	prop = NULL;
+	if (opp_table->prop_name) {
+		snprintf(name, sizeof(name), "opp-microamp-%s",
+			 opp_table->prop_name);
+		prop = of_find_property(opp->np, name, NULL);
+	}
+
+	if (!prop) {
+		/* Search for "opp-microamp" */
+		sprintf(name, "opp-microamp");
+		prop = of_find_property(opp->np, name, NULL);
+	}
+
+	if (prop && !of_property_read_u32(opp->np, name, &val))
+		opp->u_amp = val;
+
+	return 0;
+}
+
+/**
+ * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
+ *				  entries
+ * @dev:	device pointer used to lookup OPP table.
+ *
+ * Free OPPs created using static entries present in DT.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_of_remove_table(struct device *dev)
+{
+	_dev_pm_opp_remove_table(dev, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *_of_get_opp_desc_node(struct device *dev)
+{
+	/*
+	 * TODO: Support for multiple OPP tables.
+	 *
+	 * There should be only ONE phandle present in "operating-points-v2"
+	 * property.
+	 */
+
+	return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+}
+
+/**
+ * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
+ * @dev:	device for which we do this operation
+ * @np:		device node
+ *
+ * This function adds an opp definition to the opp table and returns status. The
+ * opp can be controlled using dev_pm_opp_enable/disable functions and may be
+ * removed by dev_pm_opp_remove.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM	Memory allocation failure
+ * -EINVAL	Failed parsing the OPP node
+ */
+static int _opp_add_static_v2(struct device *dev, struct device_node *np)
+{
+	struct opp_table *opp_table;
+	struct dev_pm_opp *new_opp;
+	u64 rate;
+	u32 val;
+	int ret;
+
+	/* Hold our table modification lock here */
+	mutex_lock(&opp_table_lock);
+
+	new_opp = _allocate_opp(dev, &opp_table);
+	if (!new_opp) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	ret = of_property_read_u64(np, "opp-hz", &rate);
+	if (ret < 0) {
+		dev_err(dev, "%s: opp-hz not found\n", __func__);
+		goto free_opp;
+	}
+
+	/* Check if the OPP supports hardware's hierarchy of versions or not */
+	if (!_opp_is_supported(dev, opp_table, np)) {
+		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+		goto free_opp;
+	}
+
+	/*
+	 * Rate is defined as an unsigned long in the clk API, so cast it
+	 * explicitly to that type. This must be fixed once the clk API
+	 * guarantees a 64-bit rate.
+	 */
+	new_opp->rate = (unsigned long)rate;
+	new_opp->turbo = of_property_read_bool(np, "turbo-mode");
+
+	new_opp->np = np;
+	new_opp->dynamic = false;
+	new_opp->available = true;
+
+	if (!of_property_read_u32(np, "clock-latency-ns", &val))
+		new_opp->clock_latency_ns = val;
+
+	ret = opp_parse_supplies(new_opp, dev, opp_table);
+	if (ret)
+		goto free_opp;
+
+	ret = _opp_add(dev, new_opp, opp_table);
+	if (ret)
+		goto free_opp;
+
+	/* OPP to select on device suspend */
+	if (of_property_read_bool(np, "opp-suspend")) {
+		if (opp_table->suspend_opp) {
+			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
+				 __func__, opp_table->suspend_opp->rate,
+				 new_opp->rate);
+		} else {
+			new_opp->suspend = true;
+			opp_table->suspend_opp = new_opp;
+		}
+	}
+
+	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
+		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
+
+	mutex_unlock(&opp_table_lock);
+
+	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
+		 new_opp->u_volt_min, new_opp->u_volt_max,
+		 new_opp->clock_latency_ns);
+
+	/*
+	 * Notify the changes in the availability of the operable
+	 * frequency/voltage list.
+	 */
+	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
+	return 0;
+
+free_opp:
+	_opp_remove(opp_table, new_opp, false);
+unlock:
+	mutex_unlock(&opp_table_lock);
+	return ret;
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+{
+	struct device_node *np;
+	struct opp_table *opp_table;
+	int ret = 0, count = 0;
+
+	mutex_lock(&opp_table_lock);
+
+	opp_table = _managed_opp(opp_np);
+	if (opp_table) {
+		/* OPPs are already managed */
+		if (!_add_opp_dev(dev, opp_table))
+			ret = -ENOMEM;
+		mutex_unlock(&opp_table_lock);
+		return ret;
+	}
+	mutex_unlock(&opp_table_lock);
+
+	/* We have opp-table node now, iterate over it and add OPPs */
+	for_each_available_child_of_node(opp_np, np) {
+		count++;
+
+		ret = _opp_add_static_v2(dev, np);
+		if (ret) {
+			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+				ret);
+			goto free_table;
+		}
+	}
+
+	/* There should be one or more OPPs defined */
+	if (WARN_ON(!count))
+		return -ENOENT;
+
+	mutex_lock(&opp_table_lock);
+
+	opp_table = _find_opp_table(dev);
+	if (WARN_ON(IS_ERR(opp_table))) {
+		ret = PTR_ERR(opp_table);
+		mutex_unlock(&opp_table_lock);
+		goto free_table;
+	}
+
+	opp_table->np = opp_np;
+	opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+	mutex_unlock(&opp_table_lock);
+
+	return 0;
+
+free_table:
+	dev_pm_opp_of_remove_table(dev);
+
+	return ret;
+}
+
+/* Initializes OPP tables based on old-deprecated bindings */
+static int _of_add_opp_table_v1(struct device *dev)
+{
+	const struct property *prop;
+	const __be32 *val;
+	int nr;
+
+	prop = of_find_property(dev->of_node, "operating-points", NULL);
+	if (!prop)
+		return -ENODEV;
+	if (!prop->value)
+		return -ENODATA;
+
+	/*
+	 * Each OPP is a set of tuples consisting of frequency and
+	 * voltage like <freq-kHz vol-uV>.
+	 */
+	nr = prop->length / sizeof(u32);
+	if (nr % 2) {
+		dev_err(dev, "%s: Invalid OPP table\n", __func__);
+		return -EINVAL;
+	}
+
+	val = prop->value;
+	while (nr) {
+		unsigned long freq = be32_to_cpup(val++) * 1000;
+		unsigned long volt = be32_to_cpup(val++);
+
+		if (_opp_add_v1(dev, freq, volt, false))
+			dev_warn(dev, "%s: Failed to add OPP %ld\n",
+				 __func__, freq);
+		nr -= 2;
+	}
+
+	return 0;
+}
+
+/**
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
+ * @dev:	device pointer used to lookup OPP table.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM	Memory allocation failure
+ * -ENODEV	when 'operating-points' property is not found or is invalid data
+ *		in device node.
+ * -ENODATA	when empty 'operating-points' property is found
+ * -EINVAL	when invalid entries are found in opp-v2 table
+ */
+int dev_pm_opp_of_add_table(struct device *dev)
+{
+	struct device_node *opp_np;
+	int ret;
+
+	/*
+	 * OPPs have two versions of bindings now. The older one is deprecated,
+	 * try for the new binding first.
+	 */
+	opp_np = _of_get_opp_desc_node(dev);
+	if (!opp_np) {
+		/*
+		 * Try old-deprecated bindings for backward compatibility with
+		 * older dtbs.
+		 */
+		return _of_add_opp_table_v1(dev);
+	}
+
+	ret = _of_add_opp_table_v2(dev, opp_np);
+	of_node_put(opp_np);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
+
+/* CPU device specific helpers */
+
+/**
+ * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
+ * @cpumask:	cpumask for which OPP table needs to be removed
+ *
+ * This removes the OPP tables for CPUs present in the @cpumask.
+ * This should be used only to remove static entries created from DT.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
+{
+	_dev_pm_opp_cpumask_remove_table(cpumask, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
+
+/**
+ * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
+ * @cpumask:	cpumask for which OPP table needs to be added.
+ *
+ * This adds the OPP tables for CPUs present in the @cpumask.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
+{
+	struct device *cpu_dev;
+	int cpu, ret = 0;
+
+	WARN_ON(cpumask_empty(cpumask));
+
+	for_each_cpu(cpu, cpumask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__,
+			       cpu);
+			continue;
+		}
+
+		ret = dev_pm_opp_of_add_table(cpu_dev);
+		if (ret) {
+			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+			       __func__, cpu, ret);
+
+			/* Free all other OPPs */
+			dev_pm_opp_of_cpumask_remove_table(cpumask);
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
+
+/*
+ * Works only for OPP v2 bindings.
+ *
+ * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ */
+/**
+ * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
+ *				      @cpu_dev using operating-points-v2
+ *				      bindings.
+ *
+ * @cpu_dev:	CPU device for which we do this operation
+ * @cpumask:	cpumask to update with information of sharing CPUs
+ *
+ * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
+ *
+ * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
+				   struct cpumask *cpumask)
+{
+	struct device_node *np, *tmp_np;
+	struct device *tcpu_dev;
+	int cpu, ret = 0;
+
+	/* Get OPP descriptor node */
+	np = _of_get_opp_desc_node(cpu_dev);
+	if (!np) {
+		dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+		return -ENOENT;
+	}
+
+	cpumask_set_cpu(cpu_dev->id, cpumask);
+
+	/* OPPs are shared ? */
+	if (!of_property_read_bool(np, "opp-shared"))
+		goto put_cpu_node;
+
+	for_each_possible_cpu(cpu) {
+		if (cpu == cpu_dev->id)
+			continue;
+
+		tcpu_dev = get_cpu_device(cpu);
+		if (!tcpu_dev) {
+			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+				__func__, cpu);
+			ret = -ENODEV;
+			goto put_cpu_node;
+		}
+
+		/* Get OPP descriptor node */
+		tmp_np = _of_get_opp_desc_node(tcpu_dev);
+		if (!tmp_np) {
+			dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+				__func__);
+			ret = -ENOENT;
+			goto put_cpu_node;
+		}
+
+		/* CPUs are sharing opp node */
+		if (np == tmp_np)
+			cpumask_set_cpu(cpu, cpumask);
+
+		of_node_put(tmp_np);
+	}
+
+put_cpu_node:
+	of_node_put(np);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
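
A hedged sketch of a probe path built on the OF helpers now collected in of.c (the function and variable names are invented; a stack cpumask is used only to keep the example short):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/pm_opp.h>

static int my_cpu_probe(struct device *cpu_dev)
{
	struct cpumask shared;	/* real code may prefer cpumask_var_t */
	int ret;

	/* parse operating-points / operating-points-v2 from the device tree */
	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret)
		return ret;

	/* only meaningful for v2 bindings; -ENOENT means no opp-v2 node */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &shared);
	if (ret && ret != -ENOENT) {
		dev_pm_opp_of_remove_table(cpu_dev);
		return ret;
	}

	return 0;
}
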
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index f67f806..20f3be2 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -28,6 +28,8 @@
 /* Lock to allow exclusive modification to the device and opp lists */
 extern struct mutex opp_table_lock;
 
+extern struct list_head opp_tables;
+
 /*
  * Internal data structure organization with the OPP layer library is as
  * follows:
@@ -183,6 +185,18 @@
 struct opp_table *_find_opp_table(struct device *dev);
 struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
 struct device_node *_of_get_opp_desc_node(struct device *dev);
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
+struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify);
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic);
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
+
+#ifdef CONFIG_OF
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
+#else
+static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {}
+#endif
 
 #ifdef CONFIG_DEBUG_FS
 void opp_debug_remove_one(struct dev_pm_opp *opp);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 4c70550..b746904 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1506,11 +1506,16 @@
 		goto out;
 	}
 
-	ret = callback(dev);
+	ret = pm_runtime_set_active(dev);
 	if (ret)
 		goto out;
 
-	pm_runtime_set_active(dev);
+	ret = callback(dev);
+	if (ret) {
+		pm_runtime_set_suspended(dev);
+		goto out;
+	}
+
 	pm_runtime_mark_last_busy(dev);
 out:
 	pm_runtime_enable(dev);
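
The hunk above appears to sit in pm_runtime_force_resume(): the device is marked active before the resume callback runs, so a failing callback can be rolled back with pm_runtime_set_suspended(). A sketch of the usual caller pattern that relies on this helper pair (the driver callbacks are placeholders):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int my_runtime_suspend(struct device *dev)
{
	/* gate clocks, drop regulators, ... (placeholder) */
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	/* re-enable clocks and regulators (placeholder) */
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};
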
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a1e0b9a..5fb7718 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -246,6 +246,8 @@
 		return -EEXIST;
 	}
 	dev->power.wakeup = ws;
+	if (dev->power.wakeirq)
+		device_wakeup_attach_irq(dev, dev->power.wakeirq);
 	spin_unlock_irq(&dev->power.lock);
 	return 0;
 }
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 9b1a65d..f38c21d 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -19,9 +19,14 @@
 #include <linux/etherdevice.h>
 #include <linux/phy.h>
 
+struct property_set {
+	struct fwnode_handle fwnode;
+	struct property_entry *properties;
+};
+
 static inline bool is_pset_node(struct fwnode_handle *fwnode)
 {
-	return fwnode && fwnode->type == FWNODE_PDATA;
+	return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
 }
 
 static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
@@ -801,14 +806,14 @@
 }
 
 /**
- * device_remove_property_set - Remove properties from a device object.
+ * device_remove_properties - Remove properties from a device object.
  * @dev: Device whose properties to remove.
  *
  * The function removes properties previously associated to the device
- * secondary firmware node with device_add_property_set(). Memory allocated
+ * secondary firmware node with device_add_properties(). Memory allocated
  * to the properties will also be released.
  */
-void device_remove_property_set(struct device *dev)
+void device_remove_properties(struct device *dev)
 {
 	struct fwnode_handle *fwnode;
 
@@ -831,24 +836,27 @@
 		}
 	}
 }
-EXPORT_SYMBOL_GPL(device_remove_property_set);
+EXPORT_SYMBOL_GPL(device_remove_properties);
 
 /**
- * device_add_property_set - Add a collection of properties to a device object.
+ * device_add_properties - Add a collection of properties to a device object.
  * @dev: Device to add properties to.
- * @pset: Collection of properties to add.
+ * @properties: Collection of properties to add.
  *
- * Associate a collection of device properties represented by @pset with @dev
- * as its secondary firmware node. The function takes a copy of @pset.
+ * Associate a collection of device properties represented by @properties with
+ * @dev as its secondary firmware node. The function takes a copy of
+ * @properties.
  */
-int device_add_property_set(struct device *dev, const struct property_set *pset)
+int device_add_properties(struct device *dev, struct property_entry *properties)
 {
-	struct property_set *p;
+	struct property_set *p, pset;
 
-	if (!pset)
+	if (!properties)
 		return -EINVAL;
 
-	p = pset_copy_set(pset);
+	pset.properties = properties;
+
+	p = pset_copy_set(&pset);
 	if (IS_ERR(p))
 		return PTR_ERR(p);
 
@@ -856,7 +864,7 @@
 	set_secondary_fwnode(dev, &p->fwnode);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(device_add_property_set);
+EXPORT_SYMBOL_GPL(device_add_properties);
 
 /**
  * device_get_next_child_node - Return the next child node handle for a device
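
A hedged example of the renamed property API (the property names and values are made up; the PROPERTY_ENTRY_* initializer macros come from <linux/property.h>, while device_add_properties()/device_remove_properties() are the interfaces introduced above):

#include <linux/property.h>

static struct property_entry my_props[] = {
	PROPERTY_ENTRY_U32("max-speed-hz", 1000000),
	PROPERTY_ENTRY_STRING("label", "demo"),
	{ }	/* terminating entry */
};

static int my_attach_props(struct device *dev)
{
	/* the array is copied, so it need not stay around afterwards */
	return device_add_properties(dev, my_props);
}

static void my_detach_props(struct device *dev)
{
	device_remove_properties(dev);
}
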
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 5c79526..a038033 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -13,6 +13,7 @@
 #ifndef _REGMAP_INTERNAL_H
 #define _REGMAP_INTERNAL_H
 
+#include <linux/device.h>
 #include <linux/regmap.h>
 #include <linux/fs.h>
 #include <linux/list.h>
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 3ee7255..4d2e50b 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -27,7 +27,7 @@
 	int i;
 	unsigned int *cache;
 
-	if (!map || map->reg_stride_order < 0)
+	if (!map || map->reg_stride_order < 0 || !map->max_register)
 		return -EINVAL;
 
 	map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 4170b7d..df7ff729 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -529,7 +529,7 @@
  * regcache_cache_bypass: Put a register map into cache bypass mode
  *
  * @map: map to configure
- * @cache_bypass: flag if changes should not be written to the hardware
+ * @cache_bypass: flag if changes should not be written to the cache
  *
  * When a register map is marked with the cache bypass option, writes
  * to the register map API will only update the hardware and not the
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 7526906..5189fd6 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -23,6 +23,8 @@
 #include <linux/regmap.h>
 #include <linux/slab.h>
 
+#include "internal.h"
+
 struct regmap_mmio_context {
 	void __iomem *regs;
 	unsigned val_bytes;
@@ -212,6 +214,7 @@
 	.reg_write = regmap_mmio_write,
 	.reg_read = regmap_mmio_read,
 	.free_context = regmap_mmio_free_context,
+	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
 };
 
 static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
@@ -245,7 +248,7 @@
 	ctx->val_bytes = config->val_bits / 8;
 	ctx->clk = ERR_PTR(-ENODEV);
 
-	switch (config->reg_format_endian) {
+	switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
 	case REGMAP_ENDIAN_DEFAULT:
 	case REGMAP_ENDIAN_LITTLE:
 #ifdef __LITTLE_ENDIAN
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index 7e58f65..4a36e41 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -142,7 +142,7 @@
 	while (val_size) {
 		len = min_t(size_t, val_size, 8);
 
-		err = spmi_ext_register_readl(context, addr, val, val_size);
+		err = spmi_ext_register_readl(context, addr, val, len);
 		if (err)
 			goto err_out;
 
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 786be8f..1f63547 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@
 	return false;
 }
 
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
 static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
 						     struct bcma_device *core)
 {
@@ -184,7 +183,7 @@
 	struct of_phandle_args out_irq;
 	int ret;
 
-	if (!parent || !parent->dev.of_node)
+	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
 		return 0;
 
 	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@
 {
 	struct device_node *node;
 
+	if (!IS_ENABLED(CONFIG_OF_IRQ))
+		return;
+
 	node = bcma_of_find_child_device(parent, core);
 	if (node)
 		core->dev.of_node = node;
 
 	core->irq = bcma_of_get_irq(parent, core, 0);
 }
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
-				struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
-					   struct bcma_device *core, int num)
-{
-	return 0;
-}
-#endif /* CONFIG_OF */
 
 unsigned int bcma_core_irq(struct bcma_device *core, int num)
 {
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1f..ec9d861 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -397,7 +397,7 @@
 	WARN_ON(d->flags & DEVFL_UP);
 	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
 	q->backing_dev_info.name = "aoe";
-	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
 	d->bufpool = mp;
 	d->blkq = gd->queue = q;
 	q->queuedata = d;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f7ecc28..51a071e 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -374,7 +374,7 @@
 		       struct page *page, int rw)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
-	int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
 	page_endio(page, rw & WRITE, err);
 	return err;
 }
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c227fd4..7a1cf7e 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1327,8 +1327,8 @@
 #endif
 #endif
 
-/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
- * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
+/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+ * so for a typical PAGE_SIZE of 4k, that is (1<<20) bytes.
  * Since we may live in a mixed-platform cluster,
  * we limit us to a platform agnostic constant here for now.
  * A followup commit may allow even bigger BIO sizes,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index fa20977..2ba1494 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2761,7 +2761,7 @@
 	q->backing_dev_info.congested_data = device;
 
 	blk_queue_make_request(q, drbd_make_request);
-	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+	blk_queue_write_cache(q, true, true);
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 226eb0c..0bac9c8 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1178,7 +1178,7 @@
 	blk_queue_max_hw_sectors(q, max_hw_sectors);
 	/* This is the workaround for "bio would need to, but cannot, be split" */
 	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
-	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+	blk_queue_segment_boundary(q, PAGE_SIZE-1);
 
 	if (b) {
 		struct drbd_connection *connection = first_peer_device(device)->connection;
@@ -3633,14 +3633,15 @@
 		goto nla_put_failure;
 	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
 	    nla_put_u32(skb, T_current_state, device->state.i) ||
-	    nla_put_u64(skb, T_ed_uuid, device->ed_uuid) ||
-	    nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) ||
-	    nla_put_u64(skb, T_send_cnt, device->send_cnt) ||
-	    nla_put_u64(skb, T_recv_cnt, device->recv_cnt) ||
-	    nla_put_u64(skb, T_read_cnt, device->read_cnt) ||
-	    nla_put_u64(skb, T_writ_cnt, device->writ_cnt) ||
-	    nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) ||
-	    nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
+	    nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
+	    nla_put_u64_0pad(skb, T_capacity,
+			     drbd_get_capacity(device->this_bdev)) ||
+	    nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
+	    nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
+	    nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
+	    nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
+	    nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
+	    nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
 	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
 	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
 	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
@@ -3657,13 +3658,16 @@
 			goto nla_put_failure;
 
 		if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
-		    nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) ||
-		    nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device)))
+		    nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
+		    nla_put_u64_0pad(skb, T_bits_oos,
+				     drbd_bm_total_weight(device)))
 			goto nla_put_failure;
 		if (C_SYNC_SOURCE <= device->state.conn &&
 		    C_PAUSED_SYNC_T >= device->state.conn) {
-			if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) ||
-			    nla_put_u64(skb, T_bits_rs_failed, device->rs_failed))
+			if (nla_put_u64_0pad(skb, T_bits_rs_total,
+					     device->rs_total) ||
+			    nla_put_u64_0pad(skb, T_bits_rs_failed,
+					     device->rs_failed))
 				goto nla_put_failure;
 		}
 	}
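
The nla_put_u64() to nla_put_u64_0pad() conversion above appears to be part of this cycle's 64-bit netlink attribute rework: callers now spell out how the 8-byte payload is padded, and the _0pad variant (used here with the same skb/attribute/value arguments) declares that no explicit pad attribute is emitted. Call-site sketch, reusing the capacity attribute from the hunk above:

	/* old: 64-bit attribute, padding policy left implicit */
	nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev));

	/* new: same arguments, the name documents the zero-padding policy */
	nla_put_u64_0pad(skb, T_capacity, drbd_get_capacity(device->this_bdev));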
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca..1fa8cc2 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@
 	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
 	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
 		      bio_segments(bio), blk_rq_bytes(cmd->rq));
+	/*
+	 * This bio may be started from the middle of the 'bvec'
+	 * because of bio splitting, so offset from the bvec must
+	 * be passed to iov iterator
+	 */
+	iter.iov_offset = bio->bi_iter.bi_bvec_done;
 
 	cmd->iocb.ki_pos = pos;
 	cmd->iocb.ki_filp = file;
@@ -937,7 +943,7 @@
 	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
-		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
+		blk_queue_write_cache(lo->lo_queue, true, false);
 
 	loop_update_dio(lo);
 	set_capacity(lo->lo_disk, size);
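
The loop change above handles split bios: bi_iter.bi_bvec_done records how many bytes of the current bvec have already been completed, so an iov_iter built from the raw bvec array has to start at that offset. With the names from the hunk:

	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
		      bio_segments(bio), blk_rq_bytes(cmd->rq));
	/* a split bio may begin part-way into its first bvec; carry that offset over */
	iter.iov_offset = bio->bi_iter.bi_bvec_done;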
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 25824c1..6053e46 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3000,14 +3000,14 @@
 					"Completion workers still active!");
 
 			spin_lock(dd->queue->queue_lock);
-			blk_mq_all_tag_busy_iter(*dd->tags.tags,
+			blk_mq_tagset_busy_iter(&dd->tags,
 							mtip_queue_cmd, dd);
 			spin_unlock(dd->queue->queue_lock);
 
 			set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
 
 			if (mtip_device_reset(dd))
-				blk_mq_all_tag_busy_iter(*dd->tags.tags,
+				blk_mq_tagset_busy_iter(&dd->tags,
 							mtip_abort_cmd, dd);
 
 			clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
@@ -4023,12 +4023,6 @@
 	blk_queue_io_min(dd->queue, 4096);
 	blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
 
-	/*
-	 * write back cache is not supported in the device. FUA depends on
-	 * write back cache support, hence setting flush support to zero.
-	 */
-	blk_queue_flush(dd->queue, 0);
-
 	/* Signal trim support */
 	if (dd->trim_supp == true) {
 		set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
@@ -4174,7 +4168,7 @@
 
 	blk_mq_freeze_queue_start(dd->queue);
 	blk_mq_stop_hw_queues(dd->queue);
-	blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+	blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
 
 	/*
 	 * Delete our gendisk structure. This also removes the device
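
The mtip32xx hunks above swap the per-tag-map iterator for the tag-set one: rather than walking a single struct blk_mq_tags pointer, blk_mq_tagset_busy_iter() visits every busy request owned by the tag set, which is what the driver wants when aborting or re-issuing everything in flight. Call-site sketch:

	/* old: only the one tag map passed in is walked */
	blk_mq_all_tag_busy_iter(*dd->tags.tags, mtip_abort_cmd, dd);

	/* new: all busy requests in the whole tag set are visited */
	blk_mq_tagset_busy_iter(&dd->tags, mtip_abort_cmd, dd);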
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 08afbc7..31e73a7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -693,9 +693,9 @@
 	if (nbd->flags & NBD_FLAG_SEND_TRIM)
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
-		blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+		blk_queue_write_cache(nbd->disk->queue, true, false);
 	else
-		blk_queue_flush(nbd->disk->queue, 0);
+		blk_queue_write_cache(nbd->disk->queue, false, false);
 }
 
 static int nbd_dev_dbg_init(struct nbd_device *nbd);
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 1b709a4..c2854a2 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -437,7 +437,7 @@
 	blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
 
 	blk_queue_prep_rq(q, blk_queue_start_tag);
-	blk_queue_flush(q, REQ_FLUSH);
+	blk_queue_write_cache(q, true, false);
 
 	disk->queue = q;
 
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index c120d70..4b7e405 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -468,7 +468,7 @@
 	blk_queue_dma_alignment(queue, dev->blk_size-1);
 	blk_queue_logical_block_size(queue, dev->blk_size);
 
-	blk_queue_flush(queue, REQ_FLUSH);
+	blk_queue_write_cache(queue, true, false);
 
 	blk_queue_max_segments(queue, -1);
 	blk_queue_max_segment_size(queue, dev->bounce_size);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9c62344..0ede6d7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -538,7 +538,6 @@
 				u8 *order, u64 *snap_size);
 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
 		u64 *snap_features);
-static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
 
 static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
@@ -1953,7 +1952,7 @@
 
 	osdc = &rbd_dev->rbd_client->client->osdc;
 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
-					  GFP_ATOMIC);
+					  GFP_NOIO);
 	if (!osd_req)
 		return NULL;	/* ENOMEM */
 
@@ -2002,7 +2001,7 @@
 	rbd_dev = img_request->rbd_dev;
 	osdc = &rbd_dev->rbd_client->client->osdc;
 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
-						false, GFP_ATOMIC);
+						false, GFP_NOIO);
 	if (!osd_req)
 		return NULL;	/* ENOMEM */
 
@@ -2504,7 +2503,7 @@
 					bio_chain_clone_range(&bio_list,
 								&bio_offset,
 								clone_size,
-								GFP_ATOMIC);
+								GFP_NOIO);
 			if (!obj_request->bio_list)
 				goto out_unwind;
 		} else if (type == OBJ_REQUEST_PAGES) {
@@ -3127,9 +3126,6 @@
 	struct rbd_device *rbd_dev = (struct rbd_device *)data;
 	int ret;
 
-	if (!rbd_dev)
-		return;
-
 	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
 		rbd_dev->header_name, (unsigned long long)notify_id,
 		(unsigned int)opcode);
@@ -3263,6 +3259,9 @@
 
 	ceph_osdc_cancel_event(rbd_dev->watch_event);
 	rbd_dev->watch_event = NULL;
+
+	dout("%s flushing notifies\n", __func__);
+	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
 }
 
 /*
@@ -3642,21 +3641,14 @@
 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
 {
 	sector_t size;
-	bool removing;
 
 	/*
-	 * Don't hold the lock while doing disk operations,
-	 * or lock ordering will conflict with the bdev mutex via:
-	 * rbd_add() -> blkdev_get() -> rbd_open()
+	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
+	 * try to update its size.  If REMOVING is set, updating size
+	 * is just useless work since the device can't be opened.
 	 */
-	spin_lock_irq(&rbd_dev->lock);
-	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
-	spin_unlock_irq(&rbd_dev->lock);
-	/*
-	 * If the device is being removed, rbd_dev->disk has
-	 * been destroyed, so don't try to update its size
-	 */
-	if (!removing) {
+	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
+	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
 		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
 		dout("setting size to %llu sectors", (unsigned long long)size);
 		set_capacity(rbd_dev->disk, size);
@@ -4191,7 +4183,7 @@
 		__le64 features;
 		__le64 incompat;
 	} __attribute__ ((packed)) features_buf = { 0 };
-	u64 incompat;
+	u64 unsup;
 	int ret;
 
 	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
@@ -4204,9 +4196,12 @@
 	if (ret < sizeof (features_buf))
 		return -ERANGE;
 
-	incompat = le64_to_cpu(features_buf.incompat);
-	if (incompat & ~RBD_FEATURES_SUPPORTED)
+	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
+	if (unsup) {
+		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
+			 unsup);
 		return -ENXIO;
+	}
 
 	*snap_features = le64_to_cpu(features_buf.features);
 
@@ -5187,6 +5182,10 @@
 	return ret;
 }
 
+/*
+ * rbd_dev->header_rwsem must be locked for write and will be unlocked
+ * upon return.
+ */
 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
 {
 	int ret;
@@ -5195,7 +5194,7 @@
 
 	ret = rbd_dev_id_get(rbd_dev);
 	if (ret)
-		return ret;
+		goto err_out_unlock;
 
 	BUILD_BUG_ON(DEV_NAME_LEN
 			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
@@ -5236,8 +5235,9 @@
 	/* Everything's ready.  Announce the disk to the world. */
 
 	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
-	add_disk(rbd_dev->disk);
+	up_write(&rbd_dev->header_rwsem);
 
+	add_disk(rbd_dev->disk);
 	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
 		(unsigned long long) rbd_dev->mapping.size);
 
@@ -5252,6 +5252,8 @@
 		unregister_blkdev(rbd_dev->major, rbd_dev->name);
 err_out_id:
 	rbd_dev_id_put(rbd_dev);
+err_out_unlock:
+	up_write(&rbd_dev->header_rwsem);
 	return ret;
 }
 
@@ -5442,6 +5444,7 @@
 	spec = NULL;		/* rbd_dev now owns this */
 	rbd_opts = NULL;	/* rbd_dev now owns this */
 
+	down_write(&rbd_dev->header_rwsem);
 	rc = rbd_dev_image_probe(rbd_dev, 0);
 	if (rc < 0)
 		goto err_out_rbd_dev;
@@ -5471,6 +5474,7 @@
 	return rc;
 
 err_out_rbd_dev:
+	up_write(&rbd_dev->header_rwsem);
 	rbd_dev_destroy(rbd_dev);
 err_out_client:
 	rbd_put_client(rbdc);
@@ -5577,12 +5581,6 @@
 		return ret;
 
 	rbd_dev_header_unwatch_sync(rbd_dev);
-	/*
-	 * flush remaining watch callbacks - these must be complete
-	 * before the osd_client is shutdown
-	 */
-	dout("%s: flushing notifies", __func__);
-	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
 
 	/*
 	 * Don't free anything from rbd_dev->disk until after all
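
The GFP_ATOMIC to GFP_NOIO switches in rbd above follow the usual rule for allocations made while servicing block I/O: GFP_NOIO may sleep but prevents memory reclaim from re-entering the I/O path, while GFP_ATOMIC forbids sleeping altogether and fails far more readily under pressure. As in the hunks above:

	/* old: never sleeps, prone to failure under memory pressure */
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_ATOMIC);

	/* new: may sleep, but reclaim will not recurse into block I/O */
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);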
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 586f916..910e065 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -133,7 +133,6 @@
 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
 
 #define INQ_STD_NBYTES 36
-#define SKD_DISCARD_CDB_LENGTH	24
 
 enum skd_drvr_state {
 	SKD_DRVR_STATE_LOAD,
@@ -212,7 +211,6 @@
 
 	struct request *req;
 	u8 flush_cmd;
-	u8 discard_page;
 
 	u32 timeout_stamp;
 	u8 sg_data_dir;
@@ -230,7 +228,6 @@
 };
 #define SKD_DATA_DIR_HOST_TO_CARD       1
 #define SKD_DATA_DIR_CARD_TO_HOST       2
-#define SKD_DATA_DIR_NONE		3	/* especially for DISCARD requests. */
 
 struct skd_special_context {
 	struct skd_request_context req;
@@ -540,31 +537,6 @@
 	scsi_req->cdb[9] = 0;
 }
 
-static void
-skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
-		     struct skd_request_context *skreq,
-		     struct page *page,
-		     u32 lba, u32 count)
-{
-	char *buf;
-	unsigned long len;
-	struct request *req;
-
-	buf = page_address(page);
-	len = SKD_DISCARD_CDB_LENGTH;
-
-	scsi_req->cdb[0] = UNMAP;
-	scsi_req->cdb[8] = len;
-
-	put_unaligned_be16(6 + 16, &buf[0]);
-	put_unaligned_be16(16, &buf[2]);
-	put_unaligned_be64(lba, &buf[8]);
-	put_unaligned_be32(count, &buf[16]);
-
-	req = skreq->req;
-	blk_add_request_payload(req, page, len);
-}
-
 static void skd_request_fn_not_online(struct request_queue *q);
 
 static void skd_request_fn(struct request_queue *q)
@@ -575,7 +547,6 @@
 	struct skd_request_context *skreq;
 	struct request *req = NULL;
 	struct skd_scsi_request *scsi_req;
-	struct page *page;
 	unsigned long io_flags;
 	int error;
 	u32 lba;
@@ -669,7 +640,6 @@
 		skreq->flush_cmd = 0;
 		skreq->n_sg = 0;
 		skreq->sg_byte_count = 0;
-		skreq->discard_page = 0;
 
 		/*
 		 * OK to now dequeue request from q.
@@ -735,18 +705,7 @@
 		else
 			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
 
-		if (io_flags & REQ_DISCARD) {
-			page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
-			if (!page) {
-				pr_err("request_fn:Page allocation failed.\n");
-				skd_end_request(skdev, skreq, -ENOMEM);
-				break;
-			}
-			skreq->discard_page = 1;
-			req->completion_data = page;
-			skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
-
-		} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
+		if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
 			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
 			SKD_ASSERT(skreq->flush_cmd == 1);
 
@@ -851,16 +810,6 @@
 static void skd_end_request(struct skd_device *skdev,
 			    struct skd_request_context *skreq, int error)
 {
-	struct request *req = skreq->req;
-	unsigned int io_flags = req->cmd_flags;
-
-	if ((io_flags & REQ_DISCARD) &&
-		(skreq->discard_page == 1)) {
-		pr_debug("%s:%s:%d, free the page!",
-			 skdev->name, __func__, __LINE__);
-		__free_page(req->completion_data);
-	}
-
 	if (unlikely(error)) {
 		struct request *req = skreq->req;
 		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
@@ -4412,19 +4361,13 @@
 	disk->queue = q;
 	q->queuedata = skdev;
 
-	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+	blk_queue_write_cache(q, true, true);
 	blk_queue_max_segments(q, skdev->sgs_per_request);
 	blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
 
 	/* set sysfs ptimal_io_size to 8K */
 	blk_queue_io_opt(q, 8192);
 
-	/* DISCARD Flag initialization. */
-	q->limits.discard_granularity = 8192;
-	q->limits.discard_alignment = 0;
-	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
-	q->limits.discard_zeroes_data = 1;
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 28cff0d..42758b5 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -493,11 +493,7 @@
 	u8 writeback = virtblk_get_cache_mode(vdev);
 	struct virtio_blk *vblk = vdev->priv;
 
-	if (writeback)
-		blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
-	else
-		blk_queue_flush(vblk->disk->queue, 0);
-
+	blk_queue_write_cache(vblk->disk->queue, writeback, false);
 	revalidate_disk(vblk->disk);
 }
 
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 26aa080..3355f1c 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -477,7 +477,7 @@
 		vbd->type |= VDISK_REMOVABLE;
 
 	q = bdev_get_queue(bdev);
-	if (q && q->flush_flags)
+	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 		vbd->flush_support = true;
 
 	if (q && blk_queue_secdiscard(q))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6405b65..ca13df8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -998,7 +998,8 @@
 
 static void xlvbd_flush(struct blkfront_info *info)
 {
-	blk_queue_flush(info->rq, info->feature_flush);
+	blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
+				info->feature_flush & REQ_FUA);
 	pr_info("blkfront: %s: %s %s %s %s %s\n",
 		info->gd->disk_name, flush_info(info->feature_flush),
 		"persistent grants:", info->feature_persistent ?
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 47ca4b3..2589468 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -122,6 +122,7 @@
 	{ USB_DEVICE(0x13d3, 0x3432) },
 	{ USB_DEVICE(0x13d3, 0x3472) },
 	{ USB_DEVICE(0x13d3, 0x3474) },
+	{ USB_DEVICE(0x13d3, 0x3487) },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE02C) },
@@ -188,6 +189,7 @@
 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU22 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
@@ -206,7 +208,8 @@
 				const struct firmware *firmware)
 {
 	u8 *send_buf;
-	int err, pipe, len, size, sent = 0;
+	int len = 0;
+	int err, pipe, size, sent = 0;
 	int count = firmware->size;
 
 	BT_DBG("udev %p", udev);
@@ -302,7 +305,8 @@
 		const struct firmware *firmware)
 {
 	u8 *send_buf;
-	int err, pipe, len, size, count, sent = 0;
+	int len = 0;
+	int err, pipe, size, count, sent = 0;
 	int ret;
 
 	count = firmware->size;
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 0590473..f742384 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -23,6 +23,17 @@
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <net/bluetooth/bluetooth.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
 
 #define BTM_HEADER_LEN			4
 #define BTM_UPLD_SIZE			2312
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index f25a825..7ad8d61 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -510,34 +510,39 @@
 static int btmrvl_check_device_tree(struct btmrvl_private *priv)
 {
 	struct device_node *dt_node;
+	struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
 	u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE];
-	int ret;
-	u32 val;
+	int ret = 0;
+	u16 gpio, gap;
 
-	for_each_compatible_node(dt_node, NULL, "btmrvl,cfgdata") {
-		ret = of_property_read_u32(dt_node, "btmrvl,gpio-gap", &val);
-		if (!ret)
-			priv->btmrvl_dev.gpio_gap = val;
+	if (card->plt_of_node) {
+		dt_node = card->plt_of_node;
+		ret = of_property_read_u16(dt_node, "marvell,wakeup-pin",
+					   &gpio);
+		if (ret)
+			gpio = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
 
-		ret = of_property_read_u8_array(dt_node, "btmrvl,cal-data",
+		ret = of_property_read_u16(dt_node, "marvell,wakeup-gap-ms",
+					   &gap);
+		if (ret)
+			gap = (u8)(priv->btmrvl_dev.gpio_gap & 0x00ff);
+
+		priv->btmrvl_dev.gpio_gap = (gpio << 8) + gap;
+
+		ret = of_property_read_u8_array(dt_node, "marvell,cal-data",
 						cal_data + BT_CAL_HDR_LEN,
 						BT_CAL_DATA_SIZE);
-		if (ret) {
-			of_node_put(dt_node);
+		if (ret)
 			return ret;
-		}
 
 		BT_DBG("Use cal data from device tree");
 		ret = btmrvl_download_cal_data(priv, cal_data,
 					       BT_CAL_DATA_SIZE);
-		if (ret) {
+		if (ret)
 			BT_ERR("Fail to download calibrate data");
-			of_node_put(dt_node);
-			return ret;
-		}
 	}
 
-	return 0;
+	return ret;
 }
 
 static int btmrvl_setup(struct hci_dev *hdev)
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index c6ef248..f425ddf 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -52,6 +52,68 @@
 	{"EXTLAST", NULL, 0, 0xFE},
 };
 
+static const struct of_device_id btmrvl_sdio_of_match_table[] = {
+	{ .compatible = "marvell,sd8897-bt" },
+	{ .compatible = "marvell,sd8997-bt" },
+	{ }
+};
+
+static irqreturn_t btmrvl_wake_irq_bt(int irq, void *priv)
+{
+	struct btmrvl_plt_wake_cfg *cfg = priv;
+
+	if (cfg->irq_bt >= 0) {
+		pr_info("%s: wake by bt", __func__);
+		cfg->wake_by_bt = true;
+		disable_irq_nosync(irq);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* This function parses the device tree node using the mmc subnode
+ * devicetree API.  The device node is saved in card->plt_of_node.
+ * If the device tree node exists and includes an interrupts attribute, this
+ * function requests the platform-specific wakeup interrupt.
+ */
+static int btmrvl_sdio_probe_of(struct device *dev,
+				struct btmrvl_sdio_card *card)
+{
+	struct btmrvl_plt_wake_cfg *cfg;
+	int ret;
+
+	if (!dev->of_node ||
+	    !of_match_node(btmrvl_sdio_of_match_table, dev->of_node)) {
+		pr_err("sdio platform data not available");
+		return -1;
+	}
+
+	card->plt_of_node = dev->of_node;
+
+	card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg),
+					  GFP_KERNEL);
+	cfg = card->plt_wake_cfg;
+	if (cfg && card->plt_of_node) {
+		cfg->irq_bt = irq_of_parse_and_map(card->plt_of_node, 0);
+		if (!cfg->irq_bt) {
+			dev_err(dev, "fail to parse irq_bt from device tree");
+		} else {
+			ret = devm_request_irq(dev, cfg->irq_bt,
+					       btmrvl_wake_irq_bt,
+					       IRQF_TRIGGER_LOW,
+					       "bt_wake", cfg);
+			if (ret) {
+				dev_err(dev,
+					"Failed to request irq_bt %d (%d)\n",
+					cfg->irq_bt, ret);
+			}
+			disable_irq(cfg->irq_bt);
+		}
+	}
+
+	return 0;
+}
+
 /* The btmrvl_sdio_remove() callback function is called
  * when user removes this module from kernel space or ejects
  * the card from the slot. The driver handles these 2 cases
@@ -1464,6 +1526,9 @@
 
 	btmrvl_sdio_enable_host_int(card);
 
+	/* Device tree node parsing and platform-specific configuration */
+	btmrvl_sdio_probe_of(&func->dev, card);
+
 	priv = btmrvl_add_card(card);
 	if (!priv) {
 		BT_ERR("Initializing card failed!");
@@ -1544,6 +1609,13 @@
 		return 0;
 	}
 
+	/* Enable platform specific wakeup interrupt */
+	if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+		card->plt_wake_cfg->wake_by_bt = false;
+		enable_irq(card->plt_wake_cfg->irq_bt);
+		enable_irq_wake(card->plt_wake_cfg->irq_bt);
+	}
+
 	priv = card->priv;
 	priv->adapter->is_suspending = true;
 	hcidev = priv->btmrvl_dev.hcidev;
@@ -1606,6 +1678,13 @@
 	BT_DBG("%s: SDIO resume", hcidev->name);
 	hci_resume_dev(hcidev);
 
+	/* Disable platform specific wakeup interrupt */
+	if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+		disable_irq_wake(card->plt_wake_cfg->irq_bt);
+		if (!card->plt_wake_cfg->wake_by_bt)
+			disable_irq(card->plt_wake_cfg->irq_bt);
+	}
+
 	return 0;
 }
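
The btmrvl suspend/resume hunks above follow the usual arm-on-suspend, disarm-on-resume pattern for an out-of-band wake interrupt; in outline:

	/* suspend: let the BT wake pin bring the host out of suspend */
	enable_irq(cfg->irq_bt);
	enable_irq_wake(cfg->irq_bt);

	/* resume: stop using it as a wake source; if it already fired, the
	 * handler disabled it with disable_irq_nosync() and it is not
	 * disabled a second time here
	 */
	disable_irq_wake(cfg->irq_bt);
	if (!cfg->wake_by_bt)
		disable_irq(cfg->irq_bt);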
 
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 1a3bd06..3a522d2 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -62,6 +62,10 @@
 
 #define FIRMWARE_READY				0xfedc
 
+struct btmrvl_plt_wake_cfg {
+	int irq_bt;
+	bool wake_by_bt;
+};
 
 struct btmrvl_sdio_card_reg {
 	u8 cfg;
@@ -97,6 +101,8 @@
 	u16 sd_blksz_fw_dl;
 	u8 rx_unit;
 	struct btmrvl_private *priv;
+	struct device_node *plt_of_node;
+	struct btmrvl_plt_wake_cfg *plt_wake_cfg;
 };
 
 struct btmrvl_sdio_device {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 0d4e372..a3be65e62 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -236,6 +236,7 @@
 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -2001,12 +2002,13 @@
 		return -EINVAL;
 	}
 
-	/* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
-	 * supported by this firmware loading method. This check has been
-	 * put in place to ensure correct forward compatibility options
-	 * when newer hardware variants come along.
+	/* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP)
+	 * and 0x0c (WsP) are supported by this firmware loading method.
+	 *
+	 * This check has been put in place to ensure correct forward
+	 * compatibility options when newer hardware variants come along.
 	 */
-	if (ver.hw_variant != 0x0b) {
+	if (ver.hw_variant != 0x0b && ver.hw_variant != 0x0c) {
 		BT_ERR("%s: Unsupported Intel hardware variant (%u)",
 		       hdev->name, ver.hw_variant);
 		return -EINVAL;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index d8881dc..1c97eda 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -825,6 +825,7 @@
 	{ "BCM2E64", 0 },
 	{ "BCM2E65", 0 },
 	{ "BCM2E67", 0 },
+	{ "BCM2E71", 0 },
 	{ "BCM2E7B", 0 },
 	{ "BCM2E7C", 0 },
 	{ },
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 064f2fe..d7d23ce 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -102,13 +102,12 @@
 /* Initialise the crc calculator */
 #define BCSP_CRC_INIT(x) x = 0xffff
 
-/*
-   Update crc with next data byte
-
-   Implementation note
-        The data byte is treated as two nibbles.  The crc is generated
-        in reverse, i.e., bits are fed into the register from the top.
-*/
+/* Update crc with next data byte
+ *
+ * Implementation note
+ *     The data byte is treated as two nibbles.  The crc is generated
+ *     in reverse, i.e., bits are fed into the register from the top.
+ */
 static void bcsp_crc_update(u16 *crc, u8 d)
 {
 	u16 reg = *crc;
@@ -223,9 +222,10 @@
 	}
 
 	/* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2
-	   (because bytes 0xc0 and 0xdb are escaped, worst case is
-	   when the packet is all made of 0xc0 and 0xdb :) )
-	   + 2 (0xc0 delimiters at start and end). */
+	 * (because bytes 0xc0 and 0xdb are escaped, worst case is
+	 * when the packet is all made of 0xc0 and 0xdb :) )
+	 * + 2 (0xc0 delimiters at start and end).
+	 */
 
 	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
 	if (!nskb)
@@ -285,7 +285,7 @@
 	struct bcsp_struct *bcsp = hu->priv;
 	unsigned long flags;
 	struct sk_buff *skb;
-	
+
 	/* First of all, check for unreliable messages in the queue,
 	   since they have priority */
 
@@ -305,8 +305,9 @@
 	}
 
 	/* Now, try to send a reliable pkt. We can only send a
-	   reliable packet if the number of packets sent but not yet ack'ed
-	   is < than the winsize */
+	 * reliable packet if the number of packets sent but not yet ack'ed
+	 * is < than the winsize
+	 */
 
 	spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
 
@@ -332,12 +333,14 @@
 	spin_unlock_irqrestore(&bcsp->unack.lock, flags);
 
 	/* We could not send a reliable packet, either because there are
-	   none or because there are too many unack'ed pkts. Did we receive
-	   any packets we have not acknowledged yet ? */
+	 * none or because there are too many unack'ed pkts. Did we receive
+	 * any packets we have not acknowledged yet ?
+	 */
 
 	if (bcsp->txack_req) {
 		/* if so, craft an empty ACK pkt and send it on BCSP unreliable
-		   channel 0 */
+		 * channel 0
+		 */
 		struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT);
 		return nskb;
 	}
@@ -399,8 +402,9 @@
 }
 
 /* Handle BCSP link-establishment packets. When we
-   detect a "sync" packet, symptom that the BT module has reset,
-   we do nothing :) (yet) */
+ * detect a "sync" packet, symptom that the BT module has reset,
+ * we do nothing :) (yet)
+ */
 static void bcsp_handle_le_pkt(struct hci_uart *hu)
 {
 	struct bcsp_struct *bcsp = hu->priv;
@@ -462,7 +466,7 @@
 		case 0xdd:
 			memcpy(skb_put(bcsp->rx_skb, 1), &db, 1);
 			if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
-					bcsp->rx_state != BCSP_W4_CRC) 
+					bcsp->rx_state != BCSP_W4_CRC)
 				bcsp_crc_update(&bcsp->message_crc, 0xdb);
 			bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
 			bcsp->rx_count--;
@@ -534,7 +538,7 @@
 			} else {
 				BT_ERR("Packet for unknown channel (%u %s)",
 					bcsp->rx_skb->data[1] & 0x0f,
-					bcsp->rx_skb->data[0] & 0x80 ? 
+					bcsp->rx_skb->data[0] & 0x80 ?
 					"reliable" : "unreliable");
 				kfree_skb(bcsp->rx_skb);
 			}
@@ -562,7 +566,7 @@
 	struct bcsp_struct *bcsp = hu->priv;
 	const unsigned char *ptr;
 
-	BT_DBG("hu %p count %d rx_state %d rx_count %ld", 
+	BT_DBG("hu %p count %d rx_state %d rx_count %ld",
 		hu, count, bcsp->rx_state, bcsp->rx_count);
 
 	ptr = data;
@@ -591,7 +595,7 @@
 				continue;
 			}
 			if (bcsp->rx_skb->data[0] & 0x80	/* reliable pkt */
-			    		&& (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) {
+						&& (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) {
 				BT_ERR("Out-of-order packet arrived, got %u expected %u",
 					bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack);
 
@@ -601,7 +605,7 @@
 				continue;
 			}
 			bcsp->rx_state = BCSP_W4_DATA;
-			bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) + 
+			bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) +
 					(bcsp->rx_skb->data[2] << 4);	/* May be 0 */
 			continue;
 
@@ -615,7 +619,7 @@
 
 		case BCSP_W4_CRC:
 			if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) {
-				BT_ERR ("Checksum failed: computed %04x received %04x",
+				BT_ERR("Checksum failed: computed %04x received %04x",
 					bitrev16(bcsp->message_crc),
 					bscp_get_crc(bcsp));
 
@@ -653,8 +657,9 @@
 				BCSP_CRC_INIT(bcsp->message_crc);
 
 				/* Do not increment ptr or decrement count
-				 * Allocate packet. Max len of a BCSP pkt= 
-				 * 0xFFF (payload) +4 (header) +2 (crc) */
+				 * Allocate packet. Max len of a BCSP pkt=
+				 * 0xFFF (payload) +4 (header) +2 (crc)
+				 */
 
 				bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC);
 				if (!bcsp->rx_skb) {
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 91d6051..f6f2b01 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -1210,8 +1210,7 @@
 
 	idev->pdev = pdev;
 
-	idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
-					      GPIOD_OUT_LOW);
+	idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
 	if (IS_ERR(idev->reset)) {
 		dev_err(&pdev->dev, "Unable to retrieve gpio\n");
 		return PTR_ERR(idev->reset);
@@ -1223,8 +1222,7 @@
 
 		dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");
 
-		host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake",
-						    GPIOD_IN);
+		host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN);
 		if (IS_ERR(host_wake)) {
 			dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
 			goto no_irq;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index c00168a..49b3e1e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -227,7 +227,7 @@
 	tty_ldisc_flush(tty);
 	tty_driver_flush_buffer(tty);
 
-	if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+	if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
 		hu->proto->flush(hu);
 
 	return 0;
@@ -492,7 +492,7 @@
 
 	cancel_work_sync(&hu->write_work);
 
-	if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+	if (test_and_clear_bit(HCI_UART_PROTO_READY, &hu->flags)) {
 		if (hdev) {
 			if (test_bit(HCI_UART_REGISTERED, &hu->flags))
 				hci_unregister_dev(hdev);
@@ -500,6 +500,7 @@
 		}
 		hu->proto->close(hu);
 	}
+	clear_bit(HCI_UART_PROTO_SET, &hu->flags);
 
 	kfree(hu);
 }
@@ -526,7 +527,7 @@
 	if (tty != hu->tty)
 		return;
 
-	if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+	if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
 		hci_uart_tx_wakeup(hu);
 }
 
@@ -550,7 +551,7 @@
 	if (!hu || tty != hu->tty)
 		return;
 
-	if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
+	if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
 		return;
 
 	/* It does not need a lock here as it is already protected by a mutex in
@@ -638,9 +639,11 @@
 		return err;
 
 	hu->proto = p;
+	set_bit(HCI_UART_PROTO_READY, &hu->flags);
 
 	err = hci_uart_register_dev(hu);
 	if (err) {
+		clear_bit(HCI_UART_PROTO_READY, &hu->flags);
 		p->close(hu);
 		return err;
 	}
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 4814ff0..839bad1 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -95,6 +95,7 @@
 /* HCI_UART proto flag bits */
 #define HCI_UART_PROTO_SET	0
 #define HCI_UART_REGISTERED	1
+#define HCI_UART_PROTO_READY	2
 
 /* TX states  */
 #define HCI_UART_SENDING	1
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 80783dc..aba3121 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -50,6 +50,7 @@
 	wait_queue_head_t read_wait;
 	struct sk_buff_head readq;
 
+	struct mutex open_mutex;
 	struct delayed_work open_timeout;
 };
 
@@ -87,12 +88,15 @@
 	return 0;
 }
 
-static int vhci_create_device(struct vhci_data *data, __u8 opcode)
+static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
 {
 	struct hci_dev *hdev;
 	struct sk_buff *skb;
 	__u8 dev_type;
 
+	if (data->hdev)
+		return -EBADFD;
+
 	/* bits 0-1 are dev_type (BR/EDR or AMP) */
 	dev_type = opcode & 0x03;
 
@@ -151,6 +155,17 @@
 	return 0;
 }
 
+static int vhci_create_device(struct vhci_data *data, __u8 opcode)
+{
+	int err;
+
+	mutex_lock(&data->open_mutex);
+	err = __vhci_create_device(data, opcode);
+	mutex_unlock(&data->open_mutex);
+
+	return err;
+}
+
 static inline ssize_t vhci_get_user(struct vhci_data *data,
 				    struct iov_iter *from)
 {
@@ -189,11 +204,6 @@
 		break;
 
 	case HCI_VENDOR_PKT:
-		if (data->hdev) {
-			kfree_skb(skb);
-			return -EBADFD;
-		}
-
 		cancel_delayed_work_sync(&data->open_timeout);
 
 		opcode = *((__u8 *) skb->data);
@@ -320,6 +330,7 @@
 	skb_queue_head_init(&data->readq);
 	init_waitqueue_head(&data->read_wait);
 
+	mutex_init(&data->open_mutex);
 	INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
 
 	file->private_data = data;
@@ -333,15 +344,18 @@
 static int vhci_release(struct inode *inode, struct file *file)
 {
 	struct vhci_data *data = file->private_data;
-	struct hci_dev *hdev = data->hdev;
+	struct hci_dev *hdev;
 
 	cancel_delayed_work_sync(&data->open_timeout);
 
+	hdev = data->hdev;
+
 	if (hdev) {
 		hci_unregister_dev(hdev);
 		hci_free_dev(hdev);
 	}
 
+	skb_queue_purge(&data->readq);
 	file->private_data = NULL;
 	kfree(data);
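
The vhci changes above close a race between a writer creating the controller and the delayed open-timeout work doing the same: creation now goes through a single helper serialised by open_mutex, and the "already created" check moves inside it. In outline:

	mutex_lock(&data->open_mutex);
	err = __vhci_create_device(data, opcode);	/* -EBADFD if data->hdev is already set */
	mutex_unlock(&data->open_mutex);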
 
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index d4a3a31..c5a7de9b 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -48,7 +48,7 @@
 	  If unsure, say Y
 
 config ARM_CCN
-	bool "ARM CCN driver support"
+	tristate "ARM CCN driver support"
 	depends on ARM || ARM64
 	depends on PERF_EVENTS
 	help
@@ -58,6 +58,7 @@
 config BRCMSTB_GISB_ARB
 	bool "Broadcom STB GISB bus arbiter"
 	depends on ARM || MIPS
+	default ARCH_BRCMSTB || BMIPS_GENERIC
 	help
 	  Driver for the Broadcom Set Top Box System-on-a-chip internal bus
 	  arbiter. This driver provides timeout and target abort error handling
@@ -110,7 +111,7 @@
 config SIMPLE_PM_BUS
 	bool "Simple Power-Managed Bus Driver"
 	depends on OF && PM
-	depends on ARCH_SHMOBILE || COMPILE_TEST
+	depends on ARCH_RENESAS || COMPILE_TEST
 	help
 	  Driver for transparent busses that don't need a real driver, but
 	  where the bus controller is part of a PM domain, or under the control
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 7082c72..acc3eb5 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1189,7 +1189,7 @@
 		perf_pmu_migrate_context(&dt->pmu, cpu, target);
 		cpumask_set_cpu(target, &dt->cpu);
 		if (ccn->irq)
-			WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
+			WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
 	default:
 		break;
 	}
@@ -1278,7 +1278,7 @@
 
 	/* Also make sure that the overflow interrupt is handled by this CPU */
 	if (ccn->irq) {
-		err = irq_set_affinity(ccn->irq, &ccn->dt.cpu);
+		err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
 		if (err) {
 			dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
 			goto error_set_affinity;
@@ -1306,7 +1306,8 @@
 {
 	int i;
 
-	irq_set_affinity(ccn->irq, cpu_possible_mask);
+	if (ccn->irq)
+		irq_set_affinity_hint(ccn->irq, NULL);
 	unregister_cpu_notifier(&ccn->dt.cpu_nb);
 	for (i = 0; i < ccn->num_xps; i++)
 		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
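
With ARM_CCN now buildable as a module (Kconfig hunk above), the switch to irq_set_affinity_hint() presumably reflects that it is exported to modules while plain irq_set_affinity() is not; teardown now clears the hint instead of forcing the affinity back to cpu_possible_mask:

	/* request path: steer the overflow interrupt towards the chosen CPU */
	WARN_ON(irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu) != 0);

	/* teardown: drop the hint again */
	if (ccn->irq)
		irq_set_affinity_hint(ccn->irq, NULL);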
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index f364fa4..72fe0a5 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -30,6 +30,10 @@
 #include <asm/signal.h>
 #endif
 
+#ifdef CONFIG_MIPS
+#include <asm/traps.h>
+#endif
+
 #define  ARB_ERR_CAP_CLEAR		(1 << 0)
 #define  ARB_ERR_CAP_STATUS_TIMEOUT	(1 << 12)
 #define  ARB_ERR_CAP_STATUS_TEA		(1 << 11)
@@ -238,6 +242,29 @@
 }
 #endif
 
+#ifdef CONFIG_MIPS
+static int brcmstb_bus_error_handler(struct pt_regs *regs, int is_fixup)
+{
+	int ret = 0;
+	struct brcmstb_gisb_arb_device *gdev;
+	u32 cap_status;
+
+	list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next) {
+		cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
+
+		/* Invalid captured address, bail out */
+		if (!(cap_status & ARB_ERR_CAP_STATUS_VALID)) {
+			is_fixup = 1;
+			goto out;
+		}
+
+		ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+	}
+out:
+	return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
+}
+#endif
+
 static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
 {
 	brcmstb_gisb_arb_decode_addr(dev_id, "timeout");
@@ -355,6 +382,9 @@
 	hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
 			"imprecise external abort");
 #endif
+#ifdef CONFIG_MIPS
+	board_be_handler = brcmstb_bus_error_handler;
+#endif
 
 	dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
 			gdev->base, timeout_irq, tea_irq);
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 1c543ef..cad49bc 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -599,8 +599,8 @@
  * mips_cdmm_bus_down() - Tear down the CDMM bus.
  * @data:	Pointer to unsigned int CPU number.
  *
- * This work_on_cpu callback function is executed on a given CPU to call the
- * CDMM driver cpu_down callback for all devices on that CPU.
+ * This function is executed on the hotplugged CPU and calls the CDMM
+ * driver cpu_down callback for all devices on that CPU.
  */
 static long mips_cdmm_bus_down(void *data)
 {
@@ -630,7 +630,9 @@
  * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
  * devices already discovered on that CPU.
  *
- * It is used during initialisation and when CPUs are brought online.
+ * It is used as a work_on_cpu callback function during
+ * initialisation. When CPUs are brought online the function is
+ * invoked directly on the hotplugged CPU.
  */
 static long mips_cdmm_bus_up(void *data)
 {
@@ -677,10 +679,10 @@
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
-		work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
+		mips_cdmm_bus_up(&cpu);
 		break;
 	case CPU_DOWN_PREPARE:
-		work_on_cpu(cpu, mips_cdmm_bus_down, &cpu);
+		mips_cdmm_bus_down(&cpu);
 		break;
 	default:
 		return NOTIFY_DONE;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c2e5286..ce54a01 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -972,7 +972,7 @@
 		}
 	}
 
-	pr_err("invalid dram address 0x%x\n", phyaddr);
+	pr_err("invalid dram address %pa\n", &phyaddr);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 834a2ae..350b730 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -108,7 +108,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
 		for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
-			if (priv->bank[i].end > priv->bank[j].base ||
+			if (priv->bank[i].end > priv->bank[j].base &&
 			    priv->bank[i].base < priv->bank[j].end) {
 				dev_err(priv->dev,
 					"region overlap between bank%d and bank%d\n",
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 67ee8b0..ac51149 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -268,19 +268,6 @@
 
 	  If unsure, say Y.
 
-config HW_RANDOM_PPC4XX
-	tristate "PowerPC 4xx generic true random number generator support"
-	depends on PPC && 4xx
-	default HW_RANDOM
-	---help---
-	 This driver provides the kernel-side support for the TRNG hardware
-	 found in the security function of some PowerPC 4xx SoCs.
-
-	 To compile this driver as a module, choose M here: the
-	 module will be called ppc4xx-rng.
-
-	 If unsure, say N.
-
 config HW_RANDOM_PSERIES
 	tristate "pSeries HW Random Number Generator support"
 	depends on PPC64 && IBMVIO
@@ -309,7 +296,8 @@
 
 config HW_RANDOM_EXYNOS
 	tristate "EXYNOS HW random number generator support"
-	depends on ARCH_EXYNOS
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	depends on HAS_IOMEM
 	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
@@ -333,6 +321,19 @@
 
 	  If unsure, say Y.
 
+config HW_RANDOM_HISI
+	tristate "Hisilicon Random Number Generator support"
+	depends on HW_RANDOM && ARCH_HISI
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Hisilicon Hip04 and Hip05 SoC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hisi-rng.
+
+	  If unsure, say Y.
+
 config HW_RANDOM_MSM
 	tristate "Qualcomm SoCs Random Number Generator support"
 	depends on HW_RANDOM && ARCH_QCOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index f5a6fa7..63022b4 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -22,10 +22,10 @@
 obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
 obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
 obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
-obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
 obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
 obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
 obj-$(CONFIG_HW_RANDOM_EXYNOS)	+= exynos-rng.o
+obj-$(CONFIG_HW_RANDOM_HISI)	+= hisi-rng.o
 obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
 obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index ca9c403..5132c9c 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -12,6 +12,7 @@
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/hw_random.h>
+#include <linux/of.h>
 
 #define RNG_CTRL			0x00
 #define RNG_EN				(1 << 0)
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index ada0812..ed44561 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -2,7 +2,7 @@
  * exynos-rng.c - Random Number Generator driver for the exynos
  *
  * Copyright (C) 2012 Samsung Electronics
- * Jonghwa Lee <jonghwa3.lee@smasung.com>
+ * Jonghwa Lee <jonghwa3.lee@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -77,7 +77,8 @@
 
 	pm_runtime_get_sync(exynos_rng->dev);
 	ret = exynos_rng_configure(exynos_rng);
-	pm_runtime_put_noidle(exynos_rng->dev);
+	pm_runtime_mark_last_busy(exynos_rng->dev);
+	pm_runtime_put_autosuspend(exynos_rng->dev);
 
 	return ret;
 }
@@ -89,6 +90,7 @@
 						struct exynos_rng, rng);
 	u32 *data = buf;
 	int retry = 100;
+	int ret = 4;
 
 	pm_runtime_get_sync(exynos_rng->dev);
 
@@ -97,23 +99,27 @@
 	while (!(exynos_rng_readl(exynos_rng,
 			EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE) && --retry)
 		cpu_relax();
-	if (!retry)
-		return -ETIMEDOUT;
+	if (!retry) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}
 
 	exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET);
 
 	*data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET);
 
+out:
 	pm_runtime_mark_last_busy(exynos_rng->dev);
 	pm_runtime_put_sync_autosuspend(exynos_rng->dev);
 
-	return 4;
+	return ret;
 }
 
 static int exynos_rng_probe(struct platform_device *pdev)
 {
 	struct exynos_rng *exynos_rng;
 	struct resource *res;
+	int ret;
 
 	exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
 					GFP_KERNEL);
@@ -141,7 +147,21 @@
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
-	return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+	ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+	if (ret) {
+		pm_runtime_dont_use_autosuspend(&pdev->dev);
+		pm_runtime_disable(&pdev->dev);
+	}
+
+	return ret;
+}
+
+static int exynos_rng_remove(struct platform_device *pdev)
+{
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
 }
 
 static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
@@ -201,6 +221,7 @@
 		.of_match_table = exynos_rng_dt_match,
 	},
 	.probe		= exynos_rng_probe,
+	.remove		= exynos_rng_remove,
 };
 
 module_platform_driver(exynos_rng_driver);
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
new file mode 100644
index 0000000..40d9657
--- /dev/null
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2016 HiSilicon Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#define RNG_SEED	0x0
+#define RNG_CTRL	0x4
+  #define RNG_SEED_SEL	BIT(2)
+  #define RNG_RING_EN	BIT(1)
+  #define RNG_EN	BIT(0)
+#define RNG_RAN_NUM	0x10
+#define RNG_PHY_SEED	0x14
+
+#define to_hisi_rng(p)	container_of(p, struct hisi_rng, rng)
+
+static int seed_sel;
+module_param(seed_sel, int, S_IRUGO);
+MODULE_PARM_DESC(seed_sel, "Auto reload seed. 0, use LFSR (default); 1, use ring oscillator.");
+
+struct hisi_rng {
+	void __iomem *base;
+	struct hwrng rng;
+};
+
+static int hisi_rng_init(struct hwrng *rng)
+{
+	struct hisi_rng *hrng = to_hisi_rng(rng);
+	int val = RNG_EN;
+	u32 seed;
+
+	/* get a random number as initial seed */
+	get_random_bytes(&seed, sizeof(seed));
+
+	writel_relaxed(seed, hrng->base + RNG_SEED);
+
+	/*
+	 * The seed is reloaded periodically.  There are two choices
+	 * of seed: by default the value from the LFSR is used, or
+	 * the seed generated by the ring oscillator can be selected.
+	 */
+	if (seed_sel == 1)
+		val |= RNG_RING_EN | RNG_SEED_SEL;
+
+	writel_relaxed(val, hrng->base + RNG_CTRL);
+	return 0;
+}
+
+static void hisi_rng_cleanup(struct hwrng *rng)
+{
+	struct hisi_rng *hrng = to_hisi_rng(rng);
+
+	writel_relaxed(0, hrng->base + RNG_CTRL);
+}
+
+static int hisi_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+	struct hisi_rng *hrng = to_hisi_rng(rng);
+	u32 *data = buf;
+
+	*data = readl_relaxed(hrng->base + RNG_RAN_NUM);
+	return 4;
+}
+
+static int hisi_rng_probe(struct platform_device *pdev)
+{
+	struct hisi_rng *rng;
+	struct resource *res;
+	int ret;
+
+	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, rng);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rng->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rng->base))
+		return PTR_ERR(rng->base);
+
+	rng->rng.name = pdev->name;
+	rng->rng.init = hisi_rng_init;
+	rng->rng.cleanup = hisi_rng_cleanup;
+	rng->rng.read = hisi_rng_read;
+
+	ret = devm_hwrng_register(&pdev->dev, &rng->rng);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register hwrng\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id hisi_rng_dt_ids[] = {
+	{ .compatible = "hisilicon,hip04-rng" },
+	{ .compatible = "hisilicon,hip05-rng" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, hisi_rng_dt_ids);
+
+static struct platform_driver hisi_rng_driver = {
+	.probe		= hisi_rng_probe,
+	.driver		= {
+		.name	= "hisi-rng",
+		.of_match_table = of_match_ptr(hisi_rng_dt_ids),
+	},
+};
+
+module_platform_driver(hisi_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kefeng Wang <wangkefeng.wang@huawei>");
+MODULE_DESCRIPTION("Hisilicon random number generator driver");
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
deleted file mode 100644
index c0db438..0000000
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Generic PowerPC 44x RNG driver
- *
- * Copyright 2011 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2 of the License.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/delay.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <asm/io.h>
-
-#define PPC4XX_TRNG_DEV_CTRL 0x60080
-
-#define PPC4XX_TRNGE 0x00020000
-#define PPC4XX_TRNG_CTRL 0x0008
-#define PPC4XX_TRNG_CTRL_DALM 0x20
-#define PPC4XX_TRNG_STAT 0x0004
-#define PPC4XX_TRNG_STAT_B 0x1
-#define PPC4XX_TRNG_DATA 0x0000
-
-#define MODULE_NAME "ppc4xx_rng"
-
-static int ppc4xx_rng_data_present(struct hwrng *rng, int wait)
-{
-	void __iomem *rng_regs = (void __iomem *) rng->priv;
-	int busy, i, present = 0;
-
-	for (i = 0; i < 20; i++) {
-		busy = (in_le32(rng_regs + PPC4XX_TRNG_STAT) & PPC4XX_TRNG_STAT_B);
-		if (!busy || !wait) {
-			present = 1;
-			break;
-		}
-		udelay(10);
-	}
-	return present;
-}
-
-static int ppc4xx_rng_data_read(struct hwrng *rng, u32 *data)
-{
-	void __iomem *rng_regs = (void __iomem *) rng->priv;
-	*data = in_le32(rng_regs + PPC4XX_TRNG_DATA);
-	return 4;
-}
-
-static int ppc4xx_rng_enable(int enable)
-{
-	struct device_node *ctrl;
-	void __iomem *ctrl_reg;
-	int err = 0;
-	u32 val;
-
-	/* Find the main crypto device node and map it to turn the TRNG on */
-	ctrl = of_find_compatible_node(NULL, NULL, "amcc,ppc4xx-crypto");
-	if (!ctrl)
-		return -ENODEV;
-
-	ctrl_reg = of_iomap(ctrl, 0);
-	if (!ctrl_reg) {
-		err = -ENODEV;
-		goto out;
-	}
-
-	val = in_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL);
-
-	if (enable)
-		val |= PPC4XX_TRNGE;
-	else
-		val = val & ~PPC4XX_TRNGE;
-
-	out_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL, val);
-	iounmap(ctrl_reg);
-
-out:
-	of_node_put(ctrl);
-
-	return err;
-}
-
-static struct hwrng ppc4xx_rng = {
-	.name = MODULE_NAME,
-	.data_present = ppc4xx_rng_data_present,
-	.data_read = ppc4xx_rng_data_read,
-};
-
-static int ppc4xx_rng_probe(struct platform_device *dev)
-{
-	void __iomem *rng_regs;
-	int err = 0;
-
-	rng_regs = of_iomap(dev->dev.of_node, 0);
-	if (!rng_regs)
-		return -ENODEV;
-
-	err = ppc4xx_rng_enable(1);
-	if (err)
-		return err;
-
-	out_le32(rng_regs + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
-	ppc4xx_rng.priv = (unsigned long) rng_regs;
-
-	err = hwrng_register(&ppc4xx_rng);
-
-	return err;
-}
-
-static int ppc4xx_rng_remove(struct platform_device *dev)
-{
-	void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv;
-
-	hwrng_unregister(&ppc4xx_rng);
-	ppc4xx_rng_enable(0);
-	iounmap(rng_regs);
-
-	return 0;
-}
-
-static const struct of_device_id ppc4xx_rng_match[] = {
-	{ .compatible = "ppc4xx-rng", },
-	{ .compatible = "amcc,ppc460ex-rng", },
-	{ .compatible = "amcc,ppc440epx-rng", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, ppc4xx_rng_match);
-
-static struct platform_driver ppc4xx_rng_driver = {
-	.driver = {
-		.name = MODULE_NAME,
-		.of_match_table = ppc4xx_rng_match,
-	},
-	.probe = ppc4xx_rng_probe,
-	.remove = ppc4xx_rng_remove,
-};
-
-module_platform_driver(ppc4xx_rng_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("HW RNG driver for PPC 4xx processors");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1e25b52..7b1c412 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -104,7 +104,7 @@
 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1
 
 enum si_type {
-    SI_KCS, SI_SMIC, SI_BT
+	SI_KCS, SI_SMIC, SI_BT
 };
 
 static const char * const si_to_str[] = { "kcs", "smic", "bt" };
@@ -410,7 +410,7 @@
 
 		rv = SI_SM_CALL_WITHOUT_DELAY;
 	}
- out:
+out:
 	return rv;
 }
 
@@ -539,7 +539,7 @@
 
 static void handle_flags(struct smi_info *smi_info)
 {
- retry:
+retry:
 	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
 		/* Watchdog pre-timeout */
 		smi_inc_stat(smi_info, watchdog_pretimeouts);
@@ -831,7 +831,7 @@
 {
 	enum si_sm_result si_sm_result;
 
- restart:
+restart:
 	/*
 	 * There used to be a loop here that waited a little while
 	 * (around 25us) before giving up.  That turned out to be
@@ -944,7 +944,7 @@
 			smi_info->timer_running = false;
 	}
 
- out:
+out:
 	return si_sm_result;
 }
 
@@ -1190,7 +1190,7 @@
 		timeout = jiffies + SI_TIMEOUT_JIFFIES;
 	}
 
- do_mod_timer:
+do_mod_timer:
 	if (smi_result != SI_SM_IDLE)
 		smi_mod_timer(smi_info, timeout);
 	else
@@ -1576,10 +1576,9 @@
 		if (request_region(addr + idx * info->io.regspacing,
 				   info->io.regsize, DEVICE_NAME) == NULL) {
 			/* Undo allocations */
-			while (idx--) {
+			while (idx--)
 				release_region(addr + idx * info->io.regspacing,
 					       info->io.regsize);
-			}
 			return -EIO;
 		}
 	}
@@ -1638,25 +1637,28 @@
 }
 #endif
 
-static void mem_cleanup(struct smi_info *info)
+static void mem_region_cleanup(struct smi_info *info, int num)
 {
 	unsigned long addr = info->io.addr_data;
-	int           mapsize;
+	int idx;
 
+	for (idx = 0; idx < num; idx++)
+		release_mem_region(addr + idx * info->io.regspacing,
+				   info->io.regsize);
+}
+
+static void mem_cleanup(struct smi_info *info)
+{
 	if (info->io.addr) {
 		iounmap(info->io.addr);
-
-		mapsize = ((info->io_size * info->io.regspacing)
-			   - (info->io.regspacing - info->io.regsize));
-
-		release_mem_region(addr, mapsize);
+		mem_region_cleanup(info, info->io_size);
 	}
 }
 
 static int mem_setup(struct smi_info *info)
 {
 	unsigned long addr = info->io.addr_data;
-	int           mapsize;
+	int           mapsize, idx;
 
 	if (!addr)
 		return -ENODEV;
@@ -1693,6 +1695,21 @@
 	}
 
 	/*
+	 * Some BIOSes reserve disjoint memory regions in their ACPI
+	 * tables.  This causes problems when trying to request the
+	 * entire region.  Therefore we must request each register
+	 * separately.
+	 */
+	for (idx = 0; idx < info->io_size; idx++) {
+		if (request_mem_region(addr + idx * info->io.regspacing,
+				       info->io.regsize, DEVICE_NAME) == NULL) {
+			/* Undo allocations */
+			mem_region_cleanup(info, idx);
+			return -EIO;
+		}
+	}
+
+	/*
 	 * Calculate the total amount of memory to claim.  This is an
 	 * unusual looking calculation, but it avoids claiming any
 	 * more memory than it has to.  It will claim everything
@@ -1701,13 +1718,9 @@
 	 */
 	mapsize = ((info->io_size * info->io.regspacing)
 		   - (info->io.regspacing - info->io.regsize));
-
-	if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
-		return -EIO;
-
 	info->io.addr = ioremap(addr, mapsize);
 	if (info->io.addr == NULL) {
-		release_mem_region(addr, mapsize);
+		mem_region_cleanup(info, info->io_size);
 		return -EIO;
 	}
 	return 0;
@@ -1975,7 +1988,7 @@
 		}
 	}
 	rv = len;
- out:
+out:
 	kfree(str);
 	return rv;
 }
@@ -2945,7 +2958,7 @@
 	/* Check and record info from the get device id, in case we need it. */
 	rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
 
- out:
+out:
 	kfree(resp);
 	return rv;
 }
@@ -3192,7 +3205,7 @@
 	else
 		smi_info->supports_event_msg_buff = true;
 
- out:
+out:
 	kfree(resp);
 	return rv;
 }
@@ -3718,10 +3731,10 @@
 
 	return 0;
 
- out_err_stop_timer:
+out_err_stop_timer:
 	wait_for_timer_and_thread(new_smi);
 
- out_err:
+out_err:
 	new_smi->interrupt_disabled = true;
 
 	if (new_smi->intf) {
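
The ipmi_si memory-mapping rework above mirrors the existing port-I/O setup: claim each register's window individually, since some BIOSes describe them as disjoint ACPI resources, and on failure release only the windows claimed so far. In outline:

	for (idx = 0; idx < info->io_size; idx++) {
		if (!request_mem_region(addr + idx * info->io.regspacing,
					info->io.regsize, DEVICE_NAME)) {
			mem_region_cleanup(info, idx);	/* release regions 0..idx-1 */
			return -EIO;
		}
	}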
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 8b3be8b..097c868 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1870,7 +1870,7 @@
 		return -EIO;
 	}
 
-	myaddr = spmi->addr.address >> 1;
+	myaddr = spmi->addr.address & 0x7f;
 
 	return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI);
 }
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 22c2765..e524e83 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -3969,7 +3969,7 @@
 	dev_kfree_skb(skb);
 
 	/* save start time for transmit timeout detection */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	/* start hardware transmitter if necessary */
 	spin_lock_irqsave(&info->lock, flags);
@@ -4032,7 +4032,7 @@
 	tty_kref_put(tty);
 
 	/* enable network layer transmit */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	netif_start_queue(dev);
 
 	/* inform generic HDLC layer of current DCD status */
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 16f7d33..90518cd 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -197,10 +197,14 @@
 	---help---
 	  Support for the Marvell PXA SoC.
 
+config COMMON_CLK_PIC32
+	def_bool COMMON_CLK && MACH_PIC32
+
 source "drivers/clk/bcm/Kconfig"
 source "drivers/clk/hisilicon/Kconfig"
 source "drivers/clk/mvebu/Kconfig"
 source "drivers/clk/qcom/Kconfig"
+source "drivers/clk/renesas/Kconfig"
 source "drivers/clk/samsung/Kconfig"
 source "drivers/clk/tegra/Kconfig"
 source "drivers/clk/ti/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 46869d6..18e64bb 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -58,6 +58,7 @@
 obj-$(CONFIG_MACH_INGENIC)		+= ingenic/
 obj-$(CONFIG_COMMON_CLK_KEYSTONE)	+= keystone/
 obj-$(CONFIG_ARCH_MEDIATEK)		+= mediatek/
+obj-$(CONFIG_MACH_PIC32)		+= microchip/
 ifeq ($(CONFIG_COMMON_CLK), y)
 obj-$(CONFIG_ARCH_MMP)			+= mmp/
 endif
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 02e1818..2beb396f 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -394,7 +394,7 @@
 		clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7);
 	} else {
 		clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
-		clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60", base + 0x20, 2, 6);
+		clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
 		clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
 		clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m",          base + 0x24, 0,  6);
 		clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 9e9fe4b..309049d 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -57,7 +57,7 @@
 	return mtk_reset_deassert(rcdev, id);
 }
 
-static struct reset_control_ops mtk_reset_ops = {
+static const struct reset_control_ops mtk_reset_ops = {
 	.assert = mtk_reset_assert,
 	.deassert = mtk_reset_deassert,
 	.reset = mtk_reset,
diff --git a/drivers/clk/microchip/Makefile b/drivers/clk/microchip/Makefile
new file mode 100644
index 0000000..2152f41
--- /dev/null
+++ b/drivers/clk/microchip/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_COMMON_CLK_PIC32) += clk-core.o
+obj-$(CONFIG_PIC32MZDA) += clk-pic32mzda.o
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
new file mode 100644
index 0000000..ca85cea
--- /dev/null
+++ b/drivers/clk/microchip/clk-core.c
@@ -0,0 +1,1031 @@
+/*
+ * Purna Chandra Mandal,<purna.mandal@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <asm/mach-pic32/pic32.h>
+#include <asm/traps.h>
+
+#include "clk-core.h"
+
+/* OSCCON Reg fields */
+#define OSC_CUR_MASK		0x07
+#define OSC_CUR_SHIFT		12
+#define OSC_NEW_MASK		0x07
+#define OSC_NEW_SHIFT		8
+#define OSC_SWEN		BIT(0)
+
+/* SPLLCON Reg fields */
+#define PLL_RANGE_MASK		0x07
+#define PLL_RANGE_SHIFT		0
+#define PLL_ICLK_MASK		0x01
+#define PLL_ICLK_SHIFT		7
+#define PLL_IDIV_MASK		0x07
+#define PLL_IDIV_SHIFT		8
+#define PLL_ODIV_MASK		0x07
+#define PLL_ODIV_SHIFT		24
+#define PLL_MULT_MASK		0x7F
+#define PLL_MULT_SHIFT		16
+#define PLL_MULT_MAX		128
+#define PLL_ODIV_MIN		1
+#define PLL_ODIV_MAX		5
+
+/* Peripheral Bus Clock Reg Fields */
+#define PB_DIV_MASK		0x7f
+#define PB_DIV_SHIFT		0
+#define PB_DIV_READY		BIT(11)
+#define PB_DIV_ENABLE		BIT(15)
+#define PB_DIV_MAX		128
+#define PB_DIV_MIN		0
+
+/* Reference Oscillator Control Reg fields */
+#define REFO_SEL_MASK		0x0f
+#define REFO_SEL_SHIFT		0
+#define REFO_ACTIVE		BIT(8)
+#define REFO_DIVSW_EN		BIT(9)
+#define REFO_OE			BIT(12)
+#define REFO_ON			BIT(15)
+#define REFO_DIV_SHIFT		16
+#define REFO_DIV_MASK		0x7fff
+
+/* Reference Oscillator Trim Register Fields */
+#define REFO_TRIM_REG		0x10
+#define REFO_TRIM_MASK		0x1ff
+#define REFO_TRIM_SHIFT		23
+#define REFO_TRIM_MAX		511
+
+/* Mux Slew Control Register fields */
+#define SLEW_BUSY		BIT(0)
+#define SLEW_DOWNEN		BIT(1)
+#define SLEW_UPEN		BIT(2)
+#define SLEW_DIV		0x07
+#define SLEW_DIV_SHIFT		8
+#define SLEW_SYSDIV		0x0f
+#define SLEW_SYSDIV_SHIFT	20
+
+/* Clock Poll Timeout */
+#define LOCK_TIMEOUT_US         USEC_PER_MSEC
+
+/* SoC specific clock needed during SPLL clock rate switch */
+static struct clk_hw *pic32_sclk_hw;
+
+/* add instruction pipeline delay while the CPU clock is in transition. */
+#define cpu_nop5()			\
+do {					\
+	__asm__ __volatile__("nop");	\
+	__asm__ __volatile__("nop");	\
+	__asm__ __volatile__("nop");	\
+	__asm__ __volatile__("nop");	\
+	__asm__ __volatile__("nop");	\
+} while (0)
+
+/* Peripheral bus clocks */
+struct pic32_periph_clk {
+	struct clk_hw hw;
+	void __iomem *ctrl_reg;
+	struct pic32_clk_common *core;
+};
+
+#define clkhw_to_pbclk(_hw)	container_of(_hw, struct pic32_periph_clk, hw)
+
+static int pbclk_is_enabled(struct clk_hw *hw)
+{
+	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
+
+	return readl(pb->ctrl_reg) & PB_DIV_ENABLE;
+}
+
+static int pbclk_enable(struct clk_hw *hw)
+{
+	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
+
+	writel(PB_DIV_ENABLE, PIC32_SET(pb->ctrl_reg));
+	return 0;
+}
+
+static void pbclk_disable(struct clk_hw *hw)
+{
+	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
+
+	writel(PB_DIV_ENABLE, PIC32_CLR(pb->ctrl_reg));
+}
+
+static unsigned long calc_best_divided_rate(unsigned long rate,
+					    unsigned long parent_rate,
+					    u32 divider_max,
+					    u32 divider_min)
+{
+	unsigned long divided_rate, divided_rate_down, best_rate;
+	unsigned long div, div_up;
+
+	/* eq. clk_rate = parent_rate / divider.
+	 *
+	 * Find the best divider to produce the rate closest to the target rate.
+	 */
+	div = parent_rate / rate;
+	div = clamp_val(div, divider_min, divider_max);
+	div_up = clamp_val(div + 1, divider_min, divider_max);
+
+	divided_rate = parent_rate / div;
+	divided_rate_down = parent_rate / div_up;
+	if (abs(rate - divided_rate_down) < abs(rate - divided_rate))
+		best_rate = divided_rate_down;
+	else
+		best_rate = divided_rate;
+
+	return best_rate;
+}
+
+static inline u32 pbclk_read_pbdiv(struct pic32_periph_clk *pb)
+{
+	return ((readl(pb->ctrl_reg) >> PB_DIV_SHIFT) & PB_DIV_MASK) + 1;
+}
+
+static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
+				       unsigned long parent_rate)
+{
+	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
+
+	return parent_rate / pbclk_read_pbdiv(pb);
+}
+
+static long pbclk_round_rate(struct clk_hw *hw, unsigned long rate,
+			     unsigned long *parent_rate)
+{
+	return calc_best_divided_rate(rate, *parent_rate,
+				      PB_DIV_MAX, PB_DIV_MIN);
+}
+
+static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
+			  unsigned long parent_rate)
+{
+	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
+	unsigned long flags;
+	u32 v, div;
+	int err;
+
+	/* check & wait for DIV_READY */
+	err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
+				 1, LOCK_TIMEOUT_US);
+	if (err)
+		return err;
+
+	/* calculate clkdiv and best rate */
+	div = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+	spin_lock_irqsave(&pb->core->reg_lock, flags);
+
+	/* apply new div */
+	v = readl(pb->ctrl_reg);
+	v &= ~PB_DIV_MASK;
+	v |= (div - 1);
+
+	pic32_syskey_unlock();
+
+	writel(v, pb->ctrl_reg);
+
+	spin_unlock_irqrestore(&pb->core->reg_lock, flags);
+
+	/* wait again, for pbdivready */
+	err = readl_poll_timeout_atomic(pb->ctrl_reg, v, v & PB_DIV_READY,
+					1, LOCK_TIMEOUT_US);
+	if (err)
+		return err;
+
+	/* confirm that new div is applied correctly */
+	return (pbclk_read_pbdiv(pb) == div) ? 0 : -EBUSY;
+}
+
+const struct clk_ops pic32_pbclk_ops = {
+	.enable		= pbclk_enable,
+	.disable	= pbclk_disable,
+	.is_enabled	= pbclk_is_enabled,
+	.recalc_rate	= pbclk_recalc_rate,
+	.round_rate	= pbclk_round_rate,
+	.set_rate	= pbclk_set_rate,
+};
+
+struct clk *pic32_periph_clk_register(const struct pic32_periph_clk_data *desc,
+				      struct pic32_clk_common *core)
+{
+	struct pic32_periph_clk *pbclk;
+	struct clk *clk;
+
+	pbclk = devm_kzalloc(core->dev, sizeof(*pbclk), GFP_KERNEL);
+	if (!pbclk)
+		return ERR_PTR(-ENOMEM);
+
+	pbclk->hw.init = &desc->init_data;
+	pbclk->core = core;
+	pbclk->ctrl_reg = desc->ctrl_reg + core->iobase;
+
+	clk = devm_clk_register(core->dev, &pbclk->hw);
+	if (IS_ERR(clk)) {
+		dev_err(core->dev, "%s: clk_register() failed\n", __func__);
+		devm_kfree(core->dev, pbclk);
+	}
+
+	return clk;
+}
+
+/* Reference oscillator operations */
+struct pic32_ref_osc {
+	struct clk_hw hw;
+	void __iomem *ctrl_reg;
+	const u32 *parent_map;
+	struct pic32_clk_common *core;
+};
+
+#define clkhw_to_refosc(_hw)	container_of(_hw, struct pic32_ref_osc, hw)
+
+static int roclk_is_enabled(struct clk_hw *hw)
+{
+	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+
+	return readl(refo->ctrl_reg) & REFO_ON;
+}
+
+static int roclk_enable(struct clk_hw *hw)
+{
+	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+
+	writel(REFO_ON | REFO_OE, PIC32_SET(refo->ctrl_reg));
+	return 0;
+}
+
+static void roclk_disable(struct clk_hw *hw)
+{
+	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+
+	writel(REFO_ON | REFO_OE, PIC32_CLR(refo->ctrl_reg));
+}
+
+static void roclk_init(struct clk_hw *hw)
+{
+	/* initialize clock in disabled state */
+	roclk_disable(hw);
+}
+
+static u8 roclk_get_parent(struct clk_hw *hw)
+{
+	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+	u32 v, i;
+
+	v = (readl(refo->ctrl_reg) >> REFO_SEL_SHIFT) & REFO_SEL_MASK;
+
+	if (!refo->parent_map)
+		return v;
+
+	for (i = 0; i < clk_hw_get_num_parents(hw); i++)
+		if (refo->parent_map[i] == v)
+			return i;
+
+	return -EINVAL;
+}
+
+static unsigned long roclk_calc_rate(unsigned long parent_rate,
+				     u32 rodiv, u32 rotrim)
+{
+	u64 rate64;
+
+	/* fout = fin / [2 * {div + (trim / 512)}]
+	 *	= fin * 512 / [1024 * div + 2 * trim]
+	 *	= fin * 256 / (512 * div + trim)
+	 *	= (fin << 8) / ((div << 9) + trim)
+	 */
+	if (rotrim) {
+		rodiv = (rodiv << 9) + rotrim;
+		rate64 = parent_rate;
+		rate64 <<= 8;
+		do_div(rate64, rodiv);
+	} else if (rodiv) {
+		rate64 = parent_rate / (rodiv << 1);
+	} else {
+		rate64 = parent_rate;
+	}
+	return rate64;
+}
+
+static void roclk_calc_div_trim(unsigned long rate,
+				unsigned long parent_rate,
+				u32 *rodiv_p, u32 *rotrim_p)
+{
+	u32 div, rotrim, rodiv;
+	u64 frac;
+
+	/* Find an integer approximation of the floating-point arithmetic.
+	 *      fout = fin / [2 * {rodiv + (rotrim / 512)}] ... (1)
+	 * i.e. fout = fin / (2 * DIV),
+	 *      where DIV = rodiv + (rotrim / 512)
+	 *
+	 * Since the kernel does not do floating-point arithmetic,
+	 * (rotrim / 512) would truncate to zero and DIV would equal rodiv;
+	 * so scale (1) by 512 instead:
+	 * i.e. fout = (fin * 256) / [(512 * rodiv) + rotrim]  ... from (1)
+	 * i.e. rotrim = ((fin * 256) / fout) - (512 * rodiv)
+	 */
+	if (parent_rate <= rate) {
+		div = 0;
+		frac = 0;
+		rodiv = 0;
+		rotrim = 0;
+	} else {
+		div = parent_rate / (rate << 1);
+		frac = parent_rate;
+		frac <<= 8;
+		do_div(frac, rate);
+		frac -= (u64)(div << 9);
+
+		rodiv = (div > REFO_DIV_MASK) ? REFO_DIV_MASK : div;
+		rotrim = (frac >= REFO_TRIM_MAX) ? REFO_TRIM_MAX : frac;
+	}
+
+	if (rodiv_p)
+		*rodiv_p = rodiv;
+
+	if (rotrim_p)
+		*rotrim_p = rotrim;
+}
+
+static unsigned long roclk_recalc_rate(struct clk_hw *hw,
+				       unsigned long parent_rate)
+{
+	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+	u32 v, rodiv, rotrim;
+
+	/* get rodiv */
+	v = readl(refo->ctrl_reg);
+	rodiv = (v >> REFO_DIV_SHIFT) & REFO_DIV_MASK;
+
+	/* get trim */
+	v = readl(refo->ctrl_reg + REFO_TRIM_REG);
+	rotrim = (v >> REFO_TRIM_SHIFT) & REFO_TRIM_MASK;
+
+	return roclk_calc_rate(parent_rate, rodiv, rotrim);
+}
+
+static long roclk_round_rate(struct clk_hw *hw, unsigned long rate,
+			     unsigned long *parent_rate)
+{
+	u32 rotrim, rodiv;
+
+	/* calculate dividers for new rate */
+	roclk_calc_div_trim(rate, *parent_rate, &rodiv, &rotrim);
+
+	/* calculate new rate (rounding) based on new rodiv & rotrim */
+	return roclk_calc_rate(*parent_rate, rodiv, rotrim);
+}
+
+static int roclk_determine_rate(struct clk_hw *hw,
+				struct clk_rate_request *req)
+{
+	struct clk_hw *parent_clk, *best_parent_clk = NULL;
+	unsigned int i, delta, best_delta = -1;
+	unsigned long parent_rate, best_parent_rate = 0;
+	unsigned long best = 0, nearest_rate;
+
+	/* find a parent which can generate nearest clkrate >= rate */
+	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+		/* get parent */
+		parent_clk = clk_hw_get_parent_by_index(hw, i);
+		if (!parent_clk)
+			continue;
+
+		/* skip if parent runs slower than target rate */
+		parent_rate = clk_hw_get_rate(parent_clk);
+		if (req->rate > parent_rate)
+			continue;
+
+		nearest_rate = roclk_round_rate(hw, req->rate, &parent_rate);
+		delta = abs(nearest_rate - req->rate);
+		if ((nearest_rate >= req->rate) && (delta < best_delta)) {
+			best_parent_clk = parent_clk;
+			best_parent_rate = parent_rate;
+			best = nearest_rate;
+			best_delta = delta;
+
+			if (delta == 0)
+				break;
+		}
+	}
+
+	/* if no match found, retain old rate */
+	if (!best_parent_clk) {
+		pr_err("%s:%s, no parent found for rate %lu.\n",
+		       __func__, clk_hw_get_name(hw), req->rate);
+		return clk_hw_get_rate(hw);
+	}
+
+	pr_debug("%s,rate %lu, best_parent(%s, %lu), best %lu, delta %d\n",
+		 clk_hw_get_name(hw), req->rate,
+		 clk_hw_get_name(best_parent_clk), best_parent_rate,
+		 best, best_delta);
+
+	if (req->best_parent_rate)
+		req->best_parent_rate = best_parent_rate;
+
+	if (req->best_parent_hw)
+		req->best_parent_hw = best_parent_clk;
+
+	return best;
+}
+
+static int roclk_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+	unsigned long flags;
+	u32 v;
+	int err;
+
+	if (refo->parent_map)
+		index = refo->parent_map[index];
+
+	/* wait until ACTIVE bit is zero or timeout */
+	err = readl_poll_timeout(refo->ctrl_reg, v, !(v & REFO_ACTIVE),
+				 1, LOCK_TIMEOUT_US);
+	if (err) {
+		pr_err("%s: poll failed, clk active\n", clk_hw_get_name(hw));
+		return err;
+	}
+
+	spin_lock_irqsave(&refo->core->reg_lock, flags);
+
+	pic32_syskey_unlock();
+
+	/* calculate & apply new */
+	v = readl(refo->ctrl_reg);
+	v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
+	v |= index << REFO_SEL_SHIFT;
+
+	writel(v, refo->ctrl_reg);
+
+	spin_unlock_irqrestore(&refo->core->reg_lock, flags);
+
+	return 0;
+}
+
+static int roclk_set_rate_and_parent(struct clk_hw *hw,
+				     unsigned long rate,
+				     unsigned long parent_rate,
+				     u8 index)
+{
+	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+	unsigned long flags;
+	u32 trim, rodiv, v;
+	int err;
+
+	/* calculate new rodiv & rotrim for new rate */
+	roclk_calc_div_trim(rate, parent_rate, &rodiv, &trim);
+
+	pr_debug("parent_rate = %lu, rate = %lu, div = %d, trim = %d\n",
+		 parent_rate, rate, rodiv, trim);
+
+	/* wait until any in-progress source/divider switch has completed */
+	err = readl_poll_timeout(refo->ctrl_reg, v,
+				 !(v & (REFO_ACTIVE | REFO_DIVSW_EN)),
+				 1, LOCK_TIMEOUT_US);
+	if (err) {
+		pr_err("%s: poll timed out, clock is still active\n", __func__);
+		return err;
+	}
+
+	spin_lock_irqsave(&refo->core->reg_lock, flags);
+	v = readl(refo->ctrl_reg);
+
+	pic32_syskey_unlock();
+
+	/* apply parent, if required */
+	if (refo->parent_map)
+		index = refo->parent_map[index];
+
+	v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
+	v |= index << REFO_SEL_SHIFT;
+
+	/* apply RODIV */
+	v &= ~(REFO_DIV_MASK << REFO_DIV_SHIFT);
+	v |= rodiv << REFO_DIV_SHIFT;
+	writel(v, refo->ctrl_reg);
+
+	/* apply ROTRIM */
+	v = readl(refo->ctrl_reg + REFO_TRIM_REG);
+	v &= ~(REFO_TRIM_MASK << REFO_TRIM_SHIFT);
+	v |= trim << REFO_TRIM_SHIFT;
+	writel(v, refo->ctrl_reg + REFO_TRIM_REG);
+
+	/* enable & activate divider switching */
+	writel(REFO_ON | REFO_DIVSW_EN, PIC32_SET(refo->ctrl_reg));
+
+	/* wait until the divider switch (DIVSWEN) completes */
+	err = readl_poll_timeout_atomic(refo->ctrl_reg, v, !(v & REFO_DIVSW_EN),
+					1, LOCK_TIMEOUT_US);
+	/* leave the clk gated as it was */
+	writel(REFO_ON, PIC32_CLR(refo->ctrl_reg));
+
+	spin_unlock_irqrestore(&refo->core->reg_lock, flags);
+
+	return err;
+}
+
+static int roclk_set_rate(struct clk_hw *hw, unsigned long rate,
+			  unsigned long parent_rate)
+{
+	u8 index = roclk_get_parent(hw);
+
+	return roclk_set_rate_and_parent(hw, rate, parent_rate, index);
+}
+
+const struct clk_ops pic32_roclk_ops = {
+	.enable			= roclk_enable,
+	.disable		= roclk_disable,
+	.is_enabled		= roclk_is_enabled,
+	.get_parent		= roclk_get_parent,
+	.set_parent		= roclk_set_parent,
+	.determine_rate		= roclk_determine_rate,
+	.recalc_rate		= roclk_recalc_rate,
+	.set_rate_and_parent	= roclk_set_rate_and_parent,
+	.set_rate		= roclk_set_rate,
+	.init			= roclk_init,
+};
+
+struct clk *pic32_refo_clk_register(const struct pic32_ref_osc_data *data,
+				    struct pic32_clk_common *core)
+{
+	struct pic32_ref_osc *refo;
+	struct clk *clk;
+
+	refo = devm_kzalloc(core->dev, sizeof(*refo), GFP_KERNEL);
+	if (!refo)
+		return ERR_PTR(-ENOMEM);
+
+	refo->core = core;
+	refo->hw.init = &data->init_data;
+	refo->ctrl_reg = data->ctrl_reg + core->iobase;
+	refo->parent_map = data->parent_map;
+
+	clk = devm_clk_register(core->dev, &refo->hw);
+	if (IS_ERR(clk))
+		dev_err(core->dev, "%s: clk_register() failed\n", __func__);
+
+	return clk;
+}
+
+struct pic32_sys_pll {
+	struct clk_hw hw;
+	void __iomem *ctrl_reg;
+	void __iomem *status_reg;
+	u32 lock_mask;
+	u32 idiv; /* PLL iclk divider, treated as fixed */
+	struct pic32_clk_common *core;
+};
+
+#define clkhw_to_spll(_hw)	container_of(_hw, struct pic32_sys_pll, hw)
+
+static inline u32 spll_odiv_to_divider(u32 odiv)
+{
+	odiv = clamp_val(odiv, PLL_ODIV_MIN, PLL_ODIV_MAX);
+
+	return 1 << odiv;
+}
+
+static unsigned long spll_calc_mult_div(struct pic32_sys_pll *pll,
+					unsigned long rate,
+					unsigned long parent_rate,
+					u32 *mult_p, u32 *odiv_p)
+{
+	u32 mul, div, best_mul = 1, best_div = 1;
+	unsigned long new_rate, best_rate = rate;
+	unsigned int best_delta = -1, delta, match_found = 0;
+	u64 rate64;
+
+	parent_rate /= pll->idiv;
+
+	for (mul = 1; mul <= PLL_MULT_MAX; mul++) {
+		for (div = PLL_ODIV_MIN; div <= PLL_ODIV_MAX; div++) {
+			rate64 = parent_rate;
+			rate64 *= mul;
+			do_div(rate64, 1 << div);
+			new_rate = rate64;
+			delta = abs(rate - new_rate);
+			if ((new_rate >= rate) && (delta < best_delta)) {
+				best_delta = delta;
+				best_rate = new_rate;
+				best_mul = mul;
+				best_div = div;
+				match_found = 1;
+			}
+		}
+	}
+
+	if (!match_found) {
+		pr_warn("spll: no match found\n");
+		return 0;
+	}
+
+	pr_debug("rate %lu, par_rate %lu/mult %u, div %u, best_rate %lu\n",
+		 rate, parent_rate, best_mul, best_div, best_rate);
+
+	if (mult_p)
+		*mult_p = best_mul - 1;
+
+	if (odiv_p)
+		*odiv_p = best_div;
+
+	return best_rate;
+}
+
+static unsigned long spll_clk_recalc_rate(struct clk_hw *hw,
+					  unsigned long parent_rate)
+{
+	struct pic32_sys_pll *pll = clkhw_to_spll(hw);
+	unsigned long pll_in_rate;
+	u32 mult, odiv, div, v;
+	u64 rate64;
+
+	v = readl(pll->ctrl_reg);
+	odiv = ((v >> PLL_ODIV_SHIFT) & PLL_ODIV_MASK);
+	mult = ((v >> PLL_MULT_SHIFT) & PLL_MULT_MASK) + 1;
+	div = spll_odiv_to_divider(odiv);
+
+	/* pll_in_rate = parent_rate / idiv
+	 * pll_out_rate = pll_in_rate * mult / div;
+	 */
+	pll_in_rate = parent_rate / pll->idiv;
+	rate64 = pll_in_rate;
+	rate64 *= mult;
+	do_div(rate64, div);
+
+	return rate64;
+}
+
+static long spll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
+{
+	struct pic32_sys_pll *pll = clkhw_to_spll(hw);
+
+	return spll_calc_mult_div(pll, rate, *parent_rate, NULL, NULL);
+}
+
+static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+			     unsigned long parent_rate)
+{
+	struct pic32_sys_pll *pll = clkhw_to_spll(hw);
+	unsigned long ret, flags;
+	u32 mult, odiv, v;
+	int err;
+
+	ret = spll_calc_mult_div(pll, rate, parent_rate, &mult, &odiv);
+	if (!ret)
+		return -EINVAL;
+
+	/*
+	 * We can't change the SPLL counters while it is in active use
+	 * by SYSCLK. So check before applying new counters/rate.
+	 */
+
+	/* Is spll_clk active parent of sys_clk ? */
+	if (unlikely(clk_hw_get_parent(pic32_sclk_hw) == hw)) {
+		pr_err("%s: failed, clk in-use\n", __func__);
+		return -EBUSY;
+	}
+
+	spin_lock_irqsave(&pll->core->reg_lock, flags);
+
+	/* apply new multiplier & divisor */
+	v = readl(pll->ctrl_reg);
+	v &= ~(PLL_MULT_MASK << PLL_MULT_SHIFT);
+	v &= ~(PLL_ODIV_MASK << PLL_ODIV_SHIFT);
+	v |= (mult << PLL_MULT_SHIFT) | (odiv << PLL_ODIV_SHIFT);
+
+	/* sys unlock before write */
+	pic32_syskey_unlock();
+
+	writel(v, pll->ctrl_reg);
+	cpu_relax();
+
+	/* insert a few nops (5-stage pipeline) to ensure the CPU does not hang */
+	cpu_nop5();
+	cpu_nop5();
+
+	/* Wait until PLL is locked (maximum 100 usecs). */
+	err = readl_poll_timeout_atomic(pll->status_reg, v,
+					v & pll->lock_mask, 1, 100);
+	spin_unlock_irqrestore(&pll->core->reg_lock, flags);
+
+	return err;
+}
+
+/* SPLL clock operation */
+const struct clk_ops pic32_spll_ops = {
+	.recalc_rate	= spll_clk_recalc_rate,
+	.round_rate	= spll_clk_round_rate,
+	.set_rate	= spll_clk_set_rate,
+};
+
+struct clk *pic32_spll_clk_register(const struct pic32_sys_pll_data *data,
+				    struct pic32_clk_common *core)
+{
+	struct pic32_sys_pll *spll;
+	struct clk *clk;
+
+	spll = devm_kzalloc(core->dev, sizeof(*spll), GFP_KERNEL);
+	if (!spll)
+		return ERR_PTR(-ENOMEM);
+
+	spll->core = core;
+	spll->hw.init = &data->init_data;
+	spll->ctrl_reg = data->ctrl_reg + core->iobase;
+	spll->status_reg = data->status_reg + core->iobase;
+	spll->lock_mask = data->lock_mask;
+
+	/* cache PLL idiv; the PLL driver treats it as a constant. */
+	spll->idiv = (readl(spll->ctrl_reg) >> PLL_IDIV_SHIFT) & PLL_IDIV_MASK;
+	spll->idiv += 1;
+
+	clk = devm_clk_register(core->dev, &spll->hw);
+	if (IS_ERR(clk))
+		dev_err(core->dev, "sys_pll: clk_register() failed\n");
+
+	return clk;
+}
+
+/* System mux clock (aka SCLK) */
+
+struct pic32_sys_clk {
+	struct clk_hw hw;
+	void __iomem *mux_reg;
+	void __iomem *slew_reg;
+	u32 slew_div;
+	const u32 *parent_map;
+	struct pic32_clk_common *core;
+};
+
+#define clkhw_to_sys_clk(_hw)	container_of(_hw, struct pic32_sys_clk, hw)
+
+static unsigned long sclk_get_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
+	u32 div;
+
+	div = (readl(sclk->slew_reg) >> SLEW_SYSDIV_SHIFT) & SLEW_SYSDIV;
+	div += 1; /* sys-div to divider */
+
+	return parent_rate / div;
+}
+
+static long sclk_round_rate(struct clk_hw *hw, unsigned long rate,
+			    unsigned long *parent_rate)
+{
+	return calc_best_divided_rate(rate, *parent_rate, SLEW_SYSDIV, 1);
+}
+
+static int sclk_set_rate(struct clk_hw *hw,
+			 unsigned long rate, unsigned long parent_rate)
+{
+	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
+	unsigned long flags;
+	u32 v, div;
+	int err;
+
+	div = parent_rate / rate;
+
+	spin_lock_irqsave(&sclk->core->reg_lock, flags);
+
+	/* apply new div */
+	v = readl(sclk->slew_reg);
+	v &= ~(SLEW_SYSDIV << SLEW_SYSDIV_SHIFT);
+	v |= (div - 1) << SLEW_SYSDIV_SHIFT;
+
+	pic32_syskey_unlock();
+
+	writel(v, sclk->slew_reg);
+
+	/* wait until BUSY is cleared */
+	err = readl_poll_timeout_atomic(sclk->slew_reg, v,
+					!(v & SLEW_BUSY), 1, LOCK_TIMEOUT_US);
+
+	spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
+
+	return err;
+}
+
+static u8 sclk_get_parent(struct clk_hw *hw)
+{
+	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
+	u32 i, v;
+
+	v = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
+
+	if (!sclk->parent_map)
+		return v;
+
+	for (i = 0; i < clk_hw_get_num_parents(hw); i++)
+		if (sclk->parent_map[i] == v)
+			return i;
+	return -EINVAL;
+}
+
+static int sclk_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
+	unsigned long flags;
+	u32 nosc, cosc, v;
+	int err;
+
+	spin_lock_irqsave(&sclk->core->reg_lock, flags);
+
+	/* find new_osc */
+	nosc = sclk->parent_map ? sclk->parent_map[index] : index;
+
+	/* set new parent */
+	v = readl(sclk->mux_reg);
+	v &= ~(OSC_NEW_MASK << OSC_NEW_SHIFT);
+	v |= nosc << OSC_NEW_SHIFT;
+
+	pic32_syskey_unlock();
+
+	writel(v, sclk->mux_reg);
+
+	/* initiate switch */
+	writel(OSC_SWEN, PIC32_SET(sclk->mux_reg));
+	cpu_relax();
+
+	/* add nops to flush the pipeline (cpu_clk is in flux) */
+	cpu_nop5();
+
+	/* wait for SWEN bit to clear */
+	err = readl_poll_timeout_atomic(sclk->slew_reg, v,
+					!(v & OSC_SWEN), 1, LOCK_TIMEOUT_US);
+
+	spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
+
+	/*
+	 * SCLK clock-switching logic might reject a clock switching request
+	 * if prerequisites (such as the new clk_src being absent or
+	 * unstable) are not met.
+	 * So confirm before claiming success.
+	 */
+	cosc = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
+	if (cosc != nosc) {
+		pr_err("%s: err, failed to set_parent() to %d, current %d\n",
+		       clk_hw_get_name(hw), nosc, cosc);
+		err = -EBUSY;
+	}
+
+	return err;
+}
+
+static void sclk_init(struct clk_hw *hw)
+{
+	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
+	unsigned long flags;
+	u32 v;
+
+	/* Maintain reference to this clk, required in spll_clk_set_rate() */
+	pic32_sclk_hw = hw;
+
+	/* apply slew divider on both up and down scaling */
+	if (sclk->slew_div) {
+		spin_lock_irqsave(&sclk->core->reg_lock, flags);
+		v = readl(sclk->slew_reg);
+		v &= ~(SLEW_DIV << SLEW_DIV_SHIFT);
+		v |= sclk->slew_div << SLEW_DIV_SHIFT;
+		v |= SLEW_DOWNEN | SLEW_UPEN;
+		writel(v, sclk->slew_reg);
+		spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
+	}
+}
+
+/* sclk with post-divider */
+const struct clk_ops pic32_sclk_ops = {
+	.get_parent	= sclk_get_parent,
+	.set_parent	= sclk_set_parent,
+	.round_rate	= sclk_round_rate,
+	.set_rate	= sclk_set_rate,
+	.recalc_rate	= sclk_get_rate,
+	.init		= sclk_init,
+	.determine_rate = __clk_mux_determine_rate,
+};
+
+/* sclk with no slew and no post-divider */
+const struct clk_ops pic32_sclk_no_div_ops = {
+	.get_parent	= sclk_get_parent,
+	.set_parent	= sclk_set_parent,
+	.init		= sclk_init,
+	.determine_rate = __clk_mux_determine_rate,
+};
+
+struct clk *pic32_sys_clk_register(const struct pic32_sys_clk_data *data,
+				   struct pic32_clk_common *core)
+{
+	struct pic32_sys_clk *sclk;
+	struct clk *clk;
+
+	sclk = devm_kzalloc(core->dev, sizeof(*sclk), GFP_KERNEL);
+	if (!sclk)
+		return ERR_PTR(-ENOMEM);
+
+	sclk->core = core;
+	sclk->hw.init = &data->init_data;
+	sclk->mux_reg = data->mux_reg + core->iobase;
+	sclk->slew_reg = data->slew_reg + core->iobase;
+	sclk->slew_div = data->slew_div;
+	sclk->parent_map = data->parent_map;
+
+	clk = devm_clk_register(core->dev, &sclk->hw);
+	if (IS_ERR(clk))
+		dev_err(core->dev, "%s: clk register failed\n", __func__);
+
+	return clk;
+}
+
+/* secondary oscillator */
+struct pic32_sec_osc {
+	struct clk_hw hw;
+	void __iomem *enable_reg;
+	void __iomem *status_reg;
+	u32 enable_mask;
+	u32 status_mask;
+	unsigned long fixed_rate;
+	struct pic32_clk_common *core;
+};
+
+#define clkhw_to_sosc(_hw)	container_of(_hw, struct pic32_sec_osc, hw)
+static int sosc_clk_enable(struct clk_hw *hw)
+{
+	struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
+	u32 v;
+
+	/* enable SOSC */
+	pic32_syskey_unlock();
+	writel(sosc->enable_mask, PIC32_SET(sosc->enable_reg));
+
+	/* wait until the warm-up period expires or the ready status is set */
+	return readl_poll_timeout_atomic(sosc->status_reg, v,
+					 v & sosc->status_mask, 1, 100);
+}
+
+static void sosc_clk_disable(struct clk_hw *hw)
+{
+	struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
+
+	pic32_syskey_unlock();
+	writel(sosc->enable_mask, PIC32_CLR(sosc->enable_reg));
+}
+
+static int sosc_clk_is_enabled(struct clk_hw *hw)
+{
+	struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
+	u32 enabled, ready;
+
+	/* check enabled and ready status */
+	enabled = readl(sosc->enable_reg) & sosc->enable_mask;
+	ready = readl(sosc->status_reg) & sosc->status_mask;
+
+	return enabled && ready;
+}
+
+static unsigned long sosc_clk_calc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	return clkhw_to_sosc(hw)->fixed_rate;
+}
+
+const struct clk_ops pic32_sosc_ops = {
+	.enable = sosc_clk_enable,
+	.disable = sosc_clk_disable,
+	.is_enabled = sosc_clk_is_enabled,
+	.recalc_rate = sosc_clk_calc_rate,
+};
+
+struct clk *pic32_sosc_clk_register(const struct pic32_sec_osc_data *data,
+				    struct pic32_clk_common *core)
+{
+	struct pic32_sec_osc *sosc;
+
+	sosc = devm_kzalloc(core->dev, sizeof(*sosc), GFP_KERNEL);
+	if (!sosc)
+		return ERR_PTR(-ENOMEM);
+
+	sosc->core = core;
+	sosc->hw.init = &data->init_data;
+	sosc->fixed_rate = data->fixed_rate;
+	sosc->enable_mask = data->enable_mask;
+	sosc->status_mask = data->status_mask;
+	sosc->enable_reg = data->enable_reg + core->iobase;
+	sosc->status_reg = data->status_reg + core->iobase;
+
+	return devm_clk_register(core->dev, &sosc->hw);
+}
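The reference-oscillator code above avoids floating point by scaling the divider expression, so fout = (fin << 8) / ((rodiv << 9) + rotrim). A standalone sketch of that arithmetic with assumed sample rates (24 MHz parent, 1.8432 MHz target; neither value is taken from this patch) shows how roclk_calc_div_trim() and roclk_calc_rate() round:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t fin = 24000000;	/* assumed parent rate */
		uint64_t target = 1843200;	/* assumed requested rate */

		/* integer div/trim selection, as in roclk_calc_div_trim() */
		uint32_t div = fin / (target * 2);
		uint64_t frac = (fin << 8) / target - ((uint64_t)div << 9);
		uint32_t trim = frac > 511 ? 511 : (uint32_t)frac;

		/* achievable rate, as in roclk_calc_rate() */
		uint64_t fout = (fin << 8) / (((uint64_t)div << 9) + trim);

		/* prints div=6 trim=261 fout=1843384 */
		printf("div=%u trim=%u fout=%llu\n", div, trim,
		       (unsigned long long)fout);
		return 0;
	}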
diff --git a/drivers/clk/microchip/clk-core.h b/drivers/clk/microchip/clk-core.h
new file mode 100644
index 0000000..8566642
--- /dev/null
+++ b/drivers/clk/microchip/clk-core.h
@@ -0,0 +1,84 @@
+/*
+ * Purna Chandra Mandal,<purna.mandal@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#ifndef __MICROCHIP_CLK_PIC32_H_
+#define __MICROCHIP_CLK_PIC32_H_
+
+#include <linux/clk-provider.h>
+
+/* PIC32 clock data */
+struct pic32_clk_common {
+	struct device *dev;
+	void __iomem *iobase;
+	spinlock_t reg_lock; /* clock lock */
+};
+
+/* System PLL clock */
+struct pic32_sys_pll_data {
+	struct clk_init_data init_data;
+	const u32 ctrl_reg;
+	const u32 status_reg;
+	const u32 lock_mask;
+};
+
+/* System clock */
+struct pic32_sys_clk_data {
+	struct clk_init_data init_data;
+	const u32 mux_reg;
+	const u32 slew_reg;
+	const u32 *parent_map;
+	const u32 slew_div;
+};
+
+/* Reference Oscillator clock */
+struct pic32_ref_osc_data {
+	struct clk_init_data init_data;
+	const u32 ctrl_reg;
+	const u32 *parent_map;
+};
+
+/* Peripheral Bus clock */
+struct pic32_periph_clk_data {
+	struct clk_init_data init_data;
+	const u32 ctrl_reg;
+};
+
+/* External Secondary Oscillator clock  */
+struct pic32_sec_osc_data {
+	struct clk_init_data init_data;
+	const u32 enable_reg;
+	const u32 status_reg;
+	const u32 enable_mask;
+	const u32 status_mask;
+	const unsigned long fixed_rate;
+};
+
+extern const struct clk_ops pic32_pbclk_ops;
+extern const struct clk_ops pic32_sclk_ops;
+extern const struct clk_ops pic32_sclk_no_div_ops;
+extern const struct clk_ops pic32_spll_ops;
+extern const struct clk_ops pic32_roclk_ops;
+extern const struct clk_ops pic32_sosc_ops;
+
+struct clk *pic32_periph_clk_register(const struct pic32_periph_clk_data *data,
+				      struct pic32_clk_common *core);
+struct clk *pic32_refo_clk_register(const struct pic32_ref_osc_data *data,
+				    struct pic32_clk_common *core);
+struct clk *pic32_sys_clk_register(const struct pic32_sys_clk_data *data,
+				   struct pic32_clk_common *core);
+struct clk *pic32_spll_clk_register(const struct pic32_sys_pll_data *data,
+				    struct pic32_clk_common *core);
+struct clk *pic32_sosc_clk_register(const struct pic32_sec_osc_data *data,
+				    struct pic32_clk_common *core);
+
+#endif /* __MICROCHIP_CLK_PIC32_H_ */
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
new file mode 100644
index 0000000..020a29a
--- /dev/null
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -0,0 +1,275 @@
+/*
+ * Purna Chandra Mandal,<purna.mandal@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#include <dt-bindings/clock/microchip,pic32-clock.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <asm/traps.h>
+
+#include "clk-core.h"
+
+/* FRC Postscaler */
+#define OSC_FRCDIV_MASK		0x07
+#define OSC_FRCDIV_SHIFT	24
+
+/* SPLL fields */
+#define PLL_ICLK_MASK		0x01
+#define PLL_ICLK_SHIFT		7
+
+#define DECLARE_PERIPHERAL_CLOCK(__clk_name, __reg, __flags)	\
+	{							\
+		.ctrl_reg = (__reg),				\
+		.init_data = {					\
+			.name = (__clk_name),			\
+			.parent_names = (const char *[]) {	\
+				"sys_clk"			\
+			},					\
+			.num_parents = 1,			\
+			.ops = &pic32_pbclk_ops,		\
+			.flags = (__flags),			\
+		},						\
+	}
+
+#define DECLARE_REFO_CLOCK(__clkid, __reg)				\
+	{								\
+		.ctrl_reg = (__reg),					\
+		.init_data = {						\
+			.name = "refo" #__clkid "_clk",			\
+			.parent_names = (const char *[]) {		\
+				"sys_clk", "pb1_clk", "posc_clk",	\
+				"frc_clk", "lprc_clk", "sosc_clk",	\
+				"sys_pll", "refi" #__clkid "_clk",	\
+				"bfrc_clk",				\
+			},						\
+			.num_parents = 9,				\
+			.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE,\
+			.ops = &pic32_roclk_ops,			\
+		},							\
+		.parent_map = (const u32[]) {				\
+			0, 1, 2, 3, 4, 5, 7, 8, 9			\
+		},							\
+	}
+
+static const struct pic32_ref_osc_data ref_clks[] = {
+	DECLARE_REFO_CLOCK(1, 0x80),
+	DECLARE_REFO_CLOCK(2, 0xa0),
+	DECLARE_REFO_CLOCK(3, 0xc0),
+	DECLARE_REFO_CLOCK(4, 0xe0),
+	DECLARE_REFO_CLOCK(5, 0x100),
+};
+
+static const struct pic32_periph_clk_data periph_clocks[] = {
+	DECLARE_PERIPHERAL_CLOCK("pb1_clk", 0x140, 0),
+	DECLARE_PERIPHERAL_CLOCK("pb2_clk", 0x150, CLK_IGNORE_UNUSED),
+	DECLARE_PERIPHERAL_CLOCK("pb3_clk", 0x160, 0),
+	DECLARE_PERIPHERAL_CLOCK("pb4_clk", 0x170, 0),
+	DECLARE_PERIPHERAL_CLOCK("pb5_clk", 0x180, 0),
+	DECLARE_PERIPHERAL_CLOCK("pb6_clk", 0x190, 0),
+	DECLARE_PERIPHERAL_CLOCK("cpu_clk", 0x1a0, CLK_IGNORE_UNUSED),
+};
+
+static const struct pic32_sys_clk_data sys_mux_clk = {
+	.slew_reg = 0x1c0,
+	.slew_div = 2, /* step of div_4 -> div_2 -> no_div */
+	.init_data = {
+		.name = "sys_clk",
+		.parent_names = (const char *[]) {
+			"frcdiv_clk", "sys_pll", "posc_clk",
+			"sosc_clk", "lprc_clk", "frcdiv_clk",
+		},
+		.num_parents = 6,
+		.ops = &pic32_sclk_ops,
+	},
+	.parent_map = (const u32[]) {
+		0, 1, 2, 4, 5, 7,
+	},
+};
+
+static const struct pic32_sys_pll_data sys_pll = {
+	.ctrl_reg = 0x020,
+	.status_reg = 0x1d0,
+	.lock_mask = BIT(7),
+	.init_data = {
+		.name = "sys_pll",
+		.parent_names = (const char *[]) {
+			"spll_mux_clk"
+		},
+		.num_parents = 1,
+		.ops = &pic32_spll_ops,
+	},
+};
+
+static const struct pic32_sec_osc_data sosc_clk = {
+	.status_reg = 0x1d0,
+	.enable_mask = BIT(1),
+	.status_mask = BIT(4),
+	.init_data = {
+		.name = "sosc_clk",
+		.parent_names = NULL,
+		.ops = &pic32_sosc_ops,
+	},
+};
+
+static int pic32mzda_critical_clks[] = {
+	PB2CLK, PB7CLK
+};
+
+/* PIC32MZDA clock data */
+struct pic32mzda_clk_data {
+	struct clk *clks[MAXCLKS];
+	struct pic32_clk_common core;
+	struct clk_onecell_data onecell_data;
+	struct notifier_block failsafe_notifier;
+};
+
+static int pic32_fscm_nmi(struct notifier_block *nb,
+			  unsigned long action, void *data)
+{
+	struct pic32mzda_clk_data *cd;
+
+	cd  = container_of(nb, struct pic32mzda_clk_data, failsafe_notifier);
+
+	/* SYSCLK is now running from BFRCCLK. Report clock failure. */
+	if (readl(cd->core.iobase) & BIT(2))
+		pr_alert("pic32-clk: FSCM detected clk failure.\n");
+
+	/* TODO: detect reason of failure and recover accordingly */
+
+	return NOTIFY_OK;
+}
+
+static int pic32mzda_clk_probe(struct platform_device *pdev)
+{
+	const char *const pll_mux_parents[] = {"posc_clk", "frc_clk"};
+	struct device_node *np = pdev->dev.of_node;
+	struct pic32mzda_clk_data *cd;
+	struct pic32_clk_common *core;
+	struct clk *pll_mux_clk, *clk;
+	struct clk **clks;
+	int nr_clks, i, ret;
+
+	cd = devm_kzalloc(&pdev->dev, sizeof(*cd), GFP_KERNEL);
+	if (!cd)
+		return -ENOMEM;
+
+	core = &cd->core;
+	core->iobase = of_io_request_and_map(np, 0, of_node_full_name(np));
+	if (IS_ERR(core->iobase)) {
+		dev_err(&pdev->dev, "pic32-clk: failed to map registers\n");
+		return PTR_ERR(core->iobase);
+	}
+
+	spin_lock_init(&core->reg_lock);
+	core->dev = &pdev->dev;
+	clks = &cd->clks[0];
+
+	/* register fixed rate clocks */
+	clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
+						CLK_IS_ROOT, 24000000);
+	clks[FRCCLK] =  clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
+						CLK_IS_ROOT, 8000000);
+	clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
+						CLK_IS_ROOT, 8000000);
+	clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
+						CLK_IS_ROOT, 32000);
+	clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
+						CLK_IS_ROOT, 24000000);
+	/* fixed rate (optional) clock */
+	if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
+		pr_info("pic32-clk: dt requests SOSC.\n");
+		clks[SOSCCLK] = pic32_sosc_clk_register(&sosc_clk, core);
+	}
+	/* divider clock */
+	clks[FRCDIVCLK] = clk_register_divider(&pdev->dev, "frcdiv_clk",
+					       "frc_clk", 0,
+					       core->iobase,
+					       OSC_FRCDIV_SHIFT,
+					       OSC_FRCDIV_MASK,
+					       CLK_DIVIDER_POWER_OF_TWO,
+					       &core->reg_lock);
+	/* PLL ICLK mux */
+	pll_mux_clk = clk_register_mux(&pdev->dev, "spll_mux_clk",
+				       pll_mux_parents, 2, 0,
+				       core->iobase + 0x020,
+				       PLL_ICLK_SHIFT, 1, 0, &core->reg_lock);
+	if (IS_ERR(pll_mux_clk))
+		pr_err("spll_mux_clk: clk register failed\n");
+
+	/* PLL */
+	clks[PLLCLK] = pic32_spll_clk_register(&sys_pll, core);
+	/* SYSTEM clock */
+	clks[SCLK] = pic32_sys_clk_register(&sys_mux_clk, core);
+	/* Peripheral bus clocks */
+	for (nr_clks = PB1CLK, i = 0; nr_clks <= PB7CLK; i++, nr_clks++)
+		clks[nr_clks] = pic32_periph_clk_register(&periph_clocks[i],
+							  core);
+	/* Reference oscillator clock */
+	for (nr_clks = REF1CLK, i = 0; nr_clks <= REF5CLK; i++, nr_clks++)
+		clks[nr_clks] = pic32_refo_clk_register(&ref_clks[i], core);
+
+	/* register clkdev */
+	for (i = 0; i < MAXCLKS; i++) {
+		if (IS_ERR(clks[i]))
+			continue;
+		clk_register_clkdev(clks[i], NULL, __clk_get_name(clks[i]));
+	}
+
+	/* register clock provider */
+	cd->onecell_data.clks = clks;
+	cd->onecell_data.clk_num = MAXCLKS;
+	ret = of_clk_add_provider(np, of_clk_src_onecell_get,
+				  &cd->onecell_data);
+	if (ret)
+		return ret;
+
+	/* force enable critical clocks */
+	for (i = 0; i < ARRAY_SIZE(pic32mzda_critical_clks); i++) {
+		clk = clks[pic32mzda_critical_clks[i]];
+		if (clk_prepare_enable(clk))
+			dev_err(&pdev->dev, "clk_prepare_enable(%s) failed\n",
+				__clk_get_name(clk));
+	}
+
+	/* register NMI for failsafe clock monitor */
+	cd->failsafe_notifier.notifier_call = pic32_fscm_nmi;
+	return register_nmi_notifier(&cd->failsafe_notifier);
+}
+
+static const struct of_device_id pic32mzda_clk_match_table[] = {
+	{ .compatible = "microchip,pic32mzda-clk", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, pic32mzda_clk_match_table);
+
+static struct platform_driver pic32mzda_clk_driver = {
+	.probe		= pic32mzda_clk_probe,
+	.driver		= {
+		.name	= "clk-pic32mzda",
+		.of_match_table = pic32mzda_clk_match_table,
+	},
+};
+
+static int __init microchip_pic32mzda_clk_init(void)
+{
+	return platform_driver_register(&pic32mzda_clk_driver);
+}
+core_initcall(microchip_pic32mzda_clk_init);
+
+MODULE_DESCRIPTION("Microchip PIC32MZDA Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:clk-pic32mzda");
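For orientation only, a peripheral driver on this SoC would consume one of the clocks registered above through the generic clk API. The fragment below is a hedged sketch (the probe function and its device are hypothetical, not part of this patch), showing the usual get/prepare/enable pattern rather than any PIC32-specific interface:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int ret;

		/* clock taken from the consumer's "clocks" DT property */
		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		dev_info(&pdev->dev, "bus clock running at %lu Hz\n",
			 clk_get_rate(clk));
		return 0;
	}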
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
index b54da1f..b4e4d6a 100644
--- a/drivers/clk/mmp/reset.c
+++ b/drivers/clk/mmp/reset.c
@@ -74,7 +74,7 @@
 	return 0;
 }
 
-static struct reset_control_ops mmp_clk_reset_ops = {
+static const struct reset_control_ops mmp_clk_reset_ops = {
 	.assert		= mmp_clk_reset_assert,
 	.deassert	= mmp_clk_reset_deassert,
 };
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index 5428efb..3cd1af0 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -129,20 +129,10 @@
 };
 
 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-#define P_XO 0
-#define FE_PLL_200 1
-#define FE_PLL_500 2
-#define DDRC_PLL_666  3
-
-#define DDRC_PLL_666_SDCC  1
-#define FE_PLL_125_DLY 1
-
-#define FE_PLL_WCSS2G 1
-#define FE_PLL_WCSS5G 1
 
 static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
 	F(48000000, P_XO, 1, 0, 0),
-	F(200000000, FE_PLL_200, 1, 0, 0),
+	F(200000000, P_FEPLL200, 1, 0, 0),
 	{ }
 };
 
@@ -334,15 +324,15 @@
 };
 
 static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = {
-	F(1843200, FE_PLL_200, 1, 144, 15625),
-	F(3686400, FE_PLL_200, 1, 288, 15625),
-	F(7372800, FE_PLL_200, 1, 576, 15625),
-	F(14745600, FE_PLL_200, 1, 1152, 15625),
-	F(16000000, FE_PLL_200, 1, 2, 25),
+	F(1843200, P_FEPLL200, 1, 144, 15625),
+	F(3686400, P_FEPLL200, 1, 288, 15625),
+	F(7372800, P_FEPLL200, 1, 576, 15625),
+	F(14745600, P_FEPLL200, 1, 1152, 15625),
+	F(16000000, P_FEPLL200, 1, 2, 25),
 	F(24000000, P_XO, 1, 1, 2),
-	F(32000000, FE_PLL_200, 1, 4, 25),
-	F(40000000, FE_PLL_200, 1, 1, 5),
-	F(46400000, FE_PLL_200, 1, 29, 125),
+	F(32000000, P_FEPLL200, 1, 4, 25),
+	F(40000000, P_FEPLL200, 1, 1, 5),
+	F(46400000, P_FEPLL200, 1, 29, 125),
 	F(48000000, P_XO, 1, 0, 0),
 	{ }
 };
@@ -410,9 +400,9 @@
 };
 
 static const struct freq_tbl ftbl_gcc_gp_clk[] = {
-	F(1250000,  FE_PLL_200, 1, 16, 0),
-	F(2500000,  FE_PLL_200, 1,  8, 0),
-	F(5000000,  FE_PLL_200, 1,  4, 0),
+	F(1250000,  P_FEPLL200, 1, 16, 0),
+	F(2500000,  P_FEPLL200, 1,  8, 0),
+	F(5000000,  P_FEPLL200, 1,  4, 0),
 	{ }
 };
 
@@ -512,11 +502,11 @@
 static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
 	F(144000,    P_XO,			1,  3, 240),
 	F(400000,    P_XO,			1,  1, 0),
-	F(20000000,  FE_PLL_500,		1,  1, 25),
-	F(25000000,  FE_PLL_500,		1,  1, 20),
-	F(50000000,  FE_PLL_500,		1,  1, 10),
-	F(100000000, FE_PLL_500,		1,  1, 5),
-	F(193000000, DDRC_PLL_666_SDCC,		1,  0, 0),
+	F(20000000,  P_FEPLL500,		1,  1, 25),
+	F(25000000,  P_FEPLL500,		1,  1, 20),
+	F(50000000,  P_FEPLL500,		1,  1, 10),
+	F(100000000, P_FEPLL500,		1,  1, 5),
+	F(193000000, P_DDRPLL,		1,  0, 0),
 	{ }
 };
 
@@ -536,9 +526,9 @@
 
 static const struct freq_tbl ftbl_gcc_apps_clk[] = {
 	F(48000000, P_XO,	   1, 0, 0),
-	F(200000000, FE_PLL_200,   1, 0, 0),
-	F(500000000, FE_PLL_500,   1, 0, 0),
-	F(626000000, DDRC_PLL_666, 1, 0, 0),
+	F(200000000, P_FEPLL200,   1, 0, 0),
+	F(500000000, P_FEPLL500,   1, 0, 0),
+	F(626000000, P_DDRPLLAPSS, 1, 0, 0),
 	{ }
 };
 
@@ -557,7 +547,7 @@
 
 static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = {
 	F(48000000, P_XO,	   1, 0, 0),
-	F(100000000, FE_PLL_200,   2, 0, 0),
+	F(100000000, P_FEPLL200,   2, 0, 0),
 	{ }
 };
 
@@ -940,7 +930,7 @@
 };
 
 static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
-	F(2000000, FE_PLL_200, 10, 0, 0),
+	F(2000000, P_FEPLL200, 10, 0, 0),
 	{ }
 };
 
@@ -1007,7 +997,7 @@
 };
 
 static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = {
-	F(125000000, FE_PLL_125_DLY, 1, 0, 0),
+	F(125000000, P_FEPLL125DLY, 1, 0, 0),
 	{ }
 };
 
@@ -1027,7 +1017,7 @@
 
 static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = {
 	F(48000000, P_XO, 1, 0, 0),
-	F(250000000, FE_PLL_WCSS2G, 1, 0, 0),
+	F(250000000, P_FEPLLWCSS2G, 1, 0, 0),
 	{ }
 };
 
@@ -1097,7 +1087,7 @@
 
 static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = {
 	F(48000000, P_XO, 1, 0, 0),
-	F(250000000, FE_PLL_WCSS5G, 1, 0, 0),
+	F(250000000, P_FEPLLWCSS5G, 1, 0, 0),
 	{ }
 };
 
@@ -1325,6 +1315,16 @@
 
 static int gcc_ipq4019_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
+
+	clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000);
+	clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000);
+
 	return qcom_cc_probe(pdev, &gcc_ipq4019_desc);
 }
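As a quick check on the renamed UART table entries above, the rows that keep a pre-divider of 1 simply scale the 200 MHz FEPLL source by M/N. A standalone sketch of that arithmetic (the 200 MHz parent comes from the fixed-rate registration in the probe above; treating the pre-divider as 1 is an assumption that only holds for these rows):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t parent = 200000000;	/* fepll200 */
		const struct { uint32_t m, n, expected; } rows[] = {
			{  144, 15625,  1843200 },
			{  288, 15625,  3686400 },
			{ 1152, 15625, 14745600 },
			{    1,     5, 40000000 },
		};
		unsigned int i;

		for (i = 0; i < sizeof(rows) / sizeof(rows[0]); i++)
			printf("200000000 * %u / %u = %llu (table: %u)\n",
			       rows[i].m, rows[i].n,
			       (unsigned long long)(parent * rows[i].m / rows[i].n),
			       rows[i].expected);
		return 0;
	}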
 
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index 6c977d3..0324d8d 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -55,7 +55,7 @@
 	return regmap_update_bits(rst->regmap, map->reg, mask, 0);
 }
 
-struct reset_control_ops qcom_reset_ops = {
+const struct reset_control_ops qcom_reset_ops = {
 	.reset = qcom_reset,
 	.assert = qcom_reset_assert,
 	.deassert = qcom_reset_deassert,
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 0e11e21..cda8779 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -32,6 +32,6 @@
 #define to_qcom_reset_controller(r) \
 	container_of(r, struct qcom_reset_controller, rcdev);
 
-extern struct reset_control_ops qcom_reset_ops;
+extern const struct reset_control_ops qcom_reset_ops;
 
 #endif
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
new file mode 100644
index 0000000..2115ce4
--- /dev/null
+++ b/drivers/clk/renesas/Kconfig
@@ -0,0 +1,16 @@
+config CLK_RENESAS_CPG_MSSR
+	bool
+	default y if ARCH_R8A7795
+
+config CLK_RENESAS_CPG_MSTP
+	bool
+	default y if ARCH_R7S72100
+	default y if ARCH_R8A73A4
+	default y if ARCH_R8A7740
+	default y if ARCH_R8A7778
+	default y if ARCH_R8A7779
+	default y if ARCH_R8A7790
+	default y if ARCH_R8A7791
+	default y if ARCH_R8A7793
+	default y if ARCH_R8A7794
+	default y if ARCH_SH73A0
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 7e2579b..ead8bb8 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -1,13 +1,15 @@
 obj-$(CONFIG_ARCH_EMEV2)		+= clk-emev2.o
-obj-$(CONFIG_ARCH_R7S72100)		+= clk-rz.o clk-mstp.o
-obj-$(CONFIG_ARCH_R8A73A4)		+= clk-r8a73a4.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7740)		+= clk-r8a7740.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7778)		+= clk-r8a7778.o clk-mstp.o
-obj-$(CONFIG_ARCH_R8A7779)		+= clk-r8a7779.o clk-mstp.o
-obj-$(CONFIG_ARCH_R8A7790)		+= clk-rcar-gen2.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7791)		+= clk-rcar-gen2.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7793)		+= clk-rcar-gen2.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7794)		+= clk-rcar-gen2.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7795)		+= renesas-cpg-mssr.o \
-					   r8a7795-cpg-mssr.o clk-div6.o
-obj-$(CONFIG_ARCH_SH73A0)		+= clk-sh73a0.o clk-mstp.o clk-div6.o
+obj-$(CONFIG_ARCH_R7S72100)		+= clk-rz.o
+obj-$(CONFIG_ARCH_R8A73A4)		+= clk-r8a73a4.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7740)		+= clk-r8a7740.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7778)		+= clk-r8a7778.o
+obj-$(CONFIG_ARCH_R8A7779)		+= clk-r8a7779.o
+obj-$(CONFIG_ARCH_R8A7790)		+= clk-rcar-gen2.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7791)		+= clk-rcar-gen2.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7793)		+= clk-rcar-gen2.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7794)		+= clk-rcar-gen2.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7795)		+= r8a7795-cpg-mssr.o
+obj-$(CONFIG_ARCH_SH73A0)		+= clk-sh73a0.o clk-div6.o
+
+obj-$(CONFIG_CLK_RENESAS_CPG_MSSR)	+= renesas-cpg-mssr.o clk-div6.o
+obj-$(CONFIG_CLK_RENESAS_CPG_MSTP)	+= clk-mstp.o
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 3d44e18..8b597b9 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -243,9 +243,7 @@
 }
 CLK_OF_DECLARE(cpg_mstp_clks, "renesas,cpg-mstp-clocks", cpg_mstp_clocks_init);
 
-
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev)
+int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev)
 {
 	struct device_node *np = dev->of_node;
 	struct of_phandle_args clkspec;
@@ -297,7 +295,7 @@
 	return error;
 }
 
-void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev)
+void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
 {
 	if (!list_empty(&dev->power.subsys_data->clock_list))
 		pm_clk_destroy(dev);
@@ -326,4 +324,3 @@
 
 	of_genpd_add_provider_simple(np, pd);
 }
-#endif /* !CONFIG_PM_GENERIC_DOMAINS_OF */
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index b2198aef..6af7f5b 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/bug.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -26,6 +27,7 @@
 
 #include "renesas-cpg-mssr.h"
 
+#define CPG_RCKCR	0x240
 
 enum clk_ids {
 	/* Core Clock Outputs exported to DT */
@@ -50,6 +52,7 @@
 	CLK_S3,
 	CLK_SDSRC,
 	CLK_SSPSRC,
+	CLK_RINT,
 
 	/* Module Clocks */
 	MOD_CLK_BASE
@@ -63,8 +66,12 @@
 	CLK_TYPE_GEN3_PLL3,
 	CLK_TYPE_GEN3_PLL4,
 	CLK_TYPE_GEN3_SD,
+	CLK_TYPE_GEN3_R,
 };
 
+#define DEF_GEN3_SD(_name, _id, _parent, _offset)	\
+	DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
+
 static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
 	/* External Clock Inputs */
 	DEF_INPUT("extal",  CLK_EXTAL),
@@ -102,10 +109,10 @@
 	DEF_FIXED("s3d2",       R8A7795_CLK_S3D2,  CLK_S3,         2, 1),
 	DEF_FIXED("s3d4",       R8A7795_CLK_S3D4,  CLK_S3,         4, 1),
 
-	DEF_SD("sd0",           R8A7795_CLK_SD0,   CLK_PLL1_DIV2, 0x0074),
-	DEF_SD("sd1",           R8A7795_CLK_SD1,   CLK_PLL1_DIV2, 0x0078),
-	DEF_SD("sd2",           R8A7795_CLK_SD2,   CLK_PLL1_DIV2, 0x0268),
-	DEF_SD("sd3",           R8A7795_CLK_SD3,   CLK_PLL1_DIV2, 0x026c),
+	DEF_GEN3_SD("sd0",      R8A7795_CLK_SD0,   CLK_PLL1_DIV2, 0x0074),
+	DEF_GEN3_SD("sd1",      R8A7795_CLK_SD1,   CLK_PLL1_DIV2, 0x0078),
+	DEF_GEN3_SD("sd2",      R8A7795_CLK_SD2,   CLK_PLL1_DIV2, 0x0268),
+	DEF_GEN3_SD("sd3",      R8A7795_CLK_SD3,   CLK_PLL1_DIV2, 0x026c),
 
 	DEF_FIXED("cl",         R8A7795_CLK_CL,    CLK_PLL1_DIV2, 48, 1),
 	DEF_FIXED("cp",         R8A7795_CLK_CP,    CLK_EXTAL,      2, 1),
@@ -113,6 +120,11 @@
 	DEF_DIV6P1("mso",       R8A7795_CLK_MSO,   CLK_PLL1_DIV4, 0x014),
 	DEF_DIV6P1("hdmi",      R8A7795_CLK_HDMI,  CLK_PLL1_DIV2, 0x250),
 	DEF_DIV6P1("canfd",     R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+
+	DEF_DIV6_RO("osc",      R8A7795_CLK_OSC,   CLK_EXTAL, CPG_RCKCR, 8),
+	DEF_DIV6_RO("r_int",    CLK_RINT,          CLK_EXTAL, CPG_RCKCR, 32),
+
+	DEF_BASE("r",           R8A7795_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
 };
 
 static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
@@ -139,6 +151,7 @@
 	DEF_MOD("usb3-if0",		 328,	R8A7795_CLK_S3D1),
 	DEF_MOD("usb-dmac0",		 330,	R8A7795_CLK_S3D1),
 	DEF_MOD("usb-dmac1",		 331,	R8A7795_CLK_S3D1),
+	DEF_MOD("rwdt0",		 402,	R8A7795_CLK_R),
 	DEF_MOD("intc-ex",		 407,	R8A7795_CLK_CP),
 	DEF_MOD("intc-ap",		 408,	R8A7795_CLK_S3D1),
 	DEF_MOD("audmac0",		 502,	R8A7795_CLK_S3D4),
@@ -148,6 +161,7 @@
 	DEF_MOD("hscif2",		 518,	R8A7795_CLK_S3D1),
 	DEF_MOD("hscif1",		 519,	R8A7795_CLK_S3D1),
 	DEF_MOD("hscif0",		 520,	R8A7795_CLK_S3D1),
+	DEF_MOD("pwm",			 523,	R8A7795_CLK_S3D4),
 	DEF_MOD("fcpvd3",		 600,	R8A7795_CLK_S2D1),
 	DEF_MOD("fcpvd2",		 601,	R8A7795_CLK_S2D1),
 	DEF_MOD("fcpvd1",		 602,	R8A7795_CLK_S2D1),
@@ -578,6 +592,18 @@
 	case CLK_TYPE_GEN3_SD:
 		return cpg_sd_clk_register(core, base, __clk_get_name(parent));
 
+	case CLK_TYPE_GEN3_R:
+		/* RINT is the default; switch to EXTALR only if it is populated */
+		value = readl(base + CPG_RCKCR) & 0x3f;
+
+		if (clk_get_rate(clks[CLK_EXTALR])) {
+			parent = clks[CLK_EXTALR];
+			value |= BIT(15);
+		}
+
+		writel(value, base + CPG_RCKCR);
+		break;
+
 	default:
 		return ERR_PTR(-EINVAL);
 	}
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 58e24b3..1f2dc362 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -15,6 +15,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/clk/renesas.h>
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/mod_devicetable.h>
@@ -253,7 +254,7 @@
 {
 	struct clk *clk = NULL, *parent;
 	struct device *dev = priv->dev;
-	unsigned int id = core->id;
+	unsigned int id = core->id, div = core->div;
 	const char *parent_name;
 
 	WARN_DEBUG(id >= priv->num_core_clks);
@@ -266,6 +267,7 @@
 
 	case CLK_TYPE_FF:
 	case CLK_TYPE_DIV6P1:
+	case CLK_TYPE_DIV6_RO:
 		WARN_DEBUG(core->parent >= priv->num_core_clks);
 		parent = priv->clks[core->parent];
 		if (IS_ERR(parent)) {
@@ -274,13 +276,18 @@
 		}
 
 		parent_name = __clk_get_name(parent);
-		if (core->type == CLK_TYPE_FF) {
-			clk = clk_register_fixed_factor(NULL, core->name,
-							parent_name, 0,
-							core->mult, core->div);
-		} else {
+
+		if (core->type == CLK_TYPE_DIV6_RO)
+			/* Multiply the fixed divisor by the DIV6 register divider value */
+			div *= (readl(priv->base + core->offset) & 0x3f) + 1;
+
+		if (core->type == CLK_TYPE_DIV6P1) {
 			clk = cpg_div6_register(core->name, 1, &parent_name,
 						priv->base + core->offset);
+		} else {
+			clk = clk_register_fixed_factor(NULL, core->name,
+							parent_name, 0,
+							core->mult, div);
 		}
 		break;
 
@@ -375,8 +382,6 @@
 	kfree(clock);
 }
 
-
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
 struct cpg_mssr_clk_domain {
 	struct generic_pm_domain genpd;
 	struct device_node *np;
@@ -384,6 +389,8 @@
 	unsigned int core_pm_clks[0];
 };
 
+static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;
+
 static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
 			       struct cpg_mssr_clk_domain *pd)
 {
@@ -407,17 +414,20 @@
 	}
 }
 
-static int cpg_mssr_attach_dev(struct generic_pm_domain *genpd,
-			       struct device *dev)
+int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
 {
-	struct cpg_mssr_clk_domain *pd =
-		container_of(genpd, struct cpg_mssr_clk_domain, genpd);
+	struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
 	struct device_node *np = dev->of_node;
 	struct of_phandle_args clkspec;
 	struct clk *clk;
 	int i = 0;
 	int error;
 
+	if (!pd) {
+		dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
+		return -EPROBE_DEFER;
+	}
+
 	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
 					   &clkspec)) {
 		if (cpg_mssr_is_pm_clk(&clkspec, pd))
@@ -457,8 +467,7 @@
 	return error;
 }
 
-static void cpg_mssr_detach_dev(struct generic_pm_domain *genpd,
-				struct device *dev)
+void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
 {
 	if (!list_empty(&dev->power.subsys_data->clock_list))
 		pm_clk_destroy(dev);
@@ -487,19 +496,11 @@
 	pm_genpd_init(genpd, &simple_qos_governor, false);
 	genpd->attach_dev = cpg_mssr_attach_dev;
 	genpd->detach_dev = cpg_mssr_detach_dev;
+	cpg_mssr_clk_domain = pd;
 
 	of_genpd_add_provider_simple(np, genpd);
 	return 0;
 }
-#else
-static inline int cpg_mssr_add_clk_domain(struct device *dev,
-					  const unsigned int *core_pm_clks,
-					  unsigned int num_core_pm_clks)
-{
-	return 0;
-}
-#endif /* !CONFIG_PM_GENERIC_DOMAINS_OF */
-
 
 static const struct of_device_id cpg_mssr_match[] = {
 #ifdef CONFIG_ARCH_R8A7795
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 952b695..0d1e3e8 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -37,6 +37,7 @@
 	CLK_TYPE_IN,		/* External Clock Input */
 	CLK_TYPE_FF,		/* Fixed Factor Clock */
 	CLK_TYPE_DIV6P1,	/* DIV6 Clock with 1 parent clock */
+	CLK_TYPE_DIV6_RO,	/* DIV6 Clock read only with extra divisor */
 
 	/* Custom definitions start here */
 	CLK_TYPE_CUSTOM,
@@ -53,9 +54,8 @@
 	DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult)
 #define DEF_DIV6P1(_name, _id, _parent, _offset)	\
 	DEF_BASE(_name, _id, CLK_TYPE_DIV6P1, _parent, .offset = _offset)
-#define DEF_SD(_name, _id, _parent, _offset)	\
-	DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
-
+#define DEF_DIV6_RO(_name, _id, _parent, _offset, _div)	\
+	DEF_BASE(_name, _id, CLK_TYPE_DIV6_RO, _parent, .offset = _offset, .div = _div, .mult = 1)
 
     /*
      * Definitions of Module Clocks
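The new read-only DIV6 type above folds the live 6-bit divider field into the fixed divisor given by DEF_DIV6_RO() and then registers the result as a plain fixed-factor clock. A standalone sketch of that derivation, as done in the core-clock registration code above (the EXTAL rate and register value are invented sample numbers, not taken from any R-Car documentation):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long extal = 16666666;		/* assumed parent rate */
		uint32_t rckcr = 0x0000002f;		/* assumed CPG_RCKCR readout */
		unsigned int fixed_div = 8;		/* e.g. the "osc" clock's .div */

		/* combined divider: fixed divisor times (6-bit field + 1) */
		unsigned int div = fixed_div * ((rckcr & 0x3f) + 1);

		printf("osc = %lu / %u = %lu Hz\n", extal, div, extal / div);
		return 0;
	}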
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c
index 552f7bb..21218987 100644
--- a/drivers/clk/rockchip/softrst.c
+++ b/drivers/clk/rockchip/softrst.c
@@ -81,7 +81,7 @@
 	return 0;
 }
 
-static struct reset_control_ops rockchip_softrst_ops = {
+static const struct reset_control_ops rockchip_softrst_ops = {
 	.assert		= rockchip_softrst_assert,
 	.deassert	= rockchip_softrst_deassert,
 };
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index fdd41b1..16575ee 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -302,10 +302,12 @@
 
 	/* SRC_FSYS */
 	MUX(CLK_MOUT_TSADC, "mout_tsadc", group_sclk_p, SRC_FSYS, 28, 4),
+	MUX(CLK_MOUT_MMC2, "mout_mmc2", group_sclk_p, SRC_FSYS, 8, 4),
 	MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 4),
 	MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 4),
 
 	/* SRC_PERIL0 */
+	MUX(CLK_MOUT_UART2, "mout_uart2", group_sclk_p, SRC_PERIL0, 8, 4),
 	MUX(CLK_MOUT_UART1, "mout_uart1", group_sclk_p, SRC_PERIL0, 4, 4),
 	MUX(CLK_MOUT_UART0, "mout_uart0", group_sclk_p, SRC_PERIL0, 0, 4),
 
@@ -389,7 +391,13 @@
 		CLK_SET_RATE_PARENT, 0),
 	DIV(CLK_DIV_MMC0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
 
+	/* DIV_FSYS2 */
+	DIV_F(CLK_DIV_MMC2_PRE, "div_mmc2_pre", "div_mmc2", DIV_FSYS2, 8, 8,
+		CLK_SET_RATE_PARENT, 0),
+	DIV(CLK_DIV_MMC2, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
+
 	/* DIV_PERIL0 */
+	DIV(CLK_DIV_UART2, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
 	DIV(CLK_DIV_UART1, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
 	DIV(CLK_DIV_UART0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
 
@@ -538,6 +546,8 @@
 		GATE_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0),
 	GATE(CLK_SCLK_EBI, "sclk_ebi", "div_ebi",
 		GATE_SCLK_FSYS, 6, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc2_pre",
+		GATE_SCLK_FSYS, 2, CLK_SET_RATE_PARENT, 0),
 	GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc1_pre",
 		GATE_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0),
 	GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc0_pre",
@@ -552,6 +562,9 @@
 		GATE_SCLK_PERIL, 7, CLK_SET_RATE_PARENT, 0),
 	GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi0_pre",
 		GATE_SCLK_PERIL, 6, CLK_SET_RATE_PARENT, 0),
+
+	GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2",
+		GATE_SCLK_PERIL, 2, CLK_SET_RATE_PARENT, 0),
 	GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1",
 		GATE_SCLK_PERIL, 1, CLK_SET_RATE_PARENT, 0),
 	GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
@@ -630,6 +643,7 @@
 	GATE(CLK_USBOTG, "usbotg", "div_aclk_200", GATE_IP_FSYS, 13, 0, 0),
 	GATE(CLK_USBHOST, "usbhost", "div_aclk_200", GATE_IP_FSYS, 12, 0, 0),
 	GATE(CLK_SROMC, "sromc", "div_aclk_200", GATE_IP_FSYS, 11, 0, 0),
+	GATE(CLK_SDMMC2, "sdmmc2", "div_aclk_200", GATE_IP_FSYS, 7, 0, 0),
 	GATE(CLK_SDMMC1, "sdmmc1", "div_aclk_200", GATE_IP_FSYS, 6, 0, 0),
 	GATE(CLK_SDMMC0, "sdmmc0", "div_aclk_200", GATE_IP_FSYS, 5, 0, 0),
 	GATE(CLK_PDMA1, "pdma1", "div_aclk_200", GATE_IP_FSYS, 1, 0, 0),
@@ -649,6 +663,7 @@
 	GATE(CLK_I2C2, "i2c2", "div_aclk_100", GATE_IP_PERIL, 8, 0, 0),
 	GATE(CLK_I2C1, "i2c1", "div_aclk_100", GATE_IP_PERIL, 7, 0, 0),
 	GATE(CLK_I2C0, "i2c0", "div_aclk_100", GATE_IP_PERIL, 6, 0, 0),
+	GATE(CLK_UART2, "uart2", "div_aclk_100", GATE_IP_PERIL, 2, 0, 0),
 	GATE(CLK_UART1, "uart1", "div_aclk_100", GATE_IP_PERIL, 1, 0, 0),
 	GATE(CLK_UART0, "uart0", "div_aclk_100", GATE_IP_PERIL, 0, 0, 0),
 };
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index 957aae6..d0c6c9a 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -1423,7 +1423,7 @@
 	return 0;
 }
 
-static struct reset_control_ops atlas7_rst_ops = {
+static const struct reset_control_ops atlas7_rst_ops = {
 	.reset = atlas7_reset_module,
 };
 
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index 044c171..d9ea22e 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -85,7 +85,7 @@
 	return 0;
 }
 
-static struct reset_control_ops sunxi_ve_reset_ops = {
+static const struct reset_control_ops sunxi_ve_reset_ops = {
 	.assert		= sunxi_ve_reset_assert,
 	.deassert	= sunxi_ve_reset_deassert,
 };
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a9b1761..028dd83 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -83,7 +83,7 @@
 	return 0;
 }
 
-static struct reset_control_ops sun9i_mmc_reset_ops = {
+static const struct reset_control_ops sun9i_mmc_reset_ops = {
 	.assert		= sun9i_mmc_reset_assert,
 	.deassert	= sun9i_mmc_reset_deassert,
 };
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 5432b1c..fe0c3d1 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -76,7 +76,7 @@
 	return 0;
 }
 
-static struct reset_control_ops sunxi_usb_reset_ops = {
+static const struct reset_control_ops sunxi_usb_reset_ops = {
 	.assert		= sunxi_usb_reset_assert,
 	.deassert	= sunxi_usb_reset_deassert,
 };
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 7ad6383..837e5cb 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -623,7 +623,7 @@
 	case OSC_CTRL_PLL_REF_DIV_4:
 		return 4;
 	default:
-		pr_err("Invalied pll ref divider %d\n", pll_ref_div);
+		pr_err("Invalid pll ref divider %d\n", pll_ref_div);
 		BUG();
 	}
 	return 0;
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 637041f..3d0edee 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -175,6 +175,19 @@
 #define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
 #define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
 
+#define SATA_PLL_CFG0				0x490
+#define SATA_PLL_CFG0_PADPLL_RESET_SWCTL	BIT(0)
+#define SATA_PLL_CFG0_PADPLL_USE_LOCKDET	BIT(2)
+#define SATA_PLL_CFG0_PADPLL_SLEEP_IDDQ		BIT(13)
+#define SATA_PLL_CFG0_SEQ_ENABLE		BIT(24)
+
+#define XUSBIO_PLL_CFG0				0x51c
+#define XUSBIO_PLL_CFG0_PADPLL_RESET_SWCTL	BIT(0)
+#define XUSBIO_PLL_CFG0_CLK_ENABLE_SWCTL	BIT(2)
+#define XUSBIO_PLL_CFG0_PADPLL_USE_LOCKDET	BIT(6)
+#define XUSBIO_PLL_CFG0_PADPLL_SLEEP_IDDQ	BIT(13)
+#define XUSBIO_PLL_CFG0_SEQ_ENABLE		BIT(24)
+
 #define UTMIPLL_HW_PWRDN_CFG0			0x52c
 #define UTMIPLL_HW_PWRDN_CFG0_UTMIPLL_LOCK	BIT(31)
 #define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE	BIT(25)
@@ -416,6 +429,51 @@
 #define PLLU_MISC0_WRITE_MASK		0xbfffffff
 #define PLLU_MISC1_WRITE_MASK		0x00000007
 
+void tegra210_xusb_pll_hw_control_enable(void)
+{
+	u32 val;
+
+	val = readl_relaxed(clk_base + XUSBIO_PLL_CFG0);
+	val &= ~(XUSBIO_PLL_CFG0_CLK_ENABLE_SWCTL |
+		 XUSBIO_PLL_CFG0_PADPLL_RESET_SWCTL);
+	val |= XUSBIO_PLL_CFG0_PADPLL_USE_LOCKDET |
+	       XUSBIO_PLL_CFG0_PADPLL_SLEEP_IDDQ;
+	writel_relaxed(val, clk_base + XUSBIO_PLL_CFG0);
+}
+EXPORT_SYMBOL_GPL(tegra210_xusb_pll_hw_control_enable);
+
+void tegra210_xusb_pll_hw_sequence_start(void)
+{
+	u32 val;
+
+	val = readl_relaxed(clk_base + XUSBIO_PLL_CFG0);
+	val |= XUSBIO_PLL_CFG0_SEQ_ENABLE;
+	writel_relaxed(val, clk_base + XUSBIO_PLL_CFG0);
+}
+EXPORT_SYMBOL_GPL(tegra210_xusb_pll_hw_sequence_start);
+
+void tegra210_sata_pll_hw_control_enable(void)
+{
+	u32 val;
+
+	val = readl_relaxed(clk_base + SATA_PLL_CFG0);
+	val &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL;
+	val |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET |
+	       SATA_PLL_CFG0_PADPLL_SLEEP_IDDQ;
+	writel_relaxed(val, clk_base + SATA_PLL_CFG0);
+}
+EXPORT_SYMBOL_GPL(tegra210_sata_pll_hw_control_enable);
+
+void tegra210_sata_pll_hw_sequence_start(void)
+{
+	u32 val;
+
+	val = readl_relaxed(clk_base + SATA_PLL_CFG0);
+	val |= SATA_PLL_CFG0_SEQ_ENABLE;
+	writel_relaxed(val, clk_base + SATA_PLL_CFG0);
+}
+EXPORT_SYMBOL_GPL(tegra210_sata_pll_hw_sequence_start);
+
 static inline void _pll_misc_chk_default(void __iomem *base,
 					struct tegra_clk_pll_params *params,
 					u8 misc_num, u32 default_val, u32 mask)
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 2a3a4fe..f60fe2e 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -271,7 +271,7 @@
 	}
 }
 
-static struct reset_control_ops rst_ops = {
+static const struct reset_control_ops rst_ops = {
 	.assert = tegra_clk_rst_assert,
 	.deassert = tegra_clk_rst_deassert,
 };
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index a911d7d..6b5a309 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -223,7 +223,7 @@
 	DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "mcasp6_aux_gfclk_mux"),
 	DT_CLK(NULL, "mcasp7_ahclkx_mux", "mcasp7_ahclkx_mux"),
 	DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "mcasp7_aux_gfclk_mux"),
-	DT_CLK(NULL, "mcasp8_ahclk_mux", "mcasp8_ahclk_mux"),
+	DT_CLK(NULL, "mcasp8_ahclkx_mux", "mcasp8_ahclkx_mux"),
 	DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "mcasp8_aux_gfclk_mux"),
 	DT_CLK(NULL, "mmc1_fclk_mux", "mmc1_fclk_mux"),
 	DT_CLK(NULL, "mmc1_fclk_div", "mmc1_fclk_div"),
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c346be6..47352d2 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -181,11 +181,27 @@
 	  This option enables support for Texas Instruments 32.768 Hz clocksource
 	  available on many OMAP-like platforms.
 
+config CLKSRC_NPS
+	bool "NPS400 clocksource driver" if COMPILE_TEST
+	depends on !PHYS_ADDR_T_64BIT
+	select CLKSRC_MMIO
+	select CLKSRC_OF if OF
+	help
+	  NPS400 clocksource support.
+	  It provides a 64-bit counter with an update rate of up to 1000 MHz.
+	  The counter is accessed via a pair of 32-bit memory-mapped registers.
+
 config CLKSRC_STM32
 	bool "Clocksource for STM32 SoCs" if !ARCH_STM32
 	depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
 	select CLKSRC_MMIO
 
+config CLKSRC_MPS2
+	bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
+	depends on GENERIC_SCHED_CLOCK
+	select CLKSRC_MMIO
+	select CLKSRC_OF
+
 config ARM_ARCH_TIMER
 	bool
 	select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index dc2b899..473974f 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -39,6 +39,7 @@
 obj-$(CONFIG_CLKSRC_STM32)	+= timer-stm32.o
 obj-$(CONFIG_CLKSRC_EXYNOS_MCT)	+= exynos_mct.o
 obj-$(CONFIG_CLKSRC_LPC32XX)	+= time-lpc32xx.o
+obj-$(CONFIG_CLKSRC_MPS2)	+= mps2-timer.o
 obj-$(CONFIG_CLKSRC_SAMSUNG_PWM)	+= samsung_pwm_timer.o
 obj-$(CONFIG_FSL_FTM_TIMER)	+= fsl_ftm_timer.o
 obj-$(CONFIG_VF_PIT_TIMER)	+= vf_pit_timer.o
@@ -46,6 +47,7 @@
 obj-$(CONFIG_MTK_TIMER)		+= mtk_timer.o
 obj-$(CONFIG_CLKSRC_PISTACHIO)	+= time-pistachio.o
 obj-$(CONFIG_CLKSRC_TI_32K)	+= timer-ti-32k.o
+obj-$(CONFIG_CLKSRC_NPS)	+= timer-nps.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)		+= arm_global_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 5152b38..4814446 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -468,11 +468,11 @@
 	.mask	= CLOCKSOURCE_MASK(56),
 };
 
-static struct timecounter timecounter;
+static struct arch_timer_kvm_info arch_timer_kvm_info;
 
-struct timecounter *arch_timer_get_timecounter(void)
+struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
 {
-	return &timecounter;
+	return &arch_timer_kvm_info;
 }
 
 static void __init arch_counter_register(unsigned type)
@@ -500,7 +500,8 @@
 	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
 	cyclecounter.mult = clocksource_counter.mult;
 	cyclecounter.shift = clocksource_counter.shift;
-	timecounter_init(&timecounter, &cyclecounter, start_count);
+	timecounter_init(&arch_timer_kvm_info.timecounter,
+			 &cyclecounter, start_count);
 
 	/* 56 bits minimum, so we assume worst case rollover */
 	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
@@ -744,6 +745,8 @@
 
 	arch_timer_register();
 	arch_timer_common_init();
+
+	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
 }
 
 static void __init arch_timer_of_init(struct device_node *np)
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index 6334526..797505a 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -264,6 +264,7 @@
 	dw_ced->ced.set_state_shutdown = apbt_shutdown;
 	dw_ced->ced.set_state_periodic = apbt_set_periodic;
 	dw_ced->ced.set_state_oneshot = apbt_set_oneshot;
+	dw_ced->ced.set_state_oneshot_stopped = apbt_shutdown;
 	dw_ced->ced.tick_resume = apbt_resume;
 	dw_ced->ced.set_next_event = apbt_next_event;
 	dw_ced->ced.irq = dw_ced->timer.irq;
diff --git a/drivers/clocksource/mps2-timer.c b/drivers/clocksource/mps2-timer.c
new file mode 100644
index 0000000..3d33a5e
--- /dev/null
+++ b/drivers/clocksource/mps2-timer.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Vladimir Murzin <vladimir.murzin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#define TIMER_CTRL		0x0
+#define TIMER_CTRL_ENABLE	BIT(0)
+#define TIMER_CTRL_IE		BIT(3)
+
+#define TIMER_VALUE		0x4
+#define TIMER_RELOAD		0x8
+#define TIMER_INT		0xc
+
+struct clockevent_mps2 {
+	void __iomem *reg;
+	u32 clock_count_per_tick;
+	struct clock_event_device clkevt;
+};
+
+static void __iomem *sched_clock_base;
+
+static u64 notrace mps2_sched_read(void)
+{
+	return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
+}
+
+static inline struct clockevent_mps2 *to_mps2_clkevt(struct clock_event_device *c)
+{
+	return container_of(c, struct clockevent_mps2, clkevt);
+}
+
+static void clockevent_mps2_writel(u32 val, struct clock_event_device *c, u32 offset)
+{
+	writel_relaxed(val, to_mps2_clkevt(c)->reg + offset);
+}
+
+static int mps2_timer_shutdown(struct clock_event_device *ce)
+{
+	clockevent_mps2_writel(0, ce, TIMER_RELOAD);
+	clockevent_mps2_writel(0, ce, TIMER_CTRL);
+
+	return 0;
+}
+
+static int mps2_timer_set_next_event(unsigned long next, struct clock_event_device *ce)
+{
+	clockevent_mps2_writel(next, ce, TIMER_VALUE);
+	clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);
+
+	return 0;
+}
+
+static int mps2_timer_set_periodic(struct clock_event_device *ce)
+{
+	u32 clock_count_per_tick = to_mps2_clkevt(ce)->clock_count_per_tick;
+
+	clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_RELOAD);
+	clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_VALUE);
+	clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);
+
+	return 0;
+}
+
+static irqreturn_t mps2_timer_interrupt(int irq, void *dev_id)
+{
+	struct clockevent_mps2 *ce = dev_id;
+	u32 status = readl_relaxed(ce->reg + TIMER_INT);
+
+	if (!status) {
+		pr_warn("spurious interrupt\n");
+		return IRQ_NONE;
+	}
+
+	writel_relaxed(1, ce->reg + TIMER_INT);
+
+	ce->clkevt.event_handler(&ce->clkevt);
+
+	return IRQ_HANDLED;
+}
+
+static int __init mps2_clockevent_init(struct device_node *np)
+{
+	void __iomem *base;
+	struct clk *clk = NULL;
+	struct clockevent_mps2 *ce;
+	u32 rate;
+	int irq, ret;
+	const char *name = "mps2-clkevt";
+
+	ret = of_property_read_u32(np, "clock-frequency", &rate);
+	if (ret) {
+		clk = of_clk_get(np, 0);
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			pr_err("failed to get clock for clockevent: %d\n", ret);
+			goto out;
+		}
+
+		ret = clk_prepare_enable(clk);
+		if (ret) {
+			pr_err("failed to enable clock for clockevent: %d\n", ret);
+			goto out_clk_put;
+		}
+
+		rate = clk_get_rate(clk);
+	}
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		ret = -EADDRNOTAVAIL;
+		pr_err("failed to map register for clockevent: %d\n", ret);
+		goto out_clk_disable;
+	}
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		ret = -ENOENT;
+		pr_err("failed to get irq for clockevent: %d\n", ret);
+		goto out_iounmap;
+	}
+
+	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+	if (!ce) {
+		ret = -ENOMEM;
+		goto out_iounmap;
+	}
+
+	ce->reg = base;
+	ce->clock_count_per_tick = DIV_ROUND_CLOSEST(rate, HZ);
+	ce->clkevt.irq = irq;
+	ce->clkevt.name = name;
+	ce->clkevt.rating = 200;
+	ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	ce->clkevt.cpumask = cpu_possible_mask;
+	ce->clkevt.set_state_shutdown	= mps2_timer_shutdown,
+	ce->clkevt.set_state_periodic	= mps2_timer_set_periodic,
+	ce->clkevt.set_state_oneshot	= mps2_timer_shutdown,
+	ce->clkevt.set_next_event	= mps2_timer_set_next_event;
+
+	/* Ensure timer is disabled */
+	writel_relaxed(0, base + TIMER_CTRL);
+
+	ret = request_irq(irq, mps2_timer_interrupt, IRQF_TIMER, name, ce);
+	if (ret) {
+		pr_err("failed to request irq for clockevent: %d\n", ret);
+		goto out_kfree;
+	}
+
+	clockevents_config_and_register(&ce->clkevt, rate, 0xf, 0xffffffff);
+
+	return 0;
+
+out_kfree:
+	kfree(ce);
+out_iounmap:
+	iounmap(base);
+out_clk_disable:
+	/* clk_{disable, unprepare, put}() can handle NULL as a parameter */
+	clk_disable_unprepare(clk);
+out_clk_put:
+	clk_put(clk);
+out:
+	return ret;
+}
+
+static int __init mps2_clocksource_init(struct device_node *np)
+{
+	void __iomem *base;
+	struct clk *clk = NULL;
+	u32 rate;
+	int ret;
+	const char *name = "mps2-clksrc";
+
+	ret = of_property_read_u32(np, "clock-frequency", &rate);
+	if (ret) {
+		clk = of_clk_get(np, 0);
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			pr_err("failed to get clock for clocksource: %d\n", ret);
+			goto out;
+		}
+
+		ret = clk_prepare_enable(clk);
+		if (ret) {
+			pr_err("failed to enable clock for clocksource: %d\n", ret);
+			goto out_clk_put;
+		}
+
+		rate = clk_get_rate(clk);
+	}
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		ret = -EADDRNOTAVAIL;
+		pr_err("failed to map register for clocksource: %d\n", ret);
+		goto out_clk_disable;
+	}
+
+	/* Ensure timer is disabled */
+	writel_relaxed(0, base + TIMER_CTRL);
+
+	/* ... and set it up as free-running clocksource */
+	writel_relaxed(0xffffffff, base + TIMER_VALUE);
+	writel_relaxed(0xffffffff, base + TIMER_RELOAD);
+
+	writel_relaxed(TIMER_CTRL_ENABLE, base + TIMER_CTRL);
+
+	ret = clocksource_mmio_init(base + TIMER_VALUE, name,
+				    rate, 200, 32,
+				    clocksource_mmio_readl_down);
+	if (ret) {
+		pr_err("failed to init clocksource: %d\n", ret);
+		goto out_iounmap;
+	}
+
+	sched_clock_base = base;
+	sched_clock_register(mps2_sched_read, 32, rate);
+
+	return 0;
+
+out_iounmap:
+	iounmap(base);
+out_clk_disable:
+	/* clk_{disable, unprepare, put}() can handle NULL as a parameter */
+	clk_disable_unprepare(clk);
+out_clk_put:
+	clk_put(clk);
+out:
+	return ret;
+}
+
+static void __init mps2_timer_init(struct device_node *np)
+{
+	static int has_clocksource, has_clockevent;
+	int ret;
+
+	if (!has_clocksource) {
+		ret = mps2_clocksource_init(np);
+		if (!ret) {
+			has_clocksource = 1;
+			return;
+		}
+	}
+
+	if (!has_clockevent) {
+		ret = mps2_clockevent_init(np);
+		if (!ret) {
+			has_clockevent = 1;
+			return;
+		}
+	}
+}
+
+CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index d67bc35..7e583f8 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -152,7 +152,7 @@
 }
 
 static void
-mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
+__init mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
 {
 	writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
 		evt->gpt_base + TIMER_CTRL_REG(timer));
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index 2bcecaf..c407c47 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -42,7 +42,7 @@
 
 	ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
 				    32, clocksource_mmio_readl_up);
-	if (!ret) {
+	if (ret) {
 		pr_err("%s: registration failed\n", np->full_name);
 		return;
 	}
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 38333ab..7b94ad2 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -258,17 +258,3 @@
 	register_persistent_clock(NULL, tegra_read_persistent_clock64);
 }
 CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
-
-#ifdef CONFIG_PM
-static u32 usec_config;
-
-void tegra_timer_suspend(void)
-{
-	usec_config = timer_readl(TIMERUS_USEC_CFG);
-}
-
-void tegra_timer_resume(void)
-{
-	timer_writel(usec_config, TIMERUS_USEC_CFG);
-}
-#endif
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
new file mode 100644
index 0000000..d461089
--- /dev/null
+++ b/drivers/clocksource/timer-nps.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/cpu.h>
+#include <soc/nps/common.h>
+
+#define NPS_MSU_TICK_LOW	0xC8
+#define NPS_CLUSTER_OFFSET	8
+#define NPS_CLUSTER_NUM		16
+
+/* This array is per cluster of CPUs (each NPS400 cluster has 256 CPUs) */
+static void *nps_msu_reg_low_addr[NPS_CLUSTER_NUM] __read_mostly;
+
+static unsigned long nps_timer_rate;
+
+static cycle_t nps_clksrc_read(struct clocksource *clksrc)
+{
+	int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET;
+
+	return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
+}
+
+static void __init nps_setup_clocksource(struct device_node *node,
+					 struct clk *clk)
+{
+	int ret, cluster;
+
+	for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++)
+		nps_msu_reg_low_addr[cluster] =
+			nps_host_reg((cluster << NPS_CLUSTER_OFFSET),
+				 NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		pr_err("Couldn't enable parent clock\n");
+		return;
+	}
+
+	nps_timer_rate = clk_get_rate(clk);
+
+	ret = clocksource_mmio_init(nps_msu_reg_low_addr, "EZnps-tick",
+				    nps_timer_rate, 301, 32, nps_clksrc_read);
+	if (ret) {
+		pr_err("Couldn't register clock source.\n");
+		clk_disable_unprepare(clk);
+	}
+}
+
+static void __init nps_timer_init(struct device_node *node)
+{
+	struct clk *clk;
+
+	clk = of_clk_get(node, 0);
+	if (IS_ERR(clk)) {
+		pr_err("Can't get timer clock.\n");
+		return;
+	}
+
+	nps_setup_clocksource(node, clk);
+}
+
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
+		       nps_timer_init);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a7f4585..b7445b6 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -18,7 +18,11 @@
 
 if CPU_FREQ
 
+config CPU_FREQ_GOV_ATTR_SET
+	bool
+
 config CPU_FREQ_GOV_COMMON
+	select CPU_FREQ_GOV_ATTR_SET
 	select IRQ_WORK
 	bool
 
@@ -103,6 +107,17 @@
 	  Be aware that not all cpufreq drivers support the conservative
 	  governor. If unsure have a look at the help section of the
 	  driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+	bool "schedutil"
+	depends on SMP
+	select CPU_FREQ_GOV_SCHEDUTIL
+	select CPU_FREQ_GOV_PERFORMANCE
+	help
+	  Use the 'schedutil' CPUFreq governor by default. If unsure,
+	  have a look at the help section of that governor. The fallback
+	  governor will be 'performance'.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -184,6 +199,26 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_GOV_SCHEDUTIL
+	tristate "'schedutil' cpufreq policy governor"
+	depends on CPU_FREQ && SMP
+	select CPU_FREQ_GOV_ATTR_SET
+	select IRQ_WORK
+	help
+	  This governor makes decisions based on the utilization data provided
+	  by the scheduler.  It sets the CPU frequency to be proportional to
+	  the utilization/capacity ratio coming from the scheduler.  If the
+	  utilization is frequency-invariant, the new frequency is also
+	  proportional to the maximum available frequency.  If that is not the
+	  case, it is proportional to the current frequency of the CPU.  The
+	  frequency tipping point is at utilization/capacity equal to 80% in
+	  both cases.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called cpufreq_schedutil.
+
+	  If in doubt, say N.
+
 comment "CPU frequency scaling drivers"
 
 config CPUFREQ_DT
@@ -191,6 +226,7 @@
 	depends on HAVE_CLK && OF
 	# if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
 	depends on !CPU_THERMAL || THERMAL
+	select CPUFREQ_DT_PLATDEV
 	select PM_OPP
 	help
 	  This adds a generic DT based cpufreq driver for frequency management.
@@ -199,6 +235,15 @@
 
 	  If in doubt, say N.
 
+config CPUFREQ_DT_PLATDEV
+	bool
+	help
+	  This adds a generic DT based cpufreq platdev driver for frequency
+	  management.  This creates a 'cpufreq-dt' platform device on the
+	  supported platforms.
+
+	  If in doubt, say N.
+
 if X86
 source "drivers/cpufreq/Kconfig.x86"
 endif
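
The CPU_FREQ_GOV_SCHEDUTIL help text above describes the governor's policy in words: the next frequency is proportional to the utilization/capacity ratio reported by the scheduler, with a tipping point at 80%. The following is a minimal sketch of that relation, not the in-kernel implementation; the function name example_next_freq and the parameters base, util and max are illustrative assumptions, where base stands for the maximum frequency when the utilization signal is frequency-invariant and for the current frequency otherwise:

/*
 * Illustrative sketch of the mapping described in the help text:
 * next_f = 1.25 * base * util / max, so a utilization/capacity ratio
 * of 80% yields next_f == base.
 */
static unsigned int example_next_freq(unsigned int base, unsigned long util,
				      unsigned long max)
{
	return (base + (base >> 2)) * util / max;
}
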
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 14b1f93..d89b8af 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -50,15 +50,6 @@
 
 	  If in doubt, say N.
 
-config ARM_HISI_ACPU_CPUFREQ
-	tristate "Hisilicon ACPU CPUfreq driver"
-	depends on ARCH_HISI && CPUFREQ_DT
-	select PM_OPP
-	help
-	  This enables the hisilicon ACPU CPUfreq driver.
-
-	  If in doubt, say N.
-
 config ARM_IMX6Q_CPUFREQ
 	tristate "Freescale i.MX6 cpufreq support"
 	depends on ARCH_MXC
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index c59bdcb..adbd1de 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,6 +5,7 @@
 config X86_INTEL_PSTATE
        bool "Intel P state control"
        depends on X86
+       select ACPI_PROCESSOR if ACPI
        help
           This driver provides a P state for Intel core processors.
 	  The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9e63fb1..0a9b6a09 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -11,8 +11,10 @@
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)	+= cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)	+= cpufreq_conservative.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)		+= cpufreq_governor.o
+obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET)	+= cpufreq_governor_attr_set.o
 
 obj-$(CONFIG_CPUFREQ_DT)		+= cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_PLATDEV)	+= cpufreq-dt-platdev.o
 
 ##################################################################################
 # x86 drivers.
@@ -53,7 +55,6 @@
 obj-$(CONFIG_UX500_SOC_DB8500)		+= dbx500-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)	+= exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)	+= highbank-cpufreq.o
-obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ)	+= hisi-acpu-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)		+= imx6q-cpufreq.o
 obj-$(CONFIG_ARM_INTEGRATOR)		+= integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
@@ -78,6 +79,7 @@
 obj-$(CONFIG_ARM_TEGRA124_CPUFREQ)	+= tegra124-cpufreq.o
 obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ)	+= vexpress-spc-cpufreq.o
 obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
+obj-$(CONFIG_MACH_MVEBU_V7)		+= mvebu-cpufreq.o
 
 
 ##################################################################################
@@ -100,7 +102,7 @@
 obj-$(CONFIG_ETRAXFS)			+= cris-etraxfs-cpufreq.o
 obj-$(CONFIG_IA64_ACPI_CPUFREQ)		+= ia64-acpi-cpufreq.o
 obj-$(CONFIG_LOONGSON2_CPUFREQ)		+= loongson2_cpufreq.o
-obj-$(CONFIG_LOONGSON1_CPUFREQ)		+= ls1x-cpufreq.o
+obj-$(CONFIG_LOONGSON1_CPUFREQ)		+= loongson1-cpufreq.o
 obj-$(CONFIG_SH_CPU_FREQ)		+= sh-cpufreq.o
 obj-$(CONFIG_SPARC_US2E_CPUFREQ)	+= sparc-us2e-cpufreq.o
 obj-$(CONFIG_SPARC_US3_CPUFREQ)		+= sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index fb57121..32a1505 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -25,6 +25,8 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -50,8 +52,6 @@
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
-#define PFX "acpi-cpufreq: "
-
 enum {
 	UNDEFINED_CAPABLE = 0,
 	SYSTEM_INTEL_MSR_CAPABLE,
@@ -65,7 +65,6 @@
 #define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
 struct acpi_cpufreq_data {
-	struct cpufreq_frequency_table *freq_table;
 	unsigned int resume;
 	unsigned int cpu_feature;
 	unsigned int acpi_perf_cpu;
@@ -200,8 +199,9 @@
 	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
 }
 
-static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
+static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
 {
+	struct acpi_cpufreq_data *data = policy->driver_data;
 	struct acpi_processor_performance *perf;
 	int i;
 
@@ -209,13 +209,14 @@
 
 	for (i = 0; i < perf->state_count; i++) {
 		if (value == perf->states[i].status)
-			return data->freq_table[i].frequency;
+			return policy->freq_table[i].frequency;
 	}
 	return 0;
 }
 
-static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
+static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
 {
+	struct acpi_cpufreq_data *data = policy->driver_data;
 	struct cpufreq_frequency_table *pos;
 	struct acpi_processor_performance *perf;
 
@@ -226,20 +227,22 @@
 
 	perf = to_perf_data(data);
 
-	cpufreq_for_each_entry(pos, data->freq_table)
+	cpufreq_for_each_entry(pos, policy->freq_table)
 		if (msr == perf->states[pos->driver_data].status)
 			return pos->frequency;
-	return data->freq_table[0].frequency;
+	return policy->freq_table[0].frequency;
 }
 
-static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
+static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
 {
+	struct acpi_cpufreq_data *data = policy->driver_data;
+
 	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 	case SYSTEM_AMD_MSR_CAPABLE:
-		return extract_msr(val, data);
+		return extract_msr(policy, val);
 	case SYSTEM_IO_CAPABLE:
-		return extract_io(val, data);
+		return extract_io(policy, val);
 	default:
 		return 0;
 	}
@@ -374,11 +377,11 @@
 		return 0;
 
 	data = policy->driver_data;
-	if (unlikely(!data || !data->freq_table))
+	if (unlikely(!data || !policy->freq_table))
 		return 0;
 
-	cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
-	freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
+	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
@@ -392,14 +395,15 @@
 	return freq;
 }
 
-static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
-				struct acpi_cpufreq_data *data)
+static unsigned int check_freqs(struct cpufreq_policy *policy,
+				const struct cpumask *mask, unsigned int freq)
 {
+	struct acpi_cpufreq_data *data = policy->driver_data;
 	unsigned int cur_freq;
 	unsigned int i;
 
 	for (i = 0; i < 100; i++) {
-		cur_freq = extract_freq(get_cur_val(mask, data), data);
+		cur_freq = extract_freq(policy, get_cur_val(mask, data));
 		if (cur_freq == freq)
 			return 1;
 		udelay(10);
@@ -416,12 +420,12 @@
 	unsigned int next_perf_state = 0; /* Index into perf table */
 	int result = 0;
 
-	if (unlikely(data == NULL || data->freq_table == NULL)) {
+	if (unlikely(!data)) {
 		return -ENODEV;
 	}
 
 	perf = to_perf_data(data);
-	next_perf_state = data->freq_table[index].driver_data;
+	next_perf_state = policy->freq_table[index].driver_data;
 	if (perf->state == next_perf_state) {
 		if (unlikely(data->resume)) {
 			pr_debug("Called after resume, resetting to P%d\n",
@@ -444,8 +448,8 @@
 	drv_write(data, mask, perf->states[next_perf_state].control);
 
 	if (acpi_pstate_strict) {
-		if (!check_freqs(mask, data->freq_table[index].frequency,
-					data)) {
+		if (!check_freqs(policy, mask,
+				 policy->freq_table[index].frequency)) {
 			pr_debug("acpi_cpufreq_target failed (%d)\n",
 				policy->cpu);
 			result = -EAGAIN;
@@ -458,6 +462,43 @@
 	return result;
 }
 
+unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+				      unsigned int target_freq)
+{
+	struct acpi_cpufreq_data *data = policy->driver_data;
+	struct acpi_processor_performance *perf;
+	struct cpufreq_frequency_table *entry;
+	unsigned int next_perf_state, next_freq, freq;
+
+	/*
+	 * Find the closest frequency above target_freq.
+	 *
+	 * The table is sorted in the reverse order with respect to the
+	 * frequency and all of the entries are valid (see the initialization).
+	 */
+	entry = policy->freq_table;
+	do {
+		entry++;
+		freq = entry->frequency;
+	} while (freq >= target_freq && freq != CPUFREQ_TABLE_END);
+	entry--;
+	next_freq = entry->frequency;
+	next_perf_state = entry->driver_data;
+
+	perf = to_perf_data(data);
+	if (perf->state == next_perf_state) {
+		if (unlikely(data->resume))
+			data->resume = 0;
+		else
+			return next_freq;
+	}
+
+	data->cpu_freq_write(&perf->control_register,
+			     perf->states[next_perf_state].control);
+	perf->state = next_perf_state;
+	return next_freq;
+}
+
 static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
@@ -611,10 +652,7 @@
 		if ((c->x86 == 15) &&
 		    (c->x86_model == 6) &&
 		    (c->x86_mask == 8)) {
-			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
-			    "Xeon(R) 7100 Errata AL30, processors may "
-			    "lock up on frequency changes: disabling "
-			    "acpi-cpufreq.\n");
+			pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
 			return -ENODEV;
 		    }
 		}
@@ -631,6 +669,7 @@
 	unsigned int result = 0;
 	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
 	struct acpi_processor_performance *perf;
+	struct cpufreq_frequency_table *freq_table;
 #ifdef CONFIG_SMP
 	static int blacklisted;
 #endif
@@ -690,7 +729,7 @@
 		cpumask_copy(data->freqdomain_cpus,
 			     topology_sibling_cpumask(cpu));
 		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
-		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+		pr_info_once("overriding BIOS provided _PSD data\n");
 	}
 #endif
 
@@ -742,9 +781,9 @@
 		goto err_unreg;
 	}
 
-	data->freq_table = kzalloc(sizeof(*data->freq_table) *
+	freq_table = kzalloc(sizeof(*freq_table) *
 		    (perf->state_count+1), GFP_KERNEL);
-	if (!data->freq_table) {
+	if (!freq_table) {
 		result = -ENOMEM;
 		goto err_unreg;
 	}
@@ -762,30 +801,29 @@
 	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
 	    policy->cpuinfo.transition_latency > 20 * 1000) {
 		policy->cpuinfo.transition_latency = 20 * 1000;
-		printk_once(KERN_INFO
-			    "P-state transition latency capped at 20 uS\n");
+		pr_info_once("P-state transition latency capped at 20 uS\n");
 	}
 
 	/* table init */
 	for (i = 0; i < perf->state_count; i++) {
 		if (i > 0 && perf->states[i].core_frequency >=
-		    data->freq_table[valid_states-1].frequency / 1000)
+		    freq_table[valid_states-1].frequency / 1000)
 			continue;
 
-		data->freq_table[valid_states].driver_data = i;
-		data->freq_table[valid_states].frequency =
+		freq_table[valid_states].driver_data = i;
+		freq_table[valid_states].frequency =
 		    perf->states[i].core_frequency * 1000;
 		valid_states++;
 	}
-	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
 	perf->state = 0;
 
-	result = cpufreq_table_validate_and_show(policy, data->freq_table);
+	result = cpufreq_table_validate_and_show(policy, freq_table);
 	if (result)
 		goto err_freqfree;
 
 	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
-		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+		pr_warn(FW_WARN "P-state 0 is not max freq\n");
 
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
@@ -821,10 +859,13 @@
 	 */
 	data->resume = 1;
 
+	policy->fast_switch_possible = !acpi_pstate_strict &&
+		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);
+
 	return result;
 
 err_freqfree:
-	kfree(data->freq_table);
+	kfree(freq_table);
 err_unreg:
 	acpi_processor_unregister_performance(cpu);
 err_free_mask:
@@ -842,13 +883,12 @@
 
 	pr_debug("acpi_cpufreq_cpu_exit\n");
 
-	if (data) {
-		policy->driver_data = NULL;
-		acpi_processor_unregister_performance(data->acpi_perf_cpu);
-		free_cpumask_var(data->freqdomain_cpus);
-		kfree(data->freq_table);
-		kfree(data);
-	}
+	policy->fast_switch_possible = false;
+	policy->driver_data = NULL;
+	acpi_processor_unregister_performance(data->acpi_perf_cpu);
+	free_cpumask_var(data->freqdomain_cpus);
+	kfree(policy->freq_table);
+	kfree(data);
 
 	return 0;
 }
@@ -876,6 +916,7 @@
 static struct cpufreq_driver acpi_cpufreq_driver = {
 	.verify		= cpufreq_generic_frequency_table_verify,
 	.target_index	= acpi_cpufreq_target,
+	.fast_switch	= acpi_cpufreq_fast_switch,
 	.bios_limit	= acpi_processor_get_bios_limit,
 	.init		= acpi_cpufreq_cpu_init,
 	.exit		= acpi_cpufreq_cpu_exit,
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index c251247..4180422 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -298,7 +298,8 @@
 	return 0;
 }
 
-static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+					    const struct cpumask *cpumask)
 {
 	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
 
@@ -308,11 +309,12 @@
 	clk_put(clk[cluster]);
 	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
 	if (arm_bL_ops->free_opp_table)
-		arm_bL_ops->free_opp_table(cpu_dev);
+		arm_bL_ops->free_opp_table(cpumask);
 	dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
 }
 
-static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+					   const struct cpumask *cpumask)
 {
 	u32 cluster = cpu_to_cluster(cpu_dev->id);
 	int i;
@@ -321,7 +323,7 @@
 		return;
 
 	if (cluster < MAX_CLUSTERS)
-		return _put_cluster_clk_and_freq_table(cpu_dev);
+		return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
 
 	for_each_present_cpu(i) {
 		struct device *cdev = get_cpu_device(i);
@@ -330,14 +332,15 @@
 			return;
 		}
 
-		_put_cluster_clk_and_freq_table(cdev);
+		_put_cluster_clk_and_freq_table(cdev, cpumask);
 	}
 
 	/* free virtual table */
 	kfree(freq_table[cluster]);
 }
 
-static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+					   const struct cpumask *cpumask)
 {
 	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
 	int ret;
@@ -345,7 +348,7 @@
 	if (freq_table[cluster])
 		return 0;
 
-	ret = arm_bL_ops->init_opp_table(cpu_dev);
+	ret = arm_bL_ops->init_opp_table(cpumask);
 	if (ret) {
 		dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
 				__func__, cpu_dev->id, ret);
@@ -374,14 +377,15 @@
 
 free_opp_table:
 	if (arm_bL_ops->free_opp_table)
-		arm_bL_ops->free_opp_table(cpu_dev);
+		arm_bL_ops->free_opp_table(cpumask);
 out:
 	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
 			cluster);
 	return ret;
 }
 
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+					  const struct cpumask *cpumask)
 {
 	u32 cluster = cpu_to_cluster(cpu_dev->id);
 	int i, ret;
@@ -390,7 +394,7 @@
 		return 0;
 
 	if (cluster < MAX_CLUSTERS) {
-		ret = _get_cluster_clk_and_freq_table(cpu_dev);
+		ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
 		if (ret)
 			atomic_dec(&cluster_usage[cluster]);
 		return ret;
@@ -407,7 +411,7 @@
 			return -ENODEV;
 		}
 
-		ret = _get_cluster_clk_and_freq_table(cdev);
+		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
 		if (ret)
 			goto put_clusters;
 	}
@@ -433,7 +437,7 @@
 			return -ENODEV;
 		}
 
-		_put_cluster_clk_and_freq_table(cdev);
+		_put_cluster_clk_and_freq_table(cdev, cpumask);
 	}
 
 	atomic_dec(&cluster_usage[cluster]);
@@ -455,18 +459,6 @@
 		return -ENODEV;
 	}
 
-	ret = get_cluster_clk_and_freq_table(cpu_dev);
-	if (ret)
-		return ret;
-
-	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
-	if (ret) {
-		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
-				policy->cpu, cur_cluster);
-		put_cluster_clk_and_freq_table(cpu_dev);
-		return ret;
-	}
-
 	if (cur_cluster < MAX_CLUSTERS) {
 		int cpu;
 
@@ -479,6 +471,18 @@
 		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
 	}
 
+	ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+	if (ret)
+		return ret;
+
+	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
+	if (ret) {
+		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
+			policy->cpu, cur_cluster);
+		put_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+		return ret;
+	}
+
 	if (arm_bL_ops->get_transition_latency)
 		policy->cpuinfo.transition_latency =
 			arm_bL_ops->get_transition_latency(cpu_dev);
@@ -509,7 +513,7 @@
 		return -ENODEV;
 	}
 
-	put_cluster_clk_and_freq_table(cpu_dev);
+	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
 	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
 
 	return 0;
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index b88889d..184d7c3 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -30,11 +30,11 @@
 	 * This must set opp table for cpu_dev in a similar way as done by
 	 * dev_pm_opp_of_add_table().
 	 */
-	int (*init_opp_table)(struct device *cpu_dev);
+	int (*init_opp_table)(const struct cpumask *cpumask);
 
 	/* Optional */
 	int (*get_transition_latency)(struct device *cpu_dev);
-	void (*free_opp_table)(struct device *cpu_dev);
+	void (*free_opp_table)(const struct cpumask *cpumask);
 };
 
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 16ddeef..39b3f51 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -43,23 +43,6 @@
 	return np;
 }
 
-static int dt_init_opp_table(struct device *cpu_dev)
-{
-	struct device_node *np;
-	int ret;
-
-	np = of_node_get(cpu_dev->of_node);
-	if (!np) {
-		pr_err("failed to find cpu%d node\n", cpu_dev->id);
-		return -ENOENT;
-	}
-
-	ret = dev_pm_opp_of_add_table(cpu_dev);
-	of_node_put(np);
-
-	return ret;
-}
-
 static int dt_get_transition_latency(struct device *cpu_dev)
 {
 	struct device_node *np;
@@ -81,8 +64,8 @@
 static struct cpufreq_arm_bL_ops dt_bL_ops = {
 	.name	= "dt-bl",
 	.get_transition_latency = dt_get_transition_latency,
-	.init_opp_table = dt_init_opp_table,
-	.free_opp_table = dev_pm_opp_of_remove_table,
+	.init_opp_table = dev_pm_opp_of_cpumask_add_table,
+	.free_opp_table = dev_pm_opp_of_cpumask_remove_table,
 };
 
 static int generic_bL_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 7c0bdfb..8882b8e 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -173,4 +173,25 @@
 	return -ENODEV;
 }
 
+static void __exit cppc_cpufreq_exit(void)
+{
+	struct cpudata *cpu;
+	int i;
+
+	cpufreq_unregister_driver(&cppc_cpufreq_driver);
+
+	for_each_possible_cpu(i) {
+		cpu = all_cpu_data[i];
+		free_cpumask_var(cpu->shared_cpu_map);
+		kfree(cpu);
+	}
+
+	kfree(all_cpu_data);
+}
+
+module_exit(cppc_cpufreq_exit);
+MODULE_AUTHOR("Ashwin Chaugule");
+MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
+MODULE_LICENSE("GPL");
+
 late_initcall(cppc_cpufreq_init);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
new file mode 100644
index 0000000..3646b14
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static const struct of_device_id machines[] __initconst = {
+	{ .compatible = "allwinner,sun4i-a10", },
+	{ .compatible = "allwinner,sun5i-a10s", },
+	{ .compatible = "allwinner,sun5i-a13", },
+	{ .compatible = "allwinner,sun5i-r8", },
+	{ .compatible = "allwinner,sun6i-a31", },
+	{ .compatible = "allwinner,sun6i-a31s", },
+	{ .compatible = "allwinner,sun7i-a20", },
+	{ .compatible = "allwinner,sun8i-a23", },
+	{ .compatible = "allwinner,sun8i-a33", },
+	{ .compatible = "allwinner,sun8i-a83t", },
+	{ .compatible = "allwinner,sun8i-h3", },
+
+	{ .compatible = "hisilicon,hi6220", },
+
+	{ .compatible = "fsl,imx27", },
+	{ .compatible = "fsl,imx51", },
+	{ .compatible = "fsl,imx53", },
+	{ .compatible = "fsl,imx7d", },
+
+	{ .compatible = "marvell,berlin", },
+
+	{ .compatible = "samsung,exynos3250", },
+	{ .compatible = "samsung,exynos4210", },
+	{ .compatible = "samsung,exynos4212", },
+	{ .compatible = "samsung,exynos4412", },
+	{ .compatible = "samsung,exynos5250", },
+#ifndef CONFIG_BL_SWITCHER
+	{ .compatible = "samsung,exynos5420", },
+	{ .compatible = "samsung,exynos5800", },
+#endif
+
+	{ .compatible = "renesas,emev2", },
+	{ .compatible = "renesas,r7s72100", },
+	{ .compatible = "renesas,r8a73a4", },
+	{ .compatible = "renesas,r8a7740", },
+	{ .compatible = "renesas,r8a7778", },
+	{ .compatible = "renesas,r8a7779", },
+	{ .compatible = "renesas,r8a7790", },
+	{ .compatible = "renesas,r8a7791", },
+	{ .compatible = "renesas,r8a7793", },
+	{ .compatible = "renesas,r8a7794", },
+	{ .compatible = "renesas,sh73a0", },
+
+	{ .compatible = "rockchip,rk2928", },
+	{ .compatible = "rockchip,rk3036", },
+	{ .compatible = "rockchip,rk3066a", },
+	{ .compatible = "rockchip,rk3066b", },
+	{ .compatible = "rockchip,rk3188", },
+	{ .compatible = "rockchip,rk3228", },
+	{ .compatible = "rockchip,rk3288", },
+	{ .compatible = "rockchip,rk3366", },
+	{ .compatible = "rockchip,rk3368", },
+	{ .compatible = "rockchip,rk3399", },
+
+	{ .compatible = "sigma,tango4" },
+
+	{ .compatible = "ti,omap2", },
+	{ .compatible = "ti,omap3", },
+	{ .compatible = "ti,omap4", },
+	{ .compatible = "ti,omap5", },
+
+	{ .compatible = "xlnx,zynq-7000", },
+};
+
+static int __init cpufreq_dt_platdev_init(void)
+{
+	struct device_node *np = of_find_node_by_path("/");
+
+	if (!np)
+		return -ENODEV;
+
+	if (!of_match_node(machines, np))
+		return -ENODEV;
+
+	of_node_put(of_root);
+
+	return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
+							       NULL, 0));
+}
+device_initcall(cpufreq_dt_platdev_init);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f951f91..3957de8 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -4,9 +4,6 @@
  * Copyright (C) 2014 Linaro.
  * Viresh Kumar <viresh.kumar@linaro.org>
  *
- * The OPP code in function set_target() is reused from
- * drivers/cpufreq/omap-cpufreq.c
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -18,7 +15,6 @@
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
 #include <linux/cpufreq.h>
-#include <linux/cpufreq-dt.h>
 #include <linux/cpumask.h>
 #include <linux/err.h>
 #include <linux/module.h>
@@ -150,7 +146,7 @@
 	struct clk *cpu_clk;
 	struct dev_pm_opp *suspend_opp;
 	unsigned int transition_latency;
-	bool opp_v1 = false;
+	bool fallback = false;
 	const char *name;
 	int ret;
 
@@ -170,14 +166,16 @@
 	/* Get OPP-sharing information from "operating-points-v2" bindings */
 	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
 	if (ret) {
+		if (ret != -ENOENT)
+			goto out_put_clk;
+
 		/*
 		 * operating-points-v2 not supported, fallback to old method of
-		 * finding shared-OPPs for backward compatibility.
+		 * finding shared-OPPs for backward compatibility if the
+		 * platform hasn't set sharing CPUs.
 		 */
-		if (ret == -ENOENT)
-			opp_v1 = true;
-		else
-			goto out_put_clk;
+		if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
+			fallback = true;
 	}
 
 	/*
@@ -217,11 +215,8 @@
 		goto out_free_opp;
 	}
 
-	if (opp_v1) {
-		struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
-
-		if (!pd || !pd->independent_clocks)
-			cpumask_setall(policy->cpus);
+	if (fallback) {
+		cpumask_setall(policy->cpus);
 
 		/*
 		 * OPP tables are initialized only for policy->cpu, do it for
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index db69eeb..5503d49 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -7,6 +7,8 @@
  *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -56,8 +58,6 @@
 MODULE_PARM_DESC(min_fsb,
 		"Minimum FSB to use, if not defined: current FSB - 50");
 
-#define PFX "cpufreq-nforce2: "
-
 /**
  * nforce2_calc_fsb - calculate FSB
  * @pll: PLL value
@@ -174,13 +174,13 @@
 	int pll = 0;
 
 	if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
-		printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb);
+		pr_err("FSB %d is out of range!\n", fsb);
 		return -EINVAL;
 	}
 
 	tfsb = nforce2_fsb_read(0);
 	if (!tfsb) {
-		printk(KERN_ERR PFX "Error while reading the FSB\n");
+		pr_err("Error while reading the FSB\n");
 		return -EINVAL;
 	}
 
@@ -276,8 +276,7 @@
 	/* local_irq_save(flags); */
 
 	if (nforce2_set_fsb(target_fsb) < 0)
-		printk(KERN_ERR PFX "Changing FSB to %d failed\n",
-			target_fsb);
+		pr_err("Changing FSB to %d failed\n", target_fsb);
 	else
 		pr_debug("Changed FSB successfully to %d\n",
 			target_fsb);
@@ -325,8 +324,7 @@
 	/* FIX: Get FID from CPU */
 	if (!fid) {
 		if (!cpu_khz) {
-			printk(KERN_WARNING PFX
-			"cpu_khz not set, can't calculate multiplier!\n");
+			pr_warn("cpu_khz not set, can't calculate multiplier!\n");
 			return -ENODEV;
 		}
 
@@ -341,8 +339,8 @@
 		}
 	}
 
-	printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb,
-	       fid / 10, fid % 10);
+	pr_info("FSB currently at %i MHz, FID %d.%d\n",
+		fsb, fid / 10, fid % 10);
 
 	/* Set maximum FSB to FSB at boot time */
 	max_fsb = nforce2_fsb_read(1);
@@ -401,11 +399,9 @@
 	if (nforce2_dev == NULL)
 		return -ENODEV;
 
-	printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n",
-	       nforce2_dev->revision);
-	printk(KERN_INFO PFX
-	       "FSB changing is maybe unstable and can lead to "
-	       "crashes and data loss.\n");
+	pr_info("Detected nForce2 chipset revision %X\n",
+		nforce2_dev->revision);
+	pr_info("FSB changing is maybe unstable and can lead to crashes and data loss\n");
 
 	return 0;
 }
@@ -423,7 +419,7 @@
 
 	/* detect chipset */
 	if (nforce2_detect_chipset()) {
-		printk(KERN_INFO PFX "No nForce2 chipset.\n");
+		pr_info("No nForce2 chipset\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b87596b..035513b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -78,6 +78,11 @@
 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
 static int cpufreq_start_governor(struct cpufreq_policy *policy);
 
+static inline int cpufreq_exit_governor(struct cpufreq_policy *policy)
+{
+	return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+}
+
 /**
  * Two notifier lists: the "policy" list is involved in the
  * validation process for a new CPU frequency policy; the
@@ -429,6 +434,73 @@
 }
 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
 
+/*
+ * Fast frequency switching status count.  Positive means "enabled", negative
+ * means "disabled" and 0 means "not decided yet".
+ */
+static int cpufreq_fast_switch_count;
+static DEFINE_MUTEX(cpufreq_fast_switch_lock);
+
+static void cpufreq_list_transition_notifiers(void)
+{
+	struct notifier_block *nb;
+
+	pr_info("Registered transition notifiers:\n");
+
+	mutex_lock(&cpufreq_transition_notifier_list.mutex);
+
+	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
+		pr_info("%pF\n", nb->notifier_call);
+
+	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
+}
+
+/**
+ * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
+ * @policy: cpufreq policy to enable fast frequency switching for.
+ *
+ * Try to enable fast frequency switching for @policy.
+ *
+ * The attempt will fail if there is at least one transition notifier registered
+ * at this point, as fast frequency switching is quite fundamentally at odds
+ * with transition notifiers.  Thus if successful, it will make registration of
+ * transition notifiers fail going forward.
+ */
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
+{
+	lockdep_assert_held(&policy->rwsem);
+
+	if (!policy->fast_switch_possible)
+		return;
+
+	mutex_lock(&cpufreq_fast_switch_lock);
+	if (cpufreq_fast_switch_count >= 0) {
+		cpufreq_fast_switch_count++;
+		policy->fast_switch_enabled = true;
+	} else {
+		pr_warn("CPU%u: Fast frequency switching not enabled\n",
+			policy->cpu);
+		cpufreq_list_transition_notifiers();
+	}
+	mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
+
+/**
+ * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
+ * @policy: cpufreq policy to disable fast frequency switching for.
+ */
+void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
+{
+	mutex_lock(&cpufreq_fast_switch_lock);
+	if (policy->fast_switch_enabled) {
+		policy->fast_switch_enabled = false;
+		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
+			cpufreq_fast_switch_count--;
+	}
+	mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
 
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
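
The enable/disable helpers above gate the new fast-switch path: it can only be enabled while no transition notifiers are registered, and the driver's ->fast_switch() callback is invoked without sleeping. A hypothetical governor hook built on top of it might look roughly as follows; this is an illustrative sketch only, the function name example_fast_update and the bookkeeping of policy->cur are assumptions, while cpufreq_enable_fast_switch() and cpufreq_driver_fast_switch() come from this patch:

/*
 * Illustrative sketch: a governor that called cpufreq_enable_fast_switch()
 * for the policy during initialization may request a frequency change from
 * a non-sleeping (e.g. scheduler) context along these lines.
 */
static void example_fast_update(struct cpufreq_policy *policy,
				unsigned int target_freq)
{
	unsigned int freq;

	if (!policy->fast_switch_enabled)
		return;

	/* Invokes the driver's ->fast_switch() callback; must not sleep. */
	freq = cpufreq_driver_fast_switch(policy, target_freq);
	if (freq)
		policy->cur = freq;	/* assumed bookkeeping of the new frequency */
}
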
@@ -1248,26 +1320,24 @@
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
+	struct cpufreq_policy *policy;
 	unsigned cpu = dev->id;
-	int ret;
 
 	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
 
-	if (cpu_online(cpu)) {
-		ret = cpufreq_online(cpu);
-	} else {
-		/*
-		 * A hotplug notifier will follow and we will handle it as CPU
-		 * online then.  For now, just create the sysfs link, unless
-		 * there is no policy or the link is already present.
-		 */
-		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+	if (cpu_online(cpu))
+		return cpufreq_online(cpu);
 
-		ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
-			? add_cpu_dev_symlink(policy, cpu) : 0;
-	}
+	/*
+	 * A hotplug notifier will follow and we will handle it as CPU online
+	 * then.  For now, just create the sysfs link, unless there is no policy
+	 * or the link is already present.
+	 */
+	policy = per_cpu(cpufreq_cpu_data, cpu);
+	if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+		return 0;
 
-	return ret;
+	return add_cpu_dev_symlink(policy, cpu);
 }
 
 static void cpufreq_offline(unsigned int cpu)
@@ -1319,7 +1389,7 @@
 
 	/* If cpu is last user of policy, free policy */
 	if (has_target()) {
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		ret = cpufreq_exit_governor(policy);
 		if (ret)
 			pr_err("%s: Failed to exit governor\n", __func__);
 	}
@@ -1447,8 +1517,12 @@
 
 	ret_freq = cpufreq_driver->get(policy->cpu);
 
-	/* Updating inactive policies is invalid, so avoid doing that. */
-	if (unlikely(policy_is_inactive(policy)))
+	/*
+	 * Updating inactive policies is invalid, so avoid doing that.  Also
+	 * if fast frequency switching is used with the given policy, the check
+	 * against policy->cur is pointless, so skip it in that case too.
+	 */
+	if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
 		return ret_freq;
 
 	if (ret_freq && policy->cur &&
@@ -1491,6 +1565,9 @@
 {
 	unsigned int new_freq;
 
+	if (cpufreq_suspended)
+		return 0;
+
 	new_freq = cpufreq_driver->get(policy->cpu);
 	if (!new_freq)
 		return 0;
@@ -1554,21 +1631,25 @@
 	if (!cpufreq_driver)
 		return;
 
-	if (!has_target())
+	if (!has_target() && !cpufreq_driver->suspend)
 		goto suspend;
 
 	pr_debug("%s: Suspending Governors\n", __func__);
 
 	for_each_active_policy(policy) {
-		down_write(&policy->rwsem);
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-		up_write(&policy->rwsem);
+		if (has_target()) {
+			down_write(&policy->rwsem);
+			ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+			up_write(&policy->rwsem);
 
-		if (ret)
-			pr_err("%s: Failed to stop governor for policy: %p\n",
-				__func__, policy);
-		else if (cpufreq_driver->suspend
-		    && cpufreq_driver->suspend(policy))
+			if (ret) {
+				pr_err("%s: Failed to stop governor for policy: %p\n",
+					__func__, policy);
+				continue;
+			}
+		}
+
+		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
 				policy);
 	}
@@ -1593,7 +1674,7 @@
 
 	cpufreq_suspended = false;
 
-	if (!has_target())
+	if (!has_target() && !cpufreq_driver->resume)
 		return;
 
 	pr_debug("%s: Resuming Governors\n", __func__);
@@ -1602,7 +1683,7 @@
 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
 			pr_err("%s: Failed to resume driver: %p\n", __func__,
 				policy);
-		} else {
+		} else if (has_target()) {
 			down_write(&policy->rwsem);
 			ret = cpufreq_start_governor(policy);
 			up_write(&policy->rwsem);
@@ -1672,8 +1753,18 @@
 
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
+		mutex_lock(&cpufreq_fast_switch_lock);
+
+		if (cpufreq_fast_switch_count > 0) {
+			mutex_unlock(&cpufreq_fast_switch_lock);
+			return -EBUSY;
+		}
 		ret = srcu_notifier_chain_register(
 				&cpufreq_transition_notifier_list, nb);
+		if (!ret)
+			cpufreq_fast_switch_count--;
+
+		mutex_unlock(&cpufreq_fast_switch_lock);
 		break;
 	case CPUFREQ_POLICY_NOTIFIER:
 		ret = blocking_notifier_chain_register(
@@ -1706,8 +1797,14 @@
 
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
+		mutex_lock(&cpufreq_fast_switch_lock);
+
 		ret = srcu_notifier_chain_unregister(
 				&cpufreq_transition_notifier_list, nb);
+		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
+			cpufreq_fast_switch_count++;
+
+		mutex_unlock(&cpufreq_fast_switch_lock);
 		break;
 	case CPUFREQ_POLICY_NOTIFIER:
 		ret = blocking_notifier_chain_unregister(
@@ -1726,6 +1823,37 @@
  *                              GOVERNORS                            *
  *********************************************************************/
 
+/**
+ * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
+ * @policy: cpufreq policy to switch the frequency for.
+ * @target_freq: New frequency to set (may be approximate).
+ *
+ * Carry out a fast frequency switch without sleeping.
+ *
+ * The driver's ->fast_switch() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select the minimum available frequency greater than or
+ * equal to @target_freq (CPUFREQ_RELATION_L).
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same policy and that it will never be called in
+ * parallel with either ->target() or ->target_index() for the same policy.
+ *
+ * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
+ * callback to indicate an error condition, the hardware configuration must be
+ * preserved.
+ */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+					unsigned int target_freq)
+{
+	target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+	return cpufreq_driver->fast_switch(policy, target_freq);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
+
 /* Must set freqs->new to intermediate frequency */
 static int __target_intermediate(struct cpufreq_policy *policy,
 				 struct cpufreq_freqs *freqs, int index)
@@ -2101,7 +2229,7 @@
 			return ret;
 		}
 
-		ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		ret = cpufreq_exit_governor(policy);
 		if (ret) {
 			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
 			       __func__, old_gov->name, ret);
@@ -2118,7 +2246,7 @@
 			pr_debug("cpufreq: governor change\n");
 			return 0;
 		}
-		cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		cpufreq_exit_governor(policy);
 	}
 
 	/* new governor failed, so re-start old one */
@@ -2186,16 +2314,13 @@
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
 		cpufreq_online(cpu);
 		break;
 
 	case CPU_DOWN_PREPARE:
 		cpufreq_offline(cpu);
 		break;
-
-	case CPU_DOWN_FAILED:
-		cpufreq_online(cpu);
-		break;
 	}
 	return NOTIFY_OK;
 }
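
The kernel-doc above spells out the calling convention for the new fast-switch path only in prose. Below is a minimal, illustrative sketch of how a governor might use it; it is not part of the patch, the helper name my_gov_fast_adjust() is hypothetical, and only cpufreq_driver_fast_switch(), policy->fast_switch_enabled and CPUFREQ_ENTRY_INVALID come from the code above.

/*
 * Illustrative sketch, not part of the patch: apply a frequency via the
 * fast-switch path from a non-sleeping context (e.g. a scheduler
 * utilization-update callback running in an RCU-sched read-side section).
 */
static void my_gov_fast_adjust(struct cpufreq_policy *policy,
			       unsigned int target_freq)
{
	unsigned int freq;

	if (!policy->fast_switch_enabled)
		return;	/* fall back to the regular ->target() path */

	/* Must not sleep; the driver picks a frequency >= target_freq. */
	freq = cpufreq_driver_fast_switch(policy, target_freq);
	if (freq == CPUFREQ_ENTRY_INVALID)
		return;	/* driver error, hardware state left unchanged */

	policy->cur = freq;
}
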
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index bf4913f..316df24 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -129,9 +129,10 @@
 /************************** sysfs interface ************************/
 static struct dbs_governor cs_dbs_gov;
 
-static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
-		const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+					  const char *buf, size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -143,9 +144,10 @@
 	return count;
 }
 
-static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
-		size_t count)
+static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+				  const char *buf, size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
@@ -158,9 +160,10 @@
 	return count;
 }
 
-static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
-		size_t count)
+static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
+				    const char *buf, size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
@@ -175,9 +178,10 @@
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
-		const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+				      const char *buf, size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	unsigned int input;
 	int ret;
 
@@ -199,9 +203,10 @@
 	return count;
 }
 
-static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
-		size_t count)
+static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
+			       size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 10a5cfe..be498d5 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -43,9 +43,10 @@
  * This must be called with dbs_data->mutex held, otherwise traversing
  * policy_dbs_list isn't safe.
  */
-ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
 			    size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	struct policy_dbs_info *policy_dbs;
 	unsigned int rate;
 	int ret;
@@ -59,7 +60,7 @@
 	 * We are operating under dbs_data->mutex and so the list and its
 	 * entries can't be freed concurrently.
 	 */
-	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
 		mutex_lock(&policy_dbs->timer_mutex);
 		/*
 		 * On 32-bit architectures this may race with the
@@ -96,13 +97,13 @@
 {
 	struct policy_dbs_info *policy_dbs;
 
-	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
 		unsigned int j;
 
 		for_each_cpu(j, policy_dbs->policy->cpus) {
 			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
 
-			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
+			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
 								  dbs_data->io_is_busy);
 			if (dbs_data->ignore_nice_load)
 				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -111,54 +112,6 @@
 }
 EXPORT_SYMBOL_GPL(gov_update_cpu_data);
 
-static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
-{
-	return container_of(kobj, struct dbs_data, kobj);
-}
-
-static inline struct governor_attr *to_gov_attr(struct attribute *attr)
-{
-	return container_of(attr, struct governor_attr, attr);
-}
-
-static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
-			     char *buf)
-{
-	struct dbs_data *dbs_data = to_dbs_data(kobj);
-	struct governor_attr *gattr = to_gov_attr(attr);
-
-	return gattr->show(dbs_data, buf);
-}
-
-static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
-			      const char *buf, size_t count)
-{
-	struct dbs_data *dbs_data = to_dbs_data(kobj);
-	struct governor_attr *gattr = to_gov_attr(attr);
-	int ret = -EBUSY;
-
-	mutex_lock(&dbs_data->mutex);
-
-	if (dbs_data->usage_count)
-		ret = gattr->store(dbs_data, buf, count);
-
-	mutex_unlock(&dbs_data->mutex);
-
-	return ret;
-}
-
-/*
- * Sysfs Ops for accessing governor attributes.
- *
- * All show/store invocations for governor specific sysfs attributes, will first
- * call the below show/store callbacks and the attribute specific callback will
- * be called from within it.
- */
-static const struct sysfs_ops governor_sysfs_ops = {
-	.show	= governor_show,
-	.store	= governor_store,
-};
-
 unsigned int dbs_update(struct cpufreq_policy *policy)
 {
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -184,21 +137,17 @@
 	/* Get Absolute Load */
 	for_each_cpu(j, policy->cpus) {
 		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
-		u64 cur_wall_time, cur_idle_time;
-		unsigned int idle_time, wall_time;
+		u64 update_time, cur_idle_time;
+		unsigned int idle_time, time_elapsed;
 		unsigned int load;
 
-		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
+		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);
 
-		wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
-		j_cdbs->prev_cpu_wall = cur_wall_time;
+		time_elapsed = update_time - j_cdbs->prev_update_time;
+		j_cdbs->prev_update_time = update_time;
 
-		if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
-			idle_time = 0;
-		} else {
-			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
-			j_cdbs->prev_cpu_idle = cur_idle_time;
-		}
+		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+		j_cdbs->prev_cpu_idle = cur_idle_time;
 
 		if (ignore_nice) {
 			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -207,47 +156,62 @@
 			j_cdbs->prev_cpu_nice = cur_nice;
 		}
 
-		if (unlikely(!wall_time || wall_time < idle_time))
-			continue;
-
-		/*
-		 * If the CPU had gone completely idle, and a task just woke up
-		 * on this CPU now, it would be unfair to calculate 'load' the
-		 * usual way for this elapsed time-window, because it will show
-		 * near-zero load, irrespective of how CPU intensive that task
-		 * actually is. This is undesirable for latency-sensitive bursty
-		 * workloads.
-		 *
-		 * To avoid this, we reuse the 'load' from the previous
-		 * time-window and give this task a chance to start with a
-		 * reasonably high CPU frequency. (However, we shouldn't over-do
-		 * this copy, lest we get stuck at a high load (high frequency)
-		 * for too long, even when the current system load has actually
-		 * dropped down. So we perform the copy only once, upon the
-		 * first wake-up from idle.)
-		 *
-		 * Detecting this situation is easy: the governor's utilization
-		 * update handler would not have run during CPU-idle periods.
-		 * Hence, an unusually large 'wall_time' (as compared to the
-		 * sampling rate) indicates this scenario.
-		 *
-		 * prev_load can be zero in two cases and we must recalculate it
-		 * for both cases:
-		 * - during long idle intervals
-		 * - explicitly set to zero
-		 */
-		if (unlikely(wall_time > (2 * sampling_rate) &&
-			     j_cdbs->prev_load)) {
-			load = j_cdbs->prev_load;
-
+		if (unlikely(!time_elapsed)) {
 			/*
-			 * Perform a destructive copy, to ensure that we copy
-			 * the previous load only once, upon the first wake-up
-			 * from idle.
+			 * That can only happen when this function is called
+			 * twice in a row with a very short interval between the
+			 * calls, so the previous load value can be used then.
 			 */
+			load = j_cdbs->prev_load;
+		} else if (unlikely(time_elapsed > 2 * sampling_rate &&
+				    j_cdbs->prev_load)) {
+			/*
+			 * If the CPU had gone completely idle and a task has
+			 * just woken up on this CPU now, it would be unfair to
+			 * calculate 'load' the usual way for this elapsed
+			 * time-window, because it would show near-zero load,
+			 * irrespective of how CPU intensive that task actually
+			 * was. This is undesirable for latency-sensitive bursty
+			 * workloads.
+			 *
+			 * To avoid this, reuse the 'load' from the previous
+			 * time-window and give this task a chance to start with
+			 * a reasonably high CPU frequency. However, that
+			 * shouldn't be over-done, lest we get stuck at a high
+			 * load (high frequency) for too long, even when the
+			 * current system load has actually dropped down, so
+			 * clear prev_load to guarantee that the load will be
+			 * computed again next time.
+			 *
+			 * Detecting this situation is easy: the governor's
+			 * utilization update handler would not have run during
+			 * CPU-idle periods.  Hence, an unusually large
+			 * 'time_elapsed' (as compared to the sampling rate)
+			 * indicates this scenario.
+			 */
+			load = j_cdbs->prev_load;
 			j_cdbs->prev_load = 0;
 		} else {
-			load = 100 * (wall_time - idle_time) / wall_time;
+			if (time_elapsed >= idle_time) {
+				load = 100 * (time_elapsed - idle_time) / time_elapsed;
+			} else {
+				/*
+				 * That can happen if idle_time is returned by
+				 * get_cpu_idle_time_jiffy().  In that case
+				 * idle_time is roughly equal to the difference
+				 * between time_elapsed and "busy time" obtained
+				 * from CPU statistics.  Then, the "busy time"
+				 * can end up being greater than time_elapsed
+				 * (for example, if jiffies_64 and the CPU
+				 * statistics are updated by different CPUs),
+				 * so idle_time may in fact be negative.  That
+				 * means, though, that the CPU was busy all
+				 * the time (on the rough average) during the
+				 * last sampling interval and 100 can be
+				 * returned as the load.
+				 */
+				load = (int)idle_time < 0 ? 100 : 0;
+			}
 			j_cdbs->prev_load = load;
 		}
 
@@ -258,43 +222,6 @@
 }
 EXPORT_SYMBOL_GPL(dbs_update);
 
-static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
-				unsigned int delay_us)
-{
-	struct cpufreq_policy *policy = policy_dbs->policy;
-	int cpu;
-
-	gov_update_sample_delay(policy_dbs, delay_us);
-	policy_dbs->last_sample_time = 0;
-
-	for_each_cpu(cpu, policy->cpus) {
-		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
-
-		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
-	}
-}
-
-static inline void gov_clear_update_util(struct cpufreq_policy *policy)
-{
-	int i;
-
-	for_each_cpu(i, policy->cpus)
-		cpufreq_set_update_util_data(i, NULL);
-
-	synchronize_sched();
-}
-
-static void gov_cancel_work(struct cpufreq_policy *policy)
-{
-	struct policy_dbs_info *policy_dbs = policy->governor_data;
-
-	gov_clear_update_util(policy_dbs->policy);
-	irq_work_sync(&policy_dbs->irq_work);
-	cancel_work_sync(&policy_dbs->work);
-	atomic_set(&policy_dbs->work_count, 0);
-	policy_dbs->work_in_progress = false;
-}
-
 static void dbs_work_handler(struct work_struct *work)
 {
 	struct policy_dbs_info *policy_dbs;
@@ -382,6 +309,44 @@
 	irq_work_queue(&policy_dbs->irq_work);
 }
 
+static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
+				unsigned int delay_us)
+{
+	struct cpufreq_policy *policy = policy_dbs->policy;
+	int cpu;
+
+	gov_update_sample_delay(policy_dbs, delay_us);
+	policy_dbs->last_sample_time = 0;
+
+	for_each_cpu(cpu, policy->cpus) {
+		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
+
+		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
+					     dbs_update_util_handler);
+	}
+}
+
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
+{
+	int i;
+
+	for_each_cpu(i, policy->cpus)
+		cpufreq_remove_update_util_hook(i);
+
+	synchronize_sched();
+}
+
+static void gov_cancel_work(struct cpufreq_policy *policy)
+{
+	struct policy_dbs_info *policy_dbs = policy->governor_data;
+
+	gov_clear_update_util(policy_dbs->policy);
+	irq_work_sync(&policy_dbs->irq_work);
+	cancel_work_sync(&policy_dbs->work);
+	atomic_set(&policy_dbs->work_count, 0);
+	policy_dbs->work_in_progress = false;
+}
+
 static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
 						     struct dbs_governor *gov)
 {
@@ -404,7 +369,6 @@
 		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
 
 		j_cdbs->policy_dbs = policy_dbs;
-		j_cdbs->update_util.func = dbs_update_util_handler;
 	}
 	return policy_dbs;
 }
@@ -453,10 +417,7 @@
 		policy_dbs->dbs_data = dbs_data;
 		policy->governor_data = policy_dbs;
 
-		mutex_lock(&dbs_data->mutex);
-		dbs_data->usage_count++;
-		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
-		mutex_unlock(&dbs_data->mutex);
+		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
 		goto out;
 	}
 
@@ -466,8 +427,7 @@
 		goto free_policy_dbs_info;
 	}
 
-	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
-	mutex_init(&dbs_data->mutex);
+	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
 
 	ret = gov->init(dbs_data, !policy->governor->initialized);
 	if (ret)
@@ -487,14 +447,11 @@
 	if (!have_governor_per_policy())
 		gov->gdbs_data = dbs_data;
 
+	policy_dbs->dbs_data = dbs_data;
 	policy->governor_data = policy_dbs;
 
-	policy_dbs->dbs_data = dbs_data;
-	dbs_data->usage_count = 1;
-	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
-
 	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
-	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
+	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
 				   get_governor_parent_kobj(policy),
 				   "%s", gov->gov.name);
 	if (!ret)
@@ -523,29 +480,21 @@
 	struct dbs_governor *gov = dbs_governor_of(policy);
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
-	int count;
+	unsigned int count;
 
 	/* Protect gov->gdbs_data against concurrent updates. */
 	mutex_lock(&gov_dbs_data_mutex);
 
-	mutex_lock(&dbs_data->mutex);
-	list_del(&policy_dbs->list);
-	count = --dbs_data->usage_count;
-	mutex_unlock(&dbs_data->mutex);
+	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);
+
+	policy->governor_data = NULL;
 
 	if (!count) {
-		kobject_put(&dbs_data->kobj);
-
-		policy->governor_data = NULL;
-
 		if (!have_governor_per_policy())
 			gov->gdbs_data = NULL;
 
 		gov->exit(dbs_data, policy->governor->initialized == 1);
-		mutex_destroy(&dbs_data->mutex);
 		kfree(dbs_data);
-	} else {
-		policy->governor_data = NULL;
 	}
 
 	free_policy_dbs_info(policy_dbs, gov);
@@ -574,12 +523,12 @@
 
 	for_each_cpu(j, policy->cpus) {
 		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
-		unsigned int prev_load;
 
-		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
-
-		prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
-		j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;
+		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
+		/*
+		 * Make the first invocation of dbs_update() compute the load.
+		 */
+		j_cdbs->prev_load = 0;
 
 		if (ignore_nice)
 			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 61ff82f..34eb214 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -24,20 +24,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 
-/*
- * The polling frequency depends on the capability of the processor. Default
- * polling frequency is 1000 times the transition latency of the processor. The
- * governor will work on any processor with transition latency <= 10ms, using
- * appropriate sampling rate.
- *
- * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work. All times here are in us (micro seconds).
- */
-#define MIN_SAMPLING_RATE_RATIO			(2)
-#define LATENCY_MULTIPLIER			(1000)
-#define MIN_LATENCY_MULTIPLIER			(20)
-#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
-
 /* Ondemand Sampling types */
 enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
 
@@ -52,7 +38,7 @@
 
 /* Governor demand based switching data (per-policy or global). */
 struct dbs_data {
-	int usage_count;
+	struct gov_attr_set attr_set;
 	void *tuners;
 	unsigned int min_sampling_rate;
 	unsigned int ignore_nice_load;
@@ -60,37 +46,27 @@
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
 	unsigned int io_is_busy;
-
-	struct kobject kobj;
-	struct list_head policy_dbs_list;
-	/*
-	 * Protect concurrent updates to governor tunables from sysfs,
-	 * policy_dbs_list and usage_count.
-	 */
-	struct mutex mutex;
 };
 
-/* Governor's specific attributes */
-struct dbs_data;
-struct governor_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
-	ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
-			 size_t count);
-};
+static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
+{
+	return container_of(attr_set, struct dbs_data, attr_set);
+}
 
 #define gov_show_one(_gov, file_name)					\
 static ssize_t show_##file_name						\
-(struct dbs_data *dbs_data, char *buf)					\
+(struct gov_attr_set *attr_set, char *buf)				\
 {									\
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);		\
 	struct _gov##_dbs_tuners *tuners = dbs_data->tuners;		\
 	return sprintf(buf, "%u\n", tuners->file_name);			\
 }
 
 #define gov_show_one_common(file_name)					\
 static ssize_t show_##file_name						\
-(struct dbs_data *dbs_data, char *buf)					\
+(struct gov_attr_set *attr_set, char *buf)				\
 {									\
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);		\
 	return sprintf(buf, "%u\n", dbs_data->file_name);		\
 }
 
@@ -135,7 +111,7 @@
 /* Per cpu structures */
 struct cpu_dbs_info {
 	u64 prev_cpu_idle;
-	u64 prev_cpu_wall;
+	u64 prev_update_time;
 	u64 prev_cpu_nice;
 	/*
 	 * Used to keep track of load in the previous interval. However, when
@@ -184,7 +160,7 @@
 		(struct cpufreq_policy *, unsigned int, unsigned int),
 		unsigned int powersave_bias);
 void od_unregister_powersave_bias_handler(void);
-ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
 			    size_t count);
 void gov_update_cpu_data(struct dbs_data *dbs_data);
 #endif /* _CPUFREQ_GOVERNOR_H */
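
For reference, here is roughly what the reworked gov_show_one_common() macro expands to for one of the common tunables visible above (up_threshold); the expansion is illustrative, not part of the patch, and the comment inside it is added here for explanation only:

static ssize_t show_up_threshold(struct gov_attr_set *attr_set, char *buf)
{
	/* Recover the governor's dbs_data from the embedded gov_attr_set. */
	struct dbs_data *dbs_data = to_dbs_data(attr_set);

	return sprintf(buf, "%u\n", dbs_data->up_threshold);
}
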
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
new file mode 100644
index 0000000..52841f8
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -0,0 +1,84 @@
+/*
+ * Abstract code for CPUFreq governor tunable sysfs attributes.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cpufreq_governor.h"
+
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+	return container_of(kobj, struct gov_attr_set, kobj);
+}
+
+static inline struct governor_attr *to_gov_attr(struct attribute *attr)
+{
+	return container_of(attr, struct governor_attr, attr);
+}
+
+static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
+			     char *buf)
+{
+	struct governor_attr *gattr = to_gov_attr(attr);
+
+	return gattr->show(to_gov_attr_set(kobj), buf);
+}
+
+static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
+	struct governor_attr *gattr = to_gov_attr(attr);
+	int ret;
+
+	mutex_lock(&attr_set->update_lock);
+	ret = attr_set->usage_count ? gattr->store(attr_set, buf, count) : -EBUSY;
+	mutex_unlock(&attr_set->update_lock);
+	return ret;
+}
+
+const struct sysfs_ops governor_sysfs_ops = {
+	.show	= governor_show,
+	.store	= governor_store,
+};
+EXPORT_SYMBOL_GPL(governor_sysfs_ops);
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+	INIT_LIST_HEAD(&attr_set->policy_list);
+	mutex_init(&attr_set->update_lock);
+	attr_set->usage_count = 1;
+	list_add(list_node, &attr_set->policy_list);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_init);
+
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+	mutex_lock(&attr_set->update_lock);
+	attr_set->usage_count++;
+	list_add(list_node, &attr_set->policy_list);
+	mutex_unlock(&attr_set->update_lock);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_get);
+
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+	unsigned int count;
+
+	mutex_lock(&attr_set->update_lock);
+	list_del(list_node);
+	count = --attr_set->usage_count;
+	mutex_unlock(&attr_set->update_lock);
+	if (count)
+		return count;
+
+	kobject_put(&attr_set->kobj);
+	mutex_destroy(&attr_set->update_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_put);
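
A hedged sketch of the intended life cycle of these helpers, mirroring how the cpufreq_governor.c changes above consume them; my_gov_start() and my_gov_stop() are hypothetical wrappers and error handling is reduced to the bare minimum:

#include <linux/slab.h>
#include "cpufreq_governor.h"

/* First policy creates the shared tunables; later policies take references. */
static int my_gov_start(struct cpufreq_policy *policy,
			struct policy_dbs_info *policy_dbs,
			struct dbs_data *dbs_data)
{
	if (dbs_data) {
		/* Tunables exist already: just add this policy to the set. */
		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		policy_dbs->dbs_data = dbs_data;
		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;

	/* usage_count becomes 1 and the policy is put on policy_list. */
	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
	policy_dbs->dbs_data = dbs_data;

	/*
	 * A real governor would also expose the tunables here via
	 * kobject_init_and_add(&dbs_data->attr_set.kobj, ...) with
	 * governor_sysfs_ops, as cpufreq_governor.c does.
	 */
	return 0;
}

/* Last policy out frees the shared tunables. */
static void my_gov_stop(struct policy_dbs_info *policy_dbs)
{
	struct dbs_data *dbs_data = policy_dbs->dbs_data;

	if (!gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list))
		kfree(dbs_data);
}
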
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index acd8027..3001634 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -207,9 +207,10 @@
 /************************** sysfs interface ************************/
 static struct dbs_governor od_dbs_gov;
 
-static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
-		size_t count)
+static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
+				size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	unsigned int input;
 	int ret;
 
@@ -224,9 +225,10 @@
 	return count;
 }
 
-static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
-		size_t count)
+static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+				  const char *buf, size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -240,9 +242,10 @@
 	return count;
 }
 
-static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
-		const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+					  const char *buf, size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	struct policy_dbs_info *policy_dbs;
 	unsigned int input;
 	int ret;
@@ -254,7 +257,7 @@
 	dbs_data->sampling_down_factor = input;
 
 	/* Reset down sampling multiplier in case it was active */
-	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
 		/*
 		 * Doing this without locking might lead to using different
 		 * rate_mult values in od_update() and od_dbs_timer().
@@ -267,9 +270,10 @@
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
-		const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+				      const char *buf, size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	unsigned int input;
 	int ret;
 
@@ -291,9 +295,10 @@
 	return count;
 }
 
-static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
-		size_t count)
+static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
+				    const char *buf, size_t count)
 {
+	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	struct policy_dbs_info *policy_dbs;
 	unsigned int input;
@@ -308,7 +313,7 @@
 
 	od_tuners->powersave_bias = input;
 
-	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list)
+	list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
 		ondemand_powersave_bias_init(policy_dbs->policy);
 
 	return count;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 4d16f45..9f3dec9 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 
 static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
 static DEFINE_MUTEX(userspace_mutex);
@@ -31,6 +32,7 @@
 static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
 {
 	int ret = -EINVAL;
+	unsigned int *setspeed = policy->governor_data;
 
 	pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
@@ -38,6 +40,8 @@
 	if (!per_cpu(cpu_is_managed, policy->cpu))
 		goto err;
 
+	*setspeed = freq;
+
 	ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
  err:
 	mutex_unlock(&userspace_mutex);
@@ -49,19 +53,45 @@
 	return sprintf(buf, "%u\n", policy->cur);
 }
 
+static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
+{
+	unsigned int *setspeed;
+
+	setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
+	if (!setspeed)
+		return -ENOMEM;
+
+	policy->governor_data = setspeed;
+	return 0;
+}
+
 static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 				   unsigned int event)
 {
+	unsigned int *setspeed = policy->governor_data;
 	unsigned int cpu = policy->cpu;
 	int rc = 0;
 
+	if (event == CPUFREQ_GOV_POLICY_INIT)
+		return cpufreq_userspace_policy_init(policy);
+
+	if (!setspeed)
+		return -EINVAL;
+
 	switch (event) {
+	case CPUFREQ_GOV_POLICY_EXIT:
+		mutex_lock(&userspace_mutex);
+		policy->governor_data = NULL;
+		kfree(setspeed);
+		mutex_unlock(&userspace_mutex);
+		break;
 	case CPUFREQ_GOV_START:
 		BUG_ON(!policy->cur);
 		pr_debug("started managing cpu %u\n", cpu);
 
 		mutex_lock(&userspace_mutex);
 		per_cpu(cpu_is_managed, cpu) = 1;
+		*setspeed = policy->cur;
 		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_STOP:
@@ -69,20 +99,23 @@
 
 		mutex_lock(&userspace_mutex);
 		per_cpu(cpu_is_managed, cpu) = 0;
+		*setspeed = 0;
 		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_LIMITS:
 		mutex_lock(&userspace_mutex);
-		pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
-			cpu, policy->min, policy->max,
-			policy->cur);
+		pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+			cpu, policy->min, policy->max, policy->cur, *setspeed);
 
-		if (policy->max < policy->cur)
+		if (policy->max < *setspeed)
 			__cpufreq_driver_target(policy, policy->max,
 						CPUFREQ_RELATION_H);
-		else if (policy->min > policy->cur)
+		else if (policy->min > *setspeed)
 			__cpufreq_driver_target(policy, policy->min,
 						CPUFREQ_RELATION_L);
+		else
+			__cpufreq_driver_target(policy, *setspeed,
+						CPUFREQ_RELATION_L);
 		mutex_unlock(&userspace_mutex);
 		break;
 	}
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 4085244c..cdf097b 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -6,6 +6,8 @@
  *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -20,7 +22,7 @@
 #include <asm/msr.h>
 #include <asm/tsc.h>
 
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 #endif
@@ -33,7 +35,7 @@
 
 struct eps_cpu_data {
 	u32 fsb;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
 	u32 bios_limit;
 #endif
 	struct cpufreq_frequency_table freq_table[];
@@ -46,7 +48,7 @@
 static int voltage_failsafe_off;
 static int set_max_voltage;
 
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
 static int ignore_acpi_limit;
 
 static struct acpi_processor_performance *eps_acpi_cpu_perf;
@@ -141,11 +143,9 @@
 	/* Print voltage and multiplier */
 	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
 	current_voltage = lo & 0xff;
-	printk(KERN_INFO "eps: Current voltage = %dmV\n",
-		current_voltage * 16 + 700);
+	pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
 	current_multiplier = (lo >> 8) & 0xff;
-	printk(KERN_INFO "eps: Current multiplier = %d\n",
-		current_multiplier);
+	pr_info("Current multiplier = %d\n", current_multiplier);
 	}
 #endif
 	return 0;
@@ -166,7 +166,7 @@
 	dest_state = centaur->freq_table[index].driver_data & 0xffff;
 	ret = eps_set_state(centaur, policy, dest_state);
 	if (ret)
-		printk(KERN_ERR "eps: Timeout!\n");
+		pr_err("Timeout!\n");
 	return ret;
 }
 
@@ -186,7 +186,7 @@
 	int k, step, voltage;
 	int ret;
 	int states;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
 	unsigned int limit;
 #endif
 
@@ -194,36 +194,36 @@
 		return -ENODEV;
 
 	/* Check brand */
-	printk(KERN_INFO "eps: Detected VIA ");
+	pr_info("Detected VIA ");
 
 	switch (c->x86_model) {
 	case 10:
 		rdmsr(0x1153, lo, hi);
 		brand = (((lo >> 2) ^ lo) >> 18) & 3;
-		printk(KERN_CONT "Model A ");
+		pr_cont("Model A ");
 		break;
 	case 13:
 		rdmsr(0x1154, lo, hi);
 		brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
-		printk(KERN_CONT "Model D ");
+		pr_cont("Model D ");
 		break;
 	}
 
 	switch (brand) {
 	case EPS_BRAND_C7M:
-		printk(KERN_CONT "C7-M\n");
+		pr_cont("C7-M\n");
 		break;
 	case EPS_BRAND_C7:
-		printk(KERN_CONT "C7\n");
+		pr_cont("C7\n");
 		break;
 	case EPS_BRAND_EDEN:
-		printk(KERN_CONT "Eden\n");
+		pr_cont("Eden\n");
 		break;
 	case EPS_BRAND_C7D:
-		printk(KERN_CONT "C7-D\n");
+		pr_cont("C7-D\n");
 		break;
 	case EPS_BRAND_C3:
-		printk(KERN_CONT "C3\n");
+		pr_cont("C3\n");
 		return -ENODEV;
 		break;
 	}
@@ -235,7 +235,7 @@
 		/* Can be locked at 0 */
 		rdmsrl(MSR_IA32_MISC_ENABLE, val);
 		if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
-			printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
+			pr_info("Can't enable Enhanced PowerSaver\n");
 			return -ENODEV;
 		}
 	}
@@ -243,22 +243,19 @@
 	/* Print voltage and multiplier */
 	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
 	current_voltage = lo & 0xff;
-	printk(KERN_INFO "eps: Current voltage = %dmV\n",
-			current_voltage * 16 + 700);
+	pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
 	current_multiplier = (lo >> 8) & 0xff;
-	printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
+	pr_info("Current multiplier = %d\n", current_multiplier);
 
 	/* Print limits */
 	max_voltage = hi & 0xff;
-	printk(KERN_INFO "eps: Highest voltage = %dmV\n",
-			max_voltage * 16 + 700);
+	pr_info("Highest voltage = %dmV\n", max_voltage * 16 + 700);
 	max_multiplier = (hi >> 8) & 0xff;
-	printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
+	pr_info("Highest multiplier = %d\n", max_multiplier);
 	min_voltage = (hi >> 16) & 0xff;
-	printk(KERN_INFO "eps: Lowest voltage = %dmV\n",
-			min_voltage * 16 + 700);
+	pr_info("Lowest voltage = %dmV\n", min_voltage * 16 + 700);
 	min_multiplier = (hi >> 24) & 0xff;
-	printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
+	pr_info("Lowest multiplier = %d\n", min_multiplier);
 
 	/* Sanity checks */
 	if (current_multiplier == 0 || max_multiplier == 0
@@ -276,34 +273,30 @@
 
 	/* Check for systems using underclocked CPU */
 	if (!freq_failsafe_off && max_multiplier != current_multiplier) {
-		printk(KERN_INFO "eps: Your processor is running at different "
-			"frequency then its maximum. Aborting.\n");
-		printk(KERN_INFO "eps: You can use freq_failsafe_off option "
-			"to disable this check.\n");
+		pr_info("Your processor is running at a different frequency than its maximum. Aborting.\n");
+		pr_info("You can use the freq_failsafe_off option to disable this check.\n");
 		return -EINVAL;
 	}
 	if (!voltage_failsafe_off && max_voltage != current_voltage) {
-		printk(KERN_INFO "eps: Your processor is running at different "
-			"voltage then its maximum. Aborting.\n");
-		printk(KERN_INFO "eps: You can use voltage_failsafe_off "
-			"option to disable this check.\n");
+		pr_info("Your processor is running at a different voltage than its maximum. Aborting.\n");
+		pr_info("You can use the voltage_failsafe_off option to disable this check.\n");
 		return -EINVAL;
 	}
 
 	/* Calc FSB speed */
 	fsb = cpu_khz / current_multiplier;
 
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
 	/* Check for ACPI processor speed limit */
 	if (!ignore_acpi_limit && !eps_acpi_init()) {
 		if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
-			printk(KERN_INFO "eps: ACPI limit %u.%uGHz\n",
+			pr_info("ACPI limit %u.%uGHz\n",
 				limit/1000000,
 				(limit%1000000)/10000);
 			eps_acpi_exit(policy);
 			/* Check if max_multiplier is in BIOS limits */
 			if (limit && max_multiplier * fsb > limit) {
-				printk(KERN_INFO "eps: Aborting.\n");
+				pr_info("Aborting\n");
 				return -EINVAL;
 			}
 		}
@@ -319,8 +312,7 @@
 		v = (set_max_voltage - 700) / 16;
 		/* Check if voltage is within limits */
 		if (v >= min_voltage && v <= max_voltage) {
-			printk(KERN_INFO "eps: Setting %dmV as maximum.\n",
-				v * 16 + 700);
+			pr_info("Setting %dmV as maximum\n", v * 16 + 700);
 			max_voltage = v;
 		}
 	}
@@ -341,7 +333,7 @@
 
 	/* Copy basic values */
 	centaur->fsb = fsb;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
 	centaur->bios_limit = limit;
 #endif
 
@@ -426,7 +418,7 @@
 MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check");
 module_param(voltage_failsafe_off, int, 0644);
 MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check");
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
 module_param(ignore_acpi_limit, int, 0644);
 MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed limit");
 #endif
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 1c06e78..bfce11c 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -16,6 +16,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -185,7 +187,7 @@
 static int __init elanfreq_setup(char *str)
 {
 	max_freq = simple_strtoul(str, &str, 0);
-	printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
+	pr_warn("You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
 	return 1;
 }
 __setup("elanfreq=", elanfreq_setup);
diff --git a/drivers/cpufreq/hisi-acpu-cpufreq.c b/drivers/cpufreq/hisi-acpu-cpufreq.c
deleted file mode 100644
index 026d5b2..0000000
--- a/drivers/cpufreq/hisi-acpu-cpufreq.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Hisilicon Platforms Using ACPU CPUFreq Support
- *
- * Copyright (c) 2015 Hisilicon Limited.
- * Copyright (c) 2015 Linaro Limited.
- *
- * Leo Yan <leo.yan@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-static int __init hisi_acpu_cpufreq_driver_init(void)
-{
-	struct platform_device *pdev;
-
-	if (!of_machine_is_compatible("hisilicon,hi6220"))
-		return -ENODEV;
-
-	pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
-	return PTR_ERR_OR_ZERO(pdev);
-}
-module_init(hisi_acpu_cpufreq_driver_init);
-
-MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
-MODULE_DESCRIPTION("Hisilicon acpu cpufreq driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 0202429..759612d 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -8,6 +8,8 @@
  *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -118,8 +120,7 @@
 
 	if (ret) {
 		set_cpus_allowed_ptr(current, &saved_mask);
-		printk(KERN_WARNING "get performance failed with error %d\n",
-		       ret);
+		pr_warn("get performance failed with error %d\n", ret);
 		ret = 0;
 		goto migrate_end;
 	}
@@ -177,7 +178,7 @@
 
 	ret = processor_set_pstate(value);
 	if (ret) {
-		printk(KERN_WARNING "Transition failed with error %d\n", ret);
+		pr_warn("Transition failed with error %d\n", ret);
 		retval = -ENODEV;
 		goto migrate_end;
 	}
@@ -291,8 +292,7 @@
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
-	printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
-	       "activated.\n", cpu);
+	pr_info("CPU%u - ACPI performance management activated\n", cpu);
 
 	for (i = 0; i < data->acpi_data.state_count; i++)
 		pr_debug("     %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4b64452..b76a98d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -10,6 +10,8 @@
  * of the License.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
@@ -39,10 +41,17 @@
 #define ATOM_TURBO_RATIOS	0x66c
 #define ATOM_TURBO_VIDS		0x66d
 
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+#endif
+
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
+#define EXT_BITS 6
+#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
+
 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
@@ -64,8 +73,37 @@
 	return ret;
 }
 
+static inline u64 mul_ext_fp(u64 x, u64 y)
+{
+	return (x * y) >> EXT_FRAC_BITS;
+}
+
+static inline u64 div_ext_fp(u64 x, u64 y)
+{
+	return div64_u64(x << EXT_FRAC_BITS, y);
+}
+
+/**
+ * struct sample -	Store performance sample
+ * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
+ *			performance during last sample period
+ * @busy_scaled:	Scaled busy value which is used to calculate next
+ *			P state. This can differ from core_avg_perf to
+ *			account for CPU idle periods
+ * @aperf:		Difference of actual performance frequency clock count
+ *			read from APERF MSR between last and current sample
+ * @mperf:		Difference of maximum performance frequency clock count
+ *			read from MPERF MSR between last and current sample
+ * @tsc:		Difference of time stamp counter between last and
+ *			current sample
+ * @freq:		Effective frequency calculated from APERF/MPERF
+ * @time:		Current time from scheduler
+ *
+ * This structure is used in the cpudata structure to store performance sample
+ * data for choosing next P State.
+ */
 struct sample {
-	int32_t core_pct_busy;
+	int32_t core_avg_perf;
 	int32_t busy_scaled;
 	u64 aperf;
 	u64 mperf;
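
To make the new extended fixed-point format concrete, a small worked example follows; FRAC_BITS and EXT_BITS come from the definitions above, while the sample counts and P-state numbers are illustrative values only and are not part of the patch.

/*
 * Worked example (illustrative): with FRAC_BITS = 8 and EXT_BITS = 6,
 * EXT_FRAC_BITS is 14, so extended fixed-point ratios are kept in units
 * of 1/16384.  If a sample saw aperf = 750 and mperf = 1000:
 *
 *   core_avg_perf = div_ext_fp(750, 1000) = (750 << 14) / 1000 = 12288
 *                 = 0.75 in extended fixed point
 *
 * and assuming max_pstate_physical = 30 with scaling = 100000 (3 GHz):
 *
 *   mul_ext_fp(12288, 30 * 100000) = (12288 * 3000000) >> 14 = 2250000 kHz
 *
 * i.e. 75% of the 3 GHz maximum, which is how get_avg_frequency() later in
 * this patch computes the effective frequency.
 */
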
@@ -74,6 +112,20 @@
 	u64 time;
 };
 
+/**
+ * struct pstate_data - Store P state data
+ * @current_pstate:	Current requested P state
+ * @min_pstate:		Min P state possible for this platform
+ * @max_pstate:		Max P state possible for this platform
+ * @max_pstate_physical:This is physical Max P state for a processor
+ *			This can be higher than the max_pstate which can
+ *			be limited by platform thermal design power limits
+ * @scaling:		Scaling factor to  convert frequency to cpufreq
+ *			frequency units
+ * @turbo_pstate:	Max Turbo P state possible for this platform
+ *
+ * Stores the per cpu model P state limits and current P state.
+ */
 struct pstate_data {
 	int	current_pstate;
 	int	min_pstate;
@@ -83,6 +135,19 @@
 	int	turbo_pstate;
 };
 
+/**
+ * struct vid_data -	Stores voltage information data
+ * @min:		VID data for this platform corresponding to
+ *			the lowest P state
+ * @max:		VID data corresponding to the highest P State.
+ * @turbo:		VID data for turbo P state
+ * @ratio:		Ratio of (vid max - vid min) /
+ *			(max P state - Min P State)
+ *
+ * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
+ * This data is used in Atom platforms, where in addition to target P state,
+ * the voltage data needs to be specified to select next P State.
+ */
 struct vid_data {
 	int min;
 	int max;
@@ -90,6 +155,18 @@
 	int32_t ratio;
 };
 
+/**
+ * struct _pid -	Stores PID data
+ * @setpoint:		Target set point for busyness or performance
+ * @integral:		Storage for accumulated error values
+ * @p_gain:		PID proportional gain
+ * @i_gain:		PID integral gain
+ * @d_gain:		PID derivative gain
+ * @deadband:		PID deadband
+ * @last_err:		Last error storage for integral part of PID calculation
+ *
+ * Stores PID coefficients and last error for PID controller.
+ */
 struct _pid {
 	int setpoint;
 	int32_t integral;
@@ -100,10 +177,31 @@
 	int32_t last_err;
 };
 
+/**
+ * struct cpudata -	Per CPU instance data storage
+ * @cpu:		CPU number for this instance data
+ * @update_util:	CPUFreq utility callback information
+ * @update_util_set:	CPUFreq utility callback is set
+ * @pstate:		Stores P state limits for this CPU
+ * @vid:		Stores VID limits for this CPU
+ * @pid:		Stores PID parameters for this CPU
+ * @last_sample_time:	Last Sample time
+ * @prev_aperf:		Last APERF value read from APERF MSR
+ * @prev_mperf:		Last MPERF value read from MPERF MSR
+ * @prev_tsc:		Last timestamp counter (TSC) value
+ * @prev_cummulative_iowait: IO wait time difference between the last and
+ *			current sample
+ * @sample:		Storage for storing last Sample data
+ * @acpi_perf_data:	Stores ACPI perf information read from _PSS
+ * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
+ *
+ * This structure stores per CPU instance data for all CPUs.
+ */
 struct cpudata {
 	int cpu;
 
 	struct update_util_data update_util;
+	bool   update_util_set;
 
 	struct pstate_data pstate;
 	struct vid_data vid;
@@ -115,9 +213,26 @@
 	u64	prev_tsc;
 	u64	prev_cummulative_iowait;
 	struct sample sample;
+#ifdef CONFIG_ACPI
+	struct acpi_processor_performance acpi_perf_data;
+	bool valid_pss_table;
+#endif
 };
 
 static struct cpudata **all_cpu_data;
+
+/**
+ * struct pstate_adjust_policy - Stores static PID configuration data
+ * @sample_rate_ms:	PID calculation sample rate in ms
+ * @sample_rate_ns:	Sample rate calculation in ns
+ * @deadband:		PID deadband
+ * @setpoint:		PID Setpoint
+ * @p_gain_pct:		PID proportional gain
+ * @i_gain_pct:		PID integral gain
+ * @d_gain_pct:		PID derivative gain
+ *
+ * Stores per CPU model static PID configuration data.
+ */
 struct pstate_adjust_policy {
 	int sample_rate_ms;
 	s64 sample_rate_ns;
@@ -128,6 +243,20 @@
 	int i_gain_pct;
 };
 
+/**
+ * struct pstate_funcs - Per CPU model specific callbacks
+ * @get_max:		Callback to get maximum non turbo effective P state
+ * @get_max_physical:	Callback to get maximum non turbo physical P state
+ * @get_min:		Callback to get minimum P state
+ * @get_turbo:		Callback to get turbo P state
+ * @get_scaling:	Callback to get frequency scaling factor
+ * @get_val:		Callback to convert P state to actual MSR write value
+ * @get_vid:		Callback to get VID data for Atom platforms
+ * @get_target_pstate:	Callback to a function to calculate next P state to use
+ *
+ * Core and Atom CPU models have different ways to get P State limits. This
+ * structure is used to store those callbacks.
+ */
 struct pstate_funcs {
 	int (*get_max)(void);
 	int (*get_max_physical)(void);
@@ -139,6 +268,11 @@
 	int32_t (*get_target_pstate)(struct cpudata *);
 };
 
+/**
+ * struct cpu_defaults - Per CPU model default config data
+ * @pid_policy:	PID config data
+ * @funcs:		Callback function data
+ */
 struct cpu_defaults {
 	struct pstate_adjust_policy pid_policy;
 	struct pstate_funcs funcs;
@@ -151,6 +285,37 @@
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
 
+#ifdef CONFIG_ACPI
+static bool acpi_ppc;
+#endif
+
+/**
+ * struct perf_limits - Store user and policy limits
+ * @no_turbo:		User requested turbo state from intel_pstate sysfs
+ * @turbo_disabled:	Platform turbo status either from msr
+ *			MSR_IA32_MISC_ENABLE or when maximum available pstate
+ *			matches the maximum turbo pstate
+ * @max_perf_pct:	Effective maximum performance limit in percentage; this
+ *			is the minimum of the limit enforced by the cpufreq
+ *			policy and the user-set limit via intel_pstate sysfs
+ * @min_perf_pct:	Effective minimum performance limit in percentage; this
+ *			is the maximum of the limit enforced by the cpufreq
+ *			policy and the user-set limit via intel_pstate sysfs
+ * @max_perf:		This is a scaled value between 0 to 255 for max_perf_pct
+ *			This value is used to limit max pstate
+ * @min_perf:		This is a scaled value between 0 to 255 for min_perf_pct
+ *			This value is used to limit min pstate
+ * @max_policy_pct:	The maximum performance in percentage enforced by
+ *			cpufreq setpolicy interface
+ * @max_sysfs_pct:	The maximum performance in percentage enforced by
+ *			intel pstate sysfs interface
+ * @min_policy_pct:	The minimum performance in percentage enforced by
+ *			cpufreq setpolicy interface
+ * @min_sysfs_pct:	The minimum performance in percentage enforced by
+ *			intel pstate sysfs interface
+ *
+ * Storage for user and policy defined limits.
+ */
 struct perf_limits {
 	int no_turbo;
 	int turbo_disabled;
@@ -196,6 +361,124 @@
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
+#ifdef CONFIG_ACPI
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
+	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
+		return true;
+
+	return acpi_ppc;
+}
+
+/*
+ * The max target pstate ratio is an 8-bit value in both the PLATFORM_INFO MSR
+ * and the TURBO_RATIO_LIMIT MSR, which the pstate driver stores in the
+ * max_pstate and max_turbo_pstate fields. The PERF_CTL MSR contains a 16-bit
+ * value for the P state ratio, of which only the high 8 bits are used; for
+ * example, 0x1700 sets target ratio 0x17. The _PSS control value is stored
+ * in a format that can be written directly to the PERF_CTL MSR, but in the
+ * intel_pstate driver this shift occurs during the write to PERF_CTL
+ * (e.g. core_set_pstate() for core processors). This function converts the
+ * _PSS control value to the intel_pstate driver format for comparison and
+ * assignment.
+ */
+static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
+{
+	return cpu->acpi_perf_data.states[index].control >> 8;
+}
+
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu;
+	int turbo_pss_ctl;
+	int ret;
+	int i;
+
+	if (hwp_active)
+		return;
+
+	if (!intel_pstate_get_ppc_enable_status())
+		return;
+
+	cpu = all_cpu_data[policy->cpu];
+
+	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
+						  policy->cpu);
+	if (ret)
+		return;
+
+	/*
+	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
+	 * guarantee that the states returned by it map to the states in our
+	 * list directly.
+	 */
+	if (cpu->acpi_perf_data.control_register.space_id !=
+						ACPI_ADR_SPACE_FIXED_HARDWARE)
+		goto err;
+
+	/*
+	 * If there is only one entry _PSS, simply ignore _PSS and continue as
+	 * usual without taking _PSS into account
+	 */
+	if (cpu->acpi_perf_data.state_count < 2)
+		goto err;
+
+	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
+	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
+		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
+			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
+			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
+			 (u32) cpu->acpi_perf_data.states[i].power,
+			 (u32) cpu->acpi_perf_data.states[i].control);
+	}
+
+	/*
+	 * The _PSS table doesn't contain the whole turbo frequency range:
+	 * it only holds +1 MHz above the max non-turbo frequency, with a
+	 * control value corresponding to the max turbo ratio.  When cpufreq's
+	 * set_policy is called with that max frequency, performance is
+	 * reduced, because this driver uses the real max turbo frequency as
+	 * the max frequency.  So correct this frequency in the _PSS table to
+	 * the real max turbo frequency based on the turbo ratio, converting
+	 * to MHz since _PSS frequencies are in MHz.
+	 */
+	turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
+	if (turbo_pss_ctl > cpu->pstate.max_pstate)
+		cpu->acpi_perf_data.states[0].core_frequency =
+					policy->cpuinfo.max_freq / 1000;
+	cpu->valid_pss_table = true;
+	pr_info("_PPC limits will be enforced\n");
+
+	return;
+
+ err:
+	cpu->valid_pss_table = false;
+	acpi_processor_unregister_performance(policy->cpu);
+}
+
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu;
+
+	cpu = all_cpu_data[policy->cpu];
+	if (!cpu->valid_pss_table)
+		return;
+
+	acpi_processor_unregister_performance(policy->cpu);
+}
+
+#else
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+}
+
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+}
+#endif
+
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 			     int deadband, int integral) {
 	pid->setpoint = int_tofp(setpoint);
@@ -206,17 +489,17 @@
 
 static inline void pid_p_gain_set(struct _pid *pid, int percent)
 {
-	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+	pid->p_gain = div_fp(percent, 100);
 }
 
 static inline void pid_i_gain_set(struct _pid *pid, int percent)
 {
-	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+	pid->i_gain = div_fp(percent, 100);
 }
 
 static inline void pid_d_gain_set(struct _pid *pid, int percent)
 {
-	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+	pid->d_gain = div_fp(percent, 100);
 }
 
 static signed int pid_calc(struct _pid *pid, int32_t busy)
@@ -318,6 +601,14 @@
 	}
 }
 
+static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+{
+	if (hwp_active)
+		intel_pstate_hwp_set(policy->cpus);
+
+	return 0;
+}
+
 static void intel_pstate_hwp_set_online_cpus(void)
 {
 	get_online_cpus();
@@ -394,7 +685,7 @@
 
 	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
 	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
-	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
+	turbo_fp = div_fp(no_turbo, total);
 	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
 	return sprintf(buf, "%u\n", turbo_pct);
 }
@@ -436,7 +727,7 @@
 
 	update_turbo_state();
 	if (limits->turbo_disabled) {
-		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
+		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
 		return -EPERM;
 	}
 
@@ -465,8 +756,7 @@
 				   limits->max_perf_pct);
 	limits->max_perf_pct = max(limits->min_perf_pct,
 				   limits->max_perf_pct);
-	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
-				  int_tofp(100));
+	limits->max_perf = div_fp(limits->max_perf_pct, 100);
 
 	if (hwp_active)
 		intel_pstate_hwp_set_online_cpus();
@@ -490,8 +780,7 @@
 				   limits->min_perf_pct);
 	limits->min_perf_pct = min(limits->max_perf_pct,
 				   limits->min_perf_pct);
-	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
-				  int_tofp(100));
+	limits->min_perf = div_fp(limits->min_perf_pct, 100);
 
 	if (hwp_active)
 		intel_pstate_hwp_set_online_cpus();
@@ -678,6 +967,11 @@
 			if (err)
 				goto skip_tar;
 
+			/* For level 1 and 2, bits[23:16] contain the ratio */
+			if (tdp_ctrl)
+				tdp_ratio >>= 16;
+
+			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
 			if (tdp_ratio - 1 == tar) {
 				max_pstate = tar;
 				pr_debug("max_pstate=TAC %x\n", max_pstate);
@@ -871,15 +1165,11 @@
 	intel_pstate_set_min_pstate(cpu);
 }
 
-static inline void intel_pstate_calc_busy(struct cpudata *cpu)
+static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
 {
 	struct sample *sample = &cpu->sample;
-	int64_t core_pct;
 
-	core_pct = int_tofp(sample->aperf) * int_tofp(100);
-	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
-
-	sample->core_pct_busy = (int32_t)core_pct;
+	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
 }
 
 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
@@ -910,13 +1200,26 @@
 	cpu->prev_aperf = aperf;
 	cpu->prev_mperf = mperf;
 	cpu->prev_tsc = tsc;
-	return true;
+	/*
+	 * The first time this function is invoked in a given cycle, all of
+	 * the previous sample data fields are either zero or stale and must
+	 * be populated with meaningful numbers for things to work, so assume
+	 * that sample.time will always be reset before setting the utilization
+	 * update hook and make the caller skip the sample in that case.
+	 */
+	return !!cpu->last_sample_time;
 }
 
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
 {
-	return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
-		cpu->pstate.scaling, cpu->sample.mperf);
+	return mul_ext_fp(cpu->sample.core_avg_perf,
+			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+}
+
+static inline int32_t get_avg_pstate(struct cpudata *cpu)
+{
+	return mul_ext_fp(cpu->pstate.max_pstate_physical,
+			  cpu->sample.core_avg_perf);
 }
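
As a quick worked example of the two helpers above: core_avg_perf is the APERF/MPERF ratio in extended fixed point, so for a CPU that ran at half of its maximum performance (core_avg_perf = 0.5) with max_pstate_physical = 24 and a scaling factor of 100000 kHz per P-state step (an assumed, typical value), get_avg_frequency() reports 0.5 * 24 * 100000 = 1,200,000 kHz (1.2 GHz) and get_avg_pstate() returns 12.
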
 
 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -951,48 +1254,43 @@
 	cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
 	cpu->sample.busy_scaled = cpu_load;
 
-	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
+	return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
 }
 
 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 {
-	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
 	u64 duration_ns;
 
-	intel_pstate_calc_busy(cpu);
-
 	/*
-	 * core_busy is the ratio of actual performance to max
-	 * max_pstate is the max non turbo pstate available
-	 * current_pstate was the pstate that was requested during
-	 * 	the last sample period.
-	 *
-	 * We normalize core_busy, which was our actual percent
-	 * performance to what we requested during the last sample
-	 * period. The result will be a percentage of busy at a
-	 * specified pstate.
+	 * perf_scaled is the average performance during the last sampling
+	 * period scaled by the ratio of the maximum P-state to the P-state
+	 * requested last time (in percent).  That measures the system's
+	 * response to the previous P-state selection.
 	 */
-	core_busy = cpu->sample.core_pct_busy;
-	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
-	current_pstate = int_tofp(cpu->pstate.current_pstate);
-	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+	max_pstate = cpu->pstate.max_pstate_physical;
+	current_pstate = cpu->pstate.current_pstate;
+	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
+			       div_fp(100 * max_pstate, current_pstate));
 
 	/*
 	 * Since our utilization update callback will not run unless we are
 	 * in C0, check if the actual elapsed time is significantly greater (3x)
 	 * than our sample interval.  If it is, then we were idle for a long
-	 * enough period of time to adjust our busyness.
+	 * enough period of time to adjust our performance metric.
 	 */
 	duration_ns = cpu->sample.time - cpu->last_sample_time;
-	if ((s64)duration_ns > pid_params.sample_rate_ns * 3
-	    && cpu->last_sample_time > 0) {
-		sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
-				      int_tofp(duration_ns));
-		core_busy = mul_fp(core_busy, sample_ratio);
+	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
+		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
+		perf_scaled = mul_fp(perf_scaled, sample_ratio);
+	} else {
+		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+		if (sample_ratio < int_tofp(1))
+			perf_scaled = 0;
 	}
 
-	cpu->sample.busy_scaled = core_busy;
-	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
+	cpu->sample.busy_scaled = perf_scaled;
+	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
 }
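
To make the control law above concrete: with core_avg_perf = 0.6, max_pstate = 24 and current_pstate = 20, perf_scaled = 0.6 * (100 * 24 / 20) = 72, i.e. the CPU delivered 72% of what the previous P-state request asked for. If more than three sample periods elapsed since the last sample (the CPU was idle), perf_scaled is further multiplied by sample_rate_ns / duration_ns; otherwise, if MPERF accounts for less than 1% of the TSC interval, perf_scaled is forced to 0 so the PID controller sees an idle CPU.
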
 
 static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
@@ -1022,7 +1320,7 @@
 	intel_pstate_update_pstate(cpu, target_pstate);
 
 	sample = &cpu->sample;
-	trace_pstate_sample(fp_toint(sample->core_pct_busy),
+	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
 		fp_toint(sample->busy_scaled),
 		from,
 		cpu->pstate.current_pstate,
@@ -1041,8 +1339,11 @@
 	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
 		bool sample_taken = intel_pstate_sample(cpu, time);
 
-		if (sample_taken && !hwp_active)
-			intel_pstate_adjust_busy_pstate(cpu);
+		if (sample_taken) {
+			intel_pstate_calc_avg_perf(cpu);
+			if (!hwp_active)
+				intel_pstate_adjust_busy_pstate(cpu);
+		}
 	}
 }
 
@@ -1100,44 +1401,86 @@
 	intel_pstate_get_cpu_pstates(cpu);
 
 	intel_pstate_busy_pid_reset(cpu);
-	intel_pstate_sample(cpu, 0);
 
-	cpu->update_util.func = intel_pstate_update_util;
-	cpufreq_set_update_util_data(cpunum, &cpu->update_util);
-
-	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
+	pr_debug("controlling: cpu %d\n", cpunum);
 
 	return 0;
 }
 
 static unsigned int intel_pstate_get(unsigned int cpu_num)
 {
-	struct sample *sample;
-	struct cpudata *cpu;
+	struct cpudata *cpu = all_cpu_data[cpu_num];
 
-	cpu = all_cpu_data[cpu_num];
-	if (!cpu)
-		return 0;
-	sample = &cpu->sample;
-	return get_avg_frequency(cpu);
+	return cpu ? get_avg_frequency(cpu) : 0;
+}
+
+static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
+{
+	struct cpudata *cpu = all_cpu_data[cpu_num];
+
+	/* Prevent intel_pstate_update_util() from using stale data. */
+	cpu->sample.time = 0;
+	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
+				     intel_pstate_update_util);
+	cpu->update_util_set = true;
+}
+
+static void intel_pstate_clear_update_util_hook(unsigned int cpu)
+{
+	struct cpudata *cpu_data = all_cpu_data[cpu];
+
+	if (!cpu_data->update_util_set)
+		return;
+
+	cpufreq_remove_update_util_hook(cpu);
+	cpu_data->update_util_set = false;
+	synchronize_sched();
+}
+
+static void intel_pstate_set_performance_limits(struct perf_limits *limits)
+{
+	limits->no_turbo = 0;
+	limits->turbo_disabled = 0;
+	limits->max_perf_pct = 100;
+	limits->max_perf = int_tofp(1);
+	limits->min_perf_pct = 100;
+	limits->min_perf = int_tofp(1);
+	limits->max_policy_pct = 100;
+	limits->max_sysfs_pct = 100;
+	limits->min_policy_pct = 0;
+	limits->min_sysfs_pct = 0;
 }
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
+	struct cpudata *cpu;
+
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
 
-	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
-	    policy->max >= policy->cpuinfo.max_freq) {
-		pr_debug("intel_pstate: set performance\n");
-		limits = &performance_limits;
-		if (hwp_active)
-			intel_pstate_hwp_set(policy->cpus);
-		return 0;
+	intel_pstate_clear_update_util_hook(policy->cpu);
+
+	cpu = all_cpu_data[0];
+	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) {
+		if (policy->max < policy->cpuinfo.max_freq &&
+		    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
+			pr_debug("policy->max > max non turbo frequency\n");
+			policy->max = policy->cpuinfo.max_freq;
+		}
 	}
 
-	pr_debug("intel_pstate: set powersave\n");
-	limits = &powersave_limits;
+	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+		limits = &performance_limits;
+		if (policy->max >= policy->cpuinfo.max_freq) {
+			pr_debug("set performance\n");
+			intel_pstate_set_performance_limits(limits);
+			goto out;
+		}
+	} else {
+		pr_debug("set powersave\n");
+		limits = &powersave_limits;
+	}
+
 	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
 	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
 	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
@@ -1158,13 +1501,13 @@
 	/* Make sure min_perf_pct <= max_perf_pct */
 	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
-	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
-				  int_tofp(100));
-	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
-				  int_tofp(100));
+	limits->min_perf = div_fp(limits->min_perf_pct, 100);
+	limits->max_perf = div_fp(limits->max_perf_pct, 100);
 
-	if (hwp_active)
-		intel_pstate_hwp_set(policy->cpus);
+ out:
+	intel_pstate_set_update_util_hook(policy->cpu);
+
+	intel_pstate_hwp_set_policy(policy);
 
 	return 0;
 }
@@ -1185,10 +1528,9 @@
 	int cpu_num = policy->cpu;
 	struct cpudata *cpu = all_cpu_data[cpu_num];
 
-	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
+	pr_debug("CPU %d exiting\n", cpu_num);
 
-	cpufreq_set_update_util_data(cpu_num, NULL);
-	synchronize_sched();
+	intel_pstate_clear_update_util_hook(cpu_num);
 
 	if (hwp_active)
 		return;
@@ -1219,18 +1561,28 @@
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->cpuinfo.max_freq =
 		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+	intel_pstate_init_acpi_perf_limits(policy);
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
 
 	return 0;
 }
 
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+	intel_pstate_exit_perf_limits(policy);
+
+	return 0;
+}
+
 static struct cpufreq_driver intel_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
+	.resume		= intel_pstate_hwp_set_policy,
 	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
+	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
 	.name		= "intel_pstate",
 };
@@ -1274,8 +1626,7 @@
 
 }
 
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
+#ifdef CONFIG_ACPI
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1431,7 +1782,7 @@
 	if (intel_pstate_platform_pwr_mgmt_exists())
 		return -ENODEV;
 
-	pr_info("Intel P-state driver initializing.\n");
+	pr_info("Intel P-state driver initializing\n");
 
 	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
 	if (!all_cpu_data)
@@ -1448,15 +1799,14 @@
 	intel_pstate_sysfs_expose_params();
 
 	if (hwp_active)
-		pr_info("intel_pstate: HWP enabled\n");
+		pr_info("HWP enabled\n");
 
 	return rc;
 out:
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		if (all_cpu_data[cpu]) {
-			cpufreq_set_update_util_data(cpu, NULL);
-			synchronize_sched();
+			intel_pstate_clear_update_util_hook(cpu);
 			kfree(all_cpu_data[cpu]);
 		}
 	}
@@ -1475,13 +1825,19 @@
 	if (!strcmp(str, "disable"))
 		no_load = 1;
 	if (!strcmp(str, "no_hwp")) {
-		pr_info("intel_pstate: HWP disabled\n");
+		pr_info("HWP disabled\n");
 		no_hwp = 1;
 	}
 	if (!strcmp(str, "force"))
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
 		hwp_only = 1;
+
+#ifdef CONFIG_ACPI
+	if (!strcmp(str, "support_acpi_ppc"))
+		acpi_ppc = true;
+#endif
+
 	return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
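
With the hunk above, booting with a kernel command line containing

	intel_pstate=support_acpi_ppc

sets the new acpi_ppc flag and enables the ACPI _PPC/_PSS handling added earlier in this patch (only when the kernel is built with CONFIG_ACPI).
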
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 0f6b229..c46a12d 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -21,6 +21,8 @@
  *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -40,8 +42,6 @@
 
 #include "longhaul.h"
 
-#define PFX "longhaul: "
-
 #define TYPE_LONGHAUL_V1	1
 #define TYPE_LONGHAUL_V2	2
 #define TYPE_POWERSAVER		3
@@ -347,14 +347,13 @@
 	freqs.new = calc_speed(longhaul_get_cpu_mult());
 	/* Check if requested frequency is set. */
 	if (unlikely(freqs.new != speed)) {
-		printk(KERN_INFO PFX "Failed to set requested frequency!\n");
+		pr_info("Failed to set requested frequency!\n");
 		/* Revision ID = 1 but processor is expecting revision key
 		 * equal to 0. Jumpers at the bottom of processor will change
 		 * multiplier and FSB, but will not change bits in Longhaul
 		 * MSR nor enable voltage scaling. */
 		if (!revid_errata) {
-			printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
-						"option.\n");
+			pr_info("Enabling \"Ignore Revision ID\" option\n");
 			revid_errata = 1;
 			msleep(200);
 			goto retry_loop;
@@ -364,11 +363,10 @@
 		 * but it doesn't change frequency. I tried poking various
 		 * bits in northbridge registers, but without success. */
 		if (longhaul_flags & USE_ACPI_C3) {
-			printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
+			pr_info("Disabling ACPI C3 support\n");
 			longhaul_flags &= ~USE_ACPI_C3;
 			if (revid_errata) {
-				printk(KERN_INFO PFX "Disabling \"Ignore "
-						"Revision ID\" option.\n");
+				pr_info("Disabling \"Ignore Revision ID\" option\n");
 				revid_errata = 0;
 			}
 			msleep(200);
@@ -379,7 +377,7 @@
 		 * RevID = 1. RevID errata will make things right. Just
 		 * to be 100% sure. */
 		if (longhaul_version == TYPE_LONGHAUL_V2) {
-			printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
+			pr_info("Switching to Longhaul ver. 1\n");
 			longhaul_version = TYPE_LONGHAUL_V1;
 			msleep(200);
 			goto retry_loop;
@@ -387,8 +385,7 @@
 	}
 
 	if (!bm_timeout) {
-		printk(KERN_INFO PFX "Warning: Timeout while waiting for "
-				"idle PCI bus.\n");
+		pr_info("Warning: Timeout while waiting for idle PCI bus\n");
 		return -EBUSY;
 	}
 
@@ -433,12 +430,12 @@
 	/* Get current frequency */
 	mult = longhaul_get_cpu_mult();
 	if (mult == -1) {
-		printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n");
+		pr_info("Invalid (reserved) multiplier!\n");
 		return -EINVAL;
 	}
 	fsb = guess_fsb(mult);
 	if (fsb == 0) {
-		printk(KERN_INFO PFX "Invalid (reserved) FSB!\n");
+		pr_info("Invalid (reserved) FSB!\n");
 		return -EINVAL;
 	}
 	/* Get max multiplier - as we always did.
@@ -468,11 +465,11 @@
 		 print_speed(highest_speed/1000));
 
 	if (lowest_speed == highest_speed) {
-		printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n");
+		pr_info("highestspeed == lowest, aborting\n");
 		return -EINVAL;
 	}
 	if (lowest_speed > highest_speed) {
-		printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n",
+		pr_info("nonsense! lowest (%d > %d) !\n",
 			lowest_speed, highest_speed);
 		return -EINVAL;
 	}
@@ -538,16 +535,16 @@
 
 	rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 	if (!(longhaul.bits.RevisionID & 1)) {
-		printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n");
+		pr_info("Voltage scaling not supported by CPU\n");
 		return;
 	}
 
 	if (!longhaul.bits.VRMRev) {
-		printk(KERN_INFO PFX "VRM 8.5\n");
+		pr_info("VRM 8.5\n");
 		vrm_mV_table = &vrm85_mV[0];
 		mV_vrm_table = &mV_vrm85[0];
 	} else {
-		printk(KERN_INFO PFX "Mobile VRM\n");
+		pr_info("Mobile VRM\n");
 		if (cpu_model < CPU_NEHEMIAH)
 			return;
 		vrm_mV_table = &mobilevrm_mV[0];
@@ -558,27 +555,21 @@
 	maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
 
 	if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
-		printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
-					"Voltage scaling disabled.\n",
-					minvid.mV/1000, minvid.mV%1000,
-					maxvid.mV/1000, maxvid.mV%1000);
+		pr_info("Bogus values Min:%d.%03d Max:%d.%03d - Voltage scaling disabled\n",
+			minvid.mV/1000, minvid.mV%1000,
+			maxvid.mV/1000, maxvid.mV%1000);
 		return;
 	}
 
 	if (minvid.mV == maxvid.mV) {
-		printk(KERN_INFO PFX "Claims to support voltage scaling but "
-				"min & max are both %d.%03d. "
-				"Voltage scaling disabled\n",
-				maxvid.mV/1000, maxvid.mV%1000);
+		pr_info("Claims to support voltage scaling but min & max are both %d.%03d - Voltage scaling disabled\n",
+			maxvid.mV/1000, maxvid.mV%1000);
 		return;
 	}
 
 	/* How many voltage steps*/
 	numvscales = maxvid.pos - minvid.pos + 1;
-	printk(KERN_INFO PFX
-		"Max VID=%d.%03d  "
-		"Min VID=%d.%03d, "
-		"%d possible voltage scales\n",
+	pr_info("Max VID=%d.%03d  Min VID=%d.%03d, %d possible voltage scales\n",
 		maxvid.mV/1000, maxvid.mV%1000,
 		minvid.mV/1000, minvid.mV%1000,
 		numvscales);
@@ -617,12 +608,12 @@
 			pos = minvid.pos;
 		freq_pos->driver_data |= mV_vrm_table[pos] << 8;
 		vid = vrm_mV_table[mV_vrm_table[pos]];
-		printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
+		pr_info("f: %d kHz, index: %d, vid: %d mV\n",
 			speed, (int)(freq_pos - longhaul_table), vid.mV);
 	}
 
 	can_scale_voltage = 1;
-	printk(KERN_INFO PFX "Voltage scaling enabled.\n");
+	pr_info("Voltage scaling enabled\n");
 }
 
 
@@ -720,8 +711,7 @@
 			pci_write_config_byte(dev, reg, pci_cmd);
 			pci_read_config_byte(dev, reg, &pci_cmd);
 			if (!(pci_cmd & 1<<7)) {
-				printk(KERN_ERR PFX
-					"Can't enable access to port 0x22.\n");
+				pr_err("Can't enable access to port 0x22\n");
 				status = 0;
 			}
 		}
@@ -758,8 +748,7 @@
 		if (pci_cmd & 1 << 7) {
 			pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
 			acpi_regs_addr &= 0xff00;
-			printk(KERN_INFO PFX "ACPI I/O at 0x%x\n",
-					acpi_regs_addr);
+			pr_info("ACPI I/O at 0x%x\n", acpi_regs_addr);
 		}
 
 		pci_dev_put(dev);
@@ -853,14 +842,14 @@
 			longhaul_version = TYPE_LONGHAUL_V1;
 	}
 
-	printk(KERN_INFO PFX "VIA %s CPU detected.  ", cpuname);
+	pr_info("VIA %s CPU detected.  ", cpuname);
 	switch (longhaul_version) {
 	case TYPE_LONGHAUL_V1:
 	case TYPE_LONGHAUL_V2:
-		printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version);
+		pr_cont("Longhaul v%d supported\n", longhaul_version);
 		break;
 	case TYPE_POWERSAVER:
-		printk(KERN_CONT "Powersaver supported.\n");
+		pr_cont("Powersaver supported\n");
 		break;
 	};
 
@@ -889,15 +878,14 @@
 	if (!(longhaul_flags & USE_ACPI_C3
 	     || longhaul_flags & USE_NORTHBRIDGE)
 	    && ((pr == NULL) || !(pr->flags.bm_control))) {
-		printk(KERN_ERR PFX
-			"No ACPI support. Unsupported northbridge.\n");
+		pr_err("No ACPI support: Unsupported northbridge\n");
 		return -ENODEV;
 	}
 
 	if (longhaul_flags & USE_NORTHBRIDGE)
-		printk(KERN_INFO PFX "Using northbridge support.\n");
+		pr_info("Using northbridge support\n");
 	if (longhaul_flags & USE_ACPI_C3)
-		printk(KERN_INFO PFX "Using ACPI support.\n");
+		pr_info("Using ACPI support\n");
 
 	ret = longhaul_get_ranges();
 	if (ret != 0)
@@ -934,20 +922,18 @@
 		return -ENODEV;
 
 	if (!enable) {
-		printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
+		pr_err("Option \"enable\" not set - Aborting\n");
 		return -ENODEV;
 	}
 #ifdef CONFIG_SMP
 	if (num_online_cpus() > 1) {
-		printk(KERN_ERR PFX "More than 1 CPU detected, "
-				"longhaul disabled.\n");
+		pr_err("More than 1 CPU detected, longhaul disabled\n");
 		return -ENODEV;
 	}
 #endif
 #ifdef CONFIG_X86_IO_APIC
-	if (cpu_has_apic) {
-		printk(KERN_ERR PFX "APIC detected. Longhaul is currently "
-				"broken in this configuration.\n");
+	if (boot_cpu_has(X86_FEATURE_APIC)) {
+		pr_err("APIC detected. Longhaul is currently broken in this configuration.\n");
 		return -ENODEV;
 	}
 #endif
@@ -955,7 +941,7 @@
 	case 6 ... 9:
 		return cpufreq_register_driver(&longhaul_driver);
 	case 10:
-		printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
+		pr_err("Use acpi-cpufreq driver for VIA C7\n");
 	default:
 		;
 	}
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
new file mode 100644
index 0000000..be89416
--- /dev/null
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -0,0 +1,222 @@
+/*
+ * CPU Frequency Scaling for Loongson 1 SoC
+ *
+ * Copyright (C) 2014-2016 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <cpufreq.h>
+#include <loongson1.h>
+
+struct ls1x_cpufreq {
+	struct device *dev;
+	struct clk *clk;	/* CPU clk */
+	struct clk *mux_clk;	/* MUX of CPU clk */
+	struct clk *pll_clk;	/* PLL clk */
+	struct clk *osc_clk;	/* OSC clk */
+	unsigned int max_freq;
+	unsigned int min_freq;
+};
+
+static struct ls1x_cpufreq *cpufreq;
+
+static int ls1x_cpufreq_notifier(struct notifier_block *nb,
+				 unsigned long val, void *data)
+{
+	if (val == CPUFREQ_POSTCHANGE)
+		current_cpu_data.udelay_val = loops_per_jiffy;
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ls1x_cpufreq_notifier_block = {
+	.notifier_call = ls1x_cpufreq_notifier
+};
+
+static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
+			       unsigned int index)
+{
+	struct device *cpu_dev = get_cpu_device(policy->cpu);
+	unsigned int old_freq, new_freq;
+
+	old_freq = policy->cur;
+	new_freq = policy->freq_table[index].frequency;
+
+	/*
+	 * The procedure for reconfiguring the CPU clk is as follows.
+	 *
+	 *  - Reparent CPU clk to OSC clk
+	 *  - Reset CPU clock (very important)
+	 *  - Reconfigure CPU DIV
+	 *  - Reparent CPU clk back to CPU DIV clk
+	 */
+
+	clk_set_parent(policy->clk, cpufreq->osc_clk);
+	__raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
+		     LS1X_CLK_PLL_DIV);
+	__raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
+		     LS1X_CLK_PLL_DIV);
+	clk_set_rate(cpufreq->mux_clk, new_freq * 1000);
+	clk_set_parent(policy->clk, cpufreq->mux_clk);
+	dev_dbg(cpu_dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
+
+	return 0;
+}
+
+static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
+{
+	struct device *cpu_dev = get_cpu_device(policy->cpu);
+	struct cpufreq_frequency_table *freq_tbl;
+	unsigned int pll_freq, freq;
+	int steps, i, ret;
+
+	pll_freq = clk_get_rate(cpufreq->pll_clk) / 1000;
+
+	steps = 1 << DIV_CPU_WIDTH;
+	freq_tbl = kcalloc(steps, sizeof(*freq_tbl), GFP_KERNEL);
+	if (!freq_tbl)
+		return -ENOMEM;
+
+	for (i = 0; i < (steps - 1); i++) {
+		freq = pll_freq / (i + 1);
+		if ((freq < cpufreq->min_freq) || (freq > cpufreq->max_freq))
+			freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
+		else
+			freq_tbl[i].frequency = freq;
+		dev_dbg(cpu_dev,
+			"cpufreq table: index %d: frequency %d\n", i,
+			freq_tbl[i].frequency);
+	}
+	freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+
+	policy->clk = cpufreq->clk;
+	ret = cpufreq_generic_init(policy, freq_tbl, 0);
+	if (ret)
+		kfree(freq_tbl);
+
+	return ret;
+}
+
+static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
+{
+	kfree(policy->freq_table);
+	return 0;
+}
+
+static struct cpufreq_driver ls1x_cpufreq_driver = {
+	.name		= "cpufreq-ls1x",
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= ls1x_cpufreq_target,
+	.get		= cpufreq_generic_get,
+	.init		= ls1x_cpufreq_init,
+	.exit		= ls1x_cpufreq_exit,
+	.attr		= cpufreq_generic_attr,
+};
+
+static int ls1x_cpufreq_remove(struct platform_device *pdev)
+{
+	cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
+				    CPUFREQ_TRANSITION_NOTIFIER);
+	cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+
+	return 0;
+}
+
+static int ls1x_cpufreq_probe(struct platform_device *pdev)
+{
+	struct plat_ls1x_cpufreq *pdata = dev_get_platdata(&pdev->dev);
+	struct clk *clk;
+	int ret;
+
+	if (!pdata || !pdata->clk_name || !pdata->osc_clk_name) {
+		dev_err(&pdev->dev, "platform data missing\n");
+		return -EINVAL;
+	}
+
+	cpufreq =
+	    devm_kzalloc(&pdev->dev, sizeof(struct ls1x_cpufreq), GFP_KERNEL);
+	if (!cpufreq)
+		return -ENOMEM;
+
+	cpufreq->dev = &pdev->dev;
+
+	clk = devm_clk_get(&pdev->dev, pdata->clk_name);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "unable to get %s clock\n",
+			pdata->clk_name);
+		return PTR_ERR(clk);
+	}
+	cpufreq->clk = clk;
+
+	clk = clk_get_parent(clk);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "unable to get parent of %s clock\n",
+			__clk_get_name(cpufreq->clk));
+		return PTR_ERR(clk);
+	}
+	cpufreq->mux_clk = clk;
+
+	clk = clk_get_parent(clk);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "unable to get parent of %s clock\n",
+			__clk_get_name(cpufreq->mux_clk));
+		return PTR_ERR(clk);
+	}
+	cpufreq->pll_clk = clk;
+
+	clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "unable to get %s clock\n",
+			pdata->osc_clk_name);
+		return PTR_ERR(clk);
+	}
+	cpufreq->osc_clk = clk;
+
+	cpufreq->max_freq = pdata->max_freq;
+	cpufreq->min_freq = pdata->min_freq;
+
+	ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"failed to register CPUFreq driver: %d\n", ret);
+		return ret;
+	}
+
+	ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+
+	if (ret) {
+		dev_err(&pdev->dev,
+			"failed to register CPUFreq notifier: %d\n",ret);
+		cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+	}
+
+	return ret;
+}
+
+static struct platform_driver ls1x_cpufreq_platdrv = {
+	.probe	= ls1x_cpufreq_probe,
+	.remove	= ls1x_cpufreq_remove,
+	.driver	= {
+		.name	= "ls1x-cpufreq",
+	},
+};
+
+module_platform_driver(ls1x_cpufreq_platdrv);
+
+MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
+MODULE_LICENSE("GPL");
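
The driver above requires board code to supply a struct plat_ls1x_cpufreq via platform data (its probe() returns -EINVAL otherwise). A hypothetical registration, with placeholder clock names and frequency limits, might look like:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

#include <cpufreq.h>			/* struct plat_ls1x_cpufreq */

static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = {
	.clk_name	= "cpu_clk",	/* placeholder clock names */
	.osc_clk_name	= "osc_clk",
	.max_freq	= 266 * 1000,	/* kHz, placeholder limits */
	.min_freq	= 33 * 1000,
};

static int __init board_cpufreq_init(void)
{
	struct platform_device *pdev;

	/* the device name must match ls1x_cpufreq_platdrv.driver.name */
	pdev = platform_device_register_data(NULL, "ls1x-cpufreq", -1,
					     &ls1x_cpufreq_pdata,
					     sizeof(ls1x_cpufreq_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(board_cpufreq_init);
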
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index cd593c1..6bbdac1 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -10,6 +10,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/err.h>
@@ -76,7 +79,7 @@
 
 	cpuclk = clk_get(NULL, "cpu_clk");
 	if (IS_ERR(cpuclk)) {
-		printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
+		pr_err("couldn't get CPU clk\n");
 		return PTR_ERR(cpuclk);
 	}
 
@@ -163,7 +166,7 @@
 	if (ret)
 		return ret;
 
-	pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
+	pr_info("Loongson-2F CPU frequency driver\n");
 
 	cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
 				  CPUFREQ_TRANSITION_NOTIFIER);
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
deleted file mode 100644
index 262581b..0000000
--- a/drivers/cpufreq/ls1x-cpufreq.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * CPU Frequency Scaling for Loongson 1 SoC
- *
- * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <cpufreq.h>
-#include <loongson1.h>
-
-static struct {
-	struct device *dev;
-	struct clk *clk;	/* CPU clk */
-	struct clk *mux_clk;	/* MUX of CPU clk */
-	struct clk *pll_clk;	/* PLL clk */
-	struct clk *osc_clk;	/* OSC clk */
-	unsigned int max_freq;
-	unsigned int min_freq;
-} ls1x_cpufreq;
-
-static int ls1x_cpufreq_notifier(struct notifier_block *nb,
-				 unsigned long val, void *data)
-{
-	if (val == CPUFREQ_POSTCHANGE)
-		current_cpu_data.udelay_val = loops_per_jiffy;
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block ls1x_cpufreq_notifier_block = {
-	.notifier_call = ls1x_cpufreq_notifier
-};
-
-static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
-			       unsigned int index)
-{
-	unsigned int old_freq, new_freq;
-
-	old_freq = policy->cur;
-	new_freq = policy->freq_table[index].frequency;
-
-	/*
-	 * The procedure of reconfiguring CPU clk is as below.
-	 *
-	 *  - Reparent CPU clk to OSC clk
-	 *  - Reset CPU clock (very important)
-	 *  - Reconfigure CPU DIV
-	 *  - Reparent CPU clk back to CPU DIV clk
-	 */
-
-	dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
-	clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
-	__raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
-		     LS1X_CLK_PLL_DIV);
-	__raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
-		     LS1X_CLK_PLL_DIV);
-	clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
-	clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
-
-	return 0;
-}
-
-static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
-{
-	struct cpufreq_frequency_table *freq_tbl;
-	unsigned int pll_freq, freq;
-	int steps, i, ret;
-
-	pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
-
-	steps = 1 << DIV_CPU_WIDTH;
-	freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
-	if (!freq_tbl) {
-		dev_err(ls1x_cpufreq.dev,
-			"failed to alloc cpufreq_frequency_table\n");
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	for (i = 0; i < (steps - 1); i++) {
-		freq = pll_freq / (i + 1);
-		if ((freq < ls1x_cpufreq.min_freq) ||
-		    (freq > ls1x_cpufreq.max_freq))
-			freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
-		else
-			freq_tbl[i].frequency = freq;
-		dev_dbg(ls1x_cpufreq.dev,
-			"cpufreq table: index %d: frequency %d\n", i,
-			freq_tbl[i].frequency);
-	}
-	freq_tbl[i].frequency = CPUFREQ_TABLE_END;
-
-	policy->clk = ls1x_cpufreq.clk;
-	ret = cpufreq_generic_init(policy, freq_tbl, 0);
-	if (ret)
-		kfree(freq_tbl);
-out:
-	return ret;
-}
-
-static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
-{
-	kfree(policy->freq_table);
-	return 0;
-}
-
-static struct cpufreq_driver ls1x_cpufreq_driver = {
-	.name		= "cpufreq-ls1x",
-	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
-	.verify		= cpufreq_generic_frequency_table_verify,
-	.target_index	= ls1x_cpufreq_target,
-	.get		= cpufreq_generic_get,
-	.init		= ls1x_cpufreq_init,
-	.exit		= ls1x_cpufreq_exit,
-	.attr		= cpufreq_generic_attr,
-};
-
-static int ls1x_cpufreq_remove(struct platform_device *pdev)
-{
-	cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
-				    CPUFREQ_TRANSITION_NOTIFIER);
-	cpufreq_unregister_driver(&ls1x_cpufreq_driver);
-
-	return 0;
-}
-
-static int ls1x_cpufreq_probe(struct platform_device *pdev)
-{
-	struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
-	struct clk *clk;
-	int ret;
-
-	if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
-		return -EINVAL;
-
-	ls1x_cpufreq.dev = &pdev->dev;
-
-	clk = devm_clk_get(&pdev->dev, pdata->clk_name);
-	if (IS_ERR(clk)) {
-		dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
-			pdata->clk_name);
-		ret = PTR_ERR(clk);
-		goto out;
-	}
-	ls1x_cpufreq.clk = clk;
-
-	clk = clk_get_parent(clk);
-	if (IS_ERR(clk)) {
-		dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
-			__clk_get_name(ls1x_cpufreq.clk));
-		ret = PTR_ERR(clk);
-		goto out;
-	}
-	ls1x_cpufreq.mux_clk = clk;
-
-	clk = clk_get_parent(clk);
-	if (IS_ERR(clk)) {
-		dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
-			__clk_get_name(ls1x_cpufreq.mux_clk));
-		ret = PTR_ERR(clk);
-		goto out;
-	}
-	ls1x_cpufreq.pll_clk = clk;
-
-	clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
-	if (IS_ERR(clk)) {
-		dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
-			pdata->osc_clk_name);
-		ret = PTR_ERR(clk);
-		goto out;
-	}
-	ls1x_cpufreq.osc_clk = clk;
-
-	ls1x_cpufreq.max_freq = pdata->max_freq;
-	ls1x_cpufreq.min_freq = pdata->min_freq;
-
-	ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
-	if (ret) {
-		dev_err(ls1x_cpufreq.dev,
-			"failed to register cpufreq driver: %d\n", ret);
-		goto out;
-	}
-
-	ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
-					CPUFREQ_TRANSITION_NOTIFIER);
-
-	if (!ret)
-		goto out;
-
-	dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
-		ret);
-
-	cpufreq_unregister_driver(&ls1x_cpufreq_driver);
-out:
-	return ret;
-}
-
-static struct platform_driver ls1x_cpufreq_platdrv = {
-	.driver = {
-		.name	= "ls1x-cpufreq",
-	},
-	.probe		= ls1x_cpufreq_probe,
-	.remove		= ls1x_cpufreq_remove,
-};
-
-module_platform_driver(ls1x_cpufreq_platdrv);
-
-MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
-MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index cc3408f..d9df893 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -13,6 +13,8 @@
 
 #undef DEBUG
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/errno.h>
@@ -174,7 +176,7 @@
 	/* Get first CPU node */
 	cpunode = of_cpu_device_node_get(0);
 	if (cpunode == NULL) {
-		printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
+		pr_err("Can't find any CPU 0 node\n");
 		goto bail_noprops;
 	}
 
@@ -182,8 +184,7 @@
 	/* we actually don't care on which CPU to access PVR */
 	pvr_hi = PVR_VER(mfspr(SPRN_PVR));
 	if (pvr_hi != 0x3c && pvr_hi != 0x44) {
-		printk(KERN_ERR "cpufreq: Unsupported CPU version (%x)\n",
-				pvr_hi);
+		pr_err("Unsupported CPU version (%x)\n", pvr_hi);
 		goto bail_noprops;
 	}
 
@@ -222,8 +223,8 @@
 	maple_pmode_cur = -1;
 	maple_scom_switch_freq(maple_scom_query_freq());
 
-	printk(KERN_INFO "Registering Maple CPU frequency driver\n");
-	printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+	pr_info("Registering Maple CPU frequency driver\n");
+	pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
 		maple_cpu_freqs[1].frequency/1000,
 		maple_cpu_freqs[0].frequency/1000,
 		maple_cpu_freqs[maple_pmode_cur].frequency/1000);
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 2058e6d..6f602c7 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -59,11 +59,8 @@
 static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
 {
 	struct mtk_cpu_dvfs_info *info;
-	struct list_head *list;
 
-	list_for_each(list, &dvfs_info_list) {
-		info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
-
+	list_for_each_entry(info, &dvfs_info_list, list_head) {
 		if (cpumask_test_cpu(cpu, &info->cpus))
 			return info;
 	}
@@ -524,8 +521,7 @@
 
 static int mt8173_cpufreq_probe(struct platform_device *pdev)
 {
-	struct mtk_cpu_dvfs_info *info;
-	struct list_head *list, *tmp;
+	struct mtk_cpu_dvfs_info *info, *tmp;
 	int cpu, ret;
 
 	for_each_possible_cpu(cpu) {
@@ -559,11 +555,9 @@
 	return 0;
 
 release_dvfs_info_list:
-	list_for_each_safe(list, tmp, &dvfs_info_list) {
-		info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
-
+	list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) {
 		mtk_cpu_dvfs_info_release(info);
-		list_del(list);
+		list_del(&info->list_head);
 	}
 
 	return ret;
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
new file mode 100644
index 0000000..e920889
--- /dev/null
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -0,0 +1,107 @@
+/*
+ * CPUFreq support for Armada 370/XP platforms.
+ *
+ * Copyright (C) 2012-2016 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "mvebu-pmsu: " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/resource.h>
+
+static int __init armada_xp_pmsu_cpufreq_init(void)
+{
+	struct device_node *np;
+	struct resource res;
+	int ret, cpu;
+
+	if (!of_machine_is_compatible("marvell,armadaxp"))
+		return 0;
+
+	/*
+	 * In order to have proper cpufreq handling, we need to ensure
+	 * that the Device Tree description of the CPU clock includes
+	 * the definition of the PMU DFS registers. If not, we do not
+	 * register the clock notifier and the cpufreq driver. This
+	 * piece of code is only for compatibility with old Device
+	 * Trees.
+	 */
+	np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
+	if (!np)
+		return 0;
+
+	ret = of_address_to_resource(np, 1, &res);
+	if (ret) {
+		pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
+		of_node_put(np);
+		return 0;
+	}
+
+	of_node_put(np);
+
+	/*
+	 * For each CPU, this loop registers the operating points
+	 * supported (which are the nominal CPU frequency and half of
+	 * it), and registers the clock notifier that will take care
+	 * of doing the PMSU part of a frequency transition.
+	 */
+	for_each_possible_cpu(cpu) {
+		struct device *cpu_dev;
+		struct clk *clk;
+		int ret;
+
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_err("Cannot get CPU %d\n", cpu);
+			continue;
+		}
+
+		clk = clk_get(cpu_dev, 0);
+		if (IS_ERR(clk)) {
+			pr_err("Cannot get clock for CPU %d\n", cpu);
+			return PTR_ERR(clk);
+		}
+
+		/*
+		 * In case of a failure of dev_pm_opp_add(), we don't
+		 * bother with cleaning up the registered OPP (there's
+		 * no function to do so), and simply cancel the
+		 * registration of the cpufreq device.
+		 */
+		ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
+		if (ret) {
+			clk_put(clk);
+			return ret;
+		}
+
+		ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
+		if (ret) {
+			clk_put(clk);
+			return ret;
+		}
+
+		ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
+						  cpumask_of(cpu_dev->id));
+		if (ret)
+			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+				__func__, ret);
+	}
+
+	platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+	return 0;
+}
+device_initcall(armada_xp_pmsu_cpufreq_init);
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index e3866e0..cead9be 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -13,6 +13,9 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -163,13 +166,13 @@
 {
 	mpu_dev = get_cpu_device(0);
 	if (!mpu_dev) {
-		pr_warning("%s: unable to get the mpu device\n", __func__);
+		pr_warn("%s: unable to get the MPU device\n", __func__);
 		return -EINVAL;
 	}
 
 	mpu_reg = regulator_get(mpu_dev, "vcc");
 	if (IS_ERR(mpu_reg)) {
-		pr_warning("%s: unable to get MPU regulator\n", __func__);
+		pr_warn("%s: unable to get MPU regulator\n", __func__);
 		mpu_reg = NULL;
 	} else {
 		/* 
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 5dd95da..fd77812 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -20,6 +20,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -35,8 +37,6 @@
 
 #include "speedstep-lib.h"
 
-#define PFX	"p4-clockmod: "
-
 /*
  * Duty Cycle (3bits), note DC_DISABLE is not specified in
  * intel docs i just use it to mean disable
@@ -124,11 +124,7 @@
 {
 	if (c->x86 == 0x06) {
 		if (cpu_has(c, X86_FEATURE_EST))
-			printk_once(KERN_WARNING PFX "Warning: EST-capable "
-			       "CPU detected. The acpi-cpufreq module offers "
-			       "voltage scaling in addition to frequency "
-			       "scaling. You should use that instead of "
-			       "p4-clockmod, if possible.\n");
+			pr_warn_once("Warning: EST-capable CPU detected. The acpi-cpufreq module offers voltage scaling in addition to frequency scaling. You should use that instead of p4-clockmod, if possible.\n");
 		switch (c->x86_model) {
 		case 0x0E: /* Core */
 		case 0x0F: /* Core Duo */
@@ -152,11 +148,7 @@
 	p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
 
 	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
-		printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
-		       "The speedstep-ich or acpi cpufreq modules offer "
-		       "voltage scaling in addition of frequency scaling. "
-		       "You should use either one instead of p4-clockmod, "
-		       "if possible.\n");
+		pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n");
 		return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
 	}
 
@@ -265,8 +257,7 @@
 
 	ret = cpufreq_register_driver(&p4clockmod_driver);
 	if (!ret)
-		printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
-				"Modulation available\n");
+		pr_info("P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
 
 	return ret;
 }
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 1f49d97..b7b576e 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -13,6 +13,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/errno.h>
@@ -481,13 +483,13 @@
 		freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
 		lenp /= sizeof(u32);
 		if (freqs == NULL || lenp != 2) {
-			printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
+			pr_err("bus-frequencies incorrect or missing\n");
 			return 1;
 		}
 		ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
 						NULL);
 		if (ratio == NULL) {
-			printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
+			pr_err("processor-to-bus-ratio*2 missing\n");
 			return 1;
 		}
 
@@ -550,7 +552,7 @@
 	if (volt_gpio_np)
 		voltage_gpio = read_gpio(volt_gpio_np);
 	if (!voltage_gpio){
-		printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
+		pr_err("missing cpu-vcore-select gpio\n");
 		return 1;
 	}
 
@@ -675,9 +677,9 @@
 	pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
 	ppc_proc_freq = cur_freq * 1000ul;
 
-	printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
-	printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
-	       low_freq/1000, hi_freq/1000, cur_freq/1000);
+	pr_info("Registering PowerMac CPU frequency driver\n");
+	pr_info("Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
+		low_freq/1000, hi_freq/1000, cur_freq/1000);
 
 	return cpufreq_register_driver(&pmac_cpufreq_driver);
 }
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 4ff8687..267e089 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -12,6 +12,8 @@
 
 #undef DEBUG
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/errno.h>
@@ -138,7 +140,7 @@
 		usleep_range(1000, 1000);
 	}
 	if (done == 0)
-		printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+		pr_warn("Timeout in clock slewing !\n");
 }
 
 
@@ -266,7 +268,7 @@
 		rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
 
 	if (rc)
-		printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc);
+		pr_warn("pfunc switch error %d\n", rc);
 
 	/* It's an irq GPIO so we should be able to just block here,
 	 * I'll do that later after I've properly tested the IRQ code for
@@ -282,7 +284,7 @@
 		usleep_range(500, 500);
 	}
 	if (done == 0)
-		printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+		pr_warn("Timeout in clock slewing !\n");
 
 	/* If frequency is going down, last ramp the voltage */
 	if (speed_mode > g5_pmode_cur)
@@ -368,7 +370,7 @@
 	}
 	pvr_hi = (*valp) >> 16;
 	if (pvr_hi != 0x3c && pvr_hi != 0x44) {
-		printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
+		pr_err("Unsupported CPU version\n");
 		goto bail_noprops;
 	}
 
@@ -403,8 +405,7 @@
 
 		root = of_find_node_by_path("/");
 		if (root == NULL) {
-			printk(KERN_ERR "cpufreq: Can't find root of "
-			       "device tree\n");
+			pr_err("Can't find root of device tree\n");
 			goto bail_noprops;
 		}
 		pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
@@ -412,8 +413,7 @@
 			pmf_find_function(root, "slewing-done");
 		if (pfunc_set_vdnap0 == NULL ||
 		    pfunc_vdnap0_complete == NULL) {
-			printk(KERN_ERR "cpufreq: Can't find required "
-			       "platform function\n");
+			pr_err("Can't find required platform function\n");
 			goto bail_noprops;
 		}
 
@@ -453,10 +453,10 @@
 	g5_pmode_cur = -1;
 	g5_switch_freq(g5_query_freq());
 
-	printk(KERN_INFO "Registering G5 CPU frequency driver\n");
-	printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
-	       freq_method, volt_method);
-	printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+	pr_info("Registering G5 CPU frequency driver\n");
+	pr_info("Frequency method: %s, Voltage method: %s\n",
+		freq_method, volt_method);
+	pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
 		g5_cpu_freqs[1].frequency/1000,
 		g5_cpu_freqs[0].frequency/1000,
 		g5_cpu_freqs[g5_pmode_cur].frequency/1000);
@@ -493,7 +493,7 @@
 	if (cpuid != NULL)
 		eeprom = of_get_property(cpuid, "cpuid", NULL);
 	if (eeprom == NULL) {
-		printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
+		pr_err("Can't find cpuid EEPROM !\n");
 		rc = -ENODEV;
 		goto bail;
 	}
@@ -511,7 +511,7 @@
 		break;
 	}
 	if (hwclock == NULL) {
-		printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
+		pr_err("Can't find i2c clock chip !\n");
 		rc = -ENODEV;
 		goto bail;
 	}
@@ -539,7 +539,7 @@
 	/* Check we have minimum requirements */
 	if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
 	    pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
-		printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
+		pr_err("Can't find platform functions !\n");
 		rc = -ENODEV;
 		goto bail;
 	}
@@ -567,7 +567,7 @@
 	/* Get max frequency from device-tree */
 	valp = of_get_property(cpunode, "clock-frequency", NULL);
 	if (!valp) {
-		printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
+		pr_err("Can't find CPU frequency !\n");
 		rc = -ENODEV;
 		goto bail;
 	}
@@ -583,8 +583,7 @@
 
 	/* Check for machines with no useful settings */
 	if (il == ih) {
-		printk(KERN_WARNING "cpufreq: No low frequency mode available"
-		       " on this model !\n");
+		pr_warn("No low frequency mode available on this model !\n");
 		rc = -ENODEV;
 		goto bail;
 	}
@@ -595,7 +594,7 @@
 
 	/* Sanity check */
 	if (min_freq >= max_freq || min_freq < 1000) {
-		printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
+		pr_err("Can't calculate low frequency !\n");
 		rc = -ENXIO;
 		goto bail;
 	}
@@ -619,10 +618,10 @@
 	g5_pmode_cur = -1;
 	g5_switch_freq(g5_query_freq());
 
-	printk(KERN_INFO "Registering G5 CPU frequency driver\n");
-	printk(KERN_INFO "Frequency method: i2c/pfunc, "
-	       "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
-	printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+	pr_info("Registering G5 CPU frequency driver\n");
+	pr_info("Frequency method: i2c/pfunc, Voltage method: %s\n",
+		has_volt ? "i2c/pfunc" : "none");
+	pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
 		g5_cpu_freqs[1].frequency/1000,
 		g5_cpu_freqs[0].frequency/1000,
 		g5_cpu_freqs[g5_pmode_cur].frequency/1000);
@@ -654,7 +653,7 @@
 	/* Get first CPU node */
 	cpunode = of_cpu_device_node_get(0);
 	if (cpunode == NULL) {
-		pr_err("cpufreq: Can't find any CPU node\n");
+		pr_err("Can't find any CPU node\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index e6f24b2..dedd256 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -8,6 +8,8 @@
  *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -22,7 +24,6 @@
 #define POWERNOW_IOPORT 0xfff0          /* it doesn't matter where, as long
 					   as it is unused */
 
-#define PFX "powernow-k6: "
 static unsigned int                     busfreq;   /* FSB, in 10 kHz */
 static unsigned int                     max_multiplier;
 
@@ -141,7 +142,7 @@
 {
 
 	if (clock_ratio[best_i].driver_data > max_multiplier) {
-		printk(KERN_ERR PFX "invalid target frequency\n");
+		pr_err("invalid target frequency\n");
 		return -EINVAL;
 	}
 
@@ -175,13 +176,14 @@
 				max_multiplier = param_max_multiplier;
 				goto have_max_multiplier;
 			}
-		printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
+		pr_err("invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
 		return -EINVAL;
 	}
 
 	if (!max_multiplier) {
-		printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
-		printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
+		pr_warn("unknown frequency %u, cannot determine current multiplier\n",
+			khz);
+		pr_warn("use module parameters max_multiplier and bus_frequency\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -193,7 +195,7 @@
 			busfreq = param_busfreq / 10;
 			goto have_busfreq;
 		}
-		printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
+		pr_err("invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
 		return -EINVAL;
 	}
 
@@ -275,7 +277,7 @@
 		return -ENODEV;
 
 	if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
-		printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n");
+		pr_info("PowerNow IOPORT region already used\n");
 		return -EIO;
 	}
 
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index c1ae199..9f013ed 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -13,6 +13,8 @@
  *  - We disable half multipliers if ACPI is used on A0 stepping CPUs.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -35,9 +37,6 @@
 
 #include "powernow-k7.h"
 
-#define PFX "powernow: "
-
-
 struct psb_s {
 	u8 signature[10];
 	u8 tableversion;
@@ -127,14 +126,13 @@
 	maxei = cpuid_eax(0x80000000);
 	if (maxei < 0x80000007) {	/* Any powernow info ? */
 #ifdef MODULE
-		printk(KERN_INFO PFX "No powernow capabilities detected\n");
+		pr_info("No powernow capabilities detected\n");
 #endif
 		return 0;
 	}
 
 	if ((c->x86_model == 6) && (c->x86_mask == 0)) {
-		printk(KERN_INFO PFX "K7 660[A0] core detected, "
-				"enabling errata workarounds\n");
+		pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
 		have_a0 = 1;
 	}
 
@@ -144,22 +142,22 @@
 	if (!(edx & (1 << 1 | 1 << 2)))
 		return 0;
 
-	printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
+	pr_info("PowerNOW! Technology present. Can scale: ");
 
 	if (edx & 1 << 1) {
-		printk("frequency");
+		pr_cont("frequency");
 		can_scale_bus = 1;
 	}
 
 	if ((edx & (1 << 1 | 1 << 2)) == 0x6)
-		printk(" and ");
+		pr_cont(" and ");
 
 	if (edx & 1 << 2) {
-		printk("voltage");
+		pr_cont("voltage");
 		can_scale_vid = 1;
 	}
 
-	printk(".\n");
+	pr_cont("\n");
 	return 1;
 }
 
@@ -427,16 +425,14 @@
 err05:
 	kfree(acpi_processor_perf);
 err0:
-	printk(KERN_WARNING PFX "ACPI perflib can not be used on "
-			"this platform\n");
+	pr_warn("ACPI perflib can not be used on this platform\n");
 	acpi_processor_perf = NULL;
 	return retval;
 }
 #else
 static int powernow_acpi_init(void)
 {
-	printk(KERN_INFO PFX "no support for ACPI processor found."
-	       "  Please recompile your kernel with ACPI processor\n");
+	pr_info("no support for ACPI processor found - please recompile your kernel with ACPI processor\n");
 	return -EINVAL;
 }
 #endif
@@ -468,8 +464,7 @@
 			psb = (struct psb_s *) p;
 			pr_debug("Table version: 0x%x\n", psb->tableversion);
 			if (psb->tableversion != 0x12) {
-				printk(KERN_INFO PFX "Sorry, only v1.2 tables"
-						" supported right now\n");
+				pr_info("Sorry, only v1.2 tables supported right now\n");
 				return -ENODEV;
 			}
 
@@ -481,10 +476,8 @@
 
 			latency = psb->settlingtime;
 			if (latency < 100) {
-				printk(KERN_INFO PFX "BIOS set settling time "
-						"to %d microseconds. "
-						"Should be at least 100. "
-						"Correcting.\n", latency);
+				pr_info("BIOS set settling time to %d microseconds. Should be at least 100. Correcting.\n",
+					latency);
 				latency = 100;
 			}
 			pr_debug("Settling Time: %d microseconds.\n",
@@ -516,10 +509,9 @@
 						p += 2;
 				}
 			}
-			printk(KERN_INFO PFX "No PST tables match this cpuid "
-					"(0x%x)\n", etuple);
-			printk(KERN_INFO PFX "This is indicative of a broken "
-					"BIOS.\n");
+			pr_info("No PST tables match this cpuid (0x%x)\n",
+				etuple);
+			pr_info("This is indicative of a broken BIOS\n");
 
 			return -EINVAL;
 		}
@@ -552,7 +544,7 @@
 	sgtc = 100 * m * latency;
 	sgtc = sgtc / 3;
 	if (sgtc > 0xfffff) {
-		printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc);
+		pr_warn("SGTC too large %d\n", sgtc);
 		sgtc = 0xfffff;
 	}
 	return sgtc;
@@ -574,14 +566,10 @@
 
 static int acer_cpufreq_pst(const struct dmi_system_id *d)
 {
-	printk(KERN_WARNING PFX
-		"%s laptop with broken PST tables in BIOS detected.\n",
+	pr_warn("%s laptop with broken PST tables in BIOS detected\n",
 		d->ident);
-	printk(KERN_WARNING PFX
-		"You need to downgrade to 3A21 (09/09/2002), or try a newer "
-		"BIOS than 3A71 (01/20/2003)\n");
-	printk(KERN_WARNING PFX
-		"cpufreq scaling has been disabled as a result of this.\n");
+	pr_warn("You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");
+	pr_warn("cpufreq scaling has been disabled as a result of this\n");
 	return 0;
 }
 
@@ -616,40 +604,38 @@
 
 	fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
 	if (!fsb) {
-		printk(KERN_WARNING PFX "can not determine bus frequency\n");
+		pr_warn("can not determine bus frequency\n");
 		return -EINVAL;
 	}
 	pr_debug("FSB: %3dMHz\n", fsb/1000);
 
 	if (dmi_check_system(powernow_dmi_table) || acpi_force) {
-		printk(KERN_INFO PFX "PSB/PST known to be broken.  "
-				"Trying ACPI instead\n");
+		pr_info("PSB/PST known to be broken - trying ACPI instead\n");
 		result = powernow_acpi_init();
 	} else {
 		result = powernow_decode_bios(fidvidstatus.bits.MFID,
 				fidvidstatus.bits.SVID);
 		if (result) {
-			printk(KERN_INFO PFX "Trying ACPI perflib\n");
+			pr_info("Trying ACPI perflib\n");
 			maximum_speed = 0;
 			minimum_speed = -1;
 			latency = 0;
 			result = powernow_acpi_init();
 			if (result) {
-				printk(KERN_INFO PFX
-					"ACPI and legacy methods failed\n");
+				pr_info("ACPI and legacy methods failed\n");
 			}
 		} else {
 			/* SGTC use the bus clock as timer */
 			latency = fixup_sgtc();
-			printk(KERN_INFO PFX "SGTC: %d\n", latency);
+			pr_info("SGTC: %d\n", latency);
 		}
 	}
 
 	if (result)
 		return result;
 
-	printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
-				minimum_speed/1000, maximum_speed/1000);
+	pr_info("Minimum speed %d MHz - Maximum speed %d MHz\n",
+		minimum_speed/1000, maximum_speed/1000);
 
 	policy->cpuinfo.transition_latency =
 		cpufreq_scale(2000000UL, fsb, latency);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 39ac78c..54c4536 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -36,12 +36,56 @@
 #include <asm/reg.h>
 #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
 #include <asm/opal.h>
+#include <linux/timer.h>
 
 #define POWERNV_MAX_PSTATES	256
 #define PMSR_PSAFE_ENABLE	(1UL << 30)
 #define PMSR_SPR_EM_DISABLE	(1UL << 31)
 #define PMSR_MAX(x)		((x >> 32) & 0xFF)
 
+#define MAX_RAMP_DOWN_TIME				5120
+/*
+ * On an idle system we want the global pstate to ramp-down from max value to
+ * min over a span of ~5 secs. Also we want it to initially ramp-down slowly and
+ * then ramp-down rapidly later on.
+ *
+ * This gives a percentage rampdown for time elapsed in milliseconds.
+ * ramp_down_percentage = ((ms * ms) >> 18)
+ *			~= 3.8 * (sec * sec)
+ *
+ * At 0 ms	ramp_down_percent = 0
+ * At 5120 ms	ramp_down_percent = 100
+ */
+#define ramp_down_percent(time)		((time * time) >> 18)
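
As a quick check of the endpoints documented in the comment above:

    ramp_down_percent(2560) = (2560 * 2560) >> 18 =  6553600 / 262144 =  25
    ramp_down_percent(5120) = (5120 * 5120) >> 18 = 26214400 / 262144 = 100

so only a quarter of the range is shed by the halfway point and the remaining
three quarters in the second half, which is the intended slow-then-fast
ramp-down.
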
+
+/* Interval after which the timer is queued to bring down global pstate */
+#define GPSTATE_TIMER_INTERVAL				2000
+
+/**
+ * struct global_pstate_info -	Per policy data structure to maintain history of
+ *				global pstates
+ * @highest_lpstate:		The local pstate from which we are ramping down
+ * @elapsed_time:		Time in ms spent in ramping down from
+ *				highest_lpstate
+ * @last_sampled_time:		Time from boot in ms when global pstates were
+ *				last set
+ * @last_lpstate,last_gpstate:	Last set values for local and global pstates
+ * @timer:			Is used for ramping down if cpu goes idle for
+ *				a long time with global pstate held high
+ * @gpstate_lock:		A spinlock to maintain synchronization between
+ *				routines called by the timer handler and
+ *				governor's target_index calls
+ */
+struct global_pstate_info {
+	int highest_lpstate;
+	unsigned int elapsed_time;
+	unsigned int last_sampled_time;
+	int last_lpstate;
+	int last_gpstate;
+	spinlock_t gpstate_lock;
+	struct timer_list timer;
+};
+
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
 static bool rebooting, throttled, occ_reset;
 
@@ -94,6 +138,17 @@
 	int nr_pstates;
 } powernv_pstate_info;
 
+static inline void reset_gpstates(struct cpufreq_policy *policy)
+{
+	struct global_pstate_info *gpstates = policy->driver_data;
+
+	gpstates->highest_lpstate = 0;
+	gpstates->elapsed_time = 0;
+	gpstates->last_sampled_time = 0;
+	gpstates->last_lpstate = 0;
+	gpstates->last_gpstate = 0;
+}
+
 /*
  * Initialize the freq table based on data obtained
  * from the firmware passed via device-tree
@@ -285,6 +340,7 @@
 struct powernv_smp_call_data {
 	unsigned int freq;
 	int pstate_id;
+	int gpstate_id;
 };
 
 /*
@@ -343,19 +399,21 @@
  * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
  * on this CPU should be present in freq_data->pstate_id.
  */
-static void set_pstate(void *freq_data)
+static void set_pstate(void *data)
 {
 	unsigned long val;
-	unsigned long pstate_ul =
-		((struct powernv_smp_call_data *) freq_data)->pstate_id;
+	struct powernv_smp_call_data *freq_data = data;
+	unsigned long pstate_ul = freq_data->pstate_id;
+	unsigned long gpstate_ul = freq_data->gpstate_id;
 
 	val = get_pmspr(SPRN_PMCR);
 	val = val & 0x0000FFFFFFFFFFFFULL;
 
 	pstate_ul = pstate_ul & 0xFF;
+	gpstate_ul = gpstate_ul & 0xFF;
 
 	/* Set both global(bits 56..63) and local(bits 48..55) PStates */
-	val = val | (pstate_ul << 56) | (pstate_ul << 48);
+	val = val | (gpstate_ul << 56) | (pstate_ul << 48);
 
 	pr_debug("Setting cpu %d pmcr to %016lX\n",
 			raw_smp_processor_id(), val);
@@ -424,6 +482,111 @@
 	}
 }
 
+/**
+ * calc_global_pstate - Calculate global pstate
+ * @elapsed_time:	Elapsed time in milliseconds
+ * @local_pstate:	New local pstate
+ * @highest_lpstate:	pstate from which it is ramping down
+ *
+ * Finds the appropriate global pstate based on the pstate from which it is
+ * ramping down and the time elapsed while ramping down. It follows a quadratic
+ * equation which ensures that the ramp-down to pmin completes within 5 seconds.
+ */
+static inline int calc_global_pstate(unsigned int elapsed_time,
+				     int highest_lpstate, int local_pstate)
+{
+	int pstate_diff;
+
+	/*
+	 * Using ramp_down_percent we get the percentage of ramp-down
+	 * that we are expecting to be dropping. The difference between
+	 * highest_lpstate and powernv_pstate_info.min gives an absolute
+	 * number of how many pstates we will eventually drop by the end of
+	 * 5 seconds; then just scale it to get the number of pstates to drop.
+	 */
+	pstate_diff =  ((int)ramp_down_percent(elapsed_time) *
+			(highest_lpstate - powernv_pstate_info.min)) / 100;
+
+	/* Ensure that the global pstate is >= the local pstate */
+	if (highest_lpstate - pstate_diff < local_pstate)
+		return local_pstate;
+	else
+		return highest_lpstate - pstate_diff;
+}
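
To see how the quadratic behaves over a full ramp-down, here is a stand-alone
user-space sketch of the same logic; the pstate values are assumed for
illustration (highest_lpstate = 0, powernv_pstate_info.min = -48, local pstate
pinned at -48) and are not taken from real firmware:

    #include <stdio.h>

    #define ramp_down_percent(time) ((time * time) >> 18)

    /* Same logic as calc_global_pstate(), with the minimum hard-coded. */
    static int calc_gpstate(unsigned int elapsed_time, int highest_lpstate,
                            int local_pstate)
    {
            int min = -48;  /* assumed powernv_pstate_info.min */
            int pstate_diff = ((int)ramp_down_percent(elapsed_time) *
                               (highest_lpstate - min)) / 100;

            if (highest_lpstate - pstate_diff < local_pstate)
                    return local_pstate;
            return highest_lpstate - pstate_diff;
    }

    int main(void)
    {
            unsigned int t;

            for (t = 0; t <= 5120; t += 1024)
                    printf("%4u ms -> gpstate %d\n",
                           t, calc_gpstate(t, 0, -48));
            return 0;
    }

It prints 0, -1, -7, -17, -30, -48: barely any drop in the first second, most
of it in the last two, ending at the local pstate after 5120 ms.
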
+
+static inline void  queue_gpstate_timer(struct global_pstate_info *gpstates)
+{
+	unsigned int timer_interval;
+
+	/*
+	 * Set up the timer to fire after GPSTATE_TIMER_INTERVAL ms, but if
+	 * that would push the total ramp-down time past MAX_RAMP_DOWN_TIME ms,
+	 * set the timer so that it fires exactly at MAX_RAMP_DOWN_TIME ms
+	 * of ramp-down time instead.
+	 */
+	if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
+	     > MAX_RAMP_DOWN_TIME)
+		timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
+	else
+		timer_interval = GPSTATE_TIMER_INTERVAL;
+
+	mod_timer_pinned(&gpstates->timer, jiffies +
+			msecs_to_jiffies(timer_interval));
+}
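
For example, with GPSTATE_TIMER_INTERVAL = 2000 and MAX_RAMP_DOWN_TIME = 5120
as defined above, a policy that has already spent 4000 ms ramping down gets
its next timer queued 5120 - 4000 = 1120 ms out instead of the usual 2000 ms,
so the final sample lands exactly on the 5120 ms boundary.
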
+
+/**
+ * gpstate_timer_handler
+ *
+ * @data: pointer to cpufreq_policy on which timer was queued
+ *
+ * This handler brings the global pstate down closer to the local pstate
+ * according to the quadratic equation. It queues a new timer if the global
+ * pstate is still not equal to the local pstate.
+ */
+void gpstate_timer_handler(unsigned long data)
+{
+	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
+	struct global_pstate_info *gpstates = policy->driver_data;
+	int gpstate_id;
+	unsigned int time_diff = jiffies_to_msecs(jiffies)
+					- gpstates->last_sampled_time;
+	struct powernv_smp_call_data freq_data;
+
+	if (!spin_trylock(&gpstates->gpstate_lock))
+		return;
+
+	gpstates->last_sampled_time += time_diff;
+	gpstates->elapsed_time += time_diff;
+	freq_data.pstate_id = gpstates->last_lpstate;
+
+	if ((gpstates->last_gpstate == freq_data.pstate_id) ||
+	    (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
+		gpstate_id = freq_data.pstate_id;
+		reset_gpstates(policy);
+		gpstates->highest_lpstate = freq_data.pstate_id;
+	} else {
+		gpstate_id = calc_global_pstate(gpstates->elapsed_time,
+						gpstates->highest_lpstate,
+						freq_data.pstate_id);
+	}
+
+	/*
+	 * If the local pstate is equal to the global pstate, ramp-down is
+	 * over, so the timer does not need to be queued again.
+	 */
+	if (gpstate_id != freq_data.pstate_id)
+		queue_gpstate_timer(gpstates);
+
+	freq_data.gpstate_id = gpstate_id;
+	gpstates->last_gpstate = freq_data.gpstate_id;
+	gpstates->last_lpstate = freq_data.pstate_id;
+
+	spin_unlock(&gpstates->gpstate_lock);
+
+	/* Timer may get migrated to a different cpu on cpu hot unplug */
+	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+}
+
 /*
  * powernv_cpufreq_target_index: Sets the frequency corresponding to
  * the cpufreq table entry indexed by new_index on the cpus in the
@@ -433,6 +596,8 @@
 					unsigned int new_index)
 {
 	struct powernv_smp_call_data freq_data;
+	unsigned int cur_msec, gpstate_id;
+	struct global_pstate_info *gpstates = policy->driver_data;
 
 	if (unlikely(rebooting) && new_index != get_nominal_index())
 		return 0;
@@ -440,28 +605,81 @@
 	if (!throttled)
 		powernv_cpufreq_throttle_check(NULL);
 
+	cur_msec = jiffies_to_msecs(get_jiffies_64());
+
+	spin_lock(&gpstates->gpstate_lock);
 	freq_data.pstate_id = powernv_freqs[new_index].driver_data;
 
+	if (!gpstates->last_sampled_time) {
+		gpstate_id = freq_data.pstate_id;
+		gpstates->highest_lpstate = freq_data.pstate_id;
+		goto gpstates_done;
+	}
+
+	if (gpstates->last_gpstate > freq_data.pstate_id) {
+		gpstates->elapsed_time += cur_msec -
+						 gpstates->last_sampled_time;
+
+		/*
+		 * If it has been ramping down for more than MAX_RAMP_DOWN_TIME,
+		 * reset all global pstate related data and set it equal to the
+		 * local pstate to start fresh.
+		 */
+		if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
+			reset_gpstates(policy);
+			gpstates->highest_lpstate = freq_data.pstate_id;
+			gpstate_id = freq_data.pstate_id;
+		} else {
+		/* Elapsed_time is less than 5 seconds, continue to ramp down */
+			gpstate_id = calc_global_pstate(gpstates->elapsed_time,
+							gpstates->highest_lpstate,
+							freq_data.pstate_id);
+		}
+	} else {
+		reset_gpstates(policy);
+		gpstates->highest_lpstate = freq_data.pstate_id;
+		gpstate_id = freq_data.pstate_id;
+	}
+
+	/*
+	 * If the local pstate is equal to the global pstate, ramp-down is
+	 * over, so the timer does not need to be queued again.
+	 */
+	if (gpstate_id != freq_data.pstate_id)
+		queue_gpstate_timer(gpstates);
+	else
+		del_timer_sync(&gpstates->timer);
+
+gpstates_done:
+	freq_data.gpstate_id = gpstate_id;
+	gpstates->last_sampled_time = cur_msec;
+	gpstates->last_gpstate = freq_data.gpstate_id;
+	gpstates->last_lpstate = freq_data.pstate_id;
+
+	spin_unlock(&gpstates->gpstate_lock);
+
 	/*
 	 * Use smp_call_function to send IPI and execute the
 	 * mtspr on target CPU.  We could do that without IPI
 	 * if current CPU is within policy->cpus (core)
 	 */
 	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
-
 	return 0;
 }
 
 static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	int base, i;
+	int base, i, ret;
+	struct kernfs_node *kn;
+	struct global_pstate_info *gpstates;
 
 	base = cpu_first_thread_sibling(policy->cpu);
 
 	for (i = 0; i < threads_per_core; i++)
 		cpumask_set_cpu(base + i, policy->cpus);
 
-	if (!policy->driver_data) {
+	kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
+	if (!kn) {
 		int ret;
 
 		ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
@@ -470,13 +688,37 @@
 				policy->cpu);
 			return ret;
 		}
-		/*
-		 * policy->driver_data is used as a flag for one-time
-		 * creation of throttle sysfs files.
-		 */
-		policy->driver_data = policy;
+	} else {
+		kernfs_put(kn);
 	}
-	return cpufreq_table_validate_and_show(policy, powernv_freqs);
+
+	gpstates =  kzalloc(sizeof(*gpstates), GFP_KERNEL);
+	if (!gpstates)
+		return -ENOMEM;
+
+	policy->driver_data = gpstates;
+
+	/* initialize timer */
+	init_timer_deferrable(&gpstates->timer);
+	gpstates->timer.data = (unsigned long)policy;
+	gpstates->timer.function = gpstate_timer_handler;
+	gpstates->timer.expires = jiffies +
+				msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
+	spin_lock_init(&gpstates->gpstate_lock);
+	ret = cpufreq_table_validate_and_show(policy, powernv_freqs);
+
+	if (ret < 0)
+		kfree(policy->driver_data);
+
+	return ret;
+}
+
+static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	/* timer is deleted in cpufreq_cpu_stop() */
+	kfree(policy->driver_data);
+
+	return 0;
 }
 
 static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
@@ -604,15 +846,19 @@
 static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
 	struct powernv_smp_call_data freq_data;
+	struct global_pstate_info *gpstates = policy->driver_data;
 
 	freq_data.pstate_id = powernv_pstate_info.min;
+	freq_data.gpstate_id = powernv_pstate_info.min;
 	smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
+	del_timer_sync(&gpstates->timer);
 }
 
 static struct cpufreq_driver powernv_cpufreq_driver = {
 	.name		= "powernv-cpufreq",
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.init		= powernv_cpufreq_cpu_init,
+	.exit		= powernv_cpufreq_cpu_exit,
 	.verify		= cpufreq_generic_frequency_table_verify,
 	.target_index	= powernv_cpufreq_target_index,
 	.get		= powernv_cpufreq_get,
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
index b4c00a5..3eace72 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -17,7 +17,7 @@
 
 int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
 
-#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
+#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
 extern bool cbe_cpufreq_has_pmi;
 #else
 #define cbe_cpufreq_has_pmi (0)
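
IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI) evaluates to 1 when the option is built in
(=y) or built as a module (=m), so the shorter guard accepts exactly the same
configurations as the old defined()/defined(_MODULE) pair. A minimal
illustration with a hypothetical CONFIG_FOO:

    /* Old style: spell out both the built-in and the module symbol. */
    #if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
    extern int foo_available;
    #endif

    /* New style: equivalent, and harder to get wrong. */
    #if IS_ENABLED(CONFIG_FOO)
    extern int foo_available;
    #endif
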
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index 7969f76..7c4cd5c 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -23,7 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/timer.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of_platform.h>
 
 #include <asm/processor.h>
@@ -142,15 +142,4 @@
 
 	return 0;
 }
-
-static void __exit cbe_cpufreq_pmi_exit(void)
-{
-	cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
-	pmi_unregister_handler(&cbe_pmi_handler);
-}
-
-module_init(cbe_cpufreq_pmi_init);
-module_exit(cbe_cpufreq_pmi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
+device_initcall(cbe_cpufreq_pmi_init);
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 46fee15..ce345bf 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -29,6 +29,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -186,8 +188,7 @@
 
 	ret = regulator_set_voltage(vcc_core, vmin, vmax);
 	if (ret)
-		pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
-		       vmin, vmax);
+		pr_err("Failed to set vcc_core in [%dmV..%dmV]\n", vmin, vmax);
 	return ret;
 }
 
@@ -195,10 +196,10 @@
 {
 	vcc_core = regulator_get(NULL, "vcc_core");
 	if (IS_ERR(vcc_core)) {
-		pr_info("cpufreq: Didn't find vcc_core regulator\n");
+		pr_info("Didn't find vcc_core regulator\n");
 		vcc_core = NULL;
 	} else {
-		pr_info("cpufreq: Found vcc_core regulator\n");
+		pr_info("Found vcc_core regulator\n");
 	}
 }
 #else
@@ -233,9 +234,8 @@
 {
 	if (!pxa27x_maxfreq) {
 		pxa27x_maxfreq = 416000;
-		printk(KERN_INFO "PXA CPU 27x max frequency not defined "
-		       "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
-		       pxa27x_maxfreq);
+		pr_info("PXA CPU 27x max frequency not defined (pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
+			pxa27x_maxfreq);
 	} else {
 		pxa27x_maxfreq *= 1000;
 	}
@@ -408,7 +408,7 @@
 	 */
 	if (cpu_is_pxa25x()) {
 		find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
-		pr_info("PXA255 cpufreq using %s frequency table\n",
+		pr_info("using %s frequency table\n",
 			pxa255_turbo_table ? "turbo" : "run");
 
 		cpufreq_table_validate_and_show(policy, pxa255_freq_table);
@@ -417,7 +417,7 @@
 		cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
 	}
 
-	printk(KERN_INFO "PXA CPU frequency change support initialized\n");
+	pr_info("frequency change support initialized\n");
 
 	return 0;
 }
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index b23e525..53d8c3f 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -301,10 +301,11 @@
 	return -ENODEV;
 }
 
-static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
 	struct cpu_data *data = policy->driver_data;
 
+	cpufreq_cooling_unregister(data->cdev);
 	kfree(data->pclk);
 	kfree(data->table);
 	kfree(data);
@@ -333,8 +334,8 @@
 		cpud->cdev = of_cpufreq_cooling_register(np,
 							 policy->related_cpus);
 
-		if (IS_ERR(cpud->cdev)) {
-			pr_err("Failed to register cooling device cpu%d: %ld\n",
+		if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
+			pr_err("cpu%d is not running as cooling device: %ld\n",
 					policy->cpu, PTR_ERR(cpud->cdev));
 
 			cpud->cdev = NULL;
@@ -348,7 +349,7 @@
 	.name		= "qoriq_cpufreq",
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.init		= qoriq_cpufreq_cpu_init,
-	.exit		= __exit_p(qoriq_cpufreq_cpu_exit),
+	.exit		= qoriq_cpufreq_cpu_exit,
 	.verify		= cpufreq_generic_frequency_table_verify,
 	.target_index	= qoriq_cpufreq_target,
 	.get		= cpufreq_generic_get,
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
index eb26213..b04b6f0 100644
--- a/drivers/cpufreq/s3c2412-cpufreq.c
+++ b/drivers/cpufreq/s3c2412-cpufreq.c
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -197,21 +199,20 @@
 
 	hclk = clk_get(NULL, "hclk");
 	if (IS_ERR(hclk)) {
-		printk(KERN_ERR "%s: cannot find hclk clock\n", __func__);
+		pr_err("cannot find hclk clock\n");
 		return -ENOENT;
 	}
 
 	fclk = clk_get(NULL, "fclk");
 	if (IS_ERR(fclk)) {
-		printk(KERN_ERR "%s: cannot find fclk clock\n", __func__);
+		pr_err("cannot find fclk clock\n");
 		goto err_fclk;
 	}
 
 	fclk_rate = clk_get_rate(fclk);
 	if (fclk_rate > 200000000) {
-		printk(KERN_INFO
-		       "%s: fclk %ld MHz, assuming 266MHz capable part\n",
-		       __func__, fclk_rate / 1000000);
+		pr_info("fclk %ld MHz, assuming 266MHz capable part\n",
+			fclk_rate / 1000000);
 		s3c2412_cpufreq_info.max.fclk = 266000000;
 		s3c2412_cpufreq_info.max.hclk = 133000000;
 		s3c2412_cpufreq_info.max.pclk =  66000000;
@@ -219,13 +220,13 @@
 
 	armclk = clk_get(NULL, "armclk");
 	if (IS_ERR(armclk)) {
-		printk(KERN_ERR "%s: cannot find arm clock\n", __func__);
+		pr_err("cannot find arm clock\n");
 		goto err_armclk;
 	}
 
 	xtal = clk_get(NULL, "xtal");
 	if (IS_ERR(xtal)) {
-		printk(KERN_ERR "%s: cannot find xtal clock\n", __func__);
+		pr_err("cannot find xtal clock\n");
 		goto err_xtal;
 	}
 
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index 0129f5c..d0d75b6 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -11,6 +11,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -66,7 +68,7 @@
 		     __func__, fclk, armclk, hclk_max);
 
 	if (armclk > fclk) {
-		printk(KERN_WARNING "%s: armclk > fclk\n", __func__);
+		pr_warn("%s: armclk > fclk\n", __func__);
 		armclk = fclk;
 	}
 
@@ -273,7 +275,7 @@
 	armclk = s3c_cpufreq_clk_get(NULL, "armclk");
 
 	if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
-		printk(KERN_ERR "%s: failed to get clocks\n", __func__);
+		pr_err("%s: failed to get clocks\n", __func__);
 		return -ENOENT;
 	}
 
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
index 9b7b428..4d976e8 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
@@ -178,7 +180,7 @@
 {
 	dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
 	if (IS_ERR(dbgfs_root)) {
-		printk(KERN_ERR "%s: error creating debugfs root\n", __func__);
+		pr_err("%s: error creating debugfs root\n", __func__);
 		return PTR_ERR(dbgfs_root);
 	}
 
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 68ef8fd..ae8eaed 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -175,7 +177,7 @@
 	cpu_new.freq.fclk = cpu_new.pll.frequency;
 
 	if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
-		printk(KERN_ERR "no divisors for %d\n", target_freq);
+		pr_err("no divisors for %d\n", target_freq);
 		goto err_notpossible;
 	}
 
@@ -187,7 +189,7 @@
 
 	if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
 		if (s3c_cpufreq_calcio(&cpu_new) < 0) {
-			printk(KERN_ERR "%s: no IO timings\n", __func__);
+			pr_err("%s: no IO timings\n", __func__);
 			goto err_notpossible;
 		}
 	}
@@ -262,7 +264,7 @@
 	return 0;
 
  err_notpossible:
-	printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+	pr_err("no compatible settings for %d\n", target_freq);
 	return -EINVAL;
 }
 
@@ -331,7 +333,7 @@
 						     &index);
 
 		if (ret < 0) {
-			printk(KERN_ERR "%s: no PLL available\n", __func__);
+			pr_err("%s: no PLL available\n", __func__);
 			goto err_notpossible;
 		}
 
@@ -346,7 +348,7 @@
 	return s3c_cpufreq_settarget(policy, target_freq, pll);
 
  err_notpossible:
-	printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+	pr_err("no compatible settings for %d\n", target_freq);
 	return -EINVAL;
 }
 
@@ -356,7 +358,7 @@
 
 	clk = clk_get(dev, name);
 	if (IS_ERR(clk))
-		printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name);
+		pr_err("failed to get clock '%s'\n", name);
 
 	return clk;
 }
@@ -378,15 +380,16 @@
 
 	if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
 	    IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
-		printk(KERN_ERR "%s: could not get clock(s)\n", __func__);
+		pr_err("%s: could not get clock(s)\n", __func__);
 		return -ENOENT;
 	}
 
-	printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__,
-	       clk_get_rate(clk_fclk) / 1000,
-	       clk_get_rate(clk_hclk) / 1000,
-	       clk_get_rate(clk_pclk) / 1000,
-	       clk_get_rate(clk_arm) / 1000);
+	pr_info("%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n",
+		__func__,
+		clk_get_rate(clk_fclk) / 1000,
+		clk_get_rate(clk_hclk) / 1000,
+		clk_get_rate(clk_pclk) / 1000,
+		clk_get_rate(clk_arm) / 1000);
 
 	return 0;
 }
@@ -424,7 +427,7 @@
 
 	ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
 	if (ret) {
-		printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__);
+		pr_err("%s: failed to reset pll/freq\n", __func__);
 		return ret;
 	}
 
@@ -449,13 +452,12 @@
 int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
 {
 	if (!info || !info->name) {
-		printk(KERN_ERR "%s: failed to pass valid information\n",
-		       __func__);
+		pr_err("%s: failed to pass valid information\n", __func__);
 		return -EINVAL;
 	}
 
-	printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n",
-	       info->name);
+	pr_info("S3C24XX CPU Frequency driver, %s cpu support\n",
+		info->name);
 
 	/* check our driver info has valid data */
 
@@ -478,7 +480,7 @@
 	struct s3c_cpufreq_board *ours;
 
 	if (!board) {
-		printk(KERN_INFO "%s: no board data\n", __func__);
+		pr_info("%s: no board data\n", __func__);
 		return -EINVAL;
 	}
 
@@ -487,7 +489,7 @@
 
 	ours = kzalloc(sizeof(*ours), GFP_KERNEL);
 	if (ours == NULL) {
-		printk(KERN_ERR "%s: no memory\n", __func__);
+		pr_err("%s: no memory\n", __func__);
 		return -ENOMEM;
 	}
 
@@ -502,15 +504,15 @@
 	int ret;
 
 	if (!cpu_cur.info->get_iotiming) {
-		printk(KERN_ERR "%s: get_iotiming undefined\n", __func__);
+		pr_err("%s: get_iotiming undefined\n", __func__);
 		return -ENOENT;
 	}
 
-	printk(KERN_INFO "%s: working out IO settings\n", __func__);
+	pr_info("%s: working out IO settings\n", __func__);
 
 	ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
 	if (ret)
-		printk(KERN_ERR "%s: failed to get timings\n", __func__);
+		pr_err("%s: failed to get timings\n", __func__);
 
 	return ret;
 }
@@ -561,7 +563,7 @@
 	val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
 	val |= calc_locktime(rate, cpu_cur.info->locktime_m);
 
-	printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val);
+	pr_info("%s: new locktime is 0x%08x\n", __func__, val);
 	__raw_writel(val, S3C2410_LOCKTIME);
 }
 
@@ -580,7 +582,7 @@
 
 	ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
 	if (!ftab) {
-		printk(KERN_ERR "%s: no memory for tables\n", __func__);
+		pr_err("%s: no memory for tables\n", __func__);
 		return -ENOMEM;
 	}
 
@@ -608,15 +610,14 @@
 		if (cpu_cur.board->auto_io) {
 			ret = s3c_cpufreq_auto_io();
 			if (ret) {
-				printk(KERN_ERR "%s: failed to get io timing\n",
+				pr_err("%s: failed to get io timing\n",
 				       __func__);
 				goto out;
 			}
 		}
 
 		if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
-			printk(KERN_ERR "%s: no IO support registered\n",
-			       __func__);
+			pr_err("%s: no IO support registered\n", __func__);
 			ret = -EINVAL;
 			goto out;
 		}
@@ -666,9 +667,9 @@
 		vals += plls_no;
 		vals->frequency = CPUFREQ_TABLE_END;
 
-		printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no);
+		pr_info("%d PLL entries\n", plls_no);
 	} else
-		printk(KERN_ERR "cpufreq: no memory for PLL tables\n");
+		pr_err("no memory for PLL tables\n");
 
 	return vals ? 0 : -ENOMEM;
 }
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index a145b31..06d8591 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -9,6 +9,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -205,7 +207,7 @@
 	} else if (ch == DMC1) {
 		reg = (dmc_base[1] + 0x30);
 	} else {
-		printk(KERN_ERR "Cannot find DMC port\n");
+		pr_err("Cannot find DMC port\n");
 		return;
 	}
 
@@ -534,7 +536,7 @@
 	mem_type = check_mem_type(dmc_base[0]);
 
 	if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
-		printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
+		pr_err("CPUFreq doesn't support this memory type\n");
 		ret = -EINVAL;
 		goto out_dmc1;
 	}
@@ -635,13 +637,13 @@
 
 	arm_regulator = regulator_get(NULL, "vddarm");
 	if (IS_ERR(arm_regulator)) {
-		pr_err("failed to get regulator vddarm");
+		pr_err("failed to get regulator vddarm\n");
 		return PTR_ERR(arm_regulator);
 	}
 
 	int_regulator = regulator_get(NULL, "vddint");
 	if (IS_ERR(int_regulator)) {
-		pr_err("failed to get regulator vddint");
+		pr_err("failed to get regulator vddint\n");
 		regulator_put(arm_regulator);
 		return PTR_ERR(int_regulator);
 	}
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index ac84e48..4225501 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -13,6 +13,8 @@
  *	2005-03-30: - initial revision
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -30,8 +32,6 @@
 
 static __u8 __iomem *cpuctl;
 
-#define PFX "sc520_freq: "
-
 static struct cpufreq_frequency_table sc520_freq_table[] = {
 	{0, 0x01,	100000},
 	{0, 0x02,	133000},
@@ -44,8 +44,8 @@
 
 	switch (clockspeed_reg & 0x03) {
 	default:
-		printk(KERN_ERR PFX "error: cpuctl register has unexpected "
-				"value %02x\n", clockspeed_reg);
+		pr_err("error: cpuctl register has unexpected value %02x\n",
+		       clockspeed_reg);
 	case 0x01:
 		return 100000;
 	case 0x02:
@@ -112,7 +112,7 @@
 
 	cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
 	if (!cpuctl) {
-		printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
+		pr_err("error: failed to remap memory\n");
 		return -ENOMEM;
 	}
 
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index de5e89b..e8a7bf5 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -18,6 +18,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -38,35 +39,6 @@
 	return scpi_ops->dvfs_get_info(domain);
 }
 
-static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
-{
-	int idx, ret = 0;
-	struct scpi_opp *opp;
-	struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
-
-	if (IS_ERR(info))
-		return PTR_ERR(info);
-
-	if (!info->opps)
-		return -EIO;
-
-	for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
-		if (remove)
-			dev_pm_opp_remove(cpu_dev, opp->freq);
-		else
-			ret = dev_pm_opp_add(cpu_dev, opp->freq,
-					     opp->m_volt * 1000);
-		if (ret) {
-			dev_warn(cpu_dev, "failed to add opp %uHz %umV\n",
-				 opp->freq, opp->m_volt);
-			while (idx-- > 0)
-				dev_pm_opp_remove(cpu_dev, (--opp)->freq);
-			return ret;
-		}
-	}
-	return ret;
-}
-
 static int scpi_get_transition_latency(struct device *cpu_dev)
 {
 	struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
@@ -76,21 +48,42 @@
 	return info->latency;
 }
 
-static int scpi_init_opp_table(struct device *cpu_dev)
+static int scpi_init_opp_table(const struct cpumask *cpumask)
 {
-	return scpi_opp_table_ops(cpu_dev, false);
-}
+	int idx, ret;
+	struct scpi_opp *opp;
+	struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+	struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
 
-static void scpi_free_opp_table(struct device *cpu_dev)
-{
-	scpi_opp_table_ops(cpu_dev, true);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
+
+	if (!info->opps)
+		return -EIO;
+
+	for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
+		ret = dev_pm_opp_add(cpu_dev, opp->freq, opp->m_volt * 1000);
+		if (ret) {
+			dev_warn(cpu_dev, "failed to add opp %uHz %umV\n",
+				 opp->freq, opp->m_volt);
+			while (idx-- > 0)
+				dev_pm_opp_remove(cpu_dev, (--opp)->freq);
+			return ret;
+		}
+	}
+
+	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
+	if (ret)
+		dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+			__func__, ret);
+	return ret;
 }
 
 static struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
 	.name	= "scpi",
 	.get_transition_latency = scpi_get_transition_latency,
 	.init_opp_table = scpi_init_opp_table,
-	.free_opp_table = scpi_free_opp_table,
+	.free_opp_table = dev_pm_opp_cpumask_remove_table,
 };
 
 static int scpi_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 7d4a315..41bc539 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -13,6 +13,8 @@
  * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -27,7 +29,6 @@
 #include <asm/cpufeature.h>
 #include <asm/cpu_device_id.h>
 
-#define PFX		"speedstep-centrino: "
 #define MAINTAINER	"linux-pm@vger.kernel.org"
 
 #define INTEL_MSR_RANGE	(0xffff)
@@ -386,8 +387,7 @@
 		/* check to see if it stuck */
 		rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 		if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
-			printk(KERN_INFO PFX
-				"couldn't enable Enhanced SpeedStep\n");
+			pr_info("couldn't enable Enhanced SpeedStep\n");
 			return -ENODEV;
 		}
 	}
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 37555c6..b86953a 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -18,6 +18,8 @@
  *                        SPEEDSTEP - DEFINITIONS                    *
  *********************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -68,13 +70,13 @@
 	/* get PMBASE */
 	pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
 	if (!(pmbase & 0x01)) {
-		printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
+		pr_err("could not find speedstep register\n");
 		return -ENODEV;
 	}
 
 	pmbase &= 0xFFFFFFFE;
 	if (!pmbase) {
-		printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
+		pr_err("could not find speedstep register\n");
 		return -ENODEV;
 	}
 
@@ -136,7 +138,7 @@
 		pr_debug("change to %u MHz succeeded\n",
 			speedstep_get_frequency(speedstep_processor) / 1000);
 	else
-		printk(KERN_ERR "cpufreq: change failed - I/O error\n");
+		pr_err("change failed - I/O error\n");
 
 	return;
 }
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 15d3214..1b80621 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -8,6 +8,8 @@
  *  BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -153,7 +155,7 @@
 		fsb = 333333;
 		break;
 	default:
-		printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value");
+		pr_err("PCORE - MSR_FSB_FREQ undefined value\n");
 	}
 
 	rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
@@ -453,11 +455,8 @@
 		 */
 		if (*transition_latency > 10000000 ||
 		    *transition_latency < 50000) {
-			printk(KERN_WARNING PFX "frequency transition "
-					"measured seems out of range (%u "
-					"nSec), falling back to a safe one of"
-					"%u nSec.\n",
-					*transition_latency, 500000);
+			pr_warn("frequency transition measured seems out of range (%u nSec), falling back to a safe one of %u nSec\n",
+				*transition_latency, 500000);
 			*transition_latency = 500000;
 		}
 	}
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 819229e..770a9ae 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -12,6 +12,8 @@
  *                        SPEEDSTEP - DEFINITIONS                    *
  *********************************************************************/
 
+#define pr_fmt(fmt) "cpufreq: " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -204,9 +206,8 @@
 			(speedstep_freqs[new_state].frequency / 1000),
 			retry, result);
 	else
-		printk(KERN_ERR "cpufreq: change to state %u "
-			"failed with new_state %u and result %u\n",
-			state, new_state, result);
+		pr_err("change to state %u failed with new_state %u and result %u\n",
+		       state, new_state, result);
 
 	return;
 }
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index a9c659f..0404203 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -259,6 +259,10 @@
 {
 	int ret;
 
+	if ((!of_machine_is_compatible("st,stih407")) &&
+		(!of_machine_is_compatible("st,stih410")))
+		return -ENODEV;
+
 	ddata.cpu = get_cpu_device(0);
 	if (!ddata.cpu) {
 		dev_err(ddata.cpu, "Failed to get device for CPU0\n");
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 20bcceb..4353025 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -14,7 +14,6 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <linux/clk.h>
-#include <linux/cpufreq-dt.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -69,10 +68,6 @@
 	clk_set_parent(priv->cpu_clk, priv->pllx_clk);
 }
 
-static struct cpufreq_dt_platform_data cpufreq_dt_pd = {
-	.independent_clocks = false,
-};
-
 static int tegra124_cpufreq_probe(struct platform_device *pdev)
 {
 	struct tegra124_cpufreq_priv *priv;
@@ -129,8 +124,6 @@
 
 	cpufreq_dt_devinfo.name = "cpufreq-dt";
 	cpufreq_dt_devinfo.parent = &pdev->dev;
-	cpufreq_dt_devinfo.data = &cpufreq_dt_pd;
-	cpufreq_dt_devinfo.size_data = sizeof(cpufreq_dt_pd);
 
 	priv->cpufreq_dt_pdev =
 		platform_device_register_full(&cpufreq_dt_devinfo);
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 433e93f..87e5bdc 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -18,6 +18,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -26,8 +27,9 @@
 
 #include "arm_big_little.h"
 
-static int ve_spc_init_opp_table(struct device *cpu_dev)
+static int ve_spc_init_opp_table(const struct cpumask *cpumask)
 {
+	struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
 	/*
 	 * platform specific SPC code must initialise the opp table
 	 * so just check if the OPP count is non-zero
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 545069d..e342565e 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -50,7 +50,7 @@
 		 * call the CPU ops suspend protocol with idle index as a
 		 * parameter.
 		 */
-		arm_cpuidle_suspend(idx);
+		ret = arm_cpuidle_suspend(idx);
 
 		cpu_pm_exit();
 	}
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index f996efc..2b8e6ce 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -173,7 +173,7 @@
 
 	struct cpuidle_state *target_state = &drv->states[index];
 	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
-	ktime_t time_start, time_end;
+	u64 time_start, time_end;
 	s64 diff;
 
 	/*
@@ -195,13 +195,13 @@
 	sched_idle_set_state(target_state);
 
 	trace_cpu_idle_rcuidle(index, dev->cpu);
-	time_start = ktime_get();
+	time_start = local_clock();
 
 	stop_critical_timings();
 	entered_state = target_state->enter(dev, drv, index);
 	start_critical_timings();
 
-	time_end = ktime_get();
+	time_end = local_clock();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
 	/* The cpu is no longer idle or about to enter idle. */
@@ -217,7 +217,11 @@
 	if (!cpuidle_state_is_coupled(drv, entered_state))
 		local_irq_enable();
 
-	diff = ktime_to_us(ktime_sub(time_end, time_start));
+	/*
+	 * local_clock() returns the time in nanoseconds; shift it
+	 * by 10 (divide by 1024) to get an approximate microsecond value.
+	 */
+	diff = (time_end - time_start) >> 10;
 	if (diff > INT_MAX)
 		diff = INT_MAX;
 
@@ -433,6 +437,8 @@
 	list_del(&dev->device_list);
 	per_cpu(cpuidle_devices, dev->cpu) = NULL;
 	module_put(drv->owner);
+
+	dev->registered = 0;
 }
 
 static void __cpuidle_device_init(struct cpuidle_device *dev)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 477fffd..d77ba2f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -279,6 +279,14 @@
 	help
 	  This option allows you to have support for AMCC crypto acceleration.
 
+config HW_RANDOM_PPC4XX
+	bool "PowerPC 4xx generic true random number generator support"
+	depends on CRYPTO_DEV_PPC4XX && HW_RANDOM
+	default y
+	---help---
+	 This option provides the kernel-side support for the TRNG hardware
+	 found in the security function of some PowerPC 4xx SoCs.
+
 config CRYPTO_DEV_OMAP_SHAM
 	tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
 	depends on ARCH_OMAP2PLUS
@@ -302,15 +310,16 @@
 	  want to use the OMAP module for AES algorithms.
 
 config CRYPTO_DEV_OMAP_DES
-	tristate "Support for OMAP DES3DES hw engine"
+	tristate "Support for OMAP DES/3DES hw engine"
 	depends on ARCH_OMAP2PLUS
 	select CRYPTO_DES
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_ENGINE
 	help
 	  OMAP processors have DES/3DES module accelerator. Select this if you
 	  want to use the OMAP module for DES and 3DES algorithms. Currently
-	  the ECB and CBC modes of operation supported by the driver. Also
-	  accesses made on unaligned boundaries are also supported.
+	  the ECB and CBC modes of operation are supported by the driver. Also
+	  accesses made on unaligned boundaries are supported.
 
 config CRYPTO_DEV_PICOXCELL
 	tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
@@ -340,9 +349,19 @@
 	  This option enables support for the SAHARA HW crypto accelerator
 	  found in some Freescale i.MX chips.
 
+config CRYPTO_DEV_MXC_SCC
+	tristate "Support for Freescale Security Controller (SCC)"
+	depends on ARCH_MXC && OF
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
+	help
+	  This option enables support for the Security Controller (SCC)
+	  found in Freescale i.MX25 chips.
+
 config CRYPTO_DEV_S5P
 	tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
-	depends on ARCH_S5PV210 || ARCH_EXYNOS
+	depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+	depends on HAS_IOMEM && HAS_DMA
 	select CRYPTO_AES
 	select CRYPTO_BLKCIPHER
 	help
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 713de9d..3c6432dd 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -23,6 +23,7 @@
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
index 5c0c62b..b955399 100644
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,2 +1,3 @@
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
 crypto4xx-y :=  crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 62134c8..dae1e39 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -40,6 +40,7 @@
 #include "crypto4xx_reg_def.h"
 #include "crypto4xx_core.h"
 #include "crypto4xx_sa.h"
+#include "crypto4xx_trng.h"
 
 #define PPC4XX_SEC_VERSION_STR			"0.5"
 
@@ -1225,6 +1226,7 @@
 	if (rc)
 		goto err_start_dev;
 
+	ppc4xx_trng_probe(core_dev);
 	return 0;
 
 err_start_dev:
@@ -1252,6 +1254,8 @@
 	struct device *dev = &ofdev->dev;
 	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
 
+	ppc4xx_trng_remove(core_dev);
+
 	free_irq(core_dev->irq, dev);
 	irq_dispose_mapping(core_dev->irq);
 
@@ -1272,7 +1276,7 @@
 
 static struct platform_driver crypto4xx_driver = {
 	.driver = {
-		.name = "crypto4xx",
+		.name = MODULE_NAME,
 		.of_match_table = crypto4xx_match,
 	},
 	.probe		= crypto4xx_probe,
@@ -1284,4 +1288,3 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
 MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
-
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index bac0bde..ecfdcfe 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -24,6 +24,8 @@
 
 #include <crypto/internal/hash.h>
 
+#define MODULE_NAME "crypto4xx"
+
 #define PPC460SX_SDR0_SRST                      0x201
 #define PPC405EX_SDR0_SRST                      0x200
 #define PPC460EX_SDR0_SRST                      0x201
@@ -72,6 +74,7 @@
 	char *name;
 	u64  ce_phy_address;
 	void __iomem *ce_base;
+	void __iomem *trng_base;
 
 	void *pdr;			/* base address of packet
 					descriptor ring */
@@ -106,6 +109,7 @@
 	struct device *device;
 	struct platform_device *ofdev;
 	struct crypto4xx_device *dev;
+	struct hwrng *trng;
 	u32 int_status;
 	u32 irq;
 	struct tasklet_struct tasklet;
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 5f5fbc0..46fe57c 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -125,6 +125,7 @@
 #define PPC4XX_INTERRUPT_CLR			0x3ffff
 #define PPC4XX_PRNG_CTRL_AUTO_EN		0x3
 #define PPC4XX_DC_3DES_EN			1
+#define PPC4XX_TRNG_EN				0x00020000
 #define PPC4XX_INT_DESCR_CNT			4
 #define PPC4XX_INT_TIMEOUT_CNT			0
 #define PPC4XX_INT_CFG				1
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
new file mode 100644
index 0000000..677ca17
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -0,0 +1,131 @@
+/*
+ * Generic PowerPC 44x RNG driver
+ *
+ * Copyright 2011 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include "crypto4xx_core.h"
+#include "crypto4xx_trng.h"
+#include "crypto4xx_reg_def.h"
+
+#define PPC4XX_TRNG_CTRL	0x0008
+#define PPC4XX_TRNG_CTRL_DALM	0x20
+#define PPC4XX_TRNG_STAT	0x0004
+#define PPC4XX_TRNG_STAT_B	0x1
+#define PPC4XX_TRNG_DATA	0x0000
+
+static int ppc4xx_trng_data_present(struct hwrng *rng, int wait)
+{
+	struct crypto4xx_device *dev = (void *)rng->priv;
+	int busy, i, present = 0;
+
+	for (i = 0; i < 20; i++) {
+		busy = (in_le32(dev->trng_base + PPC4XX_TRNG_STAT) &
+			PPC4XX_TRNG_STAT_B);
+		if (!busy || !wait) {
+			present = 1;
+			break;
+		}
+		udelay(10);
+	}
+	return present;
+}
+
+static int ppc4xx_trng_data_read(struct hwrng *rng, u32 *data)
+{
+	struct crypto4xx_device *dev = (void *)rng->priv;
+	*data = in_le32(dev->trng_base + PPC4XX_TRNG_DATA);
+	return 4;
+}
+
+static void ppc4xx_trng_enable(struct crypto4xx_device *dev, bool enable)
+{
+	u32 device_ctrl;
+
+	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+	if (enable)
+		device_ctrl |= PPC4XX_TRNG_EN;
+	else
+		device_ctrl &= ~PPC4XX_TRNG_EN;
+	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+}
+
+static const struct of_device_id ppc4xx_trng_match[] = {
+	{ .compatible = "ppc4xx-rng", },
+	{ .compatible = "amcc,ppc460ex-rng", },
+	{ .compatible = "amcc,ppc440epx-rng", },
+	{},
+};
+
+void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
+{
+	struct crypto4xx_device *dev = core_dev->dev;
+	struct device_node *trng = NULL;
+	struct hwrng *rng = NULL;
+	int err;
+
+	/* Find the TRNG device node and map it */
+	trng = of_find_matching_node(NULL, ppc4xx_trng_match);
+	if (!trng || !of_device_is_available(trng))
+		return;
+
+	dev->trng_base = of_iomap(trng, 0);
+	of_node_put(trng);
+	if (!dev->trng_base)
+		goto err_out;
+
+	rng = kzalloc(sizeof(*rng), GFP_KERNEL);
+	if (!rng)
+		goto err_out;
+
+	rng->name = MODULE_NAME;
+	rng->data_present = ppc4xx_trng_data_present;
+	rng->data_read = ppc4xx_trng_data_read;
+	rng->priv = (unsigned long) dev;
+	core_dev->trng = rng;
+	ppc4xx_trng_enable(dev, true);
+	out_le32(dev->trng_base + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
+	err = devm_hwrng_register(core_dev->device, core_dev->trng);
+	if (err) {
+		ppc4xx_trng_enable(dev, false);
+		dev_err(core_dev->device, "failed to register hwrng (%d).\n",
+			err);
+		goto err_out;
+	}
+	return;
+
+err_out:
+	of_node_put(trng);
+	iounmap(dev->trng_base);
+	kfree(rng);
+	dev->trng_base = NULL;
+	core_dev->trng = NULL;
+}
+
+void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev)
+{
+	if (core_dev && core_dev->trng) {
+		struct crypto4xx_device *dev = core_dev->dev;
+
+		devm_hwrng_unregister(core_dev->device, core_dev->trng);
+		ppc4xx_trng_enable(dev, false);
+		iounmap(dev->trng_base);
+		kfree(core_dev->trng);
+	}
+}
+
+MODULE_ALIAS("ppc4xx_rng");
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
new file mode 100644
index 0000000..931d225
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -0,0 +1,34 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file declares the probe/remove interface for the
+ * PPC4xx TRNG support in the crypto4xx driver.
+ */
+
+#ifndef __CRYPTO4XX_TRNG_H__
+#define __CRYPTO4XX_TRNG_H__
+
+#ifdef CONFIG_HW_RANDOM_PPC4XX
+void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
+void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
+#else
+static inline void ppc4xx_trng_probe(
+	struct crypto4xx_core_device *core_dev __maybe_unused) { }
+static inline void ppc4xx_trng_remove(
+	struct crypto4xx_core_device *core_dev __maybe_unused) { }
+#endif
+
+#endif
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 6fd63a6..5ef4be2 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -248,7 +248,7 @@
 struct device *caam_jr_alloc(void)
 {
 	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
-	struct device *dev = NULL;
+	struct device *dev = ERR_PTR(-ENODEV);
 	int min_tfm_cnt	= INT_MAX;
 	int tfm_cnt;
 
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 6e37845..2238f77 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -3,6 +3,8 @@
 	depends on CRYPTO_DEV_CCP
 	default m
 	select HW_RANDOM
+	select DMA_ENGINE
+	select DMADEVICES
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	help
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index b750592..ee4d274 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,5 +1,9 @@
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o ccp-dev-v3.o ccp-platform.o
+ccp-objs := ccp-dev.o \
+	    ccp-ops.o \
+	    ccp-dev-v3.o \
+	    ccp-platform.o \
+	    ccp-dmaengine.o
 ccp-$(CONFIG_PCI) += ccp-pci.o
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc5..60fc0fa 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@
 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
 	struct ccp_aes_cmac_exp_ctx state;
 
+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
 	state.null_msg = rctx->null_msg;
 	memcpy(state.iv, rctx->iv, sizeof(state.iv));
 	state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b5ad728..8f36af6 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@
 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
 	struct ccp_sha_exp_ctx state;
 
+	/* Don't let anything leak to 'out' */
+	memset(&state, 0, sizeof(state));
+
 	state.type = rctx->type;
 	state.msg_bits = rctx->msg_bits;
 	state.first = rctx->first;
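
Both memset() additions follow the same pattern: the export callbacks build a
state structure on the stack and copy selected fields into it, so any
compiler-inserted padding (or field left unset) would otherwise carry stale
kernel stack bytes out to the caller. A minimal user-space illustration with a
hypothetical structure, not the driver's real context layout:

    #include <string.h>
    #include <stdint.h>

    struct example_exp_ctx {
            uint8_t  first;         /* typically followed by 3 bytes of padding */
            uint32_t msg_bits;
    };

    int example_export(void *out, uint8_t first, uint32_t msg_bits)
    {
            struct example_exp_ctx state;

            memset(&state, 0, sizeof(state));       /* clears the padding too */
            state.first = first;
            state.msg_bits = msg_bits;
            memcpy(out, &state, sizeof(state));
            return 0;
    }
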
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 7d5eab4..d7a7103 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -406,6 +406,11 @@
 		goto e_kthread;
 	}
 
+	/* Register the DMA engine support */
+	ret = ccp_dmaengine_register(ccp);
+	if (ret)
+		goto e_hwrng;
+
 	ccp_add_device(ccp);
 
 	/* Enable interrupts */
@@ -413,6 +418,9 @@
 
 	return 0;
 
+e_hwrng:
+	hwrng_unregister(&ccp->hwrng);
+
 e_kthread:
 	for (i = 0; i < ccp->cmd_q_count; i++)
 		if (ccp->cmd_q[i].kthread)
@@ -436,6 +444,9 @@
 	/* Remove this device from the list of available units first */
 	ccp_del_device(ccp);
 
+	/* Unregister the DMA engine */
+	ccp_dmaengine_unregister(ccp);
+
 	/* Unregister the RNG */
 	hwrng_unregister(&ccp->hwrng);
 
@@ -515,7 +526,7 @@
 	return IRQ_HANDLED;
 }
 
-static struct ccp_actions ccp3_actions = {
+static const struct ccp_actions ccp3_actions = {
 	.perform_aes = ccp_perform_aes,
 	.perform_xts_aes = ccp_perform_xts_aes,
 	.perform_sha = ccp_perform_sha,
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 4dbc187..87b9f2b 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -16,7 +16,7 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
-#include <linux/rwlock_types.h>
+#include <linux/spinlock_types.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
 #include <linux/delay.h>
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 7745d0b..bd41ffce 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -22,6 +22,9 @@
 #include <linux/dmapool.h>
 #include <linux/hw_random.h>
 #include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
 
 #define MAX_CCP_NAME_LEN		16
 #define MAX_DMAPOOL_NAME_LEN		32
@@ -159,7 +162,7 @@
 /* Structure to hold CCP version-specific values */
 struct ccp_vdata {
 	unsigned int version;
-	struct ccp_actions *perform;
+	const struct ccp_actions *perform;
 };
 
 extern struct ccp_vdata ccpv3;
@@ -167,6 +170,39 @@
 struct ccp_device;
 struct ccp_cmd;
 
+struct ccp_dma_cmd {
+	struct list_head entry;
+
+	struct ccp_cmd ccp_cmd;
+};
+
+struct ccp_dma_desc {
+	struct list_head entry;
+
+	struct ccp_device *ccp;
+
+	struct list_head pending;
+	struct list_head active;
+
+	enum dma_status status;
+	struct dma_async_tx_descriptor tx_desc;
+	size_t len;
+};
+
+struct ccp_dma_chan {
+	struct ccp_device *ccp;
+
+	spinlock_t lock;
+	struct list_head pending;
+	struct list_head active;
+	struct list_head complete;
+
+	struct tasklet_struct cleanup_tasklet;
+
+	enum dma_status status;
+	struct dma_chan dma_chan;
+};
+
 struct ccp_cmd_queue {
 	struct ccp_device *ccp;
 
@@ -261,6 +297,14 @@
 	unsigned int hwrng_retries;
 
 	/*
+	 * Support for the CCP DMA capabilities
+	 */
+	struct dma_device dma_dev;
+	struct ccp_dma_chan *ccp_dma_chan;
+	struct kmem_cache *dma_cmd_cache;
+	struct kmem_cache *dma_desc_cache;
+
+	/*
 	 * A counter used to generate job-ids for cmds submitted to the CCP
 	 */
 	atomic_t current_id ____cacheline_aligned;
@@ -418,4 +462,7 @@
 
 int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
 
+int ccp_dmaengine_register(struct ccp_device *ccp);
+void ccp_dmaengine_unregister(struct ccp_device *ccp);
+
 #endif
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
new file mode 100644
index 0000000..94f77b0
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -0,0 +1,727 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+#include "../../dma/dmaengine.h"
+
+#define CCP_DMA_WIDTH(_mask)		\
+({					\
+	u64 mask = _mask + 1;		\
+	(mask == 0) ? 64 : fls64(mask);	\
+})
+
+static void ccp_free_cmd_resources(struct ccp_device *ccp,
+				   struct list_head *list)
+{
+	struct ccp_dma_cmd *cmd, *ctmp;
+
+	list_for_each_entry_safe(cmd, ctmp, list, entry) {
+		list_del(&cmd->entry);
+		kmem_cache_free(ccp->dma_cmd_cache, cmd);
+	}
+}
+
+static void ccp_free_desc_resources(struct ccp_device *ccp,
+				    struct list_head *list)
+{
+	struct ccp_dma_desc *desc, *dtmp;
+
+	list_for_each_entry_safe(desc, dtmp, list, entry) {
+		ccp_free_cmd_resources(ccp, &desc->active);
+		ccp_free_cmd_resources(ccp, &desc->pending);
+
+		list_del(&desc->entry);
+		kmem_cache_free(ccp->dma_desc_cache, desc);
+	}
+}
+
+static void ccp_free_chan_resources(struct dma_chan *dma_chan)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	unsigned long flags;
+
+	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	ccp_free_desc_resources(chan->ccp, &chan->complete);
+	ccp_free_desc_resources(chan->ccp, &chan->active);
+	ccp_free_desc_resources(chan->ccp, &chan->pending);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
+				       struct list_head *list)
+{
+	struct ccp_dma_desc *desc, *dtmp;
+
+	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
+		if (!async_tx_test_ack(&desc->tx_desc))
+			continue;
+
+		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);
+
+		ccp_free_cmd_resources(ccp, &desc->active);
+		ccp_free_cmd_resources(ccp, &desc->pending);
+
+		list_del(&desc->entry);
+		kmem_cache_free(ccp->dma_desc_cache, desc);
+	}
+}
+
+static void ccp_do_cleanup(unsigned long data)
+{
+	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
+	unsigned long flags;
+
+	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
+		dma_chan_name(&chan->dma_chan));
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
+{
+	struct ccp_dma_cmd *cmd;
+	int ret;
+
+	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
+	list_move(&cmd->entry, &desc->active);
+
+	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
+		desc->tx_desc.cookie, cmd);
+
+	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
+	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
+		return 0;
+
+	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
+		ret, desc->tx_desc.cookie, cmd);
+
+	return ret;
+}
+
+static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
+{
+	struct ccp_dma_cmd *cmd;
+
+	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
+				       entry);
+	if (!cmd)
+		return;
+
+	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
+		__func__, desc->tx_desc.cookie, cmd);
+
+	list_del(&cmd->entry);
+	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
+}
+
+static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
+						struct ccp_dma_desc *desc)
+{
+	/* Move current DMA descriptor to the complete list */
+	if (desc)
+		list_move(&desc->entry, &chan->complete);
+
+	/* Get the next DMA descriptor on the active list */
+	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
+					entry);
+
+	return desc;
+}
+
+static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
+						   struct ccp_dma_desc *desc)
+{
+	struct dma_async_tx_descriptor *tx_desc;
+	unsigned long flags;
+
+	/* Loop over descriptors until one is found with commands */
+	do {
+		if (desc) {
+			/* Remove the DMA command from the list and free it */
+			ccp_free_active_cmd(desc);
+
+			if (!list_empty(&desc->pending)) {
+				/* No errors, keep going */
+				if (desc->status != DMA_ERROR)
+					return desc;
+
+				/* Error, free remaining commands and move on */
+				ccp_free_cmd_resources(desc->ccp,
+						       &desc->pending);
+			}
+
+			tx_desc = &desc->tx_desc;
+		} else {
+			tx_desc = NULL;
+		}
+
+		spin_lock_irqsave(&chan->lock, flags);
+
+		if (desc) {
+			if (desc->status != DMA_ERROR)
+				desc->status = DMA_COMPLETE;
+
+			dev_dbg(desc->ccp->dev,
+				"%s - tx %d complete, status=%u\n", __func__,
+				desc->tx_desc.cookie, desc->status);
+
+			dma_cookie_complete(tx_desc);
+		}
+
+		desc = __ccp_next_dma_desc(chan, desc);
+
+		spin_unlock_irqrestore(&chan->lock, flags);
+
+		if (tx_desc) {
+			if (tx_desc->callback &&
+			    (tx_desc->flags & DMA_PREP_INTERRUPT))
+				tx_desc->callback(tx_desc->callback_param);
+
+			dma_run_dependencies(tx_desc);
+		}
+	} while (desc);
+
+	return NULL;
+}
+
+static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
+{
+	struct ccp_dma_desc *desc;
+
+	if (list_empty(&chan->pending))
+		return NULL;
+
+	desc = list_empty(&chan->active)
+		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
+		: NULL;
+
+	list_splice_tail_init(&chan->pending, &chan->active);
+
+	return desc;
+}
+
+static void ccp_cmd_callback(void *data, int err)
+{
+	struct ccp_dma_desc *desc = data;
+	struct ccp_dma_chan *chan;
+	int ret;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
+			    dma_chan);
+
+	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
+		__func__, desc->tx_desc.cookie, err);
+
+	if (err)
+		desc->status = DMA_ERROR;
+
+	while (true) {
+		/* Check for DMA descriptor completion */
+		desc = ccp_handle_active_desc(chan, desc);
+
+		/* Don't submit cmd if no descriptor or DMA is paused */
+		if (!desc || (chan->status == DMA_PAUSED))
+			break;
+
+		ret = ccp_issue_next_cmd(desc);
+		if (!ret)
+			break;
+
+		desc->status = DMA_ERROR;
+	}
+
+	tasklet_schedule(&chan->cleanup_tasklet);
+}
+
+static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
+{
+	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
+						 tx_desc);
+	struct ccp_dma_chan *chan;
+	dma_cookie_t cookie;
+	unsigned long flags;
+
+	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	cookie = dma_cookie_assign(tx_desc);
+	list_add_tail(&desc->entry, &chan->pending);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
+		__func__, cookie);
+
+	return cookie;
+}
+
+static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
+{
+	struct ccp_dma_cmd *cmd;
+
+	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
+	if (cmd)
+		memset(cmd, 0, sizeof(*cmd));
+
+	return cmd;
+}
+
+static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
+					       unsigned long flags)
+{
+	struct ccp_dma_desc *desc;
+
+	desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
+	if (!desc)
+		return NULL;
+
+	memset(desc, 0, sizeof(*desc));
+
+	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
+	desc->tx_desc.flags = flags;
+	desc->tx_desc.tx_submit = ccp_tx_submit;
+	desc->ccp = chan->ccp;
+	INIT_LIST_HEAD(&desc->pending);
+	INIT_LIST_HEAD(&desc->active);
+	desc->status = DMA_IN_PROGRESS;
+
+	return desc;
+}
+
+static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
+					    struct scatterlist *dst_sg,
+					    unsigned int dst_nents,
+					    struct scatterlist *src_sg,
+					    unsigned int src_nents,
+					    unsigned long flags)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_device *ccp = chan->ccp;
+	struct ccp_dma_desc *desc;
+	struct ccp_dma_cmd *cmd;
+	struct ccp_cmd *ccp_cmd;
+	struct ccp_passthru_nomap_engine *ccp_pt;
+	unsigned int src_offset, src_len;
+	unsigned int dst_offset, dst_len;
+	unsigned int len;
+	unsigned long sflags;
+	size_t total_len;
+
+	if (!dst_sg || !src_sg)
+		return NULL;
+
+	if (!dst_nents || !src_nents)
+		return NULL;
+
+	desc = ccp_alloc_dma_desc(chan, flags);
+	if (!desc)
+		return NULL;
+
+	total_len = 0;
+
+	src_len = sg_dma_len(src_sg);
+	src_offset = 0;
+
+	dst_len = sg_dma_len(dst_sg);
+	dst_offset = 0;
+
+	while (true) {
+		if (!src_len) {
+			src_nents--;
+			if (!src_nents)
+				break;
+
+			src_sg = sg_next(src_sg);
+			if (!src_sg)
+				break;
+
+			src_len = sg_dma_len(src_sg);
+			src_offset = 0;
+			continue;
+		}
+
+		if (!dst_len) {
+			dst_nents--;
+			if (!dst_nents)
+				break;
+
+			dst_sg = sg_next(dst_sg);
+			if (!dst_sg)
+				break;
+
+			dst_len = sg_dma_len(dst_sg);
+			dst_offset = 0;
+			continue;
+		}
+
+		len = min(dst_len, src_len);
+
+		cmd = ccp_alloc_dma_cmd(chan);
+		if (!cmd)
+			goto err;
+
+		ccp_cmd = &cmd->ccp_cmd;
+		ccp_pt = &ccp_cmd->u.passthru_nomap;
+		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
+		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
+		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
+		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
+		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
+		ccp_pt->src_len = len;
+		ccp_pt->final = 1;
+		ccp_cmd->callback = ccp_cmd_callback;
+		ccp_cmd->data = desc;
+
+		list_add_tail(&cmd->entry, &desc->pending);
+
+		dev_dbg(ccp->dev,
+			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
+			cmd, &ccp_pt->src_dma,
+			&ccp_pt->dst_dma, ccp_pt->src_len);
+
+		total_len += len;
+
+		src_len -= len;
+		src_offset += len;
+
+		dst_len -= len;
+		dst_offset += len;
+	}
+
+	desc->len = total_len;
+
+	if (list_empty(&desc->pending))
+		goto err;
+
+	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);
+
+	spin_lock_irqsave(&chan->lock, sflags);
+
+	list_add_tail(&desc->entry, &chan->pending);
+
+	spin_unlock_irqrestore(&chan->lock, sflags);
+
+	return desc;
+
+err:
+	ccp_free_cmd_resources(ccp, &desc->pending);
+	kmem_cache_free(ccp->dma_desc_cache, desc);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
+	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
+	unsigned long flags)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+	struct scatterlist dst_sg, src_sg;
+
+	dev_dbg(chan->ccp->dev,
+		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
+		__func__, &src, &dst, len, flags);
+
+	sg_init_table(&dst_sg, 1);
+	sg_dma_address(&dst_sg) = dst;
+	sg_dma_len(&dst_sg) = len;
+
+	sg_init_table(&src_sg, 1);
+	sg_dma_address(&src_sg) = src;
+	sg_dma_len(&src_sg) = len;
+
+	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
+	if (!desc)
+		return NULL;
+
+	return &desc->tx_desc;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
+	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
+	unsigned int dst_nents, struct scatterlist *src_sg,
+	unsigned int src_nents, unsigned long flags)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+
+	dev_dbg(chan->ccp->dev,
+		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
+		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);
+
+	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
+			       flags);
+	if (!desc)
+		return NULL;
+
+	return &desc->tx_desc;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
+	struct dma_chan *dma_chan, unsigned long flags)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+
+	desc = ccp_alloc_dma_desc(chan, flags);
+	if (!desc)
+		return NULL;
+
+	return &desc->tx_desc;
+}
+
+static void ccp_issue_pending(struct dma_chan *dma_chan)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+	unsigned long flags;
+
+	dev_dbg(chan->ccp->dev, "%s\n", __func__);
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	desc = __ccp_pending_to_active(chan);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	/* If there was nothing active, start processing */
+	if (desc)
+		ccp_cmd_callback(desc, 0);
+}
+
+static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
+				     dma_cookie_t cookie,
+				     struct dma_tx_state *state)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+	enum dma_status ret;
+	unsigned long flags;
+
+	if (chan->status == DMA_PAUSED) {
+		ret = DMA_PAUSED;
+		goto out;
+	}
+
+	ret = dma_cookie_status(dma_chan, cookie, state);
+	if (ret == DMA_COMPLETE) {
+		spin_lock_irqsave(&chan->lock, flags);
+
+		/* Get status from complete chain, if still there */
+		list_for_each_entry(desc, &chan->complete, entry) {
+			if (desc->tx_desc.cookie != cookie)
+				continue;
+
+			ret = desc->status;
+			break;
+		}
+
+		spin_unlock_irqrestore(&chan->lock, flags);
+	}
+
+out:
+	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);
+
+	return ret;
+}
+
+static int ccp_pause(struct dma_chan *dma_chan)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+
+	chan->status = DMA_PAUSED;
+
+	/* TODO: Wait for active DMA to complete before returning? */
+
+	return 0;
+}
+
+static int ccp_resume(struct dma_chan *dma_chan)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	struct ccp_dma_desc *desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
+					entry);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	/* Indicate the channel is running again */
+	chan->status = DMA_IN_PROGRESS;
+
+	/* If there was something active, re-start */
+	if (desc)
+		ccp_cmd_callback(desc, 0);
+
+	return 0;
+}
+
+static int ccp_terminate_all(struct dma_chan *dma_chan)
+{
+	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+						 dma_chan);
+	unsigned long flags;
+
+	dev_dbg(chan->ccp->dev, "%s\n", __func__);
+
+	/* TODO: Wait for active DMA to complete before continuing */
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	/* TODO: Purge the complete list? */
+	ccp_free_desc_resources(chan->ccp, &chan->active);
+	ccp_free_desc_resources(chan->ccp, &chan->pending);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return 0;
+}
+
+int ccp_dmaengine_register(struct ccp_device *ccp)
+{
+	struct ccp_dma_chan *chan;
+	struct dma_device *dma_dev = &ccp->dma_dev;
+	struct dma_chan *dma_chan;
+	char *dma_cmd_cache_name;
+	char *dma_desc_cache_name;
+	unsigned int i;
+	int ret;
+
+	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
+					 sizeof(*(ccp->ccp_dma_chan)),
+					 GFP_KERNEL);
+	if (!ccp->ccp_dma_chan)
+		return -ENOMEM;
+
+	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
+					    "%s-dmaengine-cmd-cache",
+					    ccp->name);
+	if (!dma_cmd_cache_name)
+		return -ENOMEM;
+
+	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
+					       sizeof(struct ccp_dma_cmd),
+					       sizeof(void *),
+					       SLAB_HWCACHE_ALIGN, NULL);
+	if (!ccp->dma_cmd_cache)
+		return -ENOMEM;
+
+	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
+					     "%s-dmaengine-desc-cache",
+					     ccp->name);
+	if (!dma_desc_cache_name) {
+		ret = -ENOMEM;
+		goto err_cache;
+	}
+	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
+						sizeof(struct ccp_dma_desc),
+						sizeof(void *),
+						SLAB_HWCACHE_ALIGN, NULL);
+	if (!ccp->dma_desc_cache) {
+		ret = -ENOMEM;
+		goto err_cache;
+	}
+
+	dma_dev->dev = ccp->dev;
+	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
+	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
+	dma_dev->directions = DMA_MEM_TO_MEM;
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+	dma_cap_set(DMA_SG, dma_dev->cap_mask);
+	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+	INIT_LIST_HEAD(&dma_dev->channels);
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		chan = ccp->ccp_dma_chan + i;
+		dma_chan = &chan->dma_chan;
+
+		chan->ccp = ccp;
+
+		spin_lock_init(&chan->lock);
+		INIT_LIST_HEAD(&chan->pending);
+		INIT_LIST_HEAD(&chan->active);
+		INIT_LIST_HEAD(&chan->complete);
+
+		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
+			     (unsigned long)chan);
+
+		dma_chan->device = dma_dev;
+		dma_cookie_init(dma_chan);
+
+		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
+	}
+
+	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
+	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
+	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
+	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
+	dma_dev->device_issue_pending = ccp_issue_pending;
+	dma_dev->device_tx_status = ccp_tx_status;
+	dma_dev->device_pause = ccp_pause;
+	dma_dev->device_resume = ccp_resume;
+	dma_dev->device_terminate_all = ccp_terminate_all;
+
+	ret = dma_async_device_register(dma_dev);
+	if (ret)
+		goto err_reg;
+
+	return 0;
+
+err_reg:
+	kmem_cache_destroy(ccp->dma_desc_cache);
+
+err_cache:
+	kmem_cache_destroy(ccp->dma_cmd_cache);
+
+	return ret;
+}
+
+void ccp_dmaengine_unregister(struct ccp_device *ccp)
+{
+	struct dma_device *dma_dev = &ccp->dma_dev;
+
+	dma_async_device_unregister(dma_dev);
+
+	kmem_cache_destroy(ccp->dma_desc_cache);
+	kmem_cache_destroy(ccp->dma_cmd_cache);
+}
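
The file above wires the CCP command queues into the dmaengine framework as generic memcpy channels. For orientation, here is a minimal, purely illustrative client sketch (not part of this series; the example_* names are hypothetical and dst/src are assumed to be already DMA-mapped addresses):

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_ccp_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int ret = 0;

	/* Any channel advertising DMA_MEMCPY will do; with this patch the
	 * CCP command queues show up as such channels. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT |
						   DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto out;
	}

	cookie = dmaengine_submit(tx);		/* lands in ccp_tx_submit() */
	dma_async_issue_pending(chan);		/* kicks ccp_issue_pending() */

	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -ETIMEDOUT;
out:
	dma_release_channel(chan);
	return ret;
}

dma_sync_wait() simply polls the channel's tx_status (ccp_tx_status() here) until the cookie completes; an asynchronous user would instead rely on the DMA_PREP_INTERRUPT callback.
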
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index eefdf59..ffa2891 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1427,6 +1427,70 @@
 	return ret;
 }
 
+static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
+				      struct ccp_cmd *cmd)
+{
+	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
+	struct ccp_dm_workarea mask;
+	struct ccp_op op;
+	int ret;
+
+	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
+		return -EINVAL;
+
+	if (!pt->src_dma || !pt->dst_dma)
+		return -EINVAL;
+
+	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
+			return -EINVAL;
+		if (!pt->mask)
+			return -EINVAL;
+	}
+
+	BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = ccp_gen_jobid(cmd_q->ccp);
+
+	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+		/* Load the mask */
+		op.ksb_key = cmd_q->ksb_key;
+
+		mask.length = pt->mask_len;
+		mask.dma.address = pt->mask;
+		mask.dma.length = pt->mask_len;
+
+		ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
+				     CCP_PASSTHRU_BYTESWAP_NOOP);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			return ret;
+		}
+	}
+
+	/* Send data to the CCP Passthru engine */
+	op.eom = 1;
+	op.soc = 1;
+
+	op.src.type = CCP_MEMTYPE_SYSTEM;
+	op.src.u.dma.address = pt->src_dma;
+	op.src.u.dma.offset = 0;
+	op.src.u.dma.length = pt->src_len;
+
+	op.dst.type = CCP_MEMTYPE_SYSTEM;
+	op.dst.u.dma.address = pt->dst_dma;
+	op.dst.u.dma.offset = 0;
+	op.dst.u.dma.length = pt->src_len;
+
+	ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
+	if (ret)
+		cmd->engine_error = cmd_q->cmd_error;
+
+	return ret;
+}
+
 static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
@@ -1762,7 +1826,10 @@
 		ret = ccp_run_rsa_cmd(cmd_q, cmd);
 		break;
 	case CCP_ENGINE_PASSTHRU:
-		ret = ccp_run_passthru_cmd(cmd_q, cmd);
+		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
+			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
+		else
+			ret = ccp_run_passthru_cmd(cmd_q, cmd);
 		break;
 	case CCP_ENGINE_ECC:
 		ret = ccp_run_ecc_cmd(cmd_q, cmd);
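
For reference, a hedged sketch of the single no-DMA-map passthru command that the new dmaengine glue builds for each scatterlist segment; illustrative only, with hypothetical names (example_nomap_copy, my_done). Note the ccp_cmd is queued by reference, so it must stay allocated until the callback has run:

#include <linux/ccp.h>
#include <linux/string.h>
#include <linux/types.h>

static void my_done(void *data, int err)
{
	/* copy finished (or failed); the ccp_cmd may be reused/freed now */
}

static int example_nomap_copy(struct ccp_cmd *cmd, dma_addr_t dst,
			      dma_addr_t src, u64 len, void *ctx)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_PASSTHRU;
	cmd->flags = CCP_CMD_MAY_BACKLOG | CCP_CMD_PASSTHRU_NO_DMA_MAP;
	cmd->u.passthru_nomap.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	cmd->u.passthru_nomap.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
	cmd->u.passthru_nomap.src_dma = src;
	cmd->u.passthru_nomap.dst_dma = dst;
	cmd->u.passthru_nomap.src_len = len;
	cmd->u.passthru_nomap.final = 1;
	cmd->callback = my_done;
	cmd->data = ctx;

	/* 0, -EINPROGRESS and -EBUSY all mean the command was accepted */
	return ccp_enqueue_cmd(cmd);
}
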
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 80239ae..e8ef9fd 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -475,18 +475,18 @@
 		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
 
 		if (dram && cesa->caps->has_tdma)
-			mv_cesa_conf_mbus_windows(&cesa->engines[i], dram);
+			mv_cesa_conf_mbus_windows(engine, dram);
 
-		writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS);
+		writel(0, engine->regs + CESA_SA_INT_STATUS);
 		writel(CESA_SA_CFG_STOP_DIG_ERR,
-		       cesa->engines[i].regs + CESA_SA_CFG);
+		       engine->regs + CESA_SA_CFG);
 		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
-		       cesa->engines[i].regs + CESA_SA_DESC_P0);
+		       engine->regs + CESA_SA_DESC_P0);
 
 		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
 						IRQF_ONESHOT,
 						dev_name(&pdev->dev),
-						&cesa->engines[i]);
+						engine);
 		if (ret)
 			goto err_cleanup;
 	}
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 7ca2e0f..7a5058d 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -768,8 +768,7 @@
 	*len = creq->len;
 	memcpy(hash, creq->state, digsize);
 	memset(cache, 0, blocksize);
-	if (creq->cache)
-		memcpy(cache, creq->cache, creq->cache_ptr);
+	memcpy(cache, creq->cache, creq->cache_ptr);
 
 	return 0;
 }
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 7642798..0ad8f1e 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -99,12 +99,11 @@
 	struct mv_cesa_tdma_desc *new_tdma = NULL;
 	dma_addr_t dma_handle;
 
-	new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags,
-				  &dma_handle);
+	new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
+				   &dma_handle);
 	if (!new_tdma)
 		return ERR_PTR(-ENOMEM);
 
-	memset(new_tdma, 0, sizeof(*new_tdma));
 	new_tdma->cur_dma = dma_handle;
 	if (chain->last) {
 		chain->last->next_dma = cpu_to_le32(dma_handle);
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
new file mode 100644
index 0000000..ff383ef
--- /dev/null
+++ b/drivers/crypto/mxc-scc.c
@@ -0,0 +1,765 @@
+/*
+ * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ *
+ * The driver is based on information gathered from
+ * drivers/mxc/security/mxc_scc.c which can be found in
+ * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+/* Secure Memory (SCM) registers */
+#define SCC_SCM_RED_START		0x0000
+#define SCC_SCM_BLACK_START		0x0004
+#define SCC_SCM_LENGTH			0x0008
+#define SCC_SCM_CTRL			0x000C
+#define SCC_SCM_STATUS			0x0010
+#define SCC_SCM_ERROR_STATUS		0x0014
+#define SCC_SCM_INTR_CTRL		0x0018
+#define SCC_SCM_CFG			0x001C
+#define SCC_SCM_INIT_VECTOR_0		0x0020
+#define SCC_SCM_INIT_VECTOR_1		0x0024
+#define SCC_SCM_RED_MEMORY		0x0400
+#define SCC_SCM_BLACK_MEMORY		0x0800
+
+/* Security Monitor (SMN) Registers */
+#define SCC_SMN_STATUS			0x1000
+#define SCC_SMN_COMMAND		0x1004
+#define SCC_SMN_SEQ_START		0x1008
+#define SCC_SMN_SEQ_END		0x100C
+#define SCC_SMN_SEQ_CHECK		0x1010
+#define SCC_SMN_BIT_COUNT		0x1014
+#define SCC_SMN_BITBANK_INC_SIZE	0x1018
+#define SCC_SMN_BITBANK_DECREMENT	0x101C
+#define SCC_SMN_COMPARE_SIZE		0x1020
+#define SCC_SMN_PLAINTEXT_CHECK	0x1024
+#define SCC_SMN_CIPHERTEXT_CHECK	0x1028
+#define SCC_SMN_TIMER_IV		0x102C
+#define SCC_SMN_TIMER_CONTROL		0x1030
+#define SCC_SMN_DEBUG_DETECT_STAT	0x1034
+#define SCC_SMN_TIMER			0x1038
+
+#define SCC_SCM_CTRL_START_CIPHER	BIT(2)
+#define SCC_SCM_CTRL_CBC_MODE		BIT(1)
+#define SCC_SCM_CTRL_DECRYPT_MODE	BIT(0)
+
+#define SCC_SCM_STATUS_LEN_ERR		BIT(12)
+#define SCC_SCM_STATUS_SMN_UNBLOCKED	BIT(11)
+#define SCC_SCM_STATUS_CIPHERING_DONE	BIT(10)
+#define SCC_SCM_STATUS_ZEROIZING_DONE	BIT(9)
+#define SCC_SCM_STATUS_INTR_STATUS	BIT(8)
+#define SCC_SCM_STATUS_SEC_KEY		BIT(7)
+#define SCC_SCM_STATUS_INTERNAL_ERR	BIT(6)
+#define SCC_SCM_STATUS_BAD_SEC_KEY	BIT(5)
+#define SCC_SCM_STATUS_ZEROIZE_FAIL	BIT(4)
+#define SCC_SCM_STATUS_SMN_BLOCKED	BIT(3)
+#define SCC_SCM_STATUS_CIPHERING	BIT(2)
+#define SCC_SCM_STATUS_ZEROIZING	BIT(1)
+#define SCC_SCM_STATUS_BUSY		BIT(0)
+
+#define SCC_SMN_STATUS_STATE_MASK	0x0000001F
+#define SCC_SMN_STATE_START		0x0
+/* The SMN is zeroizing its RAM during reset */
+#define SCC_SMN_STATE_ZEROIZE_RAM	0x5
+/* SMN has passed internal checks */
+#define SCC_SMN_STATE_HEALTH_CHECK	0x6
+/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
+#define SCC_SMN_STATE_FAIL		0x9
+/* SCC is in secure state. SCM is using secret key. */
+#define SCC_SMN_STATE_SECURE		0xA
+/* SCC is not secure. SCM is using default key. */
+#define SCC_SMN_STATE_NON_SECURE	0xC
+
+#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM	BIT(2)
+#define SCC_SCM_INTR_CTRL_CLR_INTR	BIT(1)
+#define SCC_SCM_INTR_CTRL_MASK_INTR	BIT(0)
+
+/* Size, in blocks, of Black memory. */
+#define SCC_SCM_CFG_BLACK_SIZE_MASK	0x07fe0000
+#define SCC_SCM_CFG_BLACK_SIZE_SHIFT	17
+/* Size, in blocks, of Red memory. */
+#define SCC_SCM_CFG_RED_SIZE_MASK	0x0001ff80
+#define SCC_SCM_CFG_RED_SIZE_SHIFT	7
+/* Number of bytes per block. */
+#define SCC_SCM_CFG_BLOCK_SIZE_MASK	0x0000007f
+
+#define SCC_SMN_COMMAND_TAMPER_LOCK	BIT(4)
+#define SCC_SMN_COMMAND_CLR_INTR	BIT(3)
+#define SCC_SMN_COMMAND_CLR_BIT_BANK	BIT(2)
+#define SCC_SMN_COMMAND_EN_INTR	BIT(1)
+#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM  BIT(0)
+
+#define SCC_KEY_SLOTS			20
+#define SCC_MAX_KEY_SIZE		32
+#define SCC_KEY_SLOT_SIZE		32
+
+#define SCC_CRC_CCITT_START		0xFFFF
+
+/*
+ * Offset into each RAM of the base of the area which is not
+ * used for Stored Keys.
+ */
+#define SCC_NON_RESERVED_OFFSET	(SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)
+
+/* Fixed padding for appending to plaintext to fill out a block */
+static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };
+
+enum mxc_scc_state {
+	SCC_STATE_OK,
+	SCC_STATE_UNIMPLEMENTED,
+	SCC_STATE_FAILED
+};
+
+struct mxc_scc {
+	struct device		*dev;
+	void __iomem		*base;
+	struct clk		*clk;
+	bool			hw_busy;
+	spinlock_t		lock;
+	struct crypto_queue	queue;
+	struct crypto_async_request *req;
+	int			block_size_bytes;
+	int			black_ram_size_blocks;
+	int			memory_size_bytes;
+	int			bytes_remaining;
+
+	void __iomem		*red_memory;
+	void __iomem		*black_memory;
+};
+
+struct mxc_scc_ctx {
+	struct mxc_scc		*scc;
+	struct scatterlist	*sg_src;
+	size_t			src_nents;
+	struct scatterlist	*sg_dst;
+	size_t			dst_nents;
+	unsigned int		offset;
+	unsigned int		size;
+	unsigned int		ctrl;
+};
+
+struct mxc_scc_crypto_tmpl {
+	struct mxc_scc *scc;
+	struct crypto_alg alg;
+};
+
+static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
+			    struct crypto_async_request *req)
+{
+	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+	struct mxc_scc *scc = ctx->scc;
+	size_t len;
+	void __iomem *from;
+
+	if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
+		from = scc->red_memory;
+	else
+		from = scc->black_memory;
+
+	dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
+		ctx->dst_nents * 8);
+	len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
+				   from, ctx->size, ctx->offset);
+	if (!len) {
+		dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
+		return -EINVAL;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "red memory@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       scc->red_memory, ctx->size, 1);
+	print_hex_dump(KERN_ERR,
+		       "black memory@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       scc->black_memory, ctx->size, 1);
+#endif
+
+	ctx->offset += len;
+
+	if (ctx->offset < ablkreq->nbytes)
+		return -EINPROGRESS;
+
+	return 0;
+}
+
+static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
+				       struct mxc_scc_ctx *ctx)
+{
+	struct mxc_scc *scc = ctx->scc;
+	int nents;
+
+	nents = sg_nents_for_len(req->src, req->nbytes);
+	if (nents < 0) {
+		dev_err(scc->dev, "Invalid number of src SG\n");
+		return nents;
+	}
+	ctx->src_nents = nents;
+
+	nents = sg_nents_for_len(req->dst, req->nbytes);
+	if (nents < 0) {
+		dev_err(scc->dev, "Invalid number of dst SG\n");
+		return nents;
+	}
+	ctx->dst_nents = nents;
+
+	ctx->size = 0;
+	ctx->offset = 0;
+
+	return 0;
+}
+
+static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
+					   struct mxc_scc_ctx *ctx,
+					   int result)
+{
+	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+	struct mxc_scc *scc = ctx->scc;
+
+	scc->req = NULL;
+	scc->bytes_remaining = scc->memory_size_bytes;
+
+	if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
+		memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
+		       scc->block_size_bytes);
+
+	req->complete(req, result);
+	scc->hw_busy = false;
+
+	return 0;
+}
+
+static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
+			     struct ablkcipher_request *req)
+{
+	u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
+	size_t len = min_t(size_t, req->nbytes - ctx->offset,
+			   ctx->scc->bytes_remaining);
+	unsigned int padding_byte_count = 0;
+	struct mxc_scc *scc = ctx->scc;
+	void __iomem *to;
+
+	if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
+		to = scc->black_memory;
+	else
+		to = scc->red_memory;
+
+	if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
+		memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
+		       scc->block_size_bytes);
+
+	len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
+				 to, len, ctx->offset);
+	if (!len) {
+		dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
+		return -EINVAL;
+	}
+
+	ctx->size = len;
+
+#ifdef DEBUG
+	dev_dbg(scc->dev, "copied %zu bytes to 0x%p\n", len, to);
+	print_hex_dump(KERN_ERR,
+		       "init vector0@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
+		       1);
+	print_hex_dump(KERN_ERR,
+		       "red memory@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       scc->red_memory, ctx->size, 1);
+	print_hex_dump(KERN_ERR,
+		       "black memory@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       scc->black_memory, ctx->size, 1);
+#endif
+
+	scc->bytes_remaining -= len;
+
+	padding_byte_count = len % scc->block_size_bytes;
+
+	if (padding_byte_count) {
+		memcpy(padding_buffer, scc_block_padding, padding_byte_count);
+		memcpy(to + len, padding_buffer, padding_byte_count);
+		ctx->size += padding_byte_count;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "data to encrypt@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4,
+		       to, ctx->size, 1);
+#endif
+
+	return 0;
+}
+
+static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
+				    struct crypto_async_request *req)
+{
+	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+	struct mxc_scc *scc = ctx->scc;
+	int err;
+
+	dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
+		ablkreq->nbytes, ablkreq->src, ablkreq->dst);
+
+	writel(0, scc->base + SCC_SCM_ERROR_STATUS);
+
+	err = mxc_scc_put_data(ctx, ablkreq);
+	if (err) {
+		mxc_scc_ablkcipher_req_complete(req, ctx, err);
+		return;
+	}
+
+	dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
+		(void *)readl(scc->base + SCC_SCM_RED_START),
+		(void *)readl(scc->base + SCC_SCM_BLACK_START));
+
+	/* clear interrupt control registers */
+	writel(SCC_SCM_INTR_CTRL_CLR_INTR,
+	       scc->base + SCC_SCM_INTR_CTRL);
+
+	writel((ctx->size / ctx->scc->block_size_bytes) - 1,
+	       scc->base + SCC_SCM_LENGTH);
+
+	dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
+		ctx->size / ctx->scc->block_size_bytes,
+		(ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
+		scc->red_memory);
+
+	writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
+}
+
+static irqreturn_t mxc_scc_int(int irq, void *priv)
+{
+	struct crypto_async_request *req;
+	struct mxc_scc_ctx *ctx;
+	struct mxc_scc *scc = priv;
+	int status;
+	int ret;
+
+	status = readl(scc->base + SCC_SCM_STATUS);
+
+	/* clear interrupt control registers */
+	writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);
+
+	if (status & SCC_SCM_STATUS_BUSY)
+		return IRQ_NONE;
+
+	req = scc->req;
+	if (req) {
+		ctx = crypto_tfm_ctx(req->tfm);
+		ret = mxc_scc_get_data(ctx, req);
+		if (ret != -EINPROGRESS)
+			mxc_scc_ablkcipher_req_complete(req, ctx, ret);
+		else
+			mxc_scc_ablkcipher_next(ctx, req);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int mxc_scc_cra_init(struct crypto_tfm *tfm)
+{
+	struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct mxc_scc_crypto_tmpl *algt;
+
+	algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);
+
+	ctx->scc = algt->scc;
+	return 0;
+}
+
+static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
+{
+	struct crypto_async_request *req, *backlog;
+
+	if (ctx->scc->hw_busy)
+		return;
+
+	spin_lock_bh(&ctx->scc->lock);
+	backlog = crypto_get_backlog(&ctx->scc->queue);
+	req = crypto_dequeue_request(&ctx->scc->queue);
+	ctx->scc->req = req;
+	ctx->scc->hw_busy = true;
+	spin_unlock_bh(&ctx->scc->lock);
+
+	if (!req)
+		return;
+
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+
+	mxc_scc_ablkcipher_next(ctx, req);
+}
+
+static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
+			     struct crypto_async_request *req)
+{
+	int ret;
+
+	spin_lock_bh(&ctx->scc->lock);
+	ret = crypto_enqueue_request(&ctx->scc->queue, req);
+	spin_unlock_bh(&ctx->scc->lock);
+
+	if (ret != -EINPROGRESS)
+		return ret;
+
+	mxc_scc_dequeue_req_unlocked(ctx);
+
+	return -EINPROGRESS;
+}
+
+static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
+			   struct ablkcipher_request *req)
+{
+	int err;
+
+	err = mxc_scc_ablkcipher_req_init(req, ctx);
+	if (err)
+		return err;
+
+	return mxc_scc_queue_req(ctx, &req->base);
+}
+
+static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+
+	return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+	ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
+
+	return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+	ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
+
+	return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+	ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
+	ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
+
+	return mxc_scc_des3_op(ctx, req);
+}
+
+static void mxc_scc_hw_init(struct mxc_scc *scc)
+{
+	int offset;
+
+	offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;
+
+	/* Fill the RED_START register */
+	writel(offset, scc->base + SCC_SCM_RED_START);
+
+	/* Fill the BLACK_START register */
+	writel(offset, scc->base + SCC_SCM_BLACK_START);
+
+	scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
+			  SCC_NON_RESERVED_OFFSET;
+
+	scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
+			    SCC_NON_RESERVED_OFFSET;
+
+	scc->bytes_remaining = scc->memory_size_bytes;
+}
+
+static int mxc_scc_get_config(struct mxc_scc *scc)
+{
+	int config;
+
+	config = readl(scc->base + SCC_SCM_CFG);
+
+	scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;
+
+	scc->black_ram_size_blocks = (config & SCC_SCM_CFG_BLACK_SIZE_MASK) >>
+				     SCC_SCM_CFG_BLACK_SIZE_SHIFT;
+
+	scc->memory_size_bytes = (scc->block_size_bytes *
+				  scc->black_ram_size_blocks) -
+				  SCC_NON_RESERVED_OFFSET;
+
+	return 0;
+}
+
+static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
+{
+	enum mxc_scc_state state;
+	int status;
+
+	status = readl(scc->base + SCC_SMN_STATUS) &
+		       SCC_SMN_STATUS_STATE_MASK;
+
+	/* If in Health Check, try to bring it up to the secure state */
+	if (status == SCC_SMN_STATE_HEALTH_CHECK) {
+		/*
+		 * Write a simple algorithm to the Algorithm Sequence
+		 * Checker (ASC)
+		 */
+		writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
+		writel(0x5555, scc->base + SCC_SMN_SEQ_END);
+		writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);
+
+		status = readl(scc->base + SCC_SMN_STATUS) &
+			       SCC_SMN_STATUS_STATE_MASK;
+	}
+
+	switch (status) {
+	case SCC_SMN_STATE_NON_SECURE:
+	case SCC_SMN_STATE_SECURE:
+		state = SCC_STATE_OK;
+		break;
+	case SCC_SMN_STATE_FAIL:
+		state = SCC_STATE_FAILED;
+		break;
+	default:
+		state = SCC_STATE_UNIMPLEMENTED;
+		break;
+	}
+
+	return state;
+}
+
+static struct mxc_scc_crypto_tmpl scc_ecb_des = {
+	.alg = {
+		.cra_name = "ecb(des3_ede)",
+		.cra_driver_name = "ecb-des3-scc",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mxc_scc_ctx),
+		.cra_alignmask = 0,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_module = THIS_MODULE,
+		.cra_init = mxc_scc_cra_init,
+		.cra_u.ablkcipher = {
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.encrypt = mxc_scc_ecb_des_encrypt,
+			.decrypt = mxc_scc_ecb_des_decrypt,
+		}
+	}
+};
+
+static struct mxc_scc_crypto_tmpl scc_cbc_des = {
+	.alg = {
+		.cra_name = "cbc(des3_ede)",
+		.cra_driver_name = "cbc-des3-scc",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct mxc_scc_ctx),
+		.cra_alignmask = 0,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_module = THIS_MODULE,
+		.cra_init = mxc_scc_cra_init,
+		.cra_u.ablkcipher = {
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.encrypt = mxc_scc_cbc_des_encrypt,
+			.decrypt = mxc_scc_cbc_des_decrypt,
+		}
+	}
+};
+
+static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
+	&scc_ecb_des,
+	&scc_cbc_des,
+};
+
+static int mxc_scc_crypto_register(struct mxc_scc *scc)
+{
+	int i;
+	int err = 0;
+
+	for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
+		scc_crypto_algs[i]->scc = scc;
+		err = crypto_register_alg(&scc_crypto_algs[i]->alg);
+		if (err)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	while (--i >= 0)
+		crypto_unregister_alg(&scc_crypto_algs[i]->alg);
+
+	return err;
+}
+
+static void mxc_scc_crypto_unregister(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
+		crypto_unregister_alg(&scc_crypto_algs[i]->alg);
+}
+
+static int mxc_scc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct mxc_scc *scc;
+	enum mxc_scc_state state;
+	int irq;
+	int ret;
+	int i;
+
+	scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
+	if (!scc)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	scc->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(scc->base))
+		return PTR_ERR(scc->base);
+
+	scc->clk = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(scc->clk)) {
+		dev_err(dev, "Could not get ipg clock\n");
+		return PTR_ERR(scc->clk);
+	}
+
+	clk_prepare_enable(scc->clk);
+
+	/* clear error status register */
+	writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
+
+	/* clear interrupt control registers */
+	writel(SCC_SCM_INTR_CTRL_CLR_INTR |
+	       SCC_SCM_INTR_CTRL_MASK_INTR,
+	       scc->base + SCC_SCM_INTR_CTRL);
+
+	writel(SCC_SMN_COMMAND_CLR_INTR |
+	       SCC_SMN_COMMAND_EN_INTR,
+	       scc->base + SCC_SMN_COMMAND);
+
+	scc->dev = dev;
+	platform_set_drvdata(pdev, scc);
+
+	ret = mxc_scc_get_config(scc);
+	if (ret)
+		goto err_out;
+
+	state = mxc_scc_get_state(scc);
+
+	if (state != SCC_STATE_OK) {
+		dev_err(dev, "SCC in unusable state %d\n", state);
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	mxc_scc_hw_init(scc);
+
+	spin_lock_init(&scc->lock);
+	/* FIXME: calculate queue from RAM slots */
+	crypto_init_queue(&scc->queue, 50);
+
+	for (i = 0; i < 2; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0) {
+			dev_err(dev, "failed to get irq resource\n");
+			ret = -EINVAL;
+			goto err_out;
+		}
+
+		ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
+						IRQF_ONESHOT, dev_name(dev), scc);
+		if (ret)
+			goto err_out;
+	}
+
+	ret = mxc_scc_crypto_register(scc);
+	if (ret) {
+		dev_err(dev, "could not register algorithms");
+		goto err_out;
+	}
+
+	dev_info(dev, "registered successfully.\n");
+
+	return 0;
+
+err_out:
+	clk_disable_unprepare(scc->clk);
+
+	return ret;
+}
+
+static int mxc_scc_remove(struct platform_device *pdev)
+{
+	struct mxc_scc *scc = platform_get_drvdata(pdev);
+
+	mxc_scc_crypto_unregister();
+
+	clk_disable_unprepare(scc->clk);
+
+	return 0;
+}
+
+static const struct of_device_id mxc_scc_dt_ids[] = {
+	{ .compatible = "fsl,imx25-scc", .data = NULL, },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);
+
+static struct platform_driver mxc_scc_driver = {
+	.probe	= mxc_scc_probe,
+	.remove	= mxc_scc_remove,
+	.driver	= {
+		.name		= "mxc-scc",
+		.of_match_table	= mxc_scc_dt_ids,
+	},
+};
+
+module_platform_driver(mxc_scc_driver);
+MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
+MODULE_LICENSE("GPL v2");
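
The driver above registers "ecb(des3_ede)" and "cbc(des3_ede)" ablkcipher implementations backed by the SCC's internal secret key (there is deliberately no setkey). A hedged usage sketch follows, illustrative only; it selects the SCC instance by allocating its driver name, and the example_* names are hypothetical:

#include <crypto/des.h>
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct example_result {
	struct completion completion;
	int err;
};

static void example_cb(struct crypto_async_request *req, int err)
{
	struct example_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* buf length must be a multiple of DES3_EDE_BLOCK_SIZE */
static int example_scc_encrypt(u8 *buf, unsigned int len,
			       u8 iv[DES3_EDE_BLOCK_SIZE])
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct example_result res;
	struct scatterlist sg;
	int ret;

	/* no setkey: the SCC ciphers use the device's internal secret key */
	tfm = crypto_alloc_ablkcipher("cbc-des3-scc", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	init_completion(&res.completion);
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_cb, &res);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
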
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index b85a7a7..c5aac25 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1598,7 +1598,7 @@
 
 static void free_queue(void *p, unsigned long q_type)
 {
-	return kmem_cache_free(queue_cache[q_type - 1], p);
+	kmem_cache_free(queue_cache[q_type - 1], p);
 }
 
 static int queue_cache_init(void)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index d420ec7..ce174d3 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -26,7 +26,6 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -176,9 +175,7 @@
 
 	struct scatter_walk		in_walk;
 	struct scatter_walk		out_walk;
-	int			dma_in;
 	struct dma_chan		*dma_lch_in;
-	int			dma_out;
 	struct dma_chan		*dma_lch_out;
 	int			in_sg_len;
 	int			out_sg_len;
@@ -351,30 +348,21 @@
 
 static int omap_aes_dma_init(struct omap_aes_dev *dd)
 {
-	int err = -ENOMEM;
-	dma_cap_mask_t mask;
+	int err;
 
 	dd->dma_lch_out = NULL;
 	dd->dma_lch_in = NULL;
 
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
-	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
-							  omap_dma_filter_fn,
-							  &dd->dma_in,
-							  dd->dev, "rx");
-	if (!dd->dma_lch_in) {
+	dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
+	if (IS_ERR(dd->dma_lch_in)) {
 		dev_err(dd->dev, "Unable to request in DMA channel\n");
-		goto err_dma_in;
+		return PTR_ERR(dd->dma_lch_in);
 	}
 
-	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
-							   omap_dma_filter_fn,
-							   &dd->dma_out,
-							   dd->dev, "tx");
-	if (!dd->dma_lch_out) {
+	dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
+	if (IS_ERR(dd->dma_lch_out)) {
 		dev_err(dd->dev, "Unable to request out DMA channel\n");
+		err = PTR_ERR(dd->dma_lch_out);
 		goto err_dma_out;
 	}
 
@@ -382,14 +370,15 @@
 
 err_dma_out:
 	dma_release_channel(dd->dma_lch_in);
-err_dma_in:
-	if (err)
-		pr_err("error: %d\n", err);
+
 	return err;
 }
 
 static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
 {
+	if (dd->pio_only)
+		return;
+
 	dma_release_channel(dd->dma_lch_out);
 	dma_release_channel(dd->dma_lch_in);
 }
@@ -1080,9 +1069,6 @@
 		goto err;
 	}
 
-	dd->dma_out = -1; /* Dummy value that's unused */
-	dd->dma_in = -1; /* Dummy value that's unused */
-
 	dd->pdata = match->data;
 
 err:
@@ -1116,24 +1102,6 @@
 	}
 	memcpy(res, r, sizeof(*res));
 
-	/* Get the DMA out channel */
-	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!r) {
-		dev_err(dev, "no DMA out resource info\n");
-		err = -ENODEV;
-		goto err;
-	}
-	dd->dma_out = r->start;
-
-	/* Get the DMA in channel */
-	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!r) {
-		dev_err(dev, "no DMA in resource info\n");
-		err = -ENODEV;
-		goto err;
-	}
-	dd->dma_in = r->start;
-
 	/* Only OMAP2/3 can be non-DT */
 	dd->pdata = &omap_aes_pdata_omap2;
 
@@ -1191,7 +1159,9 @@
 	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
 
 	err = omap_aes_dma_init(dd);
-	if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
+	if (err == -EPROBE_DEFER) {
+		goto err_irq;
+	} else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
 		dd->pio_only = 1;
 
 		irq = platform_get_irq(pdev, 0);
@@ -1248,8 +1218,8 @@
 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
 			crypto_unregister_alg(
 					&dd->pdata->algs_info[i].algs_list[j]);
-	if (!dd->pio_only)
-		omap_aes_dma_cleanup(dd);
+
+	omap_aes_dma_cleanup(dd);
 err_irq:
 	tasklet_kill(&dd->done_task);
 	pm_runtime_disable(dev);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index dd7b93f..3eedb03 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -29,7 +29,6 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -39,6 +38,7 @@
 #include <linux/interrupt.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/des.h>
+#include <crypto/algapi.h>
 
 #define DST_MAXBURST			2
 
@@ -132,14 +132,10 @@
 	unsigned long		flags;
 	int			err;
 
-	/* spinlock used for queues */
-	spinlock_t		lock;
-	struct crypto_queue	queue;
-
 	struct tasklet_struct	done_task;
-	struct tasklet_struct	queue_task;
 
 	struct ablkcipher_request	*req;
+	struct crypto_engine		*engine;
 	/*
 	 * total is used by PIO mode for book keeping so introduce
 	 * variable total_save as need it to calc page_order
@@ -158,9 +154,7 @@
 
 	struct scatter_walk		in_walk;
 	struct scatter_walk		out_walk;
-	int			dma_in;
 	struct dma_chan		*dma_lch_in;
-	int			dma_out;
 	struct dma_chan		*dma_lch_out;
 	int			in_sg_len;
 	int			out_sg_len;
@@ -340,30 +334,21 @@
 
 static int omap_des_dma_init(struct omap_des_dev *dd)
 {
-	int err = -ENOMEM;
-	dma_cap_mask_t mask;
+	int err;
 
 	dd->dma_lch_out = NULL;
 	dd->dma_lch_in = NULL;
 
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
-	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
-							  omap_dma_filter_fn,
-							  &dd->dma_in,
-							  dd->dev, "rx");
-	if (!dd->dma_lch_in) {
+	dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
+	if (IS_ERR(dd->dma_lch_in)) {
 		dev_err(dd->dev, "Unable to request in DMA channel\n");
-		goto err_dma_in;
+		return PTR_ERR(dd->dma_lch_in);
 	}
 
-	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
-							   omap_dma_filter_fn,
-							   &dd->dma_out,
-							   dd->dev, "tx");
-	if (!dd->dma_lch_out) {
+	dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
+	if (IS_ERR(dd->dma_lch_out)) {
 		dev_err(dd->dev, "Unable to request out DMA channel\n");
+		err = PTR_ERR(dd->dma_lch_out);
 		goto err_dma_out;
 	}
 
@@ -371,14 +356,15 @@
 
 err_dma_out:
 	dma_release_channel(dd->dma_lch_in);
-err_dma_in:
-	if (err)
-		pr_err("error: %d\n", err);
+
 	return err;
 }
 
 static void omap_des_dma_cleanup(struct omap_des_dev *dd)
 {
+	if (dd->pio_only)
+		return;
+
 	dma_release_channel(dd->dma_lch_out);
 	dma_release_channel(dd->dma_lch_in);
 }
@@ -520,9 +506,7 @@
 	pr_debug("err: %d\n", err);
 
 	pm_runtime_put(dd->dev);
-	dd->flags &= ~FLAGS_BUSY;
-
-	req->base.complete(&req->base, err);
+	crypto_finalize_request(dd->engine, req, err);
 }
 
 static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
@@ -585,34 +569,24 @@
 }
 
 static int omap_des_handle_queue(struct omap_des_dev *dd,
-			       struct ablkcipher_request *req)
+				 struct ablkcipher_request *req)
 {
-	struct crypto_async_request *async_req, *backlog;
-	struct omap_des_ctx *ctx;
-	struct omap_des_reqctx *rctx;
-	unsigned long flags;
-	int err, ret = 0;
-
-	spin_lock_irqsave(&dd->lock, flags);
 	if (req)
-		ret = ablkcipher_enqueue_request(&dd->queue, req);
-	if (dd->flags & FLAGS_BUSY) {
-		spin_unlock_irqrestore(&dd->lock, flags);
-		return ret;
-	}
-	backlog = crypto_get_backlog(&dd->queue);
-	async_req = crypto_dequeue_request(&dd->queue);
-	if (async_req)
-		dd->flags |= FLAGS_BUSY;
-	spin_unlock_irqrestore(&dd->lock, flags);
+		return crypto_transfer_request_to_engine(dd->engine, req);
 
-	if (!async_req)
-		return ret;
+	return 0;
+}
 
-	if (backlog)
-		backlog->complete(backlog, -EINPROGRESS);
+static int omap_des_prepare_req(struct crypto_engine *engine,
+				struct ablkcipher_request *req)
+{
+	struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct omap_des_dev *dd = omap_des_find_dev(ctx);
+	struct omap_des_reqctx *rctx;
 
-	req = ablkcipher_request_cast(async_req);
+	if (!dd)
+		return -ENODEV;
 
 	/* assign new request to device */
 	dd->req = req;
@@ -642,16 +616,20 @@
 	dd->ctx = ctx;
 	ctx->dd = dd;
 
-	err = omap_des_write_ctrl(dd);
-	if (!err)
-		err = omap_des_crypt_dma_start(dd);
-	if (err) {
-		/* des_task will not finish it, so do it here */
-		omap_des_finish_req(dd, err);
-		tasklet_schedule(&dd->queue_task);
-	}
+	return omap_des_write_ctrl(dd);
+}
 
-	return ret; /* return ret, which is enqueue return value */
+static int omap_des_crypt_req(struct crypto_engine *engine,
+			      struct ablkcipher_request *req)
+{
+	struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
+			crypto_ablkcipher_reqtfm(req));
+	struct omap_des_dev *dd = omap_des_find_dev(ctx);
+
+	if (!dd)
+		return -ENODEV;
+
+	return omap_des_crypt_dma_start(dd);
 }
 
 static void omap_des_done_task(unsigned long data)
@@ -683,18 +661,10 @@
 	}
 
 	omap_des_finish_req(dd, 0);
-	omap_des_handle_queue(dd, NULL);
 
 	pr_debug("exit\n");
 }
 
-static void omap_des_queue_task(unsigned long data)
-{
-	struct omap_des_dev *dd = (struct omap_des_dev *)data;
-
-	omap_des_handle_queue(dd, NULL);
-}
-
 static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
 	struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
@@ -999,8 +969,6 @@
 		return -EINVAL;
 	}
 
-	dd->dma_out = -1; /* Dummy value that's unused */
-	dd->dma_in = -1; /* Dummy value that's unused */
 	dd->pdata = match->data;
 
 	return 0;
@@ -1016,33 +984,10 @@
 static int omap_des_get_pdev(struct omap_des_dev *dd,
 		struct platform_device *pdev)
 {
-	struct device *dev = &pdev->dev;
-	struct resource *r;
-	int err = 0;
-
-	/* Get the DMA out channel */
-	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!r) {
-		dev_err(dev, "no DMA out resource info\n");
-		err = -ENODEV;
-		goto err;
-	}
-	dd->dma_out = r->start;
-
-	/* Get the DMA in channel */
-	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!r) {
-		dev_err(dev, "no DMA in resource info\n");
-		err = -ENODEV;
-		goto err;
-	}
-	dd->dma_in = r->start;
-
 	/* non-DT devices get pdata from pdev */
 	dd->pdata = pdev->dev.platform_data;
 
-err:
-	return err;
+	return 0;
 }
 
 static int omap_des_probe(struct platform_device *pdev)
@@ -1062,9 +1007,6 @@
 	dd->dev = dev;
 	platform_set_drvdata(pdev, dd);
 
-	spin_lock_init(&dd->lock);
-	crypto_init_queue(&dd->queue, OMAP_DES_QUEUE_LENGTH);
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(dev, "no MEM resource info\n");
@@ -1103,10 +1045,11 @@
 		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
 
 	tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
-	tasklet_init(&dd->queue_task, omap_des_queue_task, (unsigned long)dd);
 
 	err = omap_des_dma_init(dd);
-	if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) {
+	if (err == -EPROBE_DEFER) {
+		goto err_irq;
+	} else if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) {
 		dd->pio_only = 1;
 
 		irq = platform_get_irq(pdev, 0);
@@ -1144,17 +1087,30 @@
 		}
 	}
 
+	/* Initialize des crypto engine */
+	dd->engine = crypto_engine_alloc_init(dev, 1);
+	if (!dd->engine) {
+		err = -ENOMEM;
+		goto err_algs;
+	}
+
+	dd->engine->prepare_request = omap_des_prepare_req;
+	dd->engine->crypt_one_request = omap_des_crypt_req;
+	err = crypto_engine_start(dd->engine);
+	if (err)
+		goto err_engine;
+
 	return 0;
+
+err_engine:
+	crypto_engine_exit(dd->engine);
 err_algs:
 	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
 			crypto_unregister_alg(
 					&dd->pdata->algs_info[i].algs_list[j]);
-	if (!dd->pio_only)
-		omap_des_dma_cleanup(dd);
+
+	omap_des_dma_cleanup(dd);
 err_irq:
 	tasklet_kill(&dd->done_task);
-	tasklet_kill(&dd->queue_task);
 err_get:
 	pm_runtime_disable(dev);
 err_res:
@@ -1182,7 +1138,6 @@
 					&dd->pdata->algs_info[i].algs_list[j]);
 
 	tasklet_kill(&dd->done_task);
-	tasklet_kill(&dd->queue_task);
 	omap_des_dma_cleanup(dd);
 	pm_runtime_disable(dd->dev);
 	dd = NULL;
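
The omap-des changes above drop the driver's private queue and queue tasklet in favour of the generic crypto engine added this cycle. A skeletal, illustrative sketch of that pattern follows (example_* names are hypothetical; the declarations come from <crypto/algapi.h>, the same header the patch adds, and a real crypt_one_request hook must start the hardware and later report completion via crypto_finalize_request() from its IRQ/tasklet path, as omap_des_finish_req() now does):

#include <crypto/algapi.h>
#include <linux/device.h>
#include <linux/errno.h>

struct example_dev {
	struct crypto_engine *engine;
};

static int example_prepare(struct crypto_engine *engine,
			   struct ablkcipher_request *req)
{
	/* program keys/IV and map buffers; hardware is not started yet */
	return 0;
}

static int example_do_one(struct crypto_engine *engine,
			  struct ablkcipher_request *req)
{
	/*
	 * Start the DMA/hardware here.  The completion path must end with
	 * crypto_finalize_request(engine, req, err) so the engine can pull
	 * the next queued request.
	 */
	return 0;
}

static int example_engine_setup(struct device *dev, struct example_dev *dd)
{
	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine)
		return -ENOMEM;

	dd->engine->prepare_request = example_prepare;
	dd->engine->crypt_one_request = example_do_one;
	return crypto_engine_start(dd->engine);
}

/* The algorithm's .encrypt/.decrypt handlers then just queue the request: */
static int example_queue(struct example_dev *dd,
			 struct ablkcipher_request *req)
{
	return crypto_transfer_request_to_engine(dd->engine, req);
}
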
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 48adb2a..6eefaa2 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -29,7 +29,6 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -219,7 +218,6 @@
 	int			irq;
 	spinlock_t		lock;
 	int			err;
-	unsigned int		dma;
 	struct dma_chan		*dma_lch;
 	struct tasklet_struct	done_task;
 	u8			polling_mode;
@@ -1842,7 +1840,6 @@
 		goto err;
 	}
 
-	dd->dma = -1; /* Dummy value that's unused */
 	dd->pdata = match->data;
 
 err:
@@ -1884,15 +1881,6 @@
 		goto err;
 	}
 
-	/* Get the DMA */
-	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!r) {
-		dev_err(dev, "no DMA resource info\n");
-		err = -ENODEV;
-		goto err;
-	}
-	dd->dma = r->start;
-
 	/* Only OMAP2/3 can be non-DT */
 	dd->pdata = &omap_sham_pdata_omap2;
 
@@ -1946,9 +1934,12 @@
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
-	dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
-						       &dd->dma, dev, "rx");
-	if (!dd->dma_lch) {
+	dd->dma_lch = dma_request_chan(dev, "rx");
+	if (IS_ERR(dd->dma_lch)) {
+		err = PTR_ERR(dd->dma_lch);
+		if (err == -EPROBE_DEFER)
+			goto data_err;
+
 		dd->polling_mode = 1;
 		dev_dbg(dev, "using polling mode instead of dma\n");
 	}
@@ -1995,7 +1986,7 @@
 					&dd->pdata->algs_info[i].algs_list[j]);
 err_pm:
 	pm_runtime_disable(dev);
-	if (dd->dma_lch)
+	if (!IS_ERR_OR_NULL(dd->dma_lch))
 		dma_release_channel(dd->dma_lch);
 data_err:
 	dev_err(dev, "initialization failed.\n");
@@ -2021,7 +2012,7 @@
 	tasklet_kill(&dd->done_task);
 	pm_runtime_disable(&pdev->dev);
 
-	if (dd->dma_lch)
+	if (!dd->polling_mode)
 		dma_release_channel(dd->dma_lch);
 
 	return 0;
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index e13bd08..640c3fc 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -300,9 +300,7 @@
 		pr_err("QAT: Driver removal failed\n");
 		return;
 	}
-	if (adf_dev_stop(accel_dev))
-		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+	adf_dev_stop(accel_dev);
 	adf_dev_shutdown(accel_dev);
 	adf_disable_aer(accel_dev);
 	adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 1af321c..d2d0ae4 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -109,29 +109,6 @@
 {
 }
 
-static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
-{
-	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
-		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
-	if (adf_iov_putmsg(accel_dev, msg, 0)) {
-		dev_err(&GET_DEV(accel_dev),
-			"Failed to send Init event to PF\n");
-		return -EFAULT;
-	}
-	return 0;
-}
-
-static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
-{
-	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
-	    (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
-	if (adf_iov_putmsg(accel_dev, msg, 0))
-		dev_err(&GET_DEV(accel_dev),
-			"Failed to send Shutdown event to PF\n");
-}
-
 void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
 {
 	hw_data->dev_class = &c3xxxiov_class;
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1ac4ae9..949d77b 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -238,6 +238,8 @@
 	if (ret)
 		goto out_err_free_reg;
 
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
 	ret = adf_dev_init(accel_dev);
 	if (ret)
 		goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@
 		pr_err("QAT: Driver removal failed\n");
 		return;
 	}
-	if (adf_dev_stop(accel_dev))
-		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+	adf_dev_stop(accel_dev);
 	adf_dev_shutdown(accel_dev);
 	adf_cleanup_accel(accel_dev);
 	adf_cleanup_pci_dev(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 512c565..bc5cbc1 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -300,9 +300,7 @@
 		pr_err("QAT: Driver removal failed\n");
 		return;
 	}
-	if (adf_dev_stop(accel_dev))
-		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+	adf_dev_stop(accel_dev);
 	adf_dev_shutdown(accel_dev);
 	adf_disable_aer(accel_dev);
 	adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index baf4b509..38e4bc0 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -109,29 +109,6 @@
 {
 }
 
-static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
-{
-	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
-		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
-	if (adf_iov_putmsg(accel_dev, msg, 0)) {
-		dev_err(&GET_DEV(accel_dev),
-			"Failed to send Init event to PF\n");
-		return -EFAULT;
-	}
-	return 0;
-}
-
-static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
-{
-	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
-	    (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
-	if (adf_iov_putmsg(accel_dev, msg, 0))
-		dev_err(&GET_DEV(accel_dev),
-			"Failed to send Shutdown event to PF\n");
-}
-
 void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
 {
 	hw_data->dev_class = &c62xiov_class;
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index d2e4b92..7540ce1 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -238,6 +238,8 @@
 	if (ret)
 		goto out_err_free_reg;
 
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
 	ret = adf_dev_init(accel_dev);
 	if (ret)
 		goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@
 		pr_err("QAT: Driver removal failed\n");
 		return;
 	}
-	if (adf_dev_stop(accel_dev))
-		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+	adf_dev_stop(accel_dev);
 	adf_dev_shutdown(accel_dev);
 	adf_cleanup_accel(accel_dev);
 	adf_cleanup_pci_dev(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 29c7c53..6d74b91 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -9,7 +9,6 @@
 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
 intel_qat-objs := adf_cfg.o \
 	adf_isr.o \
-	adf_vf_isr.o \
 	adf_ctl_drv.o \
 	adf_dev_mgr.o \
 	adf_init.o \
@@ -27,4 +26,5 @@
 	qat_hal.o
 
 intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o \
+			       adf_vf2pf_msg.o adf_vf_isr.o
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index eb557f6..ce7c462 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -61,7 +61,7 @@
 #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
 #define ADF_ADMINMSG_LEN 32
 
-static const u8 const_tab[1024] = {
+static const u8 const_tab[1024] __aligned(1024) = {
 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index 1357511..7632ed0 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -57,10 +57,8 @@
 #define ADF_RING_DC_SIZE "NumConcurrentRequests"
 #define ADF_RING_ASYM_TX "RingAsymTx"
 #define ADF_RING_SYM_TX "RingSymTx"
-#define ADF_RING_RND_TX "RingNrbgTx"
 #define ADF_RING_ASYM_RX "RingAsymRx"
 #define ADF_RING_SYM_RX "RingSymRx"
-#define ADF_RING_RND_RX "RingNrbgRx"
 #define ADF_RING_DC_TX "RingTx"
 #define ADF_RING_DC_RX "RingRx"
 #define ADF_ETRMGR_BANK "Bank"
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 0e82ce3..75faa39 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -67,7 +67,7 @@
 #define ADF_STATUS_AE_INITIALISED 4
 #define ADF_STATUS_AE_UCODE_LOADED 5
 #define ADF_STATUS_AE_STARTED 6
-#define ADF_STATUS_ORPHAN_TH_RUNNING 7
+#define ADF_STATUS_PF_RUNNING 7
 #define ADF_STATUS_IRQ_ALLOCATED 8
 
 enum adf_dev_reset_mode {
@@ -103,7 +103,7 @@
 
 int adf_dev_init(struct adf_accel_dev *accel_dev);
 int adf_dev_start(struct adf_accel_dev *accel_dev);
-int adf_dev_stop(struct adf_accel_dev *accel_dev);
+void adf_dev_stop(struct adf_accel_dev *accel_dev);
 void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
 
 int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
@@ -236,6 +236,13 @@
 				 uint32_t vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
+int adf_init_vf_wq(void);
+void adf_exit_vf_wq(void);
 #else
 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 {
@@ -253,5 +260,33 @@
 static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 }
+
+static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_init_pf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
+
+static inline int adf_init_vf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_vf_wq(void)
+{
+}
+
 #endif
 #endif
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 5c897e6..abc7a7f 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -270,26 +270,33 @@
 	return 0;
 }
 
-static int adf_ctl_stop_devices(uint32_t id)
+static void adf_ctl_stop_devices(uint32_t id)
 {
 	struct adf_accel_dev *accel_dev;
-	int ret = 0;
 
-	list_for_each_entry_reverse(accel_dev, adf_devmgr_get_head(), list) {
+	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
 		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
 			if (!adf_dev_started(accel_dev))
 				continue;
 
-			if (adf_dev_stop(accel_dev)) {
-				dev_err(&GET_DEV(accel_dev),
-					"Failed to stop qat_dev%d\n", id);
-				ret = -EFAULT;
-			} else {
-				adf_dev_shutdown(accel_dev);
-			}
+			/* First stop all VFs */
+			if (!accel_dev->is_vf)
+				continue;
+
+			adf_dev_stop(accel_dev);
+			adf_dev_shutdown(accel_dev);
 		}
 	}
-	return ret;
+
+	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+			if (!adf_dev_started(accel_dev))
+				continue;
+
+			adf_dev_stop(accel_dev);
+			adf_dev_shutdown(accel_dev);
+		}
+	}
 }
 
 static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
@@ -318,9 +325,8 @@
 		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
 			ctl_data->device_id);
 
-	ret = adf_ctl_stop_devices(ctl_data->device_id);
-	if (ret)
-		pr_err("QAT: failed to stop device.\n");
+	adf_ctl_stop_devices(ctl_data->device_id);
+
 out:
 	kfree(ctl_data);
 	return ret;
@@ -462,12 +468,22 @@
 	if (adf_init_aer())
 		goto err_aer;
 
+	if (adf_init_pf_wq())
+		goto err_pf_wq;
+
+	if (adf_init_vf_wq())
+		goto err_vf_wq;
+
 	if (qat_crypto_register())
 		goto err_crypto_register;
 
 	return 0;
 
 err_crypto_register:
+	adf_exit_vf_wq();
+err_vf_wq:
+	adf_exit_pf_wq();
+err_pf_wq:
 	adf_exit_aer();
 err_aer:
 	adf_chr_drv_destroy();
@@ -480,6 +496,8 @@
 {
 	adf_chr_drv_destroy();
 	adf_exit_aer();
+	adf_exit_vf_wq();
+	adf_exit_pf_wq();
 	qat_crypto_unregister();
 	adf_clean_vf_map(false);
 	mutex_destroy(&adf_ctl_lock);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index ef5575e..888c667 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -236,9 +236,9 @@
 * is shutting down.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code otherwise.
+ * Return: void
  */
-int adf_dev_stop(struct adf_accel_dev *accel_dev)
+void adf_dev_stop(struct adf_accel_dev *accel_dev)
 {
 	struct service_hndl *service;
 	struct list_head *list_itr;
@@ -246,9 +246,9 @@
 	int ret;
 
 	if (!adf_dev_started(accel_dev) &&
-	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
-		return 0;
-	}
+	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
+		return;
+
 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
 	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
 
@@ -279,8 +279,6 @@
 		else
 			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
 	}
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(adf_dev_stop);
 
@@ -329,6 +327,8 @@
 			clear_bit(accel_dev->accel_id, &service->init_status);
 	}
 
+	hw_data->disable_iov(accel_dev);
+
 	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
 		hw_data->free_irq(accel_dev);
 		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
@@ -344,7 +344,6 @@
 	if (hw_data->exit_admin_comms)
 		hw_data->exit_admin_comms(accel_dev);
 
-	hw_data->disable_iov(accel_dev);
 	adf_cleanup_etr_data(accel_dev);
 	adf_dev_restore(accel_dev);
 }
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index b81f79a..06d4901 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -302,7 +302,7 @@
 }
 
 /**
- * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * adf_isr_resource_free() - Free IRQ for acceleration device
  * @accel_dev:  Pointer to acceleration device.
  *
  * Function frees interrupts for acceleration device.
@@ -317,7 +317,7 @@
 EXPORT_SYMBOL_GPL(adf_isr_resource_free);
 
 /**
- * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
  * @accel_dev:  Pointer to acceleration device.
  *
  * Function allocates interrupts for acceleration device.
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 1117a8b..4a526e2 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -119,11 +119,6 @@
 	int i;
 	u32 reg;
 
-	/* Workqueue for PF2VF responses */
-	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
-	if (!pf2vf_resp_wq)
-		return -ENOMEM;
-
 	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
 	     i++, vf_info++) {
 		/* This ptr will be populated when VFs will be created */
@@ -216,11 +211,6 @@
 
 	kfree(accel_dev->pf.vf_info);
 	accel_dev->pf.vf_info = NULL;
-
-	if (pf2vf_resp_wq) {
-		destroy_workqueue(pf2vf_resp_wq);
-		pf2vf_resp_wq = NULL;
-	}
 }
 EXPORT_SYMBOL_GPL(adf_disable_sriov);
 
@@ -259,13 +249,7 @@
 			return -EBUSY;
 		}
 
-		if (adf_dev_stop(accel_dev)) {
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to stop qat_dev%d\n",
-				accel_dev->accel_id);
-			return -EFAULT;
-		}
-
+		adf_dev_stop(accel_dev);
 		adf_dev_shutdown(accel_dev);
 	}
 
@@ -304,3 +288,19 @@
 	return numvfs;
 }
 EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+	/* Workqueue for PF2VF responses */
+	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+
+	return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+	if (pf2vf_resp_wq) {
+		destroy_workqueue(pf2vf_resp_wq);
+		pf2vf_resp_wq = NULL;
+	}
+}
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
new file mode 100644
index 0000000..cd5f37d
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -0,0 +1,92 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+/**
+ * adf_vf2pf_init() - send init msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to the PF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (adf_iov_putmsg(accel_dev, msg, 0)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Init event to PF\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_init);
+
+/**
+ * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to the PF
+ *
+ * Return: void
+ */
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+	    (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+		if (adf_iov_putmsg(accel_dev, msg, 0))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
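
For reference, the per-device copies of adf_vf2pf_init()/adf_vf2pf_shutdown() removed from the c3xxxvf, c62xvf and dh895xccvf hw_data files above are replaced by this single shared, exported pair. A minimal sketch of how a VF hw_data setup routine is expected to hook them up; the init/shutdown callback fields and the function name below are illustrative assumptions, not lines taken from this patch:

void adf_init_hw_data_examplevfiov(struct adf_hw_device_data *hw_data)
{
	/* ... other hw_data fields ... */
	hw_data->init = adf_vf2pf_init;		/* tell the PF this VF is up */
	hw_data->shutdown = adf_vf2pf_shutdown;	/* tell the PF this VF is going away */
}
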
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 09427b3..aa689ca 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -51,6 +51,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
+#include <linux/workqueue.h>
 #include "adf_accel_devices.h"
 #include "adf_common_drv.h"
 #include "adf_cfg.h"
@@ -64,6 +65,13 @@
 #define ADF_VINTSOU_BUN		BIT(0)
 #define ADF_VINTSOU_PF2VF	BIT(1)
 
+static struct workqueue_struct *adf_vf_stop_wq;
+
+struct adf_vf_stop_data {
+	struct adf_accel_dev *accel_dev;
+	struct work_struct work;
+};
+
 static int adf_enable_msi(struct adf_accel_dev *accel_dev)
 {
 	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -90,6 +98,20 @@
 	pci_disable_msi(pdev);
 }
 
+static void adf_dev_stop_async(struct work_struct *work)
+{
+	struct adf_vf_stop_data *stop_data =
+		container_of(work, struct adf_vf_stop_data, work);
+	struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+
+	/* Re-enable PF2VF interrupts */
+	adf_enable_pf2vf_interrupts(accel_dev);
+	kfree(stop_data);
+}
+
 static void adf_pf2vf_bh_handler(void *data)
 {
 	struct adf_accel_dev *accel_dev = data;
@@ -107,11 +129,29 @@
 		goto err;
 
 	switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
-	case ADF_PF2VF_MSGTYPE_RESTARTING:
+	case ADF_PF2VF_MSGTYPE_RESTARTING: {
+		struct adf_vf_stop_data *stop_data;
+
 		dev_dbg(&GET_DEV(accel_dev),
 			"Restarting msg received from PF 0x%x\n", msg);
-		adf_dev_stop(accel_dev);
-		break;
+
+		clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+		stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+		if (!stop_data) {
+			dev_err(&GET_DEV(accel_dev),
+				"Couldn't schedule stop for vf_%d\n",
+				accel_dev->accel_id);
+			return;
+		}
+		stop_data->accel_dev = accel_dev;
+		INIT_WORK(&stop_data->work, adf_dev_stop_async);
+		queue_work(adf_vf_stop_wq, &stop_data->work);
+		/* To ack, clear the PF2VFINT bit */
+		msg &= ~BIT(0);
+		ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+		return;
+	}
 	case ADF_PF2VF_MSGTYPE_VERSION_RESP:
 		dev_dbg(&GET_DEV(accel_dev),
 			"Version resp received from PF 0x%x\n", msg);
@@ -278,3 +318,18 @@
 	return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+
+int __init adf_init_vf_wq(void)
+{
+	adf_vf_stop_wq = create_workqueue("adf_vf_stop_wq");
+
+	return !adf_vf_stop_wq ? -EFAULT : 0;
+}
+
+void adf_exit_vf_wq(void)
+{
+	if (adf_vf_stop_wq)
+		destroy_workqueue(adf_vf_stop_wq);
+
+	adf_vf_stop_wq = NULL;
+}
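
Since the PF2VF bottom half runs in atomic context (note the GFP_ATOMIC allocation above), the RESTARTING message cannot call adf_dev_stop()/adf_dev_shutdown() directly; the work is deferred to the dedicated adf_vf_stop_wq instead. A generic sketch of that defer-to-workqueue pattern, with all names below purely illustrative rather than part of the QAT driver:

struct my_stop_work {
	struct my_dev *dev;
	struct work_struct work;
};

static void my_stop_fn(struct work_struct *work)
{
	struct my_stop_work *w = container_of(work, struct my_stop_work, work);

	my_dev_stop(w->dev);		/* may sleep: safe in process context */
	kfree(w);
}

/* Called from tasklet/IRQ context, where sleeping is not allowed: */
static void my_schedule_stop(struct my_dev *dev, struct workqueue_struct *wq)
{
	struct my_stop_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;

	w->dev = dev;
	INIT_WORK(&w->work, my_stop_fn);
	queue_work(wq, &w->work);
}
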
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index e5c0727..05f49d4 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -593,7 +593,7 @@
 
 	ret = -ENOMEM;
 	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
-	if (!ctx->n)
+	if (!ctx->d)
 		goto err;
 
 	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
@@ -711,7 +711,7 @@
 	}
 	qat_crypto_put_instance(ctx->inst);
 	ctx->n = NULL;
-	ctx->d = NULL;
+	ctx->e = NULL;
 	ctx->d = NULL;
 }
 
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index a8c4b92..4d2de28 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -302,9 +302,7 @@
 		pr_err("QAT: Driver removal failed\n");
 		return;
 	}
-	if (adf_dev_stop(accel_dev))
-		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+	adf_dev_stop(accel_dev);
 	adf_dev_shutdown(accel_dev);
 	adf_disable_aer(accel_dev);
 	adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index dc04ab6..a3b4dd8 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -109,29 +109,6 @@
 {
 }
 
-static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
-{
-	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
-		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
-	if (adf_iov_putmsg(accel_dev, msg, 0)) {
-		dev_err(&GET_DEV(accel_dev),
-			"Failed to send Init event to PF\n");
-		return -EFAULT;
-	}
-	return 0;
-}
-
-static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
-{
-	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
-	    (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
-	if (adf_iov_putmsg(accel_dev, msg, 0))
-		dev_err(&GET_DEV(accel_dev),
-			"Failed to send Shutdown event to PF\n");
-}
-
 void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
 {
 	hw_data->dev_class = &dh895xcciov_class;
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index f8cc4bf..60df986 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -238,6 +238,8 @@
 	if (ret)
 		goto out_err_free_reg;
 
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
 	ret = adf_dev_init(accel_dev);
 	if (ret)
 		goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@
 		pr_err("QAT: Driver removal failed\n");
 		return;
 	}
-	if (adf_dev_stop(accel_dev))
-		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+	adf_dev_stop(accel_dev);
 	adf_dev_shutdown(accel_dev);
 	adf_cleanup_accel(accel_dev);
 	adf_cleanup_pci_dev(accel_dev);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 5f161a9..2b3a0cf 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -11,65 +11,64 @@
  *
  */
 
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
 
-#include <crypto/algapi.h>
-#include <crypto/aes.h>
 #include <crypto/ctr.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
 
 #define _SBF(s, v)                      ((v) << (s))
-#define _BIT(b)                         _SBF(b, 1)
 
 /* Feed control registers */
 #define SSS_REG_FCINTSTAT               0x0000
-#define SSS_FCINTSTAT_BRDMAINT          _BIT(3)
-#define SSS_FCINTSTAT_BTDMAINT          _BIT(2)
-#define SSS_FCINTSTAT_HRDMAINT          _BIT(1)
-#define SSS_FCINTSTAT_PKDMAINT          _BIT(0)
+#define SSS_FCINTSTAT_BRDMAINT          BIT(3)
+#define SSS_FCINTSTAT_BTDMAINT          BIT(2)
+#define SSS_FCINTSTAT_HRDMAINT          BIT(1)
+#define SSS_FCINTSTAT_PKDMAINT          BIT(0)
 
 #define SSS_REG_FCINTENSET              0x0004
-#define SSS_FCINTENSET_BRDMAINTENSET    _BIT(3)
-#define SSS_FCINTENSET_BTDMAINTENSET    _BIT(2)
-#define SSS_FCINTENSET_HRDMAINTENSET    _BIT(1)
-#define SSS_FCINTENSET_PKDMAINTENSET    _BIT(0)
+#define SSS_FCINTENSET_BRDMAINTENSET    BIT(3)
+#define SSS_FCINTENSET_BTDMAINTENSET    BIT(2)
+#define SSS_FCINTENSET_HRDMAINTENSET    BIT(1)
+#define SSS_FCINTENSET_PKDMAINTENSET    BIT(0)
 
 #define SSS_REG_FCINTENCLR              0x0008
-#define SSS_FCINTENCLR_BRDMAINTENCLR    _BIT(3)
-#define SSS_FCINTENCLR_BTDMAINTENCLR    _BIT(2)
-#define SSS_FCINTENCLR_HRDMAINTENCLR    _BIT(1)
-#define SSS_FCINTENCLR_PKDMAINTENCLR    _BIT(0)
+#define SSS_FCINTENCLR_BRDMAINTENCLR    BIT(3)
+#define SSS_FCINTENCLR_BTDMAINTENCLR    BIT(2)
+#define SSS_FCINTENCLR_HRDMAINTENCLR    BIT(1)
+#define SSS_FCINTENCLR_PKDMAINTENCLR    BIT(0)
 
 #define SSS_REG_FCINTPEND               0x000C
-#define SSS_FCINTPEND_BRDMAINTP         _BIT(3)
-#define SSS_FCINTPEND_BTDMAINTP         _BIT(2)
-#define SSS_FCINTPEND_HRDMAINTP         _BIT(1)
-#define SSS_FCINTPEND_PKDMAINTP         _BIT(0)
+#define SSS_FCINTPEND_BRDMAINTP         BIT(3)
+#define SSS_FCINTPEND_BTDMAINTP         BIT(2)
+#define SSS_FCINTPEND_HRDMAINTP         BIT(1)
+#define SSS_FCINTPEND_PKDMAINTP         BIT(0)
 
 #define SSS_REG_FCFIFOSTAT              0x0010
-#define SSS_FCFIFOSTAT_BRFIFOFUL        _BIT(7)
-#define SSS_FCFIFOSTAT_BRFIFOEMP        _BIT(6)
-#define SSS_FCFIFOSTAT_BTFIFOFUL        _BIT(5)
-#define SSS_FCFIFOSTAT_BTFIFOEMP        _BIT(4)
-#define SSS_FCFIFOSTAT_HRFIFOFUL        _BIT(3)
-#define SSS_FCFIFOSTAT_HRFIFOEMP        _BIT(2)
-#define SSS_FCFIFOSTAT_PKFIFOFUL        _BIT(1)
-#define SSS_FCFIFOSTAT_PKFIFOEMP        _BIT(0)
+#define SSS_FCFIFOSTAT_BRFIFOFUL        BIT(7)
+#define SSS_FCFIFOSTAT_BRFIFOEMP        BIT(6)
+#define SSS_FCFIFOSTAT_BTFIFOFUL        BIT(5)
+#define SSS_FCFIFOSTAT_BTFIFOEMP        BIT(4)
+#define SSS_FCFIFOSTAT_HRFIFOFUL        BIT(3)
+#define SSS_FCFIFOSTAT_HRFIFOEMP        BIT(2)
+#define SSS_FCFIFOSTAT_PKFIFOFUL        BIT(1)
+#define SSS_FCFIFOSTAT_PKFIFOEMP        BIT(0)
 
 #define SSS_REG_FCFIFOCTRL              0x0014
-#define SSS_FCFIFOCTRL_DESSEL           _BIT(2)
+#define SSS_FCFIFOCTRL_DESSEL           BIT(2)
 #define SSS_HASHIN_INDEPENDENT          _SBF(0, 0x00)
 #define SSS_HASHIN_CIPHER_INPUT         _SBF(0, 0x01)
 #define SSS_HASHIN_CIPHER_OUTPUT        _SBF(0, 0x02)
@@ -77,52 +76,52 @@
 #define SSS_REG_FCBRDMAS                0x0020
 #define SSS_REG_FCBRDMAL                0x0024
 #define SSS_REG_FCBRDMAC                0x0028
-#define SSS_FCBRDMAC_BYTESWAP           _BIT(1)
-#define SSS_FCBRDMAC_FLUSH              _BIT(0)
+#define SSS_FCBRDMAC_BYTESWAP           BIT(1)
+#define SSS_FCBRDMAC_FLUSH              BIT(0)
 
 #define SSS_REG_FCBTDMAS                0x0030
 #define SSS_REG_FCBTDMAL                0x0034
 #define SSS_REG_FCBTDMAC                0x0038
-#define SSS_FCBTDMAC_BYTESWAP           _BIT(1)
-#define SSS_FCBTDMAC_FLUSH              _BIT(0)
+#define SSS_FCBTDMAC_BYTESWAP           BIT(1)
+#define SSS_FCBTDMAC_FLUSH              BIT(0)
 
 #define SSS_REG_FCHRDMAS                0x0040
 #define SSS_REG_FCHRDMAL                0x0044
 #define SSS_REG_FCHRDMAC                0x0048
-#define SSS_FCHRDMAC_BYTESWAP           _BIT(1)
-#define SSS_FCHRDMAC_FLUSH              _BIT(0)
+#define SSS_FCHRDMAC_BYTESWAP           BIT(1)
+#define SSS_FCHRDMAC_FLUSH              BIT(0)
 
 #define SSS_REG_FCPKDMAS                0x0050
 #define SSS_REG_FCPKDMAL                0x0054
 #define SSS_REG_FCPKDMAC                0x0058
-#define SSS_FCPKDMAC_BYTESWAP           _BIT(3)
-#define SSS_FCPKDMAC_DESCEND            _BIT(2)
-#define SSS_FCPKDMAC_TRANSMIT           _BIT(1)
-#define SSS_FCPKDMAC_FLUSH              _BIT(0)
+#define SSS_FCPKDMAC_BYTESWAP           BIT(3)
+#define SSS_FCPKDMAC_DESCEND            BIT(2)
+#define SSS_FCPKDMAC_TRANSMIT           BIT(1)
+#define SSS_FCPKDMAC_FLUSH              BIT(0)
 
 #define SSS_REG_FCPKDMAO                0x005C
 
 /* AES registers */
 #define SSS_REG_AES_CONTROL		0x00
-#define SSS_AES_BYTESWAP_DI             _BIT(11)
-#define SSS_AES_BYTESWAP_DO             _BIT(10)
-#define SSS_AES_BYTESWAP_IV             _BIT(9)
-#define SSS_AES_BYTESWAP_CNT            _BIT(8)
-#define SSS_AES_BYTESWAP_KEY            _BIT(7)
-#define SSS_AES_KEY_CHANGE_MODE         _BIT(6)
+#define SSS_AES_BYTESWAP_DI             BIT(11)
+#define SSS_AES_BYTESWAP_DO             BIT(10)
+#define SSS_AES_BYTESWAP_IV             BIT(9)
+#define SSS_AES_BYTESWAP_CNT            BIT(8)
+#define SSS_AES_BYTESWAP_KEY            BIT(7)
+#define SSS_AES_KEY_CHANGE_MODE         BIT(6)
 #define SSS_AES_KEY_SIZE_128            _SBF(4, 0x00)
 #define SSS_AES_KEY_SIZE_192            _SBF(4, 0x01)
 #define SSS_AES_KEY_SIZE_256            _SBF(4, 0x02)
-#define SSS_AES_FIFO_MODE               _BIT(3)
+#define SSS_AES_FIFO_MODE               BIT(3)
 #define SSS_AES_CHAIN_MODE_ECB          _SBF(1, 0x00)
 #define SSS_AES_CHAIN_MODE_CBC          _SBF(1, 0x01)
 #define SSS_AES_CHAIN_MODE_CTR          _SBF(1, 0x02)
-#define SSS_AES_MODE_DECRYPT            _BIT(0)
+#define SSS_AES_MODE_DECRYPT            BIT(0)
 
 #define SSS_REG_AES_STATUS		0x04
-#define SSS_AES_BUSY                    _BIT(2)
-#define SSS_AES_INPUT_READY             _BIT(1)
-#define SSS_AES_OUTPUT_READY            _BIT(0)
+#define SSS_AES_BUSY                    BIT(2)
+#define SSS_AES_INPUT_READY             BIT(1)
+#define SSS_AES_OUTPUT_READY            BIT(0)
 
 #define SSS_REG_AES_IN_DATA(s)		(0x10 + (s << 2))
 #define SSS_REG_AES_OUT_DATA(s)		(0x20 + (s << 2))
@@ -139,7 +138,7 @@
 						SSS_AES_REG(dev, reg))
 
 /* HW engine modes */
-#define FLAGS_AES_DECRYPT               _BIT(0)
+#define FLAGS_AES_DECRYPT               BIT(0)
 #define FLAGS_AES_MODE_MASK             _SBF(1, 0x03)
 #define FLAGS_AES_CBC                   _SBF(1, 0x01)
 #define FLAGS_AES_CTR                   _SBF(1, 0x02)
@@ -149,7 +148,6 @@
 
 /**
  * struct samsung_aes_variant - platform specific SSS driver data
- * @has_hash_irq: true if SSS module uses hash interrupt, false otherwise
  * @aes_offset: AES register offset from SSS module's base.
  *
  * Specifies platform specific configuration of SSS module.
@@ -157,7 +155,6 @@
  * expansion of its usage.
  */
 struct samsung_aes_variant {
-	bool			    has_hash_irq;
 	unsigned int		    aes_offset;
 };
 
@@ -178,7 +175,6 @@
 	struct clk                 *clk;
 	void __iomem               *ioaddr;
 	void __iomem               *aes_ioaddr;
-	int                         irq_hash;
 	int                         irq_fc;
 
 	struct ablkcipher_request  *req;
@@ -186,6 +182,10 @@
 	struct scatterlist         *sg_src;
 	struct scatterlist         *sg_dst;
 
+	/* In case of unaligned access: */
+	struct scatterlist         *sg_src_cpy;
+	struct scatterlist         *sg_dst_cpy;
+
 	struct tasklet_struct       tasklet;
 	struct crypto_queue         queue;
 	bool                        busy;
@@ -197,12 +197,10 @@
 static struct s5p_aes_dev *s5p_dev;
 
 static const struct samsung_aes_variant s5p_aes_data = {
-	.has_hash_irq	= true,
 	.aes_offset	= 0x4000,
 };
 
 static const struct samsung_aes_variant exynos_aes_data = {
-	.has_hash_irq	= false,
 	.aes_offset	= 0x200,
 };
 
@@ -245,8 +243,45 @@
 	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
 }
 
+static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
+{
+	int len;
+
+	if (!*sg)
+		return;
+
+	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+	free_pages((unsigned long)sg_virt(*sg), get_order(len));
+
+	kfree(*sg);
+	*sg = NULL;
+}
+
+static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
+			    unsigned int nbytes, int out)
+{
+	struct scatter_walk walk;
+
+	if (!nbytes)
+		return;
+
+	scatterwalk_start(&walk, sg);
+	scatterwalk_copychunks(buf, &walk, nbytes, out);
+	scatterwalk_done(&walk, out, 0);
+}
+
 static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
 {
+	if (dev->sg_dst_cpy) {
+		dev_dbg(dev->dev,
+			"Copying %d bytes of output data back to original place\n",
+			dev->req->nbytes);
+		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
+				dev->req->nbytes, 1);
+	}
+	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+
 	/* holding a lock outside */
 	dev->req->base.complete(&dev->req->base, err);
 	dev->busy = false;
@@ -262,15 +297,37 @@
 	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
 }
 
+static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
+			    struct scatterlist **dst)
+{
+	void *pages;
+	int len;
+
+	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
+	if (!*dst)
+		return -ENOMEM;
+
+	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
+	if (!pages) {
+		kfree(*dst);
+		*dst = NULL;
+		return -ENOMEM;
+	}
+
+	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
+
+	sg_init_table(*dst, 1);
+	sg_set_buf(*dst, pages, len);
+
+	return 0;
+}
+
 static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 {
 	int err;
 
-	if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
-		err = -EINVAL;
-		goto exit;
-	}
-	if (!sg_dma_len(sg)) {
+	if (!sg->length) {
 		err = -EINVAL;
 		goto exit;
 	}
@@ -284,7 +341,7 @@
 	dev->sg_dst = sg;
 	err = 0;
 
- exit:
+exit:
 	return err;
 }
 
@@ -292,11 +349,7 @@
 {
 	int err;
 
-	if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
-		err = -EINVAL;
-		goto exit;
-	}
-	if (!sg_dma_len(sg)) {
+	if (!sg->length) {
 		err = -EINVAL;
 		goto exit;
 	}
@@ -310,47 +363,59 @@
 	dev->sg_src = sg;
 	err = 0;
 
- exit:
+exit:
 	return err;
 }
 
-static void s5p_aes_tx(struct s5p_aes_dev *dev)
+/*
+ * Returns true if new transmitting (output) data is ready and its
+ * address+length have to be written to device (by calling
+ * s5p_set_dma_outdata()). False otherwise.
+ */
+static bool s5p_aes_tx(struct s5p_aes_dev *dev)
 {
 	int err = 0;
+	bool ret = false;
 
 	s5p_unset_outdata(dev);
 
 	if (!sg_is_last(dev->sg_dst)) {
 		err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
-		if (err) {
+		if (err)
 			s5p_aes_complete(dev, err);
-			return;
-		}
-
-		s5p_set_dma_outdata(dev, dev->sg_dst);
+		else
+			ret = true;
 	} else {
 		s5p_aes_complete(dev, err);
 
 		dev->busy = true;
 		tasklet_schedule(&dev->tasklet);
 	}
+
+	return ret;
 }
 
-static void s5p_aes_rx(struct s5p_aes_dev *dev)
+/*
+ * Returns true if new receiving (input) data is ready and its
+ * address+length have to be written to device (by calling
+ * s5p_set_dma_indata()). False otherwise.
+ */
+static bool s5p_aes_rx(struct s5p_aes_dev *dev)
 {
 	int err;
+	bool ret = false;
 
 	s5p_unset_indata(dev);
 
 	if (!sg_is_last(dev->sg_src)) {
 		err = s5p_set_indata(dev, sg_next(dev->sg_src));
-		if (err) {
+		if (err)
 			s5p_aes_complete(dev, err);
-			return;
-		}
-
-		s5p_set_dma_indata(dev, dev->sg_src);
+		else
+			ret = true;
 	}
+
+	return ret;
 }
 
 static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
@@ -359,18 +424,29 @@
 	struct s5p_aes_dev     *dev  = platform_get_drvdata(pdev);
 	uint32_t                status;
 	unsigned long           flags;
+	bool			set_dma_tx = false;
+	bool			set_dma_rx = false;
 
 	spin_lock_irqsave(&dev->lock, flags);
 
-	if (irq == dev->irq_fc) {
-		status = SSS_READ(dev, FCINTSTAT);
-		if (status & SSS_FCINTSTAT_BRDMAINT)
-			s5p_aes_rx(dev);
-		if (status & SSS_FCINTSTAT_BTDMAINT)
-			s5p_aes_tx(dev);
+	status = SSS_READ(dev, FCINTSTAT);
+	if (status & SSS_FCINTSTAT_BRDMAINT)
+		set_dma_rx = s5p_aes_rx(dev);
+	if (status & SSS_FCINTSTAT_BTDMAINT)
+		set_dma_tx = s5p_aes_tx(dev);
 
-		SSS_WRITE(dev, FCINTPEND, status);
-	}
+	SSS_WRITE(dev, FCINTPEND, status);
+
+	/*
+	 * Writing length of DMA block (either receiving or transmitting)
+	 * will start the operation immediately, so this should be done
+	 * at the end (even after clearing pending interrupts to not miss the
+	 * interrupt).
+	 */
+	if (set_dma_tx)
+		s5p_set_dma_outdata(dev, dev->sg_dst);
+	if (set_dma_rx)
+		s5p_set_dma_indata(dev, dev->sg_src);
 
 	spin_unlock_irqrestore(&dev->lock, flags);
 
@@ -395,6 +471,71 @@
 	memcpy_toio(keystart, key, keylen);
 }
 
+static bool s5p_is_sg_aligned(struct scatterlist *sg)
+{
+	while (sg) {
+		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+			return false;
+		sg = sg_next(sg);
+	}
+
+	return true;
+}
+
+static int s5p_set_indata_start(struct s5p_aes_dev *dev,
+				struct ablkcipher_request *req)
+{
+	struct scatterlist *sg;
+	int err;
+
+	dev->sg_src_cpy = NULL;
+	sg = req->src;
+	if (!s5p_is_sg_aligned(sg)) {
+		dev_dbg(dev->dev,
+			"At least one unaligned source scatter list, making a copy\n");
+		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
+		if (err)
+			return err;
+
+		sg = dev->sg_src_cpy;
+	}
+
+	err = s5p_set_indata(dev, sg);
+	if (err) {
+		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+		return err;
+	}
+
+	return 0;
+}
+
+static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
+				struct ablkcipher_request *req)
+{
+	struct scatterlist *sg;
+	int err;
+
+	dev->sg_dst_cpy = NULL;
+	sg = req->dst;
+	if (!s5p_is_sg_aligned(sg)) {
+		dev_dbg(dev->dev,
+			"At least one unaligned dest scatter list, making a copy\n");
+		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
+		if (err)
+			return err;
+
+		sg = dev->sg_dst_cpy;
+	}
+
+	err = s5p_set_outdata(dev, sg);
+	if (err) {
+		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+		return err;
+	}
+
+	return 0;
+}
+
 static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 {
 	struct ablkcipher_request  *req = dev->req;
@@ -431,19 +572,19 @@
 		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
 	SSS_WRITE(dev, FCFIFOCTRL, 0x00);
 
-	err = s5p_set_indata(dev, req->src);
+	err = s5p_set_indata_start(dev, req);
 	if (err)
 		goto indata_error;
 
-	err = s5p_set_outdata(dev, req->dst);
+	err = s5p_set_outdata_start(dev, req);
 	if (err)
 		goto outdata_error;
 
 	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
 	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
 
-	s5p_set_dma_indata(dev,  req->src);
-	s5p_set_dma_outdata(dev, req->dst);
+	s5p_set_dma_indata(dev,  dev->sg_src);
+	s5p_set_dma_outdata(dev, dev->sg_dst);
 
 	SSS_WRITE(dev, FCINTENSET,
 		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
@@ -452,10 +593,10 @@
 
 	return;
 
- outdata_error:
+outdata_error:
 	s5p_unset_indata(dev);
 
- indata_error:
+indata_error:
 	s5p_aes_complete(dev, err);
 	spin_unlock_irqrestore(&dev->lock, flags);
 }
@@ -506,7 +647,7 @@
 
 	tasklet_schedule(&dev->tasklet);
 
- exit:
+exit:
 	return err;
 }
 
@@ -671,21 +812,6 @@
 		goto err_irq;
 	}
 
-	if (variant->has_hash_irq) {
-		pdata->irq_hash = platform_get_irq(pdev, 1);
-		if (pdata->irq_hash < 0) {
-			err = pdata->irq_hash;
-			dev_warn(dev, "hash interrupt is not available.\n");
-			goto err_irq;
-		}
-		err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
-				       IRQF_SHARED, pdev->name, pdev);
-		if (err < 0) {
-			dev_warn(dev, "hash interrupt is not available.\n");
-			goto err_irq;
-		}
-	}
-
 	pdata->busy = false;
 	pdata->variant = variant;
 	pdata->dev = dev;
@@ -705,7 +831,7 @@
 
 	return 0;
 
- err_algs:
+err_algs:
 	dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);
 
 	for (j = 0; j < i; j++)
@@ -713,7 +839,7 @@
 
 	tasklet_kill(&pdata->tasklet);
 
- err_irq:
+err_irq:
 	clk_disable_unprepare(pdata->clk);
 
 	s5p_dev = NULL;
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 7be3fbc..3830d7c 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -35,6 +35,7 @@
 	unsigned int todo;
 	struct sg_mapping_iter mi, mo;
 	unsigned int oi, oo; /* offset for in and out */
+	unsigned long flags;
 
 	if (areq->nbytes == 0)
 		return 0;
@@ -49,7 +50,7 @@
 		return -EINVAL;
 	}
 
-	spin_lock_bh(&ss->slock);
+	spin_lock_irqsave(&ss->slock, flags);
 
 	for (i = 0; i < op->keylen; i += 4)
 		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
@@ -117,7 +118,7 @@
 	sg_miter_stop(&mi);
 	sg_miter_stop(&mo);
 	writel(0, ss->base + SS_CTL);
-	spin_unlock_bh(&ss->slock);
+	spin_unlock_irqrestore(&ss->slock, flags);
 	return err;
 }
 
@@ -149,6 +150,7 @@
 	unsigned int ob = 0;	/* offset in buf */
 	unsigned int obo = 0;	/* offset in bufo*/
 	unsigned int obl = 0;	/* length of data in bufo */
+	unsigned long flags;
 
 	if (areq->nbytes == 0)
 		return 0;
@@ -181,7 +183,7 @@
 	if (no_chunk == 1)
 		return sun4i_ss_opti_poll(areq);
 
-	spin_lock_bh(&ss->slock);
+	spin_lock_irqsave(&ss->slock, flags);
 
 	for (i = 0; i < op->keylen; i += 4)
 		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
@@ -307,7 +309,7 @@
 	sg_miter_stop(&mi);
 	sg_miter_stop(&mo);
 	writel(0, ss->base + SS_CTL);
-	spin_unlock_bh(&ss->slock);
+	spin_unlock_irqrestore(&ss->slock, flags);
 
 	return err;
 }
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a0d4a08..b7ee8d3 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@
 		ptr->eptr = upper_32_bits(dma_addr);
 }
 
+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+			     struct talitos_ptr *src_ptr, bool is_sec1)
+{
+	dst_ptr->ptr = src_ptr->ptr;
+	if (!is_sec1)
+		dst_ptr->eptr = src_ptr->eptr;
+}
+
 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
 			       bool is_sec1)
 {
@@ -827,6 +835,16 @@
 	struct scatterlist *psrc;
 };
 
+struct talitos_export_state {
+	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+	u8 buf[HASH_MAX_BLOCK_SIZE];
+	unsigned int swinit;
+	unsigned int first;
+	unsigned int last;
+	unsigned int to_hash_later;
+	unsigned int nbuf;
+};
+
 static int aead_setkey(struct crypto_aead *authenc,
 		       const u8 *key, unsigned int keylen)
 {
@@ -1083,21 +1101,20 @@
 	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
 			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
 							   : DMA_TO_DEVICE);
-
 	/* hmac data */
 	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
 	if (sg_count > 1 &&
 	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
 					 areq->assoclen,
 					 &edesc->link_tbl[tbl_off])) > 1) {
-		tbl_off += ret;
-
 		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
 			       sizeof(struct talitos_ptr), 0);
 		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
 
 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
+
+		tbl_off += ret;
 	} else {
 		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
 		desc->ptr[1].j_extent = 0;
@@ -1126,11 +1143,13 @@
 	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
 		sg_link_tbl_len += authsize;
 
-	if (sg_count > 1 &&
-	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
-					 sg_link_tbl_len,
-					 &edesc->link_tbl[tbl_off])) > 1) {
-		tbl_off += ret;
+	if (sg_count == 1) {
+		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+			       areq->assoclen, 0);
+	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+						areq->assoclen, sg_link_tbl_len,
+						&edesc->link_tbl[tbl_off])) >
+		   1) {
 		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
 		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
 					      tbl_off *
@@ -1138,8 +1157,10 @@
 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len,
 					   DMA_BIDIRECTIONAL);
-	} else
-		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+		tbl_off += ret;
+	} else {
+		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+	}
 
 	/* cipher out */
 	desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1172,13 @@
 
 	edesc->icv_ool = false;
 
-	if (sg_count > 1 &&
-	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+	if (sg_count == 1) {
+		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+			       areq->assoclen, 0);
+	} else if ((sg_count =
+			sg_to_link_tbl_offset(areq->dst, sg_count,
 					      areq->assoclen, cryptlen,
-					      &edesc->link_tbl[tbl_off])) >
-	    1) {
+					      &edesc->link_tbl[tbl_off])) > 1) {
 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
 
 		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1201,9 @@
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
 
 		edesc->icv_ool = true;
-	} else
-		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+	} else {
+		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+	}
 
 	/* iv out */
 	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -1967,6 +1991,46 @@
 	return ahash_process_req(areq, areq->nbytes);
 }
 
+static int ahash_export(struct ahash_request *areq, void *out)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct talitos_export_state *export = out;
+
+	memcpy(export->hw_context, req_ctx->hw_context,
+	       req_ctx->hw_context_size);
+	memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
+	export->swinit = req_ctx->swinit;
+	export->first = req_ctx->first;
+	export->last = req_ctx->last;
+	export->to_hash_later = req_ctx->to_hash_later;
+	export->nbuf = req_ctx->nbuf;
+
+	return 0;
+}
+
+static int ahash_import(struct ahash_request *areq, const void *in)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	const struct talitos_export_state *export = in;
+
+	memset(req_ctx, 0, sizeof(*req_ctx));
+	req_ctx->hw_context_size =
+		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+	memcpy(req_ctx->hw_context, export->hw_context,
+	       req_ctx->hw_context_size);
+	memcpy(req_ctx->buf, export->buf, export->nbuf);
+	req_ctx->swinit = export->swinit;
+	req_ctx->first = export->first;
+	req_ctx->last = export->last;
+	req_ctx->to_hash_later = export->to_hash_later;
+	req_ctx->nbuf = export->nbuf;
+
+	return 0;
+}
+
 struct keyhash_result {
 	struct completion completion;
 	int err;
@@ -2444,6 +2508,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = MD5_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "md5",
 				.cra_driver_name = "md5-talitos",
@@ -2459,6 +2524,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "sha1",
 				.cra_driver_name = "sha1-talitos",
@@ -2474,6 +2540,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "sha224",
 				.cra_driver_name = "sha224-talitos",
@@ -2489,6 +2556,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "sha256",
 				.cra_driver_name = "sha256-talitos",
@@ -2504,6 +2572,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "sha384",
 				.cra_driver_name = "sha384-talitos",
@@ -2519,6 +2588,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "sha512",
 				.cra_driver_name = "sha512-talitos",
@@ -2534,6 +2604,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = MD5_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(md5)",
 				.cra_driver_name = "hmac-md5-talitos",
@@ -2549,6 +2620,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(sha1)",
 				.cra_driver_name = "hmac-sha1-talitos",
@@ -2564,6 +2636,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(sha224)",
 				.cra_driver_name = "hmac-sha224-talitos",
@@ -2579,6 +2652,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(sha256)",
 				.cra_driver_name = "hmac-sha256-talitos",
@@ -2594,6 +2668,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(sha384)",
 				.cra_driver_name = "hmac-sha384-talitos",
@@ -2609,6 +2684,7 @@
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(sha512)",
 				.cra_driver_name = "hmac-sha512-talitos",
@@ -2629,21 +2705,11 @@
 	struct talitos_alg_template algt;
 };
 
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+			       struct talitos_crypto_alg *talitos_alg)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct talitos_crypto_alg *talitos_alg;
-	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct talitos_private *priv;
 
-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
-		talitos_alg = container_of(__crypto_ahash_alg(alg),
-					   struct talitos_crypto_alg,
-					   algt.alg.hash);
-	else
-		talitos_alg = container_of(alg, struct talitos_crypto_alg,
-					   algt.alg.crypto);
-
 	/* update context with ptr to dev */
 	ctx->dev = talitos_alg->dev;
 
@@ -2661,10 +2727,33 @@
 	return 0;
 }
 
+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+		talitos_alg = container_of(__crypto_ahash_alg(alg),
+					   struct talitos_crypto_alg,
+					   algt.alg.hash);
+	else
+		talitos_alg = container_of(alg, struct talitos_crypto_alg,
+					   algt.alg.crypto);
+
+	return talitos_init_common(ctx, talitos_alg);
+}
+
 static int talitos_cra_init_aead(struct crypto_aead *tfm)
 {
-	talitos_cra_init(crypto_aead_tfm(tfm));
-	return 0;
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+	talitos_alg = container_of(alg, struct talitos_crypto_alg,
+				   algt.alg.aead);
+
+	return talitos_init_common(ctx, talitos_alg);
 }
 
 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
@@ -2787,6 +2876,8 @@
 		t_alg->algt.alg.hash.finup = ahash_finup;
 		t_alg->algt.alg.hash.digest = ahash_digest;
 		t_alg->algt.alg.hash.setkey = ahash_setkey;
+		t_alg->algt.alg.hash.import = ahash_import;
+		t_alg->algt.alg.hash.export = ahash_export;
 
 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
 		    !strncmp(alg->cra_name, "hmac", 4)) {
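
With .statesize and the ahash_export()/ahash_import() callbacks wired up, partial hash state on the talitos algorithms can now be saved and restored through the generic ahash API. A rough sketch of a caller, assuming a request already bound to a transform; asynchronous completion and most error handling are omitted for brevity:

static int hash_in_two_steps(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int ret;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = crypto_ahash_init(req);
	if (!ret)
		ret = crypto_ahash_update(req);		/* first chunk in req->src */
	if (!ret)
		ret = crypto_ahash_export(req, state);	/* snapshot partial state */

	/* ... later, possibly on a fresh request bound to the same tfm ... */

	if (!ret)
		ret = crypto_ahash_import(req, state);	/* resume from snapshot */
	if (!ret)
		ret = crypto_ahash_final(req);		/* digest lands in req->result */

	kfree(state);
	return ret;
}
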
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index b999733..9f4994c 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -139,6 +139,26 @@
     "	vor	$vx,$vy,$vy";
 };
 
+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /aix|linux64le/);
+my $mtspr = sub {
+    my ($f,$idx,$ra) = @_;
+    if ($idx == 256 && $no_vrsave) {
+	"	or	$ra,$ra,$ra";
+    } else {
+	"	mtspr	$idx,$ra";
+    }
+};
+my $mfspr = sub {
+    my ($f,$rd,$idx) = @_;
+    if ($idx == 256 && $no_vrsave) {
+	"	li	$rd,-1";
+    } else {
+	"	mfspr	$rd,$idx";
+    }
+};
+
 # PowerISA 2.06 stuff
 sub vsxmem_op {
     my ($f, $vrt, $ra, $rb, $op) = @_;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 4de78c5..78dac0e 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -64,30 +64,32 @@
 	  Otherwise, the governor does not change the frequency
 	  given at the initialization.
 
+config DEVFREQ_GOV_PASSIVE
+	tristate "Passive"
+	help
+	  Sets the frequency based on the frequency of its parent devfreq
+	  device. This governor does not change the frequency on its own
+	  through sysfs entries. It is recommended that a devfreq device
+	  using this governor get its frequency/voltage from an OPP table.
+
 comment "DEVFREQ Drivers"
 
-config ARM_EXYNOS4_BUS_DEVFREQ
-	bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
-	depends on (CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
+config ARM_EXYNOS_BUS_DEVFREQ
+	bool "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+	depends on ARCH_EXYNOS
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
+	select DEVFREQ_GOV_PASSIVE
+	select DEVFREQ_EVENT_EXYNOS_PPMU
+	select PM_DEVFREQ_EVENT
 	select PM_OPP
 	help
-	  This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
-	  and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
-	  It reads PPMU counters of memory controllers and adjusts
-	  the operating frequencies and voltages with OPP support.
+	  This adds the common DEVFREQ driver for the Exynos memory bus. An
+	  Exynos SoC has more than one memory bus group (e.g. MIF and INT),
+	  and each group can contain several memory bus blocks. The driver
+	  reads the PPMU counters of the memory controllers via a DEVFREQ-event
+	  device and adjusts the operating frequencies and voltages with OPP support.
 	  This does not yet operate with optimal voltages.
 
-config ARM_EXYNOS5_BUS_DEVFREQ
-	tristate "ARM Exynos5250 Bus DEVFREQ Driver"
-	depends on SOC_EXYNOS5250
-	select DEVFREQ_GOV_SIMPLE_ONDEMAND
-	select PM_OPP
-	help
-	  This adds the DEVFREQ driver for Exynos5250 bus interface (vdd_int).
-	  It reads PPMU counters of memory controllers and adjusts the
-	  operating frequencies and voltages with OPP support.
-
 config ARM_TEGRA_DEVFREQ
        tristate "Tegra DEVFREQ Driver"
        depends on ARCH_TEGRA_124_SOC
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 5134f9e..09f11d9 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -4,10 +4,10 @@
 obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE)	+= governor_performance.o
 obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)	+= governor_powersave.o
 obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)	+= governor_userspace.o
+obj-$(CONFIG_DEVFREQ_GOV_PASSIVE)	+= governor_passive.o
 
 # DEVFREQ Drivers
-obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ)	+= exynos/
-obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ)	+= exynos/
+obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ)	+= exynos-bus.o
 obj-$(CONFIG_ARM_TEGRA_DEVFREQ)		+= tegra-devfreq.o
 
 # DEVFREQ Event Drivers
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 38bf144..39b048e 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -235,6 +235,11 @@
 
 	mutex_lock(&devfreq_event_list_lock);
 	list_for_each_entry(edev, &devfreq_event_list, node) {
+		if (edev->dev.parent && edev->dev.parent->of_node == node)
+			goto out;
+	}
+
+	list_for_each_entry(edev, &devfreq_event_list, node) {
 		if (!strcmp(edev->desc->name, node->name))
 			goto out;
 	}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 984c5e9..1d6c803 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -25,6 +25,7 @@
 #include <linux/list.h>
 #include <linux/printk.h>
 #include <linux/hrtimer.h>
+#include <linux/of.h>
 #include "governor.h"
 
 static struct class *devfreq_class;
@@ -188,6 +189,29 @@
 	return ERR_PTR(-ENODEV);
 }
 
+static int devfreq_notify_transition(struct devfreq *devfreq,
+		struct devfreq_freqs *freqs, unsigned int state)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	switch (state) {
+	case DEVFREQ_PRECHANGE:
+		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
+				DEVFREQ_PRECHANGE, freqs);
+		break;
+
+	case DEVFREQ_POSTCHANGE:
+		srcu_notifier_call_chain(&devfreq->transition_notifier_list,
+				DEVFREQ_POSTCHANGE, freqs);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /* Load monitoring helper functions for governors use */
 
 /**
@@ -199,7 +223,8 @@
  */
 int update_devfreq(struct devfreq *devfreq)
 {
-	unsigned long freq;
+	struct devfreq_freqs freqs;
+	unsigned long freq, cur_freq;
 	int err = 0;
 	u32 flags = 0;
 
@@ -233,10 +258,22 @@
 		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
 	}
 
+	if (devfreq->profile->get_cur_freq)
+		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
+	else
+		cur_freq = devfreq->previous_freq;
+
+	freqs.old = cur_freq;
+	freqs.new = freq;
+	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
+
 	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
 	if (err)
 		return err;
 
+	freqs.new = freq;
+	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
+
 	if (devfreq->profile->freq_table)
 		if (devfreq_update_status(devfreq, freq))
 			dev_err(&devfreq->dev,
@@ -541,6 +578,8 @@
 		goto err_out;
 	}
 
+	srcu_init_notifier_head(&devfreq->transition_notifier_list);
+
 	mutex_unlock(&devfreq->lock);
 
 	mutex_lock(&devfreq_list_lock);
@@ -639,6 +678,49 @@
 }
 EXPORT_SYMBOL(devm_devfreq_add_device);
 
+#ifdef CONFIG_OF
+/**
+ * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
+ * @dev:	the device that holds the "devfreq" phandle property
+ * @index:	index into the list of devfreq phandles
+ *
+ * Return: the instance of the devfreq device, or an ERR_PTR() on failure.
+ */
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+{
+	struct device_node *node;
+	struct devfreq *devfreq;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	if (!dev->of_node)
+		return ERR_PTR(-EINVAL);
+
+	node = of_parse_phandle(dev->of_node, "devfreq", index);
+	if (!node)
+		return ERR_PTR(-ENODEV);
+
+	mutex_lock(&devfreq_list_lock);
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		if (devfreq->dev.parent
+			&& devfreq->dev.parent->of_node == node) {
+			mutex_unlock(&devfreq_list_lock);
+			return devfreq;
+		}
+	}
+	mutex_unlock(&devfreq_list_lock);
+
+	return ERR_PTR(-EPROBE_DEFER);
+}
+#else
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
+
 /**
  * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
  * @dev:	the device to add devfreq feature.
@@ -1266,6 +1348,129 @@
 }
 EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
 
+/**
+ * devfreq_register_notifier() - Register a driver with devfreq
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to register.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devfreq_register_notifier(struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	int ret = 0;
+
+	if (!devfreq)
+		return -EINVAL;
+
+	switch (list) {
+	case DEVFREQ_TRANSITION_NOTIFIER:
+		ret = srcu_notifier_chain_register(
+				&devfreq->transition_notifier_list, nb);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(devfreq_register_notifier);
+
+/**
+ * devfreq_unregister_notifier() - Unregister a driver with devfreq
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to be unregistered.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devfreq_unregister_notifier(struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	int ret = 0;
+
+	if (!devfreq)
+		return -EINVAL;
+
+	switch (list) {
+	case DEVFREQ_TRANSITION_NOTIFIER:
+		ret = srcu_notifier_chain_unregister(
+				&devfreq->transition_notifier_list, nb);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(devfreq_unregister_notifier);
+
+struct devfreq_notifier_devres {
+	struct devfreq *devfreq;
+	struct notifier_block *nb;
+	unsigned int list;
+};
+
+static void devm_devfreq_notifier_release(struct device *dev, void *res)
+{
+	struct devfreq_notifier_devres *this = res;
+
+	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
+}
+
+/**
+ * devm_devfreq_register_notifier()
+ *	- Resource-managed devfreq_register_notifier()
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to be registered.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devm_devfreq_register_notifier(struct device *dev,
+				struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	struct devfreq_notifier_devres *ptr;
+	int ret;
+
+	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
+				GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = devfreq_register_notifier(devfreq, nb, list);
+	if (ret) {
+		devres_free(ptr);
+		return ret;
+	}
+
+	ptr->devfreq = devfreq;
+	ptr->nb = nb;
+	ptr->list = list;
+	devres_add(dev, ptr);
+
+	return 0;
+}
+EXPORT_SYMBOL(devm_devfreq_register_notifier);
+
+/**
+ * devm_devfreq_unregister_notifier()
+ *	- Resource-managed devfreq_unregister_notifier()
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
+ * @nb:		The notifier block to be unregistered.
+ * @list:	DEVFREQ_TRANSITION_NOTIFIER.
+ */
+void devm_devfreq_unregister_notifier(struct device *dev,
+				struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
+			       devm_devfreq_dev_match, devfreq));
+}
+EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
+
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
 MODULE_DESCRIPTION("devfreq class support");
 MODULE_LICENSE("GPL");
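The devfreq.c hunks above add a per-device SRCU transition notifier chain: update_devfreq() now sends DEVFREQ_PRECHANGE and DEVFREQ_POSTCHANGE events carrying a struct devfreq_freqs with the old and new frequency, and consumers subscribe through devfreq_register_notifier() or its devm_ variant with DEVFREQ_TRANSITION_NOTIFIER. A minimal consumer sketch follows; my_transition_notify, my_nb and my_register_transition_notifier are hypothetical names, and the snippet assumes the caller already holds the struct devfreq pointer it wants to watch.

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/notifier.h>

static int my_transition_notify(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct devfreq_freqs *freqs = data;

	switch (event) {
	case DEVFREQ_PRECHANGE:
		/* freqs->old is the current frequency, freqs->new the target */
		break;
	case DEVFREQ_POSTCHANGE:
		/* freqs->new is the frequency that was actually set */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_transition_notify,
};

static int my_register_transition_notifier(struct device *dev,
					    struct devfreq *df)
{
	/* devm_ variant unregisters automatically when @dev is unbound */
	return devm_devfreq_register_notifier(dev, df, &my_nb,
					      DEVFREQ_TRANSITION_NOTIFIER);
}

In update_devfreq() above, DEVFREQ_PRECHANGE is sent just before profile->target() runs and DEVFREQ_POSTCHANGE only after it returns successfully, so a POSTCHANGE handler can rely on freqs->new being the effective frequency.
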
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index a11720a..1e8b4f4 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -13,6 +13,14 @@
 
 if PM_DEVFREQ_EVENT
 
+config DEVFREQ_EVENT_EXYNOS_NOCP
+	bool "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+	depends on ARCH_EXYNOS
+	select PM_OPP
+	help
+	  This adds the devfreq-event driver for the Exynos SoC. It provides NoC
+	  (Network on Chip) probe counters to measure the bandwidth of the AXI bus.
+
 config DEVFREQ_EVENT_EXYNOS_PPMU
 	bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
 	depends on ARCH_EXYNOS
diff --git a/drivers/devfreq/event/Makefile b/drivers/devfreq/event/Makefile
index be146ea..3d6afd3 100644
--- a/drivers/devfreq/event/Makefile
+++ b/drivers/devfreq/event/Makefile
@@ -1,2 +1,4 @@
 # Exynos DEVFREQ Event Drivers
+
+obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_NOCP) += exynos-nocp.o
 obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
new file mode 100644
index 0000000..6b6a5f3
--- /dev/null
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -0,0 +1,304 @@
+/*
+ * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "exynos-nocp.h"
+
+struct exynos_nocp {
+	struct devfreq_event_dev *edev;
+	struct devfreq_event_desc desc;
+
+	struct device *dev;
+
+	struct regmap *regmap;
+	struct clk *clk;
+};
+
+/*
+ * The devfreq-event ops callbacks for the NoC probe.
+ */
+static int exynos_nocp_set_event(struct devfreq_event_dev *edev)
+{
+	struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev);
+	int ret;
+
+	/* Disable NoC probe */
+	ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+				NOCP_MAIN_CTL_STATEN_MASK, 0);
+	if (ret < 0) {
+		dev_err(nocp->dev, "failed to disable the NoC probe device\n");
+		return ret;
+	}
+
+	/* Set a statistics dump period to 0 */
+	ret = regmap_write(nocp->regmap, NOCP_STAT_PERIOD, 0x0);
+	if (ret < 0)
+		goto out;
+
+	/* Set the IntEvent fields of *_SRC */
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_BYTE_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_CHAIN_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_CYCLE_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_SRC,
+				NOCP_CNT_SRC_INTEVENT_MASK,
+				NOCP_CNT_SRC_INTEVENT_CHAIN_MASK);
+	if (ret < 0)
+		goto out;
+
+
+	/* Set an alarm with a max/min value of 0 to generate StatALARM */
+	ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MIN, 0x0);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MAX, 0x0);
+	if (ret < 0)
+		goto out;
+
+	/* Set AlarmMode */
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_ALARM_MODE,
+				NOCP_CNT_ALARM_MODE_MASK,
+				NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+	if (ret < 0)
+		goto out;
+
+	/* Enable the measurements by setting AlarmEn and StatEn */
+	ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+			NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK,
+			NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK);
+	if (ret < 0)
+		goto out;
+
+	/* Set GlobalEN */
+	ret = regmap_update_bits(nocp->regmap, NOCP_CFG_CTL,
+				NOCP_CFG_CTL_GLOBALEN_MASK,
+				NOCP_CFG_CTL_GLOBALEN_MASK);
+	if (ret < 0)
+		goto out;
+
+	/* Enable NoC probe */
+	ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+				NOCP_MAIN_CTL_STATEN_MASK,
+				NOCP_MAIN_CTL_STATEN_MASK);
+	if (ret < 0)
+		goto out;
+
+	return 0;
+
+out:
+	/* Reset NoC probe */
+	if (regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+				NOCP_MAIN_CTL_STATEN_MASK, 0)) {
+		dev_err(nocp->dev, "Failed to reset NoC probe device\n");
+	}
+
+	return ret;
+}
+
+static int exynos_nocp_get_event(struct devfreq_event_dev *edev,
+				struct devfreq_event_data *edata)
+{
+	struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev);
+	unsigned int counter[4];
+	int ret;
+
+	/* Read the load (byte) and total (cycle) counter pairs */
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_0_VAL, &counter[0]);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_1_VAL, &counter[1]);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_2_VAL, &counter[2]);
+	if (ret < 0)
+		goto out;
+
+	ret = regmap_read(nocp->regmap, NOCP_COUNTERS_3_VAL, &counter[3]);
+	if (ret < 0)
+		goto out;
+
+	edata->load_count = ((counter[1] << 16) | counter[0]);
+	edata->total_count = ((counter[3] << 16) | counter[2]);
+
+	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
+					edata->load_count, edata->total_count);
+
+	return 0;
+
+out:
+	edata->load_count = 0;
+	edata->total_count = 0;
+
+	dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
+
+	return ret;
+}
+
+static const struct devfreq_event_ops exynos_nocp_ops = {
+	.set_event = exynos_nocp_set_event,
+	.get_event = exynos_nocp_get_event,
+};
+
+static const struct of_device_id exynos_nocp_id_match[] = {
+	{ .compatible = "samsung,exynos5420-nocp", },
+	{ /* sentinel */ },
+};
+
+static struct regmap_config exynos_nocp_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = NOCP_COUNTERS_3_VAL,
+};
+
+static int exynos_nocp_parse_dt(struct platform_device *pdev,
+				struct exynos_nocp *nocp)
+{
+	struct device *dev = nocp->dev;
+	struct device_node *np = dev->of_node;
+	struct resource *res;
+	void __iomem *base;
+
+	if (!np) {
+		dev_err(dev, "failed to find devicetree node\n");
+		return -EINVAL;
+	}
+
+	nocp->clk = devm_clk_get(dev, "nocp");
+	if (IS_ERR(nocp->clk))
+		nocp->clk = NULL;
+
+	/* Map the memory-mapped I/O used to control the NoC probe registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (IS_ERR(res))
+		return PTR_ERR(res);
+
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	exynos_nocp_regmap_config.max_register = resource_size(res) - 4;
+
+	nocp->regmap = devm_regmap_init_mmio(dev, base,
+					&exynos_nocp_regmap_config);
+	if (IS_ERR(nocp->regmap)) {
+		dev_err(dev, "failed to initialize regmap\n");
+		return PTR_ERR(nocp->regmap);
+	}
+
+	return 0;
+}
+
+static int exynos_nocp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct exynos_nocp *nocp;
+	int ret;
+
+	nocp = devm_kzalloc(&pdev->dev, sizeof(*nocp), GFP_KERNEL);
+	if (!nocp)
+		return -ENOMEM;
+
+	nocp->dev = &pdev->dev;
+
+	/* Parse dt data to get resource */
+	ret = exynos_nocp_parse_dt(pdev, nocp);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"failed to parse devicetree for resource\n");
+		return ret;
+	}
+
+	/* Add devfreq-event device to measure the bandwidth of NoC */
+	nocp->desc.ops = &exynos_nocp_ops;
+	nocp->desc.driver_data = nocp;
+	nocp->desc.name = np->full_name;
+	nocp->edev = devm_devfreq_event_add_edev(&pdev->dev, &nocp->desc);
+	if (IS_ERR(nocp->edev)) {
+		dev_err(&pdev->dev,
+			"failed to add devfreq-event device\n");
+		return PTR_ERR(nocp->edev);
+	}
+	platform_set_drvdata(pdev, nocp);
+
+	clk_prepare_enable(nocp->clk);
+
+	pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
+			dev_name(dev));
+
+	return 0;
+}
+
+static int exynos_nocp_remove(struct platform_device *pdev)
+{
+	struct exynos_nocp *nocp = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(nocp->clk);
+
+	return 0;
+}
+
+static struct platform_driver exynos_nocp_driver = {
+	.probe	= exynos_nocp_probe,
+	.remove	= exynos_nocp_remove,
+	.driver = {
+		.name	= "exynos-nocp",
+		.of_match_table = exynos_nocp_id_match,
+	},
+};
+module_platform_driver(exynos_nocp_driver);
+
+MODULE_DESCRIPTION("Exynos NoC (Network on Chip) Probe driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/exynos-nocp.h b/drivers/devfreq/event/exynos-nocp.h
new file mode 100644
index 0000000..28564db
--- /dev/null
+++ b/drivers/devfreq/event/exynos-nocp.h
@@ -0,0 +1,78 @@
+/*
+ * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_NOCP_H__
+#define __EXYNOS_NOCP_H__
+
+enum nocp_reg {
+	NOCP_ID_REVISION_ID		= 0x04,
+	NOCP_MAIN_CTL			= 0x08,
+	NOCP_CFG_CTL			= 0x0C,
+
+	NOCP_STAT_PERIOD		= 0x24,
+	NOCP_STAT_GO			= 0x28,
+	NOCP_STAT_ALARM_MIN		= 0x2C,
+	NOCP_STAT_ALARM_MAX		= 0x30,
+	NOCP_STAT_ALARM_STATUS		= 0x34,
+	NOCP_STAT_ALARM_CLR		= 0x38,
+
+	NOCP_COUNTERS_0_SRC		= 0x138,
+	NOCP_COUNTERS_0_ALARM_MODE	= 0x13C,
+	NOCP_COUNTERS_0_VAL		= 0x140,
+
+	NOCP_COUNTERS_1_SRC		= 0x14C,
+	NOCP_COUNTERS_1_ALARM_MODE	= 0x150,
+	NOCP_COUNTERS_1_VAL		= 0x154,
+
+	NOCP_COUNTERS_2_SRC		= 0x160,
+	NOCP_COUNTERS_2_ALARM_MODE	= 0x164,
+	NOCP_COUNTERS_2_VAL		= 0x168,
+
+	NOCP_COUNTERS_3_SRC		= 0x174,
+	NOCP_COUNTERS_3_ALARM_MODE	= 0x178,
+	NOCP_COUNTERS_3_VAL		= 0x17C,
+};
+
+/* NOCP_MAIN_CTL register */
+#define NOCP_MAIN_CTL_ERREN_MASK		BIT(0)
+#define NOCP_MAIN_CTL_TRACEEN_MASK		BIT(1)
+#define NOCP_MAIN_CTL_PAYLOADEN_MASK		BIT(2)
+#define NOCP_MAIN_CTL_STATEN_MASK		BIT(3)
+#define NOCP_MAIN_CTL_ALARMEN_MASK		BIT(4)
+#define NOCP_MAIN_CTL_STATCONDDUMP_MASK	BIT(5)
+#define NOCP_MAIN_CTL_INTRUSIVEMODE_MASK	BIT(6)
+
+/* NOCP_CFG_CTL register */
+#define NOCP_CFG_CTL_GLOBALEN_MASK		BIT(0)
+#define NOCP_CFG_CTL_ACTIVE_MASK		BIT(1)
+
+/* NOCP_COUNTERS_x_SRC register */
+#define NOCP_CNT_SRC_INTEVENT_SHIFT		0
+#define NOCP_CNT_SRC_INTEVENT_MASK		(0x1F << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_OFF_MASK		(0x0 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_CYCLE_MASK	(0x1 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_IDLE_MASK		(0x2 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_XFER_MASK		(0x3 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_BUSY_MASK		(0x4 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_WAIT_MASK		(0x5 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_PKT_MASK		(0x6 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_BYTE_MASK		(0x8 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_CHAIN_MASK	(0x10 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+
+/* NOCP_COUNTERS_x_ALARM_MODE register */
+#define NOCP_CNT_ALARM_MODE_SHIFT		0
+#define NOCP_CNT_ALARM_MODE_MASK		(0x3 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_OFF_MASK		(0x0 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MIN_MASK		(0x1 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MAX_MASK		(0x2 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MIN_MAX_MASK	(0x3 << NOCP_CNT_ALARM_MODE_SHIFT)
+
+#endif /* __EXYNOS_NOCP_H__ */
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
new file mode 100644
index 0000000..2363d0a
--- /dev/null
+++ b/drivers/devfreq/exynos-bus.c
@@ -0,0 +1,570 @@
+/*
+ * Generic Exynos Bus frequency driver with DEVFREQ Framework
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This driver supports the Exynos Bus frequency feature by using the
+ * DEVFREQ framework and is based on drivers/devfreq/exynos/exynos4_bus.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-event.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define DEFAULT_SATURATION_RATIO	40
+#define DEFAULT_VOLTAGE_TOLERANCE	2
+
+struct exynos_bus {
+	struct device *dev;
+
+	struct devfreq *devfreq;
+	struct devfreq_event_dev **edev;
+	unsigned int edev_count;
+	struct mutex lock;
+
+	struct dev_pm_opp *curr_opp;
+
+	struct regulator *regulator;
+	struct clk *clk;
+	unsigned int voltage_tolerance;
+	unsigned int ratio;
+};
+
+/*
+ * Control the devfreq-event devices to get the current state of the bus
+ */
+#define exynos_bus_ops_edev(ops)				\
+static int exynos_bus_##ops(struct exynos_bus *bus)		\
+{								\
+	int i, ret;						\
+								\
+	for (i = 0; i < bus->edev_count; i++) {			\
+		if (!bus->edev[i])				\
+			continue;				\
+		ret = devfreq_event_##ops(bus->edev[i]);	\
+		if (ret < 0)					\
+			return ret;				\
+	}							\
+								\
+	return 0;						\
+}
+exynos_bus_ops_edev(enable_edev);
+exynos_bus_ops_edev(disable_edev);
+exynos_bus_ops_edev(set_event);
+
+static int exynos_bus_get_event(struct exynos_bus *bus,
+				struct devfreq_event_data *edata)
+{
+	struct devfreq_event_data event_data;
+	unsigned long load_count = 0, total_count = 0;
+	int i, ret = 0;
+
+	for (i = 0; i < bus->edev_count; i++) {
+		if (!bus->edev[i])
+			continue;
+
+		ret = devfreq_event_get_event(bus->edev[i], &event_data);
+		if (ret < 0)
+			return ret;
+
+		if (i == 0 || event_data.load_count > load_count) {
+			load_count = event_data.load_count;
+			total_count = event_data.total_count;
+		}
+	}
+
+	edata->load_count = load_count;
+	edata->total_count = total_count;
+
+	return ret;
+}
+
+/*
+ * Callbacks required by the devfreq simple-ondemand governor
+ */
+static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	struct dev_pm_opp *new_opp;
+	unsigned long old_freq, new_freq, old_volt, new_volt, tol;
+	int ret = 0;
+
+	/* Get new opp-bus instance according to new bus clock */
+	rcu_read_lock();
+	new_opp = devfreq_recommended_opp(dev, freq, flags);
+	if (IS_ERR(new_opp)) {
+		dev_err(dev, "failed to get recommended opp instance\n");
+		rcu_read_unlock();
+		return PTR_ERR(new_opp);
+	}
+
+	new_freq = dev_pm_opp_get_freq(new_opp);
+	new_volt = dev_pm_opp_get_voltage(new_opp);
+	old_freq = dev_pm_opp_get_freq(bus->curr_opp);
+	old_volt = dev_pm_opp_get_voltage(bus->curr_opp);
+	rcu_read_unlock();
+
+	if (old_freq == new_freq)
+		return 0;
+	tol = new_volt * bus->voltage_tolerance / 100;
+
+	/* Change voltage and frequency according to new OPP level */
+	mutex_lock(&bus->lock);
+
+	if (old_freq < new_freq) {
+		ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
+		if (ret < 0) {
+			dev_err(bus->dev, "failed to set voltage\n");
+			goto out;
+		}
+	}
+
+	ret = clk_set_rate(bus->clk, new_freq);
+	if (ret < 0) {
+		dev_err(dev, "failed to change clock of bus\n");
+		clk_set_rate(bus->clk, old_freq);
+		goto out;
+	}
+
+	if (old_freq > new_freq) {
+		ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
+		if (ret < 0) {
+			dev_err(bus->dev, "failed to set voltage\n");
+			goto out;
+		}
+	}
+	bus->curr_opp = new_opp;
+
+	dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
+			old_freq/1000, new_freq/1000);
+out:
+	mutex_unlock(&bus->lock);
+
+	return ret;
+}
+
+static int exynos_bus_get_dev_status(struct device *dev,
+				     struct devfreq_dev_status *stat)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	struct devfreq_event_data edata;
+	int ret;
+
+	rcu_read_lock();
+	stat->current_frequency = dev_pm_opp_get_freq(bus->curr_opp);
+	rcu_read_unlock();
+
+	ret = exynos_bus_get_event(bus, &edata);
+	if (ret < 0) {
+		stat->total_time = stat->busy_time = 0;
+		goto err;
+	}
+
+	stat->busy_time = (edata.load_count * 100) / bus->ratio;
+	stat->total_time = edata.total_count;
+
+	dev_dbg(dev, "Usage of devfreq-event : %lu/%lu\n", stat->busy_time,
+							stat->total_time);
+
+err:
+	ret = exynos_bus_set_event(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to set event to devfreq-event devices\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static void exynos_bus_exit(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	int ret;
+
+	ret = exynos_bus_disable_edev(bus);
+	if (ret < 0)
+		dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+	if (bus->regulator)
+		regulator_disable(bus->regulator);
+
+	dev_pm_opp_of_remove_table(dev);
+	clk_disable_unprepare(bus->clk);
+}
+
+/*
+ * Callbacks required by the devfreq passive governor
+ */
+static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
+					u32 flags)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	struct dev_pm_opp *new_opp;
+	unsigned long old_freq, new_freq;
+	int ret = 0;
+
+	/* Get new opp-bus instance according to new bus clock */
+	rcu_read_lock();
+	new_opp = devfreq_recommended_opp(dev, freq, flags);
+	if (IS_ERR(new_opp)) {
+		dev_err(dev, "failed to get recommended opp instance\n");
+		rcu_read_unlock();
+		return PTR_ERR(new_opp);
+	}
+
+	new_freq = dev_pm_opp_get_freq(new_opp);
+	old_freq = dev_pm_opp_get_freq(bus->curr_opp);
+	rcu_read_unlock();
+
+	if (old_freq == new_freq)
+		return 0;
+
+	/* Change the frequency according to new OPP level */
+	mutex_lock(&bus->lock);
+
+	ret = clk_set_rate(bus->clk, new_freq);
+	if (ret < 0) {
+		dev_err(dev, "failed to set the clock of bus\n");
+		goto out;
+	}
+
+	*freq = new_freq;
+	bus->curr_opp = new_opp;
+
+	dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
+			old_freq/1000, new_freq/1000);
+out:
+	mutex_unlock(&bus->lock);
+
+	return ret;
+}
+
+static void exynos_bus_passive_exit(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+
+	dev_pm_opp_of_remove_table(dev);
+	clk_disable_unprepare(bus->clk);
+}
+
+static int exynos_bus_parent_parse_of(struct device_node *np,
+					struct exynos_bus *bus)
+{
+	struct device *dev = bus->dev;
+	int i, ret, count, size;
+
+	/* Get the regulator that supplies each bus with power */
+	bus->regulator = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(bus->regulator)) {
+		dev_err(dev, "failed to get VDD regulator\n");
+		return PTR_ERR(bus->regulator);
+	}
+
+	ret = regulator_enable(bus->regulator);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable VDD regulator\n");
+		return ret;
+	}
+
+	/*
+	 * Get the devfreq-event devices to read the current utilization of the
+	 * buses. This raw data is used by the devfreq simple-ondemand governor.
+	 */
+	count = devfreq_event_get_edev_count(dev);
+	if (count < 0) {
+		dev_err(dev, "failed to get the count of devfreq-event dev\n");
+		ret = count;
+		goto err_regulator;
+	}
+	bus->edev_count = count;
+
+	size = sizeof(*bus->edev) * count;
+	bus->edev = devm_kzalloc(dev, size, GFP_KERNEL);
+	if (!bus->edev) {
+		ret = -ENOMEM;
+		goto err_regulator;
+	}
+
+	for (i = 0; i < count; i++) {
+		bus->edev[i] = devfreq_event_get_edev_by_phandle(dev, i);
+		if (IS_ERR(bus->edev[i])) {
+			ret = -EPROBE_DEFER;
+			goto err_regulator;
+		}
+	}
+
+	/*
+	 * Optionally, get the saturation ratio for the Exynos SoC.
+	 * When measuring the utilization of each AXI bus with devfreq-event
+	 * devices, the measured real cycle count might be much lower than the
+	 * total cycle count of the bus over the sampling period. As a result,
+	 * the devfreq simple-ondemand governor might not decide to change the
+	 * frequency because the utilization (= real cycles / total cycles) is
+	 * too low. So, this property is used to adjust the utilization when
+	 * calculating the busy_time in exynos_bus_get_dev_status().
+	 */
+	if (of_property_read_u32(np, "exynos,saturation-ratio", &bus->ratio))
+		bus->ratio = DEFAULT_SATURATION_RATIO;
+
+	if (of_property_read_u32(np, "exynos,voltage-tolerance",
+					&bus->voltage_tolerance))
+		bus->voltage_tolerance = DEFAULT_VOLTAGE_TOLERANCE;
+
+	return 0;
+
+err_regulator:
+	regulator_disable(bus->regulator);
+
+	return ret;
+}
+
+static int exynos_bus_parse_of(struct device_node *np,
+			      struct exynos_bus *bus)
+{
+	struct device *dev = bus->dev;
+	unsigned long rate;
+	int ret;
+
+	/* Get the source clock that drives the bus */
+	bus->clk = devm_clk_get(dev, "bus");
+	if (IS_ERR(bus->clk)) {
+		dev_err(dev, "failed to get bus clock\n");
+		return PTR_ERR(bus->clk);
+	}
+
+	ret = clk_prepare_enable(bus->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable bus clock\n");
+		return ret;
+	}
+
+	/* Get the freq and voltage from OPP table to scale the bus freq */
+	rcu_read_lock();
+	ret = dev_pm_opp_of_add_table(dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to get OPP table\n");
+		rcu_read_unlock();
+		goto err_clk;
+	}
+
+	rate = clk_get_rate(bus->clk);
+	bus->curr_opp = devfreq_recommended_opp(dev, &rate, 0);
+	if (IS_ERR(bus->curr_opp)) {
+		dev_err(dev, "failed to find dev_pm_opp\n");
+		rcu_read_unlock();
+		ret = PTR_ERR(bus->curr_opp);
+		goto err_opp;
+	}
+	rcu_read_unlock();
+
+	return 0;
+
+err_opp:
+	dev_pm_opp_of_remove_table(dev);
+err_clk:
+	clk_disable_unprepare(bus->clk);
+
+	return ret;
+}
+
+static int exynos_bus_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct devfreq_dev_profile *profile;
+	struct devfreq_simple_ondemand_data *ondemand_data;
+	struct devfreq_passive_data *passive_data;
+	struct devfreq *parent_devfreq;
+	struct exynos_bus *bus;
+	int ret, max_state;
+	unsigned long min_freq, max_freq;
+
+	if (!np) {
+		dev_err(dev, "failed to find devicetree node\n");
+		return -EINVAL;
+	}
+
+	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+	if (!bus)
+		return -ENOMEM;
+	mutex_init(&bus->lock);
+	bus->dev = &pdev->dev;
+	platform_set_drvdata(pdev, bus);
+
+	/* Parse the device-tree to get the resource information */
+	ret = exynos_bus_parse_of(np, bus);
+	if (ret < 0)
+		goto err;
+
+	profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+	if (!profile) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	if (of_parse_phandle(dev->of_node, "devfreq", 0))
+		goto passive;
+	else
+		ret = exynos_bus_parent_parse_of(np, bus);
+
+	if (ret < 0)
+		goto err;
+
+	/* Initialize the devfreq profile and governor data for the parent device */
+	profile->polling_ms = 50;
+	profile->target = exynos_bus_target;
+	profile->get_dev_status = exynos_bus_get_dev_status;
+	profile->exit = exynos_bus_exit;
+
+	ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
+	if (!ondemand_data) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	ondemand_data->upthreshold = 40;
+	ondemand_data->downdifferential = 5;
+
+	/* Add devfreq device to monitor and handle the exynos bus */
+	bus->devfreq = devm_devfreq_add_device(dev, profile, "simple_ondemand",
+						ondemand_data);
+	if (IS_ERR(bus->devfreq)) {
+		dev_err(dev, "failed to add devfreq device\n");
+		ret = PTR_ERR(bus->devfreq);
+		goto err;
+	}
+
+	/* Register an opp_notifier to catch OPP changes */
+	ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
+	if (ret < 0) {
+		dev_err(dev, "failed to register opp notifier\n");
+		goto err;
+	}
+
+	/*
+	 * Enable devfreq-event to get raw data which is used to determine
+	 * current bus load.
+	 */
+	ret = exynos_bus_enable_edev(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable devfreq-event devices\n");
+		goto err;
+	}
+
+	ret = exynos_bus_set_event(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to set event to devfreq-event devices\n");
+		goto err;
+	}
+
+	goto out;
+passive:
+	/* Initialize the devfreq profile and governor data for the passive device */
+	profile->target = exynos_bus_passive_target;
+	profile->exit = exynos_bus_passive_exit;
+
+	/* Get the instance of parent devfreq device */
+	parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
+	if (IS_ERR(parent_devfreq)) {
+		ret = -EPROBE_DEFER;
+		goto err;
+	}
+
+	passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
+	if (!passive_data) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	passive_data->parent = parent_devfreq;
+
+	/* Add devfreq device for exynos bus with passive governor */
+	bus->devfreq = devm_devfreq_add_device(dev, profile, "passive",
+						passive_data);
+	if (IS_ERR(bus->devfreq)) {
+		dev_err(dev,
+			"failed to add devfreq dev with passive governor\n");
+		ret = -EPROBE_DEFER;
+		goto err;
+	}
+
+out:
+	max_state = bus->devfreq->profile->max_state;
+	min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
+	max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
+	pr_info("exynos-bus: new bus device registered: %s (%6ld KHz ~ %6ld KHz)\n",
+			dev_name(dev), min_freq, max_freq);
+
+	return 0;
+
+err:
+	dev_pm_opp_of_remove_table(dev);
+	clk_disable_unprepare(bus->clk);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int exynos_bus_resume(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	int ret;
+
+	ret = exynos_bus_enable_edev(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable the devfreq-event devices\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int exynos_bus_suspend(struct device *dev)
+{
+	struct exynos_bus *bus = dev_get_drvdata(dev);
+	int ret;
+
+	ret = exynos_bus_disable_edev(bus);
+	if (ret < 0) {
+		dev_err(dev, "failed to disable the devfreq-event devices\n");
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_bus_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(exynos_bus_suspend, exynos_bus_resume)
+};
+
+static const struct of_device_id exynos_bus_of_match[] = {
+	{ .compatible = "samsung,exynos-bus", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, exynos_bus_of_match);
+
+static struct platform_driver exynos_bus_platdrv = {
+	.probe		= exynos_bus_probe,
+	.driver = {
+		.name	= "exynos-bus",
+		.pm	= &exynos_bus_pm,
+		.of_match_table = of_match_ptr(exynos_bus_of_match),
+	},
+};
+module_platform_driver(exynos_bus_platdrv);
+
+MODULE_DESCRIPTION("Generic Exynos Bus frequency driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL v2");
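exynos-bus.c above illustrates the intended topology: one parent bus owns the clock, regulator and devfreq-event handling under the simple_ondemand governor, while child buses attach with the new passive governor and follow the parent resolved from the "devfreq" phandle via devfreq_get_devfreq_by_phandle(). Below is a stripped-down sketch of the passive side for a hypothetical platform driver (my_child_probe and my_child_target are illustrative names; the child is assumed to scale only its own clock):

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int my_child_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* Apply *freq to the child's own clock here. */
	return 0;
}

static int my_child_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct devfreq_dev_profile *profile;
	struct devfreq_passive_data *passive;
	struct devfreq *parent, *df;

	/* Resolve the parent devfreq device from the "devfreq" phandle */
	parent = devfreq_get_devfreq_by_phandle(dev, 0);
	if (IS_ERR(parent))
		return -EPROBE_DEFER;

	profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
	passive = devm_kzalloc(dev, sizeof(*passive), GFP_KERNEL);
	if (!profile || !passive)
		return -ENOMEM;

	profile->target = my_child_target;
	passive->parent = parent;

	/* The passive governor mirrors the parent's frequency decisions */
	df = devm_devfreq_add_device(dev, profile, "passive", passive);

	return PTR_ERR_OR_ZERO(df);
}

On the devicetree side this only needs a "devfreq" phandle in the child node pointing at the parent bus node, which is exactly the condition exynos_bus_probe() checks before taking the passive path.
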
diff --git a/drivers/devfreq/exynos/Makefile b/drivers/devfreq/exynos/Makefile
deleted file mode 100644
index 49bc917..0000000
--- a/drivers/devfreq/exynos/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# Exynos DEVFREQ Drivers
-obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ)	+= exynos_ppmu.o exynos4_bus.o
-obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ)	+= exynos_ppmu.o exynos5_bus.o
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c
deleted file mode 100644
index da95092..0000000
--- a/drivers/devfreq/exynos/exynos4_bus.c
+++ /dev/null
@@ -1,1055 +0,0 @@
-/* drivers/devfreq/exynos4210_memorybus.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *	MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
- *	This version supports EXYNOS4210 only. This changes bus frequencies
- *	and vddint voltages. Exynos4412/4212 should be able to be supported
- *	with minor modifications.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/suspend.h>
-#include <linux/pm_opp.h>
-#include <linux/devfreq.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/module.h>
-
-#include <mach/map.h>
-
-#include "exynos_ppmu.h"
-#include "exynos4_bus.h"
-
-#define MAX_SAFEVOLT	1200000 /* 1.2V */
-
-enum exynos4_busf_type {
-	TYPE_BUSF_EXYNOS4210,
-	TYPE_BUSF_EXYNOS4x12,
-};
-
-/* Assume that the bus is saturated if the utilization is 40% */
-#define BUS_SATURATION_RATIO	40
-
-enum busclk_level_idx {
-	LV_0 = 0,
-	LV_1,
-	LV_2,
-	LV_3,
-	LV_4,
-	_LV_END
-};
-
-enum exynos_ppmu_idx {
-	PPMU_DMC0,
-	PPMU_DMC1,
-	PPMU_END,
-};
-
-#define EX4210_LV_MAX	LV_2
-#define EX4x12_LV_MAX	LV_4
-#define EX4210_LV_NUM	(LV_2 + 1)
-#define EX4x12_LV_NUM	(LV_4 + 1)
-
-/**
- * struct busfreq_opp_info - opp information for bus
- * @rate:	Frequency in hertz
- * @volt:	Voltage in microvolts corresponding to this OPP
- */
-struct busfreq_opp_info {
-	unsigned long rate;
-	unsigned long volt;
-};
-
-struct busfreq_data {
-	enum exynos4_busf_type type;
-	struct device *dev;
-	struct devfreq *devfreq;
-	bool disabled;
-	struct regulator *vdd_int;
-	struct regulator *vdd_mif; /* Exynos4412/4212 only */
-	struct busfreq_opp_info curr_oppinfo;
-	struct busfreq_ppmu_data ppmu_data;
-
-	struct notifier_block pm_notifier;
-	struct mutex lock;
-
-	/* Dividers calculated at boot/probe-time */
-	unsigned int dmc_divtable[_LV_END]; /* DMC0 */
-	unsigned int top_divtable[_LV_END];
-};
-
-/* 4210 controls clock of mif and voltage of int */
-static struct bus_opp_table exynos4210_busclk_table[] = {
-	{LV_0, 400000, 1150000},
-	{LV_1, 267000, 1050000},
-	{LV_2, 133000, 1025000},
-	{0, 0, 0},
-};
-
-/*
- * MIF is the main control knob clock for Exynos4x12 MIF/INT
- * clock and voltage of both mif/int are controlled.
- */
-static struct bus_opp_table exynos4x12_mifclk_table[] = {
-	{LV_0, 400000, 1100000},
-	{LV_1, 267000, 1000000},
-	{LV_2, 160000, 950000},
-	{LV_3, 133000, 950000},
-	{LV_4, 100000, 950000},
-	{0, 0, 0},
-};
-
-/*
- * INT is not the control knob of 4x12. LV_x is not meant to represent
- * the current performance. (MIF does)
- */
-static struct bus_opp_table exynos4x12_intclk_table[] = {
-	{LV_0, 200000, 1000000},
-	{LV_1, 160000, 950000},
-	{LV_2, 133000, 925000},
-	{LV_3, 100000, 900000},
-	{0, 0, 0},
-};
-
-/* TODO: asv volt definitions are "__initdata"? */
-/* Some chips have different operating voltages */
-static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
-	{1150000, 1050000, 1050000},
-	{1125000, 1025000, 1025000},
-	{1100000, 1000000, 1000000},
-	{1075000, 975000, 975000},
-	{1050000, 950000, 950000},
-};
-
-static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
-	/* 400      267     160     133     100 */
-	{1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
-	{1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
-	{1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
-	{1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
-	{1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
-	{1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
-	{1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
-	{1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
-	{1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
-};
-
-static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
-	/* 200    160      133     100 */
-	{1000000, 950000, 925000, 900000}, /* ASV0 */
-	{975000,  925000, 925000, 900000}, /* ASV1 */
-	{950000,  925000, 900000, 875000}, /* ASV2 */
-	{950000,  900000, 900000, 875000}, /* ASV3 */
-	{925000,  875000, 875000, 875000}, /* ASV4 */
-	{900000,  850000, 850000, 850000}, /* ASV5 */
-	{900000,  850000, 850000, 850000}, /* ASV6 */
-	{900000,  850000, 850000, 850000}, /* ASV7 */
-	{900000,  850000, 850000, 850000}, /* ASV8 */
-};
-
-/*** Clock Divider Data for Exynos4210 ***/
-static unsigned int exynos4210_clkdiv_dmc0[][8] = {
-	/*
-	 * Clock divider value for following
-	 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
-	 *		DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
-	 */
-
-	/* DMC L0: 400MHz */
-	{ 3, 1, 1, 1, 1, 1, 3, 1 },
-	/* DMC L1: 266.7MHz */
-	{ 4, 1, 1, 2, 1, 1, 3, 1 },
-	/* DMC L2: 133MHz */
-	{ 5, 1, 1, 5, 1, 1, 3, 1 },
-};
-static unsigned int exynos4210_clkdiv_top[][5] = {
-	/*
-	 * Clock divider value for following
-	 * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
-	 */
-	/* ACLK200 L0: 200MHz */
-	{ 3, 7, 4, 5, 1 },
-	/* ACLK200 L1: 160MHz */
-	{ 4, 7, 5, 6, 1 },
-	/* ACLK200 L2: 133MHz */
-	{ 5, 7, 7, 7, 1 },
-};
-static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
-	/*
-	 * Clock divider value for following
-	 * { DIVGDL/R, DIVGPL/R }
-	 */
-	/* ACLK_GDL/R L1: 200MHz */
-	{ 3, 1 },
-	/* ACLK_GDL/R L2: 160MHz */
-	{ 4, 1 },
-	/* ACLK_GDL/R L3: 133MHz */
-	{ 5, 1 },
-};
-
-/*** Clock Divider Data for Exynos4212/4412 ***/
-static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
-	/*
-	 * Clock divider value for following
-	 * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
-	 *              DIVDMCP}
-	 */
-
-	/* DMC L0: 400MHz */
-	{3, 1, 1, 1, 1, 1},
-	/* DMC L1: 266.7MHz */
-	{4, 1, 1, 2, 1, 1},
-	/* DMC L2: 160MHz */
-	{5, 1, 1, 4, 1, 1},
-	/* DMC L3: 133MHz */
-	{5, 1, 1, 5, 1, 1},
-	/* DMC L4: 100MHz */
-	{7, 1, 1, 7, 1, 1},
-};
-static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
-	/*
-	 * Clock divider value for following
-	 * { G2DACP, DIVC2C, DIVC2C_ACLK }
-	 */
-
-	/* DMC L0: 400MHz */
-	{3, 1, 1},
-	/* DMC L1: 266.7MHz */
-	{4, 2, 1},
-	/* DMC L2: 160MHz */
-	{5, 4, 1},
-	/* DMC L3: 133MHz */
-	{5, 5, 1},
-	/* DMC L4: 100MHz */
-	{7, 7, 1},
-};
-static unsigned int exynos4x12_clkdiv_top[][5] = {
-	/*
-	 * Clock divider value for following
-	 * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
-		DIVACLK133, DIVONENAND }
-	 */
-
-	/* ACLK_GDL/R L0: 200MHz */
-	{2, 7, 4, 5, 1},
-	/* ACLK_GDL/R L1: 200MHz */
-	{2, 7, 4, 5, 1},
-	/* ACLK_GDL/R L2: 160MHz */
-	{4, 7, 5, 7, 1},
-	/* ACLK_GDL/R L3: 133MHz */
-	{4, 7, 5, 7, 1},
-	/* ACLK_GDL/R L4: 100MHz */
-	{7, 7, 7, 7, 1},
-};
-static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
-	/*
-	 * Clock divider value for following
-	 * { DIVGDL/R, DIVGPL/R }
-	 */
-
-	/* ACLK_GDL/R L0: 200MHz */
-	{3, 1},
-	/* ACLK_GDL/R L1: 200MHz */
-	{3, 1},
-	/* ACLK_GDL/R L2: 160MHz */
-	{4, 1},
-	/* ACLK_GDL/R L3: 133MHz */
-	{5, 1},
-	/* ACLK_GDL/R L4: 100MHz */
-	{7, 1},
-};
-static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
-	/*
-	 * Clock divider value for following
-	 * { DIVMFC, DIVJPEG, DIVFIMC0~3}
-	 */
-
-	/* SCLK_MFC: 200MHz */
-	{3, 3, 4},
-	/* SCLK_MFC: 200MHz */
-	{3, 3, 4},
-	/* SCLK_MFC: 160MHz */
-	{4, 4, 5},
-	/* SCLK_MFC: 133MHz */
-	{5, 5, 5},
-	/* SCLK_MFC: 100MHz */
-	{7, 7, 7},
-};
-
-
-static int exynos4210_set_busclk(struct busfreq_data *data,
-				 struct busfreq_opp_info *oppi)
-{
-	unsigned int index;
-	unsigned int tmp;
-
-	for (index = LV_0; index < EX4210_LV_NUM; index++)
-		if (oppi->rate == exynos4210_busclk_table[index].clk)
-			break;
-
-	if (index == EX4210_LV_NUM)
-		return -EINVAL;
-
-	/* Change Divider - DMC0 */
-	tmp = data->dmc_divtable[index];
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_DMC0);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC0);
-	} while (tmp & 0x11111111);
-
-	/* Change Divider - TOP */
-	tmp = data->top_divtable[index];
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_TOP);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_TOP);
-	} while (tmp & 0x11111);
-
-	/* Change Divider - LEFTBUS */
-	tmp = __raw_readl(EXYNOS4_CLKDIV_LEFTBUS);
-
-	tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
-	tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
-				EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
-		(exynos4210_clkdiv_lr_bus[index][1] <<
-				EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_LEFTBUS);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_LEFTBUS);
-	} while (tmp & 0x11);
-
-	/* Change Divider - RIGHTBUS */
-	tmp = __raw_readl(EXYNOS4_CLKDIV_RIGHTBUS);
-
-	tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
-	tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
-				EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
-		(exynos4210_clkdiv_lr_bus[index][1] <<
-				EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_RIGHTBUS);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_RIGHTBUS);
-	} while (tmp & 0x11);
-
-	return 0;
-}
-
-static int exynos4x12_set_busclk(struct busfreq_data *data,
-				 struct busfreq_opp_info *oppi)
-{
-	unsigned int index;
-	unsigned int tmp;
-
-	for (index = LV_0; index < EX4x12_LV_NUM; index++)
-		if (oppi->rate == exynos4x12_mifclk_table[index].clk)
-			break;
-
-	if (index == EX4x12_LV_NUM)
-		return -EINVAL;
-
-	/* Change Divider - DMC0 */
-	tmp = data->dmc_divtable[index];
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_DMC0);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC0);
-	} while (tmp & 0x11111111);
-
-	/* Change Divider - DMC1 */
-	tmp = __raw_readl(EXYNOS4_CLKDIV_DMC1);
-
-	tmp &= ~(EXYNOS4_CLKDIV_DMC1_G2D_ACP_MASK |
-		EXYNOS4_CLKDIV_DMC1_C2C_MASK |
-		EXYNOS4_CLKDIV_DMC1_C2CACLK_MASK);
-
-	tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
-				EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT) |
-		(exynos4x12_clkdiv_dmc1[index][1] <<
-				EXYNOS4_CLKDIV_DMC1_C2C_SHIFT) |
-		(exynos4x12_clkdiv_dmc1[index][2] <<
-				EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT));
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_DMC1);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC1);
-	} while (tmp & 0x111111);
-
-	/* Change Divider - TOP */
-	tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);
-
-	tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK266_GPS_MASK |
-		EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
-		EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
-		EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
-		EXYNOS4_CLKDIV_TOP_ONENAND_MASK);
-
-	tmp |= ((exynos4x12_clkdiv_top[index][0] <<
-				EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
-		(exynos4x12_clkdiv_top[index][1] <<
-				EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
-		(exynos4x12_clkdiv_top[index][2] <<
-				EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
-		(exynos4x12_clkdiv_top[index][3] <<
-				EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
-		(exynos4x12_clkdiv_top[index][4] <<
-				EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_TOP);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_TOP);
-	} while (tmp & 0x11111);
-
-	/* Change Divider - LEFTBUS */
-	tmp = __raw_readl(EXYNOS4_CLKDIV_LEFTBUS);
-
-	tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
-	tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
-				EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
-		(exynos4x12_clkdiv_lr_bus[index][1] <<
-				EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_LEFTBUS);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_LEFTBUS);
-	} while (tmp & 0x11);
-
-	/* Change Divider - RIGHTBUS */
-	tmp = __raw_readl(EXYNOS4_CLKDIV_RIGHTBUS);
-
-	tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
-	tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
-				EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
-		(exynos4x12_clkdiv_lr_bus[index][1] <<
-				EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_RIGHTBUS);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_RIGHTBUS);
-	} while (tmp & 0x11);
-
-	/* Change Divider - MFC */
-	tmp = __raw_readl(EXYNOS4_CLKDIV_MFC);
-
-	tmp &= ~(EXYNOS4_CLKDIV_MFC_MASK);
-
-	tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
-				EXYNOS4_CLKDIV_MFC_SHIFT));
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_MFC);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_MFC);
-	} while (tmp & 0x1);
-
-	/* Change Divider - JPEG */
-	tmp = __raw_readl(EXYNOS4_CLKDIV_CAM1);
-
-	tmp &= ~(EXYNOS4_CLKDIV_CAM1_JPEG_MASK);
-
-	tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
-				EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT));
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_CAM1);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_CAM1);
-	} while (tmp & 0x1);
-
-	/* Change Divider - FIMC0~3 */
-	tmp = __raw_readl(EXYNOS4_CLKDIV_CAM);
-
-	tmp &= ~(EXYNOS4_CLKDIV_CAM_FIMC0_MASK | EXYNOS4_CLKDIV_CAM_FIMC1_MASK |
-		EXYNOS4_CLKDIV_CAM_FIMC2_MASK | EXYNOS4_CLKDIV_CAM_FIMC3_MASK);
-
-	tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
-				EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT) |
-		(exynos4x12_clkdiv_sclkip[index][2] <<
-				EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT) |
-		(exynos4x12_clkdiv_sclkip[index][2] <<
-				EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT) |
-		(exynos4x12_clkdiv_sclkip[index][2] <<
-				EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT));
-
-	__raw_writel(tmp, EXYNOS4_CLKDIV_CAM);
-
-	do {
-		tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_CAM1);
-	} while (tmp & 0x1111);
-
-	return 0;
-}
-
-static int exynos4x12_get_intspec(unsigned long mifclk)
-{
-	int i = 0;
-
-	while (exynos4x12_intclk_table[i].clk) {
-		if (exynos4x12_intclk_table[i].clk <= mifclk)
-			return i;
-		i++;
-	}
-
-	return -EINVAL;
-}
-
-static int exynos4_bus_setvolt(struct busfreq_data *data,
-			       struct busfreq_opp_info *oppi,
-			       struct busfreq_opp_info *oldoppi)
-{
-	int err = 0, tmp;
-	unsigned long volt = oppi->volt;
-
-	switch (data->type) {
-	case TYPE_BUSF_EXYNOS4210:
-		/* OPP represents DMC clock + INT voltage */
-		err = regulator_set_voltage(data->vdd_int, volt,
-					    MAX_SAFEVOLT);
-		break;
-	case TYPE_BUSF_EXYNOS4x12:
-		/* OPP represents MIF clock + MIF voltage */
-		err = regulator_set_voltage(data->vdd_mif, volt,
-					    MAX_SAFEVOLT);
-		if (err)
-			break;
-
-		tmp = exynos4x12_get_intspec(oppi->rate);
-		if (tmp < 0) {
-			err = tmp;
-			regulator_set_voltage(data->vdd_mif,
-					      oldoppi->volt,
-					      MAX_SAFEVOLT);
-			break;
-		}
-		err = regulator_set_voltage(data->vdd_int,
-					    exynos4x12_intclk_table[tmp].volt,
-					    MAX_SAFEVOLT);
-		/*  Try to recover */
-		if (err)
-			regulator_set_voltage(data->vdd_mif,
-					      oldoppi->volt,
-					      MAX_SAFEVOLT);
-		break;
-	default:
-		err = -EINVAL;
-	}
-
-	return err;
-}
-
-static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
-			      u32 flags)
-{
-	int err = 0;
-	struct platform_device *pdev = container_of(dev, struct platform_device,
-						    dev);
-	struct busfreq_data *data = platform_get_drvdata(pdev);
-	struct dev_pm_opp *opp;
-	unsigned long freq;
-	unsigned long old_freq = data->curr_oppinfo.rate;
-	struct busfreq_opp_info	new_oppinfo;
-
-	rcu_read_lock();
-	opp = devfreq_recommended_opp(dev, _freq, flags);
-	if (IS_ERR(opp)) {
-		rcu_read_unlock();
-		return PTR_ERR(opp);
-	}
-	new_oppinfo.rate = dev_pm_opp_get_freq(opp);
-	new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
-	rcu_read_unlock();
-	freq = new_oppinfo.rate;
-
-	if (old_freq == freq)
-		return 0;
-
-	dev_dbg(dev, "targeting %lukHz %luuV\n", freq, new_oppinfo.volt);
-
-	mutex_lock(&data->lock);
-
-	if (data->disabled)
-		goto out;
-
-	if (old_freq < freq)
-		err = exynos4_bus_setvolt(data, &new_oppinfo,
-					  &data->curr_oppinfo);
-	if (err)
-		goto out;
-
-	if (old_freq != freq) {
-		switch (data->type) {
-		case TYPE_BUSF_EXYNOS4210:
-			err = exynos4210_set_busclk(data, &new_oppinfo);
-			break;
-		case TYPE_BUSF_EXYNOS4x12:
-			err = exynos4x12_set_busclk(data, &new_oppinfo);
-			break;
-		default:
-			err = -EINVAL;
-		}
-	}
-	if (err)
-		goto out;
-
-	if (old_freq > freq)
-		err = exynos4_bus_setvolt(data, &new_oppinfo,
-					  &data->curr_oppinfo);
-	if (err)
-		goto out;
-
-	data->curr_oppinfo = new_oppinfo;
-out:
-	mutex_unlock(&data->lock);
-	return err;
-}
-
-static int exynos4_bus_get_dev_status(struct device *dev,
-				      struct devfreq_dev_status *stat)
-{
-	struct busfreq_data *data = dev_get_drvdata(dev);
-	struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
-	int busier;
-
-	exynos_read_ppmu(ppmu_data);
-	busier = exynos_get_busier_ppmu(ppmu_data);
-	stat->current_frequency = data->curr_oppinfo.rate;
-
-	/* Number of cycles spent on memory access */
-	stat->busy_time = ppmu_data->ppmu[busier].count[PPMU_PMNCNT3];
-	stat->busy_time *= 100 / BUS_SATURATION_RATIO;
-	stat->total_time = ppmu_data->ppmu[busier].ccnt;
-
-	/* If the counters have overflown, retry */
-	if (ppmu_data->ppmu[busier].ccnt_overflow ||
-	    ppmu_data->ppmu[busier].count_overflow[0])
-		return -EAGAIN;
-
-	return 0;
-}
-
-static struct devfreq_dev_profile exynos4_devfreq_profile = {
-	.initial_freq	= 400000,
-	.polling_ms	= 50,
-	.target		= exynos4_bus_target,
-	.get_dev_status	= exynos4_bus_get_dev_status,
-};
-
-static int exynos4210_init_tables(struct busfreq_data *data)
-{
-	u32 tmp;
-	int mgrp;
-	int i, err = 0;
-
-	tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);
-	for (i = LV_0; i < EX4210_LV_NUM; i++) {
-		tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
-			EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
-			EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
-			EXYNOS4_CLKDIV_DMC0_DMC_MASK |
-			EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
-			EXYNOS4_CLKDIV_DMC0_DMCP_MASK |
-			EXYNOS4_CLKDIV_DMC0_COPY2_MASK |
-			EXYNOS4_CLKDIV_DMC0_CORETI_MASK);
-
-		tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
-					EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
-			(exynos4210_clkdiv_dmc0[i][1] <<
-					EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
-			(exynos4210_clkdiv_dmc0[i][2] <<
-					EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
-			(exynos4210_clkdiv_dmc0[i][3] <<
-					EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
-			(exynos4210_clkdiv_dmc0[i][4] <<
-					EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
-			(exynos4210_clkdiv_dmc0[i][5] <<
-					EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT) |
-			(exynos4210_clkdiv_dmc0[i][6] <<
-					EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT) |
-			(exynos4210_clkdiv_dmc0[i][7] <<
-					EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT));
-
-		data->dmc_divtable[i] = tmp;
-	}
-
-	tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);
-	for (i = LV_0; i <  EX4210_LV_NUM; i++) {
-		tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK200_MASK |
-			EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
-			EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
-			EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
-			EXYNOS4_CLKDIV_TOP_ONENAND_MASK);
-
-		tmp |= ((exynos4210_clkdiv_top[i][0] <<
-					EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT) |
-			(exynos4210_clkdiv_top[i][1] <<
-					EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
-			(exynos4210_clkdiv_top[i][2] <<
-					EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
-			(exynos4210_clkdiv_top[i][3] <<
-					EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
-			(exynos4210_clkdiv_top[i][4] <<
-					EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));
-
-		data->top_divtable[i] = tmp;
-	}
-
-	/*
-	 * TODO: init tmp based on busfreq_data
-	 * (device-tree or platform-data)
-	 */
-	tmp = 0; /* Max voltages for the reliability of the unknown */
-
-	pr_debug("ASV Group of Exynos4 is %d\n", tmp);
-	/* Use merged grouping for voltage */
-	switch (tmp) {
-	case 0:
-		mgrp = 0;
-		break;
-	case 1:
-	case 2:
-		mgrp = 1;
-		break;
-	case 3:
-	case 4:
-		mgrp = 2;
-		break;
-	case 5:
-	case 6:
-		mgrp = 3;
-		break;
-	case 7:
-		mgrp = 4;
-		break;
-	default:
-		pr_warn("Unknown ASV Group. Use max voltage.\n");
-		mgrp = 0;
-	}
-
-	for (i = LV_0; i < EX4210_LV_NUM; i++)
-		exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
-
-	for (i = LV_0; i < EX4210_LV_NUM; i++) {
-		err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk,
-			      exynos4210_busclk_table[i].volt);
-		if (err) {
-			dev_err(data->dev, "Cannot add opp entries.\n");
-			return err;
-		}
-	}
-
-
-	return 0;
-}
-
-static int exynos4x12_init_tables(struct busfreq_data *data)
-{
-	unsigned int i;
-	unsigned int tmp;
-	int ret;
-
-	/* Enable pause function for DREX2 DVFS */
-	tmp = __raw_readl(EXYNOS4_DMC_PAUSE_CTRL);
-	tmp |= EXYNOS4_DMC_PAUSE_ENABLE;
-	__raw_writel(tmp, EXYNOS4_DMC_PAUSE_CTRL);
-
-	tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);
-
-	for (i = 0; i <  EX4x12_LV_NUM; i++) {
-		tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
-			EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
-			EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
-			EXYNOS4_CLKDIV_DMC0_DMC_MASK |
-			EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
-			EXYNOS4_CLKDIV_DMC0_DMCP_MASK);
-
-		tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
-					EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
-			(exynos4x12_clkdiv_dmc0[i][1] <<
-					EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
-			(exynos4x12_clkdiv_dmc0[i][2] <<
-					EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
-			(exynos4x12_clkdiv_dmc0[i][3] <<
-					EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
-			(exynos4x12_clkdiv_dmc0[i][4] <<
-					EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
-			(exynos4x12_clkdiv_dmc0[i][5] <<
-					EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT));
-
-		data->dmc_divtable[i] = tmp;
-	}
-
-	tmp = 0; /* Max voltages for the reliability of the unknown */
-
-	if (tmp > 8)
-		tmp = 0;
-	pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
-
-	for (i = 0; i < EX4x12_LV_NUM; i++) {
-		exynos4x12_mifclk_table[i].volt =
-			exynos4x12_mif_step_50[tmp][i];
-		exynos4x12_intclk_table[i].volt =
-			exynos4x12_int_volt[tmp][i];
-	}
-
-	for (i = 0; i < EX4x12_LV_NUM; i++) {
-		ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
-			      exynos4x12_mifclk_table[i].volt);
-		if (ret) {
-			dev_err(data->dev, "Fail to add opp entries.\n");
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
-		unsigned long event, void *ptr)
-{
-	struct busfreq_data *data = container_of(this, struct busfreq_data,
-						 pm_notifier);
-	struct dev_pm_opp *opp;
-	struct busfreq_opp_info	new_oppinfo;
-	unsigned long maxfreq = ULONG_MAX;
-	int err = 0;
-
-	switch (event) {
-	case PM_SUSPEND_PREPARE:
-		/* Set Fastest and Deactivate DVFS */
-		mutex_lock(&data->lock);
-
-		data->disabled = true;
-
-		rcu_read_lock();
-		opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
-		if (IS_ERR(opp)) {
-			rcu_read_unlock();
-			dev_err(data->dev, "%s: unable to find a min freq\n",
-				__func__);
-			mutex_unlock(&data->lock);
-			return PTR_ERR(opp);
-		}
-		new_oppinfo.rate = dev_pm_opp_get_freq(opp);
-		new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
-		rcu_read_unlock();
-
-		err = exynos4_bus_setvolt(data, &new_oppinfo,
-					  &data->curr_oppinfo);
-		if (err)
-			goto unlock;
-
-		switch (data->type) {
-		case TYPE_BUSF_EXYNOS4210:
-			err = exynos4210_set_busclk(data, &new_oppinfo);
-			break;
-		case TYPE_BUSF_EXYNOS4x12:
-			err = exynos4x12_set_busclk(data, &new_oppinfo);
-			break;
-		default:
-			err = -EINVAL;
-		}
-		if (err)
-			goto unlock;
-
-		data->curr_oppinfo = new_oppinfo;
-unlock:
-		mutex_unlock(&data->lock);
-		if (err)
-			return err;
-		return NOTIFY_OK;
-	case PM_POST_RESTORE:
-	case PM_POST_SUSPEND:
-		/* Reactivate */
-		mutex_lock(&data->lock);
-		data->disabled = false;
-		mutex_unlock(&data->lock);
-		return NOTIFY_OK;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static int exynos4_busfreq_probe(struct platform_device *pdev)
-{
-	struct busfreq_data *data;
-	struct busfreq_ppmu_data *ppmu_data;
-	struct dev_pm_opp *opp;
-	struct device *dev = &pdev->dev;
-	int err = 0;
-
-	data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL);
-	if (data == NULL) {
-		dev_err(dev, "Cannot allocate memory.\n");
-		return -ENOMEM;
-	}
-
-	ppmu_data = &data->ppmu_data;
-	ppmu_data->ppmu_end = PPMU_END;
-	ppmu_data->ppmu = devm_kzalloc(dev,
-				       sizeof(struct exynos_ppmu) * PPMU_END,
-				       GFP_KERNEL);
-	if (!ppmu_data->ppmu) {
-		dev_err(dev, "Failed to allocate memory for exynos_ppmu\n");
-		return -ENOMEM;
-	}
-
-	data->type = pdev->id_entry->driver_data;
-	ppmu_data->ppmu[PPMU_DMC0].hw_base = S5P_VA_DMC0;
-	ppmu_data->ppmu[PPMU_DMC1].hw_base = S5P_VA_DMC1;
-	data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
-	data->dev = dev;
-	mutex_init(&data->lock);
-
-	switch (data->type) {
-	case TYPE_BUSF_EXYNOS4210:
-		err = exynos4210_init_tables(data);
-		break;
-	case TYPE_BUSF_EXYNOS4x12:
-		err = exynos4x12_init_tables(data);
-		break;
-	default:
-		dev_err(dev, "Cannot determine the device id %d\n", data->type);
-		err = -EINVAL;
-	}
-	if (err) {
-		dev_err(dev, "Cannot initialize busfreq table %d\n",
-			     data->type);
-		return err;
-	}
-
-	data->vdd_int = devm_regulator_get(dev, "vdd_int");
-	if (IS_ERR(data->vdd_int)) {
-		dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
-		return PTR_ERR(data->vdd_int);
-	}
-	if (data->type == TYPE_BUSF_EXYNOS4x12) {
-		data->vdd_mif = devm_regulator_get(dev, "vdd_mif");
-		if (IS_ERR(data->vdd_mif)) {
-			dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
-			return PTR_ERR(data->vdd_mif);
-		}
-	}
-
-	rcu_read_lock();
-	opp = dev_pm_opp_find_freq_floor(dev,
-					 &exynos4_devfreq_profile.initial_freq);
-	if (IS_ERR(opp)) {
-		rcu_read_unlock();
-		dev_err(dev, "Invalid initial frequency %lu kHz.\n",
-			exynos4_devfreq_profile.initial_freq);
-		return PTR_ERR(opp);
-	}
-	data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp);
-	data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp);
-	rcu_read_unlock();
-
-	platform_set_drvdata(pdev, data);
-
-	data->devfreq = devm_devfreq_add_device(dev, &exynos4_devfreq_profile,
-					   "simple_ondemand", NULL);
-	if (IS_ERR(data->devfreq))
-		return PTR_ERR(data->devfreq);
-
-	/*
-	 * Start PPMU (Performance Profiling Monitoring Unit) to check
-	 * utilization of each IP in the Exynos4 SoC.
-	 */
-	busfreq_mon_reset(ppmu_data);
-
-	/* Register opp_notifier for Exynos4 busfreq */
-	err = devm_devfreq_register_opp_notifier(dev, data->devfreq);
-	if (err < 0) {
-		dev_err(dev, "Failed to register opp notifier\n");
-		return err;
-	}
-
-	/* Register pm_notifier for Exynos4 busfreq */
-	err = register_pm_notifier(&data->pm_notifier);
-	if (err) {
-		dev_err(dev, "Failed to setup pm notifier\n");
-		return err;
-	}
-
-	return 0;
-}
-
-static int exynos4_busfreq_remove(struct platform_device *pdev)
-{
-	struct busfreq_data *data = platform_get_drvdata(pdev);
-
-	/* Unregister all of notifier chain */
-	unregister_pm_notifier(&data->pm_notifier);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos4_busfreq_resume(struct device *dev)
-{
-	struct busfreq_data *data = dev_get_drvdata(dev);
-	struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
-
-	busfreq_mon_reset(ppmu_data);
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(exynos4_busfreq_pm_ops, NULL, exynos4_busfreq_resume);
-
-static const struct platform_device_id exynos4_busfreq_id[] = {
-	{ "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
-	{ "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
-	{ "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
-	{ },
-};
-
-static struct platform_driver exynos4_busfreq_driver = {
-	.probe	= exynos4_busfreq_probe,
-	.remove	= exynos4_busfreq_remove,
-	.id_table = exynos4_busfreq_id,
-	.driver = {
-		.name	= "exynos4-busfreq",
-		.pm	= &exynos4_busfreq_pm_ops,
-	},
-};
-
-static int __init exynos4_busfreq_init(void)
-{
-	return platform_driver_register(&exynos4_busfreq_driver);
-}
-late_initcall(exynos4_busfreq_init);
-
-static void __exit exynos4_busfreq_exit(void)
-{
-	platform_driver_unregister(&exynos4_busfreq_driver);
-}
-module_exit(exynos4_busfreq_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
-MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/devfreq/exynos/exynos4_bus.h b/drivers/devfreq/exynos/exynos4_bus.h
deleted file mode 100644
index 94c73c1..0000000
--- a/drivers/devfreq/exynos/exynos4_bus.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * EXYNOS4 BUS header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __DEVFREQ_EXYNOS4_BUS_H
-#define __DEVFREQ_EXYNOS4_BUS_H __FILE__
-
-#include <mach/map.h>
-
-#define EXYNOS4_CLKDIV_LEFTBUS			(S5P_VA_CMU + 0x04500)
-#define EXYNOS4_CLKDIV_STAT_LEFTBUS		(S5P_VA_CMU + 0x04600)
-
-#define EXYNOS4_CLKDIV_RIGHTBUS			(S5P_VA_CMU + 0x08500)
-#define EXYNOS4_CLKDIV_STAT_RIGHTBUS		(S5P_VA_CMU + 0x08600)
-
-#define EXYNOS4_CLKDIV_TOP			(S5P_VA_CMU + 0x0C510)
-#define EXYNOS4_CLKDIV_CAM			(S5P_VA_CMU + 0x0C520)
-#define EXYNOS4_CLKDIV_MFC			(S5P_VA_CMU + 0x0C528)
-
-#define EXYNOS4_CLKDIV_STAT_TOP			(S5P_VA_CMU + 0x0C610)
-#define EXYNOS4_CLKDIV_STAT_MFC			(S5P_VA_CMU + 0x0C628)
-
-#define EXYNOS4210_CLKGATE_IP_IMAGE		(S5P_VA_CMU + 0x0C930)
-#define EXYNOS4212_CLKGATE_IP_IMAGE		(S5P_VA_CMU + 0x04930)
-
-#define EXYNOS4_CLKDIV_DMC0			(S5P_VA_CMU + 0x10500)
-#define EXYNOS4_CLKDIV_DMC1			(S5P_VA_CMU + 0x10504)
-#define EXYNOS4_CLKDIV_STAT_DMC0		(S5P_VA_CMU + 0x10600)
-#define EXYNOS4_CLKDIV_STAT_DMC1		(S5P_VA_CMU + 0x10604)
-
-#define EXYNOS4_DMC_PAUSE_CTRL			(S5P_VA_CMU + 0x11094)
-#define EXYNOS4_DMC_PAUSE_ENABLE		(1 << 0)
-
-#define EXYNOS4_CLKDIV_DMC0_ACP_SHIFT		(0)
-#define EXYNOS4_CLKDIV_DMC0_ACP_MASK		(0x7 << EXYNOS4_CLKDIV_DMC0_ACP_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT	(4)
-#define EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK	(0x7 << EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT		(8)
-#define EXYNOS4_CLKDIV_DMC0_DPHY_MASK		(0x7 << EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DMC_SHIFT		(12)
-#define EXYNOS4_CLKDIV_DMC0_DMC_MASK		(0x7 << EXYNOS4_CLKDIV_DMC0_DMC_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT		(16)
-#define EXYNOS4_CLKDIV_DMC0_DMCD_MASK		(0x7 << EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT		(20)
-#define EXYNOS4_CLKDIV_DMC0_DMCP_MASK		(0x7 << EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT		(24)
-#define EXYNOS4_CLKDIV_DMC0_COPY2_MASK		(0x7 << EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT	(28)
-#define EXYNOS4_CLKDIV_DMC0_CORETI_MASK		(0x7 << EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT)
-
-#define EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT	(0)
-#define EXYNOS4_CLKDIV_DMC1_G2D_ACP_MASK	(0xf << EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_C2C_SHIFT		(4)
-#define EXYNOS4_CLKDIV_DMC1_C2C_MASK		(0x7 << EXYNOS4_CLKDIV_DMC1_C2C_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_PWI_SHIFT		(8)
-#define EXYNOS4_CLKDIV_DMC1_PWI_MASK		(0xf << EXYNOS4_CLKDIV_DMC1_PWI_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT	(12)
-#define EXYNOS4_CLKDIV_DMC1_C2CACLK_MASK	(0x7 << EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_DVSEM_SHIFT		(16)
-#define EXYNOS4_CLKDIV_DMC1_DVSEM_MASK		(0x7f << EXYNOS4_CLKDIV_DMC1_DVSEM_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_DPM_SHIFT		(24)
-#define EXYNOS4_CLKDIV_DMC1_DPM_MASK		(0x7f << EXYNOS4_CLKDIV_DMC1_DPM_SHIFT)
-
-#define EXYNOS4_CLKDIV_MFC_SHIFT		(0)
-#define EXYNOS4_CLKDIV_MFC_MASK			(0x7 << EXYNOS4_CLKDIV_MFC_SHIFT)
-
-#define EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT	(0)
-#define EXYNOS4_CLKDIV_TOP_ACLK200_MASK		(0x7 << EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT	(4)
-#define EXYNOS4_CLKDIV_TOP_ACLK100_MASK		(0xF << EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT	(8)
-#define EXYNOS4_CLKDIV_TOP_ACLK160_MASK		(0x7 << EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT	(12)
-#define EXYNOS4_CLKDIV_TOP_ACLK133_MASK		(0x7 << EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT	(16)
-#define EXYNOS4_CLKDIV_TOP_ONENAND_MASK		(0x7 << EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT	(20)
-#define EXYNOS4_CLKDIV_TOP_ACLK266_GPS_MASK	(0x7 << EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_SHIFT	(24)
-#define EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_MASK	(0x7 << EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_SHIFT)
-
-#define EXYNOS4_CLKDIV_BUS_GDLR_SHIFT		(0)
-#define EXYNOS4_CLKDIV_BUS_GDLR_MASK		(0x7 << EXYNOS4_CLKDIV_BUS_GDLR_SHIFT)
-#define EXYNOS4_CLKDIV_BUS_GPLR_SHIFT		(4)
-#define EXYNOS4_CLKDIV_BUS_GPLR_MASK		(0x7 << EXYNOS4_CLKDIV_BUS_GPLR_SHIFT)
-
-#define EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT		(0)
-#define EXYNOS4_CLKDIV_CAM_FIMC0_MASK		(0xf << EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT)
-#define EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT		(4)
-#define EXYNOS4_CLKDIV_CAM_FIMC1_MASK		(0xf << EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT)
-#define EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT		(8)
-#define EXYNOS4_CLKDIV_CAM_FIMC2_MASK		(0xf << EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT)
-#define EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT		(12)
-#define EXYNOS4_CLKDIV_CAM_FIMC3_MASK		(0xf << EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT)
-
-#define EXYNOS4_CLKDIV_CAM1			(S5P_VA_CMU + 0x0C568)
-
-#define EXYNOS4_CLKDIV_STAT_CAM1		(S5P_VA_CMU + 0x0C668)
-
-#define EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT		(0)
-#define EXYNOS4_CLKDIV_CAM1_JPEG_MASK		(0xf << EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT)
-
-#endif /* __DEVFREQ_EXYNOS4_BUS_H */
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c
deleted file mode 100644
index 297ea30..0000000
--- a/drivers/devfreq/exynos/exynos5_bus.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * EXYNOS5 INT clock frequency scaling support using DEVFREQ framework
- * Based on work done by Jonghwan Choi <jhbird.choi@samsung.com>
- * Support for only EXYNOS5250 is present.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/devfreq.h>
-#include <linux/io.h>
-#include <linux/pm_opp.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/pm_qos.h>
-#include <linux/regulator/consumer.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include "exynos_ppmu.h"
-
-#define MAX_SAFEVOLT			1100000 /* 1.10V */
-/* Assume that the bus is saturated if the utilization is 25% */
-#define INT_BUS_SATURATION_RATIO	25
-
-enum int_level_idx {
-	LV_0,
-	LV_1,
-	LV_2,
-	LV_3,
-	LV_4,
-	_LV_END
-};
-
-enum exynos_ppmu_list {
-	PPMU_RIGHT,
-	PPMU_END,
-};
-
-struct busfreq_data_int {
-	struct device *dev;
-	struct devfreq *devfreq;
-	struct regulator *vdd_int;
-	struct busfreq_ppmu_data ppmu_data;
-	unsigned long curr_freq;
-	bool disabled;
-
-	struct notifier_block pm_notifier;
-	struct mutex lock;
-	struct pm_qos_request int_req;
-	struct clk *int_clk;
-};
-
-struct int_bus_opp_table {
-	unsigned int idx;
-	unsigned long clk;
-	unsigned long volt;
-};
-
-static struct int_bus_opp_table exynos5_int_opp_table[] = {
-	{LV_0, 266000, 1025000},
-	{LV_1, 200000, 1025000},
-	{LV_2, 160000, 1025000},
-	{LV_3, 133000, 1025000},
-	{LV_4, 100000, 1025000},
-	{0, 0, 0},
-};
-
-static int exynos5_int_setvolt(struct busfreq_data_int *data,
-				unsigned long volt)
-{
-	return regulator_set_voltage(data->vdd_int, volt, MAX_SAFEVOLT);
-}
-
-static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
-			      u32 flags)
-{
-	int err = 0;
-	struct platform_device *pdev = container_of(dev, struct platform_device,
-						    dev);
-	struct busfreq_data_int *data = platform_get_drvdata(pdev);
-	struct dev_pm_opp *opp;
-	unsigned long old_freq, freq;
-	unsigned long volt;
-
-	rcu_read_lock();
-	opp = devfreq_recommended_opp(dev, _freq, flags);
-	if (IS_ERR(opp)) {
-		rcu_read_unlock();
-		dev_err(dev, "%s: Invalid OPP.\n", __func__);
-		return PTR_ERR(opp);
-	}
-
-	freq = dev_pm_opp_get_freq(opp);
-	volt = dev_pm_opp_get_voltage(opp);
-	rcu_read_unlock();
-
-	old_freq = data->curr_freq;
-
-	if (old_freq == freq)
-		return 0;
-
-	dev_dbg(dev, "targeting %lukHz %luuV\n", freq, volt);
-
-	mutex_lock(&data->lock);
-
-	if (data->disabled)
-		goto out;
-
-	if (freq > exynos5_int_opp_table[0].clk)
-		pm_qos_update_request(&data->int_req, freq * 16 / 1000);
-	else
-		pm_qos_update_request(&data->int_req, -1);
-
-	if (old_freq < freq)
-		err = exynos5_int_setvolt(data, volt);
-	if (err)
-		goto out;
-
-	err = clk_set_rate(data->int_clk, freq * 1000);
-
-	if (err)
-		goto out;
-
-	if (old_freq > freq)
-		err = exynos5_int_setvolt(data, volt);
-	if (err)
-		goto out;
-
-	data->curr_freq = freq;
-out:
-	mutex_unlock(&data->lock);
-	return err;
-}
-
-static int exynos5_int_get_dev_status(struct device *dev,
-				      struct devfreq_dev_status *stat)
-{
-	struct platform_device *pdev = container_of(dev, struct platform_device,
-						    dev);
-	struct busfreq_data_int *data = platform_get_drvdata(pdev);
-	struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
-	int busier_dmc;
-
-	exynos_read_ppmu(ppmu_data);
-	busier_dmc = exynos_get_busier_ppmu(ppmu_data);
-
-	stat->current_frequency = data->curr_freq;
-
-	/* Number of cycles spent on memory access */
-	stat->busy_time = ppmu_data->ppmu[busier_dmc].count[PPMU_PMNCNT3];
-	stat->busy_time *= 100 / INT_BUS_SATURATION_RATIO;
-	stat->total_time = ppmu_data->ppmu[busier_dmc].ccnt;
-
-	return 0;
-}
-
-static struct devfreq_dev_profile exynos5_devfreq_int_profile = {
-	.initial_freq		= 160000,
-	.polling_ms		= 100,
-	.target			= exynos5_busfreq_int_target,
-	.get_dev_status		= exynos5_int_get_dev_status,
-};
-
-static int exynos5250_init_int_tables(struct busfreq_data_int *data)
-{
-	int i, err = 0;
-
-	for (i = LV_0; i < _LV_END; i++) {
-		err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk,
-				exynos5_int_opp_table[i].volt);
-		if (err) {
-			dev_err(data->dev, "Cannot add opp entries.\n");
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
-		unsigned long event, void *ptr)
-{
-	struct busfreq_data_int *data = container_of(this,
-					struct busfreq_data_int, pm_notifier);
-	struct dev_pm_opp *opp;
-	unsigned long maxfreq = ULONG_MAX;
-	unsigned long freq;
-	unsigned long volt;
-	int err = 0;
-
-	switch (event) {
-	case PM_SUSPEND_PREPARE:
-		/* Set Fastest and Deactivate DVFS */
-		mutex_lock(&data->lock);
-
-		data->disabled = true;
-
-		rcu_read_lock();
-		opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
-		if (IS_ERR(opp)) {
-			rcu_read_unlock();
-			err = PTR_ERR(opp);
-			goto unlock;
-		}
-		freq = dev_pm_opp_get_freq(opp);
-		volt = dev_pm_opp_get_voltage(opp);
-		rcu_read_unlock();
-
-		err = exynos5_int_setvolt(data, volt);
-		if (err)
-			goto unlock;
-
-		err = clk_set_rate(data->int_clk, freq * 1000);
-
-		if (err)
-			goto unlock;
-
-		data->curr_freq = freq;
-unlock:
-		mutex_unlock(&data->lock);
-		if (err)
-			return NOTIFY_BAD;
-		return NOTIFY_OK;
-	case PM_POST_RESTORE:
-	case PM_POST_SUSPEND:
-		/* Reactivate */
-		mutex_lock(&data->lock);
-		data->disabled = false;
-		mutex_unlock(&data->lock);
-		return NOTIFY_OK;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static int exynos5_busfreq_int_probe(struct platform_device *pdev)
-{
-	struct busfreq_data_int *data;
-	struct busfreq_ppmu_data *ppmu_data;
-	struct dev_pm_opp *opp;
-	struct device *dev = &pdev->dev;
-	struct device_node *np;
-	unsigned long initial_freq;
-	unsigned long initial_volt;
-	int err = 0;
-	int i;
-
-	data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data_int),
-				GFP_KERNEL);
-	if (data == NULL) {
-		dev_err(dev, "Cannot allocate memory.\n");
-		return -ENOMEM;
-	}
-
-	ppmu_data = &data->ppmu_data;
-	ppmu_data->ppmu_end = PPMU_END;
-	ppmu_data->ppmu = devm_kzalloc(dev,
-				       sizeof(struct exynos_ppmu) * PPMU_END,
-				       GFP_KERNEL);
-	if (!ppmu_data->ppmu) {
-		dev_err(dev, "Failed to allocate memory for exynos_ppmu\n");
-		return -ENOMEM;
-	}
-
-	np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-ppmu");
-	if (np == NULL) {
-		pr_err("Unable to find PPMU node\n");
-		return -ENOENT;
-	}
-
-	for (i = 0; i < ppmu_data->ppmu_end; i++) {
-		/* map PPMU memory region */
-		ppmu_data->ppmu[i].hw_base = of_iomap(np, i);
-		if (ppmu_data->ppmu[i].hw_base == NULL) {
-			dev_err(&pdev->dev, "failed to map memory region\n");
-			return -ENOMEM;
-		}
-	}
-	data->pm_notifier.notifier_call = exynos5_busfreq_int_pm_notifier_event;
-	data->dev = dev;
-	mutex_init(&data->lock);
-
-	err = exynos5250_init_int_tables(data);
-	if (err)
-		return err;
-
-	data->vdd_int = devm_regulator_get(dev, "vdd_int");
-	if (IS_ERR(data->vdd_int)) {
-		dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
-		return PTR_ERR(data->vdd_int);
-	}
-
-	data->int_clk = devm_clk_get(dev, "int_clk");
-	if (IS_ERR(data->int_clk)) {
-		dev_err(dev, "Cannot get clock \"int_clk\"\n");
-		return PTR_ERR(data->int_clk);
-	}
-
-	rcu_read_lock();
-	opp = dev_pm_opp_find_freq_floor(dev,
-			&exynos5_devfreq_int_profile.initial_freq);
-	if (IS_ERR(opp)) {
-		rcu_read_unlock();
-		dev_err(dev, "Invalid initial frequency %lu kHz.\n",
-		       exynos5_devfreq_int_profile.initial_freq);
-		return PTR_ERR(opp);
-	}
-	initial_freq = dev_pm_opp_get_freq(opp);
-	initial_volt = dev_pm_opp_get_voltage(opp);
-	rcu_read_unlock();
-	data->curr_freq = initial_freq;
-
-	err = clk_set_rate(data->int_clk, initial_freq * 1000);
-	if (err) {
-		dev_err(dev, "Failed to set initial frequency\n");
-		return err;
-	}
-
-	err = exynos5_int_setvolt(data, initial_volt);
-	if (err)
-		return err;
-
-	platform_set_drvdata(pdev, data);
-
-	busfreq_mon_reset(ppmu_data);
-
-	data->devfreq = devm_devfreq_add_device(dev, &exynos5_devfreq_int_profile,
-					   "simple_ondemand", NULL);
-	if (IS_ERR(data->devfreq))
-		return PTR_ERR(data->devfreq);
-
-	err = devm_devfreq_register_opp_notifier(dev, data->devfreq);
-	if (err < 0) {
-		dev_err(dev, "Failed to register opp notifier\n");
-		return err;
-	}
-
-	err = register_pm_notifier(&data->pm_notifier);
-	if (err) {
-		dev_err(dev, "Failed to setup pm notifier\n");
-		return err;
-	}
-
-	/* TODO: Add a new QOS class for int/mif bus */
-	pm_qos_add_request(&data->int_req, PM_QOS_NETWORK_THROUGHPUT, -1);
-
-	return 0;
-}
-
-static int exynos5_busfreq_int_remove(struct platform_device *pdev)
-{
-	struct busfreq_data_int *data = platform_get_drvdata(pdev);
-
-	pm_qos_remove_request(&data->int_req);
-	unregister_pm_notifier(&data->pm_notifier);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos5_busfreq_int_resume(struct device *dev)
-{
-	struct platform_device *pdev = container_of(dev, struct platform_device,
-						    dev);
-	struct busfreq_data_int *data = platform_get_drvdata(pdev);
-	struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
-
-	busfreq_mon_reset(ppmu_data);
-	return 0;
-}
-static const struct dev_pm_ops exynos5_busfreq_int_pm = {
-	.resume	= exynos5_busfreq_int_resume,
-};
-#endif
-static SIMPLE_DEV_PM_OPS(exynos5_busfreq_int_pm_ops, NULL,
-			 exynos5_busfreq_int_resume);
-
-/* platform device pointer for exynos5 devfreq device. */
-static struct platform_device *exynos5_devfreq_pdev;
-
-static struct platform_driver exynos5_busfreq_int_driver = {
-	.probe		= exynos5_busfreq_int_probe,
-	.remove		= exynos5_busfreq_int_remove,
-	.driver		= {
-		.name		= "exynos5-bus-int",
-		.pm		= &exynos5_busfreq_int_pm_ops,
-	},
-};
-
-static int __init exynos5_busfreq_int_init(void)
-{
-	int ret;
-
-	ret = platform_driver_register(&exynos5_busfreq_int_driver);
-	if (ret < 0)
-		goto out;
-
-	exynos5_devfreq_pdev =
-		platform_device_register_simple("exynos5-bus-int", -1, NULL, 0);
-	if (IS_ERR(exynos5_devfreq_pdev)) {
-		ret = PTR_ERR(exynos5_devfreq_pdev);
-		goto out1;
-	}
-
-	return 0;
-out1:
-	platform_driver_unregister(&exynos5_busfreq_int_driver);
-out:
-	return ret;
-}
-late_initcall(exynos5_busfreq_int_init);
-
-static void __exit exynos5_busfreq_int_exit(void)
-{
-	platform_device_unregister(exynos5_devfreq_pdev);
-	platform_driver_unregister(&exynos5_busfreq_int_driver);
-}
-module_exit(exynos5_busfreq_int_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("EXYNOS5 busfreq driver with devfreq framework");
diff --git a/drivers/devfreq/exynos/exynos_ppmu.c b/drivers/devfreq/exynos/exynos_ppmu.c
deleted file mode 100644
index 97b75e5..0000000
--- a/drivers/devfreq/exynos/exynos_ppmu.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * EXYNOS - PPMU support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/io.h>
-
-#include "exynos_ppmu.h"
-
-void exynos_ppmu_reset(void __iomem *ppmu_base)
-{
-	__raw_writel(PPMU_CYCLE_RESET | PPMU_COUNTER_RESET, ppmu_base);
-	__raw_writel(PPMU_ENABLE_CYCLE  |
-		     PPMU_ENABLE_COUNT0 |
-		     PPMU_ENABLE_COUNT1 |
-		     PPMU_ENABLE_COUNT2 |
-		     PPMU_ENABLE_COUNT3,
-		     ppmu_base + PPMU_CNTENS);
-}
-
-void exynos_ppmu_setevent(void __iomem *ppmu_base, unsigned int ch,
-			unsigned int evt)
-{
-	__raw_writel(evt, ppmu_base + PPMU_BEVTSEL(ch));
-}
-
-void exynos_ppmu_start(void __iomem *ppmu_base)
-{
-	__raw_writel(PPMU_ENABLE, ppmu_base);
-}
-
-void exynos_ppmu_stop(void __iomem *ppmu_base)
-{
-	__raw_writel(PPMU_DISABLE, ppmu_base);
-}
-
-unsigned int exynos_ppmu_read(void __iomem *ppmu_base, unsigned int ch)
-{
-	unsigned int total;
-
-	if (ch == PPMU_PMNCNT3)
-		total = ((__raw_readl(ppmu_base + PMCNT_OFFSET(ch)) << 8) |
-			  __raw_readl(ppmu_base + PMCNT_OFFSET(ch + 1)));
-	else
-		total = __raw_readl(ppmu_base + PMCNT_OFFSET(ch));
-
-	return total;
-}
-
-void busfreq_mon_reset(struct busfreq_ppmu_data *ppmu_data)
-{
-	unsigned int i;
-
-	for (i = 0; i < ppmu_data->ppmu_end; i++) {
-		void __iomem *ppmu_base = ppmu_data->ppmu[i].hw_base;
-
-		/* Reset the performance and cycle counters */
-		exynos_ppmu_reset(ppmu_base);
-
-		/* Setup count registers to monitor read/write transactions */
-		ppmu_data->ppmu[i].event[PPMU_PMNCNT3] = RDWR_DATA_COUNT;
-		exynos_ppmu_setevent(ppmu_base, PPMU_PMNCNT3,
-					ppmu_data->ppmu[i].event[PPMU_PMNCNT3]);
-
-		exynos_ppmu_start(ppmu_base);
-	}
-}
-EXPORT_SYMBOL(busfreq_mon_reset);
-
-void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data)
-{
-	int i, j;
-
-	for (i = 0; i < ppmu_data->ppmu_end; i++) {
-		void __iomem *ppmu_base = ppmu_data->ppmu[i].hw_base;
-
-		exynos_ppmu_stop(ppmu_base);
-
-		/* Update local data from PPMU */
-		ppmu_data->ppmu[i].ccnt = __raw_readl(ppmu_base + PPMU_CCNT);
-
-		for (j = PPMU_PMNCNT0; j < PPMU_PMNCNT_MAX; j++) {
-			if (ppmu_data->ppmu[i].event[j] == 0)
-				ppmu_data->ppmu[i].count[j] = 0;
-			else
-				ppmu_data->ppmu[i].count[j] =
-					exynos_ppmu_read(ppmu_base, j);
-		}
-	}
-
-	busfreq_mon_reset(ppmu_data);
-}
-EXPORT_SYMBOL(exynos_read_ppmu);
-
-int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data)
-{
-	unsigned int count = 0;
-	int i, j, busy = 0;
-
-	for (i = 0; i < ppmu_data->ppmu_end; i++) {
-		for (j = PPMU_PMNCNT0; j < PPMU_PMNCNT_MAX; j++) {
-			if (ppmu_data->ppmu[i].count[j] > count) {
-				count = ppmu_data->ppmu[i].count[j];
-				busy = i;
-			}
-		}
-	}
-
-	return busy;
-}
-EXPORT_SYMBOL(exynos_get_busier_ppmu);
diff --git a/drivers/devfreq/exynos/exynos_ppmu.h b/drivers/devfreq/exynos/exynos_ppmu.h
deleted file mode 100644
index 71f17ba..0000000
--- a/drivers/devfreq/exynos/exynos_ppmu.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * EXYNOS PPMU header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __DEVFREQ_EXYNOS_PPMU_H
-#define __DEVFREQ_EXYNOS_PPMU_H __FILE__
-
-#include <linux/ktime.h>
-
-/* For PPMU Control */
-#define PPMU_ENABLE             BIT(0)
-#define PPMU_DISABLE            0x0
-#define PPMU_CYCLE_RESET        BIT(1)
-#define PPMU_COUNTER_RESET      BIT(2)
-
-#define PPMU_ENABLE_COUNT0      BIT(0)
-#define PPMU_ENABLE_COUNT1      BIT(1)
-#define PPMU_ENABLE_COUNT2      BIT(2)
-#define PPMU_ENABLE_COUNT3      BIT(3)
-#define PPMU_ENABLE_CYCLE       BIT(31)
-
-#define PPMU_CNTENS		0x10
-#define PPMU_FLAG		0x50
-#define PPMU_CCNT_OVERFLOW	BIT(31)
-#define PPMU_CCNT		0x100
-
-#define PPMU_PMCNT0		0x110
-#define PPMU_PMCNT_OFFSET	0x10
-#define PMCNT_OFFSET(x)		(PPMU_PMCNT0 + (PPMU_PMCNT_OFFSET * x))
-
-#define PPMU_BEVT0SEL		0x1000
-#define PPMU_BEVTSEL_OFFSET	0x100
-#define PPMU_BEVTSEL(x)		(PPMU_BEVT0SEL + (ch * PPMU_BEVTSEL_OFFSET))
-
-/* For Event Selection */
-#define RD_DATA_COUNT		0x5
-#define WR_DATA_COUNT		0x6
-#define RDWR_DATA_COUNT		0x7
-
-enum ppmu_counter {
-	PPMU_PMNCNT0,
-	PPMU_PMCCNT1,
-	PPMU_PMNCNT2,
-	PPMU_PMNCNT3,
-	PPMU_PMNCNT_MAX,
-};
-
-struct bus_opp_table {
-	unsigned int idx;
-	unsigned long clk;
-	unsigned long volt;
-};
-
-struct exynos_ppmu {
-	void __iomem *hw_base;
-	unsigned int ccnt;
-	unsigned int event[PPMU_PMNCNT_MAX];
-	unsigned int count[PPMU_PMNCNT_MAX];
-	unsigned long long ns;
-	ktime_t reset_time;
-	bool ccnt_overflow;
-	bool count_overflow[PPMU_PMNCNT_MAX];
-};
-
-struct busfreq_ppmu_data {
-	struct exynos_ppmu *ppmu;
-	int ppmu_end;
-};
-
-void exynos_ppmu_reset(void __iomem *ppmu_base);
-void exynos_ppmu_setevent(void __iomem *ppmu_base, unsigned int ch,
-			unsigned int evt);
-void exynos_ppmu_start(void __iomem *ppmu_base);
-void exynos_ppmu_stop(void __iomem *ppmu_base);
-unsigned int exynos_ppmu_read(void __iomem *ppmu_base, unsigned int ch);
-void busfreq_mon_reset(struct busfreq_ppmu_data *ppmu_data);
-void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data);
-int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data);
-#endif /* __DEVFREQ_EXYNOS_PPMU_H */
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
new file mode 100644
index 0000000..9ef46e2
--- /dev/null
+++ b/drivers/devfreq/governor_passive.c
@@ -0,0 +1,205 @@
+/*
+ * linux/drivers/devfreq/governor_passive.c
+ *
+ * Copyright (C) 2016 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+
+static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+					unsigned long *freq)
+{
+	struct devfreq_passive_data *p_data
+			= (struct devfreq_passive_data *)devfreq->data;
+	struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
+	unsigned long child_freq = ULONG_MAX;
+	struct dev_pm_opp *opp;
+	int i, count, ret = 0;
+
+	/*
+	 * If the devfreq device using the passive governor provides its own
+	 * method to determine the next frequency, use the get_target_freq()
+	 * callback of struct devfreq_passive_data.
+	 */
+	if (p_data->get_target_freq) {
+		ret = p_data->get_target_freq(devfreq, freq);
+		goto out;
+	}
+
+	/*
+	 * If both the parent and the passive devfreq device use an OPP table,
+	 * get the next frequency from the OPP table.
+	 */
+
+	/*
+	 * - the parent devfreq device uses any governor except passive.
+	 * - the passive devfreq device uses the passive governor.
+	 *
+	 * Each devfreq device has its own OPP table. After the governor of
+	 * the parent devfreq device decides the new frequency, the passive
+	 * governor looks up the index of that frequency in the parent's OPP
+	 * table and uses the same index to pick a suitable frequency for the
+	 * passive devfreq device.
+	 */
+	if (!devfreq->profile || !devfreq->profile->freq_table
+		|| devfreq->profile->max_state <= 0)
+		return -EINVAL;
+
+	/*
+	 * The passive governor has to get the correct frequency from the OPP
+	 * list of the parent device, because at this point *freq is only a
+	 * temporary value decided by the ondemand governor.
+	 */
+	rcu_read_lock();
+	opp = devfreq_recommended_opp(parent_devfreq->dev.parent, freq, 0);
+	rcu_read_unlock();
+	if (IS_ERR(opp)) {
+		ret = PTR_ERR(opp);
+		goto out;
+	}
+
+	/*
+	 * Get the OPP table index of the frequency decided by the governor
+	 * of the parent device.
+	 */
+	for (i = 0; i < parent_devfreq->profile->max_state; i++)
+		if (parent_devfreq->profile->freq_table[i] == *freq)
+			break;
+
+	if (i == parent_devfreq->profile->max_state) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Get the suitable frequency using the index of the parent device. */
+	if (i < devfreq->profile->max_state) {
+		child_freq = devfreq->profile->freq_table[i];
+	} else {
+		count = devfreq->profile->max_state;
+		child_freq = devfreq->profile->freq_table[count - 1];
+	}
+
+	/* Return the suitable frequency for passive device. */
+	*freq = child_freq;
+
+out:
+	return ret;
+}
+
+static int update_devfreq_passive(struct devfreq *devfreq, unsigned long freq)
+{
+	int ret;
+
+	if (!devfreq->governor)
+		return -EINVAL;
+
+	mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
+
+	ret = devfreq->governor->get_target_freq(devfreq, &freq);
+	if (ret < 0)
+		goto out;
+
+	ret = devfreq->profile->target(devfreq->dev.parent, &freq, 0);
+	if (ret < 0)
+		goto out;
+
+	devfreq->previous_freq = freq;
+
+out:
+	mutex_unlock(&devfreq->lock);
+
+	return 0;
+}
+
+static int devfreq_passive_notifier_call(struct notifier_block *nb,
+				unsigned long event, void *ptr)
+{
+	struct devfreq_passive_data *data
+			= container_of(nb, struct devfreq_passive_data, nb);
+	struct devfreq *devfreq = (struct devfreq *)data->this;
+	struct devfreq *parent = (struct devfreq *)data->parent;
+	struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
+	unsigned long freq = freqs->new;
+
+	switch (event) {
+	case DEVFREQ_PRECHANGE:
+		if (parent->previous_freq > freq)
+			update_devfreq_passive(devfreq, freq);
+		break;
+	case DEVFREQ_POSTCHANGE:
+		if (parent->previous_freq < freq)
+			update_devfreq_passive(devfreq, freq);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int devfreq_passive_event_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	struct device *dev = devfreq->dev.parent;
+	struct devfreq_passive_data *p_data
+			= (struct devfreq_passive_data *)devfreq->data;
+	struct devfreq *parent = (struct devfreq *)p_data->parent;
+	struct notifier_block *nb = &p_data->nb;
+	int ret = 0;
+
+	if (!parent)
+		return -EPROBE_DEFER;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		if (!p_data->this)
+			p_data->this = devfreq;
+
+		nb->notifier_call = devfreq_passive_notifier_call;
+		ret = devm_devfreq_register_notifier(dev, parent, nb,
+					DEVFREQ_TRANSITION_NOTIFIER);
+		break;
+	case DEVFREQ_GOV_STOP:
+		devm_devfreq_unregister_notifier(dev, parent, nb,
+					DEVFREQ_TRANSITION_NOTIFIER);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static struct devfreq_governor devfreq_passive = {
+	.name = "passive",
+	.get_target_freq = devfreq_passive_get_target_freq,
+	.event_handler = devfreq_passive_event_handler,
+};
+
+static int __init devfreq_passive_init(void)
+{
+	return devfreq_add_governor(&devfreq_passive);
+}
+subsys_initcall(devfreq_passive_init);
+
+static void __exit devfreq_passive_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_passive);
+	if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+}
+module_exit(devfreq_passive_exit);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("DEVFREQ Passive governor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d96d87c..8c98779 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -332,7 +332,7 @@
 
 config MV_XOR
 	bool "Marvell XOR engine support"
-	depends on PLAT_ORION
+	depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
 	select DMA_ENGINE
 	select DMA_ENGINE_RAID
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
@@ -467,6 +467,20 @@
 	  This DMA controller transfers data from memory to peripheral fifo
 	  or vice versa. It does not support memory to memory data transfer.
 
+config TEGRA210_ADMA
+	bool "NVIDIA Tegra210 ADMA support"
+	depends on ARCH_TEGRA_210_SOC
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	select PM_CLK
+	help
+	  Support for the NVIDIA Tegra210 ADMA controller driver. The
+	  DMA controller has multiple DMA channels and is used to service
+	  various audio clients in the Tegra210 audio processing engine
+	  (APE). This DMA controller transfers data from memory to
+	  peripheral and vice versa. It does not support memory to
+	  memory data transfer.
+
 config TIMB_DMA
 	tristate "Timberdale FPGA DMA support"
 	depends on MFD_TIMBERDALE
@@ -507,7 +521,7 @@
 
 config XILINX_VDMA
 	tristate "Xilinx AXI VDMA Engine"
-	depends on (ARCH_ZYNQ || MICROBLAZE)
+	depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
 	select DMA_ENGINE
 	help
 	  Enable support for Xilinx AXI VDMA Soft IP.
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6084127..614f28b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -59,6 +59,7 @@
 obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
+obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9b42c05..81db1c4 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -107,16 +107,20 @@
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
  * @channels: the number of channels available in this variant
+ * @signals: the number of request signals available from the hardware
  * @dualmaster: whether this version supports dual AHB masters or not.
  * @nomadik: whether the channels have Nomadik security extension bits
  *	that need to be checked for permission before use and some registers are
  *	missing
  * @pl080s: whether this version is a PL080S, which has separate register and
  *	LLI word for transfer size.
+ * @max_transfer_size: the maximum single element transfer size for this
+ *	PL08x variant.
  */
 struct vendor_data {
 	u8 config_offset;
 	u8 channels;
+	u8 signals;
 	bool dualmaster;
 	bool nomadik;
 	bool pl080s;
@@ -235,7 +239,7 @@
 	struct virt_dma_chan vc;
 	struct pl08x_phy_chan *phychan;
 	const char *name;
-	const struct pl08x_channel_data *cd;
+	struct pl08x_channel_data *cd;
 	struct dma_slave_config cfg;
 	struct pl08x_txd *at;
 	struct pl08x_driver_data *host;
@@ -1909,6 +1913,12 @@
 
 		if (slave) {
 			chan->cd = &pl08x->pd->slave_channels[i];
+			/*
+			 * Some implementations have muxed signals, whereas some
+			 * use a mux in front of the signals and need dynamic
+			 * assignment of signals.
+			 */
+			chan->signal = i;
 			pl08x_dma_slave_init(chan);
 		} else {
 			chan->cd = &pl08x->pd->memcpy_channel;
@@ -2050,40 +2060,33 @@
 				       struct of_dma *ofdma)
 {
 	struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
-	struct pl08x_channel_data *data;
-	struct pl08x_dma_chan *chan;
 	struct dma_chan *dma_chan;
+	struct pl08x_dma_chan *plchan;
 
 	if (!pl08x)
 		return NULL;
 
-	if (dma_spec->args_count != 2)
+	if (dma_spec->args_count != 2) {
+		dev_err(&pl08x->adev->dev,
+			"DMA channel translation requires two cells\n");
 		return NULL;
+	}
 
 	dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
-	if (dma_chan)
-		return dma_get_slave_channel(dma_chan);
-
-	chan = devm_kzalloc(pl08x->slave.dev, sizeof(*chan) + sizeof(*data),
-			    GFP_KERNEL);
-	if (!chan)
+	if (!dma_chan) {
+		dev_err(&pl08x->adev->dev,
+			"DMA slave channel not found\n");
 		return NULL;
+	}
 
-	data = (void *)&chan[1];
-	data->bus_id = "(none)";
-	data->periph_buses = dma_spec->args[1];
+	plchan = to_pl08x_chan(dma_chan);
+	dev_dbg(&pl08x->adev->dev,
+		"translated channel for signal %d\n",
+		dma_spec->args[0]);
 
-	chan->cd = data;
-	chan->host = pl08x;
-	chan->slave = true;
-	chan->name = data->bus_id;
-	chan->state = PL08X_CHAN_IDLE;
-	chan->signal = dma_spec->args[0];
-	chan->vc.desc_free = pl08x_desc_free;
-
-	vchan_init(&chan->vc, &pl08x->slave);
-
-	return dma_get_slave_channel(&chan->vc.chan);
+	/* Augment channel data for applicable AHB buses */
+	plchan->cd->periph_buses = dma_spec->args[1];
+	return dma_get_slave_channel(dma_chan);
 }
 
 static int pl08x_of_probe(struct amba_device *adev,
@@ -2091,9 +2094,11 @@
 			  struct device_node *np)
 {
 	struct pl08x_platform_data *pd;
+	struct pl08x_channel_data *chanp = NULL;
 	u32 cctl_memcpy = 0;
 	u32 val;
 	int ret;
+	int i;
 
 	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
 	if (!pd)
@@ -2195,6 +2200,27 @@
 	/* Use the buses that can access memory, obviously */
 	pd->memcpy_channel.periph_buses = pd->mem_buses;
 
+	/*
+	 * Allocate channel data for all possible slave channels (one
+	 * for each possible signal); channels will then be allocated
+	 * for a device and have their AHB interfaces set up at
+	 * translation time.
+	 */
+	chanp = devm_kcalloc(&adev->dev,
+			pl08x->vd->signals,
+			sizeof(struct pl08x_channel_data),
+			GFP_KERNEL);
+	if (!chanp)
+		return -ENOMEM;
+
+	pd->slave_channels = chanp;
+	for (i = 0; i < pl08x->vd->signals; i++) {
+		/* chanp->periph_buses will be assigned at translation */
+		chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i);
+		chanp++;
+	}
+	pd->num_slave_channels = pl08x->vd->signals;
+
 	pl08x->pd = pd;
 
 	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
@@ -2234,6 +2260,10 @@
 		goto out_no_pl08x;
 	}
 
+	/* Assign useful pointers to the driver state */
+	pl08x->adev = adev;
+	pl08x->vd = vd;
+
 	/* Initialize memcpy engine */
 	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
 	pl08x->memcpy.dev = &adev->dev;
@@ -2284,10 +2314,6 @@
 		}
 	}
 
-	/* Assign useful pointers to the driver state */
-	pl08x->adev = adev;
-	pl08x->vd = vd;
-
 	/* By default, AHB1 only.  If dualmaster, from platform */
 	pl08x->lli_buses = PL08X_AHB1;
 	pl08x->mem_buses = PL08X_AHB1;
@@ -2438,6 +2464,7 @@
 static struct vendor_data vendor_pl080 = {
 	.config_offset = PL080_CH_CONFIG,
 	.channels = 8,
+	.signals = 16,
 	.dualmaster = true,
 	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
 };
@@ -2445,6 +2472,7 @@
 static struct vendor_data vendor_nomadik = {
 	.config_offset = PL080_CH_CONFIG,
 	.channels = 8,
+	.signals = 32,
 	.dualmaster = true,
 	.nomadik = true,
 	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
@@ -2453,6 +2481,7 @@
 static struct vendor_data vendor_pl080s = {
 	.config_offset = PL080S_CH_CONFIG,
 	.channels = 8,
+	.signals = 32,
 	.pl080s = true,
 	.max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
 };
@@ -2460,6 +2489,7 @@
 static struct vendor_data vendor_pl081 = {
 	.config_offset = PL080_CH_CONFIG,
 	.channels = 2,
+	.signals = 16,
 	.dualmaster = false,
 	.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
 };
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 996c4b0..6149b27 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -46,6 +46,9 @@
 
 #include "virt-dma.h"
 
+#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
+#define BCM2835_DMA_CHAN_NAME_SIZE 8
+
 struct bcm2835_dmadev {
 	struct dma_device ddev;
 	spinlock_t lock;
@@ -73,7 +76,6 @@
 	struct list_head node;
 
 	struct dma_slave_config	cfg;
-	bool cyclic;
 	unsigned int dreq;
 
 	int ch;
@@ -82,6 +84,9 @@
 
 	void __iomem *chan_base;
 	int irq_number;
+	unsigned int irq_flags;
+
+	bool is_lite_channel;
 };
 
 struct bcm2835_desc {
@@ -89,47 +94,104 @@
 	struct virt_dma_desc vd;
 	enum dma_transfer_direction dir;
 
-	struct bcm2835_cb_entry *cb_list;
-
 	unsigned int frames;
 	size_t size;
+
+	bool cyclic;
+
+	struct bcm2835_cb_entry cb_list[];
 };
 
 #define BCM2835_DMA_CS		0x00
 #define BCM2835_DMA_ADDR	0x04
+#define BCM2835_DMA_TI		0x08
 #define BCM2835_DMA_SOURCE_AD	0x0c
 #define BCM2835_DMA_DEST_AD	0x10
-#define BCM2835_DMA_NEXTCB	0x1C
+#define BCM2835_DMA_LEN		0x14
+#define BCM2835_DMA_STRIDE	0x18
+#define BCM2835_DMA_NEXTCB	0x1c
+#define BCM2835_DMA_DEBUG	0x20
 
 /* DMA CS Control and Status bits */
-#define BCM2835_DMA_ACTIVE	BIT(0)
-#define BCM2835_DMA_INT	BIT(2)
+#define BCM2835_DMA_ACTIVE	BIT(0)  /* activate the DMA */
+#define BCM2835_DMA_END		BIT(1)  /* current CB has ended */
+#define BCM2835_DMA_INT		BIT(2)  /* interrupt status */
+#define BCM2835_DMA_DREQ	BIT(3)  /* DREQ state */
 #define BCM2835_DMA_ISPAUSED	BIT(4)  /* Pause requested or not active */
 #define BCM2835_DMA_ISHELD	BIT(5)  /* Is held by DREQ flow control */
-#define BCM2835_DMA_ERR	BIT(8)
+#define BCM2835_DMA_WAITING_FOR_WRITES BIT(6) /* waiting for last
+					       * AXI-write to ack
+					       */
+#define BCM2835_DMA_ERR		BIT(8)
+#define BCM2835_DMA_PRIORITY(x) ((x & 15) << 16) /* AXI priority */
+#define BCM2835_DMA_PANIC_PRIORITY(x) ((x & 15) << 20) /* panic priority */
+/* current value of TI.BCM2835_DMA_WAIT_RESP */
+#define BCM2835_DMA_WAIT_FOR_WRITES BIT(28)
+#define BCM2835_DMA_DIS_DEBUG	BIT(29) /* disable debug pause signal */
 #define BCM2835_DMA_ABORT	BIT(30) /* Stop current CB, go to next, WO */
 #define BCM2835_DMA_RESET	BIT(31) /* WO, self clearing */
 
+/* Transfer information bits - also bcm2835_cb.info field */
 #define BCM2835_DMA_INT_EN	BIT(0)
+#define BCM2835_DMA_TDMODE	BIT(1) /* 2D-Mode */
+#define BCM2835_DMA_WAIT_RESP	BIT(3) /* wait for AXI-write to be acked */
 #define BCM2835_DMA_D_INC	BIT(4)
-#define BCM2835_DMA_D_DREQ	BIT(6)
+#define BCM2835_DMA_D_WIDTH	BIT(5) /* 128bit writes if set */
+#define BCM2835_DMA_D_DREQ	BIT(6) /* enable DREQ for destination */
+#define BCM2835_DMA_D_IGNORE	BIT(7) /* ignore destination writes */
 #define BCM2835_DMA_S_INC	BIT(8)
-#define BCM2835_DMA_S_DREQ	BIT(10)
+#define BCM2835_DMA_S_WIDTH	BIT(9) /* 128bit writes if set */
+#define BCM2835_DMA_S_DREQ	BIT(10) /* enable SREQ for source */
+#define BCM2835_DMA_S_IGNORE	BIT(11) /* ignore source reads - read 0 */
+#define BCM2835_DMA_BURST_LENGTH(x) ((x & 15) << 12)
+#define BCM2835_DMA_PER_MAP(x)	((x & 31) << 16) /* REQ source */
+#define BCM2835_DMA_WAIT(x)	((x & 31) << 21) /* add DMA-wait cycles */
+#define BCM2835_DMA_NO_WIDE_BURSTS BIT(26) /* no 2 beat write bursts */
 
-#define BCM2835_DMA_PER_MAP(x)	((x) << 16)
+/* debug register bits */
+#define BCM2835_DMA_DEBUG_LAST_NOT_SET_ERR	BIT(0)
+#define BCM2835_DMA_DEBUG_FIFO_ERR		BIT(1)
+#define BCM2835_DMA_DEBUG_READ_ERR		BIT(2)
+#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_SHIFT 4
+#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_BITS 4
+#define BCM2835_DMA_DEBUG_ID_SHIFT		16
+#define BCM2835_DMA_DEBUG_ID_BITS		9
+#define BCM2835_DMA_DEBUG_STATE_SHIFT		16
+#define BCM2835_DMA_DEBUG_STATE_BITS		9
+#define BCM2835_DMA_DEBUG_VERSION_SHIFT		25
+#define BCM2835_DMA_DEBUG_VERSION_BITS		3
+#define BCM2835_DMA_DEBUG_LITE			BIT(28)
+
+/* shared registers for all dma channels */
+#define BCM2835_DMA_INT_STATUS         0xfe0
+#define BCM2835_DMA_ENABLE             0xff0
 
 #define BCM2835_DMA_DATA_TYPE_S8	1
 #define BCM2835_DMA_DATA_TYPE_S16	2
 #define BCM2835_DMA_DATA_TYPE_S32	4
 #define BCM2835_DMA_DATA_TYPE_S128	16
 
-#define BCM2835_DMA_BULK_MASK	BIT(0)
-#define BCM2835_DMA_FIQ_MASK	(BIT(2) | BIT(3))
-
 /* Valid only for channels 0 - 14, 15 has its own base address */
 #define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
 #define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
 
+/* the max dma length for different channels */
+#define MAX_DMA_LEN SZ_1G
+#define MAX_LITE_DMA_LEN (SZ_64K - 4)
+
+static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c)
+{
+	/* lite and normal channels have different max frame length */
+	return c->is_lite_channel ? MAX_LITE_DMA_LEN : MAX_DMA_LEN;
+}
+
+/* how many frames of max_len size do we need to transfer len bytes */
+static inline size_t bcm2835_dma_frames_for_length(size_t len,
+						   size_t max_len)
+{
+	return DIV_ROUND_UP(len, max_len);
+}
+
 static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
 {
 	return container_of(d, struct bcm2835_dmadev, ddev);
@@ -146,19 +208,209 @@
 	return container_of(t, struct bcm2835_desc, vd.tx);
 }
 
-static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc)
 {
-	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
-	int i;
+	size_t i;
 
 	for (i = 0; i < desc->frames; i++)
 		dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
 			      desc->cb_list[i].paddr);
 
-	kfree(desc->cb_list);
 	kfree(desc);
 }
 
+static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+{
+	bcm2835_dma_free_cb_chain(
+		container_of(vd, struct bcm2835_desc, vd));
+}
+
+static void bcm2835_dma_create_cb_set_length(
+	struct bcm2835_chan *chan,
+	struct bcm2835_dma_cb *control_block,
+	size_t len,
+	size_t period_len,
+	size_t *total_len,
+	u32 finalextrainfo)
+{
+	size_t max_len = bcm2835_dma_max_frame_length(chan);
+
+	/* set the length taking lite-channel limitations into account */
+	control_block->length = min_t(u32, len, max_len);
+
+	/* finished if we have no period_length */
+	if (!period_len)
+		return;
+
+	/*
+	 * period_len means that we need to generate
+	 * transfers that terminate at every
+	 * multiple of period_len - this is typically
+	 * used to set the interrupt flag in info,
+	 * which is required during cyclic transfers
+	 */
+
+	/* have we filled in period_length yet? */
+	if (*total_len + control_block->length < period_len)
+		return;
+
+	/* calculate the length that remains to reach period_length */
+	control_block->length = period_len - *total_len;
+
+	/* reset total_length for next period */
+	*total_len = 0;
+
+	/* add extrainfo bits in info */
+	control_block->info |= finalextrainfo;
+}
+
+static inline size_t bcm2835_dma_count_frames_for_sg(
+	struct bcm2835_chan *c,
+	struct scatterlist *sgl,
+	unsigned int sg_len)
+{
+	size_t frames = 0;
+	struct scatterlist *sgent;
+	unsigned int i;
+	size_t plength = bcm2835_dma_max_frame_length(c);
+
+	for_each_sg(sgl, sgent, sg_len, i)
+		frames += bcm2835_dma_frames_for_length(
+			sg_dma_len(sgent), plength);
+
+	return frames;
+}
+
+/**
+ * bcm2835_dma_create_cb_chain - create a control block chain and fill in data
+ *
+ * @chan:           the @dma_chan for which we run this
+ * @direction:      the direction in which we transfer
+ * @cyclic:         whether this is a cyclic transfer
+ * @info:           the default info bits to apply per controlblock
+ * @frames:         number of controlblocks to allocate
+ * @src:            the src address to assign (if the S_INC bit is set
+ *                  in @info, then it gets incremented)
+ * @dst:            the dst address to assign (if the D_INC bit is set
+ *                  in @info, then it gets incremented)
+ * @buf_len:        the full buffer length (may also be 0)
+ * @period_len:     the period length at which to apply @finalextrainfo,
+ *                  in addition to the last transfer; this will also
+ *                  end some control blocks early
+ * @finalextrainfo: additional bits in last controlblock
+ *                  (or when period_len is reached in case of cyclic)
+ * @gfp:            the GFP flag to use for allocation
+ */
+static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
+	struct dma_chan *chan, enum dma_transfer_direction direction,
+	bool cyclic, u32 info, u32 finalextrainfo, size_t frames,
+	dma_addr_t src, dma_addr_t dst, size_t buf_len,
+	size_t period_len, gfp_t gfp)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	size_t len = buf_len, total_len;
+	size_t frame;
+	struct bcm2835_desc *d;
+	struct bcm2835_cb_entry *cb_entry;
+	struct bcm2835_dma_cb *control_block;
+
+	if (!frames)
+		return NULL;
+
+	/* allocate and setup the descriptor. */
+	d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
+		    gfp);
+	if (!d)
+		return NULL;
+
+	d->c = c;
+	d->dir = direction;
+	d->cyclic = cyclic;
+
+	/*
+	 * Iterate over all frames, create a control block
+	 * for each frame and link them together.
+	 */
+	for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) {
+		cb_entry = &d->cb_list[frame];
+		cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
+					      &cb_entry->paddr);
+		if (!cb_entry->cb)
+			goto error_cb;
+
+		/* fill in the control block */
+		control_block = cb_entry->cb;
+		control_block->info = info;
+		control_block->src = src;
+		control_block->dst = dst;
+		control_block->stride = 0;
+		control_block->next = 0;
+		/* set up length in control_block if requested */
+		if (buf_len) {
+			/* calculate length honoring period_length */
+			bcm2835_dma_create_cb_set_length(
+				c, control_block,
+				len, period_len, &total_len,
+				cyclic ? finalextrainfo : 0);
+
+			/* calculate new remaining length */
+			len -= control_block->length;
+		}
+
+		/* link this to the last controlblock */
+		if (frame)
+			d->cb_list[frame - 1].cb->next = cb_entry->paddr;
+
+		/* update src and dst and length */
+		if (src && (info & BCM2835_DMA_S_INC))
+			src += control_block->length;
+		if (dst && (info & BCM2835_DMA_D_INC))
+			dst += control_block->length;
+
+		/* Length of total transfer */
+		d->size += control_block->length;
+	}
+
+	/* the last frame requires extra flags */
+	d->cb_list[d->frames - 1].cb->info |= finalextrainfo;
+
+	/* detect a size mismatch */
+	if (buf_len && (d->size != buf_len))
+		goto error_cb;
+
+	return d;
+error_cb:
+	bcm2835_dma_free_cb_chain(d);
+
+	return NULL;
+}
+
+static void bcm2835_dma_fill_cb_chain_with_sg(
+	struct dma_chan *chan,
+	enum dma_transfer_direction direction,
+	struct bcm2835_cb_entry *cb,
+	struct scatterlist *sgl,
+	unsigned int sg_len)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	size_t max_len = bcm2835_dma_max_frame_length(c);
+	unsigned int i, len;
+	dma_addr_t addr;
+	struct scatterlist *sgent;
+
+	for_each_sg(sgl, sgent, sg_len, i) {
+		for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent);
+		     len > 0;
+		     addr += cb->cb->length, len -= cb->cb->length, cb++) {
+			if (direction == DMA_DEV_TO_MEM)
+				cb->cb->dst = addr;
+			else
+				cb->cb->src = addr;
+			cb->cb->length = min(len, max_len);
+		}
+	}
+}
+
 static int bcm2835_dma_abort(void __iomem *chan_base)
 {
 	unsigned long cs;
@@ -218,6 +470,15 @@
 	struct bcm2835_desc *d;
 	unsigned long flags;
 
+	/* check the shared interrupt */
+	if (c->irq_flags & IRQF_SHARED) {
+		/* check if the interrupt is enabled */
+		flags = readl(c->chan_base + BCM2835_DMA_CS);
+		/* if not set then we are not the reason for the irq */
+		if (!(flags & BCM2835_DMA_INT))
+			return IRQ_NONE;
+	}
+
 	spin_lock_irqsave(&c->vc.lock, flags);
 
 	/* Acknowledge interrupt */
@@ -226,12 +487,18 @@
 	d = c->desc;
 
 	if (d) {
-		/* TODO Only works for cyclic DMA */
-		vchan_cyclic_callback(&d->vd);
-	}
+		if (d->cyclic) {
+			/* call the cyclic callback */
+			vchan_cyclic_callback(&d->vd);
 
-	/* Keep the DMA engine running */
-	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+			/* Keep the DMA engine running */
+			writel(BCM2835_DMA_ACTIVE,
+			       c->chan_base + BCM2835_DMA_CS);
+		} else {
+			vchan_cookie_complete(&c->desc->vd);
+			bcm2835_dma_start_desc(c);
+		}
+	}
 
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 
@@ -252,8 +519,8 @@
 		return -ENOMEM;
 	}
 
-	return request_irq(c->irq_number,
-			bcm2835_dma_callback, 0, "DMA IRQ", c);
+	return request_irq(c->irq_number, bcm2835_dma_callback,
+			   c->irq_flags, "DMA IRQ", c);
 }
 
 static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
@@ -339,8 +606,6 @@
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	unsigned long flags;
 
-	c->cyclic = true; /* Nothing else is implemented */
-
 	spin_lock_irqsave(&c->vc.lock, flags);
 	if (vchan_issue_pending(&c->vc) && !c->desc)
 		bcm2835_dma_start_desc(c);
@@ -348,18 +613,98 @@
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 }
 
+struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
+	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+	size_t len, unsigned long flags)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	struct bcm2835_desc *d;
+	u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC;
+	u32 extra = BCM2835_DMA_INT_EN | BCM2835_DMA_WAIT_RESP;
+	size_t max_len = bcm2835_dma_max_frame_length(c);
+	size_t frames;
+
+	/* if src, dst or len is not given return with an error */
+	if (!src || !dst || !len)
+		return NULL;
+
+	/* calculate number of frames */
+	frames = bcm2835_dma_frames_for_length(len, max_len);
+
+	/* allocate the CB chain - this also fills in the pointers */
+	d = bcm2835_dma_create_cb_chain(chan, DMA_MEM_TO_MEM, false,
+					info, extra, frames,
+					src, dst, len, 0, GFP_KERNEL);
+	if (!d)
+		return NULL;
+
+	return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
+	struct dma_chan *chan,
+	struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	struct bcm2835_desc *d;
+	dma_addr_t src = 0, dst = 0;
+	u32 info = BCM2835_DMA_WAIT_RESP;
+	u32 extra = BCM2835_DMA_INT_EN;
+	size_t frames;
+
+	if (!is_slave_direction(direction)) {
+		dev_err(chan->device->dev,
+			"%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (c->dreq != 0)
+		info |= BCM2835_DMA_PER_MAP(c->dreq);
+
+	if (direction == DMA_DEV_TO_MEM) {
+		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+			return NULL;
+		src = c->cfg.src_addr;
+		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
+	} else {
+		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+			return NULL;
+		dst = c->cfg.dst_addr;
+		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
+	}
+
+	/* count frames in sg list */
+	frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len);
+
+	/* allocate the CB chain */
+	d = bcm2835_dma_create_cb_chain(chan, direction, false,
+					info, extra,
+					frames, src, dst, 0, 0,
+					GFP_KERNEL);
+	if (!d)
+		return NULL;
+
+	/* fill in frames with scatterlist pointers */
+	bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list,
+					  sgl, sg_len);
+
+	return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
 static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
 	unsigned long flags)
 {
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
-	enum dma_slave_buswidth dev_width;
 	struct bcm2835_desc *d;
-	dma_addr_t dev_addr;
-	unsigned int es, sync_type;
-	unsigned int frame;
-	int i;
+	dma_addr_t src, dst;
+	u32 info = BCM2835_DMA_WAIT_RESP;
+	u32 extra = BCM2835_DMA_INT_EN;
+	size_t max_len = bcm2835_dma_max_frame_length(c);
+	size_t frames;
 
 	/* Grab configuration */
 	if (!is_slave_direction(direction)) {
@@ -367,103 +712,61 @@
 		return NULL;
 	}
 
-	if (direction == DMA_DEV_TO_MEM) {
-		dev_addr = c->cfg.src_addr;
-		dev_width = c->cfg.src_addr_width;
-		sync_type = BCM2835_DMA_S_DREQ;
-	} else {
-		dev_addr = c->cfg.dst_addr;
-		dev_width = c->cfg.dst_addr_width;
-		sync_type = BCM2835_DMA_D_DREQ;
-	}
-
-	/* Bus width translates to the element size (ES) */
-	switch (dev_width) {
-	case DMA_SLAVE_BUSWIDTH_4_BYTES:
-		es = BCM2835_DMA_DATA_TYPE_S32;
-		break;
-	default:
+	if (!buf_len) {
+		dev_err(chan->device->dev,
+			"%s: bad buffer length (= 0)\n", __func__);
 		return NULL;
 	}
 
-	/* Now allocate and setup the descriptor. */
-	d = kzalloc(sizeof(*d), GFP_NOWAIT);
-	if (!d)
-		return NULL;
-
-	d->c = c;
-	d->dir = direction;
-	d->frames = buf_len / period_len;
-
-	d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
-	if (!d->cb_list) {
-		kfree(d);
-		return NULL;
-	}
-	/* Allocate memory for control blocks */
-	for (i = 0; i < d->frames; i++) {
-		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
-
-		cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
-					       &cb_entry->paddr);
-		if (!cb_entry->cb)
-			goto error_cb;
-	}
-
 	/*
-	 * Iterate over all frames, create a control block
-	 * for each frame and link them together.
+	 * warn if buf_len is not a multiple of period_len - this may lead
+	 * to unexpected latencies for interrupts and thus audible clicks
 	 */
-	for (frame = 0; frame < d->frames; frame++) {
-		struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
+	if (buf_len % period_len)
+		dev_warn_once(chan->device->dev,
+			      "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n",
+			      __func__, buf_len, period_len);
 
-		/* Setup adresses */
-		if (d->dir == DMA_DEV_TO_MEM) {
-			control_block->info = BCM2835_DMA_D_INC;
-			control_block->src = dev_addr;
-			control_block->dst = buf_addr + frame * period_len;
-		} else {
-			control_block->info = BCM2835_DMA_S_INC;
-			control_block->src = buf_addr + frame * period_len;
-			control_block->dst = dev_addr;
-		}
+	/* Setup DREQ channel */
+	if (c->dreq != 0)
+		info |= BCM2835_DMA_PER_MAP(c->dreq);
 
-		/* Enable interrupt */
-		control_block->info |= BCM2835_DMA_INT_EN;
-
-		/* Setup synchronization */
-		if (sync_type != 0)
-			control_block->info |= sync_type;
-
-		/* Setup DREQ channel */
-		if (c->dreq != 0)
-			control_block->info |=
-				BCM2835_DMA_PER_MAP(c->dreq);
-
-		/* Length of a frame */
-		control_block->length = period_len;
-		d->size += control_block->length;
-
-		/*
-		 * Next block is the next frame.
-		 * This DMA engine driver currently only supports cyclic DMA.
-		 * Therefore, wrap around at number of frames.
-		 */
-		control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
+	if (direction == DMA_DEV_TO_MEM) {
+		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+			return NULL;
+		src = c->cfg.src_addr;
+		dst = buf_addr;
+		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
+	} else {
+		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+			return NULL;
+		dst = c->cfg.dst_addr;
+		src = buf_addr;
+		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
 	}
 
+	/* calculate number of frames */
+	frames = /* number of periods */
+		 DIV_ROUND_UP(buf_len, period_len) *
+		 /* number of frames per period */
+		 bcm2835_dma_frames_for_length(period_len, max_len);
+
+	/*
+	 * allocate the CB chain
+	 * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine
+	 * implementation calls prep_dma_cyclic with interrupts disabled.
+	 */
+	d = bcm2835_dma_create_cb_chain(chan, direction, true,
+					info, extra,
+					frames, src, dst, buf_len,
+					period_len, GFP_NOWAIT);
+	if (!d)
+		return NULL;
+
+	/* wrap around into a loop */
+	d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr;
+
 	return vchan_tx_prep(&c->vc, &d->vd, flags);
-error_cb:
-	i--;
-	for (; i >= 0; i--) {
-		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
-
-		dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
-	}
-
-	kfree(d->cb_list);
-	kfree(d);
-	return NULL;
 }
 
 static int bcm2835_dma_slave_config(struct dma_chan *chan,
@@ -529,7 +832,8 @@
 	return 0;
 }
 
-static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
+static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
+				 int irq, unsigned int irq_flags)
 {
 	struct bcm2835_chan *c;
 
@@ -544,6 +848,12 @@
 	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
 	c->ch = chan_id;
 	c->irq_number = irq;
+	c->irq_flags = irq_flags;
+
+	/* check in DEBUG register if this is a LITE channel */
+	if (readl(c->chan_base + BCM2835_DMA_DEBUG) &
+		BCM2835_DMA_DEBUG_LITE)
+		c->is_lite_channel = true;
 
 	return 0;
 }
@@ -587,9 +897,11 @@
 	struct resource *res;
 	void __iomem *base;
 	int rc;
-	int i;
-	int irq;
+	int i, j;
+	int irq[BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1];
+	int irq_flags;
 	uint32_t chans_available;
+	char chan_name[BCM2835_DMA_CHAN_NAME_SIZE];
 
 	if (!pdev->dev.dma_mask)
 		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
@@ -615,16 +927,22 @@
 	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
 	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
 	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
 	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
 	od->ddev.device_tx_status = bcm2835_dma_tx_status;
 	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
 	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
+	od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
+	od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
 	od->ddev.device_config = bcm2835_dma_slave_config;
 	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
 	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
+			      BIT(DMA_MEM_TO_MEM);
+	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
 	spin_lock_init(&od->lock);
@@ -640,22 +958,48 @@
 		goto err_no_dma;
 	}
 
-	/*
-	 * Do not use the FIQ and BULK channels,
-	 * because they are used by the GPU.
-	 */
-	chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);
-
-	for (i = 0; i < pdev->num_resources; i++) {
-		irq = platform_get_irq(pdev, i);
-		if (irq < 0)
-			break;
-
-		if (chans_available & (1 << i)) {
-			rc = bcm2835_dma_chan_init(od, i, irq);
-			if (rc)
-				goto err_no_dma;
+	/* get irqs for each channel that we support */
+	for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
+		/* skip masked out channels */
+		if (!(chans_available & (1 << i))) {
+			irq[i] = -1;
+			continue;
 		}
+
+		/* get the named irq */
+		snprintf(chan_name, sizeof(chan_name), "dma%i", i);
+		irq[i] = platform_get_irq_byname(pdev, chan_name);
+		if (irq[i] >= 0)
+			continue;
+
+		/* legacy device tree case handling */
+		dev_warn_once(&pdev->dev,
+			      "missing interrupt-names property in device tree - legacy interpretation is used\n");
+		/*
+		 * channels >= 11 all use the 11th interrupt,
+		 * which is therefore shared between them
+		 */
+		irq[i] = platform_get_irq(pdev, i < 11 ? i : 11);
+	}
+
+	/* initialize the channels, sharing irqs where needed */
+	for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
+		/* skip channels without irq */
+		if (irq[i] < 0)
+			continue;
+
+		/* check if there are other channels that also use this irq */
+		irq_flags = 0;
+		for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++)
+			if ((i != j) && (irq[j] == irq[i])) {
+				irq_flags = IRQF_SHARED;
+				break;
+			}
+
+		/* initialize the channel */
+		rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags);
+		if (rc)
+			goto err_no_dma;
 	}
 
 	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
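
For reference, a minimal standalone sketch of the frame-count arithmetic used
by the reworked prep_dma_cyclic() above, assuming that the helper
bcm2835_dma_frames_for_length() (not visible in this hunk) simply rounds a
length up to the per-frame maximum, as its name suggests; the limit in the
example comment is illustrative, not the hardware value.

    #include <stddef.h>

    /* local copy of the kernel macro, to keep the sketch self-contained */
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    /* frames = periods * frames-per-period, as in the driver comment above */
    static size_t cyclic_frames(size_t buf_len, size_t period_len,
                                size_t max_len)
    {
            size_t periods = DIV_ROUND_UP(buf_len, period_len);
            size_t frames_per_period = DIV_ROUND_UP(period_len, max_len);

            return periods * frames_per_period;
    }

    /*
     * Example: a 64 KiB buffer with 16 KiB periods and a hypothetical
     * 32 KiB per-frame limit gives 4 periods * 1 frame = 4 control blocks.
     */
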
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 0cb259c5..8c9f45f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -289,7 +289,7 @@
 	do {
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			pr_err("%s: timeout!\n", __func__);
+			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
 			return DMA_ERROR;
 		}
 		if (status != DMA_IN_PROGRESS)
@@ -482,7 +482,8 @@
 	device = chan->device;
 
 	/* check if the channel supports slave transactions */
-	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
+	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
+	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
 		return -ENXIO;
 
 	/*
@@ -518,7 +519,7 @@
 	struct dma_chan *chan;
 
 	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
-		pr_debug("%s: wrong capabilities\n", __func__);
+		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 		return NULL;
 	}
 	/* devices with multiple channels need special handling as we need to
@@ -533,12 +534,12 @@
 
 	list_for_each_entry(chan, &dev->channels, device_node) {
 		if (chan->client_count) {
-			pr_debug("%s: %s busy\n",
+			dev_dbg(dev->dev, "%s: %s busy\n",
 				 __func__, dma_chan_name(chan));
 			continue;
 		}
 		if (fn && !fn(chan, fn_param)) {
-			pr_debug("%s: %s filter said false\n",
+			dev_dbg(dev->dev, "%s: %s filter said false\n",
 				 __func__, dma_chan_name(chan));
 			continue;
 		}
@@ -567,11 +568,12 @@
 
 		if (err) {
 			if (err == -ENODEV) {
-				pr_debug("%s: %s module removed\n", __func__,
-					 dma_chan_name(chan));
+				dev_dbg(device->dev, "%s: %s module removed\n",
+					__func__, dma_chan_name(chan));
 				list_del_rcu(&device->global_node);
 			} else
-				pr_debug("%s: failed to get %s: (%d)\n",
+				dev_dbg(device->dev,
+					"%s: failed to get %s: (%d)\n",
 					 __func__, dma_chan_name(chan), err);
 
 			if (--device->privatecnt == 0)
@@ -602,7 +604,8 @@
 		device->privatecnt++;
 		err = dma_chan_get(chan);
 		if (err) {
-			pr_debug("%s: failed to get %s: (%d)\n",
+			dev_dbg(chan->device->dev,
+				"%s: failed to get %s: (%d)\n",
 				__func__, dma_chan_name(chan), err);
 			chan = NULL;
 			if (--device->privatecnt == 0)
@@ -814,8 +817,9 @@
 				list_del_rcu(&device->global_node);
 				break;
 			} else if (err)
-				pr_debug("%s: failed to get %s: (%d)\n",
-				       __func__, dma_chan_name(chan), err);
+				dev_dbg(chan->device->dev,
+					"%s: failed to get %s: (%d)\n",
+					__func__, dma_chan_name(chan), err);
 		}
 	}
 
@@ -862,12 +866,12 @@
 		return false;
 	#endif
 
-	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
+	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
 	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 		return false;
 	#endif
 
-	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
+	#if IS_ENABLED(CONFIG_ASYNC_XOR)
 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 		return false;
 
@@ -877,7 +881,7 @@
 	#endif
 	#endif
 
-	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
+	#if IS_ENABLED(CONFIG_ASYNC_PQ)
 	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 		return false;
 
@@ -1222,8 +1226,9 @@
 
 	while (tx->cookie == -EBUSY) {
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			pr_err("%s timeout waiting for descriptor submission\n",
-			       __func__);
+			dev_err(tx->chan->device->dev,
+				"%s timeout waiting for descriptor submission\n",
+				__func__);
 			return DMA_ERROR;
 		}
 		cpu_relax();
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5ad0ec1..edf053f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -45,22 +45,19 @@
 			DW_DMA_MSIZE_16;			\
 		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
 			DW_DMA_MSIZE_16;			\
+		u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?		\
+			_dwc->p_master : _dwc->m_master;		\
+		u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?		\
+			_dwc->p_master : _dwc->m_master;		\
 								\
 		(DWC_CTLL_DST_MSIZE(_dmsize)			\
 		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
 		 | DWC_CTLL_LLP_D_EN				\
 		 | DWC_CTLL_LLP_S_EN				\
-		 | DWC_CTLL_DMS(_dwc->dst_master)		\
-		 | DWC_CTLL_SMS(_dwc->src_master));		\
+		 | DWC_CTLL_DMS(_dms)				\
+		 | DWC_CTLL_SMS(_sms));				\
 	})
 
-/*
- * Number of descriptors to allocate for each channel. This should be
- * made configurable somehow; preferably, the clients (at least the
- * ones using slave transfers) should be able to give us a hint.
- */
-#define NR_DESCS_PER_CHANNEL	64
-
 /* The set of bus widths supported by the DMA controller */
 #define DW_DMA_BUSWIDTHS			  \
 	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
@@ -80,76 +77,78 @@
 	return to_dw_desc(dwc->active_list.next);
 }
 
-static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
+static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct dw_desc *desc, *_desc;
-	struct dw_desc *ret = NULL;
-	unsigned int i = 0;
-	unsigned long flags;
+	struct dw_desc		*desc = txd_to_dw_desc(tx);
+	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
+	dma_cookie_t		cookie;
+	unsigned long		flags;
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
-		i++;
-		if (async_tx_test_ack(&desc->txd)) {
-			list_del(&desc->desc_node);
-			ret = desc;
-			break;
-		}
-		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
-	}
+	cookie = dma_cookie_assign(tx);
+
+	/*
+	 * REVISIT: We should attempt to chain as many descriptors as
+	 * possible, perhaps even appending to those already submitted
+	 * for DMA. But this is hard to do in a race-free manner.
+	 */
+
+	list_add_tail(&desc->desc_node, &dwc->queue);
 	spin_unlock_irqrestore(&dwc->lock, flags);
+	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
+		 __func__, desc->txd.cookie);
 
-	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
-
-	return ret;
+	return cookie;
 }
 
-/*
- * Move a descriptor, including any children, to the free list.
- * `desc' must not be on any lists.
- */
+static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
+{
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	struct dw_desc *desc;
+	dma_addr_t phys;
+
+	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
+	if (!desc)
+		return NULL;
+
+	dwc->descs_allocated++;
+	INIT_LIST_HEAD(&desc->tx_list);
+	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
+	desc->txd.tx_submit = dwc_tx_submit;
+	desc->txd.flags = DMA_CTRL_ACK;
+	desc->txd.phys = phys;
+	return desc;
+}
+
 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 {
-	unsigned long flags;
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	struct dw_desc *child, *_next;
 
-	if (desc) {
-		struct dw_desc *child;
+	if (unlikely(!desc))
+		return;
 
-		spin_lock_irqsave(&dwc->lock, flags);
-		list_for_each_entry(child, &desc->tx_list, desc_node)
-			dev_vdbg(chan2dev(&dwc->chan),
-					"moving child desc %p to freelist\n",
-					child);
-		list_splice_init(&desc->tx_list, &dwc->free_list);
-		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
-		list_add(&desc->desc_node, &dwc->free_list);
-		spin_unlock_irqrestore(&dwc->lock, flags);
+	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
+		list_del(&child->desc_node);
+		dma_pool_free(dw->desc_pool, child, child->txd.phys);
+		dwc->descs_allocated--;
 	}
+
+	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
+	dwc->descs_allocated--;
 }
 
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	struct dw_dma_slave *dws = dwc->chan.private;
 	u32 cfghi = DWC_CFGH_FIFO_MODE;
 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 
-	if (dwc->initialized == true)
+	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
 		return;
 
-	if (dws) {
-		/*
-		 * We need controller-specific data to set up slave
-		 * transfers.
-		 */
-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
-	} else {
-		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
-	}
+	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
 
 	channel_writel(dwc, CFG_LO, cfglo);
 	channel_writel(dwc, CFG_HI, cfghi);
@@ -158,26 +157,11 @@
 	channel_set_bit(dw, MASK.XFER, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
-	dwc->initialized = true;
+	set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
 }
 
 /*----------------------------------------------------------------------*/
 
-static inline unsigned int dwc_fast_ffs(unsigned long long v)
-{
-	/*
-	 * We can be a lot more clever here, but this should take care
-	 * of the most common optimization.
-	 */
-	if (!(v & 7))
-		return 3;
-	else if (!(v & 3))
-		return 2;
-	else if (!(v & 1))
-		return 1;
-	return 0;
-}
-
 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
 {
 	dev_err(chan2dev(&dwc->chan),
@@ -209,12 +193,12 @@
 	 * Software emulation of LLP mode relies on interrupts to continue
 	 * multi block transfer.
 	 */
-	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
+	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
 
-	channel_writel(dwc, SAR, desc->lli.sar);
-	channel_writel(dwc, DAR, desc->lli.dar);
+	channel_writel(dwc, SAR, lli_read(desc, sar));
+	channel_writel(dwc, DAR, lli_read(desc, dar));
 	channel_writel(dwc, CTL_LO, ctllo);
-	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
+	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
 	channel_set_bit(dw, CH_EN, dwc->mask);
 
 	/* Move pointer to next descriptor */
@@ -225,6 +209,7 @@
 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 {
 	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
+	u8		lms = DWC_LLP_LMS(dwc->m_master);
 	unsigned long	was_soft_llp;
 
 	/* ASSERT:  channel is idle */
@@ -249,7 +234,7 @@
 
 		dwc_initialize(dwc);
 
-		dwc->residue = first->total_len;
+		first->residue = first->total_len;
 		dwc->tx_node_active = &first->tx_list;
 
 		/* Submit first block */
@@ -260,9 +245,8 @@
 
 	dwc_initialize(dwc);
 
-	channel_writel(dwc, LLP, first->txd.phys);
-	channel_writel(dwc, CTL_LO,
-			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+	channel_writel(dwc, LLP, first->txd.phys | lms);
+	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
 	channel_writel(dwc, CTL_HI, 0);
 	channel_set_bit(dw, CH_EN, dwc->mask);
 }
@@ -305,11 +289,7 @@
 	list_for_each_entry(child, &desc->tx_list, desc_node)
 		async_tx_ack(&child->txd);
 	async_tx_ack(&desc->txd);
-
-	list_splice_init(&desc->tx_list, &dwc->free_list);
-	list_move(&desc->desc_node, &dwc->free_list);
-
-	dma_descriptor_unmap(txd);
+	dwc_desc_put(dwc, desc);
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	if (callback)
@@ -380,11 +360,11 @@
 
 			head = &desc->tx_list;
 			if (active != head) {
-				/* Update desc to reflect last sent one */
-				if (active != head->next)
-					desc = to_dw_desc(active->prev);
-
-				dwc->residue -= desc->len;
+				/* Update residue to reflect last sent descriptor */
+				if (active == head->next)
+					desc->residue -= desc->len;
+				else
+					desc->residue -= to_dw_desc(active->prev)->len;
 
 				child = to_dw_desc(active);
 
@@ -399,8 +379,6 @@
 			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
 		}
 
-		dwc->residue = 0;
-
 		spin_unlock_irqrestore(&dwc->lock, flags);
 
 		dwc_complete_all(dw, dwc);
@@ -408,7 +386,6 @@
 	}
 
 	if (list_empty(&dwc->active_list)) {
-		dwc->residue = 0;
 		spin_unlock_irqrestore(&dwc->lock, flags);
 		return;
 	}
@@ -423,31 +400,31 @@
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
 		/* Initial residue value */
-		dwc->residue = desc->total_len;
+		desc->residue = desc->total_len;
 
 		/* Check first descriptors addr */
-		if (desc->txd.phys == llp) {
+		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
 			spin_unlock_irqrestore(&dwc->lock, flags);
 			return;
 		}
 
 		/* Check first descriptors llp */
-		if (desc->lli.llp == llp) {
+		if (lli_read(desc, llp) == llp) {
 			/* This one is currently in progress */
-			dwc->residue -= dwc_get_sent(dwc);
+			desc->residue -= dwc_get_sent(dwc);
 			spin_unlock_irqrestore(&dwc->lock, flags);
 			return;
 		}
 
-		dwc->residue -= desc->len;
+		desc->residue -= desc->len;
 		list_for_each_entry(child, &desc->tx_list, desc_node) {
-			if (child->lli.llp == llp) {
+			if (lli_read(child, llp) == llp) {
 				/* Currently in progress */
-				dwc->residue -= dwc_get_sent(dwc);
+				desc->residue -= dwc_get_sent(dwc);
 				spin_unlock_irqrestore(&dwc->lock, flags);
 				return;
 			}
-			dwc->residue -= child->len;
+			desc->residue -= child->len;
 		}
 
 		/*
@@ -469,10 +446,14 @@
 	spin_unlock_irqrestore(&dwc->lock, flags);
 }
 
-static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
+static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
 {
 	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
-		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+		 lli_read(desc, sar),
+		 lli_read(desc, dar),
+		 lli_read(desc, llp),
+		 lli_read(desc, ctlhi),
+		 lli_read(desc, ctllo));
 }
 
 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -508,9 +489,9 @@
 	 */
 	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
 				       "  cookie: %d\n", bad_desc->txd.cookie);
-	dwc_dump_lli(dwc, &bad_desc->lli);
+	dwc_dump_lli(dwc, bad_desc);
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
-		dwc_dump_lli(dwc, &child->lli);
+		dwc_dump_lli(dwc, child);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -561,7 +542,7 @@
 	 */
 	if (unlikely(status_err & dwc->mask) ||
 			unlikely(status_xfer & dwc->mask)) {
-		int i;
+		unsigned int i;
 
 		dev_err(chan2dev(&dwc->chan),
 			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
@@ -583,7 +564,7 @@
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
 
 		for (i = 0; i < dwc->cdesc->periods; i++)
-			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+			dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
 
 		spin_unlock_irqrestore(&dwc->lock, flags);
 	}
@@ -601,7 +582,7 @@
 	u32 status_block;
 	u32 status_xfer;
 	u32 status_err;
-	int i;
+	unsigned int i;
 
 	status_block = dma_readl(dw, RAW.BLOCK);
 	status_xfer = dma_readl(dw, RAW.XFER);
@@ -670,30 +651,6 @@
 
 /*----------------------------------------------------------------------*/
 
-static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct dw_desc		*desc = txd_to_dw_desc(tx);
-	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
-	dma_cookie_t		cookie;
-	unsigned long		flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	cookie = dma_cookie_assign(tx);
-
-	/*
-	 * REVISIT: We should attempt to chain as many descriptors as
-	 * possible, perhaps even appending to those already submitted
-	 * for DMA. But this is hard to do in a race-free manner.
-	 */
-
-	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
-	list_add_tail(&desc->desc_node, &dwc->queue);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	return cookie;
-}
-
 static struct dma_async_tx_descriptor *
 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		size_t len, unsigned long flags)
@@ -705,10 +662,12 @@
 	struct dw_desc		*prev;
 	size_t			xfer_count;
 	size_t			offset;
+	u8			m_master = dwc->m_master;
 	unsigned int		src_width;
 	unsigned int		dst_width;
-	unsigned int		data_width;
+	unsigned int		data_width = dw->pdata->data_width[m_master];
 	u32			ctllo;
+	u8			lms = DWC_LLP_LMS(m_master);
 
 	dev_vdbg(chan2dev(chan),
 			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
@@ -721,11 +680,7 @@
 
 	dwc->direction = DMA_MEM_TO_MEM;
 
-	data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
-			   dw->data_width[dwc->dst_master]);
-
-	src_width = dst_width = min_t(unsigned int, data_width,
-				      dwc_fast_ffs(src | dest | len));
+	src_width = dst_width = __ffs(data_width | src | dest | len);
 
 	ctllo = DWC_DEFAULT_CTLLO(chan)
 			| DWC_CTLL_DST_WIDTH(dst_width)
@@ -743,27 +698,27 @@
 		if (!desc)
 			goto err_desc_get;
 
-		desc->lli.sar = src + offset;
-		desc->lli.dar = dest + offset;
-		desc->lli.ctllo = ctllo;
-		desc->lli.ctlhi = xfer_count;
+		lli_write(desc, sar, src + offset);
+		lli_write(desc, dar, dest + offset);
+		lli_write(desc, ctllo, ctllo);
+		lli_write(desc, ctlhi, xfer_count);
 		desc->len = xfer_count << src_width;
 
 		if (!first) {
 			first = desc;
 		} else {
-			prev->lli.llp = desc->txd.phys;
-			list_add_tail(&desc->desc_node,
-					&first->tx_list);
+			lli_write(prev, llp, desc->txd.phys | lms);
+			list_add_tail(&desc->desc_node, &first->tx_list);
 		}
 		prev = desc;
 	}
 
 	if (flags & DMA_PREP_INTERRUPT)
 		/* Trigger interrupt after last block */
-		prev->lli.ctllo |= DWC_CTLL_INT_EN;
+		lli_set(prev, ctllo, DWC_CTLL_INT_EN);
 
 	prev->lli.llp = 0;
+	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
 	first->txd.flags = flags;
 	first->total_len = len;
 
@@ -785,10 +740,12 @@
 	struct dw_desc		*prev;
 	struct dw_desc		*first;
 	u32			ctllo;
+	u8			m_master = dwc->m_master;
+	u8			lms = DWC_LLP_LMS(m_master);
 	dma_addr_t		reg;
 	unsigned int		reg_width;
 	unsigned int		mem_width;
-	unsigned int		data_width;
+	unsigned int		data_width = dw->pdata->data_width[m_master];
 	unsigned int		i;
 	struct scatterlist	*sg;
 	size_t			total_len = 0;
@@ -814,8 +771,6 @@
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
 			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
 
-		data_width = dw->data_width[dwc->src_master];
-
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
 			u32		len, dlen, mem;
@@ -823,17 +778,16 @@
 			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 
-			mem_width = min_t(unsigned int,
-					  data_width, dwc_fast_ffs(mem | len));
+			mem_width = __ffs(data_width | mem | len);
 
 slave_sg_todev_fill_desc:
 			desc = dwc_desc_get(dwc);
 			if (!desc)
 				goto err_desc_get;
 
-			desc->lli.sar = mem;
-			desc->lli.dar = reg;
-			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
+			lli_write(desc, sar, mem);
+			lli_write(desc, dar, reg);
+			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
 			if ((len >> mem_width) > dwc->block_size) {
 				dlen = dwc->block_size << mem_width;
 				mem += dlen;
@@ -843,15 +797,14 @@
 				len = 0;
 			}
 
-			desc->lli.ctlhi = dlen >> mem_width;
+			lli_write(desc, ctlhi, dlen >> mem_width);
 			desc->len = dlen;
 
 			if (!first) {
 				first = desc;
 			} else {
-				prev->lli.llp = desc->txd.phys;
-				list_add_tail(&desc->desc_node,
-						&first->tx_list);
+				lli_write(prev, llp, desc->txd.phys | lms);
+				list_add_tail(&desc->desc_node, &first->tx_list);
 			}
 			prev = desc;
 			total_len += dlen;
@@ -871,8 +824,6 @@
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
 			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
 
-		data_width = dw->data_width[dwc->dst_master];
-
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
 			u32		len, dlen, mem;
@@ -880,17 +831,16 @@
 			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
 
-			mem_width = min_t(unsigned int,
-					  data_width, dwc_fast_ffs(mem | len));
+			mem_width = __ffs(data_width | mem | len);
 
 slave_sg_fromdev_fill_desc:
 			desc = dwc_desc_get(dwc);
 			if (!desc)
 				goto err_desc_get;
 
-			desc->lli.sar = reg;
-			desc->lli.dar = mem;
-			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
+			lli_write(desc, sar, reg);
+			lli_write(desc, dar, mem);
+			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
 			if ((len >> reg_width) > dwc->block_size) {
 				dlen = dwc->block_size << reg_width;
 				mem += dlen;
@@ -899,15 +849,14 @@
 				dlen = len;
 				len = 0;
 			}
-			desc->lli.ctlhi = dlen >> reg_width;
+			lli_write(desc, ctlhi, dlen >> reg_width);
 			desc->len = dlen;
 
 			if (!first) {
 				first = desc;
 			} else {
-				prev->lli.llp = desc->txd.phys;
-				list_add_tail(&desc->desc_node,
-						&first->tx_list);
+				lli_write(prev, llp, desc->txd.phys | lms);
+				list_add_tail(&desc->desc_node, &first->tx_list);
 			}
 			prev = desc;
 			total_len += dlen;
@@ -922,9 +871,10 @@
 
 	if (flags & DMA_PREP_INTERRUPT)
 		/* Trigger interrupt after last block */
-		prev->lli.ctllo |= DWC_CTLL_INT_EN;
+		lli_set(prev, ctllo, DWC_CTLL_INT_EN);
 
 	prev->lli.llp = 0;
+	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
 	first->total_len = total_len;
 
 	return &first->txd;
@@ -941,7 +891,7 @@
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma_slave *dws = param;
 
-	if (!dws || dws->dma_dev != chan->device->dev)
+	if (dws->dma_dev != chan->device->dev)
 		return false;
 
 	/* We have to copy data since dws can be temporary storage */
@@ -949,8 +899,8 @@
 	dwc->src_id = dws->src_id;
 	dwc->dst_id = dws->dst_id;
 
-	dwc->src_master = dws->src_master;
-	dwc->dst_master = dws->dst_master;
+	dwc->m_master = dws->m_master;
+	dwc->p_master = dws->p_master;
 
 	return true;
 }
@@ -1003,7 +953,7 @@
 	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
 		udelay(2);
 
-	dwc->paused = true;
+	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -1016,7 +966,7 @@
 
 	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
 
-	dwc->paused = false;
+	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
 }
 
 static int dwc_resume(struct dma_chan *chan)
@@ -1024,12 +974,10 @@
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
 	unsigned long		flags;
 
-	if (!dwc->paused)
-		return 0;
-
 	spin_lock_irqsave(&dwc->lock, flags);
 
-	dwc_chan_resume(dwc);
+	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
+		dwc_chan_resume(dwc);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -1065,16 +1013,37 @@
 	return 0;
 }
 
-static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
+static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
 {
+	struct dw_desc *desc;
+
+	list_for_each_entry(desc, &dwc->active_list, desc_node)
+		if (desc->txd.cookie == c)
+			return desc;
+
+	return NULL;
+}
+
+static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
+{
+	struct dw_desc *desc;
 	unsigned long flags;
 	u32 residue;
 
 	spin_lock_irqsave(&dwc->lock, flags);
 
-	residue = dwc->residue;
-	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
-		residue -= dwc_get_sent(dwc);
+	desc = dwc_find_desc(dwc, cookie);
+	if (desc) {
+		if (desc == dwc_first_active(dwc)) {
+			residue = desc->residue;
+			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
+				residue -= dwc_get_sent(dwc);
+		} else {
+			residue = desc->total_len;
+		}
+	} else {
+		residue = 0;
+	}
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
 	return residue;
@@ -1095,10 +1064,12 @@
 	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_COMPLETE)
-		dma_set_residue(txstate, dwc_get_residue(dwc));
+	if (ret == DMA_COMPLETE)
+		return ret;
 
-	if (dwc->paused && ret == DMA_IN_PROGRESS)
+	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
+
+	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
 		return DMA_PAUSED;
 
 	return ret;
@@ -1119,7 +1090,7 @@
 
 static void dw_dma_off(struct dw_dma *dw)
 {
-	int i;
+	unsigned int i;
 
 	dma_writel(dw, CFG, 0);
 
@@ -1133,7 +1104,7 @@
 		cpu_relax();
 
 	for (i = 0; i < dw->dma.chancnt; i++)
-		dw->chan[i].initialized = false;
+		clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
 }
 
 static void dw_dma_on(struct dw_dma *dw)
@@ -1145,9 +1116,6 @@
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
 	struct dw_dma		*dw = to_dw_dma(chan->device);
-	struct dw_desc		*desc;
-	int			i;
-	unsigned long		flags;
 
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
@@ -1165,53 +1133,26 @@
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
+	/*
+	 * We need controller-specific data to set up slave transfers.
+	 */
+	if (chan->private && !dw_dma_filter(chan, chan->private)) {
+		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+		return -EINVAL;
+	}
+
 	/* Enable controller here if needed */
 	if (!dw->in_use)
 		dw_dma_on(dw);
 	dw->in_use |= dwc->mask;
 
-	spin_lock_irqsave(&dwc->lock, flags);
-	i = dwc->descs_allocated;
-	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
-		dma_addr_t phys;
-
-		spin_unlock_irqrestore(&dwc->lock, flags);
-
-		desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
-		if (!desc)
-			goto err_desc_alloc;
-
-		memset(desc, 0, sizeof(struct dw_desc));
-
-		INIT_LIST_HEAD(&desc->tx_list);
-		dma_async_tx_descriptor_init(&desc->txd, chan);
-		desc->txd.tx_submit = dwc_tx_submit;
-		desc->txd.flags = DMA_CTRL_ACK;
-		desc->txd.phys = phys;
-
-		dwc_desc_put(dwc, desc);
-
-		spin_lock_irqsave(&dwc->lock, flags);
-		i = ++dwc->descs_allocated;
-	}
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
-
-	return i;
-
-err_desc_alloc:
-	dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
-
-	return i;
+	return 0;
 }
 
 static void dwc_free_chan_resources(struct dma_chan *chan)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
 	struct dw_dma		*dw = to_dw_dma(chan->device);
-	struct dw_desc		*desc, *_desc;
 	unsigned long		flags;
 	LIST_HEAD(list);
 
@@ -1224,9 +1165,15 @@
 	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	list_splice_init(&dwc->free_list, &list);
-	dwc->descs_allocated = 0;
-	dwc->initialized = false;
+
+	/* Clear custom channel configuration */
+	dwc->src_id = 0;
+	dwc->dst_id = 0;
+
+	dwc->m_master = 0;
+	dwc->p_master = 0;
+
+	clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
 
 	/* Disable interrupts */
 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1240,11 +1187,6 @@
 	if (!dw->in_use)
 		dw_dma_off(dw);
 
-	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
-		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
-		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
-	}
-
 	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
@@ -1322,6 +1264,7 @@
 	struct dw_cyclic_desc		*retval = NULL;
 	struct dw_desc			*desc;
 	struct dw_desc			*last = NULL;
+	u8				lms = DWC_LLP_LMS(dwc->m_master);
 	unsigned long			was_cyclic;
 	unsigned int			reg_width;
 	unsigned int			periods;
@@ -1375,9 +1318,6 @@
 
 	retval = ERR_PTR(-ENOMEM);
 
-	if (periods > NR_DESCS_PER_CHANNEL)
-		goto out_err;
-
 	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
 	if (!cdesc)
 		goto out_err;
@@ -1393,50 +1333,50 @@
 
 		switch (direction) {
 		case DMA_MEM_TO_DEV:
-			desc->lli.dar = sconfig->dst_addr;
-			desc->lli.sar = buf_addr + (period_len * i);
-			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
-					| DWC_CTLL_DST_WIDTH(reg_width)
-					| DWC_CTLL_SRC_WIDTH(reg_width)
-					| DWC_CTLL_DST_FIX
-					| DWC_CTLL_SRC_INC
-					| DWC_CTLL_INT_EN);
+			lli_write(desc, dar, sconfig->dst_addr);
+			lli_write(desc, sar, buf_addr + period_len * i);
+			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
+				| DWC_CTLL_DST_WIDTH(reg_width)
+				| DWC_CTLL_SRC_WIDTH(reg_width)
+				| DWC_CTLL_DST_FIX
+				| DWC_CTLL_SRC_INC
+				| DWC_CTLL_INT_EN));
 
-			desc->lli.ctllo |= sconfig->device_fc ?
-				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
-				DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+			lli_set(desc, ctllo, sconfig->device_fc ?
+					DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+					DWC_CTLL_FC(DW_DMA_FC_D_M2P));
 
 			break;
 		case DMA_DEV_TO_MEM:
-			desc->lli.dar = buf_addr + (period_len * i);
-			desc->lli.sar = sconfig->src_addr;
-			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
-					| DWC_CTLL_SRC_WIDTH(reg_width)
-					| DWC_CTLL_DST_WIDTH(reg_width)
-					| DWC_CTLL_DST_INC
-					| DWC_CTLL_SRC_FIX
-					| DWC_CTLL_INT_EN);
+			lli_write(desc, dar, buf_addr + period_len * i);
+			lli_write(desc, sar, sconfig->src_addr);
+			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
+				| DWC_CTLL_SRC_WIDTH(reg_width)
+				| DWC_CTLL_DST_WIDTH(reg_width)
+				| DWC_CTLL_DST_INC
+				| DWC_CTLL_SRC_FIX
+				| DWC_CTLL_INT_EN));
 
-			desc->lli.ctllo |= sconfig->device_fc ?
-				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
-				DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+			lli_set(desc, ctllo, sconfig->device_fc ?
+					DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+					DWC_CTLL_FC(DW_DMA_FC_D_P2M));
 
 			break;
 		default:
 			break;
 		}
 
-		desc->lli.ctlhi = (period_len >> reg_width);
+		lli_write(desc, ctlhi, period_len >> reg_width);
 		cdesc->desc[i] = desc;
 
 		if (last)
-			last->lli.llp = desc->txd.phys;
+			lli_write(last, llp, desc->txd.phys | lms);
 
 		last = desc;
 	}
 
 	/* Let's make a cyclic list */
-	last->lli.llp = cdesc->desc[0]->txd.phys;
+	lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
 
 	dev_dbg(chan2dev(&dwc->chan),
 			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
@@ -1467,7 +1407,7 @@
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
 	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
 	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
-	int			i;
+	unsigned int		i;
 	unsigned long		flags;
 
 	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
@@ -1491,32 +1431,38 @@
 	kfree(cdesc->desc);
 	kfree(cdesc);
 
+	dwc->cdesc = NULL;
+
 	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
 }
 EXPORT_SYMBOL(dw_dma_cyclic_free);
 
 /*----------------------------------------------------------------------*/
 
-int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+int dw_dma_probe(struct dw_dma_chip *chip)
 {
+	struct dw_dma_platform_data *pdata;
 	struct dw_dma		*dw;
 	bool			autocfg = false;
 	unsigned int		dw_params;
-	unsigned int		max_blk_size = 0;
+	unsigned int		i;
 	int			err;
-	int			i;
 
 	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
 	if (!dw)
 		return -ENOMEM;
 
+	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
+	if (!dw->pdata)
+		return -ENOMEM;
+
 	dw->regs = chip->regs;
 	chip->dw = dw;
 
 	pm_runtime_get_sync(chip->dev);
 
-	if (!pdata) {
-		dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
+	if (!chip->pdata) {
+		dw_params = dma_readl(dw, DW_PARAMS);
 		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
 
 		autocfg = dw_params >> DW_PARAMS_EN & 1;
@@ -1525,29 +1471,31 @@
 			goto err_pdata;
 		}
 
-		pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
-		if (!pdata) {
-			err = -ENOMEM;
-			goto err_pdata;
-		}
+		/* Reassign the platform data pointer */
+		pdata = dw->pdata;
 
 		/* Get hardware configuration parameters */
 		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
 		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
 		for (i = 0; i < pdata->nr_masters; i++) {
 			pdata->data_width[i] =
-				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
+				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
 		}
-		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
+		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
 
 		/* Fill platform data with the default values */
 		pdata->is_private = true;
 		pdata->is_memcpy = true;
 		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
 		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
-	} else if (pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
+	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
 		err = -EINVAL;
 		goto err_pdata;
+	} else {
+		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
+
+		/* Reassign the platform data pointer */
+		pdata = dw->pdata;
 	}
 
 	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
@@ -1557,11 +1505,6 @@
 		goto err_pdata;
 	}
 
-	/* Get hardware configuration parameters */
-	dw->nr_masters = pdata->nr_masters;
-	for (i = 0; i < dw->nr_masters; i++)
-		dw->data_width[i] = pdata->data_width[i];
-
 	/* Calculate all channel mask before DMA setup */
 	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
 
@@ -1608,7 +1551,6 @@
 
 		INIT_LIST_HEAD(&dwc->active_list);
 		INIT_LIST_HEAD(&dwc->queue);
-		INIT_LIST_HEAD(&dwc->free_list);
 
 		channel_clear_bit(dw, CH_EN, dwc->mask);
 
@@ -1616,11 +1558,9 @@
 
 		/* Hardware configuration */
 		if (autocfg) {
-			unsigned int dwc_params;
 			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
-			void __iomem *addr = chip->regs + r * sizeof(u32);
-
-			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
+			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
+			unsigned int dwc_params = dma_readl_native(addr);
 
 			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
 					   dwc_params);
@@ -1631,16 +1571,15 @@
 			 * up to 0x0a for 4095.
 			 */
 			dwc->block_size =
-				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
+				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
 			dwc->nollp =
 				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
 		} else {
 			dwc->block_size = pdata->block_size;
 
 			/* Check if channel supports multi block transfer */
-			channel_writel(dwc, LLP, 0xfffffffc);
-			dwc->nollp =
-				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
+			channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff));
+			dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0;
 			channel_writel(dwc, LLP, 0);
 		}
 	}
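
The dw_dmac changes above drop the fixed NR_DESCS_PER_CHANNEL free list and
allocate every descriptor from the channel's dma_pool on demand. A minimal
sketch of that pattern, with illustrative names rather than the driver's own:

    #include <linux/device.h>
    #include <linux/dmapool.h>
    #include <linux/gfp.h>
    #include <linux/types.h>

    struct example_lli {
            __le32 sar, dar, llp, ctllo, ctlhi;
    };

    static int example_pool_demo(struct device *dev)
    {
            struct dma_pool *pool;
            struct example_lli *lli;
            dma_addr_t phys;

            /* one coherent pool per controller, sized for the hardware LLI */
            pool = dma_pool_create("example_lli", dev, sizeof(*lli), 4, 0);
            if (!pool)
                    return -ENOMEM;

            /* GFP_NOWAIT because prep callbacks may run with interrupts off */
            lli = dma_pool_zalloc(pool, GFP_NOWAIT, &phys);
            if (!lli) {
                    dma_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* ... fill *lli and hand phys to the controller ... */

            dma_pool_free(pool, lli, phys);
            dma_pool_destroy(pool);
            return 0;
    }
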
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 358f968..0ae6c3b 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -17,8 +17,8 @@
 
 static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
+	const struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
 	struct dw_dma_chip *chip;
-	struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
 	int ret;
 
 	ret = pcim_enable_device(pdev);
@@ -49,8 +49,9 @@
 	chip->dev = &pdev->dev;
 	chip->regs = pcim_iomap_table(pdev)[0];
 	chip->irq = pdev->irq;
+	chip->pdata = pdata;
 
-	ret = dw_dma_probe(chip, pdata);
+	ret = dw_dma_probe(chip);
 	if (ret)
 		return ret;
 
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 26edbe3..5bda0eb 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -42,13 +42,13 @@
 
 	slave.src_id = dma_spec->args[0];
 	slave.dst_id = dma_spec->args[0];
-	slave.src_master = dma_spec->args[1];
-	slave.dst_master = dma_spec->args[2];
+	slave.m_master = dma_spec->args[1];
+	slave.p_master = dma_spec->args[2];
 
 	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
 		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
-		    slave.src_master >= dw->nr_masters ||
-		    slave.dst_master >= dw->nr_masters))
+		    slave.m_master >= dw->pdata->nr_masters ||
+		    slave.p_master >= dw->pdata->nr_masters))
 		return NULL;
 
 	dma_cap_zero(cap);
@@ -66,8 +66,8 @@
 		.dma_dev = dma_spec->dev,
 		.src_id = dma_spec->slave_id,
 		.dst_id = dma_spec->slave_id,
-		.src_master = 1,
-		.dst_master = 0,
+		.m_master = 0,
+		.p_master = 1,
 	};
 
 	return dw_dma_filter(chan, &slave);
@@ -103,6 +103,7 @@
 	struct device_node *np = pdev->dev.of_node;
 	struct dw_dma_platform_data *pdata;
 	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
+	u32 nr_masters;
 	u32 nr_channels;
 
 	if (!np) {
@@ -110,6 +111,11 @@
 		return NULL;
 	}
 
+	if (of_property_read_u32(np, "dma-masters", &nr_masters))
+		return NULL;
+	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
+		return NULL;
+
 	if (of_property_read_u32(np, "dma-channels", &nr_channels))
 		return NULL;
 
@@ -117,6 +123,7 @@
 	if (!pdata)
 		return NULL;
 
+	pdata->nr_masters = nr_masters;
 	pdata->nr_channels = nr_channels;
 
 	if (of_property_read_bool(np, "is_private"))
@@ -131,17 +138,13 @@
 	if (!of_property_read_u32(np, "block_size", &tmp))
 		pdata->block_size = tmp;
 
-	if (!of_property_read_u32(np, "dma-masters", &tmp)) {
-		if (tmp > DW_DMA_MAX_NR_MASTERS)
-			return NULL;
-
-		pdata->nr_masters = tmp;
-	}
-
-	if (!of_property_read_u32_array(np, "data_width", arr,
-				pdata->nr_masters))
-		for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
+		for (tmp = 0; tmp < nr_masters; tmp++)
 			pdata->data_width[tmp] = arr[tmp];
+	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
+		for (tmp = 0; tmp < nr_masters; tmp++)
+			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
+	}
 
 	return pdata;
 }
@@ -158,7 +161,7 @@
 	struct dw_dma_chip *chip;
 	struct device *dev = &pdev->dev;
 	struct resource *mem;
-	struct dw_dma_platform_data *pdata;
+	const struct dw_dma_platform_data *pdata;
 	int err;
 
 	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
@@ -183,6 +186,7 @@
 		pdata = dw_dma_parse_dt(pdev);
 
 	chip->dev = dev;
+	chip->pdata = pdata;
 
 	chip->clk = devm_clk_get(chip->dev, "hclk");
 	if (IS_ERR(chip->clk))
@@ -193,7 +197,7 @@
 
 	pm_runtime_enable(&pdev->dev);
 
-	err = dw_dma_probe(chip, pdata);
+	err = dw_dma_probe(chip);
 	if (err)
 		goto err_dw_dma_probe;
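
The device-tree parsing above now prefers a "data-width" property given in
bytes and only falls back to the older "data_width" property, which encodes
the width as a power-of-two exponent. A tiny sketch of the legacy conversion,
mirroring the BIT(arr[tmp] & 0x07) expression above; the values in the comment
are examples, not taken from a real device tree.

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* legacy "data_width": 0 -> 1 byte, 1 -> 2, 2 -> 4, 3 -> 8 bytes;
     * the new "data-width" property already holds the byte count itself. */
    static u32 legacy_data_width_to_bytes(u32 dt_value)
    {
            return BIT(dt_value & 0x07);
    }
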
 
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 0a50c18..4b7bd78 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -114,10 +114,6 @@
 #define dma_writel_native writel
 #endif
 
-/* To access the registers in early stage of probe */
-#define dma_read_byaddr(addr, name) \
-	dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
-
 /* Bitfields in DW_PARAMS */
 #define DW_PARAMS_NR_CHAN	8		/* number of channels */
 #define DW_PARAMS_NR_MASTER	11		/* number of AHB masters */
@@ -143,6 +139,10 @@
 	DW_DMA_MSIZE_256,
 };
 
+/* Bitfields in LLP */
+#define DWC_LLP_LMS(x)		((x) & 3)	/* list master select */
+#define DWC_LLP_LOC(x)		((x) & ~3)	/* next lli */
+
 /* Bitfields in CTL_LO */
 #define DWC_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
 #define DWC_CTLL_DST_WIDTH(n)	((n)<<1)	/* bytes per element */
@@ -216,6 +216,8 @@
 enum dw_dmac_flags {
 	DW_DMA_IS_CYCLIC = 0,
 	DW_DMA_IS_SOFT_LLP = 1,
+	DW_DMA_IS_PAUSED = 2,
+	DW_DMA_IS_INITIALIZED = 3,
 };
 
 struct dw_dma_chan {
@@ -224,8 +226,6 @@
 	u8				mask;
 	u8				priority;
 	enum dma_transfer_direction	direction;
-	bool				paused;
-	bool				initialized;
 
 	/* software emulation of the LLP transfers */
 	struct list_head	*tx_node_active;
@@ -236,8 +236,6 @@
 	unsigned long		flags;
 	struct list_head	active_list;
 	struct list_head	queue;
-	struct list_head	free_list;
-	u32			residue;
 	struct dw_cyclic_desc	*cdesc;
 
 	unsigned int		descs_allocated;
@@ -249,8 +247,8 @@
 	/* custom slave configuration */
 	u8			src_id;
 	u8			dst_id;
-	u8			src_master;
-	u8			dst_master;
+	u8			m_master;
+	u8			p_master;
 
 	/* configuration passed via .device_config */
 	struct dma_slave_config dma_sconfig;
@@ -283,9 +281,8 @@
 	u8			all_chan_mask;
 	u8			in_use;
 
-	/* hardware configuration */
-	unsigned char		nr_masters;
-	unsigned char		data_width[DW_DMA_MAX_NR_MASTERS];
+	/* platform data */
+	struct dw_dma_platform_data	*pdata;
 };
 
 static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
@@ -308,32 +305,51 @@
 	return container_of(ddev, struct dw_dma, dma);
 }
 
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+typedef __be32 __dw32;
+#else
+typedef __le32 __dw32;
+#endif
+
 /* LLI == Linked List Item; a.k.a. DMA block descriptor */
 struct dw_lli {
 	/* values that are not changed by hardware */
-	u32		sar;
-	u32		dar;
-	u32		llp;		/* chain to next lli */
-	u32		ctllo;
+	__dw32		sar;
+	__dw32		dar;
+	__dw32		llp;		/* chain to next lli */
+	__dw32		ctllo;
 	/* values that may get written back: */
-	u32		ctlhi;
+	__dw32		ctlhi;
 	/* sstat and dstat can snapshot peripheral register state.
 	 * silicon config may discard either or both...
 	 */
-	u32		sstat;
-	u32		dstat;
+	__dw32		sstat;
+	__dw32		dstat;
 };
 
 struct dw_desc {
 	/* FIRST values the hardware uses */
 	struct dw_lli			lli;
 
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+#define lli_set(d, reg, v)		((d)->lli.reg |= cpu_to_be32(v))
+#define lli_clear(d, reg, v)		((d)->lli.reg &= ~cpu_to_be32(v))
+#define lli_read(d, reg)		be32_to_cpu((d)->lli.reg)
+#define lli_write(d, reg, v)		((d)->lli.reg = cpu_to_be32(v))
+#else
+#define lli_set(d, reg, v)		((d)->lli.reg |= cpu_to_le32(v))
+#define lli_clear(d, reg, v)		((d)->lli.reg &= ~cpu_to_le32(v))
+#define lli_read(d, reg)		le32_to_cpu((d)->lli.reg)
+#define lli_write(d, reg, v)		((d)->lli.reg = cpu_to_le32(v))
+#endif
+
 	/* THEN values for driver housekeeping */
 	struct list_head		desc_node;
 	struct list_head		tx_list;
 	struct dma_async_tx_descriptor	txd;
 	size_t				len;
 	size_t				total_len;
+	u32				residue;
 };
 
 #define to_dw_desc(h)	list_entry(h, struct dw_desc, desc_node)
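
The new lli_read()/lli_write()/lli_set()/lli_clear() helpers above keep the
in-memory LLI fields in the byte order the controller expects, independent of
CPU endianness. A little-endian-only sketch of the same idea, with
illustrative names:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_lli {
            __le32 ctllo;                   /* hardware-visible field */
    };

    #define demo_lli_write(d, reg, v)       ((d)->reg = cpu_to_le32(v))
    #define demo_lli_set(d, reg, v)         ((d)->reg |= cpu_to_le32(v))
    #define demo_lli_read(d, reg)           le32_to_cpu((d)->reg)

    static void demo_fill(struct demo_lli *lli)
    {
            demo_lli_write(lli, ctllo, 0);
            demo_lli_set(lli, ctllo, 0x1); /* e.g. an interrupt-enable bit */
            /* demo_lli_read(lli, ctllo) is 1 on both LE and BE CPUs */
    }
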
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index ee3463e..8181ed1 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1238,6 +1238,7 @@
 	struct edma_desc *edesc;
 	dma_addr_t src_addr, dst_addr;
 	enum dma_slave_buswidth dev_width;
+	bool use_intermediate = false;
 	u32 burst;
 	int i, ret, nslots;
 
@@ -1279,8 +1280,21 @@
 	 * but the synchronization is difficult to achieve with Cyclic and
 	 * cannot be guaranteed, so we error out early.
 	 */
-	if (nslots > MAX_NR_SG)
-		return NULL;
+	if (nslots > MAX_NR_SG) {
+		/*
+		 * If the burst and period sizes are the same, we can put
+		 * the full buffer into a single period and activate
+		 * intermediate interrupts. This will produce interrupts
+		 * after each burst, which is also after each desired period.
+		 */
+		if (burst == period_len) {
+			period_len = buf_len;
+			nslots = 2;
+			use_intermediate = true;
+		} else {
+			return NULL;
+		}
+	}
 
 	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
 			GFP_ATOMIC);
@@ -1358,8 +1372,13 @@
 		/*
 		 * Enable period interrupt only if it is requested
 		 */
-		if (tx_flags & DMA_PREP_INTERRUPT)
+		if (tx_flags & DMA_PREP_INTERRUPT) {
 			edesc->pset[i].param.opt |= TCINTEN;
+
+			/* Also enable intermediate interrupts if necessary */
+			if (use_intermediate)
+				edesc->pset[i].param.opt |= ITCINTEN;
+		}
 	}
 
 	/* Place the cyclic channel to highest priority queue */
@@ -1518,8 +1537,17 @@
 
 	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
 
-	if (!edma_error_pending(ecc))
+	if (!edma_error_pending(ecc)) {
+		/*
+		 * The registers indicate no pending error event but the irq
+		 * handler has been called.
+		 * Ask eDMA to re-evaluate the error registers.
+		 */
+		dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
+			__func__);
+		edma_write(ecc, EDMA_EEVAL, 1);
 		return IRQ_NONE;
+	}
 
 	while (1) {
 		/* Event missed register(s) */
@@ -1570,32 +1598,6 @@
 	return IRQ_HANDLED;
 }
 
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
-	struct platform_device *tc_pdev;
-	int ret;
-
-	if (!IS_ENABLED(CONFIG_OF) || !tc)
-		return;
-
-	tc_pdev = of_find_device_by_node(tc->node);
-	if (!tc_pdev) {
-		pr_err("%s: TPTC device is not found\n", __func__);
-		return;
-	}
-	if (!pm_runtime_enabled(&tc_pdev->dev))
-		pm_runtime_enable(&tc_pdev->dev);
-
-	if (enable)
-		ret = pm_runtime_get_sync(&tc_pdev->dev);
-	else
-		ret = pm_runtime_put_sync(&tc_pdev->dev);
-
-	if (ret < 0)
-		pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
-		       enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
 /* Alloc channel resources */
 static int edma_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -1632,8 +1634,6 @@
 		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
 		echan->hw_triggered ? "HW" : "SW");
 
-	edma_tc_set_pm_state(echan->tc, true);
-
 	return 0;
 
 err_slot:
@@ -1670,7 +1670,6 @@
 		echan->alloced = false;
 	}
 
-	edma_tc_set_pm_state(echan->tc, false);
 	echan->tc = NULL;
 	echan->hw_triggered = false;
 
@@ -2417,10 +2416,8 @@
 	int i;
 
 	for (i = 0; i < ecc->num_channels; i++) {
-		if (echan[i].alloced) {
+		if (echan[i].alloced)
 			edma_setup_interrupt(&echan[i], false);
-			edma_tc_set_pm_state(echan[i].tc, false);
-		}
 	}
 
 	return 0;
@@ -2450,8 +2447,6 @@
 
 			/* Set up channel -> slot mapping for the entry slot */
 			edma_set_chmap(&echan[i], echan[i].slot[0]);
-
-			edma_tc_set_pm_state(echan[i].tc, true);
 		}
 	}
 
@@ -2475,7 +2470,8 @@
 
 static int edma_tptc_probe(struct platform_device *pdev)
 {
-	return 0;
+	pm_runtime_enable(&pdev->dev);
+	return pm_runtime_get_sync(&pdev->dev);
 }
 
 static struct platform_driver edma_tptc_driver = {
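
The edma change above works around the MAX_NR_SG slot limit for cyclic
transfers: when every period is exactly one burst, the whole buffer can be
programmed as a single period and the intermediate (per-burst) interrupt
stands in for the period interrupt. A condensed sketch of that decision; the
slot limit used here is a placeholder, not the driver's MAX_NR_SG value.

    #include <stdbool.h>
    #include <stddef.h>

    #define EXAMPLE_MAX_SLOTS       20      /* placeholder slot limit */

    /*
     * Returns true when the caller should collapse the request into one
     * period covering the whole buffer and enable ITCINTEN in addition to
     * TCINTEN; false means the normal setup either fits or must be refused.
     */
    static bool want_intermediate_irqs(unsigned int nslots, size_t burst,
                                       size_t buf_len, size_t *period_len)
    {
            if (nslots <= EXAMPLE_MAX_SLOTS)
                    return false;           /* multi-slot setup fits */
            if (burst != *period_len)
                    return false;           /* cannot collapse, reject */

            *period_len = buf_len;          /* one period, whole buffer */
            return true;
    }
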
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index aac85c3..a8828ed 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -462,13 +462,12 @@
 	struct fsl_desc_sw *desc;
 	dma_addr_t pdesc;
 
-	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
 	if (!desc) {
 		chan_dbg(chan, "out of memory for link descriptor\n");
 		return NULL;
 	}
 
-	memset(desc, 0, sizeof(*desc));
 	INIT_LIST_HEAD(&desc->tx_list);
 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
 	desc->async_tx.tx_submit = fsl_dma_tx_submit;
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index eef145e..f8c5cd5 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@
 
 	if (hsuc->direction == DMA_MEM_TO_DEV) {
 		bsr = config->dst_maxburst;
-		mtsr = config->dst_addr_width;
+		mtsr = config->src_addr_width;
 	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
 		bsr = config->src_maxburst;
-		mtsr = config->src_addr_width;
+		mtsr = config->dst_addr_width;
 	}
 
 	hsu_chan_disable(hsuc);
@@ -77,8 +77,8 @@
 	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);
 
 	/* Set descriptors */
-	count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC;
-	for (i = 0; i < count; i++) {
+	count = desc->nents - desc->active;
+	for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
 		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
 		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);
 
@@ -135,7 +135,7 @@
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
-	return sr;
+	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
 }
 
 irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -160,7 +160,7 @@
 		return IRQ_NONE;
 
 	/* Timeout IRQ, need wait some time, see Errata 2 */
-	if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY))
+	if (sr & HSU_CH_SR_DESCTO_ANY)
 		udelay(2);
 
 	sr &= ~HSU_CH_SR_DESCTO_ANY;
@@ -254,10 +254,13 @@
 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 {
 	struct hsu_dma_desc *desc = hsuc->desc;
-	size_t bytes = desc->length;
+	size_t bytes = 0;
 	int i;
 
-	i = desc->active % HSU_DMA_CHAN_NR_DESC;
+	for (i = desc->active; i < desc->nents; i++)
+		bytes += desc->sg[i].len;
+
+	i = HSU_DMA_CHAN_NR_DESC - 1;
 	do {
 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
 	} while (--i >= 0);
@@ -417,6 +420,8 @@
 
 	hsu->dma.dev = chip->dev;
 
+	dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);
+
 	ret = dma_async_device_register(&hsu->dma);
 	if (ret)
 		return ret;
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 578a8ee..486b023b 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
 #define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
 #define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
 #define HSU_CH_SR_CHE		BIT(15)
+#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY	(BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY	(BIT(31) | BIT(30))
 
 /* Bits in HSU_CH_CR */
 #define HSU_CH_CR_CHA		BIT(0)
@@ -55,6 +58,10 @@
 #define HSU_CH_DCR_CHEI		BIT(23)
 #define HSU_CH_DCR_CHTOI(x)	BIT(24 + (x))
 
+/* Bits in HSU_CH_DxTSR */
+#define HSU_CH_DxTSR_MASK	GENMASK(15, 0)
+#define HSU_CH_DxTSR_TSR(x)	((x) & HSU_CH_DxTSR_MASK)
+
 struct hsu_dma_sg {
 	dma_addr_t addr;
 	unsigned int len;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index efdee1a..d406056 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -690,12 +690,11 @@
 	/* allocate a completion writeback area */
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
-		dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
-			       GFP_KERNEL, &ioat_chan->completion_dma);
+		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
+				GFP_KERNEL, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
 
-	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
 	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
 	writel(((u64)ioat_chan->completion_dma) >> 32,
@@ -1074,6 +1073,7 @@
 	struct ioatdma_chan *ioat_chan;
 	bool is_raid_device = false;
 	int err;
+	u16 val16;
 
 	dma = &ioat_dma->dma_dev;
 	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
@@ -1173,6 +1173,17 @@
 	if (dca)
 		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
 
+	/* disable relaxed ordering */
+	err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
+	if (err)
+		return err;
+
+	/* clear relaxed ordering enable */
+	val16 &= ~IOAT_DEVCTRL_ROE;
+	err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
+	if (err)
+		return err;
+
 	return 0;
 }
 
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 4994a36..7053498 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -26,6 +26,13 @@
 #define IOAT_PCI_CHANERR_INT_OFFSET		0x180
 #define IOAT_PCI_CHANERRMASK_INT_OFFSET		0x184
 
+/* PCIe config registers */
+
+/* EXPCAPID + N */
+#define IOAT_DEVCTRL_OFFSET			0x8
+/* relaxed ordering enable */
+#define IOAT_DEVCTRL_ROE			0x10
+
 /* MMIO Device Registers */
 #define IOAT_CHANCNT_OFFSET			0x00	/*  8-bit */
 
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index e39457f..56f1fd6 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -364,13 +364,12 @@
 	struct mmp_pdma_desc_sw *desc;
 	dma_addr_t pdesc;
 
-	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
 	if (!desc) {
 		dev_err(chan->dev, "out of memory for link descriptor\n");
 		return NULL;
 	}
 
-	memset(desc, 0, sizeof(*desc));
 	INIT_LIST_HEAD(&desc->tx_list);
 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
 	/* each desc has submit */
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index aae76fb..ccadafa 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -3,6 +3,7 @@
  * Copyright (C) Semihalf 2009
  * Copyright (C) Ilya Yanok, Emcraft Systems 2010
  * Copyright (C) Alexander Popov, Promcontroller 2014
+ * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
  *
  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -26,18 +27,19 @@
  */
 
 /*
- * MPC512x and MPC8308 DMA driver. It supports
- * memory to memory data transfers (tested using dmatest module) and
- * data transfers between memory and peripheral I/O memory
- * by means of slave scatter/gather with these limitations:
- *  - chunked transfers (described by s/g lists with more than one item)
- *     are refused as long as proper support for scatter/gather is missing;
- *  - transfers on MPC8308 always start from software as this SoC appears
- *     not to have external request lines for peripheral flow control;
- *  - only peripheral devices with 4-byte FIFO access register are supported;
- *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
- *     source and destination addresses must be 4-byte aligned
- *     and transfer size must be aligned on (4 * maxburst) boundary;
+ * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
+ * (tested using dmatest module) and data transfers between memory and
+ * peripheral I/O memory by means of slave scatter/gather with these
+ * limitations:
+ *  - chunked transfers (described by s/g lists with more than one item) are
+ *     refused as long as proper support for scatter/gather is missing
+ *  - transfers on MPC8308 always start from software as this SoC does not have
+ *     external request lines for peripheral flow control
+ *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
+ *     MPC512x), and 32 bytes are supported, and, consequently, source
+ *     addresses and destination addresses must be aligned accordingly;
+ *     furthermore, for MPC512x SoCs, the transfer size must be aligned on
+ *     (chunk size * maxburst)
  */
 
 #include <linux/module.h>
@@ -213,8 +215,10 @@
 	/* Settings for access to peripheral FIFO */
 	dma_addr_t			src_per_paddr;
 	u32				src_tcd_nunits;
+	u8				swidth;
 	dma_addr_t			dst_per_paddr;
 	u32				dst_tcd_nunits;
+	u8				dwidth;
 
 	/* Lock for this structure */
 	spinlock_t			lock;
@@ -247,6 +251,7 @@
 static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
 {
 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
+
 	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
 }
 
@@ -254,9 +259,9 @@
  * Execute all queued DMA descriptors.
  *
  * Following requirements must be met while calling mpc_dma_execute():
- * 	a) mchan->lock is acquired,
- * 	b) mchan->active list is empty,
- * 	c) mchan->queued list contains at least one entry.
+ *	a) mchan->lock is acquired,
+ *	b) mchan->active list is empty,
+ *	c) mchan->queued list contains at least one entry.
  */
 static void mpc_dma_execute(struct mpc_dma_chan *mchan)
 {
@@ -446,20 +451,15 @@
 		if (es & MPC_DMA_DMAES_SAE)
 			dev_err(mdma->dma.dev, "- Source Address Error\n");
 		if (es & MPC_DMA_DMAES_SOE)
-			dev_err(mdma->dma.dev, "- Source Offset"
-						" Configuration Error\n");
+			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
 		if (es & MPC_DMA_DMAES_DAE)
-			dev_err(mdma->dma.dev, "- Destination Address"
-								" Error\n");
+			dev_err(mdma->dma.dev, "- Destination Address Error\n");
 		if (es & MPC_DMA_DMAES_DOE)
-			dev_err(mdma->dma.dev, "- Destination Offset"
-						" Configuration Error\n");
+			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
 		if (es & MPC_DMA_DMAES_NCE)
-			dev_err(mdma->dma.dev, "- NBytes/Citter"
-						" Configuration Error\n");
+			dev_err(mdma->dma.dev, "- NBytes/Citter Configuration Error\n");
 		if (es & MPC_DMA_DMAES_SGE)
-			dev_err(mdma->dma.dev, "- Scatter/Gather"
-						" Configuration Error\n");
+			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
 		if (es & MPC_DMA_DMAES_SBE)
 			dev_err(mdma->dma.dev, "- Source Bus Error\n");
 		if (es & MPC_DMA_DMAES_DBE)
@@ -518,8 +518,8 @@
 	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
 		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
 		if (!mdesc) {
-			dev_notice(mdma->dma.dev, "Memory allocation error. "
-					"Allocated only %u descriptors\n", i);
+			dev_notice(mdma->dma.dev,
+				"Memory allocation error. Allocated only %u descriptors\n", i);
 			break;
 		}
 
@@ -684,6 +684,15 @@
 	return &mdesc->desc;
 }
 
+inline u8 buswidth_to_dmatsize(u8 buswidth)
+{
+	u8 res;
+
+	for (res = 0; buswidth > 1; buswidth /= 2)
+		res++;
+	return res;
+}
+
 static struct dma_async_tx_descriptor *
 mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
@@ -742,39 +751,54 @@
 
 		memset(tcd, 0, sizeof(struct mpc_dma_tcd));
 
-		if (!IS_ALIGNED(sg_dma_address(sg), 4))
-			goto err_prep;
-
 		if (direction == DMA_DEV_TO_MEM) {
 			tcd->saddr = per_paddr;
 			tcd->daddr = sg_dma_address(sg);
+
+			if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
+				goto err_prep;
+
 			tcd->soff = 0;
-			tcd->doff = 4;
+			tcd->doff = mchan->dwidth;
 		} else {
 			tcd->saddr = sg_dma_address(sg);
 			tcd->daddr = per_paddr;
-			tcd->soff = 4;
+
+			if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
+				goto err_prep;
+
+			tcd->soff = mchan->swidth;
 			tcd->doff = 0;
 		}
 
-		tcd->ssize = MPC_DMA_TSIZE_4;
-		tcd->dsize = MPC_DMA_TSIZE_4;
+		tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
+		tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);
 
-		len = sg_dma_len(sg);
-		tcd->nbytes = tcd_nunits * 4;
-		if (!IS_ALIGNED(len, tcd->nbytes))
-			goto err_prep;
+		if (mdma->is_mpc8308) {
+			tcd->nbytes = sg_dma_len(sg);
+			if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
+				goto err_prep;
 
-		iter = len / tcd->nbytes;
-		if (iter >= 1 << 15) {
-			/* len is too big */
-			goto err_prep;
+			/* No major loops for MPC8308 */
+			tcd->biter = 1;
+			tcd->citer = 1;
+		} else {
+			len = sg_dma_len(sg);
+			tcd->nbytes = tcd_nunits * tcd->ssize;
+			if (!IS_ALIGNED(len, tcd->nbytes))
+				goto err_prep;
+
+			iter = len / tcd->nbytes;
+			if (iter >= 1 << 15) {
+				/* len is too big */
+				goto err_prep;
+			}
+			/* citer_linkch contains the high bits of iter */
+			tcd->biter = iter & 0x1ff;
+			tcd->biter_linkch = iter >> 9;
+			tcd->citer = tcd->biter;
+			tcd->citer_linkch = tcd->biter_linkch;
 		}
-		/* citer_linkch contains the high bits of iter */
-		tcd->biter = iter & 0x1ff;
-		tcd->biter_linkch = iter >> 9;
-		tcd->citer = tcd->biter;
-		tcd->citer_linkch = tcd->biter_linkch;
 
 		tcd->e_sg = 0;
 		tcd->d_req = 1;
@@ -796,40 +820,62 @@
 	return NULL;
 }
 
+inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
+{
+	switch (buswidth) {
+	case 16:
+		if (is_mpc8308)
+			return false;
+	case 1:
+	case 2:
+	case 4:
+	case 32:
+		break;
+	default:
+		return false;
+	}
+
+	return true;
+}
+
 static int mpc_dma_device_config(struct dma_chan *chan,
 				 struct dma_slave_config *cfg)
 {
 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
 	unsigned long flags;
 
 	/*
 	 * Software constraints:
-	 *  - only transfers between a peripheral device and
-	 *     memory are supported;
-	 *  - only peripheral devices with 4-byte FIFO access register
-	 *     are supported;
-	 *  - minimal transfer chunk is 4 bytes and consequently
-	 *     source and destination addresses must be 4-byte aligned
-	 *     and transfer size must be aligned on (4 * maxburst)
-	 *     boundary;
-	 *  - during the transfer RAM address is being incremented by
-	 *     the size of minimal transfer chunk;
-	 *  - peripheral port's address is constant during the transfer.
+	 *  - only transfers between a peripheral device and memory are
+	 *     supported
+	 *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
+	 *     are supported, and, consequently, source addresses and
+	 *     destination addresses must be aligned accordingly; furthermore,
+	 *     for MPC512x SoCs, the transfer size must be aligned on (chunk
+	 *     size * maxburst)
+	 *  - during the transfer, the RAM address is incremented by the size
+	 *     of transfer chunk
+	 *  - the peripheral port's address is constant during the transfer.
 	 */
 
-	if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-	    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-	    !IS_ALIGNED(cfg->src_addr, 4) ||
-	    !IS_ALIGNED(cfg->dst_addr, 4)) {
+	if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
+	    !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
 		return -EINVAL;
 	}
 
+	if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
+	    !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
+		return -EINVAL;
+
 	spin_lock_irqsave(&mchan->lock, flags);
 
 	mchan->src_per_paddr = cfg->src_addr;
 	mchan->src_tcd_nunits = cfg->src_maxburst;
+	mchan->swidth = cfg->src_addr_width;
 	mchan->dst_per_paddr = cfg->dst_addr;
 	mchan->dst_tcd_nunits = cfg->dst_maxburst;
+	mchan->dwidth = cfg->dst_addr_width;
 
 	/* Apply defaults */
 	if (mchan->src_tcd_nunits == 0)
@@ -875,7 +921,6 @@
 
 	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
 	if (!mdma) {
-		dev_err(dev, "Memory exhausted!\n");
 		retval = -ENOMEM;
 		goto err;
 	}
@@ -999,7 +1044,8 @@
 		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
 	} else {
 		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
-					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+						MPC_DMA_DMACR_ERGA |
+						MPC_DMA_DMACR_ERCA);
 
 		/* Disable hardware DMA requests */
 		out_be32(&mdma->regs->dmaerqh, 0);
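The two helpers added to mpc512x_dma.c are easy to sanity-check on their own: buswidth_to_dmatsize() computes log2 of the access width (the TCD transfer-size field is an exponent), and is_buswidth_valid() accepts 1, 2, 4, 16 and 32 bytes, rejecting 16 on MPC8308 through an intentional switch fall-through. A standalone sketch of the same logic with a small self-test (the test values are assumptions, not taken from the driver):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint8_t buswidth_to_dmatsize(uint8_t buswidth)
{
	uint8_t res;

	for (res = 0; buswidth > 1; buswidth /= 2)
		res++;
	return res;			/* log2(buswidth) */
}

static bool is_buswidth_valid(uint8_t buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		if (is_mpc8308)
			return false;
		/* fall through */
	case 1:
	case 2:
	case 4:
	case 32:
		break;
	default:
		return false;
	}

	return true;
}

int main(void)
{
	assert(buswidth_to_dmatsize(4) == 2);	/* 4-byte access -> TSIZE 2 */
	assert(buswidth_to_dmatsize(32) == 5);
	assert(is_buswidth_valid(16, false) && !is_buswidth_valid(16, true));
	assert(!is_buswidth_valid(8, false));
	return 0;
}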
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 3922a5d..25d1dad 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -31,6 +31,12 @@
 #include "dmaengine.h"
 #include "mv_xor.h"
 
+enum mv_xor_type {
+	XOR_ORION,
+	XOR_ARMADA_38X,
+	XOR_ARMADA_37XX,
+};
+
 enum mv_xor_mode {
 	XOR_MODE_IN_REG,
 	XOR_MODE_IN_DESC,
@@ -477,7 +483,7 @@
 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
 		__func__, src_cnt, len, &dest, flags);
 
 	sw_desc = mv_chan_alloc_slot(mv_chan);
@@ -933,7 +939,7 @@
 static struct mv_xor_chan *
 mv_xor_channel_add(struct mv_xor_device *xordev,
 		   struct platform_device *pdev,
-		   int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
+		   int idx, dma_cap_mask_t cap_mask, int irq)
 {
 	int ret = 0;
 	struct mv_xor_chan *mv_chan;
@@ -945,7 +951,10 @@
 
 	mv_chan->idx = idx;
 	mv_chan->irq = irq;
-	mv_chan->op_in_desc = op_in_desc;
+	if (xordev->xor_type == XOR_ORION)
+		mv_chan->op_in_desc = XOR_MODE_IN_REG;
+	else
+		mv_chan->op_in_desc = XOR_MODE_IN_DESC;
 
 	dma_dev = &mv_chan->dmadev;
 
@@ -1085,6 +1094,33 @@
 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
 }
 
+static void
+mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
+{
+	void __iomem *base = xordev->xor_high_base;
+	u32 win_enable = 0;
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		writel(0, base + WINDOW_BASE(i));
+		writel(0, base + WINDOW_SIZE(i));
+		if (i < 4)
+			writel(0, base + WINDOW_REMAP_HIGH(i));
+	}
+	/*
+	 * For Armada 3700, open the default 4GB MBus window. The
+	 * DRAM-related configuration is done at the AXIS level.
+	 */
+	writel(0xffff0000, base + WINDOW_SIZE(0));
+	win_enable |= 1;
+	win_enable |= 3 << 16;
+
+	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
+	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
+	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
+}
+
 /*
  * Since this XOR driver is basically used only for RAID5, we don't
  * need to care about synchronizing ->suspend with DMA activity,
@@ -1129,6 +1165,11 @@
 			       XOR_INTR_MASK(mv_chan));
 	}
 
+	if (xordev->xor_type == XOR_ARMADA_37XX) {
+		mv_xor_conf_mbus_windows_a3700(xordev);
+		return 0;
+	}
+
 	dram = mv_mbus_dram_info();
 	if (dram)
 		mv_xor_conf_mbus_windows(xordev, dram);
@@ -1137,8 +1178,9 @@
 }
 
 static const struct of_device_id mv_xor_dt_ids[] = {
-	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
-	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
+	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
+	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
+	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
 	{},
 };
 
@@ -1152,7 +1194,6 @@
 	struct resource *res;
 	unsigned int max_engines, max_channels;
 	int i, ret;
-	int op_in_desc;
 
 	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
 
@@ -1180,12 +1221,30 @@
 
 	platform_set_drvdata(pdev, xordev);
 
+
+	/*
+	 * We need to know which type of XOR device we use before
+	 * setting up. In non-dt case it can only be the legacy one.
+	 */
+	xordev->xor_type = XOR_ORION;
+	if (pdev->dev.of_node) {
+		const struct of_device_id *of_id =
+			of_match_device(mv_xor_dt_ids,
+					&pdev->dev);
+
+		xordev->xor_type = (uintptr_t)of_id->data;
+	}
+
 	/*
 	 * (Re-)program MBUS remapping windows if we are asked to.
 	 */
-	dram = mv_mbus_dram_info();
-	if (dram)
-		mv_xor_conf_mbus_windows(xordev, dram);
+	if (xordev->xor_type == XOR_ARMADA_37XX) {
+		mv_xor_conf_mbus_windows_a3700(xordev);
+	} else {
+		dram = mv_mbus_dram_info();
+		if (dram)
+			mv_xor_conf_mbus_windows(xordev, dram);
+	}
 
 	/* Not all platforms can gate the clock, so it is not
 	 * an error if the clock does not exists.
@@ -1199,12 +1258,16 @@
 	 * order for async_tx to perform well. So we limit the number
 	 * of engines and channels so that we take into account this
 	 * constraint. Note that we also want to use channels from
-	 * separate engines when possible.
+	 * separate engines when possible. For the dual-CPU Armada 3700 SoC,
+	 * which has a single XOR engine, allow using both of its channels.
 	 */
 	max_engines = num_present_cpus();
-	max_channels = min_t(unsigned int,
-			     MV_XOR_MAX_CHANNELS,
-			     DIV_ROUND_UP(num_present_cpus(), 2));
+	if (xordev->xor_type == XOR_ARMADA_37XX)
+		max_channels = num_present_cpus();
+	else
+		max_channels = min_t(unsigned int,
+				     MV_XOR_MAX_CHANNELS,
+				     DIV_ROUND_UP(num_present_cpus(), 2));
 
 	if (mv_xor_engine_count >= max_engines)
 		return 0;
@@ -1212,15 +1275,11 @@
 	if (pdev->dev.of_node) {
 		struct device_node *np;
 		int i = 0;
-		const struct of_device_id *of_id =
-			of_match_device(mv_xor_dt_ids,
-					&pdev->dev);
 
 		for_each_child_of_node(pdev->dev.of_node, np) {
 			struct mv_xor_chan *chan;
 			dma_cap_mask_t cap_mask;
 			int irq;
-			op_in_desc = (int)of_id->data;
 
 			if (i >= max_channels)
 				continue;
@@ -1237,7 +1296,7 @@
 			}
 
 			chan = mv_xor_channel_add(xordev, pdev, i,
-						  cap_mask, irq, op_in_desc);
+						  cap_mask, irq);
 			if (IS_ERR(chan)) {
 				ret = PTR_ERR(chan);
 				irq_dispose_mapping(irq);
@@ -1266,8 +1325,7 @@
 			}
 
 			chan = mv_xor_channel_add(xordev, pdev, i,
-						  cd->cap_mask, irq,
-						  XOR_MODE_IN_REG);
+						  cd->cap_mask, irq);
 			if (IS_ERR(chan)) {
 				ret = PTR_ERR(chan);
 				goto err_channel_add;
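Instead of passing an op_in_desc flag down into mv_xor_channel_add(), the driver now records an mv_xor_type taken from the matched of_device_id and derives per-channel behaviour from it. Below is a small self-contained analog of that match-then-dispatch shape; the lookup table mirrors mv_xor_dt_ids, but the string matching only stands in for the real OF machinery:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum xor_type { XOR_ORION, XOR_ARMADA_38X, XOR_ARMADA_37XX };

struct compat_entry {
	const char *compatible;
	uintptr_t data;			/* mirrors of_device_id.data */
};

static const struct compat_entry xor_ids[] = {
	{ "marvell,orion-xor",       XOR_ORION },
	{ "marvell,armada-380-xor",  XOR_ARMADA_38X },
	{ "marvell,armada-3700-xor", XOR_ARMADA_37XX },
	{ NULL, 0 },
};

static enum xor_type match_xor_type(const char *compatible)
{
	const struct compat_entry *e;

	for (e = xor_ids; e->compatible; e++)
		if (!strcmp(e->compatible, compatible))
			return (enum xor_type)e->data;
	return XOR_ORION;		/* legacy / non-DT default, as in the probe */
}

int main(void)
{
	enum xor_type t = match_xor_type("marvell,armada-3700-xor");

	/* descriptor-based ops everywhere except the original Orion IP */
	printf("op_in_desc=%d\n", t != XOR_ORION);
	return 0;
}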
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c19fe30..bf56e08 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -85,6 +85,7 @@
 	void __iomem	     *xor_high_base;
 	struct clk	     *clk;
 	struct mv_xor_chan   *channels[MV_XOR_MAX_CHANNELS];
+	int		     xor_type;
 };
 
 /**
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 1e1f298..faae0bf 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -240,8 +240,9 @@
 	struct of_phandle_args	dma_spec;
 	struct of_dma		*ofdma;
 	struct dma_chan		*chan;
-	int			count, i;
+	int			count, i, start;
 	int			ret_no_channel = -ENODEV;
+	static atomic_t		last_index;
 
 	if (!np || !name) {
 		pr_err("%s: not enough information provided\n", __func__);
@@ -259,8 +260,15 @@
 		return ERR_PTR(-ENODEV);
 	}
 
+	/*
+	 * approximate an average distribution across multiple
+	 * entries with the same name
+	 */
+	start = atomic_inc_return(&last_index);
 	for (i = 0; i < count; i++) {
-		if (of_dma_match_channel(np, name, i, &dma_spec))
+		if (of_dma_match_channel(np, name,
+					 (i + start) % count,
+					 &dma_spec))
 			continue;
 
 		mutex_lock(&of_dma_lock);
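The of-dma change rotates the starting index of the channel search so that requests for the same name are spread over all matching "dmas" entries rather than always taking the first. The modular arithmetic can be shown on its own; here is a sketch using a C11 atomic in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int last_index;

/* Return the order in which 'count' entries would be probed this time. */
static void probe_order(int count, int *order)
{
	int start = atomic_fetch_add(&last_index, 1) + 1;  /* like atomic_inc_return() */
	int i;

	for (i = 0; i < count; i++)
		order[i] = (i + start) % count;
}

int main(void)
{
	int order[3];
	int call, i;

	for (call = 0; call < 3; call++) {
		probe_order(3, order);
		for (i = 0; i < 3; i++)
			printf("%d ", order[i]);
		printf("\n");	/* successive calls start at entry 1, 2, 0, ... */
	}
	return 0;
}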
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 43bd5ae..1e984e1 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -48,6 +48,7 @@
 	unsigned dma_sig;
 	bool cyclic;
 	bool paused;
+	bool running;
 
 	int dma_ch;
 	struct omap_desc *desc;
@@ -294,6 +295,8 @@
 
 	/* Enable channel */
 	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+	c->running = true;
 }
 
 static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@
 
 		omap_dma_chan_write(c, CLNK_CTRL, val);
 	}
+
+	c->running = false;
 }
 
 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@
 	struct omap_chan *c = to_omap_dma_chan(chan);
 	struct virt_dma_desc *vd;
 	enum dma_status ret;
-	uint32_t ccr;
 	unsigned long flags;
 
-	ccr = omap_dma_chan_read(c, CCR);
-	/* The channel is no longer active, handle the completion right away */
-	if (!(ccr & CCR_ENABLE))
-		omap_dma_callback(c->dma_ch, 0, c);
-
 	ret = dma_cookie_status(chan, cookie, txstate);
+
+	if (!c->paused && c->running) {
+		uint32_t ccr = omap_dma_chan_read(c, CCR);
+		/*
+		 * The channel is no longer active, set the return value
+		 * accordingly
+		 */
+		if (!(ccr & CCR_ENABLE))
+			ret = DMA_COMPLETE;
+	}
+
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
@@ -945,9 +955,7 @@
 	d->ccr = c->ccr;
 	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
 
-	d->cicr = CICR_DROP_IE;
-	if (tx_flags & DMA_PREP_INTERRUPT)
-		d->cicr |= CICR_FRAME_IE;
+	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
 
 	d->csdp = data_type;
 
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 77c1c44..e756a30c 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -117,6 +117,7 @@
 	/* protected by vc->lock */
 	struct pxad_phy		*phy;
 	struct dma_pool		*desc_pool;	/* Descriptors pool */
+	dma_cookie_t		bus_error;
 };
 
 struct pxad_device {
@@ -563,6 +564,7 @@
 			return;
 		}
 	}
+	chan->bus_error = 0;
 
 	/*
 	 * Program the descriptor's address into the DMA controller,
@@ -666,6 +668,7 @@
 	struct virt_dma_desc *vd, *tmp;
 	unsigned int dcsr;
 	unsigned long flags;
+	dma_cookie_t last_started = 0;
 
 	BUG_ON(!chan);
 
@@ -678,6 +681,7 @@
 		dev_dbg(&chan->vc.chan.dev->device,
 			"%s(): checking txd %p[%x]: completed=%d\n",
 			__func__, vd, vd->tx.cookie, is_desc_completed(vd));
+		last_started = vd->tx.cookie;
 		if (to_pxad_sw_desc(vd)->cyclic) {
 			vchan_cyclic_callback(vd);
 			break;
@@ -690,7 +694,12 @@
 		}
 	}
 
-	if (dcsr & PXA_DCSR_STOPSTATE) {
+	if (dcsr & PXA_DCSR_BUSERR) {
+		chan->bus_error = last_started;
+		phy_disable(phy);
+	}
+
+	if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
 		dev_dbg(&chan->vc.chan.dev->device,
 		"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
 			__func__,
@@ -1249,6 +1258,9 @@
 	struct pxad_chan *chan = to_pxad_chan(dchan);
 	enum dma_status ret;
 
+	if (cookie == chan->bus_error)
+		return DMA_ERROR;
+
 	ret = dma_cookie_status(dchan, cookie, txstate);
 	if (likely(txstate && (ret != DMA_ERROR)))
 		dma_set_residue(txstate, pxad_residue(chan, cookie));
@@ -1321,7 +1333,7 @@
 	return 0;
 }
 
-static const struct of_device_id const pxad_dt_ids[] = {
+static const struct of_device_id pxad_dt_ids[] = {
 	{ .compatible = "marvell,pdma-1.0", },
 	{}
 };
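pxa_dma now latches the cookie of the last descriptor that had been started when a bus error is raised, and tx_status() reports DMA_ERROR for exactly that cookie. The bookkeeping reduces to three steps: track the newest started cookie while walking the issued list, latch it when DCSR flags BUSERR, and compare it in the status query. A minimal sketch under those assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t dma_cookie_t;
enum dma_status { DMA_IN_PROGRESS, DMA_ERROR };

struct chan {
	dma_cookie_t bus_error;		/* 0 means "no error latched" */
};

/* Interrupt path: latch the cookie that was in flight when BUSERR fired. */
static void handle_irq(struct chan *c, bool buserr, dma_cookie_t last_started)
{
	if (buserr)
		c->bus_error = last_started;
}

static enum dma_status tx_status(const struct chan *c, dma_cookie_t cookie)
{
	if (cookie == c->bus_error)
		return DMA_ERROR;
	return DMA_IN_PROGRESS;		/* normally dma_cookie_status() decides */
}

int main(void)
{
	struct chan c = { .bus_error = 0 };

	handle_irq(&c, true, 42);
	printf("%d %d\n", tx_status(&c, 42), tx_status(&c, 41));
	return 0;
}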
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
index bfea699..4bfc38b 100644
--- a/drivers/dma/qcom/Makefile
+++ b/drivers/dma/qcom/Makefile
@@ -1,3 +1,5 @@
 obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
 obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
 hdma_mgmt-objs	 := hidma_mgmt.o hidma_mgmt_sys.o
+obj-$(CONFIG_QCOM_HIDMA) +=  hdma.o
+hdma-objs        := hidma_ll.o hidma.o hidma_dbg.o
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d5e0a9c..969b481 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -342,7 +342,7 @@
 
 #define BAM_DESC_FIFO_SIZE	SZ_32K
 #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
-#define BAM_MAX_DATA_SIZE	(SZ_32K - 8)
+#define BAM_FIFO_SIZE	(SZ_32K - 8)
 
 struct bam_chan {
 	struct virt_dma_chan vc;
@@ -387,6 +387,7 @@
 
 	/* execution environment ID, from DT */
 	u32 ee;
+	bool controlled_remotely;
 
 	const struct reg_offset_data *layout;
 
@@ -458,7 +459,7 @@
 	 */
 	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
 			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
-	writel_relaxed(BAM_DESC_FIFO_SIZE,
+	writel_relaxed(BAM_FIFO_SIZE,
 			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
 
 	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
@@ -604,7 +605,7 @@
 
 	/* calculate number of required entries */
 	for_each_sg(sgl, sg, sg_len, i)
-		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);
+		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
 
 	/* allocate enough room to accommodate the number of entries */
 	async_desc = kzalloc(sizeof(*async_desc) +
@@ -635,10 +636,10 @@
 			desc->addr = cpu_to_le32(sg_dma_address(sg) +
 						 curr_offset);
 
-			if (remainder > BAM_MAX_DATA_SIZE) {
-				desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE);
-				remainder -= BAM_MAX_DATA_SIZE;
-				curr_offset += BAM_MAX_DATA_SIZE;
+			if (remainder > BAM_FIFO_SIZE) {
+				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
+				remainder -= BAM_FIFO_SIZE;
+				curr_offset += BAM_FIFO_SIZE;
 			} else {
 				desc->size = cpu_to_le16(remainder);
 				remainder = 0;
@@ -801,13 +802,17 @@
 	if (srcs & P_IRQ)
 		tasklet_schedule(&bdev->task);
 
-	if (srcs & BAM_IRQ)
+	if (srcs & BAM_IRQ) {
 		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
 
-	/* don't allow reorder of the various accesses to the BAM registers */
-	mb();
+		/*
+		 * don't allow reorder of the various accesses to the BAM
+		 * registers
+		 */
+		mb();
 
-	writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
+		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
+	}
 
 	return IRQ_HANDLED;
 }
@@ -1038,6 +1043,9 @@
 	val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
 	bdev->num_channels = val & BAM_NUM_PIPES_MASK;
 
+	if (bdev->controlled_remotely)
+		return 0;
+
 	/* s/w reset bam */
 	/* after reset all pipes are disabled and idle */
 	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
@@ -1125,6 +1133,9 @@
 		return ret;
 	}
 
+	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
+						"qcom,controlled-remotely");
+
 	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
 	if (IS_ERR(bdev->bamclk))
 		return PTR_ERR(bdev->bamclk);
@@ -1163,7 +1174,7 @@
 	/* set max dma segment size */
 	bdev->common.dev = bdev->dev;
 	bdev->common.dev->dma_parms = &bdev->dma_parms;
-	ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
+	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
 	if (ret) {
 		dev_err(bdev->dev, "cannot set maximum segment size\n");
 		goto err_bam_channel_exit;
@@ -1234,6 +1245,9 @@
 		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
 		tasklet_kill(&bdev->channels[i].vc.task);
 
+		if (!bdev->channels[i].fifo_virt)
+			continue;
+
 		dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
 			    bdev->channels[i].fifo_virt,
 			    bdev->channels[i].fifo_phys);
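The BAM_MAX_DATA_SIZE -> BAM_FIFO_SIZE rename makes it explicit that one hardware descriptor covers at most the FIFO size, so each scatterlist entry is pre-counted with DIV_ROUND_UP() and then split into FIFO-sized pieces by the remainder loop above. An illustrative userspace version of that split, checking that the loop emits exactly the pre-counted number of descriptors:

#include <stdint.h>
#include <stdio.h>

#define FIFO_SIZE	(32 * 1024 - 8)		/* mirrors BAM_FIFO_SIZE */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Emit (offset, size) descriptors covering 'len' bytes, FIFO_SIZE at a time. */
static unsigned int split_into_descs(uint32_t len)
{
	uint32_t remainder = len, offset = 0;
	unsigned int emitted = 0;

	while (remainder) {
		uint32_t size = remainder > FIFO_SIZE ? FIFO_SIZE : remainder;

		printf("desc %u: offset=%u size=%u\n", emitted, offset, size);
		offset += size;
		remainder -= size;
		emitted++;
	}
	return emitted;
}

int main(void)
{
	uint32_t len = 100000;

	/* the driver pre-counts the same number with DIV_ROUND_UP() */
	printf("expect %u descriptors\n", DIV_ROUND_UP(len, FIFO_SIZE));
	return split_into_descs(len) == DIV_ROUND_UP(len, FIFO_SIZE) ? 0 : 1;
}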
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index cccc78e..41b5c6d 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -1,7 +1,7 @@
 /*
  * Qualcomm Technologies HIDMA DMA engine interface
  *
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -404,7 +404,7 @@
 	spin_unlock_irqrestore(&mchan->lock, irqflags);
 
 	/* this suspends the existing transfer */
-	rc = hidma_ll_pause(dmadev->lldev);
+	rc = hidma_ll_disable(dmadev->lldev);
 	if (rc) {
 		dev_err(dmadev->ddev.dev, "channel did not pause\n");
 		goto out;
@@ -427,7 +427,7 @@
 		list_move(&mdesc->node, &mchan->free);
 	}
 
-	rc = hidma_ll_resume(dmadev->lldev);
+	rc = hidma_ll_enable(dmadev->lldev);
 out:
 	pm_runtime_mark_last_busy(dmadev->ddev.dev);
 	pm_runtime_put_autosuspend(dmadev->ddev.dev);
@@ -488,7 +488,7 @@
 	dmadev = to_hidma_dev(mchan->chan.device);
 	if (!mchan->paused) {
 		pm_runtime_get_sync(dmadev->ddev.dev);
-		if (hidma_ll_pause(dmadev->lldev))
+		if (hidma_ll_disable(dmadev->lldev))
 			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
 		mchan->paused = true;
 		pm_runtime_mark_last_busy(dmadev->ddev.dev);
@@ -507,7 +507,7 @@
 	dmadev = to_hidma_dev(mchan->chan.device);
 	if (mchan->paused) {
 		pm_runtime_get_sync(dmadev->ddev.dev);
-		rc = hidma_ll_resume(dmadev->lldev);
+		rc = hidma_ll_enable(dmadev->lldev);
 		if (!rc)
 			mchan->paused = false;
 		else
@@ -530,6 +530,43 @@
 	return hidma_ll_inthandler(chirq, lldev);
 }
 
+static ssize_t hidma_show_values(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct hidma_dev *mdev = platform_get_drvdata(pdev);
+
+	buf[0] = 0;
+
+	if (strcmp(attr->attr.name, "chid") == 0)
+		sprintf(buf, "%d\n", mdev->chidx);
+
+	return strlen(buf);
+}
+
+static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
+				    int mode)
+{
+	struct device_attribute *attrs;
+	char *name_copy;
+
+	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
+			     GFP_KERNEL);
+	if (!attrs)
+		return -ENOMEM;
+
+	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
+	if (!name_copy)
+		return -ENOMEM;
+
+	attrs->attr.name = name_copy;
+	attrs->attr.mode = mode;
+	attrs->show = hidma_show_values;
+	sysfs_attr_init(&attrs->attr);
+
+	return device_create_file(dev->ddev.dev, attrs);
+}
+
 static int hidma_probe(struct platform_device *pdev)
 {
 	struct hidma_dev *dmadev;
@@ -644,6 +681,8 @@
 
 	dmadev->irq = chirq;
 	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+	hidma_debug_init(dmadev);
+	hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
 	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
 	platform_set_drvdata(pdev, dmadev);
 	pm_runtime_mark_last_busy(dmadev->ddev.dev);
@@ -651,6 +690,7 @@
 	return 0;
 
 uninit:
+	hidma_debug_uninit(dmadev);
 	hidma_ll_uninit(dmadev->lldev);
 dmafree:
 	if (dmadev)
@@ -668,6 +708,7 @@
 	pm_runtime_get_sync(dmadev->ddev.dev);
 	dma_async_device_unregister(&dmadev->ddev);
 	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+	hidma_debug_uninit(dmadev);
 	hidma_ll_uninit(dmadev->lldev);
 	hidma_free(dmadev);
 
@@ -689,7 +730,6 @@
 	{.compatible = "qcom,hidma-1.0",},
 	{},
 };
-
 MODULE_DEVICE_TABLE(of, hidma_match);
 
 static struct platform_driver hidma_driver = {
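hidma_create_sysfs_entry() builds a device_attribute at runtime (devm-allocated struct and name) and funnels every read through one show callback that dispatches on the attribute name. Outside the kernel the same shape is just a name/formatter pair; a hedged analog where struct device, the "chid" name and the buffer size are illustrative:

#include <stdio.h>
#include <string.h>

struct attribute {
	const char *name;
};

struct device {
	int chidx;			/* stands in for hidma_dev->chidx */
};

/* One formatter serving several attributes, selected by name. */
static size_t show_values(struct device *dev, const struct attribute *attr,
			  char *buf, size_t len)
{
	buf[0] = '\0';

	if (strcmp(attr->name, "chid") == 0)
		snprintf(buf, len, "%d\n", dev->chidx);

	return strlen(buf);
}

int main(void)
{
	struct device d = { .chidx = 3 };
	struct attribute chid = { .name = "chid" };
	char buf[16];

	show_values(&d, &chid, buf, sizeof(buf));
	fputs(buf, stdout);		/* prints 3 */
	return 0;
}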
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index 231e306..db413a5 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -1,7 +1,7 @@
 /*
  * Qualcomm Technologies HIDMA data structures
  *
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,32 +20,29 @@
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
 
-#define TRE_SIZE			32 /* each TRE is 32 bytes  */
-#define TRE_CFG_IDX			0
-#define TRE_LEN_IDX			1
-#define TRE_SRC_LOW_IDX		2
-#define TRE_SRC_HI_IDX			3
-#define TRE_DEST_LOW_IDX		4
-#define TRE_DEST_HI_IDX		5
-
-struct hidma_tx_status {
-	u8 err_info;			/* error record in this transfer    */
-	u8 err_code;			/* completion code		    */
-};
+#define HIDMA_TRE_SIZE			32 /* each TRE is 32 bytes  */
+#define HIDMA_TRE_CFG_IDX		0
+#define HIDMA_TRE_LEN_IDX		1
+#define HIDMA_TRE_SRC_LOW_IDX		2
+#define HIDMA_TRE_SRC_HI_IDX		3
+#define HIDMA_TRE_DEST_LOW_IDX		4
+#define HIDMA_TRE_DEST_HI_IDX		5
 
 struct hidma_tre {
 	atomic_t allocated;		/* if this channel is allocated	    */
 	bool queued;			/* flag whether this is pending     */
 	u16 status;			/* status			    */
-	u32 chidx;			/* index of the tre		    */
+	u32 idx;			/* index of the tre		    */
 	u32 dma_sig;			/* signature of the tre		    */
 	const char *dev_name;		/* name of the device		    */
 	void (*callback)(void *data);	/* requester callback		    */
 	void *data;			/* Data associated with this channel*/
 	struct hidma_lldev *lldev;	/* lldma device pointer		    */
-	u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy        */
+	u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy  */
 	u32 tre_index;			/* the offset where this was written*/
 	u32 int_flags;			/* interrupt flags		    */
+	u8 err_info;			/* error record in this transfer    */
+	u8 err_code;			/* completion code		    */
 };
 
 struct hidma_lldev {
@@ -61,22 +58,21 @@
 	void __iomem *evca;		/* Event Channel address          */
 	struct hidma_tre
 		**pending_tre_list;	/* Pointers to pending TREs	  */
-	struct hidma_tx_status
-		*tx_status_list;	/* Pointers to pending TREs status*/
 	s32 pending_tre_count;		/* Number of TREs pending	  */
 
 	void *tre_ring;			/* TRE ring			  */
-	dma_addr_t tre_ring_handle;	/* TRE ring to be shared with HW  */
+	dma_addr_t tre_dma;		/* TRE ring to be shared with HW  */
 	u32 tre_ring_size;		/* Byte size of the ring	  */
 	u32 tre_processed_off;		/* last processed TRE		  */
 
 	void *evre_ring;		/* EVRE ring			   */
-	dma_addr_t evre_ring_handle;	/* EVRE ring to be shared with HW  */
+	dma_addr_t evre_dma;		/* EVRE ring to be shared with HW  */
 	u32 evre_ring_size;		/* Byte size of the ring	   */
 	u32 evre_processed_off;		/* last processed EVRE		   */
 
 	u32 tre_write_offset;           /* TRE write location              */
 	struct tasklet_struct task;	/* task delivering notifications   */
+	struct tasklet_struct rst_task;	/* task to reset HW                */
 	DECLARE_KFIFO_PTR(handoff_fifo,
 		struct hidma_tre *);    /* pending TREs FIFO               */
 };
@@ -145,8 +141,8 @@
 bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
 void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
 void hidma_ll_start(struct hidma_lldev *llhndl);
-int hidma_ll_pause(struct hidma_lldev *llhndl);
-int hidma_ll_resume(struct hidma_lldev *llhndl);
+int hidma_ll_disable(struct hidma_lldev *lldev);
+int hidma_ll_enable(struct hidma_lldev *llhndl);
 void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
 	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
 int hidma_ll_setup(struct hidma_lldev *lldev);
@@ -157,4 +153,6 @@
 irqreturn_t hidma_ll_inthandler(int irq, void *arg);
 void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
 				u8 err_code);
+int hidma_debug_init(struct hidma_dev *dmadev);
+void hidma_debug_uninit(struct hidma_dev *dmadev);
 #endif
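The renamed HIDMA_TRE_*_IDX constants index 32-bit words inside a 32-byte TRE; the source and destination addresses live in them as low/high word pairs that hidma_ll_set_transfer_params() fills with lower_32_bits()/upper_32_bits() and hidma_dbg.c reassembles. The split/reassemble arithmetic in standalone form, with the index constants copied from the header and everything else assumed:

#include <assert.h>
#include <stdint.h>

#define HIDMA_TRE_SIZE		32	/* each TRE is 32 bytes */
#define HIDMA_TRE_CFG_IDX	0
#define HIDMA_TRE_LEN_IDX	1
#define HIDMA_TRE_SRC_LOW_IDX	2
#define HIDMA_TRE_SRC_HI_IDX	3
#define HIDMA_TRE_DEST_LOW_IDX	4
#define HIDMA_TRE_DEST_HI_IDX	5

static void set_src(uint32_t *tre, uint64_t src)
{
	tre[HIDMA_TRE_SRC_LOW_IDX] = (uint32_t)src;		/* lower_32_bits() */
	tre[HIDMA_TRE_SRC_HI_IDX]  = (uint32_t)(src >> 32);	/* upper_32_bits() */
}

static uint64_t get_src(const uint32_t *tre)
{
	return ((uint64_t)tre[HIDMA_TRE_SRC_HI_IDX] << 32) |
	       tre[HIDMA_TRE_SRC_LOW_IDX];
}

int main(void)
{
	uint32_t tre[HIDMA_TRE_SIZE / sizeof(uint32_t)] = { 0 };
	uint64_t addr = 0x123456789abcull;

	set_src(tre, addr);
	assert(get_src(tre) == addr);
	return 0;
}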
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
new file mode 100644
index 0000000..fa827e5
--- /dev/null
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -0,0 +1,217 @@
+/*
+ * Qualcomm Technologies HIDMA debug file
+ *
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/pm_runtime.h>
+
+#include "hidma.h"
+
+static void hidma_ll_chstats(struct seq_file *s, void *llhndl, u32 tre_ch)
+{
+	struct hidma_lldev *lldev = llhndl;
+	struct hidma_tre *tre;
+	u32 length;
+	dma_addr_t src_start;
+	dma_addr_t dest_start;
+	u32 *tre_local;
+
+	if (tre_ch >= lldev->nr_tres) {
+		dev_err(lldev->dev, "invalid TRE number in chstats:%d", tre_ch);
+		return;
+	}
+	tre = &lldev->trepool[tre_ch];
+	seq_printf(s, "------Channel %d -----\n", tre_ch);
+	seq_printf(s, "allocated=%d\n", atomic_read(&tre->allocated));
+	seq_printf(s, "queued = 0x%x\n", tre->queued);
+	seq_printf(s, "err_info = 0x%x\n", tre->err_info);
+	seq_printf(s, "err_code = 0x%x\n", tre->err_code);
+	seq_printf(s, "status = 0x%x\n", tre->status);
+	seq_printf(s, "idx = 0x%x\n", tre->idx);
+	seq_printf(s, "dma_sig = 0x%x\n", tre->dma_sig);
+	seq_printf(s, "dev_name=%s\n", tre->dev_name);
+	seq_printf(s, "callback=%p\n", tre->callback);
+	seq_printf(s, "data=%p\n", tre->data);
+	seq_printf(s, "tre_index = 0x%x\n", tre->tre_index);
+
+	tre_local = &tre->tre_local[0];
+	src_start = tre_local[HIDMA_TRE_SRC_LOW_IDX];
+	src_start = ((u64) (tre_local[HIDMA_TRE_SRC_HI_IDX]) << 32) + src_start;
+	dest_start = tre_local[HIDMA_TRE_DEST_LOW_IDX];
+	dest_start += ((u64) (tre_local[HIDMA_TRE_DEST_HI_IDX]) << 32);
+	length = tre_local[HIDMA_TRE_LEN_IDX];
+
+	seq_printf(s, "src=%pap\n", &src_start);
+	seq_printf(s, "dest=%pap\n", &dest_start);
+	seq_printf(s, "length = 0x%x\n", length);
+}
+
+static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
+{
+	struct hidma_lldev *lldev = llhndl;
+
+	seq_puts(s, "------Device -----\n");
+	seq_printf(s, "lldev init = 0x%x\n", lldev->initialized);
+	seq_printf(s, "trch_state = 0x%x\n", lldev->trch_state);
+	seq_printf(s, "evch_state = 0x%x\n", lldev->evch_state);
+	seq_printf(s, "chidx = 0x%x\n", lldev->chidx);
+	seq_printf(s, "nr_tres = 0x%x\n", lldev->nr_tres);
+	seq_printf(s, "trca=%p\n", lldev->trca);
+	seq_printf(s, "tre_ring=%p\n", lldev->tre_ring);
+	seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
+	seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
+	seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
+	seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+	seq_printf(s, "evca=%p\n", lldev->evca);
+	seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
+	seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
+	seq_printf(s, "evre_ring_size = 0x%x\n", lldev->evre_ring_size);
+	seq_printf(s, "evre_processed_off = 0x%x\n", lldev->evre_processed_off);
+	seq_printf(s, "tre_write_offset = 0x%x\n", lldev->tre_write_offset);
+}
+
+/*
+ * hidma_chan_stats: display HIDMA channel statistics
+ *
+ * Display the statistics for the current HIDMA virtual channel device.
+ */
+static int hidma_chan_stats(struct seq_file *s, void *unused)
+{
+	struct hidma_chan *mchan = s->private;
+	struct hidma_desc *mdesc;
+	struct hidma_dev *dmadev = mchan->dmadev;
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	seq_printf(s, "paused=%u\n", mchan->paused);
+	seq_printf(s, "dma_sig=%u\n", mchan->dma_sig);
+	seq_puts(s, "prepared\n");
+	list_for_each_entry(mdesc, &mchan->prepared, node)
+		hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+	seq_puts(s, "active\n");
+	list_for_each_entry(mdesc, &mchan->active, node)
+		hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+	seq_puts(s, "completed\n");
+	list_for_each_entry(mdesc, &mchan->completed, node)
+		hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+	hidma_ll_devstats(s, mchan->dmadev->lldev);
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+	return 0;
+}
+
+/*
+ * hidma_dma_info: display HIDMA device info
+ *
+ * Display the info for the current HIDMA device.
+ */
+static int hidma_dma_info(struct seq_file *s, void *unused)
+{
+	struct hidma_dev *dmadev = s->private;
+	resource_size_t sz;
+
+	seq_printf(s, "nr_descriptors=%d\n", dmadev->nr_descriptors);
+	seq_printf(s, "dev_trca=%p\n", &dmadev->dev_trca);
+	seq_printf(s, "dev_trca_phys=%pa\n", &dmadev->trca_resource->start);
+	sz = resource_size(dmadev->trca_resource);
+	seq_printf(s, "dev_trca_size=%pa\n", &sz);
+	seq_printf(s, "dev_evca=%p\n", &dmadev->dev_evca);
+	seq_printf(s, "dev_evca_phys=%pa\n", &dmadev->evca_resource->start);
+	sz = resource_size(dmadev->evca_resource);
+	seq_printf(s, "dev_evca_size=%pa\n", &sz);
+	return 0;
+}
+
+static int hidma_chan_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hidma_chan_stats, inode->i_private);
+}
+
+static int hidma_dma_info_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hidma_dma_info, inode->i_private);
+}
+
+static const struct file_operations hidma_chan_fops = {
+	.open = hidma_chan_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations hidma_dma_fops = {
+	.open = hidma_dma_info_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void hidma_debug_uninit(struct hidma_dev *dmadev)
+{
+	debugfs_remove_recursive(dmadev->debugfs);
+	debugfs_remove_recursive(dmadev->stats);
+}
+
+int hidma_debug_init(struct hidma_dev *dmadev)
+{
+	int rc = 0;
+	int chidx = 0;
+	struct list_head *position = NULL;
+
+	dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL);
+	if (!dmadev->debugfs) {
+		rc = -ENODEV;
+		return rc;
+	}
+
+	/* walk through the virtual channel list */
+	list_for_each(position, &dmadev->ddev.channels) {
+		struct hidma_chan *chan;
+
+		chan = list_entry(position, struct hidma_chan,
+				  chan.device_node);
+		sprintf(chan->dbg_name, "chan%d", chidx);
+		chan->debugfs = debugfs_create_dir(chan->dbg_name,
+						   dmadev->debugfs);
+		if (!chan->debugfs) {
+			rc = -ENOMEM;
+			goto cleanup;
+		}
+		chan->stats = debugfs_create_file("stats", S_IRUGO,
+						  chan->debugfs, chan,
+						  &hidma_chan_fops);
+		if (!chan->stats) {
+			rc = -ENOMEM;
+			goto cleanup;
+		}
+		chidx++;
+	}
+
+	dmadev->stats = debugfs_create_file("stats", S_IRUGO,
+					    dmadev->debugfs, dmadev,
+					    &hidma_dma_fops);
+	if (!dmadev->stats) {
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+
+	return 0;
+cleanup:
+	hidma_debug_uninit(dmadev);
+	return rc;
+}
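hidma_debug_init() above creates one debugfs directory per device, then a directory and a "stats" file per channel, and tears the partial hierarchy down through hidma_debug_uninit() if any step fails. A hedged sketch of that create-in-a-loop, single-cleanup-label shape, using plain allocations instead of debugfs so it runs anywhere:

#include <stdio.h>
#include <stdlib.h>

#define NCHAN 4

static void *chan_dir[NCHAN];

static void debug_uninit(void)
{
	int i;

	for (i = 0; i < NCHAN; i++) {
		free(chan_dir[i]);	/* stands in for debugfs_remove_recursive() */
		chan_dir[i] = NULL;
	}
}

static int debug_init(void)
{
	int i, rc = 0;

	for (i = 0; i < NCHAN; i++) {
		chan_dir[i] = malloc(64);	/* stands in for debugfs_create_dir() */
		if (!chan_dir[i]) {
			rc = -1;
			goto cleanup;		/* partial state torn down in one place */
		}
	}
	return 0;

cleanup:
	debug_uninit();
	return rc;
}

int main(void)
{
	int rc = debug_init();

	printf("init %s\n", rc ? "failed" : "ok");
	debug_uninit();
	return 0;
}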
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
new file mode 100644
index 0000000..f392900
--- /dev/null
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -0,0 +1,872 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine low level code
+ *
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/iopoll.h>
+#include <linux/kfifo.h>
+#include <linux/bitops.h>
+
+#include "hidma.h"
+
+#define HIDMA_EVRE_SIZE			16	/* each EVRE is 16 bytes */
+
+#define HIDMA_TRCA_CTRLSTS_REG			0x000
+#define HIDMA_TRCA_RING_LOW_REG		0x008
+#define HIDMA_TRCA_RING_HIGH_REG		0x00C
+#define HIDMA_TRCA_RING_LEN_REG		0x010
+#define HIDMA_TRCA_DOORBELL_REG		0x400
+
+#define HIDMA_EVCA_CTRLSTS_REG			0x000
+#define HIDMA_EVCA_INTCTRL_REG			0x004
+#define HIDMA_EVCA_RING_LOW_REG		0x008
+#define HIDMA_EVCA_RING_HIGH_REG		0x00C
+#define HIDMA_EVCA_RING_LEN_REG		0x010
+#define HIDMA_EVCA_WRITE_PTR_REG		0x020
+#define HIDMA_EVCA_DOORBELL_REG		0x400
+
+#define HIDMA_EVCA_IRQ_STAT_REG		0x100
+#define HIDMA_EVCA_IRQ_CLR_REG			0x108
+#define HIDMA_EVCA_IRQ_EN_REG			0x110
+
+#define HIDMA_EVRE_CFG_IDX			0
+
+#define HIDMA_EVRE_ERRINFO_BIT_POS		24
+#define HIDMA_EVRE_CODE_BIT_POS		28
+
+#define HIDMA_EVRE_ERRINFO_MASK		GENMASK(3, 0)
+#define HIDMA_EVRE_CODE_MASK			GENMASK(3, 0)
+
+#define HIDMA_CH_CONTROL_MASK			GENMASK(7, 0)
+#define HIDMA_CH_STATE_MASK			GENMASK(7, 0)
+#define HIDMA_CH_STATE_BIT_POS			0x8
+
+#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS	0
+#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS	1
+#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS	9
+#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS	10
+#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS	11
+#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS	14
+
+#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS)	| \
+		     BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	| \
+		     BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS)	| \
+		     BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)	| \
+		     BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS)	| \
+		     BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))
+
+#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size)	\
+do {								\
+	iter += size;						\
+	if (iter >= ring_size)					\
+		iter -= ring_size;				\
+} while (0)
+
+#define HIDMA_CH_STATE(val)	\
+	((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)
+
+#define HIDMA_ERR_INT_MASK				\
+	(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS)   |	\
+	 BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) |	\
+	 BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS)	    |	\
+	 BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS)    |	\
+	 BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))
+
+enum ch_command {
+	HIDMA_CH_DISABLE = 0,
+	HIDMA_CH_ENABLE = 1,
+	HIDMA_CH_SUSPEND = 2,
+	HIDMA_CH_RESET = 9,
+};
+
+enum ch_state {
+	HIDMA_CH_DISABLED = 0,
+	HIDMA_CH_ENABLED = 1,
+	HIDMA_CH_RUNNING = 2,
+	HIDMA_CH_SUSPENDED = 3,
+	HIDMA_CH_STOPPED = 4,
+};
+
+enum tre_type {
+	HIDMA_TRE_MEMCPY = 3,
+};
+
+enum err_code {
+	HIDMA_EVRE_STATUS_COMPLETE = 1,
+	HIDMA_EVRE_STATUS_ERROR = 4,
+};
+
+static int hidma_is_chan_enabled(int state)
+{
+	switch (state) {
+	case HIDMA_CH_ENABLED:
+	case HIDMA_CH_RUNNING:
+		return true;
+	default:
+		return false;
+	}
+}
+
+void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
+{
+	struct hidma_tre *tre;
+
+	if (tre_ch >= lldev->nr_tres) {
+		dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
+		return;
+	}
+
+	tre = &lldev->trepool[tre_ch];
+	if (atomic_read(&tre->allocated) != true) {
+		dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
+		return;
+	}
+
+	atomic_set(&tre->allocated, 0);
+}
+
+int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
+		     void (*callback)(void *data), void *data, u32 *tre_ch)
+{
+	unsigned int i;
+	struct hidma_tre *tre;
+	u32 *tre_local;
+
+	if (!tre_ch || !lldev)
+		return -EINVAL;
+
+	/* need to have at least one empty spot in the queue */
+	for (i = 0; i < lldev->nr_tres - 1; i++) {
+		if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
+			break;
+	}
+
+	if (i == (lldev->nr_tres - 1))
+		return -ENOMEM;
+
+	tre = &lldev->trepool[i];
+	tre->dma_sig = sig;
+	tre->dev_name = dev_name;
+	tre->callback = callback;
+	tre->data = data;
+	tre->idx = i;
+	tre->status = 0;
+	tre->queued = 0;
+	tre->err_code = 0;
+	tre->err_info = 0;
+	tre->lldev = lldev;
+	tre_local = &tre->tre_local[0];
+	tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
+	tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
+	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
+	*tre_ch = i;
+	if (callback)
+		callback(data);
+	return 0;
+}
+
+/*
+ * Multiple TREs may be queued and waiting in the pending queue.
+ */
+static void hidma_ll_tre_complete(unsigned long arg)
+{
+	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
+	struct hidma_tre *tre;
+
+	while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
+		/* call the user if it has been read by the hardware */
+		if (tre->callback)
+			tre->callback(tre->data);
+	}
+}
+
+static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
+				u8 err_info, u8 err_code)
+{
+	struct hidma_tre *tre;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lldev->lock, flags);
+	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
+	if (!tre) {
+		spin_unlock_irqrestore(&lldev->lock, flags);
+		dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
+			 tre_iterator / HIDMA_TRE_SIZE);
+		return -EINVAL;
+	}
+	lldev->pending_tre_list[tre->tre_index] = NULL;
+
+	/*
+	 * Keep track of pending TREs that SW is expecting to receive
+	 * from HW. We got one now. Decrement our counter.
+	 */
+	lldev->pending_tre_count--;
+	if (lldev->pending_tre_count < 0) {
+		dev_warn(lldev->dev, "tre count mismatch on completion");
+		lldev->pending_tre_count = 0;
+	}
+
+	spin_unlock_irqrestore(&lldev->lock, flags);
+
+	tre->err_info = err_info;
+	tre->err_code = err_code;
+	tre->queued = 0;
+
+	kfifo_put(&lldev->handoff_fifo, tre);
+	tasklet_schedule(&lldev->task);
+
+	return 0;
+}
+
+/*
+ * Called to handle the interrupt for the channel.
+ * Return a positive number if TRE or EVRE were consumed on this run.
+ * Return a positive number if there are pending TREs or EVREs.
+ * Return 0 if there is nothing to consume or no pending TREs/EVREs found.
+ */
+static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
+{
+	u32 evre_ring_size = lldev->evre_ring_size;
+	u32 tre_ring_size = lldev->tre_ring_size;
+	u32 err_info, err_code, evre_write_off;
+	u32 tre_iterator, evre_iterator;
+	u32 num_completed = 0;
+
+	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
+	tre_iterator = lldev->tre_processed_off;
+	evre_iterator = lldev->evre_processed_off;
+
+	if ((evre_write_off > evre_ring_size) ||
+	    (evre_write_off % HIDMA_EVRE_SIZE)) {
+		dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
+		return 0;
+	}
+
+	/*
+	 * By the time control reaches here the number of EVREs and TREs
+	 * may not match. Only consume the ones that hardware told us.
+	 */
+	while ((evre_iterator != evre_write_off)) {
+		u32 *current_evre = lldev->evre_ring + evre_iterator;
+		u32 cfg;
+
+		cfg = current_evre[HIDMA_EVRE_CFG_IDX];
+		err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
+		err_info &= HIDMA_EVRE_ERRINFO_MASK;
+		err_code =
+		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
+
+		if (hidma_post_completed(lldev, tre_iterator, err_info,
+					 err_code))
+			break;
+
+		HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+					 tre_ring_size);
+		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
+					 evre_ring_size);
+
+		/*
+		 * Read the new event descriptor written by the HW.
+		 * As we are processing the delivered events, other events
+		 * get queued to the SW for processing.
+		 */
+		evre_write_off =
+		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
+		num_completed++;
+	}
+
+	if (num_completed) {
+		u32 evre_read_off = (lldev->evre_processed_off +
+				     HIDMA_EVRE_SIZE * num_completed);
+		u32 tre_read_off = (lldev->tre_processed_off +
+				    HIDMA_TRE_SIZE * num_completed);
+
+		evre_read_off = evre_read_off % evre_ring_size;
+		tre_read_off = tre_read_off % tre_ring_size;
+
+		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
+
+		/* record the last processed tre offset */
+		lldev->tre_processed_off = tre_read_off;
+		lldev->evre_processed_off = evre_read_off;
+	}
+
+	return num_completed;
+}
+
+void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
+			       u8 err_code)
+{
+	u32 tre_iterator;
+	u32 tre_ring_size = lldev->tre_ring_size;
+	int num_completed = 0;
+	u32 tre_read_off;
+
+	tre_iterator = lldev->tre_processed_off;
+	while (lldev->pending_tre_count) {
+		if (hidma_post_completed(lldev, tre_iterator, err_info,
+					 err_code))
+			break;
+		HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+					 tre_ring_size);
+		num_completed++;
+	}
+	tre_read_off = (lldev->tre_processed_off +
+			HIDMA_TRE_SIZE * num_completed);
+
+	tre_read_off = tre_read_off % tre_ring_size;
+
+	/* record the last processed tre offset */
+	lldev->tre_processed_off = tre_read_off;
+}
+
+static int hidma_ll_reset(struct hidma_lldev *lldev)
+{
+	u32 val;
+	int ret;
+
+	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_RESET << 16;
+	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+	/*
+	 * Delay after reset to allow the DMA logic to quiesce:
+	 * poll the channel state every 1 ms, for up to 10 ms.
+	 */
+	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
+				 1000, 10000);
+	if (ret) {
+		dev_err(lldev->dev, "transfer channel did not reset\n");
+		return ret;
+	}
+
+	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_RESET << 16;
+	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+	/*
+	 * Delay after reset to allow the DMA logic to quiesce:
+	 * poll the channel state every 1 ms, for up to 10 ms.
+	 */
+	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+				 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
+				 1000, 10000);
+	if (ret)
+		return ret;
+
+	lldev->trch_state = HIDMA_CH_DISABLED;
+	lldev->evch_state = HIDMA_CH_DISABLED;
+	return 0;
+}
+
+/*
+ * Abort all transactions and perform a reset.
+ */
+static void hidma_ll_abort(unsigned long arg)
+{
+	struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
+	u8 err_code = HIDMA_EVRE_STATUS_ERROR;
+	u8 err_info = 0xFF;
+	int rc;
+
+	hidma_cleanup_pending_tre(lldev, err_info, err_code);
+
+	/* reset the channel for recovery */
+	rc = hidma_ll_setup(lldev);
+	if (rc) {
+		dev_err(lldev->dev, "channel reinitialize failed after error\n");
+		return;
+	}
+	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+}
+
+/*
+ * The interrupt handler for HIDMA will try to consume as many pending
+ * EVRE from the event queue as possible. Each EVRE has an associated
+ * TRE that holds the user interface parameters. EVRE reports the
+ * result of the transaction. Hardware guarantees ordering between EVREs
+ * and TREs. We use last processed offset to figure out which TRE is
+ * associated with which EVRE. If two TREs are consumed by HW, the EVREs
+ * are in order in the event ring.
+ *
+ * This handler will do a one pass for consuming EVREs. Other EVREs may
+ * be delivered while we are working. It will try to consume incoming
+ * EVREs one more time and return.
+ *
+ * For unprocessed EVREs, hardware will trigger another interrupt until
+ * all the interrupt bits are cleared.
+ *
+ * Hardware guarantees that by the time interrupt is observed, all data
+ * transactions in flight are delivered to their respective places and
+ * are visible to the CPU.
+ *
+ * On demand paging for IOMMU is only supported for PCIe via PRI
+ * (Page Request Interface) not for HIDMA. All other hardware instances
+ * including HIDMA work on pinned DMA addresses.
+ *
+ * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
+ * IOMMU latency will be built into the data movement time. By the time
+ * interrupt happens, IOMMU lookups + data movement has already taken place.
+ *
+ * While the first read in a typical PCI endpoint ISR traditionally flushes
+ * all outstanding requests to the destination, that concept does not apply
+ * to this hardware.
+ */
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+	struct hidma_lldev *lldev = arg;
+	u32 status;
+	u32 enable;
+	u32 cause;
+
+	/*
+	 * Fine tuned for this HW...
+	 *
+	 * This ISR has been designed for this particular hardware. Relaxed
+	 * read and write accessors are used for performance reasons due to
+	 * interrupt delivery guarantees. Do not copy this code blindly and
+	 * expect that to work.
+	 */
+	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+	cause = status & enable;
+
+	while (cause) {
+		if (cause & HIDMA_ERR_INT_MASK) {
+			dev_err(lldev->dev, "error 0x%x, resetting...\n",
+					cause);
+
+			/* Clear out pending interrupts */
+			writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+			tasklet_schedule(&lldev->rst_task);
+			goto out;
+		}
+
+		/*
+		 * Try to consume as many EVREs as possible.
+		 */
+		hidma_handle_tre_completion(lldev);
+
+		/* We consumed TREs or there are pending TREs or EVREs. */
+		writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+		/*
+		 * Another interrupt might have arrived while we are
+		 * processing this one. Read the new cause.
+		 */
+		status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+		enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+		cause = status & enable;
+	}
+
+out:
+	return IRQ_HANDLED;
+}
+
+int hidma_ll_enable(struct hidma_lldev *lldev)
+{
+	u32 val;
+	int ret;
+
+	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_ENABLE << 16;
+	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
+				 1000, 10000);
+	if (ret) {
+		dev_err(lldev->dev, "event channel did not get enabled\n");
+		return ret;
+	}
+
+	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_ENABLE << 16;
+	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+				 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
+				 1000, 10000);
+	if (ret) {
+		dev_err(lldev->dev, "transfer channel did not get enabled\n");
+		return ret;
+	}
+
+	lldev->trch_state = HIDMA_CH_ENABLED;
+	lldev->evch_state = HIDMA_CH_ENABLED;
+
+	return 0;
+}
+
+void hidma_ll_start(struct hidma_lldev *lldev)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&lldev->lock, irqflags);
+	writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
+	spin_unlock_irqrestore(&lldev->lock, irqflags);
+}
+
+bool hidma_ll_isenabled(struct hidma_lldev *lldev)
+{
+	u32 val;
+
+	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+	lldev->trch_state = HIDMA_CH_STATE(val);
+	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+	lldev->evch_state = HIDMA_CH_STATE(val);
+
+	/* both channels have to be enabled before calling this function */
+	if (hidma_is_chan_enabled(lldev->trch_state) &&
+	    hidma_is_chan_enabled(lldev->evch_state))
+		return true;
+
+	return false;
+}
+
+void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
+{
+	struct hidma_tre *tre;
+	unsigned long flags;
+
+	tre = &lldev->trepool[tre_ch];
+
+	/* copy the TRE into its location in the TRE ring */
+	spin_lock_irqsave(&lldev->lock, flags);
+	tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
+	lldev->pending_tre_list[tre->tre_index] = tre;
+	memcpy(lldev->tre_ring + lldev->tre_write_offset,
+			&tre->tre_local[0], HIDMA_TRE_SIZE);
+	tre->err_code = 0;
+	tre->err_info = 0;
+	tre->queued = 1;
+	lldev->pending_tre_count++;
+	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
+					% lldev->tre_ring_size;
+	spin_unlock_irqrestore(&lldev->lock, flags);
+}
+
+/*
+ * Note that even though we stop this channel, a transaction already in
+ * flight will still complete and invoke its callback. This call only
+ * prevents further requests from being issued.
+ */
+int hidma_ll_disable(struct hidma_lldev *lldev)
+{
+	u32 val;
+	int ret;
+
+	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+	lldev->evch_state = HIDMA_CH_STATE(val);
+	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+	lldev->trch_state = HIDMA_CH_STATE(val);
+
+	/* already suspended by this OS */
+	if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
+	    (lldev->evch_state == HIDMA_CH_SUSPENDED))
+		return 0;
+
+	/* already stopped by the manager */
+	if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
+	    (lldev->evch_state == HIDMA_CH_STOPPED))
+		return 0;
+
+	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_SUSPEND << 16;
+	writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+	/*
+	 * Wait for the suspend request to take effect: poll the channel
+	 * state every 1 ms, for up to 10 ms.
+	 */
+	ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
+				 1000, 10000);
+	if (ret)
+		return ret;
+
+	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+	val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+	val |= HIDMA_CH_SUSPEND << 16;
+	writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+	/*
+	 * Wait for the suspend request to take effect: poll the channel
+	 * state every 1 ms, for up to 10 ms.
+	 */
+	ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+				 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
+				 1000, 10000);
+	if (ret)
+		return ret;
+
+	lldev->trch_state = HIDMA_CH_SUSPENDED;
+	lldev->evch_state = HIDMA_CH_SUSPENDED;
+	return 0;
+}
+
+void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
+				  dma_addr_t src, dma_addr_t dest, u32 len,
+				  u32 flags)
+{
+	struct hidma_tre *tre;
+	u32 *tre_local;
+
+	if (tre_ch >= lldev->nr_tres) {
+		dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
+			tre_ch);
+		return;
+	}
+
+	tre = &lldev->trepool[tre_ch];
+	if (atomic_read(&tre->allocated) != true) {
+		dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
+			tre_ch);
+		return;
+	}
+
+	tre_local = &tre->tre_local[0];
+	tre_local[HIDMA_TRE_LEN_IDX] = len;
+	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
+	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
+	tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
+	tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
+	tre->int_flags = flags;
+}
+
+/*
+ * Called during initialization and after an error condition
+ * to restore hardware state.
+ */
+int hidma_ll_setup(struct hidma_lldev *lldev)
+{
+	int rc;
+	u64 addr;
+	u32 val;
+	u32 nr_tres = lldev->nr_tres;
+
+	lldev->pending_tre_count = 0;
+	lldev->tre_processed_off = 0;
+	lldev->evre_processed_off = 0;
+	lldev->tre_write_offset = 0;
+
+	/* disable interrupts */
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+	/* clear all pending interrupts */
+	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+	rc = hidma_ll_reset(lldev);
+	if (rc)
+		return rc;
+
+	/*
+	 * Clear all pending interrupts again.
+	 * Otherwise, we observe reset complete interrupts.
+	 */
+	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+	/* disable interrupts again after reset */
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+	addr = lldev->tre_dma;
+	writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
+	writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
+	writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);
+
+	addr = lldev->evre_dma;
+	writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
+	writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
+	writel(HIDMA_EVRE_SIZE * nr_tres,
+			lldev->evca + HIDMA_EVCA_RING_LEN_REG);
+
+	/* support IRQ only for now */
+	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
+	val &= ~0xF;
+	val |= 0x1;
+	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
+
+	/* clear all pending interrupts and enable them */
+	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+	return hidma_ll_enable(lldev);
+}
+
+struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
+				  void __iomem *trca, void __iomem *evca,
+				  u8 chidx)
+{
+	u32 required_bytes;
+	struct hidma_lldev *lldev;
+	int rc;
+	size_t sz;
+
+	if (!trca || !evca || !dev || !nr_tres)
+		return NULL;
+
+	/* need at least four TREs */
+	if (nr_tres < 4)
+		return NULL;
+
+	/* reserve one extra TRE slot */
+	nr_tres += 1;
+
+	lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
+	if (!lldev)
+		return NULL;
+
+	lldev->evca = evca;
+	lldev->trca = trca;
+	lldev->dev = dev;
+	sz = sizeof(struct hidma_tre);
+	lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
+	if (!lldev->trepool)
+		return NULL;
+
+	required_bytes = sizeof(lldev->pending_tre_list[0]);
+	lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
+					       GFP_KERNEL);
+	if (!lldev->pending_tre_list)
+		return NULL;
+
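+	/*
+	 * Allocate extra slack beyond the ring size so the ring can be
+	 * realigned to HIDMA_TRE_SIZE below.
+	 */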
+	sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
+	lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
+					      GFP_KERNEL);
+	if (!lldev->tre_ring)
+		return NULL;
+
+	memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
+	lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
+	lldev->nr_tres = nr_tres;
+
+	/* the TRE ring has to be TRE_SIZE aligned */
+	if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
+		u8 tre_ring_shift;
+
+		tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
+		tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
+		lldev->tre_dma += tre_ring_shift;
+		lldev->tre_ring += tre_ring_shift;
+	}
+
+	sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
+	lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
+					       GFP_KERNEL);
+	if (!lldev->evre_ring)
+		return NULL;
+
+	memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
+	lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;
+
+	/* the EVRE ring has to be EVRE_SIZE aligned */
+	if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
+		u8 evre_ring_shift;
+
+		evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
+		evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
+		lldev->evre_dma += evre_ring_shift;
+		lldev->evre_ring += evre_ring_shift;
+	}
+	lldev->nr_tres = nr_tres;
+	lldev->chidx = chidx;
+
+	sz = nr_tres * sizeof(struct hidma_tre *);
+	rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
+	if (rc)
+		return NULL;
+
+	rc = hidma_ll_setup(lldev);
+	if (rc)
+		return NULL;
+
+	spin_lock_init(&lldev->lock);
+	tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
+	tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
+	lldev->initialized = 1;
+	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+	return lldev;
+}
+
+int hidma_ll_uninit(struct hidma_lldev *lldev)
+{
+	u32 required_bytes;
+	int rc = 0;
+	u32 val;
+
+	if (!lldev)
+		return -ENODEV;
+
+	if (!lldev->initialized)
+		return 0;
+
+	lldev->initialized = 0;
+
+	required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
+	tasklet_kill(&lldev->task);
+	memset(lldev->trepool, 0, required_bytes);
+	lldev->trepool = NULL;
+	lldev->pending_tre_count = 0;
+	lldev->tre_write_offset = 0;
+
+	rc = hidma_ll_reset(lldev);
+
+	/*
+	 * Clear all pending interrupts again.
+	 * Otherwise, we observe reset complete interrupts.
+	 */
+	val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+	writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+	return rc;
+}
+
+enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
+{
+	enum dma_status ret = DMA_ERROR;
+	struct hidma_tre *tre;
+	unsigned long flags;
+	u8 err_code;
+
+	spin_lock_irqsave(&lldev->lock, flags);
+
+	tre = &lldev->trepool[tre_ch];
+	err_code = tre->err_code;
+
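+	/* map the EVRE status of this TRE onto a dmaengine status */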
+	if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
+		ret = DMA_COMPLETE;
+	else if (err_code & HIDMA_EVRE_STATUS_ERROR)
+		ret = DMA_ERROR;
+	else
+		ret = DMA_IN_PROGRESS;
+	spin_unlock_irqrestore(&lldev->lock, flags);
+
+	return ret;
+}
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index ef491b8..c0e3653 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -1,7 +1,7 @@
 /*
  * Qualcomm Technologies HIDMA DMA engine Management interface
  *
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,13 +17,14 @@
 #include <linux/acpi.h>
 #include <linux/of.h>
 #include <linux/property.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include "hidma_mgmt.h"
 
@@ -298,5 +299,109 @@
 	},
 };
 
-module_platform_driver(hidma_mgmt_driver);
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+static int object_counter;
+
+static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
+{
+	struct platform_device *pdev_parent = of_find_device_by_node(np);
+	struct platform_device_info pdevinfo;
+	struct of_phandle_args out_irq;
+	struct device_node *child;
+	struct resource *res;
+	const __be32 *cell;
+	int ret = 0, size, i, num;
+	u64 addr, addr_size;
+
+	for_each_available_child_of_node(np, child) {
+		struct resource *res_iter;
+		struct platform_device *new_pdev;
+
+		cell = of_get_property(child, "reg", &size);
+		if (!cell) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		size /= sizeof(*cell);
+		num = size /
+			(of_n_addr_cells(child) + of_n_size_cells(child)) + 1;
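+		/*
+		 * One MEM resource per reg tuple, plus one extra slot for
+		 * the event IRQ filled in below.
+		 */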
+
+		/* allocate a resource array */
+		res = kcalloc(num, sizeof(*res), GFP_KERNEL);
+		if (!res) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		/* read each reg value */
+		i = 0;
+		res_iter = res;
+		while (i < size) {
+			addr = of_read_number(&cell[i],
+					      of_n_addr_cells(child));
+			i += of_n_addr_cells(child);
+
+			addr_size = of_read_number(&cell[i],
+						   of_n_size_cells(child));
+			i += of_n_size_cells(child);
+
+			res_iter->start = addr;
+			res_iter->end = res_iter->start + addr_size - 1;
+			res_iter->flags = IORESOURCE_MEM;
+			res_iter++;
+		}
+
+		ret = of_irq_parse_one(child, 0, &out_irq);
+		if (ret)
+			goto out;
+
+		res_iter->start = irq_create_of_mapping(&out_irq);
+		res_iter->name = "hidma event irq";
+		res_iter->flags = IORESOURCE_IRQ;
+
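+		/*
+		 * Register a child platform device for this channel node,
+		 * carrying its MMIO ranges and the event IRQ.
+		 */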
+		memset(&pdevinfo, 0, sizeof(pdevinfo));
+		pdevinfo.fwnode = &child->fwnode;
+		pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
+		pdevinfo.name = child->name;
+		pdevinfo.id = object_counter++;
+		pdevinfo.res = res;
+		pdevinfo.num_res = num;
+		pdevinfo.data = NULL;
+		pdevinfo.size_data = 0;
+		pdevinfo.dma_mask = DMA_BIT_MASK(64);
+		new_pdev = platform_device_register_full(&pdevinfo);
+		if (!new_pdev) {
+			ret = -ENODEV;
+			goto out;
+		}
+		of_dma_configure(&new_pdev->dev, child);
+
+		kfree(res);
+		res = NULL;
+	}
+out:
+	kfree(res);
+
+	return ret;
+}
+#endif
+
+static int __init hidma_mgmt_init(void)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+	struct device_node *child;
+
+	for (child = of_find_matching_node(NULL, hidma_mgmt_match); child;
+	     child = of_find_matching_node(child, hidma_mgmt_match)) {
+		/* device tree based firmware: populate channel devices */
+		hidma_mgmt_of_populate_channels(child);
+		of_node_put(child);
+	}
+#endif
+	platform_driver_register(&hidma_mgmt_driver);
+
+	return 0;
+}
+module_init(hidma_mgmt_init);
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 2db12e4..5065ca4 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -146,6 +146,8 @@
 	struct dma_slave_config	cfg;
 	struct sun6i_pchan	*phy;
 	u8			port;
+	u8			irq_type;
+	bool			cyclic;
 };
 
 struct sun6i_dma_dev {
@@ -254,6 +256,30 @@
 	return addr_width >> 1;
 }
 
+static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
+{
+	struct sun6i_desc *txd = pchan->desc;
+	struct sun6i_dma_lli *lli;
+	size_t bytes;
+	dma_addr_t pos;
+
+	pos = readl(pchan->base + DMA_CHAN_LLI_ADDR);
+	bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);
+
+	if (pos == LLI_LAST_ITEM)
+		return bytes;
+
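+	/*
+	 * The LLI whose p_lli_next matches the hardware pointer is the one
+	 * in flight; add the lengths of the descriptors that have not
+	 * started yet.
+	 */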
+	for (lli = txd->v_lli; lli; lli = lli->v_lli_next) {
+		if (lli->p_lli_next == pos) {
+			for (lli = lli->v_lli_next; lli; lli = lli->v_lli_next)
+				bytes += lli->len;
+			break;
+		}
+	}
+
+	return bytes;
+}
+
 static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
 			       struct sun6i_dma_lli *next,
 			       dma_addr_t next_phy,
@@ -276,45 +302,6 @@
 	return next;
 }
 
-static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli,
-				    dma_addr_t src,
-				    dma_addr_t dst, u32 len,
-				    struct dma_slave_config *config)
-{
-	u8 src_width, dst_width, src_burst, dst_burst;
-
-	if (!config)
-		return -EINVAL;
-
-	src_burst = convert_burst(config->src_maxburst);
-	if (src_burst)
-		return src_burst;
-
-	dst_burst = convert_burst(config->dst_maxburst);
-	if (dst_burst)
-		return dst_burst;
-
-	src_width = convert_buswidth(config->src_addr_width);
-	if (src_width)
-		return src_width;
-
-	dst_width = convert_buswidth(config->dst_addr_width);
-	if (dst_width)
-		return dst_width;
-
-	lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
-		DMA_CHAN_CFG_SRC_WIDTH(src_width) |
-		DMA_CHAN_CFG_DST_BURST(dst_burst) |
-		DMA_CHAN_CFG_DST_WIDTH(dst_width);
-
-	lli->src = src;
-	lli->dst = dst;
-	lli->len = len;
-	lli->para = NORMAL_WAIT;
-
-	return 0;
-}
-
 static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
 				      struct sun6i_dma_lli *lli)
 {
@@ -381,9 +368,13 @@
 	irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
 	irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;
 
-	irq_val = readl(sdev->base + DMA_IRQ_EN(irq_offset));
-	irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH);
-	writel(irq_val, sdev->base + DMA_IRQ_EN(irq_offset));
+	vchan->irq_type = vchan->cyclic ? DMA_IRQ_PKG : DMA_IRQ_QUEUE;
+
+	irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
+	irq_val &= ~((DMA_IRQ_HALF | DMA_IRQ_PKG | DMA_IRQ_QUEUE) <<
+			(irq_offset * DMA_IRQ_CHAN_WIDTH));
+	irq_val |= vchan->irq_type << (irq_offset * DMA_IRQ_CHAN_WIDTH);
+	writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));
 
 	writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
 	writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);
@@ -479,11 +470,12 @@
 		writel(status, sdev->base + DMA_IRQ_STAT(i));
 
 		for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
-			if (status & DMA_IRQ_QUEUE) {
-				pchan = sdev->pchans + j;
-				vchan = pchan->vchan;
-
-				if (vchan) {
+			pchan = sdev->pchans + j;
+			vchan = pchan->vchan;
+			if (vchan && (status & vchan->irq_type)) {
+				if (vchan->cyclic) {
+					vchan_cyclic_callback(&pchan->desc->vd);
+				} else {
 					spin_lock(&vchan->vc.lock);
 					vchan_cookie_complete(&pchan->desc->vd);
 					pchan->done = pchan->desc;
@@ -502,6 +494,55 @@
 	return ret;
 }
 
+static int set_config(struct sun6i_dma_dev *sdev,
+			struct dma_slave_config *sconfig,
+			enum dma_transfer_direction direction,
+			u32 *p_cfg)
+{
+	s8 src_width, dst_width, src_burst, dst_burst;
+
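+	/*
+	 * The device side must be fully specified by the client; the
+	 * memory side falls back to a burst of 8 and a 32-bit bus width
+	 * when left unset.
+	 */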
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		src_burst = convert_burst(sconfig->src_maxburst ?
+					sconfig->src_maxburst : 8);
+		src_width = convert_buswidth(sconfig->src_addr_width !=
+						DMA_SLAVE_BUSWIDTH_UNDEFINED ?
+				sconfig->src_addr_width :
+				DMA_SLAVE_BUSWIDTH_4_BYTES);
+		dst_burst = convert_burst(sconfig->dst_maxburst);
+		dst_width = convert_buswidth(sconfig->dst_addr_width);
+		break;
+	case DMA_DEV_TO_MEM:
+		src_burst = convert_burst(sconfig->src_maxburst);
+		src_width = convert_buswidth(sconfig->src_addr_width);
+		dst_burst = convert_burst(sconfig->dst_maxburst ?
+					sconfig->dst_maxburst : 8);
+		dst_width = convert_buswidth(sconfig->dst_addr_width !=
+						DMA_SLAVE_BUSWIDTH_UNDEFINED ?
+				sconfig->dst_addr_width :
+				DMA_SLAVE_BUSWIDTH_4_BYTES);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (src_burst < 0)
+		return src_burst;
+	if (src_width < 0)
+		return src_width;
+	if (dst_burst < 0)
+		return dst_burst;
+	if (dst_width < 0)
+		return dst_width;
+
+	*p_cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
+		DMA_CHAN_CFG_SRC_WIDTH(src_width) |
+		DMA_CHAN_CFG_DST_BURST(dst_burst) |
+		DMA_CHAN_CFG_DST_WIDTH(dst_width);
+
+	return 0;
+}
+
 static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		size_t len, unsigned long flags)
@@ -569,13 +610,15 @@
 	struct sun6i_desc *txd;
 	struct scatterlist *sg;
 	dma_addr_t p_lli;
+	u32 lli_cfg;
 	int i, ret;
 
 	if (!sgl)
 		return NULL;
 
-	if (!is_slave_direction(dir)) {
-		dev_err(chan2dev(chan), "Invalid DMA direction\n");
+	ret = set_config(sdev, sconfig, dir, &lli_cfg);
+	if (ret) {
+		dev_err(chan2dev(chan), "Invalid DMA configuration\n");
 		return NULL;
 	}
 
@@ -588,14 +631,14 @@
 		if (!v_lli)
 			goto err_lli_free;
 
-		if (dir == DMA_MEM_TO_DEV) {
-			ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg),
-						sconfig->dst_addr, sg_dma_len(sg),
-						sconfig);
-			if (ret)
-				goto err_cur_lli_free;
+		v_lli->len = sg_dma_len(sg);
+		v_lli->para = NORMAL_WAIT;
 
-			v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE |
+		if (dir == DMA_MEM_TO_DEV) {
+			v_lli->src = sg_dma_address(sg);
+			v_lli->dst = sconfig->dst_addr;
+			v_lli->cfg = lli_cfg |
+				DMA_CHAN_CFG_DST_IO_MODE |
 				DMA_CHAN_CFG_SRC_LINEAR_MODE |
 				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
 				DMA_CHAN_CFG_DST_DRQ(vchan->port);
@@ -607,13 +650,10 @@
 				sg_dma_len(sg), flags);
 
 		} else {
-			ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr,
-						sg_dma_address(sg), sg_dma_len(sg),
-						sconfig);
-			if (ret)
-				goto err_cur_lli_free;
-
-			v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE |
+			v_lli->src = sconfig->src_addr;
+			v_lli->dst = sg_dma_address(sg);
+			v_lli->cfg = lli_cfg |
+				DMA_CHAN_CFG_DST_LINEAR_MODE |
 				DMA_CHAN_CFG_SRC_IO_MODE |
 				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
 				DMA_CHAN_CFG_SRC_DRQ(vchan->port);
@@ -634,8 +674,78 @@
 
 	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
 
-err_cur_lli_free:
-	dma_pool_free(sdev->pool, v_lli, p_lli);
+err_lli_free:
+	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
+		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
+	kfree(txd);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
+					struct dma_chan *chan,
+					dma_addr_t buf_addr,
+					size_t buf_len,
+					size_t period_len,
+					enum dma_transfer_direction dir,
+					unsigned long flags)
+{
+	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct sun6i_dma_lli *v_lli, *prev = NULL;
+	struct sun6i_desc *txd;
+	dma_addr_t p_lli;
+	u32 lli_cfg;
+	unsigned int i, periods = buf_len / period_len;
+	int ret;
+
+	ret = set_config(sdev, sconfig, dir, &lli_cfg);
+	if (ret) {
+		dev_err(chan2dev(chan), "Invalid DMA configuration\n");
+		return NULL;
+	}
+
+	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+	if (!txd)
+		return NULL;
+
+	for (i = 0; i < periods; i++) {
+		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+		if (!v_lli) {
+			dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
+			goto err_lli_free;
+		}
+
+		v_lli->len = period_len;
+		v_lli->para = NORMAL_WAIT;
+
+		if (dir == DMA_MEM_TO_DEV) {
+			v_lli->src = buf_addr + period_len * i;
+			v_lli->dst = sconfig->dst_addr;
+			v_lli->cfg = lli_cfg |
+				DMA_CHAN_CFG_DST_IO_MODE |
+				DMA_CHAN_CFG_SRC_LINEAR_MODE |
+				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
+				DMA_CHAN_CFG_DST_DRQ(vchan->port);
+		} else {
+			v_lli->src = sconfig->src_addr;
+			v_lli->dst = buf_addr + period_len * i;
+			v_lli->cfg = lli_cfg |
+				DMA_CHAN_CFG_DST_LINEAR_MODE |
+				DMA_CHAN_CFG_SRC_IO_MODE |
+				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
+				DMA_CHAN_CFG_SRC_DRQ(vchan->port);
+		}
+
+		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
+	}
+
+	prev->p_lli_next = txd->p_lli;		/* cyclic list */
+
+	vchan->cyclic = true;
+
+	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
 err_lli_free:
 	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
 		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
@@ -712,6 +822,16 @@
 
 	spin_lock_irqsave(&vchan->vc.lock, flags);
 
+	if (vchan->cyclic) {
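+		/*
+		 * A cyclic descriptor never completes on its own; move the
+		 * one in flight onto the completed list so it is freed with
+		 * the rest below.
+		 */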
+		vchan->cyclic = false;
+		if (pchan && pchan->desc) {
+			struct virt_dma_desc *vd = &pchan->desc->vd;
+			struct virt_dma_chan *vc = &vchan->vc;
+
+			list_add_tail(&vd->node, &vc->desc_completed);
+		}
+	}
+
 	vchan_get_all_descriptors(&vchan->vc, &head);
 
 	if (pchan) {
@@ -759,7 +879,7 @@
 	} else if (!pchan || !pchan->desc) {
 		bytes = 0;
 	} else {
-		bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);
+		bytes = sun6i_get_chan_size(pchan);
 	}
 
 	spin_unlock_irqrestore(&vchan->vc.lock, flags);
@@ -963,6 +1083,7 @@
 	dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
 	dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
 	dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, sdc->slave.cap_mask);
 
 	INIT_LIST_HEAD(&sdc->slave.channels);
 	sdc->slave.device_free_chan_resources	= sun6i_dma_free_chan_resources;
@@ -970,6 +1091,7 @@
 	sdc->slave.device_issue_pending		= sun6i_dma_issue_pending;
 	sdc->slave.device_prep_slave_sg		= sun6i_dma_prep_slave_sg;
 	sdc->slave.device_prep_dma_memcpy	= sun6i_dma_prep_dma_memcpy;
+	sdc->slave.device_prep_dma_cyclic	= sun6i_dma_prep_dma_cyclic;
 	sdc->slave.copy_align			= DMAENGINE_ALIGN_4_BYTES;
 	sdc->slave.device_config		= sun6i_dma_config;
 	sdc->slave.device_pause			= sun6i_dma_pause;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 3871f29..01e316f 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -54,6 +54,7 @@
 #define TEGRA_APBDMA_CSR_ONCE			BIT(27)
 #define TEGRA_APBDMA_CSR_FLOW			BIT(21)
 #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
+#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
 #define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC
 
 /* STATUS register */
@@ -114,6 +115,8 @@
 /* Channel base address offset from APBDMA base address */
 #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000
 
+#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)
+
 struct tegra_dma;
 
 /*
@@ -353,8 +356,11 @@
 	}
 
 	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
-	if (!tdc->slave_id)
+	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) {
+		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
+			return -EINVAL;
 		tdc->slave_id = sconfig->slave_id;
+	}
 	tdc->config_init = true;
 	return 0;
 }
@@ -1236,7 +1242,7 @@
 	}
 	pm_runtime_put(tdma->dev);
 
-	tdc->slave_id = 0;
+	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
 }
 
 static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
@@ -1246,6 +1252,11 @@
 	struct dma_chan *chan;
 	struct tegra_dma_channel *tdc;
 
+	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
+		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
+		return NULL;
+	}
+
 	chan = dma_get_any_slave_channel(&tdma->dma_dev);
 	if (!chan)
 		return NULL;
@@ -1389,6 +1400,7 @@
 				&tdma->dma_dev.channels);
 		tdc->tdma = tdma;
 		tdc->id = i;
+		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
 
 		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
 				(unsigned long)tdc);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
new file mode 100644
index 0000000..c4b121c
--- /dev/null
+++ b/drivers/dma/tegra210-adma.c
@@ -0,0 +1,840 @@
+/*
+ * ADMA driver for Nvidia's Tegra210 ADMA controller.
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "virt-dma.h"
+
+#define ADMA_CH_CMD					0x00
+#define ADMA_CH_STATUS					0x0c
+#define ADMA_CH_STATUS_XFER_EN				BIT(0)
+
+#define ADMA_CH_INT_STATUS				0x10
+#define ADMA_CH_INT_STATUS_XFER_DONE			BIT(0)
+
+#define ADMA_CH_INT_CLEAR				0x1c
+#define ADMA_CH_CTRL					0x24
+#define ADMA_CH_CTRL_TX_REQ(val)			(((val) & 0xf) << 28)
+#define ADMA_CH_CTRL_TX_REQ_MAX				10
+#define ADMA_CH_CTRL_RX_REQ(val)			(((val) & 0xf) << 24)
+#define ADMA_CH_CTRL_RX_REQ_MAX				10
+#define ADMA_CH_CTRL_DIR(val)				(((val) & 0xf) << 12)
+#define ADMA_CH_CTRL_DIR_AHUB2MEM			2
+#define ADMA_CH_CTRL_DIR_MEM2AHUB			4
+#define ADMA_CH_CTRL_MODE_CONTINUOUS			(2 << 8)
+#define ADMA_CH_CTRL_FLOWCTRL_EN			BIT(1)
+
+#define ADMA_CH_CONFIG					0x28
+#define ADMA_CH_CONFIG_SRC_BUF(val)			(((val) & 0x7) << 28)
+#define ADMA_CH_CONFIG_TRG_BUF(val)			(((val) & 0x7) << 24)
+#define ADMA_CH_CONFIG_BURST_SIZE(val)			(((val) & 0x7) << 20)
+#define ADMA_CH_CONFIG_BURST_16				5
+#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val)		((val) & 0xf)
+#define ADMA_CH_CONFIG_MAX_BUFS				8
+
+#define ADMA_CH_FIFO_CTRL				0x2c
+#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val)		(((val) & 0xf) << 24)
+#define ADMA_CH_FIFO_CTRL_STARV_THRES(val)		(((val) & 0xf) << 16)
+#define ADMA_CH_FIFO_CTRL_TX_SIZE(val)			(((val) & 0xf) << 8)
+#define ADMA_CH_FIFO_CTRL_RX_SIZE(val)			((val) & 0xf)
+
+#define ADMA_CH_LOWER_SRC_ADDR				0x34
+#define ADMA_CH_LOWER_TRG_ADDR				0x3c
+#define ADMA_CH_TC					0x44
+#define ADMA_CH_TC_COUNT_MASK				0x3ffffffc
+
+#define ADMA_CH_XFER_STATUS				0x54
+#define ADMA_CH_XFER_STATUS_COUNT_MASK			0xffff
+
+#define ADMA_GLOBAL_CMD					0xc00
+#define ADMA_GLOBAL_SOFT_RESET				0xc04
+#define ADMA_GLOBAL_INT_CLEAR				0xc20
+#define ADMA_GLOBAL_CTRL				0xc24
+
+#define ADMA_CH_REG_OFFSET(a)				(a * 0x80)
+
+#define ADMA_CH_FIFO_CTRL_DEFAULT	(ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \
+					 ADMA_CH_FIFO_CTRL_STARV_THRES(1) | \
+					 ADMA_CH_FIFO_CTRL_TX_SIZE(3)     | \
+					 ADMA_CH_FIFO_CTRL_RX_SIZE(3))
+struct tegra_adma;
+
+/*
+ * struct tegra_adma_chip_data - Tegra chip specific data
+ * @nr_channels: Number of DMA channels available.
+ */
+struct tegra_adma_chip_data {
+	int nr_channels;
+};
+
+/*
+ * struct tegra_adma_chan_regs - Tegra ADMA channel registers
+ */
+struct tegra_adma_chan_regs {
+	unsigned int ctrl;
+	unsigned int config;
+	unsigned int src_addr;
+	unsigned int trg_addr;
+	unsigned int fifo_ctrl;
+	unsigned int tc;
+};
+
+/*
+ * struct tegra_adma_desc - Tegra ADMA descriptor to manage transfer requests.
+ */
+struct tegra_adma_desc {
+	struct virt_dma_desc		vd;
+	struct tegra_adma_chan_regs	ch_regs;
+	size_t				buf_len;
+	size_t				period_len;
+	size_t				num_periods;
+};
+
+/*
+ * struct tegra_adma_chan - Tegra ADMA channel information
+ */
+struct tegra_adma_chan {
+	struct virt_dma_chan		vc;
+	struct tegra_adma_desc		*desc;
+	struct tegra_adma		*tdma;
+	int				irq;
+	void __iomem			*chan_addr;
+
+	/* Slave channel configuration info */
+	struct dma_slave_config		sconfig;
+	enum dma_transfer_direction	sreq_dir;
+	unsigned int			sreq_index;
+	bool				sreq_reserved;
+
+	/* Transfer count and position info */
+	unsigned int			tx_buf_count;
+	unsigned int			tx_buf_pos;
+};
+
+/*
+ * struct tegra_adma - Tegra ADMA controller information
+ */
+struct tegra_adma {
+	struct dma_device		dma_dev;
+	struct device			*dev;
+	void __iomem			*base_addr;
+	unsigned int			nr_channels;
+	unsigned long			rx_requests_reserved;
+	unsigned long			tx_requests_reserved;
+
+	/* Used to store global command register state when suspending */
+	unsigned int			global_cmd;
+
+	/* Last member of the structure */
+	struct tegra_adma_chan		channels[0];
+};
+
+static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
+{
+	writel(val, tdma->base_addr + reg);
+}
+
+static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
+{
+	return readl(tdma->base_addr + reg);
+}
+
+static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
+{
+	writel(val, tdc->chan_addr + reg);
+}
+
+static inline u32 tdma_ch_read(struct tegra_adma_chan *tdc, u32 reg)
+{
+	return readl(tdc->chan_addr + reg);
+}
+
+static inline struct tegra_adma_chan *to_tegra_adma_chan(struct dma_chan *dc)
+{
+	return container_of(dc, struct tegra_adma_chan, vc.chan);
+}
+
+static inline struct tegra_adma_desc *to_tegra_adma_desc(
+		struct dma_async_tx_descriptor *td)
+{
+	return container_of(td, struct tegra_adma_desc, vd.tx);
+}
+
+static inline struct device *tdc2dev(struct tegra_adma_chan *tdc)
+{
+	return tdc->tdma->dev;
+}
+
+static void tegra_adma_desc_free(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct tegra_adma_desc, vd));
+}
+
+static int tegra_adma_slave_config(struct dma_chan *dc,
+				   struct dma_slave_config *sconfig)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+
+	memcpy(&tdc->sconfig, sconfig, sizeof(*sconfig));
+
+	return 0;
+}
+
+static int tegra_adma_init(struct tegra_adma *tdma)
+{
+	u32 status;
+	int ret;
+
+	/* Clear any interrupts */
+	tdma_write(tdma, ADMA_GLOBAL_INT_CLEAR, 0x1);
+
+	/* Assert soft reset */
+	tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
+
+	/* Wait for reset to clear */
+	ret = readx_poll_timeout(readl,
+				 tdma->base_addr + ADMA_GLOBAL_SOFT_RESET,
+				 status, status == 0, 20, 10000);
+	if (ret)
+		return ret;
+
+	/* Enable global ADMA registers */
+	tdma_write(tdma, ADMA_GLOBAL_CMD, 1);
+
+	return 0;
+}
+
+static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
+				    enum dma_transfer_direction direction)
+{
+	struct tegra_adma *tdma = tdc->tdma;
+	unsigned int sreq_index = tdc->sreq_index;
+
+	if (tdc->sreq_reserved)
+		return tdc->sreq_dir == direction ? 0 : -EINVAL;
+
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		if (sreq_index > ADMA_CH_CTRL_TX_REQ_MAX) {
+			dev_err(tdma->dev, "invalid DMA request\n");
+			return -EINVAL;
+		}
+
+		if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
+			dev_err(tdma->dev, "DMA request reserved\n");
+			return -EINVAL;
+		}
+		break;
+
+	case DMA_DEV_TO_MEM:
+		if (sreq_index > ADMA_CH_CTRL_RX_REQ_MAX) {
+			dev_err(tdma->dev, "invalid DMA request\n");
+			return -EINVAL;
+		}
+
+		if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
+			dev_err(tdma->dev, "DMA request reserved\n");
+			return -EINVAL;
+		}
+		break;
+
+	default:
+		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
+			 dma_chan_name(&tdc->vc.chan));
+		return -EINVAL;
+	}
+
+	tdc->sreq_dir = direction;
+	tdc->sreq_reserved = true;
+
+	return 0;
+}
+
+static void tegra_adma_request_free(struct tegra_adma_chan *tdc)
+{
+	struct tegra_adma *tdma = tdc->tdma;
+
+	if (!tdc->sreq_reserved)
+		return;
+
+	switch (tdc->sreq_dir) {
+	case DMA_MEM_TO_DEV:
+		clear_bit(tdc->sreq_index, &tdma->tx_requests_reserved);
+		break;
+
+	case DMA_DEV_TO_MEM:
+		clear_bit(tdc->sreq_index, &tdma->rx_requests_reserved);
+		break;
+
+	default:
+		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
+			 dma_chan_name(&tdc->vc.chan));
+		return;
+	}
+
+	tdc->sreq_reserved = false;
+}
+
+static u32 tegra_adma_irq_status(struct tegra_adma_chan *tdc)
+{
+	u32 status = tdma_ch_read(tdc, ADMA_CH_INT_STATUS);
+
+	return status & ADMA_CH_INT_STATUS_XFER_DONE;
+}
+
+static u32 tegra_adma_irq_clear(struct tegra_adma_chan *tdc)
+{
+	u32 status = tegra_adma_irq_status(tdc);
+
+	if (status)
+		tdma_ch_write(tdc, ADMA_CH_INT_CLEAR, status);
+
+	return status;
+}
+
+static void tegra_adma_stop(struct tegra_adma_chan *tdc)
+{
+	unsigned int status;
+
+	/* Disable ADMA */
+	tdma_ch_write(tdc, ADMA_CH_CMD, 0);
+
+	/* Clear interrupt status */
+	tegra_adma_irq_clear(tdc);
+
+	if (readx_poll_timeout_atomic(readl, tdc->chan_addr + ADMA_CH_STATUS,
+			status, !(status & ADMA_CH_STATUS_XFER_EN),
+			20, 10000)) {
+		dev_err(tdc2dev(tdc), "unable to stop DMA channel\n");
+		return;
+	}
+
+	kfree(tdc->desc);
+	tdc->desc = NULL;
+}
+
+static void tegra_adma_start(struct tegra_adma_chan *tdc)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
+	struct tegra_adma_chan_regs *ch_regs;
+	struct tegra_adma_desc *desc;
+
+	if (!vd)
+		return;
+
+	list_del(&vd->node);
+
+	desc = to_tegra_adma_desc(&vd->tx);
+
+	if (!desc) {
+		dev_warn(tdc2dev(tdc), "unable to start DMA, no descriptor\n");
+		return;
+	}
+
+	ch_regs = &desc->ch_regs;
+
+	tdc->tx_buf_pos = 0;
+	tdc->tx_buf_count = 0;
+	tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
+	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
+	tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
+	tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
+	tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
+	tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);
+
+	/* Start ADMA */
+	tdma_ch_write(tdc, ADMA_CH_CMD, 1);
+
+	tdc->desc = desc;
+}
+
+static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
+{
+	struct tegra_adma_desc *desc = tdc->desc;
+	unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
+	unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
+	unsigned int periods_remaining;
+
+	/*
+	 * Handle wrap around of buffer count register
+	 */
+	if (pos < tdc->tx_buf_pos)
+		tdc->tx_buf_count += pos + (max - tdc->tx_buf_pos);
+	else
+		tdc->tx_buf_count += pos - tdc->tx_buf_pos;
+
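+	/*
+	 * tx_buf_count accumulates completed buffers (periods); its
+	 * remainder modulo num_periods is the current position within
+	 * the cyclic buffer.
+	 */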
+	periods_remaining = tdc->tx_buf_count % desc->num_periods;
+	tdc->tx_buf_pos = pos;
+
+	return desc->buf_len - (periods_remaining * desc->period_len);
+}
+
+static irqreturn_t tegra_adma_isr(int irq, void *dev_id)
+{
+	struct tegra_adma_chan *tdc = dev_id;
+	unsigned long status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tdc->vc.lock, flags);
+
+	status = tegra_adma_irq_clear(tdc);
+	if (status == 0 || !tdc->desc) {
+		spin_unlock_irqrestore(&tdc->vc.lock, flags);
+		return IRQ_NONE;
+	}
+
+	vchan_cyclic_callback(&tdc->desc->vd);
+
+	spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static void tegra_adma_issue_pending(struct dma_chan *dc)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&tdc->vc.lock, flags);
+
+	if (vchan_issue_pending(&tdc->vc)) {
+		if (!tdc->desc)
+			tegra_adma_start(tdc);
+	}
+
+	spin_unlock_irqrestore(&tdc->vc.lock, flags);
+}
+
+static int tegra_adma_terminate_all(struct dma_chan *dc)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&tdc->vc.lock, flags);
+
+	if (tdc->desc)
+		tegra_adma_stop(tdc);
+
+	tegra_adma_request_free(tdc);
+	vchan_get_all_descriptors(&tdc->vc, &head);
+	spin_unlock_irqrestore(&tdc->vc.lock, flags);
+	vchan_dma_desc_free_list(&tdc->vc, &head);
+
+	return 0;
+}
+
+static enum dma_status tegra_adma_tx_status(struct dma_chan *dc,
+					    dma_cookie_t cookie,
+					    struct dma_tx_state *txstate)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+	struct tegra_adma_desc *desc;
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+	unsigned int residual;
+
+	ret = dma_cookie_status(dc, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&tdc->vc.lock, flags);
+
+	vd = vchan_find_desc(&tdc->vc, cookie);
+	if (vd) {
+		desc = to_tegra_adma_desc(&vd->tx);
+		residual = desc->ch_regs.tc;
+	} else if (tdc->desc && tdc->desc->vd.tx.cookie == cookie) {
+		residual = tegra_adma_get_residue(tdc);
+	} else {
+		residual = 0;
+	}
+
+	spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+	dma_set_residue(txstate, residual);
+
+	return ret;
+}
+
+static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
+				      struct tegra_adma_desc *desc,
+				      dma_addr_t buf_addr,
+				      enum dma_transfer_direction direction)
+{
+	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
+	unsigned int burst_size, adma_dir;
+
+	if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
+		return -EINVAL;
+
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
+		burst_size = fls(tdc->sconfig.dst_maxburst);
+		ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
+		ch_regs->ctrl = ADMA_CH_CTRL_TX_REQ(tdc->sreq_index);
+		ch_regs->src_addr = buf_addr;
+		break;
+
+	case DMA_DEV_TO_MEM:
+		adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
+		burst_size = fls(tdc->sconfig.src_maxburst);
+		ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
+		ch_regs->ctrl = ADMA_CH_CTRL_RX_REQ(tdc->sreq_index);
+		ch_regs->trg_addr = buf_addr;
+		break;
+
+	default:
+		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
+		return -EINVAL;
+	}
+
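+	/*
+	 * The burst field is derived from the highest set bit of the
+	 * client's maxburst; fall back to the 16-word maximum when it
+	 * is unset or too large.
+	 */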
+	if (!burst_size || burst_size > ADMA_CH_CONFIG_BURST_16)
+		burst_size = ADMA_CH_CONFIG_BURST_16;
+
+	ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
+			 ADMA_CH_CTRL_MODE_CONTINUOUS |
+			 ADMA_CH_CTRL_FLOWCTRL_EN;
+	ch_regs->config |= ADMA_CH_CONFIG_BURST_SIZE(burst_size);
+	ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
+	ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT;
+	ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
+
+	return tegra_adma_request_alloc(tdc, direction);
+}
+
+static struct dma_async_tx_descriptor *tegra_adma_prep_dma_cyclic(
+	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+	struct tegra_adma_desc *desc = NULL;
+
+	if (!buf_len || !period_len || period_len > ADMA_CH_TC_COUNT_MASK) {
+		dev_err(tdc2dev(tdc), "invalid buffer/period len\n");
+		return NULL;
+	}
+
+	if (buf_len % period_len) {
+		dev_err(tdc2dev(tdc), "buf_len not a multiple of period_len\n");
+		return NULL;
+	}
+
+	if (!IS_ALIGNED(buf_addr, 4)) {
+		dev_err(tdc2dev(tdc), "invalid buffer alignment\n");
+		return NULL;
+	}
+
+	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+	if (!desc)
+		return NULL;
+
+	desc->buf_len = buf_len;
+	desc->period_len = period_len;
+	desc->num_periods = buf_len / period_len;
+
+	if (tegra_adma_set_xfer_params(tdc, desc, buf_addr, direction)) {
+		kfree(desc);
+		return NULL;
+	}
+
+	return vchan_tx_prep(&tdc->vc, &desc->vd, flags);
+}
+
+static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+	int ret;
+
+	ret = request_irq(tdc->irq, tegra_adma_isr, 0, dma_chan_name(dc), tdc);
+	if (ret) {
+		dev_err(tdc2dev(tdc), "failed to get interrupt for %s\n",
+			dma_chan_name(dc));
+		return ret;
+	}
+
+	ret = pm_runtime_get_sync(tdc2dev(tdc));
+	if (ret < 0) {
+		free_irq(tdc->irq, tdc);
+		return ret;
+	}
+
+	dma_cookie_init(&tdc->vc.chan);
+
+	return 0;
+}
+
+static void tegra_adma_free_chan_resources(struct dma_chan *dc)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+
+	tegra_adma_terminate_all(dc);
+	vchan_free_chan_resources(&tdc->vc);
+	tasklet_kill(&tdc->vc.task);
+	free_irq(tdc->irq, tdc);
+	pm_runtime_put(tdc2dev(tdc));
+
+	tdc->sreq_index = 0;
+	tdc->sreq_dir = DMA_TRANS_NONE;
+}
+
+static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
+					   struct of_dma *ofdma)
+{
+	struct tegra_adma *tdma = ofdma->of_dma_data;
+	struct tegra_adma_chan *tdc;
+	struct dma_chan *chan;
+	unsigned int sreq_index;
+
+	if (dma_spec->args_count != 1)
+		return NULL;
+
+	sreq_index = dma_spec->args[0];
+
+	if (sreq_index == 0) {
+		dev_err(tdma->dev, "DMA request must not be 0\n");
+		return NULL;
+	}
+
+	chan = dma_get_any_slave_channel(&tdma->dma_dev);
+	if (!chan)
+		return NULL;
+
+	tdc = to_tegra_adma_chan(chan);
+	tdc->sreq_index = sreq_index;
+
+	return chan;
+}
+
+static int tegra_adma_runtime_suspend(struct device *dev)
+{
+	struct tegra_adma *tdma = dev_get_drvdata(dev);
+
+	tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+
+	return pm_clk_suspend(dev);
+}
+
+static int tegra_adma_runtime_resume(struct device *dev)
+{
+	struct tegra_adma *tdma = dev_get_drvdata(dev);
+	int ret;
+
+	ret = pm_clk_resume(dev);
+	if (ret)
+		return ret;
+
+	tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+
+	return 0;
+}
+
+static const struct tegra_adma_chip_data tegra210_chip_data = {
+	.nr_channels = 22,
+};
+
+static const struct of_device_id tegra_adma_of_match[] = {
+	{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
+
+static int tegra_adma_probe(struct platform_device *pdev)
+{
+	const struct tegra_adma_chip_data *cdata;
+	struct tegra_adma *tdma;
+	struct resource	*res;
+	struct clk *clk;
+	int ret, i;
+
+	cdata = of_device_get_match_data(&pdev->dev);
+	if (!cdata) {
+		dev_err(&pdev->dev, "device match data not found\n");
+		return -ENODEV;
+	}
+
+	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
+			    sizeof(struct tegra_adma_chan), GFP_KERNEL);
+	if (!tdma)
+		return -ENOMEM;
+
+	tdma->dev = &pdev->dev;
+	tdma->nr_channels = cdata->nr_channels;
+	platform_set_drvdata(pdev, tdma);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tdma->base_addr))
+		return PTR_ERR(tdma->base_addr);
+
+	ret = pm_clk_create(&pdev->dev);
+	if (ret)
+		return ret;
+
+	clk = clk_get(&pdev->dev, "d_audio");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "ADMA clock not found\n");
+		ret = PTR_ERR(clk);
+		goto clk_destroy;
+	}
+
+	ret = pm_clk_add_clk(&pdev->dev, clk);
+	if (ret) {
+		clk_put(clk);
+		goto clk_destroy;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+		goto rpm_disable;
+
+	ret = tegra_adma_init(tdma);
+	if (ret)
+		goto rpm_put;
+
+	INIT_LIST_HEAD(&tdma->dma_dev.channels);
+	for (i = 0; i < tdma->nr_channels; i++) {
+		struct tegra_adma_chan *tdc = &tdma->channels[i];
+
+		tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
+
+		tdc->irq = of_irq_get(pdev->dev.of_node, i);
+		if (tdc->irq < 0) {
+			ret = tdc->irq;
+			goto irq_dispose;
+		}
+
+		vchan_init(&tdc->vc, &tdma->dma_dev);
+		tdc->vc.desc_free = tegra_adma_desc_free;
+		tdc->tdma = tdma;
+	}
+
+	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
+	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
+
+	tdma->dma_dev.dev = &pdev->dev;
+	tdma->dma_dev.device_alloc_chan_resources =
+					tegra_adma_alloc_chan_resources;
+	tdma->dma_dev.device_free_chan_resources =
+					tegra_adma_free_chan_resources;
+	tdma->dma_dev.device_issue_pending = tegra_adma_issue_pending;
+	tdma->dma_dev.device_prep_dma_cyclic = tegra_adma_prep_dma_cyclic;
+	tdma->dma_dev.device_config = tegra_adma_slave_config;
+	tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
+	tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
+	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+	ret = dma_async_device_register(&tdma->dma_dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
+		goto irq_dispose;
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+					 tegra_dma_of_xlate, tdma);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "ADMA OF registration failed %d\n", ret);
+		goto dma_remove;
+	}
+
+	pm_runtime_put(&pdev->dev);
+
+	dev_info(&pdev->dev, "Tegra210 ADMA driver registered %d channels\n",
+		 tdma->nr_channels);
+
+	return 0;
+
+dma_remove:
+	dma_async_device_unregister(&tdma->dma_dev);
+irq_dispose:
+	while (--i >= 0)
+		irq_dispose_mapping(tdma->channels[i].irq);
+rpm_put:
+	pm_runtime_put_sync(&pdev->dev);
+rpm_disable:
+	pm_runtime_disable(&pdev->dev);
+clk_destroy:
+	pm_clk_destroy(&pdev->dev);
+
+	return ret;
+}
+
+static int tegra_adma_remove(struct platform_device *pdev)
+{
+	struct tegra_adma *tdma = platform_get_drvdata(pdev);
+	int i;
+
+	dma_async_device_unregister(&tdma->dma_dev);
+
+	for (i = 0; i < tdma->nr_channels; ++i)
+		irq_dispose_mapping(tdma->channels[i].irq);
+
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	pm_clk_destroy(&pdev->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_adma_pm_suspend(struct device *dev)
+{
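+	/* refuse system suspend while the device is still runtime active */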
+	return pm_runtime_suspended(dev) == false;
+}
+#endif
+
+static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
+	SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
+			   tegra_adma_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(tegra_adma_pm_suspend, NULL)
+};
+
+static struct platform_driver tegra_admac_driver = {
+	.driver = {
+		.name	= "tegra-adma",
+		.pm	= &tegra_adma_dev_pm_ops,
+		.of_match_table = tegra_adma_of_match,
+	},
+	.probe		= tegra_adma_probe,
+	.remove		= tegra_adma_remove,
+};
+
+module_platform_driver(tegra_admac_driver);
+
+MODULE_ALIAS("platform:tegra210-adma");
+MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
+MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
+MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 0ee0321..df91185 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -16,6 +16,15 @@
  * video device (S2MM). Initialization, status, interrupt and management
  * registers are accessed through an AXI4-Lite slave interface.
  *
+ * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
+ * provides high-bandwidth one dimensional direct memory access between memory
+ * and AXI4-Stream target peripherals. It supports one receive and one
+ * transmit channel, both of them optional at synthesis time.
+ *
+ * The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory
+ * Access (DMA) between a memory-mapped source address and a memory-mapped
+ * destination address.
+ *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation, either version 2 of the License, or
@@ -35,116 +44,138 @@
 #include <linux/of_platform.h>
 #include <linux/of_irq.h>
 #include <linux/slab.h>
+#include <linux/clk.h>
 
 #include "../dmaengine.h"
 
 /* Register/Descriptor Offsets */
-#define XILINX_VDMA_MM2S_CTRL_OFFSET		0x0000
-#define XILINX_VDMA_S2MM_CTRL_OFFSET		0x0030
+#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
+#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
 #define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
 #define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0
 
 /* Control Registers */
-#define XILINX_VDMA_REG_DMACR			0x0000
-#define XILINX_VDMA_DMACR_DELAY_MAX		0xff
-#define XILINX_VDMA_DMACR_DELAY_SHIFT		24
-#define XILINX_VDMA_DMACR_FRAME_COUNT_MAX	0xff
-#define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT	16
-#define XILINX_VDMA_DMACR_ERR_IRQ		BIT(14)
-#define XILINX_VDMA_DMACR_DLY_CNT_IRQ		BIT(13)
-#define XILINX_VDMA_DMACR_FRM_CNT_IRQ		BIT(12)
-#define XILINX_VDMA_DMACR_MASTER_SHIFT		8
-#define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT	5
-#define XILINX_VDMA_DMACR_FRAMECNT_EN		BIT(4)
-#define XILINX_VDMA_DMACR_GENLOCK_EN		BIT(3)
-#define XILINX_VDMA_DMACR_RESET			BIT(2)
-#define XILINX_VDMA_DMACR_CIRC_EN		BIT(1)
-#define XILINX_VDMA_DMACR_RUNSTOP		BIT(0)
-#define XILINX_VDMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
+#define XILINX_DMA_REG_DMACR			0x0000
+#define XILINX_DMA_DMACR_DELAY_MAX		0xff
+#define XILINX_DMA_DMACR_DELAY_SHIFT		24
+#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
+#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
+#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
+#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
+#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
+#define XILINX_DMA_DMACR_MASTER_SHIFT		8
+#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT	5
+#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
+#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
+#define XILINX_DMA_DMACR_RESET			BIT(2)
+#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
+#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
+#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
 
-#define XILINX_VDMA_REG_DMASR			0x0004
-#define XILINX_VDMA_DMASR_EOL_LATE_ERR		BIT(15)
-#define XILINX_VDMA_DMASR_ERR_IRQ		BIT(14)
-#define XILINX_VDMA_DMASR_DLY_CNT_IRQ		BIT(13)
-#define XILINX_VDMA_DMASR_FRM_CNT_IRQ		BIT(12)
-#define XILINX_VDMA_DMASR_SOF_LATE_ERR		BIT(11)
-#define XILINX_VDMA_DMASR_SG_DEC_ERR		BIT(10)
-#define XILINX_VDMA_DMASR_SG_SLV_ERR		BIT(9)
-#define XILINX_VDMA_DMASR_EOF_EARLY_ERR		BIT(8)
-#define XILINX_VDMA_DMASR_SOF_EARLY_ERR		BIT(7)
-#define XILINX_VDMA_DMASR_DMA_DEC_ERR		BIT(6)
-#define XILINX_VDMA_DMASR_DMA_SLAVE_ERR		BIT(5)
-#define XILINX_VDMA_DMASR_DMA_INT_ERR		BIT(4)
-#define XILINX_VDMA_DMASR_IDLE			BIT(1)
-#define XILINX_VDMA_DMASR_HALTED		BIT(0)
-#define XILINX_VDMA_DMASR_DELAY_MASK		GENMASK(31, 24)
-#define XILINX_VDMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)
+#define XILINX_DMA_REG_DMASR			0x0004
+#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
+#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
+#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
+#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
+#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
+#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
+#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
+#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
+#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
+#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
+#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
+#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
+#define XILINX_DMA_DMASR_IDLE			BIT(1)
+#define XILINX_DMA_DMASR_HALTED		BIT(0)
+#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
+#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)
 
-#define XILINX_VDMA_REG_CURDESC			0x0008
-#define XILINX_VDMA_REG_TAILDESC		0x0010
-#define XILINX_VDMA_REG_REG_INDEX		0x0014
-#define XILINX_VDMA_REG_FRMSTORE		0x0018
-#define XILINX_VDMA_REG_THRESHOLD		0x001c
-#define XILINX_VDMA_REG_FRMPTR_STS		0x0024
-#define XILINX_VDMA_REG_PARK_PTR		0x0028
-#define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT	8
-#define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT	0
-#define XILINX_VDMA_REG_VDMA_VERSION		0x002c
+#define XILINX_DMA_REG_CURDESC			0x0008
+#define XILINX_DMA_REG_TAILDESC		0x0010
+#define XILINX_DMA_REG_REG_INDEX		0x0014
+#define XILINX_DMA_REG_FRMSTORE		0x0018
+#define XILINX_DMA_REG_THRESHOLD		0x001c
+#define XILINX_DMA_REG_FRMPTR_STS		0x0024
+#define XILINX_DMA_REG_PARK_PTR		0x0028
+#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
+#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
+#define XILINX_DMA_REG_VDMA_VERSION		0x002c
 
 /* Register Direct Mode Registers */
-#define XILINX_VDMA_REG_VSIZE			0x0000
-#define XILINX_VDMA_REG_HSIZE			0x0004
+#define XILINX_DMA_REG_VSIZE			0x0000
+#define XILINX_DMA_REG_HSIZE			0x0004
 
-#define XILINX_VDMA_REG_FRMDLY_STRIDE		0x0008
-#define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
-#define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT	0
+#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
+#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
+#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0
 
 #define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
+#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))
 
 /* HW specific definitions */
-#define XILINX_VDMA_MAX_CHANS_PER_DEVICE	0x2
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x2
 
-#define XILINX_VDMA_DMAXR_ALL_IRQ_MASK	\
-		(XILINX_VDMA_DMASR_FRM_CNT_IRQ | \
-		 XILINX_VDMA_DMASR_DLY_CNT_IRQ | \
-		 XILINX_VDMA_DMASR_ERR_IRQ)
+#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
+		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
+		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
+		 XILINX_DMA_DMASR_ERR_IRQ)
 
-#define XILINX_VDMA_DMASR_ALL_ERR_MASK	\
-		(XILINX_VDMA_DMASR_EOL_LATE_ERR | \
-		 XILINX_VDMA_DMASR_SOF_LATE_ERR | \
-		 XILINX_VDMA_DMASR_SG_DEC_ERR | \
-		 XILINX_VDMA_DMASR_SG_SLV_ERR | \
-		 XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
-		 XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
-		 XILINX_VDMA_DMASR_DMA_DEC_ERR | \
-		 XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \
-		 XILINX_VDMA_DMASR_DMA_INT_ERR)
+#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
+		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
+		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
+		 XILINX_DMA_DMASR_SG_DEC_ERR | \
+		 XILINX_DMA_DMASR_SG_SLV_ERR | \
+		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
+		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
+		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
+		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
+		 XILINX_DMA_DMASR_DMA_INT_ERR)
 
 /*
  * Recoverable errors are DMA Internal error, SOF Early, EOF Early
  * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
  * is enabled in the h/w system.
  */
-#define XILINX_VDMA_DMASR_ERR_RECOVER_MASK	\
-		(XILINX_VDMA_DMASR_SOF_LATE_ERR | \
-		 XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
-		 XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
-		 XILINX_VDMA_DMASR_DMA_INT_ERR)
+#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
+		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
+		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
+		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
+		 XILINX_DMA_DMASR_DMA_INT_ERR)
 
 /* Axi VDMA Flush on Fsync bits */
-#define XILINX_VDMA_FLUSH_S2MM		3
-#define XILINX_VDMA_FLUSH_MM2S		2
-#define XILINX_VDMA_FLUSH_BOTH		1
+#define XILINX_DMA_FLUSH_S2MM		3
+#define XILINX_DMA_FLUSH_MM2S		2
+#define XILINX_DMA_FLUSH_BOTH		1
 
 /* Delay loop counter to prevent hardware failure */
-#define XILINX_VDMA_LOOP_COUNT		1000000
+#define XILINX_DMA_LOOP_COUNT		1000000
+
+/* AXI DMA Specific Registers/Offsets */
+#define XILINX_DMA_REG_SRCDSTADDR	0x18
+#define XILINX_DMA_REG_BTT		0x28
+
+/* AXI DMA Specific Masks/Bit fields */
+#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
+#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
+#define XILINX_DMA_CR_COALESCE_SHIFT	16
+#define XILINX_DMA_BD_SOP		BIT(27)
+#define XILINX_DMA_BD_EOP		BIT(26)
+#define XILINX_DMA_COALESCE_MAX		255
+#define XILINX_DMA_NUM_APP_WORDS	5
+
+/* AXI CDMA Specific Registers/Offsets */
+#define XILINX_CDMA_REG_SRCADDR		0x18
+#define XILINX_CDMA_REG_DSTADDR		0x20
+
+/* AXI CDMA Specific Masks */
+#define XILINX_CDMA_CR_SGMODE          BIT(3)
 
 /**
  * struct xilinx_vdma_desc_hw - Hardware Descriptor
  * @next_desc: Next Descriptor Pointer @0x00
  * @pad1: Reserved @0x04
  * @buf_addr: Buffer address @0x08
- * @pad2: Reserved @0x0C
+ * @buf_addr_msb: MSB of Buffer address @0x0C
  * @vsize: Vertical Size @0x10
  * @hsize: Horizontal Size @0x14
  * @stride: Number of bytes between the first
@@ -154,13 +185,59 @@
 	u32 next_desc;
 	u32 pad1;
 	u32 buf_addr;
-	u32 pad2;
+	u32 buf_addr_msb;
 	u32 vsize;
 	u32 hsize;
 	u32 stride;
 } __aligned(64);
 
 /**
+ * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @buf_addr: Buffer address @0x08
+ * @pad2: Reserved @0x0C
+ * @pad3: Reserved @0x10
+ * @pad4: Reserved @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ * @app: APP Fields @0x20 - 0x30
+ */
+struct xilinx_axidma_desc_hw {
+	u32 next_desc;
+	u32 pad1;
+	u32 buf_addr;
+	u32 pad2;
+	u32 pad3;
+	u32 pad4;
+	u32 control;
+	u32 status;
+	u32 app[XILINX_DMA_NUM_APP_WORDS];
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @src_addr: Source address @0x08
+ * @pad2: Reserved @0x0C
+ * @dest_addr: Destination address @0x10
+ * @pad3: Reserved @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ */
+struct xilinx_cdma_desc_hw {
+	u32 next_desc;
+	u32 pad1;
+	u32 src_addr;
+	u32 pad2;
+	u32 dest_addr;
+	u32 pad3;
+	u32 control;
+	u32 status;
+} __aligned(64);
+
+/**
  * struct xilinx_vdma_tx_segment - Descriptor segment
  * @hw: Hardware descriptor
  * @node: Node in the descriptor segments list
@@ -173,19 +250,43 @@
 } __aligned(64);
 
 /**
- * struct xilinx_vdma_tx_descriptor - Per Transaction structure
+ * struct xilinx_axidma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_axidma_tx_segment {
+	struct xilinx_axidma_desc_hw hw;
+	struct list_head node;
+	dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_cdma_tx_segment {
+	struct xilinx_cdma_desc_hw hw;
+	struct list_head node;
+	dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_dma_tx_descriptor - Per Transaction structure
  * @async_tx: Async transaction descriptor
  * @segments: TX segments list
  * @node: Node in the channel descriptors list
  */
-struct xilinx_vdma_tx_descriptor {
+struct xilinx_dma_tx_descriptor {
 	struct dma_async_tx_descriptor async_tx;
 	struct list_head segments;
 	struct list_head node;
 };
 
 /**
- * struct xilinx_vdma_chan - Driver specific VDMA channel structure
+ * struct xilinx_dma_chan - Driver specific DMA channel structure
  * @xdev: Driver specific device structure
  * @ctrl_offset: Control registers offset
  * @desc_offset: TX descriptor registers offset
@@ -207,9 +308,14 @@
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
  * @desc_pendingcount: Descriptor pending count
+ * @ext_addr: Indicates 64 bit addressing is supported by dma channel
+ * @desc_submitcount: Descriptor h/w submitted count
+ * @residue: Residue for AXI DMA
+ * @seg_v: Statically allocated segments base
+ * @start_transfer: Transfer start routine, differs between the DMA IP types
  */
-struct xilinx_vdma_chan {
-	struct xilinx_vdma_device *xdev;
+struct xilinx_dma_chan {
+	struct xilinx_dma_device *xdev;
 	u32 ctrl_offset;
 	u32 desc_offset;
 	spinlock_t lock;
@@ -230,73 +336,122 @@
 	struct xilinx_vdma_config config;
 	bool flush_on_fsync;
 	u32 desc_pendingcount;
+	bool ext_addr;
+	u32 desc_submitcount;
+	u32 residue;
+	struct xilinx_axidma_tx_segment *seg_v;
+	void (*start_transfer)(struct xilinx_dma_chan *chan);
+};
+
+struct xilinx_dma_config {
+	enum xdma_ip_type dmatype;
+	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
+			struct clk **tx_clk, struct clk **txs_clk,
+			struct clk **rx_clk, struct clk **rxs_clk);
 };
 
 /**
- * struct xilinx_vdma_device - VDMA device structure
+ * struct xilinx_dma_device - DMA device structure
  * @regs: I/O mapped base address
  * @dev: Device Structure
  * @common: DMA device structure
- * @chan: Driver specific VDMA channel
+ * @chan: Driver specific DMA channel
  * @has_sg: Specifies whether Scatter-Gather is present or not
  * @flush_on_fsync: Flush on frame sync
+ * @ext_addr: Indicates 64 bit addressing is supported by dma device
+ * @pdev: Platform device structure pointer
+ * @dma_config: DMA config structure
+ * @axi_clk: DMA AXI4-Lite interface clock
+ * @tx_clk: DMA mm2s clock
+ * @txs_clk: DMA mm2s stream clock
+ * @rx_clk: DMA s2mm clock
+ * @rxs_clk: DMA s2mm stream clock
  */
-struct xilinx_vdma_device {
+struct xilinx_dma_device {
 	void __iomem *regs;
 	struct device *dev;
 	struct dma_device common;
-	struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE];
+	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
 	bool has_sg;
 	u32 flush_on_fsync;
+	bool ext_addr;
+	struct platform_device  *pdev;
+	const struct xilinx_dma_config *dma_config;
+	struct clk *axi_clk;
+	struct clk *tx_clk;
+	struct clk *txs_clk;
+	struct clk *rx_clk;
+	struct clk *rxs_clk;
 };
 
 /* Macros */
 #define to_xilinx_chan(chan) \
-	container_of(chan, struct xilinx_vdma_chan, common)
-#define to_vdma_tx_descriptor(tx) \
-	container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
-#define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+	container_of(chan, struct xilinx_dma_chan, common)
+#define to_dma_tx_descriptor(tx) \
+	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
+#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
 	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
 			   cond, delay_us, timeout_us)
 
 /* IO accessors */
-static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
+static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
 {
 	return ioread32(chan->xdev->regs + reg);
 }
 
-static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value)
+static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
 {
 	iowrite32(value, chan->xdev->regs + reg);
 }
 
-static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg,
+static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
 				   u32 value)
 {
-	vdma_write(chan, chan->desc_offset + reg, value);
+	dma_write(chan, chan->desc_offset + reg, value);
 }
 
-static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg)
+static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
 {
-	return vdma_read(chan, chan->ctrl_offset + reg);
+	return dma_read(chan, chan->ctrl_offset + reg);
 }
 
-static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg,
+static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
 				   u32 value)
 {
-	vdma_write(chan, chan->ctrl_offset + reg, value);
+	dma_write(chan, chan->ctrl_offset + reg, value);
 }
 
-static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg,
+static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
 				 u32 clr)
 {
-	vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr);
+	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
 }
 
-static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
+static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
 				 u32 set)
 {
-	vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set);
+	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
+}
+
+/**
+ * vdma_desc_write_64 - 64-bit descriptor write
+ * @chan: Driver specific VDMA channel
+ * @reg: Register to write
+ * @value_lsb: lower 32 bits of the descriptor address
+ * @value_msb: upper 32 bits of the descriptor address
+ *
+ * Since the VDMA driver writes to a register offset which is not a
+ * multiple of 64 bits (e.g. 0x5c), the value is written as two separate
+ * 32-bit writes instead of a single 64-bit register write.
+ */
+static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
+				      u32 value_lsb, u32 value_msb)
+{
+	/* Write the lsb 32 bits */
+	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
+
+	/* Write the msb 32 bits */
+	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
 }
 
 /* -----------------------------------------------------------------------------
@@ -305,16 +460,37 @@
 
 /**
  * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific VDMA channel
+ * @chan: Driver specific DMA channel
  *
  * Return: The allocated segment on success and NULL on failure.
  */
 static struct xilinx_vdma_tx_segment *
-xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
+xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 {
 	struct xilinx_vdma_tx_segment *segment;
 	dma_addr_t phys;
 
+	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+	if (!segment)
+		return NULL;
+
+	segment->phys = phys;
+
+	return segment;
+}
+
+/**
+ * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_cdma_tx_segment *
+xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+	struct xilinx_cdma_tx_segment *segment;
+	dma_addr_t phys;
+
 	segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
 	if (!segment)
 		return NULL;
@@ -326,26 +502,70 @@
 }
 
 /**
- * xilinx_vdma_free_tx_segment - Free transaction segment
- * @chan: Driver specific VDMA channel
- * @segment: VDMA transaction segment
+ * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
  */
-static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan,
+static struct xilinx_axidma_tx_segment *
+xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+	struct xilinx_axidma_tx_segment *segment;
+	dma_addr_t phys;
+
+	segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
+	if (!segment)
+		return NULL;
+
+	memset(segment, 0, sizeof(*segment));
+	segment->phys = phys;
+
+	return segment;
+}
+
+/**
+ * xilinx_dma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
+				struct xilinx_axidma_tx_segment *segment)
+{
+	dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_cdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
+				struct xilinx_cdma_tx_segment *segment)
+{
+	dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_vdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
 					struct xilinx_vdma_tx_segment *segment)
 {
 	dma_pool_free(chan->desc_pool, segment, segment->phys);
 }
 
 /**
- * xilinx_vdma_tx_descriptor - Allocate transaction descriptor
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific DMA channel
  *
  * Return: The allocated descriptor on success and NULL on failure.
  */
-static struct xilinx_vdma_tx_descriptor *
-xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
+static struct xilinx_dma_tx_descriptor *
+xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
 {
-	struct xilinx_vdma_tx_descriptor *desc;
+	struct xilinx_dma_tx_descriptor *desc;
 
 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 	if (!desc)
@@ -357,22 +577,38 @@
 }
 
 /**
- * xilinx_vdma_free_tx_descriptor - Free transaction descriptor
- * @chan: Driver specific VDMA channel
- * @desc: VDMA transaction descriptor
+ * xilinx_dma_free_tx_descriptor - Free transaction descriptor
+ * @chan: Driver specific DMA channel
+ * @desc: DMA transaction descriptor
  */
 static void
-xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan,
-			       struct xilinx_vdma_tx_descriptor *desc)
+xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
+			       struct xilinx_dma_tx_descriptor *desc)
 {
 	struct xilinx_vdma_tx_segment *segment, *next;
+	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
+	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
 
 	if (!desc)
 		return;
 
-	list_for_each_entry_safe(segment, next, &desc->segments, node) {
-		list_del(&segment->node);
-		xilinx_vdma_free_tx_segment(chan, segment);
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+		list_for_each_entry_safe(segment, next, &desc->segments, node) {
+			list_del(&segment->node);
+			xilinx_vdma_free_tx_segment(chan, segment);
+		}
+	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+		list_for_each_entry_safe(cdma_segment, cdma_next,
+					 &desc->segments, node) {
+			list_del(&cdma_segment->node);
+			xilinx_cdma_free_tx_segment(chan, cdma_segment);
+		}
+	} else {
+		list_for_each_entry_safe(axidma_segment, axidma_next,
+					 &desc->segments, node) {
+			list_del(&axidma_segment->node);
+			xilinx_dma_free_tx_segment(chan, axidma_segment);
+		}
 	}
 
 	kfree(desc);
@@ -381,60 +617,62 @@
 /* Required functions */
 
 /**
- * xilinx_vdma_free_desc_list - Free descriptors list
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_free_desc_list - Free descriptors list
+ * @chan: Driver specific DMA channel
  * @list: List to parse and delete the descriptor
  */
-static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan,
+static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
 					struct list_head *list)
 {
-	struct xilinx_vdma_tx_descriptor *desc, *next;
+	struct xilinx_dma_tx_descriptor *desc, *next;
 
 	list_for_each_entry_safe(desc, next, list, node) {
 		list_del(&desc->node);
-		xilinx_vdma_free_tx_descriptor(chan, desc);
+		xilinx_dma_free_tx_descriptor(chan, desc);
 	}
 }
 
 /**
- * xilinx_vdma_free_descriptors - Free channel descriptors
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific DMA channel
  */
-static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->lock, flags);
 
-	xilinx_vdma_free_desc_list(chan, &chan->pending_list);
-	xilinx_vdma_free_desc_list(chan, &chan->done_list);
-	xilinx_vdma_free_desc_list(chan, &chan->active_list);
+	xilinx_dma_free_desc_list(chan, &chan->pending_list);
+	xilinx_dma_free_desc_list(chan, &chan->done_list);
+	xilinx_dma_free_desc_list(chan, &chan->active_list);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
- * xilinx_vdma_free_chan_resources - Free channel resources
+ * xilinx_dma_free_chan_resources - Free channel resources
  * @dchan: DMA channel
  */
-static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan)
+static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
 {
-	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 
 	dev_dbg(chan->dev, "Free all channel resources.\n");
 
-	xilinx_vdma_free_descriptors(chan);
+	xilinx_dma_free_descriptors(chan);
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+		xilinx_dma_free_tx_segment(chan, chan->seg_v);
 	dma_pool_destroy(chan->desc_pool);
 	chan->desc_pool = NULL;
 }
 
 /**
- * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific DMA channel
  */
-static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
 {
-	struct xilinx_vdma_tx_descriptor *desc, *next;
+	struct xilinx_dma_tx_descriptor *desc, *next;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->lock, flags);
@@ -457,32 +695,32 @@
 
 		/* Run any dependencies, then free the descriptor */
 		dma_run_dependencies(&desc->async_tx);
-		xilinx_vdma_free_tx_descriptor(chan, desc);
+		xilinx_dma_free_tx_descriptor(chan, desc);
 	}
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
- * xilinx_vdma_do_tasklet - Schedule completion tasklet
- * @data: Pointer to the Xilinx VDMA channel structure
+ * xilinx_dma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx DMA channel structure
  */
-static void xilinx_vdma_do_tasklet(unsigned long data)
+static void xilinx_dma_do_tasklet(unsigned long data)
 {
-	struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data;
+	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
 
-	xilinx_vdma_chan_desc_cleanup(chan);
+	xilinx_dma_chan_desc_cleanup(chan);
 }
 
 /**
- * xilinx_vdma_alloc_chan_resources - Allocate channel resources
+ * xilinx_dma_alloc_chan_resources - Allocate channel resources
  * @dchan: DMA channel
  *
  * Return: '0' on success and failure value on error
  */
-static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
+static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 {
-	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 
 	/* Has this channel already been allocated? */
 	if (chan->desc_pool)
@@ -492,10 +730,26 @@
 	 * We need the descriptor to be aligned to 64 bytes
 	 * to meet the Xilinx VDMA specification requirement.
 	 */
-	chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
-				chan->dev,
-				sizeof(struct xilinx_vdma_tx_segment),
-				__alignof__(struct xilinx_vdma_tx_segment), 0);
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
+				   chan->dev,
+				   sizeof(struct xilinx_axidma_tx_segment),
+				   __alignof__(struct xilinx_axidma_tx_segment),
+				   0);
+	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
+				   chan->dev,
+				   sizeof(struct xilinx_cdma_tx_segment),
+				   __alignof__(struct xilinx_cdma_tx_segment),
+				   0);
+	} else {
+		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
+				     chan->dev,
+				     sizeof(struct xilinx_vdma_tx_segment),
+				     __alignof__(struct xilinx_vdma_tx_segment),
+				     0);
+	}
+
 	if (!chan->desc_pool) {
 		dev_err(chan->dev,
 			"unable to allocate channel %d descriptor pool\n",
@@ -503,110 +757,160 @@
 		return -ENOMEM;
 	}
 
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+		/*
+		 * For the AXI DMA case, after submitting a pending_list, keep
+		 * an extra segment allocated so that the "next descriptor"
+		 * pointer on the tail descriptor always points to a
+		 * valid descriptor, even when paused after reaching taildesc.
+		 * This way, it is possible to issue additional
+		 * transfers without halting and restarting the channel.
+		 */
+		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
+
 	dma_cookie_init(dchan);
+
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		/* For AXI DMA, resetting one channel will reset the
+		 * other channel as well, so enable the interrupts here.
+		 */
+		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+			      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+	}
+
+	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+			     XILINX_CDMA_CR_SGMODE);
+
 	return 0;
 }
 
 /**
- * xilinx_vdma_tx_status - Get VDMA transaction status
+ * xilinx_dma_tx_status - Get DMA transaction status
  * @dchan: DMA channel
  * @cookie: Transaction identifier
  * @txstate: Transaction state
  *
  * Return: DMA transaction status
  */
-static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
+static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 					dma_cookie_t cookie,
 					struct dma_tx_state *txstate)
 {
-	return dma_cookie_status(dchan, cookie, txstate);
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_axidma_tx_segment *segment;
+	struct xilinx_axidma_desc_hw *hw;
+	enum dma_status ret;
+	unsigned long flags;
+	u32 residue = 0;
+
+	ret = dma_cookie_status(dchan, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		spin_lock_irqsave(&chan->lock, flags);
+
+		desc = list_last_entry(&chan->active_list,
+				       struct xilinx_dma_tx_descriptor, node);
+		if (chan->has_sg) {
+			list_for_each_entry(segment, &desc->segments, node) {
+				hw = &segment->hw;
+				residue += (hw->control - hw->status) &
+					   XILINX_DMA_MAX_TRANS_LEN;
+			}
+		}
+		spin_unlock_irqrestore(&chan->lock, flags);
+
+		chan->residue = residue;
+		dma_set_residue(txstate, chan->residue);
+	}
+
+	return ret;
 }
 
 /**
- * xilinx_vdma_is_running - Check if VDMA channel is running
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_is_running - Check if DMA channel is running
+ * @chan: Driver specific DMA channel
  *
  * Return: '1' if running, '0' if not.
  */
-static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan)
+static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
 {
-	return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
-		 XILINX_VDMA_DMASR_HALTED) &&
-		(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
-		 XILINX_VDMA_DMACR_RUNSTOP);
+	return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+		 XILINX_DMA_DMASR_HALTED) &&
+		(dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
+		 XILINX_DMA_DMACR_RUNSTOP);
 }
 
 /**
- * xilinx_vdma_is_idle - Check if VDMA channel is idle
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_is_idle - Check if DMA channel is idle
+ * @chan: Driver specific DMA channel
  *
  * Return: '1' if idle, '0' if not.
  */
-static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
+static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
 {
-	return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
-		XILINX_VDMA_DMASR_IDLE;
+	return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+		XILINX_DMA_DMASR_IDLE;
 }
 
 /**
- * xilinx_vdma_halt - Halt VDMA channel
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_halt - Halt DMA channel
+ * @chan: Driver specific DMA channel
  */
-static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
 {
 	int err;
 	u32 val;
 
-	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
+	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
 
 	/* Wait for the hardware to halt */
-	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
-				      (val & XILINX_VDMA_DMASR_HALTED), 0,
-				      XILINX_VDMA_LOOP_COUNT);
+	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+				      (val & XILINX_DMA_DMASR_HALTED), 0,
+				      XILINX_DMA_LOOP_COUNT);
 
 	if (err) {
 		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
-			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
 		chan->err = true;
 	}
-
-	return;
 }
 
 /**
- * xilinx_vdma_start - Start VDMA channel
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_start - Start DMA channel
+ * @chan: Driver specific DMA channel
  */
-static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_start(struct xilinx_dma_chan *chan)
 {
 	int err;
 	u32 val;
 
-	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
+	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
 
 	/* Wait for the hardware to start */
-	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
-				      !(val & XILINX_VDMA_DMASR_HALTED), 0,
-				      XILINX_VDMA_LOOP_COUNT);
+	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+				      !(val & XILINX_DMA_DMASR_HALTED), 0,
+				      XILINX_DMA_LOOP_COUNT);
 
 	if (err) {
 		dev_err(chan->dev, "Cannot start channel %p: %x\n",
-			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
 
 		chan->err = true;
 	}
-
-	return;
 }
 
 /**
  * xilinx_vdma_start_transfer - Starts VDMA transfer
  * @chan: Driver specific channel struct pointer
  */
-static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
+static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
-	struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
+	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg;
 	struct xilinx_vdma_tx_segment *tail_segment;
 
@@ -618,16 +922,16 @@
 		return;
 
 	desc = list_first_entry(&chan->pending_list,
-				struct xilinx_vdma_tx_descriptor, node);
+				struct xilinx_dma_tx_descriptor, node);
 	tail_desc = list_last_entry(&chan->pending_list,
-				    struct xilinx_vdma_tx_descriptor, node);
+				    struct xilinx_dma_tx_descriptor, node);
 
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_vdma_tx_segment, node);
 
 	/* If it is SG mode and hardware is busy, cannot submit */
-	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
-	    !xilinx_vdma_is_idle(chan)) {
+	if (chan->has_sg && xilinx_dma_is_running(chan) &&
+	    !xilinx_dma_is_idle(chan)) {
 		dev_dbg(chan->dev, "DMA controller still busy\n");
 		return;
 	}
@@ -637,19 +941,19 @@
 	 * done, start new transfers
 	 */
 	if (chan->has_sg)
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
+		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
 				desc->async_tx.phys);
 
 	/* Configure the hardware using info in the config structure */
-	reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
+	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
 
 	if (config->frm_cnt_en)
-		reg |= XILINX_VDMA_DMACR_FRAMECNT_EN;
+		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
 	else
-		reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
+		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
 	/* Configure channel to allow the given number of frame buffers */
-	vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
+	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
 			chan->desc_pendingcount);
 
 	/*
@@ -657,45 +961,53 @@
 	 * In direct register mode, if not parking, enable circular mode
 	 */
 	if (chan->has_sg || !config->park)
-		reg |= XILINX_VDMA_DMACR_CIRC_EN;
+		reg |= XILINX_DMA_DMACR_CIRC_EN;
 
 	if (config->park)
-		reg &= ~XILINX_VDMA_DMACR_CIRC_EN;
+		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
 
-	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg);
+	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
 	if (config->park && (config->park_frm >= 0) &&
 			(config->park_frm < chan->num_frms)) {
 		if (chan->direction == DMA_MEM_TO_DEV)
-			vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
+			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
 				config->park_frm <<
-					XILINX_VDMA_PARK_PTR_RD_REF_SHIFT);
+					XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
 		else
-			vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
+			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
 				config->park_frm <<
-					XILINX_VDMA_PARK_PTR_WR_REF_SHIFT);
+					XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
 	}
 
 	/* Start the hardware */
-	xilinx_vdma_start(chan);
+	xilinx_dma_start(chan);
 
 	if (chan->err)
 		return;
 
 	/* Start the transfer */
 	if (chan->has_sg) {
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
+		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
 				tail_segment->phys);
 	} else {
 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
 		int i = 0;
 
-		list_for_each_entry(desc, &chan->pending_list, node) {
-			segment = list_first_entry(&desc->segments,
-					   struct xilinx_vdma_tx_segment, node);
-			vdma_desc_write(chan,
+		if (chan->desc_submitcount < chan->num_frms)
+			i = chan->desc_submitcount;
+
+		list_for_each_entry(segment, &desc->segments, node) {
+			if (chan->ext_addr)
+				vdma_desc_write_64(chan,
+					XILINX_VDMA_REG_START_ADDRESS_64(i++),
+					segment->hw.buf_addr,
+					segment->hw.buf_addr_msb);
+			else
+				vdma_desc_write(chan,
 					XILINX_VDMA_REG_START_ADDRESS(i++),
 					segment->hw.buf_addr);
+
 			last = segment;
 		}
 
@@ -703,10 +1015,79 @@
 			return;
 
 		/* HW expects these parameters to be the same for one transaction */
-		vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
-		vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE,
+		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
 				last->hw.stride);
-		vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
+		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+	}
+
+	if (!chan->has_sg) {
+		list_del(&desc->node);
+		list_add_tail(&desc->node, &chan->active_list);
+		chan->desc_submitcount++;
+		chan->desc_pendingcount--;
+		if (chan->desc_submitcount == chan->num_frms)
+			chan->desc_submitcount = 0;
+	} else {
+		list_splice_tail_init(&chan->pending_list, &chan->active_list);
+		chan->desc_pendingcount = 0;
+	}
+}
+
+/**
+ * xilinx_cdma_start_transfer - Starts CDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+	struct xilinx_cdma_tx_segment *tail_segment;
+	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
+
+	if (chan->err)
+		return;
+
+	if (list_empty(&chan->pending_list))
+		return;
+
+	head_desc = list_first_entry(&chan->pending_list,
+				     struct xilinx_dma_tx_descriptor, node);
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_dma_tx_descriptor, node);
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_cdma_tx_segment, node);
+
+	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+		ctrl_reg |= chan->desc_pendingcount <<
+				XILINX_DMA_CR_COALESCE_SHIFT;
+		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
+	}
+
+	if (chan->has_sg) {
+		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+			   head_desc->async_tx.phys);
+
+		/* Update tail ptr register which will start the transfer */
+		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+			       tail_segment->phys);
+	} else {
+		/* In simple mode */
+		struct xilinx_cdma_tx_segment *segment;
+		struct xilinx_cdma_desc_hw *hw;
+
+		segment = list_first_entry(&head_desc->segments,
+					   struct xilinx_cdma_tx_segment,
+					   node);
+
+		hw = &segment->hw;
+
+		dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
+		dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
+
+		/* Start the transfer */
+		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+				hw->control & XILINX_DMA_MAX_TRANS_LEN);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -714,28 +1095,113 @@
 }
 
 /**
- * xilinx_vdma_issue_pending - Issue pending transactions
+ * xilinx_dma_start_transfer - Starts DMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+{
+	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+	struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
+	u32 reg;
+
+	if (chan->err)
+		return;
+
+	if (list_empty(&chan->pending_list))
+		return;
+
+	/* If it is SG mode and hardware is busy, cannot submit */
+	if (chan->has_sg && xilinx_dma_is_running(chan) &&
+	    !xilinx_dma_is_idle(chan)) {
+		dev_dbg(chan->dev, "DMA controller still busy\n");
+		return;
+	}
+
+	head_desc = list_first_entry(&chan->pending_list,
+				     struct xilinx_dma_tx_descriptor, node);
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_dma_tx_descriptor, node);
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_axidma_tx_segment, node);
+
+	old_head = list_first_entry(&head_desc->segments,
+				struct xilinx_axidma_tx_segment, node);
+	new_head = chan->seg_v;
+	/* Copy Buffer Descriptor fields. */
+	new_head->hw = old_head->hw;
+
+	/* Swap in the reserved segment; old head becomes the new reserve */
+	list_replace_init(&old_head->node, &new_head->node);
+	chan->seg_v = old_head;
+
+	tail_segment->hw.next_desc = chan->seg_v->phys;
+	head_desc->async_tx.phys = new_head->phys;
+
+	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+		reg |= chan->desc_pendingcount <<
+				  XILINX_DMA_CR_COALESCE_SHIFT;
+		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+	}
+
+	if (chan->has_sg)
+		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+			       head_desc->async_tx.phys);
+
+	xilinx_dma_start(chan);
+
+	if (chan->err)
+		return;
+
+	/* Start the transfer */
+	if (chan->has_sg) {
+		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+			       tail_segment->phys);
+	} else {
+		struct xilinx_axidma_tx_segment *segment;
+		struct xilinx_axidma_desc_hw *hw;
+
+		segment = list_first_entry(&head_desc->segments,
+					   struct xilinx_axidma_tx_segment,
+					   node);
+		hw = &segment->hw;
+
+		dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+
+		/* Start the transfer */
+		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
+	}
+
+	list_splice_tail_init(&chan->pending_list, &chan->active_list);
+	chan->desc_pendingcount = 0;
+}
+
+/**
+ * xilinx_dma_issue_pending - Issue pending transactions
  * @dchan: DMA channel
  */
-static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
+static void xilinx_dma_issue_pending(struct dma_chan *dchan)
 {
-	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->lock, flags);
-	xilinx_vdma_start_transfer(chan);
+	chan->start_transfer(chan);
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
- * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete
+ * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
  * @chan : xilinx DMA channel
  *
  * CONTEXT: hardirq
  */
-static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
 {
-	struct xilinx_vdma_tx_descriptor *desc, *next;
+	struct xilinx_dma_tx_descriptor *desc, *next;
 
 	/* This function was invoked with lock held */
 	if (list_empty(&chan->active_list))
@@ -749,27 +1215,27 @@
 }
 
 /**
- * xilinx_vdma_reset - Reset VDMA channel
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_reset - Reset DMA channel
+ * @chan: Driver specific DMA channel
  *
  * Return: '0' on success and failure value on error
  */
-static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
+static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
 {
 	int err;
 	u32 tmp;
 
-	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
+	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
 
 	/* Wait for the hardware to finish reset */
-	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
-				      !(tmp & XILINX_VDMA_DMACR_RESET), 0,
-				      XILINX_VDMA_LOOP_COUNT);
+	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
+				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
+				      XILINX_DMA_LOOP_COUNT);
 
 	if (err) {
 		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
-			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
-			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
+			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
 		return -ETIMEDOUT;
 	}
 
@@ -779,48 +1245,48 @@
 }
 
 /**
- * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
+ * @chan: Driver specific DMA channel
  *
  * Return: '0' on success and failure value on error
  */
-static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan)
+static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
 {
 	int err;
 
 	/* Reset VDMA */
-	err = xilinx_vdma_reset(chan);
+	err = xilinx_dma_reset(chan);
 	if (err)
 		return err;
 
 	/* Enable interrupts */
-	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
-		      XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+		      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
 
 	return 0;
 }
 
 /**
- * xilinx_vdma_irq_handler - VDMA Interrupt handler
+ * xilinx_dma_irq_handler - DMA Interrupt handler
  * @irq: IRQ number
- * @data: Pointer to the Xilinx VDMA channel structure
+ * @data: Pointer to the Xilinx DMA channel structure
  *
  * Return: IRQ_HANDLED/IRQ_NONE
  */
-static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
+static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
 {
-	struct xilinx_vdma_chan *chan = data;
+	struct xilinx_dma_chan *chan = data;
 	u32 status;
 
 	/* Read the status and ack the interrupts. */
-	status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR);
-	if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK))
+	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
+	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
 		return IRQ_NONE;
 
-	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
-			status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+			status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
 
-	if (status & XILINX_VDMA_DMASR_ERR_IRQ) {
+	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
 		/*
 		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
 		 * error is recoverable, ignore it. Otherwise flag the error.
@@ -828,22 +1294,23 @@
 		 * Only recoverable errors can be cleared in the DMASR register,
 		 * make sure not to write to other error bits to 1.
 		 */
-		u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK;
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
-				errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK);
+		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
+
+		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+				errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
 
 		if (!chan->flush_on_fsync ||
-		    (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) {
+		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
 			dev_err(chan->dev,
 				"Channel %p has errors %x, cdr %x tdr %x\n",
 				chan, errors,
-				vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC),
-				vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC));
+				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
+				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
 			chan->err = true;
 		}
 	}
 
-	if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) {
+	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
 		/*
 		 * Device takes too long to do the transfer when user requires
 		 * responsiveness.
@@ -851,10 +1318,10 @@
 		dev_dbg(chan->dev, "Inter-packet latency too long\n");
 	}
 
-	if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
 		spin_lock(&chan->lock);
-		xilinx_vdma_complete_descriptor(chan);
-		xilinx_vdma_start_transfer(chan);
+		xilinx_dma_complete_descriptor(chan);
+		chan->start_transfer(chan);
 		spin_unlock(&chan->lock);
 	}
 
@@ -867,11 +1334,13 @@
  * @chan: Driver specific dma channel
  * @desc: dma transaction descriptor
  */
-static void append_desc_queue(struct xilinx_vdma_chan *chan,
-			      struct xilinx_vdma_tx_descriptor *desc)
+static void append_desc_queue(struct xilinx_dma_chan *chan,
+			      struct xilinx_dma_tx_descriptor *desc)
 {
 	struct xilinx_vdma_tx_segment *tail_segment;
-	struct xilinx_vdma_tx_descriptor *tail_desc;
+	struct xilinx_dma_tx_descriptor *tail_desc;
+	struct xilinx_axidma_tx_segment *axidma_tail_segment;
+	struct xilinx_cdma_tx_segment *cdma_tail_segment;
 
 	if (list_empty(&chan->pending_list))
 		goto append;
@@ -881,10 +1350,23 @@
 	 * that already exists in memory.
 	 */
 	tail_desc = list_last_entry(&chan->pending_list,
-				    struct xilinx_vdma_tx_descriptor, node);
-	tail_segment = list_last_entry(&tail_desc->segments,
-				       struct xilinx_vdma_tx_segment, node);
-	tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+				    struct xilinx_dma_tx_descriptor, node);
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+		tail_segment = list_last_entry(&tail_desc->segments,
+					       struct xilinx_vdma_tx_segment,
+					       node);
+		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+		cdma_tail_segment = list_last_entry(&tail_desc->segments,
+						struct xilinx_cdma_tx_segment,
+						node);
+		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+	} else {
+		axidma_tail_segment = list_last_entry(&tail_desc->segments,
+					       struct xilinx_axidma_tx_segment,
+					       node);
+		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+	}
 
 	/*
 	 * Add the software descriptor and all children to the list
@@ -894,22 +1376,23 @@
 	list_add_tail(&desc->node, &chan->pending_list);
 	chan->desc_pendingcount++;
 
-	if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
+	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
 		dev_dbg(chan->dev, "desc pendingcount is too high\n");
 		chan->desc_pendingcount = chan->num_frms;
 	}
 }
 
 /**
- * xilinx_vdma_tx_submit - Submit DMA transaction
+ * xilinx_dma_tx_submit - Submit DMA transaction
  * @tx: Async transaction descriptor
  *
  * Return: cookie value on success and failure value on error
  */
-static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx);
-	struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan);
+	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
 	dma_cookie_t cookie;
 	unsigned long flags;
 	int err;
@@ -919,7 +1402,7 @@
 		 * If reset fails, need to hard reset the system.
 		 * Channel is no longer functional
 		 */
-		err = xilinx_vdma_chan_reset(chan);
+		err = xilinx_dma_chan_reset(chan);
 		if (err < 0)
 			return err;
 	}
@@ -950,8 +1433,8 @@
 				 struct dma_interleaved_template *xt,
 				 unsigned long flags)
 {
-	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
-	struct xilinx_vdma_tx_descriptor *desc;
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
 	struct xilinx_vdma_tx_segment *segment, *prev = NULL;
 	struct xilinx_vdma_desc_hw *hw;
 
@@ -965,12 +1448,12 @@
 		return NULL;
 
 	/* Allocate a transaction descriptor. */
-	desc = xilinx_vdma_alloc_tx_descriptor(chan);
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
 	if (!desc)
 		return NULL;
 
 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
-	desc->async_tx.tx_submit = xilinx_vdma_tx_submit;
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
 	async_tx_ack(&desc->async_tx);
 
 	/* Allocate the link descriptor from DMA pool */
@@ -983,14 +1466,25 @@
 	hw->vsize = xt->numf;
 	hw->hsize = xt->sgl[0].size;
 	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
-			XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
+			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
 	hw->stride |= chan->config.frm_dly <<
-			XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
+			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
 
-	if (xt->dir != DMA_MEM_TO_DEV)
-		hw->buf_addr = xt->dst_start;
-	else
-		hw->buf_addr = xt->src_start;
+	if (xt->dir != DMA_MEM_TO_DEV) {
+		if (chan->ext_addr) {
+			hw->buf_addr = lower_32_bits(xt->dst_start);
+			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
+		} else {
+			hw->buf_addr = xt->dst_start;
+		}
+	} else {
+		if (chan->ext_addr) {
+			hw->buf_addr = lower_32_bits(xt->src_start);
+			hw->buf_addr_msb = upper_32_bits(xt->src_start);
+		} else {
+			hw->buf_addr = xt->src_start;
+		}
+	}
 
 	/* Insert the segment into the descriptor segments list. */
 	list_add_tail(&segment->node, &desc->segments);
@@ -1005,29 +1499,194 @@
 	return &desc->async_tx;
 
 error:
-	xilinx_vdma_free_tx_descriptor(chan, desc);
+	xilinx_dma_free_tx_descriptor(chan, desc);
 	return NULL;
 }
 
 /**
- * xilinx_vdma_terminate_all - Halt the channel and free descriptors
- * @chan: Driver specific VDMA Channel pointer
+ * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: destination address
+ * @dma_src: source address
+ * @len: transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
  */
-static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
+static struct dma_async_tx_descriptor *
+xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
+			dma_addr_t dma_src, size_t len, unsigned long flags)
 {
-	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_cdma_tx_segment *segment, *prev;
+	struct xilinx_cdma_desc_hw *hw;
+
+	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+		return NULL;
+
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	/* Allocate the link descriptor from DMA pool */
+	segment = xilinx_cdma_alloc_tx_segment(chan);
+	if (!segment)
+		goto error;
+
+	hw = &segment->hw;
+	hw->control = len;
+	hw->src_addr = dma_src;
+	hw->dest_addr = dma_dst;
+
+	/* Fill the previous next descriptor with current */
+	prev = list_last_entry(&desc->segments,
+			       struct xilinx_cdma_tx_segment, node);
+	prev->hw.next_desc = segment->phys;
+
+	/* Insert the segment into the descriptor segments list. */
+	list_add_tail(&segment->node, &desc->segments);
+
+	prev = segment;
+
+	/* Link the last hardware descriptor with the first. */
+	segment = list_first_entry(&desc->segments,
+				struct xilinx_cdma_tx_segment, node);
+	desc->async_tx.phys = segment->phys;
+	prev->hw.next_desc = segment->phys;
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
+/**
+ * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
+	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_transfer_direction direction, unsigned long flags,
+	void *context)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
+	u32 *app_w = (u32 *)context;
+	struct scatterlist *sg;
+	size_t copy;
+	size_t sg_used;
+	unsigned int i;
+
+	if (!is_slave_direction(direction))
+		return NULL;
+
+	/* Allocate a transaction descriptor. */
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	/* Build transactions using information in the scatter gather list */
+	for_each_sg(sgl, sg, sg_len, i) {
+		sg_used = 0;
+
+		/* Loop until the entire scatterlist entry is used */
+		while (sg_used < sg_dma_len(sg)) {
+			struct xilinx_axidma_desc_hw *hw;
+
+			/* Get a free segment */
+			segment = xilinx_axidma_alloc_tx_segment(chan);
+			if (!segment)
+				goto error;
+
+			/*
+			 * Calculate the maximum number of bytes to transfer,
+			 * making sure it is less than the hw limit
+			 */
+			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+				     XILINX_DMA_MAX_TRANS_LEN);
+			hw = &segment->hw;
+
+			/* Fill in the descriptor */
+			hw->buf_addr = sg_dma_address(sg) + sg_used;
+
+			hw->control = copy;
+
+			if (chan->direction == DMA_MEM_TO_DEV) {
+				if (app_w)
+					memcpy(hw->app, app_w, sizeof(u32) *
+					       XILINX_DMA_NUM_APP_WORDS);
+			}
+
+			if (prev)
+				prev->hw.next_desc = segment->phys;
+
+			prev = segment;
+			sg_used += copy;
+
+			/*
+			 * Insert the segment into the descriptor segments
+			 * list.
+			 */
+			list_add_tail(&segment->node, &desc->segments);
+		}
+	}
+
+	segment = list_first_entry(&desc->segments,
+				   struct xilinx_axidma_tx_segment, node);
+	desc->async_tx.phys = segment->phys;
+	prev->hw.next_desc = segment->phys;
+
+	/* For DMA_MEM_TO_DEV transfers, set SOP first and EOP last */
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		segment->hw.control |= XILINX_DMA_BD_SOP;
+		segment = list_last_entry(&desc->segments,
+					  struct xilinx_axidma_tx_segment,
+					  node);
+		segment->hw.control |= XILINX_DMA_BD_EOP;
+	}
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
+/**
+ * xilinx_dma_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific DMA channel pointer
+ */
+static int xilinx_dma_terminate_all(struct dma_chan *dchan)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 
 	/* Halt the DMA engine */
-	xilinx_vdma_halt(chan);
+	xilinx_dma_halt(chan);
 
 	/* Remove and free all of the descriptors in the lists */
-	xilinx_vdma_free_descriptors(chan);
+	xilinx_dma_free_descriptors(chan);
 
 	return 0;
 }
 
 /**
- * xilinx_vdma_channel_set_config - Configure VDMA channel
+ * xilinx_dma_channel_set_config - Configure VDMA channel
  * Run-time configuration for Axi VDMA, supports:
  * . halt the channel
  * . configure interrupt coalescing and inter-packet delay threshold
@@ -1042,13 +1701,13 @@
 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
 					struct xilinx_vdma_config *cfg)
 {
-	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 	u32 dmacr;
 
 	if (cfg->reset)
-		return xilinx_vdma_chan_reset(chan);
+		return xilinx_dma_chan_reset(chan);
 
-	dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
+	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
 
 	chan->config.frm_dly = cfg->frm_dly;
 	chan->config.park = cfg->park;
@@ -1058,8 +1717,8 @@
 	chan->config.master = cfg->master;
 
 	if (cfg->gen_lock && chan->genlock) {
-		dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN;
-		dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT;
+		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
 	}
 
 	chan->config.frm_cnt_en = cfg->frm_cnt_en;
@@ -1071,21 +1730,21 @@
 	chan->config.coalesc = cfg->coalesc;
 	chan->config.delay = cfg->delay;
 
-	if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) {
-		dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT;
+	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
 		chan->config.coalesc = cfg->coalesc;
 	}
 
-	if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) {
-		dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT;
+	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
 		chan->config.delay = cfg->delay;
 	}
 
 	/* FSync Source selection */
-	dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK;
-	dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT;
+	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
+	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
 
-	vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr);
+	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
 
 	return 0;
 }
@@ -1096,14 +1755,14 @@
  */
 
 /**
- * xilinx_vdma_chan_remove - Per Channel remove function
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_chan_remove - Per Channel remove function
+ * @chan: Driver specific DMA channel
  */
-static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
 {
 	/* Disable all interrupts */
-	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR,
-		      XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+		      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
 
 	if (chan->irq > 0)
 		free_irq(chan->irq, chan);
@@ -1113,8 +1772,197 @@
 	list_del(&chan->common.device_node);
 }
 
+static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+			    struct clk **tx_clk, struct clk **rx_clk,
+			    struct clk **sg_clk, struct clk **tmp_clk)
+{
+	int err;
+
+	*tmp_clk = NULL;
+
+	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+	if (IS_ERR(*axi_clk)) {
+		err = PTR_ERR(*axi_clk);
+		dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+		return err;
+	}
+
+	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+	if (IS_ERR(*tx_clk))
+		*tx_clk = NULL;
+
+	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+	if (IS_ERR(*rx_clk))
+		*rx_clk = NULL;
+
+	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
+	if (IS_ERR(*sg_clk))
+		*sg_clk = NULL;
+
+	err = clk_prepare_enable(*axi_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+		return err;
+	}
+
+	err = clk_prepare_enable(*tx_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+		goto err_disable_axiclk;
+	}
+
+	err = clk_prepare_enable(*rx_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+		goto err_disable_txclk;
+	}
+
+	err = clk_prepare_enable(*sg_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err);
+		goto err_disable_rxclk;
+	}
+
+	return 0;
+
+err_disable_rxclk:
+	clk_disable_unprepare(*rx_clk);
+err_disable_txclk:
+	clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+	clk_disable_unprepare(*axi_clk);
+
+	return err;
+}
+
+static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+			    struct clk **dev_clk, struct clk **tmp_clk,
+			    struct clk **tmp1_clk, struct clk **tmp2_clk)
+{
+	int err;
+
+	*tmp_clk = NULL;
+	*tmp1_clk = NULL;
+	*tmp2_clk = NULL;
+
+	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+	if (IS_ERR(*axi_clk)) {
+		err = PTR_ERR(*axi_clk);
+		dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err);
+		return err;
+	}
+
+	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
+	if (IS_ERR(*dev_clk)) {
+		err = PTR_ERR(*dev_clk);
+		dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err);
+		return err;
+	}
+
+	err = clk_prepare_enable(*axi_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+		return err;
+	}
+
+	err = clk_prepare_enable(*dev_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err);
+		goto err_disable_axiclk;
+	}
+
+	return 0;
+
+err_disable_axiclk:
+	clk_disable_unprepare(*axi_clk);
+
+	return err;
+}
+
+static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+			    struct clk **tx_clk, struct clk **txs_clk,
+			    struct clk **rx_clk, struct clk **rxs_clk)
+{
+	int err;
+
+	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+	if (IS_ERR(*axi_clk)) {
+		err = PTR_ERR(*axi_clk);
+		dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+		return err;
+	}
+
+	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+	if (IS_ERR(*tx_clk))
+		*tx_clk = NULL;
+
+	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
+	if (IS_ERR(*txs_clk))
+		*txs_clk = NULL;
+
+	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+	if (IS_ERR(*rx_clk))
+		*rx_clk = NULL;
+
+	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
+	if (IS_ERR(*rxs_clk))
+		*rxs_clk = NULL;
+
+	err = clk_prepare_enable(*axi_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+		return err;
+	}
+
+	err = clk_prepare_enable(*tx_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+		goto err_disable_axiclk;
+	}
+
+	err = clk_prepare_enable(*txs_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
+		goto err_disable_txclk;
+	}
+
+	err = clk_prepare_enable(*rx_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+		goto err_disable_txsclk;
+	}
+
+	err = clk_prepare_enable(*rxs_clk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
+		goto err_disable_rxclk;
+	}
+
+	return 0;
+
+err_disable_rxclk:
+	clk_disable_unprepare(*rx_clk);
+err_disable_txsclk:
+	clk_disable_unprepare(*txs_clk);
+err_disable_txclk:
+	clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+	clk_disable_unprepare(*axi_clk);
+
+	return err;
+}
+
+static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
+{
+	clk_disable_unprepare(xdev->rxs_clk);
+	clk_disable_unprepare(xdev->rx_clk);
+	clk_disable_unprepare(xdev->txs_clk);
+	clk_disable_unprepare(xdev->tx_clk);
+	clk_disable_unprepare(xdev->axi_clk);
+}
+
 /**
- * xilinx_vdma_chan_probe - Per Channel Probing
+ * xilinx_dma_chan_probe - Per Channel Probing
  * It gets channel features from the device tree entry and
  * initializes special channel handling routines
  *
@@ -1123,10 +1971,10 @@
  *
  * Return: '0' on success and failure value on error
  */
-static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
+static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 				  struct device_node *node)
 {
-	struct xilinx_vdma_chan *chan;
+	struct xilinx_dma_chan *chan;
 	bool has_dre = false;
 	u32 value, width;
 	int err;
@@ -1140,6 +1988,7 @@
 	chan->xdev = xdev;
 	chan->has_sg = xdev->has_sg;
 	chan->desc_pendingcount = 0x0;
+	chan->ext_addr = xdev->ext_addr;
 
 	spin_lock_init(&chan->lock);
 	INIT_LIST_HEAD(&chan->pending_list);
@@ -1169,23 +2018,27 @@
 		chan->direction = DMA_MEM_TO_DEV;
 		chan->id = 0;
 
-		chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET;
-		chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
+		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
 
-		if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
-		    xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S)
-			chan->flush_on_fsync = true;
+			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
+				chan->flush_on_fsync = true;
+		}
 	} else if (of_device_is_compatible(node,
 					    "xlnx,axi-vdma-s2mm-channel")) {
 		chan->direction = DMA_DEV_TO_MEM;
 		chan->id = 1;
 
-		chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET;
-		chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
 
-		if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
-		    xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM)
-			chan->flush_on_fsync = true;
+			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
+				chan->flush_on_fsync = true;
+		}
 	} else {
 		dev_err(xdev->dev, "Invalid channel compatible node\n");
 		return -EINVAL;
@@ -1193,15 +2046,22 @@
 
 	/* Request the interrupt */
 	chan->irq = irq_of_parse_and_map(node, 0);
-	err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED,
-			  "xilinx-vdma-controller", chan);
+	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
+			  "xilinx-dma-controller", chan);
 	if (err) {
 		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
 		return err;
 	}
 
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+		chan->start_transfer = xilinx_dma_start_transfer;
+	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+		chan->start_transfer = xilinx_cdma_start_transfer;
+	else
+		chan->start_transfer = xilinx_vdma_start_transfer;
+
 	/* Initialize the tasklet */
-	tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet,
+	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
 			(unsigned long)chan);
 
 	/*
@@ -1214,7 +2074,7 @@
 	xdev->chan[chan->id] = chan;
 
 	/* Reset the channel */
-	err = xilinx_vdma_chan_reset(chan);
+	err = xilinx_dma_chan_reset(chan);
 	if (err < 0) {
 		dev_err(xdev->dev, "Reset channel failed\n");
 		return err;
@@ -1233,28 +2093,54 @@
 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
 						struct of_dma *ofdma)
 {
-	struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
+	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
 	int chan_id = dma_spec->args[0];
 
-	if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+	if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
 		return NULL;
 
 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
 }
 
+static const struct xilinx_dma_config axidma_config = {
+	.dmatype = XDMA_TYPE_AXIDMA,
+	.clk_init = axidma_clk_init,
+};
+
+static const struct xilinx_dma_config axicdma_config = {
+	.dmatype = XDMA_TYPE_CDMA,
+	.clk_init = axicdma_clk_init,
+};
+
+static const struct xilinx_dma_config axivdma_config = {
+	.dmatype = XDMA_TYPE_VDMA,
+	.clk_init = axivdma_clk_init,
+};
+
+static const struct of_device_id xilinx_dma_of_ids[] = {
+	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
+	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
+	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
+	{}
+};
+MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
+
 /**
- * xilinx_vdma_probe - Driver probe function
+ * xilinx_dma_probe - Driver probe function
  * @pdev: Pointer to the platform_device structure
  *
  * Return: '0' on success and failure value on error
  */
-static int xilinx_vdma_probe(struct platform_device *pdev)
+static int xilinx_dma_probe(struct platform_device *pdev)
 {
+	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
+			struct clk **, struct clk **, struct clk **)
+					= axivdma_clk_init;
 	struct device_node *node = pdev->dev.of_node;
-	struct xilinx_vdma_device *xdev;
-	struct device_node *child;
+	struct xilinx_dma_device *xdev;
+	struct device_node *child, *np = pdev->dev.of_node;
 	struct resource *io;
-	u32 num_frames;
+	u32 num_frames, addr_width;
 	int i, err;
 
 	/* Allocate and initialize the DMA engine structure */
@@ -1263,6 +2149,20 @@
 		return -ENOMEM;
 
 	xdev->dev = &pdev->dev;
+	if (np) {
+		const struct of_device_id *match;
+
+		match = of_match_node(xilinx_dma_of_ids, np);
+		if (match && match->data) {
+			xdev->dma_config = match->data;
+			clk_init = xdev->dma_config->clk_init;
+		}
+	}
+
+	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
+		       &xdev->rx_clk, &xdev->rxs_clk);
+	if (err)
+		return err;
 
 	/* Request and map I/O memory */
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1273,46 +2173,77 @@
 	/* Retrieve the DMA engine properties from the device tree */
 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
 
-	err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames);
-	if (err < 0) {
-		dev_err(xdev->dev, "missing xlnx,num-fstores property\n");
-		return err;
+	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+		err = of_property_read_u32(node, "xlnx,num-fstores",
+					   &num_frames);
+		if (err < 0) {
+			dev_err(xdev->dev,
+				"missing xlnx,num-fstores property\n");
+			return err;
+		}
+
+		err = of_property_read_u32(node, "xlnx,flush-fsync",
+					   &xdev->flush_on_fsync);
+		if (err < 0)
+			dev_warn(xdev->dev,
+				 "missing xlnx,flush-fsync property\n");
 	}
 
-	err = of_property_read_u32(node, "xlnx,flush-fsync",
-					&xdev->flush_on_fsync);
+	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
 	if (err < 0)
-		dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n");
+		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
+
+	if (addr_width > 32)
+		xdev->ext_addr = true;
+	else
+		xdev->ext_addr = false;
+
+	/* Set the dma mask bits */
+	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
 
 	/* Initialize the DMA engine */
 	xdev->common.dev = &pdev->dev;
 
 	INIT_LIST_HEAD(&xdev->common.channels);
-	dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
-	dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
+		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+	}
 
 	xdev->common.device_alloc_chan_resources =
-				xilinx_vdma_alloc_chan_resources;
+				xilinx_dma_alloc_chan_resources;
 	xdev->common.device_free_chan_resources =
-				xilinx_vdma_free_chan_resources;
-	xdev->common.device_prep_interleaved_dma =
+				xilinx_dma_free_chan_resources;
+	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
+	xdev->common.device_tx_status = xilinx_dma_tx_status;
+	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
+		/* Residue calculation is supported only by AXI DMA */
+		xdev->common.residue_granularity =
+					  DMA_RESIDUE_GRANULARITY_SEGMENT;
+	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+	} else {
+		xdev->common.device_prep_interleaved_dma =
 				xilinx_vdma_dma_prep_interleaved;
-	xdev->common.device_terminate_all = xilinx_vdma_terminate_all;
-	xdev->common.device_tx_status = xilinx_vdma_tx_status;
-	xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
+	}
 
 	platform_set_drvdata(pdev, xdev);
 
 	/* Initialize the channels */
 	for_each_child_of_node(node, child) {
-		err = xilinx_vdma_chan_probe(xdev, child);
+		err = xilinx_dma_chan_probe(xdev, child);
 		if (err < 0)
-			goto error;
+			goto disable_clks;
 	}
 
-	for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
-		if (xdev->chan[i])
-			xdev->chan[i]->num_frms = num_frames;
+	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+		for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
+			if (xdev->chan[i])
+				xdev->chan[i]->num_frms = num_frames;
+	}
 
 	/* Register the DMA engine with the core */
 	dma_async_device_register(&xdev->common);
@@ -1329,49 +2260,47 @@
 
 	return 0;
 
+disable_clks:
+	xdma_disable_allclks(xdev);
 error:
-	for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
+	for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
 		if (xdev->chan[i])
-			xilinx_vdma_chan_remove(xdev->chan[i]);
+			xilinx_dma_chan_remove(xdev->chan[i]);
 
 	return err;
 }
 
 /**
- * xilinx_vdma_remove - Driver remove function
+ * xilinx_dma_remove - Driver remove function
  * @pdev: Pointer to the platform_device structure
  *
  * Return: Always '0'
  */
-static int xilinx_vdma_remove(struct platform_device *pdev)
+static int xilinx_dma_remove(struct platform_device *pdev)
 {
-	struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev);
+	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
 	int i;
 
 	of_dma_controller_free(pdev->dev.of_node);
 
 	dma_async_device_unregister(&xdev->common);
 
-	for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
+	for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
 		if (xdev->chan[i])
-			xilinx_vdma_chan_remove(xdev->chan[i]);
+			xilinx_dma_chan_remove(xdev->chan[i]);
+
+	xdma_disable_allclks(xdev);
 
 	return 0;
 }
 
-static const struct of_device_id xilinx_vdma_of_ids[] = {
-	{ .compatible = "xlnx,axi-vdma-1.00.a",},
-	{}
-};
-MODULE_DEVICE_TABLE(of, xilinx_vdma_of_ids);
-
 static struct platform_driver xilinx_vdma_driver = {
 	.driver = {
 		.name = "xilinx-vdma",
-		.of_match_table = xilinx_vdma_of_ids,
+		.of_match_table = xilinx_dma_of_ids,
 	},
-	.probe = xilinx_vdma_probe,
-	.remove = xilinx_vdma_remove,
+	.probe = xilinx_dma_probe,
+	.remove = xilinx_dma_remove,
 };
 
 module_platform_driver(xilinx_vdma_driver);
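
The probe path above keys everything off the OF match table: each compatible string carries a per-variant xilinx_dma_config whose clk_init callback and dmatype then select the channel's start_transfer routine. Below is a minimal user-space sketch of that table-driven dispatch, with invented names standing in for the driver's types; it assumes nothing beyond standard C.

/*
 * Minimal model of the match-table dispatch used above: a compatible
 * string selects a config struct whose members (an enum and a callback)
 * drive later behaviour.  Names are illustrative, not the driver's API.
 */
#include <stdio.h>
#include <string.h>

enum xdma_type { TYPE_AXIDMA, TYPE_CDMA, TYPE_VDMA };

struct dma_cfg {
	const char *compatible;
	enum xdma_type type;		/* would pick start_transfer in the driver */
	int (*clk_init)(void);		/* stands in for axi{,c,v}dma_clk_init */
};

static int axidma_clk_init(void)  { puts("init AXI DMA clocks");  return 0; }
static int axicdma_clk_init(void) { puts("init AXI CDMA clocks"); return 0; }
static int axivdma_clk_init(void) { puts("init AXI VDMA clocks"); return 0; }

static const struct dma_cfg configs[] = {
	{ "xlnx,axi-dma-1.00.a",  TYPE_AXIDMA, axidma_clk_init  },
	{ "xlnx,axi-cdma-1.00.a", TYPE_CDMA,   axicdma_clk_init },
	{ "xlnx,axi-vdma-1.00.a", TYPE_VDMA,   axivdma_clk_init },
};

/* Analogue of of_match_node(): find the config for a compatible string. */
static const struct dma_cfg *match_config(const char *compatible)
{
	size_t i;

	for (i = 0; i < sizeof(configs) / sizeof(configs[0]); i++)
		if (!strcmp(configs[i].compatible, compatible))
			return &configs[i];
	return NULL;
}

int main(void)
{
	const struct dma_cfg *cfg = match_config("xlnx,axi-cdma-1.00.a");

	if (!cfg)
		return 1;
	return cfg->clk_init();	/* probe() would then wire up start_transfer, etc. */
}

The same lookup shape is what lets one platform_driver cover AXI DMA, CDMA and VDMA without separate probe functions per variant.
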
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 37755e6..6ca7474 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -378,12 +378,11 @@
 
 config EDAC_ALTERA_L2C
 	bool "Altera L2 Cache ECC"
-	depends on EDAC_ALTERA=y
-	select CACHE_L2X0
+	depends on EDAC_ALTERA=y && CACHE_L2X0
 	help
 	  Support for error detection and correction on the
 	  Altera L2 cache Memory for Altera SoCs. This option
-	  requires L2 cache so it will force that selection.
+	  requires L2 cache.
 
 config EDAC_ALTERA_OCRAM
 	bool "Altera On-Chip RAM ECC"
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 63e4209..5b4d223 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
@@ -78,27 +79,6 @@
 	.ue_set_mask        = A10_DIAGINT_TDERRA_MASK,
 };
 
-/************************** EDAC Device Defines **************************/
-
-/* OCRAM ECC Management Group Defines */
-#define ALTR_MAN_GRP_OCRAM_ECC_OFFSET   0x04
-#define ALTR_OCR_ECC_EN                 BIT(0)
-#define ALTR_OCR_ECC_INJS               BIT(1)
-#define ALTR_OCR_ECC_INJD               BIT(2)
-#define ALTR_OCR_ECC_SERR               BIT(3)
-#define ALTR_OCR_ECC_DERR               BIT(4)
-
-/* L2 ECC Management Group Defines */
-#define ALTR_MAN_GRP_L2_ECC_OFFSET      0x00
-#define ALTR_L2_ECC_EN                  BIT(0)
-#define ALTR_L2_ECC_INJS                BIT(1)
-#define ALTR_L2_ECC_INJD                BIT(2)
-
-#define ALTR_UE_TRIGGER_CHAR            'U'   /* Trigger for UE */
-#define ALTR_TRIGGER_READ_WRD_CNT       32    /* Line size x 4 */
-#define ALTR_TRIG_OCRAM_BYTE_SIZE       128   /* Line size x 4 */
-#define ALTR_TRIG_L2C_BYTE_SIZE         4096  /* Full Page */
-
 /*********************** EDAC Memory Controller Functions ****************/
 
 /* The SDRAM controller uses the EDAC Memory Controller framework.       */
@@ -252,8 +232,8 @@
 }
 
 static const struct of_device_id altr_sdram_ctrl_of_match[] = {
-	{ .compatible = "altr,sdram-edac", .data = (void *)&c5_data},
-	{ .compatible = "altr,sdram-edac-a10", .data = (void *)&a10_data},
+	{ .compatible = "altr,sdram-edac", .data = &c5_data},
+	{ .compatible = "altr,sdram-edac-a10", .data = &a10_data},
 	{},
 };
 MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
@@ -570,28 +550,8 @@
 
 const struct edac_device_prv_data ocramecc_data;
 const struct edac_device_prv_data l2ecc_data;
-
-struct edac_device_prv_data {
-	int (*setup)(struct platform_device *pdev, void __iomem *base);
-	int ce_clear_mask;
-	int ue_clear_mask;
-	char dbgfs_name[20];
-	void * (*alloc_mem)(size_t size, void **other);
-	void (*free_mem)(void *p, size_t size, void *other);
-	int ecc_enable_mask;
-	int ce_set_mask;
-	int ue_set_mask;
-	int trig_alloc_sz;
-};
-
-struct altr_edac_device_dev {
-	void __iomem *base;
-	int sb_irq;
-	int db_irq;
-	const struct edac_device_prv_data *data;
-	struct dentry *debugfs_dir;
-	char *edac_dev_name;
-};
+const struct edac_device_prv_data a10_ocramecc_data;
+const struct edac_device_prv_data a10_l2ecc_data;
 
 static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
 {
@@ -665,8 +625,9 @@
 		if (ACCESS_ONCE(ptemp[i]))
 			result = -1;
 		/* Toggle Error bit (it is latched), leave ECC enabled */
-		writel(error_mask, drvdata->base);
-		writel(priv->ecc_enable_mask, drvdata->base);
+		writel(error_mask, (drvdata->base + priv->set_err_ofst));
+		writel(priv->ecc_enable_mask, (drvdata->base +
+					       priv->set_err_ofst));
 		ptemp[i] = i;
 	}
 	/* Ensure it has been written out */
@@ -694,6 +655,16 @@
 	.llseek = generic_file_llseek,
 };
 
+static ssize_t altr_edac_a10_device_trig(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos);
+
+static const struct file_operations altr_edac_a10_device_inject_fops = {
+	.open = simple_open,
+	.write = altr_edac_a10_device_trig,
+	.llseek = generic_file_llseek,
+};
+
 static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
 				      const struct edac_device_prv_data *priv)
 {
@@ -708,17 +679,18 @@
 
 	if (!edac_debugfs_create_file(priv->dbgfs_name, S_IWUSR,
 				      drvdata->debugfs_dir, edac_dci,
-				      &altr_edac_device_inject_fops))
+				      priv->inject_fops))
 		debugfs_remove_recursive(drvdata->debugfs_dir);
 }
 
 static const struct of_device_id altr_edac_device_of_match[] = {
 #ifdef CONFIG_EDAC_ALTERA_L2C
-	{ .compatible = "altr,socfpga-l2-ecc", .data = (void *)&l2ecc_data },
+	{ .compatible = "altr,socfpga-l2-ecc", .data = &l2ecc_data },
+	{ .compatible = "altr,socfpga-a10-l2-ecc", .data = &a10_l2ecc_data },
 #endif
 #ifdef CONFIG_EDAC_ALTERA_OCRAM
-	{ .compatible = "altr,socfpga-ocram-ecc",
-	  .data = (void *)&ocramecc_data },
+	{ .compatible = "altr,socfpga-ocram-ecc", .data = &ocramecc_data },
+	{ .compatible = "altr,socfpga-a10-ocram-ecc", .data = &a10_ocramecc_data },
 #endif
 	{},
 };
@@ -789,7 +761,7 @@
 
 	/* Check specific dependencies for the module */
 	if (drvdata->data->setup) {
-		res = drvdata->data->setup(pdev, drvdata->base);
+		res = drvdata->data->setup(drvdata);
 		if (res)
 			goto fail1;
 	}
@@ -856,6 +828,25 @@
 /*********************** OCRAM EDAC Device Functions *********************/
 
 #ifdef CONFIG_EDAC_ALTERA_OCRAM
+/*
+ *  Test for memory's ECC dependencies upon entry because platform specific
+ *  startup should have initialized the memory and enabled the ECC.
+ *  Can't turn on ECC here because accessing un-initialized memory will
+ *  cause CE/UE errors possibly causing an ABORT.
+ */
+static int altr_check_ecc_deps(struct altr_edac_device_dev *device)
+{
+	void __iomem  *base = device->base;
+	const struct edac_device_prv_data *prv = device->data;
+
+	if (readl(base + prv->ecc_en_ofst) & prv->ecc_enable_mask)
+		return 0;
+
+	edac_printk(KERN_ERR, EDAC_DEVICE,
+		    "%s: No ECC present or ECC disabled.\n",
+		    device->edac_dev_name);
+	return -ENODEV;
+}
 
 static void *ocram_alloc_mem(size_t size, void **other)
 {
@@ -891,36 +882,53 @@
 	gen_pool_free((struct gen_pool *)other, (u32)p, size);
 }
 
-/*
- * altr_ocram_check_deps()
- *	Test for OCRAM cache ECC dependencies upon entry because
- *	platform specific startup should have initialized the
- *	On-Chip RAM memory and enabled the ECC.
- *	Can't turn on ECC here because accessing un-initialized
- *	memory will cause CE/UE errors possibly causing an ABORT.
- */
-static int altr_ocram_check_deps(struct platform_device *pdev,
-				 void __iomem *base)
+static irqreturn_t altr_edac_a10_ecc_irq(struct altr_edac_device_dev *dci,
+					 bool sberr)
 {
-	if (readl(base) & ALTR_OCR_ECC_EN)
-		return 0;
+	void __iomem  *base = dci->base;
 
-	edac_printk(KERN_ERR, EDAC_DEVICE,
-		    "OCRAM: No ECC present or ECC disabled.\n");
-	return -ENODEV;
+	if (sberr) {
+		writel(ALTR_A10_ECC_SERRPENA,
+		       base + ALTR_A10_ECC_INTSTAT_OFST);
+		edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
+	} else {
+		writel(ALTR_A10_ECC_DERRPENA,
+		       base + ALTR_A10_ECC_INTSTAT_OFST);
+		edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
+		panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+	}
+	return IRQ_HANDLED;
 }
 
 const struct edac_device_prv_data ocramecc_data = {
-	.setup = altr_ocram_check_deps,
+	.setup = altr_check_ecc_deps,
 	.ce_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_SERR),
 	.ue_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_DERR),
 	.dbgfs_name = "altr_ocram_trigger",
 	.alloc_mem = ocram_alloc_mem,
 	.free_mem = ocram_free_mem,
 	.ecc_enable_mask = ALTR_OCR_ECC_EN,
+	.ecc_en_ofst = ALTR_OCR_ECC_REG_OFFSET,
 	.ce_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJS),
 	.ue_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJD),
+	.set_err_ofst = ALTR_OCR_ECC_REG_OFFSET,
 	.trig_alloc_sz = ALTR_TRIG_OCRAM_BYTE_SIZE,
+	.inject_fops = &altr_edac_device_inject_fops,
+};
+
+const struct edac_device_prv_data a10_ocramecc_data = {
+	.setup = altr_check_ecc_deps,
+	.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
+	.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
+	.irq_status_mask = A10_SYSMGR_ECC_INTSTAT_OCRAM,
+	.dbgfs_name = "altr_ocram_trigger",
+	.ecc_enable_mask = ALTR_A10_OCRAM_ECC_EN_CTL,
+	.ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
+	.ce_set_mask = ALTR_A10_ECC_TSERRA,
+	.ue_set_mask = ALTR_A10_ECC_TDERRA,
+	.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
+	.ecc_irq_handler = altr_edac_a10_ecc_irq,
+	.inject_fops = &altr_edac_a10_device_inject_fops,
 };
 
 #endif	/* CONFIG_EDAC_ALTERA_OCRAM */
@@ -966,10 +974,13 @@
  *	Bail if ECC is not enabled.
  *	Note that L2 Cache Enable is forced at build time.
  */
-static int altr_l2_check_deps(struct platform_device *pdev,
-			      void __iomem *base)
+static int altr_l2_check_deps(struct altr_edac_device_dev *device)
 {
-	if (readl(base) & ALTR_L2_ECC_EN)
+	void __iomem *base = device->base;
+	const struct edac_device_prv_data *prv = device->data;
+
+	if ((readl(base) & prv->ecc_enable_mask) ==
+	     prv->ecc_enable_mask)
 		return 0;
 
 	edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -977,6 +988,24 @@
 	return -ENODEV;
 }
 
+static irqreturn_t altr_edac_a10_l2_irq(struct altr_edac_device_dev *dci,
+					bool sberr)
+{
+	if (sberr) {
+		regmap_write(dci->edac->ecc_mgr_map,
+			     A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST,
+			     A10_SYSGMR_MPU_CLEAR_L2_ECC_SB);
+		edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
+	} else {
+		regmap_write(dci->edac->ecc_mgr_map,
+			     A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST,
+			     A10_SYSGMR_MPU_CLEAR_L2_ECC_MB);
+		edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
+		panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+	}
+	return IRQ_HANDLED;
+}
+
 const struct edac_device_prv_data l2ecc_data = {
 	.setup = altr_l2_check_deps,
 	.ce_clear_mask = 0,
@@ -987,11 +1016,252 @@
 	.ecc_enable_mask = ALTR_L2_ECC_EN,
 	.ce_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJS),
 	.ue_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJD),
+	.set_err_ofst = ALTR_L2_ECC_REG_OFFSET,
 	.trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
+	.inject_fops = &altr_edac_device_inject_fops,
+};
+
+const struct edac_device_prv_data a10_l2ecc_data = {
+	.setup = altr_l2_check_deps,
+	.ce_clear_mask = ALTR_A10_L2_ECC_SERR_CLR,
+	.ue_clear_mask = ALTR_A10_L2_ECC_MERR_CLR,
+	.irq_status_mask = A10_SYSMGR_ECC_INTSTAT_L2,
+	.dbgfs_name = "altr_l2_trigger",
+	.alloc_mem = l2_alloc_mem,
+	.free_mem = l2_free_mem,
+	.ecc_enable_mask = ALTR_A10_L2_ECC_EN_CTL,
+	.ce_set_mask = ALTR_A10_L2_ECC_CE_INJ_MASK,
+	.ue_set_mask = ALTR_A10_L2_ECC_UE_INJ_MASK,
+	.set_err_ofst = ALTR_A10_L2_ECC_INJ_OFST,
+	.ecc_irq_handler = altr_edac_a10_l2_irq,
+	.trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
+	.inject_fops = &altr_edac_device_inject_fops,
 };
 
 #endif	/* CONFIG_EDAC_ALTERA_L2C */
 
+/********************* Arria10 EDAC Device Functions *************************/
+
+/*
+ * The Arria10 EDAC Device Functions differ from the Cyclone5/Arria5
+ * because 2 IRQs are shared among all the ECC peripherals. The ECC
+ * manager manages the IRQs and the children.
+ * Based on xgene_edac.c peripheral code.
+ */
+
+static ssize_t altr_edac_a10_device_trig(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct edac_device_ctl_info *edac_dci = file->private_data;
+	struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+	const struct edac_device_prv_data *priv = drvdata->data;
+	void __iomem *set_addr = (drvdata->base + priv->set_err_ofst);
+	unsigned long flags;
+	u8 trig_type;
+
+	if (!user_buf || get_user(trig_type, user_buf))
+		return -EFAULT;
+
+	local_irq_save(flags);
+	if (trig_type == ALTR_UE_TRIGGER_CHAR)
+		writel(priv->ue_set_mask, set_addr);
+	else
+		writel(priv->ce_set_mask, set_addr);
+	/* Ensure the interrupt test bits are set */
+	wmb();
+	local_irq_restore(flags);
+
+	return count;
+}
+
+static irqreturn_t altr_edac_a10_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t rc = IRQ_NONE;
+	struct altr_arria10_edac *edac = dev_id;
+	struct altr_edac_device_dev *dci;
+	int irq_status;
+	bool sberr = (irq == edac->sb_irq) ? 1 : 0;
+	int sm_offset = sberr ? A10_SYSMGR_ECC_INTSTAT_SERR_OFST :
+				A10_SYSMGR_ECC_INTSTAT_DERR_OFST;
+
+	regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
+
+	if ((irq != edac->sb_irq) && (irq != edac->db_irq)) {
+		WARN_ON(1);
+	} else {
+		list_for_each_entry(dci, &edac->a10_ecc_devices, next) {
+			if (irq_status & dci->data->irq_status_mask)
+				rc = dci->data->ecc_irq_handler(dci, sberr);
+		}
+	}
+
+	return rc;
+}
+
+static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
+				    struct device_node *np)
+{
+	struct edac_device_ctl_info *dci;
+	struct altr_edac_device_dev *altdev;
+	char *ecc_name = (char *)np->name;
+	struct resource res;
+	int edac_idx;
+	int rc = 0;
+	const struct edac_device_prv_data *prv;
+	/* Get matching node and check for valid result */
+	const struct of_device_id *pdev_id =
+		of_match_node(altr_edac_device_of_match, np);
+	if (IS_ERR_OR_NULL(pdev_id))
+		return -ENODEV;
+
+	/* Get driver specific data for this EDAC device */
+	prv = pdev_id->data;
+	if (IS_ERR_OR_NULL(prv))
+		return -ENODEV;
+
+	if (!devres_open_group(edac->dev, altr_edac_a10_device_add, GFP_KERNEL))
+		return -ENOMEM;
+
+	rc = of_address_to_resource(np, 0, &res);
+	if (rc < 0) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "%s: no resource address\n", ecc_name);
+		goto err_release_group;
+	}
+
+	edac_idx = edac_device_alloc_index();
+	dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name,
+					 1, ecc_name, 1, 0, NULL, 0,
+					 edac_idx);
+
+	if (!dci) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "%s: Unable to allocate EDAC device\n", ecc_name);
+		rc = -ENOMEM;
+		goto err_release_group;
+	}
+
+	altdev = dci->pvt_info;
+	dci->dev = edac->dev;
+	altdev->edac_dev_name = ecc_name;
+	altdev->edac_idx = edac_idx;
+	altdev->edac = edac;
+	altdev->edac_dev = dci;
+	altdev->data = prv;
+	altdev->ddev = *edac->dev;
+	dci->dev = &altdev->ddev;
+	dci->ctl_name = "Altera ECC Manager";
+	dci->mod_name = ecc_name;
+	dci->dev_name = ecc_name;
+
+	altdev->base = devm_ioremap_resource(edac->dev, &res);
+	if (IS_ERR(altdev->base)) {
+		rc = PTR_ERR(altdev->base);
+		goto err_release_group1;
+	}
+
+	/* Check specific dependencies for the module */
+	if (altdev->data->setup) {
+		rc = altdev->data->setup(altdev);
+		if (rc)
+			goto err_release_group1;
+	}
+
+	rc = edac_device_add_device(dci);
+	if (rc) {
+		dev_err(edac->dev, "edac_device_add_device failed\n");
+		rc = -ENOMEM;
+		goto err_release_group1;
+	}
+
+	altr_create_edacdev_dbgfs(dci, prv);
+
+	list_add(&altdev->next, &edac->a10_ecc_devices);
+
+	devres_remove_group(edac->dev, altr_edac_a10_device_add);
+
+	return 0;
+
+err_release_group1:
+	edac_device_free_ctl_info(dci);
+err_release_group:
+	edac_printk(KERN_ALERT, EDAC_DEVICE, "%s: %d\n", __func__, __LINE__);
+	devres_release_group(edac->dev, NULL);
+	edac_printk(KERN_ERR, EDAC_DEVICE,
+		    "%s:Error setting up EDAC device: %d\n", ecc_name, rc);
+
+	return rc;
+}
+
+static int altr_edac_a10_probe(struct platform_device *pdev)
+{
+	struct altr_arria10_edac *edac;
+	struct device_node *child;
+	int rc;
+
+	edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL);
+	if (!edac)
+		return -ENOMEM;
+
+	edac->dev = &pdev->dev;
+	platform_set_drvdata(pdev, edac);
+	INIT_LIST_HEAD(&edac->a10_ecc_devices);
+
+	edac->ecc_mgr_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							"altr,sysmgr-syscon");
+	if (IS_ERR(edac->ecc_mgr_map)) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "Unable to get syscon altr,sysmgr-syscon\n");
+		return PTR_ERR(edac->ecc_mgr_map);
+	}
+
+	edac->sb_irq = platform_get_irq(pdev, 0);
+	rc = devm_request_irq(&pdev->dev, edac->sb_irq,
+			      altr_edac_a10_irq_handler,
+			      IRQF_SHARED, dev_name(&pdev->dev), edac);
+	if (rc) {
+		edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n");
+		return rc;
+	}
+
+	edac->db_irq = platform_get_irq(pdev, 1);
+	rc = devm_request_irq(&pdev->dev, edac->db_irq,
+			      altr_edac_a10_irq_handler,
+			      IRQF_SHARED, dev_name(&pdev->dev), edac);
+	if (rc) {
+		edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
+		return rc;
+	}
+
+	for_each_child_of_node(pdev->dev.of_node, child) {
+		if (!of_device_is_available(child))
+			continue;
+		if (of_device_is_compatible(child, "altr,socfpga-a10-l2-ecc"))
+			altr_edac_a10_device_add(edac, child);
+		else if (of_device_is_compatible(child,
+						 "altr,socfpga-a10-ocram-ecc"))
+			altr_edac_a10_device_add(edac, child);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id altr_edac_a10_of_match[] = {
+	{ .compatible = "altr,socfpga-a10-ecc-manager" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, altr_edac_a10_of_match);
+
+static struct platform_driver altr_edac_a10_driver = {
+	.probe =  altr_edac_a10_probe,
+	.driver = {
+		.name = "socfpga_a10_ecc_manager",
+		.of_match_table = altr_edac_a10_of_match,
+	},
+};
+module_platform_driver(altr_edac_a10_driver);
+
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Thor Thayer");
 MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 953077d..42090f3 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -195,4 +195,132 @@
 	const struct altr_sdram_prv_data *data;
 };
 
+/************************** EDAC Device Defines **************************/
+/***** General Device Trigger Defines *****/
+#define ALTR_UE_TRIGGER_CHAR            'U'   /* Trigger for UE */
+#define ALTR_TRIGGER_READ_WRD_CNT       32    /* Line size x 4 */
+#define ALTR_TRIG_OCRAM_BYTE_SIZE       128   /* Line size x 4 */
+#define ALTR_TRIG_L2C_BYTE_SIZE         4096  /* Full Page */
+
+/******* Cyclone5 and Arria5 Defines *******/
+/* OCRAM ECC Management Group Defines */
+#define ALTR_MAN_GRP_OCRAM_ECC_OFFSET   0x04
+#define ALTR_OCR_ECC_REG_OFFSET         0x00
+#define ALTR_OCR_ECC_EN                 BIT(0)
+#define ALTR_OCR_ECC_INJS               BIT(1)
+#define ALTR_OCR_ECC_INJD               BIT(2)
+#define ALTR_OCR_ECC_SERR               BIT(3)
+#define ALTR_OCR_ECC_DERR               BIT(4)
+
+/* L2 ECC Management Group Defines */
+#define ALTR_MAN_GRP_L2_ECC_OFFSET      0x00
+#define ALTR_L2_ECC_REG_OFFSET          0x00
+#define ALTR_L2_ECC_EN                  BIT(0)
+#define ALTR_L2_ECC_INJS                BIT(1)
+#define ALTR_L2_ECC_INJD                BIT(2)
+
+/* Arria10 General ECC Block Module Defines */
+#define ALTR_A10_ECC_CTRL_OFST          0x08
+#define ALTR_A10_ECC_EN                 BIT(0)
+#define ALTR_A10_ECC_INITA              BIT(16)
+#define ALTR_A10_ECC_INITB              BIT(24)
+
+#define ALTR_A10_ECC_INITSTAT_OFST      0x0C
+#define ALTR_A10_ECC_INITCOMPLETEA      BIT(0)
+#define ALTR_A10_ECC_INITCOMPLETEB      BIT(8)
+
+#define ALTR_A10_ECC_ERRINTEN_OFST      0x10
+#define ALTR_A10_ECC_SERRINTEN          BIT(0)
+
+#define ALTR_A10_ECC_INTSTAT_OFST       0x20
+#define ALTR_A10_ECC_SERRPENA           BIT(0)
+#define ALTR_A10_ECC_DERRPENA           BIT(8)
+#define ALTR_A10_ECC_ERRPENA_MASK       (ALTR_A10_ECC_SERRPENA | \
+					 ALTR_A10_ECC_DERRPENA)
+#define ALTR_A10_ECC_SERRPENB           BIT(16)
+#define ALTR_A10_ECC_DERRPENB           BIT(24)
+#define ALTR_A10_ECC_ERRPENB_MASK       (ALTR_A10_ECC_SERRPENB | \
+					 ALTR_A10_ECC_DERRPENB)
+
+#define ALTR_A10_ECC_INTTEST_OFST       0x24
+#define ALTR_A10_ECC_TSERRA             BIT(0)
+#define ALTR_A10_ECC_TDERRA             BIT(8)
+
+/* ECC Manager Defines */
+#define A10_SYSMGR_ECC_INTMASK_SET_OFST   0x94
+#define A10_SYSMGR_ECC_INTMASK_CLR_OFST   0x98
+#define A10_SYSMGR_ECC_INTMASK_OCRAM      BIT(1)
+
+#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST  0x9C
+#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST  0xA0
+#define A10_SYSMGR_ECC_INTSTAT_L2         BIT(0)
+#define A10_SYSMGR_ECC_INTSTAT_OCRAM      BIT(1)
+
+#define A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST  0xA8
+#define A10_SYSGMR_MPU_CLEAR_L2_ECC_SB    BIT(15)
+#define A10_SYSGMR_MPU_CLEAR_L2_ECC_MB    BIT(31)
+
+/* Arria 10 L2 ECC Management Group Defines */
+#define ALTR_A10_L2_ECC_CTL_OFST        0x0
+#define ALTR_A10_L2_ECC_EN_CTL          BIT(0)
+
+#define ALTR_A10_L2_ECC_STATUS          0xFFD060A4
+#define ALTR_A10_L2_ECC_STAT_OFST       0xA4
+#define ALTR_A10_L2_ECC_SERR_PEND       BIT(0)
+#define ALTR_A10_L2_ECC_MERR_PEND       BIT(0)
+
+#define ALTR_A10_L2_ECC_CLR_OFST        0x4
+#define ALTR_A10_L2_ECC_SERR_CLR        BIT(15)
+#define ALTR_A10_L2_ECC_MERR_CLR        BIT(31)
+
+#define ALTR_A10_L2_ECC_INJ_OFST        ALTR_A10_L2_ECC_CTL_OFST
+#define ALTR_A10_L2_ECC_CE_INJ_MASK     0x00000101
+#define ALTR_A10_L2_ECC_UE_INJ_MASK     0x00010101
+
+/* Arria 10 OCRAM ECC Management Group Defines */
+#define ALTR_A10_OCRAM_ECC_EN_CTL       (BIT(1) | BIT(0))
+
+struct altr_edac_device_dev;
+
+struct edac_device_prv_data {
+	int (*setup)(struct altr_edac_device_dev *device);
+	int ce_clear_mask;
+	int ue_clear_mask;
+	int irq_status_mask;
+	char dbgfs_name[20];
+	void * (*alloc_mem)(size_t size, void **other);
+	void (*free_mem)(void *p, size_t size, void *other);
+	int ecc_enable_mask;
+	int ecc_en_ofst;
+	int ce_set_mask;
+	int ue_set_mask;
+	int set_err_ofst;
+	irqreturn_t (*ecc_irq_handler)(struct altr_edac_device_dev *dci,
+				       bool sb);
+	int trig_alloc_sz;
+	const struct file_operations *inject_fops;
+};
+
+struct altr_edac_device_dev {
+	struct list_head next;
+	void __iomem *base;
+	int sb_irq;
+	int db_irq;
+	const struct edac_device_prv_data *data;
+	struct dentry *debugfs_dir;
+	char *edac_dev_name;
+	struct altr_arria10_edac *edac;
+	struct edac_device_ctl_info *edac_dev;
+	struct device ddev;
+	int edac_idx;
+};
+
+struct altr_arria10_edac {
+	struct device		*dev;
+	struct regmap		*ecc_mgr_map;
+	int sb_irq;
+	int db_irq;
+	struct list_head	a10_ecc_devices;
+};
+
 #endif	/* #ifndef _ALTERA_EDAC_H */
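
With the masks and offsets moved into edac_device_prv_data, the shared code only has to read base + ecc_en_ofst and compare against ecc_enable_mask, whichever family it is driving. A small sketch of that data-driven check follows, with the register file faked by an array and the Arria10 OCRAM constants written out as plain numbers.

/*
 * Sketch of the data-driven dependency check: the per-family struct in
 * the header supplies the enable offset and mask; the shared code just
 * reads and compares.  Register contents are faked with an array.
 */
#include <stdint.h>
#include <stdio.h>

struct prv_data {
	unsigned int ecc_en_ofst;	/* ALTR_A10_ECC_CTRL_OFST == 0x08 */
	uint32_t ecc_enable_mask;	/* ALTR_A10_OCRAM_ECC_EN_CTL == BIT(1)|BIT(0) */
};

static int check_ecc_deps(const uint32_t *base, const struct prv_data *prv)
{
	uint32_t ctrl = base[prv->ecc_en_ofst / 4];	/* stands in for readl() */

	if ((ctrl & prv->ecc_enable_mask) == prv->ecc_enable_mask)
		return 0;

	fprintf(stderr, "No ECC present or ECC disabled\n");
	return -1;
}

int main(void)
{
	static const struct prv_data a10_ocram = { 0x08, 0x3 };
	uint32_t fake_regs[16] = { 0 };

	fake_regs[2] = 0x3;				/* word at offset 0x08: ECC enabled */
	return check_ecc_deps(fake_regs, &a10_ocram);	/* returns 0 */
}
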
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index d87a475..46784eb 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,11 +15,6 @@
 
 static struct msr __percpu *msrs;
 
-/*
- * count successfully initialized driver instances for setup_pci_device()
- */
-static atomic_t drv_instances = ATOMIC_INIT(0);
-
 /* Per-node stuff */
 static struct ecc_settings **ecc_stngs;
 
@@ -645,7 +640,7 @@
 	input_addr =
 	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
 
-	edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
+	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
 		 (unsigned long)sys_addr, (unsigned long)input_addr);
 
 	return input_addr;
@@ -1918,7 +1913,7 @@
 	[K8_CPUS] = {
 		.ctl_name = "K8",
 		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
-		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
+		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
 		.ops = {
 			.early_channel_count	= k8_early_channel_count,
 			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
@@ -1928,7 +1923,7 @@
 	[F10_CPUS] = {
 		.ctl_name = "F10h",
 		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
-		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
+		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
 		.ops = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
@@ -1938,7 +1933,7 @@
 	[F15_CPUS] = {
 		.ctl_name = "F15h",
 		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
-		.f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
+		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
 		.ops = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
@@ -1948,7 +1943,7 @@
 	[F15_M30H_CPUS] = {
 		.ctl_name = "F15h_M30h",
 		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
-		.f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
+		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
 		.ops = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
@@ -1958,7 +1953,7 @@
 	[F15_M60H_CPUS] = {
 		.ctl_name = "F15h_M60h",
 		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
-		.f3_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F3,
+		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
 		.ops = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
@@ -1968,7 +1963,7 @@
 	[F16_CPUS] = {
 		.ctl_name = "F16h",
 		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
-		.f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
+		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
 		.ops = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
@@ -1978,7 +1973,7 @@
 	[F16_M30H_CPUS] = {
 		.ctl_name = "F16h_M30h",
 		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
-		.f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
+		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
 		.ops = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
@@ -2227,13 +2222,13 @@
 }
 
 /*
- * Use pvt->F2 which contains the F2 CPU PCI device to get the related
- * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
+ * Use pvt->F3 which contains the F3 CPU PCI device to get the related
+ * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
  */
-static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
+static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
 {
 	/* Reserve the ADDRESS MAP Device */
-	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
+	pvt->F1 = pci_get_related_function(pvt->F3->vendor, f1_id, pvt->F3);
 	if (!pvt->F1) {
 		amd64_err("error address map device not found: "
 			  "vendor %x device 0x%x (broken BIOS?)\n",
@@ -2241,15 +2236,15 @@
 		return -ENODEV;
 	}
 
-	/* Reserve the MISC Device */
-	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
-	if (!pvt->F3) {
+	/* Reserve the DCT Device */
+	pvt->F2 = pci_get_related_function(pvt->F3->vendor, f2_id, pvt->F3);
+	if (!pvt->F2) {
 		pci_dev_put(pvt->F1);
 		pvt->F1 = NULL;
 
-		amd64_err("error F3 device not found: "
+		amd64_err("error F2 device not found: "
 			  "vendor %x device 0x%x (broken BIOS?)\n",
-			  PCI_VENDOR_ID_AMD, f3_id);
+			  PCI_VENDOR_ID_AMD, f2_id);
 
 		return -ENODEV;
 	}
@@ -2263,7 +2258,7 @@
 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
 {
 	pci_dev_put(pvt->F1);
-	pci_dev_put(pvt->F3);
+	pci_dev_put(pvt->F2);
 }
 
 /*
@@ -2778,14 +2773,14 @@
 	NULL
 };
 
-static int init_one_instance(struct pci_dev *F2)
+static int init_one_instance(unsigned int nid)
 {
-	struct amd64_pvt *pvt = NULL;
+	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
 	struct amd64_family_type *fam_type = NULL;
 	struct mem_ctl_info *mci = NULL;
 	struct edac_mc_layer layers[2];
+	struct amd64_pvt *pvt = NULL;
 	int err = 0, ret;
-	u16 nid = amd_pci_dev_to_node_id(F2);
 
 	ret = -ENOMEM;
 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2793,7 +2788,7 @@
 		goto err_ret;
 
 	pvt->mc_node_id	= nid;
-	pvt->F2 = F2;
+	pvt->F3 = F3;
 
 	ret = -EINVAL;
 	fam_type = per_family_init(pvt);
@@ -2801,7 +2796,7 @@
 		goto err_free;
 
 	ret = -ENODEV;
-	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
+	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f2_id);
 	if (err)
 		goto err_free;
 
@@ -2836,7 +2831,7 @@
 		goto err_siblings;
 
 	mci->pvt_info = pvt;
-	mci->pdev = &pvt->F2->dev;
+	mci->pdev = &pvt->F3->dev;
 
 	setup_mci_misc_attrs(mci, fam_type);
 
@@ -2855,8 +2850,6 @@
 
 	amd_register_ecc_decoder(decode_bus_error);
 
-	atomic_inc(&drv_instances);
-
 	return 0;
 
 err_add_mc:
@@ -2872,19 +2865,11 @@
 	return ret;
 }
 
-static int probe_one_instance(struct pci_dev *pdev,
-			      const struct pci_device_id *mc_type)
+static int probe_one_instance(unsigned int nid)
 {
-	u16 nid = amd_pci_dev_to_node_id(pdev);
 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
 	struct ecc_settings *s;
-	int ret = 0;
-
-	ret = pci_enable_device(pdev);
-	if (ret < 0) {
-		edac_dbg(0, "ret=%d\n", ret);
-		return -EIO;
-	}
+	int ret;
 
 	ret = -ENOMEM;
 	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
@@ -2905,7 +2890,7 @@
 			goto err_enable;
 	}
 
-	ret = init_one_instance(pdev);
+	ret = init_one_instance(nid);
 	if (ret < 0) {
 		amd64_err("Error probing instance: %d\n", nid);
 		restore_ecc_error_reporting(s, nid, F3);
@@ -2921,19 +2906,18 @@
 	return ret;
 }
 
-static void remove_one_instance(struct pci_dev *pdev)
+static void remove_one_instance(unsigned int nid)
 {
-	struct mem_ctl_info *mci;
-	struct amd64_pvt *pvt;
-	u16 nid = amd_pci_dev_to_node_id(pdev);
 	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
 	struct ecc_settings *s = ecc_stngs[nid];
+	struct mem_ctl_info *mci;
+	struct amd64_pvt *pvt;
 
-	mci = find_mci_by_dev(&pdev->dev);
+	mci = find_mci_by_dev(&F3->dev);
 	WARN_ON(!mci);
 
 	/* Remove from EDAC CORE tracking list */
-	mci = edac_mc_del_mc(&pdev->dev);
+	mci = edac_mc_del_mc(&F3->dev);
 	if (!mci)
 		return;
 
@@ -2957,31 +2941,6 @@
 	edac_mc_free(mci);
 }
 
-/*
- * This table is part of the interface for loading drivers for PCI devices. The
- * PCI core identifies what devices are on a system during boot, and then
- * inquiry this table to see if this driver is for a given device found.
- */
-static const struct pci_device_id amd64_pci_table[] = {
-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL) },
-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM) },
-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F2) },
-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F2) },
-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F2) },
-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F2) },
-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F2) },
-	{0, }
-};
-MODULE_DEVICE_TABLE(pci, amd64_pci_table);
-
-static struct pci_driver amd64_pci_driver = {
-	.name		= EDAC_MOD_STR,
-	.probe		= probe_one_instance,
-	.remove		= remove_one_instance,
-	.id_table	= amd64_pci_table,
-	.driver.probe_type = PROBE_FORCE_SYNCHRONOUS,
-};
-
 static void setup_pci_device(void)
 {
 	struct mem_ctl_info *mci;
@@ -3005,8 +2964,7 @@
 static int __init amd64_edac_init(void)
 {
 	int err = -ENODEV;
-
-	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
+	int i;
 
 	opstate_init();
 
@@ -3022,13 +2980,14 @@
 	if (!msrs)
 		goto err_free;
 
-	err = pci_register_driver(&amd64_pci_driver);
-	if (err)
-		goto err_pci;
+	for (i = 0; i < amd_nb_num(); i++)
+		if (probe_one_instance(i)) {
+			/* unwind properly */
+			while (--i >= 0)
+				remove_one_instance(i);
 
-	err = -ENODEV;
-	if (!atomic_read(&drv_instances))
-		goto err_no_instances;
+			goto err_pci;
+		}
 
 	setup_pci_device();
 
@@ -3036,10 +2995,9 @@
 	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
 #endif
 
-	return 0;
+	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
 
-err_no_instances:
-	pci_unregister_driver(&amd64_pci_driver);
+	return 0;
 
 err_pci:
 	msrs_free(msrs);
@@ -3055,10 +3013,13 @@
 
 static void __exit amd64_edac_exit(void)
 {
+	int i;
+
 	if (pci_ctl)
 		edac_pci_release_generic_ctl(pci_ctl);
 
-	pci_unregister_driver(&amd64_pci_driver);
+	for (i = 0; i < amd_nb_num(); i++)
+		remove_one_instance(i);
 
 	kfree(ecc_stngs);
 	ecc_stngs = NULL;
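
Dropping the PCI driver means amd64_edac_init() now iterates the northbridges itself and has to unwind partially completed probes by hand. The loop-with-unwind shape, reduced to a stand-alone sketch with stub probe/remove functions:

/*
 * Minimal model of the new init loop: probe nodes 0..N-1 and, on
 * failure, tear down every instance that already succeeded before
 * bailing out.  probe_one/remove_one are stubs, not the driver's code.
 */
#include <stdio.h>

#define NUM_NODES 4

static int probe_one(int nid)
{
	if (nid == 2)			/* pretend node 2 fails */
		return -1;
	printf("probed node %d\n", nid);
	return 0;
}

static void remove_one(int nid)
{
	printf("removed node %d\n", nid);
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_NODES; i++)
		if (probe_one(i)) {
			/* unwind properly, mirroring amd64_edac_init() */
			while (--i >= 0)
				remove_one(i);
			return 1;
		}
	return 0;
}
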
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index c0f248f..c088704 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -422,7 +422,7 @@
 
 struct amd64_family_type {
 	const char *ctl_name;
-	u16 f1_id, f3_id;
+	u16 f1_id, f2_id;
 	struct low_ops ops;
 };
 
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 1472f48..6aa256b0 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -923,7 +923,7 @@
 	mci->ue_mc += count;
 
 	if (!enable_per_layer_report) {
-		mci->ce_noinfo_count += count;
+		mci->ue_noinfo_count += count;
 		return;
 	}
 
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 26e65ab..10c305b 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -998,11 +998,12 @@
 
 void edac_unregister_sysfs(struct mem_ctl_info *mci)
 {
+	struct bus_type *bus = mci->bus;
 	const char *name = mci->bus->name;
 
 	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
 	device_unregister(&mci->dev);
-	bus_unregister(mci->bus);
+	bus_unregister(bus);
 	kfree(name);
 }
 
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 01087a3..8a68a5e 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -271,16 +271,6 @@
 
 	bool		is_registered, enable_scrub;
 
-	/* Fifo double buffers */
-	struct mce		mce_entry[MCE_LOG_LEN];
-	struct mce		mce_outentry[MCE_LOG_LEN];
-
-	/* Fifo in/out counters */
-	unsigned		mce_in, mce_out;
-
-	/* Count indicator to show errors not got */
-	unsigned		mce_overrun;
-
 	/* DCLK Frequency used for computing scrub rate */
 	int			dclk_freq;
 
@@ -1792,56 +1782,15 @@
  *	i7core_check_error	Retrieve and process errors reported by the
  *				hardware. Called by the Core module.
  */
-static void i7core_check_error(struct mem_ctl_info *mci)
+static void i7core_check_error(struct mem_ctl_info *mci, struct mce *m)
 {
 	struct i7core_pvt *pvt = mci->pvt_info;
-	int i;
-	unsigned count = 0;
-	struct mce *m;
 
-	/*
-	 * MCE first step: Copy all mce errors into a temporary buffer
-	 * We use a double buffering here, to reduce the risk of
-	 * losing an error.
-	 */
-	smp_rmb();
-	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
-		% MCE_LOG_LEN;
-	if (!count)
-		goto check_ce_error;
-
-	m = pvt->mce_outentry;
-	if (pvt->mce_in + count > MCE_LOG_LEN) {
-		unsigned l = MCE_LOG_LEN - pvt->mce_in;
-
-		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
-		smp_wmb();
-		pvt->mce_in = 0;
-		count -= l;
-		m += l;
-	}
-	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
-	smp_wmb();
-	pvt->mce_in += count;
-
-	smp_rmb();
-	if (pvt->mce_overrun) {
-		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
-			      pvt->mce_overrun);
-		smp_wmb();
-		pvt->mce_overrun = 0;
-	}
-
-	/*
-	 * MCE second step: parse errors and display
-	 */
-	for (i = 0; i < count; i++)
-		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
+	i7core_mce_output_error(mci, m);
 
 	/*
 	 * Now, let's increment CE error counts
 	 */
-check_ce_error:
 	if (!pvt->is_registered)
 		i7core_udimm_check_mc_ecc_err(mci);
 	else
@@ -1849,12 +1798,8 @@
 }
 
 /*
- * i7core_mce_check_error	Replicates mcelog routine to get errors
- *				This routine simply queues mcelog errors, and
- *				return. The error itself should be handled later
- *				by i7core_check_error.
- * WARNING: As this routine should be called at NMI time, extra care should
- * be taken to avoid deadlocks, and to be as fast as possible.
+ * Check that logging is enabled and that this is the right type
+ * of error for us to handle.
  */
 static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
 				  void *data)
@@ -1866,7 +1811,7 @@
 
 	i7_dev = get_i7core_dev(mce->socketid);
 	if (!i7_dev)
-		return NOTIFY_BAD;
+		return NOTIFY_DONE;
 
 	mci = i7_dev->mci;
 	pvt = mci->pvt_info;
@@ -1882,21 +1827,7 @@
 	if (mce->bank != 8)
 		return NOTIFY_DONE;
 
-	smp_rmb();
-	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
-		smp_wmb();
-		pvt->mce_overrun++;
-		return NOTIFY_DONE;
-	}
-
-	/* Copy memory error at the ringbuffer */
-	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
-	smp_wmb();
-	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
-
-	/* Handle fatal errors immediately */
-	if (mce->mcgstatus & 1)
-		i7core_check_error(mci);
+	i7core_check_error(mci, mce);
 
 	/* Advise mcelog that the errors were handled */
 	return NOTIFY_STOP;
@@ -2243,8 +2174,6 @@
 	get_dimm_config(mci);
 	/* record ptr to the generic device */
 	mci->pdev = &i7core_dev->pdev[0]->dev;
-	/* Set the function pointer to an actual operation function */
-	mci->edac_check = i7core_check_error;
 
 	/* Enable scrubrate setting */
 	if (pvt->enable_scrub)
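
Without the ring buffer, the MCE notifier both filters and decodes in one pass, and its return value carries the whole contract: NOTIFY_DONE when the event is not for this driver, NOTIFY_STOP once it has been decoded. A toy version of that convention, where the constants merely stand in for the kernel's notifier return codes:

/*
 * Sketch of the notifier return convention used above.  The "mce" here
 * is a toy struct, not <asm/mce.h>, and the return values are only
 * placeholders for NOTIFY_DONE/NOTIFY_STOP.
 */
#include <stdio.h>

#define NOTIFY_DONE 0	/* not ours, let other notifiers look at it */
#define NOTIFY_STOP 1	/* handled, stop the chain */

struct toy_mce { int bank; };

static void decode(const struct toy_mce *m)
{
	printf("decoding error in bank %d\n", m->bank);
}

static int mce_check_error(const struct toy_mce *m)
{
	if (m->bank != 8)		/* i7core only cares about bank 8 */
		return NOTIFY_DONE;
	decode(m);			/* handled inline, no ring buffer */
	return NOTIFY_STOP;
}

int main(void)
{
	struct toy_mce other = { 3 }, ours = { 8 };

	printf("bank 3 -> %d\n", mce_check_error(&other));	/* DONE */
	printf("bank 8 -> %d\n", mce_check_error(&ours));	/* STOP */
	return 0;
}
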
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 18d77ac..1c88d97 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -17,6 +17,7 @@
  * 015c: Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller
  * 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller
  * 0c08: Xeon E3-1200 v3 Processor DRAM Controller
+ * 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
  *
  * Based on Intel specification:
  * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
@@ -55,6 +56,7 @@
 #define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c
 #define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
 #define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918
 
 #define IE31200_DIMMS			4
 #define IE31200_RANKS			8
@@ -105,8 +107,11 @@
  *    1  Multiple Bit Error Status (MERRSTS)
  *    0  Correctable Error Status (CERRSTS)
  */
+
 #define IE31200_C0ECCERRLOG			0x40c8
 #define IE31200_C1ECCERRLOG			0x44c8
+#define IE31200_C0ECCERRLOG_SKL			0x4048
+#define IE31200_C1ECCERRLOG_SKL			0x4448
 #define IE31200_ECCERRLOG_CE			BIT(0)
 #define IE31200_ECCERRLOG_UE			BIT(1)
 #define IE31200_ECCERRLOG_RANK_BITS		GENMASK_ULL(28, 27)
@@ -123,17 +128,28 @@
 #define IE31200_CAPID0_DDPCD		BIT(6)
 #define IE31200_CAPID0_ECC		BIT(1)
 
-#define IE31200_MAD_DIMM_0_OFFSET	0x5004
-#define IE31200_MAD_DIMM_SIZE		GENMASK_ULL(7, 0)
-#define IE31200_MAD_DIMM_A_RANK		BIT(17)
-#define IE31200_MAD_DIMM_A_WIDTH	BIT(19)
+#define IE31200_MAD_DIMM_0_OFFSET		0x5004
+#define IE31200_MAD_DIMM_0_OFFSET_SKL		0x500C
+#define IE31200_MAD_DIMM_SIZE			GENMASK_ULL(7, 0)
+#define IE31200_MAD_DIMM_A_RANK			BIT(17)
+#define IE31200_MAD_DIMM_A_RANK_SHIFT		17
+#define IE31200_MAD_DIMM_A_RANK_SKL		BIT(10)
+#define IE31200_MAD_DIMM_A_RANK_SKL_SHIFT	10
+#define IE31200_MAD_DIMM_A_WIDTH		BIT(19)
+#define IE31200_MAD_DIMM_A_WIDTH_SHIFT		19
+#define IE31200_MAD_DIMM_A_WIDTH_SKL		GENMASK_ULL(9, 8)
+#define IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT	8
 
-#define IE31200_PAGES(n)		(n << (28 - PAGE_SHIFT))
+/* Skylake reports 1GB increments, everything else is 256MB */
+#define IE31200_PAGES(n, skl)	\
+	(n << (28 + (2 * skl) - PAGE_SHIFT))
 
 static int nr_channels;
 
 struct ie31200_priv {
 	void __iomem *window;
+	void __iomem *c0errlog;
+	void __iomem *c1errlog;
 };
 
 enum ie31200_chips {
@@ -157,9 +173,9 @@
 };
 
 struct dimm_data {
-	u8 size; /* in 256MB multiples */
+	u8 size; /* in multiples of 256MB, except Skylake is 1GB */
 	u8 dual_rank : 1,
-	   x16_width : 1; /* 0 means x8 width */
+	   x16_width : 2; /* 0 means x8 width */
 };
 
 static int how_many_channels(struct pci_dev *pdev)
@@ -197,11 +213,10 @@
 	return true;
 }
 
-static int eccerrlog_row(int channel, u64 log)
+static int eccerrlog_row(u64 log)
 {
-	int rank = ((log & IE31200_ECCERRLOG_RANK_BITS) >>
-		IE31200_ECCERRLOG_RANK_SHIFT);
-	return rank | (channel * IE31200_RANKS_PER_CHANNEL);
+	return ((log & IE31200_ECCERRLOG_RANK_BITS) >>
+				IE31200_ECCERRLOG_RANK_SHIFT);
 }
 
 static void ie31200_clear_error_info(struct mem_ctl_info *mci)
@@ -219,7 +234,6 @@
 {
 	struct pci_dev *pdev;
 	struct ie31200_priv *priv = mci->pvt_info;
-	void __iomem *window = priv->window;
 
 	pdev = to_pci_dev(mci->pdev);
 
@@ -232,9 +246,9 @@
 	if (!(info->errsts & IE31200_ERRSTS_BITS))
 		return;
 
-	info->eccerrlog[0] = lo_hi_readq(window + IE31200_C0ECCERRLOG);
+	info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
 	if (nr_channels == 2)
-		info->eccerrlog[1] = lo_hi_readq(window + IE31200_C1ECCERRLOG);
+		info->eccerrlog[1] = lo_hi_readq(priv->c1errlog);
 
 	pci_read_config_word(pdev, IE31200_ERRSTS, &info->errsts2);
 
@@ -245,10 +259,10 @@
 	 * should be UE info.
 	 */
 	if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
-		info->eccerrlog[0] = lo_hi_readq(window + IE31200_C0ECCERRLOG);
+		info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
 		if (nr_channels == 2)
 			info->eccerrlog[1] =
-				lo_hi_readq(window + IE31200_C1ECCERRLOG);
+				lo_hi_readq(priv->c1errlog);
 	}
 
 	ie31200_clear_error_info(mci);
@@ -274,14 +288,14 @@
 		if (log & IE31200_ECCERRLOG_UE) {
 			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
 					     0, 0, 0,
-					     eccerrlog_row(channel, log),
+					     eccerrlog_row(log),
 					     channel, -1,
 					     "ie31200 UE", "");
 		} else if (log & IE31200_ECCERRLOG_CE) {
 			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 					     0, 0,
 					     IE31200_ECCERRLOG_SYNDROME(log),
-					     eccerrlog_row(channel, log),
+					     eccerrlog_row(log),
 					     channel, -1,
 					     "ie31200 CE", "");
 		}
@@ -326,6 +340,33 @@
 	return window;
 }
 
+static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
+				     int chan)
+{
+	dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
+	dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
+	dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
+				(IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
+}
+
+static void __populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
+				 int chan)
+{
+	dd->size = (addr_decode >> (chan << 3)) & IE31200_MAD_DIMM_SIZE;
+	dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK << chan)) ? 1 : 0;
+	dd->x16_width = (addr_decode & (IE31200_MAD_DIMM_A_WIDTH << chan)) ? 1 : 0;
+}
+
+static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan,
+			       bool skl)
+{
+	if (skl)
+		__skl_populate_dimm_info(dd, addr_decode, chan);
+	else
+		__populate_dimm_info(dd, addr_decode, chan);
+}
+
+
 static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
 {
 	int i, j, ret;
@@ -334,7 +375,8 @@
 	struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
 	void __iomem *window;
 	struct ie31200_priv *priv;
-	u32 addr_decode;
+	u32 addr_decode, mad_offset;
+	bool skl = (pdev->device == PCI_DEVICE_ID_INTEL_IE31200_HB_8);
 
 	edac_dbg(0, "MC:\n");
 
@@ -363,7 +405,10 @@
 
 	edac_dbg(3, "MC: init mci\n");
 	mci->pdev = &pdev->dev;
-	mci->mtype_cap = MEM_FLAG_DDR3;
+	if (skl)
+		mci->mtype_cap = MEM_FLAG_DDR4;
+	else
+		mci->mtype_cap = MEM_FLAG_DDR3;
 	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
 	mci->edac_cap = EDAC_FLAG_SECDED;
 	mci->mod_name = EDAC_MOD_STR;
@@ -374,19 +419,24 @@
 	mci->ctl_page_to_phys = NULL;
 	priv = mci->pvt_info;
 	priv->window = window;
+	if (skl) {
+		priv->c0errlog = window + IE31200_C0ECCERRLOG_SKL;
+		priv->c1errlog = window + IE31200_C1ECCERRLOG_SKL;
+		mad_offset = IE31200_MAD_DIMM_0_OFFSET_SKL;
+	} else {
+		priv->c0errlog = window + IE31200_C0ECCERRLOG;
+		priv->c1errlog = window + IE31200_C1ECCERRLOG;
+		mad_offset = IE31200_MAD_DIMM_0_OFFSET;
+	}
 
 	/* populate DIMM info */
 	for (i = 0; i < IE31200_CHANNELS; i++) {
-		addr_decode = readl(window + IE31200_MAD_DIMM_0_OFFSET +
+		addr_decode = readl(window + mad_offset +
 					(i * 4));
 		edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
 		for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
-			dimm_info[i][j].size = (addr_decode >> (j * 8)) &
-						IE31200_MAD_DIMM_SIZE;
-			dimm_info[i][j].dual_rank = (addr_decode &
-				(IE31200_MAD_DIMM_A_RANK << j)) ? 1 : 0;
-			dimm_info[i][j].x16_width = (addr_decode &
-				(IE31200_MAD_DIMM_A_WIDTH << j)) ? 1 : 0;
+			populate_dimm_info(&dimm_info[i][j], addr_decode, j,
+					   skl);
 			edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
 				 dimm_info[i][j].size,
 				 dimm_info[i][j].dual_rank,
@@ -405,7 +455,7 @@
 			struct dimm_info *dimm;
 			unsigned long nr_pages;
 
-			nr_pages = IE31200_PAGES(dimm_info[j][i].size);
+			nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
 			if (nr_pages == 0)
 				continue;
 
@@ -417,7 +467,10 @@
 				dimm->nr_pages = nr_pages;
 				edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
 				dimm->grain = 8; /* just a guess */
-				dimm->mtype = MEM_DDR3;
+				if (skl)
+					dimm->mtype = MEM_DDR4;
+				else
+					dimm->mtype = MEM_DDR3;
 				dimm->dtype = DEV_UNKNOWN;
 				dimm->edac_mode = EDAC_UNKNOWN;
 			}
@@ -426,7 +479,10 @@
 			dimm->nr_pages = nr_pages;
 			edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
 			dimm->grain = 8; /* same guess */
-			dimm->mtype = MEM_DDR3;
+			if (skl)
+				dimm->mtype = MEM_DDR4;
+			else
+				dimm->mtype = MEM_DDR3;
 			dimm->dtype = DEV_UNKNOWN;
 			dimm->edac_mode = EDAC_UNKNOWN;
 		}
@@ -501,6 +557,9 @@
 		PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		IE31200},
 	{
+		PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		IE31200},
+	{
 		0,
 	}            /* 0 terminated list. */
 };
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 49768c0..9b6800a 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1052,7 +1052,6 @@
 	struct mce *m = (struct mce *)data;
 	struct cpuinfo_x86 *c = &cpu_data(m->extcpu);
 	int ecc;
-	u32 ebx = cpuid_ebx(0x80000007);
 
 	if (amd_filter_mce(m))
 		return NOTIFY_STOP;
@@ -1075,7 +1074,7 @@
 			((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
 			((m->status & MCI_STATUS_POISON)   ? "Poison"   : "-"));
 
-	if (!!(ebx & BIT(3))) {
+	if (boot_cpu_has(X86_FEATURE_SMCA)) {
 		u32 low, high;
 		u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
 
@@ -1094,7 +1093,7 @@
 	if (m->status & MCI_STATUS_ADDRV)
 		pr_emerg(HW_ERR "MC%d Error Address: 0x%016llx\n", m->bank, m->addr);
 
-	if (!!(ebx & BIT(3))) {
+	if (boot_cpu_has(X86_FEATURE_SMCA)) {
 		decode_smca_errors(m);
 		goto err_code;
 	}
@@ -1149,7 +1148,6 @@
 static int __init mce_amd_init(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
-	u32 ebx;
 
 	if (c->x86_vendor != X86_VENDOR_AMD)
 		return -ENODEV;
@@ -1205,9 +1203,8 @@
 		break;
 
 	case 0x17:
-		ebx = cpuid_ebx(0x80000007);
 		xec_mask = 0x3f;
-		if (!(ebx & BIT(3))) {
+		if (!boot_cpu_has(X86_FEATURE_SMCA)) {
 			printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
 			goto err_out;
 		}
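
boot_cpu_has(X86_FEATURE_SMCA) replaces the repeated cpuid_ebx(0x80000007) & BIT(3) test, but the underlying CPUID bit is the same. For reference, a user-space sketch that reads that bit directly with the compiler's <cpuid.h>; it only builds on x86 and is purely illustrative:

/*
 * User-space check of the same CPUID bit the old code tested:
 * leaf 0x80000007, EBX bit 3 (Scalable MCA).  Requires GCC/Clang's
 * <cpuid.h> on an x86 host.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000007 not available");
		return 1;
	}

	printf("Scalable MCA: %s\n", (ebx & (1u << 3)) ? "yes" : "no");
	return 0;
}
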
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 93f0d41..b4d0bf6 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -21,6 +21,8 @@
 #include <linux/smp.h>
 #include <linux/bitmap.h>
 #include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
 #include <asm/processor.h>
 #include <asm/mce.h>
 
@@ -28,8 +30,6 @@
 
 /* Static vars */
 static LIST_HEAD(sbridge_edac_list);
-static DEFINE_MUTEX(sbridge_edac_lock);
-static int probed;
 
 /*
  * Alter this version for the module when modifications are made
@@ -362,16 +362,7 @@
 
 	/* Memory type detection */
 	bool			is_mirrored, is_lockstep, is_close_pg;
-
-	/* Fifo double buffers */
-	struct mce		mce_entry[MCE_LOG_LEN];
-	struct mce		mce_outentry[MCE_LOG_LEN];
-
-	/* Fifo in/out counters */
-	unsigned		mce_in, mce_out;
-
-	/* Count indicator to show errors not got */
-	unsigned		mce_overrun;
+	bool			is_chan_hash;
 
 	/* Memory description */
 	u64			tolm, tohm;
@@ -661,18 +652,6 @@
 	{0,}			/* 0 terminated list. */
 };
 
-/*
- *	pci_device_id	table for which devices we are looking for
- */
-static const struct pci_device_id sbridge_pci_tbl[] = {
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0)},
-	{0,}			/* 0 terminated list. */
-};
-
 
 /****************************************************************************
 			Ancillary status routines
@@ -1060,6 +1039,20 @@
 	return (pkg >> 2) & 0x1;
 }
 
+static int haswell_chan_hash(int idx, u64 addr)
+{
+	int i;
+
+	/*
+	 * XOR even bits from 12:26 to bit0 of idx,
+	 *     odd bits from 13:27 to bit1
+	 */
+	for (i = 12; i < 28; i += 2)
+		idx ^= (addr >> i) & 3;
+
+	return idx;
+}
+
 /****************************************************************************
 			Memory check routines
  ****************************************************************************/
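
haswell_chan_hash() folds address bits 12..27 into the two low bits of the channel index, two bits per iteration, so the even bits land in bit 0 and the odd bits in bit 1 exactly as the comment says. Below is a stand-alone copy of the function for experimenting with the folding; the sample address is arbitrary.

/*
 * Stand-alone copy of haswell_chan_hash() plus a quick check that each
 * two-bit slice of addr[27:12] is XOR-folded into the channel index.
 */
#include <stdint.h>
#include <stdio.h>

static int haswell_chan_hash(int idx, uint64_t addr)
{
	int i;

	/*
	 * XOR even bits from 12:26 to bit0 of idx,
	 *     odd bits from 13:27 to bit1
	 */
	for (i = 12; i < 28; i += 2)
		idx ^= (addr >> i) & 3;

	return idx;
}

int main(void)
{
	uint64_t addr = 0x12345000ULL;	/* arbitrary sample address */

	printf("idx 0 -> %d, idx 3 -> %d\n",
	       haswell_chan_hash(0, addr), haswell_chan_hash(3, addr));
	return 0;
}
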
@@ -1616,6 +1609,10 @@
 		KNL_MAX_CHANNELS : NUM_CHANNELS;
 	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
 
+	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+		pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
+		pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+	}
 	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
 			pvt->info.type == KNIGHTS_LANDING)
 		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
@@ -2118,12 +2115,15 @@
 	}
 
 	ch_way = TAD_CH(reg) + 1;
-	sck_way = 1 << TAD_SOCK(reg);
+	sck_way = TAD_SOCK(reg);
 
 	if (ch_way == 3)
 		idx = addr >> 6;
-	else
+	else {
 		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
+		if (pvt->is_chan_hash)
+			idx = haswell_chan_hash(idx, addr);
+	}
 	idx = idx % ch_way;
 
 	/*
@@ -2157,7 +2157,7 @@
 		switch(ch_way) {
 		case 2:
 		case 4:
-			sck_xch = 1 << sck_way * (ch_way >> 1);
+			sck_xch = (1 << sck_way) * (ch_way >> 1);
 			break;
 		default:
 			sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -2193,7 +2193,7 @@
 
 	ch_addr = addr - offset;
 	ch_addr >>= (6 + shiftup);
-	ch_addr /= ch_way * sck_way;
+	ch_addr /= sck_xch;
 	ch_addr <<= (6 + shiftup);
 	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
 
@@ -3075,63 +3075,8 @@
 }
 
 /*
- *	sbridge_check_error	Retrieve and process errors reported by the
- *				hardware. Called by the Core module.
- */
-static void sbridge_check_error(struct mem_ctl_info *mci)
-{
-	struct sbridge_pvt *pvt = mci->pvt_info;
-	int i;
-	unsigned count = 0;
-	struct mce *m;
-
-	/*
-	 * MCE first step: Copy all mce errors into a temporary buffer
-	 * We use a double buffering here, to reduce the risk of
-	 * loosing an error.
-	 */
-	smp_rmb();
-	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
-		% MCE_LOG_LEN;
-	if (!count)
-		return;
-
-	m = pvt->mce_outentry;
-	if (pvt->mce_in + count > MCE_LOG_LEN) {
-		unsigned l = MCE_LOG_LEN - pvt->mce_in;
-
-		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
-		smp_wmb();
-		pvt->mce_in = 0;
-		count -= l;
-		m += l;
-	}
-	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
-	smp_wmb();
-	pvt->mce_in += count;
-
-	smp_rmb();
-	if (pvt->mce_overrun) {
-		sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
-			      pvt->mce_overrun);
-		smp_wmb();
-		pvt->mce_overrun = 0;
-	}
-
-	/*
-	 * MCE second step: parse errors and display
-	 */
-	for (i = 0; i < count; i++)
-		sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
-}
-
-/*
- * sbridge_mce_check_error	Replicates mcelog routine to get errors
- *				This routine simply queues mcelog errors, and
- *				return. The error itself should be handled later
- *				by sbridge_check_error.
- * WARNING: As this routine should be called at NMI time, extra care should
- * be taken to avoid deadlocks, and to be as fast as possible.
+ * Check that logging is enabled and that this is the right type
+ * of error for us to handle.
  */
 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
 				   void *data)
@@ -3146,7 +3091,7 @@
 
 	mci = get_mci_for_node_id(mce->socketid);
 	if (!mci)
-		return NOTIFY_BAD;
+		return NOTIFY_DONE;
 	pvt = mci->pvt_info;
 
 	/*
@@ -3176,21 +3121,7 @@
 			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
 			  mce->time, mce->socketid, mce->apicid);
 
-	smp_rmb();
-	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
-		smp_wmb();
-		pvt->mce_overrun++;
-		return NOTIFY_DONE;
-	}
-
-	/* Copy memory error at the ringbuffer */
-	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
-	smp_wmb();
-	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
-
-	/* Handle fatal errors immediately */
-	if (mce->mcgstatus & 1)
-		sbridge_check_error(mci);
+	sbridge_mce_output_error(mci, mce);
 
 	/* Advice mcelog that the error were handled */
 	return NOTIFY_STOP;
@@ -3276,9 +3207,6 @@
 	mci->dev_name = pci_name(pdev);
 	mci->ctl_page_to_phys = NULL;
 
-	/* Set the function pointer to an actual operation function */
-	mci->edac_check = sbridge_check_error;
-
 	pvt->info.type = type;
 	switch (type) {
 	case IVY_BRIDGE:
@@ -3426,62 +3354,40 @@
 	return rc;
 }
 
+#define ICPU(model, table) \
+	{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
+
+/* Order here must match "enum type" */
+static const struct x86_cpu_id sbridge_cpuids[] = {
+	ICPU(0x2d, pci_dev_descr_sbridge_table),	/* SANDY_BRIDGE */
+	ICPU(0x3e, pci_dev_descr_ibridge_table),	/* IVY_BRIDGE */
+	ICPU(0x3f, pci_dev_descr_haswell_table),	/* HASWELL */
+	ICPU(0x4f, pci_dev_descr_broadwell_table),	/* BROADWELL */
+	ICPU(0x57, pci_dev_descr_knl_table),		/* KNIGHTS_LANDING */
+	{ }
+};
+MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
+
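Since sbridge_cpuids[] is kept in the same order as "enum type", the matched entry's index doubles as the controller type (see the "id - sbridge_cpuids" argument passed to sbridge_register_mci() below). A simplified, hedged illustration of that pointer-arithmetic trick:

	#include <stdio.h>

	/* Cut-down stand-ins for the driver's enum type and cpuid table. */
	enum type { SANDY_BRIDGE, IVY_BRIDGE, HASWELL, BROADWELL, KNIGHTS_LANDING };

	struct cpu_id { int model; };

	static const struct cpu_id cpuids[] = {
		{ 0x2d }, { 0x3e }, { 0x3f }, { 0x4f }, { 0x57 },
	};

	int main(void)
	{
		/* Pretend the CPU match returned the 0x3f (Haswell) entry. */
		const struct cpu_id *id = &cpuids[2];

		printf("type = %ld\n", (long)(id - cpuids));	/* 2 == HASWELL */
		return 0;
	}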
 /*
- *	sbridge_probe	Probe for ONE instance of device to see if it is
+ *	sbridge_probe	Get all devices and register the memory controllers
  *			present.
  *	return:
  *		0 for FOUND a device
  *		< 0 for error code
  */
 
-static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int sbridge_probe(const struct x86_cpu_id *id)
 {
 	int rc = -ENODEV;
 	u8 mc, num_mc = 0;
 	struct sbridge_dev *sbridge_dev;
-	enum type type = SANDY_BRIDGE;
+	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
 
 	/* get the pci devices we want to reserve for our use */
-	mutex_lock(&sbridge_edac_lock);
+	rc = sbridge_get_all_devices(&num_mc, ptable);
 
-	/*
-	 * All memory controllers are allocated at the first pass.
-	 */
-	if (unlikely(probed >= 1)) {
-		mutex_unlock(&sbridge_edac_lock);
-		return -ENODEV;
-	}
-	probed++;
-
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
-		rc = sbridge_get_all_devices(&num_mc,
-					pci_dev_descr_ibridge_table);
-		type = IVY_BRIDGE;
-		break;
-	case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
-		rc = sbridge_get_all_devices(&num_mc,
-					pci_dev_descr_sbridge_table);
-		type = SANDY_BRIDGE;
-		break;
-	case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
-		rc = sbridge_get_all_devices(&num_mc,
-					pci_dev_descr_haswell_table);
-		type = HASWELL;
-		break;
-	case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
-		rc = sbridge_get_all_devices(&num_mc,
-					pci_dev_descr_broadwell_table);
-		type = BROADWELL;
-	    break;
-	case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
-		rc = sbridge_get_all_devices_knl(&num_mc,
-					pci_dev_descr_knl_table);
-		type = KNIGHTS_LANDING;
-		break;
-	}
 	if (unlikely(rc < 0)) {
-		edac_dbg(0, "couldn't get all devices for 0x%x\n", pdev->device);
+		edac_dbg(0, "couldn't get all devices\n");
 		goto fail0;
 	}
 
@@ -3492,14 +3398,13 @@
 			 mc, mc + 1, num_mc);
 
 		sbridge_dev->mc = mc++;
-		rc = sbridge_register_mci(sbridge_dev, type);
+		rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
 		if (unlikely(rc < 0))
 			goto fail1;
 	}
 
 	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
 
-	mutex_unlock(&sbridge_edac_lock);
 	return 0;
 
 fail1:
@@ -3508,74 +3413,47 @@
 
 	sbridge_put_all_devices();
 fail0:
-	mutex_unlock(&sbridge_edac_lock);
 	return rc;
 }
 
 /*
- *	sbridge_remove	destructor for one instance of device
+ *	sbridge_remove	cleanup
  *
  */
-static void sbridge_remove(struct pci_dev *pdev)
+static void sbridge_remove(void)
 {
 	struct sbridge_dev *sbridge_dev;
 
 	edac_dbg(0, "\n");
 
-	/*
-	 * we have a trouble here: pdev value for removal will be wrong, since
-	 * it will point to the X58 register used to detect that the machine
-	 * is a Nehalem or upper design. However, due to the way several PCI
-	 * devices are grouped together to provide MC functionality, we need
-	 * to use a different method for releasing the devices
-	 */
-
-	mutex_lock(&sbridge_edac_lock);
-
-	if (unlikely(!probed)) {
-		mutex_unlock(&sbridge_edac_lock);
-		return;
-	}
-
 	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
 		sbridge_unregister_mci(sbridge_dev);
 
 	/* Release PCI resources */
 	sbridge_put_all_devices();
-
-	probed--;
-
-	mutex_unlock(&sbridge_edac_lock);
 }
 
-MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);
-
-/*
- *	sbridge_driver	pci_driver structure for this module
- *
- */
-static struct pci_driver sbridge_driver = {
-	.name     = "sbridge_edac",
-	.probe    = sbridge_probe,
-	.remove   = sbridge_remove,
-	.id_table = sbridge_pci_tbl,
-};
-
 /*
  *	sbridge_init		Module entry function
  *			Try to initialize this module for its devices
  */
 static int __init sbridge_init(void)
 {
-	int pci_rc;
+	const struct x86_cpu_id *id;
+	int rc;
 
 	edac_dbg(2, "\n");
 
+	id = x86_match_cpu(sbridge_cpuids);
+	if (!id)
+		return -ENODEV;
+
 	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
 	opstate_init();
 
-	pci_rc = pci_register_driver(&sbridge_driver);
-	if (pci_rc >= 0) {
+	rc = sbridge_probe(id);
+
+	if (rc >= 0) {
 		mce_register_decode_chain(&sbridge_mce_dec);
 		if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
 			sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
@@ -3583,9 +3461,9 @@
 	}
 
 	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
-		      pci_rc);
+		      rc);
 
-	return pci_rc;
+	return rc;
 }
 
 /*
@@ -3595,7 +3473,7 @@
 static void __exit sbridge_exit(void)
 {
 	edac_dbg(2, "\n");
-	pci_unregister_driver(&sbridge_driver);
+	sbridge_remove();
 	mce_unregister_decode_chain(&sbridge_mce_dec);
 }
 
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 841a4b5..8b3226d 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -348,8 +348,7 @@
 						palmas_vbus_irq_handler,
 						IRQF_TRIGGER_FALLING |
 						IRQF_TRIGGER_RISING |
-						IRQF_ONESHOT |
-						IRQF_EARLY_RESUME,
+						IRQF_ONESHOT,
 						"palmas_usb_vbus",
 						palmas_usb);
 		if (status < 0) {
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index f4ea80d..309311b 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1023,7 +1023,7 @@
 
 	spin_unlock_irqrestore(&dev->lock, flags);
 
-	dev->netdev->trans_start = jiffies;
+	netif_trans_update(dev->netdev);
  out:
 	if (free)
 		fwnet_free_ptask(ptask);
diff --git a/drivers/firmware/broadcom/Kconfig b/drivers/firmware/broadcom/Kconfig
index 6bed1199..3c7e5b7 100644
--- a/drivers/firmware/broadcom/Kconfig
+++ b/drivers/firmware/broadcom/Kconfig
@@ -9,3 +9,14 @@
 	  This driver provides an easy way to get value of requested parameter.
 	  It simply reads content of NVRAM and parses it. It doesn't control any
 	  hardware part itself.
+
+config BCM47XX_SPROM
+	bool "Broadcom SPROM driver"
+	depends on BCM47XX_NVRAM
+	help
+	  Broadcom devices store configuration data in SPROM. Accessing it is
+	  specific to the bus host type, e.g. PCI(e) devices have it mapped in
+	  a PCI BAR.
+	  In the case of SoC devices, the SPROM content is stored in the flash
+	  used by the CFE bootloader firmware. This driver provides a method
+	  for the ssb and bcma drivers to read the SPROM on SoCs.
diff --git a/drivers/firmware/broadcom/Makefile b/drivers/firmware/broadcom/Makefile
index d0e6835..f93efc4 100644
--- a/drivers/firmware/broadcom/Makefile
+++ b/drivers/firmware/broadcom/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_BCM47XX_NVRAM)		+= bcm47xx_nvram.o
+obj-$(CONFIG_BCM47XX_SPROM)		+= bcm47xx_sprom.o
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
new file mode 100644
index 0000000..b6eb875
--- /dev/null
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -0,0 +1,737 @@
+/*
+ *  Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
+ *  Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ *  Copyright (C) 2006 Michael Buesch <m@bues.ch>
+ *  Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
+ *  Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
+ *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
+ *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *  You should have received a copy of the  GNU General Public License along
+ *  with this program; if not, write  to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/bcm47xx_nvram.h>
+#include <linux/bcma/bcma.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/ssb/ssb.h>
+
+static void create_key(const char *prefix, const char *postfix,
+		       const char *name, char *buf, int len)
+{
+	if (prefix && postfix)
+		snprintf(buf, len, "%s%s%s", prefix, name, postfix);
+	else if (prefix)
+		snprintf(buf, len, "%s%s", prefix, name);
+	else if (postfix)
+		snprintf(buf, len, "%s%s", name, postfix);
+	else
+		snprintf(buf, len, "%s", name);
+}
+
+static int get_nvram_var(const char *prefix, const char *postfix,
+			 const char *name, char *buf, int len, bool fallback)
+{
+	char key[40];
+	int err;
+
+	create_key(prefix, postfix, name, key, sizeof(key));
+
+	err = bcm47xx_nvram_getenv(key, buf, len);
+	if (fallback && err == -ENOENT && prefix) {
+		create_key(NULL, postfix, name, key, sizeof(key));
+		err = bcm47xx_nvram_getenv(key, buf, len);
+	}
+	return err;
+}
+
+#define NVRAM_READ_VAL(type)						\
+static void nvram_read_ ## type(const char *prefix,			\
+				const char *postfix, const char *name,	\
+				type *val, type allset, bool fallback)	\
+{									\
+	char buf[100];							\
+	int err;							\
+	type var;							\
+									\
+	err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf),	\
+			    fallback);					\
+	if (err < 0)							\
+		return;							\
+	err = kstrto ## type(strim(buf), 0, &var);			\
+	if (err) {							\
+		pr_warn("can not parse nvram name %s%s%s with value %s got %i\n",	\
+			prefix, name, postfix, buf, err);		\
+		return;							\
+	}								\
+	if (allset && var == allset)					\
+		return;							\
+	*val = var;							\
+}
+
+NVRAM_READ_VAL(u8)
+NVRAM_READ_VAL(s8)
+NVRAM_READ_VAL(u16)
+NVRAM_READ_VAL(u32)
+
+#undef NVRAM_READ_VAL
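The NVRAM_READ_VAL() macro stamps out one nvram_read_<type>() helper per integer type; each builds its lookup key from prefix, name and postfix (via create_key() above) and parses the value with the matching kstrto*() routine. A hedged sketch of the key composition only, with made-up prefixes:

	#include <stdio.h>

	/* Equivalent composition to create_key(): prefix + name + postfix. */
	static void make_key(const char *prefix, const char *postfix,
			     const char *name, char *buf, int len)
	{
		snprintf(buf, len, "%s%s%s", prefix ? prefix : "",
			 name, postfix ? postfix : "");
	}

	int main(void)
	{
		char key[40];

		make_key("pci/1/1/", "0", "maxp2ga", key, sizeof(key));
		printf("%s\n", key);	/* pci/1/1/maxp2ga0 */

		make_key(NULL, NULL, "boardrev", key, sizeof(key));
		printf("%s\n", key);	/* boardrev */
		return 0;
	}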
+
+static void nvram_read_u32_2(const char *prefix, const char *name,
+			     u16 *val_lo, u16 *val_hi, bool fallback)
+{
+	char buf[100];
+	int err;
+	u32 val;
+
+	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+	if (err < 0)
+		return;
+	err = kstrtou32(strim(buf), 0, &val);
+	if (err) {
+		pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+			prefix, name, buf, err);
+		return;
+	}
+	*val_lo = (val & 0x0000FFFFU);
+	*val_hi = (val & 0xFFFF0000U) >> 16;
+}
+
+static void nvram_read_leddc(const char *prefix, const char *name,
+			     u8 *leddc_on_time, u8 *leddc_off_time,
+			     bool fallback)
+{
+	char buf[100];
+	int err;
+	u32 val;
+
+	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+	if (err < 0)
+		return;
+	err = kstrtou32(strim(buf), 0, &val);
+	if (err) {
+		pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+			prefix, name, buf, err);
+		return;
+	}
+
+	if (val == 0xffff || val == 0xffffffff)
+		return;
+
+	*leddc_on_time = val & 0xff;
+	*leddc_off_time = (val >> 16) & 0xff;
+}
+
+static void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
+{
+	if (strchr(buf, ':'))
+		sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
+			&macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
+			&macaddr[5]);
+	else if (strchr(buf, '-'))
+		sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
+			&macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
+			&macaddr[5]);
+	else
+		pr_warn("Can not parse mac address: %s\n", buf);
+}
+
+static void nvram_read_macaddr(const char *prefix, const char *name,
+			       u8 val[6], bool fallback)
+{
+	char buf[100];
+	int err;
+
+	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+	if (err < 0)
+		return;
+
+	bcm47xx_nvram_parse_macaddr(buf, val);
+}
+
+static void nvram_read_alpha2(const char *prefix, const char *name,
+			     char val[2], bool fallback)
+{
+	char buf[10];
+	int err;
+
+	err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+	if (err < 0)
+		return;
+	if (buf[0] == '0')
+		return;
+	if (strlen(buf) > 2) {
+		pr_warn("alpha2 is too long %s\n", buf);
+		return;
+	}
+	memcpy(val, buf, 2);
+}
+
+/* This is a one-function-only macro; it uses the local "sprom" variable! */
+#define ENTRY(_revmask, _type, _prefix, _name, _val, _allset, _fallback) \
+	if (_revmask & BIT(sprom->revision)) \
+		nvram_read_ ## _type(_prefix, NULL, _name, &sprom->_val, \
+				     _allset, _fallback)
+/*
+ * Special version of filling function that can be safely called for any SPROM
+ * revision. Every NVRAM-to-SPROM mapping carries a bitmask of the revisions
+ * for which that mapping is valid.
+ * It obviously requires some hexadecimal/bitmask knowledge, but allows
+ * writing cleaner code (easy handling of revisions).
+ * Note that while SPROM revision 0 was never used, we still keep BIT(0)
+ * reserved for it, just to keep numbering sane.
+ */
+static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
+				    const char *prefix, bool fallback)
+{
+	const char *pre = prefix;
+	bool fb = fallback;
+
+	/* Broadcom extracts it for rev 8+ but it was found on 2 and 4 too */
+	ENTRY(0xfffffffe, u16, pre, "devid", dev_id, 0, fallback);
+
+	ENTRY(0xfffffffe, u16, pre, "boardrev", board_rev, 0, true);
+	ENTRY(0xfffffffe, u32, pre, "boardflags", boardflags, 0, fb);
+	ENTRY(0xfffffff0, u32, pre, "boardflags2", boardflags2, 0, fb);
+	ENTRY(0xfffff800, u32, pre, "boardflags3", boardflags3, 0, fb);
+	ENTRY(0x00000002, u16, pre, "boardflags", boardflags_lo, 0, fb);
+	ENTRY(0xfffffffc, u16, pre, "boardtype", board_type, 0, true);
+	ENTRY(0xfffffffe, u16, pre, "boardnum", board_num, 0, fb);
+	ENTRY(0x00000002, u8, pre, "cc", country_code, 0, fb);
+	ENTRY(0xfffffff8, u8, pre, "regrev", regrev, 0, fb);
+
+	ENTRY(0xfffffffe, u8, pre, "ledbh0", gpio0, 0xff, fb);
+	ENTRY(0xfffffffe, u8, pre, "ledbh1", gpio1, 0xff, fb);
+	ENTRY(0xfffffffe, u8, pre, "ledbh2", gpio2, 0xff, fb);
+	ENTRY(0xfffffffe, u8, pre, "ledbh3", gpio3, 0xff, fb);
+
+	ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb);
+	ENTRY(0x0000070e, u16, pre, "pa0b1", pa0b1, 0, fb);
+	ENTRY(0x0000070e, u16, pre, "pa0b2", pa0b2, 0, fb);
+	ENTRY(0x0000070e, u8, pre, "pa0itssit", itssi_bg, 0, fb);
+	ENTRY(0x0000070e, u8, pre, "pa0maxpwr", maxpwr_bg, 0, fb);
+
+	ENTRY(0x0000070c, u8, pre, "opo", opo, 0, fb);
+	ENTRY(0xfffffffe, u8, pre, "aa2g", ant_available_bg, 0, fb);
+	ENTRY(0xfffffffe, u8, pre, "aa5g", ant_available_a, 0, fb);
+	ENTRY(0x000007fe, s8, pre, "ag0", antenna_gain.a0, 0, fb);
+	ENTRY(0x000007fe, s8, pre, "ag1", antenna_gain.a1, 0, fb);
+	ENTRY(0x000007f0, s8, pre, "ag2", antenna_gain.a2, 0, fb);
+	ENTRY(0x000007f0, s8, pre, "ag3", antenna_gain.a3, 0, fb);
+
+	ENTRY(0x0000070e, u16, pre, "pa1b0", pa1b0, 0, fb);
+	ENTRY(0x0000070e, u16, pre, "pa1b1", pa1b1, 0, fb);
+	ENTRY(0x0000070e, u16, pre, "pa1b2", pa1b2, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1lob0", pa1lob0, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1lob1", pa1lob1, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1lob2", pa1lob2, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1hib0", pa1hib0, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1hib1", pa1hib1, 0, fb);
+	ENTRY(0x0000070c, u16, pre, "pa1hib2", pa1hib2, 0, fb);
+	ENTRY(0x0000070e, u8, pre, "pa1itssit", itssi_a, 0, fb);
+	ENTRY(0x0000070e, u8, pre, "pa1maxpwr", maxpwr_a, 0, fb);
+	ENTRY(0x0000070c, u8, pre, "pa1lomaxpwr", maxpwr_al, 0, fb);
+	ENTRY(0x0000070c, u8, pre, "pa1himaxpwr", maxpwr_ah, 0, fb);
+
+	ENTRY(0x00000708, u8, pre, "bxa2g", bxa2g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssisav2g", rssisav2g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssismc2g", rssismc2g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssismf2g", rssismf2g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "bxa5g", bxa5g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssisav5g", rssisav5g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssismc5g", rssismc5g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "rssismf5g", rssismf5g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "tri2g", tri2g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "tri5g", tri5g, 0, fb);
+	ENTRY(0x00000708, u8, pre, "tri5gl", tri5gl, 0, fb);
+	ENTRY(0x00000708, u8, pre, "tri5gh", tri5gh, 0, fb);
+	ENTRY(0x00000708, s8, pre, "rxpo2g", rxpo2g, 0, fb);
+	ENTRY(0x00000708, s8, pre, "rxpo5g", rxpo5g, 0, fb);
+	ENTRY(0xfffffff0, u8, pre, "txchain", txchain, 0xf, fb);
+	ENTRY(0xfffffff0, u8, pre, "rxchain", rxchain, 0xf, fb);
+	ENTRY(0xfffffff0, u8, pre, "antswitch", antswitch, 0xff, fb);
+	ENTRY(0x00000700, u8, pre, "tssipos2g", fem.ghz2.tssipos, 0, fb);
+	ENTRY(0x00000700, u8, pre, "extpagain2g", fem.ghz2.extpa_gain, 0, fb);
+	ENTRY(0x00000700, u8, pre, "pdetrange2g", fem.ghz2.pdet_range, 0, fb);
+	ENTRY(0x00000700, u8, pre, "triso2g", fem.ghz2.tr_iso, 0, fb);
+	ENTRY(0x00000700, u8, pre, "antswctl2g", fem.ghz2.antswlut, 0, fb);
+	ENTRY(0x00000700, u8, pre, "tssipos5g", fem.ghz5.tssipos, 0, fb);
+	ENTRY(0x00000700, u8, pre, "extpagain5g", fem.ghz5.extpa_gain, 0, fb);
+	ENTRY(0x00000700, u8, pre, "pdetrange5g", fem.ghz5.pdet_range, 0, fb);
+	ENTRY(0x00000700, u8, pre, "triso5g", fem.ghz5.tr_iso, 0, fb);
+	ENTRY(0x00000700, u8, pre, "antswctl5g", fem.ghz5.antswlut, 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid2ga0", txpid2g[0], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid2ga1", txpid2g[1], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid2ga2", txpid2g[2], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid2ga3", txpid2g[3], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5ga0", txpid5g[0], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5ga1", txpid5g[1], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5ga2", txpid5g[2], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5ga3", txpid5g[3], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gla0", txpid5gl[0], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gla1", txpid5gl[1], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gla2", txpid5gl[2], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gla3", txpid5gl[3], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gha0", txpid5gh[0], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gha1", txpid5gh[1], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gha2", txpid5gh[2], 0, fb);
+	ENTRY(0x000000f0, u8, pre, "txpid5gha3", txpid5gh[3], 0, fb);
+
+	ENTRY(0xffffff00, u8, pre, "tempthresh", tempthresh, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "tempoffset", tempoffset, 0, fb);
+	ENTRY(0xffffff00, u16, pre, "rawtempsense", rawtempsense, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "measpower", measpower, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "tempsense_slope", tempsense_slope, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "tempcorrx", tempcorrx, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "tempsense_option", tempsense_option, 0, fb);
+	ENTRY(0x00000700, u8, pre, "freqoffset_corr", freqoffset_corr, 0, fb);
+	ENTRY(0x00000700, u8, pre, "iqcal_swp_dis", iqcal_swp_dis, 0, fb);
+	ENTRY(0x00000700, u8, pre, "hw_iqcal_en", hw_iqcal_en, 0, fb);
+	ENTRY(0x00000700, u8, pre, "elna2g", elna2g, 0, fb);
+	ENTRY(0x00000700, u8, pre, "elna5g", elna5g, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "phycal_tempdelta", phycal_tempdelta, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "temps_period", temps_period, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "temps_hysteresis", temps_hysteresis, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "measpower1", measpower1, 0, fb);
+	ENTRY(0xffffff00, u8, pre, "measpower2", measpower2, 0, fb);
+
+	ENTRY(0x000001f0, u16, pre, "cck2gpo", cck2gpo, 0, fb);
+	ENTRY(0x000001f0, u32, pre, "ofdm2gpo", ofdm2gpo, 0, fb);
+	ENTRY(0x000001f0, u32, pre, "ofdm5gpo", ofdm5gpo, 0, fb);
+	ENTRY(0x000001f0, u32, pre, "ofdm5glpo", ofdm5glpo, 0, fb);
+	ENTRY(0x000001f0, u32, pre, "ofdm5ghpo", ofdm5ghpo, 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo0", mcs2gpo[0], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo1", mcs2gpo[1], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo2", mcs2gpo[2], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo3", mcs2gpo[3], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo4", mcs2gpo[4], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo5", mcs2gpo[5], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo6", mcs2gpo[6], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs2gpo7", mcs2gpo[7], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo0", mcs5gpo[0], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo1", mcs5gpo[1], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo2", mcs5gpo[2], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo3", mcs5gpo[3], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo4", mcs5gpo[4], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo5", mcs5gpo[5], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo6", mcs5gpo[6], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5gpo7", mcs5gpo[7], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo0", mcs5glpo[0], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo1", mcs5glpo[1], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo2", mcs5glpo[2], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo3", mcs5glpo[3], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo4", mcs5glpo[4], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo5", mcs5glpo[5], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo6", mcs5glpo[6], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5glpo7", mcs5glpo[7], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo0", mcs5ghpo[0], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo1", mcs5ghpo[1], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo2", mcs5ghpo[2], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo3", mcs5ghpo[3], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo4", mcs5ghpo[4], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo5", mcs5ghpo[5], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo6", mcs5ghpo[6], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "mcs5ghpo7", mcs5ghpo[7], 0, fb);
+	ENTRY(0x000001f0, u16, pre, "cddpo", cddpo, 0, fb);
+	ENTRY(0x000001f0, u16, pre, "stbcpo", stbcpo, 0, fb);
+	ENTRY(0x000001f0, u16, pre, "bw40po", bw40po, 0, fb);
+	ENTRY(0x000001f0, u16, pre, "bwduppo", bwduppo, 0, fb);
+
+	ENTRY(0xfffffe00, u16, pre, "cckbw202gpo", cckbw202gpo, 0, fb);
+	ENTRY(0xfffffe00, u16, pre, "cckbw20ul2gpo", cckbw20ul2gpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw202gpo", legofdmbw202gpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw20ul2gpo", legofdmbw20ul2gpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw205glpo", legofdmbw205glpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw20ul5glpo", legofdmbw20ul5glpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw205gmpo", legofdmbw205gmpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw20ul5gmpo", legofdmbw20ul5gmpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw205ghpo", legofdmbw205ghpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "legofdmbw20ul5ghpo", legofdmbw20ul5ghpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw202gpo", mcsbw202gpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "mcsbw20ul2gpo", mcsbw20ul2gpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw402gpo", mcsbw402gpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw205glpo", mcsbw205glpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "mcsbw20ul5glpo", mcsbw20ul5glpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw405glpo", mcsbw405glpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw205gmpo", mcsbw205gmpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "mcsbw20ul5gmpo", mcsbw20ul5gmpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw405gmpo", mcsbw405gmpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw205ghpo", mcsbw205ghpo, 0, fb);
+	ENTRY(0x00000600, u32, pre, "mcsbw20ul5ghpo", mcsbw20ul5ghpo, 0, fb);
+	ENTRY(0xfffffe00, u32, pre, "mcsbw405ghpo", mcsbw405ghpo, 0, fb);
+	ENTRY(0x00000600, u16, pre, "mcs32po", mcs32po, 0, fb);
+	ENTRY(0x00000600, u16, pre, "legofdm40duppo", legofdm40duppo, 0, fb);
+	ENTRY(0x00000700, u8, pre, "pcieingress_war", pcieingress_war, 0, fb);
+
+	/* TODO: rev 11 support */
+	ENTRY(0x00000700, u8, pre, "rxgainerr2ga0", rxgainerr2ga[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr2ga1", rxgainerr2ga[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr2ga2", rxgainerr2ga[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gla0", rxgainerr5gla[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gla1", rxgainerr5gla[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gla2", rxgainerr5gla[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gma0", rxgainerr5gma[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gma1", rxgainerr5gma[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gma2", rxgainerr5gma[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gha0", rxgainerr5gha[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gha1", rxgainerr5gha[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gha2", rxgainerr5gha[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gua0", rxgainerr5gua[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gua1", rxgainerr5gua[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "rxgainerr5gua2", rxgainerr5gua[2], 0, fb);
+
+	ENTRY(0xfffffe00, u8, pre, "sar2g", sar2g, 0, fb);
+	ENTRY(0xfffffe00, u8, pre, "sar5g", sar5g, 0, fb);
+
+	/* TODO: rev 11 support */
+	ENTRY(0x00000700, u8, pre, "noiselvl2ga0", noiselvl2ga[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl2ga1", noiselvl2ga[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl2ga2", noiselvl2ga[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gla0", noiselvl5gla[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gla1", noiselvl5gla[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gla2", noiselvl5gla[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gma0", noiselvl5gma[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gma1", noiselvl5gma[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gma2", noiselvl5gma[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gha0", noiselvl5gha[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gha1", noiselvl5gha[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gha2", noiselvl5gha[2], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gua0", noiselvl5gua[0], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
+	ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
+}
+#undef ENTRY /* It's specific, uses a local variable, don't use it (again). */
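Each revmask above is a bitmask of the SPROM revisions for which the given NVRAM variable is mapped, with BIT(0) left reserved. A quick decoder for reading the masks, e.g. 0x000001f0 covers revisions 4-8 and 0x00000700 covers 8-10:

	#include <stdio.h>
	#include <stdint.h>

	static void print_revs(uint32_t revmask)
	{
		int rev;

		for (rev = 0; rev < 32; rev++)
			if (revmask & (1u << rev))
				printf(" %d", rev);
		printf("\n");
	}

	int main(void)
	{
		print_revs(0x000001f0);	/* prints: 4 5 6 7 8 */
		print_revs(0x00000700);	/* prints: 8 9 10 */
		return 0;
	}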
+
+static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
+					  const char *prefix, bool fallback)
+{
+	char postfix[2];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+		struct ssb_sprom_core_pwr_info *pwr_info;
+
+		pwr_info = &sprom->core_pwr_info[i];
+
+		snprintf(postfix, sizeof(postfix), "%i", i);
+		nvram_read_u8(prefix, postfix, "maxp2ga",
+			      &pwr_info->maxpwr_2g, 0, fallback);
+		nvram_read_u8(prefix, postfix, "itt2ga",
+			      &pwr_info->itssi_2g, 0, fallback);
+		nvram_read_u8(prefix, postfix, "itt5ga",
+			      &pwr_info->itssi_5g, 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa2gw0a",
+			       &pwr_info->pa_2g[0], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa2gw1a",
+			       &pwr_info->pa_2g[1], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa2gw2a",
+			       &pwr_info->pa_2g[2], 0, fallback);
+		nvram_read_u8(prefix, postfix, "maxp5ga",
+			      &pwr_info->maxpwr_5g, 0, fallback);
+		nvram_read_u8(prefix, postfix, "maxp5gha",
+			      &pwr_info->maxpwr_5gh, 0, fallback);
+		nvram_read_u8(prefix, postfix, "maxp5gla",
+			      &pwr_info->maxpwr_5gl, 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5gw0a",
+			       &pwr_info->pa_5g[0], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5gw1a",
+			       &pwr_info->pa_5g[1], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5gw2a",
+			       &pwr_info->pa_5g[2], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5glw0a",
+			       &pwr_info->pa_5gl[0], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5glw1a",
+			       &pwr_info->pa_5gl[1], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5glw2a",
+			       &pwr_info->pa_5gl[2], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5ghw0a",
+			       &pwr_info->pa_5gh[0], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5ghw1a",
+			       &pwr_info->pa_5gh[1], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5ghw2a",
+			       &pwr_info->pa_5gh[2], 0, fallback);
+	}
+}
+
+static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
+					const char *prefix, bool fallback)
+{
+	char postfix[2];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+		struct ssb_sprom_core_pwr_info *pwr_info;
+
+		pwr_info = &sprom->core_pwr_info[i];
+
+		snprintf(postfix, sizeof(postfix), "%i", i);
+		nvram_read_u16(prefix, postfix, "pa2gw3a",
+			       &pwr_info->pa_2g[3], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5gw3a",
+			       &pwr_info->pa_5g[3], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5glw3a",
+			       &pwr_info->pa_5gl[3], 0, fallback);
+		nvram_read_u16(prefix, postfix, "pa5ghw3a",
+			       &pwr_info->pa_5gh[3], 0, fallback);
+	}
+}
+
+static bool bcm47xx_is_valid_mac(u8 *mac)
+{
+	return mac && !(mac[0] == 0x00 && mac[1] == 0x90 && mac[2] == 0x4c);
+}
+
+static int bcm47xx_increase_mac_addr(u8 *mac, u8 num)
+{
+	u8 *oui = mac + ETH_ALEN/2 - 1;
+	u8 *p = mac + ETH_ALEN - 1;
+
+	do {
+		(*p) += num;
+		if (*p > num)
+			break;
+		p--;
+		num = 1;
+	} while (p != oui);
+
+	if (p == oui) {
+		pr_err("unable to fetch mac address\n");
+		return -ENOENT;
+	}
+	return 0;
+}
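bcm47xx_increase_mac_addr() adds "num" to the last octet and carries leftwards, giving up if the carry would spill into the OUI half of the address. A worked example with an assumed base address:

	#include <stdio.h>
	#include <stdint.h>

	#define ETH_ALEN 6

	/* Same carry propagation as bcm47xx_increase_mac_addr() above. */
	static int increase_mac_addr(uint8_t *mac, uint8_t num)
	{
		uint8_t *oui = mac + ETH_ALEN / 2 - 1;
		uint8_t *p = mac + ETH_ALEN - 1;

		do {
			*p += num;
			if (*p > num)
				break;
			p--;
			num = 1;
		} while (p != oui);

		return p == oui ? -1 : 0;
	}

	int main(void)
	{
		uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0xff };

		increase_mac_addr(mac, 2);	/* 0xff + 2 wraps, carry into 0x44 */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		/* prints 00:11:22:33:45:01 */
		return 0;
	}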
+
+static int mac_addr_used = 2;
+
+static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
+					const char *prefix, bool fallback)
+{
+	bool fb = fallback;
+
+	nvram_read_macaddr(prefix, "et0macaddr", sprom->et0mac, fallback);
+	nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
+		      fallback);
+	nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
+		      fallback);
+
+	nvram_read_macaddr(prefix, "et1macaddr", sprom->et1mac, fallback);
+	nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
+		      fallback);
+	nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
+		      fallback);
+
+	nvram_read_macaddr(prefix, "et2macaddr", sprom->et2mac, fb);
+	nvram_read_u8(prefix, NULL, "et2mdcport", &sprom->et2mdcport, 0, fb);
+	nvram_read_u8(prefix, NULL, "et2phyaddr", &sprom->et2phyaddr, 0, fb);
+
+	nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback);
+	nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback);
+
+	/* The address prefix 00:90:4C is used by Broadcom in their initial
+	 * configuration. When a mac address with the prefix 00:90:4C is used
+	 * all devices from the same series share the same mac address.
+	 * To prevent mac address collisions we replace it with a mac address
+	 * derived from the base (et0) mac address.
+	 */
+	if (!bcm47xx_is_valid_mac(sprom->il0mac)) {
+		u8 mac[6];
+
+		nvram_read_macaddr(NULL, "et0macaddr", mac, false);
+		if (bcm47xx_is_valid_mac(mac)) {
+			int err = bcm47xx_increase_mac_addr(mac, mac_addr_used);
+
+			if (!err) {
+				ether_addr_copy(sprom->il0mac, mac);
+				mac_addr_used++;
+			}
+		}
+	}
+}
+
+static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
+				    bool fallback)
+{
+	nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
+			 &sprom->boardflags_hi, fallback);
+	nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
+			 &sprom->boardflags2_hi, fallback);
+}
+
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+			bool fallback)
+{
+	bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback);
+	bcm47xx_fill_board_data(sprom, prefix, fallback);
+
+	nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
+
+	/* Entries requiring custom functions */
+	nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback);
+	if (sprom->revision >= 3)
+		nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
+				 &sprom->leddc_off_time, fallback);
+
+	switch (sprom->revision) {
+	case 4:
+	case 5:
+		bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+		bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
+		break;
+	case 8:
+	case 9:
+		bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+		break;
+	}
+
+	bcm47xx_sprom_fill_auto(sprom, prefix, fallback);
+}
+
+#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
+static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
+{
+	char prefix[10];
+
+	switch (bus->bustype) {
+	case SSB_BUSTYPE_SSB:
+		bcm47xx_fill_sprom(out, NULL, false);
+		return 0;
+	case SSB_BUSTYPE_PCI:
+		memset(out, 0, sizeof(struct ssb_sprom));
+		snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
+			 bus->host_pci->bus->number + 1,
+			 PCI_SLOT(bus->host_pci->devfn));
+		bcm47xx_fill_sprom(out, prefix, false);
+		return 0;
+	default:
+		pr_warn("Unable to fill SPROM for given bustype.\n");
+		return -EINVAL;
+	}
+}
+#endif
+
+#if IS_BUILTIN(CONFIG_BCMA)
+/*
+ * Having many NVRAM entries for PCI devices led to repeating prefixes like
+ * pci/1/1/ all the time and wasting flash space. So at some point Broadcom
+ * decided to introduce prefixes like 0: 1: 2: etc.
+ * If we find e.g. devpath0=pci/2/1 or devpath0=pci/2/1/ we should use 0:
+ * instead of pci/2/1/.
+ */
+static void bcm47xx_sprom_apply_prefix_alias(char *prefix, size_t prefix_size)
+{
+	size_t prefix_len = strlen(prefix);
+	size_t short_len = prefix_len - 1;
+	char nvram_var[10];
+	char buf[20];
+	int i;
+
+	/* Passed prefix has to end with a slash */
+	if (prefix_len <= 0 || prefix[prefix_len - 1] != '/')
+		return;
+
+	for (i = 0; i < 3; i++) {
+		if (snprintf(nvram_var, sizeof(nvram_var), "devpath%d", i) <= 0)
+			continue;
+		if (bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf)) < 0)
+			continue;
+		if (!strcmp(buf, prefix) ||
+		    (short_len && strlen(buf) == short_len && !strncmp(buf, prefix, short_len))) {
+			snprintf(prefix, prefix_size, "%d:", i);
+			return;
+		}
+	}
+}
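So if the NVRAM contains, say, devpath0=pci/2/1 (with or without the trailing slash), the long prefix pci/2/1/ is rewritten to the short alias 0:. A hedged stand-alone sketch of the matching rule, using a stubbed getenv with made-up contents:

	#include <stdio.h>
	#include <string.h>

	/* Stub standing in for bcm47xx_nvram_getenv(); contents are made up. */
	static int fake_getenv(const char *key, char *buf, size_t len)
	{
		if (!strcmp(key, "devpath0")) {
			snprintf(buf, len, "pci/2/1");	/* no trailing slash */
			return 0;
		}
		return -1;
	}

	/* Same matching rule as bcm47xx_sprom_apply_prefix_alias() above. */
	static void apply_prefix_alias(char *prefix, size_t prefix_size)
	{
		size_t prefix_len = strlen(prefix);
		size_t short_len = prefix_len - 1;
		char nvram_var[10];
		char buf[20];
		int i;

		if (prefix_len <= 0 || prefix[prefix_len - 1] != '/')
			return;

		for (i = 0; i < 3; i++) {
			if (snprintf(nvram_var, sizeof(nvram_var), "devpath%d", i) <= 0)
				continue;
			if (fake_getenv(nvram_var, buf, sizeof(buf)) < 0)
				continue;
			if (!strcmp(buf, prefix) ||
			    (strlen(buf) == short_len && !strncmp(buf, prefix, short_len))) {
				snprintf(prefix, prefix_size, "%d:", i);
				return;
			}
		}
	}

	int main(void)
	{
		char prefix[20] = "pci/2/1/";

		apply_prefix_alias(prefix, sizeof(prefix));
		printf("%s\n", prefix);	/* prints "0:" */
		return 0;
	}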
+
+static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
+{
+	struct bcma_boardinfo *binfo = &bus->boardinfo;
+	struct bcma_device *core;
+	char buf[10];
+	char *prefix;
+	bool fallback = false;
+
+	switch (bus->hosttype) {
+	case BCMA_HOSTTYPE_PCI:
+		memset(out, 0, sizeof(struct ssb_sprom));
+		/* On BCM47XX all PCI buses share the same domain */
+		if (config_enabled(CONFIG_BCM47XX))
+			snprintf(buf, sizeof(buf), "pci/%u/%u/",
+				 bus->host_pci->bus->number + 1,
+				 PCI_SLOT(bus->host_pci->devfn));
+		else
+			snprintf(buf, sizeof(buf), "pci/%u/%u/",
+				 pci_domain_nr(bus->host_pci->bus) + 1,
+				 bus->host_pci->bus->number);
+		bcm47xx_sprom_apply_prefix_alias(buf, sizeof(buf));
+		prefix = buf;
+		break;
+	case BCMA_HOSTTYPE_SOC:
+		memset(out, 0, sizeof(struct ssb_sprom));
+		core = bcma_find_core(bus, BCMA_CORE_80211);
+		if (core) {
+			snprintf(buf, sizeof(buf), "sb/%u/",
+				 core->core_index);
+			prefix = buf;
+			fallback = true;
+		} else {
+			prefix = NULL;
+		}
+		break;
+	default:
+		pr_warn("Unable to fill SPROM for given bustype.\n");
+		return -EINVAL;
+	}
+
+	nvram_read_u16(prefix, NULL, "boardvendor", &binfo->vendor, 0, true);
+	if (!binfo->vendor)
+		binfo->vendor = SSB_BOARDVENDOR_BCM;
+	nvram_read_u16(prefix, NULL, "boardtype", &binfo->type, 0, true);
+
+	bcm47xx_fill_sprom(out, prefix, fallback);
+
+	return 0;
+}
+#endif
+
+static unsigned int bcm47xx_sprom_registered;
+
+/*
+ * On bcm47xx we need to register the SPROM fallback handler very early, so we
+ * can't use anything like a platform device / driver for this.
+ */
+int bcm47xx_sprom_register_fallbacks(void)
+{
+	if (bcm47xx_sprom_registered)
+		return 0;
+
+#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
+	if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
+		pr_warn("Failed to register ssb SPROM handler\n");
+#endif
+
+#if IS_BUILTIN(CONFIG_BCMA)
+	if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
+		pr_warn("Failed to register bcma SPROM handler\n");
+#endif
+
+	bcm47xx_sprom_registered = 1;
+
+	return 0;
+}
+
+fs_initcall(bcm47xx_sprom_register_fallbacks);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index e1670d5..6394152 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -87,6 +87,31 @@
 config EFI_ARMSTUB
 	bool
 
+config EFI_BOOTLOADER_CONTROL
+	tristate "EFI Bootloader Control"
+	depends on EFI_VARS
+	default n
+	---help---
+	  This module installs a reboot hook, such that if reboot() is
+	  invoked with a string argument NNN, "NNN" is copied to the
+	  "LoaderEntryOneShot" EFI variable, to be read by the
+	  bootloader. If the string matches one of the boot labels
+	  defined in its configuration, the bootloader will boot once
+	  to that label. The "LoaderEntryRebootReason" EFI variable is
+	  set with the reboot reason: "reboot" or "shutdown". The
+	  bootloader reads this reboot reason and takes particular
+	  action according to its policy.
+
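The one-shot flow described above is driven from user space through the reboot syscall's string argument; efibc copies that string into the LoaderEntryOneShot variable. A hedged sketch of such a caller (needs CAP_SYS_BOOT; the label name is made up):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/reboot.h>

	int main(void)
	{
		/* Ask the bootloader to boot the "recovery" entry once; the
		 * efibc hook copies the string to LoaderEntryOneShot before
		 * the machine restarts. */
		return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1,
			       LINUX_REBOOT_MAGIC2,
			       LINUX_REBOOT_CMD_RESTART2, "recovery");
	}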
+config EFI_CAPSULE_LOADER
+	tristate "EFI capsule loader"
+	depends on EFI
+	help
+	  This option exposes a loader interface "/dev/efi_capsule_loader" for
+	  users to load EFI capsules. This driver requires working runtime
+	  capsule support in the firmware, which many OEMs do not provide.
+
+	  Most users should say N.
+
 endmenu
 
 config UEFI_CPER
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 62e654f..a219640 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -9,7 +9,8 @@
 #
 KASAN_SANITIZE_runtime-wrappers.o	:= n
 
-obj-$(CONFIG_EFI)			+= efi.o vars.o reboot.o
+obj-$(CONFIG_EFI)			+= efi.o vars.o reboot.o memattr.o
+obj-$(CONFIG_EFI)			+= capsule.o
 obj-$(CONFIG_EFI_VARS)			+= efivars.o
 obj-$(CONFIG_EFI_ESRT)			+= esrt.o
 obj-$(CONFIG_EFI_VARS_PSTORE)		+= efi-pstore.o
@@ -18,7 +19,9 @@
 obj-$(CONFIG_EFI_RUNTIME_WRAPPERS)	+= runtime-wrappers.o
 obj-$(CONFIG_EFI_STUB)			+= libstub/
 obj-$(CONFIG_EFI_FAKE_MEMMAP)		+= fake_mem.o
+obj-$(CONFIG_EFI_BOOTLOADER_CONTROL)	+= efibc.o
 
 arm-obj-$(CONFIG_EFI)			:= arm-init.o arm-runtime.o
 obj-$(CONFIG_ARM)			+= $(arm-obj-y)
 obj-$(CONFIG_ARM64)			+= $(arm-obj-y)
+obj-$(CONFIG_EFI_CAPSULE_LOADER)	+= capsule-loader.o
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index aa1f743..a850cbc 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -11,17 +11,19 @@
  *
  */
 
+#define pr_fmt(fmt)	"efi: " fmt
+
 #include <linux/efi.h>
 #include <linux/init.h>
 #include <linux/memblock.h>
 #include <linux/mm_types.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
 
 #include <asm/efi.h>
 
-struct efi_memory_map memmap;
-
 u64 efi_system_table;
 
 static int __init is_normal_ram(efi_memory_desc_t *md)
@@ -40,7 +42,7 @@
 {
 	efi_memory_desc_t *md;
 
-	for_each_efi_memory_desc(&memmap, md) {
+	for_each_efi_memory_desc(md) {
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
 			continue;
 		if (md->virt_addr == 0)
@@ -53,6 +55,36 @@
 	return addr;
 }
 
+static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
+
+static __initdata efi_config_table_type_t arch_tables[] = {
+	{LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, NULL, &screen_info_table},
+	{NULL_GUID, NULL, NULL}
+};
+
+static void __init init_screen_info(void)
+{
+	struct screen_info *si;
+
+	if (screen_info_table != EFI_INVALID_TABLE_ADDR) {
+		si = early_memremap_ro(screen_info_table, sizeof(*si));
+		if (!si) {
+			pr_err("Could not map screen_info config table\n");
+			return;
+		}
+		screen_info = *si;
+		early_memunmap(si, sizeof(*si));
+
+		/* dummycon on ARM needs non-zero values for columns/lines */
+		screen_info.orig_video_cols = 80;
+		screen_info.orig_video_lines = 25;
+	}
+
+	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+	    memblock_is_map_memory(screen_info.lfb_base))
+		memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size);
+}
+
 static int __init uefi_init(void)
 {
 	efi_char16_t *c16;
@@ -85,6 +117,8 @@
 			efi.systab->hdr.revision >> 16,
 			efi.systab->hdr.revision & 0xffff);
 
+	efi.runtime_version = efi.systab->hdr.revision;
+
 	/* Show what we know for posterity */
 	c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor),
 				sizeof(vendor) * sizeof(efi_char16_t));
@@ -108,7 +142,8 @@
 		goto out;
 	}
 	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
-					 sizeof(efi_config_table_t), NULL);
+					 sizeof(efi_config_table_t),
+					 arch_tables);
 
 	early_memunmap(config_tables, table_size);
 out:
@@ -143,7 +178,15 @@
 	if (efi_enabled(EFI_DBG))
 		pr_info("Processing EFI memory map:\n");
 
-	for_each_efi_memory_desc(&memmap, md) {
+	/*
+	 * Discard memblocks discovered so far: if there are any at this
+	 * point, they originate from memory nodes in the DT, and UEFI
+	 * uses its own memory map instead.
+	 */
+	memblock_dump_all();
+	memblock_remove(0, (phys_addr_t)ULLONG_MAX);
+
+	for_each_efi_memory_desc(md) {
 		paddr = md->phys_addr;
 		npages = md->num_pages;
 
@@ -184,9 +227,9 @@
 
 	efi_system_table = params.system_table;
 
-	memmap.phys_map = params.mmap;
-	memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
-	if (memmap.map == NULL) {
+	efi.memmap.phys_map = params.mmap;
+	efi.memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
+	if (efi.memmap.map == NULL) {
 		/*
 		* If we are booting via UEFI, the UEFI memory map is the only
 		* description of memory we have, so there is little point in
@@ -194,16 +237,37 @@
 		*/
 		panic("Unable to map EFI memory map.\n");
 	}
-	memmap.map_end = memmap.map + params.mmap_size;
-	memmap.desc_size = params.desc_size;
-	memmap.desc_version = params.desc_ver;
+	efi.memmap.map_end = efi.memmap.map + params.mmap_size;
+	efi.memmap.desc_size = params.desc_size;
+	efi.memmap.desc_version = params.desc_ver;
+
+	WARN(efi.memmap.desc_version != 1,
+	     "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+	      efi.memmap.desc_version);
 
 	if (uefi_init() < 0)
 		return;
 
 	reserve_regions();
-	early_memunmap(memmap.map, params.mmap_size);
-	memblock_mark_nomap(params.mmap & PAGE_MASK,
-			    PAGE_ALIGN(params.mmap_size +
-				       (params.mmap & ~PAGE_MASK)));
+	efi_memattr_init();
+	early_memunmap(efi.memmap.map, params.mmap_size);
+
+	memblock_reserve(params.mmap & PAGE_MASK,
+			 PAGE_ALIGN(params.mmap_size +
+				    (params.mmap & ~PAGE_MASK)));
+
+	init_screen_info();
 }
+
+static int __init register_gop_device(void)
+{
+	void *pd;
+
+	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+		return 0;
+
+	pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
+					   &screen_info, sizeof(screen_info));
+	return PTR_ERR_OR_ZERO(pd);
+}
+subsys_initcall(register_gop_device);
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 6ae21e4..17ccf0a 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -42,11 +42,13 @@
 static bool __init efi_virtmap_init(void)
 {
 	efi_memory_desc_t *md;
+	bool systab_found;
 
 	efi_mm.pgd = pgd_alloc(&efi_mm);
 	init_new_context(NULL, &efi_mm);
 
-	for_each_efi_memory_desc(&memmap, md) {
+	systab_found = false;
+	for_each_efi_memory_desc(md) {
 		phys_addr_t phys = md->phys_addr;
 		int ret;
 
@@ -64,7 +66,25 @@
 				&phys, ret);
 			return false;
 		}
+		/*
+		 * If this entry covers the address of the UEFI system table,
+		 * calculate and record its virtual address.
+		 */
+		if (efi_system_table >= phys &&
+		    efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
+			efi.systab = (void *)(unsigned long)(efi_system_table -
+							     phys + md->virt_addr);
+			systab_found = true;
+		}
 	}
+	if (!systab_found) {
+		pr_err("No virtual mapping found for the UEFI System Table\n");
+		return false;
+	}
+
+	if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+		return false;
+
 	return true;
 }
 
@@ -89,26 +109,17 @@
 
 	pr_info("Remapping and enabling EFI services.\n");
 
-	mapsize = memmap.map_end - memmap.map;
-	memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
-						   mapsize);
-	if (!memmap.map) {
+	mapsize = efi.memmap.map_end - efi.memmap.map;
+
+	efi.memmap.map = memremap(efi.memmap.phys_map, mapsize, MEMREMAP_WB);
+	if (!efi.memmap.map) {
 		pr_err("Failed to remap EFI memory map\n");
 		return -ENOMEM;
 	}
-	memmap.map_end = memmap.map + mapsize;
-	efi.memmap = &memmap;
-
-	efi.systab = (__force void *)ioremap_cache(efi_system_table,
-						   sizeof(efi_system_table_t));
-	if (!efi.systab) {
-		pr_err("Failed to remap EFI System Table\n");
-		return -ENOMEM;
-	}
-	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+	efi.memmap.map_end = efi.memmap.map + mapsize;
 
 	if (!efi_virtmap_init()) {
-		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
+		pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
 		return -ENOMEM;
 	}
 
@@ -116,8 +127,6 @@
 	efi_native_runtime_setup();
 	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
-	efi.runtime_version = efi.systab->hdr.revision;
-
 	return 0;
 }
 early_initcall(arm_enable_runtime_services);
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
new file mode 100644
index 0000000..c99c24b
--- /dev/null
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -0,0 +1,343 @@
+/*
+ * EFI capsule loader driver.
+ *
+ * Copyright 2015 Intel Corporation
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/efi.h>
+
+#define NO_FURTHER_WRITE_ACTION -1
+
+struct capsule_info {
+	bool		header_obtained;
+	int		reset_type;
+	long		index;
+	size_t		count;
+	size_t		total_size;
+	struct page	**pages;
+	size_t		page_bytes_remain;
+};
+
+/**
+ * efi_free_all_buff_pages - free all previously allocated buffer pages
+ * @cap_info: pointer to current instance of capsule_info structure
+ *
+ *	In addition to freeing buffer pages, it flags NO_FURTHER_WRITE_ACTION
+ *	to cease processing data in subsequent write(2) calls until close(2)
+ *	is called.
+ **/
+static void efi_free_all_buff_pages(struct capsule_info *cap_info)
+{
+	while (cap_info->index > 0)
+		__free_page(cap_info->pages[--cap_info->index]);
+
+	cap_info->index = NO_FURTHER_WRITE_ACTION;
+}
+
+/**
+ * efi_capsule_setup_info - obtain the efi capsule header in the binary and
+ *			    set up the capsule_info structure
+ * @cap_info: pointer to current instance of capsule_info structure
+ * @kbuff: a mapped first page buffer pointer
+ * @hdr_bytes: the total number of bytes received for the efi header
+ **/
+static ssize_t efi_capsule_setup_info(struct capsule_info *cap_info,
+				      void *kbuff, size_t hdr_bytes)
+{
+	efi_capsule_header_t *cap_hdr;
+	size_t pages_needed;
+	int ret;
+	void *temp_page;
+
+	/* Only process data block that is larger than efi header size */
+	if (hdr_bytes < sizeof(efi_capsule_header_t))
+		return 0;
+
+	/* Reset back to the correct offset of header */
+	cap_hdr = kbuff - cap_info->count;
+	pages_needed = ALIGN(cap_hdr->imagesize, PAGE_SIZE) >> PAGE_SHIFT;
+
+	if (pages_needed == 0) {
+		pr_err("%s: pages count invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Check if the capsule binary is supported */
+	ret = efi_capsule_supported(cap_hdr->guid, cap_hdr->flags,
+				    cap_hdr->imagesize,
+				    &cap_info->reset_type);
+	if (ret) {
+		pr_err("%s: efi_capsule_supported() failed\n",
+		       __func__);
+		return ret;
+	}
+
+	cap_info->total_size = cap_hdr->imagesize;
+	temp_page = krealloc(cap_info->pages,
+			     pages_needed * sizeof(void *),
+			     GFP_KERNEL | __GFP_ZERO);
+	if (!temp_page) {
+		pr_debug("%s: krealloc() failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	cap_info->pages = temp_page;
+	cap_info->header_obtained = true;
+
+	return 0;
+}
+
+/**
+ * efi_capsule_submit_update - invoke the efi_capsule_update API once the
+ *			       binary upload is done
+ * @cap_info: pointer to current instance of capsule_info structure
+ **/
+static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
+{
+	int ret;
+	void *cap_hdr_temp;
+
+	cap_hdr_temp = kmap(cap_info->pages[0]);
+	if (!cap_hdr_temp) {
+		pr_debug("%s: kmap() failed\n", __func__);
+		return -EFAULT;
+	}
+
+	ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
+	kunmap(cap_info->pages[0]);
+	if (ret) {
+		pr_err("%s: efi_capsule_update() failed\n", __func__);
+		return ret;
+	}
+
+	/* Indicate capsule binary uploading is done */
+	cap_info->index = NO_FURTHER_WRITE_ACTION;
+	pr_info("%s: Successfully uploaded capsule file with reboot type '%s'\n",
+		__func__, !cap_info->reset_type ? "RESET_COLD" :
+		cap_info->reset_type == 1 ? "RESET_WARM" :
+		"RESET_SHUTDOWN");
+	return 0;
+}
+
+/**
+ * efi_capsule_write - store the capsule binary and pass it to
+ *		       efi_capsule_update() API
+ * @file: file pointer
+ * @buff: buffer pointer
+ * @count: number of bytes in @buff
+ * @offp: not used
+ *
+ *	Expectation:
+ *	- A user space tool should start at the beginning of the capsule binary
+ *	  and pass data in sequentially.
+ *	- Users should close and re-open this file node in order to upload more
+ *	  capsules.
+ *	- After an error is returned, the user should close the file and restart
+ *	  the operation for the next try; otherwise -EIO will be returned until
+ *	  the file is closed.
+ *	- An EFI capsule header must be located at the beginning of capsule
+ *	  binary file and passed in as first block data of write operation.
+ **/
+static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
+				 size_t count, loff_t *offp)
+{
+	int ret = 0;
+	struct capsule_info *cap_info = file->private_data;
+	struct page *page;
+	void *kbuff = NULL;
+	size_t write_byte;
+
+	if (count == 0)
+		return 0;
+
+	/* Return error while NO_FURTHER_WRITE_ACTION is flagged */
+	if (cap_info->index < 0)
+		return -EIO;
+
+	/* Only alloc a new page when previous page is full */
+	if (!cap_info->page_bytes_remain) {
+		page = alloc_page(GFP_KERNEL);
+		if (!page) {
+			pr_debug("%s: alloc_page() failed\n", __func__);
+			ret = -ENOMEM;
+			goto failed;
+		}
+
+		cap_info->pages[cap_info->index++] = page;
+		cap_info->page_bytes_remain = PAGE_SIZE;
+	}
+
+	page = cap_info->pages[cap_info->index - 1];
+
+	kbuff = kmap(page);
+	if (!kbuff) {
+		pr_debug("%s: kmap() failed\n", __func__);
+		ret = -EFAULT;
+		goto failed;
+	}
+	kbuff += PAGE_SIZE - cap_info->page_bytes_remain;
+
+	/* Copy capsule binary data from user space to kernel space buffer */
+	write_byte = min_t(size_t, count, cap_info->page_bytes_remain);
+	if (copy_from_user(kbuff, buff, write_byte)) {
+		pr_debug("%s: copy_from_user() failed\n", __func__);
+		ret = -EFAULT;
+		goto fail_unmap;
+	}
+	cap_info->page_bytes_remain -= write_byte;
+
+	/* Setup capsule binary info structure */
+	if (!cap_info->header_obtained) {
+		ret = efi_capsule_setup_info(cap_info, kbuff,
+					     cap_info->count + write_byte);
+		if (ret)
+			goto fail_unmap;
+	}
+
+	cap_info->count += write_byte;
+	kunmap(page);
+
+	/* Submit the full binary to efi_capsule_update() API */
+	if (cap_info->header_obtained &&
+	    cap_info->count >= cap_info->total_size) {
+		if (cap_info->count > cap_info->total_size) {
+			pr_err("%s: upload size exceeded header defined size\n",
+			       __func__);
+			ret = -EINVAL;
+			goto failed;
+		}
+
+		ret = efi_capsule_submit_update(cap_info);
+		if (ret)
+			goto failed;
+	}
+
+	return write_byte;
+
+fail_unmap:
+	kunmap(page);
+failed:
+	efi_free_all_buff_pages(cap_info);
+	return ret;
+}
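From user space the expected sequence is simply: open /dev/efi_capsule_loader, stream the capsule from its first byte in sequential write(2) calls, then close. A hedged sketch (the capsule filename is made up):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int in = open("firmware.cap", O_RDONLY);	/* made-up name */
		int out = open("/dev/efi_capsule_loader", O_WRONLY);

		if (in < 0 || out < 0) {
			perror("open");
			return 1;
		}

		/* Pass the binary sequentially, header first, as the driver expects. */
		while ((n = read(in, buf, sizeof(buf))) > 0)
			if (write(out, buf, n) != n) {
				perror("write");
				return 1;
			}

		close(in);
		return close(out);	/* close() triggers the flush/cleanup checks */
	}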
+
+/**
+ * efi_capsule_flush - called by file close or file flush
+ * @file: file pointer
+ * @id: not used
+ *
+ *	If a capsule has only been partially uploaded, calling this function
+ *	is treated as terminating the upload: the buffer pages allocated so
+ *	far are freed and -ECANCELED is returned.
+ **/
+static int efi_capsule_flush(struct file *file, fl_owner_t id)
+{
+	int ret = 0;
+	struct capsule_info *cap_info = file->private_data;
+
+	if (cap_info->index > 0) {
+		pr_err("%s: capsule upload not complete\n", __func__);
+		efi_free_all_buff_pages(cap_info);
+		ret = -ECANCELED;
+	}
+
+	return ret;
+}
+
+/**
+ * efi_capsule_release - called by file close
+ * @inode: not used
+ * @file: file pointer
+ *
+ *	We will not free successfully submitted pages since efi update
+ *	requires data to be maintained across system reboot.
+ **/
+static int efi_capsule_release(struct inode *inode, struct file *file)
+{
+	struct capsule_info *cap_info = file->private_data;
+
+	kfree(cap_info->pages);
+	kfree(file->private_data);
+	file->private_data = NULL;
+	return 0;
+}
+
+/**
+ * efi_capsule_open - called by file open
+ * @inode: not used
+ * @file: file pointer
+ *
+ *	Allocates a fresh capsule_info structure for every open() call.
+ *	This allows the file to be opened multiple times concurrently, so a
+ *	user does not need to wait for others to finish before uploading
+ *	their own capsule binary.
+ **/
+static int efi_capsule_open(struct inode *inode, struct file *file)
+{
+	struct capsule_info *cap_info;
+
+	cap_info = kzalloc(sizeof(*cap_info), GFP_KERNEL);
+	if (!cap_info)
+		return -ENOMEM;
+
+	cap_info->pages = kzalloc(sizeof(void *), GFP_KERNEL);
+	if (!cap_info->pages) {
+		kfree(cap_info);
+		return -ENOMEM;
+	}
+
+	file->private_data = cap_info;
+
+	return 0;
+}
+
+static const struct file_operations efi_capsule_fops = {
+	.owner = THIS_MODULE,
+	.open = efi_capsule_open,
+	.write = efi_capsule_write,
+	.flush = efi_capsule_flush,
+	.release = efi_capsule_release,
+	.llseek = no_llseek,
+};
+
+static struct miscdevice efi_capsule_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "efi_capsule_loader",
+	.fops = &efi_capsule_fops,
+};
+
+static int __init efi_capsule_loader_init(void)
+{
+	int ret;
+
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return -ENODEV;
+
+	ret = misc_register(&efi_capsule_misc);
+	if (ret)
+		pr_err("%s: Failed to register misc char file node\n",
+		       __func__);
+
+	return ret;
+}
+module_init(efi_capsule_loader_init);
+
+static void __exit efi_capsule_loader_exit(void)
+{
+	misc_deregister(&efi_capsule_misc);
+}
+module_exit(efi_capsule_loader_exit);
+
+MODULE_DESCRIPTION("EFI capsule firmware binary loader");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
new file mode 100644
index 0000000..53b9fd2
--- /dev/null
+++ b/drivers/firmware/efi/capsule.c
@@ -0,0 +1,308 @@
+/*
+ * EFI capsule support.
+ *
+ * Copyright 2013 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/efi.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+
+typedef struct {
+	u64 length;
+	u64 data;
+} efi_capsule_block_desc_t;
+
+static bool capsule_pending;
+static bool stop_capsules;
+static int efi_reset_type = -1;
+
+/*
+ * capsule_mutex serialises access to both capsule_pending and
+ * efi_reset_type and stop_capsules.
+ */
+static DEFINE_MUTEX(capsule_mutex);
+
+/**
+ * efi_capsule_pending - has a capsule been passed to the firmware?
+ * @reset_type: store the type of EFI reset if capsule is pending
+ *
+ * To ensure that the registered capsule is processed correctly by the
+ * firmware, we need to perform a specific type of reset. If a capsule
+ * is pending, return the reset type in @reset_type.
+ *
+ * This function races with callers of efi_capsule_update(): for
+ * example, a caller that reads the state here while somebody else is
+ * in efi_capsule_update() but has not yet reached
+ * efi_capsule_update_locked() will miss the updates made to
+ * capsule_pending and efi_reset_type once efi_capsule_update_locked()
+ * completes.
+ *
+ * A non-racy use is from platform reboot code because we use
+ * system_state to ensure no capsules can be sent to the firmware once
+ * we're at SYSTEM_RESTART. See efi_capsule_update_locked().
+ */
+bool efi_capsule_pending(int *reset_type)
+{
+	if (!capsule_pending)
+		return false;
+
+	if (reset_type)
+		*reset_type = efi_reset_type;
+
+	return true;
+}
+
+/*
+ * Whitelist of EFI capsule flags that we support.
+ *
+ * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
+ * require us to prepare the kernel for reboot. Refuse to load any
+ * capsules with that flag and any other flags that we do not know how
+ * to handle.
+ */
+#define EFI_CAPSULE_SUPPORTED_FLAG_MASK			\
+	(EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
+
+/**
+ * efi_capsule_supported - does the firmware support the capsule?
+ * @guid: vendor guid of capsule
+ * @flags: capsule flags
+ * @size: size of capsule data
+ * @reset: the reset type required for this capsule
+ *
+ * Check whether a capsule with @flags is supported by the firmware
+ * and that @size doesn't exceed the maximum size for a capsule.
+ *
+ * No attempt is made to check @reset against the reset type required
+ * by any pending capsules because of the races involved.
+ */
+int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
+{
+	efi_capsule_header_t capsule;
+	efi_capsule_header_t *cap_list[] = { &capsule };
+	efi_status_t status;
+	u64 max_size;
+
+	if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
+		return -EINVAL;
+
+	capsule.headersize = capsule.imagesize = sizeof(capsule);
+	memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
+	capsule.flags = flags;
+
+	status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
+	if (status != EFI_SUCCESS)
+		return efi_status_to_err(status);
+
+	if (size > max_size)
+		return -ENOSPC;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_supported);
+
+/*
+ * Every scatter gather list (block descriptor) page must end with a
+ * continuation pointer. The last continuation pointer of the last
+ * page must be zero to mark the end of the chain.
+ */
+#define SGLIST_PER_PAGE	((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
+
+/*
+ * How many scatter gather list (block descriptor) pages do we need
+ * to map @count pages?
+ */
+static inline unsigned int sg_pages_num(unsigned int count)
+{
+	return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
+}
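
A quick worked example of this arithmetic, assuming 4 KiB pages: each
efi_capsule_block_desc_t is 16 bytes, so SGLIST_PER_PAGE = 4096/16 - 1 = 255,
and a 2 MiB capsule (512 data pages) needs sg_pages_num(512) =
DIV_ROUND_UP(512, 255) = 3 block descriptor pages.
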
+
+/**
+ * efi_capsule_update_locked - pass a single capsule to the firmware
+ * @capsule: capsule to send to the firmware
+ * @sg_pages: array of scatter gather (block descriptor) pages
+ * @reset: the reset type required for @capsule
+ *
+ * This function must be called under capsule_mutex. It checks whether
+ * efi_reset_type conflicts with @reset and, if the capsule is
+ * successfully sent to the firmware, atomically sets efi_reset_type
+ * and capsule_pending.
+ *
+ * We also check to see if the system is about to restart, and if so,
+ * abort. This avoids races between efi_capsule_update() and
+ * efi_capsule_pending().
+ */
+static int
+efi_capsule_update_locked(efi_capsule_header_t *capsule,
+			  struct page **sg_pages, int reset)
+{
+	efi_physical_addr_t sglist_phys;
+	efi_status_t status;
+
+	lockdep_assert_held(&capsule_mutex);
+
+	/*
+	 * If someone has already registered a capsule that requires a
+	 * different reset type, we're out of luck and must abort.
+	 */
+	if (efi_reset_type >= 0 && efi_reset_type != reset) {
+		pr_err("Conflicting capsule reset type %d (%d).\n",
+		       reset, efi_reset_type);
+		return -EINVAL;
+	}
+
+	/*
+	 * If the system is getting ready to restart it may have
+	 * called efi_capsule_pending() to make decisions (such as
+	 * whether to force an EFI reboot), and we're racing against
+	 * that call. Abort in that case.
+	 */
+	if (unlikely(stop_capsules)) {
+		pr_warn("Capsule update raced with reboot, aborting.\n");
+		return -EINVAL;
+	}
+
+	sglist_phys = page_to_phys(sg_pages[0]);
+
+	status = efi.update_capsule(&capsule, 1, sglist_phys);
+	if (status == EFI_SUCCESS) {
+		capsule_pending = true;
+		efi_reset_type = reset;
+	}
+
+	return efi_status_to_err(status);
+}
+
+/**
+ * efi_capsule_update - send a capsule to the firmware
+ * @capsule: capsule to send to firmware
+ * @pages: an array of capsule data pages
+ *
+ * Build a scatter gather list with EFI capsule block descriptors to
+ * map the capsule described by @capsule with its data in @pages and
+ * send it to the firmware via the UpdateCapsule() runtime service.
+ *
+ * @capsule must be a virtual mapping of the first page in @pages
+ * (@pages[0]) in the kernel address space. That is, a
+ * capsule_header_t that describes the entire contents of the capsule
+ * must be at the start of the first data page.
+ *
+ * Even though this function validates that the firmware supports the
+ * capsule guid, users will likely want to call efi_capsule_supported()
+ * themselves and check that it succeeds before calling this function,
+ * because that makes it easier to print helpful error messages.
+ *
+ * If the capsule is successfully submitted to the firmware, any
+ * subsequent calls to efi_capsule_pending() will return true. @pages
+ * must not be released or modified if this function returns
+ * successfully.
+ *
+ * Callers must be prepared for this function to fail, which can
+ * happen if we raced with system reboot or if there is already a
+ * pending capsule that has a reset type that conflicts with the one
+ * required by @capsule. Do NOT use efi_capsule_pending() to detect
+ * this conflict since that would be racy. Instead, submit the capsule
+ * to efi_capsule_update() and check the return value.
+ *
+ * Return 0 on success, or a negative error code converted from the EFI
+ * status code on failure.
+ */
+int efi_capsule_update(efi_capsule_header_t *capsule, struct page **pages)
+{
+	u32 imagesize = capsule->imagesize;
+	efi_guid_t guid = capsule->guid;
+	unsigned int count, sg_count;
+	u32 flags = capsule->flags;
+	struct page **sg_pages;
+	int rv, reset_type;
+	int i, j;
+
+	rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
+	if (rv)
+		return rv;
+
+	count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
+	sg_count = sg_pages_num(count);
+
+	sg_pages = kzalloc(sg_count * sizeof(*sg_pages), GFP_KERNEL);
+	if (!sg_pages)
+		return -ENOMEM;
+
+	for (i = 0; i < sg_count; i++) {
+		sg_pages[i] = alloc_page(GFP_KERNEL);
+		if (!sg_pages[i]) {
+			rv = -ENOMEM;
+			goto out;
+		}
+	}
+
+	for (i = 0; i < sg_count; i++) {
+		efi_capsule_block_desc_t *sglist;
+
+		sglist = kmap(sg_pages[i]);
+		if (!sglist) {
+			rv = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
+			u64 sz = min_t(u64, imagesize, PAGE_SIZE);
+
+			sglist[j].length = sz;
+			sglist[j].data = page_to_phys(*pages++);
+
+			imagesize -= sz;
+			count--;
+		}
+
+		/* Continuation pointer */
+		sglist[j].length = 0;
+
+		if (i + 1 == sg_count)
+			sglist[j].data = 0;
+		else
+			sglist[j].data = page_to_phys(sg_pages[i + 1]);
+
+		kunmap(sg_pages[i]);
+	}
+
+	mutex_lock(&capsule_mutex);
+	rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
+	mutex_unlock(&capsule_mutex);
+
+out:
+	for (i = 0; rv && i < sg_count; i++) {
+		if (sg_pages[i])
+			__free_page(sg_pages[i]);
+	}
+
+	kfree(sg_pages);
+	return rv;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_update);
+
+static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
+{
+	mutex_lock(&capsule_mutex);
+	stop_capsules = true;
+	mutex_unlock(&capsule_mutex);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block capsule_reboot_nb = {
+	.notifier_call = capsule_reboot_notify,
+};
+
+static int __init capsule_reboot_register(void)
+{
+	return register_reboot_notifier(&capsule_reboot_nb);
+}
+core_initcall(capsule_reboot_register);
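
A hedged sketch of how an in-kernel user might combine the two exported
helpers above. The capsule is assumed to already be laid out in my_pages[]
with the efi_capsule_header_t at the start of the first page; the function
and variable names are illustrative, not part of this patch:

	static int my_send_capsule(efi_capsule_header_t *capsule,
				   struct page **my_pages)
	{
		int reset_type;
		int ret;

		/* Ask up front, mainly to get a friendly error message. */
		ret = efi_capsule_supported(capsule->guid, capsule->flags,
					    capsule->imagesize, &reset_type);
		if (ret) {
			pr_err("capsule not supported by firmware: %d\n", ret);
			return ret;
		}

		/*
		 * efi_capsule_update() builds the block descriptor list
		 * itself; on success the pages must stay allocated until
		 * the reboot that processes the capsule.
		 */
		return efi_capsule_update(capsule, my_pages);
	}
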
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 3a69ed5..05509f3 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -43,6 +43,7 @@
 	.config_table		= EFI_INVALID_TABLE_ADDR,
 	.esrt			= EFI_INVALID_TABLE_ADDR,
 	.properties_table	= EFI_INVALID_TABLE_ADDR,
+	.mem_attr_table		= EFI_INVALID_TABLE_ADDR,
 };
 EXPORT_SYMBOL(efi);
 
@@ -256,7 +257,7 @@
  */
 int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 {
-	struct efi_memory_map *map = efi.memmap;
+	struct efi_memory_map *map = &efi.memmap;
 	phys_addr_t p, e;
 
 	if (!efi_enabled(EFI_MEMMAP)) {
@@ -338,6 +339,7 @@
 	{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
 	{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
 	{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
+	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
 	{NULL_GUID, NULL, NULL},
 };
 
@@ -351,8 +353,9 @@
 		for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
 			if (!efi_guidcmp(*guid, table_types[i].guid)) {
 				*(table_types[i].ptr) = table;
-				pr_cont(" %s=0x%lx ",
-					table_types[i].name, table);
+				if (table_types[i].name)
+					pr_cont(" %s=0x%lx ",
+						table_types[i].name, table);
 				return 1;
 			}
 		}
@@ -620,16 +623,12 @@
  */
 u64 __weak efi_mem_attributes(unsigned long phys_addr)
 {
-	struct efi_memory_map *map;
 	efi_memory_desc_t *md;
-	void *p;
 
 	if (!efi_enabled(EFI_MEMMAP))
 		return 0;
 
-	map = efi.memmap;
-	for (p = map->map; p < map->map_end; p += map->desc_size) {
-		md = p;
+	for_each_efi_memory_desc(md) {
 		if ((md->phys_addr <= phys_addr) &&
 		    (phys_addr < (md->phys_addr +
 		    (md->num_pages << EFI_PAGE_SHIFT))))
@@ -637,3 +636,36 @@
 	}
 	return 0;
 }
+
+int efi_status_to_err(efi_status_t status)
+{
+	int err;
+
+	switch (status) {
+	case EFI_SUCCESS:
+		err = 0;
+		break;
+	case EFI_INVALID_PARAMETER:
+		err = -EINVAL;
+		break;
+	case EFI_OUT_OF_RESOURCES:
+		err = -ENOSPC;
+		break;
+	case EFI_DEVICE_ERROR:
+		err = -EIO;
+		break;
+	case EFI_WRITE_PROTECTED:
+		err = -EROFS;
+		break;
+	case EFI_SECURITY_VIOLATION:
+		err = -EACCES;
+		break;
+	case EFI_NOT_FOUND:
+		err = -ENOENT;
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+	return err;
+}
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
new file mode 100644
index 0000000..8dd0c70
--- /dev/null
+++ b/drivers/firmware/efi/efibc.c
@@ -0,0 +1,113 @@
+/*
+ * efibc: control EFI bootloaders which obey LoaderEntryOneShot var
+ * Copyright (c) 2013-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#define pr_fmt(fmt) "efibc: " fmt
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+static void efibc_str_to_str16(const char *str, efi_char16_t *str16)
+{
+	size_t i;
+
+	for (i = 0; i < strlen(str); i++)
+		str16[i] = str[i];
+
+	str16[i] = '\0';
+}
+
+static int efibc_set_variable(const char *name, const char *value)
+{
+	int ret;
+	efi_guid_t guid = LINUX_EFI_LOADER_ENTRY_GUID;
+	struct efivar_entry *entry;
+	size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
+
+	if (size > sizeof(entry->var.Data)) {
+		pr_err("value is too large\n");
+		return -EINVAL;
+	}
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		pr_err("failed to allocate efivar entry\n");
+		return -ENOMEM;
+	}
+
+	efibc_str_to_str16(name, entry->var.VariableName);
+	efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
+	memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
+
+	ret = efivar_entry_set(entry,
+			       EFI_VARIABLE_NON_VOLATILE
+			       | EFI_VARIABLE_BOOTSERVICE_ACCESS
+			       | EFI_VARIABLE_RUNTIME_ACCESS,
+			       size, entry->var.Data, NULL);
+	if (ret)
+		pr_err("failed to set %s EFI variable: 0x%x\n",
+		       name, ret);
+
+	kfree(entry);
+	return ret;
+}
+
+static int efibc_reboot_notifier_call(struct notifier_block *notifier,
+				      unsigned long event, void *data)
+{
+	const char *reason = "shutdown";
+	int ret;
+
+	if (event == SYS_RESTART)
+		reason = "reboot";
+
+	ret = efibc_set_variable("LoaderEntryRebootReason", reason);
+	if (ret || !data)
+		return NOTIFY_DONE;
+
+	efibc_set_variable("LoaderEntryOneShot", (char *)data);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block efibc_reboot_notifier = {
+	.notifier_call = efibc_reboot_notifier_call,
+};
+
+static int __init efibc_init(void)
+{
+	int ret;
+
+	if (!efi_enabled(EFI_RUNTIME_SERVICES))
+		return -ENODEV;
+
+	ret = register_reboot_notifier(&efibc_reboot_notifier);
+	if (ret)
+		pr_err("unable to register reboot notifier\n");
+
+	return ret;
+}
+module_init(efibc_init);
+
+static void __exit efibc_exit(void)
+{
+	unregister_reboot_notifier(&efibc_reboot_notifier);
+}
+module_exit(efibc_exit);
+
+MODULE_AUTHOR("Jeremy Compostella <jeremy.compostella@intel.com>");
+MODULE_AUTHOR("Matt Gumbel <matthew.k.gumbel@intel.com>");
+MODULE_DESCRIPTION("EFI Bootloader Control");
+MODULE_LICENSE("GPL v2");
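
As a concrete example of the flow above: a userspace reboot(2) call using
LINUX_REBOOT_CMD_RESTART2 with the argument string "bootloader" reaches
this notifier with event == SYS_RESTART and data pointing at "bootloader",
so efibc records LoaderEntryRebootReason="reboot" and
LoaderEntryOneShot="bootloader" for the bootloader to consume on the next
boot.
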
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 096adcb..116b244 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -661,7 +661,7 @@
 			return;
 
 		err = efivar_init(efivar_update_sysfs_entry, entry,
-				  true, false, &efivar_sysfs_list);
+				  false, &efivar_sysfs_list);
 		if (!err)
 			break;
 
@@ -730,8 +730,7 @@
 		return -ENOMEM;
 	}
 
-	efivar_init(efivars_sysfs_callback, NULL, false,
-		    true, &efivar_sysfs_list);
+	efivar_init(efivars_sysfs_callback, NULL, true, &efivar_sysfs_list);
 
 	error = create_efivars_bin_attributes();
 	if (error) {
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index ed3a854..48430ab 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -57,7 +57,7 @@
 void __init efi_fake_memmap(void)
 {
 	u64 start, end, m_start, m_end, m_attr;
-	int new_nr_map = memmap.nr_map;
+	int new_nr_map = efi.memmap.nr_map;
 	efi_memory_desc_t *md;
 	phys_addr_t new_memmap_phy;
 	void *new_memmap;
@@ -68,8 +68,7 @@
 		return;
 
 	/* count up the number of EFI memory descriptor */
-	for (old = memmap.map; old < memmap.map_end; old += memmap.desc_size) {
-		md = old;
+	for_each_efi_memory_desc(md) {
 		start = md->phys_addr;
 		end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
 
@@ -95,25 +94,25 @@
 	}
 
 	/* allocate memory for new EFI memmap */
-	new_memmap_phy = memblock_alloc(memmap.desc_size * new_nr_map,
+	new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
 					PAGE_SIZE);
 	if (!new_memmap_phy)
 		return;
 
 	/* create new EFI memmap */
 	new_memmap = early_memremap(new_memmap_phy,
-				    memmap.desc_size * new_nr_map);
+				    efi.memmap.desc_size * new_nr_map);
 	if (!new_memmap) {
-		memblock_free(new_memmap_phy, memmap.desc_size * new_nr_map);
+		memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
 		return;
 	}
 
-	for (old = memmap.map, new = new_memmap;
-	     old < memmap.map_end;
-	     old += memmap.desc_size, new += memmap.desc_size) {
+	for (old = efi.memmap.map, new = new_memmap;
+	     old < efi.memmap.map_end;
+	     old += efi.memmap.desc_size, new += efi.memmap.desc_size) {
 
 		/* copy original EFI memory descriptor */
-		memcpy(new, old, memmap.desc_size);
+		memcpy(new, old, efi.memmap.desc_size);
 		md = new;
 		start = md->phys_addr;
 		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
@@ -134,8 +133,8 @@
 				md->num_pages = (m_end - md->phys_addr + 1) >>
 					EFI_PAGE_SHIFT;
 				/* latter part */
-				new += memmap.desc_size;
-				memcpy(new, old, memmap.desc_size);
+				new += efi.memmap.desc_size;
+				memcpy(new, old, efi.memmap.desc_size);
 				md = new;
 				md->phys_addr = m_end + 1;
 				md->num_pages = (end - md->phys_addr + 1) >>
@@ -147,16 +146,16 @@
 				md->num_pages = (m_start - md->phys_addr) >>
 					EFI_PAGE_SHIFT;
 				/* middle part */
-				new += memmap.desc_size;
-				memcpy(new, old, memmap.desc_size);
+				new += efi.memmap.desc_size;
+				memcpy(new, old, efi.memmap.desc_size);
 				md = new;
 				md->attribute |= m_attr;
 				md->phys_addr = m_start;
 				md->num_pages = (m_end - m_start + 1) >>
 					EFI_PAGE_SHIFT;
 				/* last part */
-				new += memmap.desc_size;
-				memcpy(new, old, memmap.desc_size);
+				new += efi.memmap.desc_size;
+				memcpy(new, old, efi.memmap.desc_size);
 				md = new;
 				md->phys_addr = m_end + 1;
 				md->num_pages = (end - m_end) >>
@@ -169,8 +168,8 @@
 				md->num_pages = (m_start - md->phys_addr) >>
 					EFI_PAGE_SHIFT;
 				/* latter part */
-				new += memmap.desc_size;
-				memcpy(new, old, memmap.desc_size);
+				new += efi.memmap.desc_size;
+				memcpy(new, old, efi.memmap.desc_size);
 				md = new;
 				md->phys_addr = m_start;
 				md->num_pages = (end - md->phys_addr + 1) >>
@@ -182,10 +181,10 @@
 
 	/* swap into new EFI memmap */
 	efi_unmap_memmap();
-	memmap.map = new_memmap;
-	memmap.phys_map = new_memmap_phy;
-	memmap.nr_map = new_nr_map;
-	memmap.map_end = memmap.map + memmap.nr_map * memmap.desc_size;
+	efi.memmap.map = new_memmap;
+	efi.memmap.phys_map = new_memmap_phy;
+	efi.memmap.nr_map = new_nr_map;
+	efi.memmap.map_end = efi.memmap.map + efi.memmap.nr_map * efi.memmap.desc_size;
 	set_bit(EFI_MEMMAP, &efi.flags);
 
 	/* print new EFI memmap */
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index da99bbb..c069451 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,7 +28,7 @@
 # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
 KCOV_INSTRUMENT			:= n
 
-lib-y				:= efi-stub-helper.o
+lib-y				:= efi-stub-helper.o gop.o
 
 # include the stub's generic dependencies from lib/ when building for ARM/arm64
 arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 414deb8..993aa56 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -20,27 +20,49 @@
 
 bool __nokaslr;
 
-static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
+static int efi_get_secureboot(efi_system_table_t *sys_table_arg)
 {
-	static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
-	static efi_char16_t const var_name[] = {
+	static efi_char16_t const sb_var_name[] = {
 		'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 };
+	static efi_char16_t const sm_var_name[] = {
+		'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 };
 
+	efi_guid_t var_guid = EFI_GLOBAL_VARIABLE_GUID;
 	efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable;
-	unsigned long size = sizeof(u8);
-	efi_status_t status;
 	u8 val;
+	unsigned long size = sizeof(val);
+	efi_status_t status;
 
-	status = f_getvar((efi_char16_t *)var_name, (efi_guid_t *)&var_guid,
+	status = f_getvar((efi_char16_t *)sb_var_name, (efi_guid_t *)&var_guid,
 			  NULL, &size, &val);
 
+	if (status != EFI_SUCCESS)
+		goto out_efi_err;
+
+	if (val == 0)
+		return 0;
+
+	status = f_getvar((efi_char16_t *)sm_var_name, (efi_guid_t *)&var_guid,
+			  NULL, &size, &val);
+
+	if (status != EFI_SUCCESS)
+		goto out_efi_err;
+
+	if (val == 1)
+		return 0;
+
+	return 1;
+
+out_efi_err:
 	switch (status) {
-	case EFI_SUCCESS:
-		return val;
 	case EFI_NOT_FOUND:
 		return 0;
+	case EFI_DEVICE_ERROR:
+		return -EIO;
+	case EFI_SECURITY_VIOLATION:
+		return -EACCES;
 	default:
-		return 1;
+		return -EINVAL;
 	}
 }
 
@@ -147,6 +169,25 @@
 	out->output_string(out, str);
 }
 
+static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
+{
+	efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
+	efi_status_t status;
+	unsigned long size;
+	void **gop_handle = NULL;
+	struct screen_info *si = NULL;
+
+	size = 0;
+	status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+				&gop_proto, NULL, &size, gop_handle);
+	if (status == EFI_BUFFER_TOO_SMALL) {
+		si = alloc_screen_info(sys_table_arg);
+		if (!si)
+			return NULL;
+		efi_setup_gop(sys_table_arg, si, &gop_proto, size);
+	}
+	return si;
+}
 
 /*
  * This function handles the architecture specific differences between arm and
@@ -185,6 +226,8 @@
 	efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
 	unsigned long reserve_addr = 0;
 	unsigned long reserve_size = 0;
+	int secure_boot = 0;
+	struct screen_info *si;
 
 	/* Check if we were booted by the EFI firmware */
 	if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
@@ -237,6 +280,8 @@
 			__nokaslr = true;
 	}
 
+	si = setup_graphics(sys_table);
+
 	status = handle_kernel_image(sys_table, image_addr, &image_size,
 				     &reserve_addr,
 				     &reserve_size,
@@ -250,12 +295,21 @@
 	if (status != EFI_SUCCESS)
 		pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
 
+	secure_boot = efi_get_secureboot(sys_table);
+	if (secure_boot > 0)
+		pr_efi(sys_table, "UEFI Secure Boot is enabled.\n");
+
+	if (secure_boot < 0) {
+		pr_efi_err(sys_table,
+			"could not determine UEFI Secure Boot status.\n");
+	}
+
 	/*
 	 * Unauthenticated device tree data is a security hazard, so
 	 * ignore 'dtb=' unless UEFI Secure Boot is disabled.
 	 */
-	if (efi_secureboot_enabled(sys_table)) {
-		pr_efi(sys_table, "UEFI Secure Boot is enabled.\n");
+	if (secure_boot != 0 && strstr(cmdline_ptr, "dtb=")) {
+		pr_efi(sys_table, "Ignoring DTB from command line.\n");
 	} else {
 		status = handle_cmdline_files(sys_table, image, cmdline_ptr,
 					      "dtb=",
@@ -309,6 +363,7 @@
 	efi_free(sys_table, image_size, *image_addr);
 	efi_free(sys_table, reserve_size, reserve_addr);
 fail_free_cmdline:
+	free_screen_info(sys_table, si);
 	efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
 fail:
 	return EFI_ERROR;
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 6f42be4..e1f0b28 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -26,6 +26,43 @@
 	return EFI_SUCCESS;
 }
 
+static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
+
+struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
+{
+	struct screen_info *si;
+	efi_status_t status;
+
+	/*
+	 * Unlike on arm64, where we can directly fill out the screen_info
+	 * structure from the stub, we need to allocate a buffer to hold
+	 * its contents while we hand over to the kernel proper from the
+	 * decompressor.
+	 */
+	status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+				sizeof(*si), (void **)&si);
+
+	if (status != EFI_SUCCESS)
+		return NULL;
+
+	status = efi_call_early(install_configuration_table,
+				&screen_info_guid, si);
+	if (status == EFI_SUCCESS)
+		return si;
+
+	efi_call_early(free_pool, si);
+	return NULL;
+}
+
+void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si)
+{
+	if (!si)
+		return;
+
+	efi_call_early(install_configuration_table, &screen_info_guid, NULL);
+	efi_call_early(free_pool, si);
+}
+
 efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
 				 unsigned long *image_addr,
 				 unsigned long *image_size,
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index a90f645..eae693e 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -81,15 +81,24 @@
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
 		/*
+		 * If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a
+		 * displacement in the interval [0, MIN_KIMG_ALIGN) that
+		 * is a multiple of the minimal segment alignment (SZ_64K)
+		 */
+		u32 mask = (MIN_KIMG_ALIGN - 1) & ~(SZ_64K - 1);
+		u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
+			     (phys_seed >> 32) & mask : TEXT_OFFSET;
+
+		/*
 		 * If KASLR is enabled, and we have some randomness available,
 		 * locate the kernel at a randomized offset in physical memory.
 		 */
-		*reserve_size = kernel_memsize + TEXT_OFFSET;
+		*reserve_size = kernel_memsize + offset;
 		status = efi_random_alloc(sys_table_arg, *reserve_size,
 					  MIN_KIMG_ALIGN, reserve_addr,
-					  phys_seed);
+					  (u32)phys_seed);
 
-		*image_addr = *reserve_addr + TEXT_OFFSET;
+		*image_addr = *reserve_addr + offset;
 	} else {
 		/*
 		 * Else, try a straight allocation at the preferred offset.
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 29ed2f9..3bd127f9 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -125,10 +125,12 @@
 
 	map.map_end = map.map + map_size;
 
-	for_each_efi_memory_desc(&map, md)
-		if (md->attribute & EFI_MEMORY_WB)
+	for_each_efi_memory_desc_in_map(&map, md) {
+		if (md->attribute & EFI_MEMORY_WB) {
 			if (membase > md->phys_addr)
 				membase = md->phys_addr;
+		}
+	}
 
 	efi_call_early(free_pool, map.map);
 
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 6dba78a..e58abfa 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -24,7 +24,7 @@
 			unsigned long map_size, unsigned long desc_size,
 			u32 desc_ver)
 {
-	int node, prev, num_rsv;
+	int node, num_rsv;
 	int status;
 	u32 fdt_val32;
 	u64 fdt_val64;
@@ -54,28 +54,6 @@
 		goto fdt_set_fail;
 
 	/*
-	 * Delete any memory nodes present. We must delete nodes which
-	 * early_init_dt_scan_memory may try to use.
-	 */
-	prev = 0;
-	for (;;) {
-		const char *type;
-		int len;
-
-		node = fdt_next_node(fdt, prev, NULL);
-		if (node < 0)
-			break;
-
-		type = fdt_getprop(fdt, node, "device_type", &len);
-		if (type && strncmp(type, "memory", len) == 0) {
-			fdt_del_node(fdt, node);
-			continue;
-		}
-
-		prev = node;
-	}
-
-	/*
 	 * Delete all memory reserve map entries. When booting via UEFI,
 	 * kernel will use the UEFI memory map to find reserved regions.
 	 */
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
new file mode 100644
index 0000000..932742e
--- /dev/null
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -0,0 +1,354 @@
+/* -----------------------------------------------------------------------
+ *
+ *   Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ *   This file is part of the Linux kernel, and is made available under
+ *   the terms of the GNU General Public License version 2.
+ *
+ * ----------------------------------------------------------------------- */
+
+#include <linux/efi.h>
+#include <linux/screen_info.h>
+#include <asm/efi.h>
+#include <asm/setup.h>
+
+static void find_bits(unsigned long mask, u8 *pos, u8 *size)
+{
+	u8 first, len;
+
+	first = 0;
+	len = 0;
+
+	if (mask) {
+		while (!(mask & 0x1)) {
+			mask = mask >> 1;
+			first++;
+		}
+
+		while (mask & 0x1) {
+			mask = mask >> 1;
+			len++;
+		}
+	}
+
+	*pos = first;
+	*size = len;
+}
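
As a quick check of find_bits(): a mask of 0x00ff0000 (a typical 8-bit
colour channel) yields *pos = 16 and *size = 8, while a zero mask leaves
both at 0.
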
+
+static void
+setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
+		 struct efi_pixel_bitmask pixel_info, int pixel_format)
+{
+	if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+		si->lfb_depth = 32;
+		si->lfb_linelength = pixels_per_scan_line * 4;
+		si->red_size = 8;
+		si->red_pos = 0;
+		si->green_size = 8;
+		si->green_pos = 8;
+		si->blue_size = 8;
+		si->blue_pos = 16;
+		si->rsvd_size = 8;
+		si->rsvd_pos = 24;
+	} else if (pixel_format == PIXEL_BGR_RESERVED_8BIT_PER_COLOR) {
+		si->lfb_depth = 32;
+		si->lfb_linelength = pixels_per_scan_line * 4;
+		si->red_size = 8;
+		si->red_pos = 16;
+		si->green_size = 8;
+		si->green_pos = 8;
+		si->blue_size = 8;
+		si->blue_pos = 0;
+		si->rsvd_size = 8;
+		si->rsvd_pos = 24;
+	} else if (pixel_format == PIXEL_BIT_MASK) {
+		find_bits(pixel_info.red_mask, &si->red_pos, &si->red_size);
+		find_bits(pixel_info.green_mask, &si->green_pos,
+			  &si->green_size);
+		find_bits(pixel_info.blue_mask, &si->blue_pos, &si->blue_size);
+		find_bits(pixel_info.reserved_mask, &si->rsvd_pos,
+			  &si->rsvd_size);
+		si->lfb_depth = si->red_size + si->green_size +
+			si->blue_size + si->rsvd_size;
+		si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
+	} else {
+		si->lfb_depth = 4;
+		si->lfb_linelength = si->lfb_width / 2;
+		si->red_size = 0;
+		si->red_pos = 0;
+		si->green_size = 0;
+		si->green_pos = 0;
+		si->blue_size = 0;
+		si->blue_pos = 0;
+		si->rsvd_size = 0;
+		si->rsvd_pos = 0;
+	}
+}
+
+static efi_status_t
+__gop_query32(efi_system_table_t *sys_table_arg,
+	      struct efi_graphics_output_protocol_32 *gop32,
+	      struct efi_graphics_output_mode_info **info,
+	      unsigned long *size, u64 *fb_base)
+{
+	struct efi_graphics_output_protocol_mode_32 *mode;
+	efi_graphics_output_protocol_query_mode query_mode;
+	efi_status_t status;
+	unsigned long m;
+
+	m = gop32->mode;
+	mode = (struct efi_graphics_output_protocol_mode_32 *)m;
+	query_mode = (void *)(unsigned long)gop32->query_mode;
+
+	status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
+				  info);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	*fb_base = mode->frame_buffer_base;
+	return status;
+}
+
+static efi_status_t
+setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
+            efi_guid_t *proto, unsigned long size, void **gop_handle)
+{
+	struct efi_graphics_output_protocol_32 *gop32, *first_gop;
+	unsigned long nr_gops;
+	u16 width, height;
+	u32 pixels_per_scan_line;
+	u32 ext_lfb_base;
+	u64 fb_base;
+	struct efi_pixel_bitmask pixel_info;
+	int pixel_format;
+	efi_status_t status = EFI_NOT_FOUND;
+	u32 *handles = (u32 *)(unsigned long)gop_handle;
+	int i;
+
+	first_gop = NULL;
+	gop32 = NULL;
+
+	nr_gops = size / sizeof(u32);
+	for (i = 0; i < nr_gops; i++) {
+		struct efi_graphics_output_mode_info *info = NULL;
+		efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+		bool conout_found = false;
+		void *dummy = NULL;
+		efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
+		u64 current_fb_base;
+
+		status = efi_call_early(handle_protocol, h,
+					proto, (void **)&gop32);
+		if (status != EFI_SUCCESS)
+			continue;
+
+		status = efi_call_early(handle_protocol, h,
+					&conout_proto, &dummy);
+		if (status == EFI_SUCCESS)
+			conout_found = true;
+
+		status = __gop_query32(sys_table_arg, gop32, &info, &size,
+				       &current_fb_base);
+		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+			/*
+			 * Systems that use the UEFI Console Splitter may
+			 * provide multiple GOP devices, not all of which are
+			 * backed by real hardware. The workaround is to search
+			 * for a GOP implementing the ConOut protocol, and if
+			 * one isn't found, to just fall back to the first GOP.
+			 */
+			width = info->horizontal_resolution;
+			height = info->vertical_resolution;
+			pixel_format = info->pixel_format;
+			pixel_info = info->pixel_information;
+			pixels_per_scan_line = info->pixels_per_scan_line;
+			fb_base = current_fb_base;
+
+			/*
+			 * Once we've found a GOP supporting ConOut,
+			 * don't bother looking any further.
+			 */
+			first_gop = gop32;
+			if (conout_found)
+				break;
+		}
+	}
+
+	/* Did we find any GOPs? */
+	if (!first_gop)
+		goto out;
+
+	/* EFI framebuffer */
+	si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+	si->lfb_width = width;
+	si->lfb_height = height;
+	si->lfb_base = fb_base;
+
+	ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
+	if (ext_lfb_base) {
+		si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+		si->ext_lfb_base = ext_lfb_base;
+	}
+
+	si->pages = 1;
+
+	setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+	si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+	si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+	return status;
+}
+
+static efi_status_t
+__gop_query64(efi_system_table_t *sys_table_arg,
+	      struct efi_graphics_output_protocol_64 *gop64,
+	      struct efi_graphics_output_mode_info **info,
+	      unsigned long *size, u64 *fb_base)
+{
+	struct efi_graphics_output_protocol_mode_64 *mode;
+	efi_graphics_output_protocol_query_mode query_mode;
+	efi_status_t status;
+	unsigned long m;
+
+	m = gop64->mode;
+	mode = (struct efi_graphics_output_protocol_mode_64 *)m;
+	query_mode = (void *)(unsigned long)gop64->query_mode;
+
+	status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
+				  info);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	*fb_base = mode->frame_buffer_base;
+	return status;
+}
+
+static efi_status_t
+setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
+	    efi_guid_t *proto, unsigned long size, void **gop_handle)
+{
+	struct efi_graphics_output_protocol_64 *gop64, *first_gop;
+	unsigned long nr_gops;
+	u16 width, height;
+	u32 pixels_per_scan_line;
+	u32 ext_lfb_base;
+	u64 fb_base;
+	struct efi_pixel_bitmask pixel_info;
+	int pixel_format;
+	efi_status_t status = EFI_NOT_FOUND;
+	u64 *handles = (u64 *)(unsigned long)gop_handle;
+	int i;
+
+	first_gop = NULL;
+	gop64 = NULL;
+
+	nr_gops = size / sizeof(u64);
+	for (i = 0; i < nr_gops; i++) {
+		struct efi_graphics_output_mode_info *info = NULL;
+		efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+		bool conout_found = false;
+		void *dummy = NULL;
+		efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
+		u64 current_fb_base;
+
+		status = efi_call_early(handle_protocol, h,
+					proto, (void **)&gop64);
+		if (status != EFI_SUCCESS)
+			continue;
+
+		status = efi_call_early(handle_protocol, h,
+					&conout_proto, &dummy);
+		if (status == EFI_SUCCESS)
+			conout_found = true;
+
+		status = __gop_query64(sys_table_arg, gop64, &info, &size,
+				       &current_fb_base);
+		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+			/*
+			 * Systems that use the UEFI Console Splitter may
+			 * provide multiple GOP devices, not all of which are
+			 * backed by real hardware. The workaround is to search
+			 * for a GOP implementing the ConOut protocol, and if
+			 * one isn't found, to just fall back to the first GOP.
+			 */
+			width = info->horizontal_resolution;
+			height = info->vertical_resolution;
+			pixel_format = info->pixel_format;
+			pixel_info = info->pixel_information;
+			pixels_per_scan_line = info->pixels_per_scan_line;
+			fb_base = current_fb_base;
+
+			/*
+			 * Once we've found a GOP supporting ConOut,
+			 * don't bother looking any further.
+			 */
+			first_gop = gop64;
+			if (conout_found)
+				break;
+		}
+	}
+
+	/* Did we find any GOPs? */
+	if (!first_gop)
+		goto out;
+
+	/* EFI framebuffer */
+	si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+	si->lfb_width = width;
+	si->lfb_height = height;
+	si->lfb_base = fb_base;
+
+	ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
+	if (ext_lfb_base) {
+		si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+		si->ext_lfb_base = ext_lfb_base;
+	}
+
+	si->pages = 1;
+
+	setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+	si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+	si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+	return status;
+}
+
+/*
+ * See if we have Graphics Output Protocol
+ */
+efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
+			   struct screen_info *si, efi_guid_t *proto,
+			   unsigned long size)
+{
+	efi_status_t status;
+	void **gop_handle = NULL;
+
+	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+				size, (void **)&gop_handle);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	status = efi_call_early(locate_handle,
+				EFI_LOCATE_BY_PROTOCOL,
+				proto, NULL, &size, gop_handle);
+	if (status != EFI_SUCCESS)
+		goto free_handle;
+
+	if (efi_is_64bit()) {
+		status = setup_gop64(sys_table_arg, si, proto, size,
+				     gop_handle);
+	} else {
+		status = setup_gop32(sys_table_arg, si, proto, size,
+				     gop_handle);
+	}
+
+free_handle:
+	efi_call_early(free_pool, gop_handle);
+	return status;
+}
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
new file mode 100644
index 0000000..236004b
--- /dev/null
+++ b/drivers/firmware/efi/memattr.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)	"efi: memattr: " fmt
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+static int __initdata tbl_size;
+
+/*
+ * Reserve the memory associated with the Memory Attributes configuration
+ * table, if it exists.
+ */
+int __init efi_memattr_init(void)
+{
+	efi_memory_attributes_table_t *tbl;
+
+	if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
+		return 0;
+
+	tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
+	if (!tbl) {
+		pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+		       efi.mem_attr_table);
+		return -ENOMEM;
+	}
+
+	if (tbl->version > 1) {
+		pr_warn("Unexpected EFI Memory Attributes table version %d\n",
+			tbl->version);
+		goto unmap;
+	}
+
+	tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+	memblock_reserve(efi.mem_attr_table, tbl_size);
+
+unmap:
+	early_memunmap(tbl, sizeof(*tbl));
+	return 0;
+}
+
+/*
+ * Copies the UEFI memory descriptor @in to @out if @in is covered entirely
+ * by a UEFI memory map entry with matching attributes, and returns whether
+ * such an entry was found. The virtual address of @out is set according to
+ * the matching entry.
+ */
+static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+{
+	u64 in_paddr = in->phys_addr;
+	u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
+	efi_memory_desc_t *md;
+
+	*out = *in;
+
+	if (in->type != EFI_RUNTIME_SERVICES_CODE &&
+	    in->type != EFI_RUNTIME_SERVICES_DATA) {
+		pr_warn("Entry type should be RuntimeServiceCode/Data\n");
+		return false;
+	}
+
+	if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+		pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+		return false;
+	}
+
+	if (PAGE_SIZE > EFI_PAGE_SIZE &&
+	    (!PAGE_ALIGNED(in->phys_addr) ||
+	     !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+		/*
+		 * Since arm64 may execute with page sizes of up to 64 KB, the
+		 * UEFI spec mandates that RuntimeServices memory regions must
+		 * be 64 KB aligned. We need to validate this here since we will
+		 * not be able to tighten permissions on such regions without
+		 * affecting adjacent regions.
+		 */
+		pr_warn("Entry address region misaligned\n");
+		return false;
+	}
+
+	for_each_efi_memory_desc(md) {
+		u64 md_paddr = md->phys_addr;
+		u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (md->virt_addr == 0) {
+			/* no virtual mapping has been installed by the stub */
+			break;
+		}
+
+		if (md_paddr > in_paddr || (in_paddr - md_paddr) >= md_size)
+			continue;
+
+		/*
+		 * This entry covers the start of @in, check whether
+		 * it covers the end as well.
+		 */
+		if (md_paddr + md_size < in_paddr + in_size) {
+			pr_warn("Entry covers multiple EFI memory map regions\n");
+			return false;
+		}
+
+		if (md->type != in->type) {
+			pr_warn("Entry type deviates from EFI memory map region type\n");
+			return false;
+		}
+
+		out->virt_addr = in_paddr + (md->virt_addr - md_paddr);
+
+		return true;
+	}
+
+	pr_warn("No matching entry found in the EFI memory map\n");
+	return false;
+}
+
+/*
+ * To be called after the EFI page tables have been populated. If a memory
+ * attributes table is available, its contents will be used to update the
+ * mappings with tightened permissions as described by the table.
+ * This requires the UEFI memory map to have already been populated with
+ * virtual addresses.
+ */
+int __init efi_memattr_apply_permissions(struct mm_struct *mm,
+					 efi_memattr_perm_setter fn)
+{
+	efi_memory_attributes_table_t *tbl;
+	int i, ret;
+
+	if (tbl_size <= sizeof(*tbl))
+		return 0;
+
+	/*
+	 * We need the EFI memory map to be set up so we can use it to
+	 * look up the virtual addresses of all entries in the EFI
+	 * Memory Attributes table. If it isn't available, this
+	 * function should not be called.
+	 */
+	if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
+		return 0;
+
+	tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
+	if (!tbl) {
+		pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+		       efi.mem_attr_table);
+		return -ENOMEM;
+	}
+
+	if (efi_enabled(EFI_DBG))
+		pr_info("Processing EFI Memory Attributes table:\n");
+
+	for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
+		efi_memory_desc_t md;
+		unsigned long size;
+		bool valid;
+		char buf[64];
+
+		valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
+				       &md);
+		size = md.num_pages << EFI_PAGE_SHIFT;
+		if (efi_enabled(EFI_DBG) || !valid)
+			pr_info("%s 0x%012llx-0x%012llx %s\n",
+				valid ? "" : "!", md.phys_addr,
+				md.phys_addr + size - 1,
+				efi_md_typeattr_format(buf, sizeof(buf), &md));
+
+		if (valid)
+			ret = fn(mm, &md);
+	}
+	memunmap(tbl);
+	return ret;
+}
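
A hedged sketch of what an architecture's efi_memattr_perm_setter callback
might look like. Everything named my_* here is illustrative (in particular
my_arch_remap() and my_efi_mm stand in for whatever page-table helpers and
mm the architecture actually uses); the real implementation lives in arch
code, not in this patch:

	/* Tighten permissions for one descriptor validated above. */
	static int my_set_permissions(struct mm_struct *mm,
				      efi_memory_desc_t *md)
	{
		bool want_ro = md->attribute & EFI_MEMORY_RO;
		bool want_xp = md->attribute & EFI_MEMORY_XP;

		/*
		 * md->virt_addr was filled in by entry_is_valid(), so the
		 * region can be remapped in 'mm' read-only and/or
		 * non-executable.
		 */
		return my_arch_remap(mm, md->virt_addr,
				     md->num_pages << EFI_PAGE_SHIFT,
				     want_ro, want_xp);
	}

	/* From arch init code, with the mm that holds the EFI mappings: */
	efi_memattr_apply_permissions(my_efi_mm, my_set_permissions);
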
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
index 9c59d1c..62ead9b 100644
--- a/drivers/firmware/efi/reboot.c
+++ b/drivers/firmware/efi/reboot.c
@@ -9,7 +9,8 @@
 
 void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
 {
-	int efi_mode;
+	const char *str[] = { "cold", "warm", "shutdown", "platform" };
+	int efi_mode, cap_reset_mode;
 
 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
 		return;
@@ -30,6 +31,15 @@
 	if (efi_reboot_quirk_mode != -1)
 		efi_mode = efi_reboot_quirk_mode;
 
+	if (efi_capsule_pending(&cap_reset_mode)) {
+		if (efi_mode != cap_reset_mode)
+			printk(KERN_CRIT "efi: %s reset requested but pending "
+			       "capsule update requires %s reset... Performing "
+			       "%s reset.\n", str[efi_mode], str[cap_reset_mode],
+			       str[cap_reset_mode]);
+		efi_mode = cap_reset_mode;
+	}
+
 	efi.reset_system(efi_mode, EFI_SUCCESS, 0, NULL);
 }
 
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index de69530..23bef6b 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -16,10 +16,70 @@
 
 #include <linux/bug.h>
 #include <linux/efi.h>
+#include <linux/irqflags.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
+#include <linux/stringify.h>
 #include <asm/efi.h>
 
+static void efi_call_virt_check_flags(unsigned long flags, const char *call)
+{
+	unsigned long cur_flags, mismatch;
+
+	local_save_flags(cur_flags);
+
+	mismatch = flags ^ cur_flags;
+	if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
+		return;
+
+	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
+	pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n",
+			   flags, cur_flags, call);
+	local_irq_restore(flags);
+}
+
+/*
+ * Arch code can implement the following three template macros, avoiding
+ * repetition for the void/non-void return cases of {__,}efi_call_virt:
+ *
+ *  * arch_efi_call_virt_setup
+ *
+ *    Sets up the environment for the call (e.g. switching page tables,
+ *    allowing kernel-mode use of floating point, if required).
+ *
+ *  * arch_efi_call_virt
+ *
+ *    Performs the call. The last expression in the macro must be the call
+ *    itself, allowing the logic to be shared by the void and non-void
+ *    cases.
+ *
+ *  * arch_efi_call_virt_teardown
+ *
+ *    Restores the usual kernel environment once the call has returned.
+ */
+
+#define efi_call_virt(f, args...)					\
+({									\
+	efi_status_t __s;						\
+	unsigned long flags;						\
+	arch_efi_call_virt_setup();					\
+	local_save_flags(flags);					\
+	__s = arch_efi_call_virt(f, args);				\
+	efi_call_virt_check_flags(flags, __stringify(f));		\
+	arch_efi_call_virt_teardown();					\
+	__s;								\
+})
+
+#define __efi_call_virt(f, args...)					\
+({									\
+	unsigned long flags;						\
+	arch_efi_call_virt_setup();					\
+	local_save_flags(flags);					\
+	arch_efi_call_virt(f, args);					\
+	efi_call_virt_check_flags(flags, __stringify(f));		\
+	arch_efi_call_virt_teardown();					\
+})
+
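
As a rough illustration of the contract spelled out above, an architecture
that needs no page-table switch or FPU handling could provide the three
template macros along these lines (purely a hypothetical sketch; the real
definitions live in each architecture's <asm/efi.h>):

	#define arch_efi_call_virt_setup()	do { } while (0)
	#define arch_efi_call_virt(f, args...)	efi.systab->runtime->f(args)
	#define arch_efi_call_virt_teardown()	do { } while (0)
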
 /*
  * According to section 7.1 of the UEFI spec, Runtime Services are not fully
  * reentrant, and there are particular combinations of calls that need to be
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ac594c..d3b7513 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,29 +202,44 @@
 	{ NULL_GUID, "", NULL },
 };
 
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ *              final "*" character matches any trailing characters in @var_name,
+ *              including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ *         that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
 static bool
 variable_matches(const char *var_name, size_t len, const char *match_name,
 		 int *match)
 {
 	for (*match = 0; ; (*match)++) {
 		char c = match_name[*match];
-		char u = var_name[*match];
 
-		/* Wildcard in the matching name means we've matched */
-		if (c == '*')
+		switch (c) {
+		case '*':
+			/* Wildcard in @match_name means we've matched. */
 			return true;
 
-		/* Case sensitive match */
-		if (!c && *match == len)
-			return true;
+		case '\0':
+			/* @match_name has ended. Has @var_name too? */
+			return (*match == len);
 
-		if (c != u)
+		default:
+			/*
+			 * We've reached a non-wildcard char in @match_name.
+			 * Continue only if there's an identical character in
+			 * @var_name.
+			 */
+			if (*match < len && c == var_name[*match])
+				continue;
 			return false;
-
-		if (!c)
-			return true;
+		}
 	}
-	return true;
 }
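
To make the rewritten matcher's semantics concrete, a few illustrative
calls (the pattern strings here are hypothetical, chosen only to show the
wildcard, exact-match and too-short cases):

	int match;

	/* Trailing '*' matches any suffix: returns true, match == 4. */
	variable_matches("BootOrder", 9, "Boot*", &match);

	/* Exact match also requires equal length: returns true, match == 7. */
	variable_matches("Timeout", 7, "Timeout", &match);

	/* @var_name runs out before the pattern does: returns false. */
	variable_matches("Boot", 4, "BootOrder", &match);
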
 
 bool
@@ -314,39 +329,6 @@
 	return fops->query_variable_store(attributes, size, true);
 }
 
-static int efi_status_to_err(efi_status_t status)
-{
-	int err;
-
-	switch (status) {
-	case EFI_SUCCESS:
-		err = 0;
-		break;
-	case EFI_INVALID_PARAMETER:
-		err = -EINVAL;
-		break;
-	case EFI_OUT_OF_RESOURCES:
-		err = -ENOSPC;
-		break;
-	case EFI_DEVICE_ERROR:
-		err = -EIO;
-		break;
-	case EFI_WRITE_PROTECTED:
-		err = -EROFS;
-		break;
-	case EFI_SECURITY_VIOLATION:
-		err = -EACCES;
-		break;
-	case EFI_NOT_FOUND:
-		err = -ENOENT;
-		break;
-	default:
-		err = -EINVAL;
-	}
-
-	return err;
-}
-
 static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
 				struct list_head *head)
 {
@@ -437,8 +419,7 @@
  * Returns 0 on success, or a kernel error code on failure.
  */
 int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
-		void *data, bool atomic, bool duplicates,
-		struct list_head *head)
+		void *data, bool duplicates, struct list_head *head)
 {
 	const struct efivar_operations *ops = __efivars->ops;
 	unsigned long variable_name_size = 1024;
@@ -468,7 +449,7 @@
 						&vendor_guid);
 		switch (status) {
 		case EFI_SUCCESS:
-			if (!atomic)
+			if (duplicates)
 				spin_unlock_irq(&__efivars->lock);
 
 			variable_name_size = var_name_strnsize(variable_name,
@@ -483,21 +464,19 @@
 			 * and may end up looping here forever.
 			 */
 			if (duplicates &&
-			    variable_is_present(variable_name, &vendor_guid, head)) {
+			    variable_is_present(variable_name, &vendor_guid,
+						head)) {
 				dup_variable_bug(variable_name, &vendor_guid,
 						 variable_name_size);
-				if (!atomic)
-					spin_lock_irq(&__efivars->lock);
-
 				status = EFI_NOT_FOUND;
-				break;
+			} else {
+				err = func(variable_name, vendor_guid,
+					   variable_name_size, data);
+				if (err)
+					status = EFI_NOT_FOUND;
 			}
 
-			err = func(variable_name, vendor_guid, variable_name_size, data);
-			if (err)
-				status = EFI_NOT_FOUND;
-
-			if (!atomic)
+			if (duplicates)
 				spin_lock_irq(&__efivars->lock);
 
 			break;
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 81037e5..14042a6 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -418,6 +418,31 @@
 	return str - buf;
 }
 
+static ssize_t ibft_attr_show_acpitbl(void *data, int type, char *buf)
+{
+	struct ibft_kobject *entry = data;
+	char *str = buf;
+
+	switch (type) {
+	case ISCSI_BOOT_ACPITBL_SIGNATURE:
+		str += sprintf_string(str, ACPI_NAME_SIZE,
+				      entry->header->header.signature);
+		break;
+	case ISCSI_BOOT_ACPITBL_OEM_ID:
+		str += sprintf_string(str, ACPI_OEM_ID_SIZE,
+				      entry->header->header.oem_id);
+		break;
+	case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
+		str += sprintf_string(str, ACPI_OEM_TABLE_ID_SIZE,
+				      entry->header->header.oem_table_id);
+		break;
+	default:
+		break;
+	}
+
+	return str - buf;
+}
+
 static int __init ibft_check_device(void)
 {
 	int len;
@@ -576,6 +601,24 @@
 	return rc;
 }
 
+static umode_t __init ibft_check_acpitbl_for(void *data, int type)
+{
+
+	umode_t rc = 0;
+
+	switch (type) {
+	case ISCSI_BOOT_ACPITBL_SIGNATURE:
+	case ISCSI_BOOT_ACPITBL_OEM_ID:
+	case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
+		rc = S_IRUGO;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
 static void ibft_kobj_release(void *data)
 {
 	kfree(data);
@@ -699,6 +742,8 @@
 static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
 {
 	struct ibft_control *control = NULL;
+	struct iscsi_boot_kobj *boot_kobj;
+	struct ibft_kobject *ibft_kobj;
 	void *ptr, *end;
 	int rc = 0;
 	u16 offset;
@@ -726,6 +771,25 @@
 				break;
 		}
 	}
+	if (rc)
+		return rc;
+
+	ibft_kobj = kzalloc(sizeof(*ibft_kobj), GFP_KERNEL);
+	if (!ibft_kobj)
+		return -ENOMEM;
+
+	ibft_kobj->header = header;
+	ibft_kobj->hdr = NULL;	/* for ibft_unregister */
+
+	boot_kobj = iscsi_boot_create_acpitbl(boot_kset, 0,
+					ibft_kobj,
+					ibft_attr_show_acpitbl,
+					ibft_check_acpitbl_for,
+					ibft_kobj_release);
+	if (!boot_kobj)  {
+		kfree(ibft_kobj);
+		rc = -ENOMEM;
+	}
 
 	return rc;
 }
@@ -738,7 +802,7 @@
 	list_for_each_entry_safe(boot_kobj, tmp_kobj,
 				 &boot_kset->kobj_list, list) {
 		ibft_kobj = boot_kobj->data;
-		if (ibft_kobj->hdr->id == id_nic)
+		if (ibft_kobj->hdr && ibft_kobj->hdr->id == id_nic)
 			sysfs_remove_link(&boot_kobj->kobj, "device");
 	};
 }
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 11bfee8..03e0458 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -91,7 +91,7 @@
 				PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
 }
 
-bool psci_power_state_loses_context(u32 state)
+static inline bool psci_power_state_loses_context(u32 state)
 {
 	const u32 mask = psci_has_ext_power_state() ?
 					PSCI_1_0_EXT_POWER_STATE_TYPE_MASK :
@@ -100,7 +100,7 @@
 	return state & mask;
 }
 
-bool psci_power_state_is_valid(u32 state)
+static inline bool psci_power_state_is_valid(u32 state)
 {
 	const u32 valid_mask = psci_has_ext_power_state() ?
 			       PSCI_1_0_EXT_POWER_STATE_MASK :
@@ -355,12 +355,12 @@
 
 /* ARM specific CPU idle operations */
 #ifdef CONFIG_ARM
-static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
 	.suspend = psci_cpu_suspend_enter,
 	.init = psci_dt_cpu_init_idle,
 };
 
-CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
+CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
 #endif
 #endif
 
@@ -563,7 +563,7 @@
 	return err;
 }
 
-static const struct of_device_id const psci_of_match[] __initconst = {
+static const struct of_device_id psci_of_match[] __initconst = {
 	{ .compatible = "arm,psci",	.data = psci_0_1_init},
 	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
 	{ .compatible = "arm,psci-1.0",	.data = psci_0_2_init},
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index fedbff5..1b95475 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -77,12 +77,28 @@
 static inline void fw_cfg_read_blob(u16 key,
 				    void *buf, loff_t pos, size_t count)
 {
+	u32 glk = -1U;
+	acpi_status status;
+
+	/* If we have ACPI, ensure mutual exclusion against any potential
+	 * device access by the firmware, e.g. via AML methods:
+	 */
+	status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+	if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+		/* Should never get here */
+		WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
+		memset(buf, 0, count);
+		return;
+	}
+
 	mutex_lock(&fw_cfg_dev_lock);
 	iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl);
 	while (pos-- > 0)
 		ioread8(fw_cfg_reg_data);
 	ioread8_rep(fw_cfg_reg_data, buf, count);
 	mutex_unlock(&fw_cfg_dev_lock);
+
+	acpi_release_global_lock(glk);
 }
 
 /* clean up fw_cfg device i/o */
@@ -727,12 +743,18 @@
 
 static int __init fw_cfg_sysfs_init(void)
 {
+	int ret;
+
 	/* create /sys/firmware/qemu_fw_cfg/ top level directory */
 	fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
 	if (!fw_cfg_top_ko)
 		return -ENOMEM;
 
-	return platform_driver_register(&fw_cfg_sysfs_driver);
+	ret = platform_driver_register(&fw_cfg_sysfs_driver);
+	if (ret)
+		fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+
+	return ret;
 }
 
 static void __exit fw_cfg_sysfs_exit(void)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 5f3429f..d00e7b6 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -33,7 +33,6 @@
 
 menuconfig GPIOLIB
 	bool "GPIO Support"
-	depends on ARCH_WANT_OPTIONAL_GPIOLIB || ARCH_REQUIRE_GPIOLIB
 	help
 	  This enables GPIO support through the generic GPIO library.
 	  You only need to enable this, if you also want to enable
@@ -49,7 +48,7 @@
 
 config OF_GPIO
 	def_bool y
-	depends on OF
+	depends on OF || COMPILE_TEST
 
 config GPIO_ACPI
 	def_bool y
@@ -122,6 +121,7 @@
 config GPIO_AMDPT
 	tristate "AMD Promontory GPIO support"
 	depends on ACPI
+	select GPIO_GENERIC
 	help
 	  Driver for GPIO functionality on the Promontory IOHub.
 	  Requires ACPI ASL code to enumerate it as a platform device.
@@ -303,6 +303,7 @@
 		   FSL_SOC_BOOKE || PPC_86xx || ARCH_LAYERSCAPE || ARM || \
 		   COMPILE_TEST
 	select GPIO_GENERIC
+	select IRQ_DOMAIN
 	help
 	  Say Y here if you're going to use hardware that connects to the
 	  MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
@@ -399,6 +400,11 @@
 	select GENERIC_IRQ_CHIP
 	select OF_GPIO
 
+config GPIO_TEGRA
+	bool
+	default y
+	depends on ARCH_TEGRA || COMPILE_TEST
+
 config GPIO_TS4800
 	tristate "TS-4800 DIO blocks and compatibles"
 	depends on OF_GPIO
@@ -473,7 +479,7 @@
 
 config GPIO_XLP
 	tristate "Netlogic XLP GPIO support"
-	depends on CPU_XLP && OF_GPIO
+	depends on OF_GPIO && (CPU_XLP || ARCH_VULCAN || COMPILE_TEST)
 	select GPIOLIB_IRQCHIP
 	help
 	  This driver provides support for GPIO interface on Netlogic XLP MIPS64
@@ -510,6 +516,13 @@
 	help
 	  Say yes here to support the GPIO device on ZTE ZX SoCs.
 
+config GPIO_LOONGSON1
+	tristate "Loongson1 GPIO support"
+	depends on MACH_LOONGSON32
+	select GPIO_GENERIC
+	help
+	  Say Y or M here to support GPIO on Loongson1 SoCs.
+
 endmenu
 
 menu "Port-mapped I/O GPIO drivers"
@@ -557,7 +570,7 @@
 	  Say yes here to support GPIO functionality of IT87xx Super I/O chips.
 
 	  This driver is tested with ITE IT8728 and IT8732 Super I/O chips, and
-	  supports the IT8761E Super I/O chip as well.
+	  supports the IT8761E, IT8620E and IT8628E Super I/O chips as well.
 
 	  To compile this driver as a module, choose M here: the module will
 	  be called gpio_it87
@@ -1091,6 +1104,7 @@
 
 config GPIO_MCP23S08
 	tristate "Microchip MCP23xxx I/O expander"
+	select GPIOLIB_IRQCHIP
 	help
 	  SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
 	  I/O expanders.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 1e0b74f..991598e 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,9 @@
 # Device drivers. Generally keep list sorted alphabetically
 obj-$(CONFIG_GPIO_GENERIC)	+= gpio-generic.o
 
+# directly supported by gpio-generic
+gpio-generic-$(CONFIG_GPIO_GENERIC)	+= gpio-mmio.o
+
 obj-$(CONFIG_GPIO_104_DIO_48E)	+= gpio-104-dio-48e.o
 obj-$(CONFIG_GPIO_104_IDIO_16)	+= gpio-104-idio-16.o
 obj-$(CONFIG_GPIO_104_IDI_48)	+= gpio-104-idi-48.o
@@ -95,7 +98,7 @@
 obj-$(CONFIG_GPIO_SYSCON)	+= gpio-syscon.o
 obj-$(CONFIG_GPIO_TB10X)	+= gpio-tb10x.o
 obj-$(CONFIG_GPIO_TC3589X)	+= gpio-tc3589x.o
-obj-$(CONFIG_ARCH_TEGRA)	+= gpio-tegra.o
+obj-$(CONFIG_GPIO_TEGRA)	+= gpio-tegra.o
 obj-$(CONFIG_GPIO_TIMBERDALE)	+= gpio-timberdale.o
 obj-$(CONFIG_GPIO_PALMAS)	+= gpio-palmas.o
 obj-$(CONFIG_GPIO_TPIC2810)	+= gpio-tpic2810.o
@@ -127,3 +130,4 @@
 obj-$(CONFIG_GPIO_ZEVIO)	+= gpio-zevio.o
 obj-$(CONFIG_GPIO_ZYNQ)		+= gpio-zynq.o
 obj-$(CONFIG_GPIO_ZX)		+= gpio-zx.o
+obj-$(CONFIG_GPIO_LOONGSON1)	+= gpio-loongson1.o
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index c81224f..80f9ddf 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -75,6 +75,29 @@
 	mutex_unlock(&chip->lock);
 }
 
+static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+				    unsigned long *bits)
+{
+	struct gen_74x164_chip *chip = gpiochip_get_data(gc);
+	unsigned int i, idx, shift;
+	u8 bank, bankmask;
+
+	mutex_lock(&chip->lock);
+	for (i = 0, bank = chip->registers - 1; i < chip->registers;
+	     i++, bank--) {
+		idx = i / sizeof(*mask);
+		shift = i % sizeof(*mask) * BITS_PER_BYTE;
+		bankmask = mask[idx] >> shift;
+		if (!bankmask)
+			continue;
+
+		chip->buffer[bank] &= ~bankmask;
+		chip->buffer[bank] |= bankmask & (bits[idx] >> shift);
+	}
+	__gen_74x164_write_config(chip);
+	mutex_unlock(&chip->lock);
+}
+
 static int gen_74x164_direction_output(struct gpio_chip *gc,
 		unsigned offset, int val)
 {
@@ -114,6 +137,7 @@
 	chip->gpio_chip.direction_output = gen_74x164_direction_output;
 	chip->gpio_chip.get = gen_74x164_get_value;
 	chip->gpio_chip.set = gen_74x164_set_value;
+	chip->gpio_chip.set_multiple = gen_74x164_set_multiple;
 	chip->gpio_chip.base = -1;
 
 	chip->registers = nregs;
@@ -153,6 +177,7 @@
 
 static const struct of_device_id gen_74x164_dt_ids[] = {
 	{ .compatible = "fairchild,74hc595" },
+	{ .compatible = "nxp,74lvc594" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, gen_74x164_dt_ids);
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index c248404..9b78dc8 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -28,7 +28,6 @@
 struct pt_gpio_chip {
 	struct gpio_chip         gc;
 	void __iomem             *reg_base;
-	spinlock_t               lock;
 };
 
 static int pt_gpio_request(struct gpio_chip *gc, unsigned offset)
@@ -39,19 +38,19 @@
 
 	dev_dbg(gc->parent, "pt_gpio_request offset=%x\n", offset);
 
-	spin_lock_irqsave(&pt_gpio->lock, flags);
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
 
 	using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
 	if (using_pins & BIT(offset)) {
 		dev_warn(gc->parent, "PT GPIO pin %x reconfigured\n",
 			 offset);
-		spin_unlock_irqrestore(&pt_gpio->lock, flags);
+		spin_unlock_irqrestore(&gc->bgpio_lock, flags);
 		return -EINVAL;
 	}
 
 	writel(using_pins | BIT(offset), pt_gpio->reg_base + PT_SYNC_REG);
 
-	spin_unlock_irqrestore(&pt_gpio->lock, flags);
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
 
 	return 0;
 }
@@ -62,111 +61,17 @@
 	unsigned long flags;
 	u32 using_pins;
 
-	spin_lock_irqsave(&pt_gpio->lock, flags);
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
 
 	using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
 	using_pins &= ~BIT(offset);
 	writel(using_pins, pt_gpio->reg_base + PT_SYNC_REG);
 
-	spin_unlock_irqrestore(&pt_gpio->lock, flags);
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
 
 	dev_dbg(gc->parent, "pt_gpio_free offset=%x\n", offset);
 }
 
-static void pt_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
-{
-	struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
-	unsigned long flags;
-	u32 data;
-
-	dev_dbg(gc->parent, "pt_gpio_set_value offset=%x, value=%x\n",
-		offset, value);
-
-	spin_lock_irqsave(&pt_gpio->lock, flags);
-
-	data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
-	data &= ~BIT(offset);
-	if (value)
-		data |= BIT(offset);
-	writel(data, pt_gpio->reg_base + PT_OUTPUTDATA_REG);
-
-	spin_unlock_irqrestore(&pt_gpio->lock, flags);
-}
-
-static int pt_gpio_get_value(struct gpio_chip *gc, unsigned offset)
-{
-	struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
-	unsigned long flags;
-	u32 data;
-
-	spin_lock_irqsave(&pt_gpio->lock, flags);
-
-	data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
-
-	/* configure as output */
-	if (data & BIT(offset))
-		data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
-	else	/* configure as input */
-		data = readl(pt_gpio->reg_base + PT_INPUTDATA_REG);
-
-	spin_unlock_irqrestore(&pt_gpio->lock, flags);
-
-	data >>= offset;
-	data &= 1;
-
-	dev_dbg(gc->parent, "pt_gpio_get_value offset=%x, value=%x\n",
-		offset, data);
-
-	return data;
-}
-
-static int pt_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
-{
-	struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
-	unsigned long flags;
-	u32 data;
-
-	dev_dbg(gc->parent, "pt_gpio_dirction_input offset=%x\n", offset);
-
-	spin_lock_irqsave(&pt_gpio->lock, flags);
-
-	data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
-	data &= ~BIT(offset);
-	writel(data, pt_gpio->reg_base + PT_DIRECTION_REG);
-
-	spin_unlock_irqrestore(&pt_gpio->lock, flags);
-
-	return 0;
-}
-
-static int pt_gpio_direction_output(struct gpio_chip *gc,
-					unsigned offset, int value)
-{
-	struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
-	unsigned long flags;
-	u32 data;
-
-	dev_dbg(gc->parent, "pt_gpio_direction_output offset=%x, value=%x\n",
-		offset, value);
-
-	spin_lock_irqsave(&pt_gpio->lock, flags);
-
-	data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
-	if (value)
-		data |= BIT(offset);
-	else
-		data &= ~BIT(offset);
-	writel(data, pt_gpio->reg_base + PT_OUTPUTDATA_REG);
-
-	data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
-	data |= BIT(offset);
-	writel(data, pt_gpio->reg_base + PT_DIRECTION_REG);
-
-	spin_unlock_irqrestore(&pt_gpio->lock, flags);
-
-	return 0;
-}
-
 static int pt_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -196,18 +101,19 @@
 		return PTR_ERR(pt_gpio->reg_base);
 	}
 
-	spin_lock_init(&pt_gpio->lock);
+	ret = bgpio_init(&pt_gpio->gc, dev, 4,
+			 pt_gpio->reg_base + PT_INPUTDATA_REG,
+			 pt_gpio->reg_base + PT_OUTPUTDATA_REG, NULL,
+			 pt_gpio->reg_base + PT_DIRECTION_REG, NULL,
+			 BGPIOF_READ_OUTPUT_REG_SET);
+	if (ret) {
+		dev_err(&pdev->dev, "bgpio_init failed\n");
+		return ret;
+	}
 
-	pt_gpio->gc.label            = pdev->name;
 	pt_gpio->gc.owner            = THIS_MODULE;
-	pt_gpio->gc.parent              = dev;
 	pt_gpio->gc.request          = pt_gpio_request;
 	pt_gpio->gc.free             = pt_gpio_free;
-	pt_gpio->gc.direction_input  = pt_gpio_direction_input;
-	pt_gpio->gc.direction_output = pt_gpio_direction_output;
-	pt_gpio->gc.get              = pt_gpio_get_value;
-	pt_gpio->gc.set              = pt_gpio_set_value;
-	pt_gpio->gc.base             = -1;
 	pt_gpio->gc.ngpio            = PT_TOTAL_GPIO;
 #if defined(CONFIG_OF_GPIO)
 	pt_gpio->gc.of_node          = pdev->dev.of_node;
@@ -239,6 +145,7 @@
 
 static const struct acpi_device_id pt_gpio_acpi_match[] = {
 	{ "AMDF030", 0 },
+	{ "AMDIF030", 0 },
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, pt_gpio_acpi_match);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 2fd38d5..9aabc48f 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,4 +1,7 @@
 /*
+ * Broadcom Kona GPIO Driver
+ *
+ * Author: Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>
  * Copyright (C) 2012-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or
@@ -17,7 +20,7 @@
 #include <linux/gpio.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip/chained_irq.h>
 
@@ -502,8 +505,6 @@
 	{}
 };
 
-MODULE_DEVICE_TABLE(of, bcm_kona_gpio_of_match);
-
 /*
  * This lock class tells lockdep that GPIO irqs are in a different
  * category than their parents, so it won't report false recursion.
@@ -659,9 +660,4 @@
 	},
 	.probe = bcm_kona_gpio_probe,
 };
-
-module_platform_driver(bcm_kona_gpio_driver);
-
-MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(bcm_kona_gpio_driver);
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 42d51c5..e648914 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -461,6 +461,7 @@
 		bank->id = num_banks;
 		if (bank_width <= 0 || bank_width > MAX_GPIO_PER_BANK) {
 			dev_err(dev, "Invalid bank width %d\n", bank_width);
+			err = -EINVAL;
 			goto fail;
 		} else {
 			bank->width = bank_width;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 597de1e..34779bb 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -7,6 +7,7 @@
  *
  * All enquiries to support@picochip.com
  */
+#include <linux/acpi.h>
 #include <linux/gpio/driver.h>
 /* FIXME: for gpio_get_value(), replace this with direct register read */
 #include <linux/gpio.h>
@@ -22,10 +23,13 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/spinlock.h>
 #include <linux/platform_data/gpio-dwapb.h>
 #include <linux/slab.h>
 
+#include "gpiolib.h"
+
 #define GPIO_SWPORTA_DR		0x00
 #define GPIO_SWPORTA_DDR	0x04
 #define GPIO_SWPORTB_DR		0x0c
@@ -290,14 +294,14 @@
 				 struct dwapb_port_property *pp)
 {
 	struct gpio_chip *gc = &port->gc;
-	struct device_node *node = pp->node;
+	struct fwnode_handle  *fwnode = pp->fwnode;
 	struct irq_chip_generic	*irq_gc = NULL;
 	unsigned int hwirq, ngpio = gc->ngpio;
 	struct irq_chip_type *ct;
 	int err, i;
 
-	gpio->domain = irq_domain_add_linear(node, ngpio,
-					     &irq_generic_chip_ops, gpio);
+	gpio->domain = irq_domain_create_linear(fwnode, ngpio,
+						 &irq_generic_chip_ops, gpio);
 	if (!gpio->domain)
 		return;
 
@@ -409,13 +413,13 @@
 	err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
 			 NULL, false);
 	if (err) {
-		dev_err(gpio->dev, "failed to init gpio chip for %s\n",
-			pp->name);
+		dev_err(gpio->dev, "failed to init gpio chip for port%d\n",
+			port->idx);
 		return err;
 	}
 
 #ifdef CONFIG_OF_GPIO
-	port->gc.of_node = pp->node;
+	port->gc.of_node = to_of_node(pp->fwnode);
 #endif
 	port->gc.ngpio = pp->ngpio;
 	port->gc.base = pp->gpio_base;
@@ -429,11 +433,15 @@
 
 	err = gpiochip_add_data(&port->gc, port);
 	if (err)
-		dev_err(gpio->dev, "failed to register gpiochip for %s\n",
-			pp->name);
+		dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
+			port->idx);
 	else
 		port->is_registered = true;
 
+	/* Add GPIO-signaled ACPI event support */
+	if (pp->irq)
+		acpi_gpiochip_request_interrupts(&port->gc);
+
 	return err;
 }
 
@@ -447,19 +455,15 @@
 }
 
 static struct dwapb_platform_data *
-dwapb_gpio_get_pdata_of(struct device *dev)
+dwapb_gpio_get_pdata(struct device *dev)
 {
-	struct device_node *node, *port_np;
+	struct fwnode_handle *fwnode;
 	struct dwapb_platform_data *pdata;
 	struct dwapb_port_property *pp;
 	int nports;
 	int i;
 
-	node = dev->of_node;
-	if (!IS_ENABLED(CONFIG_OF_GPIO) || !node)
-		return ERR_PTR(-ENODEV);
-
-	nports = of_get_child_count(node);
+	nports = device_get_child_node_count(dev);
 	if (nports == 0)
 		return ERR_PTR(-ENODEV);
 
@@ -474,21 +478,22 @@
 	pdata->nports = nports;
 
 	i = 0;
-	for_each_child_of_node(node, port_np) {
+	device_for_each_child_node(dev, fwnode)  {
 		pp = &pdata->properties[i++];
-		pp->node = port_np;
+		pp->fwnode = fwnode;
 
-		if (of_property_read_u32(port_np, "reg", &pp->idx) ||
+		if (fwnode_property_read_u32(fwnode, "reg", &pp->idx) ||
 		    pp->idx >= DWAPB_MAX_PORTS) {
-			dev_err(dev, "missing/invalid port index for %s\n",
-				port_np->full_name);
+			dev_err(dev,
+				"missing/invalid port index for port%d\n", i);
 			return ERR_PTR(-EINVAL);
 		}
 
-		if (of_property_read_u32(port_np, "snps,nr-gpios",
+		if (fwnode_property_read_u32(fwnode, "snps,nr-gpios",
 					 &pp->ngpio)) {
-			dev_info(dev, "failed to get number of gpios for %s\n",
-				 port_np->full_name);
+			dev_info(dev,
+				 "failed to get number of gpios for port%d\n",
+				 i);
 			pp->ngpio = 32;
 		}
 
@@ -496,18 +501,19 @@
 		 * Only port A can provide interrupts in all configurations of
 		 * the IP.
 		 */
-		if (pp->idx == 0 &&
-		    of_property_read_bool(port_np, "interrupt-controller")) {
-			pp->irq = irq_of_parse_and_map(port_np, 0);
-			if (!pp->irq) {
-				dev_warn(dev, "no irq for bank %s\n",
-					 port_np->full_name);
-			}
+		if (dev->of_node && pp->idx == 0 &&
+			fwnode_property_read_bool(fwnode,
+						  "interrupt-controller")) {
+			pp->irq = irq_of_parse_and_map(to_of_node(fwnode), 0);
+			if (!pp->irq)
+				dev_warn(dev, "no irq for port%d\n", pp->idx);
 		}
 
+		if (has_acpi_companion(dev) && pp->idx == 0)
+			pp->irq = platform_get_irq(to_platform_device(dev), 0);
+
 		pp->irq_shared	= false;
 		pp->gpio_base	= -1;
-		pp->name	= port_np->full_name;
 	}
 
 	return pdata;
@@ -523,7 +529,7 @@
 	struct dwapb_platform_data *pdata = dev_get_platdata(dev);
 
 	if (!pdata) {
-		pdata = dwapb_gpio_get_pdata_of(dev);
+		pdata = dwapb_gpio_get_pdata(dev);
 		if (IS_ERR(pdata))
 			return PTR_ERR(pdata);
 	}
@@ -580,6 +586,13 @@
 };
 MODULE_DEVICE_TABLE(of, dwapb_of_match);
 
+static const struct acpi_device_id dwapb_acpi_match[] = {
+	{"HISI0181", 0},
+	{"APMC0D07", 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, dwapb_acpi_match);
+
 #ifdef CONFIG_PM_SLEEP
 static int dwapb_gpio_suspend(struct device *dev)
 {
@@ -674,6 +687,7 @@
 		.name	= "gpio-dwapb",
 		.pm	= &dwapb_gpio_pm_ops,
 		.of_match_table = of_match_ptr(dwapb_of_match),
+		.acpi_match_table = ACPI_PTR(dwapb_acpi_match),
 	},
 	.probe		= dwapb_gpio_probe,
 	.remove		= dwapb_gpio_remove,
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index daac2d4..05aa538 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -15,7 +15,8 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/bitops.h>
 
 #define DRVNAME "gpio-f7188x"
 
@@ -129,6 +130,9 @@
 static int f7188x_gpio_direction_out(struct gpio_chip *chip,
 				     unsigned offset, int value);
 static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
+static int f7188x_gpio_set_single_ended(struct gpio_chip *gc,
+					unsigned offset,
+					enum single_ended_mode mode);
 
 #define F7188X_GPIO_BANK(_base, _ngpio, _regbase)			\
 	{								\
@@ -139,6 +143,7 @@
 			.get              = f7188x_gpio_get,		\
 			.direction_output = f7188x_gpio_direction_out,	\
 			.set              = f7188x_gpio_set,		\
+			.set_single_ended = f7188x_gpio_set_single_ended, \
 			.base             = _base,			\
 			.ngpio            = _ngpio,			\
 			.can_sleep        = true,			\
@@ -217,7 +222,7 @@
 	superio_select(sio->addr, SIO_LD_GPIO);
 
 	dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
-	dir &= ~(1 << offset);
+	dir &= ~BIT(offset);
 	superio_outb(sio->addr, gpio_dir(bank->regbase), dir);
 
 	superio_exit(sio->addr);
@@ -238,7 +243,7 @@
 	superio_select(sio->addr, SIO_LD_GPIO);
 
 	dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
-	dir = !!(dir & (1 << offset));
+	dir = !!(dir & BIT(offset));
 	if (dir)
 		data = superio_inb(sio->addr, gpio_data_out(bank->regbase));
 	else
@@ -246,7 +251,7 @@
 
 	superio_exit(sio->addr);
 
-	return !!(data & 1 << offset);
+	return !!(data & BIT(offset));
 }
 
 static int f7188x_gpio_direction_out(struct gpio_chip *chip,
@@ -264,13 +269,13 @@
 
 	data_out = superio_inb(sio->addr, gpio_data_out(bank->regbase));
 	if (value)
-		data_out |= (1 << offset);
+		data_out |= BIT(offset);
 	else
-		data_out &= ~(1 << offset);
+		data_out &= ~BIT(offset);
 	superio_outb(sio->addr, gpio_data_out(bank->regbase), data_out);
 
 	dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
-	dir |= (1 << offset);
+	dir |= BIT(offset);
 	superio_outb(sio->addr, gpio_dir(bank->regbase), dir);
 
 	superio_exit(sio->addr);
@@ -292,14 +297,43 @@
 
 	data_out = superio_inb(sio->addr, gpio_data_out(bank->regbase));
 	if (value)
-		data_out |= (1 << offset);
+		data_out |= BIT(offset);
 	else
-		data_out &= ~(1 << offset);
+		data_out &= ~BIT(offset);
 	superio_outb(sio->addr, gpio_data_out(bank->regbase), data_out);
 
 	superio_exit(sio->addr);
 }
 
+static int f7188x_gpio_set_single_ended(struct gpio_chip *chip,
+					unsigned offset,
+					enum single_ended_mode mode)
+{
+	int err;
+	struct f7188x_gpio_bank *bank = gpiochip_get_data(chip);
+	struct f7188x_sio *sio = bank->data->sio;
+	u8 data;
+
+	if (mode != LINE_MODE_OPEN_DRAIN &&
+	    mode != LINE_MODE_PUSH_PULL)
+		return -ENOTSUPP;
+
+	err = superio_enter(sio->addr);
+	if (err)
+		return err;
+	superio_select(sio->addr, SIO_LD_GPIO);
+
+	data = superio_inb(sio->addr, gpio_out_mode(bank->regbase));
+	if (mode == LINE_MODE_OPEN_DRAIN)
+		data &= ~BIT(offset);
+	else
+		data |= BIT(offset);
+	superio_outb(sio->addr, gpio_out_mode(bank->regbase), data);
+
+	superio_exit(sio->addr);
+	return 0;
+}
+
 /*
  * Platform device and driver.
  */
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
deleted file mode 100644
index 54cddfa..0000000
--- a/drivers/gpio/gpio-generic.c
+++ /dev/null
@@ -1,660 +0,0 @@
-/*
- * Generic driver for memory-mapped GPIO controllers.
- *
- * Copyright 2008 MontaVista Software, Inc.
- * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- * ....``.```~~~~````.`.`.`.`.```````'',,,.........`````......`.......
- * ...``                                                         ```````..
- * ..The simplest form of a GPIO controller that the driver supports is``
- *  `.just a single "data" register, where GPIO state can be read and/or `
- *    `,..written. ,,..``~~~~ .....``.`.`.~~.```.`.........``````.```````
- *        `````````
-                                    ___
-_/~~|___/~|   . ```~~~~~~       ___/___\___     ,~.`.`.`.`````.~~...,,,,...
-__________|~$@~~~        %~    /o*o*o*o*o*o\   .. Implementing such a GPIO .
-o        `                     ~~~~\___/~~~~    ` controller in FPGA is ,.`
-                                                 `....trivial..'~`.```.```
- *                                                    ```````
- *  .```````~~~~`..`.``.``.
- * .  The driver supports  `...       ,..```.`~~~```````````````....````.``,,
- * .   big-endian notation, just`.  .. A bit more sophisticated controllers ,
- *  . register the device with -be`. .with a pair of set/clear-bit registers ,
- *   `.. suffix.  ```~~`````....`.`   . affecting the data register and the .`
- *     ``.`.``...```                  ```.. output pins are also supported.`
- *                        ^^             `````.`````````.,``~``~``~~``````
- *                                                   .                  ^^
- *   ,..`.`.`...````````````......`.`.`.`.`.`..`.`.`..
- * .. The expectation is that in at least some cases .    ,-~~~-,
- *  .this will be used with roll-your-own ASIC/FPGA .`     \   /
- *  .logic in Verilog or VHDL. ~~~`````````..`````~~`       \ /
- *  ..````````......```````````                             \o_
- *                                                           |
- *                              ^^                          / \
- *
- *           ...`````~~`.....``.`..........``````.`.``.```........``.
- *            `  8, 16, 32 and 64 bits registers are supported, and``.
- *            . the number of GPIOs is determined by the width of   ~
- *             .. the registers. ,............```.`.`..`.`.~~~.`.`.`~
- *               `.......````.```
- */
-
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/bug.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/log2.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/platform_device.h>
-#include <linux/mod_devicetable.h>
-
-static void bgpio_write8(void __iomem *reg, unsigned long data)
-{
-	writeb(data, reg);
-}
-
-static unsigned long bgpio_read8(void __iomem *reg)
-{
-	return readb(reg);
-}
-
-static void bgpio_write16(void __iomem *reg, unsigned long data)
-{
-	writew(data, reg);
-}
-
-static unsigned long bgpio_read16(void __iomem *reg)
-{
-	return readw(reg);
-}
-
-static void bgpio_write32(void __iomem *reg, unsigned long data)
-{
-	writel(data, reg);
-}
-
-static unsigned long bgpio_read32(void __iomem *reg)
-{
-	return readl(reg);
-}
-
-#if BITS_PER_LONG >= 64
-static void bgpio_write64(void __iomem *reg, unsigned long data)
-{
-	writeq(data, reg);
-}
-
-static unsigned long bgpio_read64(void __iomem *reg)
-{
-	return readq(reg);
-}
-#endif /* BITS_PER_LONG >= 64 */
-
-static void bgpio_write16be(void __iomem *reg, unsigned long data)
-{
-	iowrite16be(data, reg);
-}
-
-static unsigned long bgpio_read16be(void __iomem *reg)
-{
-	return ioread16be(reg);
-}
-
-static void bgpio_write32be(void __iomem *reg, unsigned long data)
-{
-	iowrite32be(data, reg);
-}
-
-static unsigned long bgpio_read32be(void __iomem *reg)
-{
-	return ioread32be(reg);
-}
-
-static unsigned long bgpio_pin2mask(struct gpio_chip *gc, unsigned int pin)
-{
-	return BIT(pin);
-}
-
-static unsigned long bgpio_pin2mask_be(struct gpio_chip *gc,
-				       unsigned int pin)
-{
-	return BIT(gc->bgpio_bits - 1 - pin);
-}
-
-static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
-{
-	unsigned long pinmask = gc->pin2mask(gc, gpio);
-
-	if (gc->bgpio_dir & pinmask)
-		return !!(gc->read_reg(gc->reg_set) & pinmask);
-	else
-		return !!(gc->read_reg(gc->reg_dat) & pinmask);
-}
-
-static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
-	return !!(gc->read_reg(gc->reg_dat) & gc->pin2mask(gc, gpio));
-}
-
-static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
-{
-}
-
-static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
-	unsigned long mask = gc->pin2mask(gc, gpio);
-	unsigned long flags;
-
-	spin_lock_irqsave(&gc->bgpio_lock, flags);
-
-	if (val)
-		gc->bgpio_data |= mask;
-	else
-		gc->bgpio_data &= ~mask;
-
-	gc->write_reg(gc->reg_dat, gc->bgpio_data);
-
-	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-}
-
-static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
-				 int val)
-{
-	unsigned long mask = gc->pin2mask(gc, gpio);
-
-	if (val)
-		gc->write_reg(gc->reg_set, mask);
-	else
-		gc->write_reg(gc->reg_clr, mask);
-}
-
-static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
-	unsigned long mask = gc->pin2mask(gc, gpio);
-	unsigned long flags;
-
-	spin_lock_irqsave(&gc->bgpio_lock, flags);
-
-	if (val)
-		gc->bgpio_data |= mask;
-	else
-		gc->bgpio_data &= ~mask;
-
-	gc->write_reg(gc->reg_set, gc->bgpio_data);
-
-	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-}
-
-static void bgpio_multiple_get_masks(struct gpio_chip *gc,
-				     unsigned long *mask, unsigned long *bits,
-				     unsigned long *set_mask,
-				     unsigned long *clear_mask)
-{
-	int i;
-
-	*set_mask = 0;
-	*clear_mask = 0;
-
-	for (i = 0; i < gc->bgpio_bits; i++) {
-		if (*mask == 0)
-			break;
-		if (__test_and_clear_bit(i, mask)) {
-			if (test_bit(i, bits))
-				*set_mask |= gc->pin2mask(gc, i);
-			else
-				*clear_mask |= gc->pin2mask(gc, i);
-		}
-	}
-}
-
-static void bgpio_set_multiple_single_reg(struct gpio_chip *gc,
-					  unsigned long *mask,
-					  unsigned long *bits,
-					  void __iomem *reg)
-{
-	unsigned long flags;
-	unsigned long set_mask, clear_mask;
-
-	spin_lock_irqsave(&gc->bgpio_lock, flags);
-
-	bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
-
-	gc->bgpio_data |= set_mask;
-	gc->bgpio_data &= ~clear_mask;
-
-	gc->write_reg(reg, gc->bgpio_data);
-
-	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-}
-
-static void bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
-			       unsigned long *bits)
-{
-	bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_dat);
-}
-
-static void bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
-				   unsigned long *bits)
-{
-	bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_set);
-}
-
-static void bgpio_set_multiple_with_clear(struct gpio_chip *gc,
-					  unsigned long *mask,
-					  unsigned long *bits)
-{
-	unsigned long set_mask, clear_mask;
-
-	bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
-
-	if (set_mask)
-		gc->write_reg(gc->reg_set, set_mask);
-	if (clear_mask)
-		gc->write_reg(gc->reg_clr, clear_mask);
-}
-
-static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
-	return 0;
-}
-
-static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
-				int val)
-{
-	return -EINVAL;
-}
-
-static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
-				int val)
-{
-	gc->set(gc, gpio, val);
-
-	return 0;
-}
-
-static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&gc->bgpio_lock, flags);
-
-	gc->bgpio_dir &= ~gc->pin2mask(gc, gpio);
-	gc->write_reg(gc->reg_dir, gc->bgpio_dir);
-
-	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
-	return 0;
-}
-
-static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
-{
-	/* Return 0 if output, 1 of input */
-	return !(gc->read_reg(gc->reg_dir) & gc->pin2mask(gc, gpio));
-}
-
-static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
-{
-	unsigned long flags;
-
-	gc->set(gc, gpio, val);
-
-	spin_lock_irqsave(&gc->bgpio_lock, flags);
-
-	gc->bgpio_dir |= gc->pin2mask(gc, gpio);
-	gc->write_reg(gc->reg_dir, gc->bgpio_dir);
-
-	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
-	return 0;
-}
-
-static int bgpio_dir_in_inv(struct gpio_chip *gc, unsigned int gpio)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&gc->bgpio_lock, flags);
-
-	gc->bgpio_dir |= gc->pin2mask(gc, gpio);
-	gc->write_reg(gc->reg_dir, gc->bgpio_dir);
-
-	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
-	return 0;
-}
-
-static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
-{
-	unsigned long flags;
-
-	gc->set(gc, gpio, val);
-
-	spin_lock_irqsave(&gc->bgpio_lock, flags);
-
-	gc->bgpio_dir &= ~gc->pin2mask(gc, gpio);
-	gc->write_reg(gc->reg_dir, gc->bgpio_dir);
-
-	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
-	return 0;
-}
-
-static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
-{
-	/* Return 0 if output, 1 if input */
-	return !!(gc->read_reg(gc->reg_dir) & gc->pin2mask(gc, gpio));
-}
-
-static int bgpio_setup_accessors(struct device *dev,
-				 struct gpio_chip *gc,
-				 bool bit_be,
-				 bool byte_be)
-{
-
-	switch (gc->bgpio_bits) {
-	case 8:
-		gc->read_reg	= bgpio_read8;
-		gc->write_reg	= bgpio_write8;
-		break;
-	case 16:
-		if (byte_be) {
-			gc->read_reg	= bgpio_read16be;
-			gc->write_reg	= bgpio_write16be;
-		} else {
-			gc->read_reg	= bgpio_read16;
-			gc->write_reg	= bgpio_write16;
-		}
-		break;
-	case 32:
-		if (byte_be) {
-			gc->read_reg	= bgpio_read32be;
-			gc->write_reg	= bgpio_write32be;
-		} else {
-			gc->read_reg	= bgpio_read32;
-			gc->write_reg	= bgpio_write32;
-		}
-		break;
-#if BITS_PER_LONG >= 64
-	case 64:
-		if (byte_be) {
-			dev_err(dev,
-				"64 bit big endian byte order unsupported\n");
-			return -EINVAL;
-		} else {
-			gc->read_reg	= bgpio_read64;
-			gc->write_reg	= bgpio_write64;
-		}
-		break;
-#endif /* BITS_PER_LONG >= 64 */
-	default:
-		dev_err(dev, "unsupported data width %u bits\n", gc->bgpio_bits);
-		return -EINVAL;
-	}
-
-	gc->pin2mask = bit_be ? bgpio_pin2mask_be : bgpio_pin2mask;
-
-	return 0;
-}
-
-/*
- * Create the device and allocate the resources.  For setting GPIO's there are
- * three supported configurations:
- *
- *	- single input/output register resource (named "dat").
- *	- set/clear pair (named "set" and "clr").
- *	- single output register resource and single input resource ("set" and
- *	dat").
- *
- * For the single output register, this drives a 1 by setting a bit and a zero
- * by clearing a bit.  For the set clr pair, this drives a 1 by setting a bit
- * in the set register and clears it by setting a bit in the clear register.
- * The configuration is detected by which resources are present.
- *
- * For setting the GPIO direction, there are three supported configurations:
- *
- *	- simple bidirection GPIO that requires no configuration.
- *	- an output direction register (named "dirout") where a 1 bit
- *	indicates the GPIO is an output.
- *	- an input direction register (named "dirin") where a 1 bit indicates
- *	the GPIO is an input.
- */
-static int bgpio_setup_io(struct gpio_chip *gc,
-			  void __iomem *dat,
-			  void __iomem *set,
-			  void __iomem *clr,
-			  unsigned long flags)
-{
-
-	gc->reg_dat = dat;
-	if (!gc->reg_dat)
-		return -EINVAL;
-
-	if (set && clr) {
-		gc->reg_set = set;
-		gc->reg_clr = clr;
-		gc->set = bgpio_set_with_clear;
-		gc->set_multiple = bgpio_set_multiple_with_clear;
-	} else if (set && !clr) {
-		gc->reg_set = set;
-		gc->set = bgpio_set_set;
-		gc->set_multiple = bgpio_set_multiple_set;
-	} else if (flags & BGPIOF_NO_OUTPUT) {
-		gc->set = bgpio_set_none;
-		gc->set_multiple = NULL;
-	} else {
-		gc->set = bgpio_set;
-		gc->set_multiple = bgpio_set_multiple;
-	}
-
-	if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
-	    (flags & BGPIOF_READ_OUTPUT_REG_SET))
-		gc->get = bgpio_get_set;
-	else
-		gc->get = bgpio_get;
-
-	return 0;
-}
-
-static int bgpio_setup_direction(struct gpio_chip *gc,
-				 void __iomem *dirout,
-				 void __iomem *dirin,
-				 unsigned long flags)
-{
-	if (dirout && dirin) {
-		return -EINVAL;
-	} else if (dirout) {
-		gc->reg_dir = dirout;
-		gc->direction_output = bgpio_dir_out;
-		gc->direction_input = bgpio_dir_in;
-		gc->get_direction = bgpio_get_dir;
-	} else if (dirin) {
-		gc->reg_dir = dirin;
-		gc->direction_output = bgpio_dir_out_inv;
-		gc->direction_input = bgpio_dir_in_inv;
-		gc->get_direction = bgpio_get_dir_inv;
-	} else {
-		if (flags & BGPIOF_NO_OUTPUT)
-			gc->direction_output = bgpio_dir_out_err;
-		else
-			gc->direction_output = bgpio_simple_dir_out;
-		gc->direction_input = bgpio_simple_dir_in;
-	}
-
-	return 0;
-}
-
-static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
-{
-	if (gpio_pin < chip->ngpio)
-		return 0;
-
-	return -EINVAL;
-}
-
-int bgpio_init(struct gpio_chip *gc, struct device *dev,
-	       unsigned long sz, void __iomem *dat, void __iomem *set,
-	       void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
-	       unsigned long flags)
-{
-	int ret;
-
-	if (!is_power_of_2(sz))
-		return -EINVAL;
-
-	gc->bgpio_bits = sz * 8;
-	if (gc->bgpio_bits > BITS_PER_LONG)
-		return -EINVAL;
-
-	spin_lock_init(&gc->bgpio_lock);
-	gc->parent = dev;
-	gc->label = dev_name(dev);
-	gc->base = -1;
-	gc->ngpio = gc->bgpio_bits;
-	gc->request = bgpio_request;
-
-	ret = bgpio_setup_io(gc, dat, set, clr, flags);
-	if (ret)
-		return ret;
-
-	ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN,
-				    flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
-	if (ret)
-		return ret;
-
-	ret = bgpio_setup_direction(gc, dirout, dirin, flags);
-	if (ret)
-		return ret;
-
-	gc->bgpio_data = gc->read_reg(gc->reg_dat);
-	if (gc->set == bgpio_set_set &&
-			!(flags & BGPIOF_UNREADABLE_REG_SET))
-		gc->bgpio_data = gc->read_reg(gc->reg_set);
-	if (gc->reg_dir && !(flags & BGPIOF_UNREADABLE_REG_DIR))
-		gc->bgpio_dir = gc->read_reg(gc->reg_dir);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(bgpio_init);
-
-#ifdef CONFIG_GPIO_GENERIC_PLATFORM
-
-static void __iomem *bgpio_map(struct platform_device *pdev,
-			       const char *name,
-			       resource_size_t sane_sz)
-{
-	struct resource *r;
-	resource_size_t sz;
-
-	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
-	if (!r)
-		return NULL;
-
-	sz = resource_size(r);
-	if (sz != sane_sz)
-		return IOMEM_ERR_PTR(-EINVAL);
-
-	return devm_ioremap_resource(&pdev->dev, r);
-}
-
-static int bgpio_pdev_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct resource *r;
-	void __iomem *dat;
-	void __iomem *set;
-	void __iomem *clr;
-	void __iomem *dirout;
-	void __iomem *dirin;
-	unsigned long sz;
-	unsigned long flags = pdev->id_entry->driver_data;
-	int err;
-	struct gpio_chip *gc;
-	struct bgpio_pdata *pdata = dev_get_platdata(dev);
-
-	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
-	if (!r)
-		return -EINVAL;
-
-	sz = resource_size(r);
-
-	dat = bgpio_map(pdev, "dat", sz);
-	if (IS_ERR(dat))
-		return PTR_ERR(dat);
-
-	set = bgpio_map(pdev, "set", sz);
-	if (IS_ERR(set))
-		return PTR_ERR(set);
-
-	clr = bgpio_map(pdev, "clr", sz);
-	if (IS_ERR(clr))
-		return PTR_ERR(clr);
-
-	dirout = bgpio_map(pdev, "dirout", sz);
-	if (IS_ERR(dirout))
-		return PTR_ERR(dirout);
-
-	dirin = bgpio_map(pdev, "dirin", sz);
-	if (IS_ERR(dirin))
-		return PTR_ERR(dirin);
-
-	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
-	if (!gc)
-		return -ENOMEM;
-
-	err = bgpio_init(gc, dev, sz, dat, set, clr, dirout, dirin, flags);
-	if (err)
-		return err;
-
-	if (pdata) {
-		if (pdata->label)
-			gc->label = pdata->label;
-		gc->base = pdata->base;
-		if (pdata->ngpio > 0)
-			gc->ngpio = pdata->ngpio;
-	}
-
-	platform_set_drvdata(pdev, gc);
-
-	return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
-}
-
-static const struct platform_device_id bgpio_id_table[] = {
-	{
-		.name		= "basic-mmio-gpio",
-		.driver_data	= 0,
-	}, {
-		.name		= "basic-mmio-gpio-be",
-		.driver_data	= BGPIOF_BIG_ENDIAN,
-	},
-	{ }
-};
-MODULE_DEVICE_TABLE(platform, bgpio_id_table);
-
-static struct platform_driver bgpio_driver = {
-	.driver = {
-		.name = "basic-mmio-gpio",
-	},
-	.id_table = bgpio_id_table,
-	.probe = bgpio_pdev_probe,
-};
-
-module_platform_driver(bgpio_driver);
-
-#endif /* CONFIG_GPIO_GENERIC_PLATFORM */
-
-MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers");
-MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index b219c82..63a962d 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -34,6 +34,8 @@
 
 /* Chip Id numbers */
 #define NO_DEV_ID	0xffff
+#define IT8620_ID	0x8620
+#define IT8628_ID	0x8628
 #define IT8728_ID	0x8728
 #define IT8732_ID	0x8732
 #define IT8761_ID	0x8761
@@ -302,6 +304,14 @@
 	it87_gpio->chip = it87_template_chip;
 
 	switch (chip_type) {
+	case IT8620_ID:
+	case IT8628_ID:
+		gpio_ba_reg = 0x62;
+		it87_gpio->io_size = 11;
+		it87_gpio->output_base = 0xc8;
+		it87_gpio->simple_size = 0;
+		it87_gpio->chip.ngpio = 64;
+		break;
 	case IT8728_ID:
 	case IT8732_ID:
 		gpio_ba_reg = 0x62;
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
new file mode 100644
index 0000000..10c09bd
--- /dev/null
+++ b/drivers/gpio/gpio-loongson1.c
@@ -0,0 +1,102 @@
+/*
+ * GPIO Driver for Loongson 1 SoC
+ *
+ * Copyright (C) 2015-2016 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+
+/* Loongson 1 GPIO Register Definitions */
+#define GPIO_CFG		0x0
+#define GPIO_DIR		0x10
+#define GPIO_DATA		0x20
+#define GPIO_OUTPUT		0x30
+
+static void __iomem *gpio_reg_base;
+
+static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+	unsigned long pinmask = gc->pin2mask(gc, offset);
+	unsigned long flags;
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+	__raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) | pinmask,
+		     gpio_reg_base + GPIO_CFG);
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+	return 0;
+}
+
+static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset)
+{
+	unsigned long pinmask = gc->pin2mask(gc, offset);
+	unsigned long flags;
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+	__raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) & ~pinmask,
+		     gpio_reg_base + GPIO_CFG);
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static int ls1x_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct gpio_chip *gc;
+	struct resource *res;
+	int ret;
+
+	gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+	if (!gc)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "failed to get I/O memory\n");
+		return -EINVAL;
+	}
+
+	gpio_reg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(gpio_reg_base))
+		return PTR_ERR(gpio_reg_base);
+
+	ret = bgpio_init(gc, dev, 4, gpio_reg_base + GPIO_DATA,
+			 gpio_reg_base + GPIO_OUTPUT, NULL,
+			 NULL, gpio_reg_base + GPIO_DIR, 0);
+	if (ret)
+		goto err;
+
+	gc->owner = THIS_MODULE;
+	gc->request = ls1x_gpio_request;
+	gc->free = ls1x_gpio_free;
+	gc->base = pdev->id * 32;
+
+	ret = devm_gpiochip_add_data(dev, gc, NULL);
+	if (ret)
+		goto err;
+
+	platform_set_drvdata(pdev, gc);
+	dev_info(dev, "Loongson1 GPIO driver registered\n");
+
+	return 0;
+err:
+	dev_err(dev, "failed to register GPIO device\n");
+	return ret;
+}
+
+static struct platform_driver ls1x_gpio_driver = {
+	.probe	= ls1x_gpio_probe,
+	.driver	= {
+		.name	= "ls1x-gpio",
+	},
+};
+
+module_platform_driver(ls1x_gpio_driver);
+
+MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson1 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 7fffc1d..d55af50 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -17,7 +17,6 @@
 #include <linux/io.h>
 #include <linux/init.h>
 #include <linux/clk.h>
-#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -185,8 +184,6 @@
 	gchip->gc.parent = &pdev->dev;
 	gchip->gc.base = -1;
 
-	platform_set_drvdata(pdev, gchip);
-
 	ret = gpiochip_add_data(&gchip->gc, gchip);
 	if (ret) {
 		dev_err(&pdev->dev, "couldn't register gpio driver\n");
@@ -210,7 +207,6 @@
 	{ .compatible = "fujitsu,mb86s70-gpio" },
 	{ /* sentinel */ }
 };
-MODULE_DEVICE_TABLE(of, mb86s70_gpio_dt_ids);
 
 static struct platform_driver mb86s70_gpio_driver = {
 	.driver = {
@@ -225,8 +221,4 @@
 {
 	return platform_driver_register(&mb86s70_gpio_driver);
 }
-module_init(mb86s70_gpio_init);
-
-MODULE_DESCRIPTION("MB86S7x GPIO Driver");
-MODULE_ALIAS("platform:mb86s70-gpio");
-MODULE_LICENSE("GPL");
+device_initcall(mb86s70_gpio_init);
diff --git a/drivers/gpio/gpio-mc9s08dz60.c b/drivers/gpio/gpio-mc9s08dz60.c
index 14f252f..2fcad5b 100644
--- a/drivers/gpio/gpio-mc9s08dz60.c
+++ b/drivers/gpio/gpio-mc9s08dz60.c
@@ -15,7 +15,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/gpio.h>
@@ -111,8 +111,6 @@
 	{},
 };
 
-MODULE_DEVICE_TABLE(i2c, mc9s08dz60_id);
-
 static struct i2c_driver mc9s08dz60_i2c_driver = {
 	.driver = {
 		.name = "mc9s08dz60",
@@ -120,10 +118,4 @@
 	.probe = mc9s08dz60_probe,
 	.id_table = mc9s08dz60_id,
 };
-
-module_i2c_driver(mc9s08dz60_i2c_driver);
-
-MODULE_AUTHOR("Freescale Semiconductor, Inc. "
-		"Wu Guoxing <b39297@freescale.com>");
-MODULE_DESCRIPTION("mc9s08dz60 gpio function on mx35 3ds board");
-MODULE_LICENSE("GPL v2");
+builtin_i2c_driver(mc9s08dz60_i2c_driver);
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 47e4869..ac22efc 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -77,7 +77,6 @@
 	/* lock protects the cached values */
 	struct mutex		lock;
 	struct mutex		irq_lock;
-	struct irq_domain	*irq_domain;
 
 	struct gpio_chip	chip;
 
@@ -96,11 +95,6 @@
 	struct mcp23s08		chip[];
 };
 
-/* This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpio_lock_class;
-
 /*----------------------------------------------------------------------*/
 
 #if IS_ENABLED(CONFIG_I2C)
@@ -368,8 +362,9 @@
 	for (i = 0; i < mcp->chip.ngpio; i++) {
 		if ((BIT(i) & mcp->cache[MCP_INTF]) &&
 		    ((BIT(i) & intcap & mcp->irq_rise) ||
-		     (mcp->irq_fall & ~intcap & BIT(i)))) {
-			child_irq = irq_find_mapping(mcp->irq_domain, i);
+		     (mcp->irq_fall & ~intcap & BIT(i)) ||
+		     (BIT(i) & mcp->cache[MCP_INTCON]))) {
+			child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
 			handle_nested_irq(child_irq);
 		}
 	}
@@ -377,16 +372,10 @@
 	return IRQ_HANDLED;
 }
 
-static int mcp23s08_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
-	struct mcp23s08 *mcp = gpiochip_get_data(chip);
-
-	return irq_find_mapping(mcp->irq_domain, offset);
-}
-
 static void mcp23s08_irq_mask(struct irq_data *data)
 {
-	struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct mcp23s08 *mcp = gpiochip_get_data(gc);
 	unsigned int pos = data->hwirq;
 
 	mcp->cache[MCP_GPINTEN] &= ~BIT(pos);
@@ -394,7 +383,8 @@
 
 static void mcp23s08_irq_unmask(struct irq_data *data)
 {
-	struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct mcp23s08 *mcp = gpiochip_get_data(gc);
 	unsigned int pos = data->hwirq;
 
 	mcp->cache[MCP_GPINTEN] |= BIT(pos);
@@ -402,7 +392,8 @@
 
 static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
 {
-	struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct mcp23s08 *mcp = gpiochip_get_data(gc);
 	unsigned int pos = data->hwirq;
 	int status = 0;
 
@@ -418,6 +409,12 @@
 		mcp->cache[MCP_INTCON] &= ~BIT(pos);
 		mcp->irq_rise &= ~BIT(pos);
 		mcp->irq_fall |= BIT(pos);
+	} else if (type & IRQ_TYPE_LEVEL_HIGH) {
+		mcp->cache[MCP_INTCON] |= BIT(pos);
+		mcp->cache[MCP_DEFVAL] &= ~BIT(pos);
+	} else if (type & IRQ_TYPE_LEVEL_LOW) {
+		mcp->cache[MCP_INTCON] |= BIT(pos);
+		mcp->cache[MCP_DEFVAL] |= BIT(pos);
 	} else
 		return -EINVAL;
 
@@ -426,14 +423,16 @@
 
 static void mcp23s08_irq_bus_lock(struct irq_data *data)
 {
-	struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct mcp23s08 *mcp = gpiochip_get_data(gc);
 
 	mutex_lock(&mcp->irq_lock);
 }
 
 static void mcp23s08_irq_bus_unlock(struct irq_data *data)
 {
-	struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct mcp23s08 *mcp = gpiochip_get_data(gc);
 
 	mutex_lock(&mcp->lock);
 	mcp->ops->write(mcp, MCP_GPINTEN, mcp->cache[MCP_GPINTEN]);
@@ -443,27 +442,6 @@
 	mutex_unlock(&mcp->irq_lock);
 }
 
-static int mcp23s08_irq_reqres(struct irq_data *data)
-{
-	struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
-
-	if (gpiochip_lock_as_irq(&mcp->chip, data->hwirq)) {
-		dev_err(mcp->chip.parent,
-			"unable to lock HW IRQ %lu for IRQ usage\n",
-			data->hwirq);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static void mcp23s08_irq_relres(struct irq_data *data)
-{
-	struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
-
-	gpiochip_unlock_as_irq(&mcp->chip, data->hwirq);
-}
-
 static struct irq_chip mcp23s08_irq_chip = {
 	.name = "gpio-mcp23xxx",
 	.irq_mask = mcp23s08_irq_mask,
@@ -471,24 +449,16 @@
 	.irq_set_type = mcp23s08_irq_set_type,
 	.irq_bus_lock = mcp23s08_irq_bus_lock,
 	.irq_bus_sync_unlock = mcp23s08_irq_bus_unlock,
-	.irq_request_resources = mcp23s08_irq_reqres,
-	.irq_release_resources = mcp23s08_irq_relres,
 };
 
 static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
 {
 	struct gpio_chip *chip = &mcp->chip;
-	int err, irq, j;
+	int err;
 	unsigned long irqflags = IRQF_ONESHOT | IRQF_SHARED;
 
 	mutex_init(&mcp->irq_lock);
 
-	mcp->irq_domain = irq_domain_add_linear(chip->parent->of_node,
-						chip->ngpio,
-						&irq_domain_simple_ops, mcp);
-	if (!mcp->irq_domain)
-		return -ENODEV;
-
 	if (mcp->irq_active_high)
 		irqflags |= IRQF_TRIGGER_HIGH;
 	else
@@ -503,32 +473,25 @@
 		return err;
 	}
 
-	chip->to_irq = mcp23s08_gpio_to_irq;
-
-	for (j = 0; j < mcp->chip.ngpio; j++) {
-		irq = irq_create_mapping(mcp->irq_domain, j);
-		irq_set_lockdep_class(irq, &gpio_lock_class);
-		irq_set_chip_data(irq, mcp);
-		irq_set_chip(irq, &mcp23s08_irq_chip);
-		irq_set_nested_thread(irq, true);
-		irq_set_noprobe(irq);
+	err =  gpiochip_irqchip_add(chip,
+				    &mcp23s08_irq_chip,
+				    0,
+				    handle_simple_irq,
+				    IRQ_TYPE_NONE);
+	if (err) {
+		dev_err(chip->parent,
+			"could not connect irqchip to gpiochip: %d\n", err);
+		return err;
 	}
+
+	gpiochip_set_chained_irqchip(chip,
+				     &mcp23s08_irq_chip,
+				     mcp->irq,
+				     NULL);
+
 	return 0;
 }
 
-static void mcp23s08_irq_teardown(struct mcp23s08 *mcp)
-{
-	unsigned int irq, i;
-
-	for (i = 0; i < mcp->chip.ngpio; i++) {
-		irq = irq_find_mapping(mcp->irq_domain, i);
-		if (irq > 0)
-			irq_dispose_mapping(irq);
-	}
-
-	irq_domain_remove(mcp->irq_domain);
-}
-
 /*----------------------------------------------------------------------*/
 
 #ifdef CONFIG_DEBUG_FS
@@ -721,7 +684,6 @@
 	if (mcp->irq && mcp->irq_controller) {
 		status = mcp23s08_irq_setup(mcp);
 		if (status) {
-			mcp23s08_irq_teardown(mcp);
 			goto fail;
 		}
 	}
@@ -847,9 +809,6 @@
 {
 	struct mcp23s08 *mcp = i2c_get_clientdata(client);
 
-	if (client->irq && mcp->irq_controller)
-		mcp23s08_irq_teardown(mcp);
-
 	gpiochip_remove(&mcp->chip);
 	kfree(mcp);
 
@@ -1017,8 +976,6 @@
 		if (!data->mcp[addr])
 			continue;
 
-		if (spi->irq && data->mcp[addr]->irq_controller)
-			mcp23s08_irq_teardown(data->mcp[addr]);
 		gpiochip_remove(&data->mcp[addr]->chip);
 	}
 
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a68e199..cc103af 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -35,16 +35,14 @@
 struct men_z127_gpio {
 	struct gpio_chip gc;
 	void __iomem *reg_base;
-	struct mcb_device *mdev;
 	struct resource *mem;
-	spinlock_t lock;
 };
 
 static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
 			     unsigned debounce)
 {
 	struct men_z127_gpio *priv = gpiochip_get_data(gc);
-	struct device *dev = &priv->mdev->dev;
+	struct device *dev = gc->parent;
 	unsigned int rnd;
 	u32 db_en, db_cnt;
 
@@ -69,7 +67,7 @@
 		debounce /= 50;
 	}
 
-	spin_lock(&priv->lock);
+	spin_lock(&gc->bgpio_lock);
 
 	db_en = readl(priv->reg_base + MEN_Z127_DBER);
 
@@ -84,29 +82,33 @@
 	writel(db_en, priv->reg_base + MEN_Z127_DBER);
 	writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
 
-	spin_unlock(&priv->lock);
+	spin_unlock(&gc->bgpio_lock);
 
 	return 0;
 }
 
-static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
+static int men_z127_set_single_ended(struct gpio_chip *gc,
+				     unsigned offset,
+				     enum single_ended_mode mode)
 {
 	struct men_z127_gpio *priv = gpiochip_get_data(gc);
 	u32 od_en;
 
-	if (gpio_pin >= gc->ngpio)
-		return -EINVAL;
+	if (mode != LINE_MODE_OPEN_DRAIN &&
+	    mode != LINE_MODE_PUSH_PULL)
+		return -ENOTSUPP;
 
-	spin_lock(&priv->lock);
+	spin_lock(&gc->bgpio_lock);
 	od_en = readl(priv->reg_base + MEN_Z127_ODER);
 
-	if (gpiochip_line_is_open_drain(gc, gpio_pin))
-		od_en |= BIT(gpio_pin);
+	if (mode == LINE_MODE_OPEN_DRAIN)
+		od_en |= BIT(offset);
 	else
-		od_en &= ~BIT(gpio_pin);
+		/* Implicitly LINE_MODE_PUSH_PULL */
+		od_en &= ~BIT(offset);
 
 	writel(od_en, priv->reg_base + MEN_Z127_ODER);
-	spin_unlock(&priv->lock);
+	spin_unlock(&gc->bgpio_lock);
 
 	return 0;
 }
@@ -136,7 +138,6 @@
 		goto err_release;
 	}
 
-	men_z127_gpio->mdev = mdev;
 	mcb_set_drvdata(mdev, men_z127_gpio);
 
 	ret = bgpio_init(&men_z127_gpio->gc, &mdev->dev, 4,
@@ -149,7 +150,7 @@
 		goto err_unmap;
 
 	men_z127_gpio->gc.set_debounce = men_z127_debounce;
-	men_z127_gpio->gc.request = men_z127_request;
+	men_z127_gpio->gc.set_single_ended = men_z127_set_single_ended;
 
 	ret = gpiochip_add_data(&men_z127_gpio->gc, men_z127_gpio);
 	if (ret) {
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
new file mode 100644
index 0000000..6c1cb3b
--- /dev/null
+++ b/drivers/gpio/gpio-mmio.c
@@ -0,0 +1,660 @@
+/*
+ * Generic driver for memory-mapped GPIO controllers.
+ *
+ * Copyright 2008 MontaVista Software, Inc.
+ * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * ....``.```~~~~````.`.`.`.`.```````'',,,.........`````......`.......
+ * ...``                                                         ```````..
+ * ..The simplest form of a GPIO controller that the driver supports is``
+ *  `.just a single "data" register, where GPIO state can be read and/or `
+ *    `,..written. ,,..``~~~~ .....``.`.`.~~.```.`.........``````.```````
+ *        `````````
+                                    ___
+_/~~|___/~|   . ```~~~~~~       ___/___\___     ,~.`.`.`.`````.~~...,,,,...
+__________|~$@~~~        %~    /o*o*o*o*o*o\   .. Implementing such a GPIO .
+o        `                     ~~~~\___/~~~~    ` controller in FPGA is ,.`
+                                                 `....trivial..'~`.```.```
+ *                                                    ```````
+ *  .```````~~~~`..`.``.``.
+ * .  The driver supports  `...       ,..```.`~~~```````````````....````.``,,
+ * .   big-endian notation, just`.  .. A bit more sophisticated controllers ,
+ *  . register the device with -be`. .with a pair of set/clear-bit registers ,
+ *   `.. suffix.  ```~~`````....`.`   . affecting the data register and the .`
+ *     ``.`.``...```                  ```.. output pins are also supported.`
+ *                        ^^             `````.`````````.,``~``~``~~``````
+ *                                                   .                  ^^
+ *   ,..`.`.`...````````````......`.`.`.`.`.`..`.`.`..
+ * .. The expectation is that in at least some cases .    ,-~~~-,
+ *  .this will be used with roll-your-own ASIC/FPGA .`     \   /
+ *  .logic in Verilog or VHDL. ~~~`````````..`````~~`       \ /
+ *  ..````````......```````````                             \o_
+ *                                                           |
+ *                              ^^                          / \
+ *
+ *           ...`````~~`.....``.`..........``````.`.``.```........``.
+ *            `  8, 16, 32 and 64 bits registers are supported, and``.
+ *            . the number of GPIOs is determined by the width of   ~
+ *             .. the registers. ,............```.`.`..`.`.~~~.`.`.`~
+ *               `.......````.```
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/gpio/driver.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+
+static void bgpio_write8(void __iomem *reg, unsigned long data)
+{
+	writeb(data, reg);
+}
+
+static unsigned long bgpio_read8(void __iomem *reg)
+{
+	return readb(reg);
+}
+
+static void bgpio_write16(void __iomem *reg, unsigned long data)
+{
+	writew(data, reg);
+}
+
+static unsigned long bgpio_read16(void __iomem *reg)
+{
+	return readw(reg);
+}
+
+static void bgpio_write32(void __iomem *reg, unsigned long data)
+{
+	writel(data, reg);
+}
+
+static unsigned long bgpio_read32(void __iomem *reg)
+{
+	return readl(reg);
+}
+
+#if BITS_PER_LONG >= 64
+static void bgpio_write64(void __iomem *reg, unsigned long data)
+{
+	writeq(data, reg);
+}
+
+static unsigned long bgpio_read64(void __iomem *reg)
+{
+	return readq(reg);
+}
+#endif /* BITS_PER_LONG >= 64 */
+
+static void bgpio_write16be(void __iomem *reg, unsigned long data)
+{
+	iowrite16be(data, reg);
+}
+
+static unsigned long bgpio_read16be(void __iomem *reg)
+{
+	return ioread16be(reg);
+}
+
+static void bgpio_write32be(void __iomem *reg, unsigned long data)
+{
+	iowrite32be(data, reg);
+}
+
+static unsigned long bgpio_read32be(void __iomem *reg)
+{
+	return ioread32be(reg);
+}
+
+static unsigned long bgpio_pin2mask(struct gpio_chip *gc, unsigned int pin)
+{
+	return BIT(pin);
+}
+
+static unsigned long bgpio_pin2mask_be(struct gpio_chip *gc,
+				       unsigned int pin)
+{
+	return BIT(gc->bgpio_bits - 1 - pin);
+}
+
+static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
+{
+	unsigned long pinmask = gc->pin2mask(gc, gpio);
+
+	if (gc->bgpio_dir & pinmask)
+		return !!(gc->read_reg(gc->reg_set) & pinmask);
+	else
+		return !!(gc->read_reg(gc->reg_dat) & pinmask);
+}
+
+static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+	return !!(gc->read_reg(gc->reg_dat) & gc->pin2mask(gc, gpio));
+}
+
+static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+}
+
+static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	unsigned long mask = gc->pin2mask(gc, gpio);
+	unsigned long flags;
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+	if (val)
+		gc->bgpio_data |= mask;
+	else
+		gc->bgpio_data &= ~mask;
+
+	gc->write_reg(gc->reg_dat, gc->bgpio_data);
+
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
+				 int val)
+{
+	unsigned long mask = gc->pin2mask(gc, gpio);
+
+	if (val)
+		gc->write_reg(gc->reg_set, mask);
+	else
+		gc->write_reg(gc->reg_clr, mask);
+}
+
+static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	unsigned long mask = gc->pin2mask(gc, gpio);
+	unsigned long flags;
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+	if (val)
+		gc->bgpio_data |= mask;
+	else
+		gc->bgpio_data &= ~mask;
+
+	gc->write_reg(gc->reg_set, gc->bgpio_data);
+
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static void bgpio_multiple_get_masks(struct gpio_chip *gc,
+				     unsigned long *mask, unsigned long *bits,
+				     unsigned long *set_mask,
+				     unsigned long *clear_mask)
+{
+	int i;
+
+	*set_mask = 0;
+	*clear_mask = 0;
+
+	for (i = 0; i < gc->bgpio_bits; i++) {
+		if (*mask == 0)
+			break;
+		if (__test_and_clear_bit(i, mask)) {
+			if (test_bit(i, bits))
+				*set_mask |= gc->pin2mask(gc, i);
+			else
+				*clear_mask |= gc->pin2mask(gc, i);
+		}
+	}
+}
+
+static void bgpio_set_multiple_single_reg(struct gpio_chip *gc,
+					  unsigned long *mask,
+					  unsigned long *bits,
+					  void __iomem *reg)
+{
+	unsigned long flags;
+	unsigned long set_mask, clear_mask;
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+	bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
+
+	gc->bgpio_data |= set_mask;
+	gc->bgpio_data &= ~clear_mask;
+
+	gc->write_reg(reg, gc->bgpio_data);
+
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static void bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+			       unsigned long *bits)
+{
+	bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_dat);
+}
+
+static void bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
+				   unsigned long *bits)
+{
+	bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_set);
+}
+
+static void bgpio_set_multiple_with_clear(struct gpio_chip *gc,
+					  unsigned long *mask,
+					  unsigned long *bits)
+{
+	unsigned long set_mask, clear_mask;
+
+	bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
+
+	if (set_mask)
+		gc->write_reg(gc->reg_set, set_mask);
+	if (clear_mask)
+		gc->write_reg(gc->reg_clr, clear_mask);
+}
+
+static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+	return 0;
+}
+
+static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
+				int val)
+{
+	return -EINVAL;
+}
+
+static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
+				int val)
+{
+	gc->set(gc, gpio, val);
+
+	return 0;
+}
+
+static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+	gc->bgpio_dir &= ~gc->pin2mask(gc, gpio);
+	gc->write_reg(gc->reg_dir, gc->bgpio_dir);
+
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+	return 0;
+}
+
+static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
+{
+	/* Return 0 if output, 1 if input */
+	return !(gc->read_reg(gc->reg_dir) & gc->pin2mask(gc, gpio));
+}
+
+static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	unsigned long flags;
+
+	gc->set(gc, gpio, val);
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+	gc->bgpio_dir |= gc->pin2mask(gc, gpio);
+	gc->write_reg(gc->reg_dir, gc->bgpio_dir);
+
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+	return 0;
+}
+
+static int bgpio_dir_in_inv(struct gpio_chip *gc, unsigned int gpio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+	gc->bgpio_dir |= gc->pin2mask(gc, gpio);
+	gc->write_reg(gc->reg_dir, gc->bgpio_dir);
+
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+	return 0;
+}
+
+static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	unsigned long flags;
+
+	gc->set(gc, gpio, val);
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+	gc->bgpio_dir &= ~gc->pin2mask(gc, gpio);
+	gc->write_reg(gc->reg_dir, gc->bgpio_dir);
+
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+	return 0;
+}
+
+static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
+{
+	/* Return 0 if output, 1 if input */
+	return !!(gc->read_reg(gc->reg_dir) & gc->pin2mask(gc, gpio));
+}
+
+static int bgpio_setup_accessors(struct device *dev,
+				 struct gpio_chip *gc,
+				 bool bit_be,
+				 bool byte_be)
+{
+
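+	/* Pick register accessors matching the data width and byte order */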
+	switch (gc->bgpio_bits) {
+	case 8:
+		gc->read_reg	= bgpio_read8;
+		gc->write_reg	= bgpio_write8;
+		break;
+	case 16:
+		if (byte_be) {
+			gc->read_reg	= bgpio_read16be;
+			gc->write_reg	= bgpio_write16be;
+		} else {
+			gc->read_reg	= bgpio_read16;
+			gc->write_reg	= bgpio_write16;
+		}
+		break;
+	case 32:
+		if (byte_be) {
+			gc->read_reg	= bgpio_read32be;
+			gc->write_reg	= bgpio_write32be;
+		} else {
+			gc->read_reg	= bgpio_read32;
+			gc->write_reg	= bgpio_write32;
+		}
+		break;
+#if BITS_PER_LONG >= 64
+	case 64:
+		if (byte_be) {
+			dev_err(dev,
+				"64 bit big endian byte order unsupported\n");
+			return -EINVAL;
+		} else {
+			gc->read_reg	= bgpio_read64;
+			gc->write_reg	= bgpio_write64;
+		}
+		break;
+#endif /* BITS_PER_LONG >= 64 */
+	default:
+		dev_err(dev, "unsupported data width %u bits\n", gc->bgpio_bits);
+		return -EINVAL;
+	}
+
+	gc->pin2mask = bit_be ? bgpio_pin2mask_be : bgpio_pin2mask;
+
+	return 0;
+}
+
+/*
+ * Create the device and allocate the resources.  For setting GPIOs there are
+ * three supported configurations:
+ *
+ *	- single input/output register resource (named "dat").
+ *	- set/clear pair (named "set" and "clr").
+ *	- single output register resource and single input resource ("set" and
+ *	"dat").
+ *
+ * For the single output register, this drives a 1 by setting a bit and a zero
+ * by clearing a bit.  For the set/clr pair, this drives a 1 by setting a bit
+ * in the set register and clears it by setting a bit in the clear register.
+ * The configuration is detected by which resources are present.
+ *
+ * For setting the GPIO direction, there are three supported configurations:
+ *
+ *	- simple bidirectional GPIO that requires no configuration.
+ *	- an output direction register (named "dirout") where a 1 bit
+ *	indicates the GPIO is an output.
+ *	- an input direction register (named "dirin") where a 1 bit indicates
+ *	the GPIO is an input.
+ */
+static int bgpio_setup_io(struct gpio_chip *gc,
+			  void __iomem *dat,
+			  void __iomem *set,
+			  void __iomem *clr,
+			  unsigned long flags)
+{
+
+	gc->reg_dat = dat;
+	if (!gc->reg_dat)
+		return -EINVAL;
+
+	if (set && clr) {
+		gc->reg_set = set;
+		gc->reg_clr = clr;
+		gc->set = bgpio_set_with_clear;
+		gc->set_multiple = bgpio_set_multiple_with_clear;
+	} else if (set && !clr) {
+		gc->reg_set = set;
+		gc->set = bgpio_set_set;
+		gc->set_multiple = bgpio_set_multiple_set;
+	} else if (flags & BGPIOF_NO_OUTPUT) {
+		gc->set = bgpio_set_none;
+		gc->set_multiple = NULL;
+	} else {
+		gc->set = bgpio_set;
+		gc->set_multiple = bgpio_set_multiple;
+	}
+
+	if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
+	    (flags & BGPIOF_READ_OUTPUT_REG_SET))
+		gc->get = bgpio_get_set;
+	else
+		gc->get = bgpio_get;
+
+	return 0;
+}
+
+static int bgpio_setup_direction(struct gpio_chip *gc,
+				 void __iomem *dirout,
+				 void __iomem *dirin,
+				 unsigned long flags)
+{
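+	/* Only one of the two direction registers may be provided */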
+	if (dirout && dirin) {
+		return -EINVAL;
+	} else if (dirout) {
+		gc->reg_dir = dirout;
+		gc->direction_output = bgpio_dir_out;
+		gc->direction_input = bgpio_dir_in;
+		gc->get_direction = bgpio_get_dir;
+	} else if (dirin) {
+		gc->reg_dir = dirin;
+		gc->direction_output = bgpio_dir_out_inv;
+		gc->direction_input = bgpio_dir_in_inv;
+		gc->get_direction = bgpio_get_dir_inv;
+	} else {
+		if (flags & BGPIOF_NO_OUTPUT)
+			gc->direction_output = bgpio_dir_out_err;
+		else
+			gc->direction_output = bgpio_simple_dir_out;
+		gc->direction_input = bgpio_simple_dir_in;
+	}
+
+	return 0;
+}
+
+static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
+{
+	if (gpio_pin < chip->ngpio)
+		return 0;
+
+	return -EINVAL;
+}
+
+int bgpio_init(struct gpio_chip *gc, struct device *dev,
+	       unsigned long sz, void __iomem *dat, void __iomem *set,
+	       void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
+	       unsigned long flags)
+{
+	int ret;
+
+	if (!is_power_of_2(sz))
+		return -EINVAL;
+
+	gc->bgpio_bits = sz * 8;
+	if (gc->bgpio_bits > BITS_PER_LONG)
+		return -EINVAL;
+
+	spin_lock_init(&gc->bgpio_lock);
+	gc->parent = dev;
+	gc->label = dev_name(dev);
+	gc->base = -1;
+	gc->ngpio = gc->bgpio_bits;
+	gc->request = bgpio_request;
+
+	ret = bgpio_setup_io(gc, dat, set, clr, flags);
+	if (ret)
+		return ret;
+
+	ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN,
+				    flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+	if (ret)
+		return ret;
+
+	ret = bgpio_setup_direction(gc, dirout, dirin, flags);
+	if (ret)
+		return ret;
+
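+	/* Latch current hardware state for later read-modify-writes */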
+	gc->bgpio_data = gc->read_reg(gc->reg_dat);
+	if (gc->set == bgpio_set_set &&
+			!(flags & BGPIOF_UNREADABLE_REG_SET))
+		gc->bgpio_data = gc->read_reg(gc->reg_set);
+	if (gc->reg_dir && !(flags & BGPIOF_UNREADABLE_REG_DIR))
+		gc->bgpio_dir = gc->read_reg(gc->reg_dir);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(bgpio_init);
+
+#if IS_ENABLED(CONFIG_GPIO_GENERIC_PLATFORM)
+
+static void __iomem *bgpio_map(struct platform_device *pdev,
+			       const char *name,
+			       resource_size_t sane_sz)
+{
+	struct resource *r;
+	resource_size_t sz;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	if (!r)
+		return NULL;
+
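+	/* Reject resources whose size differs from the expected register width */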
+	sz = resource_size(r);
+	if (sz != sane_sz)
+		return IOMEM_ERR_PTR(-EINVAL);
+
+	return devm_ioremap_resource(&pdev->dev, r);
+}
+
+static int bgpio_pdev_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *r;
+	void __iomem *dat;
+	void __iomem *set;
+	void __iomem *clr;
+	void __iomem *dirout;
+	void __iomem *dirin;
+	unsigned long sz;
+	unsigned long flags = pdev->id_entry->driver_data;
+	int err;
+	struct gpio_chip *gc;
+	struct bgpio_pdata *pdata = dev_get_platdata(dev);
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
+	if (!r)
+		return -EINVAL;
+
+	sz = resource_size(r);
+
+	dat = bgpio_map(pdev, "dat", sz);
+	if (IS_ERR(dat))
+		return PTR_ERR(dat);
+
+	set = bgpio_map(pdev, "set", sz);
+	if (IS_ERR(set))
+		return PTR_ERR(set);
+
+	clr = bgpio_map(pdev, "clr", sz);
+	if (IS_ERR(clr))
+		return PTR_ERR(clr);
+
+	dirout = bgpio_map(pdev, "dirout", sz);
+	if (IS_ERR(dirout))
+		return PTR_ERR(dirout);
+
+	dirin = bgpio_map(pdev, "dirin", sz);
+	if (IS_ERR(dirin))
+		return PTR_ERR(dirin);
+
+	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
+	if (!gc)
+		return -ENOMEM;
+
+	err = bgpio_init(gc, dev, sz, dat, set, clr, dirout, dirin, flags);
+	if (err)
+		return err;
+
+	if (pdata) {
+		if (pdata->label)
+			gc->label = pdata->label;
+		gc->base = pdata->base;
+		if (pdata->ngpio > 0)
+			gc->ngpio = pdata->ngpio;
+	}
+
+	platform_set_drvdata(pdev, gc);
+
+	return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
+}
+
+static const struct platform_device_id bgpio_id_table[] = {
+	{
+		.name		= "basic-mmio-gpio",
+		.driver_data	= 0,
+	}, {
+		.name		= "basic-mmio-gpio-be",
+		.driver_data	= BGPIOF_BIG_ENDIAN,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, bgpio_id_table);
+
+static struct platform_driver bgpio_driver = {
+	.driver = {
+		.name = "basic-mmio-gpio",
+	},
+	.id_table = bgpio_id_table,
+	.probe = bgpio_pdev_probe,
+};
+
+module_platform_driver(bgpio_driver);
+
+#endif /* CONFIG_GPIO_GENERIC_PLATFORM */
+
+MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers");
+MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
index f02d0b4..d58d3890 100644
--- a/drivers/gpio/gpio-moxart.c
+++ b/drivers/gpio/gpio-moxart.c
@@ -15,7 +15,6 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
-#include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_gpio.h>
 #include <linux/pinctrl/consumer.h>
@@ -82,8 +81,4 @@
 	},
 	.probe	= moxart_gpio_probe,
 };
-module_platform_driver(moxart_gpio_driver);
-
-MODULE_DESCRIPTION("MOXART GPIO chip driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+builtin_platform_driver(moxart_gpio_driver);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 11c6582..cd5dc27 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -34,7 +34,7 @@
  */
 
 #include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/gpio.h>
 #include <linux/irq.h>
 #include <linux/slab.h>
@@ -557,7 +557,6 @@
 		/* sentinel */
 	},
 };
-MODULE_DEVICE_TABLE(of, mvebu_gpio_of_match);
 
 static int mvebu_gpio_suspend(struct platform_device *pdev, pm_message_t state)
 {
@@ -838,4 +837,4 @@
 	.suspend        = mvebu_gpio_suspend,
 	.resume         = mvebu_gpio_resume,
 };
-module_platform_driver(mvebu_gpio_driver);
+builtin_platform_driver(mvebu_gpio_driver);
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 47aead1..96a8a8c 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -83,6 +83,7 @@
 	struct octeon_gpio *gpio;
 	struct gpio_chip *chip;
 	struct resource *res_mem;
+	void __iomem *reg_base;
 	int err = 0;
 
 	gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
@@ -91,21 +92,11 @@
 	chip = &gpio->chip;
 
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res_mem == NULL) {
-		dev_err(&pdev->dev, "found no memory resource\n");
-		err = -ENXIO;
-		goto out;
-	}
-	if (!devm_request_mem_region(&pdev->dev, res_mem->start,
-					resource_size(res_mem),
-				     res_mem->name)) {
-		dev_err(&pdev->dev, "request_mem_region failed\n");
-		err = -ENXIO;
-		goto out;
-	}
-	gpio->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
-						resource_size(res_mem));
+	reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+	if (IS_ERR(reg_base))
+		return PTR_ERR(reg_base);
 
+	gpio->register_base = (u64)reg_base;
 	pdev->dev.platform_data = chip;
 	chip->label = "octeon-gpio";
 	chip->parent = &pdev->dev;
@@ -119,14 +110,13 @@
 	chip->set = octeon_gpio_set;
 	err = devm_gpiochip_add_data(&pdev->dev, chip, gpio);
 	if (err)
-		goto out;
+		return err;
 
 	dev_info(&pdev->dev, "OCTEON GPIO driver probed.\n");
-out:
-	return err;
+	return 0;
 }
 
-static struct of_device_id octeon_gpio_match[] = {
+static const struct of_device_id octeon_gpio_match[] = {
 	{
 		.compatible = "cavium,octeon-3860-gpio",
 	},
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 551dfa9..b98ede7 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -611,51 +611,12 @@
 		omap_disable_gpio_irqbank(bank, BIT(offset));
 }
 
-/*
- * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
- * 1510 does not seem to have a wake-up register. If JTAG is connected
- * to the target, system will wake up always on GPIO events. While
- * system is running all registered GPIO interrupts need to have wake-up
- * enabled. When system is suspended, only selected GPIO interrupts need
- * to have wake-up enabled.
- */
-static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
-				int enable)
-{
-	u32 gpio_bit = BIT(offset);
-	unsigned long flags;
-
-	if (bank->non_wakeup_gpios & gpio_bit) {
-		dev_err(bank->chip.parent,
-			"Unable to modify wakeup on non-wakeup GPIO%d\n",
-			offset);
-		return -EINVAL;
-	}
-
-	raw_spin_lock_irqsave(&bank->lock, flags);
-	if (enable)
-		bank->context.wake_en |= gpio_bit;
-	else
-		bank->context.wake_en &= ~gpio_bit;
-
-	writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
-	raw_spin_unlock_irqrestore(&bank->lock, flags);
-
-	return 0;
-}
-
 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
 static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
 {
 	struct gpio_bank *bank = omap_irq_data_get_bank(d);
-	unsigned offset = d->hwirq;
-	int ret;
 
-	ret = omap_set_gpio_wakeup(bank, offset, enable);
-	if (!ret)
-		ret = irq_set_irq_wake(bank->irq, enable);
-
-	return ret;
+	return irq_set_irq_wake(bank->irq, enable);
 }
 
 static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -1187,6 +1148,7 @@
 	irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
 	irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
 	irqc->name = dev_name(&pdev->dev);
+	irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
 
 	bank->irq = platform_get_irq(pdev, 0);
 	if (bank->irq <= 0) {
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 6f27b3d..e248707 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -20,7 +20,7 @@
 
 #include <linux/gpio.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/mfd/palmas.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -218,14 +218,3 @@
 	return platform_driver_register(&palmas_gpio_driver);
 }
 subsys_initcall(palmas_gpio_init);
-
-static void __exit palmas_gpio_exit(void)
-{
-	platform_driver_unregister(&palmas_gpio_driver);
-}
-module_exit(palmas_gpio_exit);
-
-MODULE_ALIAS("platform:palmas-gpio");
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_DESCRIPTION("GPIO driver for TI Palmas series PMICs");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d0d3065..5e3be32 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -18,6 +18,7 @@
 #include <linux/i2c.h>
 #include <linux/platform_data/pca953x.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <linux/of_platform.h>
 #include <linux/acpi.h>
 
@@ -37,8 +38,13 @@
 #define PCA957X_MSK		6
 #define PCA957X_INTS		7
 
+#define PCAL953X_IN_LATCH	34
+#define PCAL953X_INT_MASK	37
+#define PCAL953X_INT_STAT	38
+
 #define PCA_GPIO_MASK		0x00FF
 #define PCA_INT			0x0100
+#define PCA_PCAL			0x0200
 #define PCA953X_TYPE		0x1000
 #define PCA957X_TYPE		0x2000
 #define PCA_TYPE_MASK		0xF000
@@ -76,7 +82,7 @@
 MODULE_DEVICE_TABLE(i2c, pca953x_id);
 
 static const struct acpi_device_id pca953x_acpi_ids[] = {
-	{ "INT3491", 16 | PCA953X_TYPE | PCA_INT, },
+	{ "INT3491", 16 | PCA953X_TYPE | PCA_INT | PCA_PCAL, },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
@@ -159,7 +165,7 @@
 		switch (chip->chip_type) {
 		case PCA953X_TYPE:
 			ret = i2c_smbus_write_word_data(chip->client,
-							reg << 1, (u16) *val);
+			    reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
 			break;
 		case PCA957X_TYPE:
 			ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
@@ -436,6 +442,18 @@
 	struct pca953x_chip *chip = gpiochip_get_data(gc);
 	u8 new_irqs;
 	int level, i;
+	u8 invert_irq_mask[MAX_BANK];
+
+	if (chip->driver_data & PCA_PCAL) {
+		/* Enable latch on interrupt-enabled inputs */
+		pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
+
+		for (i = 0; i < NBANK(chip); i++)
+			invert_irq_mask[i] = ~chip->irq_mask[i];
+
+		/* Unmask enabled interrupts */
+		pca953x_write_regs(chip, PCAL953X_INT_MASK, invert_irq_mask);
+	}
 
 	/* Look for any newly setup interrupt */
 	for (i = 0; i < NBANK(chip); i++) {
@@ -497,6 +515,29 @@
 	u8 trigger[MAX_BANK];
 	int ret, i, offset = 0;
 
+	if (chip->driver_data & PCA_PCAL) {
+		/* Read the current interrupt status from the device */
+		ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
+		if (ret)
+			return false;
+
+		/* Check latched inputs and clear interrupt status */
+		ret = pca953x_read_regs(chip, PCA953X_INPUT, cur_stat);
+		if (ret)
+			return false;
+
+		for (i = 0; i < NBANK(chip); i++) {
+			/* Apply filter for rising/falling edge selection */
+			pending[i] = (~cur_stat[i] & chip->irq_trig_fall[i]) |
+				(cur_stat[i] & chip->irq_trig_raise[i]);
+			pending[i] &= trigger[i];
+			if (pending[i])
+				pending_seen = true;
+		}
+
+		return pending_seen;
+	}
+
 	switch (chip->chip_type) {
 	case PCA953X_TYPE:
 		offset = PCA953X_INPUT;
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 5cb3821..6e3c143 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -1,6 +1,8 @@
 /*
  * Copyright (C) 2008, 2009 Provigent Ltd.
  *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -11,7 +13,7 @@
  */
 #include <linux/spinlock.h>
 #include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
@@ -59,15 +61,19 @@
 #endif
 };
 
+static int pl061_get_direction(struct gpio_chip *gc, unsigned offset)
+{
+	struct pl061_gpio *chip = gpiochip_get_data(gc);
+
+	return !(readb(chip->base + GPIODIR) & BIT(offset));
+}
+
 static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
 {
 	struct pl061_gpio *chip = gpiochip_get_data(gc);
 	unsigned long flags;
 	unsigned char gpiodir;
 
-	if (offset >= gc->ngpio)
-		return -EINVAL;
-
 	spin_lock_irqsave(&chip->lock, flags);
 	gpiodir = readb(chip->base + GPIODIR);
 	gpiodir &= ~(BIT(offset));
@@ -84,9 +90,6 @@
 	unsigned long flags;
 	unsigned char gpiodir;
 
-	if (offset >= gc->ngpio)
-		return -EINVAL;
-
 	spin_lock_irqsave(&chip->lock, flags);
 	writeb(!!value << offset, chip->base + (BIT(offset + 2)));
 	gpiodir = readb(chip->base + GPIODIR);
@@ -319,6 +322,7 @@
 		chip->gc.free = gpiochip_generic_free;
 	}
 
+	chip->gc.get_direction = pl061_get_direction;
 	chip->gc.direction_input = pl061_direction_input;
 	chip->gc.direction_output = pl061_direction_output;
 	chip->gc.get = pl061_get_value;
@@ -429,8 +433,6 @@
 	{ 0, 0 },
 };
 
-MODULE_DEVICE_TABLE(amba, pl061_ids);
-
 static struct amba_driver pl061_gpio_driver = {
 	.drv = {
 		.name	= "pl061_gpio",
@@ -446,8 +448,4 @@
 {
 	return amba_driver_register(&pl061_gpio_driver);
 }
-module_init(pl061_gpio_init);
-
-MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
-MODULE_DESCRIPTION("PL061 GPIO driver");
-MODULE_LICENSE("GPL");
+device_initcall(pl061_gpio_init);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index b2b7b78..76ac906 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -283,8 +283,8 @@
 	writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
 
 	ret = pinctrl_gpio_direction_output(chip->base + offset);
-	if (!ret)
-		return 0;
+	if (ret)
+		return ret;
 
 	spin_lock_irqsave(&gpio_lock, flags);
 
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 1d6100f..3b4dc1a 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/device.h>
 #include <linux/gpio.h>
@@ -152,14 +151,3 @@
 	return platform_driver_register(&rc5t583_gpio_driver);
 }
 subsys_initcall(rc5t583_gpio_init);
-
-static void __exit rc5t583_gpio_exit(void)
-{
-	platform_driver_unregister(&rc5t583_gpio_driver);
-}
-module_exit(rc5t583_gpio_exit);
-
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_DESCRIPTION("GPIO interface for RC5T583");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:rc5t583-gpio");
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index d9ab0cd..681c93f 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,44 +196,6 @@
 	return 0;
 }
 
-static void gpio_rcar_irq_bus_lock(struct irq_data *d)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-	pm_runtime_get_sync(&p->pdev->dev);
-}
-
-static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-	pm_runtime_put(&p->pdev->dev);
-}
-
-
-static int gpio_rcar_irq_request_resources(struct irq_data *d)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-	int error;
-
-	error = pm_runtime_get_sync(&p->pdev->dev);
-	if (error < 0)
-		return error;
-
-	return 0;
-}
-
-static void gpio_rcar_irq_release_resources(struct irq_data *d)
-{
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-	pm_runtime_put(&p->pdev->dev);
-}
-
 static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
 {
 	struct gpio_rcar_priv *p = dev_id;
@@ -280,32 +242,18 @@
 
 static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
 {
-	struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-	int error;
-
-	error = pm_runtime_get_sync(&p->pdev->dev);
-	if (error < 0)
-		return error;
-
-	error = pinctrl_request_gpio(chip->base + offset);
-	if (error)
-		pm_runtime_put(&p->pdev->dev);
-
-	return error;
+	return pinctrl_request_gpio(chip->base + offset);
 }
 
 static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
 {
-	struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-
 	pinctrl_free_gpio(chip->base + offset);
 
-	/* Set the GPIO as an input to ensure that the next GPIO request won't
+	/*
+	 * Set the GPIO as an input to ensure that the next GPIO request won't
 	 * drive the GPIO pin as an output.
 	 */
 	gpio_rcar_config_general_input_output_mode(chip, offset, false);
-
-	pm_runtime_put(&p->pdev->dev);
 }
 
 static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -336,6 +284,25 @@
 	spin_unlock_irqrestore(&p->lock, flags);
 }
 
+static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+				   unsigned long *bits)
+{
+	struct gpio_rcar_priv *p = gpiochip_get_data(chip);
+	unsigned long flags;
+	u32 val, bankmask;
+
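+	/* Only update bits that belong to this bank */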
+	bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
+	if (!bankmask)
+		return;
+
+	spin_lock_irqsave(&p->lock, flags);
+	val = gpio_rcar_read(p, OUTDT);
+	val &= ~bankmask;
+	val |= (bankmask & bits[0]);
+	gpio_rcar_write(p, OUTDT, val);
+	spin_unlock_irqrestore(&p->lock, flags);
+}
+
 static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
 				      int value)
 {
@@ -452,6 +419,7 @@
 	}
 
 	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
 
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -476,6 +444,7 @@
 	gpio_chip->get = gpio_rcar_get;
 	gpio_chip->direction_output = gpio_rcar_direction_output;
 	gpio_chip->set = gpio_rcar_set;
+	gpio_chip->set_multiple = gpio_rcar_set_multiple;
 	gpio_chip->label = name;
 	gpio_chip->parent = dev;
 	gpio_chip->owner = THIS_MODULE;
@@ -488,10 +457,6 @@
 	irq_chip->irq_unmask = gpio_rcar_irq_enable;
 	irq_chip->irq_set_type = gpio_rcar_irq_set_type;
 	irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
-	irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
-	irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
-	irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
-	irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
 	irq_chip->flags	= IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
 
 	ret = gpiochip_add_data(gpio_chip, p);
@@ -522,6 +487,7 @@
 err1:
 	gpiochip_remove(gpio_chip);
 err0:
+	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
 	return ret;
 }
@@ -532,6 +498,7 @@
 
 	gpiochip_remove(&p->gpio_chip);
 
+	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	return 0;
 }
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index e3cb677..7da9e6c 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -3,6 +3,8 @@
  *
  *  Copyright (c) 2010, 2011 Intel Corporation
  *
+ *  Author: Hans J. Koch <hjk@linutronix.de>
+ *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License 2 as published
  *  by the Free Software Foundation.
@@ -15,7 +17,6 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/of_irq.h>
@@ -257,34 +258,17 @@
 	return ret;
 }
 
-static void sdv_gpio_remove(struct pci_dev *pdev)
-{
-	struct sdv_gpio_chip_data *sd = pci_get_drvdata(pdev);
-
-	free_irq(pdev->irq, sd);
-	irq_free_descs(sd->irq_base, SDV_NUM_PUB_GPIOS);
-
-	gpiochip_remove(&sd->chip);
-	pci_release_region(pdev, GPIO_BAR);
-	iounmap(sd->gpio_pub_base);
-	pci_disable_device(pdev);
-	kfree(sd);
-}
-
 static const struct pci_device_id sdv_gpio_pci_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SDV_GPIO) },
 	{ 0, },
 };
 
 static struct pci_driver sdv_gpio_driver = {
+	.driver = {
+		.suppress_bind_attrs = true,
+	},
 	.name = DRV_NAME,
 	.id_table = sdv_gpio_pci_ids,
 	.probe = sdv_gpio_probe,
-	.remove = sdv_gpio_remove,
 };
-
-module_pci_driver(sdv_gpio_driver);
-
-MODULE_AUTHOR("Hans J. Koch <hjk@linutronix.de>");
-MODULE_DESCRIPTION("GPIO interface for Intel Sodaville SoCs");
-MODULE_LICENSE("GPL v2");
+builtin_pci_driver(sdv_gpio_driver);
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 0d5b8c5..853ca23 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -20,7 +20,7 @@
  *
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
@@ -432,8 +432,4 @@
 	},
 	.probe = gsta_probe,
 };
-
-module_platform_driver(sta2x11_gpio_platform_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("sta2x11_gpio GPIO driver");
+builtin_platform_driver(sta2x11_gpio_platform_driver);
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 5197edf..6f7af28 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -5,7 +5,6 @@
  * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
  */
 
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -413,23 +412,13 @@
 	return ret;
 }
 
-static int stmpe_gpio_remove(struct platform_device *pdev)
-{
-	struct stmpe_gpio *stmpe_gpio = platform_get_drvdata(pdev);
-	struct stmpe *stmpe = stmpe_gpio->stmpe;
-
-	gpiochip_remove(&stmpe_gpio->chip);
-	stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
-	kfree(stmpe_gpio);
-
-	return 0;
-}
-
 static struct platform_driver stmpe_gpio_driver = {
-	.driver.name	= "stmpe-gpio",
-	.driver.owner	= THIS_MODULE,
+	.driver = {
+		.suppress_bind_attrs	= true,
+		.name			= "stmpe-gpio",
+		.owner			= THIS_MODULE,
+	},
 	.probe		= stmpe_gpio_probe,
-	.remove		= stmpe_gpio_remove,
 };
 
 static int __init stmpe_gpio_init(void)
@@ -437,13 +426,3 @@
 	return platform_driver_register(&stmpe_gpio_driver);
 }
 subsys_initcall(stmpe_gpio_init);
-
-static void __exit stmpe_gpio_exit(void)
-{
-	platform_driver_unregister(&stmpe_gpio_driver);
-}
-module_exit(stmpe_gpio_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("STMPExxxx GPIO driver");
-MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index d387eb5..a177ebd 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -1,5 +1,9 @@
 /* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
  *
+ * Driver for Semtech SX150X I2C GPIO Expanders
+ *
+ * Author: Gregory Bean <gbean@codeaurora.org>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
@@ -19,10 +23,8 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/i2c/sx150x.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -82,6 +84,57 @@
 	} pri;
 };
 
+/**
+ * struct sx150x_platform_data - config data for SX150x driver
+ * @gpio_base: The index number of the first GPIO assigned to this
+ *             GPIO expander.  The expander will create a block of
+ *             consecutively numbered gpios beginning at the given base,
+ *             with the size of the block depending on the model of the
+ *             expander chip.
+ * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO
+ *                instead of as an oscillator, increasing the size of the
+ *                GP(I)O pool created by this expander by one.  The
+ *                output-only GPO pin will be added at the end of the block.
+ * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor
+ *                 for each IO line in the expander.  Setting the bit at
+ *                 position n will enable the pull-up for the IO at
+ *                 the corresponding offset.  For chips with fewer than
+ *                 16 IO pins, high-end bits are ignored.
+ * @io_pulldn_ena: A bit-mask which enables or disables the pull-down
+ *                 resistor for each IO line in the expander. Setting the
+ *                 bit at position n will enable the pull-down for the IO at
+ *                 the corresponding offset.  For chips with fewer than
+ *                 16 IO pins, high-end bits are ignored.
+ * @io_polarity: A bit-mask which enables polarity inversion for each IO line
+ *               in the expander.  Setting the bit at position n inverts
+ *               the polarity of that IO line, while clearing it results
+ *               in normal polarity. For chips with fewer than 16 IO pins,
+ *               high-end bits are ignored.
+ * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line
+ *               is connected, via which it reports interrupt events
+ *               across all GPIO lines.  This must be a real,
+ *               pre-existing IRQ line.
+ *               Setting this value < 0 disables the irq_chip functionality
+ *               of the driver.
+ * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based
+ *            IRQ lines will appear.  Similarly to gpio_base, the expander
+ *            will create a block of irqs beginning at this number.
+ *            This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ *                      reset of the chip at the beginning of the probe
+ *                      in order to place it in a known state.
+ */
+struct sx150x_platform_data {
+	unsigned gpio_base;
+	bool     oscio_is_gpo;
+	u16      io_pullup_ena;
+	u16      io_pulldn_ena;
+	u16      io_polarity;
+	int      irq_summary;
+	unsigned irq_base;
+	bool     reset_during_probe;
+};
+
 struct sx150x_chip {
 	struct gpio_chip                 gpio_chip;
 	struct i2c_client               *client;
@@ -354,6 +407,32 @@
 	mutex_unlock(&chip->lock);
 }
 
+static int sx150x_gpio_set_single_ended(struct gpio_chip *gc,
+					unsigned offset,
+					enum single_ended_mode mode)
+{
+	struct sx150x_chip *chip = gpiochip_get_data(gc);
+
+	/* On the SX150X 789 we can set open drain */
+	if (chip->dev_cfg->model != SX150X_789)
+		return -ENOTSUPP;
+
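+	/* The drain register takes 0 for push-pull and 1 for open drain */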
+	if (mode == LINE_MODE_PUSH_PULL)
+		return sx150x_write_cfg(chip,
+					offset,
+					1,
+					chip->dev_cfg->pri.x789.reg_drain,
+					0);
+
+	if (mode == LINE_MODE_OPEN_DRAIN)
+		return sx150x_write_cfg(chip,
+					offset,
+					1,
+					chip->dev_cfg->pri.x789.reg_drain,
+					1);
+	return -ENOTSUPP;
+}
+
 static int sx150x_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
 {
 	struct sx150x_chip *chip = gpiochip_get_data(gc);
@@ -508,6 +587,7 @@
 	chip->gpio_chip.direction_output = sx150x_gpio_direction_output;
 	chip->gpio_chip.get              = sx150x_gpio_get;
 	chip->gpio_chip.set              = sx150x_gpio_set;
+	chip->gpio_chip.set_single_ended = sx150x_gpio_set_single_ended;
 	chip->gpio_chip.base             = pdata->gpio_base;
 	chip->gpio_chip.can_sleep        = true;
 	chip->gpio_chip.ngpio            = chip->dev_cfg->ngpios;
@@ -597,12 +677,6 @@
 
 	if (chip->dev_cfg->model == SX150X_789) {
 		err = sx150x_init_io(chip,
-				chip->dev_cfg->pri.x789.reg_drain,
-				pdata->io_open_drain_ena);
-		if (err < 0)
-			return err;
-
-		err = sx150x_init_io(chip,
 				chip->dev_cfg->pri.x789.reg_polarity,
 				pdata->io_polarity);
 		if (err < 0)
@@ -718,13 +792,3 @@
 	return i2c_add_driver(&sx150x_driver);
 }
 subsys_initcall(sx150x_init);
-
-static void __exit sx150x_exit(void)
-{
-	return i2c_del_driver(&sx150x_driver);
-}
-module_exit(sx150x_exit);
-
-MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
-MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 4f566e6..2e35ed3 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -6,14 +6,14 @@
  * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
  */
 
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/of.h>
 #include <linux/interrupt.h>
 #include <linux/mfd/tc3589x.h>
+#include <linux/bitops.h>
 
 /*
  * These registers are modified under the irq bus lock and cached to avoid
@@ -39,7 +39,7 @@
 	struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
 	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
 	u8 reg = TC3589x_GPIODATA0 + (offset / 8) * 2;
-	u8 mask = 1 << (offset % 8);
+	u8 mask = BIT(offset % 8);
 	int ret;
 
 	ret = tc3589x_reg_read(tc3589x, reg);
@@ -55,7 +55,7 @@
 	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
 	u8 reg = TC3589x_GPIODATA0 + (offset / 8) * 2;
 	unsigned pos = offset % 8;
-	u8 data[] = {!!val << pos, 1 << pos};
+	u8 data[] = {val ? BIT(pos) : 0, BIT(pos)};
 
 	tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
 }
@@ -70,7 +70,7 @@
 
 	tc3589x_gpio_set(chip, offset, val);
 
-	return tc3589x_set_bits(tc3589x, reg, 1 << pos, 1 << pos);
+	return tc3589x_set_bits(tc3589x, reg, BIT(pos), BIT(pos));
 }
 
 static int tc3589x_gpio_direction_input(struct gpio_chip *chip,
@@ -81,7 +81,47 @@
 	u8 reg = TC3589x_GPIODIR0 + offset / 8;
 	unsigned pos = offset % 8;
 
-	return tc3589x_set_bits(tc3589x, reg, 1 << pos, 0);
+	return tc3589x_set_bits(tc3589x, reg, BIT(pos), 0);
+}
+
+static int tc3589x_gpio_single_ended(struct gpio_chip *chip,
+				     unsigned offset,
+				     enum single_ended_mode mode)
+{
+	struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
+	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
+	/*
+	 * These registers are laid out at every second address:
+	 * ODM bit 0 = drive to GND or Hi-Z (open drain)
+	 * ODM bit 1 = drive to VDD or Hi-Z (open source)
+	 */
+	u8 odmreg = TC3589x_GPIOODM0 + (offset / 8) * 2;
+	u8 odereg = TC3589x_GPIOODE0 + (offset / 8) * 2;
+	unsigned pos = offset % 8;
+	int ret;
+
+	switch (mode) {
+	case LINE_MODE_OPEN_DRAIN:
+		/* Set open drain mode */
+		ret = tc3589x_set_bits(tc3589x, odmreg, BIT(pos), 0);
+		if (ret)
+			return ret;
+		/* Enable open drain/source mode */
+		return tc3589x_set_bits(tc3589x, odereg, BIT(pos), BIT(pos));
+	case LINE_MODE_OPEN_SOURCE:
+		/* Set open source mode */
+		ret = tc3589x_set_bits(tc3589x, odmreg, BIT(pos), BIT(pos));
+		if (ret)
+			return ret;
+		/* Enable open drain/source mode */
+		return tc3589x_set_bits(tc3589x, odereg, BIT(pos), BIT(pos));
+	case LINE_MODE_PUSH_PULL:
+		/* Disable open drain/source mode */
+		return tc3589x_set_bits(tc3589x, odereg, BIT(pos), 0);
+	default:
+		break;
+	}
+	return -ENOTSUPP;
 }
 
 static struct gpio_chip template_chip = {
@@ -91,6 +131,7 @@
 	.get			= tc3589x_gpio_get,
 	.direction_output	= tc3589x_gpio_direction_output,
 	.set			= tc3589x_gpio_set,
+	.set_single_ended	= tc3589x_gpio_single_ended,
 	.can_sleep		= true,
 };
 
@@ -100,7 +141,7 @@
 	struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
-	int mask = 1 << (offset % 8);
+	int mask = BIT(offset % 8);
 
 	if (type == IRQ_TYPE_EDGE_BOTH) {
 		tc3589x_gpio->regs[REG_IBE][regoffset] |= mask;
@@ -165,7 +206,7 @@
 	struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
-	int mask = 1 << (offset % 8);
+	int mask = BIT(offset % 8);
 
 	tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask;
 }
@@ -176,7 +217,7 @@
 	struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
-	int mask = 1 << (offset % 8);
+	int mask = BIT(offset % 8);
 
 	tc3589x_gpio->regs[REG_IE][regoffset] |= mask;
 }
@@ -311,13 +352,3 @@
 	return platform_driver_register(&tc3589x_gpio_driver);
 }
 subsys_initcall(tc3589x_gpio_init);
-
-static void __exit tc3589x_gpio_exit(void)
-{
-	platform_driver_unregister(&tc3589x_gpio_driver);
-}
-module_exit(tc3589x_gpio_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("TC3589x GPIO driver");
-MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 790bb11..ec891a27 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -35,24 +35,27 @@
 #define GPIO_PORT(x)		(((x) >> 3) & 0x3)
 #define GPIO_BIT(x)		((x) & 0x7)
 
-#define GPIO_REG(x)		(GPIO_BANK(x) * tegra_gpio_bank_stride + \
+#define GPIO_REG(tgi, x)	(GPIO_BANK(x) * tgi->soc->bank_stride + \
 					GPIO_PORT(x) * 4)
 
-#define GPIO_CNF(x)		(GPIO_REG(x) + 0x00)
-#define GPIO_OE(x)		(GPIO_REG(x) + 0x10)
-#define GPIO_OUT(x)		(GPIO_REG(x) + 0X20)
-#define GPIO_IN(x)		(GPIO_REG(x) + 0x30)
-#define GPIO_INT_STA(x)		(GPIO_REG(x) + 0x40)
-#define GPIO_INT_ENB(x)		(GPIO_REG(x) + 0x50)
-#define GPIO_INT_LVL(x)		(GPIO_REG(x) + 0x60)
-#define GPIO_INT_CLR(x)		(GPIO_REG(x) + 0x70)
+#define GPIO_CNF(t, x)		(GPIO_REG(t, x) + 0x00)
+#define GPIO_OE(t, x)		(GPIO_REG(t, x) + 0x10)
+#define GPIO_OUT(t, x)		(GPIO_REG(t, x) + 0X20)
+#define GPIO_IN(t, x)		(GPIO_REG(t, x) + 0x30)
+#define GPIO_INT_STA(t, x)	(GPIO_REG(t, x) + 0x40)
+#define GPIO_INT_ENB(t, x)	(GPIO_REG(t, x) + 0x50)
+#define GPIO_INT_LVL(t, x)	(GPIO_REG(t, x) + 0x60)
+#define GPIO_INT_CLR(t, x)	(GPIO_REG(t, x) + 0x70)
+#define GPIO_DBC_CNT(t, x)	(GPIO_REG(t, x) + 0xF0)
 
-#define GPIO_MSK_CNF(x)		(GPIO_REG(x) + tegra_gpio_upper_offset + 0x00)
-#define GPIO_MSK_OE(x)		(GPIO_REG(x) + tegra_gpio_upper_offset + 0x10)
-#define GPIO_MSK_OUT(x)		(GPIO_REG(x) + tegra_gpio_upper_offset + 0X20)
-#define GPIO_MSK_INT_STA(x)	(GPIO_REG(x) + tegra_gpio_upper_offset + 0x40)
-#define GPIO_MSK_INT_ENB(x)	(GPIO_REG(x) + tegra_gpio_upper_offset + 0x50)
-#define GPIO_MSK_INT_LVL(x)	(GPIO_REG(x) + tegra_gpio_upper_offset + 0x60)
+
+#define GPIO_MSK_CNF(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x00)
+#define GPIO_MSK_OE(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x10)
+#define GPIO_MSK_OUT(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0X20)
+#define GPIO_MSK_DBC_EN(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x30)
+#define GPIO_MSK_INT_STA(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x40)
+#define GPIO_MSK_INT_ENB(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x50)
+#define GPIO_MSK_INT_LVL(t, x)	(GPIO_REG(t, x) + t->soc->upper_offset + 0x60)
 
 #define GPIO_INT_LVL_MASK		0x010101
 #define GPIO_INT_LVL_EDGE_RISING	0x000101
@@ -61,10 +64,13 @@
 #define GPIO_INT_LVL_LEVEL_HIGH		0x000001
 #define GPIO_INT_LVL_LEVEL_LOW		0x000000
 
+struct tegra_gpio_info;
+
 struct tegra_gpio_bank {
 	int bank;
 	int irq;
 	spinlock_t lvl_lock[4];
+	spinlock_t dbc_lock[4];	/* Lock for updating debounce count register */
 #ifdef CONFIG_PM_SLEEP
 	u32 cnf[4];
 	u32 out[4];
@@ -72,25 +78,39 @@
 	u32 int_enb[4];
 	u32 int_lvl[4];
 	u32 wake_enb[4];
+	u32 dbc_enb[4];
 #endif
+	u32 dbc_cnt[4];
+	struct tegra_gpio_info *tgi;
 };
 
-static struct device *dev;
-static struct irq_domain *irq_domain;
-static void __iomem *regs;
-static u32 tegra_gpio_bank_count;
-static u32 tegra_gpio_bank_stride;
-static u32 tegra_gpio_upper_offset;
-static struct tegra_gpio_bank *tegra_gpio_banks;
+struct tegra_gpio_soc_config {
+	bool debounce_supported;
+	u32 bank_stride;
+	u32 upper_offset;
+};
 
-static inline void tegra_gpio_writel(u32 val, u32 reg)
+struct tegra_gpio_info {
+	struct device				*dev;
+	void __iomem				*regs;
+	struct irq_domain			*irq_domain;
+	struct tegra_gpio_bank			*bank_info;
+	const struct tegra_gpio_soc_config	*soc;
+	struct gpio_chip			gc;
+	struct irq_chip				ic;
+	struct lock_class_key			lock_class;
+	u32					bank_count;
+};
+
+static inline void tegra_gpio_writel(struct tegra_gpio_info *tgi,
+				     u32 val, u32 reg)
 {
-	__raw_writel(val, regs + reg);
+	__raw_writel(val, tgi->regs + reg);
 }
 
-static inline u32 tegra_gpio_readl(u32 reg)
+static inline u32 tegra_gpio_readl(struct tegra_gpio_info *tgi, u32 reg)
 {
-	return __raw_readl(regs + reg);
+	return __raw_readl(tgi->regs + reg);
 }
 
 static int tegra_gpio_compose(int bank, int port, int bit)
@@ -98,24 +118,25 @@
 	return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7);
 }
 
-static void tegra_gpio_mask_write(u32 reg, int gpio, int value)
+static void tegra_gpio_mask_write(struct tegra_gpio_info *tgi, u32 reg,
+				  int gpio, int value)
 {
 	u32 val;
 
 	val = 0x100 << GPIO_BIT(gpio);
 	if (value)
 		val |= 1 << GPIO_BIT(gpio);
-	tegra_gpio_writel(val, reg);
+	tegra_gpio_writel(tgi, val, reg);
 }
 
-static void tegra_gpio_enable(int gpio)
+static void tegra_gpio_enable(struct tegra_gpio_info *tgi, int gpio)
 {
-	tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 1);
+	tegra_gpio_mask_write(tgi, GPIO_MSK_CNF(tgi, gpio), gpio, 1);
 }
 
-static void tegra_gpio_disable(int gpio)
+static void tegra_gpio_disable(struct tegra_gpio_info *tgi, int gpio)
 {
-	tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 0);
+	tegra_gpio_mask_write(tgi, GPIO_MSK_CNF(tgi, gpio), gpio, 0);
 }
 
 static int tegra_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -125,83 +146,138 @@
 
 static void tegra_gpio_free(struct gpio_chip *chip, unsigned offset)
 {
+	struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
 	pinctrl_free_gpio(offset);
-	tegra_gpio_disable(offset);
+	tegra_gpio_disable(tgi, offset);
 }
 
 static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
-	tegra_gpio_mask_write(GPIO_MSK_OUT(offset), offset, value);
+	struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
+	tegra_gpio_mask_write(tgi, GPIO_MSK_OUT(tgi, offset), offset, value);
 }
 
 static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
-	/* If gpio is in output mode then read from the out value */
-	if ((tegra_gpio_readl(GPIO_OE(offset)) >> GPIO_BIT(offset)) & 1)
-		return (tegra_gpio_readl(GPIO_OUT(offset)) >>
-				GPIO_BIT(offset)) & 0x1;
+	struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+	int bval = BIT(GPIO_BIT(offset));
 
-	return (tegra_gpio_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1;
+	/* If gpio is in output mode then read from the out value */
+	if (tegra_gpio_readl(tgi, GPIO_OE(tgi, offset)) & bval)
+		return !!(tegra_gpio_readl(tgi, GPIO_OUT(tgi, offset)) & bval);
+
+	return !!(tegra_gpio_readl(tgi, GPIO_IN(tgi, offset)) & bval);
 }
 
 static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
-	tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 0);
-	tegra_gpio_enable(offset);
+	struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
+	tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, offset), offset, 0);
+	tegra_gpio_enable(tgi, offset);
 	return 0;
 }
 
 static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 					int value)
 {
+	struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
 	tegra_gpio_set(chip, offset, value);
-	tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 1);
-	tegra_gpio_enable(offset);
+	tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, offset), offset, 1);
+	tegra_gpio_enable(tgi, offset);
+	return 0;
+}
+
+static int tegra_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+	struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+	u32 pin_mask = BIT(GPIO_BIT(offset));
+	u32 cnf, oe;
+
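+	/* Direction is only defined when the pin is configured as a GPIO */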
+	cnf = tegra_gpio_readl(tgi, GPIO_CNF(tgi, offset));
+	if (!(cnf & pin_mask))
+		return -EINVAL;
+
+	oe = tegra_gpio_readl(tgi, GPIO_OE(tgi, offset));
+
+	return (oe & pin_mask) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
+}
+
+static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
+				   unsigned int debounce)
+{
+	struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+	struct tegra_gpio_bank *bank = &tgi->bank_info[GPIO_BANK(offset)];
+	unsigned int debounce_ms = DIV_ROUND_UP(debounce, 1000);
+	unsigned long flags;
+	int port;
+
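+	/* A debounce time of zero disables debouncing for this line */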
+	if (!debounce_ms) {
+		tegra_gpio_mask_write(tgi, GPIO_MSK_DBC_EN(tgi, offset),
+				      offset, 0);
+		return 0;
+	}
+
+	debounce_ms = min(debounce_ms, 255U);
+	port = GPIO_PORT(offset);
+
+	/*
+	 * There is only one debounce count register per port and hence
+	 * set the maximum of current and requested debounce time.
+	 */
+	spin_lock_irqsave(&bank->dbc_lock[port], flags);
+	if (bank->dbc_cnt[port] < debounce_ms) {
+		tegra_gpio_writel(tgi, debounce_ms, GPIO_DBC_CNT(tgi, offset));
+		bank->dbc_cnt[port] = debounce_ms;
+	}
+	spin_unlock_irqrestore(&bank->dbc_lock[port], flags);
+
+	tegra_gpio_mask_write(tgi, GPIO_MSK_DBC_EN(tgi, offset), offset, 1);
+
 	return 0;
 }
 
 static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
-	return irq_find_mapping(irq_domain, offset);
-}
+	struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
 
-static struct gpio_chip tegra_gpio_chip = {
-	.label			= "tegra-gpio",
-	.request		= tegra_gpio_request,
-	.free			= tegra_gpio_free,
-	.direction_input	= tegra_gpio_direction_input,
-	.get			= tegra_gpio_get,
-	.direction_output	= tegra_gpio_direction_output,
-	.set			= tegra_gpio_set,
-	.to_irq			= tegra_gpio_to_irq,
-	.base			= 0,
-};
+	return irq_find_mapping(tgi->irq_domain, offset);
+}
 
 static void tegra_gpio_irq_ack(struct irq_data *d)
 {
+	struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+	struct tegra_gpio_info *tgi = bank->tgi;
 	int gpio = d->hwirq;
 
-	tegra_gpio_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
+	tegra_gpio_writel(tgi, 1 << GPIO_BIT(gpio), GPIO_INT_CLR(tgi, gpio));
 }
 
 static void tegra_gpio_irq_mask(struct irq_data *d)
 {
+	struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+	struct tegra_gpio_info *tgi = bank->tgi;
 	int gpio = d->hwirq;
 
-	tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 0);
+	tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 0);
 }
 
 static void tegra_gpio_irq_unmask(struct irq_data *d)
 {
+	struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+	struct tegra_gpio_info *tgi = bank->tgi;
 	int gpio = d->hwirq;
 
-	tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 1);
+	tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 1);
 }
 
 static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	int gpio = d->hwirq;
 	struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+	struct tegra_gpio_info *tgi = bank->tgi;
 	int port = GPIO_PORT(gpio);
 	int lvl_type;
 	int val;
@@ -233,23 +309,24 @@
 		return -EINVAL;
 	}
 
-	ret = gpiochip_lock_as_irq(&tegra_gpio_chip, gpio);
+	ret = gpiochip_lock_as_irq(&tgi->gc, gpio);
 	if (ret) {
-		dev_err(dev, "unable to lock Tegra GPIO %d as IRQ\n", gpio);
+		dev_err(tgi->dev,
+			"unable to lock Tegra GPIO %d as IRQ\n", gpio);
 		return ret;
 	}
 
 	spin_lock_irqsave(&bank->lvl_lock[port], flags);
 
-	val = tegra_gpio_readl(GPIO_INT_LVL(gpio));
+	val = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
 	val &= ~(GPIO_INT_LVL_MASK << GPIO_BIT(gpio));
 	val |= lvl_type << GPIO_BIT(gpio);
-	tegra_gpio_writel(val, GPIO_INT_LVL(gpio));
+	tegra_gpio_writel(tgi, val, GPIO_INT_LVL(tgi, gpio));
 
 	spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
 
-	tegra_gpio_mask_write(GPIO_MSK_OE(gpio), gpio, 0);
-	tegra_gpio_enable(gpio);
+	tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, gpio), gpio, 0);
+	tegra_gpio_enable(tgi, gpio);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
 		irq_set_handler_locked(d, handle_level_irq);
@@ -261,9 +338,11 @@
 
 static void tegra_gpio_irq_shutdown(struct irq_data *d)
 {
+	struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+	struct tegra_gpio_info *tgi = bank->tgi;
 	int gpio = d->hwirq;
 
-	gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio);
+	gpiochip_unlock_as_irq(&tgi->gc, gpio);
 }
 
 static void tegra_gpio_irq_handler(struct irq_desc *desc)
@@ -271,19 +350,24 @@
 	int port;
 	int pin;
 	int unmasked = 0;
+	int gpio;
+	u32 lvl;
+	unsigned long sta;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct tegra_gpio_bank *bank = irq_desc_get_handler_data(desc);
+	struct tegra_gpio_info *tgi = bank->tgi;
 
 	chained_irq_enter(chip, desc);
 
 	for (port = 0; port < 4; port++) {
-		int gpio = tegra_gpio_compose(bank->bank, port, 0);
-		unsigned long sta = tegra_gpio_readl(GPIO_INT_STA(gpio)) &
-			tegra_gpio_readl(GPIO_INT_ENB(gpio));
-		u32 lvl = tegra_gpio_readl(GPIO_INT_LVL(gpio));
+		gpio = tegra_gpio_compose(bank->bank, port, 0);
+		sta = tegra_gpio_readl(tgi, GPIO_INT_STA(tgi, gpio)) &
+			tegra_gpio_readl(tgi, GPIO_INT_ENB(tgi, gpio));
+		lvl = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
 
 		for_each_set_bit(pin, &sta, 8) {
-			tegra_gpio_writel(1 << pin, GPIO_INT_CLR(gpio));
+			tegra_gpio_writel(tgi, 1 << pin,
+					  GPIO_INT_CLR(tgi, gpio));
 
 			/* if gpio is edge triggered, clear condition
 			 * before executing the handler so that we don't
@@ -306,22 +390,37 @@
 #ifdef CONFIG_PM_SLEEP
 static int tegra_gpio_resume(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tegra_gpio_info *tgi = platform_get_drvdata(pdev);
 	unsigned long flags;
 	int b;
 	int p;
 
 	local_irq_save(flags);
 
-	for (b = 0; b < tegra_gpio_bank_count; b++) {
-		struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
+	for (b = 0; b < tgi->bank_count; b++) {
+		struct tegra_gpio_bank *bank = &tgi->bank_info[b];
 
 		for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
 			unsigned int gpio = (b<<5) | (p<<3);
-			tegra_gpio_writel(bank->cnf[p], GPIO_CNF(gpio));
-			tegra_gpio_writel(bank->out[p], GPIO_OUT(gpio));
-			tegra_gpio_writel(bank->oe[p], GPIO_OE(gpio));
-			tegra_gpio_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio));
-			tegra_gpio_writel(bank->int_enb[p], GPIO_INT_ENB(gpio));
+			tegra_gpio_writel(tgi, bank->cnf[p],
+					  GPIO_CNF(tgi, gpio));
+
+			if (tgi->soc->debounce_supported) {
+				tegra_gpio_writel(tgi, bank->dbc_cnt[p],
+						  GPIO_DBC_CNT(tgi, gpio));
+				tegra_gpio_writel(tgi, bank->dbc_enb[p],
+						  GPIO_MSK_DBC_EN(tgi, gpio));
+			}
+
+			tegra_gpio_writel(tgi, bank->out[p],
+					  GPIO_OUT(tgi, gpio));
+			tegra_gpio_writel(tgi, bank->oe[p],
+					  GPIO_OE(tgi, gpio));
+			tegra_gpio_writel(tgi, bank->int_lvl[p],
+					  GPIO_INT_LVL(tgi, gpio));
+			tegra_gpio_writel(tgi, bank->int_enb[p],
+					  GPIO_INT_ENB(tgi, gpio));
 		}
 	}
 
@@ -331,25 +430,39 @@
 
 static int tegra_gpio_suspend(struct device *dev)
 {
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tegra_gpio_info *tgi = platform_get_drvdata(pdev);
 	unsigned long flags;
 	int b;
 	int p;
 
 	local_irq_save(flags);
-	for (b = 0; b < tegra_gpio_bank_count; b++) {
-		struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
+	for (b = 0; b < tgi->bank_count; b++) {
+		struct tegra_gpio_bank *bank = &tgi->bank_info[b];
 
 		for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
 			unsigned int gpio = (b<<5) | (p<<3);
-			bank->cnf[p] = tegra_gpio_readl(GPIO_CNF(gpio));
-			bank->out[p] = tegra_gpio_readl(GPIO_OUT(gpio));
-			bank->oe[p] = tegra_gpio_readl(GPIO_OE(gpio));
-			bank->int_enb[p] = tegra_gpio_readl(GPIO_INT_ENB(gpio));
-			bank->int_lvl[p] = tegra_gpio_readl(GPIO_INT_LVL(gpio));
+			bank->cnf[p] = tegra_gpio_readl(tgi,
+							GPIO_CNF(tgi, gpio));
+			bank->out[p] = tegra_gpio_readl(tgi,
+							GPIO_OUT(tgi, gpio));
+			bank->oe[p] = tegra_gpio_readl(tgi,
+						       GPIO_OE(tgi, gpio));
+			if (tgi->soc->debounce_supported) {
+				bank->dbc_enb[p] = tegra_gpio_readl(tgi,
+						GPIO_MSK_DBC_EN(tgi, gpio));
+				bank->dbc_enb[p] = (bank->dbc_enb[p] << 8) |
+							bank->dbc_enb[p];
+			}
+
+			bank->int_enb[p] = tegra_gpio_readl(tgi,
+						GPIO_INT_ENB(tgi, gpio));
+			bank->int_lvl[p] = tegra_gpio_readl(tgi,
+						GPIO_INT_LVL(tgi, gpio));
 
 			/* Enable gpio irq for wake up source */
-			tegra_gpio_writel(bank->wake_enb[p],
-					  GPIO_INT_ENB(gpio));
+			tegra_gpio_writel(tgi, bank->wake_enb[p],
+					  GPIO_INT_ENB(tgi, gpio));
 		}
 	}
 	local_irq_restore(flags);
@@ -382,22 +495,23 @@
 
 static int dbg_gpio_show(struct seq_file *s, void *unused)
 {
+	struct tegra_gpio_info *tgi = s->private;
 	int i;
 	int j;
 
-	for (i = 0; i < tegra_gpio_bank_count; i++) {
+	for (i = 0; i < tgi->bank_count; i++) {
 		for (j = 0; j < 4; j++) {
 			int gpio = tegra_gpio_compose(i, j, 0);
 			seq_printf(s,
 				"%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
 				i, j,
-				tegra_gpio_readl(GPIO_CNF(gpio)),
-				tegra_gpio_readl(GPIO_OE(gpio)),
-				tegra_gpio_readl(GPIO_OUT(gpio)),
-				tegra_gpio_readl(GPIO_IN(gpio)),
-				tegra_gpio_readl(GPIO_INT_STA(gpio)),
-				tegra_gpio_readl(GPIO_INT_ENB(gpio)),
-				tegra_gpio_readl(GPIO_INT_LVL(gpio)));
+				tegra_gpio_readl(tgi, GPIO_CNF(tgi, gpio)),
+				tegra_gpio_readl(tgi, GPIO_OE(tgi, gpio)),
+				tegra_gpio_readl(tgi, GPIO_OUT(tgi, gpio)),
+				tegra_gpio_readl(tgi, GPIO_IN(tgi, gpio)),
+				tegra_gpio_readl(tgi, GPIO_INT_STA(tgi, gpio)),
+				tegra_gpio_readl(tgi, GPIO_INT_ENB(tgi, gpio)),
+				tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio)));
 		}
 	}
 	return 0;
@@ -405,7 +519,7 @@
 
 static int dbg_gpio_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, dbg_gpio_show, &inode->i_private);
+	return single_open(file, dbg_gpio_show, inode->i_private);
 }
 
 static const struct file_operations debug_fops = {
@@ -415,66 +529,28 @@
 	.release	= single_release,
 };
 
-static void tegra_gpio_debuginit(void)
+static void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
 {
 	(void) debugfs_create_file("tegra_gpio", S_IRUGO,
-					NULL, NULL, &debug_fops);
+					NULL, tgi, &debug_fops);
 }
 
 #else
 
-static inline void tegra_gpio_debuginit(void)
+static inline void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
 {
 }
 
 #endif
 
-static struct irq_chip tegra_gpio_irq_chip = {
-	.name		= "GPIO",
-	.irq_ack	= tegra_gpio_irq_ack,
-	.irq_mask	= tegra_gpio_irq_mask,
-	.irq_unmask	= tegra_gpio_irq_unmask,
-	.irq_set_type	= tegra_gpio_irq_set_type,
-	.irq_shutdown	= tegra_gpio_irq_shutdown,
-#ifdef CONFIG_PM_SLEEP
-	.irq_set_wake	= tegra_gpio_irq_set_wake,
-#endif
-};
-
 static const struct dev_pm_ops tegra_gpio_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
 };
 
-struct tegra_gpio_soc_config {
-	u32 bank_stride;
-	u32 upper_offset;
-};
-
-static struct tegra_gpio_soc_config tegra20_gpio_config = {
-	.bank_stride = 0x80,
-	.upper_offset = 0x800,
-};
-
-static struct tegra_gpio_soc_config tegra30_gpio_config = {
-	.bank_stride = 0x100,
-	.upper_offset = 0x80,
-};
-
-static const struct of_device_id tegra_gpio_of_match[] = {
-	{ .compatible = "nvidia,tegra30-gpio", .data = &tegra30_gpio_config },
-	{ .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config },
-	{ },
-};
-
-/* This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpio_lock_class;
-
 static int tegra_gpio_probe(struct platform_device *pdev)
 {
-	const struct of_device_id *match;
-	struct tegra_gpio_soc_config *config;
+	const struct tegra_gpio_soc_config *config;
+	struct tegra_gpio_info *tgi;
 	struct resource *res;
 	struct tegra_gpio_bank *bank;
 	int ret;
@@ -482,102 +558,153 @@
 	int i;
 	int j;
 
-	dev = &pdev->dev;
-
-	match = of_match_device(tegra_gpio_of_match, &pdev->dev);
-	if (!match) {
+	config = of_device_get_match_data(&pdev->dev);
+	if (!config) {
 		dev_err(&pdev->dev, "Error: No device match found\n");
 		return -ENODEV;
 	}
-	config = (struct tegra_gpio_soc_config *)match->data;
 
-	tegra_gpio_bank_stride = config->bank_stride;
-	tegra_gpio_upper_offset = config->upper_offset;
+	tgi = devm_kzalloc(&pdev->dev, sizeof(*tgi), GFP_KERNEL);
+	if (!tgi)
+		return -ENODEV;
+
+	tgi->soc = config;
+	tgi->dev = &pdev->dev;
 
 	for (;;) {
-		res = platform_get_resource(pdev, IORESOURCE_IRQ, tegra_gpio_bank_count);
+		res = platform_get_resource(pdev, IORESOURCE_IRQ,
+					    tgi->bank_count);
 		if (!res)
 			break;
-		tegra_gpio_bank_count++;
+		tgi->bank_count++;
 	}
-	if (!tegra_gpio_bank_count) {
+	if (!tgi->bank_count) {
 		dev_err(&pdev->dev, "Missing IRQ resource\n");
 		return -ENODEV;
 	}
 
-	tegra_gpio_chip.ngpio = tegra_gpio_bank_count * 32;
+	tgi->gc.label			= "tegra-gpio";
+	tgi->gc.request			= tegra_gpio_request;
+	tgi->gc.free			= tegra_gpio_free;
+	tgi->gc.direction_input		= tegra_gpio_direction_input;
+	tgi->gc.get			= tegra_gpio_get;
+	tgi->gc.direction_output	= tegra_gpio_direction_output;
+	tgi->gc.set			= tegra_gpio_set;
+	tgi->gc.get_direction		= tegra_gpio_get_direction;
+	tgi->gc.to_irq			= tegra_gpio_to_irq;
+	tgi->gc.base			= 0;
+	tgi->gc.ngpio			= tgi->bank_count * 32;
+	tgi->gc.parent			= &pdev->dev;
+	tgi->gc.of_node			= pdev->dev.of_node;
 
-	tegra_gpio_banks = devm_kzalloc(&pdev->dev,
-			tegra_gpio_bank_count * sizeof(*tegra_gpio_banks),
-			GFP_KERNEL);
-	if (!tegra_gpio_banks)
+	tgi->ic.name			= "GPIO";
+	tgi->ic.irq_ack			= tegra_gpio_irq_ack;
+	tgi->ic.irq_mask		= tegra_gpio_irq_mask;
+	tgi->ic.irq_unmask		= tegra_gpio_irq_unmask;
+	tgi->ic.irq_set_type		= tegra_gpio_irq_set_type;
+	tgi->ic.irq_shutdown		= tegra_gpio_irq_shutdown;
+#ifdef CONFIG_PM_SLEEP
+	tgi->ic.irq_set_wake		= tegra_gpio_irq_set_wake;
+#endif
+
+	platform_set_drvdata(pdev, tgi);
+
+	if (config->debounce_supported)
+		tgi->gc.set_debounce = tegra_gpio_set_debounce;
+
+	tgi->bank_info = devm_kzalloc(&pdev->dev, tgi->bank_count *
+				      sizeof(*tgi->bank_info), GFP_KERNEL);
+	if (!tgi->bank_info)
 		return -ENODEV;
 
-	irq_domain = irq_domain_add_linear(pdev->dev.of_node,
-					   tegra_gpio_chip.ngpio,
-					   &irq_domain_simple_ops, NULL);
-	if (!irq_domain)
+	tgi->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
+						tgi->gc.ngpio,
+						&irq_domain_simple_ops, NULL);
+	if (!tgi->irq_domain)
 		return -ENODEV;
 
-	for (i = 0; i < tegra_gpio_bank_count; i++) {
+	for (i = 0; i < tgi->bank_count; i++) {
 		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
 		if (!res) {
 			dev_err(&pdev->dev, "Missing IRQ resource\n");
 			return -ENODEV;
 		}
 
-		bank = &tegra_gpio_banks[i];
+		bank = &tgi->bank_info[i];
 		bank->bank = i;
 		bank->irq = res->start;
+		bank->tgi = tgi;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(regs))
-		return PTR_ERR(regs);
+	tgi->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tgi->regs))
+		return PTR_ERR(tgi->regs);
 
-	for (i = 0; i < tegra_gpio_bank_count; i++) {
+	for (i = 0; i < tgi->bank_count; i++) {
 		for (j = 0; j < 4; j++) {
 			int gpio = tegra_gpio_compose(i, j, 0);
-			tegra_gpio_writel(0x00, GPIO_INT_ENB(gpio));
+			tegra_gpio_writel(tgi, 0x00, GPIO_INT_ENB(tgi, gpio));
 		}
 	}
 
-	tegra_gpio_chip.of_node = pdev->dev.of_node;
-
-	ret = devm_gpiochip_add_data(&pdev->dev, &tegra_gpio_chip, NULL);
+	ret = devm_gpiochip_add_data(&pdev->dev, &tgi->gc, tgi);
 	if (ret < 0) {
-		irq_domain_remove(irq_domain);
+		irq_domain_remove(tgi->irq_domain);
 		return ret;
 	}
 
-	for (gpio = 0; gpio < tegra_gpio_chip.ngpio; gpio++) {
-		int irq = irq_create_mapping(irq_domain, gpio);
+	for (gpio = 0; gpio < tgi->gc.ngpio; gpio++) {
+		int irq = irq_create_mapping(tgi->irq_domain, gpio);
 		/* No validity check; all Tegra GPIOs are valid IRQs */
 
-		bank = &tegra_gpio_banks[GPIO_BANK(gpio)];
+		bank = &tgi->bank_info[GPIO_BANK(gpio)];
 
-		irq_set_lockdep_class(irq, &gpio_lock_class);
+		irq_set_lockdep_class(irq, &tgi->lock_class);
 		irq_set_chip_data(irq, bank);
-		irq_set_chip_and_handler(irq, &tegra_gpio_irq_chip,
-					 handle_simple_irq);
+		irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
 	}
 
-	for (i = 0; i < tegra_gpio_bank_count; i++) {
-		bank = &tegra_gpio_banks[i];
+	for (i = 0; i < tgi->bank_count; i++) {
+		bank = &tgi->bank_info[i];
 
 		irq_set_chained_handler_and_data(bank->irq,
 						 tegra_gpio_irq_handler, bank);
 
-		for (j = 0; j < 4; j++)
+		for (j = 0; j < 4; j++) {
 			spin_lock_init(&bank->lvl_lock[j]);
+			spin_lock_init(&bank->dbc_lock[j]);
+		}
 	}
 
-	tegra_gpio_debuginit();
+	tegra_gpio_debuginit(tgi);
 
 	return 0;
 }
 
+static const struct tegra_gpio_soc_config tegra20_gpio_config = {
+	.bank_stride = 0x80,
+	.upper_offset = 0x800,
+};
+
+static const struct tegra_gpio_soc_config tegra30_gpio_config = {
+	.bank_stride = 0x100,
+	.upper_offset = 0x80,
+};
+
+static const struct tegra_gpio_soc_config tegra210_gpio_config = {
+	.debounce_supported = true,
+	.bank_stride = 0x100,
+	.upper_offset = 0x80,
+};
+
+static const struct of_device_id tegra_gpio_of_match[] = {
+	{ .compatible = "nvidia,tegra210-gpio", .data = &tegra210_gpio_config },
+	{ .compatible = "nvidia,tegra30-gpio", .data = &tegra30_gpio_config },
+	{ .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config },
+	{ },
+};
+
 static struct platform_driver tegra_gpio_driver = {
 	.driver		= {
 		.name	= "tegra-gpio",
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 85ed608..181f86c 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -1,5 +1,6 @@
 /*
  * Timberdale FPGA GPIO driver
+ * Author: Mocean Laboratories
  * Copyright (c) 2009 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -20,7 +21,7 @@
  * Timberdale FPGA GPIO
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/irq.h>
@@ -290,40 +291,14 @@
 	return 0;
 }
 
-static int timbgpio_remove(struct platform_device *pdev)
-{
-	struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	struct timbgpio *tgpio = platform_get_drvdata(pdev);
-	int irq = platform_get_irq(pdev, 0);
-
-	if (irq >= 0 && tgpio->irq_base > 0) {
-		int i;
-		for (i = 0; i < pdata->nr_pins; i++) {
-			irq_set_chip(tgpio->irq_base + i, NULL);
-			irq_set_chip_data(tgpio->irq_base + i, NULL);
-		}
-
-		irq_set_handler(irq, NULL);
-		irq_set_handler_data(irq, NULL);
-	}
-
-	return 0;
-}
-
 static struct platform_driver timbgpio_platform_driver = {
 	.driver = {
-		.name	= DRIVER_NAME,
+		.name			= DRIVER_NAME,
+		.suppress_bind_attrs	= true,
 	},
 	.probe		= timbgpio_probe,
-	.remove		= timbgpio_remove,
 };
 
 /*--------------------------------------------------------------------------*/
 
-module_platform_driver(timbgpio_platform_driver);
-
-MODULE_DESCRIPTION("Timberdale GPIO driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mocean Laboratories");
-MODULE_ALIAS("platform:"DRIVER_NAME);
-
+builtin_platform_driver(timbgpio_platform_driver);
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index 9f020aa..cace79c 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -57,39 +57,34 @@
 	return 0;
 }
 
-static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value)
+static void tpic2810_set_mask_bits(struct gpio_chip *chip, u8 mask, u8 bits)
 {
 	struct tpic2810 *gpio = gpiochip_get_data(chip);
+	u8 buffer;
+	int err;
 
 	mutex_lock(&gpio->lock);
 
-	if (value)
-		gpio->buffer |= BIT(offset);
-	else
-		gpio->buffer &= ~BIT(offset);
+	buffer = gpio->buffer & ~mask;
+	buffer |= (mask & bits);
 
-	i2c_smbus_write_byte_data(gpio->client, TPIC2810_WS_COMMAND,
-				  gpio->buffer);
+	err = i2c_smbus_write_byte_data(gpio->client, TPIC2810_WS_COMMAND,
+					buffer);
+	if (!err)
+		gpio->buffer = buffer;
 
 	mutex_unlock(&gpio->lock);
 }
 
+static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	tpic2810_set_mask_bits(chip, BIT(offset), value ? BIT(offset) : 0);
+}
+
 static void tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
 				  unsigned long *bits)
 {
-	struct tpic2810 *gpio = gpiochip_get_data(chip);
-
-	mutex_lock(&gpio->lock);
-
-	/* clear bits under mask */
-	gpio->buffer &= ~(*mask);
-	/* set bits under mask */
-	gpio->buffer |= ((*mask) & (*bits));
-
-	i2c_smbus_write_byte_data(gpio->client, TPIC2810_WS_COMMAND,
-				  gpio->buffer);
-
-	mutex_unlock(&gpio->lock);
+	tpic2810_set_mask_bits(chip, *mask, *bits);
 }
 
 static struct gpio_chip template_chip = {
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 313c0e4..0eaeac8 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -101,16 +101,6 @@
 
 		break;
 	case 1:
-		/* GP02 is push-pull by default, can be set as open drain. */
-		if (gpiochip_line_is_open_drain(gc, offset)) {
-			ret = tps65218_clear_bits(tps65218,
-						  TPS65218_REG_CONFIG1,
-						  TPS65218_CONFIG1_GPO2_BUF,
-						  TPS65218_PROTECT_L1);
-			if (ret)
-				return ret;
-		}
-
 		/* Setup GPO2 */
 		ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG1,
 					  TPS65218_CONFIG1_IO1_SEL,
@@ -148,6 +138,40 @@
 	return 0;
 }
 
+static int tps65218_gpio_set_single_ended(struct gpio_chip *gc,
+					  unsigned offset,
+					  enum single_ended_mode mode)
+{
+	struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
+	struct tps65218 *tps65218 = tps65218_gpio->tps65218;
+
+	switch (offset) {
+	case 0:
+	case 2:
+		/* GPO1 is hardwired to be open drain */
+		if (mode == LINE_MODE_OPEN_DRAIN)
+			return 0;
+		return -ENOTSUPP;
+	case 1:
+		/* GPO2 is push-pull by default, can be set as open drain. */
+		if (mode == LINE_MODE_OPEN_DRAIN)
+			return tps65218_clear_bits(tps65218,
+						   TPS65218_REG_CONFIG1,
+						   TPS65218_CONFIG1_GPO2_BUF,
+						   TPS65218_PROTECT_L1);
+		if (mode == LINE_MODE_PUSH_PULL)
+			return tps65218_set_bits(tps65218,
+						 TPS65218_REG_CONFIG1,
+						 TPS65218_CONFIG1_GPO2_BUF,
+						 TPS65218_CONFIG1_GPO2_BUF,
+						 TPS65218_PROTECT_L1);
+		return -ENOTSUPP;
+	default:
+		break;
+	}
+	return -ENOTSUPP;
+}
+
 static struct gpio_chip template_chip = {
 	.label			= "gpio-tps65218",
 	.owner			= THIS_MODULE,
@@ -156,6 +180,7 @@
 	.direction_input	= tps65218_gpio_input,
 	.get			= tps65218_gpio_get,
 	.set			= tps65218_gpio_set,
+	.set_single_ended	= tps65218_gpio_set_single_ended,
 	.can_sleep		= true,
 	.ngpio			= 3,
 	.base			= -1,
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index c88bdc8..6b15e68 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -24,7 +24,7 @@
 #include <linux/errno.h>
 #include <linux/gpio.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/mfd/tps6586x.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
@@ -140,14 +140,3 @@
 	return platform_driver_register(&tps6586x_gpio_driver);
 }
 subsys_initcall(tps6586x_gpio_init);
-
-static void __exit tps6586x_gpio_exit(void)
-{
-	platform_driver_unregister(&tps6586x_gpio_driver);
-}
-module_exit(tps6586x_gpio_exit);
-
-MODULE_ALIAS("platform:tps6586x-gpio");
-MODULE_DESCRIPTION("GPIO interface for TPS6586X PMIC");
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index cdbd7c7..0ae6a5a 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -4,7 +4,7 @@
  * Copyright 2010 Texas Instruments Inc.
  *
  * Author: Graeme Gregory <gg@slimlogic.co.uk>
- * Author: Jorge Eduardo Candelaria jedu@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under  the terms of the GNU General  Public License as published by the
@@ -14,7 +14,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/gpio.h>
 #include <linux/i2c.h>
@@ -193,15 +193,3 @@
 	return platform_driver_register(&tps65910_gpio_driver);
 }
 subsys_initcall(tps65910_gpio_init);
-
-static void __exit tps65910_gpio_exit(void)
-{
-	platform_driver_unregister(&tps65910_gpio_driver);
-}
-module_exit(tps65910_gpio_exit);
-
-MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
-MODULE_AUTHOR("Jorge Eduardo Candelaria jedu@slimlogic.co.uk>");
-MODULE_DESCRIPTION("GPIO interface for TPS65910/TPS6511 PMICs");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tps65910-gpio");
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 8cdb9f7..4e45012 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -186,6 +186,28 @@
 	return 0;
 }
 
+static int vx855gpio_set_single_ended(struct gpio_chip *gpio,
+				      unsigned int nr,
+				      enum single_ended_mode mode)
+{
+	/* The GPIs cannot be single-ended */
+	if (nr < NR_VX855_GPI)
+		return -EINVAL;
+
+	/* The GPOs are push-pull */
+	if (nr < NR_VX855_GPInO) {
+		if (mode != LINE_MODE_PUSH_PULL)
+			return -ENOTSUPP;
+		return 0;
+	}
+
+	/* The GPIOs are open drain */
+	if (mode != LINE_MODE_OPEN_DRAIN)
+		return -ENOTSUPP;
+
+	return 0;
+}
+
 static const char *vx855gpio_names[NR_VX855_GP] = {
 	"VX855_GPI0", "VX855_GPI1", "VX855_GPI2", "VX855_GPI3", "VX855_GPI4",
 	"VX855_GPI5", "VX855_GPI6", "VX855_GPI7", "VX855_GPI8", "VX855_GPI9",
@@ -209,6 +231,7 @@
 	c->direction_output = vx855gpio_direction_output;
 	c->get = vx855gpio_get;
 	c->set = vx855gpio_set;
+	c->set_single_ended = vx855gpio_set_single_ended;
 	c->dbg_show = NULL;
 	c->base = 0;
 	c->ngpio = NR_VX855_GP;
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 18cb0f5..41ec783 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -132,6 +132,28 @@
 	return wm831x_set_bits(wm831x, reg, WM831X_GPN_FN_MASK, fn);
 }
 
+static int wm831x_set_single_ended(struct gpio_chip *chip,
+				   unsigned int offset,
+				   enum single_ended_mode mode)
+{
+	struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip);
+	struct wm831x *wm831x = wm831x_gpio->wm831x;
+	int reg = WM831X_GPIO1_CONTROL + offset;
+
+	switch (mode) {
+	case LINE_MODE_OPEN_DRAIN:
+		return wm831x_set_bits(wm831x, reg,
+				       WM831X_GPN_OD_MASK, WM831X_GPN_OD);
+	case LINE_MODE_PUSH_PULL:
+		return wm831x_set_bits(wm831x, reg,
+				       WM831X_GPN_OD_MASK, 0);
+	default:
+		break;
+	}
+
+	return -ENOTSUPP;
+}
+
 #ifdef CONFIG_DEBUG_FS
 static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 {
@@ -216,7 +238,7 @@
 			   pull,
 			   powerdomain,
 			   reg & WM831X_GPN_POL ? "" : " inverted",
-			   reg & WM831X_GPN_OD ? "open-drain" : "CMOS",
+			   reg & WM831X_GPN_OD ? "open-drain" : "push-pull",
 			   tristated ? " tristated" : "",
 			   reg);
 	}
@@ -234,6 +256,7 @@
 	.set			= wm831x_gpio_set,
 	.to_irq			= wm831x_gpio_to_irq,
 	.set_debounce		= wm831x_gpio_set_debounce,
+	.set_single_ended	= wm831x_set_single_ended,
 	.dbg_show		= wm831x_gpio_dbg_show,
 	.can_sleep		= true,
 };
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index b089df9..744af38 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -103,6 +103,28 @@
 	wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
 }
 
+static int wm8994_gpio_set_single_ended(struct gpio_chip *chip,
+					unsigned int offset,
+					enum single_ended_mode mode)
+{
+	struct wm8994_gpio *wm8994_gpio = gpiochip_get_data(chip);
+	struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
+	switch (mode) {
+	case LINE_MODE_OPEN_DRAIN:
+		return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
+				       WM8994_GPN_OP_CFG_MASK,
+				       WM8994_GPN_OP_CFG);
+	case LINE_MODE_PUSH_PULL:
+		return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
+				       WM8994_GPN_OP_CFG_MASK, 0);
+	default:
+		break;
+	}
+
+	return -ENOTSUPP;
+}
+
 static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
 	struct wm8994_gpio *wm8994_gpio = gpiochip_get_data(chip);
@@ -217,7 +239,7 @@
 		if (reg & WM8994_GPN_OP_CFG)
 			seq_printf(s, "open drain ");
 		else
-			seq_printf(s, "CMOS ");
+			seq_printf(s, "push-pull ");
 
 		seq_printf(s, "%s (%x)\n",
 			   wm8994_gpio_fn(reg & WM8994_GPN_FN_MASK), reg);
@@ -235,6 +257,7 @@
 	.get			= wm8994_gpio_get,
 	.direction_output	= wm8994_gpio_direction_out,
 	.set			= wm8994_gpio_set,
+	.set_single_ended	= wm8994_gpio_set_single_ended,
 	.to_irq			= wm8994_gpio_to_irq,
 	.dbg_show		= wm8994_gpio_dbg_show,
 	.can_sleep		= true,
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 31cbcb8..0332586 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -216,23 +216,10 @@
 			&parent_fwspec);
 }
 
-static void xgene_gpio_sb_domain_free(struct irq_domain *domain,
-		unsigned int virq,
-		unsigned int nr_irqs)
-{
-	struct irq_data *d;
-	unsigned int i;
-
-	for (i = 0; i < nr_irqs; i++) {
-		d = irq_domain_get_irq_data(domain, virq + i);
-		irq_domain_reset_irq_data(d);
-	}
-}
-
 static const struct irq_domain_ops xgene_gpio_sb_domain_ops = {
 	.translate      = xgene_gpio_sb_domain_translate,
 	.alloc          = xgene_gpio_sb_domain_alloc,
-	.free           = xgene_gpio_sb_domain_free,
+	.free           = irq_domain_free_irqs_common,
 	.activate	= xgene_gpio_sb_domain_activate,
 	.deactivate	= xgene_gpio_sb_domain_deactivate,
 };
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index c0aa387..40a8881 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -17,7 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/module.h>
+#include <linux/acpi.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -85,6 +85,17 @@
 	spin_unlock_irqrestore(&chip->lock, flags);
 }
 
+static int xgene_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+	struct xgene_gpio *chip = gpiochip_get_data(gc);
+	unsigned long bank_offset, bit_offset;
+
+	bank_offset = GPIO_SET_DR_OFFSET + GPIO_BANK_OFFSET(offset);
+	bit_offset = GPIO_BIT_OFFSET(offset);
+
+	return !!(ioread32(chip->base + bank_offset) & BIT(bit_offset));
+}
+
 static int xgene_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
 {
 	struct xgene_gpio *chip = gpiochip_get_data(gc);
@@ -173,6 +184,11 @@
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		err = -EINVAL;
+		goto err;
+	}
+
 	gpio->base = devm_ioremap_nocache(&pdev->dev, res->start,
 							resource_size(res));
 	if (!gpio->base) {
@@ -184,6 +200,7 @@
 
 	spin_lock_init(&gpio->lock);
 	gpio->chip.parent = &pdev->dev;
+	gpio->chip.get_direction = xgene_gpio_get_direction;
 	gpio->chip.direction_input = xgene_gpio_dir_in;
 	gpio->chip.direction_output = xgene_gpio_dir_out;
 	gpio->chip.get = xgene_gpio_get;
@@ -211,19 +228,21 @@
 	{ .compatible = "apm,xgene-gpio", },
 	{},
 };
-MODULE_DEVICE_TABLE(of, xgene_gpio_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_gpio_acpi_match[] = {
+	{ "APMC0D14", 0 },
+	{ },
+};
+#endif
 
 static struct platform_driver xgene_gpio_driver = {
 	.driver = {
 		.name = "xgene-gpio",
 		.of_match_table = xgene_gpio_of_match,
+		.acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
 		.pm     = XGENE_GPIO_PM_OPS,
 	},
 	.probe = xgene_gpio_probe,
 };
-
-module_platform_driver(xgene_gpio_driver);
-
-MODULE_AUTHOR("Feng Kan <fkan@apm.com>");
-MODULE_DESCRIPTION("APM X-Gene GPIO driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(xgene_gpio_driver);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index aa5813d..08897dc 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -85,7 +85,8 @@
 	XLP_GPIO_VARIANT_XLP316,
 	XLP_GPIO_VARIANT_XLP208,
 	XLP_GPIO_VARIANT_XLP980,
-	XLP_GPIO_VARIANT_XLP532
+	XLP_GPIO_VARIANT_XLP532,
+	GPIO_VARIANT_VULCAN
 };
 
 struct xlp_gpio_priv {
@@ -285,6 +286,10 @@
 		.compatible = "netlogic,xlp532-gpio",
 		.data	    = (void *)XLP_GPIO_VARIANT_XLP532,
 	},
+	{
+		.compatible = "brcm,vulcan-gpio",
+		.data	    = (void *)GPIO_VARIANT_VULCAN,
+	},
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, xlp_gpio_of_ids);
@@ -347,6 +352,7 @@
 		break;
 	case XLP_GPIO_VARIANT_XLP980:
 	case XLP_GPIO_VARIANT_XLP532:
+	case GPIO_VARIANT_VULCAN:
 		priv->gpio_out_en = gpio_base + GPIO_9XX_OUTPUT_EN;
 		priv->gpio_paddrv = gpio_base + GPIO_9XX_PADDRV;
 		priv->gpio_intr_stat = gpio_base + GPIO_9XX_INT_STAT;
@@ -354,7 +360,12 @@
 		priv->gpio_intr_pol = gpio_base + GPIO_9XX_INT_POL;
 		priv->gpio_intr_en = gpio_base + GPIO_9XX_INT_EN00;
 
-		ngpio = (soc_type == XLP_GPIO_VARIANT_XLP980) ? 66 : 67;
+		if (soc_type == XLP_GPIO_VARIANT_XLP980)
+			ngpio = 66;
+		else if (soc_type == XLP_GPIO_VARIANT_XLP532)
+			ngpio = 67;
+		else
+			ngpio = 70;
 		break;
 	default:
 		dev_err(&pdev->dev, "Unknown Processor type!\n");
@@ -377,10 +388,14 @@
 	gc->get = xlp_gpio_get;
 
 	spin_lock_init(&priv->lock);
-	irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
-	if (irq_base < 0) {
+	/* XLP has fixed IRQ range for GPIO interrupts */
+	if (soc_type == GPIO_VARIANT_VULCAN)
+		irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0);
+	else
+		irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
+	if (IS_ERR_VALUE(irq_base)) {
 		dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
-		return -ENODEV;
+		return irq_base;
 	}
 
 	err = gpiochip_add_data(gc, priv);
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index cda6d92..e23ef7b 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -10,7 +10,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/bitops.h>
 #include <linux/io.h>
 #include <linux/of_device.h>
@@ -203,32 +203,17 @@
 	return 0;
 }
 
-static int zevio_gpio_remove(struct platform_device *pdev)
-{
-	struct zevio_gpio *controller = platform_get_drvdata(pdev);
-
-	of_mm_gpiochip_remove(&controller->chip);
-
-	return 0;
-}
-
 static const struct of_device_id zevio_gpio_of_match[] = {
 	{ .compatible = "lsi,zevio-gpio", },
 	{ },
 };
 
-MODULE_DEVICE_TABLE(of, zevio_gpio_of_match);
-
 static struct platform_driver zevio_gpio_driver = {
 	.driver		= {
 		.name	= "gpio-zevio",
 		.of_match_table = zevio_gpio_of_match,
+		.suppress_bind_attrs = true,
 	},
 	.probe		= zevio_gpio_probe,
-	.remove		= zevio_gpio_remove,
 };
-module_platform_driver(zevio_gpio_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Fabian Vogt <fabian@ritter-vogt.de>");
-MODULE_DESCRIPTION("LSI ZEVIO SoC GPIO driver");
+builtin_platform_driver(zevio_gpio_driver);
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 47c79fa..93de8be 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -1,4 +1,8 @@
 /*
+ * ZTE ZX296702 GPIO driver
+ *
+ * Author: Jun Nie <jun.nie@linaro.org>
+ *
  * Copyright (C) 2015 Linaro Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -10,7 +14,7 @@
 #include <linux/errno.h>
 #include <linux/gpio/driver.h>
 #include <linux/irqchip/chained_irq.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
@@ -282,7 +286,6 @@
 	},
 	{ },
 };
-MODULE_DEVICE_TABLE(of, zx_gpio_match);
 
 static struct platform_driver zx_gpio_driver = {
 	.probe		= zx_gpio_probe,
@@ -291,9 +294,4 @@
 		.of_match_table = of_match_ptr(zx_gpio_match),
 	},
 };
-
-module_platform_driver(zx_gpio_driver)
-
-MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
-MODULE_DESCRIPTION("ZTE ZX296702 GPIO driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(zx_gpio_driver)
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 66d3d24..75c6355 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -713,7 +713,7 @@
 	pm_runtime_enable(&pdev->dev);
 	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0)
-		return ret;
+		goto err_pm_dis;
 
 	/* report a bug if gpio chip registration fails */
 	ret = gpiochip_add_data(chip, gpio);
@@ -745,6 +745,8 @@
 	gpiochip_remove(chip);
 err_pm_put:
 	pm_runtime_put(&pdev->dev);
+err_pm_dis:
+	pm_runtime_disable(&pdev->dev);
 
 	return ret;
 }
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 682070d..2dc5258 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -977,7 +977,7 @@
 		lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
 		if (lookup) {
 			lookup->adev = adev;
-			lookup->con_id = con_id;
+			lookup->con_id = kstrdup(con_id, GFP_KERNEL);
 			list_add_tail(&lookup->node, &acpi_crs_lookup_list);
 		}
 	}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 42a4bb7..d22dcc3 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -196,21 +196,68 @@
 }
 
 /**
+ * of_gpiochip_set_names() - set up the names of the lines
+ * @chip: GPIO chip whose lines should be named, if possible
+ */
+static void of_gpiochip_set_names(struct gpio_chip *gc)
+{
+	struct gpio_device *gdev = gc->gpiodev;
+	struct device_node *np = gc->of_node;
+	int i;
+	int nstrings;
+
+	nstrings = of_property_count_strings(np, "gpio-line-names");
+	if (nstrings <= 0)
+		/* Line names not present */
+		return;
+
+	/* This is normally not what you want */
+	if (gdev->ngpio != nstrings)
+		dev_info(&gdev->dev, "gpio-line-names specifies %d line "
+			 "names but there are %d lines on the chip\n",
+			 nstrings, gdev->ngpio);
+
+	/*
+	 * Make sure not to index beyond the number of descriptors
+	 * of the GPIO device.
+	 */
+	for (i = 0; i < gdev->ngpio; i++) {
+		const char *name;
+		int ret;
+
+		ret = of_property_read_string_index(np,
+						    "gpio-line-names",
+						    i,
+						    &name);
+		if (ret) {
+			if (ret != -ENODATA)
+                                dev_err(&gdev->dev,
+				dev_err(&gdev->dev,
+					"unable to name line %d: %d\n",
+					i, ret);
+		}
+		gdev->descs[i].name = name;
+	}
+}
+
+/**
  * of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions
  * @chip:	gpio chip to act on
  *
  * This is only used by of_gpiochip_add to request/set GPIO initial
  * configuration.
+ * It returns an error if it fails, otherwise 0 on success.
  */
-static void of_gpiochip_scan_gpios(struct gpio_chip *chip)
+static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
 {
 	struct gpio_desc *desc = NULL;
 	struct device_node *np;
 	const char *name;
 	enum gpio_lookup_flags lflags;
 	enum gpiod_flags dflags;
+	int ret;
 
-	for_each_child_of_node(chip->of_node, np) {
+	for_each_available_child_of_node(chip->of_node, np) {
 		if (!of_property_read_bool(np, "gpio-hog"))
 			continue;
 
@@ -218,9 +265,12 @@
 		if (IS_ERR(desc))
 			continue;
 
-		if (gpiod_hog(desc, name, lflags, dflags))
-			continue;
+		ret = gpiod_hog(desc, name, lflags, dflags);
+		if (ret < 0)
+			return ret;
 	}
+
+	return 0;
 }
 
 /**
@@ -440,11 +490,13 @@
 	if (status)
 		return status;
 
+	/* If the chip defines names itself, these take precedence */
+	if (!chip->names)
+		of_gpiochip_set_names(chip);
+
 	of_node_get(chip->of_node);
 
-	of_gpiochip_scan_gpios(chip);
-
-	return 0;
+	return of_gpiochip_scan_gpios(chip);
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 7206553..d407f904 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -68,6 +68,7 @@
 static void gpiochip_free_hogs(struct gpio_chip *chip);
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
 
+static bool gpiolib_initialized;
 
 static inline void desc_set_label(struct gpio_desc *d, const char *label)
 {
@@ -440,9 +441,63 @@
 	cdev_del(&gdev->chrdev);
 	list_del(&gdev->list);
 	ida_simple_remove(&gpio_ida, gdev->id);
+	kfree(gdev->label);
+	kfree(gdev->descs);
 	kfree(gdev);
 }
 
+static int gpiochip_setup_dev(struct gpio_device *gdev)
+{
+	int status;
+
+	cdev_init(&gdev->chrdev, &gpio_fileops);
+	gdev->chrdev.owner = THIS_MODULE;
+	gdev->chrdev.kobj.parent = &gdev->dev.kobj;
+	gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
+	status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
+	if (status < 0)
+		chip_warn(gdev->chip, "failed to add char device %d:%d\n",
+			  MAJOR(gpio_devt), gdev->id);
+	else
+		chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
+			 MAJOR(gpio_devt), gdev->id);
+	status = device_add(&gdev->dev);
+	if (status)
+		goto err_remove_chardev;
+
+	status = gpiochip_sysfs_register(gdev);
+	if (status)
+		goto err_remove_device;
+
+	/* From this point, the .release() function cleans up gpio_device */
+	gdev->dev.release = gpiodevice_release;
+	get_device(&gdev->dev);
+	pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
+		 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
+		 dev_name(&gdev->dev), gdev->chip->label ? : "generic");
+
+	return 0;
+
+err_remove_device:
+	device_del(&gdev->dev);
+err_remove_chardev:
+	cdev_del(&gdev->chrdev);
+	return status;
+}
+
+static void gpiochip_setup_devs(void)
+{
+	struct gpio_device *gdev;
+	int err;
+
+	list_for_each_entry(gdev, &gpio_devices, list) {
+		err = gpiochip_setup_dev(gdev);
+		if (err)
+			pr_err("%s: Failed to initialize gpio device (%d)\n",
+			       dev_name(&gdev->dev), err);
+	}
+}
+
 /**
  * gpiochip_add_data() - register a gpio_chip
  * @chip: the chip to register, with chip->base initialized
@@ -457,6 +512,9 @@
  * the gpio framework's arch_initcall().  Otherwise sysfs initialization
  * for GPIOs will fail rudely.
  *
+ * gpiochip_add_data() must only be called after gpiolib initialization,
+ * ie after core_initcall().
+ *
  * If chip->base is negative, this requests dynamic assignment of
  * a range of valid GPIOs.
  */
@@ -504,8 +562,7 @@
 	else
 		gdev->owner = THIS_MODULE;
 
-	gdev->descs = devm_kcalloc(&gdev->dev, chip->ngpio,
-				   sizeof(gdev->descs[0]), GFP_KERNEL);
+	gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
 	if (!gdev->descs) {
 		status = -ENOMEM;
 		goto err_free_gdev;
@@ -514,16 +571,16 @@
 	if (chip->ngpio == 0) {
 		chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
 		status = -EINVAL;
-		goto err_free_gdev;
+		goto err_free_descs;
 	}
 
 	if (chip->label)
-		gdev->label = devm_kstrdup(&gdev->dev, chip->label, GFP_KERNEL);
+		gdev->label = kstrdup(chip->label, GFP_KERNEL);
 	else
-		gdev->label = devm_kstrdup(&gdev->dev, "unknown", GFP_KERNEL);
+		gdev->label = kstrdup("unknown", GFP_KERNEL);
 	if (!gdev->label) {
 		status = -ENOMEM;
-		goto err_free_gdev;
+		goto err_free_descs;
 	}
 
 	gdev->ngpio = chip->ngpio;
@@ -543,7 +600,7 @@
 		if (base < 0) {
 			status = base;
 			spin_unlock_irqrestore(&gpio_lock, flags);
-			goto err_free_gdev;
+			goto err_free_label;
 		}
 		/*
 		 * TODO: it should not be necessary to reflect the assigned
@@ -558,21 +615,38 @@
 	status = gpiodev_add_to_list(gdev);
 	if (status) {
 		spin_unlock_irqrestore(&gpio_lock, flags);
-		goto err_free_gdev;
+		goto err_free_label;
 	}
 
 	for (i = 0; i < chip->ngpio; i++) {
 		struct gpio_desc *desc = &gdev->descs[i];
 
 		desc->gdev = gdev;
-
-		/* REVISIT: most hardware initializes GPIOs as inputs (often
-		 * with pullups enabled) so power usage is minimized. Linux
-		 * code should set the gpio direction first thing; but until
-		 * it does, and in case chip->get_direction is not set, we may
-		 * expose the wrong direction in sysfs.
+		/*
+		 * REVISIT: most hardware initializes GPIOs as inputs
+		 * (often with pullups enabled) so power usage is
+		 * minimized. Linux code should set the gpio direction
+		 * first thing; but until it does, and in case
+		 * chip->get_direction is not set, we may expose the
+		 * wrong direction in sysfs.
 		 */
-		desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
+
+		if (chip->get_direction) {
+			/*
+			 * If we have .get_direction, set up the initial
+			 * direction flag from the hardware.
+			 */
+			int dir = chip->get_direction(chip, i);
+
+			if (!dir)
+				set_bit(FLAG_IS_OUT, &desc->flags);
+		} else if (!chip->direction_input) {
+			/*
+			 * If the chip lacks the .direction_input callback
+			 * we logically assume all lines are outputs.
+			 */
+			set_bit(FLAG_IS_OUT, &desc->flags);
+		}
 	}
 
 	spin_unlock_irqrestore(&gpio_lock, flags);
@@ -596,39 +670,16 @@
 	 * we get a device node entry in sysfs under
 	 * /sys/bus/gpio/devices/gpiochipN/dev that can be used for
 	 * coldplug of device nodes and other udev business.
+	 * We can do this only if gpiolib has been initialized.
+	 * Otherwise, defer until later.
 	 */
-	cdev_init(&gdev->chrdev, &gpio_fileops);
-	gdev->chrdev.owner = THIS_MODULE;
-	gdev->chrdev.kobj.parent = &gdev->dev.kobj;
-	gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
-	status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
-	if (status < 0)
-		chip_warn(chip, "failed to add char device %d:%d\n",
-			  MAJOR(gpio_devt), gdev->id);
-	else
-		chip_dbg(chip, "added GPIO chardev (%d:%d)\n",
-			 MAJOR(gpio_devt), gdev->id);
-	status = device_add(&gdev->dev);
-	if (status)
-		goto err_remove_chardev;
-
-	status = gpiochip_sysfs_register(gdev);
-	if (status)
-		goto err_remove_device;
-
-	/* From this point, the .release() function cleans up gpio_device */
-	gdev->dev.release = gpiodevice_release;
-	get_device(&gdev->dev);
-	pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
-		 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
-		 dev_name(&gdev->dev), chip->label ? : "generic");
-
+	if (gpiolib_initialized) {
+		status = gpiochip_setup_dev(gdev);
+		if (status)
+			goto err_remove_chip;
+	}
 	return 0;
 
-err_remove_device:
-	device_del(&gdev->dev);
-err_remove_chardev:
-	cdev_del(&gdev->chrdev);
 err_remove_chip:
 	acpi_gpiochip_remove(chip);
 	gpiochip_free_hogs(chip);
@@ -637,6 +688,10 @@
 	spin_lock_irqsave(&gpio_lock, flags);
 	list_del(&gdev->list);
 	spin_unlock_irqrestore(&gpio_lock, flags);
+err_free_label:
+	kfree(gdev->label);
+err_free_descs:
+	kfree(gdev->descs);
 err_free_gdev:
 	ida_simple_remove(&gpio_ida, gdev->id);
 	/* failures here can mean systems won't boot... */
@@ -1509,8 +1564,8 @@
 
 static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 {
-	struct gpio_chip	*chip;
-	int			status = -EINVAL;
+	struct gpio_chip *gc = desc->gdev->chip;
+	int ret;
 
 	/* GPIOs used for IRQs shall not be set as output */
 	if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
@@ -1520,28 +1575,50 @@
 		return -EIO;
 	}
 
-	/* Open drain pin should not be driven to 1 */
-	if (value && test_bit(FLAG_OPEN_DRAIN,  &desc->flags))
-		return gpiod_direction_input(desc);
+	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+		/* First see if we can enable open drain in hardware */
+		if (gc->set_single_ended) {
+			ret = gc->set_single_ended(gc, gpio_chip_hwgpio(desc),
+						   LINE_MODE_OPEN_DRAIN);
+			if (!ret)
+				goto set_output_value;
+		}
+		/* Emulate open drain by not actively driving the line high */
+		if (value)
+			return gpiod_direction_input(desc);
+	}
+	else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
+		if (gc->set_single_ended) {
+			ret = gc->set_single_ended(gc, gpio_chip_hwgpio(desc),
+						   LINE_MODE_OPEN_SOURCE);
+			if (!ret)
+				goto set_output_value;
+		}
+		/* Emulate open source by not actively driving the line low */
+		if (!value)
+			return gpiod_direction_input(desc);
+	} else {
+		/* Make sure to disable open drain/source hardware, if any */
+		if (gc->set_single_ended)
+			gc->set_single_ended(gc,
+					     gpio_chip_hwgpio(desc),
+					     LINE_MODE_PUSH_PULL);
+	}
 
-	/* Open source pin should not be driven to 0 */
-	if (!value && test_bit(FLAG_OPEN_SOURCE,  &desc->flags))
-		return gpiod_direction_input(desc);
-
-	chip = desc->gdev->chip;
-	if (!chip->set || !chip->direction_output) {
+set_output_value:
+	if (!gc->set || !gc->direction_output) {
 		gpiod_warn(desc,
 		       "%s: missing set() or direction_output() operations\n",
 		       __func__);
 		return -EIO;
 	}
 
-	status = chip->direction_output(chip, gpio_chip_hwgpio(desc), value);
-	if (status == 0)
+	ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), value);
+	if (!ret)
 		set_bit(FLAG_IS_OUT, &desc->flags);
 	trace_gpio_value(desc_to_gpio(desc), 0, value);
-	trace_gpio_direction(desc_to_gpio(desc), 0, status);
-	return status;
+	trace_gpio_direction(desc_to_gpio(desc), 0, ret);
+	return ret;
 }
 
 /**
@@ -1803,10 +1880,10 @@
 	}
 }
 
-static void gpiod_set_array_value_priv(bool raw, bool can_sleep,
-				       unsigned int array_size,
-				       struct gpio_desc **desc_array,
-				       int *value_array)
+void gpiod_set_array_value_complex(bool raw, bool can_sleep,
+				   unsigned int array_size,
+				   struct gpio_desc **desc_array,
+				   int *value_array)
 {
 	int i = 0;
 
@@ -1912,8 +1989,8 @@
 {
 	if (!desc_array)
 		return;
-	gpiod_set_array_value_priv(true, false, array_size, desc_array,
-				   value_array);
+	gpiod_set_array_value_complex(true, false, array_size, desc_array,
+				      value_array);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value);
 
@@ -1934,8 +2011,8 @@
 {
 	if (!desc_array)
 		return;
-	gpiod_set_array_value_priv(false, false, array_size, desc_array,
-				   value_array);
+	gpiod_set_array_value_complex(false, false, array_size, desc_array,
+				      value_array);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_array_value);
 
@@ -1960,13 +2037,22 @@
  */
 int gpiod_to_irq(const struct gpio_desc *desc)
 {
-	struct gpio_chip	*chip;
-	int			offset;
+	struct gpio_chip *chip;
+	int offset;
 
 	VALIDATE_DESC(desc);
 	chip = desc->gdev->chip;
 	offset = gpio_chip_hwgpio(desc);
-	return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
+	if (chip->to_irq) {
+		int retirq = chip->to_irq(chip, offset);
+
+		/* Zero means NO_IRQ */
+		if (!retirq)
+			return -ENXIO;
+
+		return retirq;
+	}
+	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(gpiod_to_irq);
 
@@ -2138,8 +2224,8 @@
 	might_sleep_if(extra_checks);
 	if (!desc_array)
 		return;
-	gpiod_set_array_value_priv(true, true, array_size, desc_array,
-				   value_array);
+	gpiod_set_array_value_complex(true, true, array_size, desc_array,
+				      value_array);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value_cansleep);
 
@@ -2161,8 +2247,8 @@
 	might_sleep_if(extra_checks);
 	if (!desc_array)
 		return;
-	gpiod_set_array_value_priv(false, true, array_size, desc_array,
-				   value_array);
+	gpiod_set_array_value_complex(false, true, array_size, desc_array,
+				      value_array);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
 
@@ -2231,9 +2317,11 @@
 	return desc;
 }
 
-static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
+static struct gpio_desc *acpi_find_gpio(struct device *dev,
+					const char *con_id,
 					unsigned int idx,
-					enum gpio_lookup_flags *flags)
+					enum gpiod_flags flags,
+					enum gpio_lookup_flags *lookupflags)
 {
 	struct acpi_device *adev = ACPI_COMPANION(dev);
 	struct acpi_gpio_info info;
@@ -2264,10 +2352,16 @@
 		desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
 		if (IS_ERR(desc))
 			return desc;
+
+		if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
+		    info.gpioint) {
+			dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+			return ERR_PTR(-ENOENT);
+		}
 	}
 
 	if (info.polarity == GPIO_ACTIVE_LOW)
-		*flags |= GPIO_ACTIVE_LOW;
+		*lookupflags |= GPIO_ACTIVE_LOW;
 
 	return desc;
 }
@@ -2530,7 +2624,7 @@
 			desc = of_find_gpio(dev, con_id, idx, &lookupflags);
 		} else if (ACPI_COMPANION(dev)) {
 			dev_dbg(dev, "using ACPI for GPIO lookup\n");
-			desc = acpi_find_gpio(dev, con_id, idx, &lookupflags);
+			desc = acpi_find_gpio(dev, con_id, idx, flags, &lookupflags);
 		}
 	}
 
@@ -2680,15 +2774,16 @@
 
 	local_desc = gpiochip_request_own_desc(chip, hwnum, name);
 	if (IS_ERR(local_desc)) {
-		pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n",
-		       name, chip->label, hwnum);
-		return PTR_ERR(local_desc);
+		status = PTR_ERR(local_desc);
+		pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
+		       name, chip->label, hwnum, status);
+		return status;
 	}
 
 	status = gpiod_configure_flags(desc, name, dflags);
 	if (status < 0) {
-		pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n",
-		       name, chip->label, hwnum);
+		pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
+		       name, chip->label, hwnum, status);
 		gpiochip_free_own_desc(desc);
 		return status;
 	}
@@ -2829,6 +2924,9 @@
 	if (ret < 0) {
 		pr_err("gpiolib: failed to allocate char dev region\n");
 		bus_unregister(&gpio_bus_type);
+	} else {
+		gpiolib_initialized = true;
+		gpiochip_setup_devs();
 	}
 	return ret;
 }
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index e30e5fd..2d9ea5e 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -141,6 +141,10 @@
 		   const char *list_name, int index, enum of_gpio_flags *flags);
 
 struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
+void gpiod_set_array_value_complex(bool raw, bool can_sleep,
+				   unsigned int array_size,
+				   struct gpio_desc **desc_array,
+				   int *value_array);
 
 extern struct spinlock gpio_lock;
 extern struct list_head gpio_devices;
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 0f734ee..ca77ec1 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,10 +1,14 @@
-menu "ACP Configuration"
+menu "ACP (Audio CoProcessor) Configuration"
 
 config DRM_AMD_ACP
-       bool "Enable ACP IP support"
+       bool "Enable AMD Audio CoProcessor IP support"
        select MFD_CORE
        select PM_GENERIC_DOMAINS if PM
        help
 	Choose this option to enable ACP IP support for AMD SOCs.
+	This adds the ACP (Audio CoProcessor) IP driver and wires
+	it up into the amdgpu driver.  The ACP block provides the DMA
+	engine for the i2s-based ALSA driver. It is required for audio
+	on APUs which utilize an i2s codec.
 
 endmenu
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c4a21c6..1bcbade 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1591,6 +1591,8 @@
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
+	unsigned		fw_version;
+	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -2033,6 +2035,7 @@
 
 	/* tracking pinned memory */
 	u64 vram_pin_size;
+	u64 invisible_pin_size;
 	u64 gart_pin_size;
 
 	/* amdkfd interface */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index d6b0bff..b7b583c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -425,6 +425,10 @@
 	struct acp_pm_domain *apd;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* return early if no ACP */
+	if (!adev->acp.acp_genpd)
+		return 0;
+
 	/* SMU block will power on ACP irrespective of ACP runtime status.
 	 * Power off explicitly based on genpd ACP runtime status so that ACP
 	 * hw and ACP-genpd status are in sync.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 0020a0e..35a1248 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,10 +63,6 @@
 	return amdgpu_atpx_priv.atpx_detected;
 }
 
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
-	return amdgpu_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -146,6 +142,13 @@
  */
 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 {
+	/* make sure required functions are enabled */
+	/* dGPU power control is required */
+	if (atpx->functions.power_cntl == false) {
+		printk("ATPX dGPU power cntl not present, forcing\n");
+		atpx->functions.power_cntl = true;
+	}
+
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a4b101..6043dc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -816,10 +816,13 @@
 	struct drm_device *ddev = adev->ddev;
 	struct drm_crtc *crtc;
 	uint32_t line_time_us, vblank_lines;
+	struct cgs_mode_info *mode_info;
 
 	if (info == NULL)
 		return -EINVAL;
 
+	mode_info = info->mode_info;
+
 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
 		list_for_each_entry(crtc,
 				&ddev->mode_config.crtc_list, head) {
@@ -828,7 +831,7 @@
 				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
 				info->display_count++;
 			}
-			if (info->mode_info != NULL &&
+			if (mode_info != NULL &&
 				crtc->enabled && amdgpu_crtc->enabled &&
 				amdgpu_crtc->hw_mode.clock) {
 				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
@@ -836,10 +839,10 @@
 				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
 							amdgpu_crtc->hw_mode.crtc_vdisplay +
 							(amdgpu_crtc->v_border * 2);
-				info->mode_info->vblank_time_us = vblank_lines * line_time_us;
-				info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-				info->mode_info->ref_clock = adev->clock.spll.reference_freq;
-				info->mode_info++;
+				mode_info->vblank_time_us = vblank_lines * line_time_us;
+				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+				mode_info->ref_clock = adev->clock.spll.reference_freq;
+				mode_info = NULL;
 			}
 		}
 	}
@@ -847,6 +850,16 @@
 	return 0;
 }
 
+
+static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
+{
+	CGS_FUNC_ADEV;
+
+	adev->pm.dpm_enabled = enabled;
+
+	return 0;
+}
+
 /** \brief evaluate acpi namespace object, handle or pathname must be valid
  *  \param cgs_device
  *  \param info input/output arguments for the control method
@@ -1097,6 +1110,7 @@
 	amdgpu_cgs_set_powergating_state,
 	amdgpu_cgs_set_clockgating_state,
 	amdgpu_cgs_get_active_displays_info,
+	amdgpu_cgs_notify_dpm_enabled,
 	amdgpu_cgs_call_acpi_method,
 	amdgpu_cgs_query_system_info,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 61211747..2139da7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,12 +62,6 @@
 	"LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 bool amdgpu_device_is_px(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
@@ -1485,7 +1479,7 @@
 
 	if (amdgpu_runtime_pm == 1)
 		runtime = true;
-	if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+	if (amdgpu_device_is_px(ddev))
 		runtime = true;
 	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
 	if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f0ed974..3fb405b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -57,7 +57,7 @@
 	if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
 		return true;
 
-	fence_put(*f);
+	fence_put(fence);
 	return false;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 4303b44..d81f1f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -121,7 +121,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct fence **ptr;
+	struct fence *old, **ptr;
 	uint32_t seq;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
@@ -141,7 +141,11 @@
 	/* This function can't be called concurrently anyway, otherwise
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
-	BUG_ON(rcu_dereference_protected(*ptr, 1));
+	old = rcu_dereference_protected(*ptr, 1);
+	if (old && !fence_is_signaled(old)) {
+		DRM_INFO("rcu slot is busy\n");
+		fence_wait(old, false);
+	}
 
 	rcu_assign_pointer(*ptr, fence_get(&fence->base));
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index f594cfa..762cfdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -219,6 +219,8 @@
 	if (r) {
 		return r;
 	}
+	adev->ddev->vblank_disable_allowed = true;
+
 	/* enable msi */
 	adev->irq.msi_enabled = false;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7805a87..b04337d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -303,7 +303,7 @@
 			fw_info.feature = adev->vce.fb_version;
 			break;
 		case AMDGPU_INFO_FW_UVD:
-			fw_info.ver = 0;
+			fw_info.ver = adev->uvd.fw_version;
 			fw_info.feature = 0;
 			break;
 		case AMDGPU_INFO_FW_GMC:
@@ -382,8 +382,9 @@
 		struct drm_amdgpu_info_vram_gtt vram_gtt;
 
 		vram_gtt.vram_size = adev->mc.real_vram_size;
+		vram_gtt.vram_size -= adev->vram_pin_size;
 		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
-		vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
+		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
 		vram_gtt.gtt_size  = adev->mc.gtt_size;
 		vram_gtt.gtt_size -= adev->gart_pin_size;
 		return copy_to_user(out, &vram_gtt,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 8d432e6..81bd964 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -53,7 +53,7 @@
 
 #define AMDGPU_MAX_HPD_PINS 6
 #define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
 
 enum amdgpu_rmx_type {
 	RMX_OFF,
@@ -309,8 +309,8 @@
 	struct atom_context *atom_context;
 	struct card_info *atom_card_info;
 	bool mode_config_initialized;
-	struct amdgpu_crtc *crtcs[6];
-	struct amdgpu_afmt *afmt[7];
+	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
 	/* DVI-I properties */
 	struct drm_property *coherent_mode_property;
 	/* DAC enable load detect */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 151a2d4..7ecea83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -424,9 +424,11 @@
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
 			*gpu_addr = amdgpu_bo_gpu_offset(bo);
-		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
-		else
+			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+				bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+		} else
 			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
 	} else {
 		dev_err(bo->adev->dev, "%p pin failed\n", bo);
@@ -456,9 +458,11 @@
 	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
-		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
 			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
-		else
+			if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+				bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+		} else
 			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
 	} else {
 		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
@@ -476,6 +480,17 @@
 	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
 }
 
+static const char *amdgpu_vram_names[] = {
+	"UNKNOWN",
+	"GDDR1",
+	"DDR2",
+	"GDDR3",
+	"GDDR4",
+	"GDDR5",
+	"HBM",
+	"DDR3"
+};
+
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
 	/* Add an MTRR for the VRAM */
@@ -484,8 +499,8 @@
 	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
 		adev->mc.mc_vram_size >> 20,
 		(unsigned long long)adev->mc.aper_size >> 20);
-	DRM_INFO("RAM width %dbits DDR\n",
-			adev->mc.vram_width);
+	DRM_INFO("RAM width %dbits %s\n",
+		 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
 	return amdgpu_ttm_init(adev);
 }
 
@@ -526,6 +541,7 @@
 	if (!metadata_size) {
 		if (bo->metadata_size) {
 			kfree(bo->metadata);
+			bo->metadata = NULL;
 			bo->metadata_size = 0;
 		}
 		return 0;
@@ -608,6 +624,10 @@
 	if ((offset + size) <= adev->mc.visible_vram_size)
 		return 0;
 
+	/* Can't move a pinned BO to visible VRAM */
+	if (abo->pin_count > 0)
+		return -EINVAL;
+
 	/* hurrah the memory is not visible ! */
 	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
 	lpfn =	adev->mc.visible_vram_size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 3cb6d6c..e9c6ae6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,7 +143,7 @@
 					adev->powerplay.pp_handle);
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-	if (adev->pp_enabled) {
+	if (adev->pp_enabled && adev->pm.dpm_enabled) {
 		amdgpu_pm_sysfs_init(adev);
 		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
 	}
@@ -161,12 +161,8 @@
 					adev->powerplay.pp_handle);
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-	if (adev->pp_enabled) {
-		if (amdgpu_dpm == 0)
-			adev->pm.dpm_enabled = false;
-		else
-			adev->pm.dpm_enabled = true;
-	}
+	if (adev->pp_enabled)
+		adev->pm.dpm_enabled = true;
 #endif
 
 	return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ab34190..11af449 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -223,6 +223,8 @@
 {
 	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
 
+	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
+		return -EPERM;
 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
@@ -384,9 +386,15 @@
 			struct ttm_mem_reg *new_mem)
 {
 	struct amdgpu_device *adev;
+	struct amdgpu_bo *abo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int r;
 
+	/* Can't move a pinned BO */
+	abo = container_of(bo, struct amdgpu_bo, tbo);
+	if (WARN_ON_ONCE(abo->pin_count > 0))
+		return -EINVAL;
+
 	adev = amdgpu_get_adev(bo->bdev);
 	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 		amdgpu_move_null(bo, new_mem);
@@ -616,7 +624,7 @@
 			set_page_dirty(page);
 
 		mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index c1a5810..871018c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -158,6 +158,9 @@
 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
 		version_major, version_minor, family_id);
 
+	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+				(family_id << 8));
+
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
 		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -241,32 +244,30 @@
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->uvd.ring;
-	int i, r;
+	unsigned size;
+	void *ptr;
+	int i;
 
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
-		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
-		if (handle != 0) {
-			struct fence *fence;
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+		if (atomic_read(&adev->uvd.handles[i]))
+			break;
 
-			amdgpu_uvd_note_usage(adev);
+	if (i == AMDGPU_MAX_UVD_HANDLES)
+		return 0;
 
-			r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);
-			if (r) {
-				DRM_ERROR("Error destroying UVD (%d)!\n", r);
-				continue;
-			}
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
 
-			fence_wait(fence, false);
-			fence_put(fence);
+	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+	ptr = adev->uvd.cpu_addr;
 
-			adev->uvd.filp[i] = NULL;
-			atomic_set(&adev->uvd.handles[i], 0);
-		}
-	}
+	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+	if (!adev->uvd.saved_bo)
+		return -ENOMEM;
+
+	memcpy(adev->uvd.saved_bo, ptr, size);
 
 	return 0;
 }
@@ -275,23 +276,29 @@
 {
 	unsigned size;
 	void *ptr;
-	const struct common_firmware_header *hdr;
-	unsigned offset;
 
 	if (adev->uvd.vcpu_bo == NULL)
 		return -EINVAL;
 
-	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
-	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
-		(adev->uvd.fw->size) - offset);
-
 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	size -= le32_to_cpu(hdr->ucode_size_bytes);
 	ptr = adev->uvd.cpu_addr;
-	ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-	memset(ptr, 0, size);
+	if (adev->uvd.saved_bo != NULL) {
+		memcpy(ptr, adev->uvd.saved_bo, size);
+		kfree(adev->uvd.saved_bo);
+		adev->uvd.saved_bo = NULL;
+	} else {
+		const struct common_firmware_header *hdr;
+		unsigned offset;
+
+		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+		memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+			(adev->uvd.fw->size) - offset);
+		size -= le32_to_cpu(hdr->ucode_size_bytes);
+		ptr += le32_to_cpu(hdr->ucode_size_bytes);
+		memset(ptr, 0, size);
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 4bec0c1..481a64f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -234,6 +234,7 @@
 	if (i == AMDGPU_MAX_VCE_HANDLES)
 		return 0;
 
+	cancel_delayed_work_sync(&adev->vce.idle_work);
 	/* TODO: suspending running encoding sessions isn't supported */
 	return -EINVAL;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index bf731e9..7f85c2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -276,8 +276,8 @@
 			}
 		}
 	} else {
-		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
 				if (max_pix_clock >= pix_clock) {
 					*dp_lanes = lane_num;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1e0bba2..1cd6de5 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -298,6 +298,10 @@
 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+	/* vertical FP must be at least 1 */
+	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+		adjusted_mode->crtc_vsync_start++;
+
 	/* get the native mode for scaling */
 	if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
 		amdgpu_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 82ce7d9..a4a2e6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -903,14 +903,6 @@
 	gmc_v7_0_set_gart_funcs(adev);
 	gmc_v7_0_set_irq_funcs(adev);
 
-	if (adev->flags & AMD_IS_APU) {
-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-	} else {
-		u32 tmp = RREG32(mmMC_SEQ_MISC0);
-		tmp &= MC_SEQ_MISC0__MT__MASK;
-		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
-	}
-
 	return 0;
 }
 
@@ -918,7 +910,10 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	else
+		return 0;
 }
 
 static int gmc_v7_0_sw_init(void *handle)
@@ -927,6 +922,14 @@
 	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (adev->flags & AMD_IS_APU) {
+		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+	} else {
+		u32 tmp = RREG32(mmMC_SEQ_MISC0);
+		tmp &= MC_SEQ_MISC0__MT__MASK;
+		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+	}
+
 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 29bd7b5..7a9db2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -863,14 +863,6 @@
 	gmc_v8_0_set_gart_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
 
-	if (adev->flags & AMD_IS_APU) {
-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-	} else {
-		u32 tmp = RREG32(mmMC_SEQ_MISC0);
-		tmp &= MC_SEQ_MISC0__MT__MASK;
-		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
-	}
-
 	return 0;
 }
 
@@ -878,15 +870,33 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	else
+		return 0;
 }
 
+#define mmMC_SEQ_MISC0_FIJI 0xA71
+
 static int gmc_v8_0_sw_init(void *handle)
 {
 	int r;
 	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (adev->flags & AMD_IS_APU) {
+		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+	} else {
+		u32 tmp;
+
+		if (adev->asic_type == CHIP_FIJI)
+			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+		else
+			tmp = RREG32(mmMC_SEQ_MISC0);
+		tmp &= MC_SEQ_MISC0__MT__MASK;
+		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+	}
+
 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b6f7d7b..0f14199 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -307,7 +307,7 @@
 
 	amdgpu_irq_fini(adev);
 	amdgpu_ih_ring_fini(adev);
-	amdgpu_irq_add_domain(adev);
+	amdgpu_irq_remove_domain(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index c606ccb..cb46375 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v4_2_hw_fini(adev);
 	if (r)
 		return r;
 
-	r = uvd_v4_2_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e3c852d..16476d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v5_0_hw_fini(adev);
 	if (r)
 		return r;
 
-	r = uvd_v5_0_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 3375e614..d493791 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,15 +214,16 @@
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	r = uvd_v6_0_hw_fini(adev);
+	if (r)
+		return r;
+
 	/* Skip this for APU for now */
 	if (!(adev->flags & AMD_IS_APU)) {
 		r = amdgpu_uvd_suspend(adev);
 		if (r)
 			return r;
 	}
-	r = uvd_v6_0_hw_fini(adev);
-	if (r)
-		return r;
 
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index aec38fc..ab84d49 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -589,6 +589,8 @@
 					void *cgs_device,
 					struct cgs_display_info *info);
 
+typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
+
 typedef int (*cgs_call_acpi_method)(void *cgs_device,
 					uint32_t acpi_method,
 					uint32_t acpi_function,
@@ -644,6 +646,8 @@
 	cgs_set_clockgating_state set_clockgating_state;
 	/* display manager */
 	cgs_get_active_displays_info get_active_displays_info;
+	/* notify dpm enabled */
+	cgs_notify_dpm_enabled notify_dpm_enabled;
 	/* ACPI */
 	cgs_call_acpi_method call_acpi_method;
 	/* get system info */
@@ -734,8 +738,12 @@
 	CGS_CALL(set_powergating_state, dev, block_type, state)
 #define cgs_set_clockgating_state(dev, block_type, state)	\
 	CGS_CALL(set_clockgating_state, dev, block_type, state)
+#define cgs_notify_dpm_enabled(dev, enabled)	\
+	CGS_CALL(notify_dpm_enabled, dev, enabled)
+
 #define cgs_get_active_displays_info(dev, info)	\
 	CGS_CALL(get_active_displays_info, dev, info)
+
 #define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)	\
 	CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
 #define cgs_query_system_info(dev, sys_info)	\
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 6b52c78..56856a2 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -137,14 +137,14 @@
 	reset_display_configCounter_tasks,
 	update_dal_configuration_tasks,
 	vari_bright_resume_tasks,
-	block_adjust_power_state_tasks,
 	setup_asic_tasks,
 	enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
 	enable_dynamic_state_management_tasks,
 	enable_clock_power_gatings_tasks,
 	enable_disable_bapm_tasks,
 	initialize_thermal_controller_tasks,
-	reset_boot_state_tasks,
+	get_2d_performance_state_tasks,
+	set_performance_state_tasks,
 	adjust_power_state_tasks,
 	enable_disable_fps_tasks,
 	notify_hw_power_source_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 51dedf8..89f31bc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -2389,6 +2389,7 @@
 
 	for(count = 0; count < table->VceLevelCount; count++) {
 		table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+		table->VceLevel[count].MinVoltage = 0;
 		table->VceLevel[count].MinVoltage |=
 				(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
 		table->VceLevel[count].MinVoltage |=
@@ -2465,6 +2466,7 @@
 
 	for (count = 0; count < table->SamuLevelCount; count++) {
 		/* not sure whether we need evclk or not */
+		table->SamuLevel[count].MinVoltage = 0;
 		table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
 		table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
 				VOLTAGE_SCALE) << VDDC_SHIFT;
@@ -2562,6 +2564,7 @@
 	table->UvdBootLevel = 0;
 
 	for (count = 0; count < table->UvdLevelCount; count++) {
+		table->UvdLevel[count].MinVoltage = 0;
 		table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
 		table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
 		table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
@@ -2900,6 +2903,8 @@
 	if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
 		fiji_populate_smc_voltage_tables(hwmgr, table);
 
+	table->SystemFlags = 0;
+
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_AutomaticDCTransition))
 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
@@ -2997,6 +3002,7 @@
 	table->MemoryThermThrottleEnable = 1;
 	table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/
 	table->PCIeGenInterval = 1;
+	table->VRConfig = 0;
 
 	result = fiji_populate_vr_config(hwmgr, table);
 	PP_ASSERT_WITH_CODE(0 == result,
@@ -5195,6 +5201,67 @@
 	return size;
 }
 
+static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
+							   const struct fiji_performance_level *pl2)
+{
+	return ((pl1->memory_clock == pl2->memory_clock) &&
+		  (pl1->engine_clock == pl2->engine_clock) &&
+		  (pl1->pcie_gen == pl2->pcie_gen) &&
+		  (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+	const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
+	const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
+	int i;
+
+	if (equal == NULL || psa == NULL || psb == NULL)
+		return -EINVAL;
+
+	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
+	if (psa->performance_level_count != psb->performance_level_count) {
+		*equal = false;
+		return 0;
+	}
+
+	for (i = 0; i < psa->performance_level_count; i++) {
+		if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+			/* If we have found even one performance level pair that is different the states are different. */
+			*equal = false;
+			return 0;
+		}
+	}
+
+	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
+	*equal &= (psa->acp_clk == psb->acp_clk);
+
+	return 0;
+}
+
+bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+	bool is_update_required = false;
+	struct cgs_display_info info = {0,0,NULL};
+
+	cgs_get_active_displays_info(hwmgr->device, &info);
+
+	if (data->display_timing.num_existing_displays != info.display_count)
+		is_update_required = true;
+/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
+	if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+		cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
+		if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
+			is_update_required = true;
+*/
+	return is_update_required;
+}
+
+
 static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
 	.backend_init = &fiji_hwmgr_backend_init,
 	.backend_fini = &tonga_hwmgr_backend_fini,
@@ -5230,6 +5297,8 @@
 	.register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
 	.set_fan_control_mode = fiji_set_fan_control_mode,
 	.get_fan_control_mode = fiji_get_fan_control_mode,
+	.check_states_equal = fiji_check_states_equal,
+	.check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
 	.get_pp_table = fiji_get_pp_table,
 	.set_pp_table = fiji_set_pp_table,
 	.force_clock_level = fiji_force_clock_level,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index be31bed..fa208ad 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -58,6 +58,9 @@
 
 	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
 
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
 	if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
 		acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
 		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
@@ -130,18 +133,25 @@
 
 int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 {
+	int ret = 1;
+	bool enabled;
 	PHM_FUNC_CHECK(hwmgr);
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 		PHM_PlatformCaps_TablelessHardwareInterface)) {
 		if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
-			return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
+			ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
 	} else {
-		return phm_dispatch_table(hwmgr,
+		ret = phm_dispatch_table(hwmgr,
 				&(hwmgr->enable_dynamic_state_management),
 				NULL, NULL);
 	}
-	return 0;
+
+	enabled = ret == 0 ? true : false;
+
+	cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+	return ret;
 }
 
 int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 56b829f..3ac1ae4 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -57,14 +57,13 @@
 		DRM_ERROR("failed to map control registers area\n");
 		ret = PTR_ERR(hdlcd->mmio);
 		hdlcd->mmio = NULL;
-		goto fail;
+		return ret;
 	}
 
 	version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
 	if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
 		DRM_ERROR("unknown product id: 0x%x\n", version);
-		ret = -EINVAL;
-		goto fail;
+		return -EINVAL;
 	}
 	DRM_INFO("found ARM HDLCD version r%dp%d\n",
 		(version & HDLCD_VERSION_MAJOR_MASK) >> 8,
@@ -73,7 +72,7 @@
 	/* Get the optional framebuffer memory resource */
 	ret = of_reserved_mem_device_init(drm->dev);
 	if (ret && ret != -ENODEV)
-		goto fail;
+		return ret;
 
 	ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
 	if (ret)
@@ -101,8 +100,6 @@
 	drm_crtc_cleanup(&hdlcd->crtc);
 setup_fail:
 	of_reserved_mem_device_release(drm->dev);
-fail:
-	devm_clk_put(drm->dev, hdlcd->clk);
 
 	return ret;
 }
@@ -412,7 +409,6 @@
 	pm_runtime_put_sync(drm->dev);
 	pm_runtime_disable(drm->dev);
 	of_reserved_mem_device_release(drm->dev);
-	devm_clk_put(dev, hdlcd->clk);
 err_free:
 	drm_dev_unref(drm);
 
@@ -436,10 +432,6 @@
 	pm_runtime_put_sync(drm->dev);
 	pm_runtime_disable(drm->dev);
 	of_reserved_mem_device_release(drm->dev);
-	if (!IS_ERR(hdlcd->clk)) {
-		devm_clk_put(drm->dev, hdlcd->clk);
-		hdlcd->clk = NULL;
-	}
 	drm_mode_config_cleanup(drm);
 	drm_dev_unregister(drm);
 	drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 6e731db..aca7f9c 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -481,7 +481,7 @@
 
  release:
 	for_each_sg(sgt->sgl, sg, num, i)
-		page_cache_release(sg_page(sg));
+		put_page(sg_page(sg));
  free_table:
 	sg_free_table(sgt);
  free_sgt:
@@ -502,7 +502,7 @@
 	if (dobj->obj.filp) {
 		struct scatterlist *sg;
 		for_each_sg(sgt->sgl, sg, sgt->nents, i)
-			page_cache_release(sg_page(sg));
+			put_page(sg_page(sg));
 	}
 
 	sg_free_table(sgt);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 6743ff7..059f7c3 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -72,7 +72,7 @@
 {
 
 #if defined(CONFIG_X86)
-	if (cpu_has_clflush) {
+	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		drm_cache_flush_clflush(pages, num_pages);
 		return;
 	}
@@ -105,7 +105,7 @@
 drm_clflush_sg(struct sg_table *st)
 {
 #if defined(CONFIG_X86)
-	if (cpu_has_clflush) {
+	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		struct sg_page_iter sg_iter;
 
 		mb();
@@ -129,7 +129,7 @@
 drm_clflush_virt_range(void *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
-	if (cpu_has_clflush) {
+	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		const int size = boot_cpu_data.x86_clflush_size;
 		void *end = addr + length;
 		addr = (void *)(((unsigned long)addr) & -size);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7d58f59..df64ed1 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -179,7 +179,7 @@
 {
 	struct drm_dp_aux_msg msg;
 	unsigned int retry;
-	int err;
+	int err = 0;
 
 	memset(&msg, 0, sizeof(msg));
 	msg.address = offset;
@@ -187,6 +187,8 @@
 	msg.buffer = buffer;
 	msg.size = size;
 
+	mutex_lock(&aux->hw_mutex);
+
 	/*
 	 * The specification doesn't give any recommendation on how often to
 	 * retry native transactions. We used to retry 7 times like for
@@ -195,25 +197,24 @@
 	 */
 	for (retry = 0; retry < 32; retry++) {
 
-		mutex_lock(&aux->hw_mutex);
 		err = aux->transfer(aux, &msg);
-		mutex_unlock(&aux->hw_mutex);
 		if (err < 0) {
 			if (err == -EBUSY)
 				continue;
 
-			return err;
+			goto unlock;
 		}
 
 
 		switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
 		case DP_AUX_NATIVE_REPLY_ACK:
 			if (err < size)
-				return -EPROTO;
-			return err;
+				err = -EPROTO;
+			goto unlock;
 
 		case DP_AUX_NATIVE_REPLY_NACK:
-			return -EIO;
+			err = -EIO;
+			goto unlock;
 
 		case DP_AUX_NATIVE_REPLY_DEFER:
 			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -222,7 +223,11 @@
 	}
 
 	DRM_DEBUG_KMS("too many retries, giving up\n");
-	return -EIO;
+	err = -EIO;
+
+unlock:
+	mutex_unlock(&aux->hw_mutex);
+	return err;
 }
 
 /**
@@ -544,9 +549,7 @@
 	int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
 
 	for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
-		mutex_lock(&aux->hw_mutex);
 		ret = aux->transfer(aux, msg);
-		mutex_unlock(&aux->hw_mutex);
 		if (ret < 0) {
 			if (ret == -EBUSY)
 				continue;
@@ -685,6 +688,8 @@
 
 	memset(&msg, 0, sizeof(msg));
 
+	mutex_lock(&aux->hw_mutex);
+
 	for (i = 0; i < num; i++) {
 		msg.address = msgs[i].addr;
 		drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -739,6 +744,8 @@
 	msg.size = 0;
 	(void)drm_dp_i2c_do_msg(aux, &msg);
 
+	mutex_unlock(&aux->hw_mutex);
+
 	return err;
 }
 
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 27fbd79..71ea052 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1672,13 +1672,19 @@
 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
 	int i;
 
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return -EINVAL;
+
 	port_num = port->port_num;
 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
 	if (!mstb) {
 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
 
-		if (!mstb)
+		if (!mstb) {
+			drm_dp_put_port(port);
 			return -EINVAL;
+		}
 	}
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@
 	kfree(txmsg);
 fail_put:
 	drm_dp_put_mst_branch_device(mstb);
+	drm_dp_put_port(port);
 	return ret;
 }
 
@@ -1789,6 +1796,11 @@
 		req_payload.start_slot = cur_slots;
 		if (mgr->proposed_vcpis[i]) {
 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+			port = drm_dp_get_validated_port_ref(mgr, port);
+			if (!port) {
+				mutex_unlock(&mgr->payload_lock);
+				return -EINVAL;
+			}
 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
 			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
 		} else {
@@ -1816,6 +1828,9 @@
 			mgr->payloads[i].payload_state = req_payload.payload_state;
 		}
 		cur_slots += req_payload.num_slots;
+
+		if (port)
+			drm_dp_put_port(port);
 	}
 
 	for (i = 0; i < mgr->max_payloads; i++) {
@@ -2121,6 +2136,8 @@
 
 	if (mgr->mst_primary) {
 		int sret;
+		u8 guid[16];
+
 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
 		if (sret != DP_RECEIVER_CAP_SIZE) {
 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2135,6 +2152,16 @@
 			ret = -1;
 			goto out_unlock;
 		}
+
+		/* Some hubs forget their guids after they resume */
+		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+		if (sret != 16) {
+			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+			ret = -1;
+			goto out_unlock;
+		}
+		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
 		ret = 0;
 	} else
 		ret = -1;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 414d7f6..558ef9f 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -205,7 +205,7 @@
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 	/* 0x0f - 1024x768@43Hz, interlace */
 	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
-		   1208, 1264, 0, 768, 768, 772, 817, 0,
+		   1208, 1264, 0, 768, 768, 776, 817, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
 		   DRM_MODE_FLAG_INTERLACE) },
 	/* 0x10 - 1024x768@60Hz */
@@ -522,12 +522,12 @@
 		   720, 840, 0, 480, 481, 484, 500, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
-		   704,  832, 0, 480, 489, 491, 520, 0,
+		   704,  832, 0, 480, 489, 492, 520, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
 		   768,  864, 0, 480, 483, 486, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
-	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
 		   752, 800, 0, 480, 490, 492, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
 	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -539,7 +539,7 @@
 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
 		   1136, 1312, 0,  768, 769, 772, 800, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -2241,7 +2241,7 @@
 {
 	int i, j, m, modes = 0;
 	struct drm_display_mode *mode;
-	u8 *est = ((u8 *)timing) + 5;
+	u8 *est = ((u8 *)timing) + 6;
 
 	for (i = 0; i < 6; i++) {
 		for (j = 7; j >= 0; j--) {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 2e8c77e..da0c532 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -534,7 +534,7 @@
 
 fail:
 	while (i--)
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 
 	drm_free_large(pages);
 	return ERR_CAST(p);
@@ -569,7 +569,7 @@
 			mark_page_accessed(pages[i]);
 
 		/* Undo the reference we took when populating the table */
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 
 	drm_free_large(pages);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 09198d0..d8a9a9c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -572,6 +572,24 @@
 		goto fail;
 	}
 
+	/*
+	 * Set the GPU linear window to be at the end of the DMA window, where
+	 * the CMA area is likely to reside. This ensures that we are able to
+	 * map the command buffers while having the linear window overlap as
+	 * much RAM as possible, so we can optimize mappings for other buffers.
+	 *
+	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
+	 * to different views of the memory on the individual engines.
+	 */
+	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
+		if (dma_mask < PHYS_OFFSET + SZ_2G)
+			gpu->memory_base = PHYS_OFFSET;
+		else
+			gpu->memory_base = dma_mask - SZ_2G + 1;
+	}
+
 	ret = etnaviv_hw_reset(gpu);
 	if (ret)
 		goto fail;
@@ -778,9 +796,9 @@
 	    debug.state[0] == debug.state[1]) {
 		seq_puts(m, "seems to be stuck\n");
 	} else if (debug.address[0] == debug.address[1]) {
-		seq_puts(m, "adress is constant\n");
+		seq_puts(m, "address is constant\n");
 	} else {
-		seq_puts(m, "is runing\n");
+		seq_puts(m, "is running\n");
 	}
 
 	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
@@ -1566,7 +1584,6 @@
 {
 	struct device *dev = &pdev->dev;
 	struct etnaviv_gpu *gpu;
-	u32 dma_mask;
 	int err = 0;
 
 	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1576,18 +1593,6 @@
 	gpu->dev = &pdev->dev;
 	mutex_init(&gpu->lock);
 
-	/*
-	 * Set the GPU linear window to be at the end of the DMA window, where
-	 * the CMA area is likely to reside. This ensures that we are able to
-	 * map the command buffers while having the linear window overlap as
-	 * much RAM as possible, so we can optimize mappings for other buffers.
-	 */
-	dma_mask = (u32)dma_get_required_mask(dev);
-	if (dma_mask < PHYS_OFFSET + SZ_2G)
-		gpu->memory_base = PHYS_OFFSET;
-	else
-		gpu->memory_base = dma_mask - SZ_2G + 1;
-
 	/* Map registers: */
 	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
 	if (IS_ERR(gpu->mmio))
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f17d392..baddf33 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -94,7 +94,7 @@
 
 config DRM_EXYNOS_G2D
 	bool "G2D"
-	depends on !VIDEO_SAMSUNG_S5P_G2D
+	depends on VIDEO_SAMSUNG_S5P_G2D=n
 	select FRAME_VECTOR
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 968b31c..23d2f95 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -2,10 +2,10 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
-		exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
-		exynos_drm_plane.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
+		exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON)	+= exynos5433_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 7f55ba6..011211e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -101,7 +101,7 @@
 	return 0;
 
 err:
-	list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+	list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
 		if (subdrv->close)
 			subdrv->close(dev, subdrv->dev, file);
 	}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d614194..81cc553 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -199,17 +199,6 @@
 	return exynos_fb->dma_addr[index];
 }
 
-static void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
-	struct exynos_drm_private *private = dev->dev_private;
-	struct drm_fb_helper *fb_helper = private->fb_helper;
-
-	if (fb_helper)
-		drm_fb_helper_hotplug_event(fb_helper);
-	else
-		exynos_drm_fbdev_init(dev);
-}
-
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
 	.fb_create = exynos_user_fb_create,
 	.output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4ae860c..72d7c0b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -317,3 +317,14 @@
 
 	drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
 }
+
+void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+	struct drm_fb_helper *fb_helper = private->fb_helper;
+
+	if (fb_helper)
+		drm_fb_helper_hotplug_event(fb_helper);
+	else
+		exynos_drm_fbdev_init(dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index e16d7f0..330eef8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -15,9 +15,30 @@
 #ifndef _EXYNOS_DRM_FBDEV_H_
 #define _EXYNOS_DRM_FBDEV_H_
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
 int exynos_drm_fbdev_init(struct drm_device *dev);
-int exynos_drm_fbdev_reinit(struct drm_device *dev);
 void exynos_drm_fbdev_fini(struct drm_device *dev);
 void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+void exynos_drm_output_poll_changed(struct drm_device *dev);
+
+#else
+
+static inline int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+#define exynos_drm_output_poll_changed (NULL)
+
+#endif
 
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 51d484a..018449f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -888,7 +888,7 @@
 	 * clock. On these SoCs the bootloader may enable it but any
 	 * power domain off/on will reset it to disable state.
 	 */
-	if (ctx->driver_data != &exynos5_fimd_driver_data ||
+	if (ctx->driver_data != &exynos5_fimd_driver_data &&
 	    ctx->driver_data != &exynos5420_fimd_driver_data)
 		return;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 9869d70..a0def0b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -129,7 +129,7 @@
 	} else
 		val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
 
-	regmap_write(mic->sysreg, DSD_CFG_MUX, val);
+	ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
 	if (ret)
 		DRM_ERROR("mic: Failed to read system register\n");
 }
@@ -457,6 +457,7 @@
 							"samsung,disp-syscon");
 	if (IS_ERR(mic->sysreg)) {
 		DRM_ERROR("mic: Failed to get system register.\n");
+		ret = PTR_ERR(mic->sysreg);
 		goto err;
 	}
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index d862272..50185ac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -11,9 +11,10 @@
 
 #include <drm/drmP.h>
 
-#include <drm/exynos_drm.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@
 }
 
 static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
-
 {
 	struct drm_plane_state *state = &exynos_state->base;
-	struct drm_crtc *crtc = exynos_state->base.crtc;
-	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_crtc_state *crtc_state =
+			drm_atomic_get_existing_crtc_state(state->state, crtc);
+	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
 	int crtc_x, crtc_y;
 	unsigned int crtc_w, crtc_h;
 	unsigned int src_x, src_y;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 7bb1f1a..c52f9ad 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -220,7 +220,7 @@
  * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
  * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
  */
-static int __deprecated
+static int
 i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 {
 	int error;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a0f1bd7..e3f4c72 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2872,20 +2872,6 @@
 		intel_panel_info(m, &intel_connector->panel);
 }
 
-static void intel_dp_mst_info(struct seq_file *m,
-			  struct intel_connector *intel_connector)
-{
-	struct intel_encoder *intel_encoder = intel_connector->encoder;
-	struct intel_dp_mst_encoder *intel_mst =
-		enc_to_mst(&intel_encoder->base);
-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
-					intel_connector->port);
-
-	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
-}
-
 static void intel_hdmi_info(struct seq_file *m,
 			    struct intel_connector *intel_connector)
 {
@@ -2929,8 +2915,6 @@
 			intel_hdmi_info(m, intel_connector);
 		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
 			intel_lvds_info(m, intel_connector);
-		else if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
-			intel_dp_mst_info(m, intel_connector);
 	}
 
 	seq_printf(m, "\tmodes:\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 20e8200..6d2fb3f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -758,10 +758,10 @@
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_display_resume(dev);
-
 	intel_dp_mst_resume(dev);
 
+	intel_display_resume(dev);
+
 	/*
 	 * ... but also need to make sure that hotplug processing
 	 * doesn't cause havoc. Like in the driver load code we don't
@@ -792,7 +792,7 @@
 static int i915_drm_resume_early(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
+	int ret;
 
 	/*
 	 * We have a resume ordering issue with the snd-hda driver also
@@ -803,6 +803,36 @@
 	 * FIXME: This should be solved with a special hdmi sink device or
 	 * similar so that power domains can be employed.
 	 */
+
+	/*
+	 * Note that we need to set the power state explicitly, since we
+	 * powered off the device during freeze and the PCI core won't power
+	 * it back up for us during thaw. Powering off the device during
+	 * freeze is not a hard requirement though, and during the
+	 * suspend/resume phases the PCI core makes sure we get here with the
+	 * device powered on. So in case we change our freeze logic and keep
+	 * the device powered we can also remove the following set power state
+	 * call.
+	 */
+	ret = pci_set_power_state(dev->pdev, PCI_D0);
+	if (ret) {
+		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+		goto out;
+	}
+
+	/*
+	 * Note that pci_enable_device() first enables any parent bridge
+	 * device and only then sets the power state for this device. The
+	 * bridge enabling is a nop though, since bridge devices are resumed
+	 * first. The order of enabling power and enabling the device is
+	 * imposed by the PCI core as described above, so here we preserve the
+	 * same order for the freeze/thaw phases.
+	 *
+	 * TODO: eventually we should remove pci_disable_device() /
+	 * pci_enable_device() from suspend/resume. Due to how they
+	 * depend on the device enable refcount we can't anyway depend on them
+	 * disabling/enabling the device.
+	 */
 	if (pci_enable_device(dev->pdev)) {
 		ret = -EIO;
 		goto out;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1048093..daba7eb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2634,8 +2634,9 @@
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-						 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
-						  IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+						 IS_SKL_GT3(dev) || \
+						 IS_SKL_GT4(dev))
+
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3d31d3a..f2cb9a9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@
 		drm_clflush_virt_range(vaddr, PAGE_SIZE);
 		kunmap_atomic(src);
 
-		page_cache_release(page);
+		put_page(page);
 		vaddr += PAGE_SIZE;
 	}
 
@@ -243,7 +243,7 @@
 			set_page_dirty(page);
 			if (obj->madv == I915_MADV_WILLNEED)
 				mark_page_accessed(page);
-			page_cache_release(page);
+			put_page(page);
 			vaddr += PAGE_SIZE;
 		}
 		obj->dirty = 0;
@@ -1732,7 +1732,7 @@
 	if (args->flags & ~(I915_MMAP_WC))
 		return -EINVAL;
 
-	if (args->flags & I915_MMAP_WC && !cpu_has_pat)
+	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
 		return -ENODEV;
 
 	obj = drm_gem_object_lookup(dev, file, args->handle);
@@ -2206,7 +2206,7 @@
 		if (obj->madv == I915_MADV_WILLNEED)
 			mark_page_accessed(page);
 
-		page_cache_release(page);
+		put_page(page);
 	}
 	obj->dirty = 0;
 
@@ -2346,7 +2346,7 @@
 err_pages:
 	sg_mark_end(sg);
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-		page_cache_release(sg_page_iter_page(&sg_iter));
+		put_page(sg_page_iter_page(&sg_iter));
 	sg_free_table(st);
 	kfree(st);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1328bc5..b845f46 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -488,7 +488,7 @@
 		ret = relocate_entry_cpu(obj, reloc, target_offset);
 	else if (obj->map_and_fenceable)
 		ret = relocate_entry_gtt(obj, reloc, target_offset);
-	else if (cpu_has_clflush)
+	else if (static_cpu_has(X86_FEATURE_CLFLUSH))
 		ret = relocate_entry_clflush(obj, reloc, target_offset);
 	else {
 		WARN_ONCE(1, "Impossible case in relocation handling\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 6be40f3..4d30b60 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -501,19 +501,24 @@
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 
-		down_read(&mm->mmap_sem);
-		while (pinned < npages) {
-			ret = get_user_pages_remote(work->task, mm,
-					obj->userptr.ptr + pinned * PAGE_SIZE,
-					npages - pinned,
-					!obj->userptr.read_only, 0,
-					pvec + pinned, NULL);
-			if (ret < 0)
-				break;
+		ret = -EFAULT;
+		if (atomic_inc_not_zero(&mm->mm_users)) {
+			down_read(&mm->mmap_sem);
+			while (pinned < npages) {
+				ret = get_user_pages_remote
+					(work->task, mm,
+					 obj->userptr.ptr + pinned * PAGE_SIZE,
+					 npages - pinned,
+					 !obj->userptr.read_only, 0,
+					 pvec + pinned, NULL);
+				if (ret < 0)
+					break;
 
-			pinned += ret;
+				pinned += ret;
+			}
+			up_read(&mm->mmap_sem);
+			mmput(mm);
 		}
-		up_read(&mm->mmap_sem);
 	}
 
 	mutex_lock(&dev->struct_mutex);
@@ -683,7 +688,7 @@
 			set_page_dirty(page);
 
 		mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	obj->dirty = 0;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef..1c21220 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1829,7 +1829,7 @@
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	disable_rpm_wakeref_asserts(dev_priv);
 
-	for (;;) {
+	do {
 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
 		iir = I915_READ(VLV_IIR);
 
@@ -1857,7 +1857,7 @@
 
 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
 		POSTING_READ(GEN8_MASTER_IRQ);
-	}
+	} while (0);
 
 	enable_rpm_wakeref_asserts(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f76cbf3..363bd79 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2907,7 +2907,14 @@
 #define GEN6_RP_STATE_CAP	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 #define BXT_RP_STATE_CAP        _MMIO(0x138170)
 
-#define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
+/*
+ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)
 #define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
 #define INTERVAL_0_833_US(us)	(((us) * 6) / 5)
 #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
@@ -7437,6 +7444,8 @@
 #define  TRANS_CLK_SEL_DISABLED		(0x0<<29)
 #define  TRANS_CLK_SEL_PORT(x)		(((x)+1)<<29)
 
+#define CDCLK_FREQ			_MMIO(0x46200)
+
 #define _TRANSA_MSA_MISC		0x60410
 #define _TRANSB_MSA_MISC		0x61410
 #define _TRANSC_MSA_MISC		0x62410
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 30f9214..7d281b4 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -262,8 +262,7 @@
 	tmp |= AUD_CONFIG_N_PROG_ENABLE;
 	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
 	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-	    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
 	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
 
@@ -476,8 +475,7 @@
 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
 	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
 	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-	    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
 	else
 		tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -515,8 +513,7 @@
 
 	/* ELD Conn_Type */
 	connector->eld[5] &= ~(3 << 2);
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST))
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
 		connector->eld[5] |= (1 << 2);
 
 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 505fc5c..0364292 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -257,8 +257,14 @@
 		pipe_config->has_pch_encoder = true;
 
 	/* LPT FDI RX only supports 8bpc. */
-	if (HAS_PCH_LPT(dev))
+	if (HAS_PCH_LPT(dev)) {
+		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+			return false;
+		}
+
 		pipe_config->pipe_bpp = 24;
+	}
 
 	/* FDI must always be 2.7 GHz */
 	if (HAS_DDI(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 62de9f4..96ffcc5 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -443,9 +443,17 @@
 	} else if (IS_BROADWELL(dev_priv)) {
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
-		ddi_translations_edp = bdw_ddi_translations_edp;
+
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations_edp = bdw_ddi_translations_edp;
+			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		} else {
+			ddi_translations_edp = bdw_ddi_translations_dp;
+			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+		}
+
 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
 		hdmi_default_entry = 7;
@@ -3098,23 +3106,6 @@
 	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 }
 
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
-				 struct intel_crtc *intel_crtc)
-{
-	u32 temp;
-
-	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
-		temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-
-		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
-		if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
-			return true;
-	}
-
-	return false;
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config)
 {
@@ -3175,8 +3166,11 @@
 		break;
 	}
 
-	pipe_config->has_audio =
-		intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
+	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+		temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+		if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
+			pipe_config->has_audio = true;
+	}
 
 	if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
@@ -3201,12 +3195,6 @@
 	intel_ddi_clock_get(encoder, pipe_config);
 }
 
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
-	/* HDMI has nothing special to destroy, so we can go with this. */
-	intel_dp_encoder_destroy(encoder);
-}
-
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *pipe_config)
 {
@@ -3225,7 +3213,8 @@
 }
 
 static const struct drm_encoder_funcs intel_ddi_funcs = {
-	.destroy = intel_ddi_destroy,
+	.reset = intel_dp_encoder_reset,
+	.destroy = intel_dp_encoder_destroy,
 };
 
 static struct intel_connector *
@@ -3324,6 +3313,7 @@
 	intel_encoder->post_disable = intel_ddi_post_disable;
 	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
 	intel_encoder->get_config = intel_ddi_get_config;
+	intel_encoder->suspend = intel_dp_encoder_suspend;
 
 	intel_dig_port->port = port;
 	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6e0d828..0104a06 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7988,9 +7988,6 @@
 
 	pipe_config->gmch_pfit.control = tmp;
 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
-	if (INTEL_INFO(dev)->gen < 5)
-		pipe_config->gmch_pfit.lvds_border_bits =
-			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
 }
 
 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9752,6 +9749,8 @@
 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
+	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+
 	intel_update_cdclk(dev);
 
 	WARN(cdclk != dev_priv->cdclk_freq,
@@ -13351,6 +13350,9 @@
 	}
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (state->legacy_cursor_update)
+			continue;
+
 		ret = intel_crtc_wait_for_pending_flips(crtc);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f069a82..412a34c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4898,7 +4898,7 @@
 	kfree(intel_dig_port);
 }
 
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
@@ -4940,7 +4940,7 @@
 	edp_panel_vdd_schedule_off(intel_dp);
 }
 
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
 	struct intel_dp *intel_dp;
 
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a2bd698..2c99972 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -78,8 +78,6 @@
 		return false;
 	}
 
-	if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port))
-		pipe_config->has_audio = true;
 	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 
 	pipe_config->pbn = mst_pbn;
@@ -104,11 +102,6 @@
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = encoder->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 	int ret;
 
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@@ -119,10 +112,6 @@
 	if (ret) {
 		DRM_ERROR("failed to update payload %d\n", ret);
 	}
-	if (intel_crtc->config->has_audio) {
-		intel_audio_codec_disable(encoder);
-		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-	}
 }
 
 static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
@@ -221,7 +210,6 @@
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	enum port port = intel_dig_port->port;
 	int ret;
 
@@ -234,13 +222,6 @@
 	ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
 
 	ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
-
-	if (crtc->config->has_audio) {
-		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
-				 pipe_name(crtc->pipe));
-		intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-		intel_audio_codec_enable(encoder);
-	}
 }
 
 static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -266,9 +247,6 @@
 
 	pipe_config->has_dp_encoder = true;
 
-	pipe_config->has_audio =
-		intel_ddi_is_audio_enabled(dev_priv, crtc);
-
 	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 	if (temp & TRANS_DDI_PHSYNC)
 		flags |= DRM_MODE_FLAG_PHSYNC;
@@ -506,6 +484,8 @@
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_device *dev = connector->dev;
 
+	intel_connector->unregister(intel_connector);
+
 	/* need to nuke the connector */
 	drm_modeset_lock_all(dev);
 	if (connector->state->crtc) {
@@ -519,11 +499,7 @@
 
 		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
 	}
-	drm_modeset_unlock_all(dev);
 
-	intel_connector->unregister(intel_connector);
-
-	drm_modeset_lock_all(dev);
 	intel_connector_remove_from_fbdev(intel_connector);
 	drm_connector_cleanup(connector);
 	drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4c027d6..9d0770c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1019,8 +1019,6 @@
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
 void intel_ddi_fdi_disable(struct drm_crtc *crtc);
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
-				 struct intel_crtc *intel_crtc);
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config);
 struct intel_encoder *
@@ -1238,6 +1236,8 @@
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a0d8dae..1ab6f68 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1415,8 +1415,16 @@
 				hdmi_to_dig_port(intel_hdmi));
 	}
 
-	if (!live_status)
-		DRM_DEBUG_KMS("Live status not up!");
+	if (!live_status) {
+		DRM_DEBUG_KMS("HDMI live status down\n");
+		/*
+		 * Live status register is not reliable on all intel platforms.
+		 * So consider live_status only for certain platforms, for
+		 * others, read EDID to determine presence of sink.
+		 */
+		if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
+			live_status = true;
+	}
 
 	intel_hdmi_unset_edid(connector);
 
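The hunk above stops treating a "down" live status as authoritative: the register is only trusted on platforms where it is known to work, and elsewhere the flag is forced back to true so detection falls through to reading the EDID. A minimal standalone sketch of that policy, with gen/is_ivybridge standing in for the driver's INTEL_INFO()/IS_IVYBRIDGE() checks:

#include <stdbool.h>

/* Sketch only: decide whether a "live status down" reading can be trusted. */
static bool hdmi_live_status_reliable(int gen, bool is_ivybridge)
{
	/* the patch only honours the register on gen7+ parts, IVB excluded */
	return gen >= 7 && !is_ivybridge;
}

static bool hdmi_effective_live_status(bool live_status, int gen,
				       bool is_ivybridge)
{
	if (!live_status && !hdmi_live_status_reliable(gen, is_ivybridge))
		live_status = true;	/* ignore the register, probe the EDID */
	return live_status;
}
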
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6a978ce..5c6080f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -841,11 +841,11 @@
 		if (unlikely(total_bytes > remain_usable)) {
 			/*
 			 * The base request will fit but the reserved space
-			 * falls off the end. So only need to to wait for the
-			 * reserved size after flushing out the remainder.
+			 * falls off the end. So we don't need an immediate
+			 * wrap and only need to wait for the reserved size
+			 * of space from the start of the ringbuffer.
 			 */
 			wait_bytes = remain_actual + ringbuf->reserved_size;
-			need_wrap = true;
 		} else if (total_bytes > ringbuf->space) {
 			/* No wrapping required, just waiting. */
 			wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	int ret;
 
-	ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+	ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
 	if (ret)
 		return ret;
 
+	/* We're using a qword write; the seqno should be 8-byte aligned. */
+	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
 	/* w/a for post sync ops following a GPGPU operation we
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
 	intel_logical_ring_emit(ringbuf,
 				(PIPE_CONTROL_GLOBAL_GTT_IVB |
 				 PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@
 	intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	/* We're thrashing one dword of HWS. */
+	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	return intel_logical_ring_advance_and_submit(request);
 }
 
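The request-emission change above grows the PIPE_CONTROL to six dwords so the seqno is written as a single qword, pads the stream back out with MI_NOOP, and adds a compile-time check that the status-page slot is qword aligned. A standalone sketch of that alignment check, using C11 _Static_assert and a hypothetical HWS_INDEX value:

#include <stdint.h>

#define HWS_INDEX 0x30	/* hypothetical dword slot in the HW status page */

/* A qword (8-byte) write into dword slot N is aligned iff N is even. */
_Static_assert((HWS_INDEX & 1) == 0,
	       "seqno slot must be even for a qword write");

static inline uint64_t hws_seqno_address(uint64_t hws_offset)
{
	return hws_offset + HWS_INDEX * sizeof(uint32_t);	/* dword slots */
}
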
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30a8403..10dc351 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -123,6 +123,10 @@
 
 	pipe_config->base.adjusted_mode.flags |= flags;
 
+	if (INTEL_INFO(dev)->gen < 5)
+		pipe_config->gmch_pfit.lvds_border_bits =
+			tmp & LVDS_BORDER_ENABLE;
+
 	/* gen2/3 store dither state in pfit control, needs to match */
 	if (INTEL_INFO(dev)->gen < 4) {
 		tmp = I915_READ(PFIT_CONTROL);
@@ -478,11 +482,8 @@
 	 * and as part of the cleanup in the hw state restore we also redisable
 	 * the vga plane.
 	 */
-	if (!HAS_PCH_SPLIT(dev)) {
-		drm_modeset_lock_all(dev);
+	if (!HAS_PCH_SPLIT(dev))
 		intel_display_resume(dev);
-		drm_modeset_unlock_all(dev);
-	}
 
 	dev_priv->modeset_restore = MODESET_DONE;
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 347d4df..3425d8e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2876,25 +2876,28 @@
 			     const struct drm_plane_state *pstate,
 			     int y)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
 	struct drm_framebuffer *fb = pstate->fb;
+	uint32_t width = 0, height = 0;
+
+	width = drm_rect_width(&intel_pstate->src) >> 16;
+	height = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(pstate->rotation))
+		swap(width, height);
 
 	/* for planar format */
 	if (fb->pixel_format == DRM_FORMAT_NV12) {
 		if (y)  /* y-plane data rate */
-			return intel_crtc->config->pipe_src_w *
-				intel_crtc->config->pipe_src_h *
+			return width * height *
 				drm_format_plane_cpp(fb->pixel_format, 0);
 		else    /* uv-plane data rate */
-			return (intel_crtc->config->pipe_src_w/2) *
-				(intel_crtc->config->pipe_src_h/2) *
+			return (width / 2) * (height / 2) *
 				drm_format_plane_cpp(fb->pixel_format, 1);
 	}
 
 	/* for packed formats */
-	return intel_crtc->config->pipe_src_w *
-		intel_crtc->config->pipe_src_h *
-		drm_format_plane_cpp(fb->pixel_format, 0);
+	return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
 }
 
 /*
@@ -2973,8 +2976,9 @@
 		struct drm_framebuffer *fb = plane->state->fb;
 		int id = skl_wm_plane_id(intel_plane);
 
-		if (fb == NULL)
+		if (!to_intel_plane_state(plane->state)->visible)
 			continue;
+
 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
@@ -3000,7 +3004,7 @@
 		uint16_t plane_blocks, y_plane_blocks = 0;
 		int id = skl_wm_plane_id(intel_plane);
 
-		if (pstate->fb == NULL)
+		if (!to_intel_plane_state(pstate)->visible)
 			continue;
 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
@@ -3123,26 +3127,36 @@
 {
 	struct drm_plane *plane = &intel_plane->base;
 	struct drm_framebuffer *fb = plane->state->fb;
+	struct intel_plane_state *intel_pstate =
+					to_intel_plane_state(plane->state);
 	uint32_t latency = dev_priv->wm.skl_latency[level];
 	uint32_t method1, method2;
 	uint32_t plane_bytes_per_line, plane_blocks_per_line;
 	uint32_t res_blocks, res_lines;
 	uint32_t selected_result;
 	uint8_t cpp;
+	uint32_t width = 0, height = 0;
 
-	if (latency == 0 || !cstate->base.active || !fb)
+	if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
 		return false;
 
+	width = drm_rect_width(&intel_pstate->src) >> 16;
+	height = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(plane->state->rotation))
+		swap(width, height);
+
 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
 				 cpp, latency);
 	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
 				 cstate->base.adjusted_mode.crtc_htotal,
-				 cstate->pipe_src_w,
-				 cpp, fb->modifier[0],
+				 width,
+				 cpp,
+				 fb->modifier[0],
 				 latency);
 
-	plane_bytes_per_line = cstate->pipe_src_w * cpp;
+	plane_bytes_per_line = width * cpp;
 	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
 	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -6632,6 +6646,12 @@
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 	I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+	/*
+	 * Wait at least 100 clocks before re-enabling clock gating. See
+	 * the definition of L3SQCREG1 in BSpec.
+	 */
+	POSTING_READ(GEN8_L3SQCREG1);
+	udelay(1);
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
 	/*
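
Both watermark hunks above switch from the pipe source size to the plane's own clipped source rectangle and swap width and height for 90/270 rotation before computing the per-plane data rate. A standalone sketch of the resulting arithmetic; the >> 16 reflects that the plane source rectangle is kept in 16.16 fixed point:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: per-plane data rate for a packed format. */
static uint32_t plane_data_rate(uint32_t src_w_16_16, uint32_t src_h_16_16,
				uint32_t cpp, bool rotated_90_or_270)
{
	uint32_t width = src_w_16_16 >> 16;	/* drop the fractional part */
	uint32_t height = src_h_16_16 >> 16;

	if (rotated_90_or_270) {
		uint32_t tmp = width;		/* swap(), as in the patch */
		width = height;
		height = tmp;
	}

	return width * height * cpp;
}
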
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a..9121646 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,7 +968,7 @@
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
 	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+	/* This is tied to WaForceContextSaveRestoreNonCoherent */
+	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = ringbuf->obj;
+	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+	unsigned flags = PIN_OFFSET_BIAS | 4096;
 	int ret;
 
 	if (HAS_LLC(dev_priv) && !obj->stolen) {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
 		if (ret)
 			return ret;
 
@@ -2109,7 +2112,8 @@
 			return -ENOMEM;
 		}
 	} else {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+					    flags | PIN_MAPPABLE);
 		if (ret)
 			return ret;
 
@@ -2454,11 +2458,11 @@
 		if (unlikely(total_bytes > remain_usable)) {
 			/*
 			 * The base request will fit but the reserved space
-			 * falls off the end. So only need to to wait for the
-			 * reserved size after flushing out the remainder.
+			 * falls off the end. So we don't need an immediate
+			 * wrap and only need to wait for the reserved size
+			 * of space from the start of the ringbuffer.
 			 */
 			wait_bytes = remain_actual + ringbuf->reserved_size;
-			need_wrap = true;
 		} else if (total_bytes > ringbuf->space) {
 			/* No wrapping required, just waiting. */
 			wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 436d8f2..68b6f69 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1189,7 +1189,11 @@
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		if (IS_HASWELL(dev))
+			dev_priv->uncore.funcs.force_wake_put =
+				fw_domains_put_with_fifo;
+		else
+			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
 	} else if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 2a95d10..a24631fd 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -225,8 +225,6 @@
 	if (!iores)
 		return -ENXIO;
 
-	platform_set_drvdata(pdev, hdmi);
-
 	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
 	/*
 	 * If we failed to find the CRTC(s) which this encoder is
@@ -245,7 +243,16 @@
 	drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
 			 DRM_MODE_ENCODER_TMDS, NULL);
 
-	return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+	ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+	/*
+	 * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+	 * which would have called the encoder cleanup.  Do it manually.
+	 */
+	if (ret)
+		drm_encoder_cleanup(encoder);
+
+	return ret;
 }
 
 static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9876e0f..e26dcde 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -326,7 +326,6 @@
 {
 	struct imx_drm_device *imxdrm = drm->dev_private;
 	struct imx_drm_crtc *imx_drm_crtc;
-	int ret;
 
 	/*
 	 * The vblank arrays are dimensioned by MAX_CRTC - we can't
@@ -351,10 +350,6 @@
 
 	*new_crtc = imx_drm_crtc;
 
-	ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
-	if (ret)
-		goto err_register;
-
 	drm_crtc_helper_add(crtc,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
 
@@ -362,11 +357,6 @@
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
 
 	return 0;
-
-err_register:
-	imxdrm->crtc[--imxdrm->pipes] = NULL;
-	kfree(imx_drm_crtc);
-	return ret;
 }
 EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
 
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 5888278..681ec6e 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -72,22 +72,101 @@
 int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
 		       int x, int y)
 {
-	struct drm_gem_cma_object *cma_obj;
-	unsigned long eba;
-	int active;
+	struct drm_gem_cma_object *cma_obj[3];
+	unsigned long eba, ubo, vbo;
+	int active, i;
 
-	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	if (!cma_obj) {
-		DRM_DEBUG_KMS("entry is null.\n");
-		return -EFAULT;
+	for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+		cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
+		if (!cma_obj[i]) {
+			DRM_DEBUG_KMS("plane %d entry is null.\n", i);
+			return -EFAULT;
+		}
 	}
 
-	dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
-		&cma_obj->paddr, x, y);
-
-	eba = cma_obj->paddr + fb->offsets[0] +
+	eba = cma_obj[0]->paddr + fb->offsets[0] +
 	      fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
 
+	if (eba & 0x7) {
+		DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
+		return -EINVAL;
+	}
+
+	if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
+		DRM_DEBUG_KMS("pitches out of range.\n");
+		return -EINVAL;
+	}
+
+	if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
+		DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
+		return -EINVAL;
+	}
+
+	ipu_plane->stride[0] = fb->pitches[0];
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		/*
+		 * Multiplanar formats have to meet the following restrictions:
+		 * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+		 * - EBA, UBO and VBO are a multiple of 8
+		 * - UBO and VBO are unsigned and not larger than 0xfffff8
+		 * - Only EBA may be changed while scanout is active
+		 * - The strides of U and V planes must be identical.
+		 */
+		ubo = cma_obj[1]->paddr + fb->offsets[1] +
+		      fb->pitches[1] * y / 2 + x / 2 - eba;
+		vbo = cma_obj[2]->paddr + fb->offsets[2] +
+		      fb->pitches[2] * y / 2 + x / 2 - eba;
+
+		if ((ubo & 0x7) || (vbo & 0x7)) {
+			DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
+			return -EINVAL;
+		}
+
+		if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
+			DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
+			return -EINVAL;
+		}
+
+		if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
+					   (ipu_plane->v_offset != vbo))) {
+			DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
+			return -EINVAL;
+		}
+
+		if (fb->pitches[1] != fb->pitches[2]) {
+			DRM_DEBUG_KMS("U/V pitches must be identical.\n");
+			return -EINVAL;
+		}
+
+		if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
+			DRM_DEBUG_KMS("U/V pitches out of range.\n");
+			return -EINVAL;
+		}
+
+		if (ipu_plane->enabled &&
+		    (ipu_plane->stride[1] != fb->pitches[1])) {
+			DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
+			return -EINVAL;
+		}
+
+		ipu_plane->u_offset = ubo;
+		ipu_plane->v_offset = vbo;
+		ipu_plane->stride[1] = fb->pitches[1];
+
+		dev_dbg(ipu_plane->base.dev->dev,
+			"phys = %pad %pad %pad, x = %d, y = %d",
+			&cma_obj[0]->paddr, &cma_obj[1]->paddr,
+			&cma_obj[2]->paddr, x, y);
+		break;
+	default:
+		dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
+			&cma_obj[0]->paddr, x, y);
+		break;
+	}
+
 	if (ipu_plane->enabled) {
 		active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
 		ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
@@ -201,12 +280,6 @@
 		}
 	}
 
-	ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
-	if (ret) {
-		dev_err(dev, "initializing dmfc channel failed with %d\n", ret);
-		return ret;
-	}
-
 	ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
 			calc_bandwidth(crtc_w, crtc_h,
 				       calc_vref(mode)), 64);
@@ -215,6 +288,8 @@
 		return ret;
 	}
 
+	ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
+
 	ipu_cpmem_zero(ipu_plane->ipu_ch);
 	ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
 	ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
@@ -233,6 +308,18 @@
 	if (interlaced)
 		ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
 
+	if (fb->pixel_format == DRM_FORMAT_YUV420) {
+		ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+					      ipu_plane->stride[1],
+					      ipu_plane->u_offset,
+					      ipu_plane->v_offset);
+	} else if (fb->pixel_format == DRM_FORMAT_YVU420) {
+		ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+					      ipu_plane->stride[1],
+					      ipu_plane->v_offset,
+					      ipu_plane->u_offset);
+	}
+
 	ipu_plane->w = src_w;
 	ipu_plane->h = src_h;
 
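For the planar YUV formats the IPU is programmed with one base address (EBA) plus U/V offsets relative to it, and the hunk above derives those offsets from the per-plane buffer addresses before checking the alignment and range rules listed in its comment. A standalone sketch of that derivation, with plain integers standing in for the CMA objects and framebuffer fields:

#include <stdint.h>

/* Sketch only: derive and validate the U/V buffer offsets (UBO/VBO). */
static int ipu_yuv_offsets(uint32_t eba, uint32_t u_base, uint32_t v_base,
			   uint32_t pitch_uv, int x, int y,
			   uint32_t *ubo, uint32_t *vbo)
{
	/* the chroma planes are subsampled by two in both directions */
	*ubo = u_base + pitch_uv * (y / 2) + x / 2 - eba;
	*vbo = v_base + pitch_uv * (y / 2) + x / 2 - eba;

	if ((eba & 0x7) || (*ubo & 0x7) || (*vbo & 0x7))
		return -1;	/* all three must be 8-byte aligned */
	if (*ubo > 0xfffff8 || *vbo > 0xfffff8)
		return -1;	/* offsets are limited to 0xfffff8 */
	return 0;
}
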
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 3a443b4..4448fd4 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -29,6 +29,10 @@
 	int			w;
 	int			h;
 
+	unsigned int		u_offset;
+	unsigned int		v_offset;
+	unsigned int		stride[2];
+
 	bool			enabled;
 };
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index b04a646..65428cf 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -196,7 +196,7 @@
 int msm_hdmi_pll_8960_init(struct platform_device *pdev);
 int msm_hdmi_pll_8996_init(struct platform_device *pdev);
 #else
-static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev);
+static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
 {
 	return -ENODEV;
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d52910e..c03b967 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -467,9 +467,6 @@
 	struct msm_file_private *ctx = file->driver_priv;
 	struct msm_kms *kms = priv->kms;
 
-	if (kms)
-		kms->funcs->preclose(kms, file);
-
 	mutex_lock(&dev->struct_mutex);
 	if (ctx == priv->lastctx)
 		priv->lastctx = NULL;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9bcabaa..e32222c 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -55,7 +55,6 @@
 			struct drm_encoder *slave_encoder,
 			bool is_cmd_mode);
 	/* cleanup: */
-	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
 	void (*destroy)(struct msm_kms *kms);
 };
 
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 16641ce..b5370cb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -11,6 +11,7 @@
 
 	struct reset_control *rst;
 	struct clk *clk;
+	struct clk *clk_ref;
 	struct clk *clk_pwr;
 
 	struct regulator *vdd;
@@ -36,6 +37,10 @@
 	 * bypassed). A value of 0 means an IOMMU is never used.
 	 */
 	u8 iommu_bit;
+	/*
+	 * Whether the chip requires a reference clock
+	 */
+	bool require_ref_clk;
 };
 
 int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ae96ebc..e81aefe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1276,18 +1276,18 @@
 		break;
 	default:
 		if (disp->dithering_mode) {
+			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
 			drm_object_attach_property(&connector->base,
 						   disp->dithering_mode,
 						   nv_connector->
 						   dithering_mode);
-			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
 		}
 		if (disp->dithering_depth) {
+			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
 			drm_object_attach_property(&connector->base,
 						   disp->dithering_depth,
 						   nv_connector->
 						   dithering_depth);
-			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
 		}
 		break;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 2dfe58a..4c4cc22 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -55,6 +55,11 @@
 	.iommu_bit = 34,
 };
 
+static const struct nvkm_device_tegra_func gm20b_platform_data = {
+	.iommu_bit = 34,
+	.require_ref_clk = true,
+};
+
 static const struct of_device_id nouveau_platform_match[] = {
 	{
 		.compatible = "nvidia,gk20a",
@@ -62,7 +67,7 @@
 	},
 	{
 		.compatible = "nvidia,gm20b",
-		.data = &gk20a_platform_data,
+		.data = &gm20b_platform_data,
 	},
 	{ }
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 9afa5f3..ec12efb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -35,6 +35,11 @@
 	ret = clk_prepare_enable(tdev->clk);
 	if (ret)
 		goto err_clk;
+	if (tdev->clk_ref) {
+		ret = clk_prepare_enable(tdev->clk_ref);
+		if (ret)
+			goto err_clk_ref;
+	}
 	ret = clk_prepare_enable(tdev->clk_pwr);
 	if (ret)
 		goto err_clk_pwr;
@@ -57,6 +62,9 @@
 err_clamp:
 	clk_disable_unprepare(tdev->clk_pwr);
 err_clk_pwr:
+	if (tdev->clk_ref)
+		clk_disable_unprepare(tdev->clk_ref);
+err_clk_ref:
 	clk_disable_unprepare(tdev->clk);
 err_clk:
 	regulator_disable(tdev->vdd);
@@ -71,6 +79,8 @@
 	udelay(10);
 
 	clk_disable_unprepare(tdev->clk_pwr);
+	if (tdev->clk_ref)
+		clk_disable_unprepare(tdev->clk_ref);
 	clk_disable_unprepare(tdev->clk);
 	udelay(10);
 
@@ -274,6 +284,13 @@
 		goto free;
 	}
 
+	if (func->require_ref_clk)
+		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
+	if (IS_ERR(tdev->clk_ref)) {
+		ret = PTR_ERR(tdev->clk_ref);
+		goto free;
+	}
+
 	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
 	if (IS_ERR(tdev->clk_pwr)) {
 		ret = PTR_ERR(tdev->clk_pwr);
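
The new require_ref_clk flag makes the "ref" clock a per-chip option: it is enabled after the core clock and released in reverse order on both the error path and power-down. A standalone sketch of that optional-clock ordering, with tiny stubs in place of the common clock framework:

struct clk { int enabled; };	/* stand-in for the real clock handle */

static int clk_enable_stub(struct clk *c) { if (c) c->enabled = 1; return 0; }
static void clk_disable_stub(struct clk *c) { if (c) c->enabled = 0; }

/* Sketch only: bring up core, optional ref and pwr clocks, unwinding on error. */
static int power_up(struct clk *clk, struct clk *clk_ref, struct clk *clk_pwr)
{
	int ret;

	ret = clk_enable_stub(clk);
	if (ret)
		return ret;
	if (clk_ref) {			/* only some chips need it */
		ret = clk_enable_stub(clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_enable_stub(clk_pwr);
	if (ret)
		goto err_clk_pwr;
	return 0;

err_clk_pwr:
	if (clk_ref)
		clk_disable_stub(clk_ref);	/* reverse order on unwind */
err_clk_ref:
	clk_disable_stub(clk);
	return ret;
}
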
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c56a886..b2de290 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1832,6 +1832,8 @@
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
+	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 43e5f50..030409a 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,10 +375,15 @@
 
 	qxl_bo_kunmap(user_bo);
 
+	qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+	qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+	qcrtc->hot_spot_x = hot_x;
+	qcrtc->hot_spot_y = hot_y;
+
 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_CURSOR_SET;
-	cmd->u.set.position.x = qcrtc->cur_x;
-	cmd->u.set.position.y = qcrtc->cur_y;
+	cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+	cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
 
 	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
 
@@ -441,8 +446,8 @@
 
 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_CURSOR_MOVE;
-	cmd->u.position.x = qcrtc->cur_x;
-	cmd->u.position.y = qcrtc->cur_y;
+	cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+	cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 
 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
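
The qxl hunk above starts tracking the cursor hotspot: when a new image is set, the cached position is adjusted by the hotspot delta so the on-screen cursor does not jump, and every position sent to the host is cur_x/cur_y plus the current hotspot. A small standalone sketch of that bookkeeping:

/* Sketch only: hotspot bookkeeping on a minimal state struct. */
struct cursor_state {
	int cur_x, cur_y;		/* position excluding the hotspot */
	int hot_spot_x, hot_spot_y;	/* last hotspot sent to the host */
};

/* Called when a new cursor image with hotspot (hot_x, hot_y) is set. */
static void cursor_set_hotspot(struct cursor_state *c, int hot_x, int hot_y)
{
	/* keep the on-screen position (cur + hotspot) unchanged */
	c->cur_x += c->hot_spot_x - hot_x;
	c->cur_y += c->hot_spot_y - hot_y;
	c->hot_spot_x = hot_x;
	c->hot_spot_y = hot_y;
}

/* The position reported to the device is always cur + hotspot. */
static int cursor_device_x(const struct cursor_state *c)
{
	return c->cur_x + c->hot_spot_x;
}
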
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 6e6b9b1..3f3897e 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@
 	int index;
 	int cur_x;
 	int cur_y;
+	int hot_spot_x;
+	int hot_spot_y;
 };
 
 struct qxl_output {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index cf61e08..532127c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -275,13 +275,15 @@
 		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
 			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
 		atombios_blank_crtc(crtc, ATOM_DISABLE);
-		drm_vblank_on(dev, radeon_crtc->crtc_id);
+		if (dev->num_crtcs > radeon_crtc->crtc_id)
+			drm_vblank_on(dev, radeon_crtc->crtc_id);
 		radeon_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_off(dev, radeon_crtc->crtc_id);
+		if (dev->num_crtcs > radeon_crtc->crtc_id)
+			drm_vblank_off(dev, radeon_crtc->crtc_id);
 		if (radeon_crtc->enabled)
 			atombios_blank_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -1740,6 +1742,7 @@
 static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_crtc *test_crtc;
 	struct radeon_crtc *test_radeon_crtc;
 
@@ -1749,6 +1752,10 @@
 		test_radeon_crtc = to_radeon_crtc(test_crtc);
 		if (test_radeon_crtc->encoder &&
 		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
+			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+			    test_radeon_crtc->pll_id == ATOM_PPLL2)
+				continue;
 			/* for DP use the same PLL for all */
 			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
 				return test_radeon_crtc->pll_id;
@@ -1770,6 +1777,7 @@
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_crtc *test_crtc;
 	struct radeon_crtc *test_radeon_crtc;
 	u32 adjusted_clock, test_adjusted_clock;
@@ -1785,6 +1793,10 @@
 		test_radeon_crtc = to_radeon_crtc(test_crtc);
 		if (test_radeon_crtc->encoder &&
 		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
+			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+			    test_radeon_crtc->pll_id == ATOM_PPLL2)
+				continue;
 			/* check if we are already driving this connector with another crtc */
 			if (test_radeon_crtc->connector == radeon_crtc->connector) {
 				/* if we are, return that pll */
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index afa9db1..cead089a 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -326,8 +326,8 @@
 			}
 		}
 	} else {
-		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
 				if (max_pix_clock >= pix_clock) {
 					*dp_lanes = lane_num;
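
Swapping the loops above changes the search preference: the code now walks the link rates from lowest to highest and, within each rate, raises the lane count, so the first configuration that carries the mode uses the lowest workable link rate. A standalone sketch of that search; the (lanes * rate * 8) / bpp bandwidth estimate mirrors the expression in the diff:

#include <stdbool.h>

/* Sketch only: pick the lowest link rate, then the smallest lane count. */
static bool pick_dp_config(const int *link_rates, int num_rates,
			   int max_link_rate, int max_lane_num,
			   int pix_clock, int bpp,
			   int *out_lanes, int *out_rate)
{
	for (int i = 0; i < num_rates && link_rates[i] <= max_link_rate; i++) {
		for (int lanes = 1; lanes <= max_lane_num; lanes <<= 1) {
			int max_pix_clock = (lanes * link_rates[i] * 8) / bpp;

			if (max_pix_clock >= pix_clock) {
				*out_lanes = lanes;
				*out_rate = link_rates[i];
				return true;
			}
		}
	}
	return false;
}
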
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index edd05cd..587cae4 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -310,6 +310,10 @@
 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+	/* vertical FP must be at least 1 */
+	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+		adjusted_mode->crtc_vsync_start++;
+
 	/* get the native mode for scaling */
 	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
 		radeon_panel_mode_fixup(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 76c4bdf..34f7a29 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,10 +2608,152 @@
 	WREG32(VM_CONTEXT1_CNTL, 0);
 }
 
+static const unsigned ni_dig_offsets[] =
+{
+	NI_DIG0_REGISTER_OFFSET,
+	NI_DIG1_REGISTER_OFFSET,
+	NI_DIG2_REGISTER_OFFSET,
+	NI_DIG3_REGISTER_OFFSET,
+	NI_DIG4_REGISTER_OFFSET,
+	NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+	NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+	NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+	EVERGREEN_DP0_REGISTER_OFFSET,
+	EVERGREEN_DP1_REGISTER_OFFSET,
+	EVERGREEN_DP2_REGISTER_OFFSET,
+	EVERGREEN_DP3_REGISTER_OFFSET,
+	EVERGREEN_DP4_REGISTER_OFFSET,
+	EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+
+/*
+ * The assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the
+ * requested crtc. We go from crtc to connector, which is not reliable
+ * since it should be the opposite direction. If the crtc is enabled,
+ * find the dig_fe which selects this crtc and ensure that it is enabled.
+ * If such a dig_fe is found, find the dig_be which selects that dig_fe
+ * and ensure that it is enabled and in DP_SST mode.
+ * If UNIPHY_PLL_CONTROL1 is enabled, we should disconnect the timing
+ * from the DP symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+					       unsigned crtc_id, unsigned *ret_dig_fe)
+{
+	unsigned i;
+	unsigned dig_fe;
+	unsigned dig_be;
+	unsigned dig_en_be;
+	unsigned uniphy_pll;
+	unsigned digs_fe_selected;
+	unsigned dig_be_mode;
+	unsigned dig_fe_mask;
+	bool is_enabled = false;
+	bool found_crtc = false;
+
+	/* loop through all running dig_fe to find selected crtc */
+	for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+		dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+		if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+		    crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+			/* found running pipe */
+			found_crtc = true;
+			dig_fe_mask = 1 << i;
+			dig_fe = i;
+			break;
+		}
+	}
+
+	if (found_crtc) {
+		/* loop through all running dig_be to find selected dig_fe */
+		for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+			dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+			/* is dig_fe selected by this dig_be? */
+			digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+			dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+			if (dig_fe_mask &  digs_fe_selected &&
+			    /* is dig_be in SST mode? */
+			    dig_be_mode == NI_DIG_BE_DPSST) {
+				dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+						   ni_dig_offsets[i]);
+				uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+						    ni_tx_offsets[i]);
+				/* dig_be enabled and tx running */
+				if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+				    dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+				    uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+					is_enabled = true;
+					*ret_dig_fe = dig_fe;
+					break;
+				}
+			}
+		}
+	}
+
+	return is_enabled;
+}
+
+/*
+ * Blank the dig when in DP SST mode;
+ * the dig ignores the crtc timing.
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+				      unsigned dig_fe)
+{
+	unsigned stream_ctrl;
+	unsigned fifo_ctrl;
+	unsigned counter = 0;
+
+	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+		DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+		return;
+	}
+
+	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+			     evergreen_dp_offsets[dig_fe]);
+	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+		DRM_ERROR("dig %d should be enabled\n", dig_fe);
+		return;
+	}
+
+	stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+	WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+	       evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+			     evergreen_dp_offsets[dig_fe]);
+	while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+		msleep(1);
+		counter++;
+		stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+				     evergreen_dp_offsets[dig_fe]);
+	}
+	if (counter >= 32)
+		DRM_ERROR("counter exceeds %d\n", counter);
+
+	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+	WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+
+}
+
 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
 	u32 crtc_enabled, tmp, frame_count, blackout;
 	int i, j;
+	unsigned dig_fe;
 
 	if (!ASIC_IS_NODCE(rdev)) {
 		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@
 					break;
 				udelay(1);
 			}
-
+			/*
+			 * We should disable the dig if it drives DP SST, but
+			 * we are in radeon_device_init and the topology is
+			 * unknown until radeon_modeset_init. The method
+			 * radeon_atom_encoder_dpms_dig would do the job if it
+			 * were initialized properly, so for now do it manually.
+			 */
+			if (ASIC_IS_DCE5(rdev) &&
+			    evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
+				evergreen_blank_dp_output(rdev, dig_fe);
+			/* we could remove the 6 lines below */
 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
 			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
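
evergreen_blank_dp_output() above disables the video stream and then polls the status bit for at most 32 one-millisecond sleeps before resetting the steer FIFO. A standalone sketch of that bounded poll, with the register read and the sleep supplied as callbacks so the pattern stands on its own:

#include <stdbool.h>

/* Sketch only: poll until the stream goes idle or ~32 ms have elapsed. */
static bool wait_stream_idle(bool (*stream_active)(void *ctx),
			     void (*sleep_ms)(unsigned int ms), void *ctx)
{
	unsigned int counter = 0;

	while (counter < 32 && stream_active(ctx)) {
		sleep_ms(1);
		counter++;
	}
	return counter < 32;	/* false means the wait timed out */
}
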
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfe..b436bad 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
 
 /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
 #define EVERGREEN_HDMI_BASE				0x7030
+/*DIG block*/
+#define NI_DIG0_REGISTER_OFFSET                 (0x7000  - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET                 (0x7C00  - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET                 (0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET                 (0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET                 (0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET                 (0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL                               0x7000
+#       define NI_DIG_FE_CNTL_SOURCE_SELECT(x)        ((x) & 0x3)
+#       define NI_DIG_FE_CNTL_SYMCLK_FE_ON            (1<<24)
+
+
+#define NI_DIG_BE_CNTL                    0x7140
+#       define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)     (((x) >> 8 ) & 0x3F)
+#       define NI_DIG_FE_CNTL_MODE(x)                 (((x) >> 16) & 0x7 )
+
+#define NI_DIG_BE_EN_CNTL                              0x7144
+#       define NI_DIG_BE_EN_CNTL_ENABLE               (1 << 0)
+#       define NI_DIG_BE_EN_CNTL_SYMBCLK_ON           (1 << 8)
+#       define NI_DIG_BE_DPSST 0
 
 /* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET                 (0x730C  - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET                 (0x7F0C  - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET                 (0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET                 (0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET                 (0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET                 (0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL                    0x730C
+#       define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE     (1 << 0)
+#       define EVERGREEN_DP_VID_STREAM_STATUS          (1 <<16)
+#define EVERGREEN_DP_STEER_FIFO                         0x7310
+#       define EVERGREEN_DP_STEER_FIFO_RESET           (1 << 0)
 #define EVERGREEN_DP_SEC_CNTL                           0x7280
 #       define EVERGREEN_DP_SEC_STREAM_ENABLE           (1 << 0)
 #       define EVERGREEN_DP_SEC_ASP_ENABLE              (1 << 4)
@@ -266,4 +301,15 @@
 #       define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x)      (((x) & 0xf) << 24)
 #       define EVERGREEN_DP_SEC_SS_EN                   (1 << 28)
 
+/*DCIO_UNIPHY block*/
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1            (0x6600  -0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1            (0x6640  -0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1            (0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1            (0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1            (0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1            (0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1                   0x6618
+#       define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE     (1 << 0)
+
 #endif
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
index da310a70..827ccc8 100644
--- a/drivers/gpu/drm/radeon/ni_reg.h
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -109,6 +109,8 @@
 #define NI_DP_MSE_SAT2                                 0x7398
 
 #define NI_DP_MSE_SAT_UPDATE                           0x739c
+#       define NI_DP_MSE_SAT_UPDATE_MASK               0x3
+#       define NI_DP_MSE_16_MTP_KEEPOUT                0x100
 
 #define NI_DIG_BE_CNTL                                 0x7140
 #       define NI_DIG_FE_SOURCE_SELECT(x)              (((x) & 0x7f) << 8)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index fd8c4d3..95f4fea 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,10 +62,6 @@
 	return radeon_atpx_priv.atpx_detected;
 }
 
-bool radeon_has_atpx_dgpu_power_cntl(void) {
-	return radeon_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * radeon_atpx_call - call an ATPX method
  *
@@ -145,6 +141,13 @@
  */
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
+	/* make sure required functions are enabled */
+	/* dGPU power control is required */
+	if (atpx->functions.power_cntl == false) {
+		printk("ATPX dGPU power cntl not present, forcing\n");
+		atpx->functions.power_cntl = true;
+	}
+
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cfcc099..81a63d7 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -2002,10 +2002,12 @@
 						   rdev->mode_info.dither_property,
 						   RADEON_FMT_DITHER_DISABLE);
 
-			if (radeon_audio != 0)
+			if (radeon_audio != 0) {
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
 							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
+			}
 			if (ASIC_IS_DCE5(rdev))
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.output_csc_property,
@@ -2130,6 +2132,7 @@
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
 							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
 			}
 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 				radeon_connector->dac_load_detect = true;
@@ -2185,6 +2188,7 @@
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
 							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
 			}
 			if (ASIC_IS_DCE5(rdev))
 				drm_object_attach_property(&radeon_connector->base.base,
@@ -2237,6 +2241,7 @@
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
 							   RADEON_AUDIO_AUTO);
+				radeon_connector->audio = RADEON_AUDIO_AUTO;
 			}
 			if (ASIC_IS_DCE5(rdev))
 				drm_object_attach_property(&radeon_connector->base.base,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4fd1a96..d0826fb 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,12 +103,6 @@
 	"LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 
@@ -1305,9 +1299,9 @@
 	}
 	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
 
-	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
-		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
-		pdev->subsystem_vendor, pdev->subsystem_device);
+	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
 
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
@@ -1439,7 +1433,7 @@
 	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 
-	if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+	if (rdev->flags & RADEON_IS_PX)
 		runtime = true;
 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
 	if (runtime)
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 3b0c229..db64e00 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@
 
 	tmp &= AUX_HPD_SEL(0x7);
 	tmp |= AUX_HPD_SEL(chan->rec.hpd);
-	tmp |= AUX_EN | AUX_LS_READ_EN;
+	tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
 
 	WREG32(AUX_CONTROL + aux_offset[instance], tmp);
 
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 43cffb5..de504ea 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -89,8 +89,16 @@
 	WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
 
 	do {
+		unsigned value1, value2;
+		udelay(10);
 		temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
-	} while ((temp & 0x1) && retries++ < 10000);
+
+		value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
+		value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
+
+		if (!value1 && !value2)
+			break;
+	} while (retries++ < 50);
 
 	if (retries == 10000)
 		DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
@@ -150,7 +158,7 @@
 	return 0;
 }
 
-static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
 {
 	struct drm_device *dev = mst->base.dev;
 	struct radeon_device *rdev = dev->dev_private;
@@ -158,6 +166,8 @@
 	uint32_t val, temp;
 	uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
 	int retries = 0;
+	uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
+	uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
 
 	val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
 
@@ -165,6 +175,7 @@
 
 	do {
 		temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+		udelay(10);
 	} while ((temp & 0x1) && (retries++ < 10000));
 
 	if (retries >= 10000)
@@ -246,14 +257,8 @@
 	kfree(radeon_connector);
 }
 
-static int radeon_connector_dpms(struct drm_connector *connector, int mode)
-{
-	DRM_DEBUG_KMS("\n");
-	return 0;
-}
-
 static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
-	.dpms = radeon_connector_dpms,
+	.dpms = drm_helper_connector_dpms,
 	.detect = radeon_dp_mst_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = radeon_dp_mst_connector_destroy,
@@ -394,7 +399,7 @@
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
 	int ret, slots;
-
+	s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
 	if (!ASIC_IS_DCE5(rdev)) {
 		DRM_ERROR("got mst dpms on non-DCE5\n");
 		return;
@@ -456,7 +461,11 @@
 
 		mst_enc->enc_active = true;
 		radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
-		radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0);
+
+		fixed_pbn = drm_int2fixp(mst_enc->pbn);
+		fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
+		avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
+		radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
 
 		atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
 					    mst_enc->fe);
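
The MST hunk above replaces the raw (x, y) slot arguments with a 32.32 fixed-point average time-slots-per-MTP value (pbn divided by pbn_per_slot) and splits it back into the register's X/Y fields: X carries the integer part, Y the fractional part scaled to 26 bits and rounded up. A standalone sketch of that split on plain 64-bit fixed point (the drm_fixp_* helpers are replaced by shifts and masks; pbn_per_slot is assumed non-zero):

#include <stdint.h>

/* Sketch only: compute the X/Y VCP size fields from pbn and pbn_per_slot. */
static void mst_vcp_size(uint32_t pbn, uint32_t pbn_per_slot,
			 uint32_t *x, uint32_t *y)
{
	/* avg_time_slots_per_mtp = pbn / pbn_per_slot in 32.32 fixed point */
	uint64_t avg = ((uint64_t)pbn << 32) / pbn_per_slot;
	uint64_t frac = avg & 0xffffffffull;		/* fractional bits */

	*x = (uint32_t)(avg >> 32);			/* integer part */
	/* Y is the fraction in 1/2^26 units, rounded up: ceil(frac / 2^6) */
	*y = (uint32_t)((frac + 63) >> 6);
}
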
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 979f3bf..1e9304d 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -291,6 +291,8 @@
 	if (r) {
 		return r;
 	}
+	rdev->ddev->vblank_disable_allowed = true;
+
 	/* enable msi */
 	rdev->msi_enabled = 0;
 
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 24152df..478d409 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -331,13 +331,15 @@
 									 RADEON_CRTC_DISP_REQ_EN_B));
 			WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
 		}
-		drm_vblank_on(dev, radeon_crtc->crtc_id);
+		if (dev->num_crtcs > radeon_crtc->crtc_id)
+			drm_vblank_on(dev, radeon_crtc->crtc_id);
 		radeon_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		drm_vblank_off(dev, radeon_crtc->crtc_id);
+		if (dev->num_crtcs > radeon_crtc->crtc_id)
+			drm_vblank_off(dev, radeon_crtc->crtc_id);
 		if (radeon_crtc->crtc_id)
 			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
 		else {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index dd46c38..2d901bf 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -799,6 +799,10 @@
 	if ((offset + size) <= rdev->mc.visible_vram_size)
 		return 0;
 
+	/* Can't move a pinned BO to visible VRAM */
+	if (rbo->pin_count > 0)
+		return -EINVAL;
+
 	/* hurrah the memory is not visible ! */
 	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
 	lpfn =	rdev->mc.visible_vram_size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6d8c323..90f7394 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -235,6 +235,8 @@
 {
 	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 
+	if (radeon_ttm_tt_has_userptr(bo->ttm))
+		return -EPERM;
 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
@@ -397,9 +399,15 @@
 			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
+	struct radeon_bo *rbo;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int r;
 
+	/* Can't move a pinned BO */
+	rbo = container_of(bo, struct radeon_bo, tbo);
+	if (WARN_ON_ONCE(rbo->pin_count > 0))
+		return -EINVAL;
+
 	rdev = radeon_get_rdev(bo->bdev);
 	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 		radeon_move_null(bo, new_mem);
@@ -609,7 +617,7 @@
 			set_page_dirty(page);
 
 		mark_page_accessed(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	sg_free_table(ttm->sg);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index cb75ab7..e6abc09 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,9 +2926,12 @@
 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
 
@@ -3008,6 +3011,10 @@
 		}
 		++p;
 	}
+	/* limit mclk on all R7 370 parts for stability */
+	if (rdev->pdev->device == 0x6811 &&
+	    rdev->pdev->revision == 0x81)
+		max_mclk = 120000;
 
 	if (rps->vce_active) {
 		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 3d3cf2f..d5cfef7 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -271,8 +271,6 @@
 	if (!iores)
 		return -ENXIO;
 
-	platform_set_drvdata(pdev, hdmi);
-
 	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
 	/*
 	 * If we failed to find the CRTC(s) which this encoder is
@@ -293,7 +291,16 @@
 	drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
 			 DRM_MODE_ENCODER_TMDS, NULL);
 
-	return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+	ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+	/*
+	 * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+	 * which would have called the encoder cleanup.  Do it manually.
+	 */
+	if (ret)
+		drm_encoder_cleanup(encoder);
+
+	return ret;
 }
 
 static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 896da09..f556a8f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -251,6 +251,27 @@
 	return 0;
 }
 
+static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
+						    struct drm_file *file_priv)
+{
+	struct rockchip_drm_private *priv = crtc->dev->dev_private;
+	int pipe = drm_crtc_index(crtc);
+
+	if (pipe < ROCKCHIP_MAX_CRTC &&
+	    priv->crtc_funcs[pipe] &&
+	    priv->crtc_funcs[pipe]->cancel_pending_vblank)
+		priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
+}
+
+static void rockchip_drm_preclose(struct drm_device *dev,
+				  struct drm_file *file_priv)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
+}
+
 void rockchip_drm_lastclose(struct drm_device *dev)
 {
 	struct rockchip_drm_private *priv = dev->dev_private;
@@ -281,6 +302,7 @@
 				  DRIVER_PRIME | DRIVER_ATOMIC,
 	.load			= rockchip_drm_load,
 	.unload			= rockchip_drm_unload,
+	.preclose		= rockchip_drm_preclose,
 	.lastclose		= rockchip_drm_lastclose,
 	.get_vblank_counter	= drm_vblank_no_hw_counter,
 	.enable_vblank		= rockchip_drm_crtc_enable_vblank,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 3529f69..00d17d7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,6 +40,7 @@
 	int (*enable_vblank)(struct drm_crtc *crtc);
 	void (*disable_vblank)(struct drm_crtc *crtc);
 	void (*wait_for_update)(struct drm_crtc *crtc);
+	void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
 };
 
 struct rockchip_atomic_commit {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fd37054..a619f12 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -499,10 +499,25 @@
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
+	int i;
 
 	if (!vop->is_enabled)
 		return;
 
+	/*
+	 * We need to make sure that all windows are disabled before we
+	 * disable that crtc. Otherwise we might try to scan from a destroyed
+	 * buffer later.
+	 */
+	for (i = 0; i < vop->data->win_size; i++) {
+		struct vop_win *vop_win = &vop->win[i];
+		const struct vop_win_data *win = vop_win->data;
+
+		spin_lock(&vop->reg_lock);
+		VOP_WIN_SET(vop, win, enable, 0);
+		spin_unlock(&vop->reg_lock);
+	}
+
 	drm_crtc_vblank_off(crtc);
 
 	/*
@@ -549,6 +564,7 @@
 			   struct drm_plane_state *state)
 {
 	struct drm_crtc *crtc = state->crtc;
+	struct drm_crtc_state *crtc_state;
 	struct drm_framebuffer *fb = state->fb;
 	struct vop_win *vop_win = to_vop_win(plane);
 	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
@@ -563,12 +579,13 @@
 	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
 					DRM_PLANE_HELPER_NO_SCALING;
 
-	crtc = crtc ? crtc : plane->state->crtc;
-	/*
-	 * Both crtc or plane->state->crtc can be null.
-	 */
 	if (!crtc || !fb)
 		goto out_disable;
+
+	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+	if (WARN_ON(!crtc_state))
+		return -EINVAL;
+
 	src->x1 = state->src_x;
 	src->y1 = state->src_y;
 	src->x2 = state->src_x + state->src_w;
@@ -580,8 +597,8 @@
 
 	clip.x1 = 0;
 	clip.y1 = 0;
-	clip.x2 = crtc->mode.hdisplay;
-	clip.y2 = crtc->mode.vdisplay;
+	clip.x2 = crtc_state->adjusted_mode.hdisplay;
+	clip.y2 = crtc_state->adjusted_mode.vdisplay;
 
 	ret = drm_plane_helper_check_update(plane, crtc, state->fb,
 					    src, dest, &clip,
@@ -873,10 +890,30 @@
 	WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
 }
 
+static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
+					   struct drm_file *file_priv)
+{
+	struct drm_device *drm = crtc->dev;
+	struct vop *vop = to_vop(crtc);
+	struct drm_pending_vblank_event *e;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm->event_lock, flags);
+	e = vop->event;
+	if (e && e->base.file_priv == file_priv) {
+		vop->event = NULL;
+
+		e->base.destroy(&e->base);
+		file_priv->event_space += sizeof(e->event);
+	}
+	spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
 static const struct rockchip_crtc_funcs private_crtc_funcs = {
 	.enable_vblank = vop_crtc_enable_vblank,
 	.disable_vblank = vop_crtc_disable_vblank,
 	.wait_for_update = vop_crtc_wait_for_update,
+	.cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
 };
 
 static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -885,9 +922,6 @@
 {
 	struct vop *vop = to_vop(crtc);
 
-	if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
-		return false;
-
 	adjusted_mode->clock =
 		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
 
@@ -1108,7 +1142,7 @@
 	const struct vop_data *vop_data = vop->data;
 	struct device *dev = vop->dev;
 	struct drm_device *drm_dev = vop->drm_dev;
-	struct drm_plane *primary = NULL, *cursor = NULL, *plane;
+	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
 	struct drm_crtc *crtc = &vop->crtc;
 	struct device_node *port;
 	int ret;
@@ -1148,7 +1182,7 @@
 	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
 					&vop_crtc_funcs, NULL);
 	if (ret)
-		return ret;
+		goto err_cleanup_planes;
 
 	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
 
@@ -1181,6 +1215,7 @@
 	if (!port) {
 		DRM_ERROR("no port node found in %s\n",
 			  dev->of_node->full_name);
+		ret = -ENOENT;
 		goto err_cleanup_crtc;
 	}
 
@@ -1194,7 +1229,8 @@
 err_cleanup_crtc:
 	drm_crtc_cleanup(crtc);
 err_cleanup_planes:
-	list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
+	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
+				 head)
 		drm_plane_cleanup(plane);
 	return ret;
 }
@@ -1202,9 +1238,28 @@
 static void vop_destroy_crtc(struct vop *vop)
 {
 	struct drm_crtc *crtc = &vop->crtc;
+	struct drm_device *drm_dev = vop->drm_dev;
+	struct drm_plane *plane, *tmp;
 
 	rockchip_unregister_crtc_funcs(crtc);
 	of_node_put(crtc->port);
+
+	/*
+	 * We need to cleanup the planes now.  Why?
+	 *
+	 * The planes are "&vop->win[i].base".  That means the memory is
+	 * all part of the big "struct vop" chunk of memory.  That memory
+	 * was devm allocated and associated with this component.  We need to
+	 * free it ourselves before vop_unbind() finishes.
+	 */
+	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
+				 head)
+		vop_plane_destroy(plane);
+
+	/*
+	 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
+	 * references the CRTC.
+	 */
 	drm_crtc_cleanup(crtc);
 }
 
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 8a10f5b..f52d6cb2 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -121,7 +121,7 @@
 	spinlock_t lock;
 
 	struct drm_crtc base;
-	int powergate;
+	unsigned int powergate;
 	int pipe;
 
 	struct clk *clk;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4cbf265..e3daafa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -230,22 +230,13 @@
 
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man;
+	int put_count = 0;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
-	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
-		list_del_init(&bo->swap);
-		list_del_init(&bo->lru);
-
-	} else {
-		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
-			list_move_tail(&bo->swap, &bo->glob->swap_lru);
-
-		man = &bdev->man[bo->mem.mem_type];
-		list_move_tail(&bo->lru, &man->lru);
-	}
+	put_count = ttm_bo_del_from_lru(bo);
+	ttm_bo_list_ref_sub(bo, put_count, true);
+	ttm_bo_add_to_lru(bo);
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 4e19d0f..077ae9b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -311,7 +311,7 @@
 			goto out_err;
 
 		copy_highpage(to_page, from_page);
-		page_cache_release(from_page);
+		put_page(from_page);
 	}
 
 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@
 		copy_highpage(to_page, from_page);
 		set_page_dirty(to_page);
 		mark_page_accessed(to_page);
-		page_cache_release(to_page);
+		put_page(to_page);
 	}
 
 	ttm_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 33239a2..fd1eb9d 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -536,7 +536,7 @@
 out_destroy_fbi:
 	drm_fb_helper_release_fbi(helper);
 out_gfree:
-	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
 out:
 	return ret;
 }
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 2a0a784..d7528e0 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@
 		return ret;
 	}
 
-	drm_gem_object_unreference(&obj->base);
+	drm_gem_object_unreference_unlocked(&obj->base);
 	*handle_p = handle;
 	return 0;
 }
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index b7d2ff0..109b106 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -153,6 +153,24 @@
 	}
 }
 
+static void vc4_kick_out_firmware_fb(void)
+{
+	struct apertures_struct *ap;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return;
+
+	/* Since VC4 is a UMA device, the simplefb node may have been
+	 * located anywhere in memory.
+	 */
+	ap->ranges[0].base = 0;
+	ap->ranges[0].size = ~0;
+
+	remove_conflicting_framebuffers(ap, "vc4drmfb", false);
+	kfree(ap);
+}
+
 static int vc4_drm_bind(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -186,6 +204,8 @@
 	if (ret)
 		goto gem_destroy;
 
+	vc4_kick_out_firmware_fb();
+
 	ret = drm_dev_register(drm, 0);
 	if (ret < 0)
 		goto unbind_all;
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index e797dfc..7e2a12c 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -188,7 +188,7 @@
 			if (NULL != (page = vsg->pages[i])) {
 				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
 					SetPageDirty(page);
-				page_cache_release(page);
+				put_page(page);
 			}
 		}
 	case dr_via_pages_alloc:
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4854dac..5fd1fd0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -267,11 +267,23 @@
 	return 0;
 }
 
+static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
+					 struct drm_crtc_state *old_state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (crtc->state->event)
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
 static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
 	.enable        = virtio_gpu_crtc_enable,
 	.disable       = virtio_gpu_crtc_disable,
 	.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
 	.atomic_check  = virtio_gpu_crtc_atomic_check,
+	.atomic_flush  = virtio_gpu_crtc_atomic_flush,
 };
 
 static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 723ba16..1a1a87c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3293,19 +3293,19 @@
 		    &vmw_cmd_dx_cid_check, true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
 		    true, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
-		    &vmw_cmd_ok, true, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
 		    true, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
 		    true, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
 		    true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3b1faf7..679a4cb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -573,9 +573,9 @@
 		mode = old_mode;
 		old_mode = NULL;
 	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
-					       mode->hdisplay *
-					       (var->bits_per_pixel + 7) / 8,
-					       mode->vdisplay)) {
+					mode->hdisplay *
+					DIV_ROUND_UP(var->bits_per_pixel, 8),
+					mode->vdisplay)) {
 		drm_mode_destroy(vmw_priv->dev, mode);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index e00db3f..abb98c7 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1068,7 +1068,6 @@
 			goto err_register;
 		}
 
-		pdev->dev.of_node = of_node;
 		pdev->dev.parent = dev;
 
 		ret = platform_device_add_data(pdev, &reg->pdata,
@@ -1079,6 +1078,12 @@
 			platform_device_put(pdev);
 			goto err_register;
 		}
+
+		/*
+		 * Set of_node only after calling platform_device_add. Otherwise
+		 * the platform:imx-ipuv3-crtc modalias won't be used.
+		 */
+		pdev->dev.of_node = of_node;
 	}
 
 	return 0;
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 883a314..6494a4d 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -395,60 +395,48 @@
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
 
 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
-				   u32 pixel_format, int stride,
-				   int u_offset, int v_offset)
+				   unsigned int uv_stride,
+				   unsigned int u_offset, unsigned int v_offset)
 {
-	switch (pixel_format) {
-	case V4L2_PIX_FMT_YUV420:
-	case V4L2_PIX_FMT_YUV422P:
-		ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
-		ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
-		ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
-		break;
-	case V4L2_PIX_FMT_YVU420:
-		ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
-		ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
-		ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
-		break;
-	case V4L2_PIX_FMT_NV12:
-	case V4L2_PIX_FMT_NV16:
-		ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
-		ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
-		ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
-		break;
-	}
+	ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
+	ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+	ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
 
 void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
 			      u32 pixel_format, int stride, int height)
 {
-	int u_offset, v_offset;
+	int fourcc, u_offset, v_offset;
 	int uv_stride = 0;
 
-	switch (pixel_format) {
-	case V4L2_PIX_FMT_YUV420:
-	case V4L2_PIX_FMT_YVU420:
+	fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
+	switch (fourcc) {
+	case DRM_FORMAT_YUV420:
 		uv_stride = stride / 2;
 		u_offset = stride * height;
 		v_offset = u_offset + (uv_stride * height / 2);
-		ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
-					      u_offset, v_offset);
 		break;
-	case V4L2_PIX_FMT_YUV422P:
+	case DRM_FORMAT_YVU420:
+		uv_stride = stride / 2;
+		v_offset = stride * height;
+		u_offset = v_offset + (uv_stride * height / 2);
+		break;
+	case DRM_FORMAT_YUV422:
 		uv_stride = stride / 2;
 		u_offset = stride * height;
 		v_offset = u_offset + (uv_stride * height);
-		ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
-					      u_offset, v_offset);
 		break;
-	case V4L2_PIX_FMT_NV12:
-	case V4L2_PIX_FMT_NV16:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+		uv_stride = stride;
 		u_offset = stride * height;
-		ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
-					      u_offset, 0);
+		v_offset = 0;
 		break;
+	default:
+		return;
 	}
+	ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
 
@@ -684,6 +672,15 @@
 
 	switch (pix->pixelformat) {
 	case V4L2_PIX_FMT_YUV420:
+		offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+		u_offset = U_OFFSET(pix, image->rect.left,
+				    image->rect.top) - offset;
+		v_offset = V_OFFSET(pix, image->rect.left,
+				    image->rect.top) - offset;
+
+		ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+					      u_offset, v_offset);
+		break;
 	case V4L2_PIX_FMT_YVU420:
 		offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
 		u_offset = U_OFFSET(pix, image->rect.left,
@@ -691,9 +688,8 @@
 		v_offset = V_OFFSET(pix, image->rect.left,
 				    image->rect.top) - offset;
 
-		ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-					      pix->bytesperline,
-					      u_offset, v_offset);
+		ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+					      v_offset, u_offset);
 		break;
 	case V4L2_PIX_FMT_YUV422P:
 		offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
@@ -702,8 +698,7 @@
 		v_offset = V2_OFFSET(pix, image->rect.left,
 				     image->rect.top) - offset;
 
-		ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-					      pix->bytesperline,
+		ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
 					      u_offset, v_offset);
 		break;
 	case V4L2_PIX_FMT_NV12:
@@ -712,8 +707,7 @@
 				     image->rect.top) - offset;
 		v_offset = 0;
 
-		ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-					      pix->bytesperline,
+		ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
 					      u_offset, v_offset);
 		break;
 	case V4L2_PIX_FMT_NV16:
@@ -722,8 +716,7 @@
 				      image->rect.top) - offset;
 		v_offset = 0;
 
-		ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
-					      pix->bytesperline,
+		ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
 					      u_offset, v_offset);
 		break;
 	case V4L2_PIX_FMT_UYVY:
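
For reference, the offset arithmetic that the reworked ipu_cpmem_set_yuv_planar() performs before calling ipu_cpmem_set_yuv_planar_full() can be sketched in plain userspace C (illustrative only, not part of the patch; the 1920x1080 geometry is an assumed example):

#include <stdio.h>

/* Chroma-plane geometry derived from the luma stride and height. */
struct planar { unsigned int uv_stride, u_off, v_off; };

static struct planar yuv420(unsigned int stride, unsigned int height)
{
	struct planar p = { stride / 2, stride * height, 0 };

	p.v_off = p.u_off + p.uv_stride * height / 2;	/* V plane follows U */
	return p;
}

static struct planar nv12(unsigned int stride, unsigned int height)
{
	/* interleaved UV plane: full stride, V offset unused */
	struct planar p = { stride, stride * height, 0 };

	return p;
}

int main(void)
{
	struct planar p = yuv420(1920, 1080);

	/* 1920x1080 YUV420: uv_stride 960, U at 2073600, V at 2592000 */
	printf("YUV420: uv_stride=%u u=%u v=%u\n", p.uv_stride, p.u_off, p.v_off);

	p = nv12(1920, 1080);
	printf("NV12:   uv_stride=%u u=%u v=%u\n", p.uv_stride, p.u_off, p.v_off);
	return 0;
}

For YVU420 the same numbers apply with u_off and v_off swapped, which is exactly what the new DRM_FORMAT_YVU420 case does.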
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 042c395..837b1ec2 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -350,11 +350,13 @@
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
 
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
 {
 	struct ipu_dmfc_priv *priv = dmfc->priv;
 	u32 dmfc_gen1;
 
+	mutex_lock(&priv->mutex);
+
 	dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
 
 	if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
@@ -364,9 +366,9 @@
 
 	writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
 
-	return 0;
+	mutex_unlock(&priv->mutex);
 }
-EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel);
+EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
 
 struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
 {
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4117225..5646ca4 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -134,6 +134,16 @@
 
 	Say Y here if you want support for Apple infrared remote control.
 
+config HID_ASUS
+	tristate "Asus"
+	depends on I2C_HID
+	---help---
+	Support for Asus notebook built-in keyboard via i2c.
+
+	Supported devices:
+	- EeeBook X205TA
+	- VivoBook E200HA
+
 config HID_AUREAL
 	tristate "Aureal"
 	depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index be56ab6..a2fb562 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -24,6 +24,7 @@
 obj-$(CONFIG_HID_ACRUX)		+= hid-axff.o
 obj-$(CONFIG_HID_APPLE)		+= hid-apple.o
 obj-$(CONFIG_HID_APPLEIR)	+= hid-appleir.o
+obj-$(CONFIG_HID_ASUS)		+= hid-asus.o
 obj-$(CONFIG_HID_AUREAL)	+= hid-aureal.o
 obj-$(CONFIG_HID_BELKIN)	+= hid-belkin.o
 obj-$(CONFIG_HID_BETOP_FF)	+= hid-betopff.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
new file mode 100644
index 0000000..7a811ec
--- /dev/null
+++ b/drivers/hid/hid-asus.c
@@ -0,0 +1,52 @@
+/*
+ *  HID driver for Asus notebook built-in keyboard.
+ *  Fixes small logical maximum to match usage maximum.
+ *
+ *  Currently supported devices are:
+ *    EeeBook X205TA
+ *    VivoBook E200HA
+ *
+ *  Copyright (c) 2016 Yusuke Fujimaki <usk.fujimaki@gmail.com>
+ *
+ *  This module is based on hid-ortek by
+ *  Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
+ *  Copyright (c) 2011 Jiri Kosina
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+		unsigned int *rsize)
+{
+	if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
+		hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
+		rdesc[55] = 0xdd;
+	}
+	return rdesc;
+}
+
+static const struct hid_device_id asus_devices[] = {
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, asus_devices);
+
+static struct hid_driver asus_driver = {
+	.name = "asus",
+	.id_table = asus_devices,
+	.report_fixup = asus_report_fixup
+};
+module_hid_driver(asus_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index bdb8cc8..8ea3a26 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1129,48 +1129,45 @@
 static void __implement(u8 *report, unsigned offset, int n, u32 value)
 {
 	unsigned int idx = offset / 8;
-	unsigned int size = offset + n;
 	unsigned int bit_shift = offset % 8;
 	int bits_to_set = 8 - bit_shift;
-	u8 bit_mask = 0xff << bit_shift;
 
 	while (n - bits_to_set >= 0) {
-		report[idx] &= ~bit_mask;
+		report[idx] &= ~(0xff << bit_shift);
 		report[idx] |= value << bit_shift;
 		value >>= bits_to_set;
 		n -= bits_to_set;
 		bits_to_set = 8;
-		bit_mask = 0xff;
 		bit_shift = 0;
 		idx++;
 	}
 
 	/* last nibble */
 	if (n) {
-		if (size % 8)
-			bit_mask &= (1U << (size % 8)) - 1;
-		report[idx] &= ~bit_mask;
-		report[idx] |= (value << bit_shift) & bit_mask;
+		u8 bit_mask = ((1U << n) - 1);
+		report[idx] &= ~(bit_mask << bit_shift);
+		report[idx] |= value << bit_shift;
 	}
 }
 
 static void implement(const struct hid_device *hid, u8 *report,
 		      unsigned offset, unsigned n, u32 value)
 {
-	u64 m;
-
-	if (n > 32) {
+	if (unlikely(n > 32)) {
 		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
 			 __func__, n, current->comm);
 		n = 32;
-	}
+	} else if (n < 32) {
+		u32 m = (1U << n) - 1;
 
-	m = (1ULL << n) - 1;
-	if (value > m)
-		hid_warn(hid, "%s() called with too large value %d! (%s)\n",
-			 __func__, value, current->comm);
-	WARN_ON(value > m);
-	value &= m;
+		if (unlikely(value > m)) {
+			hid_warn(hid,
+				 "%s() called with too large value %d (n: %d)! (%s)\n",
+				 __func__, value, n, current->comm);
+			WARN_ON(1);
+			value &= m;
+		}
+	}
 
 	__implement(report, offset, n, value);
 }
@@ -1856,6 +1853,7 @@
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1979,6 +1977,9 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
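
The reworked __implement() above writes an n-bit value into a report at an arbitrary bit offset while leaving the surrounding bits untouched; a standalone sketch of the same masking scheme (userspace, illustrative names, not the driver code itself) shows the partial-last-byte case that the old size/bit_mask bookkeeping handled less directly:

#include <stdio.h>
#include <stdint.h>

/* Write an n-bit value at a bit offset, preserving the surrounding bits. */
static void put_field(uint8_t *report, unsigned int offset, int n, uint32_t value)
{
	unsigned int idx = offset / 8;
	unsigned int bit_shift = offset % 8;
	int bits_to_set = 8 - bit_shift;

	/* first (possibly partial) byte, then whole bytes */
	while (n - bits_to_set >= 0) {
		report[idx] &= ~(0xff << bit_shift);
		report[idx] |= value << bit_shift;
		value >>= bits_to_set;
		n -= bits_to_set;
		bits_to_set = 8;
		bit_shift = 0;
		idx++;
	}

	if (n) {				/* last byte: touch only n bits */
		uint8_t bit_mask = (1U << n) - 1;

		report[idx] &= ~(bit_mask << bit_shift);
		report[idx] |= value << bit_shift;
	}
}

int main(void)
{
	uint8_t buf[2] = { 0xff, 0xff };

	put_field(buf, 5, 6, 0x15);		/* 6-bit value 0b010101 at bit offset 5 */
	printf("%02x %02x\n", buf[0], buf[1]);	/* prints "bf fa" */
	return 0;
}

Writing the 6-bit value 0b010101 at bit offset 5 of an all-ones buffer yields bf fa: only bits 5..10 change, everything else in the report is preserved.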
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5c0e43e..3eec09a1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -163,6 +163,7 @@
 #define USB_VENDOR_ID_ASUSTEK		0x0b05
 #define USB_DEVICE_ID_ASUSTEK_LCM	0x1726
 #define USB_DEVICE_ID_ASUSTEK_LCM2	0x175b
+#define USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD	0x8585
 
 #define USB_VENDOR_ID_ATEN		0x0557
 #define USB_DEVICE_ID_ATEN_UC100KM	0x2004
@@ -258,7 +259,15 @@
 #define USB_VENDOR_ID_CORSAIR		0x1b1c
 #define USB_DEVICE_ID_CORSAIR_K90	0x1b02
 
+#define USB_VENDOR_ID_CORSAIR           0x1b1c
+#define USB_DEVICE_ID_CORSAIR_K70R      0x1b09
+#define USB_DEVICE_ID_CORSAIR_K95RGB    0x1b11
+#define USB_DEVICE_ID_CORSAIR_M65RGB    0x1b12
+#define USB_DEVICE_ID_CORSAIR_K70RGB    0x1b13
+#define USB_DEVICE_ID_CORSAIR_K65RGB    0x1b17
+
 #define USB_VENDOR_ID_CREATIVELABS	0x041e
+#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51	0x322c
 #define USB_DEVICE_ID_PRODIKEYS_PCMIDI	0x2801
 
 #define USB_VENDOR_ID_CVTOUCH		0x1ff7
@@ -676,6 +685,7 @@
 #define USB_DEVICE_ID_SIDEWINDER_GV	0x003b
 #define USB_DEVICE_ID_MS_OFFICE_KB	0x0048
 #define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
 #define USB_DEVICE_ID_MS_NE4K		0x00db
 #define USB_DEVICE_ID_MS_NE4K_JP	0x00dc
 #define USB_DEVICE_ID_MS_LK6K		0x00f9
@@ -683,6 +693,8 @@
 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB	0x0713
 #define USB_DEVICE_ID_MS_NE7K		0x071d
 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K	0x0730
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600  0x0750
 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500	0x076c
 #define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
 #define USB_DEVICE_ID_MS_SURFACE_PRO_2   0x0799
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 0125e35..1ac4ff4 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -184,21 +184,31 @@
 			unsigned char byte2, unsigned char byte3)
 {
 	int ret;
-	unsigned char buf[] = {0x18, byte2, byte3};
+	unsigned char *buf;
+
+	buf = kzalloc(3, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf[0] = 0x18;
+	buf[1] = byte2;
+	buf[2] = byte3;
 
 	switch (hdev->product) {
 	case USB_DEVICE_ID_LENOVO_CUSBKBD:
-		ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf),
+		ret = hid_hw_raw_request(hdev, 0x13, buf, 3,
 					HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
 		break;
 	case USB_DEVICE_ID_LENOVO_CBTKBD:
-		ret = hid_hw_output_report(hdev, buf, sizeof(buf));
+		ret = hid_hw_output_report(hdev, buf, 3);
 		break;
 	default:
 		ret = -EINVAL;
 		break;
 	}
 
+	kfree(buf);
+
 	return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */
 }
 
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 75cd3bc..e924d55 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -272,6 +272,12 @@
 		.driver_data = MS_PRESENTER },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
 		.driver_data = MS_ERGONOMY | MS_RDESC_3K },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
+		.driver_data = MS_ERGONOMY },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
+		.driver_data = MS_ERGONOMY },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
+		.driver_data = MS_ERGONOMY },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
 		.driver_data = MS_NOGET },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 25d3c43..c741f5e 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1169,6 +1169,7 @@
 							   MT_TOOL_FINGER,
 							   false);
 			}
+			input_mt_sync_frame(input_dev);
 			input_sync(input_dev);
 		}
 	}
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index 65c4ccfc..76d06cf 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -421,14 +421,13 @@
 
 	retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR,
 			ROCCAT_MAX_DEVICES, "roccat");
-
-	roccat_major = MAJOR(dev_id);
-
 	if (retval < 0) {
 		pr_warn("can't get major number\n");
 		goto error;
 	}
 
+	roccat_major = MAJOR(dev_id);
+
 	cdev_init(&roccat_cdev, &roccat_ops);
 	retval = cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES);
 
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c
index 847a497..9ad9c6e 100644
--- a/drivers/hid/hid-thingm.c
+++ b/drivers/hid/hid-thingm.c
@@ -148,13 +148,21 @@
 			  enum led_brightness brightness)
 {
 	struct thingm_led *led = container_of(ldev, struct thingm_led, ldev);
-	int ret;
 
-	ret = thingm_write_color(led->rgb);
-	if (ret)
-		hid_err(led->rgb->tdev->hdev, "failed to write color\n");
+	return thingm_write_color(led->rgb);
+}
 
-	return ret;
+static int thingm_init_led(struct thingm_led *led, const char *color_name,
+			   struct thingm_rgb *rgb, int minor)
+{
+	snprintf(led->name, sizeof(led->name), "thingm%d:%s:led%d",
+		 minor, color_name, rgb->num);
+	led->ldev.name = led->name;
+	led->ldev.max_brightness = 255;
+	led->ldev.brightness_set_blocking = thingm_led_set;
+	led->ldev.flags = LED_HW_PLUGGABLE;
+	led->rgb = rgb;
+	return devm_led_classdev_register(&rgb->tdev->hdev->dev, &led->ldev);
 }
 
 static int thingm_init_rgb(struct thingm_rgb *rgb)
@@ -163,42 +171,17 @@
 	int err;
 
 	/* Register the red diode */
-	snprintf(rgb->red.name, sizeof(rgb->red.name),
-			"thingm%d:red:led%d", minor, rgb->num);
-	rgb->red.ldev.name = rgb->red.name;
-	rgb->red.ldev.max_brightness = 255;
-	rgb->red.ldev.brightness_set_blocking = thingm_led_set;
-	rgb->red.rgb = rgb;
-
-	err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
-					 &rgb->red.ldev);
+	err = thingm_init_led(&rgb->red, "red", rgb, minor);
 	if (err)
 		return err;
 
 	/* Register the green diode */
-	snprintf(rgb->green.name, sizeof(rgb->green.name),
-			"thingm%d:green:led%d", minor, rgb->num);
-	rgb->green.ldev.name = rgb->green.name;
-	rgb->green.ldev.max_brightness = 255;
-	rgb->green.ldev.brightness_set_blocking = thingm_led_set;
-	rgb->green.rgb = rgb;
-
-	err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
-					 &rgb->green.ldev);
+	err = thingm_init_led(&rgb->green, "green", rgb, minor);
 	if (err)
 		return err;
 
 	/* Register the blue diode */
-	snprintf(rgb->blue.name, sizeof(rgb->blue.name),
-			"thingm%d:blue:led%d", minor, rgb->num);
-	rgb->blue.ldev.name = rgb->blue.name;
-	rgb->blue.ldev.max_brightness = 255;
-	rgb->blue.ldev.brightness_set_blocking = thingm_led_set;
-	rgb->blue.rgb = rgb;
-
-	err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
-					 &rgb->blue.ldev);
-	return err;
+	return thingm_init_led(&rgb->blue, "blue", rgb, minor);
 }
 
 static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 4390eee..c830ed3 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -2049,9 +2049,11 @@
 	 *   -----+------------------------------+-----+-----+
 	 * The single bits Yaw, Roll, Pitch in the lower right corner specify
 	 * whether the wiimote is rotating fast (0) or slow (1). Speed for slow
-	 * roation is 440 deg/s and for fast rotation 2000 deg/s. To get a
-	 * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
-	 * and 9 for slow.
+	 * rotation is 8192/440 units / deg/s and for fast rotation 8192/2000
+	 * units / deg/s. To get a linear scale for fast rotation we multiply
+	 * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
+	 * previous scale reported by this driver.
+	 * This leaves a linear scale with 8192*9/440 (~167.564) units / deg/s.
 	 * If the wiimote is not rotating the sensor reports 2^13 = 8192.
 	 * Ext specifies whether an extension is connected to the motionp.
 	 * which is parsed by wiimote-core.
@@ -2070,15 +2072,15 @@
 	z -= 8192;
 
 	if (!(ext[3] & 0x02))
-		x *= 18;
+		x = (x * 2000 * 9) / 440;
 	else
 		x *= 9;
 	if (!(ext[4] & 0x02))
-		y *= 18;
+		y = (y * 2000 * 9) / 440;
 	else
 		y *= 9;
 	if (!(ext[3] & 0x01))
-		z *= 18;
+		z = (z * 2000 * 9) / 440;
 	else
 		z *= 9;
 
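
The deg/s arithmetic in the comment above can be checked numerically; a trivial sketch (userspace, illustrative) confirms that the slow and the fast path end up on the same ~167.56 units per deg/s scale:

#include <stdio.h>

int main(void)
{
	double slow = 8192.0 / 440.0;	/* raw units per deg/s, slow mode */
	double fast = 8192.0 / 2000.0;	/* raw units per deg/s, fast mode */

	/* driver scaling: slow * 9, fast * 2000 * 9 / 440 */
	printf("slow scaled: %.3f units per deg/s\n", slow * 9.0);
	printf("fast scaled: %.3f units per deg/s\n",
	       fast * 2000.0 * 9.0 / 440.0);
	/* both print ~167.564, i.e. one common linear scale */
	return 0;
}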
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 9c2d7c2..f0e2757 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -34,6 +34,7 @@
 #include <linux/hid.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
+#include <linux/string.h>
 
 #include <linux/hidraw.h>
 
@@ -123,7 +124,6 @@
 
 	dev = hidraw_table[minor]->hid;
 
-
 	if (count > HID_MAX_BUFFER_SIZE) {
 		hid_warn(dev, "pid %d passed too large report\n",
 			 task_pid_nr(current));
@@ -138,17 +138,12 @@
 		goto out;
 	}
 
-	buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
-	if (!buf) {
-		ret = -ENOMEM;
+	buf = memdup_user(buffer, count);
+	if (IS_ERR(buf)) {
+		ret = PTR_ERR(buf);
 		goto out;
 	}
 
-	if (copy_from_user(buf, buffer, count)) {
-		ret = -EFAULT;
-		goto out_free;
-	}
-
 	if ((report_type == HID_OUTPUT_REPORT) &&
 	    !(dev->quirks & HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP)) {
 		ret = hid_hw_output_report(dev, buf, count);
@@ -587,14 +582,13 @@
 
 	result = alloc_chrdev_region(&dev_id, HIDRAW_FIRST_MINOR,
 			HIDRAW_MAX_DEVICES, "hidraw");
-
-	hidraw_major = MAJOR(dev_id);
-
 	if (result < 0) {
 		pr_warn("can't get major number\n");
 		goto out;
 	}
 
+	hidraw_major = MAJOR(dev_id);
+
 	hidraw_class = class_create(THIS_MODULE, "hidraw");
 	if (IS_ERR(hidraw_class)) {
 		result = PTR_ERR(hidraw_class);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ad71160..ae83af6 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -951,14 +951,6 @@
 	return ret;
 }
 
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
-	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
-		usbhid_restart_out_queue(usbhid);
-	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
-		usbhid_restart_ctrl_queue(usbhid);
-}
-
 static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
 {
 	struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@
 	usb_kill_urb(usbhid->urbout);
 }
 
+static void hid_restart_io(struct hid_device *hid)
+{
+	struct usbhid_device *usbhid = hid->driver_data;
+	int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+	int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+	spin_lock_irq(&usbhid->lock);
+	clear_bit(HID_SUSPENDED, &usbhid->iofl);
+	usbhid_mark_busy(usbhid);
+
+	if (clear_halt || reset_pending)
+		schedule_work(&usbhid->reset_work);
+	usbhid->retry_delay = 0;
+	spin_unlock_irq(&usbhid->lock);
+
+	if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+		return;
+
+	if (!clear_halt) {
+		if (hid_start_in(hid) < 0)
+			hid_io_error(hid);
+	}
+
+	spin_lock_irq(&usbhid->lock);
+	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+		usbhid_restart_out_queue(usbhid);
+	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+		usbhid_restart_ctrl_queue(usbhid);
+	spin_unlock_irq(&usbhid->lock);
+}
+
 /* Treat USB reset pretty much the same as suspend/resume */
 static int hid_pre_reset(struct usb_interface *intf)
 {
@@ -1453,14 +1476,14 @@
 		return 1;
 	}
 
+	/* No need to do another reset or clear a halted endpoint */
 	spin_lock_irq(&usbhid->lock);
 	clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+	clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
 	spin_unlock_irq(&usbhid->lock);
 	hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
-	status = hid_start_in(hid);
-	if (status < 0)
-		hid_io_error(hid);
-	usbhid_restart_queues(usbhid);
+
+	hid_restart_io(hid);
 
 	return 0;
 }
@@ -1483,25 +1506,9 @@
 #ifdef CONFIG_PM
 static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
 {
-	struct usbhid_device *usbhid = hid->driver_data;
-	int status;
+	int status = 0;
 
-	spin_lock_irq(&usbhid->lock);
-	clear_bit(HID_SUSPENDED, &usbhid->iofl);
-	usbhid_mark_busy(usbhid);
-
-	if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
-			test_bit(HID_RESET_PENDING, &usbhid->iofl))
-		schedule_work(&usbhid->reset_work);
-	usbhid->retry_delay = 0;
-
-	usbhid_restart_queues(usbhid);
-	spin_unlock_irq(&usbhid->lock);
-
-	status = hid_start_in(hid);
-	if (status < 0)
-		hid_io_error(hid);
-
+	hid_restart_io(hid);
 	if (driver_suspended && hid->driver && hid->driver->resume)
 		status = hid->driver->resume(hid);
 	return status;
@@ -1570,12 +1577,8 @@
 static int hid_resume(struct usb_interface *intf)
 {
 	struct hid_device *hid = usb_get_intfdata (intf);
-	struct usbhid_device *usbhid = hid->driver_data;
 	int status;
 
-	if (!test_bit(HID_STARTED, &usbhid->iofl))
-		return 0;
-
 	status = hid_resume_common(hid, true);
 	dev_dbg(&intf->dev, "resume status %d\n", status);
 	return 0;
@@ -1584,10 +1587,8 @@
 static int hid_reset_resume(struct usb_interface *intf)
 {
 	struct hid_device *hid = usb_get_intfdata(intf);
-	struct usbhid_device *usbhid = hid->driver_data;
 	int status;
 
-	clear_bit(HID_SUSPENDED, &usbhid->iofl);
 	status = hid_post_reset(intf);
 	if (status >= 0 && hid->driver && hid->driver->reset_resume) {
 		int ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index ed2f68e..b4b8c6a 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -71,6 +71,12 @@
 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 68a5609..499cc82 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -152,6 +152,25 @@
 		hid_data->inputmode = field->report->id;
 		hid_data->inputmode_index = usage->usage_index;
 		break;
+
+	case HID_UP_DIGITIZER:
+		if (field->report->id == 0x0B &&
+		    (field->application == WACOM_G9_DIGITIZER ||
+		     field->application == WACOM_G11_DIGITIZER)) {
+			wacom->wacom_wac.mode_report = field->report->id;
+			wacom->wacom_wac.mode_value = 0;
+		}
+		break;
+
+	case WACOM_G9_PAGE:
+	case WACOM_G11_PAGE:
+		if (field->report->id == 0x03 &&
+		    (field->application == WACOM_G9_TOUCHSCREEN ||
+		     field->application == WACOM_G11_TOUCHSCREEN)) {
+			wacom->wacom_wac.mode_report = field->report->id;
+			wacom->wacom_wac.mode_value = 0;
+		}
+		break;
 	}
 }
 
@@ -322,26 +341,41 @@
 	return 0;
 }
 
-static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
-		int length, int mode)
+static int wacom_set_device_mode(struct hid_device *hdev,
+				 struct wacom_wac *wacom_wac)
 {
-	unsigned char *rep_data;
+	u8 *rep_data;
+	struct hid_report *r;
+	struct hid_report_enum *re;
+	int length;
 	int error = -ENOMEM, limit = 0;
 
-	rep_data = kzalloc(length, GFP_KERNEL);
+	if (wacom_wac->mode_report < 0)
+		return 0;
+
+	re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+	r = re->report_id_hash[wacom_wac->mode_report];
+	if (!r)
+		return -EINVAL;
+
+	rep_data = hid_alloc_report_buf(r, GFP_KERNEL);
 	if (!rep_data)
-		return error;
+		return -ENOMEM;
+
+	length = hid_report_len(r);
 
 	do {
-		rep_data[0] = report_id;
-		rep_data[1] = mode;
+		rep_data[0] = wacom_wac->mode_report;
+		rep_data[1] = wacom_wac->mode_value;
 
 		error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
 					 length, 1);
 		if (error >= 0)
 			error = wacom_get_report(hdev, HID_FEATURE_REPORT,
 			                         rep_data, length, 1);
-	} while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
+	} while (error >= 0 &&
+		 rep_data[1] != wacom_wac->mode_report &&
+		 limit++ < WAC_MSG_RETRIES);
 
 	kfree(rep_data);
 
@@ -411,32 +445,41 @@
 static int wacom_query_tablet_data(struct hid_device *hdev,
 		struct wacom_features *features)
 {
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
 	if (hdev->bus == BUS_BLUETOOTH)
 		return wacom_bt_query_tablet_data(hdev, 1, features);
 
-	if (features->type == HID_GENERIC)
-		return wacom_hid_set_device_mode(hdev);
-
-	if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
-		if (features->type > TABLETPC) {
-			/* MT Tablet PC touch */
-			return wacom_set_device_mode(hdev, 3, 4, 4);
-		}
-		else if (features->type == WACOM_24HDT) {
-			return wacom_set_device_mode(hdev, 18, 3, 2);
-		}
-		else if (features->type == WACOM_27QHDT) {
-			return wacom_set_device_mode(hdev, 131, 3, 2);
-		}
-		else if (features->type == BAMBOO_PAD) {
-			return wacom_set_device_mode(hdev, 2, 2, 2);
-		}
-	} else if (features->device_type & WACOM_DEVICETYPE_PEN) {
-		if (features->type <= BAMBOO_PT) {
-			return wacom_set_device_mode(hdev, 2, 2, 2);
+	if (features->type != HID_GENERIC) {
+		if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
+			if (features->type > TABLETPC) {
+				/* MT Tablet PC touch */
+				wacom_wac->mode_report = 3;
+				wacom_wac->mode_value = 4;
+			} else if (features->type == WACOM_24HDT) {
+				wacom_wac->mode_report = 18;
+				wacom_wac->mode_value = 2;
+			} else if (features->type == WACOM_27QHDT) {
+				wacom_wac->mode_report = 131;
+				wacom_wac->mode_value = 2;
+			} else if (features->type == BAMBOO_PAD) {
+				wacom_wac->mode_report = 2;
+				wacom_wac->mode_value = 2;
+			}
+		} else if (features->device_type & WACOM_DEVICETYPE_PEN) {
+			if (features->type <= BAMBOO_PT) {
+				wacom_wac->mode_report = 2;
+				wacom_wac->mode_value = 2;
+			}
 		}
 	}
 
+	wacom_set_device_mode(hdev, wacom_wac);
+
+	if (features->type == HID_GENERIC)
+		return wacom_hid_set_device_mode(hdev);
+
 	return 0;
 }
 
@@ -450,7 +493,8 @@
 	features->x_fuzz = 4;
 	features->y_fuzz = 4;
 	features->pressure_fuzz = 0;
-	features->distance_fuzz = 0;
+	features->distance_fuzz = 1;
+	features->tilt_fuzz = 1;
 
 	/*
 	 * The wireless device HID is basic and layout conflicts with
@@ -1817,6 +1861,9 @@
 		goto fail_type;
 	}
 
+	wacom_wac->hid_data.inputmode = -1;
+	wacom_wac->mode_report = -1;
+
 	wacom->usbdev = dev;
 	wacom->intf = intf;
 	mutex_init(&wacom->lock);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index bd198bb..1eae13c 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -684,6 +684,7 @@
 
 		wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
 
+		wacom->shared->stylus_in_proximity = true;
 		return 1;
 	}
 
@@ -2343,12 +2344,13 @@
 	__set_bit(BTN_STYLUS2, input_dev->keybit);
 
 	input_set_abs_params(input_dev, ABS_DISTANCE,
-			     0, wacom_wac->features.distance_max, 0, 0);
+			     0, wacom_wac->features.distance_max, wacom_wac->features.distance_fuzz, 0);
 }
 
 static void wacom_setup_cintiq(struct wacom_wac *wacom_wac)
 {
 	struct input_dev *input_dev = wacom_wac->pen_input;
+	struct wacom_features *features = &wacom_wac->features;
 
 	wacom_setup_basic_pro_pen(wacom_wac);
 
@@ -2358,9 +2360,9 @@
 	__set_bit(BTN_TOOL_AIRBRUSH, input_dev->keybit);
 
 	input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
-	input_set_abs_params(input_dev, ABS_TILT_X, -64, 63, 0, 0);
+	input_set_abs_params(input_dev, ABS_TILT_X, -64, 63, features->tilt_fuzz, 0);
 	input_abs_set_res(input_dev, ABS_TILT_X, 57);
-	input_set_abs_params(input_dev, ABS_TILT_Y, -64, 63, 0, 0);
+	input_set_abs_params(input_dev, ABS_TILT_Y, -64, 63, features->tilt_fuzz, 0);
 	input_abs_set_res(input_dev, ABS_TILT_Y, 57);
 }
 
@@ -2426,6 +2428,17 @@
 	}
 
 	/*
+	 * Hack for the Bamboo One:
+	 * the device presents a PAD/Touch interface like most Bamboos and even
+	 * sends ghost PAD data on it. However, later, we must disable this
+	 * ghost interface, and we cannot detect it unless we set it here
+	 * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
+	 */
+	if (features->type == BAMBOO_PEN &&
+	    features->pktlen == WACOM_PKGLEN_BBTOUCH3)
+		features->device_type |= WACOM_DEVICETYPE_PAD;
+
+	/*
 	 * Raw Wacom-mode pen and touch events both come from interface
 	 * 0, whose HID descriptor has an application usage of 0xFF0D
 	 * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
@@ -2495,7 +2508,7 @@
 	case WACOM_G4:
 		input_set_abs_params(input_dev, ABS_DISTANCE, 0,
 					      features->distance_max,
-					      0, 0);
+					      features->distance_fuzz, 0);
 		/* fall through */
 
 	case GRAPHIRE:
@@ -2557,7 +2570,7 @@
 
 		input_set_abs_params(input_dev, ABS_DISTANCE, 0,
 				      features->distance_max,
-				      0, 0);
+				      features->distance_fuzz, 0);
 
 		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
 		input_abs_set_res(input_dev, ABS_Z, 287);
@@ -2616,7 +2629,7 @@
 			__set_bit(BTN_STYLUS2, input_dev->keybit);
 			input_set_abs_params(input_dev, ABS_DISTANCE, 0,
 				      features->distance_max,
-				      0, 0);
+				      features->distance_fuzz, 0);
 		}
 		break;
 	case BAMBOO_PAD:
@@ -3384,6 +3397,10 @@
 	{ "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
 	  INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x343 =
+	{ "Wacom DTK1651", 34616, 19559, 1023, 0,
+	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
 	{ "Wacom HID", .type = HID_GENERIC };
@@ -3549,6 +3566,7 @@
 	{ USB_DEVICE_WACOM(0x33C) },
 	{ USB_DEVICE_WACOM(0x33D) },
 	{ USB_DEVICE_WACOM(0x33E) },
+	{ USB_DEVICE_WACOM(0x343) },
 	{ USB_DEVICE_WACOM(0x4001) },
 	{ USB_DEVICE_WACOM(0x4004) },
 	{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 25baa7f..53d1653 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -84,6 +84,12 @@
 #define WACOM_DEVICETYPE_WL_MONITOR     0x0008
 
 #define WACOM_VENDORDEFINED_PEN		0xff0d0001
+#define WACOM_G9_PAGE			0xff090000
+#define WACOM_G9_DIGITIZER		(WACOM_G9_PAGE | 0x02)
+#define WACOM_G9_TOUCHSCREEN		(WACOM_G9_PAGE | 0x11)
+#define WACOM_G11_PAGE			0xff110000
+#define WACOM_G11_DIGITIZER		(WACOM_G11_PAGE | 0x02)
+#define WACOM_G11_TOUCHSCREEN		(WACOM_G11_PAGE | 0x11)
 
 #define WACOM_PEN_FIELD(f)	(((f)->logical == HID_DG_STYLUS) || \
 				 ((f)->physical == HID_DG_STYLUS) || \
@@ -171,6 +177,7 @@
 	int y_fuzz;
 	int pressure_fuzz;
 	int distance_fuzz;
+	int tilt_fuzz;
 	unsigned quirks;
 	unsigned touch_max;
 	int oVid;
@@ -238,6 +245,8 @@
 	int ps_connected;
 	u8 bt_features;
 	u8 bt_high_speed;
+	int mode_report;
+	int mode_value;
 	struct hid_data hid_data;
 };
 
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 5613e2b..a40a73a 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -103,15 +103,29 @@
  *    there is room for the producer to send the pending packet.
  */
 
-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
-				      struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 {
 	u32 cur_write_sz;
 	u32 r_size;
-	u32 write_loc = rbi->ring_buffer->write_index;
+	u32 write_loc;
 	u32 read_loc = rbi->ring_buffer->read_index;
-	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+	u32 pending_sz;
 
+	/*
+	 * Issue a full memory barrier before making the signaling decision.
+	 * Here is the reason for having this barrier:
+	 * If the read of pending_sz (in this function) were to be
+	 * reordered before we commit the new read
+	 * index (in the calling function), we could
+	 * have a problem. If the host were to set the pending_sz after we
+	 * have sampled pending_sz and go to sleep before we commit the
+	 * read index, we could miss sending the interrupt. Issue a full
+	 * memory barrier to address this.
+	 */
+	mb();
+
+	pending_sz = rbi->ring_buffer->pending_send_sz;
+	write_loc = rbi->ring_buffer->write_index;
 	/* If the other end is not blocked on write don't bother. */
 	if (pending_sz == 0)
 		return false;
@@ -120,7 +134,7 @@
 	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
 			read_loc - write_loc;
 
-	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+	if (cur_write_sz >= pending_sz)
 		return true;
 
 	return false;
@@ -455,7 +469,7 @@
 	/* Update the read index */
 	hv_set_next_read_location(inring_info, next_read_location);
 
-	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
+	*signal = hv_need_to_signal_on_read(inring_info);
 
 	return ret;
 }
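
The ordering that the new mb() enforces in hv_need_to_signal_on_read() can be sketched with C11 atomics (userspace model, illustrative only; the names and the seq_cst fence stand in for the Hyper-V ring layout and mb()): publish the new read index first, then sample pending_send_sz.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	_Atomic uint32_t read_index;
	_Atomic uint32_t write_index;
	_Atomic uint32_t pending_send_sz;
	uint32_t size;
};

static bool should_signal_after_read(struct ring *r, uint32_t new_read)
{
	uint32_t pending, write, free_bytes;

	atomic_store_explicit(&r->read_index, new_read, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the mb() in the patch */

	pending = atomic_load_explicit(&r->pending_send_sz, memory_order_relaxed);
	if (pending == 0)			/* writer is not blocked */
		return false;

	write = atomic_load_explicit(&r->write_index, memory_order_relaxed);
	free_bytes = write >= new_read ? r->size - (write - new_read)
				       : new_read - write;
	return free_bytes >= pending;		/* enough room now: signal */
}

int main(void)
{
	struct ring r = { .size = 4096 };

	atomic_store(&r.write_index, 100);
	atomic_store(&r.pending_send_sz, 512);

	/* reader just advanced to index 3000: 2900 bytes free, so signal */
	printf("signal: %d\n", should_signal_after_read(&r, 3000));
	return 0;
}

If the load of pending_send_sz were allowed to drift above the store of read_index, the host could set pending_send_sz in that window and never be signaled, which is the lost-wakeup the comment describes.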
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5c2d13a..ff94007 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -288,7 +288,7 @@
 
 config SENSORS_FAM15H_POWER
 	tristate "AMD Family 15h processor power"
-	depends on X86 && PCI
+	depends on X86 && PCI && CPU_SUP_AMD
 	help
 	  If you say yes here you get support for processor power
 	  information of your AMD family 15h CPU.
@@ -621,7 +621,8 @@
 	  If you say yes here you get support for ITE IT8705F, IT8712F, IT8716F,
 	  IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8732F, IT8758E,
 	  IT8771E, IT8772E, IT8781F, IT8782F, IT8783E/F, IT8786E, IT8790E,
-	  IT8603E, IT8620E, and IT8623E sensor chips, and the SiS950 clone.
+	  IT8603E, IT8620E, IT8623E, and IT8628E sensor chips, and the SiS950
+	  clone.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called it87.
@@ -821,6 +822,16 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called max197.
 
+config SENSORS_MAX31722
+	tristate "MAX31722 temperature sensor"
+	depends on SPI
+	help
+	  Support for the Maxim Integrated MAX31722/MAX31723 digital
+	  thermometers/thermostats operating over an SPI interface.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called max31722.
+
 config SENSORS_MAX6639
 	tristate "Maxim MAX6639 sensor chip"
 	depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 58cc3ac..2ef5b7c 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -112,6 +112,7 @@
 obj-$(CONFIG_SENSORS_MAX1619)	+= max1619.o
 obj-$(CONFIG_SENSORS_MAX1668)	+= max1668.o
 obj-$(CONFIG_SENSORS_MAX197)	+= max197.o
+obj-$(CONFIG_SENSORS_MAX31722)	+= max31722.o
 obj-$(CONFIG_SENSORS_MAX6639)	+= max6639.o
 obj-$(CONFIG_SENSORS_MAX6642)	+= max6642.o
 obj-$(CONFIG_SENSORS_MAX6650)	+= max6650.o
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 6c99ee7..ee396ff 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -120,6 +120,7 @@
 	unsigned int vref_mv = ADS7828_INT_VREF_MV;
 	bool diff_input = false;
 	bool ext_vref = false;
+	unsigned int regval;
 
 	data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL);
 	if (!data)
@@ -154,6 +155,15 @@
 	if (!diff_input)
 		data->cmd_byte |= ADS7828_CMD_SD_SE;
 
+	/*
+	 * Datasheet specifies internal reference voltage is disabled by
+	 * default. The internal reference voltage needs to be enabled and
+	 * voltage needs to settle before getting valid ADC data. So perform a
+	 * dummy read to enable the internal reference voltage.
+	 */
+	if (!ext_vref)
+		regmap_read(data->regmap, data->cmd_byte, &regval);
+
 	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
 							   data,
 							   ads7828_groups);
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 4f695d8..eb97a92 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -1,7 +1,7 @@
 /*
  * fam15h_power.c - AMD Family 15h processor power monitoring
  *
- * Copyright (c) 2011 Advanced Micro Devices, Inc.
+ * Copyright (c) 2011-2016 Advanced Micro Devices, Inc.
  * Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
  *
  *
@@ -25,6 +25,10 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/time.h>
+#include <linux/sched.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 
@@ -44,8 +48,14 @@
 
 #define FAM15H_MIN_NUM_ATTRS		2
 #define FAM15H_NUM_GROUPS		2
+#define MAX_CUS				8
 
+/* set maximum interval as 1 second */
+#define MAX_INTERVAL			1000
+
+#define MSR_F15H_CU_PWR_ACCUMULATOR	0xc001007a
 #define MSR_F15H_CU_MAX_PWR_ACCUMULATOR	0xc001007b
+#define MSR_F15H_PTSC			0xc0010280
 
 #define PCI_DEVICE_ID_AMD_15H_M70H_NB_F4 0x15b4
 
@@ -59,8 +69,20 @@
 	struct attribute_group group;
 	/* maximum accumulated power of a compute unit */
 	u64 max_cu_acc_power;
+	/* accumulated power of the compute units */
+	u64 cu_acc_power[MAX_CUS];
+	/* performance timestamp counter */
+	u64 cpu_sw_pwr_ptsc[MAX_CUS];
+	/* online/offline status of current compute unit */
+	int cu_on[MAX_CUS];
+	unsigned long power_period;
 };
 
+static bool is_carrizo_or_later(void)
+{
+	return boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60;
+}
+
 static ssize_t show_power(struct device *dev,
 			  struct device_attribute *attr, char *buf)
 {
@@ -77,7 +99,7 @@
 	 * On Carrizo and later platforms, TdpRunAvgAccCap bit field
 	 * is extended to 4:31 from 4:25.
 	 */
-	if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60) {
+	if (is_carrizo_or_later()) {
 		running_avg_capture = val >> 4;
 		running_avg_capture = sign_extend32(running_avg_capture, 27);
 	} else {
@@ -94,7 +116,7 @@
 	 * On Carrizo and later platforms, ApmTdpLimit bit field
 	 * is extended to 16:31 from 16:28.
 	 */
-	if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60)
+	if (is_carrizo_or_later())
 		tdp_limit = val >> 16;
 	else
 		tdp_limit = (val >> 16) & 0x1fff;
@@ -125,6 +147,167 @@
 }
 static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
 
+static void do_read_registers_on_cu(void *_data)
+{
+	struct fam15h_power_data *data = _data;
+	int cpu, cu;
+
+	cpu = smp_processor_id();
+
+	/*
+	 * With the new x86 topology modelling, the CPU core id is
+	 * actually the compute unit id.
+	 */
+	cu = cpu_data(cpu).cpu_core_id;
+
+	rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
+	rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
+
+	data->cu_on[cu] = 1;
+}
+
+/*
+ * This function may only be called when CPUID
+ * Fn8000_0007:EDX[12] is set.
+ */
+static int read_registers(struct fam15h_power_data *data)
+{
+	int this_cpu, ret, cpu;
+	int core, this_core;
+	cpumask_var_t mask;
+
+	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+	if (!ret)
+		return -ENOMEM;
+
+	memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
+
+	get_online_cpus();
+	this_cpu = smp_processor_id();
+
+	/*
+	 * Choose the first online core of each compute unit, and then
+	 * read their MSR values of power and PTSC in a single IPI,
+	 * because a CPU core's MSR values represent those of its
+	 * compute unit.
+	 */
+	core = -1;
+
+	for_each_online_cpu(cpu) {
+		this_core = topology_core_id(cpu);
+
+		if (this_core == core)
+			continue;
+
+		core = this_core;
+
+		/* get any CPU on this compute unit */
+		cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
+	}
+
+	if (cpumask_test_cpu(this_cpu, mask))
+		do_read_registers_on_cu(data);
+
+	smp_call_function_many(mask, do_read_registers_on_cu, data, true);
+	put_online_cpus();
+
+	free_cpumask_var(mask);
+
+	return 0;
+}
+
+static ssize_t acc_show_power(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct fam15h_power_data *data = dev_get_drvdata(dev);
+	u64 prev_cu_acc_power[MAX_CUS], prev_ptsc[MAX_CUS],
+	    jdelta[MAX_CUS];
+	u64 tdelta, avg_acc;
+	int cu, cu_num, ret;
+	signed long leftover;
+
+	/*
+	 * With the new x86 topology modelling, x86_max_cores is the
+	 * compute unit number.
+	 */
+	cu_num = boot_cpu_data.x86_max_cores;
+
+	ret = read_registers(data);
+	if (ret)
+		return 0;
+
+	for (cu = 0; cu < cu_num; cu++) {
+		prev_cu_acc_power[cu] = data->cu_acc_power[cu];
+		prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
+	}
+
+	leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
+	if (leftover)
+		return 0;
+
+	ret = read_registers(data);
+	if (ret)
+		return 0;
+
+	for (cu = 0, avg_acc = 0; cu < cu_num; cu++) {
+		/* check if current compute unit is online */
+		if (data->cu_on[cu] == 0)
+			continue;
+
+		if (data->cu_acc_power[cu] < prev_cu_acc_power[cu]) {
+			jdelta[cu] = data->max_cu_acc_power + data->cu_acc_power[cu];
+			jdelta[cu] -= prev_cu_acc_power[cu];
+		} else {
+			jdelta[cu] = data->cu_acc_power[cu] - prev_cu_acc_power[cu];
+		}
+		tdelta = data->cpu_sw_pwr_ptsc[cu] - prev_ptsc[cu];
+		jdelta[cu] *= data->cpu_pwr_sample_ratio * 1000;
+		do_div(jdelta[cu], tdelta);
+
+		/* the unit is microWatt */
+		avg_acc += jdelta[cu];
+	}
+
+	return sprintf(buf, "%llu\n", (unsigned long long)avg_acc);
+}
+static DEVICE_ATTR(power1_average, S_IRUGO, acc_show_power, NULL);
+
+static ssize_t acc_show_power_period(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct fam15h_power_data *data = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lu\n", data->power_period);
+}
+
+static ssize_t acc_set_power_period(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct fam15h_power_data *data = dev_get_drvdata(dev);
+	unsigned long temp;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &temp);
+	if (ret)
+		return ret;
+
+	if (temp > MAX_INTERVAL)
+		return -EINVAL;
+
+	/* the interval value should be greater than 0 */
+	if (temp <= 0)
+		return -EINVAL;
+
+	data->power_period = temp;
+
+	return count;
+}
+static DEVICE_ATTR(power1_average_interval, S_IRUGO | S_IWUSR,
+		   acc_show_power_period, acc_set_power_period);
+
 static int fam15h_power_init_attrs(struct pci_dev *pdev,
 				   struct fam15h_power_data *data)
 {
@@ -137,6 +320,10 @@
 	     (c->x86_model >= 0x60 && c->x86_model <= 0x7f)))
 		n += 1;
 
+	/* check if processor supports accumulated power */
+	if (boot_cpu_has(X86_FEATURE_ACC_POWER))
+		n += 2;
+
 	fam15h_power_attrs = devm_kcalloc(&pdev->dev, n,
 					  sizeof(*fam15h_power_attrs),
 					  GFP_KERNEL);
@@ -151,6 +338,11 @@
 	     (c->x86_model >= 0x60 && c->x86_model <= 0x7f)))
 		fam15h_power_attrs[n++] = &dev_attr_power1_input.attr;
 
+	if (boot_cpu_has(X86_FEATURE_ACC_POWER)) {
+		fam15h_power_attrs[n++] = &dev_attr_power1_average.attr;
+		fam15h_power_attrs[n++] = &dev_attr_power1_average_interval.attr;
+	}
+
 	data->group.attrs = fam15h_power_attrs;
 
 	return 0;
@@ -216,7 +408,7 @@
 static int fam15h_power_init_data(struct pci_dev *f4,
 				  struct fam15h_power_data *data)
 {
-	u32 val, eax, ebx, ecx, edx;
+	u32 val;
 	u64 tmp;
 	int ret;
 
@@ -243,10 +435,9 @@
 	if (ret)
 		return ret;
 
-	cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
 
 	/* CPUID Fn8000_0007:EDX[12] indicates to support accumulated power */
-	if (!(edx & BIT(12)))
+	if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
 		return 0;
 
 	/*
@@ -254,7 +445,7 @@
 	 * sample period to the PTSC counter period by executing CPUID
 	 * Fn8000_0007:ECX
 	 */
-	data->cpu_pwr_sample_ratio = ecx;
+	data->cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
 
 	if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
 		pr_err("Failed to read max compute unit power accumulator MSR\n");
@@ -263,7 +454,15 @@
 
 	data->max_cu_acc_power = tmp;
 
-	return 0;
+	/*
+	 * Milliseconds are a reasonable interval for the measurement.
+	 * But it shouldn't be set too long here, because an interval of
+	 * several seconds would cause the read function to hang. So set
+	 * the default interval to 10 ms.
+	 */
+	data->power_period = 10;
+
+	return read_registers(data);
 }
 
 static int fam15h_power_probe(struct pci_dev *pdev,
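
The power1_average value added above boils down to one expression per compute unit, avg_uW = acc_delta * cpu_pwr_sample_ratio * 1000 / ptsc_delta, with acc_delta wrapping through max_cu_acc_power when the accumulator overflows between the two samples. A small sketch (userspace, made-up sample numbers; the real driver reads the MSRs) makes that concrete:

#include <stdio.h>
#include <stdint.h>

static uint64_t cu_avg_power_uw(uint64_t acc_prev, uint64_t acc_now,
				uint64_t acc_max, uint64_t ptsc_delta,
				uint64_t sample_ratio)
{
	/* accumulator delta, allowing for one wrap through acc_max */
	uint64_t delta = acc_now >= acc_prev ? acc_now - acc_prev
					     : acc_max + acc_now - acc_prev;

	return delta * sample_ratio * 1000 / ptsc_delta;
}

int main(void)
{
	/* assumed sample: ratio 10, 10 ms worth of PTSC ticks at 100 MHz */
	printf("%llu uW\n",
	       (unsigned long long)cu_avg_power_uw(1000, 6000, 1 << 28,
						   1000000, 10));
	return 0;
}

The driver sums this per-compute-unit figure over all online compute units to report the package average in microwatts.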
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 1896e26..730d840 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -13,6 +13,7 @@
  *  Supports: IT8603E  Super I/O chip w/LPC interface
  *            IT8620E  Super I/O chip w/LPC interface
  *            IT8623E  Super I/O chip w/LPC interface
+ *            IT8628E  Super I/O chip w/LPC interface
  *            IT8705F  Super I/O chip w/LPC interface
  *            IT8712F  Super I/O chip w/LPC interface
  *            IT8716F  Super I/O chip w/LPC interface
@@ -44,14 +45,11 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -72,17 +70,18 @@
 
 enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8732,
 	     it8771, it8772, it8781, it8782, it8783, it8786, it8790, it8603,
-	     it8620 };
+	     it8620, it8628 };
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-static struct platform_device *pdev;
+static struct platform_device *it87_pdev[2];
 
-#define	REG	0x2e	/* The register to read/write */
+#define	REG_2E	0x2e	/* The register to read/write */
+#define	REG_4E	0x4e	/* Secondary register to read/write */
+
 #define	DEV	0x07	/* Register: Logical device select */
-#define	VAL	0x2f	/* The value to read/write */
 #define PME	0x04	/* The device with the fan registers in it */
 
 /* The device with the IT8718F/IT8720F VID value in it */
@@ -91,54 +90,55 @@
 #define	DEVID	0x20	/* Register: Device ID */
 #define	DEVREV	0x22	/* Register: Device Revision */
 
-static inline int superio_inb(int reg)
+static inline int superio_inb(int ioreg, int reg)
 {
-	outb(reg, REG);
-	return inb(VAL);
+	outb(reg, ioreg);
+	return inb(ioreg + 1);
 }
 
-static inline void superio_outb(int reg, int val)
+static inline void superio_outb(int ioreg, int reg, int val)
 {
-	outb(reg, REG);
-	outb(val, VAL);
+	outb(reg, ioreg);
+	outb(val, ioreg + 1);
 }
 
-static int superio_inw(int reg)
+static int superio_inw(int ioreg, int reg)
 {
 	int val;
-	outb(reg++, REG);
-	val = inb(VAL) << 8;
-	outb(reg, REG);
-	val |= inb(VAL);
+
+	outb(reg++, ioreg);
+	val = inb(ioreg + 1) << 8;
+	outb(reg, ioreg);
+	val |= inb(ioreg + 1);
 	return val;
 }
 
-static inline void superio_select(int ldn)
+static inline void superio_select(int ioreg, int ldn)
 {
-	outb(DEV, REG);
-	outb(ldn, VAL);
+	outb(DEV, ioreg);
+	outb(ldn, ioreg + 1);
 }
 
-static inline int superio_enter(void)
+static inline int superio_enter(int ioreg)
 {
 	/*
-	 * Try to reserve REG and REG + 1 for exclusive access.
+	 * Try to reserve ioreg and ioreg + 1 for exclusive access.
 	 */
-	if (!request_muxed_region(REG, 2, DRVNAME))
+	if (!request_muxed_region(ioreg, 2, DRVNAME))
 		return -EBUSY;
 
-	outb(0x87, REG);
-	outb(0x01, REG);
-	outb(0x55, REG);
-	outb(0x55, REG);
+	outb(0x87, ioreg);
+	outb(0x01, ioreg);
+	outb(0x55, ioreg);
+	outb(ioreg == REG_4E ? 0xaa : 0x55, ioreg);
 	return 0;
 }
 
-static inline void superio_exit(void)
+static inline void superio_exit(int ioreg)
 {
-	outb(0x02, REG);
-	outb(0x02, VAL);
-	release_region(REG, 2);
+	outb(0x02, ioreg);
+	outb(0x02, ioreg + 1);
+	release_region(ioreg, 2);
 }
 
 /* Logical device 4 registers */
@@ -161,6 +161,7 @@
 #define IT8603E_DEVID 0x8603
 #define IT8620E_DEVID 0x8620
 #define IT8623E_DEVID 0x8623
+#define IT8628E_DEVID 0x8628
 #define IT87_ACT_REG  0x30
 #define IT87_BASE_REG 0x60
 
@@ -168,6 +169,7 @@
 #define IT87_SIO_GPIO1_REG	0x25
 #define IT87_SIO_GPIO2_REG	0x26
 #define IT87_SIO_GPIO3_REG	0x27
+#define IT87_SIO_GPIO4_REG	0x28
 #define IT87_SIO_GPIO5_REG	0x29
 #define IT87_SIO_PINX1_REG	0x2a	/* Pin selection */
 #define IT87_SIO_PINX2_REG	0x2c	/* Pin selection */
@@ -217,7 +219,12 @@
 #define IT87_REG_FAN_DIV       0x0b
 #define IT87_REG_FAN_16BIT     0x0c
 
-/* Monitors: 9 voltage (0 to 7, battery), 3 temp (1 to 3), 3 fan (1 to 3) */
+/*
+ * Monitors:
+ * - up to 13 voltage (0 to 7, battery, avcc, 10 to 12)
+ * - up to 6 temp (1 to 6)
+ * - up to 6 fan (1 to 6)
+ */
 
 static const u8 IT87_REG_FAN[]         = { 0x0d, 0x0e, 0x0f, 0x80, 0x82, 0x4c };
 static const u8 IT87_REG_FAN_MIN[]     = { 0x10, 0x11, 0x12, 0x84, 0x86, 0x4e };
@@ -227,10 +234,12 @@
 
 #define IT87_REG_FAN_MAIN_CTRL 0x13
 #define IT87_REG_FAN_CTL       0x14
-#define IT87_REG_PWM(nr)       (0x15 + (nr))
-#define IT87_REG_PWM_DUTY(nr)  (0x63 + (nr) * 8)
+static const u8 IT87_REG_PWM[]         = { 0x15, 0x16, 0x17, 0x7f, 0xa7, 0xaf };
+static const u8 IT87_REG_PWM_DUTY[]    = { 0x63, 0x6b, 0x73, 0x7b, 0xa3, 0xab };
 
-#define IT87_REG_VIN(nr)       (0x20 + (nr))
+static const u8 IT87_REG_VIN[]	= { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26,
+				    0x27, 0x28, 0x2f, 0x2c, 0x2d, 0x2e };
+
 #define IT87_REG_TEMP(nr)      (0x29 + (nr))
 
 #define IT87_REG_VIN_MAX(nr)   (0x30 + (nr) * 2)
@@ -245,30 +254,48 @@
 
 #define IT87_REG_CHIPID        0x58
 
-#define IT87_REG_AUTO_TEMP(nr, i) (0x60 + (nr) * 8 + (i))
-#define IT87_REG_AUTO_PWM(nr, i)  (0x65 + (nr) * 8 + (i))
+static const u8 IT87_REG_AUTO_BASE[] = { 0x60, 0x68, 0x70, 0x78, 0xa0, 0xa8 };
+
+#define IT87_REG_AUTO_TEMP(nr, i) (IT87_REG_AUTO_BASE[nr] + (i))
+#define IT87_REG_AUTO_PWM(nr, i)  (IT87_REG_AUTO_BASE[nr] + 5 + (i))
+
+#define IT87_REG_TEMP456_ENABLE	0x77
+
+#define NUM_VIN			ARRAY_SIZE(IT87_REG_VIN)
+#define NUM_VIN_LIMIT		8
+#define NUM_TEMP		6
+#define NUM_TEMP_OFFSET		ARRAY_SIZE(IT87_REG_TEMP_OFFSET)
+#define NUM_TEMP_LIMIT		3
+#define NUM_FAN			ARRAY_SIZE(IT87_REG_FAN)
+#define NUM_FAN_DIV		3
+#define NUM_PWM			ARRAY_SIZE(IT87_REG_PWM)
+#define NUM_AUTO_PWM		ARRAY_SIZE(IT87_REG_PWM)
 
 struct it87_devices {
 	const char *name;
 	const char * const suffix;
-	u16 features;
+	u32 features;
 	u8 peci_mask;
 	u8 old_peci_mask;
 };
 
-#define FEAT_12MV_ADC		(1 << 0)
-#define FEAT_NEWER_AUTOPWM	(1 << 1)
-#define FEAT_OLD_AUTOPWM	(1 << 2)
-#define FEAT_16BIT_FANS		(1 << 3)
-#define FEAT_TEMP_OFFSET	(1 << 4)
-#define FEAT_TEMP_PECI		(1 << 5)
-#define FEAT_TEMP_OLD_PECI	(1 << 6)
-#define FEAT_FAN16_CONFIG	(1 << 7)	/* Need to enable 16-bit fans */
-#define FEAT_FIVE_FANS		(1 << 8)	/* Supports five fans */
-#define FEAT_VID		(1 << 9)	/* Set if chip supports VID */
-#define FEAT_IN7_INTERNAL	(1 << 10)	/* Set if in7 is internal */
-#define FEAT_SIX_FANS		(1 << 11)	/* Supports six fans */
-#define FEAT_10_9MV_ADC		(1 << 12)
+#define FEAT_12MV_ADC		BIT(0)
+#define FEAT_NEWER_AUTOPWM	BIT(1)
+#define FEAT_OLD_AUTOPWM	BIT(2)
+#define FEAT_16BIT_FANS		BIT(3)
+#define FEAT_TEMP_OFFSET	BIT(4)
+#define FEAT_TEMP_PECI		BIT(5)
+#define FEAT_TEMP_OLD_PECI	BIT(6)
+#define FEAT_FAN16_CONFIG	BIT(7)	/* Need to enable 16-bit fans */
+#define FEAT_FIVE_FANS		BIT(8)	/* Supports five fans */
+#define FEAT_VID		BIT(9)	/* Set if chip supports VID */
+#define FEAT_IN7_INTERNAL	BIT(10)	/* Set if in7 is internal */
+#define FEAT_SIX_FANS		BIT(11)	/* Supports six fans */
+#define FEAT_10_9MV_ADC		BIT(12)
+#define FEAT_AVCC3		BIT(13)	/* Chip supports in9/AVCC3 */
+#define FEAT_SIX_PWM		BIT(14)	/* Chip supports 6 pwm chn */
+#define FEAT_PWM_FREQ2		BIT(15)	/* Separate pwm freq 2 */
+#define FEAT_SIX_TEMP		BIT(16)	/* Up to 6 temp sensors */
 
 static const struct it87_devices it87_devices[] = {
 	[it87] = {
@@ -286,20 +313,22 @@
 		.name = "it8716",
 		.suffix = "F",
 		.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
-		  | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS,
+		  | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_PWM_FREQ2,
 	},
 	[it8718] = {
 		.name = "it8718",
 		.suffix = "F",
 		.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
-		  | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS,
+		  | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS
+		  | FEAT_PWM_FREQ2,
 		.old_peci_mask = 0x4,
 	},
 	[it8720] = {
 		.name = "it8720",
 		.suffix = "F",
 		.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
-		  | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS,
+		  | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS
+		  | FEAT_PWM_FREQ2,
 		.old_peci_mask = 0x4,
 	},
 	[it8721] = {
@@ -307,7 +336,8 @@
 		.suffix = "F",
 		.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
 		  | FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
-		  | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_IN7_INTERNAL,
+		  | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_IN7_INTERNAL
+		  | FEAT_PWM_FREQ2,
 		.peci_mask = 0x05,
 		.old_peci_mask = 0x02,	/* Actually reports PCH */
 	},
@@ -316,7 +346,7 @@
 		.suffix = "F",
 		.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
 		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_FIVE_FANS
-		  | FEAT_IN7_INTERNAL,
+		  | FEAT_IN7_INTERNAL | FEAT_PWM_FREQ2,
 		.peci_mask = 0x07,
 	},
 	[it8732] = {
@@ -332,7 +362,8 @@
 		.name = "it8771",
 		.suffix = "E",
 		.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
-		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+		  | FEAT_PWM_FREQ2,
 				/* PECI: guesswork */
 				/* 12mV ADC (OHM) */
 				/* 16 bit fans (OHM) */
@@ -343,7 +374,8 @@
 		.name = "it8772",
 		.suffix = "E",
 		.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
-		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+		  | FEAT_PWM_FREQ2,
 				/* PECI (coreboot) */
 				/* 12mV ADC (HWSensors4, OHM) */
 				/* 16 bit fans (HWSensors4, OHM) */
@@ -354,42 +386,45 @@
 		.name = "it8781",
 		.suffix = "F",
 		.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
-		  | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG,
+		  | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
 		.old_peci_mask = 0x4,
 	},
 	[it8782] = {
 		.name = "it8782",
 		.suffix = "F",
 		.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
-		  | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG,
+		  | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
 		.old_peci_mask = 0x4,
 	},
 	[it8783] = {
 		.name = "it8783",
 		.suffix = "E/F",
 		.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
-		  | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG,
+		  | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
 		.old_peci_mask = 0x4,
 	},
 	[it8786] = {
 		.name = "it8786",
 		.suffix = "E",
 		.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
-		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+		  | FEAT_PWM_FREQ2,
 		.peci_mask = 0x07,
 	},
 	[it8790] = {
 		.name = "it8790",
 		.suffix = "E",
 		.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
-		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+		  | FEAT_PWM_FREQ2,
 		.peci_mask = 0x07,
 	},
 	[it8603] = {
 		.name = "it8603",
 		.suffix = "E",
 		.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
-		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+		  | FEAT_AVCC3 | FEAT_PWM_FREQ2,
 		.peci_mask = 0x07,
 	},
 	[it8620] = {
@@ -397,7 +432,17 @@
 		.suffix = "E",
 		.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
 		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
-		  | FEAT_IN7_INTERNAL,
+		  | FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
+		  | FEAT_SIX_TEMP,
+		.peci_mask = 0x07,
+	},
+	[it8628] = {
+		.name = "it8628",
+		.suffix = "E",
+		.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+		  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
+		  | FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
+		  | FEAT_SIX_TEMP,
 		.peci_mask = 0x07,
 	},
 };
@@ -409,16 +454,20 @@
 #define has_old_autopwm(data)	((data)->features & FEAT_OLD_AUTOPWM)
 #define has_temp_offset(data)	((data)->features & FEAT_TEMP_OFFSET)
 #define has_temp_peci(data, nr)	(((data)->features & FEAT_TEMP_PECI) && \
-				 ((data)->peci_mask & (1 << nr)))
+				 ((data)->peci_mask & BIT(nr)))
 #define has_temp_old_peci(data, nr) \
 				(((data)->features & FEAT_TEMP_OLD_PECI) && \
-				 ((data)->old_peci_mask & (1 << nr)))
+				 ((data)->old_peci_mask & BIT(nr)))
 #define has_fan16_config(data)	((data)->features & FEAT_FAN16_CONFIG)
 #define has_five_fans(data)	((data)->features & (FEAT_FIVE_FANS | \
 						     FEAT_SIX_FANS))
 #define has_vid(data)		((data)->features & FEAT_VID)
 #define has_in7_internal(data)	((data)->features & FEAT_IN7_INTERNAL)
 #define has_six_fans(data)	((data)->features & FEAT_SIX_FANS)
+#define has_avcc3(data)		((data)->features & FEAT_AVCC3)
+#define has_six_pwm(data)	((data)->features & FEAT_SIX_PWM)
+#define has_pwm_freq2(data)	((data)->features & FEAT_PWM_FREQ2)
+#define has_six_temp(data)	((data)->features & FEAT_SIX_TEMP)
 
 struct it87_sio_data {
 	enum chips type;
@@ -440,7 +489,7 @@
  * The structure is dynamically allocated.
  */
 struct it87_data {
-	struct device *hwmon_dev;
+	const struct attribute_group *groups[7];
 	enum chips type;
 	u16 features;
 	u8 peci_mask;
@@ -453,17 +502,21 @@
 	unsigned long last_updated;	/* In jiffies */
 
 	u16 in_scaled;		/* Internal voltage sensors are scaled */
-	u8 in[10][3];		/* [nr][0]=in, [1]=min, [2]=max */
+	u16 in_internal;	/* Bitfield, internal sensors (for labels) */
+	u16 has_in;		/* Bitfield, voltage sensors enabled */
+	u8 in[NUM_VIN][3];		/* [nr][0]=in, [1]=min, [2]=max */
 	u8 has_fan;		/* Bitfield, fans enabled */
-	u16 fan[6][2];		/* Register values, [nr][0]=fan, [1]=min */
+	u16 fan[NUM_FAN][2];	/* Register values, [nr][0]=fan, [1]=min */
 	u8 has_temp;		/* Bitfield, temp sensors enabled */
-	s8 temp[3][4];		/* [nr][0]=temp, [1]=min, [2]=max, [3]=offset */
+	s8 temp[NUM_TEMP][4];	/* [nr][0]=temp, [1]=min, [2]=max, [3]=offset */
 	u8 sensor;		/* Register value (IT87_REG_TEMP_ENABLE) */
 	u8 extra;		/* Register value (IT87_REG_TEMP_EXTRA) */
-	u8 fan_div[3];		/* Register encoding, shifted right */
+	u8 fan_div[NUM_FAN_DIV];/* Register encoding, shifted right */
+	bool has_vid;		/* True if VID supported */
 	u8 vid;			/* Register encoding, combined */
 	u8 vrm;
 	u32 alarms;		/* Register encoding, combined */
+	bool has_beep;		/* true if beep supported */
 	u8 beeps;		/* Register encoding */
 	u8 fan_main_ctrl;	/* Register value */
 	u8 fan_ctl;		/* Register value */
@@ -478,13 +531,14 @@
 	 * is no longer needed, but it is still done to keep the driver
 	 * simple.
 	 */
-	u8 pwm_ctrl[3];		/* Register value */
-	u8 pwm_duty[3];		/* Manual PWM value set by user */
-	u8 pwm_temp_map[3];	/* PWM to temp. chan. mapping (bits 1-0) */
+	u8 has_pwm;		/* Bitfield, pwm control enabled */
+	u8 pwm_ctrl[NUM_PWM];	/* Register value */
+	u8 pwm_duty[NUM_PWM];	/* Manual PWM value set by user */
+	u8 pwm_temp_map[NUM_PWM];/* PWM to temp. chan. mapping (bits 1-0) */
 
 	/* Automatic fan speed control registers */
-	u8 auto_pwm[3][4];	/* [nr][3] is hard-coded */
-	s8 auto_temp[3][5];	/* [nr][0] is point1_temp_hyst */
+	u8 auto_pwm[NUM_AUTO_PWM][4];	/* [nr][3] is hard-coded */
+	s8 auto_temp[NUM_AUTO_PWM][5];	/* [nr][0] is point1_temp_hyst */
 };
 
 static int adc_lsb(const struct it87_data *data, int nr)
@@ -497,7 +551,7 @@
 		lsb = 109;
 	else
 		lsb = 160;
-	if (data->in_scaled & (1 << nr))
+	if (data->in_scaled & BIT(nr))
 		lsb <<= 1;
 	return lsb;
 }
@@ -554,15 +608,16 @@
 		return (reg & 0x7f) << 1;
 }
 
-
 static int DIV_TO_REG(int val)
 {
 	int answer = 0;
+
 	while (answer < 7 && (val >>= 1))
 		answer++;
 	return answer;
 }
-#define DIV_FROM_REG(val) (1 << (val))
+
+#define DIV_FROM_REG(val) BIT(val)
 
 /*
  * PWM base frequencies. The frequency has to be divided by either 128 or 256,
@@ -585,32 +640,204 @@
 	750000,
 };
 
-static int it87_probe(struct platform_device *pdev);
-static int it87_remove(struct platform_device *pdev);
+/*
+ * Must be called with data->update_lock held, except during initialization.
+ * We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
+ * would slow down the IT87 access and should not be necessary.
+ */
+static int it87_read_value(struct it87_data *data, u8 reg)
+{
+	outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
+	return inb_p(data->addr + IT87_DATA_REG_OFFSET);
+}
 
-static int it87_read_value(struct it87_data *data, u8 reg);
-static void it87_write_value(struct it87_data *data, u8 reg, u8 value);
-static struct it87_data *it87_update_device(struct device *dev);
-static int it87_check_pwm(struct device *dev);
-static void it87_init_device(struct platform_device *pdev);
+/*
+ * Must be called with data->update_lock held, except during initialization.
+ * We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
+ * would slow down the IT87 access and should not be necessary.
+ */
+static void it87_write_value(struct it87_data *data, u8 reg, u8 value)
+{
+	outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
+	outb_p(value, data->addr + IT87_DATA_REG_OFFSET);
+}
 
+static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
+{
+	data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM[nr]);
+	if (has_newer_autopwm(data)) {
+		data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
+		data->pwm_duty[nr] = it87_read_value(data,
+						     IT87_REG_PWM_DUTY[nr]);
+	} else {
+		if (data->pwm_ctrl[nr] & 0x80)	/* Automatic mode */
+			data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
+		else				/* Manual mode */
+			data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f;
+	}
 
-static struct platform_driver it87_driver = {
-	.driver = {
-		.name	= DRVNAME,
-	},
-	.probe	= it87_probe,
-	.remove	= it87_remove,
-};
+	if (has_old_autopwm(data)) {
+		int i;
+
+		for (i = 0; i < 5 ; i++)
+			data->auto_temp[nr][i] = it87_read_value(data,
+						IT87_REG_AUTO_TEMP(nr, i));
+		for (i = 0; i < 3 ; i++)
+			data->auto_pwm[nr][i] = it87_read_value(data,
+						IT87_REG_AUTO_PWM(nr, i));
+	} else if (has_newer_autopwm(data)) {
+		int i;
+
+		/*
+		 * 0: temperature hysteresis (base + 5)
+		 * 1: fan off temperature (base + 0)
+		 * 2: fan start temperature (base + 1)
+		 * 3: fan max temperature (base + 2)
+		 */
+		data->auto_temp[nr][0] =
+			it87_read_value(data, IT87_REG_AUTO_TEMP(nr, 5));
+
+		for (i = 0; i < 3 ; i++)
+			data->auto_temp[nr][i + 1] =
+				it87_read_value(data,
+						IT87_REG_AUTO_TEMP(nr, i));
+		/*
+		 * 0: start pwm value (base + 3)
+		 * 1: pwm slope (base + 4, 1/8th pwm)
+		 */
+		data->auto_pwm[nr][0] =
+			it87_read_value(data, IT87_REG_AUTO_TEMP(nr, 3));
+		data->auto_pwm[nr][1] =
+			it87_read_value(data, IT87_REG_AUTO_TEMP(nr, 4));
+	}
+}
+
+static struct it87_data *it87_update_device(struct device *dev)
+{
+	struct it87_data *data = dev_get_drvdata(dev);
+	int i;
+
+	mutex_lock(&data->update_lock);
+
+	if (time_after(jiffies, data->last_updated + HZ + HZ / 2) ||
+	    !data->valid) {
+		if (update_vbat) {
+			/*
+			 * Cleared after each update, so reenable.  Value
+			 * returned by this read will be previous value
+			 */
+			it87_write_value(data, IT87_REG_CONFIG,
+				it87_read_value(data, IT87_REG_CONFIG) | 0x40);
+		}
+		for (i = 0; i < NUM_VIN; i++) {
+			if (!(data->has_in & BIT(i)))
+				continue;
+
+			data->in[i][0] =
+				it87_read_value(data, IT87_REG_VIN[i]);
+
+			/* VBAT and AVCC don't have limit registers */
+			if (i >= NUM_VIN_LIMIT)
+				continue;
+
+			data->in[i][1] =
+				it87_read_value(data, IT87_REG_VIN_MIN(i));
+			data->in[i][2] =
+				it87_read_value(data, IT87_REG_VIN_MAX(i));
+		}
+
+		for (i = 0; i < NUM_FAN; i++) {
+			/* Skip disabled fans */
+			if (!(data->has_fan & BIT(i)))
+				continue;
+
+			data->fan[i][1] =
+				it87_read_value(data, IT87_REG_FAN_MIN[i]);
+			data->fan[i][0] = it87_read_value(data,
+				       IT87_REG_FAN[i]);
+			/* Add high byte if in 16-bit mode */
+			if (has_16bit_fans(data)) {
+				data->fan[i][0] |= it87_read_value(data,
+						IT87_REG_FANX[i]) << 8;
+				data->fan[i][1] |= it87_read_value(data,
+						IT87_REG_FANX_MIN[i]) << 8;
+			}
+		}
+		for (i = 0; i < NUM_TEMP; i++) {
+			if (!(data->has_temp & BIT(i)))
+				continue;
+			data->temp[i][0] =
+				it87_read_value(data, IT87_REG_TEMP(i));
+
+			if (has_temp_offset(data) && i < NUM_TEMP_OFFSET)
+				data->temp[i][3] =
+				  it87_read_value(data,
+						  IT87_REG_TEMP_OFFSET[i]);
+
+			if (i >= NUM_TEMP_LIMIT)
+				continue;
+
+			data->temp[i][1] =
+				it87_read_value(data, IT87_REG_TEMP_LOW(i));
+			data->temp[i][2] =
+				it87_read_value(data, IT87_REG_TEMP_HIGH(i));
+		}
+
+		/* Newer chips don't have clock dividers */
+		if ((data->has_fan & 0x07) && !has_16bit_fans(data)) {
+			i = it87_read_value(data, IT87_REG_FAN_DIV);
+			data->fan_div[0] = i & 0x07;
+			data->fan_div[1] = (i >> 3) & 0x07;
+			data->fan_div[2] = (i & 0x40) ? 3 : 1;
+		}
+
+		data->alarms =
+			it87_read_value(data, IT87_REG_ALARM1) |
+			(it87_read_value(data, IT87_REG_ALARM2) << 8) |
+			(it87_read_value(data, IT87_REG_ALARM3) << 16);
+		data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
+
+		data->fan_main_ctrl = it87_read_value(data,
+				IT87_REG_FAN_MAIN_CTRL);
+		data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL);
+		for (i = 0; i < NUM_PWM; i++) {
+			if (!(data->has_pwm & BIT(i)))
+				continue;
+			it87_update_pwm_ctrl(data, i);
+		}
+
+		data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
+		data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
+		/*
+		 * The IT8705F does not have VID capability.
+		 * The IT8718F and later don't use IT87_REG_VID for the
+		 * same purpose.
+		 */
+		if (data->type == it8712 || data->type == it8716) {
+			data->vid = it87_read_value(data, IT87_REG_VID);
+			/*
+			 * The older IT8712F revisions had only 5 VID pins,
+			 * but we assume it is always safe to read 6 bits.
+			 */
+			data->vid &= 0x3f;
+		}
+		data->last_updated = jiffies;
+		data->valid = 1;
+	}
+
+	mutex_unlock(&data->update_lock);
+
+	return data;
+}
 
 static ssize_t show_in(struct device *dev, struct device_attribute *attr,
 		       char *buf)
 {
 	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-	int nr = sattr->nr;
-	int index = sattr->index;
-
 	struct it87_data *data = it87_update_device(dev);
+	int index = sattr->index;
+	int nr = sattr->nr;
+
 	return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in[nr][index]));
 }
 
@@ -618,10 +845,9 @@
 		      const char *buf, size_t count)
 {
 	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-	int nr = sattr->nr;
-	int index = sattr->index;
-
 	struct it87_data *data = dev_get_drvdata(dev);
+	int index = sattr->index;
+	int nr = sattr->nr;
 	unsigned long val;
 
 	if (kstrtoul(buf, 10, &val) < 0)
@@ -687,8 +913,11 @@
 
 static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 8, 0);
 static SENSOR_DEVICE_ATTR_2(in9_input, S_IRUGO, show_in, NULL, 9, 0);
+static SENSOR_DEVICE_ATTR_2(in10_input, S_IRUGO, show_in, NULL, 10, 0);
+static SENSOR_DEVICE_ATTR_2(in11_input, S_IRUGO, show_in, NULL, 11, 0);
+static SENSOR_DEVICE_ATTR_2(in12_input, S_IRUGO, show_in, NULL, 12, 0);
 
-/* 3 temperatures */
+/* Up to 6 temperatures */
 static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
 			 char *buf)
 {
@@ -761,6 +990,9 @@
 			    2, 2);
 static SENSOR_DEVICE_ATTR_2(temp3_offset, S_IRUGO | S_IWUSR, show_temp,
 			    set_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 3, 0);
+static SENSOR_DEVICE_ATTR_2(temp5_input, S_IRUGO, show_temp, NULL, 4, 0);
+static SENSOR_DEVICE_ATTR_2(temp6_input, S_IRUGO, show_temp, NULL, 5, 0);
 
 static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
 			      char *buf)
@@ -771,8 +1003,8 @@
 	u8 reg = data->sensor;	    /* In case value is updated while used */
 	u8 extra = data->extra;
 
-	if ((has_temp_peci(data, nr) && (reg >> 6 == nr + 1))
-	    || (has_temp_old_peci(data, nr) && (extra & 0x80)))
+	if ((has_temp_peci(data, nr) && (reg >> 6 == nr + 1)) ||
+	    (has_temp_old_peci(data, nr) && (extra & 0x80)))
 		return sprintf(buf, "6\n");  /* Intel PECI */
 	if (reg & (1 << nr))
 		return sprintf(buf, "3\n");  /* thermal diode */
@@ -837,18 +1069,19 @@
 static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO | S_IWUSR, show_temp_type,
 			  set_temp_type, 2);
 
-/* 3 Fans */
+/* 6 Fans */
 
 static int pwm_mode(const struct it87_data *data, int nr)
 {
-	int ctrl = data->fan_main_ctrl & (1 << nr);
+	if (data->type != it8603 && nr < 3 && !(data->fan_main_ctrl & BIT(nr)))
+		return 0;				/* Full speed */
+	if (data->pwm_ctrl[nr] & 0x80)
+		return 2;				/* Automatic mode */
+	if ((data->type == it8603 || nr >= 3) &&
+	    data->pwm_duty[nr] == pwm_to_reg(data, 0xff))
+		return 0;			/* Full speed */
 
-	if (ctrl == 0 && data->type != it8603)		/* Full speed */
-		return 0;
-	if (data->pwm_ctrl[nr] & 0x80)			/* Automatic mode */
-		return 2;
-	else						/* Manual mode */
-		return 1;
+	return 1;				/* Manual mode */
 }
 
 static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
@@ -868,39 +1101,49 @@
 }
 
 static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
-		char *buf)
+			    char *buf)
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+	struct it87_data *data = it87_update_device(dev);
 	int nr = sensor_attr->index;
 
-	struct it87_data *data = it87_update_device(dev);
-	return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
+	return sprintf(buf, "%lu\n", DIV_FROM_REG(data->fan_div[nr]));
 }
+
 static ssize_t show_pwm_enable(struct device *dev,
-		struct device_attribute *attr, char *buf)
+			       struct device_attribute *attr, char *buf)
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+	struct it87_data *data = it87_update_device(dev);
 	int nr = sensor_attr->index;
 
-	struct it87_data *data = it87_update_device(dev);
 	return sprintf(buf, "%d\n", pwm_mode(data, nr));
 }
+
 static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
-		char *buf)
+			char *buf)
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+	struct it87_data *data = it87_update_device(dev);
 	int nr = sensor_attr->index;
 
-	struct it87_data *data = it87_update_device(dev);
 	return sprintf(buf, "%d\n",
 		       pwm_from_reg(data, data->pwm_duty[nr]));
 }
+
 static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
-		char *buf)
+			     char *buf)
 {
+	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	struct it87_data *data = it87_update_device(dev);
-	int index = (data->fan_ctl >> 4) & 0x07;
+	int nr = sensor_attr->index;
 	unsigned int freq;
+	int index;
+
+	if (has_pwm_freq2(data) && nr == 1)
+		index = (data->extra >> 4) & 0x07;
+	else
+		index = (data->fan_ctl >> 4) & 0x07;
 
 	freq = pwm_freq[index] / (has_newer_autopwm(data) ? 256 : 128);
 
@@ -953,12 +1196,11 @@
 }
 
 static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
-		const char *buf, size_t count)
+			   const char *buf, size_t count)
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-	int nr = sensor_attr->index;
-
 	struct it87_data *data = dev_get_drvdata(dev);
+	int nr = sensor_attr->index;
 	unsigned long val;
 	int min;
 	u8 old;
@@ -1013,6 +1255,11 @@
 			if (data->auto_pwm[nr][i] > data->auto_pwm[nr][i + 1])
 				err = -EINVAL;
 		}
+	} else if (has_newer_autopwm(data)) {
+		for (i = 1; i < 3; i++) {
+			if (data->auto_temp[nr][i] > data->auto_temp[nr][i + 1])
+				err = -EINVAL;
+		}
 	}
 
 	if (err) {
@@ -1023,13 +1270,12 @@
 	return err;
 }
 
-static ssize_t set_pwm_enable(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-	int nr = sensor_attr->index;
-
 	struct it87_data *data = dev_get_drvdata(dev);
+	int nr = sensor_attr->index;
 	long val;
 
 	if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 2)
@@ -1041,21 +1287,30 @@
 			return -EINVAL;
 	}
 
-	/* IT8603E does not have on/off mode */
-	if (val == 0 && data->type == it8603)
-		return -EINVAL;
-
 	mutex_lock(&data->update_lock);
 
 	if (val == 0) {
-		int tmp;
-		/* make sure the fan is on when in on/off mode */
-		tmp = it87_read_value(data, IT87_REG_FAN_CTL);
-		it87_write_value(data, IT87_REG_FAN_CTL, tmp | (1 << nr));
-		/* set on/off mode */
-		data->fan_main_ctrl &= ~(1 << nr);
-		it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
-				 data->fan_main_ctrl);
+		if (nr < 3 && data->type != it8603) {
+			int tmp;
+			/* make sure the fan is on when in on/off mode */
+			tmp = it87_read_value(data, IT87_REG_FAN_CTL);
+			it87_write_value(data, IT87_REG_FAN_CTL, tmp | BIT(nr));
+			/* set on/off mode */
+			data->fan_main_ctrl &= ~BIT(nr);
+			it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+					 data->fan_main_ctrl);
+		} else {
+			/* No on/off mode, set maximum pwm value */
+			data->pwm_duty[nr] = pwm_to_reg(data, 0xff);
+			it87_write_value(data, IT87_REG_PWM_DUTY[nr],
+					 data->pwm_duty[nr]);
+			/* and set manual mode */
+			data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
+					     data->pwm_temp_map[nr] :
+					     data->pwm_duty[nr];
+			it87_write_value(data, IT87_REG_PWM[nr],
+					 data->pwm_ctrl[nr]);
+		}
 	} else {
 		if (val == 1)				/* Manual mode */
 			data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
@@ -1063,11 +1318,11 @@
 					     data->pwm_duty[nr];
 		else					/* Automatic mode */
 			data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
-		it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
+		it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
 
-		if (data->type != it8603) {
+		if (data->type != it8603 && nr < 3) {
 			/* set SmartGuardian mode */
-			data->fan_main_ctrl |= (1 << nr);
+			data->fan_main_ctrl |= BIT(nr);
 			it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
 					 data->fan_main_ctrl);
 		}
@@ -1076,13 +1331,13 @@
 	mutex_unlock(&data->update_lock);
 	return count;
 }
+
 static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
-		const char *buf, size_t count)
+		       const char *buf, size_t count)
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-	int nr = sensor_attr->index;
-
 	struct it87_data *data = dev_get_drvdata(dev);
+	int nr = sensor_attr->index;
 	long val;
 
 	if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
@@ -1099,7 +1354,7 @@
 			return -EBUSY;
 		}
 		data->pwm_duty[nr] = pwm_to_reg(data, val);
-		it87_write_value(data, IT87_REG_PWM_DUTY(nr),
+		it87_write_value(data, IT87_REG_PWM_DUTY[nr],
 				 data->pwm_duty[nr]);
 	} else {
 		data->pwm_duty[nr] = pwm_to_reg(data, val);
@@ -1109,17 +1364,20 @@
 		 */
 		if (!(data->pwm_ctrl[nr] & 0x80)) {
 			data->pwm_ctrl[nr] = data->pwm_duty[nr];
-			it87_write_value(data, IT87_REG_PWM(nr),
+			it87_write_value(data, IT87_REG_PWM[nr],
 					 data->pwm_ctrl[nr]);
 		}
 	}
 	mutex_unlock(&data->update_lock);
 	return count;
 }
-static ssize_t set_pwm_freq(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
+
+static ssize_t set_pwm_freq(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
 {
+	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
 	struct it87_data *data = dev_get_drvdata(dev);
+	int nr = sensor_attr->index;
 	unsigned long val;
 	int i;
 
@@ -1131,63 +1389,66 @@
 
 	/* Search for the nearest available frequency */
 	for (i = 0; i < 7; i++) {
-		if (val > (pwm_freq[i] + pwm_freq[i+1]) / 2)
+		if (val > (pwm_freq[i] + pwm_freq[i + 1]) / 2)
 			break;
 	}
 
 	mutex_lock(&data->update_lock);
-	data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL) & 0x8f;
-	data->fan_ctl |= i << 4;
-	it87_write_value(data, IT87_REG_FAN_CTL, data->fan_ctl);
+	if (nr == 0) {
+		data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL) & 0x8f;
+		data->fan_ctl |= i << 4;
+		it87_write_value(data, IT87_REG_FAN_CTL, data->fan_ctl);
+	} else {
+		data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA) & 0x8f;
+		data->extra |= i << 4;
+		it87_write_value(data, IT87_REG_TEMP_EXTRA, data->extra);
+	}
 	mutex_unlock(&data->update_lock);
 
 	return count;
 }
+
 static ssize_t show_pwm_temp_map(struct device *dev,
-		struct device_attribute *attr, char *buf)
+				 struct device_attribute *attr, char *buf)
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-	int nr = sensor_attr->index;
-
 	struct it87_data *data = it87_update_device(dev);
+	int nr = sensor_attr->index;
 	int map;
 
-	if (data->pwm_temp_map[nr] < 3)
-		map = 1 << data->pwm_temp_map[nr];
-	else
-		map = 0;			/* Should never happen */
-	return sprintf(buf, "%d\n", map);
+	map = data->pwm_temp_map[nr];
+	if (map >= 3)
+		map = 0;	/* Should never happen */
+	if (nr >= 3)		/* pwm channels 3..6 map to temp4..6 */
+		map += 3;
+
+	return sprintf(buf, "%d\n", (int)BIT(map));
 }
+
 static ssize_t set_pwm_temp_map(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
+				struct device_attribute *attr, const char *buf,
+				size_t count)
 {
 	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-	int nr = sensor_attr->index;
-
 	struct it87_data *data = dev_get_drvdata(dev);
+	int nr = sensor_attr->index;
 	long val;
 	u8 reg;
 
-	/*
-	 * This check can go away if we ever support automatic fan speed
-	 * control on newer chips.
-	 */
-	if (!has_old_autopwm(data)) {
-		dev_notice(dev, "Mapping change disabled for safety reasons\n");
-		return -EINVAL;
-	}
-
 	if (kstrtol(buf, 10, &val) < 0)
 		return -EINVAL;
 
+	if (nr >= 3)
+		val -= 3;
+
 	switch (val) {
-	case (1 << 0):
+	case BIT(0):
 		reg = 0x00;
 		break;
-	case (1 << 1):
+	case BIT(1):
 		reg = 0x01;
 		break;
-	case (1 << 2):
+	case BIT(2):
 		reg = 0x02;
 		break;
 	default:
@@ -1202,14 +1463,14 @@
 	 */
 	if (data->pwm_ctrl[nr] & 0x80) {
 		data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
-		it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
+		it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
 	}
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 
-static ssize_t show_auto_pwm(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t show_auto_pwm(struct device *dev, struct device_attribute *attr,
+			     char *buf)
 {
 	struct it87_data *data = it87_update_device(dev);
 	struct sensor_device_attribute_2 *sensor_attr =
@@ -1221,14 +1482,15 @@
 		       pwm_from_reg(data, data->auto_pwm[nr][point]));
 }
 
-static ssize_t set_auto_pwm(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t set_auto_pwm(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
 {
 	struct it87_data *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute_2 *sensor_attr =
 			to_sensor_dev_attr_2(attr);
 	int nr = sensor_attr->nr;
 	int point = sensor_attr->index;
+	int regaddr;
 	long val;
 
 	if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
@@ -1236,26 +1498,65 @@
 
 	mutex_lock(&data->update_lock);
 	data->auto_pwm[nr][point] = pwm_to_reg(data, val);
-	it87_write_value(data, IT87_REG_AUTO_PWM(nr, point),
-			 data->auto_pwm[nr][point]);
+	if (has_newer_autopwm(data))
+		regaddr = IT87_REG_AUTO_TEMP(nr, 3);
+	else
+		regaddr = IT87_REG_AUTO_PWM(nr, point);
+	it87_write_value(data, regaddr, data->auto_pwm[nr][point]);
 	mutex_unlock(&data->update_lock);
 	return count;
 }
 
-static ssize_t show_auto_temp(struct device *dev,
-		struct device_attribute *attr, char *buf)
+static ssize_t show_auto_pwm_slope(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct it87_data *data = it87_update_device(dev);
+	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+	int nr = sensor_attr->index;
+
+	return sprintf(buf, "%d\n", data->auto_pwm[nr][1] & 0x7f);
+}
+
+static ssize_t set_auto_pwm_slope(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct it87_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+	int nr = sensor_attr->index;
+	unsigned long val;
+
+	if (kstrtoul(buf, 10, &val) < 0 || val > 127)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+	data->auto_pwm[nr][1] = (data->auto_pwm[nr][1] & 0x80) | val;
+	it87_write_value(data, IT87_REG_AUTO_TEMP(nr, 4),
+			 data->auto_pwm[nr][1]);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static ssize_t show_auto_temp(struct device *dev, struct device_attribute *attr,
+			      char *buf)
 {
 	struct it87_data *data = it87_update_device(dev);
 	struct sensor_device_attribute_2 *sensor_attr =
 			to_sensor_dev_attr_2(attr);
 	int nr = sensor_attr->nr;
 	int point = sensor_attr->index;
+	int reg;
 
-	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->auto_temp[nr][point]));
+	if (has_old_autopwm(data) || point)
+		reg = data->auto_temp[nr][point];
+	else
+		reg = data->auto_temp[nr][1] - (data->auto_temp[nr][0] & 0x1f);
+
+	return sprintf(buf, "%d\n", TEMP_FROM_REG(reg));
 }
 
-static ssize_t set_auto_temp(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t set_auto_temp(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t count)
 {
 	struct it87_data *data = dev_get_drvdata(dev);
 	struct sensor_device_attribute_2 *sensor_attr =
@@ -1263,14 +1564,24 @@
 	int nr = sensor_attr->nr;
 	int point = sensor_attr->index;
 	long val;
+	int reg;
 
 	if (kstrtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
-	data->auto_temp[nr][point] = TEMP_TO_REG(val);
-	it87_write_value(data, IT87_REG_AUTO_TEMP(nr, point),
-			 data->auto_temp[nr][point]);
+	if (has_newer_autopwm(data) && !point) {
+		reg = data->auto_temp[nr][1] - TEMP_TO_REG(val);
+		reg = clamp_val(reg, 0, 0x1f) | (data->auto_temp[nr][0] & 0xe0);
+		data->auto_temp[nr][0] = reg;
+		it87_write_value(data, IT87_REG_AUTO_TEMP(nr, 5), reg);
+	} else {
+		reg = TEMP_TO_REG(val);
+		data->auto_temp[nr][point] = reg;
+		if (has_newer_autopwm(data))
+			point--;
+		it87_write_value(data, IT87_REG_AUTO_TEMP(nr, point), reg);
+	}
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -1308,8 +1619,9 @@
 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
 			  show_pwm_enable, set_pwm_enable, 0);
 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 0);
-static DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR, show_pwm_freq, set_pwm_freq);
-static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR, show_pwm_freq,
+			  set_pwm_freq, 0);
+static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO,
 			  show_pwm_temp_map, set_pwm_temp_map, 0);
 static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR,
 			    show_auto_pwm, set_auto_pwm, 0, 0);
@@ -1329,12 +1641,16 @@
 			    show_auto_temp, set_auto_temp, 0, 3);
 static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_temp, S_IRUGO | S_IWUSR,
 			    show_auto_temp, set_auto_temp, 0, 4);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_start, S_IRUGO | S_IWUSR,
+			    show_auto_pwm, set_auto_pwm, 0, 0);
+static SENSOR_DEVICE_ATTR(pwm1_auto_slope, S_IRUGO | S_IWUSR,
+			  show_auto_pwm_slope, set_auto_pwm_slope, 0);
 
 static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
 			  show_pwm_enable, set_pwm_enable, 1);
 static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 1);
-static DEVICE_ATTR(pwm2_freq, S_IRUGO, show_pwm_freq, NULL);
-static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO, show_pwm_freq, set_pwm_freq, 1);
+static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IRUGO,
 			  show_pwm_temp_map, set_pwm_temp_map, 1);
 static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR,
 			    show_auto_pwm, set_auto_pwm, 1, 0);
@@ -1354,12 +1670,16 @@
 			    show_auto_temp, set_auto_temp, 1, 3);
 static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_temp, S_IRUGO | S_IWUSR,
 			    show_auto_temp, set_auto_temp, 1, 4);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_start, S_IRUGO | S_IWUSR,
+			    show_auto_pwm, set_auto_pwm, 1, 0);
+static SENSOR_DEVICE_ATTR(pwm2_auto_slope, S_IRUGO | S_IWUSR,
+			  show_auto_pwm_slope, set_auto_pwm_slope, 1);
 
 static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
 			  show_pwm_enable, set_pwm_enable, 2);
 static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 2);
-static DEVICE_ATTR(pwm3_freq, S_IRUGO, show_pwm_freq, NULL);
-static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO, show_pwm_freq, NULL, 2);
+static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IRUGO,
 			  show_pwm_temp_map, set_pwm_temp_map, 2);
 static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR,
 			    show_auto_pwm, set_auto_pwm, 2, 0);
@@ -1379,30 +1699,94 @@
 			    show_auto_temp, set_auto_temp, 2, 3);
 static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_temp, S_IRUGO | S_IWUSR,
 			    show_auto_temp, set_auto_temp, 2, 4);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_start, S_IRUGO | S_IWUSR,
+			    show_auto_pwm, set_auto_pwm, 2, 0);
+static SENSOR_DEVICE_ATTR(pwm3_auto_slope, S_IRUGO | S_IWUSR,
+			  show_auto_pwm_slope, set_auto_pwm_slope, 2);
+
+static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
+			  show_pwm_enable, set_pwm_enable, 3);
+static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 3);
+static SENSOR_DEVICE_ATTR(pwm4_freq, S_IRUGO, show_pwm_freq, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm4_auto_channels_temp, S_IRUGO,
+			  show_pwm_temp_map, set_pwm_temp_map, 3);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point3_temp, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_start, S_IRUGO | S_IWUSR,
+			    show_auto_pwm, set_auto_pwm, 3, 0);
+static SENSOR_DEVICE_ATTR(pwm4_auto_slope, S_IRUGO | S_IWUSR,
+			  show_auto_pwm_slope, set_auto_pwm_slope, 3);
+
+static SENSOR_DEVICE_ATTR(pwm5_enable, S_IRUGO | S_IWUSR,
+			  show_pwm_enable, set_pwm_enable, 4);
+static SENSOR_DEVICE_ATTR(pwm5, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 4);
+static SENSOR_DEVICE_ATTR(pwm5_freq, S_IRUGO, show_pwm_freq, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm5_auto_channels_temp, S_IRUGO,
+			  show_pwm_temp_map, set_pwm_temp_map, 4);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point3_temp, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_start, S_IRUGO | S_IWUSR,
+			    show_auto_pwm, set_auto_pwm, 4, 0);
+static SENSOR_DEVICE_ATTR(pwm5_auto_slope, S_IRUGO | S_IWUSR,
+			  show_auto_pwm_slope, set_auto_pwm_slope, 4);
+
+static SENSOR_DEVICE_ATTR(pwm6_enable, S_IRUGO | S_IWUSR,
+			  show_pwm_enable, set_pwm_enable, 5);
+static SENSOR_DEVICE_ATTR(pwm6, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 5);
+static SENSOR_DEVICE_ATTR(pwm6_freq, S_IRUGO, show_pwm_freq, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm6_auto_channels_temp, S_IRUGO,
+			  show_pwm_temp_map, set_pwm_temp_map, 5);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point3_temp, S_IRUGO | S_IWUSR,
+			    show_auto_temp, set_auto_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_start, S_IRUGO | S_IWUSR,
+			    show_auto_pwm, set_auto_pwm, 5, 0);
+static SENSOR_DEVICE_ATTR(pwm6_auto_slope, S_IRUGO | S_IWUSR,
+			  show_auto_pwm_slope, set_auto_pwm_slope, 5);
 
 /* Alarms */
 static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
-		char *buf)
+			   char *buf)
 {
 	struct it87_data *data = it87_update_device(dev);
+
 	return sprintf(buf, "%u\n", data->alarms);
 }
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
 
 static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
-		char *buf)
+			  char *buf)
 {
-	int bitnr = to_sensor_dev_attr(attr)->index;
 	struct it87_data *data = it87_update_device(dev);
+	int bitnr = to_sensor_dev_attr(attr)->index;
+
 	return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
 }
 
-static ssize_t clear_intrusion(struct device *dev, struct device_attribute
-		*attr, const char *buf, size_t count)
+static ssize_t clear_intrusion(struct device *dev,
+			       struct device_attribute *attr, const char *buf,
+			       size_t count)
 {
 	struct it87_data *data = dev_get_drvdata(dev);
-	long val;
 	int config;
+	long val;
 
 	if (kstrtol(buf, 10, &val) < 0 || val != 0)
 		return -EINVAL;
@@ -1412,7 +1796,7 @@
 	if (config < 0) {
 		count = config;
 	} else {
-		config |= 1 << 5;
+		config |= BIT(5);
 		it87_write_value(data, IT87_REG_CONFIG, config);
 		/* Invalidate cache to force re-read */
 		data->valid = 0;
@@ -1443,29 +1827,30 @@
 			  show_alarm, clear_intrusion, 4);
 
 static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
-		char *buf)
+			 char *buf)
 {
-	int bitnr = to_sensor_dev_attr(attr)->index;
 	struct it87_data *data = it87_update_device(dev);
+	int bitnr = to_sensor_dev_attr(attr)->index;
+
 	return sprintf(buf, "%u\n", (data->beeps >> bitnr) & 1);
 }
+
 static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
-		const char *buf, size_t count)
+			const char *buf, size_t count)
 {
 	int bitnr = to_sensor_dev_attr(attr)->index;
 	struct it87_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (kstrtol(buf, 10, &val) < 0
-	 || (val != 0 && val != 1))
+	if (kstrtol(buf, 10, &val) < 0 || (val != 0 && val != 1))
 		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
 	data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
 	if (val)
-		data->beeps |= (1 << bitnr);
+		data->beeps |= BIT(bitnr);
 	else
-		data->beeps &= ~(1 << bitnr);
+		data->beeps &= ~BIT(bitnr);
 	it87_write_value(data, IT87_REG_BEEP_ENABLE, data->beeps);
 	mutex_unlock(&data->update_lock);
 	return count;
@@ -1493,13 +1878,15 @@
 static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO, show_beep, NULL, 2);
 
 static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
-		char *buf)
+			    char *buf)
 {
 	struct it87_data *data = dev_get_drvdata(dev);
+
 	return sprintf(buf, "%u\n", data->vrm);
 }
+
 static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
-		const char *buf, size_t count)
+			     const char *buf, size_t count)
 {
 	struct it87_data *data = dev_get_drvdata(dev);
 	unsigned long val;
@@ -1514,15 +1901,16 @@
 static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
 
 static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
-		char *buf)
+			    char *buf)
 {
 	struct it87_data *data = it87_update_device(dev);
-	return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
+
+	return sprintf(buf, "%ld\n", (long)vid_from_reg(data->vid, data->vrm));
 }
 static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
 
 static ssize_t show_label(struct device *dev, struct device_attribute *attr,
-		char *buf)
+			  char *buf)
 {
 	static const char * const labels[] = {
 		"+5V",
@@ -1548,227 +1936,348 @@
 static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
 static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
 static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
-/* special AVCC3 IT8603E in9 */
+/* AVCC3 */
 static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 0);
 
-static ssize_t show_name(struct device *dev, struct device_attribute
-			 *devattr, char *buf)
+static umode_t it87_in_is_visible(struct kobject *kobj,
+				  struct attribute *attr, int index)
 {
+	struct device *dev = container_of(kobj, struct device, kobj);
 	struct it87_data *data = dev_get_drvdata(dev);
-	return sprintf(buf, "%s\n", data->name);
-}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+	int i = index / 5;	/* voltage index */
+	int a = index % 5;	/* attribute index */
 
-static struct attribute *it87_attributes_in[10][5] = {
-{
+	if (index >= 40) {	/* in8 and higher only have input attributes */
+		i = index - 40 + 8;
+		a = 0;
+	}
+
+	if (!(data->has_in & BIT(i)))
+		return 0;
+
+	if (a == 4 && !data->has_beep)
+		return 0;
+
+	return attr->mode;
+}
+
+static struct attribute *it87_attributes_in[] = {
 	&sensor_dev_attr_in0_input.dev_attr.attr,
 	&sensor_dev_attr_in0_min.dev_attr.attr,
 	&sensor_dev_attr_in0_max.dev_attr.attr,
 	&sensor_dev_attr_in0_alarm.dev_attr.attr,
-	NULL
-}, {
+	&sensor_dev_attr_in0_beep.dev_attr.attr,	/* 4 */
+
 	&sensor_dev_attr_in1_input.dev_attr.attr,
 	&sensor_dev_attr_in1_min.dev_attr.attr,
 	&sensor_dev_attr_in1_max.dev_attr.attr,
 	&sensor_dev_attr_in1_alarm.dev_attr.attr,
-	NULL
-}, {
+	&sensor_dev_attr_in1_beep.dev_attr.attr,	/* 9 */
+
 	&sensor_dev_attr_in2_input.dev_attr.attr,
 	&sensor_dev_attr_in2_min.dev_attr.attr,
 	&sensor_dev_attr_in2_max.dev_attr.attr,
 	&sensor_dev_attr_in2_alarm.dev_attr.attr,
-	NULL
-}, {
+	&sensor_dev_attr_in2_beep.dev_attr.attr,	/* 14 */
+
 	&sensor_dev_attr_in3_input.dev_attr.attr,
 	&sensor_dev_attr_in3_min.dev_attr.attr,
 	&sensor_dev_attr_in3_max.dev_attr.attr,
 	&sensor_dev_attr_in3_alarm.dev_attr.attr,
-	NULL
-}, {
+	&sensor_dev_attr_in3_beep.dev_attr.attr,	/* 19 */
+
 	&sensor_dev_attr_in4_input.dev_attr.attr,
 	&sensor_dev_attr_in4_min.dev_attr.attr,
 	&sensor_dev_attr_in4_max.dev_attr.attr,
 	&sensor_dev_attr_in4_alarm.dev_attr.attr,
-	NULL
-}, {
+	&sensor_dev_attr_in4_beep.dev_attr.attr,	/* 24 */
+
 	&sensor_dev_attr_in5_input.dev_attr.attr,
 	&sensor_dev_attr_in5_min.dev_attr.attr,
 	&sensor_dev_attr_in5_max.dev_attr.attr,
 	&sensor_dev_attr_in5_alarm.dev_attr.attr,
-	NULL
-}, {
+	&sensor_dev_attr_in5_beep.dev_attr.attr,	/* 29 */
+
 	&sensor_dev_attr_in6_input.dev_attr.attr,
 	&sensor_dev_attr_in6_min.dev_attr.attr,
 	&sensor_dev_attr_in6_max.dev_attr.attr,
 	&sensor_dev_attr_in6_alarm.dev_attr.attr,
-	NULL
-}, {
+	&sensor_dev_attr_in6_beep.dev_attr.attr,	/* 34 */
+
 	&sensor_dev_attr_in7_input.dev_attr.attr,
 	&sensor_dev_attr_in7_min.dev_attr.attr,
 	&sensor_dev_attr_in7_max.dev_attr.attr,
 	&sensor_dev_attr_in7_alarm.dev_attr.attr,
-	NULL
-}, {
-	&sensor_dev_attr_in8_input.dev_attr.attr,
-	NULL
-}, {
-	&sensor_dev_attr_in9_input.dev_attr.attr,
-	NULL
-} };
+	&sensor_dev_attr_in7_beep.dev_attr.attr,	/* 39 */
 
-static const struct attribute_group it87_group_in[10] = {
-	{ .attrs = it87_attributes_in[0] },
-	{ .attrs = it87_attributes_in[1] },
-	{ .attrs = it87_attributes_in[2] },
-	{ .attrs = it87_attributes_in[3] },
-	{ .attrs = it87_attributes_in[4] },
-	{ .attrs = it87_attributes_in[5] },
-	{ .attrs = it87_attributes_in[6] },
-	{ .attrs = it87_attributes_in[7] },
-	{ .attrs = it87_attributes_in[8] },
-	{ .attrs = it87_attributes_in[9] },
+	&sensor_dev_attr_in8_input.dev_attr.attr,	/* 40 */
+	&sensor_dev_attr_in9_input.dev_attr.attr,	/* 41 */
+	&sensor_dev_attr_in10_input.dev_attr.attr,	/* 41 */
+	&sensor_dev_attr_in11_input.dev_attr.attr,	/* 41 */
+	&sensor_dev_attr_in12_input.dev_attr.attr,	/* 41 */
 };
 
-static struct attribute *it87_attributes_temp[3][6] = {
+static const struct attribute_group it87_group_in = {
+	.attrs = it87_attributes_in,
+	.is_visible = it87_in_is_visible,
+};
+
+static umode_t it87_temp_is_visible(struct kobject *kobj,
+				    struct attribute *attr, int index)
 {
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct it87_data *data = dev_get_drvdata(dev);
+	int i = index / 7;	/* temperature index */
+	int a = index % 7;	/* attribute index */
+
+	if (index >= 21) {
+		i = index - 21 + 3;
+		a = 0;
+	}
+
+	if (!(data->has_temp & BIT(i)))
+		return 0;
+
+	if (a == 5 && !has_temp_offset(data))
+		return 0;
+
+	if (a == 6 && !data->has_beep)
+		return 0;
+
+	return attr->mode;
+}
+
+static struct attribute *it87_attributes_temp[] = {
 	&sensor_dev_attr_temp1_input.dev_attr.attr,
 	&sensor_dev_attr_temp1_max.dev_attr.attr,
 	&sensor_dev_attr_temp1_min.dev_attr.attr,
 	&sensor_dev_attr_temp1_type.dev_attr.attr,
 	&sensor_dev_attr_temp1_alarm.dev_attr.attr,
-	NULL
-} , {
-	&sensor_dev_attr_temp2_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_offset.dev_attr.attr,	/* 5 */
+	&sensor_dev_attr_temp1_beep.dev_attr.attr,	/* 6 */
+
+	&sensor_dev_attr_temp2_input.dev_attr.attr,	/* 7 */
 	&sensor_dev_attr_temp2_max.dev_attr.attr,
 	&sensor_dev_attr_temp2_min.dev_attr.attr,
 	&sensor_dev_attr_temp2_type.dev_attr.attr,
 	&sensor_dev_attr_temp2_alarm.dev_attr.attr,
-	NULL
-} , {
-	&sensor_dev_attr_temp3_input.dev_attr.attr,
+	&sensor_dev_attr_temp2_offset.dev_attr.attr,
+	&sensor_dev_attr_temp2_beep.dev_attr.attr,
+
+	&sensor_dev_attr_temp3_input.dev_attr.attr,	/* 14 */
 	&sensor_dev_attr_temp3_max.dev_attr.attr,
 	&sensor_dev_attr_temp3_min.dev_attr.attr,
 	&sensor_dev_attr_temp3_type.dev_attr.attr,
 	&sensor_dev_attr_temp3_alarm.dev_attr.attr,
-	NULL
-} };
-
-static const struct attribute_group it87_group_temp[3] = {
-	{ .attrs = it87_attributes_temp[0] },
-	{ .attrs = it87_attributes_temp[1] },
-	{ .attrs = it87_attributes_temp[2] },
-};
-
-static struct attribute *it87_attributes_temp_offset[] = {
-	&sensor_dev_attr_temp1_offset.dev_attr.attr,
-	&sensor_dev_attr_temp2_offset.dev_attr.attr,
 	&sensor_dev_attr_temp3_offset.dev_attr.attr,
+	&sensor_dev_attr_temp3_beep.dev_attr.attr,
+
+	&sensor_dev_attr_temp4_input.dev_attr.attr,	/* 21 */
+	&sensor_dev_attr_temp5_input.dev_attr.attr,
+	&sensor_dev_attr_temp6_input.dev_attr.attr,
+	NULL
 };
 
+static const struct attribute_group it87_group_temp = {
+	.attrs = it87_attributes_temp,
+	.is_visible = it87_temp_is_visible,
+};
+
+static umode_t it87_is_visible(struct kobject *kobj,
+			       struct attribute *attr, int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct it87_data *data = dev_get_drvdata(dev);
+
+	if ((index == 2 || index == 3) && !data->has_vid)
+		return 0;
+
+	if (index > 3 && !(data->in_internal & BIT(index - 4)))
+		return 0;
+
+	return attr->mode;
+}
+
 static struct attribute *it87_attributes[] = {
 	&dev_attr_alarms.attr,
 	&sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
-	&dev_attr_name.attr,
+	&dev_attr_vrm.attr,				/* 2 */
+	&dev_attr_cpu0_vid.attr,			/* 3 */
+	&sensor_dev_attr_in3_label.dev_attr.attr,	/* 4 .. 7 */
+	&sensor_dev_attr_in7_label.dev_attr.attr,
+	&sensor_dev_attr_in8_label.dev_attr.attr,
+	&sensor_dev_attr_in9_label.dev_attr.attr,
 	NULL
 };
 
 static const struct attribute_group it87_group = {
 	.attrs = it87_attributes,
+	.is_visible = it87_is_visible,
 };
 
-static struct attribute *it87_attributes_in_beep[] = {
-	&sensor_dev_attr_in0_beep.dev_attr.attr,
-	&sensor_dev_attr_in1_beep.dev_attr.attr,
-	&sensor_dev_attr_in2_beep.dev_attr.attr,
-	&sensor_dev_attr_in3_beep.dev_attr.attr,
-	&sensor_dev_attr_in4_beep.dev_attr.attr,
-	&sensor_dev_attr_in5_beep.dev_attr.attr,
-	&sensor_dev_attr_in6_beep.dev_attr.attr,
-	&sensor_dev_attr_in7_beep.dev_attr.attr,
-	NULL,
-	NULL,
-};
+static umode_t it87_fan_is_visible(struct kobject *kobj,
+				   struct attribute *attr, int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct it87_data *data = dev_get_drvdata(dev);
+	int i = index / 5;	/* fan index */
+	int a = index % 5;	/* attribute index */
 
-static struct attribute *it87_attributes_temp_beep[] = {
-	&sensor_dev_attr_temp1_beep.dev_attr.attr,
-	&sensor_dev_attr_temp2_beep.dev_attr.attr,
-	&sensor_dev_attr_temp3_beep.dev_attr.attr,
-};
+	if (index >= 15) {	/* fan 4..6 don't have divisor attributes */
+		i = (index - 15) / 4 + 3;
+		a = (index - 15) % 4;
+	}
 
-static struct attribute *it87_attributes_fan[6][3+1] = { {
+	if (!(data->has_fan & BIT(i)))
+		return 0;
+
+	if (a == 3) {				/* beep */
+		if (!data->has_beep)
+			return 0;
+		/* first fan beep attribute is writable */
+		if (i == __ffs(data->has_fan))
+			return attr->mode | S_IWUSR;
+	}
+
+	if (a == 4 && has_16bit_fans(data))	/* divisor */
+		return 0;
+
+	return attr->mode;
+}
+
+static struct attribute *it87_attributes_fan[] = {
 	&sensor_dev_attr_fan1_input.dev_attr.attr,
 	&sensor_dev_attr_fan1_min.dev_attr.attr,
 	&sensor_dev_attr_fan1_alarm.dev_attr.attr,
-	NULL
-}, {
+	&sensor_dev_attr_fan1_beep.dev_attr.attr,	/* 3 */
+	&sensor_dev_attr_fan1_div.dev_attr.attr,	/* 4 */
+
 	&sensor_dev_attr_fan2_input.dev_attr.attr,
 	&sensor_dev_attr_fan2_min.dev_attr.attr,
 	&sensor_dev_attr_fan2_alarm.dev_attr.attr,
-	NULL
-}, {
+	&sensor_dev_attr_fan2_beep.dev_attr.attr,
+	&sensor_dev_attr_fan2_div.dev_attr.attr,	/* 9 */
+
 	&sensor_dev_attr_fan3_input.dev_attr.attr,
 	&sensor_dev_attr_fan3_min.dev_attr.attr,
 	&sensor_dev_attr_fan3_alarm.dev_attr.attr,
-	NULL
-}, {
-	&sensor_dev_attr_fan4_input.dev_attr.attr,
+	&sensor_dev_attr_fan3_beep.dev_attr.attr,
+	&sensor_dev_attr_fan3_div.dev_attr.attr,	/* 14 */
+
+	&sensor_dev_attr_fan4_input.dev_attr.attr,	/* 15 */
 	&sensor_dev_attr_fan4_min.dev_attr.attr,
 	&sensor_dev_attr_fan4_alarm.dev_attr.attr,
-	NULL
-}, {
-	&sensor_dev_attr_fan5_input.dev_attr.attr,
+	&sensor_dev_attr_fan4_beep.dev_attr.attr,
+
+	&sensor_dev_attr_fan5_input.dev_attr.attr,	/* 19 */
 	&sensor_dev_attr_fan5_min.dev_attr.attr,
 	&sensor_dev_attr_fan5_alarm.dev_attr.attr,
-	NULL
-}, {
-	&sensor_dev_attr_fan6_input.dev_attr.attr,
+	&sensor_dev_attr_fan5_beep.dev_attr.attr,
+
+	&sensor_dev_attr_fan6_input.dev_attr.attr,	/* 23 */
 	&sensor_dev_attr_fan6_min.dev_attr.attr,
 	&sensor_dev_attr_fan6_alarm.dev_attr.attr,
+	&sensor_dev_attr_fan6_beep.dev_attr.attr,
 	NULL
-} };
-
-static const struct attribute_group it87_group_fan[6] = {
-	{ .attrs = it87_attributes_fan[0] },
-	{ .attrs = it87_attributes_fan[1] },
-	{ .attrs = it87_attributes_fan[2] },
-	{ .attrs = it87_attributes_fan[3] },
-	{ .attrs = it87_attributes_fan[4] },
-	{ .attrs = it87_attributes_fan[5] },
 };
 
-static const struct attribute *it87_attributes_fan_div[] = {
-	&sensor_dev_attr_fan1_div.dev_attr.attr,
-	&sensor_dev_attr_fan2_div.dev_attr.attr,
-	&sensor_dev_attr_fan3_div.dev_attr.attr,
+static const struct attribute_group it87_group_fan = {
+	.attrs = it87_attributes_fan,
+	.is_visible = it87_fan_is_visible,
 };
 
-static struct attribute *it87_attributes_pwm[3][4+1] = { {
+static umode_t it87_pwm_is_visible(struct kobject *kobj,
+				   struct attribute *attr, int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct it87_data *data = dev_get_drvdata(dev);
+	int i = index / 4;	/* pwm index */
+	int a = index % 4;	/* attribute index */
+
+	if (!(data->has_pwm & BIT(i)))
+		return 0;
+
+	/* pwmX_auto_channels_temp is only writable if auto pwm is supported */
+	if (a == 3 && (has_old_autopwm(data) || has_newer_autopwm(data)))
+		return attr->mode | S_IWUSR;
+
+	/* pwm2_freq is writable if there are two pwm frequency selects */
+	if (has_pwm_freq2(data) && i == 1 && a == 2)
+		return attr->mode | S_IWUSR;
+
+	return attr->mode;
+}
+
+static struct attribute *it87_attributes_pwm[] = {
 	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
 	&sensor_dev_attr_pwm1.dev_attr.attr,
-	&dev_attr_pwm1_freq.attr,
+	&sensor_dev_attr_pwm1_freq.dev_attr.attr,
 	&sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
-	NULL
-}, {
+
 	&sensor_dev_attr_pwm2_enable.dev_attr.attr,
 	&sensor_dev_attr_pwm2.dev_attr.attr,
-	&dev_attr_pwm2_freq.attr,
+	&sensor_dev_attr_pwm2_freq.dev_attr.attr,
 	&sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
-	NULL
-}, {
+
 	&sensor_dev_attr_pwm3_enable.dev_attr.attr,
 	&sensor_dev_attr_pwm3.dev_attr.attr,
-	&dev_attr_pwm3_freq.attr,
+	&sensor_dev_attr_pwm3_freq.dev_attr.attr,
 	&sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
-	NULL
-} };
 
-static const struct attribute_group it87_group_pwm[3] = {
-	{ .attrs = it87_attributes_pwm[0] },
-	{ .attrs = it87_attributes_pwm[1] },
-	{ .attrs = it87_attributes_pwm[2] },
+	&sensor_dev_attr_pwm4_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm4.dev_attr.attr,
+	&sensor_dev_attr_pwm4_freq.dev_attr.attr,
+	&sensor_dev_attr_pwm4_auto_channels_temp.dev_attr.attr,
+
+	&sensor_dev_attr_pwm5_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm5.dev_attr.attr,
+	&sensor_dev_attr_pwm5_freq.dev_attr.attr,
+	&sensor_dev_attr_pwm5_auto_channels_temp.dev_attr.attr,
+
+	&sensor_dev_attr_pwm6_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm6.dev_attr.attr,
+	&sensor_dev_attr_pwm6_freq.dev_attr.attr,
+	&sensor_dev_attr_pwm6_auto_channels_temp.dev_attr.attr,
+
+	NULL
 };
 
-static struct attribute *it87_attributes_autopwm[3][9+1] = { {
+static const struct attribute_group it87_group_pwm = {
+	.attrs = it87_attributes_pwm,
+	.is_visible = it87_pwm_is_visible,
+};
+
+static umode_t it87_auto_pwm_is_visible(struct kobject *kobj,
+					struct attribute *attr, int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct it87_data *data = dev_get_drvdata(dev);
+	int i = index / 11;	/* pwm index */
+	int a = index % 11;	/* attribute index */
+
+	if (index >= 33) {	/* pwm 4..6 */
+		i = (index - 33) / 6 + 3;
+		a = (index - 33) % 6 + 4;
+	}
+
+	if (!(data->has_pwm & BIT(i)))
+		return 0;
+
+	if (has_newer_autopwm(data)) {
+		if (a < 4)	/* no auto point pwm */
+			return 0;
+		if (a == 8)	/* no auto_point4 */
+			return 0;
+	}
+	if (has_old_autopwm(data)) {
+		if (a >= 9)	/* no pwm_auto_start, pwm_auto_slope */
+			return 0;
+	}
+
+	return attr->mode;
+}
+
+static struct attribute *it87_attributes_auto_pwm[] = {
 	&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
 	&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
 	&sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
@@ -1778,9 +2287,10 @@
 	&sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
 	&sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
 	&sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
-	NULL
-}, {
-	&sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_start.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_slope.dev_attr.attr,
+
+	&sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,	/* 11 */
 	&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
 	&sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
 	&sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
@@ -1789,9 +2299,10 @@
 	&sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
 	&sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
 	&sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr,
-	NULL
-}, {
-	&sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_start.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_slope.dev_attr.attr,
+
+	&sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,	/* 22 */
 	&sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
 	&sensor_dev_attr_pwm3_auto_point3_pwm.dev_attr.attr,
 	&sensor_dev_attr_pwm3_auto_point4_pwm.dev_attr.attr,
@@ -1800,61 +2311,53 @@
 	&sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr,
 	&sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr,
 	&sensor_dev_attr_pwm3_auto_point4_temp.dev_attr.attr,
-	NULL
-} };
+	&sensor_dev_attr_pwm3_auto_start.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_slope.dev_attr.attr,
 
-static const struct attribute_group it87_group_autopwm[3] = {
-	{ .attrs = it87_attributes_autopwm[0] },
-	{ .attrs = it87_attributes_autopwm[1] },
-	{ .attrs = it87_attributes_autopwm[2] },
+	&sensor_dev_attr_pwm4_auto_point1_temp.dev_attr.attr,	/* 33 */
+	&sensor_dev_attr_pwm4_auto_point1_temp_hyst.dev_attr.attr,
+	&sensor_dev_attr_pwm4_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm4_auto_point3_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm4_auto_start.dev_attr.attr,
+	&sensor_dev_attr_pwm4_auto_slope.dev_attr.attr,
+
+	&sensor_dev_attr_pwm5_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm5_auto_point1_temp_hyst.dev_attr.attr,
+	&sensor_dev_attr_pwm5_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm5_auto_point3_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm5_auto_start.dev_attr.attr,
+	&sensor_dev_attr_pwm5_auto_slope.dev_attr.attr,
+
+	&sensor_dev_attr_pwm6_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm6_auto_point1_temp_hyst.dev_attr.attr,
+	&sensor_dev_attr_pwm6_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm6_auto_point3_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm6_auto_start.dev_attr.attr,
+	&sensor_dev_attr_pwm6_auto_slope.dev_attr.attr,
+
+	NULL,
 };
 
-static struct attribute *it87_attributes_fan_beep[] = {
-	&sensor_dev_attr_fan1_beep.dev_attr.attr,
-	&sensor_dev_attr_fan2_beep.dev_attr.attr,
-	&sensor_dev_attr_fan3_beep.dev_attr.attr,
-	&sensor_dev_attr_fan4_beep.dev_attr.attr,
-	&sensor_dev_attr_fan5_beep.dev_attr.attr,
-	&sensor_dev_attr_fan6_beep.dev_attr.attr,
-};
-
-static struct attribute *it87_attributes_vid[] = {
-	&dev_attr_vrm.attr,
-	&dev_attr_cpu0_vid.attr,
-	NULL
-};
-
-static const struct attribute_group it87_group_vid = {
-	.attrs = it87_attributes_vid,
-};
-
-static struct attribute *it87_attributes_label[] = {
-	&sensor_dev_attr_in3_label.dev_attr.attr,
-	&sensor_dev_attr_in7_label.dev_attr.attr,
-	&sensor_dev_attr_in8_label.dev_attr.attr,
-	&sensor_dev_attr_in9_label.dev_attr.attr,
-	NULL
-};
-
-static const struct attribute_group it87_group_label = {
-	.attrs = it87_attributes_label,
+static const struct attribute_group it87_group_auto_pwm = {
+	.attrs = it87_attributes_auto_pwm,
+	.is_visible = it87_auto_pwm_is_visible,
 };
 
 /* SuperIO detection - will change isa_address if a chip is found */
-static int __init it87_find(unsigned short *address,
-	struct it87_sio_data *sio_data)
+static int __init it87_find(int sioaddr, unsigned short *address,
+			    struct it87_sio_data *sio_data)
 {
 	int err;
 	u16 chip_type;
 	const char *board_vendor, *board_name;
 	const struct it87_devices *config;
 
-	err = superio_enter();
+	err = superio_enter(sioaddr);
 	if (err)
 		return err;
 
 	err = -ENODEV;
-	chip_type = force_id ? force_id : superio_inw(DEVID);
+	chip_type = force_id ? force_id : superio_inw(sioaddr, DEVID);
 
 	switch (chip_type) {
 	case IT8705F_DEVID:
@@ -1910,6 +2413,9 @@
 	case IT8620E_DEVID:
 		sio_data->type = it8620;
 		break;
+	case IT8628E_DEVID:
+		sio_data->type = it8628;
+		break;
 	case 0xffff:	/* No device at all */
 		goto exit;
 	default:
@@ -1917,20 +2423,20 @@
 		goto exit;
 	}
 
-	superio_select(PME);
-	if (!(superio_inb(IT87_ACT_REG) & 0x01)) {
+	superio_select(sioaddr, PME);
+	if (!(superio_inb(sioaddr, IT87_ACT_REG) & 0x01)) {
 		pr_info("Device not activated, skipping\n");
 		goto exit;
 	}
 
-	*address = superio_inw(IT87_BASE_REG) & ~(IT87_EXTENT - 1);
+	*address = superio_inw(sioaddr, IT87_BASE_REG) & ~(IT87_EXTENT - 1);
 	if (*address == 0) {
 		pr_info("Base address not set, skipping\n");
 		goto exit;
 	}
 
 	err = 0;
-	sio_data->revision = superio_inb(DEVREV) & 0x0f;
+	sio_data->revision = superio_inb(sioaddr, DEVREV) & 0x0f;
 	pr_info("Found IT%04x%s chip at 0x%x, revision %d\n", chip_type,
 		it87_devices[sio_data->type].suffix,
 		*address, sio_data->revision);
@@ -1939,14 +2445,19 @@
 
 	/* in7 (VSB or VCCH5V) is always internal on some chips */
 	if (has_in7_internal(config))
-		sio_data->internal |= (1 << 1);
+		sio_data->internal |= BIT(1);
 
 	/* in8 (Vbat) is always internal */
-	sio_data->internal |= (1 << 2);
+	sio_data->internal |= BIT(2);
 
-	/* Only the IT8603E has in9 */
-	if (sio_data->type != it8603)
-		sio_data->skip_in |= (1 << 9);
+	/* in9 (AVCC3), always internal if supported */
+	if (has_avcc3(config))
+		sio_data->internal |= BIT(3); /* in9 is AVCC */
+	else
+		sio_data->skip_in |= BIT(9);
+
+	if (!has_six_pwm(config))
+		sio_data->skip_pwm |= BIT(3) | BIT(4) | BIT(5);
 
 	if (!has_vid(config))
 		sio_data->skip_vid = 1;
@@ -1954,45 +2465,46 @@
 	/* Read GPIO config and VID value from LDN 7 (GPIO) */
 	if (sio_data->type == it87) {
 		/* The IT8705F has a different LD number for GPIO */
-		superio_select(5);
-		sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+		superio_select(sioaddr, 5);
+		sio_data->beep_pin = superio_inb(sioaddr,
+						 IT87_SIO_BEEP_PIN_REG) & 0x3f;
 	} else if (sio_data->type == it8783) {
 		int reg25, reg27, reg2a, reg2c, regef;
 
-		superio_select(GPIO);
+		superio_select(sioaddr, GPIO);
 
-		reg25 = superio_inb(IT87_SIO_GPIO1_REG);
-		reg27 = superio_inb(IT87_SIO_GPIO3_REG);
-		reg2a = superio_inb(IT87_SIO_PINX1_REG);
-		reg2c = superio_inb(IT87_SIO_PINX2_REG);
-		regef = superio_inb(IT87_SIO_SPI_REG);
+		reg25 = superio_inb(sioaddr, IT87_SIO_GPIO1_REG);
+		reg27 = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
+		reg2a = superio_inb(sioaddr, IT87_SIO_PINX1_REG);
+		reg2c = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
+		regef = superio_inb(sioaddr, IT87_SIO_SPI_REG);
 
 		/* Check if fan3 is there or not */
-		if ((reg27 & (1 << 0)) || !(reg2c & (1 << 2)))
-			sio_data->skip_fan |= (1 << 2);
-		if ((reg25 & (1 << 4))
-		    || (!(reg2a & (1 << 1)) && (regef & (1 << 0))))
-			sio_data->skip_pwm |= (1 << 2);
+		if ((reg27 & BIT(0)) || !(reg2c & BIT(2)))
+			sio_data->skip_fan |= BIT(2);
+		if ((reg25 & BIT(4)) ||
+		    (!(reg2a & BIT(1)) && (regef & BIT(0))))
+			sio_data->skip_pwm |= BIT(2);
 
 		/* Check if fan2 is there or not */
-		if (reg27 & (1 << 7))
-			sio_data->skip_fan |= (1 << 1);
-		if (reg27 & (1 << 3))
-			sio_data->skip_pwm |= (1 << 1);
+		if (reg27 & BIT(7))
+			sio_data->skip_fan |= BIT(1);
+		if (reg27 & BIT(3))
+			sio_data->skip_pwm |= BIT(1);
 
 		/* VIN5 */
-		if ((reg27 & (1 << 0)) || (reg2c & (1 << 2)))
-			sio_data->skip_in |= (1 << 5); /* No VIN5 */
+		if ((reg27 & BIT(0)) || (reg2c & BIT(2)))
+			sio_data->skip_in |= BIT(5); /* No VIN5 */
 
 		/* VIN6 */
-		if (reg27 & (1 << 1))
-			sio_data->skip_in |= (1 << 6); /* No VIN6 */
+		if (reg27 & BIT(1))
+			sio_data->skip_in |= BIT(6); /* No VIN6 */
 
 		/*
 		 * VIN7
 		 * Does not depend on bit 2 of Reg2C, contrary to datasheet.
 		 */
-		if (reg27 & (1 << 2)) {
+		if (reg27 & BIT(2)) {
 			/*
 			 * The data sheet is a bit unclear regarding the
 			 * internal voltage divider for VCCH5V. It says
@@ -2006,81 +2518,121 @@
 			 * not the case, and ask the user to report if the
 			 * resulting voltage is sane.
 			 */
-			if (!(reg2c & (1 << 1))) {
-				reg2c |= (1 << 1);
-				superio_outb(IT87_SIO_PINX2_REG, reg2c);
+			if (!(reg2c & BIT(1))) {
+				reg2c |= BIT(1);
+				superio_outb(sioaddr, IT87_SIO_PINX2_REG,
+					     reg2c);
 				pr_notice("Routing internal VCCH5V to in7.\n");
 			}
 			pr_notice("in7 routed to internal voltage divider, with external pin disabled.\n");
 			pr_notice("Please report if it displays a reasonable voltage.\n");
 		}
 
-		if (reg2c & (1 << 0))
-			sio_data->internal |= (1 << 0);
-		if (reg2c & (1 << 1))
-			sio_data->internal |= (1 << 1);
+		if (reg2c & BIT(0))
+			sio_data->internal |= BIT(0);
+		if (reg2c & BIT(1))
+			sio_data->internal |= BIT(1);
 
-		sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+		sio_data->beep_pin = superio_inb(sioaddr,
+						 IT87_SIO_BEEP_PIN_REG) & 0x3f;
 	} else if (sio_data->type == it8603) {
 		int reg27, reg29;
 
-		superio_select(GPIO);
+		superio_select(sioaddr, GPIO);
 
-		reg27 = superio_inb(IT87_SIO_GPIO3_REG);
+		reg27 = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
 
 		/* Check if fan3 is there or not */
-		if (reg27 & (1 << 6))
-			sio_data->skip_pwm |= (1 << 2);
-		if (reg27 & (1 << 7))
-			sio_data->skip_fan |= (1 << 2);
+		if (reg27 & BIT(6))
+			sio_data->skip_pwm |= BIT(2);
+		if (reg27 & BIT(7))
+			sio_data->skip_fan |= BIT(2);
 
 		/* Check if fan2 is there or not */
-		reg29 = superio_inb(IT87_SIO_GPIO5_REG);
-		if (reg29 & (1 << 1))
-			sio_data->skip_pwm |= (1 << 1);
-		if (reg29 & (1 << 2))
-			sio_data->skip_fan |= (1 << 1);
+		reg29 = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+		if (reg29 & BIT(1))
+			sio_data->skip_pwm |= BIT(1);
+		if (reg29 & BIT(2))
+			sio_data->skip_fan |= BIT(1);
 
-		sio_data->skip_in |= (1 << 5); /* No VIN5 */
-		sio_data->skip_in |= (1 << 6); /* No VIN6 */
+		sio_data->skip_in |= BIT(5); /* No VIN5 */
+		sio_data->skip_in |= BIT(6); /* No VIN6 */
 
-		sio_data->internal |= (1 << 3); /* in9 is AVCC */
-
-		sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
-	} else if (sio_data->type == it8620) {
+		sio_data->beep_pin = superio_inb(sioaddr,
+						 IT87_SIO_BEEP_PIN_REG) & 0x3f;
+	} else if (sio_data->type == it8620 || sio_data->type == it8628) {
 		int reg;
 
-		superio_select(GPIO);
+		superio_select(sioaddr, GPIO);
+
+		/* Check for pwm5 */
+		reg = superio_inb(sioaddr, IT87_SIO_GPIO1_REG);
+		if (reg & BIT(6))
+			sio_data->skip_pwm |= BIT(4);
 
 		/* Check for fan4, fan5 */
-		reg = superio_inb(IT87_SIO_GPIO2_REG);
-		if (!(reg & (1 << 5)))
-			sio_data->skip_fan |= (1 << 3);
-		if (!(reg & (1 << 4)))
-			sio_data->skip_fan |= (1 << 4);
+		reg = superio_inb(sioaddr, IT87_SIO_GPIO2_REG);
+		if (!(reg & BIT(5)))
+			sio_data->skip_fan |= BIT(3);
+		if (!(reg & BIT(4)))
+			sio_data->skip_fan |= BIT(4);
 
 		/* Check for pwm3, fan3 */
-		reg = superio_inb(IT87_SIO_GPIO3_REG);
-		if (reg & (1 << 6))
-			sio_data->skip_pwm |= (1 << 2);
-		if (reg & (1 << 7))
-			sio_data->skip_fan |= (1 << 2);
+		reg = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
+		if (reg & BIT(6))
+			sio_data->skip_pwm |= BIT(2);
+		if (reg & BIT(7))
+			sio_data->skip_fan |= BIT(2);
+
+		/* Check for pwm4 */
+		reg = superio_inb(sioaddr, IT87_SIO_GPIO4_REG);
+		if (!(reg & BIT(2)))
+			sio_data->skip_pwm |= BIT(3);
 
 		/* Check for pwm2, fan2 */
-		reg = superio_inb(IT87_SIO_GPIO5_REG);
-		if (reg & (1 << 1))
-			sio_data->skip_pwm |= (1 << 1);
-		if (reg & (1 << 2))
-			sio_data->skip_fan |= (1 << 1);
+		reg = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+		if (reg & BIT(1))
+			sio_data->skip_pwm |= BIT(1);
+		if (reg & BIT(2))
+			sio_data->skip_fan |= BIT(1);
+		/* Check for pwm6, fan6 */
+		if (!(reg & BIT(7))) {
+			sio_data->skip_pwm |= BIT(5);
+			sio_data->skip_fan |= BIT(5);
+		}
 
-		sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+		sio_data->beep_pin = superio_inb(sioaddr,
+						 IT87_SIO_BEEP_PIN_REG) & 0x3f;
 	} else {
 		int reg;
 		bool uart6;
 
-		superio_select(GPIO);
+		superio_select(sioaddr, GPIO);
 
-		reg = superio_inb(IT87_SIO_GPIO3_REG);
+		/* Check for fan4, fan5 */
+		if (has_five_fans(config)) {
+			reg = superio_inb(sioaddr, IT87_SIO_GPIO2_REG);
+			switch (sio_data->type) {
+			case it8718:
+				if (reg & BIT(5))
+					sio_data->skip_fan |= BIT(3);
+				if (reg & BIT(4))
+					sio_data->skip_fan |= BIT(4);
+				break;
+			case it8720:
+			case it8721:
+			case it8728:
+				if (!(reg & BIT(5)))
+					sio_data->skip_fan |= BIT(3);
+				if (!(reg & BIT(4)))
+					sio_data->skip_fan |= BIT(4);
+				break;
+			default:
+				break;
+			}
+		}
+
+		reg = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
 		if (!sio_data->skip_vid) {
 			/* We need at least 4 VID pins */
 			if (reg & 0x0f) {
@@ -2090,25 +2642,26 @@
 		}
 
 		/* Check if fan3 is there or not */
-		if (reg & (1 << 6))
-			sio_data->skip_pwm |= (1 << 2);
-		if (reg & (1 << 7))
-			sio_data->skip_fan |= (1 << 2);
+		if (reg & BIT(6))
+			sio_data->skip_pwm |= BIT(2);
+		if (reg & BIT(7))
+			sio_data->skip_fan |= BIT(2);
 
 		/* Check if fan2 is there or not */
-		reg = superio_inb(IT87_SIO_GPIO5_REG);
-		if (reg & (1 << 1))
-			sio_data->skip_pwm |= (1 << 1);
-		if (reg & (1 << 2))
-			sio_data->skip_fan |= (1 << 1);
+		reg = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+		if (reg & BIT(1))
+			sio_data->skip_pwm |= BIT(1);
+		if (reg & BIT(2))
+			sio_data->skip_fan |= BIT(1);
 
-		if ((sio_data->type == it8718 || sio_data->type == it8720)
-		 && !(sio_data->skip_vid))
-			sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
+		if ((sio_data->type == it8718 || sio_data->type == it8720) &&
+		    !(sio_data->skip_vid))
+			sio_data->vid_value = superio_inb(sioaddr,
+							  IT87_SIO_VID_REG);
 
-		reg = superio_inb(IT87_SIO_PINX2_REG);
+		reg = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
 
-		uart6 = sio_data->type == it8782 && (reg & (1 << 2));
+		uart6 = sio_data->type == it8782 && (reg & BIT(2));
 
 		/*
 		 * The IT8720F has no VIN7 pin, so VCCH should always be
@@ -2124,15 +2677,15 @@
 		 * If UART6 is enabled, re-route VIN7 to the internal divider
 		 * if that is not already the case.
 		 */
-		if ((sio_data->type == it8720 || uart6) && !(reg & (1 << 1))) {
-			reg |= (1 << 1);
-			superio_outb(IT87_SIO_PINX2_REG, reg);
+		if ((sio_data->type == it8720 || uart6) && !(reg & BIT(1))) {
+			reg |= BIT(1);
+			superio_outb(sioaddr, IT87_SIO_PINX2_REG, reg);
 			pr_notice("Routing internal VCCH to in7\n");
 		}
-		if (reg & (1 << 0))
-			sio_data->internal |= (1 << 0);
-		if (reg & (1 << 1))
-			sio_data->internal |= (1 << 1);
+		if (reg & BIT(0))
+			sio_data->internal |= BIT(0);
+		if (reg & BIT(1))
+			sio_data->internal |= BIT(1);
 
 		/*
 		 * On IT8782F, UART6 pins overlap with VIN5, VIN6, and VIN7.
@@ -2144,11 +2697,12 @@
 		 * temperature source here, skip_temp is preliminary.
 		 */
 		if (uart6) {
-			sio_data->skip_in |= (1 << 5) | (1 << 6);
-			sio_data->skip_temp |= (1 << 2);
+			sio_data->skip_in |= BIT(5) | BIT(6);
+			sio_data->skip_temp |= BIT(2);
 		}
 
-		sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+		sio_data->beep_pin = superio_inb(sioaddr,
+						 IT87_SIO_BEEP_PIN_REG) & 0x3f;
 	}
 	if (sio_data->beep_pin)
 		pr_info("Beeping is supported\n");
@@ -2157,8 +2711,8 @@
 	board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
 	board_name = dmi_get_system_info(DMI_BOARD_NAME);
 	if (board_vendor && board_name) {
-		if (strcmp(board_vendor, "nVIDIA") == 0
-		 && strcmp(board_name, "FN68PT") == 0) {
+		if (strcmp(board_vendor, "nVIDIA") == 0 &&
+		    strcmp(board_name, "FN68PT") == 0) {
 			/*
 			 * On the Shuttle SN68PT, FAN_CTL2 is apparently not
 			 * connected to a fan, but to something else. One user
@@ -2168,373 +2722,15 @@
 			 * the same board is ever used in other systems.
 			 */
 			pr_info("Disabling pwm2 due to hardware constraints\n");
-			sio_data->skip_pwm = (1 << 1);
+			sio_data->skip_pwm = BIT(1);
 		}
 	}
 
 exit:
-	superio_exit();
+	superio_exit(sioaddr);
 	return err;
 }
 
-static void it87_remove_files(struct device *dev)
-{
-	struct it87_data *data = platform_get_drvdata(pdev);
-	struct it87_sio_data *sio_data = dev_get_platdata(dev);
-	int i;
-
-	sysfs_remove_group(&dev->kobj, &it87_group);
-	for (i = 0; i < 10; i++) {
-		if (sio_data->skip_in & (1 << i))
-			continue;
-		sysfs_remove_group(&dev->kobj, &it87_group_in[i]);
-		if (it87_attributes_in_beep[i])
-			sysfs_remove_file(&dev->kobj,
-					  it87_attributes_in_beep[i]);
-	}
-	for (i = 0; i < 3; i++) {
-		if (!(data->has_temp & (1 << i)))
-			continue;
-		sysfs_remove_group(&dev->kobj, &it87_group_temp[i]);
-		if (has_temp_offset(data))
-			sysfs_remove_file(&dev->kobj,
-					  it87_attributes_temp_offset[i]);
-		if (sio_data->beep_pin)
-			sysfs_remove_file(&dev->kobj,
-					  it87_attributes_temp_beep[i]);
-	}
-	for (i = 0; i < 6; i++) {
-		if (!(data->has_fan & (1 << i)))
-			continue;
-		sysfs_remove_group(&dev->kobj, &it87_group_fan[i]);
-		if (sio_data->beep_pin)
-			sysfs_remove_file(&dev->kobj,
-					  it87_attributes_fan_beep[i]);
-		if (i < 3 && !has_16bit_fans(data))
-			sysfs_remove_file(&dev->kobj,
-					  it87_attributes_fan_div[i]);
-	}
-	for (i = 0; i < 3; i++) {
-		if (sio_data->skip_pwm & (1 << i))
-			continue;
-		sysfs_remove_group(&dev->kobj, &it87_group_pwm[i]);
-		if (has_old_autopwm(data))
-			sysfs_remove_group(&dev->kobj,
-					   &it87_group_autopwm[i]);
-	}
-	if (!sio_data->skip_vid)
-		sysfs_remove_group(&dev->kobj, &it87_group_vid);
-	sysfs_remove_group(&dev->kobj, &it87_group_label);
-}
-
-static int it87_probe(struct platform_device *pdev)
-{
-	struct it87_data *data;
-	struct resource *res;
-	struct device *dev = &pdev->dev;
-	struct it87_sio_data *sio_data = dev_get_platdata(dev);
-	int err = 0, i;
-	int enable_pwm_interface;
-	int fan_beep_need_rw;
-
-	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
-	if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT,
-				 DRVNAME)) {
-		dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
-			(unsigned long)res->start,
-			(unsigned long)(res->start + IT87_EC_EXTENT - 1));
-		return -EBUSY;
-	}
-
-	data = devm_kzalloc(&pdev->dev, sizeof(struct it87_data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	data->addr = res->start;
-	data->type = sio_data->type;
-	data->features = it87_devices[sio_data->type].features;
-	data->peci_mask = it87_devices[sio_data->type].peci_mask;
-	data->old_peci_mask = it87_devices[sio_data->type].old_peci_mask;
-	data->name = it87_devices[sio_data->type].name;
-	/*
-	 * IT8705F Datasheet 0.4.1, 3h == Version G.
-	 * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
-	 * These are the first revisions with 16-bit tachometer support.
-	 */
-	switch (data->type) {
-	case it87:
-		if (sio_data->revision >= 0x03) {
-			data->features &= ~FEAT_OLD_AUTOPWM;
-			data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS;
-		}
-		break;
-	case it8712:
-		if (sio_data->revision >= 0x08) {
-			data->features &= ~FEAT_OLD_AUTOPWM;
-			data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS |
-					  FEAT_FIVE_FANS;
-		}
-		break;
-	default:
-		break;
-	}
-
-	/* Now, we do the remaining detection. */
-	if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80)
-	 || it87_read_value(data, IT87_REG_CHIPID) != 0x90)
-		return -ENODEV;
-
-	platform_set_drvdata(pdev, data);
-
-	mutex_init(&data->update_lock);
-
-	/* Check PWM configuration */
-	enable_pwm_interface = it87_check_pwm(dev);
-
-	/* Starting with IT8721F, we handle scaling of internal voltages */
-	if (has_12mv_adc(data)) {
-		if (sio_data->internal & (1 << 0))
-			data->in_scaled |= (1 << 3);	/* in3 is AVCC */
-		if (sio_data->internal & (1 << 1))
-			data->in_scaled |= (1 << 7);	/* in7 is VSB */
-		if (sio_data->internal & (1 << 2))
-			data->in_scaled |= (1 << 8);	/* in8 is Vbat */
-		if (sio_data->internal & (1 << 3))
-			data->in_scaled |= (1 << 9);	/* in9 is AVCC */
-	} else if (sio_data->type == it8781 || sio_data->type == it8782 ||
-		   sio_data->type == it8783) {
-		if (sio_data->internal & (1 << 0))
-			data->in_scaled |= (1 << 3);	/* in3 is VCC5V */
-		if (sio_data->internal & (1 << 1))
-			data->in_scaled |= (1 << 7);	/* in7 is VCCH5V */
-	}
-
-	data->has_temp = 0x07;
-	if (sio_data->skip_temp & (1 << 2)) {
-		if (sio_data->type == it8782
-		    && !(it87_read_value(data, IT87_REG_TEMP_EXTRA) & 0x80))
-			data->has_temp &= ~(1 << 2);
-	}
-
-	/* Initialize the IT87 chip */
-	it87_init_device(pdev);
-
-	/* Register sysfs hooks */
-	err = sysfs_create_group(&dev->kobj, &it87_group);
-	if (err)
-		return err;
-
-	for (i = 0; i < 10; i++) {
-		if (sio_data->skip_in & (1 << i))
-			continue;
-		err = sysfs_create_group(&dev->kobj, &it87_group_in[i]);
-		if (err)
-			goto error;
-		if (sio_data->beep_pin && it87_attributes_in_beep[i]) {
-			err = sysfs_create_file(&dev->kobj,
-						it87_attributes_in_beep[i]);
-			if (err)
-				goto error;
-		}
-	}
-
-	for (i = 0; i < 3; i++) {
-		if (!(data->has_temp & (1 << i)))
-			continue;
-		err = sysfs_create_group(&dev->kobj, &it87_group_temp[i]);
-		if (err)
-			goto error;
-		if (has_temp_offset(data)) {
-			err = sysfs_create_file(&dev->kobj,
-						it87_attributes_temp_offset[i]);
-			if (err)
-				goto error;
-		}
-		if (sio_data->beep_pin) {
-			err = sysfs_create_file(&dev->kobj,
-						it87_attributes_temp_beep[i]);
-			if (err)
-				goto error;
-		}
-	}
-
-	/* Do not create fan files for disabled fans */
-	fan_beep_need_rw = 1;
-	for (i = 0; i < 6; i++) {
-		if (!(data->has_fan & (1 << i)))
-			continue;
-		err = sysfs_create_group(&dev->kobj, &it87_group_fan[i]);
-		if (err)
-			goto error;
-
-		if (i < 3 && !has_16bit_fans(data)) {
-			err = sysfs_create_file(&dev->kobj,
-						it87_attributes_fan_div[i]);
-			if (err)
-				goto error;
-		}
-
-		if (sio_data->beep_pin) {
-			err = sysfs_create_file(&dev->kobj,
-						it87_attributes_fan_beep[i]);
-			if (err)
-				goto error;
-			if (!fan_beep_need_rw)
-				continue;
-
-			/*
-			 * As we have a single beep enable bit for all fans,
-			 * only the first enabled fan has a writable attribute
-			 * for it.
-			 */
-			if (sysfs_chmod_file(&dev->kobj,
-					     it87_attributes_fan_beep[i],
-					     S_IRUGO | S_IWUSR))
-				dev_dbg(dev, "chmod +w fan%d_beep failed\n",
-					i + 1);
-			fan_beep_need_rw = 0;
-		}
-	}
-
-	if (enable_pwm_interface) {
-		for (i = 0; i < 3; i++) {
-			if (sio_data->skip_pwm & (1 << i))
-				continue;
-			err = sysfs_create_group(&dev->kobj,
-						 &it87_group_pwm[i]);
-			if (err)
-				goto error;
-
-			if (!has_old_autopwm(data))
-				continue;
-			err = sysfs_create_group(&dev->kobj,
-						 &it87_group_autopwm[i]);
-			if (err)
-				goto error;
-		}
-	}
-
-	if (!sio_data->skip_vid) {
-		data->vrm = vid_which_vrm();
-		/* VID reading from Super-I/O config space if available */
-		data->vid = sio_data->vid_value;
-		err = sysfs_create_group(&dev->kobj, &it87_group_vid);
-		if (err)
-			goto error;
-	}
-
-	/* Export labels for internal sensors */
-	for (i = 0; i < 4; i++) {
-		if (!(sio_data->internal & (1 << i)))
-			continue;
-		err = sysfs_create_file(&dev->kobj,
-					it87_attributes_label[i]);
-		if (err)
-			goto error;
-	}
-
-	data->hwmon_dev = hwmon_device_register(dev);
-	if (IS_ERR(data->hwmon_dev)) {
-		err = PTR_ERR(data->hwmon_dev);
-		goto error;
-	}
-
-	return 0;
-
-error:
-	it87_remove_files(dev);
-	return err;
-}
-
-static int it87_remove(struct platform_device *pdev)
-{
-	struct it87_data *data = platform_get_drvdata(pdev);
-
-	hwmon_device_unregister(data->hwmon_dev);
-	it87_remove_files(&pdev->dev);
-
-	return 0;
-}
-
-/*
- * Must be called with data->update_lock held, except during initialization.
- * We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
- * would slow down the IT87 access and should not be necessary.
- */
-static int it87_read_value(struct it87_data *data, u8 reg)
-{
-	outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
-	return inb_p(data->addr + IT87_DATA_REG_OFFSET);
-}
-
-/*
- * Must be called with data->update_lock held, except during initialization.
- * We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
- * would slow down the IT87 access and should not be necessary.
- */
-static void it87_write_value(struct it87_data *data, u8 reg, u8 value)
-{
-	outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
-	outb_p(value, data->addr + IT87_DATA_REG_OFFSET);
-}
-
-/* Return 1 if and only if the PWM interface is safe to use */
-static int it87_check_pwm(struct device *dev)
-{
-	struct it87_data *data = dev_get_drvdata(dev);
-	/*
-	 * Some BIOSes fail to correctly configure the IT87 fans. All fans off
-	 * and polarity set to active low is sign that this is the case so we
-	 * disable pwm control to protect the user.
-	 */
-	int tmp = it87_read_value(data, IT87_REG_FAN_CTL);
-	if ((tmp & 0x87) == 0) {
-		if (fix_pwm_polarity) {
-			/*
-			 * The user asks us to attempt a chip reconfiguration.
-			 * This means switching to active high polarity and
-			 * inverting all fan speed values.
-			 */
-			int i;
-			u8 pwm[3];
-
-			for (i = 0; i < 3; i++)
-				pwm[i] = it87_read_value(data,
-							 IT87_REG_PWM(i));
-
-			/*
-			 * If any fan is in automatic pwm mode, the polarity
-			 * might be correct, as suspicious as it seems, so we
-			 * better don't change anything (but still disable the
-			 * PWM interface).
-			 */
-			if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
-				dev_info(dev,
-					 "Reconfiguring PWM to active high polarity\n");
-				it87_write_value(data, IT87_REG_FAN_CTL,
-						 tmp | 0x87);
-				for (i = 0; i < 3; i++)
-					it87_write_value(data,
-							 IT87_REG_PWM(i),
-							 0x7f & ~pwm[i]);
-				return 1;
-			}
-
-			dev_info(dev,
-				 "PWM configuration is too broken to be fixed\n");
-		}
-
-		dev_info(dev,
-			 "Detected broken BIOS defaults, disabling PWM interface\n");
-		return 0;
-	} else if (fix_pwm_polarity) {
-		dev_info(dev,
-			 "PWM configuration looks sane, won't touch\n");
-	}
-
-	return 1;
-}
-
 /* Called when we have found a new IT87. */
 static void it87_init_device(struct platform_device *pdev)
 {
@@ -2556,7 +2752,7 @@
 	 * these have separate registers for the temperature mapping and the
 	 * manual duty cycle.
 	 */
-	for (i = 0; i < 3; i++) {
+	for (i = 0; i < NUM_AUTO_PWM; i++) {
 		data->pwm_temp_map[i] = i;
 		data->pwm_duty[i] = 0x7f;	/* Full speed */
 		data->auto_pwm[i][3] = 0x7f;	/* Full speed, hard-coded */
@@ -2569,12 +2765,12 @@
 	 * means -1 degree C, which surprisingly doesn't trigger an alarm,
 	 * but is still confusing, so change to 127 degrees C.
 	 */
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < NUM_VIN_LIMIT; i++) {
 		tmp = it87_read_value(data, IT87_REG_VIN_MIN(i));
 		if (tmp == 0xff)
 			it87_write_value(data, IT87_REG_VIN_MIN(i), 0);
 	}
-	for (i = 0; i < 3; i++) {
+	for (i = 0; i < NUM_TEMP_LIMIT; i++) {
 		tmp = it87_read_value(data, IT87_REG_TEMP_HIGH(i));
 		if (tmp == 0xff)
 			it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
@@ -2619,158 +2815,245 @@
 
 	/* Check for additional fans */
 	if (has_five_fans(data)) {
-		if (tmp & (1 << 4))
-			data->has_fan |= (1 << 3); /* fan4 enabled */
-		if (tmp & (1 << 5))
-			data->has_fan |= (1 << 4); /* fan5 enabled */
-		if (has_six_fans(data) && (tmp & (1 << 2)))
-			data->has_fan |= (1 << 5); /* fan6 enabled */
+		if (tmp & BIT(4))
+			data->has_fan |= BIT(3); /* fan4 enabled */
+		if (tmp & BIT(5))
+			data->has_fan |= BIT(4); /* fan5 enabled */
+		if (has_six_fans(data) && (tmp & BIT(2)))
+			data->has_fan |= BIT(5); /* fan6 enabled */
 	}
 
 	/* Fan input pins may be used for alternative functions */
 	data->has_fan &= ~sio_data->skip_fan;
 
+	/* Check if pwm5, pwm6 are enabled */
+	if (has_six_pwm(data)) {
+		/* The following code may be IT8620E specific */
+		tmp = it87_read_value(data, IT87_REG_FAN_DIV);
+		if ((tmp & 0xc0) == 0xc0)
+			sio_data->skip_pwm |= BIT(4);
+		if (!(tmp & BIT(3)))
+			sio_data->skip_pwm |= BIT(5);
+	}
+
 	/* Start monitoring */
 	it87_write_value(data, IT87_REG_CONFIG,
 			 (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
 			 | (update_vbat ? 0x41 : 0x01));
 }
 
-static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
-{
-	data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr));
-	if (has_newer_autopwm(data)) {
-		data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
-		data->pwm_duty[nr] = it87_read_value(data,
-						     IT87_REG_PWM_DUTY(nr));
-	} else {
-		if (data->pwm_ctrl[nr] & 0x80)	/* Automatic mode */
-			data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
-		else				/* Manual mode */
-			data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f;
-	}
-
-	if (has_old_autopwm(data)) {
-		int i;
-
-		for (i = 0; i < 5 ; i++)
-			data->auto_temp[nr][i] = it87_read_value(data,
-						IT87_REG_AUTO_TEMP(nr, i));
-		for (i = 0; i < 3 ; i++)
-			data->auto_pwm[nr][i] = it87_read_value(data,
-						IT87_REG_AUTO_PWM(nr, i));
-	}
-}
-
-static struct it87_data *it87_update_device(struct device *dev)
+/* Return 1 if and only if the PWM interface is safe to use */
+static int it87_check_pwm(struct device *dev)
 {
 	struct it87_data *data = dev_get_drvdata(dev);
-	int i;
+	/*
+	 * Some BIOSes fail to correctly configure the IT87 fans. All fans off
+	 * and polarity set to active low is a sign that this is the case so we
+	 * disable pwm control to protect the user.
+	 */
+	int tmp = it87_read_value(data, IT87_REG_FAN_CTL);
 
-	mutex_lock(&data->update_lock);
-
-	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
-	    || !data->valid) {
-		if (update_vbat) {
+	if ((tmp & 0x87) == 0) {
+		if (fix_pwm_polarity) {
 			/*
-			 * Cleared after each update, so reenable.  Value
-			 * returned by this read will be previous value
+			 * The user asks us to attempt a chip reconfiguration.
+			 * This means switching to active high polarity and
+			 * inverting all fan speed values.
 			 */
-			it87_write_value(data, IT87_REG_CONFIG,
-				it87_read_value(data, IT87_REG_CONFIG) | 0x40);
-		}
-		for (i = 0; i <= 7; i++) {
-			data->in[i][0] =
-				it87_read_value(data, IT87_REG_VIN(i));
-			data->in[i][1] =
-				it87_read_value(data, IT87_REG_VIN_MIN(i));
-			data->in[i][2] =
-				it87_read_value(data, IT87_REG_VIN_MAX(i));
-		}
-		/* in8 (battery) has no limit registers */
-		data->in[8][0] = it87_read_value(data, IT87_REG_VIN(8));
-		if (data->type == it8603)
-			data->in[9][0] = it87_read_value(data, 0x2f);
+			int i;
+			u8 pwm[3];
 
-		for (i = 0; i < 6; i++) {
-			/* Skip disabled fans */
-			if (!(data->has_fan & (1 << i)))
-				continue;
+			for (i = 0; i < ARRAY_SIZE(pwm); i++)
+				pwm[i] = it87_read_value(data,
+							 IT87_REG_PWM[i]);
 
-			data->fan[i][1] =
-				it87_read_value(data, IT87_REG_FAN_MIN[i]);
-			data->fan[i][0] = it87_read_value(data,
-				       IT87_REG_FAN[i]);
-			/* Add high byte if in 16-bit mode */
-			if (has_16bit_fans(data)) {
-				data->fan[i][0] |= it87_read_value(data,
-						IT87_REG_FANX[i]) << 8;
-				data->fan[i][1] |= it87_read_value(data,
-						IT87_REG_FANX_MIN[i]) << 8;
+			/*
+			 * If any fan is in automatic pwm mode, the polarity
+			 * might be correct, as suspicious as it seems, so we
+			 * better don't change anything (but still disable the
+			 * PWM interface).
+			 */
+			if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
+				dev_info(dev,
+					 "Reconfiguring PWM to active high polarity\n");
+				it87_write_value(data, IT87_REG_FAN_CTL,
+						 tmp | 0x87);
+				for (i = 0; i < 3; i++)
+					it87_write_value(data,
+							 IT87_REG_PWM[i],
+							 0x7f & ~pwm[i]);
+				return 1;
 			}
-		}
-		for (i = 0; i < 3; i++) {
-			if (!(data->has_temp & (1 << i)))
-				continue;
-			data->temp[i][0] =
-				it87_read_value(data, IT87_REG_TEMP(i));
-			data->temp[i][1] =
-				it87_read_value(data, IT87_REG_TEMP_LOW(i));
-			data->temp[i][2] =
-				it87_read_value(data, IT87_REG_TEMP_HIGH(i));
-			if (has_temp_offset(data))
-				data->temp[i][3] =
-				  it87_read_value(data,
-						  IT87_REG_TEMP_OFFSET[i]);
+
+			dev_info(dev,
+				 "PWM configuration is too broken to be fixed\n");
 		}
 
-		/* Newer chips don't have clock dividers */
-		if ((data->has_fan & 0x07) && !has_16bit_fans(data)) {
-			i = it87_read_value(data, IT87_REG_FAN_DIV);
-			data->fan_div[0] = i & 0x07;
-			data->fan_div[1] = (i >> 3) & 0x07;
-			data->fan_div[2] = (i & 0x40) ? 3 : 1;
-		}
-
-		data->alarms =
-			it87_read_value(data, IT87_REG_ALARM1) |
-			(it87_read_value(data, IT87_REG_ALARM2) << 8) |
-			(it87_read_value(data, IT87_REG_ALARM3) << 16);
-		data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
-
-		data->fan_main_ctrl = it87_read_value(data,
-				IT87_REG_FAN_MAIN_CTRL);
-		data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL);
-		for (i = 0; i < 3; i++)
-			it87_update_pwm_ctrl(data, i);
-
-		data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
-		data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
-		/*
-		 * The IT8705F does not have VID capability.
-		 * The IT8718F and later don't use IT87_REG_VID for the
-		 * same purpose.
-		 */
-		if (data->type == it8712 || data->type == it8716) {
-			data->vid = it87_read_value(data, IT87_REG_VID);
-			/*
-			 * The older IT8712F revisions had only 5 VID pins,
-			 * but we assume it is always safe to read 6 bits.
-			 */
-			data->vid &= 0x3f;
-		}
-		data->last_updated = jiffies;
-		data->valid = 1;
+		dev_info(dev,
+			 "Detected broken BIOS defaults, disabling PWM interface\n");
+		return 0;
+	} else if (fix_pwm_polarity) {
+		dev_info(dev,
+			 "PWM configuration looks sane, won't touch\n");
 	}
 
-	mutex_unlock(&data->update_lock);
-
-	return data;
+	return 1;
 }
 
-static int __init it87_device_add(unsigned short address,
+static int it87_probe(struct platform_device *pdev)
+{
+	struct it87_data *data;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct it87_sio_data *sio_data = dev_get_platdata(dev);
+	int enable_pwm_interface;
+	struct device *hwmon_dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT,
+				 DRVNAME)) {
+		dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
+			(unsigned long)res->start,
+			(unsigned long)(res->start + IT87_EC_EXTENT - 1));
+		return -EBUSY;
+	}
+
+	data = devm_kzalloc(&pdev->dev, sizeof(struct it87_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->addr = res->start;
+	data->type = sio_data->type;
+	data->features = it87_devices[sio_data->type].features;
+	data->peci_mask = it87_devices[sio_data->type].peci_mask;
+	data->old_peci_mask = it87_devices[sio_data->type].old_peci_mask;
+	/*
+	 * IT8705F Datasheet 0.4.1, 3h == Version G.
+	 * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
+	 * These are the first revisions with 16-bit tachometer support.
+	 */
+	switch (data->type) {
+	case it87:
+		if (sio_data->revision >= 0x03) {
+			data->features &= ~FEAT_OLD_AUTOPWM;
+			data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS;
+		}
+		break;
+	case it8712:
+		if (sio_data->revision >= 0x08) {
+			data->features &= ~FEAT_OLD_AUTOPWM;
+			data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS |
+					  FEAT_FIVE_FANS;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Now, we do the remaining detection. */
+	if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80) ||
+	    it87_read_value(data, IT87_REG_CHIPID) != 0x90)
+		return -ENODEV;
+
+	platform_set_drvdata(pdev, data);
+
+	mutex_init(&data->update_lock);
+
+	/* Check PWM configuration */
+	enable_pwm_interface = it87_check_pwm(dev);
+
+	/* Starting with IT8721F, we handle scaling of internal voltages */
+	if (has_12mv_adc(data)) {
+		if (sio_data->internal & BIT(0))
+			data->in_scaled |= BIT(3);	/* in3 is AVCC */
+		if (sio_data->internal & BIT(1))
+			data->in_scaled |= BIT(7);	/* in7 is VSB */
+		if (sio_data->internal & BIT(2))
+			data->in_scaled |= BIT(8);	/* in8 is Vbat */
+		if (sio_data->internal & BIT(3))
+			data->in_scaled |= BIT(9);	/* in9 is AVCC */
+	} else if (sio_data->type == it8781 || sio_data->type == it8782 ||
+		   sio_data->type == it8783) {
+		if (sio_data->internal & BIT(0))
+			data->in_scaled |= BIT(3);	/* in3 is VCC5V */
+		if (sio_data->internal & BIT(1))
+			data->in_scaled |= BIT(7);	/* in7 is VCCH5V */
+	}
+
+	data->has_temp = 0x07;
+	if (sio_data->skip_temp & BIT(2)) {
+		if (sio_data->type == it8782 &&
+		    !(it87_read_value(data, IT87_REG_TEMP_EXTRA) & 0x80))
+			data->has_temp &= ~BIT(2);
+	}
+
+	data->in_internal = sio_data->internal;
+	data->has_in = 0x3ff & ~sio_data->skip_in;
+
+	if (has_six_temp(data)) {
+		u8 reg = it87_read_value(data, IT87_REG_TEMP456_ENABLE);
+
+		/* Check for additional temperature sensors */
+		if ((reg & 0x03) >= 0x02)
+			data->has_temp |= BIT(3);
+		if (((reg >> 2) & 0x03) >= 0x02)
+			data->has_temp |= BIT(4);
+		if (((reg >> 4) & 0x03) >= 0x02)
+			data->has_temp |= BIT(5);
+
+		/* Check for additional voltage sensors */
+		if ((reg & 0x03) == 0x01)
+			data->has_in |= BIT(10);
+		if (((reg >> 2) & 0x03) == 0x01)
+			data->has_in |= BIT(11);
+		if (((reg >> 4) & 0x03) == 0x01)
+			data->has_in |= BIT(12);
+	}
+
+	data->has_beep = !!sio_data->beep_pin;
+
+	/* Initialize the IT87 chip */
+	it87_init_device(pdev);
+
+	if (!sio_data->skip_vid) {
+		data->has_vid = true;
+		data->vrm = vid_which_vrm();
+		/* VID reading from Super-I/O config space if available */
+		data->vid = sio_data->vid_value;
+	}
+
+	/* Prepare for sysfs hooks */
+	data->groups[0] = &it87_group;
+	data->groups[1] = &it87_group_in;
+	data->groups[2] = &it87_group_temp;
+	data->groups[3] = &it87_group_fan;
+
+	if (enable_pwm_interface) {
+		data->has_pwm = BIT(ARRAY_SIZE(IT87_REG_PWM)) - 1;
+		data->has_pwm &= ~sio_data->skip_pwm;
+
+		data->groups[4] = &it87_group_pwm;
+		if (has_old_autopwm(data) || has_newer_autopwm(data))
+			data->groups[5] = &it87_group_auto_pwm;
+	}
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+					it87_devices[sio_data->type].name,
+					data, data->groups);
+	return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct platform_driver it87_driver = {
+	.driver = {
+		.name	= DRVNAME,
+	},
+	.probe	= it87_probe,
+};
+
+static int __init it87_device_add(int index, unsigned short address,
 				  const struct it87_sio_data *sio_data)
 {
+	struct platform_device *pdev;
 	struct resource res = {
 		.start	= address + IT87_EC_OFFSET,
 		.end	= address + IT87_EC_OFFSET + IT87_EC_EXTENT - 1,
@@ -2781,14 +3064,11 @@
 
 	err = acpi_check_resource_conflict(&res);
 	if (err)
-		goto exit;
+		return err;
 
 	pdev = platform_device_alloc(DRVNAME, address);
-	if (!pdev) {
-		err = -ENOMEM;
-		pr_err("Device allocation failed\n");
-		goto exit;
-	}
+	if (!pdev)
+		return -ENOMEM;
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
@@ -2809,44 +3089,61 @@
 		goto exit_device_put;
 	}
 
+	it87_pdev[index] = pdev;
 	return 0;
 
 exit_device_put:
 	platform_device_put(pdev);
-exit:
 	return err;
 }
 
 static int __init sm_it87_init(void)
 {
-	int err;
-	unsigned short isa_address = 0;
+	int sioaddr[2] = { REG_2E, REG_4E };
 	struct it87_sio_data sio_data;
+	unsigned short isa_address;
+	bool found = false;
+	int i, err;
 
-	memset(&sio_data, 0, sizeof(struct it87_sio_data));
-	err = it87_find(&isa_address, &sio_data);
-	if (err)
-		return err;
 	err = platform_driver_register(&it87_driver);
 	if (err)
 		return err;
 
-	err = it87_device_add(isa_address, &sio_data);
-	if (err) {
-		platform_driver_unregister(&it87_driver);
-		return err;
+	for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
+		memset(&sio_data, 0, sizeof(struct it87_sio_data));
+		isa_address = 0;
+		err = it87_find(sioaddr[i], &isa_address, &sio_data);
+		if (err || isa_address == 0)
+			continue;
+
+		err = it87_device_add(i, isa_address, &sio_data);
+		if (err)
+			goto exit_dev_unregister;
+		found = true;
 	}
 
+	if (!found) {
+		err = -ENODEV;
+		goto exit_unregister;
+	}
 	return 0;
+
+exit_dev_unregister:
+	/* NULL check handled by platform_device_unregister */
+	platform_device_unregister(it87_pdev[0]);
+exit_unregister:
+	platform_driver_unregister(&it87_driver);
+	return err;
 }
 
 static void __exit sm_it87_exit(void)
 {
-	platform_device_unregister(pdev);
+	/* NULL check handled by platform_device_unregister */
+	platform_device_unregister(it87_pdev[1]);
+	platform_device_unregister(it87_pdev[0]);
 	platform_driver_unregister(&it87_driver);
 }
 
-
 MODULE_AUTHOR("Chris Gauthron, Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
 module_param(update_vbat, bool, 0);
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 36544c4..303d0c9 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -85,6 +85,9 @@
 
 int max1111_read_channel(int channel)
 {
+	if (!the_max1111 || !the_max1111->spi)
+		return -ENODEV;
+
 	return max1111_read(&the_max1111->spi->dev, channel);
 }
 EXPORT_SYMBOL(max1111_read_channel);
@@ -258,6 +261,9 @@
 {
 	struct max1111_data *data = spi_get_drvdata(spi);
 
+#ifdef CONFIG_SHARPSL_PM
+	the_max1111 = NULL;
+#endif
 	hwmon_device_unregister(data->hwmon_dev);
 	sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
 	sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
new file mode 100644
index 0000000..30a100e
--- /dev/null
+++ b/drivers/hwmon/max31722.c
@@ -0,0 +1,165 @@
+/*
+ * max31722 - hwmon driver for Maxim Integrated MAX31722/MAX31723 SPI
+ * digital thermometer and thermostats.
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#define MAX31722_REG_CFG				0x00
+#define MAX31722_REG_TEMP_LSB				0x01
+
+#define MAX31722_MODE_CONTINUOUS			0x00
+#define MAX31722_MODE_STANDBY				0x01
+#define MAX31722_MODE_MASK				0xFE
+#define MAX31722_RESOLUTION_12BIT			0x06
+#define MAX31722_WRITE_MASK				0x80
+
+struct max31722_data {
+	struct device *hwmon_dev;
+	struct spi_device *spi_device;
+	u8 mode;
+};
+
+static int max31722_set_mode(struct max31722_data *data, u8 mode)
+{
+	int ret;
+	struct spi_device *spi = data->spi_device;
+	u8 buf[2] = {
+		MAX31722_REG_CFG | MAX31722_WRITE_MASK,
+		(data->mode & MAX31722_MODE_MASK) | mode
+	};
+
+	ret = spi_write(spi, &buf, sizeof(buf));
+	if (ret < 0) {
+		dev_err(&spi->dev, "failed to set sensor mode.\n");
+		return ret;
+	}
+	data->mode = (data->mode & MAX31722_MODE_MASK) | mode;
+
+	return 0;
+}
+
+static ssize_t max31722_show_temp(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	ssize_t ret;
+	struct max31722_data *data = dev_get_drvdata(dev);
+
+	ret = spi_w8r16(data->spi_device, MAX31722_REG_TEMP_LSB);
+	if (ret < 0)
+		return ret;
+	/* Keep 12 bits and multiply by the scale of 62.5 millidegrees/bit. */
+	return sprintf(buf, "%d\n", (s16)le16_to_cpu(ret) * 125 / 32);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+			  max31722_show_temp, NULL, 0);
+
+static struct attribute *max31722_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(max31722);
+
+static int max31722_probe(struct spi_device *spi)
+{
+	int ret;
+	struct max31722_data *data;
+
+	data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	spi_set_drvdata(spi, data);
+	data->spi_device = spi;
+	/*
+	 * Set SD bit to 0 so we can have continuous measurements.
+	 * Set resolution to 12 bits for maximum precision.
+	 */
+	data->mode = MAX31722_MODE_CONTINUOUS | MAX31722_RESOLUTION_12BIT;
+	ret = max31722_set_mode(data, MAX31722_MODE_CONTINUOUS);
+	if (ret < 0)
+		return ret;
+
+	data->hwmon_dev = hwmon_device_register_with_groups(&spi->dev,
+							    spi->modalias,
+							    data,
+							    max31722_groups);
+	if (IS_ERR(data->hwmon_dev)) {
+		max31722_set_mode(data, MAX31722_MODE_STANDBY);
+		return PTR_ERR(data->hwmon_dev);
+	}
+
+	return 0;
+}
+
+static int max31722_remove(struct spi_device *spi)
+{
+	struct max31722_data *data = spi_get_drvdata(spi);
+
+	hwmon_device_unregister(data->hwmon_dev);
+
+	return max31722_set_mode(data, MAX31722_MODE_STANDBY);
+}
+
+static int __maybe_unused max31722_suspend(struct device *dev)
+{
+	struct spi_device *spi_device = to_spi_device(dev);
+	struct max31722_data *data = spi_get_drvdata(spi_device);
+
+	return max31722_set_mode(data, MAX31722_MODE_STANDBY);
+}
+
+static int __maybe_unused max31722_resume(struct device *dev)
+{
+	struct spi_device *spi_device = to_spi_device(dev);
+	struct max31722_data *data = spi_get_drvdata(spi_device);
+
+	return max31722_set_mode(data, MAX31722_MODE_CONTINUOUS);
+}
+
+static SIMPLE_DEV_PM_OPS(max31722_pm_ops, max31722_suspend, max31722_resume);
+
+static const struct spi_device_id max31722_spi_id[] = {
+	{"max31722", 0},
+	{"max31723", 0},
+	{}
+};
+
+static const struct acpi_device_id __maybe_unused max31722_acpi_id[] = {
+	{"MAX31722", 0},
+	{"MAX31723", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(spi, max31722_spi_id);
+
+static struct spi_driver max31722_driver = {
+	.driver = {
+		.name = "max31722",
+		.pm = &max31722_pm_ops,
+		.acpi_match_table = ACPI_PTR(max31722_acpi_id),
+	},
+	.probe =            max31722_probe,
+	.remove =           max31722_remove,
+	.id_table =         max31722_spi_id,
+};
+
+module_spi_driver(max31722_driver);
+
+MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
+MODULE_DESCRIPTION("max31722 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 131a281..d24d7b6 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -449,7 +449,7 @@
 		}
 		revision[i] = val;
 	}
-	pr_info("Found %s chip at %#hx, revison: %d.%02d\n", DEVNAME,
+	pr_info("Found %s chip at %#hx, revision: %d.%02d\n", DEVNAME,
 		data->addr, revision[0], revision[1]);
 
 	/* Read all temp + fan ctrl registers to determine which are active */
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 9d233bb..a8e89df 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -617,7 +617,7 @@
 };
 EXPORT_SYMBOL(i2c_bit_algo);
 
-const struct i2c_adapter_quirks i2c_bit_quirk_no_clk_stretch = {
+static const struct i2c_adapter_quirks i2c_bit_quirk_no_clk_stretch = {
 	.flags = I2C_AQ_NO_CLK_STRETCH,
 };
 
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index faa8e68..2dd40dd 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -663,7 +663,7 @@
 
 config I2C_MV64XXX
 	tristate "Marvell mv64xxx I2C Controller"
-	depends on MV64X60 || PLAT_ORION || ARCH_SUNXI
+	depends on MV64X60 || PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU
 	help
 	  If you say yes to this option, support will be included for the
 	  built-in I2C interface on the Marvell 64xxx line of host bridges.
@@ -975,10 +975,10 @@
 
 config I2C_XLP9XX
 	tristate "XLP9XX I2C support"
-	depends on CPU_XLP || COMPILE_TEST
+	depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
 	help
 	  This driver enables support for the on-chip I2C interface of
-	  the Broadcom XLP9xx/XLP5xx MIPS processors.
+	  the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
 
 	  This driver can also be built as a module.  If so, the module will
 	  be called i2c-xlp9xx.
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index b9f0fff..19c8438 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -267,7 +267,7 @@
 	iproc_i2c->msg = msg;
 
 	/* format and load slave address into the TX FIFO */
-	addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0);
+	addr = i2c_8bit_addr_from_msg(msg);
 	writel(addr, iproc_i2c->base + M_TX_OFFSET);
 
 	/*
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 2c9d9b1..ac9f476 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -501,10 +501,7 @@
 				return -EREMOTEIO;
 		}
 	} else {
-		addr = msg->addr << 1;
-
-		if (msg->flags & I2C_M_RD)
-			addr |= 1;
+		addr = i2c_8bit_addr_from_msg(msg);
 
 		if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0)
 			return -EREMOTEIO;
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 4a45408..6a8cfc1 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -446,9 +446,7 @@
 
 		}
 	} else {
-		addr = msg->addr << 1;
-		if (msg->flags & I2C_M_RD)
-			addr |= 1;
+		addr = i2c_8bit_addr_from_msg(msg);
 
 		bsc_writel(dev, addr, chip_address);
 	}
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 714bdc8..ee57c1e 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@
 	cbd_t __iomem *rbase;
 	u_char *txbuf[CPM_MAXBD];
 	u_char *rxbuf[CPM_MAXBD];
-	u32 txdma[CPM_MAXBD];
-	u32 rxdma[CPM_MAXBD];
+	dma_addr_t txdma[CPM_MAXBD];
+	dma_addr_t rxdma[CPM_MAXBD];
 };
 
 static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
@@ -197,9 +197,7 @@
 	tbdf = cpm->tbase + tx;
 	rbdf = cpm->rbase + rx;
 
-	addr = pmsg->addr << 1;
-	if (pmsg->flags & I2C_M_RD)
-		addr |= 1;
+	addr = i2c_8bit_addr_from_msg(pmsg);
 
 	tb = cpm->txbuf[tx];
 	rb = cpm->rxbuf[rx];
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index 1600edd..f2eb4f7 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -19,6 +19,7 @@
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/dln2.h>
+#include <linux/acpi.h>
 
 #define DLN2_I2C_MODULE_ID		0x03
 #define DLN2_I2C_CMD(cmd)		DLN2_CMD(cmd, DLN2_I2C_MODULE_ID)
@@ -210,6 +211,7 @@
 	dln2->adapter.algo = &dln2_i2c_usb_algorithm;
 	dln2->adapter.quirks = &dln2_i2c_quirks;
 	dln2->adapter.dev.parent = dev;
+	ACPI_COMPANION_SET(&dln2->adapter.dev, ACPI_COMPANION(&pdev->dev));
 	dln2->adapter.dev.of_node = dev->of_node;
 	i2c_set_adapdata(&dln2->adapter, dln2);
 	snprintf(dln2->adapter.name, sizeof(dln2->adapter.name), "%s-%s-%d",
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b29c750..c0e3ada 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,7 +671,9 @@
 		return -EIO;
 	}
 
-	clk_prepare_enable(i2c->clk);
+	ret = clk_enable(i2c->clk);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < num; i++, msgs++) {
 		stop = (i == num - 1);
@@ -695,7 +697,7 @@
 	}
 
  out:
-	clk_disable_unprepare(i2c->clk);
+	clk_disable(i2c->clk);
 	return ret;
 }
 
@@ -747,7 +749,9 @@
 		return -ENOENT;
 	}
 
-	clk_prepare_enable(i2c->clk);
+	ret = clk_prepare_enable(i2c->clk);
+	if (ret)
+		return ret;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@
 
 	platform_set_drvdata(pdev, i2c);
 
+	clk_disable(i2c->clk);
+
+	return 0;
+
  err_clk:
 	clk_disable_unprepare(i2c->clk);
 	return ret;
@@ -810,6 +818,8 @@
 
 	i2c_del_adapter(&i2c->adap);
 
+	clk_unprepare(i2c->clk);
+
 	return 0;
 }
 
@@ -821,6 +831,8 @@
 
 	i2c->suspended = 1;
 
+	clk_unprepare(i2c->clk);
+
 	return 0;
 }
 
@@ -830,7 +842,9 @@
 	struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
 	int ret = 0;
 
-	clk_prepare_enable(i2c->clk);
+	ret = clk_prepare_enable(i2c->clk);
+	if (ret)
+		return ret;
 
 	ret = exynos5_hsi2c_clock_setup(i2c);
 	if (ret) {
@@ -839,7 +853,7 @@
 	}
 
 	exynos5_i2c_init(i2c);
-	clk_disable_unprepare(i2c->clk);
+	clk_disable(i2c->clk);
 	i2c->suspended = 0;
 
 	return 0;
@@ -847,14 +861,8 @@
 #endif
 
 static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
-	.suspend_noirq = exynos5_i2c_suspend_noirq,
-	.resume_noirq = exynos5_i2c_resume_noirq,
-	.freeze_noirq = exynos5_i2c_suspend_noirq,
-	.thaw_noirq = exynos5_i2c_resume_noirq,
-	.poweroff_noirq = exynos5_i2c_suspend_noirq,
-	.restore_noirq = exynos5_i2c_resume_noirq,
-#endif
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos5_i2c_suspend_noirq,
+				      exynos5_i2c_resume_noirq)
 };
 
 static struct platform_driver exynos5_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 585a3b7..64b1208b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -94,6 +94,7 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/itco_wdt.h>
+#include <linux/pm_runtime.h>
 
 #if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
 		defined CONFIG_DMI
@@ -714,9 +715,11 @@
 {
 	int hwpec;
 	int block = 0;
-	int ret, xact = 0;
+	int ret = 0, xact = 0;
 	struct i801_priv *priv = i2c_get_adapdata(adap);
 
+	pm_runtime_get_sync(&priv->pci_dev->dev);
+
 	hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
 		&& size != I2C_SMBUS_QUICK
 		&& size != I2C_SMBUS_I2C_BLOCK_DATA;
@@ -773,7 +776,8 @@
 	default:
 		dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n",
 			size);
-		return -EOPNOTSUPP;
+		ret = -EOPNOTSUPP;
+		goto out;
 	}
 
 	if (hwpec)	/* enable/disable hardware PEC */
@@ -796,11 +800,11 @@
 		       ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
 
 	if (block)
-		return ret;
+		goto out;
 	if (ret)
-		return ret;
+		goto out;
 	if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
-		return 0;
+		goto out;
 
 	switch (xact & 0x7f) {
 	case I801_BYTE:	/* Result put in SMBHSTDAT0 */
@@ -812,7 +816,11 @@
 			     (inb_p(SMBHSTDAT1(priv)) << 8);
 		break;
 	}
-	return 0;
+
+out:
+	pm_runtime_mark_last_busy(&priv->pci_dev->dev);
+	pm_runtime_put_autosuspend(&priv->pci_dev->dev);
+	return ret;
 }
 
 
@@ -1413,6 +1421,11 @@
 
 	pci_set_drvdata(dev, priv);
 
+	pm_runtime_set_autosuspend_delay(&dev->dev, 1000);
+	pm_runtime_use_autosuspend(&dev->dev);
+	pm_runtime_put_autosuspend(&dev->dev);
+	pm_runtime_allow(&dev->dev);
+
 	return 0;
 }
 
@@ -1420,6 +1433,9 @@
 {
 	struct i801_priv *priv = pci_get_drvdata(dev);
 
+	pm_runtime_forbid(&dev->dev);
+	pm_runtime_get_noresume(&dev->dev);
+
 	i801_del_mux(priv);
 	i2c_del_adapter(&priv->adapter);
 	pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
@@ -1433,34 +1449,32 @@
 }
 
 #ifdef CONFIG_PM
-static int i801_suspend(struct pci_dev *dev, pm_message_t mesg)
+static int i801_suspend(struct device *dev)
 {
-	struct i801_priv *priv = pci_get_drvdata(dev);
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct i801_priv *priv = pci_get_drvdata(pci_dev);
 
-	pci_save_state(dev);
-	pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
-	pci_set_power_state(dev, pci_choose_state(dev, mesg));
+	pci_write_config_byte(pci_dev, SMBHSTCFG, priv->original_hstcfg);
 	return 0;
 }
 
-static int i801_resume(struct pci_dev *dev)
+static int i801_resume(struct device *dev)
 {
-	pci_set_power_state(dev, PCI_D0);
-	pci_restore_state(dev);
 	return 0;
 }
-#else
-#define i801_suspend NULL
-#define i801_resume NULL
 #endif
 
+static UNIVERSAL_DEV_PM_OPS(i801_pm_ops, i801_suspend,
+			    i801_resume, NULL);
+
 static struct pci_driver i801_driver = {
 	.name		= "i801_smbus",
 	.id_table	= i801_ids,
 	.probe		= i801_probe,
 	.remove		= i801_remove,
-	.suspend	= i801_suspend,
-	.resume		= i801_resume,
+	.driver		= {
+		.pm	= &i801_pm_ops,
+	},
 };
 
 static int __init i2c_i801_init(void)
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index b6c0803..cdaa7be 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -269,7 +269,7 @@
 	ndelay(t->hd_sta);
 
 	/* Send address */
-	v = (u8)((p->addr << 1) | ((p->flags & I2C_M_RD) ? 1 : 0));
+	v = i2c_8bit_addr_from_msg(p);
 	for (i = 0, mask = 0x80; i < 8; ++i, mask >>= 1){
 		out_8(&iic->directcntl, sda);
 		ndelay(t->low / 2);
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 379ef9c..ea20425b 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -751,9 +751,7 @@
 	switch (i2c->at_cur_cmd) {
 	case CMD_GEN_START:
 		next_cmd = CMD_GEN_DATA;
-		next_data = (i2c->msg.addr << 1);
-		if (i2c->msg.flags & I2C_M_RD)
-			next_data |= 0x1;
+		next_data = i2c_8bit_addr_from_msg(&i2c->msg);
 		break;
 	case CMD_GEN_DATA:
 		if (i2c->line_status & LINESTAT_INPUT_HELD_V)
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1ca7ef2..1844bc9 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -525,7 +525,7 @@
 	imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode, i2c_imx, IMX_I2C_I2CR);
 
 	/* Wait controller to be stable */
-	udelay(50);
+	usleep_range(50, 150);
 
 	/* Start I2C transaction */
 	temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 72d6161..85cbe4b 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -50,10 +50,7 @@
 {
 	unsigned char addr;
 
-	addr = (msg->addr << 1);
-
-	if (msg->flags & I2C_M_RD)
-		addr |= 1;
+	addr = i2c_8bit_addr_from_msg(msg);
 
 	return addr;
 }
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b..1c87077 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -75,6 +75,7 @@
 /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
 #define PCI_DEVICE_ID_INTEL_S1200_SMT0	0x0c59
 #define PCI_DEVICE_ID_INTEL_S1200_SMT1	0x0c5a
+#define PCI_DEVICE_ID_INTEL_DNV_SMT	0x19ac
 #define PCI_DEVICE_ID_INTEL_AVOTON_SMT	0x1f15
 
 #define ISMT_DESC_ENTRIES	2	/* number of descriptor entries */
@@ -180,6 +181,7 @@
 static const struct pci_device_id ismt_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
 	{ 0, }
 };
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663..ba14a86 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -771,11 +771,16 @@
 	ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
 				   &clk_freq);
 	if (ret) {
-		dev_err(&pdev->dev, "clock-frequency not specified in DT");
+		dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
 		goto err;
 	}
 
 	i2c->speed = clk_freq / 1000;
+	if (i2c->speed == 0) {
+		ret = -EINVAL;
+		dev_err(&pdev->dev, "clock-frequency minimum is 1000\n");
+		goto err;
+	}
 	jz4780_i2c_set_speed(i2c);
 
 	dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 8560a13..586a152 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -133,9 +133,7 @@
 	case M_START:
 	case M_REPSTART:
 		/* Start bit was just sent out, send out addr and dir */
-		data = i2c->msg->addr << 1;
-		if (i2c->msg->flags & I2C_M_RD)
-			data |= 1;
+		data = i2c_8bit_addr_from_msg(i2c->msg);
 
 		writel(data, i2c->base + LPC24XX_I2DAT);
 		writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR);
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 453358b..d9373e6 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -413,10 +413,7 @@
 	else
 		writew(I2C_FS_START_CON, i2c->base + OFFSET_EXT_CONF);
 
-	addr_reg = msgs->addr << 1;
-	if (i2c->op == I2C_MASTER_RD)
-		addr_reg |= 0x1;
-
+	addr_reg = i2c_8bit_addr_from_msg(msgs);
 	writew(addr_reg, i2c->base + OFFSET_SLAVE_ADDR);
 
 	/* Clear interrupt status */
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 43207f5..b4dec08 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -134,9 +134,7 @@
 	int			rc;
 	u32			freq_m;
 	u32			freq_n;
-#if defined(CONFIG_HAVE_CLK)
 	struct clk              *clk;
-#endif
 	wait_queue_head_t	waitq;
 	spinlock_t		lock;
 	struct i2c_msg		*msg;
@@ -757,7 +755,6 @@
 MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
 
 #ifdef CONFIG_OF
-#ifdef CONFIG_HAVE_CLK
 static int
 mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data,
 		  const int tclk, const int n, const int m)
@@ -791,25 +788,20 @@
 		return false;
 	return true;
 }
-#endif /* CONFIG_HAVE_CLK */
 
 static int
 mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
 		  struct device *dev)
 {
-	/* CLK is mandatory when using DT to describe the i2c bus. We
-	 * need to know tclk in order to calculate bus clock
-	 * factors.
-	 */
-#if !defined(CONFIG_HAVE_CLK)
-	/* Have OF but no CLK */
-	return -ENODEV;
-#else
 	const struct of_device_id *device;
 	struct device_node *np = dev->of_node;
 	u32 bus_freq, tclk;
 	int rc = 0;
 
+	/* CLK is mandatory when using DT to describe the i2c bus. We
+	 * need to know tclk in order to calculate bus clock
+	 * factors.
+	 */
 	if (IS_ERR(drv_data->clk)) {
 		rc = -ENODEV;
 		goto out;
@@ -869,7 +861,6 @@
 
 out:
 	return rc;
-#endif
 }
 #else /* CONFIG_OF */
 static int
@@ -907,14 +898,13 @@
 	init_waitqueue_head(&drv_data->waitq);
 	spin_lock_init(&drv_data->lock);
 
-#if defined(CONFIG_HAVE_CLK)
 	/* Not all platforms have a clk */
 	drv_data->clk = devm_clk_get(&pd->dev, NULL);
-	if (!IS_ERR(drv_data->clk)) {
-		clk_prepare(drv_data->clk);
-		clk_enable(drv_data->clk);
-	}
-#endif
+	if (IS_ERR(drv_data->clk) && PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	if (!IS_ERR(drv_data->clk))
+		clk_prepare_enable(drv_data->clk);
+
 	if (pdata) {
 		drv_data->freq_m = pdata->freq_m;
 		drv_data->freq_n = pdata->freq_n;
@@ -964,13 +954,10 @@
 	if (!IS_ERR_OR_NULL(drv_data->rstc))
 		reset_control_assert(drv_data->rstc);
 exit_clk:
-#if defined(CONFIG_HAVE_CLK)
 	/* Not all platforms have a clk */
-	if (!IS_ERR(drv_data->clk)) {
-		clk_disable(drv_data->clk);
-		clk_unprepare(drv_data->clk);
-	}
-#endif
+	if (!IS_ERR(drv_data->clk))
+		clk_disable_unprepare(drv_data->clk);
+
 	return rc;
 }
 
@@ -983,13 +970,9 @@
 	free_irq(drv_data->irq, drv_data);
 	if (!IS_ERR_OR_NULL(drv_data->rstc))
 		reset_control_assert(drv_data->rstc);
-#if defined(CONFIG_HAVE_CLK)
 	/* Not all platforms have a clk */
-	if (!IS_ERR(drv_data->clk)) {
-		clk_disable(drv_data->clk);
-		clk_unprepare(drv_data->clk);
-	}
-#endif
+	if (!IS_ERR(drv_data->clk))
+		clk_disable_unprepare(drv_data->clk);
 
 	return 0;
 }
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 70b3c91..42fcc94 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -127,7 +127,7 @@
 
 /* For multiplexing support, we need a global reference to the 1st
    SMBus channel */
-#if defined CONFIG_I2C_NFORCE2_S4985 || defined CONFIG_I2C_NFORCE2_S4985_MODULE
+#if IS_ENABLED(CONFIG_I2C_NFORCE2_S4985)
 struct i2c_adapter *nforce2_smbus;
 EXPORT_SYMBOL_GPL(nforce2_smbus);
 
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 11b7b87..dfa7a4b 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -178,10 +178,7 @@
 		if (i2c->nmsgs) {	/* end? */
 			/* send start? */
 			if (!(msg->flags & I2C_M_NOSTART)) {
-				u8 addr = (msg->addr << 1);
-
-				if (msg->flags & I2C_M_RD)
-					addr |= 1;
+				u8 addr = i2c_8bit_addr_from_msg(msg);
 
 				i2c->state = STATE_START;
 
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 46fb6c4..aa5f01e 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -11,6 +11,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/atomic.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
@@ -29,13 +30,23 @@
 /* Register offsets */
 #define SW_TWSI			0x00
 #define TWSI_INT		0x10
+#define SW_TWSI_EXT		0x18
 
 /* Controller command patterns */
 #define SW_TWSI_V		BIT_ULL(63)	/* Valid bit */
+#define SW_TWSI_EIA		BIT_ULL(61)	/* Extended internal address */
 #define SW_TWSI_R		BIT_ULL(56)	/* Result or read bit */
+#define SW_TWSI_SOVR		BIT_ULL(55)	/* Size override */
+#define SW_TWSI_SIZE_SHIFT	52
+#define SW_TWSI_ADDR_SHIFT	40
+#define SW_TWSI_IA_SHIFT	32		/* Internal address */
 
 /* Controller opcode word (bits 60:57) */
 #define SW_TWSI_OP_SHIFT	57
+#define SW_TWSI_OP_7		(0ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_7_IA		(1ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_10		(2ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_10_IA	(3ULL << SW_TWSI_OP_SHIFT)
 #define SW_TWSI_OP_TWSI_CLK	(4ULL << SW_TWSI_OP_SHIFT)
 #define SW_TWSI_OP_EOP		(6ULL << SW_TWSI_OP_SHIFT) /* Extended opcode */
 
@@ -48,46 +59,93 @@
 #define SW_TWSI_EOP_TWSI_RST	(SW_TWSI_OP_EOP | 7ULL << SW_TWSI_EOP_SHIFT)
 
 /* Controller command and status bits */
-#define TWSI_CTL_CE		0x80
+#define TWSI_CTL_CE		0x80	/* High level controller enable */
 #define TWSI_CTL_ENAB		0x40	/* Bus enable */
 #define TWSI_CTL_STA		0x20	/* Master-mode start, HW clears when done */
 #define TWSI_CTL_STP		0x10	/* Master-mode stop, HW clears when done */
 #define TWSI_CTL_IFLG		0x08	/* HW event, SW writes 0 to ACK */
 #define TWSI_CTL_AAK		0x04	/* Assert ACK */
 
-/* Some status values */
+/* Status values */
+#define STAT_ERROR		0x00
 #define STAT_START		0x08
-#define STAT_RSTART		0x10
+#define STAT_REP_START		0x10
 #define STAT_TXADDR_ACK		0x18
+#define STAT_TXADDR_NAK		0x20
 #define STAT_TXDATA_ACK		0x28
+#define STAT_TXDATA_NAK		0x30
+#define STAT_LOST_ARB_38	0x38
 #define STAT_RXADDR_ACK		0x40
+#define STAT_RXADDR_NAK		0x48
 #define STAT_RXDATA_ACK		0x50
+#define STAT_RXDATA_NAK		0x58
+#define STAT_SLAVE_60		0x60
+#define STAT_LOST_ARB_68	0x68
+#define STAT_SLAVE_70		0x70
+#define STAT_LOST_ARB_78	0x78
+#define STAT_SLAVE_80		0x80
+#define STAT_SLAVE_88		0x88
+#define STAT_GENDATA_ACK	0x90
+#define STAT_GENDATA_NAK	0x98
+#define STAT_SLAVE_A0		0xA0
+#define STAT_SLAVE_A8		0xA8
+#define STAT_LOST_ARB_B0	0xB0
+#define STAT_SLAVE_LOST		0xB8
+#define STAT_SLAVE_NAK		0xC0
+#define STAT_SLAVE_ACK		0xC8
+#define STAT_AD2W_ACK		0xD0
+#define STAT_AD2W_NAK		0xD8
 #define STAT_IDLE		0xF8
 
 /* TWSI_INT values */
+#define TWSI_INT_ST_INT		BIT_ULL(0)
+#define TWSI_INT_TS_INT		BIT_ULL(1)
+#define TWSI_INT_CORE_INT	BIT_ULL(2)
+#define TWSI_INT_ST_EN		BIT_ULL(4)
+#define TWSI_INT_TS_EN		BIT_ULL(5)
 #define TWSI_INT_CORE_EN	BIT_ULL(6)
 #define TWSI_INT_SDA_OVR	BIT_ULL(8)
 #define TWSI_INT_SCL_OVR	BIT_ULL(9)
+#define TWSI_INT_SDA		BIT_ULL(10)
+#define TWSI_INT_SCL		BIT_ULL(11)
+
+#define I2C_OCTEON_EVENT_WAIT 80 /* microseconds */
 
 struct octeon_i2c {
 	wait_queue_head_t queue;
 	struct i2c_adapter adap;
 	int irq;
+	int hlc_irq;		/* For cn7890 only */
 	u32 twsi_freq;
 	int sys_freq;
 	void __iomem *twsi_base;
 	struct device *dev;
+	bool hlc_enabled;
+	bool broken_irq_mode;
+	bool broken_irq_check;
+	void (*int_enable)(struct octeon_i2c *);
+	void (*int_disable)(struct octeon_i2c *);
+	void (*hlc_int_enable)(struct octeon_i2c *);
+	void (*hlc_int_disable)(struct octeon_i2c *);
+	atomic_t int_enable_cnt;
+	atomic_t hlc_int_enable_cnt;
 };
 
+static void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
+{
+	__raw_writeq(val, addr);
+	__raw_readq(addr);	/* wait for write to land */
+}
+
 /**
- * octeon_i2c_write_sw - write an I2C core register
+ * octeon_i2c_reg_write - write an I2C core register
  * @i2c: The struct octeon_i2c
  * @eop_reg: Register selector
  * @data: Value to be written
  *
  * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
  */
-static void octeon_i2c_write_sw(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
+static void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
 {
 	u64 tmp;
 
@@ -97,8 +155,13 @@
 	} while ((tmp & SW_TWSI_V) != 0);
 }
 
+#define octeon_i2c_ctl_write(i2c, val)					\
+	octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CTL, val)
+#define octeon_i2c_data_write(i2c, val)					\
+	octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_DATA, val)
+
 /**
- * octeon_i2c_read_sw - read lower bits of an I2C core register
+ * octeon_i2c_reg_read - read lower bits of an I2C core register
  * @i2c: The struct octeon_i2c
  * @eop_reg: Register selector
  *
@@ -106,7 +169,7 @@
  *
  * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
  */
-static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg)
+static u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
 {
 	u64 tmp;
 
@@ -118,6 +181,24 @@
 	return tmp & 0xFF;
 }
 
+#define octeon_i2c_ctl_read(i2c)					\
+	octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
+#define octeon_i2c_data_read(i2c)					\
+	octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
+#define octeon_i2c_stat_read(i2c)					\
+	octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
+
+/**
+ * octeon_i2c_read_int - read the TWSI_INT register
+ * @i2c: The struct octeon_i2c
+ *
+ * Returns the value of the register.
+ */
+static u64 octeon_i2c_read_int(struct octeon_i2c *i2c)
+{
+	return __raw_readq(i2c->twsi_base + TWSI_INT);
+}
+
 /**
  * octeon_i2c_write_int - write the TWSI_INT register
  * @i2c: The struct octeon_i2c
@@ -125,8 +206,7 @@
  */
 static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
 {
-	__raw_writeq(data, i2c->twsi_base + TWSI_INT);
-	__raw_readq(i2c->twsi_base + TWSI_INT);
+	octeon_i2c_writeq_flush(data, i2c->twsi_base + TWSI_INT);
 }
 
 /**
@@ -149,30 +229,96 @@
 }
 
 /**
- * octeon_i2c_unblock - unblock the bus
+ * octeon_i2c_int_enable78 - enable the CORE interrupt
  * @i2c: The struct octeon_i2c
  *
- * If there was a reset while a device was driving 0 to bus, bus is blocked.
- * We toggle it free manually by some clock cycles and send a stop.
+ * The interrupt will be asserted when there is non-STAT_IDLE state in the
+ * SW_TWSI_EOP_TWSI_STAT register.
  */
-static void octeon_i2c_unblock(struct octeon_i2c *i2c)
+static void octeon_i2c_int_enable78(struct octeon_i2c *i2c)
 {
-	int i;
+	atomic_inc_return(&i2c->int_enable_cnt);
+	enable_irq(i2c->irq);
+}
 
-	dev_dbg(i2c->dev, "%s\n", __func__);
+static void __octeon_i2c_irq_disable(atomic_t *cnt, int irq)
+{
+	int count;
 
-	for (i = 0; i < 9; i++) {
-		octeon_i2c_write_int(i2c, 0);
-		udelay(5);
-		octeon_i2c_write_int(i2c, TWSI_INT_SCL_OVR);
-		udelay(5);
+	/*
+	 * The interrupt can be disabled in two places, but we only
+	 * want to make the disable_irq_nosync() call once, so keep
+	 * track with the atomic variable.
+	 */
+	count = atomic_dec_if_positive(cnt);
+	if (count >= 0)
+		disable_irq_nosync(irq);
+}
+
+/* disable the CORE interrupt */
+static void octeon_i2c_int_disable78(struct octeon_i2c *i2c)
+{
+	__octeon_i2c_irq_disable(&i2c->int_enable_cnt, i2c->irq);
+}
+
+/**
+ * octeon_i2c_hlc_int_enable78 - enable the ST interrupt
+ * @i2c: The struct octeon_i2c
+ *
+ * The interrupt will be asserted when there is non-STAT_IDLE state in
+ * the SW_TWSI_EOP_TWSI_STAT register.
+ */
+static void octeon_i2c_hlc_int_enable78(struct octeon_i2c *i2c)
+{
+	atomic_inc_return(&i2c->hlc_int_enable_cnt);
+	enable_irq(i2c->hlc_irq);
+}
+
+/* disable the ST interrupt */
+static void octeon_i2c_hlc_int_disable78(struct octeon_i2c *i2c)
+{
+	__octeon_i2c_irq_disable(&i2c->hlc_int_enable_cnt, i2c->hlc_irq);
+}
+
+/*
+ * Cleanup low-level state & enable high-level controller.
+ */
+static void octeon_i2c_hlc_enable(struct octeon_i2c *i2c)
+{
+	int try = 0;
+	u64 val;
+
+	if (i2c->hlc_enabled)
+		return;
+	i2c->hlc_enabled = true;
+
+	while (1) {
+		val = octeon_i2c_ctl_read(i2c);
+		if (!(val & (TWSI_CTL_STA | TWSI_CTL_STP)))
+			break;
+
+		/* clear IFLG event */
+		if (val & TWSI_CTL_IFLG)
+			octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
+
+		if (try++ > 100) {
+			pr_err("%s: giving up\n", __func__);
+			break;
+		}
+
+		/* spin until any start/stop has finished */
+		udelay(10);
 	}
-	/* hand-crank a STOP */
-	octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR | TWSI_INT_SCL_OVR);
-	udelay(5);
-	octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR);
-	udelay(5);
-	octeon_i2c_write_int(i2c, 0);
+	octeon_i2c_ctl_write(i2c, TWSI_CTL_CE | TWSI_CTL_AAK | TWSI_CTL_ENAB);
+}
+
+static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
+{
+	if (!i2c->hlc_enabled)
+		return;
+
+	i2c->hlc_enabled = false;
+	octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
 }
 
 /* interrupt service routine */
@@ -180,16 +326,44 @@
 {
 	struct octeon_i2c *i2c = dev_id;
 
-	octeon_i2c_int_disable(i2c);
+	i2c->int_disable(i2c);
 	wake_up(&i2c->queue);
 
 	return IRQ_HANDLED;
 }
 
-
-static int octeon_i2c_test_iflg(struct octeon_i2c *i2c)
+/* HLC interrupt service routine */
+static irqreturn_t octeon_i2c_hlc_isr78(int irq, void *dev_id)
 {
-	return (octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_CTL) & TWSI_CTL_IFLG) != 0;
+	struct octeon_i2c *i2c = dev_id;
+
+	i2c->hlc_int_disable(i2c);
+	wake_up(&i2c->queue);
+
+	return IRQ_HANDLED;
+}
+
+static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c)
+{
+	return (octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG);
+}
+
+static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
+{
+	if (octeon_i2c_test_iflg(i2c))
+		return true;
+
+	if (*first) {
+		*first = false;
+		return false;
+	}
+
+	/*
+	 * IRQ has signaled an event but IFLG hasn't changed.
+	 * Sleep and retry once.
+	 */
+	usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
+	return octeon_i2c_test_iflg(i2c);
 }
 
 /**
@@ -201,233 +375,379 @@
 static int octeon_i2c_wait(struct octeon_i2c *i2c)
 {
 	long time_left;
+	bool first = 1;
 
-	octeon_i2c_int_enable(i2c);
-	time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_iflg(i2c),
-				       i2c->adap.timeout);
-	octeon_i2c_int_disable(i2c);
-	if (!time_left) {
-		dev_dbg(i2c->dev, "%s: timeout\n", __func__);
-		return -ETIMEDOUT;
+	/*
+	 * Some chip revisions don't assert the irq in the interrupt
+	 * controller. So we must poll for the IFLG change.
+	 */
+	if (i2c->broken_irq_mode) {
+		u64 end = get_jiffies_64() + i2c->adap.timeout;
+
+		while (!octeon_i2c_test_iflg(i2c) &&
+		       time_before64(get_jiffies_64(), end))
+			usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT);
+
+		return octeon_i2c_test_iflg(i2c) ? 0 : -ETIMEDOUT;
 	}
 
+	i2c->int_enable(i2c);
+	time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_ready(i2c, &first),
+				       i2c->adap.timeout);
+	i2c->int_disable(i2c);
+
+	if (i2c->broken_irq_check && !time_left &&
+	    octeon_i2c_test_iflg(i2c)) {
+		dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
+		i2c->broken_irq_mode = true;
+		return 0;
+	}
+
+	if (!time_left)
+		return -ETIMEDOUT;
+
 	return 0;
 }
 
-/**
- * octeon_i2c_start - send START to the bus
- * @i2c: The struct octeon_i2c
- *
- * Returns 0 on success, otherwise a negative errno.
- */
-static int octeon_i2c_start(struct octeon_i2c *i2c)
+static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
 {
-	int result;
-	u8 data;
+	u8 stat = octeon_i2c_stat_read(i2c);
 
-	octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
-			    TWSI_CTL_ENAB | TWSI_CTL_STA);
+	switch (stat) {
+	/* Everything is fine */
+	case STAT_IDLE:
+	case STAT_AD2W_ACK:
+	case STAT_RXADDR_ACK:
+	case STAT_TXADDR_ACK:
+	case STAT_TXDATA_ACK:
+		return 0;
 
-	result = octeon_i2c_wait(i2c);
-	if (result) {
-		if (octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT) == STAT_IDLE) {
-			/*
-			 * Controller refused to send start flag May
-			 * be a client is holding SDA low - let's try
-			 * to free it.
-			 */
-			octeon_i2c_unblock(i2c);
-			octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
-					    TWSI_CTL_ENAB | TWSI_CTL_STA);
-			result = octeon_i2c_wait(i2c);
-		}
-		if (result)
-			return result;
-	}
+	/* ACK allowed on pre-terminal bytes only */
+	case STAT_RXDATA_ACK:
+		if (!final_read)
+			return 0;
+		return -EIO;
 
-	data = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
-	if ((data != STAT_START) && (data != STAT_RSTART)) {
-		dev_err(i2c->dev, "%s: bad status (0x%x)\n", __func__, data);
+	/* NAK allowed on terminal byte only */
+	case STAT_RXDATA_NAK:
+		if (final_read)
+			return 0;
+		return -EIO;
+
+	/* Arbitration lost */
+	case STAT_LOST_ARB_38:
+	case STAT_LOST_ARB_68:
+	case STAT_LOST_ARB_78:
+	case STAT_LOST_ARB_B0:
+		return -EAGAIN;
+
+	/* Being addressed as slave, should back off & listen */
+	case STAT_SLAVE_60:
+	case STAT_SLAVE_70:
+	case STAT_GENDATA_ACK:
+	case STAT_GENDATA_NAK:
+		return -EOPNOTSUPP;
+
+	/* Core busy as slave */
+	case STAT_SLAVE_80:
+	case STAT_SLAVE_88:
+	case STAT_SLAVE_A0:
+	case STAT_SLAVE_A8:
+	case STAT_SLAVE_LOST:
+	case STAT_SLAVE_NAK:
+	case STAT_SLAVE_ACK:
+		return -EOPNOTSUPP;
+
+	case STAT_TXDATA_NAK:
+		return -EIO;
+	case STAT_TXADDR_NAK:
+	case STAT_RXADDR_NAK:
+	case STAT_AD2W_NAK:
+		return -ENXIO;
+	default:
+		dev_err(i2c->dev, "unhandled state: %d\n", stat);
 		return -EIO;
 	}
-
-	return 0;
 }
 
-/* send STOP to the bus */
-static void octeon_i2c_stop(struct octeon_i2c *i2c)
+static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c)
 {
-	octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
-			    TWSI_CTL_ENAB | TWSI_CTL_STP);
+	return (__raw_readq(i2c->twsi_base + SW_TWSI) & SW_TWSI_V) == 0;
+}
+
+static bool octeon_i2c_hlc_test_ready(struct octeon_i2c *i2c, bool *first)
+{
+	/* check if valid bit is cleared */
+	if (octeon_i2c_hlc_test_valid(i2c))
+		return true;
+
+	if (*first) {
+		*first = false;
+		return false;
+	}
+
+	/*
+	 * IRQ has signaled an event but valid bit isn't cleared.
+	 * Sleep and retry once.
+	 */
+	usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
+	return octeon_i2c_hlc_test_valid(i2c);
+}
+
+static void octeon_i2c_hlc_int_enable(struct octeon_i2c *i2c)
+{
+	octeon_i2c_write_int(i2c, TWSI_INT_ST_EN);
+}
+
+static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c)
+{
+	/* clear ST/TS events, listen for neither */
+	octeon_i2c_write_int(i2c, TWSI_INT_ST_INT | TWSI_INT_TS_INT);
 }
 
 /**
- * octeon_i2c_write - send data to the bus via low-level controller
+ * octeon_i2c_hlc_wait - wait for an HLC operation to complete
  * @i2c: The struct octeon_i2c
- * @target: Target address
- * @data: Pointer to the data to be sent
- * @length: Length of the data
  *
- * The address is sent over the bus, then the data.
- *
- * Returns 0 on success, otherwise a negative errno.
+ * Returns 0 on success, otherwise -ETIMEDOUT.
  */
-static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
-			    const u8 *data, int length)
+static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
 {
-	int i, result;
-	u8 tmp;
+	bool first = 1;
+	int time_left;
 
-	result = octeon_i2c_start(i2c);
-	if (result)
-		return result;
+	/*
+	 * Some cn38xx boards don't assert the irq in the interrupt
+	 * controller. So we must poll for the valid bit change.
+	 */
+	if (i2c->broken_irq_mode) {
+		u64 end = get_jiffies_64() + i2c->adap.timeout;
 
-	octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, target << 1);
-	octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
+		while (!octeon_i2c_hlc_test_valid(i2c) &&
+		       time_before64(get_jiffies_64(), end))
+			usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT);
 
-	result = octeon_i2c_wait(i2c);
-	if (result)
-		return result;
-
-	for (i = 0; i < length; i++) {
-		tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
-
-		if ((tmp != STAT_TXADDR_ACK) && (tmp != STAT_TXDATA_ACK)) {
-			dev_err(i2c->dev,
-				"%s: bad status before write (0x%x)\n",
-				__func__, tmp);
-			return -EIO;
-		}
-
-		octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, data[i]);
-		octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
-
-		result = octeon_i2c_wait(i2c);
-		if (result)
-			return result;
+		return octeon_i2c_hlc_test_valid(i2c) ? 0 : -ETIMEDOUT;
 	}
 
+	i2c->hlc_int_enable(i2c);
+	time_left = wait_event_timeout(i2c->queue,
+				       octeon_i2c_hlc_test_ready(i2c, &first),
+				       i2c->adap.timeout);
+	i2c->hlc_int_disable(i2c);
+	if (!time_left)
+		octeon_i2c_hlc_int_clear(i2c);
+
+	if (i2c->broken_irq_check && !time_left &&
+	    octeon_i2c_hlc_test_valid(i2c)) {
+		dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
+		i2c->broken_irq_mode = true;
+		return 0;
+	}
+
+	if (!time_left)
+		return -ETIMEDOUT;
 	return 0;
 }
 
-/**
- * octeon_i2c_read - receive data from the bus via low-level controller
- * @i2c: The struct octeon_i2c
- * @target: Target address
- * @data: Pointer to the location to store the data
- * @rlength: Length of the data
- * @recv_len: flag for length byte
- *
- * The address is sent over the bus, then the data is read.
- *
- * Returns 0 on success, otherwise a negative errno.
- */
-static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
-			   u8 *data, u16 *rlength, bool recv_len)
+/* high-level-controller pure read of up to 8 bytes */
+static int octeon_i2c_hlc_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
 {
-	int i, result, length = *rlength;
-	u8 tmp;
+	int i, j, ret = 0;
+	u64 cmd;
 
-	if (length < 1)
-		return -EINVAL;
+	octeon_i2c_hlc_enable(i2c);
+	octeon_i2c_hlc_int_clear(i2c);
 
-	result = octeon_i2c_start(i2c);
-	if (result)
-		return result;
+	cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR;
+	/* SIZE */
+	cmd |= (u64)(msgs[0].len - 1) << SW_TWSI_SIZE_SHIFT;
+	/* A */
+	cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
 
-	octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, (target << 1) | 1);
-	octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
+	if (msgs[0].flags & I2C_M_TEN)
+		cmd |= SW_TWSI_OP_10;
+	else
+		cmd |= SW_TWSI_OP_7;
 
-	result = octeon_i2c_wait(i2c);
-	if (result)
-		return result;
+	octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+	ret = octeon_i2c_hlc_wait(i2c);
+	if (ret)
+		goto err;
 
-	for (i = 0; i < length; i++) {
-		tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
+	cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+	if ((cmd & SW_TWSI_R) == 0)
+		return -EAGAIN;
 
-		if ((tmp != STAT_RXDATA_ACK) && (tmp != STAT_RXADDR_ACK)) {
-			dev_err(i2c->dev,
-				"%s: bad status before read (0x%x)\n",
-				__func__, tmp);
-			return -EIO;
-		}
+	for (i = 0, j = msgs[0].len - 1; i  < msgs[0].len && i < 4; i++, j--)
+		msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff;
 
-		if (i + 1 < length)
-			octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
-					    TWSI_CTL_ENAB | TWSI_CTL_AAK);
-		else
-			octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
-					    TWSI_CTL_ENAB);
-
-		result = octeon_i2c_wait(i2c);
-		if (result)
-			return result;
-
-		data[i] = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_DATA);
-		if (recv_len && i == 0) {
-			if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) {
-				dev_err(i2c->dev,
-					"%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
-					__func__, data[i]);
-				return -EPROTO;
-			}
-			length += data[i];
-		}
+	if (msgs[0].len > 4) {
+		cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT);
+		for (i = 0; i  < msgs[0].len - 4 && i < 4; i++, j--)
+			msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff;
 	}
-	*rlength = length;
-	return 0;
+
+err:
+	return ret;
 }
 
-/**
- * octeon_i2c_xfer - The driver's master_xfer function
- * @adap: Pointer to the i2c_adapter structure
- * @msgs: Pointer to the messages to be processed
- * @num: Length of the MSGS array
- *
- * Returns the number of messages processed, or a negative errno on failure.
- */
-static int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
-			   int num)
+/* high-level-controller pure write of up to 8 bytes */
+static int octeon_i2c_hlc_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
 {
-	struct octeon_i2c *i2c = i2c_get_adapdata(adap);
-	int i, ret = 0;
+	int i, j, ret = 0;
+	u64 cmd;
 
-	for (i = 0; ret == 0 && i < num; i++) {
-		struct i2c_msg *pmsg = &msgs[i];
+	octeon_i2c_hlc_enable(i2c);
+	octeon_i2c_hlc_int_clear(i2c);
 
-		dev_dbg(i2c->dev,
-			"Doing %s %d byte(s) to/from 0x%02x - %d of %d messages\n",
-			 pmsg->flags & I2C_M_RD ? "read" : "write",
-			 pmsg->len, pmsg->addr, i + 1, num);
-		if (pmsg->flags & I2C_M_RD)
-			ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
-					      &pmsg->len, pmsg->flags & I2C_M_RECV_LEN);
-		else
-			ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf,
-					       pmsg->len);
+	cmd = SW_TWSI_V | SW_TWSI_SOVR;
+	/* SIZE */
+	cmd |= (u64)(msgs[0].len - 1) << SW_TWSI_SIZE_SHIFT;
+	/* A */
+	cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+	if (msgs[0].flags & I2C_M_TEN)
+		cmd |= SW_TWSI_OP_10;
+	else
+		cmd |= SW_TWSI_OP_7;
+
+	for (i = 0, j = msgs[0].len - 1; i  < msgs[0].len && i < 4; i++, j--)
+		cmd |= (u64)msgs[0].buf[j] << (8 * i);
+
+	if (msgs[0].len > 4) {
+		u64 ext = 0;
+
+		for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--)
+			ext |= (u64)msgs[0].buf[j] << (8 * i);
+		octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
 	}
-	octeon_i2c_stop(i2c);
 
-	return (ret != 0) ? ret : num;
+	octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+	ret = octeon_i2c_hlc_wait(i2c);
+	if (ret)
+		goto err;
+
+	cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+	if ((cmd & SW_TWSI_R) == 0)
+		return -EAGAIN;
+
+	ret = octeon_i2c_check_status(i2c, false);
+
+err:
+	return ret;
 }
 
-static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
+/* high-level-controller composite write+read, msg0=addr, msg1=data */
+static int octeon_i2c_hlc_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
 {
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
-	       I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_SMBUS_BLOCK_PROC_CALL;
+	int i, j, ret = 0;
+	u64 cmd;
+
+	octeon_i2c_hlc_enable(i2c);
+
+	cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR;
+	/* SIZE */
+	cmd |= (u64)(msgs[1].len - 1) << SW_TWSI_SIZE_SHIFT;
+	/* A */
+	cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+	if (msgs[0].flags & I2C_M_TEN)
+		cmd |= SW_TWSI_OP_10_IA;
+	else
+		cmd |= SW_TWSI_OP_7_IA;
+
+	if (msgs[0].len == 2) {
+		u64 ext = 0;
+
+		cmd |= SW_TWSI_EIA;
+		ext = (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
+		cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT;
+		octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+	} else {
+		cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
+	}
+
+	octeon_i2c_hlc_int_clear(i2c);
+	octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+
+	ret = octeon_i2c_hlc_wait(i2c);
+	if (ret)
+		goto err;
+
+	cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+	if ((cmd & SW_TWSI_R) == 0)
+		return -EAGAIN;
+
+	for (i = 0, j = msgs[1].len - 1; i  < msgs[1].len && i < 4; i++, j--)
+		msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff;
+
+	if (msgs[1].len > 4) {
+		cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT);
+		for (i = 0; i  < msgs[1].len - 4 && i < 4; i++, j--)
+			msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff;
+	}
+
+err:
+	return ret;
 }
 
-static const struct i2c_algorithm octeon_i2c_algo = {
-	.master_xfer = octeon_i2c_xfer,
-	.functionality = octeon_i2c_functionality,
-};
+/* high-level-controller composite write+write, m[0]len<=2, m[1]len<=8 */
+static int octeon_i2c_hlc_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+	bool set_ext = false;
+	int i, j, ret = 0;
+	u64 cmd, ext = 0;
 
-static struct i2c_adapter octeon_i2c_ops = {
-	.owner = THIS_MODULE,
-	.name = "OCTEON adapter",
-	.algo = &octeon_i2c_algo,
-	.timeout = HZ / 50,
-};
+	octeon_i2c_hlc_enable(i2c);
+
+	cmd = SW_TWSI_V | SW_TWSI_SOVR;
+	/* SIZE */
+	cmd |= (u64)(msgs[1].len - 1) << SW_TWSI_SIZE_SHIFT;
+	/* A */
+	cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+	if (msgs[0].flags & I2C_M_TEN)
+		cmd |= SW_TWSI_OP_10_IA;
+	else
+		cmd |= SW_TWSI_OP_7_IA;
+
+	if (msgs[0].len == 2) {
+		cmd |= SW_TWSI_EIA;
+		ext |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
+		set_ext = true;
+		cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT;
+	} else {
+		cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
+	}
+
+	for (i = 0, j = msgs[1].len - 1; i  < msgs[1].len && i < 4; i++, j--)
+		cmd |= (u64)msgs[1].buf[j] << (8 * i);
+
+	if (msgs[1].len > 4) {
+		for (i = 0; i < msgs[1].len - 4 && i < 4; i++, j--)
+			ext |= (u64)msgs[1].buf[j] << (8 * i);
+		set_ext = true;
+	}
+	if (set_ext)
+		octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+
+	octeon_i2c_hlc_int_clear(i2c);
+	octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+
+	ret = octeon_i2c_hlc_wait(i2c);
+	if (ret)
+		goto err;
+
+	cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+	if ((cmd & SW_TWSI_R) == 0)
+		return -EAGAIN;
+
+	ret = octeon_i2c_check_status(i2c, false);
+
+err:
+	return ret;
+}
 
 /* calculate and set clock divisors */
 static void octeon_i2c_set_clock(struct octeon_i2c *i2c)
@@ -467,42 +787,342 @@
 			}
 		}
 	}
-	octeon_i2c_write_sw(i2c, SW_TWSI_OP_TWSI_CLK, thp);
-	octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
+	octeon_i2c_reg_write(i2c, SW_TWSI_OP_TWSI_CLK, thp);
+	octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
 }
 
 static int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
 {
-	u8 status;
+	u8 status = 0;
 	int tries;
 
-	/* disable high level controller, enable bus access */
-	octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
-
 	/* reset controller */
-	octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_RST, 0);
+	octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_RST, 0);
 
-	for (tries = 10; tries; tries--) {
+	for (tries = 10; tries && status != STAT_IDLE; tries--) {
 		udelay(1);
-		status = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
+		status = octeon_i2c_stat_read(i2c);
 		if (status == STAT_IDLE)
-			return 0;
+			break;
 	}
-	dev_err(i2c->dev, "%s: TWSI_RST failed! (0x%x)\n", __func__, status);
-	return -EIO;
+
+	if (status != STAT_IDLE) {
+		dev_err(i2c->dev, "%s: TWSI_RST failed! (0x%x)\n",
+			__func__, status);
+		return -EIO;
+	}
+
+	/* toggle twice to force both teardowns */
+	octeon_i2c_hlc_enable(i2c);
+	octeon_i2c_hlc_disable(i2c);
+	return 0;
 }
 
+static int octeon_i2c_recovery(struct octeon_i2c *i2c)
+{
+	int ret;
+
+	ret = i2c_recover_bus(&i2c->adap);
+	if (ret)
+		/* recover failed, try hardware re-init */
+		ret = octeon_i2c_init_lowlevel(i2c);
+	return ret;
+}
+
+/**
+ * octeon_i2c_start - send START to the bus
+ * @i2c: The struct octeon_i2c
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_start(struct octeon_i2c *i2c)
+{
+	int ret;
+	u8 stat;
+
+	octeon_i2c_hlc_disable(i2c);
+
+	octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA);
+	ret = octeon_i2c_wait(i2c);
+	if (ret)
+		goto error;
+
+	stat = octeon_i2c_stat_read(i2c);
+	if (stat == STAT_START || stat == STAT_REP_START)
+		/* START successful, bail out */
+		return 0;
+
+error:
+	/* START failed, try to recover */
+	ret = octeon_i2c_recovery(i2c);
+	return (ret) ? ret : -EAGAIN;
+}
+
+/* send STOP to the bus */
+static void octeon_i2c_stop(struct octeon_i2c *i2c)
+{
+	octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STP);
+}
+
+/**
+ * octeon_i2c_write - send data to the bus via low-level controller
+ * @i2c: The struct octeon_i2c
+ * @target: Target address
+ * @data: Pointer to the data to be sent
+ * @length: Length of the data
+ *
+ * The address is sent over the bus, then the data.
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
+			    const u8 *data, int length)
+{
+	int i, result;
+
+	octeon_i2c_data_write(i2c, target << 1);
+	octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
+
+	result = octeon_i2c_wait(i2c);
+	if (result)
+		return result;
+
+	for (i = 0; i < length; i++) {
+		result = octeon_i2c_check_status(i2c, false);
+		if (result)
+			return result;
+
+		octeon_i2c_data_write(i2c, data[i]);
+		octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
+
+		result = octeon_i2c_wait(i2c);
+		if (result)
+			return result;
+	}
+
+	return 0;
+}
+
+/**
+ * octeon_i2c_read - receive data from the bus via low-level controller
+ * @i2c: The struct octeon_i2c
+ * @target: Target address
+ * @data: Pointer to the location to store the data
+ * @rlength: Length of the data
+ * @recv_len: flag for length byte
+ *
+ * The address is sent over the bus, then the data is read.
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
+			   u8 *data, u16 *rlength, bool recv_len)
+{
+	int i, result, length = *rlength;
+	bool final_read = false;
+
+	octeon_i2c_data_write(i2c, (target << 1) | 1);
+	octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
+
+	result = octeon_i2c_wait(i2c);
+	if (result)
+		return result;
+
+	/* address OK ? */
+	result = octeon_i2c_check_status(i2c, false);
+	if (result)
+		return result;
+
+	for (i = 0; i < length; i++) {
+		/* for the last byte TWSI_CTL_AAK must not be set */
+		if (i + 1 == length)
+			final_read = true;
+
+		/* clear iflg to allow next event */
+		if (final_read)
+			octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
+		else
+			octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_AAK);
+
+		result = octeon_i2c_wait(i2c);
+		if (result)
+			return result;
+
+		data[i] = octeon_i2c_data_read(i2c);
+		if (recv_len && i == 0) {
+			if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) {
+				dev_err(i2c->dev,
+					"%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
+					__func__, data[i]);
+				return -EPROTO;
+			}
+			length += data[i];
+		}
+
+		result = octeon_i2c_check_status(i2c, final_read);
+		if (result)
+			return result;
+	}
+	*rlength = length;
+	return 0;
+}
+
+/**
+ * octeon_i2c_xfer - The driver's master_xfer function
+ * @adap: Pointer to the i2c_adapter structure
+ * @msgs: Pointer to the messages to be processed
+ * @num: Length of the MSGS array
+ *
+ * Returns the number of messages processed, or a negative errno on failure.
+ */
+static int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+			   int num)
+{
+	struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+	int i, ret = 0;
+
+	if (num == 1) {
+		if (msgs[0].len > 0 && msgs[0].len <= 8) {
+			if (msgs[0].flags & I2C_M_RD)
+				ret = octeon_i2c_hlc_read(i2c, msgs);
+			else
+				ret = octeon_i2c_hlc_write(i2c, msgs);
+			goto out;
+		}
+	} else if (num == 2) {
+		if ((msgs[0].flags & I2C_M_RD) == 0 &&
+		    (msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
+		    msgs[0].len > 0 && msgs[0].len <= 2 &&
+		    msgs[1].len > 0 && msgs[1].len <= 8 &&
+		    msgs[0].addr == msgs[1].addr) {
+			if (msgs[1].flags & I2C_M_RD)
+				ret = octeon_i2c_hlc_comp_read(i2c, msgs);
+			else
+				ret = octeon_i2c_hlc_comp_write(i2c, msgs);
+			goto out;
+		}
+	}
+
+	for (i = 0; ret == 0 && i < num; i++) {
+		struct i2c_msg *pmsg = &msgs[i];
+
+		/* zero-length messages are not supported */
+		if (!pmsg->len) {
+			ret = -EOPNOTSUPP;
+			break;
+		}
+
+		ret = octeon_i2c_start(i2c);
+		if (ret)
+			return ret;
+
+		if (pmsg->flags & I2C_M_RD)
+			ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
+					      &pmsg->len, pmsg->flags & I2C_M_RECV_LEN);
+		else
+			ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf,
+					       pmsg->len);
+	}
+	octeon_i2c_stop(i2c);
+out:
+	return (ret != 0) ? ret : num;
+}
+
+static int octeon_i2c_get_scl(struct i2c_adapter *adap)
+{
+	struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+	u64 state;
+
+	state = octeon_i2c_read_int(i2c);
+	return state & TWSI_INT_SCL;
+}
+
+static void octeon_i2c_set_scl(struct i2c_adapter *adap, int val)
+{
+	struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+
+	octeon_i2c_write_int(i2c, TWSI_INT_SCL_OVR);
+}
+
+static int octeon_i2c_get_sda(struct i2c_adapter *adap)
+{
+	struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+	u64 state;
+
+	state = octeon_i2c_read_int(i2c);
+	return state & TWSI_INT_SDA;
+}
+
+static void octeon_i2c_prepare_recovery(struct i2c_adapter *adap)
+{
+	struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+
+	/*
+	 * The stop resets the state machine; it does not _transmit_ STOP unless
+	 * the engine was active.
+	 */
+	octeon_i2c_stop(i2c);
+
+	octeon_i2c_hlc_disable(i2c);
+	octeon_i2c_write_int(i2c, 0);
+}
+
+static void octeon_i2c_unprepare_recovery(struct i2c_adapter *adap)
+{
+	struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+
+	octeon_i2c_write_int(i2c, 0);
+}
+
+static struct i2c_bus_recovery_info octeon_i2c_recovery_info = {
+	.recover_bus = i2c_generic_scl_recovery,
+	.get_scl = octeon_i2c_get_scl,
+	.set_scl = octeon_i2c_set_scl,
+	.get_sda = octeon_i2c_get_sda,
+	.prepare_recovery = octeon_i2c_prepare_recovery,
+	.unprepare_recovery = octeon_i2c_unprepare_recovery,
+};
+
+static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+	       I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_SMBUS_BLOCK_PROC_CALL;
+}
+
+static const struct i2c_algorithm octeon_i2c_algo = {
+	.master_xfer = octeon_i2c_xfer,
+	.functionality = octeon_i2c_functionality,
+};
+
+static struct i2c_adapter octeon_i2c_ops = {
+	.owner = THIS_MODULE,
+	.name = "OCTEON adapter",
+	.algo = &octeon_i2c_algo,
+};
+
 static int octeon_i2c_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
+	int irq, result = 0, hlc_irq = 0;
 	struct resource *res_mem;
 	struct octeon_i2c *i2c;
-	int irq, result = 0;
+	bool cn78xx_style;
 
-	/* All adaptors have an irq.  */
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0)
-		return irq;
+	cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-twsi");
+	if (cn78xx_style) {
+		hlc_irq = platform_get_irq(pdev, 0);
+		if (hlc_irq < 0)
+			return hlc_irq;
+
+		irq = platform_get_irq(pdev, 2);
+		if (irq < 0)
+			return irq;
+	} else {
+		/* All adaptors have an irq.  */
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0)
+			return irq;
+	}
 
 	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
 	if (!i2c) {
@@ -537,6 +1157,31 @@
 
 	i2c->irq = irq;
 
+	if (cn78xx_style) {
+		i2c->hlc_irq = hlc_irq;
+
+		i2c->int_enable = octeon_i2c_int_enable78;
+		i2c->int_disable = octeon_i2c_int_disable78;
+		i2c->hlc_int_enable = octeon_i2c_hlc_int_enable78;
+		i2c->hlc_int_disable = octeon_i2c_hlc_int_disable78;
+
+		irq_set_status_flags(i2c->irq, IRQ_NOAUTOEN);
+		irq_set_status_flags(i2c->hlc_irq, IRQ_NOAUTOEN);
+
+		result = devm_request_irq(&pdev->dev, i2c->hlc_irq,
+					  octeon_i2c_hlc_isr78, 0,
+					  DRV_NAME, i2c);
+		if (result < 0) {
+			dev_err(i2c->dev, "failed to attach interrupt\n");
+			goto out;
+		}
+	} else {
+		i2c->int_enable = octeon_i2c_int_enable;
+		i2c->int_disable = octeon_i2c_int_disable;
+		i2c->hlc_int_enable = octeon_i2c_hlc_int_enable;
+		i2c->hlc_int_disable = octeon_i2c_int_disable;
+	}
+
 	result = devm_request_irq(&pdev->dev, i2c->irq,
 				  octeon_i2c_isr, 0, DRV_NAME, i2c);
 	if (result < 0) {
@@ -544,6 +1189,9 @@
 		goto out;
 	}
 
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+		i2c->broken_irq_check = true;
+
 	result = octeon_i2c_init_lowlevel(i2c);
 	if (result) {
 		dev_err(i2c->dev, "init low level failed\n");
@@ -553,6 +1201,9 @@
 	octeon_i2c_set_clock(i2c);
 
 	i2c->adap = octeon_i2c_ops;
+	i2c->adap.timeout = msecs_to_jiffies(2);
+	i2c->adap.retries = 5;
+	i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
 	i2c->adap.dev.parent = &pdev->dev;
 	i2c->adap.dev.of_node = node;
 	i2c_set_adapdata(&i2c->adap, i2c);
@@ -580,6 +1231,7 @@
 
 static const struct of_device_id octeon_i2c_match[] = {
 	{ .compatible = "cavium,octeon-3860-twsi", },
+	{ .compatible = "cavium,octeon-7890-twsi", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, octeon_i2c_match);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 13c4529..ab1279b 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -185,7 +185,6 @@
 #define OMAP_I2C_IP_V2_INTERRUPTS_MASK	0x6FFF
 
 struct omap_i2c_dev {
-	spinlock_t		lock;		/* IRQ synchronization */
 	struct device		*dev;
 	void __iomem		*base;		/* virtual */
 	int			irq;
@@ -995,15 +994,12 @@
 	u16 mask;
 	u16 stat;
 
-	spin_lock(&omap->lock);
-	mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
 	stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
+	mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
 
 	if (stat & mask)
 		ret = IRQ_WAKE_THREAD;
 
-	spin_unlock(&omap->lock);
-
 	return ret;
 }
 
@@ -1011,12 +1007,10 @@
 omap_i2c_isr_thread(int this_irq, void *dev_id)
 {
 	struct omap_i2c_dev *omap = dev_id;
-	unsigned long flags;
 	u16 bits;
 	u16 stat;
 	int err = 0, count = 0;
 
-	spin_lock_irqsave(&omap->lock, flags);
 	do {
 		bits = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
 		stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
@@ -1142,8 +1136,6 @@
 	omap_i2c_complete_cmd(omap, err);
 
 out:
-	spin_unlock_irqrestore(&omap->lock, flags);
-
 	return IRQ_HANDLED;
 }
 
@@ -1330,8 +1322,6 @@
 	omap->dev = &pdev->dev;
 	omap->irq = irq;
 
-	spin_lock_init(&omap->lock);
-
 	platform_set_drvdata(pdev, omap);
 	init_completion(&omap->cmd_complete);
 
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 6abcf69..b0d9dee 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -150,13 +150,11 @@
 {
 	struct pmac_i2c_bus	*bus = i2c_get_adapdata(adap);
 	int			rc = 0;
-	int			read;
 	int			addrdir;
 
 	if (msgs->flags & I2C_M_TEN)
 		return -EINVAL;
-	read = (msgs->flags & I2C_M_RD) != 0;
-	addrdir = (msgs->addr << 1) | read;
+	addrdir = i2c_8bit_addr_from_msg(msgs);
 
 	rc = pmac_i2c_open(bus, 0);
 	if (rc) {
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 23eaabb..cc6439a 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -515,7 +515,7 @@
 static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
 			    struct i2c_msg *msg,  int is_dma)
 {
-	u16 addr = (msg->addr << 1) | ((msg->flags & I2C_M_RD) == I2C_M_RD);
+	u16 addr = i2c_8bit_addr_from_msg(msg);
 	int len = 0;
 	int data_len;
 
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 68ecb56..9aca1b4 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -21,6 +21,8 @@
  */
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -43,6 +45,8 @@
 #define ICSAR	0x1C	/* slave address */
 #define ICMAR	0x20	/* master address */
 #define ICRXTX	0x24	/* data port */
+#define ICDMAER	0x3c	/* DMA enable */
+#define ICFBSCR	0x38	/* first bit setup cycle */
 
 /* ICSCR */
 #define SDBS	(1 << 3)	/* slave data buffer select */
@@ -78,6 +82,16 @@
 #define MDR	(1 << 1)
 #define MAT	(1 << 0)	/* slave addr xfer done */
 
+/* ICDMAER */
+#define RSDMAE	(1 << 3)	/* DMA Slave Received Enable */
+#define TSDMAE	(1 << 2)	/* DMA Slave Transmitted Enable */
+#define RMDMAE	(1 << 1)	/* DMA Master Received Enable */
+#define TMDMAE	(1 << 0)	/* DMA Master Transmitted Enable */
+
+/* ICFBSCR */
+#define TCYC06	0x04		/*  6*Tcyc delay 1st bit between SDA and SCL */
+#define TCYC17	0x0f		/* 17*Tcyc delay 1st bit between SDA and SCL */
+
 
 #define RCAR_BUS_PHASE_START	(MDBS | MIE | ESG)
 #define RCAR_BUS_PHASE_DATA	(MDBS | MIE)
@@ -120,6 +134,12 @@
 	u32 flags;
 	enum rcar_i2c_type devtype;
 	struct i2c_client *slave;
+
+	struct resource *res;
+	struct dma_chan *dma_tx;
+	struct dma_chan *dma_rx;
+	struct scatterlist sg;
+	enum dma_data_direction dma_direction;
 };
 
 #define rcar_i2c_priv_to_dev(p)		((p)->adap.dev.parent)
@@ -287,6 +307,118 @@
 /*
  *		interrupt functions
  */
+static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
+{
+	struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE
+		? priv->dma_rx : priv->dma_tx;
+
+	/* Disable DMA Master Received/Transmitted */
+	rcar_i2c_write(priv, ICDMAER, 0);
+
+	/* Reset default delay */
+	rcar_i2c_write(priv, ICFBSCR, TCYC06);
+
+	dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
+			 priv->msg->len, priv->dma_direction);
+
+	priv->dma_direction = DMA_NONE;
+}
+
+static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv)
+{
+	if (priv->dma_direction == DMA_NONE)
+		return;
+	else if (priv->dma_direction == DMA_FROM_DEVICE)
+		dmaengine_terminate_all(priv->dma_rx);
+	else if (priv->dma_direction == DMA_TO_DEVICE)
+		dmaengine_terminate_all(priv->dma_tx);
+
+	rcar_i2c_dma_unmap(priv);
+}
+
+static void rcar_i2c_dma_callback(void *data)
+{
+	struct rcar_i2c_priv *priv = data;
+
+	priv->pos += sg_dma_len(&priv->sg);
+
+	rcar_i2c_dma_unmap(priv);
+}
+
+static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
+{
+	struct device *dev = rcar_i2c_priv_to_dev(priv);
+	struct i2c_msg *msg = priv->msg;
+	bool read = msg->flags & I2C_M_RD;
+	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	struct dma_chan *chan = read ? priv->dma_rx : priv->dma_tx;
+	struct dma_async_tx_descriptor *txdesc;
+	dma_addr_t dma_addr;
+	dma_cookie_t cookie;
+	unsigned char *buf;
+	int len;
+
+	/* Do not use DMA if it's not available or for messages < 8 bytes */
+	if (IS_ERR(chan) || msg->len < 8)
+		return;
+
+	if (read) {
+		/*
+		 * The last two bytes need to be fetched using PIO in
+		 * order for the STOP phase to work.
+		 */
+		buf = priv->msg->buf;
+		len = priv->msg->len - 2;
+	} else {
+		/*
+		 * The first byte in the message was sent using PIO.
+		 */
+		buf = priv->msg->buf + 1;
+		len = priv->msg->len - 1;
+	}
+
+	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+	if (dma_mapping_error(dev, dma_addr)) {
+		dev_dbg(dev, "dma map failed, using PIO\n");
+		return;
+	}
+
+	sg_dma_len(&priv->sg) = len;
+	sg_dma_address(&priv->sg) = dma_addr;
+
+	priv->dma_direction = dir;
+
+	txdesc = dmaengine_prep_slave_sg(chan, &priv->sg, 1,
+					 read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!txdesc) {
+		dev_dbg(dev, "dma prep slave sg failed, using PIO\n");
+		rcar_i2c_cleanup_dma(priv);
+		return;
+	}
+
+	txdesc->callback = rcar_i2c_dma_callback;
+	txdesc->callback_param = priv;
+
+	cookie = dmaengine_submit(txdesc);
+	if (dma_submit_error(cookie)) {
+		dev_dbg(dev, "submitting dma failed, using PIO\n");
+		rcar_i2c_cleanup_dma(priv);
+		return;
+	}
+
+	/* Set delay for DMA operations */
+	rcar_i2c_write(priv, ICFBSCR, TCYC17);
+
+	/* Enable DMA Master Received/Transmitted */
+	if (read)
+		rcar_i2c_write(priv, ICDMAER, RMDMAE);
+	else
+		rcar_i2c_write(priv, ICDMAER, TMDMAE);
+
+	dma_async_issue_pending(chan);
+}
+
 static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
 {
 	struct i2c_msg *msg = priv->msg;
@@ -306,6 +438,12 @@
 		rcar_i2c_write(priv, ICRXTX, msg->buf[priv->pos]);
 		priv->pos++;
 
+		/*
+		 * Try to use DMA to transmit the rest of the data if
+		 * address transfer phase just finished.
+		 */
+		if (msr & MAT)
+			rcar_i2c_dma(priv);
 	} else {
 		/*
 		 * The last data was pushed to ICRXTX on _PREV_ empty irq.
@@ -340,7 +478,11 @@
 		return;
 
 	if (msr & MAT) {
-		/* Address transfer phase finished, but no data at this point. */
+		/*
+		 * Address transfer phase finished, but no data at this point.
+		 * Try to use DMA to receive data.
+		 */
+		rcar_i2c_dma(priv);
 	} else if (priv->pos < msg->len) {
 		/* get received data */
 		msg->buf[priv->pos] = rcar_i2c_read(priv, ICRXTX);
@@ -472,6 +614,81 @@
 	return IRQ_HANDLED;
 }
 
+static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev,
+					enum dma_transfer_direction dir,
+					dma_addr_t port_addr)
+{
+	struct dma_chan *chan;
+	struct dma_slave_config cfg;
+	char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
+	int ret;
+
+	chan = dma_request_slave_channel_reason(dev, chan_name);
+	if (IS_ERR(chan)) {
+		ret = PTR_ERR(chan);
+		dev_dbg(dev, "request_channel failed for %s (%d)\n",
+			chan_name, ret);
+		return chan;
+	}
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.direction = dir;
+	if (dir == DMA_MEM_TO_DEV) {
+		cfg.dst_addr = port_addr;
+		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	} else {
+		cfg.src_addr = port_addr;
+		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	}
+
+	ret = dmaengine_slave_config(chan, &cfg);
+	if (ret) {
+		dev_dbg(dev, "slave_config failed for %s (%d)\n",
+			chan_name, ret);
+		dma_release_channel(chan);
+		return ERR_PTR(ret);
+	}
+
+	dev_dbg(dev, "got DMA channel for %s\n", chan_name);
+	return chan;
+}
+
+static void rcar_i2c_request_dma(struct rcar_i2c_priv *priv,
+				 struct i2c_msg *msg)
+{
+	struct device *dev = rcar_i2c_priv_to_dev(priv);
+	bool read;
+	struct dma_chan *chan;
+	enum dma_transfer_direction dir;
+
+	read = msg->flags & I2C_M_RD;
+
+	chan = read ? priv->dma_rx : priv->dma_tx;
+	if (PTR_ERR(chan) != -EPROBE_DEFER)
+		return;
+
+	dir = read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+	chan = rcar_i2c_request_dma_chan(dev, dir, priv->res->start + ICRXTX);
+
+	if (read)
+		priv->dma_rx = chan;
+	else
+		priv->dma_tx = chan;
+}
+
+static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
+{
+	if (!IS_ERR(priv->dma_tx)) {
+		dma_release_channel(priv->dma_tx);
+		priv->dma_tx = ERR_PTR(-EPROBE_DEFER);
+	}
+
+	if (!IS_ERR(priv->dma_rx)) {
+		dma_release_channel(priv->dma_rx);
+		priv->dma_rx = ERR_PTR(-EPROBE_DEFER);
+	}
+}
+
 static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 				struct i2c_msg *msgs,
 				int num)
@@ -493,6 +710,7 @@
 			ret = -EOPNOTSUPP;
 			goto out;
 		}
+		rcar_i2c_request_dma(priv, msgs + i);
 	}
 
 	/* init first message */
@@ -504,6 +722,7 @@
 	time_left = wait_event_timeout(priv->wait, priv->flags & ID_DONE,
 				     num * adap->timeout);
 	if (!time_left) {
+		rcar_i2c_cleanup_dma(priv);
 		rcar_i2c_init(priv);
 		ret = -ETIMEDOUT;
 	} else if (priv->flags & ID_NACK) {
@@ -591,7 +810,6 @@
 {
 	struct rcar_i2c_priv *priv;
 	struct i2c_adapter *adap;
-	struct resource *res;
 	struct device *dev = &pdev->dev;
 	struct i2c_timings i2c_t;
 	int irq, ret;
@@ -606,8 +824,9 @@
 		return PTR_ERR(priv->clk);
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->io = devm_ioremap_resource(dev, res);
+	priv->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	priv->io = devm_ioremap_resource(dev, priv->res);
 	if (IS_ERR(priv->io))
 		return PTR_ERR(priv->io);
 
@@ -626,6 +845,11 @@
 
 	i2c_parse_fw_timings(dev, &i2c_t, false);
 
+	/* Init DMA */
+	sg_init_table(&priv->sg, 1);
+	priv->dma_direction = DMA_NONE;
+	priv->dma_rx = priv->dma_tx = ERR_PTR(-EPROBE_DEFER);
+
 	pm_runtime_enable(dev);
 	pm_runtime_get_sync(dev);
 	ret = rcar_i2c_clock_calculate(priv, &i2c_t);
@@ -673,6 +897,7 @@
 	struct device *dev = &pdev->dev;
 
 	i2c_del_adapter(&priv->adap);
+	rcar_i2c_release_dma(priv);
 	if (priv->flags & ID_P_PM_BLOCKED)
 		pm_runtime_put(dev);
 	pm_runtime_disable(dev);
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 9096d17..80bed02 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -101,10 +101,7 @@
 	struct notifier_block clk_rate_nb;
 
 	/* Settings */
-	unsigned int scl_frequency;
-	unsigned int scl_rise_ns;
-	unsigned int scl_fall_ns;
-	unsigned int sda_fall_ns;
+	struct i2c_timings t;
 
 	/* Synchronization & notification */
 	spinlock_t lock;
@@ -437,10 +434,7 @@
  * Calculate divider values for desired SCL frequency
  *
  * @clk_rate: I2C input clock rate
- * @scl_rate: Desired SCL rate
- * @scl_rise_ns: How many ns it takes for SCL to rise.
- * @scl_fall_ns: How many ns it takes for SCL to fall.
- * @sda_fall_ns: How many ns it takes for SDA to fall.
+ * @t: Known I2C timing information.
  * @div_low: Divider output for low
  * @div_high: Divider output for high
  *
@@ -448,11 +442,10 @@
  * a best-effort divider value is returned in divs. If the target rate is
  * too high, we silently use the highest possible rate.
  */
-static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
-			      unsigned long scl_rise_ns,
-			      unsigned long scl_fall_ns,
-			      unsigned long sda_fall_ns,
-			      unsigned long *div_low, unsigned long *div_high)
+static int rk3x_i2c_calc_divs(unsigned long clk_rate,
+			      struct i2c_timings *t,
+			      unsigned long *div_low,
+			      unsigned long *div_high)
 {
 	unsigned long spec_min_low_ns, spec_min_high_ns;
 	unsigned long spec_setup_start, spec_max_data_hold_ns;
@@ -472,12 +465,12 @@
 	int ret = 0;
 
 	/* Only support standard-mode and fast-mode */
-	if (WARN_ON(scl_rate > 400000))
-		scl_rate = 400000;
+	if (WARN_ON(t->bus_freq_hz > 400000))
+		t->bus_freq_hz = 400000;
 
 	/* prevent scl_rate_khz from becoming 0 */
-	if (WARN_ON(scl_rate < 1000))
-		scl_rate = 1000;
+	if (WARN_ON(t->bus_freq_hz < 1000))
+		t->bus_freq_hz = 1000;
 
 	/*
 	 * min_low_ns:  The minimum number of ns we need to hold low to
@@ -491,7 +484,7 @@
 	 *	 This is because the i2c host on Rockchip holds the data line
 	 *	 for half the low time.
 	 */
-	if (scl_rate <= 100000) {
+	if (t->bus_freq_hz <= 100000) {
 		/* Standard-mode */
 		spec_min_low_ns = 4700;
 		spec_setup_start = 4700;
@@ -506,7 +499,7 @@
 		spec_max_data_hold_ns = 900;
 		data_hold_buffer_ns = 50;
 	}
-	min_high_ns = scl_rise_ns + spec_min_high_ns;
+	min_high_ns = t->scl_rise_ns + spec_min_high_ns;
 
 	/*
 	 * Timings for repeated start:
@@ -517,18 +510,18 @@
 	 * we meet tSU;STA and tHD;STA times.
 	 */
 	min_high_ns = max(min_high_ns,
-		DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000, 875));
+		DIV_ROUND_UP((t->scl_rise_ns + spec_setup_start) * 1000, 875));
 	min_high_ns = max(min_high_ns,
-		DIV_ROUND_UP((scl_rise_ns + spec_setup_start +
-			      sda_fall_ns + spec_min_high_ns), 2));
+		DIV_ROUND_UP((t->scl_rise_ns + spec_setup_start +
+			      t->sda_fall_ns + spec_min_high_ns), 2));
 
-	min_low_ns = scl_fall_ns + spec_min_low_ns;
+	min_low_ns = t->scl_fall_ns + spec_min_low_ns;
 	max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns;
 	min_total_ns = min_low_ns + min_high_ns;
 
 	/* Adjust to avoid overflow */
 	clk_rate_khz = DIV_ROUND_UP(clk_rate, 1000);
-	scl_rate_khz = scl_rate / 1000;
+	scl_rate_khz = t->bus_freq_hz / 1000;
 
 	/*
 	 * We need the total div to be >= this number
@@ -616,14 +609,13 @@
 
 static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
 {
+	struct i2c_timings *t = &i2c->t;
 	unsigned long div_low, div_high;
 	u64 t_low_ns, t_high_ns;
 	int ret;
 
-	ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->scl_rise_ns,
-				 i2c->scl_fall_ns, i2c->sda_fall_ns,
-				 &div_low, &div_high);
-	WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency);
+	ret = rk3x_i2c_calc_divs(clk_rate, t, &div_low, &div_high);
+	WARN_ONCE(ret != 0, "Could not reach SCL freq %u", t->bus_freq_hz);
 
 	clk_enable(i2c->clk);
 	i2c_writel(i2c, (div_high << 16) | (div_low & 0xffff), REG_CLKDIV);
@@ -634,7 +626,7 @@
 	dev_dbg(i2c->dev,
 		"CLK %lukhz, Req %uns, Act low %lluns high %lluns\n",
 		clk_rate / 1000,
-		1000000000 / i2c->scl_frequency,
+		1000000000 / t->bus_freq_hz,
 		t_low_ns, t_high_ns);
 }
 
@@ -664,9 +656,7 @@
 
 	switch (event) {
 	case PRE_RATE_CHANGE:
-		if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency,
-				       i2c->scl_rise_ns, i2c->scl_fall_ns,
-				       i2c->sda_fall_ns,
+		if (rk3x_i2c_calc_divs(ndata->new_rate, &i2c->t,
 				       &div_low, &div_high) != 0)
 			return NOTIFY_STOP;
 
@@ -855,6 +845,7 @@
 static const struct of_device_id rk3x_i2c_match[] = {
 	{ .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
 	{ .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
+	{ .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
 	{ .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
 	{},
 };
@@ -879,37 +870,8 @@
 	match = of_match_node(rk3x_i2c_match, np);
 	i2c->soc_data = (struct rk3x_i2c_soc_data *)match->data;
 
-	if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
-				 &i2c->scl_frequency)) {
-		dev_info(&pdev->dev, "using default SCL frequency: %d\n",
-			 DEFAULT_SCL_RATE);
-		i2c->scl_frequency = DEFAULT_SCL_RATE;
-	}
-
-	if (i2c->scl_frequency == 0 || i2c->scl_frequency > 400 * 1000) {
-		dev_warn(&pdev->dev, "invalid SCL frequency specified.\n");
-		dev_warn(&pdev->dev, "using default SCL frequency: %d\n",
-			 DEFAULT_SCL_RATE);
-		i2c->scl_frequency = DEFAULT_SCL_RATE;
-	}
-
-	/*
-	 * Read rise and fall time from device tree. If not available use
-	 * the default maximum timing from the specification.
-	 */
-	if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-rising-time-ns",
-				 &i2c->scl_rise_ns)) {
-		if (i2c->scl_frequency <= 100000)
-			i2c->scl_rise_ns = 1000;
-		else
-			i2c->scl_rise_ns = 300;
-	}
-	if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-falling-time-ns",
-				 &i2c->scl_fall_ns))
-		i2c->scl_fall_ns = 300;
-	if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
-				 &i2c->sda_fall_ns))
-		i2c->sda_fall_ns = i2c->scl_fall_ns;
+	/* use common interface to get I2C timing properties */
+	i2c_parse_fw_timings(&pdev->dev, &i2c->t, true);
 
 	strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
 	i2c->adap.owner = THIS_MODULE;
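
Both the rcar and rk3x hunks above move timing parsing to i2c_parse_fw_timings(), which fills a struct i2c_timings in one call. A hedged sketch of the call as used above (the dev_dbg line is illustrative):

#include <linux/device.h>
#include <linux/i2c.h>

/*
 * Sketch only: pull the standard timing properties (clock-frequency,
 * i2c-scl-rising-time-ns, ...) into one structure instead of
 * open-coding of_property_read_u32() for each of them.
 */
static void example_get_timings(struct device *dev)
{
	struct i2c_timings t;

	/* true => fall back to the spec defaults for missing properties */
	i2c_parse_fw_timings(dev, &t, true);

	dev_dbg(dev, "SCL %u Hz, rise %u ns, fall %u ns\n",
		t.bus_freq_hz, t.scl_rise_ns, t.scl_fall_ns);
}

Passing true asks the helper to supply defaults for any property the firmware omits, which is why the per-property fallback code removed from rk3x above is no longer needed.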
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 362a6de..38dc1ca 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -163,15 +163,14 @@
 MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
 #endif
 
-/* s3c24xx_get_device_quirks
- *
+/*
  * Get controller type either from device tree or platform device variant.
-*/
-
+ */
 static inline kernel_ulong_t s3c24xx_get_device_quirks(struct platform_device *pdev)
 {
 	if (pdev->dev.of_node) {
 		const struct of_device_id *match;
+
 		match = of_match_node(s3c24xx_i2c_match, pdev->dev.of_node);
 		return (kernel_ulong_t)match->data;
 	}
@@ -179,12 +178,10 @@
 	return platform_get_device_id(pdev)->driver_data;
 }
 
-/* s3c24xx_i2c_master_complete
- *
- * complete the message and wake up the caller, using the given return code,
+/*
+ * Complete the message and wake up the caller, using the given return code,
  * or zero to mean ok.
-*/
-
+ */
 static inline void s3c24xx_i2c_master_complete(struct s3c24xx_i2c *i2c, int ret)
 {
 	dev_dbg(i2c->dev, "master_complete %d\n", ret);
@@ -217,7 +214,6 @@
 }
 
 /* irq enable/disable functions */
-
 static inline void s3c24xx_i2c_disable_irq(struct s3c24xx_i2c *i2c)
 {
 	unsigned long tmp;
@@ -251,11 +247,9 @@
 	return false;
 }
 
-/* s3c24xx_i2c_message_start
- *
+/*
  * put the start of a message onto the bus
-*/
-
+ */
 static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
 				      struct i2c_msg *msg)
 {
@@ -284,9 +278,10 @@
 	dev_dbg(i2c->dev, "START: %08lx to IICSTAT, %02x to DS\n", stat, addr);
 	writeb(addr, i2c->regs + S3C2410_IICDS);
 
-	/* delay here to ensure the data byte has gotten onto the bus
-	 * before the transaction is started */
-
+	/*
+	 * delay here to ensure the data byte has gotten onto the bus
+	 * before the transaction is started
+	 */
 	ndelay(i2c->tx_setup);
 
 	dev_dbg(i2c->dev, "iiccon, %08lx\n", iiccon);
@@ -361,50 +356,46 @@
 	s3c24xx_i2c_disable_irq(i2c);
 }
 
-/* helper functions to determine the current state in the set of
- * messages we are sending */
+/*
+ * helper functions to determine the current state in the set of
+ * messages we are sending
+ */
 
-/* is_lastmsg()
- *
+/*
  * returns TRUE if the current message is the last in the set
-*/
-
+ */
 static inline int is_lastmsg(struct s3c24xx_i2c *i2c)
 {
 	return i2c->msg_idx >= (i2c->msg_num - 1);
 }
 
-/* is_msglast
- *
+/*
  * returns TRUE if this is the last byte in the current message
-*/
-
+ */
 static inline int is_msglast(struct s3c24xx_i2c *i2c)
 {
-	/* msg->len is always 1 for the first byte of smbus block read.
+	/*
+	 * msg->len is always 1 for the first byte of smbus block read.
 	 * Actual length will be read from slave. More bytes will be
-	 * read according to the length then. */
+	 * read according to the length then.
+	 */
 	if (i2c->msg->flags & I2C_M_RECV_LEN && i2c->msg->len == 1)
 		return 0;
 
 	return i2c->msg_ptr == i2c->msg->len-1;
 }
 
-/* is_msgend
- *
+/*
  * returns TRUE if we reached the end of the current message
-*/
-
+ */
 static inline int is_msgend(struct s3c24xx_i2c *i2c)
 {
 	return i2c->msg_ptr >= i2c->msg->len;
 }
 
-/* i2c_s3c_irq_nextbyte
- *
+/*
  * process an interrupt and work out what to do
  */
-
 static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
 {
 	unsigned long tmp;
@@ -423,14 +414,13 @@
 		goto out_ack;
 
 	case STATE_START:
-		/* last thing we did was send a start condition on the
+		/*
+		 * last thing we did was send a start condition on the
 		 * bus, or started a new i2c message
 		 */
-
 		if (iicstat & S3C2410_IICSTAT_LASTBIT &&
 		    !(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
 			/* ack was not received... */
-
 			dev_dbg(i2c->dev, "ack was not received\n");
 			s3c24xx_i2c_stop(i2c, -ENXIO);
 			goto out_ack;
@@ -441,9 +431,10 @@
 		else
 			i2c->state = STATE_WRITE;
 
-		/* terminate the transfer if there is nothing to do
-		 * as this is used by the i2c probe to find devices. */
-
+		/*
+		 * Terminate the transfer if there is nothing to do
+		 * as this is used by the i2c probe to find devices.
+		 */
 		if (is_lastmsg(i2c) && i2c->msg->len == 0) {
 			s3c24xx_i2c_stop(i2c, 0);
 			goto out_ack;
@@ -452,14 +443,16 @@
 		if (i2c->state == STATE_READ)
 			goto prepare_read;
 
-		/* fall through to the write state, as we will need to
-		 * send a byte as well */
-
-	case STATE_WRITE:
-		/* we are writing data to the device... check for the
-		 * end of the message, and if so, work out what to do
+		/*
+		 * fall through to the write state, as we will need to
+		 * send a byte as well
 		 */
 
+	case STATE_WRITE:
+		/*
+		 * we are writing data to the device... check for the
+		 * end of the message, and if so, work out what to do
+		 */
 		if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
 			if (iicstat & S3C2410_IICSTAT_LASTBIT) {
 				dev_dbg(i2c->dev, "WRITE: No Ack\n");
@@ -475,12 +468,13 @@
 			byte = i2c->msg->buf[i2c->msg_ptr++];
 			writeb(byte, i2c->regs + S3C2410_IICDS);
 
-			/* delay after writing the byte to allow the
+			/*
+			 * delay after writing the byte to allow the
 			 * data setup time on the bus, as writing the
 			 * data to the register causes the first bit
 			 * to appear on SDA, and SCL will change as
-			 * soon as the interrupt is acknowledged */
-
+			 * soon as the interrupt is acknowledged
+			 */
 			ndelay(i2c->tx_setup);
 
 		} else if (!is_lastmsg(i2c)) {
@@ -496,10 +490,11 @@
 			if (i2c->msg->flags & I2C_M_NOSTART) {
 
 				if (i2c->msg->flags & I2C_M_RD) {
-					/* cannot do this, the controller
+					/*
+					 * cannot do this, the controller
 					 * forces us to send a new START
-					 * when we change direction */
-
+					 * when we change direction
+					 */
 					s3c24xx_i2c_stop(i2c, -EINVAL);
 				}
 
@@ -512,17 +507,16 @@
 
 		} else {
 			/* send stop */
-
 			s3c24xx_i2c_stop(i2c, 0);
 		}
 		break;
 
 	case STATE_READ:
-		/* we have a byte of data in the data register, do
+		/*
+		 * we have a byte of data in the data register, do
 		 * something with it, and then work out whether we are
 		 * going to do any more read/write
 		 */
-
 		byte = readb(i2c->regs + S3C2410_IICDS);
 		i2c->msg->buf[i2c->msg_ptr++] = byte;
 
@@ -537,9 +531,10 @@
 				s3c24xx_i2c_disable_ack(i2c);
 
 		} else if (is_msgend(i2c)) {
-			/* ok, we've read the entire buffer, see if there
-			 * is anything else we need to do */
-
+			/*
+			 * ok, we've read the entire buffer, see if there
+			 * is anything else we need to do
+			 */
 			if (is_lastmsg(i2c)) {
 				/* last message, send stop and complete */
 				dev_dbg(i2c->dev, "READ: Send Stop\n");
@@ -568,11 +563,9 @@
 	return ret;
 }
 
-/* s3c24xx_i2c_irq
- *
+/*
  * top level IRQ servicing routine
-*/
-
+ */
 static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
 {
 	struct s3c24xx_i2c *i2c = dev_id;
@@ -595,9 +588,10 @@
 		goto out;
 	}
 
-	/* pretty much this leaves us with the fact that we've
-	 * transmitted or received whatever byte we last sent */
-
+	/*
+	 * pretty much this leaves us with the fact that we've
+	 * transmitted or received whatever byte we last sent
+	 */
 	i2c_s3c_irq_nextbyte(i2c, status);
 
  out:
@@ -630,11 +624,9 @@
 }
 
 
-/* s3c24xx_i2c_set_master
- *
+/*
  * get the i2c bus for a master transaction
-*/
-
+ */
 static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
 {
 	unsigned long iicstat;
@@ -652,11 +644,9 @@
 	return -ETIMEDOUT;
 }
 
-/* s3c24xx_i2c_wait_idle
- *
+/*
  * wait for the i2c bus to become idle.
-*/
-
+ */
 static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
 {
 	unsigned long iicstat;
@@ -706,11 +696,9 @@
 		dev_warn(i2c->dev, "timeout waiting for bus idle\n");
 }
 
-/* s3c24xx_i2c_doxfer
- *
+/*
  * this starts an i2c transfer
-*/
-
+ */
 static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
 			      struct i2c_msg *msgs, int num)
 {
@@ -749,9 +737,10 @@
 
 	ret = i2c->msg_idx;
 
-	/* having these next two as dev_err() makes life very
-	 * noisy when doing an i2cdetect */
-
+	/*
+	 * Having these next two as dev_err() makes life very
+	 * noisy when doing an i2cdetect
+	 */
 	if (timeout == 0)
 		dev_dbg(i2c->dev, "timeout\n");
 	else if (ret != num)
@@ -771,12 +760,10 @@
 	return ret;
 }
 
-/* s3c24xx_i2c_xfer
- *
+/*
  * first port of call from the i2c bus code when an message needs
  * transferring across the i2c bus.
-*/
-
+ */
 static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
 			struct i2c_msg *msgs, int num)
 {
@@ -814,17 +801,14 @@
 }
 
 /* i2c bus registration info */
-
 static const struct i2c_algorithm s3c24xx_i2c_algorithm = {
 	.master_xfer		= s3c24xx_i2c_xfer,
 	.functionality		= s3c24xx_i2c_func,
 };
 
-/* s3c24xx_i2c_calcdivisor
- *
+/*
  * return the divisor settings for a given frequency
-*/
-
+ */
 static int s3c24xx_i2c_calcdivisor(unsigned long clkin, unsigned int wanted,
 				   unsigned int *div1, unsigned int *divs)
 {
@@ -850,13 +834,11 @@
 	return clkin / (calc_divs * calc_div1);
 }
 
-/* s3c24xx_i2c_clockrate
- *
+/*
  * work out a divisor for the user requested frequency setting,
  * either by the requested frequency, or scanning the acceptable
  * range of frequencies until something is found
-*/
-
+ */
 static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
 {
 	struct s3c2410_platform_i2c *pdata = i2c->pdata;
@@ -944,7 +926,7 @@
 		i2c_unlock_adapter(&i2c->adap);
 
 		if (ret < 0)
-			dev_err(i2c->dev, "cannot find frequency\n");
+			dev_err(i2c->dev, "cannot find frequency (%d)\n", ret);
 		else
 			dev_info(i2c->dev, "setting freq %d\n", got);
 	}
@@ -995,7 +977,8 @@
 
 		ret = gpio_request(gpio, "i2c-bus");
 		if (ret) {
-			dev_err(i2c->dev, "gpio [%d] request failed\n", gpio);
+			dev_err(i2c->dev, "gpio [%d] request failed (%d)\n",
+				gpio, ret);
 			goto free_gpio;
 		}
 	}
@@ -1028,11 +1011,9 @@
 }
 #endif
 
-/* s3c24xx_i2c_init
- *
+/*
  * initialise the controller, set the IO lines and frequency
-*/
-
+ */
 static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
 {
 	struct s3c2410_platform_i2c *pdata;
@@ -1068,11 +1049,9 @@
 }
 
 #ifdef CONFIG_OF
-/* s3c24xx_i2c_parse_dt
- *
+/*
  * Parse the device tree node and retrieve the platform data.
-*/
-
+ */
 static void
 s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
 {
@@ -1105,17 +1084,9 @@
 }
 #else
 static void
-s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
-{
-	return;
-}
+s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c) { }
 #endif
 
-/* s3c24xx_i2c_probe
- *
- * called by the bus driver when a suitable device is found
-*/
-
 static int s3c24xx_i2c_probe(struct platform_device *pdev)
 {
 	struct s3c24xx_i2c *i2c;
@@ -1156,7 +1127,6 @@
 	init_waitqueue_head(&i2c->wait);
 
 	/* find the clock and enable it */
-
 	i2c->dev = &pdev->dev;
 	i2c->clk = devm_clk_get(&pdev->dev, "i2c");
 	if (IS_ERR(i2c->clk)) {
@@ -1166,9 +1136,7 @@
 
 	dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
 
-
 	/* map the registers */
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	i2c->regs = devm_ioremap_resource(&pdev->dev, res);
 
@@ -1179,33 +1147,35 @@
 		i2c->regs, res);
 
 	/* setup info block for the i2c core */
-
 	i2c->adap.algo_data = i2c;
 	i2c->adap.dev.parent = &pdev->dev;
-
 	i2c->pctrl = devm_pinctrl_get_select_default(i2c->dev);
 
 	/* initialise the i2c gpio lines */
-
-	if (i2c->pdata->cfg_gpio) {
+	if (i2c->pdata->cfg_gpio)
 		i2c->pdata->cfg_gpio(to_platform_device(i2c->dev));
-	} else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c)) {
+	else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c))
 		return -EINVAL;
-	}
 
 	/* initialise the i2c controller */
+	ret = clk_prepare_enable(i2c->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "I2C clock enable failed\n");
+		return ret;
+	}
 
-	clk_prepare_enable(i2c->clk);
 	ret = s3c24xx_i2c_init(i2c);
 	clk_disable(i2c->clk);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "I2C controller init failed\n");
+		clk_unprepare(i2c->clk);
 		return ret;
 	}
-	/* find the IRQ for this unit (note, this relies on the init call to
+
+	/*
+	 * find the IRQ for this unit (note, this relies on the init call to
 	 * ensure no current IRQs pending
 	 */
-
 	if (!(i2c->quirks & QUIRK_POLL)) {
 		i2c->irq = ret = platform_get_irq(pdev, 0);
 		if (ret <= 0) {
@@ -1214,9 +1184,8 @@
 			return ret;
 		}
 
-	ret = devm_request_irq(&pdev->dev, i2c->irq, s3c24xx_i2c_irq, 0,
-				dev_name(&pdev->dev), i2c);
-
+		ret = devm_request_irq(&pdev->dev, i2c->irq, s3c24xx_i2c_irq,
+				       0, dev_name(&pdev->dev), i2c);
 		if (ret != 0) {
 			dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
 			clk_unprepare(i2c->clk);
@@ -1231,12 +1200,12 @@
 		return ret;
 	}
 
-	/* Note, previous versions of the driver used i2c_add_adapter()
+	/*
+	 * Note, previous versions of the driver used i2c_add_adapter()
 	 * to add the bus at any number. We now pass the bus number via
 	 * the platform data, so if unset it will now default to always
 	 * being bus 0.
 	 */
-
 	i2c->adap.nr = i2c->pdata->bus_num;
 	i2c->adap.dev.of_node = pdev->dev.of_node;
 
@@ -1257,11 +1226,6 @@
 	return 0;
 }
 
-/* s3c24xx_i2c_remove
- *
- * called when device is removed from the bus
-*/
-
 static int s3c24xx_i2c_remove(struct platform_device *pdev)
 {
 	struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
@@ -1316,14 +1280,8 @@
 
 #ifdef CONFIG_PM
 static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
-	.suspend_noirq = s3c24xx_i2c_suspend_noirq,
-	.resume_noirq = s3c24xx_i2c_resume_noirq,
-	.freeze_noirq = s3c24xx_i2c_suspend_noirq,
-	.thaw_noirq = s3c24xx_i2c_resume_noirq,
-	.poweroff_noirq = s3c24xx_i2c_suspend_noirq,
-	.restore_noirq = s3c24xx_i2c_resume_noirq,
-#endif
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(s3c24xx_i2c_suspend_noirq,
+				      s3c24xx_i2c_resume_noirq)
 };
 
 #define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
@@ -1331,8 +1289,6 @@
 #define S3C24XX_DEV_PM_OPS NULL
 #endif
 
-/* device driver for platform bus bits */
-
 static struct platform_driver s3c24xx_i2c_driver = {
 	.probe		= s3c24xx_i2c_probe,
 	.remove		= s3c24xx_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 7d2bd3e..6fb3e26 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -398,8 +398,7 @@
 {
 	switch (pd->pos) {
 	case -1:
-		*buf = (pd->msg->addr & 0x7f) << 1;
-		*buf |= (pd->msg->flags & I2C_M_RD) ? 1 : 0;
+		*buf = i2c_8bit_addr_from_msg(pd->msg);
 		break;
 	default:
 		*buf = pd->msg->buf[pd->pos];
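
This hunk, and several later ones in i2c-sirf, i2c-st and i2c-core, replaces open-coded address-byte construction with i2c_8bit_addr_from_msg(). The helper's effect is exactly what the removed lines did; spelled out for reference:

#include <linux/i2c.h>

/*
 * What the helper does: shift the 7-bit address left and set bit 0 for
 * reads.  The real i2c_8bit_addr_from_msg() lives in <linux/i2c.h>;
 * this copy exists only for illustration.
 */
static inline u8 addr_byte_from_msg(const struct i2c_msg *msg)
{
	return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
}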
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 13e51ef..792a42b 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -190,9 +190,7 @@
 
 	writel(regval, siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
 
-	addr = msg->addr << 1;	/* Generate address */
-	if (msg->flags & I2C_M_RD)
-		addr |= 1;
+	addr = i2c_8bit_addr_from_msg(msg);
 
 	/* Reverse direction bit */
 	if (msg->flags & I2C_M_REV_DIR_ADDR)
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 6ee7715..944ec420 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -337,10 +337,42 @@
 	writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH_DATAOUT);
 }
 
+static int st_i2c_recover_bus(struct i2c_adapter *i2c_adap)
+{
+	struct st_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+	u32 ctl;
+
+	dev_dbg(i2c_dev->dev, "Trying to recover bus\n");
+
+	/*
+	 * The SSP IP is dual role SPI/I2C. To generate 9 clock pulses
+	 * we switch to SPI mode, use 9-bit words and write a 0. This
+	 * has been validated with an oscilloscope and is easier
+	 * than switching to GPIO mode.
+	 */
+
+	/* Disable interrupts */
+	writel_relaxed(0, i2c_dev->base + SSC_IEN);
+
+	st_i2c_hw_config(i2c_dev);
+
+	ctl = SSC_CTL_EN | SSC_CTL_MS |	SSC_CTL_EN_RX_FIFO | SSC_CTL_EN_TX_FIFO;
+	st_i2c_set_bits(i2c_dev->base + SSC_CTL, ctl);
+
+	st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_I2CM);
+	usleep_range(8000, 10000);
+
+	writel_relaxed(0, i2c_dev->base + SSC_TBUF);
+	usleep_range(2000, 4000);
+	st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_I2CM);
+
+	return 0;
+}
+
 static int st_i2c_wait_free_bus(struct st_i2c_dev *i2c_dev)
 {
 	u32 sta;
-	int i;
+	int i, ret;
 
 	for (i = 0; i < 10; i++) {
 		sta = readl_relaxed(i2c_dev->base + SSC_STA);
@@ -352,6 +384,12 @@
 
 	dev_err(i2c_dev->dev, "bus not free (status = 0x%08x)\n", sta);
 
+	ret = i2c_recover_bus(&i2c_dev->adap);
+	if (ret) {
+		dev_err(i2c_dev->dev, "Failed to recover the bus (%d)\n", ret);
+		return ret;
+	}
+
 	return -EBUSY;
 }
 
@@ -614,8 +652,7 @@
 	unsigned long timeout;
 	int ret;
 
-	c->addr		= (u8)(msg->addr << 1);
-	c->addr		|= (msg->flags & I2C_M_RD);
+	c->addr		= i2c_8bit_addr_from_msg(msg);
 	c->buf		= msg->buf;
 	c->count	= msg->len;
 	c->xfered	= 0;
@@ -744,6 +781,10 @@
 	.functionality = st_i2c_func,
 };
 
+static struct i2c_bus_recovery_info st_i2c_recovery_info = {
+	.recover_bus = st_i2c_recover_bus,
+};
+
 static int st_i2c_of_get_deglitch(struct device_node *np,
 		struct st_i2c_dev *i2c_dev)
 {
@@ -826,6 +867,7 @@
 	adap->timeout = 2 * HZ;
 	adap->retries = 0;
 	adap->algo = &st_i2c_algo;
+	adap->bus_recovery_info = &st_i2c_recovery_info;
 	adap->dev.parent = &pdev->dev;
 	adap->dev.of_node = pdev->dev.of_node;
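
st_i2c now wires a controller-specific recover_bus() into adap->bus_recovery_info and calls i2c_recover_bus() when the bus stays busy. For hardware that can only bit-bang SCL, the core's generic recovery (touched in the i2c-core.c hunk further down) is the other option; a hedged sketch with hypothetical callbacks:

#include <linux/i2c.h>

/* Hypothetical SCL accessors for some controller's registers */
static int my_get_scl(struct i2c_adapter *adap)
{
	return 1;		/* read the SCL line state here */
}

static void my_set_scl(struct i2c_adapter *adap, int val)
{
	/* drive SCL high or low here */
}

/*
 * Sketch: with recover_bus set to i2c_generic_scl_recovery the core
 * pulses SCL through get_scl/set_scl to free a slave that is holding
 * SDA; assign this to adap->bus_recovery_info before registering the
 * adapter and call i2c_recover_bus() when the bus looks stuck.
 */
static struct i2c_bus_recovery_info my_recovery_info = {
	.recover_bus	= i2c_generic_scl_recovery,
	.get_scl	= my_get_scl,
	.set_scl	= my_set_scl,
};

As the i2c-core change below spells out, registration rejects this setup if recover_bus is i2c_generic_scl_recovery but either of the two callbacks is missing.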
 
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 929185a..445398c3 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -38,6 +38,7 @@
 #define I2C_CNFG_DEBOUNCE_CNT_SHIFT		12
 #define I2C_CNFG_PACKET_MODE_EN			(1<<10)
 #define I2C_CNFG_NEW_MASTER_FSM			(1<<11)
+#define I2C_CNFG_MULTI_MASTER_MODE		(1<<17)
 #define I2C_STATUS				0x01C
 #define I2C_SL_CNFG				0x020
 #define I2C_SL_CNFG_NACK			(1<<1)
@@ -106,6 +107,9 @@
 #define I2C_SLV_CONFIG_LOAD			(1 << 1)
 #define I2C_TIMEOUT_CONFIG_LOAD			(1 << 2)
 
+#define I2C_CLKEN_OVERRIDE			0x090
+#define I2C_MST_CORE_CLKEN_OVR			(1 << 0)
+
 /*
  * msg_end_type: The bus control which needs to be sent at end of transfer.
  * @MSG_END_STOP: Send stop pulse at end of transfer.
@@ -143,6 +147,8 @@
 	int clk_divisor_hs_mode;
 	int clk_divisor_std_fast_mode;
 	u16 clk_divisor_fast_plus_mode;
+	bool has_multi_master_mode;
+	bool has_slcg_override_reg;
 };
 
 /**
@@ -184,6 +190,7 @@
 	u32 bus_clk_rate;
 	u16 clk_divisor_non_hs_mode;
 	bool is_suspended;
+	bool is_multimaster_mode;
 };
 
 static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
@@ -438,6 +445,10 @@
 
 	val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN |
 		(0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
+
+	if (i2c_dev->hw->has_multi_master_mode)
+		val |= I2C_CNFG_MULTI_MASTER_MODE;
+
 	i2c_writel(i2c_dev, val, I2C_CNFG);
 	i2c_writel(i2c_dev, 0, I2C_INT_MASK);
 
@@ -463,25 +474,29 @@
 	if (tegra_i2c_flush_fifos(i2c_dev))
 		err = -ETIMEDOUT;
 
+	if (i2c_dev->is_multimaster_mode && i2c_dev->hw->has_slcg_override_reg)
+		i2c_writel(i2c_dev, I2C_MST_CORE_CLKEN_OVR, I2C_CLKEN_OVERRIDE);
+
 	if (i2c_dev->hw->has_config_load_reg) {
 		i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
 		while (i2c_readl(i2c_dev, I2C_CONFIG_LOAD) != 0) {
 			if (time_after(jiffies, timeout)) {
 				dev_warn(i2c_dev->dev,
 					"timeout waiting for config load\n");
-				return -ETIMEDOUT;
+				err = -ETIMEDOUT;
+				goto err;
 			}
 			msleep(1);
 		}
 	}
 
-	tegra_i2c_clock_disable(i2c_dev);
-
 	if (i2c_dev->irq_disabled) {
 		i2c_dev->irq_disabled = 0;
 		enable_irq(i2c_dev->irq);
 	}
 
+err:
+	tegra_i2c_clock_disable(i2c_dev);
 	return err;
 }
 
@@ -688,6 +703,20 @@
 	return ret;
 }
 
+static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
+{
+	struct device_node *np = i2c_dev->dev->of_node;
+	int ret;
+
+	ret = of_property_read_u32(np, "clock-frequency",
+			&i2c_dev->bus_clk_rate);
+	if (ret)
+		i2c_dev->bus_clk_rate = 100000; /* default clock rate */
+
+	i2c_dev->is_multimaster_mode = of_property_read_bool(np,
+			"multi-master");
+}
+
 static const struct i2c_algorithm tegra_i2c_algo = {
 	.master_xfer	= tegra_i2c_xfer,
 	.functionality	= tegra_i2c_func,
@@ -707,6 +736,8 @@
 	.clk_divisor_std_fast_mode = 0,
 	.clk_divisor_fast_plus_mode = 0,
 	.has_config_load_reg = false,
+	.has_multi_master_mode = false,
+	.has_slcg_override_reg = false,
 };
 
 static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -717,6 +748,8 @@
 	.clk_divisor_std_fast_mode = 0,
 	.clk_divisor_fast_plus_mode = 0,
 	.has_config_load_reg = false,
+	.has_multi_master_mode = false,
+	.has_slcg_override_reg = false,
 };
 
 static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -727,6 +760,8 @@
 	.clk_divisor_std_fast_mode = 0x19,
 	.clk_divisor_fast_plus_mode = 0x10,
 	.has_config_load_reg = false,
+	.has_multi_master_mode = false,
+	.has_slcg_override_reg = false,
 };
 
 static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
@@ -737,10 +772,25 @@
 	.clk_divisor_std_fast_mode = 0x19,
 	.clk_divisor_fast_plus_mode = 0x10,
 	.has_config_load_reg = true,
+	.has_multi_master_mode = false,
+	.has_slcg_override_reg = true,
+};
+
+static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
+	.has_continue_xfer_support = true,
+	.has_per_pkt_xfer_complete_irq = true,
+	.has_single_clk_source = true,
+	.clk_divisor_hs_mode = 1,
+	.clk_divisor_std_fast_mode = 0x19,
+	.clk_divisor_fast_plus_mode = 0x10,
+	.has_config_load_reg = true,
+	.has_multi_master_mode = true,
+	.has_slcg_override_reg = true,
 };
 
 /* Match table for of_platform binding */
 static const struct of_device_id tegra_i2c_of_match[] = {
+	{ .compatible = "nvidia,tegra210-i2c", .data = &tegra210_i2c_hw, },
 	{ .compatible = "nvidia,tegra124-i2c", .data = &tegra124_i2c_hw, },
 	{ .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
 	{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
@@ -797,10 +847,7 @@
 		return PTR_ERR(i2c_dev->rst);
 	}
 
-	ret = of_property_read_u32(i2c_dev->dev->of_node, "clock-frequency",
-					&i2c_dev->bus_clk_rate);
-	if (ret)
-		i2c_dev->bus_clk_rate = 100000; /* default clock rate */
+	tegra_i2c_parse_dt(i2c_dev);
 
 	i2c_dev->hw = &tegra20_i2c_hw;
 
@@ -853,6 +900,15 @@
 		goto unprepare_fast_clk;
 	}
 
+	if (i2c_dev->is_multimaster_mode) {
+		ret = clk_enable(i2c_dev->div_clk);
+		if (ret < 0) {
+			dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
+				ret);
+			goto unprepare_div_clk;
+		}
+	}
+
 	ret = tegra_i2c_init(i2c_dev);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to initialize i2c controller");
@@ -863,7 +919,7 @@
 			tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
-		goto unprepare_div_clk;
+		goto disable_div_clk;
 	}
 
 	i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
@@ -878,11 +934,15 @@
 	ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to add I2C adapter\n");
-		goto unprepare_div_clk;
+		goto disable_div_clk;
 	}
 
 	return 0;
 
+disable_div_clk:
+	if (i2c_dev->is_multimaster_mode)
+		clk_disable(i2c_dev->div_clk);
+
 unprepare_div_clk:
 	clk_unprepare(i2c_dev->div_clk);
 
@@ -898,6 +958,9 @@
 	struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
 	i2c_del_adapter(&i2c_dev->adapter);
 
+	if (i2c_dev->is_multimaster_mode)
+		clk_disable(i2c_dev->div_clk);
+
 	clk_unprepare(i2c_dev->div_clk);
 	if (!i2c_dev->hw->has_single_clk_source)
 		clk_unprepare(i2c_dev->fast_clk);
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index 213ba55..aeead0d 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -524,7 +524,7 @@
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
-		dev_err(dev, "failed to get IRQ number");
+		dev_err(dev, "failed to get IRQ number\n");
 		return irq;
 	}
 
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 89eaa8a..475a5eb 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -381,7 +381,7 @@
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
-		dev_err(dev, "failed to get IRQ number");
+		dev_err(dev, "failed to get IRQ number\n");
 		return irq;
 	}
 
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0f2f848..af11b65 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -525,22 +525,16 @@
 	return 0;
 }
 
-
-/* uevent helps with hotplug: modprobe -q $(MODALIAS) */
 static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
-	struct i2c_client	*client = to_i2c_client(dev);
+	struct i2c_client *client = to_i2c_client(dev);
 	int rc;
 
 	rc = acpi_device_uevent_modalias(dev, env);
 	if (rc != -ENODEV)
 		return rc;
 
-	if (add_uevent_var(env, "MODALIAS=%s%s",
-			   I2C_MODULE_PREFIX, client->name))
-		return -ENOMEM;
-	dev_dbg(dev, "uevent\n");
-	return 0;
+	return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name);
 }
 
 /* i2c bus recovery routines */
@@ -960,48 +954,40 @@
 }
 
 /**
- * i2c_lock_adapter - Get exclusive access to an I2C bus segment
+ * i2c_adapter_lock_bus - Get exclusive access to an I2C bus segment
  * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT
+ *	locks only this branch in the adapter tree
  */
-void i2c_lock_adapter(struct i2c_adapter *adapter)
+static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
+				 unsigned int flags)
 {
-	struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
-
-	if (parent)
-		i2c_lock_adapter(parent);
-	else
-		rt_mutex_lock(&adapter->bus_lock);
-}
-EXPORT_SYMBOL_GPL(i2c_lock_adapter);
-
-/**
- * i2c_trylock_adapter - Try to get exclusive access to an I2C bus segment
- * @adapter: Target I2C bus segment
- */
-static int i2c_trylock_adapter(struct i2c_adapter *adapter)
-{
-	struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
-
-	if (parent)
-		return i2c_trylock_adapter(parent);
-	else
-		return rt_mutex_trylock(&adapter->bus_lock);
+	rt_mutex_lock(&adapter->bus_lock);
 }
 
 /**
- * i2c_unlock_adapter - Release exclusive access to an I2C bus segment
+ * i2c_adapter_trylock_bus - Try to get exclusive access to an I2C bus segment
  * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER trylocks the root i2c adapter, I2C_LOCK_SEGMENT
+ *	trylocks only this branch in the adapter tree
  */
-void i2c_unlock_adapter(struct i2c_adapter *adapter)
+static int i2c_adapter_trylock_bus(struct i2c_adapter *adapter,
+				   unsigned int flags)
 {
-	struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
-
-	if (parent)
-		i2c_unlock_adapter(parent);
-	else
-		rt_mutex_unlock(&adapter->bus_lock);
+	return rt_mutex_trylock(&adapter->bus_lock);
 }
-EXPORT_SYMBOL_GPL(i2c_unlock_adapter);
+
+/**
+ * i2c_adapter_unlock_bus - Release exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT
+ *	unlocks only this branch in the adapter tree
+ */
+static void i2c_adapter_unlock_bus(struct i2c_adapter *adapter,
+				   unsigned int flags)
+{
+	rt_mutex_unlock(&adapter->bus_lock);
+}
 
 static void i2c_dev_set_name(struct i2c_adapter *adap,
 			     struct i2c_client *client)
@@ -1547,7 +1533,14 @@
 		return -EINVAL;
 	}
 
+	if (!adap->lock_bus) {
+		adap->lock_bus = i2c_adapter_lock_bus;
+		adap->trylock_bus = i2c_adapter_trylock_bus;
+		adap->unlock_bus = i2c_adapter_unlock_bus;
+	}
+
 	rt_mutex_init(&adap->bus_lock);
+	rt_mutex_init(&adap->mux_lock);
 	mutex_init(&adap->userspace_clients_lock);
 	INIT_LIST_HEAD(&adap->userspace_clients);
 
@@ -1565,6 +1558,7 @@
 	dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
 
 	pm_runtime_no_callbacks(&adap->dev);
+	pm_suspend_ignore_children(&adap->dev, true);
 	pm_runtime_enable(&adap->dev);
 
 #ifdef CONFIG_I2C_COMPAT
@@ -1600,10 +1594,12 @@
 
 			bri->get_scl = get_scl_gpio_value;
 			bri->set_scl = set_scl_gpio_value;
-		} else if (!bri->set_scl || !bri->get_scl) {
+		} else if (bri->recover_bus == i2c_generic_scl_recovery) {
 			/* Generic SCL recovery */
-			dev_err(&adap->dev, "No {get|set}_gpio() found, not using recovery\n");
-			adap->bus_recovery_info = NULL;
+			if (!bri->set_scl || !bri->get_scl) {
+				dev_err(&adap->dev, "No {get|set}_scl() found, not using recovery\n");
+				adap->bus_recovery_info = NULL;
+			}
 		}
 	}
 
@@ -2315,16 +2311,16 @@
 #endif
 
 		if (in_atomic() || irqs_disabled()) {
-			ret = i2c_trylock_adapter(adap);
+			ret = adap->trylock_bus(adap, I2C_LOCK_SEGMENT);
 			if (!ret)
 				/* I2C activity is ongoing. */
 				return -EAGAIN;
 		} else {
-			i2c_lock_adapter(adap);
+			i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
 		}
 
 		ret = __i2c_transfer(adap, msgs, num);
-		i2c_unlock_adapter(adap);
+		i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
 
 		return ret;
 	} else {
@@ -2652,7 +2648,7 @@
 static u8 i2c_smbus_msg_pec(u8 pec, struct i2c_msg *msg)
 {
 	/* The address will be sent first */
-	u8 addr = (msg->addr << 1) | !!(msg->flags & I2C_M_RD);
+	u8 addr = i2c_8bit_addr_from_msg(msg);
 	pec = i2c_smbus_pec(pec, &addr, 1);
 
 	/* The data buffer follows */
@@ -3099,7 +3095,7 @@
 	flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB;
 
 	if (adapter->algo->smbus_xfer) {
-		i2c_lock_adapter(adapter);
+		i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
 
 		/* Retry automatically on arbitration loss */
 		orig_jiffies = jiffies;
@@ -3113,7 +3109,7 @@
 				       orig_jiffies + adapter->timeout))
 				break;
 		}
-		i2c_unlock_adapter(adapter);
+		i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
 
 		if (res != -EOPNOTSUPP || !adapter->algo->master_xfer)
 			goto trace;
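
The core now routes locking through per-adapter lock_bus/trylock_bus/unlock_bus callbacks, invoked via i2c_lock_bus()/i2c_unlock_bus() with a flags argument. A sketch of the calling convention, mirroring the converted call sites above:

#include <linux/i2c.h>

/*
 * Sketch of the new convention: I2C_LOCK_SEGMENT locks only this
 * branch of a mux tree, I2C_LOCK_ROOT_ADAPTER locks all the way down
 * to the root adapter.
 */
static int example_locked_transfer(struct i2c_adapter *adap,
				   struct i2c_msg *msgs, int num)
{
	int ret;

	i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
	ret = __i2c_transfer(adap, msgs, num);	/* unlocked variant */
	i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);

	return ret;
}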
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index d402287..8eee986 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -31,30 +31,66 @@
 struct i2c_mux_priv {
 	struct i2c_adapter adap;
 	struct i2c_algorithm algo;
-
-	struct i2c_adapter *parent;
-	struct device *mux_dev;
-	void *mux_priv;
+	struct i2c_mux_core *muxc;
 	u32 chan_id;
-
-	int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
-	int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
 };
 
+static int __i2c_mux_master_xfer(struct i2c_adapter *adap,
+				 struct i2c_msg msgs[], int num)
+{
+	struct i2c_mux_priv *priv = adap->algo_data;
+	struct i2c_mux_core *muxc = priv->muxc;
+	struct i2c_adapter *parent = muxc->parent;
+	int ret;
+
+	/* Switch to the right mux port and perform the transfer. */
+
+	ret = muxc->select(muxc, priv->chan_id);
+	if (ret >= 0)
+		ret = __i2c_transfer(parent, msgs, num);
+	if (muxc->deselect)
+		muxc->deselect(muxc, priv->chan_id);
+
+	return ret;
+}
+
 static int i2c_mux_master_xfer(struct i2c_adapter *adap,
 			       struct i2c_msg msgs[], int num)
 {
 	struct i2c_mux_priv *priv = adap->algo_data;
-	struct i2c_adapter *parent = priv->parent;
+	struct i2c_mux_core *muxc = priv->muxc;
+	struct i2c_adapter *parent = muxc->parent;
 	int ret;
 
 	/* Switch to the right mux port and perform the transfer. */
 
-	ret = priv->select(parent, priv->mux_priv, priv->chan_id);
+	ret = muxc->select(muxc, priv->chan_id);
 	if (ret >= 0)
-		ret = __i2c_transfer(parent, msgs, num);
-	if (priv->deselect)
-		priv->deselect(parent, priv->mux_priv, priv->chan_id);
+		ret = i2c_transfer(parent, msgs, num);
+	if (muxc->deselect)
+		muxc->deselect(muxc, priv->chan_id);
+
+	return ret;
+}
+
+static int __i2c_mux_smbus_xfer(struct i2c_adapter *adap,
+				u16 addr, unsigned short flags,
+				char read_write, u8 command,
+				int size, union i2c_smbus_data *data)
+{
+	struct i2c_mux_priv *priv = adap->algo_data;
+	struct i2c_mux_core *muxc = priv->muxc;
+	struct i2c_adapter *parent = muxc->parent;
+	int ret;
+
+	/* Select the right mux port and perform the transfer. */
+
+	ret = muxc->select(muxc, priv->chan_id);
+	if (ret >= 0)
+		ret = parent->algo->smbus_xfer(parent, addr, flags,
+					read_write, command, size, data);
+	if (muxc->deselect)
+		muxc->deselect(muxc, priv->chan_id);
 
 	return ret;
 }
@@ -65,17 +101,18 @@
 			      int size, union i2c_smbus_data *data)
 {
 	struct i2c_mux_priv *priv = adap->algo_data;
-	struct i2c_adapter *parent = priv->parent;
+	struct i2c_mux_core *muxc = priv->muxc;
+	struct i2c_adapter *parent = muxc->parent;
 	int ret;
 
 	/* Select the right mux port and perform the transfer. */
 
-	ret = priv->select(parent, priv->mux_priv, priv->chan_id);
+	ret = muxc->select(muxc, priv->chan_id);
 	if (ret >= 0)
-		ret = parent->algo->smbus_xfer(parent, addr, flags,
-					read_write, command, size, data);
-	if (priv->deselect)
-		priv->deselect(parent, priv->mux_priv, priv->chan_id);
+		ret = i2c_smbus_xfer(parent, addr, flags,
+				     read_write, command, size, data);
+	if (muxc->deselect)
+		muxc->deselect(muxc, priv->chan_id);
 
 	return ret;
 }
@@ -84,7 +121,7 @@
 static u32 i2c_mux_functionality(struct i2c_adapter *adap)
 {
 	struct i2c_mux_priv *priv = adap->algo_data;
-	struct i2c_adapter *parent = priv->parent;
+	struct i2c_adapter *parent = priv->muxc->parent;
 
 	return parent->algo->functionality(parent);
 }
@@ -102,38 +139,167 @@
 	return class;
 }
 
-struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
-				struct device *mux_dev,
-				void *mux_priv, u32 force_nr, u32 chan_id,
-				unsigned int class,
-				int (*select) (struct i2c_adapter *,
-					       void *, u32),
-				int (*deselect) (struct i2c_adapter *,
-						 void *, u32))
+static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
 {
+	struct i2c_mux_priv *priv = adapter->algo_data;
+	struct i2c_adapter *parent = priv->muxc->parent;
+
+	rt_mutex_lock(&parent->mux_lock);
+	if (!(flags & I2C_LOCK_ROOT_ADAPTER))
+		return;
+	i2c_lock_bus(parent, flags);
+}
+
+static int i2c_mux_trylock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+	struct i2c_mux_priv *priv = adapter->algo_data;
+	struct i2c_adapter *parent = priv->muxc->parent;
+
+	if (!rt_mutex_trylock(&parent->mux_lock))
+		return 0;	/* mux_lock not locked, failure */
+	if (!(flags & I2C_LOCK_ROOT_ADAPTER))
+		return 1;	/* we only want mux_lock, success */
+	if (parent->trylock_bus(parent, flags))
+		return 1;	/* parent locked too, success */
+	rt_mutex_unlock(&parent->mux_lock);
+	return 0;		/* parent not locked, failure */
+}
+
+static void i2c_mux_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+	struct i2c_mux_priv *priv = adapter->algo_data;
+	struct i2c_adapter *parent = priv->muxc->parent;
+
+	if (flags & I2C_LOCK_ROOT_ADAPTER)
+		i2c_unlock_bus(parent, flags);
+	rt_mutex_unlock(&parent->mux_lock);
+}
+
+static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
+				unsigned int flags)
+{
+	struct i2c_mux_priv *priv = adapter->algo_data;
+	struct i2c_adapter *parent = priv->muxc->parent;
+
+	rt_mutex_lock(&parent->mux_lock);
+	i2c_lock_bus(parent, flags);
+}
+
+static int i2c_parent_trylock_bus(struct i2c_adapter *adapter,
+				  unsigned int flags)
+{
+	struct i2c_mux_priv *priv = adapter->algo_data;
+	struct i2c_adapter *parent = priv->muxc->parent;
+
+	if (!rt_mutex_trylock(&parent->mux_lock))
+		return 0;	/* mux_lock not locked, failure */
+	if (parent->trylock_bus(parent, flags))
+		return 1;	/* parent locked too, success */
+	rt_mutex_unlock(&parent->mux_lock);
+	return 0;		/* parent not locked, failure */
+}
+
+static void i2c_parent_unlock_bus(struct i2c_adapter *adapter,
+				  unsigned int flags)
+{
+	struct i2c_mux_priv *priv = adapter->algo_data;
+	struct i2c_adapter *parent = priv->muxc->parent;
+
+	i2c_unlock_bus(parent, flags);
+	rt_mutex_unlock(&parent->mux_lock);
+}
+
+struct i2c_adapter *i2c_root_adapter(struct device *dev)
+{
+	struct device *i2c;
+	struct i2c_adapter *i2c_root;
+
+	/*
+	 * Walk up the device tree to find an i2c adapter, indicating
+	 * that this is an i2c client device. Check all ancestors to
+	 * handle mfd devices etc.
+	 */
+	for (i2c = dev; i2c; i2c = i2c->parent) {
+		if (i2c->type == &i2c_adapter_type)
+			break;
+	}
+	if (!i2c)
+		return NULL;
+
+	/* Continue up the tree to find the root i2c adapter */
+	i2c_root = to_i2c_adapter(i2c);
+	while (i2c_parent_is_i2c_adapter(i2c_root))
+		i2c_root = i2c_parent_is_i2c_adapter(i2c_root);
+
+	return i2c_root;
+}
+EXPORT_SYMBOL_GPL(i2c_root_adapter);
+
+struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
+				   struct device *dev, int max_adapters,
+				   int sizeof_priv, u32 flags,
+				   int (*select)(struct i2c_mux_core *, u32),
+				   int (*deselect)(struct i2c_mux_core *, u32))
+{
+	struct i2c_mux_core *muxc;
+
+	muxc = devm_kzalloc(dev, sizeof(*muxc)
+			    + max_adapters * sizeof(muxc->adapter[0])
+			    + sizeof_priv, GFP_KERNEL);
+	if (!muxc)
+		return NULL;
+	if (sizeof_priv)
+		muxc->priv = &muxc->adapter[max_adapters];
+
+	muxc->parent = parent;
+	muxc->dev = dev;
+	if (flags & I2C_MUX_LOCKED)
+		muxc->mux_locked = true;
+	muxc->select = select;
+	muxc->deselect = deselect;
+	muxc->max_adapters = max_adapters;
+
+	return muxc;
+}
+EXPORT_SYMBOL_GPL(i2c_mux_alloc);
+
+int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
+			u32 force_nr, u32 chan_id,
+			unsigned int class)
+{
+	struct i2c_adapter *parent = muxc->parent;
 	struct i2c_mux_priv *priv;
 	char symlink_name[20];
 	int ret;
 
-	priv = kzalloc(sizeof(struct i2c_mux_priv), GFP_KERNEL);
+	if (muxc->num_adapters >= muxc->max_adapters) {
+		dev_err(muxc->dev, "No room for more i2c-mux adapters\n");
+		return -EINVAL;
+	}
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
-		return NULL;
+		return -ENOMEM;
 
 	/* Set up private adapter data */
-	priv->parent = parent;
-	priv->mux_dev = mux_dev;
-	priv->mux_priv = mux_priv;
+	priv->muxc = muxc;
 	priv->chan_id = chan_id;
-	priv->select = select;
-	priv->deselect = deselect;
 
 	/* Need to do algo dynamically because we don't know ahead
 	 * of time what sort of physical adapter we'll be dealing with.
 	 */
-	if (parent->algo->master_xfer)
-		priv->algo.master_xfer = i2c_mux_master_xfer;
-	if (parent->algo->smbus_xfer)
-		priv->algo.smbus_xfer = i2c_mux_smbus_xfer;
+	if (parent->algo->master_xfer) {
+		if (muxc->mux_locked)
+			priv->algo.master_xfer = i2c_mux_master_xfer;
+		else
+			priv->algo.master_xfer = __i2c_mux_master_xfer;
+	}
+	if (parent->algo->smbus_xfer) {
+		if (muxc->mux_locked)
+			priv->algo.smbus_xfer = i2c_mux_smbus_xfer;
+		else
+			priv->algo.smbus_xfer = __i2c_mux_smbus_xfer;
+	}
 	priv->algo.functionality = i2c_mux_functionality;
 
 	/* Now fill out new adapter structure */
@@ -146,6 +312,15 @@
 	priv->adap.retries = parent->retries;
 	priv->adap.timeout = parent->timeout;
 	priv->adap.quirks = parent->quirks;
+	if (muxc->mux_locked) {
+		priv->adap.lock_bus = i2c_mux_lock_bus;
+		priv->adap.trylock_bus = i2c_mux_trylock_bus;
+		priv->adap.unlock_bus = i2c_mux_unlock_bus;
+	} else {
+		priv->adap.lock_bus = i2c_parent_lock_bus;
+		priv->adap.trylock_bus = i2c_parent_trylock_bus;
+		priv->adap.unlock_bus = i2c_parent_unlock_bus;
+	}
 
 	/* Sanity check on class */
 	if (i2c_mux_parent_classes(parent) & class)
@@ -159,11 +334,11 @@
 	 * Try to populate the mux adapter's of_node, expands to
 	 * nothing if !CONFIG_OF.
 	 */
-	if (mux_dev->of_node) {
+	if (muxc->dev->of_node) {
 		struct device_node *child;
 		u32 reg;
 
-		for_each_child_of_node(mux_dev->of_node, child) {
+		for_each_child_of_node(muxc->dev->of_node, child) {
 			ret = of_property_read_u32(child, "reg", &reg);
 			if (ret)
 				continue;
@@ -177,8 +352,9 @@
 	/*
 	 * Associate the mux channel with an ACPI node.
 	 */
-	if (has_acpi_companion(mux_dev))
-		acpi_preset_companion(&priv->adap.dev, ACPI_COMPANION(mux_dev),
+	if (has_acpi_companion(muxc->dev))
+		acpi_preset_companion(&priv->adap.dev,
+				      ACPI_COMPANION(muxc->dev),
 				      chan_id);
 
 	if (force_nr) {
@@ -192,35 +368,45 @@
 			"failed to add mux-adapter (error=%d)\n",
 			ret);
 		kfree(priv);
-		return NULL;
+		return ret;
 	}
 
-	WARN(sysfs_create_link(&priv->adap.dev.kobj, &mux_dev->kobj, "mux_device"),
-			       "can't create symlink to mux device\n");
+	WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj,
+			       "mux_device"),
+	     "can't create symlink to mux device\n");
 
 	snprintf(symlink_name, sizeof(symlink_name), "channel-%u", chan_id);
-	WARN(sysfs_create_link(&mux_dev->kobj, &priv->adap.dev.kobj, symlink_name),
-			       "can't create symlink for channel %u\n", chan_id);
+	WARN(sysfs_create_link(&muxc->dev->kobj, &priv->adap.dev.kobj,
+			       symlink_name),
+	     "can't create symlink for channel %u\n", chan_id);
 	dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
 		 i2c_adapter_id(&priv->adap));
 
-	return &priv->adap;
+	muxc->adapter[muxc->num_adapters++] = &priv->adap;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
+EXPORT_SYMBOL_GPL(i2c_mux_add_adapter);
 
-void i2c_del_mux_adapter(struct i2c_adapter *adap)
+void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
 {
-	struct i2c_mux_priv *priv = adap->algo_data;
 	char symlink_name[20];
 
-	snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
-	sysfs_remove_link(&priv->mux_dev->kobj, symlink_name);
+	while (muxc->num_adapters) {
+		struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
+		struct i2c_mux_priv *priv = adap->algo_data;
 
-	sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
-	i2c_del_adapter(adap);
-	kfree(priv);
+		muxc->adapter[muxc->num_adapters] = NULL;
+
+		snprintf(symlink_name, sizeof(symlink_name),
+			 "channel-%u", priv->chan_id);
+		sysfs_remove_link(&muxc->dev->kobj, symlink_name);
+
+		sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
+		i2c_del_adapter(adap);
+		kfree(priv);
+	}
 }
-EXPORT_SYMBOL_GPL(i2c_del_mux_adapter);
+EXPORT_SYMBOL_GPL(i2c_mux_del_adapters);
 
 MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
 MODULE_DESCRIPTION("I2C driver for multiplexed I2C busses");
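
The rewritten mux core replaces i2c_add_mux_adapter()/i2c_del_mux_adapter() with an i2c_mux_core object: allocate it once with i2c_mux_alloc(), add one child adapter per channel with i2c_mux_add_adapter(), and remove them all with i2c_mux_del_adapters(). A minimal, hypothetical probe sketch (channel count, names and the parent lookup are placeholders; the converted drivers below show the real thing):

#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/platform_device.h>

struct demo_mux {
	int cur_chan;			/* hypothetical private state */
};

static int demo_select(struct i2c_mux_core *muxc, u32 chan)
{
	struct demo_mux *mux = i2c_mux_priv(muxc);

	mux->cur_chan = chan;		/* switch the hardware here */
	return 0;
}

/* 'parent' would normally come from DT or platform data */
static int demo_probe(struct platform_device *pdev,
		      struct i2c_adapter *parent)
{
	struct i2c_mux_core *muxc;
	int i, ret;

	/* two channels, private data allocated behind the core struct */
	muxc = i2c_mux_alloc(parent, &pdev->dev, 2, sizeof(struct demo_mux),
			     0, demo_select, NULL);
	if (!muxc)
		return -ENOMEM;
	platform_set_drvdata(pdev, muxc);

	for (i = 0; i < 2; i++) {
		ret = i2c_mux_add_adapter(muxc, 0, i, 0);
		if (ret) {
			i2c_mux_del_adapters(muxc);
			return ret;
		}
	}

	return 0;
}

Select/deselect callbacks now take the i2c_mux_core and reach driver state through i2c_mux_priv(), and a mux that does not need the parent bus to switch channels can pass I2C_MUX_LOCKED so that transfers through its children lock only the mux segment rather than the whole parent bus.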
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 402e3a6..a90bbc4 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -28,8 +28,6 @@
 /**
  * struct i2c_arbitrator_data - Driver data for I2C arbitrator
  *
- * @parent: Parent adapter
- * @child: Child bus
  * @our_gpio: GPIO we'll use to claim.
  * @our_gpio_release: 0 if active high; 1 if active low; AKA if the GPIO ==
  *   this then consider it released.
@@ -42,8 +40,6 @@
  */
 
 struct i2c_arbitrator_data {
-	struct i2c_adapter *parent;
-	struct i2c_adapter *child;
 	int our_gpio;
 	int our_gpio_release;
 	int their_gpio;
@@ -59,9 +55,9 @@
  *
  * Use the GPIO-based signalling protocol; return -EBUSY if we fail.
  */
-static int i2c_arbitrator_select(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_arbitrator_select(struct i2c_mux_core *muxc, u32 chan)
 {
-	const struct i2c_arbitrator_data *arb = data;
+	const struct i2c_arbitrator_data *arb = i2c_mux_priv(muxc);
 	unsigned long stop_retry, stop_time;
 
 	/* Start a round of trying to claim the bus */
@@ -93,7 +89,7 @@
 	/* Give up, release our claim */
 	gpio_set_value(arb->our_gpio, arb->our_gpio_release);
 	udelay(arb->slew_delay_us);
-	dev_err(&adap->dev, "Could not claim bus, timeout\n");
+	dev_err(muxc->dev, "Could not claim bus, timeout\n");
 	return -EBUSY;
 }
 
@@ -102,10 +98,9 @@
  *
  * Release the I2C bus using the GPIO-based signalling protocol.
  */
-static int i2c_arbitrator_deselect(struct i2c_adapter *adap, void *data,
-				   u32 chan)
+static int i2c_arbitrator_deselect(struct i2c_mux_core *muxc, u32 chan)
 {
-	const struct i2c_arbitrator_data *arb = data;
+	const struct i2c_arbitrator_data *arb = i2c_mux_priv(muxc);
 
 	/* Release the bus and wait for the other master to notice */
 	gpio_set_value(arb->our_gpio, arb->our_gpio_release);
@@ -119,6 +114,7 @@
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct device_node *parent_np;
+	struct i2c_mux_core *muxc;
 	struct i2c_arbitrator_data *arb;
 	enum of_gpio_flags gpio_flags;
 	unsigned long out_init;
@@ -134,12 +130,13 @@
 		return -EINVAL;
 	}
 
-	arb = devm_kzalloc(dev, sizeof(*arb), GFP_KERNEL);
-	if (!arb) {
-		dev_err(dev, "Cannot allocate i2c_arbitrator_data\n");
+	muxc = i2c_mux_alloc(NULL, dev, 1, sizeof(*arb), 0,
+			     i2c_arbitrator_select, i2c_arbitrator_deselect);
+	if (!muxc)
 		return -ENOMEM;
-	}
-	platform_set_drvdata(pdev, arb);
+	arb = i2c_mux_priv(muxc);
+
+	platform_set_drvdata(pdev, muxc);
 
 	/* Request GPIOs */
 	ret = of_get_named_gpio_flags(np, "our-claim-gpio", 0, &gpio_flags);
@@ -196,21 +193,18 @@
 		dev_err(dev, "Cannot parse i2c-parent\n");
 		return -EINVAL;
 	}
-	arb->parent = of_get_i2c_adapter_by_node(parent_np);
+	muxc->parent = of_get_i2c_adapter_by_node(parent_np);
 	of_node_put(parent_np);
-	if (!arb->parent) {
+	if (!muxc->parent) {
 		dev_err(dev, "Cannot find parent bus\n");
 		return -EPROBE_DEFER;
 	}
 
 	/* Actually add the mux adapter */
-	arb->child = i2c_add_mux_adapter(arb->parent, dev, arb, 0, 0, 0,
-					 i2c_arbitrator_select,
-					 i2c_arbitrator_deselect);
-	if (!arb->child) {
+	ret = i2c_mux_add_adapter(muxc, 0, 0, 0);
+	if (ret) {
 		dev_err(dev, "Failed to add adapter\n");
-		ret = -ENODEV;
-		i2c_put_adapter(arb->parent);
+		i2c_put_adapter(muxc->parent);
 	}
 
 	return ret;
@@ -218,11 +212,10 @@
 
 static int i2c_arbitrator_remove(struct platform_device *pdev)
 {
-	struct i2c_arbitrator_data *arb = platform_get_drvdata(pdev);
+	struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
 
-	i2c_del_mux_adapter(arb->child);
-	i2c_put_adapter(arb->parent);
-
+	i2c_mux_del_adapters(muxc);
+	i2c_put_adapter(muxc->parent);
 	return 0;
 }
 
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 7748a0a..8de073a 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -140,22 +140,34 @@
 	return i2c_demux_activate_master(priv, new_chan);
 }
 
-static ssize_t cur_master_show(struct device *dev, struct device_attribute *attr,
-			   char *buf)
+static ssize_t available_masters_show(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
 {
 	struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
 	int count = 0, i;
 
 	for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
-		count += scnprintf(buf + count, PAGE_SIZE - count, "%c %d - %s\n",
-				 i == priv->cur_chan ? '*' : ' ', i,
-				 priv->chan[i].parent_np->full_name);
+		count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%s%c",
+				   i, priv->chan[i].parent_np->full_name,
+				   i == priv->num_chan - 1 ? '\n' : ' ');
 
 	return count;
 }
+static DEVICE_ATTR_RO(available_masters);
 
-static ssize_t cur_master_store(struct device *dev, struct device_attribute *attr,
-			    const char *buf, size_t count)
+static ssize_t current_master_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", priv->cur_chan);
+}
+
+static ssize_t current_master_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
 {
 	struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
 	unsigned int val;
@@ -172,7 +184,7 @@
 
 	return ret < 0 ? ret : count;
 }
-static DEVICE_ATTR_RW(cur_master);
+static DEVICE_ATTR_RW(current_master);
 
 static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 {
@@ -218,12 +230,18 @@
 	/* switch to first parent as active master */
 	i2c_demux_activate_master(priv, 0);
 
-	err = device_create_file(&pdev->dev, &dev_attr_cur_master);
+	err = device_create_file(&pdev->dev, &dev_attr_available_masters);
 	if (err)
 		goto err_rollback;
 
+	err = device_create_file(&pdev->dev, &dev_attr_current_master);
+	if (err)
+		goto err_rollback_available;
+
 	return 0;
 
+err_rollback_available:
+	device_remove_file(&pdev->dev, &dev_attr_available_masters);
 err_rollback:
 	for (j = 0; j < i; j++) {
 		of_node_put(priv->chan[j].parent_np);
@@ -238,7 +256,8 @@
 	struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev);
 	int i;
 
-	device_remove_file(&pdev->dev, &dev_attr_cur_master);
+	device_remove_file(&pdev->dev, &dev_attr_current_master);
+	device_remove_file(&pdev->dev, &dev_attr_available_masters);
 
 	i2c_demux_deactivate_master(priv);
 
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index b8e11c1..e5cf26e 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -15,11 +15,10 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
+#include "../../gpio/gpiolib.h"
 #include <linux/of_gpio.h>
 
 struct gpiomux {
-	struct i2c_adapter *parent;
-	struct i2c_adapter **adap; /* child busses */
 	struct i2c_mux_gpio_platform_data data;
 	unsigned gpio_base;
 };
@@ -33,18 +32,18 @@
 					val & (1 << i));
 }
 
-static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_select(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct gpiomux *mux = data;
+	struct gpiomux *mux = i2c_mux_priv(muxc);
 
 	i2c_mux_gpio_set(mux, chan);
 
 	return 0;
 }
 
-static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_deselect(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct gpiomux *mux = data;
+	struct gpiomux *mux = i2c_mux_priv(muxc);
 
 	i2c_mux_gpio_set(mux, mux->data.idle);
 
@@ -136,19 +135,16 @@
 
 static int i2c_mux_gpio_probe(struct platform_device *pdev)
 {
+	struct i2c_mux_core *muxc;
 	struct gpiomux *mux;
 	struct i2c_adapter *parent;
-	int (*deselect) (struct i2c_adapter *, void *, u32);
+	struct i2c_adapter *root;
 	unsigned initial_state, gpio_base;
 	int i, ret;
 
 	mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
-	if (!mux) {
-		dev_err(&pdev->dev, "Cannot allocate gpiomux structure");
+	if (!mux)
 		return -ENOMEM;
-	}
-
-	platform_set_drvdata(pdev, mux);
 
 	if (!dev_get_platdata(&pdev->dev)) {
 		ret = i2c_mux_gpio_probe_dt(mux, pdev);
@@ -180,27 +176,32 @@
 	if (!parent)
 		return -EPROBE_DEFER;
 
-	mux->parent = parent;
-	mux->gpio_base = gpio_base;
-
-	mux->adap = devm_kzalloc(&pdev->dev,
-				 sizeof(*mux->adap) * mux->data.n_values,
-				 GFP_KERNEL);
-	if (!mux->adap) {
-		dev_err(&pdev->dev, "Cannot allocate i2c_adapter structure");
+	muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
+			     i2c_mux_gpio_select, NULL);
+	if (!muxc) {
 		ret = -ENOMEM;
 		goto alloc_failed;
 	}
+	muxc->priv = mux;
+
+	platform_set_drvdata(pdev, muxc);
+
+	root = i2c_root_adapter(&parent->dev);
+
+	muxc->mux_locked = true;
+	mux->gpio_base = gpio_base;
 
 	if (mux->data.idle != I2C_MUX_GPIO_NO_IDLE) {
 		initial_state = mux->data.idle;
-		deselect = i2c_mux_gpio_deselect;
+		muxc->deselect = i2c_mux_gpio_deselect;
 	} else {
 		initial_state = mux->data.values[0];
-		deselect = NULL;
 	}
 
 	for (i = 0; i < mux->data.n_gpios; i++) {
+		struct device *gpio_dev;
+		struct gpio_desc *gpio_desc;
+
 		ret = gpio_request(gpio_base + mux->data.gpios[i], "i2c-mux-gpio");
 		if (ret) {
 			dev_err(&pdev->dev, "Failed to request GPIO %d\n",
@@ -217,17 +218,24 @@
 			i++;	/* gpio_request above succeeded, so must free */
 			goto err_request_gpio;
 		}
+
+		if (!muxc->mux_locked)
+			continue;
+
+		gpio_desc = gpio_to_desc(gpio_base + mux->data.gpios[i]);
+		gpio_dev = &gpio_desc->gdev->dev;
+		muxc->mux_locked = i2c_root_adapter(gpio_dev) == root;
 	}
 
+	if (muxc->mux_locked)
+		dev_info(&pdev->dev, "mux-locked i2c mux\n");
+
 	for (i = 0; i < mux->data.n_values; i++) {
 		u32 nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
 		unsigned int class = mux->data.classes ? mux->data.classes[i] : 0;
 
-		mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr,
-						   mux->data.values[i], class,
-						   i2c_mux_gpio_select, deselect);
-		if (!mux->adap[i]) {
-			ret = -ENODEV;
+		ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
+		if (ret) {
 			dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
 			goto add_adapter_failed;
 		}
@@ -239,8 +247,7 @@
 	return 0;
 
 add_adapter_failed:
-	for (; i > 0; i--)
-		i2c_del_mux_adapter(mux->adap[i - 1]);
+	i2c_mux_del_adapters(muxc);
 	i = mux->data.n_gpios;
 err_request_gpio:
 	for (; i > 0; i--)
@@ -253,16 +260,16 @@
 
 static int i2c_mux_gpio_remove(struct platform_device *pdev)
 {
-	struct gpiomux *mux = platform_get_drvdata(pdev);
+	struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
+	struct gpiomux *mux = i2c_mux_priv(muxc);
 	int i;
 
-	for (i = 0; i < mux->data.n_values; i++)
-		i2c_del_mux_adapter(mux->adap[i]);
+	i2c_mux_del_adapters(muxc);
 
 	for (i = 0; i < mux->data.n_gpios; i++)
 		gpio_free(mux->gpio_base + mux->data.gpios[i]);
 
-	i2c_put_adapter(mux->parent);
+	i2c_put_adapter(muxc->parent);
 
 	return 0;
 }
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index d0ba424..3cb8af6 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -73,7 +73,7 @@
 #define SELECT_DELAY_LONG	1000
 
 struct pca9541 {
-	struct i2c_adapter *mux_adap;
+	struct i2c_client *client;
 	unsigned long select_timeout;
 	unsigned long arb_timeout;
 };
@@ -217,7 +217,8 @@
  */
 static int pca9541_arbitrate(struct i2c_client *client)
 {
-	struct pca9541 *data = i2c_get_clientdata(client);
+	struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+	struct pca9541 *data = i2c_mux_priv(muxc);
 	int reg;
 
 	reg = pca9541_reg_read(client, PCA9541_CONTROL);
@@ -285,9 +286,10 @@
 	return 0;
 }
 
-static int pca9541_select_chan(struct i2c_adapter *adap, void *client, u32 chan)
+static int pca9541_select_chan(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct pca9541 *data = i2c_get_clientdata(client);
+	struct pca9541 *data = i2c_mux_priv(muxc);
+	struct i2c_client *client = data->client;
 	int ret;
 	unsigned long timeout = jiffies + ARB2_TIMEOUT;
 		/* give up after this time */
@@ -309,9 +311,11 @@
 	return -ETIMEDOUT;
 }
 
-static int pca9541_release_chan(struct i2c_adapter *adap,
-				void *client, u32 chan)
+static int pca9541_release_chan(struct i2c_mux_core *muxc, u32 chan)
 {
+	struct pca9541 *data = i2c_mux_priv(muxc);
+	struct i2c_client *client = data->client;
+
 	pca9541_release_bus(client);
 	return 0;
 }
@@ -324,20 +328,13 @@
 {
 	struct i2c_adapter *adap = client->adapter;
 	struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
+	struct i2c_mux_core *muxc;
 	struct pca9541 *data;
 	int force;
-	int ret = -ENODEV;
+	int ret;
 
 	if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA))
-		goto err;
-
-	data = kzalloc(sizeof(struct pca9541), GFP_KERNEL);
-	if (!data) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	i2c_set_clientdata(client, data);
+		return -ENODEV;
 
 	/*
 	 * I2C accesses are unprotected here.
@@ -352,34 +349,33 @@
 	force = 0;
 	if (pdata)
 		force = pdata->modes[0].adap_id;
-	data->mux_adap = i2c_add_mux_adapter(adap, &client->dev, client,
-					     force, 0, 0,
-					     pca9541_select_chan,
-					     pca9541_release_chan);
+	muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data), 0,
+			     pca9541_select_chan, pca9541_release_chan);
+	if (!muxc)
+		return -ENOMEM;
 
-	if (data->mux_adap == NULL) {
+	data = i2c_mux_priv(muxc);
+	data->client = client;
+
+	i2c_set_clientdata(client, muxc);
+
+	ret = i2c_mux_add_adapter(muxc, force, 0, 0);
+	if (ret) {
 		dev_err(&client->dev, "failed to register master selector\n");
-		goto exit_free;
+		return ret;
 	}
 
 	dev_info(&client->dev, "registered master selector for I2C %s\n",
 		 client->name);
 
 	return 0;
-
-exit_free:
-	kfree(data);
-err:
-	return ret;
 }
 
 static int pca9541_remove(struct i2c_client *client)
 {
-	struct pca9541 *data = i2c_get_clientdata(client);
+	struct i2c_mux_core *muxc = i2c_get_clientdata(client);
 
-	i2c_del_mux_adapter(data->mux_adap);
-
-	kfree(data);
+	i2c_mux_del_adapters(muxc);
 	return 0;
 }
 
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index acfcef3..528e755 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -60,9 +60,10 @@
 
 struct pca954x {
 	enum pca_type type;
-	struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS];
 
 	u8 last_chan;		/* last register value */
+	u8 deselect;
+	struct i2c_client *client;
 };
 
 struct chip_desc {
@@ -146,10 +147,10 @@
 	return ret;
 }
 
-static int pca954x_select_chan(struct i2c_adapter *adap,
-			       void *client, u32 chan)
+static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct pca954x *data = i2c_get_clientdata(client);
+	struct pca954x *data = i2c_mux_priv(muxc);
+	struct i2c_client *client = data->client;
 	const struct chip_desc *chip = &chips[data->type];
 	u8 regval;
 	int ret = 0;
@@ -162,21 +163,24 @@
 
 	/* Only select the channel if it's different from the last channel */
 	if (data->last_chan != regval) {
-		ret = pca954x_reg_write(adap, client, regval);
+		ret = pca954x_reg_write(muxc->parent, client, regval);
 		data->last_chan = regval;
 	}
 
 	return ret;
 }
 
-static int pca954x_deselect_mux(struct i2c_adapter *adap,
-				void *client, u32 chan)
+static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct pca954x *data = i2c_get_clientdata(client);
+	struct pca954x *data = i2c_mux_priv(muxc);
+	struct i2c_client *client = data->client;
+
+	if (!(data->deselect & (1 << chan)))
+		return 0;
 
 	/* Deselect active channel */
 	data->last_chan = 0;
-	return pca954x_reg_write(adap, client, data->last_chan);
+	return pca954x_reg_write(muxc->parent, client, data->last_chan);
 }
 
 /*
@@ -191,17 +195,22 @@
 	bool idle_disconnect_dt;
 	struct gpio_desc *gpio;
 	int num, force, class;
+	struct i2c_mux_core *muxc;
 	struct pca954x *data;
 	int ret;
 
 	if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
 		return -ENODEV;
 
-	data = devm_kzalloc(&client->dev, sizeof(struct pca954x), GFP_KERNEL);
-	if (!data)
+	muxc = i2c_mux_alloc(adap, &client->dev,
+			     PCA954X_MAX_NCHANS, sizeof(*data), 0,
+			     pca954x_select_chan, pca954x_deselect_mux);
+	if (!muxc)
 		return -ENOMEM;
+	data = i2c_mux_priv(muxc);
 
-	i2c_set_clientdata(client, data);
+	i2c_set_clientdata(client, muxc);
+	data->client = client;
 
 	/* Get the mux out of reset if a reset GPIO is specified. */
 	gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_LOW);
@@ -238,16 +247,13 @@
 				/* discard unconfigured channels */
 				break;
 			idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
+			data->deselect |= (idle_disconnect_pd
+					   || idle_disconnect_dt) << num;
 		}
 
-		data->virt_adaps[num] =
-			i2c_add_mux_adapter(adap, &client->dev, client,
-				force, num, class, pca954x_select_chan,
-				(idle_disconnect_pd || idle_disconnect_dt)
-					? pca954x_deselect_mux : NULL);
+		ret = i2c_mux_add_adapter(muxc, force, num, class);
 
-		if (data->virt_adaps[num] == NULL) {
-			ret = -ENODEV;
+		if (ret) {
 			dev_err(&client->dev,
 				"failed to register multiplexed adapter"
 				" %d as bus %d\n", num, force);
@@ -263,23 +269,15 @@
 	return 0;
 
 virt_reg_failed:
-	for (num--; num >= 0; num--)
-		i2c_del_mux_adapter(data->virt_adaps[num]);
+	i2c_mux_del_adapters(muxc);
 	return ret;
 }
 
 static int pca954x_remove(struct i2c_client *client)
 {
-	struct pca954x *data = i2c_get_clientdata(client);
-	const struct chip_desc *chip = &chips[data->type];
-	int i;
+	struct i2c_mux_core *muxc = i2c_get_clientdata(client);
 
-	for (i = 0; i < chip->nchans; ++i)
-		if (data->virt_adaps[i]) {
-			i2c_del_mux_adapter(data->virt_adaps[i]);
-			data->virt_adaps[i] = NULL;
-		}
-
+	i2c_mux_del_adapters(muxc);
 	return 0;
 }
 
@@ -287,7 +285,8 @@
 static int pca954x_resume(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	struct pca954x *data = i2c_get_clientdata(client);
+	struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+	struct pca954x *data = i2c_mux_priv(muxc);
 
 	data->last_chan = 0;
 	return i2c_smbus_write_byte(client, 0);
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index b5a982b..35bb775 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -24,36 +24,32 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include "../../pinctrl/core.h"
 
 struct i2c_mux_pinctrl {
-	struct device *dev;
 	struct i2c_mux_pinctrl_platform_data *pdata;
 	struct pinctrl *pinctrl;
 	struct pinctrl_state **states;
 	struct pinctrl_state *state_idle;
-	struct i2c_adapter *parent;
-	struct i2c_adapter **busses;
 };
 
-static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
-				  u32 chan)
+static int i2c_mux_pinctrl_select(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct i2c_mux_pinctrl *mux = data;
+	struct i2c_mux_pinctrl *mux = i2c_mux_priv(muxc);
 
 	return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
 }
 
-static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
-				    u32 chan)
+static int i2c_mux_pinctrl_deselect(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct i2c_mux_pinctrl *mux = data;
+	struct i2c_mux_pinctrl *mux = i2c_mux_priv(muxc);
 
 	return pinctrl_select_state(mux->pinctrl, mux->state_idle);
 }
 
 #ifdef CONFIG_OF
 static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
-				struct platform_device *pdev)
+				    struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	int num_names, i, ret;
@@ -64,15 +60,12 @@
 		return 0;
 
 	mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
-	if (!mux->pdata) {
-		dev_err(mux->dev,
-			"Cannot allocate i2c_mux_pinctrl_platform_data\n");
+	if (!mux->pdata)
 		return -ENOMEM;
-	}
 
 	num_names = of_property_count_strings(np, "pinctrl-names");
 	if (num_names < 0) {
-		dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+		dev_err(&pdev->dev, "Cannot parse pinctrl-names: %d\n",
 			num_names);
 		return num_names;
 	}
@@ -80,23 +73,22 @@
 	mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
 		sizeof(*mux->pdata->pinctrl_states) * num_names,
 		GFP_KERNEL);
-	if (!mux->pdata->pinctrl_states) {
-		dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
+	if (!mux->pdata->pinctrl_states)
 		return -ENOMEM;
-	}
 
 	for (i = 0; i < num_names; i++) {
 		ret = of_property_read_string_index(np, "pinctrl-names", i,
 			&mux->pdata->pinctrl_states[mux->pdata->bus_count]);
 		if (ret < 0) {
-			dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+			dev_err(&pdev->dev, "Cannot parse pinctrl-names: %d\n",
 				ret);
 			return ret;
 		}
 		if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
 			    "idle")) {
 			if (i != num_names - 1) {
-				dev_err(mux->dev, "idle state must be last\n");
+				dev_err(&pdev->dev,
+					"idle state must be last\n");
 				return -EINVAL;
 			}
 			mux->pdata->pinctrl_state_idle = "idle";
@@ -107,13 +99,13 @@
 
 	adapter_np = of_parse_phandle(np, "i2c-parent", 0);
 	if (!adapter_np) {
-		dev_err(mux->dev, "Cannot parse i2c-parent\n");
+		dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
 		return -ENODEV;
 	}
 	adapter = of_find_i2c_adapter_by_node(adapter_np);
 	of_node_put(adapter_np);
 	if (!adapter) {
-		dev_err(mux->dev, "Cannot find parent bus\n");
+		dev_err(&pdev->dev, "Cannot find parent bus\n");
 		return -EPROBE_DEFER;
 	}
 	mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
@@ -129,21 +121,38 @@
 }
 #endif
 
+static struct i2c_adapter *i2c_mux_pinctrl_root_adapter(
+	struct pinctrl_state *state)
+{
+	struct i2c_adapter *root = NULL;
+	struct pinctrl_setting *setting;
+	struct i2c_adapter *pin_root;
+
+	list_for_each_entry(setting, &state->settings, node) {
+		pin_root = i2c_root_adapter(setting->pctldev->dev);
+		if (!pin_root)
+			return NULL;
+		if (!root)
+			root = pin_root;
+		else if (root != pin_root)
+			return NULL;
+	}
+
+	return root;
+}
+
 static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
 {
+	struct i2c_mux_core *muxc;
 	struct i2c_mux_pinctrl *mux;
-	int (*deselect)(struct i2c_adapter *, void *, u32);
+	struct i2c_adapter *root;
 	int i, ret;
 
 	mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
 	if (!mux) {
-		dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
 		ret = -ENOMEM;
 		goto err;
 	}
-	platform_set_drvdata(pdev, mux);
-
-	mux->dev = &pdev->dev;
 
 	mux->pdata = dev_get_platdata(&pdev->dev);
 	if (!mux->pdata) {
@@ -166,14 +175,15 @@
 		goto err;
 	}
 
-	mux->busses = devm_kzalloc(&pdev->dev,
-				   sizeof(*mux->busses) * mux->pdata->bus_count,
-				   GFP_KERNEL);
-	if (!mux->busses) {
-		dev_err(&pdev->dev, "Cannot allocate busses\n");
+	muxc = i2c_mux_alloc(NULL, &pdev->dev, mux->pdata->bus_count, 0, 0,
+			     i2c_mux_pinctrl_select, NULL);
+	if (!muxc) {
 		ret = -ENOMEM;
 		goto err;
 	}
+	muxc->priv = mux;
+
+	platform_set_drvdata(pdev, muxc);
 
 	mux->pinctrl = devm_pinctrl_get(&pdev->dev);
 	if (IS_ERR(mux->pinctrl)) {
@@ -184,13 +194,13 @@
 	for (i = 0; i < mux->pdata->bus_count; i++) {
 		mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
 						mux->pdata->pinctrl_states[i]);
-			if (IS_ERR(mux->states[i])) {
-				ret = PTR_ERR(mux->states[i]);
-				dev_err(&pdev->dev,
-					"Cannot look up pinctrl state %s: %d\n",
-					mux->pdata->pinctrl_states[i], ret);
-				goto err;
-			}
+		if (IS_ERR(mux->states[i])) {
+			ret = PTR_ERR(mux->states[i]);
+			dev_err(&pdev->dev,
+				"Cannot look up pinctrl state %s: %d\n",
+				mux->pdata->pinctrl_states[i], ret);
+			goto err;
+		}
 	}
 	if (mux->pdata->pinctrl_state_idle) {
 		mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
@@ -203,29 +213,39 @@
 			goto err;
 		}
 
-		deselect = i2c_mux_pinctrl_deselect;
-	} else {
-		deselect = NULL;
+		muxc->deselect = i2c_mux_pinctrl_deselect;
 	}
 
-	mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
-	if (!mux->parent) {
+	muxc->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
+	if (!muxc->parent) {
 		dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
 			mux->pdata->parent_bus_num);
 		ret = -EPROBE_DEFER;
 		goto err;
 	}
 
+	root = i2c_root_adapter(&muxc->parent->dev);
+
+	muxc->mux_locked = true;
+	for (i = 0; i < mux->pdata->bus_count; i++) {
+		if (root != i2c_mux_pinctrl_root_adapter(mux->states[i])) {
+			muxc->mux_locked = false;
+			break;
+		}
+	}
+	if (muxc->mux_locked && mux->pdata->pinctrl_state_idle &&
+	    root != i2c_mux_pinctrl_root_adapter(mux->state_idle))
+		muxc->mux_locked = false;
+
+	if (muxc->mux_locked)
+		dev_info(&pdev->dev, "mux-locked i2c mux\n");
+
 	for (i = 0; i < mux->pdata->bus_count; i++) {
 		u32 bus = mux->pdata->base_bus_num ?
 				(mux->pdata->base_bus_num + i) : 0;
 
-		mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
-						     mux, bus, i, 0,
-						     i2c_mux_pinctrl_select,
-						     deselect);
-		if (!mux->busses[i]) {
-			ret = -ENODEV;
+		ret = i2c_mux_add_adapter(muxc, bus, i, 0);
+		if (ret) {
 			dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
 			goto err_del_adapter;
 		}
@@ -234,23 +254,18 @@
 	return 0;
 
 err_del_adapter:
-	for (; i > 0; i--)
-		i2c_del_mux_adapter(mux->busses[i - 1]);
-	i2c_put_adapter(mux->parent);
+	i2c_mux_del_adapters(muxc);
+	i2c_put_adapter(muxc->parent);
 err:
 	return ret;
 }
 
 static int i2c_mux_pinctrl_remove(struct platform_device *pdev)
 {
-	struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
-	int i;
+	struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
 
-	for (i = 0; i < mux->pdata->bus_count; i++)
-		i2c_del_mux_adapter(mux->busses[i]);
-
-	i2c_put_adapter(mux->parent);
-
+	i2c_mux_del_adapters(muxc);
+	i2c_put_adapter(muxc->parent);
 	return 0;
 }
 
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 5fbd5bd..6773cad 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -21,8 +21,6 @@
 #include <linux/slab.h>
 
 struct regmux {
-	struct i2c_adapter *parent;
-	struct i2c_adapter **adap; /* child busses */
 	struct i2c_mux_reg_platform_data data;
 };
 
@@ -64,18 +62,16 @@
 	return 0;
 }
 
-static int i2c_mux_reg_select(struct i2c_adapter *adap, void *data,
-			      unsigned int chan)
+static int i2c_mux_reg_select(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct regmux *mux = data;
+	struct regmux *mux = i2c_mux_priv(muxc);
 
 	return i2c_mux_reg_set(mux, chan);
 }
 
-static int i2c_mux_reg_deselect(struct i2c_adapter *adap, void *data,
-				unsigned int chan)
+static int i2c_mux_reg_deselect(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct regmux *mux = data;
+	struct regmux *mux = i2c_mux_priv(muxc);
 
 	if (mux->data.idle_in_use)
 		return i2c_mux_reg_set(mux, mux->data.idle);
@@ -85,7 +81,7 @@
 
 #ifdef CONFIG_OF
 static int i2c_mux_reg_probe_dt(struct regmux *mux,
-					struct platform_device *pdev)
+				struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct device_node *adapter_np, *child;
@@ -107,7 +103,6 @@
 	if (!adapter)
 		return -EPROBE_DEFER;
 
-	mux->parent = adapter;
 	mux->data.parent = i2c_adapter_id(adapter);
 	put_device(&adapter->dev);
 
@@ -161,7 +156,7 @@
 }
 #else
 static int i2c_mux_reg_probe_dt(struct regmux *mux,
-					struct platform_device *pdev)
+				struct platform_device *pdev)
 {
 	return 0;
 }
@@ -169,10 +164,10 @@
 
 static int i2c_mux_reg_probe(struct platform_device *pdev)
 {
+	struct i2c_mux_core *muxc;
 	struct regmux *mux;
 	struct i2c_adapter *parent;
 	struct resource *res;
-	int (*deselect)(struct i2c_adapter *, void *, u32);
 	unsigned int class;
 	int i, ret, nr;
 
@@ -180,17 +175,9 @@
 	if (!mux)
 		return -ENOMEM;
 
-	platform_set_drvdata(pdev, mux);
-
 	if (dev_get_platdata(&pdev->dev)) {
 		memcpy(&mux->data, dev_get_platdata(&pdev->dev),
 			sizeof(mux->data));
-
-		parent = i2c_get_adapter(mux->data.parent);
-		if (!parent)
-			return -EPROBE_DEFER;
-
-		mux->parent = parent;
 	} else {
 		ret = i2c_mux_reg_probe_dt(mux, pdev);
 		if (ret < 0) {
@@ -199,6 +186,10 @@
 		}
 	}
 
+	parent = i2c_get_adapter(mux->data.parent);
+	if (!parent)
+		return -EPROBE_DEFER;
+
 	if (!mux->data.reg) {
 		dev_info(&pdev->dev,
 			"Register not set, using platform resource\n");
@@ -215,55 +206,45 @@
 		return -EINVAL;
 	}
 
-	mux->adap = devm_kzalloc(&pdev->dev,
-				 sizeof(*mux->adap) * mux->data.n_values,
-				 GFP_KERNEL);
-	if (!mux->adap) {
-		dev_err(&pdev->dev, "Cannot allocate i2c_adapter structure");
+	muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
+			     i2c_mux_reg_select, NULL);
+	if (!muxc)
 		return -ENOMEM;
-	}
+	muxc->priv = mux;
+
+	platform_set_drvdata(pdev, muxc);
 
 	if (mux->data.idle_in_use)
-		deselect = i2c_mux_reg_deselect;
-	else
-		deselect = NULL;
+		muxc->deselect = i2c_mux_reg_deselect;
 
 	for (i = 0; i < mux->data.n_values; i++) {
 		nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
 		class = mux->data.classes ? mux->data.classes[i] : 0;
 
-		mux->adap[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev, mux,
-						   nr, mux->data.values[i],
-						   class, i2c_mux_reg_select,
-						   deselect);
-		if (!mux->adap[i]) {
-			ret = -ENODEV;
+		ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
+		if (ret) {
 			dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
 			goto add_adapter_failed;
 		}
 	}
 
 	dev_dbg(&pdev->dev, "%d port mux on %s adapter\n",
-		 mux->data.n_values, mux->parent->name);
+		 mux->data.n_values, muxc->parent->name);
 
 	return 0;
 
 add_adapter_failed:
-	for (; i > 0; i--)
-		i2c_del_mux_adapter(mux->adap[i - 1]);
+	i2c_mux_del_adapters(muxc);
 
 	return ret;
 }
 
 static int i2c_mux_reg_remove(struct platform_device *pdev)
 {
-	struct regmux *mux = platform_get_drvdata(pdev);
-	int i;
+	struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
 
-	for (i = 0; i < mux->data.n_values; i++)
-		i2c_del_mux_adapter(mux->adap[i]);
-
-	i2c_put_adapter(mux->parent);
+	i2c_mux_del_adapters(muxc);
+	i2c_put_adapter(muxc->parent);
 
 	return 0;
 }
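
[Editor's note] The i2c mux driver conversions above (i2c-mux-gpio, i2c-mux-pca9541, i2c-mux-pca954x, i2c-mux-pinctrl, i2c-mux-reg, and the inv_mpu6050 bypass further down) all follow the same shape: the per-channel i2c_adapter arrays and select/deselect function pointers are replaced by one i2c_mux_core obtained from i2c_mux_alloc(), channels are registered with i2c_mux_add_adapter(), and teardown collapses to i2c_mux_del_adapters(). The sketch below restates that pattern in one place; it is a minimal illustration, not a real driver. The example_* names, the two-channel count and the parent bus number are made up, while the i2c_mux_* calls are exactly the ones visible in the hunks.

/*
 * Minimal sketch of the i2c_mux_core registration pattern, assuming a
 * hypothetical two-channel mux hanging off I2C bus 0.
 */
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct example_mux {
	unsigned int nchans;
};

static int example_select(struct i2c_mux_core *muxc, u32 chan)
{
	struct example_mux *mux = i2c_mux_priv(muxc);	/* core-allocated priv */

	if (chan >= mux->nchans)
		return -EINVAL;
	/* program the hardware to route traffic to channel 'chan' here */
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	struct i2c_adapter *parent = i2c_get_adapter(0);	/* assumed parent bus */
	struct i2c_mux_core *muxc;
	struct example_mux *mux;
	unsigned int i;
	int ret;

	if (!parent)
		return -EPROBE_DEFER;

	/* passing sizeof(*mux) makes the core allocate the private data */
	muxc = i2c_mux_alloc(parent, &pdev->dev, 2, sizeof(*mux), 0,
			     example_select, NULL);
	if (!muxc) {
		i2c_put_adapter(parent);
		return -ENOMEM;
	}
	mux = i2c_mux_priv(muxc);
	mux->nchans = 2;
	platform_set_drvdata(pdev, muxc);

	for (i = 0; i < mux->nchans; i++) {
		ret = i2c_mux_add_adapter(muxc, 0, i, 0);
		if (ret) {
			i2c_mux_del_adapters(muxc);
			i2c_put_adapter(parent);
			return ret;
		}
	}
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	struct i2c_mux_core *muxc = platform_get_drvdata(pdev);

	i2c_mux_del_adapters(muxc);
	i2c_put_adapter(muxc->parent);
	return 0;
}

static struct platform_driver example_mux_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name = "example-i2c-mux",
	},
};
module_platform_driver(example_mux_driver);
MODULE_LICENSE("GPL");

The core owning the child-adapter bookkeeping is what lets every remove() and error path above shrink to a single i2c_mux_del_adapters() call; drivers that only need a deselect callback in some configurations (gpio, reg) simply assign muxc->deselect after allocation, as the hunks show.
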
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 9f0a48e..80e933b 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -451,7 +451,7 @@
 	return ret;
 }
 
-static const struct ide_port_info icside_v6_port_info __initconst = {
+static const struct ide_port_info icside_v6_port_info = {
 	.init_dma		= icside_dma_off_init,
 	.port_ops		= &icside_v6_no_dma_port_ops,
 	.host_flags		= IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 37a8a90..05dbcce 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -522,7 +522,7 @@
 static void update_flush(ide_drive_t *drive)
 {
 	u16 *id = drive->id;
-	unsigned flush = 0;
+	bool wc = false;
 
 	if (drive->dev_flags & IDE_DFLAG_WCACHE) {
 		unsigned long long capacity;
@@ -546,12 +546,12 @@
 		       drive->name, barrier ? "" : "not ");
 
 		if (barrier) {
-			flush = REQ_FLUSH;
+			wc = true;
 			blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
 		}
 	}
 
-	blk_queue_flush(drive->queue, flush);
+	blk_queue_write_cache(drive->queue, wc, false);
 }
 
 ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 8012e43..46427ea 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -325,6 +325,8 @@
 
 	clk_enable(clk);
 	rate = clk_get_rate(clk);
+	if (!rate)
+		return -EINVAL;
 
 	/* NOTE:  round *down* to meet minimum timings; we count in clocks */
 	ideclk_period = 1000000000UL / rate;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index ba947df..c966492 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -660,6 +660,35 @@
 		.enter = NULL }
 };
 
+static struct cpuidle_state skx_cstates[] = {
+	{
+		.name = "C1-SKX",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00),
+		.exit_latency = 2,
+		.target_residency = 2,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C1E-SKX",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01),
+		.exit_latency = 10,
+		.target_residency = 20,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C6-SKX",
+		.desc = "MWAIT 0x20",
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 133,
+		.target_residency = 600,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.enter = NULL }
+};
+
 static struct cpuidle_state atom_cstates[] = {
 	{
 		.name = "C1E-ATM",
@@ -737,6 +766,67 @@
 		.enter = NULL }
 };
 
+static struct cpuidle_state bxt_cstates[] = {
+	{
+		.name = "C1-BXT",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00),
+		.exit_latency = 2,
+		.target_residency = 2,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C1E-BXT",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01),
+		.exit_latency = 10,
+		.target_residency = 20,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C6-BXT",
+		.desc = "MWAIT 0x20",
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 133,
+		.target_residency = 133,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C7s-BXT",
+		.desc = "MWAIT 0x31",
+		.flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 155,
+		.target_residency = 155,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C8-BXT",
+		.desc = "MWAIT 0x40",
+		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 1000,
+		.target_residency = 1000,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C9-BXT",
+		.desc = "MWAIT 0x50",
+		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 2000,
+		.target_residency = 2000,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C10-BXT",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 10000,
+		.target_residency = 10000,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.enter = NULL }
+};
+
 /**
  * intel_idle
  * @dev: cpuidle_device
@@ -818,8 +908,11 @@
 		 * driver in this case
 		 */
 		dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
-		if (!dev->registered)
-			intel_idle_cpu_init(hotcpu);
+		if (dev->registered)
+			break;
+
+		if (intel_idle_cpu_init(hotcpu))
+			return NOTIFY_BAD;
 
 		break;
 	}
@@ -904,6 +997,10 @@
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_skx = {
+	.state_table = skx_cstates,
+	.disable_promotion_to_c1e = true,
+};
 
 static const struct idle_cpu idle_cpu_avn = {
 	.state_table = avn_cstates,
@@ -914,6 +1011,11 @@
 	.state_table = knl_cstates,
 };
 
+static const struct idle_cpu idle_cpu_bxt = {
+	.state_table = bxt_cstates,
+	.disable_promotion_to_c1e = true,
+};
+
 #define ICPU(model, cpu) \
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
@@ -945,7 +1047,11 @@
 	ICPU(0x56, idle_cpu_bdw),
 	ICPU(0x4e, idle_cpu_skl),
 	ICPU(0x5e, idle_cpu_skl),
+	ICPU(0x8e, idle_cpu_skl),
+	ICPU(0x9e, idle_cpu_skl),
+	ICPU(0x55, idle_cpu_skx),
 	ICPU(0x57, idle_cpu_knl),
+	ICPU(0x5c, idle_cpu_bxt),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -987,22 +1093,15 @@
 	icpu = (const struct idle_cpu *)id->driver_data;
 	cpuidle_state_table = icpu->state_table;
 
-	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
-		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
-	else
-		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
-
 	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
 		" model 0x%X\n", boot_cpu_data.x86_model);
 
-	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
-		lapic_timer_reliable_states);
 	return 0;
 }
 
 /*
  * intel_idle_cpuidle_devices_uninit()
- * unregister, free cpuidle_devices
+ * Unregisters the cpuidle devices.
  */
 static void intel_idle_cpuidle_devices_uninit(void)
 {
@@ -1013,9 +1112,6 @@
 		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
 		cpuidle_unregister_device(dev);
 	}
-
-	free_percpu(intel_idle_cpuidle_devices);
-	return;
 }
 
 /*
@@ -1046,6 +1142,73 @@
 
 	/* else, 1 and 2 socket systems use default ivt_cstates */
 }
+
+/*
+ * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+ */
+
+static unsigned int irtl_ns_units[] = {
+	1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
+
+static unsigned long long irtl_2_usec(unsigned long long irtl)
+{
+	unsigned long long ns;
+
+	ns = irtl_ns_units[(irtl >> 10) & 0x3];
+
+	return div64_u64((irtl & 0x3FF) * ns, 1000);
+}
+/*
+ * bxt_idle_state_table_update(void)
+ *
+ * On BXT, we trust the IRTL to show the definitive maximum latency.
+ * We use the same value for target_residency.
+ */
+static void bxt_idle_state_table_update(void)
+{
+	unsigned long long msr;
+
+	rdmsrl(MSR_PKGC6_IRTL, msr);
+	if (msr) {
+		unsigned int usec = irtl_2_usec(msr);
+
+		bxt_cstates[2].exit_latency = usec;
+		bxt_cstates[2].target_residency = usec;
+	}
+
+	rdmsrl(MSR_PKGC7_IRTL, msr);
+	if (msr) {
+		unsigned int usec = irtl_2_usec(msr);
+
+		bxt_cstates[3].exit_latency = usec;
+		bxt_cstates[3].target_residency = usec;
+	}
+
+	rdmsrl(MSR_PKGC8_IRTL, msr);
+	if (msr) {
+		unsigned int usec = irtl_2_usec(msr);
+
+		bxt_cstates[4].exit_latency = usec;
+		bxt_cstates[4].target_residency = usec;
+	}
+
+	rdmsrl(MSR_PKGC9_IRTL, msr);
+	if (msr) {
+		unsigned int usec = irtl_2_usec(msr);
+
+		bxt_cstates[5].exit_latency = usec;
+		bxt_cstates[5].target_residency = usec;
+	}
+
+	rdmsrl(MSR_PKGC10_IRTL, msr);
+	if (msr) {
+		unsigned int usec = irtl_2_usec(msr);
+
+		bxt_cstates[6].exit_latency = usec;
+		bxt_cstates[6].target_residency = usec;
+	}
+
+}
 /*
  * sklh_idle_state_table_update(void)
  *
@@ -1101,6 +1264,9 @@
 	case 0x3e: /* IVT */
 		ivt_idle_state_table_update();
 		break;
+	case 0x5c: /* BXT */
+		bxt_idle_state_table_update();
+		break;
 	case 0x5e: /* SKL-H */
 		sklh_idle_state_table_update();
 		break;
@@ -1111,7 +1277,7 @@
  * intel_idle_cpuidle_driver_init()
  * allocate, initialize cpuidle_states
  */
-static int __init intel_idle_cpuidle_driver_init(void)
+static void __init intel_idle_cpuidle_driver_init(void)
 {
 	int cstate;
 	struct cpuidle_driver *drv = &intel_idle_driver;
@@ -1163,18 +1329,10 @@
 		drv->state_count += 1;
 	}
 
-	if (icpu->auto_demotion_disable_flags)
-		on_each_cpu(auto_demotion_disable, NULL, 1);
-
 	if (icpu->byt_auto_demotion_disable_flag) {
 		wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
 		wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
 	}
-
-	if (icpu->disable_promotion_to_c1e)	/* each-cpu is redundant */
-		on_each_cpu(c1e_promotion_disable, NULL, 1);
-
-	return 0;
 }
 
 
@@ -1193,7 +1351,6 @@
 
 	if (cpuidle_register_device(dev)) {
 		pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
-		intel_idle_cpuidle_devices_uninit();
 		return -EIO;
 	}
 
@@ -1218,40 +1375,51 @@
 	if (retval)
 		return retval;
 
+	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+	if (intel_idle_cpuidle_devices == NULL)
+		return -ENOMEM;
+
 	intel_idle_cpuidle_driver_init();
 	retval = cpuidle_register_driver(&intel_idle_driver);
 	if (retval) {
 		struct cpuidle_driver *drv = cpuidle_get_driver();
 		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
 			drv ? drv->name : "none");
+		free_percpu(intel_idle_cpuidle_devices);
 		return retval;
 	}
 
-	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-	if (intel_idle_cpuidle_devices == NULL)
-		return -ENOMEM;
-
 	cpu_notifier_register_begin();
 
 	for_each_online_cpu(i) {
 		retval = intel_idle_cpu_init(i);
 		if (retval) {
+			intel_idle_cpuidle_devices_uninit();
 			cpu_notifier_register_done();
 			cpuidle_unregister_driver(&intel_idle_driver);
+			free_percpu(intel_idle_cpuidle_devices);
 			return retval;
 		}
 	}
 	__register_cpu_notifier(&cpu_hotplug_notifier);
 
+	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
+		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+	else
+		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+
 	cpu_notifier_register_done();
 
+	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
+		lapic_timer_reliable_states);
+
 	return 0;
 }
 
 static void __exit intel_idle_exit(void)
 {
-	intel_idle_cpuidle_devices_uninit();
-	cpuidle_unregister_driver(&intel_idle_driver);
+	struct cpuidle_device *dev;
+	int i;
 
 	cpu_notifier_register_begin();
 
@@ -1259,9 +1427,15 @@
 		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
 	__unregister_cpu_notifier(&cpu_hotplug_notifier);
 
+	for_each_possible_cpu(i) {
+		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+		cpuidle_unregister_device(dev);
+	}
+
 	cpu_notifier_register_done();
 
-	return;
+	cpuidle_unregister_driver(&intel_idle_driver);
+	free_percpu(intel_idle_cpuidle_devices);
 }
 
 module_init(intel_idle_init);
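
[Editor's note] The BXT table update above decodes the package IRTL MSRs with the new irtl_2_usec() helper: the low ten bits hold an interval count and the bits just above them select a nanosecond unit from irtl_ns_units[]. The standalone sketch below simply replays that arithmetic in userspace with a made-up MSR value to make the unit handling concrete; it is not kernel code, and the helper body mirrors the hunk above.

/* Userspace illustration of the irtl_2_usec() arithmetic added above. */
#include <stdio.h>

static const unsigned long long irtl_ns_units[] = {
	1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };

static unsigned long long irtl_2_usec(unsigned long long irtl)
{
	unsigned long long ns = irtl_ns_units[(irtl >> 10) & 0x3];

	return (irtl & 0x3FF) * ns / 1000;
}

int main(void)
{
	/* unit index 2 (1024 ns) with a count of 100 -> prints "102 usec" */
	unsigned long long msr = (2ULL << 10) | 100;

	printf("%llu usec\n", irtl_2_usec(msr));
	return 0;
}
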
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index c73331f7..2072a31 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -547,7 +547,7 @@
 {
 	int ret;
 	int axis = chan->scan_index;
-	unsigned int raw_val;
+	__le16 raw_val;
 
 	mutex_lock(&data->mutex);
 	ret = bmc150_accel_set_power_state(data, true);
@@ -557,14 +557,14 @@
 	}
 
 	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
-			       &raw_val, 2);
+			       &raw_val, sizeof(raw_val));
 	if (ret < 0) {
 		dev_err(data->dev, "Error reading axis %d\n", axis);
 		bmc150_accel_set_power_state(data, false);
 		mutex_unlock(&data->mutex);
 		return ret;
 	}
-	*val = sign_extend32(raw_val >> chan->scan_type.shift,
+	*val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
 			     chan->scan_type.realbits - 1);
 	ret = bmc150_accel_set_power_state(data, false);
 	mutex_unlock(&data->mutex);
@@ -988,6 +988,7 @@
 		.realbits = (bits),					\
 		.storagebits = 16,					\
 		.shift = 16 - (bits),					\
+		.endianness = IIO_LE,					\
 	},								\
 	.event_spec = &bmc150_accel_event,				\
 	.num_event_specs = 1						\
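
[Editor's note] The bmc150 change above (and the matching bmg160 hunk further down) types the raw register pair as __le16, converts it with le16_to_cpu() before shifting and sign-extending, and marks the scan type IIO_LE so buffered data is described correctly on big-endian hosts. The userspace sketch below walks the same decode by hand for a hypothetical 12-bit sample packed in the high bits, mirroring the ".shift = 16 - (bits)" channel macro; sign_extend32() is reimplemented locally only for the example.

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's sign_extend32() helper */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint8_t regs[2] = { 0xf0, 0xff };		/* LSB, MSB as read from the chip */
	uint16_t raw = regs[0] | (regs[1] << 8);	/* explicit little-endian decode */
	int bits = 12, shift = 16 - bits;

	/* 12-bit sample in the high bits: 0xfff0 >> 4 = 0xfff, i.e. -1 */
	printf("%d\n", (int)sign_extend32(raw >> shift, bits - 1));
	return 0;
}

Declaring the buffer as __le16 also lets sparse flag any path that forgets the conversion, which is presumably how this kind of endianness breakage gets caught.
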
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index af4aea7..82c718c 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -134,6 +134,7 @@
 config AT91_SAMA5D2_ADC
 	tristate "Atmel AT91 SAMA5D2 ADC"
 	depends on ARCH_AT91 || COMPILE_TEST
+	depends on HAS_IOMEM
 	help
 	  Say yes here to build support for Atmel SAMA5D2 ADC which is
 	  available on SAMA5D2 SoC family.
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index dbee13a..2e154cb 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -451,6 +451,8 @@
 	if (ret)
 		goto vref_disable;
 
+	platform_set_drvdata(pdev, indio_dev);
+
 	ret = iio_device_register(indio_dev);
 	if (ret < 0)
 		goto per_clk_disable_unprepare;
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 929508e..998dc3c 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1386,7 +1386,7 @@
 	},
 	[max11644] = {
 		.bits = 12,
-		.int_vref_mv = 2048,
+		.int_vref_mv = 4096,
 		.mode_list = max11644_mode_list,
 		.num_modes = ARRAY_SIZE(max11644_mode_list),
 		.default_mode = s0to1,
@@ -1396,7 +1396,7 @@
 	},
 	[max11645] = {
 		.bits = 12,
-		.int_vref_mv = 4096,
+		.int_vref_mv = 2048,
 		.mode_list = max11644_mode_list,
 		.num_modes = ARRAY_SIZE(max11644_mode_list),
 		.default_mode = s0to1,
@@ -1406,7 +1406,7 @@
 	},
 	[max11646] = {
 		.bits = 10,
-		.int_vref_mv = 2048,
+		.int_vref_mv = 4096,
 		.mode_list = max11644_mode_list,
 		.num_modes = ARRAY_SIZE(max11644_mode_list),
 		.default_mode = s0to1,
@@ -1416,7 +1416,7 @@
 	},
 	[max11647] = {
 		.bits = 10,
-		.int_vref_mv = 4096,
+		.int_vref_mv = 2048,
 		.mode_list = max11644_mode_list,
 		.num_modes = ARRAY_SIZE(max11644_mode_list),
 		.default_mode = s0to1,
@@ -1680,6 +1680,10 @@
 	{ "max11615", max11615 },
 	{ "max11616", max11616 },
 	{ "max11617", max11617 },
+	{ "max11644", max11644 },
+	{ "max11645", max11645 },
+	{ "max11646", max11646 },
+	{ "max11647", max11647 },
 	{}
 };
 
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index bbce3b0..4dac567 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -452,7 +452,7 @@
 static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
 {
 	int ret;
-	unsigned int raw_val;
+	__le16 raw_val;
 
 	mutex_lock(&data->mutex);
 	ret = bmg160_set_power_state(data, true);
@@ -462,7 +462,7 @@
 	}
 
 	ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
-			       2);
+			       sizeof(raw_val));
 	if (ret < 0) {
 		dev_err(data->dev, "Error reading axis %d\n", axis);
 		bmg160_set_power_state(data, false);
@@ -470,7 +470,7 @@
 		return ret;
 	}
 
-	*val = sign_extend32(raw_val, 15);
+	*val = sign_extend32(le16_to_cpu(raw_val), 15);
 	ret = bmg160_set_power_state(data, false);
 	mutex_unlock(&data->mutex);
 	if (ret < 0)
@@ -733,6 +733,7 @@
 		.sign = 's',						\
 		.realbits = 16,					\
 		.storagebits = 16,					\
+		.endianness = IIO_LE,					\
 	},								\
 	.event_spec = &bmg160_event,					\
 	.num_event_specs = 1						\
@@ -780,7 +781,7 @@
 			mutex_unlock(&data->mutex);
 			goto err;
 		}
-		data->buffer[i++] = ret;
+		data->buffer[i++] = val;
 	}
 	mutex_unlock(&data->mutex);
 
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 09db893..90ab8a2d 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -238,12 +238,13 @@
 
 	mutex_lock(&data->lock);
 
-	while (cnt-- || (cnt = max30100_fifo_count(data) > 0)) {
+	while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
 		ret = max30100_read_measurement(data);
 		if (ret)
 			break;
 
 		iio_push_to_buffers(data->indio_dev, data->buffer);
+		cnt--;
 	}
 
 	mutex_unlock(&data->lock);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index a7f557a..847455a 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -9,9 +9,8 @@
 
 config INV_MPU6050_I2C
 	tristate "Invensense MPU6050 devices (I2C)"
-	depends on I2C
+	depends on I2C_MUX
 	select INV_MPU6050_IIO
-	select I2C_MUX
 	select REGMAP_I2C
 	help
 	  This driver supports the Invensense MPU6050 devices.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 2771106..f62b8bd 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -183,7 +183,7 @@
 			} else
 				return 0; /* no secondary addr, which is OK */
 		}
-		st->mux_client = i2c_new_device(st->mux_adapter, &info);
+		st->mux_client = i2c_new_device(st->muxc->adapter[0], &info);
 		if (!st->mux_client)
 			return -ENODEV;
 	}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index d192953..0c2bded 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -23,7 +23,6 @@
 #include <linux/kfifo.h>
 #include <linux/spinlock.h>
 #include <linux/iio/iio.h>
-#include <linux/i2c-mux.h>
 #include <linux/acpi.h>
 #include "inv_mpu_iio.h"
 
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index f581256..9ba1179 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -15,7 +15,6 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/i2c.h>
-#include <linux/i2c-mux.h>
 #include <linux/iio/iio.h>
 #include <linux/module.h>
 #include "inv_mpu_iio.h"
@@ -25,46 +24,16 @@
 	.val_bits = 8,
 };
 
-/*
- * The i2c read/write needs to happen in unlocked mode. As the parent
- * adapter is common. If we use locked versions, it will fail as
- * the mux adapter will lock the parent i2c adapter, while calling
- * select/deselect functions.
- */
-static int inv_mpu6050_write_reg_unlocked(struct i2c_client *client,
-					  u8 reg, u8 d)
+static int inv_mpu6050_select_bypass(struct i2c_mux_core *muxc, u32 chan_id)
 {
-	int ret;
-	u8 buf[2] = {reg, d};
-	struct i2c_msg msg[1] = {
-		{
-			.addr = client->addr,
-			.flags = 0,
-			.len = sizeof(buf),
-			.buf = buf,
-		}
-	};
-
-	ret = __i2c_transfer(client->adapter, msg, 1);
-	if (ret != 1)
-		return ret;
-
-	return 0;
-}
-
-static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv,
-				     u32 chan_id)
-{
-	struct i2c_client *client = mux_priv;
-	struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+	struct iio_dev *indio_dev = i2c_mux_priv(muxc);
 	struct inv_mpu6050_state *st = iio_priv(indio_dev);
 	int ret = 0;
 
 	/* Use the same mutex which was used everywhere to protect power-op */
 	mutex_lock(&indio_dev->mlock);
 	if (!st->powerup_count) {
-		ret = inv_mpu6050_write_reg_unlocked(client,
-						     st->reg->pwr_mgmt_1, 0);
+		ret = regmap_write(st->map, st->reg->pwr_mgmt_1, 0);
 		if (ret)
 			goto write_error;
 
@@ -73,10 +42,9 @@
 	}
 	if (!ret) {
 		st->powerup_count++;
-		ret = inv_mpu6050_write_reg_unlocked(client,
-						     st->reg->int_pin_cfg,
-						     INV_MPU6050_INT_PIN_CFG |
-						     INV_MPU6050_BIT_BYPASS_EN);
+		ret = regmap_write(st->map, st->reg->int_pin_cfg,
+				   INV_MPU6050_INT_PIN_CFG |
+				   INV_MPU6050_BIT_BYPASS_EN);
 	}
 write_error:
 	mutex_unlock(&indio_dev->mlock);
@@ -84,26 +52,36 @@
 	return ret;
 }
 
-static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
-				       void *mux_priv, u32 chan_id)
+static int inv_mpu6050_deselect_bypass(struct i2c_mux_core *muxc, u32 chan_id)
 {
-	struct i2c_client *client = mux_priv;
-	struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+	struct iio_dev *indio_dev = i2c_mux_priv(muxc);
 	struct inv_mpu6050_state *st = iio_priv(indio_dev);
 
 	mutex_lock(&indio_dev->mlock);
 	/* It doesn't really matter if any of the calls fails */
-	inv_mpu6050_write_reg_unlocked(client, st->reg->int_pin_cfg,
-				       INV_MPU6050_INT_PIN_CFG);
+	regmap_write(st->map, st->reg->int_pin_cfg, INV_MPU6050_INT_PIN_CFG);
 	st->powerup_count--;
 	if (!st->powerup_count)
-		inv_mpu6050_write_reg_unlocked(client, st->reg->pwr_mgmt_1,
-					       INV_MPU6050_BIT_SLEEP);
+		regmap_write(st->map, st->reg->pwr_mgmt_1,
+			     INV_MPU6050_BIT_SLEEP);
 	mutex_unlock(&indio_dev->mlock);
 
 	return 0;
 }
 
+static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id)
+{
+	const struct acpi_device_id *id;
+
+	id = acpi_match_device(dev->driver->acpi_match_table, dev);
+	if (!id)
+		return NULL;
+
+	*chip_id = (int)id->driver_data;
+
+	return dev_name(dev);
+}
+
 /**
  *  inv_mpu_probe() - probe function.
  *  @client:          i2c client.
@@ -115,14 +93,25 @@
 			 const struct i2c_device_id *id)
 {
 	struct inv_mpu6050_state *st;
-	int result;
-	const char *name = id ? id->name : NULL;
+	int result, chip_type;
 	struct regmap *regmap;
+	const char *name;
 
 	if (!i2c_check_functionality(client->adapter,
 				     I2C_FUNC_SMBUS_I2C_BLOCK))
 		return -EOPNOTSUPP;
 
+	if (id) {
+		chip_type = (int)id->driver_data;
+		name = id->name;
+	} else if (ACPI_HANDLE(&client->dev)) {
+		name = inv_mpu_match_acpi_device(&client->dev, &chip_type);
+		if (!name)
+			return -ENODEV;
+	} else {
+		return -ENOSYS;
+	}
+
 	regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
 	if (IS_ERR(regmap)) {
 		dev_err(&client->dev, "Failed to register i2c regmap %d\n",
@@ -131,21 +120,23 @@
 	}
 
 	result = inv_mpu_core_probe(regmap, client->irq, name,
-				    NULL, id->driver_data);
+				    NULL, chip_type);
 	if (result < 0)
 		return result;
 
 	st = iio_priv(dev_get_drvdata(&client->dev));
-	st->mux_adapter = i2c_add_mux_adapter(client->adapter,
-					      &client->dev,
-					      client,
-					      0, 0, 0,
-					      inv_mpu6050_select_bypass,
-					      inv_mpu6050_deselect_bypass);
-	if (!st->mux_adapter) {
-		result = -ENODEV;
+	st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
+				 1, 0, I2C_MUX_LOCKED,
+				 inv_mpu6050_select_bypass,
+				 inv_mpu6050_deselect_bypass);
+	if (!st->muxc) {
+		result = -ENOMEM;
 		goto out_unreg_device;
 	}
+	st->muxc->priv = dev_get_drvdata(&client->dev);
+	result = i2c_mux_add_adapter(st->muxc, 0, 0, 0);
+	if (result)
+		goto out_unreg_device;
 
 	result = inv_mpu_acpi_create_mux_client(client);
 	if (result)
@@ -154,7 +145,7 @@
 	return 0;
 
 out_del_mux:
-	i2c_del_mux_adapter(st->mux_adapter);
+	i2c_mux_del_adapters(st->muxc);
 out_unreg_device:
 	inv_mpu_core_remove(&client->dev);
 	return result;
@@ -166,7 +157,7 @@
 	struct inv_mpu6050_state *st = iio_priv(indio_dev);
 
 	inv_mpu_acpi_delete_mux_client(client);
-	i2c_del_mux_adapter(st->mux_adapter);
+	i2c_mux_del_adapters(st->muxc);
 
 	return inv_mpu_core_remove(&client->dev);
 }
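
[Editor's note] The inv_mpu6050 conversion above registers its bypass mux with I2C_MUX_LOCKED, i.e. as a mux-locked mux: the parent adapter is no longer held locked around the select/deselect callbacks, which is what allows the hand-rolled unlocked __i2c_transfer() helper to be dropped in favour of plain regmap_write() calls. A compressed sketch of such a callback follows; the register name and private struct are hypothetical, the i2c_mux_priv()/regmap_write() calls are the ones used in the hunks.

#include <linux/i2c-mux.h>
#include <linux/regmap.h>

#define EXAMPLE_REG_BYPASS_EN	0x37	/* hypothetical register address */

struct example_state {
	struct regmap *map;
};

static int example_select_bypass(struct i2c_mux_core *muxc, u32 chan)
{
	struct example_state *st = i2c_mux_priv(muxc);

	/* an ordinary, locked access to the parent bus is fine here */
	return regmap_write(st->map, EXAMPLE_REG_BYPASS_EN, 1);
}
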
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index e302a49..bb3cef6 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -11,6 +11,7 @@
 * GNU General Public License for more details.
 */
 #include <linux/i2c.h>
+#include <linux/i2c-mux.h>
 #include <linux/kfifo.h>
 #include <linux/spinlock.h>
 #include <linux/iio/iio.h>
@@ -127,7 +128,7 @@
 	const struct inv_mpu6050_hw *hw;
 	enum   inv_devices chip_type;
 	spinlock_t time_stamp_lock;
-	struct i2c_adapter *mux_adapter;
+	struct i2c_mux_core *muxc;
 	struct i2c_client *mux_client;
 	unsigned int powerup_count;
 	struct inv_mpu6050_platform_data plat_data;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index dea6c43..7bcb8d8 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -46,6 +46,7 @@
 	struct regmap *regmap;
 	const struct spi_device_id *id = spi_get_device_id(spi);
 	const char *name = id ? id->name : NULL;
+	const int chip_type = id ? id->driver_data : 0;
 
 	regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
 	if (IS_ERR(regmap)) {
@@ -55,7 +56,7 @@
 	}
 
 	return inv_mpu_core_probe(regmap, spi->irq, name,
-				  inv_mpu_i2c_disable, id->driver_data);
+				  inv_mpu_i2c_disable, chip_type);
 }
 
 static int inv_mpu_remove(struct spi_device *spi)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index b976332..90462fc 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -653,6 +653,7 @@
 	unsigned int modes;
 
 	memset(config, 0, sizeof(*config));
+	config->watermark = ~0;
 
 	/*
 	 * If there is just one buffer and we are removing it there is nothing
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index f6a07dc..a6af56a 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -769,7 +769,7 @@
 	mutex_lock(&data->lock);
 	data->gesture_mode_running = 1;
 
-	while (cnt-- || (cnt = apds9660_fifo_is_empty(data) > 0)) {
+	while (cnt || (cnt = apds9660_fifo_is_empty(data) > 0)) {
 		ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE,
 				      &data->buffer, 4);
 
@@ -777,6 +777,7 @@
 			goto err_read;
 
 		iio_push_to_buffers(data->indio_dev, data->buffer);
+		cnt--;
 	}
 
 err_read:
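
[Editor's note] Both FIFO drain loops fixed above (max30100 earlier, apds9960 here) had the same shape. Roughly: with the old "while (cnt-- || (cnt = count() > 0))" form, the re-check stores only 0 or 1 in cnt because of the precedence of '>', and the leftover truth value from the post-decrement triggers one more body run against an already-empty FIFO once the device is drained. Moving the decrement into the body, as the hunks do, makes each re-check authorize exactly one read. The userspace sketch below simulates the fixed loop; fifo_count() and read_sample() are stand-ins for the device register accesses.

#include <stdio.h>

static int remaining = 3;	/* pretend three samples are queued */

static int fifo_count(void)
{
	return remaining;
}

static void read_sample(void)
{
	remaining--;
	printf("read one sample, %d left\n", remaining);
}

int main(void)
{
	int cnt = 0;

	/* fixed form: decrement in the body, never reads past an empty FIFO */
	while (cnt || (cnt = fifo_count() > 0)) {
		read_sample();
		cnt--;
	}
	return 0;
}
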
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 9c5c9ef..0e931a9 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -462,6 +462,8 @@
 	int rc;
 	int irq;
 
+	init_waitqueue_head(&data->data_ready_queue);
+	clear_bit(0, &data->flags);
 	if (client->irq)
 		irq = client->irq;
 	else
@@ -477,8 +479,6 @@
 		return rc;
 	}
 
-	init_waitqueue_head(&data->data_ready_queue);
-	clear_bit(0, &data->flags);
 	data->eoc_irq = irq;
 
 	return rc;
@@ -732,7 +732,7 @@
 	int eoc_gpio;
 	int err;
 	const char *name = NULL;
-	enum asahi_compass_chipset chipset;
+	enum asahi_compass_chipset chipset = AK_MAX_TYPE;
 
 	/* Grab and set up the supplied GPIO. */
 	if (client->dev.platform_data)
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 06a4d9c..9daca46 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -44,6 +44,7 @@
 static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
 {
 }
+#define ST_MAGN_TRIGGER_SET_STATE NULL
 #endif /* CONFIG_IIO_BUFFER */
 
 #endif /* ST_MAGN_H */
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index cb00d59..c2e257d 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -691,7 +691,8 @@
 			      NULL);
 
 		/* Couldn't find default GID location */
-		WARN_ON(ix < 0);
+		if (WARN_ON(ix < 0))
+			goto release;
 
 		zattr_type.gid_type = gid_type;
 
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 4a9aa04..7713ef0 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@
 
 #include <asm/uaccess.h>
 
+#include <rdma/ib.h>
 #include <rdma/ib_cm.h>
 #include <rdma/ib_user_cm.h>
 #include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@
 	struct ib_ucm_cmd_hdr hdr;
 	ssize_t result;
 
+	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+		return -EACCES;
+
 	if (len < sizeof(hdr))
 		return -EINVAL;
 
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index dd3bcce..c0f3826 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1574,6 +1574,9 @@
 	struct rdma_ucm_cmd_hdr hdr;
 	ssize_t ret;
 
+	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+		return -EACCES;
+
 	if (len < sizeof(hdr))
 		return -EINVAL;
 
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 28ba2cc..31f422a 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@
 
 #include <asm/uaccess.h>
 
+#include <rdma/ib.h>
+
 #include "uverbs.h"
 
 MODULE_AUTHOR("Roland Dreier");
@@ -709,6 +711,9 @@
 	int srcu_key;
 	ssize_t ret;
 
+	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+		return -EACCES;
+
 	if (count < sizeof hdr)
 		return -EINVAL;
 
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 15b8adb..b65b354 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1860,6 +1860,7 @@
 void ib_drain_qp(struct ib_qp *qp)
 {
 	ib_drain_sq(qp);
-	ib_drain_rq(qp);
+	if (!qp->srq)
+		ib_drain_rq(qp);
 }
 EXPORT_SYMBOL(ib_drain_qp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d403231..3e8431b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -367,7 +367,7 @@
  */
 static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 {
-	printk(KERN_ERR MOD "ARP failure duing connect\n");
+	printk(KERN_ERR MOD "ARP failure during connect\n");
 	kfree_skb(skb);
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 42a7b89..3234a8b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1390,6 +1390,8 @@
 	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
 	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
 	dev->ibdev.iwcm->get_qp = iwch_get_qp;
+	memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
+	       sizeof(dev->ibdev.iwcm->ifname));
 
 	ret = ib_register_device(&dev->ibdev, NULL);
 	if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b4eeb78..b0b9557 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@
 	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
 				      &cq->bar2_qid,
 				      user ? &cq->bar2_pa : NULL);
-	if (user && !cq->bar2_va) {
+	if (user && !cq->bar2_pa) {
 		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
 			pci_name(rdev->lldi.pdev), cq->cqid);
 		ret = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 124682d..7574f394 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -580,6 +580,8 @@
 	dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
 	dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
 	dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+	memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
+	       sizeof(dev->ibdev.iwcm->ifname));
 
 	ret = ib_register_device(&dev->ibdev, NULL);
 	if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e17fb5d5..e8993e4 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,6 +185,10 @@
 
 	if (pbar2_pa)
 		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
+
+	if (is_t4(rdev->lldi.adapter_type))
+		return NULL;
+
 	return rdev->bar2_kva + bar2_qoffset;
 }
 
@@ -270,7 +274,7 @@
 	/*
 	 * User mode must have bar2 access.
 	 */
-	if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
+	if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
 		pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
 			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
 		goto free_dma;
@@ -1895,13 +1899,27 @@
 void c4iw_drain_sq(struct ib_qp *ibqp)
 {
 	struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+	unsigned long flag;
+	bool need_to_wait;
 
-	wait_for_completion(&qp->sq_drained);
+	spin_lock_irqsave(&qp->lock, flag);
+	need_to_wait = !t4_sq_empty(&qp->wq);
+	spin_unlock_irqrestore(&qp->lock, flag);
+
+	if (need_to_wait)
+		wait_for_completion(&qp->sq_drained);
 }
 
 void c4iw_drain_rq(struct ib_qp *ibqp)
 {
 	struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+	unsigned long flag;
+	bool need_to_wait;
 
-	wait_for_completion(&qp->rq_drained);
+	spin_lock_irqsave(&qp->lock, flag);
+	need_to_wait = !t4_rq_empty(&qp->wq);
+	spin_unlock_irqrestore(&qp->lock, flag);
+
+	if (need_to_wait)
+		wait_for_completion(&qp->rq_drained);
 }
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 92745d7..38f917a 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1992,7 +1992,6 @@
 /**
  * i40iw_get_dst_ipv6
  */
-#if IS_ENABLED(CONFIG_IPV6)
 static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
 					    struct sockaddr_in6 *dst_addr)
 {
@@ -2008,7 +2007,6 @@
 	dst = ip6_route_output(&init_net, NULL, &fl6);
 	return dst;
 }
-#endif
 
 /**
  * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
@@ -2016,7 +2014,6 @@
  * @dst_ip: remote ip address
  * @arpindex: if there is an arp entry
  */
-#if IS_ENABLED(CONFIG_IPV6)
 static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
 					 u32 *src,
 					 u32 *dest,
@@ -2089,7 +2086,6 @@
 	dst_release(dst);
 	return rc;
 }
-#endif
 
 /**
  * i40iw_ipv4_is_loopback - check if loopback
@@ -2190,13 +2186,13 @@
 							    cm_info->loc_addr[0],
 							    cm_info->rem_addr[0],
 							    oldarpindex);
-#if IS_ENABLED(CONFIG_IPV6)
-		else
+		else if (IS_ENABLED(CONFIG_IPV6))
 			arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
 								 cm_info->loc_addr,
 								 cm_info->rem_addr,
 								 oldarpindex);
-#endif
+		else
+			arpindex = -EINVAL;
 	}
 	if (arpindex < 0) {
 		i40iw_pr_err("cm_node arpindex\n");
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 90e5af2..e41fae24 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1863,7 +1863,7 @@
 }
 
 /* client interface functions */
-static struct i40e_client_ops i40e_ops = {
+static const struct i40e_client_ops i40e_ops = {
 	.open = i40iw_open,
 	.close = i40iw_close,
 	.l2_param_change = i40iw_l2param_change,
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f014eaf..b01ef6e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1601,7 +1601,7 @@
 	else if (ret == -ENXIO)
 		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
 	else if (ret)
-		pr_err("Invalid argumant. Fail to register network rule.\n");
+		pr_err("Invalid argument. Fail to register network rule.\n");
 
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return ret;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index fd97534..81b0e1f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -419,7 +419,8 @@
 }
 
 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
+			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp,
+			      bool shrink_wqe)
 {
 	int s;
 
@@ -477,7 +478,7 @@
 	 * We set WQE size to at least 64 bytes, this way stamping
 	 * invalidates each WQE.
 	 */
-	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
+	if (shrink_wqe && dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
 	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
 	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
 	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
@@ -642,6 +643,7 @@
 {
 	int qpn;
 	int err;
+	struct ib_qp_cap backup_cap;
 	struct mlx4_ib_sqp *sqp;
 	struct mlx4_ib_qp *qp;
 	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
@@ -775,7 +777,9 @@
 				goto err;
 		}
 
-		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
+		memcpy(&backup_cap, &init_attr->cap, sizeof(backup_cap));
+		err = set_kernel_sq_size(dev, &init_attr->cap,
+					 qp_type, qp, true);
 		if (err)
 			goto err;
 
@@ -787,9 +791,20 @@
 			*qp->db.db = 0;
 		}
 
-		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) {
-			err = -ENOMEM;
-			goto err_db;
+		if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
+				   &qp->buf, gfp)) {
+			memcpy(&init_attr->cap, &backup_cap,
+			       sizeof(backup_cap));
+			err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
+						 qp, false);
+			if (err)
+				goto err_db;
+
+			if (mlx4_buf_alloc(dev->dev, qp->buf_size,
+					   PAGE_SIZE * 2, &qp->buf, gfp)) {
+				err = -ENOMEM;
+				goto err_db;
+			}
 		}
 
 		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5acf346..4cb81f6 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -530,7 +530,7 @@
 		     sizeof(struct mlx5_wqe_ctrl_seg)) /
 		     sizeof(struct mlx5_wqe_data_seg);
 	props->max_sge = min(max_rq_sg, max_sq_sg);
-	props->max_sge_rd = props->max_sge;
+	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -671,8 +671,8 @@
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_hca_vport_context *rep;
-	int max_mtu;
-	int oper_mtu;
+	u16 max_mtu;
+	u16 oper_mtu;
 	int err;
 	u8 ib_link_width_oper;
 	u8 vl_hw_cap;
@@ -1438,7 +1438,8 @@
 	if (!ft) {
 		ft = mlx5_create_auto_grouped_flow_table(ns, priority,
 							 num_entries,
-							 num_groups);
+							 num_groups,
+							 0);
 
 		if (!IS_ERR(ft)) {
 			prio->refcount = 0;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f16c818..b46c255 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -776,15 +776,6 @@
 void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
 			      unsigned long end);
-int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
-			  u8 port, struct ifla_vf_info *info);
-int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
-			      u8 port, int state);
-int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
-			 u8 port, struct ifla_vf_stats *stats);
-int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
-			u64 guid, int type);
-
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
@@ -801,6 +792,15 @@
 
 #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
+			  u8 port, struct ifla_vf_info *info);
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+			      u8 port, int state);
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+			 u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+			u64 guid, int type);
+
 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
 			       int index);
 
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3ea9e05..2b27d13 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -356,7 +356,7 @@
 /**
  * nes_nic_send
  */
-static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
+static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
@@ -413,7 +413,7 @@
 					netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
 			kfree_skb(skb);
 			nesvnic->tx_sw_dropped++;
-			return NETDEV_TX_LOCKED;
+			return false;
 		}
 		set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
 		bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
@@ -454,8 +454,7 @@
 	set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
 	nesnic->sq_head++;
 	nesnic->sq_head &= nesnic->sq_size - 1;
-
-	return NETDEV_TX_OK;
+	return true;
 }
 
 
@@ -479,7 +478,6 @@
 	u32 tso_wqe_length;
 	u32 curr_tcp_seq;
 	u32 wqe_count=1;
-	u32 send_rc;
 	struct iphdr *iph;
 	__le16 *wqe_fragment_length;
 	u32 nr_frags;
@@ -500,9 +498,6 @@
 	 *		skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
 	 */
 
-	if (!netif_carrier_ok(netdev))
-		return NETDEV_TX_OK;
-
 	if (netif_queue_stopped(netdev))
 		return NETDEV_TX_BUSY;
 
@@ -673,13 +668,11 @@
 			skb_linearize(skb);
 			skb_set_transport_header(skb, hoffset);
 			skb_set_network_header(skb, nhoffset);
-			send_rc = nes_nic_send(skb, netdev);
-			if (send_rc != NETDEV_TX_OK)
+			if (!nes_nic_send(skb, netdev))
 				return NETDEV_TX_OK;
 		}
 	} else {
-		send_rc = nes_nic_send(skb, netdev);
-		if (send_rc != NETDEV_TX_OK)
+		if (!nes_nic_send(skb, netdev))
 			return NETDEV_TX_OK;
 	}
 
@@ -689,7 +682,7 @@
 		nes_write32(nesdev->regs+NES_WQE_ALLOC,
 				(wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
 
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index e449e39..24f4a78 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
 #include <linux/export.h>
 #include <linux/uio.h>
 
+#include <rdma/ib.h>
+
 #include "qib.h"
 #include "qib_common.h"
 #include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@
 	ssize_t ret = 0;
 	void *dest;
 
+	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+		return -EACCES;
+
 	if (count < sizeof(cmd.type)) {
 		ret = -EINVAL;
 		goto bail;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index bd82a69..a9e3bcc 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1637,9 +1637,9 @@
 	spin_unlock_irqrestore(&qp->s_hlock, flags);
 	if (nreq) {
 		if (call_send)
-			rdi->driver_f.schedule_send_no_lock(qp);
-		else
 			rdi->driver_f.do_send(qp);
+		else
+			rdi->driver_f.schedule_send_no_lock(qp);
 	}
 	return err;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c8ed535..b2f4283 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -766,7 +766,7 @@
 		ipoib_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(skb);
 	} else {
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 		++tx->tx_head;
 
 		if (++priv->tx_outstanding == ipoib_sendq_size) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f0e55e4..3643d55 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -637,7 +637,7 @@
 		if (netif_queue_stopped(dev))
 			netif_wake_queue(dev);
 	} else {
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 
 		address->last_send = priv->tx_head;
 		++priv->tx_head;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 80807d6..b940ef1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1036,7 +1036,7 @@
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
 	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
-		   jiffies_to_msecs(jiffies - dev->trans_start));
+		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
 	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
 		   netif_queue_stopped(dev),
 		   priv->tx_head, priv->tx_tail);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 80b6bed..64b3d11 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -612,6 +612,7 @@
 	struct Scsi_Host *shost;
 	struct iser_conn *iser_conn = NULL;
 	struct ib_conn *ib_conn;
+	u32 max_fr_sectors;
 	u16 max_cmds;
 
 	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
@@ -632,7 +633,6 @@
 		iser_conn = ep->dd_data;
 		max_cmds = iser_conn->max_cmds;
 		shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
-		shost->max_sectors = iser_conn->scsi_max_sectors;
 
 		mutex_lock(&iser_conn->state_mutex);
 		if (iser_conn->state != ISER_CONN_UP) {
@@ -657,8 +657,6 @@
 		 */
 		shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
 			ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-		shost->max_sectors = min_t(unsigned int,
-			1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
 
 		if (iscsi_host_add(shost,
 				   ib_conn->device->ib_device->dma_device)) {
@@ -672,6 +670,15 @@
 			goto free_host;
 	}
 
+	/*
+	 * FRs or FMRs can only map up to a (device) page per entry, but if the
+	 * first entry is misaligned we'll end up using using two entries
+	 * (head and tail) for a single page worth data, so we have to drop
+	 * one segment from the calculation.
+	 */
+	max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
+	shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+
 	if (cmds_max > max_cmds) {
 		iser_info("cmds_max changed from %u to %u\n",
 			  cmds_max, max_cmds);
@@ -989,7 +996,6 @@
 	.queuecommand           = iscsi_queuecommand,
 	.change_queue_depth	= scsi_change_queue_depth,
 	.sg_tablesize           = ISCSI_ISER_DEF_SG_TABLESIZE,
-	.max_sectors            = ISER_DEF_MAX_SECTORS,
 	.cmd_per_lun            = ISER_DEF_CMD_PER_LUN,
 	.eh_abort_handler       = iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 60b30d3..411e446 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -63,7 +63,6 @@
 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
 
 static void isert_release_work(struct work_struct *work);
-static void isert_wait4flush(struct isert_conn *isert_conn);
 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -141,7 +140,7 @@
 	attr.qp_context = isert_conn;
 	attr.send_cq = comp->cq;
 	attr.recv_cq = comp->cq;
-	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
+	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
 	attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
 	isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
@@ -887,7 +886,7 @@
 		break;
 	case ISER_CONN_UP:
 		isert_conn_terminate(isert_conn);
-		isert_wait4flush(isert_conn);
+		ib_drain_qp(isert_conn->qp);
 		isert_handle_unbound_conn(isert_conn);
 		break;
 	case ISER_CONN_BOUND:
@@ -3213,36 +3212,6 @@
 	}
 }
 
-static void
-isert_beacon_done(struct ib_cq *cq, struct ib_wc *wc)
-{
-	struct isert_conn *isert_conn = wc->qp->qp_context;
-
-	isert_print_wc(wc, "beacon");
-
-	isert_info("conn %p completing wait_comp_err\n", isert_conn);
-	complete(&isert_conn->wait_comp_err);
-}
-
-static void
-isert_wait4flush(struct isert_conn *isert_conn)
-{
-	struct ib_recv_wr *bad_wr;
-	static struct ib_cqe cqe = { .done = isert_beacon_done };
-
-	isert_info("conn %p\n", isert_conn);
-
-	init_completion(&isert_conn->wait_comp_err);
-	isert_conn->beacon.wr_cqe = &cqe;
-	/* post an indication that all flush errors were consumed */
-	if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
-		isert_err("conn %p failed to post beacon", isert_conn);
-		return;
-	}
-
-	wait_for_completion(&isert_conn->wait_comp_err);
-}
-
 /**
  * isert_put_unsol_pending_cmds() - Drop commands waiting for
  *     unsolicited dataout
@@ -3288,7 +3257,7 @@
 	isert_conn_terminate(isert_conn);
 	mutex_unlock(&isert_conn->mutex);
 
-	isert_wait4flush(isert_conn);
+	ib_drain_qp(isert_conn->qp);
 	isert_put_unsol_pending_cmds(conn);
 	isert_wait4cmds(conn);
 	isert_wait4logout(isert_conn);
@@ -3300,7 +3269,7 @@
 {
 	struct isert_conn *isert_conn = conn->context;
 
-	isert_wait4flush(isert_conn);
+	ib_drain_qp(isert_conn->qp);
 	isert_put_conn(isert_conn);
 }
 
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 192788a..147900c 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -209,14 +209,12 @@
 	struct ib_qp		*qp;
 	struct isert_device	*device;
 	struct mutex		mutex;
-	struct completion	wait_comp_err;
 	struct kref		kref;
 	struct list_head	fr_pool;
 	int			fr_pool_size;
 	/* lock to protect fastreg pool */
 	spinlock_t		pool_lock;
 	struct work_struct	release_work;
-	struct ib_recv_wr       beacon;
 	bool                    logout_posted;
 	bool                    snd_w_inv;
 };
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b6bf204..369a75e 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -81,7 +81,7 @@
 
 module_param(indirect_sg_entries, uint, 0444);
 MODULE_PARM_DESC(indirect_sg_entries,
-		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
+		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
 
 module_param(allow_ext_sg, bool, 0444);
 MODULE_PARM_DESC(allow_ext_sg,
@@ -2819,7 +2819,7 @@
 	spin_unlock(&host->target_lock);
 
 	scsi_scan_target(&target->scsi_host->shost_gendev,
-			 0, target->scsi_id, SCAN_WILD_CARD, 0);
+			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
 
 	if (srp_connected_ch(target) < target->ch_count ||
 	    target->qp_in_error) {
@@ -3097,7 +3097,7 @@
 
 		case SRP_OPT_SG_TABLESIZE:
 			if (match_int(args, &token) || token < 1 ||
-					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
+					token > SG_MAX_SEGMENTS) {
 				pr_warn("bad max sg_tablesize parameter '%s'\n",
 					p);
 				goto out;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0bd3cb2..8b42401 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1264,26 +1264,40 @@
  */
 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 {
-	struct se_session *se_sess;
 	struct srpt_send_ioctx *ioctx;
-	int tag;
+	unsigned long flags;
 
 	BUG_ON(!ch);
-	se_sess = ch->sess;
 
-	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
-	if (tag < 0) {
-		pr_err("Unable to obtain tag for srpt_send_ioctx\n");
-		return NULL;
+	ioctx = NULL;
+	spin_lock_irqsave(&ch->spinlock, flags);
+	if (!list_empty(&ch->free_list)) {
+		ioctx = list_first_entry(&ch->free_list,
+					 struct srpt_send_ioctx, free_list);
+		list_del(&ioctx->free_list);
 	}
-	ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag];
-	memset(ioctx, 0, sizeof(struct srpt_send_ioctx));
-	ioctx->ch = ch;
+	spin_unlock_irqrestore(&ch->spinlock, flags);
+
+	if (!ioctx)
+		return ioctx;
+
+	BUG_ON(ioctx->ch != ch);
 	spin_lock_init(&ioctx->spinlock);
 	ioctx->state = SRPT_STATE_NEW;
+	ioctx->n_rbuf = 0;
+	ioctx->rbufs = NULL;
+	ioctx->n_rdma = 0;
+	ioctx->n_rdma_wrs = 0;
+	ioctx->rdma_wrs = NULL;
+	ioctx->mapped_sg_count = 0;
 	init_completion(&ioctx->tx_done);
-
-	ioctx->cmd.map_tag = tag;
+	ioctx->queue_status_only = false;
+	/*
+	 * transport_init_se_cmd() does not initialize all fields, so do it
+	 * here.
+	 */
+	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
+	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
 
 	return ioctx;
 }
@@ -2021,7 +2035,7 @@
 	struct ib_cm_rep_param *rep_param;
 	struct srpt_rdma_ch *ch, *tmp_ch;
 	u32 it_iu_len;
-	int ret = 0;
+	int i, ret = 0;
 	unsigned char *p;
 
 	WARN_ON_ONCE(irqs_disabled());
@@ -2143,6 +2157,12 @@
 	if (!ch->ioctx_ring)
 		goto free_ch;
 
+	INIT_LIST_HEAD(&ch->free_list);
+	for (i = 0; i < ch->rq_size; i++) {
+		ch->ioctx_ring[i]->ch = ch;
+		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
+	}
+
 	ret = srpt_create_ch_ib(ch);
 	if (ret) {
 		rej->reason = cpu_to_be32(
@@ -2173,8 +2193,7 @@
 	p = &ch->sess_name[0];
 
 try_again:
-	ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size,
-					sizeof(struct srpt_send_ioctx),
+	ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
 					TARGET_PROT_NORMAL, p, ch, NULL);
 	if (IS_ERR(ch->sess)) {
 		pr_info("Rejected login because no ACL has been"
@@ -2881,7 +2900,7 @@
 	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
 				struct srpt_send_ioctx, cmd);
 	struct srpt_rdma_ch *ch = ioctx->ch;
-	struct se_session *se_sess = ch->sess;
+	unsigned long flags;
 
 	WARN_ON(ioctx->state != SRPT_STATE_DONE);
 	WARN_ON(ioctx->mapped_sg_count != 0);
@@ -2892,7 +2911,9 @@
 		ioctx->n_rbuf = 0;
 	}
 
-	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
+	spin_lock_irqsave(&ch->spinlock, flags);
+	list_add(&ioctx->free_list, &ch->free_list);
+	spin_unlock_irqrestore(&ch->spinlock, flags);
 }
 
 /**
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index ca288f0..af9b8b5 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -179,6 +179,7 @@
  * struct srpt_send_ioctx - SRPT send I/O context.
  * @ioctx:       See above.
  * @ch:          Channel pointer.
+ * @free_list:   Node in srpt_rdma_ch.free_list.
  * @n_rbuf:      Number of data buffers in the received SRP command.
  * @rbufs:       Pointer to SRP data buffer array.
  * @single_rbuf: SRP data buffer if the command has only a single buffer.
@@ -201,6 +202,7 @@
 	struct srp_direct_buf	*rbufs;
 	struct srp_direct_buf	single_rbuf;
 	struct scatterlist	*sg;
+	struct list_head	free_list;
 	spinlock_t		spinlock;
 	enum srpt_command_state	state;
 	struct se_cmd		cmd;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 6f8b084..3d8ff09 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -143,9 +143,9 @@
 
 #include <linux/i8253.h>
 
-#define GET_TIME(x)	do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
-#define DELTA(x,y)	(cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
-#define TIME_NAME	(cpu_has_tsc?"TSC":"PIT")
+#define GET_TIME(x)	do { if (boot_cpu_has(X86_FEATURE_TSC)) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
+#define DELTA(x,y)	(boot_cpu_has(X86_FEATURE_TSC) ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
+#define TIME_NAME	(boot_cpu_has(X86_FEATURE_TSC)?"TSC":"PIT")
 static unsigned int get_time_pit(void)
 {
         unsigned long flags;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index e8a84d1..1142a93 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -153,6 +153,7 @@
 	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
+	{ 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
 	{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
 	{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
@@ -304,6 +305,7 @@
 	XPAD_XBOX360_VENDOR(0x046d),		/* Logitech X-Box 360 style controllers */
 	XPAD_XBOX360_VENDOR(0x0738),		/* Mad Catz X-Box 360 controllers */
 	{ USB_DEVICE(0x0738, 0x4540) },		/* Mad Catz Beat Pad */
+	XPAD_XBOXONE_VENDOR(0x0738),		/* Mad Catz FightStick TE 2 */
 	XPAD_XBOX360_VENDOR(0x0e6f),		/* 0x0e6f X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 21a62d0..53fe9a3 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -73,7 +73,7 @@
 #ifdef CONFIG_GPIOLIB
 static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
 {
-	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
+	struct adp5588_kpad *kpad = gpiochip_get_data(chip);
 	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
 	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 	int val;
@@ -93,7 +93,7 @@
 static void adp5588_gpio_set_value(struct gpio_chip *chip,
 				   unsigned off, int val)
 {
-	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
+	struct adp5588_kpad *kpad = gpiochip_get_data(chip);
 	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
 	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 
@@ -112,7 +112,7 @@
 
 static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
 {
-	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
+	struct adp5588_kpad *kpad = gpiochip_get_data(chip);
 	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
 	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 	int ret;
@@ -130,7 +130,7 @@
 static int adp5588_gpio_direction_output(struct gpio_chip *chip,
 					 unsigned off, int val)
 {
-	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
+	struct adp5588_kpad *kpad = gpiochip_get_data(chip);
 	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
 	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 	int ret;
@@ -210,7 +210,7 @@
 
 	mutex_init(&kpad->gpio_lock);
 
-	error = gpiochip_add(&kpad->gc);
+	error = gpiochip_add_data(&kpad->gc, kpad);
 	if (error) {
 		dev_err(dev, "gpiochip_add failed, err: %d\n", error);
 		return error;
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index c01a1d6..32d94c6 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -387,7 +387,7 @@
 #ifdef CONFIG_GPIOLIB
 static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off)
 {
-	struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
+	struct adp5589_kpad *kpad = gpiochip_get_data(chip);
 	unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
 	unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
 
@@ -399,7 +399,7 @@
 static void adp5589_gpio_set_value(struct gpio_chip *chip,
 				   unsigned off, int val)
 {
-	struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
+	struct adp5589_kpad *kpad = gpiochip_get_data(chip);
 	unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
 	unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
 
@@ -418,7 +418,7 @@
 
 static int adp5589_gpio_direction_input(struct gpio_chip *chip, unsigned off)
 {
-	struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
+	struct adp5589_kpad *kpad = gpiochip_get_data(chip);
 	unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
 	unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
 	int ret;
@@ -438,7 +438,7 @@
 static int adp5589_gpio_direction_output(struct gpio_chip *chip,
 					 unsigned off, int val)
 {
-	struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
+	struct adp5589_kpad *kpad = gpiochip_get_data(chip);
 	unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
 	unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
 	int ret;
@@ -525,9 +525,9 @@
 
 	mutex_init(&kpad->gpio_lock);
 
-	error = gpiochip_add(&kpad->gc);
+	error = gpiochip_add_data(&kpad->gc, kpad);
 	if (error) {
-		dev_err(dev, "gpiochip_add failed, err: %d\n", error);
+		dev_err(dev, "gpiochip_add_data() failed, err: %d\n", error);
 		return error;
 	}
 
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index e0d72c8..146b26f 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -64,31 +64,6 @@
 static unsigned int *row_gpios;
 static unsigned int *col_gpios;
 
-#ifdef CONFIG_ARCH_OMAP2
-static void set_col_gpio_val(struct omap_kp *omap_kp, u8 value)
-{
-	int col;
-
-	for (col = 0; col < omap_kp->cols; col++)
-		gpio_set_value(col_gpios[col], value & (1 << col));
-}
-
-static u8 get_row_gpio_val(struct omap_kp *omap_kp)
-{
-	int row;
-	u8 value = 0;
-
-	for (row = 0; row < omap_kp->rows; row++) {
-		if (gpio_get_value(row_gpios[row]))
-			value |= (1 << row);
-	}
-	return value;
-}
-#else
-#define		set_col_gpio_val(x, y)	do {} while (0)
-#define		get_row_gpio_val(x)	0
-#endif
-
 static irqreturn_t omap_kp_interrupt(int irq, void *dev_id)
 {
 	/* disable keyboard interrupt and schedule for handling */
@@ -133,7 +108,6 @@
 	unsigned int row_shift = get_count_order(omap_kp_data->cols);
 	unsigned char new_state[8], changed, key_down = 0;
 	int col, row;
-	int spurious = 0;
 
 	/* check for any changes */
 	omap_kp_scan_keypad(omap_kp_data, new_state);
@@ -170,12 +144,9 @@
 	memcpy(keypad_state, new_state, sizeof(keypad_state));
 
 	if (key_down) {
-		int delay = HZ / 20;
 		/* some key is pressed - keep irq disabled and use timer
 		 * to poll the keypad */
-		if (spurious)
-			delay = 2 * HZ;
-		mod_timer(&omap_kp_data->timer, jiffies + delay);
+		mod_timer(&omap_kp_data->timer, jiffies + HZ / 20);
 	} else {
 		/* enable interrupts */
 		omap_writew(0, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
@@ -216,25 +187,6 @@
 
 static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, omap_kp_enable_show, omap_kp_enable_store);
 
-#ifdef CONFIG_PM
-static int omap_kp_suspend(struct platform_device *dev, pm_message_t state)
-{
-	/* Nothing yet */
-
-	return 0;
-}
-
-static int omap_kp_resume(struct platform_device *dev)
-{
-	/* Nothing yet */
-
-	return 0;
-}
-#else
-#define omap_kp_suspend	NULL
-#define omap_kp_resume	NULL
-#endif
-
 static int omap_kp_probe(struct platform_device *pdev)
 {
 	struct omap_kp *omap_kp;
@@ -371,8 +323,6 @@
 static struct platform_driver omap_kp_driver = {
 	.probe		= omap_kp_probe,
 	.remove		= omap_kp_remove,
-	.suspend	= omap_kp_suspend,
-	.resume		= omap_kp_resume,
 	.driver		= {
 		.name	= "omap-keypad",
 	},
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index bbcccd6..323a0fb 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -61,9 +61,9 @@
 	unsigned short	keymap[TWL4030_KEYMAP_SIZE];
 	u16		kp_state[TWL4030_MAX_ROWS];
 	bool		autorepeat;
-	unsigned	n_rows;
-	unsigned	n_cols;
-	unsigned	irq;
+	unsigned int	n_rows;
+	unsigned int	n_cols;
+	unsigned int	irq;
 
 	struct device *dbg_dev;
 	struct input_dev *input;
@@ -110,7 +110,7 @@
 #define KEYP_CTRL_KBD_ON		BIT(6)
 
 /* KEYP_DEB, KEYP_LONG_KEY, KEYP_TIMEOUT_x*/
-#define KEYP_PERIOD_US(t, prescale)	((t) / (31 << (prescale + 1)) - 1)
+#define KEYP_PERIOD_US(t, prescale)	((t) / (31 << ((prescale) + 1)) - 1)
 
 /* KEYP_LK_PTV_REG Fields */
 #define KEYP_LK_PTV_PTV_SHIFT		5
@@ -162,9 +162,10 @@
 
 static inline u16 twl4030_col_xlate(struct twl4030_keypad *kp, u8 col)
 {
-	/* If all bits in a row are active for all coloumns then
+	/*
+	 * If all bits in a row are active for all columns then
 	 * we have that row line connected to gnd. Mark this
-	 * key on as if it was on matrix position n_cols (ie
+	 * key on as if it was on matrix position n_cols (i.e.
 	 * one higher than the size of the matrix).
 	 */
 	if (col == 0xFF)
@@ -209,9 +210,9 @@
 	u16 new_state[TWL4030_MAX_ROWS];
 	int col, row;
 
-	if (release_all)
+	if (release_all) {
 		memset(new_state, 0, sizeof(new_state));
-	else {
+	} else {
 		/* check for any changes */
 		int ret = twl4030_read_kp_matrix_state(kp, new_state);
 
@@ -262,8 +263,10 @@
 	/* Read & Clear TWL4030 pending interrupt */
 	ret = twl4030_kpread(kp, &reg, KEYP_ISR1, 1);
 
-	/* Release all keys if I2C has gone bad or
-	 * the KEYP has gone to idle state */
+	/*
+	 * Release all keys if I2C has gone bad or
+	 * the KEYP has gone to idle state.
+	 */
 	if (ret >= 0 && (reg & KEYP_IMR1_KP))
 		twl4030_kp_scan(kp, false);
 	else
@@ -283,7 +286,8 @@
 	if (twl4030_kpwrite_u8(kp, reg, KEYP_CTRL) < 0)
 		return -EIO;
 
-	/* NOTE:  we could use sih_setup() here to package keypad
+	/*
+	 * NOTE: we could use sih_setup() here to package keypad
 	 * event sources as four different IRQs ... but we don't.
 	 */
 
@@ -312,7 +316,7 @@
 
 	/*
 	 * Enable Clear-on-Read; disable remembering events that fire
-	 * after the IRQ but before our handler acks (reads) them,
+	 * after the IRQ but before our handler acks (reads) them.
 	 */
 	reg = TWL4030_SIH_CTRL_COR_MASK | TWL4030_SIH_CTRL_PENDDIS_MASK;
 	if (twl4030_kpwrite_u8(kp, reg, KEYP_SIH_CTRL) < 0)
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index d5994a7..9829363 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -178,7 +178,6 @@
 	input_set_drvdata(haptics->input_dev, haptics);
 
 	haptics->input_dev->name = "arizona:haptics";
-	haptics->input_dev->dev.parent = pdev->dev.parent;
 	haptics->input_dev->close = arizona_haptics_close;
 	__set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
 
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 9365535..9cc6d05 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -76,8 +76,8 @@
 
 	BUZZER_ON = 1 << 5,
 
-	/* up to 256 normal keys, up to 16 special keys */
-	KEYMAP_SIZE = 256 + 16,
+	/* up to 256 normal keys, up to 15 special key combinations */
+	KEYMAP_SIZE = 256 + 15,
 };
 
 /* CM109 protocol packet */
@@ -139,7 +139,7 @@
 {
 	if (code > 0xff) {
 		switch (code - 0xff) {
-		case RECORD_MUTE:	return KEY_MUTE;
+		case RECORD_MUTE:	return KEY_MICMUTE;
 		case PLAYBACK_MUTE:	return KEY_MUTE;
 		case VOLUME_DOWN:	return KEY_VOLUMEDOWN;
 		case VOLUME_UP:		return KEY_VOLUMEUP;
@@ -312,6 +312,32 @@
 	input_sync(idev);
 }
 
+/*
+ * Converts data of special key presses (volume, mute) into events
+ * for the input subsystem, sends press-n-release for mute keys.
+ */
+static void cm109_report_special(struct cm109_dev *dev)
+{
+	static const u8 autorelease = RECORD_MUTE | PLAYBACK_MUTE;
+	struct input_dev *idev = dev->idev;
+	u8 data = dev->irq_data->byte[HID_IR0];
+	unsigned short keycode;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		keycode = dev->keymap[0xff + BIT(i)];
+		if (keycode == KEY_RESERVED)
+			continue;
+
+		input_report_key(idev, keycode, data & BIT(i));
+		if (data & autorelease & BIT(i)) {
+			input_sync(idev);
+			input_report_key(idev, keycode, 0);
+		}
+	}
+	input_sync(idev);
+}
+
 /******************************************************************************
  * CM109 usb communication interface
  *****************************************************************************/
@@ -340,6 +366,7 @@
 	struct cm109_dev *dev = urb->context;
 	const int status = urb->status;
 	int error;
+	unsigned long flags;
 
 	dev_dbg(&dev->intf->dev, "### URB IRQ: [0x%02x 0x%02x 0x%02x 0x%02x] keybit=0x%02x\n",
 	     dev->irq_data->byte[0],
@@ -357,10 +384,7 @@
 	}
 
 	/* Special keys */
-	if (dev->irq_data->byte[HID_IR0] & 0x0f) {
-		const int code = (dev->irq_data->byte[HID_IR0] & 0x0f);
-		report_key(dev, dev->keymap[0xff + code]);
-	}
+	cm109_report_special(dev);
 
 	/* Scan key column */
 	if (dev->keybit == 0xf) {
@@ -381,7 +405,7 @@
 
  out:
 
-	spin_lock(&dev->ctl_submit_lock);
+	spin_lock_irqsave(&dev->ctl_submit_lock, flags);
 
 	dev->irq_urb_pending = 0;
 
@@ -405,7 +429,7 @@
 				__func__, error);
 	}
 
-	spin_unlock(&dev->ctl_submit_lock);
+	spin_unlock_irqrestore(&dev->ctl_submit_lock, flags);
 }
 
 static void cm109_urb_ctl_callback(struct urb *urb)
@@ -413,6 +437,7 @@
 	struct cm109_dev *dev = urb->context;
 	const int status = urb->status;
 	int error;
+	unsigned long flags;
 
 	dev_dbg(&dev->intf->dev, "### URB CTL: [0x%02x 0x%02x 0x%02x 0x%02x]\n",
 	     dev->ctl_data->byte[0],
@@ -427,7 +452,7 @@
 				    __func__, status);
 	}
 
-	spin_lock(&dev->ctl_submit_lock);
+	spin_lock_irqsave(&dev->ctl_submit_lock, flags);
 
 	dev->ctl_urb_pending = 0;
 
@@ -448,7 +473,7 @@
 		}
 	}
 
-	spin_unlock(&dev->ctl_submit_lock);
+	spin_unlock_irqrestore(&dev->ctl_submit_lock, flags);
 }
 
 static void cm109_toggle_buzzer_async(struct cm109_dev *dev)
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index a806ba3..8d6326d 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -255,12 +255,14 @@
 	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
 	const struct max8997_platform_data *pdata =
 					dev_get_platdata(iodev->dev);
-	const struct max8997_haptic_platform_data *haptic_pdata =
-					pdata->haptic_pdata;
+	const struct max8997_haptic_platform_data *haptic_pdata = NULL;
 	struct max8997_haptic *chip;
 	struct input_dev *input_dev;
 	int error;
 
+	if (pdata)
+		haptic_pdata = pdata->haptic_pdata;
+
 	if (!haptic_pdata) {
 		dev_err(&pdev->dev, "no haptic platform data\n");
 		return -EINVAL;
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 3f02e0e..67aab86 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -353,7 +353,8 @@
 	if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
 		kpd_delay = 15625;
 
-	if (kpd_delay > 62500 || kpd_delay == 0) {
+	/* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
+	if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
 		dev_err(&pdev->dev, "invalid power key trigger delay\n");
 		return -EINVAL;
 	}
@@ -385,8 +386,8 @@
 	pwr->name = "pmic8xxx_pwrkey";
 	pwr->phys = "pmic8xxx_pwrkey/input0";
 
-	delay = (kpd_delay << 10) / USEC_PER_SEC;
-	delay = 1 + ilog2(delay);
+	delay = (kpd_delay << 6) / USEC_PER_SEC;
+	delay = ilog2(delay);
 
 	err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
 	if (err < 0) {
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 96c486d..c7fc8d4 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -47,13 +47,13 @@
 	bool armed;
 	signed char dir;	/* 1 - clockwise, -1 - CCW */
 
-	unsigned last_stable;
+	unsigned int last_stable;
 };
 
-static unsigned rotary_encoder_get_state(struct rotary_encoder *encoder)
+static unsigned int rotary_encoder_get_state(struct rotary_encoder *encoder)
 {
 	int i;
-	unsigned ret = 0;
+	unsigned int ret = 0;
 
 	for (i = 0; i < encoder->gpios->ndescs; ++i) {
 		int val = gpiod_get_value_cansleep(encoder->gpios->desc[i]);
@@ -100,7 +100,7 @@
 static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
 {
 	struct rotary_encoder *encoder = dev_id;
-	unsigned state;
+	unsigned int state;
 
 	mutex_lock(&encoder->access_mutex);
 
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 10c4e3d..caa5a62 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -222,7 +222,6 @@
 
 	info->input_dev->name = "twl4030:vibrator";
 	info->input_dev->id.version = 1;
-	info->input_dev->dev.parent = pdev->dev.parent;
 	info->input_dev->close = twl4030_vibra_close;
 	__set_bit(FF_RUMBLE, info->input_dev->ffbit);
 
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index ea63fad..5690eb7 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -45,9 +45,8 @@
 struct vibra_info {
 	struct device *dev;
 	struct input_dev *input_dev;
-	struct workqueue_struct *workqueue;
 	struct work_struct play_work;
-	struct mutex mutex;
+
 	int irq;
 
 	bool enabled;
@@ -182,8 +181,14 @@
 {
 	struct vibra_info *info = container_of(work,
 				struct vibra_info, play_work);
+	int ret;
 
-	mutex_lock(&info->mutex);
+	/* Do not allow effect, while the routing is set to use audio */
+	ret = twl6040_get_vibralr_status(info->twl6040);
+	if (ret & TWL6040_VIBSEL) {
+		dev_info(info->dev, "Vibra is configured for audio\n");
+		return;
+	}
 
 	if (info->weak_speed || info->strong_speed) {
 		if (!info->enabled)
@@ -193,31 +198,18 @@
 	} else if (info->enabled)
 		twl6040_vibra_disable(info);
 
-	mutex_unlock(&info->mutex);
 }
 
 static int vibra_play(struct input_dev *input, void *data,
 		      struct ff_effect *effect)
 {
 	struct vibra_info *info = input_get_drvdata(input);
-	int ret;
-
-	/* Do not allow effect, while the routing is set to use audio */
-	ret = twl6040_get_vibralr_status(info->twl6040);
-	if (ret & TWL6040_VIBSEL) {
-		dev_info(&input->dev, "Vibra is configured for audio\n");
-		return -EBUSY;
-	}
 
 	info->weak_speed = effect->u.rumble.weak_magnitude;
 	info->strong_speed = effect->u.rumble.strong_magnitude;
 	info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
 
-	ret = queue_work(info->workqueue, &info->play_work);
-	if (!ret) {
-		dev_info(&input->dev, "work is already on queue\n");
-		return ret;
-	}
+	schedule_work(&info->play_work);
 
 	return 0;
 }
@@ -228,12 +220,8 @@
 
 	cancel_work_sync(&info->play_work);
 
-	mutex_lock(&info->mutex);
-
 	if (info->enabled)
 		twl6040_vibra_disable(info);
-
-	mutex_unlock(&info->mutex);
 }
 
 static int __maybe_unused twl6040_vibra_suspend(struct device *dev)
@@ -241,13 +229,11 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct vibra_info *info = platform_get_drvdata(pdev);
 
-	mutex_lock(&info->mutex);
+	cancel_work_sync(&info->play_work);
 
 	if (info->enabled)
 		twl6040_vibra_disable(info);
 
-	mutex_unlock(&info->mutex);
-
 	return 0;
 }
 
@@ -262,6 +248,7 @@
 	int vddvibr_uV = 0;
 	int error;
 
+	of_node_get(twl6040_core_dev->of_node);
 	twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
 						 "vibra");
 	if (!twl6040_core_node) {
@@ -305,8 +292,6 @@
 		return -EINVAL;
 	}
 
-	mutex_init(&info->mutex);
-
 	error = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
 					  twl6040_vib_irq_handler,
 					  IRQF_ONESHOT,
@@ -362,7 +347,6 @@
 
 	info->input_dev->name = "twl6040:vibrator";
 	info->input_dev->id.version = 1;
-	info->input_dev->dev.parent = pdev->dev.parent;
 	info->input_dev->close = twl6040_vibra_close;
 	__set_bit(FF_RUMBLE, info->input_dev->ffbit);
 
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
index fdc243ca..b27aa63 100644
--- a/drivers/input/mouse/byd.c
+++ b/drivers/input/mouse/byd.c
@@ -2,6 +2,10 @@
  * BYD TouchPad PS/2 mouse driver
  *
  * Copyright (C) 2015 Chris Diamand <chris@diamand.org>
+ * Copyright (C) 2015 Richard Pospesel
+ * Copyright (C) 2015 Tai Chi Minh Ralph Eastwood
+ * Copyright (C) 2015 Martin Wimpress
+ * Copyright (C) 2015 Jay Kuri
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
@@ -474,7 +478,6 @@
 	if (!priv)
 		return -ENOMEM;
 
-	memset(priv, 0, sizeof(*priv));
 	setup_timer(&priv->timer, byd_clear_touch, (unsigned long) psmouse);
 
 	psmouse->private = priv;
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index 889f6b7..e86e377 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -49,7 +49,6 @@
 struct usb_acecad {
 	char name[128];
 	char phys[64];
-	struct usb_device *usbdev;
 	struct usb_interface *intf;
 	struct input_dev *input;
 	struct urb *irq;
@@ -64,6 +63,7 @@
 	unsigned char *data = acecad->data;
 	struct input_dev *dev = acecad->input;
 	struct usb_interface *intf = acecad->intf;
+	struct usb_device *udev = interface_to_usbdev(intf);
 	int prox, status;
 
 	switch (urb->status) {
@@ -110,15 +110,15 @@
 	if (status)
 		dev_err(&intf->dev,
 			"can't resubmit intr, %s-%s/input0, status %d\n",
-			acecad->usbdev->bus->bus_name,
-			acecad->usbdev->devpath, status);
+			udev->bus->bus_name,
+			udev->devpath, status);
 }
 
 static int usb_acecad_open(struct input_dev *dev)
 {
 	struct usb_acecad *acecad = input_get_drvdata(dev);
 
-	acecad->irq->dev = acecad->usbdev;
+	acecad->irq->dev = interface_to_usbdev(acecad->intf);
 	if (usb_submit_urb(acecad->irq, GFP_KERNEL))
 		return -EIO;
 
@@ -172,7 +172,6 @@
 		goto fail2;
 	}
 
-	acecad->usbdev = dev;
 	acecad->intf = intf;
 	acecad->input = input_dev;
 
@@ -251,12 +250,13 @@
 static void usb_acecad_disconnect(struct usb_interface *intf)
 {
 	struct usb_acecad *acecad = usb_get_intfdata(intf);
+	struct usb_device *udev = interface_to_usbdev(intf);
 
 	usb_set_intfdata(intf, NULL);
 
 	input_unregister_device(acecad->input);
 	usb_free_urb(acecad->irq);
-	usb_free_coherent(acecad->usbdev, 8, acecad->data, acecad->data_dma);
+	usb_free_coherent(udev, 8, acecad->data, acecad->data_dma);
 	kfree(acecad);
 }
 
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 78ca448..4613f0a 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -307,7 +307,6 @@
 
 struct aiptek {
 	struct input_dev *inputdev;		/* input device struct           */
-	struct usb_device *usbdev;		/* usb device struct             */
 	struct usb_interface *intf;		/* usb interface struct          */
 	struct urb *urb;			/* urb for incoming reports      */
 	dma_addr_t data_dma;			/* our dma stuffage              */
@@ -847,7 +846,7 @@
 {
 	struct aiptek *aiptek = input_get_drvdata(inputdev);
 
-	aiptek->urb->dev = aiptek->usbdev;
+	aiptek->urb->dev = interface_to_usbdev(aiptek->intf);
 	if (usb_submit_urb(aiptek->urb, GFP_KERNEL) != 0)
 		return -EIO;
 
@@ -873,8 +872,10 @@
 		  unsigned char report_type,
 		  unsigned char report_id, void *buffer, int size)
 {
-	return usb_control_msg(aiptek->usbdev,
-			       usb_sndctrlpipe(aiptek->usbdev, 0),
+	struct usb_device *udev = interface_to_usbdev(aiptek->intf);
+
+	return usb_control_msg(udev,
+			       usb_sndctrlpipe(udev, 0),
 			       USB_REQ_SET_REPORT,
 			       USB_TYPE_CLASS | USB_RECIP_INTERFACE |
 			       USB_DIR_OUT, (report_type << 8) + report_id,
@@ -886,8 +887,10 @@
 		  unsigned char report_type,
 		  unsigned char report_id, void *buffer, int size)
 {
-	return usb_control_msg(aiptek->usbdev,
-			       usb_rcvctrlpipe(aiptek->usbdev, 0),
+	struct usb_device *udev = interface_to_usbdev(aiptek->intf);
+
+	return usb_control_msg(udev,
+			       usb_rcvctrlpipe(udev, 0),
 			       USB_REQ_GET_REPORT,
 			       USB_TYPE_CLASS | USB_RECIP_INTERFACE |
 			       USB_DIR_IN, (report_type << 8) + report_id,
@@ -1729,7 +1732,6 @@
 	}
 
 	aiptek->inputdev = inputdev;
-	aiptek->usbdev = usbdev;
 	aiptek->intf = intf;
 	aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber;
 	aiptek->inDelay = 0;
@@ -1833,8 +1835,8 @@
 	 * input.
 	 */
 	usb_fill_int_urb(aiptek->urb,
-			 aiptek->usbdev,
-			 usb_rcvintpipe(aiptek->usbdev,
+			 usbdev,
+			 usb_rcvintpipe(usbdev,
 					endpoint->bEndpointAddress),
 			 aiptek->data, 8, aiptek_irq, aiptek,
 			 endpoint->bInterval);
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 3a7f3a4..abf09ac 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -104,7 +104,6 @@
 struct gtco {
 
 	struct input_dev  *inputdevice; /* input device struct pointer  */
-	struct usb_device *usbdev; /* the usb device for this device */
 	struct usb_interface *intf;	/* the usb interface for this device */
 	struct urb        *urbinfo;	 /* urb for incoming reports      */
 	dma_addr_t        buf_dma;  /* dma addr of the data buffer*/
@@ -540,7 +539,7 @@
 {
 	struct gtco *device = input_get_drvdata(inputdev);
 
-	device->urbinfo->dev = device->usbdev;
+	device->urbinfo->dev = interface_to_usbdev(device->intf);
 	if (usb_submit_urb(device->urbinfo, GFP_KERNEL))
 		return -EIO;
 
@@ -824,6 +823,7 @@
 	int                     result = 0, retry;
 	int			error;
 	struct usb_endpoint_descriptor *endpoint;
+	struct usb_device	*udev = interface_to_usbdev(usbinterface);
 
 	/* Allocate memory for device structure */
 	gtco = kzalloc(sizeof(struct gtco), GFP_KERNEL);
@@ -838,11 +838,10 @@
 	gtco->inputdevice = input_dev;
 
 	/* Save interface information */
-	gtco->usbdev = interface_to_usbdev(usbinterface);
 	gtco->intf = usbinterface;
 
 	/* Allocate some data for incoming reports */
-	gtco->buffer = usb_alloc_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+	gtco->buffer = usb_alloc_coherent(udev, REPORT_MAX_SIZE,
 					  GFP_KERNEL, &gtco->buf_dma);
 	if (!gtco->buffer) {
 		dev_err(&usbinterface->dev, "No more memory for us buffers\n");
@@ -858,6 +857,14 @@
 		goto err_free_buf;
 	}
 
+	/* Sanity check that a device has an endpoint */
+	if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+		dev_err(&usbinterface->dev,
+			"Invalid number of endpoints\n");
+		error = -EINVAL;
+		goto err_free_urb;
+	}
+
 	/*
 	 * The endpoint is always altsetting 0, we know this since we know
 	 * this device only has one interrupt endpoint
@@ -879,7 +886,7 @@
 	 * HID report descriptor
 	 */
 	if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
-				     HID_DEVICE_TYPE, &hid_desc) != 0){
+				     HID_DEVICE_TYPE, &hid_desc) != 0) {
 		dev_err(&usbinterface->dev,
 			"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
 		error = -EIO;
@@ -899,8 +906,8 @@
 
 	/* Couple of tries to get reply */
 	for (retry = 0; retry < 3; retry++) {
-		result = usb_control_msg(gtco->usbdev,
-					 usb_rcvctrlpipe(gtco->usbdev, 0),
+		result = usb_control_msg(udev,
+					 usb_rcvctrlpipe(udev, 0),
 					 USB_REQ_GET_DESCRIPTOR,
 					 USB_RECIP_INTERFACE | USB_DIR_IN,
 					 REPORT_DEVICE_TYPE << 8,
@@ -928,7 +935,7 @@
 	}
 
 	/* Create a device file node */
-	usb_make_path(gtco->usbdev, gtco->usbpath, sizeof(gtco->usbpath));
+	usb_make_path(udev, gtco->usbpath, sizeof(gtco->usbpath));
 	strlcat(gtco->usbpath, "/input0", sizeof(gtco->usbpath));
 
 	/* Set Input device functions */
@@ -945,15 +952,15 @@
 	gtco_setup_caps(input_dev);
 
 	/* Set input device required ID information */
-	usb_to_input_id(gtco->usbdev, &input_dev->id);
+	usb_to_input_id(udev, &input_dev->id);
 	input_dev->dev.parent = &usbinterface->dev;
 
 	/* Setup the URB, it will be posted later on open of input device */
 	endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
 
 	usb_fill_int_urb(gtco->urbinfo,
-			 gtco->usbdev,
-			 usb_rcvintpipe(gtco->usbdev,
+			 udev,
+			 usb_rcvintpipe(udev,
 					endpoint->bEndpointAddress),
 			 gtco->buffer,
 			 REPORT_MAX_SIZE,
@@ -977,7 +984,7 @@
  err_free_urb:
 	usb_free_urb(gtco->urbinfo);
  err_free_buf:
-	usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+	usb_free_coherent(udev, REPORT_MAX_SIZE,
 			  gtco->buffer, gtco->buf_dma);
  err_free_devs:
 	input_free_device(input_dev);
@@ -994,13 +1001,14 @@
 {
 	/* Grab private device ptr */
 	struct gtco *gtco = usb_get_intfdata(interface);
+	struct usb_device *udev = interface_to_usbdev(interface);
 
 	/* Now reverse all the registration stuff */
 	if (gtco) {
 		input_unregister_device(gtco->inputdevice);
 		usb_kill_urb(gtco->urbinfo);
 		usb_free_urb(gtco->urbinfo);
-		usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+		usb_free_coherent(udev, REPORT_MAX_SIZE,
 				  gtco->buffer, gtco->buf_dma);
 		kfree(gtco);
 	}
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index d2ac7c2..e850d7e 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -31,7 +31,6 @@
 	unsigned char *data;
 	dma_addr_t data_dma;
 	struct input_dev *dev;
-	struct usb_device *usbdev;
 	struct usb_interface *intf;
 	struct urb *irq;
 	char phys[32];
@@ -99,8 +98,9 @@
 static int kbtab_open(struct input_dev *dev)
 {
 	struct kbtab *kbtab = input_get_drvdata(dev);
+	struct usb_device *udev = interface_to_usbdev(kbtab->intf);
 
-	kbtab->irq->dev = kbtab->usbdev;
+	kbtab->irq->dev = udev;
 	if (usb_submit_urb(kbtab->irq, GFP_KERNEL))
 		return -EIO;
 
@@ -135,7 +135,6 @@
 	if (!kbtab->irq)
 		goto fail2;
 
-	kbtab->usbdev = dev;
 	kbtab->intf = intf;
 	kbtab->dev = input_dev;
 
@@ -188,12 +187,13 @@
 static void kbtab_disconnect(struct usb_interface *intf)
 {
 	struct kbtab *kbtab = usb_get_intfdata(intf);
+	struct usb_device *udev = interface_to_usbdev(intf);
 
 	usb_set_intfdata(intf, NULL);
 
 	input_unregister_device(kbtab->dev);
 	usb_free_urb(kbtab->irq);
-	usb_free_coherent(kbtab->usbdev, 8, kbtab->data, kbtab->data_dma);
+	usb_free_coherent(udev, 8, kbtab->data, kbtab->data_dma);
 	kfree(kbtab);
 }
 
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 69d299d..e4bf110 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -379,7 +379,7 @@
 static int ad7879_gpio_direction_input(struct gpio_chip *chip,
 					unsigned gpio)
 {
-	struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+	struct ad7879 *ts = gpiochip_get_data(chip);
 	int err;
 
 	mutex_lock(&ts->mutex);
@@ -393,7 +393,7 @@
 static int ad7879_gpio_direction_output(struct gpio_chip *chip,
 					unsigned gpio, int level)
 {
-	struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+	struct ad7879 *ts = gpiochip_get_data(chip);
 	int err;
 
 	mutex_lock(&ts->mutex);
@@ -412,7 +412,7 @@
 
 static int ad7879_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
 {
-	struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+	struct ad7879 *ts = gpiochip_get_data(chip);
 	u16 val;
 
 	mutex_lock(&ts->mutex);
@@ -425,7 +425,7 @@
 static void ad7879_gpio_set_value(struct gpio_chip *chip,
 				  unsigned gpio, int value)
 {
-	struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+	struct ad7879 *ts = gpiochip_get_data(chip);
 
 	mutex_lock(&ts->mutex);
 	if (value)
@@ -456,7 +456,7 @@
 		ts->gc.owner = THIS_MODULE;
 		ts->gc.parent = ts->dev;
 
-		ret = gpiochip_add(&ts->gc);
+		ret = gpiochip_add_data(&ts->gc, ts);
 		if (ret)
 			dev_err(ts->dev, "failed to register gpio %d\n",
 				ts->gc.base);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2160512..5af7907 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1093,6 +1093,19 @@
 	return 0;
 }
 
+static int mxt_acquire_irq(struct mxt_data *data)
+{
+	int error;
+
+	enable_irq(data->irq);
+
+	error = mxt_process_messages_until_invalid(data);
+	if (error)
+		return error;
+
+	return 0;
+}
+
 static int mxt_soft_reset(struct mxt_data *data)
 {
 	struct device *dev = &data->client->dev;
@@ -1111,7 +1124,7 @@
 	/* Ignore CHG line for 100ms after reset */
 	msleep(100);
 
-	enable_irq(data->irq);
+	mxt_acquire_irq(data);
 
 	ret = mxt_wait_for_completion(data, &data->reset_completion,
 				      MXT_RESET_TIMEOUT);
@@ -1466,19 +1479,6 @@
 	return ret;
 }
 
-static int mxt_acquire_irq(struct mxt_data *data)
-{
-	int error;
-
-	enable_irq(data->irq);
-
-	error = mxt_process_messages_until_invalid(data);
-	if (error)
-		return error;
-
-	return 0;
-}
-
 static int mxt_get_info(struct mxt_data *data)
 {
 	struct i2c_client *client = data->client;
diff --git a/drivers/input/touchscreen/bcm_iproc_tsc.c b/drivers/input/touchscreen/bcm_iproc_tsc.c
index ae460a5c..4d11b27 100644
--- a/drivers/input/touchscreen/bcm_iproc_tsc.c
+++ b/drivers/input/touchscreen/bcm_iproc_tsc.c
@@ -23,6 +23,8 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/serio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #define IPROC_TS_NAME "iproc-ts"
 
@@ -88,7 +90,11 @@
 #define TS_WIRE_MODE_BIT        BIT(1)
 
 #define dbg_reg(dev, priv, reg) \
-	dev_dbg(dev, "%20s= 0x%08x\n", #reg, readl((priv)->regs + reg))
+do { \
+	u32 val; \
+	regmap_read(priv->regmap, reg, &val); \
+	dev_dbg(dev, "%20s= 0x%08x\n", #reg, val); \
+} while (0)
 
 struct tsc_param {
 	/* Each step is 1024 us.  Valid 1-256 */
@@ -141,7 +147,7 @@
 	struct platform_device *pdev;
 	struct input_dev *idev;
 
-	void __iomem *regs;
+	struct regmap *regmap;
 	struct clk *tsc_clk;
 
 	int  pen_status;
@@ -196,22 +202,22 @@
 	int i;
 	bool needs_sync = false;
 
-	intr_status = readl(priv->regs + INTERRUPT_STATUS);
+	regmap_read(priv->regmap, INTERRUPT_STATUS, &intr_status);
 	intr_status &= TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK;
 	if (intr_status == 0)
 		return IRQ_NONE;
 
 	/* Clear all interrupt status bits, write-1-clear */
-	writel(intr_status, priv->regs + INTERRUPT_STATUS);
-
+	regmap_write(priv->regmap, INTERRUPT_STATUS, intr_status);
 	/* Pen up/down */
 	if (intr_status & TS_PEN_INTR_MASK) {
-		if (readl(priv->regs + CONTROLLER_STATUS) & TS_PEN_DOWN)
+		regmap_read(priv->regmap, CONTROLLER_STATUS, &priv->pen_status);
+		if (priv->pen_status & TS_PEN_DOWN)
 			priv->pen_status = PEN_DOWN_STATUS;
 		else
 			priv->pen_status = PEN_UP_STATUS;
 
-		input_report_key(priv->idev, BTN_TOUCH, priv->pen_status);
+		input_report_key(priv->idev, BTN_TOUCH,	priv->pen_status);
 		needs_sync = true;
 
 		dev_dbg(&priv->pdev->dev,
@@ -221,7 +227,7 @@
 	/* coordinates in FIFO exceed the threshold */
 	if (intr_status & TS_FIFO_INTR_MASK) {
 		for (i = 0; i < priv->cfg_params.fifo_threshold; i++) {
-			raw_coordinate = readl(priv->regs + FIFO_DATA);
+			regmap_read(priv->regmap, FIFO_DATA, &raw_coordinate);
 			if (raw_coordinate == INVALID_COORD)
 				continue;
 
@@ -239,7 +245,7 @@
 			x = (x >> 4) & 0x0FFF;
 			y = (y >> 4) & 0x0FFF;
 
-			/* adjust x y according to lcd tsc mount angle */
+			/* Adjust x y according to LCD tsc mount angle. */
 			if (priv->cfg_params.invert_x)
 				x = priv->cfg_params.max_x - x;
 
@@ -262,9 +268,10 @@
 
 static int iproc_ts_start(struct input_dev *idev)
 {
-	struct iproc_ts_priv *priv = input_get_drvdata(idev);
 	u32 val;
+	u32 mask;
 	int error;
+	struct iproc_ts_priv *priv = input_get_drvdata(idev);
 
 	/* Enable clock */
 	error = clk_prepare_enable(priv->tsc_clk);
@@ -279,9 +286,10 @@
 	 *  FIFO reaches the int_th value, and pen event(up/down)
 	 */
 	val = TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK;
-	writel(val, priv->regs + INTERRUPT_MASK);
+	regmap_update_bits(priv->regmap, INTERRUPT_MASK, val, val);
 
-	writel(priv->cfg_params.fifo_threshold, priv->regs + INTERRUPT_THRES);
+	val = priv->cfg_params.fifo_threshold;
+	regmap_write(priv->regmap, INTERRUPT_THRES, val);
 
 	/* Initialize control reg1 */
 	val = 0;
@@ -289,26 +297,23 @@
 	val |= priv->cfg_params.debounce_timeout << DEBOUNCE_TIMEOUT_SHIFT;
 	val |= priv->cfg_params.settling_timeout << SETTLING_TIMEOUT_SHIFT;
 	val |= priv->cfg_params.touch_timeout << TOUCH_TIMEOUT_SHIFT;
-	writel(val, priv->regs + REGCTL1);
+	regmap_write(priv->regmap, REGCTL1, val);
 
 	/* Try to clear all interrupt status */
-	val = readl(priv->regs + INTERRUPT_STATUS);
-	val |= TS_FIFO_INTR_MASK | TS_PEN_INTR_MASK;
-	writel(val, priv->regs + INTERRUPT_STATUS);
+	val = TS_FIFO_INTR_MASK | TS_PEN_INTR_MASK;
+	regmap_update_bits(priv->regmap, INTERRUPT_STATUS, val, val);
 
 	/* Initialize control reg2 */
-	val = readl(priv->regs + REGCTL2);
-	val |= TS_CONTROLLER_EN_BIT | TS_WIRE_MODE_BIT;
-
-	val &= ~TS_CONTROLLER_AVGDATA_MASK;
+	val = TS_CONTROLLER_EN_BIT | TS_WIRE_MODE_BIT;
 	val |= priv->cfg_params.average_data << TS_CONTROLLER_AVGDATA_SHIFT;
 
-	val &= ~(TS_CONTROLLER_PWR_LDO |	/* PWR up LDO */
+	mask = (TS_CONTROLLER_AVGDATA_MASK);
+	mask |= (TS_CONTROLLER_PWR_LDO |	/* PWR up LDO */
 		   TS_CONTROLLER_PWR_ADC |	/* PWR up ADC */
 		   TS_CONTROLLER_PWR_BGP |	/* PWR up BGP */
 		   TS_CONTROLLER_PWR_TS);	/* PWR up TS */
-
-	writel(val, priv->regs + REGCTL2);
+	mask |= val;
+	regmap_update_bits(priv->regmap, REGCTL2, mask, val);
 
 	ts_reg_dump(priv);
 
@@ -320,12 +325,17 @@
 	u32 val;
 	struct iproc_ts_priv *priv = input_get_drvdata(dev);
 
-	writel(0, priv->regs + INTERRUPT_MASK); /* Disable all interrupts */
+	/*
+	 * Disable FIFO int_th and pen event (up/down) interrupts only
+	 * as the interrupt mask register is shared between ADC, TS and
+	 * flextimer.
+	 */
+	val = TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK;
+	regmap_update_bits(priv->regmap, INTERRUPT_MASK, val, 0);
 
 	/* Only power down touch screen controller */
-	val = readl(priv->regs + REGCTL2);
-	val |= TS_CONTROLLER_PWR_TS;
-	writel(val, priv->regs + REGCTL2);
+	val = TS_CONTROLLER_PWR_TS;
+	regmap_update_bits(priv->regmap, REGCTL2, val, val);
 
 	clk_disable(priv->tsc_clk);
 }
@@ -414,7 +424,6 @@
 {
 	struct iproc_ts_priv *priv;
 	struct input_dev *idev;
-	struct resource *res;
 	int irq;
 	int error;
 
@@ -422,12 +431,12 @@
 	if (!priv)
 		return -ENOMEM;
 
-	/* touchscreen controller memory mapped regs */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(priv->regs)) {
-		error = PTR_ERR(priv->regs);
-		dev_err(&pdev->dev, "unable to map I/O memory: %d\n", error);
+	/* touchscreen controller memory mapped regs via syscon */
+	priv->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							"ts_syscon");
+	if (IS_ERR(priv->regmap)) {
+		error = PTR_ERR(priv->regmap);
+		dev_err(&pdev->dev, "unable to map I/O memory: %d\n", error);
 		return error;
 	}
 
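
The iProc touchscreen hunks above convert the driver from direct readl()/writel()
MMIO access to a regmap obtained via syscon, because the register block is now
shared with the ADC and flextimer blocks. A minimal sketch of the resulting
access pattern follows; the "demo-syscon" phandle name and register offsets are
illustrative, not taken from the driver:

/* Sketch: read-modify-write through a syscon-backed regmap. */
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

#define DEMO_CTRL_REG	0x04		/* hypothetical register offset */
#define DEMO_ENABLE	0x01		/* hypothetical enable bit */

static int demo_enable_block(struct device_node *np)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	/* Look up the shared register block through a syscon phandle. */
	map = syscon_regmap_lookup_by_phandle(np, "demo-syscon");
	if (IS_ERR(map))
		return PTR_ERR(map);

	/*
	 * regmap_update_bits() performs a locked read-modify-write, which
	 * is what makes sharing the block with other drivers safe.
	 */
	ret = regmap_update_bits(map, DEMO_CTRL_REG, DEMO_ENABLE, DEMO_ENABLE);
	if (ret)
		return ret;

	/* Plain reads go through regmap_read() instead of readl(). */
	return regmap_read(map, DEMO_CTRL_REG, &val);
}
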
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 5ed3105..44deca8 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1499,7 +1499,7 @@
 
 	if (IS_BOOTLOADER(mode[0], mode[1])) {
 		mutex_unlock(&cd->system_lock);
-		dev_err(cd->dev, "%s: Device in BOOTLADER mode.\n", __func__);
+		dev_err(cd->dev, "%s: Device in BOOTLOADER mode.\n", __func__);
 		rc = -EINVAL;
 		goto error;
 	}
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index a21a07c..8b3f15c 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -487,8 +487,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int titsc_suspend(struct device *dev)
+static int __maybe_unused titsc_suspend(struct device *dev)
 {
 	struct titsc *ts_dev = dev_get_drvdata(dev);
 	struct ti_tscadc_dev *tscadc_dev;
@@ -504,7 +503,7 @@
 	return 0;
 }
 
-static int titsc_resume(struct device *dev)
+static int __maybe_unused titsc_resume(struct device *dev)
 {
 	struct titsc *ts_dev = dev_get_drvdata(dev);
 	struct ti_tscadc_dev *tscadc_dev;
@@ -521,14 +520,7 @@
 	return 0;
 }
 
-static const struct dev_pm_ops titsc_pm_ops = {
-	.suspend = titsc_suspend,
-	.resume  = titsc_resume,
-};
-#define TITSC_PM_OPS (&titsc_pm_ops)
-#else
-#define TITSC_PM_OPS NULL
-#endif
+static SIMPLE_DEV_PM_OPS(titsc_pm_ops, titsc_suspend, titsc_resume);
 
 static const struct of_device_id ti_tsc_dt_ids[] = {
 	{ .compatible = "ti,am3359-tsc", },
@@ -541,7 +533,7 @@
 	.remove	= titsc_remove,
 	.driver	= {
 		.name   = "TI-am335x-tsc",
-		.pm	= TITSC_PM_OPS,
+		.pm	= &titsc_pm_ops,
 		.of_match_table = ti_tsc_dt_ids,
 	},
 };
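
The ti_am335x_tsc.c change above is the usual conversion from an open-coded
#ifdef CONFIG_PM block to SIMPLE_DEV_PM_OPS() with __maybe_unused callbacks:
the dev_pm_ops structure is always defined, and the compiler quietly drops the
unreferenced suspend/resume functions when sleep support is disabled. A minimal
sketch of the pattern, with placeholder driver and function names:

/* Sketch: SIMPLE_DEV_PM_OPS replacing an #ifdef CONFIG_PM block. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static int __maybe_unused demo_suspend(struct device *dev)
{
	/* Quiesce the hardware here. */
	return 0;
}

static int __maybe_unused demo_resume(struct device *dev)
{
	/* Restore the hardware state here. */
	return 0;
}

/*
 * Fills in the system-sleep callbacks only when CONFIG_PM_SLEEP is set,
 * so no #ifdef or fallback NULL pointer is needed in the driver struct.
 */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name	= "demo",
		.pm	= &demo_pm_ops,
	},
};
module_platform_driver(demo_driver);
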
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 9bbadaa..7b3845a 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -370,8 +370,8 @@
 			point.coord_x = point.coord_y = 0;
 		}
 
-		point.state = payload[9 * i + 5] & 0x03;
-		point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+		point.state = payload[9 * i + 5] & 0x0f;
+		point.id = (payload[9 * i + 5] & 0xf0) >> 4;
 
 		/* determine touch major, minor and orientation */
 		point.area_major = max(payload[9 * i + 6],
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dd1dc39..ad08603 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -76,8 +76,7 @@
 
 config FSL_PAMU
 	bool "Freescale IOMMU support"
-	depends on PPC32
-	depends on PPC_E500MC || COMPILE_TEST
+	depends on PPC_E500MC || (COMPILE_TEST && PPC)
 	select IOMMU_API
 	select GENERIC_ALLOCATOR
 	help
@@ -124,16 +123,6 @@
 	  your BIOS for an option to enable it or if you have an IVRS ACPI
 	  table.
 
-config AMD_IOMMU_STATS
-	bool "Export AMD IOMMU statistics to debugfs"
-	depends on AMD_IOMMU
-	select DEBUG_FS
-	---help---
-	  This option enables code in the AMD IOMMU driver to collect various
-	  statistics about whats happening in the driver and exports that
-	  information to userspace via debugfs.
-	  If unsure, say N.
-
 config AMD_IOMMU_V2
 	tristate "AMD IOMMU Version 2 driver"
 	depends on AMD_IOMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 374c129..634f636 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -19,6 +19,8 @@
 
 #include <linux/ratelimit.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/amba/bus.h>
 #include <linux/pci-ats.h>
 #include <linux/bitmap.h>
 #include <linux/slab.h>
@@ -72,6 +74,7 @@
 
 LIST_HEAD(ioapic_map);
 LIST_HEAD(hpet_map);
+LIST_HEAD(acpihid_map);
 
 /*
  * Domain for untranslated devices - only allocated
@@ -92,6 +95,7 @@
 	struct list_head dev_data_list;	  /* For global dev_data_list */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	u16 devid;			  /* PCI Device ID */
+	u16 alias;			  /* Alias Device ID */
 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
 	bool passthrough;		  /* Device is identity mapped */
 	struct {
@@ -161,6 +165,60 @@
  *
  ****************************************************************************/
 
+static inline int match_hid_uid(struct device *dev,
+				struct acpihid_map_entry *entry)
+{
+	const char *hid, *uid;
+
+	hid = acpi_device_hid(ACPI_COMPANION(dev));
+	uid = acpi_device_uid(ACPI_COMPANION(dev));
+
+	if (!hid || !(*hid))
+		return -ENODEV;
+
+	if (!uid || !(*uid))
+		return strcmp(hid, entry->hid);
+
+	if (!(*entry->uid))
+		return strcmp(hid, entry->hid);
+
+	return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid));
+}
+
+static inline u16 get_pci_device_id(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
+static inline int get_acpihid_device_id(struct device *dev,
+					struct acpihid_map_entry **entry)
+{
+	struct acpihid_map_entry *p;
+
+	list_for_each_entry(p, &acpihid_map, list) {
+		if (!match_hid_uid(dev, p)) {
+			if (entry)
+				*entry = p;
+			return p->devid;
+		}
+	}
+	return -EINVAL;
+}
+
+static inline int get_device_id(struct device *dev)
+{
+	int devid;
+
+	if (dev_is_pci(dev))
+		devid = get_pci_device_id(dev);
+	else
+		devid = get_acpihid_device_id(dev, NULL);
+
+	return devid;
+}
+
 static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct protection_domain, domain);
@@ -203,6 +261,68 @@
 	return dev_data;
 }
 
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+	*(u16 *)data = alias;
+	return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	u16 devid, ivrs_alias, pci_alias;
+
+	/* The callers make sure that get_device_id() does not fail here */
+	devid = get_device_id(dev);
+	ivrs_alias = amd_iommu_alias_table[devid];
+	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+	if (ivrs_alias == pci_alias)
+		return ivrs_alias;
+
+	/*
+	 * DMA alias showdown
+	 *
+	 * The IVRS is fairly reliable in telling us about aliases, but it
+	 * can't know about every screwy device.  If we don't have an IVRS
+	 * reported alias, use the PCI reported alias.  In that case we may
+	 * still need to initialize the rlookup and dev_table entries if the
+	 * alias is to a non-existent device.
+	 */
+	if (ivrs_alias == devid) {
+		if (!amd_iommu_rlookup_table[pci_alias]) {
+			amd_iommu_rlookup_table[pci_alias] =
+				amd_iommu_rlookup_table[devid];
+			memcpy(amd_iommu_dev_table[pci_alias].data,
+			       amd_iommu_dev_table[devid].data,
+			       sizeof(amd_iommu_dev_table[pci_alias].data));
+		}
+
+		return pci_alias;
+	}
+
+	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+		"for device %s[%04x:%04x], kernel reported alias "
+		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+		PCI_FUNC(pci_alias));
+
+	/*
+	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
+	 * bus, then the IVRS table may know about a quirk that we don't.
+	 */
+	if (pci_alias == devid &&
+	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+		pci_add_dma_alias(pdev, ivrs_alias & 0xff);
+		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+			dev_name(dev));
+	}
+
+	return ivrs_alias;
+}
+
 static struct iommu_dev_data *find_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
@@ -215,18 +335,34 @@
 	return dev_data;
 }
 
-static inline u16 get_device_id(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-
-	return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
 static struct iommu_dev_data *get_dev_data(struct device *dev)
 {
 	return dev->archdata.iommu;
 }
 
+/*
+* Find or create an IOMMU group for a acpihid device.
+*/
+static struct iommu_group *acpihid_device_group(struct device *dev)
+{
+	struct acpihid_map_entry *p, *entry = NULL;
+	int devid;
+
+	devid = get_acpihid_device_id(dev, &entry);
+	if (devid < 0)
+		return ERR_PTR(devid);
+
+	list_for_each_entry(p, &acpihid_map, list) {
+		if ((devid == p->devid) && p->group)
+			entry->group = p->group;
+	}
+
+	if (!entry->group)
+		entry->group = generic_device_group(dev);
+
+	return entry->group;
+}
+
 static bool pci_iommuv2_capable(struct pci_dev *pdev)
 {
 	static const int caps[] = {
@@ -278,9 +414,11 @@
 					   struct dma_ops_domain *dma_dom)
 {
 	struct unity_map_entry *e;
-	u16 devid;
+	int devid;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
 
 	list_for_each_entry(e, &amd_iommu_unity_map, list) {
 		if (!(devid >= e->devid_start && devid <= e->devid_end))
@@ -295,16 +433,14 @@
  */
 static bool check_device(struct device *dev)
 {
-	u16 devid;
+	int devid;
 
 	if (!dev || !dev->dma_mask)
 		return false;
 
-	/* No PCI device */
-	if (!dev_is_pci(dev))
-		return false;
-
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return false;
 
 	/* Out of our scope? */
 	if (devid > amd_iommu_last_bdf)
@@ -339,20 +475,26 @@
 
 static int iommu_init_device(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iommu_dev_data *dev_data;
+	int devid;
 
 	if (dev->archdata.iommu)
 		return 0;
 
-	dev_data = find_dev_data(get_device_id(dev));
+	devid = get_device_id(dev);
+	if (devid < 0)
+		return devid;
+
+	dev_data = find_dev_data(devid);
 	if (!dev_data)
 		return -ENOMEM;
 
-	if (pci_iommuv2_capable(pdev)) {
+	dev_data->alias = get_alias(dev);
+
+	if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
 		struct amd_iommu *iommu;
 
-		iommu              = amd_iommu_rlookup_table[dev_data->devid];
+		iommu = amd_iommu_rlookup_table[dev_data->devid];
 		dev_data->iommu_v2 = iommu->is_iommu_v2;
 	}
 
@@ -366,10 +508,14 @@
 
 static void iommu_ignore_device(struct device *dev)
 {
-	u16 devid, alias;
+	u16 alias;
+	int devid;
 
 	devid = get_device_id(dev);
-	alias = amd_iommu_alias_table[devid];
+	if (devid < 0)
+		return;
+
+	alias = get_alias(dev);
 
 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
 	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -380,8 +526,14 @@
 
 static void iommu_uninit_device(struct device *dev)
 {
-	struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev));
+	int devid;
+	struct iommu_dev_data *dev_data;
 
+	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
+
+	dev_data = search_dev_data(devid);
 	if (!dev_data)
 		return;
 
@@ -402,70 +554,6 @@
 	 */
 }
 
-#ifdef CONFIG_AMD_IOMMU_STATS
-
-/*
- * Initialization code for statistics collection
- */
-
-DECLARE_STATS_COUNTER(compl_wait);
-DECLARE_STATS_COUNTER(cnt_map_single);
-DECLARE_STATS_COUNTER(cnt_unmap_single);
-DECLARE_STATS_COUNTER(cnt_map_sg);
-DECLARE_STATS_COUNTER(cnt_unmap_sg);
-DECLARE_STATS_COUNTER(cnt_alloc_coherent);
-DECLARE_STATS_COUNTER(cnt_free_coherent);
-DECLARE_STATS_COUNTER(cross_page);
-DECLARE_STATS_COUNTER(domain_flush_single);
-DECLARE_STATS_COUNTER(domain_flush_all);
-DECLARE_STATS_COUNTER(alloced_io_mem);
-DECLARE_STATS_COUNTER(total_map_requests);
-DECLARE_STATS_COUNTER(complete_ppr);
-DECLARE_STATS_COUNTER(invalidate_iotlb);
-DECLARE_STATS_COUNTER(invalidate_iotlb_all);
-DECLARE_STATS_COUNTER(pri_requests);
-
-static struct dentry *stats_dir;
-static struct dentry *de_fflush;
-
-static void amd_iommu_stats_add(struct __iommu_counter *cnt)
-{
-	if (stats_dir == NULL)
-		return;
-
-	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
-				       &cnt->value);
-}
-
-static void amd_iommu_stats_init(void)
-{
-	stats_dir = debugfs_create_dir("amd-iommu", NULL);
-	if (stats_dir == NULL)
-		return;
-
-	de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
-					 &amd_iommu_unmap_flush);
-
-	amd_iommu_stats_add(&compl_wait);
-	amd_iommu_stats_add(&cnt_map_single);
-	amd_iommu_stats_add(&cnt_unmap_single);
-	amd_iommu_stats_add(&cnt_map_sg);
-	amd_iommu_stats_add(&cnt_unmap_sg);
-	amd_iommu_stats_add(&cnt_alloc_coherent);
-	amd_iommu_stats_add(&cnt_free_coherent);
-	amd_iommu_stats_add(&cross_page);
-	amd_iommu_stats_add(&domain_flush_single);
-	amd_iommu_stats_add(&domain_flush_all);
-	amd_iommu_stats_add(&alloced_io_mem);
-	amd_iommu_stats_add(&total_map_requests);
-	amd_iommu_stats_add(&complete_ppr);
-	amd_iommu_stats_add(&invalidate_iotlb);
-	amd_iommu_stats_add(&invalidate_iotlb_all);
-	amd_iommu_stats_add(&pri_requests);
-}
-
-#endif
-
 /****************************************************************************
  *
  * Interrupt handling functions
@@ -588,8 +676,6 @@
 {
 	struct amd_iommu_fault fault;
 
-	INC_STATS_COUNTER(pri_requests);
-
 	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
 		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
 		return;
@@ -1061,7 +1147,7 @@
 	int ret;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;
 
 	ret = iommu_flush_dte(iommu, dev_data->devid);
 	if (!ret && alias != dev_data->devid)
@@ -2039,7 +2125,7 @@
 	bool ats;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;
 	ats   = dev_data->ats.enabled;
 
 	/* Update data structures */
@@ -2073,7 +2159,7 @@
 		return;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = dev_data->alias;
 
 	/* decrease reference counters */
 	dev_data->domain->dev_iommu[iommu->index] -= 1;
@@ -2219,13 +2305,17 @@
 static int attach_device(struct device *dev,
 			 struct protection_domain *domain)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_dev *pdev;
 	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 	int ret;
 
 	dev_data = get_dev_data(dev);
 
+	if (!dev_is_pci(dev))
+		goto skip_ats_check;
+
+	pdev = to_pci_dev(dev);
 	if (domain->flags & PD_IOMMUV2_MASK) {
 		if (!dev_data->passthrough)
 			return -EINVAL;
@@ -2244,6 +2334,7 @@
 		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
 	}
 
+skip_ats_check:
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	ret = __attach_device(dev_data, domain);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
@@ -2300,6 +2391,9 @@
 	__detach_device(dev_data);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
+	if (!dev_is_pci(dev))
+		return;
+
 	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
 		pdev_iommuv2_disable(to_pci_dev(dev));
 	else if (dev_data->ats.enabled)
@@ -2313,13 +2407,15 @@
 	struct iommu_dev_data *dev_data;
 	struct iommu_domain *domain;
 	struct amd_iommu *iommu;
-	u16 devid;
-	int ret;
+	int ret, devid;
 
 	if (!check_device(dev) || get_dev_data(dev))
 		return 0;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return devid;
+
 	iommu = amd_iommu_rlookup_table[devid];
 
 	ret = iommu_init_device(dev);
@@ -2357,18 +2453,29 @@
 static void amd_iommu_remove_device(struct device *dev)
 {
 	struct amd_iommu *iommu;
-	u16 devid;
+	int devid;
 
 	if (!check_device(dev))
 		return;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
+
 	iommu = amd_iommu_rlookup_table[devid];
 
 	iommu_uninit_device(dev);
 	iommu_completion_wait(iommu);
 }
 
+static struct iommu_group *amd_iommu_device_group(struct device *dev)
+{
+	if (dev_is_pci(dev))
+		return pci_device_group(dev);
+
+	return acpihid_device_group(dev);
+}
+
 /*****************************************************************************
  *
  * The next functions belong to the dma_ops mapping/unmapping code.
@@ -2533,11 +2640,6 @@
 	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
 	paddr &= PAGE_MASK;
 
-	INC_STATS_COUNTER(total_map_requests);
-
-	if (pages > 1)
-		INC_STATS_COUNTER(cross_page);
-
 	if (align)
 		align_mask = (1UL << get_order(size)) - 1;
 
@@ -2558,8 +2660,6 @@
 	}
 	address += offset;
 
-	ADD_STATS_COUNTER(alloced_io_mem, size);
-
 	if (unlikely(amd_iommu_np_cache)) {
 		domain_flush_pages(&dma_dom->domain, address, size);
 		domain_flush_complete(&dma_dom->domain);
@@ -2607,8 +2707,6 @@
 		start += PAGE_SIZE;
 	}
 
-	SUB_STATS_COUNTER(alloced_io_mem, size);
-
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 }
 
@@ -2624,8 +2722,6 @@
 	struct protection_domain *domain;
 	u64 dma_mask;
 
-	INC_STATS_COUNTER(cnt_map_single);
-
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL)
 		return (dma_addr_t)paddr;
@@ -2646,8 +2742,6 @@
 {
 	struct protection_domain *domain;
 
-	INC_STATS_COUNTER(cnt_unmap_single);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
@@ -2670,8 +2764,6 @@
 	int mapped_elems = 0;
 	u64 dma_mask;
 
-	INC_STATS_COUNTER(cnt_map_sg);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return 0;
@@ -2717,8 +2809,6 @@
 	struct scatterlist *s;
 	int i;
 
-	INC_STATS_COUNTER(cnt_unmap_sg);
-
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
@@ -2741,8 +2831,6 @@
 	struct protection_domain *domain;
 	struct page *page;
 
-	INC_STATS_COUNTER(cnt_alloc_coherent);
-
 	domain = get_domain(dev);
 	if (PTR_ERR(domain) == -EINVAL) {
 		page = alloc_pages(flag, get_order(size));
@@ -2796,8 +2884,6 @@
 	struct protection_domain *domain;
 	struct page *page;
 
-	INC_STATS_COUNTER(cnt_free_coherent);
-
 	page = virt_to_page(virt_addr);
 	size = PAGE_ALIGN(size);
 
@@ -2862,7 +2948,17 @@
 
 int __init amd_iommu_init_api(void)
 {
-	return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
+	int err = 0;
+
+	err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
+	if (err)
+		return err;
+#ifdef CONFIG_ARM_AMBA
+	err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
+	if (err)
+		return err;
+#endif
+	return 0;
 }
 
 int __init amd_iommu_init_dma_ops(void)
@@ -2879,8 +2975,6 @@
 	if (!swiotlb)
 		dma_ops = &nommu_dma_ops;
 
-	amd_iommu_stats_init();
-
 	if (amd_iommu_unmap_flush)
 		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
 	else
@@ -3034,12 +3128,14 @@
 {
 	struct iommu_dev_data *dev_data = dev->archdata.iommu;
 	struct amd_iommu *iommu;
-	u16 devid;
+	int devid;
 
 	if (!check_device(dev))
 		return;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
 
 	if (dev_data->domain != NULL)
 		detach_device(dev);
@@ -3157,9 +3253,11 @@
 				     struct list_head *head)
 {
 	struct unity_map_entry *entry;
-	u16 devid;
+	int devid;
 
 	devid = get_device_id(dev);
+	if (devid < 0)
+		return;
 
 	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
 		struct iommu_dm_region *region;
@@ -3206,7 +3304,7 @@
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.add_device = amd_iommu_add_device,
 	.remove_device = amd_iommu_remove_device,
-	.device_group = pci_device_group,
+	.device_group = amd_iommu_device_group,
 	.get_dm_regions = amd_iommu_get_dm_regions,
 	.put_dm_regions = amd_iommu_put_dm_regions,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
@@ -3367,8 +3465,6 @@
 static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
 				  u64 address)
 {
-	INC_STATS_COUNTER(invalidate_iotlb);
-
 	return __flush_pasid(domain, pasid, address, false);
 }
 
@@ -3389,8 +3485,6 @@
 
 static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
 {
-	INC_STATS_COUNTER(invalidate_iotlb_all);
-
 	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 			     true);
 }
@@ -3510,8 +3604,6 @@
 	struct amd_iommu *iommu;
 	struct iommu_cmd cmd;
 
-	INC_STATS_COUNTER(complete_ppr);
-
 	dev_data = get_dev_data(&pdev->dev);
 	iommu    = amd_iommu_rlookup_table[dev_data->devid];
 
@@ -3861,6 +3953,9 @@
 	case X86_IRQ_ALLOC_TYPE_MSI:
 	case X86_IRQ_ALLOC_TYPE_MSIX:
 		devid = get_device_id(&info->msi_dev->dev);
+		if (devid < 0)
+			return NULL;
+
 		iommu = amd_iommu_rlookup_table[devid];
 		if (iommu)
 			return iommu->msi_domain;
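
Throughout the amd_iommu.c changes above, get_device_id() is widened from u16
to int so that non-PCI devices enumerated by ACPI HID can report a negative
errno, and every caller now checks for that before indexing the per-devid
tables. A simplified sketch of that lookup shape; the table and names below are
invented for illustration and are not the driver's acpihid_map:

/* Sketch: "PCI devid or negative errno" device-id lookup. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>

struct demo_hid_entry {
	const char *hid;	/* ACPI _HID to match */
	u16 devid;		/* IOMMU device id assigned by firmware */
};

/* Hypothetical table standing in for the IVRS-derived map. */
static const struct demo_hid_entry demo_map[] = {
	{ "DEMO0001", 0x00a5 },
};

static int demo_get_device_id(struct device *dev, const char *hid)
{
	unsigned int i;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		return PCI_DEVID(pdev->bus->number, pdev->devfn);
	}

	for (i = 0; i < ARRAY_SIZE(demo_map); i++)
		if (!strcmp(demo_map[i].hid, hid))
			return demo_map[i].devid;

	/* Callers treat any negative value as "not handled by this IOMMU". */
	return -EINVAL;
}
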
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index bf4959f..9e00341 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -44,7 +44,7 @@
  */
 #define IVRS_HEADER_LENGTH 48
 
-#define ACPI_IVHD_TYPE                  0x10
+#define ACPI_IVHD_TYPE_MAX_SUPPORTED	0x40
 #define ACPI_IVMD_TYPE_ALL              0x20
 #define ACPI_IVMD_TYPE                  0x21
 #define ACPI_IVMD_TYPE_RANGE            0x22
@@ -58,6 +58,11 @@
 #define IVHD_DEV_EXT_SELECT             0x46
 #define IVHD_DEV_EXT_SELECT_RANGE       0x47
 #define IVHD_DEV_SPECIAL		0x48
+#define IVHD_DEV_ACPI_HID		0xf0
+
+#define UID_NOT_PRESENT                 0
+#define UID_IS_INTEGER                  1
+#define UID_IS_CHARACTER                2
 
 #define IVHD_SPECIAL_IOAPIC		1
 #define IVHD_SPECIAL_HPET		2
@@ -99,7 +104,11 @@
 	u64 mmio_phys;
 	u16 pci_seg;
 	u16 info;
-	u32 efr;
+	u32 efr_attr;
+
+	/* Following only valid on IVHD type 11h and 40h */
+	u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
+	u64 res;
 } __attribute__((packed));
 
 /*
@@ -111,6 +120,11 @@
 	u16 devid;
 	u8 flags;
 	u32 ext;
+	u32 hidh;
+	u64 cid;
+	u8 uidf;
+	u8 uidl;
+	u8 uid;
 } __attribute__((packed));
 
 /*
@@ -133,6 +147,7 @@
 
 static bool amd_iommu_detected;
 static bool __initdata amd_iommu_disabled;
+static int amd_iommu_target_ivhd_type;
 
 u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 					   to handle */
@@ -218,8 +233,12 @@
 #define EARLY_MAP_SIZE		4
 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
+static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
+
 static int __initdata early_ioapic_map_size;
 static int __initdata early_hpet_map_size;
+static int __initdata early_acpihid_map_size;
+
 static bool __initdata cmdline_maps;
 
 static enum iommu_init_state init_state = IOMMU_START_STATE;
@@ -394,6 +413,22 @@
 	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
 }
 
+static inline u32 get_ivhd_header_size(struct ivhd_header *h)
+{
+	u32 size = 0;
+
+	switch (h->type) {
+	case 0x10:
+		size = 24;
+		break;
+	case 0x11:
+	case 0x40:
+		size = 40;
+		break;
+	}
+	return size;
+}
+
 /****************************************************************************
  *
  * The functions below belong to the first pass of AMD IOMMU ACPI table
@@ -408,7 +443,15 @@
  */
 static inline int ivhd_entry_length(u8 *ivhd)
 {
-	return 0x04 << (*ivhd >> 6);
+	u32 type = ((struct ivhd_entry *)ivhd)->type;
+
+	if (type < 0x80) {
+		return 0x04 << (*ivhd >> 6);
+	} else if (type == IVHD_DEV_ACPI_HID) {
+		/* For ACPI_HID, offset 21 is uid len */
+		return *((u8 *)ivhd + 21) + 22;
+	}
+	return 0;
 }
 
 /*
@@ -420,7 +463,14 @@
 	u8 *p = (void *)h, *end = (void *)h;
 	struct ivhd_entry *dev;
 
-	p += sizeof(*h);
+	u32 ivhd_size = get_ivhd_header_size(h);
+
+	if (!ivhd_size) {
+		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
+		return -EINVAL;
+	}
+
+	p += ivhd_size;
 	end += h->length;
 
 	while (p < end) {
@@ -448,6 +498,22 @@
 	return 0;
 }
 
+static int __init check_ivrs_checksum(struct acpi_table_header *table)
+{
+	int i;
+	u8 checksum = 0, *p = (u8 *)table;
+
+	for (i = 0; i < table->length; ++i)
+		checksum += p[i];
+	if (checksum != 0) {
+		/* ACPI table corrupt */
+		pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 /*
  * Iterate over all IVHD entries in the ACPI table and find the highest device
  * id which we need to handle. This is the first of three functions which parse
@@ -455,31 +521,19 @@
  */
 static int __init find_last_devid_acpi(struct acpi_table_header *table)
 {
-	int i;
-	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
+	u8 *p = (u8 *)table, *end = (u8 *)table;
 	struct ivhd_header *h;
 
-	/*
-	 * Validate checksum here so we don't need to do it when
-	 * we actually parse the table
-	 */
-	for (i = 0; i < table->length; ++i)
-		checksum += p[i];
-	if (checksum != 0)
-		/* ACPI table corrupt */
-		return -ENODEV;
-
 	p += IVRS_HEADER_LENGTH;
 
 	end += table->length;
 	while (p < end) {
 		h = (struct ivhd_header *)p;
-		switch (h->type) {
-		case ACPI_IVHD_TYPE:
-			find_last_devid_from_ivhd(h);
-			break;
-		default:
-			break;
+		if (h->type == amd_iommu_target_ivhd_type) {
+			int ret = find_last_devid_from_ivhd(h);
+
+			if (ret)
+				return ret;
 		}
 		p += h->length;
 	}
@@ -724,6 +778,42 @@
 	return 0;
 }
 
+static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
+				      bool cmd_line)
+{
+	struct acpihid_map_entry *entry;
+	struct list_head *list = &acpihid_map;
+
+	list_for_each_entry(entry, list, list) {
+		if (strcmp(entry->hid, hid) ||
+		    (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
+		    !entry->cmd_line)
+			continue;
+
+		pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
+			hid, uid);
+		*devid = entry->devid;
+		return 0;
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	memcpy(entry->uid, uid, strlen(uid));
+	memcpy(entry->hid, hid, strlen(hid));
+	entry->devid = *devid;
+	entry->cmd_line	= cmd_line;
+	entry->root_devid = (entry->devid & (~0x7));
+
+	pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n",
+		entry->cmd_line ? "cmd" : "ivrs",
+		entry->hid, entry->uid, entry->root_devid);
+
+	list_add_tail(&entry->list, list);
+	return 0;
+}
+
 static int __init add_early_maps(void)
 {
 	int i, ret;
@@ -746,6 +836,15 @@
 			return ret;
 	}
 
+	for (i = 0; i < early_acpihid_map_size; ++i) {
+		ret = add_acpi_hid_device(early_acpihid_map[i].hid,
+					  early_acpihid_map[i].uid,
+					  &early_acpihid_map[i].devid,
+					  early_acpihid_map[i].cmd_line);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -785,6 +884,7 @@
 	u32 dev_i, ext_flags = 0;
 	bool alias = false;
 	struct ivhd_entry *e;
+	u32 ivhd_size;
 	int ret;
 
 
@@ -800,7 +900,14 @@
 	/*
 	 * Done. Now parse the device entries
 	 */
-	p += sizeof(struct ivhd_header);
+	ivhd_size = get_ivhd_header_size(h);
+	if (!ivhd_size) {
+		pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
+		return -EINVAL;
+	}
+
+	p += ivhd_size;
+
 	end += h->length;
 
 
@@ -958,6 +1065,70 @@
 
 			break;
 		}
+		case IVHD_DEV_ACPI_HID: {
+			u16 devid;
+			u8 hid[ACPIHID_HID_LEN] = {0};
+			u8 uid[ACPIHID_UID_LEN] = {0};
+			int ret;
+
+			if (h->type != 0x40) {
+				pr_err(FW_BUG "Invalid IVHD device type %#x\n",
+				       e->type);
+				break;
+			}
+
+			memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
+			hid[ACPIHID_HID_LEN - 1] = '\0';
+
+			if (!(*hid)) {
+				pr_err(FW_BUG "Invalid HID.\n");
+				break;
+			}
+
+			switch (e->uidf) {
+			case UID_NOT_PRESENT:
+
+				if (e->uidl != 0)
+					pr_warn(FW_BUG "Invalid UID length.\n");
+
+				break;
+			case UID_IS_INTEGER:
+
+				sprintf(uid, "%d", e->uid);
+
+				break;
+			case UID_IS_CHARACTER:
+
+				memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
+				uid[ACPIHID_UID_LEN - 1] = '\0';
+
+				break;
+			default:
+				break;
+			}
+
+			devid  = e->devid;
+			flags = e->flags;
+
+			DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
+				    hid, uid,
+				    PCI_BUS_NUM(devid),
+				    PCI_SLOT(devid),
+				    PCI_FUNC(devid));
+
+			ret = add_acpi_hid_device(hid, uid, &devid, false);
+			if (ret)
+				return ret;
+
+			/*
+			 * add_special_device might update the devid in case a
+			 * command-line override is present. So call
+			 * set_dev_entry_from_acpi after add_special_device.
+			 */
+			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+
+			break;
+		}
 		default:
 			break;
 		}
@@ -1078,13 +1249,25 @@
 	iommu->pci_seg = h->pci_seg;
 	iommu->mmio_phys = h->mmio_phys;
 
-	/* Check if IVHD EFR contains proper max banks/counters */
-	if ((h->efr != 0) &&
-	    ((h->efr & (0xF << 13)) != 0) &&
-	    ((h->efr & (0x3F << 17)) != 0)) {
-		iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
-	} else {
-		iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+	switch (h->type) {
+	case 0x10:
+		/* Check if IVHD EFR contains proper max banks/counters */
+		if ((h->efr_attr != 0) &&
+		    ((h->efr_attr & (0xF << 13)) != 0) &&
+		    ((h->efr_attr & (0x3F << 17)) != 0))
+			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+		else
+			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+		break;
+	case 0x11:
+	case 0x40:
+		if (h->efr_reg & (1 << 9))
+			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+		else
+			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
@@ -1117,6 +1300,32 @@
 	return 0;
 }
 
+/**
+ * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
+ * @ivrs: Pointer to the IVRS header
+ *
+ * This function searches all IVHD blocks for the highest supported IVHD type.
+ */
+static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
+{
+	u8 *base = (u8 *)ivrs;
+	struct ivhd_header *ivhd = (struct ivhd_header *)
+					(base + IVRS_HEADER_LENGTH);
+	u8 last_type = ivhd->type;
+	u16 devid = ivhd->devid;
+
+	while (((u8 *)ivhd - base < ivrs->length) &&
+	       (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
+		u8 *p = (u8 *) ivhd;
+
+		if (ivhd->devid == devid)
+			last_type = ivhd->type;
+		ivhd = (struct ivhd_header *)(p + ivhd->length);
+	}
+
+	return last_type;
+}
+
 /*
  * Iterates over all IOMMU entries in the ACPI table, allocates the
  * IOMMU structure and initializes it with init_iommu_one()
@@ -1133,8 +1342,7 @@
 
 	while (p < end) {
 		h = (struct ivhd_header *)p;
-		switch (*p) {
-		case ACPI_IVHD_TYPE:
+		if (*p == amd_iommu_target_ivhd_type) {
 
 			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
 				    "seg: %d flags: %01x info %04x\n",
@@ -1151,9 +1359,6 @@
 			ret = init_iommu_one(iommu, h);
 			if (ret)
 				return ret;
-			break;
-		default:
-			break;
 		}
 		p += h->length;
 
@@ -1818,18 +2023,20 @@
  * remapping setup code.
  *
  * This function basically parses the ACPI table for AMD IOMMU (IVRS)
- * three times:
+ * four times:
  *
- *	1 pass) Find the highest PCI device id the driver has to handle.
+ *	1 pass) Discover the most comprehensive IVHD type to use.
+ *
+ *	2 pass) Find the highest PCI device id the driver has to handle.
  *		Upon this information the size of the data structures is
  *		determined that needs to be allocated.
  *
- *	2 pass) Initialize the data structures just allocated with the
+ *	3 pass) Initialize the data structures just allocated with the
  *		information in the ACPI table about available AMD IOMMUs
  *		in the system. It also maps the PCI devices in the
  *		system to specific IOMMUs
  *
- *	3 pass) After the basic data structures are allocated and
+ *	4 pass) After the basic data structures are allocated and
  *		initialized we update them with information about memory
  *		remapping requirements parsed out of the ACPI table in
  *		this last pass.
@@ -1857,6 +2064,17 @@
 	}
 
 	/*
+	 * Validate checksum here so we don't need to do it when
+	 * we actually parse the table
+	 */
+	ret = check_ivrs_checksum(ivrs_base);
+	if (ret)
+		return ret;
+
+	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
+	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
+
+	/*
 	 * First parse ACPI tables to find the largest Bus/Dev/Func
 	 * we need to handle. Upon this information the shared data
 	 * structures for the IOMMUs in the system will be allocated
@@ -2259,10 +2477,43 @@
 	return 1;
 }
 
+static int __init parse_ivrs_acpihid(char *str)
+{
+	u32 bus, dev, fn;
+	char *hid, *uid, *p;
+	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
+	int ret, i;
+
+	ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
+	if (ret != 4) {
+		pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
+		return 1;
+	}
+
+	p = acpiid;
+	hid = strsep(&p, ":");
+	uid = p;
+
+	if (!hid || !(*hid) || !uid) {
+		pr_err("AMD-Vi: Invalid command line: hid or uid\n");
+		return 1;
+	}
+
+	i = early_acpihid_map_size++;
+	memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
+	memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
+	early_acpihid_map[i].devid =
+		((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+	early_acpihid_map[i].cmd_line	= true;
+
+	return 1;
+}
+
 __setup("amd_iommu_dump",	parse_amd_iommu_dump);
 __setup("amd_iommu=",		parse_amd_iommu_options);
 __setup("ivrs_ioapic",		parse_ivrs_ioapic);
 __setup("ivrs_hpet",		parse_ivrs_hpet);
+__setup("ivrs_acpihid",		parse_ivrs_acpihid);
 
 IOMMU_INIT_FINISH(amd_iommu_detect,
 		  gart_iommu_hole_init,
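
The new ivrs_acpihid= early parameter registered above takes arguments of the
form ivrs_acpihid[bus:dev.fn]=HID:UID (for instance "[00:14.5]=AMD0020:0") and
maps an ACPI-enumerated device onto a PCI-style device id. Below is a standalone
sketch of the same sscanf()/strsep() split used by parse_ivrs_acpihid(), written
as a plain user-space program so it can be tried directly; the example value is
only illustrative:

/* Sketch: splitting an ivrs_acpihid argument into bus/dev/fn, HID and UID. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *arg = "[00:14.5]=AMD0020:0";	/* example argument */
	unsigned int bus, dev, fn;
	char acpiid[256] = { 0 };
	char *hid, *uid, *p;

	if (sscanf(arg, "[%x:%x.%x]=%255s", &bus, &dev, &fn, acpiid) != 4) {
		fprintf(stderr, "malformed ivrs_acpihid argument\n");
		return 1;
	}

	p = acpiid;
	hid = strsep(&p, ":");	/* HID is everything before the colon */
	uid = p;		/* UID follows it and may be empty */

	/* Same packing as the kernel: 8-bit bus, 5-bit dev, 3-bit fn. */
	printf("devid 0x%04x  hid=%s uid=%s\n",
	       ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7),
	       hid, uid ? uid : "");
	return 0;
}
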
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 9d32b20..590956a 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -527,6 +527,19 @@
 #endif
 };
 
+#define ACPIHID_UID_LEN 256
+#define ACPIHID_HID_LEN 9
+
+struct acpihid_map_entry {
+	struct list_head list;
+	u8 uid[ACPIHID_UID_LEN];
+	u8 hid[ACPIHID_HID_LEN];
+	u16 devid;
+	u16 root_devid;
+	bool cmd_line;
+	struct iommu_group *group;
+};
+
 struct devid_map {
 	struct list_head list;
 	u8 id;
@@ -537,6 +550,7 @@
 /* Map HPET and IOAPIC ids to the devid used by the IOMMU */
 extern struct list_head ioapic_map;
 extern struct list_head hpet_map;
+extern struct list_head acpihid_map;
 
 /*
  * List with all IOMMUs in the system. This list is not locked because it is
@@ -668,30 +682,4 @@
 	return -EINVAL;
 }
 
-#ifdef CONFIG_AMD_IOMMU_STATS
-
-struct __iommu_counter {
-	char *name;
-	struct dentry *dent;
-	u64 value;
-};
-
-#define DECLARE_STATS_COUNTER(nm) \
-	static struct __iommu_counter nm = {	\
-		.name = #nm,			\
-	}
-
-#define INC_STATS_COUNTER(name)		name.value += 1
-#define ADD_STATS_COUNTER(name, x)	name.value += (x)
-#define SUB_STATS_COUNTER(name, x)	name.value -= (x)
-
-#else /* CONFIG_AMD_IOMMU_STATS */
-
-#define DECLARE_STATS_COUNTER(name)
-#define INC_STATS_COUNTER(name)
-#define ADD_STATS_COUNTER(name, x)
-#define SUB_STATS_COUNTER(name, x)
-
-#endif /* CONFIG_AMD_IOMMU_STATS */
-
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 4ff73ff..ebab33e 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -590,6 +590,7 @@
 
 	unsigned long			ias; /* IPA */
 	unsigned long			oas; /* PA */
+	unsigned long			pgsize_bitmap;
 
 #define ARM_SMMU_MAX_ASIDS		(1 << 16)
 	unsigned int			asid_bits;
@@ -1516,8 +1517,6 @@
 	return 0;
 }
 
-static struct iommu_ops arm_smmu_ops;
-
 static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 {
 	int ret;
@@ -1555,7 +1554,7 @@
 	}
 
 	pgtbl_cfg = (struct io_pgtable_cfg) {
-		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
+		.pgsize_bitmap	= smmu->pgsize_bitmap,
 		.ias		= ias,
 		.oas		= oas,
 		.tlb		= &arm_smmu_gather_ops,
@@ -1566,7 +1565,7 @@
 	if (!pgtbl_ops)
 		return -ENOMEM;
 
-	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
 	smmu_domain->pgtbl_ops = pgtbl_ops;
 
 	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
@@ -2410,7 +2409,6 @@
 {
 	u32 reg;
 	bool coherent;
-	unsigned long pgsize_bitmap = 0;
 
 	/* IDR0 */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
@@ -2541,13 +2539,16 @@
 
 	/* Page sizes */
 	if (reg & IDR5_GRAN64K)
-		pgsize_bitmap |= SZ_64K | SZ_512M;
+		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
 	if (reg & IDR5_GRAN16K)
-		pgsize_bitmap |= SZ_16K | SZ_32M;
+		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
 	if (reg & IDR5_GRAN4K)
-		pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
+		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
 
-	arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;
+	if (arm_smmu_ops.pgsize_bitmap == -1UL)
+		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
+	else
+		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
 
 	/* Output address size */
 	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2409e3b..0360919 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -34,6 +34,7 @@
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
@@ -71,16 +72,15 @@
 		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
 			? 0x400 : 0))
 
+/*
+ * Some 64-bit registers only make sense to write atomically, but in such
+ * cases all the data relevant to AArch32 formats lies within the lower word,
+ * therefore this actually makes more sense than it might first appear.
+ */
 #ifdef CONFIG_64BIT
-#define smmu_writeq	writeq_relaxed
+#define smmu_write_atomic_lq		writeq_relaxed
 #else
-#define smmu_writeq(reg64, addr)				\
-	do {							\
-		u64 __val = (reg64);				\
-		void __iomem *__addr = (addr);			\
-		writel_relaxed(__val >> 32, __addr + 4);	\
-		writel_relaxed(__val, __addr);			\
-	} while (0)
+#define smmu_write_atomic_lq		writel_relaxed
 #endif
 
 /* Configuration registers */
@@ -94,9 +94,13 @@
 #define sCR0_VMIDPNE			(1 << 11)
 #define sCR0_PTM			(1 << 12)
 #define sCR0_FB				(1 << 13)
+#define sCR0_VMID16EN			(1 << 31)
 #define sCR0_BSU_SHIFT			14
 #define sCR0_BSU_MASK			0x3
 
+/* Auxiliary Configuration register */
+#define ARM_SMMU_GR0_sACR		0x10
+
 /* Identification registers */
 #define ARM_SMMU_GR0_ID0		0x20
 #define ARM_SMMU_GR0_ID1		0x24
@@ -116,6 +120,8 @@
 #define ID0_NTS				(1 << 28)
 #define ID0_SMS				(1 << 27)
 #define ID0_ATOSNS			(1 << 26)
+#define ID0_PTFS_NO_AARCH32		(1 << 25)
+#define ID0_PTFS_NO_AARCH32S		(1 << 24)
 #define ID0_CTTW			(1 << 14)
 #define ID0_NUMIRPT_SHIFT		16
 #define ID0_NUMIRPT_MASK		0xff
@@ -141,6 +147,10 @@
 #define ID2_PTFS_4K			(1 << 12)
 #define ID2_PTFS_16K			(1 << 13)
 #define ID2_PTFS_64K			(1 << 14)
+#define ID2_VMID16			(1 << 15)
+
+#define ID7_MAJOR_SHIFT			4
+#define ID7_MAJOR_MASK			0xf
 
 /* Global TLB invalidation */
 #define ARM_SMMU_GR0_TLBIVMID		0x64
@@ -193,12 +203,15 @@
 #define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
 #define CBA2R_RW64_32BIT		(0 << 0)
 #define CBA2R_RW64_64BIT		(1 << 0)
+#define CBA2R_VMID_SHIFT		16
+#define CBA2R_VMID_MASK			0xffff
 
 /* Translation context bank */
 #define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
 #define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))
 
 #define ARM_SMMU_CB_SCTLR		0x0
+#define ARM_SMMU_CB_ACTLR		0x4
 #define ARM_SMMU_CB_RESUME		0x8
 #define ARM_SMMU_CB_TTBCR2		0x10
 #define ARM_SMMU_CB_TTBR0		0x20
@@ -206,11 +219,9 @@
 #define ARM_SMMU_CB_TTBCR		0x30
 #define ARM_SMMU_CB_S1_MAIR0		0x38
 #define ARM_SMMU_CB_S1_MAIR1		0x3c
-#define ARM_SMMU_CB_PAR_LO		0x50
-#define ARM_SMMU_CB_PAR_HI		0x54
+#define ARM_SMMU_CB_PAR			0x50
 #define ARM_SMMU_CB_FSR			0x58
-#define ARM_SMMU_CB_FAR_LO		0x60
-#define ARM_SMMU_CB_FAR_HI		0x64
+#define ARM_SMMU_CB_FAR			0x60
 #define ARM_SMMU_CB_FSYNR0		0x68
 #define ARM_SMMU_CB_S1_TLBIVA		0x600
 #define ARM_SMMU_CB_S1_TLBIASID		0x610
@@ -230,6 +241,10 @@
 #define SCTLR_M				(1 << 0)
 #define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)
 
+#define ARM_MMU500_ACTLR_CPRE		(1 << 1)
+
+#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
+
 #define CB_PAR_F			(1 << 0)
 
 #define ATSR_ACTIVE			(1 << 0)
@@ -270,10 +285,17 @@
 	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
 
 enum arm_smmu_arch_version {
-	ARM_SMMU_V1 = 1,
+	ARM_SMMU_V1,
+	ARM_SMMU_V1_64K,
 	ARM_SMMU_V2,
 };
 
+enum arm_smmu_implementation {
+	GENERIC_SMMU,
+	ARM_MMU500,
+	CAVIUM_SMMUV2,
+};
+
 struct arm_smmu_smr {
 	u8				idx;
 	u16				mask;
@@ -305,11 +327,18 @@
 #define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
 #define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
 #define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
+#define ARM_SMMU_FEAT_VMID16		(1 << 6)
+#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
+#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
+#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
+#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
+#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
 	u32				features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
 	u32				options;
 	enum arm_smmu_arch_version	version;
+	enum arm_smmu_implementation	model;
 
 	u32				num_context_banks;
 	u32				num_s2_context_banks;
@@ -322,6 +351,7 @@
 	unsigned long			va_size;
 	unsigned long			ipa_size;
 	unsigned long			pa_size;
+	unsigned long			pgsize_bitmap;
 
 	u32				num_global_irqs;
 	u32				num_context_irqs;
@@ -329,17 +359,27 @@
 
 	struct list_head		list;
 	struct rb_root			masters;
+
+	u32				cavium_id_base; /* Specific to Cavium */
+};
+
+enum arm_smmu_context_fmt {
+	ARM_SMMU_CTX_FMT_NONE,
+	ARM_SMMU_CTX_FMT_AARCH64,
+	ARM_SMMU_CTX_FMT_AARCH32_L,
+	ARM_SMMU_CTX_FMT_AARCH32_S,
 };
 
 struct arm_smmu_cfg {
 	u8				cbndx;
 	u8				irptndx;
 	u32				cbar;
+	enum arm_smmu_context_fmt	fmt;
 };
 #define INVALID_IRPTNDX			0xff
 
-#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
-#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)
+#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
+#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
 
 enum arm_smmu_domain_stage {
 	ARM_SMMU_DOMAIN_S1 = 0,
@@ -357,8 +397,6 @@
 	struct iommu_domain		domain;
 };
 
-static struct iommu_ops arm_smmu_ops;
-
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
 
@@ -367,6 +405,8 @@
 	const char *prop;
 };
 
+static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
+
 static struct arm_smmu_option_prop arm_smmu_options[] = {
 	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
 	{ 0, NULL},
@@ -578,11 +618,11 @@
 
 	if (stage1) {
 		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
-		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
+		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
 			       base + ARM_SMMU_CB_S1_TLBIASID);
 	} else {
 		base = ARM_SMMU_GR0(smmu);
-		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
+		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
 			       base + ARM_SMMU_GR0_TLBIVMID);
 	}
 
@@ -602,37 +642,33 @@
 		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
 
-		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
+		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
 			iova &= ~12UL;
-			iova |= ARM_SMMU_CB_ASID(cfg);
+			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
 			do {
 				writel_relaxed(iova, reg);
 				iova += granule;
 			} while (size -= granule);
-#ifdef CONFIG_64BIT
 		} else {
 			iova >>= 12;
-			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
+			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
 			do {
 				writeq_relaxed(iova, reg);
 				iova += granule >> 12;
 			} while (size -= granule);
-#endif
 		}
-#ifdef CONFIG_64BIT
 	} else if (smmu->version == ARM_SMMU_V2) {
 		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
 			      ARM_SMMU_CB_S2_TLBIIPAS2;
 		iova >>= 12;
 		do {
-			writeq_relaxed(iova, reg);
+			smmu_write_atomic_lq(iova, reg);
 			iova += granule >> 12;
 		} while (size -= granule);
-#endif
 	} else {
 		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
-		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
+		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
 	}
 }
 
@@ -645,7 +681,7 @@
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
 	int flags, ret;
-	u32 fsr, far, fsynr, resume;
+	u32 fsr, fsynr, resume;
 	unsigned long iova;
 	struct iommu_domain *domain = dev;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -667,13 +703,7 @@
 	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
 	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
 
-	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
-	iova = far;
-#ifdef CONFIG_64BIT
-	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
-	iova |= ((unsigned long)far << 32);
-#endif
-
+	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
 	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
 		ret = IRQ_HANDLED;
 		resume = RESUME_RETRY;
@@ -734,22 +764,20 @@
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 
 	if (smmu->version > ARM_SMMU_V1) {
-		/*
-		 * CBA2R.
-		 * *Must* be initialised before CBAR thanks to VMID16
-		 * architectural oversight affected some implementations.
-		 */
-#ifdef CONFIG_64BIT
-		reg = CBA2R_RW64_64BIT;
-#else
-		reg = CBA2R_RW64_32BIT;
-#endif
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+			reg = CBA2R_RW64_64BIT;
+		else
+			reg = CBA2R_RW64_32BIT;
+		/* 16-bit VMIDs live in CBA2R */
+		if (smmu->features & ARM_SMMU_FEAT_VMID16)
+			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
+
 		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
 	}
 
 	/* CBAR */
 	reg = cfg->cbar;
-	if (smmu->version == ARM_SMMU_V1)
+	if (smmu->version < ARM_SMMU_V2)
 		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
 
 	/*
@@ -759,8 +787,9 @@
 	if (stage1) {
 		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
 			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
-	} else {
-		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
+	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
+		/* 8-bit VMIDs live in CBAR */
+		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
 	}
 	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
 
@@ -768,15 +797,15 @@
 	if (stage1) {
 		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
 
-		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
-		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
+		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
 
 		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
-		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
-		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
+		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
 	} else {
 		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
-		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
 	}
 
 	/* TTBCR */
@@ -826,6 +855,12 @@
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
+	/* We're bypassing these SIDs, so don't allocate an actual context */
+	if (domain->type == IOMMU_DOMAIN_DMA) {
+		smmu_domain->smmu = smmu;
+		goto out_unlock;
+	}
+
 	/*
 	 * Mapping the requested stage onto what we support is surprisingly
 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -849,16 +884,40 @@
 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
 		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
 
+	/*
+	 * Choosing a suitable context format is even more fiddly. Until we
+	 * grow some way for the caller to express a preference, and/or move
+	 * the decision into the io-pgtable code where it arguably belongs,
+	 * just aim for the closest thing to the rest of the system, and hope
+	 * that the hardware isn't esoteric enough that we can't assume AArch64
+	 * support to be a superset of AArch32 support...
+	 */
+	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
+		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
+	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
+	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
+			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
+			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
+		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
+
+	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	switch (smmu_domain->stage) {
 	case ARM_SMMU_DOMAIN_S1:
 		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
 		start = smmu->num_s2_context_banks;
 		ias = smmu->va_size;
 		oas = smmu->ipa_size;
-		if (IS_ENABLED(CONFIG_64BIT))
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
 			fmt = ARM_64_LPAE_S1;
-		else
+		} else {
 			fmt = ARM_32_LPAE_S1;
+			ias = min(ias, 32UL);
+			oas = min(oas, 40UL);
+		}
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
 		/*
@@ -870,10 +929,13 @@
 		start = 0;
 		ias = smmu->ipa_size;
 		oas = smmu->pa_size;
-		if (IS_ENABLED(CONFIG_64BIT))
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
 			fmt = ARM_64_LPAE_S2;
-		else
+		} else {
 			fmt = ARM_32_LPAE_S2;
+			ias = min(ias, 40UL);
+			oas = min(oas, 40UL);
+		}
 		break;
 	default:
 		ret = -EINVAL;
@@ -886,7 +948,7 @@
 		goto out_unlock;
 
 	cfg->cbndx = ret;
-	if (smmu->version == ARM_SMMU_V1) {
+	if (smmu->version < ARM_SMMU_V2) {
 		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
 		cfg->irptndx %= smmu->num_context_irqs;
 	} else {
@@ -894,7 +956,7 @@
 	}
 
 	pgtbl_cfg = (struct io_pgtable_cfg) {
-		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
+		.pgsize_bitmap	= smmu->pgsize_bitmap,
 		.ias		= ias,
 		.oas		= oas,
 		.tlb		= &arm_smmu_gather_ops,
@@ -908,8 +970,8 @@
 		goto out_clear_smmu;
 	}
 
-	/* Update our support page sizes to reflect the page table format */
-	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+	/* Update the domain's page sizes to reflect the page table format */
+	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
 
 	/* Initialise the context bank with our page table cfg */
 	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
@@ -948,7 +1010,7 @@
 	void __iomem *cb_base;
 	int irq;
 
-	if (!smmu)
+	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
 		return;
 
 	/*
@@ -1089,18 +1151,20 @@
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
+	/*
+	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
+	 * for all devices behind the SMMU. Note that we need to take
+	 * care configuring SMRs for devices that are both a platform_device
+	 * and a PCI device (i.e. a PCI host controller).
+	 */
+	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
+		return 0;
+
 	/* Devices in an IOMMU group may already be configured */
 	ret = arm_smmu_master_configure_smrs(smmu, cfg);
 	if (ret)
 		return ret == -EEXIST ? 0 : ret;
 
-	/*
-	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
-	 * for all devices behind the SMMU.
-	 */
-	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
-		return 0;
-
 	for (i = 0; i < cfg->num_streamids; ++i) {
 		u32 idx, s2cr;
 
@@ -1244,8 +1308,8 @@
 	/* ATS1 registers can only be written atomically */
 	va = iova & ~0xfffUL;
 	if (smmu->version == ARM_SMMU_V2)
-		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
-	else
+		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
+	else /* Register is only 32-bit in v1 */
 		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
 
 	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
@@ -1256,9 +1320,7 @@
 		return ops->iova_to_phys(ops, iova);
 	}
 
-	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
-	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
-
+	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
 	if (phys & CB_PAR_F) {
 		dev_err(dev, "translation fault!\n");
 		dev_err(dev, "PAR = 0x%llx\n", phys);
@@ -1484,7 +1546,7 @@
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 	void __iomem *cb_base;
 	int i = 0;
-	u32 reg;
+	u32 reg, major;
 
 	/* clear global FSR */
 	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
@@ -1497,11 +1559,33 @@
 		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
 	}
 
+	/*
+	 * Before clearing ARM_MMU500_ACTLR_CPRE, the CACHE_LOCK bit of
+	 * ACR must be cleared first. Note that the CACHE_LOCK bit is
+	 * only present in MMU-500r2 onwards.
+	 */
+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
+	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
+	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
+		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
+		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
+	}
+
 	/* Make sure all context banks are disabled and clear CB_FSR  */
 	for (i = 0; i < smmu->num_context_banks; ++i) {
 		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
 		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
 		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
+		/*
+		 * Disable MMU-500's not-particularly-beneficial next-page
+		 * prefetcher for the sake of errata #841119 and #826419.
+		 */
+		if (smmu->model == ARM_MMU500) {
+			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
+			reg &= ~ARM_MMU500_ACTLR_CPRE;
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
+		}
 	}
 
 	/* Invalidate the TLB, just in case */
@@ -1529,6 +1613,9 @@
 	/* Don't upgrade barriers */
 	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
 
+	if (smmu->features & ARM_SMMU_FEAT_VMID16)
+		reg |= sCR0_VMID16EN;
+
 	/* Push the button */
 	__arm_smmu_tlb_sync(smmu);
 	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -1561,7 +1648,8 @@
 	bool cttw_dt, cttw_reg;
 
 	dev_notice(smmu->dev, "probing hardware configuration...\n");
-	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
+	dev_notice(smmu->dev, "SMMUv%d with:\n",
+			smmu->version == ARM_SMMU_V2 ? 2 : 1);
 
 	/* ID0 */
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
@@ -1593,7 +1681,8 @@
 		return -ENODEV;
 	}
 
-	if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
+	if ((id & ID0_S1TS) &&
+		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
 		dev_notice(smmu->dev, "\taddress translation ops\n");
 	}
@@ -1649,6 +1738,12 @@
 					   ID0_NUMSIDB_MASK;
 	}
 
+	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
+		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
+		if (!(id & ID0_PTFS_NO_AARCH32S))
+			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
+	}
+
 	/* ID1 */
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
 	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
@@ -1669,6 +1764,17 @@
 	}
 	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
 		   smmu->num_context_banks, smmu->num_s2_context_banks);
+	/*
+	 * Cavium CN88xx erratum #27704.
+	 * Ensure ASID and VMID allocation is unique across all SMMUs in
+	 * the system.
+	 */
+	if (smmu->model == CAVIUM_SMMUV2) {
+		smmu->cavium_id_base =
+			atomic_add_return(smmu->num_context_banks,
+					  &cavium_smmu_context_count);
+		smmu->cavium_id_base -= smmu->num_context_banks;
+	}
 
 	/* ID2 */
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
@@ -1679,6 +1785,9 @@
 	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
 	smmu->pa_size = size;
 
+	if (id & ID2_VMID16)
+		smmu->features |= ARM_SMMU_FEAT_VMID16;
+
 	/*
 	 * What the page table walker can address actually depends on which
 	 * descriptor format is in use, but since a) we don't know that yet,
@@ -1688,26 +1797,39 @@
 		dev_warn(smmu->dev,
 			 "failed to set DMA mask for table walker\n");
 
-	if (smmu->version == ARM_SMMU_V1) {
+	if (smmu->version < ARM_SMMU_V2) {
 		smmu->va_size = smmu->ipa_size;
-		size = SZ_4K | SZ_2M | SZ_1G;
+		if (smmu->version == ARM_SMMU_V1_64K)
+			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
 	} else {
 		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
 		smmu->va_size = arm_smmu_id_size_to_bits(size);
-#ifndef CONFIG_64BIT
-		smmu->va_size = min(32UL, smmu->va_size);
-#endif
-		size = 0;
 		if (id & ID2_PTFS_4K)
-			size |= SZ_4K | SZ_2M | SZ_1G;
+			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
 		if (id & ID2_PTFS_16K)
-			size |= SZ_16K | SZ_32M;
+			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
 		if (id & ID2_PTFS_64K)
-			size |= SZ_64K | SZ_512M;
+			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
 	}
 
-	arm_smmu_ops.pgsize_bitmap &= size;
-	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
+	/* Now we've corralled the various formats, what'll it do? */
+	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
+		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
+	if (smmu->features &
+	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
+		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
+	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
+		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
+	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
+		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
+
+	if (arm_smmu_ops.pgsize_bitmap == -1UL)
+		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
+	else
+		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
+	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
+		   smmu->pgsize_bitmap);
+
 
 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
 		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
@@ -1720,12 +1842,27 @@
 	return 0;
 }
 
+struct arm_smmu_match_data {
+	enum arm_smmu_arch_version version;
+	enum arm_smmu_implementation model;
+};
+
+#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
+static struct arm_smmu_match_data name = { .version = ver, .model = imp }
+
+ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
+ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
+ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
+ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
+ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
+
 static const struct of_device_id arm_smmu_of_match[] = {
-	{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
-	{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
-	{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
-	{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
-	{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
+	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
+	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
+	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
+	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
+	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
+	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
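
A side note on the match-data pattern used above: instead of casting an enum into the of_device_id .data pointer, the table now points at a small struct so several properties (version, model) travel together. Below is a minimal, self-contained userspace sketch of that pattern; all names and values are invented, not the driver's.

#include <stdio.h>
#include <string.h>

enum arch_version { V1, V1_64K, V2 };
enum implementation { GENERIC, MMU500, CAVIUM };

struct match_data {
	enum arch_version version;
	enum implementation model;
};

#define MATCH_DATA(name, ver, imp) \
static const struct match_data name = { .version = ver, .model = imp }

MATCH_DATA(generic_v1, V1, GENERIC);
MATCH_DATA(cavium_v2, V2, CAVIUM);

struct of_id { const char *compatible; const void *data; };

static const struct of_id of_match[] = {
	{ "arm,smmu-v1",    &generic_v1 },
	{ "cavium,smmu-v2", &cavium_v2 },
	{ NULL, NULL },
};

int main(void)
{
	const char *node = "cavium,smmu-v2";	/* pretend DT compatible */

	for (const struct of_id *id = of_match; id->compatible; id++) {
		if (!strcmp(id->compatible, node)) {
			const struct match_data *d = id->data;

			printf("version=%d model=%d\n", d->version, d->model);
		}
	}
	return 0;
}
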
@@ -1733,6 +1870,7 @@
 static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
+	const struct arm_smmu_match_data *data;
 	struct resource *res;
 	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
@@ -1748,7 +1886,9 @@
 	smmu->dev = dev;
 
 	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
-	smmu->version = (enum arm_smmu_arch_version)of_id->data;
+	data = of_id->data;
+	smmu->version = data->version;
+	smmu->model = data->model;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	smmu->base = devm_ioremap_resource(dev, res);
@@ -1814,7 +1954,7 @@
 
 	parse_driver_options(smmu);
 
-	if (smmu->version > ARM_SMMU_V1 &&
+	if (smmu->version == ARM_SMMU_V2 &&
 	    smmu->num_context_banks != smmu->num_context_irqs) {
 		dev_err(dev,
 			"found only %d context interrupt(s) but %d required\n",
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 72d6182..ea5a9eb 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -94,7 +94,7 @@
 		return -ENODEV;
 
 	/* Use the smallest supported page size for IOVA granularity */
-	order = __ffs(domain->ops->pgsize_bitmap);
+	order = __ffs(domain->pgsize_bitmap);
 	base_pfn = max_t(unsigned long, 1, base >> order);
 	end_pfn = (base + size - 1) >> order;
 
@@ -190,11 +190,15 @@
 	kvfree(pages);
 }
 
-static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
+static struct page **__iommu_dma_alloc_pages(unsigned int count,
+		unsigned long order_mask, gfp_t gfp)
 {
 	struct page **pages;
 	unsigned int i = 0, array_size = count * sizeof(*pages);
-	unsigned int order = MAX_ORDER;
+
+	order_mask &= (2U << MAX_ORDER) - 1;
+	if (!order_mask)
+		return NULL;
 
 	if (array_size <= PAGE_SIZE)
 		pages = kzalloc(array_size, GFP_KERNEL);
@@ -208,36 +212,38 @@
 
 	while (count) {
 		struct page *page = NULL;
-		int j;
+		unsigned int order_size;
 
 		/*
 		 * Higher-order allocations are a convenience rather
 		 * than a necessity, hence using __GFP_NORETRY until
-		 * falling back to single-page allocations.
+		 * falling back to minimum-order allocations.
 		 */
-		for (order = min_t(unsigned int, order, __fls(count));
-		     order > 0; order--) {
-			page = alloc_pages(gfp | __GFP_NORETRY, order);
+		for (order_mask &= (2U << __fls(count)) - 1;
+		     order_mask; order_mask &= ~order_size) {
+			unsigned int order = __fls(order_mask);
+
+			order_size = 1U << order;
+			page = alloc_pages((order_mask - order_size) ?
+					   gfp | __GFP_NORETRY : gfp, order);
 			if (!page)
 				continue;
-			if (PageCompound(page)) {
-				if (!split_huge_page(page))
-					break;
-				__free_pages(page, order);
-			} else {
+			if (!order)
+				break;
+			if (!PageCompound(page)) {
 				split_page(page, order);
 				break;
+			} else if (!split_huge_page(page)) {
+				break;
 			}
+			__free_pages(page, order);
 		}
-		if (!page)
-			page = alloc_page(gfp);
 		if (!page) {
 			__iommu_dma_free_pages(pages, i);
 			return NULL;
 		}
-		j = 1 << order;
-		count -= j;
-		while (j--)
+		count -= order_size;
+		while (order_size--)
 			pages[i++] = page++;
 	}
 	return pages;
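
The reworked allocation loop walks a bitmap of permitted block orders, always trying the largest order that still fits the remaining count and falling back by clearing bits on failure. A simplified userspace sketch of that walk, assuming every allocation succeeds (names and the order mask are illustrative):

#include <stdio.h>

static unsigned int fls_u(unsigned int x)
{
	return 31 - __builtin_clz(x);	/* undefined for x == 0 */
}

int main(void)
{
	unsigned int count = 13;			/* pages still needed */
	unsigned int allowed = (1U << 0) | (1U << 3);	/* 1-page and 8-page blocks */

	while (count) {
		/* only consider orders no larger than what is left */
		unsigned int mask = allowed & ((2U << fls_u(count)) - 1);
		unsigned int order = fls_u(mask);
		unsigned int order_size = 1U << order;

		/*
		 * A real allocator may fail here; the fallback is then
		 * mask &= ~order_size and another try with a smaller order.
		 */
		printf("allocate %u page(s) as one order-%u block\n",
		       order_size, order);
		count -= order_size;
	}
	return 0;
}
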
@@ -267,6 +273,7 @@
  *	 attached to an iommu_dma_domain
  * @size: Size of buffer in bytes
  * @gfp: Allocation flags
+ * @attrs: DMA attributes for this allocation
  * @prot: IOMMU mapping flags
  * @handle: Out argument for allocated DMA handle
  * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
@@ -278,8 +285,8 @@
  * Return: Array of struct page pointers describing the buffer,
  *	   or NULL on failure.
  */
-struct page **iommu_dma_alloc(struct device *dev, size_t size,
-		gfp_t gfp, int prot, dma_addr_t *handle,
+struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
+		struct dma_attrs *attrs, int prot, dma_addr_t *handle,
 		void (*flush_page)(struct device *, const void *, phys_addr_t))
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -288,11 +295,22 @@
 	struct page **pages;
 	struct sg_table sgt;
 	dma_addr_t dma_addr;
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 
 	*handle = DMA_ERROR_CODE;
 
-	pages = __iommu_dma_alloc_pages(count, gfp);
+	min_size = alloc_sizes & -alloc_sizes;
+	if (min_size < PAGE_SIZE) {
+		min_size = PAGE_SIZE;
+		alloc_sizes |= PAGE_SIZE;
+	} else {
+		size = ALIGN(size, min_size);
+	}
+	if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+		alloc_sizes = min_size;
+
+	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
 	if (!pages)
 		return NULL;
 
@@ -389,26 +407,58 @@
 
 /*
  * Prepare a successfully-mapped scatterlist to give back to the caller.
- * Handling IOVA concatenation can come later, if needed
+ *
+ * At this point the segments are already laid out by iommu_dma_map_sg() to
+ * avoid individually crossing any boundaries, so we merely need to check a
+ * segment's start address to avoid concatenating across one.
  */
 static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 		dma_addr_t dma_addr)
 {
-	struct scatterlist *s;
-	int i;
+	struct scatterlist *s, *cur = sg;
+	unsigned long seg_mask = dma_get_seg_boundary(dev);
+	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
+	int i, count = 0;
 
 	for_each_sg(sg, s, nents, i) {
-		/* Un-swizzling the fields here, hence the naming mismatch */
-		unsigned int s_offset = sg_dma_address(s);
+		/* Restore this segment's original unaligned fields first */
+		unsigned int s_iova_off = sg_dma_address(s);
 		unsigned int s_length = sg_dma_len(s);
-		unsigned int s_dma_len = s->length;
+		unsigned int s_iova_len = s->length;
 
-		s->offset = s_offset;
+		s->offset += s_iova_off;
 		s->length = s_length;
-		sg_dma_address(s) = dma_addr + s_offset;
-		dma_addr += s_dma_len;
+		sg_dma_address(s) = DMA_ERROR_CODE;
+		sg_dma_len(s) = 0;
+
+		/*
+		 * Now fill in the real DMA data. If...
+		 * - there is a valid output segment to append to
+		 * - and this segment starts on an IOVA page boundary
+		 * - but doesn't fall at a segment boundary
+		 * - and wouldn't make the resulting output segment too long
+		 */
+		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
+		    (cur_len + s_length <= max_len)) {
+			/* ...then concatenate it with the previous one */
+			cur_len += s_length;
+		} else {
+			/* Otherwise start the next output segment */
+			if (i > 0)
+				cur = sg_next(cur);
+			cur_len = s_length;
+			count++;
+
+			sg_dma_address(cur) = dma_addr + s_iova_off;
+		}
+
+		sg_dma_len(cur) = cur_len;
+		dma_addr += s_iova_len;
+
+		if (s_length + s_iova_off < s_iova_len)
+			cur_len = 0;
 	}
-	return i;
+	return count;
 }
 
 /*
@@ -422,7 +472,7 @@
 
 	for_each_sg(sg, s, nents, i) {
 		if (sg_dma_address(s) != DMA_ERROR_CODE)
-			s->offset = sg_dma_address(s);
+			s->offset += sg_dma_address(s);
 		if (sg_dma_len(s))
 			s->length = sg_dma_len(s);
 		sg_dma_address(s) = DMA_ERROR_CODE;
@@ -446,34 +496,40 @@
 	struct scatterlist *s, *prev = NULL;
 	dma_addr_t dma_addr;
 	size_t iova_len = 0;
+	unsigned long mask = dma_get_seg_boundary(dev);
 	int i;
 
 	/*
 	 * Work out how much IOVA space we need, and align the segments to
 	 * IOVA granules for the IOMMU driver to handle. With some clever
 	 * trickery we can modify the list in-place, but reversibly, by
-	 * hiding the original data in the as-yet-unused DMA fields.
+	 * stashing the unaligned parts in the as-yet-unused DMA fields.
 	 */
 	for_each_sg(sg, s, nents, i) {
-		size_t s_offset = iova_offset(iovad, s->offset);
+		size_t s_iova_off = iova_offset(iovad, s->offset);
 		size_t s_length = s->length;
+		size_t pad_len = (mask - iova_len + 1) & mask;
 
-		sg_dma_address(s) = s_offset;
+		sg_dma_address(s) = s_iova_off;
 		sg_dma_len(s) = s_length;
-		s->offset -= s_offset;
-		s_length = iova_align(iovad, s_length + s_offset);
+		s->offset -= s_iova_off;
+		s_length = iova_align(iovad, s_length + s_iova_off);
 		s->length = s_length;
 
 		/*
-		 * The simple way to avoid the rare case of a segment
-		 * crossing the boundary mask is to pad the previous one
-		 * to end at a naturally-aligned IOVA for this one's size,
-		 * at the cost of potentially over-allocating a little.
+		 * Due to the alignment of our single IOVA allocation, we can
+		 * depend on these assumptions about the segment boundary mask:
+		 * - If mask size >= IOVA size, then the IOVA range cannot
+		 *   possibly fall across a boundary, so we don't care.
+		 * - If mask size < IOVA size, then the IOVA range must start
+		 *   exactly on a boundary, therefore we can lay things out
+		 *   based purely on segment lengths without needing to know
+		 *   the actual addresses beforehand.
+		 * - The mask must be a power of 2, so pad_len == 0 if
+		 *   iova_len == 0, thus we cannot dereference prev the first
+		 *   time through here (i.e. before it has a meaningful value).
 		 */
-		if (prev) {
-			size_t pad_len = roundup_pow_of_two(s_length);
-
-			pad_len = (pad_len - iova_len) & (pad_len - 1);
+		if (pad_len && pad_len < s_length - 1) {
 			prev->length += pad_len;
 			iova_len += pad_len;
 		}
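
The padding arithmetic leans on the boundary mask being a power of two minus one: (mask - iova_len + 1) & mask is exactly the distance from the running length to the next boundary, so padding the previous segment by that amount makes the next one start on a boundary. A standalone sketch with made-up segment lengths:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0xffff;		/* segments must not cross 64K */
	unsigned long iova_len = 0;
	unsigned long seg_lengths[] = { 0x9000, 0xa000, 0x2000 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned long s_length = seg_lengths[i];
		unsigned long pad_len = (mask - iova_len + 1) & mask;

		if (pad_len && pad_len < s_length - 1) {
			printf("pad previous segment by 0x%lx\n", pad_len);
			iova_len += pad_len;
		}
		printf("segment %u at offset 0x%lx, length 0x%lx\n",
		       i, iova_len, s_length);
		iova_len += s_length;
	}
	return 0;
}
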
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8ffd756..6a86b5d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1579,18 +1579,14 @@
 	reason = dmar_get_fault_reason(fault_reason, &fault_type);
 
 	if (fault_type == INTR_REMAP)
-		pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
-		       "fault index %llx\n"
-			"INTR-REMAP:[fault reason %02d] %s\n",
-			(source_id >> 8), PCI_SLOT(source_id & 0xFF),
+		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
+			source_id >> 8, PCI_SLOT(source_id & 0xFF),
 			PCI_FUNC(source_id & 0xFF), addr >> 48,
 			fault_reason, reason);
 	else
-		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
-		       "fault addr %llx \n"
-		       "DMAR:[fault reason %02d] %s\n",
-		       (type ? "DMA Read" : "DMA Write"),
-		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+		pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
+		       type ? "DMA Read" : "DMA Write",
+		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
 		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
 	return 0;
 }
@@ -1602,10 +1598,17 @@
 	int reg, fault_index;
 	u32 fault_status;
 	unsigned long flag;
+	bool ratelimited;
+	static DEFINE_RATELIMIT_STATE(rs,
+				      DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
+
+	/* Disable printing, simply clear the fault when ratelimited */
+	ratelimited = !__ratelimit(&rs);
 
 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-	if (fault_status)
+	if (fault_status && !ratelimited)
 		pr_err("DRHD: handling fault status reg %x\n", fault_status);
 
 	/* TBD: ignore advanced fault log currently */
@@ -1627,24 +1630,28 @@
 		if (!(data & DMA_FRCD_F))
 			break;
 
-		fault_reason = dma_frcd_fault_reason(data);
-		type = dma_frcd_type(data);
+		if (!ratelimited) {
+			fault_reason = dma_frcd_fault_reason(data);
+			type = dma_frcd_type(data);
 
-		data = readl(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN + 8);
-		source_id = dma_frcd_source_id(data);
+			data = readl(iommu->reg + reg +
+				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
+			source_id = dma_frcd_source_id(data);
 
-		guest_addr = dmar_readq(iommu->reg + reg +
-				fault_index * PRIMARY_FAULT_REG_LEN);
-		guest_addr = dma_frcd_page_addr(guest_addr);
+			guest_addr = dmar_readq(iommu->reg + reg +
+					fault_index * PRIMARY_FAULT_REG_LEN);
+			guest_addr = dma_frcd_page_addr(guest_addr);
+		}
+
 		/* clear the fault */
 		writel(DMA_FRCD_F, iommu->reg + reg +
 			fault_index * PRIMARY_FAULT_REG_LEN + 12);
 
 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
-		dmar_fault_do_one(iommu, type, fault_reason,
-				source_id, guest_addr);
+		if (!ratelimited)
+			dmar_fault_do_one(iommu, type, fault_reason,
+					  source_id, guest_addr);
 
 		fault_index++;
 		if (fault_index >= cap_num_fault_regs(iommu->cap))
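
The DMAR change keeps clearing faults unconditionally and only gates the printing behind a rate limiter. A rough userspace analogue of that shape; the limiter below is a simplified stand-in for the kernel's __ratelimit(), not its implementation:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

struct ratelimit {
	time_t interval;	/* seconds */
	int burst;		/* messages allowed per interval */
	time_t begin;
	int printed;
};

static bool ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (rs->begin == 0 || now - rs->begin >= rs->interval) {
		rs->begin = now;
		rs->printed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return true;
	}
	return false;
}

int main(void)
{
	struct ratelimit rs = { .interval = 5, .burst = 3 };

	for (int fault = 0; fault < 10; fault++) {
		bool ratelimited = !ratelimit_ok(&rs);

		/* always "clear the fault"; only log when not ratelimited */
		if (!ratelimited)
			printf("fault %d: logged\n", fault);
		else
			printf("fault %d: cleared silently\n", fault);
	}
	return 0;
}
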
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a2e1b7f..b2bfb95 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1143,7 +1143,7 @@
 	} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
 }
 
-/* free page table pages. last level pte should already be cleared */
+/* clear last level (leaf) ptes and free page table pages. */
 static void dma_pte_free_pagetable(struct dmar_domain *domain,
 				   unsigned long start_pfn,
 				   unsigned long last_pfn)
@@ -2458,7 +2458,7 @@
 	}
 
 	/* register PCI DMA alias device */
-	if (req_id != dma_alias && dev_is_pci(dev)) {
+	if (dev_is_pci(dev) && req_id != dma_alias) {
 		tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
 					       dma_alias & 0xff, NULL, domain);
 
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 9488e3c..8c61399 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -121,6 +121,8 @@
 #define ARM_V7S_TEX_MASK		0x7
 #define ARM_V7S_ATTR_TEX(val)		(((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
 
+#define ARM_V7S_ATTR_MTK_4GB		BIT(9) /* MTK extends it for 4GB mode */
+
 /* *well, except for TEX on level 2 large pages, of course :( */
 #define ARM_V7S_CONT_PAGE_TEX_SHIFT	6
 #define ARM_V7S_CONT_PAGE_TEX_MASK	(ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)
@@ -258,9 +260,10 @@
 					 struct io_pgtable_cfg *cfg)
 {
 	bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
-	arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S |
-			    ARM_V7S_ATTR_TEX(1);
+	arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S;
 
+	if (!(prot & IOMMU_MMIO))
+		pte |= ARM_V7S_ATTR_TEX(1);
 	if (ap) {
 		pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
 		if (!(prot & IOMMU_WRITE))
@@ -270,7 +273,9 @@
 
 	if ((prot & IOMMU_NOEXEC) && ap)
 		pte |= ARM_V7S_ATTR_XN(lvl);
-	if (prot & IOMMU_CACHE)
+	if (prot & IOMMU_MMIO)
+		pte |= ARM_V7S_ATTR_B;
+	else if (prot & IOMMU_CACHE)
 		pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;
 
 	return pte;
@@ -279,10 +284,13 @@
 static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
 {
 	int prot = IOMMU_READ;
+	arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
 
-	if (pte & (ARM_V7S_PTE_AP_RDONLY << ARM_V7S_ATTR_SHIFT(lvl)))
+	if (attr & ARM_V7S_PTE_AP_RDONLY)
 		prot |= IOMMU_WRITE;
-	if (pte & ARM_V7S_ATTR_C)
+	if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
+		prot |= IOMMU_MMIO;
+	else if (pte & ARM_V7S_ATTR_C)
 		prot |= IOMMU_CACHE;
 
 	return prot;
@@ -364,6 +372,9 @@
 	if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
 		pte |= ARM_V7S_ATTR_NS_SECTION;
 
+	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
+		pte |= ARM_V7S_ATTR_MTK_4GB;
+
 	if (num_entries > 1)
 		pte = arm_v7s_pte_to_cont(pte, lvl);
 
@@ -625,9 +636,15 @@
 
 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
 			    IO_PGTABLE_QUIRK_NO_PERMS |
-			    IO_PGTABLE_QUIRK_TLBI_ON_MAP))
+			    IO_PGTABLE_QUIRK_TLBI_ON_MAP |
+			    IO_PGTABLE_QUIRK_ARM_MTK_4GB))
 		return NULL;
 
+	/* If ARM_MTK_4GB is enabled, NO_PERMS is also expected. */
+	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB &&
+	    !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
+		return NULL;
+
 	data = kmalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return NULL;
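
The common idea in the v7s (and LPAE) attribute changes is a three-way choice: IOMMU_MMIO selects Device memory, IOMMU_CACHE selects cacheable Normal memory, and anything else gets Normal non-cacheable. An illustrative mapping in plain C; the flag and attribute values below are placeholders, not the real descriptor encodings:

#include <stdio.h>

#define IOMMU_CACHE	(1 << 2)
#define IOMMU_MMIO	(1 << 4)

#define ATTR_DEVICE	0x1	/* strongly-ordered/Device */
#define ATTR_CACHED	0x2	/* Normal, write-back cacheable */
#define ATTR_NONCACHED	0x3	/* Normal, non-cacheable */

static unsigned int prot_to_attr(int prot)
{
	if (prot & IOMMU_MMIO)
		return ATTR_DEVICE;
	else if (prot & IOMMU_CACHE)
		return ATTR_CACHED;
	return ATTR_NONCACHED;
}

int main(void)
{
	printf("MMIO  -> %u\n", prot_to_attr(IOMMU_MMIO));
	printf("CACHE -> %u\n", prot_to_attr(IOMMU_CACHE));
	printf("none  -> %u\n", prot_to_attr(0));
	return 0;
}
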
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f433b51..a1ed1b7 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -355,7 +355,10 @@
 		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
 			pte |= ARM_LPAE_PTE_AP_RDONLY;
 
-		if (prot & IOMMU_CACHE)
+		if (prot & IOMMU_MMIO)
+			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
+				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
+		else if (prot & IOMMU_CACHE)
 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 	} else {
@@ -364,7 +367,9 @@
 			pte |= ARM_LPAE_PTE_HAP_READ;
 		if (prot & IOMMU_WRITE)
 			pte |= ARM_LPAE_PTE_HAP_WRITE;
-		if (prot & IOMMU_CACHE)
+		if (prot & IOMMU_MMIO)
+			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
+		else if (prot & IOMMU_CACHE)
 			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
 		else
 			pte |= ARM_LPAE_PTE_MEMATTR_NC;
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 876f6a7..127558d 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -25,8 +25,7 @@
 #include "io-pgtable.h"
 
 static const struct io_pgtable_init_fns *
-io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
-{
+io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
 #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
 	[ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
 	[ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index d4f5027..969d82c 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -60,10 +60,16 @@
 	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
 	 *	(unmapped) entries but the hardware might do so anyway, perform
 	 *	TLB maintenance when mapping as well as when unmapping.
+	 *
+	 * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
+	 *	PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
+	 *	when the SoC is in "4GB mode" and they can only access the high
+	 *	remap of DRAM (0x1_00000000 to 0x1_ffffffff).
 	 */
 	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
 	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
 	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
+	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
 	unsigned long			quirks;
 	unsigned long			pgsize_bitmap;
 	unsigned int			ias;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index bfd4f7c..3000051 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -337,9 +337,9 @@
 	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
 		return 0;
 
-	BUG_ON(!domain->ops->pgsize_bitmap);
+	BUG_ON(!domain->pgsize_bitmap);
 
-	pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
+	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
 	INIT_LIST_HEAD(&mappings);
 
 	iommu_get_dm_regions(dev, &mappings);
@@ -660,8 +660,8 @@
 }
 
 /*
- * Look for aliases to or from the given device for exisiting groups.  The
- * dma_alias_devfn only supports aliases on the same bus, therefore the search
+ * Look for aliases to or from the given device for existing groups. DMA
+ * aliases are only supported on the same bus, therefore the search
  * space is quite small (especially since we're really only looking at pcie
  * device, and therefore only expect multiple slots on the root complex or
  * downstream switch ports).  It's conceivable though that a pair of
@@ -686,11 +686,7 @@
 			continue;
 
 		/* We alias them or they alias us */
-		if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
-		     pdev->dma_alias_devfn == tmp->devfn) ||
-		    ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
-		     tmp->dma_alias_devfn == pdev->devfn)) {
-
+		if (pci_devs_are_dma_aliases(pdev, tmp)) {
 			group = get_pci_alias_group(tmp, devfns);
 			if (group) {
 				pci_dev_put(tmp);
@@ -848,7 +844,8 @@
 	if (!group->default_domain) {
 		group->default_domain = __iommu_domain_alloc(dev->bus,
 							     IOMMU_DOMAIN_DMA);
-		group->domain = group->default_domain;
+		if (!group->domain)
+			group->domain = group->default_domain;
 	}
 
 	ret = iommu_group_add_device(group, dev);
@@ -1072,6 +1069,8 @@
 
 	domain->ops  = bus->iommu_ops;
 	domain->type = type;
+	/* Assume all sizes by default; the driver may override this later */
+	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
 
 	return domain;
 }
@@ -1296,7 +1295,7 @@
 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
 
 	/* throw away page sizes not supported by the hardware */
-	pgsize &= domain->ops->pgsize_bitmap;
+	pgsize &= domain->pgsize_bitmap;
 
 	/* make sure we're still sane */
 	BUG_ON(!pgsize);
@@ -1318,14 +1317,14 @@
 	int ret = 0;
 
 	if (unlikely(domain->ops->map == NULL ||
-		     domain->ops->pgsize_bitmap == 0UL))
+		     domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
 		return -EINVAL;
 
 	/* find out the minimum page size supported */
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
 	/*
 	 * both the virtual address and the physical one, as well as
@@ -1372,14 +1371,14 @@
 	unsigned long orig_iova = iova;
 
 	if (unlikely(domain->ops->unmap == NULL ||
-		     domain->ops->pgsize_bitmap == 0UL))
+		     domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
 		return -EINVAL;
 
 	/* find out the minimum page size supported */
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
 	/*
 	 * The virtual address, as well as the size of the mapping, must be
@@ -1425,10 +1424,10 @@
 	unsigned int i, min_pagesz;
 	int ret;
 
-	if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+	if (unlikely(domain->pgsize_bitmap == 0UL))
 		return 0;
 
-	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
 		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
@@ -1509,7 +1508,7 @@
 		break;
 	case DOMAIN_ATTR_PAGING:
 		paging  = data;
-		*paging = (domain->ops->pgsize_bitmap != 0UL);
+		*paging = (domain->pgsize_bitmap != 0UL);
 		break;
 	case DOMAIN_ATTR_WINDOWS:
 		count = data;
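
With pgsize_bitmap now held per domain, the smallest supported page size is just the lowest set bit, and candidate mapping sizes are masked against the bitmap. A simplified, standalone demo of that size-fitting logic; it ignores address alignment, which the real mapping path also has to consider:

#include <stdio.h>

static unsigned long fls_long(unsigned long x)
{
	return 8 * sizeof(long) - 1 - __builtin_clzl(x);	/* x != 0 */
}

int main(void)
{
	unsigned long pgsize_bitmap = 0x40201000;	/* 4K | 2M | 1G */
	unsigned long min_pagesz = 1UL << __builtin_ctzl(pgsize_bitmap);
	unsigned long size = (2UL << 20) + (12UL << 10);	/* 2M + 12K to map */

	printf("minimum page size: 0x%lx\n", min_pagesz);

	while (size) {
		/* sizes up to the largest that still fits in what's left */
		unsigned long pgsize = (1UL << (fls_long(size) + 1)) - 1;

		pgsize &= pgsize_bitmap;		/* keep supported sizes */
		pgsize = 1UL << fls_long(pgsize);	/* pick the biggest */

		printf("map 0x%lx bytes\n", pgsize);
		size -= pgsize;
	}
	return 0;
}
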
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 8adaaea..49721b4 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -36,7 +36,7 @@
 	 * As this gets called during crash dump, keep this simple for
 	 * now.
 	 */
-	if (cpu_has_apic || apic_from_smp_config())
+	if (boot_cpu_has(X86_FEATURE_APIC) || apic_from_smp_config())
 		disconnect_bsp_APIC(0);
 }
 
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 929a66a..c3043d8 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -11,6 +11,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include <linux/bootmem.h>
 #include <linux/bug.h>
 #include <linux/clk.h>
 #include <linux/component.h>
@@ -56,7 +57,7 @@
 #define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
 
 #define REG_MMU_IVRP_PADDR			0x114
-#define F_MMU_IVRP_PA_SET(pa)			((pa) >> 1)
+#define F_MMU_IVRP_PA_SET(pa, ext)		(((pa) >> 1) | ((!!(ext)) << 31))
 
 #define REG_MMU_INT_CONTROL0			0x120
 #define F_L2_MULIT_HIT_EN			BIT(0)
@@ -125,6 +126,7 @@
 	struct mtk_iommu_domain		*m4u_dom;
 	struct iommu_group		*m4u_group;
 	struct mtk_smi_iommu		smi_imu;      /* SMI larb iommu info */
+	bool                            enable_4GB;
 };
 
 static struct iommu_ops mtk_iommu_ops;
@@ -257,6 +259,9 @@
 		.iommu_dev = data->dev,
 	};
 
+	if (data->enable_4GB)
+		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
+
 	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
 	if (!dom->iop) {
 		dev_err(data->dev, "Failed to alloc io pgtable\n");
@@ -264,7 +269,7 @@
 	}
 
 	/* Update our support page sizes bitmap */
-	mtk_iommu_ops.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
 
 	writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
 	       data->base + REG_MMU_PT_BASE_ADDR);
@@ -530,7 +535,7 @@
 		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
 	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
 
-	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
+	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
 		       data->base + REG_MMU_IVRP_PADDR);
 
 	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
@@ -591,6 +596,9 @@
 		return -ENOMEM;
 	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
 
+	/* Whether the current DRAM is over 4GB */
+	data->enable_4GB = !!(max_pfn > (0xffffffffUL >> PAGE_SHIFT));
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	data->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(data->base))
@@ -690,7 +698,7 @@
 	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
 	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
 	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
-	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
+	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
 		       base + REG_MMU_IVRP_PADDR);
 	return 0;
 }
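
Two small pieces fit together in the MediaTek change: 4GB mode is inferred from the highest page frame number, and that flag becomes bit 31 of the IVRP address register value. A quick standalone check with an example PFN; the macro is reproduced with an unsigned shift to keep the sketch well-defined:

#include <stdio.h>

#define PAGE_SHIFT 12
#define F_MMU_IVRP_PA_SET(pa, ext)  (((pa) >> 1) | ((unsigned int)(!!(ext)) << 31))

int main(void)
{
	unsigned long max_pfn = 0x140000;	/* ~5GB of DRAM, example value */
	int enable_4gb = max_pfn > (0xffffffffUL >> PAGE_SHIFT);
	unsigned int protect_base = 0x80000000;

	printf("enable_4GB = %d\n", enable_4gb);
	printf("IVRP_PADDR = 0x%x\n", F_MMU_IVRP_PA_SET(protect_base, enable_4gb));
	return 0;
}
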
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 5fea665..af499ae 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -98,12 +98,12 @@
 struct of_iommu_node {
 	struct list_head list;
 	struct device_node *np;
-	struct iommu_ops *ops;
+	const struct iommu_ops *ops;
 };
 static LIST_HEAD(of_iommu_list);
 static DEFINE_SPINLOCK(of_iommu_lock);
 
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
 {
 	struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 
@@ -119,10 +119,10 @@
 	spin_unlock(&of_iommu_lock);
 }
 
-struct iommu_ops *of_iommu_get_ops(struct device_node *np)
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
 {
 	struct of_iommu_node *node;
-	struct iommu_ops *ops = NULL;
+	const struct iommu_ops *ops = NULL;
 
 	spin_lock(&of_iommu_lock);
 	list_for_each_entry(node, &of_iommu_list, list)
@@ -134,12 +134,12 @@
 	return ops;
 }
 
-struct iommu_ops *of_iommu_configure(struct device *dev,
-				     struct device_node *master_np)
+const struct iommu_ops *of_iommu_configure(struct device *dev,
+					   struct device_node *master_np)
 {
 	struct of_phandle_args iommu_spec;
 	struct device_node *np;
-	struct iommu_ops *ops = NULL;
+	const struct iommu_ops *ops = NULL;
 	int idx = 0;
 
 	/*
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 9bc20e2..505548a 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -136,7 +136,7 @@
 			     struct seq_file *s)
 {
 	seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
-			  (cr->cam & MMU_CAM_P) ? 1 : 0);
+		   (cr->cam & MMU_CAM_P) ? 1 : 0);
 	return 0;
 }
 
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 3dc5b65..e2583cc 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -628,10 +628,12 @@
 		break;
 	default:
 		fn = NULL;
-		BUG();
 		break;
 	}
 
+	if (WARN_ON(!fn))
+		return -EINVAL;
+
 	prot = get_iopte_attr(e);
 
 	spin_lock(&obj->page_table_lock);
@@ -987,7 +989,6 @@
 {
 	struct omap_iommu *obj = platform_get_drvdata(pdev);
 
-	iopgtable_clear_entry_all(obj);
 	omap_iommu_debugfs_remove(obj);
 
 	pm_runtime_disable(obj->dev);
@@ -1161,7 +1162,8 @@
 	 * should never fail, but please keep this around to ensure
 	 * we keep the hardware happy
 	 */
-	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
+	if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
+		goto fail_align;
 
 	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
 	spin_lock_init(&omap_domain->lock);
@@ -1172,6 +1174,8 @@
 
 	return &omap_domain->domain;
 
+fail_align:
+	kfree(omap_domain->pgtable);
 fail_nomem:
 	kfree(omap_domain);
 out:
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index a6f593a..c7d6156 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -315,8 +315,8 @@
 	int i;
 
 	for (i = 0; i < iommu->num_mmu; i++)
-		active &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
-					RK_MMU_STATUS_STALL_ACTIVE;
+		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+					   RK_MMU_STATUS_STALL_ACTIVE);
 
 	return active;
 }
@@ -327,8 +327,8 @@
 	int i;
 
 	for (i = 0; i < iommu->num_mmu; i++)
-		enable &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
-					RK_MMU_STATUS_PAGING_ENABLED;
+		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+					   RK_MMU_STATUS_PAGING_ENABLED);
 
 	return enable;
 }
@@ -1049,6 +1049,8 @@
 
 	for (i = 0; i < pdev->num_resources; i++) {
 		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res)
+			continue;
 		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
 		if (IS_ERR(iommu->bases[i]))
 			continue;
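
The Rockchip fix is about boolean accumulation: ANDing a raw status value into a running bool loses any flag above bit 0 unless it is first normalized with !!. A tiny demo of the difference; the register values are made up:

#include <stdio.h>
#include <stdbool.h>

#define STATUS_STALL_ACTIVE	(1 << 2)

int main(void)
{
	unsigned int status[2] = { 0x4, 0x4 };	/* both MMUs report "stalled" */
	bool buggy = true, fixed = true;

	for (int i = 0; i < 2; i++) {
		buggy &= status[i] & STATUS_STALL_ACTIVE;	/* 1 & 4 == 0 */
		fixed &= !!(status[i] & STATUS_STALL_ACTIVE);
	}

	printf("buggy: %d, fixed: %d\n", buggy, fixed);	/* prints 0, 1 */
	return 0;
}
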
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3e12479..46f10ec 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -27,6 +27,7 @@
 	select IRQ_DOMAIN
 	select MULTI_IRQ_HANDLER
 	select IRQ_DOMAIN_HIERARCHY
+	select PARTITION_PERCPU
 
 config ARM_GIC_V3_ITS
 	bool
@@ -244,3 +245,17 @@
 config MVEBU_ODMI
 	bool
 	select GENERIC_MSI_IRQ_DOMAIN
+
+config LS_SCFG_MSI
+	def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+	depends on PCI && PCI_MSI
+	select PCI_MSI_IRQ_DOMAIN
+
+config PARTITION_PERCPU
+	bool
+
+config EZNPS_GIC
+	bool "NPS400 Global Interrupt Manager (GIM)"
+	select IRQ_DOMAIN
+	help
+	  Support the EZchip NPS400 global interrupt controller
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b03cfcb..38853a1 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_ARCH_BCM2835)		+= irq-bcm2836.o
 obj-$(CONFIG_ARCH_EXYNOS)		+= exynos-combiner.o
 obj-$(CONFIG_ARCH_HIP04)		+= irq-hip04.o
+obj-$(CONFIG_ARCH_LPC32XX)		+= irq-lpc32xx.o
 obj-$(CONFIG_ARCH_MMP)			+= irq-mmp.o
 obj-$(CONFIG_IRQ_MXS)			+= irq-mxs.o
 obj-$(CONFIG_ARCH_TEGRA)		+= irq-tegra.o
@@ -27,6 +28,7 @@
 obj-$(CONFIG_ARM_GIC_V2M)		+= irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-common.o
 obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
+obj-$(CONFIG_PARTITION_PERCPU)		+= irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN)	+= irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
 obj-$(CONFIG_ARM_VIC)			+= irq-vic.o
@@ -65,3 +67,5 @@
 obj-$(CONFIG_IMX_GPCV2)			+= irq-imx-gpcv2.o
 obj-$(CONFIG_PIC32_EVIC)		+= irq-pic32-evic.o
 obj-$(CONFIG_MVEBU_ODMI)		+= irq-mvebu-odmi.o
+obj-$(CONFIG_LS_SCFG_MSI)		+= irq-ls-scfg-msi.o
+obj-$(CONFIG_EZNPS_GIC)			+= irq-eznps.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index 2538425..63d9809 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -23,7 +23,7 @@
 #include <linux/slab.h>
 
 #include <asm/irq.h>
-#include <asm-generic/msi.h>
+#include <asm/msi.h>
 
 /* MSIX message address format: local GIC target */
 #define ALPINE_MSIX_SPI_TARGET_CLUSTER0		BIT(16)
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index b6e950d..72ff1d5 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -195,7 +195,7 @@
 	 * Ensure that stores to normal memory are visible to the
 	 * other CPUs before issuing the IPI.
 	 */
-	dsb();
+	smp_wmb();
 
 	for_each_cpu(cpu, mask)	{
 		writel(1 << ipi, mailbox0_base + 16 * cpu);
@@ -223,6 +223,7 @@
 	.priority = 100,
 };
 
+#ifdef CONFIG_ARM
 int __init bcm2836_smp_boot_secondary(unsigned int cpu,
 				      struct task_struct *idle)
 {
@@ -238,7 +239,7 @@
 static const struct smp_operations bcm2836_smp_ops __initconst = {
 	.smp_boot_secondary	= bcm2836_smp_boot_secondary,
 };
-
+#endif
 #endif
 
 static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
@@ -252,12 +253,15 @@
 	/* Unmask IPIs to the boot CPU. */
 	bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
 				       CPU_STARTING,
-				       (void *)smp_processor_id());
+				       (void *)(uintptr_t)smp_processor_id());
 	register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
 
 	set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
+
+#ifdef CONFIG_ARM
 	smp_set_ops(&bcm2836_smp_ops);
 #endif
+#endif
 }
 
 /*
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 75573fa..1eef56a 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -183,7 +183,7 @@
 			return -EINVAL;
 
 		*hwirq = fwspec->param[1];
-		*type = fwspec->param[2];
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
 		return 0;
 	}
 
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c
new file mode 100644
index 0000000..efbf0e4
--- /dev/null
+++ b/drivers/irqchip/irq-eznps.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <soc/nps/common.h>
+
+#define NPS_NR_CPU_IRQS 8  /* number of interrupt lines of NPS400 CPU */
+#define NPS_TIMER0_IRQ  3
+
+/*
+ * The NPS400 core includes Interrupt Controller (IC) support.
+ * All cores can deactivate level IRQs at the first control level,
+ * in the cores' mesh layer, called MTM.
+ * For devices outside the chip (e.g. UART, network) there is another
+ * level called the Global Interrupt Manager (GIM).
+ * This second level can control both level and edge interrupts.
+ *
+ * NOTE: AUX_IENABLE and CTOP_AUX_IACK are auxiliary registers
+ * with private HW copy per CPU.
+ */
+
+static void nps400_irq_mask(struct irq_data *irqd)
+{
+	unsigned int ienb;
+	unsigned int irq = irqd_to_hwirq(irqd);
+
+	ienb = read_aux_reg(AUX_IENABLE);
+	ienb &= ~(1 << irq);
+	write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static void nps400_irq_unmask(struct irq_data *irqd)
+{
+	unsigned int ienb;
+	unsigned int irq = irqd_to_hwirq(irqd);
+
+	ienb = read_aux_reg(AUX_IENABLE);
+	ienb |= (1 << irq);
+	write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static void nps400_irq_eoi_global(struct irq_data *irqd)
+{
+	unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
+
+	write_aux_reg(CTOP_AUX_IACK, 1 << irq);
+
+	/* Don't ack GIC before all device access attempts are done */
+	mb();
+
+	nps_ack_gic();
+}
+
+static void nps400_irq_eoi(struct irq_data *irqd)
+{
+	unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
+
+	write_aux_reg(CTOP_AUX_IACK, 1 << irq);
+}
+
+static struct irq_chip nps400_irq_chip_fasteoi = {
+	.name		= "NPS400 IC Global",
+	.irq_mask	= nps400_irq_mask,
+	.irq_unmask	= nps400_irq_unmask,
+	.irq_eoi	= nps400_irq_eoi_global,
+};
+
+static struct irq_chip nps400_irq_chip_percpu = {
+	.name		= "NPS400 IC",
+	.irq_mask	= nps400_irq_mask,
+	.irq_unmask	= nps400_irq_unmask,
+	.irq_eoi	= nps400_irq_eoi,
+};
+
+static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
+			  irq_hw_number_t hw)
+{
+	switch (hw) {
+	case NPS_TIMER0_IRQ:
+#ifdef CONFIG_SMP
+	case NPS_IPI_IRQ:
+#endif
+		irq_set_percpu_devid(virq);
+		irq_set_chip_and_handler(virq, &nps400_irq_chip_percpu,
+					 handle_percpu_devid_irq);
+		break;
+	default:
+		irq_set_chip_and_handler(virq, &nps400_irq_chip_fasteoi,
+					 handle_fasteoi_irq);
+		break;
+	}
+
+	return 0;
+}
+
+static const struct irq_domain_ops nps400_irq_ops = {
+	.xlate = irq_domain_xlate_onecell,
+	.map = nps400_irq_map,
+};
+
+static int __init nps400_of_init(struct device_node *node,
+				 struct device_node *parent)
+{
+	static struct irq_domain *nps400_root_domain;
+
+	if (parent) {
+		pr_err("DeviceTree incore ic not a root irq controller\n");
+		return -EINVAL;
+	}
+
+	nps400_root_domain = irq_domain_add_linear(node, NPS_NR_CPU_IRQS,
+						   &nps400_irq_ops, NULL);
+
+	if (!nps400_root_domain) {
+		pr_err("nps400 root irq domain not avail\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Needed for primary domain lookup to succeed
+	 * This is a primary irqchip, and can never have a parent
+	 */
+	irq_set_default_host(nps400_root_domain);
+
+#ifdef CONFIG_SMP
+	irq_create_mapping(nps400_root_domain, NPS_IPI_IRQ);
+#endif
+
+	return 0;
+}
+IRQCHIP_DECLARE(ezchip_nps400_ic, "ezchip,nps400-ic", nps400_of_init);
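
The mask/unmask callbacks in this new driver are plain read-modify-write updates of the per-CPU enable register, one bit per interrupt line. A toy model in ordinary C, nothing ARC-specific and with invented names:

#include <stdio.h>

static unsigned int aux_ienable;	/* stands in for the enable register */

static void irq_mask(unsigned int irq)
{
	aux_ienable &= ~(1U << irq);
}

static void irq_unmask(unsigned int irq)
{
	aux_ienable |= 1U << irq;
}

int main(void)
{
	irq_unmask(3);		/* e.g. a timer line */
	irq_unmask(5);
	irq_mask(3);
	printf("enable mask: 0x%x\n", aux_ienable);	/* 0x20 */
	return 0;
}
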
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index f174ce0..89e7423 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -21,6 +21,19 @@
 
 #include "irq-gic-common.h"
 
+static const struct gic_kvm_info *gic_kvm_info;
+
+const struct gic_kvm_info *gic_get_kvm_info(void)
+{
+	return gic_kvm_info;
+}
+
+void gic_set_kvm_info(const struct gic_kvm_info *info)
+{
+	BUG_ON(gic_kvm_info != NULL);
+	gic_kvm_info = info;
+}
+
 void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
 		void *data)
 {
@@ -50,14 +63,26 @@
 	else if (type & IRQ_TYPE_EDGE_BOTH)
 		val |= confmask;
 
+	/* If the current configuration is the same, then we are done */
+	if (val == oldval)
+		return 0;
+
 	/*
 	 * Write back the new configuration, and possibly re-enable
-	 * the interrupt. If we tried to write a new configuration and failed,
-	 * return an error.
+	 * the interrupt. If we fail to write a new configuration for
+	 * an SPI then WARN and return an error. If we fail to write the
+	 * configuration for a PPI this is most likely because the GIC
+	 * does not allow us to set the configuration or we are in a
+	 * non-secure mode, and hence it may not be catastrophic.
 	 */
 	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
-	if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval)
-		ret = -EINVAL;
+	if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) {
+		if (WARN_ON(irq >= 32))
+			ret = -EINVAL;
+		else
+			pr_warn("GIC: PPI%d is secure or misconfigured\n",
+				irq - 16);
+	}
 
 	if (sync_access)
 		sync_access();
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index fff697d..205e5fd 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -19,6 +19,7 @@
 
 #include <linux/of.h>
 #include <linux/irqdomain.h>
+#include <linux/irqchip/arm-gic-common.h>
 
 struct gic_quirk {
 	const char *desc;
@@ -35,4 +36,6 @@
 void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
 		void *data);
 
+void gic_set_kvm_info(const struct gic_kvm_info *info);
+
 #endif /* _IRQ_GIC_COMMON_H */
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 28f047c..ad0d296 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -49,6 +49,9 @@
 /* APM X-Gene with GICv2m MSI_IIDR register value */
 #define XGENE_GICV2M_MSI_IIDR		0x06000170
 
+/* Broadcom NS2 GICv2m MSI_IIDR register value */
+#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f
+
 /* List of flags for specific v2m implementation */
 #define GICV2M_NEEDS_SPI_OFFSET		0x00000001
 
@@ -62,6 +65,7 @@
 	void __iomem *base;	/* GICv2m virt address */
 	u32 spi_start;		/* The SPI number that MSIs start */
 	u32 nr_spis;		/* The number of SPIs for MSIs */
+	u32 spi_offset;		/* offset to be subtracted from SPI number */
 	unsigned long *bm;	/* MSI vector bitmap */
 	u32 flags;		/* v2m flags for specific implementation */
 };
@@ -102,7 +106,7 @@
 	msg->data = data->hwirq;
 
 	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
-		msg->data -= v2m->spi_start;
+		msg->data -= v2m->spi_offset;
 }
 
 static struct irq_chip gicv2m_irq_chip = {
@@ -340,9 +344,20 @@
 	 * different from the standard GICv2m implementation where
 	 * the MSI data is the absolute value within the range from
 	 * spi_start to (spi_start + num_spis).
+	 *
+	 * The Broadcom NS2 GICv2m implementation has an erratum where the MSI
+	 * data is 'spi_number - 32'.
 	 */
-	if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR)
+	switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
+	case XGENE_GICV2M_MSI_IIDR:
 		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+		v2m->spi_offset = v2m->spi_start;
+		break;
+	case BCM_NS2_GICV2M_MSI_IIDR:
+		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+		v2m->spi_offset = 32;
+		break;
+	}
 
 	v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
 			  GFP_KERNEL);
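
The erratum handling boils down to choosing an offset by matching the implementation's IIDR and subtracting it from the SPI number when forming the MSI data: the full spi_start on X-Gene, a fixed 32 on Broadcom NS2. A standalone sketch using the IIDR constants from the patch and an invented SPI range:

#include <stdio.h>

#define XGENE_GICV2M_MSI_IIDR	0x06000170
#define BCM_NS2_GICV2M_MSI_IIDR	0x0000013f

int main(void)
{
	unsigned int iidr = BCM_NS2_GICV2M_MSI_IIDR;
	unsigned int spi_start = 64, hwirq = 96, spi_offset = 0;

	switch (iidr) {
	case XGENE_GICV2M_MSI_IIDR:
		spi_offset = spi_start;	/* data is relative to spi_start */
		break;
	case BCM_NS2_GICV2M_MSI_IIDR:
		spi_offset = 32;	/* data is spi_number - 32 */
		break;
	}

	printf("MSI data for SPI %u: %u\n", hwirq, hwirq - spi_offset);
	return 0;
}
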
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 3926179..6bd881b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -55,6 +55,16 @@
 };
 
 /*
+ * The ITS_BASER structure - contains memory information and cached
+ * value of BASER register configuration.
+ */
+struct its_baser {
+	void		*base;
+	u64		val;
+	u32		order;
+};
+
+/*
  * The ITS structure - contains most of the infrastructure, with the
  * top-level MSI domain, the command queue, the collections, and the
  * list of devices writing to it.
@@ -66,14 +76,12 @@
 	unsigned long		phys_base;
 	struct its_cmd_block	*cmd_base;
 	struct its_cmd_block	*cmd_write;
-	struct {
-		void		*base;
-		u32		order;
-	} tables[GITS_BASER_NR_REGS];
+	struct its_baser	tables[GITS_BASER_NR_REGS];
 	struct its_collection	*collections;
 	struct list_head	its_device_list;
 	u64			flags;
 	u32			ite_size;
+	u32			device_ids;
 };
 
 #define ITS_ITT_ALIGN		SZ_256
@@ -838,6 +846,8 @@
 		ids	= GITS_TYPER_DEVBITS(typer);
 	}
 
+	its->device_ids = ids;
+
 	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
 		u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
 		u64 type = GITS_BASER_TYPE(val);
@@ -913,6 +923,7 @@
 		}
 
 		val |= alloc_pages - 1;
+		its->tables[i].val = val;
 
 		writeq_relaxed(val, its->base + GITS_BASER + i * 8);
 		tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -1138,9 +1149,22 @@
 	return its_dev;
 }
 
+static struct its_baser *its_get_baser(struct its_node *its, u32 type)
+{
+	int i;
+
+	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+		if (GITS_BASER_TYPE(its->tables[i].val) == type)
+			return &its->tables[i];
+	}
+
+	return NULL;
+}
+
 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 					    int nvecs)
 {
+	struct its_baser *baser;
 	struct its_device *dev;
 	unsigned long *lpi_map;
 	unsigned long flags;
@@ -1151,6 +1175,16 @@
 	int nr_ites;
 	int sz;
 
+	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+	/* Don't allow 'dev_id' that exceeds single, flat table limit */
+	if (baser) {
+		if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
+			      GITS_BASER_ENTRY_SIZE(baser->val)))
+			return NULL;
+	} else if (ilog2(dev_id) >= its->device_ids)
+		return NULL;
+
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	/*
 	 * At least one bit of EventID is being used, hence a minimum
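
The new bound check for a flat ITS device table is simple arithmetic: the table holds (table size / entry size) entries, so any DeviceID at or above that count cannot be accepted. A small standalone illustration with example numbers:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE		4096UL
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

static bool dev_id_fits(unsigned int dev_id, unsigned int order,
			unsigned int entry_size)
{
	return dev_id < PAGE_ORDER_TO_SIZE(order) / entry_size;
}

int main(void)
{
	unsigned int order = 4;		/* 64K device table */
	unsigned int entry_size = 8;	/* bytes per entry */

	printf("max DeviceID: %lu\n",
	       PAGE_ORDER_TO_SIZE(order) / entry_size - 1);
	printf("dev_id 8000 fits: %d\n", dev_id_fits(8000, order, entry_size));
	printf("dev_id 9000 fits: %d\n", dev_id_fits(9000, order, entry_size));
	return 0;
}
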
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 5b7d3c2..fb042ba 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -15,6 +15,8 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#define pr_fmt(fmt)	"GICv3: " fmt
+
 #include <linux/acpi.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
@@ -28,7 +30,9 @@
 #include <linux/slab.h>
 
 #include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-common.h>
 #include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/irq-partition-percpu.h>
 
 #include <asm/cputype.h>
 #include <asm/exception.h>
@@ -44,6 +48,7 @@
 };
 
 struct gic_chip_data {
+	struct fwnode_handle	*fwnode;
 	void __iomem		*dist_base;
 	struct redist_region	*redist_regions;
 	struct rdists		rdists;
@@ -51,11 +56,14 @@
 	u64			redist_stride;
 	u32			nr_redist_regions;
 	unsigned int		irq_nr;
+	struct partition_desc	*ppi_descs[16];
 };
 
 static struct gic_chip_data gic_data __read_mostly;
 static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
 
+static struct gic_kvm_info gic_v3_kvm_info;
+
 #define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 #define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
@@ -364,6 +372,13 @@
 			if (static_key_true(&supports_deactivate))
 				gic_write_dir(irqnr);
 #ifdef CONFIG_SMP
+			/*
+			 * Unlike GICv2, we don't need an smp_rmb() here.
+			 * The control dependency from gic_read_iar to
+			 * the ISB in gic_write_eoir is enough to ensure
+			 * that any shared data read by handle_IPI will
+			 * be read after the ACK.
+			 */
 			handle_IPI(irqnr, regs);
 #else
 			WARN_ONCE(true, "Unexpected SGI received!\n");
@@ -383,6 +398,15 @@
 	writel_relaxed(0, base + GICD_CTLR);
 	gic_dist_wait_for_rwp();
 
+	/*
+	 * Configure SPIs as non-secure Group-1. This will only matter
+	 * if the GIC only has a single security state. This will not
+	 * do the right thing if the kernel is running in secure mode,
+	 * but that's not the intended use case anyway.
+	 */
+	for (i = 32; i < gic_data.irq_nr; i += 32)
+		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
+
 	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
 
 	/* Enable distributor with ARE, Group1 */
@@ -500,6 +524,9 @@
 
 	rbase = gic_data_rdist_sgi_base();
 
+	/* Configure SGIs/PPIs as non-secure Group-1 */
+	writel_relaxed(~0, rbase + GICR_IGROUPR0);
+
 	gic_cpu_config(rbase, gic_redist_wait_for_rwp);
 
 	/* Give LPIs a spin */
@@ -812,10 +839,62 @@
 	}
 }
 
+static int gic_irq_domain_select(struct irq_domain *d,
+				 struct irq_fwspec *fwspec,
+				 enum irq_domain_bus_token bus_token)
+{
+	/* Not for us */
+	if (fwspec->fwnode != d->fwnode)
+		return 0;
+
+	/* If this is not DT, then we have a single domain */
+	if (!is_of_node(fwspec->fwnode))
+		return 1;
+
+	/*
+	 * If this is a PPI and we have a 4th (non-null) parameter,
+	 * then we need to match the partition domain.
+	 */
+	if (fwspec->param_count >= 4 &&
+	    fwspec->param[0] == 1 && fwspec->param[3] != 0)
+		return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
+
+	return d == gic_data.domain;
+}
+
 static const struct irq_domain_ops gic_irq_domain_ops = {
 	.translate = gic_irq_domain_translate,
 	.alloc = gic_irq_domain_alloc,
 	.free = gic_irq_domain_free,
+	.select = gic_irq_domain_select,
+};
+
+static int partition_domain_translate(struct irq_domain *d,
+				      struct irq_fwspec *fwspec,
+				      unsigned long *hwirq,
+				      unsigned int *type)
+{
+	struct device_node *np;
+	int ret;
+
+	np = of_find_node_by_phandle(fwspec->param[3]);
+	if (WARN_ON(!np))
+		return -EINVAL;
+
+	ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
+				     of_node_to_fwnode(np));
+	if (ret < 0)
+		return ret;
+
+	*hwirq = ret;
+	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+	return 0;
+}
+
+static const struct irq_domain_ops partition_domain_ops = {
+	.translate = partition_domain_translate,
+	.select = gic_irq_domain_select,
 };
 
 static void gicv3_enable_quirks(void)
@@ -843,6 +922,7 @@
 	if (static_key_true(&supports_deactivate))
 		pr_info("GIC: Using split EOI/Deactivate mode\n");
 
+	gic_data.fwnode = handle;
 	gic_data.dist_base = dist_base;
 	gic_data.redist_regions = rdist_regs;
 	gic_data.nr_redist_regions = nr_redist_regions;
@@ -901,6 +981,143 @@
 	return 0;
 }
 
+static int get_cpu_number(struct device_node *dn)
+{
+	const __be32 *cell;
+	u64 hwid;
+	int i;
+
+	cell = of_get_property(dn, "reg", NULL);
+	if (!cell)
+		return -1;
+
+	hwid = of_read_number(cell, of_n_addr_cells(dn));
+
+	/*
+	 * Non affinity bits must be set to 0 in the DT
+	 */
+	if (hwid & ~MPIDR_HWID_BITMASK)
+		return -1;
+
+	for (i = 0; i < num_possible_cpus(); i++)
+		if (cpu_logical_map(i) == hwid)
+			return i;
+
+	return -1;
+}
+
+/* Create all possible partitions at boot time */
+static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
+{
+	struct device_node *parts_node, *child_part;
+	int part_idx = 0, i;
+	int nr_parts;
+	struct partition_affinity *parts;
+
+	parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+	if (!parts_node)
+		return;
+
+	nr_parts = of_get_child_count(parts_node);
+
+	if (!nr_parts)
+		return;
+
+	parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
+	if (WARN_ON(!parts))
+		return;
+
+	for_each_child_of_node(parts_node, child_part) {
+		struct partition_affinity *part;
+		int n;
+
+		part = &parts[part_idx];
+
+		part->partition_id = of_node_to_fwnode(child_part);
+
+		pr_info("GIC: PPI partition %s[%d] { ",
+			child_part->name, part_idx);
+
+		n = of_property_count_elems_of_size(child_part, "affinity",
+						    sizeof(u32));
+		WARN_ON(n <= 0);
+
+		for (i = 0; i < n; i++) {
+			int err, cpu;
+			u32 cpu_phandle;
+			struct device_node *cpu_node;
+
+			err = of_property_read_u32_index(child_part, "affinity",
+							 i, &cpu_phandle);
+			if (WARN_ON(err))
+				continue;
+
+			cpu_node = of_find_node_by_phandle(cpu_phandle);
+			if (WARN_ON(!cpu_node))
+				continue;
+
+			cpu = get_cpu_number(cpu_node);
+			if (WARN_ON(cpu == -1))
+				continue;
+
+			pr_cont("%s[%d] ", cpu_node->full_name, cpu);
+
+			cpumask_set_cpu(cpu, &part->mask);
+		}
+
+		pr_cont("}\n");
+		part_idx++;
+	}
+
+	for (i = 0; i < 16; i++) {
+		unsigned int irq;
+		struct partition_desc *desc;
+		struct irq_fwspec ppi_fwspec = {
+			.fwnode		= gic_data.fwnode,
+			.param_count	= 3,
+			.param		= {
+				[0]	= 1,
+				[1]	= i,
+				[2]	= IRQ_TYPE_NONE,
+			},
+		};
+
+		irq = irq_create_fwspec_mapping(&ppi_fwspec);
+		if (WARN_ON(!irq))
+			continue;
+		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
+					     irq, &partition_domain_ops);
+		if (WARN_ON(!desc))
+			continue;
+
+		gic_data.ppi_descs[i] = desc;
+	}
+}
+
+static void __init gic_of_setup_kvm_info(struct device_node *node)
+{
+	int ret;
+	struct resource r;
+	u32 gicv_idx;
+
+	gic_v3_kvm_info.type = GIC_V3;
+
+	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
+	if (!gic_v3_kvm_info.maint_irq)
+		return;
+
+	if (of_property_read_u32(node, "#redistributor-regions",
+				 &gicv_idx))
+		gicv_idx = 1;
+
+	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
+	ret = of_address_to_resource(node, gicv_idx, &r);
+	if (!ret)
+		gic_v3_kvm_info.vcpu = r;
+
+	gic_set_kvm_info(&gic_v3_kvm_info);
+}
+
 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
 {
 	void __iomem *dist_base;
@@ -952,8 +1169,12 @@
 
 	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
 			     redist_stride, &node->fwnode);
-	if (!err)
-		return 0;
+	if (err)
+		goto out_unmap_rdist;
+
+	gic_populate_ppi_partitions(node);
+	gic_of_setup_kvm_info(node);
+	return 0;
 
 out_unmap_rdist:
 	for (i = 0; i < nr_redist_regions; i++)
@@ -968,19 +1189,25 @@
 IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
 
 #ifdef CONFIG_ACPI
-static void __iomem *dist_base;
-static struct redist_region *redist_regs __initdata;
-static u32 nr_redist_regions __initdata;
-static bool single_redist;
+static struct
+{
+	void __iomem *dist_base;
+	struct redist_region *redist_regs;
+	u32 nr_redist_regions;
+	bool single_redist;
+	u32 maint_irq;
+	int maint_irq_mode;
+	phys_addr_t vcpu_base;
+} acpi_data __initdata;
 
 static void __init
 gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
 {
 	static int count = 0;
 
-	redist_regs[count].phys_base = phys_base;
-	redist_regs[count].redist_base = redist_base;
-	redist_regs[count].single_redist = single_redist;
+	acpi_data.redist_regs[count].phys_base = phys_base;
+	acpi_data.redist_regs[count].redist_base = redist_base;
+	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
 	count++;
 }
 
@@ -1008,7 +1235,7 @@
 {
 	struct acpi_madt_generic_interrupt *gicc =
 				(struct acpi_madt_generic_interrupt *)header;
-	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
 	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
 	void __iomem *redist_base;
 
@@ -1025,7 +1252,7 @@
 	acpi_tbl_entry_handler redist_parser;
 	enum acpi_madt_type type;
 
-	if (single_redist) {
+	if (acpi_data.single_redist) {
 		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
 		redist_parser = gic_acpi_parse_madt_gicc;
 	} else {
@@ -1076,14 +1303,14 @@
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
 				      gic_acpi_match_gicr, 0);
 	if (count > 0) {
-		single_redist = false;
+		acpi_data.single_redist = false;
 		return count;
 	}
 
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
 				      gic_acpi_match_gicc, 0);
 	if (count > 0)
-		single_redist = true;
+		acpi_data.single_redist = true;
 
 	return count;
 }
@@ -1103,36 +1330,117 @@
 	if (count <= 0)
 		return false;
 
-	nr_redist_regions = count;
+	acpi_data.nr_redist_regions = count;
 	return true;
 }
 
+static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
+						const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *gicc =
+		(struct acpi_madt_generic_interrupt *)header;
+	int maint_irq_mode;
+	static int first_madt = true;
+
+	/* Skip unusable CPUs */
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
+		return 0;
+
+	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
+		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
+
+	if (first_madt) {
+		first_madt = false;
+
+		acpi_data.maint_irq = gicc->vgic_interrupt;
+		acpi_data.maint_irq_mode = maint_irq_mode;
+		acpi_data.vcpu_base = gicc->gicv_base_address;
+
+		return 0;
+	}
+
+	/*
+	 * The maintenance interrupt and GICV should be the same for every CPU
+	 */
+	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
+	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
+	    (acpi_data.vcpu_base != gicc->gicv_base_address))
+		return -EINVAL;
+
+	return 0;
+}
+
+static bool __init gic_acpi_collect_virt_info(void)
+{
+	int count;
+
+	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+				      gic_acpi_parse_virt_madt_gicc, 0);
+
+	return (count > 0);
+}
+
 #define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
+#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
+#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
+
+static void __init gic_acpi_setup_kvm_info(void)
+{
+	int irq;
+
+	if (!gic_acpi_collect_virt_info()) {
+		pr_warn("Unable to get hardware information used for virtualization\n");
+		return;
+	}
+
+	gic_v3_kvm_info.type = GIC_V3;
+
+	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
+				acpi_data.maint_irq_mode,
+				ACPI_ACTIVE_HIGH);
+	if (irq <= 0)
+		return;
+
+	gic_v3_kvm_info.maint_irq = irq;
+
+	if (acpi_data.vcpu_base) {
+		struct resource *vcpu = &gic_v3_kvm_info.vcpu;
+
+		vcpu->flags = IORESOURCE_MEM;
+		vcpu->start = acpi_data.vcpu_base;
+		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
+	}
+
+	gic_set_kvm_info(&gic_v3_kvm_info);
+}
 
 static int __init
 gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
 {
 	struct acpi_madt_generic_distributor *dist;
 	struct fwnode_handle *domain_handle;
+	size_t size;
 	int i, err;
 
 	/* Get distributor base address */
 	dist = (struct acpi_madt_generic_distributor *)header;
-	dist_base = ioremap(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE);
-	if (!dist_base) {
+	acpi_data.dist_base = ioremap(dist->base_address,
+				      ACPI_GICV3_DIST_MEM_SIZE);
+	if (!acpi_data.dist_base) {
 		pr_err("Unable to map GICD registers\n");
 		return -ENOMEM;
 	}
 
-	err = gic_validate_dist_version(dist_base);
+	err = gic_validate_dist_version(acpi_data.dist_base);
 	if (err) {
-		pr_err("No distributor detected at @%p, giving up", dist_base);
+		pr_err("No distributor detected at @%p, giving up",
+		       acpi_data.dist_base);
 		goto out_dist_unmap;
 	}
 
-	redist_regs = kzalloc(sizeof(*redist_regs) * nr_redist_regions,
-			      GFP_KERNEL);
-	if (!redist_regs) {
+	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
+	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
+	if (!acpi_data.redist_regs) {
 		err = -ENOMEM;
 		goto out_dist_unmap;
 	}
@@ -1141,29 +1449,31 @@
 	if (err)
 		goto out_redist_unmap;
 
-	domain_handle = irq_domain_alloc_fwnode(dist_base);
+	domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
 	if (!domain_handle) {
 		err = -ENOMEM;
 		goto out_redist_unmap;
 	}
 
-	err = gic_init_bases(dist_base, redist_regs, nr_redist_regions, 0,
-			     domain_handle);
+	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
+			     acpi_data.nr_redist_regions, 0, domain_handle);
 	if (err)
 		goto out_fwhandle_free;
 
 	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
+	gic_acpi_setup_kvm_info();
+
 	return 0;
 
 out_fwhandle_free:
 	irq_domain_free_fwnode(domain_handle);
 out_redist_unmap:
-	for (i = 0; i < nr_redist_regions; i++)
-		if (redist_regs[i].redist_base)
-			iounmap(redist_regs[i].redist_base);
-	kfree(redist_regs);
+	for (i = 0; i < acpi_data.nr_redist_regions; i++)
+		if (acpi_data.redist_regs[i].redist_base)
+			iounmap(acpi_data.redist_regs[i].redist_base);
+	kfree(acpi_data.redist_regs);
 out_dist_unmap:
-	iounmap(dist_base);
+	iounmap(acpi_data.dist_base);
 	return err;
 }
 IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 282344b..b4e6471 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -55,7 +55,7 @@
 
 static void gic_check_cpu_features(void)
 {
-	WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
+	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
 			TAINT_CPU_OUT_OF_SPEC,
 			"GICv3 system registers enabled, broken firmware!\n");
 }
@@ -72,6 +72,9 @@
 	struct irq_chip chip;
 	union gic_base dist_base;
 	union gic_base cpu_base;
+	void __iomem *raw_dist_base;
+	void __iomem *raw_cpu_base;
+	u32 percpu_offset;
 #ifdef CONFIG_CPU_PM
 	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
 	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
@@ -102,6 +105,8 @@
 
 static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;
 
+static struct gic_kvm_info gic_v2_kvm_info;
+
 #ifdef CONFIG_GIC_NON_BANKED
 static void __iomem *gic_get_percpu_base(union gic_base *base)
 {
@@ -344,6 +349,14 @@
 			if (static_key_true(&supports_deactivate))
 				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
 #ifdef CONFIG_SMP
+			/*
+			 * Ensure any shared data written by the CPU sending
+			 * the IPI is read after we've read the ACK register
+			 * on the GIC.
+			 *
+			 * Pairs with the write barrier in gic_raise_softirq
+			 */
+			smp_rmb();
 			handle_IPI(irqnr, regs);
 #endif
 			continue;
@@ -391,20 +404,6 @@
 				  IRQCHIP_MASK_ON_SUSPEND,
 };
 
-static struct irq_chip gic_eoimode1_chip = {
-	.name			= "GICv2",
-	.irq_mask		= gic_eoimode1_mask_irq,
-	.irq_unmask		= gic_unmask_irq,
-	.irq_eoi		= gic_eoimode1_eoi_irq,
-	.irq_set_type		= gic_set_type,
-	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
-	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
-	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
-	.flags			= IRQCHIP_SET_TYPE_MASKED |
-				  IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_MASK_ON_SUSPEND,
-};
-
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
 {
 	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
@@ -473,7 +472,7 @@
 	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
 }
 
-static void gic_cpu_init(struct gic_chip_data *gic)
+static int gic_cpu_init(struct gic_chip_data *gic)
 {
 	void __iomem *dist_base = gic_data_dist_base(gic);
 	void __iomem *base = gic_data_cpu_base(gic);
@@ -489,7 +488,10 @@
 		/*
 		 * Get what the GIC says our CPU mask is.
 		 */
-		BUG_ON(cpu >= NR_GIC_CPU_IF);
+		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
+			return -EINVAL;
+
+		gic_check_cpu_features();
 		cpu_mask = gic_get_cpumask(gic);
 		gic_cpu_map[cpu] = cpu_mask;
 
@@ -506,6 +508,8 @@
 
 	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
 	gic_cpu_if_up(gic);
+
+	return 0;
 }
 
 int gic_cpu_if_down(unsigned int gic_nr)
@@ -531,34 +535,35 @@
  * this function, no interrupts will be delivered by the GIC, and another
  * platform-specific wakeup source must be enabled.
  */
-static void gic_dist_save(unsigned int gic_nr)
+static void gic_dist_save(struct gic_chip_data *gic)
 {
 	unsigned int gic_irqs;
 	void __iomem *dist_base;
 	int i;
 
-	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+	if (WARN_ON(!gic))
+		return;
 
-	gic_irqs = gic_data[gic_nr].gic_irqs;
-	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	gic_irqs = gic->gic_irqs;
+	dist_base = gic_data_dist_base(gic);
 
 	if (!dist_base)
 		return;
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
-		gic_data[gic_nr].saved_spi_conf[i] =
+		gic->saved_spi_conf[i] =
 			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
-		gic_data[gic_nr].saved_spi_target[i] =
+		gic->saved_spi_target[i] =
 			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
-		gic_data[gic_nr].saved_spi_enable[i] =
+		gic->saved_spi_enable[i] =
 			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
-		gic_data[gic_nr].saved_spi_active[i] =
+		gic->saved_spi_active[i] =
 			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
 }
 
@@ -569,16 +574,17 @@
  * handled normally, but any edge interrupts that occurred will not be seen by
  * the GIC and need to be handled by the platform-specific wakeup source.
  */
-static void gic_dist_restore(unsigned int gic_nr)
+static void gic_dist_restore(struct gic_chip_data *gic)
 {
 	unsigned int gic_irqs;
 	unsigned int i;
 	void __iomem *dist_base;
 
-	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+	if (WARN_ON(!gic))
+		return;
 
-	gic_irqs = gic_data[gic_nr].gic_irqs;
-	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	gic_irqs = gic->gic_irqs;
+	dist_base = gic_data_dist_base(gic);
 
 	if (!dist_base)
 		return;
@@ -586,7 +592,7 @@
 	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
-		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+		writel_relaxed(gic->saved_spi_conf[i],
 			dist_base + GIC_DIST_CONFIG + i * 4);
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
@@ -594,85 +600,87 @@
 			dist_base + GIC_DIST_PRI + i * 4);
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
-		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+		writel_relaxed(gic->saved_spi_target[i],
 			dist_base + GIC_DIST_TARGET + i * 4);
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
 		writel_relaxed(GICD_INT_EN_CLR_X32,
 			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
-		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+		writel_relaxed(gic->saved_spi_enable[i],
 			dist_base + GIC_DIST_ENABLE_SET + i * 4);
 	}
 
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
 		writel_relaxed(GICD_INT_EN_CLR_X32,
 			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
-		writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
+		writel_relaxed(gic->saved_spi_active[i],
 			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
 	}
 
 	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
 }
 
-static void gic_cpu_save(unsigned int gic_nr)
+static void gic_cpu_save(struct gic_chip_data *gic)
 {
 	int i;
 	u32 *ptr;
 	void __iomem *dist_base;
 	void __iomem *cpu_base;
 
-	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+	if (WARN_ON(!gic))
+		return;
 
-	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
-	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+	dist_base = gic_data_dist_base(gic);
+	cpu_base = gic_data_cpu_base(gic);
 
 	if (!dist_base || !cpu_base)
 		return;
 
-	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
 		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
 
-	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+	ptr = raw_cpu_ptr(gic->saved_ppi_active);
 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
 		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
 
-	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
 	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
 		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
 
 }
 
-static void gic_cpu_restore(unsigned int gic_nr)
+static void gic_cpu_restore(struct gic_chip_data *gic)
 {
 	int i;
 	u32 *ptr;
 	void __iomem *dist_base;
 	void __iomem *cpu_base;
 
-	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+	if (WARN_ON(!gic))
+		return;
 
-	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
-	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+	dist_base = gic_data_dist_base(gic);
+	cpu_base = gic_data_cpu_base(gic);
 
 	if (!dist_base || !cpu_base)
 		return;
 
-	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
 		writel_relaxed(GICD_INT_EN_CLR_X32,
 			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
 		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
 	}
 
-	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+	ptr = raw_cpu_ptr(gic->saved_ppi_active);
 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
 		writel_relaxed(GICD_INT_EN_CLR_X32,
 			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
 		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
 	}
 
-	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
 	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
 		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
 
@@ -681,7 +689,7 @@
 					dist_base + GIC_DIST_PRI + i * 4);
 
 	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
-	gic_cpu_if_up(&gic_data[gic_nr]);
+	gic_cpu_if_up(gic);
 }
 
 static int gic_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
@@ -696,18 +704,18 @@
 #endif
 		switch (cmd) {
 		case CPU_PM_ENTER:
-			gic_cpu_save(i);
+			gic_cpu_save(&gic_data[i]);
 			break;
 		case CPU_PM_ENTER_FAILED:
 		case CPU_PM_EXIT:
-			gic_cpu_restore(i);
+			gic_cpu_restore(&gic_data[i]);
 			break;
 		case CPU_CLUSTER_PM_ENTER:
-			gic_dist_save(i);
+			gic_dist_save(&gic_data[i]);
 			break;
 		case CPU_CLUSTER_PM_ENTER_FAILED:
 		case CPU_CLUSTER_PM_EXIT:
-			gic_dist_restore(i);
+			gic_dist_restore(&gic_data[i]);
 			break;
 		}
 	}
@@ -719,26 +727,39 @@
 	.notifier_call = gic_notifier,
 };
 
-static void __init gic_pm_init(struct gic_chip_data *gic)
+static int __init gic_pm_init(struct gic_chip_data *gic)
 {
 	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
 		sizeof(u32));
-	BUG_ON(!gic->saved_ppi_enable);
+	if (WARN_ON(!gic->saved_ppi_enable))
+		return -ENOMEM;
 
 	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
 		sizeof(u32));
-	BUG_ON(!gic->saved_ppi_active);
+	if (WARN_ON(!gic->saved_ppi_active))
+		goto free_ppi_enable;
 
 	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
 		sizeof(u32));
-	BUG_ON(!gic->saved_ppi_conf);
+	if (WARN_ON(!gic->saved_ppi_conf))
+		goto free_ppi_active;
 
 	if (gic == &gic_data[0])
 		cpu_pm_register_notifier(&gic_notifier_block);
+
+	return 0;
+
+free_ppi_active:
+	free_percpu(gic->saved_ppi_active);
+free_ppi_enable:
+	free_percpu(gic->saved_ppi_enable);
+
+	return -ENOMEM;
 }
 #else
-static void __init gic_pm_init(struct gic_chip_data *gic)
+static int __init gic_pm_init(struct gic_chip_data *gic)
 {
+	return 0;
 }
 #endif
 
@@ -1011,63 +1032,63 @@
 	.unmap = gic_irq_domain_unmap,
 };
 
-static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
-			   void __iomem *dist_base, void __iomem *cpu_base,
-			   u32 percpu_offset, struct fwnode_handle *handle)
+static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
+				   struct fwnode_handle *handle)
 {
 	irq_hw_number_t hwirq_base;
-	struct gic_chip_data *gic;
-	int gic_irqs, irq_base, i;
+	int gic_irqs, irq_base, i, ret;
 
-	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
-
-	gic_check_cpu_features();
-
-	gic = &gic_data[gic_nr];
+	if (WARN_ON(!gic || gic->domain))
+		return -EINVAL;
 
 	/* Initialize irq_chip */
-	if (static_key_true(&supports_deactivate) && gic_nr == 0) {
-		gic->chip = gic_eoimode1_chip;
+	gic->chip = gic_chip;
+
+	if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+		gic->chip.irq_mask = gic_eoimode1_mask_irq;
+		gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
+		gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
+		gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
 	} else {
-		gic->chip = gic_chip;
-		gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
+		gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
+					   (int)(gic - &gic_data[0]));
 	}
 
 #ifdef CONFIG_SMP
-	if (gic_nr == 0)
+	if (gic == &gic_data[0])
 		gic->chip.irq_set_affinity = gic_set_affinity;
 #endif
 
-#ifdef CONFIG_GIC_NON_BANKED
-	if (percpu_offset) { /* Frankein-GIC without banked registers... */
+	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+		/* Franken-GIC without banked registers... */
 		unsigned int cpu;
 
 		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
 		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
 		if (WARN_ON(!gic->dist_base.percpu_base ||
 			    !gic->cpu_base.percpu_base)) {
-			free_percpu(gic->dist_base.percpu_base);
-			free_percpu(gic->cpu_base.percpu_base);
-			return;
+			ret = -ENOMEM;
+			goto error;
 		}
 
 		for_each_possible_cpu(cpu) {
 			u32 mpidr = cpu_logical_map(cpu);
 			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-			unsigned long offset = percpu_offset * core_id;
-			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
-			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+			unsigned long offset = gic->percpu_offset * core_id;
+			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
+				gic->raw_dist_base + offset;
+			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
+				gic->raw_cpu_base + offset;
 		}
 
 		gic_set_base_accessor(gic, gic_get_percpu_base);
-	} else
-#endif
-	{			/* Normal, sane GIC... */
-		WARN(percpu_offset,
+	} else {
+		/* Normal, sane GIC... */
+		WARN(gic->percpu_offset,
 		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
-		     percpu_offset);
-		gic->dist_base.common_base = dist_base;
-		gic->cpu_base.common_base = cpu_base;
+		     gic->percpu_offset);
+		gic->dist_base.common_base = gic->raw_dist_base;
+		gic->cpu_base.common_base = gic->raw_cpu_base;
 		gic_set_base_accessor(gic, gic_get_common_base);
 	}
 
@@ -1090,7 +1111,7 @@
 		 * For primary GICs, skip over SGIs.
 		 * For secondary GICs, skip over PPIs, too.
 		 */
-		if (gic_nr == 0 && (irq_start & 31) > 0) {
+		if (gic == &gic_data[0] && (irq_start & 31) > 0) {
 			hwirq_base = 16;
 			if (irq_start != -1)
 				irq_start = (irq_start & ~31) + 16;
@@ -1112,10 +1133,12 @@
 					hwirq_base, &gic_irq_domain_ops, gic);
 	}
 
-	if (WARN_ON(!gic->domain))
-		return;
+	if (WARN_ON(!gic->domain)) {
+		ret = -ENODEV;
+		goto error;
+	}
 
-	if (gic_nr == 0) {
+	if (gic == &gic_data[0]) {
 		/*
 		 * Initialize the CPU interface map to all CPUs.
 		 * It will be refined as each CPU probes its ID.
@@ -1133,19 +1156,57 @@
 	}
 
 	gic_dist_init(gic);
-	gic_cpu_init(gic);
-	gic_pm_init(gic);
+	ret = gic_cpu_init(gic);
+	if (ret)
+		goto error;
+
+	ret = gic_pm_init(gic);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+		free_percpu(gic->dist_base.percpu_base);
+		free_percpu(gic->cpu_base.percpu_base);
+	}
+
+	kfree(gic->chip.name);
+
+	return ret;
 }
 
 void __init gic_init(unsigned int gic_nr, int irq_start,
 		     void __iomem *dist_base, void __iomem *cpu_base)
 {
+	struct gic_chip_data *gic;
+
+	if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
+		return;
+
 	/*
 	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
 	 * bother with these...
 	 */
 	static_key_slow_dec(&supports_deactivate);
-	__gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL);
+
+	gic = &gic_data[gic_nr];
+	gic->raw_dist_base = dist_base;
+	gic->raw_cpu_base = cpu_base;
+
+	__gic_init_bases(gic, irq_start, NULL);
+}
+
+static void gic_teardown(struct gic_chip_data *gic)
+{
+	if (WARN_ON(!gic))
+		return;
+
+	if (gic->raw_dist_base)
+		iounmap(gic->raw_dist_base);
+	if (gic->raw_cpu_base)
+		iounmap(gic->raw_cpu_base);
 }
 
 #ifdef CONFIG_OF
@@ -1189,37 +1250,88 @@
 	return true;
 }
 
+static int __init gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
+{
+	if (!gic || !node)
+		return -EINVAL;
+
+	gic->raw_dist_base = of_iomap(node, 0);
+	if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
+		goto error;
+
+	gic->raw_cpu_base = of_iomap(node, 1);
+	if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
+		goto error;
+
+	if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
+		gic->percpu_offset = 0;
+
+	return 0;
+
+error:
+	gic_teardown(gic);
+
+	return -ENOMEM;
+}
+
+static void __init gic_of_setup_kvm_info(struct device_node *node)
+{
+	int ret;
+	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
+	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
+
+	gic_v2_kvm_info.type = GIC_V2;
+
+	gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
+	if (!gic_v2_kvm_info.maint_irq)
+		return;
+
+	ret = of_address_to_resource(node, 2, vctrl_res);
+	if (ret)
+		return;
+
+	ret = of_address_to_resource(node, 3, vcpu_res);
+	if (ret)
+		return;
+
+	gic_set_kvm_info(&gic_v2_kvm_info);
+}
+
 int __init
 gic_of_init(struct device_node *node, struct device_node *parent)
 {
-	void __iomem *cpu_base;
-	void __iomem *dist_base;
-	u32 percpu_offset;
-	int irq;
+	struct gic_chip_data *gic;
+	int irq, ret;
 
 	if (WARN_ON(!node))
 		return -ENODEV;
 
-	dist_base = of_iomap(node, 0);
-	WARN(!dist_base, "unable to map gic dist registers\n");
+	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
+		return -EINVAL;
 
-	cpu_base = of_iomap(node, 1);
-	WARN(!cpu_base, "unable to map gic cpu registers\n");
+	gic = &gic_data[gic_cnt];
+
+	ret = gic_of_setup(gic, node);
+	if (ret)
+		return ret;
 
 	/*
 	 * Disable split EOI/Deactivate if either HYP is not available
 	 * or the CPU interface is too small.
 	 */
-	if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
+	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
 		static_key_slow_dec(&supports_deactivate);
 
-	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
-		percpu_offset = 0;
+	ret = __gic_init_bases(gic, -1, &node->fwnode);
+	if (ret) {
+		gic_teardown(gic);
+		return ret;
+	}
 
-	__gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
-			 &node->fwnode);
-	if (!gic_cnt)
+	if (!gic_cnt) {
 		gic_init_physaddr(node);
+		gic_of_setup_kvm_info(node);
+	}
 
 	if (parent) {
 		irq = irq_of_parse_and_map(node, 0);
@@ -1245,7 +1357,14 @@
 #endif
 
 #ifdef CONFIG_ACPI
-static phys_addr_t cpu_phy_base __initdata;
+static struct
+{
+	phys_addr_t cpu_phys_base;
+	u32 maint_irq;
+	int maint_irq_mode;
+	phys_addr_t vctrl_base;
+	phys_addr_t vcpu_base;
+} acpi_data __initdata;
 
 static int __init
 gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
@@ -1265,10 +1384,16 @@
 	 * All CPU interface addresses have to be the same.
 	 */
 	gic_cpu_base = processor->base_address;
-	if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
+	if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
 		return -EINVAL;
 
-	cpu_phy_base = gic_cpu_base;
+	acpi_data.cpu_phys_base = gic_cpu_base;
+	acpi_data.maint_irq = processor->vgic_interrupt;
+	acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
+				    ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
+	acpi_data.vctrl_base = processor->gich_base_address;
+	acpi_data.vcpu_base = processor->gicv_base_address;
+
 	cpu_base_assigned = 1;
 	return 0;
 }
@@ -1299,14 +1424,49 @@
 
 #define ACPI_GICV2_DIST_MEM_SIZE	(SZ_4K)
 #define ACPI_GIC_CPU_IF_MEM_SIZE	(SZ_8K)
+#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
+#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
+
+static void __init gic_acpi_setup_kvm_info(void)
+{
+	int irq;
+	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
+	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
+
+	gic_v2_kvm_info.type = GIC_V2;
+
+	if (!acpi_data.vctrl_base)
+		return;
+
+	vctrl_res->flags = IORESOURCE_MEM;
+	vctrl_res->start = acpi_data.vctrl_base;
+	vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;
+
+	if (!acpi_data.vcpu_base)
+		return;
+
+	vcpu_res->flags = IORESOURCE_MEM;
+	vcpu_res->start = acpi_data.vcpu_base;
+	vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
+
+	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
+				acpi_data.maint_irq_mode,
+				ACPI_ACTIVE_HIGH);
+	if (irq <= 0)
+		return;
+
+	gic_v2_kvm_info.maint_irq = irq;
+
+	gic_set_kvm_info(&gic_v2_kvm_info);
+}
 
 static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
 				   const unsigned long end)
 {
 	struct acpi_madt_generic_distributor *dist;
-	void __iomem *cpu_base, *dist_base;
 	struct fwnode_handle *domain_handle;
-	int count;
+	struct gic_chip_data *gic = &gic_data[0];
+	int count, ret;
 
 	/* Collect CPU base addresses */
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
@@ -1316,17 +1476,18 @@
 		return -EINVAL;
 	}
 
-	cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
-	if (!cpu_base) {
+	gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
+	if (!gic->raw_cpu_base) {
 		pr_err("Unable to map GICC registers\n");
 		return -ENOMEM;
 	}
 
 	dist = (struct acpi_madt_generic_distributor *)header;
-	dist_base = ioremap(dist->base_address, ACPI_GICV2_DIST_MEM_SIZE);
-	if (!dist_base) {
+	gic->raw_dist_base = ioremap(dist->base_address,
+				     ACPI_GICV2_DIST_MEM_SIZE);
+	if (!gic->raw_dist_base) {
 		pr_err("Unable to map GICD registers\n");
-		iounmap(cpu_base);
+		gic_teardown(gic);
 		return -ENOMEM;
 	}
 
@@ -1341,21 +1502,28 @@
 	/*
 	 * Initialize GIC instance zero (no multi-GIC support).
 	 */
-	domain_handle = irq_domain_alloc_fwnode(dist_base);
+	domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base);
 	if (!domain_handle) {
 		pr_err("Unable to allocate domain handle\n");
-		iounmap(cpu_base);
-		iounmap(dist_base);
+		gic_teardown(gic);
 		return -ENOMEM;
 	}
 
-	__gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle);
+	ret = __gic_init_bases(gic, -1, domain_handle);
+	if (ret) {
+		pr_err("Failed to initialise GIC\n");
+		irq_domain_free_fwnode(domain_handle);
+		gic_teardown(gic);
+		return ret;
+	}
 
 	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
 
 	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
 		gicv2m_init(NULL, gic_data[0].domain);
 
+	gic_acpi_setup_kvm_info();
+
 	return 0;
 }
 IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
new file mode 100644
index 0000000..1034aeb
--- /dev/null
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/exception.h>
+
+#define LPC32XX_INTC_MASK		0x00
+#define LPC32XX_INTC_RAW		0x04
+#define LPC32XX_INTC_STAT		0x08
+#define LPC32XX_INTC_POL		0x0C
+#define LPC32XX_INTC_TYPE		0x10
+#define LPC32XX_INTC_FIQ		0x14
+
+#define NR_LPC32XX_IC_IRQS		32
+
+struct lpc32xx_irq_chip {
+	void __iomem *base;
+	struct irq_domain *domain;
+	struct irq_chip chip;
+};
+
+static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
+
+static inline u32 lpc32xx_ic_read(struct lpc32xx_irq_chip *ic, u32 reg)
+{
+	return readl_relaxed(ic->base + reg);
+}
+
+static inline void lpc32xx_ic_write(struct lpc32xx_irq_chip *ic,
+				    u32 reg, u32 val)
+{
+	writel_relaxed(val, ic->base + reg);
+}
+
+static void lpc32xx_irq_mask(struct irq_data *d)
+{
+	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+	u32 val, mask = BIT(d->hwirq);
+
+	val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) & ~mask;
+	lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
+}
+
+static void lpc32xx_irq_unmask(struct irq_data *d)
+{
+	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+	u32 val, mask = BIT(d->hwirq);
+
+	val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) | mask;
+	lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
+}
+
+static void lpc32xx_irq_ack(struct irq_data *d)
+{
+	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+	u32 mask = BIT(d->hwirq);
+
+	lpc32xx_ic_write(ic, LPC32XX_INTC_RAW, mask);
+}
+
+static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+	u32 val, mask = BIT(d->hwirq);
+	bool high, edge;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		edge = true;
+		high = true;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		edge = true;
+		high = false;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		edge = false;
+		high = true;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		edge = false;
+		high = false;
+		break;
+	default:
+		pr_info("unsupported irq type %d\n", type);
+		return -EINVAL;
+	}
+
+	irqd_set_trigger_type(d, type);
+
+	val = lpc32xx_ic_read(ic, LPC32XX_INTC_POL);
+	if (high)
+		val |= mask;
+	else
+		val &= ~mask;
+	lpc32xx_ic_write(ic, LPC32XX_INTC_POL, val);
+
+	val = lpc32xx_ic_read(ic, LPC32XX_INTC_TYPE);
+	if (edge) {
+		val |= mask;
+		irq_set_handler_locked(d, handle_edge_irq);
+	} else {
+		val &= ~mask;
+		irq_set_handler_locked(d, handle_level_irq);
+	}
+	lpc32xx_ic_write(ic, LPC32XX_INTC_TYPE, val);
+
+	return 0;
+}
+
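+/*
+ * Root interrupt handler for the main interrupt controller (MIC): read the
+ * pending status register and dispatch each set bit through the MIC domain.
+ */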
+static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
+{
+	struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
+	u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
+
+	while (hwirq) {
+		irq = __ffs(hwirq);
+		hwirq &= ~BIT(irq);
+		handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs);
+	}
+}
+
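+/*
+ * Chained handler for a sub interrupt controller (SIC): forward each
+ * pending bit to the interrupt mapped in that SIC's own domain.
+ */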
+static void lpc32xx_sic_handler(struct irq_desc *desc)
+{
+	struct lpc32xx_irq_chip *ic = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
+
+	chained_irq_enter(chip, desc);
+
+	while (hwirq) {
+		irq = __ffs(hwirq);
+		hwirq &= ~BIT(irq);
+		generic_handle_irq(irq_find_mapping(ic->domain, irq));
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
+				  irq_hw_number_t hw)
+{
+	struct lpc32xx_irq_chip *ic = id->host_data;
+
+	irq_set_chip_data(virq, ic);
+	irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
+	irq_set_status_flags(virq, IRQ_LEVEL);
+	irq_set_noprobe(virq);
+
+	return 0;
+}
+
+static void lpc32xx_irq_domain_unmap(struct irq_domain *id, unsigned int virq)
+{
+	irq_set_chip_and_handler(virq, NULL, NULL);
+}
+
+static const struct irq_domain_ops lpc32xx_irq_domain_ops = {
+	.map    = lpc32xx_irq_domain_map,
+	.unmap	= lpc32xx_irq_domain_unmap,
+	.xlate  = irq_domain_xlate_twocell,
+};
+
+static int __init lpc32xx_of_ic_init(struct device_node *node,
+				     struct device_node *parent)
+{
+	struct lpc32xx_irq_chip *irqc;
+	bool is_mic = of_device_is_compatible(node, "nxp,lpc3220-mic");
+	const __be32 *reg = of_get_property(node, "reg", NULL);
+	u32 parent_irq, i, addr = reg ? be32_to_cpu(*reg) : 0;
+
+	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+	if (!irqc)
+		return -ENOMEM;
+
+	irqc->base = of_iomap(node, 0);
+	if (!irqc->base) {
+		pr_err("%s: unable to map registers\n", node->full_name);
+		kfree(irqc);
+		return -EINVAL;
+	}
+
+	irqc->chip.irq_ack = lpc32xx_irq_ack;
+	irqc->chip.irq_mask = lpc32xx_irq_mask;
+	irqc->chip.irq_unmask = lpc32xx_irq_unmask;
+	irqc->chip.irq_set_type = lpc32xx_irq_set_type;
+	if (is_mic)
+		irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr);
+	else
+		irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr);
+
+	irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
+					     &lpc32xx_irq_domain_ops, irqc);
+	if (!irqc->domain) {
+		pr_err("unable to add irq domain\n");
+		iounmap(irqc->base);
+		kfree(irqc->chip.name);
+		kfree(irqc);
+		return -ENODEV;
+	}
+
+	if (is_mic) {
+		lpc32xx_mic_irqc = irqc;
+		set_handle_irq(lpc32xx_handle_irq);
+	} else {
+		for (i = 0; i < of_irq_count(node); i++) {
+			parent_irq = irq_of_parse_and_map(node, i);
+			if (parent_irq)
+				irq_set_chained_handler_and_data(parent_irq,
+						 lpc32xx_sic_handler, irqc);
+		}
+	}
+
+	lpc32xx_ic_write(irqc, LPC32XX_INTC_MASK, 0x00);
+	lpc32xx_ic_write(irqc, LPC32XX_INTC_POL,  0x00);
+	lpc32xx_ic_write(irqc, LPC32XX_INTC_TYPE, 0x00);
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(nxp_lpc32xx_mic, "nxp,lpc3220-mic", lpc32xx_of_ic_init);
+IRQCHIP_DECLARE(nxp_lpc32xx_sic, "nxp,lpc3220-sic", lpc32xx_of_ic_init);
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
new file mode 100644
index 0000000..02cca74c
--- /dev/null
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -0,0 +1,240 @@
+/*
+ * Freescale SCFG MSI(-X) support
+ *
+ * Copyright (C) 2016 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
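+/*
+ * The SCFG block provides up to 32 MSI vectors: a vector's number is
+ * carried in the MSI message data (shifted by MSI_IBS_SHIFT) and its
+ * pending state is reported in the MSIR register.
+ */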
+#define MSI_MAX_IRQS	32
+#define MSI_IBS_SHIFT	3
+#define MSIR		4
+
+struct ls_scfg_msi {
+	spinlock_t		lock;
+	struct platform_device	*pdev;
+	struct irq_domain	*parent;
+	struct irq_domain	*msi_domain;
+	void __iomem		*regs;
+	phys_addr_t		msiir_addr;
+	int			irq;
+	DECLARE_BITMAP(used, MSI_MAX_IRQS);
+};
+
+static struct irq_chip ls_scfg_msi_irq_chip = {
+	.name = "MSI",
+	.irq_mask	= pci_msi_mask_irq,
+	.irq_unmask	= pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info ls_scfg_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS |
+		   MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_PCI_MSIX),
+	.chip	= &ls_scfg_msi_irq_chip,
+};
+
+static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+	msg->address_hi = upper_32_bits(msi_data->msiir_addr);
+	msg->address_lo = lower_32_bits(msi_data->msiir_addr);
+	msg->data = data->hwirq << MSI_IBS_SHIFT;
+}
+
+static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
+				    const struct cpumask *mask, bool force)
+{
+	return -EINVAL;
+}
+
+static struct irq_chip ls_scfg_msi_parent_chip = {
+	.name			= "SCFG",
+	.irq_compose_msi_msg	= ls_scfg_msi_compose_msg,
+	.irq_set_affinity	= ls_scfg_msi_set_affinity,
+};
+
+static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
+					unsigned int virq,
+					unsigned int nr_irqs,
+					void *args)
+{
+	struct ls_scfg_msi *msi_data = domain->host_data;
+	int pos, err = 0;
+
+	WARN_ON(nr_irqs != 1);
+
+	spin_lock(&msi_data->lock);
+	pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
+	if (pos < MSI_MAX_IRQS)
+		__set_bit(pos, msi_data->used);
+	else
+		err = -ENOSPC;
+	spin_unlock(&msi_data->lock);
+
+	if (err)
+		return err;
+
+	irq_domain_set_info(domain, virq, pos,
+			    &ls_scfg_msi_parent_chip, msi_data,
+			    handle_simple_irq, NULL, NULL);
+
+	return 0;
+}
+
+static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
+				   unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
+	int pos;
+
+	pos = d->hwirq;
+	if (pos < 0 || pos >= MSI_MAX_IRQS) {
+		pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
+		return;
+	}
+
+	spin_lock(&msi_data->lock);
+	__clear_bit(pos, msi_data->used);
+	spin_unlock(&msi_data->lock);
+}
+
+static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
+	.alloc	= ls_scfg_msi_domain_irq_alloc,
+	.free	= ls_scfg_msi_domain_irq_free,
+};
+
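+/*
+ * Chained handler: each bit set in MSIR signals one allocated MSI; the
+ * bit order is reversed with respect to the hwirq number (bit pos maps
+ * to hwirq 31 - pos).
+ */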
+static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
+{
+	struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
+	unsigned long val;
+	int pos, virq;
+
+	chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+	val = ioread32be(msi_data->regs + MSIR);
+	for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
+		virq = irq_find_mapping(msi_data->parent, (31 - pos));
+		if (virq)
+			generic_handle_irq(virq);
+	}
+
+	chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
+{
+	/* Initialize MSI domain parent */
+	msi_data->parent = irq_domain_add_linear(NULL,
+						 MSI_MAX_IRQS,
+						 &ls_scfg_msi_domain_ops,
+						 msi_data);
+	if (!msi_data->parent) {
+		dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	msi_data->msi_domain = pci_msi_create_irq_domain(
+				of_node_to_fwnode(msi_data->pdev->dev.of_node),
+				&ls_scfg_msi_domain_info,
+				msi_data->parent);
+	if (!msi_data->msi_domain) {
+		dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
+		irq_domain_remove(msi_data->parent);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int ls_scfg_msi_probe(struct platform_device *pdev)
+{
+	struct ls_scfg_msi *msi_data;
+	struct resource *res;
+	int ret;
+
+	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
+	if (!msi_data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(msi_data->regs)) {
+		dev_err(&pdev->dev, "failed to initialize 'regs'\n");
+		return PTR_ERR(msi_data->regs);
+	}
+	msi_data->msiir_addr = res->start;
+
+	msi_data->irq = platform_get_irq(pdev, 0);
+	if (msi_data->irq <= 0) {
+		dev_err(&pdev->dev, "failed to get MSI irq\n");
+		return -ENODEV;
+	}
+
+	msi_data->pdev = pdev;
+	spin_lock_init(&msi_data->lock);
+
+	ret = ls_scfg_msi_domains_init(msi_data);
+	if (ret)
+		return ret;
+
+	irq_set_chained_handler_and_data(msi_data->irq,
+					 ls_scfg_msi_irq_handler,
+					 msi_data);
+
+	platform_set_drvdata(pdev, msi_data);
+
+	return 0;
+}
+
+static int ls_scfg_msi_remove(struct platform_device *pdev)
+{
+	struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
+
+	irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL);
+
+	irq_domain_remove(msi_data->msi_domain);
+	irq_domain_remove(msi_data->parent);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id ls_scfg_msi_id[] = {
+	{ .compatible = "fsl,1s1021a-msi", },
+	{ .compatible = "fsl,1s1043a-msi", },
+	{},
+};
+
+static struct platform_driver ls_scfg_msi_driver = {
+	.driver = {
+		.name = "ls-scfg-msi",
+		.of_match_table = ls_scfg_msi_id,
+	},
+	.probe = ls_scfg_msi_probe,
+	.remove = ls_scfg_msi_remove,
+};
+
+module_platform_driver(ls_scfg_msi_driver);
+
+MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
+MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index d67baa2..03b79b0 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -263,8 +263,8 @@
 
 		parent = platform_bus_type.dev_root;
 		child = of_platform_device_create(np, NULL, parent);
-		if (IS_ERR(child))
-			return PTR_ERR(child);
+		if (!child)
+			return -ENOMEM;
 
 		if (of_property_read_u32(child->dev.of_node, "num-pins",
 					 &num_pins) < 0) {
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 94a30da..c089f49 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -197,7 +197,7 @@
 
 	local_irq_save(flags);
 
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
+	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));
 
 	if (mips_cm_is64) {
 		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
@@ -246,6 +246,14 @@
 
 #endif
 
+unsigned gic_read_local_vp_id(void)
+{
+	unsigned long ident;
+
+	ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
+	return ident & GIC_VP_IDENT_VCNUM_MSK;
+}
+
 static bool gic_local_irq_is_routable(int intr)
 {
 	u32 vpe_ctl;
@@ -467,7 +475,7 @@
 	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
 
 	/* Update the pcpu_masks */
-	for (i = 0; i < gic_vpes; i++)
+	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
 		clear_bit(irq, pcpu_masks[i].pcpu_mask);
 	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
 
@@ -553,7 +561,8 @@
 
 	spin_lock_irqsave(&gic_lock, flags);
 	for (i = 0; i < gic_vpes; i++) {
-		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+			  mips_cm_vp_id(i));
 		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
 	}
 	spin_unlock_irqrestore(&gic_lock, flags);
@@ -567,7 +576,8 @@
 
 	spin_lock_irqsave(&gic_lock, flags);
 	for (i = 0; i < gic_vpes; i++) {
-		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+			  mips_cm_vp_id(i));
 		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
 	}
 	spin_unlock_irqrestore(&gic_lock, flags);
@@ -607,7 +617,8 @@
 	for (i = 0; i < gic_vpes; i++) {
 		unsigned int j;
 
-		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+			  mips_cm_vp_id(i));
 		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
 			if (!gic_local_irq_is_routable(j))
 				continue;
@@ -652,7 +663,8 @@
 	for (i = 0; i < gic_vpes; i++) {
 		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
 
-		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+			  mips_cm_vp_id(i));
 
 		switch (intr) {
 		case GIC_LOCAL_INT_WD:
@@ -707,7 +719,7 @@
 	spin_lock_irqsave(&gic_lock, flags);
 	gic_map_to_pin(intr, gic_cpu_pin);
 	gic_map_to_vpe(intr, vpe);
-	for (i = 0; i < gic_vpes; i++)
+	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
 		clear_bit(intr, pcpu_masks[i].pcpu_mask);
 	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
 	spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
new file mode 100644
index 0000000..ccd72c2
--- /dev/null
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-partition-percpu.h>
+#include <linux/irqdomain.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+struct partition_desc {
+	int				nr_parts;
+	struct partition_affinity	*parts;
+	struct irq_domain		*domain;
+	struct irq_desc			*chained_desc;
+	unsigned long			*bitmap;
+	struct irq_domain_ops		ops;
+};
+
+static bool partition_check_cpu(struct partition_desc *part,
+				unsigned int cpu, unsigned int hwirq)
+{
+	return cpumask_test_cpu(cpu, &part->parts[hwirq].mask);
+}
+
+static void partition_irq_mask(struct irq_data *d)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+	    chip->irq_mask)
+		chip->irq_mask(data);
+}
+
+static void partition_irq_unmask(struct irq_data *d)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+	    chip->irq_unmask)
+		chip->irq_unmask(data);
+}
+
+static int partition_irq_set_irqchip_state(struct irq_data *d,
+					   enum irqchip_irq_state which,
+					   bool val)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+	    chip->irq_set_irqchip_state)
+		return chip->irq_set_irqchip_state(data, which, val);
+
+	return -EINVAL;
+}
+
+static int partition_irq_get_irqchip_state(struct irq_data *d,
+					   enum irqchip_irq_state which,
+					   bool *val)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+	    chip->irq_get_irqchip_state)
+		return chip->irq_get_irqchip_state(data, which, val);
+
+	return -EINVAL;
+}
+
+static int partition_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	if (chip->irq_set_type)
+		return chip->irq_set_type(data, type);
+
+	return -EINVAL;
+}
+
+static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+	struct partition_desc *part = irq_data_get_irq_chip_data(d);
+	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+	seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
+}
+
+static struct irq_chip partition_irq_chip = {
+	.irq_mask		= partition_irq_mask,
+	.irq_unmask		= partition_irq_unmask,
+	.irq_set_type		= partition_irq_set_type,
+	.irq_get_irqchip_state	= partition_irq_get_irqchip_state,
+	.irq_set_irqchip_state	= partition_irq_set_irqchip_state,
+	.irq_print_chip		= partition_irq_print_chip,
+};
+
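+/*
+ * Chained handler for the underlying per-cpu interrupt: find the partition
+ * whose affinity mask contains the current CPU and hand the interrupt over
+ * to the matching virtual interrupt in the partition domain.
+ */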
+static void partition_handle_irq(struct irq_desc *desc)
+{
+	struct partition_desc *part = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	int cpu = smp_processor_id();
+	int hwirq;
+
+	chained_irq_enter(chip, desc);
+
+	for_each_set_bit(hwirq, part->bitmap, part->nr_parts) {
+		if (partition_check_cpu(part, cpu, hwirq))
+			break;
+	}
+
+	if (unlikely(hwirq == part->nr_parts)) {
+		handle_bad_irq(desc);
+	} else {
+		unsigned int irq;
+		irq = irq_find_mapping(part->domain, hwirq);
+		generic_handle_irq(irq);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static int partition_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				  unsigned int nr_irqs, void *arg)
+{
+	int ret;
+	irq_hw_number_t hwirq;
+	unsigned int type;
+	struct irq_fwspec *fwspec = arg;
+	struct partition_desc *part;
+
+	BUG_ON(nr_irqs != 1);
+	ret = domain->ops->translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	part = domain->host_data;
+
+	set_bit(hwirq, part->bitmap);
+	irq_set_chained_handler_and_data(irq_desc_get_irq(part->chained_desc),
+					 partition_handle_irq, part);
+	irq_set_percpu_devid_partition(virq, &part->parts[hwirq].mask);
+	irq_domain_set_info(domain, virq, hwirq, &partition_irq_chip, part,
+			    handle_percpu_devid_irq, NULL, NULL);
+	irq_set_status_flags(virq, IRQ_NOAUTOEN);
+
+	return 0;
+}
+
+static void partition_domain_free(struct irq_domain *domain, unsigned int virq,
+				  unsigned int nr_irqs)
+{
+	struct irq_data *d;
+
+	BUG_ON(nr_irqs != 1);
+
+	d = irq_domain_get_irq_data(domain, virq);
+	irq_set_handler(virq, NULL);
+	irq_domain_reset_irq_data(d);
+}
+
+int partition_translate_id(struct partition_desc *desc, void *partition_id)
+{
+	struct partition_affinity *part = NULL;
+	int i;
+
+	for (i = 0; i < desc->nr_parts; i++) {
+		if (desc->parts[i].partition_id == partition_id) {
+			part = &desc->parts[i];
+			break;
+		}
+	}
+
+	if (WARN_ON(!part)) {
+		pr_err("Failed to find partition\n");
+		return -EINVAL;
+	}
+
+	return i;
+}
+
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+					     struct partition_affinity *parts,
+					     int nr_parts,
+					     int chained_irq,
+					     const struct irq_domain_ops *ops)
+{
+	struct partition_desc *desc;
+	struct irq_domain *d;
+
+	BUG_ON(!ops->select || !ops->translate);
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return NULL;
+
+	desc->ops = *ops;
+	desc->ops.free = partition_domain_free;
+	desc->ops.alloc = partition_domain_alloc;
+
+	d = irq_domain_create_linear(fwnode, nr_parts, &desc->ops, desc);
+	if (!d)
+		goto out;
+	desc->domain = d;
+
+	desc->bitmap = kzalloc(sizeof(long) * BITS_TO_LONGS(nr_parts),
+			       GFP_KERNEL);
+	if (WARN_ON(!desc->bitmap))
+		goto out;
+
+	desc->chained_desc = irq_to_desc(chained_irq);
+	desc->nr_parts = nr_parts;
+	desc->parts = parts;
+
+	return desc;
+out:
+	if (d)
+		irq_domain_remove(d);
+	kfree(desc);
+
+	return NULL;
+}
+
+struct irq_domain *partition_get_domain(struct partition_desc *dsc)
+{
+	if (dsc)
+		return dsc->domain;
+
+	return NULL;
+}
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 50be963..e902f08 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -235,7 +235,7 @@
 			return -EINVAL;
 
 		*hwirq = fwspec->param[1];
-		*type = fwspec->param[2];
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
 		return 0;
 	}
 
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 598ab3f..37dd464 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -227,4 +227,5 @@
 }
 IRQCHIP_DECLARE(arm_fpga, "arm,versatile-fpga-irq", fpga_irq_of_init);
 IRQCHIP_DECLARE(arm_fpga_sic, "arm,versatile-sic", fpga_irq_of_init);
+IRQCHIP_DECLARE(ox810se_rps, "oxsemi,ox810se-rps-irq", fpga_irq_of_init);
 #endif
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index d7c2866..1a1d997 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -1147,8 +1147,6 @@
 
 static void dump_c_ind_mask(PLCI *plci)
 {
-	static char hex_digit_table[0x10] =
-		{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
 	word i, j, k;
 	dword d;
 	char *p;
@@ -1165,7 +1163,7 @@
 				d = plci->c_ind_mask_table[i + j];
 				for (k = 0; k < 8; k++)
 				{
-					*(--p) = hex_digit_table[d & 0xf];
+					*(--p) = hex_asc_lo(d);
 					d >>= 4;
 				}
 			}
@@ -10507,7 +10505,6 @@
 
 static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
 {
-	static char hex_digit_table[0x10] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
 	word n, i, j;
 	char *p;
 	char hex_line[2 * MIXER_MAX_DUMP_CHANNELS + MIXER_MAX_DUMP_CHANNELS / 8 + 4];
@@ -10690,13 +10687,13 @@
 	n = li_total_channels;
 	if (n > MIXER_MAX_DUMP_CHANNELS)
 		n = MIXER_MAX_DUMP_CHANNELS;
+
 	p = hex_line;
 	for (j = 0; j < n; j++)
 	{
 		if ((j & 0x7) == 0)
 			*(p++) = ' ';
-		*(p++) = hex_digit_table[li_config_table[j].curchnl >> 4];
-		*(p++) = hex_digit_table[li_config_table[j].curchnl & 0xf];
+		p = hex_byte_pack(p, li_config_table[j].curchnl);
 	}
 	*p = '\0';
 	dbug(1, dprintf("[%06lx] CURRENT %s",
@@ -10706,8 +10703,7 @@
 	{
 		if ((j & 0x7) == 0)
 			*(p++) = ' ';
-		*(p++) = hex_digit_table[li_config_table[j].channel >> 4];
-		*(p++) = hex_digit_table[li_config_table[j].channel & 0xf];
+		p = hex_byte_pack(p, li_config_table[j].channel);
 	}
 	*p = '\0';
 	dbug(1, dprintf("[%06lx] CHANNEL %s",
@@ -10717,8 +10713,7 @@
 	{
 		if ((j & 0x7) == 0)
 			*(p++) = ' ';
-		*(p++) = hex_digit_table[li_config_table[j].chflags >> 4];
-		*(p++) = hex_digit_table[li_config_table[j].chflags & 0xf];
+		p = hex_byte_pack(p, li_config_table[j].chflags);
 	}
 	*p = '\0';
 	dbug(1, dprintf("[%06lx] CHFLAG  %s",
@@ -10730,8 +10725,7 @@
 		{
 			if ((j & 0x7) == 0)
 				*(p++) = ' ';
-			*(p++) = hex_digit_table[li_config_table[i].flag_table[j] >> 4];
-			*(p++) = hex_digit_table[li_config_table[i].flag_table[j] & 0xf];
+			p = hex_byte_pack(p, li_config_table[i].flag_table[j]);
 		}
 		*p = '\0';
 		dbug(1, dprintf("[%06lx] FLAG[%02x]%s",
@@ -10744,8 +10738,7 @@
 		{
 			if ((j & 0x7) == 0)
 				*(p++) = ' ';
-			*(p++) = hex_digit_table[li_config_table[i].coef_table[j] >> 4];
-			*(p++) = hex_digit_table[li_config_table[i].coef_table[j] & 0xf];
+			p = hex_byte_pack(p, li_config_table[i].coef_table[j]);
 		}
 		*p = '\0';
 		dbug(1, dprintf("[%06lx] COEF[%02x]%s",
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 7fdf78f..df7e05c 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -215,9 +215,11 @@
 			if (count == 0)
 				count = 32;
 			isac_empty_fifo(cs, count);
-			if ((count = cs->rcvidx) > 0) {
+			count = cs->rcvidx;
+			if (count > 0) {
 				cs->rcvidx = 0;
-				if (!(skb = alloc_skb(count, GFP_ATOMIC)))
+				skb = alloc_skb(count, GFP_ATOMIC);
+				if (!skb)
 					printk(KERN_WARNING "HiSax: D receive out of memory\n");
 				else {
 					memcpy(skb_put(skb, count), cs->rcvbuf, count);
@@ -251,7 +253,8 @@
 				cs->tx_skb = NULL;
 			}
 		}
-		if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
+		cs->tx_skb = skb_dequeue(&cs->sq);
+		if (cs->tx_skb) {
 			cs->tx_cnt = 0;
 			isac_fill_fifo(cs);
 		} else
@@ -313,7 +316,8 @@
 #if ARCOFI_USE
 			if (v1 & 0x08) {
 				if (!cs->dc.isac.mon_rx) {
-					if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
+					cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
+					if (!cs->dc.isac.mon_rx) {
 						if (cs->debug & L1_DEB_WARN)
 							debugl1(cs, "ISAC MON RX out of memory!");
 						cs->dc.isac.mocr &= 0xf0;
@@ -343,7 +347,8 @@
 		afterMONR0:
 			if (v1 & 0x80) {
 				if (!cs->dc.isac.mon_rx) {
-					if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
+					cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
+					if (!cs->dc.isac.mon_rx) {
 						if (cs->debug & L1_DEB_WARN)
 							debugl1(cs, "ISAC MON RX out of memory!");
 						cs->dc.isac.mocr &= 0x0f;
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index a0efb4c..5609dee 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -127,7 +127,7 @@
 	if (lp->in_idx >= MAX_SKB_BUFFERS)
 		lp->in_idx = 0;	/* wrap around */
 	lp->sk_count++;		/* adjust counter */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	/* If we just used up the very last entry in the
 	 * TX ring on this device, tell the queueing
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index aa5dd56..c151c6d 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1153,7 +1153,7 @@
 		 * ever called   --KG
 		 */
 	}
-	ndev->trans_start = jiffies;
+	netif_trans_update(ndev);
 	netif_wake_queue(ndev);
 }
 
@@ -1291,7 +1291,7 @@
 			}
 		} else {
 			/* Device is connected to an ISDN channel */
-			ndev->trans_start = jiffies;
+			netif_trans_update(ndev);
 			if (!lp->dialstate) {
 				/* ISDN connection is established, try sending */
 				int ret;
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index e2d4e58..0c5d8de 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -278,7 +278,7 @@
 	case X25_IFACE_DATA:
 		if (*state == WAN_CONNECTED) {
 			skb_pull(skb, 1);
-			cprot->net_dev->trans_start = jiffies;
+			netif_trans_update(cprot->net_dev);
 			ret = (cprot->dops->data_req(cprot, skb));
 			/* prepare for future retransmissions */
 			if (ret) skb_push(skb, 1);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a..99e5f97 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@
 	if (!maddr || maddr->family != AF_ISDN)
 		return -EINVAL;
 
+	if (addr_len < sizeof(struct sockaddr_mISDN))
+		return -EINVAL;
+
 	lock_sock(sk);
 
 	if (_pms(sk)->dev) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 2251478..5ae2834 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -413,10 +413,11 @@
 	tristate "LED driver for Intel NAS SS4200 series"
 	depends on LEDS_CLASS
 	depends on PCI && DMI
+	depends on X86
 	help
 	  This option enables support for the Intel SS4200 series of
-	  Network Attached Storage servers.  You may control the hard
-	  drive or power LEDs on the front panel.  Using this driver
+	  Network Attached Storage servers. You may control the hard
+	  drive or power LEDs on the front panel. Using this driver
 	  can stop the front LED from blinking after startup.
 
 config LEDS_LT3593
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 2181581..55fa65e 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -26,7 +26,7 @@
  * Nests outside led_cdev->trigger_lock
  */
 static DECLARE_RWSEM(triggers_list_lock);
-static LIST_HEAD(trigger_list);
+LIST_HEAD(trigger_list);
 
  /* Used by LED Class */
 
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 61143f5..8229f06 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -127,6 +127,8 @@
 	led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
 	if (!template->retain_state_suspended)
 		led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+	if (template->panic_indicator)
+		led_dat->cdev.flags |= LED_PANIC_INDICATOR;
 
 	ret = gpiod_direction_output(led_dat->gpiod, state);
 	if (ret < 0)
@@ -200,6 +202,8 @@
 
 		if (fwnode_property_present(child, "retain-state-suspended"))
 			led.retain_state_suspended = 1;
+		if (fwnode_property_present(child, "panic-indicator"))
+			led.panic_indicator = 1;
 
 		ret = create_gpio_led(&led, &priv->leds[priv->num_leds],
 				      dev, NULL);
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 046cb70..732eb86 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -101,6 +101,19 @@
 			DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
 		}
 	},
+	{
+		/*
+		 * FUJITSU SIEMENS SCALEO Home Server/SS4200-E
+		 * BIOS V090L 12/19/2007
+		 */
+		.callback = ss4200_led_dmi_callback,
+		.ident = "Fujitsu Siemens SCALEO Home Server",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "SCALEO Home Server"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
+		}
+	},
 	{}
 };
 
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index c548ea1..45222a7 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -327,6 +327,8 @@
 	int result;
 
 	result = choose_times(tca->bank[bank].ontime, &c1, &c2);
+	if (result < 0)
+		return;
 	dev_dbg(&tca->client->dev,
 		"Chose on  times %d(%d) %d(%d) for %dms\n",
 		c1, time_codes[c1],
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index db3f20d..7d38e6b 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -30,5 +30,6 @@
 
 extern struct rw_semaphore leds_list_lock;
 extern struct list_head leds_list;
+extern struct list_head trigger_list;
 
 #endif	/* __LEDS_H_INCLUDED */
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 5bda6a9..9893d91 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -41,6 +41,14 @@
 	  This allows LEDs to be controlled by IDE disk activity.
 	  If unsure, say Y.
 
+config LEDS_TRIGGER_MTD
+	bool "LED MTD (NAND/NOR) Trigger"
+	depends on MTD
+	depends on LEDS_TRIGGERS
+	help
+	  This allows LEDs to be controlled by MTD activity.
+	  If unsure, say N.
+
 config LEDS_TRIGGER_HEARTBEAT
 	tristate "LED Heartbeat Trigger"
 	depends on LEDS_TRIGGERS
@@ -108,4 +116,14 @@
 	  This enables direct flash/torch on/off by the driver, kernel space.
 	  If unsure, say Y.
 
+config LEDS_TRIGGER_PANIC
+	bool "LED Panic Trigger"
+	depends on LEDS_TRIGGERS
+	help
+	  This allows LEDs to be configured to blink on a kernel panic.
+	  Enabling this option will allow to mark certain LEDs as panic indicators,
+	  allowing to blink them on a kernel panic, even if they are set to
+	  a different trigger.
+	  If unsure, say Y.
+
 endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 1abf48d..8cc64a4 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_LEDS_TRIGGER_TIMER)	+= ledtrig-timer.o
 obj-$(CONFIG_LEDS_TRIGGER_ONESHOT)	+= ledtrig-oneshot.o
 obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK)	+= ledtrig-ide-disk.o
+obj-$(CONFIG_LEDS_TRIGGER_MTD)		+= ledtrig-mtd.o
 obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT)	+= ledtrig-heartbeat.o
 obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT)	+= ledtrig-backlight.o
 obj-$(CONFIG_LEDS_TRIGGER_GPIO)		+= ledtrig-gpio.o
@@ -8,3 +9,4 @@
 obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON)	+= ledtrig-default-on.o
 obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT)	+= ledtrig-transient.o
 obj-$(CONFIG_LEDS_TRIGGER_CAMERA)	+= ledtrig-camera.o
+obj-$(CONFIG_LEDS_TRIGGER_PANIC)	+= ledtrig-panic.o
diff --git a/drivers/leds/trigger/ledtrig-ide-disk.c b/drivers/leds/trigger/ledtrig-ide-disk.c
index c02a3ac..15123d3 100644
--- a/drivers/leds/trigger/ledtrig-ide-disk.c
+++ b/drivers/leds/trigger/ledtrig-ide-disk.c
@@ -18,10 +18,11 @@
 #define BLINK_DELAY 30
 
 DEFINE_LED_TRIGGER(ledtrig_ide);
-static unsigned long ide_blink_delay = BLINK_DELAY;
 
 void ledtrig_ide_activity(void)
 {
+	unsigned long ide_blink_delay = BLINK_DELAY;
+
 	led_trigger_blink_oneshot(ledtrig_ide,
 				  &ide_blink_delay, &ide_blink_delay, 0);
 }
diff --git a/drivers/leds/trigger/ledtrig-mtd.c b/drivers/leds/trigger/ledtrig-mtd.c
new file mode 100644
index 0000000..99b5b0a
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-mtd.c
@@ -0,0 +1,45 @@
+/*
+ * LED MTD trigger
+ *
+ * Copyright 2016 Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+ *
+ * Based on LED IDE-Disk Activity Trigger
+ *
+ * Copyright 2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+
+#define BLINK_DELAY 30
+
+DEFINE_LED_TRIGGER(ledtrig_mtd);
+DEFINE_LED_TRIGGER(ledtrig_nand);
+
+void ledtrig_mtd_activity(void)
+{
+	unsigned long blink_delay = BLINK_DELAY;
+
+	led_trigger_blink_oneshot(ledtrig_mtd,
+				  &blink_delay, &blink_delay, 0);
+	led_trigger_blink_oneshot(ledtrig_nand,
+				  &blink_delay, &blink_delay, 0);
+}
+EXPORT_SYMBOL(ledtrig_mtd_activity);
+
+static int __init ledtrig_mtd_init(void)
+{
+	led_trigger_register_simple("mtd", &ledtrig_mtd);
+	led_trigger_register_simple("nand-disk", &ledtrig_nand);
+
+	return 0;
+}
+device_initcall(ledtrig_mtd_init);
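
For reference, a minimal sketch of how an MTD driver could feed the new trigger; the mtd_example_io() wrapper is hypothetical, and it assumes ledtrig_mtd_activity() is exposed via <linux/leds.h> the same way the existing IDE-disk hook is:

#include <linux/leds.h>

/* Hypothetical I/O path in an MTD driver (illustrative only). */
static int mtd_example_io(void)
{
	/* Fire a oneshot blink on the "mtd" and "nand-disk" triggers. */
	ledtrig_mtd_activity();

	/* ... perform the actual flash read/write here ... */
	return 0;
}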
diff --git a/drivers/leds/trigger/ledtrig-panic.c b/drivers/leds/trigger/ledtrig-panic.c
new file mode 100644
index 0000000..d735526
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-panic.c
@@ -0,0 +1,77 @@
+/*
+ * Kernel Panic LED Trigger
+ *
+ * Copyright 2016 Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/leds.h>
+#include "../leds.h"
+
+static struct led_trigger *trigger;
+
+/*
+ * This is called in a special context by the atomic panic
+ * notifier. This means the trigger can be changed without
+ * worrying about locking.
+ */
+static void led_trigger_set_panic(struct led_classdev *led_cdev)
+{
+	struct led_trigger *trig;
+
+	list_for_each_entry(trig, &trigger_list, next_trig) {
+		if (strcmp("panic", trig->name))
+			continue;
+		if (led_cdev->trigger)
+			list_del(&led_cdev->trig_list);
+		list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
+
+		/* Avoid the delayed blink path */
+		led_cdev->blink_delay_on = 0;
+		led_cdev->blink_delay_off = 0;
+
+		led_cdev->trigger = trig;
+		if (trig->activate)
+			trig->activate(led_cdev);
+		break;
+	}
+}
+
+static int led_trigger_panic_notifier(struct notifier_block *nb,
+				      unsigned long code, void *unused)
+{
+	struct led_classdev *led_cdev;
+
+	list_for_each_entry(led_cdev, &leds_list, node)
+		if (led_cdev->flags & LED_PANIC_INDICATOR)
+			led_trigger_set_panic(led_cdev);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block led_trigger_panic_nb = {
+	.notifier_call = led_trigger_panic_notifier,
+};
+
+static long led_panic_blink(int state)
+{
+	led_trigger_event(trigger, state ? LED_FULL : LED_OFF);
+	return 0;
+}
+
+static int __init ledtrig_panic_init(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &led_trigger_panic_nb);
+
+	led_trigger_register_simple("panic", &trigger);
+	panic_blink = led_panic_blink;
+	return 0;
+}
+device_initcall(ledtrig_panic_init);
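
A driver opts a LED into this behaviour by setting the new flag before registration; a minimal sketch (the example_led device and its name are hypothetical, LED_PANIC_INDICATOR and the flag handling come from the diffs above):

#include <linux/leds.h>

static struct led_classdev example_led = {	/* hypothetical LED */
	.name  = "board:red:panic",
	.flags = LED_PANIC_INDICATOR,	/* picked up by the panic notifier */
	/* a real driver would also fill in brightness_set() etc. */
};

static int example_probe(struct device *dev)
{
	/* Once registered, the panic trigger blinks this LED on panic. */
	return led_classdev_register(dev, &example_led);
}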
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index eb934b0..67392b6 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -331,7 +331,7 @@
  * Actually now I think of it, it's possible that Ron *is* half the Plan 9
  * userbase.  Oh well.
  */
-static bool could_be_syscall(unsigned int num)
+bool could_be_syscall(unsigned int num)
 {
 	/* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
 	return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
@@ -416,6 +416,10 @@
  *
  * This routine indicates if a particular trap number could be delivered
  * directly.
+ *
+ * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
+ * trap gate for syscalls, so this trick is ineffective.  See Mastery for
+ * how we could do this anyway...
  */
 static bool direct_trap(unsigned int num)
 {
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index ac8ad04..69b3814 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -167,6 +167,7 @@
 bool send_notify_to_eventfd(struct lg_cpu *cpu);
 void init_clockdev(struct lg_cpu *cpu);
 bool check_syscall_vector(struct lguest *lg);
+bool could_be_syscall(unsigned int num);
 int init_interrupts(void);
 void free_interrupts(void);
 
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6a4cd77..6e9042e 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -429,8 +429,12 @@
 			return;
 		break;
 	case 32 ... 255:
+		/* This might be a syscall. */
+		if (could_be_syscall(cpu->regs->trapnum))
+			break;
+
 		/*
-		 * These values mean a real interrupt occurred, in which case
+		 * Other values mean a real interrupt occurred, in which case
 		 * the Host handler has already been run. We just do a
 		 * friendly check if another process should now be run, then
 		 * return to run the Guest again.
@@ -599,7 +603,7 @@
 	 * doing this.
 	 */
 	get_online_cpus();
-	if (cpu_has_pge) { /* We have a broader idea of "global". */
+	if (boot_cpu_has(X86_FEATURE_PGE)) { /* We have a broader idea of "global". */
 		/* Remember that this was originally set (for cleanup). */
 		cpu_had_pge = 1;
 		/*
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 0dc9a80..160c1a6 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -30,23 +30,35 @@
 #include <linux/sched/sysctl.h>
 #include <uapi/linux/lightnvm.h>
 
-static LIST_HEAD(nvm_targets);
+static LIST_HEAD(nvm_tgt_types);
 static LIST_HEAD(nvm_mgrs);
 static LIST_HEAD(nvm_devices);
+static LIST_HEAD(nvm_targets);
 static DECLARE_RWSEM(nvm_lock);
 
+static struct nvm_target *nvm_find_target(const char *name)
+{
+	struct nvm_target *tgt;
+
+	list_for_each_entry(tgt, &nvm_targets, list)
+		if (!strcmp(name, tgt->disk->disk_name))
+			return tgt;
+
+	return NULL;
+}
+
 static struct nvm_tgt_type *nvm_find_target_type(const char *name)
 {
 	struct nvm_tgt_type *tt;
 
-	list_for_each_entry(tt, &nvm_targets, list)
+	list_for_each_entry(tt, &nvm_tgt_types, list)
 		if (!strcmp(name, tt->name))
 			return tt;
 
 	return NULL;
 }
 
-int nvm_register_target(struct nvm_tgt_type *tt)
+int nvm_register_tgt_type(struct nvm_tgt_type *tt)
 {
 	int ret = 0;
 
@@ -54,14 +66,14 @@
 	if (nvm_find_target_type(tt->name))
 		ret = -EEXIST;
 	else
-		list_add(&tt->list, &nvm_targets);
+		list_add(&tt->list, &nvm_tgt_types);
 	up_write(&nvm_lock);
 
 	return ret;
 }
-EXPORT_SYMBOL(nvm_register_target);
+EXPORT_SYMBOL(nvm_register_tgt_type);
 
-void nvm_unregister_target(struct nvm_tgt_type *tt)
+void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
 {
 	if (!tt)
 		return;
@@ -70,20 +82,20 @@
 	list_del(&tt->list);
 	up_write(&nvm_lock);
 }
-EXPORT_SYMBOL(nvm_unregister_target);
+EXPORT_SYMBOL(nvm_unregister_tgt_type);
 
 void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
 							dma_addr_t *dma_handler)
 {
-	return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
+	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
 								dma_handler);
 }
 EXPORT_SYMBOL(nvm_dev_dma_alloc);
 
-void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
+void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
 							dma_addr_t dma_handler)
 {
-	dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
+	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
 }
 EXPORT_SYMBOL(nvm_dev_dma_free);
 
@@ -214,8 +226,8 @@
 {
 	int i;
 
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
+	if (rqd->nr_ppas > 1) {
+		for (i = 0; i < rqd->nr_ppas; i++)
 			rqd->ppa_list[i] = dev_to_generic_addr(dev,
 							rqd->ppa_list[i]);
 	} else {
@@ -228,8 +240,8 @@
 {
 	int i;
 
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
+	if (rqd->nr_ppas > 1) {
+		for (i = 0; i < rqd->nr_ppas; i++)
 			rqd->ppa_list[i] = generic_to_dev_addr(dev,
 							rqd->ppa_list[i]);
 	} else {
@@ -239,33 +251,36 @@
 EXPORT_SYMBOL(nvm_generic_to_addr_mode);
 
 int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
-					struct ppa_addr *ppas, int nr_ppas)
+				struct ppa_addr *ppas, int nr_ppas, int vblk)
 {
 	int i, plane_cnt, pl_idx;
 
-	if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
-		rqd->nr_pages = 1;
+	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+		rqd->nr_ppas = nr_ppas;
 		rqd->ppa_addr = ppas[0];
 
 		return 0;
 	}
 
-	plane_cnt = dev->plane_mode;
-	rqd->nr_pages = plane_cnt * nr_ppas;
-
-	if (dev->ops->max_phys_sect < rqd->nr_pages)
-		return -EINVAL;
-
+	rqd->nr_ppas = nr_ppas;
 	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
 	if (!rqd->ppa_list) {
 		pr_err("nvm: failed to allocate dma memory\n");
 		return -ENOMEM;
 	}
 
-	for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+	if (!vblk) {
+		for (i = 0; i < nr_ppas; i++)
+			rqd->ppa_list[i] = ppas[i];
+	} else {
+		plane_cnt = dev->plane_mode;
+		rqd->nr_ppas *= plane_cnt;
+
 		for (i = 0; i < nr_ppas; i++) {
-			ppas[i].g.pl = pl_idx;
-			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+				ppas[i].g.pl = pl_idx;
+				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+			}
 		}
 	}
 
@@ -292,7 +307,7 @@
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
+	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
 	if (ret)
 		return ret;
 
@@ -322,11 +337,10 @@
 	complete(waiting);
 }
 
-int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
-				int opcode, int flags, void *buf, int len)
+int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
+						int flags, void *buf, int len)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
-	struct nvm_rq rqd;
 	struct bio *bio;
 	int ret;
 	unsigned long hang_check;
@@ -335,23 +349,21 @@
 	if (IS_ERR_OR_NULL(bio))
 		return -ENOMEM;
 
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+	nvm_generic_to_addr_mode(dev, rqd);
+
+	rqd->dev = dev;
+	rqd->opcode = opcode;
+	rqd->flags = flags;
+	rqd->bio = bio;
+	rqd->wait = &wait;
+	rqd->end_io = nvm_end_io_sync;
+
+	ret = dev->ops->submit_io(dev, rqd);
 	if (ret) {
 		bio_put(bio);
 		return ret;
 	}
 
-	rqd.opcode = opcode;
-	rqd.bio = bio;
-	rqd.wait = &wait;
-	rqd.dev = dev;
-	rqd.end_io = nvm_end_io_sync;
-	rqd.flags = flags;
-	nvm_generic_to_addr_mode(dev, &rqd);
-
-	ret = dev->ops->submit_io(dev, &rqd);
-
 	/* Prevent hang_check timer from firing at us during very long I/O */
 	hang_check = sysctl_hung_task_timeout_secs;
 	if (hang_check)
@@ -359,12 +371,113 @@
 	else
 		wait_for_completion_io(&wait);
 
+	return rqd->error;
+}
+
+/**
+ * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
+ *			 take to free ppa list if necessary.
+ * @dev:	device
+ * @ppa_list:	user created ppa_list
+ * @nr_ppas:	length of ppa_list
+ * @opcode:	device opcode
+ * @flags:	device flags
+ * @buf:	data buffer
+ * @len:	data buffer length
+ */
+int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
+			int nr_ppas, int opcode, int flags, void *buf, int len)
+{
+	struct nvm_rq rqd;
+
+	if (dev->ops->max_phys_sect < nr_ppas)
+		return -EINVAL;
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	rqd.nr_ppas = nr_ppas;
+	if (nr_ppas > 1)
+		rqd.ppa_list = ppa_list;
+	else
+		rqd.ppa_addr = ppa_list[0];
+
+	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+}
+EXPORT_SYMBOL(nvm_submit_ppa_list);
+
+/**
+ * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
+ *		    as single, dual, quad plane PPAs depending on device type.
+ * @dev:	device
+ * @ppa:	user created ppa_list
+ * @nr_ppas:	length of ppa_list
+ * @opcode:	device opcode
+ * @flags:	device flags
+ * @buf:	data buffer
+ * @len:	data buffer length
+ */
+int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
+				int opcode, int flags, void *buf, int len)
+{
+	struct nvm_rq rqd;
+	int ret;
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
+	if (ret)
+		return ret;
+
+	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+
 	nvm_free_rqd_ppalist(dev, &rqd);
 
-	return rqd.error;
+	return ret;
 }
 EXPORT_SYMBOL(nvm_submit_ppa);
 
+/*
+ * folds a bad block list from its plane representation to its virtual
+ * block representation. The fold is done in place and the reduced size is
+ * returned.
+ *
+ * If any of the plane states is bad or grown bad, the virtual block is
+ * marked bad. If not, the first plane state acts as the block state.
+ */
+int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+{
+	int blk, offset, pl, blktype;
+
+	if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+		return -EINVAL;
+
+	for (blk = 0; blk < dev->blks_per_lun; blk++) {
+		offset = blk * dev->plane_mode;
+		blktype = blks[offset];
+
+		/* Bad blocks on any planes take precedence over other types */
+		for (pl = 0; pl < dev->plane_mode; pl++) {
+			if (blks[offset + pl] &
+					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+				blktype = blks[offset + pl];
+				break;
+			}
+		}
+
+		blks[blk] = blktype;
+	}
+
+	return dev->blks_per_lun;
+}
+EXPORT_SYMBOL(nvm_bb_tbl_fold);
+
+int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
+{
+	ppa = generic_to_dev_addr(dev, ppa);
+
+	return dev->ops->get_bb_tbl(dev, ppa, blks);
+}
+EXPORT_SYMBOL(nvm_get_bb_tbl);
+
 static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
 {
 	int i;
@@ -414,6 +527,7 @@
 {
 	struct nvm_id *id = &dev->identity;
 	struct nvm_id_group *grp = &id->groups[0];
+	int ret;
 
 	/* device values */
 	dev->nr_chnls = grp->num_ch;
@@ -421,6 +535,8 @@
 	dev->pgs_per_blk = grp->num_pg;
 	dev->blks_per_lun = grp->num_blk;
 	dev->nr_planes = grp->num_pln;
+	dev->fpg_size = grp->fpg_sz;
+	dev->pfpg_size = grp->fpg_sz * grp->num_pln;
 	dev->sec_size = grp->csecs;
 	dev->oob_size = grp->sos;
 	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
@@ -430,33 +546,16 @@
 	dev->plane_mode = NVM_PLANE_SINGLE;
 	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
 
-	if (grp->mtype != 0) {
-		pr_err("nvm: memory type not supported\n");
-		return -EINVAL;
-	}
-
-	switch (grp->fmtype) {
-	case NVM_ID_FMTYPE_SLC:
-		if (nvm_init_slc_tbl(dev, grp))
-			return -ENOMEM;
-		break;
-	case NVM_ID_FMTYPE_MLC:
-		if (nvm_init_mlc_tbl(dev, grp))
-			return -ENOMEM;
-		break;
-	default:
-		pr_err("nvm: flash type not supported\n");
-		return -EINVAL;
-	}
-
-	if (!dev->lps_per_blk)
-		pr_info("nvm: lower page programming table missing\n");
-
 	if (grp->mpos & 0x020202)
 		dev->plane_mode = NVM_PLANE_DOUBLE;
 	if (grp->mpos & 0x040404)
 		dev->plane_mode = NVM_PLANE_QUAD;
 
+	if (grp->mtype != 0) {
+		pr_err("nvm: memory type not supported\n");
+		return -EINVAL;
+	}
+
 	/* calculated values */
 	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
 	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
@@ -468,11 +567,73 @@
 					sizeof(unsigned long), GFP_KERNEL);
 	if (!dev->lun_map)
 		return -ENOMEM;
-	INIT_LIST_HEAD(&dev->online_targets);
+
+	switch (grp->fmtype) {
+	case NVM_ID_FMTYPE_SLC:
+		if (nvm_init_slc_tbl(dev, grp)) {
+			ret = -ENOMEM;
+			goto err_fmtype;
+		}
+		break;
+	case NVM_ID_FMTYPE_MLC:
+		if (nvm_init_mlc_tbl(dev, grp)) {
+			ret = -ENOMEM;
+			goto err_fmtype;
+		}
+		break;
+	default:
+		pr_err("nvm: flash type not supported\n");
+		ret = -EINVAL;
+		goto err_fmtype;
+	}
+
 	mutex_init(&dev->mlock);
 	spin_lock_init(&dev->lock);
 
 	return 0;
+err_fmtype:
+	kfree(dev->lun_map);
+	return ret;
+}
+
+static void nvm_remove_target(struct nvm_target *t)
+{
+	struct nvm_tgt_type *tt = t->type;
+	struct gendisk *tdisk = t->disk;
+	struct request_queue *q = tdisk->queue;
+
+	lockdep_assert_held(&nvm_lock);
+
+	del_gendisk(tdisk);
+	blk_cleanup_queue(q);
+
+	if (tt->exit)
+		tt->exit(tdisk->private_data);
+
+	put_disk(tdisk);
+
+	list_del(&t->list);
+	kfree(t);
+}
+
+static void nvm_free_mgr(struct nvm_dev *dev)
+{
+	struct nvm_target *tgt, *tmp;
+
+	if (!dev->mt)
+		return;
+
+	down_write(&nvm_lock);
+	list_for_each_entry_safe(tgt, tmp, &nvm_targets, list) {
+		if (tgt->dev != dev)
+			continue;
+
+		nvm_remove_target(tgt);
+	}
+	up_write(&nvm_lock);
+
+	dev->mt->unregister_mgr(dev);
+	dev->mt = NULL;
 }
 
 static void nvm_free(struct nvm_dev *dev)
@@ -480,10 +641,10 @@
 	if (!dev)
 		return;
 
-	if (dev->mt)
-		dev->mt->unregister_mgr(dev);
+	nvm_free_mgr(dev);
 
 	kfree(dev->lptbl);
+	kfree(dev->lun_map);
 }
 
 static int nvm_init(struct nvm_dev *dev)
@@ -530,8 +691,8 @@
 
 static void nvm_exit(struct nvm_dev *dev)
 {
-	if (dev->ppalist_pool)
-		dev->ops->destroy_dma_pool(dev->ppalist_pool);
+	if (dev->dma_pool)
+		dev->ops->destroy_dma_pool(dev->dma_pool);
 	nvm_free(dev);
 
 	pr_info("nvm: successfully unloaded\n");
@@ -565,9 +726,9 @@
 	}
 
 	if (dev->ops->max_phys_sect > 1) {
-		dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
-		if (!dev->ppalist_pool) {
-			pr_err("nvm: could not create ppa pool\n");
+		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
+		if (!dev->dma_pool) {
+			pr_err("nvm: could not create dma pool\n");
 			ret = -ENOMEM;
 			goto err_init;
 		}
@@ -613,7 +774,6 @@
 	up_write(&nvm_lock);
 
 	nvm_exit(dev);
-	kfree(dev->lun_map);
 	kfree(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
@@ -645,12 +805,11 @@
 		return -EINVAL;
 	}
 
-	list_for_each_entry(t, &dev->online_targets, list) {
-		if (!strcmp(create->tgtname, t->disk->disk_name)) {
-			pr_err("nvm: target name already exists.\n");
-			up_write(&nvm_lock);
-			return -EINVAL;
-		}
+	t = nvm_find_target(create->tgtname);
+	if (t) {
+		pr_err("nvm: target name already exists.\n");
+		up_write(&nvm_lock);
+		return -EINVAL;
 	}
 	up_write(&nvm_lock);
 
@@ -688,9 +847,10 @@
 
 	t->type = tt;
 	t->disk = tdisk;
+	t->dev = dev;
 
 	down_write(&nvm_lock);
-	list_add_tail(&t->list, &dev->online_targets);
+	list_add_tail(&t->list, &nvm_targets);
 	up_write(&nvm_lock);
 
 	return 0;
@@ -703,26 +863,6 @@
 	return -ENOMEM;
 }
 
-static void nvm_remove_target(struct nvm_target *t)
-{
-	struct nvm_tgt_type *tt = t->type;
-	struct gendisk *tdisk = t->disk;
-	struct request_queue *q = tdisk->queue;
-
-	lockdep_assert_held(&nvm_lock);
-
-	del_gendisk(tdisk);
-	blk_cleanup_queue(q);
-
-	if (tt->exit)
-		tt->exit(tdisk->private_data);
-
-	put_disk(tdisk);
-
-	list_del(&t->list);
-	kfree(t);
-}
-
 static int __nvm_configure_create(struct nvm_ioctl_create *create)
 {
 	struct nvm_dev *dev;
@@ -753,26 +893,19 @@
 
 static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
 {
-	struct nvm_target *t = NULL;
-	struct nvm_dev *dev;
-	int ret = -1;
+	struct nvm_target *t;
 
 	down_write(&nvm_lock);
-	list_for_each_entry(dev, &nvm_devices, devices)
-		list_for_each_entry(t, &dev->online_targets, list) {
-			if (!strcmp(remove->tgtname, t->disk->disk_name)) {
-				nvm_remove_target(t);
-				ret = 0;
-				break;
-			}
-		}
-	up_write(&nvm_lock);
-
-	if (ret) {
+	t = nvm_find_target(remove->tgtname);
+	if (!t) {
 		pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
+		up_write(&nvm_lock);
 		return -EINVAL;
 	}
 
+	nvm_remove_target(t);
+	up_write(&nvm_lock);
+
 	return 0;
 }
 
@@ -921,7 +1054,7 @@
 	info->version[2] = NVM_VERSION_PATCH;
 
 	down_write(&nvm_lock);
-	list_for_each_entry(tt, &nvm_targets, list) {
+	list_for_each_entry(tt, &nvm_tgt_types, list) {
 		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
 
 		tgt->version[0] = tt->version[0];
@@ -1118,10 +1251,7 @@
 		return -EINVAL;
 	}
 
-	if (dev->mt) {
-		dev->mt->unregister_mgr(dev);
-		dev->mt = NULL;
-	}
+	nvm_free_mgr(dev);
 
 	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
 		return nvm_dev_factory(dev, fact.flags);
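
To show how the two new submission helpers differ, here is a hedged sketch of a synchronous single-page read using nvm_submit_ppa(), mirroring the sysblk usage elsewhere in this series (the helper name is hypothetical; the buffer is assumed to be at least dev->pfpg_size bytes):

#include <linux/lightnvm.h>

/* Illustrative only: one PPA, unfolded per plane by nvm_submit_ppa(). */
static int example_read_page(struct nvm_dev *dev, struct ppa_addr ppa, void *buf)
{
	return nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
			      buf, dev->pfpg_size);
}

For a caller-built list of addresses, nvm_submit_ppa_list() takes the list as-is instead of unfolding it across planes.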
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 72e124a..ec9fb68 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -129,27 +129,25 @@
 	return 0;
 }
 
-static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
-								void *private)
+static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
+							u8 *blks, int nr_blks)
 {
-	struct gen_nvm *gn = private;
 	struct nvm_dev *dev = gn->dev;
 	struct gen_lun *lun;
 	struct nvm_block *blk;
 	int i;
 
+	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+	if (nr_blks < 0)
+		return nr_blks;
+
 	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
 
-	for (i = 0; i < nr_blocks; i++) {
+	for (i = 0; i < nr_blks; i++) {
 		if (blks[i] == 0)
 			continue;
 
 		blk = &lun->vlun.blocks[i];
-		if (!blk) {
-			pr_err("gennvm: BB data is out of bounds.\n");
-			return -EINVAL;
-		}
-
 		list_move_tail(&blk->list, &lun->bb_list);
 		lun->vlun.nr_bad_blocks++;
 		lun->vlun.nr_free_blocks--;
@@ -216,13 +214,21 @@
 	struct gen_lun *lun;
 	struct nvm_block *block;
 	sector_t lun_iter, blk_iter, cur_block_id = 0;
-	int ret;
+	int ret, nr_blks;
+	u8 *blks;
+
+	nr_blks = dev->blks_per_lun * dev->plane_mode;
+	blks = kmalloc(nr_blks, GFP_KERNEL);
+	if (!blks)
+		return -ENOMEM;
 
 	gennvm_for_each_lun(gn, lun, lun_iter) {
 		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
 							dev->blks_per_lun);
-		if (!lun->vlun.blocks)
+		if (!lun->vlun.blocks) {
+			kfree(blks);
 			return -ENOMEM;
+		}
 
 		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
 			block = &lun->vlun.blocks[blk_iter];
@@ -246,14 +252,15 @@
 
 			ppa.ppa = 0;
 			ppa.g.ch = lun->vlun.chnl_id;
-			ppa.g.lun = lun->vlun.id;
-			ppa = generic_to_dev_addr(dev, ppa);
+			ppa.g.lun = lun->vlun.lun_id;
 
-			ret = dev->ops->get_bb_tbl(dev, ppa,
-						dev->blks_per_lun,
-						gennvm_block_bb, gn);
+			ret = nvm_get_bb_tbl(dev, ppa, blks);
 			if (ret)
-				pr_err("gennvm: could not read BB table\n");
+				pr_err("gennvm: could not get BB table\n");
+
+			ret = gennvm_block_bb(gn, ppa, blks, nr_blks);
+			if (ret)
+				pr_err("gennvm: BB table map failed\n");
 		}
 	}
 
@@ -266,6 +273,7 @@
 		}
 	}
 
+	kfree(blks);
 	return 0;
 }
 
@@ -399,64 +407,60 @@
 	spin_unlock(&vlun->lock);
 }
 
-static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
-								int type)
+static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
 {
 	struct gen_nvm *gn = dev->mp;
 	struct gen_lun *lun;
 	struct nvm_block *blk;
 
-	if (unlikely(ppa->g.ch > dev->nr_chnls ||
-					ppa->g.lun > dev->luns_per_chnl ||
-					ppa->g.blk > dev->blks_per_lun)) {
+	pr_debug("gennvm: ppa  (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
+			ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
+
+	if (unlikely(ppa.g.ch > dev->nr_chnls ||
+					ppa.g.lun > dev->luns_per_chnl ||
+					ppa.g.blk > dev->blks_per_lun)) {
 		WARN_ON_ONCE(1);
 		pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
-				ppa->g.ch, dev->nr_chnls,
-				ppa->g.lun, dev->luns_per_chnl,
-				ppa->g.blk, dev->blks_per_lun);
+				ppa.g.ch, dev->nr_chnls,
+				ppa.g.lun, dev->luns_per_chnl,
+				ppa.g.blk, dev->blks_per_lun);
 		return;
 	}
 
-	lun = &gn->luns[ppa->g.lun * ppa->g.ch];
-	blk = &lun->vlun.blocks[ppa->g.blk];
+	lun = &gn->luns[ppa.g.lun * ppa.g.ch];
+	blk = &lun->vlun.blocks[ppa.g.blk];
 
 	/* will be moved to bb list on put_blk from target */
 	blk->state = type;
 }
 
-/* mark block bad. It is expected the target recover from the error. */
+/*
+ * mark block bad in gennvm. It is expected that the target recovers separately
+ */
 static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
-	int i;
-
-	if (!dev->ops->set_bb_tbl)
-		return;
-
-	if (dev->ops->set_bb_tbl(dev, rqd, 1))
-		return;
+	int bit = -1;
+	int max_secs = dev->ops->max_phys_sect;
+	void *comp_bits = &rqd->ppa_status;
 
 	nvm_addr_to_generic_mode(dev, rqd);
 
 	/* look up blocks and mark them as bad */
-	if (rqd->nr_pages > 1)
-		for (i = 0; i < rqd->nr_pages; i++)
-			gennvm_blk_set_type(dev, &rqd->ppa_list[i],
-						NVM_BLK_ST_BAD);
-	else
-		gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
+	if (rqd->nr_ppas == 1) {
+		gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
+		return;
+	}
+
+	while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
+		gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
 }
 
 static void gennvm_end_io(struct nvm_rq *rqd)
 {
 	struct nvm_tgt_instance *ins = rqd->ins;
 
-	switch (rqd->error) {
-	case NVM_RSP_SUCCESS:
-	case NVM_RSP_ERR_EMPTYPAGE:
-		break;
-	case NVM_RSP_ERR_FAILWRITE:
+	if (rqd->error == NVM_RSP_ERR_FAILWRITE)
 		gennvm_mark_blk_bad(rqd->dev, rqd);
-	}
 
 	ins->tt->end_io(rqd);
 }
@@ -539,6 +543,8 @@
 	.submit_io		= gennvm_submit_io,
 	.erase_blk		= gennvm_erase_blk,
 
+	.mark_blk		= gennvm_mark_blk,
+
 	.get_lun		= gennvm_get_lun,
 	.reserve_lun		= gennvm_reserve_lun,
 	.release_lun		= gennvm_release_lun,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 3ab6495..2103e97 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -405,9 +405,8 @@
 									ws_gc);
 	struct rrpc *rrpc = gcb->rrpc;
 	struct rrpc_block *rblk = gcb->rblk;
+	struct rrpc_lun *rlun = rblk->rlun;
 	struct nvm_dev *dev = rrpc->dev;
-	struct nvm_lun *lun = rblk->parent->lun;
-	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
 
 	mempool_free(gcb, rrpc->gcb_pool);
 	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
@@ -508,9 +507,9 @@
 									ws_gc);
 	struct rrpc *rrpc = gcb->rrpc;
 	struct rrpc_block *rblk = gcb->rblk;
+	struct rrpc_lun *rlun = rblk->rlun;
 	struct nvm_lun *lun = rblk->parent->lun;
 	struct nvm_block *blk = rblk->parent;
-	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
 
 	spin_lock(&rlun->lock);
 	list_add_tail(&rblk->prio, &rlun->prio_list);
@@ -696,7 +695,7 @@
 {
 	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
 	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-	uint8_t npages = rqd->nr_pages;
+	uint8_t npages = rqd->nr_ppas;
 	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
 
 	if (bio_data_dir(rqd->bio) == WRITE)
@@ -711,8 +710,6 @@
 
 	if (npages > 1)
 		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
-	if (rqd->metadata)
-		nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
 
 	mempool_free(rqd, rrpc->rq_pool);
 }
@@ -886,7 +883,7 @@
 	bio_get(bio);
 	rqd->bio = bio;
 	rqd->ins = &rrpc->instance;
-	rqd->nr_pages = nr_pages;
+	rqd->nr_ppas = nr_pages;
 	rrq->flags = flags;
 
 	err = nvm_submit_io(rrpc->dev, rqd);
@@ -895,7 +892,7 @@
 		bio_put(bio);
 		if (!(flags & NVM_IOTYPE_GC)) {
 			rrpc_unlock_rq(rrpc, rqd);
-			if (rqd->nr_pages > 1)
+			if (rqd->nr_ppas > 1)
 				nvm_dev_dma_free(rrpc->dev,
 			rqd->ppa_list, rqd->dma_ppa_list);
 		}
@@ -1039,11 +1036,8 @@
 {
 	struct nvm_dev *dev = rrpc->dev;
 	sector_t i;
-	u64 slba;
 	int ret;
 
-	slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9);
-
 	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
 	if (!rrpc->trans_map)
 		return -ENOMEM;
@@ -1065,8 +1059,8 @@
 		return 0;
 
 	/* Bring up the mapping table from device */
-	ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update,
-									rrpc);
+	ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
+					rrpc_l2p_update, rrpc);
 	if (ret) {
 		pr_err("nvm: rrpc: could not read L2P table.\n");
 		return -EINVAL;
@@ -1207,10 +1201,6 @@
 
 		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
 		spin_lock_init(&rlun->lock);
-
-		rrpc->total_blocks += dev->blks_per_lun;
-		rrpc->nr_sects += dev->sec_per_lun;
-
 	}
 
 	return 0;
@@ -1224,18 +1214,24 @@
 	struct nvm_dev *dev = rrpc->dev;
 	struct nvmm_type *mt = dev->mt;
 	sector_t size = rrpc->nr_sects * dev->sec_size;
+	int ret;
 
 	size >>= 9;
 
-	return mt->get_area(dev, begin, size);
+	ret = mt->get_area(dev, begin, size);
+	if (!ret)
+		*begin >>= (ilog2(dev->sec_size) - 9);
+
+	return ret;
 }
 
 static void rrpc_area_free(struct rrpc *rrpc)
 {
 	struct nvm_dev *dev = rrpc->dev;
 	struct nvmm_type *mt = dev->mt;
+	sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
 
-	mt->put_area(dev, rrpc->soffset);
+	mt->put_area(dev, begin);
 }
 
 static void rrpc_free(struct rrpc *rrpc)
@@ -1268,7 +1264,7 @@
 	sector_t reserved, provisioned;
 
 	/* cur, gc, and two emergency blocks for each lun */
-	reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
+	reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
 	provisioned = rrpc->nr_sects - reserved;
 
 	if (reserved > rrpc->nr_sects) {
@@ -1388,6 +1384,8 @@
 	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
 
 	rrpc->nr_luns = lun_end - lun_begin + 1;
+	rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
+	rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
 
 	/* simple round-robin strategy */
 	atomic_set(&rrpc->next_lun, -1);
@@ -1468,12 +1466,12 @@
 
 static int __init rrpc_module_init(void)
 {
-	return nvm_register_target(&tt_rrpc);
+	return nvm_register_tgt_type(&tt_rrpc);
 }
 
 static void rrpc_module_exit(void)
 {
-	nvm_unregister_target(&tt_rrpc);
+	nvm_unregister_tgt_type(&tt_rrpc);
 }
 
 module_init(rrpc_module_init);
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 2653484..87e84b5 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -251,7 +251,7 @@
 static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
 {
 	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
-	uint8_t pages = rqd->nr_pages;
+	uint8_t pages = rqd->nr_ppas;
 
 	BUG_ON((r->l_start + pages) > rrpc->nr_sects);
 
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index 321de1f..994697a 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -93,12 +93,51 @@
 	s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
 }
 
-static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
-								void *private)
+static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+					u8 *blks, int nr_blks,
+					struct sysblk_scan *s)
 {
-	struct sysblk_scan *s = private;
+	struct ppa_addr *sppa;
+	int i, blkid = 0;
+
+	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+	if (nr_blks < 0)
+		return nr_blks;
+
+	for (i = 0; i < nr_blks; i++) {
+		if (blks[i] == NVM_BLK_T_HOST)
+			return -EEXIST;
+
+		if (blks[i] != NVM_BLK_T_FREE)
+			continue;
+
+		sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
+		sppa->g.ch = ppa.g.ch;
+		sppa->g.lun = ppa.g.lun;
+		sppa->g.blk = i;
+		s->nr_ppas++;
+		blkid++;
+
+		pr_debug("nvm: use (%u %u %u) as sysblk\n",
+					sppa->g.ch, sppa->g.lun, sppa->g.blk);
+		if (blkid > MAX_BLKS_PR_SYSBLK - 1)
+			return 0;
+	}
+
+	pr_err("nvm: sysblk failed get sysblk\n");
+	return -EINVAL;
+}
+
+static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+					u8 *blks, int nr_blks,
+					struct sysblk_scan *s)
+{
 	int i, nr_sysblk = 0;
 
+	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+	if (nr_blks < 0)
+		return nr_blks;
+
 	for (i = 0; i < nr_blks; i++) {
 		if (blks[i] != NVM_BLK_T_HOST)
 			continue;
@@ -119,26 +158,42 @@
 }
 
 static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
-				struct ppa_addr *ppas, nvm_bb_update_fn *fn)
+				struct ppa_addr *ppas, int get_free)
 {
-	struct ppa_addr dppa;
-	int i, ret;
+	int i, nr_blks, ret = 0;
+	u8 *blks;
 
 	s->nr_ppas = 0;
+	nr_blks = dev->blks_per_lun * dev->plane_mode;
+
+	blks = kmalloc(nr_blks, GFP_KERNEL);
+	if (!blks)
+		return -ENOMEM;
 
 	for (i = 0; i < s->nr_rows; i++) {
-		dppa = generic_to_dev_addr(dev, ppas[i]);
 		s->row = i;
 
-		ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
+		ret = nvm_get_bb_tbl(dev, ppas[i], blks);
 		if (ret) {
 			pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
 							ppas[i].g.ch,
 							ppas[i].g.blk);
-			return ret;
+			goto err_get;
 		}
+
+		if (get_free)
+			ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks,
+									s);
+		else
+			ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks,
+									s);
+
+		if (ret)
+			goto err_get;
 	}
 
+err_get:
+	kfree(blks);
 	return ret;
 }
 
@@ -154,13 +209,12 @@
 						struct nvm_system_block *sblk)
 {
 	struct nvm_system_block *cur;
-	int pg, cursz, ret, found = 0;
+	int pg, ret, found = 0;
 
 	/* the full buffer for a flash page is allocated. Only the first of it
 	 * contains the system block information
 	 */
-	cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
-	cur = kmalloc(cursz, GFP_KERNEL);
+	cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
 	if (!cur)
 		return -ENOMEM;
 
@@ -169,7 +223,7 @@
 		ppa->g.pg = ppa_to_slc(dev, pg);
 
 		ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
-								cur, cursz);
+							cur, dev->pfpg_size);
 		if (ret) {
 			if (ret == NVM_RSP_ERR_EMPTYPAGE) {
 				pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
@@ -223,10 +277,10 @@
 
 	memset(&rqd, 0, sizeof(struct nvm_rq));
 
-	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
+	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
 	nvm_generic_to_addr_mode(dev, &rqd);
 
-	ret = dev->ops->set_bb_tbl(dev, &rqd, type);
+	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
 	nvm_free_rqd_ppalist(dev, &rqd);
 	if (ret) {
 		pr_err("nvm: sysblk failed bb mark\n");
@@ -236,50 +290,17 @@
 	return 0;
 }
 
-static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
-								void *private)
-{
-	struct sysblk_scan *s = private;
-	struct ppa_addr *sppa;
-	int i, blkid = 0;
-
-	for (i = 0; i < nr_blks; i++) {
-		if (blks[i] == NVM_BLK_T_HOST)
-			return -EEXIST;
-
-		if (blks[i] != NVM_BLK_T_FREE)
-			continue;
-
-		sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
-		sppa->g.ch = ppa.g.ch;
-		sppa->g.lun = ppa.g.lun;
-		sppa->g.blk = i;
-		s->nr_ppas++;
-		blkid++;
-
-		pr_debug("nvm: use (%u %u %u) as sysblk\n",
-					sppa->g.ch, sppa->g.lun, sppa->g.blk);
-		if (blkid > MAX_BLKS_PR_SYSBLK - 1)
-			return 0;
-	}
-
-	pr_err("nvm: sysblk failed get sysblk\n");
-	return -EINVAL;
-}
-
 static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
 							struct sysblk_scan *s)
 {
 	struct nvm_system_block nvmsb;
 	void *buf;
-	int i, sect, ret, bufsz;
+	int i, sect, ret = 0;
 	struct ppa_addr *ppas;
 
 	nvm_cpu_to_sysblk(&nvmsb, info);
 
-	/* buffer for flash page */
-	bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
-	buf = kzalloc(bufsz, GFP_KERNEL);
+	buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 	memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
@@ -309,7 +330,7 @@
 		}
 
 		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
-						NVM_IO_SLC_MODE, buf, bufsz);
+					NVM_IO_SLC_MODE, buf, dev->pfpg_size);
 		if (ret) {
 			pr_err("nvm: sysblk failed program (%u %u %u)\n",
 							ppas[0].g.ch,
@@ -319,7 +340,7 @@
 		}
 
 		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
-						NVM_IO_SLC_MODE, buf, bufsz);
+					NVM_IO_SLC_MODE, buf, dev->pfpg_size);
 		if (ret) {
 			pr_err("nvm: sysblk failed read (%u %u %u)\n",
 							ppas[0].g.ch,
@@ -388,7 +409,7 @@
 	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
 
 	mutex_lock(&dev->mlock);
-	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
 	if (ret)
 		goto err_sysblk;
 
@@ -448,7 +469,7 @@
 	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
 
 	mutex_lock(&dev->mlock);
-	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
 	if (ret)
 		goto err_sysblk;
 
@@ -546,7 +567,7 @@
 	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
 
 	mutex_lock(&dev->mlock);
-	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
+	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
 	if (ret)
 		goto err_mark;
 
@@ -561,52 +582,49 @@
 	return ret;
 }
 
-struct factory_blks {
-	struct nvm_dev *dev;
-	int flags;
-	unsigned long *blks;
-};
-
 static int factory_nblks(int nblks)
 {
 	/* Round up to nearest BITS_PER_LONG */
 	return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
 }
 
-static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
+static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
 {
 	int nblks = factory_nblks(dev->blks_per_lun);
 
-	return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
+	return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
 								BITS_PER_LONG;
 }
 
-static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
-								void *private)
+static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+					u8 *blks, int nr_blks,
+					unsigned long *blk_bitmap, int flags)
 {
-	struct factory_blks *f = private;
-	struct nvm_dev *dev = f->dev;
 	int i, lunoff;
 
-	lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
+	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+	if (nr_blks < 0)
+		return nr_blks;
+
+	lunoff = factory_blk_offset(dev, ppa);
 
 	/* non-set bits correspond to the block must be erased */
 	for (i = 0; i < nr_blks; i++) {
 		switch (blks[i]) {
 		case NVM_BLK_T_FREE:
-			if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
-				set_bit(i, &f->blks[lunoff]);
+			if (flags & NVM_FACTORY_ERASE_ONLY_USER)
+				set_bit(i, &blk_bitmap[lunoff]);
 			break;
 		case NVM_BLK_T_HOST:
-			if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
-				set_bit(i, &f->blks[lunoff]);
+			if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
+				set_bit(i, &blk_bitmap[lunoff]);
 			break;
 		case NVM_BLK_T_GRWN_BAD:
-			if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
-				set_bit(i, &f->blks[lunoff]);
+			if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
+				set_bit(i, &blk_bitmap[lunoff]);
 			break;
 		default:
-			set_bit(i, &f->blks[lunoff]);
+			set_bit(i, &blk_bitmap[lunoff]);
 			break;
 		}
 	}
@@ -615,7 +633,7 @@
 }
 
 static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
-					int max_ppas, struct factory_blks *f)
+					int max_ppas, unsigned long *blk_bitmap)
 {
 	struct ppa_addr ppa;
 	int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
@@ -623,111 +641,95 @@
 
 	while (!done) {
 		done = 1;
-		for (ch = 0; ch < dev->nr_chnls; ch++) {
-			for (lun = 0; lun < dev->luns_per_chnl; lun++) {
-				idx = factory_blk_offset(dev, ch, lun);
-				offset = &f->blks[idx];
+		nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+			idx = factory_blk_offset(dev, ppa);
+			offset = &blk_bitmap[idx];
 
-				blkid = find_first_zero_bit(offset,
-							dev->blks_per_lun);
-				if (blkid >= dev->blks_per_lun)
-					continue;
-				set_bit(blkid, offset);
+			blkid = find_first_zero_bit(offset,
+						dev->blks_per_lun);
+			if (blkid >= dev->blks_per_lun)
+				continue;
+			set_bit(blkid, offset);
 
-				ppa.ppa = 0;
-				ppa.g.ch = ch;
-				ppa.g.lun = lun;
-				ppa.g.blk = blkid;
-				pr_debug("nvm: erase ppa (%u %u %u)\n",
-								ppa.g.ch,
-								ppa.g.lun,
-								ppa.g.blk);
+			ppa.g.blk = blkid;
+			pr_debug("nvm: erase ppa (%u %u %u)\n",
+							ppa.g.ch,
+							ppa.g.lun,
+							ppa.g.blk);
 
-				erase_list[ppa_cnt] = ppa;
-				ppa_cnt++;
-				done = 0;
+			erase_list[ppa_cnt] = ppa;
+			ppa_cnt++;
+			done = 0;
 
-				if (ppa_cnt == max_ppas)
-					return ppa_cnt;
-			}
+			if (ppa_cnt == max_ppas)
+				return ppa_cnt;
 		}
 	}
 
 	return ppa_cnt;
 }
 
-static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
-					nvm_bb_update_fn *fn, void *priv)
+static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
+								int flags)
 {
-	struct ppa_addr dev_ppa;
-	int ret;
-
-	dev_ppa = generic_to_dev_addr(dev, ppa);
-
-	ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
-	if (ret)
-		pr_err("nvm: failed bb tbl for ch%u lun%u\n",
-							ppa.g.ch, ppa.g.blk);
-	return ret;
-}
-
-static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
-{
-	int ch, lun, ret;
 	struct ppa_addr ppa;
+	int ch, lun, nr_blks, ret = 0;
+	u8 *blks;
 
-	ppa.ppa = 0;
-	for (ch = 0; ch < dev->nr_chnls; ch++) {
-		for (lun = 0; lun < dev->luns_per_chnl; lun++) {
-			ppa.g.ch = ch;
-			ppa.g.lun = lun;
+	nr_blks = dev->blks_per_lun * dev->plane_mode;
+	blks = kmalloc(nr_blks, GFP_KERNEL);
+	if (!blks)
+		return -ENOMEM;
 
-			ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
-									f);
-			if (ret)
-				return ret;
-		}
+	nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+		ret = nvm_get_bb_tbl(dev, ppa, blks);
+		if (ret)
+			pr_err("nvm: failed bb tbl for ch%u lun%u\n",
+							ppa.g.ch, ppa.g.blk);
+
+		ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
+									flags);
+		if (ret)
+			break;
 	}
 
-	return 0;
+	kfree(blks);
+	return ret;
 }
 
 int nvm_dev_factory(struct nvm_dev *dev, int flags)
 {
-	struct factory_blks f;
 	struct ppa_addr *ppas;
 	int ppa_cnt, ret = -ENOMEM;
 	int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
 	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
 	struct sysblk_scan s;
+	unsigned long *blk_bitmap;
 
-	f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
+	blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
 								GFP_KERNEL);
-	if (!f.blks)
+	if (!blk_bitmap)
 		return ret;
 
 	ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
 	if (!ppas)
 		goto err_blks;
 
-	f.dev = dev;
-	f.flags = flags;
-
 	/* create list of blks to be erased */
-	ret = nvm_fact_select_blks(dev, &f);
+	ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
 	if (ret)
 		goto err_ppas;
 
 	/* continue to erase until list of blks until empty */
-	while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
+	while ((ppa_cnt =
+			nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
 		nvm_erase_ppa(dev, ppas, ppa_cnt);
 
 	/* mark host reserved blocks free */
 	if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
 		nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
 		mutex_lock(&dev->mlock);
-		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
-							sysblk_get_host_blks);
+		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
 		if (!ret)
 			ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
 		mutex_unlock(&dev->mlock);
@@ -735,7 +737,7 @@
 err_ppas:
 	kfree(ppas);
 err_blks:
-	kfree(f.blks);
+	kfree(blk_bitmap);
 	return ret;
 }
 EXPORT_SYMBOL(nvm_dev_factory);
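
The bad-block handling above follows one pattern in gennvm.c, sysblk.c and the factory path: fetch the per-plane table, then fold it to one state per virtual block. A condensed sketch of that pattern (function name hypothetical, error handling trimmed):

#include <linux/slab.h>
#include <linux/lightnvm.h>

static int example_scan_lun(struct nvm_dev *dev, struct ppa_addr lun_ppa)
{
	int nr_blks = dev->blks_per_lun * dev->plane_mode;
	u8 *blks = kmalloc(nr_blks, GFP_KERNEL);
	int ret;

	if (!blks)
		return -ENOMEM;

	ret = nvm_get_bb_tbl(dev, lun_ppa, blks);		/* per-plane table */
	if (!ret)
		ret = nvm_bb_tbl_fold(dev, blks, nr_blks);	/* fold in place */

	/* on success, ret == dev->blks_per_lun and blks[] holds one state per block */
	kfree(blks);
	return ret;
}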
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index dc11bbf..58d0472 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -46,7 +46,6 @@
 				       size_t count, loff_t *ppos)
 {
 	struct mbox_test_device *tdev = filp->private_data;
-	int ret;
 
 	if (!tdev->tx_channel) {
 		dev_err(tdev->dev, "Channel cannot do Tx\n");
@@ -60,17 +59,20 @@
 		return -EINVAL;
 	}
 
-	tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
-	if (!tdev->signal)
-		return -ENOMEM;
+	/* Only allocate memory if we need to */
+	if (!tdev->signal) {
+		tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
+		if (!tdev->signal)
+			return -ENOMEM;
+	}
 
-	ret = copy_from_user(tdev->signal, userbuf, count);
-	if (ret) {
+	if (copy_from_user(tdev->signal, userbuf, count)) {
 		kfree(tdev->signal);
+		tdev->signal = NULL;
 		return -EFAULT;
 	}
 
-	return ret < 0 ? ret : count;
+	return count;
 }
 
 static const struct file_operations mbox_test_signal_ops = {
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index bd07f39..dd2afbc 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -189,8 +189,8 @@
 	int i;
 
 	ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+	if (!ctx)
+		return -ENOMEM;
 
 	platform_set_drvdata(pdev, ctx);
 
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6a4811f..4a36632 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -375,13 +375,13 @@
 
 	if (!np) {
 		dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
-		return ERR_PTR(-ENOSYS);
+		return ERR_PTR(-EINVAL);
 	}
 
 	if (!of_get_property(np, "mbox-names", NULL)) {
 		dev_err(cl->dev,
 			"%s() requires an \"mbox-names\" property\n", __func__);
-		return ERR_PTR(-ENOSYS);
+		return ERR_PTR(-EINVAL);
 	}
 
 	of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 0ddf638..043828d 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -361,8 +361,6 @@
 		struct acpi_generic_address *db_reg;
 		struct acpi_pcct_hw_reduced *pcct_ss;
 		pcc_mbox_channels[i].con_priv = pcct_entry;
-		pcct_entry = (struct acpi_subtable_header *)
-			((unsigned long) pcct_entry + pcct_entry->length);
 
 		/* If doorbell is in system memory cache the virt address */
 		pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
@@ -370,6 +368,8 @@
 		if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
 			pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
 							db_reg->bit_width/8);
+		pcct_entry = (struct acpi_subtable_header *)
+			((unsigned long) pcct_entry + pcct_entry->length);
 	}
 
 	pcc_mbox_ctrl.num_chans = count;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a296425..f5dbb4e 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -816,7 +816,7 @@
 	clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
 	set_bit(QUEUE_FLAG_DISCARD,	&d->disk->queue->queue_flags);
 
-	blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+	blk_queue_write_cache(q, true, true);
 
 	return 0;
 }
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7df6b4f..d8129ec 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -46,7 +46,7 @@
  * allocated while we're using it
  */
 static int bitmap_checkpage(struct bitmap_counts *bitmap,
-			    unsigned long page, int create)
+			    unsigned long page, int create, int no_hijack)
 __releases(bitmap->lock)
 __acquires(bitmap->lock)
 {
@@ -90,6 +90,9 @@
 
 	if (mappage == NULL) {
 		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
+		/* We don't support hijack for cluster raid */
+		if (no_hijack)
+			return -ENOMEM;
 		/* failed - set the hijacked flag so that we can use the
 		 * pointer as a counter */
 		if (!bitmap->bp[page].map)
@@ -322,7 +325,7 @@
 {
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
-	page_cache_release(page);
+	put_page(page);
 }
 static void free_buffers(struct page *page)
 {
@@ -756,7 +759,7 @@
 		bytes += sizeof(bitmap_super_t);
 
 	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
-	offset = slot_number * (num_pages - 1);
+	offset = slot_number * num_pages;
 
 	store->filemap = kmalloc(sizeof(struct page *)
 				 * num_pages, GFP_KERNEL);
@@ -900,6 +903,11 @@
 	struct page *page;
 	void *kaddr;
 	unsigned long chunk = block >> bitmap->counts.chunkshift;
+	struct bitmap_storage *store = &bitmap->storage;
+	unsigned long node_offset = 0;
+
+	if (mddev_is_clustered(bitmap->mddev))
+		node_offset = bitmap->cluster_slot * store->file_pages;
 
 	page = filemap_get_page(&bitmap->storage, chunk);
 	if (!page)
@@ -915,7 +923,7 @@
 	kunmap_atomic(kaddr);
 	pr_debug("set file bit %lu page %lu\n", bit, page->index);
 	/* record page number so it gets flushed to disk when unplug occurs */
-	set_page_attr(bitmap, page->index, BITMAP_PAGE_DIRTY);
+	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
 }
 
 static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
@@ -924,6 +932,11 @@
 	struct page *page;
 	void *paddr;
 	unsigned long chunk = block >> bitmap->counts.chunkshift;
+	struct bitmap_storage *store = &bitmap->storage;
+	unsigned long node_offset = 0;
+
+	if (mddev_is_clustered(bitmap->mddev))
+		node_offset = bitmap->cluster_slot * store->file_pages;
 
 	page = filemap_get_page(&bitmap->storage, chunk);
 	if (!page)
@@ -935,8 +948,8 @@
 	else
 		clear_bit_le(bit, paddr);
 	kunmap_atomic(paddr);
-	if (!test_page_attr(bitmap, page->index, BITMAP_PAGE_NEEDWRITE)) {
-		set_page_attr(bitmap, page->index, BITMAP_PAGE_PENDING);
+	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
+		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
 		bitmap->allclean = 0;
 	}
 }
@@ -1321,7 +1334,7 @@
 	sector_t csize;
 	int err;
 
-	err = bitmap_checkpage(bitmap, page, create);
+	err = bitmap_checkpage(bitmap, page, create, 0);
 
 	if (bitmap->bp[page].hijacked ||
 	    bitmap->bp[page].map == NULL)
@@ -1594,6 +1607,27 @@
 }
 EXPORT_SYMBOL(bitmap_cond_end_sync);
 
+void bitmap_sync_with_cluster(struct mddev *mddev,
+			      sector_t old_lo, sector_t old_hi,
+			      sector_t new_lo, sector_t new_hi)
+{
+	struct bitmap *bitmap = mddev->bitmap;
+	sector_t sector, blocks = 0;
+
+	for (sector = old_lo; sector < new_lo; ) {
+		bitmap_end_sync(bitmap, sector, &blocks, 0);
+		sector += blocks;
+	}
+	WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
+
+	for (sector = old_hi; sector < new_hi; ) {
+		bitmap_start_sync(bitmap, sector, &blocks, 0);
+		sector += blocks;
+	}
+	WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
+}
+EXPORT_SYMBOL(bitmap_sync_with_cluster);
+
 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
 {
 	/* For each chunk covered by any of these sectors, set the
@@ -1673,6 +1707,9 @@
 	if (!bitmap) /* there was no bitmap */
 		return;
 
+	if (bitmap->sysfs_can_clear)
+		sysfs_put(bitmap->sysfs_can_clear);
+
 	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
 		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
 		md_cluster_stop(bitmap->mddev);
@@ -1712,15 +1749,13 @@
 	if (mddev->thread)
 		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
 
-	if (bitmap->sysfs_can_clear)
-		sysfs_put(bitmap->sysfs_can_clear);
-
 	bitmap_free(bitmap);
 }
 
 /*
  * initialize the bitmap structure
  * if this returns an error, bitmap_destroy must be called to do clean up
+ * once mddev->bitmap is set
  */
 struct bitmap *bitmap_create(struct mddev *mddev, int slot)
 {
@@ -1813,6 +1848,9 @@
 	if (!bitmap)
 		goto out;
 
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
+
 	/* Clear out old bitmap info first:  Either there is none, or we
 	 * are resuming after someone else has possibly changed things,
 	 * so we should forget old cached info.
@@ -1865,8 +1903,10 @@
 	struct bitmap_counts *counts;
 	struct bitmap *bitmap = bitmap_create(mddev, slot);
 
-	if (IS_ERR(bitmap))
+	if (IS_ERR(bitmap)) {
+		bitmap_free(bitmap);
 		return PTR_ERR(bitmap);
+	}
 
 	rv = bitmap_init_from_disk(bitmap, 0);
 	if (rv)
@@ -1887,14 +1927,14 @@
 
 	if (clear_bits) {
 		bitmap_update_sb(bitmap);
-		/* Setting this for the ev_page should be enough.
-		 * And we do not require both write_all and PAGE_DIRT either
-		 */
+		/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
+		 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
 		for (i = 0; i < bitmap->storage.file_pages; i++)
-			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
-		bitmap_write_all(bitmap);
+			if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
+				set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
 		bitmap_unplug(bitmap);
 	}
+	bitmap_unplug(mddev->bitmap);
 	*low = lo;
 	*high = hi;
 err:
@@ -2029,6 +2069,35 @@
 		     chunks << chunkshift);
 
 	spin_lock_irq(&bitmap->counts.lock);
+	/* For cluster raid, need to pre-allocate bitmap */
+	if (mddev_is_clustered(bitmap->mddev)) {
+		unsigned long page;
+		for (page = 0; page < pages; page++) {
+			ret = bitmap_checkpage(&bitmap->counts, page, 1, 1);
+			if (ret) {
+				unsigned long k;
+
+				/* deallocate the page memory */
+				for (k = 0; k < page; k++) {
+					kfree(new_bp[k].map);
+				}
+
+				/* restore some fields from old_counts */
+				bitmap->counts.bp = old_counts.bp;
+				bitmap->counts.pages = old_counts.pages;
+				bitmap->counts.missing_pages = old_counts.pages;
+				bitmap->counts.chunkshift = old_counts.chunkshift;
+				bitmap->counts.chunks = old_counts.chunks;
+				bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
+									     BITMAP_BLOCK_SHIFT);
+				blocks = old_counts.chunks << old_counts.chunkshift;
+				pr_err("Could not pre-allocate in-memory bitmap for cluster raid\n");
+				break;
+			} else
+				bitmap->counts.bp[page].count += 1;
+		}
+	}
+
 	for (block = 0; block < blocks; ) {
 		bitmap_counter_t *bmc_old, *bmc_new;
 		int set;
@@ -2170,14 +2239,14 @@
 				else {
 					mddev->bitmap = bitmap;
 					rv = bitmap_load(mddev);
-					if (rv) {
-						bitmap_destroy(mddev);
+					if (rv)
 						mddev->bitmap_info.offset = 0;
-					}
 				}
 				mddev->pers->quiesce(mddev, 0);
-				if (rv)
+				if (rv) {
+					bitmap_destroy(mddev);
 					return rv;
+				}
 			}
 		}
 	}
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 5e3fcd6..5b6dd63 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -258,6 +258,9 @@
 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
 void bitmap_close_sync(struct bitmap *bitmap);
 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
+void bitmap_sync_with_cluster(struct mddev *mddev,
+			      sector_t old_lo, sector_t old_hi,
+			      sector_t new_lo, sector_t new_hi);
 
 void bitmap_unplug(struct bitmap *bitmap);
 void bitmap_daemon_work(struct mddev *mddev);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 27f2ef3..3970cda 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,39 +867,55 @@
 	return 0;
 }
 
-#define WRITE_LOCK(cmd)	\
-	down_write(&cmd->root_lock); \
-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-		up_write(&cmd->root_lock); \
-		return -EINVAL; \
+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
+{
+	down_write(&cmd->root_lock);
+	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+		up_write(&cmd->root_lock);
+		return false;
 	}
+	return true;
+}
 
-#define WRITE_LOCK_VOID(cmd) \
-	down_write(&cmd->root_lock); \
-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-		up_write(&cmd->root_lock); \
-		return; \
-	}
+#define WRITE_LOCK(cmd)				\
+	do {					\
+		if (!cmd_write_lock((cmd)))	\
+			return -EINVAL;		\
+	} while(0)
+
+#define WRITE_LOCK_VOID(cmd)			\
+	do {					\
+		if (!cmd_write_lock((cmd)))	\
+			return;			\
+	} while(0)
 
 #define WRITE_UNLOCK(cmd) \
-	up_write(&cmd->root_lock)
+	up_write(&(cmd)->root_lock)
 
-#define READ_LOCK(cmd) \
-	down_read(&cmd->root_lock); \
-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-		up_read(&cmd->root_lock); \
-		return -EINVAL; \
+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+{
+	down_read(&cmd->root_lock);
+	if (cmd->fail_io) {
+		up_read(&cmd->root_lock);
+		return false;
 	}
+	return true;
+}
 
-#define READ_LOCK_VOID(cmd)	\
-	down_read(&cmd->root_lock); \
-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
-		up_read(&cmd->root_lock); \
-		return; \
-	}
+#define READ_LOCK(cmd)				\
+	do {					\
+		if (!cmd_read_lock((cmd)))	\
+			return -EINVAL;		\
+	} while(0)
+
+#define READ_LOCK_VOID(cmd)			\
+	do {					\
+		if (!cmd_read_lock((cmd)))	\
+			return;			\
+	} while(0)
 
 #define READ_UNLOCK(cmd) \
-	up_read(&cmd->root_lock)
+	up_read(&(cmd)->root_lock)
 
 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
 {
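The dm-cache-metadata change above replaces open-coded multi-statement lock macros with static cmd_write_lock()/cmd_read_lock() helpers wrapped in do { ... } while (0). The wrapper is what makes the macros safe: a macro that expands to several bare statements silently misbehaves as the body of an unbraced if/else. A small standalone C program illustrating the idiom (not dm code, just the pattern):

#include <stdio.h>
#include <stdbool.h>

static bool try_lock(int *lock)
{
	if (*lock)		/* already held: fail */
		return false;
	*lock = 1;
	return true;
}

/* Safe form: expands to a single statement, so
 * "if (x) LOCK_OR_RETURN(l); else ..." parses the way it reads. */
#define LOCK_OR_RETURN(l)		\
	do {				\
		if (!try_lock(l))	\
			return -1;	\
	} while (0)

static int demo(int *lock, bool want)
{
	if (want)
		LOCK_OR_RETURN(lock);
	else
		printf("lock not requested\n");
	return 0;
}

int main(void)
{
	int lock = 0;

	printf("first:  %d\n", demo(&lock, true));	/* takes the lock: 0 */
	printf("second: %d\n", demo(&lock, true));	/* lock held: -1 */
	return 0;
}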
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 2adf81d..2c7ca25 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1723,7 +1723,7 @@
 	if (!dmi) {
 		unsigned noio_flag;
 		noio_flag = memalloc_noio_save();
-		dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
+		dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
 		memalloc_noio_restore(noio_flag);
 		if (dmi)
 			*param_flags |= DM_PARAMS_VMALLOC;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 677ba22..52baf8a 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -76,26 +76,18 @@
 
 	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
 
-	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
-
-	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
 	struct priority_group *current_pg;
 	struct priority_group *next_pg;	/* Switch to this PG if set */
 
-	bool queue_io:1;		/* Must we queue all I/O? */
-	bool queue_if_no_path:1;	/* Queue I/O if last path fails? */
-	bool saved_queue_if_no_path:1;	/* Saved state during suspension */
-	bool retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
-	bool pg_init_disabled:1;	/* pg_init is not currently allowed */
-	bool pg_init_required:1;	/* pg_init needs calling? */
-	bool pg_init_delay_retry:1;	/* Delay pg_init retry? */
+	unsigned long flags;		/* Multipath state flags */
 
 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
-	unsigned pg_init_count;		/* Number of times pg_init called */
 	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
 
-	struct work_struct trigger_event;
+	atomic_t nr_valid_paths;	/* Total number of usable paths */
+	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
+	atomic_t pg_init_count;		/* Number of times pg_init called */
 
 	/*
 	 * We must use a mempool of dm_mpath_io structs so that we
@@ -104,6 +96,7 @@
 	mempool_t *mpio_pool;
 
 	struct mutex work_mutex;
+	struct work_struct trigger_event;
 };
 
 /*
@@ -122,6 +115,17 @@
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
 
+/*-----------------------------------------------
+ * Multipath state flags.
+ *-----------------------------------------------*/
+
+#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
+#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
+#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
+#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
+#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
+#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
+#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
 
 /*-----------------------------------------------
  * Allocation routines
@@ -189,7 +193,10 @@
 	if (m) {
 		INIT_LIST_HEAD(&m->priority_groups);
 		spin_lock_init(&m->lock);
-		m->queue_io = true;
+		set_bit(MPATHF_QUEUE_IO, &m->flags);
+		atomic_set(&m->nr_valid_paths, 0);
+		atomic_set(&m->pg_init_in_progress, 0);
+		atomic_set(&m->pg_init_count, 0);
 		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 		INIT_WORK(&m->trigger_event, trigger_event);
 		init_waitqueue_head(&m->pg_init_wait);
@@ -274,17 +281,17 @@
 	struct pgpath *pgpath;
 	unsigned long pg_init_delay = 0;
 
-	if (m->pg_init_in_progress || m->pg_init_disabled)
+	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 		return 0;
 
-	m->pg_init_count++;
-	m->pg_init_required = false;
+	atomic_inc(&m->pg_init_count);
+	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 
 	/* Check here to reset pg_init_required */
 	if (!m->current_pg)
 		return 0;
 
-	if (m->pg_init_delay_retry)
+	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
 		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
@@ -293,65 +300,99 @@
 			continue;
 		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 				       pg_init_delay))
-			m->pg_init_in_progress++;
+			atomic_inc(&m->pg_init_in_progress);
 	}
-	return m->pg_init_in_progress;
+	return atomic_read(&m->pg_init_in_progress);
 }
 
-static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
+static int pg_init_all_paths(struct multipath *m)
 {
-	m->current_pg = pgpath->pg;
+	int r;
+	unsigned long flags;
+
+	spin_lock_irqsave(&m->lock, flags);
+	r = __pg_init_all_paths(m);
+	spin_unlock_irqrestore(&m->lock, flags);
+
+	return r;
+}
+
+static void __switch_pg(struct multipath *m, struct priority_group *pg)
+{
+	m->current_pg = pg;
 
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
 	if (m->hw_handler_name) {
-		m->pg_init_required = true;
-		m->queue_io = true;
+		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
+		set_bit(MPATHF_QUEUE_IO, &m->flags);
 	} else {
-		m->pg_init_required = false;
-		m->queue_io = false;
+		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
+		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 	}
 
-	m->pg_init_count = 0;
+	atomic_set(&m->pg_init_count, 0);
 }
 
-static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
-			       size_t nr_bytes)
+static struct pgpath *choose_path_in_pg(struct multipath *m,
+					struct priority_group *pg,
+					size_t nr_bytes)
 {
+	unsigned long flags;
 	struct dm_path *path;
+	struct pgpath *pgpath;
 
 	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 	if (!path)
-		return -ENXIO;
+		return ERR_PTR(-ENXIO);
 
-	m->current_pgpath = path_to_pgpath(path);
+	pgpath = path_to_pgpath(path);
 
-	if (m->current_pg != pg)
-		__switch_pg(m, m->current_pgpath);
+	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+		/* Only update current_pgpath if pg changed */
+		spin_lock_irqsave(&m->lock, flags);
+		m->current_pgpath = pgpath;
+		__switch_pg(m, pg);
+		spin_unlock_irqrestore(&m->lock, flags);
+	}
 
-	return 0;
+	return pgpath;
 }
 
-static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
+static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 {
+	unsigned long flags;
 	struct priority_group *pg;
+	struct pgpath *pgpath;
 	bool bypassed = true;
 
-	if (!m->nr_valid_paths) {
-		m->queue_io = false;
+	if (!atomic_read(&m->nr_valid_paths)) {
+		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 		goto failed;
 	}
 
 	/* Were we instructed to switch PG? */
-	if (m->next_pg) {
+	if (lockless_dereference(m->next_pg)) {
+		spin_lock_irqsave(&m->lock, flags);
 		pg = m->next_pg;
+		if (!pg) {
+			spin_unlock_irqrestore(&m->lock, flags);
+			goto check_current_pg;
+		}
 		m->next_pg = NULL;
-		if (!__choose_path_in_pg(m, pg, nr_bytes))
-			return;
+		spin_unlock_irqrestore(&m->lock, flags);
+		pgpath = choose_path_in_pg(m, pg, nr_bytes);
+		if (!IS_ERR_OR_NULL(pgpath))
+			return pgpath;
 	}
 
 	/* Don't change PG until it has no remaining paths */
-	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
-		return;
+check_current_pg:
+	pg = lockless_dereference(m->current_pg);
+	if (pg) {
+		pgpath = choose_path_in_pg(m, pg, nr_bytes);
+		if (!IS_ERR_OR_NULL(pgpath))
+			return pgpath;
+	}
 
 	/*
 	 * Loop through priority groups until we find a valid path.
@@ -363,34 +404,38 @@
 		list_for_each_entry(pg, &m->priority_groups, list) {
 			if (pg->bypassed == bypassed)
 				continue;
-			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
+			pgpath = choose_path_in_pg(m, pg, nr_bytes);
+			if (!IS_ERR_OR_NULL(pgpath)) {
 				if (!bypassed)
-					m->pg_init_delay_retry = true;
-				return;
+					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+				return pgpath;
 			}
 		}
 	} while (bypassed--);
 
 failed:
+	spin_lock_irqsave(&m->lock, flags);
 	m->current_pgpath = NULL;
 	m->current_pg = NULL;
+	spin_unlock_irqrestore(&m->lock, flags);
+
+	return NULL;
 }
 
 /*
  * Check whether bios must be queued in the device-mapper core rather
  * than here in the target.
  *
- * m->lock must be held on entry.
- *
  * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
  * same value then we are not between multipath_presuspend()
  * and multipath_resume() calls and we have no need to check
  * for the DMF_NOFLUSH_SUSPENDING flag.
  */
-static int __must_push_back(struct multipath *m)
+static int must_push_back(struct multipath *m)
 {
-	return (m->queue_if_no_path ||
-		(m->queue_if_no_path != m->saved_queue_if_no_path &&
+	return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
+		((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
+		  test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
 		 dm_noflush_suspending(m->ti)));
 }
 
@@ -408,35 +453,31 @@
 	struct block_device *bdev;
 	struct dm_mpath_io *mpio;
 
-	spin_lock_irq(&m->lock);
-
 	/* Do we need to select a new pgpath? */
-	if (!m->current_pgpath || !m->queue_io)
-		__choose_pgpath(m, nr_bytes);
-
-	pgpath = m->current_pgpath;
+	pgpath = lockless_dereference(m->current_pgpath);
+	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
+		pgpath = choose_pgpath(m, nr_bytes);
 
 	if (!pgpath) {
-		if (!__must_push_back(m))
+		if (!must_push_back(m))
 			r = -EIO;	/* Failed */
-		goto out_unlock;
-	} else if (m->queue_io || m->pg_init_required) {
-		__pg_init_all_paths(m);
-		goto out_unlock;
+		return r;
+	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
+		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+		pg_init_all_paths(m);
+		return r;
 	}
 
 	mpio = set_mpio(m, map_context);
 	if (!mpio)
 		/* ENOMEM, requeue */
-		goto out_unlock;
+		return r;
 
 	mpio->pgpath = pgpath;
 	mpio->nr_bytes = nr_bytes;
 
 	bdev = pgpath->path.dev->bdev;
 
-	spin_unlock_irq(&m->lock);
-
 	if (clone) {
 		/*
 		 * Old request-based interface: allocated clone is passed in.
@@ -468,11 +509,6 @@
 					      &pgpath->path,
 					      nr_bytes);
 	return DM_MAPIO_REMAPPED;
-
-out_unlock:
-	spin_unlock_irq(&m->lock);
-
-	return r;
 }
 
 static int multipath_map(struct dm_target *ti, struct request *clone,
@@ -503,11 +539,22 @@
 
 	spin_lock_irqsave(&m->lock, flags);
 
-	if (save_old_value)
-		m->saved_queue_if_no_path = m->queue_if_no_path;
+	if (save_old_value) {
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+		else
+			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+	} else {
+		if (queue_if_no_path)
+			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+		else
+			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+	}
+	if (queue_if_no_path)
+		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 	else
-		m->saved_queue_if_no_path = queue_if_no_path;
-	m->queue_if_no_path = queue_if_no_path;
+		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (!queue_if_no_path)
@@ -600,10 +647,10 @@
 		goto bad;
 	}
 
-	if (m->retain_attached_hw_handler || m->hw_handler_name)
+	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
 		q = bdev_get_queue(p->path.dev->bdev);
 
-	if (m->retain_attached_hw_handler) {
+	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
 		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 		if (attached_handler_name) {
@@ -808,7 +855,7 @@
 		}
 
 		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
-			m->retain_attached_hw_handler = true;
+			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 			continue;
 		}
 
@@ -884,6 +931,7 @@
 	/* parse the priority groups */
 	while (as.argc) {
 		struct priority_group *pg;
+		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
 
 		pg = parse_priority_group(&as, m);
 		if (IS_ERR(pg)) {
@@ -891,7 +939,9 @@
 			goto bad;
 		}
 
-		m->nr_valid_paths += pg->nr_pgpaths;
+		nr_valid_paths += pg->nr_pgpaths;
+		atomic_set(&m->nr_valid_paths, nr_valid_paths);
+
 		list_add_tail(&pg->list, &m->priority_groups);
 		pg_count++;
 		pg->pg_num = pg_count;
@@ -921,19 +971,14 @@
 static void multipath_wait_for_pg_init_completion(struct multipath *m)
 {
 	DECLARE_WAITQUEUE(wait, current);
-	unsigned long flags;
 
 	add_wait_queue(&m->pg_init_wait, &wait);
 
 	while (1) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
-		spin_lock_irqsave(&m->lock, flags);
-		if (!m->pg_init_in_progress) {
-			spin_unlock_irqrestore(&m->lock, flags);
+		if (!atomic_read(&m->pg_init_in_progress))
 			break;
-		}
-		spin_unlock_irqrestore(&m->lock, flags);
 
 		io_schedule();
 	}
@@ -944,20 +989,16 @@
 
 static void flush_multipath_work(struct multipath *m)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&m->lock, flags);
-	m->pg_init_disabled = true;
-	spin_unlock_irqrestore(&m->lock, flags);
+	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+	smp_mb__after_atomic();
 
 	flush_workqueue(kmpath_handlerd);
 	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
 	flush_work(&m->trigger_event);
 
-	spin_lock_irqsave(&m->lock, flags);
-	m->pg_init_disabled = false;
-	spin_unlock_irqrestore(&m->lock, flags);
+	clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+	smp_mb__after_atomic();
 }
 
 static void multipath_dtr(struct dm_target *ti)
@@ -987,13 +1028,13 @@
 	pgpath->is_active = false;
 	pgpath->fail_count++;
 
-	m->nr_valid_paths--;
+	atomic_dec(&m->nr_valid_paths);
 
 	if (pgpath == m->current_pgpath)
 		m->current_pgpath = NULL;
 
 	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
-		      pgpath->path.dev->name, m->nr_valid_paths);
+		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
 
 	schedule_work(&m->trigger_event);
 
@@ -1011,6 +1052,7 @@
 	int r = 0, run_queue = 0;
 	unsigned long flags;
 	struct multipath *m = pgpath->pg->m;
+	unsigned nr_valid_paths;
 
 	spin_lock_irqsave(&m->lock, flags);
 
@@ -1025,16 +1067,17 @@
 
 	pgpath->is_active = true;
 
-	if (!m->nr_valid_paths++) {
+	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
+	if (nr_valid_paths == 1) {
 		m->current_pgpath = NULL;
 		run_queue = 1;
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
 		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
-			m->pg_init_in_progress++;
+			atomic_inc(&m->pg_init_in_progress);
 	}
 
 	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
-		      pgpath->path.dev->name, m->nr_valid_paths);
+		       pgpath->path.dev->name, nr_valid_paths);
 
 	schedule_work(&m->trigger_event);
 
@@ -1152,8 +1195,9 @@
 
 	spin_lock_irqsave(&m->lock, flags);
 
-	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
-		m->pg_init_required = true;
+	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
+	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
+		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 	else
 		limit_reached = true;
 
@@ -1219,19 +1263,23 @@
 			m->current_pgpath = NULL;
 			m->current_pg = NULL;
 		}
-	} else if (!m->pg_init_required)
+	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
 		pg->bypassed = false;
 
-	if (--m->pg_init_in_progress)
+	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
 		/* Activations of other paths are still on going */
 		goto out;
 
-	if (m->pg_init_required) {
-		m->pg_init_delay_retry = delay_retry;
+	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+		if (delay_retry)
+			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+		else
+			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+
 		if (__pg_init_all_paths(m))
 			goto out;
 	}
-	m->queue_io = false;
+	clear_bit(MPATHF_QUEUE_IO, &m->flags);
 
 	/*
 	 * Wake up any thread waiting to suspend.
@@ -1287,7 +1335,6 @@
 	 * clone bios for it and resubmit it later.
 	 */
 	int r = DM_ENDIO_REQUEUE;
-	unsigned long flags;
 
 	if (!error && !clone->errors)
 		return 0;	/* I/O complete */
@@ -1298,17 +1345,15 @@
 	if (mpio->pgpath)
 		fail_path(mpio->pgpath);
 
-	spin_lock_irqsave(&m->lock, flags);
-	if (!m->nr_valid_paths) {
-		if (!m->queue_if_no_path) {
-			if (!__must_push_back(m))
+	if (!atomic_read(&m->nr_valid_paths)) {
+		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+			if (!must_push_back(m))
 				r = -EIO;
 		} else {
 			if (error == -EBADE)
 				r = error;
 		}
 	}
-	spin_unlock_irqrestore(&m->lock, flags);
 
 	return r;
 }
@@ -1364,11 +1409,12 @@
 static void multipath_resume(struct dm_target *ti)
 {
 	struct multipath *m = ti->private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&m->lock, flags);
-	m->queue_if_no_path = m->saved_queue_if_no_path;
-	spin_unlock_irqrestore(&m->lock, flags);
+	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
+		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+	else
+		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+	smp_mb__after_atomic();
 }
 
 /*
@@ -1402,19 +1448,20 @@
 
 	/* Features */
 	if (type == STATUSTYPE_INFO)
-		DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
+		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
+		       atomic_read(&m->pg_init_count));
 	else {
-		DMEMIT("%u ", m->queue_if_no_path +
+		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
 			      (m->pg_init_retries > 0) * 2 +
 			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
-			      m->retain_attached_hw_handler);
-		if (m->queue_if_no_path)
+			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags));
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			DMEMIT("queue_if_no_path ");
 		if (m->pg_init_retries)
 			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
 		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
 			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
-		if (m->retain_attached_hw_handler)
+		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
 			DMEMIT("retain_attached_hw_handler ");
 	}
 
@@ -1563,18 +1610,17 @@
 		struct block_device **bdev, fmode_t *mode)
 {
 	struct multipath *m = ti->private;
-	unsigned long flags;
+	struct pgpath *current_pgpath;
 	int r;
 
-	spin_lock_irqsave(&m->lock, flags);
+	current_pgpath = lockless_dereference(m->current_pgpath);
+	if (!current_pgpath)
+		current_pgpath = choose_pgpath(m, 0);
 
-	if (!m->current_pgpath)
-		__choose_pgpath(m, 0);
-
-	if (m->current_pgpath) {
-		if (!m->queue_io) {
-			*bdev = m->current_pgpath->path.dev->bdev;
-			*mode = m->current_pgpath->path.dev->mode;
+	if (current_pgpath) {
+		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
+			*bdev = current_pgpath->path.dev->bdev;
+			*mode = current_pgpath->path.dev->mode;
 			r = 0;
 		} else {
 			/* pg_init has not started or completed */
@@ -1582,23 +1628,19 @@
 		}
 	} else {
 		/* No path is available */
-		if (m->queue_if_no_path)
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			r = -ENOTCONN;
 		else
 			r = -EIO;
 	}
 
-	spin_unlock_irqrestore(&m->lock, flags);
-
 	if (r == -ENOTCONN) {
-		spin_lock_irqsave(&m->lock, flags);
-		if (!m->current_pg) {
+		if (!lockless_dereference(m->current_pg)) {
 			/* Path status changed, redo selection */
-			__choose_pgpath(m, 0);
+			(void) choose_pgpath(m, 0);
 		}
-		if (m->pg_init_required)
-			__pg_init_all_paths(m);
-		spin_unlock_irqrestore(&m->lock, flags);
+		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
+			pg_init_all_paths(m);
 		dm_table_run_md_queue_async(m->ti->table);
 	}
 
@@ -1649,39 +1691,37 @@
 {
 	bool busy = false, has_active = false;
 	struct multipath *m = ti->private;
-	struct priority_group *pg;
+	struct priority_group *pg, *next_pg;
 	struct pgpath *pgpath;
-	unsigned long flags;
-
-	spin_lock_irqsave(&m->lock, flags);
 
 	/* pg_init in progress or no paths available */
-	if (m->pg_init_in_progress ||
-	    (!m->nr_valid_paths && m->queue_if_no_path)) {
-		busy = true;
-		goto out;
-	}
+	if (atomic_read(&m->pg_init_in_progress) ||
+	    (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)))
+		return true;
+
 	/* Guess which priority_group will be used at next mapping time */
-	if (unlikely(!m->current_pgpath && m->next_pg))
-		pg = m->next_pg;
-	else if (likely(m->current_pg))
-		pg = m->current_pg;
-	else
+	pg = lockless_dereference(m->current_pg);
+	next_pg = lockless_dereference(m->next_pg);
+	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+		pg = next_pg;
+
+	if (!pg) {
 		/*
 		 * We don't know which pg will be used at next mapping time.
-		 * We don't call __choose_pgpath() here to avoid to trigger
+		 * We don't call choose_pgpath() here to avoid triggering
 		 * pg_init just by busy checking.
 		 * So we don't know whether underlying devices we will be using
 		 * at next mapping time are busy or not. Just try mapping.
 		 */
-		goto out;
+		return busy;
+	}
 
 	/*
 	 * If there is one non-busy active path at least, the path selector
 	 * will be able to select it. So we consider such a pg as not busy.
 	 */
 	busy = true;
-	list_for_each_entry(pgpath, &pg->pgpaths, list)
+	list_for_each_entry(pgpath, &pg->pgpaths, list) {
 		if (pgpath->is_active) {
 			has_active = true;
 			if (!pgpath_busy(pgpath)) {
@@ -1689,17 +1729,16 @@
 				break;
 			}
 		}
+	}
 
-	if (!has_active)
+	if (!has_active) {
 		/*
 		 * No active path in this pg, so this pg won't be used and
 		 * the current_pg will be changed at next mapping time.
 		 * We need to try mapping to determine it.
 		 */
 		busy = false;
-
-out:
-	spin_unlock_irqrestore(&m->lock, flags);
+	}
 
 	return busy;
 }
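The dm-mpath rework above folds seven bool bitfields into numbered bits of a single unsigned long flags word and turns the path counters into atomic_t, so the hot paths can test and flip state with set_bit()/clear_bit()/test_bit() and atomic_inc()/atomic_read() instead of taking m->lock. A rough userspace analogue of that flag-word idiom, using C11 atomics in place of the kernel bitops (names here are invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MPF_QUEUE_IO		0
#define MPF_QUEUE_IF_NO_PATH	1

struct mp {
	atomic_ulong flags;		/* one word holds all state bits */
	atomic_int nr_valid_paths;	/* former plain counter */
};

static void set_flag(struct mp *m, int bit)
{
	atomic_fetch_or(&m->flags, 1UL << bit);
}

static void clear_flag(struct mp *m, int bit)
{
	atomic_fetch_and(&m->flags, ~(1UL << bit));
}

static bool test_flag(struct mp *m, int bit)
{
	return atomic_load(&m->flags) & (1UL << bit);
}

int main(void)
{
	struct mp m = { 0 };

	set_flag(&m, MPF_QUEUE_IO);
	atomic_fetch_add(&m.nr_valid_paths, 1);

	printf("queue_io=%d paths=%d\n",
	       test_flag(&m, MPF_QUEUE_IO),
	       atomic_load(&m.nr_valid_paths));

	clear_flag(&m, MPF_QUEUE_IO);
	printf("queue_io=%d\n", test_flag(&m, MPF_QUEUE_IO));
	return 0;
}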
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a090121..5253274 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1037,6 +1037,11 @@
 	if (!mddev->events && super_init_validation(mddev, rdev))
 		return -EINVAL;
 
+	if (le32_to_cpu(sb->features)) {
+		rs->ti->error = "Unable to assemble array: No feature flags supported yet";
+		return -EINVAL;
+	}
+
 	/* Enable bitmap creation for RAID levels != 0 */
 	mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
 	rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
@@ -1718,7 +1723,7 @@
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 7, 0},
+	.version = {1, 8, 0},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f9e8f0b..626a5ec 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1348,13 +1348,13 @@
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
 				sector_t start, sector_t len, void *data)
 {
-	unsigned flush = (*(unsigned *)data);
+	unsigned long flush = (unsigned long) data;
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && (q->flush_flags & flush);
+	return q && (q->queue_flags & flush);
 }
 
-static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 {
 	struct dm_target *ti;
 	unsigned i = 0;
@@ -1375,7 +1375,7 @@
 			return true;
 
 		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, device_flush_capable, &flush))
+		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
 			return true;
 	}
 
@@ -1506,7 +1506,7 @@
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
-	unsigned flush = 0;
+	bool wc = false, fua = false;
 
 	/*
 	 * Copy table's limits to the DM device's request_queue
@@ -1518,12 +1518,12 @@
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
-	if (dm_table_supports_flush(t, REQ_FLUSH)) {
-		flush |= REQ_FLUSH;
-		if (dm_table_supports_flush(t, REQ_FUA))
-			flush |= REQ_FUA;
+	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
+		wc = true;
+		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
+			fua = true;
 	}
-	blk_queue_flush(q, flush);
+	blk_queue_write_cache(q, wc, fua);
 
 	if (!dm_table_discard_zeroes_data(t))
 		q->limits.discard_zeroes_data = 0;
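Note the trick in the device_flush_capable() change above: instead of passing the address of a stack variable through iterate_devices()'s opaque void *data argument, the flag value itself (which fits in an unsigned long) is cast into the pointer and unpacked on the other side. A small standalone sketch of that value-in-pointer idiom; the callback and names are made up for illustration:

#include <stdio.h>

typedef int (*iter_fn)(void *data);

static int run_iter(iter_fn fn, void *data)
{
	return fn(data);
}

static int has_flag(void *data)
{
	unsigned long flag = (unsigned long)data;	/* unpack the value */
	unsigned long queue_flags = 1UL << 3;		/* pretend device state */

	return (queue_flags & flag) != 0;
}

int main(void)
{
	unsigned long want = 1UL << 3;

	/* pack the value into the pointer argument */
	printf("supported=%d\n", run_iter(has_flag, (void *)want));
	return 0;
}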
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 92237b6..fc803d5 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -322,56 +322,6 @@
 
 /*----------------------------------------------------------------*/
 
-/**
- * __blkdev_issue_discard_async - queue a discard with async completion
- * @bdev:	blockdev to issue discard for
- * @sector:	start sector
- * @nr_sects:	number of sectors to discard
- * @gfp_mask:	memory allocation flags (for bio_alloc)
- * @flags:	BLKDEV_IFL_* flags to control behaviour
- * @parent_bio: parent discard bio that all sub discards get chained to
- *
- * Description:
- *    Asynchronously issue a discard request for the sectors in question.
- */
-static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
-					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
-					struct bio *parent_bio)
-{
-	struct request_queue *q = bdev_get_queue(bdev);
-	int type = REQ_WRITE | REQ_DISCARD;
-	struct bio *bio;
-
-	if (!q || !nr_sects)
-		return -ENXIO;
-
-	if (!blk_queue_discard(q))
-		return -EOPNOTSUPP;
-
-	if (flags & BLKDEV_DISCARD_SECURE) {
-		if (!blk_queue_secdiscard(q))
-			return -EOPNOTSUPP;
-		type |= REQ_SECURE;
-	}
-
-	/*
-	 * Required bio_put occurs in bio_endio thanks to bio_chain below
-	 */
-	bio = bio_alloc(gfp_mask, 1);
-	if (!bio)
-		return -ENOMEM;
-
-	bio_chain(bio, parent_bio);
-
-	bio->bi_iter.bi_sector = sector;
-	bio->bi_bdev = bdev;
-	bio->bi_iter.bi_size = nr_sects << 9;
-
-	submit_bio(type, bio);
-
-	return 0;
-}
-
 static bool block_size_is_power_of_two(struct pool *pool)
 {
 	return pool->sectors_per_block_shift >= 0;
@@ -384,14 +334,55 @@
 		(b * pool->sectors_per_block);
 }
 
-static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
-			 struct bio *parent_bio)
+/*----------------------------------------------------------------*/
+
+struct discard_op {
+	struct thin_c *tc;
+	struct blk_plug plug;
+	struct bio *parent_bio;
+	struct bio *bio;
+};
+
+static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
 {
+	BUG_ON(!parent);
+
+	op->tc = tc;
+	blk_start_plug(&op->plug);
+	op->parent_bio = parent;
+	op->bio = NULL;
+}
+
+static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
+{
+	struct thin_c *tc = op->tc;
 	sector_t s = block_to_sectors(tc->pool, data_b);
 	sector_t len = block_to_sectors(tc->pool, data_e - data_b);
 
-	return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len,
-					    GFP_NOWAIT, 0, parent_bio);
+	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
+				      GFP_NOWAIT, REQ_WRITE | REQ_DISCARD, &op->bio);
+}
+
+static void end_discard(struct discard_op *op, int r)
+{
+	if (op->bio) {
+		/*
+		 * Even if one of the calls to issue_discard failed, we
+		 * need to wait for the chain to complete.
+		 */
+		bio_chain(op->bio, op->parent_bio);
+		submit_bio(REQ_WRITE | REQ_DISCARD, op->bio);
+	}
+
+	blk_finish_plug(&op->plug);
+
+	/*
+	 * Even if r is set, there could be sub discards in flight that we
+	 * need to wait for.
+	 */
+	if (r && !op->parent_bio->bi_error)
+		op->parent_bio->bi_error = r;
+	bio_endio(op->parent_bio);
 }
 
 /*----------------------------------------------------------------*/
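The discard_op helpers introduced above bracket a run of discards: begin_discard() starts a plug and records the parent bio, issue_discard() hands block ranges to __blkdev_issue_discard(), which may chain several sub-bios onto op->bio, and end_discard() chains that bio to the parent and completes the parent exactly once, propagating any error. A condensed sketch of the intended call sequence, mirroring the callers updated later in this patch (this is not extra code from the patch itself):

static void discard_range(struct thin_c *tc, struct bio *parent,
			  dm_block_t data_b, dm_block_t data_e)
{
	struct discard_op op;
	int r;

	begin_discard(&op, tc, parent);		/* start plug, record parent */
	r = issue_discard(&op, data_b, data_e);	/* may build a chain of bios */
	end_discard(&op, r);			/* submit chain, end parent bio */
}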
@@ -632,7 +623,7 @@
 {
 	int error = get_pool_io_error_code(pool);
 
-	return error_retry_list_with_code(pool, error);
+	error_retry_list_with_code(pool, error);
 }
 
 /*
@@ -1006,24 +997,28 @@
 	mempool_free(m, tc->pool->mapping_pool);
 }
 
-static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
+/*----------------------------------------------------------------*/
+
+static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
 {
 	/*
 	 * We've already unmapped this range of blocks, but before we
 	 * passdown we have to check that these blocks are now unused.
 	 */
-	int r;
+	int r = 0;
 	bool used = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
+	struct discard_op op;
 
+	begin_discard(&op, tc, m->bio);
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
 			r = dm_pool_block_is_used(pool->pmd, b, &used);
 			if (r)
-				return r;
+				goto out;
 
 			if (!used)
 				break;
@@ -1036,20 +1031,20 @@
 		for (e = b + 1; e != end; e++) {
 			r = dm_pool_block_is_used(pool->pmd, e, &used);
 			if (r)
-				return r;
+				goto out;
 
 			if (used)
 				break;
 		}
 
-		r = issue_discard(tc, b, e, m->bio);
+		r = issue_discard(&op, b, e);
 		if (r)
-			return r;
+			goto out;
 
 		b = e;
 	}
-
-	return 0;
+out:
+	end_discard(&op, r);
 }
 
 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
@@ -1059,20 +1054,21 @@
 	struct pool *pool = tc->pool;
 
 	r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
-	if (r)
+	if (r) {
 		metadata_operation_failed(pool, "dm_thin_remove_range", r);
+		bio_io_error(m->bio);
 
-	else if (m->maybe_shared)
-		r = passdown_double_checking_shared_status(m);
-	else
-		r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);
+	} else if (m->maybe_shared) {
+		passdown_double_checking_shared_status(m);
 
-	/*
-	 * Even if r is set, there could be sub discards in flight that we
-	 * need to wait for.
-	 */
-	m->bio->bi_error = r;
-	bio_endio(m->bio);
+	} else {
+		struct discard_op op;
+		begin_discard(&op, tc, m->bio);
+		r = issue_discard(&op, m->data_block,
+				  m->data_block + (m->virt_end - m->virt_begin));
+		end_discard(&op, r);
+	}
+
 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, pool->mapping_pool);
 }
@@ -1494,17 +1490,6 @@
 		pool->process_prepared_discard(m);
 }
 
-/*
- * __bio_inc_remaining() is used to defer parent bios's end_io until
- * we _know_ all chained sub range discard bios have completed.
- */
-static inline void __bio_inc_remaining(struct bio *bio)
-{
-	bio->bi_flags |= (1 << BIO_CHAIN);
-	smp_mb__before_atomic();
-	atomic_inc(&bio->__bi_remaining);
-}
-
 static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
 				 struct bio *bio)
 {
@@ -1554,13 +1539,13 @@
 
 		/*
 		 * The parent bio must not complete before sub discard bios are
-		 * chained to it (see __blkdev_issue_discard_async's bio_chain)!
+		 * chained to it (see end_discard's bio_chain)!
 		 *
 		 * This per-mapping bi_remaining increment is paired with
 		 * the implicit decrement that occurs via bio_endio() in
-		 * process_prepared_discard_{passdown,no_passdown}.
+		 * end_discard().
 		 */
-		__bio_inc_remaining(bio);
+		bio_inc_remaining(bio);
 		if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
 			pool->process_prepared_discard(m);
 
@@ -3899,7 +3884,7 @@
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 18, 0},
+	.version = {1, 19, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -4273,7 +4258,7 @@
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 18, 0},
+	.version = {1, 19, 0},
 	.module	= THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index be49057..1b2f962 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -674,7 +674,7 @@
 	mempool_free(io, md->io_pool);
 }
 
-static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
+static void free_tio(struct dm_target_io *tio)
 {
 	bio_put(&tio->clone);
 }
@@ -1055,7 +1055,7 @@
 		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
 		disable_write_same(md);
 
-	free_tio(md, tio);
+	free_tio(tio);
 	dec_pending(io, error);
 }
 
@@ -1517,7 +1517,6 @@
 {
 	int r;
 	sector_t sector;
-	struct mapped_device *md;
 	struct bio *clone = &tio->clone;
 	struct dm_target *ti = tio->ti;
 
@@ -1540,9 +1539,8 @@
 		generic_make_request(clone);
 	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
 		/* error the io and bail out, or requeue it if needed */
-		md = tio->io->md;
 		dec_pending(tio->io, r);
-		free_tio(md, tio);
+		free_tio(tio);
 	} else if (r != DM_MAPIO_SUBMITTED) {
 		DMWARN("unimplemented target map return value: %d", r);
 		BUG();
@@ -1662,8 +1660,10 @@
 		tio = alloc_tio(ci, ti, target_bio_nr);
 		tio->len_ptr = len;
 		r = clone_bio(tio, bio, sector, *len);
-		if (r < 0)
+		if (r < 0) {
+			free_tio(tio);
 			break;
+		}
 		__map_bio(tio);
 	}
 
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index dd97d42..41573f1 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -61,6 +61,10 @@
  * the lock.
  */
 #define		MD_CLUSTER_SEND_LOCKED_ALREADY		5
+/* We should only receive messages after the node has joined the cluster
+ * and set up all the related info such as the bitmap and personality */
+#define		MD_CLUSTER_ALREADY_IN_CLUSTER		6
+#define		MD_CLUSTER_PENDING_RECV_EVENT		7
 
 
 struct md_cluster_info {
@@ -85,6 +89,9 @@
 	struct completion newdisk_completion;
 	wait_queue_head_t wait;
 	unsigned long state;
+	/* record the region in RESYNCING message */
+	sector_t sync_low;
+	sector_t sync_hi;
 };
 
 enum msg_type {
@@ -284,11 +291,14 @@
 			goto dlm_unlock;
 		}
 		if (hi > 0) {
-			/* TODO:Wait for current resync to get over */
-			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			if (lo < mddev->recovery_cp)
 				mddev->recovery_cp = lo;
-			md_check_recovery(mddev);
+			/* wake up thread to continue resync in case resync
+			 * is not finished */
+			if (mddev->recovery_cp != MaxSector) {
+			    set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+			    md_wakeup_thread(mddev->thread);
+			}
 		}
 dlm_unlock:
 		dlm_unlock_sync(bm_lockres);
@@ -370,8 +380,12 @@
 	struct dlm_lock_resource *res = arg;
 	struct md_cluster_info *cinfo = res->mddev->cluster_info;
 
-	if (mode == DLM_LOCK_EX)
-		md_wakeup_thread(cinfo->recv_thread);
+	if (mode == DLM_LOCK_EX) {
+		if (test_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state))
+			md_wakeup_thread(cinfo->recv_thread);
+		else
+			set_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state);
+	}
 }
 
 static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
@@ -408,6 +422,30 @@
 		md_wakeup_thread(mddev->thread);
 		return;
 	}
+
+	/*
+	 * The bitmaps are not the same on different nodes.
+	 * If RESYNCING is happening on one node, then the
+	 * node which received the RESYNCING message will
+	 * probably perform a resync of the region [lo, hi]
+	 * again, so we can reduce the resync time a lot if
+	 * we ensure that the bitmaps among the different
+	 * nodes match up well.
+	 *
+	 * sync_low/hi records the region which arrived in
+	 * the previous RESYNCING message.
+	 *
+	 * Call bitmap_sync_with_cluster to clear
+	 * NEEDED_MASK and set RESYNC_MASK, since the
+	 * resync thread is running on another node,
+	 * so we don't need to do the resync again
+	 * for the same section */
+	bitmap_sync_with_cluster(mddev, cinfo->sync_low,
+					cinfo->sync_hi,
+					lo, hi);
+	cinfo->sync_low = lo;
+	cinfo->sync_hi = hi;
+
 	s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
 	if (!s)
 		return;
@@ -482,11 +520,13 @@
 			__func__, __LINE__, le32_to_cpu(msg->raid_slot));
 }
 
-static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
+static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
 {
+	int ret = 0;
+
 	if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
 		"node %d received it's own msg\n", le32_to_cpu(msg->slot)))
-		return;
+		return -1;
 	switch (le32_to_cpu(msg->type)) {
 	case METADATA_UPDATED:
 		process_metadata_update(mddev, msg);
@@ -509,9 +549,11 @@
 		__recover_slot(mddev, le32_to_cpu(msg->slot));
 		break;
 	default:
+		ret = -1;
 		pr_warn("%s:%d Received unknown message from %d\n",
 			__func__, __LINE__, msg->slot);
 	}
+	return ret;
 }
 
 /*
@@ -535,7 +577,9 @@
 
 	/* read lvb and wake up thread to process this message_lockres */
 	memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg));
-	process_recvd_msg(thread->mddev, &msg);
+	ret = process_recvd_msg(thread->mddev, &msg);
+	if (ret)
+		goto out;
 
 	/*release CR on ack_lockres*/
 	ret = dlm_unlock_sync(ack_lockres);
@@ -549,6 +593,7 @@
 	ret = dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
 	if (unlikely(ret != 0))
 		pr_info("lock CR on ack failed return %d\n", ret);
+out:
 	/*release CR on message_lockres*/
 	ret = dlm_unlock_sync(message_lockres);
 	if (unlikely(ret != 0))
@@ -778,17 +823,24 @@
 	cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
 	if (!cinfo->token_lockres)
 		goto err;
-	cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
-	if (!cinfo->ack_lockres)
-		goto err;
 	cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
 	if (!cinfo->no_new_dev_lockres)
 		goto err;
 
+	ret = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
+	if (ret) {
+		ret = -EAGAIN;
+		pr_err("md-cluster: can't join cluster to avoid lock issue\n");
+		goto err;
+	}
+	cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
+	if (!cinfo->ack_lockres)
+		goto err;
 	/* get sync CR lock on ACK. */
 	if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
 		pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
 				ret);
+	dlm_unlock_sync(cinfo->token_lockres);
 	/* get sync CR lock on no-new-dev. */
 	if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR))
 		pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!(%d)\n", ret);
@@ -809,12 +861,10 @@
 	if (!cinfo->resync_lockres)
 		goto err;
 
-	ret = gather_all_resync_info(mddev, nodes);
-	if (ret)
-		goto err;
-
 	return 0;
 err:
+	md_unregister_thread(&cinfo->recovery_thread);
+	md_unregister_thread(&cinfo->recv_thread);
 	lockres_free(cinfo->message_lockres);
 	lockres_free(cinfo->token_lockres);
 	lockres_free(cinfo->ack_lockres);
@@ -828,6 +878,19 @@
 	return ret;
 }
 
+static void load_bitmaps(struct mddev *mddev, int total_slots)
+{
+	struct md_cluster_info *cinfo = mddev->cluster_info;
+
+	/* load all the node's bitmap info for resync */
+	if (gather_all_resync_info(mddev, total_slots))
+		pr_err("md-cluster: failed to gather all resync infos\n");
+	set_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state);
+	/* wake up recv thread in case something need to be handled */
+	if (test_and_clear_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state))
+		md_wakeup_thread(cinfo->recv_thread);
+}
+
 static void resync_bitmap(struct mddev *mddev)
 {
 	struct md_cluster_info *cinfo = mddev->cluster_info;
@@ -937,7 +1000,6 @@
 static int resync_start(struct mddev *mddev)
 {
 	struct md_cluster_info *cinfo = mddev->cluster_info;
-	cinfo->resync_lockres->flags |= DLM_LKF_NOQUEUE;
 	return dlm_lock_sync(cinfo->resync_lockres, DLM_LOCK_EX);
 }
 
@@ -967,7 +1029,6 @@
 static int resync_finish(struct mddev *mddev)
 {
 	struct md_cluster_info *cinfo = mddev->cluster_info;
-	cinfo->resync_lockres->flags &= ~DLM_LKF_NOQUEUE;
 	dlm_unlock_sync(cinfo->resync_lockres);
 	return resync_info_update(mddev, 0, 0);
 }
@@ -1171,6 +1232,7 @@
 	.add_new_disk_cancel = add_new_disk_cancel,
 	.new_disk_ack = new_disk_ack,
 	.remove_disk = remove_disk,
+	.load_bitmaps = load_bitmaps,
 	.gather_bitmaps = gather_bitmaps,
 	.lock_all_bitmaps = lock_all_bitmaps,
 	.unlock_all_bitmaps = unlock_all_bitmaps,
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 45ce6c9..e765499 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -23,6 +23,7 @@
 	void (*add_new_disk_cancel)(struct mddev *mddev);
 	int (*new_disk_ack)(struct mddev *mddev, bool ack);
 	int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
+	void (*load_bitmaps)(struct mddev *mddev, int total_slots);
 	int (*gather_bitmaps)(struct md_rdev *rdev);
 	int (*lock_all_bitmaps)(struct mddev *mddev);
 	void (*unlock_all_bitmaps)(struct mddev *mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c068f17..866825f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -284,6 +284,8 @@
 	 * go away inside make_request
 	 */
 	sectors = bio_sectors(bio);
+	/* bio could be mergeable after being passed to the underlying layer */
+	bio->bi_rw &= ~REQ_NOMERGE;
 	mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
@@ -305,7 +307,7 @@
  */
 void mddev_suspend(struct mddev *mddev)
 {
-	WARN_ON_ONCE(current == mddev->thread->tsk);
+	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
 	if (mddev->suspended++)
 		return;
 	synchronize_rcu();
@@ -718,6 +720,7 @@
 
 	if (atomic_dec_and_test(&mddev->pending_writes))
 		wake_up(&mddev->sb_wait);
+	rdev_dec_pending(rdev, mddev);
 	bio_put(bio);
 }
 
@@ -732,6 +735,8 @@
 	 */
 	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
 
+	atomic_inc(&rdev->nr_pending);
+
 	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
 	bio->bi_iter.bi_sector = sector;
 	bio_add_page(bio, page, size, 0);
@@ -2286,19 +2291,24 @@
 		return;
 	}
 
+repeat:
 	if (mddev_is_clustered(mddev)) {
 		if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
 			force_change = 1;
+		if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+			nospares = 1;
 		ret = md_cluster_ops->metadata_update_start(mddev);
 		/* Has someone else has updated the sb */
 		if (!does_sb_need_changing(mddev)) {
 			if (ret == 0)
 				md_cluster_ops->metadata_update_cancel(mddev);
-			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+			bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
+							 BIT(MD_CHANGE_DEVS) |
+							 BIT(MD_CHANGE_CLEAN));
 			return;
 		}
 	}
-repeat:
+
 	/* First make sure individual recovery_offsets are correct */
 	rdev_for_each(rdev, mddev) {
 		if (rdev->raid_disk >= 0 &&
@@ -2425,15 +2435,14 @@
 	md_super_wait(mddev);
 	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
 
-	spin_lock(&mddev->lock);
+	if (mddev_is_clustered(mddev) && ret == 0)
+		md_cluster_ops->metadata_update_finish(mddev);
+
 	if (mddev->in_sync != sync_req ||
-	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+	    !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
+			       BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
 		/* have to write it out again */
-		spin_unlock(&mddev->lock);
 		goto repeat;
-	}
-	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
-	spin_unlock(&mddev->lock);
 	wake_up(&mddev->sb_wait);
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
@@ -2447,9 +2456,6 @@
 		clear_bit(BlockedBadBlocks, &rdev->flags);
 		wake_up(&rdev->blocked_wait);
 	}
-
-	if (mddev_is_clustered(mddev) && ret == 0)
-		md_cluster_ops->metadata_update_finish(mddev);
 }
 EXPORT_SYMBOL(md_update_sb);
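The md_update_sb() rework above leans on two atomic bitmask helpers: set_mask_bits(ptr, mask, bits), which atomically clears the mask bits and sets the given bits in one step (mask is 0 here, so it simply sets both change flags), and bit_clear_unless(ptr, clear, test), which clears the clear bits only when none of the test bits are set and reports whether it did; here MD_CHANGE_PENDING is dropped unless MD_CHANGE_DEVS or MD_CHANGE_CLEAN was raised again, in which case the superblock write is repeated. A userspace compare-and-swap model of that bit_clear_unless behaviour, as inferred from its use in this hunk (not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Clear the "clear" bits in *flags unless any "test" bit is set;
 * return true when the clear actually happened.  This only models the
 * semantics implied by the md_update_sb() hunk above. */
static bool bit_clear_unless_model(atomic_ulong *flags,
				   unsigned long clear, unsigned long test)
{
	unsigned long old = atomic_load(flags);

	do {
		if (old & test)
			return false;	/* something new was flagged: keep pending */
	} while (!atomic_compare_exchange_weak(flags, &old, old & ~clear));

	return true;
}

int main(void)
{
	atomic_ulong flags = 1UL << 2;			/* "PENDING" set */

	printf("%d\n", bit_clear_unless_model(&flags, 1UL << 2, 1UL << 0)); /* 1 */
	atomic_fetch_or(&flags, (1UL << 0) | (1UL << 2)); /* "DEVS" + "PENDING" again */
	printf("%d\n", bit_clear_unless_model(&flags, 1UL << 2, 1UL << 0)); /* 0 */
	return 0;
}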
 
@@ -4811,6 +4817,10 @@
 	if (err)
 		return err;
 
+	/* cluster raid doesn't support change array_sectors */
+	if (mddev_is_clustered(mddev))
+		return -EINVAL;
+
 	if (strncmp(buf, "default", 7) == 0) {
 		if (mddev->pers)
 			sectors = mddev->pers->size(mddev, 0, 0);
@@ -5034,7 +5044,7 @@
 	disk->fops = &md_fops;
 	disk->private_data = mddev;
 	disk->queue = mddev->queue;
-	blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
+	blk_queue_write_cache(mddev->queue, true, true);
 	/* Allow extended partitions.  This makes the
 	 * 'mdp' device redundant, but we can't really
 	 * remove it now.
@@ -6432,6 +6442,10 @@
 	int rv;
 	int fit = (num_sectors == 0);
 
+	/* cluster raid doesn't support update size */
+	if (mddev_is_clustered(mddev))
+		return -EINVAL;
+
 	if (mddev->pers->resize == NULL)
 		return -EINVAL;
 	/* The "num_sectors" is the number of sectors of each device that
@@ -6883,7 +6897,7 @@
 
 	case ADD_NEW_DISK:
 		/* We can support ADD_NEW_DISK on read-only arrays
-		 * on if we are re-adding a preexisting device.
+		 * only if we are re-adding a preexisting device.
 		 * So require mddev->pers and MD_DISK_SYNC.
 		 */
 		if (mddev->pers) {
@@ -7780,7 +7794,7 @@
 	struct md_rdev *rdev;
 	char *desc, *action = NULL;
 	struct blk_plug plug;
-	bool cluster_resync_finished = false;
+	int ret;
 
 	/* just incase thread restarts... */
 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7790,6 +7804,19 @@
 		return;
 	}
 
+	if (mddev_is_clustered(mddev)) {
+		ret = md_cluster_ops->resync_start(mddev);
+		if (ret)
+			goto skip;
+
+		if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+			test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
+			test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
+		     && ((unsigned long long)mddev->curr_resync_completed
+			 < (unsigned long long)mddev->resync_max_sectors))
+			goto skip;
+	}
+
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
 			desc = "data-check";
@@ -8084,11 +8111,6 @@
 		mddev->curr_resync_completed = mddev->curr_resync;
 		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	}
-	/* tell personality and other nodes that we are finished */
-	if (mddev_is_clustered(mddev)) {
-		md_cluster_ops->resync_finish(mddev);
-		cluster_resync_finished = true;
-	}
 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
 
 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
@@ -8125,12 +8147,18 @@
 		}
 	}
  skip:
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
-
 	if (mddev_is_clustered(mddev) &&
-	    test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-	    !cluster_resync_finished)
+	    ret == 0) {
+		/* set CHANGE_PENDING here since another update may
+		 * still be needed, so other nodes are informed */
+		set_mask_bits(&mddev->flags, 0,
+			      BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
+		md_wakeup_thread(mddev->thread);
+		wait_event(mddev->sb_wait,
+			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 		md_cluster_ops->resync_finish(mddev);
+	} else
+		set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
 	spin_lock(&mddev->lock);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8221,18 +8249,9 @@
 	struct mddev *mddev = container_of(ws, struct mddev, del_work);
 	int ret = 0;
 
-	if (mddev_is_clustered(mddev)) {
-		ret = md_cluster_ops->resync_start(mddev);
-		if (ret) {
-			mddev->sync_thread = NULL;
-			goto out;
-		}
-	}
-
 	mddev->sync_thread = md_register_thread(md_do_sync,
 						mddev,
 						"resync");
-out:
 	if (!mddev->sync_thread) {
 		if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
 			printk(KERN_ERR "%s: could not start resync"
@@ -8531,6 +8550,7 @@
 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
 		       int is_new)
 {
+	struct mddev *mddev = rdev->mddev;
 	int rv;
 	if (is_new)
 		s += rdev->new_data_offset;
@@ -8540,8 +8560,8 @@
 	if (rv == 0) {
 		/* Make sure they get written out promptly */
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
-		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
-		set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags);
+		set_mask_bits(&mddev->flags, 0,
+			      BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
 		md_wakeup_thread(rdev->mddev->thread);
 		return 1;
 	} else
@@ -8675,6 +8695,11 @@
 				ret = remove_and_add_spares(mddev, rdev2);
 				pr_info("Activated spare: %s\n",
 						bdevname(rdev2->bdev,b));
+				/* wake up mddev->thread here, so the array can
+				 * perform a resync with the newly activated disk */
+				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+				md_wakeup_thread(mddev->thread);
+
 			}
 			/* device faulty
 			 * We just want to do the minimum to mark the disk
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2ea12c6..34783a3 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -70,7 +70,6 @@
 			(unsigned long long)zone_size>>1);
 		zone_start = conf->strip_zone[j].zone_end;
 	}
-	printk(KERN_INFO "\n");
 }
 
 static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
@@ -85,6 +84,7 @@
 	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
 	unsigned short blksize = 512;
 
+	*private_conf = ERR_PTR(-ENOMEM);
 	if (!conf)
 		return -ENOMEM;
 	rdev_for_each(rdev1, mddev) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 39fb21e..c7c8cde 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -570,7 +570,7 @@
 			if (best_dist_disk < 0) {
 				if (is_badblock(rdev, this_sector, sectors,
 						&first_bad, &bad_sectors)) {
-					if (first_bad < this_sector)
+					if (first_bad <= this_sector)
 						/* Cannot use this */
 						continue;
 					best_good_sectors = first_bad - this_sector;
@@ -1474,8 +1474,8 @@
 	 * if recovery is running, make sure it aborts.
 	 */
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
-	set_bit(MD_CHANGE_PENDING, &mddev->flags);
+	set_mask_bits(&mddev->flags, 0,
+		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
 	printk(KERN_ALERT
 	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
 	       "md/raid1:%s: Operation continuing on %d devices.\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e3fd725..c7de2a5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1102,8 +1102,8 @@
 		bio->bi_iter.bi_sector < conf->reshape_progress))) {
 		/* Need to update reshape_position in metadata */
 		mddev->reshape_position = conf->reshape_progress;
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
-		set_bit(MD_CHANGE_PENDING, &mddev->flags);
+		set_mask_bits(&mddev->flags, 0,
+			      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
 		md_wakeup_thread(mddev->thread);
 		wait_event(mddev->sb_wait,
 			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
@@ -1591,8 +1591,8 @@
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
-	set_bit(MD_CHANGE_PENDING, &mddev->flags);
+	set_mask_bits(&mddev->flags, 0,
+		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 	printk(KERN_ALERT
 	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
@@ -3782,8 +3782,10 @@
 			return ret;
 	}
 	md_set_array_sectors(mddev, size);
-	set_capacity(mddev->gendisk, mddev->array_sectors);
-	revalidate_disk(mddev->gendisk);
+	if (mddev->queue) {
+		set_capacity(mddev->gendisk, mddev->array_sectors);
+		revalidate_disk(mddev->gendisk);
+	}
 	if (sectors > mddev->dev_sectors &&
 	    mddev->recovery_cp > oldsize) {
 		mddev->recovery_cp = oldsize;
@@ -4593,8 +4595,10 @@
 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		}
 		mddev->resync_max_sectors = size;
-		set_capacity(mddev->gendisk, mddev->array_sectors);
-		revalidate_disk(mddev->gendisk);
+		if (mddev->queue) {
+			set_capacity(mddev->gendisk, mddev->array_sectors);
+			revalidate_disk(mddev->gendisk);
+		}
 	} else {
 		int d;
 		for (d = conf->geo.raid_disks ;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 9531f5f..e889e2d 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -712,8 +712,8 @@
 	 * in_teardown check workaround this issue.
 	 */
 	if (!log->in_teardown) {
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
-		set_bit(MD_CHANGE_PENDING, &mddev->flags);
+		set_mask_bits(&mddev->flags, 0,
+			      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
 		md_wakeup_thread(mddev->thread);
 		wait_event(mddev->sb_wait,
 			!test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
@@ -1188,6 +1188,7 @@
 
 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 {
+	struct request_queue *q = bdev_get_queue(rdev->bdev);
 	struct r5l_log *log;
 
 	if (PAGE_SIZE != 4096)
@@ -1197,7 +1198,7 @@
 		return -ENOMEM;
 	log->rdev = rdev;
 
-	log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);
+	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
 
 	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
 				       sizeof(rdev->mddev->uuid));
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8ab8b65..8959e6d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2514,8 +2514,8 @@
 
 	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
-	set_bit(MD_CHANGE_PENDING, &mddev->flags);
+	set_mask_bits(&mddev->flags, 0,
+		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
 	printk(KERN_ALERT
 	       "md/raid:%s: Disk failure on %s, disabling device.\n"
 	       "md/raid:%s: Operation continuing on %d devices.\n",
@@ -3502,8 +3502,6 @@
 				dev = &sh->dev[i];
 			} else if (test_bit(R5_Discard, &dev->flags))
 				discard_pending = 1;
-			WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
-			WARN_ON(dev->page != dev->orig_page);
 		}
 
 	r5l_stripe_write_finished(sh);
@@ -7574,8 +7572,10 @@
 
 		if (mddev->delta_disks > 0) {
 			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
-			set_capacity(mddev->gendisk, mddev->array_sectors);
-			revalidate_disk(mddev->gendisk);
+			if (mddev->queue) {
+				set_capacity(mddev->gendisk, mddev->array_sectors);
+				revalidate_disk(mddev->gendisk);
+			}
 		} else {
 			int d;
 			spin_lock_irq(&conf->device_lock);
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 21154dd..326df0a 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -19,3 +19,4 @@
 source "drivers/media/common/b2c2/Kconfig"
 source "drivers/media/common/saa7146/Kconfig"
 source "drivers/media/common/siano/Kconfig"
+source "drivers/media/common/v4l2-tpg/Kconfig"
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index 89b795d..2d1b0a0 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1,4 +1,4 @@
-obj-y += b2c2/ saa7146/ siano/
+obj-y += b2c2/ saa7146/ siano/ v4l2-tpg/
 obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
 obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
 obj-$(CONFIG_CYPRESS_FIRMWARE) += cypress_firmware.o
diff --git a/drivers/media/common/v4l2-tpg/Kconfig b/drivers/media/common/v4l2-tpg/Kconfig
new file mode 100644
index 0000000..7456fc1
--- /dev/null
+++ b/drivers/media/common/v4l2-tpg/Kconfig
@@ -0,0 +1,2 @@
+config VIDEO_V4L2_TPG
+	tristate
diff --git a/drivers/media/common/v4l2-tpg/Makefile b/drivers/media/common/v4l2-tpg/Makefile
new file mode 100644
index 0000000..f588df4
--- /dev/null
+++ b/drivers/media/common/v4l2-tpg/Makefile
@@ -0,0 +1,3 @@
+v4l2-tpg-objs := v4l2-tpg-core.o v4l2-tpg-colors.o
+
+obj-$(CONFIG_VIDEO_V4L2_TPG) += v4l2-tpg.o
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
new file mode 100644
index 0000000..9bcbd31
--- /dev/null
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
@@ -0,0 +1,1415 @@
+/*
+ * v4l2-tpg-colors.c - A table that converts colors to various colorspaces
+ *
+ * The test pattern generator uses the tpg_colors for its test patterns.
+ * For testing colorspaces the first 8 colors of that table need to be
+ * converted to their equivalent in the target colorspace.
+ *
+ * The tpg_csc_colors[] table is the result of that conversion and since
+ * it is precalculated the colorspace conversion is just a simple table
+ * lookup.
+ *
+ * This source also contains the code used to generate the tpg_csc_colors
+ * table. Run the following command to compile it:
+ *
+ *	gcc v4l2-tpg-colors.c -DCOMPILE_APP -o gen-colors -lm
+ *
+ * and run the utility.
+ *
+ * Note that the converted colors are in the range 0x000-0xff0 (so times 16)
+ * in order to preserve precision.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/videodev2.h>
+#include <media/v4l2-tpg-colors.h>
+
+/* sRGB colors with range [0-255] */
+const struct color tpg_colors[TPG_COLOR_MAX] = {
+	/*
+	 * Colors to test colorspace conversion: converting these colors
+	 * to other colorspaces will never lead to out-of-gamut colors.
+	 */
+	{ 191, 191, 191 }, /* TPG_COLOR_CSC_WHITE */
+	{ 191, 191,  50 }, /* TPG_COLOR_CSC_YELLOW */
+	{  50, 191, 191 }, /* TPG_COLOR_CSC_CYAN */
+	{  50, 191,  50 }, /* TPG_COLOR_CSC_GREEN */
+	{ 191,  50, 191 }, /* TPG_COLOR_CSC_MAGENTA */
+	{ 191,  50,  50 }, /* TPG_COLOR_CSC_RED */
+	{  50,  50, 191 }, /* TPG_COLOR_CSC_BLUE */
+	{  50,  50,  50 }, /* TPG_COLOR_CSC_BLACK */
+
+	/* 75% colors */
+	{ 191, 191,   0 }, /* TPG_COLOR_75_YELLOW */
+	{   0, 191, 191 }, /* TPG_COLOR_75_CYAN */
+	{   0, 191,   0 }, /* TPG_COLOR_75_GREEN */
+	{ 191,   0, 191 }, /* TPG_COLOR_75_MAGENTA */
+	{ 191,   0,   0 }, /* TPG_COLOR_75_RED */
+	{   0,   0, 191 }, /* TPG_COLOR_75_BLUE */
+
+	/* 100% colors */
+	{ 255, 255, 255 }, /* TPG_COLOR_100_WHITE */
+	{ 255, 255,   0 }, /* TPG_COLOR_100_YELLOW */
+	{   0, 255, 255 }, /* TPG_COLOR_100_CYAN */
+	{   0, 255,   0 }, /* TPG_COLOR_100_GREEN */
+	{ 255,   0, 255 }, /* TPG_COLOR_100_MAGENTA */
+	{ 255,   0,   0 }, /* TPG_COLOR_100_RED */
+	{   0,   0, 255 }, /* TPG_COLOR_100_BLUE */
+	{   0,   0,   0 }, /* TPG_COLOR_100_BLACK */
+
+	{   0,   0,   0 }, /* TPG_COLOR_RANDOM placeholder */
+};
+
+#ifndef COMPILE_APP
+
+/* Generated table */
+const unsigned short tpg_rec709_to_linear[255 * 16 + 1] = {
+	   0,    0,    0,    1,    1,    1,    1,    2,    2,    2,    2,    2,    3,    3,    3,    3,
+	   4,    4,    4,    4,    4,    5,    5,    5,    5,    6,    6,    6,    6,    6,    7,    7,
+	   7,    7,    8,    8,    8,    8,    8,    9,    9,    9,    9,   10,   10,   10,   10,   10,
+	  11,   11,   11,   11,   12,   12,   12,   12,   12,   13,   13,   13,   13,   14,   14,   14,
+	  14,   14,   15,   15,   15,   15,   16,   16,   16,   16,   16,   17,   17,   17,   17,   18,
+	  18,   18,   18,   18,   19,   19,   19,   19,   20,   20,   20,   20,   20,   21,   21,   21,
+	  21,   22,   22,   22,   22,   22,   23,   23,   23,   23,   24,   24,   24,   24,   24,   25,
+	  25,   25,   25,   26,   26,   26,   26,   26,   27,   27,   27,   27,   28,   28,   28,   28,
+	  28,   29,   29,   29,   29,   30,   30,   30,   30,   30,   31,   31,   31,   31,   32,   32,
+	  32,   32,   32,   33,   33,   33,   33,   34,   34,   34,   34,   34,   35,   35,   35,   35,
+	  36,   36,   36,   36,   36,   37,   37,   37,   37,   38,   38,   38,   38,   38,   39,   39,
+	  39,   39,   40,   40,   40,   40,   40,   41,   41,   41,   41,   42,   42,   42,   42,   42,
+	  43,   43,   43,   43,   44,   44,   44,   44,   44,   45,   45,   45,   45,   46,   46,   46,
+	  46,   46,   47,   47,   47,   47,   48,   48,   48,   48,   48,   49,   49,   49,   49,   50,
+	  50,   50,   50,   50,   51,   51,   51,   51,   52,   52,   52,   52,   52,   53,   53,   53,
+	  53,   54,   54,   54,   54,   54,   55,   55,   55,   55,   56,   56,   56,   56,   56,   57,
+	  57,   57,   57,   58,   58,   58,   58,   58,   59,   59,   59,   59,   60,   60,   60,   60,
+	  60,   61,   61,   61,   61,   62,   62,   62,   62,   62,   63,   63,   63,   63,   64,   64,
+	  64,   64,   64,   65,   65,   65,   65,   66,   66,   66,   66,   66,   67,   67,   67,   67,
+	  68,   68,   68,   68,   68,   69,   69,   69,   69,   70,   70,   70,   70,   70,   71,   71,
+	  71,   71,   72,   72,   72,   72,   72,   73,   73,   73,   73,   73,   74,   74,   74,   74,
+	  74,   75,   75,   75,   75,   76,   76,   76,   76,   76,   77,   77,   77,   77,   78,   78,
+	  78,   78,   79,   79,   79,   79,   79,   80,   80,   80,   80,   81,   81,   81,   81,   82,
+	  82,   82,   82,   82,   83,   83,   83,   83,   84,   84,   84,   84,   85,   85,   85,   85,
+	  86,   86,   86,   86,   87,   87,   87,   87,   88,   88,   88,   88,   89,   89,   89,   89,
+	  90,   90,   90,   90,   91,   91,   91,   91,   92,   92,   92,   92,   93,   93,   93,   93,
+	  94,   94,   94,   94,   95,   95,   95,   95,   96,   96,   96,   96,   97,   97,   97,   97,
+	  98,   98,   98,   98,   99,   99,   99,   99,  100,  100,  100,  101,  101,  101,  101,  102,
+	 102,  102,  102,  103,  103,  103,  103,  104,  104,  104,  105,  105,  105,  105,  106,  106,
+	 106,  106,  107,  107,  107,  107,  108,  108,  108,  109,  109,  109,  109,  110,  110,  110,
+	 111,  111,  111,  111,  112,  112,  112,  112,  113,  113,  113,  114,  114,  114,  114,  115,
+	 115,  115,  116,  116,  116,  116,  117,  117,  117,  118,  118,  118,  118,  119,  119,  119,
+	 120,  120,  120,  120,  121,  121,  121,  122,  122,  122,  123,  123,  123,  123,  124,  124,
+	 124,  125,  125,  125,  125,  126,  126,  126,  127,  127,  127,  128,  128,  128,  128,  129,
+	 129,  129,  130,  130,  130,  131,  131,  131,  132,  132,  132,  132,  133,  133,  133,  134,
+	 134,  134,  135,  135,  135,  136,  136,  136,  136,  137,  137,  137,  138,  138,  138,  139,
+	 139,  139,  140,  140,  140,  141,  141,  141,  142,  142,  142,  142,  143,  143,  143,  144,
+	 144,  144,  145,  145,  145,  146,  146,  146,  147,  147,  147,  148,  148,  148,  149,  149,
+	 149,  150,  150,  150,  151,  151,  151,  152,  152,  152,  153,  153,  153,  154,  154,  154,
+	 155,  155,  155,  156,  156,  156,  157,  157,  157,  158,  158,  158,  159,  159,  159,  160,
+	 160,  160,  161,  161,  161,  162,  162,  162,  163,  163,  163,  164,  164,  164,  165,  165,
+	 165,  166,  166,  167,  167,  167,  168,  168,  168,  169,  169,  169,  170,  170,  170,  171,
+	 171,  171,  172,  172,  172,  173,  173,  174,  174,  174,  175,  175,  175,  176,  176,  176,
+	 177,  177,  177,  178,  178,  179,  179,  179,  180,  180,  180,  181,  181,  181,  182,  182,
+	 183,  183,  183,  184,  184,  184,  185,  185,  186,  186,  186,  187,  187,  187,  188,  188,
+	 188,  189,  189,  190,  190,  190,  191,  191,  191,  192,  192,  193,  193,  193,  194,  194,
+	 194,  195,  195,  196,  196,  196,  197,  197,  198,  198,  198,  199,  199,  199,  200,  200,
+	 201,  201,  201,  202,  202,  203,  203,  203,  204,  204,  204,  205,  205,  206,  206,  206,
+	 207,  207,  208,  208,  208,  209,  209,  210,  210,  210,  211,  211,  212,  212,  212,  213,
+	 213,  214,  214,  214,  215,  215,  216,  216,  216,  217,  217,  218,  218,  218,  219,  219,
+	 220,  220,  220,  221,  221,  222,  222,  222,  223,  223,  224,  224,  224,  225,  225,  226,
+	 226,  227,  227,  227,  228,  228,  229,  229,  229,  230,  230,  231,  231,  232,  232,  232,
+	 233,  233,  234,  234,  234,  235,  235,  236,  236,  237,  237,  237,  238,  238,  239,  239,
+	 240,  240,  240,  241,  241,  242,  242,  243,  243,  243,  244,  244,  245,  245,  246,  246,
+	 246,  247,  247,  248,  248,  249,  249,  249,  250,  250,  251,  251,  252,  252,  252,  253,
+	 253,  254,  254,  255,  255,  256,  256,  256,  257,  257,  258,  258,  259,  259,  260,  260,
+	 260,  261,  261,  262,  262,  263,  263,  264,  264,  264,  265,  265,  266,  266,  267,  267,
+	 268,  268,  269,  269,  269,  270,  270,  271,  271,  272,  272,  273,  273,  274,  274,  274,
+	 275,  275,  276,  276,  277,  277,  278,  278,  279,  279,  279,  280,  280,  281,  281,  282,
+	 282,  283,  283,  284,  284,  285,  285,  286,  286,  286,  287,  287,  288,  288,  289,  289,
+	 290,  290,  291,  291,  292,  292,  293,  293,  294,  294,  295,  295,  295,  296,  296,  297,
+	 297,  298,  298,  299,  299,  300,  300,  301,  301,  302,  302,  303,  303,  304,  304,  305,
+	 305,  306,  306,  307,  307,  308,  308,  309,  309,  309,  310,  310,  311,  311,  312,  312,
+	 313,  313,  314,  314,  315,  315,  316,  316,  317,  317,  318,  318,  319,  319,  320,  320,
+	 321,  321,  322,  322,  323,  323,  324,  324,  325,  325,  326,  326,  327,  327,  328,  328,
+	 329,  329,  330,  330,  331,  331,  332,  332,  333,  333,  334,  335,  335,  336,  336,  337,
+	 337,  338,  338,  339,  339,  340,  340,  341,  341,  342,  342,  343,  343,  344,  344,  345,
+	 345,  346,  346,  347,  347,  348,  348,  349,  349,  350,  351,  351,  352,  352,  353,  353,
+	 354,  354,  355,  355,  356,  356,  357,  357,  358,  358,  359,  360,  360,  361,  361,  362,
+	 362,  363,  363,  364,  364,  365,  365,  366,  366,  367,  368,  368,  369,  369,  370,  370,
+	 371,  371,  372,  372,  373,  373,  374,  375,  375,  376,  376,  377,  377,  378,  378,  379,
+	 379,  380,  381,  381,  382,  382,  383,  383,  384,  384,  385,  386,  386,  387,  387,  388,
+	 388,  389,  389,  390,  391,  391,  392,  392,  393,  393,  394,  394,  395,  396,  396,  397,
+	 397,  398,  398,  399,  399,  400,  401,  401,  402,  402,  403,  403,  404,  405,  405,  406,
+	 406,  407,  407,  408,  409,  409,  410,  410,  411,  411,  412,  413,  413,  414,  414,  415,
+	 415,  416,  417,  417,  418,  418,  419,  419,  420,  421,  421,  422,  422,  423,  424,  424,
+	 425,  425,  426,  426,  427,  428,  428,  429,  429,  430,  431,  431,  432,  432,  433,  433,
+	 434,  435,  435,  436,  436,  437,  438,  438,  439,  439,  440,  441,  441,  442,  442,  443,
+	 444,  444,  445,  445,  446,  447,  447,  448,  448,  449,  450,  450,  451,  451,  452,  453,
+	 453,  454,  454,  455,  456,  456,  457,  457,  458,  459,  459,  460,  460,  461,  462,  462,
+	 463,  463,  464,  465,  465,  466,  467,  467,  468,  468,  469,  470,  470,  471,  471,  472,
+	 473,  473,  474,  475,  475,  476,  476,  477,  478,  478,  479,  480,  480,  481,  481,  482,
+	 483,  483,  484,  485,  485,  486,  486,  487,  488,  488,  489,  490,  490,  491,  491,  492,
+	 493,  493,  494,  495,  495,  496,  497,  497,  498,  498,  499,  500,  500,  501,  502,  502,
+	 503,  504,  504,  505,  505,  506,  507,  507,  508,  509,  509,  510,  511,  511,  512,  513,
+	 513,  514,  514,  515,  516,  516,  517,  518,  518,  519,  520,  520,  521,  522,  522,  523,
+	 524,  524,  525,  526,  526,  527,  528,  528,  529,  529,  530,  531,  531,  532,  533,  533,
+	 534,  535,  535,  536,  537,  537,  538,  539,  539,  540,  541,  541,  542,  543,  543,  544,
+	 545,  545,  546,  547,  547,  548,  549,  549,  550,  551,  551,  552,  553,  553,  554,  555,
+	 555,  556,  557,  557,  558,  559,  560,  560,  561,  562,  562,  563,  564,  564,  565,  566,
+	 566,  567,  568,  568,  569,  570,  570,  571,  572,  572,  573,  574,  575,  575,  576,  577,
+	 577,  578,  579,  579,  580,  581,  581,  582,  583,  584,  584,  585,  586,  586,  587,  588,
+	 588,  589,  590,  590,  591,  592,  593,  593,  594,  595,  595,  596,  597,  598,  598,  599,
+	 600,  600,  601,  602,  602,  603,  604,  605,  605,  606,  607,  607,  608,  609,  610,  610,
+	 611,  612,  612,  613,  614,  615,  615,  616,  617,  617,  618,  619,  620,  620,  621,  622,
+	 622,  623,  624,  625,  625,  626,  627,  627,  628,  629,  630,  630,  631,  632,  632,  633,
+	 634,  635,  635,  636,  637,  638,  638,  639,  640,  640,  641,  642,  643,  643,  644,  645,
+	 646,  646,  647,  648,  649,  649,  650,  651,  652,  652,  653,  654,  654,  655,  656,  657,
+	 657,  658,  659,  660,  660,  661,  662,  663,  663,  664,  665,  666,  666,  667,  668,  669,
+	 669,  670,  671,  672,  672,  673,  674,  675,  675,  676,  677,  678,  678,  679,  680,  681,
+	 681,  682,  683,  684,  684,  685,  686,  687,  687,  688,  689,  690,  690,  691,  692,  693,
+	 694,  694,  695,  696,  697,  697,  698,  699,  700,  700,  701,  702,  703,  703,  704,  705,
+	 706,  707,  707,  708,  709,  710,  710,  711,  712,  713,  714,  714,  715,  716,  717,  717,
+	 718,  719,  720,  720,  721,  722,  723,  724,  724,  725,  726,  727,  728,  728,  729,  730,
+	 731,  731,  732,  733,  734,  735,  735,  736,  737,  738,  739,  739,  740,  741,  742,  742,
+	 743,  744,  745,  746,  746,  747,  748,  749,  750,  750,  751,  752,  753,  754,  754,  755,
+	 756,  757,  758,  758,  759,  760,  761,  762,  762,  763,  764,  765,  766,  766,  767,  768,
+	 769,  770,  771,  771,  772,  773,  774,  775,  775,  776,  777,  778,  779,  779,  780,  781,
+	 782,  783,  783,  784,  785,  786,  787,  788,  788,  789,  790,  791,  792,  793,  793,  794,
+	 795,  796,  797,  797,  798,  799,  800,  801,  802,  802,  803,  804,  805,  806,  807,  807,
+	 808,  809,  810,  811,  812,  812,  813,  814,  815,  816,  817,  817,  818,  819,  820,  821,
+	 822,  822,  823,  824,  825,  826,  827,  827,  828,  829,  830,  831,  832,  832,  833,  834,
+	 835,  836,  837,  838,  838,  839,  840,  841,  842,  843,  843,  844,  845,  846,  847,  848,
+	 849,  849,  850,  851,  852,  853,  854,  855,  855,  856,  857,  858,  859,  860,  861,  861,
+	 862,  863,  864,  865,  866,  867,  867,  868,  869,  870,  871,  872,  873,  873,  874,  875,
+	 876,  877,  878,  879,  880,  880,  881,  882,  883,  884,  885,  886,  887,  887,  888,  889,
+	 890,  891,  892,  893,  894,  894,  895,  896,  897,  898,  899,  900,  901,  901,  902,  903,
+	 904,  905,  906,  907,  908,  909,  909,  910,  911,  912,  913,  914,  915,  916,  916,  917,
+	 918,  919,  920,  921,  922,  923,  924,  925,  925,  926,  927,  928,  929,  930,  931,  932,
+	 933,  933,  934,  935,  936,  937,  938,  939,  940,  941,  942,  942,  943,  944,  945,  946,
+	 947,  948,  949,  950,  951,  952,  952,  953,  954,  955,  956,  957,  958,  959,  960,  961,
+	 962,  962,  963,  964,  965,  966,  967,  968,  969,  970,  971,  972,  973,  973,  974,  975,
+	 976,  977,  978,  979,  980,  981,  982,  983,  984,  985,  985,  986,  987,  988,  989,  990,
+	 991,  992,  993,  994,  995,  996,  997,  998,  998,  999, 1000, 1001, 1002, 1003, 1004, 1005,
+	1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020,
+	1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1030, 1031, 1032, 1033, 1034, 1035,
+	1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1050,
+	1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066,
+	1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1078, 1079, 1080, 1081,
+	1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097,
+	1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113,
+	1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129,
+	1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145,
+	1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161,
+	1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177,
+	1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1193, 1194,
+	1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210,
+	1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1223, 1224, 1225, 1226, 1227,
+	1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243,
+	1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260,
+	1261, 1262, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277,
+	1278, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1295,
+	1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1309, 1310, 1311, 1312,
+	1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329,
+	1330, 1331, 1332, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1345, 1346, 1347,
+	1348, 1349, 1350, 1351, 1352, 1353, 1354, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364,
+	1365, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1377, 1378, 1379, 1380, 1381, 1382,
+	1383, 1384, 1385, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1396, 1397, 1398, 1399, 1400,
+	1401, 1402, 1403, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1414, 1415, 1416, 1417, 1418,
+	1419, 1420, 1421, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436,
+	1437, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1448, 1449, 1450, 1451, 1452, 1453, 1455,
+	1456, 1457, 1458, 1459, 1460, 1461, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1471, 1472, 1473,
+	1474, 1475, 1476, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1486, 1487, 1488, 1489, 1490, 1491,
+	1493, 1494, 1495, 1496, 1497, 1498, 1500, 1501, 1502, 1503, 1504, 1505, 1507, 1508, 1509, 1510,
+	1511, 1512, 1514, 1515, 1516, 1517, 1518, 1519, 1521, 1522, 1523, 1524, 1525, 1527, 1528, 1529,
+	1530, 1531, 1532, 1534, 1535, 1536, 1537, 1538, 1540, 1541, 1542, 1543, 1544, 1545, 1547, 1548,
+	1549, 1550, 1551, 1553, 1554, 1555, 1556, 1557, 1559, 1560, 1561, 1562, 1563, 1564, 1566, 1567,
+	1568, 1569, 1570, 1572, 1573, 1574, 1575, 1576, 1578, 1579, 1580, 1581, 1582, 1584, 1585, 1586,
+	1587, 1588, 1590, 1591, 1592, 1593, 1594, 1596, 1597, 1598, 1599, 1601, 1602, 1603, 1604, 1605,
+	1607, 1608, 1609, 1610, 1611, 1613, 1614, 1615, 1616, 1617, 1619, 1620, 1621, 1622, 1624, 1625,
+	1626, 1627, 1628, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1641, 1642, 1643, 1644,
+	1646, 1647, 1648, 1649, 1650, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1660, 1662, 1663, 1664,
+	1665, 1667, 1668, 1669, 1670, 1671, 1673, 1674, 1675, 1676, 1678, 1679, 1680, 1681, 1683, 1684,
+	1685, 1686, 1688, 1689, 1690, 1691, 1693, 1694, 1695, 1696, 1698, 1699, 1700, 1701, 1703, 1704,
+	1705, 1706, 1708, 1709, 1710, 1711, 1713, 1714, 1715, 1716, 1718, 1719, 1720, 1721, 1723, 1724,
+	1725, 1726, 1728, 1729, 1730, 1731, 1733, 1734, 1735, 1737, 1738, 1739, 1740, 1742, 1743, 1744,
+	1745, 1747, 1748, 1749, 1750, 1752, 1753, 1754, 1756, 1757, 1758, 1759, 1761, 1762, 1763, 1764,
+	1766, 1767, 1768, 1770, 1771, 1772, 1773, 1775, 1776, 1777, 1778, 1780, 1781, 1782, 1784, 1785,
+	1786, 1787, 1789, 1790, 1791, 1793, 1794, 1795, 1796, 1798, 1799, 1800, 1802, 1803, 1804, 1806,
+	1807, 1808, 1809, 1811, 1812, 1813, 1815, 1816, 1817, 1818, 1820, 1821, 1822, 1824, 1825, 1826,
+	1828, 1829, 1830, 1831, 1833, 1834, 1835, 1837, 1838, 1839, 1841, 1842, 1843, 1844, 1846, 1847,
+	1848, 1850, 1851, 1852, 1854, 1855, 1856, 1858, 1859, 1860, 1862, 1863, 1864, 1865, 1867, 1868,
+	1869, 1871, 1872, 1873, 1875, 1876, 1877, 1879, 1880, 1881, 1883, 1884, 1885, 1887, 1888, 1889,
+	1891, 1892, 1893, 1894, 1896, 1897, 1898, 1900, 1901, 1902, 1904, 1905, 1906, 1908, 1909, 1910,
+	1912, 1913, 1914, 1916, 1917, 1918, 1920, 1921, 1922, 1924, 1925, 1926, 1928, 1929, 1930, 1932,
+	1933, 1935, 1936, 1937, 1939, 1940, 1941, 1943, 1944, 1945, 1947, 1948, 1949, 1951, 1952, 1953,
+	1955, 1956, 1957, 1959, 1960, 1961, 1963, 1964, 1965, 1967, 1968, 1970, 1971, 1972, 1974, 1975,
+	1976, 1978, 1979, 1980, 1982, 1983, 1984, 1986, 1987, 1989, 1990, 1991, 1993, 1994, 1995, 1997,
+	1998, 1999, 2001, 2002, 2004, 2005, 2006, 2008, 2009, 2010, 2012, 2013, 2015, 2016, 2017, 2019,
+	2020, 2021, 2023, 2024, 2026, 2027, 2028, 2030, 2031, 2032, 2034, 2035, 2037, 2038, 2039, 2041,
+	2042, 2043, 2045, 2046, 2048, 2049, 2050, 2052, 2053, 2055, 2056, 2057, 2059, 2060, 2061, 2063,
+	2064, 2066, 2067, 2068, 2070, 2071, 2073, 2074, 2075, 2077, 2078, 2080, 2081, 2082, 2084, 2085,
+	2087, 2088, 2089, 2091, 2092, 2094, 2095, 2096, 2098, 2099, 2101, 2102, 2103, 2105, 2106, 2108,
+	2109, 2110, 2112, 2113, 2115, 2116, 2117, 2119, 2120, 2122, 2123, 2124, 2126, 2127, 2129, 2130,
+	2132, 2133, 2134, 2136, 2137, 2139, 2140, 2141, 2143, 2144, 2146, 2147, 2149, 2150, 2151, 2153,
+	2154, 2156, 2157, 2159, 2160, 2161, 2163, 2164, 2166, 2167, 2169, 2170, 2171, 2173, 2174, 2176,
+	2177, 2179, 2180, 2181, 2183, 2184, 2186, 2187, 2189, 2190, 2191, 2193, 2194, 2196, 2197, 2199,
+	2200, 2202, 2203, 2204, 2206, 2207, 2209, 2210, 2212, 2213, 2214, 2216, 2217, 2219, 2220, 2222,
+	2223, 2225, 2226, 2228, 2229, 2230, 2232, 2233, 2235, 2236, 2238, 2239, 2241, 2242, 2243, 2245,
+	2246, 2248, 2249, 2251, 2252, 2254, 2255, 2257, 2258, 2260, 2261, 2262, 2264, 2265, 2267, 2268,
+	2270, 2271, 2273, 2274, 2276, 2277, 2279, 2280, 2282, 2283, 2284, 2286, 2287, 2289, 2290, 2292,
+	2293, 2295, 2296, 2298, 2299, 2301, 2302, 2304, 2305, 2307, 2308, 2310, 2311, 2312, 2314, 2315,
+	2317, 2318, 2320, 2321, 2323, 2324, 2326, 2327, 2329, 2330, 2332, 2333, 2335, 2336, 2338, 2339,
+	2341, 2342, 2344, 2345, 2347, 2348, 2350, 2351, 2353, 2354, 2356, 2357, 2359, 2360, 2362, 2363,
+	2365, 2366, 2368, 2369, 2371, 2372, 2374, 2375, 2377, 2378, 2380, 2381, 2383, 2384, 2386, 2387,
+	2389, 2390, 2392, 2393, 2395, 2396, 2398, 2399, 2401, 2402, 2404, 2405, 2407, 2408, 2410, 2411,
+	2413, 2414, 2416, 2417, 2419, 2420, 2422, 2423, 2425, 2426, 2428, 2429, 2431, 2433, 2434, 2436,
+	2437, 2439, 2440, 2442, 2443, 2445, 2446, 2448, 2449, 2451, 2452, 2454, 2455, 2457, 2458, 2460,
+	2462, 2463, 2465, 2466, 2468, 2469, 2471, 2472, 2474, 2475, 2477, 2478, 2480, 2481, 2483, 2485,
+	2486, 2488, 2489, 2491, 2492, 2494, 2495, 2497, 2498, 2500, 2502, 2503, 2505, 2506, 2508, 2509,
+	2511, 2512, 2514, 2515, 2517, 2519, 2520, 2522, 2523, 2525, 2526, 2528, 2529, 2531, 2533, 2534,
+	2536, 2537, 2539, 2540, 2542, 2543, 2545, 2547, 2548, 2550, 2551, 2553, 2554, 2556, 2557, 2559,
+	2561, 2562, 2564, 2565, 2567, 2568, 2570, 2572, 2573, 2575, 2576, 2578, 2579, 2581, 2583, 2584,
+	2586, 2587, 2589, 2590, 2592, 2594, 2595, 2597, 2598, 2600, 2601, 2603, 2605, 2606, 2608, 2609,
+	2611, 2613, 2614, 2616, 2617, 2619, 2620, 2622, 2624, 2625, 2627, 2628, 2630, 2632, 2633, 2635,
+	2636, 2638, 2640, 2641, 2643, 2644, 2646, 2647, 2649, 2651, 2652, 2654, 2655, 2657, 2659, 2660,
+	2662, 2663, 2665, 2667, 2668, 2670, 2671, 2673, 2675, 2676, 2678, 2679, 2681, 2683, 2684, 2686,
+	2687, 2689, 2691, 2692, 2694, 2696, 2697, 2699, 2700, 2702, 2704, 2705, 2707, 2708, 2710, 2712,
+	2713, 2715, 2716, 2718, 2720, 2721, 2723, 2725, 2726, 2728, 2729, 2731, 2733, 2734, 2736, 2738,
+	2739, 2741, 2742, 2744, 2746, 2747, 2749, 2751, 2752, 2754, 2755, 2757, 2759, 2760, 2762, 2764,
+	2765, 2767, 2769, 2770, 2772, 2773, 2775, 2777, 2778, 2780, 2782, 2783, 2785, 2787, 2788, 2790,
+	2791, 2793, 2795, 2796, 2798, 2800, 2801, 2803, 2805, 2806, 2808, 2810, 2811, 2813, 2814, 2816,
+	2818, 2819, 2821, 2823, 2824, 2826, 2828, 2829, 2831, 2833, 2834, 2836, 2838, 2839, 2841, 2843,
+	2844, 2846, 2848, 2849, 2851, 2853, 2854, 2856, 2857, 2859, 2861, 2862, 2864, 2866, 2867, 2869,
+	2871, 2872, 2874, 2876, 2877, 2879, 2881, 2882, 2884, 2886, 2888, 2889, 2891, 2893, 2894, 2896,
+	2898, 2899, 2901, 2903, 2904, 2906, 2908, 2909, 2911, 2913, 2914, 2916, 2918, 2919, 2921, 2923,
+	2924, 2926, 2928, 2929, 2931, 2933, 2935, 2936, 2938, 2940, 2941, 2943, 2945, 2946, 2948, 2950,
+	2951, 2953, 2955, 2956, 2958, 2960, 2962, 2963, 2965, 2967, 2968, 2970, 2972, 2973, 2975, 2977,
+	2979, 2980, 2982, 2984, 2985, 2987, 2989, 2990, 2992, 2994, 2996, 2997, 2999, 3001, 3002, 3004,
+	3006, 3008, 3009, 3011, 3013, 3014, 3016, 3018, 3020, 3021, 3023, 3025, 3026, 3028, 3030, 3032,
+	3033, 3035, 3037, 3038, 3040, 3042, 3044, 3045, 3047, 3049, 3050, 3052, 3054, 3056, 3057, 3059,
+	3061, 3063, 3064, 3066, 3068, 3069, 3071, 3073, 3075, 3076, 3078, 3080, 3082, 3083, 3085, 3087,
+	3089, 3090, 3092, 3094, 3095, 3097, 3099, 3101, 3102, 3104, 3106, 3108, 3109, 3111, 3113, 3115,
+	3116, 3118, 3120, 3122, 3123, 3125, 3127, 3129, 3130, 3132, 3134, 3136, 3137, 3139, 3141, 3143,
+	3144, 3146, 3148, 3150, 3151, 3153, 3155, 3157, 3158, 3160, 3162, 3164, 3165, 3167, 3169, 3171,
+	3172, 3174, 3176, 3178, 3179, 3181, 3183, 3185, 3187, 3188, 3190, 3192, 3194, 3195, 3197, 3199,
+	3201, 3202, 3204, 3206, 3208, 3209, 3211, 3213, 3215, 3217, 3218, 3220, 3222, 3224, 3225, 3227,
+	3229, 3231, 3233, 3234, 3236, 3238, 3240, 3241, 3243, 3245, 3247, 3249, 3250, 3252, 3254, 3256,
+	3258, 3259, 3261, 3263, 3265, 3266, 3268, 3270, 3272, 3274, 3275, 3277, 3279, 3281, 3283, 3284,
+	3286, 3288, 3290, 3292, 3293, 3295, 3297, 3299, 3301, 3302, 3304, 3306, 3308, 3310, 3311, 3313,
+	3315, 3317, 3319, 3320, 3322, 3324, 3326, 3328, 3329, 3331, 3333, 3335, 3337, 3338, 3340, 3342,
+	3344, 3346, 3348, 3349, 3351, 3353, 3355, 3357, 3358, 3360, 3362, 3364, 3366, 3368, 3369, 3371,
+	3373, 3375, 3377, 3378, 3380, 3382, 3384, 3386, 3388, 3389, 3391, 3393, 3395, 3397, 3399, 3400,
+	3402, 3404, 3406, 3408, 3410, 3411, 3413, 3415, 3417, 3419, 3421, 3422, 3424, 3426, 3428, 3430,
+	3432, 3433, 3435, 3437, 3439, 3441, 3443, 3444, 3446, 3448, 3450, 3452, 3454, 3455, 3457, 3459,
+	3461, 3463, 3465, 3467, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3481, 3483, 3485, 3487, 3489,
+	3491, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3507, 3509, 3511, 3513, 3515, 3517, 3519,
+	3520, 3522, 3524, 3526, 3528, 3530, 3532, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3548,
+	3550, 3552, 3554, 3556, 3558, 3560, 3562, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3578,
+	3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609,
+	3611, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3629, 3631, 3633, 3635, 3637, 3639,
+	3641, 3643, 3645, 3647, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3667, 3669,
+	3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3688, 3690, 3692, 3694, 3696, 3698, 3700,
+	3702, 3704, 3706, 3708, 3710, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731,
+	3733, 3735, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762,
+	3764, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793,
+	3795, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824,
+	3826, 3828, 3830, 3832, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855,
+	3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 3875, 3877, 3879, 3881, 3883, 3884, 3886,
+	3888, 3890, 3892, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3908, 3910, 3912, 3914, 3916, 3918,
+	3920, 3922, 3924, 3926, 3928, 3930, 3932, 3934, 3936, 3938, 3940, 3942, 3944, 3946, 3948, 3950,
+	3952, 3954, 3956, 3958, 3960, 3962, 3964, 3966, 3968, 3970, 3972, 3974, 3976, 3978, 3980, 3982,
+	3984, 3986, 3988, 3990, 3992, 3994, 3996, 3998, 4000, 4002, 4004, 4006, 4008, 4010, 4012, 4014,
+	4016, 4018, 4020, 4022, 4024, 4026, 4028, 4030, 4032, 4034, 4036, 4038, 4040, 4042, 4044, 4046,
+	4048, 4050, 4052, 4054, 4056, 4058, 4060, 4062, 4064, 4066, 4068, 4070, 4072, 4074, 4076, 4078,
+	4080,
+};
+
+/* Generated table */
+const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = {
+	   0,    5,    9,   14,   18,   22,   27,   32,   36,   41,   45,   50,   54,   59,   63,   68,
+	  72,   77,   81,   86,   90,   95,   99,  104,  108,  113,  117,  122,  126,  131,  135,  139,
+	 144,  149,  153,  158,  162,  167,  171,  176,  180,  185,  189,  194,  198,  203,  207,  212,
+	 216,  221,  225,  230,  234,  239,  243,  248,  252,  257,  261,  266,  270,  275,  279,  284,
+	 288,  293,  297,  302,  306,  311,  315,  320,  324,  328,  334,  338,  343,  347,  352,  356,
+	 360,  365,  369,  373,  377,  381,  386,  390,  394,  398,  402,  406,  410,  414,  418,  422,
+	 426,  430,  433,  437,  441,  445,  449,  452,  456,  460,  464,  467,  471,  475,  478,  482,
+	 485,  489,  492,  496,  499,  503,  506,  510,  513,  517,  520,  524,  527,  530,  534,  537,
+	 540,  544,  547,  550,  554,  557,  560,  563,  566,  570,  573,  576,  579,  582,  586,  589,
+	 592,  595,  598,  601,  604,  607,  610,  613,  616,  619,  622,  625,  628,  631,  634,  637,
+	 640,  643,  646,  649,  652,  655,  658,  660,  663,  666,  669,  672,  675,  677,  680,  683,
+	 686,  689,  691,  694,  697,  700,  702,  705,  708,  711,  713,  716,  719,  721,  724,  727,
+	 729,  732,  735,  737,  740,  743,  745,  748,  750,  753,  756,  758,  761,  763,  766,  768,
+	 771,  773,  776,  779,  781,  784,  786,  789,  791,  794,  796,  799,  801,  803,  806,  808,
+	 811,  813,  816,  818,  821,  823,  825,  828,  830,  833,  835,  837,  840,  842,  844,  847,
+	 849,  851,  854,  856,  858,  861,  863,  865,  868,  870,  872,  875,  877,  879,  881,  884,
+	 886,  888,  891,  893,  895,  897,  900,  902,  904,  906,  908,  911,  913,  915,  917,  919,
+	 922,  924,  926,  928,  930,  933,  935,  937,  939,  941,  943,  946,  948,  950,  952,  954,
+	 956,  958,  960,  963,  965,  967,  969,  971,  973,  975,  977,  979,  981,  984,  986,  988,
+	 990,  992,  994,  996,  998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020,
+	1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1040, 1042, 1044, 1046, 1048, 1050, 1052,
+	1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083,
+	1085, 1087, 1089, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1107, 1109, 1111, 1113,
+	1115, 1117, 1119, 1120, 1122, 1124, 1126, 1128, 1130, 1131, 1133, 1135, 1137, 1139, 1141, 1142,
+	1144, 1146, 1148, 1150, 1151, 1153, 1155, 1157, 1159, 1160, 1162, 1164, 1166, 1168, 1169, 1171,
+	1173, 1175, 1176, 1178, 1180, 1182, 1184, 1185, 1187, 1189, 1191, 1192, 1194, 1196, 1198, 1199,
+	1201, 1203, 1204, 1206, 1208, 1210, 1211, 1213, 1215, 1217, 1218, 1220, 1222, 1223, 1225, 1227,
+	1228, 1230, 1232, 1234, 1235, 1237, 1239, 1240, 1242, 1244, 1245, 1247, 1249, 1250, 1252, 1254,
+	1255, 1257, 1259, 1260, 1262, 1264, 1265, 1267, 1269, 1270, 1272, 1274, 1275, 1277, 1279, 1280,
+	1282, 1283, 1285, 1287, 1288, 1290, 1292, 1293, 1295, 1296, 1298, 1300, 1301, 1303, 1305, 1306,
+	1308, 1309, 1311, 1313, 1314, 1316, 1317, 1319, 1321, 1322, 1324, 1325, 1327, 1328, 1330, 1332,
+	1333, 1335, 1336, 1338, 1339, 1341, 1343, 1344, 1346, 1347, 1349, 1350, 1352, 1354, 1355, 1357,
+	1358, 1360, 1361, 1363, 1364, 1366, 1367, 1369, 1371, 1372, 1374, 1375, 1377, 1378, 1380, 1381,
+	1383, 1384, 1386, 1387, 1389, 1390, 1392, 1393, 1395, 1396, 1398, 1399, 1401, 1402, 1404, 1405,
+	1407, 1408, 1410, 1411, 1413, 1414, 1416, 1417, 1419, 1420, 1422, 1423, 1425, 1426, 1428, 1429,
+	1431, 1432, 1434, 1435, 1437, 1438, 1440, 1441, 1442, 1444, 1445, 1447, 1448, 1450, 1451, 1453,
+	1454, 1456, 1457, 1458, 1460, 1461, 1463, 1464, 1466, 1467, 1469, 1470, 1471, 1473, 1474, 1476,
+	1477, 1479, 1480, 1481, 1483, 1484, 1486, 1487, 1489, 1490, 1491, 1493, 1494, 1496, 1497, 1498,
+	1500, 1501, 1503, 1504, 1505, 1507, 1508, 1510, 1511, 1512, 1514, 1515, 1517, 1518, 1519, 1521,
+	1522, 1524, 1525, 1526, 1528, 1529, 1531, 1532, 1533, 1535, 1536, 1537, 1539, 1540, 1542, 1543,
+	1544, 1546, 1547, 1548, 1550, 1551, 1553, 1554, 1555, 1557, 1558, 1559, 1561, 1562, 1563, 1565,
+	1566, 1567, 1569, 1570, 1571, 1573, 1574, 1576, 1577, 1578, 1580, 1581, 1582, 1584, 1585, 1586,
+	1588, 1589, 1590, 1592, 1593, 1594, 1596, 1597, 1598, 1600, 1601, 1602, 1603, 1605, 1606, 1607,
+	1609, 1610, 1611, 1613, 1614, 1615, 1617, 1618, 1619, 1621, 1622, 1623, 1624, 1626, 1627, 1628,
+	1630, 1631, 1632, 1634, 1635, 1636, 1637, 1639, 1640, 1641, 1643, 1644, 1645, 1647, 1648, 1649,
+	1650, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1661, 1662, 1663, 1664, 1666, 1667, 1668, 1670,
+	1671, 1672, 1673, 1675, 1676, 1677, 1678, 1680, 1681, 1682, 1683, 1685, 1686, 1687, 1688, 1690,
+	1691, 1692, 1693, 1695, 1696, 1697, 1698, 1700, 1701, 1702, 1703, 1705, 1706, 1707, 1708, 1710,
+	1711, 1712, 1713, 1715, 1716, 1717, 1718, 1720, 1721, 1722, 1723, 1724, 1726, 1727, 1728, 1729,
+	1731, 1732, 1733, 1734, 1736, 1737, 1738, 1739, 1740, 1742, 1743, 1744, 1745, 1746, 1748, 1749,
+	1750, 1751, 1753, 1754, 1755, 1756, 1757, 1759, 1760, 1761, 1762, 1763, 1765, 1766, 1767, 1768,
+	1769, 1771, 1772, 1773, 1774, 1775, 1777, 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, 1787,
+	1788, 1790, 1791, 1792, 1793, 1794, 1796, 1797, 1798, 1799, 1800, 1801, 1803, 1804, 1805, 1806,
+	1807, 1809, 1810, 1811, 1812, 1813, 1814, 1816, 1817, 1818, 1819, 1820, 1821, 1823, 1824, 1825,
+	1826, 1827, 1828, 1829, 1831, 1832, 1833, 1834, 1835, 1836, 1838, 1839, 1840, 1841, 1842, 1843,
+	1845, 1846, 1847, 1848, 1849, 1850, 1851, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1861, 1862,
+	1863, 1864, 1865, 1866, 1867, 1868, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1878, 1879, 1880,
+	1881, 1882, 1883, 1884, 1885, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1896, 1897, 1898,
+	1899, 1900, 1901, 1902, 1903, 1904, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1916,
+	1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1927, 1928, 1929, 1930, 1931, 1932, 1933,
+	1934, 1935, 1936, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1950, 1951,
+	1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1963, 1964, 1965, 1966, 1967, 1968,
+	1969, 1970, 1971, 1972, 1973, 1974, 1975, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985,
+	1986, 1987, 1988, 1989, 1990, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+	2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,
+	2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2031, 2032, 2033, 2034, 2035, 2036,
+	2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052,
+	2053, 2054, 2055, 2056, 2057, 2058, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069,
+	2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085,
+	2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101,
+	2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117,
+	2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133,
+	2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149,
+	2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165,
+	2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180,
+	2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196,
+	2197, 2198, 2199, 2200, 2201, 2202, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211,
+	2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2224, 2225, 2226,
+	2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2241,
+	2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257,
+	2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2271,
+	2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2283, 2284, 2285, 2286,
+	2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2295, 2296, 2297, 2298, 2299, 2300, 2301,
+	2302, 2303, 2304, 2305, 2306, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316,
+	2317, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2327, 2328, 2329, 2330,
+	2331, 2332, 2333, 2334, 2335, 2336, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345,
+	2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2354, 2355, 2356, 2357, 2358, 2359,
+	2360, 2361, 2362, 2363, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2371, 2372, 2373,
+	2374, 2375, 2376, 2377, 2378, 2379, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2386, 2387,
+	2388, 2389, 2390, 2391, 2392, 2393, 2394, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2401,
+	2402, 2403, 2404, 2405, 2406, 2407, 2408, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2415,
+	2416, 2417, 2418, 2419, 2420, 2421, 2422, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2428, 2429,
+	2430, 2431, 2432, 2433, 2434, 2435, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2441, 2442, 2443,
+	2444, 2445, 2446, 2447, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2453, 2454, 2455, 2456, 2457,
+	2458, 2459, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2465, 2466, 2467, 2468, 2469, 2470, 2471,
+	2471, 2472, 2473, 2474, 2475, 2476, 2477, 2477, 2478, 2479, 2480, 2481, 2482, 2482, 2483, 2484,
+	2485, 2486, 2487, 2488, 2488, 2489, 2490, 2491, 2492, 2493, 2493, 2494, 2495, 2496, 2497, 2498,
+	2499, 2499, 2500, 2501, 2502, 2503, 2504, 2504, 2505, 2506, 2507, 2508, 2509, 2509, 2510, 2511,
+	2512, 2513, 2514, 2514, 2515, 2516, 2517, 2518, 2519, 2519, 2520, 2521, 2522, 2523, 2524, 2524,
+	2525, 2526, 2527, 2528, 2529, 2529, 2530, 2531, 2532, 2533, 2534, 2534, 2535, 2536, 2537, 2538,
+	2539, 2539, 2540, 2541, 2542, 2543, 2544, 2544, 2545, 2546, 2547, 2548, 2548, 2549, 2550, 2551,
+	2552, 2553, 2553, 2554, 2555, 2556, 2557, 2558, 2558, 2559, 2560, 2561, 2562, 2562, 2563, 2564,
+	2565, 2566, 2567, 2567, 2568, 2569, 2570, 2571, 2571, 2572, 2573, 2574, 2575, 2576, 2576, 2577,
+	2578, 2579, 2580, 2580, 2581, 2582, 2583, 2584, 2584, 2585, 2586, 2587, 2588, 2589, 2589, 2590,
+	2591, 2592, 2593, 2593, 2594, 2595, 2596, 2597, 2597, 2598, 2599, 2600, 2601, 2601, 2602, 2603,
+	2604, 2605, 2605, 2606, 2607, 2608, 2609, 2610, 2610, 2611, 2612, 2613, 2614, 2614, 2615, 2616,
+	2617, 2618, 2618, 2619, 2620, 2621, 2622, 2622, 2623, 2624, 2625, 2626, 2626, 2627, 2628, 2629,
+	2630, 2630, 2631, 2632, 2633, 2634, 2634, 2635, 2636, 2637, 2637, 2638, 2639, 2640, 2641, 2641,
+	2642, 2643, 2644, 2645, 2645, 2646, 2647, 2648, 2649, 2649, 2650, 2651, 2652, 2653, 2653, 2654,
+	2655, 2656, 2656, 2657, 2658, 2659, 2660, 2660, 2661, 2662, 2663, 2664, 2664, 2665, 2666, 2667,
+	2668, 2668, 2669, 2670, 2671, 2671, 2672, 2673, 2674, 2675, 2675, 2676, 2677, 2678, 2678, 2679,
+	2680, 2681, 2682, 2682, 2683, 2684, 2685, 2686, 2686, 2687, 2688, 2689, 2689, 2690, 2691, 2692,
+	2693, 2693, 2694, 2695, 2696, 2696, 2697, 2698, 2699, 2700, 2700, 2701, 2702, 2703, 2703, 2704,
+	2705, 2706, 2706, 2707, 2708, 2709, 2710, 2710, 2711, 2712, 2713, 2713, 2714, 2715, 2716, 2717,
+	2717, 2718, 2719, 2720, 2720, 2721, 2722, 2723, 2723, 2724, 2725, 2726, 2727, 2727, 2728, 2729,
+	2730, 2730, 2731, 2732, 2733, 2733, 2734, 2735, 2736, 2736, 2737, 2738, 2739, 2740, 2740, 2741,
+	2742, 2743, 2743, 2744, 2745, 2746, 2746, 2747, 2748, 2749, 2749, 2750, 2751, 2752, 2752, 2753,
+	2754, 2755, 2755, 2756, 2757, 2758, 2759, 2759, 2760, 2761, 2762, 2762, 2763, 2764, 2765, 2765,
+	2766, 2767, 2768, 2768, 2769, 2770, 2771, 2771, 2772, 2773, 2774, 2774, 2775, 2776, 2777, 2777,
+	2778, 2779, 2780, 2780, 2781, 2782, 2783, 2783, 2784, 2785, 2786, 2786, 2787, 2788, 2789, 2789,
+	2790, 2791, 2792, 2792, 2793, 2794, 2795, 2795, 2796, 2797, 2798, 2798, 2799, 2800, 2801, 2801,
+	2802, 2803, 2804, 2804, 2805, 2806, 2807, 2807, 2808, 2809, 2810, 2810, 2811, 2812, 2813, 2813,
+	2814, 2815, 2815, 2816, 2817, 2818, 2818, 2819, 2820, 2821, 2821, 2822, 2823, 2824, 2824, 2825,
+	2826, 2827, 2827, 2828, 2829, 2830, 2830, 2831, 2832, 2832, 2833, 2834, 2835, 2835, 2836, 2837,
+	2838, 2838, 2839, 2840, 2841, 2841, 2842, 2843, 2844, 2844, 2845, 2846, 2846, 2847, 2848, 2849,
+	2849, 2850, 2851, 2852, 2852, 2853, 2854, 2855, 2855, 2856, 2857, 2857, 2858, 2859, 2860, 2860,
+	2861, 2862, 2863, 2863, 2864, 2865, 2865, 2866, 2867, 2868, 2868, 2869, 2870, 2871, 2871, 2872,
+	2873, 2873, 2874, 2875, 2876, 2876, 2877, 2878, 2879, 2879, 2880, 2881, 2881, 2882, 2883, 2884,
+	2884, 2885, 2886, 2886, 2887, 2888, 2889, 2889, 2890, 2891, 2892, 2892, 2893, 2894, 2894, 2895,
+	2896, 2897, 2897, 2898, 2899, 2899, 2900, 2901, 2902, 2902, 2903, 2904, 2904, 2905, 2906, 2907,
+	2907, 2908, 2909, 2909, 2910, 2911, 2912, 2912, 2913, 2914, 2914, 2915, 2916, 2917, 2917, 2918,
+	2919, 2919, 2920, 2921, 2922, 2922, 2923, 2924, 2924, 2925, 2926, 2927, 2927, 2928, 2929, 2929,
+	2930, 2931, 2932, 2932, 2933, 2934, 2934, 2935, 2936, 2937, 2937, 2938, 2939, 2939, 2940, 2941,
+	2941, 2942, 2943, 2944, 2944, 2945, 2946, 2946, 2947, 2948, 2949, 2949, 2950, 2951, 2951, 2952,
+	2953, 2953, 2954, 2955, 2956, 2956, 2957, 2958, 2958, 2959, 2960, 2961, 2961, 2962, 2963, 2963,
+	2964, 2965, 2965, 2966, 2967, 2968, 2968, 2969, 2970, 2970, 2971, 2972, 2972, 2973, 2974, 2975,
+	2975, 2976, 2977, 2977, 2978, 2979, 2979, 2980, 2981, 2982, 2982, 2983, 2984, 2984, 2985, 2986,
+	2986, 2987, 2988, 2988, 2989, 2990, 2991, 2991, 2992, 2993, 2993, 2994, 2995, 2995, 2996, 2997,
+	2998, 2998, 2999, 3000, 3000, 3001, 3002, 3002, 3003, 3004, 3004, 3005, 3006, 3006, 3007, 3008,
+	3009, 3009, 3010, 3011, 3011, 3012, 3013, 3013, 3014, 3015, 3015, 3016, 3017, 3018, 3018, 3019,
+	3020, 3020, 3021, 3022, 3022, 3023, 3024, 3024, 3025, 3026, 3026, 3027, 3028, 3029, 3029, 3030,
+	3031, 3031, 3032, 3033, 3033, 3034, 3035, 3035, 3036, 3037, 3037, 3038, 3039, 3039, 3040, 3041,
+	3042, 3042, 3043, 3044, 3044, 3045, 3046, 3046, 3047, 3048, 3048, 3049, 3050, 3050, 3051, 3052,
+	3052, 3053, 3054, 3054, 3055, 3056, 3056, 3057, 3058, 3059, 3059, 3060, 3061, 3061, 3062, 3063,
+	3063, 3064, 3065, 3065, 3066, 3067, 3067, 3068, 3069, 3069, 3070, 3071, 3071, 3072, 3073, 3073,
+	3074, 3075, 3075, 3076, 3077, 3077, 3078, 3079, 3079, 3080, 3081, 3081, 3082, 3083, 3084, 3084,
+	3085, 3086, 3086, 3087, 3088, 3088, 3089, 3090, 3090, 3091, 3092, 3092, 3093, 3094, 3094, 3095,
+	3096, 3096, 3097, 3098, 3098, 3099, 3100, 3100, 3101, 3102, 3102, 3103, 3104, 3104, 3105, 3106,
+	3106, 3107, 3108, 3108, 3109, 3110, 3110, 3111, 3112, 3112, 3113, 3114, 3114, 3115, 3116, 3116,
+	3117, 3118, 3118, 3119, 3120, 3120, 3121, 3122, 3122, 3123, 3124, 3124, 3125, 3126, 3126, 3127,
+	3128, 3128, 3129, 3130, 3130, 3131, 3132, 3132, 3133, 3134, 3134, 3135, 3135, 3136, 3137, 3137,
+	3138, 3139, 3139, 3140, 3141, 3141, 3142, 3143, 3143, 3144, 3145, 3145, 3146, 3147, 3147, 3148,
+	3149, 3149, 3150, 3151, 3151, 3152, 3153, 3153, 3154, 3155, 3155, 3156, 3157, 3157, 3158, 3159,
+	3159, 3160, 3160, 3161, 3162, 3162, 3163, 3164, 3164, 3165, 3166, 3166, 3167, 3168, 3168, 3169,
+	3170, 3170, 3171, 3172, 3172, 3173, 3174, 3174, 3175, 3175, 3176, 3177, 3177, 3178, 3179, 3179,
+	3180, 3181, 3181, 3182, 3183, 3183, 3184, 3185, 3185, 3186, 3187, 3187, 3188, 3188, 3189, 3190,
+	3190, 3191, 3192, 3192, 3193, 3194, 3194, 3195, 3196, 3196, 3197, 3198, 3198, 3199, 3199, 3200,
+	3201, 3201, 3202, 3203, 3203, 3204, 3205, 3205, 3206, 3207, 3207, 3208, 3209, 3209, 3210, 3210,
+	3211, 3212, 3212, 3213, 3214, 3214, 3215, 3216, 3216, 3217, 3218, 3218, 3219, 3219, 3220, 3221,
+	3221, 3222, 3223, 3223, 3224, 3225, 3225, 3226, 3227, 3227, 3228, 3228, 3229, 3230, 3230, 3231,
+	3232, 3232, 3233, 3234, 3234, 3235, 3235, 3236, 3237, 3237, 3238, 3239, 3239, 3240, 3241, 3241,
+	3242, 3242, 3243, 3244, 3244, 3245, 3246, 3246, 3247, 3248, 3248, 3249, 3249, 3250, 3251, 3251,
+	3252, 3253, 3253, 3254, 3255, 3255, 3256, 3256, 3257, 3258, 3258, 3259, 3260, 3260, 3261, 3262,
+	3262, 3263, 3263, 3264, 3265, 3265, 3266, 3267, 3267, 3268, 3268, 3269, 3270, 3270, 3271, 3272,
+	3272, 3273, 3274, 3274, 3275, 3275, 3276, 3277, 3277, 3278, 3279, 3279, 3280, 3280, 3281, 3282,
+	3282, 3283, 3284, 3284, 3285, 3285, 3286, 3287, 3287, 3288, 3289, 3289, 3290, 3290, 3291, 3292,
+	3292, 3293, 3294, 3294, 3295, 3295, 3296, 3297, 3297, 3298, 3299, 3299, 3300, 3300, 3301, 3302,
+	3302, 3303, 3304, 3304, 3305, 3305, 3306, 3307, 3307, 3308, 3309, 3309, 3310, 3310, 3311, 3312,
+	3312, 3313, 3314, 3314, 3315, 3315, 3316, 3317, 3317, 3318, 3319, 3319, 3320, 3320, 3321, 3322,
+	3322, 3323, 3323, 3324, 3325, 3325, 3326, 3327, 3327, 3328, 3328, 3329, 3330, 3330, 3331, 3332,
+	3332, 3333, 3333, 3334, 3335, 3335, 3336, 3336, 3337, 3338, 3338, 3339, 3340, 3340, 3341, 3341,
+	3342, 3343, 3343, 3344, 3345, 3345, 3346, 3346, 3347, 3348, 3348, 3349, 3349, 3350, 3351, 3351,
+	3352, 3352, 3353, 3354, 3354, 3355, 3356, 3356, 3357, 3357, 3358, 3359, 3359, 3360, 3360, 3361,
+	3362, 3362, 3363, 3364, 3364, 3365, 3365, 3366, 3367, 3367, 3368, 3368, 3369, 3370, 3370, 3371,
+	3371, 3372, 3373, 3373, 3374, 3375, 3375, 3376, 3376, 3377, 3378, 3378, 3379, 3379, 3380, 3381,
+	3381, 3382, 3382, 3383, 3384, 3384, 3385, 3385, 3386, 3387, 3387, 3388, 3389, 3389, 3390, 3390,
+	3391, 3392, 3392, 3393, 3393, 3394, 3395, 3395, 3396, 3396, 3397, 3398, 3398, 3399, 3399, 3400,
+	3401, 3401, 3402, 3402, 3403, 3404, 3404, 3405, 3405, 3406, 3407, 3407, 3408, 3408, 3409, 3410,
+	3410, 3411, 3411, 3412, 3413, 3413, 3414, 3414, 3415, 3416, 3416, 3417, 3418, 3418, 3419, 3419,
+	3420, 3421, 3421, 3422, 3422, 3423, 3424, 3424, 3425, 3425, 3426, 3427, 3427, 3428, 3428, 3429,
+	3430, 3430, 3431, 3431, 3432, 3433, 3433, 3434, 3434, 3435, 3435, 3436, 3437, 3437, 3438, 3438,
+	3439, 3440, 3440, 3441, 3441, 3442, 3443, 3443, 3444, 3444, 3445, 3446, 3446, 3447, 3447, 3448,
+	3449, 3449, 3450, 3450, 3451, 3452, 3452, 3453, 3453, 3454, 3455, 3455, 3456, 3456, 3457, 3458,
+	3458, 3459, 3459, 3460, 3461, 3461, 3462, 3462, 3463, 3463, 3464, 3465, 3465, 3466, 3466, 3467,
+	3468, 3468, 3469, 3469, 3470, 3471, 3471, 3472, 3472, 3473, 3474, 3474, 3475, 3475, 3476, 3476,
+	3477, 3478, 3478, 3479, 3479, 3480, 3481, 3481, 3482, 3482, 3483, 3484, 3484, 3485, 3485, 3486,
+	3486, 3487, 3488, 3488, 3489, 3489, 3490, 3491, 3491, 3492, 3492, 3493, 3494, 3494, 3495, 3495,
+	3496, 3496, 3497, 3498, 3498, 3499, 3499, 3500, 3501, 3501, 3502, 3502, 3503, 3504, 3504, 3505,
+	3505, 3506, 3506, 3507, 3508, 3508, 3509, 3509, 3510, 3511, 3511, 3512, 3512, 3513, 3513, 3514,
+	3515, 3515, 3516, 3516, 3517, 3518, 3518, 3519, 3519, 3520, 3520, 3521, 3522, 3522, 3523, 3523,
+	3524, 3525, 3525, 3526, 3526, 3527, 3527, 3528, 3529, 3529, 3530, 3530, 3531, 3531, 3532, 3533,
+	3533, 3534, 3534, 3535, 3536, 3536, 3537, 3537, 3538, 3538, 3539, 3540, 3540, 3541, 3541, 3542,
+	3542, 3543, 3544, 3544, 3545, 3545, 3546, 3547, 3547, 3548, 3548, 3549, 3549, 3550, 3551, 3551,
+	3552, 3552, 3553, 3553, 3554, 3555, 3555, 3556, 3556, 3557, 3557, 3558, 3559, 3559, 3560, 3560,
+	3561, 3561, 3562, 3563, 3563, 3564, 3564, 3565, 3566, 3566, 3567, 3567, 3568, 3568, 3569, 3570,
+	3570, 3571, 3571, 3572, 3572, 3573, 3574, 3574, 3575, 3575, 3576, 3576, 3577, 3578, 3578, 3579,
+	3579, 3580, 3580, 3581, 3582, 3582, 3583, 3583, 3584, 3584, 3585, 3586, 3586, 3587, 3587, 3588,
+	3588, 3589, 3590, 3590, 3591, 3591, 3592, 3592, 3593, 3594, 3594, 3595, 3595, 3596, 3596, 3597,
+	3597, 3598, 3599, 3599, 3600, 3600, 3601, 3601, 3602, 3603, 3603, 3604, 3604, 3605, 3605, 3606,
+	3607, 3607, 3608, 3608, 3609, 3609, 3610, 3611, 3611, 3612, 3612, 3613, 3613, 3614, 3615, 3615,
+	3616, 3616, 3617, 3617, 3618, 3618, 3619, 3620, 3620, 3621, 3621, 3622, 3622, 3623, 3624, 3624,
+	3625, 3625, 3626, 3626, 3627, 3627, 3628, 3629, 3629, 3630, 3630, 3631, 3631, 3632, 3633, 3633,
+	3634, 3634, 3635, 3635, 3636, 3636, 3637, 3638, 3638, 3639, 3639, 3640, 3640, 3641, 3642, 3642,
+	3643, 3643, 3644, 3644, 3645, 3645, 3646, 3647, 3647, 3648, 3648, 3649, 3649, 3650, 3650, 3651,
+	3652, 3652, 3653, 3653, 3654, 3654, 3655, 3656, 3656, 3657, 3657, 3658, 3658, 3659, 3659, 3660,
+	3661, 3661, 3662, 3662, 3663, 3663, 3664, 3664, 3665, 3666, 3666, 3667, 3667, 3668, 3668, 3669,
+	3669, 3670, 3671, 3671, 3672, 3672, 3673, 3673, 3674, 3674, 3675, 3676, 3676, 3677, 3677, 3678,
+	3678, 3679, 3679, 3680, 3681, 3681, 3682, 3682, 3683, 3683, 3684, 3684, 3685, 3686, 3686, 3687,
+	3687, 3688, 3688, 3689, 3689, 3690, 3691, 3691, 3692, 3692, 3693, 3693, 3694, 3694, 3695, 3695,
+	3696, 3697, 3697, 3698, 3698, 3699, 3699, 3700, 3700, 3701, 3702, 3702, 3703, 3703, 3704, 3704,
+	3705, 3705, 3706, 3707, 3707, 3708, 3708, 3709, 3709, 3710, 3710, 3711, 3711, 3712, 3713, 3713,
+	3714, 3714, 3715, 3715, 3716, 3716, 3717, 3717, 3718, 3719, 3719, 3720, 3720, 3721, 3721, 3722,
+	3722, 3723, 3724, 3724, 3725, 3725, 3726, 3726, 3727, 3727, 3728, 3728, 3729, 3730, 3730, 3731,
+	3731, 3732, 3732, 3733, 3733, 3734, 3734, 3735, 3736, 3736, 3737, 3737, 3738, 3738, 3739, 3739,
+	3740, 3740, 3741, 3742, 3742, 3743, 3743, 3744, 3744, 3745, 3745, 3746, 3746, 3747, 3748, 3748,
+	3749, 3749, 3750, 3750, 3751, 3751, 3752, 3752, 3753, 3753, 3754, 3755, 3755, 3756, 3756, 3757,
+	3757, 3758, 3758, 3759, 3759, 3760, 3761, 3761, 3762, 3762, 3763, 3763, 3764, 3764, 3765, 3765,
+	3766, 3766, 3767, 3768, 3768, 3769, 3769, 3770, 3770, 3771, 3771, 3772, 3772, 3773, 3773, 3774,
+	3775, 3775, 3776, 3776, 3777, 3777, 3778, 3778, 3779, 3779, 3780, 3781, 3781, 3782, 3782, 3783,
+	3783, 3784, 3784, 3785, 3785, 3786, 3786, 3787, 3787, 3788, 3789, 3789, 3790, 3790, 3791, 3791,
+	3792, 3792, 3793, 3793, 3794, 3794, 3795, 3796, 3796, 3797, 3797, 3798, 3798, 3799, 3799, 3800,
+	3800, 3801, 3801, 3802, 3802, 3803, 3804, 3804, 3805, 3805, 3806, 3806, 3807, 3807, 3808, 3808,
+	3809, 3809, 3810, 3811, 3811, 3812, 3812, 3813, 3813, 3814, 3814, 3815, 3815, 3816, 3816, 3817,
+	3817, 3818, 3819, 3819, 3820, 3820, 3821, 3821, 3822, 3822, 3823, 3823, 3824, 3824, 3825, 3825,
+	3826, 3826, 3827, 3828, 3828, 3829, 3829, 3830, 3830, 3831, 3831, 3832, 3832, 3833, 3833, 3834,
+	3834, 3835, 3835, 3836, 3837, 3837, 3838, 3838, 3839, 3839, 3840, 3840, 3841, 3841, 3842, 3842,
+	3843, 3843, 3844, 3844, 3845, 3846, 3846, 3847, 3847, 3848, 3848, 3849, 3849, 3850, 3850, 3851,
+	3851, 3852, 3852, 3853, 3853, 3854, 3855, 3855, 3856, 3856, 3857, 3857, 3858, 3858, 3859, 3859,
+	3860, 3860, 3861, 3861, 3862, 3862, 3863, 3863, 3864, 3864, 3865, 3866, 3866, 3867, 3867, 3868,
+	3868, 3869, 3869, 3870, 3870, 3871, 3871, 3872, 3872, 3873, 3873, 3874, 3874, 3875, 3876, 3876,
+	3877, 3877, 3878, 3878, 3879, 3879, 3880, 3880, 3881, 3881, 3882, 3882, 3883, 3883, 3884, 3884,
+	3885, 3885, 3886, 3886, 3887, 3888, 3888, 3889, 3889, 3890, 3890, 3891, 3891, 3892, 3892, 3893,
+	3893, 3894, 3894, 3895, 3895, 3896, 3896, 3897, 3897, 3898, 3898, 3899, 3900, 3900, 3901, 3901,
+	3902, 3902, 3903, 3903, 3904, 3904, 3905, 3905, 3906, 3906, 3907, 3907, 3908, 3908, 3909, 3909,
+	3910, 3910, 3911, 3911, 3912, 3912, 3913, 3914, 3914, 3915, 3915, 3916, 3916, 3917, 3917, 3918,
+	3918, 3919, 3919, 3920, 3920, 3921, 3921, 3922, 3922, 3923, 3923, 3924, 3924, 3925, 3925, 3926,
+	3926, 3927, 3927, 3928, 3929, 3929, 3930, 3930, 3931, 3931, 3932, 3932, 3933, 3933, 3934, 3934,
+	3935, 3935, 3936, 3936, 3937, 3937, 3938, 3938, 3939, 3939, 3940, 3940, 3941, 3941, 3942, 3942,
+	3943, 3943, 3944, 3944, 3945, 3945, 3946, 3947, 3947, 3948, 3948, 3949, 3949, 3950, 3950, 3951,
+	3951, 3952, 3952, 3953, 3953, 3954, 3954, 3955, 3955, 3956, 3956, 3957, 3957, 3958, 3958, 3959,
+	3959, 3960, 3960, 3961, 3961, 3962, 3962, 3963, 3963, 3964, 3964, 3965, 3965, 3966, 3966, 3967,
+	3967, 3968, 3969, 3969, 3970, 3970, 3971, 3971, 3972, 3972, 3973, 3973, 3974, 3974, 3975, 3975,
+	3976, 3976, 3977, 3977, 3978, 3978, 3979, 3979, 3980, 3980, 3981, 3981, 3982, 3982, 3983, 3983,
+	3984, 3984, 3985, 3985, 3986, 3986, 3987, 3987, 3988, 3988, 3989, 3989, 3990, 3990, 3991, 3991,
+	3992, 3992, 3993, 3993, 3994, 3994, 3995, 3995, 3996, 3996, 3997, 3997, 3998, 3998, 3999, 3999,
+	4000, 4001, 4001, 4002, 4002, 4003, 4003, 4004, 4004, 4005, 4005, 4006, 4006, 4007, 4007, 4008,
+	4008, 4009, 4009, 4010, 4010, 4011, 4011, 4012, 4012, 4013, 4013, 4014, 4014, 4015, 4015, 4016,
+	4016, 4017, 4017, 4018, 4018, 4019, 4019, 4020, 4020, 4021, 4021, 4022, 4022, 4023, 4023, 4024,
+	4024, 4025, 4025, 4026, 4026, 4027, 4027, 4028, 4028, 4029, 4029, 4030, 4030, 4031, 4031, 4032,
+	4032, 4033, 4033, 4034, 4034, 4035, 4035, 4036, 4036, 4037, 4037, 4038, 4038, 4039, 4039, 4040,
+	4040, 4041, 4041, 4042, 4042, 4043, 4043, 4044, 4044, 4045, 4045, 4046, 4046, 4047, 4047, 4048,
+	4048, 4049, 4049, 4050, 4050, 4051, 4051, 4052, 4052, 4053, 4053, 4054, 4054, 4055, 4055, 4056,
+	4056, 4057, 4057, 4058, 4058, 4059, 4059, 4060, 4060, 4061, 4061, 4062, 4062, 4063, 4063, 4064,
+	4064, 4065, 4065, 4066, 4066, 4067, 4067, 4068, 4068, 4069, 4069, 4070, 4070, 4071, 4071, 4072,
+	4072, 4073, 4073, 4074, 4074, 4075, 4075, 4076, 4076, 4077, 4077, 4078, 4078, 4079, 4079, 4080,
+	4080,
+};
+
+/* Generated table */
+const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_SMPTE2084 + 1][TPG_COLOR_CSC_BLACK + 1] = {
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][1] = { 2953, 2963, 586 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][2] = { 0, 2967, 2937 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][3] = { 88, 2990, 575 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][4] = { 3016, 259, 2933 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][5] = { 3030, 405, 558 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][6] = { 478, 428, 2931 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][1] = { 3068, 3077, 838 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][2] = { 0, 3081, 3053 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][3] = { 241, 3102, 828 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][4] = { 3126, 504, 3050 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][1] = { 3046, 3054, 886 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][2] = { 0, 3058, 3031 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][3] = { 360, 3079, 877 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][4] = { 3103, 587, 3027 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][5] = { 3116, 723, 861 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][6] = { 789, 744, 3025 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][3] = { 78, 2978, 536 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][4] = { 3004, 230, 2920 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][5] = { 3018, 363, 518 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][6] = { 437, 387, 2918 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][1] = { 2145, 2159, 142 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][2] = { 0, 2164, 2122 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][3] = { 19, 2198, 138 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][4] = { 2236, 57, 2116 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][5] = { 2256, 90, 133 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][6] = { 110, 96, 2113 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][1] = { 3186, 3194, 1121 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][2] = { 0, 3197, 3173 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][3] = { 523, 3216, 1112 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][4] = { 3237, 792, 3169 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3802, 3805, 2602 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 3806, 3797 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1780, 3812, 2592 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3820, 2215, 3796 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3824, 2409, 2574 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2491, 2435, 3795 },
+	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][1] = { 2953, 2963, 586 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][2] = { 0, 2967, 2937 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][3] = { 88, 2990, 575 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][4] = { 3016, 259, 2933 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][5] = { 3030, 405, 558 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][6] = { 478, 428, 2931 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][1] = { 3068, 3077, 838 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][2] = { 0, 3081, 3053 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][3] = { 241, 3102, 828 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][4] = { 3126, 504, 3050 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][1] = { 3046, 3054, 886 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][2] = { 0, 3058, 3031 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][3] = { 360, 3079, 877 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][4] = { 3103, 587, 3027 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][5] = { 3116, 723, 861 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][6] = { 789, 744, 3025 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][3] = { 78, 2978, 536 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][4] = { 3004, 230, 2920 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][5] = { 3018, 363, 518 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][6] = { 437, 387, 2918 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][1] = { 2145, 2159, 142 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][2] = { 0, 2164, 2122 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][3] = { 19, 2198, 138 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][4] = { 2236, 57, 2116 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][5] = { 2256, 90, 133 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][6] = { 110, 96, 2113 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][1] = { 3186, 3194, 1121 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][2] = { 0, 3197, 3173 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][3] = { 523, 3216, 1112 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][4] = { 3237, 792, 3169 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3802, 3805, 2602 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 3806, 3797 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1780, 3812, 2592 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3820, 2215, 3796 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3824, 2409, 2574 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2491, 2435, 3795 },
+	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][3] = { 547, 2939, 547 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][4] = { 2939, 547, 2939 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][5] = { 2939, 547, 547 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][6] = { 547, 547, 2939 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 800 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][2] = { 800, 3056, 3056 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][3] = { 800, 3056, 800 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][4] = { 3056, 800, 3056 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 851 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][2] = { 851, 3033, 3033 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][3] = { 851, 3033, 851 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][4] = { 3033, 851, 3033 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][5] = { 3033, 851, 851 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3033 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][3] = { 507, 2926, 507 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][4] = { 2926, 507, 2926 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][5] = { 2926, 507, 507 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2926 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 130 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][2] = { 130, 2125, 2125 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][3] = { 130, 2125, 130 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][4] = { 2125, 130, 2125 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][5] = { 2125, 130, 130 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2125 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1084 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][2] = { 1084, 3175, 3175 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][3] = { 1084, 3175, 1084 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][4] = { 3175, 1084, 3175 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2563 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][2] = { 2563, 3798, 3798 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][3] = { 2563, 3798, 2563 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][4] = { 3798, 2563, 3798 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][5] = { 3798, 2563, 2563 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3798 },
+	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][1] = { 2892, 3034, 910 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][2] = { 1715, 2916, 2914 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][3] = { 1631, 3012, 828 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][4] = { 2497, 119, 2867 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][5] = { 2440, 649, 657 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][6] = { 740, 0, 2841 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3055, 3056 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][1] = { 3013, 3142, 1157 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][2] = { 1926, 3034, 3032 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][3] = { 1847, 3121, 1076 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][4] = { 2651, 304, 2990 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][5] = { 2599, 901, 909 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][6] = { 991, 0, 2966 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][1] = { 2989, 3120, 1180 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][2] = { 1913, 3011, 3009 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][3] = { 1836, 3099, 1105 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][4] = { 2627, 413, 2966 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][5] = { 2576, 943, 951 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][6] = { 1026, 0, 2942 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2879, 3022, 874 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][2] = { 1688, 2903, 2901 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][3] = { 1603, 2999, 791 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][4] = { 2479, 106, 2853 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][5] = { 2422, 610, 618 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][6] = { 702, 0, 2827 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][1] = { 2059, 2262, 266 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][2] = { 771, 2092, 2089 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][3] = { 705, 2229, 231 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][4] = { 1550, 26, 2024 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][5] = { 1484, 163, 165 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][6] = { 196, 0, 1988 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][1] = { 3136, 3251, 1429 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][2] = { 2150, 3156, 3154 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][3] = { 2077, 3233, 1352 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][4] = { 2812, 589, 3116 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][5] = { 2765, 1182, 1190 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][6] = { 1270, 0, 3094 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3784, 3825, 2879 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][2] = { 3351, 3791, 3790 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][3] = { 3311, 3819, 2815 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3659, 1900, 3777 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3640, 2662, 2669 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2743, 0, 3769 },
+	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 464 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][2] = { 786, 2939, 2939 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][3] = { 786, 2939, 464 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][4] = { 2879, 547, 2956 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][5] = { 2879, 547, 547 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][6] = { 547, 547, 2956 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 717 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][2] = { 1036, 3056, 3056 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][3] = { 1036, 3056, 717 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][4] = { 3001, 800, 3071 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][5] = { 3001, 800, 799 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3071 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 776 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][2] = { 1068, 3033, 3033 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][3] = { 1068, 3033, 776 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][4] = { 2977, 851, 3048 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][5] = { 2977, 851, 851 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3048 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 423 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][2] = { 749, 2926, 2926 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][3] = { 749, 2926, 423 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][4] = { 2865, 507, 2943 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][5] = { 2865, 507, 507 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2943 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 106 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][2] = { 214, 2125, 2125 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][3] = { 214, 2125, 106 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][4] = { 2041, 130, 2149 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][5] = { 2041, 130, 130 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2149 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1003 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][2] = { 1313, 3175, 3175 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][3] = { 1313, 3175, 1003 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][4] = { 3126, 1084, 3188 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][5] = { 3126, 1084, 1084 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3188 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2476 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][2] = { 2782, 3798, 3798 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][3] = { 2782, 3798, 2476 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][4] = { 3780, 2563, 3803 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][5] = { 3780, 2563, 2563 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3803 },
+	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][3] = { 547, 2939, 547 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][4] = { 2939, 547, 2939 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][5] = { 2939, 547, 547 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2939 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 800 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][2] = { 800, 3056, 3056 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][3] = { 800, 3056, 800 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][4] = { 3056, 800, 3056 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 851 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][2] = { 851, 3033, 3033 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][3] = { 851, 3033, 851 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][4] = { 3033, 851, 3033 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][5] = { 3033, 851, 851 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3033 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 507, 2926, 507 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2926, 507, 2926 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2926, 507, 507 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2926 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 130 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][2] = { 130, 2125, 2125 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][3] = { 130, 2125, 130 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][4] = { 2125, 130, 2125 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][5] = { 2125, 130, 130 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2125 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1084 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][2] = { 1084, 3175, 3175 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][3] = { 1084, 3175, 1084 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][4] = { 3175, 1084, 3175 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2563 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 2563, 3798, 3798 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 2563, 3798, 2563 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 3798, 2563, 3798 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 3798, 2563, 2563 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3798 },
+	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][3] = { 1622, 2939, 781 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][4] = { 2502, 547, 2881 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][5] = { 2502, 547, 547 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2881 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 1031 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][2] = { 1838, 3056, 3056 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][3] = { 1838, 3056, 1031 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][4] = { 2657, 800, 3002 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][5] = { 2657, 800, 800 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3002 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 1063 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][2] = { 1828, 3033, 3033 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][3] = { 1828, 3033, 1063 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][4] = { 2633, 851, 2979 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][5] = { 2633, 851, 851 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 2979 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 744 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 1594, 2926, 2926 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 1594, 2926, 744 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2484, 507, 2867 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2484, 507, 507 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2867 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 212 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][2] = { 698, 2125, 2125 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][3] = { 698, 2125, 212 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][4] = { 1557, 130, 2043 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][5] = { 1557, 130, 130 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2043 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1308 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][2] = { 2069, 3175, 3175 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][3] = { 2069, 3175, 1308 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][4] = { 2816, 1084, 3127 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2778 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 3306, 3798, 3798 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 3306, 3798, 2778 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 3661, 2563, 3781 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 3661, 2563, 2563 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3781 },
+	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][1] = { 2877, 2923, 1058 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][2] = { 1837, 2840, 2916 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][3] = { 1734, 2823, 993 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][4] = { 2427, 961, 2812 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][5] = { 2351, 912, 648 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][6] = { 792, 618, 2788 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][1] = { 2999, 3041, 1301 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][2] = { 2040, 2965, 3034 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][3] = { 1944, 2950, 1238 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][4] = { 2587, 1207, 2940 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][5] = { 2517, 1159, 900 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][6] = { 1042, 870, 2917 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][1] = { 2976, 3018, 1315 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][2] = { 2024, 2942, 3011 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][3] = { 1930, 2926, 1256 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][4] = { 2563, 1227, 2916 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][5] = { 2494, 1183, 943 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][6] = { 1073, 916, 2894 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][1] = { 2864, 2910, 1024 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][2] = { 1811, 2826, 2903 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][3] = { 1707, 2809, 958 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][4] = { 2408, 926, 2798 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][5] = { 2331, 876, 609 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][6] = { 755, 579, 2773 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][1] = { 2039, 2102, 338 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][2] = { 873, 1987, 2092 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][3] = { 787, 1965, 305 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][4] = { 1468, 290, 1949 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][5] = { 1382, 268, 162 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][6] = { 216, 152, 1917 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][1] = { 3124, 3161, 1566 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][2] = { 2255, 3094, 3156 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][3] = { 2166, 3080, 1506 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][4] = { 2754, 1477, 3071 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][5] = { 2690, 1431, 1182 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][6] = { 1318, 1153, 3051 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][1] = { 3780, 3793, 2984 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][2] = { 3406, 3768, 3791 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][3] = { 3359, 3763, 2939 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][4] = { 3636, 2916, 3760 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][5] = { 3609, 2880, 2661 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][6] = { 2786, 2633, 3753 },
+	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][1] = { 2936, 2934, 992 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][2] = { 1159, 2890, 2916 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][3] = { 1150, 2885, 921 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][4] = { 2751, 766, 2837 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][5] = { 2747, 747, 650 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][6] = { 563, 570, 2812 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3055 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][1] = { 3052, 3051, 1237 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][2] = { 1397, 3011, 3034 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][3] = { 1389, 3006, 1168 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][4] = { 2884, 1016, 2962 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][5] = { 2880, 998, 902 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][6] = { 816, 823, 2940 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][1] = { 3029, 3028, 1255 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][2] = { 1406, 2988, 3011 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][3] = { 1398, 2983, 1190 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][4] = { 2860, 1050, 2939 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][5] = { 2857, 1033, 945 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][6] = { 866, 873, 2916 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][1] = { 2923, 2921, 957 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][2] = { 1125, 2877, 2902 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][3] = { 1116, 2871, 885 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][4] = { 2736, 729, 2823 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][5] = { 2732, 710, 611 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][6] = { 523, 531, 2798 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][1] = { 2120, 2118, 305 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][2] = { 392, 2056, 2092 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][3] = { 387, 2049, 271 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][4] = { 1868, 206, 1983 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][5] = { 1863, 199, 163 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][6] = { 135, 137, 1950 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][1] = { 3172, 3170, 1505 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][2] = { 1657, 3135, 3155 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][3] = { 1649, 3130, 1439 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][4] = { 3021, 1294, 3091 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][5] = { 3018, 1276, 1184 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][6] = { 1100, 1107, 3071 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][1] = { 3797, 3796, 2938 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][2] = { 3049, 3783, 3791 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][3] = { 3044, 3782, 2887 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][4] = { 3741, 2765, 3768 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][5] = { 3740, 2749, 2663 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][6] = { 2580, 2587, 3760 },
+	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+};
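+
+/*
+ * Note on the layout of the table above (illustrative summary): the first
+ * two indices are the colorspace and the transfer function, the third runs
+ * over the CSC test colors (0 .. TPG_COLOR_CSC_BLACK).  Each entry is an
+ * R'G'B' triplet in 12.4 fixed point, i.e. 0..4080 corresponds to 0..255,
+ * as produced by the generator program below.
+ */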
+
+#else
+
+/* This code generates the table above */
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static const double rec709_to_ntsc1953[3][3] = {
+	/*
+	 * This transform uses the Bradford method to compensate for
+	 * the different whitepoints.
+	 */
+	{ 0.6785011, 0.2883441, 0.0331548 },
+	{ 0.0165284, 1.0518725, -0.0684009 },
+	{ 0.0179230, 0.0506096, 0.9314674 }
+};
+
+static const double rec709_to_ebu[3][3] = {
+	{ 0.9578221, 0.0421779, -0.0000000 },
+	{ -0.0000000, 1.0000000, 0.0000000 },
+	{ -0.0000000, -0.0119367, 1.0119367 }
+};
+
+static const double rec709_to_170m[3][3] = {
+	{ 1.0653640, -0.0553900, -0.0099740 },
+	{ -0.0196361, 1.0363630, -0.0167269 },
+	{ 0.0016327, 0.0044133, 0.9939540 },
+};
+
+static const double rec709_to_240m[3][3] = {
+	{ 1.0653640, -0.0553900, -0.0099740 },
+	{ -0.0196361, 1.0363630, -0.0167269 },
+	{ 0.0016327, 0.0044133, 0.9939540 },
+};
+
+static const double rec709_to_adobergb[3][3] = {
+	{ 0.7151627, 0.2848373, -0.0000000 },
+	{ 0.0000000, 1.0000000, 0.0000000 },
+	{ -0.0000000, 0.0411705, 0.9588295 },
+};
+
+static const double rec709_to_bt2020[3][3] = {
+	{ 0.6274524, 0.3292485, 0.0432991 },
+	{ 0.0691092, 0.9195311, 0.0113597 },
+	{ 0.0163976, 0.0880301, 0.8955723 },
+};
+
+static const double rec709_to_dcip3[3][3] = {
+	/*
+	 * This transform uses the Bradford method to compensate for
+	 * the different whitepoints.
+	 */
+	{ 0.8686648, 0.1288456, 0.0024896 },
+	{ 0.0345479, 0.9618084, 0.0036437 },
+	{ 0.0167785, 0.0710559, 0.9121655 }
+};
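+
+/*
+ * Illustrative note: each rec709_to_* matrix above maps linear Rec. 709 RGB
+ * into linear RGB with the primaries of the target colorspace (the NTSC 1953
+ * and DCI-P3 matrices additionally adapt the white point, as noted).  A quick
+ * sanity check is that every row sums to ~1.0, so Rec. 709 reference white
+ * (1.0, 1.0, 1.0) maps to (1.0, 1.0, 1.0) in the target space, e.g.:
+ *
+ *   0.6274524 + 0.3292485 + 0.0432991 == 1.0   (first row of rec709_to_bt2020)
+ */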
+
+static void mult_matrix(double *r, double *g, double *b, const double m[3][3])
+{
+	double ir, ig, ib;
+
+	ir = m[0][0] * (*r) + m[0][1] * (*g) + m[0][2] * (*b);
+	ig = m[1][0] * (*r) + m[1][1] * (*g) + m[1][2] * (*b);
+	ib = m[2][0] * (*r) + m[2][1] * (*g) + m[2][2] * (*b);
+	*r = ir;
+	*g = ig;
+	*b = ib;
+}
+
+static double transfer_srgb_to_rgb(double v)
+{
+	if (v < -0.04045)
+		return -pow((-v + 0.055) / 1.055, 2.4);

+	return (v <= 0.04045) ? v / 12.92 : pow((v + 0.055) / 1.055, 2.4);
+}
+
+static double transfer_rgb_to_srgb(double v)
+{
+	if (v <= -0.0031308)
+		return -1.055 * pow(-v, 1.0 / 2.4) + 0.055;
+	if (v <= 0.0031308)
+		return v * 12.92;
+	return 1.055 * pow(v, 1.0 / 2.4) - 0.055;
+}
+
+static double transfer_rgb_to_smpte240m(double v)
+{
+	return (v <= 0.0228) ? v * 4.0 : 1.1115 * pow(v, 0.45) - 0.1115;
+}
+
+static double transfer_rgb_to_rec709(double v)
+{
+	if (v <= -0.018)
+		return -1.099 * pow(-v, 0.45) + 0.099;
+	return (v < 0.018) ? v * 4.5 : 1.099 * pow(v, 0.45) - 0.099;
+}
+
+static double transfer_rec709_to_rgb(double v)
+{
+	return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45);
+}
+
+static double transfer_rgb_to_adobergb(double v)
+{
+	return pow(v, 1.0 / 2.19921875);
+}
+
+static double transfer_rgb_to_dcip3(double v)
+{
+	return pow(v, 1.0 / 2.6);
+}
+
+static double transfer_rgb_to_smpte2084(double v)
+{
+	const double m1 = (2610.0 / 4096.0) / 4.0;
+	const double m2 = 128.0 * 2523.0 / 4096.0;
+	const double c1 = 3424.0 / 4096.0;
+	const double c2 = 32.0 * 2413.0 / 4096.0;
+	const double c3 = 32.0 * 2392.0 / 4096.0;
+
+	v = pow(v, m1);
+	return pow((c1 + c2 * v) / (1 + c3 * v), m2);
+}
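+
+/*
+ * Illustrative note: the function above is the SMPTE ST 2084 (PQ) inverse
+ * EOTF,
+ *
+ *   N = ((c1 + c2 * L^m1) / (1 + c3 * L^m1))^m2
+ *
+ * with L the linear value (normalized here so that 1.0 is the peak level)
+ * and N the non-linear code value.  The constants evaluate to the standard
+ * values m1 = 2610/16384 ~= 0.1593, m2 = 78.84375, c1 = 0.8359375,
+ * c2 = 18.8515625 and c3 = 18.6875.
+ */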
+
+static double transfer_srgb_to_rec709(double v)
+{
+	return transfer_rgb_to_rec709(transfer_srgb_to_rgb(v));
+}
+
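+/*
+ * Convert one sRGB-encoded color to the given colorspace and transfer
+ * function: decode sRGB to linear light, convert the primaries where they
+ * differ from Rec. 709, clamp to [0, 1] and re-encode with the requested
+ * transfer function.
+ */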
+static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func,
+		double *r, double *g, double *b)
+{
+	int clamp = 1;
+
+	*r = transfer_srgb_to_rgb(*r);
+	*g = transfer_srgb_to_rgb(*g);
+	*b = transfer_srgb_to_rgb(*b);
+
+	/* Convert from Rec. 709 linear RGB to the target colorspace primaries */
+	switch (colorspace) {
+	case V4L2_COLORSPACE_SMPTE240M:
+		mult_matrix(r, g, b, rec709_to_240m);
+		break;
+	case V4L2_COLORSPACE_SMPTE170M:
+		mult_matrix(r, g, b, rec709_to_170m);
+		break;
+	case V4L2_COLORSPACE_470_SYSTEM_BG:
+		mult_matrix(r, g, b, rec709_to_ebu);
+		break;
+	case V4L2_COLORSPACE_470_SYSTEM_M:
+		mult_matrix(r, g, b, rec709_to_ntsc1953);
+		break;
+	case V4L2_COLORSPACE_ADOBERGB:
+		mult_matrix(r, g, b, rec709_to_adobergb);
+		break;
+	case V4L2_COLORSPACE_BT2020:
+		mult_matrix(r, g, b, rec709_to_bt2020);
+		break;
+	case V4L2_COLORSPACE_DCI_P3:
+		mult_matrix(r, g, b, rec709_to_dcip3);
+		break;
+	case V4L2_COLORSPACE_SRGB:
+	case V4L2_COLORSPACE_REC709:
+		break;
+	default:
+		break;
+	}
+
+	if (clamp) {
+		*r = ((*r) < 0) ? 0 : (((*r) > 1) ? 1 : (*r));
+		*g = ((*g) < 0) ? 0 : (((*g) > 1) ? 1 : (*g));
+		*b = ((*b) < 0) ? 0 : (((*b) > 1) ? 1 : (*b));
+	}
+
+	switch (xfer_func) {
+	case V4L2_XFER_FUNC_709:
+		*r = transfer_rgb_to_rec709(*r);
+		*g = transfer_rgb_to_rec709(*g);
+		*b = transfer_rgb_to_rec709(*b);
+		break;
+	case V4L2_XFER_FUNC_SRGB:
+		*r = transfer_rgb_to_srgb(*r);
+		*g = transfer_rgb_to_srgb(*g);
+		*b = transfer_rgb_to_srgb(*b);
+		break;
+	case V4L2_XFER_FUNC_ADOBERGB:
+		*r = transfer_rgb_to_adobergb(*r);
+		*g = transfer_rgb_to_adobergb(*g);
+		*b = transfer_rgb_to_adobergb(*b);
+		break;
+	case V4L2_XFER_FUNC_DCI_P3:
+		*r = transfer_rgb_to_dcip3(*r);
+		*g = transfer_rgb_to_dcip3(*g);
+		*b = transfer_rgb_to_dcip3(*b);
+		break;
+	case V4L2_XFER_FUNC_SMPTE2084:
+		*r = transfer_rgb_to_smpte2084(*r);
+		*g = transfer_rgb_to_smpte2084(*g);
+		*b = transfer_rgb_to_smpte2084(*b);
+		break;
+	case V4L2_XFER_FUNC_SMPTE240M:
+		*r = transfer_rgb_to_smpte240m(*r);
+		*g = transfer_rgb_to_smpte240m(*g);
+		*b = transfer_rgb_to_smpte240m(*b);
+		break;
+	case V4L2_XFER_FUNC_NONE:
+		break;
+	}
+}
+
+int main(int argc, char **argv)
+{
+	static const unsigned colorspaces[] = {
+		0,
+		V4L2_COLORSPACE_SMPTE170M,
+		V4L2_COLORSPACE_SMPTE240M,
+		V4L2_COLORSPACE_REC709,
+		0,
+		V4L2_COLORSPACE_470_SYSTEM_M,
+		V4L2_COLORSPACE_470_SYSTEM_BG,
+		0,
+		V4L2_COLORSPACE_SRGB,
+		V4L2_COLORSPACE_ADOBERGB,
+		V4L2_COLORSPACE_BT2020,
+		0,
+		V4L2_COLORSPACE_DCI_P3,
+	};
+	static const char * const colorspace_names[] = {
+		"",
+		"V4L2_COLORSPACE_SMPTE170M",
+		"V4L2_COLORSPACE_SMPTE240M",
+		"V4L2_COLORSPACE_REC709",
+		"",
+		"V4L2_COLORSPACE_470_SYSTEM_M",
+		"V4L2_COLORSPACE_470_SYSTEM_BG",
+		"",
+		"V4L2_COLORSPACE_SRGB",
+		"V4L2_COLORSPACE_ADOBERGB",
+		"V4L2_COLORSPACE_BT2020",
+		"",
+		"V4L2_COLORSPACE_DCI_P3",
+	};
+	static const char * const xfer_func_names[] = {
+		"",
+		"V4L2_XFER_FUNC_709",
+		"V4L2_XFER_FUNC_SRGB",
+		"V4L2_XFER_FUNC_ADOBERGB",
+		"V4L2_XFER_FUNC_SMPTE240M",
+		"V4L2_XFER_FUNC_NONE",
+		"V4L2_XFER_FUNC_DCI_P3",
+		"V4L2_XFER_FUNC_SMPTE2084",
+	};
+	int i;
+	int x;
+	int c;
+
+	printf("/* Generated table */\n");
+	printf("const unsigned short tpg_rec709_to_linear[255 * 16 + 1] = {");
+	for (i = 0; i <= 255 * 16; i++) {
+		if (i % 16 == 0)
+			printf("\n\t");
+		printf("%4d,%s",
+			(int)(0.5 + 16.0 * 255.0 *
+				transfer_rec709_to_rgb(i / (16.0 * 255.0))),
+			i % 16 == 15 || i == 255 * 16 ? "" : " ");
+	}
+	printf("\n};\n\n");
+
+	printf("/* Generated table */\n");
+	printf("const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = {");
+	for (i = 0; i <= 255 * 16; i++) {
+		if (i % 16 == 0)
+			printf("\n\t");
+		printf("%4d,%s",
+			(int)(0.5 + 16.0 * 255.0 *
+				transfer_rgb_to_rec709(i / (16.0 * 255.0))),
+			i % 16 == 15 || i == 255 * 16 ? "" : " ");
+	}
+	printf("\n};\n\n");
+
+	printf("/* Generated table */\n");
+	printf("const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_SMPTE2084 + 1][TPG_COLOR_CSC_BLACK + 1] = {\n");
+	for (c = 0; c <= V4L2_COLORSPACE_DCI_P3; c++) {
+		for (x = 1; x <= V4L2_XFER_FUNC_SMPTE2084; x++) {
+			for (i = 0; i <= TPG_COLOR_CSC_BLACK; i++) {
+				double r, g, b;
+
+				if (colorspaces[c] == 0)
+					continue;
+
+				r = tpg_colors[i].r / 255.0;
+				g = tpg_colors[i].g / 255.0;
+				b = tpg_colors[i].b / 255.0;
+
+				csc(c, x, &r, &g, &b);
+
+				printf("\t[%s][%s][%d] = { %d, %d, %d },\n",
+					colorspace_names[c],
+					xfer_func_names[x], i,
+					(int)(r * 4080), (int)(g * 4080), (int)(b * 4080));
+			}
+		}
+	}
+	printf("};\n\n");
+	return 0;
+}
+
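+/*
+ * Illustrative note: the generated tables above are the verbatim output of
+ * this program.  To regenerate them, build this branch of the file as an
+ * ordinary user-space program (it needs the V4L2 colorspace and transfer
+ * function definitions, the tpg_colors[] table and libm) and paste its
+ * output back.  The printed values are in 12.4 fixed point: 0..4080
+ * corresponds to 0..255.
+ */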
+#endif
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
new file mode 100644
index 0000000..cf1dadd
--- /dev/null
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -0,0 +1,2335 @@
+/*
+ * v4l2-tpg-core.c - Test Pattern Generator
+ *
+ * Note: gen_twopix and tpg_gen_text are based on code from vivi.c. See the
+ * vivi.c source for the copyright information of those functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <media/v4l2-tpg.h>
+
+/* Must remain in sync with enum tpg_pattern */
+const char * const tpg_pattern_strings[] = {
+	"75% Colorbar",
+	"100% Colorbar",
+	"CSC Colorbar",
+	"Horizontal 100% Colorbar",
+	"100% Color Squares",
+	"100% Black",
+	"100% White",
+	"100% Red",
+	"100% Green",
+	"100% Blue",
+	"16x16 Checkers",
+	"2x2 Checkers",
+	"1x1 Checkers",
+	"2x2 Red/Green Checkers",
+	"1x1 Red/Green Checkers",
+	"Alternating Hor Lines",
+	"Alternating Vert Lines",
+	"One Pixel Wide Cross",
+	"Two Pixels Wide Cross",
+	"Ten Pixels Wide Cross",
+	"Gray Ramp",
+	"Noise",
+	NULL
+};
+EXPORT_SYMBOL_GPL(tpg_pattern_strings);
+
+/* Must remain in sync with enum tpg_aspect */
+const char * const tpg_aspect_strings[] = {
+	"Source Width x Height",
+	"4x3",
+	"14x9",
+	"16x9",
+	"16x9 Anamorphic",
+	NULL
+};
+EXPORT_SYMBOL_GPL(tpg_aspect_strings);
+
+/*
+ * Sine table: sin[0] = 127 * sin(-180 degrees)
+ *             sin[128] = 127 * sin(0 degrees)
+ *             sin[256] = 127 * sin(180 degrees)
+ */
+static const s8 sin[257] = {
+	   0,   -4,   -7,  -11,  -13,  -18,  -20,  -22,  -26,  -29,  -33,  -35,  -37,  -41,  -43,  -48,
+	 -50,  -52,  -56,  -58,  -62,  -63,  -65,  -69,  -71,  -75,  -76,  -78,  -82,  -83,  -87,  -88,
+	 -90,  -93,  -94,  -97,  -99, -101, -103, -104, -107, -108, -110, -111, -112, -114, -115, -117,
+	-118, -119, -120, -121, -122, -123, -123, -124, -125, -125, -126, -126, -127, -127, -127, -127,
+	-127, -127, -127, -127, -126, -126, -125, -125, -124, -124, -123, -122, -121, -120, -119, -118,
+	-117, -116, -114, -113, -111, -110, -109, -107, -105, -103, -101, -100,  -97,  -96,  -93,  -91,
+	 -90,  -87,  -85,  -82,  -80,  -76,  -75,  -73,  -69,  -67,  -63,  -62,  -60,  -56,  -54,  -50,
+	 -48,  -46,  -41,  -39,  -35,  -33,  -31,  -26,  -24,  -20,  -18,  -15,  -11,   -9,   -4,   -2,
+	   0,    2,    4,    9,   11,   15,   18,   20,   24,   26,   31,   33,   35,   39,   41,   46,
+	  48,   50,   54,   56,   60,   62,   64,   67,   69,   73,   75,   76,   80,   82,   85,   87,
+	  90,   91,   93,   96,   97,  100,  101,  103,  105,  107,  109,  110,  111,  113,  114,  116,
+	 117,  118,  119,  120,  121,  122,  123,  124,  124,  125,  125,  126,  126,  127,  127,  127,
+	 127,  127,  127,  127,  127,  126,  126,  125,  125,  124,  123,  123,  122,  121,  120,  119,
+	 118,  117,  115,  114,  112,  111,  110,  108,  107,  104,  103,  101,   99,   97,   94,   93,
+	  90,   88,   87,   83,   82,   78,   76,   75,   71,   69,   65,   64,   62,   58,   56,   52,
+	  50,   48,   43,   41,   37,   35,   33,   29,   26,   22,   20,   18,   13,   11,    7,    4,
+	   0,
+};
+
+#define cos(idx) sin[((idx) + 64) % sizeof(sin)]
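+
+/*
+ * Illustrative note: the table spans one full period (-180..+180 degrees) in
+ * 256 steps plus a duplicated end point, so one entry corresponds to 360/256
+ * degrees and the cos() macro is simply a phase shift of 64 entries (90
+ * degrees).  sizeof(sin) is 257 since the table is an s8 array, which keeps
+ * the shifted index within the 257 valid entries.
+ */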
+
+/* Global font descriptor */
+static const u8 *font8x16;
+
+void tpg_set_font(const u8 *f)
+{
+	font8x16 = f;
+}
+EXPORT_SYMBOL_GPL(tpg_set_font);
+
+void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h)
+{
+	memset(tpg, 0, sizeof(*tpg));
+	tpg->scaled_width = tpg->src_width = w;
+	tpg->src_height = tpg->buf_height = h;
+	tpg->crop.width = tpg->compose.width = w;
+	tpg->crop.height = tpg->compose.height = h;
+	tpg->recalc_colors = true;
+	tpg->recalc_square_border = true;
+	tpg->brightness = 128;
+	tpg->contrast = 128;
+	tpg->saturation = 128;
+	tpg->hue = 0;
+	tpg->mv_hor_mode = TPG_MOVE_NONE;
+	tpg->mv_vert_mode = TPG_MOVE_NONE;
+	tpg->field = V4L2_FIELD_NONE;
+	tpg_s_fourcc(tpg, V4L2_PIX_FMT_RGB24);
+	tpg->colorspace = V4L2_COLORSPACE_SRGB;
+	tpg->perc_fill = 100;
+}
+EXPORT_SYMBOL_GPL(tpg_init);
+
+int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
+{
+	unsigned pat;
+	unsigned plane;
+
+	tpg->max_line_width = max_w;
+	for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) {
+		for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+			unsigned pixelsz = plane ? 2 : 4;
+
+			tpg->lines[pat][plane] = vzalloc(max_w * 2 * pixelsz);
+			if (!tpg->lines[pat][plane])
+				return -ENOMEM;
+			if (plane == 0)
+				continue;
+			tpg->downsampled_lines[pat][plane] = vzalloc(max_w * 2 * pixelsz);
+			if (!tpg->downsampled_lines[pat][plane])
+				return -ENOMEM;
+		}
+	}
+	for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+		unsigned pixelsz = plane ? 2 : 4;
+
+		tpg->contrast_line[plane] = vzalloc(max_w * pixelsz);
+		if (!tpg->contrast_line[plane])
+			return -ENOMEM;
+		tpg->black_line[plane] = vzalloc(max_w * pixelsz);
+		if (!tpg->black_line[plane])
+			return -ENOMEM;
+		tpg->random_line[plane] = vzalloc(max_w * 2 * pixelsz);
+		if (!tpg->random_line[plane])
+			return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tpg_alloc);
+
+void tpg_free(struct tpg_data *tpg)
+{
+	unsigned pat;
+	unsigned plane;
+
+	for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++)
+		for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+			vfree(tpg->lines[pat][plane]);
+			tpg->lines[pat][plane] = NULL;
+			if (plane == 0)
+				continue;
+			vfree(tpg->downsampled_lines[pat][plane]);
+			tpg->downsampled_lines[pat][plane] = NULL;
+		}
+	for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+		vfree(tpg->contrast_line[plane]);
+		vfree(tpg->black_line[plane]);
+		vfree(tpg->random_line[plane]);
+		tpg->contrast_line[plane] = NULL;
+		tpg->black_line[plane] = NULL;
+		tpg->random_line[plane] = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(tpg_free);
+
+bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
+{
+	tpg->fourcc = fourcc;
+	tpg->planes = 1;
+	tpg->buffers = 1;
+	tpg->recalc_colors = true;
+	tpg->interleaved = false;
+	tpg->vdownsampling[0] = 1;
+	tpg->hdownsampling[0] = 1;
+	tpg->hmask[0] = ~0;
+	tpg->hmask[1] = ~0;
+	tpg->hmask[2] = ~0;
+
+	switch (fourcc) {
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+		tpg->interleaved = true;
+		tpg->vdownsampling[1] = 1;
+		tpg->hdownsampling[1] = 1;
+		tpg->planes = 2;
+		/* fall through */
+	case V4L2_PIX_FMT_RGB332:
+	case V4L2_PIX_FMT_RGB565:
+	case V4L2_PIX_FMT_RGB565X:
+	case V4L2_PIX_FMT_RGB444:
+	case V4L2_PIX_FMT_XRGB444:
+	case V4L2_PIX_FMT_ARGB444:
+	case V4L2_PIX_FMT_RGB555:
+	case V4L2_PIX_FMT_XRGB555:
+	case V4L2_PIX_FMT_ARGB555:
+	case V4L2_PIX_FMT_RGB555X:
+	case V4L2_PIX_FMT_XRGB555X:
+	case V4L2_PIX_FMT_ARGB555X:
+	case V4L2_PIX_FMT_BGR666:
+	case V4L2_PIX_FMT_RGB24:
+	case V4L2_PIX_FMT_BGR24:
+	case V4L2_PIX_FMT_RGB32:
+	case V4L2_PIX_FMT_BGR32:
+	case V4L2_PIX_FMT_XRGB32:
+	case V4L2_PIX_FMT_XBGR32:
+	case V4L2_PIX_FMT_ARGB32:
+	case V4L2_PIX_FMT_ABGR32:
+	case V4L2_PIX_FMT_GREY:
+	case V4L2_PIX_FMT_Y16:
+	case V4L2_PIX_FMT_Y16_BE:
+		tpg->is_yuv = false;
+		break;
+	case V4L2_PIX_FMT_YUV444:
+	case V4L2_PIX_FMT_YUV555:
+	case V4L2_PIX_FMT_YUV565:
+	case V4L2_PIX_FMT_YUV32:
+		tpg->is_yuv = true;
+		break;
+	case V4L2_PIX_FMT_YUV420M:
+	case V4L2_PIX_FMT_YVU420M:
+		tpg->buffers = 3;
+		/* fall through */
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+		tpg->vdownsampling[1] = 2;
+		tpg->vdownsampling[2] = 2;
+		tpg->hdownsampling[1] = 2;
+		tpg->hdownsampling[2] = 2;
+		tpg->planes = 3;
+		tpg->is_yuv = true;
+		break;
+	case V4L2_PIX_FMT_YUV422M:
+	case V4L2_PIX_FMT_YVU422M:
+		tpg->buffers = 3;
+		/* fall through */
+	case V4L2_PIX_FMT_YUV422P:
+		tpg->vdownsampling[1] = 1;
+		tpg->vdownsampling[2] = 1;
+		tpg->hdownsampling[1] = 2;
+		tpg->hdownsampling[2] = 2;
+		tpg->planes = 3;
+		tpg->is_yuv = true;
+		break;
+	case V4L2_PIX_FMT_NV16M:
+	case V4L2_PIX_FMT_NV61M:
+		tpg->buffers = 2;
+		/* fall through */
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		tpg->vdownsampling[1] = 1;
+		tpg->hdownsampling[1] = 1;
+		tpg->hmask[1] = ~1;
+		tpg->planes = 2;
+		tpg->is_yuv = true;
+		break;
+	case V4L2_PIX_FMT_NV12M:
+	case V4L2_PIX_FMT_NV21M:
+		tpg->buffers = 2;
+		/* fall through */
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+		tpg->vdownsampling[1] = 2;
+		tpg->hdownsampling[1] = 1;
+		tpg->hmask[1] = ~1;
+		tpg->planes = 2;
+		tpg->is_yuv = true;
+		break;
+	case V4L2_PIX_FMT_YUV444M:
+	case V4L2_PIX_FMT_YVU444M:
+		tpg->buffers = 3;
+		tpg->planes = 3;
+		tpg->vdownsampling[1] = 1;
+		tpg->vdownsampling[2] = 1;
+		tpg->hdownsampling[1] = 1;
+		tpg->hdownsampling[2] = 1;
+		tpg->is_yuv = true;
+		break;
+	case V4L2_PIX_FMT_NV24:
+	case V4L2_PIX_FMT_NV42:
+		tpg->vdownsampling[1] = 1;
+		tpg->hdownsampling[1] = 1;
+		tpg->planes = 2;
+		tpg->is_yuv = true;
+		break;
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_VYUY:
+		tpg->hmask[0] = ~1;
+		tpg->is_yuv = true;
+		break;
+	default:
+		return false;
+	}
+
+	switch (fourcc) {
+	case V4L2_PIX_FMT_GREY:
+	case V4L2_PIX_FMT_RGB332:
+		tpg->twopixelsize[0] = 2;
+		break;
+	case V4L2_PIX_FMT_RGB565:
+	case V4L2_PIX_FMT_RGB565X:
+	case V4L2_PIX_FMT_RGB444:
+	case V4L2_PIX_FMT_XRGB444:
+	case V4L2_PIX_FMT_ARGB444:
+	case V4L2_PIX_FMT_RGB555:
+	case V4L2_PIX_FMT_XRGB555:
+	case V4L2_PIX_FMT_ARGB555:
+	case V4L2_PIX_FMT_RGB555X:
+	case V4L2_PIX_FMT_XRGB555X:
+	case V4L2_PIX_FMT_ARGB555X:
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_YUV444:
+	case V4L2_PIX_FMT_YUV555:
+	case V4L2_PIX_FMT_YUV565:
+	case V4L2_PIX_FMT_Y16:
+	case V4L2_PIX_FMT_Y16_BE:
+		tpg->twopixelsize[0] = 2 * 2;
+		break;
+	case V4L2_PIX_FMT_RGB24:
+	case V4L2_PIX_FMT_BGR24:
+		tpg->twopixelsize[0] = 2 * 3;
+		break;
+	case V4L2_PIX_FMT_BGR666:
+	case V4L2_PIX_FMT_RGB32:
+	case V4L2_PIX_FMT_BGR32:
+	case V4L2_PIX_FMT_XRGB32:
+	case V4L2_PIX_FMT_XBGR32:
+	case V4L2_PIX_FMT_ARGB32:
+	case V4L2_PIX_FMT_ABGR32:
+	case V4L2_PIX_FMT_YUV32:
+		tpg->twopixelsize[0] = 2 * 4;
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV12M:
+	case V4L2_PIX_FMT_NV21M:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+	case V4L2_PIX_FMT_NV16M:
+	case V4L2_PIX_FMT_NV61M:
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+		tpg->twopixelsize[0] = 2;
+		tpg->twopixelsize[1] = 2;
+		break;
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SRGGB12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SBGGR12:
+		tpg->twopixelsize[0] = 4;
+		tpg->twopixelsize[1] = 4;
+		break;
+	case V4L2_PIX_FMT_YUV444M:
+	case V4L2_PIX_FMT_YVU444M:
+	case V4L2_PIX_FMT_YUV422M:
+	case V4L2_PIX_FMT_YVU422M:
+	case V4L2_PIX_FMT_YUV422P:
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+	case V4L2_PIX_FMT_YUV420M:
+	case V4L2_PIX_FMT_YVU420M:
+		tpg->twopixelsize[0] = 2;
+		tpg->twopixelsize[1] = 2;
+		tpg->twopixelsize[2] = 2;
+		break;
+	case V4L2_PIX_FMT_NV24:
+	case V4L2_PIX_FMT_NV42:
+		tpg->twopixelsize[0] = 2;
+		tpg->twopixelsize[1] = 4;
+		break;
+	}
+	return true;
+}
+EXPORT_SYMBOL_GPL(tpg_s_fourcc);
+
+void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
+		const struct v4l2_rect *compose)
+{
+	tpg->crop = *crop;
+	tpg->compose = *compose;
+	tpg->scaled_width = (tpg->src_width * tpg->compose.width +
+				 tpg->crop.width - 1) / tpg->crop.width;
+	tpg->scaled_width &= ~1;
+	if (tpg->scaled_width > tpg->max_line_width)
+		tpg->scaled_width = tpg->max_line_width;
+	if (tpg->scaled_width < 2)
+		tpg->scaled_width = 2;
+	tpg->recalc_lines = true;
+}
+EXPORT_SYMBOL_GPL(tpg_s_crop_compose);
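+
+/*
+ * Illustrative example: with a 1280-pixel-wide source, crop.width = 640 and
+ * compose.width = 320, tpg->scaled_width becomes (1280 * 320 + 639) / 640 =
+ * 640; that value is already even and, assuming it does not exceed
+ * max_line_width, is used as-is.
+ */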
+
+void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
+		       u32 field)
+{
+	unsigned p;
+
+	tpg->src_width = width;
+	tpg->src_height = height;
+	tpg->field = field;
+	tpg->buf_height = height;
+	if (V4L2_FIELD_HAS_T_OR_B(field))
+		tpg->buf_height /= 2;
+	tpg->scaled_width = width;
+	tpg->crop.top = tpg->crop.left = 0;
+	tpg->crop.width = width;
+	tpg->crop.height = height;
+	tpg->compose.top = tpg->compose.left = 0;
+	tpg->compose.width = width;
+	tpg->compose.height = tpg->buf_height;
+	for (p = 0; p < tpg->planes; p++)
+		tpg->bytesperline[p] = (width * tpg->twopixelsize[p]) /
+				       (2 * tpg->hdownsampling[p]);
+	tpg->recalc_square_border = true;
+}
+EXPORT_SYMBOL_GPL(tpg_reset_source);
+
+static enum tpg_color tpg_get_textbg_color(struct tpg_data *tpg)
+{
+	switch (tpg->pattern) {
+	case TPG_PAT_BLACK:
+		return TPG_COLOR_100_WHITE;
+	case TPG_PAT_CSC_COLORBAR:
+		return TPG_COLOR_CSC_BLACK;
+	default:
+		return TPG_COLOR_100_BLACK;
+	}
+}
+
+static enum tpg_color tpg_get_textfg_color(struct tpg_data *tpg)
+{
+	switch (tpg->pattern) {
+	case TPG_PAT_75_COLORBAR:
+	case TPG_PAT_CSC_COLORBAR:
+		return TPG_COLOR_CSC_WHITE;
+	case TPG_PAT_BLACK:
+		return TPG_COLOR_100_BLACK;
+	default:
+		return TPG_COLOR_100_WHITE;
+	}
+}
+
+static inline int rec709_to_linear(int v)
+{
+	v = clamp(v, 0, 0xff0);
+	return tpg_rec709_to_linear[v];
+}
+
+static inline int linear_to_rec709(int v)
+{
+	v = clamp(v, 0, 0xff0);
+	return tpg_linear_to_rec709[v];
+}
+
+static void rgb2ycbcr(const int m[3][3], int r, int g, int b,
+			int y_offset, int *y, int *cb, int *cr)
+{
+	*y  = ((m[0][0] * r + m[0][1] * g + m[0][2] * b) >> 16) + (y_offset << 4);
+	*cb = ((m[1][0] * r + m[1][1] * g + m[1][2] * b) >> 16) + (128 << 4);
+	*cr = ((m[2][0] * r + m[2][1] * g + m[2][2] * b) >> 16) + (128 << 4);
+}
+
+static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
+			   int *y, int *cb, int *cr)
+{
+#define COEFF(v, r) ((int)(0.5 + (v) * (r) * 256.0))
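+	/*
+	 * The coefficients below are pre-scaled by 256 and by the nominal
+	 * range (219/224 for limited range, 255 for full range); for example
+	 * COEFF(0.299, 219) = (int)(0.5 + 0.299 * 219 * 256.0) = 16763.
+	 * A multiply by an RGB component in the 0 - 0xff0 range followed by
+	 * the >> 16 in rgb2ycbcr() then yields Y'CbCr values carrying the
+	 * same four fractional bits.
+	 */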
+
+	static const int bt601[3][3] = {
+		{ COEFF(0.299, 219),  COEFF(0.587, 219),  COEFF(0.114, 219)  },
+		{ COEFF(-0.169, 224), COEFF(-0.331, 224), COEFF(0.5, 224)    },
+		{ COEFF(0.5, 224),    COEFF(-0.419, 224), COEFF(-0.081, 224) },
+	};
+	static const int bt601_full[3][3] = {
+		{ COEFF(0.299, 255),  COEFF(0.587, 255),  COEFF(0.114, 255)  },
+		{ COEFF(-0.169, 255), COEFF(-0.331, 255), COEFF(0.5, 255)    },
+		{ COEFF(0.5, 255),    COEFF(-0.419, 255), COEFF(-0.081, 255) },
+	};
+	static const int rec709[3][3] = {
+		{ COEFF(0.2126, 219),  COEFF(0.7152, 219),  COEFF(0.0722, 219)  },
+		{ COEFF(-0.1146, 224), COEFF(-0.3854, 224), COEFF(0.5, 224)     },
+		{ COEFF(0.5, 224),     COEFF(-0.4542, 224), COEFF(-0.0458, 224) },
+	};
+	static const int rec709_full[3][3] = {
+		{ COEFF(0.2126, 255),  COEFF(0.7152, 255),  COEFF(0.0722, 255)  },
+		{ COEFF(-0.1146, 255), COEFF(-0.3854, 255), COEFF(0.5, 255)     },
+		{ COEFF(0.5, 255),     COEFF(-0.4542, 255), COEFF(-0.0458, 255) },
+	};
+	static const int smpte240m[3][3] = {
+		{ COEFF(0.212, 219),  COEFF(0.701, 219),  COEFF(0.087, 219)  },
+		{ COEFF(-0.116, 224), COEFF(-0.384, 224), COEFF(0.5, 224)    },
+		{ COEFF(0.5, 224),    COEFF(-0.445, 224), COEFF(-0.055, 224) },
+	};
+	static const int smpte240m_full[3][3] = {
+		{ COEFF(0.212, 255),  COEFF(0.701, 255),  COEFF(0.087, 255)  },
+		{ COEFF(-0.116, 255), COEFF(-0.384, 255), COEFF(0.5, 255)    },
+		{ COEFF(0.5, 255),    COEFF(-0.445, 255), COEFF(-0.055, 255) },
+	};
+	static const int bt2020[3][3] = {
+		{ COEFF(0.2627, 219),  COEFF(0.6780, 219),  COEFF(0.0593, 219)  },
+		{ COEFF(-0.1396, 224), COEFF(-0.3604, 224), COEFF(0.5, 224)     },
+		{ COEFF(0.5, 224),     COEFF(-0.4598, 224), COEFF(-0.0402, 224) },
+	};
+	static const int bt2020_full[3][3] = {
+		{ COEFF(0.2627, 255),  COEFF(0.6780, 255),  COEFF(0.0593, 255)  },
+		{ COEFF(-0.1396, 255), COEFF(-0.3604, 255), COEFF(0.5, 255)     },
+		{ COEFF(0.5, 255),     COEFF(-0.4598, 255), COEFF(-0.0402, 255) },
+	};
+	static const int bt2020c[4] = {
+		COEFF(1.0 / 1.9404, 224), COEFF(1.0 / 1.5816, 224),
+		COEFF(1.0 / 1.7184, 224), COEFF(1.0 / 0.9936, 224),
+	};
+	static const int bt2020c_full[4] = {
+		COEFF(1.0 / 1.9404, 255), COEFF(1.0 / 1.5816, 255),
+		COEFF(1.0 / 1.7184, 255), COEFF(1.0 / 0.9936, 255),
+	};
+
+	bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
+	unsigned y_offset = full ? 0 : 16;
+	int lin_y, yc;
+
+	switch (tpg->real_ycbcr_enc) {
+	case V4L2_YCBCR_ENC_601:
+	case V4L2_YCBCR_ENC_SYCC:
+		rgb2ycbcr(full ? bt601_full : bt601, r, g, b, y_offset, y, cb, cr);
+		break;
+	case V4L2_YCBCR_ENC_XV601:
+		/* Ignore the quantization range; there is only one possible
+		 * Y'CbCr encoding. */
+		rgb2ycbcr(bt601, r, g, b, 16, y, cb, cr);
+		break;
+	case V4L2_YCBCR_ENC_XV709:
+		/* Ignore the quantization range; there is only one possible
+		 * Y'CbCr encoding. */
+		rgb2ycbcr(rec709, r, g, b, 16, y, cb, cr);
+		break;
+	case V4L2_YCBCR_ENC_BT2020:
+		rgb2ycbcr(full ? bt2020_full : bt2020, r, g, b, y_offset, y, cb, cr);
+		break;
+	case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+		lin_y = (COEFF(0.2627, 255) * rec709_to_linear(r) +
+			 COEFF(0.6780, 255) * rec709_to_linear(g) +
+			 COEFF(0.0593, 255) * rec709_to_linear(b)) >> 16;
+		yc = linear_to_rec709(lin_y);
+		*y = full ? yc : (yc * 219) / 255 + (16 << 4);
+		if (b <= yc)
+			*cb = (((b - yc) * (full ? bt2020c_full[0] : bt2020c[0])) >> 16) + (128 << 4);
+		else
+			*cb = (((b - yc) * (full ? bt2020c_full[1] : bt2020c[1])) >> 16) + (128 << 4);
+		if (r <= yc)
+			*cr = (((r - yc) * (full ? bt2020c_full[2] : bt2020c[2])) >> 16) + (128 << 4);
+		else
+			*cr = (((r - yc) * (full ? bt2020c_full[3] : bt2020c[3])) >> 16) + (128 << 4);
+		break;
+	case V4L2_YCBCR_ENC_SMPTE240M:
+		rgb2ycbcr(full ? smpte240m_full : smpte240m, r, g, b, y_offset, y, cb, cr);
+		break;
+	case V4L2_YCBCR_ENC_709:
+	default:
+		rgb2ycbcr(full ? rec709_full : rec709, r, g, b, y_offset, y, cb, cr);
+		break;
+	}
+}
+
+static void ycbcr2rgb(const int m[3][3], int y, int cb, int cr,
+			int y_offset, int *r, int *g, int *b)
+{
+	y -= y_offset << 4;
+	cb -= 128 << 4;
+	cr -= 128 << 4;
+	*r = m[0][0] * y + m[0][1] * cb + m[0][2] * cr;
+	*g = m[1][0] * y + m[1][1] * cb + m[1][2] * cr;
+	*b = m[2][0] * y + m[2][1] * cb + m[2][2] * cr;
+	*r = clamp(*r >> 12, 0, 0xff0);
+	*g = clamp(*g >> 12, 0, 0xff0);
+	*b = clamp(*b >> 12, 0, 0xff0);
+}
+
+static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr,
+			   int *r, int *g, int *b)
+{
+#undef COEFF
+#define COEFF(v, r) ((int)(0.5 + (v) * ((255.0 * 255.0 * 16.0) / (r))))
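+	/*
+	 * Inverse scaling: COEFF(v, r) here is v * 255 * 255 * 16 / r, e.g.
+	 * COEFF(1, 219) = 4751, so after the multiplications in ycbcr2rgb()
+	 * a >> 12 brings R'G'B' back into the 0 - 0xff0 range.
+	 */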
+	static const int bt601[3][3] = {
+		{ COEFF(1, 219), COEFF(0, 224),       COEFF(1.4020, 224)  },
+		{ COEFF(1, 219), COEFF(-0.3441, 224), COEFF(-0.7141, 224) },
+		{ COEFF(1, 219), COEFF(1.7720, 224),  COEFF(0, 224)       },
+	};
+	static const int bt601_full[3][3] = {
+		{ COEFF(1, 255), COEFF(0, 255),       COEFF(1.4020, 255)  },
+		{ COEFF(1, 255), COEFF(-0.3441, 255), COEFF(-0.7141, 255) },
+		{ COEFF(1, 255), COEFF(1.7720, 255),  COEFF(0, 255)       },
+	};
+	static const int rec709[3][3] = {
+		{ COEFF(1, 219), COEFF(0, 224),       COEFF(1.5748, 224)  },
+		{ COEFF(1, 219), COEFF(-0.1873, 224), COEFF(-0.4681, 224) },
+		{ COEFF(1, 219), COEFF(1.8556, 224),  COEFF(0, 224)       },
+	};
+	static const int rec709_full[3][3] = {
+		{ COEFF(1, 255), COEFF(0, 255),       COEFF(1.5748, 255)  },
+		{ COEFF(1, 255), COEFF(-0.1873, 255), COEFF(-0.4681, 255) },
+		{ COEFF(1, 255), COEFF(1.8556, 255),  COEFF(0, 255)       },
+	};
+	static const int smpte240m[3][3] = {
+		{ COEFF(1, 219), COEFF(0, 224),       COEFF(1.5756, 224)  },
+		{ COEFF(1, 219), COEFF(-0.2253, 224), COEFF(-0.4767, 224) },
+		{ COEFF(1, 219), COEFF(1.8270, 224),  COEFF(0, 224)       },
+	};
+	static const int smpte240m_full[3][3] = {
+		{ COEFF(1, 255), COEFF(0, 255),       COEFF(1.5756, 255)  },
+		{ COEFF(1, 255), COEFF(-0.2253, 255), COEFF(-0.4767, 255) },
+		{ COEFF(1, 255), COEFF(1.8270, 255),  COEFF(0, 255)       },
+	};
+	static const int bt2020[3][3] = {
+		{ COEFF(1, 219), COEFF(0, 224),       COEFF(1.4746, 224)  },
+		{ COEFF(1, 219), COEFF(-0.1646, 224), COEFF(-0.5714, 224) },
+		{ COEFF(1, 219), COEFF(1.8814, 224),  COEFF(0, 224)       },
+	};
+	static const int bt2020_full[3][3] = {
+		{ COEFF(1, 255), COEFF(0, 255),       COEFF(1.4746, 255)  },
+		{ COEFF(1, 255), COEFF(-0.1646, 255), COEFF(-0.5714, 255) },
+		{ COEFF(1, 255), COEFF(1.8814, 255),  COEFF(0, 255)       },
+	};
+	static const int bt2020c[4] = {
+		COEFF(1.9404, 224), COEFF(1.5816, 224),
+		COEFF(1.7184, 224), COEFF(0.9936, 224),
+	};
+	static const int bt2020c_full[4] = {
+		COEFF(1.9404, 255), COEFF(1.5816, 255),
+		COEFF(1.7184, 255), COEFF(0.9936, 255),
+	};
+
+	bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
+	unsigned y_offset = full ? 0 : 16;
+	int y_fac = full ? COEFF(1.0, 255) : COEFF(1.0, 219);
+	int lin_r, lin_g, lin_b, lin_y;
+
+	switch (tpg->real_ycbcr_enc) {
+	case V4L2_YCBCR_ENC_601:
+	case V4L2_YCBCR_ENC_SYCC:
+		ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, y_offset, r, g, b);
+		break;
+	case V4L2_YCBCR_ENC_XV601:
+		/* Ignore the quantization range; there is only one possible
+		 * Y'CbCr encoding. */
+		ycbcr2rgb(bt601, y, cb, cr, 16, r, g, b);
+		break;
+	case V4L2_YCBCR_ENC_XV709:
+		/* Ignore the quantization range; there is only one possible
+		 * Y'CbCr encoding. */
+		ycbcr2rgb(rec709, y, cb, cr, 16, r, g, b);
+		break;
+	case V4L2_YCBCR_ENC_BT2020:
+		ycbcr2rgb(full ? bt2020_full : bt2020, y, cb, cr, y_offset, r, g, b);
+		break;
+	case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+		y -= full ? 0 : 16 << 4;
+		cb -= 128 << 4;
+		cr -= 128 << 4;
+
+		if (cb <= 0)
+			*b = y_fac * y + (full ? bt2020c_full[0] : bt2020c[0]) * cb;
+		else
+			*b = y_fac * y + (full ? bt2020c_full[1] : bt2020c[1]) * cb;
+		*b = *b >> 12;
+		if (cr <= 0)
+			*r = y_fac * y + (full ? bt2020c_full[2] : bt2020c[2]) * cr;
+		else
+			*r = y_fac * y + (full ? bt2020c_full[3] : bt2020c[3]) * cr;
+		*r = *r >> 12;
+		lin_r = rec709_to_linear(*r);
+		lin_b = rec709_to_linear(*b);
+		lin_y = rec709_to_linear((y * 255) / (full ? 255 : 219));
+
+		lin_g = COEFF(1.0 / 0.6780, 255) * lin_y -
+			COEFF(0.2627 / 0.6780, 255) * lin_r -
+			COEFF(0.0593 / 0.6780, 255) * lin_b;
+		*g = linear_to_rec709(lin_g >> 12);
+		break;
+	case V4L2_YCBCR_ENC_SMPTE240M:
+		ycbcr2rgb(full ? smpte240m_full : smpte240m, y, cb, cr, y_offset, r, g, b);
+		break;
+	case V4L2_YCBCR_ENC_709:
+	default:
+		ycbcr2rgb(full ? rec709_full : rec709, y, cb, cr, y_offset, r, g, b);
+		break;
+	}
+}
+
+/* precalculate color bar values to speed up rendering */
+static void precalculate_color(struct tpg_data *tpg, int k)
+{
+	int col = k;
+	int r = tpg_colors[col].r;
+	int g = tpg_colors[col].g;
+	int b = tpg_colors[col].b;
+
+	if (k == TPG_COLOR_TEXTBG) {
+		col = tpg_get_textbg_color(tpg);
+
+		r = tpg_colors[col].r;
+		g = tpg_colors[col].g;
+		b = tpg_colors[col].b;
+	} else if (k == TPG_COLOR_TEXTFG) {
+		col = tpg_get_textfg_color(tpg);
+
+		r = tpg_colors[col].r;
+		g = tpg_colors[col].g;
+		b = tpg_colors[col].b;
+	} else if (tpg->pattern == TPG_PAT_NOISE) {
+		r = g = b = prandom_u32_max(256);
+	} else if (k == TPG_COLOR_RANDOM) {
+		r = g = b = tpg->qual_offset + prandom_u32_max(196);
+	} else if (k >= TPG_COLOR_RAMP) {
+		r = g = b = k - TPG_COLOR_RAMP;
+	}
+
+	if (tpg->pattern == TPG_PAT_CSC_COLORBAR && col <= TPG_COLOR_CSC_BLACK) {
+		r = tpg_csc_colors[tpg->colorspace][tpg->real_xfer_func][col].r;
+		g = tpg_csc_colors[tpg->colorspace][tpg->real_xfer_func][col].g;
+		b = tpg_csc_colors[tpg->colorspace][tpg->real_xfer_func][col].b;
+	} else {
+		r <<= 4;
+		g <<= 4;
+		b <<= 4;
+	}
+	if (tpg->qual == TPG_QUAL_GRAY || tpg->fourcc == V4L2_PIX_FMT_GREY ||
+	    tpg->fourcc == V4L2_PIX_FMT_Y16 ||
+	    tpg->fourcc == V4L2_PIX_FMT_Y16_BE) {
+		/* Rec. 709 Luma function */
+		/* (0.2126, 0.7152, 0.0722) * (255 * 256) */
+		r = g = b = (13879 * r + 46688 * g + 4713 * b) >> 16;
+	}
+
+	/*
+	 * The assumption is that the RGB output is always full range,
+	 * so only if the rgb_range overrides the 'real' rgb range do
+	 * we need to convert the RGB values.
+	 *
+	 * Remember that r, g and b are still in the 0 - 0xff0 range.
+	 */
+	if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
+	    tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL) {
+		/*
+		 * Convert from full range (which is what r, g and b are)
+		 * to limited range (which is the 'real' RGB range), which
+		 * is then interpreted as full range.
+		 */
+		r = (r * 219) / 255 + (16 << 4);
+		g = (g * 219) / 255 + (16 << 4);
+		b = (b * 219) / 255 + (16 << 4);
+	} else if (tpg->real_rgb_range != V4L2_DV_RGB_RANGE_LIMITED &&
+		   tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED) {
+		/*
+		 * Clamp r, g and b to the limited range and convert to full
+		 * range since that's what we deliver.
+		 */
+		r = clamp(r, 16 << 4, 235 << 4);
+		g = clamp(g, 16 << 4, 235 << 4);
+		b = clamp(b, 16 << 4, 235 << 4);
+		r = (r - (16 << 4)) * 255 / 219;
+		g = (g - (16 << 4)) * 255 / 219;
+		b = (b - (16 << 4)) * 255 / 219;
+	}
+
+	if (tpg->brightness != 128 || tpg->contrast != 128 ||
+	    tpg->saturation != 128 || tpg->hue) {
+		/*
+		 * Apply the brightness, contrast, saturation and hue controls.
+		 * 128 is the neutral value for the first three, 0 for hue.
+		 */
+		int y, cb, cr;
+		int tmp_cb, tmp_cr;
+
+		/* First convert to YCbCr */
+
+		color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
+
+		y = (16 << 4) + ((y - (16 << 4)) * tpg->contrast) / 128;
+		y += (tpg->brightness << 4) - (128 << 4);
+
+		cb -= 128 << 4;
+		cr -= 128 << 4;
+		tmp_cb = (cb * cos(128 + tpg->hue)) / 127 + (cr * sin[128 + tpg->hue]) / 127;
+		tmp_cr = (cr * cos(128 + tpg->hue)) / 127 - (cb * sin[128 + tpg->hue]) / 127;
+
+		cb = (128 << 4) + (tmp_cb * tpg->contrast * tpg->saturation) / (128 * 128);
+		cr = (128 << 4) + (tmp_cr * tpg->contrast * tpg->saturation) / (128 * 128);
+		if (tpg->is_yuv) {
+			tpg->colors[k][0] = clamp(y >> 4, 1, 254);
+			tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
+			tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
+			return;
+		}
+		ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
+	}
+
+	if (tpg->is_yuv) {
+		/* Convert to YCbCr */
+		int y, cb, cr;
+
+		color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
+
+		if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
+			y = clamp(y, 16 << 4, 235 << 4);
+			cb = clamp(cb, 16 << 4, 240 << 4);
+			cr = clamp(cr, 16 << 4, 240 << 4);
+		}
+		y = clamp(y >> 4, 1, 254);
+		cb = clamp(cb >> 4, 1, 254);
+		cr = clamp(cr >> 4, 1, 254);
+		switch (tpg->fourcc) {
+		case V4L2_PIX_FMT_YUV444:
+			y >>= 4;
+			cb >>= 4;
+			cr >>= 4;
+			break;
+		case V4L2_PIX_FMT_YUV555:
+			y >>= 3;
+			cb >>= 3;
+			cr >>= 3;
+			break;
+		case V4L2_PIX_FMT_YUV565:
+			y >>= 3;
+			cb >>= 2;
+			cr >>= 3;
+			break;
+		}
+		tpg->colors[k][0] = y;
+		tpg->colors[k][1] = cb;
+		tpg->colors[k][2] = cr;
+	} else {
+		if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
+			r = (r * 219) / 255 + (16 << 4);
+			g = (g * 219) / 255 + (16 << 4);
+			b = (b * 219) / 255 + (16 << 4);
+		}
+		switch (tpg->fourcc) {
+		case V4L2_PIX_FMT_RGB332:
+			r >>= 9;
+			g >>= 9;
+			b >>= 10;
+			break;
+		case V4L2_PIX_FMT_RGB565:
+		case V4L2_PIX_FMT_RGB565X:
+			r >>= 7;
+			g >>= 6;
+			b >>= 7;
+			break;
+		case V4L2_PIX_FMT_RGB444:
+		case V4L2_PIX_FMT_XRGB444:
+		case V4L2_PIX_FMT_ARGB444:
+			r >>= 8;
+			g >>= 8;
+			b >>= 8;
+			break;
+		case V4L2_PIX_FMT_RGB555:
+		case V4L2_PIX_FMT_XRGB555:
+		case V4L2_PIX_FMT_ARGB555:
+		case V4L2_PIX_FMT_RGB555X:
+		case V4L2_PIX_FMT_XRGB555X:
+		case V4L2_PIX_FMT_ARGB555X:
+			r >>= 7;
+			g >>= 7;
+			b >>= 7;
+			break;
+		case V4L2_PIX_FMT_BGR666:
+			r >>= 6;
+			g >>= 6;
+			b >>= 6;
+			break;
+		default:
+			r >>= 4;
+			g >>= 4;
+			b >>= 4;
+			break;
+		}
+
+		tpg->colors[k][0] = r;
+		tpg->colors[k][1] = g;
+		tpg->colors[k][2] = b;
+	}
+}
+
+static void tpg_precalculate_colors(struct tpg_data *tpg)
+{
+	int k;
+
+	for (k = 0; k < TPG_COLOR_MAX; k++)
+		precalculate_color(tpg, k);
+}
+
+/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */
+static void gen_twopix(struct tpg_data *tpg,
+		u8 buf[TPG_MAX_PLANES][8], int color, bool odd)
+{
+	unsigned offset = odd * tpg->twopixelsize[0] / 2;
+	u8 alpha = tpg->alpha_component;
+	u8 r_y, g_u, b_v;
+
+	if (tpg->alpha_red_only && color != TPG_COLOR_CSC_RED &&
+				   color != TPG_COLOR_100_RED &&
+				   color != TPG_COLOR_75_RED)
+		alpha = 0;
+	if (color == TPG_COLOR_RANDOM)
+		precalculate_color(tpg, color);
+	r_y = tpg->colors[color][0]; /* R or precalculated Y */
+	g_u = tpg->colors[color][1]; /* G or precalculated U */
+	b_v = tpg->colors[color][2]; /* B or precalculated V */
+
+	switch (tpg->fourcc) {
+	case V4L2_PIX_FMT_GREY:
+		buf[0][offset] = r_y;
+		break;
+	case V4L2_PIX_FMT_Y16:
+		/*
+		 * Ideally both bytes should be set to r_y, but then you won't
+		 * be able to detect endian problems. So keep it 0 except for
+		 * the corner case where r_y is 0xff so white really will be
+		 * white (0xffff).
+		 */
+		buf[0][offset] = r_y == 0xff ? r_y : 0;
+		buf[0][offset+1] = r_y;
+		break;
+	case V4L2_PIX_FMT_Y16_BE:
+		/* See comment for V4L2_PIX_FMT_Y16 above */
+		buf[0][offset] = r_y;
+		buf[0][offset+1] = r_y == 0xff ? r_y : 0;
+		break;
+	case V4L2_PIX_FMT_YUV422M:
+	case V4L2_PIX_FMT_YUV422P:
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YUV420M:
+		buf[0][offset] = r_y;
+		if (odd) {
+			buf[1][0] = (buf[1][0] + g_u) / 2;
+			buf[2][0] = (buf[2][0] + b_v) / 2;
+			buf[1][1] = buf[1][0];
+			buf[2][1] = buf[2][0];
+			break;
+		}
+		buf[1][0] = g_u;
+		buf[2][0] = b_v;
+		break;
+	case V4L2_PIX_FMT_YVU422M:
+	case V4L2_PIX_FMT_YVU420:
+	case V4L2_PIX_FMT_YVU420M:
+		buf[0][offset] = r_y;
+		if (odd) {
+			buf[1][0] = (buf[1][0] + b_v) / 2;
+			buf[2][0] = (buf[2][0] + g_u) / 2;
+			buf[1][1] = buf[1][0];
+			buf[2][1] = buf[2][0];
+			break;
+		}
+		buf[1][0] = b_v;
+		buf[2][0] = g_u;
+		break;
+
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV12M:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV16M:
+		buf[0][offset] = r_y;
+		if (odd) {
+			buf[1][0] = (buf[1][0] + g_u) / 2;
+			buf[1][1] = (buf[1][1] + b_v) / 2;
+			break;
+		}
+		buf[1][0] = g_u;
+		buf[1][1] = b_v;
+		break;
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV21M:
+	case V4L2_PIX_FMT_NV61:
+	case V4L2_PIX_FMT_NV61M:
+		buf[0][offset] = r_y;
+		if (odd) {
+			buf[1][0] = (buf[1][0] + b_v) / 2;
+			buf[1][1] = (buf[1][1] + g_u) / 2;
+			break;
+		}
+		buf[1][0] = b_v;
+		buf[1][1] = g_u;
+		break;
+
+	case V4L2_PIX_FMT_YUV444M:
+		buf[0][offset] = r_y;
+		buf[1][offset] = g_u;
+		buf[2][offset] = b_v;
+		break;
+
+	case V4L2_PIX_FMT_YVU444M:
+		buf[0][offset] = r_y;
+		buf[1][offset] = b_v;
+		buf[2][offset] = g_u;
+		break;
+
+	case V4L2_PIX_FMT_NV24:
+		buf[0][offset] = r_y;
+		buf[1][2 * offset] = g_u;
+		buf[1][2 * offset + 1] = b_v;
+		break;
+
+	case V4L2_PIX_FMT_NV42:
+		buf[0][offset] = r_y;
+		buf[1][2 * offset] = b_v;
+		buf[1][2 * offset + 1] = g_u;
+		break;
+
+	case V4L2_PIX_FMT_YUYV:
+		buf[0][offset] = r_y;
+		if (odd) {
+			buf[0][1] = (buf[0][1] + g_u) / 2;
+			buf[0][3] = (buf[0][3] + b_v) / 2;
+			break;
+		}
+		buf[0][1] = g_u;
+		buf[0][3] = b_v;
+		break;
+	case V4L2_PIX_FMT_UYVY:
+		buf[0][offset + 1] = r_y;
+		if (odd) {
+			buf[0][0] = (buf[0][0] + g_u) / 2;
+			buf[0][2] = (buf[0][2] + b_v) / 2;
+			break;
+		}
+		buf[0][0] = g_u;
+		buf[0][2] = b_v;
+		break;
+	case V4L2_PIX_FMT_YVYU:
+		buf[0][offset] = r_y;
+		if (odd) {
+			buf[0][1] = (buf[0][1] + b_v) / 2;
+			buf[0][3] = (buf[0][3] + g_u) / 2;
+			break;
+		}
+		buf[0][1] = b_v;
+		buf[0][3] = g_u;
+		break;
+	case V4L2_PIX_FMT_VYUY:
+		buf[0][offset + 1] = r_y;
+		if (odd) {
+			buf[0][0] = (buf[0][0] + b_v) / 2;
+			buf[0][2] = (buf[0][2] + g_u) / 2;
+			break;
+		}
+		buf[0][0] = b_v;
+		buf[0][2] = g_u;
+		break;
+	case V4L2_PIX_FMT_RGB332:
+		buf[0][offset] = (r_y << 5) | (g_u << 2) | b_v;
+		break;
+	case V4L2_PIX_FMT_YUV565:
+	case V4L2_PIX_FMT_RGB565:
+		buf[0][offset] = (g_u << 5) | b_v;
+		buf[0][offset + 1] = (r_y << 3) | (g_u >> 3);
+		break;
+	case V4L2_PIX_FMT_RGB565X:
+		buf[0][offset] = (r_y << 3) | (g_u >> 3);
+		buf[0][offset + 1] = (g_u << 5) | b_v;
+		break;
+	case V4L2_PIX_FMT_RGB444:
+	case V4L2_PIX_FMT_XRGB444:
+		alpha = 0;
+		/* fall through */
+	case V4L2_PIX_FMT_YUV444:
+	case V4L2_PIX_FMT_ARGB444:
+		buf[0][offset] = (g_u << 4) | b_v;
+		buf[0][offset + 1] = (alpha & 0xf0) | r_y;
+		break;
+	case V4L2_PIX_FMT_RGB555:
+	case V4L2_PIX_FMT_XRGB555:
+		alpha = 0;
+		/* fall through */
+	case V4L2_PIX_FMT_YUV555:
+	case V4L2_PIX_FMT_ARGB555:
+		buf[0][offset] = (g_u << 5) | b_v;
+		buf[0][offset + 1] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+		break;
+	case V4L2_PIX_FMT_RGB555X:
+	case V4L2_PIX_FMT_XRGB555X:
+		alpha = 0;
+		/* fall through */
+	case V4L2_PIX_FMT_ARGB555X:
+		buf[0][offset] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+		buf[0][offset + 1] = (g_u << 5) | b_v;
+		break;
+	case V4L2_PIX_FMT_RGB24:
+		buf[0][offset] = r_y;
+		buf[0][offset + 1] = g_u;
+		buf[0][offset + 2] = b_v;
+		break;
+	case V4L2_PIX_FMT_BGR24:
+		buf[0][offset] = b_v;
+		buf[0][offset + 1] = g_u;
+		buf[0][offset + 2] = r_y;
+		break;
+	case V4L2_PIX_FMT_BGR666:
+		buf[0][offset] = (b_v << 2) | (g_u >> 4);
+		buf[0][offset + 1] = (g_u << 4) | (r_y >> 2);
+		buf[0][offset + 2] = r_y << 6;
+		buf[0][offset + 3] = 0;
+		break;
+	case V4L2_PIX_FMT_RGB32:
+	case V4L2_PIX_FMT_XRGB32:
+		alpha = 0;
+		/* fall through */
+	case V4L2_PIX_FMT_YUV32:
+	case V4L2_PIX_FMT_ARGB32:
+		buf[0][offset] = alpha;
+		buf[0][offset + 1] = r_y;
+		buf[0][offset + 2] = g_u;
+		buf[0][offset + 3] = b_v;
+		break;
+	case V4L2_PIX_FMT_BGR32:
+	case V4L2_PIX_FMT_XBGR32:
+		alpha = 0;
+		/* fall through */
+	case V4L2_PIX_FMT_ABGR32:
+		buf[0][offset] = b_v;
+		buf[0][offset + 1] = g_u;
+		buf[0][offset + 2] = r_y;
+		buf[0][offset + 3] = alpha;
+		break;
+	case V4L2_PIX_FMT_SBGGR8:
+		buf[0][offset] = odd ? g_u : b_v;
+		buf[1][offset] = odd ? r_y : g_u;
+		break;
+	case V4L2_PIX_FMT_SGBRG8:
+		buf[0][offset] = odd ? b_v : g_u;
+		buf[1][offset] = odd ? g_u : r_y;
+		break;
+	case V4L2_PIX_FMT_SGRBG8:
+		buf[0][offset] = odd ? r_y : g_u;
+		buf[1][offset] = odd ? g_u : b_v;
+		break;
+	case V4L2_PIX_FMT_SRGGB8:
+		buf[0][offset] = odd ? g_u : r_y;
+		buf[1][offset] = odd ? b_v : g_u;
+		break;
+	case V4L2_PIX_FMT_SBGGR10:
+		buf[0][offset] = odd ? g_u << 2 : b_v << 2;
+		buf[0][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
+		buf[1][offset] = odd ? r_y << 2 : g_u << 2;
+		buf[1][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
+		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
+		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
+		break;
+	case V4L2_PIX_FMT_SGBRG10:
+		buf[0][offset] = odd ? b_v << 2 : g_u << 2;
+		buf[0][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
+		buf[1][offset] = odd ? g_u << 2 : r_y << 2;
+		buf[1][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
+		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
+		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
+		break;
+	case V4L2_PIX_FMT_SGRBG10:
+		buf[0][offset] = odd ? r_y << 2 : g_u << 2;
+		buf[0][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
+		buf[1][offset] = odd ? g_u << 2 : b_v << 2;
+		buf[1][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
+		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
+		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
+		break;
+	case V4L2_PIX_FMT_SRGGB10:
+		buf[0][offset] = odd ? g_u << 2 : r_y << 2;
+		buf[0][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
+		buf[1][offset] = odd ? b_v << 2 : g_u << 2;
+		buf[1][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
+		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
+		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
+		break;
+	case V4L2_PIX_FMT_SBGGR12:
+		buf[0][offset] = odd ? g_u << 4 : b_v << 4;
+		buf[0][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
+		buf[1][offset] = odd ? r_y << 4 : g_u << 4;
+		buf[1][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
+		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
+		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
+		break;
+	case V4L2_PIX_FMT_SGBRG12:
+		buf[0][offset] = odd ? b_v << 4 : g_u << 4;
+		buf[0][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
+		buf[1][offset] = odd ? g_u << 4 : r_y << 4;
+		buf[1][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
+		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
+		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
+		break;
+	case V4L2_PIX_FMT_SGRBG12:
+		buf[0][offset] = odd ? r_y << 4 : g_u << 4;
+		buf[0][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
+		buf[1][offset] = odd ? g_u << 4 : b_v << 4;
+		buf[1][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
+		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
+		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
+		break;
+	case V4L2_PIX_FMT_SRGGB12:
+		buf[0][offset] = odd ? g_u << 4 : r_y << 4;
+		buf[0][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
+		buf[1][offset] = odd ? b_v << 4 : g_u << 4;
+		buf[1][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
+		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
+		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
+		break;
+	}
+}
+
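+/*
+ * For the interleaved Bayer formats the two 'planes' hold the two rows of
+ * the 2x2 Bayer pattern (for SBGGR that is the B/G row and the G/R row), so
+ * odd buffer lines are taken from plane 1 and even buffer lines from plane 0.
+ */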
+unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line)
+{
+	switch (tpg->fourcc) {
+	case V4L2_PIX_FMT_SBGGR8:
+	case V4L2_PIX_FMT_SGBRG8:
+	case V4L2_PIX_FMT_SGRBG8:
+	case V4L2_PIX_FMT_SRGGB8:
+	case V4L2_PIX_FMT_SBGGR10:
+	case V4L2_PIX_FMT_SGBRG10:
+	case V4L2_PIX_FMT_SGRBG10:
+	case V4L2_PIX_FMT_SRGGB10:
+	case V4L2_PIX_FMT_SBGGR12:
+	case V4L2_PIX_FMT_SGBRG12:
+	case V4L2_PIX_FMT_SGRBG12:
+	case V4L2_PIX_FMT_SRGGB12:
+		return buf_line & 1;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(tpg_g_interleaved_plane);
+
+/* Return how many pattern lines are used by the current pattern. */
+static unsigned tpg_get_pat_lines(const struct tpg_data *tpg)
+{
+	switch (tpg->pattern) {
+	case TPG_PAT_CHECKERS_16X16:
+	case TPG_PAT_CHECKERS_2X2:
+	case TPG_PAT_CHECKERS_1X1:
+	case TPG_PAT_COLOR_CHECKERS_2X2:
+	case TPG_PAT_COLOR_CHECKERS_1X1:
+	case TPG_PAT_ALTERNATING_HLINES:
+	case TPG_PAT_CROSS_1_PIXEL:
+	case TPG_PAT_CROSS_2_PIXELS:
+	case TPG_PAT_CROSS_10_PIXELS:
+		return 2;
+	case TPG_PAT_100_COLORSQUARES:
+	case TPG_PAT_100_HCOLORBAR:
+		return 8;
+	default:
+		return 1;
+	}
+}
+
+/* Which pattern line should be used for the given frame line. */
+static unsigned tpg_get_pat_line(const struct tpg_data *tpg, unsigned line)
+{
+	switch (tpg->pattern) {
+	case TPG_PAT_CHECKERS_16X16:
+		return (line >> 4) & 1;
+	case TPG_PAT_CHECKERS_1X1:
+	case TPG_PAT_COLOR_CHECKERS_1X1:
+	case TPG_PAT_ALTERNATING_HLINES:
+		return line & 1;
+	case TPG_PAT_CHECKERS_2X2:
+	case TPG_PAT_COLOR_CHECKERS_2X2:
+		return (line & 2) >> 1;
+	case TPG_PAT_100_COLORSQUARES:
+	case TPG_PAT_100_HCOLORBAR:
+		return (line * 8) / tpg->src_height;
+	case TPG_PAT_CROSS_1_PIXEL:
+		return line == tpg->src_height / 2;
+	case TPG_PAT_CROSS_2_PIXELS:
+		return (line + 1) / 2 == tpg->src_height / 4;
+	case TPG_PAT_CROSS_10_PIXELS:
+		return (line + 10) / 20 == tpg->src_height / 40;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Which color should be used for the given pattern line and X coordinate.
+ * Note: x is in the range 0 to 2 * tpg->src_width.
+ */
+static enum tpg_color tpg_get_color(const struct tpg_data *tpg,
+				    unsigned pat_line, unsigned x)
+{
+	/* Maximum number of bars is TPG_COLOR_MAX - otherwise, the input print code
+	   should be modified */
+	static const enum tpg_color bars[3][8] = {
+		/* Standard ITU-R 75% color bar sequence */
+		{ TPG_COLOR_CSC_WHITE,   TPG_COLOR_75_YELLOW,
+		  TPG_COLOR_75_CYAN,     TPG_COLOR_75_GREEN,
+		  TPG_COLOR_75_MAGENTA,  TPG_COLOR_75_RED,
+		  TPG_COLOR_75_BLUE,     TPG_COLOR_100_BLACK, },
+		/* Standard ITU-R 100% color bar sequence */
+		{ TPG_COLOR_100_WHITE,   TPG_COLOR_100_YELLOW,
+		  TPG_COLOR_100_CYAN,    TPG_COLOR_100_GREEN,
+		  TPG_COLOR_100_MAGENTA, TPG_COLOR_100_RED,
+		  TPG_COLOR_100_BLUE,    TPG_COLOR_100_BLACK, },
+		/* Color bar sequence suitable to test CSC */
+		{ TPG_COLOR_CSC_WHITE,   TPG_COLOR_CSC_YELLOW,
+		  TPG_COLOR_CSC_CYAN,    TPG_COLOR_CSC_GREEN,
+		  TPG_COLOR_CSC_MAGENTA, TPG_COLOR_CSC_RED,
+		  TPG_COLOR_CSC_BLUE,    TPG_COLOR_CSC_BLACK, },
+	};
+
+	switch (tpg->pattern) {
+	case TPG_PAT_75_COLORBAR:
+	case TPG_PAT_100_COLORBAR:
+	case TPG_PAT_CSC_COLORBAR:
+		return bars[tpg->pattern][((x * 8) / tpg->src_width) % 8];
+	case TPG_PAT_100_COLORSQUARES:
+		return bars[1][(pat_line + (x * 8) / tpg->src_width) % 8];
+	case TPG_PAT_100_HCOLORBAR:
+		return bars[1][pat_line];
+	case TPG_PAT_BLACK:
+		return TPG_COLOR_100_BLACK;
+	case TPG_PAT_WHITE:
+		return TPG_COLOR_100_WHITE;
+	case TPG_PAT_RED:
+		return TPG_COLOR_100_RED;
+	case TPG_PAT_GREEN:
+		return TPG_COLOR_100_GREEN;
+	case TPG_PAT_BLUE:
+		return TPG_COLOR_100_BLUE;
+	case TPG_PAT_CHECKERS_16X16:
+		return (((x >> 4) & 1) ^ (pat_line & 1)) ?
+			TPG_COLOR_100_BLACK : TPG_COLOR_100_WHITE;
+	case TPG_PAT_CHECKERS_1X1:
+		return ((x & 1) ^ (pat_line & 1)) ?
+			TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
+	case TPG_PAT_COLOR_CHECKERS_1X1:
+		return ((x & 1) ^ (pat_line & 1)) ?
+			TPG_COLOR_100_RED : TPG_COLOR_100_BLUE;
+	case TPG_PAT_CHECKERS_2X2:
+		return (((x >> 1) & 1) ^ (pat_line & 1)) ?
+			TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
+	case TPG_PAT_COLOR_CHECKERS_2X2:
+		return (((x >> 1) & 1) ^ (pat_line & 1)) ?
+			TPG_COLOR_100_RED : TPG_COLOR_100_BLUE;
+	case TPG_PAT_ALTERNATING_HLINES:
+		return pat_line ? TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
+	case TPG_PAT_ALTERNATING_VLINES:
+		return (x & 1) ? TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
+	case TPG_PAT_CROSS_1_PIXEL:
+		if (pat_line || (x % tpg->src_width) == tpg->src_width / 2)
+			return TPG_COLOR_100_BLACK;
+		return TPG_COLOR_100_WHITE;
+	case TPG_PAT_CROSS_2_PIXELS:
+		if (pat_line || ((x % tpg->src_width) + 1) / 2 == tpg->src_width / 4)
+			return TPG_COLOR_100_BLACK;
+		return TPG_COLOR_100_WHITE;
+	case TPG_PAT_CROSS_10_PIXELS:
+		if (pat_line || ((x % tpg->src_width) + 10) / 20 == tpg->src_width / 40)
+			return TPG_COLOR_100_BLACK;
+		return TPG_COLOR_100_WHITE;
+	case TPG_PAT_GRAY_RAMP:
+		return TPG_COLOR_RAMP + ((x % tpg->src_width) * 256) / tpg->src_width;
+	default:
+		return TPG_COLOR_100_RED;
+	}
+}
+
+/*
+ * Given the pixel aspect ratio and video aspect ratio calculate the
+ * coordinates of a centered square and the coordinates of the border of
+ * the active video area. The coordinates are relative to the source
+ * frame rectangle.
+ */
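+/*
+ * The square is roughly 2/5 of the source width, rounded so that both the
+ * square and the left/right margins have even widths: a 720-pixel-wide
+ * source, for example, gets a 288-pixel-wide square with 216-pixel margins.
+ */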
+static void tpg_calculate_square_border(struct tpg_data *tpg)
+{
+	unsigned w = tpg->src_width;
+	unsigned h = tpg->src_height;
+	unsigned sq_w, sq_h;
+
+	sq_w = (w * 2 / 5) & ~1;
+	if (((w - sq_w) / 2) & 1)
+		sq_w += 2;
+	sq_h = sq_w;
+	tpg->square.width = sq_w;
+	if (tpg->vid_aspect == TPG_VIDEO_ASPECT_16X9_ANAMORPHIC) {
+		unsigned ana_sq_w = (sq_w / 4) * 3;
+
+		if (((w - ana_sq_w) / 2) & 1)
+			ana_sq_w += 2;
+		tpg->square.width = ana_sq_w;
+	}
+	tpg->square.left = (w - tpg->square.width) / 2;
+	if (tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC)
+		sq_h = sq_w * 10 / 11;
+	else if (tpg->pix_aspect == TPG_PIXEL_ASPECT_PAL)
+		sq_h = sq_w * 59 / 54;
+	tpg->square.height = sq_h;
+	tpg->square.top = (h - sq_h) / 2;
+	tpg->border.left = 0;
+	tpg->border.width = w;
+	tpg->border.top = 0;
+	tpg->border.height = h;
+	switch (tpg->vid_aspect) {
+	case TPG_VIDEO_ASPECT_4X3:
+		if (tpg->pix_aspect)
+			return;
+		if (3 * w >= 4 * h) {
+			tpg->border.width = ((4 * h) / 3) & ~1;
+			if (((w - tpg->border.width) / 2) & ~1)
+				tpg->border.width -= 2;
+			tpg->border.left = (w - tpg->border.width) / 2;
+			break;
+		}
+		tpg->border.height = ((3 * w) / 4) & ~1;
+		tpg->border.top = (h - tpg->border.height) / 2;
+		break;
+	case TPG_VIDEO_ASPECT_14X9_CENTRE:
+		if (tpg->pix_aspect) {
+			tpg->border.height = tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC ? 420 : 506;
+			tpg->border.top = (h - tpg->border.height) / 2;
+			break;
+		}
+		if (9 * w >= 14 * h) {
+			tpg->border.width = ((14 * h) / 9) & ~1;
+			if (((w - tpg->border.width) / 2) & ~1)
+				tpg->border.width -= 2;
+			tpg->border.left = (w - tpg->border.width) / 2;
+			break;
+		}
+		tpg->border.height = ((9 * w) / 14) & ~1;
+		tpg->border.top = (h - tpg->border.height) / 2;
+		break;
+	case TPG_VIDEO_ASPECT_16X9_CENTRE:
+		if (tpg->pix_aspect) {
+			tpg->border.height = tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC ? 368 : 442;
+			tpg->border.top = (h - tpg->border.height) / 2;
+			break;
+		}
+		if (9 * w >= 16 * h) {
+			tpg->border.width = ((16 * h) / 9) & ~1;
+			if (((w - tpg->border.width) / 2) & ~1)
+				tpg->border.width -= 2;
+			tpg->border.left = (w - tpg->border.width) / 2;
+			break;
+		}
+		tpg->border.height = ((9 * w) / 16) & ~1;
+		tpg->border.top = (h - tpg->border.height) / 2;
+		break;
+	default:
+		break;
+	}
+}
+
+static void tpg_precalculate_line(struct tpg_data *tpg)
+{
+	enum tpg_color contrast;
+	u8 pix[TPG_MAX_PLANES][8];
+	unsigned pat;
+	unsigned p;
+	unsigned x;
+
+	switch (tpg->pattern) {
+	case TPG_PAT_GREEN:
+		contrast = TPG_COLOR_100_RED;
+		break;
+	case TPG_PAT_CSC_COLORBAR:
+		contrast = TPG_COLOR_CSC_GREEN;
+		break;
+	default:
+		contrast = TPG_COLOR_100_GREEN;
+		break;
+	}
+
+	for (pat = 0; pat < tpg_get_pat_lines(tpg); pat++) {
+		/* Coarse scaling with Bresenham */
+		unsigned int_part = tpg->src_width / tpg->scaled_width;
+		unsigned fract_part = tpg->src_width % tpg->scaled_width;
+		unsigned src_x = 0;
+		unsigned error = 0;
+
+		for (x = 0; x < tpg->scaled_width * 2; x += 2) {
+			unsigned real_x = src_x;
+			enum tpg_color color1, color2;
+
+			real_x = tpg->hflip ? tpg->src_width * 2 - real_x - 2 : real_x;
+			color1 = tpg_get_color(tpg, pat, real_x);
+
+			src_x += int_part;
+			error += fract_part;
+			if (error >= tpg->scaled_width) {
+				error -= tpg->scaled_width;
+				src_x++;
+			}
+
+			real_x = src_x;
+			real_x = tpg->hflip ? tpg->src_width * 2 - real_x - 2 : real_x;
+			color2 = tpg_get_color(tpg, pat, real_x);
+
+			src_x += int_part;
+			error += fract_part;
+			if (error >= tpg->scaled_width) {
+				error -= tpg->scaled_width;
+				src_x++;
+			}
+
+			gen_twopix(tpg, pix, tpg->hflip ? color2 : color1, 0);
+			gen_twopix(tpg, pix, tpg->hflip ? color1 : color2, 1);
+			for (p = 0; p < tpg->planes; p++) {
+				unsigned twopixsize = tpg->twopixelsize[p];
+				unsigned hdiv = tpg->hdownsampling[p];
+				u8 *pos = tpg->lines[pat][p] + tpg_hdiv(tpg, p, x);
+
+				memcpy(pos, pix[p], twopixsize / hdiv);
+			}
+		}
+	}
+
+	if (tpg->vdownsampling[tpg->planes - 1] > 1) {
+		unsigned pat_lines = tpg_get_pat_lines(tpg);
+
+		for (pat = 0; pat < pat_lines; pat++) {
+			unsigned next_pat = (pat + 1) % pat_lines;
+
+			for (p = 1; p < tpg->planes; p++) {
+				unsigned w = tpg_hdiv(tpg, p, tpg->scaled_width * 2);
+				u8 *pos1 = tpg->lines[pat][p];
+				u8 *pos2 = tpg->lines[next_pat][p];
+				u8 *dest = tpg->downsampled_lines[pat][p];
+
+				for (x = 0; x < w; x++, pos1++, pos2++, dest++)
+					*dest = ((u16)*pos1 + (u16)*pos2) / 2;
+			}
+		}
+	}
+
+	gen_twopix(tpg, pix, contrast, 0);
+	gen_twopix(tpg, pix, contrast, 1);
+	for (p = 0; p < tpg->planes; p++) {
+		unsigned twopixsize = tpg->twopixelsize[p];
+		u8 *pos = tpg->contrast_line[p];
+
+		for (x = 0; x < tpg->scaled_width; x += 2, pos += twopixsize)
+			memcpy(pos, pix[p], twopixsize);
+	}
+
+	gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 0);
+	gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 1);
+	for (p = 0; p < tpg->planes; p++) {
+		unsigned twopixsize = tpg->twopixelsize[p];
+		u8 *pos = tpg->black_line[p];
+
+		for (x = 0; x < tpg->scaled_width; x += 2, pos += twopixsize)
+			memcpy(pos, pix[p], twopixsize);
+	}
+
+	for (x = 0; x < tpg->scaled_width * 2; x += 2) {
+		gen_twopix(tpg, pix, TPG_COLOR_RANDOM, 0);
+		gen_twopix(tpg, pix, TPG_COLOR_RANDOM, 1);
+		for (p = 0; p < tpg->planes; p++) {
+			unsigned twopixsize = tpg->twopixelsize[p];
+			u8 *pos = tpg->random_line[p] + x * twopixsize / 2;
+
+			memcpy(pos, pix[p], twopixsize);
+		}
+	}
+
+	gen_twopix(tpg, tpg->textbg, TPG_COLOR_TEXTBG, 0);
+	gen_twopix(tpg, tpg->textbg, TPG_COLOR_TEXTBG, 1);
+	gen_twopix(tpg, tpg->textfg, TPG_COLOR_TEXTFG, 0);
+	gen_twopix(tpg, tpg->textfg, TPG_COLOR_TEXTFG, 1);
+}
+
+/* need this to do rgb24 rendering */
+typedef struct { u16 __; u8 _; } __packed x24;
+
+#define PRINTSTR(PIXTYPE) do {	\
+	unsigned vdiv = tpg->vdownsampling[p]; \
+	unsigned hdiv = tpg->hdownsampling[p]; \
+	int line;	\
+	PIXTYPE fg;	\
+	PIXTYPE bg;	\
+	memcpy(&fg, tpg->textfg[p], sizeof(PIXTYPE));	\
+	memcpy(&bg, tpg->textbg[p], sizeof(PIXTYPE));	\
+	\
+	for (line = first; line < 16; line += vdiv * step) {	\
+		int l = tpg->vflip ? 15 - line : line; \
+		PIXTYPE *pos = (PIXTYPE *)(basep[p][(line / vdiv) & 1] + \
+			       ((y * step + l) / (vdiv * div)) * tpg->bytesperline[p] + \
+			       (x / hdiv) * sizeof(PIXTYPE));	\
+		unsigned s;	\
+	\
+		for (s = 0; s < len; s++) {	\
+			u8 chr = font8x16[text[s] * 16 + line];	\
+	\
+			if (hdiv == 2 && tpg->hflip) { \
+				pos[3] = (chr & (0x01 << 6) ? fg : bg);	\
+				pos[2] = (chr & (0x01 << 4) ? fg : bg);	\
+				pos[1] = (chr & (0x01 << 2) ? fg : bg);	\
+				pos[0] = (chr & (0x01 << 0) ? fg : bg);	\
+			} else if (hdiv == 2) { \
+				pos[0] = (chr & (0x01 << 7) ? fg : bg);	\
+				pos[1] = (chr & (0x01 << 5) ? fg : bg);	\
+				pos[2] = (chr & (0x01 << 3) ? fg : bg);	\
+				pos[3] = (chr & (0x01 << 1) ? fg : bg);	\
+			} else if (tpg->hflip) { \
+				pos[7] = (chr & (0x01 << 7) ? fg : bg);	\
+				pos[6] = (chr & (0x01 << 6) ? fg : bg);	\
+				pos[5] = (chr & (0x01 << 5) ? fg : bg);	\
+				pos[4] = (chr & (0x01 << 4) ? fg : bg);	\
+				pos[3] = (chr & (0x01 << 3) ? fg : bg);	\
+				pos[2] = (chr & (0x01 << 2) ? fg : bg);	\
+				pos[1] = (chr & (0x01 << 1) ? fg : bg);	\
+				pos[0] = (chr & (0x01 << 0) ? fg : bg);	\
+			} else { \
+				pos[0] = (chr & (0x01 << 7) ? fg : bg);	\
+				pos[1] = (chr & (0x01 << 6) ? fg : bg);	\
+				pos[2] = (chr & (0x01 << 5) ? fg : bg);	\
+				pos[3] = (chr & (0x01 << 4) ? fg : bg);	\
+				pos[4] = (chr & (0x01 << 3) ? fg : bg);	\
+				pos[5] = (chr & (0x01 << 2) ? fg : bg);	\
+				pos[6] = (chr & (0x01 << 1) ? fg : bg);	\
+				pos[7] = (chr & (0x01 << 0) ? fg : bg);	\
+			} \
+	\
+			pos += (tpg->hflip ? -8 : 8) / hdiv;	\
+		}	\
+	}	\
+} while (0)
+
+static noinline void tpg_print_str_2(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
+			unsigned p, unsigned first, unsigned div, unsigned step,
+			int y, int x, char *text, unsigned len)
+{
+	PRINTSTR(u8);
+}
+
+static noinline void tpg_print_str_4(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
+			unsigned p, unsigned first, unsigned div, unsigned step,
+			int y, int x, char *text, unsigned len)
+{
+	PRINTSTR(u16);
+}
+
+static noinline void tpg_print_str_6(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
+			unsigned p, unsigned first, unsigned div, unsigned step,
+			int y, int x, char *text, unsigned len)
+{
+	PRINTSTR(x24);
+}
+
+static noinline void tpg_print_str_8(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
+			unsigned p, unsigned first, unsigned div, unsigned step,
+			int y, int x, char *text, unsigned len)
+{
+	PRINTSTR(u32);
+}
+
+void tpg_gen_text(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
+		  int y, int x, char *text)
+{
+	unsigned step = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1;
+	unsigned div = step;
+	unsigned first = 0;
+	unsigned len = strlen(text);
+	unsigned p;
+
+	if (font8x16 == NULL || basep == NULL)
+		return;
+
+	/* Check whether it is possible to show the string */
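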
+	if (y + 16 >= tpg->compose.height || x + 8 >= tpg->compose.width)
+		return;
+
+	if (len > (tpg->compose.width - x) / 8)
+		len = (tpg->compose.width - x) / 8;
+	if (tpg->vflip)
+		y = tpg->compose.height - y - 16;
+	if (tpg->hflip)
+		x = tpg->compose.width - x - 8;
+	y += tpg->compose.top;
+	x += tpg->compose.left;
+	if (tpg->field == V4L2_FIELD_BOTTOM)
+		first = 1;
+	else if (tpg->field == V4L2_FIELD_SEQ_TB || tpg->field == V4L2_FIELD_SEQ_BT)
+		div = 2;
+
+	for (p = 0; p < tpg->planes; p++) {
+		/* Print text */
+		switch (tpg->twopixelsize[p]) {
+		case 2:
+			tpg_print_str_2(tpg, basep, p, first, div, step, y, x,
+					text, len);
+			break;
+		case 4:
+			tpg_print_str_4(tpg, basep, p, first, div, step, y, x,
+					text, len);
+			break;
+		case 6:
+			tpg_print_str_6(tpg, basep, p, first, div, step, y, x,
+					text, len);
+			break;
+		case 8:
+			tpg_print_str_8(tpg, basep, p, first, div, step, y, x,
+					text, len);
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(tpg_gen_text);
+
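+/*
+ * Precalculate the movement step per frame. A step in the opposite direction
+ * is expressed as a forward step of src_width - N (or src_height - N) pixels,
+ * since the movement counters are applied modulo the source dimensions.
+ */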
+void tpg_update_mv_step(struct tpg_data *tpg)
+{
+	int factor = tpg->mv_hor_mode > TPG_MOVE_NONE ? -1 : 1;
+
+	if (tpg->hflip)
+		factor = -factor;
+	switch (tpg->mv_hor_mode) {
+	case TPG_MOVE_NEG_FAST:
+	case TPG_MOVE_POS_FAST:
+		tpg->mv_hor_step = ((tpg->src_width + 319) / 320) * 4;
+		break;
+	case TPG_MOVE_NEG:
+	case TPG_MOVE_POS:
+		tpg->mv_hor_step = ((tpg->src_width + 639) / 640) * 4;
+		break;
+	case TPG_MOVE_NEG_SLOW:
+	case TPG_MOVE_POS_SLOW:
+		tpg->mv_hor_step = 2;
+		break;
+	case TPG_MOVE_NONE:
+		tpg->mv_hor_step = 0;
+		break;
+	}
+	if (factor < 0)
+		tpg->mv_hor_step = tpg->src_width - tpg->mv_hor_step;
+
+	factor = tpg->mv_vert_mode > TPG_MOVE_NONE ? -1 : 1;
+	switch (tpg->mv_vert_mode) {
+	case TPG_MOVE_NEG_FAST:
+	case TPG_MOVE_POS_FAST:
+		tpg->mv_vert_step = ((tpg->src_width + 319) / 320) * 4;
+		break;
+	case TPG_MOVE_NEG:
+	case TPG_MOVE_POS:
+		tpg->mv_vert_step = ((tpg->src_width + 639) / 640) * 4;
+		break;
+	case TPG_MOVE_NEG_SLOW:
+	case TPG_MOVE_POS_SLOW:
+		tpg->mv_vert_step = 1;
+		break;
+	case TPG_MOVE_NONE:
+		tpg->mv_vert_step = 0;
+		break;
+	}
+	if (factor < 0)
+		tpg->mv_vert_step = tpg->src_height - tpg->mv_vert_step;
+}
+EXPORT_SYMBOL_GPL(tpg_update_mv_step);
+
+/* Map the line number relative to the crop rectangle to a frame line number */
+static unsigned tpg_calc_frameline(const struct tpg_data *tpg, unsigned src_y,
+				    unsigned field)
+{
+	switch (field) {
+	case V4L2_FIELD_TOP:
+		return tpg->crop.top + src_y * 2;
+	case V4L2_FIELD_BOTTOM:
+		return tpg->crop.top + src_y * 2 + 1;
+	default:
+		return src_y + tpg->crop.top;
+	}
+}
+
+/*
+ * Map the line number relative to the compose rectangle to a destination
+ * buffer line number.
+ */
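+/*
+ * With a 480-line buffer and a zero compose offset, for example, SEQ_TB maps
+ * lines 0, 2, 4, ... to buffer lines 0-239 and lines 1, 3, 5, ... to buffer
+ * lines 240-479, while SEQ_BT swaps the two halves.
+ */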
+static unsigned tpg_calc_buffer_line(const struct tpg_data *tpg, unsigned y,
+				    unsigned field)
+{
+	y += tpg->compose.top;
+	switch (field) {
+	case V4L2_FIELD_SEQ_TB:
+		if (y & 1)
+			return tpg->buf_height / 2 + y / 2;
+		return y / 2;
+	case V4L2_FIELD_SEQ_BT:
+		if (y & 1)
+			return y / 2;
+		return tpg->buf_height / 2 + y / 2;
+	default:
+		return y;
+	}
+}
+
+static void tpg_recalc(struct tpg_data *tpg)
+{
+	if (tpg->recalc_colors) {
+		tpg->recalc_colors = false;
+		tpg->recalc_lines = true;
+		tpg->real_xfer_func = tpg->xfer_func;
+		tpg->real_ycbcr_enc = tpg->ycbcr_enc;
+		tpg->real_quantization = tpg->quantization;
+
+		if (tpg->xfer_func == V4L2_XFER_FUNC_DEFAULT)
+			tpg->real_xfer_func =
+				V4L2_MAP_XFER_FUNC_DEFAULT(tpg->colorspace);
+
+		if (tpg->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+			tpg->real_ycbcr_enc =
+				V4L2_MAP_YCBCR_ENC_DEFAULT(tpg->colorspace);
+
+		if (tpg->quantization == V4L2_QUANTIZATION_DEFAULT)
+			tpg->real_quantization =
+				V4L2_MAP_QUANTIZATION_DEFAULT(!tpg->is_yuv,
+					tpg->colorspace, tpg->real_ycbcr_enc);
+
+		tpg_precalculate_colors(tpg);
+	}
+	if (tpg->recalc_square_border) {
+		tpg->recalc_square_border = false;
+		tpg_calculate_square_border(tpg);
+	}
+	if (tpg->recalc_lines) {
+		tpg->recalc_lines = false;
+		tpg_precalculate_line(tpg);
+	}
+}
+
+void tpg_calc_text_basep(struct tpg_data *tpg,
+		u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf)
+{
+	unsigned stride = tpg->bytesperline[p];
+	unsigned h = tpg->buf_height;
+
+	tpg_recalc(tpg);
+
+	basep[p][0] = vbuf;
+	basep[p][1] = vbuf;
+	h /= tpg->vdownsampling[p];
+	if (tpg->field == V4L2_FIELD_SEQ_TB)
+		basep[p][1] += h * stride / 2;
+	else if (tpg->field == V4L2_FIELD_SEQ_BT)
+		basep[p][0] += h * stride / 2;
+	if (p == 0 && tpg->interleaved)
+		tpg_calc_text_basep(tpg, basep, 1, vbuf);
+}
+EXPORT_SYMBOL_GPL(tpg_calc_text_basep);
+
+static int tpg_pattern_avg(const struct tpg_data *tpg,
+			   unsigned pat1, unsigned pat2)
+{
+	unsigned pat_lines = tpg_get_pat_lines(tpg);
+
+	if (pat1 == (pat2 + 1) % pat_lines)
+		return pat2;
+	if (pat2 == (pat1 + 1) % pat_lines)
+		return pat1;
+	return -1;
+}
+
+void tpg_log_status(struct tpg_data *tpg)
+{
+	pr_info("tpg source WxH: %ux%u (%s)\n",
+			tpg->src_width, tpg->src_height,
+			tpg->is_yuv ? "YCbCr" : "RGB");
+	pr_info("tpg field: %u\n", tpg->field);
+	pr_info("tpg crop: %ux%u@%dx%d\n", tpg->crop.width, tpg->crop.height,
+			tpg->crop.left, tpg->crop.top);
+	pr_info("tpg compose: %ux%u@%dx%d\n", tpg->compose.width, tpg->compose.height,
+			tpg->compose.left, tpg->compose.top);
+	pr_info("tpg colorspace: %d\n", tpg->colorspace);
+	pr_info("tpg transfer function: %d/%d\n", tpg->xfer_func, tpg->real_xfer_func);
+	pr_info("tpg Y'CbCr encoding: %d/%d\n", tpg->ycbcr_enc, tpg->real_ycbcr_enc);
+	pr_info("tpg quantization: %d/%d\n", tpg->quantization, tpg->real_quantization);
+	pr_info("tpg RGB range: %d/%d\n", tpg->rgb_range, tpg->real_rgb_range);
+}
+EXPORT_SYMBOL_GPL(tpg_log_status);
+
+/*
+ * This struct contains common parameters used by both the drawing of the
+ * test pattern and the drawing of the extras (borders, square, etc.)
+ */
+struct tpg_draw_params {
+	/* common data */
+	bool is_tv;
+	bool is_60hz;
+	unsigned twopixsize;
+	unsigned img_width;
+	unsigned stride;
+	unsigned hmax;
+	unsigned frame_line;
+	unsigned frame_line_next;
+
+	/* test pattern */
+	unsigned mv_hor_old;
+	unsigned mv_hor_new;
+	unsigned mv_vert_old;
+	unsigned mv_vert_new;
+
+	/* extras */
+	unsigned wss_width;
+	unsigned wss_random_offset;
+	unsigned sav_eav_f;
+	unsigned left_pillar_width;
+	unsigned right_pillar_start;
+};
+
+static void tpg_fill_params_pattern(const struct tpg_data *tpg, unsigned p,
+				    struct tpg_draw_params *params)
+{
+	params->mv_hor_old =
+		tpg_hscale_div(tpg, p, tpg->mv_hor_count % tpg->src_width);
+	params->mv_hor_new =
+		tpg_hscale_div(tpg, p, (tpg->mv_hor_count + tpg->mv_hor_step) %
+			       tpg->src_width);
+	params->mv_vert_old = tpg->mv_vert_count % tpg->src_height;
+	params->mv_vert_new =
+		(tpg->mv_vert_count + tpg->mv_vert_step) % tpg->src_height;
+}
+
+static void tpg_fill_params_extras(const struct tpg_data *tpg,
+				   unsigned p,
+				   struct tpg_draw_params *params)
+{
+	unsigned left_pillar_width = 0;
+	unsigned right_pillar_start = params->img_width;
+
+	params->wss_width = tpg->crop.left < tpg->src_width / 2 ?
+		tpg->src_width / 2 - tpg->crop.left : 0;
+	if (params->wss_width > tpg->crop.width)
+		params->wss_width = tpg->crop.width;
+	params->wss_width = tpg_hscale_div(tpg, p, params->wss_width);
+	params->wss_random_offset =
+		params->twopixsize * prandom_u32_max(tpg->src_width / 2);
+
+	if (tpg->crop.left < tpg->border.left) {
+		left_pillar_width = tpg->border.left - tpg->crop.left;
+		if (left_pillar_width > tpg->crop.width)
+			left_pillar_width = tpg->crop.width;
+		left_pillar_width = tpg_hscale_div(tpg, p, left_pillar_width);
+	}
+	params->left_pillar_width = left_pillar_width;
+
+	if (tpg->crop.left + tpg->crop.width >
+	    tpg->border.left + tpg->border.width) {
+		right_pillar_start =
+			tpg->border.left + tpg->border.width - tpg->crop.left;
+		right_pillar_start =
+			tpg_hscale_div(tpg, p, right_pillar_start);
+		if (right_pillar_start > params->img_width)
+			right_pillar_start = params->img_width;
+	}
+	params->right_pillar_start = right_pillar_start;
+
+	params->sav_eav_f = tpg->field ==
+			(params->is_60hz ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
+}
+
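+/*
+ * Draw the extras on top of the pattern: the simulated WSS data, the border,
+ * the centered square and, if requested, BT.656-style SAV/EAV codes (an
+ * 0xff 0x00 0x00 preamble followed by a status byte carrying the F/V/H flags
+ * and their four protection bits).
+ */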
+static void tpg_fill_plane_extras(const struct tpg_data *tpg,
+				  const struct tpg_draw_params *params,
+				  unsigned p, unsigned h, u8 *vbuf)
+{
+	unsigned twopixsize = params->twopixsize;
+	unsigned img_width = params->img_width;
+	unsigned frame_line = params->frame_line;
+	const struct v4l2_rect *sq = &tpg->square;
+	const struct v4l2_rect *b = &tpg->border;
+	const struct v4l2_rect *c = &tpg->crop;
+
+	if (params->is_tv && !params->is_60hz &&
+	    frame_line == 0 && params->wss_width) {
+		/*
+		 * Replace the first half of the top line of a 50 Hz frame
+		 * with random data to simulate a WSS signal.
+		 */
+		u8 *wss = tpg->random_line[p] + params->wss_random_offset;
+
+		memcpy(vbuf, wss, params->wss_width);
+	}
+
+	if (tpg->show_border && frame_line >= b->top &&
+	    frame_line < b->top + b->height) {
+		unsigned bottom = b->top + b->height - 1;
+		unsigned left = params->left_pillar_width;
+		unsigned right = params->right_pillar_start;
+
+		if (frame_line == b->top || frame_line == b->top + 1 ||
+		    frame_line == bottom || frame_line == bottom - 1) {
+			memcpy(vbuf + left, tpg->contrast_line[p],
+					right - left);
+		} else {
+			if (b->left >= c->left &&
+			    b->left < c->left + c->width)
+				memcpy(vbuf + left,
+					tpg->contrast_line[p], twopixsize);
+			if (b->left + b->width > c->left &&
+			    b->left + b->width <= c->left + c->width)
+				memcpy(vbuf + right - twopixsize,
+					tpg->contrast_line[p], twopixsize);
+		}
+	}
+	if (tpg->qual != TPG_QUAL_NOISE && frame_line >= b->top &&
+	    frame_line < b->top + b->height) {
+		memcpy(vbuf, tpg->black_line[p], params->left_pillar_width);
+		memcpy(vbuf + params->right_pillar_start, tpg->black_line[p],
+		       img_width - params->right_pillar_start);
+	}
+	if (tpg->show_square && frame_line >= sq->top &&
+	    frame_line < sq->top + sq->height &&
+	    sq->left < c->left + c->width &&
+	    sq->left + sq->width >= c->left) {
+		unsigned left = sq->left;
+		unsigned width = sq->width;
+
+		if (c->left > left) {
+			width -= c->left - left;
+			left = c->left;
+		}
+		if (c->left + c->width < left + width)
+			width -= left + width - c->left - c->width;
+		left -= c->left;
+		left = tpg_hscale_div(tpg, p, left);
+		width = tpg_hscale_div(tpg, p, width);
+		memcpy(vbuf + left, tpg->contrast_line[p], width);
+	}
+	if (tpg->insert_sav) {
+		unsigned offset = tpg_hdiv(tpg, p, tpg->compose.width / 3);
+		u8 *p = vbuf + offset;
+		unsigned vact = 0, hact = 0;
+
+		p[0] = 0xff;
+		p[1] = 0;
+		p[2] = 0;
+		p[3] = 0x80 | (params->sav_eav_f << 6) |
+			(vact << 5) | (hact << 4) |
+			((hact ^ vact) << 3) |
+			((hact ^ params->sav_eav_f) << 2) |
+			((params->sav_eav_f ^ vact) << 1) |
+			(hact ^ vact ^ params->sav_eav_f);
+	}
+	if (tpg->insert_eav) {
+		unsigned offset = tpg_hdiv(tpg, p, tpg->compose.width * 2 / 3);
+		u8 *p = vbuf + offset;
+		unsigned vact = 0, hact = 1;
+
+		p[0] = 0xff;
+		p[1] = 0;
+		p[2] = 0;
+		p[3] = 0x80 | (params->sav_eav_f << 6) |
+			(vact << 5) | (hact << 4) |
+			((hact ^ vact) << 3) |
+			((hact ^ params->sav_eav_f) << 2) |
+			((params->sav_eav_f ^ vact) << 1) |
+			(hact ^ vact ^ params->sav_eav_f);
+	}
+}
+
+static void tpg_fill_plane_pattern(const struct tpg_data *tpg,
+				   const struct tpg_draw_params *params,
+				   unsigned p, unsigned h, u8 *vbuf)
+{
+	unsigned twopixsize = params->twopixsize;
+	unsigned img_width = params->img_width;
+	unsigned mv_hor_old = params->mv_hor_old;
+	unsigned mv_hor_new = params->mv_hor_new;
+	unsigned mv_vert_old = params->mv_vert_old;
+	unsigned mv_vert_new = params->mv_vert_new;
+	unsigned frame_line = params->frame_line;
+	unsigned frame_line_next = params->frame_line_next;
+	unsigned line_offset = tpg_hscale_div(tpg, p, tpg->crop.left);
+	bool even;
+	bool fill_blank = false;
+	unsigned pat_line_old;
+	unsigned pat_line_new;
+	u8 *linestart_older;
+	u8 *linestart_newer;
+	u8 *linestart_top;
+	u8 *linestart_bottom;
+
+	even = !(frame_line & 1);
+
+	if (h >= params->hmax) {
+		if (params->hmax == tpg->compose.height)
+			return;
+		if (!tpg->perc_fill_blank)
+			return;
+		fill_blank = true;
+	}
+
+	if (tpg->vflip) {
+		frame_line = tpg->src_height - frame_line - 1;
+		frame_line_next = tpg->src_height - frame_line_next - 1;
+	}
+
+	if (fill_blank) {
+		linestart_older = tpg->contrast_line[p];
+		linestart_newer = tpg->contrast_line[p];
+	} else if (tpg->qual != TPG_QUAL_NOISE &&
+		   (frame_line < tpg->border.top ||
+		    frame_line >= tpg->border.top + tpg->border.height)) {
+		linestart_older = tpg->black_line[p];
+		linestart_newer = tpg->black_line[p];
+	} else if (tpg->pattern == TPG_PAT_NOISE || tpg->qual == TPG_QUAL_NOISE) {
+		linestart_older = tpg->random_line[p] +
+				  twopixsize * prandom_u32_max(tpg->src_width / 2);
+		linestart_newer = tpg->random_line[p] +
+				  twopixsize * prandom_u32_max(tpg->src_width / 2);
+	} else {
+		unsigned frame_line_old =
+			(frame_line + mv_vert_old) % tpg->src_height;
+		unsigned frame_line_new =
+			(frame_line + mv_vert_new) % tpg->src_height;
+		unsigned pat_line_next_old;
+		unsigned pat_line_next_new;
+
+		pat_line_old = tpg_get_pat_line(tpg, frame_line_old);
+		pat_line_new = tpg_get_pat_line(tpg, frame_line_new);
+		linestart_older = tpg->lines[pat_line_old][p] + mv_hor_old;
+		linestart_newer = tpg->lines[pat_line_new][p] + mv_hor_new;
+
+		if (tpg->vdownsampling[p] > 1 && frame_line != frame_line_next) {
+			int avg_pat;
+
+			/*
+			 * Now decide whether we need to use downsampled_lines[].
+			 * That's necessary if the two lines use different patterns.
+			 */
+			pat_line_next_old = tpg_get_pat_line(tpg,
+					(frame_line_next + mv_vert_old) % tpg->src_height);
+			pat_line_next_new = tpg_get_pat_line(tpg,
+					(frame_line_next + mv_vert_new) % tpg->src_height);
+
+			switch (tpg->field) {
+			case V4L2_FIELD_INTERLACED:
+			case V4L2_FIELD_INTERLACED_BT:
+			case V4L2_FIELD_INTERLACED_TB:
+				avg_pat = tpg_pattern_avg(tpg, pat_line_old, pat_line_new);
+				if (avg_pat < 0)
+					break;
+				linestart_older = tpg->downsampled_lines[avg_pat][p] + mv_hor_old;
+				linestart_newer = linestart_older;
+				break;
+			case V4L2_FIELD_NONE:
+			case V4L2_FIELD_TOP:
+			case V4L2_FIELD_BOTTOM:
+			case V4L2_FIELD_SEQ_BT:
+			case V4L2_FIELD_SEQ_TB:
+				avg_pat = tpg_pattern_avg(tpg, pat_line_old, pat_line_next_old);
+				if (avg_pat >= 0)
+					linestart_older = tpg->downsampled_lines[avg_pat][p] +
+						mv_hor_old;
+				avg_pat = tpg_pattern_avg(tpg, pat_line_new, pat_line_next_new);
+				if (avg_pat >= 0)
+					linestart_newer = tpg->downsampled_lines[avg_pat][p] +
+						mv_hor_new;
+				break;
+			}
+		}
+		linestart_older += line_offset;
+		linestart_newer += line_offset;
+	}
+	if (tpg->field_alternate) {
+		linestart_top = linestart_bottom = linestart_older;
+	} else if (params->is_60hz) {
+		linestart_top = linestart_newer;
+		linestart_bottom = linestart_older;
+	} else {
+		linestart_top = linestart_older;
+		linestart_bottom = linestart_newer;
+	}
+
+	switch (tpg->field) {
+	case V4L2_FIELD_INTERLACED:
+	case V4L2_FIELD_INTERLACED_TB:
+	case V4L2_FIELD_SEQ_TB:
+	case V4L2_FIELD_SEQ_BT:
+		if (even)
+			memcpy(vbuf, linestart_top, img_width);
+		else
+			memcpy(vbuf, linestart_bottom, img_width);
+		break;
+	case V4L2_FIELD_INTERLACED_BT:
+		if (even)
+			memcpy(vbuf, linestart_bottom, img_width);
+		else
+			memcpy(vbuf, linestart_top, img_width);
+		break;
+	case V4L2_FIELD_TOP:
+		memcpy(vbuf, linestart_top, img_width);
+		break;
+	case V4L2_FIELD_BOTTOM:
+		memcpy(vbuf, linestart_bottom, img_width);
+		break;
+	case V4L2_FIELD_NONE:
+	default:
+		memcpy(vbuf, linestart_older, img_width);
+		break;
+	}
+}
+
+void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
+			   unsigned p, u8 *vbuf)
+{
+	struct tpg_draw_params params;
+	unsigned factor = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1;
+
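+	/*
+	 * The crop rectangle is mapped onto the compose rectangle one output
+	 * line at a time: cropping 480 lines into a 400-line compose
+	 * rectangle (progressive), for example, gives int_part = 1 and
+	 * fract_part = 80, so every fifth output line one extra source line
+	 * is skipped.
+	 */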
+	/* Coarse scaling with Bresenham */
+	unsigned int_part = (tpg->crop.height / factor) / tpg->compose.height;
+	unsigned fract_part = (tpg->crop.height / factor) % tpg->compose.height;
+	unsigned src_y = 0;
+	unsigned error = 0;
+	unsigned h;
+
+	tpg_recalc(tpg);
+
+	params.is_tv = std;
+	params.is_60hz = std & V4L2_STD_525_60;
+	params.twopixsize = tpg->twopixelsize[p];
+	params.img_width = tpg_hdiv(tpg, p, tpg->compose.width);
+	params.stride = tpg->bytesperline[p];
+	params.hmax = (tpg->compose.height * tpg->perc_fill) / 100;
+
+	tpg_fill_params_pattern(tpg, p, &params);
+	tpg_fill_params_extras(tpg, p, &params);
+
+	vbuf += tpg_hdiv(tpg, p, tpg->compose.left);
+
+	for (h = 0; h < tpg->compose.height; h++) {
+		unsigned buf_line;
+
+		params.frame_line = tpg_calc_frameline(tpg, src_y, tpg->field);
+		params.frame_line_next = params.frame_line;
+		buf_line = tpg_calc_buffer_line(tpg, h, tpg->field);
+		src_y += int_part;
+		error += fract_part;
+		if (error >= tpg->compose.height) {
+			error -= tpg->compose.height;
+			src_y++;
+		}
+
+		/*
+		 * For line-interleaved formats determine the 'plane'
+		 * based on the buffer line.
+		 */
+		if (tpg_g_interleaved(tpg))
+			p = tpg_g_interleaved_plane(tpg, buf_line);
+
+		if (tpg->vdownsampling[p] > 1) {
+			/*
+			 * When doing vertical downsampling the field setting
+			 * matters: for SEQ_BT/TB we downsample each field
+			 * separately (i.e. lines 0+2 are combined, as are
+			 * lines 1+3), for the other field settings we combine
+			 * odd and even lines. Doing that for SEQ_BT/TB would
+			 * be really weird.
+			 */
+			if (tpg->field == V4L2_FIELD_SEQ_BT ||
+			    tpg->field == V4L2_FIELD_SEQ_TB) {
+				unsigned next_src_y = src_y;
+
+				if ((h & 3) >= 2)
+					continue;
+				next_src_y += int_part;
+				if (error + fract_part >= tpg->compose.height)
+					next_src_y++;
+				params.frame_line_next =
+					tpg_calc_frameline(tpg, next_src_y, tpg->field);
+			} else {
+				if (h & 1)
+					continue;
+				params.frame_line_next =
+					tpg_calc_frameline(tpg, src_y, tpg->field);
+			}
+
+			buf_line /= tpg->vdownsampling[p];
+		}
+		tpg_fill_plane_pattern(tpg, &params, p, h,
+				vbuf + buf_line * params.stride);
+		tpg_fill_plane_extras(tpg, &params, p, h,
+				vbuf + buf_line * params.stride);
+	}
+}
+EXPORT_SYMBOL_GPL(tpg_fill_plane_buffer);
+
+void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf)
+{
+	unsigned offset = 0;
+	unsigned i;
+
+	if (tpg->buffers > 1) {
+		tpg_fill_plane_buffer(tpg, std, p, vbuf);
+		return;
+	}
+
+	for (i = 0; i < tpg_g_planes(tpg); i++) {
+		tpg_fill_plane_buffer(tpg, std, i, vbuf + offset);
+		offset += tpg_calc_plane_size(tpg, i);
+	}
+}
+EXPORT_SYMBOL_GPL(tpg_fillbuffer);
+
+MODULE_DESCRIPTION("V4L2 Test Pattern Generator");
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 0afad39..a7a4674 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -58,6 +58,14 @@
 #define USB_VID_TELESTAR			0x10b9
 #define USB_VID_VISIONPLUS			0x13d3
 #define USB_VID_SONY				0x1415
+#define USB_PID_TEVII_S421			0xd421
+#define USB_PID_TEVII_S480_1			0xd481
+#define USB_PID_TEVII_S480_2			0xd482
+#define USB_PID_TEVII_S630			0xd630
+#define USB_PID_TEVII_S632			0xd632
+#define USB_PID_TEVII_S650			0xd650
+#define USB_PID_TEVII_S660			0xd660
+#define USB_PID_TEVII_S662			0xd662
 #define USB_VID_TWINHAN				0x1822
 #define USB_VID_ULTIMA_ELECTRONIC		0x05d8
 #define USB_VID_UNIWILL				0x1584
@@ -141,6 +149,7 @@
 #define USB_PID_GENIUS_TVGO_DVB_T03			0x4012
 #define USB_PID_GRANDTEC_DVBT_USB_COLD			0x0fa0
 #define USB_PID_GRANDTEC_DVBT_USB_WARM			0x0fa1
+#define USB_PID_GOTVIEW_SAT_HD				0x5456
 #define USB_PID_INTEL_CE9500				0x9500
 #define USB_PID_ITETECH_IT9135				0x9135
 #define USB_PID_ITETECH_IT9135_9005			0x9005
@@ -159,6 +168,8 @@
 #define USB_PID_KWORLD_UB499_2T_T09			0xe409
 #define USB_PID_KWORLD_VSTREAM_COLD			0x17de
 #define USB_PID_KWORLD_VSTREAM_WARM			0x17df
+#define USB_PID_PROF_1100				0xb012
+#define USB_PID_TERRATEC_CINERGY_S			0x0064
 #define USB_PID_TERRATEC_CINERGY_T_USB_XE		0x0055
 #define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2		0x0069
 #define USB_PID_TERRATEC_CINERGY_T_STICK		0x0093
@@ -361,6 +372,8 @@
 #define USB_PID_YUAN_STK7700D				0x1efc
 #define USB_PID_YUAN_STK7700D_2				0x1e8c
 #define USB_PID_DW2102					0x2102
+#define USB_PID_DW2104					0x2104
+#define USB_PID_DW3101					0x3101
 #define USB_PID_XTENSIONS_XD_380			0x0381
 #define USB_PID_TELESTAR_STARSTICK_2			0x8000
 #define USB_PID_MSI_DIGI_VOX_MINI_III                   0x8807
@@ -373,6 +386,7 @@
 #define USB_PID_ELGATO_EYETV_DTT_Dlx			0x0020
 #define USB_PID_ELGATO_EYETV_SAT			0x002a
 #define USB_PID_ELGATO_EYETV_SAT_V2			0x0025
+#define USB_PID_ELGATO_EYETV_SAT_V3			0x0036
 #define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD		0x5000
 #define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_WARM		0x5001
 #define USB_PID_FRIIO_WHITE				0x0001
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index e1684c5..75a3f4b 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -676,13 +676,13 @@
 					     demux, 0, MEDIA_LNK_FL_ENABLED,
 					     false);
 		if (ret)
-			return -ENOMEM;
+			return ret;
 	}
 	if (demux && ca) {
 		ret = media_create_pad_link(demux, 1, ca,
 					    0, MEDIA_LNK_FL_ENABLED);
 		if (ret)
-			return -ENOMEM;
+			return ret;
 	}
 
 	/* Create demux links for each ringbuffer/pad */
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index dc2d41e..d879dc0 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -1121,7 +1121,7 @@
 				(state->current_band == BAND_CBAND) ? "CBAND" : "NOT CBAND",
 				state->identity.version & 0x1f);
 
-		if (rf_ramp && ((state->rf_ramp[0] == 0) ||
+		if (rf_ramp && ((state->rf_ramp && state->rf_ramp[0] == 0) ||
 		    (state->current_band == BAND_CBAND &&
 		    (state->identity.version & 0x1f) <= P1D_E_F))) {
 			dprintk("DE-Engage mux for direct gain reg control");
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index e8fc032..addffc3 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -458,7 +458,7 @@
 
 		break;
 	default:
-		return 1;
+		return -EINVAL;
 	}
 
 	if (state->config->set_lock_led)
@@ -528,7 +528,7 @@
 			*ber = 0xffffffff;
 		break;
 	default:
-		return 1;
+		return -EINVAL;
 	}
 
 	return 0;
@@ -623,7 +623,7 @@
 				snr_reading, *snr);
 		break;
 	default:
-		return 1;
+		return -EINVAL;
 	}
 
 	return 0;
@@ -661,7 +661,7 @@
 		state->prevUCBS2 = _ucblocks;
 		break;
 	default:
-		return 1;
+		return -EINVAL;
 	}
 
 	return 0;
@@ -754,7 +754,7 @@
 		data |= 0x80;
 		ds3000_writereg(state, 0xa2, data);
 
-		return 1;
+		return -ETIMEDOUT;
 	}
 
 	data = ds3000_readreg(state, 0xa2);
@@ -808,7 +808,7 @@
 		data |= 0x80;
 		ds3000_writereg(state, 0xa2, data);
 
-		return 1;
+		return -ETIMEDOUT;
 	}
 
 	data = ds3000_readreg(state, 0xa2);
@@ -951,7 +951,7 @@
 			ds3000_writereg(state, 0xfe, 0x98);
 		break;
 	default:
-		return 1;
+		return -EINVAL;
 	}
 
 	/* enable 27MHz clock output */
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 7688360..5557ef8 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1251,9 +1251,9 @@
 	i2c_unregister_device(client);
 }
 
-static int m88ds3103_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
+static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct m88ds3103_dev *dev = mux_priv;
+	struct m88ds3103_dev *dev = i2c_mux_priv(muxc);
 	struct i2c_client *client = dev->client;
 	int ret;
 	struct i2c_msg msg = {
@@ -1374,7 +1374,7 @@
 
 	dev_dbg(&client->dev, "\n");
 
-	return dev->i2c_adapter;
+	return dev->muxc->adapter[0];
 }
 
 static int m88ds3103_probe(struct i2c_client *client,
@@ -1467,13 +1467,16 @@
 		goto err_kfree;
 
 	/* create mux i2c adapter for tuner */
-	dev->i2c_adapter = i2c_add_mux_adapter(client->adapter, &client->dev,
-					       dev, 0, 0, 0, m88ds3103_select,
-					       NULL);
-	if (dev->i2c_adapter == NULL) {
+	dev->muxc = i2c_mux_alloc(client->adapter, &client->dev, 1, 0, 0,
+				  m88ds3103_select, NULL);
+	if (!dev->muxc) {
 		ret = -ENOMEM;
 		goto err_kfree;
 	}
+	dev->muxc->priv = dev;
+	ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+	if (ret)
+		goto err_kfree;
 
 	/* create dvb_frontend */
 	memcpy(&dev->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops));
@@ -1502,7 +1505,7 @@
 
 	dev_dbg(&client->dev, "\n");
 
-	i2c_del_mux_adapter(dev->i2c_adapter);
+	i2c_mux_del_adapters(dev->muxc);
 
 	kfree(dev);
 	return 0;
diff --git a/drivers/media/dvb-frontends/m88ds3103_priv.h b/drivers/media/dvb-frontends/m88ds3103_priv.h
index eee8c22..d78e467 100644
--- a/drivers/media/dvb-frontends/m88ds3103_priv.h
+++ b/drivers/media/dvb-frontends/m88ds3103_priv.h
@@ -42,11 +42,11 @@
 	enum fe_status fe_status;
 	u32 dvbv3_ber; /* for old DVBv3 API read_ber */
 	bool warm; /* FW running */
-	struct i2c_adapter *i2c_adapter;
+	struct i2c_mux_core *muxc;
 	/* auto detect chip id to do different config */
 	u8 chip_id;
 	/* main mclk is calculated for M88RS6000 dynamically */
-	u32 mclk_khz;
+	s32 mclk_khz;
 	u64 post_bit_error;
 	u64 post_bit_count;
 };
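
The m88ds3103 conversion above (and the rtl2830, rtl2832 and si2168 conversions that follow) replaces the old i2c_add_mux_adapter() call with the i2c_mux_core API: the mux core is allocated with i2c_mux_alloc(), the private pointer and select/deselect callbacks hang off it, child adapters are created with i2c_mux_add_adapter() and torn down with i2c_mux_del_adapters(). A minimal sketch of the pattern, with hypothetical foo_* names standing in for a real driver:

	/* Sketch only: a demod registering one muxed child adapter for its tuner. */
	#include <linux/i2c.h>
	#include <linux/i2c-mux.h>
	#include <linux/slab.h>

	struct foo_dev {
		struct i2c_client *client;
		struct i2c_mux_core *muxc;
	};

	static int foo_select(struct i2c_mux_core *muxc, u32 chan)
	{
		struct foo_dev *dev = i2c_mux_priv(muxc);

		/* open the I2C repeater/gate on dev->client here */
		return 0;
	}

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		struct foo_dev *dev;
		int ret;

		dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		dev->client = client;

		dev->muxc = i2c_mux_alloc(client->adapter, &client->dev, 1, 0, 0,
					  foo_select, NULL);
		if (!dev->muxc)
			return -ENOMEM;
		dev->muxc->priv = dev;

		ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
		if (ret)
			return ret;

		/*
		 * The tuner now talks through dev->muxc->adapter[0];
		 * i2c_mux_del_adapters(dev->muxc) undoes this in remove().
		 */
		return 0;
	}
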
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 3f96429..d25d1e0 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -677,9 +677,9 @@
  * adapter lock is already taken by tuner driver.
  * Gate is closed automatically after single I2C transfer.
  */
-static int rtl2830_select(struct i2c_adapter *adap, void *mux_priv, u32 chan_id)
+static int rtl2830_select(struct i2c_mux_core *muxc, u32 chan_id)
 {
-	struct i2c_client *client = mux_priv;
+	struct i2c_client *client = i2c_mux_priv(muxc);
 	struct rtl2830_dev *dev = i2c_get_clientdata(client);
 	int ret;
 
@@ -712,7 +712,7 @@
 
 	dev_dbg(&client->dev, "\n");
 
-	return dev->adapter;
+	return dev->muxc->adapter[0];
 }
 
 /*
@@ -865,12 +865,16 @@
 		goto err_regmap_exit;
 
 	/* create muxed i2c adapter for tuner */
-	dev->adapter = i2c_add_mux_adapter(client->adapter, &client->dev,
-			client, 0, 0, 0, rtl2830_select, NULL);
-	if (dev->adapter == NULL) {
-		ret = -ENODEV;
+	dev->muxc = i2c_mux_alloc(client->adapter, &client->dev, 1, 0, 0,
+				  rtl2830_select, NULL);
+	if (!dev->muxc) {
+		ret = -ENOMEM;
 		goto err_regmap_exit;
 	}
+	dev->muxc->priv = client;
+	ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+	if (ret)
+		goto err_regmap_exit;
 
 	/* create dvb frontend */
 	memcpy(&dev->fe.ops, &rtl2830_ops, sizeof(dev->fe.ops));
@@ -903,7 +907,7 @@
 	/* stop statistics polling */
 	cancel_delayed_work_sync(&dev->stat_work);
 
-	i2c_del_mux_adapter(dev->adapter);
+	i2c_mux_del_adapters(dev->muxc);
 	regmap_exit(dev->regmap);
 	kfree(dev);
 
diff --git a/drivers/media/dvb-frontends/rtl2830_priv.h b/drivers/media/dvb-frontends/rtl2830_priv.h
index cf793f3..da49095 100644
--- a/drivers/media/dvb-frontends/rtl2830_priv.h
+++ b/drivers/media/dvb-frontends/rtl2830_priv.h
@@ -29,7 +29,7 @@
 	struct rtl2830_platform_data *pdata;
 	struct i2c_client *client;
 	struct regmap *regmap;
-	struct i2c_adapter *adapter;
+	struct i2c_mux_core *muxc;
 	struct dvb_frontend fe;
 	bool sleeping;
 	unsigned long filters;
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 7c96f76..bfb6bee 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -153,43 +153,6 @@
 	[DVBT_REG_4MSEL]	= {0x013,  0, 0},
 };
 
-/* Our regmap is bypassing I2C adapter lock, thus we do it! */
-static int rtl2832_bulk_write(struct i2c_client *client, unsigned int reg,
-			      const void *val, size_t val_count)
-{
-	struct rtl2832_dev *dev = i2c_get_clientdata(client);
-	int ret;
-
-	i2c_lock_adapter(client->adapter);
-	ret = regmap_bulk_write(dev->regmap, reg, val, val_count);
-	i2c_unlock_adapter(client->adapter);
-	return ret;
-}
-
-static int rtl2832_update_bits(struct i2c_client *client, unsigned int reg,
-			       unsigned int mask, unsigned int val)
-{
-	struct rtl2832_dev *dev = i2c_get_clientdata(client);
-	int ret;
-
-	i2c_lock_adapter(client->adapter);
-	ret = regmap_update_bits(dev->regmap, reg, mask, val);
-	i2c_unlock_adapter(client->adapter);
-	return ret;
-}
-
-static int rtl2832_bulk_read(struct i2c_client *client, unsigned int reg,
-			     void *val, size_t val_count)
-{
-	struct rtl2832_dev *dev = i2c_get_clientdata(client);
-	int ret;
-
-	i2c_lock_adapter(client->adapter);
-	ret = regmap_bulk_read(dev->regmap, reg, val, val_count);
-	i2c_unlock_adapter(client->adapter);
-	return ret;
-}
-
 static int rtl2832_rd_demod_reg(struct rtl2832_dev *dev, int reg, u32 *val)
 {
 	struct i2c_client *client = dev->client;
@@ -204,7 +167,7 @@
 	len = (msb >> 3) + 1;
 	mask = REG_MASK(msb - lsb);
 
-	ret = rtl2832_bulk_read(client, reg_start_addr, reading, len);
+	ret = regmap_bulk_read(dev->regmap, reg_start_addr, reading, len);
 	if (ret)
 		goto err;
 
@@ -234,7 +197,7 @@
 	len = (msb >> 3) + 1;
 	mask = REG_MASK(msb - lsb);
 
-	ret = rtl2832_bulk_read(client, reg_start_addr, reading, len);
+	ret = regmap_bulk_read(dev->regmap, reg_start_addr, reading, len);
 	if (ret)
 		goto err;
 
@@ -248,7 +211,7 @@
 	for (i = 0; i < len; i++)
 		writing[i] = (writing_tmp >> ((len - 1 - i) * 8)) & 0xff;
 
-	ret = rtl2832_bulk_write(client, reg_start_addr, writing, len);
+	ret = regmap_bulk_write(dev->regmap, reg_start_addr, writing, len);
 	if (ret)
 		goto err;
 
@@ -525,7 +488,8 @@
 	}
 
 	for (j = 0; j < sizeof(bw_params[0]); j++) {
-		ret = rtl2832_bulk_write(client, 0x11c + j, &bw_params[i][j], 1);
+		ret = regmap_bulk_write(dev->regmap,
+					0x11c + j, &bw_params[i][j], 1);
 		if (ret)
 			goto err;
 	}
@@ -581,11 +545,11 @@
 	if (dev->sleeping)
 		return 0;
 
-	ret = rtl2832_bulk_read(client, 0x33c, buf, 2);
+	ret = regmap_bulk_read(dev->regmap, 0x33c, buf, 2);
 	if (ret)
 		goto err;
 
-	ret = rtl2832_bulk_read(client, 0x351, &buf[2], 1);
+	ret = regmap_bulk_read(dev->regmap, 0x351, &buf[2], 1);
 	if (ret)
 		goto err;
 
@@ -716,7 +680,7 @@
 	/* signal strength */
 	if (dev->fe_status & FE_HAS_SIGNAL) {
 		/* read digital AGC */
-		ret = rtl2832_bulk_read(client, 0x305, &u8tmp, 1);
+		ret = regmap_bulk_read(dev->regmap, 0x305, &u8tmp, 1);
 		if (ret)
 			goto err;
 
@@ -742,7 +706,7 @@
 			{87659938, 87659938, 87885178, 88241743},
 		};
 
-		ret = rtl2832_bulk_read(client, 0x33c, &u8tmp, 1);
+		ret = regmap_bulk_read(dev->regmap, 0x33c, &u8tmp, 1);
 		if (ret)
 			goto err;
 
@@ -754,7 +718,7 @@
 		if (hierarchy > HIERARCHY_NUM - 1)
 			goto err;
 
-		ret = rtl2832_bulk_read(client, 0x40c, buf, 2);
+		ret = regmap_bulk_read(dev->regmap, 0x40c, buf, 2);
 		if (ret)
 			goto err;
 
@@ -775,7 +739,7 @@
 
 	/* BER */
 	if (dev->fe_status & FE_HAS_LOCK) {
-		ret = rtl2832_bulk_read(client, 0x34e, buf, 2);
+		ret = regmap_bulk_read(dev->regmap, 0x34e, buf, 2);
 		if (ret)
 			goto err;
 
@@ -825,8 +789,6 @@
 
 /*
  * I2C gate/mux/repeater logic
- * We must use unlocked __i2c_transfer() here (through regmap) because of I2C
- * adapter lock is already taken by tuner driver.
  * There is delay mechanism to avoid unneeded I2C gate open / close. Gate close
  * is delayed here a little bit in order to see if there is sequence of I2C
  * messages sent to same I2C bus.
@@ -838,7 +800,7 @@
 	int ret;
 
 	/* close gate */
-	ret = rtl2832_update_bits(dev->client, 0x101, 0x08, 0x00);
+	ret = regmap_update_bits(dev->regmap, 0x101, 0x08, 0x00);
 	if (ret)
 		goto err;
 
@@ -847,19 +809,16 @@
 	dev_dbg(&client->dev, "failed=%d\n", ret);
 }
 
-static int rtl2832_select(struct i2c_adapter *adap, void *mux_priv, u32 chan_id)
+static int rtl2832_select(struct i2c_mux_core *muxc, u32 chan_id)
 {
-	struct rtl2832_dev *dev = mux_priv;
+	struct rtl2832_dev *dev = i2c_mux_priv(muxc);
 	struct i2c_client *client = dev->client;
 	int ret;
 
 	/* terminate possible gate closing */
 	cancel_delayed_work(&dev->i2c_gate_work);
 
-	/*
-	 * I2C adapter lock is already taken and due to that we will use
-	 * regmap_update_bits() which does not lock again I2C adapter.
-	 */
+	/* open gate */
 	ret = regmap_update_bits(dev->regmap, 0x101, 0x08, 0x08);
 	if (ret)
 		goto err;
@@ -870,10 +829,9 @@
 	return ret;
 }
 
-static int rtl2832_deselect(struct i2c_adapter *adap, void *mux_priv,
-			    u32 chan_id)
+static int rtl2832_deselect(struct i2c_mux_core *muxc, u32 chan_id)
 {
-	struct rtl2832_dev *dev = mux_priv;
+	struct rtl2832_dev *dev = i2c_mux_priv(muxc);
 
 	schedule_delayed_work(&dev->i2c_gate_work, usecs_to_jiffies(100));
 	return 0;
@@ -932,120 +890,6 @@
 	return false;
 }
 
-/*
- * We implement own I2C access routines for regmap in order to get manual access
- * to I2C adapter lock, which is needed for I2C mux adapter.
- */
-static int rtl2832_regmap_read(void *context, const void *reg_buf,
-			       size_t reg_size, void *val_buf, size_t val_size)
-{
-	struct i2c_client *client = context;
-	int ret;
-	struct i2c_msg msg[2] = {
-		{
-			.addr = client->addr,
-			.flags = 0,
-			.len = reg_size,
-			.buf = (u8 *)reg_buf,
-		}, {
-			.addr = client->addr,
-			.flags = I2C_M_RD,
-			.len = val_size,
-			.buf = val_buf,
-		}
-	};
-
-	ret = __i2c_transfer(client->adapter, msg, 2);
-	if (ret != 2) {
-		dev_warn(&client->dev, "i2c reg read failed %d reg %02x\n",
-			 ret, *(u8 *)reg_buf);
-		if (ret >= 0)
-			ret = -EREMOTEIO;
-		return ret;
-	}
-	return 0;
-}
-
-static int rtl2832_regmap_write(void *context, const void *data, size_t count)
-{
-	struct i2c_client *client = context;
-	int ret;
-	struct i2c_msg msg[1] = {
-		{
-			.addr = client->addr,
-			.flags = 0,
-			.len = count,
-			.buf = (u8 *)data,
-		}
-	};
-
-	ret = __i2c_transfer(client->adapter, msg, 1);
-	if (ret != 1) {
-		dev_warn(&client->dev, "i2c reg write failed %d reg %02x\n",
-			 ret, *(u8 *)data);
-		if (ret >= 0)
-			ret = -EREMOTEIO;
-		return ret;
-	}
-	return 0;
-}
-
-static int rtl2832_regmap_gather_write(void *context, const void *reg,
-				       size_t reg_len, const void *val,
-				       size_t val_len)
-{
-	struct i2c_client *client = context;
-	int ret;
-	u8 buf[256];
-	struct i2c_msg msg[1] = {
-		{
-			.addr = client->addr,
-			.flags = 0,
-			.len = 1 + val_len,
-			.buf = buf,
-		}
-	};
-
-	buf[0] = *(u8 const *)reg;
-	memcpy(&buf[1], val, val_len);
-
-	ret = __i2c_transfer(client->adapter, msg, 1);
-	if (ret != 1) {
-		dev_warn(&client->dev, "i2c reg write failed %d reg %02x\n",
-			 ret, *(u8 const *)reg);
-		if (ret >= 0)
-			ret = -EREMOTEIO;
-		return ret;
-	}
-	return 0;
-}
-
-/*
- * FIXME: Hack. Implement own regmap locking in order to silence lockdep
- * recursive lock warning. That happens when regmap I2C client calls I2C mux
- * adapter, which leads demod I2C repeater enable via demod regmap. Operation
- * takes two regmap locks recursively - but those are different regmap instances
- * in a two different I2C drivers, so it is not deadlock. Proper fix is to make
- * regmap aware of lockdep.
- */
-static void rtl2832_regmap_lock(void *__dev)
-{
-	struct rtl2832_dev *dev = __dev;
-	struct i2c_client *client = dev->client;
-
-	dev_dbg(&client->dev, "\n");
-	mutex_lock(&dev->regmap_mutex);
-}
-
-static void rtl2832_regmap_unlock(void *__dev)
-{
-	struct rtl2832_dev *dev = __dev;
-	struct i2c_client *client = dev->client;
-
-	dev_dbg(&client->dev, "\n");
-	mutex_unlock(&dev->regmap_mutex);
-}
-
 static struct dvb_frontend *rtl2832_get_dvb_frontend(struct i2c_client *client)
 {
 	struct rtl2832_dev *dev = i2c_get_clientdata(client);
@@ -1059,7 +903,7 @@
 	struct rtl2832_dev *dev = i2c_get_clientdata(client);
 
 	dev_dbg(&client->dev, "\n");
-	return dev->i2c_adapter_tuner;
+	return dev->muxc->adapter[0];
 }
 
 static int rtl2832_slave_ts_ctrl(struct i2c_client *client, bool enable)
@@ -1073,29 +917,29 @@
 		ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x0);
 		if (ret)
 			goto err;
-		ret = rtl2832_bulk_write(client, 0x10c, "\x5f\xff", 2);
+		ret = regmap_bulk_write(dev->regmap, 0x10c, "\x5f\xff", 2);
 		if (ret)
 			goto err;
 		ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x1);
 		if (ret)
 			goto err;
-		ret = rtl2832_bulk_write(client, 0x0bc, "\x18", 1);
+		ret = regmap_bulk_write(dev->regmap, 0x0bc, "\x18", 1);
 		if (ret)
 			goto err;
-		ret = rtl2832_bulk_write(client, 0x192, "\x7f\xf7\xff", 3);
+		ret = regmap_bulk_write(dev->regmap, 0x192, "\x7f\xf7\xff", 3);
 		if (ret)
 			goto err;
 	} else {
-		ret = rtl2832_bulk_write(client, 0x192, "\x00\x0f\xff", 3);
+		ret = regmap_bulk_write(dev->regmap, 0x192, "\x00\x0f\xff", 3);
 		if (ret)
 			goto err;
-		ret = rtl2832_bulk_write(client, 0x0bc, "\x08", 1);
+		ret = regmap_bulk_write(dev->regmap, 0x0bc, "\x08", 1);
 		if (ret)
 			goto err;
 		ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x0);
 		if (ret)
 			goto err;
-		ret = rtl2832_bulk_write(client, 0x10c, "\x00\x00", 2);
+		ret = regmap_bulk_write(dev->regmap, 0x10c, "\x00\x00", 2);
 		if (ret)
 			goto err;
 		ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
@@ -1124,7 +968,7 @@
 	else
 		u8tmp = 0x00;
 
-	ret = rtl2832_update_bits(client, 0x061, 0xc0, u8tmp);
+	ret = regmap_update_bits(dev->regmap, 0x061, 0xc0, u8tmp);
 	if (ret)
 		goto err;
 
@@ -1159,14 +1003,14 @@
 	buf[1] = (dev->filters >>  8) & 0xff;
 	buf[2] = (dev->filters >> 16) & 0xff;
 	buf[3] = (dev->filters >> 24) & 0xff;
-	ret = rtl2832_bulk_write(client, 0x062, buf, 4);
+	ret = regmap_bulk_write(dev->regmap, 0x062, buf, 4);
 	if (ret)
 		goto err;
 
 	/* add PID */
 	buf[0] = (pid >> 8) & 0xff;
 	buf[1] = (pid >> 0) & 0xff;
-	ret = rtl2832_bulk_write(client, 0x066 + 2 * index, buf, 2);
+	ret = regmap_bulk_write(dev->regmap, 0x066 + 2 * index, buf, 2);
 	if (ret)
 		goto err;
 
@@ -1184,12 +1028,6 @@
 	struct rtl2832_dev *dev;
 	int ret;
 	u8 tmp;
-	static const struct regmap_bus regmap_bus = {
-		.read = rtl2832_regmap_read,
-		.write = rtl2832_regmap_write,
-		.gather_write = rtl2832_regmap_gather_write,
-		.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-	};
 	static const struct regmap_range_cfg regmap_range_cfg[] = {
 		{
 			.selector_reg     = 0x00,
@@ -1218,36 +1056,35 @@
 	dev->sleeping = true;
 	INIT_DELAYED_WORK(&dev->i2c_gate_work, rtl2832_i2c_gate_work);
 	/* create regmap */
-	mutex_init(&dev->regmap_mutex);
 	dev->regmap_config.reg_bits =  8,
 	dev->regmap_config.val_bits =  8,
-	dev->regmap_config.lock = rtl2832_regmap_lock,
-	dev->regmap_config.unlock = rtl2832_regmap_unlock,
-	dev->regmap_config.lock_arg = dev,
 	dev->regmap_config.volatile_reg = rtl2832_volatile_reg,
 	dev->regmap_config.max_register = 5 * 0x100,
 	dev->regmap_config.ranges = regmap_range_cfg,
 	dev->regmap_config.num_ranges = ARRAY_SIZE(regmap_range_cfg),
 	dev->regmap_config.cache_type = REGCACHE_NONE,
-	dev->regmap = regmap_init(&client->dev, &regmap_bus, client,
-				  &dev->regmap_config);
+	dev->regmap = regmap_init_i2c(client, &dev->regmap_config);
 	if (IS_ERR(dev->regmap)) {
 		ret = PTR_ERR(dev->regmap);
 		goto err_kfree;
 	}
 
 	/* check if the demod is there */
-	ret = rtl2832_bulk_read(client, 0x000, &tmp, 1);
+	ret = regmap_bulk_read(dev->regmap, 0x000, &tmp, 1);
 	if (ret)
 		goto err_regmap_exit;
 
 	/* create muxed i2c adapter for demod tuner bus */
-	dev->i2c_adapter_tuner = i2c_add_mux_adapter(i2c, &i2c->dev, dev,
-			0, 0, 0, rtl2832_select, rtl2832_deselect);
-	if (dev->i2c_adapter_tuner == NULL) {
-		ret = -ENODEV;
+	dev->muxc = i2c_mux_alloc(i2c, &i2c->dev, 1, 0, I2C_MUX_LOCKED,
+				  rtl2832_select, rtl2832_deselect);
+	if (!dev->muxc) {
+		ret = -ENOMEM;
 		goto err_regmap_exit;
 	}
+	dev->muxc->priv = dev;
+	ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+	if (ret)
+		goto err_regmap_exit;
 
 	/* create dvb_frontend */
 	memcpy(&dev->fe.ops, &rtl2832_ops, sizeof(struct dvb_frontend_ops));
@@ -1259,9 +1096,7 @@
 	pdata->slave_ts_ctrl = rtl2832_slave_ts_ctrl;
 	pdata->pid_filter = rtl2832_pid_filter;
 	pdata->pid_filter_ctrl = rtl2832_pid_filter_ctrl;
-	pdata->bulk_read = rtl2832_bulk_read;
-	pdata->bulk_write = rtl2832_bulk_write;
-	pdata->update_bits = rtl2832_update_bits;
+	pdata->regmap = dev->regmap;
 
 	dev_info(&client->dev, "Realtek RTL2832 successfully attached\n");
 	return 0;
@@ -1282,7 +1117,7 @@
 
 	cancel_delayed_work_sync(&dev->i2c_gate_work);
 
-	i2c_del_mux_adapter(dev->i2c_adapter_tuner);
+	i2c_mux_del_adapters(dev->muxc);
 
 	regmap_exit(dev->regmap);
 
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index 6390af6..03c0de0 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -57,9 +57,7 @@
 	int (*pid_filter)(struct dvb_frontend *, u8, u16, int);
 	int (*pid_filter_ctrl)(struct dvb_frontend *, int);
 /* private: Register access for SDR module use only */
-	int (*bulk_read)(struct i2c_client *, unsigned int, void *, size_t);
-	int (*bulk_write)(struct i2c_client *, unsigned int, const void *, size_t);
-	int (*update_bits)(struct i2c_client *, unsigned int, unsigned int, unsigned int);
+	struct regmap *regmap;
 };
 
 #endif /* RTL2832_H */
diff --git a/drivers/media/dvb-frontends/rtl2832_priv.h b/drivers/media/dvb-frontends/rtl2832_priv.h
index 6b875f4..c1a8a69 100644
--- a/drivers/media/dvb-frontends/rtl2832_priv.h
+++ b/drivers/media/dvb-frontends/rtl2832_priv.h
@@ -33,10 +33,9 @@
 struct rtl2832_dev {
 	struct rtl2832_platform_data *pdata;
 	struct i2c_client *client;
-	struct mutex regmap_mutex;
 	struct regmap_config regmap_config;
 	struct regmap *regmap;
-	struct i2c_adapter *i2c_adapter_tuner;
+	struct i2c_mux_core *muxc;
 	struct dvb_frontend fe;
 	enum fe_status fe_status;
 	u64 post_bit_error_prev; /* for old DVBv3 read_ber() calculation */
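
The rtl2832 rework above leans on the i2c-mux locking flags: the tuner mux is registered with I2C_MUX_LOCKED, i.e. as a "mux-locked" mux, so transfers on the child adapter take only the mux lock rather than the parent adapter lock, and the select/deselect callbacks — as well as the demod's own regmap — can issue ordinary locked transfers on the parent bus. That is what makes the hand-rolled regmap bus built on __i2c_transfer(), the bulk_read/bulk_write/update_bits wrappers and the custom regmap lock/unlock hack unnecessary: the demod now uses a plain regmap from regmap_init_i2c() and simply hands that regmap to the SDR module through platform data (pdata->regmap), as the rtl2832_sdr changes below show.
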
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index b860f02..47a480a 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -35,6 +35,7 @@
 #include <linux/platform_device.h>
 #include <linux/jiffies.h>
 #include <linux/math64.h>
+#include <linux/regmap.h>
 
 static bool rtl2832_sdr_emulated_fmt;
 module_param_named(emulated_formats, rtl2832_sdr_emulated_fmt, bool, 0644);
@@ -119,6 +120,7 @@
 	unsigned long flags;
 
 	struct platform_device *pdev;
+	struct regmap *regmap;
 
 	struct video_device vdev;
 	struct v4l2_device v4l2_dev;
@@ -163,47 +165,6 @@
 	unsigned long jiffies_next;
 };
 
-/* write multiple registers */
-static int rtl2832_sdr_wr_regs(struct rtl2832_sdr_dev *dev, u16 reg,
-		const u8 *val, int len)
-{
-	struct platform_device *pdev = dev->pdev;
-	struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
-	struct i2c_client *client = pdata->i2c_client;
-
-	return pdata->bulk_write(client, reg, val, len);
-}
-
-#if 0
-/* read multiple registers */
-static int rtl2832_sdr_rd_regs(struct rtl2832_sdr_dev *dev, u16 reg, u8 *val,
-		int len)
-{
-	struct platform_device *pdev = dev->pdev;
-	struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
-	struct i2c_client *client = pdata->i2c_client;
-
-	return pdata->bulk_read(client, reg, val, len);
-}
-#endif
-
-/* write single register */
-static int rtl2832_sdr_wr_reg(struct rtl2832_sdr_dev *dev, u16 reg, u8 val)
-{
-	return rtl2832_sdr_wr_regs(dev, reg, &val, 1);
-}
-
-/* write single register with mask */
-static int rtl2832_sdr_wr_reg_mask(struct rtl2832_sdr_dev *dev, u16 reg,
-		u8 val, u8 mask)
-{
-	struct platform_device *pdev = dev->pdev;
-	struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
-	struct i2c_client *client = pdata->i2c_client;
-
-	return pdata->update_bits(client, reg, mask, val);
-}
-
 /* Private functions */
 static struct rtl2832_sdr_frame_buf *rtl2832_sdr_get_next_fill_buf(
 		struct rtl2832_sdr_dev *dev)
@@ -558,11 +519,11 @@
 
 	f_sr = dev->f_adc;
 
-	ret = rtl2832_sdr_wr_regs(dev, 0x13e, "\x00\x00", 2);
+	ret = regmap_bulk_write(dev->regmap, 0x13e, "\x00\x00", 2);
 	if (ret)
 		goto err;
 
-	ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x00\x00\x00\x00", 4);
+	ret = regmap_bulk_write(dev->regmap, 0x115, "\x00\x00\x00\x00", 4);
 	if (ret)
 		goto err;
 
@@ -588,7 +549,7 @@
 	buf[1] = (u32tmp >>  8) & 0xff;
 	buf[2] = (u32tmp >>  0) & 0xff;
 
-	ret = rtl2832_sdr_wr_regs(dev, 0x119, buf, 3);
+	ret = regmap_bulk_write(dev->regmap, 0x119, buf, 3);
 	if (ret)
 		goto err;
 
@@ -602,15 +563,15 @@
 		u8tmp2 = 0xcd; /* enable ADC I, ADC Q */
 	}
 
-	ret = rtl2832_sdr_wr_reg(dev, 0x1b1, u8tmp1);
+	ret = regmap_write(dev->regmap, 0x1b1, u8tmp1);
 	if (ret)
 		goto err;
 
-	ret = rtl2832_sdr_wr_reg(dev, 0x008, u8tmp2);
+	ret = regmap_write(dev->regmap, 0x008, u8tmp2);
 	if (ret)
 		goto err;
 
-	ret = rtl2832_sdr_wr_reg(dev, 0x006, 0x80);
+	ret = regmap_write(dev->regmap, 0x006, 0x80);
 	if (ret)
 		goto err;
 
@@ -621,168 +582,169 @@
 	buf[1] = (u32tmp >> 16) & 0xff;
 	buf[2] = (u32tmp >>  8) & 0xff;
 	buf[3] = (u32tmp >>  0) & 0xff;
-	ret = rtl2832_sdr_wr_regs(dev, 0x19f, buf, 4);
+	ret = regmap_bulk_write(dev->regmap, 0x19f, buf, 4);
 	if (ret)
 		goto err;
 
 	/* low-pass filter */
-	ret = rtl2832_sdr_wr_regs(dev, 0x11c,
-			"\xca\xdc\xd7\xd8\xe0\xf2\x0e\x35\x06\x50\x9c\x0d\x71\x11\x14\x71\x74\x19\x41\xa5",
-			20);
+	ret = regmap_bulk_write(dev->regmap, 0x11c,
+				"\xca\xdc\xd7\xd8\xe0\xf2\x0e\x35\x06\x50\x9c\x0d\x71\x11\x14\x71\x74\x19\x41\xa5",
+				20);
 	if (ret)
 		goto err;
 
-	ret = rtl2832_sdr_wr_regs(dev, 0x017, "\x11\x10", 2);
+	ret = regmap_bulk_write(dev->regmap, 0x017, "\x11\x10", 2);
 	if (ret)
 		goto err;
 
 	/* mode */
-	ret = rtl2832_sdr_wr_regs(dev, 0x019, "\x05", 1);
+	ret = regmap_write(dev->regmap, 0x019, 0x05);
 	if (ret)
 		goto err;
 
-	ret = rtl2832_sdr_wr_regs(dev, 0x01a, "\x1b\x16\x0d\x06\x01\xff", 6);
+	ret = regmap_bulk_write(dev->regmap, 0x01a,
+				"\x1b\x16\x0d\x06\x01\xff", 6);
 	if (ret)
 		goto err;
 
 	/* FSM */
-	ret = rtl2832_sdr_wr_regs(dev, 0x192, "\x00\xf0\x0f", 3);
+	ret = regmap_bulk_write(dev->regmap, 0x192, "\x00\xf0\x0f", 3);
 	if (ret)
 		goto err;
 
 	/* PID filter */
-	ret = rtl2832_sdr_wr_regs(dev, 0x061, "\x60", 1);
+	ret = regmap_write(dev->regmap, 0x061, 0x60);
 	if (ret)
 		goto err;
 
 	/* used RF tuner based settings */
 	switch (pdata->tuner) {
 	case RTL2832_SDR_TUNER_E4000:
-		ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x30", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xd0", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x18", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xd4", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1e5, "\xf0", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1d9, "\x00", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1db, "\x00", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1dd, "\x14", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1de, "\xec", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1d8, "\x0c", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1e6, "\x02", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1d7, "\x09", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x83", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x010, "\x49", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x87", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x85", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x013, "\x02", 1);
+		ret = regmap_write(dev->regmap, 0x112, 0x5a);
+		ret = regmap_write(dev->regmap, 0x102, 0x40);
+		ret = regmap_write(dev->regmap, 0x103, 0x5a);
+		ret = regmap_write(dev->regmap, 0x1c7, 0x30);
+		ret = regmap_write(dev->regmap, 0x104, 0xd0);
+		ret = regmap_write(dev->regmap, 0x105, 0xbe);
+		ret = regmap_write(dev->regmap, 0x1c8, 0x18);
+		ret = regmap_write(dev->regmap, 0x106, 0x35);
+		ret = regmap_write(dev->regmap, 0x1c9, 0x21);
+		ret = regmap_write(dev->regmap, 0x1ca, 0x21);
+		ret = regmap_write(dev->regmap, 0x1cb, 0x00);
+		ret = regmap_write(dev->regmap, 0x107, 0x40);
+		ret = regmap_write(dev->regmap, 0x1cd, 0x10);
+		ret = regmap_write(dev->regmap, 0x1ce, 0x10);
+		ret = regmap_write(dev->regmap, 0x108, 0x80);
+		ret = regmap_write(dev->regmap, 0x109, 0x7f);
+		ret = regmap_write(dev->regmap, 0x10a, 0x80);
+		ret = regmap_write(dev->regmap, 0x10b, 0x7f);
+		ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+		ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+		ret = regmap_write(dev->regmap, 0x011, 0xd4);
+		ret = regmap_write(dev->regmap, 0x1e5, 0xf0);
+		ret = regmap_write(dev->regmap, 0x1d9, 0x00);
+		ret = regmap_write(dev->regmap, 0x1db, 0x00);
+		ret = regmap_write(dev->regmap, 0x1dd, 0x14);
+		ret = regmap_write(dev->regmap, 0x1de, 0xec);
+		ret = regmap_write(dev->regmap, 0x1d8, 0x0c);
+		ret = regmap_write(dev->regmap, 0x1e6, 0x02);
+		ret = regmap_write(dev->regmap, 0x1d7, 0x09);
+		ret = regmap_write(dev->regmap, 0x00d, 0x83);
+		ret = regmap_write(dev->regmap, 0x010, 0x49);
+		ret = regmap_write(dev->regmap, 0x00d, 0x87);
+		ret = regmap_write(dev->regmap, 0x00d, 0x85);
+		ret = regmap_write(dev->regmap, 0x013, 0x02);
 		break;
 	case RTL2832_SDR_TUNER_FC0012:
 	case RTL2832_SDR_TUNER_FC0013:
-		ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x2c", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x16", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xe9\xbf", 2);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1e5, "\xf0", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1d9, "\x00", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1db, "\x00", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1dd, "\x11", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1de, "\xef", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1d8, "\x0c", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1e6, "\x02", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1d7, "\x09", 1);
+		ret = regmap_write(dev->regmap, 0x112, 0x5a);
+		ret = regmap_write(dev->regmap, 0x102, 0x40);
+		ret = regmap_write(dev->regmap, 0x103, 0x5a);
+		ret = regmap_write(dev->regmap, 0x1c7, 0x2c);
+		ret = regmap_write(dev->regmap, 0x104, 0xcc);
+		ret = regmap_write(dev->regmap, 0x105, 0xbe);
+		ret = regmap_write(dev->regmap, 0x1c8, 0x16);
+		ret = regmap_write(dev->regmap, 0x106, 0x35);
+		ret = regmap_write(dev->regmap, 0x1c9, 0x21);
+		ret = regmap_write(dev->regmap, 0x1ca, 0x21);
+		ret = regmap_write(dev->regmap, 0x1cb, 0x00);
+		ret = regmap_write(dev->regmap, 0x107, 0x40);
+		ret = regmap_write(dev->regmap, 0x1cd, 0x10);
+		ret = regmap_write(dev->regmap, 0x1ce, 0x10);
+		ret = regmap_write(dev->regmap, 0x108, 0x80);
+		ret = regmap_write(dev->regmap, 0x109, 0x7f);
+		ret = regmap_write(dev->regmap, 0x10a, 0x80);
+		ret = regmap_write(dev->regmap, 0x10b, 0x7f);
+		ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+		ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+		ret = regmap_bulk_write(dev->regmap, 0x011, "\xe9\xbf", 2);
+		ret = regmap_write(dev->regmap, 0x1e5, 0xf0);
+		ret = regmap_write(dev->regmap, 0x1d9, 0x00);
+		ret = regmap_write(dev->regmap, 0x1db, 0x00);
+		ret = regmap_write(dev->regmap, 0x1dd, 0x11);
+		ret = regmap_write(dev->regmap, 0x1de, 0xef);
+		ret = regmap_write(dev->regmap, 0x1d8, 0x0c);
+		ret = regmap_write(dev->regmap, 0x1e6, 0x02);
+		ret = regmap_write(dev->regmap, 0x1d7, 0x09);
 		break;
 	case RTL2832_SDR_TUNER_R820T:
 	case RTL2832_SDR_TUNER_R828D:
-		ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x01", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x80", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x24", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x14", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xf4", 1);
+		ret = regmap_write(dev->regmap, 0x112, 0x5a);
+		ret = regmap_write(dev->regmap, 0x102, 0x40);
+		ret = regmap_write(dev->regmap, 0x115, 0x01);
+		ret = regmap_write(dev->regmap, 0x103, 0x80);
+		ret = regmap_write(dev->regmap, 0x1c7, 0x24);
+		ret = regmap_write(dev->regmap, 0x104, 0xcc);
+		ret = regmap_write(dev->regmap, 0x105, 0xbe);
+		ret = regmap_write(dev->regmap, 0x1c8, 0x14);
+		ret = regmap_write(dev->regmap, 0x106, 0x35);
+		ret = regmap_write(dev->regmap, 0x1c9, 0x21);
+		ret = regmap_write(dev->regmap, 0x1ca, 0x21);
+		ret = regmap_write(dev->regmap, 0x1cb, 0x00);
+		ret = regmap_write(dev->regmap, 0x107, 0x40);
+		ret = regmap_write(dev->regmap, 0x1cd, 0x10);
+		ret = regmap_write(dev->regmap, 0x1ce, 0x10);
+		ret = regmap_write(dev->regmap, 0x108, 0x80);
+		ret = regmap_write(dev->regmap, 0x109, 0x7f);
+		ret = regmap_write(dev->regmap, 0x10a, 0x80);
+		ret = regmap_write(dev->regmap, 0x10b, 0x7f);
+		ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+		ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+		ret = regmap_write(dev->regmap, 0x011, 0xf4);
 		break;
 	case RTL2832_SDR_TUNER_FC2580:
-		ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x39", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x2c", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x16", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x9c", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
-		ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xe9\xf4", 2);
+		ret = regmap_write(dev->regmap, 0x112, 0x39);
+		ret = regmap_write(dev->regmap, 0x102, 0x40);
+		ret = regmap_write(dev->regmap, 0x103, 0x5a);
+		ret = regmap_write(dev->regmap, 0x1c7, 0x2c);
+		ret = regmap_write(dev->regmap, 0x104, 0xcc);
+		ret = regmap_write(dev->regmap, 0x105, 0xbe);
+		ret = regmap_write(dev->regmap, 0x1c8, 0x16);
+		ret = regmap_write(dev->regmap, 0x106, 0x35);
+		ret = regmap_write(dev->regmap, 0x1c9, 0x21);
+		ret = regmap_write(dev->regmap, 0x1ca, 0x21);
+		ret = regmap_write(dev->regmap, 0x1cb, 0x00);
+		ret = regmap_write(dev->regmap, 0x107, 0x40);
+		ret = regmap_write(dev->regmap, 0x1cd, 0x10);
+		ret = regmap_write(dev->regmap, 0x1ce, 0x10);
+		ret = regmap_write(dev->regmap, 0x108, 0x80);
+		ret = regmap_write(dev->regmap, 0x109, 0x7f);
+		ret = regmap_write(dev->regmap, 0x10a, 0x9c);
+		ret = regmap_write(dev->regmap, 0x10b, 0x7f);
+		ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+		ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+		ret = regmap_bulk_write(dev->regmap, 0x011, "\xe9\xf4", 2);
 		break;
 	default:
 		dev_notice(&pdev->dev, "Unsupported tuner\n");
 	}
 
 	/* software reset */
-	ret = rtl2832_sdr_wr_reg_mask(dev, 0x101, 0x04, 0x04);
+	ret = regmap_update_bits(dev->regmap, 0x101, 0x04, 0x04);
 	if (ret)
 		goto err;
 
-	ret = rtl2832_sdr_wr_reg_mask(dev, 0x101, 0x00, 0x04);
+	ret = regmap_update_bits(dev->regmap, 0x101, 0x04, 0x00);
 	if (ret)
 		goto err;
 err:
@@ -797,29 +759,29 @@
 	dev_dbg(&pdev->dev, "\n");
 
 	/* PID filter */
-	ret = rtl2832_sdr_wr_regs(dev, 0x061, "\xe0", 1);
+	ret = regmap_write(dev->regmap, 0x061, 0xe0);
 	if (ret)
 		goto err;
 
 	/* mode */
-	ret = rtl2832_sdr_wr_regs(dev, 0x019, "\x20", 1);
+	ret = regmap_write(dev->regmap, 0x019, 0x20);
 	if (ret)
 		goto err;
 
-	ret = rtl2832_sdr_wr_regs(dev, 0x017, "\x11\x10", 2);
+	ret = regmap_bulk_write(dev->regmap, 0x017, "\x11\x10", 2);
 	if (ret)
 		goto err;
 
 	/* FSM */
-	ret = rtl2832_sdr_wr_regs(dev, 0x192, "\x00\x0f\xff", 3);
+	ret = regmap_bulk_write(dev->regmap, 0x192, "\x00\x0f\xff", 3);
 	if (ret)
 		goto err;
 
-	ret = rtl2832_sdr_wr_regs(dev, 0x13e, "\x40\x00", 2);
+	ret = regmap_bulk_write(dev->regmap, 0x13e, "\x40\x00", 2);
 	if (ret)
 		goto err;
 
-	ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x06\x3f\xce\xcc", 4);
+	ret = regmap_bulk_write(dev->regmap, 0x115, "\x06\x3f\xce\xcc", 4);
 	if (ret)
 		goto err;
 err:
@@ -1399,6 +1361,7 @@
 	subdev = pdata->v4l2_subdev;
 	dev->v4l2_subdev = pdata->v4l2_subdev;
 	dev->pdev = pdev;
+	dev->regmap = pdata->regmap;
 	dev->udev = pdata->dvb_usb_device->udev;
 	dev->f_adc = bands_adc[0].rangelow;
 	dev->f_tuner = bands_fm[0].rangelow;
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.h b/drivers/media/dvb-frontends/rtl2832_sdr.h
index 342ea84..d8fc7e7 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.h
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.h
@@ -56,10 +56,7 @@
 #define RTL2832_SDR_TUNER_R828D     0x2b
 	u8 tuner;
 
-	struct i2c_client *i2c_client;
-	int (*bulk_read)(struct i2c_client *, unsigned int, void *, size_t);
-	int (*bulk_write)(struct i2c_client *, unsigned int, const void *, size_t);
-	int (*update_bits)(struct i2c_client *, unsigned int, unsigned int, unsigned int);
+	struct regmap *regmap;
 	struct dvb_frontend *dvb_frontend;
 	struct v4l2_subdev *v4l2_subdev;
 	struct dvb_usb_device *dvb_usb_device;
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 821a8f4..108a069 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -18,53 +18,23 @@
 
 static const struct dvb_frontend_ops si2168_ops;
 
-/* Own I2C adapter locking is needed because of I2C gate logic. */
-static int si2168_i2c_master_send_unlocked(const struct i2c_client *client,
-					   const char *buf, int count)
-{
-	int ret;
-	struct i2c_msg msg = {
-		.addr = client->addr,
-		.flags = 0,
-		.len = count,
-		.buf = (char *)buf,
-	};
-
-	ret = __i2c_transfer(client->adapter, &msg, 1);
-	return (ret == 1) ? count : ret;
-}
-
-static int si2168_i2c_master_recv_unlocked(const struct i2c_client *client,
-					   char *buf, int count)
-{
-	int ret;
-	struct i2c_msg msg = {
-		.addr = client->addr,
-		.flags = I2C_M_RD,
-		.len = count,
-		.buf = buf,
-	};
-
-	ret = __i2c_transfer(client->adapter, &msg, 1);
-	return (ret == 1) ? count : ret;
-}
-
 /* execute firmware command */
-static int si2168_cmd_execute_unlocked(struct i2c_client *client,
-				       struct si2168_cmd *cmd)
+static int si2168_cmd_execute(struct i2c_client *client, struct si2168_cmd *cmd)
 {
+	struct si2168_dev *dev = i2c_get_clientdata(client);
 	int ret;
 	unsigned long timeout;
 
+	mutex_lock(&dev->i2c_mutex);
+
 	if (cmd->wlen) {
 		/* write cmd and args for firmware */
-		ret = si2168_i2c_master_send_unlocked(client, cmd->args,
-						      cmd->wlen);
+		ret = i2c_master_send(client, cmd->args, cmd->wlen);
 		if (ret < 0) {
-			goto err;
+			goto err_mutex_unlock;
 		} else if (ret != cmd->wlen) {
 			ret = -EREMOTEIO;
-			goto err;
+			goto err_mutex_unlock;
 		}
 	}
 
@@ -73,13 +43,12 @@
 		#define TIMEOUT 70
 		timeout = jiffies + msecs_to_jiffies(TIMEOUT);
 		while (!time_after(jiffies, timeout)) {
-			ret = si2168_i2c_master_recv_unlocked(client, cmd->args,
-							      cmd->rlen);
+			ret = i2c_master_recv(client, cmd->args, cmd->rlen);
 			if (ret < 0) {
-				goto err;
+				goto err_mutex_unlock;
 			} else if (ret != cmd->rlen) {
 				ret = -EREMOTEIO;
-				goto err;
+				goto err_mutex_unlock;
 			}
 
 			/* firmware ready? */
@@ -94,32 +63,23 @@
 		/* error bit set? */
 		if ((cmd->args[0] >> 6) & 0x01) {
 			ret = -EREMOTEIO;
-			goto err;
+			goto err_mutex_unlock;
 		}
 
 		if (!((cmd->args[0] >> 7) & 0x01)) {
 			ret = -ETIMEDOUT;
-			goto err;
+			goto err_mutex_unlock;
 		}
 	}
 
+	mutex_unlock(&dev->i2c_mutex);
 	return 0;
-err:
+err_mutex_unlock:
+	mutex_unlock(&dev->i2c_mutex);
 	dev_dbg(&client->dev, "failed=%d\n", ret);
 	return ret;
 }
 
-static int si2168_cmd_execute(struct i2c_client *client, struct si2168_cmd *cmd)
-{
-	int ret;
-
-	i2c_lock_adapter(client->adapter);
-	ret = si2168_cmd_execute_unlocked(client, cmd);
-	i2c_unlock_adapter(client->adapter);
-
-	return ret;
-}
-
 static int si2168_read_status(struct dvb_frontend *fe, enum fe_status *status)
 {
 	struct i2c_client *client = fe->demodulator_priv;
@@ -610,14 +570,9 @@
 	return 0;
 }
 
-/*
- * I2C gate logic
- * We must use unlocked I2C I/O because I2C adapter lock is already taken
- * by the caller (usually tuner driver).
- */
-static int si2168_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
+static int si2168_select(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct i2c_client *client = mux_priv;
+	struct i2c_client *client = i2c_mux_priv(muxc);
 	int ret;
 	struct si2168_cmd cmd;
 
@@ -625,7 +580,7 @@
 	memcpy(cmd.args, "\xc0\x0d\x01", 3);
 	cmd.wlen = 3;
 	cmd.rlen = 0;
-	ret = si2168_cmd_execute_unlocked(client, &cmd);
+	ret = si2168_cmd_execute(client, &cmd);
 	if (ret)
 		goto err;
 
@@ -635,9 +590,9 @@
 	return ret;
 }
 
-static int si2168_deselect(struct i2c_adapter *adap, void *mux_priv, u32 chan)
+static int si2168_deselect(struct i2c_mux_core *muxc, u32 chan)
 {
-	struct i2c_client *client = mux_priv;
+	struct i2c_client *client = i2c_mux_priv(muxc);
 	int ret;
 	struct si2168_cmd cmd;
 
@@ -645,7 +600,7 @@
 	memcpy(cmd.args, "\xc0\x0d\x00", 3);
 	cmd.wlen = 3;
 	cmd.rlen = 0;
-	ret = si2168_cmd_execute_unlocked(client, &cmd);
+	ret = si2168_cmd_execute(client, &cmd);
 	if (ret)
 		goto err;
 
@@ -708,18 +663,25 @@
 		goto err;
 	}
 
+	mutex_init(&dev->i2c_mutex);
+
 	/* create mux i2c adapter for tuner */
-	dev->adapter = i2c_add_mux_adapter(client->adapter, &client->dev,
-			client, 0, 0, 0, si2168_select, si2168_deselect);
-	if (dev->adapter == NULL) {
-		ret = -ENODEV;
+	dev->muxc = i2c_mux_alloc(client->adapter, &client->dev,
+				  1, 0, I2C_MUX_LOCKED,
+				  si2168_select, si2168_deselect);
+	if (!dev->muxc) {
+		ret = -ENOMEM;
 		goto err_kfree;
 	}
+	dev->muxc->priv = client;
+	ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+	if (ret)
+		goto err_kfree;
 
 	/* create dvb_frontend */
 	memcpy(&dev->fe.ops, &si2168_ops, sizeof(struct dvb_frontend_ops));
 	dev->fe.demodulator_priv = client;
-	*config->i2c_adapter = dev->adapter;
+	*config->i2c_adapter = dev->muxc->adapter[0];
 	*config->fe = &dev->fe;
 	dev->ts_mode = config->ts_mode;
 	dev->ts_clock_inv = config->ts_clock_inv;
@@ -743,7 +705,7 @@
 
 	dev_dbg(&client->dev, "\n");
 
-	i2c_del_mux_adapter(dev->adapter);
+	i2c_mux_del_adapters(dev->muxc);
 
 	dev->fe.ops.release = NULL;
 	dev->fe.demodulator_priv = NULL;
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index c07e6fe..8a1f36d 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -29,7 +29,8 @@
 
 /* state struct */
 struct si2168_dev {
-	struct i2c_adapter *adapter;
+	struct mutex i2c_mutex;
+	struct i2c_mux_core *muxc;
 	struct dvb_frontend fe;
 	enum fe_delivery_system delivery_system;
 	enum fe_status fe_status;
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 1832c2f..3b08176 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -135,8 +135,7 @@
 
 	value = (u64)10 * (1 << 23) / 7 * 125;
 	value = (bw * value) + adc_clock / 2;
-	do_div(value, adc_clock);
-	*nominal_rate = value;
+	*nominal_rate = div_u64(value, adc_clock);
 
 	dprintk("%s: bw %d, adc_clock %d => 0x%x\n",
 		__func__, bw, adc_clock, *nominal_rate);
@@ -163,8 +162,7 @@
 		if (ife > adc_clock / 2)
 			ife = adc_clock - ife;
 	}
-	value = (u64)65536 * ife + adc_clock / 2;
-	do_div(value, adc_clock);
+	value = div_u64((u64)65536 * ife + adc_clock / 2, adc_clock);
 	*input_freq = -value;
 
 	dprintk("%s: if2 %d, ife %d, adc_clock %d => %d / 0x%x\n",
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 788967d..0462f46 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -1130,8 +1130,6 @@
 	hdl = &state->hdl;
 	v4l2_ctrl_handler_init(hdl, 5);
 
-	/* private controls */
-
 	state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &ad9389b_ctrl_ops,
 			V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
 			0, V4L2_DV_TX_MODE_DVI_D);
@@ -1151,12 +1149,6 @@
 
 		goto err_hdl;
 	}
-	state->hdmi_mode_ctrl->is_private = true;
-	state->hotplug_ctrl->is_private = true;
-	state->rx_sense_ctrl->is_private = true;
-	state->have_edid0_ctrl->is_private = true;
-	state->rgb_quantization_range_ctrl->is_private = true;
-
 	state->pad.flags = MEDIA_PAD_FL_SINK;
 	err = media_entity_pads_init(&sd->entity, 1, &state->pad);
 	if (err)
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index fb7ed73..9e1731c 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -466,9 +466,9 @@
 	of_node_put(child);
 
 	pd->enable_gpio = devm_gpiod_get(&client->dev, "enable", GPIOD_OUT_LOW);
-	if (!pd->enable_gpio) {
+	if (IS_ERR(pd->enable_gpio)) {
 		dev_err(&client->dev, "Error getting GPIO\n");
-		return -EINVAL;
+		return PTR_ERR(pd->enable_gpio);
 	}
 
 	return 0;
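
For reference on the adp1653 fix above: devm_gpiod_get() reports failure with an ERR_PTR()-encoded pointer and never returns NULL, so the old !pd->enable_gpio test could not detect an error at all; checking IS_ERR() and propagating PTR_ERR() both catches the failure and preserves the real error code (e.g. -EPROBE_DEFER) instead of flattening it to -EINVAL.
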
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index ff57c1d..b77b0a4 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -26,8 +26,9 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/of.h>
-#include <media/v4l2-ioctl.h>
 #include <linux/videodev2.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ctrls.h>
 #include <linux/mutex.h>
@@ -192,8 +193,8 @@
 	struct mutex		mutex; /* mutual excl. when accessing chip */
 	int			irq;
 	v4l2_std_id		curr_norm;
-	bool			autodetect;
 	bool			powered;
+	bool			streaming;
 	u8			input;
 
 	struct i2c_client	*client;
@@ -338,12 +339,26 @@
 	if (err)
 		return err;
 
-	/* when we are interrupt driven we know the state */
-	if (!state->autodetect || state->irq > 0)
-		*std = state->curr_norm;
-	else
-		err = __adv7180_status(state, NULL, std);
+	if (state->streaming) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
+	err = adv7180_set_video_standard(state,
+			ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
+	if (err)
+		goto unlock;
+
+	msleep(100);
+	__adv7180_status(state, NULL, std);
+
+	err = v4l2_std_to_adv7180(state->curr_norm);
+	if (err < 0)
+		goto unlock;
+
+	err = adv7180_set_video_standard(state, err);
+
+unlock:
 	mutex_unlock(&state->mutex);
 	return err;
 }
@@ -387,23 +402,13 @@
 {
 	int ret;
 
-	if (state->autodetect) {
-		ret = adv7180_set_video_standard(state,
-			ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
-		if (ret < 0)
-			return ret;
+	ret = v4l2_std_to_adv7180(state->curr_norm);
+	if (ret < 0)
+		return ret;
 
-		__adv7180_status(state, NULL, &state->curr_norm);
-	} else {
-		ret = v4l2_std_to_adv7180(state->curr_norm);
-		if (ret < 0)
-			return ret;
-
-		ret = adv7180_set_video_standard(state, ret);
-		if (ret < 0)
-			return ret;
-	}
-
+	ret = adv7180_set_video_standard(state, ret);
+	if (ret < 0)
+		return ret;
 	return 0;
 }
 
@@ -415,18 +420,12 @@
 	if (ret)
 		return ret;
 
-	/* all standards -> autodetect */
-	if (std == V4L2_STD_ALL) {
-		state->autodetect = true;
-	} else {
-		/* Make sure we can support this std */
-		ret = v4l2_std_to_adv7180(std);
-		if (ret < 0)
-			goto out;
+	/* Make sure we can support this std */
+	ret = v4l2_std_to_adv7180(std);
+	if (ret < 0)
+		goto out;
 
-		state->curr_norm = std;
-		state->autodetect = false;
-	}
+	state->curr_norm = std;
 
 	ret = adv7180_program_std(state);
 out:
@@ -434,6 +433,15 @@
 	return ret;
 }
 
+static int adv7180_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
+{
+	struct adv7180_state *state = to_state(sd);
+
+	*norm = state->curr_norm;
+
+	return 0;
+}
+
 static int adv7180_set_power(struct adv7180_state *state, bool on)
 {
 	u8 val;
@@ -717,17 +725,77 @@
 	return 0;
 }
 
+static int adv7180_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *cropcap)
+{
+	struct adv7180_state *state = to_state(sd);
+
+	if (state->curr_norm & V4L2_STD_525_60) {
+		cropcap->pixelaspect.numerator = 11;
+		cropcap->pixelaspect.denominator = 10;
+	} else {
+		cropcap->pixelaspect.numerator = 54;
+		cropcap->pixelaspect.denominator = 59;
+	}
+
+	return 0;
+}
+
+static int adv7180_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm)
+{
+	*norm = V4L2_STD_ALL;
+	return 0;
+}
+
+static int adv7180_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct adv7180_state *state = to_state(sd);
+	int ret;
+
+	/* It's always safe to stop streaming, no need to take the lock */
+	if (!enable) {
+		state->streaming = enable;
+		return 0;
+	}
+
+	/* Must wait until querystd released the lock */
+	ret = mutex_lock_interruptible(&state->mutex);
+	if (ret)
+		return ret;
+	state->streaming = enable;
+	mutex_unlock(&state->mutex);
+	return 0;
+}
+
+static int adv7180_subscribe_event(struct v4l2_subdev *sd,
+				   struct v4l2_fh *fh,
+				   struct v4l2_event_subscription *sub)
+{
+	switch (sub->type) {
+	case V4L2_EVENT_SOURCE_CHANGE:
+		return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
+	case V4L2_EVENT_CTRL:
+		return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+	default:
+		return -EINVAL;
+	}
+}
+
 static const struct v4l2_subdev_video_ops adv7180_video_ops = {
 	.s_std = adv7180_s_std,
+	.g_std = adv7180_g_std,
 	.querystd = adv7180_querystd,
 	.g_input_status = adv7180_g_input_status,
 	.s_routing = adv7180_s_routing,
 	.g_mbus_config = adv7180_g_mbus_config,
+	.cropcap = adv7180_cropcap,
+	.g_tvnorms = adv7180_g_tvnorms,
+	.s_stream = adv7180_s_stream,
 };
 
-
 static const struct v4l2_subdev_core_ops adv7180_core_ops = {
 	.s_power = adv7180_s_power,
+	.subscribe_event = adv7180_subscribe_event,
+	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
 };
 
 static const struct v4l2_subdev_pad_ops adv7180_pad_ops = {
@@ -752,8 +820,14 @@
 	/* clear */
 	adv7180_write(state, ADV7180_REG_ICR3, isr3);
 
-	if (isr3 & ADV7180_IRQ3_AD_CHANGE && state->autodetect)
-		__adv7180_status(state, NULL, &state->curr_norm);
+	if (isr3 & ADV7180_IRQ3_AD_CHANGE) {
+		static const struct v4l2_event src_ch = {
+			.type = V4L2_EVENT_SOURCE_CHANGE,
+			.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+		};
+
+		v4l2_subdev_notify_event(&state->sd, &src_ch);
+	}
 	mutex_unlock(&state->mutex);
 
 	return IRQ_HANDLED;
@@ -1198,7 +1272,7 @@
 
 	state->irq = client->irq;
 	mutex_init(&state->mutex);
-	state->autodetect = true;
+	state->curr_norm = V4L2_STD_NTSC;
 	if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
 		state->powered = true;
 	else
@@ -1206,7 +1280,7 @@
 	state->input = 0;
 	sd = &state->sd;
 	v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
-	sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+	sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
 
 	ret = adv7180_init_controls(state);
 	if (ret)
@@ -1328,6 +1402,14 @@
 #ifdef CONFIG_OF
 static const struct of_device_id adv7180_of_id[] = {
 	{ .compatible = "adi,adv7180", },
+	{ .compatible = "adi,adv7182", },
+	{ .compatible = "adi,adv7280", },
+	{ .compatible = "adi,adv7280-m", },
+	{ .compatible = "adi,adv7281", },
+	{ .compatible = "adi,adv7281-m", },
+	{ .compatible = "adi,adv7281-ma", },
+	{ .compatible = "adi,adv7282", },
+	{ .compatible = "adi,adv7282-m", },
 	{ },
 };
 
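
With the autodetect flag removed, adv7180 no longer rewrites curr_norm behind the application's back: the interrupt handler now emits V4L2_EVENT_SOURCE_CHANGE, querystd returns -EBUSY while streaming, and userspace is expected to stop streaming and re-query the standard when the event arrives. A rough userspace sketch of waiting for that event; the device node path is purely illustrative:

	/* Sketch only: waiting for a source-change event from a V4L2 (sub)device. */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_event_subscription sub;
		struct v4l2_event ev;
		int fd = open("/dev/v4l-subdev0", O_RDWR);	/* illustrative path */

		if (fd < 0)
			return 1;

		memset(&sub, 0, sizeof(sub));
		sub.type = V4L2_EVENT_SOURCE_CHANGE;
		if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
			return 1;

		/* blocks (in blocking mode) until the decoder reports a change */
		if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0 &&
		    ev.type == V4L2_EVENT_SOURCE_CHANGE) {
			/* stop streaming, re-run QUERYSTD, restart streaming */
		}

		return 0;
	}
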
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index bd822f0..39271c3 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1502,12 +1502,6 @@
 		err = hdl->error;
 		goto err_hdl;
 	}
-	state->hdmi_mode_ctrl->is_private = true;
-	state->hotplug_ctrl->is_private = true;
-	state->rx_sense_ctrl->is_private = true;
-	state->have_edid0_ctrl->is_private = true;
-	state->rgb_quantization_range_ctrl->is_private = true;
-
 	state->pad.flags = MEDIA_PAD_FL_SINK;
 	err = media_entity_pads_init(&sd->entity, 1, &state->pad);
 	if (err)
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 41a1bfc..beb2841 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -3141,7 +3141,6 @@
 	if (ctrl)
 		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
 
-	/* private controls */
 	state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
 			V4L2_CID_DV_RX_POWER_PRESENT, 0,
 			(1 << state->info->num_dv_ports) - 1, 0, 0);
@@ -3164,13 +3163,6 @@
 		err = hdl->error;
 		goto err_hdl;
 	}
-	state->detect_tx_5v_ctrl->is_private = true;
-	state->rgb_quantization_range_ctrl->is_private = true;
-	if (adv76xx_has_afe(state))
-		state->analog_sampling_phase_ctrl->is_private = true;
-	state->free_run_color_manual_ctrl->is_private = true;
-	state->free_run_color_ctrl->is_private = true;
-
 	if (adv76xx_s_detect_tx_5v_ctrl(sd)) {
 		err = -ENODEV;
 		goto err_hdl;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 7ccb85d..ecaacb0 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3300,12 +3300,6 @@
 		err = hdl->error;
 		goto err_hdl;
 	}
-	state->detect_tx_5v_ctrl->is_private = true;
-	state->rgb_quantization_range_ctrl->is_private = true;
-	state->analog_sampling_phase_ctrl->is_private = true;
-	state->free_run_color_ctrl_manual->is_private = true;
-	state->free_run_color_ctrl->is_private = true;
-
 	if (adv7842_s_detect_tx_5v_ctrl(sd)) {
 		err = -ENODEV;
 		goto err_hdl;
diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c
index a60931e..c2218c0 100644
--- a/drivers/media/i2c/m5mols/m5mols_controls.c
+++ b/drivers/media/i2c/m5mols/m5mols_controls.c
@@ -405,7 +405,7 @@
 	struct v4l2_subdev *sd = to_sd(ctrl);
 	struct m5mols_info *info = to_m5mols(sd);
 	int ret = 0;
-	u8 status;
+	u8 status = REG_ISO_AUTO;
 
 	v4l2_dbg(1, m5mols_debug, sd, "%s: ctrl: %s (%d)\n",
 		 __func__, ctrl->name, info->isp_ready);
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index d2a1ce2..bd3526b 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1798,6 +1798,21 @@
 		return GM7113C;
 	}
 
+	/* Check if it is a CJC7113 */
+	if (!memcmp(name, "1111111111111111", CHIP_VER_SIZE)) {
+		strlcpy(name, "cjc7113", CHIP_VER_SIZE);
+
+		if (!autodetect && strcmp(name, id->name))
+			return -EINVAL;
+
+		v4l_dbg(1, debug, client,
+			"It seems to be a %s chip (%*ph) @ 0x%x.\n",
+			name, 16, chip_ver, client->addr << 1);
+
+		/* CJC7113 seems to be SAA7113-compatible */
+		return SAA7113;
+	}
+
 	/* Chip was not discovered. Return its ID and don't bind */
 	v4l_dbg(1, debug, client, "chip %*ph @ 0x%x is unknown.\n",
 		16, chip_ver, client->addr << 1);
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index a215efe..3dfe387 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -188,6 +188,8 @@
 		embedded_end = 0;
 	}
 
+	sensor->image_start = image_start;
+
 	dev_dbg(&client->dev, "embedded data from lines %d to %d\n",
 		embedded_start, embedded_end);
 	dev_dbg(&client->dev, "image data starts at line %d\n", image_start);
@@ -2280,6 +2282,15 @@
 	return 0;
 }
 
+static int smiapp_get_skip_top_lines(struct v4l2_subdev *subdev, u32 *lines)
+{
+	struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+
+	*lines = sensor->image_start;
+
+	return 0;
+}
+
 /* -----------------------------------------------------------------------------
  * sysfs attributes
  */
@@ -2890,6 +2901,7 @@
 
 static const struct v4l2_subdev_sensor_ops smiapp_sensor_ops = {
 	.g_skip_frames = smiapp_get_skip_frames,
+	.g_skip_top_lines = smiapp_get_skip_top_lines,
 };
 
 static const struct v4l2_subdev_ops smiapp_ops = {
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index f6af0cc..2174f89 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -217,6 +217,7 @@
 
 	u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
 	u8 frame_skip;
+	u16 image_start;	/* Offset to first line after metadata lines */
 
 	int power_count;
 
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 972e0d4..6cf6d06 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1551,6 +1551,8 @@
 {
 	struct tc358743_state *state = to_state(sd);
 
+	memset(edid->reserved, 0, sizeof(edid->reserved));
+
 	if (edid->pad != 0)
 		return -EINVAL;
 
@@ -1585,6 +1587,8 @@
 	v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n",
 		 __func__, edid->pad, edid->start_block, edid->blocks);
 
+	memset(edid->reserved, 0, sizeof(edid->reserved));
+
 	if (edid->pad != 0)
 		return -EINVAL;
 
@@ -1859,7 +1863,6 @@
 	/* control handlers */
 	v4l2_ctrl_handler_init(&state->hdl, 3);
 
-	/* private controls */
 	state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(&state->hdl, NULL,
 			V4L2_CID_DV_RX_POWER_PRESENT, 0, 1, 0, 0);
 
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 5bbfcab..71a3135 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -285,7 +285,7 @@
 	v4l2_info(sd, "stream %s\n", state->stream_on ? "On" : "Off");
 
 	if (state->bt.pixelclock) {
-		struct v4l2_bt_timings *bt = bt = &state->bt;
+		struct v4l2_bt_timings *bt = &state->bt;
 		u32 frame_width, frame_height;
 
 		frame_width = V4L2_DV_BT_FRAME_WIDTH(bt);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index ff18444..0b6d46c 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -83,7 +83,7 @@
 	return rc;
 }
 
-static inline void tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
+static int tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
 				 unsigned char value)
 {
 	struct i2c_client *c = v4l2_get_subdevdata(sd);
@@ -92,7 +92,9 @@
 	v4l2_dbg(2, debug, sd, "tvp5150: writing 0x%02x 0x%02x\n", addr, value);
 	rc = i2c_smbus_write_byte_data(c, addr, value);
 	if (rc < 0)
-		v4l2_dbg(0, debug, sd, "i2c i/o error: rc == %d\n", rc);
+		v4l2_err(sd, "i2c i/o error: rc == %d\n", rc);
+
+	return rc;
 }
 
 static void dump_reg_range(struct v4l2_subdev *sd, char *s, u8 init,
@@ -1159,8 +1161,7 @@
 
 static int tvp5150_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
 {
-	tvp5150_write(sd, reg->reg & 0xff, reg->val & 0xff);
-	return 0;
+	return tvp5150_write(sd, reg->reg & 0xff, reg->val & 0xff);
 }
 #endif
 
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 6e43c95..a1cd50f 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -90,18 +90,13 @@
 
 	id &= ~MEDIA_ENT_ID_FLAG_NEXT;
 
-	spin_lock(&mdev->lock);
-
 	media_device_for_each_entity(entity, mdev) {
 		if (((media_entity_id(entity) == id) && !next) ||
 		    ((media_entity_id(entity) > id) && next)) {
-			spin_unlock(&mdev->lock);
 			return entity;
 		}
 	}
 
-	spin_unlock(&mdev->lock);
-
 	return NULL;
 }
 
@@ -431,6 +426,7 @@
 	struct media_device *dev = to_media_device(devnode);
 	long ret;
 
+	mutex_lock(&dev->graph_mutex);
 	switch (cmd) {
 	case MEDIA_IOC_DEVICE_INFO:
 		ret = media_device_get_info(dev,
@@ -443,29 +439,24 @@
 		break;
 
 	case MEDIA_IOC_ENUM_LINKS:
-		mutex_lock(&dev->graph_mutex);
 		ret = media_device_enum_links(dev,
 				(struct media_links_enum __user *)arg);
-		mutex_unlock(&dev->graph_mutex);
 		break;
 
 	case MEDIA_IOC_SETUP_LINK:
-		mutex_lock(&dev->graph_mutex);
 		ret = media_device_setup_link(dev,
 				(struct media_link_desc __user *)arg);
-		mutex_unlock(&dev->graph_mutex);
 		break;
 
 	case MEDIA_IOC_G_TOPOLOGY:
-		mutex_lock(&dev->graph_mutex);
 		ret = media_device_get_topology(dev,
 				(struct media_v2_topology __user *)arg);
-		mutex_unlock(&dev->graph_mutex);
 		break;
 
 	default:
 		ret = -ENOIOCTLCMD;
 	}
+	mutex_unlock(&dev->graph_mutex);
 
 	return ret;
 }
@@ -508,12 +499,6 @@
 	long ret;
 
 	switch (cmd) {
-	case MEDIA_IOC_DEVICE_INFO:
-	case MEDIA_IOC_ENUM_ENTITIES:
-	case MEDIA_IOC_SETUP_LINK:
-	case MEDIA_IOC_G_TOPOLOGY:
-		return media_device_ioctl(filp, cmd, arg);
-
 	case MEDIA_IOC_ENUM_LINKS32:
 		mutex_lock(&dev->graph_mutex);
 		ret = media_device_enum_links32(dev,
@@ -522,7 +507,7 @@
 		break;
 
 	default:
-		ret = -ENOIOCTLCMD;
+		return media_device_ioctl(filp, cmd, arg);
 	}
 
 	return ret;
@@ -590,12 +575,12 @@
 	if (!ida_pre_get(&mdev->entity_internal_idx, GFP_KERNEL))
 		return -ENOMEM;
 
-	spin_lock(&mdev->lock);
+	mutex_lock(&mdev->graph_mutex);
 
 	ret = ida_get_new_above(&mdev->entity_internal_idx, 1,
 				&entity->internal_idx);
 	if (ret < 0) {
-		spin_unlock(&mdev->lock);
+		mutex_unlock(&mdev->graph_mutex);
 		return ret;
 	}
 
@@ -615,9 +600,6 @@
 		(notify)->notify(entity, notify->notify_data);
 	}
 
-	spin_unlock(&mdev->lock);
-
-	mutex_lock(&mdev->graph_mutex);
 	if (mdev->entity_internal_idx_max
 	    >= mdev->pm_count_walk.ent_enum.idx_max) {
 		struct media_entity_graph new = { .top = 0 };
@@ -680,9 +662,9 @@
 	if (mdev == NULL)
 		return;
 
-	spin_lock(&mdev->lock);
+	mutex_lock(&mdev->graph_mutex);
 	__media_device_unregister_entity(entity);
-	spin_unlock(&mdev->lock);
+	mutex_unlock(&mdev->graph_mutex);
 }
 EXPORT_SYMBOL_GPL(media_device_unregister_entity);
 
@@ -703,7 +685,6 @@
 	INIT_LIST_HEAD(&mdev->pads);
 	INIT_LIST_HEAD(&mdev->links);
 	INIT_LIST_HEAD(&mdev->entity_notify);
-	spin_lock_init(&mdev->lock);
 	mutex_init(&mdev->graph_mutex);
 	ida_init(&mdev->entity_internal_idx);
 
@@ -752,9 +733,9 @@
 int __must_check media_device_register_entity_notify(struct media_device *mdev,
 					struct media_entity_notify *nptr)
 {
-	spin_lock(&mdev->lock);
+	mutex_lock(&mdev->graph_mutex);
 	list_add_tail(&nptr->list, &mdev->entity_notify);
-	spin_unlock(&mdev->lock);
+	mutex_unlock(&mdev->graph_mutex);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(media_device_register_entity_notify);
@@ -771,9 +752,9 @@
 void media_device_unregister_entity_notify(struct media_device *mdev,
 					struct media_entity_notify *nptr)
 {
-	spin_lock(&mdev->lock);
+	mutex_lock(&mdev->graph_mutex);
 	__media_device_unregister_entity_notify(mdev, nptr);
-	spin_unlock(&mdev->lock);
+	mutex_unlock(&mdev->graph_mutex);
 }
 EXPORT_SYMBOL_GPL(media_device_unregister_entity_notify);
 
@@ -787,11 +768,11 @@
 	if (mdev == NULL)
 		return;
 
-	spin_lock(&mdev->lock);
+	mutex_lock(&mdev->graph_mutex);
 
 	/* Check if mdev was ever registered at all */
 	if (!media_devnode_is_registered(&mdev->devnode)) {
-		spin_unlock(&mdev->lock);
+		mutex_unlock(&mdev->graph_mutex);
 		return;
 	}
 
@@ -811,12 +792,11 @@
 		kfree(intf);
 	}
 
-	spin_unlock(&mdev->lock);
+	mutex_unlock(&mdev->graph_mutex);
 
 	device_remove_file(&mdev->devnode.dev, &dev_attr_model);
+	dev_dbg(mdev->dev, "Media device unregistering\n");
 	media_devnode_unregister(&mdev->devnode);
-
-	dev_dbg(mdev->dev, "Media device unregistered\n");
 }
 EXPORT_SYMBOL_GPL(media_device_unregister);
 
@@ -846,11 +826,11 @@
 }
 EXPORT_SYMBOL_GPL(media_device_find_devres);
 
+#if IS_ENABLED(CONFIG_PCI)
 void media_device_pci_init(struct media_device *mdev,
 			   struct pci_dev *pci_dev,
 			   const char *name)
 {
-#ifdef CONFIG_PCI
 	mdev->dev = &pci_dev->dev;
 
 	if (name)
@@ -866,16 +846,16 @@
 	mdev->driver_version = LINUX_VERSION_CODE;
 
 	media_device_init(mdev);
-#endif
 }
 EXPORT_SYMBOL_GPL(media_device_pci_init);
+#endif
 
+#if IS_ENABLED(CONFIG_USB)
 void __media_device_usb_init(struct media_device *mdev,
 			     struct usb_device *udev,
 			     const char *board_name,
 			     const char *driver_name)
 {
-#ifdef CONFIG_USB
 	mdev->dev = &udev->dev;
 
 	if (driver_name)
@@ -895,9 +875,9 @@
 	mdev->driver_version = LINUX_VERSION_CODE;
 
 	media_device_init(mdev);
-#endif
 }
 EXPORT_SYMBOL_GPL(__media_device_usb_init);
+#endif
 
 
 #endif /* CONFIG_MEDIA_CONTROLLER */
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index 29409f4..b66dc9d 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -197,10 +197,11 @@
 	if (mdev->fops->release)
 		mdev->fops->release(filp);
 
+	filp->private_data = NULL;
+
 	/* decrease the refcount unconditionally since the release()
 	   return value is ignored. */
 	put_device(&mdev->dev);
-	filp->private_data = NULL;
 	return 0;
 }
 
@@ -267,8 +268,11 @@
 	return 0;
 
 error:
+	mutex_lock(&media_devnode_lock);
 	cdev_del(&mdev->cdev);
 	clear_bit(mdev->minor, media_devnode_nums);
+	mutex_unlock(&media_devnode_lock);
+
 	return ret;
 }
 
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index e95070b..d8a2299 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -219,7 +219,7 @@
 	entity->pads = pads;
 
 	if (mdev)
-		spin_lock(&mdev->lock);
+		mutex_lock(&mdev->graph_mutex);
 
 	for (i = 0; i < num_pads; i++) {
 		pads[i].entity = entity;
@@ -230,7 +230,7 @@
 	}
 
 	if (mdev)
-		spin_unlock(&mdev->lock);
+		mutex_unlock(&mdev->graph_mutex);
 
 	return 0;
 }
@@ -445,7 +445,7 @@
 		bitmap_or(active, active, has_no_links, entity->num_pads);
 
 		if (!bitmap_full(active, entity->num_pads)) {
-			ret = -EPIPE;
+			ret = -ENOLINK;
 			dev_dbg(entity->graph_obj.mdev->dev,
 				"\"%s\":%u must be connected by an enabled link\n",
 				entity->name,
@@ -747,9 +747,9 @@
 	if (mdev == NULL)
 		return;
 
-	spin_lock(&mdev->lock);
+	mutex_lock(&mdev->graph_mutex);
 	__media_entity_remove_links(entity);
-	spin_unlock(&mdev->lock);
+	mutex_unlock(&mdev->graph_mutex);
 }
 EXPORT_SYMBOL_GPL(media_entity_remove_links);
 
@@ -951,9 +951,9 @@
 	if (mdev == NULL)
 		return;
 
-	spin_lock(&mdev->lock);
+	mutex_lock(&mdev->graph_mutex);
 	__media_remove_intf_link(link);
-	spin_unlock(&mdev->lock);
+	mutex_unlock(&mdev->graph_mutex);
 }
 EXPORT_SYMBOL_GPL(media_remove_intf_link);
 
@@ -975,8 +975,8 @@
 	if (mdev == NULL)
 		return;
 
-	spin_lock(&mdev->lock);
+	mutex_lock(&mdev->graph_mutex);
 	__media_remove_intf_links(intf);
-	spin_unlock(&mdev->lock);
+	mutex_unlock(&mdev->graph_mutex);
 }
 EXPORT_SYMBOL_GPL(media_remove_intf_links);
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 48a611b..4f6467f 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -14,6 +14,7 @@
 source "drivers/media/pci/solo6x10/Kconfig"
 source "drivers/media/pci/sta2x11/Kconfig"
 source "drivers/media/pci/tw68/Kconfig"
+source "drivers/media/pci/tw686x/Kconfig"
 source "drivers/media/pci/zoran/Kconfig"
 endif
 
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index 5f8aacb..2e54c36 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -25,6 +25,7 @@
 obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
 obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
 obj-$(CONFIG_VIDEO_TW68) += tw68/
+obj-$(CONFIG_VIDEO_TW686X) += tw686x/
 obj-$(CONFIG_VIDEO_DT3155) += dt3155/
 obj-$(CONFIG_VIDEO_MEYE) += meye/
 obj-$(CONFIG_STA2X11_VIP) += sta2x11/
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index a01f0cc..7034382 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -4,6 +4,7 @@
 	depends on PCI_MSI && MTD_COMPLEX_MAPPINGS
 	depends on GPIOLIB || COMPILE_TEST
 	depends on SND
+	depends on MTD
 	select I2C_ALGOBIT
 	select VIDEO_ADV7604
 	select VIDEO_ADV7511
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 7e31f2a..47ce80f 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -707,11 +707,7 @@
 /* Call the specified callback for all subdevs with a grp_id bit matching the
  * mask in hw (if 0, then match them all). Ignore any errors. */
 #define cx18_call_hw(cx, hw, o, f, args...)				\
-	do {								\
-		struct v4l2_subdev *__sd;				\
-		__v4l2_device_call_subdevs_p(&(cx)->v4l2_dev, __sd,	\
-			!(hw) || (__sd->grp_id & (hw)), o, f , ##args);	\
-	} while (0)
+	v4l2_device_mask_call_all(&(cx)->v4l2_dev, hw, o, f, ##args)
 
 #define cx18_call_all(cx, o, f, args...) cx18_call_hw(cx, 0, o, f , ##args)
 
@@ -719,12 +715,7 @@
  * mask in hw (if 0, then match them all). If the callback returns an error
  * other than 0 or -ENOIOCTLCMD, then return with that error code. */
 #define cx18_call_hw_err(cx, hw, o, f, args...)				\
-({									\
-	struct v4l2_subdev *__sd;					\
-	__v4l2_device_call_subdevs_until_err_p(&(cx)->v4l2_dev,		\
-			__sd, !(hw) || (__sd->grp_id & (hw)), o, f,	\
-			##args);					\
-})
+	v4l2_device_mask_call_until_err(&(cx)->v4l2_dev, hw, o, f, ##args)
 
 #define cx18_call_all_err(cx, o, f, args...) \
 	cx18_call_hw_err(cx, 0, o, f , ##args)
diff --git a/drivers/media/pci/cx23885/cx23885-av.c b/drivers/media/pci/cx23885/cx23885-av.c
index 877dad8..e7d4406 100644
--- a/drivers/media/pci/cx23885/cx23885-av.c
+++ b/drivers/media/pci/cx23885/cx23885-av.c
@@ -24,7 +24,7 @@
 {
 	struct cx23885_dev *dev =
 			   container_of(work, struct cx23885_dev, cx25840_work);
-	bool handled;
+	bool handled = false;
 
 	v4l2_subdev_call(dev->sd_cx25840, core, interrupt_service_routine,
 			 PCI_MSK_AV_CORE, &handled);
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index 6c08dae..10cba305 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -827,12 +827,7 @@
 /* Call the specified callback for all subdevs matching hw (if 0, then
    match them all). Ignore any errors. */
 #define ivtv_call_hw(itv, hw, o, f, args...) 				\
-	do {								\
-		struct v4l2_subdev *__sd;				\
-		__v4l2_device_call_subdevs_p(&(itv)->v4l2_dev, __sd,	\
-			 !(hw) ? true : (__sd->grp_id & (hw)),		\
-			 o, f, ##args);					\
-	} while (0)
+	v4l2_device_mask_call_all(&(itv)->v4l2_dev, hw, o, f, ##args)
 
 #define ivtv_call_all(itv, o, f, args...) ivtv_call_hw(itv, 0, o, f , ##args)
 
@@ -840,11 +835,7 @@
    match them all). If the callback returns an error other than 0 or
    -ENOIOCTLCMD, then return with that error code. */
 #define ivtv_call_hw_err(itv, hw, o, f, args...)			\
-({									\
-	struct v4l2_subdev *__sd;					\
-	__v4l2_device_call_subdevs_until_err_p(&(itv)->v4l2_dev, __sd,	\
-		!(hw) || (__sd->grp_id & (hw)), o, f , ##args);		\
-})
+	v4l2_device_mask_call_until_err(&(itv)->v4l2_dev, hw, o, f, ##args)
 
 #define ivtv_call_all_err(itv, o, f, args...) ivtv_call_hw_err(itv, 0, o, f , ##args)
 
diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
index d018673..826c7c7 100644
--- a/drivers/media/pci/smipcie/smipcie-ir.c
+++ b/drivers/media/pci/smipcie/smipcie-ir.c
@@ -203,7 +203,7 @@
 	rc_dev->dev.parent = &dev->pci_dev->dev;
 
 	rc_dev->driver_type = RC_DRIVER_SCANCODE;
-	rc_dev->map_name = RC_MAP_DVBSKY;
+	rc_dev->map_name = dev->info->rc_map;
 
 	ir->rc_dev = rc_dev;
 	ir->dev = dev;
diff --git a/drivers/media/pci/smipcie/smipcie-main.c b/drivers/media/pci/smipcie/smipcie-main.c
index b039a22..83981d61 100644
--- a/drivers/media/pci/smipcie/smipcie-main.c
+++ b/drivers/media/pci/smipcie/smipcie-main.c
@@ -716,7 +716,8 @@
 	/* init MAC.*/
 	ret = smi_read_eeprom(&dev->i2c_bus[0], 0xc0, mac_ee, 16);
 	dev_info(&port->dev->pci_dev->dev,
-		"DVBSky SMI PCIe MAC= %pM\n", mac_ee + (port->idx)*8);
+		"%s port %d MAC: %pM\n", dev->info->name,
+		port->idx, mac_ee + (port->idx)*8);
 	memcpy(adap->proposed_mac, mac_ee + (port->idx)*8, 6);
 	return ret;
 }
@@ -1066,6 +1067,7 @@
 	.ts_1 = SMI_TS_DMA_BOTH,
 	.fe_0 = DVBSKY_FE_NULL,
 	.fe_1 = DVBSKY_FE_M88DS3103,
+	.rc_map = RC_MAP_DVBSKY,
 };
 
 static struct smi_cfg_info dvbsky_s952_cfg = {
@@ -1075,6 +1077,7 @@
 	.ts_1 = SMI_TS_DMA_BOTH,
 	.fe_0 = DVBSKY_FE_M88RS6000,
 	.fe_1 = DVBSKY_FE_M88RS6000,
+	.rc_map = RC_MAP_DVBSKY,
 };
 
 static struct smi_cfg_info dvbsky_t9580_cfg = {
@@ -1084,6 +1087,17 @@
 	.ts_1 = SMI_TS_DMA_BOTH,
 	.fe_0 = DVBSKY_FE_SIT2,
 	.fe_1 = DVBSKY_FE_M88DS3103,
+	.rc_map = RC_MAP_DVBSKY,
+};
+
+static struct smi_cfg_info technotrend_s2_4200_cfg = {
+	.type = SMI_TECHNOTREND_S2_4200,
+	.name = "TechnoTrend TT-budget S2-4200 Twin",
+	.ts_0 = SMI_TS_DMA_BOTH,
+	.ts_1 = SMI_TS_DMA_BOTH,
+	.fe_0 = DVBSKY_FE_M88RS6000,
+	.fe_1 = DVBSKY_FE_M88RS6000,
+	.rc_map = RC_MAP_TT_1500,
 };
 
 /* PCI IDs */
@@ -1096,6 +1110,7 @@
 	SMI_ID(0x4254, 0x0550, dvbsky_s950_cfg),
 	SMI_ID(0x4254, 0x0552, dvbsky_s952_cfg),
 	SMI_ID(0x4254, 0x5580, dvbsky_t9580_cfg),
+	SMI_ID(0x13c2, 0x3016, technotrend_s2_4200_cfg),
 	{0}
 };
 MODULE_DEVICE_TABLE(pci, smi_id_table);
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
index 68cdda2..611e4f0 100644
--- a/drivers/media/pci/smipcie/smipcie.h
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -216,6 +216,7 @@
 #define SMI_DVBSKY_S950         1
 #define SMI_DVBSKY_T9580        2
 #define SMI_DVBSKY_T982         3
+#define SMI_TECHNOTREND_S2_4200 4
 	int type;
 	char *name;
 #define SMI_TS_NULL             0
@@ -232,6 +233,7 @@
 #define DVBSKY_FE_SIT2          3
 	int fe_0;
 	int fe_1;
+	char *rc_map;
 };
 
 struct smi_rc {
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 753411c..1fc195f 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -444,27 +444,19 @@
 static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id std)
 {
 	struct sta2x11_vip *vip = video_drvdata(file);
-	v4l2_std_id oldstd = vip->std, newstd;
-	int status;
 
-	if (V4L2_STD_ALL == std) {
-		v4l2_subdev_call(vip->decoder, video, s_std, std);
-		ssleep(2);
-		v4l2_subdev_call(vip->decoder, video, querystd, &newstd);
-		v4l2_subdev_call(vip->decoder, video, g_input_status, &status);
-		if (status & V4L2_IN_ST_NO_SIGNAL)
+	/*
+	 * This is here for backwards compatibility only.
+	 * The use of V4L2_STD_ALL to trigger a querystd is non-standard.
+	 */
+	if (std == V4L2_STD_ALL) {
+		v4l2_subdev_call(vip->decoder, video, querystd, &std);
+		if (std == V4L2_STD_UNKNOWN)
 			return -EIO;
-		std = vip->std = newstd;
-		if (oldstd != std) {
-			if (V4L2_STD_525_60 & std)
-				vip->format = formats_60[0];
-			else
-				vip->format = formats_50[0];
-		}
-		return 0;
 	}
 
-	if (oldstd != std) {
+	if (vip->std != std) {
+		vip->std = std;
 		if (V4L2_STD_525_60 & std)
 			vip->format = formats_60[0];
 		else
diff --git a/drivers/media/pci/tw686x/Kconfig b/drivers/media/pci/tw686x/Kconfig
new file mode 100644
index 0000000..fb85369
--- /dev/null
+++ b/drivers/media/pci/tw686x/Kconfig
@@ -0,0 +1,18 @@
+config VIDEO_TW686X
+	tristate "Intersil/Techwell TW686x video capture cards"
+	depends on PCI && VIDEO_DEV && VIDEO_V4L2 && SND
+	depends on HAS_DMA
+	select VIDEOBUF2_VMALLOC
+	select SND_PCM
+	help
+	  Support for Intersil/Techwell TW686x-based frame grabber cards.
+
+	  Currently supported chips:
+	  - TW6864 (4 video channels),
+	  - TW6865 (4 video channels, not tested, second generation chip),
+	  - TW6868 (8 video channels, of which only the first 4, using the
+	    built-in video decoder, are supported; not tested),
+	  - TW6869 (8 video channels, second generation chip).
+
+	  To compile this driver as a module, choose M here: the module
+	  will be named tw686x.
diff --git a/drivers/media/pci/tw686x/Makefile b/drivers/media/pci/tw686x/Makefile
new file mode 100644
index 0000000..9981954
--- /dev/null
+++ b/drivers/media/pci/tw686x/Makefile
@@ -0,0 +1,3 @@
+tw686x-objs := tw686x-core.o tw686x-video.o tw686x-audio.o
+
+obj-$(CONFIG_VIDEO_TW686X) += tw686x.o
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
new file mode 100644
index 0000000..91459ab
--- /dev/null
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
+ *
+ * Based on the audio support from the tw6869 driver:
+ * Copyright 2015 www.starterkit.ru <info@starterkit.ru>
+ *
+ * Based on:
+ * Driver for Intersil|Techwell TW6869 based DVR cards
+ * (c) 2011-12 liran <jli11@intersil.com> [Intersil|Techwell China]
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/control.h>
+#include "tw686x.h"
+#include "tw686x-regs.h"
+
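+/*
+ * Audio DMA channels occupy the bit range just above the 8 video
+ * channels (bits 8-15 of the DMA enable/command/status registers).
+ */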
+#define AUDIO_CHANNEL_OFFSET 8
+
+void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
+		      unsigned int pb_status)
+{
+	unsigned long flags;
+	unsigned int ch, pb;
+
+	for_each_set_bit(ch, &requests, max_channels(dev)) {
+		struct tw686x_audio_channel *ac = &dev->audio_channels[ch];
+		struct tw686x_audio_buf *done = NULL;
+		struct tw686x_audio_buf *next = NULL;
+		struct tw686x_dma_desc *desc;
+
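+		/*
+		 * pb selects which half of this channel's P/B (ping-pong)
+		 * DMA descriptor pair to service, as flagged in the PB
+		 * status register.
+		 */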
+		pb = !!(pb_status & BIT(AUDIO_CHANNEL_OFFSET + ch));
+
+		spin_lock_irqsave(&ac->lock, flags);
+
+		/* Sanity check */
+		if (!ac->ss || !ac->curr_bufs[0] || !ac->curr_bufs[1]) {
+			spin_unlock_irqrestore(&ac->lock, flags);
+			continue;
+		}
+
+		if (!list_empty(&ac->buf_list)) {
+			next = list_first_entry(&ac->buf_list,
+					struct tw686x_audio_buf, list);
+			list_move_tail(&next->list, &ac->buf_list);
+			done = ac->curr_bufs[!pb];
+			ac->curr_bufs[pb] = next;
+		}
+		spin_unlock_irqrestore(&ac->lock, flags);
+
+		desc = &ac->dma_descs[pb];
+		if (done && next && desc->virt) {
+			memcpy(done->virt, desc->virt, desc->size);
+			ac->ptr = done->dma - ac->buf[0].dma;
+			snd_pcm_period_elapsed(ac->ss);
+		}
+	}
+}
+
+static int tw686x_pcm_hw_params(struct snd_pcm_substream *ss,
+				struct snd_pcm_hw_params *hw_params)
+{
+	return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params));
+}
+
+static int tw686x_pcm_hw_free(struct snd_pcm_substream *ss)
+{
+	return snd_pcm_lib_free_pages(ss);
+}
+
+/*
+ * The audio device rate is global and shared among all
+ * capture channels. The driver makes no effort to prevent
+ * rate modifications. The user is free to change the rate, but doing
+ * so changes the rate for all capture sub-devices.
+ */
+static const struct snd_pcm_hardware tw686x_capture_hw = {
+	.info			= (SNDRV_PCM_INFO_MMAP |
+				   SNDRV_PCM_INFO_INTERLEAVED |
+				   SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				   SNDRV_PCM_INFO_MMAP_VALID),
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE,
+	.rates			= SNDRV_PCM_RATE_8000_48000,
+	.rate_min		= 8000,
+	.rate_max		= 48000,
+	.channels_min		= 1,
+	.channels_max		= 1,
+	.buffer_bytes_max	= TW686X_AUDIO_PAGE_MAX * TW686X_AUDIO_PAGE_SZ,
+	.period_bytes_min	= TW686X_AUDIO_PAGE_SZ,
+	.period_bytes_max	= TW686X_AUDIO_PAGE_SZ,
+	.periods_min		= TW686X_AUDIO_PERIODS_MIN,
+	.periods_max		= TW686X_AUDIO_PERIODS_MAX,
+};
+
+static int tw686x_pcm_open(struct snd_pcm_substream *ss)
+{
+	struct tw686x_dev *dev = snd_pcm_substream_chip(ss);
+	struct tw686x_audio_channel *ac = &dev->audio_channels[ss->number];
+	struct snd_pcm_runtime *rt = ss->runtime;
+	int err;
+
+	ac->ss = ss;
+	rt->hw = tw686x_capture_hw;
+
+	err = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int tw686x_pcm_close(struct snd_pcm_substream *ss)
+{
+	struct tw686x_dev *dev = snd_pcm_substream_chip(ss);
+	struct tw686x_audio_channel *ac = &dev->audio_channels[ss->number];
+
+	ac->ss = NULL;
+	return 0;
+}
+
+static int tw686x_pcm_prepare(struct snd_pcm_substream *ss)
+{
+	struct tw686x_dev *dev = snd_pcm_substream_chip(ss);
+	struct tw686x_audio_channel *ac = &dev->audio_channels[ss->number];
+	struct snd_pcm_runtime *rt = ss->runtime;
+	unsigned int period_size = snd_pcm_lib_period_bytes(ss);
+	struct tw686x_audio_buf *p_buf, *b_buf;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	tw686x_disable_channel(dev, AUDIO_CHANNEL_OFFSET + ac->ch);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (dev->audio_rate != rt->rate) {
+		u32 reg;
+
+		dev->audio_rate = rt->rate;
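+		/*
+		 * Program the sample-rate divisor as a 16.16 fixed-point
+		 * value: integer part of 125000000 / rate in the upper 16
+		 * bits, scaled remainder in the lower 16 bits (125 MHz
+		 * presumably being the audio reference clock).
+		 */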
+		reg = ((125000000 / rt->rate) << 16) +
+		       ((125000000 % rt->rate) << 16) / rt->rate;
+
+		reg_write(dev, AUDIO_CONTROL2, reg);
+	}
+
+	if (period_size != TW686X_AUDIO_PAGE_SZ ||
+	    rt->periods < TW686X_AUDIO_PERIODS_MIN ||
+	    rt->periods > TW686X_AUDIO_PERIODS_MAX) {
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ac->lock, flags);
+	INIT_LIST_HEAD(&ac->buf_list);
+
+	for (i = 0; i < rt->periods; i++) {
+		ac->buf[i].dma = rt->dma_addr + period_size * i;
+		ac->buf[i].virt = rt->dma_area + period_size * i;
+		INIT_LIST_HEAD(&ac->buf[i].list);
+		list_add_tail(&ac->buf[i].list, &ac->buf_list);
+	}
+
+	p_buf =	list_first_entry(&ac->buf_list, struct tw686x_audio_buf, list);
+	list_move_tail(&p_buf->list, &ac->buf_list);
+
+	b_buf =	list_first_entry(&ac->buf_list, struct tw686x_audio_buf, list);
+	list_move_tail(&b_buf->list, &ac->buf_list);
+
+	ac->curr_bufs[0] = p_buf;
+	ac->curr_bufs[1] = b_buf;
+	ac->ptr = 0;
+	spin_unlock_irqrestore(&ac->lock, flags);
+
+	return 0;
+}
+
+static int tw686x_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
+{
+	struct tw686x_dev *dev = snd_pcm_substream_chip(ss);
+	struct tw686x_audio_channel *ac = &dev->audio_channels[ss->number];
+	unsigned long flags;
+	int err = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		if (ac->curr_bufs[0] && ac->curr_bufs[1]) {
+			spin_lock_irqsave(&dev->lock, flags);
+			tw686x_enable_channel(dev,
+				AUDIO_CHANNEL_OFFSET + ac->ch);
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			mod_timer(&dev->dma_delay_timer,
+				  jiffies + msecs_to_jiffies(100));
+		} else {
+			err = -EIO;
+		}
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		spin_lock_irqsave(&dev->lock, flags);
+		tw686x_disable_channel(dev, AUDIO_CHANNEL_OFFSET + ac->ch);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		spin_lock_irqsave(&ac->lock, flags);
+		ac->curr_bufs[0] = NULL;
+		ac->curr_bufs[1] = NULL;
+		spin_unlock_irqrestore(&ac->lock, flags);
+		break;
+	default:
+		err = -EINVAL;
+	}
+	return err;
+}
+
+static snd_pcm_uframes_t tw686x_pcm_pointer(struct snd_pcm_substream *ss)
+{
+	struct tw686x_dev *dev = snd_pcm_substream_chip(ss);
+	struct tw686x_audio_channel *ac = &dev->audio_channels[ss->number];
+
+	return bytes_to_frames(ss->runtime, ac->ptr);
+}
+
+static struct snd_pcm_ops tw686x_pcm_ops = {
+	.open = tw686x_pcm_open,
+	.close = tw686x_pcm_close,
+	.ioctl = snd_pcm_lib_ioctl,
+	.hw_params = tw686x_pcm_hw_params,
+	.hw_free = tw686x_pcm_hw_free,
+	.prepare = tw686x_pcm_prepare,
+	.trigger = tw686x_pcm_trigger,
+	.pointer = tw686x_pcm_pointer,
+};
+
+static int tw686x_snd_pcm_init(struct tw686x_dev *dev)
+{
+	struct snd_card *card = dev->snd_card;
+	struct snd_pcm *pcm;
+	struct snd_pcm_substream *ss;
+	unsigned int i;
+	int err;
+
+	err = snd_pcm_new(card, card->driver, 0, 0, max_channels(dev), &pcm);
+	if (err < 0)
+		return err;
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &tw686x_pcm_ops);
+	snd_pcm_chip(pcm) = dev;
+	pcm->info_flags = 0;
+	strlcpy(pcm->name, "tw686x PCM", sizeof(pcm->name));
+
+	for (i = 0, ss = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+	     ss; ss = ss->next, i++)
+		snprintf(ss->name, sizeof(ss->name), "vch%u audio", i);
+
+	return snd_pcm_lib_preallocate_pages_for_all(pcm,
+				SNDRV_DMA_TYPE_DEV,
+				snd_dma_pci_data(dev->pci_dev),
+				TW686X_AUDIO_PAGE_MAX * TW686X_AUDIO_PAGE_SZ,
+				TW686X_AUDIO_PAGE_MAX * TW686X_AUDIO_PAGE_SZ);
+}
+
+static void tw686x_audio_dma_free(struct tw686x_dev *dev,
+				  struct tw686x_audio_channel *ac)
+{
+	int pb;
+
+	for (pb = 0; pb < 2; pb++) {
+		if (!ac->dma_descs[pb].virt)
+			continue;
+		pci_free_consistent(dev->pci_dev, ac->dma_descs[pb].size,
+				    ac->dma_descs[pb].virt,
+				    ac->dma_descs[pb].phys);
+		ac->dma_descs[pb].virt = NULL;
+	}
+}
+
+static int tw686x_audio_dma_alloc(struct tw686x_dev *dev,
+				  struct tw686x_audio_channel *ac)
+{
+	int pb;
+
+	for (pb = 0; pb < 2; pb++) {
+		u32 reg = pb ? ADMA_B_ADDR[ac->ch] : ADMA_P_ADDR[ac->ch];
+		void *virt;
+
+		virt = pci_alloc_consistent(dev->pci_dev, TW686X_AUDIO_PAGE_SZ,
+					    &ac->dma_descs[pb].phys);
+		if (!virt) {
+			dev_err(&dev->pci_dev->dev,
+				"dma%d: unable to allocate audio DMA %s-buffer\n",
+				ac->ch, pb ? "B" : "P");
+			return -ENOMEM;
+		}
+		ac->dma_descs[pb].virt = virt;
+		ac->dma_descs[pb].size = TW686X_AUDIO_PAGE_SZ;
+		reg_write(dev, reg, ac->dma_descs[pb].phys);
+	}
+	return 0;
+}
+
+void tw686x_audio_free(struct tw686x_dev *dev)
+{
+	unsigned long flags;
+	u32 dma_ch_mask;
+	u32 dma_cmd;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dma_cmd = reg_read(dev, DMA_CMD);
+	dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE);
+	reg_write(dev, DMA_CMD, dma_cmd & ~0xff00);
+	reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask & ~0xff00);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (!dev->snd_card)
+		return;
+	snd_card_free(dev->snd_card);
+	dev->snd_card = NULL;
+}
+
+int tw686x_audio_init(struct tw686x_dev *dev)
+{
+	struct pci_dev *pci_dev = dev->pci_dev;
+	struct snd_card *card;
+	int err, ch;
+
+	/*
+	 * AUDIO_CONTROL1
+	 * DMA byte length [31:19] = 4096 (i.e. ALSA period)
+	 * External audio enable [0] = enabled
+	 */
+	reg_write(dev, AUDIO_CONTROL1, 0x80000001);
+
+	err = snd_card_new(&pci_dev->dev, SNDRV_DEFAULT_IDX1,
+			   SNDRV_DEFAULT_STR1,
+			   THIS_MODULE, 0, &card);
+	if (err < 0)
+		return err;
+
+	dev->snd_card = card;
+	strlcpy(card->driver, "tw686x", sizeof(card->driver));
+	strlcpy(card->shortname, "tw686x", sizeof(card->shortname));
+	strlcpy(card->longname, pci_name(pci_dev), sizeof(card->longname));
+	snd_card_set_dev(card, &pci_dev->dev);
+
+	for (ch = 0; ch < max_channels(dev); ch++) {
+		struct tw686x_audio_channel *ac;
+
+		ac = &dev->audio_channels[ch];
+		spin_lock_init(&ac->lock);
+		ac->dev = dev;
+		ac->ch = ch;
+
+		err = tw686x_audio_dma_alloc(dev, ac);
+		if (err < 0)
+			goto err_cleanup;
+	}
+
+	err = tw686x_snd_pcm_init(dev);
+	if (err < 0)
+		goto err_cleanup;
+
+	err = snd_card_register(card);
+	if (!err)
+		return 0;
+
+err_cleanup:
+	for (ch = 0; ch < max_channels(dev); ch++) {
+		if (!dev->audio_channels[ch].dev)
+			continue;
+		tw686x_audio_dma_free(dev, &dev->audio_channels[ch]);
+	}
+	snd_card_free(card);
+	dev->snd_card = NULL;
+	return err;
+}
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
new file mode 100644
index 0000000..cf53b0e
--- /dev/null
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
+ *
+ * Based on original driver by Krzysztof Hałasa:
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Notes
+ * -----
+ *
+ * 1. Under stress-testing, it has been observed that the PCIe link
+ * goes down for no apparent reason. Therefore, the driver takes special care
+ * to allow device hot-unplugging.
+ *
+ * 2. TW686X devices are capable of setting a few different DMA modes,
+ * including: scatter-gather, field and frame modes. However,
+ * under stress testing it has been found that the machine can
+ * freeze completely if DMA registers are programmed while streaming
+ * is active.
+ * This driver tries to access hardware registers as infrequently
+ * as possible by:
+ *   i.  allocating fixed DMA buffers and memcpy'ing into
+ *       vmalloc'ed buffers
+ *   ii. using a timer to mitigate the rate of DMA reset operations
+ *       on DMA channel errors.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "tw686x.h"
+#include "tw686x-regs.h"
+
+/*
+ * This module parameter controls the DMA_TIMER_INTERVAL value.
+ * The DMA_TIMER_INTERVAL register controls the minimum DMA interrupt
+ * time span (iow, the maximum DMA interrupt rate) thus allowing for
+ * IRQ coalescing.
+ *
+ * The chip datasheet does not mention a time unit for this value, so
+ * users wanting fine-grained control over the interrupt rate should
+ * determine the desired value through testing.
+ */
+static u32 dma_interval = 0x00098968;
+module_param(dma_interval, int, 0444);
+MODULE_PARM_DESC(dma_interval, "Minimum time span for DMA interrupting host");
+
+void tw686x_disable_channel(struct tw686x_dev *dev, unsigned int channel)
+{
+	u32 dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
+	u32 dma_cmd = reg_read(dev, DMA_CMD);
+
+	dma_en &= ~BIT(channel);
+	dma_cmd &= ~BIT(channel);
+
+	/* Must remove it from pending too */
+	dev->pending_dma_en &= ~BIT(channel);
+	dev->pending_dma_cmd &= ~BIT(channel);
+
+	/* Stop DMA if no channels are enabled */
+	if (!dma_en)
+		dma_cmd = 0;
+	reg_write(dev, DMA_CHANNEL_ENABLE, dma_en);
+	reg_write(dev, DMA_CMD, dma_cmd);
+}
+
+void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel)
+{
+	u32 dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
+	u32 dma_cmd = reg_read(dev, DMA_CMD);
+
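+	/*
+	 * Don't touch the hardware here: only accumulate the desired state
+	 * in pending_dma_en/pending_dma_cmd and let the dma_delay timer
+	 * commit it (see tw686x_dma_delay() below).
+	 */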
+	dev->pending_dma_en |= dma_en | BIT(channel);
+	dev->pending_dma_cmd |= dma_cmd | DMA_CMD_ENABLE | BIT(channel);
+}
+
+/*
+ * The purpose of this awful hack is to avoid enabling the DMA
+ * channels "too fast", which makes some TW686x devices very
+ * angry and freezes the CPU (see note 2 above).
+ */
+static void tw686x_dma_delay(unsigned long data)
+{
+	struct tw686x_dev *dev = (struct tw686x_dev *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	reg_write(dev, DMA_CHANNEL_ENABLE, dev->pending_dma_en);
+	reg_write(dev, DMA_CMD, dev->pending_dma_cmd);
+	dev->pending_dma_en = 0;
+	dev->pending_dma_cmd = 0;
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void tw686x_reset_channels(struct tw686x_dev *dev, unsigned int ch_mask)
+{
+	u32 dma_en, dma_cmd;
+
+	dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
+	dma_cmd = reg_read(dev, DMA_CMD);
+
+	/*
+	 * Save the pending register state; the timer will
+	 * restore it.
+	 */
+	dev->pending_dma_en |= dma_en;
+	dev->pending_dma_cmd |= dma_cmd;
+
+	/* Disable the reset channels */
+	reg_write(dev, DMA_CHANNEL_ENABLE, dma_en & ~ch_mask);
+
+	if ((dma_en & ~ch_mask) == 0) {
+		dev_dbg(&dev->pci_dev->dev, "reset: stopping DMA\n");
+		dma_cmd &= ~DMA_CMD_ENABLE;
+	}
+	reg_write(dev, DMA_CMD, dma_cmd & ~ch_mask);
+}
+
+static irqreturn_t tw686x_irq(int irq, void *dev_id)
+{
+	struct tw686x_dev *dev = (struct tw686x_dev *)dev_id;
+	unsigned int video_requests, audio_requests, reset_ch;
+	u32 fifo_status, fifo_signal, fifo_ov, fifo_bad, fifo_errors;
+	u32 int_status, dma_en, video_en, pb_status;
+	unsigned long flags;
+
+	int_status = reg_read(dev, INT_STATUS); /* cleared on read */
+	fifo_status = reg_read(dev, VIDEO_FIFO_STATUS);
+
+	/* INT_STATUS does not include FIFO_STATUS errors! */
+	if (!int_status && !TW686X_FIFO_ERROR(fifo_status))
+		return IRQ_NONE;
+
+	if (int_status & INT_STATUS_DMA_TOUT) {
+		dev_dbg(&dev->pci_dev->dev,
+			"DMA timeout. Resetting DMA for all channels\n");
+		reset_ch = ~0;
+		goto reset_channels;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	video_en = dma_en & 0xff;
+	fifo_signal = ~(fifo_status & 0xff) & video_en;
+	fifo_ov = fifo_status >> 24;
+	fifo_bad = fifo_status >> 16;
+
+	/* Mask of channels with signal and FIFO errors */
+	fifo_errors = fifo_signal & (fifo_ov | fifo_bad);
+
+	reset_ch = 0;
+	pb_status = reg_read(dev, PB_STATUS);
+
+	/* Coalesce video frame/error events */
+	video_requests = (int_status & video_en) | fifo_errors;
+	audio_requests = (int_status & dma_en) >> 8;
+
+	if (video_requests)
+		tw686x_video_irq(dev, video_requests, pb_status,
+				 fifo_status, &reset_ch);
+	if (audio_requests)
+		tw686x_audio_irq(dev, audio_requests, pb_status);
+
+reset_channels:
+	if (reset_ch) {
+		spin_lock_irqsave(&dev->lock, flags);
+		tw686x_reset_channels(dev, reset_ch);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		mod_timer(&dev->dma_delay_timer,
+			  jiffies + msecs_to_jiffies(100));
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void tw686x_dev_release(struct v4l2_device *v4l2_dev)
+{
+	struct tw686x_dev *dev = container_of(v4l2_dev, struct tw686x_dev,
+					      v4l2_dev);
+	unsigned int ch;
+
+	for (ch = 0; ch < max_channels(dev); ch++)
+		v4l2_ctrl_handler_free(&dev->video_channels[ch].ctrl_handler);
+
+	v4l2_device_unregister(&dev->v4l2_dev);
+
+	kfree(dev->audio_channels);
+	kfree(dev->video_channels);
+	kfree(dev);
+}
+
+static int tw686x_probe(struct pci_dev *pci_dev,
+			const struct pci_device_id *pci_id)
+{
+	struct tw686x_dev *dev;
+	int err;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+	dev->type = pci_id->driver_data;
+	sprintf(dev->name, "tw%04X", pci_dev->device);
+
+	dev->video_channels = kcalloc(max_channels(dev),
+		sizeof(*dev->video_channels), GFP_KERNEL);
+	if (!dev->video_channels) {
+		err = -ENOMEM;
+		goto free_dev;
+	}
+
+	dev->audio_channels = kcalloc(max_channels(dev),
+		sizeof(*dev->audio_channels), GFP_KERNEL);
+	if (!dev->audio_channels) {
+		err = -ENOMEM;
+		goto free_video;
+	}
+
+	pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx\n", dev->name,
+		pci_name(pci_dev), pci_dev->irq,
+		(unsigned long)pci_resource_start(pci_dev, 0));
+
+	dev->pci_dev = pci_dev;
+	if (pci_enable_device(pci_dev)) {
+		err = -EIO;
+		goto free_audio;
+	}
+
+	pci_set_master(pci_dev);
+	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+	if (err) {
+		dev_err(&pci_dev->dev, "32-bit PCI DMA not supported\n");
+		err = -EIO;
+		goto disable_pci;
+	}
+
+	err = pci_request_regions(pci_dev, dev->name);
+	if (err) {
+		dev_err(&pci_dev->dev, "unable to request PCI region\n");
+		goto disable_pci;
+	}
+
+	dev->mmio = pci_ioremap_bar(pci_dev, 0);
+	if (!dev->mmio) {
+		dev_err(&pci_dev->dev, "unable to remap PCI region\n");
+		err = -ENOMEM;
+		goto free_region;
+	}
+
+	/* Reset all subsystems */
+	reg_write(dev, SYS_SOFT_RST, 0x0f);
+	mdelay(1);
+
+	reg_write(dev, SRST[0], 0x3f);
+	if (max_channels(dev) > 4)
+		reg_write(dev, SRST[1], 0x3f);
+
+	/* Disable the DMA engine */
+	reg_write(dev, DMA_CMD, 0);
+	reg_write(dev, DMA_CHANNEL_ENABLE, 0);
+
+	/* Enable DMA FIFO overflow and pointer check */
+	reg_write(dev, DMA_CONFIG, 0xffffff04);
+	reg_write(dev, DMA_CHANNEL_TIMEOUT, 0x140c8584);
+	reg_write(dev, DMA_TIMER_INTERVAL, dma_interval);
+
+	spin_lock_init(&dev->lock);
+
+	err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED,
+			  dev->name, dev);
+	if (err < 0) {
+		dev_err(&pci_dev->dev, "unable to request interrupt\n");
+		goto iounmap;
+	}
+
+	setup_timer(&dev->dma_delay_timer,
+		    tw686x_dma_delay, (unsigned long) dev);
+
+	/*
+	 * This must be set right before initializing v4l2_dev.
+	 * It's used to release resources after the last held
+	 * handle is released.
+	 */
+	dev->v4l2_dev.release = tw686x_dev_release;
+	err = tw686x_video_init(dev);
+	if (err) {
+		dev_err(&pci_dev->dev, "can't register video\n");
+		goto free_irq;
+	}
+
+	err = tw686x_audio_init(dev);
+	if (err)
+		dev_warn(&pci_dev->dev, "can't register audio\n");
+
+	pci_set_drvdata(pci_dev, dev);
+	return 0;
+
+free_irq:
+	free_irq(pci_dev->irq, dev);
+iounmap:
+	pci_iounmap(pci_dev, dev->mmio);
+free_region:
+	pci_release_regions(pci_dev);
+disable_pci:
+	pci_disable_device(pci_dev);
+free_audio:
+	kfree(dev->audio_channels);
+free_video:
+	kfree(dev->video_channels);
+free_dev:
+	kfree(dev);
+	return err;
+}
+
+static void tw686x_remove(struct pci_dev *pci_dev)
+{
+	struct tw686x_dev *dev = pci_get_drvdata(pci_dev);
+	unsigned long flags;
+
+	/* This guarantees the IRQ handler is no longer running,
+	 * which means we can kiss some resources good-bye.
+	 */
+	free_irq(pci_dev->irq, dev);
+
+	tw686x_video_free(dev);
+	tw686x_audio_free(dev);
+	del_timer_sync(&dev->dma_delay_timer);
+
+	pci_iounmap(pci_dev, dev->mmio);
+	pci_release_regions(pci_dev);
+	pci_disable_device(pci_dev);
+
+	/*
+	 * Setting pci_dev to NULL lets the vb2_ops detect that the hardware
+	 * is no longer available. This is required because the device
+	 * sometimes hot-unplugs itself as the result of a PCIe link going
+	 * down.
+	 * The lock is really important here.
+	 */
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->pci_dev = NULL;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/*
+	 * This calls tw686x_dev_release if it's the last reference.
+	 * Otherwise, release is postponed until there are no users left.
+	 */
+	v4l2_device_put(&dev->v4l2_dev);
+}
+
+/*
+ * On TW6864 and TW6868, all channels share the pair of video DMA SG tables,
+ * with 10-bit start_idx and end_idx determining start and end of frame buffer
+ * for a particular channel.
+ * TW6868 with all its 8 channels would be problematic (only 127 SG entries per
+ * channel) but we support only 4 channels on this chip anyway (the first
+ * 4 channels are driven by the internal video decoder; the other 4 would require
+ * an external TW286x part).
+ *
+ * On TW6865 and TW6869, each channel has its own DMA SG table, with indexes
+ * starting with 0. Both chips have complete sets of internal video decoders
+ * (respectively 4 or 8-channel).
+ *
+ * All chips have separate SG tables for two video frames.
+ */
+
+/* driver_data is number of A/V channels */
+static const struct pci_device_id tw686x_pci_tbl[] = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6864),
+		.driver_data = 4
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6865), /* not tested */
+		.driver_data = 4 | TYPE_SECOND_GEN
+	},
+	/*
+	 * TW6868 supports 8 A/V channels with an external TW2865 chip;
+	 * not supported by the driver.
+	 */
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6868), /* not tested */
+		.driver_data = 4
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6869),
+		.driver_data = 8 | TYPE_SECOND_GEN
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(pci, tw686x_pci_tbl);
+
+static struct pci_driver tw686x_pci_driver = {
+	.name = "tw686x",
+	.id_table = tw686x_pci_tbl,
+	.probe = tw686x_probe,
+	.remove = tw686x_remove,
+};
+module_pci_driver(tw686x_pci_driver);
+
+MODULE_DESCRIPTION("Driver for video frame grabber cards based on Intersil/Techwell TW686[4589]");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>");
+MODULE_AUTHOR("Krzysztof Hałasa <khalasa@piap.pl>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/pci/tw686x/tw686x-regs.h b/drivers/media/pci/tw686x/tw686x-regs.h
new file mode 100644
index 0000000..fcef586
--- /dev/null
+++ b/drivers/media/pci/tw686x/tw686x-regs.h
@@ -0,0 +1,122 @@
+/* DMA controller registers */
+#define REG8_1(a0) ((const u16[8]) { a0, a0 + 1, a0 + 2, a0 + 3, \
+				     a0 + 4, a0 + 5, a0 + 6, a0 + 7})
+#define REG8_2(a0) ((const u16[8]) { a0, a0 + 2, a0 + 4, a0 + 6,	\
+				     a0 + 8, a0 + 0xa, a0 + 0xc, a0 + 0xe})
+#define REG8_8(a0) ((const u16[8]) { a0, a0 + 8, a0 + 0x10, a0 + 0x18, \
+				     a0 + 0x20, a0 + 0x28, a0 + 0x30, \
+				     a0 + 0x38})
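+/*
+ * The REG8_<stride> helpers expand to an array of eight per-channel
+ * register addresses starting at a0 and spaced <stride> apart.
+ */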
+#define INT_STATUS		0x00
+#define PB_STATUS		0x01
+#define DMA_CMD			0x02
+#define VIDEO_FIFO_STATUS	0x03
+#define VIDEO_CHANNEL_ID	0x04
+#define VIDEO_PARSER_STATUS	0x05
+#define SYS_SOFT_RST		0x06
+#define DMA_PAGE_TABLE0_ADDR	((const u16[8]) { 0x08, 0xd0, 0xd2, 0xd4, \
+						  0xd6, 0xd8, 0xda, 0xdc })
+#define DMA_PAGE_TABLE1_ADDR	((const u16[8]) { 0x09, 0xd1, 0xd3, 0xd5, \
+						  0xd7, 0xd9, 0xdb, 0xdd })
+#define DMA_CHANNEL_ENABLE	0x0a
+#define DMA_CONFIG		0x0b
+#define DMA_TIMER_INTERVAL	0x0c
+#define DMA_CHANNEL_TIMEOUT	0x0d
+#define VDMA_CHANNEL_CONFIG	REG8_1(0x10)
+#define ADMA_P_ADDR		REG8_2(0x18)
+#define ADMA_B_ADDR		REG8_2(0x19)
+#define DMA10_P_ADDR		0x28
+#define DMA10_B_ADDR		0x29
+#define VIDEO_CONTROL1		0x2a
+#define VIDEO_CONTROL2		0x2b
+#define AUDIO_CONTROL1		0x2c
+#define AUDIO_CONTROL2		0x2d
+#define PHASE_REF		0x2e
+#define GPIO_REG		0x2f
+#define INTL_HBAR_CTRL		REG8_1(0x30)
+#define AUDIO_CONTROL3		0x38
+#define VIDEO_FIELD_CTRL	REG8_1(0x39)
+#define HSCALER_CTRL		REG8_1(0x42)
+#define VIDEO_SIZE		REG8_1(0x4A)
+#define VIDEO_SIZE_F2		REG8_1(0x52)
+#define MD_CONF			REG8_1(0x60)
+#define MD_INIT			REG8_1(0x68)
+#define MD_MAP0			REG8_1(0x70)
+#define VDMA_P_ADDR		REG8_8(0x80) /* not used in DMA SG mode */
+#define VDMA_WHP		REG8_8(0x81)
+#define VDMA_B_ADDR		REG8_8(0x82)
+#define VDMA_F2_P_ADDR		REG8_8(0x84)
+#define VDMA_F2_WHP		REG8_8(0x85)
+#define VDMA_F2_B_ADDR		REG8_8(0x86)
+#define EP_REG_ADDR		0xfe
+#define EP_REG_DATA		0xff
+
+/* Video decoder registers */
+#define VDREG8(a0) ((const u16[8]) { \
+	a0 + 0x000, a0 + 0x010, a0 + 0x020, a0 + 0x030,	\
+	a0 + 0x100, a0 + 0x110, a0 + 0x120, a0 + 0x130})
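+/*
+ * VDREG8 yields one decoder register address per channel: channels 0-3
+ * at offsets 0x10 apart, channels 4-7 in a second block offset by 0x100.
+ */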
+#define VIDSTAT			VDREG8(0x100)
+#define BRIGHT			VDREG8(0x101)
+#define CONTRAST		VDREG8(0x102)
+#define SHARPNESS		VDREG8(0x103)
+#define SAT_U			VDREG8(0x104)
+#define SAT_V			VDREG8(0x105)
+#define HUE			VDREG8(0x106)
+#define CROP_HI			VDREG8(0x107)
+#define VDELAY_LO		VDREG8(0x108)
+#define VACTIVE_LO		VDREG8(0x109)
+#define HDELAY_LO		VDREG8(0x10a)
+#define HACTIVE_LO		VDREG8(0x10b)
+#define MVSN			VDREG8(0x10c)
+#define STATUS2			VDREG8(0x10d)
+#define SDT			VDREG8(0x10e)
+#define SDT_EN			VDREG8(0x10f)
+
+#define VSCALE_LO		VDREG8(0x144)
+#define SCALE_HI		VDREG8(0x145)
+#define HSCALE_LO		VDREG8(0x146)
+#define F2CROP_HI		VDREG8(0x147)
+#define F2VDELAY_LO		VDREG8(0x148)
+#define F2VACTIVE_LO		VDREG8(0x149)
+#define F2HDELAY_LO		VDREG8(0x14a)
+#define F2HACTIVE_LO		VDREG8(0x14b)
+#define F2VSCALE_LO		VDREG8(0x14c)
+#define F2SCALE_HI		VDREG8(0x14d)
+#define F2HSCALE_LO		VDREG8(0x14e)
+#define F2CNT			VDREG8(0x14f)
+
+#define VDREG2(a0) ((const u16[2]) { a0, a0 + 0x100 })
+#define SRST			VDREG2(0x180)
+#define ACNTL			VDREG2(0x181)
+#define ACNTL2			VDREG2(0x182)
+#define CNTRL1			VDREG2(0x183)
+#define CKHY			VDREG2(0x184)
+#define SHCOR			VDREG2(0x185)
+#define CORING			VDREG2(0x186)
+#define CLMPG			VDREG2(0x187)
+#define IAGC			VDREG2(0x188)
+#define VCTRL1			VDREG2(0x18f)
+#define MISC1			VDREG2(0x194)
+#define LOOP			VDREG2(0x195)
+#define MISC2			VDREG2(0x196)
+
+#define CLMD			VDREG2(0x197)
+#define ANPWRDOWN		VDREG2(0x1ce)
+#define AIGAIN			((const u16[8]) { 0x1d0, 0x1d1, 0x1d2, 0x1d3, \
+						  0x2d0, 0x2d1, 0x2d2, 0x2d3 })
+
+#define SYS_MODE_DMA_SHIFT	13
+
+#define DMA_CMD_ENABLE		BIT(31)
+#define INT_STATUS_DMA_TOUT	BIT(17)
+#define TW686X_VIDSTAT_HLOCK	BIT(6)
+#define TW686X_VIDSTAT_VDLOSS	BIT(7)
+
+#define TW686X_STD_NTSC_M	0
+#define TW686X_STD_PAL		1
+#define TW686X_STD_SECAM	2
+#define TW686X_STD_NTSC_443	3
+#define TW686X_STD_PAL_M	4
+#define TW686X_STD_PAL_CN	5
+#define TW686X_STD_PAL_60	6
+
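+/* Non-zero when any FIFO error bit above the low per-channel byte is set */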
+#define TW686X_FIFO_ERROR(x)	(x & ~(0xff))
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
new file mode 100644
index 0000000..253e108
--- /dev/null
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
+ *
+ * Based on original driver by Krzysztof Hałasa:
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
+#include "tw686x.h"
+#include "tw686x-regs.h"
+
+#define TW686X_INPUTS_PER_CH		4
+#define TW686X_VIDEO_WIDTH		720
+#define TW686X_VIDEO_HEIGHT(id)		((id & V4L2_STD_525_60) ? 480 : 576)
+
+static const struct tw686x_format formats[] = {
+	{
+		.fourcc = V4L2_PIX_FMT_UYVY,
+		.mode = 0,
+		.depth = 16,
+	}, {
+		.fourcc = V4L2_PIX_FMT_RGB565,
+		.mode = 5,
+		.depth = 16,
+	}, {
+		.fourcc = V4L2_PIX_FMT_YUYV,
+		.mode = 6,
+		.depth = 16,
+	}
+};
+
+static unsigned int tw686x_fields_map(v4l2_std_id std, unsigned int fps)
+{
+	static const unsigned int map[15] = {
+		0x00000000, 0x00000001, 0x00004001, 0x00104001, 0x00404041,
+		0x01041041, 0x01104411, 0x01111111, 0x04444445, 0x04511445,
+		0x05145145, 0x05151515, 0x05515455, 0x05551555, 0x05555555
+	};
+
+	static const unsigned int std_625_50[26] = {
+		0, 1, 1, 2,  3,  3,  4,  4,  5,  5,  6,  7,  7,
+		   8, 8, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 0
+	};
+
+	static const unsigned int std_525_60[31] = {
+		0, 1, 1, 1, 2,  2,  3,  3,  4,  4,  5,  5,  6,  6, 7, 7,
+		   8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 0, 0
+	};
+
+	unsigned int i;
+
+	if (std & V4L2_STD_525_60) {
+		if (fps >= ARRAY_SIZE(std_525_60))
+			fps = 30;
+		i = std_525_60[fps];
+	} else {
+		if (fps >= ARRAY_SIZE(std_625_50))
+			fps = 25;
+		i = std_625_50[fps];
+	}
+
+	return map[i];
+}
+
+static void tw686x_set_framerate(struct tw686x_video_channel *vc,
+				 unsigned int fps)
+{
+	unsigned int map;
+
+	if (vc->fps == fps)
+		return;
+
+	map = tw686x_fields_map(vc->video_standard, fps) << 1;
+	map |= map << 1;
+	if (map > 0)
+		map |= BIT(31);
+	reg_write(vc->dev, VIDEO_FIELD_CTRL[vc->ch], map);
+	vc->fps = fps;
+}
+
+static const struct tw686x_format *format_by_fourcc(unsigned int fourcc)
+{
+	unsigned int cnt;
+
+	for (cnt = 0; cnt < ARRAY_SIZE(formats); cnt++)
+		if (formats[cnt].fourcc == fourcc)
+			return &formats[cnt];
+	return NULL;
+}
+
+static int tw686x_queue_setup(struct vb2_queue *vq,
+			      unsigned int *nbuffers, unsigned int *nplanes,
+			      unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+	unsigned int szimage =
+		(vc->width * vc->height * vc->format->depth) >> 3;
+
+	/*
+	 * Let's request at least three buffers: two for the
+	 * DMA engine and one for userspace.
+	 */
+	if (vq->num_buffers + *nbuffers < 3)
+		*nbuffers = 3 - vq->num_buffers;
+
+	if (*nplanes) {
+		if (*nplanes != 1 || sizes[0] < szimage)
+			return -EINVAL;
+		return 0;
+	}
+
+	sizes[0] = szimage;
+	*nplanes = 1;
+	return 0;
+}
+
+static void tw686x_buf_queue(struct vb2_buffer *vb)
+{
+	struct tw686x_video_channel *vc = vb2_get_drv_priv(vb->vb2_queue);
+	struct tw686x_dev *dev = vc->dev;
+	struct pci_dev *pci_dev;
+	unsigned long flags;
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct tw686x_v4l2_buf *buf =
+		container_of(vbuf, struct tw686x_v4l2_buf, vb);
+
+	/* Check device presence */
+	spin_lock_irqsave(&dev->lock, flags);
+	pci_dev = dev->pci_dev;
+	spin_unlock_irqrestore(&dev->lock, flags);
+	if (!pci_dev) {
+		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+		return;
+	}
+
+	spin_lock_irqsave(&vc->qlock, flags);
+	list_add_tail(&buf->list, &vc->vidq_queued);
+	spin_unlock_irqrestore(&vc->qlock, flags);
+}
+
+/*
+ * We can call this even when alloc_dma failed for the given channel
+ */
+static void tw686x_free_dma(struct tw686x_video_channel *vc, unsigned int pb)
+{
+	struct tw686x_dma_desc *desc = &vc->dma_descs[pb];
+	struct tw686x_dev *dev = vc->dev;
+	struct pci_dev *pci_dev;
+	unsigned long flags;
+
+	/* Check device presence. Shouldn't really happen! */
+	spin_lock_irqsave(&dev->lock, flags);
+	pci_dev = dev->pci_dev;
+	spin_unlock_irqrestore(&dev->lock, flags);
+	if (!pci_dev) {
+		WARN(1, "trying to deallocate on missing device\n");
+		return;
+	}
+
+	if (desc->virt) {
+		pci_free_consistent(dev->pci_dev, desc->size,
+				    desc->virt, desc->phys);
+		desc->virt = NULL;
+	}
+}
+
+static int tw686x_alloc_dma(struct tw686x_video_channel *vc, unsigned int pb)
+{
+	struct tw686x_dev *dev = vc->dev;
+	u32 reg = pb ? VDMA_B_ADDR[vc->ch] : VDMA_P_ADDR[vc->ch];
+	unsigned int len;
+	void *virt;
+
+	WARN(vc->dma_descs[pb].virt,
+	     "Allocating buffer but previous still here\n");
+
+	len = (vc->width * vc->height * vc->format->depth) >> 3;
+	virt = pci_alloc_consistent(dev->pci_dev, len,
+				    &vc->dma_descs[pb].phys);
+	if (!virt) {
+		v4l2_err(&dev->v4l2_dev,
+			 "dma%d: unable to allocate %s-buffer\n",
+			 vc->ch, pb ? "B" : "P");
+		return -ENOMEM;
+	}
+	vc->dma_descs[pb].size = len;
+	vc->dma_descs[pb].virt = virt;
+	reg_write(dev, reg, vc->dma_descs[pb].phys);
+
+	return 0;
+}
+
+static void tw686x_buffer_refill(struct tw686x_video_channel *vc,
+				 unsigned int pb)
+{
+	struct tw686x_v4l2_buf *buf;
+
+	while (!list_empty(&vc->vidq_queued)) {
+
+		buf = list_first_entry(&vc->vidq_queued,
+			struct tw686x_v4l2_buf, list);
+		list_del(&buf->list);
+
+		vc->curr_bufs[pb] = buf;
+		return;
+	}
+	vc->curr_bufs[pb] = NULL;
+}
+
+static void tw686x_clear_queue(struct tw686x_video_channel *vc,
+			       enum vb2_buffer_state state)
+{
+	unsigned int pb;
+
+	while (!list_empty(&vc->vidq_queued)) {
+		struct tw686x_v4l2_buf *buf;
+
+		buf = list_first_entry(&vc->vidq_queued,
+			struct tw686x_v4l2_buf, list);
+		list_del(&buf->list);
+		vb2_buffer_done(&buf->vb.vb2_buf, state);
+	}
+
+	for (pb = 0; pb < 2; pb++) {
+		if (vc->curr_bufs[pb])
+			vb2_buffer_done(&vc->curr_bufs[pb]->vb.vb2_buf, state);
+		vc->curr_bufs[pb] = NULL;
+	}
+}
+
+static int tw686x_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+	struct tw686x_dev *dev = vc->dev;
+	struct pci_dev *pci_dev;
+	unsigned long flags;
+	int pb, err;
+
+	/* Check device presence */
+	spin_lock_irqsave(&dev->lock, flags);
+	pci_dev = dev->pci_dev;
+	spin_unlock_irqrestore(&dev->lock, flags);
+	if (!pci_dev) {
+		err = -ENODEV;
+		goto err_clear_queue;
+	}
+
+	spin_lock_irqsave(&vc->qlock, flags);
+
+	/* Sanity check */
+	if (!vc->dma_descs[0].virt || !vc->dma_descs[1].virt) {
+		spin_unlock_irqrestore(&vc->qlock, flags);
+		v4l2_err(&dev->v4l2_dev,
+			 "video%d: refusing to start without DMA buffers\n",
+			 vc->num);
+		err = -ENOMEM;
+		goto err_clear_queue;
+	}
+
+	for (pb = 0; pb < 2; pb++)
+		tw686x_buffer_refill(vc, pb);
+	spin_unlock_irqrestore(&vc->qlock, flags);
+
+	vc->sequence = 0;
+	vc->pb = 0;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	tw686x_enable_channel(dev, vc->ch);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	mod_timer(&dev->dma_delay_timer, jiffies + msecs_to_jiffies(100));
+
+	return 0;
+
+err_clear_queue:
+	spin_lock_irqsave(&vc->qlock, flags);
+	tw686x_clear_queue(vc, VB2_BUF_STATE_QUEUED);
+	spin_unlock_irqrestore(&vc->qlock, flags);
+	return err;
+}
+
+static void tw686x_stop_streaming(struct vb2_queue *vq)
+{
+	struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+	struct tw686x_dev *dev = vc->dev;
+	struct pci_dev *pci_dev;
+	unsigned long flags;
+
+	/* Check device presence */
+	spin_lock_irqsave(&dev->lock, flags);
+	pci_dev = dev->pci_dev;
+	spin_unlock_irqrestore(&dev->lock, flags);
+	if (pci_dev)
+		tw686x_disable_channel(dev, vc->ch);
+
+	spin_lock_irqsave(&vc->qlock, flags);
+	tw686x_clear_queue(vc, VB2_BUF_STATE_ERROR);
+	spin_unlock_irqrestore(&vc->qlock, flags);
+}
+
+static int tw686x_buf_prepare(struct vb2_buffer *vb)
+{
+	struct tw686x_video_channel *vc = vb2_get_drv_priv(vb->vb2_queue);
+	unsigned int size =
+		(vc->width * vc->height * vc->format->depth) >> 3;
+
+	if (vb2_plane_size(vb, 0) < size)
+		return -EINVAL;
+	vb2_set_plane_payload(vb, 0, size);
+	return 0;
+}
+
+static struct vb2_ops tw686x_video_qops = {
+	.queue_setup		= tw686x_queue_setup,
+	.buf_queue		= tw686x_buf_queue,
+	.buf_prepare		= tw686x_buf_prepare,
+	.start_streaming	= tw686x_start_streaming,
+	.stop_streaming		= tw686x_stop_streaming,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
+};
+
+static int tw686x_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct tw686x_video_channel *vc;
+	struct tw686x_dev *dev;
+	unsigned int ch;
+
+	vc = container_of(ctrl->handler, struct tw686x_video_channel,
+			  ctrl_handler);
+	dev = vc->dev;
+	ch = vc->ch;
+
+	switch (ctrl->id) {
+	case V4L2_CID_BRIGHTNESS:
+		reg_write(dev, BRIGHT[ch], ctrl->val & 0xff);
+		return 0;
+
+	case V4L2_CID_CONTRAST:
+		reg_write(dev, CONTRAST[ch], ctrl->val);
+		return 0;
+
+	case V4L2_CID_SATURATION:
+		reg_write(dev, SAT_U[ch], ctrl->val);
+		reg_write(dev, SAT_V[ch], ctrl->val);
+		return 0;
+
+	case V4L2_CID_HUE:
+		reg_write(dev, HUE[ch], ctrl->val & 0xff);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+	.s_ctrl = tw686x_s_ctrl,
+};
+
+static int tw686x_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+
+	f->fmt.pix.width = vc->width;
+	f->fmt.pix.height = vc->height;
+	f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+	f->fmt.pix.pixelformat = vc->format->fourcc;
+	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+	f->fmt.pix.bytesperline = (f->fmt.pix.width * vc->format->depth) / 8;
+	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+	return 0;
+}
+
+static int tw686x_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	unsigned int video_height = TW686X_VIDEO_HEIGHT(vc->video_standard);
+	const struct tw686x_format *format;
+
+	format = format_by_fourcc(f->fmt.pix.pixelformat);
+	if (!format) {
+		format = &formats[0];
+		f->fmt.pix.pixelformat = format->fourcc;
+	}
+
+	if (f->fmt.pix.width <= TW686X_VIDEO_WIDTH / 2)
+		f->fmt.pix.width = TW686X_VIDEO_WIDTH / 2;
+	else
+		f->fmt.pix.width = TW686X_VIDEO_WIDTH;
+
+	if (f->fmt.pix.height <= video_height / 2)
+		f->fmt.pix.height = video_height / 2;
+	else
+		f->fmt.pix.height = video_height;
+
+	f->fmt.pix.bytesperline = (f->fmt.pix.width * format->depth) / 8;
+	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+	f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+	return 0;
+}
+
+static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	u32 val, width, line_width, height;
+	unsigned long bitsperframe;
+	int err, pb;
+
+	if (vb2_is_busy(&vc->vidq))
+		return -EBUSY;
+
+	bitsperframe = vc->width * vc->height * vc->format->depth;
+	err = tw686x_try_fmt_vid_cap(file, priv, f);
+	if (err)
+		return err;
+
+	vc->format = format_by_fourcc(f->fmt.pix.pixelformat);
+	vc->width = f->fmt.pix.width;
+	vc->height = f->fmt.pix.height;
+
+	/* We need new DMA buffers if the framesize has changed */
+	if (bitsperframe != vc->width * vc->height * vc->format->depth) {
+		for (pb = 0; pb < 2; pb++)
+			tw686x_free_dma(vc, pb);
+
+		for (pb = 0; pb < 2; pb++) {
+			err = tw686x_alloc_dma(vc, pb);
+			if (err) {
+				if (pb > 0)
+					tw686x_free_dma(vc, 0);
+				return err;
+			}
+		}
+	}
+
+	val = reg_read(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch]);
+
+	if (vc->width <= TW686X_VIDEO_WIDTH / 2)
+		val |= BIT(23);
+	else
+		val &= ~BIT(23);
+
+	if (vc->height <= TW686X_VIDEO_HEIGHT(vc->video_standard) / 2)
+		val |= BIT(24);
+	else
+		val &= ~BIT(24);
+
+	val &= ~(0x7 << 20);
+	val |= vc->format->mode << 20;
+	reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
+
+	/* Program the DMA frame size */
+	width = (vc->width * 2) & 0x7ff;
+	height = vc->height / 2;
+	line_width = (vc->width * 2) & 0x7ff;
+	val = (height << 22) | (line_width << 11)  | width;
+	reg_write(vc->dev, VDMA_WHP[vc->ch], val);
+	return 0;
+}
+
+static int tw686x_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	struct tw686x_dev *dev = vc->dev;
+
+	strlcpy(cap->driver, "tw686x", sizeof(cap->driver));
+	strlcpy(cap->card, dev->name, sizeof(cap->card));
+	snprintf(cap->bus_info, sizeof(cap->bus_info),
+		 "PCI:%s", pci_name(dev->pci_dev));
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+			   V4L2_CAP_READWRITE;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+	return 0;
+}
+
+static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	struct v4l2_format f;
+	u32 val, ret;
+
+	if (vc->video_standard == id)
+		return 0;
+
+	if (vb2_is_busy(&vc->vidq))
+		return -EBUSY;
+
+	if (id & V4L2_STD_NTSC)
+		val = 0;
+	else if (id & V4L2_STD_PAL)
+		val = 1;
+	else if (id & V4L2_STD_SECAM)
+		val = 2;
+	else if (id & V4L2_STD_NTSC_443)
+		val = 3;
+	else if (id & V4L2_STD_PAL_M)
+		val = 4;
+	else if (id & V4L2_STD_PAL_Nc)
+		val = 5;
+	else if (id & V4L2_STD_PAL_60)
+		val = 6;
+	else
+		return -EINVAL;
+
+	vc->video_standard = id;
+	reg_write(vc->dev, SDT[vc->ch], val);
+
+	val = reg_read(vc->dev, VIDEO_CONTROL1);
+	if (id & V4L2_STD_525_60)
+		val &= ~(1 << (SYS_MODE_DMA_SHIFT + vc->ch));
+	else
+		val |= (1 << (SYS_MODE_DMA_SHIFT + vc->ch));
+	reg_write(vc->dev, VIDEO_CONTROL1, val);
+
+	/*
+	 * Adjust format after V4L2_STD_525_60/V4L2_STD_625_50 change,
+	 * calling g_fmt and s_fmt will sanitize the height
+	 * according to the standard.
+	 */
+	ret = tw686x_g_fmt_vid_cap(file, priv, &f);
+	if (!ret)
+		tw686x_s_fmt_vid_cap(file, priv, &f);
+	return 0;
+}
+
+static int tw686x_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	struct tw686x_dev *dev = vc->dev;
+	unsigned int old_std, detected_std = 0;
+	unsigned long end;
+
+	if (vb2_is_streaming(&vc->vidq))
+		return -EBUSY;
+
+	/* Enable and start standard detection */
+	old_std = reg_read(dev, SDT[vc->ch]);
+	reg_write(dev, SDT[vc->ch], 0x7);
+	reg_write(dev, SDT_EN[vc->ch], 0xff);
+
+	end = jiffies + msecs_to_jiffies(500);
+	while (time_is_after_jiffies(end)) {
+
+		detected_std = reg_read(dev, SDT[vc->ch]);
+		if (!(detected_std & BIT(7)))
+			break;
+		msleep(100);
+	}
+	reg_write(dev, SDT[vc->ch], old_std);
+
+	/* Exit if still busy */
+	if (detected_std & BIT(7))
+		return 0;
+
+	detected_std = (detected_std >> 4) & 0x7;
+	switch (detected_std) {
+	case TW686X_STD_NTSC_M:
+		*std &= V4L2_STD_NTSC;
+		break;
+	case TW686X_STD_NTSC_443:
+		*std &= V4L2_STD_NTSC_443;
+		break;
+	case TW686X_STD_PAL_M:
+		*std &= V4L2_STD_PAL_M;
+		break;
+	case TW686X_STD_PAL_60:
+		*std &= V4L2_STD_PAL_60;
+		break;
+	case TW686X_STD_PAL:
+		*std &= V4L2_STD_PAL;
+		break;
+	case TW686X_STD_PAL_CN:
+		*std &= V4L2_STD_PAL_Nc;
+		break;
+	case TW686X_STD_SECAM:
+		*std &= V4L2_STD_SECAM;
+		break;
+	default:
+		*std = 0;
+	}
+	return 0;
+}
+
+static int tw686x_g_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+
+	*id = vc->video_standard;
+	return 0;
+}
+
+static int tw686x_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	if (f->index >= ARRAY_SIZE(formats))
+		return -EINVAL;
+	f->pixelformat = formats[f->index].fourcc;
+	return 0;
+}
+
+static int tw686x_s_input(struct file *file, void *priv, unsigned int i)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	u32 val;
+
+	if (i >= TW686X_INPUTS_PER_CH)
+		return -EINVAL;
+	if (i == vc->input)
+		return 0;
+	/*
+	 * Not sure we are able to support on the fly input change
+	 */
+	if (vb2_is_busy(&vc->vidq))
+		return -EBUSY;
+
+	vc->input = i;
+
+	val = reg_read(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch]);
+	val &= ~(0x3 << 30);
+	val |= i << 30;
+	reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
+	return 0;
+}
+
+static int tw686x_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+
+	*i = vc->input;
+	return 0;
+}
+
+static int tw686x_enum_input(struct file *file, void *priv,
+			     struct v4l2_input *i)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	unsigned int vidstat;
+
+	if (i->index >= TW686X_INPUTS_PER_CH)
+		return -EINVAL;
+
+	snprintf(i->name, sizeof(i->name), "Composite%d", i->index);
+	i->type = V4L2_INPUT_TYPE_CAMERA;
+	i->std = vc->device->tvnorms;
+	i->capabilities = V4L2_IN_CAP_STD;
+
+	vidstat = reg_read(vc->dev, VIDSTAT[vc->ch]);
+	i->status = 0;
+	if (vidstat & TW686X_VIDSTAT_VDLOSS)
+		i->status |= V4L2_IN_ST_NO_SIGNAL;
+	if (!(vidstat & TW686X_VIDSTAT_HLOCK))
+		i->status |= V4L2_IN_ST_NO_H_LOCK;
+
+	return 0;
+}
+
+static const struct v4l2_file_operations tw686x_video_fops = {
+	.owner		= THIS_MODULE,
+	.open		= v4l2_fh_open,
+	.unlocked_ioctl	= video_ioctl2,
+	.release	= vb2_fop_release,
+	.poll		= vb2_fop_poll,
+	.read		= vb2_fop_read,
+	.mmap		= vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops tw686x_video_ioctl_ops = {
+	.vidioc_querycap		= tw686x_querycap,
+	.vidioc_g_fmt_vid_cap		= tw686x_g_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap		= tw686x_s_fmt_vid_cap,
+	.vidioc_enum_fmt_vid_cap	= tw686x_enum_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap		= tw686x_try_fmt_vid_cap,
+
+	.vidioc_querystd		= tw686x_querystd,
+	.vidioc_g_std			= tw686x_g_std,
+	.vidioc_s_std			= tw686x_s_std,
+
+	.vidioc_enum_input		= tw686x_enum_input,
+	.vidioc_g_input			= tw686x_g_input,
+	.vidioc_s_input			= tw686x_s_input,
+
+	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
+	.vidioc_querybuf		= vb2_ioctl_querybuf,
+	.vidioc_qbuf			= vb2_ioctl_qbuf,
+	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
+	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
+	.vidioc_streamon		= vb2_ioctl_streamon,
+	.vidioc_streamoff		= vb2_ioctl_streamoff,
+	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
+
+	.vidioc_log_status		= v4l2_ctrl_log_status,
+	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
+};
+
+static void tw686x_buffer_copy(struct tw686x_video_channel *vc,
+			       unsigned int pb, struct vb2_v4l2_buffer *vb)
+{
+	struct tw686x_dma_desc *desc = &vc->dma_descs[pb];
+	struct vb2_buffer *vb2_buf = &vb->vb2_buf;
+
+	vb->field = V4L2_FIELD_INTERLACED;
+	vb->sequence = vc->sequence++;
+
+	memcpy(vb2_plane_vaddr(vb2_buf, 0), desc->virt, desc->size);
+	vb2_buf->timestamp = ktime_get_ns();
+	vb2_buffer_done(vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+void tw686x_video_irq(struct tw686x_dev *dev, unsigned long requests,
+		      unsigned int pb_status, unsigned int fifo_status,
+		      unsigned int *reset_ch)
+{
+	struct tw686x_video_channel *vc;
+	struct vb2_v4l2_buffer *vb;
+	unsigned long flags;
+	unsigned int ch, pb;
+
+	for_each_set_bit(ch, &requests, max_channels(dev)) {
+		vc = &dev->video_channels[ch];
+
+		/*
+		 * This can either be a blue frame (with signal-lost bit set)
+		 * or a good frame (with signal-lost bit clear). If we have just
+		 * got signal, then this channel needs resetting.
+		 */
+		if (vc->no_signal && !(fifo_status & BIT(ch))) {
+			v4l2_printk(KERN_DEBUG, &dev->v4l2_dev,
+				    "video%d: signal recovered\n", vc->num);
+			vc->no_signal = false;
+			*reset_ch |= BIT(ch);
+			vc->pb = 0;
+			continue;
+		}
+		vc->no_signal = !!(fifo_status & BIT(ch));
+
+		/* Check FIFO errors only if there's signal */
+		if (!vc->no_signal) {
+			u32 fifo_ov, fifo_bad;
+
+			fifo_ov = (fifo_status >> 24) & BIT(ch);
+			fifo_bad = (fifo_status >> 16) & BIT(ch);
+			if (fifo_ov || fifo_bad) {
+				/* Mark this channel for reset */
+				v4l2_printk(KERN_DEBUG, &dev->v4l2_dev,
+					    "video%d: FIFO error\n", vc->num);
+				*reset_ch |= BIT(ch);
+				vc->pb = 0;
+				continue;
+			}
+		}
+
+		pb = !!(pb_status & BIT(ch));
+		if (vc->pb != pb) {
+			/* Mark this channel for reset */
+			v4l2_printk(KERN_DEBUG, &dev->v4l2_dev,
+				    "video%d: unexpected p-b buffer!\n",
+				    vc->num);
+			*reset_ch |= BIT(ch);
+			vc->pb = 0;
+			continue;
+		}
+
+		/* handle video stream */
+		spin_lock_irqsave(&vc->qlock, flags);
+		if (vc->curr_bufs[pb]) {
+			vb = &vc->curr_bufs[pb]->vb;
+			tw686x_buffer_copy(vc, pb, vb);
+		}
+		vc->pb = !pb;
+		tw686x_buffer_refill(vc, pb);
+		spin_unlock_irqrestore(&vc->qlock, flags);
+	}
+}
+
+void tw686x_video_free(struct tw686x_dev *dev)
+{
+	unsigned int ch, pb;
+
+	for (ch = 0; ch < max_channels(dev); ch++) {
+		struct tw686x_video_channel *vc = &dev->video_channels[ch];
+
+		if (vc->device)
+			video_unregister_device(vc->device);
+
+		for (pb = 0; pb < 2; pb++)
+			tw686x_free_dma(vc, pb);
+	}
+}
+
+int tw686x_video_init(struct tw686x_dev *dev)
+{
+	unsigned int ch, val, pb;
+	int err;
+
+	err = v4l2_device_register(&dev->pci_dev->dev, &dev->v4l2_dev);
+	if (err)
+		return err;
+
+	for (ch = 0; ch < max_channels(dev); ch++) {
+		struct tw686x_video_channel *vc = &dev->video_channels[ch];
+		struct video_device *vdev;
+
+		mutex_init(&vc->vb_mutex);
+		spin_lock_init(&vc->qlock);
+		INIT_LIST_HEAD(&vc->vidq_queued);
+
+		vc->dev = dev;
+		vc->ch = ch;
+
+		/* default settings */
+		vc->format = &formats[0];
+		vc->video_standard = V4L2_STD_NTSC;
+		vc->width = TW686X_VIDEO_WIDTH;
+		vc->height = TW686X_VIDEO_HEIGHT(vc->video_standard);
+		vc->input = 0;
+
+		reg_write(vc->dev, SDT[ch], 0);
+		tw686x_set_framerate(vc, 30);
+
+		reg_write(dev, VDELAY_LO[ch], 0x14);
+		reg_write(dev, HACTIVE_LO[ch], 0xd0);
+		reg_write(dev, VIDEO_SIZE[ch], 0);
+
+		for (pb = 0; pb < 2; pb++) {
+			err = tw686x_alloc_dma(vc, pb);
+			if (err)
+				goto error;
+		}
+
+		vc->vidq.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF;
+		vc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+		vc->vidq.drv_priv = vc;
+		vc->vidq.buf_struct_size = sizeof(struct tw686x_v4l2_buf);
+		vc->vidq.ops = &tw686x_video_qops;
+		vc->vidq.mem_ops = &vb2_vmalloc_memops;
+		vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+		vc->vidq.min_buffers_needed = 2;
+		vc->vidq.lock = &vc->vb_mutex;
+		vc->vidq.gfp_flags = GFP_DMA32;
+
+		err = vb2_queue_init(&vc->vidq);
+		if (err) {
+			v4l2_err(&dev->v4l2_dev,
+				 "dma%d: cannot init vb2 queue\n", ch);
+			goto error;
+		}
+
+		err = v4l2_ctrl_handler_init(&vc->ctrl_handler, 4);
+		if (err) {
+			v4l2_err(&dev->v4l2_dev,
+				 "dma%d: cannot init ctrl handler\n", ch);
+			goto error;
+		}
+		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+				  V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+				  V4L2_CID_CONTRAST, 0, 255, 1, 100);
+		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+				  V4L2_CID_SATURATION, 0, 255, 1, 128);
+		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+				  V4L2_CID_HUE, -128, 127, 1, 0);
+		err = vc->ctrl_handler.error;
+		if (err)
+			goto error;
+
+		err = v4l2_ctrl_handler_setup(&vc->ctrl_handler);
+		if (err)
+			goto error;
+
+		vdev = video_device_alloc();
+		if (!vdev) {
+			v4l2_err(&dev->v4l2_dev,
+				 "dma%d: unable to allocate device\n", ch);
+			err = -ENOMEM;
+			goto error;
+		}
+
+		snprintf(vdev->name, sizeof(vdev->name), "%s video", dev->name);
+		vdev->fops = &tw686x_video_fops;
+		vdev->ioctl_ops = &tw686x_video_ioctl_ops;
+		vdev->release = video_device_release;
+		vdev->v4l2_dev = &dev->v4l2_dev;
+		vdev->queue = &vc->vidq;
+		vdev->tvnorms = V4L2_STD_525_60 | V4L2_STD_625_50;
+		vdev->minor = -1;
+		vdev->lock = &vc->vb_mutex;
+		vdev->ctrl_handler = &vc->ctrl_handler;
+		vc->device = vdev;
+		video_set_drvdata(vdev, vc);
+
+		err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+		if (err < 0)
+			goto error;
+		vc->num = vdev->num;
+	}
+
+	/* Set DMA frame mode on all channels. Only supported mode for now. */
+	val = TW686X_DEF_PHASE_REF;
+	for (ch = 0; ch < max_channels(dev); ch++)
+		val |= TW686X_FRAME_MODE << (16 + ch * 2);
+	reg_write(dev, PHASE_REF, val);
+
+	reg_write(dev, MISC2[0], 0xe7);
+	reg_write(dev, VCTRL1[0], 0xcc);
+	reg_write(dev, LOOP[0], 0xa5);
+	if (max_channels(dev) > 4) {
+		reg_write(dev, VCTRL1[1], 0xcc);
+		reg_write(dev, LOOP[1], 0xa5);
+		reg_write(dev, MISC2[1], 0xe7);
+	}
+	return 0;
+
+error:
+	tw686x_video_free(dev);
+	return err;
+}
diff --git a/drivers/media/pci/tw686x/tw686x.h b/drivers/media/pci/tw686x/tw686x.h
new file mode 100644
index 0000000..44b5755
--- /dev/null
+++ b/drivers/media/pci/tw686x/tw686x.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
+ *
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ * Written by Krzysztof Hałasa
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <sound/pcm.h>
+
+#include "tw686x-regs.h"
+
+#define TYPE_MAX_CHANNELS	0x0f
+#define TYPE_SECOND_GEN		0x10
+#define TW686X_DEF_PHASE_REF	0x1518
+
+#define TW686X_FIELD_MODE	0x3
+#define TW686X_FRAME_MODE	0x2
+/* 0x1 is reserved */
+#define TW686X_SG_MODE		0x0
+
+#define TW686X_AUDIO_PAGE_SZ		4096
+#define TW686X_AUDIO_PAGE_MAX		16
+#define TW686X_AUDIO_PERIODS_MIN	2
+#define TW686X_AUDIO_PERIODS_MAX	TW686X_AUDIO_PAGE_MAX
+
+struct tw686x_format {
+	char *name;
+	unsigned int fourcc;
+	unsigned int depth;
+	unsigned int mode;
+};
+
+struct tw686x_dma_desc {
+	dma_addr_t phys;
+	void *virt;
+	unsigned int size;
+};
+
+struct tw686x_audio_buf {
+	dma_addr_t dma;
+	void *virt;
+	struct list_head list;
+};
+
+struct tw686x_v4l2_buf {
+	struct vb2_v4l2_buffer vb;
+	struct list_head list;
+};
+
+struct tw686x_audio_channel {
+	struct tw686x_dev *dev;
+	struct snd_pcm_substream *ss;
+	unsigned int ch;
+	struct tw686x_audio_buf *curr_bufs[2];
+	struct tw686x_dma_desc dma_descs[2];
+	dma_addr_t ptr;
+
+	struct tw686x_audio_buf buf[TW686X_AUDIO_PAGE_MAX];
+	struct list_head buf_list;
+	spinlock_t lock;
+};
+
+struct tw686x_video_channel {
+	struct tw686x_dev *dev;
+
+	struct vb2_queue vidq;
+	struct list_head vidq_queued;
+	struct video_device *device;
+	struct tw686x_v4l2_buf *curr_bufs[2];
+	struct tw686x_dma_desc dma_descs[2];
+
+	struct v4l2_ctrl_handler ctrl_handler;
+	const struct tw686x_format *format;
+	struct mutex vb_mutex;
+	spinlock_t qlock;
+	v4l2_std_id video_standard;
+	unsigned int width, height;
+	unsigned int h_halve, v_halve;
+	unsigned int ch;
+	unsigned int num;
+	unsigned int fps;
+	unsigned int input;
+	unsigned int sequence;
+	unsigned int pb;
+	bool no_signal;
+};
+
+/**
+ * struct tw686x_dev - global device status
+ * @lock: spinlock controlling access to the
+ *        shared device registers (DMA enable/disable).
+ */
+struct tw686x_dev {
+	spinlock_t lock;
+
+	struct v4l2_device v4l2_dev;
+	struct snd_card *snd_card;
+
+	char name[32];
+	unsigned int type;
+	struct pci_dev *pci_dev;
+	__u32 __iomem *mmio;
+
+	void *alloc_ctx;
+
+	struct tw686x_video_channel *video_channels;
+	struct tw686x_audio_channel *audio_channels;
+
+	int audio_rate; /* per-device value */
+
+	struct timer_list dma_delay_timer;
+	u32 pending_dma_en; /* must be protected by lock */
+	u32 pending_dma_cmd; /* must be protected by lock */
+};
+
+static inline uint32_t reg_read(struct tw686x_dev *dev, unsigned int reg)
+{
+	return readl(dev->mmio + reg);
+}
+
+static inline void reg_write(struct tw686x_dev *dev, unsigned int reg,
+			     uint32_t value)
+{
+	writel(value, dev->mmio + reg);
+}
+
+static inline unsigned int max_channels(struct tw686x_dev *dev)
+{
+	return dev->type & TYPE_MAX_CHANNELS; /* 4 or 8 channels */
+}
+
+void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel);
+void tw686x_disable_channel(struct tw686x_dev *dev, unsigned int channel);
+
+int tw686x_video_init(struct tw686x_dev *dev);
+void tw686x_video_free(struct tw686x_dev *dev);
+void tw686x_video_irq(struct tw686x_dev *dev, unsigned long requests,
+		      unsigned int pb_status, unsigned int fifo_status,
+		      unsigned int *reset_ch);
+
+int tw686x_audio_init(struct tw686x_dev *dev);
+void tw686x_audio_free(struct tw686x_dev *dev);
+void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
+		      unsigned int pb_status);
diff --git a/drivers/media/pci/zoran/videocodec.c b/drivers/media/pci/zoran/videocodec.c
index c010716..13a3c07 100644
--- a/drivers/media/pci/zoran/videocodec.c
+++ b/drivers/media/pci/zoran/videocodec.c
@@ -116,8 +116,9 @@
 				goto out_module_put;
 			}
 
-			snprintf(codec->name, sizeof(codec->name),
-				 "%s[%d]", codec->name, h->attached);
+			res = strlen(codec->name);
+			snprintf(codec->name + res, sizeof(codec->name) - res,
+				 "[%d]", h->attached);
 			codec->master_data = master;
 			res = codec->setup(codec);
 			if (res == 0) {
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 201f5c2..84e041c 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -238,7 +238,7 @@
 config VIDEO_RENESAS_JPU
 	tristate "Renesas JPEG Processing Unit"
 	depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
-	depends on ARCH_SHMOBILE || COMPILE_TEST
+	depends on ARCH_RENESAS || COMPILE_TEST
 	select VIDEOBUF2_DMA_CONTIG
 	select V4L2_MEM2MEM_DEV
 	---help---
@@ -250,7 +250,7 @@
 config VIDEO_RENESAS_VSP1
 	tristate "Renesas VSP1 Video Processing Engine"
 	depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
-	depends on (ARCH_SHMOBILE && OF) || COMPILE_TEST
+	depends on (ARCH_RENESAS && OF) || COMPILE_TEST
 	select VIDEOBUF2_DMA_CONTIG
 	---help---
 	  This is a V4L2 driver for the Renesas VSP1 video processing engine.
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index de32e3a..e749eb7 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1047,7 +1047,7 @@
 static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
 {
 	enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
-	int ret;
+	int ret = 0;
 
 	vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
 
@@ -1706,7 +1706,7 @@
 		sdinfo = &cfg->sub_devs[i];
 		client = v4l2_get_subdevdata(sdinfo->sd);
 		if (client->addr == curr_client->addr &&
-		    client->adapter->nr == client->adapter->nr) {
+		    client->adapter->nr == curr_client->adapter->nr) {
 			if (vpfe->current_input >= 1)
 				return -1;
 			*app_input_index = j + vpfe->current_input;
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 9b9e423..c049736 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -967,15 +967,6 @@
 	.lclk_frequency = 266000000UL,
 };
 
-static const struct platform_device_id gsc_driver_ids[] = {
-	{
-		.name		= "exynos-gsc",
-		.driver_data	= (unsigned long)&gsc_v_100_drvdata,
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(platform, gsc_driver_ids);
-
 static const struct of_device_id exynos_gsc_match[] = {
 	{
 		.compatible = "samsung,exynos5-gsc",
@@ -988,17 +979,11 @@
 static void *gsc_get_drv_data(struct platform_device *pdev)
 {
 	struct gsc_driverdata *driver_data = NULL;
+	const struct of_device_id *match;
 
-	if (pdev->dev.of_node) {
-		const struct of_device_id *match;
-		match = of_match_node(exynos_gsc_match,
-					pdev->dev.of_node);
-		if (match)
-			driver_data = (struct gsc_driverdata *)match->data;
-	} else {
-		driver_data = (struct gsc_driverdata *)
-			platform_get_device_id(pdev)->driver_data;
-	}
+	match = of_match_node(exynos_gsc_match, pdev->dev.of_node);
+	if (match)
+		driver_data = (struct gsc_driverdata *)match->data;
 
 	return driver_data;
 }
@@ -1078,17 +1063,17 @@
 	struct resource *res;
 	struct gsc_driverdata *drv_data = gsc_get_drv_data(pdev);
 	struct device *dev = &pdev->dev;
-	int ret = 0;
+	int ret;
 
 	gsc = devm_kzalloc(dev, sizeof(struct gsc_dev), GFP_KERNEL);
 	if (!gsc)
 		return -ENOMEM;
 
-	if (dev->of_node)
-		gsc->id = of_alias_get_id(pdev->dev.of_node, "gsc");
-	else
-		gsc->id = pdev->id;
+	ret = of_alias_get_id(pdev->dev.of_node, "gsc");
+	if (ret < 0)
+		return ret;
 
+	gsc->id = ret;
 	if (gsc->id >= drv_data->num_entities) {
 		dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
 		return -EINVAL;
@@ -1096,7 +1081,6 @@
 
 	gsc->variant = drv_data->variant[gsc->id];
 	gsc->pdev = pdev;
-	gsc->pdata = dev->platform_data;
 
 	init_waitqueue_head(&gsc->irq_queue);
 	spin_lock_init(&gsc->slock);
@@ -1253,7 +1237,6 @@
 static struct platform_driver gsc_driver = {
 	.probe		= gsc_probe,
 	.remove		= gsc_remove,
-	.id_table	= gsc_driver_ids,
 	.driver = {
 		.name	= GSC_MODULE_NAME,
 		.pm	= &gsc_pm_ops,
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index e93a233..ec4000c 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -340,7 +340,6 @@
 	void __iomem			*regs;
 	wait_queue_head_t		irq_queue;
 	struct gsc_m2m_device		m2m;
-	struct exynos_platform_gscaler	*pdata;
 	unsigned long			state;
 	struct vb2_alloc_ctx		*alloc_ctx;
 	struct video_device		vdev;
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index cef2a7f..b1c1cea 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1154,26 +1154,6 @@
 	},
 };
 
-static const struct fimc_variant fimc0_variant_s5p = {
-	.has_inp_rot	 = 1,
-	.has_out_rot	 = 1,
-	.has_cam_if	 = 1,
-	.min_inp_pixsize = 16,
-	.min_out_pixsize = 16,
-	.hor_offs_align	 = 8,
-	.min_vsize_align = 16,
-	.pix_limit	 = &s5p_pix_limit[0],
-};
-
-static const struct fimc_variant fimc2_variant_s5p = {
-	.has_cam_if	 = 1,
-	.min_inp_pixsize = 16,
-	.min_out_pixsize = 16,
-	.hor_offs_align	 = 8,
-	.min_vsize_align = 16,
-	.pix_limit	 = &s5p_pix_limit[1],
-};
-
 static const struct fimc_variant fimc0_variant_s5pv210 = {
 	.has_inp_rot	 = 1,
 	.has_out_rot	 = 1,
@@ -1206,18 +1186,6 @@
 	.pix_limit	 = &s5p_pix_limit[2],
 };
 
-/* S5PC100 */
-static const struct fimc_drvdata fimc_drvdata_s5p = {
-	.variant = {
-		[0] = &fimc0_variant_s5p,
-		[1] = &fimc0_variant_s5p,
-		[2] = &fimc2_variant_s5p,
-	},
-	.num_entities	= 3,
-	.lclk_frequency = 133000000UL,
-	.out_buf_count	= 4,
-};
-
 /* S5PV210, S5PC110 */
 static const struct fimc_drvdata fimc_drvdata_s5pv210 = {
 	.variant = {
@@ -1251,23 +1219,6 @@
 	.out_buf_count	= 32,
 };
 
-static const struct platform_device_id fimc_driver_ids[] = {
-	{
-		.name		= "s5p-fimc",
-		.driver_data	= (unsigned long)&fimc_drvdata_s5p,
-	}, {
-		.name		= "s5pv210-fimc",
-		.driver_data	= (unsigned long)&fimc_drvdata_s5pv210,
-	}, {
-		.name		= "exynos4-fimc",
-		.driver_data	= (unsigned long)&fimc_drvdata_exynos4210,
-	}, {
-		.name		= "exynos4x12-fimc",
-		.driver_data	= (unsigned long)&fimc_drvdata_exynos4x12,
-	},
-	{ },
-};
-
 static const struct of_device_id fimc_of_match[] = {
 	{
 		.compatible = "samsung,s5pv210-fimc",
@@ -1290,7 +1241,6 @@
 static struct platform_driver fimc_driver = {
 	.probe		= fimc_probe,
 	.remove		= fimc_remove,
-	.id_table	= fimc_driver_ids,
 	.driver = {
 		.of_match_table = fimc_of_match,
 		.name		= FIMC_DRIVER_NAME,
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index feb521f..891625e 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -446,8 +446,10 @@
 	else
 		pd->fimc_bus_type = pd->sensor_bus_type;
 
-	if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor)))
+	if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor))) {
+		of_node_put(rem);
 		return -EINVAL;
+	}
 
 	fmd->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_OF;
 	fmd->sensor[index].asd.match.of.node = rem;
@@ -1130,7 +1132,7 @@
 	media_entity_graph_walk_start(graph, entity);
 
 	while ((entity = media_entity_graph_walk_next(graph))) {
-		if (!is_media_entity_v4l2_io(entity))
+		if (!is_media_entity_v4l2_video_device(entity))
 			continue;
 
 		ret  = __fimc_md_modify_pipeline(entity, enable);
@@ -1145,7 +1147,7 @@
 	media_entity_graph_walk_start(graph, entity_err);
 
 	while ((entity_err = media_entity_graph_walk_next(graph))) {
-		if (!is_media_entity_v4l2_io(entity_err))
+		if (!is_media_entity_v4l2_video_device(entity_err))
 			continue;
 
 		__fimc_md_modify_pipeline(entity_err, !enable);
@@ -1446,22 +1448,13 @@
 
 	platform_set_drvdata(pdev, fmd);
 
-	/* Protect the media graph while we're registering entities */
-	mutex_lock(&fmd->media_dev.graph_mutex);
-
 	ret = fimc_md_register_platform_entities(fmd, dev->of_node);
-	if (ret) {
-		mutex_unlock(&fmd->media_dev.graph_mutex);
+	if (ret)
 		goto err_clk;
-	}
 
 	ret = fimc_md_register_sensor_entities(fmd);
-	if (ret) {
-		mutex_unlock(&fmd->media_dev.graph_mutex);
+	if (ret)
 		goto err_m_ent;
-	}
-
-	mutex_unlock(&fmd->media_dev.graph_mutex);
 
 	ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
 	if (ret)
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index bd5c46c..bf95442 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -757,8 +757,10 @@
 		goto err;
 
 	state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0;
-	if (state->index >= CSIS_MAX_ENTITIES)
-		return -ENXIO;
+	if (state->index >= CSIS_MAX_ENTITIES) {
+		ret = -ENXIO;
+		goto err;
+	}
 
 	/* Get MIPI CSI-2 bus configration from the endpoint node. */
 	of_property_read_u32(node, "samsung,csis-hs-settle",
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index ac76d29..1b1a95d 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -251,7 +251,7 @@
 		if (entity == &video->video.entity)
 			continue;
 
-		if (!is_media_entity_v4l2_io(entity))
+		if (!is_media_entity_v4l2_video_device(entity))
 			continue;
 
 		__video = to_isp_video(media_entity_to_video_device(entity));
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 0b44b9a..af237af 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -493,21 +493,17 @@
 	if (ret < 0)
 		goto err_sens;
 
-	mutex_lock(&camif->media_dev.graph_mutex);
-
 	ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
 	if (ret < 0)
-		goto err_unlock;
+		goto err_sens;
 
 	ret = camif_register_video_nodes(camif);
 	if (ret < 0)
-		goto err_unlock;
+		goto err_sens;
 
 	ret = camif_create_media_links(camif);
 	if (ret < 0)
-		goto err_unlock;
-
-	mutex_unlock(&camif->media_dev.graph_mutex);
+		goto err_sens;
 
 	ret = media_device_register(&camif->media_dev);
 	if (ret < 0)
@@ -516,8 +512,6 @@
 	pm_runtime_put(dev);
 	return 0;
 
-err_unlock:
-	mutex_unlock(&camif->media_dev.graph_mutex);
 err_sens:
 	v4l2_device_unregister(&camif->v4l2_dev);
 	media_device_unregister(&camif->media_dev);
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 74bd46c..612d1ea 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -719,16 +719,12 @@
 
 	def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
 
-	if (!pdev->dev.of_node) {
-		dev->variant = g2d_get_drv_data(pdev);
-	} else {
-		of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node);
-		if (!of_id) {
-			ret = -ENODEV;
-			goto unreg_video_dev;
-		}
-		dev->variant = (struct g2d_variant *)of_id->data;
+	of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node);
+	if (!of_id) {
+		ret = -ENODEV;
+		goto unreg_video_dev;
 	}
+	dev->variant = (struct g2d_variant *)of_id->data;
 
 	return 0;
 
@@ -788,22 +784,9 @@
 };
 MODULE_DEVICE_TABLE(of, exynos_g2d_match);
 
-static const struct platform_device_id g2d_driver_ids[] = {
-	{
-		.name = "s5p-g2d",
-		.driver_data = (unsigned long)&g2d_drvdata_v3x,
-	}, {
-		.name = "s5p-g2d-v4x",
-		.driver_data = (unsigned long)&g2d_drvdata_v4x,
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(platform, g2d_driver_ids);
-
 static struct platform_driver g2d_pdrv = {
 	.probe		= g2d_probe,
 	.remove		= g2d_remove,
-	.id_table	= g2d_driver_ids,
 	.driver		= {
 		.name = G2D_NAME,
 		.of_match_table = exynos_g2d_match,
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index b0e52ab..e31df54 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -89,8 +89,3 @@
 void g2d_set_v41_stretch(struct g2d_dev *d,
 			struct g2d_frame *src, struct g2d_frame *dst);
 void g2d_set_cmd(struct g2d_dev *d, u32 c);
-
-static inline struct g2d_variant *g2d_get_drv_data(struct platform_device *pdev)
-{
-	return (struct g2d_variant *)platform_get_device_id(pdev)->driver_data;
-}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index c3b13a6..caa19b4 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1548,8 +1548,10 @@
 	struct v4l2_pix_format *pix = &f->fmt.pix;
 	u32 pix_fmt = f->fmt.pix.pixelformat;
 	int w = pix->width, h = pix->height, wh_align;
+	int padding = 0;
 
 	if (pix_fmt == V4L2_PIX_FMT_RGB32 ||
+	    pix_fmt == V4L2_PIX_FMT_RGB565 ||
 	    pix_fmt == V4L2_PIX_FMT_NV24 ||
 	    pix_fmt == V4L2_PIX_FMT_NV42 ||
 	    pix_fmt == V4L2_PIX_FMT_NV12 ||
@@ -1564,7 +1566,10 @@
 			       &h, S5P_JPEG_MIN_HEIGHT,
 			       S5P_JPEG_MAX_HEIGHT, wh_align);
 
-	return w * h * fmt_depth >> 3;
+	if (ctx->jpeg->variant->version == SJPEG_EXYNOS4)
+		padding = PAGE_SIZE;
+
+	return (w * h * fmt_depth >> 3) + padding;
 }
 
 static int exynos3250_jpeg_try_downscale(struct s5p_jpeg_ctx *ctx,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 927ab49..b16466f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1489,27 +1489,6 @@
 	.fw_name[0]     = "s5p-mfc-v8.fw",
 };
 
-static const struct platform_device_id mfc_driver_ids[] = {
-	{
-		.name = "s5p-mfc",
-		.driver_data = (unsigned long)&mfc_drvdata_v5,
-	}, {
-		.name = "s5p-mfc-v5",
-		.driver_data = (unsigned long)&mfc_drvdata_v5,
-	}, {
-		.name = "s5p-mfc-v6",
-		.driver_data = (unsigned long)&mfc_drvdata_v6,
-	}, {
-		.name = "s5p-mfc-v7",
-		.driver_data = (unsigned long)&mfc_drvdata_v7,
-	}, {
-		.name = "s5p-mfc-v8",
-		.driver_data = (unsigned long)&mfc_drvdata_v8,
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(platform, mfc_driver_ids);
-
 static const struct of_device_id exynos_mfc_match[] = {
 	{
 		.compatible = "samsung,mfc-v5",
@@ -1531,24 +1510,18 @@
 static void *mfc_get_drv_data(struct platform_device *pdev)
 {
 	struct s5p_mfc_variant *driver_data = NULL;
+	const struct of_device_id *match;
 
-	if (pdev->dev.of_node) {
-		const struct of_device_id *match;
-		match = of_match_node(exynos_mfc_match,
-				pdev->dev.of_node);
-		if (match)
-			driver_data = (struct s5p_mfc_variant *)match->data;
-	} else {
-		driver_data = (struct s5p_mfc_variant *)
-			platform_get_device_id(pdev)->driver_data;
-	}
+	match = of_match_node(exynos_mfc_match, pdev->dev.of_node);
+	if (match)
+		driver_data = (struct s5p_mfc_variant *)match->data;
+
 	return driver_data;
 }
 
 static struct platform_driver s5p_mfc_driver = {
 	.probe		= s5p_mfc_probe,
 	.remove		= s5p_mfc_remove,
-	.id_table	= mfc_driver_ids,
 	.driver	= {
 		.name	= S5P_MFC_NAME,
 		.pm	= &s5p_mfc_pm_ops,
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index 42cd270..4dd62a9 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -300,7 +300,7 @@
 struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx);
 struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx);
 struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
-	int idx, char *name, struct mxr_layer_ops *ops);
+	int idx, char *name, const struct mxr_layer_ops *ops);
 
 void mxr_base_layer_release(struct mxr_layer *layer);
 void mxr_layer_release(struct mxr_layer *layer);
diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
index db3163b..d4d2564 100644
--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
@@ -235,7 +235,7 @@
 {
 	struct mxr_layer *layer;
 	int ret;
-	struct mxr_layer_ops ops = {
+	const struct mxr_layer_ops ops = {
 		.release = mxr_graph_layer_release,
 		.buffer_set = mxr_graph_buffer_set,
 		.stream_set = mxr_graph_stream_set,
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index d9e7f03..7ab5578 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -1070,7 +1070,7 @@
 }
 
 struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
-	int idx, char *name, struct mxr_layer_ops *ops)
+	int idx, char *name, const struct mxr_layer_ops *ops)
 {
 	struct mxr_layer *layer;
 
diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
index dd002a4..6fa6f67 100644
--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
@@ -207,7 +207,7 @@
 {
 	struct mxr_layer *layer;
 	int ret;
-	struct mxr_layer_ops ops = {
+	const struct mxr_layer_ops ops = {
 		.release = mxr_vp_layer_release,
 		.buffer_set = mxr_vp_buffer_set,
 		.stream_set = mxr_vp_stream_set,
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 3552989..83029a4 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -28,7 +28,7 @@
 config VIDEO_RCAR_VIN
 	tristate "R-Car Video Input (VIN) support"
 	depends on VIDEO_DEV && SOC_CAMERA
-	depends on ARCH_SHMOBILE || COMPILE_TEST
+	depends on ARCH_RENESAS || COMPILE_TEST
 	depends on HAS_DMA
 	select VIDEOBUF2_DMA_CONTIG
 	select SOC_CAMERA_SCALE_CROP
@@ -45,7 +45,7 @@
 config VIDEO_SH_MOBILE_CEU
 	tristate "SuperH Mobile CEU Interface driver"
 	depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA && HAVE_CLK
-	depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
+	depends on ARCH_SHMOBILE || COMPILE_TEST
 	depends on HAS_DMA
 	select VIDEOBUF2_DMA_CONTIG
 	select SOC_CAMERA_SCALE_CROP
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 3b8edf4..3f9c1b8 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -1845,6 +1845,8 @@
 	{ .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
 	{ .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
 	{ .compatible = "renesas,vin-r8a7778", .data = (void *)RCAR_M1 },
+	{ .compatible = "renesas,rcar-gen3-vin", .data = (void *)RCAR_GEN3 },
+	{ .compatible = "renesas,rcar-gen2-vin", .data = (void *)RCAR_GEN2 },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, rcar_vin_of_table);
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 78e3cb9..7dddf77 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -49,7 +49,7 @@
 #define PID_TABLE_SIZE 1024
 #define POLL_MSECS 50
 
-static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei);
+static int load_c8sectpfe_fw(struct c8sectpfei *fei);
 
 #define TS_PKT_SIZE 188
 #define HEADER_SIZE (4)
@@ -130,7 +130,7 @@
 		writel(channel->back_buffer_busaddr, channel->irec +
 			DMA_PRDS_BUSRP_TP(0));
 	else
-		writel(wp, channel->irec + DMA_PRDS_BUSWP_TP(0));
+		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
 }
 
 static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
@@ -141,6 +141,7 @@
 	struct channel_info *channel;
 	u32 tmp;
 	unsigned long *bitmap;
+	int ret;
 
 	switch (dvbdmxfeed->type) {
 	case DMX_TYPE_TS:
@@ -169,8 +170,9 @@
 	}
 
 	if (!atomic_read(&fei->fw_loaded)) {
-		dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
-		return -EINVAL;
+		ret = load_c8sectpfe_fw(fei);
+		if (ret)
+			return ret;
 	}
 
 	mutex_lock(&fei->lock);
@@ -265,8 +267,9 @@
 	unsigned long *bitmap;
 
 	if (!atomic_read(&fei->fw_loaded)) {
-		dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
-		return -EINVAL;
+		ret = load_c8sectpfe_fw(fei);
+		if (ret)
+			return ret;
 	}
 
 	mutex_lock(&fei->lock);
@@ -585,7 +588,7 @@
 	writel(tsin->pid_buffer_busaddr,
 		fei->io + PIDF_BASE(tsin->tsin_id));
 
-	dev_info(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
+	dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
 		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
 		&tsin->pid_buffer_busaddr);
 
@@ -880,13 +883,6 @@
 		goto err_clk_disable;
 	}
 
-	/* ensure all other init has been done before requesting firmware */
-	ret = load_c8sectpfe_fw_step1(fei);
-	if (ret) {
-		dev_err(dev, "Couldn't load slim core firmware\n");
-		goto err_clk_disable;
-	}
-
 	c8sectpfe_debugfs_init(fei);
 
 	return 0;
@@ -1091,15 +1087,14 @@
 		phdr->p_memsz - phdr->p_filesz);
 }
 
-static int load_slim_core_fw(const struct firmware *fw, void *context)
+static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
 {
-	struct c8sectpfei *fei = context;
 	Elf32_Ehdr *ehdr;
 	Elf32_Phdr *phdr;
 	u8 __iomem *dst;
 	int err = 0, i;
 
-	if (!fw || !context)
+	if (!fw || !fei)
 		return -EINVAL;
 
 	ehdr = (Elf32_Ehdr *)fw->data;
@@ -1151,29 +1146,35 @@
 	return err;
 }
 
-static void load_c8sectpfe_fw_cb(const struct firmware *fw, void *context)
+static int load_c8sectpfe_fw(struct c8sectpfei *fei)
 {
-	struct c8sectpfei *fei = context;
+	const struct firmware *fw;
 	int err;
 
+	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
+
+	err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
+	if (err)
+		return err;
+
 	err = c8sectpfe_elf_sanity_check(fei, fw);
 	if (err) {
 		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
 			, err);
-		goto err;
+		return err;
 	}
 
-	err = load_slim_core_fw(fw, context);
+	err = load_slim_core_fw(fw, fei);
 	if (err) {
 		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
-		goto err;
+		return err;
 	}
 
 	/* now the firmware is loaded configure the input blocks */
 	err = configure_channels(fei);
 	if (err) {
 		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
-		goto err;
+		return err;
 	}
 
 	/*
@@ -1186,28 +1187,6 @@
 	writel(0x1,  fei->io + DMA_CPU_RUN);
 
 	atomic_set(&fei->fw_loaded, 1);
-err:
-	complete_all(&fei->fw_ack);
-}
-
-static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
-{
-	int err;
-
-	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
-
-	init_completion(&fei->fw_ack);
-	atomic_set(&fei->fw_loaded, 0);
-
-	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
-				FIRMWARE_MEMDMA, fei->dev, GFP_KERNEL, fei,
-				load_c8sectpfe_fw_cb);
-
-	if (err) {
-		dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err);
-		complete_all(&fei->fw_ack);
-		return err;
-	}
 
 	return 0;
 }
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index 0885e93..f535f57 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -7,6 +7,7 @@
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select VIDEOBUF2_VMALLOC
+	select VIDEO_V4L2_TPG
 	default n
 	---help---
 	  Enables a virtual video driver. This driver emulates a webcam,
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
index 756fc12..633c8a1b 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/platform/vivid/Makefile
@@ -2,5 +2,5 @@
 		vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
 		vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
 		vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
-		vivid-osd.o vivid-tpg.o vivid-tpg-colors.o
+		vivid-osd.o
 obj-$(CONFIG_VIDEO_VIVID) += vivid.o
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index ec125bec..c14da84 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -200,27 +200,12 @@
 					struct v4l2_capability *cap)
 {
 	struct vivid_dev *dev = video_drvdata(file);
-	struct video_device *vdev = video_devdata(file);
 
 	strcpy(cap->driver, "vivid");
 	strcpy(cap->card, "vivid");
 	snprintf(cap->bus_info, sizeof(cap->bus_info),
 			"platform:%s", dev->v4l2_dev.name);
 
-	if (vdev->vfl_type == VFL_TYPE_GRABBER && vdev->vfl_dir == VFL_DIR_RX)
-		cap->device_caps = dev->vid_cap_caps;
-	if (vdev->vfl_type == VFL_TYPE_GRABBER && vdev->vfl_dir == VFL_DIR_TX)
-		cap->device_caps = dev->vid_out_caps;
-	else if (vdev->vfl_type == VFL_TYPE_VBI && vdev->vfl_dir == VFL_DIR_RX)
-		cap->device_caps = dev->vbi_cap_caps;
-	else if (vdev->vfl_type == VFL_TYPE_VBI && vdev->vfl_dir == VFL_DIR_TX)
-		cap->device_caps = dev->vbi_out_caps;
-	else if (vdev->vfl_type == VFL_TYPE_SDR)
-		cap->device_caps = dev->sdr_cap_caps;
-	else if (vdev->vfl_type == VFL_TYPE_RADIO && vdev->vfl_dir == VFL_DIR_RX)
-		cap->device_caps = dev->radio_rx_caps;
-	else if (vdev->vfl_type == VFL_TYPE_RADIO && vdev->vfl_dir == VFL_DIR_TX)
-		cap->device_caps = dev->radio_tx_caps;
 	cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
 		dev->vbi_cap_caps | dev->vbi_out_caps |
 		dev->radio_rx_caps | dev->radio_tx_caps |
@@ -1135,6 +1120,7 @@
 		strlcpy(vfd->name, "vivid-vid-cap", sizeof(vfd->name));
 		vfd->fops = &vivid_fops;
 		vfd->ioctl_ops = &vivid_ioctl_ops;
+		vfd->device_caps = dev->vid_cap_caps;
 		vfd->release = video_device_release_empty;
 		vfd->v4l2_dev = &dev->v4l2_dev;
 		vfd->queue = &dev->vb_vid_cap_q;
@@ -1160,6 +1146,7 @@
 		vfd->vfl_dir = VFL_DIR_TX;
 		vfd->fops = &vivid_fops;
 		vfd->ioctl_ops = &vivid_ioctl_ops;
+		vfd->device_caps = dev->vid_out_caps;
 		vfd->release = video_device_release_empty;
 		vfd->v4l2_dev = &dev->v4l2_dev;
 		vfd->queue = &dev->vb_vid_out_q;
@@ -1184,6 +1171,7 @@
 		strlcpy(vfd->name, "vivid-vbi-cap", sizeof(vfd->name));
 		vfd->fops = &vivid_fops;
 		vfd->ioctl_ops = &vivid_ioctl_ops;
+		vfd->device_caps = dev->vbi_cap_caps;
 		vfd->release = video_device_release_empty;
 		vfd->v4l2_dev = &dev->v4l2_dev;
 		vfd->queue = &dev->vb_vbi_cap_q;
@@ -1207,6 +1195,7 @@
 		vfd->vfl_dir = VFL_DIR_TX;
 		vfd->fops = &vivid_fops;
 		vfd->ioctl_ops = &vivid_ioctl_ops;
+		vfd->device_caps = dev->vbi_out_caps;
 		vfd->release = video_device_release_empty;
 		vfd->v4l2_dev = &dev->v4l2_dev;
 		vfd->queue = &dev->vb_vbi_out_q;
@@ -1229,6 +1218,7 @@
 		strlcpy(vfd->name, "vivid-sdr-cap", sizeof(vfd->name));
 		vfd->fops = &vivid_fops;
 		vfd->ioctl_ops = &vivid_ioctl_ops;
+		vfd->device_caps = dev->sdr_cap_caps;
 		vfd->release = video_device_release_empty;
 		vfd->v4l2_dev = &dev->v4l2_dev;
 		vfd->queue = &dev->vb_sdr_cap_q;
@@ -1247,6 +1237,7 @@
 		strlcpy(vfd->name, "vivid-rad-rx", sizeof(vfd->name));
 		vfd->fops = &vivid_radio_fops;
 		vfd->ioctl_ops = &vivid_ioctl_ops;
+		vfd->device_caps = dev->radio_rx_caps;
 		vfd->release = video_device_release_empty;
 		vfd->v4l2_dev = &dev->v4l2_dev;
 		vfd->lock = &dev->mutex;
@@ -1265,6 +1256,7 @@
 		vfd->vfl_dir = VFL_DIR_TX;
 		vfd->fops = &vivid_radio_fops;
 		vfd->ioctl_ops = &vivid_ioctl_ops;
+		vfd->device_caps = dev->radio_tx_caps;
 		vfd->release = video_device_release_empty;
 		vfd->v4l2_dev = &dev->v4l2_dev;
 		vfd->lock = &dev->mutex;
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 751c1ba..776783be 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -25,7 +25,7 @@
 #include <media/v4l2-device.h>
 #include <media/v4l2-dev.h>
 #include <media/v4l2-ctrls.h>
-#include "vivid-tpg.h"
+#include <media/v4l2-tpg.h>
 #include "vivid-rds-gen.h"
 #include "vivid-vbi-gen.h"
 
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 9034281..3b8c101 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -36,6 +36,7 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-fh.h>
 #include <media/v4l2-event.h>
+#include <media/v4l2-rect.h>
 
 #include "vivid-core.h"
 #include "vivid-vid-common.h"
@@ -184,15 +185,15 @@
 		dev->compose_out.width, dev->compose_out.height
 	};
 
-	dev->loop_vid_copy = rect_intersect(&dev->crop_cap, &dev->compose_out);
+	v4l2_rect_intersect(&dev->loop_vid_copy, &dev->crop_cap, &dev->compose_out);
 
 	dev->loop_vid_out = dev->loop_vid_copy;
-	rect_scale(&dev->loop_vid_out, &dev->compose_out, &dev->crop_out);
+	v4l2_rect_scale(&dev->loop_vid_out, &dev->compose_out, &dev->crop_out);
 	dev->loop_vid_out.left += dev->crop_out.left;
 	dev->loop_vid_out.top += dev->crop_out.top;
 
 	dev->loop_vid_cap = dev->loop_vid_copy;
-	rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap);
+	v4l2_rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap);
 
 	dprintk(dev, 1,
 		"loop_vid_copy: %dx%d@%dx%d loop_vid_out: %dx%d@%dx%d loop_vid_cap: %dx%d@%dx%d\n",
@@ -203,13 +204,13 @@
 		dev->loop_vid_cap.width, dev->loop_vid_cap.height,
 		dev->loop_vid_cap.left, dev->loop_vid_cap.top);
 
-	r_overlay = rect_intersect(&r_fb, &r_overlay);
+	v4l2_rect_intersect(&r_overlay, &r_fb, &r_overlay);
 
 	/* shift r_overlay to the same origin as compose_out */
 	r_overlay.left += dev->compose_out.left - dev->overlay_out_left;
 	r_overlay.top += dev->compose_out.top - dev->overlay_out_top;
 
-	dev->loop_vid_overlay = rect_intersect(&r_overlay, &dev->loop_vid_copy);
+	v4l2_rect_intersect(&dev->loop_vid_overlay, &r_overlay, &dev->loop_vid_copy);
 	dev->loop_fb_copy = dev->loop_vid_overlay;
 
 	/* shift dev->loop_fb_copy back again to the fb origin */
@@ -217,7 +218,7 @@
 	dev->loop_fb_copy.top -= dev->compose_out.top - dev->overlay_out_top;
 
 	dev->loop_vid_overlay_cap = dev->loop_vid_overlay;
-	rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap);
+	v4l2_rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap);
 
 	dprintk(dev, 1,
 		"loop_fb_copy: %dx%d@%dx%d loop_vid_overlay: %dx%d@%dx%d loop_vid_overlay_cap: %dx%d@%dx%d\n",
diff --git a/drivers/media/platform/vivid/vivid-rds-gen.c b/drivers/media/platform/vivid/vivid-rds-gen.c
index c382343..53c7777 100644
--- a/drivers/media/platform/vivid/vivid-rds-gen.c
+++ b/drivers/media/platform/vivid/vivid-rds-gen.c
@@ -55,6 +55,7 @@
 {
 	struct v4l2_rds_data *data = rds->data;
 	unsigned grp;
+	unsigned idx;
 	struct tm tm;
 	unsigned date;
 	unsigned time;
@@ -73,24 +74,26 @@
 		case 0 ... 3:
 		case 22 ... 25:
 		case 44 ... 47: /* Group 0B */
+			idx = (grp % 22) % 4;
 			data[1].lsb |= (rds->ta << 4) | (rds->ms << 3);
-			data[1].lsb |= vivid_get_di(rds, grp % 22);
+			data[1].lsb |= vivid_get_di(rds, idx);
 			data[1].msb |= 1 << 3;
 			data[2].lsb = rds->picode & 0xff;
 			data[2].msb = rds->picode >> 8;
 			data[2].block = V4L2_RDS_BLOCK_C_ALT | (V4L2_RDS_BLOCK_C_ALT << 3);
-			data[3].lsb = rds->psname[2 * (grp % 22) + 1];
-			data[3].msb = rds->psname[2 * (grp % 22)];
+			data[3].lsb = rds->psname[2 * idx + 1];
+			data[3].msb = rds->psname[2 * idx];
 			break;
 		case 4 ... 19:
 		case 26 ... 41: /* Group 2A */
-			data[1].lsb |= (grp - 4) % 22;
+			idx = ((grp - 4) % 22) % 16;
+			data[1].lsb |= idx;
 			data[1].msb |= 4 << 3;
-			data[2].msb = rds->radiotext[4 * ((grp - 4) % 22)];
-			data[2].lsb = rds->radiotext[4 * ((grp - 4) % 22) + 1];
+			data[2].msb = rds->radiotext[4 * idx];
+			data[2].lsb = rds->radiotext[4 * idx + 1];
 			data[2].block = V4L2_RDS_BLOCK_C | (V4L2_RDS_BLOCK_C << 3);
-			data[3].msb = rds->radiotext[4 * ((grp - 4) % 22) + 2];
-			data[3].lsb = rds->radiotext[4 * ((grp - 4) % 22) + 3];
+			data[3].msb = rds->radiotext[4 * idx + 2];
+			data[3].lsb = rds->radiotext[4 * idx + 3];
 			break;
 		case 56:
 			/*
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.c b/drivers/media/platform/vivid/vivid-tpg-colors.c
deleted file mode 100644
index 2299f0c..0000000
--- a/drivers/media/platform/vivid/vivid-tpg-colors.c
+++ /dev/null
@@ -1,1416 +0,0 @@
-/*
- * vivid-color.c - A table that converts colors to various colorspaces
- *
- * The test pattern generator uses the tpg_colors for its test patterns.
- * For testing colorspaces the first 8 colors of that table need to be
- * converted to their equivalent in the target colorspace.
- *
- * The tpg_csc_colors[] table is the result of that conversion and since
- * it is precalculated the colorspace conversion is just a simple table
- * lookup.
- *
- * This source also contains the code used to generate the tpg_csc_colors
- * table. Run the following command to compile it:
- *
- *	gcc vivid-tpg-colors.c -DCOMPILE_APP -o gen-colors -lm
- *
- * and run the utility.
- *
- * Note that the converted colors are in the range 0x000-0xff0 (so times 16)
- * in order to preserve precision.
- *
- * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/videodev2.h>
-
-#include "vivid-tpg-colors.h"
-
-/* sRGB colors with range [0-255] */
-const struct color tpg_colors[TPG_COLOR_MAX] = {
-	/*
-	 * Colors to test colorspace conversion: converting these colors
-	 * to other colorspaces will never lead to out-of-gamut colors.
-	 */
-	{ 191, 191, 191 }, /* TPG_COLOR_CSC_WHITE */
-	{ 191, 191,  50 }, /* TPG_COLOR_CSC_YELLOW */
-	{  50, 191, 191 }, /* TPG_COLOR_CSC_CYAN */
-	{  50, 191,  50 }, /* TPG_COLOR_CSC_GREEN */
-	{ 191,  50, 191 }, /* TPG_COLOR_CSC_MAGENTA */
-	{ 191,  50,  50 }, /* TPG_COLOR_CSC_RED */
-	{  50,  50, 191 }, /* TPG_COLOR_CSC_BLUE */
-	{  50,  50,  50 }, /* TPG_COLOR_CSC_BLACK */
-
-	/* 75% colors */
-	{ 191, 191,   0 }, /* TPG_COLOR_75_YELLOW */
-	{   0, 191, 191 }, /* TPG_COLOR_75_CYAN */
-	{   0, 191,   0 }, /* TPG_COLOR_75_GREEN */
-	{ 191,   0, 191 }, /* TPG_COLOR_75_MAGENTA */
-	{ 191,   0,   0 }, /* TPG_COLOR_75_RED */
-	{   0,   0, 191 }, /* TPG_COLOR_75_BLUE */
-
-	/* 100% colors */
-	{ 255, 255, 255 }, /* TPG_COLOR_100_WHITE */
-	{ 255, 255,   0 }, /* TPG_COLOR_100_YELLOW */
-	{   0, 255, 255 }, /* TPG_COLOR_100_CYAN */
-	{   0, 255,   0 }, /* TPG_COLOR_100_GREEN */
-	{ 255,   0, 255 }, /* TPG_COLOR_100_MAGENTA */
-	{ 255,   0,   0 }, /* TPG_COLOR_100_RED */
-	{   0,   0, 255 }, /* TPG_COLOR_100_BLUE */
-	{   0,   0,   0 }, /* TPG_COLOR_100_BLACK */
-
-	{   0,   0,   0 }, /* TPG_COLOR_RANDOM placeholder */
-};
-
-#ifndef COMPILE_APP
-
-/* Generated table */
-const unsigned short tpg_rec709_to_linear[255 * 16 + 1] = {
-	   0,    0,    0,    1,    1,    1,    1,    2,    2,    2,    2,    2,    3,    3,    3,    3,
-	   4,    4,    4,    4,    4,    5,    5,    5,    5,    6,    6,    6,    6,    6,    7,    7,
-	   7,    7,    8,    8,    8,    8,    8,    9,    9,    9,    9,   10,   10,   10,   10,   10,
-	  11,   11,   11,   11,   12,   12,   12,   12,   12,   13,   13,   13,   13,   14,   14,   14,
-	  14,   14,   15,   15,   15,   15,   16,   16,   16,   16,   16,   17,   17,   17,   17,   18,
-	  18,   18,   18,   18,   19,   19,   19,   19,   20,   20,   20,   20,   20,   21,   21,   21,
-	  21,   22,   22,   22,   22,   22,   23,   23,   23,   23,   24,   24,   24,   24,   24,   25,
-	  25,   25,   25,   26,   26,   26,   26,   26,   27,   27,   27,   27,   28,   28,   28,   28,
-	  28,   29,   29,   29,   29,   30,   30,   30,   30,   30,   31,   31,   31,   31,   32,   32,
-	  32,   32,   32,   33,   33,   33,   33,   34,   34,   34,   34,   34,   35,   35,   35,   35,
-	  36,   36,   36,   36,   36,   37,   37,   37,   37,   38,   38,   38,   38,   38,   39,   39,
-	  39,   39,   40,   40,   40,   40,   40,   41,   41,   41,   41,   42,   42,   42,   42,   42,
-	  43,   43,   43,   43,   44,   44,   44,   44,   44,   45,   45,   45,   45,   46,   46,   46,
-	  46,   46,   47,   47,   47,   47,   48,   48,   48,   48,   48,   49,   49,   49,   49,   50,
-	  50,   50,   50,   50,   51,   51,   51,   51,   52,   52,   52,   52,   52,   53,   53,   53,
-	  53,   54,   54,   54,   54,   54,   55,   55,   55,   55,   56,   56,   56,   56,   56,   57,
-	  57,   57,   57,   58,   58,   58,   58,   58,   59,   59,   59,   59,   60,   60,   60,   60,
-	  60,   61,   61,   61,   61,   62,   62,   62,   62,   62,   63,   63,   63,   63,   64,   64,
-	  64,   64,   64,   65,   65,   65,   65,   66,   66,   66,   66,   66,   67,   67,   67,   67,
-	  68,   68,   68,   68,   68,   69,   69,   69,   69,   70,   70,   70,   70,   70,   71,   71,
-	  71,   71,   72,   72,   72,   72,   72,   73,   73,   73,   73,   73,   74,   74,   74,   74,
-	  74,   75,   75,   75,   75,   76,   76,   76,   76,   76,   77,   77,   77,   77,   78,   78,
-	  78,   78,   79,   79,   79,   79,   79,   80,   80,   80,   80,   81,   81,   81,   81,   82,
-	  82,   82,   82,   82,   83,   83,   83,   83,   84,   84,   84,   84,   85,   85,   85,   85,
-	  86,   86,   86,   86,   87,   87,   87,   87,   88,   88,   88,   88,   89,   89,   89,   89,
-	  90,   90,   90,   90,   91,   91,   91,   91,   92,   92,   92,   92,   93,   93,   93,   93,
-	  94,   94,   94,   94,   95,   95,   95,   95,   96,   96,   96,   96,   97,   97,   97,   97,
-	  98,   98,   98,   98,   99,   99,   99,   99,  100,  100,  100,  101,  101,  101,  101,  102,
-	 102,  102,  102,  103,  103,  103,  103,  104,  104,  104,  105,  105,  105,  105,  106,  106,
-	 106,  106,  107,  107,  107,  107,  108,  108,  108,  109,  109,  109,  109,  110,  110,  110,
-	 111,  111,  111,  111,  112,  112,  112,  112,  113,  113,  113,  114,  114,  114,  114,  115,
-	 115,  115,  116,  116,  116,  116,  117,  117,  117,  118,  118,  118,  118,  119,  119,  119,
-	 120,  120,  120,  120,  121,  121,  121,  122,  122,  122,  123,  123,  123,  123,  124,  124,
-	 124,  125,  125,  125,  125,  126,  126,  126,  127,  127,  127,  128,  128,  128,  128,  129,
-	 129,  129,  130,  130,  130,  131,  131,  131,  132,  132,  132,  132,  133,  133,  133,  134,
-	 134,  134,  135,  135,  135,  136,  136,  136,  136,  137,  137,  137,  138,  138,  138,  139,
-	 139,  139,  140,  140,  140,  141,  141,  141,  142,  142,  142,  142,  143,  143,  143,  144,
-	 144,  144,  145,  145,  145,  146,  146,  146,  147,  147,  147,  148,  148,  148,  149,  149,
-	 149,  150,  150,  150,  151,  151,  151,  152,  152,  152,  153,  153,  153,  154,  154,  154,
-	 155,  155,  155,  156,  156,  156,  157,  157,  157,  158,  158,  158,  159,  159,  159,  160,
-	 160,  160,  161,  161,  161,  162,  162,  162,  163,  163,  163,  164,  164,  164,  165,  165,
-	 165,  166,  166,  167,  167,  167,  168,  168,  168,  169,  169,  169,  170,  170,  170,  171,
-	 171,  171,  172,  172,  172,  173,  173,  174,  174,  174,  175,  175,  175,  176,  176,  176,
-	 177,  177,  177,  178,  178,  179,  179,  179,  180,  180,  180,  181,  181,  181,  182,  182,
-	 183,  183,  183,  184,  184,  184,  185,  185,  186,  186,  186,  187,  187,  187,  188,  188,
-	 188,  189,  189,  190,  190,  190,  191,  191,  191,  192,  192,  193,  193,  193,  194,  194,
-	 194,  195,  195,  196,  196,  196,  197,  197,  198,  198,  198,  199,  199,  199,  200,  200,
-	 201,  201,  201,  202,  202,  203,  203,  203,  204,  204,  204,  205,  205,  206,  206,  206,
-	 207,  207,  208,  208,  208,  209,  209,  210,  210,  210,  211,  211,  212,  212,  212,  213,
-	 213,  214,  214,  214,  215,  215,  216,  216,  216,  217,  217,  218,  218,  218,  219,  219,
-	 220,  220,  220,  221,  221,  222,  222,  222,  223,  223,  224,  224,  224,  225,  225,  226,
-	 226,  227,  227,  227,  228,  228,  229,  229,  229,  230,  230,  231,  231,  232,  232,  232,
-	 233,  233,  234,  234,  234,  235,  235,  236,  236,  237,  237,  237,  238,  238,  239,  239,
-	 240,  240,  240,  241,  241,  242,  242,  243,  243,  243,  244,  244,  245,  245,  246,  246,
-	 246,  247,  247,  248,  248,  249,  249,  249,  250,  250,  251,  251,  252,  252,  252,  253,
-	 253,  254,  254,  255,  255,  256,  256,  256,  257,  257,  258,  258,  259,  259,  260,  260,
-	 260,  261,  261,  262,  262,  263,  263,  264,  264,  264,  265,  265,  266,  266,  267,  267,
-	 268,  268,  269,  269,  269,  270,  270,  271,  271,  272,  272,  273,  273,  274,  274,  274,
-	 275,  275,  276,  276,  277,  277,  278,  278,  279,  279,  279,  280,  280,  281,  281,  282,
-	 282,  283,  283,  284,  284,  285,  285,  286,  286,  286,  287,  287,  288,  288,  289,  289,
-	 290,  290,  291,  291,  292,  292,  293,  293,  294,  294,  295,  295,  295,  296,  296,  297,
-	 297,  298,  298,  299,  299,  300,  300,  301,  301,  302,  302,  303,  303,  304,  304,  305,
-	 305,  306,  306,  307,  307,  308,  308,  309,  309,  309,  310,  310,  311,  311,  312,  312,
-	 313,  313,  314,  314,  315,  315,  316,  316,  317,  317,  318,  318,  319,  319,  320,  320,
-	 321,  321,  322,  322,  323,  323,  324,  324,  325,  325,  326,  326,  327,  327,  328,  328,
-	 329,  329,  330,  330,  331,  331,  332,  332,  333,  333,  334,  335,  335,  336,  336,  337,
-	 337,  338,  338,  339,  339,  340,  340,  341,  341,  342,  342,  343,  343,  344,  344,  345,
-	 345,  346,  346,  347,  347,  348,  348,  349,  349,  350,  351,  351,  352,  352,  353,  353,
-	 354,  354,  355,  355,  356,  356,  357,  357,  358,  358,  359,  360,  360,  361,  361,  362,
-	 362,  363,  363,  364,  364,  365,  365,  366,  366,  367,  368,  368,  369,  369,  370,  370,
-	 371,  371,  372,  372,  373,  373,  374,  375,  375,  376,  376,  377,  377,  378,  378,  379,
-	 379,  380,  381,  381,  382,  382,  383,  383,  384,  384,  385,  386,  386,  387,  387,  388,
-	 388,  389,  389,  390,  391,  391,  392,  392,  393,  393,  394,  394,  395,  396,  396,  397,
-	 397,  398,  398,  399,  399,  400,  401,  401,  402,  402,  403,  403,  404,  405,  405,  406,
-	 406,  407,  407,  408,  409,  409,  410,  410,  411,  411,  412,  413,  413,  414,  414,  415,
-	 415,  416,  417,  417,  418,  418,  419,  419,  420,  421,  421,  422,  422,  423,  424,  424,
-	 425,  425,  426,  426,  427,  428,  428,  429,  429,  430,  431,  431,  432,  432,  433,  433,
-	 434,  435,  435,  436,  436,  437,  438,  438,  439,  439,  440,  441,  441,  442,  442,  443,
-	 444,  444,  445,  445,  446,  447,  447,  448,  448,  449,  450,  450,  451,  451,  452,  453,
-	 453,  454,  454,  455,  456,  456,  457,  457,  458,  459,  459,  460,  460,  461,  462,  462,
-	 463,  463,  464,  465,  465,  466,  467,  467,  468,  468,  469,  470,  470,  471,  471,  472,
-	 473,  473,  474,  475,  475,  476,  476,  477,  478,  478,  479,  480,  480,  481,  481,  482,
-	 483,  483,  484,  485,  485,  486,  486,  487,  488,  488,  489,  490,  490,  491,  491,  492,
-	 493,  493,  494,  495,  495,  496,  497,  497,  498,  498,  499,  500,  500,  501,  502,  502,
-	 503,  504,  504,  505,  505,  506,  507,  507,  508,  509,  509,  510,  511,  511,  512,  513,
-	 513,  514,  514,  515,  516,  516,  517,  518,  518,  519,  520,  520,  521,  522,  522,  523,
-	 524,  524,  525,  526,  526,  527,  528,  528,  529,  529,  530,  531,  531,  532,  533,  533,
-	 534,  535,  535,  536,  537,  537,  538,  539,  539,  540,  541,  541,  542,  543,  543,  544,
-	 545,  545,  546,  547,  547,  548,  549,  549,  550,  551,  551,  552,  553,  553,  554,  555,
-	 555,  556,  557,  557,  558,  559,  560,  560,  561,  562,  562,  563,  564,  564,  565,  566,
-	 566,  567,  568,  568,  569,  570,  570,  571,  572,  572,  573,  574,  575,  575,  576,  577,
-	 577,  578,  579,  579,  580,  581,  581,  582,  583,  584,  584,  585,  586,  586,  587,  588,
-	 588,  589,  590,  590,  591,  592,  593,  593,  594,  595,  595,  596,  597,  598,  598,  599,
-	 600,  600,  601,  602,  602,  603,  604,  605,  605,  606,  607,  607,  608,  609,  610,  610,
-	 611,  612,  612,  613,  614,  615,  615,  616,  617,  617,  618,  619,  620,  620,  621,  622,
-	 622,  623,  624,  625,  625,  626,  627,  627,  628,  629,  630,  630,  631,  632,  632,  633,
-	 634,  635,  635,  636,  637,  638,  638,  639,  640,  640,  641,  642,  643,  643,  644,  645,
-	 646,  646,  647,  648,  649,  649,  650,  651,  652,  652,  653,  654,  654,  655,  656,  657,
-	 657,  658,  659,  660,  660,  661,  662,  663,  663,  664,  665,  666,  666,  667,  668,  669,
-	 669,  670,  671,  672,  672,  673,  674,  675,  675,  676,  677,  678,  678,  679,  680,  681,
-	 681,  682,  683,  684,  684,  685,  686,  687,  687,  688,  689,  690,  690,  691,  692,  693,
-	 694,  694,  695,  696,  697,  697,  698,  699,  700,  700,  701,  702,  703,  703,  704,  705,
-	 706,  707,  707,  708,  709,  710,  710,  711,  712,  713,  714,  714,  715,  716,  717,  717,
-	 718,  719,  720,  720,  721,  722,  723,  724,  724,  725,  726,  727,  728,  728,  729,  730,
-	 731,  731,  732,  733,  734,  735,  735,  736,  737,  738,  739,  739,  740,  741,  742,  742,
-	 743,  744,  745,  746,  746,  747,  748,  749,  750,  750,  751,  752,  753,  754,  754,  755,
-	 756,  757,  758,  758,  759,  760,  761,  762,  762,  763,  764,  765,  766,  766,  767,  768,
-	 769,  770,  771,  771,  772,  773,  774,  775,  775,  776,  777,  778,  779,  779,  780,  781,
-	 782,  783,  783,  784,  785,  786,  787,  788,  788,  789,  790,  791,  792,  793,  793,  794,
-	 795,  796,  797,  797,  798,  799,  800,  801,  802,  802,  803,  804,  805,  806,  807,  807,
-	 808,  809,  810,  811,  812,  812,  813,  814,  815,  816,  817,  817,  818,  819,  820,  821,
-	 822,  822,  823,  824,  825,  826,  827,  827,  828,  829,  830,  831,  832,  832,  833,  834,
-	 835,  836,  837,  838,  838,  839,  840,  841,  842,  843,  843,  844,  845,  846,  847,  848,
-	 849,  849,  850,  851,  852,  853,  854,  855,  855,  856,  857,  858,  859,  860,  861,  861,
-	 862,  863,  864,  865,  866,  867,  867,  868,  869,  870,  871,  872,  873,  873,  874,  875,
-	 876,  877,  878,  879,  880,  880,  881,  882,  883,  884,  885,  886,  887,  887,  888,  889,
-	 890,  891,  892,  893,  894,  894,  895,  896,  897,  898,  899,  900,  901,  901,  902,  903,
-	 904,  905,  906,  907,  908,  909,  909,  910,  911,  912,  913,  914,  915,  916,  916,  917,
-	 918,  919,  920,  921,  922,  923,  924,  925,  925,  926,  927,  928,  929,  930,  931,  932,
-	 933,  933,  934,  935,  936,  937,  938,  939,  940,  941,  942,  942,  943,  944,  945,  946,
-	 947,  948,  949,  950,  951,  952,  952,  953,  954,  955,  956,  957,  958,  959,  960,  961,
-	 962,  962,  963,  964,  965,  966,  967,  968,  969,  970,  971,  972,  973,  973,  974,  975,
-	 976,  977,  978,  979,  980,  981,  982,  983,  984,  985,  985,  986,  987,  988,  989,  990,
-	 991,  992,  993,  994,  995,  996,  997,  998,  998,  999, 1000, 1001, 1002, 1003, 1004, 1005,
-	1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020,
-	1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1030, 1031, 1032, 1033, 1034, 1035,
-	1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1050,
-	1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066,
-	1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1078, 1079, 1080, 1081,
-	1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097,
-	1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113,
-	1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129,
-	1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145,
-	1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161,
-	1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177,
-	1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1193, 1194,
-	1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210,
-	1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1223, 1224, 1225, 1226, 1227,
-	1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243,
-	1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260,
-	1261, 1262, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277,
-	1278, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1295,
-	1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1309, 1310, 1311, 1312,
-	1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329,
-	1330, 1331, 1332, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1345, 1346, 1347,
-	1348, 1349, 1350, 1351, 1352, 1353, 1354, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364,
-	1365, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1377, 1378, 1379, 1380, 1381, 1382,
-	1383, 1384, 1385, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1396, 1397, 1398, 1399, 1400,
-	1401, 1402, 1403, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1414, 1415, 1416, 1417, 1418,
-	1419, 1420, 1421, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436,
-	1437, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1448, 1449, 1450, 1451, 1452, 1453, 1455,
-	1456, 1457, 1458, 1459, 1460, 1461, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1471, 1472, 1473,
-	1474, 1475, 1476, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1486, 1487, 1488, 1489, 1490, 1491,
-	1493, 1494, 1495, 1496, 1497, 1498, 1500, 1501, 1502, 1503, 1504, 1505, 1507, 1508, 1509, 1510,
-	1511, 1512, 1514, 1515, 1516, 1517, 1518, 1519, 1521, 1522, 1523, 1524, 1525, 1527, 1528, 1529,
-	1530, 1531, 1532, 1534, 1535, 1536, 1537, 1538, 1540, 1541, 1542, 1543, 1544, 1545, 1547, 1548,
-	1549, 1550, 1551, 1553, 1554, 1555, 1556, 1557, 1559, 1560, 1561, 1562, 1563, 1564, 1566, 1567,
-	1568, 1569, 1570, 1572, 1573, 1574, 1575, 1576, 1578, 1579, 1580, 1581, 1582, 1584, 1585, 1586,
-	1587, 1588, 1590, 1591, 1592, 1593, 1594, 1596, 1597, 1598, 1599, 1601, 1602, 1603, 1604, 1605,
-	1607, 1608, 1609, 1610, 1611, 1613, 1614, 1615, 1616, 1617, 1619, 1620, 1621, 1622, 1624, 1625,
-	1626, 1627, 1628, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1641, 1642, 1643, 1644,
-	1646, 1647, 1648, 1649, 1650, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1660, 1662, 1663, 1664,
-	1665, 1667, 1668, 1669, 1670, 1671, 1673, 1674, 1675, 1676, 1678, 1679, 1680, 1681, 1683, 1684,
-	1685, 1686, 1688, 1689, 1690, 1691, 1693, 1694, 1695, 1696, 1698, 1699, 1700, 1701, 1703, 1704,
-	1705, 1706, 1708, 1709, 1710, 1711, 1713, 1714, 1715, 1716, 1718, 1719, 1720, 1721, 1723, 1724,
-	1725, 1726, 1728, 1729, 1730, 1731, 1733, 1734, 1735, 1737, 1738, 1739, 1740, 1742, 1743, 1744,
-	1745, 1747, 1748, 1749, 1750, 1752, 1753, 1754, 1756, 1757, 1758, 1759, 1761, 1762, 1763, 1764,
-	1766, 1767, 1768, 1770, 1771, 1772, 1773, 1775, 1776, 1777, 1778, 1780, 1781, 1782, 1784, 1785,
-	1786, 1787, 1789, 1790, 1791, 1793, 1794, 1795, 1796, 1798, 1799, 1800, 1802, 1803, 1804, 1806,
-	1807, 1808, 1809, 1811, 1812, 1813, 1815, 1816, 1817, 1818, 1820, 1821, 1822, 1824, 1825, 1826,
-	1828, 1829, 1830, 1831, 1833, 1834, 1835, 1837, 1838, 1839, 1841, 1842, 1843, 1844, 1846, 1847,
-	1848, 1850, 1851, 1852, 1854, 1855, 1856, 1858, 1859, 1860, 1862, 1863, 1864, 1865, 1867, 1868,
-	1869, 1871, 1872, 1873, 1875, 1876, 1877, 1879, 1880, 1881, 1883, 1884, 1885, 1887, 1888, 1889,
-	1891, 1892, 1893, 1894, 1896, 1897, 1898, 1900, 1901, 1902, 1904, 1905, 1906, 1908, 1909, 1910,
-	1912, 1913, 1914, 1916, 1917, 1918, 1920, 1921, 1922, 1924, 1925, 1926, 1928, 1929, 1930, 1932,
-	1933, 1935, 1936, 1937, 1939, 1940, 1941, 1943, 1944, 1945, 1947, 1948, 1949, 1951, 1952, 1953,
-	1955, 1956, 1957, 1959, 1960, 1961, 1963, 1964, 1965, 1967, 1968, 1970, 1971, 1972, 1974, 1975,
-	1976, 1978, 1979, 1980, 1982, 1983, 1984, 1986, 1987, 1989, 1990, 1991, 1993, 1994, 1995, 1997,
-	1998, 1999, 2001, 2002, 2004, 2005, 2006, 2008, 2009, 2010, 2012, 2013, 2015, 2016, 2017, 2019,
-	2020, 2021, 2023, 2024, 2026, 2027, 2028, 2030, 2031, 2032, 2034, 2035, 2037, 2038, 2039, 2041,
-	2042, 2043, 2045, 2046, 2048, 2049, 2050, 2052, 2053, 2055, 2056, 2057, 2059, 2060, 2061, 2063,
-	2064, 2066, 2067, 2068, 2070, 2071, 2073, 2074, 2075, 2077, 2078, 2080, 2081, 2082, 2084, 2085,
-	2087, 2088, 2089, 2091, 2092, 2094, 2095, 2096, 2098, 2099, 2101, 2102, 2103, 2105, 2106, 2108,
-	2109, 2110, 2112, 2113, 2115, 2116, 2117, 2119, 2120, 2122, 2123, 2124, 2126, 2127, 2129, 2130,
-	2132, 2133, 2134, 2136, 2137, 2139, 2140, 2141, 2143, 2144, 2146, 2147, 2149, 2150, 2151, 2153,
-	2154, 2156, 2157, 2159, 2160, 2161, 2163, 2164, 2166, 2167, 2169, 2170, 2171, 2173, 2174, 2176,
-	2177, 2179, 2180, 2181, 2183, 2184, 2186, 2187, 2189, 2190, 2191, 2193, 2194, 2196, 2197, 2199,
-	2200, 2202, 2203, 2204, 2206, 2207, 2209, 2210, 2212, 2213, 2214, 2216, 2217, 2219, 2220, 2222,
-	2223, 2225, 2226, 2228, 2229, 2230, 2232, 2233, 2235, 2236, 2238, 2239, 2241, 2242, 2243, 2245,
-	2246, 2248, 2249, 2251, 2252, 2254, 2255, 2257, 2258, 2260, 2261, 2262, 2264, 2265, 2267, 2268,
-	2270, 2271, 2273, 2274, 2276, 2277, 2279, 2280, 2282, 2283, 2284, 2286, 2287, 2289, 2290, 2292,
-	2293, 2295, 2296, 2298, 2299, 2301, 2302, 2304, 2305, 2307, 2308, 2310, 2311, 2312, 2314, 2315,
-	2317, 2318, 2320, 2321, 2323, 2324, 2326, 2327, 2329, 2330, 2332, 2333, 2335, 2336, 2338, 2339,
-	2341, 2342, 2344, 2345, 2347, 2348, 2350, 2351, 2353, 2354, 2356, 2357, 2359, 2360, 2362, 2363,
-	2365, 2366, 2368, 2369, 2371, 2372, 2374, 2375, 2377, 2378, 2380, 2381, 2383, 2384, 2386, 2387,
-	2389, 2390, 2392, 2393, 2395, 2396, 2398, 2399, 2401, 2402, 2404, 2405, 2407, 2408, 2410, 2411,
-	2413, 2414, 2416, 2417, 2419, 2420, 2422, 2423, 2425, 2426, 2428, 2429, 2431, 2433, 2434, 2436,
-	2437, 2439, 2440, 2442, 2443, 2445, 2446, 2448, 2449, 2451, 2452, 2454, 2455, 2457, 2458, 2460,
-	2462, 2463, 2465, 2466, 2468, 2469, 2471, 2472, 2474, 2475, 2477, 2478, 2480, 2481, 2483, 2485,
-	2486, 2488, 2489, 2491, 2492, 2494, 2495, 2497, 2498, 2500, 2502, 2503, 2505, 2506, 2508, 2509,
-	2511, 2512, 2514, 2515, 2517, 2519, 2520, 2522, 2523, 2525, 2526, 2528, 2529, 2531, 2533, 2534,
-	2536, 2537, 2539, 2540, 2542, 2543, 2545, 2547, 2548, 2550, 2551, 2553, 2554, 2556, 2557, 2559,
-	2561, 2562, 2564, 2565, 2567, 2568, 2570, 2572, 2573, 2575, 2576, 2578, 2579, 2581, 2583, 2584,
-	2586, 2587, 2589, 2590, 2592, 2594, 2595, 2597, 2598, 2600, 2601, 2603, 2605, 2606, 2608, 2609,
-	2611, 2613, 2614, 2616, 2617, 2619, 2620, 2622, 2624, 2625, 2627, 2628, 2630, 2632, 2633, 2635,
-	2636, 2638, 2640, 2641, 2643, 2644, 2646, 2647, 2649, 2651, 2652, 2654, 2655, 2657, 2659, 2660,
-	2662, 2663, 2665, 2667, 2668, 2670, 2671, 2673, 2675, 2676, 2678, 2679, 2681, 2683, 2684, 2686,
-	2687, 2689, 2691, 2692, 2694, 2696, 2697, 2699, 2700, 2702, 2704, 2705, 2707, 2708, 2710, 2712,
-	2713, 2715, 2716, 2718, 2720, 2721, 2723, 2725, 2726, 2728, 2729, 2731, 2733, 2734, 2736, 2738,
-	2739, 2741, 2742, 2744, 2746, 2747, 2749, 2751, 2752, 2754, 2755, 2757, 2759, 2760, 2762, 2764,
-	2765, 2767, 2769, 2770, 2772, 2773, 2775, 2777, 2778, 2780, 2782, 2783, 2785, 2787, 2788, 2790,
-	2791, 2793, 2795, 2796, 2798, 2800, 2801, 2803, 2805, 2806, 2808, 2810, 2811, 2813, 2814, 2816,
-	2818, 2819, 2821, 2823, 2824, 2826, 2828, 2829, 2831, 2833, 2834, 2836, 2838, 2839, 2841, 2843,
-	2844, 2846, 2848, 2849, 2851, 2853, 2854, 2856, 2857, 2859, 2861, 2862, 2864, 2866, 2867, 2869,
-	2871, 2872, 2874, 2876, 2877, 2879, 2881, 2882, 2884, 2886, 2888, 2889, 2891, 2893, 2894, 2896,
-	2898, 2899, 2901, 2903, 2904, 2906, 2908, 2909, 2911, 2913, 2914, 2916, 2918, 2919, 2921, 2923,
-	2924, 2926, 2928, 2929, 2931, 2933, 2935, 2936, 2938, 2940, 2941, 2943, 2945, 2946, 2948, 2950,
-	2951, 2953, 2955, 2956, 2958, 2960, 2962, 2963, 2965, 2967, 2968, 2970, 2972, 2973, 2975, 2977,
-	2979, 2980, 2982, 2984, 2985, 2987, 2989, 2990, 2992, 2994, 2996, 2997, 2999, 3001, 3002, 3004,
-	3006, 3008, 3009, 3011, 3013, 3014, 3016, 3018, 3020, 3021, 3023, 3025, 3026, 3028, 3030, 3032,
-	3033, 3035, 3037, 3038, 3040, 3042, 3044, 3045, 3047, 3049, 3050, 3052, 3054, 3056, 3057, 3059,
-	3061, 3063, 3064, 3066, 3068, 3069, 3071, 3073, 3075, 3076, 3078, 3080, 3082, 3083, 3085, 3087,
-	3089, 3090, 3092, 3094, 3095, 3097, 3099, 3101, 3102, 3104, 3106, 3108, 3109, 3111, 3113, 3115,
-	3116, 3118, 3120, 3122, 3123, 3125, 3127, 3129, 3130, 3132, 3134, 3136, 3137, 3139, 3141, 3143,
-	3144, 3146, 3148, 3150, 3151, 3153, 3155, 3157, 3158, 3160, 3162, 3164, 3165, 3167, 3169, 3171,
-	3172, 3174, 3176, 3178, 3179, 3181, 3183, 3185, 3187, 3188, 3190, 3192, 3194, 3195, 3197, 3199,
-	3201, 3202, 3204, 3206, 3208, 3209, 3211, 3213, 3215, 3217, 3218, 3220, 3222, 3224, 3225, 3227,
-	3229, 3231, 3233, 3234, 3236, 3238, 3240, 3241, 3243, 3245, 3247, 3249, 3250, 3252, 3254, 3256,
-	3258, 3259, 3261, 3263, 3265, 3266, 3268, 3270, 3272, 3274, 3275, 3277, 3279, 3281, 3283, 3284,
-	3286, 3288, 3290, 3292, 3293, 3295, 3297, 3299, 3301, 3302, 3304, 3306, 3308, 3310, 3311, 3313,
-	3315, 3317, 3319, 3320, 3322, 3324, 3326, 3328, 3329, 3331, 3333, 3335, 3337, 3338, 3340, 3342,
-	3344, 3346, 3348, 3349, 3351, 3353, 3355, 3357, 3358, 3360, 3362, 3364, 3366, 3368, 3369, 3371,
-	3373, 3375, 3377, 3378, 3380, 3382, 3384, 3386, 3388, 3389, 3391, 3393, 3395, 3397, 3399, 3400,
-	3402, 3404, 3406, 3408, 3410, 3411, 3413, 3415, 3417, 3419, 3421, 3422, 3424, 3426, 3428, 3430,
-	3432, 3433, 3435, 3437, 3439, 3441, 3443, 3444, 3446, 3448, 3450, 3452, 3454, 3455, 3457, 3459,
-	3461, 3463, 3465, 3467, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3481, 3483, 3485, 3487, 3489,
-	3491, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3507, 3509, 3511, 3513, 3515, 3517, 3519,
-	3520, 3522, 3524, 3526, 3528, 3530, 3532, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3548,
-	3550, 3552, 3554, 3556, 3558, 3560, 3562, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3578,
-	3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609,
-	3611, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3629, 3631, 3633, 3635, 3637, 3639,
-	3641, 3643, 3645, 3647, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3667, 3669,
-	3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3688, 3690, 3692, 3694, 3696, 3698, 3700,
-	3702, 3704, 3706, 3708, 3710, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731,
-	3733, 3735, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762,
-	3764, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793,
-	3795, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824,
-	3826, 3828, 3830, 3832, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855,
-	3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 3875, 3877, 3879, 3881, 3883, 3884, 3886,
-	3888, 3890, 3892, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3908, 3910, 3912, 3914, 3916, 3918,
-	3920, 3922, 3924, 3926, 3928, 3930, 3932, 3934, 3936, 3938, 3940, 3942, 3944, 3946, 3948, 3950,
-	3952, 3954, 3956, 3958, 3960, 3962, 3964, 3966, 3968, 3970, 3972, 3974, 3976, 3978, 3980, 3982,
-	3984, 3986, 3988, 3990, 3992, 3994, 3996, 3998, 4000, 4002, 4004, 4006, 4008, 4010, 4012, 4014,
-	4016, 4018, 4020, 4022, 4024, 4026, 4028, 4030, 4032, 4034, 4036, 4038, 4040, 4042, 4044, 4046,
-	4048, 4050, 4052, 4054, 4056, 4058, 4060, 4062, 4064, 4066, 4068, 4070, 4072, 4074, 4076, 4078,
-	4080,
-};
-
-/* Generated table */
-const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = {
-	   0,    5,    9,   14,   18,   22,   27,   32,   36,   41,   45,   50,   54,   59,   63,   68,
-	  72,   77,   81,   86,   90,   95,   99,  104,  108,  113,  117,  122,  126,  131,  135,  139,
-	 144,  149,  153,  158,  162,  167,  171,  176,  180,  185,  189,  194,  198,  203,  207,  212,
-	 216,  221,  225,  230,  234,  239,  243,  248,  252,  257,  261,  266,  270,  275,  279,  284,
-	 288,  293,  297,  302,  306,  311,  315,  320,  324,  328,  334,  338,  343,  347,  352,  356,
-	 360,  365,  369,  373,  377,  381,  386,  390,  394,  398,  402,  406,  410,  414,  418,  422,
-	 426,  430,  433,  437,  441,  445,  449,  452,  456,  460,  464,  467,  471,  475,  478,  482,
-	 485,  489,  492,  496,  499,  503,  506,  510,  513,  517,  520,  524,  527,  530,  534,  537,
-	 540,  544,  547,  550,  554,  557,  560,  563,  566,  570,  573,  576,  579,  582,  586,  589,
-	 592,  595,  598,  601,  604,  607,  610,  613,  616,  619,  622,  625,  628,  631,  634,  637,
-	 640,  643,  646,  649,  652,  655,  658,  660,  663,  666,  669,  672,  675,  677,  680,  683,
-	 686,  689,  691,  694,  697,  700,  702,  705,  708,  711,  713,  716,  719,  721,  724,  727,
-	 729,  732,  735,  737,  740,  743,  745,  748,  750,  753,  756,  758,  761,  763,  766,  768,
-	 771,  773,  776,  779,  781,  784,  786,  789,  791,  794,  796,  799,  801,  803,  806,  808,
-	 811,  813,  816,  818,  821,  823,  825,  828,  830,  833,  835,  837,  840,  842,  844,  847,
-	 849,  851,  854,  856,  858,  861,  863,  865,  868,  870,  872,  875,  877,  879,  881,  884,
-	 886,  888,  891,  893,  895,  897,  900,  902,  904,  906,  908,  911,  913,  915,  917,  919,
-	 922,  924,  926,  928,  930,  933,  935,  937,  939,  941,  943,  946,  948,  950,  952,  954,
-	 956,  958,  960,  963,  965,  967,  969,  971,  973,  975,  977,  979,  981,  984,  986,  988,
-	 990,  992,  994,  996,  998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020,
-	1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1040, 1042, 1044, 1046, 1048, 1050, 1052,
-	1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083,
-	1085, 1087, 1089, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1107, 1109, 1111, 1113,
-	1115, 1117, 1119, 1120, 1122, 1124, 1126, 1128, 1130, 1131, 1133, 1135, 1137, 1139, 1141, 1142,
-	1144, 1146, 1148, 1150, 1151, 1153, 1155, 1157, 1159, 1160, 1162, 1164, 1166, 1168, 1169, 1171,
-	1173, 1175, 1176, 1178, 1180, 1182, 1184, 1185, 1187, 1189, 1191, 1192, 1194, 1196, 1198, 1199,
-	1201, 1203, 1204, 1206, 1208, 1210, 1211, 1213, 1215, 1217, 1218, 1220, 1222, 1223, 1225, 1227,
-	1228, 1230, 1232, 1234, 1235, 1237, 1239, 1240, 1242, 1244, 1245, 1247, 1249, 1250, 1252, 1254,
-	1255, 1257, 1259, 1260, 1262, 1264, 1265, 1267, 1269, 1270, 1272, 1274, 1275, 1277, 1279, 1280,
-	1282, 1283, 1285, 1287, 1288, 1290, 1292, 1293, 1295, 1296, 1298, 1300, 1301, 1303, 1305, 1306,
-	1308, 1309, 1311, 1313, 1314, 1316, 1317, 1319, 1321, 1322, 1324, 1325, 1327, 1328, 1330, 1332,
-	1333, 1335, 1336, 1338, 1339, 1341, 1343, 1344, 1346, 1347, 1349, 1350, 1352, 1354, 1355, 1357,
-	1358, 1360, 1361, 1363, 1364, 1366, 1367, 1369, 1371, 1372, 1374, 1375, 1377, 1378, 1380, 1381,
-	1383, 1384, 1386, 1387, 1389, 1390, 1392, 1393, 1395, 1396, 1398, 1399, 1401, 1402, 1404, 1405,
-	1407, 1408, 1410, 1411, 1413, 1414, 1416, 1417, 1419, 1420, 1422, 1423, 1425, 1426, 1428, 1429,
-	1431, 1432, 1434, 1435, 1437, 1438, 1440, 1441, 1442, 1444, 1445, 1447, 1448, 1450, 1451, 1453,
-	1454, 1456, 1457, 1458, 1460, 1461, 1463, 1464, 1466, 1467, 1469, 1470, 1471, 1473, 1474, 1476,
-	1477, 1479, 1480, 1481, 1483, 1484, 1486, 1487, 1489, 1490, 1491, 1493, 1494, 1496, 1497, 1498,
-	1500, 1501, 1503, 1504, 1505, 1507, 1508, 1510, 1511, 1512, 1514, 1515, 1517, 1518, 1519, 1521,
-	1522, 1524, 1525, 1526, 1528, 1529, 1531, 1532, 1533, 1535, 1536, 1537, 1539, 1540, 1542, 1543,
-	1544, 1546, 1547, 1548, 1550, 1551, 1553, 1554, 1555, 1557, 1558, 1559, 1561, 1562, 1563, 1565,
-	1566, 1567, 1569, 1570, 1571, 1573, 1574, 1576, 1577, 1578, 1580, 1581, 1582, 1584, 1585, 1586,
-	1588, 1589, 1590, 1592, 1593, 1594, 1596, 1597, 1598, 1600, 1601, 1602, 1603, 1605, 1606, 1607,
-	1609, 1610, 1611, 1613, 1614, 1615, 1617, 1618, 1619, 1621, 1622, 1623, 1624, 1626, 1627, 1628,
-	1630, 1631, 1632, 1634, 1635, 1636, 1637, 1639, 1640, 1641, 1643, 1644, 1645, 1647, 1648, 1649,
-	1650, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1661, 1662, 1663, 1664, 1666, 1667, 1668, 1670,
-	1671, 1672, 1673, 1675, 1676, 1677, 1678, 1680, 1681, 1682, 1683, 1685, 1686, 1687, 1688, 1690,
-	1691, 1692, 1693, 1695, 1696, 1697, 1698, 1700, 1701, 1702, 1703, 1705, 1706, 1707, 1708, 1710,
-	1711, 1712, 1713, 1715, 1716, 1717, 1718, 1720, 1721, 1722, 1723, 1724, 1726, 1727, 1728, 1729,
-	1731, 1732, 1733, 1734, 1736, 1737, 1738, 1739, 1740, 1742, 1743, 1744, 1745, 1746, 1748, 1749,
-	1750, 1751, 1753, 1754, 1755, 1756, 1757, 1759, 1760, 1761, 1762, 1763, 1765, 1766, 1767, 1768,
-	1769, 1771, 1772, 1773, 1774, 1775, 1777, 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, 1787,
-	1788, 1790, 1791, 1792, 1793, 1794, 1796, 1797, 1798, 1799, 1800, 1801, 1803, 1804, 1805, 1806,
-	1807, 1809, 1810, 1811, 1812, 1813, 1814, 1816, 1817, 1818, 1819, 1820, 1821, 1823, 1824, 1825,
-	1826, 1827, 1828, 1829, 1831, 1832, 1833, 1834, 1835, 1836, 1838, 1839, 1840, 1841, 1842, 1843,
-	1845, 1846, 1847, 1848, 1849, 1850, 1851, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1861, 1862,
-	1863, 1864, 1865, 1866, 1867, 1868, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1878, 1879, 1880,
-	1881, 1882, 1883, 1884, 1885, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1896, 1897, 1898,
-	1899, 1900, 1901, 1902, 1903, 1904, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1916,
-	1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1927, 1928, 1929, 1930, 1931, 1932, 1933,
-	1934, 1935, 1936, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1950, 1951,
-	1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1963, 1964, 1965, 1966, 1967, 1968,
-	1969, 1970, 1971, 1972, 1973, 1974, 1975, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985,
-	1986, 1987, 1988, 1989, 1990, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-	2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,
-	2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2031, 2032, 2033, 2034, 2035, 2036,
-	2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052,
-	2053, 2054, 2055, 2056, 2057, 2058, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069,
-	2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085,
-	2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101,
-	2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117,
-	2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133,
-	2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149,
-	2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165,
-	2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180,
-	2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196,
-	2197, 2198, 2199, 2200, 2201, 2202, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211,
-	2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2224, 2225, 2226,
-	2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2241,
-	2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257,
-	2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2271,
-	2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2283, 2284, 2285, 2286,
-	2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2295, 2296, 2297, 2298, 2299, 2300, 2301,
-	2302, 2303, 2304, 2305, 2306, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316,
-	2317, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2327, 2328, 2329, 2330,
-	2331, 2332, 2333, 2334, 2335, 2336, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345,
-	2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2354, 2355, 2356, 2357, 2358, 2359,
-	2360, 2361, 2362, 2363, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2371, 2372, 2373,
-	2374, 2375, 2376, 2377, 2378, 2379, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2386, 2387,
-	2388, 2389, 2390, 2391, 2392, 2393, 2394, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2401,
-	2402, 2403, 2404, 2405, 2406, 2407, 2408, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2415,
-	2416, 2417, 2418, 2419, 2420, 2421, 2422, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2428, 2429,
-	2430, 2431, 2432, 2433, 2434, 2435, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2441, 2442, 2443,
-	2444, 2445, 2446, 2447, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2453, 2454, 2455, 2456, 2457,
-	2458, 2459, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2465, 2466, 2467, 2468, 2469, 2470, 2471,
-	2471, 2472, 2473, 2474, 2475, 2476, 2477, 2477, 2478, 2479, 2480, 2481, 2482, 2482, 2483, 2484,
-	2485, 2486, 2487, 2488, 2488, 2489, 2490, 2491, 2492, 2493, 2493, 2494, 2495, 2496, 2497, 2498,
-	2499, 2499, 2500, 2501, 2502, 2503, 2504, 2504, 2505, 2506, 2507, 2508, 2509, 2509, 2510, 2511,
-	2512, 2513, 2514, 2514, 2515, 2516, 2517, 2518, 2519, 2519, 2520, 2521, 2522, 2523, 2524, 2524,
-	2525, 2526, 2527, 2528, 2529, 2529, 2530, 2531, 2532, 2533, 2534, 2534, 2535, 2536, 2537, 2538,
-	2539, 2539, 2540, 2541, 2542, 2543, 2544, 2544, 2545, 2546, 2547, 2548, 2548, 2549, 2550, 2551,
-	2552, 2553, 2553, 2554, 2555, 2556, 2557, 2558, 2558, 2559, 2560, 2561, 2562, 2562, 2563, 2564,
-	2565, 2566, 2567, 2567, 2568, 2569, 2570, 2571, 2571, 2572, 2573, 2574, 2575, 2576, 2576, 2577,
-	2578, 2579, 2580, 2580, 2581, 2582, 2583, 2584, 2584, 2585, 2586, 2587, 2588, 2589, 2589, 2590,
-	2591, 2592, 2593, 2593, 2594, 2595, 2596, 2597, 2597, 2598, 2599, 2600, 2601, 2601, 2602, 2603,
-	2604, 2605, 2605, 2606, 2607, 2608, 2609, 2610, 2610, 2611, 2612, 2613, 2614, 2614, 2615, 2616,
-	2617, 2618, 2618, 2619, 2620, 2621, 2622, 2622, 2623, 2624, 2625, 2626, 2626, 2627, 2628, 2629,
-	2630, 2630, 2631, 2632, 2633, 2634, 2634, 2635, 2636, 2637, 2637, 2638, 2639, 2640, 2641, 2641,
-	2642, 2643, 2644, 2645, 2645, 2646, 2647, 2648, 2649, 2649, 2650, 2651, 2652, 2653, 2653, 2654,
-	2655, 2656, 2656, 2657, 2658, 2659, 2660, 2660, 2661, 2662, 2663, 2664, 2664, 2665, 2666, 2667,
-	2668, 2668, 2669, 2670, 2671, 2671, 2672, 2673, 2674, 2675, 2675, 2676, 2677, 2678, 2678, 2679,
-	2680, 2681, 2682, 2682, 2683, 2684, 2685, 2686, 2686, 2687, 2688, 2689, 2689, 2690, 2691, 2692,
-	2693, 2693, 2694, 2695, 2696, 2696, 2697, 2698, 2699, 2700, 2700, 2701, 2702, 2703, 2703, 2704,
-	2705, 2706, 2706, 2707, 2708, 2709, 2710, 2710, 2711, 2712, 2713, 2713, 2714, 2715, 2716, 2717,
-	2717, 2718, 2719, 2720, 2720, 2721, 2722, 2723, 2723, 2724, 2725, 2726, 2727, 2727, 2728, 2729,
-	2730, 2730, 2731, 2732, 2733, 2733, 2734, 2735, 2736, 2736, 2737, 2738, 2739, 2740, 2740, 2741,
-	2742, 2743, 2743, 2744, 2745, 2746, 2746, 2747, 2748, 2749, 2749, 2750, 2751, 2752, 2752, 2753,
-	2754, 2755, 2755, 2756, 2757, 2758, 2759, 2759, 2760, 2761, 2762, 2762, 2763, 2764, 2765, 2765,
-	2766, 2767, 2768, 2768, 2769, 2770, 2771, 2771, 2772, 2773, 2774, 2774, 2775, 2776, 2777, 2777,
-	2778, 2779, 2780, 2780, 2781, 2782, 2783, 2783, 2784, 2785, 2786, 2786, 2787, 2788, 2789, 2789,
-	2790, 2791, 2792, 2792, 2793, 2794, 2795, 2795, 2796, 2797, 2798, 2798, 2799, 2800, 2801, 2801,
-	2802, 2803, 2804, 2804, 2805, 2806, 2807, 2807, 2808, 2809, 2810, 2810, 2811, 2812, 2813, 2813,
-	2814, 2815, 2815, 2816, 2817, 2818, 2818, 2819, 2820, 2821, 2821, 2822, 2823, 2824, 2824, 2825,
-	2826, 2827, 2827, 2828, 2829, 2830, 2830, 2831, 2832, 2832, 2833, 2834, 2835, 2835, 2836, 2837,
-	2838, 2838, 2839, 2840, 2841, 2841, 2842, 2843, 2844, 2844, 2845, 2846, 2846, 2847, 2848, 2849,
-	2849, 2850, 2851, 2852, 2852, 2853, 2854, 2855, 2855, 2856, 2857, 2857, 2858, 2859, 2860, 2860,
-	2861, 2862, 2863, 2863, 2864, 2865, 2865, 2866, 2867, 2868, 2868, 2869, 2870, 2871, 2871, 2872,
-	2873, 2873, 2874, 2875, 2876, 2876, 2877, 2878, 2879, 2879, 2880, 2881, 2881, 2882, 2883, 2884,
-	2884, 2885, 2886, 2886, 2887, 2888, 2889, 2889, 2890, 2891, 2892, 2892, 2893, 2894, 2894, 2895,
-	2896, 2897, 2897, 2898, 2899, 2899, 2900, 2901, 2902, 2902, 2903, 2904, 2904, 2905, 2906, 2907,
-	2907, 2908, 2909, 2909, 2910, 2911, 2912, 2912, 2913, 2914, 2914, 2915, 2916, 2917, 2917, 2918,
-	2919, 2919, 2920, 2921, 2922, 2922, 2923, 2924, 2924, 2925, 2926, 2927, 2927, 2928, 2929, 2929,
-	2930, 2931, 2932, 2932, 2933, 2934, 2934, 2935, 2936, 2937, 2937, 2938, 2939, 2939, 2940, 2941,
-	2941, 2942, 2943, 2944, 2944, 2945, 2946, 2946, 2947, 2948, 2949, 2949, 2950, 2951, 2951, 2952,
-	2953, 2953, 2954, 2955, 2956, 2956, 2957, 2958, 2958, 2959, 2960, 2961, 2961, 2962, 2963, 2963,
-	2964, 2965, 2965, 2966, 2967, 2968, 2968, 2969, 2970, 2970, 2971, 2972, 2972, 2973, 2974, 2975,
-	2975, 2976, 2977, 2977, 2978, 2979, 2979, 2980, 2981, 2982, 2982, 2983, 2984, 2984, 2985, 2986,
-	2986, 2987, 2988, 2988, 2989, 2990, 2991, 2991, 2992, 2993, 2993, 2994, 2995, 2995, 2996, 2997,
-	2998, 2998, 2999, 3000, 3000, 3001, 3002, 3002, 3003, 3004, 3004, 3005, 3006, 3006, 3007, 3008,
-	3009, 3009, 3010, 3011, 3011, 3012, 3013, 3013, 3014, 3015, 3015, 3016, 3017, 3018, 3018, 3019,
-	3020, 3020, 3021, 3022, 3022, 3023, 3024, 3024, 3025, 3026, 3026, 3027, 3028, 3029, 3029, 3030,
-	3031, 3031, 3032, 3033, 3033, 3034, 3035, 3035, 3036, 3037, 3037, 3038, 3039, 3039, 3040, 3041,
-	3042, 3042, 3043, 3044, 3044, 3045, 3046, 3046, 3047, 3048, 3048, 3049, 3050, 3050, 3051, 3052,
-	3052, 3053, 3054, 3054, 3055, 3056, 3056, 3057, 3058, 3059, 3059, 3060, 3061, 3061, 3062, 3063,
-	3063, 3064, 3065, 3065, 3066, 3067, 3067, 3068, 3069, 3069, 3070, 3071, 3071, 3072, 3073, 3073,
-	3074, 3075, 3075, 3076, 3077, 3077, 3078, 3079, 3079, 3080, 3081, 3081, 3082, 3083, 3084, 3084,
-	3085, 3086, 3086, 3087, 3088, 3088, 3089, 3090, 3090, 3091, 3092, 3092, 3093, 3094, 3094, 3095,
-	3096, 3096, 3097, 3098, 3098, 3099, 3100, 3100, 3101, 3102, 3102, 3103, 3104, 3104, 3105, 3106,
-	3106, 3107, 3108, 3108, 3109, 3110, 3110, 3111, 3112, 3112, 3113, 3114, 3114, 3115, 3116, 3116,
-	3117, 3118, 3118, 3119, 3120, 3120, 3121, 3122, 3122, 3123, 3124, 3124, 3125, 3126, 3126, 3127,
-	3128, 3128, 3129, 3130, 3130, 3131, 3132, 3132, 3133, 3134, 3134, 3135, 3135, 3136, 3137, 3137,
-	3138, 3139, 3139, 3140, 3141, 3141, 3142, 3143, 3143, 3144, 3145, 3145, 3146, 3147, 3147, 3148,
-	3149, 3149, 3150, 3151, 3151, 3152, 3153, 3153, 3154, 3155, 3155, 3156, 3157, 3157, 3158, 3159,
-	3159, 3160, 3160, 3161, 3162, 3162, 3163, 3164, 3164, 3165, 3166, 3166, 3167, 3168, 3168, 3169,
-	3170, 3170, 3171, 3172, 3172, 3173, 3174, 3174, 3175, 3175, 3176, 3177, 3177, 3178, 3179, 3179,
-	3180, 3181, 3181, 3182, 3183, 3183, 3184, 3185, 3185, 3186, 3187, 3187, 3188, 3188, 3189, 3190,
-	3190, 3191, 3192, 3192, 3193, 3194, 3194, 3195, 3196, 3196, 3197, 3198, 3198, 3199, 3199, 3200,
-	3201, 3201, 3202, 3203, 3203, 3204, 3205, 3205, 3206, 3207, 3207, 3208, 3209, 3209, 3210, 3210,
-	3211, 3212, 3212, 3213, 3214, 3214, 3215, 3216, 3216, 3217, 3218, 3218, 3219, 3219, 3220, 3221,
-	3221, 3222, 3223, 3223, 3224, 3225, 3225, 3226, 3227, 3227, 3228, 3228, 3229, 3230, 3230, 3231,
-	3232, 3232, 3233, 3234, 3234, 3235, 3235, 3236, 3237, 3237, 3238, 3239, 3239, 3240, 3241, 3241,
-	3242, 3242, 3243, 3244, 3244, 3245, 3246, 3246, 3247, 3248, 3248, 3249, 3249, 3250, 3251, 3251,
-	3252, 3253, 3253, 3254, 3255, 3255, 3256, 3256, 3257, 3258, 3258, 3259, 3260, 3260, 3261, 3262,
-	3262, 3263, 3263, 3264, 3265, 3265, 3266, 3267, 3267, 3268, 3268, 3269, 3270, 3270, 3271, 3272,
-	3272, 3273, 3274, 3274, 3275, 3275, 3276, 3277, 3277, 3278, 3279, 3279, 3280, 3280, 3281, 3282,
-	3282, 3283, 3284, 3284, 3285, 3285, 3286, 3287, 3287, 3288, 3289, 3289, 3290, 3290, 3291, 3292,
-	3292, 3293, 3294, 3294, 3295, 3295, 3296, 3297, 3297, 3298, 3299, 3299, 3300, 3300, 3301, 3302,
-	3302, 3303, 3304, 3304, 3305, 3305, 3306, 3307, 3307, 3308, 3309, 3309, 3310, 3310, 3311, 3312,
-	3312, 3313, 3314, 3314, 3315, 3315, 3316, 3317, 3317, 3318, 3319, 3319, 3320, 3320, 3321, 3322,
-	3322, 3323, 3323, 3324, 3325, 3325, 3326, 3327, 3327, 3328, 3328, 3329, 3330, 3330, 3331, 3332,
-	3332, 3333, 3333, 3334, 3335, 3335, 3336, 3336, 3337, 3338, 3338, 3339, 3340, 3340, 3341, 3341,
-	3342, 3343, 3343, 3344, 3345, 3345, 3346, 3346, 3347, 3348, 3348, 3349, 3349, 3350, 3351, 3351,
-	3352, 3352, 3353, 3354, 3354, 3355, 3356, 3356, 3357, 3357, 3358, 3359, 3359, 3360, 3360, 3361,
-	3362, 3362, 3363, 3364, 3364, 3365, 3365, 3366, 3367, 3367, 3368, 3368, 3369, 3370, 3370, 3371,
-	3371, 3372, 3373, 3373, 3374, 3375, 3375, 3376, 3376, 3377, 3378, 3378, 3379, 3379, 3380, 3381,
-	3381, 3382, 3382, 3383, 3384, 3384, 3385, 3385, 3386, 3387, 3387, 3388, 3389, 3389, 3390, 3390,
-	3391, 3392, 3392, 3393, 3393, 3394, 3395, 3395, 3396, 3396, 3397, 3398, 3398, 3399, 3399, 3400,
-	3401, 3401, 3402, 3402, 3403, 3404, 3404, 3405, 3405, 3406, 3407, 3407, 3408, 3408, 3409, 3410,
-	3410, 3411, 3411, 3412, 3413, 3413, 3414, 3414, 3415, 3416, 3416, 3417, 3418, 3418, 3419, 3419,
-	3420, 3421, 3421, 3422, 3422, 3423, 3424, 3424, 3425, 3425, 3426, 3427, 3427, 3428, 3428, 3429,
-	3430, 3430, 3431, 3431, 3432, 3433, 3433, 3434, 3434, 3435, 3435, 3436, 3437, 3437, 3438, 3438,
-	3439, 3440, 3440, 3441, 3441, 3442, 3443, 3443, 3444, 3444, 3445, 3446, 3446, 3447, 3447, 3448,
-	3449, 3449, 3450, 3450, 3451, 3452, 3452, 3453, 3453, 3454, 3455, 3455, 3456, 3456, 3457, 3458,
-	3458, 3459, 3459, 3460, 3461, 3461, 3462, 3462, 3463, 3463, 3464, 3465, 3465, 3466, 3466, 3467,
-	3468, 3468, 3469, 3469, 3470, 3471, 3471, 3472, 3472, 3473, 3474, 3474, 3475, 3475, 3476, 3476,
-	3477, 3478, 3478, 3479, 3479, 3480, 3481, 3481, 3482, 3482, 3483, 3484, 3484, 3485, 3485, 3486,
-	3486, 3487, 3488, 3488, 3489, 3489, 3490, 3491, 3491, 3492, 3492, 3493, 3494, 3494, 3495, 3495,
-	3496, 3496, 3497, 3498, 3498, 3499, 3499, 3500, 3501, 3501, 3502, 3502, 3503, 3504, 3504, 3505,
-	3505, 3506, 3506, 3507, 3508, 3508, 3509, 3509, 3510, 3511, 3511, 3512, 3512, 3513, 3513, 3514,
-	3515, 3515, 3516, 3516, 3517, 3518, 3518, 3519, 3519, 3520, 3520, 3521, 3522, 3522, 3523, 3523,
-	3524, 3525, 3525, 3526, 3526, 3527, 3527, 3528, 3529, 3529, 3530, 3530, 3531, 3531, 3532, 3533,
-	3533, 3534, 3534, 3535, 3536, 3536, 3537, 3537, 3538, 3538, 3539, 3540, 3540, 3541, 3541, 3542,
-	3542, 3543, 3544, 3544, 3545, 3545, 3546, 3547, 3547, 3548, 3548, 3549, 3549, 3550, 3551, 3551,
-	3552, 3552, 3553, 3553, 3554, 3555, 3555, 3556, 3556, 3557, 3557, 3558, 3559, 3559, 3560, 3560,
-	3561, 3561, 3562, 3563, 3563, 3564, 3564, 3565, 3566, 3566, 3567, 3567, 3568, 3568, 3569, 3570,
-	3570, 3571, 3571, 3572, 3572, 3573, 3574, 3574, 3575, 3575, 3576, 3576, 3577, 3578, 3578, 3579,
-	3579, 3580, 3580, 3581, 3582, 3582, 3583, 3583, 3584, 3584, 3585, 3586, 3586, 3587, 3587, 3588,
-	3588, 3589, 3590, 3590, 3591, 3591, 3592, 3592, 3593, 3594, 3594, 3595, 3595, 3596, 3596, 3597,
-	3597, 3598, 3599, 3599, 3600, 3600, 3601, 3601, 3602, 3603, 3603, 3604, 3604, 3605, 3605, 3606,
-	3607, 3607, 3608, 3608, 3609, 3609, 3610, 3611, 3611, 3612, 3612, 3613, 3613, 3614, 3615, 3615,
-	3616, 3616, 3617, 3617, 3618, 3618, 3619, 3620, 3620, 3621, 3621, 3622, 3622, 3623, 3624, 3624,
-	3625, 3625, 3626, 3626, 3627, 3627, 3628, 3629, 3629, 3630, 3630, 3631, 3631, 3632, 3633, 3633,
-	3634, 3634, 3635, 3635, 3636, 3636, 3637, 3638, 3638, 3639, 3639, 3640, 3640, 3641, 3642, 3642,
-	3643, 3643, 3644, 3644, 3645, 3645, 3646, 3647, 3647, 3648, 3648, 3649, 3649, 3650, 3650, 3651,
-	3652, 3652, 3653, 3653, 3654, 3654, 3655, 3656, 3656, 3657, 3657, 3658, 3658, 3659, 3659, 3660,
-	3661, 3661, 3662, 3662, 3663, 3663, 3664, 3664, 3665, 3666, 3666, 3667, 3667, 3668, 3668, 3669,
-	3669, 3670, 3671, 3671, 3672, 3672, 3673, 3673, 3674, 3674, 3675, 3676, 3676, 3677, 3677, 3678,
-	3678, 3679, 3679, 3680, 3681, 3681, 3682, 3682, 3683, 3683, 3684, 3684, 3685, 3686, 3686, 3687,
-	3687, 3688, 3688, 3689, 3689, 3690, 3691, 3691, 3692, 3692, 3693, 3693, 3694, 3694, 3695, 3695,
-	3696, 3697, 3697, 3698, 3698, 3699, 3699, 3700, 3700, 3701, 3702, 3702, 3703, 3703, 3704, 3704,
-	3705, 3705, 3706, 3707, 3707, 3708, 3708, 3709, 3709, 3710, 3710, 3711, 3711, 3712, 3713, 3713,
-	3714, 3714, 3715, 3715, 3716, 3716, 3717, 3717, 3718, 3719, 3719, 3720, 3720, 3721, 3721, 3722,
-	3722, 3723, 3724, 3724, 3725, 3725, 3726, 3726, 3727, 3727, 3728, 3728, 3729, 3730, 3730, 3731,
-	3731, 3732, 3732, 3733, 3733, 3734, 3734, 3735, 3736, 3736, 3737, 3737, 3738, 3738, 3739, 3739,
-	3740, 3740, 3741, 3742, 3742, 3743, 3743, 3744, 3744, 3745, 3745, 3746, 3746, 3747, 3748, 3748,
-	3749, 3749, 3750, 3750, 3751, 3751, 3752, 3752, 3753, 3753, 3754, 3755, 3755, 3756, 3756, 3757,
-	3757, 3758, 3758, 3759, 3759, 3760, 3761, 3761, 3762, 3762, 3763, 3763, 3764, 3764, 3765, 3765,
-	3766, 3766, 3767, 3768, 3768, 3769, 3769, 3770, 3770, 3771, 3771, 3772, 3772, 3773, 3773, 3774,
-	3775, 3775, 3776, 3776, 3777, 3777, 3778, 3778, 3779, 3779, 3780, 3781, 3781, 3782, 3782, 3783,
-	3783, 3784, 3784, 3785, 3785, 3786, 3786, 3787, 3787, 3788, 3789, 3789, 3790, 3790, 3791, 3791,
-	3792, 3792, 3793, 3793, 3794, 3794, 3795, 3796, 3796, 3797, 3797, 3798, 3798, 3799, 3799, 3800,
-	3800, 3801, 3801, 3802, 3802, 3803, 3804, 3804, 3805, 3805, 3806, 3806, 3807, 3807, 3808, 3808,
-	3809, 3809, 3810, 3811, 3811, 3812, 3812, 3813, 3813, 3814, 3814, 3815, 3815, 3816, 3816, 3817,
-	3817, 3818, 3819, 3819, 3820, 3820, 3821, 3821, 3822, 3822, 3823, 3823, 3824, 3824, 3825, 3825,
-	3826, 3826, 3827, 3828, 3828, 3829, 3829, 3830, 3830, 3831, 3831, 3832, 3832, 3833, 3833, 3834,
-	3834, 3835, 3835, 3836, 3837, 3837, 3838, 3838, 3839, 3839, 3840, 3840, 3841, 3841, 3842, 3842,
-	3843, 3843, 3844, 3844, 3845, 3846, 3846, 3847, 3847, 3848, 3848, 3849, 3849, 3850, 3850, 3851,
-	3851, 3852, 3852, 3853, 3853, 3854, 3855, 3855, 3856, 3856, 3857, 3857, 3858, 3858, 3859, 3859,
-	3860, 3860, 3861, 3861, 3862, 3862, 3863, 3863, 3864, 3864, 3865, 3866, 3866, 3867, 3867, 3868,
-	3868, 3869, 3869, 3870, 3870, 3871, 3871, 3872, 3872, 3873, 3873, 3874, 3874, 3875, 3876, 3876,
-	3877, 3877, 3878, 3878, 3879, 3879, 3880, 3880, 3881, 3881, 3882, 3882, 3883, 3883, 3884, 3884,
-	3885, 3885, 3886, 3886, 3887, 3888, 3888, 3889, 3889, 3890, 3890, 3891, 3891, 3892, 3892, 3893,
-	3893, 3894, 3894, 3895, 3895, 3896, 3896, 3897, 3897, 3898, 3898, 3899, 3900, 3900, 3901, 3901,
-	3902, 3902, 3903, 3903, 3904, 3904, 3905, 3905, 3906, 3906, 3907, 3907, 3908, 3908, 3909, 3909,
-	3910, 3910, 3911, 3911, 3912, 3912, 3913, 3914, 3914, 3915, 3915, 3916, 3916, 3917, 3917, 3918,
-	3918, 3919, 3919, 3920, 3920, 3921, 3921, 3922, 3922, 3923, 3923, 3924, 3924, 3925, 3925, 3926,
-	3926, 3927, 3927, 3928, 3929, 3929, 3930, 3930, 3931, 3931, 3932, 3932, 3933, 3933, 3934, 3934,
-	3935, 3935, 3936, 3936, 3937, 3937, 3938, 3938, 3939, 3939, 3940, 3940, 3941, 3941, 3942, 3942,
-	3943, 3943, 3944, 3944, 3945, 3945, 3946, 3947, 3947, 3948, 3948, 3949, 3949, 3950, 3950, 3951,
-	3951, 3952, 3952, 3953, 3953, 3954, 3954, 3955, 3955, 3956, 3956, 3957, 3957, 3958, 3958, 3959,
-	3959, 3960, 3960, 3961, 3961, 3962, 3962, 3963, 3963, 3964, 3964, 3965, 3965, 3966, 3966, 3967,
-	3967, 3968, 3969, 3969, 3970, 3970, 3971, 3971, 3972, 3972, 3973, 3973, 3974, 3974, 3975, 3975,
-	3976, 3976, 3977, 3977, 3978, 3978, 3979, 3979, 3980, 3980, 3981, 3981, 3982, 3982, 3983, 3983,
-	3984, 3984, 3985, 3985, 3986, 3986, 3987, 3987, 3988, 3988, 3989, 3989, 3990, 3990, 3991, 3991,
-	3992, 3992, 3993, 3993, 3994, 3994, 3995, 3995, 3996, 3996, 3997, 3997, 3998, 3998, 3999, 3999,
-	4000, 4001, 4001, 4002, 4002, 4003, 4003, 4004, 4004, 4005, 4005, 4006, 4006, 4007, 4007, 4008,
-	4008, 4009, 4009, 4010, 4010, 4011, 4011, 4012, 4012, 4013, 4013, 4014, 4014, 4015, 4015, 4016,
-	4016, 4017, 4017, 4018, 4018, 4019, 4019, 4020, 4020, 4021, 4021, 4022, 4022, 4023, 4023, 4024,
-	4024, 4025, 4025, 4026, 4026, 4027, 4027, 4028, 4028, 4029, 4029, 4030, 4030, 4031, 4031, 4032,
-	4032, 4033, 4033, 4034, 4034, 4035, 4035, 4036, 4036, 4037, 4037, 4038, 4038, 4039, 4039, 4040,
-	4040, 4041, 4041, 4042, 4042, 4043, 4043, 4044, 4044, 4045, 4045, 4046, 4046, 4047, 4047, 4048,
-	4048, 4049, 4049, 4050, 4050, 4051, 4051, 4052, 4052, 4053, 4053, 4054, 4054, 4055, 4055, 4056,
-	4056, 4057, 4057, 4058, 4058, 4059, 4059, 4060, 4060, 4061, 4061, 4062, 4062, 4063, 4063, 4064,
-	4064, 4065, 4065, 4066, 4066, 4067, 4067, 4068, 4068, 4069, 4069, 4070, 4070, 4071, 4071, 4072,
-	4072, 4073, 4073, 4074, 4074, 4075, 4075, 4076, 4076, 4077, 4077, 4078, 4078, 4079, 4079, 4080,
-	4080,
-};
-
-/* Generated table */
-const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_SMPTE2084 + 1][TPG_COLOR_CSC_BLACK + 1] = {
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][1] = { 2953, 2963, 586 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][2] = { 0, 2967, 2937 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][3] = { 88, 2990, 575 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][4] = { 3016, 259, 2933 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][5] = { 3030, 405, 558 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][6] = { 478, 428, 2931 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][1] = { 3068, 3077, 838 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][2] = { 0, 3081, 3053 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][3] = { 241, 3102, 828 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][4] = { 3126, 504, 3050 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][1] = { 3046, 3054, 886 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][2] = { 0, 3058, 3031 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][3] = { 360, 3079, 877 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][4] = { 3103, 587, 3027 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][5] = { 3116, 723, 861 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][6] = { 789, 744, 3025 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][3] = { 78, 2978, 536 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][4] = { 3004, 230, 2920 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][5] = { 3018, 363, 518 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][6] = { 437, 387, 2918 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][1] = { 2145, 2159, 142 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][2] = { 0, 2164, 2122 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][3] = { 19, 2198, 138 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][4] = { 2236, 57, 2116 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][5] = { 2256, 90, 133 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][6] = { 110, 96, 2113 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][1] = { 3186, 3194, 1121 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][2] = { 0, 3197, 3173 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][3] = { 523, 3216, 1112 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][4] = { 3237, 792, 3169 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3802, 3805, 2602 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 3806, 3797 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1780, 3812, 2592 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3820, 2215, 3796 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3824, 2409, 2574 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2491, 2435, 3795 },
-	[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][1] = { 2953, 2963, 586 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][2] = { 0, 2967, 2937 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][3] = { 88, 2990, 575 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][4] = { 3016, 259, 2933 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][5] = { 3030, 405, 558 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][6] = { 478, 428, 2931 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][1] = { 3068, 3077, 838 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][2] = { 0, 3081, 3053 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][3] = { 241, 3102, 828 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][4] = { 3126, 504, 3050 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][1] = { 3046, 3054, 886 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][2] = { 0, 3058, 3031 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][3] = { 360, 3079, 877 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][4] = { 3103, 587, 3027 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][5] = { 3116, 723, 861 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][6] = { 789, 744, 3025 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][3] = { 78, 2978, 536 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][4] = { 3004, 230, 2920 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][5] = { 3018, 363, 518 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][6] = { 437, 387, 2918 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][1] = { 2145, 2159, 142 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][2] = { 0, 2164, 2122 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][3] = { 19, 2198, 138 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][4] = { 2236, 57, 2116 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][5] = { 2256, 90, 133 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][6] = { 110, 96, 2113 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][1] = { 3186, 3194, 1121 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][2] = { 0, 3197, 3173 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][3] = { 523, 3216, 1112 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][4] = { 3237, 792, 3169 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3802, 3805, 2602 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 3806, 3797 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1780, 3812, 2592 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3820, 2215, 3796 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3824, 2409, 2574 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2491, 2435, 3795 },
-	[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][3] = { 547, 2939, 547 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][4] = { 2939, 547, 2939 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][5] = { 2939, 547, 547 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][6] = { 547, 547, 2939 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 800 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][2] = { 800, 3056, 3056 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][3] = { 800, 3056, 800 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][4] = { 3056, 800, 3056 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 851 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][2] = { 851, 3033, 3033 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][3] = { 851, 3033, 851 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][4] = { 3033, 851, 3033 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][5] = { 3033, 851, 851 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3033 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][3] = { 507, 2926, 507 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][4] = { 2926, 507, 2926 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][5] = { 2926, 507, 507 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2926 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 130 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][2] = { 130, 2125, 2125 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][3] = { 130, 2125, 130 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][4] = { 2125, 130, 2125 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][5] = { 2125, 130, 130 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2125 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1084 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][2] = { 1084, 3175, 3175 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][3] = { 1084, 3175, 1084 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][4] = { 3175, 1084, 3175 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2563 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][2] = { 2563, 3798, 3798 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][3] = { 2563, 3798, 2563 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][4] = { 3798, 2563, 3798 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][5] = { 3798, 2563, 2563 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3798 },
-	[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][1] = { 2892, 3034, 910 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][2] = { 1715, 2916, 2914 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][3] = { 1631, 3012, 828 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][4] = { 2497, 119, 2867 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][5] = { 2440, 649, 657 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][6] = { 740, 0, 2841 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3055, 3056 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][1] = { 3013, 3142, 1157 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][2] = { 1926, 3034, 3032 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][3] = { 1847, 3121, 1076 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][4] = { 2651, 304, 2990 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][5] = { 2599, 901, 909 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][6] = { 991, 0, 2966 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][1] = { 2989, 3120, 1180 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][2] = { 1913, 3011, 3009 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][3] = { 1836, 3099, 1105 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][4] = { 2627, 413, 2966 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][5] = { 2576, 943, 951 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][6] = { 1026, 0, 2942 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2879, 3022, 874 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][2] = { 1688, 2903, 2901 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][3] = { 1603, 2999, 791 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][4] = { 2479, 106, 2853 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][5] = { 2422, 610, 618 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][6] = { 702, 0, 2827 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][1] = { 2059, 2262, 266 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][2] = { 771, 2092, 2089 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][3] = { 705, 2229, 231 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][4] = { 1550, 26, 2024 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][5] = { 1484, 163, 165 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][6] = { 196, 0, 1988 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][1] = { 3136, 3251, 1429 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][2] = { 2150, 3156, 3154 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][3] = { 2077, 3233, 1352 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][4] = { 2812, 589, 3116 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][5] = { 2765, 1182, 1190 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][6] = { 1270, 0, 3094 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3784, 3825, 2879 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][2] = { 3351, 3791, 3790 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][3] = { 3311, 3819, 2815 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3659, 1900, 3777 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3640, 2662, 2669 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2743, 0, 3769 },
-	[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 464 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][2] = { 786, 2939, 2939 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][3] = { 786, 2939, 464 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][4] = { 2879, 547, 2956 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][5] = { 2879, 547, 547 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][6] = { 547, 547, 2956 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 717 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][2] = { 1036, 3056, 3056 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][3] = { 1036, 3056, 717 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][4] = { 3001, 800, 3071 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][5] = { 3001, 800, 799 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3071 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 776 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][2] = { 1068, 3033, 3033 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][3] = { 1068, 3033, 776 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][4] = { 2977, 851, 3048 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][5] = { 2977, 851, 851 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3048 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 423 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][2] = { 749, 2926, 2926 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][3] = { 749, 2926, 423 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][4] = { 2865, 507, 2943 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][5] = { 2865, 507, 507 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2943 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 106 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][2] = { 214, 2125, 2125 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][3] = { 214, 2125, 106 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][4] = { 2041, 130, 2149 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][5] = { 2041, 130, 130 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2149 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1003 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][2] = { 1313, 3175, 3175 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][3] = { 1313, 3175, 1003 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][4] = { 3126, 1084, 3188 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][5] = { 3126, 1084, 1084 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3188 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2476 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][2] = { 2782, 3798, 3798 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][3] = { 2782, 3798, 2476 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][4] = { 3780, 2563, 3803 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][5] = { 3780, 2563, 2563 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3803 },
-	[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][3] = { 547, 2939, 547 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][4] = { 2939, 547, 2939 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][5] = { 2939, 547, 547 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2939 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 800 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][2] = { 800, 3056, 3056 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][3] = { 800, 3056, 800 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][4] = { 3056, 800, 3056 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 851 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][2] = { 851, 3033, 3033 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][3] = { 851, 3033, 851 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][4] = { 3033, 851, 3033 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][5] = { 3033, 851, 851 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3033 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 507, 2926, 507 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2926, 507, 2926 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2926, 507, 507 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2926 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 130 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][2] = { 130, 2125, 2125 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][3] = { 130, 2125, 130 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][4] = { 2125, 130, 2125 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][5] = { 2125, 130, 130 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2125 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1084 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][2] = { 1084, 3175, 3175 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][3] = { 1084, 3175, 1084 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][4] = { 3175, 1084, 3175 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2563 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 2563, 3798, 3798 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 2563, 3798, 2563 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 3798, 2563, 3798 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 3798, 2563, 2563 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3798 },
-	[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][3] = { 1622, 2939, 781 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][4] = { 2502, 547, 2881 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][5] = { 2502, 547, 547 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2881 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 1031 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][2] = { 1838, 3056, 3056 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][3] = { 1838, 3056, 1031 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][4] = { 2657, 800, 3002 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][5] = { 2657, 800, 800 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3002 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 1063 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][2] = { 1828, 3033, 3033 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][3] = { 1828, 3033, 1063 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][4] = { 2633, 851, 2979 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][5] = { 2633, 851, 851 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 2979 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 744 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 1594, 2926, 2926 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 1594, 2926, 744 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2484, 507, 2867 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2484, 507, 507 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2867 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 212 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][2] = { 698, 2125, 2125 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][3] = { 698, 2125, 212 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][4] = { 1557, 130, 2043 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][5] = { 1557, 130, 130 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2043 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1308 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][2] = { 2069, 3175, 3175 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][3] = { 2069, 3175, 1308 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][4] = { 2816, 1084, 3127 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2778 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 3306, 3798, 3798 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 3306, 3798, 2778 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 3661, 2563, 3781 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 3661, 2563, 2563 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3781 },
-	[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][1] = { 2877, 2923, 1058 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][2] = { 1837, 2840, 2916 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][3] = { 1734, 2823, 993 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][4] = { 2427, 961, 2812 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][5] = { 2351, 912, 648 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][6] = { 792, 618, 2788 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][1] = { 2999, 3041, 1301 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][2] = { 2040, 2965, 3034 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][3] = { 1944, 2950, 1238 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][4] = { 2587, 1207, 2940 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][5] = { 2517, 1159, 900 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][6] = { 1042, 870, 2917 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][1] = { 2976, 3018, 1315 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][2] = { 2024, 2942, 3011 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][3] = { 1930, 2926, 1256 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][4] = { 2563, 1227, 2916 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][5] = { 2494, 1183, 943 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][6] = { 1073, 916, 2894 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][1] = { 2864, 2910, 1024 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][2] = { 1811, 2826, 2903 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][3] = { 1707, 2809, 958 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][4] = { 2408, 926, 2798 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][5] = { 2331, 876, 609 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][6] = { 755, 579, 2773 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][1] = { 2039, 2102, 338 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][2] = { 873, 1987, 2092 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][3] = { 787, 1965, 305 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][4] = { 1468, 290, 1949 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][5] = { 1382, 268, 162 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][6] = { 216, 152, 1917 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][1] = { 3124, 3161, 1566 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][2] = { 2255, 3094, 3156 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][3] = { 2166, 3080, 1506 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][4] = { 2754, 1477, 3071 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][5] = { 2690, 1431, 1182 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][6] = { 1318, 1153, 3051 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][1] = { 3780, 3793, 2984 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][2] = { 3406, 3768, 3791 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][3] = { 3359, 3763, 2939 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][4] = { 3636, 2916, 3760 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][5] = { 3609, 2880, 2661 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][6] = { 2786, 2633, 3753 },
-	[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][1] = { 2936, 2934, 992 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][2] = { 1159, 2890, 2916 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][3] = { 1150, 2885, 921 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][4] = { 2751, 766, 2837 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][5] = { 2747, 747, 650 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][6] = { 563, 570, 2812 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3055 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][1] = { 3052, 3051, 1237 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][2] = { 1397, 3011, 3034 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][3] = { 1389, 3006, 1168 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][4] = { 2884, 1016, 2962 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][5] = { 2880, 998, 902 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][6] = { 816, 823, 2940 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][1] = { 3029, 3028, 1255 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][2] = { 1406, 2988, 3011 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][3] = { 1398, 2983, 1190 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][4] = { 2860, 1050, 2939 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][5] = { 2857, 1033, 945 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][6] = { 866, 873, 2916 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][1] = { 2923, 2921, 957 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][2] = { 1125, 2877, 2902 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][3] = { 1116, 2871, 885 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][4] = { 2736, 729, 2823 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][5] = { 2732, 710, 611 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][6] = { 523, 531, 2798 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][1] = { 2120, 2118, 305 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][2] = { 392, 2056, 2092 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][3] = { 387, 2049, 271 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][4] = { 1868, 206, 1983 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][5] = { 1863, 199, 163 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][6] = { 135, 137, 1950 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][1] = { 3172, 3170, 1505 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][2] = { 1657, 3135, 3155 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][3] = { 1649, 3130, 1439 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][4] = { 3021, 1294, 3091 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][5] = { 3018, 1276, 1184 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][6] = { 1100, 1107, 3071 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][1] = { 3797, 3796, 2938 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][2] = { 3049, 3783, 3791 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][3] = { 3044, 3782, 2887 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][4] = { 3741, 2765, 3768 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][5] = { 3740, 2749, 2663 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][6] = { 2580, 2587, 3760 },
-	[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
-};
-
-#else
-
-/* This code generates the table above */
-
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-static const double rec709_to_ntsc1953[3][3] = {
-	/*
-	 * This transform uses the Bradford method to compensate for
-	 * the different whitepoints.
-	 */
-	{ 0.6785011, 0.2883441, 0.0331548 },
-	{ 0.0165284, 1.0518725, -0.0684009 },
-	{ 0.0179230, 0.0506096, 0.9314674 }
-};
-
-static const double rec709_to_ebu[3][3] = {
-	{ 0.9578221, 0.0421779, -0.0000000 },
-	{ -0.0000000, 1.0000000, 0.0000000 },
-	{ -0.0000000, -0.0119367, 1.0119367 }
-};
-
-static const double rec709_to_170m[3][3] = {
-	{ 1.0653640, -0.0553900, -0.0099740 },
-	{ -0.0196361, 1.0363630, -0.0167269 },
-	{ 0.0016327, 0.0044133, 0.9939540 },
-};
-
-static const double rec709_to_240m[3][3] = {
-	{ 1.0653640, -0.0553900, -0.0099740 },
-	{ -0.0196361, 1.0363630, -0.0167269 },
-	{ 0.0016327, 0.0044133, 0.9939540 },
-};
-
-static const double rec709_to_adobergb[3][3] = {
-	{ 0.7151627, 0.2848373, -0.0000000 },
-	{ 0.0000000, 1.0000000, 0.0000000 },
-	{ -0.0000000, 0.0411705, 0.9588295 },
-};
-
-static const double rec709_to_bt2020[3][3] = {
-	{ 0.6274524, 0.3292485, 0.0432991 },
-	{ 0.0691092, 0.9195311, 0.0113597 },
-	{ 0.0163976, 0.0880301, 0.8955723 },
-};
-
-static const double rec709_to_dcip3[3][3] = {
-	/*
-	 * This transform uses the Bradford method to compensate for
-	 * the different whitepoints.
-	 */
-	{ 0.8686648, 0.1288456, 0.0024896 },
-	{ 0.0345479, 0.9618084, 0.0036437 },
-	{ 0.0167785, 0.0710559, 0.9121655 }
-};
-
-static void mult_matrix(double *r, double *g, double *b, const double m[3][3])
-{
-	double ir, ig, ib;
-
-	ir = m[0][0] * (*r) + m[0][1] * (*g) + m[0][2] * (*b);
-	ig = m[1][0] * (*r) + m[1][1] * (*g) + m[1][2] * (*b);
-	ib = m[2][0] * (*r) + m[2][1] * (*g) + m[2][2] * (*b);
-	*r = ir;
-	*g = ig;
-	*b = ib;
-}
-
-static double transfer_srgb_to_rgb(double v)
-{
-	if (v < -0.04045)
-		return pow((-v + 0.055) / 1.055, 2.4);
-	return (v <= 0.04045) ? v / 12.92 : pow((v + 0.055) / 1.055, 2.4);
-}
-
-static double transfer_rgb_to_srgb(double v)
-{
-	if (v <= -0.0031308)
-		return -1.055 * pow(-v, 1.0 / 2.4) + 0.055;
-	if (v <= 0.0031308)
-		return v * 12.92;
-	return 1.055 * pow(v, 1.0 / 2.4) - 0.055;
-}
-
-static double transfer_rgb_to_smpte240m(double v)
-{
-	return (v <= 0.0228) ? v * 4.0 : 1.1115 * pow(v, 0.45) - 0.1115;
-}
-
-static double transfer_rgb_to_rec709(double v)
-{
-	if (v <= -0.018)
-		return -1.099 * pow(-v, 0.45) + 0.099;
-	return (v < 0.018) ? v * 4.5 : 1.099 * pow(v, 0.45) - 0.099;
-}
-
-static double transfer_rec709_to_rgb(double v)
-{
-	return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45);
-}
-
-static double transfer_rgb_to_adobergb(double v)
-{
-	return pow(v, 1.0 / 2.19921875);
-}
-
-static double transfer_rgb_to_dcip3(double v)
-{
-	return pow(v, 1.0 / 2.6);
-}
-
-static double transfer_rgb_to_smpte2084(double v)
-{
-	const double m1 = (2610.0 / 4096.0) / 4.0;
-	const double m2 = 128.0 * 2523.0 / 4096.0;
-	const double c1 = 3424.0 / 4096.0;
-	const double c2 = 32.0 * 2413.0 / 4096.0;
-	const double c3 = 32.0 * 2392.0 / 4096.0;
-
-	v = pow(v, m1);
-	return pow((c1 + c2 * v) / (1 + c3 * v), m2);
-}
-
-static double transfer_srgb_to_rec709(double v)
-{
-	return transfer_rgb_to_rec709(transfer_srgb_to_rgb(v));
-}
-
-static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func,
-		double *r, double *g, double *b)
-{
-	int clamp = 1;
-
-	*r = transfer_srgb_to_rgb(*r);
-	*g = transfer_srgb_to_rgb(*g);
-	*b = transfer_srgb_to_rgb(*b);
-
-	/* Convert the primaries of Rec. 709 Linear RGB */
-	switch (colorspace) {
-	case V4L2_COLORSPACE_SMPTE240M:
-		mult_matrix(r, g, b, rec709_to_240m);
-		break;
-	case V4L2_COLORSPACE_SMPTE170M:
-		mult_matrix(r, g, b, rec709_to_170m);
-		break;
-	case V4L2_COLORSPACE_470_SYSTEM_BG:
-		mult_matrix(r, g, b, rec709_to_ebu);
-		break;
-	case V4L2_COLORSPACE_470_SYSTEM_M:
-		mult_matrix(r, g, b, rec709_to_ntsc1953);
-		break;
-	case V4L2_COLORSPACE_ADOBERGB:
-		mult_matrix(r, g, b, rec709_to_adobergb);
-		break;
-	case V4L2_COLORSPACE_BT2020:
-		mult_matrix(r, g, b, rec709_to_bt2020);
-		break;
-	case V4L2_COLORSPACE_DCI_P3:
-		mult_matrix(r, g, b, rec709_to_dcip3);
-		break;
-	case V4L2_COLORSPACE_SRGB:
-	case V4L2_COLORSPACE_REC709:
-		break;
-	default:
-		break;
-	}
-
-	if (clamp) {
-		*r = ((*r) < 0) ? 0 : (((*r) > 1) ? 1 : (*r));
-		*g = ((*g) < 0) ? 0 : (((*g) > 1) ? 1 : (*g));
-		*b = ((*b) < 0) ? 0 : (((*b) > 1) ? 1 : (*b));
-	}
-
-	switch (xfer_func) {
-	case V4L2_XFER_FUNC_709:
-		*r = transfer_rgb_to_rec709(*r);
-		*g = transfer_rgb_to_rec709(*g);
-		*b = transfer_rgb_to_rec709(*b);
-		break;
-	case V4L2_XFER_FUNC_SRGB:
-		*r = transfer_rgb_to_srgb(*r);
-		*g = transfer_rgb_to_srgb(*g);
-		*b = transfer_rgb_to_srgb(*b);
-		break;
-	case V4L2_XFER_FUNC_ADOBERGB:
-		*r = transfer_rgb_to_adobergb(*r);
-		*g = transfer_rgb_to_adobergb(*g);
-		*b = transfer_rgb_to_adobergb(*b);
-		break;
-	case V4L2_XFER_FUNC_DCI_P3:
-		*r = transfer_rgb_to_dcip3(*r);
-		*g = transfer_rgb_to_dcip3(*g);
-		*b = transfer_rgb_to_dcip3(*b);
-		break;
-	case V4L2_XFER_FUNC_SMPTE2084:
-		*r = transfer_rgb_to_smpte2084(*r);
-		*g = transfer_rgb_to_smpte2084(*g);
-		*b = transfer_rgb_to_smpte2084(*b);
-		break;
-	case V4L2_XFER_FUNC_SMPTE240M:
-		*r = transfer_rgb_to_smpte240m(*r);
-		*g = transfer_rgb_to_smpte240m(*g);
-		*b = transfer_rgb_to_smpte240m(*b);
-		break;
-	case V4L2_XFER_FUNC_NONE:
-		break;
-	}
-}
-
-int main(int argc, char **argv)
-{
-	static const unsigned colorspaces[] = {
-		0,
-		V4L2_COLORSPACE_SMPTE170M,
-		V4L2_COLORSPACE_SMPTE240M,
-		V4L2_COLORSPACE_REC709,
-		0,
-		V4L2_COLORSPACE_470_SYSTEM_M,
-		V4L2_COLORSPACE_470_SYSTEM_BG,
-		0,
-		V4L2_COLORSPACE_SRGB,
-		V4L2_COLORSPACE_ADOBERGB,
-		V4L2_COLORSPACE_BT2020,
-		0,
-		V4L2_COLORSPACE_DCI_P3,
-	};
-	static const char * const colorspace_names[] = {
-		"",
-		"V4L2_COLORSPACE_SMPTE170M",
-		"V4L2_COLORSPACE_SMPTE240M",
-		"V4L2_COLORSPACE_REC709",
-		"",
-		"V4L2_COLORSPACE_470_SYSTEM_M",
-		"V4L2_COLORSPACE_470_SYSTEM_BG",
-		"",
-		"V4L2_COLORSPACE_SRGB",
-		"V4L2_COLORSPACE_ADOBERGB",
-		"V4L2_COLORSPACE_BT2020",
-		"",
-		"V4L2_COLORSPACE_DCI_P3",
-	};
-	static const char * const xfer_func_names[] = {
-		"",
-		"V4L2_XFER_FUNC_709",
-		"V4L2_XFER_FUNC_SRGB",
-		"V4L2_XFER_FUNC_ADOBERGB",
-		"V4L2_XFER_FUNC_SMPTE240M",
-		"V4L2_XFER_FUNC_NONE",
-		"V4L2_XFER_FUNC_DCI_P3",
-		"V4L2_XFER_FUNC_SMPTE2084",
-	};
-	int i;
-	int x;
-	int c;
-
-	printf("/* Generated table */\n");
-	printf("const unsigned short tpg_rec709_to_linear[255 * 16 + 1] = {");
-	for (i = 0; i <= 255 * 16; i++) {
-		if (i % 16 == 0)
-			printf("\n\t");
-		printf("%4d,%s",
-			(int)(0.5 + 16.0 * 255.0 *
-				transfer_rec709_to_rgb(i / (16.0 * 255.0))),
-			i % 16 == 15 || i == 255 * 16 ? "" : " ");
-	}
-	printf("\n};\n\n");
-
-	printf("/* Generated table */\n");
-	printf("const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = {");
-	for (i = 0; i <= 255 * 16; i++) {
-		if (i % 16 == 0)
-			printf("\n\t");
-		printf("%4d,%s",
-			(int)(0.5 + 16.0 * 255.0 *
-				transfer_rgb_to_rec709(i / (16.0 * 255.0))),
-			i % 16 == 15 || i == 255 * 16 ? "" : " ");
-	}
-	printf("\n};\n\n");
-
-	printf("/* Generated table */\n");
-	printf("const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_SMPTE2084 + 1][TPG_COLOR_CSC_BLACK + 1] = {\n");
-	for (c = 0; c <= V4L2_COLORSPACE_DCI_P3; c++) {
-		for (x = 1; x <= V4L2_XFER_FUNC_SMPTE2084; x++) {
-			for (i = 0; i <= TPG_COLOR_CSC_BLACK; i++) {
-				double r, g, b;
-
-				if (colorspaces[c] == 0)
-					continue;
-
-				r = tpg_colors[i].r / 255.0;
-				g = tpg_colors[i].g / 255.0;
-				b = tpg_colors[i].b / 255.0;
-
-				csc(c, x, &r, &g, &b);
-
-				printf("\t[%s][%s][%d] = { %d, %d, %d },\n",
-					colorspace_names[c],
-					xfer_func_names[x], i,
-					(int)(r * 4080), (int)(g * 4080), (int)(b * 4080));
-			}
-		}
-	}
-	printf("};\n\n");
-	return 0;
-}
-
-#endif
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.h b/drivers/media/platform/vivid/vivid-tpg-colors.h
deleted file mode 100644
index 4e5a76a..0000000
--- a/drivers/media/platform/vivid/vivid-tpg-colors.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * vivid-color.h - Color definitions for the test pattern generator
- *
- * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _VIVID_COLORS_H_
-#define _VIVID_COLORS_H_
-
-struct color {
-	unsigned char r, g, b;
-};
-
-struct color16 {
-	int r, g, b;
-};
-
-enum tpg_color {
-	TPG_COLOR_CSC_WHITE,
-	TPG_COLOR_CSC_YELLOW,
-	TPG_COLOR_CSC_CYAN,
-	TPG_COLOR_CSC_GREEN,
-	TPG_COLOR_CSC_MAGENTA,
-	TPG_COLOR_CSC_RED,
-	TPG_COLOR_CSC_BLUE,
-	TPG_COLOR_CSC_BLACK,
-	TPG_COLOR_75_YELLOW,
-	TPG_COLOR_75_CYAN,
-	TPG_COLOR_75_GREEN,
-	TPG_COLOR_75_MAGENTA,
-	TPG_COLOR_75_RED,
-	TPG_COLOR_75_BLUE,
-	TPG_COLOR_100_WHITE,
-	TPG_COLOR_100_YELLOW,
-	TPG_COLOR_100_CYAN,
-	TPG_COLOR_100_GREEN,
-	TPG_COLOR_100_MAGENTA,
-	TPG_COLOR_100_RED,
-	TPG_COLOR_100_BLUE,
-	TPG_COLOR_100_BLACK,
-	TPG_COLOR_TEXTFG,
-	TPG_COLOR_TEXTBG,
-	TPG_COLOR_RANDOM,
-	TPG_COLOR_RAMP,
-	TPG_COLOR_MAX = TPG_COLOR_RAMP + 256
-};
-
-extern const struct color tpg_colors[TPG_COLOR_MAX];
-extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
-extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
-extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1]
-					  [V4L2_XFER_FUNC_SMPTE2084 + 1]
-					  [TPG_COLOR_CSC_BLACK + 1];
-
-#endif
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
deleted file mode 100644
index da862bb..0000000
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ /dev/null
@@ -1,2314 +0,0 @@
-/*
- * vivid-tpg.c - Test Pattern Generator
- *
- * Note: gen_twopix and tpg_gen_text are based on code from vivi.c. See the
- * vivi.c source for the copyright information of those functions.
- *
- * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "vivid-tpg.h"
-
-/* Must remain in sync with enum tpg_pattern */
-const char * const tpg_pattern_strings[] = {
-	"75% Colorbar",
-	"100% Colorbar",
-	"CSC Colorbar",
-	"Horizontal 100% Colorbar",
-	"100% Color Squares",
-	"100% Black",
-	"100% White",
-	"100% Red",
-	"100% Green",
-	"100% Blue",
-	"16x16 Checkers",
-	"2x2 Checkers",
-	"1x1 Checkers",
-	"2x2 Red/Green Checkers",
-	"1x1 Red/Green Checkers",
-	"Alternating Hor Lines",
-	"Alternating Vert Lines",
-	"One Pixel Wide Cross",
-	"Two Pixels Wide Cross",
-	"Ten Pixels Wide Cross",
-	"Gray Ramp",
-	"Noise",
-	NULL
-};
-
-/* Must remain in sync with enum tpg_aspect */
-const char * const tpg_aspect_strings[] = {
-	"Source Width x Height",
-	"4x3",
-	"14x9",
-	"16x9",
-	"16x9 Anamorphic",
-	NULL
-};
-
-/*
- * Sine table: sin[0] = 127 * sin(-180 degrees)
- *             sin[128] = 127 * sin(0 degrees)
- *             sin[256] = 127 * sin(180 degrees)
- */
-static const s8 sin[257] = {
-	   0,   -4,   -7,  -11,  -13,  -18,  -20,  -22,  -26,  -29,  -33,  -35,  -37,  -41,  -43,  -48,
-	 -50,  -52,  -56,  -58,  -62,  -63,  -65,  -69,  -71,  -75,  -76,  -78,  -82,  -83,  -87,  -88,
-	 -90,  -93,  -94,  -97,  -99, -101, -103, -104, -107, -108, -110, -111, -112, -114, -115, -117,
-	-118, -119, -120, -121, -122, -123, -123, -124, -125, -125, -126, -126, -127, -127, -127, -127,
-	-127, -127, -127, -127, -126, -126, -125, -125, -124, -124, -123, -122, -121, -120, -119, -118,
-	-117, -116, -114, -113, -111, -110, -109, -107, -105, -103, -101, -100,  -97,  -96,  -93,  -91,
-	 -90,  -87,  -85,  -82,  -80,  -76,  -75,  -73,  -69,  -67,  -63,  -62,  -60,  -56,  -54,  -50,
-	 -48,  -46,  -41,  -39,  -35,  -33,  -31,  -26,  -24,  -20,  -18,  -15,  -11,   -9,   -4,   -2,
-	   0,    2,    4,    9,   11,   15,   18,   20,   24,   26,   31,   33,   35,   39,   41,   46,
-	  48,   50,   54,   56,   60,   62,   64,   67,   69,   73,   75,   76,   80,   82,   85,   87,
-	  90,   91,   93,   96,   97,  100,  101,  103,  105,  107,  109,  110,  111,  113,  114,  116,
-	 117,  118,  119,  120,  121,  122,  123,  124,  124,  125,  125,  126,  126,  127,  127,  127,
-	 127,  127,  127,  127,  127,  126,  126,  125,  125,  124,  123,  123,  122,  121,  120,  119,
-	 118,  117,  115,  114,  112,  111,  110,  108,  107,  104,  103,  101,   99,   97,   94,   93,
-	  90,   88,   87,   83,   82,   78,   76,   75,   71,   69,   65,   64,   62,   58,   56,   52,
-	  50,   48,   43,   41,   37,   35,   33,   29,   26,   22,   20,   18,   13,   11,    7,    4,
-	   0,
-};
-
-#define cos(idx) sin[((idx) + 64) % sizeof(sin)]
-
-/* Global font descriptor */
-static const u8 *font8x16;
-
-void tpg_set_font(const u8 *f)
-{
-	font8x16 = f;
-}
-
-void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h)
-{
-	memset(tpg, 0, sizeof(*tpg));
-	tpg->scaled_width = tpg->src_width = w;
-	tpg->src_height = tpg->buf_height = h;
-	tpg->crop.width = tpg->compose.width = w;
-	tpg->crop.height = tpg->compose.height = h;
-	tpg->recalc_colors = true;
-	tpg->recalc_square_border = true;
-	tpg->brightness = 128;
-	tpg->contrast = 128;
-	tpg->saturation = 128;
-	tpg->hue = 0;
-	tpg->mv_hor_mode = TPG_MOVE_NONE;
-	tpg->mv_vert_mode = TPG_MOVE_NONE;
-	tpg->field = V4L2_FIELD_NONE;
-	tpg_s_fourcc(tpg, V4L2_PIX_FMT_RGB24);
-	tpg->colorspace = V4L2_COLORSPACE_SRGB;
-	tpg->perc_fill = 100;
-}
-
-int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
-{
-	unsigned pat;
-	unsigned plane;
-
-	tpg->max_line_width = max_w;
-	for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) {
-		for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
-			unsigned pixelsz = plane ? 2 : 4;
-
-			tpg->lines[pat][plane] = vzalloc(max_w * 2 * pixelsz);
-			if (!tpg->lines[pat][plane])
-				return -ENOMEM;
-			if (plane == 0)
-				continue;
-			tpg->downsampled_lines[pat][plane] = vzalloc(max_w * 2 * pixelsz);
-			if (!tpg->downsampled_lines[pat][plane])
-				return -ENOMEM;
-		}
-	}
-	for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
-		unsigned pixelsz = plane ? 2 : 4;
-
-		tpg->contrast_line[plane] = vzalloc(max_w * pixelsz);
-		if (!tpg->contrast_line[plane])
-			return -ENOMEM;
-		tpg->black_line[plane] = vzalloc(max_w * pixelsz);
-		if (!tpg->black_line[plane])
-			return -ENOMEM;
-		tpg->random_line[plane] = vzalloc(max_w * 2 * pixelsz);
-		if (!tpg->random_line[plane])
-			return -ENOMEM;
-	}
-	return 0;
-}
-
-void tpg_free(struct tpg_data *tpg)
-{
-	unsigned pat;
-	unsigned plane;
-
-	for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++)
-		for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
-			vfree(tpg->lines[pat][plane]);
-			tpg->lines[pat][plane] = NULL;
-			if (plane == 0)
-				continue;
-			vfree(tpg->downsampled_lines[pat][plane]);
-			tpg->downsampled_lines[pat][plane] = NULL;
-		}
-	for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
-		vfree(tpg->contrast_line[plane]);
-		vfree(tpg->black_line[plane]);
-		vfree(tpg->random_line[plane]);
-		tpg->contrast_line[plane] = NULL;
-		tpg->black_line[plane] = NULL;
-		tpg->random_line[plane] = NULL;
-	}
-}
-
-bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
-{
-	tpg->fourcc = fourcc;
-	tpg->planes = 1;
-	tpg->buffers = 1;
-	tpg->recalc_colors = true;
-	tpg->interleaved = false;
-	tpg->vdownsampling[0] = 1;
-	tpg->hdownsampling[0] = 1;
-	tpg->hmask[0] = ~0;
-	tpg->hmask[1] = ~0;
-	tpg->hmask[2] = ~0;
-
-	switch (fourcc) {
-	case V4L2_PIX_FMT_SBGGR8:
-	case V4L2_PIX_FMT_SGBRG8:
-	case V4L2_PIX_FMT_SGRBG8:
-	case V4L2_PIX_FMT_SRGGB8:
-	case V4L2_PIX_FMT_SBGGR10:
-	case V4L2_PIX_FMT_SGBRG10:
-	case V4L2_PIX_FMT_SGRBG10:
-	case V4L2_PIX_FMT_SRGGB10:
-	case V4L2_PIX_FMT_SBGGR12:
-	case V4L2_PIX_FMT_SGBRG12:
-	case V4L2_PIX_FMT_SGRBG12:
-	case V4L2_PIX_FMT_SRGGB12:
-		tpg->interleaved = true;
-		tpg->vdownsampling[1] = 1;
-		tpg->hdownsampling[1] = 1;
-		tpg->planes = 2;
-		/* fall through */
-	case V4L2_PIX_FMT_RGB332:
-	case V4L2_PIX_FMT_RGB565:
-	case V4L2_PIX_FMT_RGB565X:
-	case V4L2_PIX_FMT_RGB444:
-	case V4L2_PIX_FMT_XRGB444:
-	case V4L2_PIX_FMT_ARGB444:
-	case V4L2_PIX_FMT_RGB555:
-	case V4L2_PIX_FMT_XRGB555:
-	case V4L2_PIX_FMT_ARGB555:
-	case V4L2_PIX_FMT_RGB555X:
-	case V4L2_PIX_FMT_XRGB555X:
-	case V4L2_PIX_FMT_ARGB555X:
-	case V4L2_PIX_FMT_BGR666:
-	case V4L2_PIX_FMT_RGB24:
-	case V4L2_PIX_FMT_BGR24:
-	case V4L2_PIX_FMT_RGB32:
-	case V4L2_PIX_FMT_BGR32:
-	case V4L2_PIX_FMT_XRGB32:
-	case V4L2_PIX_FMT_XBGR32:
-	case V4L2_PIX_FMT_ARGB32:
-	case V4L2_PIX_FMT_ABGR32:
-	case V4L2_PIX_FMT_GREY:
-	case V4L2_PIX_FMT_Y16:
-	case V4L2_PIX_FMT_Y16_BE:
-		tpg->is_yuv = false;
-		break;
-	case V4L2_PIX_FMT_YUV444:
-	case V4L2_PIX_FMT_YUV555:
-	case V4L2_PIX_FMT_YUV565:
-	case V4L2_PIX_FMT_YUV32:
-		tpg->is_yuv = true;
-		break;
-	case V4L2_PIX_FMT_YUV420M:
-	case V4L2_PIX_FMT_YVU420M:
-		tpg->buffers = 3;
-		/* fall through */
-	case V4L2_PIX_FMT_YUV420:
-	case V4L2_PIX_FMT_YVU420:
-		tpg->vdownsampling[1] = 2;
-		tpg->vdownsampling[2] = 2;
-		tpg->hdownsampling[1] = 2;
-		tpg->hdownsampling[2] = 2;
-		tpg->planes = 3;
-		tpg->is_yuv = true;
-		break;
-	case V4L2_PIX_FMT_YUV422M:
-	case V4L2_PIX_FMT_YVU422M:
-		tpg->buffers = 3;
-		/* fall through */
-	case V4L2_PIX_FMT_YUV422P:
-		tpg->vdownsampling[1] = 1;
-		tpg->vdownsampling[2] = 1;
-		tpg->hdownsampling[1] = 2;
-		tpg->hdownsampling[2] = 2;
-		tpg->planes = 3;
-		tpg->is_yuv = true;
-		break;
-	case V4L2_PIX_FMT_NV16M:
-	case V4L2_PIX_FMT_NV61M:
-		tpg->buffers = 2;
-		/* fall through */
-	case V4L2_PIX_FMT_NV16:
-	case V4L2_PIX_FMT_NV61:
-		tpg->vdownsampling[1] = 1;
-		tpg->hdownsampling[1] = 1;
-		tpg->hmask[1] = ~1;
-		tpg->planes = 2;
-		tpg->is_yuv = true;
-		break;
-	case V4L2_PIX_FMT_NV12M:
-	case V4L2_PIX_FMT_NV21M:
-		tpg->buffers = 2;
-		/* fall through */
-	case V4L2_PIX_FMT_NV12:
-	case V4L2_PIX_FMT_NV21:
-		tpg->vdownsampling[1] = 2;
-		tpg->hdownsampling[1] = 1;
-		tpg->hmask[1] = ~1;
-		tpg->planes = 2;
-		tpg->is_yuv = true;
-		break;
-	case V4L2_PIX_FMT_YUV444M:
-	case V4L2_PIX_FMT_YVU444M:
-		tpg->buffers = 3;
-		tpg->planes = 3;
-		tpg->vdownsampling[1] = 1;
-		tpg->vdownsampling[2] = 1;
-		tpg->hdownsampling[1] = 1;
-		tpg->hdownsampling[2] = 1;
-		tpg->is_yuv = true;
-		break;
-	case V4L2_PIX_FMT_NV24:
-	case V4L2_PIX_FMT_NV42:
-		tpg->vdownsampling[1] = 1;
-		tpg->hdownsampling[1] = 1;
-		tpg->planes = 2;
-		tpg->is_yuv = true;
-		break;
-	case V4L2_PIX_FMT_YUYV:
-	case V4L2_PIX_FMT_UYVY:
-	case V4L2_PIX_FMT_YVYU:
-	case V4L2_PIX_FMT_VYUY:
-		tpg->hmask[0] = ~1;
-		tpg->is_yuv = true;
-		break;
-	default:
-		return false;
-	}
-
-	switch (fourcc) {
-	case V4L2_PIX_FMT_GREY:
-	case V4L2_PIX_FMT_RGB332:
-		tpg->twopixelsize[0] = 2;
-		break;
-	case V4L2_PIX_FMT_RGB565:
-	case V4L2_PIX_FMT_RGB565X:
-	case V4L2_PIX_FMT_RGB444:
-	case V4L2_PIX_FMT_XRGB444:
-	case V4L2_PIX_FMT_ARGB444:
-	case V4L2_PIX_FMT_RGB555:
-	case V4L2_PIX_FMT_XRGB555:
-	case V4L2_PIX_FMT_ARGB555:
-	case V4L2_PIX_FMT_RGB555X:
-	case V4L2_PIX_FMT_XRGB555X:
-	case V4L2_PIX_FMT_ARGB555X:
-	case V4L2_PIX_FMT_YUYV:
-	case V4L2_PIX_FMT_UYVY:
-	case V4L2_PIX_FMT_YVYU:
-	case V4L2_PIX_FMT_VYUY:
-	case V4L2_PIX_FMT_YUV444:
-	case V4L2_PIX_FMT_YUV555:
-	case V4L2_PIX_FMT_YUV565:
-	case V4L2_PIX_FMT_Y16:
-	case V4L2_PIX_FMT_Y16_BE:
-		tpg->twopixelsize[0] = 2 * 2;
-		break;
-	case V4L2_PIX_FMT_RGB24:
-	case V4L2_PIX_FMT_BGR24:
-		tpg->twopixelsize[0] = 2 * 3;
-		break;
-	case V4L2_PIX_FMT_BGR666:
-	case V4L2_PIX_FMT_RGB32:
-	case V4L2_PIX_FMT_BGR32:
-	case V4L2_PIX_FMT_XRGB32:
-	case V4L2_PIX_FMT_XBGR32:
-	case V4L2_PIX_FMT_ARGB32:
-	case V4L2_PIX_FMT_ABGR32:
-	case V4L2_PIX_FMT_YUV32:
-		tpg->twopixelsize[0] = 2 * 4;
-		break;
-	case V4L2_PIX_FMT_NV12:
-	case V4L2_PIX_FMT_NV21:
-	case V4L2_PIX_FMT_NV12M:
-	case V4L2_PIX_FMT_NV21M:
-	case V4L2_PIX_FMT_NV16:
-	case V4L2_PIX_FMT_NV61:
-	case V4L2_PIX_FMT_NV16M:
-	case V4L2_PIX_FMT_NV61M:
-	case V4L2_PIX_FMT_SBGGR8:
-	case V4L2_PIX_FMT_SGBRG8:
-	case V4L2_PIX_FMT_SGRBG8:
-	case V4L2_PIX_FMT_SRGGB8:
-		tpg->twopixelsize[0] = 2;
-		tpg->twopixelsize[1] = 2;
-		break;
-	case V4L2_PIX_FMT_SRGGB10:
-	case V4L2_PIX_FMT_SGRBG10:
-	case V4L2_PIX_FMT_SGBRG10:
-	case V4L2_PIX_FMT_SBGGR10:
-	case V4L2_PIX_FMT_SRGGB12:
-	case V4L2_PIX_FMT_SGRBG12:
-	case V4L2_PIX_FMT_SGBRG12:
-	case V4L2_PIX_FMT_SBGGR12:
-		tpg->twopixelsize[0] = 4;
-		tpg->twopixelsize[1] = 4;
-		break;
-	case V4L2_PIX_FMT_YUV444M:
-	case V4L2_PIX_FMT_YVU444M:
-	case V4L2_PIX_FMT_YUV422M:
-	case V4L2_PIX_FMT_YVU422M:
-	case V4L2_PIX_FMT_YUV422P:
-	case V4L2_PIX_FMT_YUV420:
-	case V4L2_PIX_FMT_YVU420:
-	case V4L2_PIX_FMT_YUV420M:
-	case V4L2_PIX_FMT_YVU420M:
-		tpg->twopixelsize[0] = 2;
-		tpg->twopixelsize[1] = 2;
-		tpg->twopixelsize[2] = 2;
-		break;
-	case V4L2_PIX_FMT_NV24:
-	case V4L2_PIX_FMT_NV42:
-		tpg->twopixelsize[0] = 2;
-		tpg->twopixelsize[1] = 4;
-		break;
-	}
-	return true;
-}
-
-void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
-		const struct v4l2_rect *compose)
-{
-	tpg->crop = *crop;
-	tpg->compose = *compose;
-	tpg->scaled_width = (tpg->src_width * tpg->compose.width +
-				 tpg->crop.width - 1) / tpg->crop.width;
-	tpg->scaled_width &= ~1;
-	if (tpg->scaled_width > tpg->max_line_width)
-		tpg->scaled_width = tpg->max_line_width;
-	if (tpg->scaled_width < 2)
-		tpg->scaled_width = 2;
-	tpg->recalc_lines = true;
-}
-
-void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
-		       u32 field)
-{
-	unsigned p;
-
-	tpg->src_width = width;
-	tpg->src_height = height;
-	tpg->field = field;
-	tpg->buf_height = height;
-	if (V4L2_FIELD_HAS_T_OR_B(field))
-		tpg->buf_height /= 2;
-	tpg->scaled_width = width;
-	tpg->crop.top = tpg->crop.left = 0;
-	tpg->crop.width = width;
-	tpg->crop.height = height;
-	tpg->compose.top = tpg->compose.left = 0;
-	tpg->compose.width = width;
-	tpg->compose.height = tpg->buf_height;
-	for (p = 0; p < tpg->planes; p++)
-		tpg->bytesperline[p] = (width * tpg->twopixelsize[p]) /
-				       (2 * tpg->hdownsampling[p]);
-	tpg->recalc_square_border = true;
-}
-
-static enum tpg_color tpg_get_textbg_color(struct tpg_data *tpg)
-{
-	switch (tpg->pattern) {
-	case TPG_PAT_BLACK:
-		return TPG_COLOR_100_WHITE;
-	case TPG_PAT_CSC_COLORBAR:
-		return TPG_COLOR_CSC_BLACK;
-	default:
-		return TPG_COLOR_100_BLACK;
-	}
-}
-
-static enum tpg_color tpg_get_textfg_color(struct tpg_data *tpg)
-{
-	switch (tpg->pattern) {
-	case TPG_PAT_75_COLORBAR:
-	case TPG_PAT_CSC_COLORBAR:
-		return TPG_COLOR_CSC_WHITE;
-	case TPG_PAT_BLACK:
-		return TPG_COLOR_100_BLACK;
-	default:
-		return TPG_COLOR_100_WHITE;
-	}
-}
-
-static inline int rec709_to_linear(int v)
-{
-	v = clamp(v, 0, 0xff0);
-	return tpg_rec709_to_linear[v];
-}
-
-static inline int linear_to_rec709(int v)
-{
-	v = clamp(v, 0, 0xff0);
-	return tpg_linear_to_rec709[v];
-}
-
-static void rgb2ycbcr(const int m[3][3], int r, int g, int b,
-			int y_offset, int *y, int *cb, int *cr)
-{
-	*y  = ((m[0][0] * r + m[0][1] * g + m[0][2] * b) >> 16) + (y_offset << 4);
-	*cb = ((m[1][0] * r + m[1][1] * g + m[1][2] * b) >> 16) + (128 << 4);
-	*cr = ((m[2][0] * r + m[2][1] * g + m[2][2] * b) >> 16) + (128 << 4);
-}
-
-static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
-			   int *y, int *cb, int *cr)
-{
-#define COEFF(v, r) ((int)(0.5 + (v) * (r) * 256.0))
-
-	static const int bt601[3][3] = {
-		{ COEFF(0.299, 219),  COEFF(0.587, 219),  COEFF(0.114, 219)  },
-		{ COEFF(-0.169, 224), COEFF(-0.331, 224), COEFF(0.5, 224)    },
-		{ COEFF(0.5, 224),    COEFF(-0.419, 224), COEFF(-0.081, 224) },
-	};
-	static const int bt601_full[3][3] = {
-		{ COEFF(0.299, 255),  COEFF(0.587, 255),  COEFF(0.114, 255)  },
-		{ COEFF(-0.169, 255), COEFF(-0.331, 255), COEFF(0.5, 255)    },
-		{ COEFF(0.5, 255),    COEFF(-0.419, 255), COEFF(-0.081, 255) },
-	};
-	static const int rec709[3][3] = {
-		{ COEFF(0.2126, 219),  COEFF(0.7152, 219),  COEFF(0.0722, 219)  },
-		{ COEFF(-0.1146, 224), COEFF(-0.3854, 224), COEFF(0.5, 224)     },
-		{ COEFF(0.5, 224),     COEFF(-0.4542, 224), COEFF(-0.0458, 224) },
-	};
-	static const int rec709_full[3][3] = {
-		{ COEFF(0.2126, 255),  COEFF(0.7152, 255),  COEFF(0.0722, 255)  },
-		{ COEFF(-0.1146, 255), COEFF(-0.3854, 255), COEFF(0.5, 255)     },
-		{ COEFF(0.5, 255),     COEFF(-0.4542, 255), COEFF(-0.0458, 255) },
-	};
-	static const int smpte240m[3][3] = {
-		{ COEFF(0.212, 219),  COEFF(0.701, 219),  COEFF(0.087, 219)  },
-		{ COEFF(-0.116, 224), COEFF(-0.384, 224), COEFF(0.5, 224)    },
-		{ COEFF(0.5, 224),    COEFF(-0.445, 224), COEFF(-0.055, 224) },
-	};
-	static const int smpte240m_full[3][3] = {
-		{ COEFF(0.212, 255),  COEFF(0.701, 255),  COEFF(0.087, 255)  },
-		{ COEFF(-0.116, 255), COEFF(-0.384, 255), COEFF(0.5, 255)    },
-		{ COEFF(0.5, 255),    COEFF(-0.445, 255), COEFF(-0.055, 255) },
-	};
-	static const int bt2020[3][3] = {
-		{ COEFF(0.2627, 219),  COEFF(0.6780, 219),  COEFF(0.0593, 219)  },
-		{ COEFF(-0.1396, 224), COEFF(-0.3604, 224), COEFF(0.5, 224)     },
-		{ COEFF(0.5, 224),     COEFF(-0.4598, 224), COEFF(-0.0402, 224) },
-	};
-	static const int bt2020_full[3][3] = {
-		{ COEFF(0.2627, 255),  COEFF(0.6780, 255),  COEFF(0.0593, 255)  },
-		{ COEFF(-0.1396, 255), COEFF(-0.3604, 255), COEFF(0.5, 255)     },
-		{ COEFF(0.5, 255),     COEFF(-0.4698, 255), COEFF(-0.0402, 255) },
-	};
-	static const int bt2020c[4] = {
-		COEFF(1.0 / 1.9404, 224), COEFF(1.0 / 1.5816, 224),
-		COEFF(1.0 / 1.7184, 224), COEFF(1.0 / 0.9936, 224),
-	};
-	static const int bt2020c_full[4] = {
-		COEFF(1.0 / 1.9404, 255), COEFF(1.0 / 1.5816, 255),
-		COEFF(1.0 / 1.7184, 255), COEFF(1.0 / 0.9936, 255),
-	};
-
-	bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
-	unsigned y_offset = full ? 0 : 16;
-	int lin_y, yc;
-
-	switch (tpg->real_ycbcr_enc) {
-	case V4L2_YCBCR_ENC_601:
-	case V4L2_YCBCR_ENC_SYCC:
-		rgb2ycbcr(full ? bt601_full : bt601, r, g, b, y_offset, y, cb, cr);
-		break;
-	case V4L2_YCBCR_ENC_XV601:
-		/* Ignore quantization range, there is only one possible
-		 * Y'CbCr encoding. */
-		rgb2ycbcr(bt601, r, g, b, 16, y, cb, cr);
-		break;
-	case V4L2_YCBCR_ENC_XV709:
-		/* Ignore quantization range, there is only one possible
-		 * Y'CbCr encoding. */
-		rgb2ycbcr(rec709, r, g, b, 16, y, cb, cr);
-		break;
-	case V4L2_YCBCR_ENC_BT2020:
-		rgb2ycbcr(full ? bt2020_full : bt2020, r, g, b, y_offset, y, cb, cr);
-		break;
-	case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
-		lin_y = (COEFF(0.2627, 255) * rec709_to_linear(r) +
-			 COEFF(0.6780, 255) * rec709_to_linear(g) +
-			 COEFF(0.0593, 255) * rec709_to_linear(b)) >> 16;
-		yc = linear_to_rec709(lin_y);
-		*y = full ? yc : (yc * 219) / 255 + (16 << 4);
-		if (b <= yc)
-			*cb = (((b - yc) * (full ? bt2020c_full[0] : bt2020c[0])) >> 16) + (128 << 4);
-		else
-			*cb = (((b - yc) * (full ? bt2020c_full[1] : bt2020c[1])) >> 16) + (128 << 4);
-		if (r <= yc)
-			*cr = (((r - yc) * (full ? bt2020c_full[2] : bt2020c[2])) >> 16) + (128 << 4);
-		else
-			*cr = (((r - yc) * (full ? bt2020c_full[3] : bt2020c[3])) >> 16) + (128 << 4);
-		break;
-	case V4L2_YCBCR_ENC_SMPTE240M:
-		rgb2ycbcr(full ? smpte240m_full : smpte240m, r, g, b, y_offset, y, cb, cr);
-		break;
-	case V4L2_YCBCR_ENC_709:
-	default:
-		rgb2ycbcr(full ? rec709_full : rec709, r, g, b, y_offset, y, cb, cr);
-		break;
-	}
-}
-
-static void ycbcr2rgb(const int m[3][3], int y, int cb, int cr,
-			int y_offset, int *r, int *g, int *b)
-{
-	y -= y_offset << 4;
-	cb -= 128 << 4;
-	cr -= 128 << 4;
-	*r = m[0][0] * y + m[0][1] * cb + m[0][2] * cr;
-	*g = m[1][0] * y + m[1][1] * cb + m[1][2] * cr;
-	*b = m[2][0] * y + m[2][1] * cb + m[2][2] * cr;
-	*r = clamp(*r >> 12, 0, 0xff0);
-	*g = clamp(*g >> 12, 0, 0xff0);
-	*b = clamp(*b >> 12, 0, 0xff0);
-}
-
-static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr,
-			   int *r, int *g, int *b)
-{
-#undef COEFF
-#define COEFF(v, r) ((int)(0.5 + (v) * ((255.0 * 255.0 * 16.0) / (r))))
-	static const int bt601[3][3] = {
-		{ COEFF(1, 219), COEFF(0, 224),       COEFF(1.4020, 224)  },
-		{ COEFF(1, 219), COEFF(-0.3441, 224), COEFF(-0.7141, 224) },
-		{ COEFF(1, 219), COEFF(1.7720, 224),  COEFF(0, 224)       },
-	};
-	static const int bt601_full[3][3] = {
-		{ COEFF(1, 255), COEFF(0, 255),       COEFF(1.4020, 255)  },
-		{ COEFF(1, 255), COEFF(-0.3441, 255), COEFF(-0.7141, 255) },
-		{ COEFF(1, 255), COEFF(1.7720, 255),  COEFF(0, 255)       },
-	};
-	static const int rec709[3][3] = {
-		{ COEFF(1, 219), COEFF(0, 224),       COEFF(1.5748, 224)  },
-		{ COEFF(1, 219), COEFF(-0.1873, 224), COEFF(-0.4681, 224) },
-		{ COEFF(1, 219), COEFF(1.8556, 224),  COEFF(0, 224)       },
-	};
-	static const int rec709_full[3][3] = {
-		{ COEFF(1, 255), COEFF(0, 255),       COEFF(1.5748, 255)  },
-		{ COEFF(1, 255), COEFF(-0.1873, 255), COEFF(-0.4681, 255) },
-		{ COEFF(1, 255), COEFF(1.8556, 255),  COEFF(0, 255)       },
-	};
-	static const int smpte240m[3][3] = {
-		{ COEFF(1, 219), COEFF(0, 224),       COEFF(1.5756, 224)  },
-		{ COEFF(1, 219), COEFF(-0.2253, 224), COEFF(-0.4767, 224) },
-		{ COEFF(1, 219), COEFF(1.8270, 224),  COEFF(0, 224)       },
-	};
-	static const int smpte240m_full[3][3] = {
-		{ COEFF(1, 255), COEFF(0, 255),       COEFF(1.5756, 255)  },
-		{ COEFF(1, 255), COEFF(-0.2253, 255), COEFF(-0.4767, 255) },
-		{ COEFF(1, 255), COEFF(1.8270, 255),  COEFF(0, 255)       },
-	};
-	static const int bt2020[3][3] = {
-		{ COEFF(1, 219), COEFF(0, 224),       COEFF(1.4746, 224)  },
-		{ COEFF(1, 219), COEFF(-0.1646, 224), COEFF(-0.5714, 224) },
-		{ COEFF(1, 219), COEFF(1.8814, 224),  COEFF(0, 224)       },
-	};
-	static const int bt2020_full[3][3] = {
-		{ COEFF(1, 255), COEFF(0, 255),       COEFF(1.4746, 255)  },
-		{ COEFF(1, 255), COEFF(-0.1646, 255), COEFF(-0.5714, 255) },
-		{ COEFF(1, 255), COEFF(1.8814, 255),  COEFF(0, 255)       },
-	};
-	static const int bt2020c[4] = {
-		COEFF(1.9404, 224), COEFF(1.5816, 224),
-		COEFF(1.7184, 224), COEFF(0.9936, 224),
-	};
-	static const int bt2020c_full[4] = {
-		COEFF(1.9404, 255), COEFF(1.5816, 255),
-		COEFF(1.7184, 255), COEFF(0.9936, 255),
-	};
-
-	bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
-	unsigned y_offset = full ? 0 : 16;
-	int y_fac = full ? COEFF(1.0, 255) : COEFF(1.0, 219);
-	int lin_r, lin_g, lin_b, lin_y;
-
-	switch (tpg->real_ycbcr_enc) {
-	case V4L2_YCBCR_ENC_601:
-	case V4L2_YCBCR_ENC_SYCC:
-		ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, y_offset, r, g, b);
-		break;
-	case V4L2_YCBCR_ENC_XV601:
-		/* Ignore quantization range, there is only one possible
-		 * Y'CbCr encoding. */
-		ycbcr2rgb(bt601, y, cb, cr, 16, r, g, b);
-		break;
-	case V4L2_YCBCR_ENC_XV709:
-		/* Ignore quantization range, there is only one possible
-		 * Y'CbCr encoding. */
-		ycbcr2rgb(rec709, y, cb, cr, 16, r, g, b);
-		break;
-	case V4L2_YCBCR_ENC_BT2020:
-		ycbcr2rgb(full ? bt2020_full : bt2020, y, cb, cr, y_offset, r, g, b);
-		break;
-	case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
-		y -= full ? 0 : 16 << 4;
-		cb -= 128 << 4;
-		cr -= 128 << 4;
-
-		if (cb <= 0)
-			*b = y_fac * y + (full ? bt2020c_full[0] : bt2020c[0]) * cb;
-		else
-			*b = y_fac * y + (full ? bt2020c_full[1] : bt2020c[1]) * cb;
-		*b = *b >> 12;
-		if (cr <= 0)
-			*r = y_fac * y + (full ? bt2020c_full[2] : bt2020c[2]) * cr;
-		else
-			*r = y_fac * y + (full ? bt2020c_full[3] : bt2020c[3]) * cr;
-		*r = *r >> 12;
-		lin_r = rec709_to_linear(*r);
-		lin_b = rec709_to_linear(*b);
-		lin_y = rec709_to_linear((y * 255) / (full ? 255 : 219));
-
-		lin_g = COEFF(1.0 / 0.6780, 255) * lin_y -
-			COEFF(0.2627 / 0.6780, 255) * lin_r -
-			COEFF(0.0593 / 0.6780, 255) * lin_b;
-		*g = linear_to_rec709(lin_g >> 12);
-		break;
-	case V4L2_YCBCR_ENC_SMPTE240M:
-		ycbcr2rgb(full ? smpte240m_full : smpte240m, y, cb, cr, y_offset, r, g, b);
-		break;
-	case V4L2_YCBCR_ENC_709:
-	default:
-		ycbcr2rgb(full ? rec709_full : rec709, y, cb, cr, y_offset, r, g, b);
-		break;
-	}
-}
-
-/* precalculate color bar values to speed up rendering */
-static void precalculate_color(struct tpg_data *tpg, int k)
-{
-	int col = k;
-	int r = tpg_colors[col].r;
-	int g = tpg_colors[col].g;
-	int b = tpg_colors[col].b;
-
-	if (k == TPG_COLOR_TEXTBG) {
-		col = tpg_get_textbg_color(tpg);
-
-		r = tpg_colors[col].r;
-		g = tpg_colors[col].g;
-		b = tpg_colors[col].b;
-	} else if (k == TPG_COLOR_TEXTFG) {
-		col = tpg_get_textfg_color(tpg);
-
-		r = tpg_colors[col].r;
-		g = tpg_colors[col].g;
-		b = tpg_colors[col].b;
-	} else if (tpg->pattern == TPG_PAT_NOISE) {
-		r = g = b = prandom_u32_max(256);
-	} else if (k == TPG_COLOR_RANDOM) {
-		r = g = b = tpg->qual_offset + prandom_u32_max(196);
-	} else if (k >= TPG_COLOR_RAMP) {
-		r = g = b = k - TPG_COLOR_RAMP;
-	}
-
-	if (tpg->pattern == TPG_PAT_CSC_COLORBAR && col <= TPG_COLOR_CSC_BLACK) {
-		r = tpg_csc_colors[tpg->colorspace][tpg->real_xfer_func][col].r;
-		g = tpg_csc_colors[tpg->colorspace][tpg->real_xfer_func][col].g;
-		b = tpg_csc_colors[tpg->colorspace][tpg->real_xfer_func][col].b;
-	} else {
-		r <<= 4;
-		g <<= 4;
-		b <<= 4;
-	}
-	if (tpg->qual == TPG_QUAL_GRAY || tpg->fourcc == V4L2_PIX_FMT_GREY ||
-	    tpg->fourcc == V4L2_PIX_FMT_Y16 ||
-	    tpg->fourcc == V4L2_PIX_FMT_Y16_BE) {
-		/* Rec. 709 Luma function */
-		/* (0.2126, 0.7152, 0.0722) * (255 * 256) */
-		r = g = b = (13879 * r + 46688 * g + 4713 * b) >> 16;
-	}
-
-	/*
-	 * The assumption is that the RGB output is always full range,
-	 * so only if the rgb_range overrides the 'real' rgb range do
-	 * we need to convert the RGB values.
-	 *
-	 * Remember that r, g and b are still in the 0 - 0xff0 range.
-	 */
-	if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
-	    tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL) {
-		/*
-		 * Convert from full range (which is what r, g and b are)
-		 * to limited range (which is the 'real' RGB range), which
-		 * is then interpreted as full range.
-		 */
-		r = (r * 219) / 255 + (16 << 4);
-		g = (g * 219) / 255 + (16 << 4);
-		b = (b * 219) / 255 + (16 << 4);
-	} else if (tpg->real_rgb_range != V4L2_DV_RGB_RANGE_LIMITED &&
-		   tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED) {
-		/*
-		 * Clamp r, g and b to the limited range and convert to full
-		 * range since that's what we deliver.
-		 */
-		r = clamp(r, 16 << 4, 235 << 4);
-		g = clamp(g, 16 << 4, 235 << 4);
-		b = clamp(b, 16 << 4, 235 << 4);
-		r = (r - (16 << 4)) * 255 / 219;
-		g = (g - (16 << 4)) * 255 / 219;
-		b = (b - (16 << 4)) * 255 / 219;
-	}
-
-	if (tpg->brightness != 128 || tpg->contrast != 128 ||
-	    tpg->saturation != 128 || tpg->hue) {
-		/* Implement these operations */
-		int y, cb, cr;
-		int tmp_cb, tmp_cr;
-
-		/* First convert to YCbCr */
-
-		color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
-
-		y = (16 << 4) + ((y - (16 << 4)) * tpg->contrast) / 128;
-		y += (tpg->brightness << 4) - (128 << 4);
-
-		cb -= 128 << 4;
-		cr -= 128 << 4;
-		tmp_cb = (cb * cos(128 + tpg->hue)) / 127 + (cr * sin[128 + tpg->hue]) / 127;
-		tmp_cr = (cr * cos(128 + tpg->hue)) / 127 - (cb * sin[128 + tpg->hue]) / 127;
-
-		cb = (128 << 4) + (tmp_cb * tpg->contrast * tpg->saturation) / (128 * 128);
-		cr = (128 << 4) + (tmp_cr * tpg->contrast * tpg->saturation) / (128 * 128);
-		if (tpg->is_yuv) {
-			tpg->colors[k][0] = clamp(y >> 4, 1, 254);
-			tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
-			tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
-			return;
-		}
-		ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
-	}
-
-	if (tpg->is_yuv) {
-		/* Convert to YCbCr */
-		int y, cb, cr;
-
-		color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
-
-		if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
-			y = clamp(y, 16 << 4, 235 << 4);
-			cb = clamp(cb, 16 << 4, 240 << 4);
-			cr = clamp(cr, 16 << 4, 240 << 4);
-		}
-		y = clamp(y >> 4, 1, 254);
-		cb = clamp(cb >> 4, 1, 254);
-		cr = clamp(cr >> 4, 1, 254);
-		switch (tpg->fourcc) {
-		case V4L2_PIX_FMT_YUV444:
-			y >>= 4;
-			cb >>= 4;
-			cr >>= 4;
-			break;
-		case V4L2_PIX_FMT_YUV555:
-			y >>= 3;
-			cb >>= 3;
-			cr >>= 3;
-			break;
-		case V4L2_PIX_FMT_YUV565:
-			y >>= 3;
-			cb >>= 2;
-			cr >>= 3;
-			break;
-		}
-		tpg->colors[k][0] = y;
-		tpg->colors[k][1] = cb;
-		tpg->colors[k][2] = cr;
-	} else {
-		if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
-			r = (r * 219) / 255 + (16 << 4);
-			g = (g * 219) / 255 + (16 << 4);
-			b = (b * 219) / 255 + (16 << 4);
-		}
-		switch (tpg->fourcc) {
-		case V4L2_PIX_FMT_RGB332:
-			r >>= 9;
-			g >>= 9;
-			b >>= 10;
-			break;
-		case V4L2_PIX_FMT_RGB565:
-		case V4L2_PIX_FMT_RGB565X:
-			r >>= 7;
-			g >>= 6;
-			b >>= 7;
-			break;
-		case V4L2_PIX_FMT_RGB444:
-		case V4L2_PIX_FMT_XRGB444:
-		case V4L2_PIX_FMT_ARGB444:
-			r >>= 8;
-			g >>= 8;
-			b >>= 8;
-			break;
-		case V4L2_PIX_FMT_RGB555:
-		case V4L2_PIX_FMT_XRGB555:
-		case V4L2_PIX_FMT_ARGB555:
-		case V4L2_PIX_FMT_RGB555X:
-		case V4L2_PIX_FMT_XRGB555X:
-		case V4L2_PIX_FMT_ARGB555X:
-			r >>= 7;
-			g >>= 7;
-			b >>= 7;
-			break;
-		case V4L2_PIX_FMT_BGR666:
-			r >>= 6;
-			g >>= 6;
-			b >>= 6;
-			break;
-		default:
-			r >>= 4;
-			g >>= 4;
-			b >>= 4;
-			break;
-		}
-
-		tpg->colors[k][0] = r;
-		tpg->colors[k][1] = g;
-		tpg->colors[k][2] = b;
-	}
-}
-
-static void tpg_precalculate_colors(struct tpg_data *tpg)
-{
-	int k;
-
-	for (k = 0; k < TPG_COLOR_MAX; k++)
-		precalculate_color(tpg, k);
-}
-
-/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */
-static void gen_twopix(struct tpg_data *tpg,
-		u8 buf[TPG_MAX_PLANES][8], int color, bool odd)
-{
-	unsigned offset = odd * tpg->twopixelsize[0] / 2;
-	u8 alpha = tpg->alpha_component;
-	u8 r_y, g_u, b_v;
-
-	if (tpg->alpha_red_only && color != TPG_COLOR_CSC_RED &&
-				   color != TPG_COLOR_100_RED &&
-				   color != TPG_COLOR_75_RED)
-		alpha = 0;
-	if (color == TPG_COLOR_RANDOM)
-		precalculate_color(tpg, color);
-	r_y = tpg->colors[color][0]; /* R or precalculated Y */
-	g_u = tpg->colors[color][1]; /* G or precalculated U */
-	b_v = tpg->colors[color][2]; /* B or precalculated V */
-
-	switch (tpg->fourcc) {
-	case V4L2_PIX_FMT_GREY:
-		buf[0][offset] = r_y;
-		break;
-	case V4L2_PIX_FMT_Y16:
-		/*
-		 * Ideally both bytes should be set to r_y, but then you won't
-		 * be able to detect endian problems. So keep it 0 except for
-		 * the corner case where r_y is 0xff so white really will be
-		 * white (0xffff).
-		 */
-		buf[0][offset] = r_y == 0xff ? r_y : 0;
-		buf[0][offset+1] = r_y;
-		break;
-	case V4L2_PIX_FMT_Y16_BE:
-		/* See comment for V4L2_PIX_FMT_Y16 above */
-		buf[0][offset] = r_y;
-		buf[0][offset+1] = r_y == 0xff ? r_y : 0;
-		break;
-	case V4L2_PIX_FMT_YUV422M:
-	case V4L2_PIX_FMT_YUV422P:
-	case V4L2_PIX_FMT_YUV420:
-	case V4L2_PIX_FMT_YUV420M:
-		buf[0][offset] = r_y;
-		if (odd) {
-			buf[1][0] = (buf[1][0] + g_u) / 2;
-			buf[2][0] = (buf[2][0] + b_v) / 2;
-			buf[1][1] = buf[1][0];
-			buf[2][1] = buf[2][0];
-			break;
-		}
-		buf[1][0] = g_u;
-		buf[2][0] = b_v;
-		break;
-	case V4L2_PIX_FMT_YVU422M:
-	case V4L2_PIX_FMT_YVU420:
-	case V4L2_PIX_FMT_YVU420M:
-		buf[0][offset] = r_y;
-		if (odd) {
-			buf[1][0] = (buf[1][0] + b_v) / 2;
-			buf[2][0] = (buf[2][0] + g_u) / 2;
-			buf[1][1] = buf[1][0];
-			buf[2][1] = buf[2][0];
-			break;
-		}
-		buf[1][0] = b_v;
-		buf[2][0] = g_u;
-		break;
-
-	case V4L2_PIX_FMT_NV12:
-	case V4L2_PIX_FMT_NV12M:
-	case V4L2_PIX_FMT_NV16:
-	case V4L2_PIX_FMT_NV16M:
-		buf[0][offset] = r_y;
-		if (odd) {
-			buf[1][0] = (buf[1][0] + g_u) / 2;
-			buf[1][1] = (buf[1][1] + b_v) / 2;
-			break;
-		}
-		buf[1][0] = g_u;
-		buf[1][1] = b_v;
-		break;
-	case V4L2_PIX_FMT_NV21:
-	case V4L2_PIX_FMT_NV21M:
-	case V4L2_PIX_FMT_NV61:
-	case V4L2_PIX_FMT_NV61M:
-		buf[0][offset] = r_y;
-		if (odd) {
-			buf[1][0] = (buf[1][0] + b_v) / 2;
-			buf[1][1] = (buf[1][1] + g_u) / 2;
-			break;
-		}
-		buf[1][0] = b_v;
-		buf[1][1] = g_u;
-		break;
-
-	case V4L2_PIX_FMT_YUV444M:
-		buf[0][offset] = r_y;
-		buf[1][offset] = g_u;
-		buf[2][offset] = b_v;
-		break;
-
-	case V4L2_PIX_FMT_YVU444M:
-		buf[0][offset] = r_y;
-		buf[1][offset] = b_v;
-		buf[2][offset] = g_u;
-		break;
-
-	case V4L2_PIX_FMT_NV24:
-		buf[0][offset] = r_y;
-		buf[1][2 * offset] = g_u;
-		buf[1][2 * offset + 1] = b_v;
-		break;
-
-	case V4L2_PIX_FMT_NV42:
-		buf[0][offset] = r_y;
-		buf[1][2 * offset] = b_v;
-		buf[1][2 * offset + 1] = g_u;
-		break;
-
-	case V4L2_PIX_FMT_YUYV:
-		buf[0][offset] = r_y;
-		if (odd) {
-			buf[0][1] = (buf[0][1] + g_u) / 2;
-			buf[0][3] = (buf[0][3] + b_v) / 2;
-			break;
-		}
-		buf[0][1] = g_u;
-		buf[0][3] = b_v;
-		break;
-	case V4L2_PIX_FMT_UYVY:
-		buf[0][offset + 1] = r_y;
-		if (odd) {
-			buf[0][0] = (buf[0][0] + g_u) / 2;
-			buf[0][2] = (buf[0][2] + b_v) / 2;
-			break;
-		}
-		buf[0][0] = g_u;
-		buf[0][2] = b_v;
-		break;
-	case V4L2_PIX_FMT_YVYU:
-		buf[0][offset] = r_y;
-		if (odd) {
-			buf[0][1] = (buf[0][1] + b_v) / 2;
-			buf[0][3] = (buf[0][3] + g_u) / 2;
-			break;
-		}
-		buf[0][1] = b_v;
-		buf[0][3] = g_u;
-		break;
-	case V4L2_PIX_FMT_VYUY:
-		buf[0][offset + 1] = r_y;
-		if (odd) {
-			buf[0][0] = (buf[0][0] + b_v) / 2;
-			buf[0][2] = (buf[0][2] + g_u) / 2;
-			break;
-		}
-		buf[0][0] = b_v;
-		buf[0][2] = g_u;
-		break;
-	case V4L2_PIX_FMT_RGB332:
-		buf[0][offset] = (r_y << 5) | (g_u << 2) | b_v;
-		break;
-	case V4L2_PIX_FMT_YUV565:
-	case V4L2_PIX_FMT_RGB565:
-		buf[0][offset] = (g_u << 5) | b_v;
-		buf[0][offset + 1] = (r_y << 3) | (g_u >> 3);
-		break;
-	case V4L2_PIX_FMT_RGB565X:
-		buf[0][offset] = (r_y << 3) | (g_u >> 3);
-		buf[0][offset + 1] = (g_u << 5) | b_v;
-		break;
-	case V4L2_PIX_FMT_RGB444:
-	case V4L2_PIX_FMT_XRGB444:
-		alpha = 0;
-		/* fall through */
-	case V4L2_PIX_FMT_YUV444:
-	case V4L2_PIX_FMT_ARGB444:
-		buf[0][offset] = (g_u << 4) | b_v;
-		buf[0][offset + 1] = (alpha & 0xf0) | r_y;
-		break;
-	case V4L2_PIX_FMT_RGB555:
-	case V4L2_PIX_FMT_XRGB555:
-		alpha = 0;
-		/* fall through */
-	case V4L2_PIX_FMT_YUV555:
-	case V4L2_PIX_FMT_ARGB555:
-		buf[0][offset] = (g_u << 5) | b_v;
-		buf[0][offset + 1] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
-		break;
-	case V4L2_PIX_FMT_RGB555X:
-	case V4L2_PIX_FMT_XRGB555X:
-		alpha = 0;
-		/* fall through */
-	case V4L2_PIX_FMT_ARGB555X:
-		buf[0][offset] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
-		buf[0][offset + 1] = (g_u << 5) | b_v;
-		break;
-	case V4L2_PIX_FMT_RGB24:
-		buf[0][offset] = r_y;
-		buf[0][offset + 1] = g_u;
-		buf[0][offset + 2] = b_v;
-		break;
-	case V4L2_PIX_FMT_BGR24:
-		buf[0][offset] = b_v;
-		buf[0][offset + 1] = g_u;
-		buf[0][offset + 2] = r_y;
-		break;
-	case V4L2_PIX_FMT_BGR666:
-		buf[0][offset] = (b_v << 2) | (g_u >> 4);
-		buf[0][offset + 1] = (g_u << 4) | (r_y >> 2);
-		buf[0][offset + 2] = r_y << 6;
-		buf[0][offset + 3] = 0;
-		break;
-	case V4L2_PIX_FMT_RGB32:
-	case V4L2_PIX_FMT_XRGB32:
-		alpha = 0;
-		/* fall through */
-	case V4L2_PIX_FMT_YUV32:
-	case V4L2_PIX_FMT_ARGB32:
-		buf[0][offset] = alpha;
-		buf[0][offset + 1] = r_y;
-		buf[0][offset + 2] = g_u;
-		buf[0][offset + 3] = b_v;
-		break;
-	case V4L2_PIX_FMT_BGR32:
-	case V4L2_PIX_FMT_XBGR32:
-		alpha = 0;
-		/* fall through */
-	case V4L2_PIX_FMT_ABGR32:
-		buf[0][offset] = b_v;
-		buf[0][offset + 1] = g_u;
-		buf[0][offset + 2] = r_y;
-		buf[0][offset + 3] = alpha;
-		break;
-	case V4L2_PIX_FMT_SBGGR8:
-		buf[0][offset] = odd ? g_u : b_v;
-		buf[1][offset] = odd ? r_y : g_u;
-		break;
-	case V4L2_PIX_FMT_SGBRG8:
-		buf[0][offset] = odd ? b_v : g_u;
-		buf[1][offset] = odd ? g_u : r_y;
-		break;
-	case V4L2_PIX_FMT_SGRBG8:
-		buf[0][offset] = odd ? r_y : g_u;
-		buf[1][offset] = odd ? g_u : b_v;
-		break;
-	case V4L2_PIX_FMT_SRGGB8:
-		buf[0][offset] = odd ? g_u : r_y;
-		buf[1][offset] = odd ? b_v : g_u;
-		break;
-	case V4L2_PIX_FMT_SBGGR10:
-		buf[0][offset] = odd ? g_u << 2 : b_v << 2;
-		buf[0][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
-		buf[1][offset] = odd ? r_y << 2 : g_u << 2;
-		buf[1][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
-		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
-		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
-		break;
-	case V4L2_PIX_FMT_SGBRG10:
-		buf[0][offset] = odd ? b_v << 2 : g_u << 2;
-		buf[0][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
-		buf[1][offset] = odd ? g_u << 2 : r_y << 2;
-		buf[1][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
-		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
-		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
-		break;
-	case V4L2_PIX_FMT_SGRBG10:
-		buf[0][offset] = odd ? r_y << 2 : g_u << 2;
-		buf[0][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
-		buf[1][offset] = odd ? g_u << 2 : b_v << 2;
-		buf[1][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
-		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
-		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
-		break;
-	case V4L2_PIX_FMT_SRGGB10:
-		buf[0][offset] = odd ? g_u << 2 : r_y << 2;
-		buf[0][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
-		buf[1][offset] = odd ? b_v << 2 : g_u << 2;
-		buf[1][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
-		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
-		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
-		break;
-	case V4L2_PIX_FMT_SBGGR12:
-		buf[0][offset] = odd ? g_u << 4 : b_v << 4;
-		buf[0][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
-		buf[1][offset] = odd ? r_y << 4 : g_u << 4;
-		buf[1][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
-		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
-		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
-		break;
-	case V4L2_PIX_FMT_SGBRG12:
-		buf[0][offset] = odd ? b_v << 4 : g_u << 4;
-		buf[0][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
-		buf[1][offset] = odd ? g_u << 4 : r_y << 4;
-		buf[1][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
-		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
-		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
-		break;
-	case V4L2_PIX_FMT_SGRBG12:
-		buf[0][offset] = odd ? r_y << 4 : g_u << 4;
-		buf[0][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
-		buf[1][offset] = odd ? g_u << 4 : b_v << 4;
-		buf[1][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
-		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
-		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
-		break;
-	case V4L2_PIX_FMT_SRGGB12:
-		buf[0][offset] = odd ? g_u << 4 : r_y << 4;
-		buf[0][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
-		buf[1][offset] = odd ? b_v << 4 : g_u << 4;
-		buf[1][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
-		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
-		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
-		break;
-	}
-}
-
-unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line)
-{
-	switch (tpg->fourcc) {
-	case V4L2_PIX_FMT_SBGGR8:
-	case V4L2_PIX_FMT_SGBRG8:
-	case V4L2_PIX_FMT_SGRBG8:
-	case V4L2_PIX_FMT_SRGGB8:
-	case V4L2_PIX_FMT_SBGGR10:
-	case V4L2_PIX_FMT_SGBRG10:
-	case V4L2_PIX_FMT_SGRBG10:
-	case V4L2_PIX_FMT_SRGGB10:
-	case V4L2_PIX_FMT_SBGGR12:
-	case V4L2_PIX_FMT_SGBRG12:
-	case V4L2_PIX_FMT_SGRBG12:
-	case V4L2_PIX_FMT_SRGGB12:
-		return buf_line & 1;
-	default:
-		return 0;
-	}
-}
-
-/* Return how many pattern lines are used by the current pattern. */
-static unsigned tpg_get_pat_lines(const struct tpg_data *tpg)
-{
-	switch (tpg->pattern) {
-	case TPG_PAT_CHECKERS_16X16:
-	case TPG_PAT_CHECKERS_2X2:
-	case TPG_PAT_CHECKERS_1X1:
-	case TPG_PAT_COLOR_CHECKERS_2X2:
-	case TPG_PAT_COLOR_CHECKERS_1X1:
-	case TPG_PAT_ALTERNATING_HLINES:
-	case TPG_PAT_CROSS_1_PIXEL:
-	case TPG_PAT_CROSS_2_PIXELS:
-	case TPG_PAT_CROSS_10_PIXELS:
-		return 2;
-	case TPG_PAT_100_COLORSQUARES:
-	case TPG_PAT_100_HCOLORBAR:
-		return 8;
-	default:
-		return 1;
-	}
-}
-
-/* Which pattern line should be used for the given frame line. */
-static unsigned tpg_get_pat_line(const struct tpg_data *tpg, unsigned line)
-{
-	switch (tpg->pattern) {
-	case TPG_PAT_CHECKERS_16X16:
-		return (line >> 4) & 1;
-	case TPG_PAT_CHECKERS_1X1:
-	case TPG_PAT_COLOR_CHECKERS_1X1:
-	case TPG_PAT_ALTERNATING_HLINES:
-		return line & 1;
-	case TPG_PAT_CHECKERS_2X2:
-	case TPG_PAT_COLOR_CHECKERS_2X2:
-		return (line & 2) >> 1;
-	case TPG_PAT_100_COLORSQUARES:
-	case TPG_PAT_100_HCOLORBAR:
-		return (line * 8) / tpg->src_height;
-	case TPG_PAT_CROSS_1_PIXEL:
-		return line == tpg->src_height / 2;
-	case TPG_PAT_CROSS_2_PIXELS:
-		return (line + 1) / 2 == tpg->src_height / 4;
-	case TPG_PAT_CROSS_10_PIXELS:
-		return (line + 10) / 20 == tpg->src_height / 40;
-	default:
-		return 0;
-	}
-}
-
-/*
- * Which color should be used for the given pattern line and X coordinate.
- * Note: x is in the range 0 to 2 * tpg->src_width.
- */
-static enum tpg_color tpg_get_color(const struct tpg_data *tpg,
-				    unsigned pat_line, unsigned x)
-{
-	/* Maximum number of bars are TPG_COLOR_MAX - otherwise, the input print code
-	   should be modified */
-	static const enum tpg_color bars[3][8] = {
-		/* Standard ITU-R 75% color bar sequence */
-		{ TPG_COLOR_CSC_WHITE,   TPG_COLOR_75_YELLOW,
-		  TPG_COLOR_75_CYAN,     TPG_COLOR_75_GREEN,
-		  TPG_COLOR_75_MAGENTA,  TPG_COLOR_75_RED,
-		  TPG_COLOR_75_BLUE,     TPG_COLOR_100_BLACK, },
-		/* Standard ITU-R 100% color bar sequence */
-		{ TPG_COLOR_100_WHITE,   TPG_COLOR_100_YELLOW,
-		  TPG_COLOR_100_CYAN,    TPG_COLOR_100_GREEN,
-		  TPG_COLOR_100_MAGENTA, TPG_COLOR_100_RED,
-		  TPG_COLOR_100_BLUE,    TPG_COLOR_100_BLACK, },
-		/* Color bar sequence suitable to test CSC */
-		{ TPG_COLOR_CSC_WHITE,   TPG_COLOR_CSC_YELLOW,
-		  TPG_COLOR_CSC_CYAN,    TPG_COLOR_CSC_GREEN,
-		  TPG_COLOR_CSC_MAGENTA, TPG_COLOR_CSC_RED,
-		  TPG_COLOR_CSC_BLUE,    TPG_COLOR_CSC_BLACK, },
-	};
-
-	switch (tpg->pattern) {
-	case TPG_PAT_75_COLORBAR:
-	case TPG_PAT_100_COLORBAR:
-	case TPG_PAT_CSC_COLORBAR:
-		return bars[tpg->pattern][((x * 8) / tpg->src_width) % 8];
-	case TPG_PAT_100_COLORSQUARES:
-		return bars[1][(pat_line + (x * 8) / tpg->src_width) % 8];
-	case TPG_PAT_100_HCOLORBAR:
-		return bars[1][pat_line];
-	case TPG_PAT_BLACK:
-		return TPG_COLOR_100_BLACK;
-	case TPG_PAT_WHITE:
-		return TPG_COLOR_100_WHITE;
-	case TPG_PAT_RED:
-		return TPG_COLOR_100_RED;
-	case TPG_PAT_GREEN:
-		return TPG_COLOR_100_GREEN;
-	case TPG_PAT_BLUE:
-		return TPG_COLOR_100_BLUE;
-	case TPG_PAT_CHECKERS_16X16:
-		return (((x >> 4) & 1) ^ (pat_line & 1)) ?
-			TPG_COLOR_100_BLACK : TPG_COLOR_100_WHITE;
-	case TPG_PAT_CHECKERS_1X1:
-		return ((x & 1) ^ (pat_line & 1)) ?
-			TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
-	case TPG_PAT_COLOR_CHECKERS_1X1:
-		return ((x & 1) ^ (pat_line & 1)) ?
-			TPG_COLOR_100_RED : TPG_COLOR_100_BLUE;
-	case TPG_PAT_CHECKERS_2X2:
-		return (((x >> 1) & 1) ^ (pat_line & 1)) ?
-			TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
-	case TPG_PAT_COLOR_CHECKERS_2X2:
-		return (((x >> 1) & 1) ^ (pat_line & 1)) ?
-			TPG_COLOR_100_RED : TPG_COLOR_100_BLUE;
-	case TPG_PAT_ALTERNATING_HLINES:
-		return pat_line ? TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
-	case TPG_PAT_ALTERNATING_VLINES:
-		return (x & 1) ? TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
-	case TPG_PAT_CROSS_1_PIXEL:
-		if (pat_line || (x % tpg->src_width) == tpg->src_width / 2)
-			return TPG_COLOR_100_BLACK;
-		return TPG_COLOR_100_WHITE;
-	case TPG_PAT_CROSS_2_PIXELS:
-		if (pat_line || ((x % tpg->src_width) + 1) / 2 == tpg->src_width / 4)
-			return TPG_COLOR_100_BLACK;
-		return TPG_COLOR_100_WHITE;
-	case TPG_PAT_CROSS_10_PIXELS:
-		if (pat_line || ((x % tpg->src_width) + 10) / 20 == tpg->src_width / 40)
-			return TPG_COLOR_100_BLACK;
-		return TPG_COLOR_100_WHITE;
-	case TPG_PAT_GRAY_RAMP:
-		return TPG_COLOR_RAMP + ((x % tpg->src_width) * 256) / tpg->src_width;
-	default:
-		return TPG_COLOR_100_RED;
-	}
-}
-
-/*
- * Given the pixel aspect ratio and video aspect ratio calculate the
- * coordinates of a centered square and the coordinates of the border of
- * the active video area. The coordinates are relative to the source
- * frame rectangle.
- */
-static void tpg_calculate_square_border(struct tpg_data *tpg)
-{
-	unsigned w = tpg->src_width;
-	unsigned h = tpg->src_height;
-	unsigned sq_w, sq_h;
-
-	sq_w = (w * 2 / 5) & ~1;
-	if (((w - sq_w) / 2) & 1)
-		sq_w += 2;
-	sq_h = sq_w;
-	tpg->square.width = sq_w;
-	if (tpg->vid_aspect == TPG_VIDEO_ASPECT_16X9_ANAMORPHIC) {
-		unsigned ana_sq_w = (sq_w / 4) * 3;
-
-		if (((w - ana_sq_w) / 2) & 1)
-			ana_sq_w += 2;
-		tpg->square.width = ana_sq_w;
-	}
-	tpg->square.left = (w - tpg->square.width) / 2;
-	if (tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC)
-		sq_h = sq_w * 10 / 11;
-	else if (tpg->pix_aspect == TPG_PIXEL_ASPECT_PAL)
-		sq_h = sq_w * 59 / 54;
-	tpg->square.height = sq_h;
-	tpg->square.top = (h - sq_h) / 2;
-	tpg->border.left = 0;
-	tpg->border.width = w;
-	tpg->border.top = 0;
-	tpg->border.height = h;
-	switch (tpg->vid_aspect) {
-	case TPG_VIDEO_ASPECT_4X3:
-		if (tpg->pix_aspect)
-			return;
-		if (3 * w >= 4 * h) {
-			tpg->border.width = ((4 * h) / 3) & ~1;
-			if (((w - tpg->border.width) / 2) & ~1)
-				tpg->border.width -= 2;
-			tpg->border.left = (w - tpg->border.width) / 2;
-			break;
-		}
-		tpg->border.height = ((3 * w) / 4) & ~1;
-		tpg->border.top = (h - tpg->border.height) / 2;
-		break;
-	case TPG_VIDEO_ASPECT_14X9_CENTRE:
-		if (tpg->pix_aspect) {
-			tpg->border.height = tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC ? 420 : 506;
-			tpg->border.top = (h - tpg->border.height) / 2;
-			break;
-		}
-		if (9 * w >= 14 * h) {
-			tpg->border.width = ((14 * h) / 9) & ~1;
-			if (((w - tpg->border.width) / 2) & ~1)
-				tpg->border.width -= 2;
-			tpg->border.left = (w - tpg->border.width) / 2;
-			break;
-		}
-		tpg->border.height = ((9 * w) / 14) & ~1;
-		tpg->border.top = (h - tpg->border.height) / 2;
-		break;
-	case TPG_VIDEO_ASPECT_16X9_CENTRE:
-		if (tpg->pix_aspect) {
-			tpg->border.height = tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC ? 368 : 442;
-			tpg->border.top = (h - tpg->border.height) / 2;
-			break;
-		}
-		if (9 * w >= 16 * h) {
-			tpg->border.width = ((16 * h) / 9) & ~1;
-			if (((w - tpg->border.width) / 2) & ~1)
-				tpg->border.width -= 2;
-			tpg->border.left = (w - tpg->border.width) / 2;
-			break;
-		}
-		tpg->border.height = ((9 * w) / 16) & ~1;
-		tpg->border.top = (h - tpg->border.height) / 2;
-		break;
-	default:
-		break;
-	}
-}
-
-static void tpg_precalculate_line(struct tpg_data *tpg)
-{
-	enum tpg_color contrast;
-	u8 pix[TPG_MAX_PLANES][8];
-	unsigned pat;
-	unsigned p;
-	unsigned x;
-
-	switch (tpg->pattern) {
-	case TPG_PAT_GREEN:
-		contrast = TPG_COLOR_100_RED;
-		break;
-	case TPG_PAT_CSC_COLORBAR:
-		contrast = TPG_COLOR_CSC_GREEN;
-		break;
-	default:
-		contrast = TPG_COLOR_100_GREEN;
-		break;
-	}
-
-	for (pat = 0; pat < tpg_get_pat_lines(tpg); pat++) {
-		/* Coarse scaling with Bresenham */
-		unsigned int_part = tpg->src_width / tpg->scaled_width;
-		unsigned fract_part = tpg->src_width % tpg->scaled_width;
-		unsigned src_x = 0;
-		unsigned error = 0;
-
-		for (x = 0; x < tpg->scaled_width * 2; x += 2) {
-			unsigned real_x = src_x;
-			enum tpg_color color1, color2;
-
-			real_x = tpg->hflip ? tpg->src_width * 2 - real_x - 2 : real_x;
-			color1 = tpg_get_color(tpg, pat, real_x);
-
-			src_x += int_part;
-			error += fract_part;
-			if (error >= tpg->scaled_width) {
-				error -= tpg->scaled_width;
-				src_x++;
-			}
-
-			real_x = src_x;
-			real_x = tpg->hflip ? tpg->src_width * 2 - real_x - 2 : real_x;
-			color2 = tpg_get_color(tpg, pat, real_x);
-
-			src_x += int_part;
-			error += fract_part;
-			if (error >= tpg->scaled_width) {
-				error -= tpg->scaled_width;
-				src_x++;
-			}
-
-			gen_twopix(tpg, pix, tpg->hflip ? color2 : color1, 0);
-			gen_twopix(tpg, pix, tpg->hflip ? color1 : color2, 1);
-			for (p = 0; p < tpg->planes; p++) {
-				unsigned twopixsize = tpg->twopixelsize[p];
-				unsigned hdiv = tpg->hdownsampling[p];
-				u8 *pos = tpg->lines[pat][p] + tpg_hdiv(tpg, p, x);
-
-				memcpy(pos, pix[p], twopixsize / hdiv);
-			}
-		}
-	}
-
-	if (tpg->vdownsampling[tpg->planes - 1] > 1) {
-		unsigned pat_lines = tpg_get_pat_lines(tpg);
-
-		for (pat = 0; pat < pat_lines; pat++) {
-			unsigned next_pat = (pat + 1) % pat_lines;
-
-			for (p = 1; p < tpg->planes; p++) {
-				unsigned w = tpg_hdiv(tpg, p, tpg->scaled_width * 2);
-				u8 *pos1 = tpg->lines[pat][p];
-				u8 *pos2 = tpg->lines[next_pat][p];
-				u8 *dest = tpg->downsampled_lines[pat][p];
-
-				for (x = 0; x < w; x++, pos1++, pos2++, dest++)
-					*dest = ((u16)*pos1 + (u16)*pos2) / 2;
-			}
-		}
-	}
-
-	gen_twopix(tpg, pix, contrast, 0);
-	gen_twopix(tpg, pix, contrast, 1);
-	for (p = 0; p < tpg->planes; p++) {
-		unsigned twopixsize = tpg->twopixelsize[p];
-		u8 *pos = tpg->contrast_line[p];
-
-		for (x = 0; x < tpg->scaled_width; x += 2, pos += twopixsize)
-			memcpy(pos, pix[p], twopixsize);
-	}
-
-	gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 0);
-	gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 1);
-	for (p = 0; p < tpg->planes; p++) {
-		unsigned twopixsize = tpg->twopixelsize[p];
-		u8 *pos = tpg->black_line[p];
-
-		for (x = 0; x < tpg->scaled_width; x += 2, pos += twopixsize)
-			memcpy(pos, pix[p], twopixsize);
-	}
-
-	for (x = 0; x < tpg->scaled_width * 2; x += 2) {
-		gen_twopix(tpg, pix, TPG_COLOR_RANDOM, 0);
-		gen_twopix(tpg, pix, TPG_COLOR_RANDOM, 1);
-		for (p = 0; p < tpg->planes; p++) {
-			unsigned twopixsize = tpg->twopixelsize[p];
-			u8 *pos = tpg->random_line[p] + x * twopixsize / 2;
-
-			memcpy(pos, pix[p], twopixsize);
-		}
-	}
-
-	gen_twopix(tpg, tpg->textbg, TPG_COLOR_TEXTBG, 0);
-	gen_twopix(tpg, tpg->textbg, TPG_COLOR_TEXTBG, 1);
-	gen_twopix(tpg, tpg->textfg, TPG_COLOR_TEXTFG, 0);
-	gen_twopix(tpg, tpg->textfg, TPG_COLOR_TEXTFG, 1);
-}
-
-/* need this to do rgb24 rendering */
-typedef struct { u16 __; u8 _; } __packed x24;
-
-#define PRINTSTR(PIXTYPE) do {	\
-	unsigned vdiv = tpg->vdownsampling[p]; \
-	unsigned hdiv = tpg->hdownsampling[p]; \
-	int line;	\
-	PIXTYPE fg;	\
-	PIXTYPE bg;	\
-	memcpy(&fg, tpg->textfg[p], sizeof(PIXTYPE));	\
-	memcpy(&bg, tpg->textbg[p], sizeof(PIXTYPE));	\
-	\
-	for (line = first; line < 16; line += vdiv * step) {	\
-		int l = tpg->vflip ? 15 - line : line; \
-		PIXTYPE *pos = (PIXTYPE *)(basep[p][(line / vdiv) & 1] + \
-			       ((y * step + l) / (vdiv * div)) * tpg->bytesperline[p] + \
-			       (x / hdiv) * sizeof(PIXTYPE));	\
-		unsigned s;	\
-	\
-		for (s = 0; s < len; s++) {	\
-			u8 chr = font8x16[text[s] * 16 + line];	\
-	\
-			if (hdiv == 2 && tpg->hflip) { \
-				pos[3] = (chr & (0x01 << 6) ? fg : bg);	\
-				pos[2] = (chr & (0x01 << 4) ? fg : bg);	\
-				pos[1] = (chr & (0x01 << 2) ? fg : bg);	\
-				pos[0] = (chr & (0x01 << 0) ? fg : bg);	\
-			} else if (hdiv == 2) { \
-				pos[0] = (chr & (0x01 << 7) ? fg : bg);	\
-				pos[1] = (chr & (0x01 << 5) ? fg : bg);	\
-				pos[2] = (chr & (0x01 << 3) ? fg : bg);	\
-				pos[3] = (chr & (0x01 << 1) ? fg : bg);	\
-			} else if (tpg->hflip) { \
-				pos[7] = (chr & (0x01 << 7) ? fg : bg);	\
-				pos[6] = (chr & (0x01 << 6) ? fg : bg);	\
-				pos[5] = (chr & (0x01 << 5) ? fg : bg);	\
-				pos[4] = (chr & (0x01 << 4) ? fg : bg);	\
-				pos[3] = (chr & (0x01 << 3) ? fg : bg);	\
-				pos[2] = (chr & (0x01 << 2) ? fg : bg);	\
-				pos[1] = (chr & (0x01 << 1) ? fg : bg);	\
-				pos[0] = (chr & (0x01 << 0) ? fg : bg);	\
-			} else { \
-				pos[0] = (chr & (0x01 << 7) ? fg : bg);	\
-				pos[1] = (chr & (0x01 << 6) ? fg : bg);	\
-				pos[2] = (chr & (0x01 << 5) ? fg : bg);	\
-				pos[3] = (chr & (0x01 << 4) ? fg : bg);	\
-				pos[4] = (chr & (0x01 << 3) ? fg : bg);	\
-				pos[5] = (chr & (0x01 << 2) ? fg : bg);	\
-				pos[6] = (chr & (0x01 << 1) ? fg : bg);	\
-				pos[7] = (chr & (0x01 << 0) ? fg : bg);	\
-			} \
-	\
-			pos += (tpg->hflip ? -8 : 8) / hdiv;	\
-		}	\
-	}	\
-} while (0)
-
-static noinline void tpg_print_str_2(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
-			unsigned p, unsigned first, unsigned div, unsigned step,
-			int y, int x, char *text, unsigned len)
-{
-	PRINTSTR(u8);
-}
-
-static noinline void tpg_print_str_4(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
-			unsigned p, unsigned first, unsigned div, unsigned step,
-			int y, int x, char *text, unsigned len)
-{
-	PRINTSTR(u16);
-}
-
-static noinline void tpg_print_str_6(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
-			unsigned p, unsigned first, unsigned div, unsigned step,
-			int y, int x, char *text, unsigned len)
-{
-	PRINTSTR(x24);
-}
-
-static noinline void tpg_print_str_8(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
-			unsigned p, unsigned first, unsigned div, unsigned step,
-			int y, int x, char *text, unsigned len)
-{
-	PRINTSTR(u32);
-}
-
-void tpg_gen_text(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
-		  int y, int x, char *text)
-{
-	unsigned step = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1;
-	unsigned div = step;
-	unsigned first = 0;
-	unsigned len = strlen(text);
-	unsigned p;
-
-	if (font8x16 == NULL || basep == NULL)
-		return;
-
-	/* Checks if it is possible to show string */
-	if (y + 16 >= tpg->compose.height || x + 8 >= tpg->compose.width)
-		return;
-
-	if (len > (tpg->compose.width - x) / 8)
-		len = (tpg->compose.width - x) / 8;
-	if (tpg->vflip)
-		y = tpg->compose.height - y - 16;
-	if (tpg->hflip)
-		x = tpg->compose.width - x - 8;
-	y += tpg->compose.top;
-	x += tpg->compose.left;
-	if (tpg->field == V4L2_FIELD_BOTTOM)
-		first = 1;
-	else if (tpg->field == V4L2_FIELD_SEQ_TB || tpg->field == V4L2_FIELD_SEQ_BT)
-		div = 2;
-
-	for (p = 0; p < tpg->planes; p++) {
-		/* Print text */
-		switch (tpg->twopixelsize[p]) {
-		case 2:
-			tpg_print_str_2(tpg, basep, p, first, div, step, y, x,
-					text, len);
-			break;
-		case 4:
-			tpg_print_str_4(tpg, basep, p, first, div, step, y, x,
-					text, len);
-			break;
-		case 6:
-			tpg_print_str_6(tpg, basep, p, first, div, step, y, x,
-					text, len);
-			break;
-		case 8:
-			tpg_print_str_8(tpg, basep, p, first, div, step, y, x,
-					text, len);
-			break;
-		}
-	}
-}
-
-void tpg_update_mv_step(struct tpg_data *tpg)
-{
-	int factor = tpg->mv_hor_mode > TPG_MOVE_NONE ? -1 : 1;
-
-	if (tpg->hflip)
-		factor = -factor;
-	switch (tpg->mv_hor_mode) {
-	case TPG_MOVE_NEG_FAST:
-	case TPG_MOVE_POS_FAST:
-		tpg->mv_hor_step = ((tpg->src_width + 319) / 320) * 4;
-		break;
-	case TPG_MOVE_NEG:
-	case TPG_MOVE_POS:
-		tpg->mv_hor_step = ((tpg->src_width + 639) / 640) * 4;
-		break;
-	case TPG_MOVE_NEG_SLOW:
-	case TPG_MOVE_POS_SLOW:
-		tpg->mv_hor_step = 2;
-		break;
-	case TPG_MOVE_NONE:
-		tpg->mv_hor_step = 0;
-		break;
-	}
-	if (factor < 0)
-		tpg->mv_hor_step = tpg->src_width - tpg->mv_hor_step;
-
-	factor = tpg->mv_vert_mode > TPG_MOVE_NONE ? -1 : 1;
-	switch (tpg->mv_vert_mode) {
-	case TPG_MOVE_NEG_FAST:
-	case TPG_MOVE_POS_FAST:
-		tpg->mv_vert_step = ((tpg->src_width + 319) / 320) * 4;
-		break;
-	case TPG_MOVE_NEG:
-	case TPG_MOVE_POS:
-		tpg->mv_vert_step = ((tpg->src_width + 639) / 640) * 4;
-		break;
-	case TPG_MOVE_NEG_SLOW:
-	case TPG_MOVE_POS_SLOW:
-		tpg->mv_vert_step = 1;
-		break;
-	case TPG_MOVE_NONE:
-		tpg->mv_vert_step = 0;
-		break;
-	}
-	if (factor < 0)
-		tpg->mv_vert_step = tpg->src_height - tpg->mv_vert_step;
-}
-
-/* Map the line number relative to the crop rectangle to a frame line number */
-static unsigned tpg_calc_frameline(const struct tpg_data *tpg, unsigned src_y,
-				    unsigned field)
-{
-	switch (field) {
-	case V4L2_FIELD_TOP:
-		return tpg->crop.top + src_y * 2;
-	case V4L2_FIELD_BOTTOM:
-		return tpg->crop.top + src_y * 2 + 1;
-	default:
-		return src_y + tpg->crop.top;
-	}
-}
-
-/*
- * Map the line number relative to the compose rectangle to a destination
- * buffer line number.
- */
-static unsigned tpg_calc_buffer_line(const struct tpg_data *tpg, unsigned y,
-				    unsigned field)
-{
-	y += tpg->compose.top;
-	switch (field) {
-	case V4L2_FIELD_SEQ_TB:
-		if (y & 1)
-			return tpg->buf_height / 2 + y / 2;
-		return y / 2;
-	case V4L2_FIELD_SEQ_BT:
-		if (y & 1)
-			return y / 2;
-		return tpg->buf_height / 2 + y / 2;
-	default:
-		return y;
-	}
-}
-
-static void tpg_recalc(struct tpg_data *tpg)
-{
-	if (tpg->recalc_colors) {
-		tpg->recalc_colors = false;
-		tpg->recalc_lines = true;
-		tpg->real_xfer_func = tpg->xfer_func;
-		tpg->real_ycbcr_enc = tpg->ycbcr_enc;
-		tpg->real_quantization = tpg->quantization;
-
-		if (tpg->xfer_func == V4L2_XFER_FUNC_DEFAULT)
-			tpg->real_xfer_func =
-				V4L2_MAP_XFER_FUNC_DEFAULT(tpg->colorspace);
-
-		if (tpg->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
-			tpg->real_ycbcr_enc =
-				V4L2_MAP_YCBCR_ENC_DEFAULT(tpg->colorspace);
-
-		if (tpg->quantization == V4L2_QUANTIZATION_DEFAULT)
-			tpg->real_quantization =
-				V4L2_MAP_QUANTIZATION_DEFAULT(!tpg->is_yuv,
-					tpg->colorspace, tpg->real_ycbcr_enc);
-
-		tpg_precalculate_colors(tpg);
-	}
-	if (tpg->recalc_square_border) {
-		tpg->recalc_square_border = false;
-		tpg_calculate_square_border(tpg);
-	}
-	if (tpg->recalc_lines) {
-		tpg->recalc_lines = false;
-		tpg_precalculate_line(tpg);
-	}
-}
-
-void tpg_calc_text_basep(struct tpg_data *tpg,
-		u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf)
-{
-	unsigned stride = tpg->bytesperline[p];
-	unsigned h = tpg->buf_height;
-
-	tpg_recalc(tpg);
-
-	basep[p][0] = vbuf;
-	basep[p][1] = vbuf;
-	h /= tpg->vdownsampling[p];
-	if (tpg->field == V4L2_FIELD_SEQ_TB)
-		basep[p][1] += h * stride / 2;
-	else if (tpg->field == V4L2_FIELD_SEQ_BT)
-		basep[p][0] += h * stride / 2;
-	if (p == 0 && tpg->interleaved)
-		tpg_calc_text_basep(tpg, basep, 1, vbuf);
-}
-
-static int tpg_pattern_avg(const struct tpg_data *tpg,
-			   unsigned pat1, unsigned pat2)
-{
-	unsigned pat_lines = tpg_get_pat_lines(tpg);
-
-	if (pat1 == (pat2 + 1) % pat_lines)
-		return pat2;
-	if (pat2 == (pat1 + 1) % pat_lines)
-		return pat1;
-	return -1;
-}
-
-void tpg_log_status(struct tpg_data *tpg)
-{
-	pr_info("tpg source WxH: %ux%u (%s)\n",
-			tpg->src_width, tpg->src_height,
-			tpg->is_yuv ? "YCbCr" : "RGB");
-	pr_info("tpg field: %u\n", tpg->field);
-	pr_info("tpg crop: %ux%u@%dx%d\n", tpg->crop.width, tpg->crop.height,
-			tpg->crop.left, tpg->crop.top);
-	pr_info("tpg compose: %ux%u@%dx%d\n", tpg->compose.width, tpg->compose.height,
-			tpg->compose.left, tpg->compose.top);
-	pr_info("tpg colorspace: %d\n", tpg->colorspace);
-	pr_info("tpg transfer function: %d/%d\n", tpg->xfer_func, tpg->real_xfer_func);
-	pr_info("tpg Y'CbCr encoding: %d/%d\n", tpg->ycbcr_enc, tpg->real_ycbcr_enc);
-	pr_info("tpg quantization: %d/%d\n", tpg->quantization, tpg->real_quantization);
-	pr_info("tpg RGB range: %d/%d\n", tpg->rgb_range, tpg->real_rgb_range);
-}
-
-/*
- * This struct contains common parameters used by both the drawing of the
- * test pattern and the drawing of the extras (borders, square, etc.)
- */
-struct tpg_draw_params {
-	/* common data */
-	bool is_tv;
-	bool is_60hz;
-	unsigned twopixsize;
-	unsigned img_width;
-	unsigned stride;
-	unsigned hmax;
-	unsigned frame_line;
-	unsigned frame_line_next;
-
-	/* test pattern */
-	unsigned mv_hor_old;
-	unsigned mv_hor_new;
-	unsigned mv_vert_old;
-	unsigned mv_vert_new;
-
-	/* extras */
-	unsigned wss_width;
-	unsigned wss_random_offset;
-	unsigned sav_eav_f;
-	unsigned left_pillar_width;
-	unsigned right_pillar_start;
-};
-
-static void tpg_fill_params_pattern(const struct tpg_data *tpg, unsigned p,
-				    struct tpg_draw_params *params)
-{
-	params->mv_hor_old =
-		tpg_hscale_div(tpg, p, tpg->mv_hor_count % tpg->src_width);
-	params->mv_hor_new =
-		tpg_hscale_div(tpg, p, (tpg->mv_hor_count + tpg->mv_hor_step) %
-			       tpg->src_width);
-	params->mv_vert_old = tpg->mv_vert_count % tpg->src_height;
-	params->mv_vert_new =
-		(tpg->mv_vert_count + tpg->mv_vert_step) % tpg->src_height;
-}
-
-static void tpg_fill_params_extras(const struct tpg_data *tpg,
-				   unsigned p,
-				   struct tpg_draw_params *params)
-{
-	unsigned left_pillar_width = 0;
-	unsigned right_pillar_start = params->img_width;
-
-	params->wss_width = tpg->crop.left < tpg->src_width / 2 ?
-		tpg->src_width / 2 - tpg->crop.left : 0;
-	if (params->wss_width > tpg->crop.width)
-		params->wss_width = tpg->crop.width;
-	params->wss_width = tpg_hscale_div(tpg, p, params->wss_width);
-	params->wss_random_offset =
-		params->twopixsize * prandom_u32_max(tpg->src_width / 2);
-
-	if (tpg->crop.left < tpg->border.left) {
-		left_pillar_width = tpg->border.left - tpg->crop.left;
-		if (left_pillar_width > tpg->crop.width)
-			left_pillar_width = tpg->crop.width;
-		left_pillar_width = tpg_hscale_div(tpg, p, left_pillar_width);
-	}
-	params->left_pillar_width = left_pillar_width;
-
-	if (tpg->crop.left + tpg->crop.width >
-	    tpg->border.left + tpg->border.width) {
-		right_pillar_start =
-			tpg->border.left + tpg->border.width - tpg->crop.left;
-		right_pillar_start =
-			tpg_hscale_div(tpg, p, right_pillar_start);
-		if (right_pillar_start > params->img_width)
-			right_pillar_start = params->img_width;
-	}
-	params->right_pillar_start = right_pillar_start;
-
-	params->sav_eav_f = tpg->field ==
-			(params->is_60hz ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
-}
-
-static void tpg_fill_plane_extras(const struct tpg_data *tpg,
-				  const struct tpg_draw_params *params,
-				  unsigned p, unsigned h, u8 *vbuf)
-{
-	unsigned twopixsize = params->twopixsize;
-	unsigned img_width = params->img_width;
-	unsigned frame_line = params->frame_line;
-	const struct v4l2_rect *sq = &tpg->square;
-	const struct v4l2_rect *b = &tpg->border;
-	const struct v4l2_rect *c = &tpg->crop;
-
-	if (params->is_tv && !params->is_60hz &&
-	    frame_line == 0 && params->wss_width) {
-		/*
-		 * Replace the first half of the top line of a 50 Hz frame
-		 * with random data to simulate a WSS signal.
-		 */
-		u8 *wss = tpg->random_line[p] + params->wss_random_offset;
-
-		memcpy(vbuf, wss, params->wss_width);
-	}
-
-	if (tpg->show_border && frame_line >= b->top &&
-	    frame_line < b->top + b->height) {
-		unsigned bottom = b->top + b->height - 1;
-		unsigned left = params->left_pillar_width;
-		unsigned right = params->right_pillar_start;
-
-		if (frame_line == b->top || frame_line == b->top + 1 ||
-		    frame_line == bottom || frame_line == bottom - 1) {
-			memcpy(vbuf + left, tpg->contrast_line[p],
-					right - left);
-		} else {
-			if (b->left >= c->left &&
-			    b->left < c->left + c->width)
-				memcpy(vbuf + left,
-					tpg->contrast_line[p], twopixsize);
-			if (b->left + b->width > c->left &&
-			    b->left + b->width <= c->left + c->width)
-				memcpy(vbuf + right - twopixsize,
-					tpg->contrast_line[p], twopixsize);
-		}
-	}
-	if (tpg->qual != TPG_QUAL_NOISE && frame_line >= b->top &&
-	    frame_line < b->top + b->height) {
-		memcpy(vbuf, tpg->black_line[p], params->left_pillar_width);
-		memcpy(vbuf + params->right_pillar_start, tpg->black_line[p],
-		       img_width - params->right_pillar_start);
-	}
-	if (tpg->show_square && frame_line >= sq->top &&
-	    frame_line < sq->top + sq->height &&
-	    sq->left < c->left + c->width &&
-	    sq->left + sq->width >= c->left) {
-		unsigned left = sq->left;
-		unsigned width = sq->width;
-
-		if (c->left > left) {
-			width -= c->left - left;
-			left = c->left;
-		}
-		if (c->left + c->width < left + width)
-			width -= left + width - c->left - c->width;
-		left -= c->left;
-		left = tpg_hscale_div(tpg, p, left);
-		width = tpg_hscale_div(tpg, p, width);
-		memcpy(vbuf + left, tpg->contrast_line[p], width);
-	}
-	if (tpg->insert_sav) {
-		unsigned offset = tpg_hdiv(tpg, p, tpg->compose.width / 3);
-		u8 *p = vbuf + offset;
-		unsigned vact = 0, hact = 0;
-
-		p[0] = 0xff;
-		p[1] = 0;
-		p[2] = 0;
-		p[3] = 0x80 | (params->sav_eav_f << 6) |
-			(vact << 5) | (hact << 4) |
-			((hact ^ vact) << 3) |
-			((hact ^ params->sav_eav_f) << 2) |
-			((params->sav_eav_f ^ vact) << 1) |
-			(hact ^ vact ^ params->sav_eav_f);
-	}
-	if (tpg->insert_eav) {
-		unsigned offset = tpg_hdiv(tpg, p, tpg->compose.width * 2 / 3);
-		u8 *p = vbuf + offset;
-		unsigned vact = 0, hact = 1;
-
-		p[0] = 0xff;
-		p[1] = 0;
-		p[2] = 0;
-		p[3] = 0x80 | (params->sav_eav_f << 6) |
-			(vact << 5) | (hact << 4) |
-			((hact ^ vact) << 3) |
-			((hact ^ params->sav_eav_f) << 2) |
-			((params->sav_eav_f ^ vact) << 1) |
-			(hact ^ vact ^ params->sav_eav_f);
-	}
-}
-
-static void tpg_fill_plane_pattern(const struct tpg_data *tpg,
-				   const struct tpg_draw_params *params,
-				   unsigned p, unsigned h, u8 *vbuf)
-{
-	unsigned twopixsize = params->twopixsize;
-	unsigned img_width = params->img_width;
-	unsigned mv_hor_old = params->mv_hor_old;
-	unsigned mv_hor_new = params->mv_hor_new;
-	unsigned mv_vert_old = params->mv_vert_old;
-	unsigned mv_vert_new = params->mv_vert_new;
-	unsigned frame_line = params->frame_line;
-	unsigned frame_line_next = params->frame_line_next;
-	unsigned line_offset = tpg_hscale_div(tpg, p, tpg->crop.left);
-	bool even;
-	bool fill_blank = false;
-	unsigned pat_line_old;
-	unsigned pat_line_new;
-	u8 *linestart_older;
-	u8 *linestart_newer;
-	u8 *linestart_top;
-	u8 *linestart_bottom;
-
-	even = !(frame_line & 1);
-
-	if (h >= params->hmax) {
-		if (params->hmax == tpg->compose.height)
-			return;
-		if (!tpg->perc_fill_blank)
-			return;
-		fill_blank = true;
-	}
-
-	if (tpg->vflip) {
-		frame_line = tpg->src_height - frame_line - 1;
-		frame_line_next = tpg->src_height - frame_line_next - 1;
-	}
-
-	if (fill_blank) {
-		linestart_older = tpg->contrast_line[p];
-		linestart_newer = tpg->contrast_line[p];
-	} else if (tpg->qual != TPG_QUAL_NOISE &&
-		   (frame_line < tpg->border.top ||
-		    frame_line >= tpg->border.top + tpg->border.height)) {
-		linestart_older = tpg->black_line[p];
-		linestart_newer = tpg->black_line[p];
-	} else if (tpg->pattern == TPG_PAT_NOISE || tpg->qual == TPG_QUAL_NOISE) {
-		linestart_older = tpg->random_line[p] +
-				  twopixsize * prandom_u32_max(tpg->src_width / 2);
-		linestart_newer = tpg->random_line[p] +
-				  twopixsize * prandom_u32_max(tpg->src_width / 2);
-	} else {
-		unsigned frame_line_old =
-			(frame_line + mv_vert_old) % tpg->src_height;
-		unsigned frame_line_new =
-			(frame_line + mv_vert_new) % tpg->src_height;
-		unsigned pat_line_next_old;
-		unsigned pat_line_next_new;
-
-		pat_line_old = tpg_get_pat_line(tpg, frame_line_old);
-		pat_line_new = tpg_get_pat_line(tpg, frame_line_new);
-		linestart_older = tpg->lines[pat_line_old][p] + mv_hor_old;
-		linestart_newer = tpg->lines[pat_line_new][p] + mv_hor_new;
-
-		if (tpg->vdownsampling[p] > 1 && frame_line != frame_line_next) {
-			int avg_pat;
-
-			/*
-			 * Now decide whether we need to use downsampled_lines[].
-			 * That's necessary if the two lines use different patterns.
-			 */
-			pat_line_next_old = tpg_get_pat_line(tpg,
-					(frame_line_next + mv_vert_old) % tpg->src_height);
-			pat_line_next_new = tpg_get_pat_line(tpg,
-					(frame_line_next + mv_vert_new) % tpg->src_height);
-
-			switch (tpg->field) {
-			case V4L2_FIELD_INTERLACED:
-			case V4L2_FIELD_INTERLACED_BT:
-			case V4L2_FIELD_INTERLACED_TB:
-				avg_pat = tpg_pattern_avg(tpg, pat_line_old, pat_line_new);
-				if (avg_pat < 0)
-					break;
-				linestart_older = tpg->downsampled_lines[avg_pat][p] + mv_hor_old;
-				linestart_newer = linestart_older;
-				break;
-			case V4L2_FIELD_NONE:
-			case V4L2_FIELD_TOP:
-			case V4L2_FIELD_BOTTOM:
-			case V4L2_FIELD_SEQ_BT:
-			case V4L2_FIELD_SEQ_TB:
-				avg_pat = tpg_pattern_avg(tpg, pat_line_old, pat_line_next_old);
-				if (avg_pat >= 0)
-					linestart_older = tpg->downsampled_lines[avg_pat][p] +
-						mv_hor_old;
-				avg_pat = tpg_pattern_avg(tpg, pat_line_new, pat_line_next_new);
-				if (avg_pat >= 0)
-					linestart_newer = tpg->downsampled_lines[avg_pat][p] +
-						mv_hor_new;
-				break;
-			}
-		}
-		linestart_older += line_offset;
-		linestart_newer += line_offset;
-	}
-	if (tpg->field_alternate) {
-		linestart_top = linestart_bottom = linestart_older;
-	} else if (params->is_60hz) {
-		linestart_top = linestart_newer;
-		linestart_bottom = linestart_older;
-	} else {
-		linestart_top = linestart_older;
-		linestart_bottom = linestart_newer;
-	}
-
-	switch (tpg->field) {
-	case V4L2_FIELD_INTERLACED:
-	case V4L2_FIELD_INTERLACED_TB:
-	case V4L2_FIELD_SEQ_TB:
-	case V4L2_FIELD_SEQ_BT:
-		if (even)
-			memcpy(vbuf, linestart_top, img_width);
-		else
-			memcpy(vbuf, linestart_bottom, img_width);
-		break;
-	case V4L2_FIELD_INTERLACED_BT:
-		if (even)
-			memcpy(vbuf, linestart_bottom, img_width);
-		else
-			memcpy(vbuf, linestart_top, img_width);
-		break;
-	case V4L2_FIELD_TOP:
-		memcpy(vbuf, linestart_top, img_width);
-		break;
-	case V4L2_FIELD_BOTTOM:
-		memcpy(vbuf, linestart_bottom, img_width);
-		break;
-	case V4L2_FIELD_NONE:
-	default:
-		memcpy(vbuf, linestart_older, img_width);
-		break;
-	}
-}
-
-void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
-			   unsigned p, u8 *vbuf)
-{
-	struct tpg_draw_params params;
-	unsigned factor = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1;
-
-	/* Coarse scaling with Bresenham */
-	unsigned int_part = (tpg->crop.height / factor) / tpg->compose.height;
-	unsigned fract_part = (tpg->crop.height / factor) % tpg->compose.height;
-	unsigned src_y = 0;
-	unsigned error = 0;
-	unsigned h;
-
-	tpg_recalc(tpg);
-
-	params.is_tv = std;
-	params.is_60hz = std & V4L2_STD_525_60;
-	params.twopixsize = tpg->twopixelsize[p];
-	params.img_width = tpg_hdiv(tpg, p, tpg->compose.width);
-	params.stride = tpg->bytesperline[p];
-	params.hmax = (tpg->compose.height * tpg->perc_fill) / 100;
-
-	tpg_fill_params_pattern(tpg, p, &params);
-	tpg_fill_params_extras(tpg, p, &params);
-
-	vbuf += tpg_hdiv(tpg, p, tpg->compose.left);
-
-	for (h = 0; h < tpg->compose.height; h++) {
-		unsigned buf_line;
-
-		params.frame_line = tpg_calc_frameline(tpg, src_y, tpg->field);
-		params.frame_line_next = params.frame_line;
-		buf_line = tpg_calc_buffer_line(tpg, h, tpg->field);
-		src_y += int_part;
-		error += fract_part;
-		if (error >= tpg->compose.height) {
-			error -= tpg->compose.height;
-			src_y++;
-		}
-
-		/*
-		 * For line-interleaved formats determine the 'plane'
-		 * based on the buffer line.
-		 */
-		if (tpg_g_interleaved(tpg))
-			p = tpg_g_interleaved_plane(tpg, buf_line);
-
-		if (tpg->vdownsampling[p] > 1) {
-			/*
-			 * When doing vertical downsampling the field setting
-			 * matters: for SEQ_BT/TB we downsample each field
-			 * separately (i.e. lines 0+2 are combined, as are
-			 * lines 1+3), for the other field settings we combine
-			 * odd and even lines. Doing that for SEQ_BT/TB would
-			 * be really weird.
-			 */
-			if (tpg->field == V4L2_FIELD_SEQ_BT ||
-			    tpg->field == V4L2_FIELD_SEQ_TB) {
-				unsigned next_src_y = src_y;
-
-				if ((h & 3) >= 2)
-					continue;
-				next_src_y += int_part;
-				if (error + fract_part >= tpg->compose.height)
-					next_src_y++;
-				params.frame_line_next =
-					tpg_calc_frameline(tpg, next_src_y, tpg->field);
-			} else {
-				if (h & 1)
-					continue;
-				params.frame_line_next =
-					tpg_calc_frameline(tpg, src_y, tpg->field);
-			}
-
-			buf_line /= tpg->vdownsampling[p];
-		}
-		tpg_fill_plane_pattern(tpg, &params, p, h,
-				vbuf + buf_line * params.stride);
-		tpg_fill_plane_extras(tpg, &params, p, h,
-				vbuf + buf_line * params.stride);
-	}
-}
-
-void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf)
-{
-	unsigned offset = 0;
-	unsigned i;
-
-	if (tpg->buffers > 1) {
-		tpg_fill_plane_buffer(tpg, std, p, vbuf);
-		return;
-	}
-
-	for (i = 0; i < tpg_g_planes(tpg); i++) {
-		tpg_fill_plane_buffer(tpg, std, i, vbuf + offset);
-		offset += tpg_calc_plane_size(tpg, i);
-	}
-}
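
A note on how the two fill entry points above relate (illustrative only; the helper below and its name are not part of this patch): tpg_fill_plane_buffer() fills one memory plane, while tpg_fillbuffer() also covers the single-allocation case by walking every plane itself, so a capture driver can simply loop over tpg_g_buffers().

/*
 * Hypothetical caller, assuming vb2_plane_vaddr() from videobuf2:
 * one call per memory buffer; when all planes share one allocation
 * tpg_g_buffers() is 1 and the single call fills every plane.
 */
static void fill_capture_buffer(struct tpg_data *tpg, v4l2_std_id std,
				struct vb2_buffer *vb)
{
	unsigned p;

	for (p = 0; p < tpg_g_buffers(tpg); p++)
		tpg_fillbuffer(tpg, std, p, vb2_plane_vaddr(vb, p));
}
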
diff --git a/drivers/media/platform/vivid/vivid-tpg.h b/drivers/media/platform/vivid/vivid-tpg.h
deleted file mode 100644
index 93fbaee..0000000
--- a/drivers/media/platform/vivid/vivid-tpg.h
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * vivid-tpg.h - Test Pattern Generator
- *
- * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _VIVID_TPG_H_
-#define _VIVID_TPG_H_
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/videodev2.h>
-
-#include "vivid-tpg-colors.h"
-
-enum tpg_pattern {
-	TPG_PAT_75_COLORBAR,
-	TPG_PAT_100_COLORBAR,
-	TPG_PAT_CSC_COLORBAR,
-	TPG_PAT_100_HCOLORBAR,
-	TPG_PAT_100_COLORSQUARES,
-	TPG_PAT_BLACK,
-	TPG_PAT_WHITE,
-	TPG_PAT_RED,
-	TPG_PAT_GREEN,
-	TPG_PAT_BLUE,
-	TPG_PAT_CHECKERS_16X16,
-	TPG_PAT_CHECKERS_2X2,
-	TPG_PAT_CHECKERS_1X1,
-	TPG_PAT_COLOR_CHECKERS_2X2,
-	TPG_PAT_COLOR_CHECKERS_1X1,
-	TPG_PAT_ALTERNATING_HLINES,
-	TPG_PAT_ALTERNATING_VLINES,
-	TPG_PAT_CROSS_1_PIXEL,
-	TPG_PAT_CROSS_2_PIXELS,
-	TPG_PAT_CROSS_10_PIXELS,
-	TPG_PAT_GRAY_RAMP,
-
-	/* Must be the last pattern */
-	TPG_PAT_NOISE,
-};
-
-extern const char * const tpg_pattern_strings[];
-
-enum tpg_quality {
-	TPG_QUAL_COLOR,
-	TPG_QUAL_GRAY,
-	TPG_QUAL_NOISE
-};
-
-enum tpg_video_aspect {
-	TPG_VIDEO_ASPECT_IMAGE,
-	TPG_VIDEO_ASPECT_4X3,
-	TPG_VIDEO_ASPECT_14X9_CENTRE,
-	TPG_VIDEO_ASPECT_16X9_CENTRE,
-	TPG_VIDEO_ASPECT_16X9_ANAMORPHIC,
-};
-
-enum tpg_pixel_aspect {
-	TPG_PIXEL_ASPECT_SQUARE,
-	TPG_PIXEL_ASPECT_NTSC,
-	TPG_PIXEL_ASPECT_PAL,
-};
-
-enum tpg_move_mode {
-	TPG_MOVE_NEG_FAST,
-	TPG_MOVE_NEG,
-	TPG_MOVE_NEG_SLOW,
-	TPG_MOVE_NONE,
-	TPG_MOVE_POS_SLOW,
-	TPG_MOVE_POS,
-	TPG_MOVE_POS_FAST,
-};
-
-extern const char * const tpg_aspect_strings[];
-
-#define TPG_MAX_PLANES 3
-#define TPG_MAX_PAT_LINES 8
-
-struct tpg_data {
-	/* Source frame size */
-	unsigned			src_width, src_height;
-	/* Buffer height */
-	unsigned			buf_height;
-	/* Scaled output frame size */
-	unsigned			scaled_width;
-	u32				field;
-	bool				field_alternate;
-	/* crop coordinates are frame-based */
-	struct v4l2_rect		crop;
-	/* compose coordinates are format-based */
-	struct v4l2_rect		compose;
-	/* border and square coordinates are frame-based */
-	struct v4l2_rect		border;
-	struct v4l2_rect		square;
-
-	/* Color-related fields */
-	enum tpg_quality		qual;
-	unsigned			qual_offset;
-	u8				alpha_component;
-	bool				alpha_red_only;
-	u8				brightness;
-	u8				contrast;
-	u8				saturation;
-	s16				hue;
-	u32				fourcc;
-	bool				is_yuv;
-	u32				colorspace;
-	u32				xfer_func;
-	u32				ycbcr_enc;
-	/*
-	 * Stores the actual transfer function, i.e. will never be
-	 * V4L2_XFER_FUNC_DEFAULT.
-	 */
-	u32				real_xfer_func;
-	/*
-	 * Stores the actual Y'CbCr encoding, i.e. will never be
-	 * V4L2_YCBCR_ENC_DEFAULT.
-	 */
-	u32				real_ycbcr_enc;
-	u32				quantization;
-	/*
-	 * Stores the actual quantization, i.e. will never be
-	 * V4L2_QUANTIZATION_DEFAULT.
-	 */
-	u32				real_quantization;
-	enum tpg_video_aspect		vid_aspect;
-	enum tpg_pixel_aspect		pix_aspect;
-	unsigned			rgb_range;
-	unsigned			real_rgb_range;
-	unsigned			buffers;
-	unsigned			planes;
-	bool				interleaved;
-	u8				vdownsampling[TPG_MAX_PLANES];
-	u8				hdownsampling[TPG_MAX_PLANES];
-	/*
-	 * horizontal positions must be ANDed with this value to enforce
-	 * correct boundaries for packed YUYV values.
-	 */
-	unsigned			hmask[TPG_MAX_PLANES];
-	/* Used to store the colors in native format, either RGB or YUV */
-	u8				colors[TPG_COLOR_MAX][3];
-	u8				textfg[TPG_MAX_PLANES][8], textbg[TPG_MAX_PLANES][8];
-	/* size in bytes for two pixels in each plane */
-	unsigned			twopixelsize[TPG_MAX_PLANES];
-	unsigned			bytesperline[TPG_MAX_PLANES];
-
-	/* Configuration */
-	enum tpg_pattern		pattern;
-	bool				hflip;
-	bool				vflip;
-	unsigned			perc_fill;
-	bool				perc_fill_blank;
-	bool				show_border;
-	bool				show_square;
-	bool				insert_sav;
-	bool				insert_eav;
-
-	/* Test pattern movement */
-	enum tpg_move_mode		mv_hor_mode;
-	int				mv_hor_count;
-	int				mv_hor_step;
-	enum tpg_move_mode		mv_vert_mode;
-	int				mv_vert_count;
-	int				mv_vert_step;
-
-	bool				recalc_colors;
-	bool				recalc_lines;
-	bool				recalc_square_border;
-
-	/* Used to store TPG_MAX_PAT_LINES lines, each with up to two planes */
-	unsigned			max_line_width;
-	u8				*lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
-	u8				*downsampled_lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
-	u8				*random_line[TPG_MAX_PLANES];
-	u8				*contrast_line[TPG_MAX_PLANES];
-	u8				*black_line[TPG_MAX_PLANES];
-};
-
-void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h);
-int tpg_alloc(struct tpg_data *tpg, unsigned max_w);
-void tpg_free(struct tpg_data *tpg);
-void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
-		       u32 field);
-void tpg_log_status(struct tpg_data *tpg);
-
-void tpg_set_font(const u8 *f);
-void tpg_gen_text(const struct tpg_data *tpg,
-		u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text);
-void tpg_calc_text_basep(struct tpg_data *tpg,
-		u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
-unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line);
-void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
-			   unsigned p, u8 *vbuf);
-void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std,
-		    unsigned p, u8 *vbuf);
-bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc);
-void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
-		const struct v4l2_rect *compose);
-
-static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern)
-{
-	if (tpg->pattern == pattern)
-		return;
-	tpg->pattern = pattern;
-	tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_quality(struct tpg_data *tpg,
-				    enum tpg_quality qual, unsigned qual_offset)
-{
-	if (tpg->qual == qual && tpg->qual_offset == qual_offset)
-		return;
-	tpg->qual = qual;
-	tpg->qual_offset = qual_offset;
-	tpg->recalc_colors = true;
-}
-
-static inline enum tpg_quality tpg_g_quality(const struct tpg_data *tpg)
-{
-	return tpg->qual;
-}
-
-static inline void tpg_s_alpha_component(struct tpg_data *tpg,
-					    u8 alpha_component)
-{
-	if (tpg->alpha_component == alpha_component)
-		return;
-	tpg->alpha_component = alpha_component;
-	tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_alpha_mode(struct tpg_data *tpg,
-					    bool red_only)
-{
-	if (tpg->alpha_red_only == red_only)
-		return;
-	tpg->alpha_red_only = red_only;
-	tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_brightness(struct tpg_data *tpg,
-					u8 brightness)
-{
-	if (tpg->brightness == brightness)
-		return;
-	tpg->brightness = brightness;
-	tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_contrast(struct tpg_data *tpg,
-					u8 contrast)
-{
-	if (tpg->contrast == contrast)
-		return;
-	tpg->contrast = contrast;
-	tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_saturation(struct tpg_data *tpg,
-					u8 saturation)
-{
-	if (tpg->saturation == saturation)
-		return;
-	tpg->saturation = saturation;
-	tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_hue(struct tpg_data *tpg,
-					s16 hue)
-{
-	if (tpg->hue == hue)
-		return;
-	tpg->hue = hue;
-	tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_rgb_range(struct tpg_data *tpg,
-					unsigned rgb_range)
-{
-	if (tpg->rgb_range == rgb_range)
-		return;
-	tpg->rgb_range = rgb_range;
-	tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_real_rgb_range(struct tpg_data *tpg,
-					unsigned rgb_range)
-{
-	if (tpg->real_rgb_range == rgb_range)
-		return;
-	tpg->real_rgb_range = rgb_range;
-	tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_colorspace(struct tpg_data *tpg, u32 colorspace)
-{
-	if (tpg->colorspace == colorspace)
-		return;
-	tpg->colorspace = colorspace;
-	tpg->recalc_colors = true;
-}
-
-static inline u32 tpg_g_colorspace(const struct tpg_data *tpg)
-{
-	return tpg->colorspace;
-}
-
-static inline void tpg_s_ycbcr_enc(struct tpg_data *tpg, u32 ycbcr_enc)
-{
-	if (tpg->ycbcr_enc == ycbcr_enc)
-		return;
-	tpg->ycbcr_enc = ycbcr_enc;
-	tpg->recalc_colors = true;
-}
-
-static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg)
-{
-	return tpg->ycbcr_enc;
-}
-
-static inline void tpg_s_xfer_func(struct tpg_data *tpg, u32 xfer_func)
-{
-	if (tpg->xfer_func == xfer_func)
-		return;
-	tpg->xfer_func = xfer_func;
-	tpg->recalc_colors = true;
-}
-
-static inline u32 tpg_g_xfer_func(const struct tpg_data *tpg)
-{
-	return tpg->xfer_func;
-}
-
-static inline void tpg_s_quantization(struct tpg_data *tpg, u32 quantization)
-{
-	if (tpg->quantization == quantization)
-		return;
-	tpg->quantization = quantization;
-	tpg->recalc_colors = true;
-}
-
-static inline u32 tpg_g_quantization(const struct tpg_data *tpg)
-{
-	return tpg->quantization;
-}
-
-static inline unsigned tpg_g_buffers(const struct tpg_data *tpg)
-{
-	return tpg->buffers;
-}
-
-static inline unsigned tpg_g_planes(const struct tpg_data *tpg)
-{
-	return tpg->interleaved ? 1 : tpg->planes;
-}
-
-static inline bool tpg_g_interleaved(const struct tpg_data *tpg)
-{
-	return tpg->interleaved;
-}
-
-static inline unsigned tpg_g_twopixelsize(const struct tpg_data *tpg, unsigned plane)
-{
-	return tpg->twopixelsize[plane];
-}
-
-static inline unsigned tpg_hdiv(const struct tpg_data *tpg,
-				  unsigned plane, unsigned x)
-{
-	return ((x / tpg->hdownsampling[plane]) & tpg->hmask[plane]) *
-		tpg->twopixelsize[plane] / 2;
-}
-
-static inline unsigned tpg_hscale(const struct tpg_data *tpg, unsigned x)
-{
-	return (x * tpg->scaled_width) / tpg->src_width;
-}
-
-static inline unsigned tpg_hscale_div(const struct tpg_data *tpg,
-				      unsigned plane, unsigned x)
-{
-	return tpg_hdiv(tpg, plane, tpg_hscale(tpg, x));
-}
-
-static inline unsigned tpg_g_bytesperline(const struct tpg_data *tpg, unsigned plane)
-{
-	return tpg->bytesperline[plane];
-}
-
-static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsigned bpl)
-{
-	unsigned p;
-
-	if (tpg->buffers > 1) {
-		tpg->bytesperline[plane] = bpl;
-		return;
-	}
-
-	for (p = 0; p < tpg_g_planes(tpg); p++) {
-		unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
-
-		tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
-	}
-	if (tpg_g_interleaved(tpg))
-		tpg->bytesperline[1] = tpg->bytesperline[0];
-}
-
-
-static inline unsigned tpg_g_line_width(const struct tpg_data *tpg, unsigned plane)
-{
-	unsigned w = 0;
-	unsigned p;
-
-	if (tpg->buffers > 1)
-		return tpg_g_bytesperline(tpg, plane);
-	for (p = 0; p < tpg_g_planes(tpg); p++) {
-		unsigned plane_w = tpg_g_bytesperline(tpg, p);
-
-		w += plane_w / tpg->vdownsampling[p];
-	}
-	return w;
-}
-
-static inline unsigned tpg_calc_line_width(const struct tpg_data *tpg,
-					   unsigned plane, unsigned bpl)
-{
-	unsigned w = 0;
-	unsigned p;
-
-	if (tpg->buffers > 1)
-		return bpl;
-	for (p = 0; p < tpg_g_planes(tpg); p++) {
-		unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
-
-		plane_w /= tpg->hdownsampling[p];
-		w += plane_w / tpg->vdownsampling[p];
-	}
-	return w;
-}
-
-static inline unsigned tpg_calc_plane_size(const struct tpg_data *tpg, unsigned plane)
-{
-	if (plane >= tpg_g_planes(tpg))
-		return 0;
-
-	return tpg_g_bytesperline(tpg, plane) * tpg->buf_height /
-	       tpg->vdownsampling[plane];
-}
-
-static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h)
-{
-	tpg->buf_height = h;
-}
-
-static inline void tpg_s_field(struct tpg_data *tpg, unsigned field, bool alternate)
-{
-	tpg->field = field;
-	tpg->field_alternate = alternate;
-}
-
-static inline void tpg_s_perc_fill(struct tpg_data *tpg,
-				      unsigned perc_fill)
-{
-	tpg->perc_fill = perc_fill;
-}
-
-static inline unsigned tpg_g_perc_fill(const struct tpg_data *tpg)
-{
-	return tpg->perc_fill;
-}
-
-static inline void tpg_s_perc_fill_blank(struct tpg_data *tpg,
-					 bool perc_fill_blank)
-{
-	tpg->perc_fill_blank = perc_fill_blank;
-}
-
-static inline void tpg_s_video_aspect(struct tpg_data *tpg,
-					enum tpg_video_aspect vid_aspect)
-{
-	if (tpg->vid_aspect == vid_aspect)
-		return;
-	tpg->vid_aspect = vid_aspect;
-	tpg->recalc_square_border = true;
-}
-
-static inline enum tpg_video_aspect tpg_g_video_aspect(const struct tpg_data *tpg)
-{
-	return tpg->vid_aspect;
-}
-
-static inline void tpg_s_pixel_aspect(struct tpg_data *tpg,
-					enum tpg_pixel_aspect pix_aspect)
-{
-	if (tpg->pix_aspect == pix_aspect)
-		return;
-	tpg->pix_aspect = pix_aspect;
-	tpg->recalc_square_border = true;
-}
-
-static inline void tpg_s_show_border(struct tpg_data *tpg,
-					bool show_border)
-{
-	tpg->show_border = show_border;
-}
-
-static inline void tpg_s_show_square(struct tpg_data *tpg,
-					bool show_square)
-{
-	tpg->show_square = show_square;
-}
-
-static inline void tpg_s_insert_sav(struct tpg_data *tpg, bool insert_sav)
-{
-	tpg->insert_sav = insert_sav;
-}
-
-static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav)
-{
-	tpg->insert_eav = insert_eav;
-}
-
-void tpg_update_mv_step(struct tpg_data *tpg);
-
-static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg,
-				enum tpg_move_mode mv_hor_mode)
-{
-	tpg->mv_hor_mode = mv_hor_mode;
-	tpg_update_mv_step(tpg);
-}
-
-static inline void tpg_s_mv_vert_mode(struct tpg_data *tpg,
-				enum tpg_move_mode mv_vert_mode)
-{
-	tpg->mv_vert_mode = mv_vert_mode;
-	tpg_update_mv_step(tpg);
-}
-
-static inline void tpg_init_mv_count(struct tpg_data *tpg)
-{
-	tpg->mv_hor_count = tpg->mv_vert_count = 0;
-}
-
-static inline void tpg_update_mv_count(struct tpg_data *tpg, bool frame_is_field)
-{
-	tpg->mv_hor_count += tpg->mv_hor_step * (frame_is_field ? 1 : 2);
-	tpg->mv_vert_count += tpg->mv_vert_step * (frame_is_field ? 1 : 2);
-}
-
-static inline void tpg_s_hflip(struct tpg_data *tpg, bool hflip)
-{
-	if (tpg->hflip == hflip)
-		return;
-	tpg->hflip = hflip;
-	tpg_update_mv_step(tpg);
-	tpg->recalc_lines = true;
-}
-
-static inline bool tpg_g_hflip(const struct tpg_data *tpg)
-{
-	return tpg->hflip;
-}
-
-static inline void tpg_s_vflip(struct tpg_data *tpg, bool vflip)
-{
-	tpg->vflip = vflip;
-}
-
-static inline bool tpg_g_vflip(const struct tpg_data *tpg)
-{
-	return tpg->vflip;
-}
-
-static inline bool tpg_pattern_is_static(const struct tpg_data *tpg)
-{
-	return tpg->pattern != TPG_PAT_NOISE &&
-	       tpg->mv_hor_mode == TPG_MOVE_NONE &&
-	       tpg->mv_vert_mode == TPG_MOVE_NONE;
-}
-
-#endif
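
The inline setters in this header share one idiom: compare, store, and flag lazy recalculation, so tpg_recalc() (invoked from tpg_fill_plane_buffer()) redoes the colour and line tables at most once per frame. A hedged configuration sketch; example_tpg_setup() is an invented name:

static void example_tpg_setup(struct tpg_data *tpg)
{
	tpg_s_pattern(tpg, TPG_PAT_75_COLORBAR);	/* marks recalc_colors */
	tpg_s_colorspace(tpg, V4L2_COLORSPACE_REC709);
	tpg_s_quantization(tpg, V4L2_QUANTIZATION_LIM_RANGE);
	tpg_s_hflip(tpg, true);				/* marks recalc_lines */

	/*
	 * Nothing has been recomputed yet; the next buffer fill calls
	 * tpg_recalc() and pays the cost once, no matter how many
	 * setters ran.
	 */
}
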
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index b84f081..4f730f3 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -26,6 +26,7 @@
 #include <media/v4l2-common.h>
 #include <media/v4l2-event.h>
 #include <media/v4l2-dv-timings.h>
+#include <media/v4l2-rect.h>
 
 #include "vivid-core.h"
 #include "vivid-vid-common.h"
@@ -590,16 +591,16 @@
 	} else {
 		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
 
-		rect_set_min_size(&r, &vivid_min_rect);
-		rect_set_max_size(&r, &vivid_max_rect);
+		v4l2_rect_set_min_size(&r, &vivid_min_rect);
+		v4l2_rect_set_max_size(&r, &vivid_max_rect);
 		if (dev->has_scaler_cap && !dev->has_compose_cap) {
 			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
 
-			rect_set_max_size(&r, &max_r);
+			v4l2_rect_set_max_size(&r, &max_r);
 		} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
-			rect_set_max_size(&r, &dev->src_rect);
+			v4l2_rect_set_max_size(&r, &dev->src_rect);
 		} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
-			rect_set_min_size(&r, &dev->src_rect);
+			v4l2_rect_set_min_size(&r, &dev->src_rect);
 		}
 		mp->width = r.width;
 		mp->height = r.height / factor;
@@ -668,7 +669,7 @@
 
 		if (dev->has_scaler_cap) {
 			if (dev->has_compose_cap)
-				rect_map_inside(compose, &r);
+				v4l2_rect_map_inside(compose, &r);
 			else
 				*compose = r;
 			if (dev->has_crop_cap && !dev->has_compose_cap) {
@@ -683,9 +684,9 @@
 					factor * r.height * MAX_ZOOM
 				};
 
-				rect_set_min_size(crop, &min_r);
-				rect_set_max_size(crop, &max_r);
-				rect_map_inside(crop, &dev->crop_bounds_cap);
+				v4l2_rect_set_min_size(crop, &min_r);
+				v4l2_rect_set_max_size(crop, &max_r);
+				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
 			} else if (dev->has_crop_cap) {
 				struct v4l2_rect min_r = {
 					0, 0,
@@ -698,27 +699,27 @@
 					factor * compose->height * MAX_ZOOM
 				};
 
-				rect_set_min_size(crop, &min_r);
-				rect_set_max_size(crop, &max_r);
-				rect_map_inside(crop, &dev->crop_bounds_cap);
+				v4l2_rect_set_min_size(crop, &min_r);
+				v4l2_rect_set_max_size(crop, &max_r);
+				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
 			}
 		} else if (dev->has_crop_cap && !dev->has_compose_cap) {
 			r.height *= factor;
-			rect_set_size_to(crop, &r);
-			rect_map_inside(crop, &dev->crop_bounds_cap);
+			v4l2_rect_set_size_to(crop, &r);
+			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
 			r = *crop;
 			r.height /= factor;
-			rect_set_size_to(compose, &r);
+			v4l2_rect_set_size_to(compose, &r);
 		} else if (!dev->has_crop_cap) {
-			rect_map_inside(compose, &r);
+			v4l2_rect_map_inside(compose, &r);
 		} else {
 			r.height *= factor;
-			rect_set_max_size(crop, &r);
-			rect_map_inside(crop, &dev->crop_bounds_cap);
+			v4l2_rect_set_max_size(crop, &r);
+			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
 			compose->top *= factor;
 			compose->height *= factor;
-			rect_set_size_to(compose, crop);
-			rect_map_inside(compose, &r);
+			v4l2_rect_set_size_to(compose, crop);
+			v4l2_rect_map_inside(compose, &r);
 			compose->top /= factor;
 			compose->height /= factor;
 		}
@@ -735,9 +736,9 @@
 	} else {
 		struct v4l2_rect r = { 0, 0, mp->width, mp->height };
 
-		rect_set_size_to(compose, &r);
+		v4l2_rect_set_size_to(compose, &r);
 		r.height *= factor;
-		rect_set_size_to(crop, &r);
+		v4l2_rect_set_size_to(crop, &r);
 	}
 
 	dev->fmt_cap_rect.width = mp->width;
@@ -886,9 +887,9 @@
 		ret = vivid_vid_adjust_sel(s->flags, &s->r);
 		if (ret)
 			return ret;
-		rect_set_min_size(&s->r, &vivid_min_rect);
-		rect_set_max_size(&s->r, &dev->src_rect);
-		rect_map_inside(&s->r, &dev->crop_bounds_cap);
+		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+		v4l2_rect_set_max_size(&s->r, &dev->src_rect);
+		v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
 		s->r.top /= factor;
 		s->r.height /= factor;
 		if (dev->has_scaler_cap) {
@@ -904,36 +905,36 @@
 				s->r.height / MAX_ZOOM
 			};
 
-			rect_set_min_size(&fmt, &min_rect);
+			v4l2_rect_set_min_size(&fmt, &min_rect);
 			if (!dev->has_compose_cap)
-				rect_set_max_size(&fmt, &max_rect);
-			if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
+				v4l2_rect_set_max_size(&fmt, &max_rect);
+			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
 			    vb2_is_busy(&dev->vb_vid_cap_q))
 				return -EBUSY;
 			if (dev->has_compose_cap) {
-				rect_set_min_size(compose, &min_rect);
-				rect_set_max_size(compose, &max_rect);
+				v4l2_rect_set_min_size(compose, &min_rect);
+				v4l2_rect_set_max_size(compose, &max_rect);
 			}
 			dev->fmt_cap_rect = fmt;
 			tpg_s_buf_height(&dev->tpg, fmt.height);
 		} else if (dev->has_compose_cap) {
 			struct v4l2_rect fmt = dev->fmt_cap_rect;
 
-			rect_set_min_size(&fmt, &s->r);
-			if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
+			v4l2_rect_set_min_size(&fmt, &s->r);
+			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
 			    vb2_is_busy(&dev->vb_vid_cap_q))
 				return -EBUSY;
 			dev->fmt_cap_rect = fmt;
 			tpg_s_buf_height(&dev->tpg, fmt.height);
-			rect_set_size_to(compose, &s->r);
-			rect_map_inside(compose, &dev->fmt_cap_rect);
+			v4l2_rect_set_size_to(compose, &s->r);
+			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
 		} else {
-			if (!rect_same_size(&s->r, &dev->fmt_cap_rect) &&
+			if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
 			    vb2_is_busy(&dev->vb_vid_cap_q))
 				return -EBUSY;
-			rect_set_size_to(&dev->fmt_cap_rect, &s->r);
-			rect_set_size_to(compose, &s->r);
-			rect_map_inside(compose, &dev->fmt_cap_rect);
+			v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
+			v4l2_rect_set_size_to(compose, &s->r);
+			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
 			tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
 		}
 		s->r.top *= factor;
@@ -946,8 +947,8 @@
 		ret = vivid_vid_adjust_sel(s->flags, &s->r);
 		if (ret)
 			return ret;
-		rect_set_min_size(&s->r, &vivid_min_rect);
-		rect_set_max_size(&s->r, &dev->fmt_cap_rect);
+		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+		v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
 		if (dev->has_scaler_cap) {
 			struct v4l2_rect max_rect = {
 				0, 0,
@@ -955,7 +956,7 @@
 				(dev->src_rect.height / factor) * MAX_ZOOM
 			};
 
-			rect_set_max_size(&s->r, &max_rect);
+			v4l2_rect_set_max_size(&s->r, &max_rect);
 			if (dev->has_crop_cap) {
 				struct v4l2_rect min_rect = {
 					0, 0,
@@ -968,23 +969,23 @@
 					(s->r.height * factor) * MAX_ZOOM
 				};
 
-				rect_set_min_size(crop, &min_rect);
-				rect_set_max_size(crop, &max_rect);
-				rect_map_inside(crop, &dev->crop_bounds_cap);
+				v4l2_rect_set_min_size(crop, &min_rect);
+				v4l2_rect_set_max_size(crop, &max_rect);
+				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
 			}
 		} else if (dev->has_crop_cap) {
 			s->r.top *= factor;
 			s->r.height *= factor;
-			rect_set_max_size(&s->r, &dev->src_rect);
-			rect_set_size_to(crop, &s->r);
-			rect_map_inside(crop, &dev->crop_bounds_cap);
+			v4l2_rect_set_max_size(&s->r, &dev->src_rect);
+			v4l2_rect_set_size_to(crop, &s->r);
+			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
 			s->r.top /= factor;
 			s->r.height /= factor;
 		} else {
-			rect_set_size_to(&s->r, &dev->src_rect);
+			v4l2_rect_set_size_to(&s->r, &dev->src_rect);
 			s->r.height /= factor;
 		}
-		rect_map_inside(&s->r, &dev->fmt_cap_rect);
+		v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
 		if (dev->bitmap_cap && (compose->width != s->r.width ||
 					compose->height != s->r.height)) {
 			kfree(dev->bitmap_cap);
@@ -1124,7 +1125,7 @@
 			for (j = i + 1; j < win->clipcount; j++) {
 				struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
 
-				if (rect_overlap(r1, r2))
+				if (v4l2_rect_overlap(r1, r2))
 					return -EINVAL;
 			}
 		}
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index b0d4e3a..39ea228 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -653,103 +653,6 @@
 	return ret;
 }
 
-/* v4l2_rect helper function: copy the width/height values */
-void rect_set_size_to(struct v4l2_rect *r, const struct v4l2_rect *size)
-{
-	r->width = size->width;
-	r->height = size->height;
-}
-
-/* v4l2_rect helper function: width and height of r should be >= min_size */
-void rect_set_min_size(struct v4l2_rect *r, const struct v4l2_rect *min_size)
-{
-	if (r->width < min_size->width)
-		r->width = min_size->width;
-	if (r->height < min_size->height)
-		r->height = min_size->height;
-}
-
-/* v4l2_rect helper function: width and height of r should be <= max_size */
-void rect_set_max_size(struct v4l2_rect *r, const struct v4l2_rect *max_size)
-{
-	if (r->width > max_size->width)
-		r->width = max_size->width;
-	if (r->height > max_size->height)
-		r->height = max_size->height;
-}
-
-/* v4l2_rect helper function: r should be inside boundary */
-void rect_map_inside(struct v4l2_rect *r, const struct v4l2_rect *boundary)
-{
-	rect_set_max_size(r, boundary);
-	if (r->left < boundary->left)
-		r->left = boundary->left;
-	if (r->top < boundary->top)
-		r->top = boundary->top;
-	if (r->left + r->width > boundary->width)
-		r->left = boundary->width - r->width;
-	if (r->top + r->height > boundary->height)
-		r->top = boundary->height - r->height;
-}
-
-/* v4l2_rect helper function: return true if r1 has the same size as r2 */
-bool rect_same_size(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
-{
-	return r1->width == r2->width && r1->height == r2->height;
-}
-
-/* v4l2_rect helper function: calculate the intersection of two rects */
-struct v4l2_rect rect_intersect(const struct v4l2_rect *a, const struct v4l2_rect *b)
-{
-	struct v4l2_rect r;
-	int right, bottom;
-
-	r.top = max(a->top, b->top);
-	r.left = max(a->left, b->left);
-	bottom = min(a->top + a->height, b->top + b->height);
-	right = min(a->left + a->width, b->left + b->width);
-	r.height = max(0, bottom - r.top);
-	r.width = max(0, right - r.left);
-	return r;
-}
-
-/*
- * v4l2_rect helper function: scale rect r by to->width / from->width and
- * to->height / from->height.
- */
-void rect_scale(struct v4l2_rect *r, const struct v4l2_rect *from,
-				     const struct v4l2_rect *to)
-{
-	if (from->width == 0 || from->height == 0) {
-		r->left = r->top = r->width = r->height = 0;
-		return;
-	}
-	r->left = (((r->left - from->left) * to->width) / from->width) & ~1;
-	r->width = ((r->width * to->width) / from->width) & ~1;
-	r->top = ((r->top - from->top) * to->height) / from->height;
-	r->height = (r->height * to->height) / from->height;
-}
-
-bool rect_overlap(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
-{
-	/*
-	 * IF the left side of r1 is to the right of the right side of r2 OR
-	 *    the left side of r2 is to the right of the right side of r1 THEN
-	 * they do not overlap.
-	 */
-	if (r1->left >= r2->left + r2->width ||
-	    r2->left >= r1->left + r1->width)
-		return false;
-	/*
-	 * IF the top side of r1 is below the bottom of r2 OR
-	 *    the top side of r2 is below the bottom of r1 THEN
-	 * they do not overlap.
-	 */
-	if (r1->top >= r2->top + r2->height ||
-	    r2->top >= r1->top + r1->height)
-		return false;
-	return true;
-}
 int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r)
 {
 	unsigned w = r->width;
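
The helpers deleted above are not dropped: they move to include/media/v4l2-rect.h as static inlines with a v4l2_rect_ prefix, which is what the renames in the vivid hunks use. A minimal usage sketch of the common clamp-then-map pattern (clamp_user_rect() is an invented name):

#include <media/v4l2-rect.h>

static void clamp_user_rect(struct v4l2_rect *r,
			    const struct v4l2_rect *min_r,
			    const struct v4l2_rect *max_r,
			    const struct v4l2_rect *bounds)
{
	v4l2_rect_set_min_size(r, min_r);	/* grow to at least min_r */
	v4l2_rect_set_max_size(r, max_r);	/* shrink to at most max_r */
	v4l2_rect_map_inside(r, bounds);	/* shift fully inside bounds */
}
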
diff --git a/drivers/media/platform/vivid/vivid-vid-common.h b/drivers/media/platform/vivid/vivid-vid-common.h
index 3ec4fa8..4b6175e 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.h
+++ b/drivers/media/platform/vivid/vivid-vid-common.h
@@ -37,15 +37,6 @@
 bool vivid_vid_can_loop(struct vivid_dev *dev);
 void vivid_send_source_change(struct vivid_dev *dev, unsigned type);
 
-bool rect_overlap(const struct v4l2_rect *r1, const struct v4l2_rect *r2);
-void rect_set_size_to(struct v4l2_rect *r, const struct v4l2_rect *size);
-void rect_set_min_size(struct v4l2_rect *r, const struct v4l2_rect *min_size);
-void rect_set_max_size(struct v4l2_rect *r, const struct v4l2_rect *max_size);
-void rect_map_inside(struct v4l2_rect *r, const struct v4l2_rect *boundary);
-bool rect_same_size(const struct v4l2_rect *r1, const struct v4l2_rect *r2);
-struct v4l2_rect rect_intersect(const struct v4l2_rect *a, const struct v4l2_rect *b);
-void rect_scale(struct v4l2_rect *r, const struct v4l2_rect *from,
-				     const struct v4l2_rect *to);
 int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r);
 
 int vivid_enum_fmt_vid(struct file *file, void  *priv, struct v4l2_fmtdesc *f);
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 64e4d66..f92f449 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -25,6 +25,7 @@
 #include <media/v4l2-common.h>
 #include <media/v4l2-event.h>
 #include <media/v4l2-dv-timings.h>
+#include <media/v4l2-rect.h>
 
 #include "vivid-core.h"
 #include "vivid-vid-common.h"
@@ -376,16 +377,16 @@
 	} else {
 		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
 
-		rect_set_min_size(&r, &vivid_min_rect);
-		rect_set_max_size(&r, &vivid_max_rect);
+		v4l2_rect_set_min_size(&r, &vivid_min_rect);
+		v4l2_rect_set_max_size(&r, &vivid_max_rect);
 		if (dev->has_scaler_out && !dev->has_crop_out) {
 			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
 
-			rect_set_max_size(&r, &max_r);
+			v4l2_rect_set_max_size(&r, &max_r);
 		} else if (!dev->has_scaler_out && dev->has_compose_out && !dev->has_crop_out) {
-			rect_set_max_size(&r, &dev->sink_rect);
+			v4l2_rect_set_max_size(&r, &dev->sink_rect);
 		} else if (!dev->has_scaler_out && !dev->has_compose_out) {
-			rect_set_min_size(&r, &dev->sink_rect);
+			v4l2_rect_set_min_size(&r, &dev->sink_rect);
 		}
 		mp->width = r.width;
 		mp->height = r.height / factor;
@@ -473,7 +474,7 @@
 
 		if (dev->has_scaler_out) {
 			if (dev->has_crop_out)
-				rect_map_inside(crop, &r);
+				v4l2_rect_map_inside(crop, &r);
 			else
 				*crop = r;
 			if (dev->has_compose_out && !dev->has_crop_out) {
@@ -488,9 +489,9 @@
 					factor * r.height * MAX_ZOOM
 				};
 
-				rect_set_min_size(compose, &min_r);
-				rect_set_max_size(compose, &max_r);
-				rect_map_inside(compose, &dev->compose_bounds_out);
+				v4l2_rect_set_min_size(compose, &min_r);
+				v4l2_rect_set_max_size(compose, &max_r);
+				v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
 			} else if (dev->has_compose_out) {
 				struct v4l2_rect min_r = {
 					0, 0,
@@ -503,36 +504,36 @@
 					factor * crop->height * MAX_ZOOM
 				};
 
-				rect_set_min_size(compose, &min_r);
-				rect_set_max_size(compose, &max_r);
-				rect_map_inside(compose, &dev->compose_bounds_out);
+				v4l2_rect_set_min_size(compose, &min_r);
+				v4l2_rect_set_max_size(compose, &max_r);
+				v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
 			}
 		} else if (dev->has_compose_out && !dev->has_crop_out) {
-			rect_set_size_to(crop, &r);
+			v4l2_rect_set_size_to(crop, &r);
 			r.height *= factor;
-			rect_set_size_to(compose, &r);
-			rect_map_inside(compose, &dev->compose_bounds_out);
+			v4l2_rect_set_size_to(compose, &r);
+			v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
 		} else if (!dev->has_compose_out) {
-			rect_map_inside(crop, &r);
+			v4l2_rect_map_inside(crop, &r);
 			r.height /= factor;
-			rect_set_size_to(compose, &r);
+			v4l2_rect_set_size_to(compose, &r);
 		} else {
 			r.height *= factor;
-			rect_set_max_size(compose, &r);
-			rect_map_inside(compose, &dev->compose_bounds_out);
+			v4l2_rect_set_max_size(compose, &r);
+			v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
 			crop->top *= factor;
 			crop->height *= factor;
-			rect_set_size_to(crop, compose);
-			rect_map_inside(crop, &r);
+			v4l2_rect_set_size_to(crop, compose);
+			v4l2_rect_map_inside(crop, &r);
 			crop->top /= factor;
 			crop->height /= factor;
 		}
 	} else {
 		struct v4l2_rect r = { 0, 0, mp->width, mp->height };
 
-		rect_set_size_to(crop, &r);
+		v4l2_rect_set_size_to(crop, &r);
 		r.height /= factor;
-		rect_set_size_to(compose, &r);
+		v4l2_rect_set_size_to(compose, &r);
 	}
 
 	dev->fmt_out_rect.width = mp->width;
@@ -683,8 +684,8 @@
 		ret = vivid_vid_adjust_sel(s->flags, &s->r);
 		if (ret)
 			return ret;
-		rect_set_min_size(&s->r, &vivid_min_rect);
-		rect_set_max_size(&s->r, &dev->fmt_out_rect);
+		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+		v4l2_rect_set_max_size(&s->r, &dev->fmt_out_rect);
 		if (dev->has_scaler_out) {
 			struct v4l2_rect max_rect = {
 				0, 0,
@@ -692,7 +693,7 @@
 				(dev->sink_rect.height / factor) * MAX_ZOOM
 			};
 
-			rect_set_max_size(&s->r, &max_rect);
+			v4l2_rect_set_max_size(&s->r, &max_rect);
 			if (dev->has_compose_out) {
 				struct v4l2_rect min_rect = {
 					0, 0,
@@ -705,23 +706,23 @@
 					(s->r.height * factor) * MAX_ZOOM
 				};
 
-				rect_set_min_size(compose, &min_rect);
-				rect_set_max_size(compose, &max_rect);
-				rect_map_inside(compose, &dev->compose_bounds_out);
+				v4l2_rect_set_min_size(compose, &min_rect);
+				v4l2_rect_set_max_size(compose, &max_rect);
+				v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
 			}
 		} else if (dev->has_compose_out) {
 			s->r.top *= factor;
 			s->r.height *= factor;
-			rect_set_max_size(&s->r, &dev->sink_rect);
-			rect_set_size_to(compose, &s->r);
-			rect_map_inside(compose, &dev->compose_bounds_out);
+			v4l2_rect_set_max_size(&s->r, &dev->sink_rect);
+			v4l2_rect_set_size_to(compose, &s->r);
+			v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
 			s->r.top /= factor;
 			s->r.height /= factor;
 		} else {
-			rect_set_size_to(&s->r, &dev->sink_rect);
+			v4l2_rect_set_size_to(&s->r, &dev->sink_rect);
 			s->r.height /= factor;
 		}
-		rect_map_inside(&s->r, &dev->fmt_out_rect);
+		v4l2_rect_map_inside(&s->r, &dev->fmt_out_rect);
 		*crop = s->r;
 		break;
 	case V4L2_SEL_TGT_COMPOSE:
@@ -730,9 +731,9 @@
 		ret = vivid_vid_adjust_sel(s->flags, &s->r);
 		if (ret)
 			return ret;
-		rect_set_min_size(&s->r, &vivid_min_rect);
-		rect_set_max_size(&s->r, &dev->sink_rect);
-		rect_map_inside(&s->r, &dev->compose_bounds_out);
+		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+		v4l2_rect_set_max_size(&s->r, &dev->sink_rect);
+		v4l2_rect_map_inside(&s->r, &dev->compose_bounds_out);
 		s->r.top /= factor;
 		s->r.height /= factor;
 		if (dev->has_scaler_out) {
@@ -748,35 +749,35 @@
 				s->r.height / MAX_ZOOM
 			};
 
-			rect_set_min_size(&fmt, &min_rect);
+			v4l2_rect_set_min_size(&fmt, &min_rect);
 			if (!dev->has_crop_out)
-				rect_set_max_size(&fmt, &max_rect);
-			if (!rect_same_size(&dev->fmt_out_rect, &fmt) &&
+				v4l2_rect_set_max_size(&fmt, &max_rect);
+			if (!v4l2_rect_same_size(&dev->fmt_out_rect, &fmt) &&
 			    vb2_is_busy(&dev->vb_vid_out_q))
 				return -EBUSY;
 			if (dev->has_crop_out) {
-				rect_set_min_size(crop, &min_rect);
-				rect_set_max_size(crop, &max_rect);
+				v4l2_rect_set_min_size(crop, &min_rect);
+				v4l2_rect_set_max_size(crop, &max_rect);
 			}
 			dev->fmt_out_rect = fmt;
 		} else if (dev->has_crop_out) {
 			struct v4l2_rect fmt = dev->fmt_out_rect;
 
-			rect_set_min_size(&fmt, &s->r);
-			if (!rect_same_size(&dev->fmt_out_rect, &fmt) &&
+			v4l2_rect_set_min_size(&fmt, &s->r);
+			if (!v4l2_rect_same_size(&dev->fmt_out_rect, &fmt) &&
 			    vb2_is_busy(&dev->vb_vid_out_q))
 				return -EBUSY;
 			dev->fmt_out_rect = fmt;
-			rect_set_size_to(crop, &s->r);
-			rect_map_inside(crop, &dev->fmt_out_rect);
+			v4l2_rect_set_size_to(crop, &s->r);
+			v4l2_rect_map_inside(crop, &dev->fmt_out_rect);
 		} else {
-			if (!rect_same_size(&s->r, &dev->fmt_out_rect) &&
+			if (!v4l2_rect_same_size(&s->r, &dev->fmt_out_rect) &&
 			    vb2_is_busy(&dev->vb_vid_out_q))
 				return -EBUSY;
-			rect_set_size_to(&dev->fmt_out_rect, &s->r);
-			rect_set_size_to(crop, &s->r);
+			v4l2_rect_set_size_to(&dev->fmt_out_rect, &s->r);
+			v4l2_rect_set_size_to(crop, &s->r);
 			crop->height /= factor;
-			rect_map_inside(crop, &dev->fmt_out_rect);
+			v4l2_rect_map_inside(crop, &dev->fmt_out_rect);
 		}
 		s->r.top *= factor;
 		s->r.height *= factor;
@@ -901,7 +902,7 @@
 			for (j = i + 1; j < win->clipcount; j++) {
 				struct v4l2_rect *r2 = &dev->try_clips_out[j].c;
 
-				if (rect_overlap(r1, r2))
+				if (v4l2_rect_overlap(r1, r2))
 					return -EINVAL;
 			}
 		}
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 910d6b8..46738b6 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -26,7 +26,6 @@
 struct clk;
 struct device;
 
-struct vsp1_dl;
 struct vsp1_drm;
 struct vsp1_entity;
 struct vsp1_platform_data;
@@ -49,6 +48,7 @@
 
 struct vsp1_device_info {
 	u32 version;
+	unsigned int gen;
 	unsigned int features;
 	unsigned int rpf_count;
 	unsigned int uds_count;
@@ -85,8 +85,6 @@
 	struct media_entity_operations media_ops;
 
 	struct vsp1_drm *drm;
-
-	bool use_dl;
 };
 
 int vsp1_device_get(struct vsp1_device *vsp1);
@@ -104,14 +102,4 @@
 	iowrite32(data, vsp1->mmio + reg);
 }
 
-#include "vsp1_dl.h"
-
-static inline void vsp1_mod_write(struct vsp1_entity *e, u32 reg, u32 data)
-{
-	if (e->vsp1->use_dl)
-		vsp1_dl_add(e, reg, data);
-	else
-		vsp1_write(e->vsp1, reg, data);
-}
-
 #endif /* __VSP1_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index cb0dbc1..b1068c0 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -18,6 +18,8 @@
 
 #include "vsp1.h"
 #include "vsp1_bru.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
 #include "vsp1_rwpf.h"
 #include "vsp1_video.h"
 
@@ -28,9 +30,10 @@
  * Device Access
  */
 
-static inline void vsp1_bru_write(struct vsp1_bru *bru, u32 reg, u32 data)
+static inline void vsp1_bru_write(struct vsp1_bru *bru, struct vsp1_dl_list *dl,
+				  u32 reg, u32 data)
 {
-	vsp1_mod_write(&bru->entity, reg, data);
+	vsp1_dl_list_write(dl, reg, data);
 }
 
 /* -----------------------------------------------------------------------------
@@ -42,13 +45,9 @@
 	struct vsp1_bru *bru =
 		container_of(ctrl->handler, struct vsp1_bru, ctrls);
 
-	if (!vsp1_entity_is_streaming(&bru->entity))
-		return 0;
-
 	switch (ctrl->id) {
 	case V4L2_CID_BG_COLOR:
-		vsp1_bru_write(bru, VI6_BRU_VIRRPF_COL, ctrl->val |
-			       (0xff << VI6_BRU_VIRRPF_COL_A_SHIFT));
+		bru->bgcolor = ctrl->val;
 		break;
 	}
 
@@ -60,26 +59,225 @@
 };
 
 /* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
+ * V4L2 Subdevice Operations
  */
 
-static int bru_s_stream(struct v4l2_subdev *subdev, int enable)
+/*
+ * The BRU can't perform format conversion, all sink and source formats must be
+ * identical. We pick the format on the first sink pad (pad 0) and propagate it
+ * to all other pads.
+ */
+
+static int bru_enum_mbus_code(struct v4l2_subdev *subdev,
+			      struct v4l2_subdev_pad_config *cfg,
+			      struct v4l2_subdev_mbus_code_enum *code)
 {
-	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&subdev->entity);
+	static const unsigned int codes[] = {
+		MEDIA_BUS_FMT_ARGB8888_1X32,
+		MEDIA_BUS_FMT_AYUV8_1X32,
+	};
+
+	return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+					  ARRAY_SIZE(codes));
+}
+
+static int bru_enum_frame_size(struct v4l2_subdev *subdev,
+			       struct v4l2_subdev_pad_config *cfg,
+			       struct v4l2_subdev_frame_size_enum *fse)
+{
+	if (fse->index)
+		return -EINVAL;
+
+	if (fse->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+	    fse->code != MEDIA_BUS_FMT_AYUV8_1X32)
+		return -EINVAL;
+
+	fse->min_width = BRU_MIN_SIZE;
+	fse->max_width = BRU_MAX_SIZE;
+	fse->min_height = BRU_MIN_SIZE;
+	fse->max_height = BRU_MAX_SIZE;
+
+	return 0;
+}
+
+static struct v4l2_rect *bru_get_compose(struct vsp1_bru *bru,
+					 struct v4l2_subdev_pad_config *cfg,
+					 unsigned int pad)
+{
+	return v4l2_subdev_get_try_compose(&bru->entity.subdev, cfg, pad);
+}
+
+static void bru_try_format(struct vsp1_bru *bru,
+			   struct v4l2_subdev_pad_config *config,
+			   unsigned int pad, struct v4l2_mbus_framefmt *fmt)
+{
+	struct v4l2_mbus_framefmt *format;
+
+	switch (pad) {
+	case BRU_PAD_SINK(0):
+		/* Default to YUV if the requested format is not supported. */
+		if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+		    fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
+			fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+		break;
+
+	default:
+		/* The BRU can't perform format conversion. */
+		format = vsp1_entity_get_pad_format(&bru->entity, config,
+						    BRU_PAD_SINK(0));
+		fmt->code = format->code;
+		break;
+	}
+
+	fmt->width = clamp(fmt->width, BRU_MIN_SIZE, BRU_MAX_SIZE);
+	fmt->height = clamp(fmt->height, BRU_MIN_SIZE, BRU_MAX_SIZE);
+	fmt->field = V4L2_FIELD_NONE;
+	fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+static int bru_set_format(struct v4l2_subdev *subdev,
+			  struct v4l2_subdev_pad_config *cfg,
+			  struct v4l2_subdev_format *fmt)
+{
 	struct vsp1_bru *bru = to_bru(subdev);
+	struct v4l2_subdev_pad_config *config;
+	struct v4l2_mbus_framefmt *format;
+
+	config = vsp1_entity_get_pad_config(&bru->entity, cfg, fmt->which);
+	if (!config)
+		return -EINVAL;
+
+	bru_try_format(bru, config, fmt->pad, &fmt->format);
+
+	format = vsp1_entity_get_pad_format(&bru->entity, config, fmt->pad);
+	*format = fmt->format;
+
+	/* Reset the compose rectangle */
+	if (fmt->pad != bru->entity.source_pad) {
+		struct v4l2_rect *compose;
+
+		compose = bru_get_compose(bru, config, fmt->pad);
+		compose->left = 0;
+		compose->top = 0;
+		compose->width = format->width;
+		compose->height = format->height;
+	}
+
+	/* Propagate the format code to all pads */
+	if (fmt->pad == BRU_PAD_SINK(0)) {
+		unsigned int i;
+
+		for (i = 0; i <= bru->entity.source_pad; ++i) {
+			format = vsp1_entity_get_pad_format(&bru->entity,
+							    config, i);
+			format->code = fmt->format.code;
+		}
+	}
+
+	return 0;
+}
+
+static int bru_get_selection(struct v4l2_subdev *subdev,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_selection *sel)
+{
+	struct vsp1_bru *bru = to_bru(subdev);
+	struct v4l2_subdev_pad_config *config;
+
+	if (sel->pad == bru->entity.source_pad)
+		return -EINVAL;
+
+	switch (sel->target) {
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		sel->r.left = 0;
+		sel->r.top = 0;
+		sel->r.width = BRU_MAX_SIZE;
+		sel->r.height = BRU_MAX_SIZE;
+		return 0;
+
+	case V4L2_SEL_TGT_COMPOSE:
+		config = vsp1_entity_get_pad_config(&bru->entity, cfg,
+						    sel->which);
+		if (!config)
+			return -EINVAL;
+
+		sel->r = *bru_get_compose(bru, config, sel->pad);
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static int bru_set_selection(struct v4l2_subdev *subdev,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_selection *sel)
+{
+	struct vsp1_bru *bru = to_bru(subdev);
+	struct v4l2_subdev_pad_config *config;
+	struct v4l2_mbus_framefmt *format;
+	struct v4l2_rect *compose;
+
+	if (sel->pad == bru->entity.source_pad)
+		return -EINVAL;
+
+	if (sel->target != V4L2_SEL_TGT_COMPOSE)
+		return -EINVAL;
+
+	config = vsp1_entity_get_pad_config(&bru->entity, cfg, sel->which);
+	if (!config)
+		return -EINVAL;
+
+	/* The compose rectangle top left corner must be inside the output
+	 * frame.
+	 */
+	format = vsp1_entity_get_pad_format(&bru->entity, config,
+					    bru->entity.source_pad);
+	sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
+	sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
+
+	/* Scaling isn't supported, the compose rectangle size must be identical
+	 * to the sink format size.
+	 */
+	format = vsp1_entity_get_pad_format(&bru->entity, config, sel->pad);
+	sel->r.width = format->width;
+	sel->r.height = format->height;
+
+	compose = bru_get_compose(bru, config, sel->pad);
+	*compose = sel->r;
+
+	return 0;
+}
+
+static struct v4l2_subdev_pad_ops bru_pad_ops = {
+	.init_cfg = vsp1_entity_init_cfg,
+	.enum_mbus_code = bru_enum_mbus_code,
+	.enum_frame_size = bru_enum_frame_size,
+	.get_fmt = vsp1_subdev_get_pad_format,
+	.set_fmt = bru_set_format,
+	.get_selection = bru_get_selection,
+	.set_selection = bru_set_selection,
+};
+
+static struct v4l2_subdev_ops bru_ops = {
+	.pad    = &bru_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void bru_configure(struct vsp1_entity *entity,
+			  struct vsp1_pipeline *pipe,
+			  struct vsp1_dl_list *dl)
+{
+	struct vsp1_bru *bru = to_bru(&entity->subdev);
 	struct v4l2_mbus_framefmt *format;
 	unsigned int flags;
 	unsigned int i;
-	int ret;
 
-	ret = vsp1_entity_set_streaming(&bru->entity, enable);
-	if (ret < 0)
-		return ret;
-
-	if (!enable)
-		return 0;
-
-	format = &bru->entity.formats[bru->entity.source_pad];
+	format = vsp1_entity_get_pad_format(&bru->entity, bru->entity.config,
+					    bru->entity.source_pad);
 
 	/* The hardware is extremely flexible but we have no userspace API to
 	 * expose all the parameters, nor is it clear whether we would have use
@@ -91,21 +289,26 @@
 	 * format at the pipeline output is premultiplied.
 	 */
 	flags = pipe->output ? pipe->output->format.flags : 0;
-	vsp1_bru_write(bru, VI6_BRU_INCTRL,
+	vsp1_bru_write(bru, dl, VI6_BRU_INCTRL,
 		       flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA ?
 		       0 : VI6_BRU_INCTRL_NRM);
 
-	/* Set the background position to cover the whole output image. */
-	vsp1_bru_write(bru, VI6_BRU_VIRRPF_SIZE,
+	/* Set the background position to cover the whole output image and
+	 * configure its color.
+	 */
+	vsp1_bru_write(bru, dl, VI6_BRU_VIRRPF_SIZE,
 		       (format->width << VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT) |
 		       (format->height << VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT));
-	vsp1_bru_write(bru, VI6_BRU_VIRRPF_LOC, 0);
+	vsp1_bru_write(bru, dl, VI6_BRU_VIRRPF_LOC, 0);
+
+	vsp1_bru_write(bru, dl, VI6_BRU_VIRRPF_COL, bru->bgcolor |
+		       (0xff << VI6_BRU_VIRRPF_COL_A_SHIFT));
 
 	/* Route BRU input 1 as SRC input to the ROP unit and configure the ROP
 	 * unit with a NOP operation to make BRU input 1 available as the
 	 * Blend/ROP unit B SRC input.
 	 */
-	vsp1_bru_write(bru, VI6_BRU_ROP, VI6_BRU_ROP_DSTSEL_BRUIN(1) |
+	vsp1_bru_write(bru, dl, VI6_BRU_ROP, VI6_BRU_ROP_DSTSEL_BRUIN(1) |
 		       VI6_BRU_ROP_CROP(VI6_ROP_NOP) |
 		       VI6_BRU_ROP_AROP(VI6_ROP_NOP));
 
@@ -142,7 +345,7 @@
 		if (i != 1)
 			ctrl |= VI6_BRU_CTRL_SRCSEL_BRUIN(i);
 
-		vsp1_bru_write(bru, VI6_BRU_CTRL(i), ctrl);
+		vsp1_bru_write(bru, dl, VI6_BRU_CTRL(i), ctrl);
 
 		/* Hardcode the blending formula to
 		 *
@@ -156,7 +359,7 @@
 		 *
 		 * otherwise.
 		 */
-		vsp1_bru_write(bru, VI6_BRU_BLD(i),
+		vsp1_bru_write(bru, dl, VI6_BRU_BLD(i),
 			       VI6_BRU_BLD_CCMDX_255_SRC_A |
 			       (premultiplied ? VI6_BRU_BLD_CCMDY_COEFY :
 						VI6_BRU_BLD_CCMDY_SRC_A) |
@@ -164,239 +367,10 @@
 			       VI6_BRU_BLD_ACMDY_COEFY |
 			       (0xff << VI6_BRU_BLD_COEFY_SHIFT));
 	}
-
-	return 0;
 }
 
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Pad Operations
- */
-
-/*
- * The BRU can't perform format conversion, all sink and source formats must be
- * identical. We pick the format on the first sink pad (pad 0) and propagate it
- * to all other pads.
- */
-
-static int bru_enum_mbus_code(struct v4l2_subdev *subdev,
-			      struct v4l2_subdev_pad_config *cfg,
-			      struct v4l2_subdev_mbus_code_enum *code)
-{
-	static const unsigned int codes[] = {
-		MEDIA_BUS_FMT_ARGB8888_1X32,
-		MEDIA_BUS_FMT_AYUV8_1X32,
-	};
-	struct vsp1_bru *bru = to_bru(subdev);
-	struct v4l2_mbus_framefmt *format;
-
-	if (code->pad == BRU_PAD_SINK(0)) {
-		if (code->index >= ARRAY_SIZE(codes))
-			return -EINVAL;
-
-		code->code = codes[code->index];
-	} else {
-		if (code->index)
-			return -EINVAL;
-
-		format = vsp1_entity_get_pad_format(&bru->entity, cfg,
-						    BRU_PAD_SINK(0), code->which);
-		code->code = format->code;
-	}
-
-	return 0;
-}
-
-static int bru_enum_frame_size(struct v4l2_subdev *subdev,
-			       struct v4l2_subdev_pad_config *cfg,
-			       struct v4l2_subdev_frame_size_enum *fse)
-{
-	if (fse->index)
-		return -EINVAL;
-
-	if (fse->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
-	    fse->code != MEDIA_BUS_FMT_AYUV8_1X32)
-		return -EINVAL;
-
-	fse->min_width = BRU_MIN_SIZE;
-	fse->max_width = BRU_MAX_SIZE;
-	fse->min_height = BRU_MIN_SIZE;
-	fse->max_height = BRU_MAX_SIZE;
-
-	return 0;
-}
-
-static struct v4l2_rect *bru_get_compose(struct vsp1_bru *bru,
-					 struct v4l2_subdev_pad_config *cfg,
-					 unsigned int pad, u32 which)
-{
-	switch (which) {
-	case V4L2_SUBDEV_FORMAT_TRY:
-		return v4l2_subdev_get_try_crop(&bru->entity.subdev, cfg, pad);
-	case V4L2_SUBDEV_FORMAT_ACTIVE:
-		return &bru->inputs[pad].compose;
-	default:
-		return NULL;
-	}
-}
-
-static int bru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			  struct v4l2_subdev_format *fmt)
-{
-	struct vsp1_bru *bru = to_bru(subdev);
-
-	fmt->format = *vsp1_entity_get_pad_format(&bru->entity, cfg, fmt->pad,
-						  fmt->which);
-
-	return 0;
-}
-
-static void bru_try_format(struct vsp1_bru *bru, struct v4l2_subdev_pad_config *cfg,
-			   unsigned int pad, struct v4l2_mbus_framefmt *fmt,
-			   enum v4l2_subdev_format_whence which)
-{
-	struct v4l2_mbus_framefmt *format;
-
-	switch (pad) {
-	case BRU_PAD_SINK(0):
-		/* Default to YUV if the requested format is not supported. */
-		if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
-		    fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
-			fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
-		break;
-
-	default:
-		/* The BRU can't perform format conversion. */
-		format = vsp1_entity_get_pad_format(&bru->entity, cfg,
-						    BRU_PAD_SINK(0), which);
-		fmt->code = format->code;
-		break;
-	}
-
-	fmt->width = clamp(fmt->width, BRU_MIN_SIZE, BRU_MAX_SIZE);
-	fmt->height = clamp(fmt->height, BRU_MIN_SIZE, BRU_MAX_SIZE);
-	fmt->field = V4L2_FIELD_NONE;
-	fmt->colorspace = V4L2_COLORSPACE_SRGB;
-}
-
-static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			  struct v4l2_subdev_format *fmt)
-{
-	struct vsp1_bru *bru = to_bru(subdev);
-	struct v4l2_mbus_framefmt *format;
-
-	bru_try_format(bru, cfg, fmt->pad, &fmt->format, fmt->which);
-
-	format = vsp1_entity_get_pad_format(&bru->entity, cfg, fmt->pad,
-					    fmt->which);
-	*format = fmt->format;
-
-	/* Reset the compose rectangle */
-	if (fmt->pad != bru->entity.source_pad) {
-		struct v4l2_rect *compose;
-
-		compose = bru_get_compose(bru, cfg, fmt->pad, fmt->which);
-		compose->left = 0;
-		compose->top = 0;
-		compose->width = format->width;
-		compose->height = format->height;
-	}
-
-	/* Propagate the format code to all pads */
-	if (fmt->pad == BRU_PAD_SINK(0)) {
-		unsigned int i;
-
-		for (i = 0; i <= bru->entity.source_pad; ++i) {
-			format = vsp1_entity_get_pad_format(&bru->entity, cfg,
-							    i, fmt->which);
-			format->code = fmt->format.code;
-		}
-	}
-
-	return 0;
-}
-
-static int bru_get_selection(struct v4l2_subdev *subdev,
-			     struct v4l2_subdev_pad_config *cfg,
-			     struct v4l2_subdev_selection *sel)
-{
-	struct vsp1_bru *bru = to_bru(subdev);
-
-	if (sel->pad == bru->entity.source_pad)
-		return -EINVAL;
-
-	switch (sel->target) {
-	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
-		sel->r.left = 0;
-		sel->r.top = 0;
-		sel->r.width = BRU_MAX_SIZE;
-		sel->r.height = BRU_MAX_SIZE;
-		return 0;
-
-	case V4L2_SEL_TGT_COMPOSE:
-		sel->r = *bru_get_compose(bru, cfg, sel->pad, sel->which);
-		return 0;
-
-	default:
-		return -EINVAL;
-	}
-}
-
-static int bru_set_selection(struct v4l2_subdev *subdev,
-			     struct v4l2_subdev_pad_config *cfg,
-			     struct v4l2_subdev_selection *sel)
-{
-	struct vsp1_bru *bru = to_bru(subdev);
-	struct v4l2_mbus_framefmt *format;
-	struct v4l2_rect *compose;
-
-	if (sel->pad == bru->entity.source_pad)
-		return -EINVAL;
-
-	if (sel->target != V4L2_SEL_TGT_COMPOSE)
-		return -EINVAL;
-
-	/* The compose rectangle top left corner must be inside the output
-	 * frame.
-	 */
-	format = vsp1_entity_get_pad_format(&bru->entity, cfg,
-					    bru->entity.source_pad, sel->which);
-	sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
-	sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
-
-	/* Scaling isn't supported, the compose rectangle size must be identical
-	 * to the sink format size.
-	 */
-	format = vsp1_entity_get_pad_format(&bru->entity, cfg, sel->pad,
-					    sel->which);
-	sel->r.width = format->width;
-	sel->r.height = format->height;
-
-	compose = bru_get_compose(bru, cfg, sel->pad, sel->which);
-	*compose = sel->r;
-
-	return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops bru_video_ops = {
-	.s_stream = bru_s_stream,
-};
-
-static struct v4l2_subdev_pad_ops bru_pad_ops = {
-	.enum_mbus_code = bru_enum_mbus_code,
-	.enum_frame_size = bru_enum_frame_size,
-	.get_fmt = bru_get_format,
-	.set_fmt = bru_set_format,
-	.get_selection = bru_get_selection,
-	.set_selection = bru_set_selection,
-};
-
-static struct v4l2_subdev_ops bru_ops = {
-	.video	= &bru_video_ops,
-	.pad    = &bru_pad_ops,
+static const struct vsp1_entity_operations bru_entity_ops = {
+	.configure = bru_configure,
 };
 
 /* -----------------------------------------------------------------------------
@@ -405,7 +379,6 @@
 
 struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1)
 {
-	struct v4l2_subdev *subdev;
 	struct vsp1_bru *bru;
 	int ret;
 
@@ -413,31 +386,21 @@
 	if (bru == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	bru->entity.ops = &bru_entity_ops;
 	bru->entity.type = VSP1_ENTITY_BRU;
 
-	ret = vsp1_entity_init(vsp1, &bru->entity,
-			       vsp1->info->num_bru_inputs + 1);
+	ret = vsp1_entity_init(vsp1, &bru->entity, "bru",
+			       vsp1->info->num_bru_inputs + 1, &bru_ops);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
-	/* Initialize the V4L2 subdev. */
-	subdev = &bru->entity.subdev;
-	v4l2_subdev_init(subdev, &bru_ops);
-
-	subdev->entity.ops = &vsp1->media_ops;
-	subdev->internal_ops = &vsp1_subdev_internal_ops;
-	snprintf(subdev->name, sizeof(subdev->name), "%s bru",
-		 dev_name(vsp1->dev));
-	v4l2_set_subdevdata(subdev, bru);
-	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
-	vsp1_entity_init_formats(subdev, NULL);
-
 	/* Initialize the control handler. */
 	v4l2_ctrl_handler_init(&bru->ctrls, 1);
 	v4l2_ctrl_new_std(&bru->ctrls, &bru_ctrl_ops, V4L2_CID_BG_COLOR,
 			  0, 0xffffff, 1, 0);
 
+	bru->bgcolor = 0;
+
 	bru->entity.subdev.ctrl_handler = &bru->ctrls;
 
 	if (bru->ctrls.error) {
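
With the .configure() entity operation above, register writes no longer hit the hardware directly; they are queued as address/value pairs on the per-frame display list and take effect when that list is committed. A hedged sketch of the pattern (example_configure() is illustrative; vsp1_dl_list_write() and the VI6_* register come from the driver):

static void example_configure(struct vsp1_entity *entity,
			      struct vsp1_pipeline *pipe,
			      struct vsp1_dl_list *dl)
{
	/*
	 * Queued only: the value reaches the hardware when the display
	 * list is loaded for the next frame, keeping the whole frame's
	 * setup atomic.
	 */
	vsp1_dl_list_write(dl, VI6_BRU_VIRRPF_LOC, 0);
}
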
diff --git a/drivers/media/platform/vsp1/vsp1_bru.h b/drivers/media/platform/vsp1/vsp1_bru.h
index dbac968..828a3fc 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.h
+++ b/drivers/media/platform/vsp1/vsp1_bru.h
@@ -31,8 +31,9 @@
 
 	struct {
 		struct vsp1_rwpf *rpf;
-		struct v4l2_rect compose;
 	} inputs[VSP1_MAX_RPF];
+
+	u32 bgcolor;
 };
 
 static inline struct vsp1_bru *to_bru(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 1a9a585..e238d9b 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -18,139 +18,406 @@
 
 #include "vsp1.h"
 #include "vsp1_dl.h"
-#include "vsp1_pipe.h"
 
-/*
- * Global resources
- *
- * - Display-related interrupts (can be used for vblank evasion ?)
- * - Display-list enable
- * - Header-less for WPF0
- * - DL swap
- */
-
-#define VSP1_DL_BODY_SIZE		(2 * 4 * 256)
+#define VSP1_DL_NUM_ENTRIES		256
 #define VSP1_DL_NUM_LISTS		3
 
+#define VSP1_DLH_INT_ENABLE		(1 << 1)
+#define VSP1_DLH_AUTO_START		(1 << 0)
+
+struct vsp1_dl_header_list {
+	u32 num_bytes;
+	u32 addr;
+} __attribute__((__packed__));
+
+struct vsp1_dl_header {
+	u32 num_lists;
+	struct vsp1_dl_header_list lists[8];
+	u32 next_header;
+	u32 flags;
+} __attribute__((__packed__));
+
 struct vsp1_dl_entry {
 	u32 addr;
 	u32 data;
 } __attribute__((__packed__));
 
-struct vsp1_dl_list {
-	size_t size;
-	int reg_count;
+/**
+ * struct vsp1_dl_body - Display list body
+ * @list: entry in the display list list of bodies
+ * @vsp1: the VSP1 device
+ * @entries: array of entries
+ * @dma: DMA address of the entries
+ * @size: size of the DMA memory in bytes
+ * @num_entries: number of stored entries
+ */
+struct vsp1_dl_body {
+	struct list_head list;
+	struct vsp1_device *vsp1;
 
-	bool in_use;
-
-	struct vsp1_dl_entry *body;
+	struct vsp1_dl_entry *entries;
 	dma_addr_t dma;
+	size_t size;
+
+	unsigned int num_entries;
 };
 
 /**
- * struct vsp1_dl - Display List manager
+ * struct vsp1_dl_list - Display list
+ * @list: entry in the display list manager lists
+ * @dlm: the display list manager
+ * @header: display list header, NULL for headerless lists
+ * @dma: DMA address for the header
+ * @body0: first display list body
+ * @fragments: list of extra display list bodies
+ */
+struct vsp1_dl_list {
+	struct list_head list;
+	struct vsp1_dl_manager *dlm;
+
+	struct vsp1_dl_header *header;
+	dma_addr_t dma;
+
+	struct vsp1_dl_body body0;
+	struct list_head fragments;
+};
+
+enum vsp1_dl_mode {
+	VSP1_DL_MODE_HEADER,
+	VSP1_DL_MODE_HEADERLESS,
+};
+
+/**
+ * struct vsp1_dl_manager - Display List manager
+ * @index: index of the related WPF
+ * @mode: display list operation mode (header or headerless)
  * @vsp1: the VSP1 device
  * @lock: protects the active, queued and pending lists
- * @lists.all: array of all allocate display lists
- * @lists.active: list currently being processed (loaded) by hardware
- * @lists.queued: list queued to the hardware (written to the DL registers)
- * @lists.pending: list waiting to be queued to the hardware
- * @lists.write: list being written to by software
+ * @free: list of all free display lists
+ * @active: list currently being processed (loaded) by hardware
+ * @queued: list queued to the hardware (written to the DL registers)
+ * @pending: list waiting to be queued to the hardware
  */
-struct vsp1_dl {
+struct vsp1_dl_manager {
+	unsigned int index;
+	enum vsp1_dl_mode mode;
 	struct vsp1_device *vsp1;
 
 	spinlock_t lock;
-
-	size_t size;
-	dma_addr_t dma;
-	void *mem;
-
-	struct {
-		struct vsp1_dl_list all[VSP1_DL_NUM_LISTS];
-
-		struct vsp1_dl_list *active;
-		struct vsp1_dl_list *queued;
-		struct vsp1_dl_list *pending;
-		struct vsp1_dl_list *write;
-	} lists;
+	struct list_head free;
+	struct vsp1_dl_list *active;
+	struct vsp1_dl_list *queued;
+	struct vsp1_dl_list *pending;
 };
 
 /* -----------------------------------------------------------------------------
+ * Display List Body Management
+ */
+
+/*
+ * Initialize a display list body object and allocate DMA memory for the body
+ * data. The display list body object is expected to have been initialized to
+ * 0 when allocated.
+ */
+static int vsp1_dl_body_init(struct vsp1_device *vsp1,
+			     struct vsp1_dl_body *dlb, unsigned int num_entries,
+			     size_t extra_size)
+{
+	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;
+
+	dlb->vsp1 = vsp1;
+	dlb->size = size;
+
+	dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
+				    GFP_KERNEL);
+	if (!dlb->entries)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * Clean up a display list body and free the allocated DMA memory.
+ */
+static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
+{
+	dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
+}
+
+/**
+ * vsp1_dl_fragment_alloc - Allocate a display list fragment
+ * @vsp1: The VSP1 device
+ * @num_entries: The maximum number of entries that the fragment can contain
+ *
+ * Allocate a display list fragment with enough memory to contain the requested
+ * number of entries.
+ *
+ * Return a pointer to a fragment on success or NULL if memory can't be
+ * allocated.
+ */
+struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
+					    unsigned int num_entries)
+{
+	struct vsp1_dl_body *dlb;
+	int ret;
+
+	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
+	if (!dlb)
+		return NULL;
+
+	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
+	if (ret < 0) {
+		kfree(dlb);
+		return NULL;
+	}
+
+	return dlb;
+}
+
+/**
+ * vsp1_dl_fragment_free - Free a display list fragment
+ * @dlb: The fragment
+ *
+ * Free the given display list fragment and the associated DMA memory.
+ *
+ * Fragments must only be freed explicitly if they are not added to a display
+ * list, as the display list will take ownership of them and free them
+ * otherwise. Manual free typically happens at cleanup time for fragments that
+ * have been allocated but not used.
+ *
+ * Passing a NULL pointer to this function is safe; in that case no operation
+ * will be performed.
+ */
+void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
+{
+	if (!dlb)
+		return;
+
+	vsp1_dl_body_cleanup(dlb);
+	kfree(dlb);
+}
+
+/**
+ * vsp1_dl_fragment_write - Write a register to a display list fragment
+ * @dlb: The fragment
+ * @reg: The register address
+ * @data: The register value
+ *
+ * Write the given register and value to the display list fragment. The maximum
+ * number of entries that can be written in a fragment is specified when the
+ * fragment is allocated by vsp1_dl_fragment_alloc().
+ */
+void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+	dlb->entries[dlb->num_entries].addr = reg;
+	dlb->entries[dlb->num_entries].data = data;
+	dlb->num_entries++;
+}
+
+/* -----------------------------------------------------------------------------
  * Display List Transaction Management
  */
 
-static void vsp1_dl_free_list(struct vsp1_dl_list *list)
+static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
 {
-	if (!list)
+	struct vsp1_dl_list *dl;
+	size_t header_size;
+	int ret;
+
+	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
+	if (!dl)
+		return NULL;
+
+	INIT_LIST_HEAD(&dl->fragments);
+	dl->dlm = dlm;
+
+	/* Initialize the display list body and allocate DMA memory for the body
+	 * and the optional header. Both are allocated together to avoid memory
+	 * fragmentation, with the header located right after the body in
+	 * memory.
+	 */
+	header_size = dlm->mode == VSP1_DL_MODE_HEADER
+		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
+		    : 0;
+
+	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
+				header_size);
+	if (ret < 0) {
+		kfree(dl);
+		return NULL;
+	}
+
+	if (dlm->mode == VSP1_DL_MODE_HEADER) {
+		size_t header_offset = VSP1_DL_NUM_ENTRIES
+				     * sizeof(*dl->body0.entries);
+
+		dl->header = ((void *)dl->body0.entries) + header_offset;
+		dl->dma = dl->body0.dma + header_offset;
+
+		memset(dl->header, 0, sizeof(*dl->header));
+		dl->header->lists[0].addr = dl->body0.dma;
+		dl->header->flags = VSP1_DLH_INT_ENABLE;
+	}
+
+	return dl;
+}
+
+static void vsp1_dl_list_free_fragments(struct vsp1_dl_list *dl)
+{
+	struct vsp1_dl_body *dlb, *next;
+
+	list_for_each_entry_safe(dlb, next, &dl->fragments, list) {
+		list_del(&dlb->list);
+		vsp1_dl_body_cleanup(dlb);
+		kfree(dlb);
+	}
+}
+
+static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
+{
+	vsp1_dl_body_cleanup(&dl->body0);
+	vsp1_dl_list_free_fragments(dl);
+	kfree(dl);
+}
+
+/**
+ * vsp1_dl_list_get - Get a free display list
+ * @dlm: The display list manager
+ *
+ * Get a display list from the pool of free lists and return it.
+ *
+ * This function must be called without the display list manager lock held.
+ */
+struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
+{
+	struct vsp1_dl_list *dl = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dlm->lock, flags);
+
+	if (!list_empty(&dlm->free)) {
+		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
+		list_del(&dl->list);
+	}
+
+	spin_unlock_irqrestore(&dlm->lock, flags);
+
+	return dl;
+}
+
+/* This function must be called with the display list manager lock held. */
+static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
+{
+	if (!dl)
 		return;
 
-	list->in_use = false;
+	vsp1_dl_list_free_fragments(dl);
+	dl->body0.num_entries = 0;
+
+	list_add_tail(&dl->list, &dl->dlm->free);
 }
 
-void vsp1_dl_reset(struct vsp1_dl *dl)
+/**
+ * vsp1_dl_list_put - Release a display list
+ * @dl: The display list
+ *
+ * Release the display list and return it to the pool of free lists.
+ *
+ * Passing a NULL pointer to this function is safe; in that case no operation
+ * will be performed.
+ */
+void vsp1_dl_list_put(struct vsp1_dl_list *dl)
 {
-	unsigned int i;
-
-	dl->lists.active = NULL;
-	dl->lists.queued = NULL;
-	dl->lists.pending = NULL;
-	dl->lists.write = NULL;
-
-	for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i)
-		dl->lists.all[i].in_use = false;
-}
-
-void vsp1_dl_begin(struct vsp1_dl *dl)
-{
-	struct vsp1_dl_list *list = NULL;
 	unsigned long flags;
-	unsigned int i;
 
-	spin_lock_irqsave(&dl->lock, flags);
+	if (!dl)
+		return;
 
-	for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) {
-		if (!dl->lists.all[i].in_use) {
-			list = &dl->lists.all[i];
-			break;
-		}
-	}
-
-	if (!list) {
-		list = dl->lists.pending;
-		dl->lists.pending = NULL;
-	}
-
-	spin_unlock_irqrestore(&dl->lock, flags);
-
-	dl->lists.write = list;
-
-	list->in_use = true;
-	list->reg_count = 0;
+	spin_lock_irqsave(&dl->dlm->lock, flags);
+	__vsp1_dl_list_put(dl);
+	spin_unlock_irqrestore(&dl->dlm->lock, flags);
 }
 
-void vsp1_dl_add(struct vsp1_entity *e, u32 reg, u32 data)
+/**
+ * vsp1_dl_list_write - Write a register to the display list
+ * @dl: The display list
+ * @reg: The register address
+ * @data: The register value
+ *
+ * Write the given register and value to the display list. Up to 256 registers
+ * can be written per display list.
+ */
+void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
 {
-	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&e->subdev.entity);
-	struct vsp1_dl *dl = pipe->dl;
-	struct vsp1_dl_list *list = dl->lists.write;
-
-	list->body[list->reg_count].addr = reg;
-	list->body[list->reg_count].data = data;
-	list->reg_count++;
+	vsp1_dl_fragment_write(&dl->body0, reg, data);
 }
 
-void vsp1_dl_commit(struct vsp1_dl *dl)
+/**
+ * vsp1_dl_list_add_fragment - Add a fragment to the display list
+ * @dl: The display list
+ * @dlb: The fragment
+ *
+ * Add a display list body as a fragment to a display list. Registers contained
+ * in fragments are processed after registers contained in the main display
+ * list, in the order in which fragments are added.
+ *
+ * Adding a fragment to a display list passes ownership of the fragment to the
+ * list. The caller must not touch the fragment after this call, and must not
+ * free it explicitly with vsp1_dl_fragment_free().
+ *
+ * Fragments are only usable for display lists in header mode. Attempting to
+ * add a fragment to a headerless display list will return an error.
+ */
+int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
+			      struct vsp1_dl_body *dlb)
 {
-	struct vsp1_device *vsp1 = dl->vsp1;
-	struct vsp1_dl_list *list;
+	/* Multi-body lists are only available in header mode. */
+	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
+		return -EINVAL;
+
+	list_add_tail(&dlb->list, &dl->fragments);
+	return 0;
+}
+
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
+{
+	struct vsp1_dl_manager *dlm = dl->dlm;
+	struct vsp1_device *vsp1 = dlm->vsp1;
 	unsigned long flags;
 	bool update;
 
-	list = dl->lists.write;
-	dl->lists.write = NULL;
+	spin_lock_irqsave(&dlm->lock, flags);
 
-	spin_lock_irqsave(&dl->lock, flags);
+	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
+		struct vsp1_dl_header_list *hdr = dl->header->lists;
+		struct vsp1_dl_body *dlb;
+		unsigned int num_lists = 0;
+
+		/* Fill the header with the display list bodies' addresses and
+		 * sizes. The address of the first body has already been filled
+		 * when the display list was allocated.
+		 *
+		 * In header mode the caller guarantees that the hardware is
+		 * idle at this point.
+		 */
+		hdr->num_bytes = dl->body0.num_entries
+			       * sizeof(*dl->header->lists);
+
+		list_for_each_entry(dlb, &dl->fragments, list) {
+			num_lists++;
+			hdr++;
+
+			hdr->addr = dlb->dma;
+			hdr->num_bytes = dlb->num_entries
+				       * sizeof(*dl->header->lists);
+		}
+
+		dl->header->num_lists = num_lists;
+		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
+
+		dlm->active = dl;
+		goto done;
+	}
 
 	/* Once the UPD bit has been set the hardware can start processing the
 	 * display list at any time and we can't touch the address and size
@@ -159,8 +426,8 @@
 	 */
 	update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
 	if (update) {
-		vsp1_dl_free_list(dl->lists.pending);
-		dl->lists.pending = list;
+		__vsp1_dl_list_put(dlm->pending);
+		dlm->pending = dl;
 		goto done;
 	}
 
@@ -168,42 +435,51 @@
 	 * The UPD bit will be cleared by the device when the display list is
 	 * processed.
 	 */
-	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), list->dma);
+	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
 	vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
-		   (list->reg_count * 8));
+		   (dl->body0.num_entries * sizeof(*dl->header->lists)));
 
-	vsp1_dl_free_list(dl->lists.queued);
-	dl->lists.queued = list;
+	__vsp1_dl_list_put(dlm->queued);
+	dlm->queued = dl;
 
 done:
-	spin_unlock_irqrestore(&dl->lock, flags);
+	spin_unlock_irqrestore(&dlm->lock, flags);
 }
 
 /* -----------------------------------------------------------------------------
- * Interrupt Handling
+ * Display List Manager
  */
 
-void vsp1_dl_irq_display_start(struct vsp1_dl *dl)
+/* Interrupt Handling */
+void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
 {
-	spin_lock(&dl->lock);
+	spin_lock(&dlm->lock);
 
 	/* The display start interrupt signals the end of the display list
 	 * processing by the device. The active display list, if any, won't be
 	 * accessed anymore and can be reused.
 	 */
-	if (dl->lists.active) {
-		vsp1_dl_free_list(dl->lists.active);
-		dl->lists.active = NULL;
-	}
+	__vsp1_dl_list_put(dlm->active);
+	dlm->active = NULL;
 
-	spin_unlock(&dl->lock);
+	spin_unlock(&dlm->lock);
 }
 
-void vsp1_dl_irq_frame_end(struct vsp1_dl *dl)
+void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
 {
-	struct vsp1_device *vsp1 = dl->vsp1;
+	struct vsp1_device *vsp1 = dlm->vsp1;
 
-	spin_lock(&dl->lock);
+	spin_lock(&dlm->lock);
+
+	__vsp1_dl_list_put(dlm->active);
+	dlm->active = NULL;
+
+	/* Header mode is used for mem-to-mem pipelines only. We don't need to
+	 * perform any operation as there can't be any new display list queued
+	 * in that case.
+	 */
+	if (dlm->mode == VSP1_DL_MODE_HEADER)
+		goto done;
 
 	/* The UPD bit set indicates that the commit operation raced with the
 	 * interrupt and occurred after the frame end event and UPD clear but
@@ -216,42 +492,39 @@
 	/* The device starts processing the queued display list right after the
 	 * frame end interrupt. The display list thus becomes active.
 	 */
-	if (dl->lists.queued) {
-		WARN_ON(dl->lists.active);
-		dl->lists.active = dl->lists.queued;
-		dl->lists.queued = NULL;
+	if (dlm->queued) {
+		dlm->active = dlm->queued;
+		dlm->queued = NULL;
 	}
 
 	/* Now that the UPD bit has been cleared we can queue the next display
 	 * list to the hardware if one has been prepared.
 	 */
-	if (dl->lists.pending) {
-		struct vsp1_dl_list *list = dl->lists.pending;
+	if (dlm->pending) {
+		struct vsp1_dl_list *dl = dlm->pending;
 
-		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), list->dma);
+		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
 		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
-			   (list->reg_count * 8));
+			   (dl->body0.num_entries *
+			    sizeof(*dl->header->lists)));
 
-		dl->lists.queued = list;
-		dl->lists.pending = NULL;
+		dlm->queued = dl;
+		dlm->pending = NULL;
 	}
 
 done:
-	spin_unlock(&dl->lock);
+	spin_unlock(&dlm->lock);
 }
 
-/* -----------------------------------------------------------------------------
- * Hardware Setup
- */
-
-void vsp1_dl_setup(struct vsp1_device *vsp1)
+/* Hardware Setup */
+void vsp1_dlm_setup(struct vsp1_device *vsp1)
 {
 	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
 		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
 		 | VI6_DL_CTRL_DLE;
 
-	/* The DRM pipeline operates with header-less display lists in
-	 * Continuous Frame Mode.
+	/* The DRM pipeline operates with display lists in Continuous Frame
+	 * Mode, while all other pipelines use manual start.
 	 */
 	if (vsp1->drm)
 		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
@@ -260,46 +533,64 @@
 	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
 }
 
-/* -----------------------------------------------------------------------------
- * Initialization and Cleanup
- */
-
-struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1)
+void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
 {
-	struct vsp1_dl *dl;
-	unsigned int i;
+	unsigned long flags;
 
-	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
-	if (!dl)
-		return NULL;
+	spin_lock_irqsave(&dlm->lock, flags);
 
-	spin_lock_init(&dl->lock);
+	__vsp1_dl_list_put(dlm->active);
+	__vsp1_dl_list_put(dlm->queued);
+	__vsp1_dl_list_put(dlm->pending);
 
-	dl->vsp1 = vsp1;
-	dl->size = VSP1_DL_BODY_SIZE * ARRAY_SIZE(dl->lists.all);
+	spin_unlock_irqrestore(&dlm->lock, flags);
 
-	dl->mem = dma_alloc_wc(vsp1->dev, dl->size, &dl->dma,
-					 GFP_KERNEL);
-	if (!dl->mem) {
-		kfree(dl);
-		return NULL;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) {
-		struct vsp1_dl_list *list = &dl->lists.all[i];
-
-		list->size = VSP1_DL_BODY_SIZE;
-		list->reg_count = 0;
-		list->in_use = false;
-		list->dma = dl->dma + VSP1_DL_BODY_SIZE * i;
-		list->body = dl->mem + VSP1_DL_BODY_SIZE * i;
-	}
-
-	return dl;
+	dlm->active = NULL;
+	dlm->queued = NULL;
+	dlm->pending = NULL;
 }
 
-void vsp1_dl_destroy(struct vsp1_dl *dl)
+struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
+					unsigned int index,
+					unsigned int prealloc)
 {
-	dma_free_wc(dl->vsp1->dev, dl->size, dl->mem, dl->dma);
-	kfree(dl);
+	struct vsp1_dl_manager *dlm;
+	unsigned int i;
+
+	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
+	if (!dlm)
+		return NULL;
+
+	dlm->index = index;
+	dlm->mode = index == 0 && !vsp1->info->uapi
+		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
+	dlm->vsp1 = vsp1;
+
+	spin_lock_init(&dlm->lock);
+	INIT_LIST_HEAD(&dlm->free);
+
+	for (i = 0; i < prealloc; ++i) {
+		struct vsp1_dl_list *dl;
+
+		dl = vsp1_dl_list_alloc(dlm);
+		if (!dl)
+			return NULL;
+
+		list_add_tail(&dl->list, &dlm->free);
+	}
+
+	return dlm;
+}
+
+void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
+{
+	struct vsp1_dl_list *dl, *next;
+
+	if (!dlm)
+		return;
+
+	list_for_each_entry_safe(dl, next, &dlm->free, list) {
+		list_del(&dl->list);
+		vsp1_dl_list_free(dl);
+	}
 }
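
The display list manager above replaces the single global vsp1_dl object with one
manager per WPF. As a rough, hypothetical sketch (not part of the patch), a per-frame
user of the new API follows the cycle below; the register offset and value are
placeholders, and completed lists are recycled by vsp1_dlm_irq_frame_end() rather
than freed.

#include <linux/errno.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define MY_DUMMY_REG	0x0100	/* placeholder register offset, illustration only */
#define MY_DUMMY_VAL	0x0001	/* placeholder value */

/* Hypothetical per-frame usage of the display list manager API. */
static int my_queue_frame(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;

	/* Take a list from the manager's free pool. */
	dl = vsp1_dl_list_get(dlm);
	if (!dl)
		return -ENOMEM;

	/* Up to 256 register writes are stored in the main body. */
	vsp1_dl_list_write(dl, MY_DUMMY_REG, MY_DUMMY_VAL);

	/* Hand the list to the hardware. In headerless mode it becomes queued
	 * (or pending if the previous list hasn't been fetched yet) and is
	 * returned to the free pool by vsp1_dlm_irq_frame_end() once processed.
	 */
	vsp1_dl_list_commit(dl);

	return 0;
}
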
diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h
index 448c425..de387cd 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.h
+++ b/drivers/media/platform/vsp1/vsp1_dl.h
@@ -13,30 +13,33 @@
 #ifndef __VSP1_DL_H__
 #define __VSP1_DL_H__
 
-#include "vsp1_entity.h"
+#include <linux/types.h>
 
 struct vsp1_device;
-struct vsp1_dl;
+struct vsp1_dl_fragment;
+struct vsp1_dl_list;
+struct vsp1_dl_manager;
 
-struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1);
-void vsp1_dl_destroy(struct vsp1_dl *dl);
+void vsp1_dlm_setup(struct vsp1_device *vsp1);
 
-void vsp1_dl_setup(struct vsp1_device *vsp1);
+struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
+					unsigned int index,
+					unsigned int prealloc);
+void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm);
+void vsp1_dlm_reset(struct vsp1_dl_manager *dlm);
+void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm);
+void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm);
 
-void vsp1_dl_reset(struct vsp1_dl *dl);
-void vsp1_dl_begin(struct vsp1_dl *dl);
-void vsp1_dl_add(struct vsp1_entity *e, u32 reg, u32 data);
-void vsp1_dl_commit(struct vsp1_dl *dl);
+struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);
+void vsp1_dl_list_put(struct vsp1_dl_list *dl);
+void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data);
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl);
 
-void vsp1_dl_irq_display_start(struct vsp1_dl *dl);
-void vsp1_dl_irq_frame_end(struct vsp1_dl *dl);
-
-static inline void vsp1_dl_mod_write(struct vsp1_entity *e, u32 reg, u32 data)
-{
-	if (e->vsp1->use_dl)
-		vsp1_dl_add(e, reg, data);
-	else
-		vsp1_write(e->vsp1, reg, data);
-}
+struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
+					    unsigned int num_entries);
+void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb);
+void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data);
+int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
+			      struct vsp1_dl_body *dlb);
 
 #endif /* __VSP1_DL_H__ */
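
The fragment functions declared above are the second new concept in this file. The
sketch below (illustration only, not from the patch) shows the ownership rule spelled
out in the vsp1_dl_fragment_free() and vsp1_dl_list_add_fragment() documentation: a
fragment is freed explicitly only as long as no display list owns it. The register
offsets are placeholders.

#include <linux/errno.h>

#include "vsp1.h"
#include "vsp1_dl.h"

/* Hypothetical helper: pre-build a fragment of register writes and attach it
 * to a header-mode display list.
 */
static int my_add_static_config(struct vsp1_device *vsp1,
				struct vsp1_dl_list *dl)
{
	struct vsp1_dl_body *dlb;
	int ret;

	dlb = vsp1_dl_fragment_alloc(vsp1, 2);	/* room for two entries */
	if (!dlb)
		return -ENOMEM;

	vsp1_dl_fragment_write(dlb, 0x0100, 0x1);	/* placeholder writes */
	vsp1_dl_fragment_write(dlb, 0x0104, 0x2);

	ret = vsp1_dl_list_add_fragment(dl, dlb);
	if (ret < 0) {
		/* The list didn't take ownership (headerless mode), so the
		 * fragment must be freed by the caller.
		 */
		vsp1_dl_fragment_free(dlb);
		return ret;
	}

	/* From this point on the display list owns and frees the fragment. */
	return 0;
}
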
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index 021fe57..fc4bbc4 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -13,10 +13,10 @@
 
 #include <linux/device.h>
 #include <linux/slab.h>
-#include <linux/vsp1.h>
 
 #include <media/media-entity.h>
 #include <media/v4l2-subdev.h>
+#include <media/vsp1.h>
 
 #include "vsp1.h"
 #include "vsp1_bru.h"
@@ -26,18 +26,14 @@
 #include "vsp1_pipe.h"
 #include "vsp1_rwpf.h"
 
+
 /* -----------------------------------------------------------------------------
- * Runtime Handling
+ * Interrupt Handling
  */
 
-static void vsp1_drm_pipeline_frame_end(struct vsp1_pipeline *pipe)
+void vsp1_drm_display_start(struct vsp1_device *vsp1)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&pipe->irqlock, flags);
-	if (pipe->num_inputs)
-		vsp1_pipeline_run(pipe);
-	spin_unlock_irqrestore(&pipe->irqlock, flags);
+	vsp1_dlm_irq_display_start(vsp1->drm->pipe.output->dlm);
 }
 
 /* -----------------------------------------------------------------------------
@@ -97,12 +93,14 @@
 		media_entity_pipeline_stop(&pipe->output->entity.subdev.entity);
 
 		for (i = 0; i < bru->entity.source_pad; ++i) {
+			vsp1->drm->inputs[i].enabled = false;
 			bru->inputs[i].rpf = NULL;
 			pipe->inputs[i] = NULL;
 		}
 
 		pipe->num_inputs = 0;
 
+		vsp1_dlm_reset(pipe->output->dlm);
 		vsp1_device_put(vsp1);
 
 		dev_dbg(vsp1->dev, "%s: pipeline disabled\n", __func__);
@@ -110,8 +108,6 @@
 		return 0;
 	}
 
-	vsp1_dl_reset(vsp1->drm->dl);
-
 	/* Configure the format at the BRU sinks and propagate it through the
 	 * pipeline.
 	 */
@@ -222,16 +218,11 @@
 {
 	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
 	struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
-	unsigned long flags;
-
-	spin_lock_irqsave(&pipe->irqlock, flags);
 
 	vsp1->drm->num_inputs = pipe->num_inputs;
 
-	spin_unlock_irqrestore(&pipe->irqlock, flags);
-
 	/* Prepare the display list. */
-	vsp1_dl_begin(vsp1->drm->dl);
+	pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
 }
 EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
 
@@ -244,10 +235,13 @@
  * @mem: DMA addresses of the memory buffers (one per plane)
  * @src: the source crop rectangle for the RPF
  * @dst: the destination compose rectangle for the BRU input
+ * @alpha: global alpha value for the input
+ * @zpos: the Z-order position of the input
  *
  * Configure the VSP to perform composition of the image referenced by @mem
  * through RPF @rpf_index, using the @src crop rectangle and the @dst
- * composition rectangle. The Z-order is fixed with RPF 0 at the bottom.
+ * composition rectangle. The Z-order is configurable with higher @zpos values
+ * displayed on top.
  *
  * Image format as stored in memory is expressed as a V4L2 @pixelformat value.
  * As a special case, setting the pixel format to 0 will disable the RPF. The
@@ -265,25 +259,17 @@
  *
  * This function isn't reentrant, the caller needs to serialize calls.
  *
- * TODO: Implement Z-order control by decoupling the RPF index from the BRU
- * input index.
- *
  * Return 0 on success or a negative error code on failure.
  */
-int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
-			  u32 pixelformat, unsigned int pitch,
-			  dma_addr_t mem[2], const struct v4l2_rect *src,
-			  const struct v4l2_rect *dst)
+int vsp1_du_atomic_update_ext(struct device *dev, unsigned int rpf_index,
+			      u32 pixelformat, unsigned int pitch,
+			      dma_addr_t mem[2], const struct v4l2_rect *src,
+			      const struct v4l2_rect *dst, unsigned int alpha,
+			      unsigned int zpos)
 {
 	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
-	struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
 	const struct vsp1_format_info *fmtinfo;
-	struct v4l2_subdev_selection sel;
-	struct v4l2_subdev_format format;
-	struct vsp1_rwpf_memory memory;
 	struct vsp1_rwpf *rpf;
-	unsigned long flags;
-	int ret;
 
 	if (rpf_index >= vsp1->info->rpf_count)
 		return -EINVAL;
@@ -294,31 +280,20 @@
 		dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__,
 			rpf_index);
 
-		spin_lock_irqsave(&pipe->irqlock, flags);
-
-		if (pipe->inputs[rpf_index]) {
-			/* Remove the RPF from the pipeline if it was previously
-			 * enabled.
-			 */
-			vsp1->bru->inputs[rpf_index].rpf = NULL;
-			pipe->inputs[rpf_index] = NULL;
-
-			pipe->num_inputs--;
-		}
-
-		spin_unlock_irqrestore(&pipe->irqlock, flags);
-
+		vsp1->drm->inputs[rpf_index].enabled = false;
 		return 0;
 	}
 
 	dev_dbg(vsp1->dev,
-		"%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad }\n",
+		"%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad } zpos %u\n",
 		__func__, rpf_index,
 		src->left, src->top, src->width, src->height,
 		dst->left, dst->top, dst->width, dst->height,
-		pixelformat, pitch, &mem[0], &mem[1]);
+		pixelformat, pitch, &mem[0], &mem[1], zpos);
 
-	/* Set the stride at the RPF input. */
+	/* Store the format, stride, memory buffer address, crop and compose
+	 * rectangles and Z-order position for the input.
+	 */
 	fmtinfo = vsp1_get_format_info(pixelformat);
 	if (!fmtinfo) {
 		dev_dbg(vsp1->dev, "Unsupport pixel format %08x for RPF\n",
@@ -330,16 +305,40 @@
 	rpf->format.num_planes = fmtinfo->planes;
 	rpf->format.plane_fmt[0].bytesperline = pitch;
 	rpf->format.plane_fmt[1].bytesperline = pitch;
+	rpf->alpha = alpha;
+
+	rpf->mem.addr[0] = mem[0];
+	rpf->mem.addr[1] = mem[1];
+	rpf->mem.addr[2] = 0;
+
+	vsp1->drm->inputs[rpf_index].crop = *src;
+	vsp1->drm->inputs[rpf_index].compose = *dst;
+	vsp1->drm->inputs[rpf_index].zpos = zpos;
+	vsp1->drm->inputs[rpf_index].enabled = true;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_update_ext);
+
+static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1,
+				  struct vsp1_rwpf *rpf, unsigned int bru_input)
+{
+	struct v4l2_subdev_selection sel;
+	struct v4l2_subdev_format format;
+	const struct v4l2_rect *crop;
+	int ret;
 
 	/* Configure the format on the RPF sink pad and propagate it up to the
 	 * BRU sink pad.
 	 */
+	crop = &vsp1->drm->inputs[rpf->entity.index].crop;
+
 	memset(&format, 0, sizeof(format));
 	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
 	format.pad = RWPF_PAD_SINK;
-	format.format.width = src->width + src->left;
-	format.format.height = src->height + src->top;
-	format.format.code = fmtinfo->mbus;
+	format.format.width = crop->width + crop->left;
+	format.format.height = crop->height + crop->top;
+	format.format.code = rpf->fmtinfo->mbus;
 	format.format.field = V4L2_FIELD_NONE;
 
 	ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL,
@@ -356,7 +355,7 @@
 	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
 	sel.pad = RWPF_PAD_SINK;
 	sel.target = V4L2_SEL_TGT_CROP;
-	sel.r = *src;
+	sel.r = *crop;
 
 	ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_selection, NULL,
 			       &sel);
@@ -391,7 +390,7 @@
 		return ret;
 
 	/* BRU sink, propagate the format from the RPF source. */
-	format.pad = rpf->entity.index;
+	format.pad = bru_input;
 
 	ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_fmt, NULL,
 			       &format);
@@ -402,9 +401,9 @@
 		__func__, format.format.width, format.format.height,
 		format.format.code, format.pad);
 
-	sel.pad = rpf->entity.index;
+	sel.pad = bru_input;
 	sel.target = V4L2_SEL_TGT_COMPOSE;
-	sel.r = *dst;
+	sel.r = vsp1->drm->inputs[rpf->entity.index].compose;
 
 	ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_selection,
 			       NULL, &sel);
@@ -416,33 +415,13 @@
 		__func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height,
 		sel.pad);
 
-	/* Store the compose rectangle coordinates in the RPF. */
-	rpf->location.left = dst->left;
-	rpf->location.top = dst->top;
-
-	/* Set the memory buffer address. */
-	memory.num_planes = fmtinfo->planes;
-	memory.addr[0] = mem[0];
-	memory.addr[1] = mem[1];
-
-	rpf->ops->set_memory(rpf, &memory);
-
-	spin_lock_irqsave(&pipe->irqlock, flags);
-
-	/* If the RPF was previously stopped set the BRU input to the RPF and
-	 * store the RPF in the pipeline inputs array.
-	 */
-	if (!pipe->inputs[rpf->entity.index]) {
-		vsp1->bru->inputs[rpf_index].rpf = rpf;
-		pipe->inputs[rpf->entity.index] = rpf;
-		pipe->num_inputs++;
-	}
-
-	spin_unlock_irqrestore(&pipe->irqlock, flags);
-
 	return 0;
 }
-EXPORT_SYMBOL_GPL(vsp1_du_atomic_update);
+
+static unsigned int rpf_zpos(struct vsp1_device *vsp1, struct vsp1_rwpf *rpf)
+{
+	return vsp1->drm->inputs[rpf->entity.index].zpos;
+}
 
 /**
  * vsp1_du_atomic_flush - Commit an atomic update
@@ -452,51 +431,96 @@
 {
 	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
 	struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
+	struct vsp1_rwpf *inputs[VSP1_MAX_RPF] = { NULL, };
 	struct vsp1_entity *entity;
 	unsigned long flags;
-	bool stop = false;
+	unsigned int i;
 	int ret;
 
+	/* Count the number of enabled inputs and sort them by Z-order. */
+	pipe->num_inputs = 0;
+
+	for (i = 0; i < vsp1->info->rpf_count; ++i) {
+		struct vsp1_rwpf *rpf = vsp1->rpf[i];
+		unsigned int j;
+
+		if (!vsp1->drm->inputs[i].enabled) {
+			pipe->inputs[i] = NULL;
+			continue;
+		}
+
+		pipe->inputs[i] = rpf;
+
+		/* Insert the RPF in the sorted RPFs array. */
+		for (j = pipe->num_inputs++; j > 0; --j) {
+			if (rpf_zpos(vsp1, inputs[j-1]) <= rpf_zpos(vsp1, rpf))
+				break;
+			inputs[j] = inputs[j-1];
+		}
+
+		inputs[j] = rpf;
+	}
+
+	/* Setup the RPF input pipeline for every enabled input. */
+	for (i = 0; i < vsp1->info->num_bru_inputs; ++i) {
+		struct vsp1_rwpf *rpf = inputs[i];
+
+		if (!rpf) {
+			vsp1->bru->inputs[i].rpf = NULL;
+			continue;
+		}
+
+		vsp1->bru->inputs[i].rpf = rpf;
+		rpf->bru_input = i;
+		rpf->entity.sink_pad = i;
+
+		dev_dbg(vsp1->dev, "%s: connecting RPF.%u to BRU:%u\n",
+			__func__, rpf->entity.index, i);
+
+		ret = vsp1_du_setup_rpf_pipe(vsp1, rpf, i);
+		if (ret < 0)
+			dev_err(vsp1->dev,
+				"%s: failed to setup RPF.%u\n",
+				__func__, rpf->entity.index);
+	}
+
+	/* Configure all entities in the pipeline. */
 	list_for_each_entry(entity, &pipe->entities, list_pipe) {
 		/* Disconnect unused RPFs from the pipeline. */
 		if (entity->type == VSP1_ENTITY_RPF) {
 			struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
 
 			if (!pipe->inputs[rpf->entity.index]) {
-				vsp1_mod_write(entity, entity->route->reg,
-					   VI6_DPR_NODE_UNUSED);
+				vsp1_dl_list_write(pipe->dl, entity->route->reg,
+						   VI6_DPR_NODE_UNUSED);
 				continue;
 			}
 		}
 
-		vsp1_entity_route_setup(entity);
+		vsp1_entity_route_setup(entity, pipe->dl);
 
-		ret = v4l2_subdev_call(&entity->subdev, video,
-				       s_stream, 1);
-		if (ret < 0) {
-			dev_err(vsp1->dev,
-				"DRM pipeline start failure on entity %s\n",
-				entity->subdev.name);
-			return;
-		}
+		if (entity->ops->configure)
+			entity->ops->configure(entity, pipe, pipe->dl);
+
+		/* The memory buffer address must be applied after configuring
+		 * the RPF to make sure the crop offsets are computed.
+		 */
+		if (entity->type == VSP1_ENTITY_RPF)
+			vsp1_rwpf_set_memory(to_rwpf(&entity->subdev),
+					     pipe->dl);
 	}
 
-	vsp1_dl_commit(vsp1->drm->dl);
-
-	spin_lock_irqsave(&pipe->irqlock, flags);
+	vsp1_dl_list_commit(pipe->dl);
+	pipe->dl = NULL;
 
 	/* Start or stop the pipeline if needed. */
 	if (!vsp1->drm->num_inputs && pipe->num_inputs) {
 		vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0);
 		vsp1_write(vsp1, VI6_DISP_IRQ_ENB, VI6_DISP_IRQ_ENB_DSTE);
+		spin_lock_irqsave(&pipe->irqlock, flags);
 		vsp1_pipeline_run(pipe);
+		spin_unlock_irqrestore(&pipe->irqlock, flags);
 	} else if (vsp1->drm->num_inputs && !pipe->num_inputs) {
-		stop = true;
-	}
-
-	spin_unlock_irqrestore(&pipe->irqlock, flags);
-
-	if (stop) {
 		vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0);
 		vsp1_pipeline_stop(pipe);
 	}
@@ -562,14 +586,9 @@
 	if (!vsp1->drm)
 		return -ENOMEM;
 
-	vsp1->drm->dl = vsp1_dl_create(vsp1);
-	if (!vsp1->drm->dl)
-		return -ENOMEM;
-
 	pipe = &vsp1->drm->pipe;
 
 	vsp1_pipeline_init(pipe);
-	pipe->frame_end = vsp1_drm_pipeline_frame_end;
 
 	/* The DRM pipeline is static, add entities manually. */
 	for (i = 0; i < vsp1->info->rpf_count; ++i) {
@@ -586,12 +605,9 @@
 	pipe->lif = &vsp1->lif->entity;
 	pipe->output = vsp1->wpf[0];
 
-	pipe->dl = vsp1->drm->dl;
-
 	return 0;
 }
 
 void vsp1_drm_cleanup(struct vsp1_device *vsp1)
 {
-	vsp1_dl_destroy(vsp1->drm->dl);
 }
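
vsp1_du_atomic_flush() above sorts the enabled inputs with a small insertion sort
keyed on zpos before connecting them to the BRU. The standalone snippet below (not
part of the patch) reproduces that loop with plain integers to show that the result
is ascending and stable, so inputs with equal zpos keep their RPF order.

#include <stdio.h>

/* Insert new_zpos into zpos[], kept sorted in ascending order, mirroring the
 * insertion loop in vsp1_du_atomic_flush(). Equal values keep their original
 * order, i.e. the sort is stable.
 */
static void insert_by_zpos(unsigned int *zpos, unsigned int *count,
			   unsigned int new_zpos)
{
	unsigned int j;

	for (j = (*count)++; j > 0; --j) {
		if (zpos[j - 1] <= new_zpos)
			break;
		zpos[j] = zpos[j - 1];
	}

	zpos[j] = new_zpos;
}

int main(void)
{
	unsigned int updates[] = { 3, 1, 2, 1 };
	unsigned int zpos[4];
	unsigned int count = 0;
	unsigned int i;

	for (i = 0; i < 4; ++i)
		insert_by_zpos(zpos, &count, updates[i]);

	for (i = 0; i < count; ++i)	/* prints "1 1 2 3" */
		printf("%u ", zpos[i]);
	printf("\n");

	return 0;
}
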
diff --git a/drivers/media/platform/vsp1/vsp1_drm.h b/drivers/media/platform/vsp1/vsp1_drm.h
index f680568..9e28ab9 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.h
+++ b/drivers/media/platform/vsp1/vsp1_drm.h
@@ -13,37 +13,32 @@
 #ifndef __VSP1_DRM_H__
 #define __VSP1_DRM_H__
 
-#include "vsp1_pipe.h"
+#include <linux/videodev2.h>
 
-struct vsp1_dl;
+#include "vsp1_pipe.h"
 
 /**
  * vsp1_drm - State for the API exposed to the DRM driver
- * @dl: display list for DRM pipeline operation
  * @pipe: the VSP1 pipeline used for display
  * @num_inputs: number of active pipeline inputs at the beginning of an update
- * @update: the pipeline configuration has been updated
+ * @inputs: source crop rectangle, destination compose rectangle and Z-order
+ *	position for every input
  */
 struct vsp1_drm {
-	struct vsp1_dl *dl;
 	struct vsp1_pipeline pipe;
 	unsigned int num_inputs;
-	bool update;
+	struct {
+		bool enabled;
+		struct v4l2_rect crop;
+		struct v4l2_rect compose;
+		unsigned int zpos;
+	} inputs[VSP1_MAX_RPF];
 };
 
 int vsp1_drm_init(struct vsp1_device *vsp1);
 void vsp1_drm_cleanup(struct vsp1_device *vsp1);
 int vsp1_drm_create_links(struct vsp1_device *vsp1);
 
-int vsp1_du_init(struct device *dev);
-int vsp1_du_setup_lif(struct device *dev, unsigned int width,
-		      unsigned int height);
-void vsp1_du_atomic_begin(struct device *dev);
-int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
-			  u32 pixelformat, unsigned int pitch,
-			  dma_addr_t mem[2], const struct v4l2_rect *src,
-			  const struct v4l2_rect *dst);
-void vsp1_du_atomic_flush(struct device *dev);
-
+void vsp1_drm_display_start(struct vsp1_device *vsp1);
 
 #endif /* __VSP1_DRM_H__ */
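
The composition API the DU DRM driver calls into now carries per-input alpha and
zpos. The fragment below is a rough sketch of a single-plane update as seen from the
DRM side; it assumes the prototypes live in the relocated <media/vsp1.h> header and
that an alpha value of 255 means opaque. The geometry, pitch and buffer address are
invented, and real callers perform more checking.

#include <linux/device.h>
#include <linux/types.h>
#include <linux/videodev2.h>

#include <media/vsp1.h>

/* Hypothetical single-plane update: 1920x1080 ARGB, opaque, bottom of the
 * Z-order stack (zpos 0), composed at the same position it is cropped from.
 */
static void my_push_plane(struct device *vsp1_dev, dma_addr_t buf)
{
	struct v4l2_rect src = { .left = 0, .top = 0, .width = 1920, .height = 1080 };
	struct v4l2_rect dst = src;
	dma_addr_t mem[2] = { buf, 0 };

	vsp1_du_atomic_begin(vsp1_dev);
	vsp1_du_atomic_update_ext(vsp1_dev, 0, V4L2_PIX_FMT_ARGB32, 1920 * 4,
				  mem, &src, &dst, 255, 0);
	vsp1_du_atomic_flush(vsp1_dev);
}
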
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 25750a0..e2d779f 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -30,6 +30,7 @@
 #include "vsp1_hsit.h"
 #include "vsp1_lif.h"
 #include "vsp1_lut.h"
+#include "vsp1_pipe.h"
 #include "vsp1_rwpf.h"
 #include "vsp1_sru.h"
 #include "vsp1_uds.h"
@@ -49,17 +50,15 @@
 
 	for (i = 0; i < vsp1->info->wpf_count; ++i) {
 		struct vsp1_rwpf *wpf = vsp1->wpf[i];
-		struct vsp1_pipeline *pipe;
 
 		if (wpf == NULL)
 			continue;
 
-		pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
 		status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
 		vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
 
 		if (status & VI6_WFP_IRQ_STA_FRE) {
-			vsp1_pipeline_frame_end(pipe);
+			vsp1_pipeline_frame_end(wpf->pipe);
 			ret = IRQ_HANDLED;
 		}
 	}
@@ -68,14 +67,7 @@
 	vsp1_write(vsp1, VI6_DISP_IRQ_STA, ~status & VI6_DISP_IRQ_STA_DST);
 
 	if (status & VI6_DISP_IRQ_STA_DST) {
-		struct vsp1_rwpf *wpf = vsp1->wpf[0];
-		struct vsp1_pipeline *pipe;
-
-		if (wpf) {
-			pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
-			vsp1_pipeline_display_start(pipe);
-		}
-
+		vsp1_drm_display_start(vsp1);
 		ret = IRQ_HANDLED;
 	}
 
@@ -387,13 +379,10 @@
 	/* Register subdev nodes if the userspace API is enabled or initialize
 	 * the DRM pipeline otherwise.
 	 */
-	if (vsp1->info->uapi) {
-		vsp1->use_dl = false;
+	if (vsp1->info->uapi)
 		ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
-	} else {
-		vsp1->use_dl = true;
+	else
 		ret = vsp1_drm_init(vsp1);
-	}
 	if (ret < 0)
 		goto done;
 
@@ -465,8 +454,7 @@
 	vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
 		   (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
 
-	if (vsp1->use_dl)
-		vsp1_dl_setup(vsp1);
+	vsp1_dlm_setup(vsp1);
 
 	return 0;
 }
@@ -570,6 +558,7 @@
 static const struct vsp1_device_info vsp1_device_infos[] = {
 	{
 		.version = VI6_IP_VERSION_MODEL_VSPS_H2,
+		.gen = 2,
 		.features = VSP1_HAS_BRU | VSP1_HAS_LUT | VSP1_HAS_SRU,
 		.rpf_count = 5,
 		.uds_count = 3,
@@ -578,6 +567,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPR_H2,
+		.gen = 2,
 		.features = VSP1_HAS_BRU | VSP1_HAS_SRU,
 		.rpf_count = 5,
 		.uds_count = 1,
@@ -586,6 +576,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
+		.gen = 2,
 		.features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT,
 		.rpf_count = 4,
 		.uds_count = 1,
@@ -594,6 +585,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPS_M2,
+		.gen = 2,
 		.features = VSP1_HAS_BRU | VSP1_HAS_LUT | VSP1_HAS_SRU,
 		.rpf_count = 5,
 		.uds_count = 3,
@@ -602,6 +594,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
+		.gen = 3,
 		.features = VSP1_HAS_LUT | VSP1_HAS_SRU,
 		.rpf_count = 1,
 		.uds_count = 1,
@@ -609,6 +602,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPBD_GEN3,
+		.gen = 3,
 		.features = VSP1_HAS_BRU,
 		.rpf_count = 5,
 		.wpf_count = 1,
@@ -616,6 +610,7 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
+		.gen = 3,
 		.features = VSP1_HAS_BRU | VSP1_HAS_LUT,
 		.rpf_count = 5,
 		.wpf_count = 1,
@@ -623,7 +618,8 @@
 		.uapi = true,
 	}, {
 		.version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
-		.features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT,
+		.gen = 3,
+		.features = VSP1_HAS_BRU | VSP1_HAS_LIF,
 		.rpf_count = 5,
 		.wpf_count = 2,
 		.num_bru_inputs = 5,
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index 20a78fb..3d070bc 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -19,46 +19,11 @@
 #include <media/v4l2-subdev.h>
 
 #include "vsp1.h"
+#include "vsp1_dl.h"
 #include "vsp1_entity.h"
 
-bool vsp1_entity_is_streaming(struct vsp1_entity *entity)
-{
-	unsigned long flags;
-	bool streaming;
-
-	spin_lock_irqsave(&entity->lock, flags);
-	streaming = entity->streaming;
-	spin_unlock_irqrestore(&entity->lock, flags);
-
-	return streaming;
-}
-
-int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&entity->lock, flags);
-	entity->streaming = streaming;
-	spin_unlock_irqrestore(&entity->lock, flags);
-
-	if (!streaming)
-		return 0;
-
-	if (!entity->vsp1->info->uapi || !entity->subdev.ctrl_handler)
-		return 0;
-
-	ret = v4l2_ctrl_handler_setup(entity->subdev.ctrl_handler);
-	if (ret < 0) {
-		spin_lock_irqsave(&entity->lock, flags);
-		entity->streaming = false;
-		spin_unlock_irqrestore(&entity->lock, flags);
-	}
-
-	return ret;
-}
-
-void vsp1_entity_route_setup(struct vsp1_entity *source)
+void vsp1_entity_route_setup(struct vsp1_entity *source,
+			     struct vsp1_dl_list *dl)
 {
 	struct vsp1_entity *sink;
 
@@ -66,40 +31,74 @@
 		return;
 
 	sink = container_of(source->sink, struct vsp1_entity, subdev.entity);
-	vsp1_mod_write(source, source->route->reg,
-		       sink->route->inputs[source->sink_pad]);
+	vsp1_dl_list_write(dl, source->route->reg,
+			   sink->route->inputs[source->sink_pad]);
 }
 
 /* -----------------------------------------------------------------------------
  * V4L2 Subdevice Operations
  */
 
-struct v4l2_mbus_framefmt *
-vsp1_entity_get_pad_format(struct vsp1_entity *entity,
+/**
+ * vsp1_entity_get_pad_config - Get the pad configuration for an entity
+ * @entity: the entity
+ * @cfg: the TRY pad configuration
+ * @which: configuration selector (ACTIVE or TRY)
+ *
+ * Return the pad configuration requested by the which argument. The TRY
+ * configuration is passed explicitly to the function through the cfg argument
+ * and simply returned when requested. The ACTIVE configuration comes from the
+ * entity structure.
+ */
+struct v4l2_subdev_pad_config *
+vsp1_entity_get_pad_config(struct vsp1_entity *entity,
 			   struct v4l2_subdev_pad_config *cfg,
-			   unsigned int pad, u32 which)
+			   enum v4l2_subdev_format_whence which)
 {
 	switch (which) {
-	case V4L2_SUBDEV_FORMAT_TRY:
-		return v4l2_subdev_get_try_format(&entity->subdev, cfg, pad);
 	case V4L2_SUBDEV_FORMAT_ACTIVE:
-		return &entity->formats[pad];
+		return entity->config;
+	case V4L2_SUBDEV_FORMAT_TRY:
 	default:
-		return NULL;
+		return cfg;
 	}
 }
 
+/**
+ * vsp1_entity_get_pad_format - Get a pad format from storage for an entity
+ * @entity: the entity
+ * @cfg: the configuration storage
+ * @pad: the pad number
+ *
+ * Return the format stored in the given configuration for an entity's pad. The
+ * configuration can be an ACTIVE or TRY configuration.
+ */
+struct v4l2_mbus_framefmt *
+vsp1_entity_get_pad_format(struct vsp1_entity *entity,
+			   struct v4l2_subdev_pad_config *cfg,
+			   unsigned int pad)
+{
+	return v4l2_subdev_get_try_format(&entity->subdev, cfg, pad);
+}
+
+struct v4l2_rect *
+vsp1_entity_get_pad_compose(struct vsp1_entity *entity,
+			    struct v4l2_subdev_pad_config *cfg,
+			    unsigned int pad)
+{
+	return v4l2_subdev_get_try_compose(&entity->subdev, cfg, pad);
+}
+
 /*
- * vsp1_entity_init_formats - Initialize formats on all pads
+ * vsp1_entity_init_cfg - Initialize formats on all pads
  * @subdev: V4L2 subdevice
  * @cfg: V4L2 subdev pad configuration
  *
- * Initialize all pad formats with default values. If cfg is not NULL, try
- * formats are initialized on the file handle. Otherwise active formats are
- * initialized on the device.
+ * Initialize all pad formats with default values in the given pad config. This
+ * function can be used as a handler for the subdev pad::init_cfg operation.
  */
-void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
-			    struct v4l2_subdev_pad_config *cfg)
+int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
+			 struct v4l2_subdev_pad_config *cfg)
 {
 	struct v4l2_subdev_format format;
 	unsigned int pad;
@@ -113,19 +112,132 @@
 
 		v4l2_subdev_call(subdev, pad, set_fmt, cfg, &format);
 	}
-}
-
-static int vsp1_entity_open(struct v4l2_subdev *subdev,
-			    struct v4l2_subdev_fh *fh)
-{
-	vsp1_entity_init_formats(subdev, fh->pad);
 
 	return 0;
 }
 
-const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops = {
-	.open = vsp1_entity_open,
-};
+/*
+ * vsp1_subdev_get_pad_format - Subdev pad get_fmt handler
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: V4L2 subdev format
+ *
+ * This function implements the subdev get_fmt pad operation. It can be used as
+ * a direct drop-in for the operation handler.
+ */
+int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
+			       struct v4l2_subdev_pad_config *cfg,
+			       struct v4l2_subdev_format *fmt)
+{
+	struct vsp1_entity *entity = to_vsp1_entity(subdev);
+	struct v4l2_subdev_pad_config *config;
+
+	config = vsp1_entity_get_pad_config(entity, cfg, fmt->which);
+	if (!config)
+		return -EINVAL;
+
+	fmt->format = *vsp1_entity_get_pad_format(entity, config, fmt->pad);
+
+	return 0;
+}
+
+/*
+ * vsp1_subdev_enum_mbus_code - Subdev pad enum_mbus_code handler
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @code: Media bus code enumeration
+ * @codes: Array of supported media bus codes
+ * @ncodes: Number of supported media bus codes
+ *
+ * This function implements the subdev enum_mbus_code pad operation for entities
+ * that do not support format conversion. It enumerates the given supported
+ * media bus codes on the sink pad and reports a source pad format identical to
+ * the sink pad.
+ */
+int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
+			       struct v4l2_subdev_pad_config *cfg,
+			       struct v4l2_subdev_mbus_code_enum *code,
+			       const unsigned int *codes, unsigned int ncodes)
+{
+	struct vsp1_entity *entity = to_vsp1_entity(subdev);
+
+	if (code->pad == 0) {
+		if (code->index >= ncodes)
+			return -EINVAL;
+
+		code->code = codes[code->index];
+	} else {
+		struct v4l2_subdev_pad_config *config;
+		struct v4l2_mbus_framefmt *format;
+
+		/* The entity can't perform format conversion, the sink format
+		 * is always identical to the source format.
+		 */
+		if (code->index)
+			return -EINVAL;
+
+		config = vsp1_entity_get_pad_config(entity, cfg, code->which);
+		if (!config)
+			return -EINVAL;
+
+		format = vsp1_entity_get_pad_format(entity, config, 0);
+		code->code = format->code;
+	}
+
+	return 0;
+}
+
+/*
+ * vsp1_subdev_enum_frame_size - Subdev pad enum_frame_size handler
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fse: Frame size enumeration
+ * @min_width: Minimum image width
+ * @min_height: Minimum image height
+ * @max_width: Maximum image width
+ * @max_height: Maximum image height
+ *
+ * This function implements the subdev enum_frame_size pad operation for
+ * entities that do not support scaling or cropping. It reports the given
+ * minimum and maximum frame width and height on the sink pad, and a fixed
+ * source pad size identical to the sink pad.
+ */
+int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
+				struct v4l2_subdev_pad_config *cfg,
+				struct v4l2_subdev_frame_size_enum *fse,
+				unsigned int min_width, unsigned int min_height,
+				unsigned int max_width, unsigned int max_height)
+{
+	struct vsp1_entity *entity = to_vsp1_entity(subdev);
+	struct v4l2_subdev_pad_config *config;
+	struct v4l2_mbus_framefmt *format;
+
+	config = vsp1_entity_get_pad_config(entity, cfg, fse->which);
+	if (!config)
+		return -EINVAL;
+
+	format = vsp1_entity_get_pad_format(entity, config, fse->pad);
+
+	if (fse->index || fse->code != format->code)
+		return -EINVAL;
+
+	if (fse->pad == 0) {
+		fse->min_width = min_width;
+		fse->max_width = max_width;
+		fse->min_height = min_height;
+		fse->max_height = max_height;
+	} else {
+		/* The size on the source pad is fixed and always identical to
+		 * the size on the sink pad.
+		 */
+		fse->min_width = format->width;
+		fse->max_width = format->width;
+		fse->min_height = format->height;
+		fse->max_height = format->height;
+	}
+
+	return 0;
+}
 
 /* -----------------------------------------------------------------------------
  * Media Operations
@@ -171,11 +283,11 @@
 	{ VSP1_ENTITY_HST, 0, VI6_DPR_HST_ROUTE, { VI6_DPR_NODE_HST, } },
 	{ VSP1_ENTITY_LIF, 0, 0, { VI6_DPR_NODE_LIF, } },
 	{ VSP1_ENTITY_LUT, 0, VI6_DPR_LUT_ROUTE, { VI6_DPR_NODE_LUT, } },
-	{ VSP1_ENTITY_RPF, 0, VI6_DPR_RPF_ROUTE(0), { VI6_DPR_NODE_RPF(0), } },
-	{ VSP1_ENTITY_RPF, 1, VI6_DPR_RPF_ROUTE(1), { VI6_DPR_NODE_RPF(1), } },
-	{ VSP1_ENTITY_RPF, 2, VI6_DPR_RPF_ROUTE(2), { VI6_DPR_NODE_RPF(2), } },
-	{ VSP1_ENTITY_RPF, 3, VI6_DPR_RPF_ROUTE(3), { VI6_DPR_NODE_RPF(3), } },
-	{ VSP1_ENTITY_RPF, 4, VI6_DPR_RPF_ROUTE(4), { VI6_DPR_NODE_RPF(4), } },
+	{ VSP1_ENTITY_RPF, 0, VI6_DPR_RPF_ROUTE(0), { 0, } },
+	{ VSP1_ENTITY_RPF, 1, VI6_DPR_RPF_ROUTE(1), { 0, } },
+	{ VSP1_ENTITY_RPF, 2, VI6_DPR_RPF_ROUTE(2), { 0, } },
+	{ VSP1_ENTITY_RPF, 3, VI6_DPR_RPF_ROUTE(3), { 0, } },
+	{ VSP1_ENTITY_RPF, 4, VI6_DPR_RPF_ROUTE(4), { 0, } },
 	{ VSP1_ENTITY_SRU, 0, VI6_DPR_SRU_ROUTE, { VI6_DPR_NODE_SRU, } },
 	{ VSP1_ENTITY_UDS, 0, VI6_DPR_UDS_ROUTE(0), { VI6_DPR_NODE_UDS(0), } },
 	{ VSP1_ENTITY_UDS, 1, VI6_DPR_UDS_ROUTE(1), { VI6_DPR_NODE_UDS(1), } },
@@ -187,9 +299,12 @@
 };
 
 int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
-		     unsigned int num_pads)
+		     const char *name, unsigned int num_pads,
+		     const struct v4l2_subdev_ops *ops)
 {
+	struct v4l2_subdev *subdev;
 	unsigned int i;
+	int ret;
 
 	for (i = 0; i < ARRAY_SIZE(vsp1_routes); ++i) {
 		if (vsp1_routes[i].type == entity->type &&
@@ -202,37 +317,56 @@
 	if (i == ARRAY_SIZE(vsp1_routes))
 		return -EINVAL;
 
-	spin_lock_init(&entity->lock);
-
 	entity->vsp1 = vsp1;
 	entity->source_pad = num_pads - 1;
 
-	/* Allocate formats and pads. */
-	entity->formats = devm_kzalloc(vsp1->dev,
-				       num_pads * sizeof(*entity->formats),
-				       GFP_KERNEL);
-	if (entity->formats == NULL)
-		return -ENOMEM;
-
+	/* Allocate and initialize pads. */
 	entity->pads = devm_kzalloc(vsp1->dev, num_pads * sizeof(*entity->pads),
 				    GFP_KERNEL);
 	if (entity->pads == NULL)
 		return -ENOMEM;
 
-	/* Initialize pads. */
 	for (i = 0; i < num_pads - 1; ++i)
 		entity->pads[i].flags = MEDIA_PAD_FL_SINK;
 
 	entity->pads[num_pads - 1].flags = MEDIA_PAD_FL_SOURCE;
 
 	/* Initialize the media entity. */
-	return media_entity_pads_init(&entity->subdev.entity, num_pads,
-				 entity->pads);
+	ret = media_entity_pads_init(&entity->subdev.entity, num_pads,
+				     entity->pads);
+	if (ret < 0)
+		return ret;
+
+	/* Initialize the V4L2 subdev. */
+	subdev = &entity->subdev;
+	v4l2_subdev_init(subdev, ops);
+
+	subdev->entity.ops = &vsp1->media_ops;
+	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	snprintf(subdev->name, sizeof(subdev->name), "%s %s",
+		 dev_name(vsp1->dev), name);
+
+	vsp1_entity_init_cfg(subdev, NULL);
+
+	/* Allocate the pad configuration to store formats and selection
+	 * rectangles.
+	 */
+	entity->config = v4l2_subdev_alloc_pad_config(&entity->subdev);
+	if (entity->config == NULL) {
+		media_entity_cleanup(&entity->subdev.entity);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 void vsp1_entity_destroy(struct vsp1_entity *entity)
 {
+	if (entity->ops && entity->ops->destroy)
+		entity->ops->destroy(entity);
 	if (entity->subdev.ctrl_handler)
 		v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
+	v4l2_subdev_free_pad_config(entity->config);
 	media_entity_cleanup(&entity->subdev.entity);
 }
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index 83570df..69eff4e 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -19,6 +19,8 @@
 #include <media/v4l2-subdev.h>
 
 struct vsp1_device;
+struct vsp1_dl_list;
+struct vsp1_pipeline;
 
 enum vsp1_entity_type {
 	VSP1_ENTITY_BRU,
@@ -53,9 +55,27 @@
 	unsigned int inputs[VSP1_ENTITY_MAX_INPUTS];
 };
 
+/**
+ * struct vsp1_entity_operations - Entity operations
+ * @destroy:	Destroy the entity.
+ * @set_memory:	Set up memory buffer access. This operation applies the settings
+ *		stored in the rwpf mem field to the display list. Valid for RPF
+ *		and WPF only.
+ * @configure:	Set up the hardware based on the entity state (pipeline, formats,
+ *		selection rectangles, ...)
+ */
+struct vsp1_entity_operations {
+	void (*destroy)(struct vsp1_entity *);
+	void (*set_memory)(struct vsp1_entity *, struct vsp1_dl_list *dl);
+	void (*configure)(struct vsp1_entity *, struct vsp1_pipeline *,
+			  struct vsp1_dl_list *);
+};
+
 struct vsp1_entity {
 	struct vsp1_device *vsp1;
 
+	const struct vsp1_entity_operations *ops;
+
 	enum vsp1_entity_type type;
 	unsigned int index;
 	const struct vsp1_route *route;
@@ -70,10 +90,7 @@
 	unsigned int sink_pad;
 
 	struct v4l2_subdev subdev;
-	struct v4l2_mbus_framefmt *formats;
-
-	spinlock_t lock;		/* Protects the streaming field */
-	bool streaming;
+	struct v4l2_subdev_pad_config *config;
 };
 
 static inline struct vsp1_entity *to_vsp1_entity(struct v4l2_subdev *subdev)
@@ -82,7 +99,8 @@
 }
 
 int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
-		     unsigned int num_pads);
+		     const char *name, unsigned int num_pads,
+		     const struct v4l2_subdev_ops *ops);
 void vsp1_entity_destroy(struct vsp1_entity *entity);
 
 extern const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops;
@@ -91,16 +109,35 @@
 			   const struct media_pad *local,
 			   const struct media_pad *remote, u32 flags);
 
+struct v4l2_subdev_pad_config *
+vsp1_entity_get_pad_config(struct vsp1_entity *entity,
+			   struct v4l2_subdev_pad_config *cfg,
+			   enum v4l2_subdev_format_whence which);
 struct v4l2_mbus_framefmt *
 vsp1_entity_get_pad_format(struct vsp1_entity *entity,
 			   struct v4l2_subdev_pad_config *cfg,
-			   unsigned int pad, u32 which);
-void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
-			      struct v4l2_subdev_pad_config *cfg);
+			   unsigned int pad);
+struct v4l2_rect *
+vsp1_entity_get_pad_compose(struct vsp1_entity *entity,
+			    struct v4l2_subdev_pad_config *cfg,
+			    unsigned int pad);
+int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
+			 struct v4l2_subdev_pad_config *cfg);
 
-bool vsp1_entity_is_streaming(struct vsp1_entity *entity);
-int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming);
+void vsp1_entity_route_setup(struct vsp1_entity *source,
+			     struct vsp1_dl_list *dl);
 
-void vsp1_entity_route_setup(struct vsp1_entity *source);
+int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
+			       struct v4l2_subdev_pad_config *cfg,
+			       struct v4l2_subdev_format *fmt);
+int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
+			       struct v4l2_subdev_pad_config *cfg,
+			       struct v4l2_subdev_mbus_code_enum *code,
+			       const unsigned int *codes, unsigned int ncodes);
+int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
+				struct v4l2_subdev_pad_config *cfg,
+				struct v4l2_subdev_frame_size_enum *fse,
+				unsigned int min_w, unsigned int min_h,
+				unsigned int max_w, unsigned int max_h);
 
 #endif /* __VSP1_ENTITY_H__ */
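
With the helpers above, an entity no longer implements s_stream; it provides a
vsp1_entity_operations .configure hook that writes to the frame's display list and
wires the generic pad helpers into its subdev ops. The outline below is a hypothetical
minimal entity (the register write is a placeholder); the HSI/HST conversion that
follows shows the same pattern on real hardware.

#include <media/v4l2-subdev.h>

#include "vsp1.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"

/* All hardware programming happens when the pipeline is configured, through
 * the display list, instead of from a per-entity s_stream handler.
 */
static void my_entity_configure(struct vsp1_entity *entity,
				struct vsp1_pipeline *pipe,
				struct vsp1_dl_list *dl)
{
	vsp1_dl_list_write(dl, 0x0100, 0x1);	/* placeholder register write */
}

static const struct vsp1_entity_operations my_entity_ops = {
	.configure = my_entity_configure,
};

static const struct v4l2_subdev_pad_ops my_pad_ops = {
	.init_cfg = vsp1_entity_init_cfg,
	.get_fmt = vsp1_subdev_get_pad_format,
	/* .set_fmt, .enum_mbus_code and .enum_frame_size omitted here */
};

static const struct v4l2_subdev_ops my_subdev_ops = {
	.pad = &my_pad_ops,
};

/* Creation then boils down to setting entity.ops and letting
 * vsp1_entity_init() register the subdev and allocate the pad config:
 *
 *	my->entity.ops = &my_entity_ops;
 *	ret = vsp1_entity_init(vsp1, &my->entity, "myent", 2, &my_subdev_ops);
 */
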
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index c1087cf..68b8567 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -17,6 +17,7 @@
 #include <media/v4l2-subdev.h>
 
 #include "vsp1.h"
+#include "vsp1_dl.h"
 #include "vsp1_hsit.h"
 
 #define HSIT_MIN_SIZE				4U
@@ -26,32 +27,14 @@
  * Device Access
  */
 
-static inline void vsp1_hsit_write(struct vsp1_hsit *hsit, u32 reg, u32 data)
+static inline void vsp1_hsit_write(struct vsp1_hsit *hsit,
+				   struct vsp1_dl_list *dl, u32 reg, u32 data)
 {
-	vsp1_write(hsit->entity.vsp1, reg, data);
+	vsp1_dl_list_write(dl, reg, data);
 }
 
 /* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
- */
-
-static int hsit_s_stream(struct v4l2_subdev *subdev, int enable)
-{
-	struct vsp1_hsit *hsit = to_hsit(subdev);
-
-	if (!enable)
-		return 0;
-
-	if (hsit->inverse)
-		vsp1_hsit_write(hsit, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
-	else
-		vsp1_hsit_write(hsit, VI6_HST_CTRL, VI6_HST_CTRL_EN);
-
-	return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Pad Operations
+ * V4L2 Subdevice Operations
  */
 
 static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
@@ -76,43 +59,9 @@
 				struct v4l2_subdev_pad_config *cfg,
 				struct v4l2_subdev_frame_size_enum *fse)
 {
-	struct vsp1_hsit *hsit = to_hsit(subdev);
-	struct v4l2_mbus_framefmt *format;
-
-	format = vsp1_entity_get_pad_format(&hsit->entity, cfg, fse->pad,
-					    fse->which);
-
-	if (fse->index || fse->code != format->code)
-		return -EINVAL;
-
-	if (fse->pad == HSIT_PAD_SINK) {
-		fse->min_width = HSIT_MIN_SIZE;
-		fse->max_width = HSIT_MAX_SIZE;
-		fse->min_height = HSIT_MIN_SIZE;
-		fse->max_height = HSIT_MAX_SIZE;
-	} else {
-		/* The size on the source pad are fixed and always identical to
-		 * the size on the sink pad.
-		 */
-		fse->min_width = format->width;
-		fse->max_width = format->width;
-		fse->min_height = format->height;
-		fse->max_height = format->height;
-	}
-
-	return 0;
-}
-
-static int hsit_get_format(struct v4l2_subdev *subdev,
-			   struct v4l2_subdev_pad_config *cfg,
-			   struct v4l2_subdev_format *fmt)
-{
-	struct vsp1_hsit *hsit = to_hsit(subdev);
-
-	fmt->format = *vsp1_entity_get_pad_format(&hsit->entity, cfg, fmt->pad,
-						  fmt->which);
-
-	return 0;
+	return vsp1_subdev_enum_frame_size(subdev, cfg, fse, HSIT_MIN_SIZE,
+					   HSIT_MIN_SIZE, HSIT_MAX_SIZE,
+					   HSIT_MAX_SIZE);
 }
 
 static int hsit_set_format(struct v4l2_subdev *subdev,
@@ -120,10 +69,14 @@
 			   struct v4l2_subdev_format *fmt)
 {
 	struct vsp1_hsit *hsit = to_hsit(subdev);
+	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 
-	format = vsp1_entity_get_pad_format(&hsit->entity, cfg, fmt->pad,
-					    fmt->which);
+	config = vsp1_entity_get_pad_config(&hsit->entity, cfg, fmt->which);
+	if (!config)
+		return -EINVAL;
+
+	format = vsp1_entity_get_pad_format(&hsit->entity, config, fmt->pad);
 
 	if (fmt->pad == HSIT_PAD_SOURCE) {
 		/* The HST and HSI output format code and resolution can't be
@@ -145,8 +98,8 @@
 	fmt->format = *format;
 
 	/* Propagate the format to the source pad. */
-	format = vsp1_entity_get_pad_format(&hsit->entity, cfg, HSIT_PAD_SOURCE,
-					    fmt->which);
+	format = vsp1_entity_get_pad_format(&hsit->entity, config,
+					    HSIT_PAD_SOURCE);
 	*format = fmt->format;
 	format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
 		     : MEDIA_BUS_FMT_AHSV8888_1X32;
@@ -154,33 +107,44 @@
 	return 0;
 }
 
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops hsit_video_ops = {
-	.s_stream = hsit_s_stream,
-};
-
 static struct v4l2_subdev_pad_ops hsit_pad_ops = {
+	.init_cfg = vsp1_entity_init_cfg,
 	.enum_mbus_code = hsit_enum_mbus_code,
 	.enum_frame_size = hsit_enum_frame_size,
-	.get_fmt = hsit_get_format,
+	.get_fmt = vsp1_subdev_get_pad_format,
 	.set_fmt = hsit_set_format,
 };
 
 static struct v4l2_subdev_ops hsit_ops = {
-	.video	= &hsit_video_ops,
 	.pad    = &hsit_pad_ops,
 };
 
 /* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void hsit_configure(struct vsp1_entity *entity,
+			   struct vsp1_pipeline *pipe,
+			   struct vsp1_dl_list *dl)
+{
+	struct vsp1_hsit *hsit = to_hsit(&entity->subdev);
+
+	if (hsit->inverse)
+		vsp1_hsit_write(hsit, dl, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
+	else
+		vsp1_hsit_write(hsit, dl, VI6_HST_CTRL, VI6_HST_CTRL_EN);
+}
+
+static const struct vsp1_entity_operations hsit_entity_ops = {
+	.configure = hsit_configure,
+};
+
+/* -----------------------------------------------------------------------------
  * Initialization and Cleanup
  */
 
 struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
 {
-	struct v4l2_subdev *subdev;
 	struct vsp1_hsit *hsit;
 	int ret;
 
@@ -190,27 +154,17 @@
 
 	hsit->inverse = inverse;
 
+	hsit->entity.ops = &hsit_entity_ops;
+
 	if (inverse)
 		hsit->entity.type = VSP1_ENTITY_HSI;
 	else
 		hsit->entity.type = VSP1_ENTITY_HST;
 
-	ret = vsp1_entity_init(vsp1, &hsit->entity, 2);
+	ret = vsp1_entity_init(vsp1, &hsit->entity, inverse ? "hsi" : "hst", 2,
+			       &hsit_ops);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
-	/* Initialize the V4L2 subdev. */
-	subdev = &hsit->entity.subdev;
-	v4l2_subdev_init(subdev, &hsit_ops);
-
-	subdev->entity.ops = &vsp1->media_ops;
-	subdev->internal_ops = &vsp1_subdev_internal_ops;
-	snprintf(subdev->name, sizeof(subdev->name), "%s %s",
-		 dev_name(vsp1->dev), inverse ? "hsi" : "hst");
-	v4l2_set_subdevdata(subdev, hsit);
-	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
-	vsp1_entity_init_formats(subdev, NULL);
-
 	return hsit;
 }
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 433853c..0217393 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -17,55 +17,24 @@
 #include <media/v4l2-subdev.h>
 
 #include "vsp1.h"
+#include "vsp1_dl.h"
 #include "vsp1_lif.h"
 
 #define LIF_MIN_SIZE				2U
-#define LIF_MAX_SIZE				2048U
+#define LIF_MAX_SIZE				8190U
 
 /* -----------------------------------------------------------------------------
  * Device Access
  */
 
-static inline void vsp1_lif_write(struct vsp1_lif *lif, u32 reg, u32 data)
+static inline void vsp1_lif_write(struct vsp1_lif *lif, struct vsp1_dl_list *dl,
+				  u32 reg, u32 data)
 {
-	vsp1_mod_write(&lif->entity, reg, data);
+	vsp1_dl_list_write(dl, reg, data);
 }
 
 /* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
- */
-
-static int lif_s_stream(struct v4l2_subdev *subdev, int enable)
-{
-	const struct v4l2_mbus_framefmt *format;
-	struct vsp1_lif *lif = to_lif(subdev);
-	unsigned int hbth = 1300;
-	unsigned int obth = 400;
-	unsigned int lbth = 200;
-
-	if (!enable) {
-		vsp1_write(lif->entity.vsp1, VI6_LIF_CTRL, 0);
-		return 0;
-	}
-
-	format = &lif->entity.formats[LIF_PAD_SOURCE];
-
-	obth = min(obth, (format->width + 1) / 2 * format->height - 4);
-
-	vsp1_lif_write(lif, VI6_LIF_CSBTH,
-			(hbth << VI6_LIF_CSBTH_HBTH_SHIFT) |
-			(lbth << VI6_LIF_CSBTH_LBTH_SHIFT));
-
-	vsp1_lif_write(lif, VI6_LIF_CTRL,
-			(obth << VI6_LIF_CTRL_OBTH_SHIFT) |
-			(format->code == 0 ? VI6_LIF_CTRL_CFMT : 0) |
-			VI6_LIF_CTRL_REQSEL | VI6_LIF_CTRL_LIF_EN);
-
-	return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Pad Operations
+ * V4L2 Subdevice Operations
  */
 
 static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
@@ -76,82 +45,38 @@
 		MEDIA_BUS_FMT_ARGB8888_1X32,
 		MEDIA_BUS_FMT_AYUV8_1X32,
 	};
-	struct vsp1_lif *lif = to_lif(subdev);
 
-	if (code->pad == LIF_PAD_SINK) {
-		if (code->index >= ARRAY_SIZE(codes))
-			return -EINVAL;
-
-		code->code = codes[code->index];
-	} else {
-		struct v4l2_mbus_framefmt *format;
-
-		/* The LIF can't perform format conversion, the sink format is
-		 * always identical to the source format.
-		 */
-		if (code->index)
-			return -EINVAL;
-
-		format = vsp1_entity_get_pad_format(&lif->entity, cfg,
-						    LIF_PAD_SINK, code->which);
-		code->code = format->code;
-	}
-
-	return 0;
+	return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+					  ARRAY_SIZE(codes));
 }
 
 static int lif_enum_frame_size(struct v4l2_subdev *subdev,
 			       struct v4l2_subdev_pad_config *cfg,
 			       struct v4l2_subdev_frame_size_enum *fse)
 {
+	return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LIF_MIN_SIZE,
+					   LIF_MIN_SIZE, LIF_MAX_SIZE,
+					   LIF_MAX_SIZE);
+}
+
+static int lif_set_format(struct v4l2_subdev *subdev,
+			  struct v4l2_subdev_pad_config *cfg,
+			  struct v4l2_subdev_format *fmt)
+{
 	struct vsp1_lif *lif = to_lif(subdev);
+	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 
-	format = vsp1_entity_get_pad_format(&lif->entity, cfg, LIF_PAD_SINK,
-					    fse->which);
-
-	if (fse->index || fse->code != format->code)
+	config = vsp1_entity_get_pad_config(&lif->entity, cfg, fmt->which);
+	if (!config)
 		return -EINVAL;
 
-	if (fse->pad == LIF_PAD_SINK) {
-		fse->min_width = LIF_MIN_SIZE;
-		fse->max_width = LIF_MAX_SIZE;
-		fse->min_height = LIF_MIN_SIZE;
-		fse->max_height = LIF_MAX_SIZE;
-	} else {
-		fse->min_width = format->width;
-		fse->max_width = format->width;
-		fse->min_height = format->height;
-		fse->max_height = format->height;
-	}
-
-	return 0;
-}
-
-static int lif_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			  struct v4l2_subdev_format *fmt)
-{
-	struct vsp1_lif *lif = to_lif(subdev);
-
-	fmt->format = *vsp1_entity_get_pad_format(&lif->entity, cfg, fmt->pad,
-						  fmt->which);
-
-	return 0;
-}
-
-static int lif_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			  struct v4l2_subdev_format *fmt)
-{
-	struct vsp1_lif *lif = to_lif(subdev);
-	struct v4l2_mbus_framefmt *format;
-
 	/* Default to YUV if the requested format is not supported. */
 	if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
 	    fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
 		fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
 
-	format = vsp1_entity_get_pad_format(&lif->entity, cfg, fmt->pad,
-					    fmt->which);
+	format = vsp1_entity_get_pad_format(&lif->entity, config, fmt->pad);
 
 	if (fmt->pad == LIF_PAD_SOURCE) {
 		/* The LIF source format is always identical to its sink
@@ -172,40 +97,64 @@
 	fmt->format = *format;
 
 	/* Propagate the format to the source pad. */
-	format = vsp1_entity_get_pad_format(&lif->entity, cfg, LIF_PAD_SOURCE,
-					    fmt->which);
+	format = vsp1_entity_get_pad_format(&lif->entity, config,
+					    LIF_PAD_SOURCE);
 	*format = fmt->format;
 
 	return 0;
 }
 
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops lif_video_ops = {
-	.s_stream = lif_s_stream,
-};
-
 static struct v4l2_subdev_pad_ops lif_pad_ops = {
+	.init_cfg = vsp1_entity_init_cfg,
 	.enum_mbus_code = lif_enum_mbus_code,
 	.enum_frame_size = lif_enum_frame_size,
-	.get_fmt = lif_get_format,
+	.get_fmt = vsp1_subdev_get_pad_format,
 	.set_fmt = lif_set_format,
 };
 
 static struct v4l2_subdev_ops lif_ops = {
-	.video	= &lif_video_ops,
 	.pad    = &lif_pad_ops,
 };
 
 /* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void lif_configure(struct vsp1_entity *entity,
+			  struct vsp1_pipeline *pipe,
+			  struct vsp1_dl_list *dl)
+{
+	const struct v4l2_mbus_framefmt *format;
+	struct vsp1_lif *lif = to_lif(&entity->subdev);
+	unsigned int hbth = 1300;
+	unsigned int obth = 400;
+	unsigned int lbth = 200;
+
+	format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
+					    LIF_PAD_SOURCE);
+
+	obth = min(obth, (format->width + 1) / 2 * format->height - 4);
+
+	vsp1_lif_write(lif, dl, VI6_LIF_CSBTH,
+			(hbth << VI6_LIF_CSBTH_HBTH_SHIFT) |
+			(lbth << VI6_LIF_CSBTH_LBTH_SHIFT));
+
+	vsp1_lif_write(lif, dl, VI6_LIF_CTRL,
+			(obth << VI6_LIF_CTRL_OBTH_SHIFT) |
+			(format->code == 0 ? VI6_LIF_CTRL_CFMT : 0) |
+			VI6_LIF_CTRL_REQSEL | VI6_LIF_CTRL_LIF_EN);
+}
+
+static const struct vsp1_entity_operations lif_entity_ops = {
+	.configure = lif_configure,
+};
+
+/* -----------------------------------------------------------------------------
  * Initialization and Cleanup
  */
 
 struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1)
 {
-	struct v4l2_subdev *subdev;
 	struct vsp1_lif *lif;
 	int ret;
 
@@ -213,24 +162,12 @@
 	if (lif == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	lif->entity.ops = &lif_entity_ops;
 	lif->entity.type = VSP1_ENTITY_LIF;
 
-	ret = vsp1_entity_init(vsp1, &lif->entity, 2);
+	ret = vsp1_entity_init(vsp1, &lif->entity, "lif", 2, &lif_ops);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
-	/* Initialize the V4L2 subdev. */
-	subdev = &lif->entity.subdev;
-	v4l2_subdev_init(subdev, &lif_ops);
-
-	subdev->entity.ops = &vsp1->media_ops;
-	subdev->internal_ops = &vsp1_subdev_internal_ops;
-	snprintf(subdev->name, sizeof(subdev->name), "%s lif",
-		 dev_name(vsp1->dev));
-	v4l2_set_subdevdata(subdev, lif);
-	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
-	vsp1_entity_init_formats(subdev, NULL);
-
 	return lif;
 }
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index 4b89095..aa09e59 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -18,6 +18,7 @@
 #include <media/v4l2-subdev.h>
 
 #include "vsp1.h"
+#include "vsp1_dl.h"
 #include "vsp1_lut.h"
 
 #define LUT_MIN_SIZE				4U
@@ -27,19 +28,35 @@
  * Device Access
  */
 
-static inline void vsp1_lut_write(struct vsp1_lut *lut, u32 reg, u32 data)
+static inline void vsp1_lut_write(struct vsp1_lut *lut, struct vsp1_dl_list *dl,
+				  u32 reg, u32 data)
 {
-	vsp1_write(lut->entity.vsp1, reg, data);
+	vsp1_dl_list_write(dl, reg, data);
 }
 
 /* -----------------------------------------------------------------------------
  * V4L2 Subdevice Core Operations
  */
 
-static void lut_configure(struct vsp1_lut *lut, struct vsp1_lut_config *config)
+static int lut_set_table(struct vsp1_lut *lut, struct vsp1_lut_config *config)
 {
-	memcpy_toio(lut->entity.vsp1->mmio + VI6_LUT_TABLE, config->lut,
-		    sizeof(config->lut));
+	struct vsp1_dl_body *dlb;
+	unsigned int i;
+
+	dlb = vsp1_dl_fragment_alloc(lut->entity.vsp1, ARRAY_SIZE(config->lut));
+	if (!dlb)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(config->lut); ++i)
+		vsp1_dl_fragment_write(dlb, VI6_LUT_TABLE + 4 * i,
+				       config->lut[i]);
+
+	mutex_lock(&lut->lock);
+	swap(lut->lut, dlb);
+	mutex_unlock(&lut->lock);
+
+	vsp1_dl_fragment_free(dlb);
+	return 0;
 }
 
 static long lut_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
@@ -48,8 +65,7 @@
 
 	switch (cmd) {
 	case VIDIOC_VSP1_LUT_CONFIG:
-		lut_configure(lut, arg);
-		return 0;
+		return lut_set_table(lut, arg);
 
 	default:
 		return -ENOIOCTLCMD;
@@ -57,22 +73,6 @@
 }
 
 /* -----------------------------------------------------------------------------
- * V4L2 Subdevice Video Operations
- */
-
-static int lut_s_stream(struct v4l2_subdev *subdev, int enable)
-{
-	struct vsp1_lut *lut = to_lut(subdev);
-
-	if (!enable)
-		return 0;
-
-	vsp1_lut_write(lut, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
-
-	return 0;
-}
-
-/* -----------------------------------------------------------------------------
  * V4L2 Subdevice Pad Operations
  */
 
@@ -85,85 +85,39 @@
 		MEDIA_BUS_FMT_AHSV8888_1X32,
 		MEDIA_BUS_FMT_AYUV8_1X32,
 	};
-	struct vsp1_lut *lut = to_lut(subdev);
-	struct v4l2_mbus_framefmt *format;
 
-	if (code->pad == LUT_PAD_SINK) {
-		if (code->index >= ARRAY_SIZE(codes))
-			return -EINVAL;
-
-		code->code = codes[code->index];
-	} else {
-		/* The LUT can't perform format conversion, the sink format is
-		 * always identical to the source format.
-		 */
-		if (code->index)
-			return -EINVAL;
-
-		format = vsp1_entity_get_pad_format(&lut->entity, cfg,
-						    LUT_PAD_SINK, code->which);
-		code->code = format->code;
-	}
-
-	return 0;
+	return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+					  ARRAY_SIZE(codes));
 }
 
 static int lut_enum_frame_size(struct v4l2_subdev *subdev,
 			       struct v4l2_subdev_pad_config *cfg,
 			       struct v4l2_subdev_frame_size_enum *fse)
 {
+	return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LUT_MIN_SIZE,
+					   LUT_MIN_SIZE, LUT_MAX_SIZE,
+					   LUT_MAX_SIZE);
+}
+
+static int lut_set_format(struct v4l2_subdev *subdev,
+			  struct v4l2_subdev_pad_config *cfg,
+			  struct v4l2_subdev_format *fmt)
+{
 	struct vsp1_lut *lut = to_lut(subdev);
+	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 
-	format = vsp1_entity_get_pad_format(&lut->entity, cfg,
-					    fse->pad, fse->which);
-
-	if (fse->index || fse->code != format->code)
+	config = vsp1_entity_get_pad_config(&lut->entity, cfg, fmt->which);
+	if (!config)
 		return -EINVAL;
 
-	if (fse->pad == LUT_PAD_SINK) {
-		fse->min_width = LUT_MIN_SIZE;
-		fse->max_width = LUT_MAX_SIZE;
-		fse->min_height = LUT_MIN_SIZE;
-		fse->max_height = LUT_MAX_SIZE;
-	} else {
-		/* The size on the source pad are fixed and always identical to
-		 * the size on the sink pad.
-		 */
-		fse->min_width = format->width;
-		fse->max_width = format->width;
-		fse->min_height = format->height;
-		fse->max_height = format->height;
-	}
-
-	return 0;
-}
-
-static int lut_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			  struct v4l2_subdev_format *fmt)
-{
-	struct vsp1_lut *lut = to_lut(subdev);
-
-	fmt->format = *vsp1_entity_get_pad_format(&lut->entity, cfg, fmt->pad,
-						  fmt->which);
-
-	return 0;
-}
-
-static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			  struct v4l2_subdev_format *fmt)
-{
-	struct vsp1_lut *lut = to_lut(subdev);
-	struct v4l2_mbus_framefmt *format;
-
 	/* Default to YUV if the requested format is not supported. */
 	if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
 	    fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
 	    fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
 		fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
 
-	format = vsp1_entity_get_pad_format(&lut->entity, cfg, fmt->pad,
-					    fmt->which);
+	format = vsp1_entity_get_pad_format(&lut->entity, config, fmt->pad);
 
 	if (fmt->pad == LUT_PAD_SOURCE) {
 		/* The LUT output format can't be modified. */
@@ -171,6 +125,7 @@
 		return 0;
 	}
 
+	format->code = fmt->format.code;
 	format->width = clamp_t(unsigned int, fmt->format.width,
 				LUT_MIN_SIZE, LUT_MAX_SIZE);
 	format->height = clamp_t(unsigned int, fmt->format.height,
@@ -181,8 +136,8 @@
 	fmt->format = *format;
 
 	/* Propagate the format to the source pad. */
-	format = vsp1_entity_get_pad_format(&lut->entity, cfg, LUT_PAD_SOURCE,
-					    fmt->which);
+	format = vsp1_entity_get_pad_format(&lut->entity, config,
+					    LUT_PAD_SOURCE);
 	*format = fmt->format;
 
 	return 0;
@@ -196,30 +151,49 @@
 	.ioctl = lut_ioctl,
 };
 
-static struct v4l2_subdev_video_ops lut_video_ops = {
-	.s_stream = lut_s_stream,
-};
-
 static struct v4l2_subdev_pad_ops lut_pad_ops = {
+	.init_cfg = vsp1_entity_init_cfg,
 	.enum_mbus_code = lut_enum_mbus_code,
 	.enum_frame_size = lut_enum_frame_size,
-	.get_fmt = lut_get_format,
+	.get_fmt = vsp1_subdev_get_pad_format,
 	.set_fmt = lut_set_format,
 };
 
 static struct v4l2_subdev_ops lut_ops = {
 	.core	= &lut_core_ops,
-	.video	= &lut_video_ops,
 	.pad    = &lut_pad_ops,
 };
 
 /* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void lut_configure(struct vsp1_entity *entity,
+			  struct vsp1_pipeline *pipe,
+			  struct vsp1_dl_list *dl)
+{
+	struct vsp1_lut *lut = to_lut(&entity->subdev);
+
+	vsp1_lut_write(lut, dl, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
+
+	mutex_lock(&lut->lock);
+	if (lut->lut) {
+		vsp1_dl_list_add_fragment(dl, lut->lut);
+		lut->lut = NULL;
+	}
+	mutex_unlock(&lut->lock);
+}
+
+static const struct vsp1_entity_operations lut_entity_ops = {
+	.configure = lut_configure,
+};
+
+/* -----------------------------------------------------------------------------
  * Initialization and Cleanup
  */
 
 struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1)
 {
-	struct v4l2_subdev *subdev;
 	struct vsp1_lut *lut;
 	int ret;
 
@@ -227,24 +201,12 @@
 	if (lut == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	lut->entity.ops = &lut_entity_ops;
 	lut->entity.type = VSP1_ENTITY_LUT;
 
-	ret = vsp1_entity_init(vsp1, &lut->entity, 2);
+	ret = vsp1_entity_init(vsp1, &lut->entity, "lut", 2, &lut_ops);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
-	/* Initialize the V4L2 subdev. */
-	subdev = &lut->entity.subdev;
-	v4l2_subdev_init(subdev, &lut_ops);
-
-	subdev->entity.ops = &vsp1->media_ops;
-	subdev->internal_ops = &vsp1_subdev_internal_ops;
-	snprintf(subdev->name, sizeof(subdev->name), "%s lut",
-		 dev_name(vsp1->dev));
-	v4l2_set_subdevdata(subdev, lut);
-	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
-	vsp1_entity_init_formats(subdev, NULL);
-
 	return lut;
 }
diff --git a/drivers/media/platform/vsp1/vsp1_lut.h b/drivers/media/platform/vsp1/vsp1_lut.h
index f92ffb8..cef874f 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.h
+++ b/drivers/media/platform/vsp1/vsp1_lut.h
@@ -13,6 +13,8 @@
 #ifndef __VSP1_LUT_H__
 #define __VSP1_LUT_H__
 
+#include <linux/mutex.h>
+
 #include <media/media-entity.h>
 #include <media/v4l2-subdev.h>
 
@@ -25,7 +27,9 @@
 
 struct vsp1_lut {
 	struct vsp1_entity entity;
-	u32 lut[256];
+
+	struct mutex lock;
+	struct vsp1_dl_body *lut;
 };
 
 static inline struct vsp1_lut *to_lut(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
index 6659f06..4f3b4a1 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -43,7 +43,7 @@
 	{ V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
 	  VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
 	  VI6_RPF_DSWAP_P_WDS,
-	  1, { 16, 0, 0 }, false, false, 1, 1, true },
+	  1, { 16, 0, 0 }, false, false, 1, 1, false },
 	{ V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
 	  VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
 	  VI6_RPF_DSWAP_P_WDS,
@@ -172,14 +172,18 @@
 			bru->inputs[i].rpf = NULL;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i)
+	for (i = 0; i < pipe->num_inputs; ++i) {
+		pipe->inputs[i]->pipe = NULL;
 		pipe->inputs[i] = NULL;
+	}
+
+	pipe->output->pipe = NULL;
+	pipe->output = NULL;
 
 	INIT_LIST_HEAD(&pipe->entities);
 	pipe->state = VSP1_PIPELINE_STOPPED;
 	pipe->buffers_ready = 0;
 	pipe->num_inputs = 0;
-	pipe->output = NULL;
 	pipe->bru = NULL;
 	pipe->lif = NULL;
 	pipe->uds = NULL;
@@ -190,11 +194,13 @@
 	mutex_init(&pipe->lock);
 	spin_lock_init(&pipe->irqlock);
 	init_waitqueue_head(&pipe->wq);
+	kref_init(&pipe->kref);
 
 	INIT_LIST_HEAD(&pipe->entities);
 	pipe->state = VSP1_PIPELINE_STOPPED;
 }
 
+/* Must be called with the pipe irqlock held. */
 void vsp1_pipeline_run(struct vsp1_pipeline *pipe)
 {
 	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
@@ -226,7 +232,7 @@
 	unsigned long flags;
 	int ret;
 
-	if (pipe->dl) {
+	if (pipe->lif) {
 		/* When using display lists in continuous frame mode the only
 		 * way to stop the pipeline is to reset the hardware.
 		 */
@@ -253,10 +259,10 @@
 		if (entity->route && entity->route->reg)
 			vsp1_write(entity->vsp1, entity->route->reg,
 				   VI6_DPR_NODE_UNUSED);
-
-		v4l2_subdev_call(&entity->subdev, video, s_stream, 0);
 	}
 
+	v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0);
+
 	return ret;
 }
 
@@ -271,50 +277,15 @@
 	return pipe->buffers_ready == mask;
 }
 
-void vsp1_pipeline_display_start(struct vsp1_pipeline *pipe)
-{
-	if (pipe->dl)
-		vsp1_dl_irq_display_start(pipe->dl);
-}
-
 void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
 {
-	enum vsp1_pipeline_state state;
-	unsigned long flags;
-
 	if (pipe == NULL)
 		return;
 
-	if (pipe->dl)
-		vsp1_dl_irq_frame_end(pipe->dl);
+	vsp1_dlm_irq_frame_end(pipe->output->dlm);
 
-	/* Signal frame end to the pipeline handler. */
-	pipe->frame_end(pipe);
-
-	spin_lock_irqsave(&pipe->irqlock, flags);
-
-	state = pipe->state;
-
-	/* When using display lists in continuous frame mode the pipeline is
-	 * automatically restarted by the hardware.
-	 */
-	if (!pipe->dl)
-		pipe->state = VSP1_PIPELINE_STOPPED;
-
-	/* If a stop has been requested, mark the pipeline as stopped and
-	 * return.
-	 */
-	if (state == VSP1_PIPELINE_STOPPING) {
-		wake_up(&pipe->wq);
-		goto done;
-	}
-
-	/* Restart the pipeline if ready. */
-	if (vsp1_pipeline_ready(pipe))
-		vsp1_pipeline_run(pipe);
-
-done:
-	spin_unlock_irqrestore(&pipe->irqlock, flags);
+	if (pipe->frame_end)
+		pipe->frame_end(pipe);
 }
 
 /*
@@ -324,9 +295,13 @@
  * to be scaled, we disable alpha scaling when the UDS input has a fixed alpha
  * value. The UDS then outputs a fixed alpha value which needs to be programmed
  * from the input RPF alpha.
+ *
+ * This function can only be called from a subdev s_stream handler as it
+ * requires a valid display list context.
  */
 void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
 				   struct vsp1_entity *input,
+				   struct vsp1_dl_list *dl,
 				   unsigned int alpha)
 {
 	struct vsp1_entity *entity;
@@ -349,7 +324,7 @@
 		if (entity->type == VSP1_ENTITY_UDS) {
 			struct vsp1_uds *uds = to_uds(&entity->subdev);
 
-			vsp1_uds_set_alpha(uds, alpha);
+			vsp1_uds_set_alpha(uds, dl, alpha);
 			break;
 		}
 
@@ -375,7 +350,7 @@
 		if (wpf == NULL)
 			continue;
 
-		pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+		pipe = wpf->pipe;
 		if (pipe == NULL)
 			continue;
 
@@ -392,7 +367,7 @@
 		if (wpf == NULL)
 			continue;
 
-		pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+		pipe = wpf->pipe;
 		if (pipe == NULL)
 			continue;
 
@@ -416,7 +391,7 @@
 		if (wpf == NULL)
 			continue;
 
-		pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+		pipe = wpf->pipe;
 		if (pipe == NULL)
 			continue;
 
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h
index b2f3a8a..7b561135 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/vsp1/vsp1_pipe.h
@@ -13,13 +13,14 @@
 #ifndef __VSP1_PIPE_H__
 #define __VSP1_PIPE_H__
 
+#include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 
 #include <media/media-entity.h>
 
-struct vsp1_dl;
+struct vsp1_dl_list;
 struct vsp1_rwpf;
 
 /*
@@ -63,7 +64,7 @@
  * @wq: work queue to wait for state change completion
  * @frame_end: frame end interrupt handler
  * @lock: protects the pipeline use count and stream count
- * @use_count: number of video nodes using the pipeline
+ * @kref: pipeline reference count
  * @stream_count: number of streaming video nodes
  * @buffers_ready: bitmask of RPFs and WPFs with at least one buffer available
  * @num_inputs: number of RPFs
@@ -86,7 +87,7 @@
 	void (*frame_end)(struct vsp1_pipeline *pipe);
 
 	struct mutex lock;
-	unsigned int use_count;
+	struct kref kref;
 	unsigned int stream_count;
 	unsigned int buffers_ready;
 
@@ -100,17 +101,9 @@
 
 	struct list_head entities;
 
-	struct vsp1_dl *dl;
+	struct vsp1_dl_list *dl;
 };
 
-static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
-{
-	if (likely(e->pipe))
-		return container_of(e->pipe, struct vsp1_pipeline, pipe);
-	else
-		return NULL;
-}
-
 void vsp1_pipeline_reset(struct vsp1_pipeline *pipe);
 void vsp1_pipeline_init(struct vsp1_pipeline *pipe);
 
@@ -119,11 +112,11 @@
 int vsp1_pipeline_stop(struct vsp1_pipeline *pipe);
 bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe);
 
-void vsp1_pipeline_display_start(struct vsp1_pipeline *pipe);
 void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe);
 
 void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
 				   struct vsp1_entity *input,
+				   struct vsp1_dl_list *dl,
 				   unsigned int alpha);
 
 void vsp1_pipelines_suspend(struct vsp1_device *vsp1);
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 069216f..927b5fb 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -217,6 +217,16 @@
 #define VI6_RPF_SRCM_ADDR_C1		0x0344
 #define VI6_RPF_SRCM_ADDR_AI		0x0348
 
+#define VI6_RPF_MULT_ALPHA		0x036c
+#define VI6_RPF_MULT_ALPHA_A_MMD_NONE	(0 << 12)
+#define VI6_RPF_MULT_ALPHA_A_MMD_RATIO	(1 << 12)
+#define VI6_RPF_MULT_ALPHA_P_MMD_NONE	(0 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_RATIO	(1 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_IMAGE	(2 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_BOTH	(3 << 8)
+#define VI6_RPF_MULT_ALPHA_RATIO_MASK	(0xff << 0)
+#define VI6_RPF_MULT_ALPHA_RATIO_SHIFT	0
+
 /* -----------------------------------------------------------------------------
  * WPF Control Registers
  */
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 5bc1d15..49168db 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -16,6 +16,8 @@
 #include <media/v4l2-subdev.h>
 
 #include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
 #include "vsp1_rwpf.h"
 #include "vsp1_video.h"
 
@@ -26,64 +28,50 @@
  * Device Access
  */
 
-static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf, u32 reg, u32 data)
+static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
+				  struct vsp1_dl_list *dl, u32 reg, u32 data)
 {
-	vsp1_mod_write(&rpf->entity, reg + rpf->entity.index * VI6_RPF_OFFSET,
-		       data);
+	vsp1_dl_list_write(dl, reg + rpf->entity.index * VI6_RPF_OFFSET, data);
 }
 
 /* -----------------------------------------------------------------------------
- * Controls
+ * V4L2 Subdevice Operations
  */
 
-static int rpf_s_ctrl(struct v4l2_ctrl *ctrl)
-{
-	struct vsp1_rwpf *rpf =
-		container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
-	struct vsp1_pipeline *pipe;
-
-	if (!vsp1_entity_is_streaming(&rpf->entity))
-		return 0;
-
-	switch (ctrl->id) {
-	case V4L2_CID_ALPHA_COMPONENT:
-		vsp1_rpf_write(rpf, VI6_RPF_VRTCOL_SET,
-			       ctrl->val << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
-
-		pipe = to_vsp1_pipeline(&rpf->entity.subdev.entity);
-		vsp1_pipeline_propagate_alpha(pipe, &rpf->entity, ctrl->val);
-		break;
-	}
-
-	return 0;
-}
-
-static const struct v4l2_ctrl_ops rpf_ctrl_ops = {
-	.s_ctrl = rpf_s_ctrl,
+static struct v4l2_subdev_ops rpf_ops = {
+	.pad    = &vsp1_rwpf_pad_ops,
 };
 
 /* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
+ * VSP1 Entity Operations
  */
 
-static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
+static void rpf_set_memory(struct vsp1_entity *entity, struct vsp1_dl_list *dl)
 {
-	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&subdev->entity);
-	struct vsp1_rwpf *rpf = to_rwpf(subdev);
-	struct vsp1_device *vsp1 = rpf->entity.vsp1;
+	struct vsp1_rwpf *rpf = entity_to_rwpf(entity);
+
+	vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_Y,
+		       rpf->mem.addr[0] + rpf->offsets[0]);
+	vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C0,
+		       rpf->mem.addr[1] + rpf->offsets[1]);
+	vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C1,
+		       rpf->mem.addr[2] + rpf->offsets[1]);
+}
+
+static void rpf_configure(struct vsp1_entity *entity,
+			  struct vsp1_pipeline *pipe,
+			  struct vsp1_dl_list *dl)
+{
+	struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
 	const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
 	const struct v4l2_pix_format_mplane *format = &rpf->format;
-	const struct v4l2_rect *crop = &rpf->crop;
+	const struct v4l2_mbus_framefmt *source_format;
+	const struct v4l2_mbus_framefmt *sink_format;
+	const struct v4l2_rect *crop;
+	unsigned int left = 0;
+	unsigned int top = 0;
 	u32 pstride;
 	u32 infmt;
-	int ret;
-
-	ret = vsp1_entity_set_streaming(&rpf->entity, enable);
-	if (ret < 0)
-		return ret;
-
-	if (!enable)
-		return 0;
 
 	/* Source size, stride and crop offsets.
 	 *
@@ -91,10 +79,12 @@
 	 * left corner in the plane buffer. Only two offsets are needed, as
 	 * planes 2 and 3 always have identical strides.
 	 */
-	vsp1_rpf_write(rpf, VI6_RPF_SRC_BSIZE,
+	crop = vsp1_rwpf_get_crop(rpf, rpf->entity.config);
+
+	vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_BSIZE,
 		       (crop->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
 		       (crop->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
-	vsp1_rpf_write(rpf, VI6_RPF_SRC_ESIZE,
+	vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_ESIZE,
 		       (crop->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
 		       (crop->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
 
@@ -103,26 +93,25 @@
 	pstride = format->plane_fmt[0].bytesperline
 		<< VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
 
-	vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
-		       rpf->buf_addr[0] + rpf->offsets[0]);
-
 	if (format->num_planes > 1) {
 		rpf->offsets[1] = crop->top * format->plane_fmt[1].bytesperline
 				+ crop->left * fmtinfo->bpp[1] / 8;
 		pstride |= format->plane_fmt[1].bytesperline
 			<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
-
-		vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
-			       rpf->buf_addr[1] + rpf->offsets[1]);
-
-		if (format->num_planes > 2)
-			vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
-				       rpf->buf_addr[2] + rpf->offsets[1]);
+	} else {
+		rpf->offsets[1] = 0;
 	}
 
-	vsp1_rpf_write(rpf, VI6_RPF_SRCM_PSTRIDE, pstride);
+	vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_PSTRIDE, pstride);
 
 	/* Format */
+	sink_format = vsp1_entity_get_pad_format(&rpf->entity,
+						 rpf->entity.config,
+						 RWPF_PAD_SINK);
+	source_format = vsp1_entity_get_pad_format(&rpf->entity,
+						   rpf->entity.config,
+						   RWPF_PAD_SOURCE);
+
 	infmt = VI6_RPF_INFMT_CIPM
 	      | (fmtinfo->hwfmt << VI6_RPF_INFMT_RDFMT_SHIFT);
 
@@ -131,88 +120,98 @@
 	if (fmtinfo->swap_uv)
 		infmt |= VI6_RPF_INFMT_SPUVS;
 
-	if (rpf->entity.formats[RWPF_PAD_SINK].code !=
-	    rpf->entity.formats[RWPF_PAD_SOURCE].code)
+	if (sink_format->code != source_format->code)
 		infmt |= VI6_RPF_INFMT_CSC;
 
-	vsp1_rpf_write(rpf, VI6_RPF_INFMT, infmt);
-	vsp1_rpf_write(rpf, VI6_RPF_DSWAP, fmtinfo->swap);
+	vsp1_rpf_write(rpf, dl, VI6_RPF_INFMT, infmt);
+	vsp1_rpf_write(rpf, dl, VI6_RPF_DSWAP, fmtinfo->swap);
 
 	/* Output location */
-	vsp1_rpf_write(rpf, VI6_RPF_LOC,
-		       (rpf->location.left << VI6_RPF_LOC_HCOORD_SHIFT) |
-		       (rpf->location.top << VI6_RPF_LOC_VCOORD_SHIFT));
+	if (pipe->bru) {
+		const struct v4l2_rect *compose;
 
-	/* Use the alpha channel (extended to 8 bits) when available or an
-	 * alpha value set through the V4L2_CID_ALPHA_COMPONENT control
-	 * otherwise. Disable color keying.
+		compose = vsp1_entity_get_pad_compose(pipe->bru,
+						      pipe->bru->config,
+						      rpf->bru_input);
+		left = compose->left;
+		top = compose->top;
+	}
+
+	vsp1_rpf_write(rpf, dl, VI6_RPF_LOC,
+		       (left << VI6_RPF_LOC_HCOORD_SHIFT) |
+		       (top << VI6_RPF_LOC_VCOORD_SHIFT));
+
+	/* On Gen2 use the alpha channel (extended to 8 bits) when available or
+	 * a fixed alpha value set through the V4L2_CID_ALPHA_COMPONENT control
+	 * otherwise.
+	 *
+	 * The Gen3 RPF has extended alpha capability and can both multiply the
+	 * alpha channel by a fixed global alpha value, and multiply the pixel
+	 * components to convert the input to premultiplied alpha.
+	 *
+	 * As alpha premultiplication is available in the BRU for both Gen2 and
+	 * Gen3 we handle it there and use the Gen3 alpha multiplier for global
+	 * alpha multiplication only. This however prevents conversion to
+	 * premultiplied alpha if no BRU is present in the pipeline. If that use
+	 * case turns out to be useful we will revisit the implementation (for
+	 * Gen3 only).
+	 *
+	 * We enable alpha multiplication on Gen3 using the fixed alpha value
+	 * set through the V4L2_CID_ALPHA_COMPONENT control when the input
+	 * contains an alpha channel. On Gen2 the global alpha is ignored in
+	 * that case.
+	 *
+	 * In all cases, disable color keying.
 	 */
-	vsp1_rpf_write(rpf, VI6_RPF_ALPH_SEL, VI6_RPF_ALPH_SEL_AEXT_EXT |
+	vsp1_rpf_write(rpf, dl, VI6_RPF_ALPH_SEL, VI6_RPF_ALPH_SEL_AEXT_EXT |
 		       (fmtinfo->alpha ? VI6_RPF_ALPH_SEL_ASEL_PACKED
 				       : VI6_RPF_ALPH_SEL_ASEL_FIXED));
 
-	if (vsp1->info->uapi)
-		mutex_lock(rpf->ctrls.lock);
-	vsp1_rpf_write(rpf, VI6_RPF_VRTCOL_SET,
-		       rpf->alpha->cur.val << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
-	vsp1_pipeline_propagate_alpha(pipe, &rpf->entity, rpf->alpha->cur.val);
-	if (vsp1->info->uapi)
-		mutex_unlock(rpf->ctrls.lock);
+	vsp1_rpf_write(rpf, dl, VI6_RPF_VRTCOL_SET,
+		       rpf->alpha << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
 
-	vsp1_rpf_write(rpf, VI6_RPF_MSK_CTRL, 0);
-	vsp1_rpf_write(rpf, VI6_RPF_CKEY_CTRL, 0);
+	if (entity->vsp1->info->gen == 3) {
+		u32 mult;
 
-	return 0;
+		if (fmtinfo->alpha) {
+			/* When the input contains an alpha channel enable the
+			 * alpha multiplier. If the input is premultiplied we
+			 * need to multiply both the alpha channel and the pixel
+			 * components by the global alpha value to keep them
+			 * premultiplied. Otherwise multiply the alpha channel
+			 * only.
+			 */
+			bool premultiplied = format->flags
+					   & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;
+
+			mult = VI6_RPF_MULT_ALPHA_A_MMD_RATIO
+			     | (premultiplied ?
+				VI6_RPF_MULT_ALPHA_P_MMD_RATIO :
+				VI6_RPF_MULT_ALPHA_P_MMD_NONE)
+			     | (rpf->alpha << VI6_RPF_MULT_ALPHA_RATIO_SHIFT);
+		} else {
+			/* When the input doesn't contain an alpha channel the
+			 * global alpha value is applied in the unpacking unit,
+			 * so the alpha multiplier isn't needed and must be
+			 * disabled.
+			 */
+			mult = VI6_RPF_MULT_ALPHA_A_MMD_NONE
+			     | VI6_RPF_MULT_ALPHA_P_MMD_NONE;
+		}
+
+		vsp1_rpf_write(rpf, dl, VI6_RPF_MULT_ALPHA, mult);
+	}
+
+	vsp1_pipeline_propagate_alpha(pipe, &rpf->entity, dl, rpf->alpha);
+
+	vsp1_rpf_write(rpf, dl, VI6_RPF_MSK_CTRL, 0);
+	vsp1_rpf_write(rpf, dl, VI6_RPF_CKEY_CTRL, 0);
+
 }
 
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops rpf_video_ops = {
-	.s_stream = rpf_s_stream,
-};
-
-static struct v4l2_subdev_pad_ops rpf_pad_ops = {
-	.enum_mbus_code = vsp1_rwpf_enum_mbus_code,
-	.enum_frame_size = vsp1_rwpf_enum_frame_size,
-	.get_fmt = vsp1_rwpf_get_format,
-	.set_fmt = vsp1_rwpf_set_format,
-	.get_selection = vsp1_rwpf_get_selection,
-	.set_selection = vsp1_rwpf_set_selection,
-};
-
-static struct v4l2_subdev_ops rpf_ops = {
-	.video	= &rpf_video_ops,
-	.pad    = &rpf_pad_ops,
-};
-
-/* -----------------------------------------------------------------------------
- * Video Device Operations
- */
-
-static void rpf_set_memory(struct vsp1_rwpf *rpf, struct vsp1_rwpf_memory *mem)
-{
-	unsigned int i;
-
-	for (i = 0; i < 3; ++i)
-		rpf->buf_addr[i] = mem->addr[i];
-
-	if (!vsp1_entity_is_streaming(&rpf->entity))
-		return;
-
-	vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
-		       mem->addr[0] + rpf->offsets[0]);
-	if (mem->num_planes > 1)
-		vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
-			       mem->addr[1] + rpf->offsets[1]);
-	if (mem->num_planes > 2)
-		vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
-			       mem->addr[2] + rpf->offsets[1]);
-}
-
-static const struct vsp1_rwpf_operations rpf_vdev_ops = {
+static const struct vsp1_entity_operations rpf_entity_ops = {
 	.set_memory = rpf_set_memory,
+	.configure = rpf_configure,
 };
 
 /* -----------------------------------------------------------------------------
@@ -221,51 +220,31 @@
 
 struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
 {
-	struct v4l2_subdev *subdev;
 	struct vsp1_rwpf *rpf;
+	char name[6];
 	int ret;
 
 	rpf = devm_kzalloc(vsp1->dev, sizeof(*rpf), GFP_KERNEL);
 	if (rpf == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	rpf->ops = &rpf_vdev_ops;
-
 	rpf->max_width = RPF_MAX_WIDTH;
 	rpf->max_height = RPF_MAX_HEIGHT;
 
+	rpf->entity.ops = &rpf_entity_ops;
 	rpf->entity.type = VSP1_ENTITY_RPF;
 	rpf->entity.index = index;
 
-	ret = vsp1_entity_init(vsp1, &rpf->entity, 2);
+	sprintf(name, "rpf.%u", index);
+	ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
-	/* Initialize the V4L2 subdev. */
-	subdev = &rpf->entity.subdev;
-	v4l2_subdev_init(subdev, &rpf_ops);
-
-	subdev->entity.ops = &vsp1->media_ops;
-	subdev->internal_ops = &vsp1_subdev_internal_ops;
-	snprintf(subdev->name, sizeof(subdev->name), "%s rpf.%u",
-		 dev_name(vsp1->dev), index);
-	v4l2_set_subdevdata(subdev, rpf);
-	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
-	vsp1_entity_init_formats(subdev, NULL);
-
 	/* Initialize the control handler. */
-	v4l2_ctrl_handler_init(&rpf->ctrls, 1);
-	rpf->alpha = v4l2_ctrl_new_std(&rpf->ctrls, &rpf_ctrl_ops,
-				       V4L2_CID_ALPHA_COMPONENT,
-				       0, 255, 1, 255);
-
-	rpf->entity.subdev.ctrl_handler = &rpf->ctrls;
-
-	if (rpf->ctrls.error) {
+	ret = vsp1_rwpf_init_ctrls(rpf);
+	if (ret < 0) {
 		dev_err(vsp1->dev, "rpf%u: failed to initialize controls\n",
 			index);
-		ret = rpf->ctrls.error;
 		goto error;
 	}
 
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 9688c21..3b6e032 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -20,13 +20,20 @@
 #define RWPF_MIN_WIDTH				1
 #define RWPF_MIN_HEIGHT				1
 
+struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
+				     struct v4l2_subdev_pad_config *config)
+{
+	return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, config,
+					RWPF_PAD_SINK);
+}
+
 /* -----------------------------------------------------------------------------
  * V4L2 Subdevice Pad Operations
  */
 
-int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
-			     struct v4l2_subdev_pad_config *cfg,
-			     struct v4l2_subdev_mbus_code_enum *code)
+static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
+				    struct v4l2_subdev_pad_config *cfg,
+				    struct v4l2_subdev_mbus_code_enum *code)
 {
 	static const unsigned int codes[] = {
 		MEDIA_BUS_FMT_ARGB8888_1X32,
@@ -41,75 +48,36 @@
 	return 0;
 }
 
-int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
-			      struct v4l2_subdev_pad_config *cfg,
-			      struct v4l2_subdev_frame_size_enum *fse)
-{
-	struct vsp1_rwpf *rwpf = to_rwpf(subdev);
-	struct v4l2_mbus_framefmt *format;
-
-	format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, fse->pad,
-					    fse->which);
-
-	if (fse->index || fse->code != format->code)
-		return -EINVAL;
-
-	if (fse->pad == RWPF_PAD_SINK) {
-		fse->min_width = RWPF_MIN_WIDTH;
-		fse->max_width = rwpf->max_width;
-		fse->min_height = RWPF_MIN_HEIGHT;
-		fse->max_height = rwpf->max_height;
-	} else {
-		/* The size on the source pad are fixed and always identical to
-		 * the size on the sink pad.
-		 */
-		fse->min_width = format->width;
-		fse->max_width = format->width;
-		fse->min_height = format->height;
-		fse->max_height = format->height;
-	}
-
-	return 0;
-}
-
-static struct v4l2_rect *
-vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, struct v4l2_subdev_pad_config *cfg, u32 which)
-{
-	switch (which) {
-	case V4L2_SUBDEV_FORMAT_TRY:
-		return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, cfg, RWPF_PAD_SINK);
-	case V4L2_SUBDEV_FORMAT_ACTIVE:
-		return &rwpf->crop;
-	default:
-		return NULL;
-	}
-}
-
-int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			 struct v4l2_subdev_format *fmt)
+static int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
+				     struct v4l2_subdev_pad_config *cfg,
+				     struct v4l2_subdev_frame_size_enum *fse)
 {
 	struct vsp1_rwpf *rwpf = to_rwpf(subdev);
 
-	fmt->format = *vsp1_entity_get_pad_format(&rwpf->entity, cfg, fmt->pad,
-						  fmt->which);
-
-	return 0;
+	return vsp1_subdev_enum_frame_size(subdev, cfg, fse, RWPF_MIN_WIDTH,
+					   RWPF_MIN_HEIGHT, rwpf->max_width,
+					   rwpf->max_height);
 }
 
-int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			 struct v4l2_subdev_format *fmt)
+static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
+				struct v4l2_subdev_pad_config *cfg,
+				struct v4l2_subdev_format *fmt)
 {
 	struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 	struct v4l2_rect *crop;
 
+	config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, fmt->which);
+	if (!config)
+		return -EINVAL;
+
 	/* Default to YUV if the requested format is not supported. */
 	if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
 	    fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
 		fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
 
-	format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, fmt->pad,
-					    fmt->which);
+	format = vsp1_entity_get_pad_format(&rwpf->entity, config, fmt->pad);
 
 	if (fmt->pad == RWPF_PAD_SOURCE) {
 		/* The RWPF performs format conversion but can't scale, only the
@@ -131,39 +99,44 @@
 	fmt->format = *format;
 
 	/* Update the sink crop rectangle. */
-	crop = vsp1_rwpf_get_crop(rwpf, cfg, fmt->which);
+	crop = vsp1_rwpf_get_crop(rwpf, config);
 	crop->left = 0;
 	crop->top = 0;
 	crop->width = fmt->format.width;
 	crop->height = fmt->format.height;
 
 	/* Propagate the format to the source pad. */
-	format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SOURCE,
-					    fmt->which);
+	format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+					    RWPF_PAD_SOURCE);
 	*format = fmt->format;
 
 	return 0;
 }
 
-int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
-			    struct v4l2_subdev_pad_config *cfg,
-			    struct v4l2_subdev_selection *sel)
+static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
+				   struct v4l2_subdev_pad_config *cfg,
+				   struct v4l2_subdev_selection *sel)
 {
 	struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 
 	/* Cropping is implemented on the sink pad. */
 	if (sel->pad != RWPF_PAD_SINK)
 		return -EINVAL;
 
+	config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+	if (!config)
+		return -EINVAL;
+
 	switch (sel->target) {
 	case V4L2_SEL_TGT_CROP:
-		sel->r = *vsp1_rwpf_get_crop(rwpf, cfg, sel->which);
+		sel->r = *vsp1_rwpf_get_crop(rwpf, config);
 		break;
 
 	case V4L2_SEL_TGT_CROP_BOUNDS:
-		format = vsp1_entity_get_pad_format(&rwpf->entity, cfg,
-						    RWPF_PAD_SINK, sel->which);
+		format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+						    RWPF_PAD_SINK);
 		sel->r.left = 0;
 		sel->r.top = 0;
 		sel->r.width = format->width;
@@ -177,11 +150,12 @@
 	return 0;
 }
 
-int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
-			    struct v4l2_subdev_pad_config *cfg,
-			    struct v4l2_subdev_selection *sel)
+static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+				   struct v4l2_subdev_pad_config *cfg,
+				   struct v4l2_subdev_selection *sel)
 {
 	struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 	struct v4l2_rect *crop;
 
@@ -192,11 +166,15 @@
 	if (sel->target != V4L2_SEL_TGT_CROP)
 		return -EINVAL;
 
+	config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+	if (!config)
+		return -EINVAL;
+
 	/* Make sure the crop rectangle is entirely contained in the image. The
 	 * WPF top and left offsets are limited to 255.
 	 */
-	format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SINK,
-					    sel->which);
+	format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+					    RWPF_PAD_SINK);
 
 	/* Restrict the crop rectangle coordinates to multiples of 2 to avoid
 	 * shifting the color plane.
@@ -219,14 +197,59 @@
 	sel->r.height = min_t(unsigned int, sel->r.height,
 			      format->height - sel->r.top);
 
-	crop = vsp1_rwpf_get_crop(rwpf, cfg, sel->which);
+	crop = vsp1_rwpf_get_crop(rwpf, config);
 	*crop = sel->r;
 
 	/* Propagate the format to the source pad. */
-	format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SOURCE,
-					    sel->which);
+	format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+					    RWPF_PAD_SOURCE);
 	format->width = crop->width;
 	format->height = crop->height;
 
 	return 0;
 }
+
+const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
+	.init_cfg = vsp1_entity_init_cfg,
+	.enum_mbus_code = vsp1_rwpf_enum_mbus_code,
+	.enum_frame_size = vsp1_rwpf_enum_frame_size,
+	.get_fmt = vsp1_subdev_get_pad_format,
+	.set_fmt = vsp1_rwpf_set_format,
+	.get_selection = vsp1_rwpf_get_selection,
+	.set_selection = vsp1_rwpf_set_selection,
+};
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static int vsp1_rwpf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct vsp1_rwpf *rwpf =
+		container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
+
+	switch (ctrl->id) {
+	case V4L2_CID_ALPHA_COMPONENT:
+		rwpf->alpha = ctrl->val;
+		break;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops vsp1_rwpf_ctrl_ops = {
+	.s_ctrl = vsp1_rwpf_s_ctrl,
+};
+
+int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf)
+{
+	rwpf->alpha = 255;
+
+	v4l2_ctrl_handler_init(&rwpf->ctrls, 1);
+	v4l2_ctrl_new_std(&rwpf->ctrls, &vsp1_rwpf_ctrl_ops,
+			  V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+	rwpf->entity.subdev.ctrl_handler = &rwpf->ctrls;
+
+	return rwpf->ctrls.error;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 8e82356..9ff7c78 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -24,42 +24,35 @@
 #define RWPF_PAD_SOURCE				1
 
 struct v4l2_ctrl;
+struct vsp1_dl_manager;
+struct vsp1_pipeline;
 struct vsp1_rwpf;
 struct vsp1_video;
 
 struct vsp1_rwpf_memory {
-	unsigned int num_planes;
 	dma_addr_t addr[3];
-	unsigned int length[3];
-};
-
-struct vsp1_rwpf_operations {
-	void (*set_memory)(struct vsp1_rwpf *rwpf,
-			   struct vsp1_rwpf_memory *mem);
 };
 
 struct vsp1_rwpf {
 	struct vsp1_entity entity;
 	struct v4l2_ctrl_handler ctrls;
-	struct v4l2_ctrl *alpha;
 
+	struct vsp1_pipeline *pipe;
 	struct vsp1_video *video;
 
-	const struct vsp1_rwpf_operations *ops;
-
 	unsigned int max_width;
 	unsigned int max_height;
 
 	struct v4l2_pix_format_mplane format;
 	const struct vsp1_format_info *fmtinfo;
-	struct {
-		unsigned int left;
-		unsigned int top;
-	} location;
-	struct v4l2_rect crop;
+	unsigned int bru_input;
+
+	unsigned int alpha;
 
 	unsigned int offsets[2];
-	dma_addr_t buf_addr[3];
+	struct vsp1_rwpf_memory mem;
+
+	struct vsp1_dl_manager *dlm;
 };
 
 static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
@@ -67,24 +60,31 @@
 	return container_of(subdev, struct vsp1_rwpf, entity.subdev);
 }
 
+static inline struct vsp1_rwpf *entity_to_rwpf(struct vsp1_entity *entity)
+{
+	return container_of(entity, struct vsp1_rwpf, entity);
+}
+
 struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
 struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
 
-int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
-			     struct v4l2_subdev_pad_config *cfg,
-			     struct v4l2_subdev_mbus_code_enum *code);
-int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
-			      struct v4l2_subdev_pad_config *cfg,
-			      struct v4l2_subdev_frame_size_enum *fse);
-int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			 struct v4l2_subdev_format *fmt);
-int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			 struct v4l2_subdev_format *fmt);
-int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
-			    struct v4l2_subdev_pad_config *cfg,
-			    struct v4l2_subdev_selection *sel);
-int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
-			    struct v4l2_subdev_pad_config *cfg,
-			    struct v4l2_subdev_selection *sel);
+int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf);
+
+extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
+
+struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
+				     struct v4l2_subdev_pad_config *config);
+/**
+ * vsp1_rwpf_set_memory - Configure DMA addresses for a [RW]PF
+ * @rwpf: the [RW]PF instance
+ * @dl: the display list
+ *
+ * This function applies the cached memory buffer address to the display list.
+ */
+static inline void vsp1_rwpf_set_memory(struct vsp1_rwpf *rwpf,
+					struct vsp1_dl_list *dl)
+{
+	rwpf->entity.ops->set_memory(&rwpf->entity, dl);
+}
 
 #endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index cc09efb..97ef997 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -17,6 +17,7 @@
 #include <media/v4l2-subdev.h>
 
 #include "vsp1.h"
+#include "vsp1_dl.h"
 #include "vsp1_sru.h"
 
 #define SRU_MIN_SIZE				4U
@@ -26,14 +27,10 @@
  * Device Access
  */
 
-static inline u32 vsp1_sru_read(struct vsp1_sru *sru, u32 reg)
+static inline void vsp1_sru_write(struct vsp1_sru *sru, struct vsp1_dl_list *dl,
+				  u32 reg, u32 data)
 {
-	return vsp1_read(sru->entity.vsp1, reg);
-}
-
-static inline void vsp1_sru_write(struct vsp1_sru *sru, u32 reg, u32 data)
-{
-	vsp1_write(sru->entity.vsp1, reg, data);
+	vsp1_dl_list_write(dl, reg, data);
 }
 
 /* -----------------------------------------------------------------------------
@@ -82,20 +79,10 @@
 {
 	struct vsp1_sru *sru =
 		container_of(ctrl->handler, struct vsp1_sru, ctrls);
-	const struct vsp1_sru_param *param;
-	u32 value;
 
 	switch (ctrl->id) {
 	case V4L2_CID_VSP1_SRU_INTENSITY:
-		param = &vsp1_sru_params[ctrl->val - 1];
-
-		value = vsp1_sru_read(sru, VI6_SRU_CTRL0);
-		value &= ~(VI6_SRU_CTRL0_PARAM0_MASK |
-			   VI6_SRU_CTRL0_PARAM1_MASK);
-		value |= param->ctrl0;
-		vsp1_sru_write(sru, VI6_SRU_CTRL0, value);
-
-		vsp1_sru_write(sru, VI6_SRU_CTRL2, param->ctrl2);
+		sru->intensity = ctrl->val;
 		break;
 	}
 
@@ -118,54 +105,7 @@
 };
 
 /* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
- */
-
-static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
-{
-	struct vsp1_sru *sru = to_sru(subdev);
-	struct v4l2_mbus_framefmt *input;
-	struct v4l2_mbus_framefmt *output;
-	u32 ctrl0;
-	int ret;
-
-	ret = vsp1_entity_set_streaming(&sru->entity, enable);
-	if (ret < 0)
-		return ret;
-
-	if (!enable)
-		return 0;
-
-	input = &sru->entity.formats[SRU_PAD_SINK];
-	output = &sru->entity.formats[SRU_PAD_SOURCE];
-
-	if (input->code == MEDIA_BUS_FMT_ARGB8888_1X32)
-		ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
-		      | VI6_SRU_CTRL0_PARAM4;
-	else
-		ctrl0 = VI6_SRU_CTRL0_PARAM3;
-
-	if (input->width != output->width)
-		ctrl0 |= VI6_SRU_CTRL0_MODE_UPSCALE;
-
-	/* Take the control handler lock to ensure that the CTRL0 value won't be
-	 * changed behind our back by a set control operation.
-	 */
-	if (sru->entity.vsp1->info->uapi)
-		mutex_lock(sru->ctrls.lock);
-	ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
-	       & (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
-	vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
-	if (sru->entity.vsp1->info->uapi)
-		mutex_unlock(sru->ctrls.lock);
-
-	vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
-
-	return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Pad Operations
+ * V4L2 Subdevice Operations
  */
 
 static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
@@ -176,27 +116,9 @@
 		MEDIA_BUS_FMT_ARGB8888_1X32,
 		MEDIA_BUS_FMT_AYUV8_1X32,
 	};
-	struct vsp1_sru *sru = to_sru(subdev);
-	struct v4l2_mbus_framefmt *format;
 
-	if (code->pad == SRU_PAD_SINK) {
-		if (code->index >= ARRAY_SIZE(codes))
-			return -EINVAL;
-
-		code->code = codes[code->index];
-	} else {
-		/* The SRU can't perform format conversion, the sink format is
-		 * always identical to the source format.
-		 */
-		if (code->index)
-			return -EINVAL;
-
-		format = vsp1_entity_get_pad_format(&sru->entity, cfg,
-						    SRU_PAD_SINK, code->which);
-		code->code = format->code;
-	}
-
-	return 0;
+	return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+					  ARRAY_SIZE(codes));
 }
 
 static int sru_enum_frame_size(struct v4l2_subdev *subdev,
@@ -204,10 +126,14 @@
 			       struct v4l2_subdev_frame_size_enum *fse)
 {
 	struct vsp1_sru *sru = to_sru(subdev);
+	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 
-	format = vsp1_entity_get_pad_format(&sru->entity, cfg,
-					    SRU_PAD_SINK, fse->which);
+	config = vsp1_entity_get_pad_config(&sru->entity, cfg, fse->which);
+	if (!config)
+		return -EINVAL;
+
+	format = vsp1_entity_get_pad_format(&sru->entity, config, SRU_PAD_SINK);
 
 	if (fse->index || fse->code != format->code)
 		return -EINVAL;
@@ -233,20 +159,9 @@
 	return 0;
 }
 
-static int sru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			  struct v4l2_subdev_format *fmt)
-{
-	struct vsp1_sru *sru = to_sru(subdev);
-
-	fmt->format = *vsp1_entity_get_pad_format(&sru->entity, cfg, fmt->pad,
-						  fmt->which);
-
-	return 0;
-}
-
-static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_pad_config *cfg,
-			   unsigned int pad, struct v4l2_mbus_framefmt *fmt,
-			   enum v4l2_subdev_format_whence which)
+static void sru_try_format(struct vsp1_sru *sru,
+			   struct v4l2_subdev_pad_config *config,
+			   unsigned int pad, struct v4l2_mbus_framefmt *fmt)
 {
 	struct v4l2_mbus_framefmt *format;
 	unsigned int input_area;
@@ -265,8 +180,8 @@
 
 	case SRU_PAD_SOURCE:
 		/* The SRU can't perform format conversion. */
-		format = vsp1_entity_get_pad_format(&sru->entity, cfg,
-						    SRU_PAD_SINK, which);
+		format = vsp1_entity_get_pad_format(&sru->entity, config,
+						    SRU_PAD_SINK);
 		fmt->code = format->code;
 
 		/* We can upscale by 2 in both direction, but not independently.
@@ -295,57 +210,94 @@
 	fmt->colorspace = V4L2_COLORSPACE_SRGB;
 }
 
-static int sru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
+static int sru_set_format(struct v4l2_subdev *subdev,
+			  struct v4l2_subdev_pad_config *cfg,
 			  struct v4l2_subdev_format *fmt)
 {
 	struct vsp1_sru *sru = to_sru(subdev);
+	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 
-	sru_try_format(sru, cfg, fmt->pad, &fmt->format, fmt->which);
+	config = vsp1_entity_get_pad_config(&sru->entity, cfg, fmt->which);
+	if (!config)
+		return -EINVAL;
 
-	format = vsp1_entity_get_pad_format(&sru->entity, cfg, fmt->pad,
-					    fmt->which);
+	sru_try_format(sru, config, fmt->pad, &fmt->format);
+
+	format = vsp1_entity_get_pad_format(&sru->entity, config, fmt->pad);
 	*format = fmt->format;
 
 	if (fmt->pad == SRU_PAD_SINK) {
 		/* Propagate the format to the source pad. */
-		format = vsp1_entity_get_pad_format(&sru->entity, cfg,
-						    SRU_PAD_SOURCE, fmt->which);
+		format = vsp1_entity_get_pad_format(&sru->entity, config,
+						    SRU_PAD_SOURCE);
 		*format = fmt->format;
 
-		sru_try_format(sru, cfg, SRU_PAD_SOURCE, format, fmt->which);
+		sru_try_format(sru, config, SRU_PAD_SOURCE, format);
 	}
 
 	return 0;
 }
 
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops sru_video_ops = {
-	.s_stream = sru_s_stream,
-};
-
 static struct v4l2_subdev_pad_ops sru_pad_ops = {
+	.init_cfg = vsp1_entity_init_cfg,
 	.enum_mbus_code = sru_enum_mbus_code,
 	.enum_frame_size = sru_enum_frame_size,
-	.get_fmt = sru_get_format,
+	.get_fmt = vsp1_subdev_get_pad_format,
 	.set_fmt = sru_set_format,
 };
 
 static struct v4l2_subdev_ops sru_ops = {
-	.video	= &sru_video_ops,
 	.pad    = &sru_pad_ops,
 };
 
 /* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void sru_configure(struct vsp1_entity *entity,
+			  struct vsp1_pipeline *pipe,
+			  struct vsp1_dl_list *dl)
+{
+	const struct vsp1_sru_param *param;
+	struct vsp1_sru *sru = to_sru(&entity->subdev);
+	struct v4l2_mbus_framefmt *input;
+	struct v4l2_mbus_framefmt *output;
+	u32 ctrl0;
+
+	input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+					   SRU_PAD_SINK);
+	output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+					    SRU_PAD_SOURCE);
+
+	if (input->code == MEDIA_BUS_FMT_ARGB8888_1X32)
+		ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
+		      | VI6_SRU_CTRL0_PARAM4;
+	else
+		ctrl0 = VI6_SRU_CTRL0_PARAM3;
+
+	if (input->width != output->width)
+		ctrl0 |= VI6_SRU_CTRL0_MODE_UPSCALE;
+
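+	/* The intensity value is 1-based; the parameters table is 0-indexed. */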
+	param = &vsp1_sru_params[sru->intensity - 1];
+
+	ctrl0 |= param->ctrl0;
+
+	vsp1_sru_write(sru, dl, VI6_SRU_CTRL0, ctrl0);
+	vsp1_sru_write(sru, dl, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
+	vsp1_sru_write(sru, dl, VI6_SRU_CTRL2, param->ctrl2);
+}
+
+static const struct vsp1_entity_operations sru_entity_ops = {
+	.configure = sru_configure,
+};
+
+/* -----------------------------------------------------------------------------
  * Initialization and Cleanup
  */
 
 struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
 {
-	struct v4l2_subdev *subdev;
 	struct vsp1_sru *sru;
 	int ret;
 
@@ -353,29 +305,19 @@
 	if (sru == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	sru->entity.ops = &sru_entity_ops;
 	sru->entity.type = VSP1_ENTITY_SRU;
 
-	ret = vsp1_entity_init(vsp1, &sru->entity, 2);
+	ret = vsp1_entity_init(vsp1, &sru->entity, "sru", 2, &sru_ops);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
-	/* Initialize the V4L2 subdev. */
-	subdev = &sru->entity.subdev;
-	v4l2_subdev_init(subdev, &sru_ops);
-
-	subdev->entity.ops = &vsp1->media_ops;
-	subdev->internal_ops = &vsp1_subdev_internal_ops;
-	snprintf(subdev->name, sizeof(subdev->name), "%s sru",
-		 dev_name(vsp1->dev));
-	v4l2_set_subdevdata(subdev, sru);
-	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
-	vsp1_entity_init_formats(subdev, NULL);
-
 	/* Initialize the control handler. */
 	v4l2_ctrl_handler_init(&sru->ctrls, 1);
 	v4l2_ctrl_new_custom(&sru->ctrls, &sru_intensity_control, NULL);
 
+	sru->intensity = 1;
+
 	sru->entity.subdev.ctrl_handler = &sru->ctrls;
 
 	if (sru->ctrls.error) {
diff --git a/drivers/media/platform/vsp1/vsp1_sru.h b/drivers/media/platform/vsp1/vsp1_sru.h
index b6768bf..85e2414 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.h
+++ b/drivers/media/platform/vsp1/vsp1_sru.h
@@ -28,6 +28,8 @@
 	struct vsp1_entity entity;
 
 	struct v4l2_ctrl_handler ctrls;
+
+	unsigned int intensity;
 };
 
 static inline struct vsp1_sru *to_sru(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index bba6777..1875e29 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -17,6 +17,7 @@
 #include <media/v4l2-subdev.h>
 
 #include "vsp1.h"
+#include "vsp1_dl.h"
 #include "vsp1_uds.h"
 
 #define UDS_MIN_SIZE				4U
@@ -29,19 +30,21 @@
  * Device Access
  */
 
-static inline void vsp1_uds_write(struct vsp1_uds *uds, u32 reg, u32 data)
+static inline void vsp1_uds_write(struct vsp1_uds *uds, struct vsp1_dl_list *dl,
+				  u32 reg, u32 data)
 {
-	vsp1_write(uds->entity.vsp1,
-		   reg + uds->entity.index * VI6_UDS_OFFSET, data);
+	vsp1_dl_list_write(dl, reg + uds->entity.index * VI6_UDS_OFFSET, data);
 }
 
 /* -----------------------------------------------------------------------------
  * Scaling Computation
  */
 
-void vsp1_uds_set_alpha(struct vsp1_uds *uds, unsigned int alpha)
+void vsp1_uds_set_alpha(struct vsp1_uds *uds, struct vsp1_dl_list *dl,
+			unsigned int alpha)
 {
-	vsp1_uds_write(uds, VI6_UDS_ALPVAL, alpha << VI6_UDS_ALPVAL_VAL0_SHIFT);
+	vsp1_uds_write(uds, dl, VI6_UDS_ALPVAL,
+		       alpha << VI6_UDS_ALPVAL_VAL0_SHIFT);
 }
 
 /*
@@ -105,60 +108,6 @@
 }
 
 /* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
- */
-
-static int uds_s_stream(struct v4l2_subdev *subdev, int enable)
-{
-	struct vsp1_uds *uds = to_uds(subdev);
-	const struct v4l2_mbus_framefmt *output;
-	const struct v4l2_mbus_framefmt *input;
-	unsigned int hscale;
-	unsigned int vscale;
-	bool multitap;
-
-	if (!enable)
-		return 0;
-
-	input = &uds->entity.formats[UDS_PAD_SINK];
-	output = &uds->entity.formats[UDS_PAD_SOURCE];
-
-	hscale = uds_compute_ratio(input->width, output->width);
-	vscale = uds_compute_ratio(input->height, output->height);
-
-	dev_dbg(uds->entity.vsp1->dev, "hscale %u vscale %u\n", hscale, vscale);
-
-	/* Multi-tap scaling can't be enabled along with alpha scaling when
-	 * scaling down with a factor lower than or equal to 1/2 in either
-	 * direction.
-	 */
-	if (uds->scale_alpha && (hscale >= 8192 || vscale >= 8192))
-		multitap = false;
-	else
-		multitap = true;
-
-	vsp1_uds_write(uds, VI6_UDS_CTRL,
-		       (uds->scale_alpha ? VI6_UDS_CTRL_AON : 0) |
-		       (multitap ? VI6_UDS_CTRL_BC : 0));
-
-	vsp1_uds_write(uds, VI6_UDS_PASS_BWIDTH,
-		       (uds_passband_width(hscale)
-				<< VI6_UDS_PASS_BWIDTH_H_SHIFT) |
-		       (uds_passband_width(vscale)
-				<< VI6_UDS_PASS_BWIDTH_V_SHIFT));
-
-	/* Set the scaling ratios and the output size. */
-	vsp1_uds_write(uds, VI6_UDS_SCALE,
-		       (hscale << VI6_UDS_SCALE_HFRAC_SHIFT) |
-		       (vscale << VI6_UDS_SCALE_VFRAC_SHIFT));
-	vsp1_uds_write(uds, VI6_UDS_CLIP_SIZE,
-		       (output->width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
-		       (output->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
-
-	return 0;
-}
-
-/* -----------------------------------------------------------------------------
  * V4L2 Subdevice Pad Operations
  */
 
@@ -170,28 +119,9 @@
 		MEDIA_BUS_FMT_ARGB8888_1X32,
 		MEDIA_BUS_FMT_AYUV8_1X32,
 	};
-	struct vsp1_uds *uds = to_uds(subdev);
 
-	if (code->pad == UDS_PAD_SINK) {
-		if (code->index >= ARRAY_SIZE(codes))
-			return -EINVAL;
-
-		code->code = codes[code->index];
-	} else {
-		struct v4l2_mbus_framefmt *format;
-
-		/* The UDS can't perform format conversion, the sink format is
-		 * always identical to the source format.
-		 */
-		if (code->index)
-			return -EINVAL;
-
-		format = vsp1_entity_get_pad_format(&uds->entity, cfg,
-						    UDS_PAD_SINK, code->which);
-		code->code = format->code;
-	}
-
-	return 0;
+	return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+					  ARRAY_SIZE(codes));
 }
 
 static int uds_enum_frame_size(struct v4l2_subdev *subdev,
@@ -199,10 +129,15 @@
 			       struct v4l2_subdev_frame_size_enum *fse)
 {
 	struct vsp1_uds *uds = to_uds(subdev);
+	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 
-	format = vsp1_entity_get_pad_format(&uds->entity, cfg,
-					    UDS_PAD_SINK, fse->which);
+	config = vsp1_entity_get_pad_config(&uds->entity, cfg, fse->which);
+	if (!config)
+		return -EINVAL;
+
+	format = vsp1_entity_get_pad_format(&uds->entity, config,
+					    UDS_PAD_SINK);
 
 	if (fse->index || fse->code != format->code)
 		return -EINVAL;
@@ -222,20 +157,9 @@
 	return 0;
 }
 
-static int uds_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
-			  struct v4l2_subdev_format *fmt)
-{
-	struct vsp1_uds *uds = to_uds(subdev);
-
-	fmt->format = *vsp1_entity_get_pad_format(&uds->entity, cfg, fmt->pad,
-						  fmt->which);
-
-	return 0;
-}
-
-static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_pad_config *cfg,
-			   unsigned int pad, struct v4l2_mbus_framefmt *fmt,
-			   enum v4l2_subdev_format_whence which)
+static void uds_try_format(struct vsp1_uds *uds,
+			   struct v4l2_subdev_pad_config *config,
+			   unsigned int pad, struct v4l2_mbus_framefmt *fmt)
 {
 	struct v4l2_mbus_framefmt *format;
 	unsigned int minimum;
@@ -254,8 +178,8 @@
 
 	case UDS_PAD_SOURCE:
 		/* The UDS scales but can't perform format conversion. */
-		format = vsp1_entity_get_pad_format(&uds->entity, cfg,
-						    UDS_PAD_SINK, which);
+		format = vsp1_entity_get_pad_format(&uds->entity, config,
+						    UDS_PAD_SINK);
 		fmt->code = format->code;
 
 		uds_output_limits(format->width, &minimum, &maximum);
@@ -269,25 +193,30 @@
 	fmt->colorspace = V4L2_COLORSPACE_SRGB;
 }
 
-static int uds_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
+static int uds_set_format(struct v4l2_subdev *subdev,
+			  struct v4l2_subdev_pad_config *cfg,
 			  struct v4l2_subdev_format *fmt)
 {
 	struct vsp1_uds *uds = to_uds(subdev);
+	struct v4l2_subdev_pad_config *config;
 	struct v4l2_mbus_framefmt *format;
 
-	uds_try_format(uds, cfg, fmt->pad, &fmt->format, fmt->which);
+	config = vsp1_entity_get_pad_config(&uds->entity, cfg, fmt->which);
+	if (!config)
+		return -EINVAL;
 
-	format = vsp1_entity_get_pad_format(&uds->entity, cfg, fmt->pad,
-					    fmt->which);
+	uds_try_format(uds, config, fmt->pad, &fmt->format);
+
+	format = vsp1_entity_get_pad_format(&uds->entity, config, fmt->pad);
 	*format = fmt->format;
 
 	if (fmt->pad == UDS_PAD_SINK) {
 		/* Propagate the format to the source pad. */
-		format = vsp1_entity_get_pad_format(&uds->entity, cfg,
-						    UDS_PAD_SOURCE, fmt->which);
+		format = vsp1_entity_get_pad_format(&uds->entity, config,
+						    UDS_PAD_SOURCE);
 		*format = fmt->format;
 
-		uds_try_format(uds, cfg, UDS_PAD_SOURCE, format, fmt->which);
+		uds_try_format(uds, config, UDS_PAD_SOURCE, format);
 	}
 
 	return 0;
@@ -297,55 +226,97 @@
  * V4L2 Subdevice Operations
  */
 
-static struct v4l2_subdev_video_ops uds_video_ops = {
-	.s_stream = uds_s_stream,
-};
-
 static struct v4l2_subdev_pad_ops uds_pad_ops = {
+	.init_cfg = vsp1_entity_init_cfg,
 	.enum_mbus_code = uds_enum_mbus_code,
 	.enum_frame_size = uds_enum_frame_size,
-	.get_fmt = uds_get_format,
+	.get_fmt = vsp1_subdev_get_pad_format,
 	.set_fmt = uds_set_format,
 };
 
 static struct v4l2_subdev_ops uds_ops = {
-	.video	= &uds_video_ops,
 	.pad    = &uds_pad_ops,
 };
 
 /* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void uds_configure(struct vsp1_entity *entity,
+			  struct vsp1_pipeline *pipe,
+			  struct vsp1_dl_list *dl)
+{
+	struct vsp1_uds *uds = to_uds(&entity->subdev);
+	const struct v4l2_mbus_framefmt *output;
+	const struct v4l2_mbus_framefmt *input;
+	unsigned int hscale;
+	unsigned int vscale;
+	bool multitap;
+
+	input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+					   UDS_PAD_SINK);
+	output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+					    UDS_PAD_SOURCE);
+
+	hscale = uds_compute_ratio(input->width, output->width);
+	vscale = uds_compute_ratio(input->height, output->height);
+
+	dev_dbg(uds->entity.vsp1->dev, "hscale %u vscale %u\n", hscale, vscale);
+
+	/* Multi-tap scaling can't be enabled along with alpha scaling when
+	 * scaling down with a factor lower than or equal to 1/2 in either
+	 * direction.
+	 */
+	if (uds->scale_alpha && (hscale >= 8192 || vscale >= 8192))
+		multitap = false;
+	else
+		multitap = true;
+
+	vsp1_uds_write(uds, dl, VI6_UDS_CTRL,
+		       (uds->scale_alpha ? VI6_UDS_CTRL_AON : 0) |
+		       (multitap ? VI6_UDS_CTRL_BC : 0));
+
+	vsp1_uds_write(uds, dl, VI6_UDS_PASS_BWIDTH,
+		       (uds_passband_width(hscale)
+				<< VI6_UDS_PASS_BWIDTH_H_SHIFT) |
+		       (uds_passband_width(vscale)
+				<< VI6_UDS_PASS_BWIDTH_V_SHIFT));
+
+	/* Set the scaling ratios and the output size. */
+	vsp1_uds_write(uds, dl, VI6_UDS_SCALE,
+		       (hscale << VI6_UDS_SCALE_HFRAC_SHIFT) |
+		       (vscale << VI6_UDS_SCALE_VFRAC_SHIFT));
+	vsp1_uds_write(uds, dl, VI6_UDS_CLIP_SIZE,
+		       (output->width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
+		       (output->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
+}
+
+static const struct vsp1_entity_operations uds_entity_ops = {
+	.configure = uds_configure,
+};
+
+/* -----------------------------------------------------------------------------
  * Initialization and Cleanup
  */
 
 struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index)
 {
-	struct v4l2_subdev *subdev;
 	struct vsp1_uds *uds;
+	char name[6];
 	int ret;
 
 	uds = devm_kzalloc(vsp1->dev, sizeof(*uds), GFP_KERNEL);
 	if (uds == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	uds->entity.ops = &uds_entity_ops;
 	uds->entity.type = VSP1_ENTITY_UDS;
 	uds->entity.index = index;
 
-	ret = vsp1_entity_init(vsp1, &uds->entity, 2);
+	sprintf(name, "uds.%u", index);
+	ret = vsp1_entity_init(vsp1, &uds->entity, name, 2, &uds_ops);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
-	/* Initialize the V4L2 subdev. */
-	subdev = &uds->entity.subdev;
-	v4l2_subdev_init(subdev, &uds_ops);
-
-	subdev->entity.ops = &vsp1->media_ops;
-	subdev->internal_ops = &vsp1_subdev_internal_ops;
-	snprintf(subdev->name, sizeof(subdev->name), "%s uds.%u",
-		 dev_name(vsp1->dev), index);
-	v4l2_set_subdevdata(subdev, uds);
-	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
-	vsp1_entity_init_formats(subdev, NULL);
-
 	return uds;
 }
diff --git a/drivers/media/platform/vsp1/vsp1_uds.h b/drivers/media/platform/vsp1/vsp1_uds.h
index 031ac0d..5c8cbfca 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.h
+++ b/drivers/media/platform/vsp1/vsp1_uds.h
@@ -35,6 +35,7 @@
 
 struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index);
 
-void vsp1_uds_set_alpha(struct vsp1_uds *uds, unsigned int alpha);
+void vsp1_uds_set_alpha(struct vsp1_uds *uds, struct vsp1_dl_list *dl,
+			unsigned int alpha);
 
 #endif /* __VSP1_UDS_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 72cc7d3..a9aec5c 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -29,6 +29,7 @@
 
 #include "vsp1.h"
 #include "vsp1_bru.h"
+#include "vsp1_dl.h"
 #include "vsp1_entity.h"
 #include "vsp1_pipe.h"
 #include "vsp1_rwpf.h"
@@ -171,209 +172,6 @@
  * Pipeline Management
  */
 
-static int vsp1_video_pipeline_validate_branch(struct vsp1_pipeline *pipe,
-					       struct vsp1_rwpf *input,
-					       struct vsp1_rwpf *output)
-{
-	struct vsp1_entity *entity;
-	struct media_entity_enum ent_enum;
-	struct media_pad *pad;
-	int rval;
-	bool bru_found = false;
-
-	input->location.left = 0;
-	input->location.top = 0;
-
-	rval = media_entity_enum_init(
-		&ent_enum, input->entity.pads[RWPF_PAD_SOURCE].graph_obj.mdev);
-	if (rval)
-		return rval;
-
-	pad = media_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);
-
-	while (1) {
-		if (pad == NULL) {
-			rval = -EPIPE;
-			goto out;
-		}
-
-		/* We've reached a video node, that shouldn't have happened. */
-		if (!is_media_entity_v4l2_subdev(pad->entity)) {
-			rval = -EPIPE;
-			goto out;
-		}
-
-		entity = to_vsp1_entity(
-			media_entity_to_v4l2_subdev(pad->entity));
-
-		/* A BRU is present in the pipeline, store the compose rectangle
-		 * location in the input RPF for use when configuring the RPF.
-		 */
-		if (entity->type == VSP1_ENTITY_BRU) {
-			struct vsp1_bru *bru = to_bru(&entity->subdev);
-			struct v4l2_rect *rect =
-				&bru->inputs[pad->index].compose;
-
-			bru->inputs[pad->index].rpf = input;
-
-			input->location.left = rect->left;
-			input->location.top = rect->top;
-
-			bru_found = true;
-		}
-
-		/* We've reached the WPF, we're done. */
-		if (entity->type == VSP1_ENTITY_WPF)
-			break;
-
-		/* Ensure the branch has no loop. */
-		if (media_entity_enum_test_and_set(&ent_enum,
-						   &entity->subdev.entity)) {
-			rval = -EPIPE;
-			goto out;
-		}
-
-		/* UDS can't be chained. */
-		if (entity->type == VSP1_ENTITY_UDS) {
-			if (pipe->uds) {
-				rval = -EPIPE;
-				goto out;
-			}
-
-			pipe->uds = entity;
-			pipe->uds_input = bru_found ? pipe->bru
-					: &input->entity;
-		}
-
-		/* Follow the source link. The link setup operations ensure
-		 * that the output fan-out can't be more than one, there is thus
-		 * no need to verify here that only a single source link is
-		 * activated.
-		 */
-		pad = &entity->pads[entity->source_pad];
-		pad = media_entity_remote_pad(pad);
-	}
-
-	/* The last entity must be the output WPF. */
-	if (entity != &output->entity)
-		rval = -EPIPE;
-
-out:
-	media_entity_enum_cleanup(&ent_enum);
-
-	return rval;
-}
-
-static int vsp1_video_pipeline_validate(struct vsp1_pipeline *pipe,
-					struct vsp1_video *video)
-{
-	struct media_entity_graph graph;
-	struct media_entity *entity = &video->video.entity;
-	struct media_device *mdev = entity->graph_obj.mdev;
-	unsigned int i;
-	int ret;
-
-	mutex_lock(&mdev->graph_mutex);
-
-	/* Walk the graph to locate the entities and video nodes. */
-	ret = media_entity_graph_walk_init(&graph, mdev);
-	if (ret) {
-		mutex_unlock(&mdev->graph_mutex);
-		return ret;
-	}
-
-	media_entity_graph_walk_start(&graph, entity);
-
-	while ((entity = media_entity_graph_walk_next(&graph))) {
-		struct v4l2_subdev *subdev;
-		struct vsp1_rwpf *rwpf;
-		struct vsp1_entity *e;
-
-		if (!is_media_entity_v4l2_subdev(entity))
-			continue;
-
-		subdev = media_entity_to_v4l2_subdev(entity);
-		e = to_vsp1_entity(subdev);
-		list_add_tail(&e->list_pipe, &pipe->entities);
-
-		if (e->type == VSP1_ENTITY_RPF) {
-			rwpf = to_rwpf(subdev);
-			pipe->inputs[rwpf->entity.index] = rwpf;
-			rwpf->video->pipe_index = ++pipe->num_inputs;
-		} else if (e->type == VSP1_ENTITY_WPF) {
-			rwpf = to_rwpf(subdev);
-			pipe->output = rwpf;
-			rwpf->video->pipe_index = 0;
-		} else if (e->type == VSP1_ENTITY_LIF) {
-			pipe->lif = e;
-		} else if (e->type == VSP1_ENTITY_BRU) {
-			pipe->bru = e;
-		}
-	}
-
-	mutex_unlock(&mdev->graph_mutex);
-
-	media_entity_graph_walk_cleanup(&graph);
-
-	/* We need one output and at least one input. */
-	if (pipe->num_inputs == 0 || !pipe->output) {
-		ret = -EPIPE;
-		goto error;
-	}
-
-	/* Follow links downstream for each input and make sure the graph
-	 * contains no loop and that all branches end at the output WPF.
-	 */
-	for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
-		if (!pipe->inputs[i])
-			continue;
-
-		ret = vsp1_video_pipeline_validate_branch(pipe, pipe->inputs[i],
-							  pipe->output);
-		if (ret < 0)
-			goto error;
-	}
-
-	return 0;
-
-error:
-	vsp1_pipeline_reset(pipe);
-	return ret;
-}
-
-static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
-				    struct vsp1_video *video)
-{
-	int ret;
-
-	mutex_lock(&pipe->lock);
-
-	/* If we're the first user validate and initialize the pipeline. */
-	if (pipe->use_count == 0) {
-		ret = vsp1_video_pipeline_validate(pipe, video);
-		if (ret < 0)
-			goto done;
-	}
-
-	pipe->use_count++;
-	ret = 0;
-
-done:
-	mutex_unlock(&pipe->lock);
-	return ret;
-}
-
-static void vsp1_video_pipeline_cleanup(struct vsp1_pipeline *pipe)
-{
-	mutex_lock(&pipe->lock);
-
-	/* If we're the last user clean up the pipeline. */
-	if (--pipe->use_count == 0)
-		vsp1_pipeline_reset(pipe);
-
-	mutex_unlock(&pipe->lock);
-}
-
 /*
  * vsp1_video_complete_buffer - Complete the current buffer
  * @video: the video node
@@ -391,7 +189,7 @@
 static struct vsp1_vb2_buffer *
 vsp1_video_complete_buffer(struct vsp1_video *video)
 {
-	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
+	struct vsp1_pipeline *pipe = video->rwpf->pipe;
 	struct vsp1_vb2_buffer *next = NULL;
 	struct vsp1_vb2_buffer *done;
 	unsigned long flags;
@@ -425,7 +223,7 @@
 	done->buf.vb2_buf.timestamp = ktime_get_ns();
 	for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
 		vb2_set_plane_payload(&done->buf.vb2_buf, i,
-				      done->mem.length[i]);
+				      vb2_plane_size(&done->buf.vb2_buf, i));
 	vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
 
 	return next;
@@ -444,15 +242,41 @@
 
 	spin_lock_irqsave(&pipe->irqlock, flags);
 
-	video->rwpf->ops->set_memory(video->rwpf, &buf->mem);
+	video->rwpf->mem = buf->mem;
 	pipe->buffers_ready |= 1 << video->pipe_index;
 
 	spin_unlock_irqrestore(&pipe->irqlock, flags);
 }
 
+static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
+{
+	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+	unsigned int i;
+
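+	/* Get a display list unless one was already prepared at setup time. */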
+	if (!pipe->dl)
+		pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
+
+	for (i = 0; i < vsp1->info->rpf_count; ++i) {
+		struct vsp1_rwpf *rwpf = pipe->inputs[i];
+
+		if (rwpf)
+			vsp1_rwpf_set_memory(rwpf, pipe->dl);
+	}
+
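+	/* No memory buffer is needed when the pipeline ends at the LIF. */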
+	if (!pipe->lif)
+		vsp1_rwpf_set_memory(pipe->output, pipe->dl);
+
+	vsp1_dl_list_commit(pipe->dl);
+	pipe->dl = NULL;
+
+	vsp1_pipeline_run(pipe);
+}
+
 static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
 {
 	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+	enum vsp1_pipeline_state state;
+	unsigned long flags;
 	unsigned int i;
 
 	/* Complete buffers on all video nodes. */
@@ -463,8 +287,230 @@
 		vsp1_video_frame_end(pipe, pipe->inputs[i]);
 	}
 
-	if (!pipe->lif)
-		vsp1_video_frame_end(pipe, pipe->output);
+	vsp1_video_frame_end(pipe, pipe->output);
+
+	spin_lock_irqsave(&pipe->irqlock, flags);
+
+	state = pipe->state;
+	pipe->state = VSP1_PIPELINE_STOPPED;
+
+	/* If a stop has been requested, mark the pipeline as stopped and
+	 * return. Otherwise restart the pipeline if ready.
+	 */
+	if (state == VSP1_PIPELINE_STOPPING)
+		wake_up(&pipe->wq);
+	else if (vsp1_pipeline_ready(pipe))
+		vsp1_video_pipeline_run(pipe);
+
+	spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
+					    struct vsp1_rwpf *input,
+					    struct vsp1_rwpf *output)
+{
+	struct media_entity_enum ent_enum;
+	struct vsp1_entity *entity;
+	struct media_pad *pad;
+	bool bru_found = false;
+	int ret;
+
+	ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
+	if (ret < 0)
+		return ret;
+
+	pad = media_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);
+
+	while (1) {
+		if (pad == NULL) {
+			ret = -EPIPE;
+			goto out;
+		}
+
+		/* We've reached a video node; that shouldn't have happened. */
+		if (!is_media_entity_v4l2_subdev(pad->entity)) {
+			ret = -EPIPE;
+			goto out;
+		}
+
+		entity = to_vsp1_entity(
+			media_entity_to_v4l2_subdev(pad->entity));
+
+		/* A BRU is present in the pipeline; store the BRU input pad
+		 * number in the input RPF for use when configuring the RPF.
+		 */
+		if (entity->type == VSP1_ENTITY_BRU) {
+			struct vsp1_bru *bru = to_bru(&entity->subdev);
+
+			bru->inputs[pad->index].rpf = input;
+			input->bru_input = pad->index;
+
+			bru_found = true;
+		}
+
+		/* We've reached the WPF, we're done. */
+		if (entity->type == VSP1_ENTITY_WPF)
+			break;
+
+		/* Ensure the branch has no loop. */
+		if (media_entity_enum_test_and_set(&ent_enum,
+						   &entity->subdev.entity)) {
+			ret = -EPIPE;
+			goto out;
+		}
+
+		/* UDS can't be chained. */
+		if (entity->type == VSP1_ENTITY_UDS) {
+			if (pipe->uds) {
+				ret = -EPIPE;
+				goto out;
+			}
+
+			pipe->uds = entity;
+			pipe->uds_input = bru_found ? pipe->bru
+					: &input->entity;
+		}
+
+		/* Follow the source link. The link setup operations ensure
+		 * that the output fan-out can't be more than one; there is thus
+		 * no need to verify here that only a single source link is
+		 * activated.
+		 */
+		pad = &entity->pads[entity->source_pad];
+		pad = media_entity_remote_pad(pad);
+	}
+
+	/* The last entity must be the output WPF. */
+	if (entity != &output->entity)
+		ret = -EPIPE;
+
+out:
+	media_entity_enum_cleanup(&ent_enum);
+
+	return ret;
+}
+
+static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
+				     struct vsp1_video *video)
+{
+	struct media_entity_graph graph;
+	struct media_entity *entity = &video->video.entity;
+	struct media_device *mdev = entity->graph_obj.mdev;
+	unsigned int i;
+	int ret;
+
+	/* Walk the graph to locate the entities and video nodes. */
+	ret = media_entity_graph_walk_init(&graph, mdev);
+	if (ret)
+		return ret;
+
+	media_entity_graph_walk_start(&graph, entity);
+
+	while ((entity = media_entity_graph_walk_next(&graph))) {
+		struct v4l2_subdev *subdev;
+		struct vsp1_rwpf *rwpf;
+		struct vsp1_entity *e;
+
+		if (!is_media_entity_v4l2_subdev(entity))
+			continue;
+
+		subdev = media_entity_to_v4l2_subdev(entity);
+		e = to_vsp1_entity(subdev);
+		list_add_tail(&e->list_pipe, &pipe->entities);
+
+		if (e->type == VSP1_ENTITY_RPF) {
+			rwpf = to_rwpf(subdev);
+			pipe->inputs[rwpf->entity.index] = rwpf;
+			rwpf->video->pipe_index = ++pipe->num_inputs;
+			rwpf->pipe = pipe;
+		} else if (e->type == VSP1_ENTITY_WPF) {
+			rwpf = to_rwpf(subdev);
+			pipe->output = rwpf;
+			rwpf->video->pipe_index = 0;
+			rwpf->pipe = pipe;
+		} else if (e->type == VSP1_ENTITY_LIF) {
+			pipe->lif = e;
+		} else if (e->type == VSP1_ENTITY_BRU) {
+			pipe->bru = e;
+		}
+	}
+
+	media_entity_graph_walk_cleanup(&graph);
+
+	/* We need one output and at least one input. */
+	if (pipe->num_inputs == 0 || !pipe->output)
+		return -EPIPE;
+
+	/* Follow links downstream for each input and make sure the graph
+	 * contains no loop and that all branches end at the output WPF.
+	 */
+	for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
+		if (!pipe->inputs[i])
+			continue;
+
+		ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
+						       pipe->output);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
+				    struct vsp1_video *video)
+{
+	vsp1_pipeline_init(pipe);
+
+	pipe->frame_end = vsp1_video_pipeline_frame_end;
+
+	return vsp1_video_pipeline_build(pipe, video);
+}
+
+static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
+{
+	struct vsp1_pipeline *pipe;
+	int ret;
+
+	/* Get a pipeline object for the video node. If a pipeline has already
+	 * been allocated, just increment its reference count and return it.
+	 * Otherwise allocate a new pipeline and initialize it; it will be freed
+	 * when the last reference is released.
+	 */
+	if (!video->rwpf->pipe) {
+		pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+		if (!pipe)
+			return ERR_PTR(-ENOMEM);
+
+		ret = vsp1_video_pipeline_init(pipe, video);
+		if (ret < 0) {
+			vsp1_pipeline_reset(pipe);
+			kfree(pipe);
+			return ERR_PTR(ret);
+		}
+	} else {
+		pipe = video->rwpf->pipe;
+		kref_get(&pipe->kref);
+	}
+
+	return pipe;
+}
+
+static void vsp1_video_pipeline_release(struct kref *kref)
+{
+	struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);
+
+	vsp1_pipeline_reset(pipe);
+	kfree(pipe);
+}
+
+static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
+{
+	struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;
+
+	mutex_lock(&mdev->graph_mutex);
+	kref_put(&pipe->kref, vsp1_video_pipeline_release);
+	mutex_unlock(&mdev->graph_mutex);
 }
 
 /* -----------------------------------------------------------------------------
@@ -513,16 +559,16 @@
 	if (vb->num_planes < format->num_planes)
 		return -EINVAL;
 
-	buf->mem.num_planes = vb->num_planes;
-
 	for (i = 0; i < vb->num_planes; ++i) {
 		buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
-		buf->mem.length[i] = vb2_plane_size(vb, i);
 
-		if (buf->mem.length[i] < format->plane_fmt[i].sizeimage)
+		if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
 			return -EINVAL;
 	}
 
+	/* Zero the DMA addresses of the unused planes. */
+	for ( ; i < 3; ++i)
+		buf->mem.addr[i] = 0;
+
 	return 0;
 }
 
@@ -530,7 +576,7 @@
 {
 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
-	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
+	struct vsp1_pipeline *pipe = video->rwpf->pipe;
 	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
 	unsigned long flags;
 	bool empty;
@@ -545,54 +591,66 @@
 
 	spin_lock_irqsave(&pipe->irqlock, flags);
 
-	video->rwpf->ops->set_memory(video->rwpf, &buf->mem);
+	video->rwpf->mem = buf->mem;
 	pipe->buffers_ready |= 1 << video->pipe_index;
 
 	if (vb2_is_streaming(&video->queue) &&
 	    vsp1_pipeline_ready(pipe))
-		vsp1_pipeline_run(pipe);
+		vsp1_video_pipeline_run(pipe);
 
 	spin_unlock_irqrestore(&pipe->irqlock, flags);
 }
 
+static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
+{
+	struct vsp1_entity *entity;
+
+	/* Prepare the display list. */
+	pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
+	if (!pipe->dl)
+		return -ENOMEM;
+
+	if (pipe->uds) {
+		struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
+
+		/* If a BRU is present in the pipeline before the UDS, the alpha
+		 * component doesn't need to be scaled as the BRU output alpha
+		 * value is fixed to 255. Otherwise we need to scale the alpha
+		 * component only when available at the input RPF.
+		 */
+		if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
+			uds->scale_alpha = false;
+		} else {
+			struct vsp1_rwpf *rpf =
+				to_rwpf(&pipe->uds_input->subdev);
+
+			uds->scale_alpha = rpf->fmtinfo->alpha;
+		}
+	}
+
+	list_for_each_entry(entity, &pipe->entities, list_pipe) {
+		vsp1_entity_route_setup(entity, pipe->dl);
+
+		if (entity->ops->configure)
+			entity->ops->configure(entity, pipe, pipe->dl);
+	}
+
+	return 0;
+}
+
 static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
 {
 	struct vsp1_video *video = vb2_get_drv_priv(vq);
-	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
-	struct vsp1_entity *entity;
+	struct vsp1_pipeline *pipe = video->rwpf->pipe;
 	unsigned long flags;
 	int ret;
 
 	mutex_lock(&pipe->lock);
 	if (pipe->stream_count == pipe->num_inputs) {
-		if (pipe->uds) {
-			struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
-
-			/* If a BRU is present in the pipeline before the UDS,
-			 * the alpha component doesn't need to be scaled as the
-			 * BRU output alpha value is fixed to 255. Otherwise we
-			 * need to scale the alpha component only when available
-			 * at the input RPF.
-			 */
-			if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
-				uds->scale_alpha = false;
-			} else {
-				struct vsp1_rwpf *rpf =
-					to_rwpf(&pipe->uds_input->subdev);
-
-				uds->scale_alpha = rpf->fmtinfo->alpha;
-			}
-		}
-
-		list_for_each_entry(entity, &pipe->entities, list_pipe) {
-			vsp1_entity_route_setup(entity);
-
-			ret = v4l2_subdev_call(&entity->subdev, video,
-					       s_stream, 1);
-			if (ret < 0) {
-				mutex_unlock(&pipe->lock);
-				return ret;
-			}
+		ret = vsp1_video_setup_pipeline(pipe);
+		if (ret < 0) {
+			mutex_unlock(&pipe->lock);
+			return ret;
 		}
 	}
 
@@ -601,7 +659,7 @@
 
 	spin_lock_irqsave(&pipe->irqlock, flags);
 	if (vsp1_pipeline_ready(pipe))
-		vsp1_pipeline_run(pipe);
+		vsp1_video_pipeline_run(pipe);
 	spin_unlock_irqrestore(&pipe->irqlock, flags);
 
 	return 0;
@@ -610,7 +668,7 @@
 static void vsp1_video_stop_streaming(struct vb2_queue *vq)
 {
 	struct vsp1_video *video = vb2_get_drv_priv(vq);
-	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
+	struct vsp1_pipeline *pipe = video->rwpf->pipe;
 	struct vsp1_vb2_buffer *buffer;
 	unsigned long flags;
 	int ret;
@@ -621,11 +679,14 @@
 		ret = vsp1_pipeline_stop(pipe);
 		if (ret == -ETIMEDOUT)
 			dev_err(video->vsp1->dev, "pipeline stop timeout\n");
+
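+		/* Release the display list if one is still held. */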
+		vsp1_dl_list_put(pipe->dl);
+		pipe->dl = NULL;
 	}
 	mutex_unlock(&pipe->lock);
 
-	vsp1_video_pipeline_cleanup(pipe);
 	media_entity_pipeline_stop(&video->video.entity);
+	vsp1_video_pipeline_put(pipe);
 
 	/* Remove all buffers from the IRQ queue. */
 	spin_lock_irqsave(&video->irqlock, flags);
@@ -737,6 +798,7 @@
 {
 	struct v4l2_fh *vfh = file->private_data;
 	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+	struct media_device *mdev = &video->vsp1->media_dev;
 	struct vsp1_pipeline *pipe;
 	int ret;
 
@@ -745,18 +807,25 @@
 
 	video->sequence = 0;
 
-	/* Start streaming on the pipeline. No link touching an entity in the
-	 * pipeline can be activated or deactivated once streaming is started.
-	 *
-	 * Use the VSP1 pipeline object embedded in the first video object that
-	 * starts streaming.
+	/* Get a pipeline for the video node and start streaming on it. No link
+	 * touching an entity in the pipeline can be activated or deactivated
+	 * once streaming is started.
 	 */
-	pipe = video->video.entity.pipe
-	     ? to_vsp1_pipeline(&video->video.entity) : &video->pipe;
+	mutex_lock(&mdev->graph_mutex);
 
-	ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
-	if (ret < 0)
-		return ret;
+	pipe = vsp1_video_pipeline_get(video);
+	if (IS_ERR(pipe)) {
+		mutex_unlock(&mdev->graph_mutex);
+		return PTR_ERR(pipe);
+	}
+
+	ret = __media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+	if (ret < 0) {
+		mutex_unlock(&mdev->graph_mutex);
+		goto err_pipe;
+	}
+
+	mutex_unlock(&mdev->graph_mutex);
 
 	/* Verify that the configured format matches the output of the connected
 	 * subdev.
@@ -765,21 +834,17 @@
 	if (ret < 0)
 		goto err_stop;
 
-	ret = vsp1_video_pipeline_init(pipe, video);
-	if (ret < 0)
-		goto err_stop;
-
 	/* Start the queue. */
 	ret = vb2_streamon(&video->queue, type);
 	if (ret < 0)
-		goto err_cleanup;
+		goto err_stop;
 
 	return 0;
 
-err_cleanup:
-	vsp1_video_pipeline_cleanup(pipe);
 err_stop:
 	media_entity_pipeline_stop(&video->video.entity);
+err_pipe:
+	vsp1_video_pipeline_put(pipe);
 	return ret;
 }
 
@@ -895,26 +960,16 @@
 	spin_lock_init(&video->irqlock);
 	INIT_LIST_HEAD(&video->irqqueue);
 
-	vsp1_pipeline_init(&video->pipe);
-	video->pipe.frame_end = vsp1_video_pipeline_frame_end;
-
 	/* Initialize the media entity... */
 	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
 	/* ... and the format ... */
-	rwpf->fmtinfo = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT);
-	rwpf->format.pixelformat = rwpf->fmtinfo->fourcc;
-	rwpf->format.colorspace = V4L2_COLORSPACE_SRGB;
-	rwpf->format.field = V4L2_FIELD_NONE;
+	rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
 	rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
 	rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
-	rwpf->format.num_planes = 1;
-	rwpf->format.plane_fmt[0].bytesperline =
-		rwpf->format.width * rwpf->fmtinfo->bpp[0] / 8;
-	rwpf->format.plane_fmt[0].sizeimage =
-		rwpf->format.plane_fmt[0].bytesperline * rwpf->format.height;
+	__vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);
 
 	/* ... and the video node... */
 	video->video.v4l2_dev = &video->vsp1->v4l2_dev;
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
index 64abd39..867b008 100644
--- a/drivers/media/platform/vsp1/vsp1_video.h
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -18,7 +18,6 @@
 
 #include <media/videobuf2-v4l2.h>
 
-#include "vsp1_pipe.h"
 #include "vsp1_rwpf.h"
 
 struct vsp1_vb2_buffer {
@@ -44,7 +43,6 @@
 
 	struct mutex lock;
 
-	struct vsp1_pipeline pipe;
 	unsigned int pipe_index;
 
 	struct vb2_queue queue;
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index c78d4af..6c91eaa 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -16,83 +16,146 @@
 #include <media/v4l2-subdev.h>
 
 #include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
 #include "vsp1_rwpf.h"
 #include "vsp1_video.h"
 
-#define WPF_MAX_WIDTH				2048
-#define WPF_MAX_HEIGHT				2048
+#define WPF_GEN2_MAX_WIDTH			2048U
+#define WPF_GEN2_MAX_HEIGHT			2048U
+#define WPF_GEN3_MAX_WIDTH			8190U
+#define WPF_GEN3_MAX_HEIGHT			8190U
 
 /* -----------------------------------------------------------------------------
  * Device Access
  */
 
-static inline u32 vsp1_wpf_read(struct vsp1_rwpf *wpf, u32 reg)
+static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf,
+				  struct vsp1_dl_list *dl, u32 reg, u32 data)
 {
-	return vsp1_read(wpf->entity.vsp1,
-			 reg + wpf->entity.index * VI6_WPF_OFFSET);
+	vsp1_dl_list_write(dl, reg + wpf->entity.index * VI6_WPF_OFFSET, data);
 }
 
-static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf, u32 reg, u32 data)
-{
-	vsp1_mod_write(&wpf->entity,
-		       reg + wpf->entity.index * VI6_WPF_OFFSET, data);
-}
-
-/* -----------------------------------------------------------------------------
- * Controls
- */
-
-static int wpf_s_ctrl(struct v4l2_ctrl *ctrl)
-{
-	struct vsp1_rwpf *wpf =
-		container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
-	u32 value;
-
-	if (!vsp1_entity_is_streaming(&wpf->entity))
-		return 0;
-
-	switch (ctrl->id) {
-	case V4L2_CID_ALPHA_COMPONENT:
-		value = vsp1_wpf_read(wpf, VI6_WPF_OUTFMT);
-		value &= ~VI6_WPF_OUTFMT_PDV_MASK;
-		value |= ctrl->val << VI6_WPF_OUTFMT_PDV_SHIFT;
-		vsp1_wpf_write(wpf, VI6_WPF_OUTFMT, value);
-		break;
-	}
-
-	return 0;
-}
-
-static const struct v4l2_ctrl_ops wpf_ctrl_ops = {
-	.s_ctrl = wpf_s_ctrl,
-};
-
 /* -----------------------------------------------------------------------------
  * V4L2 Subdevice Core Operations
  */
 
 static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
 {
-	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&subdev->entity);
 	struct vsp1_rwpf *wpf = to_rwpf(subdev);
 	struct vsp1_device *vsp1 = wpf->entity.vsp1;
-	const struct v4l2_rect *crop = &wpf->crop;
-	unsigned int i;
-	u32 srcrpf = 0;
-	u32 outfmt = 0;
-	int ret;
 
-	ret = vsp1_entity_set_streaming(&wpf->entity, enable);
-	if (ret < 0)
-		return ret;
-
-	if (!enable) {
-		vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
-		vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
-			   VI6_WPF_SRCRPF, 0);
+	if (enable)
 		return 0;
+
+	/* Write to registers directly when stopping the stream as there will be
+	 * no pipeline run to apply the display list.
+	 */
+	vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
+	vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
+		   VI6_WPF_SRCRPF, 0);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops wpf_video_ops = {
+	.s_stream = wpf_s_stream,
+};
+
+static struct v4l2_subdev_ops wpf_ops = {
+	.video	= &wpf_video_ops,
+	.pad    = &vsp1_rwpf_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void vsp1_wpf_destroy(struct vsp1_entity *entity)
+{
+	struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
+
+	vsp1_dlm_destroy(wpf->dlm);
+}
+
+static void wpf_set_memory(struct vsp1_entity *entity, struct vsp1_dl_list *dl)
+{
+	struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
+
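+	/* Set the destination addresses; unused planes are zero. */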
+	vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_Y, wpf->mem.addr[0]);
+	vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C0, wpf->mem.addr[1]);
+	vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C1, wpf->mem.addr[2]);
+}
+
+static void wpf_configure(struct vsp1_entity *entity,
+			  struct vsp1_pipeline *pipe,
+			  struct vsp1_dl_list *dl)
+{
+	struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+	struct vsp1_device *vsp1 = wpf->entity.vsp1;
+	const struct v4l2_mbus_framefmt *source_format;
+	const struct v4l2_mbus_framefmt *sink_format;
+	const struct v4l2_rect *crop;
+	unsigned int i;
+	u32 outfmt = 0;
+	u32 srcrpf = 0;
+
+	/* Cropping */
+	crop = vsp1_rwpf_get_crop(wpf, wpf->entity.config);
+
+	vsp1_wpf_write(wpf, dl, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
+		       (crop->left << VI6_WPF_SZCLIP_OFST_SHIFT) |
+		       (crop->width << VI6_WPF_SZCLIP_SIZE_SHIFT));
+	vsp1_wpf_write(wpf, dl, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
+		       (crop->top << VI6_WPF_SZCLIP_OFST_SHIFT) |
+		       (crop->height << VI6_WPF_SZCLIP_SIZE_SHIFT));
+
+	/* Format */
+	sink_format = vsp1_entity_get_pad_format(&wpf->entity,
+						 wpf->entity.config,
+						 RWPF_PAD_SINK);
+	source_format = vsp1_entity_get_pad_format(&wpf->entity,
+						   wpf->entity.config,
+						   RWPF_PAD_SOURCE);
+
+	if (!pipe->lif) {
+		const struct v4l2_pix_format_mplane *format = &wpf->format;
+		const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
+
+		outfmt = fmtinfo->hwfmt << VI6_WPF_OUTFMT_WRFMT_SHIFT;
+
+		if (fmtinfo->alpha)
+			outfmt |= VI6_WPF_OUTFMT_PXA;
+		if (fmtinfo->swap_yc)
+			outfmt |= VI6_WPF_OUTFMT_SPYCS;
+		if (fmtinfo->swap_uv)
+			outfmt |= VI6_WPF_OUTFMT_SPUVS;
+
+		/* Destination stride and byte swapping. */
+		vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_STRIDE_Y,
+			       format->plane_fmt[0].bytesperline);
+		if (format->num_planes > 1)
+			vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_STRIDE_C,
+				       format->plane_fmt[1].bytesperline);
+
+		vsp1_wpf_write(wpf, dl, VI6_WPF_DSWAP, fmtinfo->swap);
 	}
 
+	if (sink_format->code != source_format->code)
+		outfmt |= VI6_WPF_OUTFMT_CSC;
+
+	outfmt |= wpf->alpha << VI6_WPF_OUTFMT_PDV_SHIFT;
+	vsp1_wpf_write(wpf, dl, VI6_WPF_OUTFMT, outfmt);
+
+	vsp1_dl_list_write(dl, VI6_DPR_WPF_FPORCH(wpf->entity.index),
+			   VI6_DPR_WPF_FPORCH_FP_WPFN);
+
+	vsp1_dl_list_write(dl, VI6_WPF_WRBCK_CTRL, 0);
+
 	/* Sources. If the pipeline has a single input and BRU is not used,
 	 * configure it as the master layer. Otherwise configure all
 	 * inputs as sub-layers and select the virtual RPF as the master
@@ -112,106 +175,18 @@
 	if (pipe->bru || pipe->num_inputs > 1)
 		srcrpf |= VI6_WPF_SRCRPF_VIRACT_MST;
 
-	vsp1_wpf_write(wpf, VI6_WPF_SRCRPF, srcrpf);
-
-	/* Destination stride. */
-	if (!pipe->lif) {
-		struct v4l2_pix_format_mplane *format = &wpf->format;
-
-		vsp1_wpf_write(wpf, VI6_WPF_DSTM_STRIDE_Y,
-			       format->plane_fmt[0].bytesperline);
-		if (format->num_planes > 1)
-			vsp1_wpf_write(wpf, VI6_WPF_DSTM_STRIDE_C,
-				       format->plane_fmt[1].bytesperline);
-	}
-
-	vsp1_wpf_write(wpf, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
-		       (crop->left << VI6_WPF_SZCLIP_OFST_SHIFT) |
-		       (crop->width << VI6_WPF_SZCLIP_SIZE_SHIFT));
-	vsp1_wpf_write(wpf, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
-		       (crop->top << VI6_WPF_SZCLIP_OFST_SHIFT) |
-		       (crop->height << VI6_WPF_SZCLIP_SIZE_SHIFT));
-
-	/* Format */
-	if (!pipe->lif) {
-		const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
-
-		outfmt = fmtinfo->hwfmt << VI6_WPF_OUTFMT_WRFMT_SHIFT;
-
-		if (fmtinfo->alpha)
-			outfmt |= VI6_WPF_OUTFMT_PXA;
-		if (fmtinfo->swap_yc)
-			outfmt |= VI6_WPF_OUTFMT_SPYCS;
-		if (fmtinfo->swap_uv)
-			outfmt |= VI6_WPF_OUTFMT_SPUVS;
-
-		vsp1_wpf_write(wpf, VI6_WPF_DSWAP, fmtinfo->swap);
-	}
-
-	if (wpf->entity.formats[RWPF_PAD_SINK].code !=
-	    wpf->entity.formats[RWPF_PAD_SOURCE].code)
-		outfmt |= VI6_WPF_OUTFMT_CSC;
-
-	/* Take the control handler lock to ensure that the PDV value won't be
-	 * changed behind our back by a set control operation.
-	 */
-	if (vsp1->info->uapi)
-		mutex_lock(wpf->ctrls.lock);
-	outfmt |= wpf->alpha->cur.val << VI6_WPF_OUTFMT_PDV_SHIFT;
-	vsp1_wpf_write(wpf, VI6_WPF_OUTFMT, outfmt);
-	if (vsp1->info->uapi)
-		mutex_unlock(wpf->ctrls.lock);
-
-	vsp1_mod_write(&wpf->entity, VI6_DPR_WPF_FPORCH(wpf->entity.index),
-		       VI6_DPR_WPF_FPORCH_FP_WPFN);
-
-	vsp1_mod_write(&wpf->entity, VI6_WPF_WRBCK_CTRL, 0);
+	vsp1_wpf_write(wpf, dl, VI6_WPF_SRCRPF, srcrpf);
 
 	/* Enable interrupts */
-	vsp1_write(vsp1, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
-	vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index),
-		   VI6_WFP_IRQ_ENB_FREE);
-
-	return 0;
+	vsp1_dl_list_write(dl, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
+	vsp1_dl_list_write(dl, VI6_WPF_IRQ_ENB(wpf->entity.index),
+			   VI6_WFP_IRQ_ENB_FREE);
 }
 
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops wpf_video_ops = {
-	.s_stream = wpf_s_stream,
-};
-
-static struct v4l2_subdev_pad_ops wpf_pad_ops = {
-	.enum_mbus_code = vsp1_rwpf_enum_mbus_code,
-	.enum_frame_size = vsp1_rwpf_enum_frame_size,
-	.get_fmt = vsp1_rwpf_get_format,
-	.set_fmt = vsp1_rwpf_set_format,
-	.get_selection = vsp1_rwpf_get_selection,
-	.set_selection = vsp1_rwpf_set_selection,
-};
-
-static struct v4l2_subdev_ops wpf_ops = {
-	.video	= &wpf_video_ops,
-	.pad    = &wpf_pad_ops,
-};
-
-/* -----------------------------------------------------------------------------
- * Video Device Operations
- */
-
-static void wpf_set_memory(struct vsp1_rwpf *wpf, struct vsp1_rwpf_memory *mem)
-{
-	vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, mem->addr[0]);
-	if (mem->num_planes > 1)
-		vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, mem->addr[1]);
-	if (mem->num_planes > 2)
-		vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, mem->addr[2]);
-}
-
-static const struct vsp1_rwpf_operations wpf_vdev_ops = {
+static const struct vsp1_entity_operations wpf_entity_ops = {
+	.destroy = vsp1_wpf_destroy,
 	.set_memory = wpf_set_memory,
+	.configure = wpf_configure,
 };
 
 /* -----------------------------------------------------------------------------
@@ -220,51 +195,43 @@
 
 struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
 {
-	struct v4l2_subdev *subdev;
 	struct vsp1_rwpf *wpf;
+	char name[6];
 	int ret;
 
 	wpf = devm_kzalloc(vsp1->dev, sizeof(*wpf), GFP_KERNEL);
 	if (wpf == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	wpf->ops = &wpf_vdev_ops;
+	if (vsp1->info->gen == 2) {
+		wpf->max_width = WPF_GEN2_MAX_WIDTH;
+		wpf->max_height = WPF_GEN2_MAX_HEIGHT;
+	} else {
+		wpf->max_width = WPF_GEN3_MAX_WIDTH;
+		wpf->max_height = WPF_GEN3_MAX_HEIGHT;
+	}
 
-	wpf->max_width = WPF_MAX_WIDTH;
-	wpf->max_height = WPF_MAX_HEIGHT;
-
+	wpf->entity.ops = &wpf_entity_ops;
 	wpf->entity.type = VSP1_ENTITY_WPF;
 	wpf->entity.index = index;
 
-	ret = vsp1_entity_init(vsp1, &wpf->entity, 2);
+	sprintf(name, "wpf.%u", index);
+	ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
-	/* Initialize the V4L2 subdev. */
-	subdev = &wpf->entity.subdev;
-	v4l2_subdev_init(subdev, &wpf_ops);
-
-	subdev->entity.ops = &vsp1->media_ops;
-	subdev->internal_ops = &vsp1_subdev_internal_ops;
-	snprintf(subdev->name, sizeof(subdev->name), "%s wpf.%u",
-		 dev_name(vsp1->dev), index);
-	v4l2_set_subdevdata(subdev, wpf);
-	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
-	vsp1_entity_init_formats(subdev, NULL);
+	/* Initialize the display list manager. */
+	wpf->dlm = vsp1_dlm_create(vsp1, index, 4);
+	if (!wpf->dlm) {
+		ret = -ENOMEM;
+		goto error;
+	}
 
 	/* Initialize the control handler. */
-	v4l2_ctrl_handler_init(&wpf->ctrls, 1);
-	wpf->alpha = v4l2_ctrl_new_std(&wpf->ctrls, &wpf_ctrl_ops,
-				       V4L2_CID_ALPHA_COMPONENT,
-				       0, 255, 1, 255);
-
-	wpf->entity.subdev.ctrl_handler = &wpf->ctrls;
-
-	if (wpf->ctrls.error) {
+	ret = vsp1_rwpf_init_ctrls(wpf);
+	if (ret < 0) {
 		dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
 			index);
-		ret = wpf->ctrls.error;
 		goto error;
 	}
 
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index e795a45..feb3b2f 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -351,19 +351,15 @@
 	struct xvip_graph_entity *entity;
 	struct device_node *remote;
 	struct device_node *ep = NULL;
-	struct device_node *next;
 	int ret = 0;
 
 	dev_dbg(xdev->dev, "parsing node %s\n", node->full_name);
 
 	while (1) {
-		next = of_graph_get_next_endpoint(node, ep);
-		if (next == NULL)
+		ep = of_graph_get_next_endpoint(node, ep);
+		if (ep == NULL)
 			break;
 
-		of_node_put(ep);
-		ep = next;
-
 		dev_dbg(xdev->dev, "handling endpoint %s\n", ep->full_name);
 
 		remote = of_graph_get_remote_port_parent(ep);
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 3f61d77..9f5b597 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -873,13 +873,10 @@
 	strlcat(ati_remote->rc_phys, "/input0", sizeof(ati_remote->rc_phys));
 	strlcat(ati_remote->mouse_phys, "/input1", sizeof(ati_remote->mouse_phys));
 
-	if (udev->manufacturer)
-		strlcpy(ati_remote->rc_name, udev->manufacturer,
-			sizeof(ati_remote->rc_name));
-
-	if (udev->product)
-		snprintf(ati_remote->rc_name, sizeof(ati_remote->rc_name),
-			 "%s %s", ati_remote->rc_name, udev->product);
+	/* Build the name from the manufacturer and product strings, if present. */
+	snprintf(ati_remote->rc_name, sizeof(ati_remote->rc_name), "%s%s%s",
+		udev->manufacturer ?: "",
+		udev->manufacturer && udev->product ? " " : "",
+		udev->product ?: "");
 
 	if (!strlen(ati_remote->rc_name))
 		snprintf(ati_remote->rc_name, sizeof(ati_remote->rc_name),
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 35155ae..5cf2e74 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -188,6 +188,7 @@
 #define VENDOR_TWISTEDMELON	0x2596
 #define VENDOR_HAUPPAUGE	0x2040
 #define VENDOR_PCTV		0x2013
+#define VENDOR_ADAPTEC		0x03f3
 
 enum mceusb_model_type {
 	MCE_GEN2 = 0,		/* Most boards */
@@ -302,6 +303,9 @@
 	/* SMK/I-O Data GV-MC7/RCKIT Receiver */
 	{ USB_DEVICE(VENDOR_SMK, 0x0353),
 	  .driver_info = MCE_GEN2_NO_TX },
+	/* SMK RXX6000 Infrared Receiver */
+	{ USB_DEVICE(VENDOR_SMK, 0x0357),
+	  .driver_info = MCE_GEN2_NO_TX },
 	/* Tatung eHome Infrared Transceiver */
 	{ USB_DEVICE(VENDOR_TATUNG, 0x9150) },
 	/* Shuttle eHome Infrared Transceiver */
@@ -405,6 +409,8 @@
 	  .driver_info = HAUPPAUGE_CX_HYBRID_TV },
 	{ USB_DEVICE(VENDOR_PCTV, 0x025e),
 	  .driver_info = HAUPPAUGE_CX_HYBRID_TV },
+	/* Adaptec / HP eHome Receiver */
+	{ USB_DEVICE(VENDOR_ADAPTEC, 0x0094) },
 
 	/* Terminating entry */
 	{ }
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 4e9bbe7..7dfc7c2 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1263,6 +1263,9 @@
 
 static void rc_dev_release(struct device *device)
 {
+	struct rc_dev *dev = to_rc_dev(device);
+
+	kfree(dev);
 }
 
 #define ADD_HOTPLUG_VAR(fmt, val...)					\
@@ -1384,7 +1387,9 @@
 
 	put_device(&dev->dev);
 
-	kfree(dev);
+	/* kfree(dev) will be called by the callback function
+	   rc_dev_release() */
+
 	module_put(THIS_MODULE);
 }
 EXPORT_SYMBOL_GPL(rc_free_device);
@@ -1492,9 +1497,7 @@
 	}
 
 	/* Allow the RC sysfs nodes to be accessible */
-	mutex_lock(&dev->lock);
 	atomic_set(&dev->initialized, 1);
-	mutex_unlock(&dev->lock);
 
 	IR_dprintk(1, "Registered rc%u (driver: %s, remote: %s, mode %s)\n",
 		   dev->minor,
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 18bc745..9af2a15 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -32,14 +32,24 @@
 #include "qm1d1c0042.h"
 
 #define QM1D1C0042_NUM_REGS 0x20
+#define QM1D1C0042_NUM_REG_ROWS 2
 
-static const u8 reg_initval[QM1D1C0042_NUM_REGS] = {
-	0x48, 0x1c, 0xa0, 0x10, 0xbc, 0xc5, 0x20, 0x33,
-	0x06, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
-	0x00, 0xff, 0xf3, 0x00, 0x2a, 0x64, 0xa6, 0x86,
-	0x8c, 0xcf, 0xb8, 0xf1, 0xa8, 0xf2, 0x89, 0x00
+static const u8
+reg_initval[QM1D1C0042_NUM_REG_ROWS][QM1D1C0042_NUM_REGS] = { {
+		0x48, 0x1c, 0xa0, 0x10, 0xbc, 0xc5, 0x20, 0x33,
+		0x06, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+		0x00, 0xff, 0xf3, 0x00, 0x2a, 0x64, 0xa6, 0x86,
+		0x8c, 0xcf, 0xb8, 0xf1, 0xa8, 0xf2, 0x89, 0x00
+	}, {
+		0x68, 0x1c, 0xc0, 0x10, 0xbc, 0xc1, 0x11, 0x33,
+		0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+		0x00, 0xff, 0xf3, 0x00, 0x3f, 0x25, 0x5c, 0xd6,
+		0x55, 0xcf, 0x95, 0xf6, 0x36, 0xf2, 0x09, 0x00
+	}
 };
 
+static int reg_index;
+
 static const struct qm1d1c0042_config default_cfg = {
 	.xtal_freq = 16000,
 	.lpf = 1,
@@ -320,7 +330,6 @@
 	int i, ret;
 
 	state = fe->tuner_priv;
-	memcpy(state->regs, reg_initval, sizeof(reg_initval));
 
 	reg_write(state, 0x01, 0x0c);
 	reg_write(state, 0x01, 0x0c);
@@ -330,15 +339,22 @@
 		goto failed;
 	usleep_range(2000, 3000);
 
-	val = state->regs[0x01] | 0x10;
-	ret = reg_write(state, 0x01, val); /* soft reset off */
+	ret = reg_write(state, 0x01, 0x1c); /* soft reset off */
 	if (ret < 0)
 		goto failed;
 
-	/* check ID */
+	/* check the ID and choose the initial registers corresponding to it */
 	ret = reg_read(state, 0x00, &val);
-	if (ret < 0 || val != 0x48)
+	if (ret < 0)
 		goto failed;
+	for (reg_index = 0; reg_index < QM1D1C0042_NUM_REG_ROWS;
+	     reg_index++) {
+		if (val == reg_initval[reg_index][0x00])
+			break;
+	}
+	if (reg_index >= QM1D1C0042_NUM_REG_ROWS)
+		goto failed;
+	memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS);
 	usleep_range(2000, 3000);
 
 	state->regs[0x0c] |= 0x40;
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 243ac38..b07a681 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -84,11 +84,22 @@
 	struct si2157_cmd cmd;
 	const struct firmware *fw;
 	const char *fw_name;
-	unsigned int chip_id;
+	unsigned int uitmp, chip_id;
 
 	dev_dbg(&client->dev, "\n");
 
-	if (dev->fw_loaded)
+	/* Returned IF frequency is garbage when firmware is not running */
+	memcpy(cmd.args, "\x15\x00\x06\x07", 4);
+	cmd.wlen = 4;
+	cmd.rlen = 4;
+	ret = si2157_cmd_execute(client, &cmd);
+	if (ret)
+		goto err;
+
+	uitmp = cmd.args[2] << 0 | cmd.args[3] << 8;
+	dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp);
+
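+	/* A matching IF frequency means the firmware is already running. */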
+	if (uitmp == dev->if_frequency / 1000)
 		goto warm;
 
 	/* power up */
@@ -203,9 +214,6 @@
 
 	dev_info(&client->dev, "firmware version: %c.%c.%d\n",
 			cmd.args[6], cmd.args[7], cmd.args[8]);
-
-	dev->fw_loaded = true;
-
 warm:
 	/* init statistics in order to signal the app which ones are supported */
 	c->strength.len = 1;
@@ -422,7 +430,6 @@
 	dev->fe = cfg->fe;
 	dev->inversion = cfg->inversion;
 	dev->if_port = cfg->if_port;
-	dev->fw_loaded = false;
 	dev->chiptype = (u8)id->driver_data;
 	dev->if_frequency = 5000000; /* default value of property 0x0706 */
 	mutex_init(&dev->i2c_mutex);
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index 589d558..d6b2c7b 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -26,7 +26,6 @@
 	struct mutex i2c_mutex;
 	struct dvb_frontend *fe;
 	bool active;
-	bool fw_loaded;
 	bool inversion;
 	u8 chiptype;
 	u8 if_port;
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index ca861ae..6b469e8 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -228,10 +228,6 @@
 				"au8522", 0x8e >> 1, NULL);
 		if (sd == NULL)
 			pr_err("analog subdev registration failed\n");
-#ifdef CONFIG_MEDIA_CONTROLLER
-		if (sd)
-			dev->decoder = &sd->entity;
-#endif
 	}
 
 	/* Setup tuners */
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 5dc82e8..321ea5c 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -131,16 +131,36 @@
 	return status;
 }
 
+#ifdef CONFIG_MEDIA_CONTROLLER
+static void au0828_media_graph_notify(struct media_entity *new,
+				      void *notify_data);
+#endif
+
 static void au0828_unregister_media_device(struct au0828_dev *dev)
 {
-
 #ifdef CONFIG_MEDIA_CONTROLLER
-	if (dev->media_dev &&
-		media_devnode_is_registered(&dev->media_dev->devnode)) {
-		media_device_unregister(dev->media_dev);
-		media_device_cleanup(dev->media_dev);
-		dev->media_dev = NULL;
+	struct media_device *mdev = dev->media_dev;
+	struct media_entity_notify *notify, *nextp;
+
+	if (!mdev || !media_devnode_is_registered(&mdev->devnode))
+		return;
+
+	/* Remove au0828 entity_notify callbacks */
+	list_for_each_entry_safe(notify, nextp, &mdev->entity_notify, list) {
+		if (notify->notify != au0828_media_graph_notify)
+			continue;
+		media_device_unregister_entity_notify(mdev, notify);
 	}
+
+	/* clear enable_source, disable_source */
+	dev->media_dev->source_priv = NULL;
+	dev->media_dev->enable_source = NULL;
+	dev->media_dev->disable_source = NULL;
+
+	media_device_unregister(dev->media_dev);
+	media_device_cleanup(dev->media_dev);
+	kfree(dev->media_dev);
+	dev->media_dev = NULL;
 #endif
 }
 
@@ -166,7 +186,7 @@
 	   Set the status so poll routines can check and avoid
 	   access after disconnect.
 	*/
-	dev->dev_state = DEV_DISCONNECTED;
+	set_bit(DEV_DISCONNECTED, &dev->dev_state);
 
 	au0828_rc_unregister(dev);
 	/* Digital TV */
@@ -192,7 +212,7 @@
 #ifdef CONFIG_MEDIA_CONTROLLER
 	struct media_device *mdev;
 
-	mdev = media_device_get_devres(&udev->dev);
+	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
 	if (!mdev)
 		return -ENOMEM;
 
@@ -456,7 +476,8 @@
 {
 #ifdef CONFIG_MEDIA_CONTROLLER
 	int ret;
-	struct media_entity *entity, *demod = NULL, *tuner = NULL;
+	struct media_entity *entity, *demod = NULL;
+	struct media_link *link;
 
 	if (!dev->media_dev)
 		return 0;
@@ -482,26 +503,37 @@
 	}
 
 	/*
-	 * Find tuner and demod to disable the link between
-	 * the two to avoid disable step when tuner is requested
-	 * by video or audio. Note that this step can't be done
-	 * until dvb graph is created during dvb register.
+	 * Find tuner, decoder and demod.
+	 *
+	 * The tuner and decoder should be cached, as they'll be used by
+	 *	au0828_enable_source.
+	 *
+	 * It also needs to disable the link between tuner and
+	 * decoder/demod, to avoid the disable step when the tuner is requested
+	 * by video or audio. Note that this step can't be done until dvb
+	 * graph is created during dvb register.
 	*/
 	media_device_for_each_entity(entity, dev->media_dev) {
-		if (entity->function == MEDIA_ENT_F_DTV_DEMOD)
+		switch (entity->function) {
+		case MEDIA_ENT_F_TUNER:
+			dev->tuner = entity;
+			break;
+		case MEDIA_ENT_F_ATV_DECODER:
+			dev->decoder = entity;
+			break;
+		case MEDIA_ENT_F_DTV_DEMOD:
 			demod = entity;
-		else if (entity->function == MEDIA_ENT_F_TUNER)
-			tuner = entity;
+			break;
+		}
 	}
-	/* Disable link between tuner and demod */
-	if (tuner && demod) {
-		struct media_link *link;
 
-		list_for_each_entry(link, &demod->links, list) {
-			if (link->sink->entity == demod &&
-			    link->source->entity == tuner) {
+	/* Disable link between tuner->demod and/or tuner->decoder */
+	if (dev->tuner) {
+		list_for_each_entry(link, &dev->tuner->links, list) {
+			if (demod && link->sink->entity == demod)
 				media_entity_setup_link(link, 0);
-			}
+			if (dev->decoder && link->sink->entity == dev->decoder)
+				media_entity_setup_link(link, 0);
 		}
 	}
 
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index b0f0679..3d6687f 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -130,7 +130,7 @@
 	bool first = true;
 
 	/* do nothing if device is disconnected */
-	if (ir->dev->dev_state == DEV_DISCONNECTED)
+	if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
 		return 0;
 
 	/* Check IR int */
@@ -260,7 +260,7 @@
 	cancel_delayed_work_sync(&ir->work);
 
 	/* do nothing if device is disconnected */
-	if (ir->dev->dev_state != DEV_DISCONNECTED) {
+	if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
 		/* Disable IR */
 		au8522_rc_clear(ir, 0xe0, 1 << 4);
 	}
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 13f6dab..7d0ec4c 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -106,14 +106,13 @@
 
 static int check_dev(struct au0828_dev *dev)
 {
-	if (dev->dev_state & DEV_DISCONNECTED) {
+	if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
 		pr_info("v4l2 ioctl: device not present\n");
 		return -ENODEV;
 	}
 
-	if (dev->dev_state & DEV_MISCONFIGURED) {
-		pr_info("v4l2 ioctl: device is misconfigured; "
-		       "close and open it again\n");
+	if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
+		pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
 		return -EIO;
 	}
 	return 0;
@@ -521,8 +520,8 @@
 	if (!dev)
 		return 0;
 
-	if ((dev->dev_state & DEV_DISCONNECTED) ||
-	    (dev->dev_state & DEV_MISCONFIGURED))
+	if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
+	    test_bit(DEV_MISCONFIGURED, &dev->dev_state))
 		return 0;
 
 	if (urb->status < 0) {
@@ -680,8 +679,6 @@
 	if (retval) {
 		pr_err("%s() v4l2_device_register failed\n",
 		       __func__);
-		mutex_unlock(&dev->lock);
-		kfree(dev);
 		return retval;
 	}
 
@@ -692,8 +689,6 @@
 	if (retval) {
 		pr_err("%s() v4l2_ctrl_handler_init failed\n",
 		       __func__);
-		mutex_unlock(&dev->lock);
-		kfree(dev);
 		return retval;
 	}
 	dev->v4l2_dev.ctrl_handler = &dev->v4l2_ctrl_hdl;
@@ -824,10 +819,10 @@
 	int ret = 0;
 
 	dev->stream_state = STREAM_INTERRUPT;
-	if (dev->dev_state == DEV_DISCONNECTED)
+	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
 		return -ENODEV;
 	else if (ret) {
-		dev->dev_state = DEV_MISCONFIGURED;
+		set_bit(DEV_MISCONFIGURED, &dev->dev_state);
 		dprintk(1, "%s device is misconfigured!\n", __func__);
 		return ret;
 	}
@@ -1026,7 +1021,7 @@
 	int ret;
 
 	dprintk(1,
-		"%s called std_set %d dev_state %d stream users %d users %d\n",
+		"%s called std_set %d dev_state %ld stream users %d users %d\n",
 		__func__, dev->std_set_in_tuner_core, dev->dev_state,
 		dev->streaming_users, dev->users);
 
@@ -1045,7 +1040,7 @@
 		au0828_analog_stream_enable(dev);
 		au0828_analog_stream_reset(dev);
 		dev->stream_state = STREAM_OFF;
-		dev->dev_state |= DEV_INITIALIZED;
+		set_bit(DEV_INITIALIZED, &dev->dev_state);
 	}
 	dev->users++;
 	mutex_unlock(&dev->lock);
@@ -1059,7 +1054,7 @@
 	struct video_device *vdev = video_devdata(filp);
 
 	dprintk(1,
-		"%s called std_set %d dev_state %d stream users %d users %d\n",
+		"%s called std_set %d dev_state %ld stream users %d users %d\n",
 		__func__, dev->std_set_in_tuner_core, dev->dev_state,
 		dev->streaming_users, dev->users);
 
@@ -1075,7 +1070,7 @@
 		del_timer_sync(&dev->vbi_timeout);
 	}
 
-	if (dev->dev_state == DEV_DISCONNECTED)
+	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
 		goto end;
 
 	if (dev->users == 1) {
@@ -1135,7 +1130,7 @@
 		.type = V4L2_TUNER_ANALOG_TV,
 	};
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	if (dev->std_set_in_tuner_core)
@@ -1207,7 +1202,7 @@
 	struct video_device *vdev = video_devdata(file);
 	struct au0828_dev *dev = video_drvdata(file);
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1250,7 +1245,7 @@
 {
 	struct au0828_dev *dev = video_drvdata(file);
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	f->fmt.pix.width = dev->width;
@@ -1269,7 +1264,7 @@
 {
 	struct au0828_dev *dev = video_drvdata(file);
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1281,7 +1276,7 @@
 	struct au0828_dev *dev = video_drvdata(file);
 	int rc;
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	rc = check_dev(dev);
@@ -1303,7 +1298,7 @@
 {
 	struct au0828_dev *dev = video_drvdata(file);
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	if (norm == dev->std)
@@ -1335,7 +1330,7 @@
 {
 	struct au0828_dev *dev = video_drvdata(file);
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	*norm = dev->std;
@@ -1357,7 +1352,7 @@
 		[AU0828_VMUX_DVB] = "DVB",
 	};
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	tmp = input->index;
@@ -1387,7 +1382,7 @@
 {
 	struct au0828_dev *dev = video_drvdata(file);
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	*i = dev->ctrl_input;
@@ -1398,7 +1393,7 @@
 {
 	int i;
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	switch (AUVI_INPUT(index).type) {
@@ -1496,7 +1491,7 @@
 {
 	struct au0828_dev *dev = video_drvdata(file);
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	a->index = dev->ctrl_ainput;
@@ -1516,7 +1511,7 @@
 	if (a->index != dev->ctrl_ainput)
 		return -EINVAL;
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 	return 0;
 }
@@ -1534,7 +1529,7 @@
 	if (ret)
 		return ret;
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	strcpy(t->name, "Auvitek tuner");
@@ -1554,7 +1549,7 @@
 	if (t->index != 0)
 		return -EINVAL;
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	au0828_init_tuner(dev);
@@ -1576,7 +1571,7 @@
 
 	if (freq->tuner != 0)
 		return -EINVAL;
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 	freq->frequency = dev->ctrl_freq;
 	return 0;
@@ -1591,7 +1586,7 @@
 	if (freq->tuner != 0)
 		return -EINVAL;
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	au0828_init_tuner(dev);
@@ -1617,7 +1612,7 @@
 {
 	struct au0828_dev *dev = video_drvdata(file);
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1643,7 +1638,7 @@
 	if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	cc->bounds.left = 0;
@@ -1665,7 +1660,7 @@
 {
 	struct au0828_dev *dev = video_drvdata(file);
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	reg->val = au0828_read(dev, reg->reg);
@@ -1678,7 +1673,7 @@
 {
 	struct au0828_dev *dev = video_drvdata(file);
 
-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
 		dev->std_set_in_tuner_core, dev->dev_state);
 
 	return au0828_writereg(dev, reg->reg, reg->val);
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index ff7f851..dd7b378 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -21,6 +21,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/bitops.h>
 #include <linux/usb.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
@@ -54,7 +55,6 @@
 #define NTSC_STD_H      480
 
 #define AU0828_INTERLACED_DEFAULT       1
-#define V4L2_CID_PRIVATE_SHARPNESS  (V4L2_CID_PRIVATE_BASE + 0)
 
 /* Defination for AU0828 USB transfer */
 #define AU0828_MAX_ISO_BUFS    12  /* maybe resize this value in the future */
@@ -121,9 +121,9 @@
 
 /* device state */
 enum au0828_dev_state {
-	DEV_INITIALIZED = 0x01,
-	DEV_DISCONNECTED = 0x02,
-	DEV_MISCONFIGURED = 0x04
+	DEV_INITIALIZED = 0,
+	DEV_DISCONNECTED = 1,
+	DEV_MISCONFIGURED = 2
 };
 
 struct au0828_dev;
@@ -247,7 +247,7 @@
 	int input_type;
 	int std_set_in_tuner_core;
 	unsigned int ctrl_input;
-	enum au0828_dev_state dev_state;
+	unsigned long dev_state; /* defined at enum au0828_dev_state */
 	enum au0828_stream_state stream_state;
 	wait_queue_head_t open;
 
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index c9320d6..00da024 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -360,7 +360,7 @@
 
 		if (count++ > 100) {
 			dprintk(3, "ERROR: Timeout - gpio=%x\n", gpio);
-			return -1;
+			return -EIO;
 		}
 	}
 	return 0;
@@ -856,7 +856,7 @@
 		}
 	}
 	dprintk(3, "Mailbox signature values not found!\n");
-	return -1;
+	return -EIO;
 }
 
 static void mci_write_memory_to_gpio(struct cx231xx *dev, u32 address, u32 value,
@@ -960,13 +960,14 @@
 	p_fw = p_current_fw;
 	if (p_current_fw == NULL) {
 		dprintk(2, "FAIL!!!\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	p_buffer = vmalloc(4096);
 	if (p_buffer == NULL) {
 		dprintk(2, "FAIL!!!\n");
-		return -1;
+		vfree(p_current_fw);
+		return -ENOMEM;
 	}
 
 	dprintk(2, "%s()\n", __func__);
@@ -989,7 +990,9 @@
 	if (retval != 0) {
 		dev_err(dev->dev,
 			"%s: Error with mc417_register_write\n", __func__);
-		return -1;
+		vfree(p_current_fw);
+		vfree(p_buffer);
+		return retval;
 	}
 
 	retval = request_firmware(&firmware, CX231xx_FIRM_IMAGE_NAME,
@@ -1001,7 +1004,9 @@
 			CX231xx_FIRM_IMAGE_NAME);
 		dev_err(dev->dev,
 			"Please fix your hotplug setup, the board will not work without firmware loaded!\n");
-		return -1;
+		vfree(p_current_fw);
+		vfree(p_buffer);
+		return retval;
 	}
 
 	if (firmware->size != CX231xx_FIRM_IMAGE_SIZE) {
@@ -1009,14 +1014,18 @@
 			"ERROR: Firmware size mismatch (have %zd, expected %d)\n",
 			firmware->size, CX231xx_FIRM_IMAGE_SIZE);
 		release_firmware(firmware);
-		return -1;
+		vfree(p_current_fw);
+		vfree(p_buffer);
+		return -EINVAL;
 	}
 
 	if (0 != memcmp(firmware->data, magic, 8)) {
 		dev_err(dev->dev,
 			"ERROR: Firmware magic mismatch, wrong file?\n");
 		release_firmware(firmware);
-		return -1;
+		vfree(p_current_fw);
+		vfree(p_buffer);
+		return -EINVAL;
 	}
 
 	initGPIO(dev);
@@ -1131,21 +1140,21 @@
 		if (retval < 0) {
 			dev_err(dev->dev, "%s: mailbox < 0, error\n",
 				__func__);
-			return -1;
+			return retval;
 		}
 		dev->cx23417_mailbox = retval;
 		retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
 		if (retval < 0) {
 			dev_err(dev->dev,
 				"ERROR: cx23417 firmware ping failed!\n");
-			return -1;
+			return retval;
 		}
 		retval = cx231xx_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1,
 			&version);
 		if (retval < 0) {
 			dev_err(dev->dev,
 				"ERROR: cx23417 firmware get encoder: version failed!\n");
-			return -1;
+			return retval;
 		}
 		dprintk(1, "cx23417 firmware version is 0x%08x\n", version);
 		msleep(200);
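
The cx231xx-417 changes above replace bare "return -1" with real errno
values and free the two vmalloc'd buffers on every early exit. The driver
repeats the vfree() calls at each return; an equivalent and common kernel
idiom centralizes the unwinding behind goto labels. A sketch under
hypothetical names (my_load_firmware, MY_FW_MAX_SIZE and the firmware
file name are made up for illustration):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>

#define MY_FW_MAX_SIZE	(64 * 1024)	/* hypothetical size limit */

static int my_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	void *fw_copy, *bounce;
	int ret;

	fw_copy = vmalloc(MY_FW_MAX_SIZE);
	if (!fw_copy)
		return -ENOMEM;

	bounce = vmalloc(4096);
	if (!bounce) {
		ret = -ENOMEM;
		goto err_free_fw_copy;
	}

	ret = request_firmware(&fw, "my-fw.bin", dev);
	if (ret)
		goto err_free_bounce;

	/* ... validate fw->data and upload it, updating ret on failure ... */

	release_firmware(fw);
err_free_bounce:
	vfree(bounce);
err_free_fw_copy:
	vfree(fw_copy);
	return ret;
}
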
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index f497888..630f4fc 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -752,7 +752,8 @@
 int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size)
 {
 	int errCode = 0;
-	int actlen, ret = -ENOMEM;
+	int actlen = -1;
+	int ret = -ENOMEM;
 	u32 *buffer;
 
 	buffer = kzalloc(4096, GFP_KERNEL);
@@ -1304,6 +1305,9 @@
 	cx231xx_i2c_register(&dev->i2c_bus[1]);
 	cx231xx_i2c_register(&dev->i2c_bus[2]);
 
+	errCode = cx231xx_i2c_mux_create(dev);
+	if (errCode < 0)
+		return errCode;
 	cx231xx_i2c_mux_register(dev, 0);
 	cx231xx_i2c_mux_register(dev, 1);
 
@@ -1426,8 +1430,7 @@
 void cx231xx_dev_uninit(struct cx231xx *dev)
 {
 	/* Un Initialize I2C bus */
-	cx231xx_i2c_mux_unregister(dev, 1);
-	cx231xx_i2c_mux_unregister(dev, 0);
+	cx231xx_i2c_mux_unregister(dev);
 	cx231xx_i2c_unregister(&dev->i2c_bus[2]);
 	cx231xx_i2c_unregister(&dev->i2c_bus[1]);
 	cx231xx_i2c_unregister(&dev->i2c_bus[0]);
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index a29c345..473cd34 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -557,40 +557,41 @@
  * cx231xx_i2c_mux_select()
  * switch i2c master number 1 between port1 and port3
  */
-static int cx231xx_i2c_mux_select(struct i2c_adapter *adap,
-			void *mux_priv, u32 chan_id)
+static int cx231xx_i2c_mux_select(struct i2c_mux_core *muxc, u32 chan_id)
 {
-	struct cx231xx *dev = mux_priv;
+	struct cx231xx *dev = i2c_mux_priv(muxc);
 
 	return cx231xx_enable_i2c_port_3(dev, chan_id);
 }
 
-int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no)
+int cx231xx_i2c_mux_create(struct cx231xx *dev)
 {
-	struct i2c_adapter *i2c_parent = &dev->i2c_bus[1].i2c_adap;
-	/* what is the correct mux_dev? */
-	struct device *mux_dev = dev->dev;
-
-	dev->i2c_mux_adap[mux_no] = i2c_add_mux_adapter(i2c_parent,
-				mux_dev,
-				dev /* mux_priv */,
-				0,
-				mux_no /* chan_id */,
-				0 /* class */,
-				&cx231xx_i2c_mux_select,
-				NULL);
-
-	if (!dev->i2c_mux_adap[mux_no])
-		dev_warn(dev->dev,
-			 "i2c mux %d register FAILED\n", mux_no);
-
+	dev->muxc = i2c_mux_alloc(&dev->i2c_bus[1].i2c_adap, dev->dev, 2, 0, 0,
+				  cx231xx_i2c_mux_select, NULL);
+	if (!dev->muxc)
+		return -ENOMEM;
+	dev->muxc->priv = dev;
 	return 0;
 }
 
-void cx231xx_i2c_mux_unregister(struct cx231xx *dev, int mux_no)
+int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no)
 {
-	i2c_del_mux_adapter(dev->i2c_mux_adap[mux_no]);
-	dev->i2c_mux_adap[mux_no] = NULL;
+	int rc;
+
+	rc = i2c_mux_add_adapter(dev->muxc,
+				 0,
+				 mux_no /* chan_id */,
+				 0 /* class */);
+	if (rc)
+		dev_warn(dev->dev,
+			 "i2c mux %d register FAILED\n", mux_no);
+
+	return rc;
+}
+
+void cx231xx_i2c_mux_unregister(struct cx231xx *dev)
+{
+	i2c_mux_del_adapters(dev->muxc);
 }
 
 struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port)
@@ -603,9 +604,9 @@
 	case I2C_2:
 		return &dev->i2c_bus[2].i2c_adap;
 	case I2C_1_MUX_1:
-		return dev->i2c_mux_adap[0];
+		return dev->muxc->adapter[0];
 	case I2C_1_MUX_3:
-		return dev->i2c_mux_adap[1];
+		return dev->muxc->adapter[1];
 	default:
 		return NULL;
 	}
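
The cx231xx-i2c.c conversion above moves from one i2c_add_mux_adapter()
call per channel to the new i2c mux core: a single i2c_mux_core is
allocated once, channels are added to it, and teardown becomes a single
i2c_mux_del_adapters() call. A compressed sketch of that pattern with
hypothetical driver names (error handling kept minimal):

#include <linux/i2c.h>
#include <linux/i2c-mux.h>

struct my_dev {
	struct device *dev;
	struct i2c_adapter *parent;	/* the muxed master adapter */
	struct i2c_mux_core *muxc;
};

static int my_mux_select(struct i2c_mux_core *muxc, u32 chan_id)
{
	struct my_dev *d = i2c_mux_priv(muxc);

	dev_dbg(d->dev, "routing parent bus to channel %u\n", chan_id);
	/* program the hardware switch here */
	return 0;
}

static int my_mux_setup(struct my_dev *d)
{
	int i, ret;

	/* one core object covering both channels, no extra per-mux priv */
	d->muxc = i2c_mux_alloc(d->parent, d->dev, 2, 0, 0,
				my_mux_select, NULL);
	if (!d->muxc)
		return -ENOMEM;
	d->muxc->priv = d;

	for (i = 0; i < 2; i++) {
		ret = i2c_mux_add_adapter(d->muxc, 0, i, 0);
		if (ret) {
			i2c_mux_del_adapters(d->muxc);
			return ret;
		}
	}
	return 0;
}
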
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index 69f6d20..90c8676 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -624,6 +624,7 @@
 
 	/* I2C adapters: Master 1 & 2 (External) & Master 3 (Internal only) */
 	struct cx231xx_i2c i2c_bus[3];
+	struct i2c_mux_core *muxc;
 	struct i2c_adapter *i2c_mux_adap[2];
 
 	unsigned int xc_fw_load_done:1;
@@ -760,8 +761,9 @@
 void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port);
 int cx231xx_i2c_register(struct cx231xx_i2c *bus);
 int cx231xx_i2c_unregister(struct cx231xx_i2c *bus);
+int cx231xx_i2c_mux_create(struct cx231xx *dev);
 int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no);
-void cx231xx_i2c_mux_unregister(struct cx231xx *dev, int mux_no);
+void cx231xx_i2c_mux_unregister(struct cx231xx *dev);
 struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port);
 
 /* Internal block control functions */
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index df22001..89e629a 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -118,20 +118,20 @@
  * Values 0, 3 and 5 are seen to this day. 0 for single TS and 3/5 for dual TS.
  */
 
-#define EEPROM_BASE_AF9035        0x42fd
-#define EEPROM_BASE_IT9135        0x499c
+#define EEPROM_BASE_AF9035        0x42f5
+#define EEPROM_BASE_IT9135        0x4994
 #define EEPROM_SHIFT                0x10
 
-#define EEPROM_IR_MODE              0x10
-#define EEPROM_TS_MODE              0x29
-#define EEPROM_2ND_DEMOD_ADDR       0x2a
-#define EEPROM_IR_TYPE              0x2c
-#define EEPROM_1_IF_L               0x30
-#define EEPROM_1_IF_H               0x31
-#define EEPROM_1_TUNER_ID           0x34
-#define EEPROM_2_IF_L               0x40
-#define EEPROM_2_IF_H               0x41
-#define EEPROM_2_TUNER_ID           0x44
+#define EEPROM_IR_MODE              0x18
+#define EEPROM_TS_MODE              0x31
+#define EEPROM_2ND_DEMOD_ADDR       0x32
+#define EEPROM_IR_TYPE              0x34
+#define EEPROM_1_IF_L               0x38
+#define EEPROM_1_IF_H               0x39
+#define EEPROM_1_TUNER_ID           0x3c
+#define EEPROM_2_IF_L               0x48
+#define EEPROM_2_IF_H               0x49
+#define EEPROM_2_TUNER_ID           0x4c
 
 /* USB commands */
 #define CMD_MEM_RD                  0x00
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index fa72642..eb7af8c 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1333,10 +1333,7 @@
 	case TUNER_RTL2832_R828D:
 		pdata.clk = dev->rtl2832_platform_data.clk;
 		pdata.tuner = dev->tuner;
-		pdata.i2c_client = dev->i2c_client_demod;
-		pdata.bulk_read = dev->rtl2832_platform_data.bulk_read;
-		pdata.bulk_write = dev->rtl2832_platform_data.bulk_write;
-		pdata.update_bits = dev->rtl2832_platform_data.update_bits;
+		pdata.regmap = dev->rtl2832_platform_data.regmap;
 		pdata.dvb_frontend = adap->fe[0];
 		pdata.dvb_usb_device = d;
 		pdata.v4l2_subdev = subdev;
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 92e47d6..2e71136 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -1090,6 +1090,7 @@
 	{ USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V2) },
 	{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_SAT) },
 	{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_SAT_V2) },
+	{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_SAT_V3) },
 	{ },
 };
 
@@ -1138,7 +1139,7 @@
 
 	.i2c_algo         = &az6027_i2c_algo,
 
-	.num_device_descs = 7,
+	.num_device_descs = 8,
 	.devices = {
 		{
 			.name = "AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)",
@@ -1168,6 +1169,10 @@
 			.name = "Elgato EyeTV Sat",
 			.cold_ids = { &az6027_usb_table[6], NULL },
 			.warm_ids = { NULL },
+		}, {
+			.name = "Elgato EyeTV Sat",
+			.cold_ids = { &az6027_usb_table[7], NULL },
+			.warm_ids = { NULL },
 		},
 		{ NULL },
 	}
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index ea0391e..0857b56 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -3814,6 +3814,7 @@
 	{ USB_DEVICE(USB_VID_PCTV,      USB_PID_PCTV_2002E) },
 	{ USB_DEVICE(USB_VID_PCTV,      USB_PID_PCTV_2002E_SE) },
 	{ USB_DEVICE(USB_VID_PCTV,      USB_PID_DIBCOM_STK8096PVR) },
+	{ USB_DEVICE(USB_VID_DIBCOM,    USB_PID_DIBCOM_STK8096PVR) },
 	{ 0 }		/* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -5017,7 +5018,8 @@
 		.num_device_descs = 1,
 		.devices = {
 			{   "DiBcom STK8096-PVR reference design",
-				{ &dib0700_usb_id_table[83], NULL },
+				{ &dib0700_usb_id_table[83],
+					&dib0700_usb_id_table[84], NULL},
 				{ NULL },
 			},
 		},
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 35de609..6eea4e6 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -184,6 +184,8 @@
 }
 EXPORT_SYMBOL(dibusb_read_eeprom_byte);
 
+#if IS_ENABLED(CONFIG_DVB_DIB3000MC)
+
 /* 3000MC/P stuff */
 // Config Adjacent channels  Perf -cal22
 static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
@@ -242,8 +244,6 @@
 	.agc2_slope2 = 0x1e,
 };
 
-#if IS_ENABLED(CONFIG_DVB_DIB3000MC)
-
 static struct dib3000mc_config mod3000p_dib3000p_config = {
 	&dib3000p_panasonic_agc_config,
 
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 6d0dd85..49b55d7 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -13,6 +13,7 @@
  *
  * see Documentation/dvb/README.dvb-usb for more information
  */
+#include "dvb-usb-ids.h"
 #include "dw2102.h"
 #include "si21xx.h"
 #include "stv0299.h"
@@ -38,61 +39,6 @@
 /* Max transfer size done by I2C transfer functions */
 #define MAX_XFER_SIZE  64
 
-#ifndef USB_PID_DW2102
-#define USB_PID_DW2102 0x2102
-#endif
-
-#ifndef USB_PID_DW2104
-#define USB_PID_DW2104 0x2104
-#endif
-
-#ifndef USB_PID_DW3101
-#define USB_PID_DW3101 0x3101
-#endif
-
-#ifndef USB_PID_CINERGY_S
-#define USB_PID_CINERGY_S 0x0064
-#endif
-
-#ifndef USB_PID_TEVII_S630
-#define USB_PID_TEVII_S630 0xd630
-#endif
-
-#ifndef USB_PID_TEVII_S650
-#define USB_PID_TEVII_S650 0xd650
-#endif
-
-#ifndef USB_PID_TEVII_S660
-#define USB_PID_TEVII_S660 0xd660
-#endif
-
-#ifndef USB_PID_TEVII_S662
-#define USB_PID_TEVII_S662 0xd662
-#endif
-
-#ifndef USB_PID_TEVII_S480_1
-#define USB_PID_TEVII_S480_1 0xd481
-#endif
-
-#ifndef USB_PID_TEVII_S480_2
-#define USB_PID_TEVII_S480_2 0xd482
-#endif
-
-#ifndef USB_PID_PROF_1100
-#define USB_PID_PROF_1100 0xb012
-#endif
-
-#ifndef USB_PID_TEVII_S421
-#define USB_PID_TEVII_S421 0xd421
-#endif
-
-#ifndef USB_PID_TEVII_S632
-#define USB_PID_TEVII_S632 0xd632
-#endif
-
-#ifndef USB_PID_GOTVIEW_SAT_HD
-#define USB_PID_GOTVIEW_SAT_HD 0x5456
-#endif
 
 #define DW210X_READ_MSG 0
 #define DW210X_WRITE_MSG 1
@@ -1709,7 +1655,7 @@
 	[CYPRESS_DW2101] = {USB_DEVICE(USB_VID_CYPRESS, 0x2101)},
 	[CYPRESS_DW2104] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2104)},
 	[TEVII_S650] = {USB_DEVICE(0x9022, USB_PID_TEVII_S650)},
-	[TERRATEC_CINERGY_S] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)},
+	[TERRATEC_CINERGY_S] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S)},
 	[CYPRESS_DW3101] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)},
 	[TEVII_S630] = {USB_DEVICE(0x9022, USB_PID_TEVII_S630)},
 	[PROF_1100] = {USB_DEVICE(0x3011, USB_PID_PROF_1100)},
@@ -1801,7 +1747,7 @@
 			dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
 					DW210X_WRITE_MSG);
 			break;
-		case USB_PID_CINERGY_S:
+		case USB_PID_TERRATEC_CINERGY_S:
 		case USB_PID_DW2102:
 			dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
 					DW210X_WRITE_MSG);
@@ -1843,6 +1789,9 @@
 		msleep(100);
 		kfree(p);
 	}
+
+	if (le16_to_cpu(dev->descriptor.idProduct) == 0x2101)
+		release_firmware(fw);
 	return ret;
 }
 
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index ec397c4..c05de1b 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -995,11 +995,11 @@
 			/* parameter for the MPEG2-data transfer */
 			.stream = {
 				.type = USB_ISOC,
-				.count = 7,
+				.count = 4,
 				.endpoint = 0x02,
 				.u = {
 					.isoc = {
-						.framesperurb = 4,
+						.framesperurb = 64,
 						.framesize = 940,
 						.interval = 1
 					}
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index e382210..d917b0a 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -59,6 +59,8 @@
 	select DVB_DRX39XYJ if MEDIA_SUBDRV_AUTOSELECT
 	select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
 	select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+	select DVB_TC90522 if MEDIA_SUBDRV_AUTOSELECT
+	select MEDIA_TUNER_QM1D1C0042 if MEDIA_SUBDRV_AUTOSELECT
 	---help---
 	  This adds support for DVB cards based on the
 	  Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 930e3e3..e397f54 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -492,6 +492,44 @@
 	{-1,                             -1,   -1,     -1},
 };
 
+static struct em28xx_reg_seq plex_px_bcud[] = {
+	{EM2874_R80_GPIO_P0_CTRL,	0xff,	0xff,	0},
+	{0x0d,				0xff,	0xff,	0},
+	{EM2874_R50_IR_CONFIG,		0x01,	0xff,	0},
+	{EM28XX_R06_I2C_CLK,		0x40,	0xff,	0},
+	{EM2874_R80_GPIO_P0_CTRL,	0xfd,	0xff,	100},
+	{EM28XX_R12_VINENABLE,		0x20,	0x20,	0},
+	{0x0d,				0x42,	0xff,	1000},
+	{EM2874_R80_GPIO_P0_CTRL,	0xfc,	0xff,	10},
+	{EM2874_R80_GPIO_P0_CTRL,	0xfd,	0xff,	10},
+	{0x73,				0xfd,	0xff,	100},
+	{-1,				-1,	-1,	-1},
+};
+
+/*
+ * 2040:0265 Hauppauge WinTV-dualHD DVB
+ * reg 0x80/0x84:
+ * GPIO_0: Yellow LED tuner 1, 0=on, 1=off
+ * GPIO_1: Green LED tuner 1, 0=on, 1=off
+ * GPIO_2: Yellow LED tuner 2, 0=on, 1=off
+ * GPIO_3: Green LED tuner 2, 0=on, 1=off
+ * GPIO_5: Reset #2, 0=active
+ * GPIO_6: Reset #1, 0=active
+ */
+static struct em28xx_reg_seq hauppauge_dualhd_dvb[] = {
+	{EM2874_R80_GPIO_P0_CTRL,      0xff, 0xff,      0},
+	{0x0d,                         0xff, 0xff,    200},
+	{0x50,                         0x04, 0xff,    300},
+	{EM2874_R80_GPIO_P0_CTRL,      0xbf, 0xff,    100}, /* demod 1 reset */
+	{EM2874_R80_GPIO_P0_CTRL,      0xff, 0xff,    100},
+	{EM2874_R80_GPIO_P0_CTRL,      0xdf, 0xff,    100}, /* demod 2 reset */
+	{EM2874_R80_GPIO_P0_CTRL,      0xff, 0xff,    100},
+	{EM2874_R5F_TS_ENABLE,         0x44, 0xff,     50},
+	{EM2874_R5D_TS1_PKT_SIZE,      0x05, 0xff,     50},
+	{EM2874_R5E_TS2_PKT_SIZE,      0x05, 0xff,     50},
+	{-1,                             -1,   -1,     -1},
+};
+
 /*
  *  Button definitions
  */
@@ -571,6 +609,22 @@
 	{-1, 0, 0, 0},
 };
 
+static struct em28xx_led hauppauge_dualhd_leds[] = {
+	{
+		.role      = EM28XX_LED_DIGITAL_CAPTURING,
+		.gpio_reg  = EM2874_R80_GPIO_P0_CTRL,
+		.gpio_mask = EM_GPIO_1,
+		.inverted  = 1,
+	},
+	{
+		.role      = EM28XX_LED_DIGITAL_CAPTURING_TS2,
+		.gpio_reg  = EM2874_R80_GPIO_P0_CTRL,
+		.gpio_mask = EM_GPIO_3,
+		.inverted  = 1,
+	},
+	{-1, 0, 0, 0},
+};
+
 /*
  *  Board definitions
  */
@@ -2306,6 +2360,35 @@
 		.has_dvb       = 1,
 		.ir_codes      = RC_MAP_TERRATEC_SLIM_2,
 	},
+
+	/*
+	 * 3275:0085 PLEX PX-BCUD.
+	 * Empia EM28178, TOSHIBA TC90532XBG, Sharp QM1D1C0042
+	 */
+	[EM28178_BOARD_PLEX_PX_BCUD] = {
+		.name          = "PLEX PX-BCUD",
+		.xclk          = EM28XX_XCLK_FREQUENCY_4_3MHZ,
+		.def_i2c_bus   = 1,
+		.i2c_speed     = EM28XX_I2C_CLK_WAIT_ENABLE,
+		.tuner_type    = TUNER_ABSENT,
+		.tuner_gpio    = plex_px_bcud,
+		.has_dvb       = 1,
+	},
+	/*
+	 * 2040:0265 Hauppauge WinTV-dualHD (DVB version).
+	 * Empia EM28274, 2x Silicon Labs Si2168, 2x Silicon Labs Si2157
+	 */
+	[EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB] = {
+		.name          = "Hauppauge WinTV-dualHD DVB",
+		.def_i2c_bus   = 1,
+		.i2c_speed     = EM28XX_I2C_CLK_WAIT_ENABLE |
+				 EM28XX_I2C_FREQ_400_KHZ,
+		.tuner_type    = TUNER_ABSENT,
+		.tuner_gpio    = hauppauge_dualhd_dvb,
+		.has_dvb       = 1,
+		.ir_codes      = RC_MAP_HAUPPAUGE,
+		.leds          = hauppauge_dualhd_leds,
+	},
 };
 EXPORT_SYMBOL_GPL(em28xx_boards);
 
@@ -2429,6 +2512,8 @@
 			.driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 },
 	{ USB_DEVICE(0x2040, 0x651f),
 			.driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 },
+	{ USB_DEVICE(0x2040, 0x0265),
+			.driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB },
 	{ USB_DEVICE(0x0438, 0xb002),
 			.driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 },
 	{ USB_DEVICE(0x2001, 0xf112),
@@ -2495,6 +2580,8 @@
 			.driver_info = EM2861_BOARD_LEADTEK_VC100 },
 	{ USB_DEVICE(0xeb1a, 0x8179),
 			.driver_info = EM28178_BOARD_TERRATEC_T2_STICK_HD },
+	{ USB_DEVICE(0x3275, 0x0085),
+			.driver_info = EM28178_BOARD_PLEX_PX_BCUD },
 	{ },
 };
 MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -2861,6 +2948,7 @@
 	case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
 	case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
 	case EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C:
+	case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB:
 	{
 		struct tveeprom tv;
 
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 5d209c7..1a5c012 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -58,6 +58,8 @@
 #include "ts2020.h"
 #include "si2168.h"
 #include "si2157.h"
+#include "tc90522.h"
+#include "qm1d1c0042.h"
 
 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
 MODULE_LICENSE("GPL");
@@ -787,6 +789,68 @@
 	return 0;
 }
 
+static void px_bcud_init(struct em28xx *dev)
+{
+	int i;
+	struct {
+		unsigned char r[4];
+		int len;
+	} regs1[] = {
+		{{ 0x0e, 0x77 }, 2},
+		{{ 0x0f, 0x77 }, 2},
+		{{ 0x03, 0x90 }, 2},
+	}, regs2[] = {
+		{{ 0x07, 0x01 }, 2},
+		{{ 0x08, 0x10 }, 2},
+		{{ 0x13, 0x00 }, 2},
+		{{ 0x17, 0x00 }, 2},
+		{{ 0x03, 0x01 }, 2},
+		{{ 0x10, 0xb1 }, 2},
+		{{ 0x11, 0x40 }, 2},
+		{{ 0x85, 0x7a }, 2},
+		{{ 0x87, 0x04 }, 2},
+	};
+	static struct em28xx_reg_seq gpio[] = {
+		{EM28XX_R06_I2C_CLK,		0x40,	0xff,	300},
+		{EM2874_R80_GPIO_P0_CTRL,	0xfd,	0xff,	60},
+		{EM28XX_R15_RGAIN,		0x20,	0xff,	0},
+		{EM28XX_R16_GGAIN,		0x20,	0xff,	0},
+		{EM28XX_R17_BGAIN,		0x20,	0xff,	0},
+		{EM28XX_R18_ROFFSET,		0x00,	0xff,	0},
+		{EM28XX_R19_GOFFSET,		0x00,	0xff,	0},
+		{EM28XX_R1A_BOFFSET,		0x00,	0xff,	0},
+		{EM28XX_R23_UOFFSET,		0x00,	0xff,	0},
+		{EM28XX_R24_VOFFSET,		0x00,	0xff,	0},
+		{EM28XX_R26_COMPR,		0x00,	0xff,	0},
+		{0x13,				0x08,	0xff,	0},
+		{EM28XX_R12_VINENABLE,		0x27,	0xff,	0},
+		{EM28XX_R0C_USBSUSP,		0x10,	0xff,	0},
+		{EM28XX_R27_OUTFMT,		0x00,	0xff,	0},
+		{EM28XX_R10_VINMODE,		0x00,	0xff,	0},
+		{EM28XX_R11_VINCTRL,		0x11,	0xff,	0},
+		{EM2874_R50_IR_CONFIG,		0x01,	0xff,	0},
+		{EM2874_R5F_TS_ENABLE,		0x80,	0xff,	0},
+		{EM28XX_R06_I2C_CLK,		0x46,	0xff,	0},
+	};
+	em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x46);
+	/* sleeping ISDB-T */
+	dev->dvb->i2c_client_demod->addr = 0x14;
+	for (i = 0; i < ARRAY_SIZE(regs1); i++)
+		i2c_master_send(dev->dvb->i2c_client_demod, regs1[i].r,
+				regs1[i].len);
+	/* sleeping ISDB-S */
+	dev->dvb->i2c_client_demod->addr = 0x15;
+	for (i = 0; i < ARRAY_SIZE(regs2); i++)
+		i2c_master_send(dev->dvb->i2c_client_demod, regs2[i].r,
+				regs2[i].len);
+	for (i = 0; i < ARRAY_SIZE(gpio); i++) {
+		em28xx_write_reg_bits(dev, gpio[i].reg, gpio[i].val,
+				      gpio[i].mask);
+		if (gpio[i].sleep > 0)
+			msleep(gpio[i].sleep);
+	}
+}
+
 static struct mt352_config terratec_xs_mt352_cfg = {
 	.demod_address = (0x1e >> 1),
 	.no_tuner = 1,
@@ -1762,6 +1826,127 @@
 			dvb->i2c_client_tuner = client;
 		}
 		break;
+
+	case EM28178_BOARD_PLEX_PX_BCUD:
+		{
+			struct i2c_client *client;
+			struct i2c_board_info info;
+			struct tc90522_config tc90522_config;
+			struct qm1d1c0042_config qm1d1c0042_config;
+
+			/* attach demod */
+			memset(&tc90522_config, 0, sizeof(tc90522_config));
+			memset(&info, 0, sizeof(struct i2c_board_info));
+			strlcpy(info.type, "tc90522sat", I2C_NAME_SIZE);
+			info.addr = 0x15;
+			info.platform_data = &tc90522_config;
+			request_module("tc90522");
+			client = i2c_new_device(&dev->i2c_adap[dev->def_i2c_bus], &info);
+			if (client == NULL || client->dev.driver == NULL) {
+				result = -ENODEV;
+				goto out_free;
+			}
+			dvb->i2c_client_demod = client;
+			if (!try_module_get(client->dev.driver->owner)) {
+				i2c_unregister_device(client);
+				result = -ENODEV;
+				goto out_free;
+			}
+
+			/* attach tuner */
+			memset(&qm1d1c0042_config, 0,
+			       sizeof(qm1d1c0042_config));
+			qm1d1c0042_config.fe = tc90522_config.fe;
+			qm1d1c0042_config.lpf = 1;
+			memset(&info, 0, sizeof(struct i2c_board_info));
+			strlcpy(info.type, "qm1d1c0042", I2C_NAME_SIZE);
+			info.addr = 0x61;
+			info.platform_data = &qm1d1c0042_config;
+			request_module(info.type);
+			client = i2c_new_device(tc90522_config.tuner_i2c,
+						&info);
+			if (client == NULL || client->dev.driver == NULL) {
+				module_put(dvb->i2c_client_demod->dev.driver->owner);
+				i2c_unregister_device(dvb->i2c_client_demod);
+				result = -ENODEV;
+				goto out_free;
+			}
+			dvb->i2c_client_tuner = client;
+			if (!try_module_get(client->dev.driver->owner)) {
+				i2c_unregister_device(client);
+				module_put(dvb->i2c_client_demod->dev.driver->owner);
+				i2c_unregister_device(dvb->i2c_client_demod);
+				result = -ENODEV;
+				goto out_free;
+			}
+			dvb->fe[0] = tc90522_config.fe;
+			px_bcud_init(dev);
+		}
+		break;
+	case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB:
+		{
+			struct i2c_adapter *adapter;
+			struct i2c_client *client;
+			struct i2c_board_info info;
+			struct si2168_config si2168_config;
+			struct si2157_config si2157_config;
+
+			/* attach demod */
+			memset(&si2168_config, 0, sizeof(si2168_config));
+			si2168_config.i2c_adapter = &adapter;
+			si2168_config.fe = &dvb->fe[0];
+			si2168_config.ts_mode = SI2168_TS_SERIAL;
+			memset(&info, 0, sizeof(struct i2c_board_info));
+			strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+			info.addr = 0x64;
+			info.platform_data = &si2168_config;
+			request_module(info.type);
+			client = i2c_new_device(&dev->i2c_adap[dev->def_i2c_bus], &info);
+			if (client == NULL || client->dev.driver == NULL) {
+				result = -ENODEV;
+				goto out_free;
+			}
+
+			if (!try_module_get(client->dev.driver->owner)) {
+				i2c_unregister_device(client);
+				result = -ENODEV;
+				goto out_free;
+			}
+
+			dvb->i2c_client_demod = client;
+
+			/* attach tuner */
+			memset(&si2157_config, 0, sizeof(si2157_config));
+			si2157_config.fe = dvb->fe[0];
+			si2157_config.if_port = 1;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+			si2157_config.mdev = dev->media_dev;
+#endif
+			memset(&info, 0, sizeof(struct i2c_board_info));
+			strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+			info.addr = 0x60;
+			info.platform_data = &si2157_config;
+			request_module(info.type);
+			client = i2c_new_device(adapter, &info);
+			if (client == NULL || client->dev.driver == NULL) {
+				module_put(dvb->i2c_client_demod->dev.driver->owner);
+				i2c_unregister_device(dvb->i2c_client_demod);
+				result = -ENODEV;
+				goto out_free;
+			}
+
+			if (!try_module_get(client->dev.driver->owner)) {
+				i2c_unregister_device(client);
+				module_put(dvb->i2c_client_demod->dev.driver->owner);
+				i2c_unregister_device(dvb->i2c_client_demod);
+				result = -ENODEV;
+				goto out_free;
+			}
+
+			dvb->i2c_client_tuner = client;
+
+		}
+		break;
 	default:
 		em28xx_errdev("/2: The frontend of your DVB/ATSC card"
 				" isn't supported yet\n");
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index 13cbb7f..afe7a66 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -193,6 +193,19 @@
 /* em2874 registers */
 #define EM2874_R50_IR_CONFIG    0x50
 #define EM2874_R51_IR           0x51
+#define EM2874_R5D_TS1_PKT_SIZE 0x5d
+#define EM2874_R5E_TS2_PKT_SIZE 0x5e
+	/*
+	 * For both TS1 and TS2, in isochronous mode:
+	 *  0x01  188 bytes
+	 *  0x02  376 bytes
+	 *  0x03  564 bytes
+	 *  0x04  752 bytes
+	 *  0x05  940 bytes
+	 * In bulk mode:
+	 *  0x01..0xff  total packet count in 188-byte units
+	 */
+
 #define EM2874_R5F_TS_ENABLE    0x5f
 
 /* em2874/174/84, em25xx, em276x/7x/8x GPIO registers */
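
Per the register comment added above, the EM2874 TS packet-size registers
take the isochronous payload as a count of 188-byte MPEG-TS packets, so
the value 0x05 written by the dualHD board sequence selects 940-byte
payloads. A trivial standalone illustration of that encoding
(em2874_ts_pkt_size is a made-up helper name):

#include <stdio.h>

/* Isochronous mode: register value = payload size / 188-byte TS packet. */
static unsigned int em2874_ts_pkt_size(unsigned int payload_bytes)
{
	return payload_bytes / 188;
}

int main(void)
{
	printf("940-byte payload -> register value 0x%02x\n",
	       em2874_ts_pkt_size(940));	/* prints 0x05 */
	return 0;
}
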
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 2674449..d148463 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -145,6 +145,8 @@
 #define EM2861_BOARD_LEADTEK_VC100                95
 #define EM28178_BOARD_TERRATEC_T2_STICK_HD        96
 #define EM2884_BOARD_ELGATO_EYETV_HYBRID_2008     97
+#define EM28178_BOARD_PLEX_PX_BCUD                98
+#define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB  99
 
 /* Limits minimum and default number of buffers */
 #define EM28XX_MIN_BUF 4
@@ -406,6 +408,7 @@
 enum em28xx_led_role {
 	EM28XX_LED_ANALOG_CAPTURING = 0,
 	EM28XX_LED_DIGITAL_CAPTURING,
+	EM28XX_LED_DIGITAL_CAPTURING_TS2,
 	EM28XX_LED_ILLUMINATION,
 	EM28XX_NUM_LED_ROLES, /* must be the last */
 };
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index 358c1c1..ea01ee5 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -1125,7 +1125,7 @@
 	vdev->queue = &go->vidq;
 	video_set_drvdata(vdev, go);
 	vdev->v4l2_dev = &go->v4l2_dev;
-	if (!v4l2_device_has_op(&go->v4l2_dev, video, querystd))
+	if (!v4l2_device_has_op(&go->v4l2_dev, 0, video, querystd))
 		v4l2_disable_ioctl(vdev, VIDIOC_QUERYSTD);
 	if (!(go->board_info->flags & GO7007_BOARD_HAS_TUNER)) {
 		v4l2_disable_ioctl(vdev, VIDIOC_S_FREQUENCY);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 1a093e5..83e9a3e 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -3672,11 +3672,10 @@
 
 
 	hdw->cmd_debug_state = 1;
-	if (write_len) {
+	if (write_len && write_data)
 		hdw->cmd_debug_code = ((unsigned char *)write_data)[0];
-	} else {
+	else
 		hdw->cmd_debug_code = 0;
-	}
 	hdw->cmd_debug_write_len = write_len;
 	hdw->cmd_debug_read_len = read_len;
 
@@ -3688,7 +3687,7 @@
 	setup_timer(&timer, pvr2_ctl_timeout, (unsigned long)hdw);
 	timer.expires = jiffies + timeout;
 
-	if (write_len) {
+	if (write_len && write_data) {
 		hdw->cmd_debug_state = 2;
 		/* Transfer write data to internal buffer */
 		for (idx = 0; idx < write_len; idx++) {
@@ -3795,7 +3794,7 @@
 			goto done;
 		}
 	}
-	if (read_len) {
+	if (read_len && read_data) {
 		/* Validate results of read request */
 		if ((hdw->ctl_read_urb->status != 0) &&
 		    (hdw->ctl_read_urb->status != -ENOENT) &&
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 12f5ebb..ad2f3d2 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1452,13 +1452,6 @@
 	printk(KERN_INFO "%s: %s found\n", __func__,
 				usbvision_device_data[model].model_string);
 
-	/*
-	 * this is a security check.
-	 * an exploit using an incorrect bInterfaceNumber is known
-	 */
-	if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
-		return -ENODEV;
-
 	if (usbvision_device_data[model].interface >= 0)
 		interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
 	else if (ifnum < dev->actconfig->desc.bNumInterfaces)
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 019644f..bacecbd 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -280,7 +280,8 @@
 static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
 {
 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
-	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)))
+	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
+	    copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
 		return -EFAULT;
 	return __put_v4l2_format32(&kp->format, &up->format);
 }
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index d8e5994..70b559d 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -735,6 +735,7 @@
 	if (!vdev->v4l2_dev->mdev)
 		return 0;
 
+	vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
 	vdev->entity.function = MEDIA_ENT_F_UNKNOWN;
 
 	switch (type) {
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 170dd68..28e5be2 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1020,9 +1020,12 @@
 				struct file *file, void *fh, void *arg)
 {
 	struct v4l2_capability *cap = (struct v4l2_capability *)arg;
+	struct video_device *vfd = video_devdata(file);
 	int ret;
 
 	cap->version = LINUX_VERSION_CODE;
+	cap->device_caps = vfd->device_caps;
+	cap->capabilities = vfd->device_caps | V4L2_CAP_DEVICE_CAPS;
 
 	ret = ops->vidioc_querycap(file, fh, cap);
 
@@ -2157,40 +2160,56 @@
 				struct file *file, void *fh, void *arg)
 {
 	struct v4l2_cropcap *p = arg;
-
-	if (ops->vidioc_g_selection) {
-		struct v4l2_selection s = { .type = p->type };
-		int ret;
-
-		/* obtaining bounds */
-		if (V4L2_TYPE_IS_OUTPUT(p->type))
-			s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
-		else
-			s.target = V4L2_SEL_TGT_CROP_BOUNDS;
-
-		ret = ops->vidioc_g_selection(file, fh, &s);
-		if (ret)
-			return ret;
-		p->bounds = s.r;
-
-		/* obtaining defrect */
-		if (V4L2_TYPE_IS_OUTPUT(p->type))
-			s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
-		else
-			s.target = V4L2_SEL_TGT_CROP_DEFAULT;
-
-		ret = ops->vidioc_g_selection(file, fh, &s);
-		if (ret)
-			return ret;
-		p->defrect = s.r;
-	}
+	struct v4l2_selection s = { .type = p->type };
+	int ret = 0;
 
 	/* setting trivial pixelaspect */
 	p->pixelaspect.numerator = 1;
 	p->pixelaspect.denominator = 1;
 
+	/*
+	 * The determine_valid_ioctls() call already should ensure
+	 * that this can never happen, but just in case...
+	 */
+	if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_g_selection))
+		return -ENOTTY;
+
 	if (ops->vidioc_cropcap)
-		return ops->vidioc_cropcap(file, fh, p);
+		ret = ops->vidioc_cropcap(file, fh, p);
+
+	if (!ops->vidioc_g_selection)
+		return ret;
+
+	/*
+	 * Ignore ENOTTY or ENOIOCTLCMD error returns, just use the
+	 * square pixel aspect ratio in that case.
+	 */
+	if (ret && ret != -ENOTTY && ret != -ENOIOCTLCMD)
+		return ret;
+
+	/* Use g_selection() to fill in the bounds and defrect rectangles */
+
+	/* obtaining bounds */
+	if (V4L2_TYPE_IS_OUTPUT(p->type))
+		s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
+	else
+		s.target = V4L2_SEL_TGT_CROP_BOUNDS;
+
+	ret = ops->vidioc_g_selection(file, fh, &s);
+	if (ret)
+		return ret;
+	p->bounds = s.r;
+
+	/* obtaining defrect */
+	if (V4L2_TYPE_IS_OUTPUT(p->type))
+		s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
+	else
+		s.target = V4L2_SEL_TGT_CROP_DEFAULT;
+
+	ret = ops->vidioc_g_selection(file, fh, &s);
+	if (ret)
+		return ret;
+	p->defrect = s.r;
 
 	return 0;
 }
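
With the querycap hunk above, the V4L2 core now fills cap->device_caps
from the video_device and always sets V4L2_CAP_DEVICE_CAPS, so drivers no
longer need to do it themselves. From user space the two fields read back
as before; a minimal sketch (assuming /dev/video0 exists):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
		perror("VIDIOC_QUERYCAP");
		return 1;
	}
	/* capabilities describes the whole device, device_caps this node */
	printf("driver=%s caps=0x%08x device_caps=0x%08x\n",
	       (char *)cap.driver, cap.capabilities, cap.device_caps);
	if (cap.capabilities & V4L2_CAP_DEVICE_CAPS)
		printf("device_caps is valid for this node\n");
	return 0;
}
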
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 2a7b79b..ca94bde 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -34,7 +34,7 @@
 {
 	struct media_entity *entity;
 	struct media_entity *if_vid = NULL, *if_aud = NULL;
-	struct media_entity *tuner = NULL, *decoder = NULL, *dtv_demod = NULL;
+	struct media_entity *tuner = NULL, *decoder = NULL;
 	struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL;
 	bool is_webcam = false;
 	u32 flags;
@@ -263,7 +263,7 @@
 	media_entity_graph_walk_start(graph, entity);
 
 	while ((entity = media_entity_graph_walk_next(graph))) {
-		if (is_media_entity_v4l2_io(entity))
+		if (is_media_entity_v4l2_video_device(entity))
 			use += entity->use_count;
 	}
 
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index d630838..953eab0 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -35,9 +35,11 @@
 static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
 {
 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
-	fh->pad = kzalloc(sizeof(*fh->pad) * sd->entity.num_pads, GFP_KERNEL);
-	if (fh->pad == NULL)
-		return -ENOMEM;
+	if (sd->entity.num_pads) {
+		fh->pad = v4l2_subdev_alloc_pad_config(sd);
+		if (fh->pad == NULL)
+			return -ENOMEM;
+	}
 #endif
 	return 0;
 }
@@ -45,7 +47,7 @@
 static void subdev_fh_free(struct v4l2_subdev_fh *fh)
 {
 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
-	kfree(fh->pad);
+	v4l2_subdev_free_pad_config(fh->pad);
 	fh->pad = NULL;
 #endif
 }
@@ -508,7 +510,7 @@
 	if (source_fmt->format.width != sink_fmt->format.width
 	    || source_fmt->format.height != sink_fmt->format.height
 	    || source_fmt->format.code != sink_fmt->format.code)
-		return -EINVAL;
+		return -EPIPE;
 
 	/* The field order must match, or the sink field order must be NONE
 	 * to support interlaced hardware connected to bridges that support
@@ -516,7 +518,7 @@
 	 */
 	if (source_fmt->format.field != sink_fmt->format.field &&
 	    sink_fmt->format.field != V4L2_FIELD_NONE)
-		return -EINVAL;
+		return -EPIPE;
 
 	return 0;
 }
@@ -569,6 +571,35 @@
 		sink, link, &source_fmt, &sink_fmt);
 }
 EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
+
+struct v4l2_subdev_pad_config *
+v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd)
+{
+	struct v4l2_subdev_pad_config *cfg;
+	int ret;
+
+	if (!sd->entity.num_pads)
+		return NULL;
+
+	cfg = kcalloc(sd->entity.num_pads, sizeof(*cfg), GFP_KERNEL);
+	if (!cfg)
+		return NULL;
+
+	ret = v4l2_subdev_call(sd, pad, init_cfg, cfg);
+	if (ret < 0 && ret != -ENOIOCTLCMD) {
+		kfree(cfg);
+		return NULL;
+	}
+
+	return cfg;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_alloc_pad_config);
+
+void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg)
+{
+	kfree(cfg);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_free_pad_config);
 #endif /* CONFIG_MEDIA_CONTROLLER */
 
 void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
@@ -584,6 +615,7 @@
 	sd->host_priv = NULL;
 #if defined(CONFIG_MEDIA_CONTROLLER)
 	sd->entity.name = sd->name;
+	sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
 	sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
 #endif
 }
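
The v4l2-subdev change above replaces the open-coded kzalloc of the
per-file-handle pad array with v4l2_subdev_alloc_pad_config(), which also
calls the subdev's init_cfg pad op so drivers can seed default "try"
formats. A sketch of what such an op might look like in a sensor driver,
assuming CONFIG_VIDEO_V4L2_SUBDEV_API (my_sd_init_cfg and the 1280x720
UYVY default are illustrative only):

#include <media/v4l2-subdev.h>

static int my_sd_init_cfg(struct v4l2_subdev *sd,
			  struct v4l2_subdev_pad_config *cfg)
{
	/* seed the "try" format on pad 0 for a freshly opened handle */
	struct v4l2_mbus_framefmt *fmt = v4l2_subdev_get_try_format(sd, cfg, 0);

	fmt->width = 1280;
	fmt->height = 720;
	fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
	fmt->field = V4L2_FIELD_NONE;
	fmt->colorspace = V4L2_COLORSPACE_SRGB;
	return 0;
}

static const struct v4l2_subdev_pad_ops my_sd_pad_ops = {
	.init_cfg = my_sd_init_cfg,
	/* .get_fmt / .set_fmt and friends would go here */
};
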
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index df4c052c..f300f06 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -349,7 +349,7 @@
 
 	if (dma->pages) {
 		for (i = 0; i < dma->nr_pages; i++)
-			page_cache_release(dma->pages[i]);
+			put_page(dma->pages[i]);
 		kfree(dma->pages);
 		dma->pages = NULL;
 	}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5d016f4..9fbcb67 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1645,7 +1645,7 @@
  * Will sleep if required for nonblocking == false.
  */
 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
-				int nonblocking)
+			     void *pb, int nonblocking)
 {
 	unsigned long flags;
 	int ret;
@@ -1666,10 +1666,10 @@
 	/*
 	 * Only remove the buffer from done_list if v4l2_buffer can handle all
 	 * the planes.
-	 * Verifying planes is NOT necessary since it already has been checked
-	 * before the buffer is queued/prepared. So it can never fail.
 	 */
-	list_del(&(*vb)->done_entry);
+	ret = call_bufop(q, verify_planes_array, *vb, pb);
+	if (!ret)
+		list_del(&(*vb)->done_entry);
 	spin_unlock_irqrestore(&q->done_lock, flags);
 
 	return ret;
@@ -1748,7 +1748,7 @@
 	struct vb2_buffer *vb = NULL;
 	int ret;
 
-	ret = __vb2_get_done_vb(q, &vb, nonblocking);
+	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
 	if (ret < 0)
 		return ret;
 
@@ -2298,6 +2298,16 @@
 		return POLLERR;
 
 	/*
+	 * If this quirk is set and QBUF hasn't been called yet then
+	 * return POLLERR as well. This only affects capture queues; output
+	 * queues will always initialize waiting_for_buffers to false.
+	 * This quirk is set by V4L2 for backwards compatibility reasons.
+	 */
+	if (q->quirk_poll_must_check_waiting_for_buffers &&
+	    q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
+		return POLLERR;
+
+	/*
 	 * For output streams you can call write() as long as there are fewer
 	 * buffers queued than there are buffers available.
 	 */
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index dbec592..3c3b517 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@
 	vec = frame_vector_create(nr);
 	if (!vec)
 		return ERR_PTR(-ENOMEM);
-	ret = get_vaddr_frames(start, nr, write, 1, vec);
+	ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
 	if (ret < 0)
 		goto out_destroy;
 	/* We accept only complete set of PFNs */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 91f5521..0b1b8c7 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -765,6 +765,12 @@
 	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
 	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
 			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	/*
+	 * For compatibility with vb1: if QBUF hasn't been called yet, then
+	 * return POLLERR as well. This only affects capture queues; output
+	 * queues will always initialize waiting_for_buffers to false.
+	 */
+	q->quirk_poll_must_check_waiting_for_buffers = true;
 
 	return vb2_core_queue_init(q);
 }
@@ -818,14 +824,6 @@
 			poll_wait(file, &fh->wait, wait);
 	}
 
-	/*
-	 * For compatibility with vb1: if QBUF hasn't been called yet, then
-	 * return POLLERR as well. This only affects capture queues, output
-	 * queues will always initialize waiting_for_buffers to false.
-	 */
-	if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
-		return POLLERR;
-
 	return res | vb2_core_poll(q, file, wait);
 }
 EXPORT_SYMBOL_GPL(vb2_poll);
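
The vb2 change above moves the "POLLERR before any QBUF" behaviour out of
vb2_poll() and behind the new quirk_poll_must_check_waiting_for_buffers
flag, which the V4L2 wrapper sets for backwards compatibility with
videobuf1. Seen from user space the effect on a capture node is
unchanged; a small sketch of how it shows up (assuming /dev/video0 is a
vb2-backed capture device with no buffers queued yet):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct pollfd pfd = { .events = POLLIN };

	pfd.fd = open("/dev/video0", O_RDWR | O_NONBLOCK);
	if (pfd.fd < 0)
		return 1;

	/* with no buffers queued, the node flags POLLERR instead of blocking */
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLERR))
		printf("POLLERR before any QBUF, as documented\n");
	return 0;
}
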
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 51d5cd2..c61a284 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -122,6 +122,7 @@
 	  mainly help enable/disable iommu and control the power domain and
 	  clocks for each local arbiter.
 
+source "drivers/memory/samsung/Kconfig"
 source "drivers/memory/tegra/Kconfig"
 
 endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 890bdf4..cb0b7a1 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -17,4 +17,5 @@
 obj-$(CONFIG_JZ4780_NEMC)	+= jz4780-nemc.o
 obj-$(CONFIG_MTK_SMI)		+= mtk-smi.o
 
+obj-$(CONFIG_SAMSUNG_MC)	+= samsung/
 obj-$(CONFIG_TEGRA_MC)		+= tegra/
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
new file mode 100644
index 0000000..9de1222
--- /dev/null
+++ b/drivers/memory/samsung/Kconfig
@@ -0,0 +1,13 @@
+config SAMSUNG_MC
+	bool "Samsung Exynos Memory Controller support" if COMPILE_TEST
+	help
+	  Support for the Memory Controller (MC) devices found on
+	  Samsung Exynos SoCs.
+
+if SAMSUNG_MC
+
+config EXYNOS_SROM
+	bool "Exynos SROM controller driver" if COMPILE_TEST
+	depends on (ARM && ARCH_EXYNOS) || (COMPILE_TEST && HAS_IOMEM)
+
+endif
diff --git a/drivers/memory/samsung/Makefile b/drivers/memory/samsung/Makefile
new file mode 100644
index 0000000..9c554d5
--- /dev/null
+++ b/drivers/memory/samsung/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_EXYNOS_SROM)	+= exynos-srom.o
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
new file mode 100644
index 0000000..96756fb
--- /dev/null
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *	      http://www.samsung.com/
+ *
+ * EXYNOS - SROM Controller support
+ * Author: Pankaj Dubey <pankaj.dubey@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "exynos-srom.h"
+
+static const unsigned long exynos_srom_offsets[] = {
+	/* SROM side */
+	EXYNOS_SROM_BW,
+	EXYNOS_SROM_BC0,
+	EXYNOS_SROM_BC1,
+	EXYNOS_SROM_BC2,
+	EXYNOS_SROM_BC3,
+};
+
+/**
+ * struct exynos_srom_reg_dump: register dump of SROM Controller registers.
+ * @offset: srom register offset from the controller base address.
+ * @value: the value of register under the offset.
+ */
+struct exynos_srom_reg_dump {
+	u32     offset;
+	u32     value;
+};
+
+/**
+ * struct exynos_srom: platform data for exynos srom controller driver.
+ * @dev: platform device pointer
+ * @reg_base: srom base address
+ * @reg_offset: exynos_srom_reg_dump pointer to hold offset and its value.
+ */
+struct exynos_srom {
+	struct device *dev;
+	void __iomem *reg_base;
+	struct exynos_srom_reg_dump *reg_offset;
+};
+
+static struct exynos_srom_reg_dump *exynos_srom_alloc_reg_dump(
+		const unsigned long *rdump,
+		unsigned long nr_rdump)
+{
+	struct exynos_srom_reg_dump *rd;
+	unsigned int i;
+
+	rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL);
+	if (!rd)
+		return NULL;
+
+	for (i = 0; i < nr_rdump; ++i)
+		rd[i].offset = rdump[i];
+
+	return rd;
+}
+
+static int exynos_srom_configure_bank(struct exynos_srom *srom,
+				      struct device_node *np)
+{
+	u32 bank, width, pmc = 0;
+	u32 timing[6];
+	u32 cs, bw;
+
+	if (of_property_read_u32(np, "reg", &bank))
+		return -EINVAL;
+	if (of_property_read_u32(np, "reg-io-width", &width))
+		width = 1;
+	if (of_property_read_bool(np, "samsung,srom-page-mode"))
+		pmc = 1 << EXYNOS_SROM_BCX__PMC__SHIFT;
+	if (of_property_read_u32_array(np, "samsung,srom-timing", timing,
+				       ARRAY_SIZE(timing)))
+		return -EINVAL;
+
+	bank *= 4; /* Convert bank into shift/offset */
+
+	cs = 1 << EXYNOS_SROM_BW__BYTEENABLE__SHIFT;
+	if (width == 2)
+		cs |= 1 << EXYNOS_SROM_BW__DATAWIDTH__SHIFT;
+
+	bw = __raw_readl(srom->reg_base + EXYNOS_SROM_BW);
+	bw = (bw & ~(EXYNOS_SROM_BW__CS_MASK << bank)) | (cs << bank);
+	__raw_writel(bw, srom->reg_base + EXYNOS_SROM_BW);
+
+	__raw_writel(pmc | (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) |
+		    (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) |
+		    (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) |
+		    (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) |
+		    (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) |
+		    (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT),
+		    srom->reg_base + EXYNOS_SROM_BC0 + bank);
+
+	return 0;
+}
+
+static int exynos_srom_probe(struct platform_device *pdev)
+{
+	struct device_node *np, *child;
+	struct exynos_srom *srom;
+	struct device *dev = &pdev->dev;
+	bool bad_bank_config = false;
+
+	np = dev->of_node;
+	if (!np) {
+		dev_err(&pdev->dev, "could not find device info\n");
+		return -EINVAL;
+	}
+
+	srom = devm_kzalloc(&pdev->dev,
+			sizeof(struct exynos_srom), GFP_KERNEL);
+	if (!srom)
+		return -ENOMEM;
+
+	srom->dev = dev;
+	srom->reg_base = of_iomap(np, 0);
+	if (!srom->reg_base) {
+		dev_err(&pdev->dev, "iomap of exynos srom controller failed\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, srom);
+
+	srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
+			ARRAY_SIZE(exynos_srom_offsets));
+	if (!srom->reg_offset) {
+		iounmap(srom->reg_base);
+		return -ENOMEM;
+	}
+
+	for_each_child_of_node(np, child) {
+		if (exynos_srom_configure_bank(srom, child)) {
+			dev_err(dev,
+				"Could not decode bank configuration for %s\n",
+				child->name);
+			bad_bank_config = true;
+		}
+	}
+
+	/*
+	 * If any bank failed to configure, we still provide suspend/resume,
+	 * but do not probe child devices
+	 */
+	if (bad_bank_config)
+		return 0;
+
+	return of_platform_populate(np, NULL, NULL, dev);
+}
+
+static int exynos_srom_remove(struct platform_device *pdev)
+{
+	struct exynos_srom *srom = platform_get_drvdata(pdev);
+
+	kfree(srom->reg_offset);
+	iounmap(srom->reg_base);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
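+/*
+ * Save the bank configuration registers across suspend so they can be
+ * rewritten on resume.
+ */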
+static void exynos_srom_save(void __iomem *base,
+				    struct exynos_srom_reg_dump *rd,
+				    unsigned int num_regs)
+{
+	for (; num_regs > 0; --num_regs, ++rd)
+		rd->value = readl(base + rd->offset);
+}
+
+static void exynos_srom_restore(void __iomem *base,
+				      const struct exynos_srom_reg_dump *rd,
+				      unsigned int num_regs)
+{
+	for (; num_regs > 0; --num_regs, ++rd)
+		writel(rd->value, base + rd->offset);
+}
+
+static int exynos_srom_suspend(struct device *dev)
+{
+	struct exynos_srom *srom = dev_get_drvdata(dev);
+
+	exynos_srom_save(srom->reg_base, srom->reg_offset,
+				ARRAY_SIZE(exynos_srom_offsets));
+	return 0;
+}
+
+static int exynos_srom_resume(struct device *dev)
+{
+	struct exynos_srom *srom = dev_get_drvdata(dev);
+
+	exynos_srom_restore(srom->reg_base, srom->reg_offset,
+				ARRAY_SIZE(exynos_srom_offsets));
+	return 0;
+}
+#endif
+
+static const struct of_device_id of_exynos_srom_ids[] = {
+	{
+		.compatible	= "samsung,exynos4210-srom",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_exynos_srom_ids);
+
+static SIMPLE_DEV_PM_OPS(exynos_srom_pm_ops, exynos_srom_suspend, exynos_srom_resume);
+
+static struct platform_driver exynos_srom_driver = {
+	.probe = exynos_srom_probe,
+	.remove = exynos_srom_remove,
+	.driver = {
+		.name = "exynos-srom",
+		.of_match_table = of_exynos_srom_ids,
+		.pm = &exynos_srom_pm_ops,
+	},
+};
+module_platform_driver(exynos_srom_driver);
+
+MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
+MODULE_DESCRIPTION("Exynos SROM Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/memory/samsung/exynos-srom.h b/drivers/memory/samsung/exynos-srom.h
new file mode 100644
index 0000000..34660c6
--- /dev/null
+++ b/drivers/memory/samsung/exynos-srom.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Exynos SROMC register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __EXYNOS_SROM_H
+#define __EXYNOS_SROM_H __FILE__
+
+#define EXYNOS_SROMREG(x)		(x)
+
+#define EXYNOS_SROM_BW		EXYNOS_SROMREG(0x0)
+#define EXYNOS_SROM_BC0		EXYNOS_SROMREG(0x4)
+#define EXYNOS_SROM_BC1		EXYNOS_SROMREG(0x8)
+#define EXYNOS_SROM_BC2		EXYNOS_SROMREG(0xc)
+#define EXYNOS_SROM_BC3		EXYNOS_SROMREG(0x10)
+#define EXYNOS_SROM_BC4		EXYNOS_SROMREG(0x14)
+#define EXYNOS_SROM_BC5		EXYNOS_SROMREG(0x18)
+
+/* one register BW holds a 4-bit settings field for each chip select, NCS0 - NCS5 */
+
+#define EXYNOS_SROM_BW__DATAWIDTH__SHIFT	0
+#define EXYNOS_SROM_BW__ADDRMODE__SHIFT		1
+#define EXYNOS_SROM_BW__WAITENABLE__SHIFT	2
+#define EXYNOS_SROM_BW__BYTEENABLE__SHIFT	3
+
+#define EXYNOS_SROM_BW__CS_MASK			0xf
+
+#define EXYNOS_SROM_BW__NCS0__SHIFT		0
+#define EXYNOS_SROM_BW__NCS1__SHIFT		4
+#define EXYNOS_SROM_BW__NCS2__SHIFT		8
+#define EXYNOS_SROM_BW__NCS3__SHIFT		12
+#define EXYNOS_SROM_BW__NCS4__SHIFT		16
+#define EXYNOS_SROM_BW__NCS5__SHIFT		20
+
+/* the same field layout applies to each of the BC0 - BC5 registers */
+
+#define EXYNOS_SROM_BCX__PMC__SHIFT		0
+#define EXYNOS_SROM_BCX__TACP__SHIFT		4
+#define EXYNOS_SROM_BCX__TCAH__SHIFT		8
+#define EXYNOS_SROM_BCX__TCOH__SHIFT		12
+#define EXYNOS_SROM_BCX__TACC__SHIFT		16
+#define EXYNOS_SROM_BCX__TCOS__SHIFT		24
+#define EXYNOS_SROM_BCX__TACS__SHIFT		28
+
+#endif /* __EXYNOS_SROM_H */
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 84abf9d..3cd6815 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1220,7 +1220,7 @@
 		}
 
 		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
-			dbg("managment flag doesn't indicate boot block %d",
+			dbg("management flag doesn't indicate boot block %d",
 									pba);
 			continue;
 		}
@@ -1367,7 +1367,7 @@
 static int msb_ftl_scan(struct msb_data *msb)
 {
 	u16 pba, lba, other_block;
-	u8 overwrite_flag, managment_flag, other_overwrite_flag;
+	u8 overwrite_flag, management_flag, other_overwrite_flag;
 	int error;
 	struct ms_extra_data_register extra;
 	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
@@ -1409,7 +1409,7 @@
 		}
 
 		lba = be16_to_cpu(extra.logical_address);
-		managment_flag = extra.management_flag;
+		management_flag = extra.management_flag;
 		overwrite_flag = extra.overwrite_flag;
 		overwrite_flags[pba] = overwrite_flag;
 
@@ -1421,16 +1421,16 @@
 		}
 
 		/* Skip system/drm blocks */
-		if ((managment_flag & MEMSTICK_MANAGMENT_FLAG_NORMAL) !=
-			MEMSTICK_MANAGMENT_FLAG_NORMAL) {
-			dbg("pba %05d -> [reserved managment flag %02x]",
-							pba, managment_flag);
+		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
+			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
+			dbg("pba %05d -> [reserved management flag %02x]",
+							pba, management_flag);
 			msb_mark_block_used(msb, pba);
 			continue;
 		}
 
 		/* Erase temporary tables */
-		if (!(managment_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
+		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
 			dbg("pba %05d -> [temp table] - will erase", pba);
 
 			msb_mark_block_used(msb, pba);
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
index c75198d..53962c3 100644
--- a/drivers/memstick/core/ms_block.h
+++ b/drivers/memstick/core/ms_block.h
@@ -47,7 +47,7 @@
 #define MEMSTICK_OV_PG_NORMAL \
 	(MEMSTICK_OVERWRITE_PGST1 | MEMSTICK_OVERWRITE_PGST0)
 
-#define MEMSTICK_MANAGMENT_FLAG_NORMAL \
+#define MEMSTICK_MANAGEMENT_FLAG_NORMAL \
 	(MEMSTICK_MANAGEMENT_SYSFLG |  \
 	MEMSTICK_MANAGEMENT_SCMS1   |  \
 	MEMSTICK_MANAGEMENT_SCMS0)     \
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index cbe9607..6955c9e 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -791,7 +791,7 @@
 		pSimple->Address.High = 0;
 
 	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
 			IOC_AND_NETDEV_NAMES_s_s(dev),
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ebccfa..7ee1667 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2281,7 +2281,7 @@
 
 	dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
 				      blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
-	if (!dma_addr_out)
+	if (pci_dma_mapping_error(ioc->pcidev, dma_addr_out))
 		goto put_mf;
 	ioc->add_sge(psge, flagsLength, dma_addr_out);
 	psge += ioc->SGE_size;
@@ -2296,7 +2296,7 @@
 	flagsLength |= blk_rq_bytes(rsp) + 4;
 	dma_addr_in =  pci_map_single(ioc->pcidev, bio_data(rsp->bio),
 				      blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
-	if (!dma_addr_in)
+	if (pci_dma_mapping_error(ioc->pcidev, dma_addr_in))
 		goto unmap;
 	ioc->add_sge(psge, flagsLength, dma_addr_in);
 
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 613231c..031e088 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1150,7 +1150,7 @@
 	}
 	shost_printk(KERN_INFO, shost, MYIOC_s_FMT
 	    "Integrated RAID detects new device %d\n", ioc->name, disk);
-	scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1);
+	scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, SCSI_SCAN_RESCAN);
 }
 
 
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 69d9fff..0aecd7b 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -2563,7 +2563,7 @@
 
 	if (user_trig_timer & ~0xFF) {
 		dev_err(dev,
-			"debugfs error input: should be beetween 0 to 255\n");
+			"debugfs error input: should be between 0 and 255\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 5a8d9c7..7ddc4a9 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -31,13 +31,9 @@
 	{ },
 };
 
-static struct property_set spt_i2c_pset = {
-	.properties = spt_i2c_properties,
-};
-
 static const struct intel_lpss_platform_info spt_i2c_info = {
 	.clk_rate = 120000000,
-	.pset = &spt_i2c_pset,
+	.properties = spt_i2c_properties,
 };
 
 static const struct intel_lpss_platform_info bxt_info = {
@@ -51,13 +47,9 @@
 	{ },
 };
 
-static struct property_set bxt_i2c_pset = {
-	.properties = bxt_i2c_properties,
-};
-
 static const struct intel_lpss_platform_info bxt_i2c_info = {
 	.clk_rate = 133000000,
-	.pset = &bxt_i2c_pset,
+	.properties = bxt_i2c_properties,
 };
 
 static const struct acpi_device_id intel_lpss_acpi_ids[] = {
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index a19e571..1d79a3c 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -71,13 +71,9 @@
 	{ },
 };
 
-static struct property_set spt_i2c_pset = {
-	.properties = spt_i2c_properties,
-};
-
 static const struct intel_lpss_platform_info spt_i2c_info = {
 	.clk_rate = 120000000,
-	.pset = &spt_i2c_pset,
+	.properties = spt_i2c_properties,
 };
 
 static struct property_entry uart_properties[] = {
@@ -87,14 +83,10 @@
 	{ },
 };
 
-static struct property_set uart_pset = {
-	.properties = uart_properties,
-};
-
 static const struct intel_lpss_platform_info spt_uart_info = {
 	.clk_rate = 120000000,
 	.clk_con_id = "baudclk",
-	.pset = &uart_pset,
+	.properties = uart_properties,
 };
 
 static const struct intel_lpss_platform_info bxt_info = {
@@ -104,7 +96,7 @@
 static const struct intel_lpss_platform_info bxt_uart_info = {
 	.clk_rate = 100000000,
 	.clk_con_id = "baudclk",
-	.pset = &uart_pset,
+	.properties = uart_properties,
 };
 
 static struct property_entry bxt_i2c_properties[] = {
@@ -114,13 +106,9 @@
 	{ },
 };
 
-static struct property_set bxt_i2c_pset = {
-	.properties = bxt_i2c_properties,
-};
-
 static const struct intel_lpss_platform_info bxt_i2c_info = {
 	.clk_rate = 133000000,
-	.pset = &bxt_i2c_pset,
+	.properties = bxt_i2c_properties,
 };
 
 static const struct pci_device_id intel_lpss_pci_ids[] = {
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 1bbbe87..6352aab 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -407,7 +407,7 @@
 	if (ret)
 		return ret;
 
-	lpss->cell->pset = info->pset;
+	lpss->cell->properties = info->properties;
 
 	intel_lpss_init_dev(lpss);
 
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index 0dcea9e..6941166 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -16,14 +16,14 @@
 
 struct device;
 struct resource;
-struct property_set;
+struct property_entry;
 
 struct intel_lpss_platform_info {
 	struct resource *mem;
 	int irq;
 	unsigned long clk_rate;
 	const char *clk_con_id;
-	struct property_set *pset;
+	struct property_entry *properties;
 };
 
 int intel_lpss_probe(struct device *dev,
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index bdc5e27..a24b35f 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -219,8 +219,7 @@
 		return -ENOMEM;
 
 	/* Set the properties for portA */
-	pdata->properties->node		= NULL;
-	pdata->properties->name		= "intel-quark-x1000-gpio-portA";
+	pdata->properties->fwnode	= NULL;
 	pdata->properties->idx		= 0;
 	pdata->properties->ngpio	= INTEL_QUARK_MFD_NGPIO;
 	pdata->properties->gpio_base	= INTEL_QUARK_MFD_GPIO_BASE;
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 88bd1b1..fc1c1fc 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -193,8 +193,8 @@
 			goto fail_alias;
 	}
 
-	if (cell->pset) {
-		ret = platform_device_add_properties(pdev, cell->pset);
+	if (cell->properties) {
+		ret = platform_device_add_properties(pdev, cell->properties);
 		if (ret)
 			goto fail_alias;
 	}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 10370f2..7edea9c 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -223,6 +223,13 @@
 		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 
+	/*
+	 * Wait until no further interrupts are presented by the PSL
+	 * for this context.
+	 */
+	if (cxl_ops->irq_wait)
+		cxl_ops->irq_wait(ctx);
+
 	/* release the reference to the group leader and mm handling pid */
 	put_pid(ctx->pid);
 	put_pid(ctx->glpid);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 38e21cf..73dc2a3 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -274,6 +274,7 @@
 #define CXL_PSL_DSISR_An_PE (1ull << (63-4))  /* PSL Error (implementation specific) */
 #define CXL_PSL_DSISR_An_AE (1ull << (63-5))  /* AFU Error */
 #define CXL_PSL_DSISR_An_OC (1ull << (63-6))  /* OS Context Warning */
+#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
 /* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
 #define CXL_PSL_DSISR_An_M  DSISR_NOHPTE      /* PTE not found */
 #define CXL_PSL_DSISR_An_P  DSISR_PROTFAULT   /* Storage protection violation */
@@ -855,6 +856,7 @@
 					u64 dsisr, u64 errstat);
 	irqreturn_t (*psl_interrupt)(int irq, void *data);
 	int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+	void (*irq_wait)(struct cxl_context *ctx);
 	int (*attach_process)(struct cxl_context *ctx, bool kernel,
 			u64 wed, u64 amr);
 	int (*detach_process)(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index be646dc..8def455 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -203,7 +203,6 @@
 void cxl_unmap_irq(unsigned int virq, void *cookie)
 {
 	free_irq(virq, cookie);
-	irq_dispose_mapping(virq);
 }
 
 int cxl_register_one_irq(struct cxl *adapter,
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 387fcbd..ecf7557 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -14,6 +14,7 @@
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/delay.h>
 #include <asm/synch.h>
 #include <misc/cxl-base.h>
 
@@ -797,6 +798,35 @@
 	return fail_psl_irq(afu, &irq_info);
 }
 
+void native_irq_wait(struct cxl_context *ctx)
+{
+	u64 dsisr;
+	int timeout = 1000;
+	int ph;
+
+	/*
+	 * Wait until no further interrupts are presented by the PSL
+	 * for this context.
+	 */
+	while (timeout--) {
+		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
+		if (ph != ctx->pe)
+			return;
+		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
+		if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
+			return;
+		/*
+		 * We are waiting for the workqueue to process our
+		 * irq, so we need to let it run here.
+		 */
+		msleep(1);
+	}
+
+	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
+		 " DSISR %016llx!\n", ph, dsisr);
+	return;
+}
+
 static irqreturn_t native_slice_irq_err(int irq, void *data)
 {
 	struct cxl_afu *afu = data;
@@ -1076,6 +1106,7 @@
 	.handle_psl_slice_error = native_handle_psl_slice_error,
 	.psl_interrupt = NULL,
 	.ack_irq = native_ack_irq,
+	.irq_wait = native_irq_wait,
 	.attach_process = native_attach_process,
 	.detach_process = native_detach_process,
 	.support_attributes = native_support_attributes,
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 089d694..6cc17b77 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -245,8 +245,7 @@
 		if (status == count)
 			return count;
 
-		/* REVISIT: at HZ=100, this is sloooow */
-		msleep(1);
+		usleep_range(1000, 1500);
 	} while (time_before(read_time, timeout));
 
 	return -ETIMEDOUT;
@@ -365,8 +364,7 @@
 		if (status == count)
 			return count;
 
-		/* REVISIT: at HZ=100, this is sloooow */
-		msleep(1);
+		usleep_range(1000, 1500);
 	} while (time_before(write_time, timeout));
 
 	return -ETIMEDOUT;
@@ -544,10 +542,7 @@
 		} else {
 			return -EPFNOSUPPORT;
 		}
-	}
 
-	/* Use I2C operations unless we're stuck with SMBus extensions. */
-	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
 		if (i2c_check_functionality(client->adapter,
 				I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
 			use_smbus_write = I2C_SMBUS_I2C_BLOCK_DATA;
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e8b9331..9c677f3 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -116,8 +116,8 @@
 {
 	struct inode *root;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = IBMASMFS_MAGIC;
 	sb->s_op = &ibmasmfs_s_ops;
 	sb->s_time_gran = 1;
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 5f1a36b..0a5cbbe 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -458,8 +458,10 @@
 			break;
 
 		val = kmalloc(len, GFP_KERNEL);
-		if (!val)
+		if (!val) {
+			kfree(base);
 			break;
+		}
 
 		*val = 0x12345678;
 		base[offset] = *val;
@@ -498,14 +500,17 @@
 	}
 	case CT_READ_BUDDY_AFTER_FREE: {
 		unsigned long p = __get_free_page(GFP_KERNEL);
-		int saw, *val = kmalloc(1024, GFP_KERNEL);
+		int saw, *val;
 		int *base;
 
 		if (!p)
 			break;
 
-		if (!val)
+		val = kmalloc(1024, GFP_KERNEL);
+		if (!val) {
+			free_page(p);
 			break;
+		}
 
 		base = (int *)p;
 
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index e94c7fb..88e4523 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -945,6 +945,11 @@
 			ret = -EFAULT;
 			goto free_ret;
 		}
+		/* Ensure desc has not changed between the two reads */
+		if (memcmp(&dd, dd_config, sizeof(dd))) {
+			ret = -EINVAL;
+			goto free_ret;
+		}
 		mutex_lock(&vdev->vdev_mutex);
 		mutex_lock(&vi->vop_mutex);
 		ret = vop_virtio_add_device(vdev, dd_config);
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 967b9dd..0307690 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -718,8 +718,8 @@
 static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
 			void *mesg, int lines)
 {
-	unsigned long m, *val = mesg, gpa, save;
-	int ret;
+	unsigned long m;
+	int ret, loops = 200;	/* experimentally determined */
 
 	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
 	if (lines == 2) {
@@ -735,22 +735,28 @@
 		return MQE_OK;
 
 	/*
-	 * Send a cross-partition interrupt to the SSI that contains the target
-	 * message queue. Normally, the interrupt is automatically delivered by
-	 * hardware but some error conditions require explicit delivery.
-	 * Use the GRU to deliver the interrupt. Otherwise partition failures
+	 * Send a noop message in order to deliver a cross-partition interrupt
+	 * to the SSI that contains the target message queue. Normally, the
+	 * interrupt is automatically delivered by hardware following mesq
+	 * operations, but some error conditions require explicit delivery.
+	 * The noop message will trigger delivery. Otherwise partition failures
 	 * could cause unrecovered errors.
 	 */
-	gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
-	save = *val;
-	*val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
-				dest_Fixed);
-	gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
-	ret = gru_wait(cb);
-	*val = save;
-	if (ret != CBS_IDLE)
-		return MQE_UNEXPECTED_CB_ERR;
-	return MQE_OK;
+	do {
+		ret = send_noop_message(cb, mqd, mesg);
+	} while ((ret == MQIE_AGAIN || ret == MQE_CONGESTION) && (loops-- > 0));
+
+	if (ret == MQIE_AGAIN || ret == MQE_CONGESTION) {
+		/*
+		 * Don't indicate to the app to resend the message, as it's
+		 * already been successfully sent.  We simply send an OK
+		 * (rather than fail the send with MQE_UNEXPECTED_CB_ERR),
+		 * assuming that the other side is receiving enough
+		 * interrupts to get this message processed anyway.
+		 */
+		ret = MQE_OK;
+	}
+	return ret;
 }
 
 /*
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4..f84a427 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -728,7 +728,7 @@
 		if (dirty)
 			set_page_dirty(pages[i]);
 
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 		pages[i] = NULL;
 	}
 }
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 3bdbe50..ddc9620 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -35,6 +35,7 @@
 #include <linux/capability.h>
 #include <linux/compat.h>
 #include <linux/pm_runtime.h>
+#include <linux/idr.h>
 
 #include <linux/mmc/ioctl.h>
 #include <linux/mmc/card.h>
@@ -78,15 +79,14 @@
 /*
  * We've only got one major, so number of mmcblk devices is
  * limited to (1 << 20) / number of minors per device.  It is also
- * currently limited by the size of the static bitmaps below.
+ * limited by the MAX_DEVICES below.
  */
 static int max_devices;
 
 #define MAX_DEVICES 256
 
-/* TODO: Replace these with struct ida */
-static DECLARE_BITMAP(dev_use, MAX_DEVICES);
-static DECLARE_BITMAP(name_use, MAX_DEVICES);
+static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_SPINLOCK(mmc_blk_lock);
 
 /*
  * There is one mmc_blk_data per slot.
@@ -105,7 +105,6 @@
 	unsigned int	usage;
 	unsigned int	read_only;
 	unsigned int	part_type;
-	unsigned int	name_idx;
 	unsigned int	reset_done;
 #define MMC_BLK_READ		BIT(0)
 #define MMC_BLK_WRITE		BIT(1)
@@ -180,7 +179,9 @@
 		int devidx = mmc_get_devidx(md->disk);
 		blk_cleanup_queue(md->queue.queue);
 
-		__clear_bit(devidx, dev_use);
+		spin_lock(&mmc_blk_lock);
+		ida_remove(&mmc_blk_ida, devidx);
+		spin_unlock(&mmc_blk_lock);
 
 		put_disk(md->disk);
 		kfree(md);
@@ -947,16 +948,22 @@
 			req->rq_disk->disk_name, "timed out", name, status);
 
 		/* If the status cmd initially failed, retry the r/w cmd */
-		if (!status_valid)
+		if (!status_valid) {
+			pr_err("%s: status not valid, retrying timeout\n",
+				req->rq_disk->disk_name);
 			return ERR_RETRY;
+		}
 
 		/*
 		 * If it was a r/w cmd crc error, or illegal command
 		 * (eg, issued in wrong state) then retry - we should
 		 * have corrected the state problem above.
 		 */
-		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+			pr_err("%s: command error, retrying timeout\n",
+				req->rq_disk->disk_name);
 			return ERR_RETRY;
+		}
 
 		/* Otherwise abort the command */
 		return ERR_ABORT;
@@ -2191,10 +2198,23 @@
 	struct mmc_blk_data *md;
 	int devidx, ret;
 
-	devidx = find_first_zero_bit(dev_use, max_devices);
-	if (devidx >= max_devices)
-		return ERR_PTR(-ENOSPC);
-	__set_bit(devidx, dev_use);
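+	/*
+	 * ida_pre_get() preallocates memory for the ID allocation below;
+	 * retry if a concurrent caller consumed it before ida_get_new().
+	 */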
+again:
+	if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock(&mmc_blk_lock);
+	ret = ida_get_new(&mmc_blk_ida, &devidx);
+	spin_unlock(&mmc_blk_lock);
+
+	if (ret == -EAGAIN)
+		goto again;
+	else if (ret)
+		return ERR_PTR(ret);
+
+	if (devidx >= max_devices) {
+		ret = -ENOSPC;
+		goto out;
+	}
 
 	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
 	if (!md) {
@@ -2202,19 +2222,6 @@
 		goto out;
 	}
 
-	/*
-	 * !subname implies we are creating main mmc_blk_data that will be
-	 * associated with mmc_card with dev_set_drvdata. Due to device
-	 * partitions, devidx will not coincide with a per-physical card
-	 * index anymore so we keep track of a name index.
-	 */
-	if (!subname) {
-		md->name_idx = find_first_zero_bit(name_use, max_devices);
-		__set_bit(md->name_idx, name_use);
-	} else
-		md->name_idx = ((struct mmc_blk_data *)
-				dev_to_disk(parent)->private_data)->name_idx;
-
 	md->area_type = area_type;
 
 	/*
@@ -2264,7 +2271,7 @@
 	 */
 
 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
-		 "mmcblk%u%s", md->name_idx, subname ? subname : "");
+		 "mmcblk%u%s", card->host->index, subname ? subname : "");
 
 	if (mmc_card_mmc(card))
 		blk_queue_logical_block_size(md->queue.queue,
@@ -2286,7 +2293,7 @@
 	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
 	     card->ext_csd.rel_sectors)) {
 		md->flags |= MMC_BLK_REL_WR;
-		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+		blk_queue_write_cache(md->queue.queue, true, true);
 	}
 
 	if (mmc_card_mmc(card) &&
@@ -2304,6 +2311,9 @@
  err_kfree:
 	kfree(md);
  out:
+	spin_lock(&mmc_blk_lock);
+	ida_remove(&mmc_blk_ida, devidx);
+	spin_unlock(&mmc_blk_lock);
 	return ERR_PTR(ret);
 }
 
@@ -2418,7 +2428,6 @@
 	struct list_head *pos, *q;
 	struct mmc_blk_data *part_md;
 
-	__clear_bit(md->name_idx, name_use);
 	list_for_each_safe(pos, q, &md->part) {
 		part_md = list_entry(pos, struct mmc_blk_data, part);
 		list_del(pos);
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 4c33d76..250f223 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -1,3 +1,24 @@
 #
 # MMC core configuration
 #
+config PWRSEQ_EMMC
+	tristate "HW reset support for eMMC"
+	default y
+	depends on OF
+	help
+	  This selects hardware reset support (aka pwrseq-emmc) for eMMC
+	  devices. By default this option is set to y.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called pwrseq_emmc.
+
+config PWRSEQ_SIMPLE
+	tristate "Simple HW reset support for MMC"
+	default y
+	depends on OF
+	help
+	  This selects simple hardware reset support aka pwrseq-simple for MMC
+	  devices. By default this option is set to y.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called pwrseq_simple.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 2c25138..f007151 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -8,5 +8,7 @@
 				   sdio.o sdio_ops.o sdio_bus.o \
 				   sdio_cis.o sdio_io.o sdio_irq.o \
 				   quirks.o slot-gpio.o
-mmc_core-$(CONFIG_OF)		+= pwrseq.o pwrseq_simple.o pwrseq_emmc.o
+mmc_core-$(CONFIG_OF)		+= pwrseq.o
+obj-$(CONFIG_PWRSEQ_SIMPLE)	+= pwrseq_simple.o
+obj-$(CONFIG_PWRSEQ_EMMC)	+= pwrseq_emmc.o
 mmc_core-$(CONFIG_DEBUG_FS)	+= debugfs.o
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 41b1e76..99275e4 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -36,6 +36,9 @@
 #include <linux/mmc/sd.h>
 #include <linux/mmc/slot-gpio.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
 #include "core.h"
 #include "bus.h"
 #include "host.h"
@@ -140,6 +143,8 @@
 			cmd->retries = 0;
 	}
 
+	trace_mmc_request_done(host, mrq);
+
 	if (err && cmd->retries && !mmc_card_removed(host->card)) {
 		/*
 		 * Request starter must handle retries - see
@@ -215,6 +220,8 @@
 		}
 	}
 
+	trace_mmc_request_start(host, mrq);
+
 	host->ops->request(host, mrq);
 }
 
@@ -2449,8 +2456,9 @@
 	ret = host->bus_ops->reset(host);
 	mmc_bus_put(host);
 
-	if (ret != -EOPNOTSUPP)
-		pr_warn("%s: tried to reset card\n", mmc_hostname(host));
+	if (ret)
+		pr_warn("%s: tried to reset card, got error %d\n",
+			mmc_hostname(host), ret);
 
 	return ret;
 }
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1d94607..e0a3ee1 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -33,14 +33,14 @@
 
 #define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)
 
-static DEFINE_IDR(mmc_host_idr);
+static DEFINE_IDA(mmc_host_ida);
 static DEFINE_SPINLOCK(mmc_host_lock);
 
 static void mmc_host_classdev_release(struct device *dev)
 {
 	struct mmc_host *host = cls_dev_to_mmc_host(dev);
 	spin_lock(&mmc_host_lock);
-	idr_remove(&mmc_host_idr, host->index);
+	ida_remove(&mmc_host_ida, host->index);
 	spin_unlock(&mmc_host_lock);
 	kfree(host);
 }
@@ -321,14 +321,20 @@
 
 	/* scanning will be enabled when we're ready */
 	host->rescan_disable = 1;
-	idr_preload(GFP_KERNEL);
+
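+	/* retry if another caller consumes the IDA preallocation first */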
+again:
+	if (!ida_pre_get(&mmc_host_ida, GFP_KERNEL)) {
+		kfree(host);
+		return NULL;
+	}
+
 	spin_lock(&mmc_host_lock);
-	err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
-	if (err >= 0)
-		host->index = err;
+	err = ida_get_new(&mmc_host_ida, &host->index);
 	spin_unlock(&mmc_host_lock);
-	idr_preload_end();
-	if (err < 0) {
+
+	if (err == -EAGAIN) {
+		goto again;
+	} else if (err) {
 		kfree(host);
 		return NULL;
 	}
@@ -356,11 +362,11 @@
 	 * They have to set these according to their abilities.
 	 */
 	host->max_segs = 1;
-	host->max_seg_size = PAGE_CACHE_SIZE;
+	host->max_seg_size = PAGE_SIZE;
 
-	host->max_req_size = PAGE_CACHE_SIZE;
+	host->max_req_size = PAGE_SIZE;
 	host->max_blk_size = 512;
-	host->max_blk_count = PAGE_CACHE_SIZE / 512;
+	host->max_blk_count = PAGE_SIZE / 512;
 
 	return host;
 }
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 4dbe3df..b81b08f 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -333,6 +333,9 @@
 	}
 }
 
+/* Minimum partition switch timeout in milliseconds */
+#define MMC_MIN_PART_SWITCH_TIME	300
+
 /*
  * Decode extended CSD.
  */
@@ -397,6 +400,10 @@
 
 		/* EXT_CSD value is in units of 10ms, but we store in ms */
 		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
+		/* Some eMMC cards set the value too low, so enforce a minimum */
+		if (card->ext_csd.part_time &&
+		    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
+			card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
 
 		/* Sleep / awake timeout in 100ns units */
 		if (sa_shift > 0 && sa_shift <= 0x17)
@@ -1244,10 +1251,11 @@
 {
 	struct mmc_host *host = card->host;
 	bool send_status = true;
-	unsigned int old_timing;
+	unsigned int old_timing, old_signal_voltage;
 	int err = -EINVAL;
 	u8 val;
 
+	old_signal_voltage = host->ios.signal_voltage;
 	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
 		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
 
@@ -1256,7 +1264,7 @@
 
 	/* If fails try again during next card power cycle */
 	if (err)
-		goto err;
+		return err;
 
 	mmc_select_driver_type(card);
 
@@ -1290,9 +1298,14 @@
 		}
 	}
 err:
-	if (err)
+	if (err) {
+		/* fall back to the old signal voltage; if that fails, report it */
+		if (__mmc_set_signal_voltage(host, old_signal_voltage))
+			err = -EIO;
+
 		pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
 		       __func__, err);
+	}
 	return err;
 }
 
@@ -1314,21 +1327,13 @@
 	if (err && err != -EBADMSG)
 		return err;
 
-	if (err) {
-		pr_warn("%s: switch to %s failed\n",
-			mmc_card_hs(card) ? "high-speed" :
-			(mmc_card_hs200(card) ? "hs200" : ""),
-			mmc_hostname(card->host));
-		err = 0;
-	}
-
 bus_speed:
 	/*
 	 * Set the bus speed to the selected bus timing.
 	 * If timing is not selected, backward compatible is the default.
 	 */
 	mmc_set_bus_speed(card);
-	return err;
+	return 0;
 }
 
 /*
@@ -1483,12 +1488,13 @@
 		if (err)
 			goto free_card;
 
-		/* If doing byte addressing, check if required to do sector
+		/*
+		 * If doing byte addressing, check if required to do sector
 		 * addressing.  Handle the case of <2GB cards needing sector
 		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
 		 * ocr register has bit 30 set for sector addressing.
 		 */
-		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
+		if (rocr & BIT(30))
 			mmc_card_set_blockaddr(card);
 
 		/* Erase size depends on CSD and Extended CSD */
@@ -1957,19 +1963,23 @@
 {
 	struct mmc_card *card = host->card;
 
-	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
-		return -EOPNOTSUPP;
+	/*
+	 * In the case of recovery, we can't expect flushing the cache to work
+	 * In the case of recovery, we can't expect the cache flush to always
+	 * work, but we make an attempt and ignore any errors.
+	mmc_flush_cache(host->card);
 
-	if (!mmc_can_reset(card))
-		return -EOPNOTSUPP;
-
-	mmc_set_clock(host, host->f_init);
-
-	host->ops->hw_reset(host);
-
-	/* Set initial state and call mmc_set_ios */
-	mmc_set_initial_state(host);
-
+	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
+	     mmc_can_reset(card)) {
+		/* If the card accepts the RST_n signal, send it. */
+		mmc_set_clock(host, host->f_init);
+		host->ops->hw_reset(host);
+		/* Set initial state and call mmc_set_ios */
+		mmc_set_initial_state(host);
+	} else {
+		/* Do a brute force power cycle */
+		mmc_power_cycle(host, card->ocr);
+	}
 	return mmc_init_card(host, card->ocr, card);
 }
 
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
index 4c1d175..9386c47 100644
--- a/drivers/mmc/core/pwrseq.c
+++ b/drivers/mmc/core/pwrseq.c
@@ -8,88 +8,55 @@
  *  MMC power sequence management
  */
 #include <linux/kernel.h>
-#include <linux/platform_device.h>
 #include <linux/err.h>
+#include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_platform.h>
 
 #include <linux/mmc/host.h>
 
 #include "pwrseq.h"
 
-struct mmc_pwrseq_match {
-	const char *compatible;
-	struct mmc_pwrseq *(*alloc)(struct mmc_host *host, struct device *dev);
-};
-
-static struct mmc_pwrseq_match pwrseq_match[] = {
-	{
-		.compatible = "mmc-pwrseq-simple",
-		.alloc = mmc_pwrseq_simple_alloc,
-	}, {
-		.compatible = "mmc-pwrseq-emmc",
-		.alloc = mmc_pwrseq_emmc_alloc,
-	},
-};
-
-static struct mmc_pwrseq_match *mmc_pwrseq_find(struct device_node *np)
-{
-	struct mmc_pwrseq_match *match = ERR_PTR(-ENODEV);
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(pwrseq_match); i++) {
-		if (of_device_is_compatible(np,	pwrseq_match[i].compatible)) {
-			match = &pwrseq_match[i];
-			break;
-		}
-	}
-
-	return match;
-}
+static DEFINE_MUTEX(pwrseq_list_mutex);
+static LIST_HEAD(pwrseq_list);
 
 int mmc_pwrseq_alloc(struct mmc_host *host)
 {
-	struct platform_device *pdev;
 	struct device_node *np;
-	struct mmc_pwrseq_match *match;
-	struct mmc_pwrseq *pwrseq;
-	int ret = 0;
+	struct mmc_pwrseq *p;
 
 	np = of_parse_phandle(host->parent->of_node, "mmc-pwrseq", 0);
 	if (!np)
 		return 0;
 
-	pdev = of_find_device_by_node(np);
-	if (!pdev) {
-		ret = -ENODEV;
-		goto err;
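+	/*
+	 * Look up a registered pwrseq provider whose OF node matches the
+	 * host's "mmc-pwrseq" phandle and pin its module while in use.
+	 */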
+	mutex_lock(&pwrseq_list_mutex);
+	list_for_each_entry(p, &pwrseq_list, pwrseq_node) {
+		if (p->dev->of_node == np) {
+			if (!try_module_get(p->owner))
+				dev_err(host->parent,
+					"increasing module refcount failed\n");
+			else
+				host->pwrseq = p;
+
+			break;
+		}
 	}
 
-	match = mmc_pwrseq_find(np);
-	if (IS_ERR(match)) {
-		ret = PTR_ERR(match);
-		goto err;
-	}
+	of_node_put(np);
+	mutex_unlock(&pwrseq_list_mutex);
 
-	pwrseq = match->alloc(host, &pdev->dev);
-	if (IS_ERR(pwrseq)) {
-		ret = PTR_ERR(pwrseq);
-		goto err;
-	}
+	if (!host->pwrseq)
+		return -EPROBE_DEFER;
 
-	host->pwrseq = pwrseq;
 	dev_info(host->parent, "allocated mmc-pwrseq\n");
 
-err:
-	of_node_put(np);
-	return ret;
+	return 0;
 }
 
 void mmc_pwrseq_pre_power_on(struct mmc_host *host)
 {
 	struct mmc_pwrseq *pwrseq = host->pwrseq;
 
-	if (pwrseq && pwrseq->ops && pwrseq->ops->pre_power_on)
+	if (pwrseq && pwrseq->ops->pre_power_on)
 		pwrseq->ops->pre_power_on(host);
 }
 
@@ -97,7 +64,7 @@
 {
 	struct mmc_pwrseq *pwrseq = host->pwrseq;
 
-	if (pwrseq && pwrseq->ops && pwrseq->ops->post_power_on)
+	if (pwrseq && pwrseq->ops->post_power_on)
 		pwrseq->ops->post_power_on(host);
 }
 
@@ -105,7 +72,7 @@
 {
 	struct mmc_pwrseq *pwrseq = host->pwrseq;
 
-	if (pwrseq && pwrseq->ops && pwrseq->ops->power_off)
+	if (pwrseq && pwrseq->ops->power_off)
 		pwrseq->ops->power_off(host);
 }
 
@@ -113,8 +80,31 @@
 {
 	struct mmc_pwrseq *pwrseq = host->pwrseq;
 
-	if (pwrseq && pwrseq->ops && pwrseq->ops->free)
-		pwrseq->ops->free(host);
-
-	host->pwrseq = NULL;
+	if (pwrseq) {
+		module_put(pwrseq->owner);
+		host->pwrseq = NULL;
+	}
 }
+
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+	if (!pwrseq || !pwrseq->ops || !pwrseq->dev)
+		return -EINVAL;
+
+	mutex_lock(&pwrseq_list_mutex);
+	list_add(&pwrseq->pwrseq_node, &pwrseq_list);
+	mutex_unlock(&pwrseq_list_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_register);
+
+void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq)
+{
+	if (pwrseq) {
+		mutex_lock(&pwrseq_list_mutex);
+		list_del(&pwrseq->pwrseq_node);
+		mutex_unlock(&pwrseq_list_mutex);
+	}
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_unregister);
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
index 133de04..d69e751 100644
--- a/drivers/mmc/core/pwrseq.h
+++ b/drivers/mmc/core/pwrseq.h
@@ -8,32 +8,39 @@
 #ifndef _MMC_CORE_PWRSEQ_H
 #define _MMC_CORE_PWRSEQ_H
 
+#include <linux/mmc/host.h>
+
 struct mmc_pwrseq_ops {
 	void (*pre_power_on)(struct mmc_host *host);
 	void (*post_power_on)(struct mmc_host *host);
 	void (*power_off)(struct mmc_host *host);
-	void (*free)(struct mmc_host *host);
 };
 
 struct mmc_pwrseq {
 	const struct mmc_pwrseq_ops *ops;
+	struct device *dev;
+	struct list_head pwrseq_node;
+	struct module *owner;
 };
 
 #ifdef CONFIG_OF
 
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq);
+void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq);
+
 int mmc_pwrseq_alloc(struct mmc_host *host);
 void mmc_pwrseq_pre_power_on(struct mmc_host *host);
 void mmc_pwrseq_post_power_on(struct mmc_host *host);
 void mmc_pwrseq_power_off(struct mmc_host *host);
 void mmc_pwrseq_free(struct mmc_host *host);
 
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
-					   struct device *dev);
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
-					 struct device *dev);
-
 #else
 
+static inline int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+	return -ENOSYS;
+}
+static inline void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq) {}
 static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
 static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
 static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index 4a82bc7..adc9c0c 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -9,6 +9,9 @@
  */
 #include <linux/delay.h>
 #include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -25,6 +28,8 @@
 	struct gpio_desc *reset_gpio;
 };
 
+#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
+
 static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
 {
 	gpiod_set_value(pwrseq->reset_gpio, 1);
@@ -35,27 +40,11 @@
 
 static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
 {
-	struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
-					struct mmc_pwrseq_emmc, pwrseq);
+	struct mmc_pwrseq_emmc *pwrseq =  to_pwrseq_emmc(host->pwrseq);
 
 	__mmc_pwrseq_emmc_reset(pwrseq);
 }
 
-static void mmc_pwrseq_emmc_free(struct mmc_host *host)
-{
-	struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
-					struct mmc_pwrseq_emmc, pwrseq);
-
-	unregister_restart_handler(&pwrseq->reset_nb);
-	gpiod_put(pwrseq->reset_gpio);
-	kfree(pwrseq);
-}
-
-static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
-	.post_power_on = mmc_pwrseq_emmc_reset,
-	.free = mmc_pwrseq_emmc_free,
-};
-
 static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
 				    unsigned long mode, void *cmd)
 {
@@ -66,21 +55,22 @@
 	return NOTIFY_DONE;
 }
 
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
-					 struct device *dev)
+static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
+	.post_power_on = mmc_pwrseq_emmc_reset,
+};
+
+static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
 {
 	struct mmc_pwrseq_emmc *pwrseq;
-	int ret = 0;
+	struct device *dev = &pdev->dev;
 
-	pwrseq = kzalloc(sizeof(struct mmc_pwrseq_emmc), GFP_KERNEL);
+	pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
 	if (!pwrseq)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
-	pwrseq->reset_gpio = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
-	if (IS_ERR(pwrseq->reset_gpio)) {
-		ret = PTR_ERR(pwrseq->reset_gpio);
-		goto free;
-	}
+	pwrseq->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(pwrseq->reset_gpio))
+		return PTR_ERR(pwrseq->reset_gpio);
 
 	/*
 	 * register reset handler to ensure emmc reset also from
@@ -92,9 +82,38 @@
 	register_restart_handler(&pwrseq->reset_nb);
 
 	pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
+	pwrseq->pwrseq.dev = dev;
+	pwrseq->pwrseq.owner = THIS_MODULE;
+	platform_set_drvdata(pdev, pwrseq);
 
-	return &pwrseq->pwrseq;
-free:
-	kfree(pwrseq);
-	return ERR_PTR(ret);
+	return mmc_pwrseq_register(&pwrseq->pwrseq);
 }
+
+static int mmc_pwrseq_emmc_remove(struct platform_device *pdev)
+{
+	struct mmc_pwrseq_emmc *pwrseq = platform_get_drvdata(pdev);
+
+	unregister_restart_handler(&pwrseq->reset_nb);
+	mmc_pwrseq_unregister(&pwrseq->pwrseq);
+
+	return 0;
+}
+
+static const struct of_device_id mmc_pwrseq_emmc_of_match[] = {
+	{ .compatible = "mmc-pwrseq-emmc",},
+	{/* sentinel */},
+};
+
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_emmc_of_match);
+
+static struct platform_driver mmc_pwrseq_emmc_driver = {
+	.probe = mmc_pwrseq_emmc_probe,
+	.remove = mmc_pwrseq_emmc_remove,
+	.driver = {
+		.name = "pwrseq_emmc",
+		.of_match_table = mmc_pwrseq_emmc_of_match,
+	},
+};
+
+module_platform_driver(mmc_pwrseq_emmc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index bc173e1..450d907 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -8,7 +8,10 @@
  *  Simple MMC power sequence management
  */
 #include <linux/clk.h>
+#include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -25,6 +28,8 @@
 	struct gpio_descs *reset_gpios;
 };
 
+#define to_pwrseq_simple(p) container_of(p, struct mmc_pwrseq_simple, pwrseq)
+
 static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
 					      int value)
 {
@@ -44,8 +49,7 @@
 
 static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
 {
-	struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
-					struct mmc_pwrseq_simple, pwrseq);
+	struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
 
 	if (!IS_ERR(pwrseq->ext_clk) && !pwrseq->clk_enabled) {
 		clk_prepare_enable(pwrseq->ext_clk);
@@ -57,16 +61,14 @@
 
 static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
 {
-	struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
-					struct mmc_pwrseq_simple, pwrseq);
+	struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
 
 	mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
 }
 
 static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
 {
-	struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
-					struct mmc_pwrseq_simple, pwrseq);
+	struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
 
 	mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
 
@@ -76,59 +78,64 @@
 	}
 }
 
-static void mmc_pwrseq_simple_free(struct mmc_host *host)
-{
-	struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
-					struct mmc_pwrseq_simple, pwrseq);
-
-	if (!IS_ERR(pwrseq->reset_gpios))
-		gpiod_put_array(pwrseq->reset_gpios);
-
-	if (!IS_ERR(pwrseq->ext_clk))
-		clk_put(pwrseq->ext_clk);
-
-	kfree(pwrseq);
-}
-
 static const struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
 	.pre_power_on = mmc_pwrseq_simple_pre_power_on,
 	.post_power_on = mmc_pwrseq_simple_post_power_on,
 	.power_off = mmc_pwrseq_simple_power_off,
-	.free = mmc_pwrseq_simple_free,
 };
 
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
-					   struct device *dev)
+static const struct of_device_id mmc_pwrseq_simple_of_match[] = {
+	{ .compatible = "mmc-pwrseq-simple",},
+	{/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_simple_of_match);
+
+static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
 {
 	struct mmc_pwrseq_simple *pwrseq;
-	int ret = 0;
+	struct device *dev = &pdev->dev;
 
-	pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
+	pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
 	if (!pwrseq)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
-	pwrseq->ext_clk = clk_get(dev, "ext_clock");
-	if (IS_ERR(pwrseq->ext_clk) &&
-	    PTR_ERR(pwrseq->ext_clk) != -ENOENT) {
-		ret = PTR_ERR(pwrseq->ext_clk);
-		goto free;
-	}
+	pwrseq->ext_clk = devm_clk_get(dev, "ext_clock");
+	if (IS_ERR(pwrseq->ext_clk) && PTR_ERR(pwrseq->ext_clk) != -ENOENT)
+		return PTR_ERR(pwrseq->ext_clk);
 
-	pwrseq->reset_gpios = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
+	pwrseq->reset_gpios = devm_gpiod_get_array(dev, "reset",
+							GPIOD_OUT_HIGH);
 	if (IS_ERR(pwrseq->reset_gpios) &&
 	    PTR_ERR(pwrseq->reset_gpios) != -ENOENT &&
 	    PTR_ERR(pwrseq->reset_gpios) != -ENOSYS) {
-		ret = PTR_ERR(pwrseq->reset_gpios);
-		goto clk_put;
+		return PTR_ERR(pwrseq->reset_gpios);
 	}
 
+	pwrseq->pwrseq.dev = dev;
 	pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
+	pwrseq->pwrseq.owner = THIS_MODULE;
+	platform_set_drvdata(pdev, pwrseq);
 
-	return &pwrseq->pwrseq;
-clk_put:
-	if (!IS_ERR(pwrseq->ext_clk))
-		clk_put(pwrseq->ext_clk);
-free:
-	kfree(pwrseq);
-	return ERR_PTR(ret);
+	return mmc_pwrseq_register(&pwrseq->pwrseq);
 }
+
+static int mmc_pwrseq_simple_remove(struct platform_device *pdev)
+{
+	struct mmc_pwrseq_simple *pwrseq = platform_get_drvdata(pdev);
+
+	mmc_pwrseq_unregister(&pwrseq->pwrseq);
+
+	return 0;
+}
+
+static struct platform_driver mmc_pwrseq_simple_driver = {
+	.probe = mmc_pwrseq_simple_probe,
+	.remove = mmc_pwrseq_simple_remove,
+	.driver = {
+		.name = "pwrseq_simple",
+		.of_match_table = mmc_pwrseq_simple_of_match,
+	},
+};
+
+module_platform_driver(mmc_pwrseq_simple_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 6f6fc52..dcb3dee 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -177,8 +177,13 @@
 	vsn = func->card->cccr.sdio_vsn;
 	min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
 
-	if (size < min_size)
+	if (size == 28 && vsn == SDIO_SDIO_REV_1_10) {
+		pr_warn("%s: card has broken SDIO 1.1 CIS, forcing SDIO 1.0\n",
+			mmc_hostname(card->host));
+		vsn = SDIO_SDIO_REV_1_00;
+	} else if (size < min_size) {
 		return -EINVAL;
+	}
 
 	/* TPLFE_MAX_BLK_SIZE */
 	func->max_blksize = buf[12] | (buf[13] << 8);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 04feea8..0aa484c 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@
 config MMC_SDHCI_ACPI
 	tristate "SDHCI support for ACPI enumerated SDHCI controllers"
 	depends on MMC_SDHCI && ACPI
+	select IOSF_MBI if X86
 	help
 	  This selects support for ACPI enumerated SDHCI controllers,
 	  identified by ACPI Compatibility ID PNP0D40 or specific
@@ -676,9 +677,9 @@
 	depends on HAS_DMA
 	depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
 	help
-	  This selects the MMC Host Interface controller (MMCIF).
+	  This selects the MMC Host Interface controller (MMCIF) found in various
+	  Renesas SoCs for SH and ARM architectures.
 
-	  This driver supports MMCIF in sh7724/sh7757/sh7372.
 
 config MMC_JZ4740
 	tristate "JZ4740 SD/Multimedia Card Interface support"
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 9268c41..0ad8ef5 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1410,8 +1410,6 @@
 	WARN_ON(slot->mrq);
 	dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
 
-	pm_runtime_get_sync(&host->pdev->dev);
-
 	/*
 	 * We may "know" the card is gone even though there's still an
 	 * electrical connection. If so, we really need to communicate
@@ -1442,8 +1440,6 @@
 	struct atmel_mci	*host = slot->host;
 	unsigned int		i;
 
-	pm_runtime_get_sync(&host->pdev->dev);
-
 	slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
 	switch (ios->bus_width) {
 	case MMC_BUS_WIDTH_1:
@@ -1576,8 +1572,6 @@
 		break;
 	}
 
-	pm_runtime_mark_last_busy(&host->pdev->dev);
-	pm_runtime_put_autosuspend(&host->pdev->dev);
 }
 
 static int atmci_get_ro(struct mmc_host *mmc)
@@ -1669,9 +1663,6 @@
 	spin_unlock(&host->lock);
 	mmc_request_done(prev_mmc, mrq);
 	spin_lock(&host->lock);
-
-	pm_runtime_mark_last_busy(&host->pdev->dev);
-	pm_runtime_put_autosuspend(&host->pdev->dev);
 }
 
 static void atmci_command_complete(struct atmel_mci *host,
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 693144e..a56373c 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -32,12 +32,10 @@
 #include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
-#include <linux/edma.h>
 #include <linux/mmc/mmc.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 
-#include <linux/platform_data/edma.h>
 #include <linux/platform_data/mmc-davinci.h>
 
 /*
@@ -202,7 +200,6 @@
 	u32 buffer_bytes_left;
 	u32 bytes_left;
 
-	u32 rxdma, txdma;
 	struct dma_chan *dma_tx;
 	struct dma_chan *dma_rx;
 	bool use_dma;
@@ -513,35 +510,20 @@
 
 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
 {
-	int r;
-	dma_cap_mask_t mask;
-
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
-	host->dma_tx =
-		dma_request_slave_channel_compat(mask, edma_filter_fn,
-				&host->txdma, mmc_dev(host->mmc), "tx");
-	if (!host->dma_tx) {
+	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
+	if (IS_ERR(host->dma_tx)) {
 		dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
-		return -ENODEV;
+		return PTR_ERR(host->dma_tx);
 	}
 
-	host->dma_rx =
-		dma_request_slave_channel_compat(mask, edma_filter_fn,
-				&host->rxdma, mmc_dev(host->mmc), "rx");
-	if (!host->dma_rx) {
+	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
+	if (IS_ERR(host->dma_rx)) {
 		dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
-		r = -ENODEV;
-		goto free_master_write;
+		dma_release_channel(host->dma_tx);
+		return PTR_ERR(host->dma_rx);
 	}
 
 	return 0;
-
-free_master_write:
-	dma_release_channel(host->dma_tx);
-
-	return r;
 }
 
 /*----------------------------------------------------------------------*/
@@ -1223,7 +1205,7 @@
 	struct mmc_davinci_host *host = NULL;
 	struct mmc_host *mmc = NULL;
 	struct resource *r, *mem = NULL;
-	int ret = 0, irq = 0;
+	int ret, irq;
 	size_t mem_size;
 	const struct platform_device_id *id_entry;
 
@@ -1233,50 +1215,40 @@
 		return -ENOENT;
 	}
 
-	ret = -ENODEV;
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
 	if (!r || irq == NO_IRQ)
-		goto out;
+		return -ENODEV;
 
-	ret = -EBUSY;
 	mem_size = resource_size(r);
-	mem = request_mem_region(r->start, mem_size, pdev->name);
+	mem = devm_request_mem_region(&pdev->dev, r->start, mem_size,
+				      pdev->name);
 	if (!mem)
-		goto out;
+		return -EBUSY;
 
-	ret = -ENOMEM;
 	mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
 	if (!mmc)
-		goto out;
+		return -ENOMEM;
 
 	host = mmc_priv(mmc);
 	host->mmc = mmc;	/* Important */
 
-	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!r)
-		dev_warn(&pdev->dev, "RX DMA resource not specified\n");
-	else
-		host->rxdma = r->start;
-
-	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!r)
-		dev_warn(&pdev->dev, "TX DMA resource not specified\n");
-	else
-		host->txdma = r->start;
-
 	host->mem_res = mem;
-	host->base = ioremap(mem->start, mem_size);
-	if (!host->base)
-		goto out;
+	host->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
+	if (!host->base) {
+		ret = -ENOMEM;
+		goto ioremap_fail;
+	}
 
-	ret = -ENXIO;
-	host->clk = clk_get(&pdev->dev, "MMCSDCLK");
+	host->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(host->clk)) {
 		ret = PTR_ERR(host->clk);
-		goto out;
+		goto clk_get_fail;
 	}
-	clk_enable(host->clk);
+	ret = clk_prepare_enable(host->clk);
+	if (ret)
+		goto clk_prepare_enable_fail;
+
 	host->mmc_input_clk = clk_get_rate(host->clk);
 
 	init_mmcsd_host(host);
@@ -1291,8 +1263,13 @@
 	host->mmc_irq = irq;
 	host->sdio_irq = platform_get_irq(pdev, 1);
 
-	if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
-		host->use_dma = 0;
+	if (host->use_dma) {
+		ret = davinci_acquire_dma_channels(host);
+		if (ret == -EPROBE_DEFER)
+			goto dma_probe_defer;
+		else if (ret)
+			host->use_dma = 0;
+	}
 
 	/* REVISIT:  someday, support IRQ-driven card detection.  */
 	mmc->caps |= MMC_CAP_NEEDS_POLL;
@@ -1346,15 +1323,17 @@
 
 	ret = mmc_add_host(mmc);
 	if (ret < 0)
-		goto out;
+		goto mmc_add_host_fail;
 
-	ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
+	ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0,
+			       mmc_hostname(mmc), host);
 	if (ret)
-		goto out;
+		goto request_irq_fail;
 
 	if (host->sdio_irq >= 0) {
-		ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
-				  mmc_hostname(mmc), host);
+		ret = devm_request_irq(&pdev->dev, host->sdio_irq,
+				       mmc_davinci_sdio_irq, 0,
+				       mmc_hostname(mmc), host);
 		if (!ret)
 			mmc->caps |= MMC_CAP_SDIO_IRQ;
 	}
@@ -1367,28 +1346,18 @@
 
 	return 0;
 
-out:
+request_irq_fail:
+	mmc_remove_host(mmc);
+mmc_add_host_fail:
 	mmc_davinci_cpufreq_deregister(host);
 cpu_freq_fail:
-	if (host) {
-		davinci_release_dma_channels(host);
-
-		if (host->clk) {
-			clk_disable(host->clk);
-			clk_put(host->clk);
-		}
-
-		if (host->base)
-			iounmap(host->base);
-	}
-
-	if (mmc)
-		mmc_free_host(mmc);
-
-	if (mem)
-		release_resource(mem);
-
-	dev_dbg(&pdev->dev, "probe err %d\n", ret);
+	davinci_release_dma_channels(host);
+dma_probe_defer:
+	clk_disable_unprepare(host->clk);
+clk_prepare_enable_fail:
+clk_get_fail:
+ioremap_fail:
+	mmc_free_host(mmc);
 
 	return ret;
 }
@@ -1397,25 +1366,11 @@
 {
 	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
 
-	if (host) {
-		mmc_davinci_cpufreq_deregister(host);
-
-		mmc_remove_host(host->mmc);
-		free_irq(host->mmc_irq, host);
-		if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
-			free_irq(host->sdio_irq, host);
-
-		davinci_release_dma_channels(host);
-
-		clk_disable(host->clk);
-		clk_put(host->clk);
-
-		iounmap(host->base);
-
-		release_resource(host->mem_res);
-
-		mmc_free_host(host->mmc);
-	}
+	mmc_remove_host(host->mmc);
+	mmc_davinci_cpufreq_deregister(host);
+	davinci_release_dma_channels(host);
+	clk_disable_unprepare(host->clk);
+	mmc_free_host(host->mmc);
 
 	return 0;
 }
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 8790f2a..7e3a324 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -91,10 +91,14 @@
 		return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
 }
 
-static int dw_mci_exynos_priv_init(struct dw_mci *host)
+static void dw_mci_exynos_config_smu(struct dw_mci *host)
 {
 	struct dw_mci_exynos_priv_data *priv = host->priv;
 
+	/*
+	 * If the Exynos variant provides a Security Management Unit (SMU),
+	 * configure it for non-encryption mode at this time.
+	 */
 	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
 		priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
 		mci_writel(host, MPSBEGIN0, 0);
@@ -104,6 +108,13 @@
 			   SDMMC_MPSCTRL_VALID |
 			   SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT);
 	}
+}
+
+static int dw_mci_exynos_priv_init(struct dw_mci *host)
+{
+	struct dw_mci_exynos_priv_data *priv = host->priv;
+
+	dw_mci_exynos_config_smu(host);
 
 	if (priv->ctrl_type >= DW_MCI_TYPE_EXYNOS5420) {
 		priv->saved_strobe_ctrl = mci_readl(host, HS400_DLINE_CTRL);
@@ -115,13 +126,6 @@
 				DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
 	}
 
-	return 0;
-}
-
-static int dw_mci_exynos_setup_clock(struct dw_mci *host)
-{
-	struct dw_mci_exynos_priv_data *priv = host->priv;
-
 	host->bus_hz /= (priv->ciu_div + 1);
 
 	return 0;
@@ -169,7 +173,7 @@
 {
 	struct dw_mci *host = dev_get_drvdata(dev);
 
-	dw_mci_exynos_priv_init(host);
+	dw_mci_exynos_config_smu(host);
 	return dw_mci_resume(host);
 }
 
@@ -489,7 +493,6 @@
 static const struct dw_mci_drv_data exynos_drv_data = {
 	.caps			= exynos_dwmmc_caps,
 	.init			= dw_mci_exynos_priv_init,
-	.setup_clock		= dw_mci_exynos_setup_clock,
 	.set_ios		= dw_mci_exynos_set_ios,
 	.parse_dt		= dw_mci_exynos_parse_dt,
 	.execute_tuning		= dw_mci_exynos_execute_tuning,
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 84e50f3..8c20b81 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -26,13 +26,6 @@
 	int			default_sample_phase;
 };
 
-static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
-{
-	host->bus_hz /= RK3288_CLKGEN_DIV;
-
-	return 0;
-}
-
 static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
 {
 	struct dw_mci_rockchip_priv_data *priv = host->priv;
@@ -231,18 +224,30 @@
 	/* It needs this quirk on all Rockchip SoCs */
 	host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO;
 
+	if (of_device_is_compatible(host->dev->of_node,
+				    "rockchip,rk3288-dw-mshc"))
+		host->bus_hz /= RK3288_CLKGEN_DIV;
+
 	return 0;
 }
 
+/* Common capabilities of RK3288 SoC */
+static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
+	MMC_CAP_ERASE,
+	MMC_CAP_ERASE,
+	MMC_CAP_ERASE,
+	MMC_CAP_ERASE,
+};
+
 static const struct dw_mci_drv_data rk2928_drv_data = {
 	.init			= dw_mci_rockchip_init,
 };
 
 static const struct dw_mci_drv_data rk3288_drv_data = {
+	.caps			= dw_mci_rk3288_dwmmc_caps,
 	.set_ios		= dw_mci_rk3288_set_ios,
 	.execute_tuning		= dw_mci_rk3288_execute_tuning,
 	.parse_dt		= dw_mci_rk3288_parse_dt,
-	.setup_clock    = dw_mci_rk3288_setup_clock,
 	.init			= dw_mci_rockchip_init,
 };
 
@@ -269,33 +274,13 @@
 	return dw_mci_pltfm_register(pdev, drv_data);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_rockchip_suspend(struct device *dev)
-{
-	struct dw_mci *host = dev_get_drvdata(dev);
-
-	return dw_mci_suspend(host);
-}
-
-static int dw_mci_rockchip_resume(struct device *dev)
-{
-	struct dw_mci *host = dev_get_drvdata(dev);
-
-	return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(dw_mci_rockchip_pmops,
-			 dw_mci_rockchip_suspend,
-			 dw_mci_rockchip_resume);
-
 static struct platform_driver dw_mci_rockchip_pltfm_driver = {
 	.probe		= dw_mci_rockchip_probe,
 	.remove		= dw_mci_pltfm_remove,
 	.driver		= {
 		.name		= "dwmmc_rockchip",
 		.of_match_table	= dw_mci_rockchip_match,
-		.pm		= &dw_mci_rockchip_pmops,
+		.pm		= &dw_mci_pltfm_pmops,
 	},
 };
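
The Rockchip glue above drops its private suspend/resume wrappers and points .pm at the shared ops exported by dw_mmc-pltfm. A hedged sketch of that wiring is below; the "dwmmc_foo" driver, foo_probe() and the NULL drv_data are hypothetical, while dw_mci_pltfm_register(), dw_mci_pltfm_remove() and dw_mci_pltfm_pmops are the existing dw_mmc-pltfm symbols.

#include <linux/module.h>
#include <linux/platform_device.h>
#include "dw_mmc-pltfm.h"	/* declares dw_mci_pltfm_pmops */

static int foo_probe(struct platform_device *pdev)
{
	/* no SoC-specific drv_data in this illustrative glue */
	return dw_mci_pltfm_register(pdev, NULL);
}

static struct platform_driver foo_pltfm_driver = {
	.probe	= foo_probe,
	.remove	= dw_mci_pltfm_remove,
	.driver	= {
		.name	= "dwmmc_foo",
		.pm	= &dw_mci_pltfm_pmops,	/* shared suspend/resume ops */
	},
};
module_platform_driver(foo_pltfm_driver);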
 
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 242f9a0..9dd1bd3 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -680,7 +680,7 @@
 
 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
 {
-	dmaengine_terminate_all(host->dms->ch);
+	dmaengine_terminate_async(host->dms->ch);
 }
 
 static int dw_mci_edmac_start_dma(struct dw_mci *host,
@@ -3003,15 +3003,6 @@
 		}
 	}
 
-	if (drv_data && drv_data->setup_clock) {
-		ret = drv_data->setup_clock(host);
-		if (ret) {
-			dev_err(host->dev,
-				"implementation specific clock setup failed\n");
-			goto err_clk_ciu;
-		}
-	}
-
 	setup_timer(&host->cmd11_timer,
 		    dw_mci_cmd11_timer, (unsigned long)host);
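
The stop_dma change above replaces dmaengine_terminate_all() with dmaengine_terminate_async(), which requests the abort without waiting and is therefore usable from atomic context; a caller that later frees the transfer's memory is expected to synchronize from sleepable context. A hedged sketch of that pairing (the foo_* wrappers are illustrative, the dmaengine calls are the real API):

#include <linux/dmaengine.h>

/* May run in atomic context, e.g. under the host spinlock. */
static void foo_stop_dma(struct dma_chan *ch)
{
	dmaengine_terminate_async(ch);	/* request the abort, do not wait */
}

/* Runs later in process context, before DMA buffers are freed. */
static void foo_teardown_dma(struct dma_chan *ch)
{
	dmaengine_synchronize(ch);	/* wait for the abort to complete */
	dma_release_channel(ch);
}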
 
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 68d5da2..1e8d838 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -277,7 +277,6 @@
  * dw_mci driver data - dw-mshc implementation specific driver data.
  * @caps: mmc subsystem specified capabilities of the controller(s).
  * @init: early implementation specific initialization.
- * @setup_clock: implementation specific clock configuration.
  * @set_ios: handle bus specific extensions.
  * @parse_dt: parse implementation specific device tree properties.
  * @execute_tuning: implementation specific tuning procedure.
@@ -289,7 +288,6 @@
 struct dw_mci_drv_data {
 	unsigned long	*caps;
 	int		(*init)(struct dw_mci *host);
-	int		(*setup_clock)(struct dw_mci *host);
 	void		(*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
 	int		(*parse_dt)(struct dw_mci *host);
 	int		(*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2e6c968..df990bb 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -226,16 +226,11 @@
 	unsigned long flags;
 	int busy = 0;
 
-	pm_runtime_get_sync(mmc_dev(mmc));
-
 	spin_lock_irqsave(&host->lock, flags);
 	if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
 		busy = 1;
 	spin_unlock_irqrestore(&host->lock, flags);
 
-	pm_runtime_mark_last_busy(mmc_dev(mmc));
-	pm_runtime_put_autosuspend(mmc_dev(mmc));
-
 	return busy;
 }
 
@@ -381,9 +376,6 @@
 	host->cmd = NULL;
 
 	mmc_request_done(host->mmc, mrq);
-
-	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
-	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
 }
 
 static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
@@ -1290,8 +1282,6 @@
 		return;
 	}
 
-	pm_runtime_get_sync(mmc_dev(mmc));
-
 	spin_lock_irqsave(&host->lock, flags);
 
 	host->mrq = mrq;
@@ -1318,8 +1308,6 @@
 	unsigned long flags;
 	int ret;
 
-	pm_runtime_get_sync(mmc_dev(mmc));
-
 	if (host->plat->ios_handler &&
 		host->plat->ios_handler(mmc_dev(mmc), ios))
 			dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
@@ -1414,9 +1402,6 @@
 	mmci_reg_delay(host);
 
 	spin_unlock_irqrestore(&host->lock, flags);
-
-	pm_runtime_mark_last_busy(mmc_dev(mmc));
-	pm_runtime_put_autosuspend(mmc_dev(mmc));
 }
 
 static int mmci_get_cd(struct mmc_host *mmc)
@@ -1440,8 +1425,6 @@
 
 	if (!IS_ERR(mmc->supply.vqmmc)) {
 
-		pm_runtime_get_sync(mmc_dev(mmc));
-
 		switch (ios->signal_voltage) {
 		case MMC_SIGNAL_VOLTAGE_330:
 			ret = regulator_set_voltage(mmc->supply.vqmmc,
@@ -1459,9 +1442,6 @@
 
 		if (ret)
 			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
-
-		pm_runtime_mark_last_busy(mmc_dev(mmc));
-		pm_runtime_put_autosuspend(mmc_dev(mmc));
 	}
 
 	return ret;
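
The mmci hunks above (and the mtk-sd, omap_hsmmc and sdhci ones that follow) delete the pm_runtime_get_sync()/pm_runtime_put_autosuspend() pairs from the individual mmc_host_ops callbacks: the MMC core now takes a runtime-PM reference on mmc->parent while the host is claimed, so only the probe-time autosuspend setup remains in the drivers. A hedged sketch of that remaining setup; the 50 ms delay and foo_probe() name are illustrative.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/* ... controller init ... */

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);	/* ms, example */
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* ->request()/->set_ios() etc. no longer need their own get/put */
	return 0;
}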
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index b17f30d..5642f71 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -736,9 +736,6 @@
 	if (mrq->data)
 		msdc_unprepare_data(host, mrq);
 	mmc_request_done(host->mmc, mrq);
-
-	pm_runtime_mark_last_busy(host->dev);
-	pm_runtime_put_autosuspend(host->dev);
 }
 
 /* returns true if command is fully handled; returns false otherwise */
@@ -886,8 +883,6 @@
 	WARN_ON(host->mrq);
 	host->mrq = mrq;
 
-	pm_runtime_get_sync(host->dev);
-
 	if (mrq->data)
 		msdc_prepare_data(host, mrq);
 
@@ -1201,8 +1196,6 @@
 	struct msdc_host *host = mmc_priv(mmc);
 	int ret;
 
-	pm_runtime_get_sync(host->dev);
-
 	msdc_set_buswidth(host, ios->bus_width);
 
 	/* Suspend/Resume will do power off/on */
@@ -1214,7 +1207,7 @@
 					ios->vdd);
 			if (ret) {
 				dev_err(host->dev, "Failed to set vmmc power!\n");
-				goto end;
+				return;
 			}
 		}
 		break;
@@ -1242,10 +1235,6 @@
 
 	if (host->mclk != ios->clock || host->timing != ios->timing)
 		msdc_set_mclk(host, ios->timing, ios->clock);
-
-end:
-	pm_runtime_mark_last_busy(host->dev);
-	pm_runtime_put_autosuspend(host->dev);
 }
 
 static u32 test_delay_bit(u32 delay, u32 bit)
@@ -1408,19 +1397,15 @@
 	struct msdc_host *host = mmc_priv(mmc);
 	int ret;
 
-	pm_runtime_get_sync(host->dev);
 	ret = msdc_tune_response(mmc, opcode);
 	if (ret == -EIO) {
 		dev_err(host->dev, "Tune response fail!\n");
-		goto out;
+		return ret;
 	}
 	ret = msdc_tune_data(mmc, opcode);
 	if (ret == -EIO)
 		dev_err(host->dev, "Tune data fail!\n");
 
-out:
-	pm_runtime_mark_last_busy(host->dev);
-	pm_runtime_put_autosuspend(host->dev);
 	return ret;
 }
 
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index b9958a1..f23d65e 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -23,7 +23,6 @@
 #include <linux/spinlock.h>
 #include <linux/timer.h>
 #include <linux/of.h>
-#include <linux/omap-dma.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/mmc.h>
@@ -1321,8 +1320,6 @@
 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
 	struct mmc_omap_host *host = NULL;
 	struct resource *res;
-	dma_cap_mask_t mask;
-	unsigned sig = 0;
 	int i, ret = 0;
 	int irq;
 
@@ -1382,29 +1379,34 @@
 		goto err_free_iclk;
 	}
 
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
 	host->dma_tx_burst = -1;
 	host->dma_rx_burst = -1;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
-	if (res)
-		sig = res->start;
-	host->dma_tx = dma_request_slave_channel_compat(mask,
-				omap_dma_filter_fn, &sig, &pdev->dev, "tx");
-	if (!host->dma_tx)
-		dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
-			sig);
+	host->dma_tx = dma_request_chan(&pdev->dev, "tx");
+	if (IS_ERR(host->dma_tx)) {
+		ret = PTR_ERR(host->dma_tx);
+		if (ret == -EPROBE_DEFER) {
+			clk_put(host->fclk);
+			goto err_free_iclk;
+		}
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
-	if (res)
-		sig = res->start;
-	host->dma_rx = dma_request_slave_channel_compat(mask,
-				omap_dma_filter_fn, &sig, &pdev->dev, "rx");
-	if (!host->dma_rx)
-		dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
-			sig);
+		host->dma_tx = NULL;
+		dev_warn(host->dev, "TX DMA channel request failed\n");
+	}
+
+	host->dma_rx = dma_request_chan(&pdev->dev, "rx");
+	if (IS_ERR(host->dma_rx)) {
+		ret = PTR_ERR(host->dma_rx);
+		if (ret == -EPROBE_DEFER) {
+			if (host->dma_tx)
+				dma_release_channel(host->dma_tx);
+			clk_put(host->fclk);
+			goto err_free_iclk;
+		}
+
+		host->dma_rx = NULL;
+		dev_warn(host->dev, "RX DMA channel request failed\n");
+	}
 
 	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
 	if (ret)
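
The omap conversion above (and the omap_hsmmc one that follows) replaces dma_request_slave_channel_compat() with dma_request_chan(), which returns an ERR_PTR and lets -EPROBE_DEFER propagate when the DMA provider has not probed yet. A hedged sketch of the idiom; struct foo_host and foo_request_dma() are illustrative.

#include <linux/dmaengine.h>
#include <linux/err.h>

struct foo_host {
	struct dma_chan *dma_tx;
};

static int foo_request_dma(struct device *dev, struct foo_host *host)
{
	host->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(host->dma_tx)) {
		int ret = PTR_ERR(host->dma_tx);

		if (ret == -EPROBE_DEFER)
			return ret;	/* provider not ready: retry probe later */

		host->dma_tx = NULL;	/* fall back to PIO */
		dev_warn(dev, "no TX DMA channel, falling back to PIO\n");
	}

	return 0;
}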
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index f9ac3bb..24ebc9a 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -32,7 +32,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_gpio.h>
 #include <linux/of_device.h>
-#include <linux/omap-dmaengine.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/core.h>
 #include <linux/mmc/mmc.h>
@@ -351,15 +350,14 @@
 	return 0;
 }
 
-static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
+static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
+				int vdd)
 {
-	struct omap_hsmmc_host *host =
-		platform_get_drvdata(to_platform_device(dev));
 	struct mmc_host *mmc = host->mmc;
 	int ret = 0;
 
 	if (mmc_pdata(host)->set_power)
-		return mmc_pdata(host)->set_power(dev, power_on, vdd);
+		return mmc_pdata(host)->set_power(host->dev, power_on, vdd);
 
 	/*
 	 * If we don't see a Vcc regulator, assume it's a fixed
@@ -369,7 +367,7 @@
 		return 0;
 
 	if (mmc_pdata(host)->before_set_reg)
-		mmc_pdata(host)->before_set_reg(dev, power_on, vdd);
+		mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd);
 
 	ret = omap_hsmmc_set_pbias(host, false, 0);
 	if (ret)
@@ -403,7 +401,7 @@
 	}
 
 	if (mmc_pdata(host)->after_set_reg)
-		mmc_pdata(host)->after_set_reg(dev, power_on, vdd);
+		mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd);
 
 	return 0;
 
@@ -968,8 +966,6 @@
 		return;
 	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
-	pm_runtime_mark_last_busy(host->dev);
-	pm_runtime_put_autosuspend(host->dev);
 }
 
 /*
@@ -1250,17 +1246,15 @@
 	int ret;
 
 	/* Disable the clocks */
-	pm_runtime_put_sync(host->dev);
 	if (host->dbclk)
 		clk_disable_unprepare(host->dbclk);
 
 	/* Turn the power off */
-	ret = omap_hsmmc_set_power(host->dev, 0, 0);
+	ret = omap_hsmmc_set_power(host, 0, 0);
 
 	/* Turn the power ON with given VDD 1.8 or 3.0v */
 	if (!ret)
-		ret = omap_hsmmc_set_power(host->dev, 1, vdd);
-	pm_runtime_get_sync(host->dev);
+		ret = omap_hsmmc_set_power(host, 1, vdd);
 	if (host->dbclk)
 		clk_prepare_enable(host->dbclk);
 
@@ -1368,8 +1362,6 @@
 
 		host->mrq = NULL;
 		mmc_request_done(host->mmc, mrq);
-		pm_runtime_mark_last_busy(host->dev);
-		pm_runtime_put_autosuspend(host->dev);
 	}
 }
 
@@ -1602,7 +1594,6 @@
 
 	BUG_ON(host->req_in_progress);
 	BUG_ON(host->dma_ch != -1);
-	pm_runtime_get_sync(host->dev);
 	if (host->protect_card) {
 		if (host->reqs_blocked < 3) {
 			/*
@@ -1619,8 +1610,6 @@
 			req->data->error = -EBADF;
 		req->cmd->retries = 0;
 		mmc_request_done(mmc, req);
-		pm_runtime_mark_last_busy(host->dev);
-		pm_runtime_put_autosuspend(host->dev);
 		return;
 	} else if (host->reqs_blocked)
 		host->reqs_blocked = 0;
@@ -1634,8 +1623,6 @@
 			req->data->error = err;
 		host->mrq = NULL;
 		mmc_request_done(mmc, req);
-		pm_runtime_mark_last_busy(host->dev);
-		pm_runtime_put_autosuspend(host->dev);
 		return;
 	}
 	if (req->sbc && !(host->flags & AUTO_CMD23)) {
@@ -1653,15 +1640,13 @@
 	struct omap_hsmmc_host *host = mmc_priv(mmc);
 	int do_send_init_stream = 0;
 
-	pm_runtime_get_sync(host->dev);
-
 	if (ios->power_mode != host->power_mode) {
 		switch (ios->power_mode) {
 		case MMC_POWER_OFF:
-			omap_hsmmc_set_power(host->dev, 0, 0);
+			omap_hsmmc_set_power(host, 0, 0);
 			break;
 		case MMC_POWER_UP:
-			omap_hsmmc_set_power(host->dev, 1, ios->vdd);
+			omap_hsmmc_set_power(host, 1, ios->vdd);
 			break;
 		case MMC_POWER_ON:
 			do_send_init_stream = 1;
@@ -1698,8 +1683,6 @@
 		send_init_stream(host);
 
 	omap_hsmmc_set_bus_mode(host);
-
-	pm_runtime_put_autosuspend(host->dev);
 }
 
 static int omap_hsmmc_get_cd(struct mmc_host *mmc)
@@ -1962,13 +1945,17 @@
 
 static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
 {
-	struct omap_hsmmc_platform_data *pdata;
+	struct omap_hsmmc_platform_data *pdata, *legacy;
 	struct device_node *np = dev->of_node;
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return ERR_PTR(-ENOMEM); /* out of memory */
 
+	legacy = dev_get_platdata(dev);
+	if (legacy && legacy->name)
+		pdata->name = legacy->name;
+
 	if (of_find_property(np, "ti,dual-volt", NULL))
 		pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
 
@@ -2005,8 +1992,6 @@
 	struct resource *res;
 	int ret, irq;
 	const struct of_device_id *match;
-	dma_cap_mask_t mask;
-	unsigned tx_req, rx_req;
 	const struct omap_mmc_of_data *data;
 	void __iomem *base;
 
@@ -2136,44 +2121,17 @@
 
 	omap_hsmmc_conf_bus_power(host);
 
-	if (!pdev->dev.of_node) {
-		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
-		if (!res) {
-			dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
-			ret = -ENXIO;
-			goto err_irq;
-		}
-		tx_req = res->start;
-
-		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
-		if (!res) {
-			dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
-			ret = -ENXIO;
-			goto err_irq;
-		}
-		rx_req = res->start;
-	}
-
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
-	host->rx_chan =
-		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
-						 &rx_req, &pdev->dev, "rx");
-
-	if (!host->rx_chan) {
-		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel\n");
-		ret = -ENXIO;
+	host->rx_chan = dma_request_chan(&pdev->dev, "rx");
+	if (IS_ERR(host->rx_chan)) {
+		dev_err(mmc_dev(host->mmc), "RX DMA channel request failed\n");
+		ret = PTR_ERR(host->rx_chan);
 		goto err_irq;
 	}
 
-	host->tx_chan =
-		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
-						 &tx_req, &pdev->dev, "tx");
-
-	if (!host->tx_chan) {
-		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel\n");
-		ret = -ENXIO;
+	host->tx_chan = dma_request_chan(&pdev->dev, "tx");
+	if (IS_ERR(host->tx_chan)) {
+		dev_err(mmc_dev(host->mmc), "TX DMA channel request failed\n");
+		ret = PTR_ERR(host->tx_chan);
 		goto err_irq;
 	}
 
@@ -2231,9 +2189,9 @@
 	mmc_remove_host(mmc);
 err_irq:
 	device_init_wakeup(&pdev->dev, false);
-	if (host->tx_chan)
+	if (!IS_ERR_OR_NULL(host->tx_chan))
 		dma_release_channel(host->tx_chan);
-	if (host->rx_chan)
+	if (!IS_ERR_OR_NULL(host->rx_chan))
 		dma_release_channel(host->rx_chan);
 	pm_runtime_dont_use_autosuspend(host->dev);
 	pm_runtime_put_sync(host->dev);
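
Because dma_request_chan() leaves ERR_PTR values rather than NULL in the channel pointers on failure, the omap_hsmmc error path above switches from NULL checks to IS_ERR_OR_NULL() before releasing the channels. A small hedged sketch of that cleanup pattern, with illustrative foo_* names:

#include <linux/dmaengine.h>
#include <linux/err.h>

struct foo_host {
	struct dma_chan *tx_chan;
	struct dma_chan *rx_chan;
};

static void foo_release_dma(struct foo_host *host)
{
	/* each pointer may be a valid channel, NULL, or ERR_PTR(-E...) */
	if (!IS_ERR_OR_NULL(host->tx_chan))
		dma_release_channel(host->tx_chan);
	if (!IS_ERR_OR_NULL(host->rx_chan))
		dma_release_channel(host->rx_chan);
}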
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 6839e41..b2d70ba 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
 #include <linux/mmc/pm.h>
 #include <linux/mmc/slot-gpio.h>
 
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#endif
+
 #include "sdhci.h"
 
 enum {
@@ -116,6 +121,75 @@
 	.ops = &sdhci_acpi_ops_int,
 };
 
+#ifdef CONFIG_X86
+
+static bool sdhci_acpi_byt(void)
+{
+	static const struct x86_cpu_id byt[] = {
+		{ X86_VENDOR_INTEL, 6, 0x37 },
+		{}
+	};
+
+	return x86_match_cpu(byt);
+}
+
+#define BYT_IOSF_SCCEP			0x63
+#define BYT_IOSF_OCP_NETCTRL0		0x1078
+#define BYT_IOSF_OCP_TIMEOUT_BASE	GENMASK(10, 8)
+
+static void sdhci_acpi_byt_setting(struct device *dev)
+{
+	u32 val = 0;
+
+	if (!sdhci_acpi_byt())
+		return;
+
+	if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
+			  &val)) {
+		dev_err(dev, "%s read error\n", __func__);
+		return;
+	}
+
+	if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
+		return;
+
+	val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
+
+	if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
+			   val)) {
+		dev_err(dev, "%s write error\n", __func__);
+		return;
+	}
+
+	dev_dbg(dev, "%s completed\n", __func__);
+}
+
+static bool sdhci_acpi_byt_defer(struct device *dev)
+{
+	if (!sdhci_acpi_byt())
+		return false;
+
+	if (!iosf_mbi_available())
+		return true;
+
+	sdhci_acpi_byt_setting(dev);
+
+	return false;
+}
+
+#else
+
+static inline void sdhci_acpi_byt_setting(struct device *dev)
+{
+}
+
+static inline bool sdhci_acpi_byt_defer(struct device *dev)
+{
+	return false;
+}
+
+#endif
+
 static int bxt_get_cd(struct mmc_host *mmc)
 {
 	int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -126,8 +200,6 @@
 	if (!gpio_cd)
 		return 0;
 
-	pm_runtime_get_sync(mmc->parent);
-
 	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->flags & SDHCI_DEVICE_DEAD)
@@ -137,9 +209,6 @@
 out:
 	spin_unlock_irqrestore(&host->lock, flags);
 
-	pm_runtime_mark_last_busy(mmc->parent);
-	pm_runtime_put_autosuspend(mmc->parent);
-
 	return ret;
 }
 
@@ -193,8 +262,10 @@
 
 	/* Platform specific code during sd probe slot goes here */
 
-	if (hid && !strcmp(hid, "80865ACA"))
+	if (hid && !strcmp(hid, "80865ACA")) {
 		host->mmc_host_ops.get_cd = bxt_get_cd;
+		host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
+	}
 
 	return 0;
 }
@@ -322,6 +393,9 @@
 	if (acpi_bus_get_status(device) || !device->status.present)
 		return -ENODEV;
 
+	if (sdhci_acpi_byt_defer(dev))
+		return -EPROBE_DEFER;
+
 	hid = acpi_device_hid(device);
 	uid = device->pnp.unique_id;
 
@@ -447,6 +521,8 @@
 {
 	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
 
+	sdhci_acpi_byt_setting(&c->pdev->dev);
+
 	return sdhci_resume_host(c->host);
 }
 
@@ -470,6 +546,8 @@
 {
 	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
 
+	sdhci_acpi_byt_setting(&c->pdev->dev);
+
 	return sdhci_runtime_resume_host(c->host);
 }
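
The Baytrail workaround above is a sideband read-modify-write: BYT_IOSF_OCP_TIMEOUT_BASE is GENMASK(10, 8) == 0x700, so the code clears bits 8-10 of OCP_NETCTRL0 whenever any of them are set, and defers probe until the IOSF MBI driver is available. A tiny hedged sketch of the mask arithmetic only; the FOO_* names and example value are illustrative.

#include <linux/bitops.h>

#define FOO_TIMEOUT_BASE	GENMASK(10, 8)	/* bits 8..10 == 0x700 */

static u32 foo_clear_timeout_base(u32 val)
{
	/* e.g. 0x00000750 & ~0x00000700 == 0x00000050 */
	return val & ~FOO_TIMEOUT_BASE;
}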
 
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 2e482b1..b6f4c1d 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -55,8 +55,32 @@
 	return freq;
 }
 
+static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+	bool ctrl_phy = false;
+
+	if (clock > MMC_HIGH_52_MAX_DTR && (!IS_ERR(sdhci_arasan->phy)))
+		ctrl_phy = true;
+
+	if (ctrl_phy) {
+		spin_unlock_irq(&host->lock);
+		phy_power_off(sdhci_arasan->phy);
+		spin_lock_irq(&host->lock);
+	}
+
+	sdhci_set_clock(host, clock);
+
+	if (ctrl_phy) {
+		spin_unlock_irq(&host->lock);
+		phy_power_on(sdhci_arasan->phy);
+		spin_lock_irq(&host->lock);
+	}
+}
+
 static struct sdhci_ops sdhci_arasan_ops = {
-	.set_clock = sdhci_set_clock,
+	.set_clock = sdhci_arasan_set_clock,
 	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
 	.get_timeout_clock = sdhci_arasan_get_timeout_clock,
 	.set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 2703aa9..25f779e 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -15,8 +15,10 @@
  */
 
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/slot-gpio.h>
 #include <linux/module.h>
@@ -31,14 +33,60 @@
 #define		SDMMC_CACR_CAPWREN	BIT(0)
 #define		SDMMC_CACR_KEY		(0x46 << 8)
 
+#define SDHCI_AT91_PRESET_COMMON_CONF	0x400 /* drv type B, programmable clock mode */
+
 struct sdhci_at91_priv {
 	struct clk *hclock;
 	struct clk *gck;
 	struct clk *mainck;
 };
 
+static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	u16 clk;
+	unsigned long timeout;
+
+	host->mmc->actual_clock = 0;
+
+	/*
+	 * There is no requirement to disable the internal clock before
+	 * changing the SD clock configuration. Moreover, disabling the
+	 * internal clock, changing the configuration and re-enabling the
+	 * internal clock causes some bugs. It can prevent the internal
+	 * clock stable flag from becoming ready and can cause an unexpected
+	 * switch to the base clock when using presets.
+	 */
+	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+	clk &= SDHCI_CLOCK_INT_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+	if (clock == 0)
+		return;
+
+	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+
+	clk |= SDHCI_CLOCK_INT_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+	/* Wait max 20 ms */
+	timeout = 20;
+	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+		& SDHCI_CLOCK_INT_STABLE)) {
+		if (timeout == 0) {
+			pr_err("%s: Internal clock never stabilised.\n",
+			       mmc_hostname(host->mmc));
+			return;
+		}
+		timeout--;
+		mdelay(1);
+	}
+
+	clk |= SDHCI_CLOCK_CARD_EN;
+	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+}
+
 static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
-	.set_clock		= sdhci_set_clock,
+	.set_clock		= sdhci_at91_set_clock,
 	.set_bus_width		= sdhci_set_bus_width,
 	.reset			= sdhci_reset,
 	.set_uhs_signaling	= sdhci_set_uhs_signaling,
@@ -46,7 +94,6 @@
 
 static const struct sdhci_pltfm_data soc_data_sama5d2 = {
 	.ops = &sdhci_at91_sama5d2_ops,
-	.quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST,
 };
 
 static const struct of_device_id sdhci_at91_dt_match[] = {
@@ -119,6 +166,7 @@
 	unsigned int			clk_base, clk_mul;
 	unsigned int			gck_rate, real_gck_rate;
 	int				ret;
+	unsigned int			preset_div;
 
 	match = of_match_device(sdhci_at91_dt_match, &pdev->dev);
 	if (!match)
@@ -186,6 +234,28 @@
 			 clk_mul, real_gck_rate);
 	}
 
+	/*
+	 * We have to set the preset values because they depend on the clk_mul
+	 * value. Moreover, SDR104 is supported in a degraded mode since the
+	 * maximum SD clock value is 120 MHz instead of 208 MHz. For that
+	 * reason, we need to use presets to support SDR104.
+	 */
+	preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1;
+	writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+	       host->ioaddr + SDHCI_PRESET_FOR_SDR12);
+	preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+	writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+	       host->ioaddr + SDHCI_PRESET_FOR_SDR25);
+	preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1;
+	writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+	       host->ioaddr + SDHCI_PRESET_FOR_SDR50);
+	preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1;
+	writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+	       host->ioaddr + SDHCI_PRESET_FOR_SDR104);
+	preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+	writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+	       host->ioaddr + SDHCI_PRESET_FOR_DDR50);
+
 	clk_prepare_enable(priv->mainck);
 	clk_prepare_enable(priv->gck);
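
The preset-register writes above derive each divisor as DIV_ROUND_UP(real_gck_rate, target) - 1, so the resulting clock real_gck_rate / (preset_div + 1) never exceeds the target rate. A worked, hedged example; the 240 MHz generated-clock rate is made up for illustration.

#include <linux/kernel.h>

/*
 * With a hypothetical real_gck_rate of 240 MHz and an SDR25 target of
 * 50 MHz:
 *
 *   preset_div = DIV_ROUND_UP(240000000, 50000000) - 1 = 5 - 1 = 4
 *   resulting clock = 240 MHz / (4 + 1) = 48 MHz  (<= 50 MHz target)
 */
static unsigned int foo_preset_div(unsigned long gck_rate, unsigned long target)
{
	return DIV_ROUND_UP(gck_rate, target) - 1;
}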
 
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 62aa5d0..97d4eeb 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -340,8 +340,6 @@
 	if (!gpio_cd)
 		return 0;
 
-	pm_runtime_get_sync(mmc->parent);
-
 	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->flags & SDHCI_DEVICE_DEAD)
@@ -351,9 +349,6 @@
 out:
 	spin_unlock_irqrestore(&host->lock, flags);
 
-	pm_runtime_mark_last_busy(mmc->parent);
-	pm_runtime_put_autosuspend(mmc->parent);
-
 	return ret;
 }
 
@@ -390,8 +385,11 @@
 	slot->cd_idx = 0;
 	slot->cd_override_level = true;
 	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
-	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) {
 		slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+		slot->host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
+	}
 
 	return 0;
 }
@@ -1173,6 +1171,30 @@
 
 	{
 		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_BXTM_EMMC,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_BXTM_SDIO,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_BXTM_SD,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
 		.device		= PCI_DEVICE_ID_INTEL_APL_EMMC,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d1a0b4d..89e7151 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -28,6 +28,9 @@
 #define PCI_DEVICE_ID_INTEL_BXT_SD	0x0aca
 #define PCI_DEVICE_ID_INTEL_BXT_EMMC	0x0acc
 #define PCI_DEVICE_ID_INTEL_BXT_SDIO	0x0ad0
+#define PCI_DEVICE_ID_INTEL_BXTM_SD	0x1aca
+#define PCI_DEVICE_ID_INTEL_BXTM_EMMC	0x1acc
+#define PCI_DEVICE_ID_INTEL_BXTM_SDIO	0x1ad0
 #define PCI_DEVICE_ID_INTEL_APL_SD	0x5aca
 #define PCI_DEVICE_ID_INTEL_APL_EMMC	0x5acc
 #define PCI_DEVICE_ID_INTEL_APL_SDIO	0x5ad0
diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c
index 059df70..72c13b6 100644
--- a/drivers/mmc/host/sdhci-pic32.c
+++ b/drivers/mmc/host/sdhci-pic32.c
@@ -243,7 +243,6 @@
 static struct platform_driver pic32_sdhci_driver = {
 	.driver = {
 		.name	= "pic32-sdhci",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(pic32_sdhci_id_table),
 	},
 	.probe		= pic32_sdhci_probe,
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 072bb27..64f287a 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -119,16 +119,22 @@
 {
 	struct sdhci_host *host;
 	struct resource *iomem;
-	int ret;
+	void __iomem *ioaddr;
+	int irq, ret;
 
 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iomem) {
-		ret = -ENOMEM;
+	ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+	if (IS_ERR(ioaddr)) {
+		ret = PTR_ERR(ioaddr);
 		goto err;
 	}
 
-	if (resource_size(iomem) < 0x100)
-		dev_err(&pdev->dev, "Invalid iomem size!\n");
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get IRQ number\n");
+		ret = irq;
+		goto err;
+	}
 
 	host = sdhci_alloc_host(&pdev->dev,
 		sizeof(struct sdhci_pltfm_host) + priv_size);
@@ -138,6 +144,8 @@
 		goto err;
 	}
 
+	host->ioaddr = ioaddr;
+	host->irq = irq;
 	host->hw_name = dev_name(&pdev->dev);
 	if (pdata && pdata->ops)
 		host->ops = pdata->ops;
@@ -148,22 +156,6 @@
 		host->quirks2 = pdata->quirks2;
 	}
 
-	host->irq = platform_get_irq(pdev, 0);
-
-	if (!request_mem_region(iomem->start, resource_size(iomem),
-		mmc_hostname(host->mmc))) {
-		dev_err(&pdev->dev, "cannot request region\n");
-		ret = -EBUSY;
-		goto err_request;
-	}
-
-	host->ioaddr = ioremap(iomem->start, resource_size(iomem));
-	if (!host->ioaddr) {
-		dev_err(&pdev->dev, "failed to remap registers\n");
-		ret = -ENOMEM;
-		goto err_remap;
-	}
-
 	/*
 	 * Some platforms need to probe the controller to be able to
 	 * determine which caps should be used.
@@ -174,11 +166,6 @@
 	platform_set_drvdata(pdev, host);
 
 	return host;
-
-err_remap:
-	release_mem_region(iomem->start, resource_size(iomem));
-err_request:
-	sdhci_free_host(host);
 err:
 	dev_err(&pdev->dev, "%s failed %d\n", __func__, ret);
 	return ERR_PTR(ret);
@@ -188,10 +175,7 @@
 void sdhci_pltfm_free(struct platform_device *pdev)
 {
 	struct sdhci_host *host = platform_get_drvdata(pdev);
-	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-	iounmap(host->ioaddr);
-	release_mem_region(iomem->start, resource_size(iomem));
 	sdhci_free_host(host);
 }
 EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
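
sdhci_pltfm_init() above drops the manual request_mem_region()/ioremap()/iounmap() bookkeeping in favour of devm_ioremap_resource(), which claims and maps the region in one call and releases it automatically when the device detaches. A hedged sketch of the idiom, with an illustrative foo_* helper:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *foo_map_registers(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* claims the region and maps it; undone automatically on detach */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return base;	/* ERR_PTR, e.g. -EINVAL or -EBUSY */

	return base;
}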
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index aca439d..3013250 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -309,8 +309,30 @@
 		__func__, uhs, ctrl_2);
 }
 
+static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
+			    unsigned short vdd)
+{
+	struct mmc_host *mmc = host->mmc;
+	u8 pwr = host->pwr;
+
+	sdhci_set_power(host, mode, vdd);
+
+	if (host->pwr == pwr)
+		return;
+
+	if (host->pwr == 0)
+		vdd = 0;
+
+	if (!IS_ERR(mmc->supply.vmmc)) {
+		spin_unlock_irq(&host->lock);
+		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+		spin_lock_irq(&host->lock);
+	}
+}
+
 static const struct sdhci_ops pxav3_sdhci_ops = {
 	.set_clock = sdhci_set_clock,
+	.set_power = pxav3_set_power,
 	.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
 	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
 	.set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f8c4762..bcc0de4 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -382,14 +382,6 @@
 	.pdata = &sdhci_tegra114_pdata,
 };
 
-static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
-	.pdata = &sdhci_tegra114_pdata,
-	.nvquirks = NVQUIRK_ENABLE_SDR50 |
-		    NVQUIRK_ENABLE_DDR50 |
-		    NVQUIRK_ENABLE_SDR104 |
-		    NVQUIRK_HAS_PADCALIB,
-};
-
 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -407,7 +399,7 @@
 
 static const struct of_device_id sdhci_tegra_dt_match[] = {
 	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
-	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
+	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
 	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
 	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
 	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8670f16..e010ea4 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -38,11 +38,6 @@
 #define DBG(f, x...) \
 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
 
-#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
-	defined(CONFIG_MMC_SDHCI_MODULE))
-#define SDHCI_USE_LEDS_CLASS
-#endif
-
 #define MAX_TUNING_LOOP 40
 
 static unsigned int debug_quirks = 0;
@@ -53,29 +48,7 @@
 static void sdhci_finish_command(struct sdhci_host *);
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
-static int sdhci_do_get_cd(struct sdhci_host *host);
-
-#ifdef CONFIG_PM
-static int sdhci_runtime_pm_get(struct sdhci_host *host);
-static int sdhci_runtime_pm_put(struct sdhci_host *host);
-static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
-static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
-#else
-static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
-{
-	return 0;
-}
-static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
-{
-	return 0;
-}
-static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
-{
-}
-static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
-{
-}
-#endif
+static int sdhci_get_cd(struct mmc_host *mmc);
 
 static void sdhci_dumpregs(struct sdhci_host *host)
 {
@@ -171,6 +144,22 @@
 	sdhci_set_card_detection(host, false);
 }
 
+static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+{
+	if (host->bus_on)
+		return;
+	host->bus_on = true;
+	pm_runtime_get_noresume(host->mmc->parent);
+}
+
+static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+{
+	if (!host->bus_on)
+		return;
+	host->bus_on = false;
+	pm_runtime_put_noidle(host->mmc->parent);
+}
+
 void sdhci_reset(struct sdhci_host *host, u8 mask)
 {
 	unsigned long timeout;
@@ -204,7 +193,7 @@
 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
 {
 	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
-		if (!sdhci_do_get_cd(host))
+		if (!sdhci_get_cd(host->mmc))
 			return;
 	}
 
@@ -252,7 +241,7 @@
 	sdhci_enable_card_detection(host);
 }
 
-static void sdhci_activate_led(struct sdhci_host *host)
+static void __sdhci_led_activate(struct sdhci_host *host)
 {
 	u8 ctrl;
 
@@ -261,7 +250,7 @@
 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 }
 
-static void sdhci_deactivate_led(struct sdhci_host *host)
+static void __sdhci_led_deactivate(struct sdhci_host *host)
 {
 	u8 ctrl;
 
@@ -270,9 +259,9 @@
 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 }
 
-#ifdef SDHCI_USE_LEDS_CLASS
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
 static void sdhci_led_control(struct led_classdev *led,
-	enum led_brightness brightness)
+			      enum led_brightness brightness)
 {
 	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
 	unsigned long flags;
@@ -283,12 +272,62 @@
 		goto out;
 
 	if (brightness == LED_OFF)
-		sdhci_deactivate_led(host);
+		__sdhci_led_deactivate(host);
 	else
-		sdhci_activate_led(host);
+		__sdhci_led_activate(host);
 out:
 	spin_unlock_irqrestore(&host->lock, flags);
 }
+
+static int sdhci_led_register(struct sdhci_host *host)
+{
+	struct mmc_host *mmc = host->mmc;
+
+	snprintf(host->led_name, sizeof(host->led_name),
+		 "%s::", mmc_hostname(mmc));
+
+	host->led.name = host->led_name;
+	host->led.brightness = LED_OFF;
+	host->led.default_trigger = mmc_hostname(mmc);
+	host->led.brightness_set = sdhci_led_control;
+
+	return led_classdev_register(mmc_dev(mmc), &host->led);
+}
+
+static void sdhci_led_unregister(struct sdhci_host *host)
+{
+	led_classdev_unregister(&host->led);
+}
+
+static inline void sdhci_led_activate(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_led_deactivate(struct sdhci_host *host)
+{
+}
+
+#else
+
+static inline int sdhci_led_register(struct sdhci_host *host)
+{
+	return 0;
+}
+
+static inline void sdhci_led_unregister(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_led_activate(struct sdhci_host *host)
+{
+	__sdhci_led_activate(host);
+}
+
+static inline void sdhci_led_deactivate(struct sdhci_host *host)
+{
+	__sdhci_led_deactivate(host);
+}
+
 #endif
 
 /*****************************************************************************\
@@ -1091,23 +1130,14 @@
 	return preset;
 }
 
-void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
+		   unsigned int *actual_clock)
 {
 	int div = 0; /* Initialized for compiler warning */
 	int real_div = div, clk_mul = 1;
 	u16 clk = 0;
-	unsigned long timeout;
 	bool switch_base_clk = false;
 
-	host->mmc->actual_clock = 0;
-
-	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
-	if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
-		mdelay(1);
-
-	if (clock == 0)
-		return;
-
 	if (host->version >= SDHCI_SPEC_300) {
 		if (host->preset_enabled) {
 			u16 pre_val;
@@ -1184,10 +1214,29 @@
 
 clock_set:
 	if (real_div)
-		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
+		*actual_clock = (host->max_clk * clk_mul) / real_div;
 	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
 	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
 		<< SDHCI_DIVIDER_HI_SHIFT;
+
+	return clk;
+}
+EXPORT_SYMBOL_GPL(sdhci_calc_clk);
+
+void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	u16 clk;
+	unsigned long timeout;
+
+	host->mmc->actual_clock = 0;
+
+	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+	if (clock == 0)
+		return;
+
+	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+
 	clk |= SDHCI_CLOCK_INT_EN;
 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 
@@ -1210,10 +1259,24 @@
 }
 EXPORT_SYMBOL_GPL(sdhci_set_clock);
 
-static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
-			    unsigned short vdd)
+static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
+				unsigned short vdd)
 {
 	struct mmc_host *mmc = host->mmc;
+
+	spin_unlock_irq(&host->lock);
+	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+	spin_lock_irq(&host->lock);
+
+	if (mode != MMC_POWER_OFF)
+		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+	else
+		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+}
+
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+		     unsigned short vdd)
+{
 	u8 pwr = 0;
 
 	if (mode != MMC_POWER_OFF) {
@@ -1245,7 +1308,6 @@
 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
 			sdhci_runtime_pm_bus_off(host);
-		vdd = 0;
 	} else {
 		/*
 		 * Spec says that we should clear the power reg before setting
@@ -1276,12 +1338,20 @@
 		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
 			mdelay(10);
 	}
+}
+EXPORT_SYMBOL_GPL(sdhci_set_power);
 
-	if (!IS_ERR(mmc->supply.vmmc)) {
-		spin_unlock_irq(&host->lock);
-		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
-		spin_lock_irq(&host->lock);
-	}
+static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+			      unsigned short vdd)
+{
+	struct mmc_host *mmc = host->mmc;
+
+	if (host->ops->set_power)
+		host->ops->set_power(host, mode, vdd);
+	else if (!IS_ERR(mmc->supply.vmmc))
+		sdhci_set_power_reg(host, mode, vdd);
+	else
+		sdhci_set_power(host, mode, vdd);
 }
 
 /*****************************************************************************\
@@ -1298,8 +1368,6 @@
 
 	host = mmc_priv(mmc);
 
-	sdhci_runtime_pm_get(host);
-
 	/* Firstly check card presence */
 	present = mmc->ops->get_cd(mmc);
 
@@ -1307,9 +1375,7 @@
 
 	WARN_ON(host->mrq != NULL);
 
-#ifndef SDHCI_USE_LEDS_CLASS
-	sdhci_activate_led(host);
-#endif
+	sdhci_led_activate(host);
 
 	/*
 	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
@@ -1384,11 +1450,11 @@
 }
 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
 
-static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
+	struct sdhci_host *host = mmc_priv(mmc);
 	unsigned long flags;
 	u8 ctrl;
-	struct mmc_host *mmc = host->mmc;
 
 	spin_lock_irqsave(&host->lock, flags);
 
@@ -1431,7 +1497,7 @@
 		}
 	}
 
-	sdhci_set_power(host, ios->power_mode, ios->vdd);
+	__sdhci_set_power(host, ios->power_mode, ios->vdd);
 
 	if (host->ops->platform_send_init_74_clocks)
 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1542,18 +1608,10 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+static int sdhci_get_cd(struct mmc_host *mmc)
 {
 	struct sdhci_host *host = mmc_priv(mmc);
-
-	sdhci_runtime_pm_get(host);
-	sdhci_do_set_ios(host, ios);
-	sdhci_runtime_pm_put(host);
-}
-
-static int sdhci_do_get_cd(struct sdhci_host *host)
-{
-	int gpio_cd = mmc_gpio_get_cd(host->mmc);
+	int gpio_cd = mmc_gpio_get_cd(mmc);
 
 	if (host->flags & SDHCI_DEVICE_DEAD)
 		return 0;
@@ -1577,17 +1635,6 @@
 	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
 }
 
-static int sdhci_get_cd(struct mmc_host *mmc)
-{
-	struct sdhci_host *host = mmc_priv(mmc);
-	int ret;
-
-	sdhci_runtime_pm_get(host);
-	ret = sdhci_do_get_cd(host);
-	sdhci_runtime_pm_put(host);
-	return ret;
-}
-
 static int sdhci_check_ro(struct sdhci_host *host)
 {
 	unsigned long flags;
@@ -1612,8 +1659,9 @@
 
 #define SAMPLE_COUNT	5
 
-static int sdhci_do_get_ro(struct sdhci_host *host)
+static int sdhci_get_ro(struct mmc_host *mmc)
 {
+	struct sdhci_host *host = mmc_priv(mmc);
 	int i, ro_count;
 
 	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
@@ -1638,17 +1686,6 @@
 		host->ops->hw_reset(host);
 }
 
-static int sdhci_get_ro(struct mmc_host *mmc)
-{
-	struct sdhci_host *host = mmc_priv(mmc);
-	int ret;
-
-	sdhci_runtime_pm_get(host);
-	ret = sdhci_do_get_ro(host);
-	sdhci_runtime_pm_put(host);
-	return ret;
-}
-
 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
 {
 	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
@@ -1668,8 +1705,6 @@
 	struct sdhci_host *host = mmc_priv(mmc);
 	unsigned long flags;
 
-	sdhci_runtime_pm_get(host);
-
 	spin_lock_irqsave(&host->lock, flags);
 	if (enable)
 		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -1678,14 +1713,12 @@
 
 	sdhci_enable_sdio_irq_nolock(host, enable);
 	spin_unlock_irqrestore(&host->lock, flags);
-
-	sdhci_runtime_pm_put(host);
 }
 
-static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
-						struct mmc_ios *ios)
+static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+					     struct mmc_ios *ios)
 {
-	struct mmc_host *mmc = host->mmc;
+	struct sdhci_host *host = mmc_priv(mmc);
 	u16 ctrl;
 	int ret;
 
@@ -1773,29 +1806,13 @@
 	}
 }
 
-static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
-	struct mmc_ios *ios)
-{
-	struct sdhci_host *host = mmc_priv(mmc);
-	int err;
-
-	if (host->version < SDHCI_SPEC_300)
-		return 0;
-	sdhci_runtime_pm_get(host);
-	err = sdhci_do_start_signal_voltage_switch(host, ios);
-	sdhci_runtime_pm_put(host);
-	return err;
-}
-
 static int sdhci_card_busy(struct mmc_host *mmc)
 {
 	struct sdhci_host *host = mmc_priv(mmc);
 	u32 present_state;
 
-	sdhci_runtime_pm_get(host);
 	/* Check whether DAT[3:0] is 0000 */
 	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
-	sdhci_runtime_pm_put(host);
 
 	return !(present_state & SDHCI_DATA_LVL_MASK);
 }
@@ -1822,7 +1839,6 @@
 	unsigned int tuning_count = 0;
 	bool hs400_tuning;
 
-	sdhci_runtime_pm_get(host);
 	spin_lock_irqsave(&host->lock, flags);
 
 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
@@ -1858,8 +1874,7 @@
 		break;
 
 	case MMC_TIMING_UHS_SDR50:
-		if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
-		    host->flags & SDHCI_SDR104_NEEDS_TUNING)
+		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
 			break;
 		/* FALLTHROUGH */
 
@@ -1870,7 +1885,6 @@
 	if (host->ops->platform_execute_tuning) {
 		spin_unlock_irqrestore(&host->lock, flags);
 		err = host->ops->platform_execute_tuning(host, opcode);
-		sdhci_runtime_pm_put(host);
 		return err;
 	}
 
@@ -2002,8 +2016,6 @@
 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
 out_unlock:
 	spin_unlock_irqrestore(&host->lock, flags);
-	sdhci_runtime_pm_put(host);
-
 	return err;
 }
 
@@ -2084,7 +2096,7 @@
 	if (host->ops->card_event)
 		host->ops->card_event(host);
 
-	present = sdhci_do_get_cd(host);
+	present = sdhci_get_cd(host->mmc);
 
 	spin_lock_irqsave(&host->lock, flags);
 
@@ -2193,15 +2205,12 @@
 	host->cmd = NULL;
 	host->data = NULL;
 
-#ifndef SDHCI_USE_LEDS_CLASS
-	sdhci_deactivate_led(host);
-#endif
+	sdhci_led_deactivate(host);
 
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
 
 	mmc_request_done(host->mmc, mrq);
-	sdhci_runtime_pm_put(host);
 }
 
 static void sdhci_timeout_timer(unsigned long data)
@@ -2658,7 +2667,7 @@
 		sdhci_init(host, 0);
 		host->pwr = 0;
 		host->clock = 0;
-		sdhci_do_set_ios(host, &host->mmc->ios);
+		sdhci_set_ios(host->mmc, &host->mmc->ios);
 	} else {
 		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
 		mmiowb();
@@ -2682,33 +2691,6 @@
 
 EXPORT_SYMBOL_GPL(sdhci_resume_host);
 
-static int sdhci_runtime_pm_get(struct sdhci_host *host)
-{
-	return pm_runtime_get_sync(host->mmc->parent);
-}
-
-static int sdhci_runtime_pm_put(struct sdhci_host *host)
-{
-	pm_runtime_mark_last_busy(host->mmc->parent);
-	return pm_runtime_put_autosuspend(host->mmc->parent);
-}
-
-static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
-{
-	if (host->bus_on)
-		return;
-	host->bus_on = true;
-	pm_runtime_get_noresume(host->mmc->parent);
-}
-
-static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
-{
-	if (!host->bus_on)
-		return;
-	host->bus_on = false;
-	pm_runtime_put_noidle(host->mmc->parent);
-}
-
 int sdhci_runtime_suspend_host(struct sdhci_host *host)
 {
 	unsigned long flags;
@@ -2747,8 +2729,8 @@
 	/* Force clock and power re-program */
 	host->pwr = 0;
 	host->clock = 0;
-	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
-	sdhci_do_set_ios(host, &host->mmc->ios);
+	sdhci_start_signal_voltage_switch(host->mmc, &host->mmc->ios);
+	sdhci_set_ios(host->mmc, &host->mmc->ios);
 
 	if ((host_flags & SDHCI_PV_ENABLED) &&
 		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
@@ -2993,7 +2975,8 @@
 		if (!host->ops->get_max_clock) {
 			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
 			       mmc_hostname(mmc));
-			return -ENODEV;
+			ret = -ENODEV;
+			goto undma;
 		}
 		host->max_clk = host->ops->get_max_clock(host);
 	}
@@ -3030,7 +3013,7 @@
 	} else
 		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
 
-	if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
+	if (!mmc->f_max || mmc->f_max > max_clk)
 		mmc->f_max = max_clk;
 
 	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
@@ -3043,7 +3026,8 @@
 			} else {
 				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
 					mmc_hostname(mmc));
-				return -ENODEV;
+				ret = -ENODEV;
+				goto undma;
 			}
 		}
 
@@ -3097,8 +3081,9 @@
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
 	/* If there are external regulators, get them */
-	if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
+	ret = mmc_regulator_get_supply(mmc);
+	if (ret == -EPROBE_DEFER)
+		goto undma;
 
 	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
 	if (!IS_ERR(mmc->supply.vqmmc)) {
@@ -3153,10 +3138,6 @@
 	if (caps[1] & SDHCI_USE_SDR50_TUNING)
 		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
 
-	/* Does the host need tuning for SDR104 / HS200? */
-	if (mmc->caps2 & MMC_CAP2_HS200)
-		host->flags |= SDHCI_SDR104_NEEDS_TUNING;
-
 	/* Driver Type(s) (A, C, D) supported by the host */
 	if (caps[1] & SDHCI_DRIVER_TYPE_A)
 		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
@@ -3255,7 +3236,8 @@
 	if (mmc->ocr_avail == 0) {
 		pr_err("%s: Hardware doesn't report any support voltages.\n",
 		       mmc_hostname(mmc));
-		return -ENODEV;
+		ret = -ENODEV;
+		goto unreg;
 	}
 
 	spin_lock_init(&host->lock);
@@ -3339,25 +3321,18 @@
 	sdhci_dumpregs(host);
 #endif
 
-#ifdef SDHCI_USE_LEDS_CLASS
-	snprintf(host->led_name, sizeof(host->led_name),
-		"%s::", mmc_hostname(mmc));
-	host->led.name = host->led_name;
-	host->led.brightness = LED_OFF;
-	host->led.default_trigger = mmc_hostname(mmc);
-	host->led.brightness_set = sdhci_led_control;
-
-	ret = led_classdev_register(mmc_dev(mmc), &host->led);
+	ret = sdhci_led_register(host);
 	if (ret) {
 		pr_err("%s: Failed to register LED device: %d\n",
 		       mmc_hostname(mmc), ret);
-		goto reset;
+		goto unirq;
 	}
-#endif
 
 	mmiowb();
 
-	mmc_add_host(mmc);
+	ret = mmc_add_host(mmc);
+	if (ret)
+		goto unled;
 
 	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
@@ -3369,15 +3344,25 @@
 
 	return 0;
 
-#ifdef SDHCI_USE_LEDS_CLASS
-reset:
+unled:
+	sdhci_led_unregister(host);
+unirq:
 	sdhci_do_reset(host, SDHCI_RESET_ALL);
 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
 	free_irq(host->irq, host);
-#endif
 untasklet:
 	tasklet_kill(&host->finish_tasklet);
+unreg:
+	if (!IS_ERR(mmc->supply.vqmmc))
+		regulator_disable(mmc->supply.vqmmc);
+undma:
+	if (host->align_buffer)
+		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+				  host->adma_table_sz, host->align_buffer,
+				  host->align_addr);
+	host->adma_table = NULL;
+	host->align_buffer = NULL;
 
 	return ret;
 }
@@ -3409,9 +3394,7 @@
 
 	mmc_remove_host(mmc);
 
-#ifdef SDHCI_USE_LEDS_CLASS
-	led_classdev_unregister(&host->led);
-#endif
+	sdhci_led_unregister(host);
 
 	if (!dead)
 		sdhci_do_reset(host, SDHCI_RESET_ALL);
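
The LED rework above replaces the hand-rolled SDHCI_USE_LEDS_CLASS conditional with IS_REACHABLE(CONFIG_LEDS_CLASS) and hides register/unregister/activate behind stubs, so callers carry no #ifdefs. IS_REACHABLE() is true when the LED core is built in, or when both the LED core and this code are modules, i.e. whenever led_classdev_register() can actually be linked against. A hedged sketch of the stub pattern; foo_led_register() is illustrative.

#include <linux/kconfig.h>
#include <linux/leds.h>

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
/* the LED core is reachable: register a classdev as usual */
static int foo_led_register(struct led_classdev *led, struct device *dev)
{
	return led_classdev_register(dev, led);
}
#else
/* LED core unreachable (e.g. it is a module but we are built in) */
static inline int foo_led_register(struct led_classdev *led, struct device *dev)
{
	return 0;	/* nothing to register; caller falls back to direct control */
}
#endif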
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 3bd2803..609f87c 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -417,11 +417,6 @@
 #define SDHCI_QUIRK2_ACMD23_BROKEN			(1<<14)
 /* Broken Clock divider zero in controller */
 #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN		(1<<15)
-/*
- * When internal clock is disabled, a delay is needed before modifying the
- * SD clock frequency or enabling back the internal clock.
- */
-#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST	(1<<16)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
@@ -433,7 +428,7 @@
 	struct mmc_host_ops mmc_host_ops;	/* MMC host ops */
 	u64 dma_mask;		/* custom DMA mask */
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
 	struct led_classdev led;	/* LED control */
 	char led_name[32];
 #endif
@@ -450,7 +445,6 @@
 #define SDHCI_AUTO_CMD23	(1<<7)	/* Auto CMD23 support */
 #define SDHCI_PV_ENABLED	(1<<8)	/* Preset value enabled */
 #define SDHCI_SDIO_IRQ_ENABLED	(1<<9)	/* SDIO irq enabled */
-#define SDHCI_SDR104_NEEDS_TUNING (1<<10)	/* SDR104/HS200 needs tuning */
 #define SDHCI_USE_64_BIT_DMA	(1<<12)	/* Use 64-bit DMA */
 #define SDHCI_HS400_TUNING	(1<<13)	/* Tuning for HS400 */
 
@@ -529,6 +523,8 @@
 #endif
 
 	void	(*set_clock)(struct sdhci_host *host, unsigned int clock);
+	void	(*set_power)(struct sdhci_host *host, unsigned char mode,
+			     unsigned short vdd);
 
 	int		(*enable_dma)(struct sdhci_host *host);
 	unsigned int	(*get_max_clock)(struct sdhci_host *host);
@@ -659,7 +655,11 @@
 	return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
 }
 
+u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
+		   unsigned int *actual_clock);
 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+		     unsigned short vdd);
 void sdhci_set_bus_width(struct sdhci_host *host, int width);
 void sdhci_reset(struct sdhci_host *host, u8 mask);
 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 8d870ce..dd64b86 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -248,7 +248,6 @@
 	int sg_idx;
 	int sg_blkidx;
 	bool power;
-	bool card_present;
 	bool ccs_enable;		/* Command Completion Signal support */
 	bool clk_ctrl2_enable;
 	struct mutex thread_lock;
@@ -1064,16 +1063,6 @@
 		host->mmc->f_max, host->mmc->f_min);
 }
 
-static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
-{
-	struct mmc_host *mmc = host->mmc;
-
-	if (!IS_ERR(mmc->supply.vmmc))
-		/* Errors ignored... */
-		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
-				      ios->power_mode ? ios->vdd : 0);
-}
-
 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
 	struct sh_mmcif_host *host = mmc_priv(mmc);
@@ -1091,42 +1080,32 @@
 	host->state = STATE_IOS;
 	spin_unlock_irqrestore(&host->lock, flags);
 
-	if (ios->power_mode == MMC_POWER_UP) {
-		if (!host->card_present) {
-			/* See if we also get DMA */
-			sh_mmcif_request_dma(host);
-			host->card_present = true;
-		}
-		sh_mmcif_set_power(host, ios);
-	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
-		/* clock stop */
-		sh_mmcif_clock_control(host, 0);
-		if (ios->power_mode == MMC_POWER_OFF) {
-			if (host->card_present) {
-				sh_mmcif_release_dma(host);
-				host->card_present = false;
-			}
-		}
-		if (host->power) {
-			pm_runtime_put_sync(dev);
-			clk_disable_unprepare(host->clk);
-			host->power = false;
-			if (ios->power_mode == MMC_POWER_OFF)
-				sh_mmcif_set_power(host, ios);
-		}
-		host->state = STATE_IDLE;
-		return;
-	}
-
-	if (ios->clock) {
+	switch (ios->power_mode) {
+	case MMC_POWER_UP:
+		if (!IS_ERR(mmc->supply.vmmc))
+			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 		if (!host->power) {
 			clk_prepare_enable(host->clk);
-
 			pm_runtime_get_sync(dev);
-			host->power = true;
 			sh_mmcif_sync_reset(host);
+			sh_mmcif_request_dma(host);
+			host->power = true;
 		}
+		break;
+	case MMC_POWER_OFF:
+		if (!IS_ERR(mmc->supply.vmmc))
+			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+		if (host->power) {
+			sh_mmcif_clock_control(host, 0);
+			sh_mmcif_release_dma(host);
+			pm_runtime_put(dev);
+			clk_disable_unprepare(host->clk);
+			host->power = false;
+		}
+		break;
+	case MMC_POWER_ON:
 		sh_mmcif_clock_control(host, ios->clock);
+		break;
 	}
 
 	host->timing = ios->timing;
@@ -1513,29 +1492,29 @@
 		mmc->caps |= pd->caps;
 	mmc->max_segs = 32;
 	mmc->max_blk_size = 512;
-	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
 	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
 	mmc->max_seg_size = mmc->max_req_size;
 
 	platform_set_drvdata(pdev, host);
 
-	pm_runtime_enable(dev);
-	host->power = false;
-
 	host->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(host->clk)) {
 		ret = PTR_ERR(host->clk);
 		dev_err(dev, "cannot get clock: %d\n", ret);
-		goto err_pm;
+		goto err_host;
 	}
 
 	ret = clk_prepare_enable(host->clk);
 	if (ret < 0)
-		goto err_pm;
+		goto err_host;
 
 	sh_mmcif_clk_setup(host);
 
-	ret = pm_runtime_resume(dev);
+	pm_runtime_enable(dev);
+	host->power = false;
+
+	ret = pm_runtime_get_sync(dev);
 	if (ret < 0)
 		goto err_clk;
 
@@ -1579,12 +1558,13 @@
 		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
 		 clk_get_rate(host->clk) / 1000000UL);
 
+	pm_runtime_put(dev);
 	clk_disable_unprepare(host->clk);
 	return ret;
 
 err_clk:
 	clk_disable_unprepare(host->clk);
-err_pm:
+	pm_runtime_put_sync(dev);
 	pm_runtime_disable(dev);
 err_host:
 	mmc_free_host(mmc);
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 9aa1479..f750f94 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -28,10 +28,12 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/mmc/host.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mfd/tmio.h>
 #include <linux/sh_dma.h>
 #include <linux/delay.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinctrl-state.h>
+#include <linux/regulator/consumer.h>
 
 #include "tmio_mmc.h"
 
@@ -48,10 +50,8 @@
 	unsigned bus_shift;
 };
 
-static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
-	{
-		.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
-	},
+static const struct sh_mobile_sdhi_of_data of_default_cfg = {
+	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
 };
 
 static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
@@ -62,7 +62,7 @@
 
 static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
 	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
-			  TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
+			  TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
 	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
 	.dma_buswidth	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 	.dma_rx_offset	= 0x2000,
@@ -70,17 +70,16 @@
 
 static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
 	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
-			  TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
-	.capabilities	= MMC_CAP_SD_HIGHSPEED,
+			  TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
 	.bus_shift	= 2,
 };
 
 static const struct of_device_id sh_mobile_sdhi_of_match[] = {
 	{ .compatible = "renesas,sdhi-shmobile" },
-	{ .compatible = "renesas,sdhi-sh7372" },
-	{ .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
-	{ .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
-	{ .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
+	{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
+	{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
+	{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
 	{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
 	{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
 	{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
@@ -97,6 +96,8 @@
 	struct clk *clk;
 	struct tmio_mmc_data mmc_data;
 	struct tmio_mmc_dma dma_priv;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *pins_default, *pins_uhs;
 };
 
 static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
@@ -131,16 +132,28 @@
 	sd_ctrl_write16(host, EXT_ACC, val);
 }
 
-static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f)
+static int sh_mobile_sdhi_clk_enable(struct tmio_mmc_host *host)
 {
-	struct mmc_host *mmc = platform_get_drvdata(pdev);
-	struct tmio_mmc_host *host = mmc_priv(mmc);
+	struct mmc_host *mmc = host->mmc;
 	struct sh_mobile_sdhi *priv = host_to_priv(host);
 	int ret = clk_prepare_enable(priv->clk);
 	if (ret < 0)
 		return ret;
 
-	*f = clk_get_rate(priv->clk);
+	/*
+	 * The clock driver may not know what maximum frequency
+	 * actually works, so it should be set with the max-frequency
+	 * property which will already have been read to f_max.  If it
+	 * was missing, assume the current frequency is the maximum.
+	 */
+	if (!mmc->f_max)
+		mmc->f_max = clk_get_rate(priv->clk);
+
+	/*
+	 * Minimum frequency is the minimum input clock frequency
+	 * divided by our maximum divider.
+	 */
+	mmc->f_min = max(clk_round_rate(priv->clk, 1) / 512, 1L);
 
 	/* enable 16bit data access on SDBUF as default */
 	sh_mobile_sdhi_sdbuf_width(host, 16);
@@ -148,19 +161,92 @@
 	return 0;
 }
 
-static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
+static unsigned int sh_mobile_sdhi_clk_update(struct tmio_mmc_host *host,
+					      unsigned int new_clock)
 {
-	struct mmc_host *mmc = platform_get_drvdata(pdev);
+	struct sh_mobile_sdhi *priv = host_to_priv(host);
+	unsigned int freq, diff, best_freq = 0, diff_min = ~0;
+	int i, ret;
+
+	/* tested only on RCar Gen2+ currently; may work for others */
+	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
+		return clk_get_rate(priv->clk);
+
+	/*
+	 * We want the bus clock to be as close as possible to, but no
+	 * greater than, new_clock.  As we can divide by 1 << i for
+	 * any i in [0, 9] we want the input clock to be as close as
+	 * possible, but no greater than, new_clock << i.
+	 */
+	for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) {
+		freq = clk_round_rate(priv->clk, new_clock << i);
+		if (freq > (new_clock << i)) {
+			/* Too fast; look for a slightly slower option */
+			freq = clk_round_rate(priv->clk,
+					      (new_clock << i) / 4 * 3);
+			if (freq > (new_clock << i))
+				continue;
+		}
+
+		diff = new_clock - (freq >> i);
+		if (diff <= diff_min) {
+			best_freq = freq;
+			diff_min = diff;
+		}
+	}
+
+	ret = clk_set_rate(priv->clk, best_freq);
+
+	return ret == 0 ? best_freq : clk_get_rate(priv->clk);
+}
+
+static void sh_mobile_sdhi_clk_disable(struct tmio_mmc_host *host)
+{
+	struct sh_mobile_sdhi *priv = host_to_priv(host);
+
+	clk_disable_unprepare(priv->clk);
+}
+
+static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
+						      struct mmc_ios *ios)
+{
 	struct tmio_mmc_host *host = mmc_priv(mmc);
 	struct sh_mobile_sdhi *priv = host_to_priv(host);
-	clk_disable_unprepare(priv->clk);
+	struct pinctrl_state *pin_state;
+	int ret;
+
+	switch (ios->signal_voltage) {
+	case MMC_SIGNAL_VOLTAGE_330:
+		pin_state = priv->pins_default;
+		break;
+	case MMC_SIGNAL_VOLTAGE_180:
+		pin_state = priv->pins_uhs;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * If anything is missing, assume signal voltage is fixed at
+	 * 3.3V and succeed/fail accordingly.
+	 */
+	if (IS_ERR(priv->pinctrl) || IS_ERR(pin_state))
+		return ios->signal_voltage ==
+			MMC_SIGNAL_VOLTAGE_330 ? 0 : -EINVAL;
+
+	ret = mmc_regulator_set_vqmmc(host->mmc, ios);
+	if (ret)
+		return ret;
+
+	return pinctrl_select_state(priv->pinctrl, pin_state);
 }
 
 static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
 {
 	int timeout = 1000;
 
-	while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13)))
+	while (--timeout && !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS)
+			      & TMIO_STAT_SCLKDIVEN))
 		udelay(1);
 
 	if (!timeout) {
@@ -226,7 +312,6 @@
 	struct tmio_mmc_host *host;
 	struct resource *res;
 	int irq, ret, i = 0;
-	bool multiplexed_isr = true;
 	struct tmio_mmc_dma *dma_priv;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -247,6 +332,14 @@
 		goto eprobe;
 	}
 
+	priv->pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (!IS_ERR(priv->pinctrl)) {
+		priv->pins_default = pinctrl_lookup_state(priv->pinctrl,
+						PINCTRL_STATE_DEFAULT);
+		priv->pins_uhs = pinctrl_lookup_state(priv->pinctrl,
+						"state_uhs");
+	}
+
 	host = tmio_mmc_host_alloc(pdev);
 	if (!host) {
 		ret = -ENOMEM;
@@ -267,8 +360,10 @@
 	host->dma		= dma_priv;
 	host->write16_hook	= sh_mobile_sdhi_write16_hook;
 	host->clk_enable	= sh_mobile_sdhi_clk_enable;
+	host->clk_update	= sh_mobile_sdhi_clk_update;
 	host->clk_disable	= sh_mobile_sdhi_clk_disable;
 	host->multi_io_quirk	= sh_mobile_sdhi_multi_io_quirk;
+	host->start_signal_voltage_switch = sh_mobile_sdhi_start_signal_voltage_switch;
 
 	/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
 	if (!host->bus_shift && resource_size(res) > 0x100) /* old way to determine the shift */
@@ -308,63 +403,24 @@
 	if (ret < 0)
 		goto efree;
 
-	/*
-	 * Allow one or more specific (named) ISRs or
-	 * one or more multiplexed (un-named) ISRs.
-	 */
-
-	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
-	if (irq >= 0) {
-		multiplexed_isr = false;
-		ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_card_detect_irq, 0,
+	while (1) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0)
+			break;
+		i++;
+		ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
 				  dev_name(&pdev->dev), host);
 		if (ret)
 			goto eirq;
 	}
 
-	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
-	if (irq >= 0) {
-		multiplexed_isr = false;
-		ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdio_irq, 0,
-				  dev_name(&pdev->dev), host);
-		if (ret)
-			goto eirq;
-	}
-
-	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD);
-	if (irq >= 0) {
-		multiplexed_isr = false;
-		ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdcard_irq, 0,
-				  dev_name(&pdev->dev), host);
-		if (ret)
-			goto eirq;
-	} else if (!multiplexed_isr) {
-		dev_err(&pdev->dev,
-			"Principal SD-card IRQ is missing among named interrupts\n");
+	/* There must be at least one IRQ source */
+	if (!i) {
 		ret = irq;
 		goto eirq;
 	}
 
-	if (multiplexed_isr) {
-		while (1) {
-			irq = platform_get_irq(pdev, i);
-			if (irq < 0)
-				break;
-			i++;
-			ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
-					  dev_name(&pdev->dev), host);
-			if (ret)
-				goto eirq;
-		}
-
-		/* There must be at least one IRQ source */
-		if (!i) {
-			ret = irq;
-			goto eirq;
-		}
-	}
-
-	dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
+	dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n",
 		 mmc_hostname(host->mmc), (unsigned long)
 		 (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
 		 host->mmc->f_max / 1000000);
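
The new ->clk_update() callback added above searches for the best parent clock: the SDHI divider is 1 << i for i in [0, 9], so for each i the clock framework is asked for a rate near new_clock << i, and the candidate whose divided-down result comes closest to (without exceeding) the requested bus clock wins. Below is a standalone model of that search; clk_round_rate() is replaced by a stub that snaps to 13 MHz multiples purely for illustration, and the winning divider is tracked only for the printout.

#include <stdio.h>
#include <limits.h>

/* stub: snap requested rates to multiples of 13 MHz, capped at 780 MHz */
static unsigned long clk_round_rate_stub(unsigned long rate)
{
	const unsigned long step = 13000000UL;
	const unsigned long max = 780000000UL;
	unsigned long r = rate / step * step;

	return r > max ? max : r;
}

static int ilog2_u32(unsigned int v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

static unsigned int pick_input_clock(unsigned int new_clock, int *best_div)
{
	unsigned int freq, diff, best_freq = 0, diff_min = ~0U;
	int i = ilog2_u32(UINT_MAX / new_clock);

	if (i > 9)
		i = 9;	/* the hardware divider is 1 << i, i in [0, 9] */

	for (; i >= 0; i--) {
		freq = (unsigned int)clk_round_rate_stub((unsigned long)new_clock << i);
		if (freq > (new_clock << i)) {
			/* rounded up: ask again for a slightly lower rate */
			freq = (unsigned int)clk_round_rate_stub(((unsigned long)new_clock << i) / 4 * 3);
			if (freq > (new_clock << i))
				continue;
		}

		diff = new_clock - (freq >> i);
		if (diff <= diff_min) {
			best_freq = freq;
			diff_min = diff;
			*best_div = 1 << i;
		}
	}
	return best_freq;
}

int main(void)
{
	int div = 1;
	unsigned int target = 25000000;	/* wanted SD bus clock, Hz */
	unsigned int best = pick_input_clock(target, &div);

	printf("parent %u Hz / %d = bus clock %u Hz (target %u Hz)\n",
	       best, div, best / div, target);
	return 0;
}
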
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 8372a41..7fc8b7a 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1129,6 +1129,11 @@
 				  MMC_CAP_1_8V_DDR |
 				  MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
 
+	/* TODO MMC DDR is not working on A80 */
+	if (of_device_is_compatible(pdev->dev.of_node,
+				    "allwinner,sun9i-a80-mmc"))
+		mmc->caps &= ~MMC_CAP_1_8V_DDR;
+
 	ret = mmc_of_parse(mmc);
 	if (ret)
 		goto error_free_dma;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 4a597f5a..1aac2ad 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -1,6 +1,8 @@
 /*
  * linux/drivers/mmc/host/tmio_mmc.h
  *
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
  * Copyright (C) 2007 Ian Molton
  * Copyright (C) 2004 Ian Molton
  *
@@ -18,12 +20,67 @@
 
 #include <linux/dmaengine.h>
 #include <linux/highmem.h>
-#include <linux/mmc/tmio.h>
 #include <linux/mutex.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
 #include <linux/spinlock.h>
 
+#define CTL_SD_CMD 0x00
+#define CTL_ARG_REG 0x04
+#define CTL_STOP_INTERNAL_ACTION 0x08
+#define CTL_XFER_BLK_COUNT 0xa
+#define CTL_RESPONSE 0x0c
+/* driver merges STATUS and following STATUS2 */
+#define CTL_STATUS 0x1c
+/* driver merges IRQ_MASK and following IRQ_MASK2 */
+#define CTL_IRQ_MASK 0x20
+#define CTL_SD_CARD_CLK_CTL 0x24
+#define CTL_SD_XFER_LEN 0x26
+#define CTL_SD_MEM_CARD_OPT 0x28
+#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
+#define CTL_SD_DATA_PORT 0x30
+#define CTL_TRANSACTION_CTL 0x34
+#define CTL_SDIO_STATUS 0x36
+#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_DMA_ENABLE 0xd8
+#define CTL_RESET_SD 0xe0
+#define CTL_VERSION 0xe2
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+/* Definitions for values the CTRL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND    BIT(0)
+#define TMIO_STAT_DATAEND       BIT(2)
+#define TMIO_STAT_CARD_REMOVE   BIT(3)
+#define TMIO_STAT_CARD_INSERT   BIT(4)
+#define TMIO_STAT_SIGSTATE      BIT(5)
+#define TMIO_STAT_WRPROTECT     BIT(7)
+#define TMIO_STAT_CARD_REMOVE_A BIT(8)
+#define TMIO_STAT_CARD_INSERT_A BIT(9)
+#define TMIO_STAT_SIGSTATE_A    BIT(10)
+
+/* These belong technically to CTRL_STATUS2, but the driver merges them */
+#define TMIO_STAT_CMD_IDX_ERR   BIT(16)
+#define TMIO_STAT_CRCFAIL       BIT(17)
+#define TMIO_STAT_STOPBIT_ERR   BIT(18)
+#define TMIO_STAT_DATATIMEOUT   BIT(19)
+#define TMIO_STAT_RXOVERFLOW    BIT(20)
+#define TMIO_STAT_TXUNDERRUN    BIT(21)
+#define TMIO_STAT_CMDTIMEOUT    BIT(22)
+#define TMIO_STAT_DAT0		BIT(23)	/* only known on R-Car so far */
+#define TMIO_STAT_RXRDY         BIT(24)
+#define TMIO_STAT_TXRQ          BIT(25)
+#define TMIO_STAT_ILL_FUNC      BIT(29) /* only when !TMIO_MMC_HAS_IDLE_WAIT */
+#define TMIO_STAT_SCLKDIVEN     BIT(29) /* only when TMIO_MMC_HAS_IDLE_WAIT */
+#define TMIO_STAT_CMD_BUSY      BIT(30)
+#define TMIO_STAT_ILL_ACCESS    BIT(31)
+
+#define	CLK_CTL_DIV_MASK	0xff
+#define	CLK_CTL_SCLKEN		BIT(8)
+
+#define TMIO_BBS		512		/* Boot block size */
+
 /* Definitions for values the CTRL_SDIO_STATUS register can take. */
 #define TMIO_SDIO_STAT_IOIRQ	0x0001
 #define TMIO_SDIO_STAT_EXPUB52	0x4000
@@ -95,10 +152,14 @@
 	bool			sdio_irq_enabled;
 
 	int (*write16_hook)(struct tmio_mmc_host *host, int addr);
-	int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
-	void (*clk_disable)(struct platform_device *pdev);
+	int (*clk_enable)(struct tmio_mmc_host *host);
+	unsigned int (*clk_update)(struct tmio_mmc_host *host,
+				   unsigned int new_clock);
+	void (*clk_disable)(struct tmio_mmc_host *host);
 	int (*multi_io_quirk)(struct mmc_card *card,
 			      unsigned int direction, int blk_size);
+	int (*start_signal_voltage_switch)(struct mmc_host *mmc,
+					   struct mmc_ios *ios);
 };
 
 struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
@@ -111,9 +172,6 @@
 void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
 void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
 irqreturn_t tmio_mmc_irq(int irq, void *devid);
-irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid);
-irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid);
-irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid);
 
 static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
 					 unsigned long *flags)
@@ -177,7 +235,7 @@
 	readsw(host->ctl + (addr << host->bus_shift), buf, count);
 }
 
-static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, int addr)
 {
 	return readw(host->ctl + (addr << host->bus_shift)) |
 	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
@@ -199,11 +257,10 @@
 	writesw(host->ctl + (addr << host->bus_shift), buf, count);
 }
 
-static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int addr, u32 val)
 {
 	writew(val, host->ctl + (addr << host->bus_shift));
 	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
 }
 
-
 #endif
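
The helpers renamed above make it explicit that the controller only has 16-bit registers: 32-bit quantities such as CTL_STATUS/CTL_STATUS2 are assembled from two adjacent 16-bit reads at addr and addr + 2, and split again on write. A self-contained sketch of the same assembly is shown below, with a plain array standing in for the ioremapped window and bus_shift omitted for brevity.

#include <stdint.h>
#include <stdio.h>

static uint16_t regs[0x200 / 2];	/* fake 16-bit register file */

static uint16_t readw_reg(unsigned int addr)
{
	return regs[addr / 2];
}

static void writew_reg(uint16_t val, unsigned int addr)
{
	regs[addr / 2] = val;
}

/* low word at addr, high word at addr + 2 */
static uint32_t sd_ctrl_read16_and_16_as_32(unsigned int addr)
{
	return readw_reg(addr) | (uint32_t)readw_reg(addr + 2) << 16;
}

static void sd_ctrl_write32_as_16_and_16(unsigned int addr, uint32_t val)
{
	writew_reg(val & 0xffff, addr);
	writew_reg(val >> 16, addr + 2);
}

int main(void)
{
	const unsigned int CTL_STATUS = 0x1c;	/* as in the header above */

	sd_ctrl_write32_as_16_and_16(CTL_STATUS, 0xdeadbeef);
	printf("STATUS  = 0x%04x\n", readw_reg(CTL_STATUS));
	printf("STATUS2 = 0x%04x\n", readw_reg(CTL_STATUS + 2));
	printf("merged  = 0x%08x\n", sd_ctrl_read16_and_16_as_32(CTL_STATUS));
	return 0;
}
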
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 6754358..fa8a936 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -15,7 +15,6 @@
 #include <linux/dmaengine.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
-#include <linux/mmc/tmio.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
 
@@ -63,7 +62,7 @@
 		}
 	}
 
-	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
 			  (align & PAGE_MASK))) || !multiple) {
 		ret = -EINVAL;
 		goto pio;
@@ -133,7 +132,7 @@
 		}
 	}
 
-	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
 			  (align & PAGE_MASK))) || !multiple) {
 		ret = -EINVAL;
 		goto pio;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 03f6e74..f44e2ab 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -39,7 +39,6 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/slot-gpio.h>
-#include <linux/mmc/tmio.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/platform_device.h>
@@ -56,18 +55,18 @@
 void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
 {
 	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
-	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
+	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
 }
 
 void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
 {
 	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
-	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
+	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
 }
 
 static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
 {
-	sd_ctrl_write32(host, CTL_STATUS, ~i);
+	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
 }
 
 static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
@@ -154,31 +153,16 @@
 	}
 }
 
-static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
-				unsigned int new_clock)
+static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
 {
-	u32 clk = 0, clock;
+	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10);
 
-	if (new_clock) {
-		for (clock = host->mmc->f_min, clk = 0x80000080;
-		     new_clock >= (clock << 1);
-		     clk >>= 1)
-			clock <<= 1;
-
-		/* 1/1 clock is option */
-		if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
-		   ((clk >> 22) & 0x1))
-			clk |= 0xff;
-	}
-
-	if (host->set_clk_div)
-		host->set_clk_div(host->pdev, (clk >> 22) & 1);
-
-	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
-			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
-	if (!(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG))
+	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
+		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
 		msleep(10);
+	}
 }
 
 static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
@@ -190,19 +174,41 @@
 
 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
 		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-	msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 5 : 10);
+	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 5 : 10);
 }
 
-static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
+static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
+				unsigned int new_clock)
 {
-	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
-		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-	msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 1 : 10);
+	u32 clk = 0, clock;
 
-	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
-		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
-		msleep(10);
+	if (new_clock == 0) {
+		tmio_mmc_clk_stop(host);
+		return;
 	}
+
+	if (host->clk_update)
+		clock = host->clk_update(host, new_clock) / 512;
+	else
+		clock = host->mmc->f_min;
+
+	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
+		clock <<= 1;
+
+	/* 1/1 clock is option */
+	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
+		clk |= 0xff;
+
+	if (host->set_clk_div)
+		host->set_clk_div(host->pdev, (clk >> 22) & 1);
+
+	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
+	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
+		msleep(10);
+
+	tmio_mmc_clk_start(host);
 }
 
 static void tmio_mmc_reset(struct tmio_mmc_host *host)
@@ -264,9 +270,6 @@
 
 	tmio_mmc_abort_dma(host);
 	mmc_request_done(host->mmc, mrq);
-
-	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
-	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
 }
 
 /* called with host->lock held, interrupts disabled */
@@ -296,9 +299,6 @@
 		tmio_mmc_abort_dma(host);
 
 	mmc_request_done(host->mmc, mrq);
-
-	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
-	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
 }
 
 static void tmio_mmc_done_work(struct work_struct *work)
@@ -375,7 +375,7 @@
 	tmio_mmc_enable_mmc_irqs(host, irq_mask);
 
 	/* Fire off the command */
-	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
+	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
 	sd_ctrl_write16(host, CTL_SD_CMD, c);
 
 	return 0;
@@ -530,7 +530,7 @@
 		goto out;
 
 	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
-		u32 status = sd_ctrl_read32(host, CTL_STATUS);
+		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
 		bool done = false;
 
 		/*
@@ -542,7 +542,7 @@
 		 * waiting for one more interrupt fixes the problem.
 		 */
 		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
-			if (status & TMIO_STAT_ILL_FUNC)
+			if (status & TMIO_STAT_SCLKDIVEN)
 				done = true;
 		} else {
 			if (!(status & TMIO_STAT_CMD_BUSY))
@@ -585,7 +585,7 @@
 	 */
 
 	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
-		cmd->resp[i] = sd_ctrl_read32(host, addr);
+		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);
 
 	if (cmd->flags &  MMC_RSP_136) {
 		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
@@ -625,19 +625,6 @@
 	spin_unlock(&host->lock);
 }
 
-static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
-				       int *ireg, int *status)
-{
-	*status = sd_ctrl_read32(host, CTL_STATUS);
-	*ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
-
-	pr_debug_status(*status);
-	pr_debug_status(*ireg);
-
-	/* Clear the status except the interrupt status */
-	sd_ctrl_write32(host, CTL_STATUS, TMIO_MASK_IRQ);
-}
-
 static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
 				      int ireg, int status)
 {
@@ -657,18 +644,6 @@
 	return false;
 }
 
-irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
-{
-	unsigned int ireg, status;
-	struct tmio_mmc_host *host = devid;
-
-	tmio_mmc_card_irq_status(host, &ireg, &status);
-	__tmio_mmc_card_detect_irq(host, ireg, status);
-
-	return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
-
 static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
 				 int ireg, int status)
 {
@@ -698,19 +673,7 @@
 	return false;
 }
 
-irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
-{
-	unsigned int ireg, status;
-	struct tmio_mmc_host *host = devid;
-
-	tmio_mmc_card_irq_status(host, &ireg, &status);
-	__tmio_mmc_sdcard_irq(host, ireg, status);
-
-	return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
-
-irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
+static void tmio_mmc_sdio_irq(int irq, void *devid)
 {
 	struct tmio_mmc_host *host = devid;
 	struct mmc_host *mmc = host->mmc;
@@ -719,7 +682,7 @@
 	unsigned int sdio_status;
 
 	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
-		return IRQ_HANDLED;
+		return;
 
 	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
 	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
@@ -732,19 +695,22 @@
 
 	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
 		mmc_signal_sdio_irq(mmc);
-
-	return IRQ_HANDLED;
 }
-EXPORT_SYMBOL(tmio_mmc_sdio_irq);
 
 irqreturn_t tmio_mmc_irq(int irq, void *devid)
 {
 	struct tmio_mmc_host *host = devid;
 	unsigned int ireg, status;
 
-	pr_debug("MMC IRQ begin\n");
+	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
+	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
 
-	tmio_mmc_card_irq_status(host, &ireg, &status);
+	pr_debug_status(status);
+	pr_debug_status(ireg);
+
+	/* Clear the status except the interrupt status */
+	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);
+
 	if (__tmio_mmc_card_detect_irq(host, ireg, status))
 		return IRQ_HANDLED;
 	if (__tmio_mmc_sdcard_irq(host, ireg, status))
@@ -812,8 +778,6 @@
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
-	pm_runtime_get_sync(mmc_dev(mmc));
-
 	if (mrq->data) {
 		ret = tmio_mmc_start_data(host, mrq->data);
 		if (ret)
@@ -832,24 +796,14 @@
 	host->mrq = NULL;
 	mrq->cmd->error = ret;
 	mmc_request_done(mmc, mrq);
-
-	pm_runtime_mark_last_busy(mmc_dev(mmc));
-	pm_runtime_put_autosuspend(mmc_dev(mmc));
 }
 
-static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
+static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
 {
-	struct mmc_host *mmc = host->mmc;
-	int ret;
-
 	if (!host->clk_enable)
 		return -ENOTSUPP;
 
-	ret = host->clk_enable(host->pdev, &mmc->f_max);
-	if (!ret)
-		mmc->f_min = mmc->f_max / 512;
-
-	return ret;
+	return host->clk_enable(host);
 }
 
 static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
@@ -925,8 +879,6 @@
 	struct device *dev = &host->pdev->dev;
 	unsigned long flags;
 
-	pm_runtime_get_sync(mmc_dev(mmc));
-
 	mutex_lock(&host->ios_lock);
 
 	spin_lock_irqsave(&host->lock, flags);
@@ -959,14 +911,12 @@
 		tmio_mmc_clk_stop(host);
 		break;
 	case MMC_POWER_UP:
-		tmio_mmc_set_clock(host, ios->clock);
 		tmio_mmc_power_on(host, ios->vdd);
-		tmio_mmc_clk_start(host);
+		tmio_mmc_set_clock(host, ios->clock);
 		tmio_mmc_set_bus_width(host, ios->bus_width);
 		break;
 	case MMC_POWER_ON:
 		tmio_mmc_set_clock(host, ios->clock);
-		tmio_mmc_clk_start(host);
 		tmio_mmc_set_bus_width(host, ios->bus_width);
 		break;
 	}
@@ -983,9 +933,6 @@
 	host->clk_cache = ios->clock;
 
 	mutex_unlock(&host->ios_lock);
-
-	pm_runtime_mark_last_busy(mmc_dev(mmc));
-	pm_runtime_put_autosuspend(mmc_dev(mmc));
 }
 
 static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -996,11 +943,8 @@
 	if (ret >= 0)
 		return ret;
 
-	pm_runtime_get_sync(mmc_dev(mmc));
 	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
-		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
-	pm_runtime_mark_last_busy(mmc_dev(mmc));
-	pm_runtime_put_autosuspend(mmc_dev(mmc));
+		(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
 
 	return ret;
 }
@@ -1016,12 +960,20 @@
 	return blk_size;
 }
 
-static const struct mmc_host_ops tmio_mmc_ops = {
+static int tmio_mmc_card_busy(struct mmc_host *mmc)
+{
+	struct tmio_mmc_host *host = mmc_priv(mmc);
+
+	return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_DAT0);
+}
+
+static struct mmc_host_ops tmio_mmc_ops = {
 	.request	= tmio_mmc_request,
 	.set_ios	= tmio_mmc_set_ios,
 	.get_ro         = tmio_mmc_get_ro,
 	.get_cd		= mmc_gpio_get_cd,
 	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
+	.card_busy	= tmio_mmc_card_busy,
 	.multi_io_quirk	= tmio_multi_io_quirk,
 };
 
@@ -1120,12 +1072,14 @@
 		goto host_free;
 	}
 
+	tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
 	mmc->ops = &tmio_mmc_ops;
+
 	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
 	mmc->caps2 |= pdata->capabilities2;
 	mmc->max_segs = 32;
 	mmc->max_blk_size = 512;
-	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+	mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
 		mmc->max_segs;
 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
 	mmc->max_seg_size = mmc->max_req_size;
@@ -1135,7 +1089,7 @@
 				  mmc->caps & MMC_CAP_NONREMOVABLE ||
 				  mmc->slot.cd_irq >= 0);
 
-	if (tmio_mmc_clk_update(_host) < 0) {
+	if (tmio_mmc_clk_enable(_host) < 0) {
 		mmc->f_max = pdata->hclk;
 		mmc->f_min = mmc->f_max / 512;
 	}
@@ -1159,7 +1113,7 @@
 	tmio_mmc_clk_stop(_host);
 	tmio_mmc_reset(_host);
 
-	_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
+	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
 	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
 
 	/* Unmask the IRQs we want to know about */
@@ -1251,7 +1205,7 @@
 		tmio_mmc_clk_stop(host);
 
 	if (host->clk_disable)
-		host->clk_disable(host->pdev);
+		host->clk_disable(host);
 
 	return 0;
 }
@@ -1263,12 +1217,10 @@
 	struct tmio_mmc_host *host = mmc_priv(mmc);
 
 	tmio_mmc_reset(host);
-	tmio_mmc_clk_update(host);
+	tmio_mmc_clk_enable(host);
 
-	if (host->clk_cache) {
+	if (host->clk_cache)
 		tmio_mmc_set_clock(host, host->clk_cache);
-		tmio_mmc_clk_start(host);
-	}
 
 	tmio_mmc_enable_dma(host, true);
 
diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
index e2cdd5f..553ef41 100644
--- a/drivers/mmc/host/toshsd.c
+++ b/drivers/mmc/host/toshsd.c
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b2752fe..1bd5f1a 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -22,6 +22,7 @@
 #include <linux/mmc/sdio.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 #include <linux/string.h>
@@ -198,6 +199,11 @@
 	struct dma_chan *chan_rx;
 	struct dma_chan *chan_tx;
 	bool dma_active;
+
+	/* Pin control */
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *pins_default;
+	struct pinctrl_state *pins_uhs;
 };
 
 /*			I/O primitives					*/
@@ -1147,12 +1153,45 @@
 	}
 }
 
+static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
+{
+	if (IS_ERR(host->pins_uhs))
+		return 0;
+
+	switch (voltage) {
+	case MMC_SIGNAL_VOLTAGE_180:
+	case MMC_SIGNAL_VOLTAGE_120:
+		return pinctrl_select_state(host->pinctrl,
+					    host->pins_uhs);
+
+	default:
+		return pinctrl_select_state(host->pinctrl,
+					    host->pins_default);
+	}
+}
+
+static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	int ret;
+
+	ret = mmc_regulator_set_vqmmc(mmc, ios);
+	if (ret < 0)
+		return ret;
+
+	ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
+	if (ret)
+		dev_warn_once(mmc_dev(mmc),
+			      "Failed to set pinstate err=%d\n", ret);
+	return ret;
+}
+
 static struct mmc_host_ops usdhi6_ops = {
 	.request	= usdhi6_request,
 	.set_ios	= usdhi6_set_ios,
 	.get_cd		= usdhi6_get_cd,
 	.get_ro		= usdhi6_get_ro,
 	.enable_sdio_irq = usdhi6_enable_sdio_irq,
+	.start_signal_voltage_switch = usdhi6_sig_volt_switch,
 };
 
 /*			State machine handlers				*/
@@ -1730,6 +1769,25 @@
 	host->wait	= USDHI6_WAIT_FOR_REQUEST;
 	host->timeout	= msecs_to_jiffies(4000);
 
+	host->pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR(host->pinctrl)) {
+		ret = PTR_ERR(host->pinctrl);
+		goto e_free_mmc;
+	}
+
+	host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
+	if (!IS_ERR(host->pins_uhs)) {
+		host->pins_default = pinctrl_lookup_state(host->pinctrl,
+							  PINCTRL_STATE_DEFAULT);
+
+		if (IS_ERR(host->pins_default)) {
+			dev_err(dev,
+				"UHS pinctrl requires a default pin state.\n");
+			ret = PTR_ERR(host->pins_default);
+			goto e_free_mmc;
+		}
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	host->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(host->base)) {
@@ -1785,11 +1843,11 @@
 
 	mmc->ops = &usdhi6_ops;
 	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
-		MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_SDIO_IRQ;
+		     MMC_CAP_SDIO_IRQ;
 	/* Set .max_segs to some random number. Feel free to adjust. */
 	mmc->max_segs = 32;
 	mmc->max_blk_size = 512;
-	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
 	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
 	/*
 	 * Setting .max_seg_size to 1 page would simplify our page-mapping code,
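
usdhi6_sig_volt_switch() above changes the signalling voltage in two steps: the vqmmc regulator is programmed first, then the pin controller is moved between the default and "state_uhs" pin states, with a missing UHS state simply leaving the pins alone. A standalone model of that flow is sketched below; the regulator and pinctrl calls are stubs, and only the control flow mirrors the patch.

#include <stdio.h>

enum signal_voltage { VOLT_330, VOLT_180, VOLT_120 };

static int have_uhs_pins = 1;	/* pretend "state_uhs" was found in the DT */

static int mmc_regulator_set_vqmmc(enum signal_voltage v)
{
	printf("vqmmc -> %s\n",
	       v == VOLT_330 ? "3.3V" : v == VOLT_180 ? "1.8V" : "1.2V");
	return 0;
}

static int pinctrl_select(const char *state)
{
	printf("pinctrl -> %s\n", state);
	return 0;
}

static int set_pinstates(enum signal_voltage v)
{
	if (!have_uhs_pins)
		return 0;	/* no "state_uhs": leave the pins alone */

	return pinctrl_select(v == VOLT_330 ? "default" : "state_uhs");
}

static int sig_volt_switch(enum signal_voltage v)
{
	int ret = mmc_regulator_set_vqmmc(v);

	if (ret < 0)
		return ret;

	ret = set_pinstates(v);
	if (ret)
		fprintf(stderr, "Failed to set pinstate err=%d\n", ret);
	return ret;
}

int main(void)
{
	sig_volt_switch(VOLT_330);
	sig_volt_switch(VOLT_180);
	return 0;
}
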
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e2c0057..7c887f1 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -75,7 +75,7 @@
 				break;
 			}
 
-		page_cache_release(page);
+		put_page(page);
 		pages--;
 		index++;
 	}
@@ -124,7 +124,7 @@
 			return PTR_ERR(page);
 
 		memcpy(buf, page_address(page) + offset, cpylen);
-		page_cache_release(page);
+		put_page(page);
 
 		if (retlen)
 			*retlen += cpylen;
@@ -164,7 +164,7 @@
 			unlock_page(page);
 			balance_dirty_pages_ratelimited(mapping);
 		}
-		page_cache_release(page);
+		put_page(page);
 
 		if (retlen)
 			*retlen += cpylen;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 7c95a65..392f9ef 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -74,6 +74,16 @@
 	  physically into the CPU's memory. The mapping description here is
 	  taken from OF device tree.
 
+config MTD_PHYSMAP_OF_VERSATILE
+	bool "Support ARM Versatile physmap OF"
+	depends on MTD_PHYSMAP_OF
+	depends on MFD_SYSCON
+	default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || REALVIEW_DT)
+	help
+	  This provides some extra DT physmap parsing for the ARM Versatile
+	  platforms, basically to add a VPP (write protection) callback so
+	  the flash can be taken out of write protection.
+
 config MTD_PMC_MSP_EVM
 	tristate "CFI Flash device mapped on PMC-Sierra MSP"
 	depends on PMC_MSP && MTD_CFI
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 141c91a..644f7d3 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -18,6 +18,9 @@
 obj-$(CONFIG_MTD_PXA2XX)	+= pxa2xx-flash.o
 obj-$(CONFIG_MTD_PHYSMAP)	+= physmap.o
 obj-$(CONFIG_MTD_PHYSMAP_OF)	+= physmap_of.o
+ifdef CONFIG_MTD_PHYSMAP_OF_VERSATILE
+obj-$(CONFIG_MTD_PHYSMAP_OF)	+= physmap_of_versatile.o
+endif
 obj-$(CONFIG_MTD_PISMO)		+= pismo.o
 obj-$(CONFIG_MTD_PMC_MSP_EVM)   += pmcmsp-flash.o
 obj-$(CONFIG_MTD_PCMCIA)	+= pcmciamtd.o
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 70c4531..22f3858 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -24,6 +24,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
+#include "physmap_of_versatile.h"
 
 struct of_flash_list {
 	struct mtd_info *mtd;
@@ -240,6 +241,11 @@
 		info->list[i].map.size = res_size;
 		info->list[i].map.bankwidth = be32_to_cpup(width);
 		info->list[i].map.device_node = dp;
+		err = of_flash_probe_versatile(dev, dp, &info->list[i].map);
+		if (err) {
+			dev_err(&dev->dev, "Can't probe Versatile VPP\n");
+			return err;
+		}
 
 		err = -ENOMEM;
 		info->list[i].map.virt = ioremap(info->list[i].map.phys,
diff --git a/drivers/mtd/maps/physmap_of_versatile.c b/drivers/mtd/maps/physmap_of_versatile.c
new file mode 100644
index 0000000..0f39b2a
--- /dev/null
+++ b/drivers/mtd/maps/physmap_of_versatile.c
@@ -0,0 +1,255 @@
+/*
+ * Versatile OF physmap driver add-on
+ *
+ * Copyright (c) 2016, Linaro Limited
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/mtd/map.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include "physmap_of_versatile.h"
+
+static struct regmap *syscon_regmap;
+
+enum versatile_flashprot {
+	INTEGRATOR_AP_FLASHPROT,
+	INTEGRATOR_CP_FLASHPROT,
+	VERSATILE_FLASHPROT,
+	REALVIEW_FLASHPROT,
+};
+
+static const struct of_device_id syscon_match[] = {
+	{
+		.compatible = "arm,integrator-ap-syscon",
+		.data = (void *)INTEGRATOR_AP_FLASHPROT,
+	},
+	{
+		.compatible = "arm,integrator-cp-syscon",
+		.data = (void *)INTEGRATOR_CP_FLASHPROT,
+	},
+	{
+		.compatible = "arm,core-module-versatile",
+		.data = (void *)VERSATILE_FLASHPROT,
+	},
+	{
+		.compatible = "arm,realview-eb-syscon",
+		.data = (void *)REALVIEW_FLASHPROT,
+	},
+	{
+		.compatible = "arm,realview-pb1176-syscon",
+		.data = (void *)REALVIEW_FLASHPROT,
+	},
+	{
+		.compatible = "arm,realview-pb11mp-syscon",
+		.data = (void *)REALVIEW_FLASHPROT,
+	},
+	{
+		.compatible = "arm,realview-pba8-syscon",
+		.data = (void *)REALVIEW_FLASHPROT,
+	},
+	{
+		.compatible = "arm,realview-pbx-syscon",
+		.data = (void *)REALVIEW_FLASHPROT,
+	},
+	{},
+};
+
+/*
+ * Flash protection handling for the Integrator/AP
+ */
+#define INTEGRATOR_SC_CTRLS_OFFSET	0x08
+#define INTEGRATOR_SC_CTRLC_OFFSET	0x0C
+#define INTEGRATOR_SC_CTRL_FLVPPEN	BIT(1)
+#define INTEGRATOR_SC_CTRL_FLWP		BIT(2)
+
+#define INTEGRATOR_EBI_CSR1_OFFSET	0x04
+/* The manual says bit 2, the code says bit 3, trust the code */
+#define INTEGRATOR_EBI_WRITE_ENABLE	BIT(3)
+#define INTEGRATOR_EBI_LOCK_OFFSET	0x20
+#define INTEGRATOR_EBI_LOCK_VAL		0xA05F
+
+static const struct of_device_id ebi_match[] = {
+	{ .compatible = "arm,external-bus-interface"},
+	{ },
+};
+
+static int ap_flash_init(struct platform_device *pdev)
+{
+	struct device_node *ebi;
+	static void __iomem *ebi_base;
+	u32 val;
+	int ret;
+
+	/* Look up the EBI */
+	ebi = of_find_matching_node(NULL, ebi_match);
+	if (!ebi) {
+		return -ENODEV;
+	}
+	ebi_base = of_iomap(ebi, 0);
+	if (!ebi_base)
+		return -ENODEV;
+
+	/* Clear VPP and write protection bits */
+	ret = regmap_write(syscon_regmap,
+		INTEGRATOR_SC_CTRLC_OFFSET,
+		INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
+	if (ret)
+		dev_err(&pdev->dev, "error clearing Integrator VPP/WP\n");
+
+	/* Unlock the EBI */
+	writel(INTEGRATOR_EBI_LOCK_VAL, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
+
+	/* Enable write cycles on the EBI, CSR1 (flash) */
+	val = readl(ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
+	val |= INTEGRATOR_EBI_WRITE_ENABLE;
+	writel(val, ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
+
+	/* Lock the EBI again */
+	writel(0, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
+	iounmap(ebi_base);
+
+	return 0;
+}
+
+static void ap_flash_set_vpp(struct map_info *map, int on)
+{
+	int ret;
+
+	if (on) {
+		ret = regmap_write(syscon_regmap,
+			INTEGRATOR_SC_CTRLS_OFFSET,
+			INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
+		if (ret)
+			pr_err("error enabling AP VPP\n");
+	} else {
+		ret = regmap_write(syscon_regmap,
+			INTEGRATOR_SC_CTRLC_OFFSET,
+			INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
+		if (ret)
+			pr_err("error disabling AP VPP\n");
+	}
+}
+
+/*
+ * Flash protection handling for the Integrator/CP
+ */
+
+#define INTCP_FLASHPROG_OFFSET		0x04
+#define CINTEGRATOR_FLVPPEN		BIT(0)
+#define CINTEGRATOR_FLWREN		BIT(1)
+#define CINTEGRATOR_FLMASK		BIT(0)|BIT(1)
+
+static void cp_flash_set_vpp(struct map_info *map, int on)
+{
+	int ret;
+
+	if (on) {
+		ret = regmap_update_bits(syscon_regmap,
+				INTCP_FLASHPROG_OFFSET,
+				CINTEGRATOR_FLMASK,
+				CINTEGRATOR_FLVPPEN | CINTEGRATOR_FLWREN);
+		if (ret)
+			pr_err("error setting CP VPP\n");
+	} else {
+		ret = regmap_update_bits(syscon_regmap,
+				INTCP_FLASHPROG_OFFSET,
+				CINTEGRATOR_FLMASK,
+				0);
+		if (ret)
+			pr_err("error setting CP VPP\n");
+	}
+}
+
+/*
+ * Flash protection handling for the Versatiles and RealViews
+ */
+
+#define VERSATILE_SYS_FLASH_OFFSET            0x4C
+
+static void versatile_flash_set_vpp(struct map_info *map, int on)
+{
+	int ret;
+
+	ret = regmap_update_bits(syscon_regmap, VERSATILE_SYS_FLASH_OFFSET,
+				 0x01, !!on);
+	if (ret)
+		pr_err("error setting Versatile VPP\n");
+}
+
+int of_flash_probe_versatile(struct platform_device *pdev,
+			     struct device_node *np,
+			     struct map_info *map)
+{
+	struct device_node *sysnp;
+	const struct of_device_id *devid;
+	struct regmap *rmap;
+	static enum versatile_flashprot versatile_flashprot;
+	int ret;
+
+	/* Not all flash chips use this protection line */
+	if (!of_device_is_compatible(np, "arm,versatile-flash"))
+		return 0;
+
+	/* For first chip probed, look up the syscon regmap */
+	if (!syscon_regmap) {
+		sysnp = of_find_matching_node_and_match(NULL,
+							syscon_match,
+							&devid);
+		if (!sysnp)
+			return -ENODEV;
+
+		versatile_flashprot = (enum versatile_flashprot)devid->data;
+		rmap = syscon_node_to_regmap(sysnp);
+		if (IS_ERR(rmap))
+			return PTR_ERR(rmap);
+
+		syscon_regmap = rmap;
+	}
+
+	switch (versatile_flashprot) {
+	case INTEGRATOR_AP_FLASHPROT:
+		ret = ap_flash_init(pdev);
+		if (ret)
+			return ret;
+		map->set_vpp = ap_flash_set_vpp;
+		dev_info(&pdev->dev, "Integrator/AP flash protection\n");
+		break;
+	case INTEGRATOR_CP_FLASHPROT:
+		map->set_vpp = cp_flash_set_vpp;
+		dev_info(&pdev->dev, "Integrator/CP flash protection\n");
+		break;
+	case VERSATILE_FLASHPROT:
+	case REALVIEW_FLASHPROT:
+		map->set_vpp = versatile_flash_set_vpp;
+		dev_info(&pdev->dev, "versatile/realview flash protection\n");
+		break;
+	default:
+		dev_info(&pdev->dev, "device marked as Versatile flash "
+			 "but no system controller was found\n");
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_flash_probe_versatile);
diff --git a/drivers/mtd/maps/physmap_of_versatile.h b/drivers/mtd/maps/physmap_of_versatile.h
new file mode 100644
index 0000000..5b86f6d
--- /dev/null
+++ b/drivers/mtd/maps/physmap_of_versatile.h
@@ -0,0 +1,16 @@
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_OF_VERSATILE
+int of_flash_probe_versatile(struct platform_device *pdev,
+			     struct device_node *np,
+			     struct map_info *map);
+#else
+static inline
+int of_flash_probe_versatile(struct platform_device *pdev,
+			     struct device_node *np,
+			     struct map_info *map)
+{
+	return 0;
+}
+#endif
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f470118..74ae243 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -409,7 +409,7 @@
 		goto error3;
 
 	if (tr->flush)
-		blk_queue_flush(new->rq, REQ_FLUSH);
+		blk_queue_write_cache(new->rq, true, false);
 
 	new->rq->queuedata = new;
 	blk_queue_logical_block_size(new->rq, tr->blksize);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 3096251..bee180bd 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -40,6 +40,7 @@
 #include <linux/slab.h>
 #include <linux/reboot.h>
 #include <linux/kconfig.h>
+#include <linux/leds.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -862,6 +863,7 @@
 		mtd_erase_callback(instr);
 		return 0;
 	}
+	ledtrig_mtd_activity();
 	return mtd->_erase(mtd, instr);
 }
 EXPORT_SYMBOL_GPL(mtd_erase);
@@ -925,6 +927,7 @@
 	if (!len)
 		return 0;
 
+	ledtrig_mtd_activity();
 	/*
 	 * In the absence of an error, drivers return a non-negative integer
 	 * representing the maximum number of bitflips that were corrected on
@@ -949,6 +952,7 @@
 		return -EROFS;
 	if (!len)
 		return 0;
+	ledtrig_mtd_activity();
 	return mtd->_write(mtd, to, len, retlen, buf);
 }
 EXPORT_SYMBOL_GPL(mtd_write);
@@ -982,6 +986,8 @@
 	ops->retlen = ops->oobretlen = 0;
 	if (!mtd->_read_oob)
 		return -EOPNOTSUPP;
+
+	ledtrig_mtd_activity();
 	/*
 	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
 	 * similar to mtd->_read(), returning a non-negative integer
@@ -997,6 +1003,19 @@
 }
 EXPORT_SYMBOL_GPL(mtd_read_oob);
 
+int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+				struct mtd_oob_ops *ops)
+{
+	ops->retlen = ops->oobretlen = 0;
+	if (!mtd->_write_oob)
+		return -EOPNOTSUPP;
+	if (!(mtd->flags & MTD_WRITEABLE))
+		return -EROFS;
+	ledtrig_mtd_activity();
+	return mtd->_write_oob(mtd, to, ops);
+}
+EXPORT_SYMBOL_GPL(mtd_write_oob);
+
 /*
  * Method to access the protection register area, present in some flash
  * devices. The user data is one time programmable but the factory data is read
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index b6facac..ba4f603 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -43,7 +43,6 @@
 #include <linux/mtd/nand_bch.h>
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
-#include <linux/leds.h>
 #include <linux/io.h>
 #include <linux/mtd/partitions.h>
 #include <linux/of_mtd.h>
@@ -97,12 +96,6 @@
 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
 			     struct mtd_oob_ops *ops);
 
-/*
- * For devices which display every fart in the system on a separate LED. Is
- * compiled away when LED support is disabled.
- */
-DEFINE_LED_TRIGGER(nand_led_trigger);
-
 static int check_offs_len(struct mtd_info *mtd,
 					loff_t ofs, uint64_t len)
 {
@@ -540,19 +533,16 @@
 	if (in_interrupt() || oops_in_progress)
 		return panic_nand_wait_ready(mtd, timeo);
 
-	led_trigger_event(nand_led_trigger, LED_FULL);
 	/* Wait until command is processed or timeout occurs */
 	timeo = jiffies + msecs_to_jiffies(timeo);
 	do {
 		if (chip->dev_ready(mtd))
-			goto out;
+			return;
 		cond_resched();
 	} while (time_before(jiffies, timeo));
 
 	if (!chip->dev_ready(mtd))
 		pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
-out:
-	led_trigger_event(nand_led_trigger, LED_OFF);
 }
 EXPORT_SYMBOL_GPL(nand_wait_ready);
 
@@ -885,8 +875,6 @@
 	int status;
 	unsigned long timeo = 400;
 
-	led_trigger_event(nand_led_trigger, LED_FULL);
-
 	/*
 	 * Apply this short delay always to ensure that we do wait tWB in any
 	 * case on any machine.
@@ -910,7 +898,6 @@
 			cond_resched();
 		} while (time_before(jiffies, timeo));
 	}
-	led_trigger_event(nand_led_trigger, LED_OFF);
 
 	status = (int)chip->read_byte(mtd);
 	/* This can happen if in case of timeout or buggy dev_ready */
@@ -4009,7 +3996,6 @@
  * This is the first phase of the normal nand_scan() function. It reads the
  * flash ID and sets up MTD fields accordingly.
  *
- * The mtd->owner field must be set to the module of the caller.
  */
 int nand_scan_ident(struct mtd_info *mtd, int maxchips,
 		    struct nand_flash_dev *table)
@@ -4429,19 +4415,12 @@
  *
  * This fills out all the uninitialized function pointers with the defaults.
  * The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
  */
 int nand_scan(struct mtd_info *mtd, int maxchips)
 {
 	int ret;
 
-	/* Many callers got this wrong, so check for it for a while... */
-	if (!mtd->owner && caller_is_module()) {
-		pr_crit("%s called with NULL mtd->owner!\n", __func__);
-		BUG();
-	}
-
 	ret = nand_scan_ident(mtd, maxchips, NULL);
 	if (!ret)
 		ret = nand_scan_tail(mtd);
@@ -4474,20 +4453,6 @@
 }
 EXPORT_SYMBOL_GPL(nand_release);
 
-static int __init nand_base_init(void)
-{
-	led_trigger_register_simple("nand-disk", &nand_led_trigger);
-	return 0;
-}
-
-static void __exit nand_base_exit(void)
-{
-	led_trigger_unregister_simple(nand_led_trigger);
-}
-
-module_init(nand_base_init);
-module_exit(nand_base_exit);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1fd5195..a58169a2 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1339,7 +1339,7 @@
 	int i;
 
 	for (i = 0; i < ns->held_cnt; i++)
-		page_cache_release(ns->held_pages[i]);
+		put_page(ns->held_pages[i]);
 }
 
 /* Get page cache pages in advance to provide NOFS memory allocation */
@@ -1349,8 +1349,8 @@
 	struct page *page;
 	struct address_space *mapping = file->f_mapping;
 
-	start_index = pos >> PAGE_CACHE_SHIFT;
-	end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+	start_index = pos >> PAGE_SHIFT;
+	end_index = (pos + count - 1) >> PAGE_SHIFT;
 	if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
 		return -EINVAL;
 	ns->held_cnt = 0;
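
The PAGE_CACHE_SHIFT/PAGE_CACHE_SIZE users above become plain PAGE_SHIFT/PAGE_SIZE because page-cache granularity is simply the system page size. A tiny standalone illustration of the index arithmetic follows; the 4 KiB page size is an assumption for a common configuration, not something the patch dictates.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long long pos = 5000, count = 10000;
	unsigned long start_index = pos >> PAGE_SHIFT;
	unsigned long end_index = (pos + count - 1) >> PAGE_SHIFT;

	/* pages that must be held to cover [pos, pos + count) */
	printf("bytes [%llu, %llu) span pages %lu..%lu (%lu pages of %lu bytes)\n",
	       pos, pos + count, start_index, end_index,
	       end_index - start_index + 1, PAGE_SIZE);
	return 0;
}
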
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index b096f8b..3692dd5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -386,7 +386,7 @@
 		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
 
 			sm_printk("sector %d of block at LBA %d of zone %d"
-				" coudn't be read, marking it as invalid",
+				" couldn't be read, marking it as invalid",
 				boffset / SM_SECTOR_SIZE, lba, zone);
 
 			oob.data_status = 0;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 22fd19c..a7d1feb 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1142,22 +1142,19 @@
  */
 static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
 {
-	int err, major, minor, mode;
-	struct path path;
+	struct kstat stat;
+	int err, minor;
 
 	/* Probably this is an MTD character device node path */
-	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+	err = vfs_stat(mtd_dev, &stat);
 	if (err)
 		return ERR_PTR(err);
 
 	/* MTD device number is defined by the major / minor numbers */
-	major = imajor(d_backing_inode(path.dentry));
-	minor = iminor(d_backing_inode(path.dentry));
-	mode = d_backing_inode(path.dentry)->i_mode;
-	path_put(&path);
-	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
+	if (MAJOR(stat.rdev) != MTD_CHAR_MAJOR || !S_ISCHR(stat.mode))
 		return ERR_PTR(-EINVAL);
 
+	minor = MINOR(stat.rdev);
 	if (minor & 1)
 		/*
 		 * Just do not think the "/dev/mtdrX" devices support is need,
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index e844887..437757c 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -301,27 +301,24 @@
  */
 struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
 {
-	int error, ubi_num, vol_id, mod;
-	struct inode *inode;
-	struct path path;
+	int error, ubi_num, vol_id;
+	struct kstat stat;
 
 	dbg_gen("open volume %s, mode %d", pathname, mode);
 
 	if (!pathname || !*pathname)
 		return ERR_PTR(-EINVAL);
 
-	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+	error = vfs_stat(pathname, &stat);
 	if (error)
 		return ERR_PTR(error);
 
-	inode = d_backing_inode(path.dentry);
-	mod = inode->i_mode;
-	ubi_num = ubi_major2num(imajor(inode));
-	vol_id = iminor(inode) - 1;
-	path_put(&path);
-
-	if (!S_ISCHR(mod))
+	if (!S_ISCHR(stat.mode))
 		return ERR_PTR(-EINVAL);
+
+	ubi_num = ubi_major2num(MAJOR(stat.rdev));
+	vol_id = MINOR(stat.rdev) - 1;
+
 	if (vol_id >= 0 && ubi_num >= 0)
 		return ubi_open_volume(ubi_num, vol_id, mode);
 	return ERR_PTR(-ENODEV);
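
Both UBI hunks above replace the kern_path()/d_backing_inode() dance with a single vfs_stat() and derive the character-device major/minor from stat.rdev. The userspace analogue of that lookup, using stat(2) and major()/minor() from <sys/sysmacros.h> on an example path, looks like this:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/ubi0_0";
	struct stat st;

	if (stat(path, &st)) {
		perror("stat");
		return 1;
	}
	if (!S_ISCHR(st.st_mode)) {
		fprintf(stderr, "%s is not a character device\n", path);
		return 1;
	}

	/* mirrors ubi_open_volume_path(): volume id is minor - 1 */
	printf("%s: major %u, minor %u -> volume id %d\n",
	       path, major(st.st_rdev), minor(st.st_rdev),
	       (int)minor(st.st_rdev) - 1);
	return 0;
}
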
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a1ba62b..0c5415b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -62,9 +62,8 @@
 	  this device is consigned into oblivion) with a configurable IP
 	  address. It is most commonly used in order to make your currently
 	  inactive SLIP address seem like a real address for local programs.
-	  If you use SLIP or PPP, you might want to say Y here. Since this
-	  thing often comes in handy, the default is Y. It won't enlarge your
-	  kernel either. What a deal. Read about it in the Network
+	  If you use SLIP or PPP, you might want to say Y here. It won't
+	  enlarge your kernel. What a deal. Read about it in the Network
 	  Administrator's Guide, available from
 	  <http://www.tldp.org/docs.html#guide>.
 
@@ -193,8 +192,26 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called geneve.
 
+config GTP
+	tristate "GPRS Tunneling Protocol datapath (GTP-U)"
+	depends on INET && NET_UDP_TUNNEL
+	select NET_IP_TUNNEL
+	---help---
+	  This allows one to create gtp virtual interfaces that provide
+	  the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
+	  is used to prevent subscribers from accessing mobile carrier core
+	  network infrastructure. This driver requires userspace software that
+	  implements the signaling protocol (GTP-C) to update its PDP context
+	  base, such as OpenGGSN <http://git.osmocom.org/openggsn/>. This
+	  tunneling protocol is implemented according to the GSM TS 09.60 and
+	  3GPP TS 29.060 standards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called gtp.
+
 config MACSEC
 	tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+	select CRYPTO
 	select CRYPTO_AES
 	select CRYPTO_GCM
 	---help---
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1aa7cb8..7336cbd 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -25,6 +25,7 @@
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_VXLAN) += vxlan.o
 obj-$(CONFIG_GENEVE) += geneve.o
+obj-$(CONFIG_GTP) += gtp.o
 obj-$(CONFIG_NLMON) += nlmon.o
 obj-$(CONFIG_NET_VRF) += vrf.o
 
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 7f2a032..1b2e921 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -861,7 +861,7 @@
 	}
 	printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
 	cops_jumpstart(dev);	/* Restart the card. */
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 0d9b45f..81f90c4 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -433,7 +433,7 @@
 	kfree(iomem);
 }
 
-static int check_mirror(unsigned long addr, size_t size)
+static int __init check_mirror(unsigned long addr, size_t size)
 {
 	void __iomem *p;
 	int res = -1;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 141c2a4..910c12e 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -696,11 +696,17 @@
 	/* allow change of MTU according to the CANFD ability of the device */
 	switch (new_mtu) {
 	case CAN_MTU:
+		/* 'CANFD-only' controllers can not switch to CAN_MTU */
+		if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+			return -EINVAL;
+
 		priv->ctrlmode &= ~CAN_CTRLMODE_FD;
 		break;
 
 	case CANFD_MTU:
-		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
+		/* check for potential CANFD ability */
+		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
+		    !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
 			return -EINVAL;
 
 		priv->ctrlmode |= CAN_CTRLMODE_FD;
@@ -782,6 +788,35 @@
 				= { .len = sizeof(struct can_bittiming_const) },
 };
 
+static int can_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	bool is_can_fd = false;
+
+	/* Make sure that valid CAN FD configurations always consist of
+	 * - nominal/arbitration bittiming
+	 * - data bittiming
+	 * - control mode with CAN_CTRLMODE_FD set
+	 */
+
+	if (data[IFLA_CAN_CTRLMODE]) {
+		struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+		is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+	}
+
+	if (is_can_fd) {
+		if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+			return -EOPNOTSUPP;
+	}
+
+	if (data[IFLA_CAN_DATA_BITTIMING]) {
+		if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+			return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static int can_changelink(struct net_device *dev,
 			  struct nlattr *tb[], struct nlattr *data[])
 {
@@ -813,19 +848,31 @@
 
 	if (data[IFLA_CAN_CTRLMODE]) {
 		struct can_ctrlmode *cm;
+		u32 ctrlstatic;
+		u32 maskedflags;
 
 		/* Do not allow changing controller mode while running */
 		if (dev->flags & IFF_UP)
 			return -EBUSY;
 		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+		ctrlstatic = priv->ctrlmode_static;
+		maskedflags = cm->flags & cm->mask;
 
-		/* check whether changed bits are allowed to be modified */
-		if (cm->mask & ~priv->ctrlmode_supported)
+		/* check whether provided bits are allowed to be passed */
+		if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
+			return -EOPNOTSUPP;
+
+		/* do not check for static fd-non-iso if 'fd' is disabled */
+		if (!(maskedflags & CAN_CTRLMODE_FD))
+			ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+
+		/* make sure static options are provided by configuration */
+		if ((maskedflags & ctrlstatic) != ctrlstatic)
 			return -EOPNOTSUPP;
 
 		/* clear bits to be modified and copy the flag values */
 		priv->ctrlmode &= ~cm->mask;
-		priv->ctrlmode |= (cm->flags & cm->mask);
+		priv->ctrlmode |= maskedflags;
 
 		/* CAN_CTRLMODE_FD can only be set when driver supports FD */
 		if (priv->ctrlmode & CAN_CTRLMODE_FD)
@@ -966,6 +1013,7 @@
 	.maxtype	= IFLA_CAN_MAX,
 	.policy		= can_policy,
 	.setup		= can_setup,
+	.validate	= can_validate,
 	.newlink	= can_newlink,
 	.changelink	= can_changelink,
 	.get_size	= can_get_size,
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index a1bd54f..2d1d22e 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -34,6 +34,7 @@
 #define IFI_CANFD_STCMD_LOOPBACK		BIT(18)
 #define IFI_CANFD_STCMD_DISABLE_CANFD		BIT(24)
 #define IFI_CANFD_STCMD_ENABLE_ISO		BIT(25)
+#define IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING	BIT(26)
 #define IFI_CANFD_STCMD_NORMAL_MODE		((u32)BIT(31))
 
 #define IFI_CANFD_RXSTCMD			0x4
@@ -51,7 +52,8 @@
 #define IFI_CANFD_TXSTCMD_OVERFLOW		BIT(13)
 
 #define IFI_CANFD_INTERRUPT			0xc
-#define IFI_CANFD_INTERRUPT_ERROR_WARNING	((u32)BIT(1))
+#define IFI_CANFD_INTERRUPT_ERROR_WARNING	BIT(1)
+#define IFI_CANFD_INTERRUPT_ERROR_COUNTER	BIT(10)
 #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY	BIT(16)
 #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE	BIT(22)
 #define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY	BIT(24)
@@ -71,12 +73,12 @@
 #define IFI_CANFD_TIME_TIMEB_OFF		0
 #define IFI_CANFD_TIME_TIMEA_OFF		8
 #define IFI_CANFD_TIME_PRESCALE_OFF		16
-#define IFI_CANFD_TIME_SJW_OFF_ISO		25
-#define IFI_CANFD_TIME_SJW_OFF_BOSCH		28
-#define IFI_CANFD_TIME_SET_SJW_BOSCH		BIT(6)
-#define IFI_CANFD_TIME_SET_TIMEB_BOSCH		BIT(7)
-#define IFI_CANFD_TIME_SET_PRESC_BOSCH		BIT(14)
-#define IFI_CANFD_TIME_SET_TIMEA_BOSCH		BIT(15)
+#define IFI_CANFD_TIME_SJW_OFF_7_9_8_8		25
+#define IFI_CANFD_TIME_SJW_OFF_4_12_6_6		28
+#define IFI_CANFD_TIME_SET_SJW_4_12_6_6		BIT(6)
+#define IFI_CANFD_TIME_SET_TIMEB_4_12_6_6	BIT(7)
+#define IFI_CANFD_TIME_SET_PRESC_4_12_6_6	BIT(14)
+#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6	BIT(15)
 
 #define IFI_CANFD_TDELAY			0x1c
 
@@ -102,7 +104,26 @@
 
 #define IFI_CANFD_RES1				0x40
 
-#define IFI_CANFD_RES2				0x44
+#define IFI_CANFD_ERROR_CTR			0x44
+#define IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC	0x21302899
+#define IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST	BIT(0)
+#define IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST	BIT(1)
+#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST	BIT(2)
+#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST	BIT(3)
+#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST	BIT(4)
+#define IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST	BIT(5)
+#define IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST	BIT(6)
+#define IFI_CANFD_ERROR_CTR_OVERLOAD_ALL	BIT(8)
+#define IFI_CANFD_ERROR_CTR_ACK_ERROR_ALL	BIT(9)
+#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_ALL	BIT(10)
+#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_ALL	BIT(11)
+#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_ALL	BIT(12)
+#define IFI_CANFD_ERROR_CTR_CRC_ERROR_ALL	BIT(13)
+#define IFI_CANFD_ERROR_CTR_FORM_ERROR_ALL	BIT(14)
+#define IFI_CANFD_ERROR_CTR_BITPOSITION_OFFSET	16
+#define IFI_CANFD_ERROR_CTR_BITPOSITION_MASK	0xff
+#define IFI_CANFD_ERROR_CTR_ER_RESET		BIT(30)
+#define IFI_CANFD_ERROR_CTR_ER_ENABLE		((u32)BIT(31))
 
 #define IFI_CANFD_PAR				0x48
 
@@ -196,6 +217,8 @@
 	if (enable) {
 		enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
 			IFI_CANFD_IRQMASK_RXFIFO_NEMPTY;
+		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+			enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
 	}
 
 	writel(IFI_CANFD_IRQMASK_SET_ERR |
@@ -334,6 +357,68 @@
 	return 1;
 }
 
+static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr)
+{
+	struct ifi_canfd_priv *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
+			    IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
+			    IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
+			    IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST |
+			    IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST |
+			    IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST |
+			    IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST;
+
+	if (!(errctr & errmask))	/* No error happened. */
+		return 0;
+
+	priv->can.can_stats.bus_error++;
+	stats->rx_errors++;
+
+	/* Propagate the error condition to the CAN stack. */
+	skb = alloc_can_err_skb(ndev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	/* Read the error counter register and check for new errors. */
+	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+	if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
+		cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+
+	if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
+		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+
+	if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
+		cf->data[2] |= CAN_ERR_PROT_BIT0;
+
+	if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
+		cf->data[2] |= CAN_ERR_PROT_BIT1;
+
+	if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
+
+	if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
+		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+
+	if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+
+	/* Reset the error counter, ack the IRQ and re-enable the counter. */
+	writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+	writel(IFI_CANFD_INTERRUPT_ERROR_COUNTER,
+	       priv->base + IFI_CANFD_INTERRUPT);
+	writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
+
+	return 1;
+}
+
 static int ifi_canfd_get_berr_counter(const struct net_device *ndev,
 				      struct can_berr_counter *bec)
 {
@@ -469,6 +554,7 @@
 
 	u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
 	u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+	u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
 
 	/* Handle bus state changes */
 	if ((stcmd & stcmd_state_mask) ||
@@ -479,6 +565,10 @@
 	if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
 		work_done += ifi_canfd_handle_lost_msg(ndev);
 
+	/* Handle lec errors on the bus */
+	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+		work_done += ifi_canfd_handle_lec_err(ndev, errctr);
+
 	/* Handle normal messages on RX */
 	if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
 		work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
@@ -497,11 +587,13 @@
 	struct ifi_canfd_priv *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &ndev->stats;
 	const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
-				IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER;
+				IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
+				IFI_CANFD_INTERRUPT_ERROR_WARNING |
+				IFI_CANFD_INTERRUPT_ERROR_COUNTER;
 	const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
 				IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
-	const u32 clr_irq_mask = ~(IFI_CANFD_INTERRUPT_SET_IRQ |
-				   IFI_CANFD_INTERRUPT_ERROR_WARNING);
+	const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ |
+					 IFI_CANFD_INTERRUPT_ERROR_WARNING));
 	u32 isr;
 
 	isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -513,44 +605,34 @@
 	/* Clear all pending interrupts but ErrWarn */
 	writel(clr_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
 
-	/* RX IRQ, start NAPI */
+	/* RX IRQ or bus warning, start NAPI */
 	if (isr & rx_irq_mask) {
 		ifi_canfd_irq_enable(ndev, 0);
 		napi_schedule(&priv->napi);
 	}
 
 	/* TX IRQ */
-	if (isr & tx_irq_mask) {
+	if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
 		stats->tx_bytes += can_get_echo_skb(ndev, 0);
 		stats->tx_packets++;
 		can_led_event(ndev, CAN_LED_EVENT_TX);
-		netif_wake_queue(ndev);
 	}
 
+	if (isr & tx_irq_mask)
+		netif_wake_queue(ndev);
+
 	return IRQ_HANDLED;
 }
 
 static const struct can_bittiming_const ifi_canfd_bittiming_const = {
 	.name		= KBUILD_MODNAME,
 	.tseg1_min	= 1,	/* Time segment 1 = prop_seg + phase_seg1 */
-	.tseg1_max	= 64,
+	.tseg1_max	= 256,
 	.tseg2_min	= 2,	/* Time segment 2 = phase_seg2 */
-	.tseg2_max	= 64,
-	.sjw_max	= 16,
+	.tseg2_max	= 256,
+	.sjw_max	= 128,
 	.brp_min	= 2,
-	.brp_max	= 256,
-	.brp_inc	= 1,
-};
-
-static const struct can_bittiming_const ifi_canfd_data_bittiming_const = {
-	.name		= KBUILD_MODNAME,
-	.tseg1_min	= 1,	/* Time segment 1 = prop_seg + phase_seg1 */
-	.tseg1_max	= 64,
-	.tseg2_min	= 2,	/* Time segment 2 = phase_seg2 */
-	.tseg2_max	= 64,
-	.sjw_max	= 16,
-	.brp_min	= 2,
-	.brp_max	= 256,
+	.brp_max	= 512,
 	.brp_inc	= 1,
 };
 
@@ -560,19 +642,6 @@
 	const struct can_bittiming *bt = &priv->can.bittiming;
 	const struct can_bittiming *dbt = &priv->can.data_bittiming;
 	u16 brp, sjw, tseg1, tseg2;
-	u32 noniso_arg = 0;
-	u32 time_off;
-
-	if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
-	    !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) {
-		time_off = IFI_CANFD_TIME_SJW_OFF_ISO;
-	} else {
-		noniso_arg = IFI_CANFD_TIME_SET_TIMEB_BOSCH |
-			     IFI_CANFD_TIME_SET_TIMEA_BOSCH |
-			     IFI_CANFD_TIME_SET_PRESC_BOSCH |
-			     IFI_CANFD_TIME_SET_SJW_BOSCH;
-		time_off = IFI_CANFD_TIME_SJW_OFF_BOSCH;
-	}
 
 	/* Configure bit timing */
 	brp = bt->brp - 2;
@@ -582,8 +651,7 @@
 	writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
 	       (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
 	       (brp << IFI_CANFD_TIME_PRESCALE_OFF) |
-	       (sjw << time_off) |
-	       noniso_arg,
+	       (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
 	       priv->base + IFI_CANFD_TIME);
 
 	/* Configure data bit timing */
@@ -594,8 +662,7 @@
 	writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
 	       (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
 	       (brp << IFI_CANFD_TIME_PRESCALE_OFF) |
-	       (sjw << time_off) |
-	       noniso_arg,
+	       (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
 	       priv->base + IFI_CANFD_FTIME);
 }
 
@@ -640,7 +707,8 @@
 
 	/* Reset the IP */
 	writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
-	writel(0, priv->base + IFI_CANFD_STCMD);
+	writel(IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING,
+	       priv->base + IFI_CANFD_STCMD);
 
 	ifi_canfd_set_bittiming(ndev);
 	ifi_canfd_set_filters(ndev);
@@ -659,7 +727,8 @@
 	writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ),
 	       priv->base + IFI_CANFD_INTERRUPT);
 
-	stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE;
+	stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE |
+		IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING;
 
 	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
 		stcmd |= IFI_CANFD_STCMD_BUSMONITOR;
@@ -667,16 +736,23 @@
 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
 		stcmd |= IFI_CANFD_STCMD_LOOPBACK;
 
-	if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+	if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+	    !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
 		stcmd |= IFI_CANFD_STCMD_ENABLE_ISO;
 
-	if (!(priv->can.ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)))
+	if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
 		stcmd |= IFI_CANFD_STCMD_DISABLE_CANFD;
 
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
 	ifi_canfd_irq_enable(ndev, 1);
 
+	/* Unlock, reset and enable the error counter. */
+	writel(IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC,
+	       priv->base + IFI_CANFD_ERROR_CTR);
+	writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+	writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+
 	/* Enable controller */
 	writel(stcmd, priv->base + IFI_CANFD_STCMD);
 }
@@ -685,6 +761,10 @@
 {
 	struct ifi_canfd_priv *priv = netdev_priv(ndev);
 
+	/* Reset and disable the error counter. */
+	writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+	writel(0, priv->base + IFI_CANFD_ERROR_CTR);
+
 	/* Reset the IP */
 	writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
 
@@ -877,7 +957,7 @@
 	priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
 
 	priv->can.bittiming_const	= &ifi_canfd_bittiming_const;
-	priv->can.data_bittiming_const	= &ifi_canfd_data_bittiming_const;
+	priv->can.data_bittiming_const	= &ifi_canfd_bittiming_const;
 	priv->can.do_set_mode		= ifi_canfd_set_mode;
 	priv->can.do_get_berr_counter	= ifi_canfd_get_berr_counter;
 
@@ -888,7 +968,8 @@
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
 				       CAN_CTRLMODE_LISTENONLY |
 				       CAN_CTRLMODE_FD |
-				       CAN_CTRLMODE_FD_NON_ISO;
+				       CAN_CTRLMODE_FD_NON_ISO |
+				       CAN_CTRLMODE_BERR_REPORTING;
 
 	platform_set_drvdata(pdev, ndev);
 	SET_NETDEV_DEV(ndev, dev);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 5d04f54..f13bb8d 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -84,6 +84,7 @@
 #define MSG_COFFREQ		0x42
 #define MSG_CONREQ		0x43
 #define MSG_CCONFREQ		0x47
+#define MSG_NMTS		0xb0
 #define MSG_LMTS		0xb4
 
 /*
@@ -130,6 +131,22 @@
 
 #define ICAN3_CAN_DLC_MASK	0x0f
 
+/* Janz ICAN3 NMTS subtypes */
+#define NMTS_CREATE_NODE_REQ	0x0
+#define NMTS_SLAVE_STATE_IND	0x8
+#define NMTS_SLAVE_EVENT_IND	0x9
+
+/* Janz ICAN3 LMTS subtypes */
+#define LMTS_BUSON_REQ		0x0
+#define LMTS_BUSOFF_REQ		0x1
+#define LMTS_CAN_CONF_REQ	0x2
+
+/* Janz ICAN3 NMTS Event indications */
+#define NE_LOCAL_OCCURRED	0x3
+#define NE_LOCAL_RESOLVED	0x2
+#define NE_REMOTE_OCCURRED	0xc
+#define NE_REMOTE_RESOLVED	0x8
+
 /*
  * SJA1000 Status and Error Register Definitions
  *
@@ -800,21 +817,41 @@
 		return ican3_send_msg(mod, &msg);
 
 	} else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+		/* bittiming + can-on/off request */
 		memset(&msg, 0, sizeof(msg));
 		msg.spec = MSG_LMTS;
 		if (on) {
 			msg.len = cpu_to_le16(4);
-			msg.data[0] = 0;
+			msg.data[0] = LMTS_BUSON_REQ;
 			msg.data[1] = 0;
 			msg.data[2] = btr0;
 			msg.data[3] = btr1;
 		} else {
 			msg.len = cpu_to_le16(2);
-			msg.data[0] = 1;
+			msg.data[0] = LMTS_BUSOFF_REQ;
 			msg.data[1] = 0;
 		}
+		res = ican3_send_msg(mod, &msg);
+		if (res)
+			return res;
 
-		return ican3_send_msg(mod, &msg);
+		if (on) {
+			/* create NMT Slave Node for error processing
+			 *   class 2 (with error capability, see CiA/DS203-1)
+			 *   id    1
+			 *   name  locnod1 (must be exactly 7 bytes)
+			 */
+			memset(&msg, 0, sizeof(msg));
+			msg.spec = MSG_NMTS;
+			msg.len = cpu_to_le16(11);
+			msg.data[0] = NMTS_CREATE_NODE_REQ;
+			msg.data[1] = 0;
+			msg.data[2] = 2;                 /* node class */
+			msg.data[3] = 1;                 /* node id */
+			strcpy(msg.data + 4, "locnod1"); /* node name  */
+			return ican3_send_msg(mod, &msg);
+		}
+		return 0;
 	}
 	return -ENOTSUPP;
 }
@@ -849,12 +886,23 @@
 {
 	struct ican3_msg msg;
 
-	memset(&msg, 0, sizeof(msg));
-	msg.spec = MSG_CCONFREQ;
-	msg.len = cpu_to_le16(2);
-	msg.data[0] = 0x00;
-	msg.data[1] = quota;
-
+	if (mod->fwtype == ICAN3_FWTYPE_ICANOS) {
+		memset(&msg, 0, sizeof(msg));
+		msg.spec = MSG_CCONFREQ;
+		msg.len = cpu_to_le16(2);
+		msg.data[0] = 0x00;
+		msg.data[1] = quota;
+	} else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+		memset(&msg, 0, sizeof(msg));
+		msg.spec = MSG_LMTS;
+		msg.len = cpu_to_le16(4);
+		msg.data[0] = LMTS_CAN_CONF_REQ;
+		msg.data[1] = 0x00;
+		msg.data[2] = 0x00;
+		msg.data[3] = quota;
+	} else {
+		return -ENOTSUPP;
+	}
 	return ican3_send_msg(mod, &msg);
 }
 
@@ -1150,6 +1198,41 @@
 	}
 }
 
+/* Handle NMTS Slave Event Indication Messages from the firmware */
+static void ican3_handle_nmtsind(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+	u16 subspec;
+
+	subspec = msg->data[0] + msg->data[1] * 0x100;
+	if (subspec == NMTS_SLAVE_EVENT_IND) {
+		switch (msg->data[2]) {
+		case NE_LOCAL_OCCURRED:
+		case NE_LOCAL_RESOLVED:
+			/* the data that follows matches the raw ICANOS CEVTIND
+			 * message; shift it into place and reuse that handler
+			 */
+			le16_add_cpu(&msg->len, -3);
+			memmove(msg->data, msg->data + 3, le16_to_cpu(msg->len));
+			ican3_handle_cevtind(mod, msg);
+			break;
+		case NE_REMOTE_OCCURRED:
+		case NE_REMOTE_RESOLVED:
+			/* should not occur, ignore */
+			break;
+		default:
+			netdev_warn(mod->ndev, "unknown NMTS event indication %x\n",
+				    msg->data[2]);
+			break;
+		}
+	} else if (subspec == NMTS_SLAVE_STATE_IND) {
+		/* ignore state indications */
+	} else {
+		netdev_warn(mod->ndev, "unhandled NMTS indication %x\n",
+			    subspec);
+		return;
+	}
+}
+
 static void ican3_handle_unknown_message(struct ican3_dev *mod,
 					struct ican3_msg *msg)
 {
@@ -1179,6 +1262,9 @@
 	case MSG_INQUIRY:
 		ican3_handle_inquiry(mod, msg);
 		break;
+	case MSG_NMTS:
+		ican3_handle_nmtsind(mod, msg);
+		break;
 	default:
 		ican3_handle_unknown_message(mod, msg);
 		break;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 39cf911..195f15e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -955,7 +955,7 @@
 	priv->can.do_get_berr_counter = m_can_get_berr_counter;
 
 	/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
-	priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+	can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
 
 	/* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index e36b740..acb708f 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -276,7 +276,7 @@
 	out_8(&regs->cantflg, 1 << buf_id);
 
 	if (!test_bit(F_TX_PROGRESS, &priv->flags))
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 
 	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
 
@@ -469,7 +469,7 @@
 			clear_bit(F_TX_PROGRESS, &priv->flags);
 			priv->cur_pri = 0;
 		} else {
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 		}
 
 		if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 8836a74..3eb7430 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -39,6 +39,7 @@
 MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
 			"Adlink PCI-7841/cPCI-7841 SE, "
 			"Marathon CAN-bus-PCI, "
+			"Marathon CAN-bus-PCIe, "
 			"TEWS TECHNOLOGIES TPMC810, "
 			"esd CAN-PCI/CPCI/PCI104/200, "
 			"esd CAN-PCI/PMC/266, "
@@ -133,6 +134,7 @@
 #define IXXAT_PCI_SUB_SYS_ID		0x2540
 
 #define MARATHON_PCI_DEVICE_ID		0x2715
+#define MARATHON_PCIE_DEVICE_ID		0x3432
 
 #define TEWS_PCI_VENDOR_ID		0x1498
 #define TEWS_PCI_DEVICE_ID_TMPC810	0x032A
@@ -141,8 +143,9 @@
 #define CTI_PCI_DEVICE_ID_CRG001	0x0900
 
 static void plx_pci_reset_common(struct pci_dev *pdev);
-static void plx_pci_reset_marathon(struct pci_dev *pdev);
 static void plx9056_pci_reset_common(struct pci_dev *pdev);
+static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
+static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev);
 
 struct plx_pci_channel_map {
 	u32 bar;
@@ -215,14 +218,22 @@
 	/* based on PLX9050 */
 };
 
-static struct plx_pci_card_info plx_pci_card_info_marathon = {
+static struct plx_pci_card_info plx_pci_card_info_marathon_pci = {
 	"Marathon CAN-bus-PCI", 2,
 	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
 	{0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
-	&plx_pci_reset_marathon
+	&plx_pci_reset_marathon_pci
 	/* based on PLX9052 */
 };
 
+static struct plx_pci_card_info plx_pci_card_info_marathon_pcie = {
+	"Marathon CAN-bus-PCIe", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x00}, {3, 0x80, 0x00} },
+	&plx_pci_reset_marathon_pcie
+	/* based on PEX8311 */
+};
+
 static struct plx_pci_card_info plx_pci_card_info_tews = {
 	"TEWS TECHNOLOGIES TPMC810", 2,
 	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -316,7 +327,14 @@
 		PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
 		PCI_ANY_ID, PCI_ANY_ID,
 		0, 0,
-		(kernel_ulong_t)&plx_pci_card_info_marathon
+		(kernel_ulong_t)&plx_pci_card_info_marathon_pci
+	},
+	{
+		/* Marathon CAN-bus-PCIe card */
+		PCI_VENDOR_ID_PLX, MARATHON_PCIE_DEVICE_ID,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_marathon_pcie
 	},
 	{
 		/* TEWS TECHNOLOGIES TPMC810 card */
@@ -437,8 +455,8 @@
 	iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
 };
 
-/* Special reset function for Marathon card */
-static void plx_pci_reset_marathon(struct pci_dev *pdev)
+/* Special reset function for Marathon CAN-bus-PCI card */
+static void plx_pci_reset_marathon_pci(struct pci_dev *pdev)
 {
 	void __iomem *reset_addr;
 	int i;
@@ -460,6 +478,34 @@
 	}
 }
 
+/* Special reset function for Marathon CAN-bus-PCIe card */
+static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev)
+{
+	void __iomem *addr;
+	void __iomem *reset_addr;
+	int i;
+
+	plx9056_pci_reset_common(pdev);
+
+	for (i = 0; i < 2; i++) {
+		struct plx_pci_channel_map *chan_map =
+			&plx_pci_card_info_marathon_pcie.chan_map_tbl[i];
+		addr = pci_iomap(pdev, chan_map->bar, chan_map->size);
+		if (!addr) {
+			dev_err(&pdev->dev, "Failed to remap reset "
+				"space %d (BAR%d)\n", i, chan_map->bar);
+		} else {
+			/* reset the SJA1000 chip */
+			#define MARATHON_PCIE_RESET_OFFSET 32
+			reset_addr = addr + chan_map->offset +
+			             MARATHON_PCIE_RESET_OFFSET;
+			iowrite8(0x1, reset_addr);
+			udelay(100);
+			pci_iounmap(pdev, addr);
+		}
+	}
+}
+
 static void plx_pci_del_card(struct pci_dev *pdev)
 {
 	struct plx_pci_card *card = pci_get_drvdata(pdev);
@@ -486,7 +532,8 @@
 	 * Disable interrupts from PCI-card and disable local
 	 * interrupts
 	 */
-	if (pdev->device != PCI_DEVICE_ID_PLX_9056)
+	if (pdev->device != PCI_DEVICE_ID_PLX_9056 &&
+	    pdev->device != MARATHON_PCIE_DEVICE_ID)
 		iowrite32(0x0, card->conf_addr + PLX_INTCSR);
 	else
 		iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
@@ -619,7 +666,8 @@
 	 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
 	 * Local_2 interrupts from the SJA1000 chips
 	 */
-	if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
+	if (pdev->device != PCI_DEVICE_ID_PLX_9056 &&
+	    pdev->device != MARATHON_PCIE_DEVICE_ID) {
 		val = ioread32(card->conf_addr + PLX_INTCSR);
 		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
 			val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 8dda3b7..9f10779 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -438,6 +438,7 @@
 
 		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
 
+		/* set error type */
 		switch (ecc & ECC_MASK) {
 		case ECC_BIT:
 			cf->data[2] |= CAN_ERR_PROT_BIT;
@@ -449,9 +450,12 @@
 			cf->data[2] |= CAN_ERR_PROT_STUFF;
 			break;
 		default:
-			cf->data[3] = ecc & ECC_SEG;
 			break;
 		}
+
+		/* set error location */
+		cf->data[3] = ecc & ECC_SEG;
+
 		/* Error occurred during transmission? */
 		if ((ecc & ECC_DIR) == 0)
 			cf->data[2] |= CAN_ERR_PROT_TX;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 74a7dfe..cf36d26 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -961,7 +961,8 @@
 		goto open_unlock;
 	}
 
-	priv->wq = create_freezable_workqueue("mcp251x_wq");
+	priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+				   0);
 	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
 	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 3400fd1..71f0e79 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -521,7 +521,7 @@
 	if (urb->status)
 		netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
 
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 
 	/* transmission complete interrupt */
 	netdev->stats.tx_packets++;
@@ -835,7 +835,7 @@
 			stats->tx_dropped++;
 		}
 	} else {
-		netdev->trans_start = jiffies;
+		netif_trans_update(netdev);
 
 		/* Slow down tx path */
 		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 113e64f..784a900 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -480,7 +480,7 @@
 	if (urb->status)
 		netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
 
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 }
 
 static ssize_t show_firmware(struct device *d,
@@ -820,7 +820,7 @@
 		goto releasebuf;
 	}
 
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 
 	/*
 	 * Release our reference to this URB, the USB core will eventually free
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index cbc99d5..1556d42 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -950,7 +950,8 @@
 }
 
 static const struct usb_device_id gs_usb_table[] = {
-	{USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
+	{ USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
+				      USB_GSUSB_1_PRODUCT_ID, 0) },
 	{} /* Terminating entry */
 };
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 5a2e341..bfb91d8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -274,7 +274,7 @@
 		netdev->stats.tx_bytes += context->data_len;
 
 		/* prevent tx timeout */
-		netdev->trans_start = jiffies;
+		netif_trans_update(netdev);
 		break;
 
 	default:
@@ -373,7 +373,7 @@
 			stats->tx_dropped++;
 		}
 	} else {
-		netdev->trans_start = jiffies;
+		netif_trans_update(netdev);
 
 		/* slow down tx path */
 		if (atomic_read(&dev->active_tx_urbs) >= PCAN_USB_MAX_TX_URBS)
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 64c016a..221f5f0 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1106,7 +1106,7 @@
 
 	myNextTxDesc->skb = skb;
 
-	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+	netif_trans_update(dev); /* NETIF_F_LLTX driver :( */
 
 	e100_hardware_send_packet(np, buf, skb->len);
 
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 90ba003..200663c 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -1,10 +1,6 @@
 menu "Distributed Switch Architecture drivers"
 	depends on HAVE_NET_DSA
 
-config NET_DSA_MV88E6XXX
-	tristate
-	default n
-
 config NET_DSA_MV88E6060
 	tristate "Marvell 88E6060 ethernet switch chip support"
 	depends on NET_DSA
@@ -13,46 +9,13 @@
 	  This enables support for the Marvell 88E6060 ethernet switch
 	  chip.
 
-config NET_DSA_MV88E6XXX_NEED_PPU
-	bool
-	default n
-
-config NET_DSA_MV88E6131
-	tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+config NET_DSA_MV88E6XXX
+	tristate "Marvell 88E6xxx Ethernet switch chip support"
 	depends on NET_DSA
-	select NET_DSA_MV88E6XXX
-	select NET_DSA_MV88E6XXX_NEED_PPU
-	select NET_DSA_TAG_DSA
-	---help---
-	  This enables support for the Marvell 88E6085/6095/6095F/6131
-	  ethernet switch chips.
-
-config NET_DSA_MV88E6123
-	tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
-	depends on NET_DSA
-	select NET_DSA_MV88E6XXX
 	select NET_DSA_TAG_EDSA
 	---help---
-	  This enables support for the Marvell 88E6123/6161/6165
-	  ethernet switch chips.
-
-config NET_DSA_MV88E6171
-	tristate "Marvell 88E6171/6175/6350/6351 ethernet switch chip support"
-	depends on NET_DSA
-	select NET_DSA_MV88E6XXX
-	select NET_DSA_TAG_EDSA
-	---help---
-	  This enables support for the Marvell 88E6171/6175/6350/6351
-	  ethernet switches chips.
-
-config NET_DSA_MV88E6352
-	tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support"
-	depends on NET_DSA
-	select NET_DSA_MV88E6XXX
-	select NET_DSA_TAG_EDSA
-	---help---
-	  This enables support for the Marvell 88E6172, 88E6176, 88E6320,
-	  88E6321 and 88E6352 ethernet switch chips.
+	  This enables support for most of the Marvell 88E6xxx models of
+	  Ethernet switch chips, except 88E6060.
 
 config NET_DSA_BCM_SF2
 	tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index a6e0993..76b751d 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,16 +1,3 @@
 obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
-mv88e6xxx_drv-y += mv88e6xxx.o
-ifdef CONFIG_NET_DSA_MV88E6123
-mv88e6xxx_drv-y += mv88e6123.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6131
-mv88e6xxx_drv-y += mv88e6131.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6352
-mv88e6xxx_drv-y += mv88e6352.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6171
-mv88e6xxx_drv-y += mv88e6171.o
-endif
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
 obj-$(CONFIG_NET_DSA_BCM_SF2)	+= bcm_sf2.o
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 95944d5..10ddd5a 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -135,8 +135,17 @@
 	return BCM_SF2_STATS_SIZE;
 }
 
-static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr)
+static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev,
+					struct device *host_dev, int sw_addr,
+					void **_priv)
 {
+	struct bcm_sf2_priv *priv;
+
+	priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return NULL;
+	*_priv = priv;
+
 	return "Broadcom Starfighter 2";
 }
 
@@ -151,7 +160,7 @@
 	 * the same VLAN.
 	 */
 	for (i = 0; i < priv->hw_params.num_ports; i++) {
-		if (!((1 << i) & ds->phys_port_mask))
+		if (!((1 << i) & ds->enabled_port_mask))
 			continue;
 
 		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
@@ -545,12 +554,11 @@
 	priv->port_sts[port].bridge_dev = NULL;
 }
 
-static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
-				       u8 state)
+static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
+					u8 state)
 {
 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
 	u8 hw_state, cur_hw_state;
-	int ret = 0;
 	u32 reg;
 
 	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
@@ -574,7 +582,7 @@
 		break;
 	default:
 		pr_err("%s: invalid STP state: %d\n", __func__, state);
-		return -EINVAL;
+		return;
 	}
 
 	/* Fast-age ARL entries if we are moving a port from Learning or
@@ -584,10 +592,9 @@
 	if (cur_hw_state != hw_state) {
 		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
 		    hw_state <= G_MISTP_LISTEN_STATE) {
-			ret = bcm_sf2_sw_fast_age_port(ds, port);
-			if (ret) {
+			if (bcm_sf2_sw_fast_age_port(ds, port)) {
 				pr_err("%s: fast-ageing failed\n", __func__);
-				return ret;
+				return;
 			}
 		}
 	}
@@ -596,8 +603,6 @@
 	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
 	reg |= hw_state;
 	core_writel(priv, reg, CORE_G_PCTL_PORT(port));
-
-	return 0;
 }
 
 /* Address Resolution Logic routines */
@@ -728,13 +733,14 @@
 	return 0;
 }
 
-static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
-			      const struct switchdev_obj_port_fdb *fdb,
-			      struct switchdev_trans *trans)
+static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
+			       const struct switchdev_obj_port_fdb *fdb,
+			       struct switchdev_trans *trans)
 {
 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
 
-	return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true);
+	if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
+		pr_err("%s: failed to add MAC address\n", __func__);
 }
 
 static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
@@ -943,8 +949,8 @@
 	/* All the interesting properties are at the parent device_node
 	 * level
 	 */
-	dn = ds->pd->of_node->parent;
-	bcm_sf2_identify_ports(priv, ds->pd->of_node);
+	dn = ds->cd->of_node->parent;
+	bcm_sf2_identify_ports(priv, ds->cd->of_node);
 
 	priv->irq0 = irq_of_parse_and_map(dn, 0);
 	priv->irq1 = irq_of_parse_and_map(dn, 1);
@@ -1003,7 +1009,7 @@
 	/* Enable all valid ports and disable those unused */
 	for (port = 0; port < priv->hw_params.num_ports; port++) {
 		/* IMP port receives special treatment */
-		if ((1 << port) & ds->phys_port_mask)
+		if ((1 << port) & ds->enabled_port_mask)
 			bcm_sf2_port_setup(ds, port, NULL);
 		else if (dsa_is_cpu_port(ds, port))
 			bcm_sf2_imp_setup(ds, port);
@@ -1016,11 +1022,12 @@
 	 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
 	 * that we can use the regular SWITCH_MDIO master controller instead.
 	 *
-	 * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
-	 * to have a 1:1 mapping between Port address and PHY address in order
-	 * to utilize the slave_mii_bus instance to read from Port PHYs. This is
-	 * not what we want here, so we initialize phys_mii_mask 0 to always
-	 * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
+	 * By default, DSA initializes ds->phys_mii_mask to
+	 * ds->enabled_port_mask to have a 1:1 mapping between Port address
+	 * and PHY address in order to utilize the slave_mii_bus instance to
+	 * read from Port PHYs. This is not what we want here, so we
+	 * initialize phys_mii_mask 0 to always utilize the "master" MDIO
+	 * bus backed by the "mdio-unimac" driver.
 	 */
 	if (of_machine_is_compatible("brcm,bcm7445d0"))
 		ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
@@ -1278,7 +1285,7 @@
 	 * bcm_sf2_sw_setup
 	 */
 	for (port = 0; port < DSA_MAX_PORTS; port++) {
-		if ((1 << port) & ds->phys_port_mask ||
+		if ((1 << port) & ds->enabled_port_mask ||
 		    dsa_is_cpu_port(ds, port))
 			bcm_sf2_port_disable(ds, port, NULL);
 	}
@@ -1302,7 +1309,7 @@
 		bcm_sf2_gphy_enable_set(ds, true);
 
 	for (port = 0; port < DSA_MAX_PORTS; port++) {
-		if ((1 << port) & ds->phys_port_mask)
+		if ((1 << port) & ds->enabled_port_mask)
 			bcm_sf2_port_setup(ds, port, NULL);
 		else if (dsa_is_cpu_port(ds, port))
 			bcm_sf2_imp_setup(ds, port);
@@ -1365,8 +1372,7 @@
 
 static struct dsa_switch_driver bcm_sf2_switch_driver = {
 	.tag_protocol		= DSA_TAG_PROTO_BRCM,
-	.priv_size		= sizeof(struct bcm_sf2_priv),
-	.probe			= bcm_sf2_sw_probe,
+	.probe			= bcm_sf2_sw_drv_probe,
 	.setup			= bcm_sf2_sw_setup,
 	.set_addr		= bcm_sf2_sw_set_addr,
 	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
@@ -1387,7 +1393,7 @@
 	.set_eee		= bcm_sf2_sw_set_eee,
 	.port_bridge_join	= bcm_sf2_sw_br_join,
 	.port_bridge_leave	= bcm_sf2_sw_br_leave,
-	.port_stp_update	= bcm_sf2_sw_br_set_stp_state,
+	.port_stp_state_set	= bcm_sf2_sw_br_set_stp_state,
 	.port_fdb_prepare	= bcm_sf2_sw_fdb_prepare,
 	.port_fdb_add		= bcm_sf2_sw_fdb_add,
 	.port_fdb_del		= bcm_sf2_sw_fdb_del,
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 0527f48..e36b408 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -19,12 +19,9 @@
 
 static int reg_read(struct dsa_switch *ds, int addr, int reg)
 {
-	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
+	struct mv88e6060_priv *priv = ds_to_priv(ds);
 
-	if (bus == NULL)
-		return -EINVAL;
-
-	return mdiobus_read_nested(bus, ds->pd->sw_addr + addr, reg);
+	return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg);
 }
 
 #define REG_READ(addr, reg)					\
@@ -40,12 +37,9 @@
 
 static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
 {
-	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
+	struct mv88e6060_priv *priv = ds_to_priv(ds);
 
-	if (bus == NULL)
-		return -EINVAL;
-
-	return mdiobus_write_nested(bus, ds->pd->sw_addr + addr, reg, val);
+	return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val);
 }
 
 #define REG_WRITE(addr, reg, val)				\
@@ -57,14 +51,10 @@
 			return __ret;				\
 	})
 
-static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
+static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
 {
-	struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
 	int ret;
 
-	if (bus == NULL)
-		return NULL;
-
 	ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
 	if (ret >= 0) {
 		if (ret == PORT_SWITCH_ID_6060)
@@ -79,6 +69,27 @@
 	return NULL;
 }
 
+static const char *mv88e6060_drv_probe(struct device *dsa_dev,
+				       struct device *host_dev, int sw_addr,
+				       void **_priv)
+{
+	struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
+	struct mv88e6060_priv *priv;
+	const char *name;
+
+	name = mv88e6060_get_name(bus, sw_addr);
+	if (name) {
+		priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
+		if (!priv)
+			return NULL;
+		*_priv = priv;
+		priv->bus = bus;
+		priv->sw_addr = sw_addr;
+	}
+
+	return name;
+}
+
 static int mv88e6060_switch_reset(struct dsa_switch *ds)
 {
 	int i;
@@ -159,7 +170,7 @@
 	REG_WRITE(addr, PORT_VLAN_MAP,
 		  ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
 		   (dsa_is_cpu_port(ds, p) ?
-			ds->phys_port_mask :
+			ds->enabled_port_mask :
 			BIT(ds->dst->cpu_port)));
 
 	/* Port Association Vector: when learning source addresses
@@ -174,8 +185,8 @@
 
 static int mv88e6060_setup(struct dsa_switch *ds)
 {
-	int i;
 	int ret;
+	int i;
 
 	ret = mv88e6060_switch_reset(ds);
 	if (ret < 0)
@@ -238,7 +249,7 @@
 
 static struct dsa_switch_driver mv88e6060_switch_driver = {
 	.tag_protocol	= DSA_TAG_PROTO_TRAILER,
-	.probe		= mv88e6060_probe,
+	.probe		= mv88e6060_drv_probe,
 	.setup		= mv88e6060_setup,
 	.set_addr	= mv88e6060_set_addr,
 	.phy_read	= mv88e6060_phy_read,
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
index cc9b2ed..10249bd 100644
--- a/drivers/net/dsa/mv88e6060.h
+++ b/drivers/net/dsa/mv88e6060.h
@@ -108,4 +108,15 @@
 #define GLOBAL_ATU_MAC_23	0x0e
 #define GLOBAL_ATU_MAC_45	0x0f
 
+struct mv88e6060_priv {
+	/* MDIO bus and address on bus to use. When in single chip
+	 * mode, address is 0, and the switch uses multiple addresses
+	 * on the bus.  When in multi-chip mode, the switch uses a
+	 * single address which contains two registers used for
+	 * indirect access to more registers.
+	 */
+	struct mii_bus *bus;
+	int sw_addr;
+};
+
 #endif
diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c
deleted file mode 100644
index 69a6f79..0000000
--- a/drivers/net/dsa/mv88e6123.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6123_table[] = {
-	{ PORT_SWITCH_ID_6123, "Marvell 88E6123" },
-	{ PORT_SWITCH_ID_6123_A1, "Marvell 88E6123 (A1)" },
-	{ PORT_SWITCH_ID_6123_A2, "Marvell 88E6123 (A2)" },
-	{ PORT_SWITCH_ID_6161, "Marvell 88E6161" },
-	{ PORT_SWITCH_ID_6161_A1, "Marvell 88E6161 (A1)" },
-	{ PORT_SWITCH_ID_6161_A2, "Marvell 88E6161 (A2)" },
-	{ PORT_SWITCH_ID_6165, "Marvell 88E6165" },
-	{ PORT_SWITCH_ID_6165_A1, "Marvell 88E6165 (A1)" },
-	{ PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" },
-};
-
-static char *mv88e6123_probe(struct device *host_dev, int sw_addr)
-{
-	return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_table,
-				     ARRAY_SIZE(mv88e6123_table));
-}
-
-static int mv88e6123_setup_global(struct dsa_switch *ds)
-{
-	u32 upstream_port = dsa_upstream_port(ds);
-	int ret;
-	u32 reg;
-
-	ret = mv88e6xxx_setup_global(ds);
-	if (ret)
-		return ret;
-
-	/* Disable the PHY polling unit (since there won't be any
-	 * external PHYs to poll), don't discard packets with
-	 * excessive collisions, and mask all interrupt sources.
-	 */
-	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000);
-
-	/* Configure the upstream port, and configure the upstream
-	 * port as the port to which ingress and egress monitor frames
-	 * are to be sent.
-	 */
-	reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
-		upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
-		upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
-	REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
-	/* Disable remote management for now, and set the switch's
-	 * DSA device number.
-	 */
-	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
-
-	return 0;
-}
-
-static int mv88e6123_setup(struct dsa_switch *ds)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-
-	ret = mv88e6xxx_setup_common(ds);
-	if (ret < 0)
-		return ret;
-
-	switch (ps->id) {
-	case PORT_SWITCH_ID_6123:
-		ps->num_ports = 3;
-		break;
-	case PORT_SWITCH_ID_6161:
-	case PORT_SWITCH_ID_6165:
-		ps->num_ports = 6;
-		break;
-	default:
-		return -ENODEV;
-	}
-
-	ret = mv88e6xxx_switch_reset(ds, false);
-	if (ret < 0)
-		return ret;
-
-	ret = mv88e6123_setup_global(ds);
-	if (ret < 0)
-		return ret;
-
-	return mv88e6xxx_setup_ports(ds);
-}
-
-struct dsa_switch_driver mv88e6123_switch_driver = {
-	.tag_protocol		= DSA_TAG_PROTO_EDSA,
-	.priv_size		= sizeof(struct mv88e6xxx_priv_state),
-	.probe			= mv88e6123_probe,
-	.setup			= mv88e6123_setup,
-	.set_addr		= mv88e6xxx_set_addr_indirect,
-	.phy_read		= mv88e6xxx_phy_read,
-	.phy_write		= mv88e6xxx_phy_write,
-	.get_strings		= mv88e6xxx_get_strings,
-	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
-	.get_sset_count		= mv88e6xxx_get_sset_count,
-	.adjust_link		= mv88e6xxx_adjust_link,
-#ifdef CONFIG_NET_DSA_HWMON
-	.get_temp		= mv88e6xxx_get_temp,
-#endif
-	.get_regs_len		= mv88e6xxx_get_regs_len,
-	.get_regs		= mv88e6xxx_get_regs,
-};
-
-MODULE_ALIAS("platform:mv88e6123");
-MODULE_ALIAS("platform:mv88e6161");
-MODULE_ALIAS("platform:mv88e6165");
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
deleted file mode 100644
index a92ca65..0000000
--- a/drivers/net/dsa/mv88e6131.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * net/dsa/mv88e6131.c - Marvell 88e6095/6095f/6131 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6131_table[] = {
-	{ PORT_SWITCH_ID_6085, "Marvell 88E6085" },
-	{ PORT_SWITCH_ID_6095, "Marvell 88E6095/88E6095F" },
-	{ PORT_SWITCH_ID_6131, "Marvell 88E6131" },
-	{ PORT_SWITCH_ID_6131_B2, "Marvell 88E6131 (B2)" },
-	{ PORT_SWITCH_ID_6185, "Marvell 88E6185" },
-};
-
-static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
-{
-	return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6131_table,
-				     ARRAY_SIZE(mv88e6131_table));
-}
-
-static int mv88e6131_setup_global(struct dsa_switch *ds)
-{
-	u32 upstream_port = dsa_upstream_port(ds);
-	int ret;
-	u32 reg;
-
-	ret = mv88e6xxx_setup_global(ds);
-	if (ret)
-		return ret;
-
-	/* Enable the PHY polling unit, don't discard packets with
-	 * excessive collisions, use a weighted fair queueing scheme
-	 * to arbitrate between packet queues, set the maximum frame
-	 * size to 1632, and mask all interrupt sources.
-	 */
-	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
-		  GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_MAX_FRAME_1632);
-
-	/* Set the VLAN ethertype to 0x8100. */
-	REG_WRITE(REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100);
-
-	/* Disable ARP mirroring, and configure the upstream port as
-	 * the port to which ingress and egress monitor frames are to
-	 * be sent.
-	 */
-	reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
-		upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
-		GLOBAL_MONITOR_CONTROL_ARP_DISABLED;
-	REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
-	/* Disable cascade port functionality unless this device
-	 * is used in a cascade configuration, and set the switch's
-	 * DSA device number.
-	 */
-	if (ds->dst->pd->nr_chips > 1)
-		REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
-			  GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
-			  (ds->index & 0x1f));
-	else
-		REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
-			  GLOBAL_CONTROL_2_NO_CASCADE |
-			  (ds->index & 0x1f));
-
-	/* Force the priority of IGMP/MLD snoop frames and ARP frames
-	 * to the highest setting.
-	 */
-	REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
-		  GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP |
-		  7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT |
-		  GLOBAL2_PRIO_OVERRIDE_FORCE_ARP |
-		  7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT);
-
-	return 0;
-}
-
-static int mv88e6131_setup(struct dsa_switch *ds)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-
-	ret = mv88e6xxx_setup_common(ds);
-	if (ret < 0)
-		return ret;
-
-	mv88e6xxx_ppu_state_init(ds);
-
-	switch (ps->id) {
-	case PORT_SWITCH_ID_6085:
-	case PORT_SWITCH_ID_6185:
-		ps->num_ports = 10;
-		break;
-	case PORT_SWITCH_ID_6095:
-		ps->num_ports = 11;
-		break;
-	case PORT_SWITCH_ID_6131:
-	case PORT_SWITCH_ID_6131_B2:
-		ps->num_ports = 8;
-		break;
-	default:
-		return -ENODEV;
-	}
-
-	ret = mv88e6xxx_switch_reset(ds, false);
-	if (ret < 0)
-		return ret;
-
-	ret = mv88e6131_setup_global(ds);
-	if (ret < 0)
-		return ret;
-
-	return mv88e6xxx_setup_ports(ds);
-}
-
-static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	if (port >= 0 && port < ps->num_ports)
-		return port;
-
-	return -EINVAL;
-}
-
-static int
-mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
-	int addr = mv88e6131_port_to_phy_addr(ds, port);
-
-	if (addr < 0)
-		return addr;
-
-	return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
-}
-
-static int
-mv88e6131_phy_write(struct dsa_switch *ds,
-			      int port, int regnum, u16 val)
-{
-	int addr = mv88e6131_port_to_phy_addr(ds, port);
-
-	if (addr < 0)
-		return addr;
-
-	return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
-}
-
-struct dsa_switch_driver mv88e6131_switch_driver = {
-	.tag_protocol		= DSA_TAG_PROTO_DSA,
-	.priv_size		= sizeof(struct mv88e6xxx_priv_state),
-	.probe			= mv88e6131_probe,
-	.setup			= mv88e6131_setup,
-	.set_addr		= mv88e6xxx_set_addr_direct,
-	.phy_read		= mv88e6131_phy_read,
-	.phy_write		= mv88e6131_phy_write,
-	.get_strings		= mv88e6xxx_get_strings,
-	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
-	.get_sset_count		= mv88e6xxx_get_sset_count,
-	.adjust_link		= mv88e6xxx_adjust_link,
-};
-
-MODULE_ALIAS("platform:mv88e6085");
-MODULE_ALIAS("platform:mv88e6095");
-MODULE_ALIAS("platform:mv88e6095f");
-MODULE_ALIAS("platform:mv88e6131");
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
deleted file mode 100644
index c0164b9..0000000
--- a/drivers/net/dsa/mv88e6171.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- * Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6171_table[] = {
-	{ PORT_SWITCH_ID_6171, "Marvell 88E6171" },
-	{ PORT_SWITCH_ID_6175, "Marvell 88E6175" },
-	{ PORT_SWITCH_ID_6350, "Marvell 88E6350" },
-	{ PORT_SWITCH_ID_6351, "Marvell 88E6351" },
-};
-
-static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
-{
-	return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6171_table,
-				     ARRAY_SIZE(mv88e6171_table));
-}
-
-static int mv88e6171_setup_global(struct dsa_switch *ds)
-{
-	u32 upstream_port = dsa_upstream_port(ds);
-	int ret;
-	u32 reg;
-
-	ret = mv88e6xxx_setup_global(ds);
-	if (ret)
-		return ret;
-
-	/* Discard packets with excessive collisions, mask all
-	 * interrupt sources, enable PPU.
-	 */
-	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
-		  GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
-
-	/* Configure the upstream port, and configure the upstream
-	 * port as the port to which ingress and egress monitor frames
-	 * are to be sent.
-	 */
-	reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
-		upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
-		upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT |
-		upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT;
-	REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
-	/* Disable remote management for now, and set the switch's
-	 * DSA device number.
-	 */
-	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
-
-	return 0;
-}
-
-static int mv88e6171_setup(struct dsa_switch *ds)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-
-	ret = mv88e6xxx_setup_common(ds);
-	if (ret < 0)
-		return ret;
-
-	ps->num_ports = 7;
-
-	ret = mv88e6xxx_switch_reset(ds, true);
-	if (ret < 0)
-		return ret;
-
-	ret = mv88e6171_setup_global(ds);
-	if (ret < 0)
-		return ret;
-
-	return mv88e6xxx_setup_ports(ds);
-}
-
-struct dsa_switch_driver mv88e6171_switch_driver = {
-	.tag_protocol		= DSA_TAG_PROTO_EDSA,
-	.priv_size		= sizeof(struct mv88e6xxx_priv_state),
-	.probe			= mv88e6171_probe,
-	.setup			= mv88e6171_setup,
-	.set_addr		= mv88e6xxx_set_addr_indirect,
-	.phy_read		= mv88e6xxx_phy_read_indirect,
-	.phy_write		= mv88e6xxx_phy_write_indirect,
-	.get_strings		= mv88e6xxx_get_strings,
-	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
-	.get_sset_count		= mv88e6xxx_get_sset_count,
-	.adjust_link		= mv88e6xxx_adjust_link,
-#ifdef CONFIG_NET_DSA_HWMON
-	.get_temp               = mv88e6xxx_get_temp,
-#endif
-	.get_regs_len		= mv88e6xxx_get_regs_len,
-	.get_regs		= mv88e6xxx_get_regs,
-	.port_bridge_join	= mv88e6xxx_port_bridge_join,
-	.port_bridge_leave	= mv88e6xxx_port_bridge_leave,
-	.port_stp_update        = mv88e6xxx_port_stp_update,
-	.port_vlan_filtering	= mv88e6xxx_port_vlan_filtering,
-	.port_vlan_prepare	= mv88e6xxx_port_vlan_prepare,
-	.port_vlan_add		= mv88e6xxx_port_vlan_add,
-	.port_vlan_del		= mv88e6xxx_port_vlan_del,
-	.port_vlan_dump		= mv88e6xxx_port_vlan_dump,
-	.port_fdb_prepare	= mv88e6xxx_port_fdb_prepare,
-	.port_fdb_add		= mv88e6xxx_port_fdb_add,
-	.port_fdb_del		= mv88e6xxx_port_fdb_del,
-	.port_fdb_dump		= mv88e6xxx_port_fdb_dump,
-};
-
-MODULE_ALIAS("platform:mv88e6171");
-MODULE_ALIAS("platform:mv88e6175");
-MODULE_ALIAS("platform:mv88e6350");
-MODULE_ALIAS("platform:mv88e6351");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
deleted file mode 100644
index 5f528ab..0000000
--- a/drivers/net/dsa/mv88e6352.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support
- *
- * Copyright (c) 2014 Guenter Roeck
- *
- * Derived from mv88e6123_61_65.c
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
-	{ PORT_SWITCH_ID_6172, "Marvell 88E6172" },
-	{ PORT_SWITCH_ID_6176, "Marvell 88E6176" },
-	{ PORT_SWITCH_ID_6240, "Marvell 88E6240" },
-	{ PORT_SWITCH_ID_6320, "Marvell 88E6320" },
-	{ PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" },
-	{ PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" },
-	{ PORT_SWITCH_ID_6321, "Marvell 88E6321" },
-	{ PORT_SWITCH_ID_6321_A1, "Marvell 88E6321 (A1)" },
-	{ PORT_SWITCH_ID_6321_A2, "Marvell 88e6321 (A2)" },
-	{ PORT_SWITCH_ID_6352, "Marvell 88E6352" },
-	{ PORT_SWITCH_ID_6352_A0, "Marvell 88E6352 (A0)" },
-	{ PORT_SWITCH_ID_6352_A1, "Marvell 88E6352 (A1)" },
-};
-
-static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
-{
-	return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6352_table,
-				     ARRAY_SIZE(mv88e6352_table));
-}
-
-static int mv88e6352_setup_global(struct dsa_switch *ds)
-{
-	u32 upstream_port = dsa_upstream_port(ds);
-	int ret;
-	u32 reg;
-
-	ret = mv88e6xxx_setup_global(ds);
-	if (ret)
-		return ret;
-
-	/* Discard packets with excessive collisions,
-	 * mask all interrupt sources, enable PPU (bit 14, undocumented).
-	 */
-	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
-		  GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
-
-	/* Configure the upstream port, and configure the upstream
-	 * port as the port to which ingress and egress monitor frames
-	 * are to be sent.
-	 */
-	reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
-		upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
-		upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
-	REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
-	/* Disable remote management for now, and set the switch's
-	 * DSA device number.
-	 */
-	REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
-	return 0;
-}
-
-static int mv88e6352_setup(struct dsa_switch *ds)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-
-	ret = mv88e6xxx_setup_common(ds);
-	if (ret < 0)
-		return ret;
-
-	ps->num_ports = 7;
-
-	mutex_init(&ps->eeprom_mutex);
-
-	ret = mv88e6xxx_switch_reset(ds, true);
-	if (ret < 0)
-		return ret;
-
-	ret = mv88e6352_setup_global(ds);
-	if (ret < 0)
-		return ret;
-
-	return mv88e6xxx_setup_ports(ds);
-}
-
-static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-
-	mutex_lock(&ps->eeprom_mutex);
-
-	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
-				  GLOBAL2_EEPROM_OP_READ |
-				  (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
-	if (ret < 0)
-		goto error;
-
-	ret = mv88e6xxx_eeprom_busy_wait(ds);
-	if (ret < 0)
-		goto error;
-
-	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
-error:
-	mutex_unlock(&ps->eeprom_mutex);
-	return ret;
-}
-
-static int mv88e6352_get_eeprom(struct dsa_switch *ds,
-				struct ethtool_eeprom *eeprom, u8 *data)
-{
-	int offset;
-	int len;
-	int ret;
-
-	offset = eeprom->offset;
-	len = eeprom->len;
-	eeprom->len = 0;
-
-	eeprom->magic = 0xc3ec4951;
-
-	ret = mv88e6xxx_eeprom_load_wait(ds);
-	if (ret < 0)
-		return ret;
-
-	if (offset & 1) {
-		int word;
-
-		word = mv88e6352_read_eeprom_word(ds, offset >> 1);
-		if (word < 0)
-			return word;
-
-		*data++ = (word >> 8) & 0xff;
-
-		offset++;
-		len--;
-		eeprom->len++;
-	}
-
-	while (len >= 2) {
-		int word;
-
-		word = mv88e6352_read_eeprom_word(ds, offset >> 1);
-		if (word < 0)
-			return word;
-
-		*data++ = word & 0xff;
-		*data++ = (word >> 8) & 0xff;
-
-		offset += 2;
-		len -= 2;
-		eeprom->len += 2;
-	}
-
-	if (len) {
-		int word;
-
-		word = mv88e6352_read_eeprom_word(ds, offset >> 1);
-		if (word < 0)
-			return word;
-
-		*data++ = word & 0xff;
-
-		offset++;
-		len--;
-		eeprom->len++;
-	}
-
-	return 0;
-}
-
-static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds)
-{
-	int ret;
-
-	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
-	if (ret < 0)
-		return ret;
-
-	if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
-		return -EROFS;
-
-	return 0;
-}
-
-static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
-				       u16 data)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-
-	mutex_lock(&ps->eeprom_mutex);
-
-	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
-	if (ret < 0)
-		goto error;
-
-	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
-				  GLOBAL2_EEPROM_OP_WRITE |
-				  (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
-	if (ret < 0)
-		goto error;
-
-	ret = mv88e6xxx_eeprom_busy_wait(ds);
-error:
-	mutex_unlock(&ps->eeprom_mutex);
-	return ret;
-}
-
-static int mv88e6352_set_eeprom(struct dsa_switch *ds,
-				struct ethtool_eeprom *eeprom, u8 *data)
-{
-	int offset;
-	int ret;
-	int len;
-
-	if (eeprom->magic != 0xc3ec4951)
-		return -EINVAL;
-
-	ret = mv88e6352_eeprom_is_readonly(ds);
-	if (ret)
-		return ret;
-
-	offset = eeprom->offset;
-	len = eeprom->len;
-	eeprom->len = 0;
-
-	ret = mv88e6xxx_eeprom_load_wait(ds);
-	if (ret < 0)
-		return ret;
-
-	if (offset & 1) {
-		int word;
-
-		word = mv88e6352_read_eeprom_word(ds, offset >> 1);
-		if (word < 0)
-			return word;
-
-		word = (*data++ << 8) | (word & 0xff);
-
-		ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
-		if (ret < 0)
-			return ret;
-
-		offset++;
-		len--;
-		eeprom->len++;
-	}
-
-	while (len >= 2) {
-		int word;
-
-		word = *data++;
-		word |= *data++ << 8;
-
-		ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
-		if (ret < 0)
-			return ret;
-
-		offset += 2;
-		len -= 2;
-		eeprom->len += 2;
-	}
-
-	if (len) {
-		int word;
-
-		word = mv88e6352_read_eeprom_word(ds, offset >> 1);
-		if (word < 0)
-			return word;
-
-		word = (word & 0xff00) | *data++;
-
-		ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
-		if (ret < 0)
-			return ret;
-
-		offset++;
-		len--;
-		eeprom->len++;
-	}
-
-	return 0;
-}
-
-struct dsa_switch_driver mv88e6352_switch_driver = {
-	.tag_protocol		= DSA_TAG_PROTO_EDSA,
-	.priv_size		= sizeof(struct mv88e6xxx_priv_state),
-	.probe			= mv88e6352_probe,
-	.setup			= mv88e6352_setup,
-	.set_addr		= mv88e6xxx_set_addr_indirect,
-	.phy_read		= mv88e6xxx_phy_read_indirect,
-	.phy_write		= mv88e6xxx_phy_write_indirect,
-	.get_strings		= mv88e6xxx_get_strings,
-	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
-	.get_sset_count		= mv88e6xxx_get_sset_count,
-	.adjust_link		= mv88e6xxx_adjust_link,
-	.set_eee		= mv88e6xxx_set_eee,
-	.get_eee		= mv88e6xxx_get_eee,
-#ifdef CONFIG_NET_DSA_HWMON
-	.get_temp		= mv88e6xxx_get_temp,
-	.get_temp_limit		= mv88e6xxx_get_temp_limit,
-	.set_temp_limit		= mv88e6xxx_set_temp_limit,
-	.get_temp_alarm		= mv88e6xxx_get_temp_alarm,
-#endif
-	.get_eeprom		= mv88e6352_get_eeprom,
-	.set_eeprom		= mv88e6352_set_eeprom,
-	.get_regs_len		= mv88e6xxx_get_regs_len,
-	.get_regs		= mv88e6xxx_get_regs,
-	.port_bridge_join	= mv88e6xxx_port_bridge_join,
-	.port_bridge_leave	= mv88e6xxx_port_bridge_leave,
-	.port_stp_update	= mv88e6xxx_port_stp_update,
-	.port_vlan_filtering	= mv88e6xxx_port_vlan_filtering,
-	.port_vlan_prepare	= mv88e6xxx_port_vlan_prepare,
-	.port_vlan_add		= mv88e6xxx_port_vlan_add,
-	.port_vlan_del		= mv88e6xxx_port_vlan_del,
-	.port_vlan_dump		= mv88e6xxx_port_vlan_dump,
-	.port_fdb_prepare	= mv88e6xxx_port_fdb_prepare,
-	.port_fdb_add		= mv88e6xxx_port_fdb_add,
-	.port_fdb_del		= mv88e6xxx_port_fdb_del,
-	.port_fdb_dump		= mv88e6xxx_port_fdb_dump,
-};
-
-MODULE_ALIAS("platform:mv88e6172");
-MODULE_ALIAS("platform:mv88e6176");
-MODULE_ALIAS("platform:mv88e6320");
-MODULE_ALIAS("platform:mv88e6321");
-MODULE_ALIAS("platform:mv88e6352");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fa086e0..ba9dfc9 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -5,6 +5,8 @@
  * Copyright (c) 2015 CMC Electronics, Inc.
  *	Added support for VLAN Table Unit operations
  *
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -17,6 +19,7 @@
 #include <linux/if_bridge.h>
 #include <linux/jiffies.h>
 #include <linux/list.h>
+#include <linux/mdio.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/gpio/consumer.h>
@@ -25,12 +28,10 @@
 #include <net/switchdev.h>
 #include "mv88e6xxx.h"
 
-static void assert_smi_lock(struct dsa_switch *ds)
+static void assert_smi_lock(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
 	if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
-		dev_err(ds->master_dev, "SMI lock not held!\n");
+		dev_err(ps->dev, "SMI lock not held!\n");
 		dump_stack();
 	}
 }
@@ -92,33 +93,29 @@
 	return ret & 0xffff;
 }
 
-static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
+			       int addr, int reg)
 {
-	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
 	int ret;
 
-	assert_smi_lock(ds);
+	assert_smi_lock(ps);
 
-	if (bus == NULL)
-		return -EINVAL;
-
-	ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
+	ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
 	if (ret < 0)
 		return ret;
 
-	dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+	dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
 		addr, reg, ret);
 
 	return ret;
 }
 
-int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_reg_read(ds, addr, reg);
+	ret = _mv88e6xxx_reg_read(ps, addr, reg);
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
@@ -156,58 +153,71 @@
 	return 0;
 }
 
-static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
-				u16 val)
+static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
+				int reg, u16 val)
 {
-	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
+	assert_smi_lock(ps);
 
-	assert_smi_lock(ds);
-
-	if (bus == NULL)
-		return -EINVAL;
-
-	dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+	dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
 		addr, reg, val);
 
-	return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
+	return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val);
 }
 
-int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
+			int reg, u16 val)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
+	ret = _mv88e6xxx_reg_write(ps, addr, reg, val);
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
 }
 
-int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
+static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
 {
-	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
-	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
-	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int err;
 
-	return 0;
+	err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01,
+				  (addr[0] << 8) | addr[1]);
+	if (err)
+		return err;
+
+	err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23,
+				  (addr[2] << 8) | addr[3]);
+	if (err)
+		return err;
+
+	return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45,
+				   (addr[4] << 8) | addr[5]);
 }
 
-int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
+static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
 {
-	int i;
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
+	int i;
 
 	for (i = 0; i < 6; i++) {
 		int j;
 
 		/* Write the MAC address byte. */
-		REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
-			  GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
+		ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
+					  GLOBAL2_SWITCH_MAC_BUSY |
+					  (i << 8) | addr[i]);
+		if (ret)
+			return ret;
 
 		/* Wait for the write to complete. */
 		for (j = 0; j < 16; j++) {
-			ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
+			ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2,
+						 GLOBAL2_SWITCH_MAC);
+			if (ret < 0)
+				return ret;
+
 			if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
 				break;
 		}
@@ -218,34 +228,52 @@
 	return 0;
 }
 
-static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
+int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC))
+		return mv88e6xxx_set_addr_indirect(ds, addr);
+	else
+		return mv88e6xxx_set_addr_direct(ds, addr);
+}
+
+static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
+			       int regnum)
 {
 	if (addr >= 0)
-		return _mv88e6xxx_reg_read(ds, addr, regnum);
+		return _mv88e6xxx_reg_read(ps, addr, regnum);
 	return 0xffff;
 }
 
-static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
-				u16 val)
+static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr,
+				int regnum, u16 val)
 {
 	if (addr >= 0)
-		return _mv88e6xxx_reg_write(ds, addr, regnum, val);
+		return _mv88e6xxx_reg_write(ps, addr, regnum, val);
 	return 0;
 }
 
-#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
-static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
 {
 	int ret;
 	unsigned long timeout;
 
-	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
-	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
-		  ret & ~GLOBAL_CONTROL_PPU_ENABLE);
+	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+				   ret & ~GLOBAL_CONTROL_PPU_ENABLE);
+	if (ret)
+		return ret;
 
 	timeout = jiffies + 1 * HZ;
 	while (time_before(jiffies, timeout)) {
-		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+		if (ret < 0)
+			return ret;
+
 		usleep_range(1000, 2000);
 		if ((ret & GLOBAL_STATUS_PPU_MASK) !=
 		    GLOBAL_STATUS_PPU_POLLING)
@@ -255,17 +283,26 @@
 	return -ETIMEDOUT;
 }
 
-static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
 {
-	int ret;
+	int ret, err;
 	unsigned long timeout;
 
-	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
-	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
+	ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+	if (ret < 0)
+		return ret;
+
+	err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+				  ret | GLOBAL_CONTROL_PPU_ENABLE);
+	if (err)
+		return err;
 
 	timeout = jiffies + 1 * HZ;
 	while (time_before(jiffies, timeout)) {
-		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+		ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+		if (ret < 0)
+			return ret;
+
 		usleep_range(1000, 2000);
 		if ((ret & GLOBAL_STATUS_PPU_MASK) ==
 		    GLOBAL_STATUS_PPU_POLLING)
@@ -281,9 +318,7 @@
 
 	ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
 	if (mutex_trylock(&ps->ppu_mutex)) {
-		struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
-
-		if (mv88e6xxx_ppu_enable(ds) == 0)
+		if (mv88e6xxx_ppu_enable(ps) == 0)
 			ps->ppu_disabled = 0;
 		mutex_unlock(&ps->ppu_mutex);
 	}
@@ -296,9 +331,8 @@
 	schedule_work(&ps->ppu_work);
 }
 
-static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->ppu_mutex);
@@ -309,7 +343,7 @@
 	 * it.
 	 */
 	if (!ps->ppu_disabled) {
-		ret = mv88e6xxx_ppu_disable(ds);
+		ret = mv88e6xxx_ppu_disable(ps);
 		if (ret < 0) {
 			mutex_unlock(&ps->ppu_mutex);
 			return ret;
@@ -323,19 +357,15 @@
 	return ret;
 }
 
-static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
+static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
 	/* Schedule a timer to re-enable the PHY polling unit. */
 	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
 	mutex_unlock(&ps->ppu_mutex);
 }
 
-void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
+void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
 	mutex_init(&ps->ppu_mutex);
 	INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
 	init_timer(&ps->ppu_timer);
@@ -343,142 +373,86 @@
 	ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
 }
 
-int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
+static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+				  int regnum)
 {
 	int ret;
 
-	ret = mv88e6xxx_ppu_access_get(ds);
+	ret = mv88e6xxx_ppu_access_get(ps);
 	if (ret >= 0) {
-		ret = mv88e6xxx_reg_read(ds, addr, regnum);
-		mv88e6xxx_ppu_access_put(ds);
+		ret = _mv88e6xxx_reg_read(ps, addr, regnum);
+		mv88e6xxx_ppu_access_put(ps);
 	}
 
 	return ret;
 }
 
-int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
-			    int regnum, u16 val)
+static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+				   int regnum, u16 val)
 {
 	int ret;
 
-	ret = mv88e6xxx_ppu_access_get(ds);
+	ret = mv88e6xxx_ppu_access_get(ps);
 	if (ret >= 0) {
-		ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
-		mv88e6xxx_ppu_access_put(ds);
+		ret = _mv88e6xxx_reg_write(ps, addr, regnum, val);
+		mv88e6xxx_ppu_access_put(ps);
 	}
 
 	return ret;
 }
-#endif
 
-static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	switch (ps->id) {
-	case PORT_SWITCH_ID_6031:
-	case PORT_SWITCH_ID_6061:
-	case PORT_SWITCH_ID_6035:
-	case PORT_SWITCH_ID_6065:
-		return true;
-	}
-	return false;
+	return ps->info->family == MV88E6XXX_FAMILY_6065;
 }
 
-static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	switch (ps->id) {
-	case PORT_SWITCH_ID_6092:
-	case PORT_SWITCH_ID_6095:
-		return true;
-	}
-	return false;
+	return ps->info->family == MV88E6XXX_FAMILY_6095;
 }
 
-static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	switch (ps->id) {
-	case PORT_SWITCH_ID_6046:
-	case PORT_SWITCH_ID_6085:
-	case PORT_SWITCH_ID_6096:
-	case PORT_SWITCH_ID_6097:
-		return true;
-	}
-	return false;
+	return ps->info->family == MV88E6XXX_FAMILY_6097;
 }
 
-static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	switch (ps->id) {
-	case PORT_SWITCH_ID_6123:
-	case PORT_SWITCH_ID_6161:
-	case PORT_SWITCH_ID_6165:
-		return true;
-	}
-	return false;
+	return ps->info->family == MV88E6XXX_FAMILY_6165;
 }
 
-static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	switch (ps->id) {
-	case PORT_SWITCH_ID_6121:
-	case PORT_SWITCH_ID_6122:
-	case PORT_SWITCH_ID_6152:
-	case PORT_SWITCH_ID_6155:
-	case PORT_SWITCH_ID_6182:
-	case PORT_SWITCH_ID_6185:
-	case PORT_SWITCH_ID_6108:
-	case PORT_SWITCH_ID_6131:
-		return true;
-	}
-	return false;
+	return ps->info->family == MV88E6XXX_FAMILY_6185;
 }
 
-static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	switch (ps->id) {
-	case PORT_SWITCH_ID_6320:
-	case PORT_SWITCH_ID_6321:
-		return true;
-	}
-	return false;
+	return ps->info->family == MV88E6XXX_FAMILY_6320;
 }
 
-static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	switch (ps->id) {
-	case PORT_SWITCH_ID_6171:
-	case PORT_SWITCH_ID_6175:
-	case PORT_SWITCH_ID_6350:
-	case PORT_SWITCH_ID_6351:
-		return true;
-	}
-	return false;
+	return ps->info->family == MV88E6XXX_FAMILY_6351;
 }
 
-static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	return ps->info->family == MV88E6XXX_FAMILY_6352;
+}
 
-	switch (ps->id) {
-	case PORT_SWITCH_ID_6172:
-	case PORT_SWITCH_ID_6176:
-	case PORT_SWITCH_ID_6240:
-	case PORT_SWITCH_ID_6352:
+static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps)
+{
+	return ps->info->num_databases;
+}
+
+static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
+{
+	/* Does the device have dedicated FID registers for ATU and VTU ops? */
+	if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
+	    mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
 		return true;
-	}
+
 	return false;
 }
 
@@ -486,8 +460,8 @@
  * phy. However, in the case of a fixed link phy, we force the port
  * settings from the fixed link settings.
  */
-void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
-			   struct phy_device *phydev)
+static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+				  struct phy_device *phydev)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	u32 reg;
@@ -498,7 +472,7 @@
 
 	mutex_lock(&ps->smi_mutex);
 
-	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
 	if (ret < 0)
 		goto out;
 
@@ -512,7 +486,7 @@
 	if (phydev->link)
 			reg |= PORT_PCS_CTRL_LINK_UP;
 
-	if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
+	if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100)
 		goto out;
 
 	switch (phydev->speed) {
@@ -534,8 +508,8 @@
 	if (phydev->duplex == DUPLEX_FULL)
 		reg |= PORT_PCS_CTRL_DUPLEX_FULL;
 
-	if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
-	    (port >= ps->num_ports - 2)) {
+	if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) &&
+	    (port >= ps->info->num_ports - 2)) {
 		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
 			reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
 		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -544,19 +518,19 @@
 			reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
 				PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
 	}
-	_mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
+	_mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg);
 
 out:
 	mutex_unlock(&ps->smi_mutex);
 }
 
-static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
 {
 	int ret;
 	int i;
 
 	for (i = 0; i < 10; i++) {
-		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
+		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP);
 		if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
 			return 0;
 	}
@@ -564,52 +538,54 @@
 	return -ETIMEDOUT;
 }
 
-static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps,
+				     int port)
 {
 	int ret;
 
-	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+	if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
 		port = (port + 1) << 5;
 
 	/* Snapshot the hardware statistics counters for this port. */
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
 				   GLOBAL_STATS_OP_CAPTURE_PORT |
 				   GLOBAL_STATS_OP_HIST_RX_TX | port);
 	if (ret < 0)
 		return ret;
 
 	/* Wait for the snapshotting to complete. */
-	ret = _mv88e6xxx_stats_wait(ds);
+	ret = _mv88e6xxx_stats_wait(ps);
 	if (ret < 0)
 		return ret;
 
 	return 0;
 }
 
-static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
+				  int stat, u32 *val)
 {
 	u32 _val;
 	int ret;
 
 	*val = 0;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
 				   GLOBAL_STATS_OP_READ_CAPTURED |
 				   GLOBAL_STATS_OP_HIST_RX_TX | stat);
 	if (ret < 0)
 		return;
 
-	ret = _mv88e6xxx_stats_wait(ds);
+	ret = _mv88e6xxx_stats_wait(ps);
 	if (ret < 0)
 		return;
 
-	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
+	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
 	if (ret < 0)
 		return;
 
 	_val = ret << 16;
 
-	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
+	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
 	if (ret < 0)
 		return;
 
@@ -678,26 +654,26 @@
 	{ "out_management",	4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
 };
 
-static bool mv88e6xxx_has_stat(struct dsa_switch *ds,
+static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps,
 			       struct mv88e6xxx_hw_stat *stat)
 {
 	switch (stat->type) {
 	case BANK0:
 		return true;
 	case BANK1:
-		return mv88e6xxx_6320_family(ds);
+		return mv88e6xxx_6320_family(ps);
 	case PORT:
-		return mv88e6xxx_6095_family(ds) ||
-			mv88e6xxx_6185_family(ds) ||
-			mv88e6xxx_6097_family(ds) ||
-			mv88e6xxx_6165_family(ds) ||
-			mv88e6xxx_6351_family(ds) ||
-			mv88e6xxx_6352_family(ds);
+		return mv88e6xxx_6095_family(ps) ||
+			mv88e6xxx_6185_family(ps) ||
+			mv88e6xxx_6097_family(ps) ||
+			mv88e6xxx_6165_family(ps) ||
+			mv88e6xxx_6351_family(ps) ||
+			mv88e6xxx_6352_family(ps);
 	}
 	return false;
 }
 
-static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
+static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
 					    struct mv88e6xxx_hw_stat *s,
 					    int port)
 {
@@ -708,13 +684,13 @@
 
 	switch (s->type) {
 	case PORT:
-		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), s->reg);
+		ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg);
 		if (ret < 0)
 			return UINT64_MAX;
 
 		low = ret;
 		if (s->sizeof_stat == 4) {
-			ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+			ret = _mv88e6xxx_reg_read(ps, REG_PORT(port),
 						  s->reg + 1);
 			if (ret < 0)
 				return UINT64_MAX;
@@ -723,22 +699,24 @@
 		break;
 	case BANK0:
 	case BANK1:
-		_mv88e6xxx_stats_read(ds, s->reg, &low);
+		_mv88e6xxx_stats_read(ps, s->reg, &low);
 		if (s->sizeof_stat == 8)
-			_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+			_mv88e6xxx_stats_read(ps, s->reg + 1, &high);
 	}
 	value = (((u64)high) << 16) | low;
 	return value;
 }
 
-void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
+				  uint8_t *data)
 {
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct mv88e6xxx_hw_stat *stat;
 	int i, j;
 
 	for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
 		stat = &mv88e6xxx_hw_stats[i];
-		if (mv88e6xxx_has_stat(ds, stat)) {
+		if (mv88e6xxx_has_stat(ps, stat)) {
 			memcpy(data + j * ETH_GSTRING_LEN, stat->string,
 			       ETH_GSTRING_LEN);
 			j++;
@@ -746,22 +724,22 @@
 	}
 }
 
-int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
 {
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct mv88e6xxx_hw_stat *stat;
 	int i, j;
 
 	for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
 		stat = &mv88e6xxx_hw_stats[i];
-		if (mv88e6xxx_has_stat(ds, stat))
+		if (mv88e6xxx_has_stat(ps, stat))
 			j++;
 	}
 	return j;
 }
 
-void
-mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
-			    int port, uint64_t *data)
+static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
+					uint64_t *data)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct mv88e6xxx_hw_stat *stat;
@@ -770,15 +748,15 @@
 
 	mutex_lock(&ps->smi_mutex);
 
-	ret = _mv88e6xxx_stats_snapshot(ds, port);
+	ret = _mv88e6xxx_stats_snapshot(ps, port);
 	if (ret < 0) {
 		mutex_unlock(&ps->smi_mutex);
 		return;
 	}
 	for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
 		stat = &mv88e6xxx_hw_stats[i];
-		if (mv88e6xxx_has_stat(ds, stat)) {
-			data[j] = _mv88e6xxx_get_ethtool_stat(ds, stat, port);
+		if (mv88e6xxx_has_stat(ps, stat)) {
+			data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port);
 			j++;
 		}
 	}
@@ -786,14 +764,15 @@
 	mutex_unlock(&ps->smi_mutex);
 }
 
-int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
+static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
 {
 	return 32 * sizeof(u16);
 }
 
-void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
-			struct ethtool_regs *regs, void *_p)
+static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+			       struct ethtool_regs *regs, void *_p)
 {
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	u16 *p = _p;
 	int i;
 
@@ -801,16 +780,20 @@
 
 	memset(p, 0xff, 32 * sizeof(u16));
 
+	mutex_lock(&ps->smi_mutex);
+
 	for (i = 0; i < 32; i++) {
 		int ret;
 
-		ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
+		ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), i);
 		if (ret >= 0)
 			p[i] = ret;
 	}
+
+	mutex_unlock(&ps->smi_mutex);
 }
 
-static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
+static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
 			   u16 mask)
 {
 	unsigned long timeout = jiffies + HZ / 10;
@@ -818,7 +801,7 @@
 	while (time_before(jiffies, timeout)) {
 		int ret;
 
-		ret = _mv88e6xxx_reg_read(ds, reg, offset);
+		ret = _mv88e6xxx_reg_read(ps, reg, offset);
 		if (ret < 0)
 			return ret;
 		if (!(ret & mask))
@@ -829,91 +812,320 @@
 	return -ETIMEDOUT;
 }
 
-static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
+			  int offset, u16 mask)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_wait(ds, reg, offset, mask);
+	ret = _mv88e6xxx_wait(ps, reg, offset, mask);
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
 }
 
-static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps)
 {
-	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+	return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
 			       GLOBAL2_SMI_OP_BUSY);
 }
 
-int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
+static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
 {
-	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
 			      GLOBAL2_EEPROM_OP_LOAD);
 }
 
-int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
+static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
 {
-	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
 			      GLOBAL2_EEPROM_OP_BUSY);
 }
 
-static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
+static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
 {
-	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+
+	mutex_lock(&ps->eeprom_mutex);
+
+	ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+				  GLOBAL2_EEPROM_OP_READ |
+				  (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
+	if (ret < 0)
+		goto error;
+
+	ret = mv88e6xxx_eeprom_busy_wait(ds);
+	if (ret < 0)
+		goto error;
+
+	ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
+error:
+	mutex_unlock(&ps->eeprom_mutex);
+	return ret;
+}
+
+static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+		return ps->eeprom_len;
+
+	return 0;
+}
+
+static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
+				struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int offset;
+	int len;
+	int ret;
+
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+		return -EOPNOTSUPP;
+
+	offset = eeprom->offset;
+	len = eeprom->len;
+	eeprom->len = 0;
+
+	eeprom->magic = 0xc3ec4951;
+
+	ret = mv88e6xxx_eeprom_load_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	if (offset & 1) {
+		int word;
+
+		word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+		if (word < 0)
+			return word;
+
+		*data++ = (word >> 8) & 0xff;
+
+		offset++;
+		len--;
+		eeprom->len++;
+	}
+
+	while (len >= 2) {
+		int word;
+
+		word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+		if (word < 0)
+			return word;
+
+		*data++ = word & 0xff;
+		*data++ = (word >> 8) & 0xff;
+
+		offset += 2;
+		len -= 2;
+		eeprom->len += 2;
+	}
+
+	if (len) {
+		int word;
+
+		word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+		if (word < 0)
+			return word;
+
+		*data++ = word & 0xff;
+
+		offset++;
+		len--;
+		eeprom->len++;
+	}
+
+	return 0;
+}
+
+static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+
+	ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
+	if (ret < 0)
+		return ret;
+
+	if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
+		return -EROFS;
+
+	return 0;
+}
+
+static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
+				       u16 data)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+
+	mutex_lock(&ps->eeprom_mutex);
+
+	ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+	if (ret < 0)
+		goto error;
+
+	ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+				  GLOBAL2_EEPROM_OP_WRITE |
+				  (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
+	if (ret < 0)
+		goto error;
+
+	ret = mv88e6xxx_eeprom_busy_wait(ds);
+error:
+	mutex_unlock(&ps->eeprom_mutex);
+	return ret;
+}
+
+static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
+				struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int offset;
+	int ret;
+	int len;
+
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+		return -EOPNOTSUPP;
+
+	if (eeprom->magic != 0xc3ec4951)
+		return -EINVAL;
+
+	ret = mv88e6xxx_eeprom_is_readonly(ds);
+	if (ret)
+		return ret;
+
+	offset = eeprom->offset;
+	len = eeprom->len;
+	eeprom->len = 0;
+
+	ret = mv88e6xxx_eeprom_load_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	if (offset & 1) {
+		int word;
+
+		word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+		if (word < 0)
+			return word;
+
+		word = (*data++ << 8) | (word & 0xff);
+
+		ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+		if (ret < 0)
+			return ret;
+
+		offset++;
+		len--;
+		eeprom->len++;
+	}
+
+	while (len >= 2) {
+		int word;
+
+		word = *data++;
+		word |= *data++ << 8;
+
+		ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+		if (ret < 0)
+			return ret;
+
+		offset += 2;
+		len -= 2;
+		eeprom->len += 2;
+	}
+
+	if (len) {
+		int word;
+
+		word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+		if (word < 0)
+			return word;
+
+		word = (word & 0xff00) | *data++;
+
+		ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+		if (ret < 0)
+			return ret;
+
+		offset++;
+		len--;
+		eeprom->len++;
+	}
+
+	return 0;
+}
+
+static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
+{
+	return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP,
 			       GLOBAL_ATU_OP_BUSY);
 }
 
-static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
-					int regnum)
+static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
+					int addr, int regnum)
 {
 	int ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
 				   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
 				   regnum);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_phy_wait(ds);
+	ret = _mv88e6xxx_phy_wait(ps);
 	if (ret < 0)
 		return ret;
 
-	return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
+	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA);
+
+	return ret;
 }
 
-static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
-					 int regnum, u16 val)
+static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
+					 int addr, int regnum, u16 val)
 {
 	int ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
 				   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
 				   regnum);
 
-	return _mv88e6xxx_phy_wait(ds);
+	return _mv88e6xxx_phy_wait(ps);
 }
 
-int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
+			     struct ethtool_eee *e)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int reg;
 
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
+		return -EOPNOTSUPP;
+
 	mutex_lock(&ps->smi_mutex);
 
-	reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+	reg = _mv88e6xxx_phy_read_indirect(ps, port, 16);
 	if (reg < 0)
 		goto out;
 
 	e->eee_enabled = !!(reg & 0x0200);
 	e->tx_lpi_enabled = !!(reg & 0x0100);
 
-	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+	reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
 	if (reg < 0)
 		goto out;
 
@@ -925,16 +1137,19 @@
 	return reg;
 }
 
-int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
-		      struct phy_device *phydev, struct ethtool_eee *e)
+static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+			     struct phy_device *phydev, struct ethtool_eee *e)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int reg;
 	int ret;
 
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
+		return -EOPNOTSUPP;
+
 	mutex_lock(&ps->smi_mutex);
 
-	ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+	ret = _mv88e6xxx_phy_read_indirect(ps, port, 16);
 	if (ret < 0)
 		goto out;
 
@@ -944,25 +1159,45 @@
 	if (e->tx_lpi_enabled)
 		reg |= 0x0100;
 
-	ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
+	ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg);
 out:
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
 }
 
-static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
+static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
 {
 	int ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
+	if (mv88e6xxx_has_fid_reg(ps)) {
+		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid);
+		if (ret < 0)
+			return ret;
+	} else if (mv88e6xxx_num_databases(ps) == 256) {
+		/* ATU DBNum[7:4] are located in ATU Control 15:12 */
+		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL);
+		if (ret < 0)
+			return ret;
+
+		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+					   (ret & 0xfff) |
+					   ((fid << 8) & 0xf000));
+		if (ret < 0)
+			return ret;
+
+		/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+		cmd |= fid & 0xf;
+	}
+
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
 	if (ret < 0)
 		return ret;
 
-	return _mv88e6xxx_atu_wait(ds);
+	return _mv88e6xxx_atu_wait(ps);
 }
 
-static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
 				     struct mv88e6xxx_atu_entry *entry)
 {
 	u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
@@ -982,30 +1217,25 @@
 		data |= (entry->portv_trunkid << shift) & mask;
 	}
 
-	return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
+	return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data);
 }
 
-static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
 				     struct mv88e6xxx_atu_entry *entry,
 				     bool static_too)
 {
 	int op;
 	int err;
 
-	err = _mv88e6xxx_atu_wait(ds);
+	err = _mv88e6xxx_atu_wait(ps);
 	if (err)
 		return err;
 
-	err = _mv88e6xxx_atu_data_write(ds, entry);
+	err = _mv88e6xxx_atu_data_write(ps, entry);
 	if (err)
 		return err;
 
 	if (entry->fid) {
-		err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
-					   entry->fid);
-		if (err)
-			return err;
-
 		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
 			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
 	} else {
@@ -1013,21 +1243,22 @@
 			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
 	}
 
-	return _mv88e6xxx_atu_cmd(ds, op);
+	return _mv88e6xxx_atu_cmd(ps, entry->fid, op);
 }
 
-static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
+static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
+				u16 fid, bool static_too)
 {
 	struct mv88e6xxx_atu_entry entry = {
 		.fid = fid,
 		.state = 0, /* EntryState bits must be 0 */
 	};
 
-	return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
+	return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
 }
 
-static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
-			       int to_port, bool static_too)
+static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
+			       int from_port, int to_port, bool static_too)
 {
 	struct mv88e6xxx_atu_entry entry = {
 		.trunk = false,
@@ -1041,14 +1272,14 @@
 	entry.portv_trunkid = (to_port & 0x0f) << 4;
 	entry.portv_trunkid |= from_port & 0x0f;
 
-	return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
+	return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
 }
 
-static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
-				 bool static_too)
+static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid,
+				 int port, bool static_too)
 {
 	/* Destination port 0xF means remove the entries */
-	return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
+	return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too);
 }
 
 static const char * const mv88e6xxx_port_state_names[] = {
@@ -1058,12 +1289,14 @@
 	[PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
 };
 
-static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
+static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
+				 u8 state)
 {
+	struct dsa_switch *ds = ps->ds;
 	int reg, ret = 0;
 	u8 oldstate;
 
-	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
+	reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL);
 	if (reg < 0)
 		return reg;
 
@@ -1078,13 +1311,13 @@
 		     oldstate == PORT_CONTROL_STATE_FORWARDING)
 		    && (state == PORT_CONTROL_STATE_DISABLED ||
 			state == PORT_CONTROL_STATE_BLOCKING)) {
-			ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
+			ret = _mv88e6xxx_atu_remove(ps, 0, port, false);
 			if (ret)
 				return ret;
 		}
 
 		reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL,
 					   reg);
 		if (ret)
 			return ret;
@@ -1097,11 +1330,12 @@
 	return ret;
 }
 
-static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
+static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
+					  int port)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct net_device *bridge = ps->ports[port].bridge_dev;
-	const u16 mask = (1 << ps->num_ports) - 1;
+	const u16 mask = (1 << ps->info->num_ports) - 1;
+	struct dsa_switch *ds = ps->ds;
 	u16 output_ports = 0;
 	int reg;
 	int i;
@@ -1110,7 +1344,7 @@
 	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
 		output_ports = mask;
 	} else {
-		for (i = 0; i < ps->num_ports; ++i) {
+		for (i = 0; i < ps->info->num_ports; ++i) {
 			/* allow sending frames to every group member */
 			if (bridge && ps->ports[i].bridge_dev == bridge)
 				output_ports |= BIT(i);
@@ -1124,20 +1358,25 @@
 	/* prevent frames from going back out of the port they came in on */
 	output_ports &= ~BIT(port);
 
-	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+	reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
 	if (reg < 0)
 		return reg;
 
 	reg &= ~mask;
 	reg |= output_ports & mask;
 
-	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
+	return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg);
 }
 
-int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
+static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
+					 u8 state)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int stp_state;
+	int err;
+
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE))
+		return;
 
 	switch (state) {
 	case BR_STATE_DISABLED:
@@ -1156,23 +1395,23 @@
 		break;
 	}
 
-	/* mv88e6xxx_port_stp_update may be called with softirqs disabled,
-	 * so we can not update the port state directly but need to schedule it.
-	 */
-	ps->ports[port].state = stp_state;
-	set_bit(port, ps->port_state_update_mask);
-	schedule_work(&ps->bridge_work);
+	mutex_lock(&ps->smi_mutex);
+	err = _mv88e6xxx_port_state(ps, port, stp_state);
+	mutex_unlock(&ps->smi_mutex);
 
-	return 0;
+	if (err)
+		netdev_err(ds->ports[port], "failed to update state to %s\n",
+			   mv88e6xxx_port_state_names[stp_state]);
 }
 
-static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
-				u16 *old)
+static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
+				u16 *new, u16 *old)
 {
+	struct dsa_switch *ds = ps->ds;
 	u16 pvid;
 	int ret;
 
-	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
+	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN);
 	if (ret < 0)
 		return ret;
 
@@ -1182,7 +1421,7 @@
 		ret &= ~PORT_DEFAULT_VLAN_MASK;
 		ret |= *new & PORT_DEFAULT_VLAN_MASK;
 
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
 					   PORT_DEFAULT_VLAN, ret);
 		if (ret < 0)
 			return ret;
@@ -1197,55 +1436,56 @@
 	return 0;
 }
 
-static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
+static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps,
+				    int port, u16 *pvid)
 {
-	return _mv88e6xxx_port_pvid(ds, port, NULL, pvid);
+	return _mv88e6xxx_port_pvid(ps, port, NULL, pvid);
 }
 
-static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
+static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps,
+				    int port, u16 pvid)
 {
-	return _mv88e6xxx_port_pvid(ds, port, &pvid, NULL);
+	return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL);
 }
 
-static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps)
 {
-	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
+	return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP,
 			       GLOBAL_VTU_OP_BUSY);
 }
 
-static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
+static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op)
 {
 	int ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op);
 	if (ret < 0)
 		return ret;
 
-	return _mv88e6xxx_vtu_wait(ds);
+	return _mv88e6xxx_vtu_wait(ps);
 }
 
-static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
+static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps)
 {
 	int ret;
 
-	ret = _mv88e6xxx_vtu_wait(ds);
+	ret = _mv88e6xxx_vtu_wait(ps);
 	if (ret < 0)
 		return ret;
 
-	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
+	return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL);
 }
 
-static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
 					struct mv88e6xxx_vtu_stu_entry *entry,
 					unsigned int nibble_offset)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	u16 regs[3];
 	int i;
 	int ret;
 
 	for (i = 0; i < 3; ++i) {
-		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
 					  GLOBAL_VTU_DATA_0_3 + i);
 		if (ret < 0)
 			return ret;
@@ -1253,7 +1493,7 @@
 		regs[i] = ret;
 	}
 
-	for (i = 0; i < ps->num_ports; ++i) {
+	for (i = 0; i < ps->info->num_ports; ++i) {
 		unsigned int shift = (i % 4) * 4 + nibble_offset;
 		u16 reg = regs[i / 4];
 
@@ -1263,16 +1503,27 @@
 	return 0;
 }
 
-static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
+static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_priv_state *ps,
+				   struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	return _mv88e6xxx_vtu_stu_data_read(ps, entry, 0);
+}
+
+static int mv88e6xxx_stu_data_read(struct mv88e6xxx_priv_state *ps,
+				   struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	return _mv88e6xxx_vtu_stu_data_read(ps, entry, 2);
+}
+
+static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
 					 struct mv88e6xxx_vtu_stu_entry *entry,
 					 unsigned int nibble_offset)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	u16 regs[3] = { 0 };
 	int i;
 	int ret;
 
-	for (i = 0; i < ps->num_ports; ++i) {
+	for (i = 0; i < ps->info->num_ports; ++i) {
 		unsigned int shift = (i % 4) * 4 + nibble_offset;
 		u8 data = entry->data[i];
 
@@ -1280,7 +1531,7 @@
 	}
 
 	for (i = 0; i < 3; ++i) {
-		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
+		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL,
 					   GLOBAL_VTU_DATA_0_3 + i, regs[i]);
 		if (ret < 0)
 			return ret;
@@ -1289,27 +1540,39 @@
 	return 0;
 }
 
-static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
+static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_priv_state *ps,
+				    struct mv88e6xxx_vtu_stu_entry *entry)
 {
-	return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+	return _mv88e6xxx_vtu_stu_data_write(ps, entry, 0);
+}
+
+static int mv88e6xxx_stu_data_write(struct mv88e6xxx_priv_state *ps,
+				    struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	return _mv88e6xxx_vtu_stu_data_write(ps, entry, 2);
+}
+
+static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid)
+{
+	return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID,
 				    vid & GLOBAL_VTU_VID_MASK);
 }
 
-static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
 				  struct mv88e6xxx_vtu_stu_entry *entry)
 {
 	struct mv88e6xxx_vtu_stu_entry next = { 0 };
 	int ret;
 
-	ret = _mv88e6xxx_vtu_wait(ds);
+	ret = _mv88e6xxx_vtu_wait(ps);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
+	ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
 	if (ret < 0)
 		return ret;
 
@@ -1317,20 +1580,32 @@
 	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
 
 	if (next.valid) {
-		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
+		ret = mv88e6xxx_vtu_data_read(ps, &next);
 		if (ret < 0)
 			return ret;
 
-		if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
-		    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
-			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+		if (mv88e6xxx_has_fid_reg(ps)) {
+			ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
 						  GLOBAL_VTU_FID);
 			if (ret < 0)
 				return ret;
 
 			next.fid = ret & GLOBAL_VTU_FID_MASK;
+		} else if (mv88e6xxx_num_databases(ps) == 256) {
+			/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
+			 * VTU DBNum[3:0] are located in VTU Operation 3:0
+			 */
+			ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
+						  GLOBAL_VTU_OP);
+			if (ret < 0)
+				return ret;
 
-			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+			next.fid = (ret & 0xf00) >> 4;
+			next.fid |= ret & 0xf;
+		}
+
+		if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
+			ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
 						  GLOBAL_VTU_SID);
 			if (ret < 0)
 				return ret;
@@ -1343,27 +1618,30 @@
 	return 0;
 }
 
-int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
-			     struct switchdev_obj_port_vlan *vlan,
-			     int (*cb)(struct switchdev_obj *obj))
+static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
+				    struct switchdev_obj_port_vlan *vlan,
+				    int (*cb)(struct switchdev_obj *obj))
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct mv88e6xxx_vtu_stu_entry next;
 	u16 pvid;
 	int err;
 
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+		return -EOPNOTSUPP;
+
 	mutex_lock(&ps->smi_mutex);
 
-	err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
+	err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
 	if (err)
 		goto unlock;
 
-	err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+	err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
 	if (err)
 		goto unlock;
 
 	do {
-		err = _mv88e6xxx_vtu_getnext(ds, &next);
+		err = _mv88e6xxx_vtu_getnext(ps, &next);
 		if (err)
 			break;
 
@@ -1394,13 +1672,14 @@
 	return err;
 }
 
-static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
 				    struct mv88e6xxx_vtu_stu_entry *entry)
 {
+	u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
 	u16 reg = 0;
 	int ret;
 
-	ret = _mv88e6xxx_vtu_wait(ds);
+	ret = _mv88e6xxx_vtu_wait(ps);
 	if (ret < 0)
 		return ret;
 
@@ -1408,66 +1687,73 @@
 		goto loadpurge;
 
 	/* Write port member tags */
-	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
+	ret = mv88e6xxx_vtu_data_write(ps, entry);
 	if (ret < 0)
 		return ret;
 
-	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
-	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
 		reg = entry->sid & GLOBAL_VTU_SID_MASK;
-		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
 		if (ret < 0)
 			return ret;
+	}
 
+	if (mv88e6xxx_has_fid_reg(ps)) {
 		reg = entry->fid & GLOBAL_VTU_FID_MASK;
-		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
+		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg);
 		if (ret < 0)
 			return ret;
+	} else if (mv88e6xxx_num_databases(ps) == 256) {
+		/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
+		 * VTU DBNum[3:0] are located in VTU Operation 3:0
+		 */
+		op |= (entry->fid & 0xf0) << 8;
+		op |= entry->fid & 0xf;
 	}
 
 	reg = GLOBAL_VTU_VID_VALID;
 loadpurge:
 	reg |= entry->vid & GLOBAL_VTU_VID_MASK;
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
 	if (ret < 0)
 		return ret;
 
-	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+	return _mv88e6xxx_vtu_cmd(ps, op);
 }
 
-static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
+static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
 				  struct mv88e6xxx_vtu_stu_entry *entry)
 {
 	struct mv88e6xxx_vtu_stu_entry next = { 0 };
 	int ret;
 
-	ret = _mv88e6xxx_vtu_wait(ds);
+	ret = _mv88e6xxx_vtu_wait(ps);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID,
 				   sid & GLOBAL_VTU_SID_MASK);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
+	ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
+	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID);
 	if (ret < 0)
 		return ret;
 
 	next.sid = ret & GLOBAL_VTU_SID_MASK;
 
-	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
 	if (ret < 0)
 		return ret;
 
 	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
 
 	if (next.valid) {
-		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
+		ret = mv88e6xxx_stu_data_read(ps, &next);
 		if (ret < 0)
 			return ret;
 	}
@@ -1476,13 +1762,13 @@
 	return 0;
 }
 
-static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
+static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
 				    struct mv88e6xxx_vtu_stu_entry *entry)
 {
 	u16 reg = 0;
 	int ret;
 
-	ret = _mv88e6xxx_vtu_wait(ds);
+	ret = _mv88e6xxx_vtu_wait(ps);
 	if (ret < 0)
 		return ret;
 
@@ -1490,32 +1776,41 @@
 		goto loadpurge;
 
 	/* Write port states */
-	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
+	ret = mv88e6xxx_stu_data_write(ps, entry);
 	if (ret < 0)
 		return ret;
 
 	reg = GLOBAL_VTU_VID_VALID;
 loadpurge:
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
 	if (ret < 0)
 		return ret;
 
 	reg = entry->sid & GLOBAL_VTU_SID_MASK;
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
 	if (ret < 0)
 		return ret;
 
-	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+	return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE);
 }
 
-static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
-			       u16 *old)
+static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
+			       u16 *new, u16 *old)
 {
+	struct dsa_switch *ds = ps->ds;
+	u16 upper_mask;
 	u16 fid;
 	int ret;
 
+	if (mv88e6xxx_num_databases(ps) == 4096)
+		upper_mask = 0xff;
+	else if (mv88e6xxx_num_databases(ps) == 256)
+		upper_mask = 0xf;
+	else
+		return -EOPNOTSUPP;
+
 	/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
-	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
 	if (ret < 0)
 		return ret;
 
@@ -1525,24 +1820,24 @@
 		ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
 		ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
 
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN,
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN,
 					   ret);
 		if (ret < 0)
 			return ret;
 	}
 
 	/* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
-	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1);
+	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1);
 	if (ret < 0)
 		return ret;
 
-	fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4;
+	fid |= (ret & upper_mask) << 4;
 
 	if (new) {
-		ret &= ~PORT_CONTROL_1_FID_11_4_MASK;
-		ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK;
+		ret &= ~upper_mask;
+		ret |= (*new >> 4) & upper_mask;
 
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1,
 					   ret);
 		if (ret < 0)
 			return ret;
@@ -1556,19 +1851,20 @@
 	return 0;
 }
 
-static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid)
+static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps,
+				   int port, u16 *fid)
 {
-	return _mv88e6xxx_port_fid(ds, port, NULL, fid);
+	return _mv88e6xxx_port_fid(ps, port, NULL, fid);
 }
 
-static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid)
+static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps,
+				   int port, u16 fid)
 {
-	return _mv88e6xxx_port_fid(ds, port, &fid, NULL);
+	return _mv88e6xxx_port_fid(ps, port, &fid, NULL);
 }
 
-static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
+static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
 	struct mv88e6xxx_vtu_stu_entry vlan;
 	int i, err;
@@ -1576,8 +1872,8 @@
 	bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
 
 	/* Set every FID bit used by the (un)bridged ports */
-	for (i = 0; i < ps->num_ports; ++i) {
-		err = _mv88e6xxx_port_fid_get(ds, i, fid);
+	for (i = 0; i < ps->info->num_ports; ++i) {
+		err = _mv88e6xxx_port_fid_get(ps, i, fid);
 		if (err)
 			return err;
 
@@ -1585,12 +1881,12 @@
 	}
 
 	/* Set every FID bit used by the VLAN entries */
-	err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+	err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
 	if (err)
 		return err;
 
 	do {
-		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+		err = _mv88e6xxx_vtu_getnext(ps, &vlan);
 		if (err)
 			return err;
 
@@ -1604,35 +1900,35 @@
 	 * databases are not needed. Return the next positive available.
 	 */
 	*fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
-	if (unlikely(*fid == MV88E6XXX_N_FID))
+	if (unlikely(*fid >= mv88e6xxx_num_databases(ps)))
 		return -ENOSPC;
 
 	/* Clear the database */
-	return _mv88e6xxx_atu_flush(ds, *fid, true);
+	return _mv88e6xxx_atu_flush(ps, *fid, true);
 }
 
-static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
 			      struct mv88e6xxx_vtu_stu_entry *entry)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct dsa_switch *ds = ps->ds;
 	struct mv88e6xxx_vtu_stu_entry vlan = {
 		.valid = true,
 		.vid = vid,
 	};
 	int i, err;
 
-	err = _mv88e6xxx_fid_new(ds, &vlan.fid);
+	err = _mv88e6xxx_fid_new(ps, &vlan.fid);
 	if (err)
 		return err;
 
 	/* exclude all ports except the CPU and DSA ports */
-	for (i = 0; i < ps->num_ports; ++i)
+	for (i = 0; i < ps->info->num_ports; ++i)
 		vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
 			? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
 			: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
 
-	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
-	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+	if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
+	    mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) {
 		struct mv88e6xxx_vtu_stu_entry vstp;
 
 		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
@@ -1640,7 +1936,7 @@
 		 * entries. Thus, validate the SID 0.
 		 */
 		vlan.sid = 0;
-		err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
+		err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp);
 		if (err)
 			return err;
 
@@ -1649,7 +1945,7 @@
 			vstp.valid = true;
 			vstp.sid = vlan.sid;
 
-			err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
+			err = _mv88e6xxx_stu_loadpurge(ps, &vstp);
 			if (err)
 				return err;
 		}
@@ -1659,7 +1955,7 @@
 	return 0;
 }
 
-static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
 			      struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
 {
 	int err;
@@ -1667,11 +1963,11 @@
 	if (!vid)
 		return -EINVAL;
 
-	err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+	err = _mv88e6xxx_vtu_vid_write(ps, vid - 1);
 	if (err)
 		return err;
 
-	err = _mv88e6xxx_vtu_getnext(ds, entry);
+	err = _mv88e6xxx_vtu_getnext(ps, entry);
 	if (err)
 		return err;
 
@@ -1682,7 +1978,7 @@
 		 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
 		 */
 
-		err = _mv88e6xxx_vtu_new(ds, vid, entry);
+		err = _mv88e6xxx_vtu_new(ps, vid, entry);
 	}
 
 	return err;
@@ -1700,12 +1996,12 @@
 
 	mutex_lock(&ps->smi_mutex);
 
-	err = _mv88e6xxx_vtu_vid_write(ds, vid_begin - 1);
+	err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1);
 	if (err)
 		goto unlock;
 
 	do {
-		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+		err = _mv88e6xxx_vtu_getnext(ps, &vlan);
 		if (err)
 			goto unlock;
 
@@ -1715,7 +2011,7 @@
 		if (vlan.vid > vid_end)
 			break;
 
-		for (i = 0; i < ps->num_ports; ++i) {
+		for (i = 0; i < ps->info->num_ports; ++i) {
 			if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
 				continue;
 
@@ -1749,17 +2045,20 @@
 	[PORT_CONTROL_2_8021Q_SECURE] = "Secure",
 };
 
-int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
-				  bool vlan_filtering)
+static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
+					 bool vlan_filtering)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
 		PORT_CONTROL_2_8021Q_DISABLED;
 	int ret;
 
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+		return -EOPNOTSUPP;
+
 	mutex_lock(&ps->smi_mutex);
 
-	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2);
+	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2);
 	if (ret < 0)
 		goto unlock;
 
@@ -1769,7 +2068,7 @@
 		ret &= ~PORT_CONTROL_2_8021Q_MASK;
 		ret |= new & PORT_CONTROL_2_8021Q_MASK;
 
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2,
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2,
 					   ret);
 		if (ret < 0)
 			goto unlock;
@@ -1786,12 +2085,16 @@
 	return ret;
 }
 
-int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
-				const struct switchdev_obj_port_vlan *vlan,
-				struct switchdev_trans *trans)
+static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
+				       const struct switchdev_obj_port_vlan *vlan,
+				       struct switchdev_trans *trans)
 {
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int err;
 
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+		return -EOPNOTSUPP;
+
 	/* If the requested port doesn't belong to the same bridge as the VLAN
 	 * members, do not support it (yet) and fallback to software VLAN.
 	 */
@@ -1806,13 +2109,13 @@
 	return 0;
 }
 
-static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
-				    bool untagged)
+static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
+				    u16 vid, bool untagged)
 {
 	struct mv88e6xxx_vtu_stu_entry vlan;
 	int err;
 
-	err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true);
+	err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true);
 	if (err)
 		return err;
 
@@ -1820,43 +2123,43 @@
 		GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
 		GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
 
-	return _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+	return _mv88e6xxx_vtu_loadpurge(ps, &vlan);
 }
 
-int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
-			    const struct switchdev_obj_port_vlan *vlan,
-			    struct switchdev_trans *trans)
+static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+				    const struct switchdev_obj_port_vlan *vlan,
+				    struct switchdev_trans *trans)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
 	u16 vid;
-	int err = 0;
+
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+		return;
 
 	mutex_lock(&ps->smi_mutex);
 
-	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
-		err = _mv88e6xxx_port_vlan_add(ds, port, vid, untagged);
-		if (err)
-			goto unlock;
-	}
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+		if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
+			netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
+				   vid, untagged ? 'u' : 't');
 
-	/* no PVID with ranges, otherwise it's a bug */
-	if (pvid)
-		err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
-unlock:
+	if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
+		netdev_err(ds->ports[port], "failed to set PVID %d\n",
+			   vlan->vid_end);
+
 	mutex_unlock(&ps->smi_mutex);
-
-	return err;
 }
 
-static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
+				    int port, u16 vid)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct dsa_switch *ds = ps->ds;
 	struct mv88e6xxx_vtu_stu_entry vlan;
 	int i, err;
 
-	err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
+	err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
 	if (err)
 		return err;
 
@@ -1868,7 +2171,7 @@
 
 	/* keep the VLAN unless all ports are excluded */
 	vlan.valid = false;
-	for (i = 0; i < ps->num_ports; ++i) {
+	for (i = 0; i < ps->info->num_ports; ++i) {
 		if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
 			continue;
 
@@ -1878,33 +2181,36 @@
 		}
 	}
 
-	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+	err = _mv88e6xxx_vtu_loadpurge(ps, &vlan);
 	if (err)
 		return err;
 
-	return _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
+	return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false);
 }
 
-int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
-			    const struct switchdev_obj_port_vlan *vlan)
+static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
+				   const struct switchdev_obj_port_vlan *vlan)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	u16 pvid, vid;
 	int err = 0;
 
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+		return -EOPNOTSUPP;
+
 	mutex_lock(&ps->smi_mutex);
 
-	err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
+	err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
 	if (err)
 		goto unlock;
 
 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
-		err = _mv88e6xxx_port_vlan_del(ds, port, vid);
+		err = _mv88e6xxx_port_vlan_del(ps, port, vid);
 		if (err)
 			goto unlock;
 
 		if (vid == pvid) {
-			err = _mv88e6xxx_port_pvid_set(ds, port, 0);
+			err = _mv88e6xxx_port_pvid_set(ps, port, 0);
 			if (err)
 				goto unlock;
 		}
@@ -1916,14 +2222,14 @@
 	return err;
 }
 
-static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
 				    const unsigned char *addr)
 {
 	int i, ret;
 
 	for (i = 0; i < 3; i++) {
 		ret = _mv88e6xxx_reg_write(
-			ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
+			ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
 			(addr[i * 2] << 8) | addr[i * 2 + 1]);
 		if (ret < 0)
 			return ret;
@@ -1932,12 +2238,13 @@
 	return 0;
 }
 
-static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
+static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
+				   unsigned char *addr)
 {
 	int i, ret;
 
 	for (i = 0; i < 3; i++) {
-		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
 					  GLOBAL_ATU_MAC_01 + i);
 		if (ret < 0)
 			return ret;
@@ -1948,31 +2255,27 @@
 	return 0;
 }
 
-static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps,
 			       struct mv88e6xxx_atu_entry *entry)
 {
 	int ret;
 
-	ret = _mv88e6xxx_atu_wait(ds);
+	ret = _mv88e6xxx_atu_wait(ps);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
+	ret = _mv88e6xxx_atu_mac_write(ps, entry->mac);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_atu_data_write(ds, entry);
+	ret = _mv88e6xxx_atu_data_write(ps, entry);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
-	if (ret < 0)
-		return ret;
-
-	return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
+	return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
 }
 
-static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
+static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
 				    const unsigned char *addr, u16 vid,
 				    u8 state)
 {
@@ -1982,9 +2285,9 @@
 
 	/* Null VLAN ID corresponds to the port private database */
 	if (vid == 0)
-		err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid);
+		err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid);
 	else
-		err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
+		err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
 	if (err)
 		return err;
 
@@ -1996,51 +2299,60 @@
 		entry.portv_trunkid = BIT(port);
 	}
 
-	return _mv88e6xxx_atu_load(ds, &entry);
+	return _mv88e6xxx_atu_load(ps, &entry);
 }
 
-int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
-			       const struct switchdev_obj_port_fdb *fdb,
-			       struct switchdev_trans *trans)
+static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
+				      const struct switchdev_obj_port_fdb *fdb,
+				      struct switchdev_trans *trans)
 {
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+		return -EOPNOTSUPP;
+
 	/* We don't need any dynamic resource from the kernel (yet),
 	 * so skip the prepare phase.
 	 */
 	return 0;
 }
 
-int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
-			   const struct switchdev_obj_port_fdb *fdb,
-			   struct switchdev_trans *trans)
+static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+				   const struct switchdev_obj_port_fdb *fdb,
+				   struct switchdev_trans *trans)
 {
 	int state = is_multicast_ether_addr(fdb->addr) ?
 		GLOBAL_ATU_DATA_STATE_MC_STATIC :
 		GLOBAL_ATU_DATA_STATE_UC_STATIC;
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
+
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+		return;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
+	if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
+		netdev_err(ds->ports[port], "failed to load MAC address\n");
 	mutex_unlock(&ps->smi_mutex);
-
-	return ret;
 }
 
-int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
-			   const struct switchdev_obj_port_fdb *fdb)
+static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
+				  const struct switchdev_obj_port_fdb *fdb)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+		return -EOPNOTSUPP;
+
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
+	ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid,
 				       GLOBAL_ATU_DATA_STATE_UNUSED);
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
 }
 
-static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
+static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
 				  struct mv88e6xxx_atu_entry *entry)
 {
 	struct mv88e6xxx_atu_entry next = { 0 };
@@ -2048,23 +2360,19 @@
 
 	next.fid = fid;
 
-	ret = _mv88e6xxx_atu_wait(ds);
+	ret = _mv88e6xxx_atu_wait(ps);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
+	ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
+	ret = _mv88e6xxx_atu_mac_read(ps, next.mac);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
-	if (ret < 0)
-		return ret;
-
-	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
+	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA);
 	if (ret < 0)
 		return ret;
 
@@ -2089,8 +2397,8 @@
 	return 0;
 }
 
-static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
-					int port,
+static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
+					u16 fid, u16 vid, int port,
 					struct switchdev_obj_port_fdb *fdb,
 					int (*cb)(struct switchdev_obj *obj))
 {
@@ -2099,12 +2407,12 @@
 	};
 	int err;
 
-	err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+	err = _mv88e6xxx_atu_mac_write(ps, addr.mac);
 	if (err)
 		return err;
 
 	do {
-		err = _mv88e6xxx_atu_getnext(ds, fid, &addr);
+		err = _mv88e6xxx_atu_getnext(ps, fid, &addr);
 		if (err)
 			break;
 
@@ -2130,9 +2438,9 @@
 	return err;
 }
 
-int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
-			    struct switchdev_obj_port_fdb *fdb,
-			    int (*cb)(struct switchdev_obj *obj))
+static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+				   struct switchdev_obj_port_fdb *fdb,
+				   int (*cb)(struct switchdev_obj *obj))
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct mv88e6xxx_vtu_stu_entry vlan = {
@@ -2141,31 +2449,34 @@
 	u16 fid;
 	int err;
 
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+		return -EOPNOTSUPP;
+
 	mutex_lock(&ps->smi_mutex);
 
 	/* Dump port's default Filtering Information Database (VLAN ID 0) */
-	err = _mv88e6xxx_port_fid_get(ds, port, &fid);
+	err = _mv88e6xxx_port_fid_get(ps, port, &fid);
 	if (err)
 		goto unlock;
 
-	err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb);
+	err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb);
 	if (err)
 		goto unlock;
 
 	/* Dump VLANs' Filtering Information Databases */
-	err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
+	err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid);
 	if (err)
 		goto unlock;
 
 	do {
-		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+		err = _mv88e6xxx_vtu_getnext(ps, &vlan);
 		if (err)
 			break;
 
 		if (!vlan.valid)
 			break;
 
-		err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port,
+		err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port,
 						   fdb, cb);
 		if (err)
 			break;
@@ -2177,501 +2488,106 @@
 	return err;
 }
 
-int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
-			       struct net_device *bridge)
+static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
+				      struct net_device *bridge)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u16 fid;
-	int i, err;
+	int i, err = 0;
+
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
+		return -EOPNOTSUPP;
 
 	mutex_lock(&ps->smi_mutex);
 
-	/* Get or create the bridge FID and assign it to the port */
-	for (i = 0; i < ps->num_ports; ++i)
-		if (ps->ports[i].bridge_dev == bridge)
-			break;
-
-	if (i < ps->num_ports)
-		err = _mv88e6xxx_port_fid_get(ds, i, &fid);
-	else
-		err = _mv88e6xxx_fid_new(ds, &fid);
-	if (err)
-		goto unlock;
-
-	err = _mv88e6xxx_port_fid_set(ds, port, fid);
-	if (err)
-		goto unlock;
-
 	/* Assign the bridge and remap each port's VLANTable */
 	ps->ports[port].bridge_dev = bridge;
 
-	for (i = 0; i < ps->num_ports; ++i) {
+	for (i = 0; i < ps->info->num_ports; ++i) {
 		if (ps->ports[i].bridge_dev == bridge) {
-			err = _mv88e6xxx_port_based_vlan_map(ds, i);
+			err = _mv88e6xxx_port_based_vlan_map(ps, i);
 			if (err)
 				break;
 		}
 	}
 
-unlock:
 	mutex_unlock(&ps->smi_mutex);
 
 	return err;
 }
 
-void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
+static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct net_device *bridge = ps->ports[port].bridge_dev;
-	u16 fid;
 	int i;
 
-	mutex_lock(&ps->smi_mutex);
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
+		return;
 
-	/* Give the port a fresh Filtering Information Database */
-	if (_mv88e6xxx_fid_new(ds, &fid) ||
-	    _mv88e6xxx_port_fid_set(ds, port, fid))
-		netdev_warn(ds->ports[port], "failed to assign a new FID\n");
+	mutex_lock(&ps->smi_mutex);
 
 	/* Unassign the bridge and remap each port's VLANTable */
 	ps->ports[port].bridge_dev = NULL;
 
-	for (i = 0; i < ps->num_ports; ++i)
+	for (i = 0; i < ps->info->num_ports; ++i)
 		if (i == port || ps->ports[i].bridge_dev == bridge)
-			if (_mv88e6xxx_port_based_vlan_map(ds, i))
+			if (_mv88e6xxx_port_based_vlan_map(ps, i))
 				netdev_warn(ds->ports[i], "failed to remap\n");
 
 	mutex_unlock(&ps->smi_mutex);
 }
 
-static void mv88e6xxx_bridge_work(struct work_struct *work)
+static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
+				     int port, int page, int reg, int val)
 {
-	struct mv88e6xxx_priv_state *ps;
-	struct dsa_switch *ds;
-	int port;
-
-	ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
-	ds = ((struct dsa_switch *)ps) - 1;
-
-	mutex_lock(&ps->smi_mutex);
-
-	for (port = 0; port < ps->num_ports; ++port)
-		if (test_and_clear_bit(port, ps->port_state_update_mask) &&
-		    _mv88e6xxx_port_state(ds, port, ps->ports[port].state))
-			netdev_warn(ds->ports[port], "failed to update state to %s\n",
-				    mv88e6xxx_port_state_names[ps->ports[port].state]);
-
-	mutex_unlock(&ps->smi_mutex);
-}
-
-static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
-	u16 reg;
 
-	mutex_lock(&ps->smi_mutex);
-
-	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
-	    mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
-		/* MAC Forcing register: don't force link, speed,
-		 * duplex or flow control state to any particular
-		 * values on physical ports, but force the CPU port
-		 * and all DSA ports to their maximum bandwidth and
-		 * full duplex.
-		 */
-		reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
-		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
-			reg &= ~PORT_PCS_CTRL_UNFORCED;
-			reg |= PORT_PCS_CTRL_FORCE_LINK |
-				PORT_PCS_CTRL_LINK_UP |
-				PORT_PCS_CTRL_DUPLEX_FULL |
-				PORT_PCS_CTRL_FORCE_DUPLEX;
-			if (mv88e6xxx_6065_family(ds))
-				reg |= PORT_PCS_CTRL_100;
-			else
-				reg |= PORT_PCS_CTRL_1000;
-		} else {
-			reg |= PORT_PCS_CTRL_UNFORCED;
-		}
-
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
-					   PORT_PCS_CTRL, reg);
-		if (ret)
-			goto abort;
-	}
-
-	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
-	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
-	 * tunneling, determine priority by looking at 802.1p and IP
-	 * priority fields (IP prio has precedence), and set STP state
-	 * to Forwarding.
-	 *
-	 * If this is the CPU link, use DSA or EDSA tagging depending
-	 * on which tagging mode was configured.
-	 *
-	 * If this is a link to another switch, use DSA tagging mode.
-	 *
-	 * If this is the upstream port for this switch, enable
-	 * forwarding of unknown unicasts and multicasts.
-	 */
-	reg = 0;
-	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
-		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
-		PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
-		PORT_CONTROL_STATE_FORWARDING;
-	if (dsa_is_cpu_port(ds, port)) {
-		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
-			reg |= PORT_CONTROL_DSA_TAG;
-		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-		    mv88e6xxx_6320_family(ds)) {
-			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
-				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
-			else
-				reg |= PORT_CONTROL_FRAME_MODE_DSA;
-			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
-				PORT_CONTROL_FORWARD_UNKNOWN_MC;
-		}
-
-		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-		    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-		    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
-			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
-				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
-		}
-	}
-	if (dsa_is_dsa_port(ds, port)) {
-		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
-			reg |= PORT_CONTROL_DSA_TAG;
-		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-		    mv88e6xxx_6320_family(ds)) {
-			reg |= PORT_CONTROL_FRAME_MODE_DSA;
-		}
-
-		if (port == dsa_upstream_port(ds))
-			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
-				PORT_CONTROL_FORWARD_UNKNOWN_MC;
-	}
-	if (reg) {
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
-					   PORT_CONTROL, reg);
-		if (ret)
-			goto abort;
-	}
-
-	/* Port Control 2: don't force a good FCS, set the maximum frame size to
-	 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
-	 * untagged frames on this port, do a destination address lookup on all
-	 * received packets as usual, disable ARP mirroring and don't send a
-	 * copy of all transmitted/received frames on this port to the CPU.
-	 */
-	reg = 0;
-	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
-		reg = PORT_CONTROL_2_MAP_DA;
-
-	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
-		reg |= PORT_CONTROL_2_JUMBO_10240;
-
-	if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
-		/* Set the upstream port this port should use */
-		reg |= dsa_upstream_port(ds);
-		/* enable forwarding of unknown multicast addresses to
-		 * the upstream port
-		 */
-		if (port == dsa_upstream_port(ds))
-			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
-	}
-
-	reg |= PORT_CONTROL_2_8021Q_DISABLED;
-
-	if (reg) {
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
-					   PORT_CONTROL_2, reg);
-		if (ret)
-			goto abort;
-	}
-
-	/* Port Association Vector: when learning source addresses
-	 * of packets, add the address to the address database using
-	 * a port bitmap that has only the bit for this port set and
-	 * the other bits clear.
-	 */
-	reg = 1 << port;
-	/* Disable learning for DSA and CPU ports */
-	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
-		reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
-
-	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
-	if (ret)
-		goto abort;
-
-	/* Egress rate control 2: disable egress rate control. */
-	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
-				   0x0000);
-	if (ret)
-		goto abort;
-
-	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6320_family(ds)) {
-		/* Do not limit the period of time that this port can
-		 * be paused for by the remote end or the period of
-		 * time that this port can pause the remote end.
-		 */
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
-					   PORT_PAUSE_CTRL, 0x0000);
-		if (ret)
-			goto abort;
-
-		/* Port ATU control: disable limiting the number of
-		 * address database entries that this port is allowed
-		 * to use.
-		 */
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
-					   PORT_ATU_CONTROL, 0x0000);
-		/* Priority Override: disable DA, SA and VTU priority
-		 * override.
-		 */
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
-					   PORT_PRI_OVERRIDE, 0x0000);
-		if (ret)
-			goto abort;
-
-		/* Port Ethertype: use the Ethertype DSA Ethertype
-		 * value.
-		 */
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
-					   PORT_ETH_TYPE, ETH_P_EDSA);
-		if (ret)
-			goto abort;
-		/* Tag Remap: use an identity 802.1p prio -> switch
-		 * prio mapping.
-		 */
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
-					   PORT_TAG_REGMAP_0123, 0x3210);
-		if (ret)
-			goto abort;
-
-		/* Tag Remap 2: use an identity 802.1p prio -> switch
-		 * prio mapping.
-		 */
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
-					   PORT_TAG_REGMAP_4567, 0x7654);
-		if (ret)
-			goto abort;
-	}
-
-	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
-	    mv88e6xxx_6320_family(ds)) {
-		/* Rate Control: disable ingress rate limiting. */
-		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
-					   PORT_RATE_CONTROL, 0x0001);
-		if (ret)
-			goto abort;
-	}
-
-	/* Port Control 1: disable trunking, disable sending
-	 * learning messages to this port.
-	 */
-	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
-	if (ret)
-		goto abort;
-
-	/* Port based VLAN map: give each port its own address
-	 * database, and allow bidirectional communication between the
-	 * CPU and DSA port(s), and the other ports.
-	 */
-	ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
-	if (ret)
-		goto abort;
-
-	ret = _mv88e6xxx_port_based_vlan_map(ds, port);
-	if (ret)
-		goto abort;
-
-	/* Default VLAN ID and priority: don't set a default VLAN
-	 * ID, and set the default packet priority to zero.
-	 */
-	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
-				   0x0000);
-abort:
-	mutex_unlock(&ps->smi_mutex);
-	return ret;
-}
-
-int mv88e6xxx_setup_ports(struct dsa_switch *ds)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-	int i;
-
-	for (i = 0; i < ps->num_ports; i++) {
-		ret = mv88e6xxx_setup_port(ds, i);
-		if (ret < 0)
-			return ret;
-	}
-	return 0;
-}
-
-int mv88e6xxx_setup_common(struct dsa_switch *ds)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	mutex_init(&ps->smi_mutex);
-
-	ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
-
-	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
-
-	return 0;
-}
-
-int mv88e6xxx_setup_global(struct dsa_switch *ds)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-	int i;
-
-	/* Set the default address aging time to 5 minutes, and
-	 * enable address learn messages to be sent to all message
-	 * ports.
-	 */
-	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
-		  0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
-
-	/* Configure the IP ToS mapping registers. */
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
-	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
-
-	/* Configure the IEEE 802.1p priority mapping register. */
-	REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
-
-	/* Send all frames with destination addresses matching
-	 * 01:80:c2:00:00:0x to the CPU port.
-	 */
-	REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
-
-	/* Ignore removed tag data on doubly tagged packets, disable
-	 * flow control messages, force flow control priority to the
-	 * highest, and send all special multicast frames to the CPU
-	 * port at the highest priority.
-	 */
-	REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
-		  0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
-		  GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
-
-	/* Program the DSA routing table. */
-	for (i = 0; i < 32; i++) {
-		int nexthop = 0x1f;
-
-		if (ds->pd->rtable &&
-		    i != ds->index && i < ds->dst->pd->nr_chips)
-			nexthop = ds->pd->rtable[i] & 0x1f;
-
-		REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
-			  GLOBAL2_DEVICE_MAPPING_UPDATE |
-			  (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
-			  nexthop);
-	}
-
-	/* Clear all trunk masks. */
-	for (i = 0; i < 8; i++)
-		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
-			  0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
-			  ((1 << ps->num_ports) - 1));
-
-	/* Clear all trunk mappings. */
-	for (i = 0; i < 16; i++)
-		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
-			  GLOBAL2_TRUNK_MAPPING_UPDATE |
-			  (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
-
-	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6320_family(ds)) {
-		/* Send all frames with destination addresses matching
-		 * 01:80:c2:00:00:2x to the CPU port.
-		 */
-		REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
-
-		/* Initialise cross-chip port VLAN table to reset
-		 * defaults.
-		 */
-		REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
-
-		/* Clear the priority override table. */
-		for (i = 0; i < 16; i++)
-			REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
-				  0x8000 | (i << 8));
-	}
-
-	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
-	    mv88e6xxx_6320_family(ds)) {
-		/* Disable ingress rate limiting by resetting all
-		 * ingress rate limit registers to their initial
-		 * state.
-		 */
-		for (i = 0; i < ps->num_ports; i++)
-			REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
-				  0x9000 | (i << 8));
-	}
-
-	/* Clear the statistics counters for all ports */
-	REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
-
-	/* Wait for the flush to complete. */
-	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_stats_wait(ds);
+	ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
 	if (ret < 0)
-		goto unlock;
+		goto restore_page_0;
 
-	/* Clear all ATU entries */
-	ret = _mv88e6xxx_atu_flush(ds, 0, true);
-	if (ret < 0)
-		goto unlock;
-
-	/* Clear all the VTU and STU entries */
-	ret = _mv88e6xxx_vtu_stu_flush(ds);
-unlock:
-	mutex_unlock(&ps->smi_mutex);
+	ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
+restore_page_0:
+	_mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
 
 	return ret;
 }
 
-int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
+static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
+				    int port, int page, int reg)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+
+	ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
+	if (ret < 0)
+		goto restore_page_0;
+
+	ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
+restore_page_0:
+	_mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
+
+	return ret;
+}
+
+static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
+{
+	bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE);
 	u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
-	struct gpio_desc *gpiod = ds->pd->reset;
+	struct gpio_desc *gpiod = ps->reset;
 	unsigned long timeout;
 	int ret;
 	int i;
 
 	/* Set all ports to the disabled state. */
-	for (i = 0; i < ps->num_ports; i++) {
-		ret = REG_READ(REG_PORT(i), PORT_CONTROL);
-		REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
+	for (i = 0; i < ps->info->num_ports; i++) {
+		ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL);
+		if (ret < 0)
+			return ret;
+
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL,
+					   ret & 0xfffc);
+		if (ret)
+			return ret;
 	}
 
 	/* Wait for transmit queues to drain. */
@@ -2690,37 +2606,563 @@
 	 * through global registers 0x18 and 0x19.
 	 */
 	if (ppu_active)
-		REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
+		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000);
 	else
-		REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400);
+	if (ret)
+		return ret;
 
 	/* Wait up to one second for reset to complete. */
 	timeout = jiffies + 1 * HZ;
 	while (time_before(jiffies, timeout)) {
-		ret = REG_READ(REG_GLOBAL, 0x00);
+		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00);
+		if (ret < 0)
+			return ret;
+
 		if ((ret & is_reset) == is_reset)
 			break;
 		usleep_range(1000, 2000);
 	}
 	if (time_after(jiffies, timeout))
-		return -ETIMEDOUT;
+		ret = -ETIMEDOUT;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
+{
+	int ret;
+
+	ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
+				       MII_BMCR);
+	if (ret < 0)
+		return ret;
+
+	if (ret & BMCR_PDOWN) {
+		ret &= ~BMCR_PDOWN;
+		ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES,
+						PAGE_FIBER_SERDES, MII_BMCR,
+						ret);
+	}
+
+	return ret;
+}
+
+static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
+{
+	struct dsa_switch *ds = ps->ds;
+	int ret;
+	u16 reg;
+
+	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+	    mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+	    mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) {
+		/* MAC Forcing register: don't force link, speed,
+		 * duplex or flow control state to any particular
+		 * values on physical ports, but force the CPU port
+		 * and all DSA ports to their maximum bandwidth and
+		 * full duplex.
+		 */
+		reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
+		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+			reg &= ~PORT_PCS_CTRL_UNFORCED;
+			reg |= PORT_PCS_CTRL_FORCE_LINK |
+				PORT_PCS_CTRL_LINK_UP |
+				PORT_PCS_CTRL_DUPLEX_FULL |
+				PORT_PCS_CTRL_FORCE_DUPLEX;
+			if (mv88e6xxx_6065_family(ps))
+				reg |= PORT_PCS_CTRL_100;
+			else
+				reg |= PORT_PCS_CTRL_1000;
+		} else {
+			reg |= PORT_PCS_CTRL_UNFORCED;
+		}
+
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+					   PORT_PCS_CTRL, reg);
+		if (ret)
+			return ret;
+	}
+
+	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+	 * tunneling, determine priority by looking at 802.1p and IP
+	 * priority fields (IP prio has precedence), and set STP state
+	 * to Forwarding.
+	 *
+	 * If this is the CPU link, use DSA or EDSA tagging depending
+	 * on which tagging mode was configured.
+	 *
+	 * If this is a link to another switch, use DSA tagging mode.
+	 *
+	 * If this is the upstream port for this switch, enable
+	 * forwarding of unknown unicasts and multicasts.
+	 */
+	reg = 0;
+	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+	    mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
+	    mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps))
+		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
+		PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
+		PORT_CONTROL_STATE_FORWARDING;
+	if (dsa_is_cpu_port(ds, port)) {
+		if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
+			reg |= PORT_CONTROL_DSA_TAG;
+		if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+		    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+		    mv88e6xxx_6320_family(ps)) {
+			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
+			else
+				reg |= PORT_CONTROL_FRAME_MODE_DSA;
+			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+				PORT_CONTROL_FORWARD_UNKNOWN_MC;
+		}
+
+		if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+		    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+		    mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
+		    mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
+			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
+		}
+	}
+	if (dsa_is_dsa_port(ds, port)) {
+		if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
+			reg |= PORT_CONTROL_DSA_TAG;
+		if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+		    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+		    mv88e6xxx_6320_family(ps)) {
+			reg |= PORT_CONTROL_FRAME_MODE_DSA;
+		}
+
+		if (port == dsa_upstream_port(ds))
+			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+				PORT_CONTROL_FORWARD_UNKNOWN_MC;
+	}
+	if (reg) {
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+					   PORT_CONTROL, reg);
+		if (ret)
+			return ret;
+	}
+
+	/* If this port is connected to a SerDes, make sure the SerDes is not
+	 * powered down.
+	 */
+	if (mv88e6xxx_6352_family(ps)) {
+		ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
+		if (ret < 0)
+			return ret;
+		ret &= PORT_STATUS_CMODE_MASK;
+		if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
+		    (ret == PORT_STATUS_CMODE_1000BASE_X) ||
+		    (ret == PORT_STATUS_CMODE_SGMII)) {
+			ret = mv88e6xxx_power_on_serdes(ps);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	/* Port Control 2: don't force a good FCS, set the maximum frame size to
+	 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
+	 * untagged frames on this port, do a destination address lookup on all
+	 * received packets as usual, disable ARP mirroring and don't send a
+	 * copy of all transmitted/received frames on this port to the CPU.
+	 */
+	reg = 0;
+	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+	    mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) ||
+	    mv88e6xxx_6185_family(ps))
+		reg = PORT_CONTROL_2_MAP_DA;
+
+	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps))
+		reg |= PORT_CONTROL_2_JUMBO_10240;
+
+	if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) {
+		/* Set the upstream port this port should use */
+		reg |= dsa_upstream_port(ds);
+		/* enable forwarding of unknown multicast addresses to
+		 * the upstream port
+		 */
+		if (port == dsa_upstream_port(ds))
+			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+	}
+
+	reg |= PORT_CONTROL_2_8021Q_DISABLED;
+
+	if (reg) {
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+					   PORT_CONTROL_2, reg);
+		if (ret)
+			return ret;
+	}
+
+	/* Port Association Vector: when learning source addresses
+	 * of packets, add the address to the address database using
+	 * a port bitmap that has only the bit for this port set and
+	 * the other bits clear.
+	 */
+	reg = 1 << port;
+	/* Disable learning for CPU port */
+	if (dsa_is_cpu_port(ds, port))
+		reg = 0;
+
+	ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
+	if (ret)
+		return ret;
+
+	/* Egress rate control 2: disable egress rate control. */
+	ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2,
+				   0x0000);
+	if (ret)
+		return ret;
+
+	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+	    mv88e6xxx_6320_family(ps)) {
+		/* Do not limit the period of time that this port can
+		 * be paused for by the remote end or the period of
+		 * time that this port can pause the remote end.
+		 */
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+					   PORT_PAUSE_CTRL, 0x0000);
+		if (ret)
+			return ret;
+
+		/* Port ATU control: disable limiting the number of
+		 * address database entries that this port is allowed
+		 * to use.
+		 */
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+					   PORT_ATU_CONTROL, 0x0000);
+		/* Priority Override: disable DA, SA and VTU priority
+		 * override.
+		 */
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+					   PORT_PRI_OVERRIDE, 0x0000);
+		if (ret)
+			return ret;
+
+		/* Port Ethertype: use the Ethertype DSA Ethertype
+		 * value.
+		 */
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+					   PORT_ETH_TYPE, ETH_P_EDSA);
+		if (ret)
+			return ret;
+		/* Tag Remap: use an identity 802.1p prio -> switch
+		 * prio mapping.
+		 */
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+					   PORT_TAG_REGMAP_0123, 0x3210);
+		if (ret)
+			return ret;
+
+		/* Tag Remap 2: use an identity 802.1p prio -> switch
+		 * prio mapping.
+		 */
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+					   PORT_TAG_REGMAP_4567, 0x7654);
+		if (ret)
+			return ret;
+	}
+
+	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+	    mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+	    mv88e6xxx_6320_family(ps)) {
+		/* Rate Control: disable ingress rate limiting. */
+		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
+					   PORT_RATE_CONTROL, 0x0001);
+		if (ret)
+			return ret;
+	}
+
+	/* Port Control 1: disable trunking, disable sending
+	 * learning messages to this port.
+	 */
+	ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000);
+	if (ret)
+		return ret;
+
+	/* Port based VLAN map: give each port the same default address
+	 * database, and allow bidirectional communication between the
+	 * CPU and DSA port(s), and the other ports.
+	 */
+	ret = _mv88e6xxx_port_fid_set(ps, port, 0);
+	if (ret)
+		return ret;
+
+	ret = _mv88e6xxx_port_based_vlan_map(ps, port);
+	if (ret)
+		return ret;
+
+	/* Default VLAN ID and priority: don't set a default VLAN
+	 * ID, and set the default packet priority to zero.
+	 */
+	ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN,
+				   0x0000);
+	if (ret)
+		return ret;
 
 	return 0;
 }
 
+static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
+{
+	struct dsa_switch *ds = ps->ds;
+	u32 upstream_port = dsa_upstream_port(ds);
+	u16 reg;
+	int err;
+	int i;
+
+	/* Enable the PHY Polling Unit if present, don't discard any packets,
+	 * and mask all interrupt sources.
+	 */
+	reg = 0;
+	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU) ||
+	    mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE))
+		reg |= GLOBAL_CONTROL_PPU_ENABLE;
+
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, reg);
+	if (err)
+		return err;
+
+	/* Configure the upstream port, and configure it as the port to which
+	 * ingress and egress and ARP monitor frames are to be sent.
+	 */
+	reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+		upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+		upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
+	if (err)
+		return err;
+
+	/* Disable remote management, and set the switch's DSA device number. */
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2,
+				   GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+				   (ds->index & 0x1f));
+	if (err)
+		return err;
+
+	/* Set the default address aging time to 5 minutes, and
+	 * enable address learn messages to be sent to all message
+	 * ports.
+	 */
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+				   0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
+	if (err)
+		return err;
+
+	/* Configure the IP ToS mapping registers. */
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+	if (err)
+		return err;
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+	if (err)
+		return err;
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+	if (err)
+		return err;
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+	if (err)
+		return err;
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+	if (err)
+		return err;
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+	if (err)
+		return err;
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+	if (err)
+		return err;
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+	if (err)
+		return err;
+
+	/* Configure the IEEE 802.1p priority mapping register. */
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+	if (err)
+		return err;
+
+	/* Send all frames with destination addresses matching
+	 * 01:80:c2:00:00:0x to the CPU port.
+	 */
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+	if (err)
+		return err;
+
+	/* Ignore removed tag data on doubly tagged packets, disable
+	 * flow control messages, force flow control priority to the
+	 * highest, and send all special multicast frames to the CPU
+	 * port at the highest priority.
+	 */
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
+				   0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
+				   GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+	if (err)
+		return err;
+
+	/* Program the DSA routing table. */
+	for (i = 0; i < 32; i++) {
+		int nexthop = 0x1f;
+
+		if (ps->ds->cd->rtable &&
+		    i != ps->ds->index && i < ps->ds->dst->pd->nr_chips)
+			nexthop = ps->ds->cd->rtable[i] & 0x1f;
+
+		err = _mv88e6xxx_reg_write(
+			ps, REG_GLOBAL2,
+			GLOBAL2_DEVICE_MAPPING,
+			GLOBAL2_DEVICE_MAPPING_UPDATE |
+			(i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop);
+		if (err)
+			return err;
+	}
+
+	/* Clear all trunk masks. */
+	for (i = 0; i < 8; i++) {
+		err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
+					   0x8000 |
+					   (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
+					   ((1 << ps->info->num_ports) - 1));
+		if (err)
+			return err;
+	}
+
+	/* Clear all trunk mappings. */
+	for (i = 0; i < 16; i++) {
+		err = _mv88e6xxx_reg_write(
+			ps, REG_GLOBAL2,
+			GLOBAL2_TRUNK_MAPPING,
+			GLOBAL2_TRUNK_MAPPING_UPDATE |
+			(i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
+		if (err)
+			return err;
+	}
+
+	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+	    mv88e6xxx_6320_family(ps)) {
+		/* Send all frames with destination addresses matching
+		 * 01:80:c2:00:00:2x to the CPU port.
+		 */
+		err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+					   GLOBAL2_MGMT_EN_2X, 0xffff);
+		if (err)
+			return err;
+
+		/* Initialise cross-chip port VLAN table to reset
+		 * defaults.
+		 */
+		err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+					   GLOBAL2_PVT_ADDR, 0x9000);
+		if (err)
+			return err;
+
+		/* Clear the priority override table. */
+		for (i = 0; i < 16; i++) {
+			err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+						   GLOBAL2_PRIO_OVERRIDE,
+						   0x8000 | (i << 8));
+			if (err)
+				return err;
+		}
+	}
+
+	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+	    mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+	    mv88e6xxx_6320_family(ps)) {
+		/* Disable ingress rate limiting by resetting all
+		 * ingress rate limit registers to their initial
+		 * state.
+		 */
+		for (i = 0; i < ps->info->num_ports; i++) {
+			err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+						   GLOBAL2_INGRESS_OP,
+						   0x9000 | (i << 8));
+			if (err)
+				return err;
+		}
+	}
+
+	/* Clear the statistics counters for all ports */
+	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
+				   GLOBAL_STATS_OP_FLUSH_ALL);
+	if (err)
+		return err;
+
+	/* Wait for the flush to complete. */
+	err = _mv88e6xxx_stats_wait(ps);
+	if (err)
+		return err;
+
+	/* Clear all ATU entries */
+	err = _mv88e6xxx_atu_flush(ps, 0, true);
+	if (err)
+		return err;
+
+	/* Clear all the VTU and STU entries */
+	err = _mv88e6xxx_vtu_stu_flush(ps);
+	if (err < 0)
+		return err;
+
+	return err;
+}
+
+static int mv88e6xxx_setup(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int err;
+	int i;
+
+	ps->ds = ds;
+
+	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+		mutex_init(&ps->eeprom_mutex);
+
+	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+		mv88e6xxx_ppu_state_init(ps);
+
+	mutex_lock(&ps->smi_mutex);
+
+	err = mv88e6xxx_switch_reset(ps);
+	if (err)
+		goto unlock;
+
+	err = mv88e6xxx_setup_global(ps);
+	if (err)
+		goto unlock;
+
+	for (i = 0; i < ps->info->num_ports; i++) {
+		err = mv88e6xxx_setup_port(ps, i);
+		if (err)
+			goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&ps->smi_mutex);
+
+	return err;
+}
+
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
-	if (ret < 0)
-		goto error;
-	ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
-error:
-	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+	ret = _mv88e6xxx_phy_page_read(ps, port, page, reg);
 	mutex_unlock(&ps->smi_mutex);
+
 	return ret;
 }
 
@@ -2731,87 +3173,61 @@
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
-	if (ret < 0)
-		goto error;
-
-	ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
-error:
-	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+	ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val);
 	mutex_unlock(&ps->smi_mutex);
+
 	return ret;
 }
 
-static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
+static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps,
+				      int port)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-	if (port >= 0 && port < ps->num_ports)
+	if (port >= 0 && port < ps->info->num_ports)
 		return port;
 	return -EINVAL;
 }
 
-int
-mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
+static int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+	int addr = mv88e6xxx_port_to_phy_addr(ps, port);
 	int ret;
 
 	if (addr < 0)
-		return addr;
+		return 0xffff;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_phy_read(ds, addr, regnum);
+
+	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+		ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum);
+	else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
+		ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum);
+	else
+		ret = _mv88e6xxx_phy_read(ps, addr, regnum);
+
 	mutex_unlock(&ps->smi_mutex);
 	return ret;
 }
 
-int
-mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+static int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum,
+			       u16 val)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+	int addr = mv88e6xxx_port_to_phy_addr(ps, port);
 	int ret;
 
 	if (addr < 0)
-		return addr;
+		return 0xffff;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
-	mutex_unlock(&ps->smi_mutex);
-	return ret;
-}
 
-int
-mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
-	int ret;
+	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+		ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val);
+	else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
+		ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val);
+	else
+		ret = _mv88e6xxx_phy_write(ps, addr, regnum, val);
 
-	if (addr < 0)
-		return addr;
-
-	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
-	mutex_unlock(&ps->smi_mutex);
-	return ret;
-}
-
-int
-mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
-			     u16 val)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
-	int ret;
-
-	if (addr < 0)
-		return addr;
-
-	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
 	mutex_unlock(&ps->smi_mutex);
 	return ret;
 }
@@ -2828,44 +3244,45 @@
 
 	mutex_lock(&ps->smi_mutex);
 
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+	ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6);
 	if (ret < 0)
 		goto error;
 
 	/* Enable temperature sensor */
-	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+	ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
 	if (ret < 0)
 		goto error;
 
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+	ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5));
 	if (ret < 0)
 		goto error;
 
 	/* Wait for temperature to stabilize */
 	usleep_range(10000, 12000);
 
-	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+	val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
 	if (val < 0) {
 		ret = val;
 		goto error;
 	}
 
 	/* Disable temperature sensor */
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+	ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret & ~(1 << 5));
 	if (ret < 0)
 		goto error;
 
 	*temp = ((val & 0x1f) - 5) * 5;
 
 error:
-	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+	_mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0);
 	mutex_unlock(&ps->smi_mutex);
 	return ret;
 }
 
 static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
 {
-	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
 	int ret;
 
 	*temp = 0;
@@ -2879,20 +3296,26 @@
 	return 0;
 }
 
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
 {
-	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP))
+		return -EOPNOTSUPP;
+
+	if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
 		return mv88e63xx_get_temp(ds, temp);
 
 	return mv88e61xx_get_temp(ds, temp);
 }
 
-int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
 {
-	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
 	int ret;
 
-	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
 		return -EOPNOTSUPP;
 
 	*temp = 0;
@@ -2906,12 +3329,13 @@
 	return 0;
 }
 
-int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
 {
-	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
 	int ret;
 
-	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
 		return -EOPNOTSUPP;
 
 	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
@@ -2922,12 +3346,13 @@
 					(ret & 0xe0ff) | (temp << 8));
 }
 
-int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
 {
-	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
 	int ret;
 
-	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
 		return -EOPNOTSUPP;
 
 	*alarm = false;
@@ -2942,70 +3367,354 @@
 }
 #endif /* CONFIG_NET_DSA_HWMON */
 
-char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
-			    const struct mv88e6xxx_switch_id *table,
-			    unsigned int num)
+static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+	[MV88E6085] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
+		.family = MV88E6XXX_FAMILY_6097,
+		.name = "Marvell 88E6085",
+		.num_databases = 4096,
+		.num_ports = 10,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6097,
+	},
+
+	[MV88E6095] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6095,
+		.family = MV88E6XXX_FAMILY_6095,
+		.name = "Marvell 88E6095/88E6095F",
+		.num_databases = 256,
+		.num_ports = 11,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6095,
+	},
+
+	[MV88E6123] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6123,
+		.family = MV88E6XXX_FAMILY_6165,
+		.name = "Marvell 88E6123",
+		.num_databases = 4096,
+		.num_ports = 3,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+	},
+
+	[MV88E6131] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6131,
+		.family = MV88E6XXX_FAMILY_6185,
+		.name = "Marvell 88E6131",
+		.num_databases = 256,
+		.num_ports = 8,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6185,
+	},
+
+	[MV88E6161] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6161,
+		.family = MV88E6XXX_FAMILY_6165,
+		.name = "Marvell 88E6161",
+		.num_databases = 4096,
+		.num_ports = 6,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+	},
+
+	[MV88E6165] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6165,
+		.family = MV88E6XXX_FAMILY_6165,
+		.name = "Marvell 88E6165",
+		.num_databases = 4096,
+		.num_ports = 6,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6165,
+	},
+
+	[MV88E6171] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6171,
+		.family = MV88E6XXX_FAMILY_6351,
+		.name = "Marvell 88E6171",
+		.num_databases = 4096,
+		.num_ports = 7,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+	},
+
+	[MV88E6172] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6172,
+		.family = MV88E6XXX_FAMILY_6352,
+		.name = "Marvell 88E6172",
+		.num_databases = 4096,
+		.num_ports = 7,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+	},
+
+	[MV88E6175] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6175,
+		.family = MV88E6XXX_FAMILY_6351,
+		.name = "Marvell 88E6175",
+		.num_databases = 4096,
+		.num_ports = 7,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+	},
+
+	[MV88E6176] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6176,
+		.family = MV88E6XXX_FAMILY_6352,
+		.name = "Marvell 88E6176",
+		.num_databases = 4096,
+		.num_ports = 7,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+	},
+
+	[MV88E6185] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6185,
+		.family = MV88E6XXX_FAMILY_6185,
+		.name = "Marvell 88E6185",
+		.num_databases = 256,
+		.num_ports = 10,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6185,
+	},
+
+	[MV88E6240] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6240,
+		.family = MV88E6XXX_FAMILY_6352,
+		.name = "Marvell 88E6240",
+		.num_databases = 4096,
+		.num_ports = 7,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+	},
+
+	[MV88E6320] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6320,
+		.family = MV88E6XXX_FAMILY_6320,
+		.name = "Marvell 88E6320",
+		.num_databases = 4096,
+		.num_ports = 7,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6320,
+	},
+
+	[MV88E6321] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6321,
+		.family = MV88E6XXX_FAMILY_6320,
+		.name = "Marvell 88E6321",
+		.num_databases = 4096,
+		.num_ports = 7,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6320,
+	},
+
+	[MV88E6350] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
+		.family = MV88E6XXX_FAMILY_6351,
+		.name = "Marvell 88E6350",
+		.num_databases = 4096,
+		.num_ports = 7,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+	},
+
+	[MV88E6351] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6351,
+		.family = MV88E6XXX_FAMILY_6351,
+		.name = "Marvell 88E6351",
+		.num_databases = 4096,
+		.num_ports = 7,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6351,
+	},
+
+	[MV88E6352] = {
+		.prod_num = PORT_SWITCH_ID_PROD_NUM_6352,
+		.family = MV88E6XXX_FAMILY_6352,
+		.name = "Marvell 88E6352",
+		.num_databases = 4096,
+		.num_ports = 7,
+		.flags = MV88E6XXX_FLAGS_FAMILY_6352,
+	},
+};
+
+static const struct mv88e6xxx_info *
+mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
+		      unsigned int num)
 {
-	struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
-	int i, ret;
+	int i;
 
-	if (!bus)
-		return NULL;
-
-	ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
-	if (ret < 0)
-		return NULL;
-
-	/* Look up the exact switch ID */
 	for (i = 0; i < num; ++i)
-		if (table[i].id == ret)
-			return table[i].name;
-
-	/* Look up only the product number */
-	for (i = 0; i < num; ++i) {
-		if (table[i].id == (ret & PORT_SWITCH_ID_PROD_NUM_MASK)) {
-			dev_warn(host_dev, "unknown revision %d, using base switch 0x%x\n",
-				 ret & PORT_SWITCH_ID_REV_MASK,
-				 ret & PORT_SWITCH_ID_PROD_NUM_MASK);
-			return table[i].name;
-		}
-	}
+		if (table[i].prod_num == prod_num)
+			return &table[i];
 
 	return NULL;
 }
 
+static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
+				       struct device *host_dev, int sw_addr,
+				       void **priv)
+{
+	const struct mv88e6xxx_info *info;
+	struct mv88e6xxx_priv_state *ps;
+	struct mii_bus *bus;
+	const char *name;
+	int id, prod_num, rev;
+
+	bus = dsa_host_dev_to_mii_bus(host_dev);
+	if (!bus)
+		return NULL;
+
+	id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
+	if (id < 0)
+		return NULL;
+
+	prod_num = (id & 0xfff0) >> 4;
+	rev = id & 0x000f;
+
+	info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
+				     ARRAY_SIZE(mv88e6xxx_table));
+	if (!info)
+		return NULL;
+
+	name = info->name;
+
+	ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
+	if (!ps)
+		return NULL;
+
+	ps->bus = bus;
+	ps->sw_addr = sw_addr;
+	ps->info = info;
+	mutex_init(&ps->smi_mutex);
+
+	*priv = ps;
+
+	dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
+		 prod_num, name, rev);
+
+	return name;
+}
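The probe above splits the 16-bit PORT_SWITCH_ID value into a 12-bit product number and a 4-bit revision. A minimal sketch of that decode, reusing only values visible in this patch (the helper name is illustrative):

/* Sketch: decode a PORT_SWITCH_ID value the way mv88e6xxx_drv_probe() does.
 * Example: 0x3521 -> prod_num 0x352 (PORT_SWITCH_ID_PROD_NUM_6352), rev 1.
 */
static void example_decode_switch_id(u16 id)
{
	unsigned int prod_num = (id & 0xfff0) >> 4;	/* bits 15:4 */
	unsigned int rev = id & 0x000f;			/* bits 3:0 */

	pr_info("prod_num=0x%03x rev=%u\n", prod_num, rev);
}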
+
+struct dsa_switch_driver mv88e6xxx_switch_driver = {
+	.tag_protocol		= DSA_TAG_PROTO_EDSA,
+	.probe			= mv88e6xxx_drv_probe,
+	.setup			= mv88e6xxx_setup,
+	.set_addr		= mv88e6xxx_set_addr,
+	.phy_read		= mv88e6xxx_phy_read,
+	.phy_write		= mv88e6xxx_phy_write,
+	.adjust_link		= mv88e6xxx_adjust_link,
+	.get_strings		= mv88e6xxx_get_strings,
+	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
+	.get_sset_count		= mv88e6xxx_get_sset_count,
+	.set_eee		= mv88e6xxx_set_eee,
+	.get_eee		= mv88e6xxx_get_eee,
+#ifdef CONFIG_NET_DSA_HWMON
+	.get_temp		= mv88e6xxx_get_temp,
+	.get_temp_limit		= mv88e6xxx_get_temp_limit,
+	.set_temp_limit		= mv88e6xxx_set_temp_limit,
+	.get_temp_alarm		= mv88e6xxx_get_temp_alarm,
+#endif
+	.get_eeprom_len		= mv88e6xxx_get_eeprom_len,
+	.get_eeprom		= mv88e6xxx_get_eeprom,
+	.set_eeprom		= mv88e6xxx_set_eeprom,
+	.get_regs_len		= mv88e6xxx_get_regs_len,
+	.get_regs		= mv88e6xxx_get_regs,
+	.port_bridge_join	= mv88e6xxx_port_bridge_join,
+	.port_bridge_leave	= mv88e6xxx_port_bridge_leave,
+	.port_stp_state_set	= mv88e6xxx_port_stp_state_set,
+	.port_vlan_filtering	= mv88e6xxx_port_vlan_filtering,
+	.port_vlan_prepare	= mv88e6xxx_port_vlan_prepare,
+	.port_vlan_add		= mv88e6xxx_port_vlan_add,
+	.port_vlan_del		= mv88e6xxx_port_vlan_del,
+	.port_vlan_dump		= mv88e6xxx_port_vlan_dump,
+	.port_fdb_prepare       = mv88e6xxx_port_fdb_prepare,
+	.port_fdb_add           = mv88e6xxx_port_fdb_add,
+	.port_fdb_del           = mv88e6xxx_port_fdb_del,
+	.port_fdb_dump          = mv88e6xxx_port_fdb_dump,
+};
+
+int mv88e6xxx_probe(struct mdio_device *mdiodev)
+{
+	struct device *dev = &mdiodev->dev;
+	struct device_node *np = dev->of_node;
+	struct mv88e6xxx_priv_state *ps;
+	int id, prod_num, rev;
+	struct dsa_switch *ds;
+	u32 eeprom_len;
+	int err;
+
+	ds = devm_kzalloc(dev, sizeof(*ds) + sizeof(*ps), GFP_KERNEL);
+	if (!ds)
+		return -ENOMEM;
+
+	ps = (struct mv88e6xxx_priv_state *)(ds + 1);
+	ds->priv = ps;
+	ds->dev = dev;
+	ps->dev = dev;
+	ps->ds = ds;
+	ps->bus = mdiodev->bus;
+	ps->sw_addr = mdiodev->addr;
+	mutex_init(&ps->smi_mutex);
+
+	get_device(&ps->bus->dev);
+
+	ds->drv = &mv88e6xxx_switch_driver;
+
+	id = mv88e6xxx_reg_read(ps, REG_PORT(0), PORT_SWITCH_ID);
+	if (id < 0)
+		return id;
+
+	prod_num = (id & 0xfff0) >> 4;
+	rev = id & 0x000f;
+
+	ps->info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
+					 ARRAY_SIZE(mv88e6xxx_table));
+	if (!ps->info)
+		return -ENODEV;
+
+	ps->reset = devm_gpiod_get(&mdiodev->dev, "reset", GPIOD_ASIS);
+	if (IS_ERR(ps->reset)) {
+		err = PTR_ERR(ps->reset);
+		if (err == -ENOENT) {
+			/* Optional, so not an error */
+			ps->reset = NULL;
+		} else {
+			return err;
+		}
+	}
+
+	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM) &&
+	    !of_property_read_u32(np, "eeprom-length", &eeprom_len))
+		ps->eeprom_len = eeprom_len;
+
+	dev_set_drvdata(dev, ds);
+
+	dev_info(dev, "switch 0x%x probed: %s, revision %u\n",
+		 prod_num, ps->info->name, rev);
+
+	return 0;
+}
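mv88e6xxx_probe() makes a single devm allocation that holds both the dsa_switch and the driver's private state, with ps pointing just past ds. A rough picture of the layout produced by the devm_kzalloc()/(ds + 1) pair above (illustration only):

/* Layout of the single allocation:
 *
 *   ds                          ps = (struct mv88e6xxx_priv_state *)(ds + 1)
 *   |                           |
 *   v                           v
 *   +---------------------------+---------------------------------+
 *   | struct dsa_switch         | struct mv88e6xxx_priv_state     |
 *   +---------------------------+---------------------------------+
 *
 * Both pieces are device-managed, so neither needs an explicit free on the
 * error paths or in mv88e6xxx_remove().
 */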
+
+static void mv88e6xxx_remove(struct mdio_device *mdiodev)
+{
+	struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	put_device(&ps->bus->dev);
+}
+
+static const struct of_device_id mv88e6xxx_of_match[] = {
+	{ .compatible = "marvell,mv88e6085" },
+	{ /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
+
+static struct mdio_driver mv88e6xxx_driver = {
+	.probe	= mv88e6xxx_probe,
+	.remove = mv88e6xxx_remove,
+	.mdiodrv.driver = {
+		.name = "mv88e6085",
+		.of_match_table = mv88e6xxx_of_match,
+	},
+};
+
 static int __init mv88e6xxx_init(void)
 {
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
-	register_switch_driver(&mv88e6131_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
-	register_switch_driver(&mv88e6123_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
-	register_switch_driver(&mv88e6352_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
-	register_switch_driver(&mv88e6171_switch_driver);
-#endif
-	return 0;
+	register_switch_driver(&mv88e6xxx_switch_driver);
+	return mdio_driver_register(&mv88e6xxx_driver);
 }
 module_init(mv88e6xxx_init);
 
 static void __exit mv88e6xxx_cleanup(void)
 {
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
-	unregister_switch_driver(&mv88e6171_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
-	unregister_switch_driver(&mv88e6352_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
-	unregister_switch_driver(&mv88e6123_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
-	unregister_switch_driver(&mv88e6131_switch_driver);
-#endif
+	mdio_driver_unregister(&mv88e6xxx_driver);
+	unregister_switch_driver(&mv88e6xxx_switch_driver);
 }
 module_exit(mv88e6xxx_cleanup);
 
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 9a038ab..36d0e15 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -12,6 +12,7 @@
 #define __MV88E6XXX_H
 
 #include <linux/if_vlan.h>
+#include <linux/gpio/consumer.h>
 
 #ifndef UINT64_MAX
 #define UINT64_MAX		(u64)(~((u64)0))
@@ -28,6 +29,10 @@
 #define SMI_CMD_OP_45_READ_DATA_INC	((3 << 10) | SMI_CMD_BUSY)
 #define SMI_DATA		0x01
 
+/* Fiber/SERDES Registers are located at SMI address F, page 1 */
+#define REG_FIBER_SERDES	0x0f
+#define PAGE_FIBER_SERDES	0x01
+
 #define REG_PORT(p)		(0x10 + (p))
 #define PORT_STATUS		0x00
 #define PORT_STATUS_PAUSE_EN	BIT(15)
@@ -45,6 +50,10 @@
 #define PORT_STATUS_MGMII	BIT(6) /* 6185 */
 #define PORT_STATUS_TX_PAUSED	BIT(5)
 #define PORT_STATUS_FLOW_CTRL	BIT(4)
+#define PORT_STATUS_CMODE_MASK	0x0f
+#define PORT_STATUS_CMODE_100BASE_X	0x8
+#define PORT_STATUS_CMODE_1000BASE_X	0x9
+#define PORT_STATUS_CMODE_SGMII		0xa
 #define PORT_PCS_CTRL		0x01
 #define PORT_PCS_CTRL_RGMII_DELAY_RXCLK	BIT(15)
 #define PORT_PCS_CTRL_RGMII_DELAY_TXCLK	BIT(14)
@@ -60,52 +69,23 @@
 #define PORT_PCS_CTRL_UNFORCED		0x03
 #define PORT_PAUSE_CTRL		0x02
 #define PORT_SWITCH_ID		0x03
-#define PORT_SWITCH_ID_PROD_NUM_MASK	0xfff0
-#define PORT_SWITCH_ID_REV_MASK		0x000f
-#define PORT_SWITCH_ID_6031	0x0310
-#define PORT_SWITCH_ID_6035	0x0350
-#define PORT_SWITCH_ID_6046	0x0480
-#define PORT_SWITCH_ID_6061	0x0610
-#define PORT_SWITCH_ID_6065	0x0650
-#define PORT_SWITCH_ID_6085	0x04a0
-#define PORT_SWITCH_ID_6092	0x0970
-#define PORT_SWITCH_ID_6095	0x0950
-#define PORT_SWITCH_ID_6096	0x0980
-#define PORT_SWITCH_ID_6097	0x0990
-#define PORT_SWITCH_ID_6108	0x1070
-#define PORT_SWITCH_ID_6121	0x1040
-#define PORT_SWITCH_ID_6122	0x1050
-#define PORT_SWITCH_ID_6123	0x1210
-#define PORT_SWITCH_ID_6123_A1	0x1212
-#define PORT_SWITCH_ID_6123_A2	0x1213
-#define PORT_SWITCH_ID_6131	0x1060
-#define PORT_SWITCH_ID_6131_B2	0x1066
-#define PORT_SWITCH_ID_6152	0x1a40
-#define PORT_SWITCH_ID_6155	0x1a50
-#define PORT_SWITCH_ID_6161	0x1610
-#define PORT_SWITCH_ID_6161_A1	0x1612
-#define PORT_SWITCH_ID_6161_A2	0x1613
-#define PORT_SWITCH_ID_6165	0x1650
-#define PORT_SWITCH_ID_6165_A1	0x1652
-#define PORT_SWITCH_ID_6165_A2	0x1653
-#define PORT_SWITCH_ID_6171	0x1710
-#define PORT_SWITCH_ID_6172	0x1720
-#define PORT_SWITCH_ID_6175	0x1750
-#define PORT_SWITCH_ID_6176	0x1760
-#define PORT_SWITCH_ID_6182	0x1a60
-#define PORT_SWITCH_ID_6185	0x1a70
-#define PORT_SWITCH_ID_6240	0x2400
-#define PORT_SWITCH_ID_6320	0x1150
-#define PORT_SWITCH_ID_6320_A1	0x1151
-#define PORT_SWITCH_ID_6320_A2	0x1152
-#define PORT_SWITCH_ID_6321	0x3100
-#define PORT_SWITCH_ID_6321_A1	0x3101
-#define PORT_SWITCH_ID_6321_A2	0x3102
-#define PORT_SWITCH_ID_6350	0x3710
-#define PORT_SWITCH_ID_6351	0x3750
-#define PORT_SWITCH_ID_6352	0x3520
-#define PORT_SWITCH_ID_6352_A0	0x3521
-#define PORT_SWITCH_ID_6352_A1	0x3522
+#define PORT_SWITCH_ID_PROD_NUM_6085	0x04a
+#define PORT_SWITCH_ID_PROD_NUM_6095	0x095
+#define PORT_SWITCH_ID_PROD_NUM_6131	0x106
+#define PORT_SWITCH_ID_PROD_NUM_6320	0x115
+#define PORT_SWITCH_ID_PROD_NUM_6123	0x121
+#define PORT_SWITCH_ID_PROD_NUM_6161	0x161
+#define PORT_SWITCH_ID_PROD_NUM_6165	0x165
+#define PORT_SWITCH_ID_PROD_NUM_6171	0x171
+#define PORT_SWITCH_ID_PROD_NUM_6172	0x172
+#define PORT_SWITCH_ID_PROD_NUM_6175	0x175
+#define PORT_SWITCH_ID_PROD_NUM_6176	0x176
+#define PORT_SWITCH_ID_PROD_NUM_6185	0x1a7
+#define PORT_SWITCH_ID_PROD_NUM_6240	0x240
+#define PORT_SWITCH_ID_PROD_NUM_6321	0x310
+#define PORT_SWITCH_ID_PROD_NUM_6352	0x352
+#define PORT_SWITCH_ID_PROD_NUM_6350	0x371
+#define PORT_SWITCH_ID_PROD_NUM_6351	0x375
 #define PORT_CONTROL		0x04
 #define PORT_CONTROL_USE_CORE_TAG	BIT(15)
 #define PORT_CONTROL_DROP_ON_LOCK	BIT(14)
@@ -359,9 +339,187 @@
 
 #define MV88E6XXX_N_FID		4096
 
-struct mv88e6xxx_switch_id {
-	u16 id;
-	char *name;
+/* List of supported models */
+enum mv88e6xxx_model {
+	MV88E6085,
+	MV88E6095,
+	MV88E6123,
+	MV88E6131,
+	MV88E6161,
+	MV88E6165,
+	MV88E6171,
+	MV88E6172,
+	MV88E6175,
+	MV88E6176,
+	MV88E6185,
+	MV88E6240,
+	MV88E6320,
+	MV88E6321,
+	MV88E6350,
+	MV88E6351,
+	MV88E6352,
+};
+
+enum mv88e6xxx_family {
+	MV88E6XXX_FAMILY_NONE,
+	MV88E6XXX_FAMILY_6065,	/* 6031 6035 6061 6065 */
+	MV88E6XXX_FAMILY_6095,	/* 6092 6095 */
+	MV88E6XXX_FAMILY_6097,	/* 6046 6085 6096 6097 */
+	MV88E6XXX_FAMILY_6165,	/* 6123 6161 6165 */
+	MV88E6XXX_FAMILY_6185,	/* 6108 6121 6122 6131 6152 6155 6182 6185 */
+	MV88E6XXX_FAMILY_6320,	/* 6320 6321 */
+	MV88E6XXX_FAMILY_6351,	/* 6171 6175 6350 6351 */
+	MV88E6XXX_FAMILY_6352,	/* 6172 6176 6240 6352 */
+};
+
+enum mv88e6xxx_cap {
+	/* Address Translation Unit.
+	 * The ATU is used to look up and learn MAC addresses. See GLOBAL_ATU_OP.
+	 */
+	MV88E6XXX_CAP_ATU,
+
+	/* Energy Efficient Ethernet.
+	 */
+	MV88E6XXX_CAP_EEE,
+
+	/* EEPROM Command and Data registers.
+	 * See GLOBAL2_EEPROM_OP and GLOBAL2_EEPROM_DATA.
+	 */
+	MV88E6XXX_CAP_EEPROM,
+
+	/* Port State Filtering for 802.1D Spanning Tree.
+	 * See PORT_CONTROL_STATE_* values in the PORT_CONTROL register.
+	 */
+	MV88E6XXX_CAP_PORTSTATE,
+
+	/* PHY Polling Unit.
+	 * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING.
+	 */
+	MV88E6XXX_CAP_PPU,
+	MV88E6XXX_CAP_PPU_ACTIVE,
+
+	/* SMI PHY Command and Data registers.
+	 * This requires an indirect access to PHY registers through
+	 * GLOBAL2_SMI_OP, otherwise direct access to PHY registers is done.
+	 */
+	MV88E6XXX_CAP_SMI_PHY,
+
+	/* Per VLAN Spanning Tree Unit (STU).
+	 * The Port State database, if present, is accessed through VTU
+	 * operations and dedicated SID registers. See GLOBAL_VTU_SID.
+	 */
+	MV88E6XXX_CAP_STU,
+
+	/* Switch MAC/WoL/WoF register.
+	 * This requires an indirect access to set the switch MAC address
+	 * through GLOBAL2_SWITCH_MAC, otherwise GLOBAL_MAC_01, GLOBAL_MAC_23,
+	 * and GLOBAL_MAC_45 are used with a direct access.
+	 */
+	MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF,
+
+	/* Internal temperature sensor.
+	 * Available from any enabled port's PHY register 26, page 6.
+	 */
+	MV88E6XXX_CAP_TEMP,
+	MV88E6XXX_CAP_TEMP_LIMIT,
+
+	/* In-chip Port Based VLANs.
+	 * Each port VLANTable register (see PORT_BASE_VLAN) is used to restrict
+	 * the output (or egress) ports to which it is allowed to send frames.
+	 */
+	MV88E6XXX_CAP_VLANTABLE,
+
+	/* VLAN Table Unit.
+	 * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP.
+	 */
+	MV88E6XXX_CAP_VTU,
+};
+
+/* Bitmask of capabilities */
+#define MV88E6XXX_FLAG_ATU		BIT(MV88E6XXX_CAP_ATU)
+#define MV88E6XXX_FLAG_EEE		BIT(MV88E6XXX_CAP_EEE)
+#define MV88E6XXX_FLAG_EEPROM		BIT(MV88E6XXX_CAP_EEPROM)
+#define MV88E6XXX_FLAG_PORTSTATE	BIT(MV88E6XXX_CAP_PORTSTATE)
+#define MV88E6XXX_FLAG_PPU		BIT(MV88E6XXX_CAP_PPU)
+#define MV88E6XXX_FLAG_PPU_ACTIVE	BIT(MV88E6XXX_CAP_PPU_ACTIVE)
+#define MV88E6XXX_FLAG_SMI_PHY		BIT(MV88E6XXX_CAP_SMI_PHY)
+#define MV88E6XXX_FLAG_STU		BIT(MV88E6XXX_CAP_STU)
+#define MV88E6XXX_FLAG_SWITCH_MAC	BIT(MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF)
+#define MV88E6XXX_FLAG_TEMP		BIT(MV88E6XXX_CAP_TEMP)
+#define MV88E6XXX_FLAG_TEMP_LIMIT	BIT(MV88E6XXX_CAP_TEMP_LIMIT)
+#define MV88E6XXX_FLAG_VLANTABLE	BIT(MV88E6XXX_CAP_VLANTABLE)
+#define MV88E6XXX_FLAG_VTU		BIT(MV88E6XXX_CAP_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6095	\
+	(MV88E6XXX_FLAG_ATU |		\
+	 MV88E6XXX_FLAG_PPU |		\
+	 MV88E6XXX_FLAG_VLANTABLE |	\
+	 MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6097	\
+	(MV88E6XXX_FLAG_ATU |		\
+	 MV88E6XXX_FLAG_PPU |		\
+	 MV88E6XXX_FLAG_STU |		\
+	 MV88E6XXX_FLAG_VLANTABLE |	\
+	 MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6165	\
+	(MV88E6XXX_FLAG_STU |		\
+	 MV88E6XXX_FLAG_SWITCH_MAC |	\
+	 MV88E6XXX_FLAG_TEMP |		\
+	 MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6185	\
+	(MV88E6XXX_FLAG_ATU |		\
+	 MV88E6XXX_FLAG_PPU |		\
+	 MV88E6XXX_FLAG_VLANTABLE |	\
+	 MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6320	\
+	(MV88E6XXX_FLAG_ATU |		\
+	 MV88E6XXX_FLAG_EEE |		\
+	 MV88E6XXX_FLAG_EEPROM |	\
+	 MV88E6XXX_FLAG_PORTSTATE |	\
+	 MV88E6XXX_FLAG_PPU_ACTIVE |	\
+	 MV88E6XXX_FLAG_SMI_PHY |	\
+	 MV88E6XXX_FLAG_SWITCH_MAC |	\
+	 MV88E6XXX_FLAG_TEMP |		\
+	 MV88E6XXX_FLAG_TEMP_LIMIT |	\
+	 MV88E6XXX_FLAG_VLANTABLE |	\
+	 MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6351	\
+	(MV88E6XXX_FLAG_ATU |		\
+	 MV88E6XXX_FLAG_PORTSTATE |	\
+	 MV88E6XXX_FLAG_PPU_ACTIVE |	\
+	 MV88E6XXX_FLAG_SMI_PHY |	\
+	 MV88E6XXX_FLAG_STU |		\
+	 MV88E6XXX_FLAG_SWITCH_MAC |	\
+	 MV88E6XXX_FLAG_TEMP |		\
+	 MV88E6XXX_FLAG_VLANTABLE |	\
+	 MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6352	\
+	(MV88E6XXX_FLAG_ATU |		\
+	 MV88E6XXX_FLAG_EEE |		\
+	 MV88E6XXX_FLAG_EEPROM |	\
+	 MV88E6XXX_FLAG_PORTSTATE |	\
+	 MV88E6XXX_FLAG_PPU_ACTIVE |	\
+	 MV88E6XXX_FLAG_SMI_PHY |	\
+	 MV88E6XXX_FLAG_STU |		\
+	 MV88E6XXX_FLAG_SWITCH_MAC |	\
+	 MV88E6XXX_FLAG_TEMP |		\
+	 MV88E6XXX_FLAG_TEMP_LIMIT |	\
+	 MV88E6XXX_FLAG_VLANTABLE |	\
+	 MV88E6XXX_FLAG_VTU)
+
+struct mv88e6xxx_info {
+	enum mv88e6xxx_family family;
+	u16 prod_num;
+	const char *name;
+	unsigned int num_databases;
+	unsigned int num_ports;
+	unsigned long flags;
 };
 
 struct mv88e6xxx_atu_entry {
@@ -385,17 +543,29 @@
 
 struct mv88e6xxx_priv_port {
 	struct net_device *bridge_dev;
-	u8 state;
 };
 
 struct mv88e6xxx_priv_state {
+	const struct mv88e6xxx_info *info;
+
+	/* The dsa_switch this private structure is related to */
+	struct dsa_switch *ds;
+
+	/* The device this structure is associated with */
+	struct device *dev;
+
 	/* When using multi-chip addressing, this mutex protects
 	 * access to the indirect access registers.  (In single-chip
 	 * mode, this mutex is effectively useless.)
 	 */
 	struct mutex	smi_mutex;
 
-#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
+	/* The MII bus and the address on the bus that is used to
+	 * communicate with the switch
+	 */
+	struct mii_bus *bus;
+	int sw_addr;
+
 	/* Handles automatic disabling and re-enabling of the PHY
 	 * polling unit.
 	 */
@@ -403,7 +573,6 @@
 	int			ppu_disabled;
 	struct work_struct	ppu_work;
 	struct timer_list	ppu_timer;
-#endif
 
 	/* This mutex serialises access to the statistics unit.
 	 * Hold this mutex over snapshot + dump sequences.
@@ -421,14 +590,16 @@
 	 */
 	struct mutex eeprom_mutex;
 
-	int		id; /* switch product id */
-	int		num_ports;	/* number of switch ports */
-
 	struct mv88e6xxx_priv_port	ports[DSA_MAX_PORTS];
 
-	DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS);
+	/* A switch may have a GPIO line tied to its reset pin. Parse
+	 * this from the device tree, and use it before performing
+	 * switch soft reset.
+	 */
+	struct gpio_desc *reset;
 
-	struct work_struct bridge_work;
+	/* set to size of eeprom if supported by the switch */
+	int		eeprom_len;
 };
 
 enum stat_type {
@@ -444,104 +615,10 @@
 	enum stat_type type;
 };
 
-int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
-char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
-			    const struct mv88e6xxx_switch_id *table,
-			    unsigned int num);
-int mv88e6xxx_setup_ports(struct dsa_switch *ds);
-int mv88e6xxx_setup_common(struct dsa_switch *ds);
-int mv88e6xxx_setup_global(struct dsa_switch *ds);
-int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
-int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
-int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
-int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val);
-int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum);
-int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
-				 u16 val);
-void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
-int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
-			    int regnum, u16 val);
-void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
-void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
-				 uint64_t *data);
-int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
-int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
-void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
-			   struct phy_device *phydev);
-int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
-void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
-			struct ethtool_regs *regs, void *_p);
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
-int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
-int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
-int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
-int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
-				 u16 val);
-int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
-int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
-		      struct phy_device *phydev, struct ethtool_eee *e);
-int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
-			       struct net_device *bridge);
-void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port);
-int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
-int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
-				  bool vlan_filtering);
-int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
-				const struct switchdev_obj_port_vlan *vlan,
-				struct switchdev_trans *trans);
-int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
-			    const struct switchdev_obj_port_vlan *vlan,
-			    struct switchdev_trans *trans);
-int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
-			    const struct switchdev_obj_port_vlan *vlan);
-int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
-			     struct switchdev_obj_port_vlan *vlan,
-			     int (*cb)(struct switchdev_obj *obj));
-int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
-			       const struct switchdev_obj_port_fdb *fdb,
-			       struct switchdev_trans *trans);
-int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
-			   const struct switchdev_obj_port_fdb *fdb,
-			   struct switchdev_trans *trans);
-int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
-			   const struct switchdev_obj_port_fdb *fdb);
-int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
-			    struct switchdev_obj_port_fdb *fdb,
-			    int (*cb)(struct switchdev_obj *obj));
-int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
-int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
-			     int reg, int val);
-
-extern struct dsa_switch_driver mv88e6131_switch_driver;
-extern struct dsa_switch_driver mv88e6123_switch_driver;
-extern struct dsa_switch_driver mv88e6352_switch_driver;
-extern struct dsa_switch_driver mv88e6171_switch_driver;
-
-#define REG_READ(addr, reg)						\
-	({								\
-		int __ret;						\
-									\
-		__ret = mv88e6xxx_reg_read(ds, addr, reg);		\
-		if (__ret < 0)						\
-			return __ret;					\
-		__ret;							\
-	})
-
-#define REG_WRITE(addr, reg, val)					\
-	({								\
-		int __ret;						\
-									\
-		__ret = mv88e6xxx_reg_write(ds, addr, reg, val);	\
-		if (__ret < 0)						\
-			return __ret;					\
-	})
-
-
+static inline bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps,
+				 unsigned long flags)
+{
+	return (ps->info->flags & flags) == flags;
+}
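mv88e6xxx_has() returns true only when every requested bit is set, so several capabilities can be tested in one call. A hypothetical usage sketch (this particular flag combination is illustrative, not taken from the driver):

/* Both flags must be present for the branch to run. */
if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM | MV88E6XXX_FLAG_SWITCH_MAC)) {
	/* GLOBAL2_EEPROM_OP and GLOBAL2_SWITCH_MAC are both available here */
}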
 
 #endif
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 7677c74..91ada52 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -699,7 +699,7 @@
 		dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
 		inw(ioaddr + TX_FREE));
 	dev->stats.tx_errors++;
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	/* Issue TX_RESET and TX_START commands. */
 	outw(TxReset, ioaddr + EL3_CMD);
 	outw(TxEnable, ioaddr + EL3_CMD);
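The dev->trans_start = jiffies conversions that follow all switch to the netif_trans_update() helper. Its body is not part of this diff; at roughly this point in time it looks approximately like the sketch below (paraphrased from include/linux/netdevice.h, so treat the exact code as an approximation):

/* Approximate shape of the helper these hunks adopt: it refreshes the
 * TX watchdog timestamp on queue 0 instead of writing dev->trans_start.
 */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}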
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 942fb0d..b26e038 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -992,7 +992,7 @@
 		if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
 			break;
 	outw(TxEnable, ioaddr + EL3_CMD);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 	dev->stats.tx_dropped++;
 	netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index b9948f0..b88afd7 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -700,7 +700,7 @@
 	netdev_notice(dev, "Transmit timed out!\n");
 	dump_status(dev);
 	dev->stats.tx_errors++;
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	/* Issue TX_RESET and TX_START commands. */
 	tc574_wait_for_completion(dev, TxReset);
 	outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index c5a3205..71396e4 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -534,7 +534,7 @@
 	netdev_warn(dev, "Transmit timed out!\n");
 	dump_status(dev);
 	dev->stats.tx_errors++;
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	/* Issue TX_RESET and TX_START commands. */
 	tc589_wait_for_completion(dev, TxReset);
 	outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index d81fced..25c55ab 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1944,7 +1944,7 @@
 	}
 	/* Issue Tx Enable */
 	iowrite16(TxEnable, ioaddr + EL3_CMD);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 }
 
 /*
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index ec6eac1..4ea717d 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -1041,7 +1041,7 @@
 	{
 		ei_local->txing = 1;
 		NS8390_trigger_send(dev, send_length, output_page);
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 		if (output_page == ei_local->tx_start_page) 
 		{
 			ei_local->tx1 = -1;
@@ -1270,7 +1270,7 @@
 		{
 			ei_local->txing = 1;
 			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			ei_local->tx2 = -1,
 			ei_local->lasttx = 2;
 		}
@@ -1287,7 +1287,7 @@
 		{
 			ei_local->txing = 1;
 			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			ei_local->tx1 = -1;
 			ei_local->lasttx = 1;
 		}
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index b96e885..60f8e2c 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -596,7 +596,7 @@
 		if (ei_local->tx2 > 0) {
 			ei_local->txing = 1;
 			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			ei_local->tx2 = -1,
 			ei_local->lasttx = 2;
 		} else
@@ -609,7 +609,7 @@
 		if (ei_local->tx1 > 0) {
 			ei_local->txing = 1;
 			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			ei_local->tx1 = -1;
 			ei_local->lasttx = 1;
 		} else
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index ac72882..1d10696 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1129,7 +1129,7 @@
 
 	/* Trigger an immediate transmit demand. */
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 	netif_wake_queue(dev);
 }
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 74139cb..3d2245f 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1430,7 +1430,7 @@
 	bfin_mac_enable(lp->phydev);
 
 	/* We can accept TX packets again */
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 }
 
 static void bfin_mac_multicast_hash(struct net_device *dev)
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 0907ab6..30defe6 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3349,7 +3349,7 @@
 	struct et131x_adapter *adapter = netdev_priv(netdev);
 
 	/* Save the timestamp for the TX watchdog, prevent a timeout */
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 
 	phy_stop(adapter->phydev);
 	et131x_disable_txrx(netdev);
@@ -3816,7 +3816,7 @@
 		netif_stop_queue(netdev);
 
 	/* Save the timestamp for the TX timeout watchdog */
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 
 	/* TCB is not available */
 	if (tx_ring->used >= NUM_TCB)
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 8d50314..de2c4bf 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -428,7 +428,7 @@
 	emac_reset(db);
 	emac_init_device(dev);
 	/* We can accept TX packets again */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	netif_wake_queue(dev);
 
 	/* Restore previous register address */
@@ -468,7 +468,7 @@
 		       db->membase + EMAC_TX_CTL0_REG);
 
 		/* save the time stamp */
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 	} else if (channel == 1) {
 		/* set TX len */
 		writel(skb->len, db->membase + EMAC_TX_PL1_REG);
@@ -477,7 +477,7 @@
 		       db->membase + EMAC_TX_CTL1_REG);
 
 		/* save the time stamp */
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 	}
 
 	if ((db->tx_fifo_stat & 3) == 3) {
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 66d0b73c..dcf2a1f 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -260,7 +260,7 @@
 
 	load_csrs(lp);
 	lance_init_ring(dev);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	status = init_restart_lance(lp);
 #ifdef DEBUG_DRIVER
 	printk("Lance restart=%d\n", status);
@@ -530,7 +530,7 @@
 {
 	printk("lance_tx_timeout\n");
 	lance_reset(dev);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 }
 EXPORT_SYMBOL_GPL(lance_tx_timeout);
@@ -543,11 +543,13 @@
 	static int outs;
 	unsigned long flags;
 
-	if (!TX_BUFFS_AVAIL)
-		return NETDEV_TX_LOCKED;
-
 	netif_stop_queue(dev);
 
+	if (!TX_BUFFS_AVAIL) {
+		dev_consume_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
 	skblen = skb->len;
 
 #ifdef DEBUG_DRIVER
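This hunk, like the a2065 and atl1c/atl1e changes further down, removes a NETDEV_TX_LOCKED return: the driver now stops its queue and consumes the skb itself rather than asking the core to retry. A reduced sketch of the new pattern (names here are placeholders, not real driver API):

/* Sketch of the replacement pattern. example_tx_buffs_avail() stands in for
 * the driver-specific availability check (TX_BUFFS_AVAIL above); it is not a
 * real helper.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netif_stop_queue(dev);

	if (!example_tx_buffs_avail(dev)) {
		dev_consume_skb_any(skb);	/* drop locally, never NETDEV_TX_LOCKED */
		return NETDEV_TX_OK;
	}

	/* ... hand the frame to hardware, then wake the queue when space frees ... */
	return NETDEV_TX_OK;
}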
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 5613918..a83cd1c 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -512,7 +512,7 @@
 	load_csrs(lp);
 
 	lance_init_ring(dev);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_start_queue(dev);
 
 	status = init_restart_lance(lp);
@@ -547,10 +547,8 @@
 
 	local_irq_save(flags);
 
-	if (!lance_tx_buffs_avail(lp)) {
-		local_irq_restore(flags);
-		return NETDEV_TX_LOCKED;
-	}
+	if (!lance_tx_buffs_avail(lp))
+		goto out_free;
 
 #ifdef DEBUG
 	/* dump the packet */
@@ -573,6 +571,7 @@
 
 	/* Kick the lance: transmit now */
 	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+ out_free:
 	dev_kfree_skb(skb);
 
 	local_irq_restore(flags);
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index b10964e..d2bc8e5 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -764,7 +764,7 @@
 	/* lance_restart, essentially */
 	lance_init_ring(dev);
 	REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index d3977d0..9af309e 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1074,7 +1074,7 @@
 	netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
 	au1000_reset_mac(dev);
 	au1000_init(dev);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index b584b78..b799c7a 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -877,7 +877,7 @@
 
 	lance_init_ring(dev);
 	load_csrs(lp);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	status = init_restart_lance(lp);
 	return status;
 }
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 3a7ebfd..abb1ba2 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -943,7 +943,7 @@
 #endif
 	lance_restart (dev, 0x0043, 1);
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue (dev);
 }
 
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 1cf33ad..cda53db 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -782,7 +782,7 @@
 		if(!p->lock)
 			if (p->tmdnum || !p->xmit_queued)
 				netif_wake_queue(dev);
-		dev->trans_start = jiffies; /* prevent tx timeout */
+		netif_trans_update(dev); /* prevent tx timeout */
 	}
 	else
 		writedatareg(CSR0_STRT | csr0);
@@ -1148,7 +1148,7 @@
 		printk("%02x ",p->tmdhead[i].u.s.status);
 	printk("\n");
 	ni65_lance_reinit(dev);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 27245ef..2807e18 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -851,7 +851,7 @@
 #else /* #if RESET_ON_TIMEOUT */
   pr_cont("NOT resetting card\n");
 #endif /* #if RESET_ON_TIMEOUT */
-  dev->trans_start = jiffies; /* prevent tx timeout */
+  netif_trans_update(dev); /* prevent tx timeout */
   netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 7ccebae..c22bf52 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -448,7 +448,7 @@
 {
 	struct pcnet32_private *lp = netdev_priv(dev);
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	napi_disable(&lp->napi);
 	netif_tx_disable(dev);
 }
@@ -2426,7 +2426,7 @@
 	}
 	pcnet32_restart(dev, CSR0_NORMAL);
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 
 	spin_unlock_irqrestore(&lp->lock, flags);
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 7847638..9b56b40 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -997,7 +997,7 @@
 	}
 	lp->init_ring(dev);
 	load_csrs(lp);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	status = init_restart_lance(lp);
 	return status;
 }
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
index b2124886..472c0fb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
@@ -43,6 +43,7 @@
 static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
 				  struct xgene_cle_dbptr *dbptr, u32 *buf)
 {
+	buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
 	buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
 		 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
 
@@ -412,7 +413,7 @@
 			.branch = {
 				{
 					/* IPV4 */
-					.valid = 0,
+					.valid = 1,
 					.next_packet_pointer = 22,
 					.jump_bw = JMP_FW,
 					.jump_rel = JMP_ABS,
@@ -420,7 +421,7 @@
 					.next_node = PKT_PROT_NODE,
 					.next_branch = 0,
 					.data = 0x8,
-					.mask = 0xffff
+					.mask = 0x0
 				},
 				{
 					.valid = 0,
@@ -456,7 +457,7 @@
 					.next_node = RSS_IPV4_TCP_NODE,
 					.next_branch = 0,
 					.data = 0x0600,
-					.mask = 0xffff
+					.mask = 0x00ff
 				},
 				{
 					/* UDP */
@@ -468,7 +469,7 @@
 					.next_node = RSS_IPV4_UDP_NODE,
 					.next_branch = 0,
 					.data = 0x1100,
-					.mask = 0xffff
+					.mask = 0x00ff
 				},
 				{
 					.valid = 0,
@@ -642,7 +643,7 @@
 				{
 					/* TCP DST Port */
 					.valid = 0,
-					.next_packet_pointer = 256,
+					.next_packet_pointer = 258,
 					.jump_bw = JMP_FW,
 					.jump_rel = JMP_ABS,
 					.operation = EQT,
@@ -729,6 +730,6 @@
 	return xgene_cle_setup_ptree(pdata, enet_cle);
 }
 
-struct xgene_cle_ops xgene_cle3in_ops = {
+const struct xgene_cle_ops xgene_cle3in_ops = {
 	.cle_init = xgene_enet_cle_init,
 };
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
index 29a17ab..33c5f6b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
@@ -83,6 +83,8 @@
 #define CLE_TYPE_POS		0
 #define CLE_TYPE_LEN		2
 
+#define CLE_DROP_POS		28
+#define CLE_DROP_LEN		1
 #define CLE_DSTQIDL_POS		25
 #define CLE_DSTQIDL_LEN		7
 #define CLE_DSTQIDH_POS		0
@@ -290,6 +292,6 @@
 	u32 jump_bytes;
 };
 
-extern struct xgene_cle_ops xgene_cle3in_ops;
+extern const struct xgene_cle_ops xgene_cle3in_ops;
 
 #endif /* __XGENE_ENET_CLE_H__ */
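The new CLE_DROP_POS/CLE_DROP_LEN pair is consumed via SET_VAL(CLE_DROP, dbptr->drop) in the .c hunk above. SET_VAL() itself is defined elsewhere in the xgene_enet headers and is not shown here; presumably it packs a value into the field named by its _POS/_LEN pair, roughly as in this hypothetical reconstruction:

/* Hypothetical reconstruction only; the real macro may differ. */
#define EXAMPLE_SET_VAL(field, val) \
	(((u32)(val) & GENMASK(field ## _LEN - 1, 0)) << (field ## _POS))

/* With CLE_DROP_POS 28 and CLE_DROP_LEN 1, a drop flag of 1 becomes BIT(28). */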
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 39e081a..2f5638f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -219,27 +219,30 @@
 			    struct xgene_enet_pdata *pdata,
 			    enum xgene_enet_err_code status)
 {
-	struct rtnl_link_stats64 *stats = &pdata->stats;
-
 	switch (status) {
 	case INGRESS_CRC:
-		stats->rx_crc_errors++;
+		ring->rx_crc_errors++;
+		ring->rx_dropped++;
 		break;
 	case INGRESS_CHECKSUM:
 	case INGRESS_CHECKSUM_COMPUTE:
-		stats->rx_errors++;
+		ring->rx_errors++;
+		ring->rx_dropped++;
 		break;
 	case INGRESS_TRUNC_FRAME:
-		stats->rx_frame_errors++;
+		ring->rx_frame_errors++;
+		ring->rx_dropped++;
 		break;
 	case INGRESS_PKT_LEN:
-		stats->rx_length_errors++;
+		ring->rx_length_errors++;
+		ring->rx_dropped++;
 		break;
 	case INGRESS_PKT_UNDER:
-		stats->rx_frame_errors++;
+		ring->rx_frame_errors++;
+		ring->rx_dropped++;
 		break;
 	case INGRESS_FIFO_OVERRUN:
-		stats->rx_fifo_errors++;
+		ring->rx_fifo_errors++;
 		break;
 	default:
 		break;
@@ -824,7 +827,7 @@
 		return -EINVAL;
 
 	phy = get_phy_device(mdio, phy_id, false);
-	if (!phy || IS_ERR(phy))
+	if (IS_ERR(phy))
 		return -EIO;
 
 	ret = phy_device_register(phy);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index ba7da98..45220be 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -86,7 +86,7 @@
 #define RINGADDRL_POS		5
 #define RINGADDRL_LEN		27
 #define RINGADDRH_POS		0
-#define RINGADDRH_LEN		6
+#define RINGADDRH_LEN		7
 #define RINGSIZE_POS		23
 #define RINGSIZE_LEN		3
 #define RINGTYPE_POS		19
@@ -94,9 +94,9 @@
 #define RINGMODE_POS		20
 #define RINGMODE_LEN		3
 #define RECOMTIMEOUTL_POS	28
-#define RECOMTIMEOUTL_LEN	3
+#define RECOMTIMEOUTL_LEN	4
 #define RECOMTIMEOUTH_POS	0
-#define RECOMTIMEOUTH_LEN	2
+#define RECOMTIMEOUTH_LEN	3
 #define NUMMSGSINQ_POS		1
 #define NUMMSGSINQ_LEN		16
 #define ACCEPTLERR		BIT(19)
@@ -201,6 +201,8 @@
 #define USERINFO_LEN			32
 #define FPQNUM_POS			32
 #define FPQNUM_LEN			12
+#define ELERR_POS                       46
+#define ELERR_LEN                       2
 #define NV_POS				50
 #define NV_LEN				1
 #define LL_POS				51
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 8d4c1ad..d208b17 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -443,8 +443,8 @@
 
 	skb_tx_timestamp(skb);
 
-	pdata->stats.tx_packets++;
-	pdata->stats.tx_bytes += skb->len;
+	tx_ring->tx_packets++;
+	tx_ring->tx_bytes += skb->len;
 
 	pdata->ring_ops->wr_cmd(tx_ring, count);
 	return NETDEV_TX_OK;
@@ -483,12 +483,12 @@
 	skb = buf_pool->rx_skb[skb_index];
 
 	/* checking for error */
-	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
+	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
+		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
 	if (unlikely(status > 2)) {
 		dev_kfree_skb_any(skb);
 		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
 				       status);
-		pdata->stats.rx_dropped++;
 		ret = -EIO;
 		goto out;
 	}
@@ -506,8 +506,8 @@
 		xgene_enet_skip_csum(skb);
 	}
 
-	pdata->stats.rx_packets++;
-	pdata->stats.rx_bytes += datalen;
+	rx_ring->rx_packets++;
+	rx_ring->rx_bytes += datalen;
 	napi_gro_receive(&rx_ring->napi, skb);
 out:
 	if (--rx_ring->nbufpool == 0) {
@@ -630,7 +630,7 @@
 		ring = pdata->rx_ring[i];
 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-				       IRQF_SHARED, ring->irq_name, ring);
+				       0, ring->irq_name, ring);
 		if (ret) {
 			netdev_err(ndev, "Failed to request irq %s\n",
 				   ring->irq_name);
@@ -641,7 +641,7 @@
 		ring = pdata->tx_ring[i]->cp_ring;
 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-				       IRQF_SHARED, ring->irq_name, ring);
+				       0, ring->irq_name, ring);
 		if (ret) {
 			netdev_err(ndev, "Failed to request irq %s\n",
 				   ring->irq_name);
@@ -973,6 +973,17 @@
 	return owner;
 }
 
+static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+	u32 cpu_bufnum;
+	int ret;
+
+	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
+
+	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
+}
+
 static int xgene_enet_create_desc_rings(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
@@ -981,13 +992,15 @@
 	struct xgene_enet_desc_ring *buf_pool = NULL;
 	enum xgene_ring_owner owner;
 	dma_addr_t dma_exp_bufs;
-	u8 cpu_bufnum = pdata->cpu_bufnum;
+	u8 cpu_bufnum;
 	u8 eth_bufnum = pdata->eth_bufnum;
 	u8 bp_bufnum = pdata->bp_bufnum;
 	u16 ring_num = pdata->ring_num;
 	u16 ring_id;
 	int i, ret, size;
 
+	cpu_bufnum = xgene_start_cpu_bufnum(pdata);
+
 	for (i = 0; i < pdata->rxq_cnt; i++) {
 		/* allocate rx descriptor ring */
 		owner = xgene_derive_ring_owner(pdata);
@@ -1114,12 +1127,31 @@
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct rtnl_link_stats64 *stats = &pdata->stats;
+	struct xgene_enet_desc_ring *ring;
+	int i;
 
-	stats->rx_errors += stats->rx_length_errors +
-			    stats->rx_crc_errors +
-			    stats->rx_frame_errors +
-			    stats->rx_fifo_errors;
-	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));
+	memset(stats, 0, sizeof(struct rtnl_link_stats64));
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		ring = pdata->tx_ring[i];
+		if (ring) {
+			stats->tx_packets += ring->tx_packets;
+			stats->tx_bytes += ring->tx_bytes;
+		}
+	}
+
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		if (ring) {
+			stats->rx_packets += ring->rx_packets;
+			stats->rx_bytes += ring->rx_bytes;
+			stats->rx_errors += ring->rx_length_errors +
+				ring->rx_crc_errors +
+				ring->rx_frame_errors +
+				ring->rx_fifo_errors;
+			stats->rx_dropped += ring->rx_dropped;
+		}
+	}
+	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
 
 	return storage;
 }
@@ -1234,6 +1266,13 @@
 	for (i = 0; i < max_irqs; i++) {
 		ret = platform_get_irq(pdev, i);
 		if (ret <= 0) {
+			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+				max_irqs = i;
+				pdata->rxq_cnt = max_irqs / 2;
+				pdata->txq_cnt = max_irqs / 2;
+				pdata->cq_cnt = max_irqs / 2;
+				break;
+			}
 			dev_err(dev, "Unable to get ENET IRQ\n");
 			ret = ret ? : -ENXIO;
 			return ret;
@@ -1437,19 +1476,28 @@
 		pdata->port_ops = &xgene_xgport_ops;
 		pdata->cle_ops = &xgene_cle3in_ops;
 		pdata->rm = RM0;
-		pdata->rxq_cnt = XGENE_NUM_RX_RING;
-		pdata->txq_cnt = XGENE_NUM_TX_RING;
-		pdata->cq_cnt = XGENE_NUM_TXC_RING;
+		if (!pdata->rxq_cnt) {
+			pdata->rxq_cnt = XGENE_NUM_RX_RING;
+			pdata->txq_cnt = XGENE_NUM_TX_RING;
+			pdata->cq_cnt = XGENE_NUM_TXC_RING;
+		}
 		break;
 	}
 
 	if (pdata->enet_id == XGENE_ENET1) {
 		switch (pdata->port_id) {
 		case 0:
-			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
-			pdata->eth_bufnum = START_ETH_BUFNUM_0;
-			pdata->bp_bufnum = START_BP_BUFNUM_0;
-			pdata->ring_num = START_RING_NUM_0;
+			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+				pdata->ring_num = START_RING_NUM_0;
+			} else {
+				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+				pdata->eth_bufnum = START_ETH_BUFNUM_0;
+				pdata->bp_bufnum = START_BP_BUFNUM_0;
+				pdata->ring_num = START_RING_NUM_0;
+			}
 			break;
 		case 1:
 			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@@ -1595,21 +1643,22 @@
 
 	ret = xgene_enet_init_hw(pdata);
 	if (ret)
-		goto err;
+		goto err_netdev;
 
 	mac_ops = pdata->mac_ops;
 	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
 		ret = xgene_enet_mdio_config(pdata);
 		if (ret)
-			goto err;
+			goto err_netdev;
 	} else {
 		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
 	}
 
 	xgene_enet_napi_add(pdata);
 	return 0;
-err:
+err_netdev:
 	unregister_netdev(ndev);
+err:
 	free_netdev(ndev);
 	return ret;
 }
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 175d188..092fbec 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -49,10 +49,10 @@
 #define XGENE_ENET_MSS	1448
 #define XGENE_MIN_ENET_FRAME_SIZE	60
 
-#define XGENE_MAX_ENET_IRQ	8
-#define XGENE_NUM_RX_RING	4
-#define XGENE_NUM_TX_RING	4
-#define XGENE_NUM_TXC_RING	4
+#define XGENE_MAX_ENET_IRQ	16
+#define XGENE_NUM_RX_RING	8
+#define XGENE_NUM_TX_RING	8
+#define XGENE_NUM_TXC_RING	8
 
 #define START_CPU_BUFNUM_0	0
 #define START_ETH_BUFNUM_0	2
@@ -121,6 +121,16 @@
 		struct xgene_enet_raw_desc16 *raw_desc16;
 	};
 	__le64 *exp_bufs;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 rx_dropped;
+	u64 rx_errors;
+	u64 rx_length_errors;
+	u64 rx_crc_errors;
+	u64 rx_frame_errors;
+	u64 rx_fifo_errors;
 };
 
 struct xgene_mac_ops {
@@ -191,7 +201,7 @@
 	const struct xgene_mac_ops *mac_ops;
 	const struct xgene_port_ops *port_ops;
 	struct xgene_ring_ops *ring_ops;
-	struct xgene_cle_ops *cle_ops;
+	const struct xgene_cle_ops *cle_ops;
 	struct delayed_work link_work;
 	u32 port_id;
 	u8 cpu_bufnum;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
index 29a71b4..002df5a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
@@ -33,7 +33,7 @@
 #define LINK_STATUS			BIT(2)
 #define LINK_UP				BIT(15)
 #define MPA_IDLE_WITH_QMI_EMPTY		BIT(12)
-#define SG_RX_DV_GATE_REG_0_ADDR	0x0dfc
+#define SG_RX_DV_GATE_REG_0_ADDR	0x05fc
 
 extern const struct xgene_mac_ops xgene_sgmac_ops;
 extern const struct xgene_port_ops xgene_sgport_ops;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 55b118e..9fe8b5e 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -745,7 +745,7 @@
 
 static void alx_netif_stop(struct alx_priv *alx)
 {
-	alx->dev->trans_start = jiffies;
+	netif_trans_update(alx->dev);
 	if (netif_carrier_ok(alx->dev)) {
 		netif_carrier_off(alx->dev);
 		netif_tx_disable(alx->dev);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index b9203d9..c46b489 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -488,7 +488,7 @@
 	dma_addr_t dma;		/* descriptor ring physical address */
 	u16 size;		/* descriptor ring length in bytes */
 	u16 count;		/* number of descriptors in the ring */
-	u16 next_to_use; 	/* this is protectd by adapter->tx_lock */
+	u16 next_to_use;
 	atomic_t next_to_clean;
 	struct atl1c_buffer *buffer_info;
 };
@@ -542,7 +542,6 @@
 	u16 link_duplex;
 
 	spinlock_t mdio_lock;
-	spinlock_t tx_lock;
 	atomic_t irq_sem;
 
 	struct work_struct common_task;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index d0084d4..a3200ea 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -821,7 +821,6 @@
 	atl1c_set_rxbufsize(adapter, adapter->netdev);
 	atomic_set(&adapter->irq_sem, 1);
 	spin_lock_init(&adapter->mdio_lock);
-	spin_lock_init(&adapter->tx_lock);
 	set_bit(__AT_DOWN, &adapter->flags);
 
 	return 0;
@@ -2206,7 +2205,6 @@
 					  struct net_device *netdev)
 {
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
-	unsigned long flags;
 	u16 tpd_req = 1;
 	struct atl1c_tpd_desc *tpd;
 	enum atl1c_trans_queue type = atl1c_trans_normal;
@@ -2217,16 +2215,10 @@
 	}
 
 	tpd_req = atl1c_cal_tpd_req(skb);
-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
-		if (netif_msg_pktdata(adapter))
-			dev_info(&adapter->pdev->dev, "tx locked\n");
-		return NETDEV_TX_LOCKED;
-	}
 
 	if (atl1c_tpd_avail(adapter, type) < tpd_req) {
 		/* no enough descriptor, just stop queue */
 		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -2234,7 +2226,6 @@
 
 	/* do TSO and check sum */
 	if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
@@ -2257,12 +2248,10 @@
 			   "tx-skb droppted due to dma error\n");
 		/* roll back tpd/buffer */
 		atl1c_tx_rollback(adapter, tpd, type);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		dev_kfree_skb_any(skb);
 	} else {
 		netdev_sent_queue(adapter->netdev, skb->len);
 		atl1c_tx_queue(adapter, skb, tpd, type);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 	}
 
 	return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 0212dac..632bb84 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -442,7 +442,6 @@
 	u16 link_duplex;
 
 	spinlock_t mdio_lock;
-	spinlock_t tx_lock;
 	atomic_t irq_sem;
 
 	struct work_struct reset_task;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 59a03a1..974713b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -648,7 +648,6 @@
 
 	atomic_set(&adapter->irq_sem, 1);
 	spin_lock_init(&adapter->mdio_lock);
-	spin_lock_init(&adapter->tx_lock);
 
 	set_bit(__AT_DOWN, &adapter->flags);
 
@@ -1866,7 +1865,6 @@
 					  struct net_device *netdev)
 {
 	struct atl1e_adapter *adapter = netdev_priv(netdev);
-	unsigned long flags;
 	u16 tpd_req = 1;
 	struct atl1e_tpd_desc *tpd;
 
@@ -1880,13 +1878,10 @@
 		return NETDEV_TX_OK;
 	}
 	tpd_req = atl1e_cal_tdp_req(skb);
-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
-		return NETDEV_TX_LOCKED;
 
 	if (atl1e_tpd_avail(adapter) < tpd_req) {
 		/* no enough descriptor, just stop queue */
 		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1910,7 +1905,6 @@
 
 	/* do TSO and check sum */
 	if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
-		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
@@ -1921,10 +1915,7 @@
 	}
 
 	atl1e_tx_queue(adapter, tpd_req, tpd);
-
-	netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
 out:
-	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 	return NETDEV_TX_OK;
 }
 
@@ -2285,8 +2276,7 @@
 
 	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
 			      NETIF_F_HW_VLAN_CTAG_RX;
-	netdev->features = netdev->hw_features | NETIF_F_LLTX |
-			   NETIF_F_HW_VLAN_CTAG_TX;
+	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX;
 	/* not enabled by default */
 	netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
 	return 0;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f45..2ff4658 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@
 
 	err = -EIO;
 
-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
 	/* Init PHY as early as possible due to power saving issue  */
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 993c780..543bf38 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -831,7 +831,7 @@
 	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
 
 	if (work_done < budget) {
-		napi_complete(napi);
+		napi_complete_done(napi, work_done);
 		/* re-enable RX interrupts */
 		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
 	}
@@ -873,7 +873,7 @@
 		if (likely(napi_schedule_prep(&priv->napi))) {
 			/* disable RX interrupts */
 			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
-			__napi_schedule(&priv->napi);
+			__napi_schedule_irqoff(&priv->napi);
 		}
 	}
 
@@ -916,7 +916,7 @@
 
 		if (likely(napi_schedule_prep(&txr->napi))) {
 			intrl2_1_mask_set(priv, BIT(ring));
-			__napi_schedule(&txr->napi);
+			__napi_schedule_irqoff(&txr->napi);
 		}
 	}
 
@@ -1117,7 +1117,7 @@
 {
 	netdev_warn(dev, "transmit timeout!\n");
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	dev->stats.tx_errors++;
 
 	netif_tx_wake_all_queues(dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 99b30a9..ee5f431 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1515,7 +1515,7 @@
 	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
 			      PHY_INTERFACE_MODE_MII);
 	if (IS_ERR(phy_dev)) {
-		bgmac_err(bgmac, "PHY connecton failed\n");
+		bgmac_err(bgmac, "PHY connection failed\n");
 		err = PTR_ERR(phy_dev);
 		goto err_unregister_bus;
 	}
@@ -1572,6 +1572,11 @@
 		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
 	}
 
+	/* This (reset &) enable is not present in specs or reference driver but
+	 * Broadcom does it in arch PCI code when enabling fake PCI device.
+	 */
+	bcma_core_enable(core, 0);
+
 	/* Allocation and references */
 	net_dev = alloc_etherdev(sizeof(*bgmac));
 	if (!net_dev)
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093..9a03c14 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
 #define  BGMAC_CMDCFG_TAI			0x00000200
 #define  BGMAC_CMDCFG_HD			0x00000400	/* Set if in half duplex mode */
 #define  BGMAC_CMDCFG_HD_SHIFT			10
-#define  BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for other revs */
-#define  BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, only for core rev 4 */
-#define  BGMAC_CMDCFG_SR(rev)  ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define  BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for core rev 0-3 */
+#define  BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, for core rev >= 4 */
+#define  BGMAC_CMDCFG_SR(rev)  ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
 #define  BGMAC_CMDCFG_ML			0x00008000	/* Set to activate mac loopback mode */
 #define  BGMAC_CMDCFG_AE			0x00400000
 #define  BGMAC_CMDCFG_CFE			0x00800000
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aabbd51..5a0dca3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -78,6 +78,7 @@
 	BCM57402,
 	BCM57404,
 	BCM57406,
+	BCM57314,
 	BCM57304_VF,
 	BCM57404_VF,
 };
@@ -92,6 +93,7 @@
 	{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
 	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
 	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+	{ "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
 	{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
 	{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
 };
@@ -103,6 +105,7 @@
 	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
 	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
 	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
 #ifdef CONFIG_BNXT_SRIOV
 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
@@ -118,6 +121,13 @@
 	HWRM_CFA_L2_FILTER_ALLOC,
 };
 
+static const u16 bnxt_async_events_arr[] = {
+	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
+	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+};
+
 static bool bnxt_vf_pciid(enum board_idx idx)
 {
 	return (idx == BCM57304_VF || idx == BCM57404_VF);
@@ -581,12 +591,30 @@
 	struct page *page;
 	dma_addr_t mapping;
 	u16 sw_prod = rxr->rx_sw_agg_prod;
+	unsigned int offset = 0;
 
-	page = alloc_page(gfp);
-	if (!page)
-		return -ENOMEM;
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+		page = rxr->rx_page;
+		if (!page) {
+			page = alloc_page(gfp);
+			if (!page)
+				return -ENOMEM;
+			rxr->rx_page = page;
+			rxr->rx_page_offset = 0;
+		}
+		offset = rxr->rx_page_offset;
+		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+		if (rxr->rx_page_offset == PAGE_SIZE)
+			rxr->rx_page = NULL;
+		else
+			get_page(page);
+	} else {
+		page = alloc_page(gfp);
+		if (!page)
+			return -ENOMEM;
+	}
 
-	mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
 			       PCI_DMA_FROMDEVICE);
 	if (dma_mapping_error(&pdev->dev, mapping)) {
 		__free_page(page);
@@ -601,6 +629,7 @@
 	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
 
 	rx_agg_buf->page = page;
+	rx_agg_buf->offset = offset;
 	rx_agg_buf->mapping = mapping;
 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
 	rxbd->rx_bd_opaque = sw_prod;
@@ -642,6 +671,7 @@
 		page = cons_rx_buf->page;
 		cons_rx_buf->page = NULL;
 		prod_rx_buf->page = page;
+		prod_rx_buf->offset = cons_rx_buf->offset;
 
 		prod_rx_buf->mapping = cons_rx_buf->mapping;
 
@@ -709,7 +739,8 @@
 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
 
 		cons_rx_buf = &rxr->rx_agg_ring[cons];
-		skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+		skb_fill_page_desc(skb, i, cons_rx_buf->page,
+				   cons_rx_buf->offset, frag_len);
 		__clear_bit(cons, rxr->rx_agg_bmap);
 
 		/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -740,7 +771,7 @@
 			return NULL;
 		}
 
-		dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
 			       PCI_DMA_FROMDEVICE);
 
 		skb->data_len += frag_len;
@@ -792,6 +823,46 @@
 	return skb;
 }
 
+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
+			   u32 *raw_cons, void *cmp)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct rx_cmp *rxcmp = cmp;
+	u32 tmp_raw_cons = *raw_cons;
+	u8 cmp_type, agg_bufs = 0;
+
+	cmp_type = RX_CMP_TYPE(rxcmp);
+
+	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
+			    RX_CMP_AGG_BUFS) >>
+			   RX_CMP_AGG_BUFS_SHIFT;
+	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+		struct rx_tpa_end_cmp *tpa_end = cmp;
+
+		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+			    RX_TPA_END_CMP_AGG_BUFS) >>
+			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+	}
+
+	if (agg_bufs) {
+		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+			return -EBUSY;
+	}
+	*raw_cons = tmp_raw_cons;
+	return 0;
+}
+
+static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+	if (!rxr->bnapi->in_reset) {
+		rxr->bnapi->in_reset = true;
+		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+		schedule_work(&bp->sp_task);
+	}
+	rxr->rx_next_cons = 0xffff;
+}
+
 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			   struct rx_tpa_start_cmp *tpa_start,
 			   struct rx_tpa_start_cmp_ext *tpa_start1)
@@ -809,6 +880,11 @@
 	prod_rx_buf = &rxr->rx_buf_ring[prod];
 	tpa_info = &rxr->rx_tpa[agg_id];
 
+	if (unlikely(cons != rxr->rx_next_cons)) {
+		bnxt_sched_reset(bp, rxr);
+		return;
+	}
+
 	prod_rx_buf->data = tpa_info->data;
 
 	mapping = tpa_info->mapping;
@@ -846,6 +922,7 @@
 
 	rxr->rx_prod = NEXT_RX(prod);
 	cons = NEXT_RX(cons);
+	rxr->rx_next_cons = NEXT_RX(cons);
 	cons_rx_buf = &rxr->rx_buf_ring[cons];
 
 	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
@@ -959,6 +1036,14 @@
 	dma_addr_t mapping;
 	struct sk_buff *skb;
 
+	if (unlikely(bnapi->in_reset)) {
+		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
+
+		if (rc < 0)
+			return ERR_PTR(-EBUSY);
+		return NULL;
+	}
+
 	tpa_info = &rxr->rx_tpa[agg_id];
 	data = tpa_info->data;
 	prefetch(data);
@@ -1125,6 +1210,12 @@
 	cons = rxcmp->rx_cmp_opaque;
 	rx_buf = &rxr->rx_buf_ring[cons];
 	data = rx_buf->data;
+	if (unlikely(cons != rxr->rx_next_cons)) {
+		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+
+		bnxt_sched_reset(bp, rxr);
+		return rc1;
+	}
 	prefetch(data);
 
 	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
@@ -1224,6 +1315,7 @@
 
 next_rx:
 	rxr->rx_prod = NEXT_RX(prod);
+	rxr->rx_next_cons = NEXT_RX(cons);
 
 next_rx_no_prod:
 	*raw_cons = tmp_raw_cons;
@@ -1231,6 +1323,10 @@
 	return rc;
 }
 
+#define BNXT_GET_EVENT_PORT(data)	\
+	((data) &				\
+	 HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+
 static int bnxt_async_event_process(struct bnxt *bp,
 				    struct hwrm_async_event_cmpl *cmpl)
 {
@@ -1238,12 +1334,40 @@
 
 	/* TODO CHIMP_FW: Define event id's for link change, error etc */
 	switch (event_id) {
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
+		u32 data1 = le32_to_cpu(cmpl->event_data1);
+		struct bnxt_link_info *link_info = &bp->link_info;
+
+		if (BNXT_VF(bp))
+			goto async_event_process_exit;
+		if (data1 & 0x20000) {
+			u16 fw_speed = link_info->force_link_speed;
+			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
+
+			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+				    speed);
+		}
+		/* fall thru */
+	}
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
 		break;
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
 		break;
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
+		u32 data1 = le32_to_cpu(cmpl->event_data1);
+		u16 port_id = BNXT_GET_EVENT_PORT(data1);
+
+		if (BNXT_VF(bp))
+			break;
+
+		if (bp->pf.port_id != port_id)
+			break;
+
+		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
+		break;
+	}
 	default:
 		netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
 			   event_id);
@@ -1367,6 +1491,10 @@
 		if (!TX_CMP_VALID(txcmp, raw_cons))
 			break;
 
+		/* The valid-bit test of the entry must be done before
+		 * reading any further.
+		 */
+		dma_rmb();
 		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
 			tx_pkts++;
 			/* return full budget so NAPI will complete. */
@@ -1584,13 +1712,17 @@
 
 			dma_unmap_page(&pdev->dev,
 				       dma_unmap_addr(rx_agg_buf, mapping),
-				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+				       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
 			rx_agg_buf->page = NULL;
 			__clear_bit(j, rxr->rx_agg_bmap);
 
 			__free_page(page);
 		}
+		if (rxr->rx_page) {
+			__free_page(rxr->rx_page);
+			rxr->rx_page = NULL;
+		}
 	}
 }
 
@@ -1973,7 +2105,7 @@
 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
 		return 0;
 
-	type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
 		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
 
 	bnxt_init_rxbd_pages(ring, type);
@@ -2164,7 +2296,7 @@
 	bp->rx_agg_nr_pages = 0;
 
 	if (bp->flags & BNXT_FLAG_TPA)
-		agg_factor = 4;
+		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
 
 	bp->flags &= ~BNXT_FLAG_JUMBO;
 	if (rx_space > PAGE_SIZE) {
@@ -2457,6 +2589,7 @@
 			rxr->rx_prod = 0;
 			rxr->rx_agg_prod = 0;
 			rxr->rx_sw_agg_prod = 0;
+			rxr->rx_next_cons = 0;
 		}
 	}
 }
@@ -2638,7 +2771,7 @@
 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 				 int timeout, bool silent)
 {
-	int i, intr_process, rc;
+	int i, intr_process, rc, tmo_count;
 	struct input *req = msg;
 	u32 *data = msg;
 	__le32 *resp_len, *valid;
@@ -2653,7 +2786,7 @@
 	/* Write request msg to hwrm channel */
 	__iowrite32_copy(bp->bar0, data, msg_len / 4);
 
-	for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4)
+	for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
 		writel(0, bp->bar0 + i);
 
 	/* currently supports only one outstanding message */
@@ -2667,11 +2800,12 @@
 		timeout = DFLT_HWRM_CMD_TIMEOUT;
 
 	i = 0;
+	tmo_count = timeout * 40;
 	if (intr_process) {
 		/* Wait until hwrm response cmpl interrupt is processed */
 		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
-		       i++ < timeout) {
-			usleep_range(600, 800);
+		       i++ < tmo_count) {
+			usleep_range(25, 40);
 		}
 
 		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
@@ -2682,30 +2816,30 @@
 	} else {
 		/* Check if response len is updated */
 		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
-		for (i = 0; i < timeout; i++) {
+		for (i = 0; i < tmo_count; i++) {
 			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
 			      HWRM_RESP_LEN_SFT;
 			if (len)
 				break;
-			usleep_range(600, 800);
+			usleep_range(25, 40);
 		}
 
-		if (i >= timeout) {
+		if (i >= tmo_count) {
 			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
 				   timeout, le16_to_cpu(req->req_type),
-				   le16_to_cpu(req->seq_id), *resp_len);
+				   le16_to_cpu(req->seq_id), len);
 			return -1;
 		}
 
 		/* Last word of resp contains valid bit */
 		valid = bp->hwrm_cmd_resp_addr + len - 4;
-		for (i = 0; i < timeout; i++) {
+		for (i = 0; i < 5; i++) {
 			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
 				break;
-			usleep_range(600, 800);
+			udelay(1);
 		}
 
-		if (i >= timeout) {
+		if (i >= 5) {
 			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
 				   timeout, le16_to_cpu(req->req_type),
 				   le16_to_cpu(req->seq_id), len, *valid);
@@ -2751,6 +2885,8 @@
 {
 	struct hwrm_func_drv_rgtr_input req = {0};
 	int i;
+	DECLARE_BITMAP(async_events_bmap, 256);
+	u32 *events = (u32 *)async_events_bmap;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
 
@@ -2759,11 +2895,14 @@
 			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
 			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
 
-	/* TODO: current async event fwd bits are not defined and the firmware
-	 * only checks if it is non-zero to enable async event forwarding
-	 */
-	req.async_event_fwd[0] |= cpu_to_le32(1);
-	req.os_type = cpu_to_le16(1);
+	memset(async_events_bmap, 0, sizeof(async_events_bmap));
+	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
+		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
+
+	for (i = 0; i < 8; i++)
+		req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+
+	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
 	req.ver_maj = DRV_VER_MAJ;
 	req.ver_min = DRV_VER_MIN;
 	req.ver_upd = DRV_VER_UPD;
@@ -3020,12 +3159,12 @@
 		/* The number of segs is in log2 units, and the first packet is not
 		 * included as part of these units.
 		 */
-		if (mss <= PAGE_SIZE) {
-			n = PAGE_SIZE / mss;
+		if (mss <= BNXT_RX_PAGE_SIZE) {
+			n = BNXT_RX_PAGE_SIZE / mss;
 			nsegs = (MAX_SKB_FRAGS - 1) * n;
 		} else {
-			n = mss / PAGE_SIZE;
-			if (mss & (PAGE_SIZE - 1))
+			n = mss / BNXT_RX_PAGE_SIZE;
+			if (mss & (BNXT_RX_PAGE_SIZE - 1))
 				n++;
 			nsegs = (MAX_SKB_FRAGS - n) / n;
 		}
@@ -3391,11 +3530,11 @@
 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
 
+		cpr->cp_doorbell = bp->bar1 + i * 0x80;
 		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
 					      INVALID_STATS_CTX_ID);
 		if (rc)
 			goto err_out;
-		cpr->cp_doorbell = bp->bar1 + i * 0x80;
 		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
 	}
@@ -3726,7 +3865,7 @@
 
 		pf->fw_fid = le16_to_cpu(resp->fid);
 		pf->port_id = le16_to_cpu(resp->port_id);
-		memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
 		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
 		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
 		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -3751,7 +3890,7 @@
 		struct bnxt_vf_info *vf = &bp->vf;
 
 		vf->fw_fid = le16_to_cpu(resp->fid);
-		memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
 		if (is_valid_ether_addr(vf->mac_addr))
 			/* overwrite netdev dev_addr with admin VF MAC */
 			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
@@ -3830,6 +3969,7 @@
 	struct hwrm_ver_get_input req = {0};
 	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
 
+	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
@@ -3841,6 +3981,8 @@
 
 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
 
+	bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
+			     resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
 	if (resp->hwrm_intf_maj < 1) {
 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
 			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
@@ -3855,6 +3997,9 @@
 	if (!bp->hwrm_cmd_timeout)
 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
 
+	if (resp->hwrm_intf_maj >= 1)
+		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+
 hwrm_ver_get_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
@@ -4009,9 +4154,11 @@
 }
 
 static int bnxt_cfg_rx_mode(struct bnxt *);
+static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
 
 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 {
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
 	int rc = 0;
 
 	if (irq_re_init) {
@@ -4067,13 +4214,22 @@
 		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
 		goto err_out;
 	}
-	bp->vnic_info[0].uc_filter_count = 1;
+	vnic->uc_filter_count = 1;
 
-	bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+	vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 
 	if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
-		bp->vnic_info[0].rx_mask |=
-				CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+	if (bp->dev->flags & IFF_ALLMULTI) {
+		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+		vnic->mc_list_count = 0;
+	} else {
+		u32 mask = 0;
+
+		bnxt_mc_list_updated(bp, &mask);
+		vnic->rx_mask |= mask;
+	}
 
 	rc = bnxt_cfg_rx_mode(bp);
 	if (rc)
@@ -4305,7 +4461,7 @@
 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
 		rc = bnxt_setup_msix(bp);
 
-	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
 		/* fallback to INTA */
 		rc = bnxt_setup_inta(bp);
 	}
@@ -4418,6 +4574,7 @@
 	int i;
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
+		bp->bnapi[i]->in_reset = false;
 		bnxt_enable_poll(bp->bnapi[i]);
 		napi_enable(&bp->bnapi[i]->napi);
 	}
@@ -4482,12 +4639,49 @@
 		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
 		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
 			    speed, duplex, flow_ctrl);
+		if (bp->flags & BNXT_FLAG_EEE_CAP)
+			netdev_info(bp->dev, "EEE is %s\n",
+				    bp->eee.eee_active ? "active" :
+							 "not active");
 	} else {
 		netif_carrier_off(bp->dev);
 		netdev_err(bp->dev, "NIC Link is Down\n");
 	}
 }
 
+static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_port_phy_qcaps_input req = {0};
+	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (bp->hwrm_spec_code < 0x10201)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		goto hwrm_phy_qcaps_exit;
+
+	if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
+		struct ethtool_eee *eee = &bp->eee;
+		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
+
+		bp->flags |= BNXT_FLAG_EEE_CAP;
+		eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
+				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
+		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
+				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
+	}
+
+hwrm_phy_qcaps_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 {
 	int rc = 0;
@@ -4519,7 +4713,6 @@
 	else
 		link_info->link_speed = 0;
 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
-	link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
 	link_info->lp_auto_link_speeds =
@@ -4529,9 +4722,47 @@
 	link_info->phy_ver[1] = resp->phy_min;
 	link_info->phy_ver[2] = resp->phy_bld;
 	link_info->media_type = resp->media_type;
-	link_info->transceiver = resp->transceiver_type;
-	link_info->phy_addr = resp->phy_addr;
+	link_info->phy_type = resp->phy_type;
+	link_info->transceiver = resp->xcvr_pkg_type;
+	link_info->phy_addr = resp->eee_config_phy_addr &
+			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
+	link_info->module_status = resp->module_status;
 
+	if (bp->flags & BNXT_FLAG_EEE_CAP) {
+		struct ethtool_eee *eee = &bp->eee;
+		u16 fw_speeds;
+
+		eee->eee_active = 0;
+		if (resp->eee_config_phy_addr &
+		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
+			eee->eee_active = 1;
+			fw_speeds = le16_to_cpu(
+				resp->link_partner_adv_eee_link_speed_mask);
+			eee->lp_advertised =
+				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+		}
+
+		/* Pull initial EEE config */
+		if (!chng_link_state) {
+			if (resp->eee_config_phy_addr &
+			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
+				eee->eee_enabled = 1;
+
+			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
+			eee->advertised =
+				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+
+			if (resp->eee_config_phy_addr &
+			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
+				__le32 tmr;
+
+				eee->tx_lpi_enabled = 1;
+				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
+				eee->tx_lpi_timer = le32_to_cpu(tmr) &
+					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
+			}
+		}
+	}
 	/* TODO: need to add more logic to report VF link */
 	if (chng_link_state) {
 		if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -4548,14 +4779,44 @@
 	return 0;
 }
 
+static void bnxt_get_port_module_status(struct bnxt *bp)
+{
+	struct bnxt_link_info *link_info = &bp->link_info;
+	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
+	u8 module_status;
+
+	if (bnxt_update_link(bp, true))
+		return;
+
+	module_status = link_info->module_status;
+	switch (module_status) {
+	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
+	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
+	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
+		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
+			    bp->pf.port_id);
+		if (bp->hwrm_spec_code >= 0x10201) {
+			netdev_warn(bp->dev, "Module part number %s\n",
+				    resp->phy_vendor_partnumber);
+		}
+		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
+			netdev_warn(bp->dev, "TX is disabled\n");
+		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
+			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
+	}
+}
+
 static void
 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
 {
 	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+		if (bp->hwrm_spec_code >= 0x10201)
+			req->auto_pause =
+				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
-			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
 		req->enables |=
 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
 	} else {
@@ -4565,6 +4826,11 @@
 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
 		req->enables |=
 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+		if (bp->hwrm_spec_code >= 0x10201) {
+			req->auto_pause = req->force_pause;
+			req->enables |= cpu_to_le32(
+				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+		}
 	}
 }
 
@@ -4577,7 +4843,7 @@
 
 	if (autoneg & BNXT_AUTONEG_SPEED) {
 		req->auto_mode |=
-			PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+			PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
 
 		req->enables |= cpu_to_le32(
 			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
@@ -4591,9 +4857,6 @@
 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
 	}
 
-	/* currently don't support half duplex */
-	req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
-	req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
 	/* tell chimp that the setting takes effect immediately */
 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
 }
@@ -4628,7 +4891,30 @@
 	return rc;
 }
 
-int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+static void bnxt_hwrm_set_eee(struct bnxt *bp,
+			      struct hwrm_port_phy_cfg_input *req)
+{
+	struct ethtool_eee *eee = &bp->eee;
+
+	if (eee->eee_enabled) {
+		u16 eee_speeds;
+		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
+
+		if (eee->tx_lpi_enabled)
+			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
+		else
+			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
+
+		req->flags |= cpu_to_le32(flags);
+		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
+		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
+		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
+	} else {
+		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
+	}
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
 {
 	struct hwrm_port_phy_cfg_input req = {0};
 
@@ -4637,14 +4923,57 @@
 		bnxt_hwrm_set_pause_common(bp, &req);
 
 	bnxt_hwrm_set_link_common(bp, &req);
+
+	if (set_eee)
+		bnxt_hwrm_set_eee(bp, &req);
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
 
+static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
+{
+	struct hwrm_port_phy_cfg_input req = {0};
+
+	if (BNXT_VF(bp))
+		return 0;
+
+	if (pci_num_vf(bp->pdev))
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static bool bnxt_eee_config_ok(struct bnxt *bp)
+{
+	struct ethtool_eee *eee = &bp->eee;
+	struct bnxt_link_info *link_info = &bp->link_info;
+
+	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+		return true;
+
+	if (eee->eee_enabled) {
+		u32 advertising =
+			_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+
+		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+			eee->eee_enabled = 0;
+			return false;
+		}
+		if (eee->advertised & ~advertising) {
+			eee->advertised = advertising & eee->supported;
+			return false;
+		}
+	}
+	return true;
+}
+
 static int bnxt_update_phy_setting(struct bnxt *bp)
 {
 	int rc;
 	bool update_link = false;
 	bool update_pause = false;
+	bool update_eee = false;
 	struct bnxt_link_info *link_info = &bp->link_info;
 
 	rc = bnxt_update_link(bp, true);
@@ -4654,7 +4983,8 @@
 		return rc;
 	}
 	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
-	    link_info->auto_pause_setting != link_info->req_flow_ctrl)
+	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
+	    link_info->req_flow_ctrl)
 		update_pause = true;
 	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
 	    link_info->force_pause_setting != link_info->req_flow_ctrl)
@@ -4673,8 +5003,11 @@
 			update_link = true;
 	}
 
+	if (!bnxt_eee_config_ok(bp))
+		update_eee = true;
+
 	if (update_link)
-		rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
 	else if (update_pause)
 		rc = bnxt_hwrm_set_pause(bp);
 	if (rc) {
@@ -4765,7 +5098,8 @@
 	/* Enable TX queues */
 	bnxt_tx_enable(bp);
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
-	bnxt_update_link(bp, true);
+	/* Poll link status and check for SFP+ module status */
+	bnxt_get_port_module_status(bp);
 
 	return 0;
 
@@ -4865,6 +5199,7 @@
 	struct bnxt *bp = netdev_priv(dev);
 
 	bnxt_close_nic(bp, true, true);
+	bnxt_hwrm_shutdown_link(bp);
 	return 0;
 }
 
@@ -5346,6 +5681,9 @@
 		rtnl_unlock();
 	}
 
+	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
+		bnxt_get_port_module_status(bp);
+
 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
 		bnxt_hwrm_port_qstats(bp);
 
@@ -5476,10 +5814,9 @@
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-#ifdef CONFIG_BNXT_SRIOV
-	if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
-		return -EADDRNOTAVAIL;
-#endif
+	rc = bnxt_approve_mac(bp, addr->sa_data);
+	if (rc)
+		return rc;
 
 	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
 		return 0;
@@ -5814,6 +6151,13 @@
 	int rc = 0;
 	struct bnxt_link_info *link_info = &bp->link_info;
 
+	rc = bnxt_hwrm_phy_qcaps(bp);
+	if (rc) {
+		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
+			   rc);
+		return rc;
+	}
+
 	rc = bnxt_update_link(bp, false);
 	if (rc) {
 		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -5823,15 +6167,24 @@
 
 	/* initialize the ethtool setting copy with NVM settings */
 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
-		link_info->autoneg = BNXT_AUTONEG_SPEED |
-				     BNXT_AUTONEG_FLOW_CTRL;
+		link_info->autoneg = BNXT_AUTONEG_SPEED;
+		if (bp->hwrm_spec_code >= 0x10201) {
+			if (link_info->auto_pause_setting &
+			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
+				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+		} else {
+			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+		}
 		link_info->advertising = link_info->auto_link_speeds;
-		link_info->req_flow_ctrl = link_info->auto_pause_setting;
 	} else {
 		link_info->req_link_speed = link_info->force_link_speed;
 		link_info->req_duplex = link_info->duplex_setting;
-		link_info->req_flow_ctrl = link_info->force_pause_setting;
 	}
+	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+		link_info->req_flow_ctrl =
+			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
+	else
+		link_info->req_flow_ctrl = link_info->force_pause_setting;
 	return rc;
 }
 
@@ -5906,6 +6259,22 @@
 	return rc;
 }
 
+static void bnxt_parse_log_pcie_link(struct bnxt *bp)
+{
+	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+
+	if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
+	else
+		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
+			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+			    "Unknown", width);
+}
+
 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int version_printed;
@@ -5943,14 +6312,19 @@
 			   NETIF_F_TSO | NETIF_F_TSO6 |
 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
 			   NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
-			   NETIF_F_RXHASH |
+			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
 			   NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
 
 	dev->hw_enc_features =
 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
 			NETIF_F_TSO | NETIF_F_TSO6 |
 			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
-			NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+			NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+			NETIF_F_GSO_PARTIAL;
+	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+				    NETIF_F_GSO_GRE_CSUM;
 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
 			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
@@ -6021,6 +6395,8 @@
 		    board_info[ent->driver_data].name,
 		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
 
+	bnxt_parse_log_pcie_link(bp);
+
 	return 0;
 
 init_err:
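The HWRM timeout rework in bnxt_hwrm_do_send_msg() above trades a coarse poll for a fine one: instead of up to `timeout` sleeps of 600-800 us, it now performs `timeout * 40` sleeps of 25-40 us, so the total budget is still at least `timeout` milliseconds while a command that completes quickly is noticed within tens of microseconds instead of close to a millisecond. A rough standalone sketch of that polling pattern (poll_ready() is a made-up stand-in for the driver's "response length updated" check, not a real function):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Made-up stand-in: pretend the device responds after 100 polls. */
static int polls_until_ready = 100;

static bool poll_ready(void)
{
	return --polls_until_ready <= 0;
}

/* Wait up to roughly timeout_ms milliseconds, polling every ~25 us. */
static int wait_for_response(int timeout_ms)
{
	int tmo_count = timeout_ms * 40;	/* ~40 polls per millisecond of budget */

	for (int i = 0; i < tmo_count; i++) {
		if (poll_ready())
			return 0;
		usleep(25);	/* the driver uses usleep_range(25, 40) */
	}
	return -1;		/* timed out */
}

int main(void)
{
	printf("result: %d\n", wait_for_response(500));
	return 0;
}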
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index ec04c47..2824d65 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -11,7 +11,7 @@
 #define BNXT_H
 
 #define DRV_MODULE_NAME		"bnxt_en"
-#define DRV_MODULE_VERSION	"1.0.0"
+#define DRV_MODULE_VERSION	"1.2.0"
 
 #define DRV_VER_MAJ	1
 #define DRV_VER_MIN	0
@@ -407,6 +407,15 @@
 
 #define BNXT_PAGE_SIZE	(1 << BNXT_PAGE_SHIFT)
 
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNXT_RX_PAGE_SHIFT 15
+#else
+#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+
+#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+
 #define BNXT_MIN_PKT_SIZE	45
 
 #define BNXT_NUM_TESTS(bp)	0
@@ -416,10 +425,17 @@
 
 #define MAX_TPA		64
 
+#if (BNXT_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES	1
+#define MAX_RX_AGG_PAGES	4
+#define MAX_TX_PAGES	1
+#define MAX_CP_PAGES	8
+#else
 #define MAX_RX_PAGES	8
 #define MAX_RX_AGG_PAGES	32
 #define MAX_TX_PAGES	8
 #define MAX_CP_PAGES	64
+#endif
 
 #define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
 #define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
@@ -477,6 +493,7 @@
 #define RING_CMP(idx)		((idx) & bp->cp_ring_mask)
 #define NEXT_CMP(idx)		RING_CMP(ADV_RAW_CMP(idx, 1))
 
+#define BNXT_HWRM_MAX_REQ_LEN		(bp->hwrm_max_req_len)
 #define DFLT_HWRM_CMD_TIMEOUT		500
 #define HWRM_CMD_TIMEOUT		(bp->hwrm_cmd_timeout)
 #define HWRM_RESET_TIMEOUT		((HWRM_CMD_TIMEOUT) * 4)
@@ -505,6 +522,7 @@
 
 struct bnxt_sw_rx_agg_bd {
 	struct page		*page;
+	unsigned int		offset;
 	dma_addr_t		mapping;
 };
 
@@ -573,6 +591,7 @@
 	u16			rx_prod;
 	u16			rx_agg_prod;
 	u16			rx_sw_agg_prod;
+	u16			rx_next_cons;
 	void __iomem		*rx_doorbell;
 	void __iomem		*rx_agg_doorbell;
 
@@ -585,6 +604,9 @@
 	unsigned long		*rx_agg_bmap;
 	u16			rx_agg_bmap_size;
 
+	struct page		*rx_page;
+	unsigned int		rx_page_offset;
+
 	dma_addr_t		rx_desc_mapping[MAX_RX_PAGES];
 	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
 
@@ -622,6 +644,7 @@
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	atomic_t		poll_state;
 #endif
+	bool			in_reset;
 };
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -758,6 +781,7 @@
 };
 
 struct bnxt_link_info {
+	u8			phy_type;
 	u8			media_type;
 	u8			transceiver;
 	u8			phy_addr;
@@ -787,7 +811,7 @@
 #define BNXT_LINK_AUTO_ALLSPDS	PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
 #define BNXT_LINK_AUTO_ONESPD	PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
 #define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
-#define BNXT_LINK_AUTO_MSK	PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define BNXT_LINK_AUTO_MSK	PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
 #define PHY_VER_LEN		3
 	u8			phy_ver[PHY_VER_LEN];
 	u16			link_speed;
@@ -812,9 +836,9 @@
 #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
 #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
 	u16			lp_auto_link_speeds;
-	u16			auto_link_speed;
 	u16			force_link_speed;
 	u32			preemphasis;
+	u8			module_status;
 
 	/* copy of requested setting from ethtool cmd */
 	u8			autoneg;
@@ -825,6 +849,7 @@
 	u16			req_link_speed;
 	u32			advertising;
 	bool			force_link_chng;
+
 	/* a copy of phy_qcfg output used to report link
 	 * info to VF
 	 */
@@ -874,6 +899,7 @@
 	#define BNXT_FLAG_RFS		0x100
 	#define BNXT_FLAG_SHARED_RINGS	0x200
 	#define BNXT_FLAG_PORT_STATS	0x400
+	#define BNXT_FLAG_EEE_CAP	0x1000
 
 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
 					    BNXT_FLAG_RFS |		\
@@ -939,6 +965,7 @@
 
 	u32			msg_enable;
 
+	u32			hwrm_spec_code;
 	u16			hwrm_cmd_seq;
 	u32			hwrm_intr_seq_id;
 	void			*hwrm_cmd_resp_addr;
@@ -953,6 +980,7 @@
 	dma_addr_t		hw_tx_port_stats_map;
 	int			hw_port_stats_size;
 
+	u16			hwrm_max_req_len;
 	int			hwrm_cmd_timeout;
 	struct mutex		hwrm_cmd_lock;	/* serialize hwrm messages */
 	struct hwrm_ver_get_output	ver_resp;
@@ -989,6 +1017,7 @@
 #define BNXT_RST_RING_SP_EVENT		7
 #define BNXT_HWRM_PF_UNLOAD_SP_EVENT	8
 #define BNXT_PERIODIC_STATS_SP_EVENT	9
+#define BNXT_HWRM_PORT_MODULE_SP_EVENT	10
 
 	struct bnxt_pf_info	pf;
 #ifdef CONFIG_BNXT_SRIOV
@@ -1009,6 +1038,9 @@
 	int			ntp_fltr_count;
 
 	struct bnxt_link_info	link_info;
+	struct ethtool_eee	eee;
+	u32			lpi_tmr_lo;
+	u32			lpi_tmr_hi;
 };
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1098,6 +1130,16 @@
 
 #endif
 
+#define I2C_DEV_ADDR_A0				0xa0
+#define I2C_DEV_ADDR_A2				0xa2
+#define SFP_EEPROM_SFF_8472_COMP_ADDR		0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE		1
+#define SFF_MODULE_ID_SFP			0x3
+#define SFF_MODULE_ID_QSFP			0xc
+#define SFF_MODULE_ID_QSFP_PLUS			0xd
+#define SFF_MODULE_ID_QSFP28			0x11
+#define BNXT_MAX_PHY_I2C_RESP_SIZE		64
+
 void bnxt_set_ring_params(struct bnxt *);
 void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
 int _hwrm_send_message(struct bnxt *, void *, u32, int);
@@ -1106,7 +1148,7 @@
 int bnxt_hwrm_set_coal(struct bnxt *);
 int bnxt_hwrm_func_qcaps(struct bnxt *);
 int bnxt_hwrm_set_pause(struct bnxt *);
-int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
 int bnxt_open_nic(struct bnxt *, bool, bool);
 int bnxt_close_nic(struct bnxt *, bool, bool);
 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
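The new BNXT_RX_PAGE_SHIFT/BNXT_RX_PAGE_SIZE definitions above cap the RX aggregation buffer at 32 KiB because the RX BD length field is only 16 bits wide. On 4 KiB-page systems the cap is a no-op; on 64 KiB-page systems each page is carved into two 32 KiB chunks, which is what the rx_page/rx_page_offset bookkeeping added to bnxt_alloc_rx_page() implements. A small standalone sketch of the clamp (PAGE_SHIFT is hard-coded to a hypothetical 64 KiB page here purely for illustration):

#include <stdio.h>

/* Hypothetical 64 KiB page configuration, for illustration only. */
#define PAGE_SHIFT		16
#define PAGE_SIZE		(1 << PAGE_SHIFT)

/* The RX BD length field is 16-bit, so keep chunks below 64 KiB. */
#if (PAGE_SHIFT > 15)
#define BNXT_RX_PAGE_SHIFT	15
#else
#define BNXT_RX_PAGE_SHIFT	PAGE_SHIFT
#endif
#define BNXT_RX_PAGE_SIZE	(1 << BNXT_RX_PAGE_SHIFT)

int main(void)
{
	printf("PAGE_SIZE=%d BNXT_RX_PAGE_SIZE=%d chunks/page=%d\n",
	       PAGE_SIZE, BNXT_RX_PAGE_SIZE, PAGE_SIZE / BNXT_RX_PAGE_SIZE);
	return 0;
}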
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ada166..a38cb04 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -327,7 +327,11 @@
 	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
 	channel->max_combined = max_rx_rings;
 
-	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false);
+	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
+		max_rx_rings = 0;
+		max_tx_rings = 0;
+	}
+
 	tcs = netdev_get_num_tc(dev);
 	if (tcs > 1)
 		max_tx_rings /= tcs;
@@ -597,7 +601,7 @@
 	kfree(pkglog);
 }
 
-static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
 {
 	u32 speed_mask = 0;
 
@@ -698,10 +702,23 @@
 		if (link_info->phy_link_status == BNXT_LINK_LINK)
 			cmd->lp_advertising =
 				bnxt_fw_to_ethtool_lp_adv(link_info);
+		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+		if (!netif_carrier_ok(dev))
+			cmd->duplex = DUPLEX_UNKNOWN;
+		else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+			cmd->duplex = DUPLEX_FULL;
+		else
+			cmd->duplex = DUPLEX_HALF;
 	} else {
 		cmd->autoneg = AUTONEG_DISABLE;
 		cmd->advertising = 0;
+		ethtool_speed =
+			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
+		cmd->duplex = DUPLEX_HALF;
+		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
+			cmd->duplex = DUPLEX_FULL;
 	}
+	ethtool_cmd_speed_set(cmd, ethtool_speed);
 
 	cmd->port = PORT_NONE;
 	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
@@ -719,16 +736,8 @@
 			cmd->port = PORT_FIBRE;
 	}
 
-	if (link_info->phy_link_status == BNXT_LINK_LINK) {
-		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
-			cmd->duplex = DUPLEX_FULL;
-	} else {
-		cmd->duplex = DUPLEX_UNKNOWN;
-	}
-	ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
-	ethtool_cmd_speed_set(cmd, ethtool_speed);
 	if (link_info->transceiver ==
-		PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
+	    PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL)
 		cmd->transceiver = XCVR_INTERNAL;
 	else
 		cmd->transceiver = XCVR_EXTERNAL;
@@ -739,31 +748,52 @@
 
 static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
 {
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_link_info *link_info = &bp->link_info;
+	u16 support_spds = link_info->support_speeds;
+	u32 fw_speed = 0;
+
 	switch (ethtool_speed) {
 	case SPEED_100:
-		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
+			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+		break;
 	case SPEED_1000:
-		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+		if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
+			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+		break;
 	case SPEED_2500:
-		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
+			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+		break;
 	case SPEED_10000:
-		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+		if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
+			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+		break;
 	case SPEED_20000:
-		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+		if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
+			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+		break;
 	case SPEED_25000:
-		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+		if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
+			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+		break;
 	case SPEED_40000:
-		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+		if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
+			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+		break;
 	case SPEED_50000:
-		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+		if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
+			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+		break;
 	default:
 		netdev_err(dev, "unsupported speed!\n");
 		break;
 	}
-	return 0;
+	return fw_speed;
 }
 
-static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
 {
 	u16 fw_speed_mask = 0;
 
@@ -823,6 +853,16 @@
 		 */
 		set_pause = true;
 	} else {
+		u16 fw_speed;
+		u8 phy_type = link_info->phy_type;
+
+		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
+		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
+		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+			netdev_err(dev, "10GBase-T devices must autoneg\n");
+			rc = -EINVAL;
+			goto set_setting_exit;
+		}
 		/* TODO: currently don't support half duplex */
 		if (cmd->duplex == DUPLEX_HALF) {
 			netdev_err(dev, "HALF DUPLEX is not supported!\n");
@@ -833,14 +873,19 @@
 		if (cmd->duplex == DUPLEX_UNKNOWN)
 			cmd->duplex = DUPLEX_FULL;
 		speed = ethtool_cmd_speed(cmd);
-		link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+		fw_speed = bnxt_get_fw_speed(dev, speed);
+		if (!fw_speed) {
+			rc = -EINVAL;
+			goto set_setting_exit;
+		}
+		link_info->req_link_speed = fw_speed;
 		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
 		link_info->autoneg = 0;
 		link_info->advertising = 0;
 	}
 
 	if (netif_running(dev))
-		rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
 
 set_setting_exit:
 	return rc;
@@ -855,10 +900,8 @@
 	if (BNXT_VF(bp))
 		return;
 	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
-	epause->rx_pause =
-		((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0);
-	epause->tx_pause =
-		((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0);
+	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
+	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
 }
 
 static int bnxt_set_pauseparam(struct net_device *dev,
@@ -876,7 +919,9 @@
 			return -EINVAL;
 
 		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
-		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
+		if (bp->hwrm_spec_code >= 0x10201)
+			link_info->req_flow_ctrl =
+				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
 	} else {
 		/* when transition from auto pause to force pause,
 		 * force a link change
@@ -884,17 +929,13 @@
 		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
 			link_info->force_link_chng = true;
 		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
-		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+		link_info->req_flow_ctrl = 0;
 	}
 	if (epause->rx_pause)
 		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
-	else
-		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
 
 	if (epause->tx_pause)
 		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
-	else
-		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
 
 	if (netif_running(dev))
 		rc = bnxt_hwrm_set_pause(bp);
@@ -1383,6 +1424,199 @@
 				eeprom->len);
 }
 
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct ethtool_eee *eee = &bp->eee;
+	struct bnxt_link_info *link_info = &bp->link_info;
+	u32 advertising =
+		 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+	int rc = 0;
+
+	if (BNXT_VF(bp))
+		return 0;
+
+	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+		return -EOPNOTSUPP;
+
+	if (!edata->eee_enabled)
+		goto eee_ok;
+
+	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+		netdev_warn(dev, "EEE requires autoneg\n");
+		return -EINVAL;
+	}
+	if (edata->tx_lpi_enabled) {
+		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
+				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
+			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
+				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
+			return -EINVAL;
+		} else if (!bp->lpi_tmr_hi) {
+			edata->tx_lpi_timer = eee->tx_lpi_timer;
+		}
+	}
+	if (!edata->advertised) {
+		edata->advertised = advertising & eee->supported;
+	} else if (edata->advertised & ~advertising) {
+		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
+			    edata->advertised, advertising);
+		return -EINVAL;
+	}
+
+	eee->advertised = edata->advertised;
+	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
+	eee->tx_lpi_timer = edata->tx_lpi_timer;
+eee_ok:
+	eee->eee_enabled = edata->eee_enabled;
+
+	if (netif_running(dev))
+		rc = bnxt_hwrm_set_link_setting(bp, false, true);
+
+	return rc;
+}
+
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+		return -EOPNOTSUPP;
+
+	*edata = bp->eee;
+	if (!bp->eee.eee_enabled) {
+		/* Preserve tx_lpi_timer so that the last value will be used
+		 * by default when it is re-enabled.
+		 */
+		edata->advertised = 0;
+		edata->tx_lpi_enabled = 0;
+	}
+
+	if (!bp->eee.eee_active)
+		edata->lp_advertised = 0;
+
+	return 0;
+}
+
+static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
+					    u16 page_number, u16 start_addr,
+					    u16 data_length, u8 *buf)
+{
+	struct hwrm_port_phy_i2c_read_input req = {0};
+	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+	int rc, byte_offset = 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+	req.i2c_slave_addr = i2c_addr;
+	req.page_number = cpu_to_le16(page_number);
+	req.port_id = cpu_to_le16(bp->pf.port_id);
+	do {
+		u16 xfer_size;
+
+		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
+		data_length -= xfer_size;
+		req.page_offset = cpu_to_le16(start_addr + byte_offset);
+		req.data_length = xfer_size;
+		req.enables = cpu_to_le32(start_addr + byte_offset ?
+				 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
+		mutex_lock(&bp->hwrm_cmd_lock);
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (!rc)
+			memcpy(buf + byte_offset, output->data, xfer_size);
+		mutex_unlock(&bp->hwrm_cmd_lock);
+		byte_offset += xfer_size;
+	} while (!rc && data_length > 0);
+
+	return rc;
+}
+
+static int bnxt_get_module_info(struct net_device *dev,
+				struct ethtool_modinfo *modinfo)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_port_phy_i2c_read_input req = {0};
+	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+	int rc;
+
+	/* No point in going further if phy status indicates
+	 * module is not inserted or if it is powered down or
+	 * if it is of type 10GBase-T
+	 */
+	if (bp->link_info.module_status >
+		PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
+		return -EOPNOTSUPP;
+
+	/* This feature is not supported in older firmware versions */
+	if (bp->hwrm_spec_code < 0x10202)
+		return -EOPNOTSUPP;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+	req.i2c_slave_addr = I2C_DEV_ADDR_A0;
+	req.page_number = 0;
+	req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
+	req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
+	req.port_id = cpu_to_le16(bp->pf.port_id);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		u32 module_id = le32_to_cpu(output->data[0]);
+
+		switch (module_id) {
+		case SFF_MODULE_ID_SFP:
+			modinfo->type = ETH_MODULE_SFF_8472;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+			break;
+		case SFF_MODULE_ID_QSFP:
+		case SFF_MODULE_ID_QSFP_PLUS:
+			modinfo->type = ETH_MODULE_SFF_8436;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+			break;
+		case SFF_MODULE_ID_QSFP28:
+			modinfo->type = ETH_MODULE_SFF_8636;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+			break;
+		default:
+			rc = -EOPNOTSUPP;
+			break;
+		}
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_get_module_eeprom(struct net_device *dev,
+				  struct ethtool_eeprom *eeprom,
+				  u8 *data)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	u16  start = eeprom->offset, length = eeprom->len;
+	int rc;
+
+	memset(data, 0, eeprom->len);
+
+	/* Read A0 portion of the EEPROM */
+	if (start < ETH_MODULE_SFF_8436_LEN) {
+		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
+			length = ETH_MODULE_SFF_8436_LEN - start;
+		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
+						      start, length, data);
+		if (rc)
+			return rc;
+		start += length;
+		data += length;
+		length = eeprom->len - length;
+	}
+
+	/* Read A2 portion of the EEPROM */
+	if (length) {
+		start -= ETH_MODULE_SFF_8436_LEN;
+		bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
+						 length, data);
+	}
+	return rc;
+}
+
 const struct ethtool_ops bnxt_ethtool_ops = {
 	.get_settings		= bnxt_get_settings,
 	.set_settings		= bnxt_set_settings,
@@ -1411,4 +1645,8 @@
 	.get_eeprom             = bnxt_get_eeprom,
 	.set_eeprom		= bnxt_set_eeprom,
 	.get_link		= bnxt_get_link,
+	.get_eee		= bnxt_get_eee,
+	.set_eee		= bnxt_set_eee,
+	.get_module_info	= bnxt_get_module_info,
+	.get_module_eeprom	= bnxt_get_module_eeprom,
 };
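bnxt_get_module_eeprom() above splits a requested window across the two SFP EEPROM pages: the first ETH_MODULE_SFF_8436_LEN bytes are read through I2C address 0xa0 (A0) and anything beyond that through 0xa2 (A2). A standalone sketch of just the split arithmetic (split_read() is made up for illustration; in the driver each resulting range is passed to bnxt_read_sfp_module_eeprom_info()):

#include <stdio.h>

#define ETH_MODULE_SFF_8436_LEN	256	/* A0 page size in the ethtool UAPI */

/* Made-up helper: split an EEPROM window into its A0 and A2 portions. */
static void split_read(unsigned int start, unsigned int len)
{
	unsigned int a0_len = 0, a2_start = 0, a2_len;

	if (start < ETH_MODULE_SFF_8436_LEN) {
		a0_len = len;
		if (start + len > ETH_MODULE_SFF_8436_LEN)
			a0_len = ETH_MODULE_SFF_8436_LEN - start;
	}
	a2_len = len - a0_len;
	if (a2_len)
		a2_start = (start + a0_len) - ETH_MODULE_SFF_8436_LEN;

	printf("start=%u len=%u -> A0[%u..+%u] A2[%u..+%u]\n",
	       start, len, start, a0_len, a2_start, a2_len);
}

int main(void)
{
	split_read(0, 128);	/* entirely within A0 */
	split_read(192, 128);	/* straddles A0 and A2 */
	split_read(300, 64);	/* entirely within A2 */
	return 0;
}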
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 98fa81e..3abc03b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -12,6 +12,8 @@
 
 extern const struct ethtool_ops bnxt_ethtool_ops;
 
+u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
 u32 bnxt_fw_to_ethtool_speed(u16);
+u16 bnxt_get_fw_auto_link_speeds(u32);
 
 #endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index e0aac65..461675c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 4badbed..05e3c49 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -104,6 +104,7 @@
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE  (0x3UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD   (0x10UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD     (0x11UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD     (0x20UL << 0)
@@ -111,6 +112,7 @@
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR		   (0x30UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE      (0x33UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR	   (0xffUL << 0)
 	__le32 event_data2;
 	u8 opaque_v;
@@ -141,6 +143,7 @@
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
@@ -195,6 +198,9 @@
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
 };
@@ -237,6 +243,55 @@
 	__le32 event_data1;
 	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
 	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST    HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+	u8 timestamp_lo;
+	__le16 timestamp_hi;
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V      0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+	u8 timestamp_lo;
+	__le16 timestamp_hi;
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
 };
 
 /* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
@@ -363,6 +418,47 @@
 	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
 };
 
+/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
+struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V   0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+	u8 timestamp_lo;
+	__le16 timestamp_hi;
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK      0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT       0
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V		    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK    0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT     1
+	u8 timestamp_lo;
+	__le16 timestamp_hi;
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+};
+
 /* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
 struct hwrm_async_event_cmpl_hwrm_error {
 	__le16 type;
@@ -377,6 +473,7 @@
 	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
 	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST    HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
 	u8 opaque_v;
 	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V		    0x1UL
 	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK       0xfeUL
@@ -387,12 +484,12 @@
 	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
 };
 
-/* HW Resource Manager Specification 1.0.0 */
+/* HW Resource Manager Specification 1.2.2 */
 #define HWRM_VERSION_MAJOR	1
-#define HWRM_VERSION_MINOR	0
-#define HWRM_VERSION_UPDATE	0
+#define HWRM_VERSION_MINOR	2
+#define HWRM_VERSION_UPDATE	2
 
-#define HWRM_VERSION_STR	"1.0.0"
+#define HWRM_VERSION_STR	"1.2.2"
 /*
  * Following is the signature for HWRM message field that indicates not
  * applicable (All F's). Need to cast it the size of the field if needed.
@@ -444,7 +541,7 @@
 	#define HWRM_FUNC_BUF_RGTR				   (0x1fUL)
 	#define HWRM_PORT_PHY_CFG				   (0x20UL)
 	#define HWRM_PORT_MAC_CFG				   (0x21UL)
-	#define RESERVED2					   (0x22UL)
+	#define HWRM_PORT_TS_QUERY				   (0x22UL)
 	#define HWRM_PORT_QSTATS				   (0x23UL)
 	#define HWRM_PORT_LPBK_QSTATS				   (0x24UL)
 	#define HWRM_PORT_CLR_STATS				   (0x25UL)
@@ -452,6 +549,9 @@
 	#define HWRM_PORT_PHY_QCFG				   (0x27UL)
 	#define HWRM_PORT_MAC_QCFG				   (0x28UL)
 	#define HWRM_PORT_BLINK_LED				   (0x29UL)
+	#define HWRM_PORT_PHY_QCAPS				   (0x2aUL)
+	#define HWRM_PORT_PHY_I2C_WRITE			   (0x2bUL)
+	#define HWRM_PORT_PHY_I2C_READ				   (0x2cUL)
 	#define HWRM_QUEUE_QPORTCFG				   (0x30UL)
 	#define HWRM_QUEUE_QCFG				   (0x31UL)
 	#define HWRM_QUEUE_CFG					   (0x32UL)
@@ -531,6 +631,7 @@
 	__le16 unused_0[3];
 };
 
+/* Return Codes (8 bytes) */
 struct ret_codes {
 	__le16 error_code;
 	#define HWRM_ERR_CODE_SUCCESS				   (0x0UL)
@@ -875,10 +976,11 @@
 	#define FUNC_VF_CFG_REQ_ENABLES_MTU			    0x1UL
 	#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN		    0x2UL
 	#define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR		    0x4UL
+	#define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR		    0x8UL
 	__le16 mtu;
 	__le16 guest_vlan;
 	__le16 async_event_cr;
-	__le16 unused_0[3];
+	u8 dflt_mac_addr[6];
 };
 
 /* Output (16 bytes) */
@@ -917,7 +1019,8 @@
 	__le32 flags;
 	#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED	    0x1UL
 	#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING      0x2UL
-	u8 perm_mac_address[6];
+	#define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED		    0x4UL
+	u8 mac_address[6];
 	__le16 max_rsscos_ctx;
 	__le16 max_cmpl_rings;
 	__le16 max_tx_rings;
@@ -942,6 +1045,67 @@
 	u8 valid;
 };
 
+/* hwrm_func_qcfg */
+/* Input (24 bytes) */
+struct hwrm_func_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 fid;
+	__le16 unused_0[3];
+};
+
+/* Output (72 bytes) */
+struct hwrm_func_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 fid;
+	__le16 port_id;
+	__le16 vlan;
+	u8 unused_0;
+	u8 unused_1;
+	u8 mac_address[6];
+	__le16 pci_id;
+	__le16 alloc_rsscos_ctx;
+	__le16 alloc_cmpl_rings;
+	__le16 alloc_tx_rings;
+	__le16 alloc_rx_rings;
+	__le16 alloc_l2_ctx;
+	__le16 alloc_vnics;
+	__le16 mtu;
+	__le16 mru;
+	__le16 stat_ctx_id;
+	u8 port_partition_type;
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF		   (0x0UL << 0)
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS	   (0x1UL << 0)
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0	   (0x2UL << 0)
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5	   (0x3UL << 0)
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0	   (0x4UL << 0)
+	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN	   (0xffUL << 0)
+	u8 unused_2;
+	__le16 dflt_vnic_id;
+	u8 unused_3;
+	u8 unused_4;
+	__le32 min_bw;
+	__le32 max_bw;
+	u8 evb_mode;
+	#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB			   (0x0UL << 0)
+	#define FUNC_QCFG_RESP_EVB_MODE_VEB			   (0x1UL << 0)
+	#define FUNC_QCFG_RESP_EVB_MODE_VEPA			   (0x2UL << 0)
+	u8 unused_5;
+	__le16 unused_6;
+	__le32 alloc_mcast_filters;
+	__le32 alloc_hw_ring_grps;
+	u8 unused_7;
+	u8 unused_8;
+	u8 unused_9;
+	u8 valid;
+};
+
 /* hwrm_func_cfg */
 /* Input (88 bytes) */
 struct hwrm_func_cfg_input {
@@ -1171,6 +1335,7 @@
 	#define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN		   (0x0UL << 0)
 	#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER		   (0x1UL << 0)
 	#define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS		   (0xeUL << 0)
+	#define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS		   (0x12UL << 0)
 	#define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS		   (0x1dUL << 0)
 	#define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX		   (0x24UL << 0)
 	#define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD		   (0x2aUL << 0)
@@ -1302,6 +1467,7 @@
 	#define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN		   (0x0UL << 0)
 	#define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER		   (0x1UL << 0)
 	#define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS		   (0xeUL << 0)
+	#define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS		   (0x12UL << 0)
 	#define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS		   (0x1dUL << 0)
 	#define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX		   (0x24UL << 0)
 	#define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD		   (0x2aUL << 0)
@@ -1317,7 +1483,7 @@
 };
 
 /* hwrm_port_phy_cfg */
-/* Input (48 bytes) */
+/* Input (56 bytes) */
 struct hwrm_port_phy_cfg_input {
 	__le16 req_type;
 	__le16 cmpl_ring;
@@ -1329,6 +1495,10 @@
 	#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN		    0x2UL
 	#define PORT_PHY_CFG_REQ_FLAGS_FORCE			    0x4UL
 	#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG		    0x8UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE		    0x10UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE		    0x20UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE	    0x40UL
+	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE	    0x80UL
 	__le32 enables;
 	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE		    0x1UL
 	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX		    0x2UL
@@ -1339,6 +1509,8 @@
 	#define PORT_PHY_CFG_REQ_ENABLES_LPBK			    0x40UL
 	#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS		    0x80UL
 	#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE		    0x100UL
+	#define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK       0x200UL
+	#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER		    0x400UL
 	__le16 port_id;
 	__le16 force_link_speed;
 	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB	   (0x1UL << 0)
@@ -1350,12 +1522,14 @@
 	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB		   (0xfaUL << 0)
 	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB		   (0x190UL << 0)
 	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB		   (0x1f4UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB	   (0x3e8UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB		   (0xffffUL << 0)
 	u8 auto_mode;
 	#define PORT_PHY_CFG_REQ_AUTO_MODE_NONE		   (0x0UL << 0)
 	#define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS		   (0x1UL << 0)
 	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED		   (0x2UL << 0)
 	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW	   (0x3UL << 0)
-	#define PORT_PHY_CFG_REQ_AUTO_MODE_MASK		   (0x4UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK		   (0x4UL << 0)
 	u8 auto_duplex;
 	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF		   (0x0UL << 0)
 	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL		   (0x1UL << 0)
@@ -1363,6 +1537,7 @@
 	u8 auto_pause;
 	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX			    0x1UL
 	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX			    0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE	    0x4UL
 	u8 unused_0;
 	__le16 auto_link_speed;
 	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB		   (0x1UL << 0)
@@ -1374,6 +1549,8 @@
 	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB		   (0xfaUL << 0)
 	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB		   (0x190UL << 0)
 	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB		   (0x1f4UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB		   (0x3e8UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB		   (0xffffUL << 0)
 	__le16 auto_link_speed_mask;
 	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD      0x1UL
 	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB	    0x2UL
@@ -1386,6 +1563,9 @@
 	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB	    0x100UL
 	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB	    0x200UL
 	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB	    0x400UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB	    0x800UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD       0x1000UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB	    0x2000UL
 	u8 wirespeed;
 	#define PORT_PHY_CFG_REQ_WIRESPEED_OFF			   (0x0UL << 0)
 	#define PORT_PHY_CFG_REQ_WIRESPEED_ON			   (0x1UL << 0)
@@ -1398,7 +1578,20 @@
 	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX		    0x2UL
 	u8 unused_1;
 	__le32 preemphasis;
-	__le32 unused_2;
+	__le16 eee_link_speed_mask;
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1	    0x1UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB	    0x2UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2	    0x4UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB	    0x8UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3	    0x10UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4	    0x20UL
+	#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB	    0x40UL
+	u8 unused_2;
+	u8 unused_3;
+	__le32 tx_lpi_timer;
+	__le32 unused_4;
+	#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK		    0xffffffUL
+	#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT		    0
 };
 
 /* Output (16 bytes) */
@@ -1426,7 +1619,7 @@
 	__le16 unused_0[3];
 };
 
-/* Output (48 bytes) */
+/* Output (96 bytes) */
 struct hwrm_port_phy_qcfg_output {
 	__le16 error_code;
 	__le16 req_type;
@@ -1447,6 +1640,8 @@
 	#define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB		   (0xfaUL << 0)
 	#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB		   (0x190UL << 0)
 	#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB		   (0x1f4UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB		   (0x3e8UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB		   (0xffffUL << 0)
 	u8 duplex;
 	#define PORT_PHY_QCFG_RESP_DUPLEX_HALF			   (0x0UL << 0)
 	#define PORT_PHY_QCFG_RESP_DUPLEX_FULL			   (0x1UL << 0)
@@ -1465,6 +1660,9 @@
 	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB		    0x100UL
 	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB		    0x200UL
 	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB		    0x400UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB	    0x800UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD	    0x1000UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB		    0x2000UL
 	__le16 force_link_speed;
 	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB	   (0x1UL << 0)
 	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB	   (0xaUL << 0)
@@ -1475,15 +1673,18 @@
 	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB	   (0xfaUL << 0)
 	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB	   (0x190UL << 0)
 	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB	   (0x1f4UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB	   (0x3e8UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB	   (0xffffUL << 0)
 	u8 auto_mode;
 	#define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE		   (0x0UL << 0)
 	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS	   (0x1UL << 0)
 	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED		   (0x2UL << 0)
 	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW	   (0x3UL << 0)
-	#define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK		   (0x4UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK	   (0x4UL << 0)
 	u8 auto_pause;
 	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX		    0x1UL
 	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX		    0x2UL
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE	    0x4UL
 	__le16 auto_link_speed;
 	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB	   (0x1UL << 0)
 	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB		   (0xaUL << 0)
@@ -1494,6 +1695,8 @@
 	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB	   (0xfaUL << 0)
 	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB	   (0x190UL << 0)
 	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB	   (0x1f4UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB	   (0x3e8UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB	   (0xffffUL << 0)
 	__le16 auto_link_speed_mask;
 	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD    0x1UL
 	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB      0x2UL
@@ -1506,6 +1709,9 @@
 	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB       0x100UL
 	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB       0x200UL
 	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB       0x400UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB      0x800UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD     0x1000UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB       0x2000UL
 	u8 wirespeed;
 	#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF		   (0x0UL << 0)
 	#define PORT_PHY_QCFG_RESP_WIRESPEED_ON		   (0x1UL << 0)
@@ -1516,31 +1722,49 @@
 	u8 force_pause;
 	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX		    0x1UL
 	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX		    0x2UL
-	u8 reserved1;
+	u8 module_status;
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE		   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX	   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG       (0x2UL << 0)
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN	   (0x3UL << 0)
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED      (0x4UL << 0)
+	#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE    (0xffUL << 0)
 	__le32 preemphasis;
 	u8 phy_maj;
 	u8 phy_min;
 	u8 phy_bld;
 	u8 phy_type;
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4		   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN		   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR		   (0x1UL << 0)
 	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4		   (0x2UL << 0)
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4		   (0x3UL << 0)
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4		   (0x4UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR		   (0x3UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR		   (0x4UL << 0)
 	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2		   (0x5UL << 0)
-	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4		   (0x6UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX		   (0x6UL << 0)
 	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR		   (0x7UL << 0)
 	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET		   (0x8UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE		   (0x9UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY	   (0xaUL << 0)
 	u8 media_type;
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN		   (0x0UL << 0)
 	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP		   (0x1UL << 0)
 	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC		   (0x2UL << 0)
 	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE		   (0x3UL << 0)
-	u8 transceiver_type;
-	#define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
-	#define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
-	u8 phy_addr;
+	u8 xcvr_pkg_type;
+	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL    (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL    (0x2UL << 0)
+	u8 eee_config_phy_addr;
 	#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK		    0x1fUL
 	#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT		    0
-	u8 unused_2;
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED	    0x20UL
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE	    0x40UL
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI	    0x80UL
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK		    0xe0UL
+	#define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT		    5
+	u8 parallel_detect;
+	#define PORT_PHY_QCFG_RESP_PARALLEL_DETECT		    0x1UL
+	#define PORT_PHY_QCFG_RESP_RESERVED_MASK		    0xfeUL
+	#define PORT_PHY_QCFG_RESP_RESERVED_SFT		    1
 	__le16 link_partner_adv_speeds;
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB   0x2UL
@@ -1553,15 +1777,48 @@
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB    0x100UL
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB    0x200UL
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB    0x400UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB   0x800UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD  0x1000UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB    0x2000UL
 	u8 link_partner_adv_auto_mode;
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
-	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0)
 	u8 link_partner_adv_pause;
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX       0x1UL
 	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX       0x2UL
+	__le16 adv_eee_link_speed_mask;
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1   0x1UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB   0x2UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2   0x4UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB     0x8UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3   0x10UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4   0x20UL
+	#define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB    0x40UL
+	__le16 link_partner_adv_eee_link_speed_mask;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+	__le32 xcvr_identifier_type_tx_lpi_timer;
+	#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK		    0xffffffUL
+	#define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT		    0
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK       0xff000000UL
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT	    24
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN   (0x0UL << 24)
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP       (0x3UL << 24)
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP      (0xcUL << 24)
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS  (0xdUL << 24)
+	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28    (0x11UL << 24)
+	__le32 unused_1;
+	char phy_vendor_name[16];
+	char phy_vendor_partnumber[16];
+	__le32 unused_2;
 	u8 unused_3;
 	u8 unused_4;
 	u8 unused_5;
@@ -1569,7 +1826,7 @@
 };
 
 /* hwrm_port_mac_cfg */
-/* Input (32 bytes) */
+/* Input (40 bytes) */
 struct hwrm_port_mac_cfg_input {
 	__le16 req_type;
 	__le16 cmpl_ring;
@@ -1581,6 +1838,10 @@
 	#define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE       0x2UL
 	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE       0x4UL
 	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE	    0x8UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE    0x10UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE   0x20UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE    0x40UL
+	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE   0x80UL
 	__le32 enables;
 	#define PORT_MAC_CFG_REQ_ENABLES_IPG			    0x1UL
 	#define PORT_MAC_CFG_REQ_ENABLES_LPBK			    0x2UL
@@ -1588,6 +1849,8 @@
 	#define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI		    0x8UL
 	#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI    0x10UL
 	#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI	    0x20UL
+	#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+	#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
 	__le16 port_id;
 	u8 ipg;
 	u8 lpbk;
@@ -1598,6 +1861,9 @@
 	u8 lcos_map_pri;
 	u8 tunnel_pri2cos_map_pri;
 	u8 dscp2pri_map_pri;
+	__le16 rx_ts_capture_ptp_msg_type;
+	__le16 tx_ts_capture_ptp_msg_type;
+	__le32 unused_0;
 };
 
 /* Output (16 bytes) */
@@ -1754,7 +2020,113 @@
 	u8 valid;
 };
 
-/* hwrm_queue_qportcfg */
+/* hwrm_port_phy_qcaps */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcaps_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	__le16 unused_0[3];
+};
+
+/* Output (24 bytes) */
+struct hwrm_port_phy_qcaps_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 eee_supported;
+	#define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED		    0x1UL
+	#define PORT_PHY_QCAPS_RESP_RSVD1_MASK			    0xfeUL
+	#define PORT_PHY_QCAPS_RESP_RSVD1_SFT			    1
+	u8 unused_0;
+	__le16 supported_speeds_force_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+	__le16 supported_speeds_auto_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+	__le16 supported_speeds_eee_mode;
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB  0x8UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+	#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+	__le32 tx_lpi_timer_low;
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK	    0xffffffUL
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT	    0
+	#define PORT_PHY_QCAPS_RESP_RSVD2_MASK			    0xff000000UL
+	#define PORT_PHY_QCAPS_RESP_RSVD2_SFT			    24
+	__le32 valid_tx_lpi_timer_high;
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK	    0xffffffUL
+	#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT	    0
+	#define PORT_PHY_QCAPS_RESP_VALID_MASK			    0xff000000UL
+	#define PORT_PHY_QCAPS_RESP_VALID_SFT			    24
+};
+
+/* hwrm_port_phy_i2c_read */
+/* Input (40 bytes) */
+struct hwrm_port_phy_i2c_read_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	__le32 enables;
+	#define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET	    0x1UL
+	__le16 port_id;
+	u8 i2c_slave_addr;
+	u8 unused_0;
+	__le16 page_number;
+	__le16 page_offset;
+	u8 data_length;
+	u8 unused_1[7];
+};
+
+/* Output (80 bytes) */
+struct hwrm_port_phy_i2c_read_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 data[16];
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
 /* Input (24 bytes) */
 struct hwrm_queue_qportcfg_input {
 	__le16 req_type;
@@ -1766,6 +2138,7 @@
 	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH			    0x1UL
 	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
 	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST    QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
 	__le16 port_id;
 	__le16 unused_0;
 };
@@ -1838,6 +2211,7 @@
 	#define QUEUE_CFG_REQ_FLAGS_PATH			    0x1UL
 	#define QUEUE_CFG_REQ_FLAGS_PATH_TX			   (0x0UL << 0)
 	#define QUEUE_CFG_REQ_FLAGS_PATH_RX			   (0x1UL << 0)
+	#define QUEUE_CFG_REQ_FLAGS_PATH_LAST    QUEUE_CFG_REQ_FLAGS_PATH_RX
 	__le32 enables;
 	#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN			    0x1UL
 	#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE		    0x2UL
@@ -1875,6 +2249,7 @@
 	#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH		    0x1UL
 	#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
 	#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST    QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX
 	__le32 enables;
 	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED		    0x1UL
 	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED		    0x2UL
@@ -1952,6 +2327,7 @@
 	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH		    0x1UL
 	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
 	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST    QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX
 	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN		    0x2UL
 	__le32 enables;
 	u8 port_id;
@@ -2158,6 +2534,8 @@
 	#define VNIC_CFG_REQ_FLAGS_DEFAULT			    0x1UL
 	#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE		    0x2UL
 	#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE		    0x4UL
+	#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE		    0x8UL
+	#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE		    0x10UL
 	__le32 enables;
 	#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP		    0x1UL
 	#define VNIC_CFG_REQ_ENABLES_RSS_RULE			    0x2UL
@@ -2622,6 +3000,7 @@
 	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH		    0x1UL
 	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
 	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST    CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
 	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK		    0x2UL
 	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP		    0x4UL
 	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST	    0x8UL
@@ -2747,6 +3126,7 @@
 	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH		    0x1UL
 	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
 	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST    CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
 	#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP		    0x2UL
 	__le32 enables;
 	#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID		    0x1UL
@@ -3337,6 +3717,41 @@
 	u8 valid;
 };
 
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 embedded_proc_type;
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT		   (0x0UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT		   (0x1UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL	   (0x2UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE		   (0x3UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD		   (0x4UL << 0)
+	u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 selfrst_status;
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE	   (0x0UL << 0)
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP	   (0x1UL << 0)
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST     (0x2UL << 0)
+	u8 unused_0;
+	__le16 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
 /* hwrm_exec_fwd_resp */
 /* Input (128 bytes) */
 struct hwrm_exec_fwd_resp_input {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 43ef392..40a7b0e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0c5f510..363884d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -771,12 +771,8 @@
 			    PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
 				phy_qcfg_resp.link =
 					PORT_PHY_QCFG_RESP_LINK_LINK;
-				if (phy_qcfg_resp.auto_link_speed)
-					phy_qcfg_resp.link_speed =
-						phy_qcfg_resp.auto_link_speed;
-				else
-					phy_qcfg_resp.link_speed =
-						phy_qcfg_resp.force_link_speed;
+				phy_qcfg_resp.link_speed = cpu_to_le16(
+					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
 				phy_qcfg_resp.duplex =
 					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
 				phy_qcfg_resp.pause =
@@ -859,8 +855,8 @@
 	 *    default but the stored zero MAC will allow the VF user to change
 	 *    the random MAC address using ndo_set_mac_address() if he wants.
 	 */
-	if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
-		memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
+		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
 
 	/* overwrite netdev dev_addr with admin VF MAC */
 	if (is_valid_ether_addr(bp->vf.mac_addr))
@@ -869,6 +865,31 @@
 	mutex_unlock(&bp->hwrm_cmd_lock);
 }
 
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+	struct hwrm_func_vf_cfg_input req = {0};
+	int rc = 0;
+
+	if (!BNXT_VF(bp))
+		return 0;
+
+	if (bp->hwrm_spec_code < 0x10202) {
+		if (is_valid_ether_addr(bp->vf.mac_addr))
+			rc = -EADDRNOTAVAIL;
+		goto mac_done;
+	}
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+mac_done:
+	if (rc) {
+		rc = -EADDRNOTAVAIL;
+		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
+			    mac);
+	}
+	return rc;
+}
 #else
 
 void bnxt_sriov_disable(struct bnxt *bp)
@@ -883,4 +904,9 @@
 void bnxt_update_vf_mac(struct bnxt *bp)
 {
 }
+
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+	return 0;
+}
 #endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index c151280..0392670 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -1,6 +1,6 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -20,4 +20,5 @@
 void bnxt_sriov_disable(struct bnxt *);
 void bnxt_hwrm_exec_fwd_req(struct bnxt *);
 void bnxt_update_vf_mac(struct bnxt *);
+int bnxt_approve_mac(struct bnxt *, u8 *);
 #endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index b69dc58..b1d2ac8 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5350,7 +5350,10 @@
 	return 0;
 
 err1:
-	cp->free_resc(dev);
+	if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
+		cp->stop_hw(dev);
+	else
+		cp->free_resc(dev);
 	pci_dev_put(dev->pcidev);
 	return err;
 }
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6746fd0..5414563 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -104,8 +104,8 @@
 static inline void dmadesc_set(struct bcmgenet_priv *priv,
 			       void __iomem *d, dma_addr_t addr, u32 val)
 {
-	dmadesc_set_length_status(priv, d, val);
 	dmadesc_set_addr(priv, d, addr);
+	dmadesc_set_length_status(priv, d, val);
 }
 
 static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
@@ -878,7 +878,11 @@
 		else
 			p = (char *)priv;
 		p += s->stat_offset;
-		data[i] = *(u32 *)p;
+		if (sizeof(unsigned long) != sizeof(u32) &&
+		    s->stat_sizeof == sizeof(unsigned long))
+			data[i] = *(unsigned long *)p;
+		else
+			data[i] = *(u32 *)p;
 	}
 }
 
@@ -1171,6 +1175,7 @@
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
 	unsigned int pkts_compl = 0;
+	unsigned int bytes_compl = 0;
 	unsigned int c_index;
 	unsigned int txbds_ready;
 	unsigned int txbds_processed = 0;
@@ -1193,16 +1198,13 @@
 		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
 		if (tx_cb_ptr->skb) {
 			pkts_compl++;
-			dev->stats.tx_packets++;
-			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
 			dma_unmap_single(&dev->dev,
 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
 					 dma_unmap_len(tx_cb_ptr, dma_len),
 					 DMA_TO_DEVICE);
 			bcmgenet_free_cb(tx_cb_ptr);
 		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
-			dev->stats.tx_bytes +=
-				dma_unmap_len(tx_cb_ptr, dma_len);
 			dma_unmap_page(&dev->dev,
 				       dma_unmap_addr(tx_cb_ptr, dma_addr),
 				       dma_unmap_len(tx_cb_ptr, dma_len),
@@ -1220,8 +1222,13 @@
 	ring->free_bds += txbds_processed;
 	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
 
+	dev->stats.tx_packets += pkts_compl;
+	dev->stats.tx_bytes += bytes_compl;
+
+	txq = netdev_get_tx_queue(dev, ring->queue);
+	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
 	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
-		txq = netdev_get_tx_queue(dev, ring->queue);
 		if (netif_tx_queue_stopped(txq))
 			netif_tx_wake_queue(txq);
 	}
@@ -1296,7 +1303,7 @@
 
 	tx_cb_ptr->skb = skb;
 
-	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+	skb_len = skb_headlen(skb);
 
 	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
@@ -1330,6 +1337,7 @@
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct device *kdev = &priv->pdev->dev;
 	struct enet_cb *tx_cb_ptr;
+	unsigned int frag_size;
 	dma_addr_t mapping;
 	int ret;
 
@@ -1337,10 +1345,12 @@
 
 	if (unlikely(!tx_cb_ptr))
 		BUG();
+
 	tx_cb_ptr->skb = NULL;
 
-	mapping = skb_frag_dma_map(kdev, frag, 0,
-				   skb_frag_size(frag), DMA_TO_DEVICE);
+	frag_size = skb_frag_size(frag);
+
+	mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
 		priv->mib.tx_dma_failed++;
@@ -1350,10 +1360,10 @@
 	}
 
 	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
-	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+	dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
 
 	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
-		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+		    (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
 		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
 
 	return 0;
@@ -1446,15 +1456,19 @@
 	else
 		index -= 1;
 
-	nr_frags = skb_shinfo(skb)->nr_frags;
 	ring = &priv->tx_rings[index];
 	txq = netdev_get_tx_queue(dev, ring->queue);
 
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
 	spin_lock_irqsave(&ring->lock, flags);
-	if (ring->free_bds <= nr_frags + 1) {
-		netif_tx_stop_queue(txq);
-		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
-			   __func__, index, ring->queue);
+	if (ring->free_bds <= (nr_frags + 1)) {
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
+			netdev_err(dev,
+				   "%s: tx ring %d full when queue %d awake\n",
+				   __func__, index, ring->queue);
+		}
 		ret = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -1464,6 +1478,11 @@
 		goto out;
 	}
 
+	/* Retain how many bytes will be sent on the wire, without TSB inserted
+	 * by transmit checksum offload
+	 */
+	GENET_CB(skb)->bytes_sent = skb->len;
+
 	/* set the SKB transmit checksum */
 	if (priv->desc_64b_en) {
 		skb = bcmgenet_put_tx_csum(dev, skb);
@@ -1503,6 +1522,8 @@
 	ring->prod_index += nr_frags + 1;
 	ring->prod_index &= DMA_P_INDEX_MASK;
 
+	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
+
 	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
 		netif_tx_stop_queue(txq);
 
@@ -1722,7 +1743,7 @@
 	work_done = bcmgenet_desc_rx(ring, budget);
 
 	if (work_done < budget) {
-		napi_complete(napi);
+		napi_complete_done(napi, work_done);
 		ring->int_enable(ring);
 	}
 
@@ -2351,6 +2372,7 @@
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
 	int i;
+	struct netdev_queue *txq;
 
 	bcmgenet_fini_rx_napi(priv);
 	bcmgenet_fini_tx_napi(priv);
@@ -2365,6 +2387,14 @@
 		}
 	}
 
+	for (i = 0; i < priv->hw_params->tx_queues; i++) {
+		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+		netdev_tx_reset_queue(txq);
+	}
+
+	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
+	netdev_tx_reset_queue(txq);
+
 	bcmgenet_free_rx_buffers(priv);
 	kfree(priv->rx_cbs);
 	kfree(priv->tx_cbs);
@@ -2480,7 +2510,7 @@
 
 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
 			rx_ring->int_disable(rx_ring);
-			__napi_schedule(&rx_ring->napi);
+			__napi_schedule_irqoff(&rx_ring->napi);
 		}
 	}
 
@@ -2493,7 +2523,7 @@
 
 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
 			tx_ring->int_disable(tx_ring);
-			__napi_schedule(&tx_ring->napi);
+			__napi_schedule_irqoff(&tx_ring->napi);
 		}
 	}
 
@@ -2523,7 +2553,7 @@
 
 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
 			rx_ring->int_disable(rx_ring);
-			__napi_schedule(&rx_ring->napi);
+			__napi_schedule_irqoff(&rx_ring->napi);
 		}
 	}
 
@@ -2532,7 +2562,7 @@
 
 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
 			tx_ring->int_disable(tx_ring);
-			__napi_schedule(&tx_ring->napi);
+			__napi_schedule_irqoff(&tx_ring->napi);
 		}
 	}
 
@@ -3029,7 +3059,7 @@
 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
 	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	dev->stats.tx_errors++;
 
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 9673675..1e2dc34 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -531,6 +531,12 @@
 	u32		flags;
 };
 
+struct bcmgenet_skb_cb {
+	unsigned int bytes_sent;	/* bytes on the wire (no TSB) */
+};
+
+#define GENET_CB(skb)	((struct bcmgenet_skb_cb *)((skb)->cb))
+
 struct bcmgenet_tx_ring {
 	spinlock_t	lock;		/* ring lock */
 	struct napi_struct napi;	/* NAPI per tx queue */
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index eacc559..f1b8118 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2462,7 +2462,7 @@
 	spin_lock_irqsave(&sc->sbm_lock, flags);
 
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 
 	spin_unlock_irqrestore(&sc->sbm_lock, flags);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3010080..ff300f7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7383,7 +7383,7 @@
 
 static inline void tg3_netif_stop(struct tg3 *tp)
 {
-	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	netif_trans_update(tp->dev);	/* prevent tx timeout */
 	tg3_napi_disable(tp);
 	netif_carrier_off(tp->dev);
 	netif_tx_disable(tp->dev);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 6619178..cb07d95 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -61,8 +61,7 @@
 #define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 #define MACB_WOL_ENABLED		(0x1 << 1)
 
-/*
- * Graceful stop timeouts in us. We should allow up to
+/* Graceful stop timeouts in us. We should allow up to
  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
  */
 #define MACB_HALT_TIMEOUT	1230
@@ -130,9 +129,8 @@
 	writel_relaxed(value, bp->regs + offset);
 }
 
-/*
- * Find the CPU endianness by using the loopback bit of NCR register. When the
- * CPU is in big endian we need to program swaped mode for management
+/* Find the CPU endianness by using the loopback bit of NCR register. When the
+ * CPU is in big endian we need to program swapped mode for management
  * descriptor access.
  */
 static bool hw_is_native_io(void __iomem *addr)
@@ -189,7 +187,7 @@
 
 	pdata = dev_get_platdata(&bp->pdev->dev);
 
-	/* Check all 4 address register for vaild address */
+	/* Check all 4 address register for valid address */
 	for (i = 0; i < 4; i++) {
 		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
 		top = macb_or_gem_readl(bp, SA1T + i * 8);
@@ -297,7 +295,7 @@
 	ferr = DIV_ROUND_UP(ferr, rate / 100000);
 	if (ferr > 5)
 		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
-				rate);
+			    rate);
 
 	if (clk_set_rate(clk, rate_rounded))
 		netdev_err(dev, "adjusting tx_clk failed.\n");
@@ -386,7 +384,8 @@
 
 	pdata = dev_get_platdata(&bp->pdev->dev);
 	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
-		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
+					"phy int");
 		if (!ret) {
 			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
 			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
@@ -430,7 +429,7 @@
 	macb_writel(bp, NCR, MACB_BIT(MPE));
 
 	bp->mii_bus = mdiobus_alloc();
-	if (bp->mii_bus == NULL) {
+	if (!bp->mii_bus) {
 		err = -ENOMEM;
 		goto err_out;
 	}
@@ -439,9 +438,9 @@
 	bp->mii_bus->read = &macb_mdio_read;
 	bp->mii_bus->write = &macb_mdio_write;
 	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-		bp->pdev->name, bp->pdev->id);
+		 bp->pdev->name, bp->pdev->id);
 	bp->mii_bus->priv = bp;
-	bp->mii_bus->parent = &bp->dev->dev;
+	bp->mii_bus->parent = &bp->pdev->dev;
 	pdata = dev_get_platdata(&bp->pdev->dev);
 
 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -452,13 +451,15 @@
 		err = of_mdiobus_register(bp->mii_bus, np);
 
 		/* fallback to standard phy registration if no phy were
-		   found during dt phy registration */
+		 * found during dt phy registration
+		 */
 		if (!err && !phy_find_first(bp->mii_bus)) {
 			for (i = 0; i < PHY_MAX_ADDR; i++) {
 				struct phy_device *phydev;
 
 				phydev = mdiobus_scan(bp->mii_bus, i);
-				if (IS_ERR(phydev)) {
+				if (IS_ERR(phydev) &&
+				    PTR_ERR(phydev) != -ENODEV) {
 					err = PTR_ERR(phydev);
 					break;
 				}
@@ -499,7 +500,7 @@
 
 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
 
-	for(; p < end; p++, offset += 4)
+	for (; p < end; p++, offset += 4)
 		*p += bp->macb_reg_readl(bp, offset);
 }
 
@@ -567,8 +568,7 @@
 	/* Make sure nobody is trying to queue up new packets */
 	netif_tx_stop_all_queues(bp->dev);
 
-	/*
-	 * Stop transmission now
+	/* Stop transmission now
 	 * (in case we have just queued new packets)
 	 * macb/gem must be halted to write TBQP register
 	 */
@@ -576,8 +576,7 @@
 		/* Just complain for now, reinitializing TX path can be good */
 		netdev_err(bp->dev, "BUG: halt tx timed out\n");
 
-	/*
-	 * Treat frames in TX queue including the ones that caused the error.
+	/* Treat frames in TX queue including the ones that caused the error.
 	 * Free transmit buffers in upper layer.
 	 */
 	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
@@ -607,10 +606,9 @@
 				bp->stats.tx_bytes += skb->len;
 			}
 		} else {
-			/*
-			 * "Buffers exhausted mid-frame" errors may only happen
-			 * if the driver is buggy, so complain loudly about those.
-			 * Statistics are updated by hardware.
+			/* "Buffers exhausted mid-frame" errors may only happen
+			 * if the driver is buggy, so complain loudly about
+			 * those. Statistics are updated by hardware.
 			 */
 			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
 				netdev_err(bp->dev,
@@ -662,7 +660,7 @@
 		queue_writel(queue, ISR, MACB_BIT(TCOMP));
 
 	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
-		(unsigned long)status);
+		    (unsigned long)status);
 
 	head = queue->tx_head;
 	for (tail = queue->tx_tail; tail != head; tail++) {
@@ -722,7 +720,8 @@
 	struct sk_buff		*skb;
 	dma_addr_t		paddr;
 
-	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
+			  RX_RING_SIZE) > 0) {
 		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
 
 		/* Make hw descriptor updates visible to CPU */
@@ -730,10 +729,10 @@
 
 		bp->rx_prepared_head++;
 
-		if (bp->rx_skbuff[entry] == NULL) {
+		if (!bp->rx_skbuff[entry]) {
 			/* allocate sk_buff for this free entry in ring */
 			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
-			if (unlikely(skb == NULL)) {
+			if (unlikely(!skb)) {
 				netdev_err(bp->dev,
 					   "Unable to allocate sk_buff\n");
 				break;
@@ -741,7 +740,8 @@
 
 			/* now fill corresponding descriptor entry */
 			paddr = dma_map_single(&bp->pdev->dev, skb->data,
-					       bp->rx_buffer_size, DMA_FROM_DEVICE);
+					       bp->rx_buffer_size,
+					       DMA_FROM_DEVICE);
 			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
 				dev_kfree_skb(skb);
 				break;
@@ -766,7 +766,7 @@
 	wmb();
 
 	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
-		   bp->rx_prepared_head, bp->rx_tail);
+		    bp->rx_prepared_head, bp->rx_tail);
 }
 
 /* Mark DMA descriptors from begin up to and not including end as unused */
@@ -777,14 +777,14 @@
 
 	for (frag = begin; frag != end; frag++) {
 		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+
 		desc->addr &= ~MACB_BIT(RX_USED);
 	}
 
 	/* Make descriptor updates visible to hardware */
 	wmb();
 
-	/*
-	 * When this happens, the hardware stats registers for
+	/* When this happens, the hardware stats registers for
 	 * whatever caused this is updated, so we don't have to record
 	 * anything.
 	 */
@@ -880,11 +880,10 @@
 	len = desc->ctrl & bp->rx_frm_len_mask;
 
 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
-		macb_rx_ring_wrap(first_frag),
-		macb_rx_ring_wrap(last_frag), len);
+		    macb_rx_ring_wrap(first_frag),
+		    macb_rx_ring_wrap(last_frag), len);
 
-	/*
-	 * The ethernet header starts NET_IP_ALIGN bytes into the
+	/* The ethernet header starts NET_IP_ALIGN bytes into the
 	 * first buffer. Since the header is 14 bytes, this makes the
 	 * payload word-aligned.
 	 *
@@ -917,11 +916,15 @@
 		unsigned int frag_len = bp->rx_buffer_size;
 
 		if (offset + frag_len > len) {
-			BUG_ON(frag != last_frag);
+			if (unlikely(frag != last_frag)) {
+				dev_kfree_skb_any(skb);
+				return -1;
+			}
 			frag_len = len - offset;
 		}
 		skb_copy_to_linear_data_offset(skb, offset,
-				macb_rx_buffer(bp, frag), frag_len);
+					       macb_rx_buffer(bp, frag),
+					       frag_len);
 		offset += bp->rx_buffer_size;
 		desc = macb_rx_desc(bp, frag);
 		desc->addr &= ~MACB_BIT(RX_USED);
@@ -939,14 +942,29 @@
 	bp->stats.rx_packets++;
 	bp->stats.rx_bytes += skb->len;
 	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
-		   skb->len, skb->csum);
+		    skb->len, skb->csum);
 	netif_receive_skb(skb);
 
 	return 0;
 }
 
+static inline void macb_init_rx_ring(struct macb *bp)
+{
+	dma_addr_t addr;
+	int i;
+
+	addr = bp->rx_buffers_dma;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		bp->rx_ring[i].addr = addr;
+		bp->rx_ring[i].ctrl = 0;
+		addr += bp->rx_buffer_size;
+	}
+	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+}
+
 static int macb_rx(struct macb *bp, int budget)
 {
+	bool reset_rx_queue = false;
 	int received = 0;
 	unsigned int tail;
 	int first_frag = -1;
@@ -972,10 +990,18 @@
 
 		if (ctrl & MACB_BIT(RX_EOF)) {
 			int dropped;
-			BUG_ON(first_frag == -1);
+
+			if (unlikely(first_frag == -1)) {
+				reset_rx_queue = true;
+				continue;
+			}
 
 			dropped = macb_rx_frame(bp, first_frag, tail);
 			first_frag = -1;
+			if (unlikely(dropped < 0)) {
+				reset_rx_queue = true;
+				continue;
+			}
 			if (!dropped) {
 				received++;
 				budget--;
@@ -983,6 +1009,26 @@
 		}
 	}
 
+	if (unlikely(reset_rx_queue)) {
+		unsigned long flags;
+		u32 ctrl;
+
+		netdev_err(bp->dev, "RX queue corruption: reset it\n");
+
+		spin_lock_irqsave(&bp->lock, flags);
+
+		ctrl = macb_readl(bp, NCR);
+		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
+
+		macb_init_rx_ring(bp);
+		macb_writel(bp, RBQP, bp->rx_ring_dma);
+
+		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
+
+		spin_unlock_irqrestore(&bp->lock, flags);
+		return received;
+	}
+
 	if (first_frag != -1)
 		bp->rx_tail = first_frag;
 	else
@@ -1003,7 +1049,7 @@
 	work_done = 0;
 
 	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
-		   (unsigned long)status, budget);
+		    (unsigned long)status, budget);
 
 	work_done = bp->macbgem_ops.mog_rx(bp, budget);
 	if (work_done < budget) {
@@ -1053,8 +1099,7 @@
 			    (unsigned long)status);
 
 		if (status & MACB_RX_INT_FLAGS) {
-			/*
-			 * There's no point taking any more interrupts
+			/* There's no point taking any more interrupts
 			 * until we have processed the buffers. The
 			 * scheduling call may fail if the poll routine
 			 * is already scheduled, so disable interrupts
@@ -1083,8 +1128,7 @@
 		if (status & MACB_BIT(TCOMP))
 			macb_tx_interrupt(queue);
 
-		/*
-		 * Link change detection isn't possible with RMII, so we'll
+		/* Link change detection isn't possible with RMII, so we'll
 		 * add that if/when we get our hands on a full-blown MII PHY.
 		 */
 
@@ -1100,7 +1144,7 @@
 			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
 
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
-				macb_writel(bp, ISR, MACB_BIT(RXUBR));
+				queue_writel(queue, ISR, MACB_BIT(RXUBR));
 		}
 
 		if (status & MACB_BIT(ISR_ROVR)) {
@@ -1115,8 +1159,7 @@
 		}
 
 		if (status & MACB_BIT(HRESP)) {
-			/*
-			 * TODO: Reset the hardware, and maybe move the
+			/* TODO: Reset the hardware, and maybe move the
 			 * netdev_err to a lower-priority context as well
 			 * (work queue?)
 			 */
@@ -1135,8 +1178,7 @@
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling receive - used by netconsole and other diagnostic tools
+/* Polling receive - used by netconsole and other diagnostic tools
  * to allow network i/o with interrupts disabled.
  */
 static void macb_poll_controller(struct net_device *dev)
@@ -1222,7 +1264,7 @@
 	}
 
 	/* Should never happen */
-	if (unlikely(tx_skb == NULL)) {
+	if (unlikely(!tx_skb)) {
 		netdev_err(bp->dev, "BUG! empty skb!\n");
 		return 0;
 	}
@@ -1292,16 +1334,16 @@
 
 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
 	netdev_vdbg(bp->dev,
-		   "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
-		   queue_index, skb->len, skb->head, skb->data,
-		   skb_tail_pointer(skb), skb_end_pointer(skb));
+		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
+		    queue_index, skb->len, skb->head, skb->data,
+		    skb_tail_pointer(skb), skb_end_pointer(skb));
 	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
 		       skb->data, 16, true);
 #endif
 
 	/* Count how many TX buffer descriptors are needed to send this
 	 * socket buffer: skb fragments of jumbo frames may need to be
-	 * splitted into many buffer descriptors.
+	 * split into many buffer descriptors.
 	 */
 	count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
 	nr_frags = skb_shinfo(skb)->nr_frags;
@@ -1352,8 +1394,8 @@
 
 		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
 			netdev_dbg(bp->dev,
-				    "RX buffer must be multiple of %d bytes, expanding\n",
-				    RX_BUFFER_MULTIPLE);
+				   "RX buffer must be multiple of %d bytes, expanding\n",
+				   RX_BUFFER_MULTIPLE);
 			bp->rx_buffer_size =
 				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
 		}
@@ -1376,7 +1418,7 @@
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		skb = bp->rx_skbuff[i];
 
-		if (skb == NULL)
+		if (!skb)
 			continue;
 
 		desc = &bp->rx_ring[i];
@@ -1432,10 +1474,10 @@
 	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
 	if (!bp->rx_skbuff)
 		return -ENOMEM;
-	else
-		netdev_dbg(bp->dev,
-			   "Allocated %d RX struct sk_buff entries at %p\n",
-			   RX_RING_SIZE, bp->rx_skbuff);
+
+	netdev_dbg(bp->dev,
+		   "Allocated %d RX struct sk_buff entries at %p\n",
+		   RX_RING_SIZE, bp->rx_skbuff);
 	return 0;
 }
 
@@ -1448,10 +1490,10 @@
 					    &bp->rx_buffers_dma, GFP_KERNEL);
 	if (!bp->rx_buffers)
 		return -ENOMEM;
-	else
-		netdev_dbg(bp->dev,
-			   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
-			   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+
+	netdev_dbg(bp->dev,
+		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
 	return 0;
 }
 
@@ -1523,15 +1565,8 @@
 static void macb_init_rings(struct macb *bp)
 {
 	int i;
-	dma_addr_t addr;
 
-	addr = bp->rx_buffers_dma;
-	for (i = 0; i < RX_RING_SIZE; i++) {
-		bp->rx_ring[i].addr = addr;
-		bp->rx_ring[i].ctrl = 0;
-		addr += bp->rx_buffer_size;
-	}
-	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+	macb_init_rx_ring(bp);
 
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		bp->queues[0].tx_ring[i].addr = 0;
@@ -1549,8 +1584,7 @@
 	struct macb_queue *queue;
 	unsigned int q;
 
-	/*
-	 * Disable RX and TX (XXX: Should we halt the transmission
+	/* Disable RX and TX (XXX: Should we halt the transmission
 	 * more gracefully?)
 	 */
 	macb_writel(bp, NCR, 0);
@@ -1613,8 +1647,7 @@
 	return config;
 }
 
-/*
- * Get the DMA bus width field of the network configuration register that we
+/* Get the DMA bus width field of the network configuration register that we
  * should program.  We find the width from decoding the design configuration
  * register to find the maximum supported data bus width.
  */
@@ -1634,8 +1667,7 @@
 	}
 }
 
-/*
- * Configure the receive DMA engine
+/* Configure the receive DMA engine
  * - use the correct receive buffer size
  * - set best burst length for DMA operations
  *   (if not supported by FIFO, it will fallback to default)
@@ -1723,8 +1755,7 @@
 	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
 }
 
-/*
- * The hash address register is 64 bits long and takes up two
+/* The hash address register is 64 bits long and takes up two
  * locations in the memory map.  The least significant bits are stored
  * in EMAC_HSL and the most significant bits in EMAC_HSH.
  *
@@ -1764,9 +1795,7 @@
 	return 0;
 }
 
-/*
- * Return the hash index value for the specified address.
- */
+/* Return the hash index value for the specified address. */
 static int hash_get_index(__u8 *addr)
 {
 	int i, j, bitval;
@@ -1782,9 +1811,7 @@
 	return hash_index;
 }
 
-/*
- * Add multicast addresses to the internal multicast-hash table.
- */
+/* Add multicast addresses to the internal multicast-hash table. */
 static void macb_sethashtable(struct net_device *dev)
 {
 	struct netdev_hw_addr *ha;
@@ -1792,7 +1819,8 @@
 	unsigned int bitnr;
 	struct macb *bp = netdev_priv(dev);
 
-	mc_filter[0] = mc_filter[1] = 0;
+	mc_filter[0] = 0;
+	mc_filter[1] = 0;
 
 	netdev_for_each_mc_addr(ha, dev) {
 		bitnr = hash_get_index(ha->addr);
@@ -1803,9 +1831,7 @@
 	macb_or_gem_writel(bp, HRT, mc_filter[1]);
 }
 
-/*
- * Enable/Disable promiscuous and multicast modes.
- */
+/* Enable/Disable promiscuous and multicast modes. */
 static void macb_set_rx_mode(struct net_device *dev)
 {
 	unsigned long cfg;
@@ -2122,9 +2148,8 @@
 
 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
 		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
-	if (macb_is_gem(bp)) {
+	if (macb_is_gem(bp))
 		regs_buff[13] = gem_readl(bp, DMACFG);
-	}
 }
 
 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2247,11 +2272,11 @@
 	.ndo_set_features	= macb_set_features,
 };
 
-/*
- * Configure peripheral capabilities according to device tree
+/* Configure peripheral capabilities according to device tree
  * and integration options used
  */
-static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
+static void macb_configure_caps(struct macb *bp,
+				const struct macb_config *dt_conf)
 {
 	u32 dcfg;
 
@@ -2949,7 +2974,7 @@
 
 	mac = of_get_mac_address(np);
 	if (mac)
-		memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+		ether_addr_copy(bp->dev->dev_addr, mac);
 	else
 		macb_get_hwaddr(bp);
 
@@ -2957,9 +2982,11 @@
 	phy_node =  of_get_next_available_child(np, NULL);
 	if (phy_node) {
 		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
-		if (gpio_is_valid(gpio))
+
+		if (gpio_is_valid(gpio)) {
 			bp->reset_gpio = gpio_to_desc(gpio);
-		gpiod_direction_output(bp->reset_gpio, 1);
+			gpiod_direction_output(bp->reset_gpio, 1);
+		}
 	}
 	of_node_put(phy_node);
 
@@ -2979,29 +3006,36 @@
 	if (err)
 		goto err_out_free_netdev;
 
+	err = macb_mii_init(bp);
+	if (err)
+		goto err_out_free_netdev;
+
+	phydev = bp->phy_dev;
+
+	netif_carrier_off(dev);
+
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-		goto err_out_unregister_netdev;
+		goto err_out_unregister_mdio;
 	}
 
-	err = macb_mii_init(bp);
-	if (err)
-		goto err_out_unregister_netdev;
-
-	netif_carrier_off(dev);
+	phy_attached_info(phydev);
 
 	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
 		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
 		    dev->base_addr, dev->irq, dev->dev_addr);
 
-	phydev = bp->phy_dev;
-	phy_attached_info(phydev);
-
 	return 0;
 
-err_out_unregister_netdev:
-	unregister_netdev(dev);
+err_out_unregister_mdio:
+	phy_disconnect(bp->phy_dev);
+	mdiobus_unregister(bp->mii_bus);
+	mdiobus_free(bp->mii_bus);
+
+	/* Shutdown the PHY if there is a GPIO reset */
+	if (bp->reset_gpio)
+		gpiod_set_value(bp->reset_gpio, 0);
 
 err_out_free_netdev:
 	free_netdev(dev);
@@ -3029,7 +3063,8 @@
 		mdiobus_free(bp->mii_bus);
 
 		/* Shutdown the PHY if there is a GPIO reset */
-		gpiod_set_value(bp->reset_gpio, 0);
+		if (bp->reset_gpio)
+			gpiod_set_value(bp->reset_gpio, 0);
 
 		unregister_netdev(dev);
 		clk_disable_unprepare(bp->tx_clk);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 34d269c..8de79ae 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2899,7 +2899,7 @@
 	if (status == IQ_SEND_STOP)
 		stop_q(lio->netdev, q_idx);
 
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 
 	stats->tx_done++;
 	stats->tx_tot_bytes += skb->len;
@@ -2928,7 +2928,7 @@
 	netif_info(lio, tx_err, lio->netdev,
 		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
 		   netdev->stats.tx_dropped);
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 	txqs_wake(netdev);
 }
 
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index c177c7c..388cd79 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1320,7 +1320,7 @@
 	/* Ring the bell.  */
 	cvmx_write_csr(p->mix + MIX_ORING2, 1);
 
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 	rv = NETDEV_TX_OK;
 out:
 	octeon_mgmt_update_tx_stats(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index bfee298..a19e73f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1442,7 +1442,7 @@
 
 	nicvf_stop(nic->netdev);
 	nicvf_open(nic->netdev);
-	nic->netdev->trans_start = jiffies;
+	netif_trans_update(nic->netdev);
 }
 
 static int nicvf_config_loopback(struct nicvf *nic,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index fa05e34..06b819d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -533,6 +533,7 @@
 		nicvf_config_vlan_stripping(nic, nic->netdev->features);
 
 	/* Enable Receive queue */
+	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
 	rq_cfg.ena = 1;
 	rq_cfg.tcp_ena = 0;
 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
@@ -565,6 +566,7 @@
 			      qidx, (u64)(cq->dmem.phys_base));
 
 	/* Enable Completion queue */
+	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
 	cq_cfg.ena = 1;
 	cq_cfg.reset = 0;
 	cq_cfg.caching = 0;
@@ -613,6 +615,7 @@
 			      qidx, (u64)(sq->dmem.phys_base));
 
 	/* Enable send queue  & set queue size */
+	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
 	sq_cfg.ena = 1;
 	sq_cfg.reset = 0;
 	sq_cfg.ldwb = 0;
@@ -649,6 +652,7 @@
 
 	/* Enable RBDR  & set queue size */
 	/* Buffer size should be in multiples of 128 bytes */
+	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
 	rbdr_cfg.ena = 1;
 	rbdr_cfg.reset = 0;
 	rbdr_cfg.ldwb = 0;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9679515..d20539a 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1011,10 +1011,11 @@
 		}
 
 		lmac++;
-		if (lmac == MAX_LMAC_PER_BGX)
+		if (lmac == MAX_LMAC_PER_BGX) {
+			of_node_put(node);
 			break;
+		}
 	}
-	of_node_put(node);
 	return 0;
 
 defer:
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 526ea74..86f467a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1664,8 +1664,7 @@
 	struct cmdQ *q = &sge->cmdQ[qid];
 	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
 
-	if (!spin_trylock(&q->lock))
-		return NETDEV_TX_LOCKED;
+	spin_lock(&q->lock);
 
 	reclaim_completed_tx(sge, q);
 
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 60908ea..43da891 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -576,7 +576,7 @@
 	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
 	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
 	u8 cpus[SGE_QSETS + 1];
-	u16 rspq_map[RSS_TABLE_SIZE];
+	u16 rspq_map[RSS_TABLE_SIZE + 1];
 
 	for (i = 0; i < SGE_QSETS; ++i)
 		cpus[i] = i;
@@ -586,6 +586,7 @@
 		rspq_map[i] = i % nq0;
 		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
 	}
+	rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
 
 	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
 		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 984a3cc..b4fceb9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -324,7 +324,9 @@
 	unsigned int sf_fw_start;         /* start of FW image in flash */
 
 	unsigned int fw_vers;
+	unsigned int bs_vers;		/* bootstrap version */
 	unsigned int tp_vers;
+	unsigned int er_vers;		/* expansion ROM version */
 	u8 api_vers[7];
 
 	unsigned short mtus[NMTUS];
@@ -357,6 +359,34 @@
 	unsigned int idma_warn[2];	/* time to warning in HZ */
 };
 
+/* Firmware Mailbox Command/Reply log.  All values are in Host-Endian format.
+ * The access and execute times are signed in order to accommodate negative
+ * error returns.
+ */
+struct mbox_cmd {
+	u64 cmd[MBOX_LEN / 8];		/* a Firmware Mailbox Command/Reply */
+	u64 timestamp;			/* OS-dependent timestamp */
+	u32 seqno;			/* sequence number */
+	s16 access;			/* time (ms) to access mailbox */
+	s16 execute;			/* time (ms) to execute */
+};
+
+struct mbox_cmd_log {
+	unsigned int size;		/* number of entries in the log */
+	unsigned int cursor;		/* next position in the log to write */
+	u32 seqno;			/* next sequence number */
+	/* variable length mailbox command log starts here */
+};
+
+/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
+ * return a pointer to the specified entry.
+ */
+static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
+						  unsigned int entry_idx)
+{
+	return &((struct mbox_cmd *)&(log)[1])[entry_idx];
+}
+
 #include "t4fw_api.h"
 
 #define FW_VERSION(chip) ( \
@@ -394,6 +424,7 @@
 	unsigned char  fc;               /* actual link flow control */
 	unsigned char  autoneg;          /* autonegotiating? */
 	unsigned char  link_ok;          /* link up? */
+	unsigned char  link_down_rc;     /* link down reason */
 };
 
 #define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
@@ -731,6 +762,7 @@
 	u32 t4_bar0;
 	struct pci_dev *pdev;
 	struct device *pdev_dev;
+	const char *name;
 	unsigned int mbox;
 	unsigned int pf;
 	unsigned int flags;
@@ -776,6 +808,10 @@
 	struct work_struct db_drop_task;
 	bool tid_release_task_busy;
 
+	/* support for mailbox command/reply logging */
+#define T4_OS_LOG_MBOX_CMDS 256
+	struct mbox_cmd_log *mbox_log;
+
 	struct dentry *debugfs_root;
 	bool use_bd;     /* Use SGE Back Door intfc for reading SGE Contexts */
 	bool trace_rss;	/* 1 implies that different RSS flit per filter is
@@ -1306,6 +1342,7 @@
 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
 int t4_check_fw_version(struct adapter *adap);
 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_bs_version(struct adapter *adapter, u32 *vers);
 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
 int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
@@ -1329,6 +1366,8 @@
 int t4_init_tp_params(struct adapter *adap);
 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
 int t4_init_rss_mode(struct adapter *adap, int mbox);
+int t4_init_portinfo(struct port_info *pi, int mbox,
+		     int port, int pf, int vf, u8 mac[]);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
@@ -1451,6 +1490,9 @@
 	       unsigned int mmd, unsigned int reg, u16 *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
 	       unsigned int fl0id, unsigned int fl1id);
@@ -1461,6 +1503,7 @@
 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int eqid);
 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
 void t4_db_full(struct adapter *adapter);
 void t4_db_dropped(struct adapter *adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 052c660..6ee2ed3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -253,7 +253,7 @@
 {
 	const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
 	int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
-	struct net_device *dev = adap->port[port];
+	struct net_device *dev = adap->port[adap->chan_map[port]];
 	struct port_info *pi = netdev_priv(dev);
 	struct port_dcb_info *dcb = &pi->dcb;
 	int dcb_type = pcmd->u.dcb.pgid.type;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0bb41e9..91fb508 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -1152,6 +1152,104 @@
 	.release = seq_release_private
 };
 
+/* Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * it's possible that we can catch things during a log update and therefore
+ * see partially corrupted log entries.  But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log.
+ */
+static int mboxlog_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	struct mbox_cmd_log *log = adapter->mbox_log;
+	struct mbox_cmd *entry;
+	int entry_idx, i;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(seq,
+			   "%10s  %15s  %5s  %5s  %s\n",
+			   "Seq#", "Tstamp", "Atime", "Etime",
+			   "Command/Reply");
+		return 0;
+	}
+
+	entry_idx = log->cursor + ((uintptr_t)v - 2);
+	if (entry_idx >= log->size)
+		entry_idx -= log->size;
+	entry = mbox_cmd_log_entry(log, entry_idx);
+
+	/* skip over unused entries */
+	if (entry->timestamp == 0)
+		return 0;
+
+	seq_printf(seq, "%10u  %15llu  %5d  %5d",
+		   entry->seqno, entry->timestamp,
+		   entry->access, entry->execute);
+	for (i = 0; i < MBOX_LEN / 8; i++) {
+		u64 flit = entry->cmd[i];
+		u32 hi = (u32)(flit >> 32);
+		u32 lo = (u32)flit;
+
+		seq_printf(seq, "  %08x %08x", hi, lo);
+	}
+	seq_puts(seq, "\n");
+	return 0;
+}
+
+static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
+{
+	struct adapter *adapter = seq->private;
+	struct mbox_cmd_log *log = adapter->mbox_log;
+
+	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
+{
+	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++*pos;
+	return mboxlog_get_idx(seq, *pos);
+}
+
+static void mboxlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mboxlog_seq_ops = {
+	.start = mboxlog_start,
+	.next  = mboxlog_next,
+	.stop  = mboxlog_stop,
+	.show  = mboxlog_show
+};
+
+static int mboxlog_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &mboxlog_seq_ops);
+
+	if (!res) {
+		struct seq_file *seq = file->private_data;
+
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations mboxlog_fops = {
+	.owner   = THIS_MODULE,
+	.open    = mboxlog_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
 static int mbox_show(struct seq_file *seq, void *v)
 {
 	static const char * const owner[] = { "none", "FW", "driver",
@@ -1572,6 +1670,7 @@
 	.owner   = THIS_MODULE,
 	.open    = mem_open,
 	.read    = flash_read,
+	.llseek  = default_llseek,
 };
 
 static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
@@ -3128,6 +3227,7 @@
 		{ "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
 		{ "clk", &clk_debugfs_fops, S_IRUSR, 0 },
 		{ "devlog", &devlog_fops, S_IRUSR, 0 },
+		{ "mboxlog", &mboxlog_fops, S_IRUSR, 0 },
 		{ "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
 		{ "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
 		{ "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d1e3f09..477db47 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -168,7 +168,8 @@
 static int dflt_msg_enable = DFLT_MSG_ENABLE;
 
 module_param(dflt_msg_enable, int, 0644);
-MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
+		 "deprecated parameter");
 
 /*
  * The driver uses the best interrupt scheme available on a platform in the
@@ -303,6 +304,22 @@
 }
 #endif /* CONFIG_CHELSIO_T4_DCB */
 
+int cxgb4_dcb_enabled(const struct net_device *dev)
+{
+#ifdef CONFIG_CHELSIO_T4_DCB
+	struct port_info *pi = netdev_priv(dev);
+
+	if (!pi->dcb.enabled)
+		return 0;
+
+	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
+#else
+	return 0;
+#endif
+}
+EXPORT_SYMBOL(cxgb4_dcb_enabled);
+
 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
 {
 	struct net_device *dev = adapter->port[port_id];
@@ -313,8 +330,10 @@
 			netif_carrier_on(dev);
 		else {
 #ifdef CONFIG_CHELSIO_T4_DCB
-			cxgb4_dcb_state_init(dev);
-			dcb_tx_queue_prio_enable(dev, false);
+			if (cxgb4_dcb_enabled(dev)) {
+				cxgb4_dcb_state_init(dev);
+				dcb_tx_queue_prio_enable(dev, false);
+			}
 #endif /* CONFIG_CHELSIO_T4_DCB */
 			netif_carrier_off(dev);
 		}
@@ -336,6 +355,17 @@
 		netdev_info(dev, "port module unplugged\n");
 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
 		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+		netdev_info(dev, "%s: unsupported port module inserted\n",
+			    dev->name);
+	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+		netdev_info(dev, "%s: unknown port module inserted\n",
+			    dev->name);
+	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+		netdev_info(dev, "%s: transceiver module error\n", dev->name);
+	else
+		netdev_info(dev, "%s: unknown module type %d inserted\n",
+			    dev->name, pi->mod_type);
 }
 
 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
@@ -482,28 +512,12 @@
 	return ret;
 }
 
-int cxgb4_dcb_enabled(const struct net_device *dev)
-{
-#ifdef CONFIG_CHELSIO_T4_DCB
-	struct port_info *pi = netdev_priv(dev);
-
-	if (!pi->dcb.enabled)
-		return 0;
-
-	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
-		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
-#else
-	return 0;
-#endif
-}
-EXPORT_SYMBOL(cxgb4_dcb_enabled);
-
 #ifdef CONFIG_CHELSIO_T4_DCB
 /* Handle a Data Center Bridging update message from the firmware. */
 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
 {
 	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
-	struct net_device *dev = adap->port[port];
+	struct net_device *dev = adap->port[adap->chan_map[port]];
 	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
 	int new_dcb_enabled;
 
@@ -633,7 +647,8 @@
 		    action == FW_PORT_ACTION_GET_PORT_INFO) {
 			int port = FW_PORT_CMD_PORTID_G(
 					be32_to_cpu(pcmd->op_to_portid));
-			struct net_device *dev = q->adap->port[port];
+			struct net_device *dev =
+				q->adap->port[q->adap->chan_map[port]];
 			int state_input = ((pcmd->u.info.dcbxdis_pkd &
 					    FW_PORT_CMD_DCBXDIS_F)
 					   ? CXGB4_DCB_INPUT_FW_DISABLED
@@ -3737,7 +3752,10 @@
 	 * is excessively mismatched relative to the driver.)
 	 */
 	t4_get_fw_version(adap, &adap->params.fw_vers);
+	t4_get_bs_version(adap, &adap->params.bs_vers);
 	t4_get_tp_version(adap, &adap->params.tp_vers);
+	t4_get_exprom_version(adap, &adap->params.er_vers);
+
 	ret = t4_check_fw_version(adap);
 	/* If firmware is too old (not supported by driver) force an update. */
 	if (ret)
@@ -4651,6 +4669,68 @@
 			 "suggested for optimal performance.\n");
 }
 
+/* Dump basic information about the adapter */
+static void print_adapter_info(struct adapter *adapter)
+{
+	/* Device information */
+	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
+		 adapter->params.vpd.id,
+		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
+	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
+		 adapter->params.vpd.sn, adapter->params.vpd.pn);
+
+	/* Firmware Version */
+	if (!adapter->params.fw_vers)
+		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
+	else
+		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
+			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
+			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
+			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
+			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
+
+	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
+	 * Firmware, so dev_info() is more appropriate here.)
+	 */
+	if (!adapter->params.bs_vers)
+		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
+	else
+		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
+			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
+			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
+			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
+			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
+
+	/* TP Microcode Version */
+	if (!adapter->params.tp_vers)
+		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
+	else
+		dev_info(adapter->pdev_dev,
+			 "TP Microcode version: %u.%u.%u.%u\n",
+			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
+			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
+			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
+			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
+
+	/* Expansion ROM version */
+	if (!adapter->params.er_vers)
+		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
+	else
+		dev_info(adapter->pdev_dev,
+			 "Expansion ROM version: %u.%u.%u.%u\n",
+			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
+			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
+			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
+			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
+
+	/* Software/Hardware configuration */
+	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
+		 is_offload(adapter) ? "R" : "",
+		 ((adapter->flags & USING_MSIX) ? "MSI-X" :
+		  (adapter->flags & USING_MSI) ? "MSI" : ""),
+		 is_offload(adapter) ? "Offload" : "non-Offload");
+}
+
 static void print_port_info(const struct net_device *dev)
 {
 	char buf[80];
@@ -4678,14 +4758,8 @@
 		--bufp;
 	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
 
-	netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
-		    adap->params.vpd.id,
-		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
-		    is_offload(adap) ? "R" : "",
-		    (adap->flags & USING_MSIX) ? " MSI-X" :
-		    (adap->flags & USING_MSI) ? " MSI" : "");
-	netdev_info(dev, "S/N: %s, P/N: %s\n",
-		    adap->params.vpd.sn, adap->params.vpd.pn);
+	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
+		    dev->name, adap->params.vpd.id, adap->name, buf);
 }
 
 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
@@ -4837,12 +4911,23 @@
 		goto out_free_adapter;
 	}
 
+	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+				    (sizeof(struct mbox_cmd) *
+				     T4_OS_LOG_MBOX_CMDS),
+				    GFP_KERNEL);
+	if (!adapter->mbox_log) {
+		err = -ENOMEM;
+		goto out_free_adapter;
+	}
+	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
+
 	/* PCI device has been enabled */
 	adapter->flags |= DEV_ENABLED;
 
 	adapter->regs = regs;
 	adapter->pdev = pdev;
 	adapter->pdev_dev = &pdev->dev;
+	adapter->name = pci_name(pdev);
 	adapter->mbox = func;
 	adapter->pf = func;
 	adapter->msg_enable = dflt_msg_enable;
@@ -5073,6 +5158,8 @@
 	if (is_offload(adapter))
 		attach_ulds(adapter);
 
+	print_adapter_info(adapter);
+
 sriov:
 #ifdef CONFIG_PCI_IOV
 	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
@@ -5092,6 +5179,7 @@
 	if (adapter->workq)
 		destroy_workqueue(adapter->workq);
 
+	kfree(adapter->mbox_log);
 	kfree(adapter);
  out_unmap_bar0:
 	iounmap(regs);
@@ -5158,6 +5246,7 @@
 			adapter->flags &= ~DEV_ENABLED;
 		}
 		pci_release_regions(pdev);
+		kfree(adapter->mbox_log);
 		synchronize_rcu();
 		kfree(adapter);
 	} else
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 13b144b..bad253b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2981,18 +2981,34 @@
 void t4_free_sge_resources(struct adapter *adap)
 {
 	int i;
-	struct sge_eth_rxq *eq = adap->sge.ethrxq;
-	struct sge_eth_txq *etq = adap->sge.ethtxq;
+	struct sge_eth_rxq *eq;
+	struct sge_eth_txq *etq;
+
+	/* stop all Rx queues in order to start them draining */
+	for (i = 0; i < adap->sge.ethqsets; i++) {
+		eq = &adap->sge.ethrxq[i];
+		if (eq->rspq.desc)
+			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+				   FW_IQ_TYPE_FL_INT_CAP,
+				   eq->rspq.cntxt_id,
+				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+				   0xffff);
+	}
 
 	/* clean up Ethernet Tx/Rx queues */
-	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+	for (i = 0; i < adap->sge.ethqsets; i++) {
+		eq = &adap->sge.ethrxq[i];
 		if (eq->rspq.desc)
 			free_rspq_fl(adap, &eq->rspq,
 				     eq->fl.size ? &eq->fl : NULL);
+
+		etq = &adap->sge.ethtxq[i];
 		if (etq->q.desc) {
 			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
 				       etq->q.cntxt_id);
+			__netif_tx_lock_bh(etq->txq);
 			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+			__netif_tx_unlock_bh(etq->txq);
 			kfree(etq->q.sdesc);
 			free_txq(adap, &etq->q);
 		}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cc1736b..a63addb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -224,18 +224,34 @@
 		  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
 }
 
-static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
+/**
+ *	t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
+ *	@adapter: the adapter
+ *	@cmd: the Firmware Mailbox Command or Reply
+ *	@size: command length in bytes
+ *	@access: the time (ms) needed to access the Firmware Mailbox
+ *	@execute: the time (ms) the command spent being executed
+ */
+static void t4_record_mbox(struct adapter *adapter,
+			   const __be64 *cmd, unsigned int size,
+			   int access, int execute)
 {
-	dev_err(adap->pdev_dev,
-		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
-		(unsigned long long)t4_read_reg64(adap, data_reg),
-		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
-		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
-		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
-		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
-		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
-		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
-		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
+	struct mbox_cmd_log *log = adapter->mbox_log;
+	struct mbox_cmd *entry;
+	int i;
+
+	entry = mbox_cmd_log_entry(log, log->cursor++);
+	if (log->cursor == log->size)
+		log->cursor = 0;
+
+	for (i = 0; i < size / 8; i++)
+		entry->cmd[i] = be64_to_cpu(cmd[i]);
+	while (i < MBOX_LEN / 8)
+		entry->cmd[i++] = 0;
+	entry->timestamp = jiffies;
+	entry->seqno = log->seqno++;
+	entry->access = access;
+	entry->execute = execute;
 }
 
 /**
@@ -268,12 +284,16 @@
 		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
 	};
 
+	u16 access = 0;
+	u16 execute = 0;
 	u32 v;
 	u64 res;
-	int i, ms, delay_idx;
+	int i, ms, delay_idx, ret;
 	const __be64 *p = cmd;
 	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
 	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
+	__be64 cmd_rpl[MBOX_LEN / 8];
+	u32 pcie_fw;
 
 	if ((size & 15) || size > MBOX_LEN)
 		return -EINVAL;
@@ -285,13 +305,24 @@
 	if (adap->pdev->error_state != pci_channel_io_normal)
 		return -EIO;
 
+	/* If we have a negative timeout, that implies that we can't sleep. */
+	if (timeout < 0) {
+		sleep_ok = false;
+		timeout = -timeout;
+	}
+
 	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
 	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
 		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
 
-	if (v != MBOX_OWNER_DRV)
-		return v ? -EBUSY : -ETIMEDOUT;
+	if (v != MBOX_OWNER_DRV) {
+		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+		t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+		return ret;
+	}
 
+	/* Copy in the new mailbox command and send it on its way ... */
+	t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
 	for (i = 0; i < size; i += 8)
 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
 
@@ -301,7 +332,10 @@
 	delay_idx = 0;
 	ms = delay[0];
 
-	for (i = 0; i < timeout; i += ms) {
+	for (i = 0;
+	     !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
+	     i < timeout;
+	     i += ms) {
 		if (sleep_ok) {
 			ms = delay[delay_idx];  /* last element may repeat */
 			if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -317,26 +351,31 @@
 				continue;
 			}
 
-			res = t4_read_reg64(adap, data_reg);
+			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
+			res = be64_to_cpu(cmd_rpl[0]);
+
 			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
 				fw_asrt(adap, data_reg);
 				res = FW_CMD_RETVAL_V(EIO);
 			} else if (rpl) {
-				get_mbox_rpl(adap, rpl, size / 8, data_reg);
+				memcpy(rpl, cmd_rpl, size);
 			}
 
-			if (FW_CMD_RETVAL_G((int)res))
-				dump_mbox(adap, mbox, data_reg);
 			t4_write_reg(adap, ctl_reg, 0);
+
+			execute = i + ms;
+			t4_record_mbox(adap, cmd_rpl,
+				       MBOX_LEN, access, execute);
 			return -FW_CMD_RETVAL_G((int)res);
 		}
 	}
 
-	dump_mbox(adap, mbox, data_reg);
+	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
+	t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
 	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
 		*(const u8 *)cmd, mbox);
 	t4_report_fw_error(adap);
-	return -ETIMEDOUT;
+	return ret;
 }
 
 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
@@ -2557,6 +2596,7 @@
 }
 
 #define EEPROM_STAT_ADDR   0x7bfc
+#define VPD_SIZE           0x800
 #define VPD_BASE           0x400
 #define VPD_BASE_OLD       0
 #define VPD_LEN            1024
@@ -2594,6 +2634,15 @@
 	if (!vpd)
 		return -ENOMEM;
 
+	/* We have two VPD data structures stored in the adapter VPD area.
+	 * By default, Linux calculates the size of the VPD area by traversing
+	 * the first VPD area at offset 0x0, so we need to tell the OS what
+	 * our real VPD size is.
+	 */
+	ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
+	if (ret < 0)
+		goto out;
+
 	/* Card information normally starts at VPD_BASE but early cards had
 	 * it at 0.
 	 */
@@ -2927,6 +2976,20 @@
 }
 
 /**
+ *	t4_get_bs_version - read the firmware bootstrap version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the FW Bootstrap version from flash.
+ */
+int t4_get_bs_version(struct adapter *adapter, u32 *vers)
+{
+	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
+			     offsetof(struct fw_hdr, fw_ver), 1,
+			     vers, 0);
+}
+
+/**
  *	t4_get_tp_version - read the TP microcode version
  *	@adapter: the adapter
  *	@vers: where to place the version
@@ -6940,6 +7003,39 @@
 }
 
 /**
+ *	t4_iq_stop - stop an ingress queue and its FLs
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queues
+ *	@vf: the VF owning the queues
+ *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ *	@iqid: ingress queue id
+ *	@fl0id: FL0 queue id or 0xffff if no attached FL0
+ *	@fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *	Stops an ingress queue and its associated FLs, if any.  This causes
+ *	any current or future data/messages destined for these queues to be
+ *	tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+				  FW_IQ_CMD_VFN_V(vf));
+	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+	c.iqid = cpu_to_be16(iqid);
+	c.fl0id = cpu_to_be16(fl0id);
+	c.fl1id = cpu_to_be16(fl1id);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
  *	t4_iq_free - free an ingress queue and its FLs
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
@@ -7046,52 +7142,122 @@
 }
 
 /**
- *	t4_handle_fw_rpl - process a FW reply message
+ *	t4_link_down_rc_str - return a string for a Link Down Reason Code
  *	@adap: the adapter
+ *	@link_down_rc: Link Down Reason Code
+ *
+ *	Returns a string representation of the Link Down Reason Code.
+ */
+static const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+	static const char * const reason[] = {
+		"Link Down",
+		"Remote Fault",
+		"Auto-negotiation Failure",
+		"Reserved",
+		"Insufficient Airflow",
+		"Unable To Determine Reason",
+		"No RX Signal Detected",
+		"Reserved",
+	};
+
+	if (link_down_rc >= ARRAY_SIZE(reason))
+		return "Bad Reason Code";
+
+	return reason[link_down_rc];
+}
+
+/**
+ *	t4_handle_get_port_info - process a FW reply message
+ *	@pi: the port info
  *	@rpl: start of the FW message
  *
- *	Processes a FW message, such as link state change messages.
+ *	Processes a GET_PORT_INFO FW reply message.
+ */
+void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+{
+	const struct fw_port_cmd *p = (const void *)rpl;
+	struct adapter *adap = pi->adapter;
+
+	/* link/module state change message */
+	int speed = 0, fc = 0;
+	struct link_config *lc;
+	u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
+	int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
+	u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
+
+	if (stat & FW_PORT_CMD_RXPAUSE_F)
+		fc |= PAUSE_RX;
+	if (stat & FW_PORT_CMD_TXPAUSE_F)
+		fc |= PAUSE_TX;
+	if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+		speed = 100;
+	else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+		speed = 1000;
+	else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+		speed = 10000;
+	else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+		speed = 40000;
+
+	lc = &pi->link_cfg;
+
+	if (mod != pi->mod_type) {
+		pi->mod_type = mod;
+		t4_os_portmod_changed(adap, pi->port_id);
+	}
+	if (link_ok != lc->link_ok || speed != lc->speed ||
+	    fc != lc->fc) {	/* something changed */
+		if (!link_ok && lc->link_ok) {
+			unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat);
+
+			lc->link_down_rc = rc;
+			dev_warn(adap->pdev_dev,
+				 "Port %d link down, reason: %s\n",
+				 pi->port_id, t4_link_down_rc_str(rc));
+		}
+		lc->link_ok = link_ok;
+		lc->speed = speed;
+		lc->fc = fc;
+		lc->supported = be16_to_cpu(p->u.info.pcap);
+		t4_os_link_changed(adap, pi->port_id, link_ok);
+	}
+}
+
+/**
+ *      t4_handle_fw_rpl - process a FW reply message
+ *      @adap: the adapter
+ *      @rpl: start of the FW message
+ *
+ *      Processes a FW message, such as link state change messages.
  */
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
 {
 	u8 opcode = *(const u8 *)rpl;
 
-	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
-		int speed = 0, fc = 0;
-		const struct fw_port_cmd *p = (void *)rpl;
+	/* This might be a port command ... this simplifies the following
+	 * conditionals ...  We can get away with pre-dereferencing
+	 * action_to_len16 because it's in the first 16 bytes and all messages
+	 * will be at least that long.
+	 */
+	const struct fw_port_cmd *p = (const void *)rpl;
+	unsigned int action =
+		FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
+
+	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+		int i;
 		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
-		int port = adap->chan_map[chan];
-		struct port_info *pi = adap2pinfo(adap, port);
-		struct link_config *lc = &pi->link_cfg;
-		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
-		int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
-		u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
+		struct port_info *pi = NULL;
 
-		if (stat & FW_PORT_CMD_RXPAUSE_F)
-			fc |= PAUSE_RX;
-		if (stat & FW_PORT_CMD_TXPAUSE_F)
-			fc |= PAUSE_TX;
-		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
-			speed = 100;
-		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
-			speed = 1000;
-		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
-			speed = 10000;
-		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
-			speed = 40000;
+		for_each_port(adap, i) {
+			pi = adap2pinfo(adap, i);
+			if (pi->tx_chan == chan)
+				break;
+		}
 
-		if (link_ok != lc->link_ok || speed != lc->speed ||
-		    fc != lc->fc) {                    /* something changed */
-			lc->link_ok = link_ok;
-			lc->speed = speed;
-			lc->fc = fc;
-			lc->supported = be16_to_cpu(p->u.info.pcap);
-			t4_os_link_changed(adap, port, link_ok);
-		}
-		if (mod != pi->mod_type) {
-			pi->mod_type = mod;
-			t4_os_portmod_changed(adap, port);
-		}
+		t4_handle_get_port_info(pi, rpl);
+	} else {
+		dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode);
+		return -EINVAL;
 	}
 	return 0;
 }
@@ -7611,61 +7777,74 @@
 	return 0;
 }
 
+/**
+ *	t4_init_portinfo - allocate a virtual interface and initialize port_info
+ *	@pi: the port_info
+ *	@mbox: mailbox to use for the FW command
+ *	@port: physical port associated with the VI
+ *	@pf: the PF owning the VI
+ *	@vf: the VF owning the VI
+ *	@mac: the MAC address of the VI
+ *
+ *	Allocates a virtual interface for the given physical port.  If @mac is
+ *	not %NULL it contains the MAC address of the VI as assigned by FW.
+ *	@mac should be large enough to hold an Ethernet address.
+ *	Returns < 0 on error.
+ */
+int t4_init_portinfo(struct port_info *pi, int mbox,
+		     int port, int pf, int vf, u8 mac[])
+{
+	int ret;
+	struct fw_port_cmd c;
+	unsigned int rss_size;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+				     FW_CMD_REQUEST_F | FW_CMD_READ_F |
+				     FW_PORT_CMD_PORTID_V(port));
+	c.action_to_len16 = cpu_to_be32(
+		FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+		FW_LEN16(c));
+	ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
+	if (ret)
+		return ret;
+
+	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
+	if (ret < 0)
+		return ret;
+
+	pi->viid = ret;
+	pi->tx_chan = port;
+	pi->lport = port;
+	pi->rss_size = rss_size;
+
+	ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
+	pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+		FW_PORT_CMD_MDIOADDR_G(ret) : -1;
+	pi->port_type = FW_PORT_CMD_PTYPE_G(ret);
+	pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+	init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap));
+	return 0;
+}
+
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
 {
 	u8 addr[6];
 	int ret, i, j = 0;
-	struct fw_port_cmd c;
-	struct fw_rss_vi_config_cmd rvc;
-
-	memset(&c, 0, sizeof(c));
-	memset(&rvc, 0, sizeof(rvc));
 
 	for_each_port(adap, i) {
-		unsigned int rss_size;
-		struct port_info *p = adap2pinfo(adap, i);
+		struct port_info *pi = adap2pinfo(adap, i);
 
 		while ((adap->params.portvec & (1 << j)) == 0)
 			j++;
 
-		c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
-					     FW_CMD_REQUEST_F | FW_CMD_READ_F |
-					     FW_PORT_CMD_PORTID_V(j));
-		c.action_to_len16 = cpu_to_be32(
-			FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
-			FW_LEN16(c));
-		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+		ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
 		if (ret)
 			return ret;
 
-		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
-		if (ret < 0)
-			return ret;
-
-		p->viid = ret;
-		p->tx_chan = j;
-		p->lport = j;
-		p->rss_size = rss_size;
 		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
 		adap->port[i]->dev_port = j;
-
-		ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
-		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
-			FW_PORT_CMD_MDIOADDR_G(ret) : -1;
-		p->port_type = FW_PORT_CMD_PTYPE_G(ret);
-		p->mod_type = FW_PORT_MOD_TYPE_NA;
-
-		rvc.op_to_viid =
-			cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
-				    FW_CMD_REQUEST_F | FW_CMD_READ_F |
-				    FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
-		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
-		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
-		if (ret)
-			return ret;
-		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
-
-		init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
 		j++;
 	}
 	return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 2fc60e8..7f59ca4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -220,6 +220,13 @@
 	FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
 	FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
 
+	/* Location of bootstrap firmware image in FLASH.
+	 */
+	FLASH_FWBOOTSTRAP_START_SEC = 27,
+	FLASH_FWBOOTSTRAP_NSECS = 1,
+	FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
+	FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+
 	/*
 	 * iSCSI persistent/crash information.
 	 */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 06bc2d2..a2cdfc1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -166,6 +166,7 @@
 	CH_PCI_ID_TABLE_FENTRY(0x5099),	/* Custom 2x40G QSFP */
 	CH_PCI_ID_TABLE_FENTRY(0x509a),	/* Custom T520-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x509b),	/* Custom T540-CR LOM */
+	CH_PCI_ID_TABLE_FENTRY(0x509c),	/* Custom T520-CR */
 
 	/* T6 adapters:
 	 */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 7ad6d4e..392d664 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2510,6 +2510,11 @@
 #define FW_PORT_CMD_PTYPE_G(x)	\
 	(((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M)
 
+#define FW_PORT_CMD_LINKDNRC_S		5
+#define FW_PORT_CMD_LINKDNRC_M		0x7
+#define FW_PORT_CMD_LINKDNRC_G(x)	\
+	(((x) >> FW_PORT_CMD_LINKDNRC_S) & FW_PORT_CMD_LINKDNRC_M)
+
 #define FW_PORT_CMD_MODTYPE_S		0
 #define FW_PORT_CMD_MODTYPE_M		0x1f
 #define FW_PORT_CMD_MODTYPE_V(x)	((x) << FW_PORT_CMD_MODTYPE_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 4a707c3..734dd77 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -387,6 +387,10 @@
 	/* various locks */
 	spinlock_t stats_lock;
 
+	/* support for mailbox command/reply logging */
+#define T4VF_OS_LOG_MBOX_CMDS 256
+	struct mbox_cmd_log *mbox_log;
+
 	/* list of MAC addresses in MPS Hash */
 	struct list_head mac_hlist;
 };
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1cc8a7a..04fc6f6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -74,7 +74,8 @@
 
 module_param(dflt_msg_enable, int, 0644);
 MODULE_PARM_DESC(dflt_msg_enable,
-		 "default adapter ethtool message level bitmap");
+		 "default adapter ethtool message level bitmap, "
+		 "deprecated parameter");
 
 /*
  * The driver uses the best interrupt scheme available on a platform in the
@@ -1703,6 +1704,105 @@
  */
 
 /*
+ * Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * it's possible that we can catch things during a log update and therefore
+ * see partially corrupted log entries.  But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log.  But as stated above, meh ...
+ */
+static int mboxlog_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	struct mbox_cmd_log *log = adapter->mbox_log;
+	struct mbox_cmd *entry;
+	int entry_idx, i;
+
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(seq,
+			   "%10s  %15s  %5s  %5s  %s\n",
+			   "Seq#", "Tstamp", "Atime", "Etime",
+			   "Command/Reply");
+		return 0;
+	}
+
+	entry_idx = log->cursor + ((uintptr_t)v - 2);
+	if (entry_idx >= log->size)
+		entry_idx -= log->size;
+	entry = mbox_cmd_log_entry(log, entry_idx);
+
+	/* skip over unused entries */
+	if (entry->timestamp == 0)
+		return 0;
+
+	seq_printf(seq, "%10u  %15llu  %5d  %5d",
+		   entry->seqno, entry->timestamp,
+		   entry->access, entry->execute);
+	for (i = 0; i < MBOX_LEN / 8; i++) {
+		u64 flit = entry->cmd[i];
+		u32 hi = (u32)(flit >> 32);
+		u32 lo = (u32)flit;
+
+		seq_printf(seq, "  %08x %08x", hi, lo);
+	}
+	seq_puts(seq, "\n");
+	return 0;
+}
+
+static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
+{
+	struct adapter *adapter = seq->private;
+	struct mbox_cmd_log *log = adapter->mbox_log;
+
+	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
+{
+	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++*pos;
+	return mboxlog_get_idx(seq, *pos);
+}
+
+static void mboxlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mboxlog_seq_ops = {
+	.start = mboxlog_start,
+	.next  = mboxlog_next,
+	.stop  = mboxlog_stop,
+	.show  = mboxlog_show
+};
+
+static int mboxlog_open(struct inode *inode, struct file *file)
+{
+	int res = seq_open(file, &mboxlog_seq_ops);
+
+	if (!res) {
+		struct seq_file *seq = file->private_data;
+
+		seq->private = inode->i_private;
+	}
+	return res;
+}
+
+static const struct file_operations mboxlog_fops = {
+	.owner   = THIS_MODULE,
+	.open    = mboxlog_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
  * Show SGE Queue Set information.  We display QPL Queues Sets per line.
  */
 #define QPL	4
@@ -2121,6 +2221,7 @@
 };
 
 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
+	{ "mboxlog",    S_IRUGO, &mboxlog_fops },
 	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
 	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
 	{ "resources",  S_IRUGO, &resources_proc_fops },
@@ -2663,6 +2764,16 @@
 	adapter->pdev = pdev;
 	adapter->pdev_dev = &pdev->dev;
 
+	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+				    (sizeof(struct mbox_cmd) *
+				     T4VF_OS_LOG_MBOX_CMDS),
+				    GFP_KERNEL);
+	if (!adapter->mbox_log) {
+		err = -ENOMEM;
+		goto err_free_adapter;
+	}
+	adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
+
 	/*
 	 * Initialize SMP data synchronization resources.
 	 */
@@ -2912,6 +3023,7 @@
 	iounmap(adapter->regs);
 
 err_free_adapter:
+	kfree(adapter->mbox_log);
 	kfree(adapter);
 
 err_release_regions:
@@ -2981,6 +3093,7 @@
 		iounmap(adapter->regs);
 		if (!is_t4(adapter->params.chip))
 			iounmap(adapter->bar2);
+		kfree(adapter->mbox_log);
 		kfree(adapter);
 	}
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 1ccd282..1bb57d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1448,7 +1448,7 @@
 	 * the new TX descriptors and return success.
 	 */
 	txq_advance(&txq->q, ndesc);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	ring_tx_db(adapter, &txq->q, ndesc);
 	return NETDEV_TX_OK;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 9b40a85..438374a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -36,6 +36,7 @@
 #ifndef __T4VF_COMMON_H__
 #define __T4VF_COMMON_H__
 
+#include "../cxgb4/t4_hw.h"
 #include "../cxgb4/t4fw_api.h"
 
 #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
@@ -227,6 +228,34 @@
 	u8 nports;			/* # of Ethernet "ports" */
 };
 
+/* Firmware Mailbox Command/Reply log.  All values are in Host-Endian format.
+ * The access and execute times are signed in order to accommodate negative
+ * error returns.
+ */
+struct mbox_cmd {
+	u64 cmd[MBOX_LEN / 8];		/* a Firmware Mailbox Command/Reply */
+	u64 timestamp;			/* OS-dependent timestamp */
+	u32 seqno;			/* sequence number */
+	s16 access;			/* time (ms) to access mailbox */
+	s16 execute;			/* time (ms) to execute */
+};
+
+struct mbox_cmd_log {
+	unsigned int size;		/* number of entries in the log */
+	unsigned int cursor;		/* next position in the log to write */
+	u32 seqno;			/* next sequence number */
+	/* variable length mailbox command log starts here */
+};
+
+/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
+ * return a pointer to the specified entry.
+ */
+static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
+						  unsigned int entry_idx)
+{
+	return &((struct mbox_cmd *)&(log)[1])[entry_idx];
+}
+
 #include "adapter.h"
 
 #ifndef PCI_VENDOR_ID_CHELSIO
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index fed83d8..955ff7c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -76,21 +76,33 @@
 		*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
 }
 
-/*
- * Dump contents of mailbox with a leading tag.
+/**
+ *	t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
+ *	@adapter: the adapter
+ *	@cmd: the Firmware Mailbox Command or Reply
+ *	@size: command length in bytes
+ *	@access: the time (ms) needed to access the Firmware Mailbox
+ *	@execute: the time (ms) the command spent being executed
  */
-static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
+static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
+			     int size, int access, int execute)
 {
-	dev_err(adapter->pdev_dev,
-		"mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
-		(unsigned long long)t4_read_reg64(adapter, mbox_data +  0),
-		(unsigned long long)t4_read_reg64(adapter, mbox_data +  8),
-		(unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
-		(unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
-		(unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
-		(unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
-		(unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
-		(unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
+	struct mbox_cmd_log *log = adapter->mbox_log;
+	struct mbox_cmd *entry;
+	int i;
+
+	entry = mbox_cmd_log_entry(log, log->cursor++);
+	if (log->cursor == log->size)
+		log->cursor = 0;
+
+	for (i = 0; i < size / 8; i++)
+		entry->cmd[i] = be64_to_cpu(cmd[i]);
+	while (i < MBOX_LEN / 8)
+		entry->cmd[i++] = 0;
+	entry->timestamp = jiffies;
+	entry->seqno = log->seqno++;
+	entry->access = access;
+	entry->execute = execute;
 }
 
 /**
@@ -120,10 +132,13 @@
 		1, 1, 3, 5, 10, 10, 20, 50, 100
 	};
 
+	u16 access = 0, execute = 0;
 	u32 v, mbox_data;
-	int i, ms, delay_idx;
+	int i, ms, delay_idx, ret;
 	const __be64 *p;
 	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+	u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
+	__be64 cmd_rpl[MBOX_LEN / 8];
 
 	/* In T6, mailbox size is changed to 128 bytes to avoid
 	 * invalidating the entire prefetch buffer.
@@ -148,8 +163,11 @@
 	v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
 	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
 		v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
-	if (v != MBOX_OWNER_DRV)
-		return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
+	if (v != MBOX_OWNER_DRV) {
+		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+		t4vf_record_mbox(adapter, cmd, size, access, ret);
+		return ret;
+	}
 
 	/*
 	 * Write the command array into the Mailbox Data register array and
@@ -164,6 +182,8 @@
 	 * Data registers before doing the write to the VF Mailbox Control
 	 * register.
 	 */
+	if (cmd_op != FW_VI_STATS_CMD)
+		t4vf_record_mbox(adapter, cmd, size, access, 0);
 	for (i = 0, p = cmd; i < size; i += 8)
 		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
 	t4_read_reg(adapter, mbox_data);         /* flush write */
@@ -209,31 +229,33 @@
 			 * We return the (negated) firmware command return
 			 * code (this depends on FW_SUCCESS == 0).
 			 */
+			get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);
 
 			/* return value in low-order little-endian word */
-			v = t4_read_reg(adapter, mbox_data);
-			if (FW_CMD_RETVAL_G(v))
-				dump_mbox(adapter, "FW Error", mbox_data);
+			v = be64_to_cpu(cmd_rpl[0]);
 
 			if (rpl) {
 				/* request bit in high-order BE word */
 				WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
 					 & FW_CMD_REQUEST_F) == 0);
-				get_mbox_rpl(adapter, rpl, size, mbox_data);
+				memcpy(rpl, cmd_rpl, size);
 				WARN_ON((be32_to_cpu(*(__be32 *)rpl)
 					 & FW_CMD_REQUEST_F) != 0);
 			}
 			t4_write_reg(adapter, mbox_ctl,
 				     MBOWNER_V(MBOX_OWNER_NONE));
+			execute = i + ms;
+			if (cmd_op != FW_VI_STATS_CMD)
+				t4vf_record_mbox(adapter, cmd_rpl, size, access,
+						 execute);
 			return -FW_CMD_RETVAL_G(v);
 		}
 	}
 
-	/*
-	 * We timed out.  Return the error ...
-	 */
-	dump_mbox(adapter, "FW Timeout", mbox_data);
-	return -ETIMEDOUT;
+	/* We timed out.  Return the error ... */
+	ret = -ETIMEDOUT;
+	t4vf_record_mbox(adapter, cmd, size, access, ret);
+	return ret;
 }
 
 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b2182d3..f15560a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2740,6 +2740,7 @@
 		netdev->hw_features |= NETIF_F_RXCSUM;
 
 	netdev->features |= netdev->hw_features;
+	netdev->vlan_features |= netdev->features;
 
 #ifdef CONFIG_RFS_ACCEL
 	netdev->hw_features |= NETIF_F_NTUPLE;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 48d9194..1471e16 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -966,7 +966,7 @@
 	/* Init Driver variable */
 	db->tx_pkt_cnt = 0;
 	db->queue_pkt_len = 0;
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 }
 
 /* Our watchdog timed out. Called by the networking layer */
@@ -985,7 +985,7 @@
 	dm9000_init_dm9000(dev);
 	dm9000_unmask_interrupts(db);
 	/* We can accept TX packets again */
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 
 	/* Restore previous register address */
@@ -1432,6 +1432,7 @@
 	int reset_gpios;
 	enum of_gpio_flags flags;
 	struct regulator *power;
+	bool inv_mac_addr = false;
 
 	power = devm_regulator_get(dev, "vcc");
 	if (IS_ERR(power)) {
@@ -1686,9 +1687,7 @@
 	}
 
 	if (!is_valid_ether_addr(ndev->dev_addr)) {
-		dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
-			 "set using ifconfig\n", ndev->name);
-
+		inv_mac_addr = true;
 		eth_hw_addr_random(ndev);
 		mac_src = "random";
 	}
@@ -1697,11 +1696,15 @@
 	platform_set_drvdata(pdev, ndev);
 	ret = register_netdev(ndev);
 
-	if (ret == 0)
+	if (ret == 0) {
+		if (inv_mac_addr)
+			dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
+				 ndev->name);
 		printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
 		       ndev->name, dm9000_type_to_char(db->type),
 		       db->io_addr, db->io_data, ndev->irq,
 		       ndev->dev_addr, mac_src);
+	}
 	return 0;
 
 out:
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 3acde3b..cbe8497 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1336,7 +1336,7 @@
     }
 
     lp->interrupt = UNMASK_INTERRUPTS;
-    dev->trans_start = jiffies; /* prevent tx timeout */
+    netif_trans_update(dev); /* prevent tx timeout */
 
     START_DE4X5;
 
@@ -1465,7 +1465,7 @@
 
     netif_stop_queue(dev);
     if (!lp->tx_enable)                   /* Cannot send for now */
-	return NETDEV_TX_LOCKED;
+		goto tx_err;
 
     /*
     ** Clean out the TX ring asynchronously to interrupts - sometimes the
@@ -1478,7 +1478,7 @@
 
     /* Test if cache is already locked - requeue skb if so */
     if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
-	return NETDEV_TX_LOCKED;
+		goto tx_err;
 
     /* Transmit descriptor ring full or stale skb */
     if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
@@ -1519,6 +1519,9 @@
     lp->cache.lock = 0;
 
     return NETDEV_TX_OK;
+tx_err:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 /*
@@ -1932,7 +1935,7 @@
 
 	    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
 	    outl(POLL_DEMAND, DE4X5_TPD);       /* Start the TX */
-	    dev->trans_start = jiffies; /* prevent tx timeout */
+	    netif_trans_update(dev); /* prevent tx timeout */
 	}
     }
 }
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index afd8e78..8ed0fd8 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -192,9 +192,6 @@
 	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
 	(pci_dev)->revision))
 
-/* Sten Check */
-#define DEVICE net_device
-
 /* Structure/enum declaration ------------------------------- */
 struct tx_desc {
         __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -313,10 +310,10 @@
 
 
 /* function declaration ------------------------------------- */
-static int dmfe_open(struct DEVICE *);
-static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
-static int dmfe_stop(struct DEVICE *);
-static void dmfe_set_filter_mode(struct DEVICE *);
+static int dmfe_open(struct net_device *);
+static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
+static int dmfe_stop(struct net_device *);
+static void dmfe_set_filter_mode(struct net_device *);
 static const struct ethtool_ops netdev_ethtool_ops;
 static u16 read_srom_word(void __iomem *, int);
 static irqreturn_t dmfe_interrupt(int , void *);
@@ -326,8 +323,8 @@
 static void dmfe_descriptor_init(struct net_device *);
 static void allocate_rx_buffer(struct net_device *);
 static void update_cr6(u32, void __iomem *);
-static void send_filter_frame(struct DEVICE *);
-static void dm9132_id_table(struct DEVICE *);
+static void send_filter_frame(struct net_device *);
+static void dm9132_id_table(struct net_device *);
 static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
 static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
 static void dmfe_phy_write_1bit(void __iomem *, u32);
@@ -336,12 +333,12 @@
 static void dmfe_process_mode(struct dmfe_board_info *);
 static void dmfe_timer(unsigned long);
 static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
-static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
-static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
+static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
+static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
 static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
-static void dmfe_dynamic_reset(struct DEVICE *);
+static void dmfe_dynamic_reset(struct net_device *);
 static void dmfe_free_rxbuffer(struct dmfe_board_info *);
-static void dmfe_init_dm910x(struct DEVICE *);
+static void dmfe_init_dm910x(struct net_device *);
 static void dmfe_parse_srom(struct dmfe_board_info *);
 static void dmfe_program_DM9801(struct dmfe_board_info *, int);
 static void dmfe_program_DM9802(struct dmfe_board_info *);
@@ -558,7 +555,7 @@
  *	The interface is opened whenever "ifconfig" actives it.
  */
 
-static int dmfe_open(struct DEVICE *dev)
+static int dmfe_open(struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
 	const int irq = db->pdev->irq;
@@ -617,7 +614,7 @@
  *	Enable Tx/Rx machine
  */
 
-static void dmfe_init_dm910x(struct DEVICE *dev)
+static void dmfe_init_dm910x(struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
 	void __iomem *ioaddr = db->ioaddr;
@@ -684,7 +681,7 @@
  */
 
 static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
-					 struct DEVICE *dev)
+					 struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
 	void __iomem *ioaddr = db->ioaddr;
@@ -728,7 +725,7 @@
 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
 		db->tx_packet_cnt++;			/* Ready to send */
 		dw32(DCR1, 0x1);			/* Issue Tx polling */
-		dev->trans_start = jiffies;		/* saved time stamp */
+		netif_trans_update(dev);		/* saved time stamp */
 	} else {
 		db->tx_queue_cnt++;			/* queue TX packet */
 		dw32(DCR1, 0x1);			/* Issue Tx polling */
@@ -754,7 +751,7 @@
  *	The interface is stopped when it is brought.
  */
 
-static int dmfe_stop(struct DEVICE *dev)
+static int dmfe_stop(struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
 	void __iomem *ioaddr = db->ioaddr;
@@ -798,7 +795,7 @@
 
 static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
 {
-	struct DEVICE *dev = dev_id;
+	struct net_device *dev = dev_id;
 	struct dmfe_board_info *db = netdev_priv(dev);
 	void __iomem *ioaddr = db->ioaddr;
 	unsigned long flags;
@@ -879,7 +876,7 @@
  *	Free TX resource after TX complete
  */
 
-static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
+static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
 {
 	struct tx_desc *txptr;
 	void __iomem *ioaddr = db->ioaddr;
@@ -934,7 +931,7 @@
 		db->tx_packet_cnt++;			/* Ready to send */
 		db->tx_queue_cnt--;
 		dw32(DCR1, 0x1);			/* Issue Tx polling */
-		dev->trans_start = jiffies;		/* saved time stamp */
+		netif_trans_update(dev);		/* saved time stamp */
 	}
 
 	/* Resource available check */
@@ -961,7 +958,7 @@
  *	Receive the come packet and pass to upper layer
  */
 
-static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
+static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
 {
 	struct rx_desc *rxptr;
 	struct sk_buff *skb, *newskb;
@@ -1052,7 +1049,7 @@
  * Set DM910X multicast address
  */
 
-static void dmfe_set_filter_mode(struct DEVICE * dev)
+static void dmfe_set_filter_mode(struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
 	unsigned long flags;
@@ -1545,7 +1542,7 @@
 		update_cr6(db->cr6_data | 0x2000, ioaddr);
 		dw32(DCR1, 0x1);	/* Issue Tx polling */
 		update_cr6(db->cr6_data, ioaddr);
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 	} else
 		db->tx_queue_cnt++;	/* Put in TX queue */
 }
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 5364563..7bcccf5 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -44,7 +44,7 @@
 			tp->csr6 = new_csr6;
 			/* Restart Tx */
 			tulip_restart_rxtx(tp);
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 		}
 	}
 }
@@ -70,7 +70,7 @@
 			iowrite32(tp->csr6, ioaddr + CSR6);
 			iowrite32(0x30, ioaddr + CSR12);
 			iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 		}
 	} else if (ioread32(ioaddr + CSR5) & TPLnkPass) {
 		if (tulip_media_cap[dev->if_port] & MediaIsMII) {
@@ -147,7 +147,7 @@
 				tp->csr6 = new_csr6;
 				/* Restart Tx */
 				tulip_restart_rxtx(tp);
-				dev->trans_start = jiffies;
+				netif_trans_update(dev);
 				if (tulip_debug > 1)
 					dev_info(&dev->dev,
 						 "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 94d0eeb..bbde90b 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -605,7 +605,7 @@
 
 out_unlock:
 	spin_unlock_irqrestore (&tp->lock, flags);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue (dev);
 }
 
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 447d092..e750b5d 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -636,7 +636,7 @@
 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
 		db->tx_packet_cnt++;			/* Ready to send */
 		uw32(DCR1, 0x1);			/* Issue Tx polling */
-		dev->trans_start = jiffies;		/* saved time stamp */
+		netif_trans_update(dev);		/* saved time stamp */
 	}
 
 	/* Tx resource check */
@@ -1431,7 +1431,7 @@
 		update_cr6(db->cr6_data | 0x2000, ioaddr);
 		uw32(DCR1, 0x1);	/* Issue Tx polling */
 		update_cr6(db->cr6_data, ioaddr);
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 	} else
 		netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
 }
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 3c0e4d5..1f62b94 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -966,7 +966,7 @@
 	enable_irq(irq);
 
 	netif_wake_queue(dev);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	np->stats.tx_errors++;
 }
 
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index f92b6d9..78f1446 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -706,7 +706,7 @@
 		dev->name, dr32(TxStatus));
 	rio_free_tx(dev, 0);
 	dev->if_port = 0;
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 }
 
 static netdev_tx_t
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index a28a2e5..58c6338 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1011,7 +1011,7 @@
 
 	dev->if_port = 0;
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
 		netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 5366864..ed98ef1 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4890,11 +4890,13 @@
 	if (status)
 		return status;
 
-	if (netif_running(netdev)) {
+	rtnl_lock();
+	if (netif_running(netdev))
 		status = be_open(netdev);
-		if (status)
-			return status;
-	}
+	rtnl_unlock();
+
+	if (status)
+		return status;
 
 	netif_device_attach(netdev);
 
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 1f23845a..085f912 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -145,7 +145,7 @@
 	u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;
 
 	/* Check if we got TX */
-	if (!priv->tx_packet_sent || tx_ctrl_ct)
+	if (!priv->tx_skb || tx_ctrl_ct)
 		return;
 
 	/* Ack Tx ctrl register */
@@ -160,7 +160,7 @@
 	}
 
 	dev_kfree_skb(priv->tx_skb);
-	priv->tx_packet_sent = false;
+	priv->tx_skb = NULL;
 
 	if (netif_queue_stopped(ndev))
 		netif_wake_queue(ndev);
@@ -183,6 +183,9 @@
 	work_done = nps_enet_rx_handler(ndev);
 	if (work_done < budget) {
 		u32 buf_int_enable_value = 0;
+		u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+		u32 tx_ctrl_ct =
+			(tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
 
 		napi_complete(napi);
 
@@ -192,6 +195,18 @@
 
 		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
 				 buf_int_enable_value);
+
+		/* If a tx interrupt arrives while interrupts are masked, it
+		 * will be lost, since the tx interrupt is edge-triggered.
+		 * Specifically, while the code section above is executing,
+		 * between nps_enet_tx_handler and re-enabling interrupts, all
+		 * tx requests would be stuck until the next rx interrupt.
+		 * The two lines below handle this by re-adding ourselves to
+		 * the poll list.
+		 */
+
+		if (priv->tx_skb && !tx_ctrl_ct)
+			napi_reschedule(napi);
 	}
 
 	return work_done;
@@ -217,7 +232,7 @@
 	u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
 	u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
 
-	if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr)
+	if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr)
 		if (likely(napi_schedule_prep(&priv->napi))) {
 			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
 			__napi_schedule(&priv->napi);
@@ -387,8 +402,6 @@
 	/* Write the length of the Frame */
 	tx_ctrl_value |= length << TX_CTL_NT_SHIFT;
 
-	/* Indicate SW is done */
-	priv->tx_packet_sent = true;
 	tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
 	/* Send Frame */
 	nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
@@ -465,7 +478,7 @@
 	s32 err;
 
 	/* Reset private variables */
-	priv->tx_packet_sent = false;
+	priv->tx_skb = NULL;
 	priv->ge_mac_cfg_2_value = 0;
 	priv->ge_mac_cfg_3_value = 0;
 
@@ -534,6 +547,11 @@
 
 	priv->tx_skb = skb;
 
+	/* Make sure tx_skb is actually written to memory
+	 * before the HW is informed and the IRQ is fired.
+	 */
+	wmb();
+
 	nps_enet_send_frame(ndev, skb);
 
 	return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index d0cab60..3939ca2 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -165,14 +165,12 @@
  * struct nps_enet_priv - Storage of ENET's private information.
  * @regs_base:      Base address of ENET memory-mapped control registers.
  * @irq:            For RX/TX IRQ number.
- * @tx_packet_sent: SW indication if frame is being sent.
  * @tx_skb:         socket buffer of sent frame.
  * @napi:           Structure for NAPI.
  */
 struct nps_enet_priv {
 	void __iomem *regs_base;
 	s32 irq;
-	bool tx_packet_sent;
 	struct sk_buff *tx_skb;
 	struct napi_struct napi;
 	u32 ge_mac_cfg_2_value;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 84384e1..e7cf313 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -71,7 +71,6 @@
 	struct napi_struct napi;
 
 	struct mii_bus *mii_bus;
-	struct phy_device *phydev;
 	int old_speed;
 };
 
@@ -807,7 +806,7 @@
 static void ftgmac100_adjust_link(struct net_device *netdev)
 {
 	struct ftgmac100 *priv = netdev_priv(netdev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = netdev->phydev;
 	int ier;
 
 	if (phydev->speed == priv->old_speed)
@@ -850,7 +849,6 @@
 		return PTR_ERR(phydev);
 	}
 
-	priv->phydev = phydev;
 	return 0;
 }
 
@@ -939,27 +937,11 @@
 	strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
 }
 
-static int ftgmac100_get_settings(struct net_device *netdev,
-				  struct ethtool_cmd *cmd)
-{
-	struct ftgmac100 *priv = netdev_priv(netdev);
-
-	return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int ftgmac100_set_settings(struct net_device *netdev,
-				  struct ethtool_cmd *cmd)
-{
-	struct ftgmac100 *priv = netdev_priv(netdev);
-
-	return phy_ethtool_sset(priv->phydev, cmd);
-}
-
 static const struct ethtool_ops ftgmac100_ethtool_ops = {
-	.set_settings		= ftgmac100_set_settings,
-	.get_settings		= ftgmac100_get_settings,
 	.get_drvinfo		= ftgmac100_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
 /******************************************************************************
@@ -1085,7 +1067,7 @@
 	ftgmac100_init_hw(priv);
 	ftgmac100_start_hw(priv, 10);
 
-	phy_start(priv->phydev);
+	phy_start(netdev->phydev);
 
 	napi_enable(&priv->napi);
 	netif_start_queue(netdev);
@@ -1111,7 +1093,7 @@
 
 	netif_stop_queue(netdev);
 	napi_disable(&priv->napi);
-	phy_stop(priv->phydev);
+	phy_stop(netdev->phydev);
 
 	ftgmac100_stop_hw(priv);
 	free_irq(priv->irq, netdev);
@@ -1152,9 +1134,7 @@
 /* optional */
 static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
-	struct ftgmac100 *priv = netdev_priv(netdev);
-
-	return phy_mii_ioctl(priv->phydev, ifr, cmd);
+	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
 }
 
 static const struct net_device_ops ftgmac100_netdev_ops = {
@@ -1275,7 +1255,7 @@
 	return 0;
 
 err_register_netdev:
-	phy_disconnect(priv->phydev);
+	phy_disconnect(netdev->phydev);
 err_mii_probe:
 	mdiobus_unregister(priv->mii_bus);
 err_register_mdiobus:
@@ -1301,7 +1281,7 @@
 
 	unregister_netdev(netdev);
 
-	phy_disconnect(priv->phydev);
+	phy_disconnect(netdev->phydev);
 	mdiobus_unregister(priv->mii_bus);
 	mdiobus_free(priv->mii_bus);
 
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b1b9eba..c08bd76 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1227,7 +1227,7 @@
 
 	spin_unlock_irqrestore(&np->lock, flags);
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 	netif_wake_queue(dev); /* or .._start_.. ?? */
 }
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 195122e..f58f9ea 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -517,7 +517,6 @@
 
 	/* Phylib and MDIO interface */
 	struct	mii_bus *mii_bus;
-	struct	phy_device *phy_dev;
 	int	mii_timeout;
 	uint	phy_speed;
 	phy_interface_t	phy_interface;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 37c0815..ca2cccc 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -943,8 +943,8 @@
 		else
 			val &= ~FEC_RACC_OPTIONS;
 		writel(val, fep->hwp + FEC_RACC);
+		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
 	}
-	writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
 #endif
 
 	/*
@@ -967,10 +967,10 @@
 			rcntl &= ~(1 << 8);
 
 		/* 1G, 100M or 10M */
-		if (fep->phy_dev) {
-			if (fep->phy_dev->speed == SPEED_1000)
+		if (ndev->phydev) {
+			if (ndev->phydev->speed == SPEED_1000)
 				ecntl |= (1 << 5);
-			else if (fep->phy_dev->speed == SPEED_100)
+			else if (ndev->phydev->speed == SPEED_100)
 				rcntl &= ~(1 << 9);
 			else
 				rcntl |= (1 << 9);
@@ -991,7 +991,7 @@
 			 */
 			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
 				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
-			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
 				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
 			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
 
@@ -1005,7 +1005,7 @@
 	/* enable pause frame*/
 	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
 	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
-	     fep->phy_dev && fep->phy_dev->pause)) {
+	     ndev->phydev && ndev->phydev->pause)) {
 		rcntl |= FEC_ENET_FCE;
 
 		/* set FIFO threshold parameter to reduce overrun */
@@ -1521,9 +1521,15 @@
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
 	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
-		clear_bit(queue_id, &fep->work_rx);
-		pkt_received += fec_enet_rx_queue(ndev,
+		int ret;
+
+		ret = fec_enet_rx_queue(ndev,
 					budget - pkt_received, queue_id);
+
+		if (ret < budget - pkt_received)
+			clear_bit(queue_id, &fep->work_rx);
+
+		pkt_received += ret;
 	}
 	return pkt_received;
 }
@@ -1679,7 +1685,7 @@
 static void fec_enet_adjust_link(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	struct phy_device *phy_dev = fep->phy_dev;
+	struct phy_device *phy_dev = ndev->phydev;
 	int status_change = 0;
 
 	/* Prevent a state halted on mii error */
@@ -1879,8 +1885,6 @@
 	int phy_id;
 	int dev_id = fep->dev_id;
 
-	fep->phy_dev = NULL;
-
 	if (fep->phy_node) {
 		phy_dev = of_phy_connect(ndev, fep->phy_node,
 					 &fec_enet_adjust_link, 0,
@@ -1928,7 +1932,6 @@
 
 	phy_dev->advertising = phy_dev->supported;
 
-	fep->phy_dev = phy_dev;
 	fep->link = 0;
 	fep->full_duplex = 0;
 
@@ -2058,30 +2061,6 @@
 	}
 }
 
-static int fec_enet_get_settings(struct net_device *ndev,
-				  struct ethtool_cmd *cmd)
-{
-	struct fec_enet_private *fep = netdev_priv(ndev);
-	struct phy_device *phydev = fep->phy_dev;
-
-	if (!phydev)
-		return -ENODEV;
-
-	return phy_ethtool_gset(phydev, cmd);
-}
-
-static int fec_enet_set_settings(struct net_device *ndev,
-				 struct ethtool_cmd *cmd)
-{
-	struct fec_enet_private *fep = netdev_priv(ndev);
-	struct phy_device *phydev = fep->phy_dev;
-
-	if (!phydev)
-		return -ENODEV;
-
-	return phy_ethtool_sset(phydev, cmd);
-}
-
 static void fec_enet_get_drvinfo(struct net_device *ndev,
 				 struct ethtool_drvinfo *info)
 {
@@ -2214,7 +2193,7 @@
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
-	if (!fep->phy_dev)
+	if (!ndev->phydev)
 		return -ENODEV;
 
 	if (pause->tx_pause != pause->rx_pause) {
@@ -2230,17 +2209,17 @@
 	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
 
 	if (pause->rx_pause || pause->autoneg) {
-		fep->phy_dev->supported |= ADVERTISED_Pause;
-		fep->phy_dev->advertising |= ADVERTISED_Pause;
+		ndev->phydev->supported |= ADVERTISED_Pause;
+		ndev->phydev->advertising |= ADVERTISED_Pause;
 	} else {
-		fep->phy_dev->supported &= ~ADVERTISED_Pause;
-		fep->phy_dev->advertising &= ~ADVERTISED_Pause;
+		ndev->phydev->supported &= ~ADVERTISED_Pause;
+		ndev->phydev->advertising &= ~ADVERTISED_Pause;
 	}
 
 	if (pause->autoneg) {
 		if (netif_running(ndev))
 			fec_stop(ndev);
-		phy_start_aneg(fep->phy_dev);
+		phy_start_aneg(ndev->phydev);
 	}
 	if (netif_running(ndev)) {
 		napi_disable(&fep->napi);
@@ -2356,8 +2335,7 @@
 
 static int fec_enet_nway_reset(struct net_device *dev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
-	struct phy_device *phydev = fep->phy_dev;
+	struct phy_device *phydev = dev->phydev;
 
 	if (!phydev)
 		return -ENODEV;
@@ -2562,8 +2540,6 @@
 }
 
 static const struct ethtool_ops fec_enet_ethtool_ops = {
-	.get_settings		= fec_enet_get_settings,
-	.set_settings		= fec_enet_set_settings,
 	.get_drvinfo		= fec_enet_get_drvinfo,
 	.get_regs_len		= fec_enet_get_regs_len,
 	.get_regs		= fec_enet_get_regs,
@@ -2583,12 +2559,14 @@
 	.set_tunable		= fec_enet_set_tunable,
 	.get_wol		= fec_enet_get_wol,
 	.set_wol		= fec_enet_set_wol,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	struct phy_device *phydev = fep->phy_dev;
+	struct phy_device *phydev = ndev->phydev;
 
 	if (!netif_running(ndev))
 		return -EINVAL;
@@ -2843,7 +2821,7 @@
 		goto err_enet_mii_probe;
 
 	napi_enable(&fep->napi);
-	phy_start(fep->phy_dev);
+	phy_start(ndev->phydev);
 	netif_tx_start_all_queues(ndev);
 
 	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
@@ -2867,7 +2845,7 @@
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
-	phy_stop(fep->phy_dev);
+	phy_stop(ndev->phydev);
 
 	if (netif_device_present(ndev)) {
 		napi_disable(&fep->napi);
@@ -2875,8 +2853,7 @@
 		fec_stop(ndev);
 	}
 
-	phy_disconnect(fep->phy_dev);
-	fep->phy_dev = NULL;
+	phy_disconnect(ndev->phydev);
 
 	fec_enet_clk_enable(ndev, false);
 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
@@ -3504,7 +3481,7 @@
 	if (netif_running(ndev)) {
 		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
 			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
-		phy_stop(fep->phy_dev);
+		phy_stop(ndev->phydev);
 		napi_disable(&fep->napi);
 		netif_tx_lock_bh(ndev);
 		netif_device_detach(ndev);
@@ -3564,7 +3541,7 @@
 		netif_device_attach(ndev);
 		netif_tx_unlock_bh(ndev);
 		napi_enable(&fep->napi);
-		phy_start(fep->phy_dev);
+		phy_start(ndev->phydev);
 	}
 	rtnl_unlock();
 
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 25553ee..446ae9d 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -66,7 +66,6 @@
 	/* MDIO link details */
 	unsigned int mdio_speed;
 	struct device_node *phy_node;
-	struct phy_device *phydev;
 	enum phy_state link;
 	int seven_wire_mode;
 };
@@ -165,7 +164,7 @@
 static void mpc52xx_fec_adjust_link(struct net_device *dev)
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = dev->phydev;
 	int new_state = 0;
 
 	if (phydev->link != PHY_DOWN) {
@@ -215,16 +214,17 @@
 static int mpc52xx_fec_open(struct net_device *dev)
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = NULL;
 	int err = -EBUSY;
 
 	if (priv->phy_node) {
-		priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
-					      mpc52xx_fec_adjust_link, 0, 0);
-		if (!priv->phydev) {
+		phydev = of_phy_connect(priv->ndev, priv->phy_node,
+					mpc52xx_fec_adjust_link, 0, 0);
+		if (!phydev) {
 			dev_err(&dev->dev, "of_phy_connect failed\n");
 			return -ENODEV;
 		}
-		phy_start(priv->phydev);
+		phy_start(phydev);
 	}
 
 	if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
@@ -268,10 +268,9 @@
  free_ctrl_irq:
 	free_irq(dev->irq, dev);
  free_phy:
-	if (priv->phydev) {
-		phy_stop(priv->phydev);
-		phy_disconnect(priv->phydev);
-		priv->phydev = NULL;
+	if (phydev) {
+		phy_stop(phydev);
+		phy_disconnect(phydev);
 	}
 
 	return err;
@@ -280,6 +279,7 @@
 static int mpc52xx_fec_close(struct net_device *dev)
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
 
 	netif_stop_queue(dev);
 
@@ -291,11 +291,10 @@
 	free_irq(priv->r_irq, dev);
 	free_irq(priv->t_irq, dev);
 
-	if (priv->phydev) {
+	if (phydev) {
 		/* power down phy */
-		phy_stop(priv->phydev);
-		phy_disconnect(priv->phydev);
-		priv->phydev = NULL;
+		phy_stop(phydev);
+		phy_disconnect(phydev);
 	}
 
 	return 0;
@@ -763,26 +762,6 @@
 
 /* ethtool interface */
 
-static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-
-	if (!priv->phydev)
-		return -ENODEV;
-
-	return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-
-	if (!priv->phydev)
-		return -ENODEV;
-
-	return phy_ethtool_sset(priv->phydev, cmd);
-}
-
 static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
@@ -796,23 +775,23 @@
 }
 
 static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
-	.get_settings = mpc52xx_fec_get_settings,
-	.set_settings = mpc52xx_fec_set_settings,
 	.get_link = ethtool_op_get_link,
 	.get_msglevel = mpc52xx_fec_get_msglevel,
 	.set_msglevel = mpc52xx_fec_set_msglevel,
 	.get_ts_info = ethtool_op_get_ts_info,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 
 static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
 
-	if (!priv->phydev)
+	if (!phydev)
 		return -ENOTSUPP;
 
-	return phy_mii_ioctl(priv->phydev, rq, cmd);
+	return phy_mii_ioctl(phydev, rq, cmd);
 }
 
 static const struct net_device_ops mpc52xx_fec_netdev_ops = {
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ea83712..bcb9dcc 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2772,7 +2772,7 @@
 	/* Get the FM address */
 	res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
 	if (!res) {
-		dev_err(&of_dev->dev, "%s: Can't get FMan memory resouce\n",
+		dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
 			__func__);
 		goto fman_node_put;
 	}
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 48a9c17..61fd486 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -652,13 +652,13 @@
 	spin_lock_irqsave(&fep->lock, flags);
 
 	if (dev->flags & IFF_UP) {
-		phy_stop(fep->phydev);
+		phy_stop(dev->phydev);
 		(*fep->ops->stop)(dev);
 		(*fep->ops->restart)(dev);
-		phy_start(fep->phydev);
+		phy_start(dev->phydev);
 	}
 
-	phy_start(fep->phydev);
+	phy_start(dev->phydev);
 	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
 	spin_unlock_irqrestore(&fep->lock, flags);
 
@@ -672,7 +672,7 @@
 static void generic_adjust_link(struct  net_device *dev)
 {
 	struct fs_enet_private *fep = netdev_priv(dev);
-	struct phy_device *phydev = fep->phydev;
+	struct phy_device *phydev = dev->phydev;
 	int new_state = 0;
 
 	if (phydev->link) {
@@ -741,8 +741,6 @@
 		return -ENODEV;
 	}
 
-	fep->phydev = phydev;
-
 	return 0;
 }
 
@@ -776,7 +774,7 @@
 		napi_disable(&fep->napi_tx);
 		return err;
 	}
-	phy_start(fep->phydev);
+	phy_start(dev->phydev);
 
 	netif_start_queue(dev);
 
@@ -792,7 +790,7 @@
 	netif_carrier_off(dev);
 	napi_disable(&fep->napi);
 	napi_disable(&fep->napi_tx);
-	phy_stop(fep->phydev);
+	phy_stop(dev->phydev);
 
 	spin_lock_irqsave(&fep->lock, flags);
 	spin_lock(&fep->tx_lock);
@@ -801,8 +799,7 @@
 	spin_unlock_irqrestore(&fep->lock, flags);
 
 	/* release any irqs */
-	phy_disconnect(fep->phydev);
-	fep->phydev = NULL;
+	phy_disconnect(dev->phydev);
 	free_irq(fep->interrupt, dev);
 
 	return 0;
@@ -847,26 +844,6 @@
 		regs->version = 0;
 }
 
-static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct fs_enet_private *fep = netdev_priv(dev);
-
-	if (!fep->phydev)
-		return -ENODEV;
-
-	return phy_ethtool_gset(fep->phydev, cmd);
-}
-
-static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct fs_enet_private *fep = netdev_priv(dev);
-
-	if (!fep->phydev)
-		return -ENODEV;
-
-	return phy_ethtool_sset(fep->phydev, cmd);
-}
-
 static int fs_nway_reset(struct net_device *dev)
 {
 	return 0;
@@ -887,24 +864,22 @@
 static const struct ethtool_ops fs_ethtool_ops = {
 	.get_drvinfo = fs_get_drvinfo,
 	.get_regs_len = fs_get_regs_len,
-	.get_settings = fs_get_settings,
-	.set_settings = fs_set_settings,
 	.nway_reset = fs_nway_reset,
 	.get_link = ethtool_op_get_link,
 	.get_msglevel = fs_get_msglevel,
 	.set_msglevel = fs_set_msglevel,
 	.get_regs = fs_get_regs,
 	.get_ts_info = ethtool_op_get_ts_info,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct fs_enet_private *fep = netdev_priv(dev);
-
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	return phy_mii_ioctl(fep->phydev, rq, cmd);
+	return phy_mii_ioctl(dev->phydev, rq, cmd);
 }
 
 extern int fs_mii_connect(struct net_device *dev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index f184d8f..e29f54a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -149,7 +149,6 @@
 	unsigned int last_mii_status;
 	int interrupt;
 
-	struct phy_device *phydev;
 	int oldduplex, oldspeed, oldlink;	/* current settings */
 
 	/* event masks */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 1ba359f..d71761a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -370,7 +370,7 @@
 
 	/* adjust to speed (for RMII mode) */
 	if (fpi->use_rmii) {
-		if (fep->phydev->speed == 100)
+		if (dev->phydev->speed == 100)
 			C8(fcccp, fcc_gfemr, 0x20);
 		else
 			S8(fcccp, fcc_gfemr, 0x20);
@@ -396,7 +396,7 @@
 		S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
 
 	/* adjust to duplex mode */
-	if (fep->phydev->duplex)
+	if (dev->phydev->duplex)
 		S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
 	else
 		C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index bade2f8..35a318e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -254,7 +254,7 @@
 	int r;
 	u32 addrhi, addrlo;
 
-	struct mii_bus *mii = fep->phydev->mdio.bus;
+	struct mii_bus *mii = dev->phydev->mdio.bus;
 	struct fec_info* fec_inf = mii->priv;
 
 	r = whack_reset(fep->fec.fecp);
@@ -333,7 +333,7 @@
 	/*
 	 * adjust to duplex mode
 	 */
-	if (fep->phydev->duplex) {
+	if (dev->phydev->duplex) {
 		FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
 		FS(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD enable */
 	} else {
@@ -363,7 +363,7 @@
 	const struct fs_platform_info *fpi = fep->fpi;
 	struct fec __iomem *fecp = fep->fec.fecp;
 
-	struct fec_info *feci = fep->phydev->mdio.bus->priv;
+	struct fec_info *feci = dev->phydev->mdio.bus->priv;
 
 	int i;
 
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 7a184e8..e8b9c33 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -352,7 +352,7 @@
 	W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
 
 	/* Set full duplex mode if needed */
-	if (fep->phydev->duplex)
+	if (dev->phydev->duplex)
 		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
 
 	/* Restore multicast and promiscuous settings */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d2f917a..7615e06 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -999,7 +999,7 @@
 
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct gfar_private *priv = netdev_priv(dev);
+	struct phy_device *phydev = dev->phydev;
 
 	if (!netif_running(dev))
 		return -EINVAL;
@@ -1009,10 +1009,10 @@
 	if (cmd == SIOCGHWTSTAMP)
 		return gfar_hwtstamp_get(dev, rq);
 
-	if (!priv->phydev)
+	if (!phydev)
 		return -ENODEV;
 
-	return phy_mii_ioctl(priv->phydev, rq, cmd);
+	return phy_mii_ioctl(phydev, rq, cmd);
 }
 
 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
@@ -1635,7 +1635,7 @@
 		gfar_start_wol_filer(priv);
 
 	} else {
-		phy_stop(priv->phydev);
+		phy_stop(ndev->phydev);
 	}
 
 	return 0;
@@ -1664,7 +1664,7 @@
 		gfar_filer_restore_table(priv);
 
 	} else {
-		phy_start(priv->phydev);
+		phy_start(ndev->phydev);
 	}
 
 	gfar_start(priv);
@@ -1698,8 +1698,8 @@
 	priv->oldspeed = 0;
 	priv->oldduplex = -1;
 
-	if (priv->phydev)
-		phy_start(priv->phydev);
+	if (ndev->phydev)
+		phy_start(ndev->phydev);
 
 	netif_device_attach(ndev);
 	enable_napi(priv);
@@ -1778,6 +1778,7 @@
 		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
 		GFAR_SUPPORTED_GBIT : 0;
 	phy_interface_t interface;
+	struct phy_device *phydev;
 
 	priv->oldlink = 0;
 	priv->oldspeed = 0;
@@ -1785,9 +1786,9 @@
 
 	interface = gfar_get_interface(dev);
 
-	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
-				      interface);
-	if (!priv->phydev) {
+	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+				interface);
+	if (!phydev) {
 		dev_err(&dev->dev, "could not attach to PHY\n");
 		return -ENODEV;
 	}
@@ -1796,11 +1797,11 @@
 		gfar_configure_serdes(dev);
 
 	/* Remove any features not supported by the controller */
-	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
-	priv->phydev->advertising = priv->phydev->supported;
+	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
+	phydev->advertising = phydev->supported;
 
 	/* Add support for flow control, but don't advertise it by default */
-	priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+	phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
 
 	return 0;
 }
@@ -1944,7 +1945,7 @@
 	/* disable ints and gracefully shut down Rx/Tx DMA */
 	gfar_halt(priv);
 
-	phy_stop(priv->phydev);
+	phy_stop(dev->phydev);
 
 	free_skb_resources(priv);
 }
@@ -2076,7 +2077,7 @@
 
 	gfar_ints_enable(priv);
 
-	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(priv->ndev); /* prevent tx timeout */
 }
 
 static void free_grp_irqs(struct gfar_priv_grp *grp)
@@ -2204,7 +2205,7 @@
 	priv->oldspeed = 0;
 	priv->oldduplex = -1;
 
-	phy_start(priv->phydev);
+	phy_start(ndev->phydev);
 
 	enable_napi(priv);
 
@@ -2572,8 +2573,7 @@
 	stop_gfar(dev);
 
 	/* Disconnect from the PHY */
-	phy_disconnect(priv->phydev);
-	priv->phydev = NULL;
+	phy_disconnect(dev->phydev);
 
 	gfar_free_irq(priv);
 
@@ -3379,7 +3379,7 @@
 static void adjust_link(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = dev->phydev;
 
 	if (unlikely(phydev->link != priv->oldlink ||
 		     (phydev->link && (phydev->duplex != priv->oldduplex ||
@@ -3620,7 +3620,8 @@
 
 static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
 {
-	struct phy_device *phydev = priv->phydev;
+	struct net_device *ndev = priv->ndev;
+	struct phy_device *phydev = ndev->phydev;
 	u32 val = 0;
 
 	if (!phydev->duplex)
@@ -3660,7 +3661,8 @@
 static noinline void gfar_update_link_state(struct gfar_private *priv)
 {
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	struct phy_device *phydev = priv->phydev;
+	struct net_device *ndev = priv->ndev;
+	struct phy_device *phydev = ndev->phydev;
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	int i;
 
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index cb77667..373fd09 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1153,7 +1153,6 @@
 	phy_interface_t interface;
 	struct device_node *phy_node;
 	struct device_node *tbi_node;
-	struct phy_device *phydev;
 	struct mii_bus *mii_bus;
 	int oldspeed;
 	int oldduplex;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 4b0ee85..56588f2 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -184,40 +184,6 @@
 	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
 }
 
-
-static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
-
-	if (NULL == phydev)
-		return -ENODEV;
-
-	return phy_ethtool_sset(phydev, cmd);
-}
-
-
-/* Return the current settings in the ethtool_cmd structure */
-static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
-	struct gfar_priv_rx_q *rx_queue = NULL;
-	struct gfar_priv_tx_q *tx_queue = NULL;
-
-	if (NULL == phydev)
-		return -ENODEV;
-	tx_queue = priv->tx_queue[0];
-	rx_queue = priv->rx_queue[0];
-
-	/* etsec-1.7 and older versions have only one txic
-	 * and rxic regs although they support multiple queues */
-	cmd->maxtxpkt = get_icft_value(tx_queue->txic);
-	cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
-
-	return phy_ethtool_gset(phydev, cmd);
-}
-
 /* Return the length of the register structure */
 static int gfar_reglen(struct net_device *dev)
 {
@@ -242,10 +208,12 @@
 static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
 				     unsigned int usecs)
 {
+	struct net_device *ndev = priv->ndev;
+	struct phy_device *phydev = ndev->phydev;
 	unsigned int count;
 
 	/* The timer is different, depending on the interface speed */
-	switch (priv->phydev->speed) {
+	switch (phydev->speed) {
 	case SPEED_1000:
 		count = GFAR_GBIT_TIME;
 		break;
@@ -267,10 +235,12 @@
 static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
 				     unsigned int ticks)
 {
+	struct net_device *ndev = priv->ndev;
+	struct phy_device *phydev = ndev->phydev;
 	unsigned int count;
 
 	/* The timer is different, depending on the interface speed */
-	switch (priv->phydev->speed) {
+	switch (phydev->speed) {
 	case SPEED_1000:
 		count = GFAR_GBIT_TIME;
 		break;
@@ -304,7 +274,7 @@
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 		return -EOPNOTSUPP;
 
-	if (NULL == priv->phydev)
+	if (!dev->phydev)
 		return -ENODEV;
 
 	rx_queue = priv->rx_queue[0];
@@ -365,7 +335,7 @@
 	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 		return -EOPNOTSUPP;
 
-	if (NULL == priv->phydev)
+	if (!dev->phydev)
 		return -ENODEV;
 
 	/* Check the bounds of the values */
@@ -529,7 +499,7 @@
 			    struct ethtool_pauseparam *epause)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = dev->phydev;
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 oldadv, newadv;
 
@@ -1565,8 +1535,6 @@
 }
 
 const struct ethtool_ops gfar_ethtool_ops = {
-	.get_settings = gfar_gsettings,
-	.set_settings = gfar_ssettings,
 	.get_drvinfo = gfar_gdrvinfo,
 	.get_regs_len = gfar_reglen,
 	.get_regs = gfar_get_regs,
@@ -1589,4 +1557,6 @@
 	.set_rxnfc = gfar_set_nfc,
 	.get_rxnfc = gfar_get_nfc,
 	.get_ts_info = gfar_get_ts_info,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 89714f5..812a968 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -105,23 +105,20 @@
 #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
 
 static int
-uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
 {
 	struct ucc_geth_private *ugeth = netdev_priv(netdev);
 	struct phy_device *phydev = ugeth->phydev;
-	struct ucc_geth_info *ug_info = ugeth->ug_info;
 
 	if (!phydev)
 		return -ENODEV;
 
-	ecmd->maxtxpkt = 1;
-	ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
-
-	return phy_ethtool_gset(phydev, ecmd);
+	return phy_ethtool_ksettings_get(phydev, cmd);
 }
 
 static int
-uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+uec_set_ksettings(struct net_device *netdev,
+		  const struct ethtool_link_ksettings *cmd)
 {
 	struct ucc_geth_private *ugeth = netdev_priv(netdev);
 	struct phy_device *phydev = ugeth->phydev;
@@ -129,7 +126,7 @@
 	if (!phydev)
 		return -ENODEV;
 
-	return phy_ethtool_sset(phydev, ecmd);
+	return phy_ethtool_ksettings_set(phydev, cmd);
 }
 
 static void
@@ -392,8 +389,6 @@
 #endif /* CONFIG_PM */
 
 static const struct ethtool_ops uec_ethtool_ops = {
-	.get_settings           = uec_get_settings,
-	.set_settings           = uec_set_settings,
 	.get_drvinfo            = uec_get_drvinfo,
 	.get_regs_len           = uec_get_regs_len,
 	.get_regs               = uec_get_regs,
@@ -411,6 +406,8 @@
 	.get_wol		= uec_get_wol,
 	.set_wol		= uec_set_wol,
 	.get_ts_info		= ethtool_op_get_ts_info,
+	.get_link_ksettings	= uec_get_ksettings,
+	.set_link_ksettings	= uec_set_ksettings,
 };
 
 void uec_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 678f501..399cfd2 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -746,7 +746,7 @@
 	    lp->sent = lp->tx_queue ;
 	    lp->tx_queue = 0;
 	    lp->tx_queue_len = 0;
-	    dev->trans_start = jiffies;
+	    netif_trans_update(dev);
 	} else {
 	    lp->tx_started = 0;
 	}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index e51892d..b9f2ea5 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -636,7 +636,7 @@
 	pos = dma_ring_incr(pos, TX_DESC_NUM);
 	writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += skb->len;
 	netdev_sent_queue(dev, skb->len);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 37d0cce..e8d36aa 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -469,7 +469,7 @@
 				   u32 *tx_usecs, u32 *rx_usecs);
 	void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
 					    u32 *tx_frames, u32 *rx_frames);
-	void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
+	int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
 	int (*set_coalesce_frames)(struct hnae_handle *handle,
 				   u32 coalesce_frames);
 	void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 285c893..7a757e8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -29,25 +29,6 @@
 	return vf_cb->mac_cb;
 }
 
-/**
- * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id
- * @port_id: enet port id
- *: debug port 0-1, service port 2 -7 (dsaf mode only 2)
- * return: dsaf port id
- *: service ports 0 - 5, debug port 6-7
- **/
-static int hns_ae_map_eport_to_dport(u32 port_id)
-{
-	int port_index;
-
-	if (port_id < DSAF_DEBUG_NW_NUM)
-		port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF;
-	else
-		port_index = port_id - DSAF_DEBUG_NW_NUM;
-
-	return port_index;
-}
-
 static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
 {
 	return container_of(dev, struct dsaf_device, ae_dev);
@@ -56,50 +37,35 @@
 static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
 {
 	int ppe_index;
-	int ppe_common_index;
 	struct ppe_common_cb *ppe_comm;
 	struct  hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
 
-	if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
-		ppe_index = vf_cb->port_index;
-		ppe_common_index = 0;
-	} else {
-		ppe_index = 0;
-		ppe_common_index =
-			vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
-	}
-	ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
+	ppe_comm = vf_cb->dsaf_dev->ppe_common[0];
+	ppe_index = vf_cb->port_index;
+
 	return &ppe_comm->ppe_cb[ppe_index];
 }
 
 static int hns_ae_get_q_num_per_vf(
 	struct dsaf_device *dsaf_dev, int port)
 {
-	int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-
-	return dsaf_dev->rcb_common[common_idx]->max_q_per_vf;
+	return dsaf_dev->rcb_common[0]->max_q_per_vf;
 }
 
 static int hns_ae_get_vf_num_per_port(
 	struct dsaf_device *dsaf_dev, int port)
 {
-	int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-
-	return dsaf_dev->rcb_common[common_idx]->max_vfn;
+	return dsaf_dev->rcb_common[0]->max_vfn;
 }
 
 static struct ring_pair_cb *hns_ae_get_base_ring_pair(
 	struct dsaf_device *dsaf_dev, int port)
 {
-	int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
+	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[0];
 	int q_num = rcb_comm->max_q_per_vf;
 	int vf_num = rcb_comm->max_vfn;
 
-	if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
-		return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
-	else
-		return &rcb_comm->ring_pair_cb[0];
+	return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
 }
 
 static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
@@ -110,7 +76,6 @@
 struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
 				      u32 port_id)
 {
-	int port_idx;
 	int vfnum_per_port;
 	int qnum_per_vf;
 	int i;
@@ -120,11 +85,10 @@
 	struct hnae_vf_cb *vf_cb;
 
 	dsaf_dev = hns_ae_get_dsaf_dev(dev);
-	port_idx = hns_ae_map_eport_to_dport(port_id);
 
-	ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
-	vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
-	qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);
+	ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id);
+	vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
+	qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
 
 	vf_cb = kzalloc(sizeof(*vf_cb) +
 			qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
@@ -159,23 +123,18 @@
 		ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
 
 		ring_pair_cb->used_by_vf = 1;
-		if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
-			ring_pair_cb->port_id_in_dsa = port_idx;
-		else
-			ring_pair_cb->port_id_in_dsa = 0;
-
 		ring_pair_cb++;
 	}
 
 	vf_cb->dsaf_dev = dsaf_dev;
-	vf_cb->port_index = port_idx;
-	vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];
+	vf_cb->port_index = port_id;
+	vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];
 
 	ae_handle->phy_if = vf_cb->mac_cb->phy_if;
 	ae_handle->phy_node = vf_cb->mac_cb->phy_node;
 	ae_handle->if_support = vf_cb->mac_cb->if_support;
 	ae_handle->port_type = vf_cb->mac_cb->mac_type;
-	ae_handle->dport_id = port_idx;
+	ae_handle->dport_id = port_id;
 
 	return ae_handle;
 vf_id_err:
@@ -325,11 +284,8 @@
 	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
 
 	if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
-		u8 ppe_common_index =
-			vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
-
 		hns_mac_reset(vf_cb->mac_cb);
-		hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
+		hns_ppe_reset_common(vf_cb->dsaf_dev, 0);
 	}
 }
 
@@ -404,11 +360,16 @@
 static void hns_ae_get_pauseparam(struct hnae_handle *handle,
 				  u32 *auto_neg, u32 *rx_en, u32 *tx_en)
 {
-	assert(handle);
+	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
 
-	hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg);
+	hns_mac_get_autoneg(mac_cb, auto_neg);
 
-	hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en);
+	hns_mac_get_pauseparam(mac_cb, rx_en, tx_en);
+
+	/* Service port's pause feature is provided by DSAF, not mac */
+	if (handle->port_type == HNAE_PORT_SERVICE)
+		hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
 }
 
 static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
@@ -441,71 +402,67 @@
 				 u32 autoneg, u32 rx_en, u32 tx_en)
 {
 	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
 	int ret;
 
 	ret = hns_mac_set_autoneg(mac_cb, autoneg);
 	if (ret)
 		return ret;
 
+	/* Service port's pause feature is provided by DSAF, not mac */
+	if (handle->port_type == HNAE_PORT_SERVICE) {
+		ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
+						   mac_cb->mac_id, rx_en);
+		if (ret)
+			return ret;
+		rx_en = 0;
+	}
 	return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
 }
 
 static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
 				      u32 *tx_usecs, u32 *rx_usecs)
 {
-	int port;
+	struct ring_pair_cb *ring_pair =
+		container_of(handle->qs[0], struct ring_pair_cb, q);
 
-	port = hns_ae_map_eport_to_dport(handle->eport_id);
-
-	*tx_usecs = hns_rcb_get_coalesce_usecs(
-		hns_ae_get_dsaf_dev(handle->dev),
-		hns_dsaf_get_comm_idx_by_port(port));
-	*rx_usecs = hns_rcb_get_coalesce_usecs(
-		hns_ae_get_dsaf_dev(handle->dev),
-		hns_dsaf_get_comm_idx_by_port(port));
+	*tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+					       ring_pair->port_id_in_comm);
+	*rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+					       ring_pair->port_id_in_comm);
 }
 
 static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
 					       u32 *tx_frames, u32 *rx_frames)
 {
-	int port;
+	struct ring_pair_cb *ring_pair =
+		container_of(handle->qs[0], struct ring_pair_cb, q);
 
-	assert(handle);
-
-	port = hns_ae_map_eport_to_dport(handle->eport_id);
-
-	*tx_frames = hns_rcb_get_coalesced_frames(
-		hns_ae_get_dsaf_dev(handle->dev), port);
-	*rx_frames = hns_rcb_get_coalesced_frames(
-		hns_ae_get_dsaf_dev(handle->dev), port);
+	*tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+						  ring_pair->port_id_in_comm);
+	*rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+						  ring_pair->port_id_in_comm);
 }
 
-static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
-				      u32 timeout)
+static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
+				     u32 timeout)
 {
-	int port;
+	struct ring_pair_cb *ring_pair =
+		container_of(handle->qs[0], struct ring_pair_cb, q);
 
-	assert(handle);
-
-	port = hns_ae_map_eport_to_dport(handle->eport_id);
-
-	hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
-				   port, timeout);
+	return hns_rcb_set_coalesce_usecs(
+		ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
 }
 
 static int  hns_ae_set_coalesce_frames(struct hnae_handle *handle,
 				       u32 coalesce_frames)
 {
-	int port;
-	int ret;
+	struct ring_pair_cb *ring_pair =
+		container_of(handle->qs[0], struct ring_pair_cb, q);
 
-	assert(handle);
-
-	port = hns_ae_map_eport_to_dport(handle->eport_id);
-
-	ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
-					   port, coalesce_frames);
-	return ret;
+	return hns_rcb_set_coalesced_frames(
+		ring_pair->rcb_common,
+		ring_pair->port_id_in_comm, coalesce_frames);
 }
 
 void hns_ae_update_stats(struct hnae_handle *handle,
@@ -707,7 +664,7 @@
 
 	assert(handle);
 	mac_cb = hns_get_mac_cb(handle);
-	if (!mac_cb->cpld_vaddr)
+	if (!mac_cb->cpld_ctrl)
 		return;
 	hns_set_led_opt(mac_cb);
 }
@@ -727,7 +684,6 @@
 void hns_ae_get_regs(struct hnae_handle *handle, void *data)
 {
 	u32 *p = data;
-	u32 rcb_com_idx;
 	int i;
 	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
 	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
@@ -735,8 +691,7 @@
 	hns_ppe_get_regs(ppe_cb, p);
 	p += hns_ppe_get_regs_count();
 
-	rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
-	hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
+	hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p);
 	p += hns_rcb_get_common_regs_count();
 
 	for (i = 0; i < handle->q_num; i++) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 6e2b76e..44abb08 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -664,7 +664,8 @@
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
-		snprintf(buff, ETH_GSTRING_LEN, g_gmac_stats_string[i].desc);
+		snprintf(buff, ETH_GSTRING_LEN, "%s",
+			 g_gmac_stats_string[i].desc);
 		buff = buff + ETH_GSTRING_LEN;
 	}
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a38084a..611581f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -7,18 +7,19 @@
  * (at your option) any later version.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/phy_fixed.h>
 #include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/phy_fixed.h>
+#include <linux/platform_device.h>
 
-#include "hns_dsaf_misc.h"
 #include "hns_dsaf_main.h"
+#include "hns_dsaf_misc.h"
 #include "hns_dsaf_rcb.h"
 
 #define MAC_EN_FLAG_V		0xada0328
@@ -81,17 +82,6 @@
 	}
 }
 
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
-{
-	if (!mac_cb->cpld_vaddr)
-		return -ENODEV;
-
-	*sfp_prsnt = !dsaf_read_b((u8 *)mac_cb->cpld_vaddr
-					+ MAC_SFP_PORT_OFFSET);
-
-	return 0;
-}
-
 void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
 {
 	struct mac_driver *mac_ctrl_drv;
@@ -168,10 +158,9 @@
 				      u8 vmid, u8 *port_num)
 {
 	u8 tmp_port;
-	u32 comm_idx;
 
 	if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
-		if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) {
+		if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) {
 			dev_err(mac_cb->dev,
 				"input invalid,%s mac%d vmid%d !\n",
 				mac_cb->dsaf_dev->ae_dev.name,
@@ -179,7 +168,7 @@
 			return -EINVAL;
 		}
 	} else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
-		if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) {
+		if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) {
 			dev_err(mac_cb->dev,
 				"input invalid,%s mac%d vmid%d!\n",
 				mac_cb->dsaf_dev->ae_dev.name,
@@ -192,9 +181,7 @@
 		return -EINVAL;
 	}
 
-	comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id);
-
-	if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) {
+	if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) {
 		dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
 			mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
 		return -EINVAL;
@@ -234,7 +221,7 @@
 }
 
 /**
- *hns_mac_get_inner_port_num - change vf mac address
+ *hns_mac_change_vf_addr - change vf mac address
  *@mac_cb: mac device
  *@vmid: vmid
  *@addr:mac address
@@ -249,7 +236,7 @@
 	struct mac_entry_idx *old_entry;
 
 	old_entry = &mac_cb->addr_entry_idx[vmid];
-	if (dsaf_dev) {
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
 		memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
 		mac_entry.in_vlan_id = old_entry->vlan_id;
 		mac_entry.in_port_num = mac_cb->mac_id;
@@ -289,7 +276,7 @@
 	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
 	struct dsaf_drv_mac_single_dest_entry mac_entry;
 
-	if (dsaf_dev && addr) {
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev) && addr) {
 		memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
 		mac_entry.in_vlan_id = 0;/*vlan_id;*/
 		mac_entry.in_port_num = mac_cb->mac_id;
@@ -380,7 +367,7 @@
 	if (mac_cb->mac_type == HNAE_PORT_DEBUG)
 		return 0;
 
-	if (dsaf_dev) {
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
 		memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
 		mac_entry.in_vlan_id = vlan_id;
 		mac_entry.in_port_num = mac_cb->mac_id;
@@ -418,7 +405,7 @@
 
 	uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
 
-	if (dsaf_dev)  {
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev))  {
 		memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
 		mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
 		mac_entry.in_port_num = mac_cb->mac_id;
@@ -439,9 +426,8 @@
 
 void hns_mac_reset(struct hns_mac_cb *mac_cb)
 {
-	struct mac_driver *drv;
-
-	drv = hns_mac_get_drv(mac_cb);
+	struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+	bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
 
 	drv->mac_init(drv);
 
@@ -456,7 +442,7 @@
 
 	if (drv->mac_pausefrm_cfg) {
 		if (mac_cb->mac_type == HNAE_PORT_DEBUG)
-			drv->mac_pausefrm_cfg(drv, 0, 0);
+			drv->mac_pausefrm_cfg(drv, !is_ver1, !is_ver1);
 		else /* mac rx must disable, dsaf pfc close instead of it*/
 			drv->mac_pausefrm_cfg(drv, 0, 1);
 	}
@@ -561,14 +547,6 @@
 		*rx_en = 0;
 		*tx_en = 0;
 	}
-
-	/* Due to the chip defect, the service mac's rx pause CAN'T be enabled.
-	 * We set the rx pause frm always be true (1), because DSAF deals with
-	 * the rx pause frm instead of service mac. After all, we still support
-	 * rx pause frm.
-	 */
-	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
-		*rx_en = 1;
 }
 
 /**
@@ -602,20 +580,13 @@
 int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
 {
 	struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+	bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
 
-	if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
-		if (!rx_en) {
-			dev_err(mac_cb->dev, "disable rx_pause is not allowed!");
+	if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
+		if (is_ver1 && (tx_en || rx_en)) {
+			dev_err(mac_cb->dev, "macv1 can't enable tx/rx_pause!");
 			return -EINVAL;
 		}
-	} else if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
-		if (tx_en || rx_en) {
-			dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!");
-			return -EINVAL;
-		}
-	} else {
-		dev_err(mac_cb->dev, "Unsupport this operation!");
-		return -EINVAL;
 	}
 
 	if (mac_ctrl_drv->mac_pausefrm_cfg)
@@ -667,14 +638,18 @@
 }
 
 /**
- *mac_free_dev  - get mac information from device node
+ *hns_mac_get_info  - get mac information from device node
  *@mac_cb: mac device
  *@np:device node
- *@mac_mode_idx:mac mode index
+ * return: 0 --success, negative --fail
  */
-static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
-			     struct device_node *np, u32 mac_mode_idx)
+static int  hns_mac_get_info(struct hns_mac_cb *mac_cb)
 {
+	struct device_node *np = mac_cb->dev->of_node;
+	struct regmap *syscon;
+	struct of_phandle_args cpld_args;
+	u32 ret;
+
 	mac_cb->link = false;
 	mac_cb->half_duplex = false;
 	mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
@@ -690,12 +665,73 @@
 
 	mac_cb->max_frm = MAC_DEFAULT_MTU;
 	mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
+	mac_cb->port_rst_off = mac_cb->mac_id;
+	mac_cb->port_mode_off = 0;
 
-	/* Get the rest of the PHY information */
-	mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id);
+	/* if the dsaf node doesn't contain a port subnode, get phy-handle
+	 * from dsaf node
+	 */
+	if (!mac_cb->fw_port) {
+		mac_cb->phy_node = of_parse_phandle(np, "phy-handle",
+						    mac_cb->mac_id);
+		if (mac_cb->phy_node)
+			dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+				mac_cb->mac_id, mac_cb->phy_node->name);
+		return 0;
+	}
+	if (!is_of_node(mac_cb->fw_port))
+		return -EINVAL;
+	/* parse property from port subnode in dsaf */
+	mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port),
+					    "phy-handle", 0);
 	if (mac_cb->phy_node)
 		dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
 			mac_cb->mac_id, mac_cb->phy_node->name);
+	syscon = syscon_node_to_regmap(
+			of_parse_phandle(to_of_node(mac_cb->fw_port),
+					 "serdes-syscon", 0));
+	if (IS_ERR_OR_NULL(syscon)) {
+		dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
+		return -EINVAL;
+	}
+	mac_cb->serdes_ctrl = syscon;
+
+	ret = fwnode_property_read_u32(mac_cb->fw_port,
+				       "port-rst-offset",
+				       &mac_cb->port_rst_off);
+	if (ret) {
+		dev_dbg(mac_cb->dev,
+			"mac%d port-rst-offset not found, use default value.\n",
+			mac_cb->mac_id);
+	}
+
+	ret = fwnode_property_read_u32(mac_cb->fw_port,
+				       "port-mode-offset",
+				       &mac_cb->port_mode_off);
+	if (ret) {
+		dev_dbg(mac_cb->dev,
+			"mac%d port-mode-offset not found, use default value.\n",
+			mac_cb->mac_id);
+	}
+
+	ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port),
+					       "cpld-syscon", 1, 0, &cpld_args);
+	if (ret) {
+		dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
+			mac_cb->mac_id);
+		mac_cb->cpld_ctrl = NULL;
+	} else {
+		syscon = syscon_node_to_regmap(cpld_args.np);
+		if (IS_ERR_OR_NULL(syscon)) {
+			dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+			mac_cb->cpld_ctrl = NULL;
+		} else {
+			mac_cb->cpld_ctrl = syscon;
+			mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+		}
+	}
+
+	return 0;
 }
 
 /**
@@ -725,40 +761,31 @@
 		return base + 0x40000 + mac_id * 0x4000 -
 				mac_mode_idx * 0x20000;
 	else
-		return mac_cb->serdes_vaddr + 0x1000
-			+ (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000;
+		return dsaf_dev->ppe_base + 0x1000;
 }
 
 /**
  * hns_mac_get_cfg - get mac cfg from dtb or acpi table
  * @dsaf_dev: dsa fabric device struct pointer
- * @mac_idx: mac index
- * retuen 0 - success , negative --fail
+ * @mac_cb: mac control block
+ * return 0 - success , negative --fail
  */
-int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
+int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
 {
 	int ret;
 	u32 mac_mode_idx;
-	struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx];
 
 	mac_cb->dsaf_dev = dsaf_dev;
 	mac_cb->dev = dsaf_dev->dev;
-	mac_cb->mac_id = mac_idx;
 
 	mac_cb->sys_ctl_vaddr =	dsaf_dev->sc_base;
 	mac_cb->serdes_vaddr = dsaf_dev->sds_base;
 
-	if (dsaf_dev->cpld_base &&
-	    mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
-		mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
-			mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
-		cpld_led_reset(mac_cb);
-	}
 	mac_cb->sfp_prsnt = 0;
 	mac_cb->txpkt_for_led = 0;
 	mac_cb->rxpkt_for_led = 0;
 
-	if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
 		mac_cb->mac_type = HNAE_PORT_SERVICE;
 	else
 		mac_cb->mac_type = HNAE_PORT_DEBUG;
@@ -774,53 +801,100 @@
 	}
 	mac_mode_idx = (u32)ret;
 
-	hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx);
+	ret  = hns_mac_get_info(mac_cb);
+	if (ret)
+		return ret;
 
+	cpld_led_reset(mac_cb);
 	mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
 
 	return 0;
 }
 
+static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
+{
+	if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+		return 1;
+	else
+		return  DSAF_MAX_PORT_NUM;
+}
+
 /**
  * hns_mac_init - init mac
  * @dsaf_dev: dsa fabric device struct pointer
- * retuen 0 - success , negative --fail
+ * return 0 - success , negative --fail
  */
 int hns_mac_init(struct dsaf_device *dsaf_dev)
 {
-	int i;
+	bool found = false;
 	int ret;
-	size_t size;
+	u32 port_id;
+	int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
 	struct hns_mac_cb *mac_cb;
+	struct fwnode_handle *child;
 
-	size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP;
-	dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL);
-	if (!dsaf_dev->mac_cb)
-		return -ENOMEM;
+	device_for_each_child_node(dsaf_dev->dev, child) {
+		ret = fwnode_property_read_u32(child, "reg", &port_id);
+		if (ret) {
+			dev_err(dsaf_dev->dev,
+				"get reg fail, ret=%d!\n", ret);
+			return ret;
+		}
+		if (port_id >= max_port_num) {
+			dev_err(dsaf_dev->dev,
+				"reg(%u) out of range!\n", port_id);
+			return -EINVAL;
+		}
+		mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+				      GFP_KERNEL);
+		if (!mac_cb)
+			return -ENOMEM;
+		mac_cb->fw_port = child;
+		mac_cb->mac_id = (u8)port_id;
+		dsaf_dev->mac_cb[port_id] = mac_cb;
+		found = true;
+	}
 
-	for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) {
-		ret = hns_mac_get_cfg(dsaf_dev, i);
+	/* if the dsaf node has no port subnodes, initialize all ports;
+	 * this keeps compatibility with the old dts
+	 */
+	if (!found) {
+		for (port_id = 0; port_id < max_port_num; port_id++) {
+			mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+					      GFP_KERNEL);
+			if (!mac_cb)
+				return -ENOMEM;
+
+			mac_cb->mac_id = port_id;
+			dsaf_dev->mac_cb[port_id] = mac_cb;
+		}
+	}
+	/* init mac_cb for all port */
+	for (port_id = 0; port_id < max_port_num; port_id++) {
+		mac_cb = dsaf_dev->mac_cb[port_id];
+		if (!mac_cb)
+			continue;
+
+		ret = hns_mac_get_cfg(dsaf_dev, mac_cb);
 		if (ret)
-			goto free_mac_cb;
-
-		mac_cb = &dsaf_dev->mac_cb[i];
+			return ret;
 		ret = hns_mac_init_ex(mac_cb);
 		if (ret)
-			goto free_mac_cb;
+			return ret;
 	}
 
 	return 0;
-
-free_mac_cb:
-	dsaf_dev->mac_cb = NULL;
-
-	return ret;
 }
 
 void hns_mac_uninit(struct dsaf_device *dsaf_dev)
 {
-	cpld_led_reset(dsaf_dev->mac_cb);
-	dsaf_dev->mac_cb = NULL;
+	int i;
+	int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
+
+	for (i = 0; i < max_port_num; i++) {
+		cpld_led_reset(dsaf_dev->mac_cb[i]);
+		dsaf_dev->mac_cb[i] = NULL;
+	}
 }
 
 int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
@@ -908,7 +982,7 @@
 int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
 			enum hnae_led_state status)
 {
-	if (!mac_cb || !mac_cb->cpld_vaddr)
+	if (!mac_cb || !mac_cb->cpld_ctrl)
 		return 0;
 
 	return cpld_set_led_id(mac_cb, status);
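
hns_mac_get_info() above now takes its board wiring from a per-port fwnode: a "serdes-syscon" phandle resolved to a regmap, optional "port-rst-offset"/"port-mode-offset" u32 properties that fall back to defaults, and an optional single-cell "cpld-syscon" phandle whose argument becomes the CPLD register offset. A condensed sketch of that lookup pattern, under a hypothetical "foo-syscon"/"foo-offset" binding (not the driver's real property names):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/of.h>
	#include <linux/property.h>
	#include <linux/regmap.h>

	/* Sketch: resolve an optional syscon phandle plus a u32 offset from
	 * a port sub-node.  Both property names here are hypothetical. */
	static int foo_port_parse(struct device *dev, struct fwnode_handle *port,
				  struct regmap **map, u32 *offset)
	{
		struct of_phandle_args args;

		*offset = 0;		/* default when the property is absent */
		if (fwnode_property_read_u32(port, "foo-offset", offset))
			dev_dbg(dev, "foo-offset not given, using 0\n");

		*map = NULL;		/* the syscon itself is optional, too */
		if (of_parse_phandle_with_fixed_args(to_of_node(port),
						     "foo-syscon", 1, 0, &args))
			return 0;

		*map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		return IS_ERR(*map) ? PTR_ERR(*map) : 0;
	}

Once such a regmap is held, register accesses go through regmap_read()/regmap_write() rather than readl()/writel(); the dsaf_read_syscon()/dsaf_write_syscon() helpers used in the misc hunks further down are presumably thin wrappers around exactly those calls.
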
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 823b6e7..97ce9a7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -10,9 +10,10 @@
 #ifndef _HNS_DSAF_MAC_H
 #define _HNS_DSAF_MAC_H
 
-#include <linux/phy.h>
-#include <linux/kernel.h>
 #include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
 #include "hns_dsaf_main.h"
 
 struct dsaf_device;
@@ -310,10 +311,15 @@
 	struct device *dev;
 	struct dsaf_device *dsaf_dev;
 	struct mac_priv priv;
+	struct fwnode_handle *fw_port;
 	u8 __iomem *vaddr;
-	u8 __iomem *cpld_vaddr;
 	u8 __iomem *sys_ctl_vaddr;
 	u8 __iomem *serdes_vaddr;
+	struct regmap *serdes_ctrl;
+	struct regmap *cpld_ctrl;
+	u32 cpld_ctrl_reg;
+	u32 port_rst_off;
+	u32 port_mode_off;
 	struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
 	u8 sfp_prsnt;
 	u8 cpld_led_value;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 5c1ac9b..1c2ddb2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -7,27 +7,29 @@
  * (at your option) any later version.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/device.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
-#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
 #include <linux/vmalloc.h>
 
-#include "hns_dsaf_main.h"
-#include "hns_dsaf_rcb.h"
-#include "hns_dsaf_ppe.h"
 #include "hns_dsaf_mac.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
 
 const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
 	[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
 	[DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
 	[DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+	[DSAF_MODE_DISABLE_SP] = "single-port",
 };
 
 int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
@@ -35,8 +37,13 @@
 	int ret, i;
 	u32 desc_num;
 	u32 buf_size;
+	u32 reset_offset = 0;
+	u32 res_idx = 0;
 	const char *mode_str;
+	struct regmap *syscon;
+	struct resource *res;
 	struct device_node *np = dsaf_dev->dev->of_node;
+	struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
 
 	if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
 		dsaf_dev->dsaf_ver = AE_VERSION_1;
@@ -73,42 +80,68 @@
 	else
 		dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
 
-	dsaf_dev->sc_base = of_iomap(np, 0);
-	if (!dsaf_dev->sc_base) {
-		dev_err(dsaf_dev->dev,
-			"%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name);
-		ret = -ENOMEM;
-		goto unmap_base_addr;
+	syscon = syscon_node_to_regmap(
+			of_parse_phandle(np, "subctrl-syscon", 0));
+	if (IS_ERR_OR_NULL(syscon)) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+		if (!res) {
+			dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
+			return -ENOMEM;
+		}
+		dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res);
+		if (!dsaf_dev->sc_base) {
+			dev_err(dsaf_dev->dev, "subctrl can not map!\n");
+			return -ENOMEM;
+		}
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+		if (!res) {
+			dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
+			return -ENOMEM;
+		}
+		dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res);
+		if (!dsaf_dev->sds_base) {
+			dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
+			return -ENOMEM;
+		}
+	} else {
+		dsaf_dev->sub_ctrl = syscon;
 	}
 
-	dsaf_dev->sds_base = of_iomap(np, 1);
-	if (!dsaf_dev->sds_base) {
-		dev_err(dsaf_dev->dev,
-			"%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name);
-		ret = -ENOMEM;
-		goto unmap_base_addr;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base");
+	if (!res) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+		if (!res) {
+			dev_err(dsaf_dev->dev, "ppe-base info is needed!\n");
+			return -ENOMEM;
+		}
 	}
-
-	dsaf_dev->ppe_base = of_iomap(np, 2);
+	dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
 	if (!dsaf_dev->ppe_base) {
-		dev_err(dsaf_dev->dev,
-			"%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name);
-		ret = -ENOMEM;
-		goto unmap_base_addr;
+		dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n");
+		return -ENOMEM;
 	}
+	dsaf_dev->ppe_paddr = res->start;
 
-	dsaf_dev->io_base = of_iomap(np, 3);
-	if (!dsaf_dev->io_base) {
-		dev_err(dsaf_dev->dev,
-			"%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name);
-		ret = -ENOMEM;
-		goto unmap_base_addr;
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "dsaf-base");
+		if (!res) {
+			res = platform_get_resource(pdev, IORESOURCE_MEM,
+						    res_idx);
+			if (!res) {
+				dev_err(dsaf_dev->dev,
+					"dsaf-base info is needed!\n");
+				return -ENOMEM;
+			}
+		}
+		dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+		if (!dsaf_dev->io_base) {
+			dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n");
+			return -ENOMEM;
+		}
 	}
 
-	dsaf_dev->cpld_base = of_iomap(np, 4);
-	if (!dsaf_dev->cpld_base)
-		dev_dbg(dsaf_dev->dev, "NO CPLD ADDR");
-
 	ret = of_property_read_u32(np, "desc-num", &desc_num);
 	if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
 	    desc_num > HNS_DSAF_MAX_DESC_CNT) {
@@ -118,6 +151,13 @@
 	}
 	dsaf_dev->desc_num = desc_num;
 
+	ret = of_property_read_u32(np, "reset-field-offset", &reset_offset);
+	if (ret < 0) {
+		dev_dbg(dsaf_dev->dev,
+			"get reset-field-offset fail, ret=%d!\r\n", ret);
+	}
+	dsaf_dev->reset_offset = reset_offset;
+
 	ret = of_property_read_u32(np, "buf-size", &buf_size);
 	if (ret < 0) {
 		dev_err(dsaf_dev->dev,
@@ -149,8 +189,6 @@
 		iounmap(dsaf_dev->sds_base);
 	if (dsaf_dev->sc_base)
 		iounmap(dsaf_dev->sc_base);
-	if (dsaf_dev->cpld_base)
-		iounmap(dsaf_dev->cpld_base);
 	return ret;
 }
 
@@ -167,9 +205,6 @@
 
 	if (dsaf_dev->sc_base)
 		iounmap(dsaf_dev->sc_base);
-
-	if (dsaf_dev->cpld_base)
-		iounmap(dsaf_dev->cpld_base);
 }
 
 /**
@@ -217,9 +252,7 @@
 	u32 q_id, q_num_per_port;
 	u32 i;
 
-	hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
-			       HNS_DSAF_COMM_SERVICE_NW_IDX,
-			       &max_vfn, &max_q_per_vf);
+	hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
 	q_num_per_port = max_vfn * max_q_per_vf;
 
 	for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
@@ -239,9 +272,7 @@
 	if (AE_IS_VER1(dsaf_dev->dsaf_ver))
 		return;
 
-	hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
-			       HNS_DSAF_COMM_SERVICE_NW_IDX,
-			       &max_vfn, &max_q_per_vf);
+	hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
 	q_num_per_port = max_vfn * max_q_per_vf;
 
 	for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
@@ -712,13 +743,15 @@
 
 void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
 {
-	dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+		dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG,
+				 DSAF_CFG_MIX_MODE_S, !!en);
 }
 
 void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
 {
 	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
-	    dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG)
+	    dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG)
 		return;
 
 	dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
@@ -1022,12 +1055,52 @@
  * @mac_cb: mac contrl block
  */
 static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
-				int mac_id, int en)
+				int mac_id, int tc_en)
 {
-	if (!en)
-		dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0);
+	dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, tc_en);
+}
+
+static void hns_dsaf_set_pfc_pause(struct dsaf_device *dsaf_dev,
+				   int mac_id, int tx_en, int rx_en)
+{
+	if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+		if (!tx_en || !rx_en)
+			dev_err(dsaf_dev->dev, "dsaf v1 can not close pfc!\n");
+
+		return;
+	}
+
+	dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+			 DSAF_PFC_PAUSE_RX_EN_B, !!rx_en);
+	dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+			 DSAF_PFC_PAUSE_TX_EN_B, !!tx_en);
+}
+
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+				 u32 en)
+{
+	if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+		if (!en)
+			dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
+
+		return -EINVAL;
+	}
+
+	dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+			 DSAF_MAC_PAUSE_RX_EN_B, !!en);
+
+	return 0;
+}
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+				  u32 *en)
+{
+	if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+		*en = 1;
 	else
-		dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff);
+		*en = dsaf_get_dev_bit(dsaf_dev,
+				       DSAF_PAUSE_CFG_REG + mac_id * 4,
+				       DSAF_MAC_PAUSE_RX_EN_B);
 }
 
 /**
@@ -1039,6 +1112,7 @@
 {
 	u32 i;
 	u32 o_dsaf_cfg;
+	bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
 
 	o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
 	dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
@@ -1064,8 +1138,10 @@
 	hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
 
 	/*set dsaf pfc  to 0 for parseing rx pause*/
-	for (i = 0; i < DSAF_COMM_CHN; i++)
+	for (i = 0; i < DSAF_COMM_CHN; i++) {
 		hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
+		hns_dsaf_set_pfc_pause(dsaf_dev, i, is_ver1, is_ver1);
+	}
 
 	/*msk and  clr exception irqs */
 	for (i = 0; i < DSAF_COMM_CHN; i++) {
@@ -1264,6 +1340,9 @@
 	u32 i;
 	int ret;
 
+	if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+		return 0;
+
 	ret = hns_dsaf_init_hw(dsaf_dev);
 	if (ret)
 		return ret;
@@ -2013,6 +2092,8 @@
 {
 	struct dsaf_hw_stats *hw_stats
 		= &dsaf_dev->hw_stats[node_num];
+	bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+	u32 reg_tmp;
 
 	hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
 		DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
@@ -2022,8 +2103,12 @@
 		DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
 	hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
 		DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
-	hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev,
-		DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+	reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+			    DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+	hw_stats->rx_pause_frame +=
+		dsaf_read_dev(dsaf_dev, reg_tmp + 0x80 * (u64)node_num);
+
 	hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
 		DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
 	hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
@@ -2056,6 +2141,8 @@
 	u32 i = 0;
 	u32 j;
 	u32 *p = data;
+	u32 reg_tmp;
+	bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
 
 	/* dsaf common registers */
 	p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
@@ -2120,8 +2207,9 @@
 				DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
 		p[190 + i] = dsaf_read_dev(ddev,
 				DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
-		p[193 + i] = dsaf_read_dev(ddev,
-				DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80);
+		reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+				    DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+		p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80);
 		p[196 + i] = dsaf_read_dev(ddev,
 				DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
 		p[199 + i] = dsaf_read_dev(ddev,
@@ -2219,17 +2307,17 @@
 	/* dsaf onode registers */
 	for (i = 0; i < DSAF_XOD_NUM; i++) {
 		p[311 + i] = dsaf_read_dev(ddev,
-				DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + j * 0x90);
+				DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
 		p[319 + i] = dsaf_read_dev(ddev,
-				DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + j * 0x90);
+				DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
 		p[327 + i] = dsaf_read_dev(ddev,
-				DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + j * 0x90);
+				DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
 		p[335 + i] = dsaf_read_dev(ddev,
-				DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + j * 0x90);
+				DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
 		p[343 + i] = dsaf_read_dev(ddev,
-				DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + j * 0x90);
+				DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
 		p[351 + i] = dsaf_read_dev(ddev,
-				DSAF_XOD_ETS_TOKEN_CFG_0_REG + j * 0x90);
+				DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
 	}
 
 	p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
@@ -2368,8 +2456,11 @@
 	p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
 	p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
 
+	if (!is_ver1)
+		p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+
 	/* mark end of dsaf regs */
-	for (i = 498; i < 504; i++)
+	for (i = 499; i < 504; i++)
 		p[i] = 0xdddddddd;
 }
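
hns_dsaf_get_cfg() above now prefers a "subctrl-syscon" regmap and only falls back to iomapping numbered "reg" entries for older device trees, and the PPE/DSAF windows are looked up by resource name before falling back to the next index. A minimal sketch of that by-name-then-by-index fallback (the resource name passed by the caller is hypothetical); note that devm_ioremap_resource() reports failure with an ERR_PTR() value, so a caller checks it with IS_ERR():

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	/* Sketch: map a register window by name when the firmware provides
	 * one, otherwise fall back to the next positional MEM resource. */
	static void __iomem *foo_map_window(struct platform_device *pdev,
					    const char *name, int *res_idx)
	{
		struct resource *res;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
		if (!res)
			res = platform_get_resource(pdev, IORESOURCE_MEM,
						    (*res_idx)++);
		if (!res)
			return NULL;

		/* returns an ERR_PTR() value on mapping failure */
		return devm_ioremap_resource(&pdev->dev, res);
	}

A caller would treat both NULL (no resource found) and IS_ERR() (mapping failed) as fatal for that window.
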
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 5fea226..f0502ba 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -41,6 +41,7 @@
 #define DSAF_STATIC_NUM 28
 
 #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
 
 enum hal_dsaf_mode {
 	HRD_DSAF_NO_DSAF_MODE	= 0x0,
@@ -117,6 +118,7 @@
 	DSAF_MODE_ENABLE_32VM,	/**< en DSAF-mode, support 32 VM */
 	DSAF_MODE_ENABLE_128VM,	/**< en DSAF-mode, support 128 VM */
 	DSAF_MODE_ENABLE,		/**< before is enable DSAF mode*/
+	DSAF_MODE_DISABLE_SP,	/* <non-dsaf, single port mode */
 	DSAF_MODE_DISABLE_FIX,	/**< non-dasf, fixed to queue*/
 	DSAF_MODE_DISABLE_2PORT_8VM,	/**< non-dasf, 2port 8VM */
 	DSAF_MODE_DISABLE_2PORT_16VM,	/**< non-dasf, 2port 16VM */
@@ -275,10 +277,12 @@
 	u8 __iomem *sds_base;
 	u8 __iomem *ppe_base;
 	u8 __iomem *io_base;
-	u8 __iomem *cpld_base;
+	struct regmap *sub_ctrl;
+	phys_addr_t ppe_paddr;
 
 	u32 desc_num; /*  desc num per queue*/
 	u32 buf_size; /*  ring buffer size */
+	u32 reset_offset; /* reset field offset in sub sysctrl */
 	int buf_size_type; /* ring buffer size-type */
 	enum dsaf_mode dsaf_mode;	 /* dsaf mode  */
 	enum hal_dsaf_mode dsaf_en;
@@ -287,7 +291,7 @@
 
 	struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
 	struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
-	struct hns_mac_cb *mac_cb;
+	struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
 
 	struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
 	struct dsaf_int_stat int_stat;
@@ -359,14 +363,6 @@
 			   tab_line_addr);
 }
 
-static inline int hns_dsaf_get_comm_idx_by_port(int port)
-{
-	if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP))
-		return 0;
-	else
-		return (port - DSAF_COMM_CHN + 1);
-}
-
 static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
 	struct hnae_handle *handle)
 {
@@ -417,6 +413,11 @@
 void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
 int hns_dsaf_get_regs_count(void);
 void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+				  u32 *en);
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+				 u32 en);
 void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
 
 #endif /* __HNS_DSAF_MAIN_H__ */
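
The new HNS_DSAF_IS_DEBUG() macro above expands its dev argument unparenthesized; the conventional defensive form, which checkpatch.pl usually asks for, would be:

	#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)

With the callers added in this series it makes no practical difference, since the macro is only ever passed a plain pointer expression.
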
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 607c3be..a837bb9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -7,10 +7,30 @@
  * (at your option) any later version.
  */
 
-#include "hns_dsaf_misc.h"
 #include "hns_dsaf_mac.h"
-#include "hns_dsaf_reg.h"
+#include "hns_dsaf_misc.h"
 #include "hns_dsaf_ppe.h"
+#include "hns_dsaf_reg.h"
+
+static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
+{
+	if (dsaf_dev->sub_ctrl)
+		dsaf_write_syscon(dsaf_dev->sub_ctrl, reg, val);
+	else
+		dsaf_write_reg(dsaf_dev->sc_base, reg, val);
+}
+
+static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
+{
+	u32 ret;
+
+	if (dsaf_dev->sub_ctrl)
+		ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg);
+	else
+		ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+
+	return ret;
+}
 
 void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
 		      u16 speed, int data)
@@ -22,8 +42,8 @@
 		pr_err("sfp_led_opt mac_dev is null!\n");
 		return;
 	}
-	if (!mac_cb->cpld_vaddr) {
-		dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null !\n",
+	if (!mac_cb->cpld_ctrl) {
+		dev_err(mac_cb->dev, "mac_id=%d, cpld syscon is null !\n",
 			mac_cb->mac_id);
 		return;
 	}
@@ -40,21 +60,24 @@
 		dsaf_set_bit(value, DSAF_LED_DATA_B, data);
 
 		if (value != mac_cb->cpld_led_value) {
-			dsaf_write_b(mac_cb->cpld_vaddr, value);
+			dsaf_write_syscon(mac_cb->cpld_ctrl,
+					  mac_cb->cpld_ctrl_reg, value);
 			mac_cb->cpld_led_value = value;
 		}
 	} else {
-		dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+		dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+				  CPLD_LED_DEFAULT_VALUE);
 		mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
 	}
 }
 
 void cpld_led_reset(struct hns_mac_cb *mac_cb)
 {
-	if (!mac_cb || !mac_cb->cpld_vaddr)
+	if (!mac_cb || !mac_cb->cpld_ctrl)
 		return;
 
-	dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+	dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+			  CPLD_LED_DEFAULT_VALUE);
 	mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
 }
 
@@ -63,15 +86,19 @@
 {
 	switch (status) {
 	case HNAE_LED_ACTIVE:
-		mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
+		mac_cb->cpld_led_value =
+			dsaf_read_syscon(mac_cb->cpld_ctrl,
+					 mac_cb->cpld_ctrl_reg);
 		dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
 			     CPLD_LED_ON_VALUE);
-		dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+		dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+				  mac_cb->cpld_led_value);
 		return 2;
 	case HNAE_LED_INACTIVE:
 		dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
 			     CPLD_LED_DEFAULT_VALUE);
-		dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+		dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+				  mac_cb->cpld_led_value);
 		break;
 	default:
 		break;
@@ -95,10 +122,8 @@
 		nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
 	}
 
-	dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr,
-		       RESET_REQ_OR_DREQ);
-	dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr,
-		       RESET_REQ_OR_DREQ);
+	dsaf_write_sub(dsaf_dev, xbar_reg_addr, RESET_REQ_OR_DREQ);
+	dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ);
 }
 
 void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
@@ -110,14 +135,14 @@
 		return;
 
 	reg_val |= RESET_REQ_OR_DREQ;
-	reg_val |= 0x2082082 << port;
+	reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off;
 
 	if (val == 0)
 		reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
 	else
 		reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
 
-	dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+	dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
 }
 
 void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
@@ -129,68 +154,63 @@
 	if (port >= DSAF_XGE_NUM)
 		return;
 
-	reg_val |= XGMAC_TRX_CORE_SRST_M << port;
+	reg_val |= XGMAC_TRX_CORE_SRST_M
+		<< dsaf_dev->mac_cb[port]->port_rst_off;
 
 	if (val == 0)
 		reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
 	else
 		reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
 
-	dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+	dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
 }
 
 void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
 {
 	u32 reg_val_1;
 	u32 reg_val_2;
+	u32 port_rst_off;
 
 	if (port >= DSAF_GE_NUM)
 		return;
 
-	if (port < DSAF_SERVICE_NW_NUM) {
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
 		reg_val_1  = 0x1 << port;
+		port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
 		/* there is difference between V1 and V2 in register.*/
 		if (AE_IS_VER1(dsaf_dev->dsaf_ver))
-			reg_val_2  = 0x1041041 << port;
+			reg_val_2  = 0x1041041 << port_rst_off;
 		else
-			reg_val_2  = 0x2082082 << port;
+			reg_val_2  = 0x2082082 << port_rst_off;
 
 		if (val == 0) {
-			dsaf_write_reg(dsaf_dev->sc_base,
-				       DSAF_SUB_SC_GE_RESET_REQ1_REG,
+			dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
 				       reg_val_1);
 
-			dsaf_write_reg(dsaf_dev->sc_base,
-				       DSAF_SUB_SC_GE_RESET_REQ0_REG,
+			dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ0_REG,
 				       reg_val_2);
 		} else {
-			dsaf_write_reg(dsaf_dev->sc_base,
-				       DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+			dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ0_REG,
 				       reg_val_2);
 
-			dsaf_write_reg(dsaf_dev->sc_base,
-				       DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+			dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
 				       reg_val_1);
 		}
 	} else {
-		reg_val_1 = 0x15540 << (port - 6);
-		reg_val_2 = 0x100 << (port - 6);
+		reg_val_1 = 0x15540 << dsaf_dev->reset_offset;
+		reg_val_2 = 0x100 << dsaf_dev->reset_offset;
 
 		if (val == 0) {
-			dsaf_write_reg(dsaf_dev->sc_base,
-				       DSAF_SUB_SC_GE_RESET_REQ1_REG,
+			dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
 				       reg_val_1);
 
-			dsaf_write_reg(dsaf_dev->sc_base,
-				       DSAF_SUB_SC_PPE_RESET_REQ_REG,
+			dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_REQ_REG,
 				       reg_val_2);
 		} else {
-			dsaf_write_reg(dsaf_dev->sc_base,
-				       DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+			dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
 				       reg_val_1);
 
-			dsaf_write_reg(dsaf_dev->sc_base,
-				       DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+			dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_DREQ_REG,
 				       reg_val_2);
 		}
 	}
@@ -201,24 +221,23 @@
 	u32 reg_val = 0;
 	u32 reg_addr;
 
-	reg_val |= RESET_REQ_OR_DREQ << port;
+	reg_val |= RESET_REQ_OR_DREQ <<	dsaf_dev->mac_cb[port]->port_rst_off;
 
 	if (val == 0)
 		reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
 	else
 		reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
 
-	dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+	dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
 }
 
 void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
 {
-	int comm_index = ppe_common->comm_index;
 	struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
 	u32 reg_val;
 	u32 reg_addr;
 
-	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
 		reg_val = RESET_REQ_OR_DREQ;
 		if (val == 0)
 			reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
@@ -226,7 +245,7 @@
 			reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
 
 	} else {
-		reg_val = 0x100 << (comm_index - 1);
+		reg_val = 0x100 << dsaf_dev->reset_offset;
 
 		if (val == 0)
 			reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
@@ -234,7 +253,7 @@
 			reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
 	}
 
-	dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+	dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
 }
 
 /**
@@ -244,34 +263,47 @@
  */
 phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
 {
-	u32 hilink3_mode;
-	u32 hilink4_mode;
-	void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
-	int dev_id = mac_cb->mac_id;
-	phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+	u32 mode;
+	u32 reg;
+	bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
+	int mac_id = mac_cb->mac_id;
+	phy_interface_t phy_if;
 
-	hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG);
-	hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG);
-	if (dev_id >= 0 && dev_id <= 3) {
-		if (hilink4_mode == 0)
-			phy_if = PHY_INTERFACE_MODE_SGMII;
+	if (is_ver1) {
+		if (HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev))
+			return PHY_INTERFACE_MODE_SGMII;
+
+		if (mac_id >= 0 && mac_id <= 3)
+			reg = HNS_MAC_HILINK4_REG;
 		else
-			phy_if = PHY_INTERFACE_MODE_XGMII;
-	} else if (dev_id >= 4 && dev_id <= 5) {
-		if (hilink3_mode == 0)
-			phy_if = PHY_INTERFACE_MODE_SGMII;
+			reg = HNS_MAC_HILINK3_REG;
+	} else {
+		if (!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev) && mac_id <= 3)
+			reg = HNS_MAC_HILINK4V2_REG;
 		else
-			phy_if = PHY_INTERFACE_MODE_XGMII;
-	} else {
-		phy_if = PHY_INTERFACE_MODE_SGMII;
+			reg = HNS_MAC_HILINK3V2_REG;
 	}
 
-	dev_dbg(mac_cb->dev,
-		"hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
-		hilink3_mode, hilink4_mode, dev_id, phy_if);
+	mode = dsaf_read_sub(mac_cb->dsaf_dev, reg);
+	if (dsaf_get_bit(mode, mac_cb->port_mode_off))
+		phy_if = PHY_INTERFACE_MODE_XGMII;
+	else
+		phy_if = PHY_INTERFACE_MODE_SGMII;
+
 	return phy_if;
 }
 
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+	if (!mac_cb->cpld_ctrl)
+		return -ENODEV;
+
+	*sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg
+					+ MAC_SFP_PORT_OFFSET);
+
+	return 0;
+}
+
 /**
  * hns_mac_config_sds_loopback - set loop back for serdes
  * @mac_cb: mac control block
@@ -308,7 +340,14 @@
 				pr_info("no sfp in this eth\n");
 	}
 
-	dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+	if (mac_cb->serdes_ctrl) {
+		u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+
+		dsaf_set_field(origin, 1ull << 10, 10, !!en);
+		dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
+	} else {
+		dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+	}
 
 	return 0;
 }
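
When a serdes syscon regmap is present, the loopback hunk above reads the control register, sets or clears bit 10 and writes the value back. The same read-modify-write can be expressed in a single regmap_update_bits() call; a minimal sketch (the helper name is hypothetical, the bit position is the one used in the hunk):

	#include <linux/bitops.h>
	#include <linux/regmap.h>

	/* Sketch: toggle the serdes loopback-enable bit in one regmap call. */
	static int foo_serdes_set_loopback(struct regmap *serdes, u32 reg,
					   bool enable)
	{
		return regmap_update_bits(serdes, reg, BIT(10),
					  enable ? BIT(10) : 0);
	}
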
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 5b7ae5f..8cd151a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,22 +61,10 @@
 	}
 }
 
-static void __iomem *hns_ppe_common_get_ioaddr(
-	struct ppe_common_cb *ppe_common)
+static void __iomem *
+hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
 {
-	void __iomem *base_addr;
-
-	int idx = ppe_common->comm_index;
-
-	if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
-		base_addr = ppe_common->dsaf_dev->ppe_base
-			+ PPE_COMMON_REG_OFFSET;
-	else
-		base_addr = ppe_common->dsaf_dev->sds_base
-			+ (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
-			+ PPE_COMMON_REG_OFFSET;
-
-	return base_addr;
+	return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
 }
 
 /**
@@ -90,7 +78,7 @@
 	struct ppe_common_cb *ppe_common;
 	int ppe_num;
 
-	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
 		ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
 	else
 		ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
@@ -103,7 +91,7 @@
 	ppe_common->ppe_num = ppe_num;
 	ppe_common->dsaf_dev = dsaf_dev;
 	ppe_common->comm_index = comm_index;
-	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+	if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
 		ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
 	else
 		ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
@@ -124,32 +112,8 @@
 static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
 					int ppe_idx)
 {
-	void __iomem *base_addr;
-	int common_idx = ppe_common->comm_index;
 
-	if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
-		base_addr = ppe_common->dsaf_dev->ppe_base +
-			ppe_idx * PPE_REG_OFFSET;
-
-	} else {
-		base_addr = ppe_common->dsaf_dev->sds_base +
-			(common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET;
-	}
-
-	return base_addr;
-}
-
-static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx)
-{
-	int port;
-
-	if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE)
-		port = idx;
-	else
-		port = HNS_PPE_SERVICE_NW_ENGINE_NUM
-			+ ppe_common->comm_index - 1;
-
-	return port;
+	return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
 }
 
 static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
@@ -164,7 +128,6 @@
 		ppe_cb->next = NULL;
 		ppe_cb->ppe_common_cb = ppe_common;
 		ppe_cb->index = i;
-		ppe_cb->port = hns_ppe_get_port(ppe_common, i);
 		ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
 		ppe_cb->virq = 0;
 	}
@@ -318,7 +281,7 @@
 static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
 {
 	struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
-	u32 port = ppe_cb->port;
+	u32 port = ppe_cb->index;
 	struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
 	int i;
 
@@ -332,10 +295,12 @@
 	/* clr and msk except irq*/
 	hns_ppe_exc_irq_en(ppe_cb, 0);
 
-	if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG)
+	if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) {
 		hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
-	else
+		dsaf_write_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG, 0);
+	} else {
 		hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
+	}
 
 	hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
 	hns_ppe_cnt_clr_ce(ppe_cb);
@@ -375,7 +340,8 @@
 	u32 i;
 
 	for (i = 0; i < ppe_common->ppe_num; i++) {
-		hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+		if (ppe_common->dsaf_dev->mac_cb[i])
+			hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
 		memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
 	}
 }
@@ -408,8 +374,11 @@
 	if (ret)
 		return;
 
-	for (i = 0; i < ppe_common->ppe_num; i++)
-		hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+	for (i = 0; i < ppe_common->ppe_num; i++) {
+		/* We only need to initialize the ppe when the port exists */
+		if (dsaf_dev->mac_cb[i])
+			hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+	}
 
 	ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
 	if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index e9c0ec2..9d8e643 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,6 @@
 	struct hns_ppe_hw_stats hw_stats;
 
 	u8 index;	/* index in a ppe common device */
-	u8 port;			 /* port id in dsaf  */
 	void __iomem *io_base;
 	int virq;
 	u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 1218880..4ef6d23 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -215,9 +215,9 @@
 		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
 			       bd_size_type);
 		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
-			       ring_pair->port_id_in_dsa);
+			       ring_pair->port_id_in_comm);
 		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
-			       ring_pair->port_id_in_dsa);
+			       ring_pair->port_id_in_comm);
 	} else {
 		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
 			       (u32)dma);
@@ -227,9 +227,9 @@
 		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
 			       bd_size_type);
 		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
-			       ring_pair->port_id_in_dsa);
+			       ring_pair->port_id_in_comm);
 		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
-			       ring_pair->port_id_in_dsa);
+			       ring_pair->port_id_in_comm);
 	}
 }
 
@@ -256,55 +256,21 @@
 		       desc_cnt);
 }
 
-/**
- *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames
- *@rcb_common: rcb_common device
- *@port_idx:port index
- *@coalesced_frames:BD num for coalesced frames
- */
-static int  hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
-					      u32 port_idx,
-					      u32 coalesced_frames)
+static void hns_rcb_set_port_timeout(
+	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
 {
-	if (coalesced_frames >= rcb_common->desc_num ||
-	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
-		return -EINVAL;
-
-	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
-		       coalesced_frames);
-	return 0;
-}
-
-/**
- *hns_rcb_get_port_coalesced_frames - set rcb port coalesced frames
- *@rcb_common: rcb_common device
- *@port_idx:port index
- * return coaleseced frames value
- */
-static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
-					     u32 port_idx)
-{
-	if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
-		port_idx = 0;
-
-	return dsaf_read_dev(rcb_common,
-			     RCB_CFG_PKTLINE_REG + port_idx * 4);
-}
-
-/**
- *hns_rcb_set_timeout - set rcb port coalesced time_out
- *@rcb_common: rcb_common device
- *@time_out:time for coalesced time_out
- */
-static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
-				u32 timeout)
-{
-	dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
+	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
+			       timeout * HNS_RCB_CLK_FREQ_MHZ);
+	else
+		dsaf_write_dev(rcb_common,
+			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+			       timeout);
 }
 
 static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
 {
-	if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
 		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
 	else
 		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
@@ -361,10 +327,11 @@
 
 	for (i = 0; i < port_num; i++) {
 		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
-		(void)hns_rcb_set_port_coalesced_frames(
-			rcb_common, i, rcb_common->coalesced_frames);
+		(void)hns_rcb_set_coalesced_frames(
+			rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
+		hns_rcb_set_port_timeout(
+			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
 	}
-	hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
 
 	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
 		       HNS_RCB_COMMON_ENDIAN);
@@ -460,38 +427,23 @@
 	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
 }
 
-static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
+static int hns_rcb_get_port_in_comm(
+	struct rcb_common_cb *rcb_common, int ring_idx)
 {
-	int comm_index = rcb_common->comm_index;
-	int port;
-	int q_num;
 
-	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
-		q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
-		port = ring_idx / q_num;
-	} else {
-		port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1;
-	}
-
-	return port;
+	return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
 }
 
 #define SERVICE_RING_IRQ_IDX(v1) \
 	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
-#define DEBUG_RING_IRQ_IDX(v1) \
-	((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX)
-#define DEBUG_RING_IRQ_OFFSET(v1) \
-	((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET)
 static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
 {
-	int comm_index = rcb_common->comm_index;
 	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
 
-	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
 		return SERVICE_RING_IRQ_IDX(is_ver1);
 	else
-		return  DEBUG_RING_IRQ_IDX(is_ver1) +
-			(comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1);
+		return  HNS_DEBUG_RING_IRQ_IDX;
 }
 
 #define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
@@ -518,7 +470,8 @@
 		ring_pair_cb->index = i;
 		ring_pair_cb->q.io_base =
 			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
-		ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
+		ring_pair_cb->port_id_in_comm =
+			hns_rcb_get_port_in_comm(rcb_common, i);
 		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
 		is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
 			  platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
@@ -534,82 +487,95 @@
 /**
  *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
  *@rcb_common: rcb_common device
- *@comm_index:port index
- *return coalesced_frames
+ *@port_idx:port id in comm
+ *
+ *Returns: coalesced_frames
  */
-u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
+u32 hns_rcb_get_coalesced_frames(
+	struct rcb_common_cb *rcb_common, u32 port_idx)
 {
-	int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
-	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
-
-	return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
 }
 
 /**
  *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
  *@rcb_common: rcb_common device
- *@comm_index:port index
- *return time_out
+ *@port_idx:port id in comm
+ *
+ *Returns: time_out
  */
-u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
+u32 hns_rcb_get_coalesce_usecs(
+	struct rcb_common_cb *rcb_common, u32 port_idx)
 {
-	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
-
-	return rcb_comm->timeout;
+	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
+		       HNS_RCB_CLK_FREQ_MHZ;
+	else
+		return dsaf_read_dev(rcb_common,
+				     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
 }
 
 /**
  *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
  *@rcb_common: rcb_common device
- *@comm_index: comm :index
- *@etx_usecs:tx time for coalesced time_out
- *@rx_usecs:rx time for coalesced time_out
+ *@port_idx:port id in comm
+ *@timeout:tx/rx time for coalesced time_out
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
  */
-void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
-				int port, u32 timeout)
+int hns_rcb_set_coalesce_usecs(
+	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
 {
-	int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
-	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);
 
-	if (rcb_comm->timeout == timeout)
-		return;
+	if (timeout == old_timeout)
+		return 0;
 
-	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
-		dev_err(dsaf_dev->dev,
-			"error: not support coalesce_usecs setting!\n");
-		return;
+	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+		if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
+			dev_err(rcb_common->dsaf_dev->dev,
+				"error: not support coalesce_usecs setting!\n");
+			return -EINVAL;
+		}
 	}
-	rcb_comm->timeout = timeout;
-	hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
+	if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
+		dev_err(rcb_common->dsaf_dev->dev,
+			"error: not support coalesce %dus!\n", timeout);
+		return -EINVAL;
+	}
+	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+	return 0;
 }
 
 /**
  *hns_rcb_set_coalesced_frames - set rcb coalesced frames
  *@rcb_common: rcb_common device
- *@tx_frames:tx BD num for coalesced frames
- *@rx_frames:rx BD num for coalesced frames
- *Return 0 on success, negative on failure
+ *@port_idx:port id in comm
+ *@coalesced_frames:tx/rx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
  */
-int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
-				 int port, u32 coalesced_frames)
+int hns_rcb_set_coalesced_frames(
+	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
 {
-	int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
-	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
-	u32 coalesced_reg_val;
-	int ret;
+	u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
 
-	coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port);
-
-	if (coalesced_reg_val == coalesced_frames)
+	if (coalesced_frames == old_waterline)
 		return 0;
 
-	if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) {
-		ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port,
-							coalesced_frames);
-		return ret;
-	} else {
+	if (coalesced_frames >= rcb_common->desc_num ||
+	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
+	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
+		dev_err(rcb_common->dsaf_dev->dev,
+			"error: not support coalesce_frames setting!\n");
 		return -EINVAL;
 	}
+
+	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
+		       coalesced_frames);
+	return 0;
 }
 
 /**
@@ -619,113 +585,82 @@
  *@max_vfn : max vfn number
  *@max_q_per_vf:max ring number per vm
  */
-void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
-			    u16 *max_vfn, u16 *max_q_per_vf)
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
+			    u16 *max_q_per_vf)
 {
-	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
-		switch (dsaf_mode) {
-		case DSAF_MODE_DISABLE_6PORT_0VM:
-			*max_vfn = 1;
-			*max_q_per_vf = 16;
-			break;
-		case DSAF_MODE_DISABLE_FIX:
-			*max_vfn = 1;
-			*max_q_per_vf = 1;
-			break;
-		case DSAF_MODE_DISABLE_2PORT_64VM:
-			*max_vfn = 64;
-			*max_q_per_vf = 1;
-			break;
-		case DSAF_MODE_DISABLE_6PORT_16VM:
-			*max_vfn = 16;
-			*max_q_per_vf = 1;
-			break;
-		default:
-			*max_vfn = 1;
-			*max_q_per_vf = 16;
-			break;
-		}
-	} else {
+	switch (dsaf_mode) {
+	case DSAF_MODE_DISABLE_6PORT_0VM:
+		*max_vfn = 1;
+		*max_q_per_vf = 16;
+		break;
+	case DSAF_MODE_DISABLE_FIX:
+	case DSAF_MODE_DISABLE_SP:
 		*max_vfn = 1;
 		*max_q_per_vf = 1;
+		break;
+	case DSAF_MODE_DISABLE_2PORT_64VM:
+		*max_vfn = 64;
+		*max_q_per_vf = 1;
+		break;
+	case DSAF_MODE_DISABLE_6PORT_16VM:
+		*max_vfn = 16;
+		*max_q_per_vf = 1;
+		break;
+	default:
+		*max_vfn = 1;
+		*max_q_per_vf = 16;
+		break;
 	}
 }
 
-int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
+int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
 {
-	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
-		switch (dsaf_dev->dsaf_mode) {
-		case DSAF_MODE_ENABLE_FIX:
-			return 1;
-
-		case DSAF_MODE_DISABLE_FIX:
-			return 6;
-
-		case DSAF_MODE_ENABLE_0VM:
-			return 32;
-
-		case DSAF_MODE_DISABLE_6PORT_0VM:
-		case DSAF_MODE_ENABLE_16VM:
-		case DSAF_MODE_DISABLE_6PORT_2VM:
-		case DSAF_MODE_DISABLE_6PORT_16VM:
-		case DSAF_MODE_DISABLE_6PORT_4VM:
-		case DSAF_MODE_ENABLE_8VM:
-			return 96;
-
-		case DSAF_MODE_DISABLE_2PORT_16VM:
-		case DSAF_MODE_DISABLE_2PORT_8VM:
-		case DSAF_MODE_ENABLE_32VM:
-		case DSAF_MODE_DISABLE_2PORT_64VM:
-		case DSAF_MODE_ENABLE_128VM:
-			return 128;
-
-		default:
-			dev_warn(dsaf_dev->dev,
-				 "get ring num fail,use default!dsaf_mode=%d\n",
-				 dsaf_dev->dsaf_mode);
-			return 128;
-		}
-	} else {
+	switch (dsaf_dev->dsaf_mode) {
+	case DSAF_MODE_ENABLE_FIX:
+	case DSAF_MODE_DISABLE_SP:
 		return 1;
+
+	case DSAF_MODE_DISABLE_FIX:
+		return 6;
+
+	case DSAF_MODE_ENABLE_0VM:
+		return 32;
+
+	case DSAF_MODE_DISABLE_6PORT_0VM:
+	case DSAF_MODE_ENABLE_16VM:
+	case DSAF_MODE_DISABLE_6PORT_2VM:
+	case DSAF_MODE_DISABLE_6PORT_16VM:
+	case DSAF_MODE_DISABLE_6PORT_4VM:
+	case DSAF_MODE_ENABLE_8VM:
+		return 96;
+
+	case DSAF_MODE_DISABLE_2PORT_16VM:
+	case DSAF_MODE_DISABLE_2PORT_8VM:
+	case DSAF_MODE_ENABLE_32VM:
+	case DSAF_MODE_DISABLE_2PORT_64VM:
+	case DSAF_MODE_ENABLE_128VM:
+		return 128;
+
+	default:
+		dev_warn(dsaf_dev->dev,
+			 "get ring num fail,use default!dsaf_mode=%d\n",
+			 dsaf_dev->dsaf_mode);
+		return 128;
 	}
 }
 
-void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
-				       int comm_index)
+void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
 {
-	void __iomem *base_addr;
+	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
 
-	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
-		base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
-	else
-		base_addr = dsaf_dev->sds_base
-			+ (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
-			+ RCB_COMMON_REG_OFFSET;
-
-	return base_addr;
+	return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
 }
 
-static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
-					    int comm_index)
+static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
 {
-	struct device_node *np = dsaf_dev->dev->of_node;
-	phys_addr_t phy_addr;
-	const __be32 *tmp_addr;
-	u64 addr_offset = 0;
-	u64 size = 0;
-	int index = 0;
+	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
 
-	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
-		index    = 2;
-		addr_offset = RCB_COMMON_REG_OFFSET;
-	} else {
-		index    = 1;
-		addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
-				RCB_COMMON_REG_OFFSET;
-	}
-	tmp_addr  = of_get_address(np, index, &size, NULL);
-	phy_addr  = of_translate_address(np, tmp_addr);
-	return phy_addr + addr_offset;
+	return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
 }
 
 int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
@@ -735,7 +670,7 @@
 	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
 	u16 max_vfn;
 	u16 max_q_per_vf;
-	int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);
+	int ring_num = hns_rcb_get_ring_num(dsaf_dev);
 
 	rcb_common =
 		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
@@ -749,15 +684,13 @@
 	rcb_common->dsaf_dev = dsaf_dev;
 
 	rcb_common->desc_num = dsaf_dev->desc_num;
-	rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
-	rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
 
-	hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
+	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
 	rcb_common->max_vfn = max_vfn;
 	rcb_common->max_q_per_vf = max_q_per_vf;
 
-	rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
-	rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);
+	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
+	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);
 
 	dsaf_dev->rcb_common[comm_index] = rcb_common;
 	return 0;
@@ -951,6 +884,10 @@
 void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
 {
 	u32 *regs = data;
+	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
+	bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
+	u32 reg_tmp;
+	u32 reg_num_tmp;
 	u32 i = 0;
 
 	/*rcb common registers */
@@ -1004,12 +941,16 @@
 			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
 	}
 
-	regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
-	regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
-	regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
+	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
+	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
+	for (i = 0; i < reg_num_tmp; i++)
+		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);
+
+	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
+	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
 
 	/* mark end of rcb common regs */
-	for (i = 73; i < 80; i++)
+	for (i = 78; i < 80; i++)
 		regs[i] = 0xcccccccc;
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 81fe9f8..bd54dac 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -38,7 +38,9 @@
 #define HNS_RCB_MAX_COALESCED_FRAMES		1023
 #define HNS_RCB_MIN_COALESCED_FRAMES		1
 #define HNS_RCB_DEF_COALESCED_FRAMES		50
-#define HNS_RCB_MAX_TIME_OUT			0x500
+#define HNS_RCB_CLK_FREQ_MHZ			350
+#define HNS_RCB_MAX_COALESCED_USECS		0x3ff
+#define HNS_RCB_DEF_COALESCED_USECS		3
 
 #define HNS_RCB_COMMON_ENDIAN			1
 
@@ -82,7 +84,7 @@
 
 	int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
 
-	u8 port_id_in_dsa;
+	u8 port_id_in_comm;
 	u8 used_by_vf;
 
 	struct hns_ring_hw_stats hw_stats;
@@ -97,8 +99,6 @@
 
 	u8 comm_index;
 	u32 ring_num;
-	u32 coalesced_frames; /* frames  threshold of  rx interrupt   */
-	u32 timeout; /* time threshold of  rx interrupt  */
 	u32 desc_num; /*  desc num per queue*/
 
 	struct ring_pair_cb ring_pair_cb[0];
@@ -111,7 +111,7 @@
 int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
 void hns_rcb_start(struct hnae_queue *q, u32 val);
 void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
-void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
 			    u16 *max_vfn, u16 *max_q_per_vf);
 
 void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
@@ -125,13 +125,14 @@
 void hns_rcb_init_hw(struct ring_pair_cb *ring);
 void hns_rcb_reset_ring_hw(struct hnae_queue *q);
 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
-
-u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index);
-u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index);
-void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
-				int comm_index, u32 timeout);
-int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
-				 int comm_index, u32 coalesce_frames);
+u32 hns_rcb_get_coalesced_frames(
+	struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_coalesce_usecs(
+	struct rcb_common_cb *rcb_common, u32 port_idx);
+int hns_rcb_set_coalesce_usecs(
+	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
+int hns_rcb_set_coalesced_frames(
+	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
 void hns_rcb_update_stats(struct hnae_queue *queue);
 
 void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index bf62687..7c3b510 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -10,25 +10,20 @@
 #ifndef _DSAF_REG_H_
 #define _DSAF_REG_H_
 
-#define HNS_DEBUG_RING_IRQ_IDX 55
-#define HNS_SERVICE_RING_IRQ_IDX 59
-#define HNS_DEBUG_RING_IRQ_OFFSET 2
-#define HNSV2_DEBUG_RING_IRQ_IDX 409
-#define HNSV2_SERVICE_RING_IRQ_IDX 25
-#define HNSV2_DEBUG_RING_IRQ_OFFSET 9
+#include <linux/regmap.h>
+#define HNS_DEBUG_RING_IRQ_IDX		0
+#define HNS_SERVICE_RING_IRQ_IDX	59
+#define HNSV2_SERVICE_RING_IRQ_IDX	25
 
-#define DSAF_MAX_PORT_NUM_PER_CHIP 8
-#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6
-#define DSAF_MAX_VM_NUM 128
+#define DSAF_MAX_PORT_NUM	6
+#define DSAF_MAX_VM_NUM		128
 
-#define DSAF_COMM_DEV_NUM 3
-#define DSAF_PPE_INODE_BASE 6
-#define HNS_DSAF_COMM_SERVICE_NW_IDX 0
+#define DSAF_COMM_DEV_NUM	1
+#define DSAF_PPE_INODE_BASE	6
 #define DSAF_DEBUG_NW_NUM	2
 #define DSAF_SERVICE_NW_NUM	6
 #define DSAF_COMM_CHN		DSAF_SERVICE_NW_NUM
 #define DSAF_GE_NUM		((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
-#define DSAF_PORT_NUM		((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
 #define DSAF_XGE_NUM		DSAF_SERVICE_NW_NUM
 #define DSAF_PORT_TYPE_NUM 3
 #define DSAF_NODE_NUM		18
@@ -103,6 +98,8 @@
 /*serdes offset**/
 #define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
 #define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
+#define HNS_MAC_HILINK3V2_REG DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG
+#define HNS_MAC_HILINK4V2_REG DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG
 #define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
 #define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
 #define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
@@ -135,6 +132,7 @@
 #define DSAF_PPE_INT_STS_0_REG		0x1E0
 #define DSAF_ROCEE_INT_STS_0_REG	0x200
 #define DSAFV2_SERDES_LBK_0_REG         0x220
+#define DSAF_PAUSE_CFG_REG		0x240
 #define DSAF_PPE_QID_CFG_0_REG		0x300
 #define DSAF_SW_PORT_TYPE_0_REG		0x320
 #define DSAF_STP_PORT_TYPE_0_REG	0x340
@@ -153,6 +151,7 @@
 #define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG	0x1030
 #define DSAF_INODE_SBM_PID_NUM_0_REG		0x1038
 #define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG	0x103C
+#define DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG	0x1024
 #define DSAF_INODE_SBM_RELS_NUM_0_REG		0x104C
 #define DSAF_INODE_SBM_DROP_NUM_0_REG		0x1050
 #define DSAF_INODE_CRC_FALSE_NUM_0_REG		0x1054
@@ -404,6 +403,7 @@
 #define RCB_CFG_OVERTIME_REG			0x9300
 #define RCB_CFG_PKTLINE_INT_NUM_REG		0x9304
 #define RCB_CFG_OVERTIME_INT_NUM_REG		0x9308
+#define RCB_PORT_CFG_OVERTIME_REG		0x9430
 
 #define RCB_RING_RX_RING_BASEADDR_L_REG		0x00000
 #define RCB_RING_RX_RING_BASEADDR_H_REG		0x00004
@@ -708,6 +708,10 @@
 #define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
 #define DSAF_PFC_UNINT_CNT_S 0
 
+#define DSAF_MAC_PAUSE_RX_EN_B 2
+#define DSAF_PFC_PAUSE_RX_EN_B 1
+#define DSAF_PFC_PAUSE_TX_EN_B 0
+
 #define DSAF_PPE_QID_CFG_M 0xFF
 #define DSAF_PPE_QID_CFG_S 0
 
@@ -985,6 +989,19 @@
 	return readl(reg_addr + reg);
 }
 
+static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
+{
+	regmap_write(base, reg, value);
+}
+
+static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
+{
+	unsigned int val;
+
+	regmap_read(base, reg, &val);
+	return val;
+}
+
 #define dsaf_read_dev(a, reg) \
 	dsaf_read_reg((a)->io_base, (reg))
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 71aa37b..e621636 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -913,10 +913,7 @@
 static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
 {
 	struct hnae_ring *ring = ring_data->ring;
-	int head = ring->next_to_clean;
-
-	/* for hardware bug fixed */
-	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
 
 	if (head != ring->next_to_clean) {
 		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -959,8 +956,8 @@
 		napi_complete(napi);
 		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
 			ring_data->ring, 0);
-
-		ring_data->fini_process(ring_data);
+		if (ring_data->fini_process)
+			ring_data->fini_process(ring_data);
 		return 0;
 	}
 
@@ -1278,7 +1275,7 @@
 {
 	struct hns_nic_priv *priv = netdev_priv(netdev);
 
-	priv->netdev->trans_start = jiffies;
+	netif_trans_update(priv->netdev);
 	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
 		usleep_range(1000, 2000);
 
@@ -1379,7 +1376,7 @@
 	ret = hns_nic_net_xmit_hw(ndev, skb,
 				  &tx_ring_data(priv, skb->queue_mapping));
 	if (ret == NETDEV_TX_OK) {
-		ndev->trans_start = jiffies;
+		netif_trans_update(ndev);
 		ndev->stats.tx_bytes += skb->len;
 		ndev->stats.tx_packets++;
 	}
@@ -1651,7 +1648,7 @@
 
 	rtnl_lock();
 	/* put off any impending NetWatchDogTimeout */
-	priv->netdev->trans_start = jiffies;
+	netif_trans_update(priv->netdev);
 
 	if (type == HNAE_PORT_DEBUG) {
 		hns_nic_net_reinit(priv->netdev);
@@ -1723,6 +1720,7 @@
 {
 	struct hnae_handle *h = priv->ae_handle;
 	struct hns_nic_ring_data *rd;
+	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
 	int i;
 
 	if (h->q_num > NIC_MAX_Q_PER_VF) {
@@ -1740,7 +1738,7 @@
 		rd->queue_index = i;
 		rd->ring = &h->qs[i]->tx_ring;
 		rd->poll_one = hns_nic_tx_poll_one;
-		rd->fini_process = hns_nic_tx_fini_pro;
+		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
 
 		netif_napi_add(priv->netdev, &rd->napi,
 			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1752,7 +1750,7 @@
 		rd->ring = &h->qs[i - h->q_num]->rx_ring;
 		rd->poll_one = hns_nic_rx_poll_one;
 		rd->ex_process = hns_nic_rx_up_pro;
-		rd->fini_process = hns_nic_rx_fini_pro;
+		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
 
 		netif_napi_add(priv->netdev, &rd->napi,
 			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
@@ -1816,7 +1814,7 @@
 	h = hnae_get_handle(&priv->netdev->dev,
 			    priv->ae_node, priv->port_id, NULL);
 	if (IS_ERR_OR_NULL(h)) {
-		ret = PTR_ERR(h);
+		ret = -ENODEV;
 		dev_dbg(priv->dev, "has not handle, register notifier!\n");
 		goto out;
 	}
@@ -1875,6 +1873,7 @@
 	struct net_device *ndev;
 	struct hns_nic_priv *priv;
 	struct device_node *node = dev->of_node;
+	u32 port_id;
 	int ret;
 
 	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
@@ -1898,10 +1897,18 @@
 		dev_err(dev, "not find ae-handle\n");
 		goto out_read_prop_fail;
 	}
-
-	ret = of_property_read_u32(node, "port-id", &priv->port_id);
-	if (ret)
-		goto out_read_prop_fail;
+	/* try to find port-idx-in-ae first */
+	ret = of_property_read_u32(node, "port-idx-in-ae", &port_id);
+	if (ret) {
+		/* fall back to "port-id" for compatibility with old device trees */
+		ret = of_property_read_u32(node, "port-id", &port_id);
+		if (ret)
+			goto out_read_prop_fail;
+		/* for old dts, we need to calculate the port offset */
+		port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
+			: port_id - HNS_SRV_OFFSET;
+	}
+	priv->port_id = port_id;
 
 	hns_init_mac_addr(ndev);
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index c68ab3d..337efa5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -18,6 +18,9 @@
 
 #include "hnae.h"
 
+#define HNS_DEBUG_OFFSET	6
+#define HNS_SRV_OFFSET		2
+
 enum hns_nic_state {
 	NIC_STATE_TESTING = 0,
 	NIC_STATE_RESETTING,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 9c3ba65..3d746c8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -794,8 +794,10 @@
 	    (!ops->set_coalesce_frames))
 		return -ESRCH;
 
-	ops->set_coalesce_usecs(priv->ae_handle,
-					ec->rx_coalesce_usecs);
+	ret = ops->set_coalesce_usecs(priv->ae_handle,
+				      ec->rx_coalesce_usecs);
+	if (ret)
+		return ret;
 
 	ret = ops->set_coalesce_frames(
 		priv->ae_handle,
@@ -1013,8 +1015,8 @@
 	struct phy_device *phy_dev = priv->phy;
 
 	retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
-	retval = phy_write(phy_dev, HNS_LED_FC_REG, value);
-	retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+	retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
+	retval |= phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
 	if (retval) {
 		netdev_err(netdev, "mdiobus_write fail !\n");
 		return retval;
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 3daf2d4..631dbc7 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1102,7 +1102,7 @@
 		return -EAGAIN;
 	}
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_start_queue(dev);
 
 	lp->lan_type = hp100_sense_lan(dev);
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 7ce6379..befb4ac 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1042,7 +1042,7 @@
 		lp->last_restart = dev->stats.tx_packets;
 	}
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue (dev);
 }
 
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index c984998..3dbc53c2 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -960,7 +960,7 @@
 		lp->last_restart = dev->stats.tx_packets;
 	}
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue (dev);
 }
 
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 353f57f6..21c84cc 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -983,7 +983,7 @@
 		p->scb->cmd_cuc = CUC_START;
 		sun3_attn586();
 		WAIT_4_SCB_CMD();
-		dev->trans_start = jiffies; /* prevent tx timeout */
+		netif_trans_update(dev); /* prevent tx timeout */
 		return 0;
 	}
 #endif
@@ -996,7 +996,7 @@
 		sun3_82586_close(dev);
 		sun3_82586_open(dev);
 	}
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 }
 
 /******************************************************
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 5d7db6c..4c9771d 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -301,7 +301,7 @@
 	dev->no_mcast = 1;
 	netif_addr_unlock(dev->ndev);
 	netif_tx_unlock_bh(dev->ndev);
-	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
+	netif_trans_update(dev->ndev);	/* prevent tx timeout */
 	mal_poll_disable(dev->mal, &dev->commac);
 	netif_tx_disable(dev->ndev);
 }
@@ -1377,7 +1377,7 @@
 		DBG2(dev, "stopped TX queue" NL);
 	}
 
-	ndev->trans_start = jiffies;
+	netif_trans_update(ndev);
 	++dev->stats.tx_packets;
 	dev->stats.tx_bytes += len;
 
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index d3b9d10..5b88cc6 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -470,12 +470,38 @@
 	.ops		= &m88e1112_phy_ops,
 };
 
+static int ar8035_init(struct mii_phy *phy)
+{
+	phy_write(phy, 0x1d, 0x5); /* Address debug register 5 */
+	phy_write(phy, 0x1e, 0x2d47); /* Value copied from u-boot */
+	phy_write(phy, 0x1d, 0xb);    /* Address hib ctrl */
+	phy_write(phy, 0x1e, 0xbc20); /* Value copied from u-boot */
+
+	return 0;
+}
+
+static struct mii_phy_ops ar8035_phy_ops = {
+	.init		= ar8035_init,
+	.setup_aneg	= genmii_setup_aneg,
+	.setup_forced	= genmii_setup_forced,
+	.poll_link	= genmii_poll_link,
+	.read_link	= genmii_read_link,
+};
+
+static struct mii_phy_def ar8035_phy_def = {
+	.phy_id		= 0x004dd070,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Atheros 8035 Gigabit Ethernet",
+	.ops		= &ar8035_phy_ops,
+};
+
 static struct mii_phy_def *mii_phy_table[] = {
 	&et1011c_phy_def,
 	&cis8201_phy_def,
 	&bcm5248_phy_def,
 	&m88e1111_phy_def,
 	&m88e1112_phy_def,
+	&ar8035_phy_def,
 	&genmii_phy_def,
 	NULL
 };
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e9e16ee..864cb21 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -61,6 +61,7 @@
 #include <linux/proc_fs.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/seq_file.h>
@@ -94,6 +95,7 @@
 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
 		       union sub_crq *sub_crq);
+static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
 static int enable_scrq_irq(struct ibmvnic_adapter *,
 			   struct ibmvnic_sub_crq_queue *);
@@ -561,10 +563,141 @@
 	return 0;
 }
 
+/**
+ * build_hdr_data - creates L2/L3/L4 header data buffer
+ * @hdr_field - bitfield determining needed headers
+ * @skb - socket buffer
+ * @hdr_len - array of header lengths
+ * @hdr_data - buffer to hold the built header data
+ *
+ * Reads hdr_field to determine which headers are needed by firmware.
+ * Builds a buffer containing these headers.  Saves individual header
+ * lengths and total buffer length to be used to build descriptors.
+ */
+static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
+			  int *hdr_len, u8 *hdr_data)
+{
+	int len = 0;
+	u8 *hdr;
+
+	hdr_len[0] = sizeof(struct ethhdr);
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		hdr_len[1] = ip_hdr(skb)->ihl * 4;
+		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			hdr_len[2] = tcp_hdrlen(skb);
+		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+			hdr_len[2] = sizeof(struct udphdr);
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		hdr_len[1] = sizeof(struct ipv6hdr);
+		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+			hdr_len[2] = tcp_hdrlen(skb);
+		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+			hdr_len[2] = sizeof(struct udphdr);
+	}
+
+	memset(hdr_data, 0, 120);
+	if ((hdr_field >> 6) & 1) {
+		hdr = skb_mac_header(skb);
+		memcpy(hdr_data, hdr, hdr_len[0]);
+		len += hdr_len[0];
+	}
+
+	if ((hdr_field >> 5) & 1) {
+		hdr = skb_network_header(skb);
+		memcpy(hdr_data + len, hdr, hdr_len[1]);
+		len += hdr_len[1];
+	}
+
+	if ((hdr_field >> 4) & 1) {
+		hdr = skb_transport_header(skb);
+		memcpy(hdr_data + len, hdr, hdr_len[2]);
+		len += hdr_len[2];
+	}
+	return len;
+}
+
+/**
+ * create_hdr_descs - create header and header extension descriptors
+ * @hdr_field - bitfield determining needed headers
+ * @hdr_data - buffer containing header data
+ * @len - length of data buffer
+ * @hdr_len - array of individual header lengths
+ * @scrq_arr - descriptor array
+ *
+ * Creates header and, if needed, header extension descriptors and
+ * places them in a descriptor array, scrq_arr
+ */
+
+static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+			     union sub_crq *scrq_arr)
+{
+	union sub_crq hdr_desc;
+	int tmp_len = len;
+	u8 *data, *cur;
+	int tmp;
+
+	while (tmp_len > 0) {
+		cur = hdr_data + len - tmp_len;
+
+		memset(&hdr_desc, 0, sizeof(hdr_desc));
+		if (cur != hdr_data) {
+			data = hdr_desc.hdr_ext.data;
+			tmp = tmp_len > 29 ? 29 : tmp_len;
+			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
+			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+			hdr_desc.hdr_ext.len = tmp;
+		} else {
+			data = hdr_desc.hdr.data;
+			tmp = tmp_len > 24 ? 24 : tmp_len;
+			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
+			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
+			hdr_desc.hdr.len = tmp;
+			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
+			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
+			hdr_desc.hdr.flag = hdr_field << 1;
+		}
+		memcpy(data, cur, tmp);
+		tmp_len -= tmp;
+		*scrq_arr = hdr_desc;
+		scrq_arr++;
+	}
+}
+
+/**
+ * build_hdr_descs_arr - build a header descriptor array
+ * @txbuff - tx buffer containing the socket buffer, header data,
+ *	and the indirect descriptor array to be filled
+ * @num_entries - number of descriptors to be sent
+ * @hdr_field - bit field determining which headers will be sent
+ *
+ * This function will build a TX descriptor array with applicable
+ * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
+ */
+
+static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+				int *num_entries, u8 hdr_field)
+{
+	int hdr_len[3] = {0, 0, 0};
+	int tot_len, len;
+	u8 *hdr_data = txbuff->hdr_data;
+
+	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
+				 txbuff->hdr_data);
+	len = tot_len;
+	len -= 24;
+	if (len > 0)
+		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
+	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+			 txbuff->indir_arr + 1);
+}
+
 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	int queue_num = skb_get_queue_mapping(skb);
+	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
 	struct device *dev = &adapter->vdev->dev;
 	struct ibmvnic_tx_buff *tx_buff = NULL;
 	struct ibmvnic_tx_pool *tx_pool;
@@ -579,6 +712,7 @@
 	unsigned long lpar_rc;
 	union sub_crq tx_crq;
 	unsigned int offset;
+	int num_entries = 1;
 	unsigned char *dst;
 	u64 *handle_array;
 	int index = 0;
@@ -644,11 +778,35 @@
 			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
 	}
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
-
-	lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
-
+		hdrs += 2;
+	}
+	/* determine if l2/3/4 headers are sent to firmware */
+	if ((*hdrs >> 7) & 1 &&
+	    (skb->protocol == htons(ETH_P_IP) ||
+	     skb->protocol == htons(ETH_P_IPV6))) {
+		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
+		tx_crq.v1.n_crq_elem = num_entries;
+		tx_buff->indir_arr[0] = tx_crq;
+		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
+						    sizeof(tx_buff->indir_arr),
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+			if (!firmware_has_feature(FW_FEATURE_CMO))
+				dev_err(dev, "tx: unable to map descriptor array\n");
+			tx_map_failed++;
+			tx_dropped++;
+			ret = NETDEV_TX_BUSY;
+			goto out;
+		}
+		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+					       (u64)tx_buff->indir_dma,
+					       (u64)num_entries);
+	} else {
+		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+				      &tx_crq);
+	}
 	if (lpar_rc != H_SUCCESS) {
 		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
 
@@ -832,7 +990,7 @@
 		netdev->stats.rx_bytes += length;
 		frames_processed++;
 	}
-	replenish_pools(adapter);
+	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
 
 	if (frames_processed < budget) {
 		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1159,6 +1317,7 @@
 	union sub_crq *next;
 	int index;
 	int i, j;
+	u8 first;
 
 restart_loop:
 	while (pending_scrq(adapter, scrq)) {
@@ -1181,6 +1340,13 @@
 				txbuff->data_dma[j] = 0;
 				txbuff->used_bounce = false;
 			}
+			/* if sub_crq was sent indirectly */
+			first = txbuff->indir_arr[0].generic.first;
+			if (first == IBMVNIC_CRQ_CMD) {
+				dma_unmap_single(dev, txbuff->indir_dma,
+						 sizeof(txbuff->indir_arr),
+						 DMA_TO_DEVICE);
+			}
 
 			if (txbuff->last_frag)
 				dev_kfree_skb_any(txbuff->skb);
@@ -1261,9 +1427,9 @@
 		    entries_page : adapter->max_rx_add_entries_per_subcrq;
 
 		/* Choosing the maximum number of queues supported by firmware*/
-		adapter->req_tx_queues = adapter->min_tx_queues;
-		adapter->req_rx_queues = adapter->min_rx_queues;
-		adapter->req_rx_add_queues = adapter->min_rx_add_queues;
+		adapter->req_tx_queues = adapter->max_tx_queues;
+		adapter->req_rx_queues = adapter->max_rx_queues;
+		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
 
 		adapter->req_mtu = adapter->max_mtu;
 	}
@@ -1494,6 +1660,28 @@
 	return rc;
 }
 
+static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
+				u64 remote_handle, u64 ioba, u64 num_entries)
+{
+	unsigned int ua = adapter->vdev->unit_address;
+	struct device *dev = &adapter->vdev->dev;
+	int rc;
+
+	/* Make sure the hypervisor sees the complete request */
+	mb();
+	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
+				cpu_to_be64(remote_handle),
+				ioba, num_entries);
+
+	if (rc) {
+		if (rc == H_CLOSED)
+			dev_warn(dev, "CRQ Queue closed\n");
+		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
+	}
+
+	return rc;
+}
+
 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
 			    union ibmvnic_crq *crq)
 {
@@ -1589,13 +1777,11 @@
 		goto buf_map_failed;
 	}
 
-	rsp_buffer_size =
-	    sizeof(struct ibmvnic_login_rsp_buffer) +
-	    sizeof(u64) * (adapter->req_tx_queues +
-			   adapter->req_rx_queues *
-			   adapter->req_rx_add_queues + adapter->
-			   req_rx_add_queues) +
-	    sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS);
+	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
+			  sizeof(u64) * adapter->req_tx_queues +
+			  sizeof(u64) * adapter->req_rx_queues +
+			  sizeof(u64) * adapter->req_rx_queues +
+			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
 
 	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
 	if (!login_rsp_buffer)
@@ -1918,6 +2104,10 @@
 	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
 		adapter->netdev->features |= NETIF_F_IPV6_CSUM;
 
+	if ((adapter->netdev->features &
+	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+		adapter->netdev->features |= NETIF_F_RXCSUM;
+
 	memset(&crq, 0, sizeof(crq));
 	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
 	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
@@ -2210,6 +2400,16 @@
 	dma_unmap_single(dev, adapter->login_rsp_buf_token,
 			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
 
+	/* If the number of queues requested can't be allocated by the
+	 * server, the login response will return with code 1. We will need
+	 * to resend the login buffer with fewer queues requested.
+	 */
+	if (login_rsp_crq->generic.rc.code) {
+		adapter->renegotiate = true;
+		complete(&adapter->init_done);
+		return 0;
+	}
+
 	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
 	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
 		netdev_dbg(adapter->netdev, "%016lx\n",
@@ -3437,14 +3637,21 @@
 	init_completion(&adapter->init_done);
 	wait_for_completion(&adapter->init_done);
 
-	/* needed to pull init_sub_crqs outside of an interrupt context
-	 * because it creates IRQ mappings for the subCRQ queues, causing
-	 * a kernel warning
-	 */
-	init_sub_crqs(adapter, 0);
+	do {
+		adapter->renegotiate = false;
 
-	reinit_completion(&adapter->init_done);
-	wait_for_completion(&adapter->init_done);
+		init_sub_crqs(adapter, 0);
+		reinit_completion(&adapter->init_done);
+		wait_for_completion(&adapter->init_done);
+
+		if (adapter->renegotiate) {
+			release_sub_crqs(adapter);
+			send_cap_queries(adapter);
+
+			reinit_completion(&adapter->init_done);
+			wait_for_completion(&adapter->init_done);
+		}
+	} while (adapter->renegotiate);
 
 	/* if init_sub_crqs is partially successful, retry */
 	while (!adapter->tx_scrq || !adapter->rx_scrq) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1a9993c..0b66a50 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -879,6 +879,9 @@
 	int pool_index;
 	bool last_frag;
 	bool used_bounce;
+	union sub_crq indir_arr[6];
+	u8 hdr_data[140];
+	dma_addr_t indir_dma;
 };
 
 struct ibmvnic_tx_pool {
@@ -977,6 +980,7 @@
 	struct ibmvnic_sub_crq_queue **tx_scrq;
 	struct ibmvnic_sub_crq_queue **rx_scrq;
 	int requested_caps;
+	bool renegotiate;
 
 	/* rx structs */
 	struct napi_struct *napi;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 3772f3a..714bd10 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -25,16 +25,13 @@
 	  on the adapter. Look for a label that has a barcode and a number
 	  in the format 123456-001 (six digits hyphen three digits).
 
-	  Use the above information and the Adapter & Driver ID Guide at:
+	  Use the above information and the Adapter & Driver ID Guide that
+	  can be located at:
 
-	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+	  <http://support.intel.com>
 
 	  to identify the adapter.
 
-	  For the latest Intel PRO/100 network driver for Linux, see:
-
-	  <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
-
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/e100.txt>.
 
@@ -47,12 +44,7 @@
 	---help---
 	  This driver supports Intel(R) PRO/1000 gigabit ethernet family of
 	  adapters.  For more information on how to identify your adapter, go
-	  to the Adapter & Driver ID Guide at:
-
-	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
-	  For general information and support, go to the Intel support
-	  website at:
+	  to the Adapter & Driver ID Guide that can be located at:
 
 	  <http://support.intel.com>
 
@@ -71,12 +63,8 @@
 	  This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
 	  ethernet family of adapters. For PCI or PCI-X e1000 adapters,
 	  use the regular e1000 driver. For more information on how to
-	  identify your adapter, go to the Adapter & Driver ID Guide at:
-
-	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
-	  For general information and support, go to the Intel support
-	  website at:
+	  identify your adapter, go to the Adapter & Driver ID Guide that
+	  can be located at:
 
 	  <http://support.intel.com>
 
@@ -101,12 +89,7 @@
 	---help---
 	  This driver supports Intel(R) 82575/82576 gigabit ethernet family of
 	  adapters.  For more information on how to identify your adapter, go
-	  to the Adapter & Driver ID Guide at:
-
-	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
-	  For general information and support, go to the Intel support
-	  website at:
+	  to the Adapter & Driver ID Guide that can be located at:
 
 	  <http://support.intel.com>
 
@@ -142,12 +125,7 @@
 	---help---
 	  This driver supports Intel(R) 82576 virtual functions.  For more
 	  information on how to identify your adapter, go to the Adapter &
-	  Driver ID Guide at:
-
-	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
-	  For general information and support, go to the Intel support
-	  website at:
+	  Driver ID Guide that can be located at:
 
 	  <http://support.intel.com>
 
@@ -164,12 +142,7 @@
 	  This driver supports Intel(R) PRO/10GbE family of adapters for
 	  PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
 	  instead. For more information on how to identify your adapter, go
-	  to the Adapter & Driver ID Guide at:
-
-	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
-	  For general information and support, go to the Intel support
-	  website at:
+	  to the Adapter & Driver ID Guide that can be located at:
 
 	  <http://support.intel.com>
 
@@ -187,12 +160,7 @@
 	---help---
 	  This driver supports Intel(R) 10GbE PCI Express family of
 	  adapters.  For more information on how to identify your adapter, go
-	  to the Adapter & Driver ID Guide at:
-
-	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
-	  For general information and support, go to the Intel support
-	  website at:
+	  to the Adapter & Driver ID Guide that can be located at:
 
 	  <http://support.intel.com>
 
@@ -243,12 +211,7 @@
 	---help---
 	  This driver supports Intel(R) PCI Express virtual functions for the
 	  Intel(R) ixgbe driver.  For more information on how to identify your
-	  adapter, go to the Adapter & Driver ID Guide at:
-
-	  <http://support.intel.com/support/network/sb/CS-008441.htm>
-
-	  For general information and support, go to the Intel support
-	  website at:
+	  adapter, go to the Adapter & Driver ID Guide that can be located at:
 
 	  <http://support.intel.com>
 
@@ -266,12 +229,7 @@
 	---help---
 	  This driver supports Intel(R) Ethernet Controller XL710 Family of
 	  devices.  For more information on how to identify your adapter, go
-	  to the Adapter & Driver ID Guide at:
-
-	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
-	  For general information and support, go to the Intel support
-	  website at:
+	  to the Adapter & Driver ID Guide that can be located at:
 
 	  <http://support.intel.com>
 
@@ -326,12 +284,7 @@
 	---help---
 	  This driver supports Intel(R) XL710 and X710 virtual functions.
 	  For more information on how to identify your adapter, go to the
-	  Adapter & Driver ID Guide at:
-
-	  <http://support.intel.com/support/network/sb/CS-008441.htm>
-
-	  For general information and support, go to the Intel support
-	  website at:
+	  Adapter & Driver ID Guide that can be located at:
 
 	  <http://support.intel.com>
 
@@ -347,12 +300,7 @@
 	---help---
 	  This driver supports Intel(R) FM10000 Ethernet Switch Host
 	  Interface.  For more information on how to identify your adapter,
-	  go to the Adapter & Driver ID Guide at:
-
-	  <http://support.intel.com/support/network/sb/CS-008441.htm>
-
-	  For general information and support, go to the Intel support
-	  website at:
+	  go to the Adapter & Driver ID Guide that can be located at:
 
 	  <http://support.intel.com>
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 98fe5a2..d7bdea7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -358,6 +358,8 @@
 extern char e1000_driver_name[];
 extern const char e1000_driver_version[];
 
+int e1000_open(struct net_device *netdev);
+int e1000_close(struct net_device *netdev);
 int e1000_up(struct e1000_adapter *adapter);
 void e1000_down(struct e1000_adapter *adapter);
 void e1000_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 83e557c..975eeb8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1553,7 +1553,7 @@
 
 		if (if_running)
 			/* indicate we're in test mode */
-			dev_close(netdev);
+			e1000_close(netdev);
 		else
 			e1000_reset(adapter);
 
@@ -1582,7 +1582,7 @@
 		e1000_reset(adapter);
 		clear_bit(__E1000_TESTING, &adapter->flags);
 		if (if_running)
-			dev_open(netdev);
+			e1000_open(netdev);
 	} else {
 		e_info(hw, "online testing starting\n");
 		/* Online tests */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3fc7bde..f42129d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -114,8 +114,8 @@
 static void e1000_remove(struct pci_dev *pdev);
 static int e1000_alloc_queues(struct e1000_adapter *adapter);
 static int e1000_sw_init(struct e1000_adapter *adapter);
-static int e1000_open(struct net_device *netdev);
-static int e1000_close(struct net_device *netdev);
+int e1000_open(struct net_device *netdev);
+int e1000_close(struct net_device *netdev);
 static void e1000_configure_tx(struct e1000_adapter *adapter);
 static void e1000_configure_rx(struct e1000_adapter *adapter);
 static void e1000_setup_rctl(struct e1000_adapter *adapter);
@@ -1360,7 +1360,7 @@
  * handler is registered with the OS, the watchdog task is started,
  * and the stack is notified that the interface is ready.
  **/
-static int e1000_open(struct net_device *netdev)
+int e1000_open(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -1437,7 +1437,7 @@
  * needs to be disabled.  A global MAC reset is issued to stop the
  * hardware, and all transmit and receive resources are freed.
  **/
-static int e1000_close(struct net_device *netdev)
+int e1000_close(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -3106,7 +3106,7 @@
 	return __e1000_maybe_stop_tx(netdev, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
+#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				    struct net_device *netdev)
 {
@@ -3256,12 +3256,29 @@
 			     nr_frags, mss);
 
 	if (count) {
+		/* More descriptors are needed than in other Intel drivers
+		 * due to a number of workarounds.  The breakdown is below:
+		 * Data descriptors: MAX_SKB_FRAGS + 1
+		 * Context Descriptor: 1
+		 * Keep head from touching tail: 2
+		 * Workarounds: 3
+		 */
+		int desc_needed = MAX_SKB_FRAGS + 7;
+
 		netdev_sent_queue(netdev, skb->len);
 		skb_tx_timestamp(skb);
 
 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
+
+		/* 82544 potentially requires twice as many data descriptors
+		 * in order to guarantee buffers don't end on evenly-aligned
+		 * dwords
+		 */
+		if (adapter->pcix_82544)
+			desc_needed += MAX_SKB_FRAGS + 1;
+
 		/* Make sure there is space in the ring for the next send. */
-		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
 
 		if (!skb->xmit_more ||
 		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 2af603f..cd39137 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -121,7 +121,7 @@
 	/* EEPROM access above 16k is unsupported */
 	if (size > 14)
 		size = 14;
-	nvm->word_size = 1 << size;
+	nvm->word_size = BIT(size);
 
 	return 0;
 }
@@ -845,27 +845,27 @@
 
 	/* Transmit Descriptor Control 0 */
 	reg = er32(TXDCTL(0));
-	reg |= (1 << 22);
+	reg |= BIT(22);
 	ew32(TXDCTL(0), reg);
 
 	/* Transmit Descriptor Control 1 */
 	reg = er32(TXDCTL(1));
-	reg |= (1 << 22);
+	reg |= BIT(22);
 	ew32(TXDCTL(1), reg);
 
 	/* Transmit Arbitration Control 0 */
 	reg = er32(TARC(0));
 	reg &= ~(0xF << 27);	/* 30:27 */
 	if (hw->phy.media_type != e1000_media_type_copper)
-		reg &= ~(1 << 20);
+		reg &= ~BIT(20);
 	ew32(TARC(0), reg);
 
 	/* Transmit Arbitration Control 1 */
 	reg = er32(TARC(1));
 	if (er32(TCTL) & E1000_TCTL_MULR)
-		reg &= ~(1 << 28);
+		reg &= ~BIT(28);
 	else
-		reg |= (1 << 28);
+		reg |= BIT(28);
 	ew32(TARC(1), reg);
 
 	/* Disable IPv6 extension header parsing because some malformed
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 5f70164..7fd4d54 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -185,7 +185,7 @@
 		/* EEPROM access above 16k is unsupported */
 		if (size > 14)
 			size = 14;
-		nvm->word_size = 1 << size;
+		nvm->word_size = BIT(size);
 		break;
 	}
 
@@ -1163,12 +1163,12 @@
 
 	/* Transmit Descriptor Control 0 */
 	reg = er32(TXDCTL(0));
-	reg |= (1 << 22);
+	reg |= BIT(22);
 	ew32(TXDCTL(0), reg);
 
 	/* Transmit Descriptor Control 1 */
 	reg = er32(TXDCTL(1));
-	reg |= (1 << 22);
+	reg |= BIT(22);
 	ew32(TXDCTL(1), reg);
 
 	/* Transmit Arbitration Control 0 */
@@ -1177,11 +1177,11 @@
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
-		reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+		reg |= BIT(23) | BIT(24) | BIT(25) | BIT(26);
 		break;
 	case e1000_82574:
 	case e1000_82583:
-		reg |= (1 << 26);
+		reg |= BIT(26);
 		break;
 	default:
 		break;
@@ -1193,12 +1193,12 @@
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
-		reg &= ~((1 << 29) | (1 << 30));
-		reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+		reg &= ~(BIT(29) | BIT(30));
+		reg |= BIT(22) | BIT(24) | BIT(25) | BIT(26);
 		if (er32(TCTL) & E1000_TCTL_MULR)
-			reg &= ~(1 << 28);
+			reg &= ~BIT(28);
 		else
-			reg |= (1 << 28);
+			reg |= BIT(28);
 		ew32(TARC(1), reg);
 		break;
 	default:
@@ -1211,7 +1211,7 @@
 	case e1000_82574:
 	case e1000_82583:
 		reg = er32(CTRL);
-		reg &= ~(1 << 29);
+		reg &= ~BIT(29);
 		ew32(CTRL, reg);
 		break;
 	default:
@@ -1224,8 +1224,8 @@
 	case e1000_82574:
 	case e1000_82583:
 		reg = er32(CTRL_EXT);
-		reg &= ~(1 << 23);
-		reg |= (1 << 22);
+		reg &= ~BIT(23);
+		reg |= BIT(22);
 		ew32(CTRL_EXT, reg);
 		break;
 	default:
@@ -1261,7 +1261,7 @@
 	case e1000_82574:
 	case e1000_82583:
 		reg = er32(GCR);
-		reg |= (1 << 22);
+		reg |= BIT(22);
 		ew32(GCR, reg);
 
 		/* Workaround for hardware errata.
@@ -1308,8 +1308,8 @@
 				       E1000_VFTA_ENTRY_SHIFT) &
 			    E1000_VFTA_ENTRY_MASK;
 			vfta_bit_in_reg =
-			    1 << (hw->mng_cookie.vlan_id &
-				  E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+			    BIT(hw->mng_cookie.vlan_id &
+				E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
 		}
 		break;
 	default:
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1dc293b..ef96cd1 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -109,18 +109,18 @@
 #define E1000_TXDCTL_DMA_BURST_ENABLE                          \
 	(E1000_TXDCTL_GRAN | /* set descriptor granularity */  \
 	 E1000_TXDCTL_COUNT_DESC |                             \
-	 (1 << 16) | /* wthresh must be +1 more than desired */\
-	 (1 << 8)  | /* hthresh */                             \
-	 0x1f)       /* pthresh */
+	 (1u << 16) | /* wthresh must be +1 more than desired */\
+	 (1u << 8)  | /* hthresh */                             \
+	 0x1f)        /* pthresh */
 
 #define E1000_RXDCTL_DMA_BURST_ENABLE                          \
 	(0x01000000 | /* set descriptor granularity */         \
-	 (4 << 16)  | /* set writeback threshold    */         \
-	 (4 << 8)   | /* set prefetch threshold     */         \
+	 (4u << 16) | /* set writeback threshold    */         \
+	 (4u << 8)  | /* set prefetch threshold     */         \
 	 0x20)        /* set hthresh                */
 
-#define E1000_TIDV_FPD (1 << 31)
-#define E1000_RDTR_FPD (1 << 31)
+#define E1000_TIDV_FPD BIT(31)
+#define E1000_RDTR_FPD BIT(31)
 
 enum e1000_boards {
 	board_82571,
@@ -347,6 +347,7 @@
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info ptp_clock_info;
 	struct pm_qos_request pm_qos_req;
+	s32 ptp_delta;
 
 	u16 eee_advert;
 };
@@ -404,53 +405,53 @@
 #define E1000_82574_SYSTIM_EPSILON	(1ULL << 35ULL)
 
 /* hardware capability, feature, and workaround flags */
-#define FLAG_HAS_AMT                      (1 << 0)
-#define FLAG_HAS_FLASH                    (1 << 1)
-#define FLAG_HAS_HW_VLAN_FILTER           (1 << 2)
-#define FLAG_HAS_WOL                      (1 << 3)
-/* reserved bit4 */
-#define FLAG_HAS_CTRLEXT_ON_LOAD          (1 << 5)
-#define FLAG_HAS_SWSM_ON_LOAD             (1 << 6)
-#define FLAG_HAS_JUMBO_FRAMES             (1 << 7)
-#define FLAG_READ_ONLY_NVM                (1 << 8)
-#define FLAG_IS_ICH                       (1 << 9)
-#define FLAG_HAS_MSIX                     (1 << 10)
-#define FLAG_HAS_SMART_POWER_DOWN         (1 << 11)
-#define FLAG_IS_QUAD_PORT_A               (1 << 12)
-#define FLAG_IS_QUAD_PORT                 (1 << 13)
-#define FLAG_HAS_HW_TIMESTAMP             (1 << 14)
-#define FLAG_APME_IN_WUC                  (1 << 15)
-#define FLAG_APME_IN_CTRL3                (1 << 16)
-#define FLAG_APME_CHECK_PORT_B            (1 << 17)
-#define FLAG_DISABLE_FC_PAUSE_TIME        (1 << 18)
-#define FLAG_NO_WAKE_UCAST                (1 << 19)
-#define FLAG_MNG_PT_ENABLED               (1 << 20)
-#define FLAG_RESET_OVERWRITES_LAA         (1 << 21)
-#define FLAG_TARC_SPEED_MODE_BIT          (1 << 22)
-#define FLAG_TARC_SET_BIT_ZERO            (1 << 23)
-#define FLAG_RX_NEEDS_RESTART             (1 << 24)
-#define FLAG_LSC_GIG_SPEED_DROP           (1 << 25)
-#define FLAG_SMART_POWER_DOWN             (1 << 26)
-#define FLAG_MSI_ENABLED                  (1 << 27)
-/* reserved (1 << 28) */
-#define FLAG_TSO_FORCE                    (1 << 29)
-#define FLAG_RESTART_NOW                  (1 << 30)
-#define FLAG_MSI_TEST_FAILED              (1 << 31)
+#define FLAG_HAS_AMT                      BIT(0)
+#define FLAG_HAS_FLASH                    BIT(1)
+#define FLAG_HAS_HW_VLAN_FILTER           BIT(2)
+#define FLAG_HAS_WOL                      BIT(3)
+/* reserved BIT(4) */
+#define FLAG_HAS_CTRLEXT_ON_LOAD          BIT(5)
+#define FLAG_HAS_SWSM_ON_LOAD             BIT(6)
+#define FLAG_HAS_JUMBO_FRAMES             BIT(7)
+#define FLAG_READ_ONLY_NVM                BIT(8)
+#define FLAG_IS_ICH                       BIT(9)
+#define FLAG_HAS_MSIX                     BIT(10)
+#define FLAG_HAS_SMART_POWER_DOWN         BIT(11)
+#define FLAG_IS_QUAD_PORT_A               BIT(12)
+#define FLAG_IS_QUAD_PORT                 BIT(13)
+#define FLAG_HAS_HW_TIMESTAMP             BIT(14)
+#define FLAG_APME_IN_WUC                  BIT(15)
+#define FLAG_APME_IN_CTRL3                BIT(16)
+#define FLAG_APME_CHECK_PORT_B            BIT(17)
+#define FLAG_DISABLE_FC_PAUSE_TIME        BIT(18)
+#define FLAG_NO_WAKE_UCAST                BIT(19)
+#define FLAG_MNG_PT_ENABLED               BIT(20)
+#define FLAG_RESET_OVERWRITES_LAA         BIT(21)
+#define FLAG_TARC_SPEED_MODE_BIT          BIT(22)
+#define FLAG_TARC_SET_BIT_ZERO            BIT(23)
+#define FLAG_RX_NEEDS_RESTART             BIT(24)
+#define FLAG_LSC_GIG_SPEED_DROP           BIT(25)
+#define FLAG_SMART_POWER_DOWN             BIT(26)
+#define FLAG_MSI_ENABLED                  BIT(27)
+/* reserved BIT(28) */
+#define FLAG_TSO_FORCE                    BIT(29)
+#define FLAG_RESTART_NOW                  BIT(30)
+#define FLAG_MSI_TEST_FAILED              BIT(31)
 
-#define FLAG2_CRC_STRIPPING               (1 << 0)
-#define FLAG2_HAS_PHY_WAKEUP              (1 << 1)
-#define FLAG2_IS_DISCARDING               (1 << 2)
-#define FLAG2_DISABLE_ASPM_L1             (1 << 3)
-#define FLAG2_HAS_PHY_STATS               (1 << 4)
-#define FLAG2_HAS_EEE                     (1 << 5)
-#define FLAG2_DMA_BURST                   (1 << 6)
-#define FLAG2_DISABLE_ASPM_L0S            (1 << 7)
-#define FLAG2_DISABLE_AIM                 (1 << 8)
-#define FLAG2_CHECK_PHY_HANG              (1 << 9)
-#define FLAG2_NO_DISABLE_RX               (1 << 10)
-#define FLAG2_PCIM2PCI_ARBITER_WA         (1 << 11)
-#define FLAG2_DFLT_CRC_STRIPPING          (1 << 12)
-#define FLAG2_CHECK_RX_HWTSTAMP           (1 << 13)
+#define FLAG2_CRC_STRIPPING               BIT(0)
+#define FLAG2_HAS_PHY_WAKEUP              BIT(1)
+#define FLAG2_IS_DISCARDING               BIT(2)
+#define FLAG2_DISABLE_ASPM_L1             BIT(3)
+#define FLAG2_HAS_PHY_STATS               BIT(4)
+#define FLAG2_HAS_EEE                     BIT(5)
+#define FLAG2_DMA_BURST                   BIT(6)
+#define FLAG2_DISABLE_ASPM_L0S            BIT(7)
+#define FLAG2_DISABLE_AIM                 BIT(8)
+#define FLAG2_CHECK_PHY_HANG              BIT(9)
+#define FLAG2_NO_DISABLE_RX               BIT(10)
+#define FLAG2_PCIM2PCI_ARBITER_WA         BIT(11)
+#define FLAG2_DFLT_CRC_STRIPPING          BIT(12)
+#define FLAG2_CHECK_RX_HWTSTAMP           BIT(13)
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -480,6 +481,8 @@
 void e1000e_check_options(struct e1000_adapter *adapter);
 void e1000e_set_ethtool_ops(struct net_device *netdev);
 
+int e1000e_open(struct net_device *netdev);
+int e1000e_close(struct net_device *netdev);
 void e1000e_up(struct e1000_adapter *adapter);
 void e1000e_down(struct e1000_adapter *adapter, bool reset);
 void e1000e_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 6cab1f3..7aff68a 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -201,6 +201,9 @@
 	else
 		ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
 
+	if (hw->phy.media_type != e1000_media_type_copper)
+		ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
 	return 0;
 }
 
@@ -236,8 +239,13 @@
 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
 		break;
 	case SPEED_1000 + DUPLEX_FULL:
-		mac->autoneg = 1;
-		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+		if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+			mac->autoneg = 1;
+			adapter->hw.phy.autoneg_advertised =
+				ADVERTISE_1000_FULL;
+		} else {
+			mac->forced_speed_duplex = ADVERTISE_1000_FULL;
+		}
 		break;
 	case SPEED_1000 + DUPLEX_HALF:	/* not supported */
 	default:
@@ -439,8 +447,9 @@
 
 	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
 
-	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
-	    adapter->pdev->device;
+	regs->version = (1u << 24) |
+			(adapter->pdev->revision << 16) |
+			adapter->pdev->device;
 
 	regs_buff[0] = er32(CTRL);
 	regs_buff[1] = er32(STATUS);
@@ -895,7 +904,7 @@
 	case e1000_pch2lan:
 	case e1000_pch_lpt:
 	case e1000_pch_spt:
-		mask |= (1 << 18);
+		mask |= BIT(18);
 		break;
 	default:
 		break;
@@ -914,9 +923,9 @@
 
 			/* SHRAH[9] different than the others */
 			if (i == 10)
-				mask |= (1 << 30);
+				mask |= BIT(30);
 			else
-				mask &= ~(1 << 30);
+				mask &= ~BIT(30);
 		}
 		if (mac->type == e1000_pch2lan) {
 			/* SHRAH[0,1,2] different than previous */
@@ -924,7 +933,7 @@
 				mask &= 0xFFF4FFFF;
 			/* SHRAH[3] different than SHRAH[0,1,2] */
 			if (i == 4)
-				mask |= (1 << 30);
+				mask |= BIT(30);
 			/* RAR[1-6] owned by management engine - skipping */
 			if (i > 0)
 				i += 6;
@@ -1019,7 +1028,7 @@
 	/* Test each interrupt */
 	for (i = 0; i < 10; i++) {
 		/* Interrupt to test */
-		mask = 1 << i;
+		mask = BIT(i);
 
 		if (adapter->flags & FLAG_IS_ICH) {
 			switch (mask) {
@@ -1387,7 +1396,7 @@
 	case e1000_phy_82579:
 		/* Disable PHY energy detect power down */
 		e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
-		e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+		e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3));
 		/* Disable full chip energy detect */
 		e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
 		e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
@@ -1453,7 +1462,7 @@
 
 	/* disable autoneg */
 	ctrl = er32(TXCW);
-	ctrl &= ~(1 << 31);
+	ctrl &= ~BIT(31);
 	ew32(TXCW, ctrl);
 
 	link = (er32(STATUS) & E1000_STATUS_LU);
@@ -1816,7 +1825,7 @@
 
 		if (if_running)
 			/* indicate we're in test mode */
-			dev_close(netdev);
+			e1000e_close(netdev);
 
 		if (e1000_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1849,7 +1858,7 @@
 
 		clear_bit(__E1000_TESTING, &adapter->state);
 		if (if_running)
-			dev_open(netdev);
+			e1000e_open(netdev);
 	} else {
 		/* Online tests */
 
@@ -2283,19 +2292,19 @@
 				  SOF_TIMESTAMPING_RX_HARDWARE |
 				  SOF_TIMESTAMPING_RAW_HARDWARE);
 
-	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
 
-	info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
-			    (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-			    (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			    (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-			    (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
-			    (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-			    (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-			    (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-			    (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-			    (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-			    (1 << HWTSTAMP_FILTER_ALL));
+	info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) |
+			    BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+			    BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+			    BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+			    BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+			    BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+			    BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+			    BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+			    BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+			    BIT(HWTSTAMP_FILTER_ALL));
 
 	if (adapter->ptp_clock)
 		info->phc_index = ptp_clock_index(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index c0f4887..3e11322 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1048,7 +1048,7 @@
 
 		while (value > PCI_LTR_VALUE_MASK) {
 			scale++;
-			value = DIV_ROUND_UP(value, (1 << 5));
+			value = DIV_ROUND_UP(value, BIT(5));
 		}
 		if (scale > E1000_LTRV_SCALE_MAX) {
 			e_dbg("Invalid LTR latency scale %d\n", scale);
@@ -1573,7 +1573,7 @@
 		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
 
 		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
-			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+			phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
 
 		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
 		break;
@@ -2044,9 +2044,9 @@
 		/* Restore SMBus frequency */
 		if (freq--) {
 			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
-			phy_data |= (freq & (1 << 0)) <<
+			phy_data |= (freq & BIT(0)) <<
 			    HV_SMB_ADDR_FREQ_LOW_SHIFT;
-			phy_data |= (freq & (1 << 1)) <<
+			phy_data |= (freq & BIT(1)) <<
 			    (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
 		} else {
 			e_dbg("Unsupported SMB frequency in PHY\n");
@@ -2530,7 +2530,7 @@
 
 	/* disable Rx path while enabling/disabling workaround */
 	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
-	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
 	if (ret_val)
 		return ret_val;
 
@@ -2561,7 +2561,7 @@
 
 		/* Enable jumbo frame workaround in the MAC */
 		mac_reg = er32(FFLT_DBG);
-		mac_reg &= ~(1 << 14);
+		mac_reg &= ~BIT(14);
 		mac_reg |= (7 << 15);
 		ew32(FFLT_DBG, mac_reg);
 
@@ -2576,7 +2576,7 @@
 			return ret_val;
 		ret_val = e1000e_write_kmrn_reg(hw,
 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
-						data | (1 << 0));
+						data | BIT(0));
 		if (ret_val)
 			return ret_val;
 		ret_val = e1000e_read_kmrn_reg(hw,
@@ -2600,7 +2600,7 @@
 		if (ret_val)
 			return ret_val;
 		e1e_rphy(hw, PHY_REG(769, 16), &data);
-		data &= ~(1 << 13);
+		data &= ~BIT(13);
 		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
 		if (ret_val)
 			return ret_val;
@@ -2614,7 +2614,7 @@
 		if (ret_val)
 			return ret_val;
 		e1e_rphy(hw, HV_PM_CTRL, &data);
-		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
 		if (ret_val)
 			return ret_val;
 	} else {
@@ -2634,7 +2634,7 @@
 			return ret_val;
 		ret_val = e1000e_write_kmrn_reg(hw,
 						E1000_KMRNCTRLSTA_CTRL_OFFSET,
-						data & ~(1 << 0));
+						data & ~BIT(0));
 		if (ret_val)
 			return ret_val;
 		ret_val = e1000e_read_kmrn_reg(hw,
@@ -2657,7 +2657,7 @@
 		if (ret_val)
 			return ret_val;
 		e1e_rphy(hw, PHY_REG(769, 16), &data);
-		data |= (1 << 13);
+		data |= BIT(13);
 		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
 		if (ret_val)
 			return ret_val;
@@ -2671,13 +2671,13 @@
 		if (ret_val)
 			return ret_val;
 		e1e_rphy(hw, HV_PM_CTRL, &data);
-		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
 		if (ret_val)
 			return ret_val;
 	}
 
 	/* re-enable Rx path after enabling/disabling workaround */
-	return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+	return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
 }
 
 /**
@@ -4841,7 +4841,7 @@
 
 	/* Extended Device Control */
 	reg = er32(CTRL_EXT);
-	reg |= (1 << 22);
+	reg |= BIT(22);
 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
 	if (hw->mac.type >= e1000_pchlan)
 		reg |= E1000_CTRL_EXT_PHYPDEN;
@@ -4849,34 +4849,34 @@
 
 	/* Transmit Descriptor Control 0 */
 	reg = er32(TXDCTL(0));
-	reg |= (1 << 22);
+	reg |= BIT(22);
 	ew32(TXDCTL(0), reg);
 
 	/* Transmit Descriptor Control 1 */
 	reg = er32(TXDCTL(1));
-	reg |= (1 << 22);
+	reg |= BIT(22);
 	ew32(TXDCTL(1), reg);
 
 	/* Transmit Arbitration Control 0 */
 	reg = er32(TARC(0));
 	if (hw->mac.type == e1000_ich8lan)
-		reg |= (1 << 28) | (1 << 29);
-	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+		reg |= BIT(28) | BIT(29);
+	reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
 	ew32(TARC(0), reg);
 
 	/* Transmit Arbitration Control 1 */
 	reg = er32(TARC(1));
 	if (er32(TCTL) & E1000_TCTL_MULR)
-		reg &= ~(1 << 28);
+		reg &= ~BIT(28);
 	else
-		reg |= (1 << 28);
-	reg |= (1 << 24) | (1 << 26) | (1 << 30);
+		reg |= BIT(28);
+	reg |= BIT(24) | BIT(26) | BIT(30);
 	ew32(TARC(1), reg);
 
 	/* Device Status */
 	if (hw->mac.type == e1000_ich8lan) {
 		reg = er32(STATUS);
-		reg &= ~(1 << 31);
+		reg &= ~BIT(31);
 		ew32(STATUS, reg);
 	}
 
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2311f60..67163ca 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -73,10 +73,10 @@
 				 (ID_LED_OFF1_ON2  <<  4) | \
 				 (ID_LED_DEF1_DEF2))
 
-#define E1000_ICH_NVM_SIG_WORD		0x13
-#define E1000_ICH_NVM_SIG_MASK		0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK	0xC0
-#define E1000_ICH_NVM_SIG_VALUE		0x80
+#define E1000_ICH_NVM_SIG_WORD		0x13u
+#define E1000_ICH_NVM_SIG_MASK		0xC000u
+#define E1000_ICH_NVM_VALID_SIG_MASK	0xC0u
+#define E1000_ICH_NVM_SIG_VALUE		0x80u
 
 #define E1000_ICH8_LAN_INIT_TIMEOUT	1500
 
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index e59d7c2..b322011 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -346,7 +346,7 @@
 		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
 		hash_bit = hash_value & 0x1F;
 
-		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
 		mc_addr_list += (ETH_ALEN);
 	}
 
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9b4ec13..75e6089 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -242,7 +242,7 @@
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
 		pr_info("Device Name     state            trans_start      last_rx\n");
 		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
-			netdev->state, netdev->trans_start, netdev->last_rx);
+			netdev->state, dev_trans_start(netdev), netdev->last_rx);
 	}
 
 	/* Print Registers */
@@ -317,8 +317,8 @@
 		else
 			next_desc = "";
 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
-			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
-			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+			(!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
+			 ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
 			i,
 			(unsigned long long)le64_to_cpu(u0->a),
 			(unsigned long long)le64_to_cpu(u0->b),
@@ -2018,7 +2018,7 @@
 	adapter->eiac_mask |= E1000_IMS_OTHER;
 
 	/* Cause Tx interrupts on every write back */
-	ivar |= (1 << 31);
+	ivar |= BIT(31);
 
 	ew32(IVAR, ivar);
 
@@ -2709,7 +2709,7 @@
 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
 		index = (vid >> 5) & 0x7F;
 		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-		vfta |= (1 << (vid & 0x1F));
+		vfta |= BIT((vid & 0x1F));
 		hw->mac.ops.write_vfta(hw, index, vfta);
 	}
 
@@ -2737,7 +2737,7 @@
 	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
 		index = (vid >> 5) & 0x7F;
 		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
-		vfta &= ~(1 << (vid & 0x1F));
+		vfta &= ~BIT((vid & 0x1F));
 		hw->mac.ops.write_vfta(hw, index, vfta);
 	}
 
@@ -2878,7 +2878,7 @@
 
 			/* Enable this decision filter in MANC2H */
 			if (mdef)
-				manc2h |= (1 << i);
+				manc2h |= BIT(i);
 
 			j |= mdef;
 		}
@@ -2891,7 +2891,7 @@
 			if (er32(MDEF(i)) == 0) {
 				ew32(MDEF(i), (E1000_MDEF_PORT_623 |
 					       E1000_MDEF_PORT_664));
-				manc2h |= (1 << 1);
+				manc2h |= BIT(1);
 				j++;
 				break;
 			}
@@ -2971,7 +2971,7 @@
 		/* set the speed mode bit, we'll clear it if we're not at
 		 * gigabit link later
 		 */
-#define SPEED_MODE_BIT (1 << 21)
+#define SPEED_MODE_BIT BIT(21)
 		tarc |= SPEED_MODE_BIT;
 		ew32(TARC(0), tarc);
 	}
@@ -3071,12 +3071,12 @@
 
 		e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
 		phy_data &= 0xfff8;
-		phy_data |= (1 << 2);
+		phy_data |= BIT(2);
 		e1e_wphy(hw, PHY_REG(770, 26), phy_data);
 
 		e1e_rphy(hw, 22, &phy_data);
 		phy_data &= 0x0fff;
-		phy_data |= (1 << 14);
+		phy_data |= BIT(14);
 		e1e_wphy(hw, 0x10, 0x2823);
 		e1e_wphy(hw, 0x11, 0x0003);
 		e1e_wphy(hw, 22, phy_data);
@@ -3368,12 +3368,12 @@
 		 * combining
 		 */
 		netdev_for_each_uc_addr(ha, netdev) {
-			int rval;
+			int ret_val;
 
 			if (!rar_entries)
 				break;
-			rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
-			if (rval < 0)
+			ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+			if (ret_val < 0)
 				return -ENOMEM;
 			count++;
 		}
@@ -3503,8 +3503,8 @@
 	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
 		u32 fextnvm7 = er32(FEXTNVM7);
 
-		if (!(fextnvm7 & (1 << 0))) {
-			ew32(FEXTNVM7, fextnvm7 | (1 << 0));
+		if (!(fextnvm7 & BIT(0))) {
+			ew32(FEXTNVM7, fextnvm7 | BIT(0));
 			e1e_flush();
 		}
 	}
@@ -3580,7 +3580,6 @@
 	bool is_l4 = false;
 	bool is_l2 = false;
 	u32 regval;
-	s32 ret_val;
 
 	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
 		return -EINVAL;
@@ -3719,16 +3718,6 @@
 	er32(RXSTMPH);
 	er32(TXSTMPH);
 
-	/* Get and set the System Time Register SYSTIM base frequency */
-	ret_val = e1000e_get_base_timinca(adapter, &regval);
-	if (ret_val)
-		return ret_val;
-	ew32(TIMINCA, regval);
-
-	/* reset the ns time counter */
-	timecounter_init(&adapter->tc, &adapter->cc,
-			 ktime_to_ns(ktime_get_real()));
-
 	return 0;
 }
 
@@ -3839,7 +3828,7 @@
 	/* update thresholds: prefetch threshold to 31, host threshold to 1
 	 * and make sure the granularity is "descriptors" and not "cache lines"
 	 */
-	rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+	rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC);
 
 	ew32(RXDCTL(0), rxdctl);
 	/* momentarily enable the RX ring for the changes to take effect */
@@ -3885,6 +3874,53 @@
 }
 
 /**
+ * e1000e_systim_reset - reset the timesync registers after a hardware reset
+ * @adapter: board private structure
+ *
+ * When the MAC is reset, all hardware bits for timesync will be reset to the
+ * default values. This function restores the settings that were last in place.
+ * Since the clock SYSTIME registers are reset, we will simply restore the
+ * cyclecounter to the kernel real clock time.
+ **/
+static void e1000e_systim_reset(struct e1000_adapter *adapter)
+{
+	struct ptp_clock_info *info = &adapter->ptp_clock_info;
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned long flags;
+	u32 timinca;
+	s32 ret_val;
+
+	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+		return;
+
+	if (info->adjfreq) {
+		/* restore the previous ptp frequency delta */
+		ret_val = info->adjfreq(info, adapter->ptp_delta);
+	} else {
+		/* set the default base frequency if no adjustment possible */
+		ret_val = e1000e_get_base_timinca(adapter, &timinca);
+		if (!ret_val)
+			ew32(TIMINCA, timinca);
+	}
+
+	if (ret_val) {
+		dev_warn(&adapter->pdev->dev,
+			 "Failed to restore TIMINCA clock rate delta: %d\n",
+			 ret_val);
+		return;
+	}
+
+	/* reset the systim ns time counter */
+	spin_lock_irqsave(&adapter->systim_lock, flags);
+	timecounter_init(&adapter->tc, &adapter->cc,
+			 ktime_to_ns(ktime_get_real()));
+	spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+	/* restore the previous hwtstamp configuration settings */
+	e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+}
+
+/**
  * e1000e_reset - bring the hardware into a known good state
  *
  * This function boots the hardware and enables some settings that
@@ -4063,8 +4099,8 @@
 
 	e1000e_reset_adaptive(hw);
 
-	/* initialize systim and reset the ns time counter */
-	e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+	/* restore systim and hwtstamp settings */
+	e1000e_systim_reset(adapter);
 
 	/* Set EEE advertisement as appropriate */
 	if (adapter->flags2 & FLAG2_HAS_EEE) {
@@ -4275,7 +4311,7 @@
 	struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
 						     cc);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 systimel_1, systimel_2, systimeh;
+	u32 systimel, systimeh;
 	cycle_t systim, systim_next;
 	/* SYSTIMH latching upon SYSTIML read does not work well.
 	 * This means that if SYSTIML overflows after we read it but before
@@ -4283,24 +4319,25 @@
 	 * will experience a huge non linear increment in the systime value
 	 * to fix that we test for overflow and if true, we re-read systime.
 	 */
-	systimel_1 = er32(SYSTIML);
+	systimel = er32(SYSTIML);
 	systimeh = er32(SYSTIMH);
-	systimel_2 = er32(SYSTIML);
-	/* Check for overflow. If there was no overflow, use the values */
-	if (systimel_1 < systimel_2) {
-		systim = (cycle_t)systimel_1;
-		systim |= (cycle_t)systimeh << 32;
-	} else {
-		/* There was an overflow, read again SYSTIMH, and use
-		 * systimel_2
-		 */
-		systimeh = er32(SYSTIMH);
-		systim = (cycle_t)systimel_2;
-		systim |= (cycle_t)systimeh << 32;
+	/* Is systimel so large that overflow is possible? */
+	if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
+		u32 systimel_2 = er32(SYSTIML);
+		if (systimel > systimel_2) {
+			/* There was an overflow, read again SYSTIMH, and use
+			 * systimel_2
+			 */
+			systimeh = er32(SYSTIMH);
+			systimel = systimel_2;
+		}
 	}
+	systim = (cycle_t)systimel;
+	systim |= (cycle_t)systimeh << 32;
 
 	if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
-		u64 incvalue, time_delta, rem, temp;
+		u64 time_delta, rem, temp;
+		u32 incvalue;
 		int i;
 
 		/* errata for 82574/82583 possible bad bits read from SYSTIMH/L
@@ -4495,7 +4532,7 @@
 }
 
 /**
- * e1000_open - Called when a network interface is made active
+ * e1000e_open - Called when a network interface is made active
  * @netdev: network interface device structure
  *
  * Returns 0 on success, negative value on failure
@@ -4506,7 +4543,7 @@
  * handler is registered with the OS, the watchdog timer is started,
  * and the stack is notified that the interface is ready.
  **/
-static int e1000_open(struct net_device *netdev)
+int e1000e_open(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
@@ -4604,7 +4641,7 @@
 }
 
 /**
- * e1000_close - Disables a network interface
+ * e1000e_close - Disables a network interface
  * @netdev: network interface device structure
  *
  * Returns 0, this is not allowed to fail
@@ -4614,7 +4651,7 @@
  * needs to be disabled.  A global MAC reset is issued to stop the
  * hardware, and all transmit and receive resources are freed.
  **/
-static int e1000_close(struct net_device *netdev)
+int e1000e_close(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct pci_dev *pdev = adapter->pdev;
@@ -6861,7 +6898,7 @@
 
 	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
 	le16_to_cpus(&buf);
-	if (!ret_val && (!(buf & (1 << 0)))) {
+	if (!ret_val && (!(buf & BIT(0)))) {
 		/* Deep Smart Power Down (DSPD) */
 		dev_warn(&adapter->pdev->dev,
 			 "Warning: detected DSPD enabled in EEPROM\n");
@@ -6920,8 +6957,8 @@
 }
 
 static const struct net_device_ops e1000e_netdev_ops = {
-	.ndo_open		= e1000_open,
-	.ndo_stop		= e1000_close,
+	.ndo_open		= e1000e_open,
+	.ndo_stop		= e1000e_close,
 	.ndo_start_xmit		= e1000_xmit_frame,
 	.ndo_get_stats64	= e1000e_get_stats64,
 	.ndo_set_rx_mode	= e1000e_set_rx_mode,
@@ -6965,7 +7002,7 @@
 	int bars, i, err, pci_using_dac;
 	u16 eeprom_data = 0;
 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
-	s32 rval = 0;
+	s32 ret_val = 0;
 
 	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
 		aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -7200,18 +7237,18 @@
 	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
 		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
 		    (adapter->hw.bus.func == 1))
-			rval = e1000_read_nvm(&adapter->hw,
+			ret_val = e1000_read_nvm(&adapter->hw,
 					      NVM_INIT_CONTROL3_PORT_B,
 					      1, &eeprom_data);
 		else
-			rval = e1000_read_nvm(&adapter->hw,
+			ret_val = e1000_read_nvm(&adapter->hw,
 					      NVM_INIT_CONTROL3_PORT_A,
 					      1, &eeprom_data);
 	}
 
 	/* fetch WoL from EEPROM */
-	if (rval)
-		e_dbg("NVM read error getting WoL initial values: %d\n", rval);
+	if (ret_val)
+		e_dbg("NVM read error getting WoL initial values: %d\n", ret_val);
 	else if (eeprom_data & eeprom_apme_mask)
 		adapter->eeprom_wol |= E1000_WUFC_MAG;
 
@@ -7231,13 +7268,16 @@
 		device_wakeup_enable(&pdev->dev);
 
 	/* save off EEPROM version number */
-	rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+	ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
 
-	if (rval) {
-		e_dbg("NVM read error getting EEPROM version: %d\n", rval);
+	if (ret_val) {
+		e_dbg("NVM read error getting EEPROM version: %d\n", ret_val);
 		adapter->eeprom_vers = 0;
 	}
 
+	/* init PTP hardware clock */
+	e1000e_ptp_init(adapter);
+
 	/* reset the hardware with the new settings */
 	e1000e_reset(adapter);
 
@@ -7256,9 +7296,6 @@
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
-	/* init PTP hardware clock */
-	e1000e_ptp_init(adapter);
-
 	e1000_print_device_info(adapter);
 
 	if (pci_dev_run_wake(pdev))
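
  The cyclecounter read rework in the netdev.c hunk above keeps the original goal --
  assembling a consistent 64-bit SYSTIM value from SYSTIML/SYSTIMH, whose high half does
  not latch when the low half is read -- but now only pays for a second SYSTIML read when
  the low word is close enough to wrapping that an overflow between the two reads is
  plausible. A minimal sketch of the pattern, with read_lo()/read_hi() standing in for
  er32(SYSTIML)/er32(SYSTIMH) and WRAP_GUARD for E1000_TIMINCA_INCVALUE_MASK (all three
  names are placeholders, not driver symbols):

  #include <stdint.h>

  extern uint32_t read_lo(void);	/* placeholder for er32(SYSTIML) */
  extern uint32_t read_hi(void);	/* placeholder for er32(SYSTIMH) */

  #define WRAP_GUARD	0x00FFFFFFu	/* stand-in for the increment mask */

  static uint64_t read_systim(void)
  {
  	uint32_t lo = read_lo();
  	uint32_t hi = read_hi();

  	/* Re-read the low word only when it was close enough to the top of
  	 * its range that it may have wrapped between the two reads above.
  	 */
  	if (lo >= UINT32_MAX - WRAP_GUARD) {
  		uint32_t lo2 = read_lo();

  		if (lo > lo2) {
  			/* it wrapped, so hi is stale: read it again */
  			hi = read_hi();
  			lo = lo2;
  		}
  	}

  	return ((uint64_t)hi << 32) | lo;
  }
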
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 49f205c..2efd80d 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -67,7 +67,7 @@
 	u32 eecd = er32(EECD);
 	u32 mask;
 
-	mask = 0x01 << (count - 1);
+	mask = BIT(count - 1);
 	if (nvm->type == e1000_nvm_eeprom_spi)
 		eecd |= E1000_EECD_DO;
 
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index de13aea..d78d47b 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2894,11 +2894,11 @@
 		if ((hw->phy.type == e1000_phy_82578) &&
 		    (hw->phy.revision >= 1) &&
 		    (hw->phy.addr == 2) &&
-		    !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
+		    !(MAX_PHY_REG_ADDRESS & reg) && (data & BIT(11))) {
 			u16 data2 = 0x7EFF;
 
 			ret_val = e1000_access_phy_debug_regs_hv(hw,
-								 (1 << 6) | 0x3,
+								 BIT(6) | 0x3,
 								 &data2, false);
 			if (ret_val)
 				goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 55bfe47..3027f63 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -104,9 +104,9 @@
 #define BM_WUC_DATA_OPCODE		0x12
 #define BM_WUC_ENABLE_PAGE		BM_PORT_CTRL_PAGE
 #define BM_WUC_ENABLE_REG		17
-#define BM_WUC_ENABLE_BIT		(1 << 2)
-#define BM_WUC_HOST_WU_BIT		(1 << 4)
-#define BM_WUC_ME_WU_BIT		(1 << 5)
+#define BM_WUC_ENABLE_BIT		BIT(2)
+#define BM_WUC_HOST_WU_BIT		BIT(4)
+#define BM_WUC_ME_WU_BIT		BIT(5)
 
 #define PHY_UPPER_SHIFT			21
 #define BM_PHY_REG(page, reg) \
@@ -124,8 +124,8 @@
 #define I82578_ADDR_REG			29
 #define I82577_ADDR_REG			16
 #define I82577_CFG_REG			22
-#define I82577_CFG_ASSERT_CRS_ON_TX	(1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT	(3 << 10)	/* auto downshift */
+#define I82577_CFG_ASSERT_CRS_ON_TX	BIT(15)
+#define I82577_CFG_ENABLE_DOWNSHIFT	(3u << 10)	/* auto downshift */
 #define I82577_CTRL_REG			23
 
 /* 82577 specific PHY registers */
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index e2ff3ef..2e1b17a 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -79,6 +79,8 @@
 
 	ew32(TIMINCA, timinca);
 
+	adapter->ptp_delta = delta;
+
 	spin_unlock_irqrestore(&adapter->systim_lock, flags);
 
 	return 0;
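
  The one-line ptp.c change above is the counterpart of e1000e_systim_reset() added in
  netdev.c: the frequency delta last requested through the PTP adjfreq callback is cached
  in the adapter so that, after a MAC reset wipes TIMINCA, the same trim can be replayed
  instead of falling back to the base clock rate. A minimal sketch of that save-and-replay
  pairing (the structure and function names here are illustrative, not driver symbols):

  struct clk_trim {
  	long cached_delta;	/* last delta handed to the adjfreq callback */
  };

  /* adjfreq path: program the new rate, then remember what was asked for */
  static int trim_adjfreq(struct clk_trim *clk, long delta)
  {
  	/* ... compute and write the TIMINCA-style increment here ... */
  	clk->cached_delta = delta;
  	return 0;
  }

  /* reset path: replay the cached delta so the clock keeps its trim */
  static void trim_restore(struct clk_trim *clk)
  {
  	trim_adjfreq(clk, clk->cached_delta);
  }
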
diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile
index b006ff6..cac6453 100644
--- a/drivers/net/ethernet/intel/fm10k/Makefile
+++ b/drivers/net/ethernet/intel/fm10k/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
-# Intel Ethernet Switch Host Interface Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
+# Intel(R) Ethernet Switch Host Interface Driver
+# Copyright(c) 2013 - 2016 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -22,7 +22,7 @@
 ################################################################################
 
 #
-# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver
+# Makefile for the Intel(R) Ethernet Switch Host Interface Driver
 #
 
 obj-$(CONFIG_FM10K) += fm10k.o
@@ -30,7 +30,6 @@
 fm10k-y := fm10k_main.o \
 	   fm10k_common.o \
 	   fm10k_pci.o \
-	   fm10k_ptp.o \
 	   fm10k_netdev.o \
 	   fm10k_ethtool.o \
 	   fm10k_pf.o \
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index b34bb00..fcf106e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -27,9 +27,6 @@
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
 #include <linux/pci.h>
-#include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/ptp_clock_kernel.h>
 
 #include "fm10k_pf.h"
 #include "fm10k_vf.h"
@@ -262,12 +259,12 @@
 	unsigned long state;
 
 	u32 flags;
-#define FM10K_FLAG_RESET_REQUESTED		(u32)(1 << 0)
-#define FM10K_FLAG_RSS_FIELD_IPV4_UDP		(u32)(1 << 1)
-#define FM10K_FLAG_RSS_FIELD_IPV6_UDP		(u32)(1 << 2)
-#define FM10K_FLAG_RX_TS_ENABLED		(u32)(1 << 3)
-#define FM10K_FLAG_SWPRI_CONFIG			(u32)(1 << 4)
-#define FM10K_FLAG_DEBUG_STATS			(u32)(1 << 5)
+#define FM10K_FLAG_RESET_REQUESTED		(u32)(BIT(0))
+#define FM10K_FLAG_RSS_FIELD_IPV4_UDP		(u32)(BIT(1))
+#define FM10K_FLAG_RSS_FIELD_IPV6_UDP		(u32)(BIT(2))
+#define FM10K_FLAG_RX_TS_ENABLED		(u32)(BIT(3))
+#define FM10K_FLAG_SWPRI_CONFIG			(u32)(BIT(4))
+#define FM10K_FLAG_DEBUG_STATS			(u32)(BIT(5))
 	int xcast_mode;
 
 	/* Tx fast path data */
@@ -333,6 +330,7 @@
 	unsigned long last_reset;
 	unsigned long link_down_event;
 	bool host_ready;
+	bool lport_map_failed;
 
 	u32 reta[FM10K_RETA_SIZE];
 	u32 rssrk[FM10K_RSSRK_SIZE];
@@ -342,22 +340,8 @@
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dbg_intfc;
-
 #endif /* CONFIG_DEBUG_FS */
-	struct ptp_clock_info ptp_caps;
-	struct ptp_clock *ptp_clock;
 
-	struct sk_buff_head ts_tx_skb_queue;
-	u32 tx_hwtstamp_timeouts;
-
-	struct hwtstamp_config ts_config;
-	/* We are unable to actually adjust the clock beyond the frequency
-	 * value.  Once the clock is started there is no resetting it.  As
-	 * such we maintain a separate offset from the actual hardware clock
-	 * to allow for offset adjustment.
-	 */
-	s64 ptp_adjust;
-	rwlock_t systime_lock;
 #ifdef CONFIG_DCB
 	u8 pfc_en;
 #endif
@@ -510,6 +494,8 @@
 
 /* Ethtool */
 void fm10k_set_ethtool_ops(struct net_device *dev);
+u32 fm10k_get_reta_size(struct net_device *netdev);
+void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir);
 
 /* IOV */
 s32 fm10k_iov_event(struct fm10k_intfc *interface);
@@ -544,21 +530,6 @@
 static inline void fm10k_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS */
 
-/* Time Stamping */
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
-			       struct skb_shared_hwtstamps *hwtstamp,
-			       u64 systime);
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb);
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
-			  u64 systime);
-void fm10k_ts_reset(struct fm10k_intfc *interface);
-void fm10k_ts_init(struct fm10k_intfc *interface);
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface);
-void fm10k_ptp_register(struct fm10k_intfc *interface);
-void fm10k_ptp_unregister(struct fm10k_intfc *interface);
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-
 /* DCB */
 #ifdef CONFIG_DCB
 void fm10k_dcbnl_set_ops(struct net_device *dev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index 6cfae6a..5bbf19c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 45e4e5b..50f71e9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index 2be4361..db4bd8b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 5d6137f..5116fd0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 2f6a05b..9c0d875 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -77,19 +77,6 @@
 	FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
 
 	FM10K_STAT("tx_hang_count", tx_timeout_count),
-
-	FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
-};
-
-static const struct fm10k_stats fm10k_gstrings_debug_stats[] = {
-	FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full),
-	FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good),
-	FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good),
-	FM10K_STAT("rx_switch_errors", rx_switch_errors),
-	FM10K_STAT("rx_drops", rx_drops),
-	FM10K_STAT("rx_pp_errors", rx_pp_errors),
-	FM10K_STAT("rx_link_errors", rx_link_errors),
-	FM10K_STAT("rx_length_errors", rx_length_errors),
 };
 
 static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
@@ -121,13 +108,21 @@
 	FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
 };
 
+#define FM10K_QUEUE_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \
+	.stat_offset = offsetof(struct fm10k_ring, _stat) \
+}
+
+static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
+	FM10K_QUEUE_STAT("packets", stats.packets),
+	FM10K_QUEUE_STAT("bytes", stats.bytes),
+};
+
 #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
-#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats)
 #define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
 #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
-
-#define FM10K_QUEUE_STATS_LEN(_n) \
-	((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
+#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)
 
 #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
 				FM10K_NETDEV_STATS_LEN + \
@@ -145,77 +140,56 @@
 };
 
 enum {
-	FM10K_PRV_FLAG_DEBUG_STATS,
 	FM10K_PRV_FLAG_LEN,
 };
 
 static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
-	"debug-statistics",
 };
 
+static void fm10k_add_stat_strings(char **p, const char *prefix,
+				   const struct fm10k_stats stats[],
+				   const unsigned int size)
+{
+	unsigned int i;
+
+	for (i = 0; i < size; i++) {
+		snprintf(*p, ETH_GSTRING_LEN, "%s%s",
+			 prefix, stats[i].stat_string);
+		*p += ETH_GSTRING_LEN;
+	}
+}
+
 static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
 {
 	struct fm10k_intfc *interface = netdev_priv(dev);
-	struct fm10k_iov_data *iov_data = interface->iov_data;
 	char *p = (char *)data;
 	unsigned int i;
-	unsigned int j;
 
-	for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
-		memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
-		       ETH_GSTRING_LEN);
-		p += ETH_GSTRING_LEN;
-	}
+	fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats,
+			       FM10K_NETDEV_STATS_LEN);
 
-	for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
-		memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
-		       ETH_GSTRING_LEN);
-		p += ETH_GSTRING_LEN;
-	}
+	fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats,
+			       FM10K_GLOBAL_STATS_LEN);
 
-	if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
-		for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
-			memcpy(p, fm10k_gstrings_debug_stats[i].stat_string,
-			       ETH_GSTRING_LEN);
-			p += ETH_GSTRING_LEN;
-		}
-	}
+	fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats,
+			       FM10K_MBX_STATS_LEN);
 
-	for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
-		memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string,
-		       ETH_GSTRING_LEN);
-		p += ETH_GSTRING_LEN;
-	}
-
-	if (interface->hw.mac.type != fm10k_mac_vf) {
-		for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
-			memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
-			       ETH_GSTRING_LEN);
-			p += ETH_GSTRING_LEN;
-		}
-	}
-
-	if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
-		for (i = 0; i < iov_data->num_vfs; i++) {
-			for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
-				snprintf(p,
-					 ETH_GSTRING_LEN,
-					 "vf_%u_%s", i,
-					 fm10k_gstrings_mbx_stats[j].stat_string);
-				p += ETH_GSTRING_LEN;
-			}
-		}
-	}
+	if (interface->hw.mac.type != fm10k_mac_vf)
+		fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats,
+				       FM10K_PF_STATS_LEN);
 
 	for (i = 0; i < interface->hw.mac.max_queues; i++) {
-		snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
-		p += ETH_GSTRING_LEN;
-		snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
-		p += ETH_GSTRING_LEN;
-		snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
-		p += ETH_GSTRING_LEN;
-		snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
-		p += ETH_GSTRING_LEN;
+		char prefix[ETH_GSTRING_LEN];
+
+		snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
+		fm10k_add_stat_strings(&p, prefix,
+				       fm10k_gstrings_queue_stats,
+				       FM10K_QUEUE_STATS_LEN);
+
+		snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
+		fm10k_add_stat_strings(&p, prefix,
+				       fm10k_gstrings_queue_stats,
+				       FM10K_QUEUE_STATS_LEN);
 	}
 }
 
@@ -242,7 +216,6 @@
 static int fm10k_get_sset_count(struct net_device *dev, int sset)
 {
 	struct fm10k_intfc *interface = netdev_priv(dev);
-	struct fm10k_iov_data *iov_data = interface->iov_data;
 	struct fm10k_hw *hw = &interface->hw;
 	int stats_len = FM10K_STATIC_STATS_LEN;
 
@@ -250,19 +223,11 @@
 	case ETH_SS_TEST:
 		return FM10K_TEST_LEN;
 	case ETH_SS_STATS:
-		stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues);
+		stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;
 
 		if (hw->mac.type != fm10k_mac_vf)
 			stats_len += FM10K_PF_STATS_LEN;
 
-		if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
-			stats_len += FM10K_DEBUG_STATS_LEN;
-
-			if (iov_data)
-				stats_len += FM10K_MBX_STATS_LEN *
-					iov_data->num_vfs;
-		}
-
 		return stats_len;
 	case ETH_SS_PRIV_FLAGS:
 		return FM10K_PRV_FLAG_LEN;
@@ -271,93 +236,80 @@
 	}
 }
 
+static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
+				    const struct fm10k_stats stats[],
+				    const unsigned int size)
+{
+	unsigned int i;
+	char *p;
+
+	if (!pointer) {
+		/* memory is not zero-allocated, so we have to clear it */
+		for (i = 0; i < size; i++)
+			*((*data)++) = 0;
+		return;
+	}
+
+	for (i = 0; i < size; i++) {
+		p = (char *)pointer + stats[i].stat_offset;
+
+		switch (stats[i].sizeof_stat) {
+		case sizeof(u64):
+			*((*data)++) = *(u64 *)p;
+			break;
+		case sizeof(u32):
+			*((*data)++) = *(u32 *)p;
+			break;
+		case sizeof(u16):
+			*((*data)++) = *(u16 *)p;
+			break;
+		case sizeof(u8):
+			*((*data)++) = *(u8 *)p;
+			break;
+		default:
+			*((*data)++) = 0;
+		}
+	}
+}
+
 static void fm10k_get_ethtool_stats(struct net_device *netdev,
 				    struct ethtool_stats __always_unused *stats,
 				    u64 *data)
 {
-	const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
 	struct fm10k_intfc *interface = netdev_priv(netdev);
-	struct fm10k_iov_data *iov_data = interface->iov_data;
 	struct net_device_stats *net_stats = &netdev->stats;
-	char *p;
-	int i, j;
+	int i;
 
 	fm10k_update_stats(interface);
 
-	for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
-		p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset;
-		*(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat ==
-			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-	}
+	fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats,
+				FM10K_NETDEV_STATS_LEN);
 
-	for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
-		p = (char *)interface +
-		    fm10k_gstrings_global_stats[i].stat_offset;
-		*(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat ==
-			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-	}
+	fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats,
+				FM10K_GLOBAL_STATS_LEN);
 
-	if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
-		for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
-			p = (char *)interface +
-				fm10k_gstrings_debug_stats[i].stat_offset;
-			*(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat ==
-				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-		}
-	}
-
-	for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
-		p = (char *)&interface->hw.mbx +
-			fm10k_gstrings_mbx_stats[i].stat_offset;
-		*(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat ==
-			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-	}
+	fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
+				fm10k_gstrings_mbx_stats,
+				FM10K_MBX_STATS_LEN);
 
 	if (interface->hw.mac.type != fm10k_mac_vf) {
-		for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
-			p = (char *)interface +
-			    fm10k_gstrings_pf_stats[i].stat_offset;
-			*(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
-				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-		}
-	}
-
-	if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
-		for (i = 0; i < iov_data->num_vfs; i++) {
-			struct fm10k_vf_info *vf_info;
-
-			vf_info = &iov_data->vf_info[i];
-
-			/* skip stats if we don't have a vf info */
-			if (!vf_info) {
-				data += FM10K_MBX_STATS_LEN;
-				continue;
-			}
-
-			for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
-				p = (char *)&vf_info->mbx +
-					fm10k_gstrings_mbx_stats[j].stat_offset;
-				*(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat ==
-					     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-			}
-		}
+		fm10k_add_ethtool_stats(&data, interface,
+					fm10k_gstrings_pf_stats,
+					FM10K_PF_STATS_LEN);
 	}
 
 	for (i = 0; i < interface->hw.mac.max_queues; i++) {
 		struct fm10k_ring *ring;
-		u64 *queue_stat;
 
 		ring = interface->tx_ring[i];
-		if (ring)
-			queue_stat = (u64 *)&ring->stats;
-		for (j = 0; j < stat_count; j++)
-			*(data++) = ring ? queue_stat[j] : 0;
+		fm10k_add_ethtool_stats(&data, ring,
+					fm10k_gstrings_queue_stats,
+					FM10K_QUEUE_STATS_LEN);
 
 		ring = interface->rx_ring[i];
-		if (ring)
-			queue_stat = (u64 *)&ring->stats;
-		for (j = 0; j < stat_count; j++)
-			*(data++) = ring ? queue_stat[j] : 0;
+		fm10k_add_ethtool_stats(&data, ring,
+					fm10k_gstrings_queue_stats,
+					FM10K_QUEUE_STATS_LEN);
 	}
 }
 
@@ -425,7 +377,7 @@
 	u32 *buff = p;
 	u16 i;
 
-	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+	regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;
 
 	switch (hw->mac.type) {
 	case fm10k_mac_pf:
@@ -935,15 +887,15 @@
 	struct fm10k_mbx_info *mbx = &hw->mbx;
 	u32 attr_flag, test_msg[6];
 	unsigned long timeout;
-	int err;
+	int err = -EINVAL;
 
 	/* For now this is a VF only feature */
 	if (hw->mac.type != fm10k_mac_vf)
 		return 0;
 
 	/* loop through both nested and unnested attribute types */
-	for (attr_flag = (1 << FM10K_TEST_MSG_UNSET);
-	     attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED));
+	for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
+	     attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
 	     attr_flag += attr_flag) {
 		/* generate message to be tested */
 		fm10k_tlv_msg_test_create(test_msg, attr_flag);
@@ -1001,35 +953,56 @@
 
 static u32 fm10k_get_priv_flags(struct net_device *netdev)
 {
-	struct fm10k_intfc *interface = netdev_priv(netdev);
-	u32 priv_flags = 0;
-
-	if (interface->flags & FM10K_FLAG_DEBUG_STATS)
-		priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS;
-
-	return priv_flags;
+	return 0;
 }
 
 static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
 {
-	struct fm10k_intfc *interface = netdev_priv(netdev);
-
-	if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN))
+	if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
 		return -EINVAL;
 
-	if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS))
-		interface->flags |= FM10K_FLAG_DEBUG_STATS;
-	else
-		interface->flags &= ~FM10K_FLAG_DEBUG_STATS;
-
 	return 0;
 }
 
-static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
+u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
 {
 	return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
 }
 
+void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
+{
+	u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
+	struct fm10k_hw *hw = &interface->hw;
+	u32 table[4];
+	int i, j;
+
+	/* record entries to reta table */
+	for (i = 0; i < FM10K_RETA_SIZE; i++) {
+		u32 reta, n;
+
+		/* generate a new table if we weren't given one */
+		for (j = 0; j < 4; j++) {
+			if (indir)
+				n = indir[i + j];
+			else
+				n = ethtool_rxfh_indir_default(i + j, rss_i);
+
+			table[j] = n;
+		}
+
+		reta = table[0] |
+			(table[1] << 8) |
+			(table[2] << 16) |
+			(table[3] << 24);
+
+		if (interface->reta[i] == reta)
+			continue;
+
+		interface->reta[i] = reta;
+		fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
+	}
+}
+
 static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
 {
 	struct fm10k_intfc *interface = netdev_priv(netdev);
@@ -1053,7 +1026,6 @@
 static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
 {
 	struct fm10k_intfc *interface = netdev_priv(netdev);
-	struct fm10k_hw *hw = &interface->hw;
 	int i;
 	u16 rss_i;
 
@@ -1068,19 +1040,7 @@
 		return -EINVAL;
 	}
 
-	/* record entries to reta table */
-	for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
-		u32 reta = indir[0] |
-			   (indir[1] << 8) |
-			   (indir[2] << 16) |
-			   (indir[3] << 24);
-
-		if (interface->reta[i] == reta)
-			continue;
-
-		interface->reta[i] = reta;
-		fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
-	}
+	fm10k_write_reta(interface, indir);
 
 	return 0;
 }
@@ -1145,7 +1105,7 @@
 
 	/* For QoS report channels per traffic class */
 	if (tcs > 1)
-		max_combined = 1 << (fls(max_combined / tcs) - 1);
+		max_combined = BIT((fls(max_combined / tcs) - 1));
 
 	return max_combined;
 }
@@ -1192,33 +1152,6 @@
 	return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
 }
 
-static int fm10k_get_ts_info(struct net_device *dev,
-			     struct ethtool_ts_info *info)
-{
-	struct fm10k_intfc *interface = netdev_priv(dev);
-
-	info->so_timestamping =
-		SOF_TIMESTAMPING_TX_SOFTWARE |
-		SOF_TIMESTAMPING_RX_SOFTWARE |
-		SOF_TIMESTAMPING_SOFTWARE |
-		SOF_TIMESTAMPING_TX_HARDWARE |
-		SOF_TIMESTAMPING_RX_HARDWARE |
-		SOF_TIMESTAMPING_RAW_HARDWARE;
-
-	if (interface->ptp_clock)
-		info->phc_index = ptp_clock_index(interface->ptp_clock);
-	else
-		info->phc_index = -1;
-
-	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
-			 (1 << HWTSTAMP_TX_ON);
-
-	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-			   (1 << HWTSTAMP_FILTER_ALL);
-
-	return 0;
-}
-
 static const struct ethtool_ops fm10k_ethtool_ops = {
 	.get_strings		= fm10k_get_strings,
 	.get_sset_count		= fm10k_get_sset_count,
@@ -1246,7 +1179,6 @@
 	.set_rxfh		= fm10k_set_rssh,
 	.get_channels		= fm10k_get_channels,
 	.set_channels		= fm10k_set_channels,
-	.get_ts_info            = fm10k_get_ts_info,
 };
 
 void fm10k_set_ethtool_ops(struct net_device *dev)
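
  The fm10k_ethtool.c rework above collapses several hand-rolled copy loops into
  fm10k_add_stat_strings() and fm10k_add_ethtool_stats(), and the contract that keeps
  ethtool output sane is ordering: strings and values must be emitted from the same tables,
  in the same order, and a missing ring still has to emit zeroes so the two arrays stay
  aligned. A condensed sketch of the value-side helper (generic names and stdint types,
  not driver code):

  #include <stddef.h>
  #include <stdint.h>

  struct stat_def {
  	const char *name;	/* must match the string table order */
  	size_t offset;		/* offsetof() into the source structure */
  	size_t size;		/* sizeof() the source field */
  };

  static void add_stats(uint64_t **data, const void *base,
  		      const struct stat_def *defs, size_t n)
  {
  	size_t i;

  	for (i = 0; i < n; i++) {
  		const char *p;

  		/* no backing object: still emit zeroes to keep alignment */
  		if (!base) {
  			*(*data)++ = 0;
  			continue;
  		}

  		p = (const char *)base + defs[i].offset;
  		switch (defs[i].size) {
  		case sizeof(uint64_t):
  			*(*data)++ = *(const uint64_t *)p;
  			break;
  		case sizeof(uint32_t):
  			*(*data)++ = *(const uint32_t *)p;
  			break;
  		default:
  			*(*data)++ = 0;
  			break;
  		}
  	}
  }
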
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index acfb8b1f..47f0743 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -50,7 +50,7 @@
 	s64 vflre;
 	int i;
 
-	/* if there is no iov_data then there is no mailboxes to process */
+	/* if there is no iov_data then there is no mailbox to process */
 	if (!ACCESS_ONCE(interface->iov_data))
 		return 0;
 
@@ -98,7 +98,7 @@
 	struct fm10k_iov_data *iov_data;
 	int i;
 
-	/* if there is no iov_data then there is no mailboxes to process */
+	/* if there is no iov_data then there is no mailbox to process */
 	if (!ACCESS_ONCE(interface->iov_data))
 		return 0;
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 4de17db..0e166e9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -29,15 +29,15 @@
 #include "fm10k.h"
 
 #define DRV_VERSION	"0.19.3-k"
+#define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"
 const char fm10k_driver_version[] = DRV_VERSION;
 char fm10k_driver_name[] = "fm10k";
-static const char fm10k_driver_string[] =
-	"Intel(R) Ethernet Switch Host Interface Driver";
+static const char fm10k_driver_string[] = DRV_SUMMARY;
 static const char fm10k_copyright[] =
-	"Copyright (c) 2013 Intel Corporation.";
+	"Copyright (c) 2013 - 2016 Intel Corporation.";
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
+MODULE_DESCRIPTION(DRV_SUMMARY);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
@@ -401,10 +401,10 @@
 }
 
 #define FM10K_RSS_L4_TYPES_MASK \
-	((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
-	 (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
-	 (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
-	 (1ul << FM10K_RSSTYPE_IPV6_UDP))
+	(BIT(FM10K_RSSTYPE_IPV4_TCP) | \
+	 BIT(FM10K_RSSTYPE_IPV4_UDP) | \
+	 BIT(FM10K_RSSTYPE_IPV6_TCP) | \
+	 BIT(FM10K_RSSTYPE_IPV6_UDP))
 
 static inline void fm10k_rx_hash(struct fm10k_ring *ring,
 				 union fm10k_rx_desc *rx_desc,
@@ -420,23 +420,10 @@
 		return;
 
 	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
-		     (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+		     (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
 		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
 }
 
-static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
-			      union fm10k_rx_desc *rx_desc,
-			      struct sk_buff *skb)
-{
-	struct fm10k_intfc *interface = rx_ring->q_vector->interface;
-
-	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
-
-	if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
-		fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
-					  le64_to_cpu(rx_desc->q.timestamp));
-}
-
 static void fm10k_type_trans(struct fm10k_ring *rx_ring,
 			     union fm10k_rx_desc __maybe_unused *rx_desc,
 			     struct sk_buff *skb)
@@ -486,8 +473,6 @@
 
 	fm10k_rx_checksum(rx_ring, rx_desc, skb);
 
-	fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);
-
 	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
 
 	skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -835,6 +820,8 @@
 		struct ipv6hdr *ipv6;
 		u8 *raw;
 	} network_hdr;
+	u8 *transport_hdr;
+	__be16 frag_off;
 	__be16 protocol;
 	u8 l4_hdr = 0;
 
@@ -852,9 +839,11 @@
 			goto no_csum;
 		}
 		network_hdr.raw = skb_inner_network_header(skb);
+		transport_hdr = skb_inner_transport_header(skb);
 	} else {
 		protocol = vlan_get_protocol(skb);
 		network_hdr.raw = skb_network_header(skb);
+		transport_hdr = skb_transport_header(skb);
 	}
 
 	switch (protocol) {
@@ -863,15 +852,17 @@
 		break;
 	case htons(ETH_P_IPV6):
 		l4_hdr = network_hdr.ipv6->nexthdr;
+		if (likely((transport_hdr - network_hdr.raw) ==
+			   sizeof(struct ipv6hdr)))
+			break;
+		ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
+				      sizeof(struct ipv6hdr),
+				 &l4_hdr, &frag_off);
+		if (unlikely(frag_off))
+			l4_hdr = NEXTHDR_FRAGMENT;
 		break;
 	default:
-		if (unlikely(net_ratelimit())) {
-			dev_warn(tx_ring->dev,
-				 "partial checksum but ip version=%x!\n",
-				 protocol);
-		}
-		tx_ring->tx_stats.csum_err++;
-		goto no_csum;
+		break;
 	}
 
 	switch (l4_hdr) {
@@ -884,9 +875,10 @@
 	default:
 		if (unlikely(net_ratelimit())) {
 			dev_warn(tx_ring->dev,
-				 "partial checksum but l4 proto=%x!\n",
-				 l4_hdr);
+				 "partial checksum, version=%d l4 proto=%x\n",
+				 protocol, l4_hdr);
 		}
+		skb_checksum_help(skb);
 		tx_ring->tx_stats.csum_err++;
 		goto no_csum;
 	}
@@ -912,11 +904,6 @@
 	/* set type for advanced descriptor with frame checksum insertion */
 	u32 desc_flags = 0;
 
-	/* set timestamping bits */
-	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-	    likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
-		desc_flags |= FM10K_TXD_FLAG_TIME;
-
 	/* set checksum offload bits */
 	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
 				     FM10K_TXD_FLAG_CSUM);
@@ -1198,9 +1185,10 @@
  * fm10k_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  **/
 static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
-			       struct fm10k_ring *tx_ring)
+			       struct fm10k_ring *tx_ring, int napi_budget)
 {
 	struct fm10k_intfc *interface = q_vector->interface;
 	struct fm10k_tx_buffer *tx_buffer;
@@ -1238,7 +1226,7 @@
 		total_packets += tx_buffer->gso_segs;
 
 		/* free the skb */
-		dev_consume_skb_any(tx_buffer->skb);
+		napi_consume_skb(tx_buffer->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -1409,7 +1397,7 @@
 	 * accounts for changes in the ITR due to PCIe link speed.
 	 */
 	itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8;
-	avg_wire_size += (1 << itr_round) - 1;
+	avg_wire_size += BIT(itr_round) - 1;
 	avg_wire_size >>= itr_round;
 
 	/* write back value and retain adaptive flag */
@@ -1449,8 +1437,10 @@
 	int per_ring_budget, work_done = 0;
 	bool clean_complete = true;
 
-	fm10k_for_each_ring(ring, q_vector->tx)
-		clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
+	fm10k_for_each_ring(ring, q_vector->tx) {
+		if (!fm10k_clean_tx_irq(q_vector, ring, budget))
+			clean_complete = false;
+	}
 
 	/* Handle case where we are called by netpoll with a budget of 0 */
 	if (budget <= 0)
@@ -1468,7 +1458,8 @@
 		int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
 
 		work_done += work;
-		clean_complete &= !!(work < per_ring_budget);
+		if (work >= per_ring_budget)
+			clean_complete = false;
 	}
 
 	/* If all work not completed, return budget and keep polling */
@@ -1511,17 +1502,17 @@
 	/* set QoS mask and indices */
 	f = &interface->ring_feature[RING_F_QOS];
 	f->indices = pcs;
-	f->mask = (1 << fls(pcs - 1)) - 1;
+	f->mask = BIT(fls(pcs - 1)) - 1;
 
 	/* determine the upper limit for our current DCB mode */
 	rss_i = interface->hw.mac.max_queues / pcs;
-	rss_i = 1 << (fls(rss_i) - 1);
+	rss_i = BIT(fls(rss_i) - 1);
 
 	/* set RSS mask and indices */
 	f = &interface->ring_feature[RING_F_RSS];
 	rss_i = min_t(u16, rss_i, f->limit);
 	f->indices = rss_i;
-	f->mask = (1 << fls(rss_i - 1)) - 1;
+	f->mask = BIT(fls(rss_i - 1)) - 1;
 
 	/* configure pause class to queue mapping */
 	for (i = 0; i < pcs; i++)
@@ -1551,7 +1542,7 @@
 
 	/* record indices and power of 2 mask for RSS */
 	f->indices = rss_i;
-	f->mask = (1 << fls(rss_i - 1)) - 1;
+	f->mask = BIT(fls(rss_i - 1)) - 1;
 
 	interface->num_rx_queues = rss_i;
 	interface->num_tx_queues = rss_i;
@@ -1572,17 +1563,29 @@
  **/
 static void fm10k_set_num_queues(struct fm10k_intfc *interface)
 {
-	/* Start with base case */
-	interface->num_rx_queues = 1;
-	interface->num_tx_queues = 1;
-
+	/* Attempt to setup QoS and RSS first */
 	if (fm10k_set_qos_queues(interface))
 		return;
 
+	/* If we don't have QoS, just fall back to only RSS. */
 	fm10k_set_rss_queues(interface);
 }
 
 /**
+ * fm10k_reset_num_queues - Reset the number of queues to zero
+ * @interface: board private structure
+ *
+ * This function should be called whenever we need to reset the number of
+ * queues after an error condition.
+ */
+static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
+{
+	interface->num_tx_queues = 0;
+	interface->num_rx_queues = 0;
+	interface->num_q_vectors = 0;
+}
+
+/**
  * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
  * @interface: board private structure to initialize
  * @v_count: q_vectors allocated on interface, used for ring interleaving
@@ -1765,9 +1768,7 @@
 	return 0;
 
 err_out:
-	interface->num_tx_queues = 0;
-	interface->num_rx_queues = 0;
-	interface->num_q_vectors = 0;
+	fm10k_reset_num_queues(interface);
 
 	while (v_idx--)
 		fm10k_free_q_vector(interface, v_idx);
@@ -1787,9 +1788,7 @@
 {
 	int v_idx = interface->num_q_vectors;
 
-	interface->num_tx_queues = 0;
-	interface->num_rx_queues = 0;
-	interface->num_q_vectors = 0;
+	fm10k_reset_num_queues(interface);
 
 	while (v_idx--)
 		fm10k_free_q_vector(interface, v_idx);
@@ -1935,7 +1934,7 @@
 static void fm10k_init_reta(struct fm10k_intfc *interface)
 {
 	u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
-	u32 reta, base;
+	u32 reta;
 
 	/* If the Rx flow indirection table has been configured manually, we
 	 * need to maintain it when possible.
@@ -1960,21 +1959,7 @@
 	}
 
 repopulate_reta:
-	/* Populate the redirection table 4 entries at a time.  To do this
-	 * we are generating the results for n and n+2 and then interleaving
-	 * those with the results with n+1 and n+3.
-	 */
-	for (i = FM10K_RETA_SIZE; i--;) {
-		/* first pass generates n and n+2 */
-		base = ((i * 0x00040004) + 0x00020000) * rss_i;
-		reta = (base & 0x3F803F80) >> 7;
-
-		/* second pass generates n+1 and n+3 */
-		base += 0x00010001 * rss_i;
-		reta |= (base & 0x3F803F80) << 1;
-
-		interface->reta[i] = reta;
-	}
+	fm10k_write_reta(interface, NULL);
 }
 
 /**
@@ -1997,14 +1982,15 @@
 	if (err) {
 		dev_err(&interface->pdev->dev,
 			"Unable to initialize MSI-X capability\n");
-		return err;
+		goto err_init_msix;
 	}
 
 	/* Allocate memory for queues */
 	err = fm10k_alloc_q_vectors(interface);
 	if (err) {
-		fm10k_reset_msix_capability(interface);
-		return err;
+		dev_err(&interface->pdev->dev,
+			"Unable to allocate queue vectors\n");
+		goto err_alloc_q_vectors;
 	}
 
 	/* Map rings to devices, and map devices to physical queues */
@@ -2014,6 +2000,12 @@
 	fm10k_init_reta(interface);
 
 	return 0;
+
+err_alloc_q_vectors:
+	fm10k_reset_msix_capability(interface);
+err_init_msix:
+	fm10k_reset_num_queues(interface);
+	return err;
 }
 
 /**
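
  With fm10k_write_reta() now shared (it was added in the fm10k_ethtool.c hunk earlier),
  fm10k_init_reta() above simply delegates to it with a NULL table, letting
  ethtool_rxfh_indir_default() spread entries over the RSS queues. The register format
  itself packs four 8-bit queue indices into each 32-bit FM10K_RETA word, lowest-numbered
  entry in the lowest byte; a small standalone sketch of just that packing step:

  #include <stdint.h>

  /* Pack four 8-bit redirection entries into one 32-bit RETA word,
   * entry 0 in the least significant byte -- mirrors table[0..3] above. */
  static uint32_t pack_reta_word(const uint8_t entry[4])
  {
  	return (uint32_t)entry[0] |
  	       ((uint32_t)entry[1] << 8) |
  	       ((uint32_t)entry[2] << 16) |
  	       ((uint32_t)entry[3] << 24);
  }
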
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 98202c3..c9dfa65 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index 245a0a3..b7dbc8a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index d09a8dd..2a08d3f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -243,9 +243,6 @@
 
 	for (i = 0; i < interface->num_tx_queues; i++)
 		fm10k_clean_tx_ring(interface->tx_ring[i]);
-
-	/* remove any stale timestamp buffers and free them */
-	skb_queue_purge(&interface->ts_tx_skb_queue);
 }
 
 /**
@@ -440,7 +437,7 @@
  * @sa_family: Address family of new port
  * @port: port number used for VXLAN
  *
- * This funciton is called when a new VXLAN interface has added a new port
+ * This function is called when a new VXLAN interface has added a new port
  * number to the range that is currently in use for VXLAN.  The new port
  * number is always added to the tail so that the port number list should
  * match the order in which the ports were allocated.  The head of the list
@@ -484,7 +481,7 @@
  * @sa_family: Address family of freed port
  * @port: port number used for VXLAN
  *
- * This funciton is called when a new VXLAN interface has freed a port
+ * This function is called when a new VXLAN interface has freed a port
  * number from the range that is currently in use for VXLAN.  The freed
  * port is removed from the list and the new head is used to determine
  * the port number for offloads.
@@ -660,10 +657,6 @@
 		__skb_put(skb, pad_len);
 	}
 
-	/* prepare packet for hardware time stamping */
-	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
-		fm10k_ts_tx_enqueue(interface, skb);
-
 	if (r_idx >= interface->num_tx_queues)
 		r_idx %= interface->num_tx_queues;
 
@@ -884,7 +877,7 @@
 		return -EADDRNOTAVAIL;
 
 	/* update table with current entries */
-	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
 	     vid < VLAN_N_VID;
 	     vid = fm10k_find_next_vlan(interface, vid)) {
 		err = hw->mac.ops.update_uc_addr(hw, glort, addr,
@@ -947,7 +940,7 @@
 	u16 vid, glort = interface->glort;
 
 	/* update table with current entries */
-	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
 	     vid < VLAN_N_VID;
 	     vid = fm10k_find_next_vlan(interface, vid)) {
 		hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
@@ -1002,11 +995,8 @@
 	}
 
 	/* synchronize all of the addresses */
-	if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
-		__dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
-		if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
-			__dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
-	}
+	__dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
+	__dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
 
 	fm10k_mbx_unlock(interface);
 }
@@ -1044,7 +1034,7 @@
 	hw->mac.ops.update_vlan(hw, 0, 0, true);
 
 	/* update table with current entries */
-	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
 	     vid < VLAN_N_VID;
 	     vid = fm10k_find_next_vlan(interface, vid)) {
 		hw->mac.ops.update_vlan(hw, vid, 0, true);
@@ -1056,11 +1046,8 @@
 	hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
 
 	/* synchronize all of the addresses */
-	if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
-		__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
-		if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
-			__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
-	}
+	__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
+	__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
 
 	fm10k_mbx_unlock(interface);
 
@@ -1213,18 +1200,6 @@
 	return fm10k_setup_tc(dev, tc->tc);
 }
 
-static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
-	switch (cmd) {
-	case SIOCGHWTSTAMP:
-		return fm10k_get_ts_config(netdev, ifr);
-	case SIOCSHWTSTAMP:
-		return fm10k_set_ts_config(netdev, ifr);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
 static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
 				  struct fm10k_l2_accel *l2_accel)
 {
@@ -1402,7 +1377,6 @@
 	.ndo_get_vf_config	= fm10k_ndo_get_vf_config,
 	.ndo_add_vxlan_port	= fm10k_add_vxlan_port,
 	.ndo_del_vxlan_port	= fm10k_del_vxlan_port,
-	.ndo_do_ioctl		= fm10k_ioctl,
 	.ndo_dfwd_add_station	= fm10k_dfwd_add_station,
 	.ndo_dfwd_del_station	= fm10k_dfwd_del_station,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1429,7 +1403,7 @@
 
 	/* configure default debug level */
 	interface = netdev_priv(dev);
-	interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+	interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 
 	/* configure default features */
 	dev->features |= NETIF_F_IP_CSUM |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 4eb7a6f..e05aca9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -99,7 +99,7 @@
 
 static void fm10k_service_event_complete(struct fm10k_intfc *interface)
 {
-	BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
+	WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
 
 	/* flush memory to make sure state is correct before next watchdog */
 	smp_mb__before_atomic();
@@ -145,7 +145,7 @@
 	WARN_ON(in_interrupt());
 
 	/* put off any impending NetWatchDogTimeout */
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 
 	while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
 		usleep_range(1000, 2000);
@@ -209,9 +209,6 @@
 			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
 	}
 
-	/* reset clock */
-	fm10k_ts_reset(interface);
-
 	err = netif_running(netdev) ? fm10k_open(netdev) : 0;
 	if (err)
 		goto err_open;
@@ -559,7 +556,6 @@
 	/* tasks only run when interface is up */
 	fm10k_watchdog_subtask(interface);
 	fm10k_check_hang_subtask(interface);
-	fm10k_ts_tx_subtask(interface);
 
 	/* release lock on service events to allow scheduling next event */
 	fm10k_service_event_complete(interface);
@@ -579,7 +575,7 @@
 	u64 tdba = ring->dma;
 	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
 	u32 txint = FM10K_INT_MAP_DISABLE;
-	u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT);
+	u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
 	u8 reg_idx = ring->reg_idx;
 
 	/* disable queue to avoid issues while updating state */
@@ -730,7 +726,7 @@
 	if (interface->pfc_en)
 		rx_pause = interface->pfc_en;
 #endif
-	if (!(rx_pause & (1 << ring->qos_pc)))
+	if (!(rx_pause & BIT(ring->qos_pc)))
 		rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
 
 	fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -779,7 +775,7 @@
 		u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
 		u8 reg_idx = ring->reg_idx;
 
-		if (!(rx_pause & (1 << ring->qos_pc)))
+		if (!(rx_pause & BIT(ring->qos_pc)))
 			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
 
 		fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -903,8 +899,8 @@
 
 	/* re-enable mailbox interrupt and indicate 20us delay */
 	fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
-			FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
-					    hw->mac.itr_scale));
+			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
+			FM10K_ITR_ENABLE);
 
 	/* service upstream mailbox */
 	if (fm10k_mbx_trylock(interface)) {
@@ -1065,7 +1061,7 @@
 	if (maxholdq)
 		fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
 	for (q = 255;;) {
-		if (maxholdq & (1 << 31)) {
+		if (maxholdq & BIT(31)) {
 			if (q < FM10K_MAX_QUEUES_PF) {
 				interface->rx_overrun_pf++;
 				fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
@@ -1135,22 +1131,24 @@
 
 	/* re-enable mailbox interrupt and indicate 20us delay */
 	fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
-			FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
-					    hw->mac.itr_scale));
+			(FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
+			FM10K_ITR_ENABLE);
 
 	return IRQ_HANDLED;
 }
 
 void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
 {
-	struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
 	struct fm10k_hw *hw = &interface->hw;
+	struct msix_entry *entry;
 	int itr_reg;
 
 	/* no mailbox IRQ to free if MSI-X is not enabled */
 	if (!interface->msix_entries)
 		return;
 
+	entry = &interface->msix_entries[FM10K_MBX_VECTOR];
+
 	/* disconnect the mailbox */
 	hw->mbx.ops.disconnect(hw, &hw->mbx);
 
@@ -1202,25 +1200,6 @@
 	return 0;
 }
 
-static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results,
-			     struct fm10k_mbx_info __always_unused *mbx)
-{
-	struct fm10k_intfc *interface;
-	u64 timestamp;
-	s32 err;
-
-	err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP],
-				     &timestamp);
-	if (err)
-		return err;
-
-	interface = container_of(hw, struct fm10k_intfc, hw);
-
-	fm10k_ts_tx_hwtstamp(interface, 0, timestamp);
-
-	return 0;
-}
-
 /* generic error handler for mailbox issues */
 static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
 			   struct fm10k_mbx_info __always_unused *mbx)
@@ -1241,7 +1220,6 @@
 	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
 	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
 	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
-	FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf),
 	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
 };
 
@@ -1253,7 +1231,7 @@
 	int err;
 
 	/* Use timer0 for interrupt moderation on the mailbox */
-	u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry;
+	u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;
 
 	/* register mailbox handlers */
 	err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
@@ -1285,11 +1263,40 @@
 	u32 dglort_map = hw->mac.dglort_map;
 	s32 err;
 
+	interface = container_of(hw, struct fm10k_intfc, hw);
+
+	err = fm10k_msg_err_pf(hw, results, mbx);
+	if (!err && hw->swapi.status) {
+		/* force link down for a reasonable delay */
+		interface->link_down_event = jiffies + (2 * HZ);
+		set_bit(__FM10K_LINK_DOWN, &interface->state);
+
+		/* reset dglort_map back to no config */
+		hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
+
+		fm10k_service_event_schedule(interface);
+
+		/* prevent overloading kernel message buffer */
+		if (interface->lport_map_failed)
+			return 0;
+
+		interface->lport_map_failed = true;
+
+		if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
+			dev_warn(&interface->pdev->dev,
+				 "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
+		dev_warn(&interface->pdev->dev,
+			 "request logical port map failed: %d\n",
+			 hw->swapi.status);
+
+		return 0;
+	}
+
 	err = fm10k_msg_lport_map_pf(hw, results, mbx);
 	if (err)
 		return err;
 
-	interface = container_of(hw, struct fm10k_intfc, hw);
+	interface->lport_map_failed = false;
 
 	/* we need to reset if port count was just updated */
 	if (dglort_map != hw->mac.dglort_map)
@@ -1339,68 +1346,6 @@
 	return 0;
 }
 
-static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results,
-			     struct fm10k_mbx_info __always_unused *mbx)
-{
-	struct fm10k_swapi_1588_timestamp timestamp;
-	struct fm10k_iov_data *iov_data;
-	struct fm10k_intfc *interface;
-	u16 sglort, vf_idx;
-	s32 err;
-
-	err = fm10k_tlv_attr_get_le_struct(
-				results[FM10K_PF_ATTR_ID_1588_TIMESTAMP],
-				&timestamp, sizeof(timestamp));
-	if (err)
-		return err;
-
-	interface = container_of(hw, struct fm10k_intfc, hw);
-
-	if (timestamp.dglort) {
-		fm10k_ts_tx_hwtstamp(interface, timestamp.dglort,
-				     le64_to_cpu(timestamp.egress));
-		return 0;
-	}
-
-	/* either dglort or sglort must be set */
-	if (!timestamp.sglort)
-		return FM10K_ERR_PARAM;
-
-	/* verify GLORT is at least one of the ones we own */
-	sglort = le16_to_cpu(timestamp.sglort);
-	if (!fm10k_glort_valid_pf(hw, sglort))
-		return FM10K_ERR_PARAM;
-
-	if (sglort == interface->glort) {
-		fm10k_ts_tx_hwtstamp(interface, 0,
-				     le64_to_cpu(timestamp.ingress));
-		return 0;
-	}
-
-	/* if there is no iov_data then there is no mailboxes to process */
-	if (!ACCESS_ONCE(interface->iov_data))
-		return FM10K_ERR_PARAM;
-
-	rcu_read_lock();
-
-	/* notify VF if this timestamp belongs to it */
-	iov_data = interface->iov_data;
-	vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort;
-
-	if (!iov_data || vf_idx >= iov_data->num_vfs) {
-		err = FM10K_ERR_PARAM;
-		goto err_unlock;
-	}
-
-	err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx],
-					   le64_to_cpu(timestamp.ingress));
-
-err_unlock:
-	rcu_read_unlock();
-
-	return err;
-}
-
 static const struct fm10k_msg_data pf_mbx_data[] = {
 	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
 	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1408,7 +1353,6 @@
 	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
 	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
 	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
-	FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf),
 	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
 };
 
@@ -1420,8 +1364,8 @@
 	int err;
 
 	/* Use timer0 for interrupt moderation on the mailbox */
-	u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry;
-	u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry;
+	u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
+	u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;
 
 	/* register mailbox handlers */
 	err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
@@ -1654,6 +1598,7 @@
 {
 	struct net_device *netdev = interface->netdev;
 	struct fm10k_hw *hw = &interface->hw;
+	int err;
 
 	/* signal that we are down to the interrupt handler and service task */
 	set_bit(__FM10K_DOWN, &interface->state);
@@ -1678,7 +1623,9 @@
 	fm10k_update_stats(interface);
 
 	/* Disable DMA engine for Tx/Rx */
-	hw->mac.ops.stop_hw(hw);
+	err = hw->mac.ops.stop_hw(hw);
+	if (err)
+		dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
 
 	/* free any buffers still on the rings */
 	fm10k_clean_all_tx_rings(interface);
@@ -1776,35 +1723,17 @@
 		netdev->addr_assign_type |= NET_ADDR_RANDOM;
 	}
 
-	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
-	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+	ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+	ether_addr_copy(netdev->perm_addr, hw->mac.addr);
 
 	if (!is_valid_ether_addr(netdev->perm_addr)) {
 		dev_err(&pdev->dev, "Invalid MAC Address\n");
 		return -EIO;
 	}
 
-	/* assign BAR 4 resources for use with PTP */
-	if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED)
-		interface->sw_addr = ioremap(pci_resource_start(pdev, 4),
-					     pci_resource_len(pdev, 4));
-	hw->sw_addr = interface->sw_addr;
-
 	/* initialize DCBNL interface */
 	fm10k_dcbnl_set_ops(netdev);
 
-	/* Initialize service timer and service task */
-	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
-	setup_timer(&interface->service_timer, &fm10k_service_timer,
-		    (unsigned long)interface);
-	INIT_WORK(&interface->service_task, fm10k_service_task);
-
-	/* kick off service timer now, even when interface is down */
-	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
-
-	/* Intitialize timestamp data */
-	fm10k_ts_init(interface);
-
 	/* set default ring sizes */
 	interface->tx_ring_count = FM10K_DEFAULT_TXD;
 	interface->rx_ring_count = FM10K_DEFAULT_RXD;
@@ -1987,6 +1916,12 @@
 	if (err)
 		goto err_sw_init;
 
+	/* the mbx interrupt might attempt to schedule the service task, so we
+	 * must ensure it is disabled since we haven't yet requested the timer
+	 * or work item.
+	 */
+	set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+
 	err = fm10k_mbx_request_irq(interface);
 	if (err)
 		goto err_mbx_interrupt;
@@ -2006,8 +1941,15 @@
 	/* stop all the transmit queues from transmitting until link is up */
 	netif_tx_stop_all_queues(netdev);
 
-	/* Register PTP interface */
-	fm10k_ptp_register(interface);
+	/* Initialize service timer and service task late in order to avoid
+	 * cleanup issues.
+	 */
+	setup_timer(&interface->service_timer, &fm10k_service_timer,
+		    (unsigned long)interface);
+	INIT_WORK(&interface->service_task, fm10k_service_task);
+
+	/* kick off service timer now, even when interface is down */
+	mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
 
 	/* print warning for non-optimal configurations */
 	fm10k_slot_warn(interface);
@@ -2065,9 +2007,6 @@
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);
 
-	/* cleanup timestamp handling */
-	fm10k_ptp_unregister(interface);
-
 	/* release VFs */
 	fm10k_iov_disable(pdev);
 
@@ -2140,9 +2079,6 @@
 	/* reset statistics starting values */
 	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
 
-	/* reset clock */
-	fm10k_ts_reset(interface);
-
 	rtnl_lock();
 
 	err = fm10k_init_queueing_scheme(interface);
@@ -2259,15 +2195,17 @@
 	if (state == pci_channel_io_perm_failure)
 		return PCI_ERS_RESULT_DISCONNECT;
 
+	rtnl_lock();
+
 	if (netif_running(netdev))
 		fm10k_close(netdev);
 
+	fm10k_mbx_free_irq(interface);
+
 	/* free interrupts */
 	fm10k_clear_queueing_scheme(interface);
 
-	fm10k_mbx_free_irq(interface);
-
-	pci_disable_device(pdev);
+	rtnl_unlock();
 
 	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -2337,27 +2275,31 @@
 	/* reset statistics starting values */
 	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
 
+	rtnl_lock();
+
 	err = fm10k_init_queueing_scheme(interface);
 	if (err) {
 		dev_err(&interface->pdev->dev,
 			"init_queueing_scheme failed: %d\n", err);
-		return;
+		goto unlock;
 	}
 
 	/* reassociate interrupts */
 	fm10k_mbx_request_irq(interface);
 
-	/* reset clock */
-	fm10k_ts_reset(interface);
-
+	rtnl_lock();
 	if (netif_running(netdev))
 		err = fm10k_open(netdev);
+	rtnl_unlock();
 
 	/* final check of hardware state before registering the interface */
 	err = err ? : fm10k_hw_ready(interface);
 
 	if (!err)
 		netif_device_attach(netdev);
+
+unlock:
+	rtnl_unlock();
 }
 
 static const struct pci_error_handlers fm10k_err_handler = {
@@ -2382,7 +2324,7 @@
 /**
  * fm10k_register_pci_driver - register driver interface
  *
- * This funciton is called on module load in order to register the driver.
+ * This function is called on module load in order to register the driver.
  **/
 int fm10k_register_pci_driver(void)
 {
@@ -2392,7 +2334,7 @@
 /**
  * fm10k_unregister_pci_driver - unregister driver interface
  *
- * This funciton is called on module unload in order to remove the driver.
+ * This function is called on module unload in order to remove the driver.
  **/
 void fm10k_unregister_pci_driver(void)
 {
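One detail worth noting in the fm10k_lport_map error path added above: the "request logical port map failed" warning is gated on the new lport_map_failed flag, so a switch that keeps rejecting the request cannot flood the kernel log; a later successful map clears the flag and re-arms the warning. A stand-alone sketch of that pattern (helper name hypothetical, flag lifted out of struct fm10k_intfc for brevity):

	#include <linux/device.h>
	#include <linux/types.h>

	static bool lport_map_failed;	/* kept in struct fm10k_intfc in the driver */

	/* warn only on the first failure of a streak */
	static void report_lport_map_error(struct device *dev, int status)
	{
		if (lport_map_failed)
			return;

		lport_map_failed = true;
		dev_warn(dev, "request logical port map failed: %d\n", status);
	}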
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc..dc75507 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -219,8 +219,8 @@
 
 	/* VLAN multi-bit write:
 	 * The multi-bit write has several parts to it.
-	 *    3			  2		      1			  0
-	 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+	 *               24              16               8               0
+	 *  7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
 	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 	 * | RSVD0 |         Length        |C|RSVD0|        VLAN ID        |
 	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -488,6 +488,10 @@
 	if (!fm10k_glort_valid_pf(hw, glort))
 		return FM10K_ERR_PARAM;
 
+	/* reset multicast mode if deleting lport */
+	if (!enable)
+		fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
+
 	/* construct the lport message from the 2 pieces of data we have */
 	lport_msg = ((u32)count << 16) | glort;
 
@@ -527,8 +531,8 @@
 		return FM10K_ERR_PARAM;
 
 	/* determine count of VSIs and queues */
-	queue_count = 1 << (dglort->rss_l + dglort->pc_l);
-	vsi_count = 1 << (dglort->vsi_l + dglort->queue_l);
+	queue_count = BIT(dglort->rss_l + dglort->pc_l);
+	vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
 	glort = dglort->glort;
 	q_idx = dglort->queue_b;
 
@@ -544,8 +548,8 @@
 	}
 
 	/* determine count of PCs and queues */
-	queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l);
-	pc_count = 1 << dglort->pc_l;
+	queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
+	pc_count = BIT(dglort->pc_l);
 
 	/* configure PC for Tx queues */
 	for (pc = 0; pc < pc_count; pc++) {
@@ -711,8 +715,8 @@
 					FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
 					FM10K_RXDCTL_DROP_ON_EMPTY);
 			fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
-					FM10K_RXQCTL_VF |
-					(i << FM10K_RXQCTL_VF_SHIFT));
+					(i << FM10K_RXQCTL_VF_SHIFT) |
+					FM10K_RXQCTL_VF);
 
 			/* map queue pair to VF */
 			fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
@@ -864,9 +868,13 @@
 	fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
 	fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
 
-	/* determine correct default VLAN ID */
+	/* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
+	 * used here to indicate to the VF that it will not have privilege to
+	 * write VLAN_TABLE. All policy is enforced on the PF but this allows
+	 * the VF to correctly report errors to userspace requests.
+	 */
 	if (vf_info->pf_vid)
-		vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
+		vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
 	else
 		vf_vid = vf_info->sw_vid;
 
@@ -952,7 +960,7 @@
 		return FM10K_ERR_PARAM;
 
 	/* clear event notification of VF FLR */
-	fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32));
+	fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
 
 	/* force timeout and then disconnect the mailbox */
 	vf_info->mbx.timeout = 0;
@@ -987,7 +995,7 @@
 	txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
 		 (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
 		 FM10K_TXQCTL_VF | vf_idx;
-	rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT);
+	rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
 
 	/* stop further DMA and reset queue ownership back to VF */
 	for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
@@ -1140,19 +1148,6 @@
 	fm10k_update_hw_stats_q(hw, q, idx, qpp);
 }
 
-static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
-					 struct fm10k_vf_info *vf_info,
-					 u64 timestamp)
-{
-	u32 msg[4];
-
-	/* generate port state response to notify VF it is not ready */
-	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
-	fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp);
-
-	return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
-}
-
 /**
  *  fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
  *  @hw: Pointer to hardware structure
@@ -1223,18 +1218,32 @@
 		if (err)
 			return err;
 
-		/* verify upper 16 bits are zero */
-		if (vid >> 16)
-			return FM10K_ERR_PARAM;
-
 		set = !(vid & FM10K_VLAN_CLEAR);
 		vid &= ~FM10K_VLAN_CLEAR;
 
-		err = fm10k_iov_select_vid(vf_info, (u16)vid);
-		if (err < 0)
-			return err;
+		/* if the length field has been set, this is a multi-bit
+		 * update request. For multi-bit requests, simply disallow
+		 * them when the pf_vid has been set. In this case, the PF
+		 * should have already cleared the VLAN_TABLE, and if we
+		 * allowed them, it could allow a rogue VF to receive traffic
+		 * on a VLAN it was not assigned. In the single-bit case, we
+		 * need to modify requests for VLAN 0 to use the default PF or
+		 * SW vid when assigned.
+		 */
 
-		vid = err;
+		if (vid >> 16) {
+			/* prevent multi-bit requests when PF has
+			 * administratively set the VLAN for this VF
+			 */
+			if (vf_info->pf_vid)
+				return FM10K_ERR_PARAM;
+		} else {
+			err = fm10k_iov_select_vid(vf_info, (u16)vid);
+			if (err < 0)
+				return err;
+
+			vid = err;
+		}
 
 		/* update VSI info for VF in regards to VLAN table */
 		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
@@ -1370,7 +1379,7 @@
 		mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
 
 		/* if mode is not currently enabled, enable it */
-		if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode)))
+		if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
 			fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
 
 		/* swap mode back to a bit flag */
@@ -1604,7 +1613,7 @@
  *  @hw: pointer to hardware structure
  *  @switch_ready: pointer to boolean value that will record switch state
  *
- *  This funciton will check the DMA_CTRL2 register and mailbox in order
+ *  This function will check the DMA_CTRL2 register and mailbox in order
  *  to determine if the switch is ready for the PF to begin requesting
  *  addresses and mapping traffic to the local interface.
  **/
@@ -1633,6 +1642,8 @@
 
 /* This structure defines the attributes to be parsed below */
 const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+				 sizeof(struct fm10k_swapi_error)),
 	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
 	FM10K_TLV_ATTR_LAST
 };
@@ -1773,89 +1784,6 @@
 	return 0;
 }
 
-const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
-	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
-				 sizeof(struct fm10k_swapi_1588_timestamp)),
-	FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
-/**
- *  fm10k_adjust_systime_pf - Adjust systime frequency
- *  @hw: pointer to hardware structure
- *  @ppb: adjustment rate in parts per billion
- *
- *  This function will adjust the SYSTIME_CFG register contained in BAR 4
- *  if this function is supported for BAR 4 access.  The adjustment amount
- *  is based on the parts per billion value provided and adjusted to a
- *  value based on parts per 2^48 clock cycles.
- *
- *  If adjustment is not supported or the requested value is too large
- *  we will return an error.
- **/
-static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
-{
-	u64 systime_adjust;
-
-	/* if sw_addr is not set we don't have switch register access */
-	if (!hw->sw_addr)
-		return ppb ? FM10K_ERR_PARAM : 0;
-
-	/* we must convert the value from parts per billion to parts per
-	 * 2^48 cycles.  In addition I have opted to only use the 30 most
-	 * significant bits of the adjustment value as the 8 least
-	 * significant bits are located in another register and represent
-	 * a value significantly less than a part per billion, the result
-	 * of dropping the 8 least significant bits is that the adjustment
-	 * value is effectively multiplied by 2^8 when we write it.
-	 *
-	 * As a result of all this the math for this breaks down as follows:
-	 *	ppb / 10^9 == adjust * 2^8 / 2^48
-	 * If we solve this for adjust, and simplify it comes out as:
-	 *	ppb * 2^31 / 5^9 == adjust
-	 */
-	systime_adjust = (ppb < 0) ? -ppb : ppb;
-	systime_adjust <<= 31;
-	do_div(systime_adjust, 1953125);
-
-	/* verify the requested adjustment value is in range */
-	if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
-		return FM10K_ERR_PARAM;
-
-	if (ppb > 0)
-		systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
-
-	fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
-
-	return 0;
-}
-
-/**
- *  fm10k_read_systime_pf - Reads value of systime registers
- *  @hw: pointer to the hardware structure
- *
- *  Function reads the content of 2 registers, combined to represent a 64 bit
- *  value measured in nanosecods.  In order to guarantee the value is accurate
- *  we check the 32 most significant bits both before and after reading the
- *  32 least significant bits to verify they didn't change as we were reading
- *  the registers.
- **/
-static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
-{
-	u32 systime_l, systime_h, systime_tmp;
-
-	systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
-
-	do {
-		systime_tmp = systime_h;
-		systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
-		systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
-	} while (systime_tmp != systime_h);
-
-	return ((u64)systime_h << 32) | systime_l;
-}
-
 static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
 	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
 	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1885,8 +1813,6 @@
 	.set_dma_mask		= fm10k_set_dma_mask_pf,
 	.get_fault		= fm10k_get_fault_pf,
 	.get_host_state		= fm10k_get_host_state_pf,
-	.adjust_systime		= fm10k_adjust_systime_pf,
-	.read_systime		= fm10k_read_systime_pf,
 };
 
 static const struct fm10k_iov_ops iov_ops_pf = {
@@ -1898,7 +1824,6 @@
 	.set_lport			= fm10k_iov_set_lport_pf,
 	.reset_lport			= fm10k_iov_reset_lport_pf,
 	.update_stats			= fm10k_iov_update_stats_pf,
-	.report_timestamp		= fm10k_iov_report_timestamp_pf,
 };
 
 static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index b2d96b4..3336d3c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -42,8 +42,6 @@
 	FM10K_PF_MSG_ID_UPDATE_FLOW		= 0x503,
 	FM10K_PF_MSG_ID_DELETE_FLOW		= 0x504,
 	FM10K_PF_MSG_ID_SET_FLOW_STATE		= 0x505,
-	FM10K_PF_MSG_ID_GET_1588_INFO		= 0x506,
-	FM10K_PF_MSG_ID_1588_TIMESTAMP		= 0x701,
 };
 
 enum fm10k_pf_tlv_attr_id_v1 {
@@ -61,7 +59,6 @@
 	FM10K_PF_ATTR_ID_DELETE_FLOW		= 0x0B,
 	FM10K_PF_ATTR_ID_PORT			= 0x0C,
 	FM10K_PF_ATTR_ID_UPDATE_PVID		= 0x0D,
-	FM10K_PF_ATTR_ID_1588_TIMESTAMP		= 0x10,
 };
 
 #define FM10K_MSG_LPORT_MAP_GLORT_SHIFT	0
@@ -74,6 +71,8 @@
 #define FM10K_MSG_UPDATE_PVID_PVID_SHIFT	16
 #define FM10K_MSG_UPDATE_PVID_PVID_SIZE		16
 
+#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED	280
+
 /* The following data structures are overlayed directly onto TLV mailbox
  * messages, and must not break 4 byte alignment. Ensure the structures line
  * up correctly as per their TLV definition.
@@ -100,13 +99,6 @@
 	struct fm10k_global_table_data	ffu;
 } __aligned(4) __packed;
 
-struct fm10k_swapi_1588_timestamp {
-	__le64 egress;
-	__le64 ingress;
-	__le16 dglort;
-	__le16 sglort;
-} __aligned(4) __packed;
-
 s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
 extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
 #define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
@@ -122,11 +114,6 @@
 #define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
 	FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
 
-extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[];
-#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
-	FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
-			  fm10k_1588_timestamp_msg_attr, func)
-
 s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
 s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
 			      struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
deleted file mode 100644
index b4945e8..0000000
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
-
-#include <linux/ptp_classify.h>
-#include <linux/ptp_clock_kernel.h>
-
-#include "fm10k.h"
-
-#define FM10K_TS_TX_TIMEOUT		(HZ * 15)
-
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
-			       struct skb_shared_hwtstamps *hwtstamp,
-			       u64 systime)
-{
-	unsigned long flags;
-
-	read_lock_irqsave(&interface->systime_lock, flags);
-	systime += interface->ptp_adjust;
-	read_unlock_irqrestore(&interface->systime_lock, flags);
-
-	hwtstamp->hwtstamp = ns_to_ktime(systime);
-}
-
-static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface,
-				       __le16 dglort)
-{
-	struct sk_buff_head *list = &interface->ts_tx_skb_queue;
-	struct sk_buff *skb;
-
-	skb_queue_walk(list, skb) {
-		if (FM10K_CB(skb)->fi.w.dglort == dglort)
-			return skb;
-	}
-
-	return NULL;
-}
-
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
-{
-	struct sk_buff_head *list = &interface->ts_tx_skb_queue;
-	struct sk_buff *clone;
-	unsigned long flags;
-
-	/* create clone for us to return on the Tx path */
-	clone = skb_clone_sk(skb);
-	if (!clone)
-		return;
-
-	FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
-	spin_lock_irqsave(&list->lock, flags);
-
-	/* attempt to locate any buffers with the same dglort,
-	 * if none are present then insert skb in tail of list
-	 */
-	skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
-	if (!skb) {
-		skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
-		__skb_queue_tail(list, clone);
-	}
-
-	spin_unlock_irqrestore(&list->lock, flags);
-
-	/* if list is already has one then we just free the clone */
-	if (skb)
-		dev_kfree_skb(clone);
-}
-
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
-			  u64 systime)
-{
-	struct skb_shared_hwtstamps shhwtstamps;
-	struct sk_buff_head *list = &interface->ts_tx_skb_queue;
-	struct sk_buff *skb;
-	unsigned long flags;
-
-	spin_lock_irqsave(&list->lock, flags);
-
-	/* attempt to locate and pull the sk_buff out of the list */
-	skb = fm10k_ts_tx_skb(interface, dglort);
-	if (skb)
-		__skb_unlink(skb, list);
-
-	spin_unlock_irqrestore(&list->lock, flags);
-
-	/* if not found do nothing */
-	if (!skb)
-		return;
-
-	/* timestamp the sk_buff and free out copy */
-	fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
-	skb_tstamp_tx(skb, &shhwtstamps);
-	dev_kfree_skb_any(skb);
-}
-
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
-{
-	struct sk_buff_head *list = &interface->ts_tx_skb_queue;
-	struct sk_buff *skb, *tmp;
-	unsigned long flags;
-
-	/* If we're down or resetting, just bail */
-	if (test_bit(__FM10K_DOWN, &interface->state) ||
-	    test_bit(__FM10K_RESETTING, &interface->state))
-		return;
-
-	spin_lock_irqsave(&list->lock, flags);
-
-	/* walk though the list and flush any expired timestamp packets */
-	skb_queue_walk_safe(list, skb, tmp) {
-		if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout))
-			continue;
-		__skb_unlink(skb, list);
-		kfree_skb(skb);
-		interface->tx_hwtstamp_timeouts++;
-	}
-
-	spin_unlock_irqrestore(&list->lock, flags);
-}
-
-static u64 fm10k_systime_read(struct fm10k_intfc *interface)
-{
-	struct fm10k_hw *hw = &interface->hw;
-
-	return hw->mac.ops.read_systime(hw);
-}
-
-void fm10k_ts_reset(struct fm10k_intfc *interface)
-{
-	s64 ns = ktime_to_ns(ktime_get_real());
-	unsigned long flags;
-
-	/* reinitialize the clock */
-	write_lock_irqsave(&interface->systime_lock, flags);
-	interface->ptp_adjust = fm10k_systime_read(interface) - ns;
-	write_unlock_irqrestore(&interface->systime_lock, flags);
-}
-
-void fm10k_ts_init(struct fm10k_intfc *interface)
-{
-	/* Initialize lock protecting systime access */
-	rwlock_init(&interface->systime_lock);
-
-	/* Initialize skb queue for pending timestamp requests */
-	skb_queue_head_init(&interface->ts_tx_skb_queue);
-
-	/* reset the clock to current kernel time */
-	fm10k_ts_reset(interface);
-}
-
-/**
- * fm10k_get_ts_config - get current hardware timestamping configuration
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * This function returns the current timestamping settings. Rather than
- * attempt to deconstruct registers to fill in the values, simply keep a copy
- * of the old settings around, and return a copy when requested.
- */
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
-	struct fm10k_intfc *interface = netdev_priv(netdev);
-	struct hwtstamp_config *config = &interface->ts_config;
-
-	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-		-EFAULT : 0;
-}
-
-/**
- * fm10k_set_ts_config - control hardware time stamping
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't cause any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- * Since hardware always timestamps Path delay packets when timestamping V2
- * packets, regardless of the type specified in the register, only use V2
- * Event mode. This more accurately tells the user what the hardware is going
- * to do anyways.
- */
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
-	struct fm10k_intfc *interface = netdev_priv(netdev);
-	struct hwtstamp_config ts_config;
-
-	if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config)))
-		return -EFAULT;
-
-	/* reserved for future extensions */
-	if (ts_config.flags)
-		return -EINVAL;
-
-	switch (ts_config.tx_type) {
-	case HWTSTAMP_TX_OFF:
-		break;
-	case HWTSTAMP_TX_ON:
-		/* we likely need some check here to see if this is supported */
-		break;
-	default:
-		return -ERANGE;
-	}
-
-	switch (ts_config.rx_filter) {
-	case HWTSTAMP_FILTER_NONE:
-		interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED;
-		break;
-	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
-	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
-	case HWTSTAMP_FILTER_PTP_V2_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-	case HWTSTAMP_FILTER_ALL:
-		interface->flags |= FM10K_FLAG_RX_TS_ENABLED;
-		ts_config.rx_filter = HWTSTAMP_FILTER_ALL;
-		break;
-	default:
-		return -ERANGE;
-	}
-
-	/* save these settings for future reference */
-	interface->ts_config = ts_config;
-
-	return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ?
-		-EFAULT : 0;
-}
-
-static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-{
-	struct fm10k_intfc *interface;
-	struct fm10k_hw *hw;
-	int err;
-
-	interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-	hw = &interface->hw;
-
-	err = hw->mac.ops.adjust_systime(hw, ppb);
-
-	/* the only error we should see is if the value is out of range */
-	return (err == FM10K_ERR_PARAM) ? -ERANGE : err;
-}
-
-static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
-	struct fm10k_intfc *interface;
-	unsigned long flags;
-
-	interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
-	write_lock_irqsave(&interface->systime_lock, flags);
-	interface->ptp_adjust += delta;
-	write_unlock_irqrestore(&interface->systime_lock, flags);
-
-	return 0;
-}
-
-static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
-	struct fm10k_intfc *interface;
-	unsigned long flags;
-	u64 now;
-
-	interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
-	read_lock_irqsave(&interface->systime_lock, flags);
-	now = fm10k_systime_read(interface) + interface->ptp_adjust;
-	read_unlock_irqrestore(&interface->systime_lock, flags);
-
-	*ts = ns_to_timespec64(now);
-
-	return 0;
-}
-
-static int fm10k_ptp_settime(struct ptp_clock_info *ptp,
-			     const struct timespec64 *ts)
-{
-	struct fm10k_intfc *interface;
-	unsigned long flags;
-	u64 ns = timespec64_to_ns(ts);
-
-	interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
-	write_lock_irqsave(&interface->systime_lock, flags);
-	interface->ptp_adjust = fm10k_systime_read(interface) - ns;
-	write_unlock_irqrestore(&interface->systime_lock, flags);
-
-	return 0;
-}
-
-static int fm10k_ptp_enable(struct ptp_clock_info *ptp,
-			    struct ptp_clock_request *rq,
-			    int __always_unused on)
-{
-	struct ptp_clock_time *t = &rq->perout.period;
-	struct fm10k_intfc *interface;
-	struct fm10k_hw *hw;
-	u64 period;
-	u32 step;
-
-	/* we can only support periodic output */
-	if (rq->type != PTP_CLK_REQ_PEROUT)
-		return -EINVAL;
-
-	/* verify the requested channel is there */
-	if (rq->perout.index >= ptp->n_per_out)
-		return -EINVAL;
-
-	/* we cannot enforce start time as there is no
-	 * mechanism for that in the hardware, we can only control
-	 * the period.
-	 */
-
-	/* we cannot support periods greater than 4 seconds due to reg limit */
-	if (t->sec > 4 || t->sec < 0)
-		return -ERANGE;
-
-	interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-	hw = &interface->hw;
-
-	/* we simply cannot support the operation if we don't have BAR4 */
-	if (!hw->sw_addr)
-		return -ENOTSUPP;
-
-	/* convert to unsigned 64b ns, verify we can put it in a 32b register */
-	period = t->sec * 1000000000LL + t->nsec;
-
-	/* determine the minimum size for period */
-	step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) &
-		    FM10K_SYSTIME_CFG_STEP_MASK);
-
-	/* verify the value is in range supported by hardware */
-	if ((period && (period < step)) || (period > U32_MAX))
-		return -ERANGE;
-
-	/* notify hardware of request to being sending pulses */
-	fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index),
-			   (u32)period);
-
-	return 0;
-}
-
-static struct ptp_pin_desc fm10k_ptp_pd[2] = {
-	{
-		.name = "IEEE1588_PULSE0",
-		.index = 0,
-		.func = PTP_PF_PEROUT,
-		.chan = 0
-	},
-	{
-		.name = "IEEE1588_PULSE1",
-		.index = 1,
-		.func = PTP_PF_PEROUT,
-		.chan = 1
-	}
-};
-
-static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
-			    enum ptp_pin_function func, unsigned int chan)
-{
-	/* verify the requested pin is there */
-	if (pin >= ptp->n_pins || !ptp->pin_config)
-		return -EINVAL;
-
-	/* enforce locked channels, no changing them */
-	if (chan != ptp->pin_config[pin].chan)
-		return -EINVAL;
-
-	/* we want to keep the functions locked as well */
-	if (func != ptp->pin_config[pin].func)
-		return -EINVAL;
-
-	return 0;
-}
-
-void fm10k_ptp_register(struct fm10k_intfc *interface)
-{
-	struct ptp_clock_info *ptp_caps = &interface->ptp_caps;
-	struct device *dev = &interface->pdev->dev;
-	struct ptp_clock *ptp_clock;
-
-	snprintf(ptp_caps->name, sizeof(ptp_caps->name),
-		 "%s", interface->netdev->name);
-	ptp_caps->owner		= THIS_MODULE;
-	/* This math is simply the inverse of the math in
-	 * fm10k_adjust_systime_pf applied to an adjustment value
-	 * of 2^30 - 1 which is the maximum value of the register:
-	 * 	max_ppb == ((2^30 - 1) * 5^9) / 2^31
-	 */
-	ptp_caps->max_adj	= 976562;
-	ptp_caps->adjfreq	= fm10k_ptp_adjfreq;
-	ptp_caps->adjtime	= fm10k_ptp_adjtime;
-	ptp_caps->gettime64	= fm10k_ptp_gettime;
-	ptp_caps->settime64	= fm10k_ptp_settime;
-
-	/* provide pins if BAR4 is accessible */
-	if (interface->sw_addr) {
-		/* enable periodic outputs */
-		ptp_caps->n_per_out = 2;
-		ptp_caps->enable	= fm10k_ptp_enable;
-
-		/* enable clock pins */
-		ptp_caps->verify	= fm10k_ptp_verify;
-		ptp_caps->n_pins = 2;
-		ptp_caps->pin_config = fm10k_ptp_pd;
-	}
-
-	ptp_clock = ptp_clock_register(ptp_caps, dev);
-	if (IS_ERR(ptp_clock)) {
-		ptp_clock = NULL;
-		dev_err(dev, "ptp_clock_register failed\n");
-	} else {
-		dev_info(dev, "registered PHC device %s\n", ptp_caps->name);
-	}
-
-	interface->ptp_clock = ptp_clock;
-}
-
-void fm10k_ptp_unregister(struct fm10k_intfc *interface)
-{
-	struct ptp_clock *ptp_clock = interface->ptp_clock;
-	struct device *dev = &interface->pdev->dev;
-
-	if (!ptp_clock)
-		return;
-
-	interface->ptp_clock = NULL;
-
-	ptp_clock_unregister(ptp_clock);
-	dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name);
-}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index ab01bb3..f8e87bf 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -222,7 +222,7 @@
 	attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
 
 	if (len < 4) {
-		attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1);
+		attr[1] = (u32)value & (BIT(8 * len) - 1);
 	} else {
 		attr[1] = (u32)value;
 		if (len > 4)
@@ -481,7 +481,8 @@
  *  up into an array of pointers stored in results.  The function will
  *  return FM10K_ERR_PARAM on any input or message error,
  *  FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
- *  and 0 on success.
+ *  and 0 on success. Any attributes not found in tlv_attr will be silently
+ *  ignored.
  **/
 static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
 				const struct fm10k_tlv_attr *tlv_attr)
@@ -518,14 +519,15 @@
 	while (offset < len) {
 		attr_id = *attr & FM10K_TLV_ID_MASK;
 
-		if (attr_id < FM10K_TLV_RESULTS_MAX)
-			err = fm10k_tlv_attr_validate(attr, tlv_attr);
-		else
-			err = FM10K_NOT_IMPLEMENTED;
+		if (attr_id >= FM10K_TLV_RESULTS_MAX)
+			return FM10K_NOT_IMPLEMENTED;
 
-		if (err < 0)
+		err = fm10k_tlv_attr_validate(attr, tlv_attr);
+		if (err == FM10K_NOT_IMPLEMENTED)
+			; /* silently ignore non-implemented attributes */
+		else if (err)
 			return err;
-		if (!err)
+		else
 			results[attr_id] = attr;
 
 		/* update offset */
@@ -652,29 +654,29 @@
  **/
 static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
 {
-	if (attr_flags & (1 << FM10K_TEST_MSG_STRING))
+	if (attr_flags & BIT(FM10K_TEST_MSG_STRING))
 		fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
 					       test_str);
-	if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR))
+	if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR))
 		fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
 					    test_mac, test_vlan);
-	if (attr_flags & (1 << FM10K_TEST_MSG_U8))
+	if (attr_flags & BIT(FM10K_TEST_MSG_U8))
 		fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8,  test_u8);
-	if (attr_flags & (1 << FM10K_TEST_MSG_U16))
+	if (attr_flags & BIT(FM10K_TEST_MSG_U16))
 		fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
-	if (attr_flags & (1 << FM10K_TEST_MSG_U32))
+	if (attr_flags & BIT(FM10K_TEST_MSG_U32))
 		fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
-	if (attr_flags & (1 << FM10K_TEST_MSG_U64))
+	if (attr_flags & BIT(FM10K_TEST_MSG_U64))
 		fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
-	if (attr_flags & (1 << FM10K_TEST_MSG_S8))
+	if (attr_flags & BIT(FM10K_TEST_MSG_S8))
 		fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8,  test_s8);
-	if (attr_flags & (1 << FM10K_TEST_MSG_S16))
+	if (attr_flags & BIT(FM10K_TEST_MSG_S16))
 		fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
-	if (attr_flags & (1 << FM10K_TEST_MSG_S32))
+	if (attr_flags & BIT(FM10K_TEST_MSG_S32))
 		fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
-	if (attr_flags & (1 << FM10K_TEST_MSG_S64))
+	if (attr_flags & BIT(FM10K_TEST_MSG_S64))
 		fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
-	if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT))
+	if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT))
 		fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
 					     test_le, 8);
 }
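The fm10k_tlv_attr_parse() change above tightens the loop's error handling: an attribute ID at or beyond FM10K_TLV_RESULTS_MAX still fails the whole message, while an ID that this table simply does not describe is now skipped instead of aborting the parse. Reduced to its control flow (a sketch of the resulting loop body; the offset/attribute advance at the bottom of the loop is unchanged and elided):

	if (attr_id >= FM10K_TLV_RESULTS_MAX)
		return FM10K_NOT_IMPLEMENTED;	/* out-of-range ID: reject the message */

	err = fm10k_tlv_attr_validate(attr, tlv_attr);
	if (err == FM10K_NOT_IMPLEMENTED)
		; /* attribute unknown to this table: silently ignore it */
	else if (err)
		return err;			/* genuine validation failure */
	else
		results[attr_id] = attr;	/* record where the attribute lives */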
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index e1845e0..a1f1027 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 854ebb1..b8bc061 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -225,11 +225,6 @@
 #define FM10K_STATS_LOOPBACK_DROP	0x3806
 #define FM10K_STATS_NODESC_DROP		0x3807
 
-/* Timesync registers */
-#define FM10K_SYSTIME		0x3814
-#define FM10K_SYSTIME_CFG	0x3818
-#define FM10K_SYSTIME_CFG_STEP_MASK		0x0000000F
-
 /* PCIe state registers */
 #define FM10K_PHYADDR		0x381C
 
@@ -355,6 +350,7 @@
 #define FM10K_VLAN_TABLE_VSI_MAX		64
 #define FM10K_VLAN_LENGTH_SHIFT			16
 #define FM10K_VLAN_CLEAR			BIT(15)
+#define FM10K_VLAN_OVERRIDE			FM10K_VLAN_CLEAR
 #define FM10K_VLAN_ALL \
 	((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
 
@@ -381,12 +377,6 @@
 #define FM10K_VFSYSTIME		0x00040
 #define FM10K_VFITR(_n)		((_n) + 0x00060)
 
-/* Registers contained in BAR 4 for Switch management */
-#define FM10K_SW_SYSTIME_ADJUST	0x0224D
-#define FM10K_SW_SYSTIME_ADJUST_MASK		0x3FFFFFFF
-#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE	0x80000000
-#define FM10K_SW_SYSTIME_PULSE(_n)	((_n) + 0x02252)
-
 enum fm10k_int_source {
 	fm10k_int_mailbox		= 0,
 	fm10k_int_pcie_fault		= 1,
@@ -550,8 +540,6 @@
 				    struct fm10k_dglort_cfg *);
 	void (*set_dma_mask)(struct fm10k_hw *, u64);
 	s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
-	s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
-	u64 (*read_systime)(struct fm10k_hw *);
 };
 
 enum fm10k_mac_type {
@@ -617,10 +605,10 @@
 						 */
 };
 
-#define FM10K_VF_FLAG_ALLMULTI_CAPABLE	((u8)1 << FM10K_XCAST_MODE_ALLMULTI)
-#define FM10K_VF_FLAG_MULTI_CAPABLE	((u8)1 << FM10K_XCAST_MODE_MULTI)
-#define FM10K_VF_FLAG_PROMISC_CAPABLE	((u8)1 << FM10K_XCAST_MODE_PROMISC)
-#define FM10K_VF_FLAG_NONE_CAPABLE	((u8)1 << FM10K_XCAST_MODE_NONE)
+#define FM10K_VF_FLAG_ALLMULTI_CAPABLE	(u8)(BIT(FM10K_XCAST_MODE_ALLMULTI))
+#define FM10K_VF_FLAG_MULTI_CAPABLE	(u8)(BIT(FM10K_XCAST_MODE_MULTI))
+#define FM10K_VF_FLAG_PROMISC_CAPABLE	(u8)(BIT(FM10K_XCAST_MODE_PROMISC))
+#define FM10K_VF_FLAG_NONE_CAPABLE	(u8)(BIT(FM10K_XCAST_MODE_NONE))
 #define FM10K_VF_FLAG_CAPABLE(vf_info)	((vf_info)->vf_flags & (u8)0xF)
 #define FM10K_VF_FLAG_ENABLED(vf_info)	((vf_info)->vf_flags >> 4)
 #define FM10K_VF_FLAG_SET_MODE(mode)	((u8)0x10 << (mode))
@@ -643,7 +631,6 @@
 	s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8);
 	void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *);
 	void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16);
-	s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64);
 };
 
 struct fm10k_iov_info {
@@ -667,7 +654,6 @@
 
 struct fm10k_hw {
 	u32 __iomem *hw_addr;
-	u32 __iomem *sw_addr;
 	void *back;
 	struct fm10k_mac_info mac;
 	struct fm10k_bus_info bus;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 91f8d73..3b06685e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -188,7 +188,7 @@
 	if (vsi)
 		return FM10K_ERR_PARAM;
 
-	/* verify upper 4 bits of vid and length are 0 */
+	/* clever trick to verify reserved bits in both vid and length */
 	if ((vid << 16 | vid) >> 28)
 		return FM10K_ERR_PARAM;
 
@@ -228,7 +228,7 @@
 
 	ether_addr_copy(hw->mac.perm_addr, perm_addr);
 	hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
-	hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR);
+	hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);
 
 	return 0;
 }
@@ -451,13 +451,6 @@
 	return mbx->ops.enqueue_tx(hw, mbx, msg);
 }
 
-const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
-	FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP),
-	FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
 /**
  *  fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
  *  @hw: pointer to hardware structure
@@ -509,52 +502,6 @@
 	return 0;
 }
 
-/**
- *  fm10k_adjust_systime_vf - Adjust systime frequency
- *  @hw: pointer to hardware structure
- *  @ppb: adjustment rate in parts per billion
- *
- *  This function takes an adjustment rate in parts per billion and will
- *  verify that this value is 0 as the VF cannot support adjusting the
- *  systime clock.
- *
- *  If the ppb value is non-zero the return is ERR_PARAM else success
- **/
-static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
-{
-	/* The VF cannot adjust the clock frequency, however it should
-	 * already have a syntonic clock with whichever host interface is
-	 * running as the master for the host interface clock domain so
-	 * there should be not frequency adjustment necessary.
-	 */
-	return ppb ? FM10K_ERR_PARAM : 0;
-}
-
-/**
- *  fm10k_read_systime_vf - Reads value of systime registers
- *  @hw: pointer to the hardware structure
- *
- *  Function reads the content of 2 registers, combined to represent a 64 bit
- *  value measured in nanoseconds.  In order to guarantee the value is accurate
- *  we check the 32 most significant bits both before and after reading the
- *  32 least significant bits to verify they didn't change as we were reading
- *  the registers.
- **/
-static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
-{
-	u32 systime_l, systime_h, systime_tmp;
-
-	systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
-
-	do {
-		systime_tmp = systime_h;
-		systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
-		systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
-	} while (systime_tmp != systime_h);
-
-	return ((u64)systime_h << 32) | systime_l;
-}
-
 static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
 	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
 	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
@@ -579,8 +526,6 @@
 	.rebind_hw_stats	= fm10k_rebind_hw_stats_vf,
 	.configure_dglort_map	= fm10k_configure_dglort_map_vf,
 	.get_host_state		= fm10k_get_host_state_generic,
-	.adjust_systime		= fm10k_adjust_systime_vf,
-	.read_systime		= fm10k_read_systime_vf,
 };
 
 static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
index c4439f1..2662f33 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,6 @@
 	FM10K_VF_MSG_ID_MSIX,
 	FM10K_VF_MSG_ID_MAC_VLAN,
 	FM10K_VF_MSG_ID_LPORT_STATE,
-	FM10K_VF_MSG_ID_1588,
 	FM10K_VF_MSG_ID_MAX,
 };
 
@@ -49,11 +48,6 @@
 	FM10K_LPORT_STATE_MSG_MAX
 };
 
-enum fm10k_tlv_1588_attr_id {
-	FM10K_1588_MSG_TIMESTAMP,
-	FM10K_1588_MSG_MAX
-};
-
 #define FM10K_VF_MSG_MSIX_HANDLER(func) \
 	 FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func)
 
@@ -70,9 +64,5 @@
 	FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \
 			  fm10k_lport_state_msg_attr, func)
 
-extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
-#define FM10K_VF_MSG_1588_HANDLER(func) \
-	FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
-
 extern const struct fm10k_info fm10k_vf_info;
 #endif /* _FM10K_VF_H */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ce6e9c..9c44739 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -97,12 +97,12 @@
 #define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 16)
 
 /* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG	BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG	BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR		BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS	BIT(3)
-#define I40E_PRIV_FLAGS_PS		BIT(4)
-#define I40E_PRIV_FLAGS_HW_ATR_EVICT	BIT(5)
+#define	I40E_PRIV_FLAGS_MFP_FLAG		BIT(0)
+#define	I40E_PRIV_FLAGS_LINKPOLL_FLAG		BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR			BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS		BIT(3)
+#define I40E_PRIV_FLAGS_HW_ATR_EVICT		BIT(4)
+#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT	BIT(5)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -112,7 +112,9 @@
 #define I40E_OEM_VER_PATCH_MASK    0xff
 #define I40E_OEM_VER_BUILD_SHIFT   8
 #define I40E_OEM_VER_SHIFT         24
-#define I40E_PHY_DEBUG_PORT        BIT(4)
+#define I40E_PHY_DEBUG_ALL \
+	(I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
+	I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
 
 /* The values in here are decimal coded as hex as is the case in the NVM map*/
 #define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -123,10 +125,7 @@
 #define XSTRINGIFY(bar) STRINGIFY(bar)
 
 #define I40E_RX_DESC(R, i)			\
-	((ring_is_16byte_desc_enabled(R))	\
-		? (union i40e_32byte_rx_desc *)	\
-			(&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
-		: (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+	(&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
 #define I40E_TX_DESC(R, i)			\
 	(&(((struct i40e_tx_desc *)((R)->desc))[i]))
 #define I40E_TX_CTXTDESC(R, i)			\
@@ -202,6 +201,7 @@
 
 #define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
 #define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
 
 enum i40e_fd_stat_idx {
 	I40E_FD_STAT_ATR,
@@ -244,7 +244,6 @@
 #define I40E_DCB_PRIO_TYPE_STRICT	0
 #define I40E_DCB_PRIO_TYPE_ETS		1
 #define I40E_DCB_STRICT_PRIO_CREDITS	127
-#define I40E_MAX_USER_PRIORITY	8
 /* DCB per TC information data structure */
 struct i40e_tc_info {
 	u16	qoffset;	/* Queue offset from base queue */
@@ -320,8 +319,6 @@
 #define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(1)
 #define I40E_FLAG_MSI_ENABLED			BIT_ULL(2)
 #define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
-#define I40E_FLAG_RX_1BUF_ENABLED		BIT_ULL(4)
-#define I40E_FLAG_RX_PS_ENABLED			BIT_ULL(5)
 #define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
 #define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
 #define I40E_FLAG_FDIR_REQUIRES_REINIT		BIT_ULL(8)
@@ -330,7 +327,6 @@
 #ifdef I40E_FCOE
 #define I40E_FLAG_FCOE_ENABLED			BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED	BIT_ULL(13)
 #define I40E_FLAG_CLEAN_ADMINQ			BIT_ULL(14)
 #define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
 #define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT_ULL(16)
@@ -363,6 +359,7 @@
 #define I40E_FLAG_STOP_FW_LLDP			BIT_ULL(47)
 #define I40E_FLAG_HAVE_10GBASET_PHY		BIT_ULL(48)
 #define I40E_FLAG_PF_MAC			BIT_ULL(50)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT		BIT_ULL(51)
 
 	/* tracks features that get auto disabled by errors */
 	u64 auto_disable_flags;
@@ -534,9 +531,7 @@
 	u8  *rss_lut_user;  /* User configured lookup table entries */
 
 	u16 max_frame;
-	u16 rx_hdr_len;
 	u16 rx_buf_len;
-	u8  dtype;
 
 	/* List of q_vectors allocated to this VSI */
 	struct i40e_q_vector **q_vectors;
@@ -554,7 +549,7 @@
 	u16 num_queue_pairs; /* Used tx and rx pairs */
 	u16 num_desc;
 	enum i40e_vsi_type type;  /* VSI type, e.g., LAN, FCoE, etc */
-	u16 vf_id;		/* Virtual function ID for SRIOV VSIs */
+	s16 vf_id;		/* Virtual function ID for SRIOV VSIs */
 
 	struct i40e_tc_configuration tc_config;
 	struct i40e_aqc_vsi_properties_data info;
@@ -811,6 +806,7 @@
 			  __always_unused __be16 proto, u16 vid);
 #endif
 int i40e_open(struct net_device *netdev);
+int i40e_close(struct net_device *netdev);
 int i40e_vsi_open(struct i40e_vsi *vsi);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -823,7 +819,6 @@
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
 				      bool is_vf, bool is_netdev);
 #ifdef I40E_FCOE
-int i40e_close(struct net_device *netdev);
 int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
 		    struct tc_to_netdev *tc);
 void i40e_netpoll(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index df8e2fd..738b42a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,16 +33,6 @@
 static void i40e_resume_aq(struct i40e_hw *hw);
 
 /**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
-	return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
-		(desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
-}
-
-/**
  *  i40e_adminq_init_regs - Initialize AdminQ registers
  *  @hw: pointer to the hardware structure
  *
@@ -624,13 +614,9 @@
 
 	/* pre-emptive resource lock release */
 	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
-	hw->aq.nvm_release_on_done = false;
+	hw->nvm_release_on_done = false;
 	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 
-	ret_code = i40e_aq_set_hmc_resource_profile(hw,
-						    I40E_HMC_PROFILE_DEFAULT,
-						    0,
-						    NULL);
 	ret_code = 0;
 
 	/* success! */
@@ -1023,26 +1009,7 @@
 	hw->aq.arq.next_to_clean = ntc;
 	hw->aq.arq.next_to_use = ntu;
 
-	if (i40e_is_nvm_update_op(&e->desc)) {
-		if (hw->aq.nvm_release_on_done) {
-			i40e_release_nvm(hw);
-			hw->aq.nvm_release_on_done = false;
-		}
-
-		switch (hw->nvmupd_state) {
-		case I40E_NVMUPD_STATE_INIT_WAIT:
-			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-			break;
-
-		case I40E_NVMUPD_STATE_WRITE_WAIT:
-			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
-			break;
-
-		default:
-			break;
-		}
-	}
-
+	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
 clean_arq_element_out:
 	/* Set pending if needed, unlock and return */
 	if (pending)
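The ARQ clean path above now hands NVM-update bookkeeping to i40e_nvmupd_check_wait_event(), whose body is not part of this hunk. A rough sketch of what it plausibly does, reconstructed only from the logic removed above and the hw->nvm_release_on_done / hw->nvm_wait_opcode fields visible elsewhere in this patch (the real i40e_nvm.c implementation may differ):

	void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
	{
		if (opcode != hw->nvm_wait_opcode)
			return;

		if (hw->nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->nvm_release_on_done = false;
		}
		hw->nvm_wait_opcode = 0;

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;
		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;
		default:
			break;
		}
	}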
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 12fbbdd..d92aad3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -97,7 +97,6 @@
 	u32 fw_build;                   /* firmware build number */
 	u16 api_maj_ver;                /* api major version */
 	u16 api_min_ver;                /* api minor version */
-	bool nvm_release_on_done;
 
 	struct mutex asq_mutex; /* Send queue lock */
 	struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 8d5c65a..11cf1a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -78,17 +78,17 @@
 #define I40E_AQ_FLAG_EI_SHIFT	14
 #define I40E_AQ_FLAG_FE_SHIFT	15
 
-#define I40E_AQ_FLAG_DD		(1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
-#define I40E_AQ_FLAG_CMP	(1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
-#define I40E_AQ_FLAG_ERR	(1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
-#define I40E_AQ_FLAG_VFE	(1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
-#define I40E_AQ_FLAG_LB		(1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
-#define I40E_AQ_FLAG_RD		(1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
-#define I40E_AQ_FLAG_VFC	(1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
-#define I40E_AQ_FLAG_BUF	(1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI		(1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
-#define I40E_AQ_FLAG_EI		(1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
-#define I40E_AQ_FLAG_FE		(1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
+#define I40E_AQ_FLAG_DD		BIT(I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP	BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR	BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE	BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB		BIT(I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD		BIT(I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC	BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF	BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI		BIT(I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI		BIT(I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE		BIT(I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
 
 /* error codes */
 enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@
 	i40e_aqc_opc_resume_port_tx				= 0x041C,
 	i40e_aqc_opc_configure_partition_bw			= 0x041D,
 
-	/* hmc */
-	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
-	i40e_aqc_opc_set_hmc_resource_profile	= 0x0501,
-
 	/* phy commands*/
 	i40e_aqc_opc_get_phy_abilities		= 0x0600,
 	i40e_aqc_opc_set_phy_config		= 0x0601,
@@ -429,6 +425,7 @@
 #define I40E_AQ_CAP_ID_SDP		0x0062
 #define I40E_AQ_CAP_ID_MDIO		0x0063
 #define I40E_AQ_CAP_ID_WSR_PROT		0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT		0x0080
 #define I40E_AQ_CAP_ID_FLEX10		0x00F1
 #define I40E_AQ_CAP_ID_CEM		0x00F2
 
@@ -1585,27 +1582,6 @@
 
 I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
 
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
-	u8	pm_profile;
-	u8	pe_vf_enabled;
-	u8	reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
-	/* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
-	I40E_HMC_PROFILE_DEFAULT	= 1,
-	I40E_HMC_PROFILE_FAVOR_VF	= 2,
-	I40E_HMC_PROFILE_EQUAL		= 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK	0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK	0x3F
-
 /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
 
 /* set in param0 for get phy abilities to report qualified modules */
@@ -1652,11 +1628,11 @@
 
 enum i40e_aq_link_speed {
 	I40E_LINK_SPEED_UNKNOWN	= 0,
-	I40E_LINK_SPEED_100MB	= (1 << I40E_LINK_SPEED_100MB_SHIFT),
-	I40E_LINK_SPEED_1GB	= (1 << I40E_LINK_SPEED_1000MB_SHIFT),
-	I40E_LINK_SPEED_10GB	= (1 << I40E_LINK_SPEED_10GB_SHIFT),
-	I40E_LINK_SPEED_40GB	= (1 << I40E_LINK_SPEED_40GB_SHIFT),
-	I40E_LINK_SPEED_20GB	= (1 << I40E_LINK_SPEED_20GB_SHIFT)
+	I40E_LINK_SPEED_100MB	= BIT(I40E_LINK_SPEED_100MB_SHIFT),
+	I40E_LINK_SPEED_1GB	= BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+	I40E_LINK_SPEED_10GB	= BIT(I40E_LINK_SPEED_10GB_SHIFT),
+	I40E_LINK_SPEED_40GB	= BIT(I40E_LINK_SPEED_40GB_SHIFT),
+	I40E_LINK_SPEED_20GB	= BIT(I40E_LINK_SPEED_20GB_SHIFT)
 };
 
 struct i40e_aqc_module_desc {
@@ -1857,7 +1833,10 @@
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE	0x00
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD	0x01
 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT	0x02
+/* Disable link manageability on a single port */
 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW	0x10
+/* Disable link manageability on all ports */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW	0x20
 	u8	reserved[15];
 };
 
@@ -1927,9 +1906,9 @@
 /* Used for 0x0704 as well as for 0x0705 commands */
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT		1
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
-				(1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+				BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
 #define I40E_AQ_ANVM_FEATURE		0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD	(1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD	BIT(FEATURE_OR_IMMEDIATE_SHIFT)
 struct i40e_aqc_nvm_config_data_feature {
 	__le16 feature_id;
 #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY		0x01
@@ -2226,13 +2205,11 @@
  */
 struct i40e_aqc_lldp_set_local_mib {
 #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT	0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK	(1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK	(1 << \
-					SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK	BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
 #define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB	0x0
 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT	(1)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK	(1 << \
-				SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \
+			BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS		0x1
 	u8	type;
 	u8	reserved0;
@@ -2250,7 +2227,7 @@
 struct i40e_aqc_lldp_stop_start_specific_agent {
 #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT	0
 #define I40E_AQC_START_SPECIFIC_AGENT_MASK \
-				(1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+				BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
 	u8	command;
 	u8	reserved[15];
 };
@@ -2303,7 +2280,7 @@
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
 struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID		BIT(15)
 #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT	0
 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK	(0x3FF << \
 					I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2323,14 +2300,13 @@
 I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
 
 struct  i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID		BIT(15)
 #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT	0
 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK	(0x3FF << \
 					I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
 	__le16	vsi_id;
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT	0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK	(0x1 << \
-					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK	BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
 
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI	0
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF	1
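For reference, the flag and mask definitions in this header are converted from open-coded shifts to the kernel's BIT()/BIT_ULL() helpers; from include/linux/bitops.h these expand to roughly the following, so the mask values are unchanged while gaining an unsigned type:

	#define BIT(nr)		(1UL << (nr))
	#define BIT_ULL(nr)	(1ULL << (nr))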
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index bf6b453..a4601d9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -217,7 +217,7 @@
 #define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE	BIT(0)
 #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS	BIT(2)
 	enum i40e_client_type type;
-	struct i40e_client_ops *ops;	/* client ops provided by the client */
+	const struct i40e_client_ops *ops; /* client ops provided by the client */
 };
 
 static inline bool i40e_client_is_registered(struct i40e_client *client)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 4596294..422b41d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -60,6 +60,8 @@
 		case I40E_DEV_ID_SFP_X722:
 		case I40E_DEV_ID_1G_BASE_T_X722:
 		case I40E_DEV_ID_10G_BASE_T_X722:
+		case I40E_DEV_ID_SFP_I_X722:
+		case I40E_DEV_ID_QSFP_I_X722:
 			hw->mac.type = I40E_MAC_X722;
 			break;
 		default:
@@ -694,7 +696,7 @@
 	/* Non Tunneled IPv6 */
 	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
 	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
-	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
+	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
 	I40E_PTT_UNUSED_ENTRY(91),
 	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
 	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -1901,13 +1903,13 @@
  *
  * Reset the external PHY.
  **/
-enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
-					struct i40e_asq_cmd_details *cmd_details)
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+				  struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_phy_debug *cmd =
 		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
-	enum i40e_status_code status;
+	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_phy_debug);
@@ -1970,10 +1972,12 @@
  * @seid: vsi number
  * @set: set unicast promiscuous enable/disable
  * @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
  **/
 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
 				u16 seid, bool set,
-				struct i40e_asq_cmd_details *cmd_details)
+				struct i40e_asq_cmd_details *cmd_details,
+				bool rx_only_promisc)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -1986,8 +1990,9 @@
 
 	if (set) {
 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
-		if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
-		    (hw->aq.api_maj_ver > 1))
+		if (rx_only_promisc &&
+		    (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+		     (hw->aq.api_maj_ver > 1)))
 			flags |= I40E_AQC_SET_VSI_PROMISC_TX;
 	}
 
@@ -2037,6 +2042,76 @@
 }
 
 /**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+							 u16 seid, bool enable,
+							 u16 vid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 flags = 0;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (enable)
+		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+	cmd->promiscuous_flags = cpu_to_le16(flags);
+	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+	cmd->seid = cpu_to_le16(seid);
+	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+							 u16 seid, bool enable,
+							 u16 vid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 flags = 0;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (enable)
+		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+	cmd->promiscuous_flags = cpu_to_le16(flags);
+	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+	cmd->seid = cpu_to_le16(seid);
+	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
  * i40e_aq_set_vsi_broadcast
  * @hw: pointer to the hw struct
  * @seid: vsi number
@@ -2157,6 +2232,9 @@
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_add_get_update_vsi *cmd =
 		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+	struct i40e_aqc_add_get_update_vsi_completion *resp =
+		(struct i40e_aqc_add_get_update_vsi_completion *)
+		&desc.params.raw;
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
@@ -2168,6 +2246,9 @@
 	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
 				    sizeof(vsi_ctx->info), cmd_details);
 
+	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
 	return status;
 }
 
@@ -2205,6 +2286,35 @@
 }
 
 /**
+ * i40e_aq_set_switch_config
+ * @hw: pointer to the hardware structure
+ * @flags: bit flag values to set
+ * @valid_flags: which bit flags to set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set switch configuration bits
+ **/
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+						u16 flags,
+						u16 valid_flags,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_switch_config *scfg =
+		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_switch_config);
+	scfg->flags = cpu_to_le16(flags);
+	scfg->valid_flags = cpu_to_le16(valid_flags);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
  * i40e_aq_get_firmware_version
  * @hw: pointer to the hw struct
  * @fw_major_version: firmware major version
@@ -2660,10 +2770,7 @@
 			u16 *rules_used, u16 *rules_free)
 {
 	/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
-	if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
-		if (!rule_id)
-			return I40E_ERR_PARAM;
-	} else {
+	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
 		/* count and mr_list shall be valid for rule_type INGRESS VLAN
 		 * mirroring. For other rule_type, count and rule_type should
 		 * not matter.
@@ -2780,36 +2887,6 @@
 }
 
 /**
- * i40e_aq_set_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * set the HMC profile of the device.
- **/
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
-				enum i40e_aq_hmc_profile profile,
-				u8 pe_vf_enabled_count,
-				struct i40e_asq_cmd_details *cmd_details)
-{
-	struct i40e_aq_desc desc;
-	struct i40e_aq_get_set_hmc_resource_profile *cmd =
-		(struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
-	i40e_status status;
-
-	i40e_fill_default_direct_cmd_desc(&desc,
-					i40e_aqc_opc_set_hmc_resource_profile);
-
-	cmd->pm_profile = (u8)profile;
-	cmd->pe_vf_enabled = pe_vf_enabled_count;
-
-	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-	return status;
-}
-
-/**
  * i40e_aq_request_resource
  * @hw: pointer to the hw struct
  * @resource: resource id
@@ -3073,6 +3150,9 @@
 			break;
 		case I40E_AQ_CAP_ID_MSIX:
 			p->num_msix_vectors = number;
+			i40e_debug(hw, I40E_DEBUG_INIT,
+				   "HW Capability: MSIX vector count = %d\n",
+				   p->num_msix_vectors);
 			break;
 		case I40E_AQ_CAP_ID_VF_MSIX:
 			p->num_msix_vectors_vf = number;
@@ -3128,6 +3208,12 @@
 			p->wr_csr_prot = (u64)number;
 			p->wr_csr_prot |= (u64)logical_id << 32;
 			break;
+		case I40E_AQ_CAP_ID_NVM_MGMT:
+			if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+				p->sec_rev_disabled = true;
+			if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+				p->update_disabled = true;
+			break;
 		default:
 			break;
 		}
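The AQ wrappers added above (i40e_aq_set_vsi_uc_promisc_on_vlan(), i40e_aq_set_vsi_mc_promisc_on_vlan(), i40e_aq_set_switch_config()) all follow the usual direct-command pattern: fill a descriptor, set the flag word, send. A hedged sketch of a caller; the function below and its error handling are illustrative and not taken from the patch:

	/* illustrative caller only: enable unicast promiscuous on one VLAN */
	static int example_uc_promisc_on_vlan(struct i40e_pf *pf,
					      struct i40e_vsi *vsi, u16 vid)
	{
		i40e_status ret;

		ret = i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							 true, vid, NULL);
		if (ret)
			dev_info(&pf->pdev->dev,
				 "uc promisc on VLAN %u failed, err %s aq_err %s\n",
				 vid, i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret ? -EIO : 0;
	}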
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 0c97733..e6af8c8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -147,9 +147,8 @@
 		dev_info(&pf->pdev->dev, "        vlan_features = 0x%08lx\n",
 			 (unsigned long int)nd->vlan_features);
 	}
-	if (vsi->active_vlans)
-		dev_info(&pf->pdev->dev,
-			 "    vlgrp: & = %p\n", vsi->active_vlans);
+	dev_info(&pf->pdev->dev,
+		 "    vlgrp: & = %p\n", vsi->active_vlans);
 	dev_info(&pf->pdev->dev,
 		 "    state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
 		 vsi->state, vsi->flags,
@@ -269,13 +268,11 @@
 			 rx_ring->queue_index,
 			 rx_ring->reg_idx);
 		dev_info(&pf->pdev->dev,
-			 "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
-			 i, rx_ring->rx_hdr_len,
-			 rx_ring->rx_buf_len,
-			 rx_ring->dtype);
+			 "    rx_rings[%i]: rx_buf_len = %d\n",
+			 i, rx_ring->rx_buf_len);
 		dev_info(&pf->pdev->dev,
-			 "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-			 i, ring_is_ps_enabled(rx_ring),
+			 "    rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+			 i,
 			 rx_ring->next_to_use,
 			 rx_ring->next_to_clean,
 			 rx_ring->ring_active);
@@ -327,9 +324,6 @@
 			 tx_ring->queue_index,
 			 tx_ring->reg_idx);
 		dev_info(&pf->pdev->dev,
-			 "    tx_rings[%i]: dtype = %d\n",
-			 i, tx_ring->dtype);
-		dev_info(&pf->pdev->dev,
 			 "    tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
 			 i,
 			 tx_ring->next_to_use,
@@ -366,8 +360,8 @@
 		 "    work_limit = %d\n",
 		 vsi->work_limit);
 	dev_info(&pf->pdev->dev,
-		 "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
-		 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+		 "    max_frame = %d, rx_buf_len = %d dtype = %d\n",
+		 vsi->max_frame, vsi->rx_buf_len, 0);
 	dev_info(&pf->pdev->dev,
 		 "    num_q_vectors = %i, base_vector = %i\n",
 		 vsi->num_q_vectors, vsi->base_vector);
@@ -592,13 +586,6 @@
 					 "   d[%03x] = 0x%016llx 0x%016llx\n",
 					 i, txd->buffer_addr,
 					 txd->cmd_type_offset_bsz);
-			} else if (sizeof(union i40e_rx_desc) ==
-				   sizeof(union i40e_16byte_rx_desc)) {
-				rxd = I40E_RX_DESC(ring, i);
-				dev_info(&pf->pdev->dev,
-					 "   d[%03x] = 0x%016llx 0x%016llx\n",
-					 i, rxd->read.pkt_addr,
-					 rxd->read.hdr_addr);
 			} else {
 				rxd = I40E_RX_DESC(ring, i);
 				dev_info(&pf->pdev->dev,
@@ -620,13 +607,6 @@
 				 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
 				 vsi_seid, ring_id, desc_n,
 				 txd->buffer_addr, txd->cmd_type_offset_bsz);
-		} else if (sizeof(union i40e_rx_desc) ==
-			   sizeof(union i40e_16byte_rx_desc)) {
-			rxd = I40E_RX_DESC(ring, desc_n);
-			dev_info(&pf->pdev->dev,
-				 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
-				 vsi_seid, ring_id, desc_n,
-				 rxd->read.pkt_addr, rxd->read.hdr_addr);
 		} else {
 			rxd = I40E_RX_DESC(ring, desc_n);
 			dev_info(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 99257fc..d701861 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -44,6 +44,8 @@
 #define I40E_DEV_ID_SFP_X722		0x37D0
 #define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
 #define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_SFP_I_X722		0x37D3
+#define I40E_DEV_ID_QSFP_I_X722		0x37D4
 
 #define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
 					 (d) == I40E_DEV_ID_QSFP_B  || \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 784b165..5e8d84f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -230,12 +230,22 @@
 
 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
 
+static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = {
+	"MFP",
+	"LinkPolling",
+	"flow-director-atr",
+	"veb-stats",
+	"hw-atr-eviction",
+	"vf-true-promisc-support",
+};
+
+#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl)
+
 static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
 	"NPAR",
 	"LinkPolling",
 	"flow-director-atr",
 	"veb-stats",
-	"packet-split",
 	"hw-atr-eviction",
 };
 
@@ -252,6 +262,110 @@
 }
 
 /**
+ * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @pf: pointer to the PF struct whose reported PHY types are converted
+ * @supported: pointer to the ethtool supported variable to fill in
+ * @advertising: pointer to the ethtool advertising variable to fill in
+ *
+ **/
+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
+				     u32 *advertising)
+{
+	enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types;
+
+	*supported = 0x0;
+	*advertising = 0x0;
+
+	if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+		*supported |= SUPPORTED_Autoneg |
+			      SUPPORTED_1000baseT_Full;
+		*advertising |= ADVERTISED_Autoneg |
+				ADVERTISED_1000baseT_Full;
+		if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+			*supported |= SUPPORTED_100baseT_Full;
+			*advertising |= ADVERTISED_100baseT_Full;
+		}
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+	    phy_types & I40E_CAP_PHY_TYPE_XFI ||
+	    phy_types & I40E_CAP_PHY_TYPE_SFI ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+		*supported |= SUPPORTED_10000baseT_Full;
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+		*supported |= SUPPORTED_Autoneg |
+			      SUPPORTED_10000baseT_Full;
+		*advertising |= ADVERTISED_Autoneg |
+				ADVERTISED_10000baseT_Full;
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+	    phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+		*supported |= SUPPORTED_40000baseCR4_Full;
+	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+		*supported |= SUPPORTED_Autoneg |
+			      SUPPORTED_40000baseCR4_Full;
+		*advertising |= ADVERTISED_Autoneg |
+				ADVERTISED_40000baseCR4_Full;
+	}
+	if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+	    !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+		*supported |= SUPPORTED_Autoneg |
+			      SUPPORTED_100baseT_Full;
+		*advertising |= ADVERTISED_Autoneg |
+				ADVERTISED_100baseT_Full;
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+		*supported |= SUPPORTED_Autoneg |
+			      SUPPORTED_1000baseT_Full;
+		*advertising |= ADVERTISED_Autoneg |
+				ADVERTISED_1000baseT_Full;
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+		*supported |= SUPPORTED_40000baseSR4_Full;
+	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+		*supported |= SUPPORTED_40000baseLR4_Full;
+	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+		*supported |= SUPPORTED_40000baseKR4_Full |
+			      SUPPORTED_Autoneg;
+		*advertising |= ADVERTISED_40000baseKR4_Full |
+				ADVERTISED_Autoneg;
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+		*supported |= SUPPORTED_20000baseKR2_Full |
+			      SUPPORTED_Autoneg;
+		*advertising |= ADVERTISED_20000baseKR2_Full |
+				ADVERTISED_Autoneg;
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+		*supported |= SUPPORTED_10000baseKR_Full |
+			      SUPPORTED_Autoneg;
+		*advertising |= ADVERTISED_10000baseKR_Full |
+				ADVERTISED_Autoneg;
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+		*supported |= SUPPORTED_10000baseKX4_Full |
+			      SUPPORTED_Autoneg;
+		*advertising |= ADVERTISED_10000baseKX4_Full |
+				ADVERTISED_Autoneg;
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+		*supported |= SUPPORTED_1000baseKX_Full |
+			      SUPPORTED_Autoneg;
+		*advertising |= ADVERTISED_1000baseKX_Full |
+				ADVERTISED_Autoneg;
+	}
+}
+
+/**
  * i40e_get_settings_link_up - Get the Link settings for when link is up
  * @hw: hw structure
  * @ecmd: ethtool command to fill in
@@ -265,6 +379,8 @@
 {
 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 	u32 link_speed = hw_link_info->link_speed;
+	u32 e_advertising = 0x0;
+	u32 e_supported = 0x0;
 
 	/* Initialize supported and advertised settings based on phy settings */
 	switch (hw_link_info->phy_type) {
@@ -305,14 +421,18 @@
 		break;
 	case I40E_PHY_TYPE_10GBASE_T:
 	case I40E_PHY_TYPE_1000BASE_T:
+	case I40E_PHY_TYPE_100BASE_TX:
 		ecmd->supported = SUPPORTED_Autoneg |
 				  SUPPORTED_10000baseT_Full |
-				  SUPPORTED_1000baseT_Full;
+				  SUPPORTED_1000baseT_Full |
+				  SUPPORTED_100baseT_Full;
 		ecmd->advertising = ADVERTISED_Autoneg;
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+			ecmd->advertising |= ADVERTISED_100baseT_Full;
 		break;
 	case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
 		ecmd->supported = SUPPORTED_Autoneg |
@@ -320,12 +440,6 @@
 		ecmd->advertising = ADVERTISED_Autoneg |
 				    ADVERTISED_1000baseT_Full;
 		break;
-	case I40E_PHY_TYPE_100BASE_TX:
-		ecmd->supported = SUPPORTED_Autoneg |
-				  SUPPORTED_100baseT_Full;
-		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
-			ecmd->advertising |= ADVERTISED_100baseT_Full;
-		break;
 	case I40E_PHY_TYPE_10GBASE_CR1_CU:
 	case I40E_PHY_TYPE_10GBASE_CR1:
 		ecmd->supported = SUPPORTED_Autoneg |
@@ -352,14 +466,23 @@
 				ecmd->advertising |= ADVERTISED_100baseT_Full;
 		}
 		break;
-	/* Backplane is set based on supported phy types in get_settings
-	 * so don't set anything here but don't warn either
-	 */
 	case I40E_PHY_TYPE_40GBASE_KR4:
 	case I40E_PHY_TYPE_20GBASE_KR2:
 	case I40E_PHY_TYPE_10GBASE_KR:
 	case I40E_PHY_TYPE_10GBASE_KX4:
 	case I40E_PHY_TYPE_1000BASE_KX:
+		ecmd->supported |= SUPPORTED_40000baseKR4_Full |
+				   SUPPORTED_20000baseKR2_Full |
+				   SUPPORTED_10000baseKR_Full |
+				   SUPPORTED_10000baseKX4_Full |
+				   SUPPORTED_1000baseKX_Full |
+				   SUPPORTED_Autoneg;
+		ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
+				     ADVERTISED_20000baseKR2_Full |
+				     ADVERTISED_10000baseKR_Full |
+				     ADVERTISED_10000baseKX4_Full |
+				     ADVERTISED_1000baseKX_Full |
+				     ADVERTISED_Autoneg;
 		break;
 	default:
 		/* if we got here and link is up something bad is afoot */
@@ -367,6 +490,16 @@
 			    hw_link_info->phy_type);
 	}
 
+	/* Now that we've worked out everything that could be supported by the
+	 * current PHY type, get what is supported by the NVM and intersect
+	 * them to get what is truly supported
+	 */
+	i40e_phy_type_to_ethtool(pf, &e_supported,
+				 &e_advertising);
+
+	ecmd->supported = ecmd->supported & e_supported;
+	ecmd->advertising = ecmd->advertising & e_advertising;
+
 	/* Set speed and duplex */
 	switch (link_speed) {
 	case I40E_LINK_SPEED_40GB:
@@ -401,74 +534,11 @@
 					struct ethtool_cmd *ecmd,
 					struct i40e_pf *pf)
 {
-	enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
-
 	/* link is down and the driver needs to fall back on
 	 * supported phy types to figure out what info to display
 	 */
-	ecmd->supported = 0x0;
-	ecmd->advertising = 0x0;
-	if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
-		ecmd->supported |= SUPPORTED_Autoneg |
-				   SUPPORTED_1000baseT_Full;
-		ecmd->advertising |= ADVERTISED_Autoneg |
-				     ADVERTISED_1000baseT_Full;
-		if (pf->hw.mac.type == I40E_MAC_X722) {
-			ecmd->supported |= SUPPORTED_100baseT_Full;
-			ecmd->advertising |= ADVERTISED_100baseT_Full;
-			if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
-				ecmd->supported |= SUPPORTED_100baseT_Full;
-				ecmd->advertising |= ADVERTISED_100baseT_Full;
-			}
-		}
-	}
-	if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
-	    phy_types & I40E_CAP_PHY_TYPE_XFI ||
-	    phy_types & I40E_CAP_PHY_TYPE_SFI ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
-		ecmd->supported |= SUPPORTED_10000baseT_Full;
-	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
-	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
-		ecmd->supported |= SUPPORTED_Autoneg |
-				   SUPPORTED_10000baseT_Full;
-		ecmd->advertising |= ADVERTISED_Autoneg |
-				     ADVERTISED_10000baseT_Full;
-	}
-	if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
-	    phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
-	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
-		ecmd->supported |= SUPPORTED_40000baseCR4_Full;
-	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
-	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
-		ecmd->supported |= SUPPORTED_Autoneg |
-				  SUPPORTED_40000baseCR4_Full;
-		ecmd->advertising |= ADVERTISED_Autoneg |
-				    ADVERTISED_40000baseCR4_Full;
-	}
-	if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
-	    !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
-		ecmd->supported |= SUPPORTED_Autoneg |
-				   SUPPORTED_100baseT_Full;
-		ecmd->advertising |= ADVERTISED_Autoneg |
-				     ADVERTISED_100baseT_Full;
-	}
-	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
-	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
-	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
-	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
-		ecmd->supported |= SUPPORTED_Autoneg |
-				   SUPPORTED_1000baseT_Full;
-		ecmd->advertising |= ADVERTISED_Autoneg |
-				     ADVERTISED_1000baseT_Full;
-	}
-	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
-		ecmd->supported |= SUPPORTED_40000baseSR4_Full;
-	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
-		ecmd->supported |= SUPPORTED_40000baseLR4_Full;
+	i40e_phy_type_to_ethtool(pf, &ecmd->supported,
+				 &ecmd->advertising);
 
 	/* With no link speed and duplex are unknown */
 	ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
@@ -497,38 +567,6 @@
 		i40e_get_settings_link_down(hw, ecmd, pf);
 
 	/* Now set the settings that don't rely on link being up/down */
-
-	/* For backplane, supported and advertised are only reliant on the
-	 * phy types the NVM specifies are supported.
-	 */
-	if (hw->device_id == I40E_DEV_ID_KX_B ||
-	    hw->device_id == I40E_DEV_ID_KX_C ||
-	    hw->device_id == I40E_DEV_ID_20G_KR2 ||
-	    hw->device_id ==  I40E_DEV_ID_20G_KR2_A) {
-		ecmd->supported = SUPPORTED_Autoneg;
-		ecmd->advertising = ADVERTISED_Autoneg;
-		if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
-			ecmd->supported |= SUPPORTED_40000baseKR4_Full;
-			ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
-		}
-		if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
-			ecmd->supported |= SUPPORTED_20000baseKR2_Full;
-			ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
-		}
-		if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
-			ecmd->supported |= SUPPORTED_10000baseKR_Full;
-			ecmd->advertising |= ADVERTISED_10000baseKR_Full;
-		}
-		if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
-			ecmd->supported |= SUPPORTED_10000baseKX4_Full;
-			ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
-		}
-		if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
-			ecmd->supported |= SUPPORTED_1000baseKX_Full;
-			ecmd->advertising |= ADVERTISED_1000baseKX_Full;
-		}
-	}
-
 	/* Set autoneg settings */
 	ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
 			  AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -1143,6 +1181,10 @@
 		sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
 		sizeof(drvinfo->bus_info));
+	if (pf->hw.pf_id == 0)
+		drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN;
+	else
+		drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
 }
 
 static void i40e_get_ringparam(struct net_device *netdev,
@@ -1259,6 +1301,13 @@
 		}
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
+			/* this is to allow wr32 to have something to write to
+			 * during early allocation of Rx buffers
+			 */
+			u32 __iomem faketail = 0;
+			struct i40e_ring *ring;
+			u16 unused;
+
 			/* clone ring and setup updated count */
 			rx_rings[i] = *vsi->rx_rings[i];
 			rx_rings[i].count = new_rx_count;
@@ -1267,12 +1316,22 @@
 			 */
 			rx_rings[i].desc = NULL;
 			rx_rings[i].rx_bi = NULL;
+			rx_rings[i].tail = (u8 __iomem *)&faketail;
 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
+			if (err)
+				goto rx_unwind;
+
+			/* now allocate the Rx buffers to make sure the OS
+			 * has enough memory, any failure here means abort
+			 */
+			ring = &rx_rings[i];
+			unused = I40E_DESC_UNUSED(ring);
+			err = i40e_alloc_rx_buffers(ring, unused);
+rx_unwind:
 			if (err) {
-				while (i) {
-					i--;
+				do {
 					i40e_free_rx_resources(&rx_rings[i]);
-				}
+				} while (i--);
 				kfree(rx_rings);
 				rx_rings = NULL;
 
@@ -1298,6 +1357,17 @@
 	if (rx_rings) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			i40e_free_rx_resources(vsi->rx_rings[i]);
+			/* get the real tail offset */
+			rx_rings[i].tail = vsi->rx_rings[i]->tail;
+			/* this is to fake out the allocation routine
+			 * into thinking it has to realloc everything
+			 * but the recycling logic will let us re-use
+			 * the buffers allocated above
+			 */
+			rx_rings[i].next_to_use = 0;
+			rx_rings[i].next_to_clean = 0;
+			rx_rings[i].next_to_alloc = 0;
+			/* do a struct copy */
 			*vsi->rx_rings[i] = rx_rings[i];
 		}
 		kfree(rx_rings);
@@ -1342,7 +1412,10 @@
 			return I40E_VSI_STATS_LEN(netdev);
 		}
 	case ETH_SS_PRIV_FLAGS:
-		return I40E_PRIV_FLAGS_STR_LEN;
+		if (pf->hw.pf_id == 0)
+			return I40E_PRIV_FLAGS_GL_STR_LEN;
+		else
+			return I40E_PRIV_FLAGS_STR_LEN;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -1540,10 +1613,18 @@
 		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
 	case ETH_SS_PRIV_FLAGS:
-		for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
-			memcpy(data, i40e_priv_flags_strings[i],
-			       ETH_GSTRING_LEN);
-			data += ETH_GSTRING_LEN;
+		if (pf->hw.pf_id == 0) {
+			for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) {
+				memcpy(data, i40e_priv_flags_strings_gl[i],
+				       ETH_GSTRING_LEN);
+				data += ETH_GSTRING_LEN;
+			}
+		} else {
+			for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+				memcpy(data, i40e_priv_flags_strings[i],
+				       ETH_GSTRING_LEN);
+				data += ETH_GSTRING_LEN;
+			}
 		}
 		break;
 	default:
@@ -1714,7 +1795,7 @@
 		/* If the device is online then take it offline */
 		if (if_running)
 			/* indicate we're in test mode */
-			dev_close(netdev);
+			i40e_close(netdev);
 		else
 			/* This reset does not affect link - if it is
 			 * changed to a type of reset that does affect
@@ -1743,7 +1824,7 @@
 		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
 		if (if_running)
-			dev_open(netdev);
+			i40e_open(netdev);
 	} else {
 		/* Online tests */
 		netif_info(pf, drv, netdev, "online testing starting\n");
@@ -1837,7 +1918,7 @@
 		if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
 			pf->led_status = i40e_led_get(hw);
 		} else {
-			i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL);
+			i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
 			ret = i40e_led_get_phy(hw, &temp_status,
 					       &pf->phy_led_val);
 			pf->led_status = temp_status;
@@ -2490,7 +2571,6 @@
 
 	if (!vsi)
 		return -EINVAL;
-
 	pf = vsi->back;
 
 	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
@@ -2548,15 +2628,18 @@
 	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
 
 	if (ntohl(fsp->m_ext.data[1])) {
-		if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
-			netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+		vf_id = ntohl(fsp->h_ext.data[1]);
+		if (vf_id >= pf->num_alloc_vfs) {
+			netif_info(pf, drv, vsi->netdev,
+				   "Invalid VF id %d\n", vf_id);
 			goto free_input;
 		}
-		vf_id = ntohl(fsp->h_ext.data[1]);
 		/* Find vsi id from vf id and override dest vsi */
 		input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
 		if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
-			netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+			netif_info(pf, drv, vsi->netdev,
+				   "Invalid queue id %d for VF %d\n",
+				   input->q_index, vf_id);
 			goto free_input;
 		}
 	}
@@ -2803,18 +2886,18 @@
 	struct i40e_pf *pf = vsi->back;
 	u32 ret_flags = 0;
 
-	ret_flags |= pf->hw.func_caps.npar_enable ?
-		I40E_PRIV_FLAGS_NPAR_FLAG : 0;
 	ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
 		I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
 	ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
 		I40E_PRIV_FLAGS_FD_ATR : 0;
 	ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
 		I40E_PRIV_FLAGS_VEB_STATS : 0;
-	ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ?
-		I40E_PRIV_FLAGS_PS : 0;
 	ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
 		0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
+	if (pf->hw.pf_id == 0) {
+		ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
+			I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
+	}
 
 	return ret_flags;
 }
@@ -2829,27 +2912,13 @@
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
+	u16 sw_flags = 0, valid_flags = 0;
 	bool reset_required = false;
+	bool promisc_change = false;
+	int ret;
 
 	/* NOTE: MFP is not settable */
 
-	/* allow the user to control the method of receive
-	 * buffer DMA, whether the packet is split at header
-	 * boundaries into two separate buffers.  In some cases
-	 * one routine or the other will perform better.
-	 */
-	if ((flags & I40E_PRIV_FLAGS_PS) &&
-	    !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
-		pf->flags |= I40E_FLAG_RX_PS_ENABLED;
-		pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED;
-		reset_required = true;
-	} else if (!(flags & I40E_PRIV_FLAGS_PS) &&
-		   (pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
-		pf->flags &= ~I40E_FLAG_RX_PS_ENABLED;
-		pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
-		reset_required = true;
-	}
-
 	if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
 		pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
 	else
@@ -2876,6 +2945,33 @@
 		reset_required = true;
 	}
 
+	if (pf->hw.pf_id == 0) {
+		if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+		    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+			pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
+			promisc_change = true;
+		} else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+			   (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+			pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
+			promisc_change = true;
+		}
+	}
+	if (promisc_change) {
+		if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+			sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+		ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
+						NULL);
+		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+			dev_info(&pf->pdev->dev,
+				 "couldn't set switch config bits, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
+			/* not a fatal problem, just keep going */
+		}
+	}
+
 	if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
 	    (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
 		pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
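The new "vf-true-promisc-support" private flag is only exposed on PF 0 (the global strings table above); toggling it flips the I40E_AQ_SET_SWITCH_CFG_PROMISC bit via i40e_aq_set_switch_config(). Assuming a reasonably recent ethtool, it can be toggled with something like "ethtool --set-priv-flags <iface> vf-true-promisc-support on" and read back with "ethtool --show-priv-flags <iface>".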
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 8ad162c..58e6c15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -38,16 +38,6 @@
 #include "i40e_fcoe.h"
 
 /**
- * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
- * @ptype: the packet type field from rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
-	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
-	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
-/**
  * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
  * @sof: the FCoE start of frame delimiter
  **/
@@ -1371,7 +1361,7 @@
 	if (i40e_chk_linearize(skb, count)) {
 		if (__skb_linearize(skb))
 			goto out_drop;
-		count = TXD_USE_COUNT(skb->len);
+		count = i40e_txd_use_count(skb->len);
 		tx_ring->tx_stats.tx_linearize++;
 	}
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 5ebe12d..a7c7b1d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -49,7 +49,7 @@
 	struct i40e_hmc_sd_entry *sd_entry;
 	bool dma_mem_alloc_done = false;
 	struct i40e_dma_mem mem;
-	i40e_status ret_code;
+	i40e_status ret_code = I40E_SUCCESS;
 	u64 alloc_len;
 
 	if (NULL == hmc_info->sd_table.sd_entry) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6700643..1cd0ebf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -45,8 +45,8 @@
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 16
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -90,6 +90,8 @@
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
 	/* required last entry */
@@ -326,7 +328,7 @@
 		unsigned long trans_start;
 
 		q = netdev_get_tx_queue(netdev, i);
-		trans_start = q->trans_start ? : netdev->trans_start;
+		trans_start = q->trans_start;
 		if (netif_xmit_stopped(q) &&
 		    time_after(jiffies,
 			       (trans_start + netdev->watchdog_timeo))) {
@@ -396,24 +398,6 @@
 }
 
 /**
- * i40e_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
- **/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
-{
-	rx_ring->next_to_use = val;
-
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-	writel(val, rx_ring->tail);
-}
-
-/**
  * i40e_get_vsi_stats_struct - Get System Network Statistics
  * @vsi: the VSI we care about
  *
@@ -2097,6 +2081,12 @@
 		}
 	}
 
+	/* if the VF is not trusted do not do promisc */
+	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
+		clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+		goto out;
+	}
+
 	/* check for changes in promiscuous modes */
 	if (changed_flags & IFF_ALLMULTI) {
 		bool cur_multipromisc;
@@ -2138,7 +2128,8 @@
 			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
 							  &vsi->back->hw,
 							  vsi->seid,
-							  cur_promisc, NULL);
+							  cur_promisc, NULL,
+							  true);
 			if (aq_ret) {
 				retval =
 				i40e_aq_rc_to_posix(aq_ret,
@@ -2865,34 +2856,21 @@
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
 
 	ring->rx_buf_len = vsi->rx_buf_len;
-	ring->rx_hdr_len = vsi->rx_hdr_len;
 
 	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
-	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
 
 	rx_ctx.base = (ring->dma / 128);
 	rx_ctx.qlen = ring->count;
 
-	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
-		set_ring_16byte_desc_enabled(ring);
-		rx_ctx.dsize = 0;
-	} else {
-		rx_ctx.dsize = 1;
-	}
+	/* use 32 byte descriptors */
+	rx_ctx.dsize = 1;
 
-	rx_ctx.dtype = vsi->dtype;
-	if (vsi->dtype) {
-		set_ring_ps_enabled(ring);
-		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
-				  I40E_RX_SPLIT_IP      |
-				  I40E_RX_SPLIT_TCP_UDP |
-				  I40E_RX_SPLIT_SCTP;
-	} else {
-		rx_ctx.hsplit_0 = 0;
-	}
+	/* descriptor type is always zero
+	 * rx_ctx.dtype = 0;
+	 */
+	rx_ctx.hsplit_0 = 0;
 
-	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
-				  (chain_len * ring->rx_buf_len));
+	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
 	if (hw->revision_id == 0)
 		rx_ctx.lrxqthresh = 0;
 	else
@@ -2929,12 +2907,7 @@
 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
 	writel(0, ring->tail);
 
-	if (ring_is_ps_enabled(ring)) {
-		i40e_alloc_rx_headers(ring);
-		i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
-	} else {
-		i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
-	}
+	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
 
 	return 0;
 }
@@ -2973,40 +2946,18 @@
 	else
 		vsi->max_frame = I40E_RXBUFFER_2048;
 
-	/* figure out correct receive buffer length */
-	switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
-				    I40E_FLAG_RX_PS_ENABLED)) {
-	case I40E_FLAG_RX_1BUF_ENABLED:
-		vsi->rx_hdr_len = 0;
-		vsi->rx_buf_len = vsi->max_frame;
-		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
-		break;
-	case I40E_FLAG_RX_PS_ENABLED:
-		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
-		vsi->rx_buf_len = I40E_RXBUFFER_2048;
-		vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
-		break;
-	default:
-		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
-		vsi->rx_buf_len = I40E_RXBUFFER_2048;
-		vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
-		break;
-	}
+	vsi->rx_buf_len = I40E_RXBUFFER_2048;
 
 #ifdef I40E_FCOE
 	/* setup rx buffer for FCoE */
 	if ((vsi->type == I40E_VSI_FCOE) &&
 	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
-		vsi->rx_hdr_len = 0;
 		vsi->rx_buf_len = I40E_RXBUFFER_3072;
 		vsi->max_frame = I40E_RXBUFFER_3072;
-		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
 	}
 
 #endif /* I40E_FCOE */
 	/* round up for the chip's needs */
-	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
-				BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
 	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
 				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
@@ -4164,7 +4115,7 @@
 	int i;
 
 	i40e_stop_misc_vector(pf);
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
 		synchronize_irq(pf->msix_entries[0].vector);
 		free_irq(pf->msix_entries[0].vector, pf);
 	}
@@ -5509,11 +5460,7 @@
  *
  * Returns 0, this is not allowed to fail
  **/
-#ifdef I40E_FCOE
 int i40e_close(struct net_device *netdev)
-#else
-static int i40e_close(struct net_device *netdev)
-#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -5538,8 +5485,6 @@
 
 	WARN_ON(in_interrupt());
 
-	if (i40e_check_asq_alive(&pf->hw))
-		i40e_vc_notify_reset(pf);
 
 	/* do the biggest reset indicated */
 	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -6377,7 +6322,7 @@
 			break;
 		default:
 			dev_info(&pf->pdev->dev,
-				 "ARQ Error: Unknown event 0x%04x received\n",
+				 "ARQ: Unknown event 0x%04x ignored\n",
 				 opcode);
 			break;
 		}
@@ -6742,6 +6687,8 @@
 	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
 	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
 		return;
+	if (i40e_check_asq_alive(&pf->hw))
+		i40e_vc_notify_reset(pf);
 
 	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
@@ -6862,6 +6809,7 @@
 	 */
 	ret = i40e_aq_set_phy_int_mask(&pf->hw,
 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
+					 I40E_AQ_EVENT_MEDIA_NA |
 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
 	if (ret)
 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -7525,10 +7473,6 @@
 		rx_ring->count = vsi->num_desc;
 		rx_ring->size = 0;
 		rx_ring->dcb_tc = 0;
-		if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
-			set_ring_16byte_desc_enabled(rx_ring);
-		else
-			clear_ring_16byte_desc_enabled(rx_ring);
 		rx_ring->rx_itr_setting = pf->rx_itr_default;
 		vsi->rx_rings[i] = rx_ring;
 	}
@@ -8084,24 +8028,45 @@
 {
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
+	u16 vf_id = vsi->vf_id;
 	u8 i;
 
 	/* Fill out hash function seed */
 	if (seed) {
 		u32 *seed_dw = (u32 *)seed;
 
-		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+		if (vsi->type == I40E_VSI_MAIN) {
+			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+				i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
+						  seed_dw[i]);
+		} else if (vsi->type == I40E_VSI_SRIOV) {
+			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
+				i40e_write_rx_ctl(hw,
+						  I40E_VFQF_HKEY1(i, vf_id),
+						  seed_dw[i]);
+		} else {
+			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
+		}
 	}
 
 	if (lut) {
 		u32 *lut_dw = (u32 *)lut;
 
-		if (lut_size != I40E_HLUT_ARRAY_SIZE)
-			return -EINVAL;
-
-		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
-			wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+		if (vsi->type == I40E_VSI_MAIN) {
+			if (lut_size != I40E_HLUT_ARRAY_SIZE)
+				return -EINVAL;
+			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+		} else if (vsi->type == I40E_VSI_SRIOV) {
+			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
+				return -EINVAL;
+			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+				i40e_write_rx_ctl(hw,
+						  I40E_VFQF_HLUT1(i, vf_id),
+						  lut_dw[i]);
+		} else {
+			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+		}
 	}
 	i40e_flush(hw);
 
@@ -8440,7 +8405,6 @@
 
 	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
 				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
-	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
 	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
 		if (I40E_DEBUG_USER & debug)
 			pf->hw.debug_mask = debug;
@@ -8451,14 +8415,8 @@
 	/* Set default capability flags */
 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
 		    I40E_FLAG_MSI_ENABLED     |
-		    I40E_FLAG_LINK_POLLING_ENABLED |
 		    I40E_FLAG_MSIX_ENABLED;
 
-	if (iommu_present(&pci_bus_type))
-		pf->flags |= I40E_FLAG_RX_PS_ENABLED;
-	else
-		pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
-
 	/* Set default ITR */
 	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
 	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
@@ -8559,6 +8517,7 @@
 			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
 			     I40E_FLAG_WB_ON_ITR_CAPABLE |
 			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
+			     I40E_FLAG_NO_PCI_LINK_CHECK |
 			     I40E_FLAG_100M_SGMII_CAPABLE |
 			     I40E_FLAG_USE_SET_LLDP_MIB |
 			     I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
@@ -9073,6 +9032,7 @@
 	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
 	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
 	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
+	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
 #if IS_ENABLED(CONFIG_VXLAN)
 	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
 	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
@@ -9113,40 +9073,44 @@
 	np = netdev_priv(netdev);
 	np->vsi = vsi;
 
-	netdev->hw_enc_features |= NETIF_F_IP_CSUM	       |
-				   NETIF_F_IPV6_CSUM	       |
-				   NETIF_F_TSO		       |
-				   NETIF_F_TSO6		       |
-				   NETIF_F_TSO_ECN	       |
-				   NETIF_F_GSO_GRE	       |
-				   NETIF_F_GSO_UDP_TUNNEL      |
-				   NETIF_F_GSO_UDP_TUNNEL_CSUM |
+	netdev->hw_enc_features |= NETIF_F_SG			|
+				   NETIF_F_IP_CSUM		|
+				   NETIF_F_IPV6_CSUM		|
+				   NETIF_F_HIGHDMA		|
+				   NETIF_F_SOFT_FEATURES	|
+				   NETIF_F_TSO			|
+				   NETIF_F_TSO_ECN		|
+				   NETIF_F_TSO6			|
+				   NETIF_F_GSO_GRE		|
+				   NETIF_F_GSO_GRE_CSUM		|
+				   NETIF_F_GSO_IPIP		|
+				   NETIF_F_GSO_SIT		|
+				   NETIF_F_GSO_UDP_TUNNEL	|
+				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
+				   NETIF_F_GSO_PARTIAL		|
+				   NETIF_F_SCTP_CRC		|
+				   NETIF_F_RXHASH		|
+				   NETIF_F_RXCSUM		|
 				   0;
 
-	netdev->features = NETIF_F_SG		       |
-			   NETIF_F_IP_CSUM	       |
-			   NETIF_F_SCTP_CRC	       |
-			   NETIF_F_HIGHDMA	       |
-			   NETIF_F_GSO_UDP_TUNNEL      |
-			   NETIF_F_GSO_GRE	       |
-			   NETIF_F_HW_VLAN_CTAG_TX     |
-			   NETIF_F_HW_VLAN_CTAG_RX     |
-			   NETIF_F_HW_VLAN_CTAG_FILTER |
-			   NETIF_F_IPV6_CSUM	       |
-			   NETIF_F_TSO		       |
-			   NETIF_F_TSO_ECN	       |
-			   NETIF_F_TSO6		       |
-			   NETIF_F_RXCSUM	       |
-			   NETIF_F_RXHASH	       |
-			   0;
+	if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
+		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+	/* record features VLANs can make use of */
+	netdev->vlan_features |= netdev->hw_enc_features |
+				 NETIF_F_TSO_MANGLEID;
 
 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
-		netdev->features |= NETIF_F_NTUPLE;
-	if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
-		netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		netdev->hw_features |= NETIF_F_NTUPLE;
 
-	/* copy netdev features into list of user selectable features */
-	netdev->hw_features |= netdev->features;
+	netdev->hw_features |= netdev->hw_enc_features	|
+			       NETIF_F_HW_VLAN_CTAG_TX	|
+			       NETIF_F_HW_VLAN_CTAG_RX;
+
+	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
 
 	if (vsi->type == I40E_VSI_MAIN) {
 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
@@ -9162,6 +9126,12 @@
 					I40E_VLAN_ANY, false, true);
 			spin_unlock_bh(&vsi->mac_filter_list_lock);
 		}
+	} else if ((pf->hw.aq.api_maj_ver > 1) ||
+		   ((pf->hw.aq.api_maj_ver == 1) &&
+		    (pf->hw.aq.api_min_ver > 4))) {
+		/* Supported in FW API version higher than 1.4 */
+		pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
 	} else {
 		/* relate the VSI_VMDQ name to the VSI_MAIN name */
 		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -9179,12 +9149,7 @@
 
 	ether_addr_copy(netdev->dev_addr, mac_addr);
 	ether_addr_copy(netdev->perm_addr, mac_addr);
-	/* vlan gets same features (except vlan offload)
-	 * after any tweaks for specific VSI types
-	 */
-	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
-						     NETIF_F_HW_VLAN_CTAG_RX |
-						   NETIF_F_HW_VLAN_CTAG_FILTER);
+
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
 	/* Setup netdev TC information */
@@ -9397,7 +9362,8 @@
 			ctxt.info.valid_sections |=
 				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
 			ctxt.info.queueing_opt_flags |=
-						I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
+				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
 		}
 
 		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
@@ -10443,6 +10409,7 @@
  **/
 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 {
+	u16 flags = 0;
 	int ret;
 
 	/* find out what's out there already */
@@ -10456,6 +10423,32 @@
 	}
 	i40e_pf_reset_stats(pf);
 
+	/* Set the switch config bit for the whole device to
+	 * support limited promisc or true promisc
+	 * when the user requests promisc. The default is limited
+	 * promisc.
+	 */
+
+	if ((pf->hw.pf_id == 0) &&
+	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+
+	if (pf->hw.pf_id == 0) {
+		u16 valid_flags;
+
+		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
+						NULL);
+		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+			dev_info(&pf->pdev->dev,
+				 "couldn't set switch config bits, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
+			/* not a fatal problem, just keep going */
+		}
+	}
+
 	/* first time setup */
 	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
 		struct i40e_vsi *vsi = NULL;
@@ -10683,11 +10676,9 @@
 #ifdef CONFIG_PCI_IOV
 	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
 #endif
-	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
+	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
 		      pf->hw.func_caps.num_vsis,
-		      pf->vsi[pf->lan_vsi]->num_queue_pairs,
-		      pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
-
+		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
 	if (pf->flags & I40E_FLAG_RSS_ENABLED)
 		i += snprintf(&buf[i], REMAIN(i), " RSS");
 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
@@ -10826,6 +10817,12 @@
 	hw->bus.func = PCI_FUNC(pdev->devfn);
 	pf->instance = pfs_found;
 
+	/* set up the locks for the AQ, do this only once in probe
+	 * and destroy them only once in remove
+	 */
+	mutex_init(&hw->aq.asq_mutex);
+	mutex_init(&hw->aq.arq_mutex);
+
 	if (debug != -1) {
 		pf->msg_enable = pf->hw.debug_mask;
 		pf->msg_enable = debug;
@@ -10871,12 +10868,6 @@
 	/* set up a default setting for link flow control */
 	pf->hw.fc.requested_mode = I40E_FC_NONE;
 
-	/* set up the locks for the AQ, do this only once in probe
-	 * and destroy them only once in remove
-	 */
-	mutex_init(&hw->aq.asq_mutex);
-	mutex_init(&hw->aq.arq_mutex);
-
 	err = i40e_init_adminq(hw);
 	if (err) {
 		if (err == I40E_ERR_FIRMWARE_API_VERSION)
@@ -11068,6 +11059,7 @@
 	 */
 	err = i40e_aq_set_phy_int_mask(&pf->hw,
 				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
+					 I40E_AQ_EVENT_MEDIA_NA |
 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
 	if (err)
 		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -11269,7 +11261,6 @@
 	kfree(pf->qp_pile);
 err_sw_init:
 err_adminq_setup:
-	(void)i40e_shutdown_adminq(hw);
 err_pf_reset:
 	iounmap(hw->hw_addr);
 err_ioremap:
@@ -11311,8 +11302,10 @@
 	/* no more scheduling of any task */
 	set_bit(__I40E_SUSPENDED, &pf->state);
 	set_bit(__I40E_DOWN, &pf->state);
-	del_timer_sync(&pf->service_timer);
-	cancel_work_sync(&pf->service_task);
+	if (pf->service_timer.data)
+		del_timer_sync(&pf->service_timer);
+	if (pf->service_task.func)
+		cancel_work_sync(&pf->service_task);
 
 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
 		i40e_free_vfs(pf);
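
The i40e_main.c hunks above rework how the netdev feature sets are layered: encapsulation offloads are collected in hw_enc_features, the VLAN feature set is derived from them, the user-toggleable bits land in hw_features, and the enabled set plus the always-on VLAN filter becomes features. What follows is a minimal, self-contained sketch of that layering pattern using made-up bit names rather than the real NETIF_F_* flags; it models the idea only, not the driver itself.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for a few feature bits (not the kernel values). */
#define F_SG          (1u << 0)
#define F_TSO         (1u << 1)
#define F_GSO_TUNNEL  (1u << 2)
#define F_VLAN_TX     (1u << 3)
#define F_VLAN_RX     (1u << 4)
#define F_VLAN_FILTER (1u << 5)
#define F_NTUPLE      (1u << 6)

struct toy_netdev {
	uint32_t hw_enc_features;  /* offloads usable on encapsulated frames */
	uint32_t vlan_features;    /* offloads inherited by VLAN sub-devices */
	uint32_t hw_features;      /* user-toggleable via ethtool            */
	uint32_t features;         /* currently enabled                      */
};

static void setup_features(struct toy_netdev *nd, int mfp_enabled)
{
	nd->hw_enc_features = F_SG | F_TSO | F_GSO_TUNNEL;

	/* VLAN devices inherit the encapsulation-capable set */
	nd->vlan_features = nd->hw_enc_features;

	/* n-tuple filters only when not in multi-function mode */
	if (!mfp_enabled)
		nd->hw_features |= F_NTUPLE;

	/* user-toggleable: enc features plus VLAN tag insert/strip */
	nd->hw_features |= nd->hw_enc_features | F_VLAN_TX | F_VLAN_RX;

	/* enabled set: everything toggleable plus the fixed VLAN filter */
	nd->features = nd->hw_features | F_VLAN_FILTER;
}

int main(void)
{
	struct toy_netdev nd = { 0 };

	setup_features(&nd, 0);
	printf("features=%#x hw=%#x vlan=%#x\n",
	       nd.features, nd.hw_features, nd.vlan_features);
	return 0;
}
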
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 5730f80..954efe3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -693,10 +693,10 @@
 	/* early check for status command and debug msgs */
 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 
-	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
 		   i40e_nvm_update_state_str[upd_cmd],
 		   hw->nvmupd_state,
-		   hw->aq.nvm_release_on_done,
+		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
 		   cmd->command, cmd->config, cmd->offset, cmd->data_size);
 
 	if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -710,7 +710,18 @@
 	 * going into the state machine
 	 */
 	if (upd_cmd == I40E_NVMUPD_STATUS) {
+		if (!cmd->data_size) {
+			*perrno = -EFAULT;
+			return I40E_ERR_BUF_TOO_SHORT;
+		}
+
 		bytes[0] = hw->nvmupd_state;
+
+		if (cmd->data_size >= 4) {
+			bytes[1] = 0;
+			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+		}
+
 		return 0;
 	}
 
@@ -729,6 +740,14 @@
 
 	case I40E_NVMUPD_STATE_INIT_WAIT:
 	case I40E_NVMUPD_STATE_WRITE_WAIT:
+		/* if we need to stop waiting for an event, clear
+		 * the wait info and return before doing anything else
+		 */
+		if (cmd->offset == 0xffff) {
+			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+			return 0;
+		}
+
 		status = I40E_ERR_NOT_READY;
 		*perrno = -EBUSY;
 		break;
@@ -799,7 +818,8 @@
 			if (status) {
 				i40e_release_nvm(hw);
 			} else {
-				hw->aq.nvm_release_on_done = true;
+				hw->nvm_release_on_done = true;
+				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 			}
 		}
@@ -815,7 +835,8 @@
 			if (status) {
 				i40e_release_nvm(hw);
 			} else {
-				hw->aq.nvm_release_on_done = true;
+				hw->nvm_release_on_done = true;
+				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 			}
 		}
@@ -828,10 +849,12 @@
 						     hw->aq.asq_last_status);
 		} else {
 			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-			if (status)
+			if (status) {
 				i40e_release_nvm(hw);
-			else
+			} else {
+				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
 				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+			}
 		}
 		break;
 
@@ -849,7 +872,8 @@
 				   -EIO;
 				i40e_release_nvm(hw);
 			} else {
-				hw->aq.nvm_release_on_done = true;
+				hw->nvm_release_on_done = true;
+				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
 				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 			}
 		}
@@ -940,8 +964,10 @@
 	switch (upd_cmd) {
 	case I40E_NVMUPD_WRITE_CON:
 		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
-		if (!status)
+		if (!status) {
+			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+		}
 		break;
 
 	case I40E_NVMUPD_WRITE_LCB:
@@ -953,7 +979,8 @@
 				   -EIO;
 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 		} else {
-			hw->aq.nvm_release_on_done = true;
+			hw->nvm_release_on_done = true;
+			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 		}
 		break;
@@ -967,6 +994,7 @@
 				   -EIO;
 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 		} else {
+			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
 			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
 		}
 		break;
@@ -980,7 +1008,8 @@
 				   -EIO;
 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 		} else {
-			hw->aq.nvm_release_on_done = true;
+			hw->nvm_release_on_done = true;
+			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
 			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 		}
 		break;
@@ -1030,6 +1059,37 @@
 }
 
 /**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+{
+	if (opcode == hw->nvm_wait_opcode) {
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
+		if (hw->nvm_release_on_done) {
+			i40e_release_nvm(hw);
+			hw->nvm_release_on_done = false;
+		}
+		hw->nvm_wait_opcode = 0;
+
+		switch (hw->nvmupd_state) {
+		case I40E_NVMUPD_STATE_INIT_WAIT:
+			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+			break;
+
+		case I40E_NVMUPD_STATE_WRITE_WAIT:
+			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+
+/**
  * i40e_nvmupd_validate_command - Validate given command
  * @hw: pointer to hardware structure
  * @cmd: pointer to nvm update command buffer
@@ -1189,6 +1249,12 @@
 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
 	}
 
+	/* should we wait for a follow-up event? */
+	if (cmd->offset) {
+		hw->nvm_wait_opcode = cmd->offset;
+		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+	}
+
 	return status;
 }
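
The i40e_nvm.c changes above replace the old aq.nvm_release_on_done flag with an explicit wait handshake: each operation records the AdminQ opcode it expects in nvm_wait_opcode, and i40e_nvmupd_check_wait_event() releases the NVM resource and advances the state machine when that completion arrives. Below is a hedged, stand-alone model of that handshake; the struct, the 0x0703 opcode value, and the helper names are illustrative stand-ins, not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of the NVM update wait handshake (not the driver code). */
enum nvmupd_state { ST_INIT, ST_INIT_WAIT, ST_WRITING, ST_WRITE_WAIT };

struct toy_hw {
	enum nvmupd_state state;
	uint16_t wait_opcode;   /* AdminQ opcode we are waiting on, 0 = none  */
	bool release_on_done;   /* drop the NVM resource once the event lands */
};

/* Called when an AdminQ completion with 'opcode' arrives. */
static void check_wait_event(struct toy_hw *hw, uint16_t opcode)
{
	if (opcode != hw->wait_opcode)
		return;

	if (hw->release_on_done) {
		/* the real code calls i40e_release_nvm(hw) here */
		hw->release_on_done = false;
	}
	hw->wait_opcode = 0;

	switch (hw->state) {
	case ST_INIT_WAIT:
		hw->state = ST_INIT;
		break;
	case ST_WRITE_WAIT:
		hw->state = ST_WRITING;
		break;
	default:
		break;
	}
}

int main(void)
{
	struct toy_hw hw = { .state = ST_WRITE_WAIT, .wait_opcode = 0x0703 };

	check_wait_event(&hw, 0x0703);   /* e.g. an nvm_update completion */
	printf("state=%d wait=%#x\n", hw.state, (unsigned)hw.wait_opcode);
	return 0;
}
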
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index d51eee5..80403c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -130,9 +130,18 @@
 				u16 vsi_id, bool set_filter,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
-		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+		bool rx_only_promisc);
 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
 		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+							 u16 seid, bool enable,
+							 u16 vid,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+							 u16 seid, bool enable,
+							 u16 vid,
+				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
 				u16 seid, bool enable,
 				struct i40e_asq_cmd_details *cmd_details);
@@ -174,6 +183,10 @@
 				struct i40e_aqc_get_switch_config_resp *buf,
 				u16 buf_size, u16 *start_seid,
 				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+						u16 flags,
+						u16 valid_flags,
+				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
 				enum i40e_aq_resources_ids resource,
 				enum i40e_aq_resource_access_type access,
@@ -228,10 +241,6 @@
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
 				struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
-				enum i40e_aq_hmc_profile profile,
-				u8 pe_vf_enabled_count,
-				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
 				u16 seid, u16 credit, u8 max_bw,
 				struct i40e_asq_cmd_details *cmd_details);
@@ -308,6 +317,7 @@
 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 				struct i40e_nvm_access *cmd,
 				u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
 
 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 565ca7c..ed39cba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -158,9 +158,10 @@
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
 	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-	struct timespec64 now, then = ns_to_timespec64(delta);
+	struct timespec64 now, then;
 	unsigned long flags;
 
+	then = ns_to_timespec64(delta);
 	spin_lock_irqsave(&pf->tmreg_lock, flags);
 
 	i40e_ptp_read(pf, &now);
@@ -288,9 +289,7 @@
 		rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
 		pf->last_rx_ptp_check = jiffies;
 		pf->rx_hwtstamp_cleared++;
-		dev_warn(&vsi->back->pdev->dev,
-			 "%s: clearing Rx timestamp hang\n",
-			 __func__);
+		WARN_ONCE(1, "Detected Rx timestamp register hang\n");
 	}
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab..99a524d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,19 +636,21 @@
 
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring:  tx ring to clean
- * @budget:   how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+			      struct i40e_ring *tx_ring, int napi_budget)
 {
 	u16 i = tx_ring->next_to_clean;
 	struct i40e_tx_buffer *tx_buf;
 	struct i40e_tx_desc *tx_head;
 	struct i40e_tx_desc *tx_desc;
-	unsigned int total_packets = 0;
-	unsigned int total_bytes = 0;
+	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = vsi->work_limit;
 
 	tx_buf = &tx_ring->tx_bi[i];
 	tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -678,7 +680,7 @@
 		total_packets += tx_buf->gso_segs;
 
 		/* free the skb */
-		dev_consume_skb_any(tx_buf->skb);
+		napi_consume_skb(tx_buf->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -749,7 +751,7 @@
 
 		if (budget &&
 		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
-		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+		    !test_bit(__I40E_DOWN, &vsi->state) &&
 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
 			tx_ring->arm_wb = true;
 	}
@@ -767,7 +769,7 @@
 		smp_mb();
 		if (__netif_subqueue_stopped(tx_ring->netdev,
 					     tx_ring->queue_index) &&
-		   !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+		   !test_bit(__I40E_DOWN, &vsi->state)) {
 			netif_wake_subqueue(tx_ring->netdev,
 					    tx_ring->queue_index);
 			++tx_ring->tx_stats.restart_queue;
@@ -1022,7 +1024,6 @@
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 {
 	struct device *dev = rx_ring->dev;
-	struct i40e_rx_buffer *rx_bi;
 	unsigned long bi_size;
 	u16 i;
 
@@ -1030,48 +1031,22 @@
 	if (!rx_ring->rx_bi)
 		return;
 
-	if (ring_is_ps_enabled(rx_ring)) {
-		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
-		rx_bi = &rx_ring->rx_bi[0];
-		if (rx_bi->hdr_buf) {
-			dma_free_coherent(dev,
-					  bufsz,
-					  rx_bi->hdr_buf,
-					  rx_bi->dma);
-			for (i = 0; i < rx_ring->count; i++) {
-				rx_bi = &rx_ring->rx_bi[i];
-				rx_bi->dma = 0;
-				rx_bi->hdr_buf = NULL;
-			}
-		}
-	}
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
-		rx_bi = &rx_ring->rx_bi[i];
-		if (rx_bi->dma) {
-			dma_unmap_single(dev,
-					 rx_bi->dma,
-					 rx_ring->rx_buf_len,
-					 DMA_FROM_DEVICE);
-			rx_bi->dma = 0;
-		}
+		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
 		if (rx_bi->skb) {
 			dev_kfree_skb(rx_bi->skb);
 			rx_bi->skb = NULL;
 		}
-		if (rx_bi->page) {
-			if (rx_bi->page_dma) {
-				dma_unmap_page(dev,
-					       rx_bi->page_dma,
-					       PAGE_SIZE,
-					       DMA_FROM_DEVICE);
-				rx_bi->page_dma = 0;
-			}
-			__free_page(rx_bi->page);
-			rx_bi->page = NULL;
-			rx_bi->page_offset = 0;
-		}
+		if (!rx_bi->page)
+			continue;
+
+		dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_pages(rx_bi->page, 0);
+
+		rx_bi->page = NULL;
+		rx_bi->page_offset = 0;
 	}
 
 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -1080,6 +1055,7 @@
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
 
+	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 }
@@ -1104,37 +1080,6 @@
 }
 
 /**
- * i40e_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
-	struct device *dev = rx_ring->dev;
-	struct i40e_rx_buffer *rx_bi;
-	dma_addr_t dma;
-	void *buffer;
-	int buf_size;
-	int i;
-
-	if (rx_ring->rx_bi[0].hdr_buf)
-		return;
-	/* Make sure the buffers don't cross cache line boundaries. */
-	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
-	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
-				    &dma, GFP_KERNEL);
-	if (!buffer)
-		return;
-	for (i = 0; i < rx_ring->count; i++) {
-		rx_bi = &rx_ring->rx_bi[i];
-		rx_bi->dma = dma + (i * buf_size);
-		rx_bi->hdr_buf = buffer + (i * buf_size);
-	}
-}
-
-/**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
  *
@@ -1155,9 +1100,7 @@
 	u64_stats_init(&rx_ring->syncp);
 
 	/* Round up to nearest 4K */
-	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
-		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
-		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
@@ -1168,6 +1111,7 @@
 		goto err;
 	}
 
+	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
@@ -1186,6 +1130,10 @@
 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 {
 	rx_ring->next_to_use = val;
+
+	/* update next to alloc since we have filled the ring */
+	rx_ring->next_to_alloc = val;
+
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -1196,164 +1144,48 @@
 }
 
 /**
- * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
  *
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
  **/
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+				   struct i40e_rx_buffer *bi)
 {
-	u16 i = rx_ring->next_to_use;
-	union i40e_rx_desc *rx_desc;
-	struct i40e_rx_buffer *bi;
-	const int current_node = numa_node_id();
+	struct page *page = bi->page;
+	dma_addr_t dma;
 
-	/* do nothing if no valid netdev defined */
-	if (!rx_ring->netdev || !cleaned_count)
-		return false;
-
-	while (cleaned_count--) {
-		rx_desc = I40E_RX_DESC(rx_ring, i);
-		bi = &rx_ring->rx_bi[i];
-
-		if (bi->skb) /* desc is in use */
-			goto no_buffers;
-
-	/* If we've been moved to a different NUMA node, release the
-	 * page so we can get a new one on the current node.
-	 */
-		if (bi->page &&  page_to_nid(bi->page) != current_node) {
-			dma_unmap_page(rx_ring->dev,
-				       bi->page_dma,
-				       PAGE_SIZE,
-				       DMA_FROM_DEVICE);
-			__free_page(bi->page);
-			bi->page = NULL;
-			bi->page_dma = 0;
-			rx_ring->rx_stats.realloc_count++;
-		} else if (bi->page) {
-			rx_ring->rx_stats.page_reuse_count++;
-		}
-
-		if (!bi->page) {
-			bi->page = alloc_page(GFP_ATOMIC);
-			if (!bi->page) {
-				rx_ring->rx_stats.alloc_page_failed++;
-				goto no_buffers;
-			}
-			bi->page_dma = dma_map_page(rx_ring->dev,
-						    bi->page,
-						    0,
-						    PAGE_SIZE,
-						    DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
-				rx_ring->rx_stats.alloc_page_failed++;
-				__free_page(bi->page);
-				bi->page = NULL;
-				bi->page_dma = 0;
-				bi->page_offset = 0;
-				goto no_buffers;
-			}
-			bi->page_offset = 0;
-		}
-
-		/* Refresh the desc even if buffer_addrs didn't change
-		 * because each write-back erases this info.
-		 */
-		rx_desc->read.pkt_addr =
-				cpu_to_le64(bi->page_dma + bi->page_offset);
-		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page)) {
+		rx_ring->rx_stats.page_reuse_count++;
+		return true;
 	}
 
-	if (rx_ring->next_to_use != i)
-		i40e_release_rx_desc(rx_ring, i);
-
-	return false;
-
-no_buffers:
-	if (rx_ring->next_to_use != i)
-		i40e_release_rx_desc(rx_ring, i);
-
-	/* make sure to come back via polling to try again after
-	 * allocation failure
-	 */
-	return true;
-}
-
-/**
- * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
- *
- * Returns true if any errors on allocation
- **/
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
-{
-	u16 i = rx_ring->next_to_use;
-	union i40e_rx_desc *rx_desc;
-	struct i40e_rx_buffer *bi;
-	struct sk_buff *skb;
-
-	/* do nothing if no valid netdev defined */
-	if (!rx_ring->netdev || !cleaned_count)
+	/* alloc new page for storage */
+	page = dev_alloc_page();
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_page_failed++;
 		return false;
-
-	while (cleaned_count--) {
-		rx_desc = I40E_RX_DESC(rx_ring, i);
-		bi = &rx_ring->rx_bi[i];
-		skb = bi->skb;
-
-		if (!skb) {
-			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
-							  rx_ring->rx_buf_len,
-							  GFP_ATOMIC |
-							  __GFP_NOWARN);
-			if (!skb) {
-				rx_ring->rx_stats.alloc_buff_failed++;
-				goto no_buffers;
-			}
-			/* initialize queue mapping */
-			skb_record_rx_queue(skb, rx_ring->queue_index);
-			bi->skb = skb;
-		}
-
-		if (!bi->dma) {
-			bi->dma = dma_map_single(rx_ring->dev,
-						 skb->data,
-						 rx_ring->rx_buf_len,
-						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-				rx_ring->rx_stats.alloc_buff_failed++;
-				bi->dma = 0;
-				dev_kfree_skb(bi->skb);
-				bi->skb = NULL;
-				goto no_buffers;
-			}
-		}
-
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-		rx_desc->read.hdr_addr = 0;
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
 	}
 
-	if (rx_ring->next_to_use != i)
-		i40e_release_rx_desc(rx_ring, i);
+	/* map page for use */
+	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 
-	return false;
-
-no_buffers:
-	if (rx_ring->next_to_use != i)
-		i40e_release_rx_desc(rx_ring, i);
-
-	/* make sure to come back via polling to try again after
-	 * allocation failure
+	/* if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
 	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, 0);
+		rx_ring->rx_stats.alloc_page_failed++;
+		return false;
+	}
+
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = 0;
+
 	return true;
 }
 
@@ -1368,31 +1200,103 @@
 {
 	struct i40e_q_vector *q_vector = rx_ring->q_vector;
 
-	if (vlan_tag & VLAN_VID_MASK)
+	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+	    (vlan_tag & VLAN_VID_MASK))
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
 	napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
+ * i40e_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ *
+ * Returns false if all allocations were successful, true if any fail
+ **/
+bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+	u16 ntu = rx_ring->next_to_use;
+	union i40e_rx_desc *rx_desc;
+	struct i40e_rx_buffer *bi;
+
+	/* do nothing if no valid netdev defined */
+	if (!rx_ring->netdev || !cleaned_count)
+		return false;
+
+	rx_desc = I40E_RX_DESC(rx_ring, ntu);
+	bi = &rx_ring->rx_bi[ntu];
+
+	do {
+		if (!i40e_alloc_mapped_page(rx_ring, bi))
+			goto no_buffers;
+
+		/* Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+		rx_desc->read.hdr_addr = 0;
+
+		rx_desc++;
+		bi++;
+		ntu++;
+		if (unlikely(ntu == rx_ring->count)) {
+			rx_desc = I40E_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_bi;
+			ntu = 0;
+		}
+
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->wb.qword1.status_error_len = 0;
+
+		cleaned_count--;
+	} while (cleaned_count);
+
+	if (rx_ring->next_to_use != ntu)
+		i40e_release_rx_desc(rx_ring, ntu);
+
+	return false;
+
+no_buffers:
+	if (rx_ring->next_to_use != ntu)
+		i40e_release_rx_desc(rx_ring, ntu);
+
+	/* make sure to come back via polling to try again after
+	 * allocation failure
+	 */
+	return true;
+}
+
+/**
  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 				    struct sk_buff *skb,
-				    u32 rx_status,
-				    u32 rx_error,
-				    u16 rx_ptype)
+				    union i40e_rx_desc *rx_desc)
 {
-	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
-	bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+	struct i40e_rx_ptype_decoded decoded;
+	bool ipv4, ipv6, tunnel = false;
+	u32 rx_error, rx_status;
+	u8 ptype;
+	u64 qword;
+
+	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+		   I40E_RXD_QW1_ERROR_SHIFT;
+	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+		    I40E_RXD_QW1_STATUS_SHIFT;
+	decoded = decode_rx_desc_ptype(ptype);
 
 	skb->ip_summed = CHECKSUM_NONE;
 
+	skb_checksum_none_assert(skb);
+
 	/* Rx csum enabled and ip headers found? */
 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
 		return;
@@ -1438,14 +1342,13 @@
 	 * doesn't make it a hard requirement so if we have validated the
 	 * inner checksum report CHECKSUM_UNNECESSARY.
 	 */
-
-	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-		     (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-		     (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+	if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
+				  I40E_RX_PTYPE_INNER_PROT_UDP |
+				  I40E_RX_PTYPE_INNER_PROT_SCTP))
+		tunnel = true;
 
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+	skb->csum_level = tunnel ? 1 : 0;
 
 	return;
 
@@ -1459,7 +1362,7 @@
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
 {
 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -1487,11 +1390,11 @@
 				u8 rx_ptype)
 {
 	u32 hash;
-	const __le64 rss_mask  =
+	const __le64 rss_mask =
 		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
 			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
 
-	if (ring->netdev->features & NETIF_F_RXHASH)
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
 		return;
 
 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
@@ -1501,346 +1404,436 @@
 }
 
 /**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring:  rx ring to clean
- * @budget:   how many cleans we're allowed
+ * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
  *
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
  **/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+			     u8 rx_ptype)
 {
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
-	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	struct i40e_vsi *vsi = rx_ring->vsi;
-	u16 i = rx_ring->next_to_clean;
-	union i40e_rx_desc *rx_desc;
-	u32 rx_error, rx_status;
-	bool failure = false;
-	u8 rx_ptype;
-	u64 qword;
-	u32 copysize;
-
-	if (budget <= 0)
-		return 0;
-
-	do {
-		struct i40e_rx_buffer *rx_bi;
-		struct sk_buff *skb;
-		u16 vlan_tag;
-		/* return some buffers to hardware, one at a time is too slow */
-		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-			failure = failure ||
-				  i40e_alloc_rx_buffers_ps(rx_ring,
-							   cleaned_count);
-			cleaned_count = 0;
-		}
-
-		i = rx_ring->next_to_clean;
-		rx_desc = I40E_RX_DESC(rx_ring, i);
-		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
+	u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
 
-		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
-			break;
+	if (unlikely(rsyn)) {
+		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
+		rx_ring->last_rx_timestamp = jiffies;
+	}
 
-		/* This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we know the
-		 * DD bit is set.
-		 */
-		dma_rmb();
-		/* sync header buffer for reading */
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      rx_ring->rx_bi[0].dma,
-					      i * rx_ring->rx_hdr_len,
-					      rx_ring->rx_hdr_len,
-					      DMA_FROM_DEVICE);
-		if (i40e_rx_is_programming_status(qword)) {
-			i40e_clean_programming_status(rx_ring, rx_desc);
-			I40E_RX_INCREMENT(rx_ring, i);
-			continue;
-		}
-		rx_bi = &rx_ring->rx_bi[i];
-		skb = rx_bi->skb;
-		if (likely(!skb)) {
-			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
-							  rx_ring->rx_hdr_len,
-							  GFP_ATOMIC |
-							  __GFP_NOWARN);
-			if (!skb) {
-				rx_ring->rx_stats.alloc_buff_failed++;
-				failure = true;
-				break;
-			}
+	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 
-			/* initialize queue mapping */
-			skb_record_rx_queue(skb, rx_ring->queue_index);
-			/* we are reusing so sync this buffer for CPU use */
-			dma_sync_single_range_for_cpu(rx_ring->dev,
-						      rx_ring->rx_bi[0].dma,
-						      i * rx_ring->rx_hdr_len,
-						      rx_ring->rx_hdr_len,
-						      DMA_FROM_DEVICE);
-		}
-		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
-				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
-		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
-			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+	/* modifies the skb - consumes the enet header */
+	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
-		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
-		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
 
-		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-			   I40E_RXD_QW1_PTYPE_SHIFT;
-		/* sync half-page for reading */
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      rx_bi->page_dma,
-					      rx_bi->page_offset,
-					      PAGE_SIZE / 2,
-					      DMA_FROM_DEVICE);
-		prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
-		rx_bi->skb = NULL;
-		cleaned_count++;
-		copysize = 0;
-		if (rx_hbo || rx_sph) {
-			int len;
-
-			if (rx_hbo)
-				len = I40E_RX_HDR_SIZE;
-			else
-				len = rx_header_len;
-			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
-		} else if (skb->len == 0) {
-			int len;
-			unsigned char *va = page_address(rx_bi->page) +
-					    rx_bi->page_offset;
-
-			len = min(rx_packet_len, rx_ring->rx_hdr_len);
-			memcpy(__skb_put(skb, len), va, len);
-			copysize = len;
-			rx_packet_len -= len;
-		}
-		/* Get the rest of the data if this was a header split */
-		if (rx_packet_len) {
-			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-					rx_bi->page,
-					rx_bi->page_offset + copysize,
-					rx_packet_len, I40E_RXBUFFER_2048);
-
-			/* If the page count is more than 2, then both halves
-			 * of the page are used and we need to free it. Do it
-			 * here instead of in the alloc code. Otherwise one
-			 * of the half-pages might be released between now and
-			 * then, and we wouldn't know which one to use.
-			 * Don't call get_page and free_page since those are
-			 * both expensive atomic operations that just change
-			 * the refcount in opposite directions. Just give the
-			 * page to the stack; he can have our refcount.
-			 */
-			if (page_count(rx_bi->page) > 2) {
-				dma_unmap_page(rx_ring->dev,
-					       rx_bi->page_dma,
-					       PAGE_SIZE,
-					       DMA_FROM_DEVICE);
-				rx_bi->page = NULL;
-				rx_bi->page_dma = 0;
-				rx_ring->rx_stats.realloc_count++;
-			} else {
-				get_page(rx_bi->page);
-				/* switch to the other half-page here; the
-				 * allocation code programs the right addr
-				 * into HW. If we haven't used this half-page,
-				 * the address won't be changed, and HW can
-				 * just use it next time through.
-				 */
-				rx_bi->page_offset ^= PAGE_SIZE / 2;
-			}
-
-		}
-		I40E_RX_INCREMENT(rx_ring, i);
-
-		if (unlikely(
-		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
-			struct i40e_rx_buffer *next_buffer;
-
-			next_buffer = &rx_ring->rx_bi[i];
-			next_buffer->skb = skb;
-			rx_ring->rx_stats.non_eop_descs++;
-			continue;
-		}
-
-		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
-			dev_kfree_skb_any(skb);
-			continue;
-		}
-
-		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
-		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
-			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
-					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
-					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
-			rx_ring->last_rx_timestamp = jiffies;
-		}
-
-		/* probably a little skewed due to removing CRC */
-		total_rx_bytes += skb->len;
-		total_rx_packets++;
-
-		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
-		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
-
-		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
-			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
-			 : 0;
-#ifdef I40E_FCOE
-		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
-			dev_kfree_skb_any(skb);
-			continue;
-		}
-#endif
-		i40e_receive_skb(rx_ring, skb, vlan_tag);
-
-		rx_desc->wb.qword1.status_error_len = 0;
-
-	} while (likely(total_rx_packets < budget));
-
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->stats.packets += total_rx_packets;
-	rx_ring->stats.bytes += total_rx_bytes;
-	u64_stats_update_end(&rx_ring->syncp);
-	rx_ring->q_vector->rx.total_packets += total_rx_packets;
-	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
-
-	return failure ? budget : total_rx_packets;
+	skb_record_rx_queue(skb, rx_ring->queue_index);
 }
 
 /**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring:  rx ring to clean
- * @budget:   how many cleans we're allowed
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
  *
- * Returns number of packets cleaned
+ * This function is an i40e specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned char *va;
+	unsigned int pull_len;
+
+	/* it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lomem pool per
+	 * alloc_page(GFP_ATOMIC)
+	 */
+	va = skb_frag_address(frag);
+
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	frag->page_offset += pull_len;
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
  **/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+	/* place header in linear portion of buffer */
+	if (skb_is_nonlinear(skb))
+		i40e_pull_tail(rx_ring, skb);
+
+	/* if eth_skb_pad returns an error the skb was freed */
+	if (eth_skb_pad(skb))
+		return true;
+
+	return false;
+}
+
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+			       struct i40e_rx_buffer *old_buff)
+{
+	struct i40e_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_bi[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* transfer page from old buffer to new buffer */
+	*new_buff = *old_buff;
+}
+
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+			     struct i40e_rx_buffer *rx_buffer,
+			     union i40e_rx_desc *rx_desc,
+			     struct sk_buff *skb)
+{
+	struct page *page = rx_buffer->page;
+	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+	unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+			    I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
+#endif
+
+	/* will the data fit in the skb we allocated? if so, just
+	 * copy it as it is pretty small anyway
+	 */
+	if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+		/* page is not reserved, we can reuse buffer as-is */
+		if (likely(!i40e_page_is_reserved(page)))
+			return true;
+
+		/* this page cannot be reused so discard it */
+		__free_pages(page, 0);
+		return false;
+	}
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+			rx_buffer->page_offset, size, truesize);
+
+	/* avoid re-using remote pages */
+	if (unlikely(i40e_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely(page_count(page) != 1))
+		return false;
+
+	/* flip page offset to other buffer */
+	rx_buffer->page_offset ^= truesize;
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += truesize;
+
+	if (rx_buffer->page_offset > last_offset)
+		return false;
+#endif
+
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
+	 */
+	get_page(rx_buffer->page);
+
+	return true;
+}
+
+/**
+ * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
+ *
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ */
+static inline
+struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
+				     union i40e_rx_desc *rx_desc)
+{
+	struct i40e_rx_buffer *rx_buffer;
+	struct sk_buff *skb;
+	struct page *page;
+
+	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+	page = rx_buffer->page;
+	prefetchw(page);
+
+	skb = rx_buffer->skb;
+
+	if (likely(!skb)) {
+		void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+		/* prefetch first cache line of first page */
+		prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+		prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+		/* allocate a skb to store the frags */
+		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+				       I40E_RX_HDR_SIZE,
+				       GFP_ATOMIC | __GFP_NOWARN);
+		if (unlikely(!skb)) {
+			rx_ring->rx_stats.alloc_buff_failed++;
+			return NULL;
+		}
+
+		/* we will be copying header into skb->data in
+		 * pskb_may_pull so it is in our interest to prefetch
+		 * it now to avoid a possible cache miss
+		 */
+		prefetchw(skb->data);
+	} else {
+		rx_buffer->skb = NULL;
+	}
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      I40E_RXBUFFER_2048,
+				      DMA_FROM_DEVICE);
+
+	/* pull page into skb */
+	if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+		/* hand second half of page back to the ring */
+		i40e_reuse_rx_page(rx_ring, rx_buffer);
+		rx_ring->rx_stats.page_reuse_count++;
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+			       DMA_FROM_DEVICE);
+	}
+
+	/* clear contents of buffer_info */
+	rx_buffer->page = NULL;
+
+	return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+			    union i40e_rx_desc *rx_desc,
+			    struct sk_buff *skb)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+#define staterrlen rx_desc->wb.qword1.status_error_len
+	if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
+		i40e_clean_programming_status(rx_ring, rx_desc);
+		rx_ring->rx_bi[ntc].skb = skb;
+		return true;
+	}
+	/* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+		return false;
+
+	/* place skb in next buffer to be received */
+	rx_ring->rx_bi[ntc].skb = skb;
+	rx_ring->rx_stats.non_eop_descs++;
+
+	return true;
+}
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	struct i40e_vsi *vsi = rx_ring->vsi;
-	union i40e_rx_desc *rx_desc;
-	u32 rx_error, rx_status;
-	u16 rx_packet_len;
 	bool failure = false;
-	u8 rx_ptype;
-	u64 qword;
-	u16 i;
 
-	do {
-		struct i40e_rx_buffer *rx_bi;
+	while (likely(total_rx_packets < budget)) {
+		union i40e_rx_desc *rx_desc;
 		struct sk_buff *skb;
+		u32 rx_status;
 		u16 vlan_tag;
+		u8 rx_ptype;
+		u64 qword;
+
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
 			failure = failure ||
-				  i40e_alloc_rx_buffers_1buf(rx_ring,
-							     cleaned_count);
+				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
 			cleaned_count = 0;
 		}
 
-		i = rx_ring->next_to_clean;
-		rx_desc = I40E_RX_DESC(rx_ring, i);
+		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+			   I40E_RXD_QW1_PTYPE_SHIFT;
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-			I40E_RXD_QW1_STATUS_SHIFT;
+			    I40E_RXD_QW1_STATUS_SHIFT;
 
 		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
+		/* status_error_len will always be zero for unused descriptors
+		 * because it's cleared in cleanup, and overlaps with hdr_addr
+		 * which is always zero because packet split isn't used; if the
+		 * hardware wrote DD then it will be non-zero
+		 */
+		if (!rx_desc->wb.qword1.status_error_len)
+			break;
+
 		/* This memory barrier is needed to keep us from reading
 		 * any other fields out of the rx_desc until we know the
 		 * DD bit is set.
 		 */
 		dma_rmb();
 
-		if (i40e_rx_is_programming_status(qword)) {
-			i40e_clean_programming_status(rx_ring, rx_desc);
-			I40E_RX_INCREMENT(rx_ring, i);
-			continue;
-		}
-		rx_bi = &rx_ring->rx_bi[i];
-		skb = rx_bi->skb;
-		prefetch(skb->data);
+		skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
+		if (!skb)
+			break;
 
-		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
-		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
-
-		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-			   I40E_RXD_QW1_PTYPE_SHIFT;
-		rx_bi->skb = NULL;
 		cleaned_count++;
 
-		/* Get the header and possibly the whole packet
-		 * If this is an skb from previous receive dma will be 0
-		 */
-		skb_put(skb, rx_packet_len);
-		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
-				 DMA_FROM_DEVICE);
-		rx_bi->dma = 0;
-
-		I40E_RX_INCREMENT(rx_ring, i);
-
-		if (unlikely(
-		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
-			rx_ring->rx_stats.non_eop_descs++;
+		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
 			continue;
-		}
 
-		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		/* ERR_MASK will only have valid bits if EOP set, and
+		 * what we are doing here is actually checking
+		 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+		 * the error field
+		 */
+		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
 
-		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
-			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
-					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
-					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
-			rx_ring->last_rx_timestamp = jiffies;
-		}
+		if (i40e_cleanup_headers(rx_ring, skb))
+			continue;
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
-		total_rx_packets++;
 
-		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+		/* populate checksum, VLAN, and protocol */
+		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 
-		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
-
-		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
-			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
-			 : 0;
 #ifdef I40E_FCOE
-		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+		if (unlikely(
+		    i40e_rx_is_fcoe(rx_ptype) &&
+		    !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
 #endif
+
+		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-		rx_desc->wb.qword1.status_error_len = 0;
-	} while (likely(total_rx_packets < budget));
+		/* update budget accounting */
+		total_rx_packets++;
+	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
@@ -1849,6 +1842,7 @@
 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
+	/* guarantee a trip back through this routine if there was a failure */
 	return failure ? budget : total_rx_packets;
 }
 
@@ -1975,9 +1969,11 @@
 	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
 	i40e_for_each_ring(ring, q_vector->tx) {
-		clean_complete = clean_complete &&
-				 i40e_clean_tx_irq(ring, vsi->work_limit);
-		arm_wb = arm_wb || ring->arm_wb;
+		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+			clean_complete = false;
+			continue;
+		}
+		arm_wb |= ring->arm_wb;
 		ring->arm_wb = false;
 	}
 
@@ -1991,16 +1987,12 @@
 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
 	i40e_for_each_ring(ring, q_vector->rx) {
-		int cleaned;
-
-		if (ring_is_ps_enabled(ring))
-			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
-		else
-			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
 
 		work_done += cleaned;
-		/* if we didn't clean as many as budgeted, we must be done */
-		clean_complete = clean_complete && (budget_per_ring > cleaned);
+		/* if we clean as many as budgeted, we must not be done */
+		if (cleaned >= budget_per_ring)
+			clean_complete = false;
 	}
 
 	/* If work not completed, return budget and polling will return */
@@ -2247,15 +2239,13 @@
 
 /**
  * i40e_tso - set up the tso context descriptor
- * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
  * @hdr_len:  ptr to the size of the packet header
  * @cd_type_cmd_tso_mss: Quad Word 1
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
 {
 	u64 cd_cmd, cd_tso_len, cd_mss;
 	union {
@@ -2292,16 +2282,22 @@
 		ip.v6->payload_len = 0;
 	}
 
-	if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+					 SKB_GSO_GRE_CSUM |
+					 SKB_GSO_IPIP |
+					 SKB_GSO_SIT |
+					 SKB_GSO_UDP_TUNNEL |
 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+			l4.udp->len = 0;
+
 			/* determine offset of outer transport header */
 			l4_offset = l4.hdr - skb->data;
 
 			/* remove payload length from outer checksum */
-			paylen = (__force u16)l4.udp->check;
-			paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
-			l4.udp->check = ~csum_fold((__force __wsum)paylen);
+			paylen = skb->len - l4_offset;
+			csum_replace_by_diff(&l4.udp->check, htonl(paylen));
 		}
 
 		/* reset pointers to inner headers */
@@ -2321,9 +2317,8 @@
 	l4_offset = l4.hdr - skb->data;
 
 	/* remove payload length from inner checksum */
-	paylen = (__force u16)l4.tcp->check;
-	paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
-	l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+	paylen = skb->len - l4_offset;
+	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
 	/* compute length of segmentation header */
 	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
@@ -2405,7 +2400,7 @@
 		unsigned char *hdr;
 	} l4;
 	unsigned char *exthdr;
-	u32 offset, cmd = 0, tunnel = 0;
+	u32 offset, cmd = 0;
 	__be16 frag_off;
 	u8 l4_proto = 0;
 
@@ -2419,6 +2414,7 @@
 	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
 	if (skb->encapsulation) {
+		u32 tunnel = 0;
 		/* define outer network header type */
 		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -2436,13 +2432,6 @@
 						 &l4_proto, &frag_off);
 		}
 
-		/* compute outer L3 header size */
-		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
-			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
-		/* switch IP header pointer from outer to inner header */
-		ip.hdr = skb_inner_network_header(skb);
-
 		/* define outer transport */
 		switch (l4_proto) {
 		case IPPROTO_UDP:
@@ -2453,6 +2442,11 @@
 			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
 			break;
+		case IPPROTO_IPIP:
+		case IPPROTO_IPV6:
+			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+			l4.hdr = skb_inner_network_header(skb);
+			break;
 		default:
 			if (*tx_flags & I40E_TX_FLAGS_TSO)
 				return -1;
@@ -2461,12 +2455,20 @@
 			return 0;
 		}
 
+		/* compute outer L3 header size */
+		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+
 		/* compute tunnel header size */
 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
 			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 
 		/* indicate if we need to offload outer UDP header */
 		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
 			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
 
@@ -2594,35 +2596,34 @@
 }
 
 /**
- * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb:      send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
 bool __i40e_chk_linearize(struct sk_buff *skb)
 {
 	const struct skb_frag_struct *frag, *stale;
-	int gso_size, nr_frags, sum;
+	int nr_frags, sum;
 
-	/* check to see if TSO is enabled, if so we may get a repreive */
-	gso_size = skb_shinfo(skb)->gso_size;
-	if (unlikely(!gso_size))
-		return true;
-
-	/* no need to check if number of frags is less than 8 */
+	/* no need to check if number of frags is less than 7 */
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < I40E_MAX_BUFFER_TXD)
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
 		return false;
 
 	/* We need to walk through the list and validate that each group
 	 * of 6 fragments totals at least gso_size.  However we don't need
-	 * to perform such validation on the first or last 6 since the first
-	 * 6 cannot inherit any data from a descriptor before them, and the
-	 * last 6 cannot inherit any data from a descriptor after them.
+	 * to perform such validation on the last 6 since the last 6 cannot
+	 * inherit any data from a descriptor after them.
 	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
 	frag = &skb_shinfo(skb)->frags[0];
 
 	/* Initialize size to the negative value of gso_size minus 1.  We
@@ -2631,21 +2632,21 @@
 	 * descriptors for a single transmit as the header and previous
 	 * fragment are already consuming 2 descriptors.
 	 */
-	sum = 1 - gso_size;
+	sum = 1 - skb_shinfo(skb)->gso_size;
 
-	/* Add size of frags 1 through 5 to create our initial sum */
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
 
 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
 	stale = &skb_shinfo(skb)->frags[0];
 	for (;;) {
-		sum += skb_frag_size(++frag);
+		sum += skb_frag_size(frag++);
 
 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
@@ -2655,7 +2656,7 @@
 		if (!--nr_frags)
 			break;
 
-		sum -= skb_frag_size(++stale);
+		sum -= skb_frag_size(stale++);
 	}
 
 	return false;
@@ -2717,6 +2718,8 @@
 	tx_bi = first;
 
 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
 		if (dma_mapping_error(tx_ring->dev, dma))
 			goto dma_error;
 
@@ -2724,12 +2727,14 @@
 		dma_unmap_len_set(tx_bi, len, size);
 		dma_unmap_addr_set(tx_bi, dma, dma);
 
+		/* align size to end of page */
+		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
 		tx_desc->buffer_addr = cpu_to_le64(dma);
 
 		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
 			tx_desc->cmd_type_offset_bsz =
 				build_ctob(td_cmd, td_offset,
-					   I40E_MAX_DATA_PER_TXD, td_tag);
+					   max_data, td_tag);
 
 			tx_desc++;
 			i++;
@@ -2740,9 +2745,10 @@
 				i = 0;
 			}
 
-			dma += I40E_MAX_DATA_PER_TXD;
-			size -= I40E_MAX_DATA_PER_TXD;
+			dma += max_data;
+			size -= max_data;
 
+			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
 			tx_desc->buffer_addr = cpu_to_le64(dma);
 		}
 
@@ -2892,7 +2898,7 @@
 	if (i40e_chk_linearize(skb, count)) {
 		if (__skb_linearize(skb))
 			goto out_drop;
-		count = TXD_USE_COUNT(skb->len);
+		count = i40e_txd_use_count(skb->len);
 		tx_ring->tx_stats.tx_linearize++;
 	}
 
@@ -2923,7 +2929,7 @@
 	else if (protocol == htons(ETH_P_IPV6))
 		tx_flags |= I40E_TX_FLAGS_IPV6;
 
-	tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
+	tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
 
 	if (tso < 0)
 		goto out_drop;
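
The new comments on __i40e_chk_linearize() above describe a sliding-window test: with TSO, the header and the first chunk of segment payload already consume two descriptors, so any run of six fragments that does not add up to at least gso_size can push a single segment past the hardware's 8-buffer limit. A minimal stand-alone sketch of the same walk, assuming a plain array of fragment sizes instead of skb_shinfo() (all names here are illustrative, not driver symbols):

#include <stdbool.h>

#define MAX_BUFFER_TXD	8

static bool needs_linearize(const int *frag_sz, int nr_frags, int gso_size)
{
	int sum, i, stale = 0;

	/* fewer than 7 fragments always fits: header + payload + 6 frags */
	if (nr_frags < MAX_BUFFER_TXD - 1)
		return false;

	/* seed the running sum with fragments 0..4, offset by -(gso_size - 1) */
	sum = 1 - gso_size;
	for (i = 0; i < 5; i++)
		sum += frag_sz[i];

	/* slide: add the next fragment, then drop the one six slots behind */
	nr_frags -= MAX_BUFFER_TXD - 2;
	for (;;) {
		sum += frag_sz[i++];
		if (sum < 0)
			return true;	/* a six-fragment window < gso_size */
		if (!--nr_frags)
			break;
		sum -= frag_sz[stale++];
	}

	return false;
}
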
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc0..b78c810 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -102,8 +102,8 @@
 	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
 	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512   512    /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256   256
 #define I40E_RXBUFFER_2048  2048
 #define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
 #define I40E_RXBUFFER_4096  4096
@@ -114,9 +114,28 @@
  * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
  * this adds up to 512 bytes of extra data meaning the smallest allocation
  * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
  */
-#define I40E_RX_HDR_SIZE  I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+				     const u64 stat_err_bits)
+{
+	return !!(rx_desc->wb.qword1.status_error_len &
+		  cpu_to_le64(stat_err_bits));
+}
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define I40E_RX_BUFFER_WRITE	16	/* Must be power of 2 */
@@ -142,14 +161,41 @@
 		prefetch((n));				\
 	} while (0)
 
-#define i40e_rx_desc i40e_32byte_rx_desc
-
 #define I40E_MAX_BUFFER_TXD	8
 #define I40E_MIN_TX_LEN		17
-#define I40E_MAX_DATA_PER_TXD	8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE		4096
+#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED.  It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive.  It is used to
+ * approximate the number of descriptors used per linear buffer.  Note
+ * that this will overestimate in some cases as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
+ * the error should not impact things much as large buffers usually mean
+ * we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+	const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+	unsigned int adjust = ~(u32)0;
+
+	/* if we rounded up on the reciprocal pull down the adjustment */
+	if ((max * reciprocal) > adjust)
+		adjust = ~(u32)(reciprocal - 1);
+
+	return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
 
 /* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
@@ -184,10 +230,8 @@
 
 struct i40e_rx_buffer {
 	struct sk_buff *skb;
-	void *hdr_buf;
 	dma_addr_t dma;
 	struct page *page;
-	dma_addr_t page_dma;
 	unsigned int page_offset;
 };
 
@@ -216,22 +260,18 @@
 enum i40e_ring_state_t {
 	__I40E_TX_FDIR_INIT_DONE,
 	__I40E_TX_XPS_INIT_DONE,
-	__I40E_RX_PS_ENABLED,
-	__I40E_RX_16BYTE_DESC_ENABLED,
 };
 
-#define ring_is_ps_enabled(ring) \
-	test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
-	set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
-	clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
-	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
-	set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
-	clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT      0
+#define I40E_RX_DTYPE_HEADER_SPLIT  1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
+#define I40E_RX_SPLIT_L2      0x1
+#define I40E_RX_SPLIT_IP      0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP    0x8
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
@@ -258,16 +298,7 @@
 
 	u16 count;			/* Number of descriptors */
 	u16 reg_idx;			/* HW register index of the ring */
-	u16 rx_hdr_len;
 	u16 rx_buf_len;
-	u8  dtype;
-#define I40E_RX_DTYPE_NO_SPLIT      0
-#define I40E_RX_DTYPE_HEADER_SPLIT  1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
-#define I40E_RX_SPLIT_L2      0x1
-#define I40E_RX_SPLIT_IP      0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP    0x8
 
 	/* used in interrupt processing */
 	u16 next_to_use;
@@ -301,6 +332,7 @@
 	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
 
 	struct rcu_head rcu;		/* to avoid race on free */
+	u16 next_to_alloc;
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
@@ -324,9 +356,7 @@
 #define i40e_for_each_ring(pos, head) \
 	for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -377,7 +407,7 @@
 	int count = 0, size = skb_headlen(skb);
 
 	for (;;) {
-		count += TXD_USE_COUNT(size);
+		count += i40e_txd_use_count(size);
 
 		if (!nr_frags--)
 			break;
@@ -413,10 +443,24 @@
  **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-	/* we can only support up to 8 data buffers for a single send */
-	if (likely(count <= I40E_MAX_BUFFER_TXD))
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
 		return false;
 
-	return __i40e_chk_linearize(skb);
+	if (skb_is_gso(skb))
+		return __i40e_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
+}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
 }
 #endif /* _I40E_TXRX_H_ */
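
The i40e_txd_use_count() helper above replaces TXD_USE_COUNT() with a reciprocal multiply because the aligned 12K limit is not a power of two. The reciprocal is rounded at build time and the adjustment term makes the result behave like DIV_ROUND_UP(). A quick user-space check of that claim, assuming sizes stay in the range an skb can actually present (the rounded reciprocal is exact up to roughly 1 MB); the names below are stand-ins, not driver symbols:

#include <stdio.h>
#include <stdint.h>

#define ALIGNED_MAX	12288u	/* stands in for I40E_MAX_DATA_PER_TXD_ALIGNED */

static unsigned int txd_use_count(unsigned int size)
{
	const unsigned int max = ALIGNED_MAX;
	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
	unsigned int adjust = ~(uint32_t)0;

	/* if we rounded up on the reciprocal pull down the adjustment */
	if ((max * reciprocal) > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
}

int main(void)
{
	unsigned int size;

	for (size = 1; size <= 256 * 1024; size++) {
		unsigned int want = (size + ALIGNED_MAX - 1) / ALIGNED_MAX;

		if (txd_use_count(size) != want) {
			printf("mismatch at %u: %u != %u\n",
			       size, txd_use_count(size), want);
			return 1;
		}
	}
	printf("reciprocal divide matches DIV_ROUND_UP for all tested sizes\n");
	return 0;
}
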
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 3335f9d..bd5f13b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,7 +36,7 @@
 #include "i40e_devids.h"
 
 /* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
 
 #define I40E_MAX_VSI_QP			16
 #define I40E_MAX_VF_VSI			3
@@ -275,6 +275,11 @@
 #define I40E_FLEX10_STATUS_DCC_ERROR	0x1
 #define I40E_FLEX10_STATUS_VC_MODE	0x2
 
+	bool sec_rev_disabled;
+	bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED	0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED	0x2
+
 	bool mgmt_cem;
 	bool ieee_1588;
 	bool iwarp;
@@ -549,6 +554,8 @@
 	enum i40e_nvmupd_state nvmupd_state;
 	struct i40e_aq_desc nvm_wb_desc;
 	struct i40e_virt_mem nvm_buff;
+	bool nvm_release_on_done;
+	u16 nvm_wait_opcode;
 
 	/* HMC info */
 	struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1533,4 +1540,37 @@
 
 /* RSS Hash Table Size */
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_512	0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT		47
+#define I40E_L3_SRC_MASK		(0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT		43
+#define I40E_L3_V6_SRC_MASK		(0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT		35
+#define I40E_L3_DST_MASK		(0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT		35
+#define I40E_L3_V6_DST_MASK		(0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT		34
+#define I40E_L4_SRC_MASK		(0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT		33
+#define I40E_L4_DST_MASK		(0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT		31
+#define I40E_VERIFY_TAG_MASK		(0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT		13
+#define I40E_FLEX_50_MASK		(0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT		12
+#define I40E_FLEX_51_MASK		(0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT		11
+#define I40E_FLEX_52_MASK		(0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT		10
+#define I40E_FLEX_53_MASK		(0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT		9
+#define I40E_FLEX_54_MASK		(0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT		8
+#define I40E_FLEX_55_MASK		(0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT		7
+#define I40E_FLEX_56_MASK		(0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT		6
+#define I40E_FLEX_57_MASK		(0x1ULL << I40E_FLEX_57_SHIFT)
 #endif /* _I40E_TYPE_H_ */
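
The I40E_MASK() change near the top of i40e_type.h is more than cosmetic: parenthesizing the arguments keeps compound expressions from binding to the shift, and the u32 cast keeps a mask shifted into bit 31 from becoming a negative int that sign-extends when the result is widened. A small illustration, assuming a user-space build with uint32_t standing in for the kernel's u32:

#include <stdio.h>
#include <stdint.h>

#define OLD_MASK(mask, shift)	(mask << shift)
#define NEW_MASK(mask, shift)	((uint32_t)(mask) << (shift))

int main(void)
{
	/* unparenthesized arguments: "1 | 2" binds to the shift first */
	printf("old: %u  new: %u\n",
	       OLD_MASK(1 | 2, 4),	/* expands to 1 | (2 << 4) == 33 */
	       NEW_MASK(1 | 2, 4));	/* (1 | 2) << 4 == 48 */

	/* bit 31: shifting a plain int is signed overflow, and the
	 * (possibly negative) result sign-extends when widened to 64 bits
	 */
	printf("old: %llx  new: %llx\n",
	       (unsigned long long)OLD_MASK(1, 31),
	       (unsigned long long)NEW_MASK(1, 31));
	return 0;
}
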
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index ab866cf..c92a3bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -80,10 +80,15 @@
 	I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
 	I40E_VIRTCHNL_OP_GET_STATS = 15,
 	I40E_VIRTCHNL_OP_FCOE = 16,
-	I40E_VIRTCHNL_OP_EVENT = 17,
+	I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
 	I40E_VIRTCHNL_OP_IWARP = 20,
 	I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
 	I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
+	I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+	I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+	I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+	I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
@@ -157,6 +162,7 @@
 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF		0x00080000
 
 struct i40e_virtchnl_vf_resource {
 	u16 num_vsis;
@@ -165,8 +171,8 @@
 	u16 max_mtu;
 
 	u32 vf_offload_flags;
-	u32 max_fcoe_contexts;
-	u32 max_fcoe_filters;
+	u32 rss_key_size;
+	u32 rss_lut_size;
 
 	struct i40e_virtchnl_vsi_resource vsi_res[1];
 };
@@ -325,6 +331,39 @@
  * PF replies with struct i40e_eth_stats in an external buffer.
  */
 
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+	u16 vsi_id;
+	u16 key_len;
+	u8 key[1];         /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+	u16 vsi_id;
+	u16 lut_entries;
+	u8 lut[1];        /* RSS lookup table */
+};
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+	u64 hena;
+};
+
 /* I40E_VIRTCHNL_OP_EVENT
  * PF sends this message to inform the VF driver of events that may affect it.
  * No direct response is expected from the VF, though it may generate other
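
The two new RSS structs above end in a one-element array (key[1], lut[1]), the pre-C99 idiom for a variable-length trailer, so the on-the-wire message is sizeof(struct) plus the payload length minus one byte; the PF-side validator in i40e_vc_validate_vf_msg() below does the matching "valid_len += key_len - 1" arithmetic. A sketch of how a VF-side sender might size and fill the key message; send_to_pf() is a placeholder for the real admin-queue send path, not a driver call:

static int vf_send_rss_key(u16 vsi_id, const u8 *key, u16 key_len)
{
	struct i40e_virtchnl_rss_key *vrk;
	u16 len = sizeof(*vrk) + key_len - 1;	/* key[1] already holds one byte */
	int ret;

	vrk = kzalloc(len, GFP_KERNEL);
	if (!vrk)
		return -ENOMEM;

	vrk->vsi_id = vsi_id;
	vrk->key_len = key_len;		/* must match the PF's rss_key_size */
	memcpy(vrk->key, key, key_len);

	/* placeholder for the driver's admin-queue send path */
	ret = send_to_pf(I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);

	kfree(vrk);
	return ret;
}
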
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 816c6bb..1fcafcf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -48,7 +48,7 @@
 	int i;
 
 	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
-		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
 		/* Not all vfs are enabled so skip the ones that are not */
 		if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
 		    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
@@ -63,7 +63,7 @@
 }
 
 /**
- * i40e_vc_notify_link_state
+ * i40e_vc_notify_vf_link_state
  * @vf: pointer to the VF structure
  *
  * send a link status message to a single VF
@@ -74,7 +74,7 @@
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
-	int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
 
 	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
 	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
@@ -141,7 +141,7 @@
 	    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
 		return;
 
-	abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
 
 	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
 	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
@@ -590,7 +590,7 @@
 		}
 		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 
-		/* set splitalways mode 10b */
+		/* set split mode 10b */
 		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
 	}
 
@@ -665,8 +665,6 @@
 		goto error_alloc_vsi_res;
 	}
 	if (type == I40E_VSI_SRIOV) {
-		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
 		vf->lan_vsi_idx = vsi->idx;
 		vf->lan_vsi_id = vsi->id;
 		/* If the port VLAN has been configured and then the
@@ -688,12 +686,6 @@
 					 "Could not add MAC filter %pM for VF %d\n",
 					vf->default_lan_addr.addr, vf->vf_id);
 		}
-		f = i40e_add_filter(vsi, brdcast,
-				    vf->port_vlan_id ? vf->port_vlan_id : -1,
-				    true, false);
-		if (!f)
-			dev_info(&pf->pdev->dev,
-				 "Could not allocate VF broadcast filter\n");
 		spin_unlock_bh(&vsi->mac_filter_list_lock);
 	}
 
@@ -860,7 +852,11 @@
 	if (ret)
 		goto error_alloc;
 	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
-	set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+	if (vf->trusted)
+		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+	else
+		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 
 	/* store the total qps number for the runtime
 	 * VF req validation
@@ -917,9 +913,9 @@
 {
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_hw *hw = &pf->hw;
+	u32 reg, reg_idx, bit_idx;
 	bool rsd = false;
 	int i;
-	u32 reg;
 
 	if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
 		return;
@@ -937,6 +933,11 @@
 		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 		i40e_flush(hw);
 	}
+	/* clear the VFLR bit in GLGEN_VFLRSTAT */
+	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+	i40e_flush(hw);
 
 	if (i40e_quiesce_vf_pci(vf))
 		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
@@ -988,6 +989,7 @@
 	}
 	/* tell the VF the reset is done */
 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+
 	i40e_flush(hw);
 	clear_bit(__I40E_VF_DISABLE, &pf->state);
 }
@@ -1227,8 +1229,8 @@
 	/* single place to detect unsuccessful return values */
 	if (v_retval) {
 		vf->num_invalid_msgs++;
-		dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n",
-			vf->vf_id, v_opcode, v_retval);
+		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+			 vf->vf_id, v_opcode, v_retval);
 		if (vf->num_invalid_msgs >
 		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
 			dev_err(&pf->pdev->dev,
@@ -1246,9 +1248,9 @@
 	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,	v_opcode, v_retval,
 					msg, msglen, NULL);
 	if (aq_ret) {
-		dev_err(&pf->pdev->dev,
-			"Unable to send the message to VF %d aq_err %d\n",
-			vf->vf_id, pf->hw.aq.asq_last_status);
+		dev_info(&pf->pdev->dev,
+			 "Unable to send the message to VF %d aq_err %d\n",
+			 vf->vf_id, pf->hw.aq.asq_last_status);
 		return -EIO;
 	}
 
@@ -1306,8 +1308,8 @@
 	struct i40e_pf *pf = vf->pf;
 	i40e_status aq_ret = 0;
 	struct i40e_vsi *vsi;
-	int i = 0, len = 0;
 	int num_vsis = 1;
+	int len = 0;
 	int ret;
 
 	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
@@ -1342,12 +1344,16 @@
 		set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
 	}
 
-	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
-		if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
-			vfres->vf_offload_flags |=
-				I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+	if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;
 	} else {
-		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+		if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+		    (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+			vfres->vf_offload_flags |=
+					I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+		else
+			vfres->vf_offload_flags |=
+					I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
 	}
 
 	if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
@@ -1356,8 +1362,16 @@
 				I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
 	}
 
-	if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+	if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+			dev_err(&pf->pdev->dev,
+				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
+				 vf->vf_id);
+			ret = I40E_ERR_PARAM;
+			goto err;
+		}
 		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+	}
 
 	if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
 		if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
@@ -1368,16 +1382,18 @@
 	vfres->num_vsis = num_vsis;
 	vfres->num_queue_pairs = vf->num_queue_pairs;
 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
+	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+
 	if (vf->lan_vsi_idx) {
-		vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
-		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
-		vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
+		vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
 		/* VFs only use TC 0 */
-		vfres->vsi_res[i].qset_handle
+		vfres->vsi_res[0].qset_handle
 					  = le16_to_cpu(vsi->info.qs_handle[0]);
-		ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
 				vf->default_lan_addr.addr);
-		i++;
 	}
 	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 
@@ -1407,6 +1423,25 @@
 }
 
 /**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f;
+	int num_vlans = 0;
+
+	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+			num_vlans++;
+	}
+
+	return num_vlans;
+}
+
+/**
  * i40e_vc_config_promiscuous_mode_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
@@ -1422,22 +1457,128 @@
 	    (struct i40e_virtchnl_promisc_info *)msg;
 	struct i40e_pf *pf = vf->pf;
 	struct i40e_hw *hw = &pf->hw;
-	struct i40e_vsi *vsi;
+	struct i40e_mac_filter *f;
+	i40e_status aq_ret = 0;
 	bool allmulti = false;
-	i40e_status aq_ret;
+	struct i40e_vsi *vsi;
+	bool alluni = false;
+	int aq_err = 0;
 
 	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
 	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
-	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
-	    (vsi->type != I40E_VSI_FCOE)) {
+	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
 		aq_ret = I40E_ERR_PARAM;
 		goto error_param;
 	}
+	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+		dev_err(&pf->pdev->dev,
+			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
+			vf->vf_id);
+		/* Lie to the VF on purpose. */
+		aq_ret = 0;
+		goto error_param;
+	}
+	/* Multicast promiscuous handling */
 	if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
 		allmulti = true;
-	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
-						       allmulti, NULL);
+
+	if (vf->port_vlan_id) {
+		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
+							    allmulti,
+							    vf->port_vlan_id,
+							    NULL);
+	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+		list_for_each_entry(f, &vsi->mac_filter_list, list) {
+			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+				continue;
+			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
+								    vsi->seid,
+								    allmulti,
+								    f->vlan,
+								    NULL);
+			aq_err = pf->hw.aq.asq_last_status;
+			if (aq_ret) {
+				dev_err(&pf->pdev->dev,
+					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
+					f->vlan,
+					i40e_stat_str(&pf->hw, aq_ret),
+					i40e_aq_str(&pf->hw, aq_err));
+				break;
+			}
+		}
+	} else {
+		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+							       allmulti, NULL);
+		aq_err = pf->hw.aq.asq_last_status;
+		if (aq_ret) {
+			dev_err(&pf->pdev->dev,
+				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+				vf->vf_id,
+				i40e_stat_str(&pf->hw, aq_ret),
+				i40e_aq_str(&pf->hw, aq_err));
+			goto error_param_int;
+		}
+	}
+
+	if (!aq_ret) {
+		dev_info(&pf->pdev->dev,
+			 "VF %d successfully set multicast promiscuous mode\n",
+			 vf->vf_id);
+		if (allmulti)
+			set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+		else
+			clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+	}
+
+	if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+		alluni = true;
+	if (vf->port_vlan_id) {
+		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
+							    alluni,
+							    vf->port_vlan_id,
+							    NULL);
+	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+		list_for_each_entry(f, &vsi->mac_filter_list, list) {
+			aq_ret = 0;
+			if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
+				aq_ret =
+				i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
+								   vsi->seid,
+								   alluni,
+								   f->vlan,
+								   NULL);
+				aq_err = pf->hw.aq.asq_last_status;
+			}
+			if (aq_ret)
+				dev_err(&pf->pdev->dev,
+					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
+					f->vlan,
+					i40e_stat_str(&pf->hw, aq_ret),
+					i40e_aq_str(&pf->hw, aq_err));
+		}
+	} else {
+		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+							     alluni, NULL,
+							     true);
+		aq_err = pf->hw.aq.asq_last_status;
+		if (aq_ret)
+			dev_err(&pf->pdev->dev,
+				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
+				vf->vf_id, info->flags,
+				i40e_stat_str(&pf->hw, aq_ret),
+				i40e_aq_str(&pf->hw, aq_err));
+	}
+
+error_param_int:
+	if (!aq_ret) {
+		dev_info(&pf->pdev->dev,
+			 "VF %d successfully set unicast promiscuous mode\n",
+			 vf->vf_id);
+		if (alluni)
+			set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+		else
+			clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+	}
 
 error_param:
 	/* send the response to the VF */
@@ -1688,6 +1829,10 @@
 				      (u8 *)&stats, sizeof(stats));
 }
 
+/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_VLAN_PER_VF 8
+
 /**
  * i40e_check_vf_permission
  * @vf: pointer to the VF info
@@ -1708,15 +1853,22 @@
 		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
 		ret = I40E_ERR_INVALID_MAC_ADDR;
 	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
 		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
 		/* If the host VMM administrator has set the VF MAC address
 		 * administratively via the ndo_set_vf_mac command then deny
 		 * permission to the VF to add or delete unicast MAC addresses.
+		 * Unless the VF is privileged and then it can do whatever.
 		 * The VF may request to set the MAC address filter already
 		 * assigned to it so do not return an error in that case.
 		 */
 		dev_err(&pf->pdev->dev,
-			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+			"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
+		ret = -EPERM;
+	} else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
+		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+		dev_err(&pf->pdev->dev,
+			"VF is not trusted, switch the VF to trusted to add more functionality\n");
 		ret = -EPERM;
 	}
 	return ret;
@@ -1741,7 +1893,6 @@
 	int i;
 
 	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
 		ret = I40E_ERR_PARAM;
 		goto error_param;
@@ -1780,6 +1931,8 @@
 			ret = I40E_ERR_PARAM;
 			spin_unlock_bh(&vsi->mac_filter_list_lock);
 			goto error_param;
+		} else {
+			vf->num_mac++;
 		}
 	}
 	spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1815,7 +1968,6 @@
 	int i;
 
 	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
 		ret = I40E_ERR_PARAM;
 		goto error_param;
@@ -1839,6 +1991,8 @@
 			ret = I40E_ERR_INVALID_MAC_ADDR;
 			spin_unlock_bh(&vsi->mac_filter_list_lock);
 			goto error_param;
+		} else {
+			vf->num_mac--;
 		}
 
 	spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1873,8 +2027,13 @@
 	i40e_status aq_ret = 0;
 	int i;
 
+	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+		dev_err(&pf->pdev->dev,
+			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
+		goto error_param;
+	}
 	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
 		aq_ret = I40E_ERR_PARAM;
 		goto error_param;
@@ -1898,6 +2057,19 @@
 	for (i = 0; i < vfl->num_elements; i++) {
 		/* add new VLAN filter */
 		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+		if (!ret)
+			vf->num_vlan++;
+
+		if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+							   true,
+							   vfl->vlan_id[i],
+							   NULL);
+		if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+							   true,
+							   vfl->vlan_id[i],
+							   NULL);
 
 		if (ret)
 			dev_err(&pf->pdev->dev,
@@ -1929,7 +2101,6 @@
 	int i;
 
 	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
-	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
 		aq_ret = I40E_ERR_PARAM;
 		goto error_param;
@@ -1950,6 +2121,19 @@
 
 	for (i = 0; i < vfl->num_elements; i++) {
 		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+		if (!ret)
+			vf->num_vlan--;
+
+		if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+							   false,
+							   vfl->vlan_id[i],
+							   NULL);
+		if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+							   false,
+							   vfl->vlan_id[i],
+							   NULL);
 
 		if (ret)
 			dev_err(&pf->pdev->dev,
@@ -2029,6 +2213,135 @@
 }
 
 /**
+ * i40e_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS key
+ **/
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_rss_key *vrk =
+		(struct i40e_virtchnl_rss_key *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	u16 vsi_id = vrk->vsi_id;
+	i40e_status aq_ret = 0;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
+err:
+	/* send the response to the VF */
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS LUT
+ **/
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_rss_lut *vrl =
+		(struct i40e_virtchnl_rss_lut *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_vsi *vsi = NULL;
+	u16 vsi_id = vrl->vsi_id;
+	i40e_status aq_ret = 0;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
+	/* send the response to the VF */
+err:
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+				       aq_ret);
+}
+
+/**
+ * i40e_vc_get_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Return the RSS HENA bits allowed by the hardware
+ **/
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_rss_hena *vrh = NULL;
+	struct i40e_pf *pf = vf->pf;
+	i40e_status aq_ret = 0;
+	int len = 0;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+	len = sizeof(struct i40e_virtchnl_rss_hena);
+
+	vrh = kzalloc(len, GFP_KERNEL);
+	if (!vrh) {
+		aq_ret = I40E_ERR_NO_MEMORY;
+		len = 0;
+		goto err;
+	}
+	vrh->hena = i40e_pf_get_default_rss_hena(pf);
+err:
+	/* send the response back to the VF */
+	aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+					aq_ret, (u8 *)vrh, len);
+	return aq_ret;
+}
+
+/**
+ * i40e_vc_set_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Set the RSS HENA bits for the VF
+ **/
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+	struct i40e_virtchnl_rss_hena *vrh =
+		(struct i40e_virtchnl_rss_hena *)msg;
+	struct i40e_pf *pf = vf->pf;
+	struct i40e_hw *hw = &pf->hw;
+	i40e_status aq_ret = 0;
+
+	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+		aq_ret = I40E_ERR_PARAM;
+		goto err;
+	}
+	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
+			  (u32)(vrh->hena >> 32));
+
+	/* send the response to the VF */
+err:
+	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+				       aq_ret);
+}
+
+/**
  * i40e_vc_validate_vf_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
@@ -2041,7 +2354,7 @@
 				   u32 v_retval, u8 *msg, u16 msglen)
 {
 	bool err_msg_format = false;
-	int valid_len;
+	int valid_len = 0;
 
 	/* Check if VF is disabled. */
 	if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
@@ -2053,13 +2366,10 @@
 		valid_len = sizeof(struct i40e_virtchnl_version_info);
 		break;
 	case I40E_VIRTCHNL_OP_RESET_VF:
-		valid_len = 0;
 		break;
 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
 		if (VF_IS_V11(vf))
 			valid_len = sizeof(u32);
-		else
-			valid_len = 0;
 		break;
 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
 		valid_len = sizeof(struct i40e_virtchnl_txq_info);
@@ -2149,6 +2459,35 @@
 				sizeof(struct i40e_virtchnl_iwarp_qv_info));
 		}
 		break;
+	case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+		valid_len = sizeof(struct i40e_virtchnl_rss_key);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_rss_key *vrk =
+				(struct i40e_virtchnl_rss_key *)msg;
+			if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += vrk->key_len - 1;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+		valid_len = sizeof(struct i40e_virtchnl_rss_lut);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_rss_lut *vrl =
+				(struct i40e_virtchnl_rss_lut *)msg;
+			if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += vrl->lut_entries - 1;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+		break;
+	case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+		valid_len = sizeof(struct i40e_virtchnl_rss_hena);
+		break;
 	/* These are always errors coming from the VF. */
 	case I40E_VIRTCHNL_OP_EVENT:
 	case I40E_VIRTCHNL_OP_UNKNOWN:
@@ -2175,11 +2514,11 @@
  * called from the common aeq/arq handler to
  * process request from VF
  **/
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 			   u32 v_retval, u8 *msg, u16 msglen)
 {
 	struct i40e_hw *hw = &pf->hw;
-	unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
 	struct i40e_vf *vf;
 	int ret;
 
@@ -2247,6 +2586,19 @@
 	case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
 		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
 		break;
+	case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+		ret = i40e_vc_config_rss_key(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+		break;
+	case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+		break;
+
 	case I40E_VIRTCHNL_OP_UNKNOWN:
 	default:
 		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -2268,9 +2620,10 @@
  **/
 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 {
-	u32 reg, reg_idx, bit_idx, vf_id;
 	struct i40e_hw *hw = &pf->hw;
+	u32 reg, reg_idx, bit_idx;
 	struct i40e_vf *vf;
+	int vf_id;
 
 	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
 		return 0;
@@ -2292,13 +2645,9 @@
 		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
 		vf = &pf->vf[vf_id];
 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
-		if (reg & BIT(bit_idx)) {
-			/* clear the bit in GLGEN_VFLRSTAT */
-			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-
-			if (!test_bit(__I40E_DOWN, &pf->state))
-				i40e_reset_vf(vf, true);
-		}
+		if (reg & BIT(bit_idx))
+			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
+			i40e_reset_vf(vf, true);
 	}
 
 	return 0;
@@ -2762,3 +3111,45 @@
 out:
 	return ret;
 }
+
+/**
+ * i40e_ndo_set_vf_trust
+ * @netdev: network interface device structure of the pf
+ * @vf_id: VF identifier
+ * @setting: trust setting
+ *
+ * Enable or disable VF trust setting
+ **/
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_pf *pf = np->vsi->back;
+	struct i40e_vf *vf;
+	int ret = 0;
+
+	/* validate the request */
+	if (vf_id >= pf->num_alloc_vfs) {
+		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+		return -EINVAL;
+	}
+
+	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
+		return -EINVAL;
+	}
+
+	vf = &pf->vf[vf_id];
+
+	if (!vf)
+		return -EINVAL;
+	if (setting == vf->trusted)
+		goto out;
+
+	vf->trusted = setting;
+	i40e_vc_notify_vf_reset(vf);
+	i40e_reset_vf(vf, false);
+	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+		 vf_id, setting ? "" : "un");
+out:
+	return ret;
+}
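
Both the VF reset path and i40e_vc_process_vflr_event() above use the same decomposition of an absolute VF id into a GLGEN_VFLRSTAT register and bit; with the clearing of that bit now folded into i40e_reset_vf(), the event handler only has to test it. The indexing, written out on its own as a sketch (rd32()/wr32() are the driver's register accessors):

	u32 abs_vf_id = hw->func_caps.vf_base_id + vf->vf_id;
	u32 reg_idx = abs_vf_id / 32;	/* which GLGEN_VFLRSTAT register */
	u32 bit_idx = abs_vf_id % 32;	/* which bit within that register */

	if (rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)) & BIT(bit_idx)) {
		/* the VF saw a function-level reset; i40e_reset_vf() will
		 * clear the bit after quiescing the VF
		 */
		i40e_reset_vf(vf, true);
	}
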
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index e7b2fba..8751741 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -61,6 +61,8 @@
 	I40E_VF_STAT_IWARPENA,
 	I40E_VF_STAT_FCOEENA,
 	I40E_VF_STAT_DISABLED,
+	I40E_VF_STAT_MC_PROMISC,
+	I40E_VF_STAT_UC_PROMISC,
 };
 
 /* VF capabilities */
@@ -75,7 +77,7 @@
 	struct i40e_pf *pf;
 
 	/* VF id in the PF space */
-	u16 vf_id;
+	s16 vf_id;
 	/* all VF vsis connect to the same parent */
 	enum i40e_switch_element_types parent_type;
 	struct i40e_virtchnl_version_info vf_ver;
@@ -88,6 +90,7 @@
 	struct i40e_virtchnl_ether_addr default_fcoe_addr;
 	u16 port_vlan_id;
 	bool pf_set_mac;	/* The VMM admin set the VF MAC address */
+	bool trusted;
 
 	/* VSI indices - actual VSI pointers are maintained in the PF structure
 	 * When assigned, these will be non-zero, because VSI 0 is always
@@ -108,6 +111,9 @@
 	bool link_forced;
 	bool link_up;		/* only valid if VF link is forced */
 	bool spoofchk;
+	u16 num_mac;
+	u16 num_vlan;
+
 	/* RDMA Client */
 	struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
 };
@@ -115,7 +121,7 @@
 void i40e_free_vfs(struct i40e_pf *pf);
 int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 			   u32 v_retval, u8 *msg, u16 msglen);
 int i40e_vc_process_vflr_event(struct i40e_pf *pf);
 void i40e_reset_vf(struct i40e_vf *vf, bool flr);
@@ -127,6 +133,7 @@
 			      int vf_id, u16 vlan_id, u8 qos);
 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
 		       int max_tx_rate);
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
 int i40e_ndo_get_vf_config(struct net_device *netdev,
 			   int vf_id, struct ifla_vf_info *ivi);
 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index a3eae5d..1f9b3b5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -97,7 +97,6 @@
 	u32 fw_build;                   /* firmware build number */
 	u16 api_maj_ver;                /* api major version */
 	u16 api_min_ver;                /* api minor version */
-	bool nvm_release_on_done;
 
 	struct mutex asq_mutex; /* Send queue lock */
 	struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index aad8d62..3114dcf 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -78,17 +78,17 @@
 #define I40E_AQ_FLAG_EI_SHIFT	14
 #define I40E_AQ_FLAG_FE_SHIFT	15
 
-#define I40E_AQ_FLAG_DD		(1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
-#define I40E_AQ_FLAG_CMP	(1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
-#define I40E_AQ_FLAG_ERR	(1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
-#define I40E_AQ_FLAG_VFE	(1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
-#define I40E_AQ_FLAG_LB		(1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
-#define I40E_AQ_FLAG_RD		(1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
-#define I40E_AQ_FLAG_VFC	(1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
-#define I40E_AQ_FLAG_BUF	(1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI		(1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
-#define I40E_AQ_FLAG_EI		(1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
-#define I40E_AQ_FLAG_FE		(1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
+#define I40E_AQ_FLAG_DD		BIT(I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP	BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR	BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE	BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB		BIT(I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD		BIT(I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC	BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF	BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI		BIT(I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI		BIT(I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE		BIT(I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
 
 /* error codes */
 enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@
 	i40e_aqc_opc_resume_port_tx				= 0x041C,
 	i40e_aqc_opc_configure_partition_bw			= 0x041D,
 
-	/* hmc */
-	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
-	i40e_aqc_opc_set_hmc_resource_profile	= 0x0501,
-
 	/* phy commands*/
 	i40e_aqc_opc_get_phy_abilities		= 0x0600,
 	i40e_aqc_opc_set_phy_config		= 0x0601,
@@ -426,6 +422,7 @@
 #define I40E_AQ_CAP_ID_SDP		0x0062
 #define I40E_AQ_CAP_ID_MDIO		0x0063
 #define I40E_AQ_CAP_ID_WSR_PROT		0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT		0x0080
 #define I40E_AQ_CAP_ID_FLEX10		0x00F1
 #define I40E_AQ_CAP_ID_CEM		0x00F2
 
@@ -1582,27 +1579,6 @@
 
 I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
 
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
-	u8	pm_profile;
-	u8	pe_vf_enabled;
-	u8	reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
-	/* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
-	I40E_HMC_PROFILE_DEFAULT	= 1,
-	I40E_HMC_PROFILE_FAVOR_VF	= 2,
-	I40E_HMC_PROFILE_EQUAL		= 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK	0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK	0x3F
-
 /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
 
 /* set in param0 for get phy abilities to report qualified modules */
@@ -1649,11 +1625,11 @@
 
 enum i40e_aq_link_speed {
 	I40E_LINK_SPEED_UNKNOWN	= 0,
-	I40E_LINK_SPEED_100MB	= (1 << I40E_LINK_SPEED_100MB_SHIFT),
-	I40E_LINK_SPEED_1GB	= (1 << I40E_LINK_SPEED_1000MB_SHIFT),
-	I40E_LINK_SPEED_10GB	= (1 << I40E_LINK_SPEED_10GB_SHIFT),
-	I40E_LINK_SPEED_40GB	= (1 << I40E_LINK_SPEED_40GB_SHIFT),
-	I40E_LINK_SPEED_20GB	= (1 << I40E_LINK_SPEED_20GB_SHIFT)
+	I40E_LINK_SPEED_100MB	= BIT(I40E_LINK_SPEED_100MB_SHIFT),
+	I40E_LINK_SPEED_1GB	= BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+	I40E_LINK_SPEED_10GB	= BIT(I40E_LINK_SPEED_10GB_SHIFT),
+	I40E_LINK_SPEED_40GB	= BIT(I40E_LINK_SPEED_40GB_SHIFT),
+	I40E_LINK_SPEED_20GB	= BIT(I40E_LINK_SPEED_20GB_SHIFT)
 };
 
 struct i40e_aqc_module_desc {
@@ -1924,9 +1900,9 @@
 /* Used for 0x0704 as well as for 0x0705 commands */
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT		1
 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
-				(1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+				BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
 #define I40E_AQ_ANVM_FEATURE		0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD	(1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD	BIT(FEATURE_OR_IMMEDIATE_SHIFT)
 struct i40e_aqc_nvm_config_data_feature {
 	__le16 feature_id;
 #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY		0x01
@@ -2195,7 +2171,7 @@
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
 struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID		BIT(15)
 #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT	0
 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK	(0x3FF << \
 					I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2215,14 +2191,14 @@
 I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
 
 struct  i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID		BIT(15)
 #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT	0
 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK	(0x3FF << \
 					I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
 	__le16	vsi_id;
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT	0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK	(0x1 << \
-					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+				BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
 
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI	0
 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF	1
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 771ac6a..8f64204 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -58,6 +58,8 @@
 		case I40E_DEV_ID_SFP_X722:
 		case I40E_DEV_ID_1G_BASE_T_X722:
 		case I40E_DEV_ID_10G_BASE_T_X722:
+		case I40E_DEV_ID_SFP_I_X722:
+		case I40E_DEV_ID_QSFP_I_X722:
 			hw->mac.type = I40E_MAC_X722;
 			break;
 		case I40E_DEV_ID_X722_VF:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index ca8b58c..d34972b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -44,6 +44,8 @@
 #define I40E_DEV_ID_SFP_X722		0x37D0
 #define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
 #define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_SFP_I_X722		0x37D3
+#define I40E_DEV_ID_QSFP_I_X722		0x37D4
 #define I40E_DEV_ID_X722_VF		0x37CD
 #define I40E_DEV_ID_X722_VF_HV		0x37D9
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c..fd7dae46 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -155,19 +155,21 @@
 
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring:  tx ring to clean
- * @budget:   how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+			      struct i40e_ring *tx_ring, int napi_budget)
 {
 	u16 i = tx_ring->next_to_clean;
 	struct i40e_tx_buffer *tx_buf;
 	struct i40e_tx_desc *tx_head;
 	struct i40e_tx_desc *tx_desc;
-	unsigned int total_packets = 0;
-	unsigned int total_bytes = 0;
+	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = vsi->work_limit;
 
 	tx_buf = &tx_ring->tx_bi[i];
 	tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -197,7 +199,7 @@
 		total_packets += tx_buf->gso_segs;
 
 		/* free the skb */
-		dev_kfree_skb_any(tx_buf->skb);
+		napi_consume_skb(tx_buf->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -267,7 +269,7 @@
 
 		if (budget &&
 		    ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
-		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+		    !test_bit(__I40E_DOWN, &vsi->state) &&
 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
 			tx_ring->arm_wb = true;
 	}
@@ -285,7 +287,7 @@
 		smp_mb();
 		if (__netif_subqueue_stopped(tx_ring->netdev,
 					     tx_ring->queue_index) &&
-		   !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+		   !test_bit(__I40E_DOWN, &vsi->state)) {
 			netif_wake_subqueue(tx_ring->netdev,
 					    tx_ring->queue_index);
 			++tx_ring->tx_stats.restart_queue;
@@ -494,7 +496,6 @@
 void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
 {
 	struct device *dev = rx_ring->dev;
-	struct i40e_rx_buffer *rx_bi;
 	unsigned long bi_size;
 	u16 i;
 
@@ -502,48 +503,22 @@
 	if (!rx_ring->rx_bi)
 		return;
 
-	if (ring_is_ps_enabled(rx_ring)) {
-		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
-		rx_bi = &rx_ring->rx_bi[0];
-		if (rx_bi->hdr_buf) {
-			dma_free_coherent(dev,
-					  bufsz,
-					  rx_bi->hdr_buf,
-					  rx_bi->dma);
-			for (i = 0; i < rx_ring->count; i++) {
-				rx_bi = &rx_ring->rx_bi[i];
-				rx_bi->dma = 0;
-				rx_bi->hdr_buf = NULL;
-			}
-		}
-	}
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
-		rx_bi = &rx_ring->rx_bi[i];
-		if (rx_bi->dma) {
-			dma_unmap_single(dev,
-					 rx_bi->dma,
-					 rx_ring->rx_buf_len,
-					 DMA_FROM_DEVICE);
-			rx_bi->dma = 0;
-		}
+		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
 		if (rx_bi->skb) {
 			dev_kfree_skb(rx_bi->skb);
 			rx_bi->skb = NULL;
 		}
-		if (rx_bi->page) {
-			if (rx_bi->page_dma) {
-				dma_unmap_page(dev,
-					       rx_bi->page_dma,
-					       PAGE_SIZE,
-					       DMA_FROM_DEVICE);
-				rx_bi->page_dma = 0;
-			}
-			__free_page(rx_bi->page);
-			rx_bi->page = NULL;
-			rx_bi->page_offset = 0;
-		}
+		if (!rx_bi->page)
+			continue;
+
+		dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_pages(rx_bi->page, 0);
+
+		rx_bi->page = NULL;
+		rx_bi->page_offset = 0;
 	}
 
 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -552,6 +527,7 @@
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
 
+	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 }
@@ -576,37 +552,6 @@
 }
 
 /**
- * i40evf_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
-	struct device *dev = rx_ring->dev;
-	struct i40e_rx_buffer *rx_bi;
-	dma_addr_t dma;
-	void *buffer;
-	int buf_size;
-	int i;
-
-	if (rx_ring->rx_bi[0].hdr_buf)
-		return;
-	/* Make sure the buffers don't cross cache line boundaries. */
-	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
-	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
-				    &dma, GFP_KERNEL);
-	if (!buffer)
-		return;
-	for (i = 0; i < rx_ring->count; i++) {
-		rx_bi = &rx_ring->rx_bi[i];
-		rx_bi->dma = dma + (i * buf_size);
-		rx_bi->hdr_buf = buffer + (i * buf_size);
-	}
-}
-
-/**
  * i40evf_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
  *
@@ -627,9 +572,7 @@
 	u64_stats_init(&rx_ring->syncp);
 
 	/* Round up to nearest 4K */
-	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
-		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
-		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
@@ -640,6 +583,7 @@
 		goto err;
 	}
 
+	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
@@ -658,6 +602,10 @@
 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 {
 	rx_ring->next_to_use = val;
+
+	/* update next to alloc since we have filled the ring */
+	rx_ring->next_to_alloc = val;
+
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -668,164 +616,48 @@
 }
 
 /**
- * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
  *
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
  **/
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+				   struct i40e_rx_buffer *bi)
 {
-	u16 i = rx_ring->next_to_use;
-	union i40e_rx_desc *rx_desc;
-	struct i40e_rx_buffer *bi;
-	const int current_node = numa_node_id();
+	struct page *page = bi->page;
+	dma_addr_t dma;
 
-	/* do nothing if no valid netdev defined */
-	if (!rx_ring->netdev || !cleaned_count)
-		return false;
-
-	while (cleaned_count--) {
-		rx_desc = I40E_RX_DESC(rx_ring, i);
-		bi = &rx_ring->rx_bi[i];
-
-		if (bi->skb) /* desc is in use */
-			goto no_buffers;
-
-	/* If we've been moved to a different NUMA node, release the
-	 * page so we can get a new one on the current node.
-	 */
-		if (bi->page &&  page_to_nid(bi->page) != current_node) {
-			dma_unmap_page(rx_ring->dev,
-				       bi->page_dma,
-				       PAGE_SIZE,
-				       DMA_FROM_DEVICE);
-			__free_page(bi->page);
-			bi->page = NULL;
-			bi->page_dma = 0;
-			rx_ring->rx_stats.realloc_count++;
-		} else if (bi->page) {
-			rx_ring->rx_stats.page_reuse_count++;
-		}
-
-		if (!bi->page) {
-			bi->page = alloc_page(GFP_ATOMIC);
-			if (!bi->page) {
-				rx_ring->rx_stats.alloc_page_failed++;
-				goto no_buffers;
-			}
-			bi->page_dma = dma_map_page(rx_ring->dev,
-						    bi->page,
-						    0,
-						    PAGE_SIZE,
-						    DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
-				rx_ring->rx_stats.alloc_page_failed++;
-				__free_page(bi->page);
-				bi->page = NULL;
-				bi->page_dma = 0;
-				bi->page_offset = 0;
-				goto no_buffers;
-			}
-			bi->page_offset = 0;
-		}
-
-		/* Refresh the desc even if buffer_addrs didn't change
-		 * because each write-back erases this info.
-		 */
-		rx_desc->read.pkt_addr =
-				cpu_to_le64(bi->page_dma + bi->page_offset);
-		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page)) {
+		rx_ring->rx_stats.page_reuse_count++;
+		return true;
 	}
 
-	if (rx_ring->next_to_use != i)
-		i40e_release_rx_desc(rx_ring, i);
-
-	return false;
-
-no_buffers:
-	if (rx_ring->next_to_use != i)
-		i40e_release_rx_desc(rx_ring, i);
-
-	/* make sure to come back via polling to try again after
-	 * allocation failure
-	 */
-	return true;
-}
-
-/**
- * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
- *
- * Returns true if any errors on allocation
- **/
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
-{
-	u16 i = rx_ring->next_to_use;
-	union i40e_rx_desc *rx_desc;
-	struct i40e_rx_buffer *bi;
-	struct sk_buff *skb;
-
-	/* do nothing if no valid netdev defined */
-	if (!rx_ring->netdev || !cleaned_count)
+	/* alloc new page for storage */
+	page = dev_alloc_page();
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_page_failed++;
 		return false;
-
-	while (cleaned_count--) {
-		rx_desc = I40E_RX_DESC(rx_ring, i);
-		bi = &rx_ring->rx_bi[i];
-		skb = bi->skb;
-
-		if (!skb) {
-			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
-							  rx_ring->rx_buf_len,
-							  GFP_ATOMIC |
-							  __GFP_NOWARN);
-			if (!skb) {
-				rx_ring->rx_stats.alloc_buff_failed++;
-				goto no_buffers;
-			}
-			/* initialize queue mapping */
-			skb_record_rx_queue(skb, rx_ring->queue_index);
-			bi->skb = skb;
-		}
-
-		if (!bi->dma) {
-			bi->dma = dma_map_single(rx_ring->dev,
-						 skb->data,
-						 rx_ring->rx_buf_len,
-						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-				rx_ring->rx_stats.alloc_buff_failed++;
-				bi->dma = 0;
-				dev_kfree_skb(bi->skb);
-				bi->skb = NULL;
-				goto no_buffers;
-			}
-		}
-
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-		rx_desc->read.hdr_addr = 0;
-		i++;
-		if (i == rx_ring->count)
-			i = 0;
 	}
 
-	if (rx_ring->next_to_use != i)
-		i40e_release_rx_desc(rx_ring, i);
+	/* map page for use */
+	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 
-	return false;
-
-no_buffers:
-	if (rx_ring->next_to_use != i)
-		i40e_release_rx_desc(rx_ring, i);
-
-	/* make sure to come back via polling to try again after
-	 * allocation failure
+	/* if mapping failed, free memory back to system since
+	 * there isn't much point in holding memory we can't use
 	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, 0);
+		rx_ring->rx_stats.alloc_page_failed++;
+		return false;
+	}
+
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = 0;
+
 	return true;
 }
 
@@ -840,31 +672,103 @@
 {
 	struct i40e_q_vector *q_vector = rx_ring->q_vector;
 
-	if (vlan_tag & VLAN_VID_MASK)
+	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+	    (vlan_tag & VLAN_VID_MASK))
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
 	napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
+ * i40evf_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ *
+ * Returns false if all allocations were successful, true if any fail
+ **/
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+	u16 ntu = rx_ring->next_to_use;
+	union i40e_rx_desc *rx_desc;
+	struct i40e_rx_buffer *bi;
+
+	/* do nothing if no valid netdev defined */
+	if (!rx_ring->netdev || !cleaned_count)
+		return false;
+
+	rx_desc = I40E_RX_DESC(rx_ring, ntu);
+	bi = &rx_ring->rx_bi[ntu];
+
+	do {
+		if (!i40e_alloc_mapped_page(rx_ring, bi))
+			goto no_buffers;
+
+		/* Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+		rx_desc->read.hdr_addr = 0;
+
+		rx_desc++;
+		bi++;
+		ntu++;
+		if (unlikely(ntu == rx_ring->count)) {
+			rx_desc = I40E_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_bi;
+			ntu = 0;
+		}
+
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->wb.qword1.status_error_len = 0;
+
+		cleaned_count--;
+	} while (cleaned_count);
+
+	if (rx_ring->next_to_use != ntu)
+		i40e_release_rx_desc(rx_ring, ntu);
+
+	return false;
+
+no_buffers:
+	if (rx_ring->next_to_use != ntu)
+		i40e_release_rx_desc(rx_ring, ntu);
+
+	/* make sure to come back via polling to try again after
+	 * allocation failure
+	 */
+	return true;
+}
+
+/**
  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 				    struct sk_buff *skb,
-				    u32 rx_status,
-				    u32 rx_error,
-				    u16 rx_ptype)
+				    union i40e_rx_desc *rx_desc)
 {
-	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
-	bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+	struct i40e_rx_ptype_decoded decoded;
+	bool ipv4, ipv6, tunnel = false;
+	u32 rx_error, rx_status;
+	u8 ptype;
+	u64 qword;
+
+	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+		   I40E_RXD_QW1_ERROR_SHIFT;
+	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+		    I40E_RXD_QW1_STATUS_SHIFT;
+	decoded = decode_rx_desc_ptype(ptype);
 
 	skb->ip_summed = CHECKSUM_NONE;
 
+	skb_checksum_none_assert(skb);
+
 	/* Rx csum enabled and ip headers found? */
 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
 		return;
@@ -910,14 +814,13 @@
 	 * doesn't make it a hard requirement so if we have validated the
 	 * inner checksum report CHECKSUM_UNNECESSARY.
 	 */
-
-	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
-		     (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
-	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
-		     (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+	if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
+				  I40E_RX_PTYPE_INNER_PROT_UDP |
+				  I40E_RX_PTYPE_INNER_PROT_SCTP))
+		tunnel = true;
 
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+	skb->csum_level = tunnel ? 1 : 0;
 
 	return;
 
@@ -931,7 +834,7 @@
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
 {
 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -959,7 +862,7 @@
 				u8 rx_ptype)
 {
 	u32 hash;
-	const __le64 rss_mask  =
+	const __le64 rss_mask =
 		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
 			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
 
@@ -973,313 +876,411 @@
 }
 
 /**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring:  rx ring to clean
- * @budget:   how many cleans we're allowed
+ * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
  *
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
  **/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
+			       union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+			       u8 rx_ptype)
 {
-	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
-	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	struct i40e_vsi *vsi = rx_ring->vsi;
-	u16 i = rx_ring->next_to_clean;
-	union i40e_rx_desc *rx_desc;
-	u32 rx_error, rx_status;
-	bool failure = false;
-	u8 rx_ptype;
-	u64 qword;
-	u32 copysize;
+	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 
-	do {
-		struct i40e_rx_buffer *rx_bi;
-		struct sk_buff *skb;
-		u16 vlan_tag;
-		/* return some buffers to hardware, one at a time is too slow */
-		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-			failure = failure ||
-				  i40evf_alloc_rx_buffers_ps(rx_ring,
-							     cleaned_count);
-			cleaned_count = 0;
-		}
+	/* modifies the skb - consumes the enet header */
+	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
-		i = rx_ring->next_to_clean;
-		rx_desc = I40E_RX_DESC(rx_ring, i);
-		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-			I40E_RXD_QW1_STATUS_SHIFT;
+	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
 
-		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
-			break;
-
-		/* This memory barrier is needed to keep us from reading
-		 * any other fields out of the rx_desc until we know the
-		 * DD bit is set.
-		 */
-		dma_rmb();
-		/* sync header buffer for reading */
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      rx_ring->rx_bi[0].dma,
-					      i * rx_ring->rx_hdr_len,
-					      rx_ring->rx_hdr_len,
-					      DMA_FROM_DEVICE);
-		rx_bi = &rx_ring->rx_bi[i];
-		skb = rx_bi->skb;
-		if (likely(!skb)) {
-			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
-							  rx_ring->rx_hdr_len,
-							  GFP_ATOMIC |
-							  __GFP_NOWARN);
-			if (!skb) {
-				rx_ring->rx_stats.alloc_buff_failed++;
-				failure = true;
-				break;
-			}
-
-			/* initialize queue mapping */
-			skb_record_rx_queue(skb, rx_ring->queue_index);
-			/* we are reusing so sync this buffer for CPU use */
-			dma_sync_single_range_for_cpu(rx_ring->dev,
-						      rx_ring->rx_bi[0].dma,
-						      i * rx_ring->rx_hdr_len,
-						      rx_ring->rx_hdr_len,
-						      DMA_FROM_DEVICE);
-		}
-		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
-				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
-		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
-			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
-		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
-		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
-
-		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-			   I40E_RXD_QW1_PTYPE_SHIFT;
-		/* sync half-page for reading */
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      rx_bi->page_dma,
-					      rx_bi->page_offset,
-					      PAGE_SIZE / 2,
-					      DMA_FROM_DEVICE);
-		prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
-		rx_bi->skb = NULL;
-		cleaned_count++;
-		copysize = 0;
-		if (rx_hbo || rx_sph) {
-			int len;
-
-			if (rx_hbo)
-				len = I40E_RX_HDR_SIZE;
-			else
-				len = rx_header_len;
-			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
-		} else if (skb->len == 0) {
-			int len;
-			unsigned char *va = page_address(rx_bi->page) +
-					    rx_bi->page_offset;
-
-			len = min(rx_packet_len, rx_ring->rx_hdr_len);
-			memcpy(__skb_put(skb, len), va, len);
-			copysize = len;
-			rx_packet_len -= len;
-		}
-		/* Get the rest of the data if this was a header split */
-		if (rx_packet_len) {
-			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-					rx_bi->page,
-					rx_bi->page_offset + copysize,
-					rx_packet_len, I40E_RXBUFFER_2048);
-
-			/* If the page count is more than 2, then both halves
-			 * of the page are used and we need to free it. Do it
-			 * here instead of in the alloc code. Otherwise one
-			 * of the half-pages might be released between now and
-			 * then, and we wouldn't know which one to use.
-			 * Don't call get_page and free_page since those are
-			 * both expensive atomic operations that just change
-			 * the refcount in opposite directions. Just give the
-			 * page to the stack; he can have our refcount.
-			 */
-			if (page_count(rx_bi->page) > 2) {
-				dma_unmap_page(rx_ring->dev,
-					       rx_bi->page_dma,
-					       PAGE_SIZE,
-					       DMA_FROM_DEVICE);
-				rx_bi->page = NULL;
-				rx_bi->page_dma = 0;
-				rx_ring->rx_stats.realloc_count++;
-			} else {
-				get_page(rx_bi->page);
-				/* switch to the other half-page here; the
-				 * allocation code programs the right addr
-				 * into HW. If we haven't used this half-page,
-				 * the address won't be changed, and HW can
-				 * just use it next time through.
-				 */
-				rx_bi->page_offset ^= PAGE_SIZE / 2;
-			}
-
-		}
-		I40E_RX_INCREMENT(rx_ring, i);
-
-		if (unlikely(
-		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
-			struct i40e_rx_buffer *next_buffer;
-
-			next_buffer = &rx_ring->rx_bi[i];
-			next_buffer->skb = skb;
-			rx_ring->rx_stats.non_eop_descs++;
-			continue;
-		}
-
-		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
-			dev_kfree_skb_any(skb);
-			continue;
-		}
-
-		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
-		/* probably a little skewed due to removing CRC */
-		total_rx_bytes += skb->len;
-		total_rx_packets++;
-
-		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
-		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
-
-		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
-			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
-			 : 0;
-#ifdef I40E_FCOE
-		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
-			dev_kfree_skb_any(skb);
-			continue;
-		}
-#endif
-		i40e_receive_skb(rx_ring, skb, vlan_tag);
-
-		rx_desc->wb.qword1.status_error_len = 0;
-
-	} while (likely(total_rx_packets < budget));
-
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->stats.packets += total_rx_packets;
-	rx_ring->stats.bytes += total_rx_bytes;
-	u64_stats_update_end(&rx_ring->syncp);
-	rx_ring->q_vector->rx.total_packets += total_rx_packets;
-	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
-
-	return failure ? budget : total_rx_packets;
+	skb_record_rx_queue(skb, rx_ring->queue_index);
 }
 
 /**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring:  rx ring to clean
- * @budget:   how many cleans we're allowed
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
  *
- * Returns number of packets cleaned
+ * This function is an i40e specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned char *va;
+	unsigned int pull_len;
+
+	/* it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lomem pool per
+	 * working with pages allocated out of the lowmem pool per
+	 */
+	va = skb_frag_address(frag);
+
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	frag->page_offset += pull_len;
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
  **/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+	/* place header in linear portion of buffer */
+	if (skb_is_nonlinear(skb))
+		i40e_pull_tail(rx_ring, skb);
+
+	/* if eth_skb_pad returns an error the skb was freed */
+	if (eth_skb_pad(skb))
+		return true;
+
+	return false;
+}
+
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+			       struct i40e_rx_buffer *old_buff)
+{
+	struct i40e_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_bi[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/* transfer page from old buffer to new buffer */
+	*new_buff = *old_buff;
+}
+
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+			     struct i40e_rx_buffer *rx_buffer,
+			     union i40e_rx_desc *rx_desc,
+			     struct sk_buff *skb)
+{
+	struct page *page = rx_buffer->page;
+	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+	unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+			    I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
+#endif
+
+	/* will the data fit in the skb we allocated? if so, just
+	 * copy it as it is pretty small anyway
+	 */
+	if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+		/* page is not reserved, we can reuse buffer as-is */
+		if (likely(!i40e_page_is_reserved(page)))
+			return true;
+
+		/* this page cannot be reused so discard it */
+		__free_pages(page, 0);
+		return false;
+	}
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+			rx_buffer->page_offset, size, truesize);
+
+	/* avoid re-using remote pages */
+	if (unlikely(i40e_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely(page_count(page) != 1))
+		return false;
+
+	/* flip page offset to other buffer */
+	rx_buffer->page_offset ^= truesize;
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += truesize;
+
+	if (rx_buffer->page_offset > last_offset)
+		return false;
+#endif
+
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
+	 */
+	get_page(rx_buffer->page);
+
+	return true;
+}
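
For reference, the half-page reuse scheme above (the rx_buffer->page_offset ^= truesize branch taken when PAGE_SIZE < 8192) simply alternates between the two 2K halves of a 4K page, so the half just handed to the stack can rest while the other half goes back to hardware. A minimal userspace sketch of that offset arithmetic, not driver code; the constant mirrors I40E_RXBUFFER_2048:

#include <stdio.h>

#define TRUESIZE 2048u	/* I40E_RXBUFFER_2048: half of a 4K page */

int main(void)
{
	unsigned int page_offset = 0;
	int rx;

	for (rx = 0; rx < 4; rx++) {
		printf("rx %d uses bytes [%u, %u) of the page\n",
		       rx, page_offset, page_offset + TRUESIZE);
		page_offset ^= TRUESIZE;	/* flip to the other half for reuse */
	}
	return 0;
}
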
+
+/**
+ * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
+ *
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ */
+static inline
+struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
+				       union i40e_rx_desc *rx_desc)
+{
+	struct i40e_rx_buffer *rx_buffer;
+	struct sk_buff *skb;
+	struct page *page;
+
+	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+	page = rx_buffer->page;
+	prefetchw(page);
+
+	skb = rx_buffer->skb;
+
+	if (likely(!skb)) {
+		void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+		/* prefetch first cache line of first page */
+		prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+		prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+		/* allocate a skb to store the frags */
+		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+				       I40E_RX_HDR_SIZE,
+				       GFP_ATOMIC | __GFP_NOWARN);
+		if (unlikely(!skb)) {
+			rx_ring->rx_stats.alloc_buff_failed++;
+			return NULL;
+		}
+
+		/* we will be copying header into skb->data in
+		 * pskb_may_pull so it is in our interest to prefetch
+		 * it now to avoid a possible cache miss
+		 */
+		prefetchw(skb->data);
+	} else {
+		rx_buffer->skb = NULL;
+	}
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      I40E_RXBUFFER_2048,
+				      DMA_FROM_DEVICE);
+
+	/* pull page into skb */
+	if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+		/* hand second half of page back to the ring */
+		i40e_reuse_rx_page(rx_ring, rx_buffer);
+		rx_ring->rx_stats.page_reuse_count++;
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+			       DMA_FROM_DEVICE);
+	}
+
+	/* clear contents of buffer_info */
+	rx_buffer->page = NULL;
+
+	return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer,
+ * this function exits returning false; otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+			    union i40e_rx_desc *rx_desc,
+			    struct sk_buff *skb)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+		return false;
+
+	/* place skb in next buffer to be received */
+	rx_ring->rx_bi[ntc].skb = skb;
+	rx_ring->rx_stats.non_eop_descs++;
+
+	return true;
+}
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	struct i40e_vsi *vsi = rx_ring->vsi;
-	union i40e_rx_desc *rx_desc;
-	u32 rx_error, rx_status;
-	u16 rx_packet_len;
 	bool failure = false;
-	u8 rx_ptype;
-	u64 qword;
-	u16 i;
 
-	do {
-		struct i40e_rx_buffer *rx_bi;
+	while (likely(total_rx_packets < budget)) {
+		union i40e_rx_desc *rx_desc;
 		struct sk_buff *skb;
+		u32 rx_status;
 		u16 vlan_tag;
+		u8 rx_ptype;
+		u64 qword;
+
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
 			failure = failure ||
-				  i40evf_alloc_rx_buffers_1buf(rx_ring,
-							       cleaned_count);
+				  i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
 			cleaned_count = 0;
 		}
 
-		i = rx_ring->next_to_clean;
-		rx_desc = I40E_RX_DESC(rx_ring, i);
+		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+			   I40E_RXD_QW1_PTYPE_SHIFT;
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
-			I40E_RXD_QW1_STATUS_SHIFT;
+			    I40E_RXD_QW1_STATUS_SHIFT;
 
 		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
+		/* status_error_len will always be zero for unused descriptors
+		 * because it's cleared in cleanup, and overlaps with hdr_addr
+		 * which is always zero because packet split isn't used. If the
+		 * hardware wrote DD then it will be non-zero.
+		 */
+		if (!rx_desc->wb.qword1.status_error_len)
+			break;
+
 		/* This memory barrier is needed to keep us from reading
 		 * any other fields out of the rx_desc until we know the
 		 * DD bit is set.
 		 */
 		dma_rmb();
 
-		rx_bi = &rx_ring->rx_bi[i];
-		skb = rx_bi->skb;
-		prefetch(skb->data);
+		skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc);
+		if (!skb)
+			break;
 
-		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
-		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
-			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
-
-		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-			   I40E_RXD_QW1_PTYPE_SHIFT;
-		rx_bi->skb = NULL;
 		cleaned_count++;
 
-		/* Get the header and possibly the whole packet
-		 * If this is an skb from previous receive dma will be 0
-		 */
-		skb_put(skb, rx_packet_len);
-		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
-				 DMA_FROM_DEVICE);
-		rx_bi->dma = 0;
-
-		I40E_RX_INCREMENT(rx_ring, i);
-
-		if (unlikely(
-		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
-			rx_ring->rx_stats.non_eop_descs++;
+		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
 			continue;
-		}
 
-		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		/* ERR_MASK will only have valid bits if EOP set, and
+		 * what we are doing here is actually checking
+		 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+		 * the error field
+		 */
+		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
 
-		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+		if (i40e_cleanup_headers(rx_ring, skb))
+			continue;
+
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
-		total_rx_packets++;
 
-		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+		/* populate checksum, VLAN, and protocol */
+		i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 
-		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
-			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
-			 : 0;
+		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-		rx_desc->wb.qword1.status_error_len = 0;
-	} while (likely(total_rx_packets < budget));
+		/* update budget accounting */
+		total_rx_packets++;
+	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
@@ -1288,6 +1289,7 @@
 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
+	/* guarantee a trip back through this routine if there was a failure */
 	return failure ? budget : total_rx_packets;
 }
 
@@ -1411,9 +1413,11 @@
 	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
 	i40e_for_each_ring(ring, q_vector->tx) {
-		clean_complete = clean_complete &&
-				 i40e_clean_tx_irq(ring, vsi->work_limit);
-		arm_wb = arm_wb || ring->arm_wb;
+		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+			clean_complete = false;
+			continue;
+		}
+		arm_wb |= ring->arm_wb;
 		ring->arm_wb = false;
 	}
 
@@ -1427,16 +1431,12 @@
 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
 	i40e_for_each_ring(ring, q_vector->rx) {
-		int cleaned;
-
-		if (ring_is_ps_enabled(ring))
-			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
-		else
-			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
 
 		work_done += cleaned;
-		/* if we didn't clean as many as budgeted, we must be done */
-		clean_complete = clean_complete && (budget_per_ring > cleaned);
+		/* if we clean as many as budgeted, we must not be done */
+		if (cleaned >= budget_per_ring)
+			clean_complete = false;
 	}
 
 	/* If work not completed, return budget and polling will return */
@@ -1514,15 +1514,13 @@
 
 /**
  * i40e_tso - set up the tso context descriptor
- * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
  * @hdr_len:  ptr to the size of the packet header
  * @cd_type_cmd_tso_mss: Quad Word 1
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
 {
 	u64 cd_cmd, cd_tso_len, cd_mss;
 	union {
@@ -1559,16 +1557,22 @@
 		ip.v6->payload_len = 0;
 	}
 
-	if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+					 SKB_GSO_GRE_CSUM |
+					 SKB_GSO_IPIP |
+					 SKB_GSO_SIT |
+					 SKB_GSO_UDP_TUNNEL |
 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+			l4.udp->len = 0;
+
 			/* determine offset of outer transport header */
 			l4_offset = l4.hdr - skb->data;
 
 			/* remove payload length from outer checksum */
-			paylen = (__force u16)l4.udp->check;
-			paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
-			l4.udp->check = ~csum_fold((__force __wsum)paylen);
+			paylen = skb->len - l4_offset;
+			csum_replace_by_diff(&l4.udp->check, htonl(paylen));
 		}
 
 		/* reset pointers to inner headers */
@@ -1588,9 +1592,8 @@
 	l4_offset = l4.hdr - skb->data;
 
 	/* remove payload length from inner checksum */
-	paylen = (__force u16)l4.tcp->check;
-	paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
-	l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+	paylen = skb->len - l4_offset;
+	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
 	/* compute length of segmentation header */
 	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
@@ -1630,7 +1633,7 @@
 		unsigned char *hdr;
 	} l4;
 	unsigned char *exthdr;
-	u32 offset, cmd = 0, tunnel = 0;
+	u32 offset, cmd = 0;
 	__be16 frag_off;
 	u8 l4_proto = 0;
 
@@ -1644,6 +1647,7 @@
 	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
 	if (skb->encapsulation) {
+		u32 tunnel = 0;
 		/* define outer network header type */
 		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -1661,13 +1665,6 @@
 						 &l4_proto, &frag_off);
 		}
 
-		/* compute outer L3 header size */
-		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
-			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
-		/* switch IP header pointer from outer to inner header */
-		ip.hdr = skb_inner_network_header(skb);
-
 		/* define outer transport */
 		switch (l4_proto) {
 		case IPPROTO_UDP:
@@ -1678,6 +1675,11 @@
 			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
+		case IPPROTO_IPIP:
+		case IPPROTO_IPV6:
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+			l4.hdr = skb_inner_network_header(skb);
+			break;
 		default:
 			if (*tx_flags & I40E_TX_FLAGS_TSO)
 				return -1;
@@ -1686,12 +1688,20 @@
 			return 0;
 		}
 
+		/* compute outer L3 header size */
+		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+
 		/* compute tunnel header size */
 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
 			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 
 		/* indicate if we need to offload outer UDP header */
 		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
 			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
 
@@ -1796,35 +1806,34 @@
 }
 
 /**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb:      send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions: 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
 bool __i40evf_chk_linearize(struct sk_buff *skb)
 {
 	const struct skb_frag_struct *frag, *stale;
-	int gso_size, nr_frags, sum;
+	int nr_frags, sum;
 
-	/* check to see if TSO is enabled, if so we may get a repreive */
-	gso_size = skb_shinfo(skb)->gso_size;
-	if (unlikely(!gso_size))
-		return true;
-
-	/* no need to check if number of frags is less than 8 */
+	/* no need to check if number of frags is less than 7 */
 	nr_frags = skb_shinfo(skb)->nr_frags;
-	if (nr_frags < I40E_MAX_BUFFER_TXD)
+	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
 		return false;
 
 	/* We need to walk through the list and validate that each group
 	 * of 6 fragments totals at least gso_size.  However we don't need
-	 * to perform such validation on the first or last 6 since the first
-	 * 6 cannot inherit any data from a descriptor before them, and the
-	 * last 6 cannot inherit any data from a descriptor after them.
+	 * to perform such validation on the last 6 since the last 6 cannot
+	 * inherit any data from a descriptor after them.
 	 */
-	nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
 	frag = &skb_shinfo(skb)->frags[0];
 
 	/* Initialize size to the negative value of gso_size minus 1.  We
@@ -1833,21 +1842,21 @@
 	 * descriptors for a single transmit as the header and previous
 	 * fragment are already consuming 2 descriptors.
 	 */
-	sum = 1 - gso_size;
+	sum = 1 - skb_shinfo(skb)->gso_size;
 
-	/* Add size of frags 1 through 5 to create our initial sum */
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
-	sum += skb_frag_size(++frag);
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
 
 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
 	stale = &skb_shinfo(skb)->frags[0];
 	for (;;) {
-		sum += skb_frag_size(++frag);
+		sum += skb_frag_size(frag++);
 
 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
@@ -1857,7 +1866,7 @@
 		if (!--nr_frags)
 			break;
 
-		sum -= skb_frag_size(++stale);
+		sum -= skb_frag_size(stale++);
 	}
 
 	return false;
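
To make the descriptor-count reasoning above concrete, here is a small userspace harness that mirrors the sliding-window check: every group of 6 consecutive fragments must cover at least gso_size bytes, otherwise the skb has to be linearized. This is a sketch only; the fragment layouts and gso_size below are made up for illustration and are not taken from real traffic:

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_TXD	8	/* mirrors I40E_MAX_BUFFER_TXD */

/* userspace sketch of the check; frag[] holds fragment sizes in bytes */
static bool chk_linearize(const int *frag, int nr_frags, int gso_size)
{
	int sum, next = 5, stale = 0;

	/* fewer than 7 fragments can never need more than 8 buffers */
	if (nr_frags < MAX_BUFFER_TXD - 1)
		return false;

	nr_frags -= MAX_BUFFER_TXD - 2;

	/* seed the window with fragments 0-4, minus one full segment */
	sum = 1 - gso_size;
	sum += frag[0] + frag[1] + frag[2] + frag[3] + frag[4];

	/* slide: add the newest fragment, fail if the 6-frag window is short */
	for (;;) {
		sum += frag[next++];
		if (sum < 0)
			return true;
		if (!--nr_frags)
			break;
		sum -= frag[stale++];
	}
	return false;
}

int main(void)
{
	int even[16], skewed[16], i;

	for (i = 0; i < 16; i++) {
		even[i] = 4096;				/* every window covers a segment */
		skewed[i] = (i < 2) ? 32768 : 128;	/* later windows fall short */
	}
	printf("even layout:   linearize=%d\n", chk_linearize(even, 16, 1448));
	printf("skewed layout: linearize=%d\n", chk_linearize(skewed, 16, 1448));
	return 0;
}
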
@@ -1936,6 +1945,8 @@
 	tx_bi = first;
 
 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
 		if (dma_mapping_error(tx_ring->dev, dma))
 			goto dma_error;
 
@@ -1943,12 +1954,14 @@
 		dma_unmap_len_set(tx_bi, len, size);
 		dma_unmap_addr_set(tx_bi, dma, dma);
 
+		/* align size to end of page */
+		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
 		tx_desc->buffer_addr = cpu_to_le64(dma);
 
 		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
 			tx_desc->cmd_type_offset_bsz =
 				build_ctob(td_cmd, td_offset,
-					   I40E_MAX_DATA_PER_TXD, td_tag);
+					   max_data, td_tag);
 
 			tx_desc++;
 			i++;
@@ -1959,9 +1972,10 @@
 				i = 0;
 			}
 
-			dma += I40E_MAX_DATA_PER_TXD;
-			size -= I40E_MAX_DATA_PER_TXD;
+			dma += max_data;
+			size -= max_data;
 
+			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
 			tx_desc->buffer_addr = cpu_to_le64(dma);
 		}
 
@@ -2110,7 +2124,7 @@
 	if (i40e_chk_linearize(skb, count)) {
 		if (__skb_linearize(skb))
 			goto out_drop;
-		count = TXD_USE_COUNT(skb->len);
+		count = i40e_txd_use_count(skb->len);
 		tx_ring->tx_stats.tx_linearize++;
 	}
 
@@ -2141,7 +2155,7 @@
 	else if (protocol == htons(ETH_P_IPV6))
 		tx_flags |= I40E_TX_FLAGS_IPV6;
 
-	tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
+	tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
 
 	if (tso < 0)
 		goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5..0112277 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -102,8 +102,8 @@
 	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
 	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512   512    /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256   256
 #define I40E_RXBUFFER_2048  2048
 #define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
 #define I40E_RXBUFFER_4096  4096
@@ -114,9 +114,28 @@
  * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
  * this adds up to 512 bytes of extra data meaning the smallest allocation
  * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
  */
-#define I40E_RX_HDR_SIZE  I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+				     const u64 stat_err_bits)
+{
+	return !!(rx_desc->wb.qword1.status_error_len &
+		  cpu_to_le64(stat_err_bits));
+}
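
The helper keeps the descriptor word in little-endian form and byte-swaps the constant instead, so the test compiles down to a plain AND regardless of host endianness. A userspace sketch of the same idiom; the bit positions here are illustrative placeholders, not values from this patch:

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative bit positions; the driver takes them from its status enum */
#define DD_BIT	(1ULL << 0)
#define EOF_BIT	(1ULL << 1)

/* mirrors the idea of i40e_test_staterr(): swap the constant, not the word */
static bool test_staterr(uint64_t status_error_len_le, uint64_t bits)
{
	return !!(status_error_len_le & htole64(bits));
}

int main(void)
{
	uint64_t qword = htole64(DD_BIT);	/* descriptor as hardware wrote it */

	printf("DD set?  %d\n", test_staterr(qword, DD_BIT));
	printf("EOF set? %d\n", test_staterr(qword, EOF_BIT));
	return 0;
}
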
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define I40E_RX_BUFFER_WRITE	16	/* Must be power of 2 */
@@ -142,14 +161,41 @@
 		prefetch((n));				\
 	} while (0)
 
-#define i40e_rx_desc i40e_32byte_rx_desc
-
 #define I40E_MAX_BUFFER_TXD	8
 #define I40E_MIN_TX_LEN		17
-#define I40E_MAX_DATA_PER_TXD	8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE		4096
+#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED.  It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive.  It is used to
+ * approximate the number of descriptors used per linear buffer.  Note
+ * that this will overestimate in some cases as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer; however,
+ * the error should not impact things much, as large buffers usually mean
+ * we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+	const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+	unsigned int adjust = ~(u32)0;
+
+	/* if we rounded up on the reciprocal pull down the adjustment */
+	if ((max * reciprocal) > adjust)
+		adjust = ~(u32)(reciprocal - 1);
+
+	return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
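
A quick way to convince yourself the reciprocal trick matches DIV_ROUND_UP(size, 12288) for any buffer size a fragment can realistically have is to brute-force the comparison. The sketch below is userspace-only, with the constants from the patch inlined, and checks sizes up to 64K:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ALIGNED	12288u	/* I40E_MAX_DATA_PER_TXD_ALIGNED */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int txd_use_count(unsigned int size)
{
	const unsigned int max = MAX_ALIGNED;
	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
	unsigned int adjust = ~(uint32_t)0;

	/* if we rounded up on the reciprocal pull down the adjustment */
	if ((max * reciprocal) > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
}

int main(void)
{
	unsigned int size;

	/* exact for any realistic fragment size (checked here up to 64K) */
	for (size = 0; size <= 65536; size++)
		assert(txd_use_count(size) == DIV_ROUND_UP(size, MAX_ALIGNED));

	printf("12287 bytes -> %u descriptor(s)\n", txd_use_count(12287));
	printf("12289 bytes -> %u descriptor(s)\n", txd_use_count(12289));
	return 0;
}
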
 
 /* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
@@ -183,10 +229,8 @@
 
 struct i40e_rx_buffer {
 	struct sk_buff *skb;
-	void *hdr_buf;
 	dma_addr_t dma;
 	struct page *page;
-	dma_addr_t page_dma;
 	unsigned int page_offset;
 };
 
@@ -215,22 +259,18 @@
 enum i40e_ring_state_t {
 	__I40E_TX_FDIR_INIT_DONE,
 	__I40E_TX_XPS_INIT_DONE,
-	__I40E_RX_PS_ENABLED,
-	__I40E_RX_16BYTE_DESC_ENABLED,
 };
 
-#define ring_is_ps_enabled(ring) \
-	test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
-	set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
-	clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
-	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
-	set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
-	clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT      0
+#define I40E_RX_DTYPE_HEADER_SPLIT  1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
+#define I40E_RX_SPLIT_L2      0x1
+#define I40E_RX_SPLIT_IP      0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP    0x8
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
@@ -249,16 +289,7 @@
 
 	u16 count;			/* Number of descriptors */
 	u16 reg_idx;			/* HW register index of the ring */
-	u16 rx_hdr_len;
 	u16 rx_buf_len;
-	u8  dtype;
-#define I40E_RX_DTYPE_NO_SPLIT      0
-#define I40E_RX_DTYPE_HEADER_SPLIT  1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
-#define I40E_RX_SPLIT_L2      0x1
-#define I40E_RX_SPLIT_IP      0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP    0x8
 
 	/* used in interrupt processing */
 	u16 next_to_use;
@@ -290,6 +321,7 @@
 	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
 
 	struct rcu_head rcu;		/* to avoid race on free */
+	u16 next_to_alloc;
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
@@ -313,9 +345,7 @@
 #define i40e_for_each_ring(pos, head) \
 	for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -359,7 +389,7 @@
 	int count = 0, size = skb_headlen(skb);
 
 	for (;;) {
-		count += TXD_USE_COUNT(size);
+		count += i40e_txd_use_count(size);
 
 		if (!nr_frags--)
 			break;
@@ -395,10 +425,24 @@
  **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-	/* we can only support up to 8 data buffers for a single send */
-	if (likely(count <= I40E_MAX_BUFFER_TXD))
+	/* Both TSO and single send will work if count is less than 8 */
+	if (likely(count < I40E_MAX_BUFFER_TXD))
 		return false;
 
-	return __i40evf_chk_linearize(skb);
+	if (skb_is_gso(skb))
+		return __i40evf_chk_linearize(skb);
+
+	/* we can support up to 8 data buffers for a single send */
+	return count != I40E_MAX_BUFFER_TXD;
+}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
 }
 #endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 301fe2b..97f96e0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -36,7 +36,7 @@
 #include "i40e_devids.h"
 
 /* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
 
 #define I40E_MAX_VSI_QP			16
 #define I40E_MAX_VF_VSI			3
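
The added (u32) cast matters because the old macro did the shift in plain int: a mask shifted into bit 31 yields a negative int, which sign-extends when the result is widened to 64 bits, and the signed shift itself is undefined behaviour. A small userspace demonstration of the difference; the 0xFF/24 operands are just an example:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* post-patch form of the macro, with u32 spelled as uint32_t */
#define I40E_MASK(mask, shift)	((uint32_t)(mask) << (shift))

int main(void)
{
	/* What the old "(mask << shift)" form typically yields for 0xFF << 24:
	 * an int with the sign bit set, which sign-extends when widened. */
	int32_t  as_int  = (int32_t)0xFF000000u;
	uint64_t widened = (uint64_t)(int64_t)as_int;
	uint64_t masked  = (uint64_t)I40E_MASK(0xFF, 24);

	printf("sign-extended int result: 0x%016" PRIx64 "\n", widened);
	printf("with the (u32) cast:      0x%016" PRIx64 "\n", masked);
	return 0;
}
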
@@ -258,6 +258,11 @@
 #define I40E_FLEX10_STATUS_DCC_ERROR	0x1
 #define I40E_FLEX10_STATUS_VC_MODE	0x2
 
+	bool sec_rev_disabled;
+	bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED	0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED	0x2
+
 	bool mgmt_cem;
 	bool ieee_1588;
 	bool iwarp;
@@ -522,6 +527,8 @@
 	enum i40e_nvmupd_state nvmupd_state;
 	struct i40e_aq_desc nvm_wb_desc;
 	struct i40e_virt_mem nvm_buff;
+	bool nvm_release_on_done;
+	u16 nvm_wait_opcode;
 
 	/* HMC info */
 	struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1329,4 +1336,46 @@
 
 /* RSS Hash Table Size */
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_512	0x00010000
+
+/* INPUT SET MASK for RSS, flow director and flexible payload */
+#define I40E_FD_INSET_L3_SRC_SHIFT		47
+#define I40E_FD_INSET_L3_SRC_WORD_MASK		(0x3ULL << \
+						 I40E_FD_INSET_L3_SRC_SHIFT)
+#define I40E_FD_INSET_L3_DST_SHIFT		35
+#define I40E_FD_INSET_L3_DST_WORD_MASK		(0x3ULL << \
+						 I40E_FD_INSET_L3_DST_SHIFT)
+#define I40E_FD_INSET_L4_SRC_SHIFT		34
+#define I40E_FD_INSET_L4_SRC_WORD_MASK		(0x1ULL << \
+						 I40E_FD_INSET_L4_SRC_SHIFT)
+#define I40E_FD_INSET_L4_DST_SHIFT		33
+#define I40E_FD_INSET_L4_DST_WORD_MASK		(0x1ULL << \
+						 I40E_FD_INSET_L4_DST_SHIFT)
+#define I40E_FD_INSET_VERIFY_TAG_SHIFT		31
+#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK	(0x3ULL << \
+						 I40E_FD_INSET_VERIFY_TAG_SHIFT)
+
+#define I40E_FD_INSET_FLEX_WORD50_SHIFT		17
+#define I40E_FD_INSET_FLEX_WORD50_MASK		(0x1ULL << \
+					I40E_FD_INSET_FLEX_WORD50_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD51_SHIFT		16
+#define I40E_FD_INSET_FLEX_WORD51_MASK		(0x1ULL << \
+					I40E_FD_INSET_FLEX_WORD51_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD52_SHIFT		15
+#define I40E_FD_INSET_FLEX_WORD52_MASK		(0x1ULL << \
+					I40E_FD_INSET_FLEX_WORD52_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD53_SHIFT		14
+#define I40E_FD_INSET_FLEX_WORD53_MASK		(0x1ULL << \
+					I40E_FD_INSET_FLEX_WORD53_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD54_SHIFT		13
+#define I40E_FD_INSET_FLEX_WORD54_MASK		(0x1ULL << \
+					I40E_FD_INSET_FLEX_WORD54_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD55_SHIFT		12
+#define I40E_FD_INSET_FLEX_WORD55_MASK		(0x1ULL << \
+					I40E_FD_INSET_FLEX_WORD55_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD56_SHIFT		11
+#define I40E_FD_INSET_FLEX_WORD56_MASK		(0x1ULL << \
+					I40E_FD_INSET_FLEX_WORD56_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD57_SHIFT		10
+#define I40E_FD_INSET_FLEX_WORD57_MASK		(0x1ULL << \
+					I40E_FD_INSET_FLEX_WORD57_SHIFT)
 #endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 3b9d203..f04ce6c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -80,7 +80,12 @@
 	I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
 	I40E_VIRTCHNL_OP_GET_STATS = 15,
 	I40E_VIRTCHNL_OP_FCOE = 16,
-	I40E_VIRTCHNL_OP_EVENT = 17,
+	I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+	I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+	I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+	I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+	I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
@@ -154,6 +159,7 @@
 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF		0x00080000
 
 struct i40e_virtchnl_vf_resource {
 	u16 num_vsis;
@@ -162,8 +168,8 @@
 	u16 max_mtu;
 
 	u32 vf_offload_flags;
-	u32 max_fcoe_contexts;
-	u32 max_fcoe_filters;
+	u32 rss_key_size;
+	u32 rss_lut_size;
 
 	struct i40e_virtchnl_vsi_resource vsi_res[1];
 };
@@ -322,6 +328,39 @@
  * PF replies with struct i40e_eth_stats in an external buffer.
  */
 
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+	u16 vsi_id;
+	u16 key_len;
+	u8 key[1];         /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+	u16 vsi_id;
+	u16 lut_entries;
+	u8 lut[1];        /* RSS lookup table */
+};
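
Both structs end in a one-byte array standing in for a variable-length payload, so the wire message is sized as sizeof(struct) plus the payload length minus the one byte already counted. A userspace sketch of how a sender might build such a message; the struct is a local copy of the layout, the 52-byte key length is an assumption for illustration, and this is not the VF driver's actual send path:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* local copy of the wire layout, for illustration only */
struct rss_key_msg {
	uint16_t vsi_id;
	uint16_t key_len;
	uint8_t  key[1];	/* RSS hash key, packed bytes */
};

int main(void)
{
	uint16_t key_len = 52;	/* assumed key size, purely illustrative */
	size_t msg_len = sizeof(struct rss_key_msg) + key_len - 1;
	struct rss_key_msg *msg = calloc(1, msg_len);

	if (!msg)
		return 1;
	msg->vsi_id = 1;
	msg->key_len = key_len;
	memset(msg->key, 0xA5, key_len);	/* stand-in for a real hash key */

	printf("buffer for a %u-byte key: %zu bytes\n", key_len, msg_len);
	free(msg);
	return 0;
}
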
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+	u64 hena;
+};
+
 /* I40E_VIRTCHNL_OP_EVENT
  * PF sends this message to inform the VF driver of events that may affect it.
  * No direct response is expected from the VF, though it may generate other
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index e657ecc..76ed97d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -67,8 +67,6 @@
 	u16 rx_itr_setting;
 	u16 tx_itr_setting;
 	u16 qs_handle;
-	u8 *rss_hkey_user; /* User configured hash keys */
-	u8 *rss_lut_user;  /* User configured lookup table entries */
 };
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -82,9 +80,6 @@
 #define I40EVF_REQ_DESCRIPTOR_MULTIPLE  32
 
 /* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_64    64     /* Used for packet split */
-#define I40EVF_RXBUFFER_128   128    /* Used for packet split */
-#define I40EVF_RXBUFFER_256   256    /* Used for packet split */
 #define I40EVF_RXBUFFER_2048  2048
 #define I40EVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
 #define I40EVF_MAX_AQ_BUF_SIZE    4096
@@ -210,9 +205,6 @@
 
 	u32 flags;
 #define I40EVF_FLAG_RX_CSUM_ENABLED              BIT(0)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE              BIT(1)
-#define I40EVF_FLAG_RX_PS_CAPABLE                BIT(2)
-#define I40EVF_FLAG_RX_PS_ENABLED                BIT(3)
 #define I40EVF_FLAG_IMIR_ENABLED                 BIT(5)
 #define I40EVF_FLAG_MQ_CAPABLE                   BIT(6)
 #define I40EVF_FLAG_NEED_LINK_UPDATE             BIT(7)
@@ -222,6 +214,8 @@
 #define I40EVF_FLAG_WB_ON_ITR_CAPABLE		BIT(11)
 #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT(12)
 #define I40EVF_FLAG_ADDR_SET_BY_PF		BIT(13)
+#define I40EVF_FLAG_PROMISC_ON			BIT(15)
+#define I40EVF_FLAG_ALLMULTI_ON			BIT(16)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED		 0
 #define I40E_FLAG_DCB_ENABLED			 0
@@ -239,8 +233,17 @@
 #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		BIT(6)
 #define I40EVF_FLAG_AQ_MAP_VECTORS		BIT(7)
 #define I40EVF_FLAG_AQ_HANDLE_RESET		BIT(8)
-#define I40EVF_FLAG_AQ_CONFIGURE_RSS		BIT(9)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS		BIT(9)	/* direct AQ config */
 #define I40EVF_FLAG_AQ_GET_CONFIG		BIT(10)
+/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
+#define I40EVF_FLAG_AQ_GET_HENA			BIT(11)
+#define I40EVF_FLAG_AQ_SET_HENA			BIT(12)
+#define I40EVF_FLAG_AQ_SET_RSS_KEY		BIT(13)
+#define I40EVF_FLAG_AQ_SET_RSS_LUT		BIT(14)
+#define I40EVF_FLAG_AQ_REQUEST_PROMISC		BIT(15)
+#define I40EVF_FLAG_AQ_RELEASE_PROMISC		BIT(16)
+#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI		BIT(17)
+#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI		BIT(18)
 
 	/* OS defined structs */
 	struct net_device *netdev;
@@ -256,10 +259,18 @@
 	bool netdev_registered;
 	bool link_up;
 	enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
-			    I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+			    (_a)->vf_res->vf_offload_flags & \
+				I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
+			    0)
+/* RSS by the PF should be preferred over RSS via other methods. */
+#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
+		    I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
 #define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
 		    I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \
+		       (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+			I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)))
 #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
 			  I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
 	struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -271,11 +282,16 @@
 	struct i40e_eth_stats current_stats;
 	struct i40e_vsi vsi;
 	u32 aq_wait_count;
+	/* RSS stuff */
+	u64 hena;
+	u16 rss_key_size;
+	u16 rss_lut_size;
+	u8 *rss_key;
+	u8 *rss_lut;
 };
 
 
 /* Ethtool Private Flags */
-#define I40EVF_PRIV_FLAGS_PS		BIT(0)
 
 /* needed by i40evf_ethtool.c */
 extern char i40evf_driver_name[];
@@ -314,11 +330,12 @@
 void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
 void i40evf_request_stats(struct i40evf_adapter *adapter);
 void i40evf_request_reset(struct i40evf_adapter *adapter);
+void i40evf_get_hena(struct i40evf_adapter *adapter);
+void i40evf_set_hena(struct i40evf_adapter *adapter);
+void i40evf_set_rss_key(struct i40evf_adapter *adapter);
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
 void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
 				enum i40e_virtchnl_ops v_opcode,
 				i40e_status v_retval, u8 *msg, u16 msglen);
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
-		      u16 lut_size);
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
-		   u16 lut_size);
+int i40evf_config_rss(struct i40evf_adapter *adapter);
 #endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index dd4430a..c9c202f6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -63,12 +63,6 @@
 #define I40EVF_STATS_LEN(_dev) \
 	(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
 
-static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = {
-	"packet-split",
-};
-
-#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings)
-
 /**
  * i40evf_get_settings - Get Link Speed and Duplex settings
  * @netdev: network interface device structure
@@ -103,8 +97,6 @@
 {
 	if (sset == ETH_SS_STATS)
 		return I40EVF_STATS_LEN(netdev);
-	else if (sset == ETH_SS_PRIV_FLAGS)
-		return I40EVF_PRIV_FLAGS_STR_LEN;
 	else
 		return -EINVAL;
 }
@@ -170,12 +162,6 @@
 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
-	} else if (sset == ETH_SS_PRIV_FLAGS) {
-		for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
-			memcpy(data, i40evf_priv_flags_strings[i],
-			       ETH_GSTRING_LEN);
-			data += ETH_GSTRING_LEN;
-		}
 	}
 }
 
@@ -225,7 +211,6 @@
 	strlcpy(drvinfo->version, i40evf_driver_version, 32);
 	strlcpy(drvinfo->fw_version, "N/A", 4);
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
-	drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
 }
 
 /**
@@ -378,63 +363,6 @@
 }
 
 /**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
-				    struct ethtool_rxnfc *cmd)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
-		   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
-	/* We always hash on IP src and dest addresses */
-	cmd->data = RXH_IP_SRC | RXH_IP_DST;
-
-	switch (cmd->flow_type) {
-	case TCP_V4_FLOW:
-		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
-			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-		break;
-	case UDP_V4_FLOW:
-		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
-			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-		break;
-
-	case SCTP_V4_FLOW:
-	case AH_ESP_V4_FLOW:
-	case AH_V4_FLOW:
-	case ESP_V4_FLOW:
-	case IPV4_FLOW:
-		break;
-
-	case TCP_V6_FLOW:
-		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
-			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-		break;
-	case UDP_V6_FLOW:
-		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
-			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-		break;
-
-	case SCTP_V6_FLOW:
-	case AH_ESP_V6_FLOW:
-	case AH_V6_FLOW:
-	case ESP_V6_FLOW:
-	case IPV6_FLOW:
-		break;
-	default:
-		cmd->data = 0;
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/**
  * i40evf_get_rxnfc - command to get RX flow classification rules
  * @netdev: network interface device structure
  * @cmd: ethtool rxnfc command
@@ -454,7 +382,8 @@
 		ret = 0;
 		break;
 	case ETHTOOL_GRXFH:
-		ret = i40evf_get_rss_hash_opts(adapter, cmd);
+		netdev_info(netdev,
+			    "RSS hash info is not available to vf, use pf.\n");
 		break;
 	default:
 		break;
@@ -462,145 +391,6 @@
 
 	return ret;
 }
-
-/**
- * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
-				   struct ethtool_rxnfc *nfc)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	u32 flags = adapter->vf_res->vf_offload_flags;
-
-	u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
-		   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
-	/* RSS does not support anything other than hashing
-	 * to queues on src and dst IPs and ports
-	 */
-	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
-			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
-		return -EINVAL;
-
-	/* We need at least the IP SRC and DEST fields for hashing */
-	if (!(nfc->data & RXH_IP_SRC) ||
-	    !(nfc->data & RXH_IP_DST))
-		return -EINVAL;
-
-	switch (nfc->flow_type) {
-	case TCP_V4_FLOW:
-		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-			if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-				hena |=
-			   BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
-
-			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-		} else {
-			return -EINVAL;
-		}
-		break;
-	case TCP_V6_FLOW:
-		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-			if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-				hena |=
-			   BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
-
-			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-		} else {
-			return -EINVAL;
-		}
-		break;
-	case UDP_V4_FLOW:
-		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-			if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-				hena |=
-			    BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
-			    BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
-			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-		} else {
-			return -EINVAL;
-		}
-		break;
-	case UDP_V6_FLOW:
-		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-			if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-				hena |=
-			    BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
-			    BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
-			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-		} else {
-			return -EINVAL;
-		}
-		break;
-	case AH_ESP_V4_FLOW:
-	case AH_V4_FLOW:
-	case ESP_V4_FLOW:
-	case SCTP_V4_FLOW:
-		if ((nfc->data & RXH_L4_B_0_1) ||
-		    (nfc->data & RXH_L4_B_2_3))
-			return -EINVAL;
-		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
-		break;
-	case AH_ESP_V6_FLOW:
-	case AH_V6_FLOW:
-	case ESP_V6_FLOW:
-	case SCTP_V6_FLOW:
-		if ((nfc->data & RXH_L4_B_0_1) ||
-		    (nfc->data & RXH_L4_B_2_3))
-			return -EINVAL;
-		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
-		break;
-	case IPV4_FLOW:
-		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-		break;
-	case IPV6_FLOW:
-		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
-	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
-	i40e_flush(hw);
-
-	return 0;
-}
-
-/**
- * i40evf_set_rxnfc - command to set RX flow classification rules
- * @netdev: network interface device structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the command is supported.
- **/
-static int i40evf_set_rxnfc(struct net_device *netdev,
-			    struct ethtool_rxnfc *cmd)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	int ret = -EOPNOTSUPP;
-
-	switch (cmd->cmd) {
-	case ETHTOOL_SRXFH:
-		ret = i40evf_set_rss_hash_opt(adapter, cmd);
-		break;
-	default:
-		break;
-	}
-
-	return ret;
-}
-
 /**
  * i40evf_get_channels: get the number of channels supported by the device
  * @netdev: network interface device structure
@@ -624,6 +414,19 @@
 }
 
 /**
+ * i40evf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the key size.
+ **/
+static u32 i40evf_get_rxfh_key_size(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->rss_key_size;
+}
+
+/**
  * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
  * @netdev: network interface device structure
  *
@@ -631,7 +434,9 @@
  **/
 static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
 {
-	return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->rss_lut_size;
 }
 
 /**
@@ -646,9 +451,6 @@
 			   u8 *hfunc)
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40e_vsi *vsi = &adapter->vsi;
-	u8 *seed = NULL, *lut;
-	int ret;
 	u16 i;
 
 	if (hfunc)
@@ -656,24 +458,13 @@
 	if (!indir)
 		return 0;
 
-	seed = key;
-
-	lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
-	if (!lut)
-		return -ENOMEM;
-
-	ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
-	if (ret)
-		goto out;
+	memcpy(key, adapter->rss_key, adapter->rss_key_size);
 
 	/* Each 32 bits pointed by 'indir' is stored with a lut entry */
-	for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
-		indir[i] = (u32)lut[i];
+	for (i = 0; i < adapter->rss_lut_size; i++)
+		indir[i] = (u32)adapter->rss_lut[i];
 
-out:
-	kfree(lut);
-
-	return ret;
+	return 0;
 }
 
 /**
@@ -689,8 +480,6 @@
 			   const u8 *key, const u8 hfunc)
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	struct i40e_vsi *vsi = &adapter->vsi;
-	u8 *seed = NULL;
 	u16 i;
 
 	/* We do not allow change in unsupported parameters */
@@ -701,76 +490,14 @@
 		return 0;
 
 	if (key) {
-		if (!vsi->rss_hkey_user) {
-			vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE,
-						     GFP_KERNEL);
-			if (!vsi->rss_hkey_user)
-				return -ENOMEM;
-		}
-		memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE);
-		seed = vsi->rss_hkey_user;
-	}
-	if (!vsi->rss_lut_user) {
-		vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE,
-					    GFP_KERNEL);
-		if (!vsi->rss_lut_user)
-			return -ENOMEM;
+		memcpy(adapter->rss_key, key, adapter->rss_key_size);
 	}
 
 	/* Each 32 bits pointed by 'indir' is stored with a lut entry */
-	for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
-		vsi->rss_lut_user[i] = (u8)(indir[i]);
+	for (i = 0; i < adapter->rss_lut_size; i++)
+		adapter->rss_lut[i] = (u8)(indir[i]);
 
-	return i40evf_config_rss(vsi, seed, vsi->rss_lut_user,
-				 I40EVF_HLUT_ARRAY_SIZE);
-}
-
-/**
- * i40evf_get_priv_flags - report device private flags
- * @dev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned.  Add new strings for each flag to the i40e_priv_flags_strings
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 i40evf_get_priv_flags(struct net_device *dev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(dev);
-	u32 ret_flags = 0;
-
-	ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ?
-		I40EVF_PRIV_FLAGS_PS : 0;
-
-	return ret_flags;
-}
-
-/**
- * i40evf_set_priv_flags - set private flags
- * @dev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int i40evf_set_priv_flags(struct net_device *dev, u32 flags)
-{
-	struct i40evf_adapter *adapter = netdev_priv(dev);
-	bool reset_required = false;
-
-	if ((flags & I40EVF_PRIV_FLAGS_PS) &&
-	    !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
-		adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
-		reset_required = true;
-	} else if (!(flags & I40EVF_PRIV_FLAGS_PS) &&
-		   (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
-		adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
-		reset_required = true;
-	}
-
-	/* if needed, issue reset to cause things to take effect */
-	if (reset_required)
-		i40evf_schedule_reset(adapter);
-
-	return 0;
+	return i40evf_config_rss(adapter);
 }
 
 static const struct ethtool_ops i40evf_ethtool_ops = {
@@ -782,18 +509,16 @@
 	.get_strings		= i40evf_get_strings,
 	.get_ethtool_stats	= i40evf_get_ethtool_stats,
 	.get_sset_count		= i40evf_get_sset_count,
-	.get_priv_flags		= i40evf_get_priv_flags,
-	.set_priv_flags		= i40evf_set_priv_flags,
 	.get_msglevel		= i40evf_get_msglevel,
 	.set_msglevel		= i40evf_set_msglevel,
 	.get_coalesce		= i40evf_get_coalesce,
 	.set_coalesce		= i40evf_set_coalesce,
 	.get_rxnfc		= i40evf_get_rxnfc,
-	.set_rxnfc		= i40evf_set_rxnfc,
 	.get_rxfh_indir_size	= i40evf_get_rxfh_indir_size,
 	.get_rxfh		= i40evf_get_rxfh,
 	.set_rxfh		= i40evf_set_rxfh,
 	.get_channels		= i40evf_get_channels,
+	.get_rxfh_key_size	= i40evf_get_rxfh_key_size,
 };
 
 /**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4b70aae..642bb45 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 15
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 10
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD) \
@@ -641,28 +641,11 @@
 static void i40evf_configure_rx(struct i40evf_adapter *adapter)
 {
 	struct i40e_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	int i;
-	int rx_buf_len;
-
-
-	/* Set the RX buffer length according to the mode */
-	if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ||
-	    netdev->mtu <= ETH_DATA_LEN)
-		rx_buf_len = I40EVF_RXBUFFER_2048;
-	else
-		rx_buf_len = ALIGN(max_frame, 1024);
 
 	for (i = 0; i < adapter->num_active_queues; i++) {
 		adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
-		adapter->rx_rings[i].rx_buf_len = rx_buf_len;
-		if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
-			set_ring_ps_enabled(&adapter->rx_rings[i]);
-			adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE;
-		} else {
-			clear_ring_ps_enabled(&adapter->rx_rings[i]);
-		}
+		adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
 	}
 }
 
@@ -943,6 +926,21 @@
 bottom_of_search_loop:
 		continue;
 	}
+
+	if (netdev->flags & IFF_PROMISC &&
+	    !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
+		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
+	else if (!(netdev->flags & IFF_PROMISC) &&
+		 adapter->flags & I40EVF_FLAG_PROMISC_ON)
+		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
+
+	if (netdev->flags & IFF_ALLMULTI &&
+	    !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
+		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+	else if (!(netdev->flags & IFF_ALLMULTI) &&
+		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
+		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
+
 	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 }
 
@@ -999,14 +997,7 @@
 	for (i = 0; i < adapter->num_active_queues; i++) {
 		struct i40e_ring *ring = &adapter->rx_rings[i];
 
-	if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
-		i40evf_alloc_rx_headers(ring);
-		i40evf_alloc_rx_buffers_ps(ring, ring->count);
-	} else {
-		i40evf_alloc_rx_buffers_1buf(ring, ring->count);
-	}
-		ring->next_to_use = ring->count - 1;
-		writel(ring->next_to_use, ring->tail);
+		i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
 	}
 }
 
@@ -1224,24 +1215,18 @@
 }
 
 /**
- * i40e_config_rss_aq - Prepare for RSS using AQ commands
- * @vsi: vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * i40evf_config_rss_aq - Configure RSS keys and lut by using AQ commands
+ * @adapter: board private structure
  *
  * Return 0 on success, negative on failure
  **/
-static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
-				u8 *lut, u16 lut_size)
+static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
 {
-	struct i40evf_adapter *adapter = vsi->back;
+	struct i40e_aqc_get_set_rss_key_data *rss_key =
+		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
 	struct i40e_hw *hw = &adapter->hw;
 	int ret = 0;
 
-	if (!vsi->id)
-		return -EINVAL;
-
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
 		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
@@ -1249,198 +1234,82 @@
 		return -EBUSY;
 	}
 
-	if (seed) {
-		struct i40e_aqc_get_set_rss_key_data *rss_key =
-			(struct i40e_aqc_get_set_rss_key_data *)seed;
-		ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key);
-		if (ret) {
-			dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
-				i40evf_stat_str(hw, ret),
-				i40evf_aq_str(hw, hw->aq.asq_last_status));
-			return ret;
-		}
+	ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+	if (ret) {
+		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+		return ret;
+
 	}
 
-	if (lut) {
-		ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size);
-		if (ret) {
-			dev_err(&adapter->pdev->dev,
-				"Cannot set RSS lut, err %s aq_err %s\n",
-				i40evf_stat_str(hw, ret),
-				i40evf_aq_str(hw, hw->aq.asq_last_status));
-			return ret;
-		}
+	ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+				    adapter->rss_lut, adapter->rss_lut_size);
+	if (ret) {
+		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
 	}
 
 	return ret;
+
 }
 
 /**
  * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
  *
  * Returns 0 on success, negative on failure
  **/
-static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
-				 const u8 *lut, u16 lut_size)
+static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
 {
-	struct i40evf_adapter *adapter = vsi->back;
 	struct i40e_hw *hw = &adapter->hw;
+	u32 *dw;
 	u16 i;
 
-	if (seed) {
-		u32 *seed_dw = (u32 *)seed;
+	dw = (u32 *)adapter->rss_key;
+	for (i = 0; i <= adapter->rss_key_size / 4; i++)
+		wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
 
-		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
-			wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
-	}
+	dw = (u32 *)adapter->rss_lut;
+	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
+		wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
 
-	if (lut) {
-		u32 *lut_dw = (u32 *)lut;
-
-		if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
-			return -EINVAL;
-
-		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
-			wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
-	}
 	i40e_flush(hw);
 
 	return 0;
 }
 
 /**
- *  * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands
- *  @vsi: Pointer to vsi structure
- *  @seed: RSS hash seed
- *  @lut: Lookup table
- *  @lut_size: Lookup table size
- *
- *  Return 0 on success, negative on failure
- **/
-static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
-			     u8 *lut, u16 lut_size)
-{
-	struct i40evf_adapter *adapter = vsi->back;
-	struct i40e_hw *hw = &adapter->hw;
-	int ret = 0;
-
-	if (seed) {
-		ret = i40evf_aq_get_rss_key(hw, vsi->id,
-			(struct i40e_aqc_get_set_rss_key_data *)seed);
-		if (ret) {
-			dev_err(&adapter->pdev->dev,
-				"Cannot get RSS key, err %s aq_err %s\n",
-				i40evf_stat_str(hw, ret),
-				i40evf_aq_str(hw, hw->aq.asq_last_status));
-			return ret;
-		}
-	}
-
-	if (lut) {
-		ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size);
-		if (ret) {
-			dev_err(&adapter->pdev->dev,
-				"Cannot get RSS lut, err %s aq_err %s\n",
-				i40evf_stat_str(hw, ret),
-				i40evf_aq_str(hw, hw->aq.asq_last_status));
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
-/**
- *  * i40evf_get_rss_reg - Get RSS keys and lut by reading registers
- *  @vsi: Pointer to vsi structure
- *  @seed: RSS hash seed
- *  @lut: Lookup table
- *  @lut_size: Lookup table size
- *
- *  Returns 0 on success, negative on failure
- **/
-static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
-			      const u8 *lut, u16 lut_size)
-{
-	struct i40evf_adapter *adapter = vsi->back;
-	struct i40e_hw *hw = &adapter->hw;
-	u16 i;
-
-	if (seed) {
-		u32 *seed_dw = (u32 *)seed;
-
-		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
-			seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i));
-	}
-
-	if (lut) {
-		u32 *lut_dw = (u32 *)lut;
-
-		if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
-			return -EINVAL;
-
-		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
-			lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i));
-	}
-
-	return 0;
-}
-
-/**
  * i40evf_config_rss - Configure RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
  *
  * Returns 0 on success, negative on failure
  **/
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed,
-		      u8 *lut, u16 lut_size)
+int i40evf_config_rss(struct i40evf_adapter *adapter)
 {
-	struct i40evf_adapter *adapter = vsi->back;
 
-	if (RSS_AQ(adapter))
-		return i40evf_config_rss_aq(vsi, seed, lut, lut_size);
-	else
-		return i40evf_config_rss_reg(vsi, seed, lut, lut_size);
-}
-
-/**
- * i40evf_get_rss - Get RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
- **/
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size)
-{
-	struct i40evf_adapter *adapter = vsi->back;
-
-	if (RSS_AQ(adapter))
-		return i40evf_get_rss_aq(vsi, seed, lut, lut_size);
-	else
-		return i40evf_get_rss_reg(vsi, seed, lut, lut_size);
+	if (RSS_PF(adapter)) {
+		adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
+					I40EVF_FLAG_AQ_SET_RSS_KEY;
+		return 0;
+	} else if (RSS_AQ(adapter)) {
+		return i40evf_config_rss_aq(adapter);
+	} else {
+		return i40evf_config_rss_reg(adapter);
+	}
 }
 
 /**
  * i40evf_fill_rss_lut - Fill the lut with default values
- * @lut: Lookup table to be filled with
- * @rss_table_size: Lookup table size
- * @rss_size: Range of queue number for hashing
+ * @adapter: board private structure
  **/
-static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
+static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
 {
 	u16 i;
 
-	for (i = 0; i < rss_table_size; i++)
-		lut[i] = i % rss_size;
+	for (i = 0; i < adapter->rss_lut_size; i++)
+		adapter->rss_lut[i] = i % adapter->num_active_queues;
 }
 
 /**
@@ -1451,42 +1320,25 @@
  **/
 static int i40evf_init_rss(struct i40evf_adapter *adapter)
 {
-	struct i40e_vsi *vsi = &adapter->vsi;
 	struct i40e_hw *hw = &adapter->hw;
-	u8 seed[I40EVF_HKEY_ARRAY_SIZE];
-	u64 hena;
-	u8 *lut;
 	int ret;
 
-	/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
-	if (adapter->vf_res->vf_offload_flags &
-					I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
-		hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
-	else
-		hena = I40E_DEFAULT_RSS_HENA;
-	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
-	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+	if (!RSS_PF(adapter)) {
+		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+		if (adapter->vf_res->vf_offload_flags &
+		    I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+			adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+		else
+			adapter->hena = I40E_DEFAULT_RSS_HENA;
 
-	lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
-	if (!lut)
-		return -ENOMEM;
+		wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
+		wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+	}
 
-	/* Use user configured lut if there is one, otherwise use default */
-	if (vsi->rss_lut_user)
-		memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE);
-	else
-		i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE,
-				    adapter->num_active_queues);
+	i40evf_fill_rss_lut(adapter);
 
-	/* Use user configured hash key if there is one, otherwise
-	 * user default.
-	 */
-	if (vsi->rss_hkey_user)
-		memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE);
-	else
-		netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
-	ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
-	kfree(lut);
+	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
+	ret = i40evf_config_rss(adapter);
 
 	return ret;
 }
@@ -1507,7 +1359,7 @@
 	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
 				     GFP_KERNEL);
 	if (!adapter->q_vectors)
-		goto err_out;
+		return -ENOMEM;
 
 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
 		q_vector = &adapter->q_vectors[q_idx];
@@ -1519,15 +1371,6 @@
 	}
 
 	return 0;
-
-err_out:
-	while (q_idx) {
-		q_idx--;
-		q_vector = &adapter->q_vectors[q_idx];
-		netif_napi_del(&q_vector->napi);
-	}
-	kfree(adapter->q_vectors);
-	return -ENOMEM;
 }
 
 /**
@@ -1610,19 +1453,16 @@
 }
 
 /**
- * i40evf_clear_rss_config_user - Clear user configurations of RSS
- * @vsi: Pointer to VSI structure
+ * i40evf_free_rss - Free memory used by RSS structs
+ * @adapter: board private structure
  **/
-static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi)
+static void i40evf_free_rss(struct i40evf_adapter *adapter)
 {
-	if (!vsi)
-		return;
+	kfree(adapter->rss_key);
+	adapter->rss_key = NULL;
 
-	kfree(vsi->rss_hkey_user);
-	vsi->rss_hkey_user = NULL;
-
-	kfree(vsi->rss_lut_user);
-	vsi->rss_lut_user = NULL;
+	kfree(adapter->rss_lut);
+	adapter->rss_lut = NULL;
 }
 
 /**
@@ -1756,6 +1596,39 @@
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
 		goto watchdog_done;
 	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
+		i40evf_get_hena(adapter);
+		goto watchdog_done;
+	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
+		i40evf_set_hena(adapter);
+		goto watchdog_done;
+	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
+		i40evf_set_rss_key(adapter);
+		goto watchdog_done;
+	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
+		i40evf_set_rss_lut(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
+		i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
+				       I40E_FLAG_VF_MULTICAST_PROMISC);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
+		i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
+		goto watchdog_done;
+	}
+
+	if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
+	    (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+		i40evf_set_promiscuous(adapter, 0);
+		goto watchdog_done;
+	}
 
 	if (adapter->state == __I40EVF_RUNNING)
 		i40evf_request_stats(adapter);
@@ -2003,6 +1876,8 @@
 
 	/* check for error indications */
 	val = rd32(hw, hw->aq.arq.len);
+	if (val == 0xdeadbeef) /* indicates device in reset */
+		goto freedom;
 	oldval = val;
 	if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
 		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
@@ -2259,6 +2134,28 @@
 	return 0;
 }
 
+#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
+			      NETIF_F_HW_VLAN_CTAG_RX |\
+			      NETIF_F_HW_VLAN_CTAG_FILTER)
+
+/**
+ * i40evf_fix_features - fix up the netdev feature bits
+ * @netdev: our net device
+ * @features: desired feature bits
+ *
+ * Returns fixed-up feature bits
+ **/
+static netdev_features_t i40evf_fix_features(struct net_device *netdev,
+					     netdev_features_t features)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	features &= ~I40EVF_VLAN_FEATURES;
+	if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
+		features |= I40EVF_VLAN_FEATURES;
+	return features;
+}
+
 static const struct net_device_ops i40evf_netdev_ops = {
 	.ndo_open		= i40evf_open,
 	.ndo_stop		= i40evf_close,
@@ -2271,6 +2168,7 @@
 	.ndo_tx_timeout		= i40evf_tx_timeout,
 	.ndo_vlan_rx_add_vid	= i40evf_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= i40evf_vlan_rx_kill_vid,
+	.ndo_fix_features	= i40evf_fix_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= i40evf_netpoll,
 #endif
@@ -2307,57 +2205,61 @@
  **/
 int i40evf_process_config(struct i40evf_adapter *adapter)
 {
+	struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
 	struct net_device *netdev = adapter->netdev;
+	struct i40e_vsi *vsi = &adapter->vsi;
 	int i;
 
 	/* got VF config message back from PF, now we can parse it */
-	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
-		if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
-			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+	for (i = 0; i < vfres->num_vsis; i++) {
+		if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+			adapter->vsi_res = &vfres->vsi_res[i];
 	}
 	if (!adapter->vsi_res) {
 		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
 		return -ENODEV;
 	}
 
-	if (adapter->vf_res->vf_offload_flags
-	    & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
-		netdev->vlan_features = netdev->features &
-					~(NETIF_F_HW_VLAN_CTAG_TX |
-					  NETIF_F_HW_VLAN_CTAG_RX |
-					  NETIF_F_HW_VLAN_CTAG_FILTER);
-		netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
-				    NETIF_F_HW_VLAN_CTAG_RX |
-				    NETIF_F_HW_VLAN_CTAG_FILTER;
-	}
-	netdev->features |= NETIF_F_HIGHDMA |
-			    NETIF_F_SG |
-			    NETIF_F_IP_CSUM |
-			    NETIF_F_SCTP_CRC |
-			    NETIF_F_IPV6_CSUM |
-			    NETIF_F_TSO |
-			    NETIF_F_TSO6 |
-			    NETIF_F_TSO_ECN |
-			    NETIF_F_GSO_GRE	       |
-			    NETIF_F_GSO_UDP_TUNNEL |
-			    NETIF_F_RXCSUM |
-			    NETIF_F_GRO;
+	netdev->hw_enc_features |= NETIF_F_SG			|
+				   NETIF_F_IP_CSUM		|
+				   NETIF_F_IPV6_CSUM		|
+				   NETIF_F_HIGHDMA		|
+				   NETIF_F_SOFT_FEATURES	|
+				   NETIF_F_TSO			|
+				   NETIF_F_TSO_ECN		|
+				   NETIF_F_TSO6			|
+				   NETIF_F_GSO_GRE		|
+				   NETIF_F_GSO_GRE_CSUM		|
+				   NETIF_F_GSO_IPIP		|
+				   NETIF_F_GSO_SIT		|
+				   NETIF_F_GSO_UDP_TUNNEL	|
+				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
+				   NETIF_F_GSO_PARTIAL		|
+				   NETIF_F_SCTP_CRC		|
+				   NETIF_F_RXHASH		|
+				   NETIF_F_RXCSUM		|
+				   0;
 
-	netdev->hw_enc_features |= NETIF_F_IP_CSUM	       |
-				   NETIF_F_IPV6_CSUM	       |
-				   NETIF_F_TSO		       |
-				   NETIF_F_TSO6		       |
-				   NETIF_F_TSO_ECN	       |
-				   NETIF_F_GSO_GRE	       |
-				   NETIF_F_GSO_UDP_TUNNEL      |
-				   NETIF_F_GSO_UDP_TUNNEL_CSUM;
+	if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
+		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
-	if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)
-		netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
 
-	/* copy netdev features into list of user selectable features */
-	netdev->hw_features |= netdev->features;
-	netdev->hw_features &= ~NETIF_F_RXCSUM;
+	/* record features VLANs can make use of */
+	netdev->vlan_features |= netdev->hw_enc_features |
+				 NETIF_F_TSO_MANGLEID;
+
+	/* Write features and hw_features separately to avoid polluting
+	 * with, or dropping, features that are set when we registered.
+	 */
+	netdev->hw_features |= netdev->hw_enc_features;
+
+	netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
+	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+
+	/* disable VLAN features if not supported */
+	if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
+		netdev->features ^= I40EVF_VLAN_FEATURES;
 
 	adapter->vsi.id = adapter->vsi_res->vsi_id;
 
@@ -2368,8 +2270,16 @@
 				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
 	adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
 				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
-	adapter->vsi.netdev = adapter->netdev;
-	adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
+	vsi->netdev = adapter->netdev;
+	vsi->qs_handle = adapter->vsi_res->qset_handle;
+	if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		adapter->rss_key_size = vfres->rss_key_size;
+		adapter->rss_lut_size = vfres->rss_lut_size;
+	} else {
+		adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
+		adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
+	}
+
 	return 0;
 }
 
@@ -2502,11 +2412,6 @@
 	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 
 	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
-	adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
-	adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE;
-
-	/* Default to single buffer rx, can be changed through ethtool. */
-	adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
 
 	netdev->netdev_ops = &i40evf_netdev_ops;
 	i40evf_set_ethtool_ops(netdev);
@@ -2565,6 +2470,11 @@
 	set_bit(__I40E_DOWN, &adapter->vsi.state);
 	i40evf_misc_irq_enable(adapter);
 
+	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
+	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
+	if (!adapter->rss_key || !adapter->rss_lut)
+		goto err_mem;
+
 	if (RSS_AQ(adapter)) {
 		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
 		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
@@ -2575,7 +2485,8 @@
 restart:
 	schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
 	return;
-
+err_mem:
+	i40evf_free_rss(adapter);
 err_register:
 	i40evf_free_misc_irq(adapter);
 err_sw_init:
@@ -2838,11 +2749,11 @@
 	adapter->state = __I40EVF_REMOVE;
 	adapter->aq_required = 0;
 	i40evf_request_reset(adapter);
-	msleep(20);
+	msleep(50);
 	/* If the FW isn't responding, kick it once, but only once. */
 	if (!i40evf_asq_done(hw)) {
 		i40evf_request_reset(adapter);
-		msleep(20);
+		msleep(50);
 	}
 
 	if (adapter->msix_entries) {
@@ -2857,8 +2768,7 @@
 
 	flush_scheduled_work();
 
-	/* Clear user configurations for RSS */
-	i40evf_clear_rss_config_user(&adapter->vsi);
+	i40evf_free_rss(adapter);
 
 	if (hw->aq.asq.count)
 		i40evf_shutdown_adminq(hw);
@@ -2869,7 +2779,6 @@
 
 	iounmap(hw->hw_addr);
 	pci_release_regions(pdev);
-
 	i40evf_free_all_tx_resources(adapter);
 	i40evf_free_all_rx_resources(adapter);
 	i40evf_free_queues(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 488e738..f134456 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -270,10 +270,6 @@
 		vqpi->rxq.max_pkt_size = adapter->netdev->mtu
 					+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
 		vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
-		if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
-			vqpi->rxq.splithdr_enabled = true;
-			vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE;
-		}
 		vqpi++;
 	}
 
@@ -645,6 +641,7 @@
 void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
 {
 	struct i40e_virtchnl_promisc_info vpi;
+	int promisc_all;
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
@@ -652,6 +649,27 @@
 			adapter->current_op);
 		return;
 	}
+
+	promisc_all = I40E_FLAG_VF_UNICAST_PROMISC |
+		      I40E_FLAG_VF_MULTICAST_PROMISC;
+	if ((flags & promisc_all) == promisc_all) {
+		adapter->flags |= I40EVF_FLAG_PROMISC_ON;
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
+		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+	}
+
+	if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) {
+		adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+	}
+
+	if (!flags) {
+		adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+	}
+
 	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
 	vpi.vsi_id = adapter->vsi_res->vsi_id;
 	vpi.flags = flags;
@@ -681,6 +699,115 @@
 		/* if the request failed, don't lock out others */
 		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 }
+
+/**
+ * i40evf_get_hena
+ * @adapter: adapter structure
+ *
+ * Request hash enable capabilities from PF
+ **/
+void i40evf_get_hena(struct i40evf_adapter *adapter)
+{
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+			   NULL, 0);
+}
+
+/**
+ * i40evf_set_hena
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash capabilities
+ **/
+void i40evf_set_hena(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_rss_hena vrh;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	vrh.hena = adapter->hena;
+	adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+			   (u8 *)&vrh, sizeof(vrh));
+}
+
+/**
+ * i40evf_set_rss_key
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash key
+ **/
+void i40evf_set_rss_key(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_rss_key *vrk;
+	int len;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	len = sizeof(struct i40e_virtchnl_rss_key) +
+	      (adapter->rss_key_size * sizeof(u8)) - 1;
+	vrk = kzalloc(len, GFP_KERNEL);
+	if (!vrk)
+		return;
+	vrk->vsi_id = adapter->vsi.id;
+	vrk->key_len = adapter->rss_key_size;
+	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
+
+	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+			   (u8 *)vrk, len);
+	kfree(vrk);
+}
+
+/**
+ * i40evf_set_rss_lut
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS lookup table
+ **/
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
+{
+	struct i40e_virtchnl_rss_lut *vrl;
+	int len;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+	len = sizeof(struct i40e_virtchnl_rss_lut) +
+	      (adapter->rss_lut_size * sizeof(u8)) - 1;
+	vrl = kzalloc(len, GFP_KERNEL);
+	if (!vrl)
+		return;
+	vrl->vsi_id = adapter->vsi.id;
+	vrl->lut_entries = adapter->rss_lut_size;
+	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
+	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+			   (u8 *)vrl, len);
+	kfree(vrl);
+}
+
 /**
  * i40evf_request_reset
  * @adapter: adapter structure
@@ -820,6 +947,16 @@
 		if (v_opcode != adapter->current_op)
 			return;
 		break;
+	case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
+		struct i40e_virtchnl_rss_hena *vrh =
+			(struct i40e_virtchnl_rss_hena *)msg;
+		if (msglen == sizeof(*vrh))
+			adapter->hena = vrh->hena;
+		else
+			dev_warn(&adapter->pdev->dev,
+				 "Invalid message %d from PF\n", v_opcode);
+		}
+		break;
 	default:
 		if (v_opcode != adapter->current_op)
 			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a23aa67..a61447f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -361,7 +361,7 @@
 	if (size > 15)
 		size = 15;
 
-	nvm->word_size = 1 << size;
+	nvm->word_size = BIT(size);
 	nvm->opcode_bits = 8;
 	nvm->delay_usec = 1;
 
@@ -380,7 +380,7 @@
 				    16 : 8;
 		break;
 	}
-	if (nvm->word_size == (1 << 15))
+	if (nvm->word_size == BIT(15))
 		nvm->page_size = 128;
 
 	nvm->type = e1000_nvm_eeprom_spi;
@@ -391,7 +391,7 @@
 	nvm->ops.write = igb_write_nvm_spi;
 	nvm->ops.validate = igb_validate_nvm_checksum;
 	nvm->ops.update = igb_update_nvm_checksum;
-	if (nvm->word_size < (1 << 15))
+	if (nvm->word_size < BIT(15))
 		nvm->ops.read = igb_read_nvm_eerd;
 	else
 		nvm->ops.read = igb_read_nvm_spi;
@@ -2107,7 +2107,7 @@
 		/* The PF can spoof - it has to in order to
 		 * support emulation mode NICs
 		 */
-		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+		reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS));
 	} else {
 		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
 			     E1000_DTXSWC_VLAN_SPOOF_MASK);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index de8805a..199ff98 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -168,16 +168,16 @@
 #define E1000_DCA_CTRL_DCA_MODE_CB2     0x02 /* DCA Mode CB2 */
 
 #define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
 
 #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
 
 /* Additional DCA related definitions, note change in position of CPUID */
 #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
@@ -186,8 +186,8 @@
 #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
 
 /* ETQF register bit definitions */
-#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
-#define E1000_ETQF_1588            (1 << 30)
+#define E1000_ETQF_FILTER_ENABLE   BIT(26)
+#define E1000_ETQF_1588            BIT(30)
 
 /* FTQF register bit definitions */
 #define E1000_FTQF_VF_BP               0x00008000
@@ -203,16 +203,16 @@
 #define E1000_DTXSWC_VLAN_SPOOF_MASK  0x0000FF00 /* Per VF VLAN spoof control */
 #define E1000_DTXSWC_LLE_MASK         0x00FF0000 /* Per VF Local LB enables */
 #define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31)  /* global VF LB enable */
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31)  /* global VF LB enable */
 
 /* Easy defines for setting default pool, would normally be left a zero */
 #define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
 #define E1000_VT_CTL_DEFAULT_POOL_MASK  (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
 
 /* Other useful VMD_CTL register defines */
-#define E1000_VT_CTL_IGNORE_MAC         (1 << 28)
-#define E1000_VT_CTL_DISABLE_DEF_POOL   (1 << 29)
-#define E1000_VT_CTL_VM_REPL_EN         (1 << 30)
+#define E1000_VT_CTL_IGNORE_MAC         BIT(28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL   BIT(29)
+#define E1000_VT_CTL_VM_REPL_EN         BIT(30)
 
 /* Per VM Offload register setup */
 #define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
@@ -252,7 +252,7 @@
 #define E1000_DTXCTL_MDP_EN     0x0020
 #define E1000_DTXCTL_SPOOF_INT  0x0040
 
-#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT	(1 << 14)
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT	BIT(14)
 
 #define ALL_QUEUES   0xFFFF
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index e9f23ee..2997c44 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -530,65 +530,65 @@
 
 /* Time Sync Interrupt Cause/Mask Register Bits */
 
-#define TSINTR_SYS_WRAP  (1 << 0) /* SYSTIM Wrap around. */
-#define TSINTR_TXTS      (1 << 1) /* Transmit Timestamp. */
-#define TSINTR_RXTS      (1 << 2) /* Receive Timestamp. */
-#define TSINTR_TT0       (1 << 3) /* Target Time 0 Trigger. */
-#define TSINTR_TT1       (1 << 4) /* Target Time 1 Trigger. */
-#define TSINTR_AUTT0     (1 << 5) /* Auxiliary Timestamp 0 Taken. */
-#define TSINTR_AUTT1     (1 << 6) /* Auxiliary Timestamp 1 Taken. */
-#define TSINTR_TADJ      (1 << 7) /* Time Adjust Done. */
+#define TSINTR_SYS_WRAP  BIT(0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS      BIT(1) /* Transmit Timestamp. */
+#define TSINTR_RXTS      BIT(2) /* Receive Timestamp. */
+#define TSINTR_TT0       BIT(3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1       BIT(4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0     BIT(5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1     BIT(6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ      BIT(7) /* Time Adjust Done. */
 
 #define TSYNC_INTERRUPTS TSINTR_TXTS
 #define E1000_TSICR_TXTS TSINTR_TXTS
 
 /* TSAUXC Configuration Bits */
-#define TSAUXC_EN_TT0    (1 << 0)  /* Enable target time 0. */
-#define TSAUXC_EN_TT1    (1 << 1)  /* Enable target time 1. */
-#define TSAUXC_EN_CLK0   (1 << 2)  /* Enable Configurable Frequency Clock 0. */
-#define TSAUXC_SAMP_AUT0 (1 << 3)  /* Latch SYSTIML/H into AUXSTMPL/0. */
-#define TSAUXC_ST0       (1 << 4)  /* Start Clock 0 Toggle on Target Time 0. */
-#define TSAUXC_EN_CLK1   (1 << 5)  /* Enable Configurable Frequency Clock 1. */
-#define TSAUXC_SAMP_AUT1 (1 << 6)  /* Latch SYSTIML/H into AUXSTMPL/1. */
-#define TSAUXC_ST1       (1 << 7)  /* Start Clock 1 Toggle on Target Time 1. */
-#define TSAUXC_EN_TS0    (1 << 8)  /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT0     (1 << 9)  /* Auxiliary Timestamp Taken. */
-#define TSAUXC_EN_TS1    (1 << 10) /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT1     (1 << 11) /* Auxiliary Timestamp Taken. */
-#define TSAUXC_PLSG      (1 << 17) /* Generate a pulse. */
-#define TSAUXC_DISABLE   (1 << 31) /* Disable SYSTIM Count Operation. */
+#define TSAUXC_EN_TT0    BIT(0)  /* Enable target time 0. */
+#define TSAUXC_EN_TT1    BIT(1)  /* Enable target time 1. */
+#define TSAUXC_EN_CLK0   BIT(2)  /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 BIT(3)  /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0       BIT(4)  /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1   BIT(5)  /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 BIT(6)  /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1       BIT(7)  /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0    BIT(8)  /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0     BIT(9)  /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1    BIT(10) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT1     BIT(11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG      BIT(17) /* Generate a pulse. */
+#define TSAUXC_DISABLE   BIT(31) /* Disable SYSTIM Count Operation. */
 
 /* SDP Configuration Bits */
-#define AUX0_SEL_SDP0    (0 << 0)  /* Assign SDP0 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP1    (1 << 0)  /* Assign SDP1 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP2    (2 << 0)  /* Assign SDP2 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP3    (3 << 0)  /* Assign SDP3 to auxiliary time stamp 0. */
-#define AUX0_TS_SDP_EN   (1 << 2)  /* Enable auxiliary time stamp trigger 0. */
-#define AUX1_SEL_SDP0    (0 << 3)  /* Assign SDP0 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP1    (1 << 3)  /* Assign SDP1 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP2    (2 << 3)  /* Assign SDP2 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP3    (3 << 3)  /* Assign SDP3 to auxiliary time stamp 1. */
-#define AUX1_TS_SDP_EN   (1 << 5)  /* Enable auxiliary time stamp trigger 1. */
-#define TS_SDP0_SEL_TT0  (0 << 6)  /* Target time 0 is output on SDP0. */
-#define TS_SDP0_SEL_TT1  (1 << 6)  /* Target time 1 is output on SDP0. */
-#define TS_SDP0_SEL_FC0  (2 << 6)  /* Freq clock  0 is output on SDP0. */
-#define TS_SDP0_SEL_FC1  (3 << 6)  /* Freq clock  1 is output on SDP0. */
-#define TS_SDP0_EN       (1 << 8)  /* SDP0 is assigned to Tsync. */
-#define TS_SDP1_SEL_TT0  (0 << 9)  /* Target time 0 is output on SDP1. */
-#define TS_SDP1_SEL_TT1  (1 << 9)  /* Target time 1 is output on SDP1. */
-#define TS_SDP1_SEL_FC0  (2 << 9)  /* Freq clock  0 is output on SDP1. */
-#define TS_SDP1_SEL_FC1  (3 << 9)  /* Freq clock  1 is output on SDP1. */
-#define TS_SDP1_EN       (1 << 11) /* SDP1 is assigned to Tsync. */
-#define TS_SDP2_SEL_TT0  (0 << 12) /* Target time 0 is output on SDP2. */
-#define TS_SDP2_SEL_TT1  (1 << 12) /* Target time 1 is output on SDP2. */
-#define TS_SDP2_SEL_FC0  (2 << 12) /* Freq clock  0 is output on SDP2. */
-#define TS_SDP2_SEL_FC1  (3 << 12) /* Freq clock  1 is output on SDP2. */
-#define TS_SDP2_EN       (1 << 14) /* SDP2 is assigned to Tsync. */
-#define TS_SDP3_SEL_TT0  (0 << 15) /* Target time 0 is output on SDP3. */
-#define TS_SDP3_SEL_TT1  (1 << 15) /* Target time 1 is output on SDP3. */
-#define TS_SDP3_SEL_FC0  (2 << 15) /* Freq clock  0 is output on SDP3. */
-#define TS_SDP3_SEL_FC1  (3 << 15) /* Freq clock  1 is output on SDP3. */
-#define TS_SDP3_EN       (1 << 17) /* SDP3 is assigned to Tsync. */
+#define AUX0_SEL_SDP0    (0u << 0)  /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1    (1u << 0)  /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2    (2u << 0)  /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3    (3u << 0)  /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN   (1u << 2)  /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0    (0u << 3)  /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1    (1u << 3)  /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2    (2u << 3)  /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3    (3u << 3)  /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN   (1u << 5)  /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0  (0u << 6)  /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1  (1u << 6)  /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0  (2u << 6)  /* Freq clock  0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1  (3u << 6)  /* Freq clock  1 is output on SDP0. */
+#define TS_SDP0_EN       (1u << 8)  /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0  (0u << 9)  /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1  (1u << 9)  /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0  (2u << 9)  /* Freq clock  0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1  (3u << 9)  /* Freq clock  1 is output on SDP1. */
+#define TS_SDP1_EN       (1u << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0  (0u << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1  (1u << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0  (2u << 12) /* Freq clock  0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1  (3u << 12) /* Freq clock  1 is output on SDP2. */
+#define TS_SDP2_EN       (1u << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0  (0u << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1  (1u << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0  (2u << 15) /* Freq clock  0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1  (3u << 15) /* Freq clock  1 is output on SDP3. */
+#define TS_SDP3_EN       (1u << 17) /* SDP3 is assigned to Tsync. */
 
 #define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
 #define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
@@ -997,8 +997,8 @@
 #define E1000_M88E1543_FIBER_CTRL	0x0
 #define E1000_EEE_ADV_DEV_I354		7
 #define E1000_EEE_ADV_ADDR_I354		60
-#define E1000_EEE_ADV_100_SUPPORTED	(1 << 1)   /* 100BaseTx EEE Supported */
-#define E1000_EEE_ADV_1000_SUPPORTED	(1 << 2)   /* 1000BaseT EEE Supported */
+#define E1000_EEE_ADV_100_SUPPORTED	BIT(1)   /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED	BIT(2)   /* 1000BaseT EEE Supported */
 #define E1000_PCS_STATUS_DEV_I354	3
 #define E1000_PCS_STATUS_ADDR_I354	1
 #define E1000_PCS_STATUS_TX_LPI_IND	0x0200     /* Tx in LPI state */
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 07cf4fe..5010e22 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -212,7 +212,7 @@
 	 *    bits[4-0]:  which bit in the register
 	 */
 	regidx = vlan / 32;
-	vfta_delta = 1 << (vlan % 32);
+	vfta_delta = BIT(vlan % 32);
 	vfta = adapter->shadow_vfta[regidx];
 
 	/* vfta_delta represents the difference between the current value
@@ -243,12 +243,12 @@
 	bits = rd32(E1000_VLVF(vlvf_index));
 
 	/* set the pool bit */
-	bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+	bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
 	if (vlan_on)
 		goto vlvf_update;
 
 	/* clear the pool bit */
-	bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+	bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
 
 	if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
 		/* Clear VFTA first, then disable VLVF.  Otherwise
@@ -427,7 +427,7 @@
 
 	mta = array_rd32(E1000_MTA, hash_reg);
 
-	mta |= (1 << hash_bit);
+	mta |= BIT(hash_bit);
 
 	array_wr32(E1000_MTA, hash_reg, mta);
 	wrfl();
@@ -527,7 +527,7 @@
 		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
 		hash_bit = hash_value & 0x1F;
 
-		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
 		mc_addr_list += (ETH_ALEN);
 	}
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 10f5c9e..00e263f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -302,9 +302,9 @@
 	u32 vflre = rd32(E1000_VFLRE);
 	s32 ret_val = -E1000_ERR_MBX;
 
-	if (vflre & (1 << vf_number)) {
+	if (vflre & BIT(vf_number)) {
 		ret_val = 0;
-		wr32(E1000_VFLRE, (1 << vf_number));
+		wr32(E1000_VFLRE, BIT(vf_number));
 		hw->mbx.stats.rsts++;
 	}
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index e8280d0..3582c5c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -72,7 +72,7 @@
 	u32 eecd = rd32(E1000_EECD);
 	u32 mask;
 
-	mask = 0x01 << (count - 1);
+	mask = 1u << (count - 1);
 	if (nvm->type == e1000_nvm_eeprom_spi)
 		eecd |= E1000_EECD_DO;
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 969a6dd..9b622b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -91,10 +91,10 @@
 
 #define I82580_ADDR_REG                   16
 #define I82580_CFG_REG                    22
-#define I82580_CFG_ASSERT_CRS_ON_TX       (1 << 15)
-#define I82580_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
+#define I82580_CFG_ASSERT_CRS_ON_TX       BIT(15)
+#define I82580_CFG_ENABLE_DOWNSHIFT       (3u << 10) /* auto downshift 100/10 */
 #define I82580_CTRL_REG                   23
-#define I82580_CTRL_DOWNSHIFT_MASK        (7 << 10)
+#define I82580_CTRL_DOWNSHIFT_MASK        (7u << 10)
 
 /* 82580 specific PHY registers */
 #define I82580_PHY_CTRL_2            18
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9413fa6..b9609af 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -91,6 +91,14 @@
 #define NVM_COMB_VER_OFF	0x0083
 #define NVM_COMB_VER_PTR	0x003d
 
+/* Transmit and receive latency (for PTP timestamps) */
+#define IGB_I210_TX_LATENCY_10		9542
+#define IGB_I210_TX_LATENCY_100		1024
+#define IGB_I210_TX_LATENCY_1000	178
+#define IGB_I210_RX_LATENCY_10		20662
+#define IGB_I210_RX_LATENCY_100		2213
+#define IGB_I210_RX_LATENCY_1000	448
+
 struct vf_data_storage {
 	unsigned char vf_mac_addresses[ETH_ALEN];
 	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -169,7 +177,7 @@
  * maintain a power of two alignment we have to limit ourselves to 32K.
  */
 #define IGB_MAX_TXD_PWR	15
-#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)
+#define IGB_MAX_DATA_PER_TXD	(1u << IGB_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
@@ -466,21 +474,21 @@
 	u16 eee_advert;
 };
 
-#define IGB_FLAG_HAS_MSI		(1 << 0)
-#define IGB_FLAG_DCA_ENABLED		(1 << 1)
-#define IGB_FLAG_QUAD_PORT_A		(1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS		(1 << 3)
-#define IGB_FLAG_DMAC			(1 << 4)
-#define IGB_FLAG_PTP			(1 << 5)
-#define IGB_FLAG_RSS_FIELD_IPV4_UDP	(1 << 6)
-#define IGB_FLAG_RSS_FIELD_IPV6_UDP	(1 << 7)
-#define IGB_FLAG_WOL_SUPPORTED		(1 << 8)
-#define IGB_FLAG_NEED_LINK_UPDATE	(1 << 9)
-#define IGB_FLAG_MEDIA_RESET		(1 << 10)
-#define IGB_FLAG_MAS_CAPABLE		(1 << 11)
-#define IGB_FLAG_MAS_ENABLE		(1 << 12)
-#define IGB_FLAG_HAS_MSIX		(1 << 13)
-#define IGB_FLAG_EEE			(1 << 14)
+#define IGB_FLAG_HAS_MSI		BIT(0)
+#define IGB_FLAG_DCA_ENABLED		BIT(1)
+#define IGB_FLAG_QUAD_PORT_A		BIT(2)
+#define IGB_FLAG_QUEUE_PAIRS		BIT(3)
+#define IGB_FLAG_DMAC			BIT(4)
+#define IGB_FLAG_PTP			BIT(5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP	BIT(6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP	BIT(7)
+#define IGB_FLAG_WOL_SUPPORTED		BIT(8)
+#define IGB_FLAG_NEED_LINK_UPDATE	BIT(9)
+#define IGB_FLAG_MEDIA_RESET		BIT(10)
+#define IGB_FLAG_MAS_CAPABLE		BIT(11)
+#define IGB_FLAG_MAS_ENABLE		BIT(12)
+#define IGB_FLAG_HAS_MSIX		BIT(13)
+#define IGB_FLAG_EEE			BIT(14)
 #define IGB_FLAG_VLAN_PROMISC		BIT(15)
 
 /* Media Auto Sense */
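
The igb flag and PHY-register hunks above replace open-coded (1 << n) masks with the kernel's BIT() helper, which shifts an unsigned long and therefore cannot shift a signed 1 into the sign bit for high bit positions; multi-bit fields keep an explicit unsigned shift such as (7u << 10), and the igb_ptp.c hunk further down uses GENMASK() for a contiguous mask. A rough userspace approximation of the two helpers, only to show the values they produce (the real definitions live in include/linux/bitops.h; the shift of 24 implied for the TIMINCA mask is an assumption for illustration):

	#include <stdio.h>

	/* Userspace stand-ins for the kernel helpers; they assume
	 * unsigned long is at least 32 bits wide.
	 */
	#define BIT(nr)		(1UL << (nr))
	#define GENMASK(h, l) \
		(((~0UL) << (l)) & (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

	int main(void)
	{
		/* IGB_FLAG_EEE from the hunk above: only bit 14 set */
		printf("BIT(14)        = 0x%lx\n", BIT(14));
		/* INCVALUE_82576_MASK-style mask covering bits 23..0 */
		printf("GENMASK(23, 0) = 0x%lx\n", GENMASK(23, 0));
		return 0;
	}
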
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7982243..64e91c5 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -466,7 +466,7 @@
 
 	memset(p, 0, IGB_REGS_LEN * sizeof(u32));
 
-	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
 
 	/* General Registers */
 	regs_buff[0] = rd32(E1000_CTRL);
@@ -1448,7 +1448,7 @@
 	/* Test each interrupt */
 	for (; i < 31; i++) {
 		/* Interrupt to test */
-		mask = 1 << i;
+		mask = BIT(i);
 
 		if (!(mask & ics_mask))
 			continue;
@@ -2411,19 +2411,19 @@
 			SOF_TIMESTAMPING_RAW_HARDWARE;
 
 		info->tx_types =
-			(1 << HWTSTAMP_TX_OFF) |
-			(1 << HWTSTAMP_TX_ON);
+			BIT(HWTSTAMP_TX_OFF) |
+			BIT(HWTSTAMP_TX_ON);
 
-		info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+		info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
 
 		/* 82576 does not support timestamping all packets. */
 		if (adapter->hw.mac.type >= e1000_82580)
-			info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+			info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
 		else
 			info->rx_filters |=
-				(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-				(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-				(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+				BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+				BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+				BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 		return 0;
 	default:
@@ -2831,7 +2831,8 @@
 
 	/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
 	for (i = 0; i < last_word - first_word + 1; i++) {
-		status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
+		status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2,
+					      &dataword[i]);
 		if (status) {
 			/* Error occurred while reading module */
 			kfree(dataword);
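
The igb_ethtool.c hunk above fixes the "ethtool -m" module EEPROM dump: the SFF-8079/SFF-8472 data is byte-addressed on the I2C bus, while the loop walks 16-bit words, so the word index has to be doubled before it is handed to igb_read_phy_reg_i2c() as an offset. A trivial standalone illustration of that index-to-offset mapping (the numbers are made up):

	#include <stdio.h>

	int main(void)
	{
		/* e.g. a request starting at byte offset 0x50 of the module map */
		unsigned int first_word = 0x50 >> 1;
		unsigned int words = 4;
		unsigned int i;

		for (i = 0; i < words; i++)
			printf("word index %u -> byte offset %u\n",
			       first_word + i, (first_word + i) * 2);
		return 0;
	}
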
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 55a1405c..2172769 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -50,6 +50,7 @@
 #include <linux/aer.h>
 #include <linux/prefetch.h>
 #include <linux/pm_runtime.h>
+#include <linux/etherdevice.h>
 #ifdef CONFIG_IGB_DCA
 #include <linux/dca.h>
 #endif
@@ -150,7 +151,7 @@
 static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
 static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_tx_irq(struct igb_q_vector *);
+static bool igb_clean_tx_irq(struct igb_q_vector *, int);
 static int igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
@@ -382,7 +383,7 @@
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
 		pr_info("Device Name     state            trans_start      last_rx\n");
 		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
-			netdev->state, netdev->trans_start, netdev->last_rx);
+			netdev->state, dev_trans_start(netdev), netdev->last_rx);
 	}
 
 	/* Print Registers */
@@ -835,7 +836,7 @@
 			igb_write_ivar(hw, msix_vector,
 				       tx_queue & 0x7,
 				       ((tx_queue & 0x8) << 1) + 8);
-		q_vector->eims_value = 1 << msix_vector;
+		q_vector->eims_value = BIT(msix_vector);
 		break;
 	case e1000_82580:
 	case e1000_i350:
@@ -856,7 +857,7 @@
 			igb_write_ivar(hw, msix_vector,
 				       tx_queue >> 1,
 				       ((tx_queue & 0x1) << 4) + 8);
-		q_vector->eims_value = 1 << msix_vector;
+		q_vector->eims_value = BIT(msix_vector);
 		break;
 	default:
 		BUG();
@@ -918,7 +919,7 @@
 		     E1000_GPIE_NSICR);
 
 		/* enable msix_other interrupt */
-		adapter->eims_other = 1 << vector;
+		adapter->eims_other = BIT(vector);
 		tmp = (vector++ | E1000_IVAR_VALID) << 8;
 
 		wr32(E1000_IVAR_MISC, tmp);
@@ -2086,6 +2087,40 @@
 	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
 }
 
+#define IGB_MAX_MAC_HDR_LEN	127
+#define IGB_MAX_NETWORK_HDR_LEN	511
+
+static netdev_features_t
+igb_features_check(struct sk_buff *skb, struct net_device *dev,
+		   netdev_features_t features)
+{
+	unsigned int network_hdr_len, mac_hdr_len;
+
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len >  IGB_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
+
+	return features;
+}
+
 static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open		= igb_open,
 	.ndo_stop		= igb_close,
@@ -2110,7 +2145,7 @@
 	.ndo_fix_features	= igb_fix_features,
 	.ndo_set_features	= igb_set_features,
 	.ndo_fdb_add		= igb_ndo_fdb_add,
-	.ndo_features_check	= passthru_features_check,
+	.ndo_features_check	= igb_features_check,
 };
 
 /**
@@ -2376,39 +2411,44 @@
 			    NETIF_F_TSO6 |
 			    NETIF_F_RXHASH |
 			    NETIF_F_RXCSUM |
-			    NETIF_F_HW_CSUM |
-			    NETIF_F_HW_VLAN_CTAG_RX |
-			    NETIF_F_HW_VLAN_CTAG_TX;
+			    NETIF_F_HW_CSUM;
 
 	if (hw->mac.type >= e1000_82576)
 		netdev->features |= NETIF_F_SCTP_CRC;
 
+#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+				  NETIF_F_GSO_GRE_CSUM | \
+				  NETIF_F_GSO_IPIP | \
+				  NETIF_F_GSO_SIT | \
+				  NETIF_F_GSO_UDP_TUNNEL | \
+				  NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+	netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
+	netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
+
 	/* copy netdev features into list of user selectable features */
-	netdev->hw_features |= netdev->features;
-	netdev->hw_features |= NETIF_F_RXALL;
+	netdev->hw_features |= netdev->features |
+			       NETIF_F_HW_VLAN_CTAG_RX |
+			       NETIF_F_HW_VLAN_CTAG_TX |
+			       NETIF_F_RXALL;
 
 	if (hw->mac.type >= e1000_i350)
 		netdev->hw_features |= NETIF_F_NTUPLE;
 
-	/* set this bit last since it cannot be part of hw_features */
-	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
 
-	netdev->vlan_features |= NETIF_F_SG |
-				 NETIF_F_TSO |
-				 NETIF_F_TSO6 |
-				 NETIF_F_HW_CSUM |
-				 NETIF_F_SCTP_CRC;
-
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
 	netdev->mpls_features |= NETIF_F_HW_CSUM;
-	netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+	netdev->hw_enc_features |= netdev->vlan_features;
+
+	/* set this bit last since it cannot be part of vlan_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
 
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
 
-	if (pci_using_dac) {
-		netdev->features |= NETIF_F_HIGHDMA;
-		netdev->vlan_features |= NETIF_F_HIGHDMA;
-	}
-
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
 	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
@@ -2442,9 +2482,11 @@
 		break;
 	}
 
-	/* copy the MAC address out of the NVM */
-	if (hw->mac.ops.read_mac_addr(hw))
-		dev_err(&pdev->dev, "NVM Read Error\n");
+	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
+		/* copy the MAC address out of the NVM */
+		if (hw->mac.ops.read_mac_addr(hw))
+			dev_err(&pdev->dev, "NVM Read Error\n");
+	}
 
 	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
 
@@ -4061,7 +4103,7 @@
 	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
 		u32 vlvf = rd32(E1000_VLVF(i));
 
-		vlvf |= 1 << pf_id;
+		vlvf |= BIT(pf_id);
 		wr32(E1000_VLVF(i), vlvf);
 	}
 
@@ -4088,7 +4130,7 @@
 	/* guarantee that we don't scrub out management VLAN */
 	vid = adapter->mng_vlan_id;
 	if (vid >= vid_start && vid < vid_end)
-		vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+		vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
 
 	if (!adapter->vfs_allocated_count)
 		goto set_vfta;
@@ -4107,7 +4149,7 @@
 
 		if (vlvf & E1000_VLVF_VLANID_ENABLE) {
 			/* record VLAN ID in VFTA */
-			vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
 
 			/* if PF is part of this then continue */
 			if (test_bit(vid, adapter->active_vlans))
@@ -4115,7 +4157,7 @@
 		}
 
 		/* remove PF from the pool */
-		bits = ~(1 << pf_id);
+		bits = ~BIT(pf_id);
 		bits &= rd32(E1000_VLVF(i));
 		wr32(E1000_VLVF(i), bits);
 	}
@@ -4273,13 +4315,13 @@
 		return;
 
 	for (j = 0; j < adapter->vfs_allocated_count; j++) {
-		if (adapter->wvbr & (1 << j) ||
-		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+		if (adapter->wvbr & BIT(j) ||
+		    adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
 			dev_warn(&adapter->pdev->dev,
 				"Spoof event(s) detected on VF %d\n", j);
 			adapter->wvbr &=
-				~((1 << j) |
-				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+				~(BIT(j) |
+				  BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
 		}
 	}
 }
@@ -4839,9 +4881,18 @@
 		   struct igb_tx_buffer *first,
 		   u8 *hdr_len)
 {
+	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
-	u32 vlan_macip_lens, type_tucmd;
-	u32 mss_l4len_idx, l4len;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -4854,45 +4905,52 @@
 	if (err < 0)
 		return err;
 
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
+
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
 
-	if (first->protocol == htons(ETH_P_IP)) {
-		struct iphdr *iph = ip_hdr(skb);
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-							 iph->daddr, 0,
-							 IPPROTO_TCP,
-							 0);
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+						  csum_unfold(l4.tcp->check)));
 		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+
+		ip.v4->tot_len = 0;
 		first->tx_flags |= IGB_TX_FLAGS_TSO |
 				   IGB_TX_FLAGS_CSUM |
 				   IGB_TX_FLAGS_IPV4;
-	} else if (skb_is_gso_v6(skb)) {
-		ipv6_hdr(skb)->payload_len = 0;
-		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-						       &ipv6_hdr(skb)->daddr,
-						       0, IPPROTO_TCP, 0);
+	} else {
+		ip.v6->payload_len = 0;
 		first->tx_flags |= IGB_TX_FLAGS_TSO |
 				   IGB_TX_FLAGS_CSUM;
 	}
 
-	/* compute header lengths */
-	l4len = tcp_hdrlen(skb);
-	*hdr_len = skb_transport_offset(skb) + l4len;
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+	/* remove payload length from inner checksum */
+	paylen = skb->len - l4_offset;
+	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
 	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
 	/* MSS L4LEN IDX */
-	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
 
 	/* VLAN MACLEN IPLEN */
-	vlan_macip_lens = skb_network_header_len(skb);
-	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens = l4.hdr - ip.hdr;
+	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
 
 	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
@@ -5960,11 +6018,11 @@
 
 	/* create mask for VF and other pools */
 	pool_mask = E1000_VLVF_POOLSEL_MASK;
-	vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+	vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
 
 	/* drop PF from pool bits */
-	pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT +
-			     adapter->vfs_allocated_count));
+	pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
+			     adapter->vfs_allocated_count);
 
 	/* Find the vlan filter for this id */
 	for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
@@ -5987,7 +6045,7 @@
 			goto update_vlvf;
 
 		vid = vlvf & E1000_VLVF_VLANID_MASK;
-		vfta_mask = 1 << (vid % 32);
+		vfta_mask = BIT(vid % 32);
 
 		/* clear bit from VFTA */
 		vfta = adapter->shadow_vfta[vid / 32];
@@ -6024,7 +6082,7 @@
 	return idx;
 }
 
-void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
+static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 bits, pf_id;
@@ -6038,13 +6096,13 @@
 	 * entry other than the PF.
 	 */
 	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
-	bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK;
+	bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
 	bits &= rd32(E1000_VLVF(idx));
 
 	/* Disable the filter so this falls into the default pool. */
 	if (!bits) {
 		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
-			wr32(E1000_VLVF(idx), 1 << pf_id);
+			wr32(E1000_VLVF(idx), BIT(pf_id));
 		else
 			wr32(E1000_VLVF(idx), 0);
 	}
@@ -6228,9 +6286,9 @@
 
 	/* enable transmit and receive for vf */
 	reg = rd32(E1000_VFTE);
-	wr32(E1000_VFTE, reg | (1 << vf));
+	wr32(E1000_VFTE, reg | BIT(vf));
 	reg = rd32(E1000_VFRE);
-	wr32(E1000_VFRE, reg | (1 << vf));
+	wr32(E1000_VFRE, reg | BIT(vf));
 
 	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
 
@@ -6522,13 +6580,14 @@
 		igb_update_dca(q_vector);
 #endif
 	if (q_vector->tx.ring)
-		clean_complete = igb_clean_tx_irq(q_vector);
+		clean_complete = igb_clean_tx_irq(q_vector, budget);
 
 	if (q_vector->rx.ring) {
 		int cleaned = igb_clean_rx_irq(q_vector, budget);
 
 		work_done += cleaned;
-		clean_complete &= (cleaned < budget);
+		if (cleaned >= budget)
+			clean_complete = false;
 	}
 
 	/* If all work not completed, return budget and keep polling */
@@ -6545,10 +6604,11 @@
 /**
  *  igb_clean_tx_irq - Reclaim resources after transmit completes
  *  @q_vector: pointer to q_vector containing needed info
+ *  @napi_budget: Used to determine if we are in netpoll
  *
  *  returns true if ring is completely cleaned
  **/
-static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct igb_ring *tx_ring = q_vector->tx.ring;
@@ -6587,7 +6647,7 @@
 		total_packets += tx_buffer->gso_segs;
 
 		/* free the skb */
-		dev_consume_skb_any(tx_buffer->skb);
+		napi_consume_skb(tx_buffer->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -7574,7 +7634,6 @@
 
 	if (igb_init_interrupt_scheme(adapter, true)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
-		rtnl_unlock();
 		return -ENOMEM;
 	}
 
@@ -7845,11 +7904,13 @@
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rar_low, rar_high;
 
-	/* HW expects these in little endian so we reverse the byte order
-	 * from network order (big endian) to CPU endian
+	/* HW expects these to be in network order when they are plugged
+	 * into the registers which are little endian.  In order to guarantee
+	 * that ordering we need to do an leXX_to_cpup here in order to be
+	 * ready for the byteswap that occurs with writel
 	 */
-	rar_low = le32_to_cpup((__be32 *)(addr));
-	rar_high = le16_to_cpup((__be16 *)(addr + 4));
+	rar_low = le32_to_cpup((__le32 *)(addr));
+	rar_high = le16_to_cpup((__le16 *)(addr + 4));
 
 	/* Indicate to hardware the Address is Valid. */
 	rar_high |= E1000_RAH_AV;
@@ -7921,7 +7982,7 @@
 		/* Calculate the rate factor values to set */
 		rf_int = link_speed / tx_rate;
 		rf_dec = (link_speed - (rf_int * tx_rate));
-		rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
+		rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
 			 tx_rate;
 
 		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
@@ -8011,11 +8072,11 @@
 	reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
 	reg_val = rd32(reg_offset);
 	if (setting)
-		reg_val |= ((1 << vf) |
-			    (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+		reg_val |= (BIT(vf) |
+			    BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
 	else
-		reg_val &= ~((1 << vf) |
-			     (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+		reg_val &= ~(BIT(vf) |
+			     BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
 	wr32(reg_offset, reg_val);
 
 	adapter->vf_data[vf].spoofchk_enabled = setting;
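
The igb_tso() rewrite above no longer recomputes the pseudo-header checksum with csum_tcpudp_magic(); it starts from the checksum the stack already stored for CHECKSUM_PARTIAL packets and removes the payload-length term from it with csum_replace_by_diff(). The arithmetic behind that is 16-bit one's-complement addition, where subtracting a term is the same as adding its complement with end-around carry. A standalone sketch of that identity, using local helpers rather than the kernel csum API:

	#include <stdio.h>
	#include <stdint.h>

	/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
	static uint16_t csum_fold32(uint32_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	/* One's-complement subtraction: a - b is a plus the 16-bit complement of b. */
	static uint16_t csum_sub16(uint16_t a, uint16_t b)
	{
		return csum_fold32((uint32_t)a + (uint16_t)~b);
	}

	int main(void)
	{
		/* Hypothetical pseudo-header terms, host order, for demonstration only */
		uint32_t saddr = 0xc0a80001, daddr = 0xc0a80002;
		uint16_t proto = 6, len = 1480;	/* TCP, header plus payload bytes */

		uint32_t with_len = (saddr >> 16) + (saddr & 0xffff) +
				    (daddr >> 16) + (daddr & 0xffff) + proto + len;
		uint32_t without_len = with_len - len;

		/* Dropping the length term from the folded sum gives the same
		 * result as summing without it in the first place.
		 */
		printf("0x%x == 0x%x\n",
		       csum_sub16(csum_fold32(with_len), len),
		       csum_fold32(without_len));
		return 0;
	}
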
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 22a8a29..f097c5a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -69,9 +69,9 @@
 
 #define IGB_SYSTIM_OVERFLOW_PERIOD	(HZ * 60 * 9)
 #define IGB_PTP_TX_TIMEOUT		(HZ * 15)
-#define INCPERIOD_82576			(1 << E1000_TIMINCA_16NS_SHIFT)
-#define INCVALUE_82576_MASK		((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
-#define INCVALUE_82576			(16 << IGB_82576_TSYNC_SHIFT)
+#define INCPERIOD_82576			BIT(E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK		GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
+#define INCVALUE_82576			(16u << IGB_82576_TSYNC_SHIFT)
 #define IGB_NBITS_82580			40
 
 static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
@@ -722,11 +722,29 @@
 	struct e1000_hw *hw = &adapter->hw;
 	struct skb_shared_hwtstamps shhwtstamps;
 	u64 regval;
+	int adjust = 0;
 
 	regval = rd32(E1000_TXSTMPL);
 	regval |= (u64)rd32(E1000_TXSTMPH) << 32;
 
 	igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+	/* adjust timestamp for the TX latency based on link speed */
+	if (adapter->hw.mac.type == e1000_i210) {
+		switch (adapter->link_speed) {
+		case SPEED_10:
+			adjust = IGB_I210_TX_LATENCY_10;
+			break;
+		case SPEED_100:
+			adjust = IGB_I210_TX_LATENCY_100;
+			break;
+		case SPEED_1000:
+			adjust = IGB_I210_TX_LATENCY_1000;
+			break;
+		}
+	}
+
+	shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+
 	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
 	dev_kfree_skb_any(adapter->ptp_tx_skb);
 	adapter->ptp_tx_skb = NULL;
@@ -771,6 +789,7 @@
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	u64 regval;
+	int adjust = 0;
 
 	/* If this bit is set, then the RX registers contain the time stamp. No
 	 * other packet will be time stamped until we read these registers, so
@@ -790,6 +809,23 @@
 
 	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 
+	/* adjust timestamp for the RX latency based on link speed */
+	if (adapter->hw.mac.type == e1000_i210) {
+		switch (adapter->link_speed) {
+		case SPEED_10:
+			adjust = IGB_I210_RX_LATENCY_10;
+			break;
+		case SPEED_100:
+			adjust = IGB_I210_RX_LATENCY_100;
+			break;
+		case SPEED_1000:
+			adjust = IGB_I210_RX_LATENCY_1000;
+			break;
+		}
+	}
+	skb_hwtstamps(skb)->hwtstamp =
+		ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
 	/* Update the last_rx_timestamp timer in order to enable watchdog check
 	 * for error case of latched timestamp on a dropped packet.
 	 */
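
The igb_ptp.c hunks above compensate i210 hardware timestamps for a fixed, per-link-speed latency using the IGB_I210_*_LATENCY_* constants added to igb.h earlier in this diff; the TX stamp is adjusted with ktime_sub_ns() and the RX stamp with ktime_add_ns(). A minimal sketch of the speed-to-latency lookup and the TX-side adjustment, with a made-up raw timestamp:

	#include <stdio.h>
	#include <stdint.h>

	/* Latency constants in nanoseconds, copied from the igb.h hunk above */
	#define IGB_I210_TX_LATENCY_10		9542
	#define IGB_I210_TX_LATENCY_100		1024
	#define IGB_I210_TX_LATENCY_1000	178

	/* Pick the TX latency for the current link speed, 0 if unknown */
	static int tx_latency_ns(int link_speed_mbps)
	{
		switch (link_speed_mbps) {
		case 10:   return IGB_I210_TX_LATENCY_10;
		case 100:  return IGB_I210_TX_LATENCY_100;
		case 1000: return IGB_I210_TX_LATENCY_1000;
		default:   return 0;
		}
	}

	int main(void)
	{
		int64_t raw_ns = 1000000000;	/* made-up value read from E1000_TXSTMP* */
		int speed = 1000;
		int64_t adjusted = raw_ns - tx_latency_ns(speed);	/* mirrors ktime_sub_ns() */

		printf("raw %lld ns -> adjusted %lld ns\n",
		       (long long)raw_ns, (long long)adjusted);
		return 0;
	}
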
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index ae3f283..ee1ef08 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -113,7 +113,7 @@
 #define E1000_RXDCTL_QUEUE_ENABLE	0x02000000 /* Enable specific Rx Que */
 
 /* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN	(1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN	BIT(11) /* Tx Desc writeback RO bit */
 
 #define E1000_VF_INIT_TIMEOUT	200 /* Number of retries to clear RSTI */
 
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index b74ce53..8dea1b1 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -154,7 +154,8 @@
 
 	memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
 
-	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+	regs->version = (1u << 24) |
+			(adapter->pdev->revision << 16) |
 			adapter->pdev->device;
 
 	regs_buff[0] = er32(CTRL);
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index f166baa..6f4290d 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -287,8 +287,8 @@
 };
 
 /* hardware capability, feature, and workaround flags */
-#define IGBVF_FLAG_RX_CSUM_DISABLED	(1 << 0)
-#define IGBVF_FLAG_RX_LB_VLAN_BSWAP	(1 << 1)
+#define IGBVF_FLAG_RX_CSUM_DISABLED	BIT(0)
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP	BIT(1)
 #define IGBVF_RX_DESC_ADV(R, i)     \
 	(&((((R).desc))[i].rx_desc))
 #define IGBVF_TX_DESC_ADV(R, i)     \
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index c124422..322a2d7 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -964,7 +964,7 @@
 			ivar = ivar & 0xFFFFFF00;
 			ivar |= msix_vector | E1000_IVAR_VALID;
 		}
-		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
+		adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
 		array_ew32(IVAR0, index, ivar);
 	}
 	if (tx_queue > IGBVF_NO_QUEUE) {
@@ -979,7 +979,7 @@
 			ivar = ivar & 0xFFFF00FF;
 			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
 		}
-		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
+		adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
 		array_ew32(IVAR0, index, ivar);
 	}
 }
@@ -1014,8 +1014,8 @@
 
 	ew32(IVAR_MISC, tmp);
 
-	adapter->eims_enable_mask = (1 << (vector)) - 1;
-	adapter->eims_other = 1 << (vector - 1);
+	adapter->eims_enable_mask = GENMASK(vector - 1, 0);
+	adapter->eims_other = BIT(vector - 1);
 	e1e_flush();
 }
 
@@ -1367,7 +1367,7 @@
 	struct e1000_hw *hw = &adapter->hw;
 	struct igbvf_ring *rx_ring = adapter->rx_ring;
 	u64 rdba;
-	u32 rdlen, rxdctl;
+	u32 rxdctl;
 
 	/* disable receives */
 	rxdctl = er32(RXDCTL(0));
@@ -1375,8 +1375,6 @@
 	e1e_flush();
 	msleep(10);
 
-	rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
@@ -1933,83 +1931,74 @@
 	buffer_info->dma = 0;
 }
 
-static int igbvf_tso(struct igbvf_adapter *adapter,
-		     struct igbvf_ring *tx_ring,
-		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
-		     __be16 protocol)
+static int igbvf_tso(struct igbvf_ring *tx_ring,
+		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
-	struct e1000_adv_tx_context_desc *context_desc;
-	struct igbvf_buffer *buffer_info;
-	u32 info = 0, tu_cmd = 0;
-	u32 mss_l4len_idx, l4len;
-	unsigned int i;
+	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
 	int err;
 
-	*hdr_len = 0;
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (!skb_is_gso(skb))
+		return 0;
 
 	err = skb_cow_head(skb, 0);
-	if (err < 0) {
-		dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
+	if (err < 0)
 		return err;
-	}
 
-	l4len = tcp_hdrlen(skb);
-	*hdr_len += l4len;
-
-	if (protocol == htons(ETH_P_IP)) {
-		struct iphdr *iph = ip_hdr(skb);
-
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-							 iph->daddr, 0,
-							 IPPROTO_TCP,
-							 0);
-	} else if (skb_is_gso_v6(skb)) {
-		ipv6_hdr(skb)->payload_len = 0;
-		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-						       &ipv6_hdr(skb)->daddr,
-						       0, IPPROTO_TCP, 0);
-	}
-
-	i = tx_ring->next_to_use;
-
-	buffer_info = &tx_ring->buffer_info[i];
-	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
-	/* VLAN MACLEN IPLEN */
-	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
-		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
-	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
-	*hdr_len += skb_network_offset(skb);
-	info |= (skb_transport_header(skb) - skb_network_header(skb));
-	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
-	context_desc->vlan_macip_lens = cpu_to_le32(info);
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
 
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
 
-	if (protocol == htons(ETH_P_IP))
-		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
-	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+						  csum_unfold(l4.tcp->check)));
+		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
 
-	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+		ip.v4->tot_len = 0;
+	} else {
+		ip.v6->payload_len = 0;
+	}
+
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+	/* remove payload length from inner checksum */
+	paylen = skb->len - l4_offset;
+	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
 	/* MSS L4LEN IDX */
-	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
-	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
 
-	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-	context_desc->seqnum_seed = 0;
+	/* VLAN MACLEN IPLEN */
+	vlan_macip_lens = l4.hdr - ip.hdr;
+	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
 
-	buffer_info->time_stamp = jiffies;
-	buffer_info->dma = 0;
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
+	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
 
-	tx_ring->next_to_use = i;
-
-	return true;
+	return 1;
 }
 
 static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
@@ -2091,7 +2080,7 @@
 }
 
 #define IGBVF_MAX_TXD_PWR	16
-#define IGBVF_MAX_DATA_PER_TXD	(1 << IGBVF_MAX_TXD_PWR)
+#define IGBVF_MAX_DATA_PER_TXD	(1u << IGBVF_MAX_TXD_PWR)
 
 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
 				   struct igbvf_ring *tx_ring,
@@ -2271,8 +2260,7 @@
 
 	first = tx_ring->next_to_use;
 
-	tso = skb_is_gso(skb) ?
-		igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
+	tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
 	if (unlikely(tso < 0)) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
@@ -2615,6 +2603,40 @@
 	return 0;
 }
 
+#define IGBVF_MAX_MAC_HDR_LEN		127
+#define IGBVF_MAX_NETWORK_HDR_LEN	511
+
+static netdev_features_t
+igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
+		     netdev_features_t features)
+{
+	unsigned int network_hdr_len, mac_hdr_len;
+
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len >  IGBVF_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
+
+	return features;
+}
+
 static const struct net_device_ops igbvf_netdev_ops = {
 	.ndo_open		= igbvf_open,
 	.ndo_stop		= igbvf_close,
@@ -2631,7 +2653,7 @@
 	.ndo_poll_controller	= igbvf_netpoll,
 #endif
 	.ndo_set_features	= igbvf_set_features,
-	.ndo_features_check	= passthru_features_check,
+	.ndo_features_check	= igbvf_features_check,
 };
 
 /**
@@ -2739,22 +2761,30 @@
 			      NETIF_F_HW_CSUM |
 			      NETIF_F_SCTP_CRC;
 
-	netdev->features = netdev->hw_features |
-			   NETIF_F_HW_VLAN_CTAG_TX |
-			   NETIF_F_HW_VLAN_CTAG_RX |
-			   NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+				    NETIF_F_GSO_GRE_CSUM | \
+				    NETIF_F_GSO_IPIP | \
+				    NETIF_F_GSO_SIT | \
+				    NETIF_F_GSO_UDP_TUNNEL | \
+				    NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+	netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
+	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+			       IGBVF_GSO_PARTIAL_FEATURES;
+
+	netdev->features = netdev->hw_features;
 
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
-	netdev->vlan_features |= NETIF_F_SG |
-				 NETIF_F_TSO |
-				 NETIF_F_TSO6 |
-				 NETIF_F_HW_CSUM |
-				 NETIF_F_SCTP_CRC;
-
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
 	netdev->mpls_features |= NETIF_F_HW_CSUM;
-	netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+	netdev->hw_enc_features |= netdev->vlan_features;
+
+	/* set this bit last since it cannot be part of vlan_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
 
 	/*reset the controller to put the device in a known good state */
 	err = hw->mac.ops.reset_hw(hw);
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index a13baa9..335ba66 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -266,7 +266,7 @@
 	msgbuf[1] = vid;
 	/* Setting the 8 bit field MSG INFO to true indicates "add" */
 	if (set)
-		msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
+		msgbuf[0] |= BIT(E1000_VT_MSGINFO_SHIFT);
 
 	mbx->ops.write_posted(hw, msgbuf, 2);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 84fa28c..9f2db18 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -143,14 +143,11 @@
 	unsigned char vf_mac_addresses[ETH_ALEN];
 	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
 	u16 num_vf_mc_hashes;
-	u16 default_vf_vlan_id;
-	u16 vlans_enabled;
 	bool clear_to_send;
 	bool pf_set_mac;
 	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
 	u16 pf_qos;
 	u16 tx_rate;
-	u16 vlan_count;
 	u8 spoofchk_enabled;
 	bool rss_query_enabled;
 	u8 trusted;
@@ -173,7 +170,7 @@
 };
 
 #define IXGBE_MAX_TXD_PWR	14
-#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
@@ -456,7 +453,7 @@
 				IXGBE_QV_STATE_POLL);
 #ifdef BP_EXTENDED_STATS
 	if (rc != IXGBE_QV_STATE_IDLE)
-		q_vector->tx.ring->stats.yields++;
+		q_vector->rx.ring->stats.yields++;
 #endif
 	return rc == IXGBE_QV_STATE_IDLE;
 }
@@ -623,47 +620,46 @@
 	 * thus the additional *_CAPABLE flags.
 	 */
 	u32 flags;
-#define IXGBE_FLAG_MSI_ENABLED                  (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED                 (u32)(1 << 3)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 4)
-#define IXGBE_FLAG_RX_PS_CAPABLE                (u32)(1 << 5)
-#define IXGBE_FLAG_RX_PS_ENABLED                (u32)(1 << 6)
-#define IXGBE_FLAG_DCA_ENABLED                  (u32)(1 << 8)
-#define IXGBE_FLAG_DCA_CAPABLE                  (u32)(1 << 9)
-#define IXGBE_FLAG_IMIR_ENABLED                 (u32)(1 << 10)
-#define IXGBE_FLAG_MQ_CAPABLE                   (u32)(1 << 11)
-#define IXGBE_FLAG_DCB_ENABLED                  (u32)(1 << 12)
-#define IXGBE_FLAG_VMDQ_CAPABLE                 (u32)(1 << 13)
-#define IXGBE_FLAG_VMDQ_ENABLED                 (u32)(1 << 14)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE             (u32)(1 << 15)
-#define IXGBE_FLAG_NEED_LINK_UPDATE             (u32)(1 << 16)
-#define IXGBE_FLAG_NEED_LINK_CONFIG             (u32)(1 << 17)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE            (u32)(1 << 18)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE         (u32)(1 << 19)
-#define IXGBE_FLAG_FCOE_CAPABLE                 (u32)(1 << 20)
-#define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 21)
-#define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 22)
-#define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 23)
+#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
+#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
+#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
+#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
+#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
+#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
+#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
+#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
+#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
+#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
+#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
+#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
+#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
+#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
+#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
 #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
 #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
 #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
+#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)
 
 	u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1 << 0)
-#define IXGBE_FLAG2_RSC_ENABLED                 (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE         (u32)(1 << 2)
-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT           (u32)(1 << 3)
-#define IXGBE_FLAG2_SEARCH_FOR_SFP              (u32)(1 << 4)
-#define IXGBE_FLAG2_SFP_NEEDS_RESET             (u32)(1 << 5)
-#define IXGBE_FLAG2_RESET_REQUESTED             (u32)(1 << 6)
-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT        (u32)(1 << 7)
-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 8)
-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 10)
-#define IXGBE_FLAG2_PHY_INTERRUPT		(u32)(1 << 11)
-#ifdef CONFIG_IXGBE_VXLAN
+#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
+#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
+#define IXGBE_FLAG2_RESET_REQUESTED		BIT(6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
+#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
 #define IXGBE_FLAG2_VXLAN_REREG_NEEDED		BIT(12)
-#endif
 #define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
 
 	/* Tx fast path data */
@@ -675,6 +671,9 @@
 	int num_rx_queues;
 	u16 rx_itr_setting;
 
+	/* Port number used to identify VXLAN traffic */
+	__be16 vxlan_port;
+
 	/* TX */
 	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 
@@ -782,9 +781,6 @@
 	u32 timer_event_accumulator;
 	u32 vferr_refcount;
 	struct ixgbe_mac_addr *mac_table;
-#ifdef CONFIG_IXGBE_VXLAN
-	u16 vxlan_port;
-#endif
 	struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
 	struct hwmon_buff *ixgbe_hwmon_buff;
@@ -797,7 +793,7 @@
 	unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
 
 #define IXGBE_MAX_LINK_HANDLE 10
-	struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE];
+	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
 	unsigned long tables;
 
 /* maximum number of RETA entries among all devices supported by ixgbe
@@ -808,6 +804,8 @@
 
 #define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
 	u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
+
+	bool need_crosstalk_fix;
 };
 
 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -819,6 +817,7 @@
 		return IXGBE_MAX_RSS_INDICES;
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		return IXGBE_MAX_RSS_INDICES_X550;
 	default:
 		return 0;
@@ -829,7 +828,7 @@
 	struct hlist_node fdir_node;
 	union ixgbe_atr_input filter;
 	u16 sw_idx;
-	u16 action;
+	u64 action;
 };
 
 enum ixgbe_state_t {
@@ -862,13 +861,15 @@
 	board_X540,
 	board_X550,
 	board_X550EM_x,
+	board_x550em_a,
 };
 
-extern struct ixgbe_info ixgbe_82598_info;
-extern struct ixgbe_info ixgbe_82599_info;
-extern struct ixgbe_info ixgbe_X540_info;
-extern struct ixgbe_info ixgbe_X550_info;
-extern struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_82598_info;
+extern const struct ixgbe_info ixgbe_82599_info;
+extern const struct ixgbe_info ixgbe_X540_info;
+extern const struct ixgbe_info ixgbe_X550_info;
+extern const struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_x550em_a_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
 #endif
@@ -879,6 +880,8 @@
 extern char ixgbe_default_device_descr[];
 #endif /* IXGBE_FCOE */
 
+int ixgbe_open(struct net_device *netdev);
+int ixgbe_close(struct net_device *netdev);
 void ixgbe_up(struct ixgbe_adapter *adapter);
 void ixgbe_down(struct ixgbe_adapter *adapter);
 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
@@ -893,8 +896,8 @@
 void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
 void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
-			       u16 subdevice_id);
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+			 u16 subdevice_id);
 #ifdef CONFIG_PCI_IOV
 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
 #endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index d8a9fb8..fb51be7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2015 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -792,7 +792,7 @@
 	}
 
 	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
-	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+	gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
 	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
 
 	/*
@@ -914,10 +914,10 @@
 	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
 	if (vlan_on)
 		/* Turn on this VLAN id */
-		bits |= (1 << bitindex);
+		bits |= BIT(bitindex);
 	else
 		/* Turn off this VLAN id */
-		bits &= ~(1 << bitindex);
+		bits &= ~BIT(bitindex);
 	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
 
 	return 0;
@@ -1160,7 +1160,7 @@
 		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
 }
 
-static struct ixgbe_mac_operations mac_ops_82598 = {
+static const struct ixgbe_mac_operations mac_ops_82598 = {
 	.init_hw		= &ixgbe_init_hw_generic,
 	.reset_hw		= &ixgbe_reset_hw_82598,
 	.start_hw		= &ixgbe_start_hw_82598,
@@ -1192,9 +1192,11 @@
 	.clear_vfta		= &ixgbe_clear_vfta_82598,
 	.set_vfta		= &ixgbe_set_vfta_82598,
 	.fc_enable		= &ixgbe_fc_enable_82598,
+	.setup_fc		= ixgbe_setup_fc_generic,
 	.set_fw_drv_ver         = NULL,
 	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
 	.release_swfw_sync      = &ixgbe_release_swfw_sync,
+	.init_swfw_sync		= NULL,
 	.get_thermal_sensor_data = NULL,
 	.init_thermal_sensor_thresh = NULL,
 	.prot_autoc_read	= &prot_autoc_read_generic,
@@ -1203,7 +1205,7 @@
 	.disable_rx		= &ixgbe_disable_rx_generic,
 };
 
-static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
 	.init_params		= &ixgbe_init_eeprom_params_generic,
 	.read			= &ixgbe_read_eerd_generic,
 	.write			= &ixgbe_write_eeprom_generic,
@@ -1214,7 +1216,7 @@
 	.update_checksum	= &ixgbe_update_eeprom_checksum_generic,
 };
 
-static struct ixgbe_phy_operations phy_ops_82598 = {
+static const struct ixgbe_phy_operations phy_ops_82598 = {
 	.identify		= &ixgbe_identify_phy_generic,
 	.identify_sfp		= &ixgbe_identify_module_generic,
 	.init			= &ixgbe_init_phy_ops_82598,
@@ -1230,7 +1232,7 @@
 	.check_overtemp		= &ixgbe_tn_check_overtemp,
 };
 
-struct ixgbe_info ixgbe_82598_info = {
+const struct ixgbe_info ixgbe_82598_info = {
 	.mac			= ixgbe_mac_82598EB,
 	.get_invariants		= &ixgbe_get_invariants_82598,
 	.mac_ops		= &mac_ops_82598,
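
The 82598 hunks above (and the matching 82599 and header changes that follow) constify the mac, eeprom and phy ops tables, so the function-pointer structures end up in read-only data and cannot be rewritten at run time. A small standalone example of the same pattern; the names are placeholders, not the ixgbe types:

	#include <stdio.h>

	struct demo_ops {
		int (*init)(void);
		int (*reset)(void);
	};

	static int demo_init(void)  { puts("init");  return 0; }
	static int demo_reset(void) { puts("reset"); return 0; }

	/* const: the table itself is immutable and can live in .rodata */
	static const struct demo_ops demo_ops_a = {
		.init	= demo_init,
		.reset	= demo_reset,
	};

	int main(void)
	{
		const struct demo_ops *ops = &demo_ops_a;

		ops->init();
		ops->reset();
		return 0;
	}
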
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index fa8d4f4..47afed7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2015 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -1296,17 +1296,17 @@
 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
 do { \
 	u32 n = (_n); \
-	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
 		common_hash ^= lo_hash_dword >> n; \
-	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
 		bucket_hash ^= lo_hash_dword >> n; \
-	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
 		sig_hash ^= lo_hash_dword << (16 - n); \
-	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
 		common_hash ^= hi_hash_dword >> n; \
-	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
 		bucket_hash ^= hi_hash_dword >> n; \
-	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
 		sig_hash ^= hi_hash_dword << (16 - n); \
 } while (0)
 
@@ -1440,9 +1440,9 @@
 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
 do { \
 	u32 n = (_n); \
-	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
 		bucket_hash ^= lo_hash_dword >> n; \
-	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
 		bucket_hash ^= hi_hash_dword >> n; \
 } while (0)
 
@@ -1633,6 +1633,7 @@
 	switch (hw->mac.type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
 		break;
 	default:
@@ -2181,7 +2182,7 @@
 	return status;
 }
 
-static struct ixgbe_mac_operations mac_ops_82599 = {
+static const struct ixgbe_mac_operations mac_ops_82599 = {
 	.init_hw                = &ixgbe_init_hw_generic,
 	.reset_hw               = &ixgbe_reset_hw_82599,
 	.start_hw               = &ixgbe_start_hw_82599,
@@ -2220,6 +2221,7 @@
 	.clear_vfta             = &ixgbe_clear_vfta_generic,
 	.set_vfta               = &ixgbe_set_vfta_generic,
 	.fc_enable              = &ixgbe_fc_enable_generic,
+	.setup_fc		= ixgbe_setup_fc_generic,
 	.set_fw_drv_ver         = &ixgbe_set_fw_drv_ver_generic,
 	.init_uta_tables        = &ixgbe_init_uta_tables_generic,
 	.setup_sfp              = &ixgbe_setup_sfp_modules_82599,
@@ -2227,6 +2229,7 @@
 	.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
 	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
 	.release_swfw_sync      = &ixgbe_release_swfw_sync,
+	.init_swfw_sync		= NULL,
 	.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
 	.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
 	.prot_autoc_read	= &prot_autoc_read_82599,
@@ -2235,7 +2238,7 @@
 	.disable_rx		= &ixgbe_disable_rx_generic,
 };
 
-static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
 	.init_params		= &ixgbe_init_eeprom_params_generic,
 	.read			= &ixgbe_read_eeprom_82599,
 	.read_buffer		= &ixgbe_read_eeprom_buffer_82599,
@@ -2246,7 +2249,7 @@
 	.update_checksum	= &ixgbe_update_eeprom_checksum_generic,
 };
 
-static struct ixgbe_phy_operations phy_ops_82599 = {
+static const struct ixgbe_phy_operations phy_ops_82599 = {
 	.identify		= &ixgbe_identify_phy_82599,
 	.identify_sfp		= &ixgbe_identify_module_generic,
 	.init			= &ixgbe_init_phy_ops_82599,
@@ -2263,7 +2266,7 @@
 	.check_overtemp		= &ixgbe_tn_check_overtemp,
 };
 
-struct ixgbe_info ixgbe_82599_info = {
+const struct ixgbe_info ixgbe_82599_info = {
 	.mac                    = ixgbe_mac_82599EB,
 	.get_invariants         = &ixgbe_get_invariants_82599,
 	.mac_ops                = &mac_ops_82599,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 6404505..902d206 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2015 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -97,6 +97,7 @@
 		case IXGBE_DEV_ID_X540T:
 		case IXGBE_DEV_ID_X540T1:
 		case IXGBE_DEV_ID_X550T:
+		case IXGBE_DEV_ID_X550T1:
 		case IXGBE_DEV_ID_X550EM_X_10G_T:
 			supported = true;
 			break;
@@ -111,12 +112,12 @@
 }
 
 /**
- *  ixgbe_setup_fc - Set up flow control
+ *  ixgbe_setup_fc_generic - Set up flow control
  *  @hw: pointer to hardware structure
  *
  *  Called at init time to set up flow control.
  **/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
 {
 	s32 ret_val = 0;
 	u32 reg = 0, reg_bp = 0;
@@ -296,7 +297,7 @@
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Setup flow control */
-	ret_val = ixgbe_setup_fc(hw);
+	ret_val = hw->mac.ops.setup_fc(hw);
 	if (ret_val)
 		return ret_val;
 
@@ -681,6 +682,7 @@
 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
 {
 	struct ixgbe_bus_info *bus = &hw->bus;
+	u16 ee_ctrl_4;
 	u32 reg;
 
 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
@@ -691,6 +693,13 @@
 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
 	if (reg & IXGBE_FACTPS_LFS)
 		bus->func ^= 0x1;
+
+	/* Get MAC instance from EEPROM for configuring CS4227 */
+	if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
+		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
+		bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
+				   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+	}
 }
 
 /**
@@ -816,8 +825,8 @@
 			 */
 			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 					    IXGBE_EEC_SIZE_SHIFT);
-			eeprom->word_size = 1 << (eeprom_size +
-						  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+			eeprom->word_size = BIT(eeprom_size +
+						 IXGBE_EEPROM_WORD_SIZE_SHIFT);
 		}
 
 		if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -1493,7 +1502,7 @@
 	 * Mask is used to shift "count" bits of "data" out to the EEPROM
 	 * one bit at a time.  Determine the starting bit based on count
 	 */
-	mask = 0x01 << (count - 1);
+	mask = BIT(count - 1);
 
 	for (i = 0; i < count; i++) {
 		/*
@@ -1982,7 +1991,7 @@
 	 */
 	vector_reg = (vector >> 5) & 0x7F;
 	vector_bit = vector & 0x1F;
-	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+	hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
 }
 
 /**
@@ -2854,6 +2863,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
 		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
 		break;
@@ -2911,10 +2921,10 @@
 			mpsar_hi = 0;
 		}
 	} else if (vmdq < 32) {
-		mpsar_lo &= ~(1 << vmdq);
+		mpsar_lo &= ~BIT(vmdq);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
 	} else {
-		mpsar_hi &= ~(1 << (vmdq - 32));
+		mpsar_hi &= ~BIT(vmdq - 32);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
 	}
 
@@ -2943,11 +2953,11 @@
 
 	if (vmdq < 32) {
 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-		mpsar |= 1 << vmdq;
+		mpsar |= BIT(vmdq);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
 	} else {
 		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-		mpsar |= 1 << (vmdq - 32);
+		mpsar |= BIT(vmdq - 32);
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
 	}
 	return 0;
@@ -2968,11 +2978,11 @@
 	u32 rar = hw->mac.san_mac_rar_index;
 
 	if (vmdq < 32) {
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
 	} else {
 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
 	}
 
 	return 0;
@@ -3072,7 +3082,7 @@
 	 *    bits[4-0]:  which bit in the register
 	 */
 	regidx = vlan / 32;
-	vfta_delta = 1 << (vlan % 32);
+	vfta_delta = BIT(vlan % 32);
 	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
 
 	/* vfta_delta represents the difference between the current value
@@ -3103,12 +3113,12 @@
 	bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
 
 	/* set the pool bit */
-	bits |= 1 << (vind % 32);
+	bits |= BIT(vind % 32);
 	if (vlan_on)
 		goto vlvf_update;
 
 	/* clear the pool bit */
-	bits ^= 1 << (vind % 32);
+	bits ^= BIT(vind % 32);
 
 	if (!bits &&
 	    !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
@@ -3300,43 +3310,25 @@
 /**
  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
  *  @hw: pointer to hardware structure
- *  @enable: enable or disable switch for anti-spoofing
- *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ *  @enable: enable or disable switch for MAC anti-spoofing
+ *  @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
  *
  **/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
 {
-	int j;
-	int pf_target_reg = pf >> 3;
-	int pf_target_shift = pf % 8;
-	u32 pfvfspoof = 0;
+	int vf_target_reg = vf >> 3;
+	int vf_target_shift = vf % 8;
+	u32 pfvfspoof;
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		return;
 
+	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
 	if (enable)
-		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
-	/*
-	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
-	 * MAC anti-spoof enables in each register array element.
-	 */
-	for (j = 0; j < pf_target_reg; j++)
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
-	/*
-	 * The PF should be allowed to spoof so that it can support
-	 * emulation mode NICs.  Do not set the bits assigned to the PF
-	 */
-	pfvfspoof &= (1 << pf_target_shift) - 1;
-	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
-	/*
-	 * Remaining pools belong to the PF so they do not need to have
-	 * anti-spoofing enabled.
-	 */
-	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+		pfvfspoof |= BIT(vf_target_shift);
+	else
+		pfvfspoof &= ~BIT(vf_target_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
 
 /**
@@ -3357,9 +3349,9 @@
 
 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
 	if (enable)
-		pfvfspoof |= (1 << vf_target_shift);
+		pfvfspoof |= BIT(vf_target_shift);
 	else
-		pfvfspoof &= ~(1 << vf_target_shift);
+		pfvfspoof &= ~BIT(vf_target_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
 
@@ -3483,18 +3475,27 @@
  *  Communicates with the manageability block.  On success return 0
  *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
  **/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
 				 u32 length, u32 timeout,
 				 bool return_data)
 {
-	u32 hicr, i, bi, fwsts;
 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+	u32 hicr, i, bi, fwsts;
 	u16 buf_len, dword_len;
+	union {
+		struct ixgbe_hic_hdr hdr;
+		u32 u32arr[1];
+	} *bp = buffer;
+	s32 status;
 
-	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+	if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
 		hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
 		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
 	}
+	/* Take management host interface semaphore */
+	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+	if (status)
+		return status;
 
 	/* Set bit 9 of FWSTS clearing FW reset indication */
 	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
@@ -3502,26 +3503,27 @@
 
 	/* Check that the host interface is enabled. */
 	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
-	if ((hicr & IXGBE_HICR_EN) == 0) {
+	if (!(hicr & IXGBE_HICR_EN)) {
 		hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+		goto rel_out;
 	}
 
 	/* Calculate length in DWORDs. We must be DWORD aligned */
-	if ((length % (sizeof(u32))) != 0) {
+	if (length % sizeof(u32)) {
 		hw_dbg(hw, "Buffer length failure, not aligned to dword");
-		return IXGBE_ERR_INVALID_ARGUMENT;
+		status = IXGBE_ERR_INVALID_ARGUMENT;
+		goto rel_out;
 	}
 
 	dword_len = length >> 2;
 
-	/*
-	 * The device driver writes the relevant command block
+	/* The device driver writes the relevant command block
 	 * into the ram area.
 	 */
 	for (i = 0; i < dword_len; i++)
 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
-				      i, cpu_to_le32(buffer[i]));
+				      i, cpu_to_le32(bp->u32arr[i]));
 
 	/* Setting this bit tells the ARC that a new command is pending. */
 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3534,44 +3536,49 @@
 	}
 
 	/* Check command successful completion. */
-	if ((timeout != 0 && i == timeout) ||
-	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+	if ((timeout && i == timeout) ||
+	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
 		hw_dbg(hw, "Command has failed with no status valid.\n");
-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+		goto rel_out;
 	}
 
 	if (!return_data)
-		return 0;
+		goto rel_out;
 
 	/* Calculate length in DWORDs */
 	dword_len = hdr_size >> 2;
 
 	/* first pull in the header so we know the buffer length */
 	for (bi = 0; bi < dword_len; bi++) {
-		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
-		le32_to_cpus(&buffer[bi]);
+		bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+		le32_to_cpus(&bp->u32arr[bi]);
 	}
 
 	/* If there is any thing in data position pull it in */
-	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
-	if (buf_len == 0)
-		return 0;
+	buf_len = bp->hdr.buf_len;
+	if (!buf_len)
+		goto rel_out;
 
-	if (length < (buf_len + hdr_size)) {
+	if (length < round_up(buf_len, 4) + hdr_size) {
 		hw_dbg(hw, "Buffer not large enough for reply message.\n");
-		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+		goto rel_out;
 	}
 
 	/* Calculate length in DWORDs, add 3 for odd lengths */
 	dword_len = (buf_len + 3) >> 2;
 
-	/* Pull in the rest of the buffer (bi is where we left off)*/
+	/* Pull in the rest of the buffer (bi is where we left off) */
 	for (; bi <= dword_len; bi++) {
-		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
-		le32_to_cpus(&buffer[bi]);
+		bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+		le32_to_cpus(&bp->u32arr[bi]);
 	}
 
-	return 0;
+rel_out:
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+	return status;
 }
 
 /**
@@ -3594,13 +3601,10 @@
 	int i;
 	s32 ret_val;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM))
-		return IXGBE_ERR_SWFW_SYNC;
-
 	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
 	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
 	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
-	fw_cmd.port_num = (u8)hw->bus.func;
+	fw_cmd.port_num = hw->bus.func;
 	fw_cmd.ver_maj = maj;
 	fw_cmd.ver_min = min;
 	fw_cmd.ver_build = build;
@@ -3612,7 +3616,7 @@
 	fw_cmd.pad2 = 0;
 
 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
-		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+		ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
 						       sizeof(fw_cmd),
 						       IXGBE_HI_COMMAND_TIMEOUT,
 						       true);
@@ -3628,7 +3632,6 @@
 		break;
 	}
 
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
 	return ret_val;
 }
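
[Editor's note] The reworked ixgbe_host_interface_command() above now takes the SW/FW management semaphore itself and funnels every exit through a single rel_out label, while an anonymous union lets the caller's buffer be viewed both as a command header and as an array of DWORDs. A minimal userspace sketch of that acquire/goto-cleanup plus union-aliasing pattern (the "semaphore", header layout and error codes here are illustrative, not the driver's API):

#include <stdint.h>
#include <stdio.h>

struct hic_hdr {            /* simplified stand-in for struct ixgbe_hic_hdr */
	uint8_t cmd;
	uint8_t buf_len;
	uint8_t resv;
	uint8_t checksum;
};

static int sem_taken;
static int take_sem(void)  { sem_taken = 1; return 0; }
static void drop_sem(void) { sem_taken = 0; }

/* Write a command block as DWORDs and sanity-check the reply size,
 * releasing the semaphore on every path via one cleanup label. */
static int send_command(void *buffer, uint32_t length)
{
	union {
		struct hic_hdr hdr;
		uint32_t u32arr[1];
	} *bp = buffer;
	uint32_t dword_len = length >> 2;
	int status;

	if (!length || length % sizeof(uint32_t))
		return -1;

	status = take_sem();
	if (status)
		return status;

	/* "push" the command as DWORDs (stands in for register writes) */
	for (uint32_t i = 0; i < dword_len; i++)
		printf("write dword %u: 0x%08x\n", i, bp->u32arr[i]);

	if (bp->hdr.buf_len > length - sizeof(struct hic_hdr)) {
		status = -2;            /* reply would not fit */
		goto rel_out;
	}
	status = 0;

rel_out:
	drop_sem();
	return status;
}

int main(void)
{
	struct {
		struct hic_hdr hdr;
		uint32_t payload;
	} cmd = { .hdr = { .cmd = 0xDD, .buf_len = 4 }, .payload = 0x12345678 };
	int status = send_command(&cmd, sizeof(cmd));

	printf("status = %d, sem held after call: %d\n", status, sem_taken);
	return 0;
}
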
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2b95631..6d4c260 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -81,6 +81,7 @@
 s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
 void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
@@ -105,13 +106,13 @@
 
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
 				 u8 build, u8 ver);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
-				 u32 length, u32 timeout, bool return_data);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
+				 u32 timeout, bool return_data);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
 bool ixgbe_mng_present(struct ixgbe_hw *hw);
 bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 02c7333..072ef3b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -186,7 +186,7 @@
 
 	for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
 		if (tc_config[tc].dcb_pfc != pfc_disabled)
-			*pfc_en |= 1 << tc;
+			*pfc_en |= BIT(tc);
 	}
 }
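
[Editor's note] Throughout this series, open-coded (1 << x) shifts become BIT(x)/BIT_ULL(x). Besides reading better, the macros expand to unsigned constants, so setting bit 31 never shifts into a signed int's sign bit. A hedged stand-in for the kernel macros (the real definitions live in the bitops headers):

#include <stdio.h>

/* simplified, userspace stand-ins for the kernel macros */
#define BIT(nr)      (1UL << (nr))
#define BIT_ULL(nr)  (1ULL << (nr))

int main(void)
{
	/* 1 << 31 on a 32-bit int is signed overflow; BIT(31) is not */
	printf("BIT(31)     = 0x%lx\n", BIT(31));
	printf("BIT_ULL(40) = 0x%llx\n", BIT_ULL(40));

	/* typical PFC usage: set a per-traffic-class enable bit */
	unsigned char pfc_en = 0;
	for (int tc = 0; tc < 8; tc++)
		if (tc == 2 || tc == 5)
			pfc_en |= BIT(tc);
	printf("pfc_en      = 0x%02x\n", pfc_en);
	return 0;
}
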
 
@@ -232,7 +232,7 @@
 u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
 {
 	struct tc_configuration *tc_config = &cfg->tc_config[0];
-	u8 prio_mask = 1 << up;
+	u8 prio_mask = BIT(up);
 	u8 tc = cfg->num_tcs.pg_tcs;
 
 	/* If tc is 0 then DCB is likely not enabled or supported */
@@ -293,6 +293,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
 						 bwgid, ptype, prio_tc);
 	default:
@@ -311,6 +312,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
 	default:
 		break;
@@ -368,6 +370,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
 						  bwg_id, prio_type, prio_tc);
 		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
@@ -398,6 +401,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		ixgbe_dcb_read_rtrup2tc_82599(hw, map);
 		break;
 	default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index d3ba63f..b79e93a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -210,7 +210,7 @@
 
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		if (!(pfc_en & (1 << i))) {
+		if (!(pfc_en & BIT(i))) {
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
 			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
 			continue;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index b5cc989..1011d64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -248,7 +248,7 @@
 		int enabled = 0;
 
 		for (j = 0; j < MAX_USER_PRIORITY; j++) {
-			if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+			if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
 				enabled = 1;
 				break;
 			}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 2707bda..b8fc3cf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -62,7 +62,7 @@
 			     };
 	u8 up = dcb_getapp(adapter->netdev, &app);
 
-	if (up && !(up & (1 << adapter->fcoe.up)))
+	if (up && !(up & BIT(adapter->fcoe.up)))
 		changes |= BIT_APP_UPCHG;
 #endif
 
@@ -657,7 +657,7 @@
 	    app->protocol == ETH_P_FCOE) {
 		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
 
-		if (app_mask & (1 << adapter->fcoe.up))
+		if (app_mask & BIT(adapter->fcoe.up))
 			return 0;
 
 		adapter->fcoe.up = app->priority;
@@ -700,7 +700,7 @@
 	    app->protocol == ETH_P_FCOE) {
 		u8 app_mask = dcb_ieee_getapp_mask(dev, app);
 
-		if (app_mask & (1 << adapter->fcoe.up))
+		if (app_mask & BIT(adapter->fcoe.up))
 			return 0;
 
 		adapter->fcoe.up = app_mask ?
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 726e0ee..59b771b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -533,10 +533,8 @@
 
 	/* Flow Control */
 	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
-	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
-	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
-	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
-	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
+	for (i = 0; i < 4; i++)
+		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
 	for (i = 0; i < 8; i++) {
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
@@ -547,6 +545,7 @@
 		case ixgbe_mac_X540:
 		case ixgbe_mac_X550:
 		case ixgbe_mac_X550EM_x:
+		case ixgbe_mac_x550em_a:
 			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
 			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
 			break;
@@ -660,6 +659,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
 		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
 		for (i = 0; i < 8; i++)
@@ -718,8 +718,10 @@
 	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
 	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
 	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
-	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
-	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
+	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
+	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
+	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
+	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
 	for (i = 0; i < 8; i++)
 		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
 	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
@@ -729,7 +731,8 @@
 	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
 	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
 	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
-	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
+	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
+	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
 	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
 	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
 	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
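
[Editor's note] gorc, gotc and tor are 64-bit statistics, so the ethtool register dump now stores each of them as two consecutive 32-bit words instead of truncating to the low half. A tiny illustration of the split and of how a reader of the dump would reassemble the value (the counter value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gorc = 0x0000000123456789ULL;    /* fictional counter value */
	uint32_t regs_buff[1200] = { 0 };

	regs_buff[942] = (uint32_t)gorc;          /* low 32 bits  */
	regs_buff[943] = (uint32_t)(gorc >> 32);  /* high 32 bits */

	uint64_t rebuilt = ((uint64_t)regs_buff[943] << 32) | regs_buff[942];
	printf("low=0x%08x high=0x%08x rebuilt=0x%016llx\n",
	       regs_buff[942], regs_buff[943],
	       (unsigned long long)rebuilt);
	return 0;
}
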
@@ -801,15 +804,11 @@
 		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
 	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
 	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
-	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
-	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
-	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
-	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
+	for (i = 0; i < 4; i++)
+		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
 	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
-	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
-	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
-	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
-	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+	for (i = 0; i < 4; i++)
+		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
 	for (i = 0; i < 8; i++)
 		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
 	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
@@ -1443,6 +1442,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		toggle = 0x7FFFF30F;
 		test = reg_test_82599;
 		break;
@@ -1583,7 +1583,7 @@
 	/* Test each interrupt */
 	for (; i < 10; i++) {
 		/* Interrupt to test */
-		mask = 1 << i;
+		mask = BIT(i);
 
 		if (!shared_int) {
 			/*
@@ -1681,6 +1681,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
 		reg_ctl &= ~IXGBE_DMATXCTL_TE;
 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
@@ -1720,6 +1721,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
 		reg_data |= IXGBE_DMATXCTL_TE;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1780,6 +1782,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
 		reg_data |= IXGBE_MACC_FLU;
 		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
@@ -2053,7 +2056,7 @@
 
 		if (if_running)
 			/* indicate we're in test mode */
-			dev_close(netdev);
+			ixgbe_close(netdev);
 		else
 			ixgbe_reset(adapter);
 
@@ -2091,7 +2094,7 @@
 		/* clear testing bit and return adapter to previous state */
 		clear_bit(__IXGBE_TESTING, &adapter->state);
 		if (if_running)
-			dev_open(netdev);
+			ixgbe_open(netdev);
 		else if (hw->mac.ops.disable_tx_laser)
 			hw->mac.ops.disable_tx_laser(hw);
 	} else {
@@ -2991,6 +2994,7 @@
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 	case ixgbe_mac_X540:
 	case ixgbe_mac_82599EB:
 		info->so_timestamping =
@@ -3007,14 +3011,14 @@
 			info->phc_index = -1;
 
 		info->tx_types =
-			(1 << HWTSTAMP_TX_OFF) |
-			(1 << HWTSTAMP_TX_ON);
+			BIT(HWTSTAMP_TX_OFF) |
+			BIT(HWTSTAMP_TX_ON);
 
 		info->rx_filters =
-			(1 << HWTSTAMP_FILTER_NONE) |
-			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+			BIT(HWTSTAMP_FILTER_NONE) |
+			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 		break;
 	default:
 		return ethtool_op_get_ts_info(dev, info);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index e771e76..bcdc884 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -128,6 +128,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		if (num_tcs > 4) {
 			/*
 			 * TCs    : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 569cb07..9f3677c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2015 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -53,15 +53,7 @@
 #include <net/vxlan.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
-
-#ifdef CONFIG_OF
-#include <linux/of_net.h>
-#endif
-
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
+#include <net/tc_act/tc_mirred.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -79,10 +71,10 @@
 static char ixgbe_default_device_descr[] =
 			      "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "4.2.1-k"
+#define DRV_VERSION "4.4.0-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
-				"Copyright (c) 1999-2015 Intel Corporation.";
+				"Copyright (c) 1999-2016 Intel Corporation.";
 
 static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
 
@@ -92,6 +84,7 @@
 	[board_X540]		= &ixgbe_X540_info,
 	[board_X550]		= &ixgbe_X550_info,
 	[board_X550EM_x]	= &ixgbe_X550EM_x_info,
+	[board_x550em_a]	= &ixgbe_x550em_a_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -134,10 +127,17 @@
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
 	/* required last entry */
 	{0, }
 };
@@ -372,6 +372,27 @@
 
 	if (ixgbe_removed(reg_addr))
 		return IXGBE_FAILED_READ_REG;
+	if (unlikely(hw->phy.nw_mng_if_sel &
+		     IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
+		struct ixgbe_adapter *adapter;
+		int i;
+
+		for (i = 0; i < 200; ++i) {
+			value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
+			if (likely(!value))
+				goto writes_completed;
+			if (value == IXGBE_FAILED_READ_REG) {
+				ixgbe_remove_adapter(hw);
+				return IXGBE_FAILED_READ_REG;
+			}
+			udelay(5);
+		}
+
+		adapter = hw->back;
+		e_warn(hw, "register writes incomplete %08x\n", value);
+	}
+
+writes_completed:
 	value = readl(reg_addr + reg);
 	if (unlikely(value == IXGBE_FAILED_READ_REG))
 		ixgbe_check_remove(hw, reg);
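
[Editor's note] When the 10/100M management interface is in use, the register write path above now polls an "SGMII busy" indication until it clears, giving up after a bounded number of 5 µs waits. A generic sketch of that bounded-poll pattern (the fake register and timings are placeholders, not the ixgbe ones):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* stand-in for readl() on a "busy" status register */
static int reads_left = 3;
static uint32_t read_busy_reg(void)
{
	return reads_left-- > 0 ? 1 : 0;   /* busy for the first few polls */
}

/* Poll until the register reads 0, at most max_polls times. */
static bool wait_not_busy(unsigned int max_polls)
{
	for (unsigned int i = 0; i < max_polls; i++) {
		if (!read_busy_reg())
			return true;
		usleep(5);                 /* analogous to udelay(5) */
	}
	return false;
}

int main(void)
{
	printf("completed: %s\n", wait_not_busy(200) ? "yes" : "no");
	return 0;
}
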
@@ -588,7 +609,7 @@
 		pr_info("%-15s %016lX %016lX %016lX\n",
 			netdev->name,
 			netdev->state,
-			netdev->trans_start,
+			dev_trans_start(netdev),
 			netdev->last_rx);
 	}
 
@@ -869,6 +890,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		if (direction == -1) {
 			/* other causes */
 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -907,6 +929,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		mask = (qmask & 0xFFFFFFFF);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
 		mask = (qmask >> 32);
@@ -1087,9 +1110,40 @@
 }
 
 /**
+ * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ **/
+static int ixgbe_tx_maxrate(struct net_device *netdev,
+			    int queue_index, u32 maxrate)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 bcnrc_val = ixgbe_link_mbps(adapter);
+
+	if (!maxrate)
+		return 0;
+
+	/* Calculate the rate factor values to set */
+	bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+	bcnrc_val /= maxrate;
+
+	/* clear everything but the rate factor */
+	bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+	IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+	/* enable the rate scheduler */
+	bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
+
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
+	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+
+	return 0;
+}
+
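
[Editor's note] The new ixgbe_tx_maxrate() callback programs RTTBCNRC with link_speed/maxrate as a fixed-point rate factor. A worked example of just the arithmetic, assuming IXGBE_RTTBCNRC_RF_INT_SHIFT is 14 and using plausible mask/enable values (treat all of the constants as assumptions):

#include <stdint.h>
#include <stdio.h>

#define RF_INT_SHIFT   14                          /* assumed IXGBE_RTTBCNRC_RF_INT_SHIFT */
#define RF_DEC_MASK    0x00003FFFu                 /* assumed fractional-bits mask        */
#define RF_INT_MASK    (RF_DEC_MASK << RF_INT_SHIFT)
#define RS_ENA         0x80000000u                 /* assumed rate-scheduler enable bit   */

int main(void)
{
	uint32_t link_mbps = 10000;                 /* 10G link */
	uint32_t maxrate   = 2500;                  /* requested cap, Mbit/s */

	uint32_t bcnrc_val = link_mbps << RF_INT_SHIFT;
	bcnrc_val /= maxrate;                       /* rate factor = link/maxrate, fixed point */
	bcnrc_val &= RF_INT_MASK | RF_DEC_MASK;
	bcnrc_val |= RS_ENA;

	/* 10000/2500 = 4.0  ->  integer part 4, no fractional bits */
	printf("RTTBCNRC = 0x%08x (factor int=%u frac=0x%x)\n",
	       bcnrc_val,
	       (bcnrc_val & RF_INT_MASK) >> RF_INT_SHIFT,
	       bcnrc_val & RF_DEC_MASK);
	return 0;
}
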
+/**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *tx_ring, int napi_budget)
@@ -2192,7 +2246,7 @@
 
 	/* Populate MSIX to EITR Select */
 	if (adapter->num_vfs > 32) {
-		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+		u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
 	}
 
@@ -2222,6 +2276,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		ixgbe_set_ivar(adapter, -1, 1, v_idx);
 		break;
 	default:
@@ -2333,6 +2388,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		/*
 		 * set the WDIS bit to not clear the timer bits and cause an
 		 * immediate assertion of the interrupt
@@ -2494,6 +2550,7 @@
 		return false;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		switch (hw->mac.ops.get_media_type(hw)) {
 		case ixgbe_media_type_fiber:
 		case ixgbe_media_type_fiber_qsfp:
@@ -2568,6 +2625,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		mask = (qmask & 0xFFFFFFFF);
 		if (mask)
 			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2596,6 +2654,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		mask = (qmask & 0xFFFFFFFF);
 		if (mask)
 			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2631,6 +2690,7 @@
 		case ixgbe_mac_X540:
 		case ixgbe_mac_X550:
 		case ixgbe_mac_X550EM_x:
+		case ixgbe_mac_x550em_a:
 			mask |= IXGBE_EIMS_TS;
 			break;
 		default:
@@ -2646,7 +2706,10 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
-		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+	case ixgbe_mac_x550em_a:
+		if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+		    adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
+		    adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
 			mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
 		if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
 			mask |= IXGBE_EICR_GPI_SDP0_X540;
@@ -2704,6 +2767,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
 		    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
 			adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -2786,8 +2850,10 @@
 		ixgbe_update_dca(q_vector);
 #endif
 
-	ixgbe_for_each_ring(ring, q_vector->tx)
-		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget);
+	ixgbe_for_each_ring(ring, q_vector->tx) {
+		if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+			clean_complete = false;
+	}
 
 	/* Exit if we are called by netpoll or busy polling is active */
 	if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
@@ -2805,7 +2871,8 @@
 						 per_ring_budget);
 
 		work_done += cleaned;
-		clean_complete &= (cleaned < per_ring_budget);
+		if (cleaned >= per_ring_budget)
+			clean_complete = false;
 	}
 
 	ixgbe_qv_unlock_napi(q_vector);
@@ -2818,7 +2885,7 @@
 	if (adapter->rx_itr_setting & 1)
 		ixgbe_set_itr(q_vector);
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+		ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
 
 	return 0;
 }
@@ -2937,6 +3004,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		if (eicr & IXGBE_EICR_ECC) {
 			e_info(link, "Received ECC Err, initiating reset\n");
 			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -3033,6 +3101,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3109,15 +3178,15 @@
 	 * currently 40.
 	 */
 	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
-		txdctl |= (1 << 16);	/* WTHRESH = 1 */
+		txdctl |= 1u << 16;	/* WTHRESH = 1 */
 	else
-		txdctl |= (8 << 16);	/* WTHRESH = 8 */
+		txdctl |= 8u << 16;	/* WTHRESH = 8 */
 
 	/*
 	 * Setting PTHRESH to 32 both improves performance
 	 * and avoids a TX hang with DFP enabled
 	 */
-	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
+	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
 		   32;		/* PTHRESH = 32 */
 
 	/* reinitialize flowdirector state */
@@ -3669,9 +3738,9 @@
 		return;
 
 	if (rss_i > 3)
-		psrtype |= 2 << 29;
+		psrtype |= 2u << 29;
 	else if (rss_i > 1)
-		psrtype |= 1 << 29;
+		psrtype |= 1u << 29;
 
 	for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
@@ -3698,9 +3767,9 @@
 	reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
 
 	/* Enable only the PF's pool for Tx/Rx */
-	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
-	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
 	if (adapter->bridge_mode == BRIDGE_MODE_VEB)
 		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
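
[Editor's note] The VFRE/VFTE writes switch from (~0) << vf_shift to GENMASK(31, vf_shift): a contiguous mask from bit vf_shift up to bit 31, which enables the PF's pool and everything above it in that 32-bit register. A quick check of the equivalence with a simplified 32-bit GENMASK:

#include <stdio.h>

/* 32-bit-only stand-in for the kernel's GENMASK(h, l) */
#define GENMASK32(h, l) \
	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
	unsigned int vf_shift = 8;     /* e.g. PF pool index % 32 == 8 */

	printf("GENMASK(31, %u) = 0x%08x\n", vf_shift, GENMASK32(31, vf_shift));
	printf("(~0) << %u      = 0x%08x\n", vf_shift, ~0u << vf_shift);
	return 0;
}
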
@@ -3729,34 +3798,10 @@
 
 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 
-
-	/* Enable MAC Anti-Spoofing */
-	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
-					  adapter->num_vfs);
-
-	/* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
-	 * calling set_ethertype_anti_spoofing for each VF in loop below
-	 */
-	if (hw->mac.ops.set_ethertype_anti_spoofing) {
-		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
-				(IXGBE_ETQF_FILTER_EN    |
-				 IXGBE_ETQF_TX_ANTISPOOF |
-				 IXGBE_ETH_P_LLDP));
-
-		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
-				(IXGBE_ETQF_FILTER_EN |
-				 IXGBE_ETQF_TX_ANTISPOOF |
-				 ETH_P_PAUSE));
-	}
-
-	/* For VFs that have spoof checking turned off */
 	for (i = 0; i < adapter->num_vfs; i++) {
-		if (!adapter->vfinfo[i].spoofchk_enabled)
-			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
-
-		/* enable ethertype anti spoofing if hw supports it */
-		if (hw->mac.ops.set_ethertype_anti_spoofing)
-			hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+		/* configure spoof checking */
+		ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
+					  adapter->vfinfo[i].spoofchk_enabled);
 
 		/* Enable/Disable RSS query feature  */
 		ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
@@ -3832,6 +3877,7 @@
 		break;
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		if (adapter->num_vfs)
 			rdrxctl |= IXGBE_RDRXCTL_PSP;
 		/* fall through for older HW */
@@ -3908,7 +3954,9 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 
 	/* add VID to filter table */
-	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
+	if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+		hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
+
 	set_bit(vid, adapter->active_vlans);
 
 	return 0;
@@ -3947,7 +3995,7 @@
 	 * entry other than the PF.
 	 */
 	word = idx * 2 + (VMDQ_P(0) / 32);
-	bits = ~(1 << (VMDQ_P(0)) % 32);
+	bits = ~BIT(VMDQ_P(0) % 32);
 	bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
 
 	/* Disable the filter so this falls into the default pool. */
@@ -3965,9 +4013,7 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 
 	/* remove VID from filter table */
-	if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
-		ixgbe_update_pf_promisc_vlvf(adapter, vid);
-	else
+	if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
 		hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
 
 	clear_bit(vid, adapter->active_vlans);
@@ -3995,6 +4041,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			struct ixgbe_ring *ring = adapter->rx_ring[i];
 
@@ -4031,6 +4078,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			struct ixgbe_ring *ring = adapter->rx_ring[i];
 
@@ -4057,6 +4105,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 	default:
 		if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
 			break;
@@ -4081,7 +4130,7 @@
 		u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
 		u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
 
-		vlvfb |= 1 << (VMDQ_P(0) % 32);
+		vlvfb |= BIT(VMDQ_P(0) % 32);
 		IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
 	}
 
@@ -4111,7 +4160,7 @@
 
 		if (vlvf) {
 			/* record VLAN ID in VFTA */
-			vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
 
 			/* if PF is part of this then continue */
 			if (test_bit(vid, adapter->active_vlans))
@@ -4120,7 +4169,7 @@
 
 		/* remove PF from the pool */
 		word = i * 2 + VMDQ_P(0) / 32;
-		bits = ~(1 << (VMDQ_P(0) % 32));
+		bits = ~BIT(VMDQ_P(0) % 32);
 		bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
 	}
@@ -4147,6 +4196,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 	default:
 		if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
 			break;
@@ -4172,11 +4222,11 @@
 
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 {
-	u16 vid;
+	u16 vid = 1;
 
 	ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
 
-	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
 		ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
 }
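
[Editor's note] ixgbe_restore_vlan() now adds VID 0 explicitly and then walks active_vlans with for_each_set_bit_from() starting at VID 1, so VLAN 0 is not programmed twice. A userspace sketch of the "iterate set bits starting from an offset" idea, with a hand-rolled bitmap in place of the kernel helpers:

#include <stdio.h>

#define VLAN_N_VID 4096
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long active_vlans[VLAN_N_VID / BITS_PER_LONG];

static void set_vid(unsigned int vid)
{
	active_vlans[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
}

static int test_vid(unsigned int vid)
{
	return !!(active_vlans[vid / BITS_PER_LONG] &
		  (1UL << (vid % BITS_PER_LONG)));
}

int main(void)
{
	set_vid(0);            /* always present, handled separately */
	set_vid(100);
	set_vid(4094);

	printf("restore vid 0 (untagged/priority-tagged traffic)\n");

	/* equivalent of for_each_set_bit_from(vid, active_vlans, VLAN_N_VID)
	 * with vid starting at 1 */
	for (unsigned int vid = 1; vid < VLAN_N_VID; vid++)
		if (test_vid(vid))
			printf("restore vid %u\n", vid);
	return 0;
}
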
 
@@ -4426,6 +4476,7 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+	netdev_features_t features = netdev->features;
 	int count;
 
 	/* Check for Promiscuous and All Multicast modes */
@@ -4443,14 +4494,13 @@
 		hw->addr_ctrl.user_set_promisc = true;
 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 		vmolr |= IXGBE_VMOLR_MPE;
-		ixgbe_vlan_promisc_enable(adapter);
+		features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
 	} else {
 		if (netdev->flags & IFF_ALLMULTI) {
 			fctrl |= IXGBE_FCTRL_MPE;
 			vmolr |= IXGBE_VMOLR_MPE;
 		}
 		hw->addr_ctrl.user_set_promisc = false;
-		ixgbe_vlan_promisc_disable(adapter);
 	}
 
 	/*
@@ -4483,7 +4533,7 @@
 	}
 
 	/* This is useful for sniffing bad packets. */
-	if (adapter->netdev->features & NETIF_F_RXALL) {
+	if (features & NETIF_F_RXALL) {
 		/* UPE and MPE will be handled by normal PROMISC logic
 		 * in e1000e_set_rx_mode */
 		fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
@@ -4496,10 +4546,15 @@
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
-	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 		ixgbe_vlan_strip_enable(adapter);
 	else
 		ixgbe_vlan_strip_disable(adapter);
+
+	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		ixgbe_vlan_promisc_disable(adapter);
+	else
+		ixgbe_vlan_promisc_enable(adapter);
 }
 
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -4530,10 +4585,9 @@
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
-#ifdef CONFIG_IXGBE_VXLAN
 		adapter->vxlan_port = 0;
-#endif
 		break;
 	default:
 		break;
@@ -4632,6 +4686,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		dv_id = IXGBE_DV_X540(link, tc);
 		break;
 	default:
@@ -4692,6 +4747,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		dv_id = IXGBE_LOW_DV_X540(tc);
 		break;
 	default:
@@ -4807,9 +4863,9 @@
 		return;
 
 	if (rss_i > 3)
-		psrtype |= 2 << 29;
+		psrtype |= 2u << 29;
 	else if (rss_i > 1)
-		psrtype |= 1 << 29;
+		psrtype |= 1u << 29;
 
 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
 }
@@ -4873,7 +4929,7 @@
 	/* shutdown specific queue receive and wait for dma to settle */
 	ixgbe_disable_rx_queue(adapter, rx_ring);
 	usleep_range(10000, 20000);
-	ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+	ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
 	ixgbe_clean_rx_ring(rx_ring);
 	rx_ring->l2_accel_priv = NULL;
 }
@@ -5108,6 +5164,7 @@
 		case ixgbe_mac_X540:
 		case ixgbe_mac_X550:
 		case ixgbe_mac_X550EM_x:
+		case ixgbe_mac_x550em_a:
 		default:
 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5158,6 +5215,7 @@
 		gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
 		break;
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		gpie |= IXGBE_SDP0_GPIEN_X540;
 		break;
 	default:
@@ -5230,7 +5288,7 @@
 {
 	WARN_ON(in_interrupt());
 	/* put off any impending NetWatchDogTimeout */
-	adapter->netdev->trans_start = jiffies;
+	netif_trans_update(adapter->netdev);
 
 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 		usleep_range(1000, 2000);
@@ -5469,6 +5527,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
 				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
 				 ~IXGBE_DMATXCTL_TE));
@@ -5500,6 +5559,58 @@
 	ixgbe_tx_timeout_reset(adapter);
 }
 
+#ifdef CONFIG_IXGBE_DCB
+static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct tc_configuration *tc;
+	int j;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+	case ixgbe_mac_82599EB:
+		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+		break;
+	case ixgbe_mac_X540:
+	case ixgbe_mac_X550:
+		adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
+		adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+		break;
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+	default:
+		adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
+		adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
+		break;
+	}
+
+	/* Configure DCB traffic classes */
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &adapter->dcb_cfg.tc_config[j];
+		tc->path[DCB_TX_CONFIG].bwg_id = 0;
+		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+		tc->path[DCB_RX_CONFIG].bwg_id = 0;
+		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+		tc->dcb_pfc = pfc_disabled;
+	}
+
+	/* Initialize default user to priority mapping, UPx->TC0 */
+	tc = &adapter->dcb_cfg.tc_config[0];
+	tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+	tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
+	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+	adapter->dcb_cfg.pfc_mode_enable = false;
+	adapter->dcb_set_bitmap = 0x00;
+	if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+		adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+	       sizeof(adapter->temp_dcb_cfg));
+}
+#endif
+
 /**
  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
  * @adapter: board private structure to initialize
@@ -5514,10 +5625,8 @@
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned int rss, fdir;
 	u32 fwsm;
-#ifdef CONFIG_IXGBE_DCB
-	int j;
-	struct tc_configuration *tc;
-#endif
+	u16 device_caps;
+	int i;
 
 	/* PCI config space info */
 
@@ -5539,6 +5648,10 @@
 #ifdef CONFIG_IXGBE_DCA
 	adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
 #endif
+#ifdef CONFIG_IXGBE_DCB
+	adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
+	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+#endif
 #ifdef IXGBE_FCOE
 	adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
 	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
@@ -5549,7 +5662,14 @@
 #endif /* IXGBE_FCOE */
 
 	/* initialize static ixgbe jump table entries */
-	adapter->jump_tables[0] = ixgbe_ipv4_fields;
+	adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
+					  GFP_KERNEL);
+	if (!adapter->jump_tables[0])
+		return -ENOMEM;
+	adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
+
+	for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
+		adapter->jump_tables[i] = NULL;
 
 	adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
 				     hw->mac.num_rar_entries,
@@ -5587,6 +5707,17 @@
 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
 		break;
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+#ifdef CONFIG_IXGBE_DCB
+		adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+		adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+#ifdef CONFIG_IXGBE_DCB
+		adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+	/* Fall Through */
 	case ixgbe_mac_X550:
 #ifdef CONFIG_IXGBE_DCA
 		adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
@@ -5608,42 +5739,7 @@
 	spin_lock_init(&adapter->fdir_perfect_lock);
 
 #ifdef CONFIG_IXGBE_DCB
-	switch (hw->mac.type) {
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
-		adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
-		break;
-	default:
-		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
-		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
-		break;
-	}
-
-	/* Configure DCB traffic classes */
-	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
-		tc = &adapter->dcb_cfg.tc_config[j];
-		tc->path[DCB_TX_CONFIG].bwg_id = 0;
-		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
-		tc->path[DCB_RX_CONFIG].bwg_id = 0;
-		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
-		tc->dcb_pfc = pfc_disabled;
-	}
-
-	/* Initialize default user to priority mapping, UPx->TC0 */
-	tc = &adapter->dcb_cfg.tc_config[0];
-	tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
-	tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-
-	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
-	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
-	adapter->dcb_cfg.pfc_mode_enable = false;
-	adapter->dcb_set_bitmap = 0x00;
-	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
-	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
-	       sizeof(adapter->temp_dcb_cfg));
-
+	ixgbe_init_dcb(adapter);
 #endif
 
 	/* default flow control settings */
@@ -5677,6 +5773,22 @@
 	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
 	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
 
+	/* Cache bit indicating need for crosstalk fix */
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+		hw->mac.ops.get_device_caps(hw, &device_caps);
+		if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+			adapter->need_crosstalk_fix = false;
+		else
+			adapter->need_crosstalk_fix = true;
+		break;
+	default:
+		adapter->need_crosstalk_fix = false;
+		break;
+	}
+
 	/* set default work limits */
 	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
 
@@ -5994,7 +6106,7 @@
  * handler is registered with the OS, the watchdog timer is started,
  * and the stack is notified that the interface is ready.
  **/
-static int ixgbe_open(struct net_device *netdev)
+int ixgbe_open(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -6096,7 +6208,7 @@
  * needs to be disabled.  A global MAC reset is issued to stop the
  * hardware, and all transmit and receive resources are freed.
  **/
-static int ixgbe_close(struct net_device *netdev)
+int ixgbe_close(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -6219,6 +6331,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		pci_wake_from_d3(pdev, !!wufc);
 		break;
 	default:
@@ -6354,6 +6467,7 @@
 		case ixgbe_mac_X540:
 		case ixgbe_mac_X550:
 		case ixgbe_mac_X550EM_x:
+		case ixgbe_mac_x550em_a:
 			hwstats->pxonrxc[i] +=
 				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
 			break;
@@ -6369,7 +6483,8 @@
 		if ((hw->mac.type == ixgbe_mac_82599EB) ||
 		    (hw->mac.type == ixgbe_mac_X540) ||
 		    (hw->mac.type == ixgbe_mac_X550) ||
-		    (hw->mac.type == ixgbe_mac_X550EM_x)) {
+		    (hw->mac.type == ixgbe_mac_X550EM_x) ||
+		    (hw->mac.type == ixgbe_mac_x550em_a)) {
 			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
 			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
 			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -6394,6 +6509,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		/* OS2BMC stats are X540 and later */
 		hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
 		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -6564,7 +6680,7 @@
 		for (i = 0; i < adapter->num_q_vectors; i++) {
 			struct ixgbe_q_vector *qv = adapter->q_vector[i];
 			if (qv->rx.ring || qv->tx.ring)
-				eics |= ((u64)1 << i);
+				eics |= BIT_ULL(i);
 		}
 	}
 
@@ -6595,6 +6711,18 @@
 		link_up = true;
 	}
 
+	/* If the crosstalk fix is enabled, do the sanity check of
+	 * making sure the SFP+ cage is not empty.
+	 */
+	if (adapter->need_crosstalk_fix) {
+		u32 sfp_cage_full;
+
+		sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+				IXGBE_ESDP_SDP2;
+		if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full)
+			link_up = false;
+	}
+
 	if (adapter->ixgbe_ieee_pfc)
 		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
 
@@ -6664,6 +6792,7 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 	case ixgbe_mac_82599EB: {
 		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
 		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -6940,6 +7069,16 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	s32 err;
 
+	/* If crosstalk fix enabled verify the SFP+ cage is full */
+	if (adapter->need_crosstalk_fix) {
+		u32 sfp_cage_full;
+
+		sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+				IXGBE_ESDP_SDP2;
+		if (!sfp_cage_full)
+			return;
+	}
+
 	/* not searching for SFP so there is nothing to do here */
 	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
 	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
@@ -7124,10 +7263,12 @@
 		return;
 	}
 #ifdef CONFIG_IXGBE_VXLAN
+	rtnl_lock();
 	if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
 		adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
 		vxlan_get_rx_port(adapter->netdev);
 	}
+	rtnl_unlock();
 #endif /* CONFIG_IXGBE_VXLAN */
 	ixgbe_reset_subtask(adapter);
 	ixgbe_phy_interrupt_subtask(adapter);
@@ -7150,9 +7291,18 @@
 		     struct ixgbe_tx_buffer *first,
 		     u8 *hdr_len)
 {
+	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
-	u32 vlan_macip_lens, type_tucmd;
-	u32 mss_l4len_idx, l4len;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7165,46 +7315,52 @@
 	if (err < 0)
 		return err;
 
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
+
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-	if (first->protocol == htons(ETH_P_IP)) {
-		struct iphdr *iph = ip_hdr(skb);
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-							 iph->daddr, 0,
-							 IPPROTO_TCP,
-							 0);
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+						  csum_unfold(l4.tcp->check)));
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+		ip.v4->tot_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM |
 				   IXGBE_TX_FLAGS_IPV4;
-	} else if (skb_is_gso_v6(skb)) {
-		ipv6_hdr(skb)->payload_len = 0;
-		tcp_hdr(skb)->check =
-		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-				     &ipv6_hdr(skb)->daddr,
-				     0, IPPROTO_TCP, 0);
+	} else {
+		ip.v6->payload_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM;
 	}
 
-	/* compute header lengths */
-	l4len = tcp_hdrlen(skb);
-	*hdr_len = skb_transport_offset(skb) + l4len;
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+	/* remove payload length from inner checksum */
+	paylen = skb->len - l4_offset;
+	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
 
 	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
 	/* mss_l4len_id: use 0 as index for TSO */
-	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 
 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-	vlan_macip_lens = skb_network_header_len(skb);
-	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens = l4.hdr - ip.hdr;
+	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
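
[Editor's note] The rewritten TSO path stops recomputing pseudo-header checksums from scratch: the IPv4 header checksum is folded out of the local checksum via lco_csum(), and the payload length is removed from the TCP checksum with csum_replace_by_diff() so the hardware can add per-segment lengths back in. A small demonstration of the underlying "remove a value from a ones'-complement checksum" step (plain RFC 1624 arithmetic, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* ones'-complement 16-bit addition with end-around carry */
static uint16_t csum16_add(uint16_t a, uint16_t b)
{
	uint32_t s = (uint32_t)a + b;
	return (uint16_t)((s & 0xffff) + (s >> 16));
}

/* checksum field over the given 16-bit words (stored as ~sum) */
static uint16_t csum16(const uint16_t *w, int n)
{
	uint16_t s = 0;
	for (int i = 0; i < n; i++)
		s = csum16_add(s, w[i]);
	return (uint16_t)~s;
}

int main(void)
{
	/* pretend pseudo-header words: src, dst, proto, and a payload length */
	uint16_t words[] = { 0xc0a8, 0x0001, 0xc0a8, 0x0002, 0x0006, 0x0200 };
	int n = sizeof(words) / sizeof(words[0]);
	uint16_t check = csum16(words, n);

	/* remove the length word (0x0200) from the stored checksum:
	 * HC' = ~(~HC + ~m)      (RFC 1624 with m' = 0) */
	uint16_t no_len = (uint16_t)~csum16_add((uint16_t)~check,
						(uint16_t)~words[5]);

	/* same result as recomputing the sum without the length word */
	printf("with length   : 0x%04x\n", check);
	printf("length removed: 0x%04x\n", no_len);
	printf("recomputed    : 0x%04x\n", csum16(words, n - 1));
	return 0;
}
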
@@ -7213,103 +7369,61 @@
 	return 1;
 }
 
+static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+	unsigned int offset = 0;
+
+	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+	return offset == skb_checksum_start_offset(skb);
+}
+
 static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 			  struct ixgbe_tx_buffer *first)
 {
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
-	u32 mss_l4len_idx = 0;
 	u32 type_tucmd = 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
-		    !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+csum_failed:
+		if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
+					 IXGBE_TX_FLAGS_CC)))
 			return;
-		vlan_macip_lens = skb_network_offset(skb) <<
-				  IXGBE_ADVTXD_MACLEN_SHIFT;
-	} else {
-		u8 l4_hdr = 0;
-		union {
-			struct iphdr *ipv4;
-			struct ipv6hdr *ipv6;
-			u8 *raw;
-		} network_hdr;
-		union {
-			struct tcphdr *tcphdr;
-			u8 *raw;
-		} transport_hdr;
-		__be16 frag_off;
-
-		if (skb->encapsulation) {
-			network_hdr.raw = skb_inner_network_header(skb);
-			transport_hdr.raw = skb_inner_transport_header(skb);
-			vlan_macip_lens = skb_inner_network_offset(skb) <<
-					  IXGBE_ADVTXD_MACLEN_SHIFT;
-		} else {
-			network_hdr.raw = skb_network_header(skb);
-			transport_hdr.raw = skb_transport_header(skb);
-			vlan_macip_lens = skb_network_offset(skb) <<
-					  IXGBE_ADVTXD_MACLEN_SHIFT;
-		}
-
-		/* use first 4 bits to determine IP version */
-		switch (network_hdr.ipv4->version) {
-		case IPVERSION:
-			vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
-			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
-			l4_hdr = network_hdr.ipv4->protocol;
-			break;
-		case 6:
-			vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
-			l4_hdr = network_hdr.ipv6->nexthdr;
-			if (likely((transport_hdr.raw - network_hdr.raw) ==
-				   sizeof(struct ipv6hdr)))
-				break;
-			ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
-					      sizeof(struct ipv6hdr),
-					 &l4_hdr, &frag_off);
-			if (unlikely(frag_off))
-				l4_hdr = NEXTHDR_FRAGMENT;
-			break;
-		default:
-			break;
-		}
-
-		switch (l4_hdr) {
-		case IPPROTO_TCP:
-			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-			mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
-					IXGBE_ADVTXD_L4LEN_SHIFT;
-			break;
-		case IPPROTO_SCTP:
-			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
-			mss_l4len_idx = sizeof(struct sctphdr) <<
-					IXGBE_ADVTXD_L4LEN_SHIFT;
-			break;
-		case IPPROTO_UDP:
-			mss_l4len_idx = sizeof(struct udphdr) <<
-					IXGBE_ADVTXD_L4LEN_SHIFT;
-			break;
-		default:
-			if (unlikely(net_ratelimit())) {
-				dev_warn(tx_ring->dev,
-					 "partial checksum, version=%d, l4 proto=%x\n",
-					 network_hdr.ipv4->version, l4_hdr);
-			}
-			skb_checksum_help(skb);
-			goto no_csum;
-		}
-
-		/* update TX checksum flag */
-		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+		goto no_csum;
 	}
 
+	switch (skb->csum_offset) {
+	case offsetof(struct tcphdr, check):
+		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+		/* fall through */
+	case offsetof(struct udphdr, check):
+		break;
+	case offsetof(struct sctphdr, checksum):
+		/* validate that this is actually an SCTP request */
+		if (((first->protocol == htons(ETH_P_IP)) &&
+		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+		    ((first->protocol == htons(ETH_P_IPV6)) &&
+		     ixgbe_ipv6_csum_is_sctp(skb))) {
+			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			break;
+		}
+		/* fall through */
+	default:
+		skb_checksum_help(skb);
+		goto csum_failed;
+	}
+
+	/* update TX checksum flag */
+	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+	vlan_macip_lens = skb_checksum_start_offset(skb) -
+			  skb_network_offset(skb);
 no_csum:
 	/* vlan_macip_lens: MACLEN, VLAN tag */
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
-			  type_tucmd, mss_l4len_idx);
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
 }
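
[Editor's note] ixgbe_tx_csum() no longer parses L3/L4 headers for plain checksum offload; it dispatches on skb->csum_offset, which already identifies whether the stack wants a TCP, UDP or SCTP checksum, and punts to skb_checksum_help() otherwise. A simplified illustration of that offsetof()-based dispatch, using minimal stand-in header structs rather than the kernel's:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* minimal stand-ins; only the position of the checksum field matters */
struct tcp_hdr  { uint16_t sport, dport; uint32_t seq, ack;
		  uint16_t flags, win, check, urg; };
struct udp_hdr  { uint16_t sport, dport, len, check; };
struct sctp_hdr { uint16_t sport, dport; uint32_t vtag, checksum; };

static const char *classify(size_t csum_offset)
{
	switch (csum_offset) {
	case offsetof(struct tcp_hdr, check):
		return "TCP checksum offload";
	case offsetof(struct udp_hdr, check):
		return "UDP checksum offload";
	case offsetof(struct sctp_hdr, checksum):
		return "SCTP CRC offload (after validating it really is SCTP)";
	default:
		return "unknown - do the checksum in software";
	}
}

int main(void)
{
	printf("offset %2zu -> %s\n", offsetof(struct tcp_hdr, check),
	       classify(offsetof(struct tcp_hdr, check)));
	printf("offset %2zu -> %s\n", offsetof(struct udp_hdr, check),
	       classify(offsetof(struct udp_hdr, check)));
	printf("offset %2zu -> %s\n", offsetof(struct sctp_hdr, checksum),
	       classify(offsetof(struct sctp_hdr, checksum)));
	printf("offset %2zu -> %s\n", (size_t)3, classify(3));
	return 0;
}
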
 
 #define IXGBE_SET_FLAG(_input, _flag, _result) \
@@ -7560,11 +7674,10 @@
 		struct ipv6hdr *ipv6;
 	} hdr;
 	struct tcphdr *th;
+	unsigned int hlen;
 	struct sk_buff *skb;
-#ifdef CONFIG_IXGBE_VXLAN
-	u8 encap = false;
-#endif /* CONFIG_IXGBE_VXLAN */
 	__be16 vlan_id;
+	int l4_proto;
 
 	/* if ring doesn't have a interrupt vector, cannot perform ATR */
 	if (!q_vector)
@@ -7576,62 +7689,50 @@
 
 	ring->atr_count++;
 
+	/* currently only IPv4/IPv6 with TCP is supported */
+	if ((first->protocol != htons(ETH_P_IP)) &&
+	    (first->protocol != htons(ETH_P_IPV6)))
+		return;
+
 	/* snag network header to get L4 type and address */
 	skb = first->skb;
 	hdr.network = skb_network_header(skb);
-	if (!skb->encapsulation) {
-		th = tcp_hdr(skb);
-	} else {
 #ifdef CONFIG_IXGBE_VXLAN
+	if (skb->encapsulation &&
+	    first->protocol == htons(ETH_P_IP) &&
+	    hdr.ipv4->protocol != IPPROTO_UDP) {
 		struct ixgbe_adapter *adapter = q_vector->adapter;
 
-		if (!adapter->vxlan_port)
-			return;
-		if (first->protocol != htons(ETH_P_IP) ||
-		    hdr.ipv4->version != IPVERSION ||
-		    hdr.ipv4->protocol != IPPROTO_UDP) {
-			return;
-		}
-		if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
-			return;
-		encap = true;
-		hdr.network = skb_inner_network_header(skb);
-		th = inner_tcp_hdr(skb);
-#else
-		return;
-#endif /* CONFIG_IXGBE_VXLAN */
+		/* verify the port is recognized as VXLAN */
+		if (adapter->vxlan_port &&
+		    udp_hdr(skb)->dest == adapter->vxlan_port)
+			hdr.network = skb_inner_network_header(skb);
 	}
+#endif /* CONFIG_IXGBE_VXLAN */
 
 	/* Currently only IPv4/IPv6 with TCP is supported */
 	switch (hdr.ipv4->version) {
 	case IPVERSION:
-		if (hdr.ipv4->protocol != IPPROTO_TCP)
-			return;
+		/* access ihl as u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+		l4_proto = hdr.ipv4->protocol;
 		break;
 	case 6:
-		if (likely((unsigned char *)th - hdr.network ==
-			   sizeof(struct ipv6hdr))) {
-			if (hdr.ipv6->nexthdr != IPPROTO_TCP)
-				return;
-		} else {
-			__be16 frag_off;
-			u8 l4_hdr;
-
-			ipv6_skip_exthdr(skb, hdr.network - skb->data +
-					      sizeof(struct ipv6hdr),
-					 &l4_hdr, &frag_off);
-			if (unlikely(frag_off))
-				return;
-			if (l4_hdr != IPPROTO_TCP)
-				return;
-		}
+		hlen = hdr.network - skb->data;
+		l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
+		hlen -= hdr.network - skb->data;
 		break;
 	default:
 		return;
 	}
 
-	/* skip this packet since it is invalid or the socket is closing */
-	if (!th || th->fin)
+	if (l4_proto != IPPROTO_TCP)
+		return;
+
+	th = (struct tcphdr *)(hdr.network + hlen);
+
+	/* skip this packet since the socket is closing */
+	if (th->fin)
 		return;
 
 	/* sample on all syn packets or once every atr sample count */
@@ -7682,10 +7783,8 @@
 		break;
 	}
 
-#ifdef CONFIG_IXGBE_VXLAN
-	if (encap)
+	if (hdr.network != skb_network_header(skb))
 		input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
-#endif /* CONFIG_IXGBE_VXLAN */
 
 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
 	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
@@ -8209,10 +8308,17 @@
 static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
 			       struct tc_cls_u32_offload *cls)
 {
+	u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
+	u32 loc;
 	int err;
 
+	if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
+		return -EINVAL;
+
+	loc = cls->knode.handle & 0xfffff;
+
 	spin_lock(&adapter->fdir_perfect_lock);
-	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle);
+	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
 	spin_unlock(&adapter->fdir_perfect_lock);
 	return err;
 }
@@ -8221,20 +8327,158 @@
 					    __be16 protocol,
 					    struct tc_cls_u32_offload *cls)
 {
+	u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+	if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+		return -EINVAL;
+
 	/* This ixgbe devices do not support hash tables at the moment
 	 * so abort when given hash tables.
 	 */
 	if (cls->hnode.divisor > 0)
 		return -EINVAL;
 
-	set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables);
+	set_bit(uhtid - 1, &adapter->tables);
 	return 0;
 }
 
 static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
 					    struct tc_cls_u32_offload *cls)
 {
-	clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables);
+	u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+	if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+		return -EINVAL;
+
+	clear_bit(uhtid - 1, &adapter->tables);
+	return 0;
+}
+
+#ifdef CONFIG_NET_CLS_ACT
+static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
+				  u8 *queue, u64 *action)
+{
+	unsigned int num_vfs = adapter->num_vfs, vf;
+	struct net_device *upper;
+	struct list_head *iter;
+
+	/* redirect to a SRIOV VF */
+	for (vf = 0; vf < num_vfs; ++vf) {
+		upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
+		if (upper->ifindex == ifindex) {
+			if (adapter->num_rx_pools > 1)
+				*queue = vf * 2;
+			else
+				*queue = vf * adapter->num_rx_queues_per_pool;
+
+			*action = vf + 1;
+			*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+			return 0;
+		}
+	}
+
+	/* redirect to a offloaded macvlan netdev */
+	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+		if (netif_is_macvlan(upper)) {
+			struct macvlan_dev *dfwd = netdev_priv(upper);
+			struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+			if (vadapter && vadapter->netdev->ifindex == ifindex) {
+				*queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
+				*action = *queue;
+				return 0;
+			}
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+			    struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+	const struct tc_action *a;
+	int err;
+
+	if (tc_no_actions(exts))
+		return -EINVAL;
+
+	tc_for_each_action(a, exts) {
+
+		/* Drop action */
+		if (is_tcf_gact_shot(a)) {
+			*action = IXGBE_FDIR_DROP_QUEUE;
+			*queue = IXGBE_FDIR_DROP_QUEUE;
+			return 0;
+		}
+
+		/* Redirect to a VF or a offloaded macvlan */
+		if (is_tcf_mirred_redirect(a)) {
+			int ifindex = tcf_mirred_ifindex(a);
+
+			err = handle_redirect_action(adapter, ifindex, queue,
+						     action);
+			if (err == 0)
+				return err;
+		}
+	}
+
+	return -EINVAL;
+}
+#else
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+			    struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_NET_CLS_ACT */
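
[Editor's note] handle_redirect_action() maps a mirred-redirect target back to what the flow director needs: a hardware queue and an action value whose upper bits select the VF ring. A toy version of the SR-IOV branch only, assuming the usual ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF of 32 (treat the shift and the pool layout as assumptions):

#include <stdint.h>
#include <stdio.h>

#define RING_VF_OFF 32   /* assumed ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF */

/* Map a VF index to (queue, action) the way the driver does for
 * SR-IOV redirect targets: first queue of the VF's pool, and the
 * VF number (1-based) encoded above RING_VF_OFF in the action. */
static void map_vf(unsigned int vf, unsigned int queues_per_pool,
		   uint8_t *queue, uint64_t *action)
{
	*queue = vf * queues_per_pool;
	*action = (uint64_t)(vf + 1) << RING_VF_OFF;
}

int main(void)
{
	uint8_t queue;
	uint64_t action;

	map_vf(3, 4, &queue, &action);
	printf("VF 3 -> queue %u, action 0x%016llx\n",
	       queue, (unsigned long long)action);
	return 0;
}
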
+
+static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
+				    union ixgbe_atr_input *mask,
+				    struct tc_cls_u32_offload *cls,
+				    struct ixgbe_mat_field *field_ptr,
+				    struct ixgbe_nexthdr *nexthdr)
+{
+	int i, j, off;
+	__be32 val, m;
+	bool found_entry = false, found_jump_field = false;
+
+	for (i = 0; i < cls->knode.sel->nkeys; i++) {
+		off = cls->knode.sel->keys[i].off;
+		val = cls->knode.sel->keys[i].val;
+		m = cls->knode.sel->keys[i].mask;
+
+		for (j = 0; field_ptr[j].val; j++) {
+			if (field_ptr[j].off == off) {
+				field_ptr[j].val(input, mask, val, m);
+				input->filter.formatted.flow_type |=
+					field_ptr[j].type;
+				found_entry = true;
+				break;
+			}
+		}
+		if (nexthdr) {
+			if (nexthdr->off == cls->knode.sel->keys[i].off &&
+			    nexthdr->val == cls->knode.sel->keys[i].val &&
+			    nexthdr->mask == cls->knode.sel->keys[i].mask)
+				found_jump_field = true;
+			else
+				continue;
+		}
+	}
+
+	if (nexthdr && !found_jump_field)
+		return -EINVAL;
+
+	if (!found_entry)
+		return 0;
+
+	mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+				    IXGBE_ATR_L4TYPE_MASK;
+
+	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+		mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
 	return 0;
 }
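
For the SR-IOV redirect path added above, the filter is programmed with a queue index into the VF's pool and a 64-bit action whose upper half carries the pool number. A minimal stand-alone sketch of that encoding follows; it assumes ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF is 32 as in the ethtool UAPI, and the helper name and test harness are illustration only, not driver code:

	#include <stdint.h>
	#include <stdio.h>

	#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32	/* VF field sits in the upper 32 bits */

	/* Mirrors the math in handle_redirect_action() for the single-pool case:
	 * queue is the VF's first ring, action stores vf + 1 so that zero keeps
	 * meaning "no VF" in the ring cookie.
	 */
	static void encode_vf_redirect(unsigned int vf, unsigned int queues_per_pool,
				       uint8_t *queue, uint64_t *action)
	{
		*queue = vf * queues_per_pool;
		*action = (uint64_t)(vf + 1) << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
	}

	int main(void)
	{
		uint8_t queue;
		uint64_t action;

		encode_vf_redirect(3, 4, &queue, &action);	/* VF 3, 4 queues per pool */
		printf("queue=%u action=0x%llx\n",
		       (unsigned int)queue, (unsigned long long)action);
		/* queue=12 action=0x400000000 */
		return 0;
	}
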
 
@@ -8245,60 +8489,29 @@
 	u32 loc = cls->knode.handle & 0xfffff;
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_mat_field *field_ptr;
-	struct ixgbe_fdir_filter *input;
-	union ixgbe_atr_input mask;
-#ifdef CONFIG_NET_CLS_ACT
-	const struct tc_action *a;
-#endif
-	int i, err = 0;
+	struct ixgbe_fdir_filter *input = NULL;
+	union ixgbe_atr_input *mask = NULL;
+	struct ixgbe_jump_table *jump = NULL;
+	int i, err = -EINVAL;
 	u8 queue;
-	u32 handle;
+	u32 uhtid, link_uhtid;
 
-	memset(&mask, 0, sizeof(union ixgbe_atr_input));
-	handle = cls->knode.handle;
+	uhtid = TC_U32_USERHTID(cls->knode.handle);
+	link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
 
-	/* At the moment cls_u32 jumps to transport layer and skips past
+	/* At the moment cls_u32 jumps to network layer and skips past
 	 * L2 headers. The canonical method to match L2 frames is to use
 	 * negative values. However, this is error-prone at best but really
 	 * just broken because there is no way to "know" what sort of hdr
-	 * is in front of the transport layer. Fix cls_u32 to support L2
+	 * is in front of the network layer. Fix cls_u32 to support L2
 	 * headers when needed.
 	 */
 	if (protocol != htons(ETH_P_IP))
-		return -EINVAL;
-
-	if (cls->knode.link_handle ||
-	    cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) {
-		struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
-		u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle);
-
-		if (!test_bit(uhtid, &adapter->tables))
-			return -EINVAL;
-
-		for (i = 0; nexthdr[i].jump; i++) {
-			if (nexthdr->o != cls->knode.sel->offoff ||
-			    nexthdr->s != cls->knode.sel->offshift ||
-			    nexthdr->m != cls->knode.sel->offmask ||
-			    /* do not support multiple key jumps its just mad */
-			    cls->knode.sel->nkeys > 1)
-				return -EINVAL;
-
-			if (nexthdr->off != cls->knode.sel->keys[0].off ||
-			    nexthdr->val != cls->knode.sel->keys[0].val ||
-			    nexthdr->mask != cls->knode.sel->keys[0].mask)
-				return -EINVAL;
-
-			if (uhtid >= IXGBE_MAX_LINK_HANDLE)
-				return -EINVAL;
-
-			adapter->jump_tables[uhtid] = nexthdr->jump;
-		}
-		return 0;
-	}
+		return err;
 
 	if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
 		e_err(drv, "Location out of range\n");
-		return -EINVAL;
+		return err;
 	}
 
 	/* cls u32 is a graph starting at root node 0x800. The driver tracks
@@ -8308,93 +8521,128 @@
 	 * To add support for new nodes, update the ixgbe_model.h parse structures;
 	 * this function _should_ be generic, so try not to hardcode values here.
 	 */
-	if (TC_U32_USERHTID(handle) == 0x800) {
-		field_ptr = adapter->jump_tables[0];
+	if (uhtid == 0x800) {
+		field_ptr = (adapter->jump_tables[0])->mat;
 	} else {
-		if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables))
-			return -EINVAL;
-
-		field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)];
+		if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+			return err;
+		if (!adapter->jump_tables[uhtid])
+			return err;
+		field_ptr = (adapter->jump_tables[uhtid])->mat;
 	}
 
 	if (!field_ptr)
-		return -EINVAL;
+		return err;
+
+	/* At this point we know the field_ptr is valid and need to either
+	 * build a cls_u32 link or attach a filter. Adding a link to a
+	 * handle that does not exist is invalid, and the same goes for
+	 * adding rules to handles that don't exist.
+	 */
+
+	if (link_uhtid) {
+		struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
+
+		if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+			return err;
+
+		if (!test_bit(link_uhtid - 1, &adapter->tables))
+			return err;
+
+		for (i = 0; nexthdr[i].jump; i++) {
+			if (nexthdr[i].o != cls->knode.sel->offoff ||
+			    nexthdr[i].s != cls->knode.sel->offshift ||
+			    nexthdr[i].m != cls->knode.sel->offmask)
+				return err;
+
+			jump = kzalloc(sizeof(*jump), GFP_KERNEL);
+			if (!jump)
+				return -ENOMEM;
+			input = kzalloc(sizeof(*input), GFP_KERNEL);
+			if (!input) {
+				err = -ENOMEM;
+				goto free_jump;
+			}
+			mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+			if (!mask) {
+				err = -ENOMEM;
+				goto free_input;
+			}
+			jump->input = input;
+			jump->mask = mask;
+			err = ixgbe_clsu32_build_input(input, mask, cls,
+						       field_ptr, &nexthdr[i]);
+			if (!err) {
+				jump->mat = nexthdr[i].jump;
+				adapter->jump_tables[link_uhtid] = jump;
+				break;
+			}
+		}
+		return 0;
+	}
 
 	input = kzalloc(sizeof(*input), GFP_KERNEL);
 	if (!input)
 		return -ENOMEM;
-
-	for (i = 0; i < cls->knode.sel->nkeys; i++) {
-		int off = cls->knode.sel->keys[i].off;
-		__be32 val = cls->knode.sel->keys[i].val;
-		__be32 m = cls->knode.sel->keys[i].mask;
-		bool found_entry = false;
-		int j;
-
-		for (j = 0; field_ptr[j].val; j++) {
-			if (field_ptr[j].off == off &&
-			    field_ptr[j].mask == m) {
-				field_ptr[j].val(input, &mask, val, m);
-				input->filter.formatted.flow_type |=
-					field_ptr[j].type;
-				found_entry = true;
-				break;
-			}
-		}
-
-		if (!found_entry)
-			goto err_out;
+	mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+	if (!mask) {
+		err = -ENOMEM;
+		goto free_input;
 	}
 
-	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
-				   IXGBE_ATR_L4TYPE_MASK;
-
-	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
-		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
-
-#ifdef CONFIG_NET_CLS_ACT
-	if (list_empty(&cls->knode.exts->actions))
+	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
+		if ((adapter->jump_tables[uhtid])->input)
+			memcpy(input, (adapter->jump_tables[uhtid])->input,
+			       sizeof(*input));
+		if ((adapter->jump_tables[uhtid])->mask)
+			memcpy(mask, (adapter->jump_tables[uhtid])->mask,
+			       sizeof(*mask));
+	}
+	err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
+	if (err)
 		goto err_out;
 
-	list_for_each_entry(a, &cls->knode.exts->actions, list) {
-		if (!is_tcf_gact_shot(a))
-			goto err_out;
-	}
-#endif
+	err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
+			       &queue);
+	if (err < 0)
+		goto err_out;
 
-	input->action = IXGBE_FDIR_DROP_QUEUE;
-	queue = IXGBE_FDIR_DROP_QUEUE;
 	input->sw_idx = loc;
 
 	spin_lock(&adapter->fdir_perfect_lock);
 
 	if (hlist_empty(&adapter->fdir_filter_list)) {
-		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
-		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+		memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
+		err = ixgbe_fdir_set_input_mask_82599(hw, mask);
 		if (err)
 			goto err_out_w_lock;
-	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+	} else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
 		err = -EINVAL;
 		goto err_out_w_lock;
 	}
 
-	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+	ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
 	err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
 						    input->sw_idx, queue);
 	if (!err)
 		ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
 	spin_unlock(&adapter->fdir_perfect_lock);
 
+	kfree(mask);
 	return err;
 err_out_w_lock:
 	spin_unlock(&adapter->fdir_perfect_lock);
 err_out:
+	kfree(mask);
+free_input:
 	kfree(input);
-	return -EINVAL;
+free_jump:
+	kfree(jump);
+	return err;
 }
 
-int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-		     struct tc_to_netdev *tc)
+static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+			    struct tc_to_netdev *tc)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 
@@ -8517,11 +8765,6 @@
 			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 	}
 
-	if (features & NETIF_F_HW_VLAN_CTAG_RX)
-		ixgbe_vlan_strip_enable(adapter);
-	else
-		ixgbe_vlan_strip_disable(adapter);
-
 	if (changed & NETIF_F_RXALL)
 		need_reset = true;
 
@@ -8538,6 +8781,9 @@
 
 	if (need_reset)
 		ixgbe_do_reset(netdev);
+	else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_FILTER))
+		ixgbe_set_rx_mode(netdev);
 
 	return 0;
 }
@@ -8554,7 +8800,6 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	u16 new_port = ntohs(port);
 
 	if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
 		return;
@@ -8562,18 +8807,18 @@
 	if (sa_family == AF_INET6)
 		return;
 
-	if (adapter->vxlan_port == new_port)
+	if (adapter->vxlan_port == port)
 		return;
 
 	if (adapter->vxlan_port) {
 		netdev_info(dev,
 			    "Hit Max num of VXLAN ports, not adding port %d\n",
-			    new_port);
+			    ntohs(port));
 		return;
 	}
 
-	adapter->vxlan_port = new_port;
-	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
+	adapter->vxlan_port = port;
+	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
 }
 
 /**
@@ -8586,7 +8831,6 @@
 				 __be16 port)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	u16 new_port = ntohs(port);
 
 	if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
 		return;
@@ -8594,9 +8838,9 @@
 	if (sa_family == AF_INET6)
 		return;
 
-	if (adapter->vxlan_port != new_port) {
+	if (adapter->vxlan_port != port) {
 		netdev_info(dev, "Port %d was not found, not deleting\n",
-			    new_port);
+			    ntohs(port));
 		return;
 	}
 
@@ -8837,17 +9081,36 @@
 	kfree(fwd_adapter);
 }
 
-#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+#define IXGBE_MAX_MAC_HDR_LEN		127
+#define IXGBE_MAX_NETWORK_HDR_LEN	511
+
 static netdev_features_t
 ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 		     netdev_features_t features)
 {
-	if (!skb->encapsulation)
-		return features;
+	unsigned int network_hdr_len, mac_hdr_len;
 
-	if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
-		     IXGBE_MAX_TUNNEL_HDR_LEN))
-		return features & ~NETIF_F_CSUM_MASK;
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
 
 	return features;
 }
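
The new limits bound what a single Tx context descriptor can describe: at most 127 bytes of MAC header and at most 511 bytes between the network header and the checksum start. A small worked sketch for a typical VXLAN-in-IPv4 frame; the header sizes here are illustrative assumptions, while the driver derives the real values from the skb offsets:

	#include <stdio.h>

	/* Illustrative header sizes; the driver uses skb_network_header() and
	 * skb_checksum_start() instead of fixed constants.
	 */
	#define ETH_HLEN	14
	#define IPV4_HLEN	20
	#define UDP_HLEN	8
	#define VXLAN_HLEN	8

	#define IXGBE_MAX_MAC_HDR_LEN		127
	#define IXGBE_MAX_NETWORK_HDR_LEN	511

	int main(void)
	{
		/* Outer L2 header: skb_network_header(skb) - skb->data */
		unsigned int mac_hdr_len = ETH_HLEN;

		/* Outer IP header up to the inner transport header, where the
		 * offloaded checksum starts:
		 * skb_checksum_start(skb) - skb_network_header(skb)
		 */
		unsigned int network_hdr_len =
			IPV4_HLEN + UDP_HLEN + VXLAN_HLEN + ETH_HLEN + IPV4_HLEN;

		printf("mac_hdr_len=%u (limit %u)\n",
		       mac_hdr_len, IXGBE_MAX_MAC_HDR_LEN);
		printf("network_hdr_len=%u (limit %u)\n",
		       network_hdr_len, IXGBE_MAX_NETWORK_HDR_LEN);	/* 70 vs 511 */
		return 0;
	}
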
@@ -8862,6 +9125,7 @@
 	.ndo_set_mac_address	= ixgbe_set_mac,
 	.ndo_change_mtu		= ixgbe_change_mtu,
 	.ndo_tx_timeout		= ixgbe_tx_timeout,
+	.ndo_set_tx_maxrate	= ixgbe_tx_maxrate,
 	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
 	.ndo_do_ioctl		= ixgbe_ioctl,
@@ -8947,7 +9211,7 @@
 
 /**
  * ixgbe_wol_supported - Check whether device supports WoL
- * @hw: hw specific details
+ * @adapter: the adapter private structure
  * @device_id: the device ID
  * @subdevice_id: the subsystem device ID
  *
@@ -8955,19 +9219,33 @@
  * which devices have WoL support
  *
  **/
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
-			u16 subdevice_id)
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+			 u16 subdevice_id)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
-	int is_wol_supported = 0;
 
+	/* WOL not supported on 82598 */
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return false;
+
+	/* check eeprom to see if WOL is enabled for X540 and newer */
+	if (hw->mac.type >= ixgbe_mac_X540) {
+		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+		     (hw->bus.func == 0)))
+			return true;
+	}
+
+	/* WOL is determined based on device IDs for 82599 MACs */
 	switch (device_id) {
 	case IXGBE_DEV_ID_82599_SFP:
 		/* Only these subdevices could support WOL */
 		switch (subdevice_id) {
-		case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
 		case IXGBE_SUBDEV_ID_82599_560FLR:
+		case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
+		case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
+		case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
 			/* only support first port */
 			if (hw->bus.func != 0)
 				break;
@@ -8975,66 +9253,31 @@
 		case IXGBE_SUBDEV_ID_82599_SFP:
 		case IXGBE_SUBDEV_ID_82599_RNDC:
 		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
-		case IXGBE_SUBDEV_ID_82599_LOM_SFP:
-			is_wol_supported = 1;
-			break;
+		case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
+		case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
+		case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
+			return true;
 		}
 		break;
 	case IXGBE_DEV_ID_82599EN_SFP:
-		/* Only this subdevice supports WOL */
+		/* Only these subdevices support WOL */
 		switch (subdevice_id) {
 		case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
-			is_wol_supported = 1;
-			break;
+			return true;
 		}
 		break;
 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
 		/* All except this subdevice support WOL */
 		if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
-			is_wol_supported = 1;
+			return true;
 		break;
 	case IXGBE_DEV_ID_82599_KX4:
-		is_wol_supported = 1;
-		break;
-	case IXGBE_DEV_ID_X540T:
-	case IXGBE_DEV_ID_X540T1:
-	case IXGBE_DEV_ID_X550T:
-	case IXGBE_DEV_ID_X550EM_X_KX4:
-	case IXGBE_DEV_ID_X550EM_X_KR:
-	case IXGBE_DEV_ID_X550EM_X_10G_T:
-		/* check eeprom to see if enabled wol */
-		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
-		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
-		     (hw->bus.func == 0))) {
-			is_wol_supported = 1;
-		}
+		return true;
+	default:
 		break;
 	}
 
-	return is_wol_supported;
-}
-
-/**
- * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
- * @adapter: Pointer to adapter struct
- */
-static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_OF
-	struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
-	struct ixgbe_hw *hw = &adapter->hw;
-	const unsigned char *addr;
-
-	addr = of_get_mac_address(dp);
-	if (addr) {
-		ether_addr_copy(hw->mac.perm_addr, addr);
-		return;
-	}
-#endif /* CONFIG_OF */
-
-#ifdef CONFIG_SPARC
-	ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
-#endif /* CONFIG_SPARC */
+	return false;
 }
 
 /**
@@ -9140,23 +9383,23 @@
 	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
 
 	/* Setup hw api */
-	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+	hw->mac.ops   = *ii->mac_ops;
 	hw->mac.type  = ii->mac;
 	hw->mvals     = ii->mvals;
 
 	/* EEPROM */
-	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+	hw->eeprom.ops = *ii->eeprom_ops;
 	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 	if (ixgbe_removed(hw->hw_addr)) {
 		err = -EIO;
 		goto err_ioremap;
 	}
 	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
-	if (!(eec & (1 << 8)))
+	if (!(eec & BIT(8)))
 		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
 
 	/* PHY */
-	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+	hw->phy.ops = *ii->phy_ops;
 	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
 	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
 	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
@@ -9173,12 +9416,17 @@
 	if (err)
 		goto err_sw_init;
 
+	/* Make sure the SWFW semaphore is in a valid state */
+	if (hw->mac.ops.init_swfw_sync)
+		hw->mac.ops.init_swfw_sync(hw);
+
 	/* Make it possible for the adapter to be woken up via WOL */
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 		break;
 	default:
@@ -9219,65 +9467,62 @@
 		goto skip_sriov;
 	/* Mailbox */
 	ixgbe_init_mbx_params_pf(hw);
-	memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+	hw->mbx.ops = ii->mbx_ops;
 	pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
 	ixgbe_enable_sriov(adapter);
 skip_sriov:
 
 #endif
 	netdev->features = NETIF_F_SG |
-			   NETIF_F_IP_CSUM |
-			   NETIF_F_IPV6_CSUM |
-			   NETIF_F_HW_VLAN_CTAG_TX |
-			   NETIF_F_HW_VLAN_CTAG_RX |
 			   NETIF_F_TSO |
 			   NETIF_F_TSO6 |
 			   NETIF_F_RXHASH |
-			   NETIF_F_RXCSUM;
+			   NETIF_F_RXCSUM |
+			   NETIF_F_HW_CSUM;
 
-	netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
+#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+				    NETIF_F_GSO_GRE_CSUM | \
+				    NETIF_F_GSO_IPIP | \
+				    NETIF_F_GSO_SIT | \
+				    NETIF_F_GSO_UDP_TUNNEL | \
+				    NETIF_F_GSO_UDP_TUNNEL_CSUM)
 
-	switch (adapter->hw.mac.type) {
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
+	netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
+	netdev->features |= NETIF_F_GSO_PARTIAL |
+			    IXGBE_GSO_PARTIAL_FEATURES;
+
+	if (hw->mac.type >= ixgbe_mac_82599EB)
 		netdev->features |= NETIF_F_SCTP_CRC;
-		netdev->hw_features |= NETIF_F_SCTP_CRC |
-				       NETIF_F_NTUPLE |
+
+	/* copy netdev features into list of user selectable features */
+	netdev->hw_features |= netdev->features |
+			       NETIF_F_HW_VLAN_CTAG_RX |
+			       NETIF_F_HW_VLAN_CTAG_TX |
+			       NETIF_F_RXALL |
+			       NETIF_F_HW_L2FW_DOFFLOAD;
+
+	if (hw->mac.type >= ixgbe_mac_82599EB)
+		netdev->hw_features |= NETIF_F_NTUPLE |
 				       NETIF_F_HW_TC;
-		break;
-	default:
-		break;
-	}
 
-	netdev->hw_features |= NETIF_F_RXALL;
-	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
 
-	netdev->vlan_features |= NETIF_F_TSO;
-	netdev->vlan_features |= NETIF_F_TSO6;
-	netdev->vlan_features |= NETIF_F_IP_CSUM;
-	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
-	netdev->vlan_features |= NETIF_F_SG;
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->hw_enc_features |= netdev->vlan_features;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
 
-	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	/* set this bit last since it cannot be part of vlan_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
 
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
 
-#ifdef CONFIG_IXGBE_VXLAN
-	switch (adapter->hw.mac.type) {
-	case ixgbe_mac_X550:
-	case ixgbe_mac_X550EM_x:
-		netdev->hw_enc_features |= NETIF_F_RXCSUM;
-		break;
-	default:
-		break;
-	}
-#endif /* CONFIG_IXGBE_VXLAN */
-
 #ifdef CONFIG_IXGBE_DCB
-	netdev->dcbnl_ops = &dcbnl_ops;
+	if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+		netdev->dcbnl_ops = &dcbnl_ops;
 #endif
 
 #ifdef IXGBE_FCOE
@@ -9302,10 +9547,6 @@
 					 NETIF_F_FCOE_MTU;
 	}
 #endif /* IXGBE_FCOE */
-	if (pci_using_dac) {
-		netdev->features |= NETIF_F_HIGHDMA;
-		netdev->vlan_features |= NETIF_F_HIGHDMA;
-	}
 
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
 		netdev->hw_features |= NETIF_F_LRO;
@@ -9319,7 +9560,8 @@
 		goto err_sw_init;
 	}
 
-	ixgbe_get_platform_mac_addr(adapter);
+	eth_platform_get_mac_address(&adapter->pdev->dev,
+				     adapter->hw.mac.perm_addr);
 
 	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
 
@@ -9329,6 +9571,8 @@
 		goto err_sw_init;
 	}
 
+	/* Set hw->mac.addr to permanent MAC address */
+	ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
 	ixgbe_mac_set_default_filter(adapter);
 
 	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
@@ -9468,6 +9712,7 @@
 	ixgbe_disable_sriov(adapter);
 	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
 	iounmap(adapter->io_addr);
+	kfree(adapter->jump_tables[0]);
 	kfree(adapter->mac_table);
 err_ioremap:
 	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
@@ -9496,6 +9741,7 @@
 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev;
 	bool disable_dev;
+	int i;
 
 	/* if !adapter then we already cleaned up in probe */
 	if (!adapter)
@@ -9545,6 +9791,14 @@
 
 	e_dev_info("complete\n");
 
+	for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
+		if (adapter->jump_tables[i]) {
+			kfree(adapter->jump_tables[i]->input);
+			kfree(adapter->jump_tables[i]->mask);
+		}
+		kfree(adapter->jump_tables[i]);
+	}
+
 	kfree(adapter->mac_table);
 	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
 	free_netdev(netdev);
@@ -9625,6 +9879,9 @@
 		case ixgbe_mac_X550EM_x:
 			device_id = IXGBE_DEV_ID_X550EM_X_VF;
 			break;
+		case ixgbe_mac_x550em_a:
+			device_id = IXGBE_DEV_ID_X550EM_A_VF;
+			break;
 		default:
 			device_id = 0;
 			break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 9993a47..a0cb843 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -48,10 +48,10 @@
 	if (size > mbx->size)
 		size = mbx->size;
 
-	if (!mbx->ops.read)
+	if (!mbx->ops)
 		return IXGBE_ERR_MBX;
 
-	return mbx->ops.read(hw, msg, size, mbx_id);
+	return mbx->ops->read(hw, msg, size, mbx_id);
 }
 
 /**
@@ -70,10 +70,10 @@
 	if (size > mbx->size)
 		return IXGBE_ERR_MBX;
 
-	if (!mbx->ops.write)
+	if (!mbx->ops)
 		return IXGBE_ERR_MBX;
 
-	return mbx->ops.write(hw, msg, size, mbx_id);
+	return mbx->ops->write(hw, msg, size, mbx_id);
 }
 
 /**
@@ -87,10 +87,10 @@
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 
-	if (!mbx->ops.check_for_msg)
+	if (!mbx->ops)
 		return IXGBE_ERR_MBX;
 
-	return mbx->ops.check_for_msg(hw, mbx_id);
+	return mbx->ops->check_for_msg(hw, mbx_id);
 }
 
 /**
@@ -104,10 +104,10 @@
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 
-	if (!mbx->ops.check_for_ack)
+	if (!mbx->ops)
 		return IXGBE_ERR_MBX;
 
-	return mbx->ops.check_for_ack(hw, mbx_id);
+	return mbx->ops->check_for_ack(hw, mbx_id);
 }
 
 /**
@@ -121,10 +121,10 @@
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 
-	if (!mbx->ops.check_for_rst)
+	if (!mbx->ops)
 		return IXGBE_ERR_MBX;
 
-	return mbx->ops.check_for_rst(hw, mbx_id);
+	return mbx->ops->check_for_rst(hw, mbx_id);
 }
 
 /**
@@ -139,10 +139,10 @@
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 	int countdown = mbx->timeout;
 
-	if (!countdown || !mbx->ops.check_for_msg)
+	if (!countdown || !mbx->ops)
 		return IXGBE_ERR_MBX;
 
-	while (mbx->ops.check_for_msg(hw, mbx_id)) {
+	while (mbx->ops->check_for_msg(hw, mbx_id)) {
 		countdown--;
 		if (!countdown)
 			return IXGBE_ERR_MBX;
@@ -164,10 +164,10 @@
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 	int countdown = mbx->timeout;
 
-	if (!countdown || !mbx->ops.check_for_ack)
+	if (!countdown || !mbx->ops)
 		return IXGBE_ERR_MBX;
 
-	while (mbx->ops.check_for_ack(hw, mbx_id)) {
+	while (mbx->ops->check_for_ack(hw, mbx_id)) {
 		countdown--;
 		if (!countdown)
 			return IXGBE_ERR_MBX;
@@ -193,7 +193,7 @@
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 	s32 ret_val;
 
-	if (!mbx->ops.read)
+	if (!mbx->ops)
 		return IXGBE_ERR_MBX;
 
 	ret_val = ixgbe_poll_for_msg(hw, mbx_id);
@@ -201,7 +201,7 @@
 		return ret_val;
 
 	/* if ack received read message */
-	return mbx->ops.read(hw, msg, size, mbx_id);
+	return mbx->ops->read(hw, msg, size, mbx_id);
 }
 
 /**
@@ -221,11 +221,11 @@
 	s32 ret_val;
 
 	/* exit if either we can't write or there isn't a defined timeout */
-	if (!mbx->ops.write || !mbx->timeout)
+	if (!mbx->ops || !mbx->timeout)
 		return IXGBE_ERR_MBX;
 
 	/* send msg */
-	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+	ret_val = mbx->ops->write(hw, msg, size, mbx_id);
 	if (ret_val)
 		return ret_val;
 
@@ -307,14 +307,15 @@
 	case ixgbe_mac_X540:
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
 		break;
 	default:
 		break;
 	}
 
-	if (vflre & (1 << vf_shift)) {
-		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+	if (vflre & BIT(vf_shift)) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
 		hw->mbx.stats.rsts++;
 		return 0;
 	}
@@ -430,6 +431,7 @@
 	if (hw->mac.type != ixgbe_mac_82599EB &&
 	    hw->mac.type != ixgbe_mac_X550 &&
 	    hw->mac.type != ixgbe_mac_X550EM_x &&
+	    hw->mac.type != ixgbe_mac_x550em_a &&
 	    hw->mac.type != ixgbe_mac_X540)
 		return;
 
@@ -446,7 +448,7 @@
 }
 #endif /* CONFIG_PCI_IOV */
 
-struct ixgbe_mbx_operations mbx_ops_generic = {
+const struct ixgbe_mbx_operations mbx_ops_generic = {
 	.read                   = ixgbe_read_mbx_pf,
 	.write                  = ixgbe_write_mbx_pf,
 	.read_posted            = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 8daa95f..01c2667 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -123,6 +123,6 @@
 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
 #endif /* CONFIG_PCI_IOV */
 
-extern struct ixgbe_mbx_operations mbx_ops_generic;
+extern const struct ixgbe_mbx_operations mbx_ops_generic;
 
 #endif /* _IXGBE_MBX_H_ */
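
The mailbox ops conversion above follows the usual kernel pattern of pointing every device at one shared, const operations table instead of memcpy()ing it into each hw struct, which also lets callers test a single pointer for "no mailbox". A simplified stand-alone sketch of that pattern; the types below are stand-ins, not the driver's real structures:

	#include <stddef.h>
	#include <stdio.h>

	struct mbx_ops {
		int (*read)(void *hw, unsigned int *msg, unsigned int size);
	};

	struct mbx_info {
		const struct mbx_ops *ops;	/* NULL when no mailbox is present */
	};

	static int generic_read(void *hw, unsigned int *msg, unsigned int size)
	{
		(void)hw; (void)msg; (void)size;
		return 0;
	}

	/* One shared, read-only table instead of a per-device copy */
	static const struct mbx_ops mbx_ops_generic = {
		.read = generic_read,
	};

	static int read_mbx(struct mbx_info *mbx, unsigned int *msg, unsigned int size)
	{
		if (!mbx->ops)		/* one pointer test replaces per-method checks */
			return -1;
		return mbx->ops->read(NULL, msg, size);
	}

	int main(void)
	{
		struct mbx_info mbx = { .ops = &mbx_ops_generic };
		unsigned int msg[4];

		printf("read_mbx() = %d\n", read_mbx(&mbx, msg, 4));
		return 0;
	}
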
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index ce48872..a8bed3d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -32,13 +32,18 @@
 
 struct ixgbe_mat_field {
 	unsigned int off;
-	unsigned int mask;
 	int (*val)(struct ixgbe_fdir_filter *input,
 		   union ixgbe_atr_input *mask,
 		   u32 val, u32 m);
 	unsigned int type;
 };
 
+struct ixgbe_jump_table {
+	struct ixgbe_mat_field *mat;
+	struct ixgbe_fdir_filter *input;
+	union ixgbe_atr_input *mask;
+};
+
 static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
 				     union ixgbe_atr_input *mask,
 				     u32 val, u32 m)
@@ -58,36 +63,34 @@
 }
 
 static struct ixgbe_mat_field ixgbe_ipv4_fields[] = {
-	{ .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip,
+	{ .off = 12, .val = ixgbe_mat_prgm_sip,
 	  .type = IXGBE_ATR_FLOW_TYPE_IPV4},
-	{ .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip,
+	{ .off = 16, .val = ixgbe_mat_prgm_dip,
 	  .type = IXGBE_ATR_FLOW_TYPE_IPV4},
 	{ .val = NULL } /* terminal node */
 };
 
-static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input,
+static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
 				       union ixgbe_atr_input *mask,
 				       u32 val, u32 m)
 {
 	input->filter.formatted.src_port = val & 0xffff;
 	mask->formatted.src_port = m & 0xffff;
-	return 0;
-};
+	input->filter.formatted.dst_port = val >> 16;
+	mask->formatted.dst_port = m >> 16;
 
-static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input,
-				       union ixgbe_atr_input *mask,
-				       u32 val, u32 m)
-{
-	input->filter.formatted.dst_port = val & 0xffff;
-	mask->formatted.dst_port = m & 0xffff;
 	return 0;
 };
 
 static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
-	{.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport,
+	{.off = 0, .val = ixgbe_mat_prgm_ports,
 	 .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
-	{.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport,
-	 .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
+	{ .val = NULL } /* terminal node */
+};
+
+static struct ixgbe_mat_field ixgbe_udp_fields[] = {
+	{.off = 0, .val = ixgbe_mat_prgm_ports,
+	 .type = IXGBE_ATR_FLOW_TYPE_UDPV4},
 	{ .val = NULL } /* terminal node */
 };
 
@@ -107,6 +110,8 @@
 static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = {
 	{ .o = 0, .s = 6, .m = 0xf,
 	  .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields},
+	{ .o = 0, .s = 6, .m = 0xf,
+	  .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields},
 	{ .jump = NULL } /* terminal node */
 };
 #endif /* _IXGBE_MODEL_H_ */
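
ixgbe_ipv4_jumps is what ties a cls_u32 "link" key to the field table used for the child hash table: a key at offset 8 with mask 0xff00 selects on the IPv4 protocol byte, 0x0600 for TCP and 0x1100 for the UDP entry added above. A stand-alone sketch of that lookup with simplified stand-in types rather than the driver's real structures:

	#include <stdio.h>

	/* Stand-ins for ixgbe_mat_field / ixgbe_nexthdr, only to show how the
	 * link key's off/val/mask picks the per-protocol field table.
	 */
	struct mat_field { const char *name; };
	struct nexthdr {
		unsigned int off, val, mask;
		const struct mat_field *jump;
	};

	static const struct mat_field tcp_fields = { "TCP port match" };
	static const struct mat_field udp_fields = { "UDP port match" };

	static const struct nexthdr ipv4_jumps[] = {
		{ .off = 8, .val = 0x0600, .mask = 0xff00, .jump = &tcp_fields }, /* proto 6  */
		{ .off = 8, .val = 0x1100, .mask = 0xff00, .jump = &udp_fields }, /* proto 17 */
		{ .jump = NULL }						  /* terminal  */
	};

	static const struct mat_field *lookup_jump(unsigned int off, unsigned int val,
						   unsigned int mask)
	{
		int i;

		for (i = 0; ipv4_jumps[i].jump; i++)
			if (ipv4_jumps[i].off == off && ipv4_jumps[i].val == val &&
			    ipv4_jumps[i].mask == mask)
				return ipv4_jumps[i].jump;
		return NULL;
	}

	int main(void)
	{
		const struct mat_field *f = lookup_jump(8, 0x1100, 0xff00);

		printf("%s\n", f ? f->name : "no match");	/* UDP entry */
		return 0;
	}
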
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 5abd66c..cc735ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -81,7 +81,11 @@
 #define IXGBE_I2C_EEPROM_STATUS_FAIL		0x2
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS	0x3
 #define IXGBE_CS4227				0xBE    /* CS4227 address */
+#define IXGBE_CS4227_GLOBAL_ID_LSB		0
+#define IXGBE_CS4227_GLOBAL_ID_MSB		1
 #define IXGBE_CS4227_SCRATCH			2
+#define IXGBE_CS4223_PHY_ID			0x7003	/* Quad port */
+#define IXGBE_CS4227_PHY_ID			0x3003	/* Dual port */
 #define IXGBE_CS4227_RESET_PENDING		0x1357
 #define IXGBE_CS4227_RESET_COMPLETE		0x5AA5
 #define IXGBE_CS4227_RETRIES			15
@@ -103,7 +107,7 @@
 #define IXGBE_PE				0xE0	/* Port expander addr */
 #define IXGBE_PE_OUTPUT				1	/* Output reg offset */
 #define IXGBE_PE_CONFIG				3	/* Config reg offset */
-#define IXGBE_PE_BIT1				(1 << 1)
+#define IXGBE_PE_BIT1				BIT(1)
 
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE                  0x400
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ef1504d..e5431bf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2015 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -333,6 +333,7 @@
 	 */
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		/* Upper 32 bits represent billions of cycles, lower 32 bits
 		 * represent cycles. However, we use timespec64_to_ns for the
 		 * correct math even though the units haven't been corrected
@@ -395,7 +396,7 @@
 		if (incval > 0x00FFFFFFULL)
 			e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
 		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
-				(1 << IXGBE_INCPER_SHIFT_82599) |
+				BIT(IXGBE_INCPER_SHIFT_82599) |
 				((u32)incval & 0x00FFFFFFUL));
 		break;
 	default:
@@ -921,6 +922,7 @@
 	switch (hw->mac.type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		/* enable timestamping all packets only if at least some
 		 * packets were requested. Otherwise, play nice and disable
 		 * timestamping
@@ -1083,6 +1085,7 @@
 			cc.shift = 2;
 		}
 		/* fallthrough */
+	case ixgbe_mac_x550em_a:
 	case ixgbe_mac_X550:
 		cc.read = ixgbe_ptp_read_X550;
 
@@ -1111,7 +1114,7 @@
 		incval >>= IXGBE_INCVAL_SHIFT_82599;
 		cc.shift -= IXGBE_INCVAL_SHIFT_82599;
 		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
-				(1 << IXGBE_INCPER_SHIFT_82599) | incval);
+				BIT(IXGBE_INCPER_SHIFT_82599) | incval);
 		break;
 	default:
 		/* other devices aren't supported */
@@ -1223,6 +1226,7 @@
 		break;
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
 		snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
 		adapter->ptp_caps.owner = THIS_MODULE;
 		adapter->ptp_caps.max_adj = 30000000;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8025a3f..c5caacd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -406,7 +406,7 @@
 		vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
 		vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
 		mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
-		mta_reg |= (1 << vector_bit);
+		mta_reg |= BIT(vector_bit);
 		IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
 	}
 	vmolr |= IXGBE_VMOLR_ROMPE;
@@ -433,7 +433,7 @@
 			vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
 			vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
 			mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
-			mta_reg |= (1 << vector_bit);
+			mta_reg |= BIT(vector_bit);
 			IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
 		}
 
@@ -536,9 +536,9 @@
 		/* enable or disable receive depending on error */
 		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
 		if (err)
-			vfre &= ~(1 << vf_shift);
+			vfre &= ~BIT(vf_shift);
 		else
-			vfre |= 1 << vf_shift;
+			vfre |= BIT(vf_shift);
 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
 
 		if (err) {
@@ -589,47 +589,47 @@
 static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 i;
+	u32 vlvfb_mask, pool_mask, i;
+
+	/* create mask for VF and other pools */
+	pool_mask = ~BIT(VMDQ_P(0) % 32);
+	vlvfb_mask = BIT(vf % 32);
 
 	/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
 	for (i = IXGBE_VLVF_ENTRIES; i--;) {
 		u32 bits[2], vlvfb, vid, vfta, vlvf;
 		u32 word = i * 2 + vf / 32;
-		u32 mask = 1 << (vf % 32);
+		u32 mask;
 
 		vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
 
 		/* if our bit isn't set we can skip it */
-		if (!(vlvfb & mask))
+		if (!(vlvfb & vlvfb_mask))
 			continue;
 
 		/* clear our bit from vlvfb */
-		vlvfb ^= mask;
+		vlvfb ^= vlvfb_mask;
 
 		/* create 64b mask to check to see if we should clear VLVF */
 		bits[word % 2] = vlvfb;
 		bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
 
-		/* if promisc is enabled, PF will be present, leave VFTA */
-		if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) {
-			bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32));
-
-			if (bits[0] || bits[1])
-				goto update_vlvfb;
-			goto update_vlvf;
-		}
-
 		/* if other pools are present, just remove ourselves */
-		if (bits[0] || bits[1])
+		if (bits[(VMDQ_P(0) / 32) ^ 1] ||
+		    (bits[VMDQ_P(0) / 32] & pool_mask))
 			goto update_vlvfb;
 
+		/* if PF is present, leave VFTA */
+		if (bits[0] || bits[1])
+			goto update_vlvf;
+
 		/* if we cannot determine VLAN just remove ourselves */
 		vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
 		if (!vlvf)
 			goto update_vlvfb;
 
 		vid = vlvf & VLAN_VID_MASK;
-		mask = 1 << (vid % 32);
+		mask = BIT(vid % 32);
 
 		/* clear bit from VFTA */
 		vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
@@ -638,6 +638,9 @@
 update_vlvf:
 		/* clear POOL selection enable */
 		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);
+
+		if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+			vlvfb = 0;
 update_vlvfb:
 		/* clear pool bits */
 		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
@@ -810,7 +813,7 @@
 
 	/* enable transmit for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
-	reg |= 1 << vf_shift;
+	reg |= BIT(vf_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 
 	/* force drop enable for all VF Rx queues */
@@ -818,7 +821,7 @@
 
 	/* enable receive for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
-	reg |= 1 << vf_shift;
+	reg |= BIT(vf_shift);
 	/*
 	 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
 	 * For more info take a look at ixgbe_set_vf_lpe
@@ -834,7 +837,7 @@
 
 #endif /* CONFIG_FCOE */
 		if (pf_max_frame > ETH_FRAME_LEN)
-			reg &= ~(1 << vf_shift);
+			reg &= ~BIT(vf_shift);
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
 
@@ -843,7 +846,7 @@
 
 	/* Enable counting of spoofed packets in the SSVPC register */
 	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
-	reg |= (1 << vf_shift);
+	reg |= BIT(vf_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
 
 	/*
@@ -887,7 +890,7 @@
 		return -1;
 	}
 
-	if (adapter->vfinfo[vf].pf_set_mac &&
+	if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
 	    !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
 		e_warn(drv,
 		       "VF %d attempted to override administratively set MAC address\n"
@@ -905,8 +908,6 @@
 	u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
 	u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
 	u8 tcs = netdev_get_num_tc(adapter->netdev);
-	struct ixgbe_hw *hw = &adapter->hw;
-	int err;
 
 	if (adapter->vfinfo[vf].pf_vlan || tcs) {
 		e_warn(drv,
@@ -920,19 +921,7 @@
 	if (!vid && !add)
 		return 0;
 
-	err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
-	if (err)
-		return err;
-
-	if (adapter->vfinfo[vf].spoofchk_enabled)
-		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-
-	if (add)
-		adapter->vfinfo[vf].vlan_count++;
-	else if (adapter->vfinfo[vf].vlan_count)
-		adapter->vfinfo[vf].vlan_count--;
-
-	return 0;
+	return ixgbe_set_vf_vlan(adapter, add, vid, vf);
 }
 
 static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
@@ -961,8 +950,11 @@
 		 * If the VF is allowed to set MAC filters then turn off
 		 * anti-spoofing to avoid false positives.
 		 */
-		if (adapter->vfinfo[vf].spoofchk_enabled)
-			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+		if (adapter->vfinfo[vf].spoofchk_enabled) {
+			struct ixgbe_hw *hw = &adapter->hw;
+
+			hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
+		}
 	}
 
 	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
@@ -1318,9 +1310,6 @@
 
 	ixgbe_set_vmvir(adapter, vlan, qos, vf);
 	ixgbe_set_vmolr(hw, vf, false);
-	if (adapter->vfinfo[vf].spoofchk_enabled)
-		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-	adapter->vfinfo[vf].vlan_count++;
 
 	/* enable hide vlan on X550 */
 	if (hw->mac.type >= ixgbe_mac_X550)
@@ -1353,9 +1342,6 @@
 	ixgbe_set_vf_vlan(adapter, true, 0, vf);
 	ixgbe_clear_vmvir(adapter, vf);
 	ixgbe_set_vmolr(hw, vf, true);
-	hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
-	if (adapter->vfinfo[vf].vlan_count)
-		adapter->vfinfo[vf].vlan_count--;
 
 	/* disable hide VLAN on X550 */
 	if (hw->mac.type >= ixgbe_mac_X550)
@@ -1395,7 +1381,7 @@
 	return err;
 }
 
-static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
+int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
 {
 	switch (adapter->link_speed) {
 	case IXGBE_LINK_SPEED_100_FULL:
@@ -1522,27 +1508,34 @@
 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int vf_target_reg = vf >> 3;
-	int vf_target_shift = vf % 8;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 regval;
 
 	if (vf >= adapter->num_vfs)
 		return -EINVAL;
 
 	adapter->vfinfo[vf].spoofchk_enabled = setting;
 
-	regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-	regval &= ~(1 << vf_target_shift);
-	regval |= (setting << vf_target_shift);
-	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+	/* configure MAC spoofing */
+	hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
 
-	if (adapter->vfinfo[vf].vlan_count) {
-		vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
-		regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
-		regval &= ~(1 << vf_target_shift);
-		regval |= (setting << vf_target_shift);
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+	/* configure VLAN spoofing */
+	hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
+
+	/* Ensure LLDP and FC are set for Ethertype Antispoofing if we will be
+	 * calling set_ethertype_anti_spoofing for each VF in the loop below
+	 */
+	if (hw->mac.ops.set_ethertype_anti_spoofing) {
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+				(IXGBE_ETQF_FILTER_EN    |
+				 IXGBE_ETQF_TX_ANTISPOOF |
+				 IXGBE_ETH_P_LLDP));
+
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+				(IXGBE_ETQF_FILTER_EN |
+				 IXGBE_ETQF_TX_ANTISPOOF |
+				 ETH_P_PAUSE));
+
+		hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index dad9257..47e65e2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -44,6 +44,7 @@
 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
 			   u8 qos);
+int ixgbe_link_mbps(struct ixgbe_adapter *adapter);
 int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
 			int max_tx_rate);
 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
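
The (1 << n) to BIT(n) conversions throughout this series rely on the kernel's definition of BIT() as an unsigned long shift, 1UL << (nr); the practical difference is that bit 31 no longer overflows a signed int. A minimal sketch, with the macro restated so the example stands alone:

	#include <stdio.h>

	/* Kernel definition restated for a stand-alone example. */
	#define BIT(nr) (1UL << (nr))

	int main(void)
	{
		unsigned long reg = 0;

		/* (1 << 31) shifts into the sign bit of a signed int, which is
		 * undefined behaviour; BIT(31) performs the shift on 1UL instead.
		 */
		reg |= BIT(31);

		printf("reg = 0x%lx\n", reg);	/* 0x80000000 */
		return 0;
	}
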
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index bf7367a..da3d835 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2015 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -59,8 +59,12 @@
 #define IXGBE_SUBDEV_ID_82599_RNDC       0x1F72
 #define IXGBE_SUBDEV_ID_82599_560FLR     0x17D0
 #define IXGBE_SUBDEV_ID_82599_SP_560FLR  0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6		0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP		0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP		0x0008
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1	0x8976
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2	0x06EE
 #define IXGBE_SUBDEV_ID_82599_ECNA_DP    0x0470
-#define IXGBE_SUBDEV_ID_82599_LOM_SFP    0x8976
 #define IXGBE_DEV_ID_82599_SFP_EM        0x1507
 #define IXGBE_DEV_ID_82599_SFP_SF2       0x154D
 #define IXGBE_DEV_ID_82599EN_SFP         0x1557
@@ -75,21 +79,25 @@
 #define IXGBE_DEV_ID_X540T1              0x1560
 
 #define IXGBE_DEV_ID_X550T		0x1563
+#define IXGBE_DEV_ID_X550T1		0x15D1
 #define IXGBE_DEV_ID_X550EM_X_KX4	0x15AA
 #define IXGBE_DEV_ID_X550EM_X_KR	0x15AB
 #define IXGBE_DEV_ID_X550EM_X_SFP	0x15AC
 #define IXGBE_DEV_ID_X550EM_X_10G_T	0x15AD
 #define IXGBE_DEV_ID_X550EM_X_1G_T	0x15AE
-#define IXGBE_DEV_ID_X550_VF_HV	0x1564
-#define IXGBE_DEV_ID_X550_VF		0x1565
-#define IXGBE_DEV_ID_X550EM_X_VF	0x15A8
-#define IXGBE_DEV_ID_X550EM_X_VF_HV	0x15A9
+#define IXGBE_DEV_ID_X550EM_A_KR	0x15C2
+#define IXGBE_DEV_ID_X550EM_A_KR_L	0x15C3
+#define IXGBE_DEV_ID_X550EM_A_SFP_N	0x15C4
+#define IXGBE_DEV_ID_X550EM_A_SGMII	0x15C6
+#define IXGBE_DEV_ID_X550EM_A_SGMII_L	0x15C7
+#define IXGBE_DEV_ID_X550EM_A_SFP	0x15CE
 
 /* VF Device IDs */
-#define IXGBE_DEV_ID_82599_VF           0x10ED
-#define IXGBE_DEV_ID_X540_VF            0x1515
+#define IXGBE_DEV_ID_82599_VF		0x10ED
+#define IXGBE_DEV_ID_X540_VF		0x1515
 #define IXGBE_DEV_ID_X550_VF		0x1565
 #define IXGBE_DEV_ID_X550EM_X_VF	0x15A8
+#define IXGBE_DEV_ID_X550EM_A_VF	0x15C5
 
 #define IXGBE_CAT(r, m)	IXGBE_##r##_##m
 
@@ -128,7 +136,7 @@
 #define IXGBE_FLA_X540		IXGBE_FLA_8259X
 #define IXGBE_FLA_X550		IXGBE_FLA_8259X
 #define IXGBE_FLA_X550EM_x	IXGBE_FLA_8259X
-#define IXGBE_FLA_X550EM_a	0x15F6C
+#define IXGBE_FLA_X550EM_a	0x15F68
 #define IXGBE_FLA(_hw)		IXGBE_BY_MAC((_hw), FLA)
 #define IXGBE_EEMNGCTL  0x10110
 #define IXGBE_EEMNGDATA 0x10114
@@ -143,13 +151,6 @@
 #define IXGBE_GRC_X550EM_a	0x15F64
 #define IXGBE_GRC(_hw)		IXGBE_BY_MAC((_hw), GRC)
 
-#define IXGBE_SRAMREL_8259X	0x10210
-#define IXGBE_SRAMREL_X540	IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550	IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550EM_x	IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550EM_a	0x15F6C
-#define IXGBE_SRAMREL(_hw)	IXGBE_BY_MAC((_hw), SRAMREL)
-
 /* General Receive Control */
 #define IXGBE_GRC_MNG  0x00000001 /* Manageability Enable */
 #define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
@@ -375,6 +376,8 @@
 #define IXGBE_MRCTL(_i)      (0x0F600 + ((_i) * 4))
 #define IXGBE_VMRVLAN(_i)    (0x0F610 + ((_i) * 4))
 #define IXGBE_VMRVM(_i)      (0x0F630 + ((_i) * 4))
+#define IXGBE_WQBR_RX(_i)    (0x2FB0 + ((_i) * 4)) /* 4 total */
+#define IXGBE_WQBR_TX(_i)    (0x8130 + ((_i) * 4)) /* 4 total */
 #define IXGBE_L34T_IMIR(_i)  (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
 #define IXGBE_RXFECCERR0         0x051B8
 #define IXGBE_LLITHRESH 0x0EC90
@@ -446,6 +449,8 @@
 #define IXGBE_DMATXCTL_TE       0x1 /* Transmit Enable */
 #define IXGBE_DMATXCTL_NS       0x2 /* No Snoop LSO hdr buffer */
 #define IXGBE_DMATXCTL_GDV      0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_MDP_EN   0x20 /* Bit 5 */
+#define IXGBE_DMATXCTL_MBINTEN  0x40 /* Bit 6 */
 #define IXGBE_DMATXCTL_VT_SHIFT 16  /* VLAN EtherType */
 
 #define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
@@ -543,6 +548,7 @@
 /* DCB registers */
 #define MAX_TRAFFIC_CLASS        8
 #define X540_TRAFFIC_CLASS       4
+#define DEF_TRAFFIC_CLASS        1
 #define IXGBE_RMCS      0x03D00
 #define IXGBE_DPMCS     0x07F40
 #define IXGBE_PDPMCS    0x0CD00
@@ -554,7 +560,6 @@
 #define IXGBE_TDPT2TCCR(_i)     (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
 #define IXGBE_TDPT2TCSR(_i)     (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
 
-
 /* Security Control Registers */
 #define IXGBE_SECTXCTRL         0x08800
 #define IXGBE_SECTXSTAT         0x08804
@@ -693,16 +698,16 @@
 #define IXGBE_FCDMARW   0x02420 /* FC Receive DMA RW */
 #define IXGBE_FCINVST0  0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
 #define IXGBE_FCINVST(_i)       (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID      (1 << 0)   /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE   (3 << 3)   /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX    (1 << 7)   /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_VALID      BIT(0)    /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE   (3u << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX    BIT(7)    /* 0: Initiator, 1: Target */
 #define IXGBE_FCBUFF_BUFFCNT    0x0000ff00 /* Number of User Buffers */
 #define IXGBE_FCBUFF_OFFSET     0xffff0000 /* User Buffer Offset */
 #define IXGBE_FCBUFF_BUFFSIZE_SHIFT  3
 #define IXGBE_FCBUFF_BUFFCNT_SHIFT   8
 #define IXGBE_FCBUFF_OFFSET_SHIFT    16
-#define IXGBE_FCDMARW_WE        (1 << 14)   /* Write enable */
-#define IXGBE_FCDMARW_RE        (1 << 15)   /* Read enable */
+#define IXGBE_FCDMARW_WE        BIT(14)   /* Write enable */
+#define IXGBE_FCDMARW_RE        BIT(15)   /* Read enable */
 #define IXGBE_FCDMARW_FCOESEL   0x000001ff  /* FC X_ID: 11 bits */
 #define IXGBE_FCDMARW_LASTSIZE  0xffff0000  /* Last User Buffer Size */
 #define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
@@ -719,23 +724,23 @@
 #define IXGBE_FCFLT     0x05108 /* FC FLT Context */
 #define IXGBE_FCFLTRW   0x05110 /* FC Filter RW Control */
 #define IXGBE_FCPARAM   0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID       (1 << 0)   /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST       (1 << 1)   /* Filter First */
+#define IXGBE_FCFLT_VALID       BIT(0)   /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST       BIT(1)   /* Filter First */
 #define IXGBE_FCFLT_SEQID       0x00ff0000 /* Sequence ID */
 #define IXGBE_FCFLT_SEQCNT      0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT    (1 << 13)  /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE        (1 << 14)  /* Write Enable */
-#define IXGBE_FCFLTRW_RE        (1 << 15)  /* Read Enable */
+#define IXGBE_FCFLTRW_RVALDT    BIT(13)  /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE        BIT(14)  /* Write Enable */
+#define IXGBE_FCFLTRW_RE        BIT(15)  /* Read Enable */
 /* FCoE Receive Control */
 #define IXGBE_FCRXCTRL  0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI  (1 << 0)   /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD   (1 << 1)   /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH  (1 << 2)   /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3)   /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH     (1 << 4)   /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5)   /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC     (1 << 6)   /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO  (1 << 7)   /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOELLI  BIT(0)   /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD   BIT(1)   /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH  BIT(2)   /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH BIT(3)   /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH     BIT(4)   /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5)   /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC     BIT(6)   /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO  BIT(7)   /* FC CRC Byte Ordering */
 #define IXGBE_FCRXCTRL_FCOEVER  0x00000f00 /* FCoE Version: 4 bits */
 #define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
 /* FCoE Redirection */
@@ -1056,15 +1061,9 @@
 #define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
 #define IXGBE_TDPROBE     0x07F20
 #define IXGBE_TXBUFCTRL   0x0C600
-#define IXGBE_TXBUFDATA0  0x0C610
-#define IXGBE_TXBUFDATA1  0x0C614
-#define IXGBE_TXBUFDATA2  0x0C618
-#define IXGBE_TXBUFDATA3  0x0C61C
+#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */
 #define IXGBE_RXBUFCTRL   0x03600
-#define IXGBE_RXBUFDATA0  0x03610
-#define IXGBE_RXBUFDATA1  0x03614
-#define IXGBE_RXBUFDATA2  0x03618
-#define IXGBE_RXBUFDATA3  0x0361C
+#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */
 #define IXGBE_PCIE_DIAG(_i)     (0x11090 + ((_i) * 4)) /* 8 of these */
 #define IXGBE_RFVAL     0x050A4
 #define IXGBE_MDFTC1    0x042B8
@@ -1127,6 +1126,7 @@
 #define IXGBE_XPCSS     0x04290
 #define IXGBE_MFLCN     0x04294
 #define IXGBE_SERDESC   0x04298
+#define IXGBE_MAC_SGMII_BUSY 0x04298
 #define IXGBE_MACS      0x0429C
 #define IXGBE_AUTOC     0x042A0
 #define IXGBE_LINKS     0x042A4
@@ -1203,6 +1203,8 @@
 #define IXGBE_RDRXCTL_RSCLLIDIS     0x00800000 /* Disable RSC compl on LLI */
 #define IXGBE_RDRXCTL_RSCACKC       0x02000000 /* must set 1 when RSC enabled */
 #define IXGBE_RDRXCTL_FCOE_WRFIX    0x04000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_MBINTEN       0x10000000
+#define IXGBE_RDRXCTL_MDP_EN        0x20000000
 
 /* RQTC Bit Masks and Shifts */
 #define IXGBE_RQTC_SHIFT_TC(_i)     ((_i) * 4)
@@ -1249,20 +1251,20 @@
 #define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
 #define IXGBE_DCA_RXCTRL_CPUID_MASK_82599  0xFF000000 /* Rx CPUID Mask */
 #define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
 
 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
 #define IXGBE_DCA_TXCTRL_CPUID_MASK_82599  0xFF000000 /* Tx CPUID Mask */
 #define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
 #define IXGBE_DCA_MAX_QUEUES_82598   16 /* DCA regs only on 16 queues */
 
 /* MSCA Bit Masks */
@@ -1309,6 +1311,7 @@
 
 /* MDIO definitions */
 
+#define IXGBE_MDIO_ZERO_DEV_TYPE		0x0
 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE		0x1
 #define IXGBE_MDIO_PCS_DEV_TYPE		0x3
 #define IXGBE_MDIO_PHY_XS_DEV_TYPE		0x4
@@ -1740,7 +1743,7 @@
 #define IXGBE_ETQF_TX_ANTISPOOF	0x20000000 /* bit 29 */
 #define IXGBE_ETQF_1588         0x40000000 /* bit 30 */
 #define IXGBE_ETQF_FILTER_EN    0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE   (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_ENABLE   BIT(26) /* bit 26 */
 #define IXGBE_ETQF_POOL_SHIFT		20
 
 #define IXGBE_ETQS_RX_QUEUE     0x007F0000 /* bits 22:16 */
@@ -1866,20 +1869,20 @@
 #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT   9
 #define IXGBE_AUTOC_10G_PMA_PMD_MASK   0x00000180
 #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT  7
-#define IXGBE_AUTOC_10G_XAUI   (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4    (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4    (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX      (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX      (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI     (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX   (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_XAUI   (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4    (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4    (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX      (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX      (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI     (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX   (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
 
 #define IXGBE_AUTOC2_UPPER_MASK  0xFFFF0000
 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK  0x00030000
 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR  (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_KR  (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
 #define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK  0x50000000
 #define IXGBE_AUTOC2_LINK_DISABLE_MASK        0x70000000
 
@@ -1957,7 +1960,9 @@
 #define IXGBE_GSSR_PHY1_SM		0x0004
 #define IXGBE_GSSR_MAC_CSR_SM		0x0008
 #define IXGBE_GSSR_FLASH_SM		0x0010
+#define IXGBE_GSSR_NVM_UPDATE_SM	0x0200
 #define IXGBE_GSSR_SW_MNG_SM		0x0400
+#define IXGBE_GSSR_TOKEN_SM	0x40000000 /* SW bit for shared access */
 #define IXGBE_GSSR_SHARED_I2C_SM	0x1806 /* Wait for both phys & I2Cs */
 #define IXGBE_GSSR_I2C_MASK		0x1800
 #define IXGBE_GSSR_NVM_PHY_MASK		0xF
@@ -1997,6 +2002,9 @@
 #define IXGBE_PBANUM_PTR_GUARD		0xFAFA
 #define IXGBE_EEPROM_CHECKSUM		0x3F
 #define IXGBE_EEPROM_SUM		0xBABA
+#define IXGBE_EEPROM_CTRL_4		0x45
+#define IXGBE_EE_CTRL_4_INST_ID		0x10
+#define IXGBE_EE_CTRL_4_INST_ID_SHIFT	4
 #define IXGBE_PCIE_ANALOG_PTR		0x03
 #define IXGBE_ATLAS0_CONFIG_PTR		0x04
 #define IXGBE_PHY_PTR			0x04
@@ -2111,6 +2119,7 @@
 #define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET  0x3
 #define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP  0x1
 #define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS  0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR	BIT(7)
 #define IXGBE_FW_LESM_PARAMETERS_PTR     0x2
 #define IXGBE_FW_LESM_STATE_1            0x1
 #define IXGBE_FW_LESM_STATE_ENABLED      0x8000 /* LESM Enable bit */
@@ -2530,6 +2539,10 @@
 #define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS     0x00000080
 #define IXGBE_FDIRCTRL_DROP_Q_SHIFT             8
 #define IXGBE_FDIRCTRL_FLEX_SHIFT               16
+#define IXGBE_FDIRCTRL_DROP_NO_MATCH		0x00008000
+#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT		21
+#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN	0x0001 /* bit 23:21, 001b */
+#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD		0x0002 /* bit 23:21, 010b */
 #define IXGBE_FDIRCTRL_SEARCHLIM                0x00800000
 #define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT         24
 #define IXGBE_FDIRCTRL_FULL_THRESH_MASK         0xF0000000
@@ -2620,6 +2633,20 @@
 #define FW_MAX_READ_BUFFER_SIZE		1024
 #define FW_DISABLE_RXEN_CMD		0xDE
 #define FW_DISABLE_RXEN_LEN		0x1
+#define FW_PHY_MGMT_REQ_CMD		0x20
+#define FW_PHY_TOKEN_REQ_CMD		0x0A
+#define FW_PHY_TOKEN_REQ_LEN		2
+#define FW_PHY_TOKEN_REQ		0
+#define FW_PHY_TOKEN_REL		1
+#define FW_PHY_TOKEN_OK			1
+#define FW_PHY_TOKEN_RETRY		0x80
+#define FW_PHY_TOKEN_DELAY		5	/* milliseconds */
+#define FW_PHY_TOKEN_WAIT		5	/* seconds */
+#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY)
+#define FW_INT_PHY_REQ_CMD		0xB
+#define FW_INT_PHY_REQ_LEN		10
+#define FW_INT_PHY_REQ_READ		0
+#define FW_INT_PHY_REQ_WRITE		1
 
 /* Host Interface Command Structures */
 struct ixgbe_hic_hdr {
@@ -2688,6 +2715,28 @@
 	u16 pad3;
 };
 
+struct ixgbe_hic_phy_token_req {
+	struct ixgbe_hic_hdr hdr;
+	u8 port_number;
+	u8 command_type;
+	u16 pad;
+};
+
+struct ixgbe_hic_internal_phy_req {
+	struct ixgbe_hic_hdr hdr;
+	u8 port_number;
+	u8 command_type;
+	__be16 address;
+	u16 rsv1;
+	__be32 write_data;
+	u16 pad;
+} __packed;
+
+struct ixgbe_hic_internal_phy_resp {
+	struct ixgbe_hic_hdr hdr;
+	__be32 read_data;
+};
+
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
 	struct {
@@ -2786,15 +2835,15 @@
 #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
 #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
 #define IXGBE_ADVTXT_TUCMD_FCOE      0x00008000       /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK  (0x3 << 10)      /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF       ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC    ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE      ((1 << 4) << 10) /* Orientation: End */
-#define IXGBE_ADVTXD_FCOEF_ORIS      ((1 << 5) << 10) /* Orientation: Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N     (0x0 << 10)      /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T     (0x1 << 10)      /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI    (0x2 << 10)      /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A     (0x3 << 10)      /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_SOF       (BIT(2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC    (BIT(3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE      (BIT(4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS      (BIT(5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N     (0u << 10)  /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T     (1u << 10)  /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI    (2u << 10)  /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A     (3u << 10)  /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK  (3u << 10)  /* FC EOF index */
 #define IXGBE_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
 #define IXGBE_ADVTXD_MSS_SHIFT       16  /* Adv ctxt MSS shift */
 
@@ -2948,7 +2997,6 @@
 	IXGBE_CAT(EEC, m),		\
 	IXGBE_CAT(FLA, m),		\
 	IXGBE_CAT(GRC, m),		\
-	IXGBE_CAT(SRAMREL, m),		\
 	IXGBE_CAT(FACTPS, m),		\
 	IXGBE_CAT(SWSM, m),		\
 	IXGBE_CAT(SWFW_SYNC, m),	\
@@ -2989,6 +3037,7 @@
 	ixgbe_mac_X540,
 	ixgbe_mac_X550,
 	ixgbe_mac_X550EM_x,
+	ixgbe_mac_x550em_a,
 	ixgbe_num_macs
 };
 
@@ -3017,6 +3066,7 @@
 	ixgbe_phy_qsfp_intel,
 	ixgbe_phy_qsfp_unknown,
 	ixgbe_phy_sfp_unsupported,
+	ixgbe_phy_sgmii,
 	ixgbe_phy_generic
 };
 
@@ -3130,8 +3180,9 @@
 	enum ixgbe_bus_width width;
 	enum ixgbe_bus_type type;
 
-	u16 func;
-	u16 lan_id;
+	u8 func;
+	u8 lan_id;
+	u8 instance_id;
 };
 
 /* Flow control parameters */
@@ -3266,6 +3317,7 @@
 	s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
 	s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
 	void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+	void (*init_swfw_sync)(struct ixgbe_hw *);
 	s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
 	s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
 
@@ -3308,6 +3360,7 @@
 
 	/* Flow Control */
 	s32 (*fc_enable)(struct ixgbe_hw *);
+	s32 (*setup_fc)(struct ixgbe_hw *);
 
 	/* Manageability interface */
 	s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
@@ -3323,6 +3376,8 @@
 	s32 (*dmac_config)(struct ixgbe_hw *hw);
 	s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
 	s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
+	s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+	s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
 };
 
 struct ixgbe_phy_operations {
@@ -3442,7 +3497,7 @@
 };
 
 struct ixgbe_mbx_info {
-	struct ixgbe_mbx_operations ops;
+	const struct ixgbe_mbx_operations *ops;
 	struct ixgbe_mbx_stats stats;
 	u32 timeout;
 	u32 usec_delay;
@@ -3475,10 +3530,10 @@
 struct ixgbe_info {
 	enum ixgbe_mac_type		mac;
 	s32 				(*get_invariants)(struct ixgbe_hw *);
-	struct ixgbe_mac_operations	*mac_ops;
-	struct ixgbe_eeprom_operations	*eeprom_ops;
-	struct ixgbe_phy_operations	*phy_ops;
-	struct ixgbe_mbx_operations	*mbx_ops;
+	const struct ixgbe_mac_operations	*mac_ops;
+	const struct ixgbe_eeprom_operations	*eeprom_ops;
+	const struct ixgbe_phy_operations	*phy_ops;
+	const struct ixgbe_mbx_operations	*mbx_ops;
 	const u32			*mvals;
 };
 
@@ -3517,14 +3572,19 @@
 #define IXGBE_ERR_INVALID_ARGUMENT              -32
 #define IXGBE_ERR_HOST_INTERFACE_COMMAND        -33
 #define IXGBE_ERR_FDIR_CMD_INCOMPLETE		-38
+#define IXGBE_ERR_FW_RESP_INVALID		-39
+#define IXGBE_ERR_TOKEN_RETRY			-40
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
 #define IXGBE_FUSES0_GROUP(_i)		(0x11158 + ((_i) * 4))
 #define IXGBE_FUSES0_300MHZ		BIT(5)
-#define IXGBE_FUSES0_REV_MASK		(3 << 6)
+#define IXGBE_FUSES0_REV_MASK		(3u << 6)
 
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P)	((P) ? 0x8010 : 0x4010)
 #define IXGBE_KRM_LINK_CTRL_1(P)	((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_AN_CNTL_1(P)		((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_8(P)		((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_SGMII_CTRL(P)		((P) ? 0x82A0 : 0x42A0)
 #define IXGBE_KRM_DSP_TXFFE_STATE_4(P)	((P) ? 0x8634 : 0x4634)
 #define IXGBE_KRM_DSP_TXFFE_STATE_5(P)	((P) ? 0x8638 : 0x4638)
 #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)	((P) ? 0x8B00 : 0x4B00)
@@ -3532,43 +3592,54 @@
 #define IXGBE_KRM_TX_COEFF_CTRL_1(P)	((P) ? 0x9520 : 0x5520)
 #define IXGBE_KRM_RX_ANA_CTL(P)		((P) ? 0x9A00 : 0x5A00)
 
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B		(1 << 9)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS		(1 << 11)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B		BIT(9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS		BIT(11)
 
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK	(0x7 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G	(2 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G	(4 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ		(1 << 14)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC		(1 << 15)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX		(1 << 16)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR		(1 << 18)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX		(1 << 24)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR		(1 << 26)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE		(1 << 29)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART		(1 << 31)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK	(7u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G	(2u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G	(4u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN		BIT(12)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN	BIT(13)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ		BIT(14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC		BIT(15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX		BIT(16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR		BIT(18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX		BIT(24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR		BIT(26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE		BIT(29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART		BIT(31)
 
-#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN			(1 << 6)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN		(1 << 15)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN		(1 << 16)
+#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE			BIT(28)
+#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE			BIT(29)
 
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL	(1 << 4)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS	(1 << 2)
+#define IXGBE_KRM_AN_CNTL_8_LINEAR			BIT(0)
+#define IXGBE_KRM_AN_CNTL_8_LIMITING			BIT(1)
 
-#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK	(0x3 << 16)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D	BIT(12)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D		BIT(19)
 
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN	(1 << 1)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN	(1 << 2)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN		(1 << 3)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN		(1 << 31)
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN			BIT(6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN		BIT(15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN		BIT(16)
+
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL	BIT(4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS	BIT(2)
+
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK	(3u << 16)
+
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN	BIT(1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN	BIT(2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN		BIT(3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN		BIT(31)
 
 #define IXGBE_KX4_LINK_CNTL_1				0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX		(1 << 16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4		(1 << 17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX		(1 << 24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4		(1 << 25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE		(1 << 29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP	(1 << 30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART		(1 << 31)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX		BIT(16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4		BIT(17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX		BIT(24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4		BIT(25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE		BIT(29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP	BIT(30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART		BIT(31)
 
 #define IXGBE_SB_IOSF_INDIRECT_CTRL		0x00011144
 #define IXGBE_SB_IOSF_INDIRECT_DATA		0x00011148
@@ -3584,12 +3655,17 @@
 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT	28
 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK	0x7
 #define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT		31
-#define IXGBE_SB_IOSF_CTRL_BUSY		(1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_BUSY		BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
 #define IXGBE_SB_IOSF_TARGET_KR_PHY	0
 #define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY	1
 #define IXGBE_SB_IOSF_TARGET_KX4_PCS0	2
 #define IXGBE_SB_IOSF_TARGET_KX4_PCS1	3
 
 #define IXGBE_NW_MNG_IF_SEL		0x00011178
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT		BIT(1)
+#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M	BIT(23)
 #define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE	BIT(24)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT	3
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD	\
+				(0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
 #endif /* _IXGBE_TYPE_H_ */
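A minimal userspace sketch (not part of the patch; the standalone BIT() define below is only illustrative) of why the register masks above move from signed-literal shifts such as (1 << 31) to BIT(n) and 1u << n: shifting a signed 1 into the sign bit is undefined behaviour in C, and the (typically negative) result sign-extends when widened to a 64-bit type, corrupting the mask, while an unsigned shift avoids both problems.

#include <stdint.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))          /* same idea as the kernel's BIT() */

int main(void)
{
        uint64_t bad  = (uint64_t)(1 << 31);    /* undefined behaviour; typically 0xffffffff80000000 */
        uint64_t good = (uint64_t)BIT(31);      /* always 0x0000000080000000 */

        printf("signed shift: %#llx\nunsigned BIT: %#llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
}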
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 2358c1b..f2b1d48 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2016 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -214,8 +214,8 @@
 		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = 1 << (eeprom_size +
-					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		eeprom->word_size = BIT(eeprom_size +
+					IXGBE_EEPROM_WORD_SIZE_SHIFT);
 
 		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
 		       eeprom->type, eeprom->word_size);
@@ -747,6 +747,25 @@
 }
 
 /**
+ *  ixgbe_init_swfw_sync_X540 - Release hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  This function resets the hardware semaphore bits for a semaphore that
+ *  may have been left locked due to a catastrophic failure.
+ **/
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
+{
+	/* First try to grab the semaphore but we don't need to bother
+	 * looking to see whether we got the lock or not since we do
+	 * the same thing regardless of whether we got the lock or not.
+	 * We got the lock - we release it.
+	 * We timeout trying to get the lock - we force its release.
+	 */
+	ixgbe_get_swfw_sync_semaphore(hw);
+	ixgbe_release_swfw_sync_semaphore(hw);
+}
+
+/**
  * ixgbe_blink_led_start_X540 - Blink LED based on index.
  * @hw: pointer to hardware structure
  * @index: led number to blink
@@ -810,7 +829,7 @@
 
 	return 0;
 }
-static struct ixgbe_mac_operations mac_ops_X540 = {
+static const struct ixgbe_mac_operations mac_ops_X540 = {
 	.init_hw                = &ixgbe_init_hw_generic,
 	.reset_hw               = &ixgbe_reset_hw_X540,
 	.start_hw               = &ixgbe_start_hw_X540,
@@ -846,6 +865,7 @@
 	.clear_vfta             = &ixgbe_clear_vfta_generic,
 	.set_vfta               = &ixgbe_set_vfta_generic,
 	.fc_enable              = &ixgbe_fc_enable_generic,
+	.setup_fc		= ixgbe_setup_fc_generic,
 	.set_fw_drv_ver         = &ixgbe_set_fw_drv_ver_generic,
 	.init_uta_tables        = &ixgbe_init_uta_tables_generic,
 	.setup_sfp              = NULL,
@@ -853,6 +873,7 @@
 	.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
 	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync_X540,
 	.release_swfw_sync      = &ixgbe_release_swfw_sync_X540,
+	.init_swfw_sync		= &ixgbe_init_swfw_sync_X540,
 	.disable_rx_buff	= &ixgbe_disable_rx_buff_generic,
 	.enable_rx_buff		= &ixgbe_enable_rx_buff_generic,
 	.get_thermal_sensor_data = NULL,
@@ -863,7 +884,7 @@
 	.disable_rx		= &ixgbe_disable_rx_generic,
 };
 
-static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
 	.init_params            = &ixgbe_init_eeprom_params_X540,
 	.read                   = &ixgbe_read_eerd_X540,
 	.read_buffer		= &ixgbe_read_eerd_buffer_X540,
@@ -874,7 +895,7 @@
 	.update_checksum        = &ixgbe_update_eeprom_checksum_X540,
 };
 
-static struct ixgbe_phy_operations phy_ops_X540 = {
+static const struct ixgbe_phy_operations phy_ops_X540 = {
 	.identify               = &ixgbe_identify_phy_generic,
 	.identify_sfp           = &ixgbe_identify_sfp_module_generic,
 	.init			= NULL,
@@ -897,7 +918,7 @@
 	IXGBE_MVALS_INIT(X540)
 };
 
-struct ixgbe_info ixgbe_X540_info = {
+const struct ixgbe_info ixgbe_X540_info = {
 	.mac                    = ixgbe_mac_X540,
 	.get_invariants         = &ixgbe_get_invariants_X540,
 	.mac_ops                = &mac_ops_X540,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index a1468b1..e21cd48 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -36,4 +36,5 @@
 s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
 void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
 s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 87aca3f..19b75cd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  *  Intel 10 Gigabit PCI Express Linux driver
- *  Copyright(c) 1999 - 2015 Intel Corporation.
+ *  Copyright(c) 1999 - 2016 Intel Corporation.
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,7 @@
 #include "ixgbe_phy.h"
 
 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
+static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
 
 static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
 {
@@ -272,16 +273,26 @@
 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
 {
 	switch (hw->device_id) {
+	case IXGBE_DEV_ID_X550EM_A_SFP:
+		if (hw->bus.lan_id)
+			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+		else
+			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+		return ixgbe_identify_module_generic(hw);
 	case IXGBE_DEV_ID_X550EM_X_SFP:
 		/* set up for CS4227 usage */
 		hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
 		ixgbe_setup_mux_ctl(hw);
 		ixgbe_check_cs4227(hw);
+		/* Fallthrough */
+	case IXGBE_DEV_ID_X550EM_A_SFP_N:
 		return ixgbe_identify_module_generic(hw);
 	case IXGBE_DEV_ID_X550EM_X_KX4:
 		hw->phy.type = ixgbe_phy_x550em_kx4;
 		break;
 	case IXGBE_DEV_ID_X550EM_X_KR:
+	case IXGBE_DEV_ID_X550EM_A_KR:
+	case IXGBE_DEV_ID_X550EM_A_KR_L:
 		hw->phy.type = ixgbe_phy_x550em_kr;
 		break;
 	case IXGBE_DEV_ID_X550EM_X_1G_T:
@@ -324,8 +335,8 @@
 		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 				    IXGBE_EEC_SIZE_SHIFT);
-		eeprom->word_size = 1 << (eeprom_size +
-					  IXGBE_EEPROM_WORD_SIZE_SHIFT);
+		eeprom->word_size = BIT(eeprom_size +
+					IXGBE_EEPROM_WORD_SIZE_SHIFT);
 
 		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
 		       eeprom->type, eeprom->word_size);
@@ -355,7 +366,7 @@
 		command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
 		if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
 			break;
-		usleep_range(10, 20);
+		udelay(10);
 	}
 	if (ctrl)
 		*ctrl = command;
@@ -412,6 +423,121 @@
 	return ret;
 }
 
+/**
+ * ixgbe_get_phy_token - Get the token for shared PHY access
+ * @hw: Pointer to hardware structure
+ */
+static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+{
+	struct ixgbe_hic_phy_token_req token_cmd;
+	s32 status;
+
+	token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+	token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+	token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+	token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+	token_cmd.port_number = hw->bus.lan_id;
+	token_cmd.command_type = FW_PHY_TOKEN_REQ;
+	token_cmd.pad = 0;
+	status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
+					      IXGBE_HI_COMMAND_TIMEOUT,
+					      true);
+	if (status)
+		return status;
+	if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+		return 0;
+	if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
+		return IXGBE_ERR_FW_RESP_INVALID;
+
+	return IXGBE_ERR_TOKEN_RETRY;
+}
+
+/**
+ * ixgbe_put_phy_token - Put the token for shared PHY access
+ * @hw: Pointer to hardware structure
+ */
+static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+{
+	struct ixgbe_hic_phy_token_req token_cmd;
+	s32 status;
+
+	token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+	token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+	token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+	token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+	token_cmd.port_number = hw->bus.lan_id;
+	token_cmd.command_type = FW_PHY_TOKEN_REL;
+	token_cmd.pad = 0;
+	status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
+					      IXGBE_HI_COMMAND_TIMEOUT,
+					      true);
+	if (status)
+		return status;
+	if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+		return 0;
+	return IXGBE_ERR_FW_RESP_INVALID;
+}
+
+/**
+ *  ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: 3 bit device type
+ *  @data: Data to write to the register
+ **/
+static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+					 __always_unused u32 device_type,
+					 u32 data)
+{
+	struct ixgbe_hic_internal_phy_req write_cmd;
+
+	memset(&write_cmd, 0, sizeof(write_cmd));
+	write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+	write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+	write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+	write_cmd.port_number = hw->bus.lan_id;
+	write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
+	write_cmd.address = cpu_to_be16(reg_addr);
+	write_cmd.write_data = cpu_to_be32(data);
+
+	return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd),
+					    IXGBE_HI_COMMAND_TIMEOUT, false);
+}
+
+/**
+ *  ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to read
+ *  @device_type: 3 bit device type
+ *  @data: Pointer to read data from the register
+ **/
+static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+					__always_unused u32 device_type,
+					u32 *data)
+{
+	union {
+		struct ixgbe_hic_internal_phy_req cmd;
+		struct ixgbe_hic_internal_phy_resp rsp;
+	} hic;
+	s32 status;
+
+	memset(&hic, 0, sizeof(hic));
+	hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+	hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+	hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+	hic.cmd.port_number = hw->bus.lan_id;
+	hic.cmd.command_type = FW_INT_PHY_REQ_READ;
+	hic.cmd.address = cpu_to_be16(reg_addr);
+
+	status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
+					      IXGBE_HI_COMMAND_TIMEOUT, true);
+
+	/* Extract the register value from the response. */
+	*data = be32_to_cpu(hic.rsp.read_data);
+
+	return status;
+}
+
 /** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
  *  command assuming that the semaphore is already obtained.
  *  @hw: pointer to hardware structure
@@ -436,8 +562,7 @@
 	/* one word */
 	buffer.length = cpu_to_be16(sizeof(u16));
 
-	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
-					      sizeof(buffer),
+	status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
 					      IXGBE_HI_COMMAND_TIMEOUT, false);
 	if (status)
 		return status;
@@ -487,7 +612,7 @@
 		buffer.address = cpu_to_be32((offset + current_word) * 2);
 		buffer.length = cpu_to_be16(words_to_read * 2);
 
-		status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+		status = ixgbe_host_interface_command(hw, &buffer,
 						      sizeof(buffer),
 						      IXGBE_HI_COMMAND_TIMEOUT,
 						      false);
@@ -770,8 +895,7 @@
 	buffer.data = data;
 	buffer.address = cpu_to_be32(offset * 2);
 
-	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
-					      sizeof(buffer),
+	status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
 					      IXGBE_HI_COMMAND_TIMEOUT, false);
 	return status;
 }
@@ -813,8 +937,7 @@
 	buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
 	buffer.req.checksum = FW_DEFAULT_CHECKSUM;
 
-	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
-					      sizeof(buffer),
+	status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
 					      IXGBE_HI_COMMAND_TIMEOUT, false);
 	return status;
 }
@@ -861,9 +984,9 @@
 		fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
 		fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
 		fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
-		fw_cmd.port_number = (u8)hw->bus.lan_id;
+		fw_cmd.port_number = hw->bus.lan_id;
 
-		status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+		status = ixgbe_host_interface_command(hw, &fw_cmd,
 					sizeof(struct ixgbe_hic_disable_rxen),
 					IXGBE_HI_COMMAND_TIMEOUT, true);
 
@@ -1248,6 +1371,117 @@
 }
 
 /**
+ * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
+ * @hw: pointer to hardware structure
+ *
+ * Configure the integrated PHY for native SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			   __always_unused bool autoneg_wait_to_complete)
+{
+	bool setup_linear = false;
+	u32 reg_phy_int;
+	s32 rc;
+
+	/* Check if SFP module is supported and linear */
+	rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+	/* If no SFP module is present, return success, since an SFP-not-present
+	 * condition is not a fatal error in the setup MAC link flow.
+	 */
+	if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+		return 0;
+
+	if (rc)
+		return rc;
+
+	/* Configure internal PHY for native SFI */
+	rc = hw->mac.ops.read_iosf_sb_reg(hw,
+					  IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+					  IXGBE_SB_IOSF_TARGET_KR_PHY,
+					  &reg_phy_int);
+	if (rc)
+		return rc;
+
+	if (setup_linear) {
+		reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING;
+		reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR;
+	} else {
+		reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING;
+		reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR;
+	}
+
+	rc = hw->mac.ops.write_iosf_sb_reg(hw,
+					   IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+					   IXGBE_SB_IOSF_TARGET_KR_PHY,
+					   reg_phy_int);
+	if (rc)
+		return rc;
+
+	/* Setup XFI/SFI internal link */
+	return ixgbe_setup_ixfi_x550em(hw, &speed);
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
+ * @hw: pointer to hardware structure
+ *
+ * Configure the integrated PHY for SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			       __always_unused bool autoneg_wait_to_complete)
+{
+	u32 reg_slice, slice_offset;
+	bool setup_linear = false;
+	u16 reg_phy_ext;
+	s32 rc;
+
+	/* Check if SFP module is supported and linear */
+	rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+	/* If no SFP module is present, return success, since an SFP-not-present
+	 * condition is not a fatal error in the setup MAC link flow.
+	 */
+	if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+		return 0;
+
+	if (rc)
+		return rc;
+
+	/* Configure internal PHY for KR/KX. */
+	ixgbe_setup_kr_speed_x550em(hw, speed);
+
+	if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF)
+		return IXGBE_ERR_PHY_ADDR_INVALID;
+
+	/* Get external PHY device id */
+	rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+				  IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+	if (rc)
+		return rc;
+
+	/* When configuring quad port CS4223, the MAC instance is part
+	 * of the slice offset.
+	 */
+	if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+		slice_offset = (hw->bus.lan_id +
+				(hw->bus.instance_id << 1)) << 12;
+	else
+		slice_offset = hw->bus.lan_id << 12;
+
+	/* Configure CS4227/CS4223 LINE side to proper mode. */
+	reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+	if (setup_linear)
+		reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
+	else
+		reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
+	return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE,
+				     reg_phy_ext);
+}
+
+/**
  * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
  * @hw: pointer to hardware structure
  * @speed: new link speed
@@ -1326,6 +1560,57 @@
 	return 0;
 }
 
+/**
+ * ixgbe_setup_sgmii - Set up link for sgmii
+ * @hw: pointer to hardware structure
+ */
+static s32
+ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
+		  __always_unused bool autoneg_wait_to_complete)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	u32 lval, sval;
+	s32 rc;
+
+	rc = mac->ops.read_iosf_sb_reg(hw,
+				       IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+				       IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+	if (rc)
+		return rc;
+
+	lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+	lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+	lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+	lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+	lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+	rc = mac->ops.write_iosf_sb_reg(hw,
+					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+	if (rc)
+		return rc;
+
+	rc = mac->ops.read_iosf_sb_reg(hw,
+				       IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+				       IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+	if (rc)
+		return rc;
+
+	sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+	sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+	rc = mac->ops.write_iosf_sb_reg(hw,
+					IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+	if (rc)
+		return rc;
+
+	lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+	rc = mac->ops.write_iosf_sb_reg(hw,
+					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+					IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+
+	return rc;
+}
+
 /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
  *  @hw: pointer to hardware structure
  **/
@@ -1342,15 +1627,35 @@
 		mac->ops.enable_tx_laser = NULL;
 		mac->ops.flap_tx_laser = NULL;
 		mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
-		mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
+		mac->ops.setup_fc = ixgbe_setup_fc_x550em;
+		switch (hw->device_id) {
+		case IXGBE_DEV_ID_X550EM_A_SFP_N:
+			mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n;
+			break;
+		case IXGBE_DEV_ID_X550EM_A_SFP:
+			mac->ops.setup_mac_link =
+						ixgbe_setup_mac_link_sfp_x550a;
+			break;
+		default:
+			mac->ops.setup_mac_link =
+						ixgbe_setup_mac_link_sfp_x550em;
+			break;
+		}
 		mac->ops.set_rate_select_speed =
 					ixgbe_set_soft_rate_select_speed;
 		break;
 	case ixgbe_media_type_copper:
 		mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+		mac->ops.setup_fc = ixgbe_setup_fc_generic;
 		mac->ops.check_link = ixgbe_check_link_t_X550em;
+		return;
+	case ixgbe_media_type_backplane:
+		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+			mac->ops.setup_link = ixgbe_setup_sgmii;
 		break;
 	default:
+		mac->ops.setup_fc = ixgbe_setup_fc_x550em;
 		break;
 	}
 }
@@ -1614,7 +1919,7 @@
 	s32 status;
 	u32 reg_val;
 
-	status = ixgbe_read_iosf_sb_reg_x550(hw,
+	status = hw->mac.ops.read_iosf_sb_reg(hw,
 					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
 					IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
 	if (status)
@@ -1636,7 +1941,7 @@
 
 	/* Restart auto-negotiation. */
 	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
-	status = ixgbe_write_iosf_sb_reg_x550(hw,
+	status = hw->mac.ops.write_iosf_sb_reg(hw,
 					IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
 					IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
 
@@ -1653,9 +1958,9 @@
 	s32 status;
 	u32 reg_val;
 
-	status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
-					     IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
-					     hw->bus.lan_id, &reg_val);
+	status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
+					      IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+					      hw->bus.lan_id, &reg_val);
 	if (status)
 		return status;
 
@@ -1674,20 +1979,24 @@
 
 	/* Restart auto-negotiation. */
 	reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
-	status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
-					      IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
-					      hw->bus.lan_id, reg_val);
+	status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
+					       IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+					       hw->bus.lan_id, reg_val);
 
 	return status;
 }
 
-/**  ixgbe_setup_kr_x550em - Configure the KR PHY.
- *   @hw: pointer to hardware structure
+/**
+ * ixgbe_setup_kr_x550em - Configure the KR PHY
+ * @hw: pointer to hardware structure
  *
- *   Configures the integrated KR PHY.
+ * Configures the integrated KR PHY for X550EM_x.
  **/
 static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
 {
+	if (hw->mac.type != ixgbe_mac_X550EM_x)
+		return 0;
+
 	return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
 }
 
@@ -1842,6 +2151,86 @@
 	return status;
 }
 
+/**
+ * ixgbe_setup_fc_x550em - Set up flow control
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+{
+	bool pause, asm_dir;
+	u32 reg_val;
+	s32 rc;
+
+	/* Validate the requested mode */
+	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+		hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+		return IXGBE_ERR_INVALID_LINK_SETTINGS;
+	}
+
+	/* 10gig parts do not have a word in the EEPROM to determine the
+	 * default flow control setting, so we explicitly set it to full.
+	 */
+	if (hw->fc.requested_mode == ixgbe_fc_default)
+		hw->fc.requested_mode = ixgbe_fc_full;
+
+	/* Determine PAUSE and ASM_DIR bits. */
+	switch (hw->fc.requested_mode) {
+	case ixgbe_fc_none:
+		pause = false;
+		asm_dir = false;
+		break;
+	case ixgbe_fc_tx_pause:
+		pause = false;
+		asm_dir = true;
+		break;
+	case ixgbe_fc_rx_pause:
+		/* Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE, as such we fall
+		 * through to the fc_full statement.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		/* Fallthrough */
+	case ixgbe_fc_full:
+		pause = true;
+		asm_dir = true;
+		break;
+	default:
+		hw_err(hw, "Flow control param set incorrectly\n");
+		return IXGBE_ERR_CONFIG;
+	}
+
+	if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR &&
+	    hw->device_id != IXGBE_DEV_ID_X550EM_A_KR &&
+	    hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L)
+		return 0;
+
+	rc = hw->mac.ops.read_iosf_sb_reg(hw,
+					  IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+					  IXGBE_SB_IOSF_TARGET_KR_PHY,
+					  &reg_val);
+	if (rc)
+		return rc;
+
+	reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+		     IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+	if (pause)
+		reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+	if (asm_dir)
+		reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+	rc = hw->mac.ops.write_iosf_sb_reg(hw,
+					   IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+					   IXGBE_SB_IOSF_TARGET_KR_PHY,
+					   reg_val);
+
+	/* This device does not fully support AN. */
+	hw->fc.disable_fc_autoneg = true;
+
+	return rc;
+}
+
 /** ixgbe_enter_lplu_x550em - Transition to low power states
  *  @hw: pointer to hardware structure
  *
@@ -1939,6 +2328,36 @@
 	return status;
 }
 
+/**
+ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
+ * @hw: pointer to hardware structure
+ *
+ * Read NW_MNG_IF_SEL register and save field values.
+ */
+static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+{
+	/* Save NW management interface connected on board. This is used
+	 * to determine internal PHY mode.
+	 */
+	hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+	/* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
+	 * PHY address. This register field has only been used for X552.
+	 */
+	if (!hw->phy.nw_mng_if_sel) {
+		if (hw->mac.type == ixgbe_mac_x550em_a) {
+			struct ixgbe_adapter *adapter = hw->back;
+
+			e_warn(drv, "nw_mng_if_sel not set\n");
+		}
+		return;
+	}
+
+	hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
+			      IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+			     IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+}
+
 /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
  *  @hw: pointer to hardware structure
  *
@@ -1953,14 +2372,11 @@
 
 	hw->mac.ops.set_lan_id(hw);
 
+	ixgbe_read_mng_if_sel_x550em(hw);
+
 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
 		phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
 		ixgbe_setup_mux_ctl(hw);
-
-		/* Save NW management interface connected on board. This is used
-		 * to determine internal PHY mode.
-		 */
-		phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
 	}
 
 	/* Identify the PHY or SFP module */
@@ -2023,16 +2439,24 @@
 
 	/* Detect if there is a copper PHY attached. */
 	switch (hw->device_id) {
+	case IXGBE_DEV_ID_X550EM_A_SGMII:
+	case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+		hw->phy.type = ixgbe_phy_sgmii;
+		/* Fallthrough */
 	case IXGBE_DEV_ID_X550EM_X_KR:
 	case IXGBE_DEV_ID_X550EM_X_KX4:
+	case IXGBE_DEV_ID_X550EM_A_KR:
+	case IXGBE_DEV_ID_X550EM_A_KR_L:
 		media_type = ixgbe_media_type_backplane;
 		break;
 	case IXGBE_DEV_ID_X550EM_X_SFP:
+	case IXGBE_DEV_ID_X550EM_A_SFP:
+	case IXGBE_DEV_ID_X550EM_A_SFP_N:
 		media_type = ixgbe_media_type_fiber;
 		break;
 	case IXGBE_DEV_ID_X550EM_X_1G_T:
 	case IXGBE_DEV_ID_X550EM_X_10G_T:
-		 media_type = ixgbe_media_type_copper;
+		media_type = ixgbe_media_type_copper;
 		break;
 	default:
 		media_type = ixgbe_media_type_unknown;
@@ -2080,6 +2504,27 @@
 	return status;
 }
 
+/**
+ * ixgbe_set_mdio_speed - Set MDIO clock speed
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
+{
+	u32 hlreg0;
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_X550EM_X_10G_T:
+	case IXGBE_DEV_ID_X550EM_A_SFP:
+		/* Config MDIO clock speed before the first MDIO PHY access */
+		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+		hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+		break;
+	default:
+		break;
+	}
+}
+
 /**  ixgbe_reset_hw_X550em - Perform hardware reset
  **  @hw: pointer to hardware structure
  **
@@ -2093,7 +2538,6 @@
 	s32 status;
 	u32 ctrl = 0;
 	u32 i;
-	u32 hlreg0;
 	bool link_up = false;
 
 	/* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -2179,11 +2623,7 @@
 	hw->mac.num_rar_entries = 128;
 	hw->mac.ops.init_rx_addrs(hw);
 
-	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
-		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-		hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
-		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
-	}
+	ixgbe_set_mdio_speed(hw);
 
 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
 		ixgbe_setup_mux_ctl(hw);
@@ -2206,9 +2646,9 @@
 
 	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
 	if (enable)
-		pfvfspoof |= (1 << vf_target_shift);
+		pfvfspoof |= BIT(vf_target_shift);
 	else
-		pfvfspoof &= ~(1 << vf_target_shift);
+		pfvfspoof &= ~BIT(vf_target_shift);
 
 	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
 }
@@ -2296,6 +2736,110 @@
 	ixgbe_release_swfw_sync_X540(hw, mask);
 }
 
+/**
+ * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and gets the shared PHY token as needed
+ */
+static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+	u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+	int retries = FW_PHY_TOKEN_RETRIES;
+	s32 status;
+
+	while (--retries) {
+		status = 0;
+		if (hmask)
+			status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
+		if (status)
+			return status;
+		if (!(mask & IXGBE_GSSR_TOKEN_SM))
+			return 0;
+
+		status = ixgbe_get_phy_token(hw);
+		if (!status)
+			return 0;
+		if (hmask)
+			ixgbe_release_swfw_sync_X540(hw, hmask);
+		if (status != IXGBE_ERR_TOKEN_RETRY)
+			return status;
+		msleep(FW_PHY_TOKEN_DELAY);
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts the shared PHY token as needed
+ */
+static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+	u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+
+	if (mask & IXGBE_GSSR_TOKEN_SM)
+		ixgbe_put_phy_token(hw);
+
+	if (hmask)
+		ixgbe_release_swfw_sync_X540(hw, hmask);
+}
+
+/**
+ * ixgbe_read_phy_reg_x550a - Reads specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ */
+static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+				    u32 device_type, u16 *phy_data)
+{
+	u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+	s32 status;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+		return IXGBE_ERR_SWFW_SYNC;
+
+	status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+	hw->mac.ops.release_swfw_sync(hw, mask);
+
+	return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_x550a - Writes specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register using the SWFW lock and PHY Token.
+ * The PHY Token is needed since the MDIO is shared between two MAC instances.
+ */
+static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+				     u32 device_type, u16 phy_data)
+{
+	u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+	s32 status;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+		return IXGBE_ERR_SWFW_SYNC;
+
+	status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
+	hw->mac.ops.release_swfw_sync(hw, mask);
+
+	return status;
+}
+
 #define X550_COMMON_MAC \
 	.init_hw			= &ixgbe_init_hw_generic, \
 	.start_hw			= &ixgbe_start_hw_X540, \
@@ -2337,12 +2881,10 @@
 	.enable_rx_buff			= &ixgbe_enable_rx_buff_generic, \
 	.get_thermal_sensor_data	= NULL, \
 	.init_thermal_sensor_thresh	= NULL, \
-	.prot_autoc_read		= &prot_autoc_read_generic, \
-	.prot_autoc_write		= &prot_autoc_write_generic, \
 	.enable_rx			= &ixgbe_enable_rx_generic, \
 	.disable_rx			= &ixgbe_disable_rx_x550, \
 
-static struct ixgbe_mac_operations mac_ops_X550 = {
+static const struct ixgbe_mac_operations mac_ops_X550 = {
 	X550_COMMON_MAC
 	.reset_hw		= &ixgbe_reset_hw_X540,
 	.get_media_type		= &ixgbe_get_media_type_X540,
@@ -2354,20 +2896,45 @@
 	.setup_sfp		= NULL,
 	.acquire_swfw_sync	= &ixgbe_acquire_swfw_sync_X540,
 	.release_swfw_sync	= &ixgbe_release_swfw_sync_X540,
+	.init_swfw_sync		= &ixgbe_init_swfw_sync_X540,
+	.prot_autoc_read	= prot_autoc_read_generic,
+	.prot_autoc_write	= prot_autoc_write_generic,
+	.setup_fc		= ixgbe_setup_fc_generic,
 };
 
-static struct ixgbe_mac_operations mac_ops_X550EM_x = {
+static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
 	X550_COMMON_MAC
 	.reset_hw		= &ixgbe_reset_hw_X550em,
 	.get_media_type		= &ixgbe_get_media_type_X550em,
 	.get_san_mac_addr	= NULL,
 	.get_wwn_prefix		= NULL,
-	.setup_link		= NULL, /* defined later */
+	.setup_link		= &ixgbe_setup_mac_link_X540,
 	.get_link_capabilities	= &ixgbe_get_link_capabilities_X550em,
 	.get_bus_info		= &ixgbe_get_bus_info_X550em,
 	.setup_sfp		= ixgbe_setup_sfp_modules_X550em,
 	.acquire_swfw_sync	= &ixgbe_acquire_swfw_sync_X550em,
 	.release_swfw_sync	= &ixgbe_release_swfw_sync_X550em,
+	.init_swfw_sync		= &ixgbe_init_swfw_sync_X540,
+	.setup_fc		= NULL, /* defined later */
+	.read_iosf_sb_reg	= ixgbe_read_iosf_sb_reg_x550,
+	.write_iosf_sb_reg	= ixgbe_write_iosf_sb_reg_x550,
+};
+
+static struct ixgbe_mac_operations mac_ops_x550em_a = {
+	X550_COMMON_MAC
+	.reset_hw		= ixgbe_reset_hw_X550em,
+	.get_media_type		= ixgbe_get_media_type_X550em,
+	.get_san_mac_addr	= NULL,
+	.get_wwn_prefix		= NULL,
+	.setup_link		= NULL, /* defined later */
+	.get_link_capabilities	= ixgbe_get_link_capabilities_X550em,
+	.get_bus_info		= ixgbe_get_bus_info_X550em,
+	.setup_sfp		= ixgbe_setup_sfp_modules_X550em,
+	.acquire_swfw_sync	= ixgbe_acquire_swfw_sync_x550em_a,
+	.release_swfw_sync	= ixgbe_release_swfw_sync_x550em_a,
+	.setup_fc		= ixgbe_setup_fc_x550em,
+	.read_iosf_sb_reg	= ixgbe_read_iosf_sb_reg_x550a,
+	.write_iosf_sb_reg	= ixgbe_write_iosf_sb_reg_x550a,
 };
 
 #define X550_COMMON_EEP \
@@ -2379,12 +2946,12 @@
 	.update_checksum	= &ixgbe_update_eeprom_checksum_X550, \
 	.calc_checksum		= &ixgbe_calc_eeprom_checksum_X550, \
 
-static struct ixgbe_eeprom_operations eeprom_ops_X550 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
 	X550_COMMON_EEP
 	.init_params		= &ixgbe_init_eeprom_params_X550,
 };
 
-static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
 	X550_COMMON_EEP
 	.init_params		= &ixgbe_init_eeprom_params_X540,
 };
@@ -2398,23 +2965,25 @@
 	.read_i2c_sff8472	= &ixgbe_read_i2c_sff8472_generic, \
 	.read_i2c_eeprom	= &ixgbe_read_i2c_eeprom_generic, \
 	.write_i2c_eeprom	= &ixgbe_write_i2c_eeprom_generic, \
-	.read_reg		= &ixgbe_read_phy_reg_generic, \
-	.write_reg		= &ixgbe_write_phy_reg_generic, \
 	.setup_link		= &ixgbe_setup_phy_link_generic, \
 	.set_phy_power		= NULL, \
 	.check_overtemp		= &ixgbe_tn_check_overtemp, \
 	.get_firmware_version	= &ixgbe_get_phy_firmware_version_generic,
 
-static struct ixgbe_phy_operations phy_ops_X550 = {
+static const struct ixgbe_phy_operations phy_ops_X550 = {
 	X550_COMMON_PHY
 	.init			= NULL,
 	.identify		= &ixgbe_identify_phy_generic,
+	.read_reg		= &ixgbe_read_phy_reg_generic,
+	.write_reg		= &ixgbe_write_phy_reg_generic,
 };
 
-static struct ixgbe_phy_operations phy_ops_X550EM_x = {
+static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
 	X550_COMMON_PHY
 	.init			= &ixgbe_init_phy_ops_X550em,
 	.identify		= &ixgbe_identify_phy_x550em,
+	.read_reg		= &ixgbe_read_phy_reg_generic,
+	.write_reg		= &ixgbe_write_phy_reg_generic,
 	.read_i2c_combined	= &ixgbe_read_i2c_combined_generic,
 	.write_i2c_combined	= &ixgbe_write_i2c_combined_generic,
 	.read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
@@ -2422,6 +2991,14 @@
 				     &ixgbe_write_i2c_combined_generic_unlocked,
 };
 
+static const struct ixgbe_phy_operations phy_ops_x550em_a = {
+	X550_COMMON_PHY
+	.init			= &ixgbe_init_phy_ops_X550em,
+	.identify		= &ixgbe_identify_phy_x550em,
+	.read_reg		= &ixgbe_read_phy_reg_x550a,
+	.write_reg		= &ixgbe_write_phy_reg_x550a,
+};
+
 static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
 	IXGBE_MVALS_INIT(X550)
 };
@@ -2430,7 +3007,11 @@
 	IXGBE_MVALS_INIT(X550EM_x)
 };
 
-struct ixgbe_info ixgbe_X550_info = {
+static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+	IXGBE_MVALS_INIT(X550EM_a)
+};
+
+const struct ixgbe_info ixgbe_X550_info = {
 	.mac			= ixgbe_mac_X550,
 	.get_invariants		= &ixgbe_get_invariants_X540,
 	.mac_ops		= &mac_ops_X550,
@@ -2440,7 +3021,7 @@
 	.mvals			= ixgbe_mvals_X550,
 };
 
-struct ixgbe_info ixgbe_X550EM_x_info = {
+const struct ixgbe_info ixgbe_X550EM_x_info = {
 	.mac			= ixgbe_mac_X550EM_x,
 	.get_invariants		= &ixgbe_get_invariants_X550_x,
 	.mac_ops		= &mac_ops_X550EM_x,
@@ -2449,3 +3030,13 @@
 	.mbx_ops		= &mbx_ops_generic,
 	.mvals			= ixgbe_mvals_X550EM_x,
 };
+
+const struct ixgbe_info ixgbe_x550em_a_info = {
+	.mac			= ixgbe_mac_x550em_a,
+	.get_invariants		= &ixgbe_get_invariants_X550_x,
+	.mac_ops		= &mac_ops_x550em_a,
+	.eeprom_ops		= &eeprom_ops_X550EM_x,
+	.phy_ops		= &phy_ops_x550em_a,
+	.mbx_ops		= &mbx_ops_generic,
+	.mvals			= ixgbe_mvals_x550em_a,
+};
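An illustrative sketch (not part of the patch; all names and stubs are hypothetical) of the two-level locking pattern the x550em_a paths above introduce: take the hardware SWFW semaphore first, then request the firmware-owned PHY token, and on a retryable token failure release the semaphore and retry the whole sequence after a short delay so the other MAC instance sharing the PHY can make progress.

#include <stdbool.h>

#define ERR_TOKEN_RETRY (-40)
#define TOKEN_DELAY_MS  5
#define TOKEN_RETRIES   ((5 * 1000) / TOKEN_DELAY_MS)

/* Stand-ins for the real semaphore, token and delay primitives. */
static int  acquire_hw_semaphore(unsigned int mask) { (void)mask; return 0; }
static void release_hw_semaphore(unsigned int mask) { (void)mask; }
static int  get_fw_phy_token(void) { return 0; }
static void sleep_ms(unsigned int ms) { (void)ms; }

static int acquire_phy_access(unsigned int hw_mask, bool need_token)
{
        int retries = TOKEN_RETRIES;
        int status = 0;

        while (--retries) {
                if (hw_mask) {
                        status = acquire_hw_semaphore(hw_mask);
                        if (status)
                                return status;
                }
                if (!need_token)
                        return 0;

                status = get_fw_phy_token();
                if (!status)
                        return 0;               /* semaphore and token both held */

                if (hw_mask)
                        release_hw_semaphore(hw_mask); /* roll back before retrying */
                if (status != ERR_TOKEN_RETRY)
                        return status;
                sleep_ms(TOKEN_DELAY_MS);
        }
        return status;
}

int main(void)
{
        return acquire_phy_access(0x1, true);
}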
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5843458..ae09d60 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -33,6 +33,11 @@
 #define IXGBE_DEV_ID_X550_VF		0x1565
 #define IXGBE_DEV_ID_X550EM_X_VF	0x15A8
 
+#define IXGBE_DEV_ID_82599_VF_HV	0x152E
+#define IXGBE_DEV_ID_X540_VF_HV		0x1530
+#define IXGBE_DEV_ID_X550_VF_HV		0x1564
+#define IXGBE_DEV_ID_X550EM_X_VF_HV	0x15A9
+
 #define IXGBE_VF_IRQ_CLEAR_MASK		7
 #define IXGBE_VF_MAX_TX_QUEUES		8
 #define IXGBE_VF_MAX_RX_QUEUES		8
@@ -74,7 +79,7 @@
 #define IXGBE_RXDCTL_RLPML_EN	0x00008000
 
 /* DCA Control */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
 
 /* PSRTYPE bit definitions */
 #define IXGBE_PSRTYPE_TCPHDR	0x00000010
@@ -296,16 +301,16 @@
 #define IXGBE_TXDCTL_SWFLSH		0x04000000 /* Tx Desc. wr-bk flushing */
 #define IXGBE_TXDCTL_WTHRESH_SHIFT	16	   /* shift to WTHRESH bits */
 
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	(1 << 5)  /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	(1 << 6)  /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	(1 << 7)  /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	(1 << 9)  /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	(1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	(1 << 15) /* Rx wr header RO */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	BIT(5)  /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	BIT(6)  /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	BIT(7)  /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	BIT(9)  /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	BIT(15) /* Rx wr header RO */
 
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	(1 << 5)  /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	(1 << 9)  /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	(1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	(1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	BIT(5)  /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	BIT(9)  /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	BIT(13) /* Tx rd data Relax Order */
 
 #endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c48aef6..508e72c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -42,65 +42,54 @@
 
 #define IXGBE_ALL_RAR_ENTRIES 16
 
+enum {NETDEV_STATS, IXGBEVF_STATS};
+
 struct ixgbe_stats {
 	char stat_string[ETH_GSTRING_LEN];
-	struct {
-		int sizeof_stat;
-		int stat_offset;
-		int base_stat_offset;
-		int saved_reset_offset;
-	};
+	int type;
+	int sizeof_stat;
+	int stat_offset;
 };
 
-#define IXGBEVF_STAT(m, b, r) { \
-	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
-	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
-	.base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
-	.saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+#define IXGBEVF_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.type = IXGBEVF_STATS, \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
 }
 
-#define IXGBEVF_ZSTAT(m) { \
-	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
-	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
-	.base_stat_offset = -1, \
-	.saved_reset_offset = -1 \
+#define IXGBEVF_NETDEV_STAT(_net_stat) { \
+	.stat_string = #_net_stat, \
+	.type = NETDEV_STATS, \
+	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
 }
 
-static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
-	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
-				    stats.saved_reset_vfgprc)},
-	{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
-				    stats.saved_reset_vfgptc)},
-	{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
-				  stats.saved_reset_vfgorc)},
-	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
-				  stats.saved_reset_vfgotc)},
-	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
-	{"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
-	{"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
-	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
-				   stats.saved_reset_vfmprc)},
-	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-#ifdef BP_EXTENDED_STATS
-	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
-	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
-	{"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
-	{"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
-	{"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
-	{"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
-#endif
+static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
+	IXGBEVF_NETDEV_STAT(rx_packets),
+	IXGBEVF_NETDEV_STAT(tx_packets),
+	IXGBEVF_NETDEV_STAT(rx_bytes),
+	IXGBEVF_NETDEV_STAT(tx_bytes),
+	IXGBEVF_STAT("tx_busy", tx_busy),
+	IXGBEVF_STAT("tx_restart_queue", restart_queue),
+	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
+	IXGBEVF_NETDEV_STAT(multicast),
+	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
 };
 
-#define IXGBE_QUEUE_STATS_LEN 0
-#define IXGBE_GLOBAL_STATS_LEN	ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBEVF_QUEUE_STATS_LEN ( \
+	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+	 (sizeof(struct ixgbe_stats) / sizeof(u64)))
+#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
 
-#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test  (offline)",
 	"Link test   (on/offline)"
 };
 
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
 
 static int ixgbevf_get_settings(struct net_device *netdev,
 				struct ethtool_cmd *ecmd)
@@ -177,7 +166,8 @@
 
 	memset(p, 0, regs_len);
 
-	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+	/* generate a number suitable for ethtool's register version */
+	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
 
 	/* General Registers */
 	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
@@ -392,13 +382,13 @@
 	return err;
 }
 
-static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
 {
 	switch (stringset) {
 	case ETH_SS_TEST:
-		return IXGBE_TEST_LEN;
+		return IXGBEVF_TEST_LEN;
 	case ETH_SS_STATS:
-		return IXGBE_GLOBAL_STATS_LEN;
+		return IXGBEVF_STATS_LEN;
 	default:
 		return -EINVAL;
 	}
@@ -408,70 +398,138 @@
 				      struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-	char *base = (char *)adapter;
-	int i;
-#ifdef BP_EXTENDED_STATS
-	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
-	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rx_yields += adapter->rx_ring[i]->stats.yields;
-		rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
-		rx_yields += adapter->rx_ring[i]->stats.yields;
-	}
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		tx_yields += adapter->tx_ring[i]->stats.yields;
-		tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
-		tx_yields += adapter->tx_ring[i]->stats.yields;
-	}
-
-	adapter->bp_rx_yields = rx_yields;
-	adapter->bp_rx_cleaned = rx_cleaned;
-	adapter->bp_rx_missed = rx_missed;
-
-	adapter->bp_tx_yields = tx_yields;
-	adapter->bp_tx_cleaned = tx_cleaned;
-	adapter->bp_tx_missed = tx_missed;
-#endif
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *net_stats;
+	unsigned int start;
+	struct ixgbevf_ring *ring;
+	int i, j;
+	char *p;
 
 	ixgbevf_update_stats(adapter);
-	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-		char *p = base + ixgbe_gstrings_stats[i].stat_offset;
-		char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
-		char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
-
-		if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
-			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
-				data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
-			else
-				data[i] = *(u64 *)p;
-		} else {
-			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
-				data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
-			else
-				data[i] = *(u32 *)p;
+	net_stats = dev_get_stats(netdev, &temp);
+	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+		switch (ixgbevf_gstrings_stats[i].type) {
+		case NETDEV_STATS:
+			p = (char *)net_stats +
+					ixgbevf_gstrings_stats[i].stat_offset;
+			break;
+		case IXGBEVF_STATS:
+			p = (char *)adapter +
+					ixgbevf_gstrings_stats[i].stat_offset;
+			break;
+		default:
+			data[i] = 0;
+			continue;
 		}
+
+		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
+			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+
+	/* populate Tx queue data */
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		ring = adapter->tx_ring[j];
+		if (!ring) {
+			data[i++] = 0;
+			data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+#endif
+			continue;
+		}
+
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			data[i]   = ring->stats.packets;
+			data[i + 1] = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		i += 2;
+#ifdef BP_EXTENDED_STATS
+		data[i] = ring->stats.yields;
+		data[i + 1] = ring->stats.misses;
+		data[i + 2] = ring->stats.cleaned;
+		i += 3;
+#endif
+	}
+
+	/* populate Rx queue data */
+	for (j = 0; j < adapter->num_rx_queues; j++) {
+		ring = adapter->rx_ring[j];
+		if (!ring) {
+			data[i++] = 0;
+			data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+#endif
+			continue;
+		}
+
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			data[i]   = ring->stats.packets;
+			data[i + 1] = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		i += 2;
+#ifdef BP_EXTENDED_STATS
+		data[i] = ring->stats.yields;
+		data[i + 1] = ring->stats.misses;
+		data[i + 2] = ring->stats.cleaned;
+		i += 3;
+#endif
 	}
 }
 
 static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
 				u8 *data)
 {
+	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	char *p = (char *)data;
 	int i;
 
 	switch (stringset) {
 	case ETH_SS_TEST:
 		memcpy(data, *ixgbe_gstrings_test,
-		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
 		break;
 	case ETH_SS_STATS:
-		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
+
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			sprintf(p, "tx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bp_misses", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bp_cleaned", i);
+			p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+		}
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			sprintf(p, "rx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+			sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bp_misses", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bp_cleaned", i);
+			p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+		}
 		break;
 	}
 }
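The per-queue loops above copy 64-bit ring counters under the ring's u64_stats_sync sequence counter, so a 32-bit reader never reports a torn or inconsistent packets/bytes pair while the datapath is updating them. A minimal sketch of the writer/reader pairing, using a hypothetical ring structure in place of ixgbevf_ring:

	#include <linux/types.h>
	#include <linux/u64_stats_sync.h>

	struct example_ring {
		struct u64_stats_sync syncp;
		u64 packets;
		u64 bytes;
	};

	/* datapath side: publish counter updates inside the write section */
	static void example_ring_account(struct example_ring *ring, unsigned int len)
	{
		u64_stats_update_begin(&ring->syncp);
		ring->packets++;
		ring->bytes += len;
		u64_stats_update_end(&ring->syncp);
	}

	/* ethtool side: retry until an update-free snapshot was taken */
	static void example_ring_snapshot(struct example_ring *ring,
					  u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			*packets = ring->packets;
			*bytes = ring->bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}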
@@ -680,7 +738,7 @@
 
 		if (if_running)
 			/* indicate we're in test mode */
-			dev_close(netdev);
+			ixgbevf_close(netdev);
 		else
 			ixgbevf_reset(adapter);
 
@@ -692,7 +750,7 @@
 
 		clear_bit(__IXGBEVF_TESTING, &adapter->state);
 		if (if_running)
-			dev_open(netdev);
+			ixgbevf_open(netdev);
 	} else {
 		hw_dbg(&adapter->hw, "online testing starting\n");
 		/* Online tests */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 68ec7daa..d5944c3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -166,10 +166,10 @@
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
-#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
-#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
+#define IXGBE_TX_FLAGS_CSUM		BIT(0)
+#define IXGBE_TX_FLAGS_VLAN		BIT(1)
+#define IXGBE_TX_FLAGS_TSO		BIT(2)
+#define IXGBE_TX_FLAGS_IPV4		BIT(3)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
@@ -403,13 +403,6 @@
 	u32 alloc_rx_page_failed;
 	u32 alloc_rx_buff_failed;
 
-	/* Some features need tri-state capability,
-	 * thus the additional *_CAPABLE flags.
-	 */
-	u32 flags;
-#define IXGBEVF_FLAG_RESET_REQUESTED		(u32)(1)
-#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED	(u32)(1 << 2)
-
 	struct msix_entry *msix_entries;
 
 	/* OS defined structs */
@@ -429,16 +422,6 @@
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
 
-#ifdef BP_EXTENDED_STATS
-	u64 bp_rx_yields;
-	u64 bp_rx_cleaned;
-	u64 bp_rx_missed;
-
-	u64 bp_tx_yields;
-	u64 bp_tx_cleaned;
-	u64 bp_tx_missed;
-#endif
-
 	u8 __iomem *io_addr; /* Mainly for iounmap use */
 	u32 link_speed;
 	bool link_up;
@@ -461,13 +444,19 @@
 	__IXGBEVF_REMOVING,
 	__IXGBEVF_SERVICE_SCHED,
 	__IXGBEVF_SERVICE_INITED,
+	__IXGBEVF_RESET_REQUESTED,
+	__IXGBEVF_QUEUE_RESET_REQUESTED,
 };
 
 enum ixgbevf_boards {
 	board_82599_vf,
+	board_82599_vf_hv,
 	board_X540_vf,
+	board_X540_vf_hv,
 	board_X550_vf,
+	board_X550_vf_hv,
 	board_X550EM_x_vf,
+	board_X550EM_x_vf_hv,
 };
 
 enum ixgbevf_xcast_modes {
@@ -482,10 +471,18 @@
 extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
 extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
 
+extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
+extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+
 /* needed by ethtool.c */
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
 
+int ixgbevf_open(struct net_device *netdev);
+int ixgbevf_close(struct net_device *netdev);
 void ixgbevf_up(struct ixgbevf_adapter *adapter);
 void ixgbevf_down(struct ixgbevf_adapter *adapter);
 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0ea14c0..5e348b1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -62,10 +62,14 @@
 	"Copyright (c) 2009 - 2015 Intel Corporation.";
 
 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
-	[board_82599_vf] = &ixgbevf_82599_vf_info,
-	[board_X540_vf]  = &ixgbevf_X540_vf_info,
-	[board_X550_vf]  = &ixgbevf_X550_vf_info,
-	[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+	[board_82599_vf]	= &ixgbevf_82599_vf_info,
+	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
+	[board_X540_vf]		= &ixgbevf_X540_vf_info,
+	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
+	[board_X550_vf]		= &ixgbevf_X550_vf_info,
+	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
+	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
+	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
 };
 
 /* ixgbevf_pci_tbl - PCI Device ID Table
@@ -78,9 +82,13 @@
  */
 static const struct pci_device_id ixgbevf_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
 	/* required last entry */
 	{0, }
 };
@@ -268,7 +276,7 @@
 {
 	/* Do the reset outside of interrupt context */
 	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
-		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
 		ixgbevf_service_event_schedule(adapter);
 	}
 }
@@ -288,9 +296,10 @@
  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: board private structure
  * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  **/
 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
-				 struct ixgbevf_ring *tx_ring)
+				 struct ixgbevf_ring *tx_ring, int napi_budget)
 {
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct ixgbevf_tx_buffer *tx_buffer;
@@ -328,7 +337,7 @@
 		total_packets += tx_buffer->gso_segs;
 
 		/* free the skb */
-		dev_kfree_skb_any(tx_buffer->skb);
+		napi_consume_skb(tx_buffer->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -1013,8 +1022,10 @@
 	int per_ring_budget, work_done = 0;
 	bool clean_complete = true;
 
-	ixgbevf_for_each_ring(ring, q_vector->tx)
-		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+	ixgbevf_for_each_ring(ring, q_vector->tx) {
+		if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
+			clean_complete = false;
+	}
 
 	if (budget <= 0)
 		return budget;
@@ -1035,7 +1046,8 @@
 		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
 						   per_ring_budget);
 		work_done += cleaned;
-		clean_complete &= (cleaned < per_ring_budget);
+		if (cleaned >= per_ring_budget)
+			clean_complete = false;
 	}
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1052,7 +1064,7 @@
 	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
 	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
 		ixgbevf_irq_enable_queues(adapter,
-					  1 << q_vector->v_idx);
+					  BIT(q_vector->v_idx));
 
 	return 0;
 }
@@ -1154,14 +1166,14 @@
 		}
 
 		/* add q_vector eims value to global eims_enable_mask */
-		adapter->eims_enable_mask |= 1 << v_idx;
+		adapter->eims_enable_mask |= BIT(v_idx);
 
 		ixgbevf_write_eitr(q_vector);
 	}
 
 	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
 	/* setup eims_other and add value to global eims_enable_mask */
-	adapter->eims_other = 1 << v_idx;
+	adapter->eims_other = BIT(v_idx);
 	adapter->eims_enable_mask |= adapter->eims_other;
 }
 
@@ -1585,8 +1597,8 @@
 	txdctl |= (8 << 16);    /* WTHRESH = 8 */
 
 	/* Setting PTHRESH to 32 both improves performance */
-	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
-		  32;          /* PTHRESH = 32 */
+	txdctl |= (1u << 8) |    /* HTHRESH = 1 */
+		   32;           /* PTHRESH = 32 */
 
 	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
 
@@ -1642,7 +1654,7 @@
 		      IXGBE_PSRTYPE_L2HDR;
 
 	if (adapter->num_rx_queues > 1)
-		psrtype |= 1 << 29;
+		psrtype |= BIT(29);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 }
@@ -1748,9 +1760,15 @@
 	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
 			ring->count * sizeof(union ixgbe_adv_rx_desc));
 
+#ifndef CONFIG_SPARC
 	/* enable relaxed ordering */
 	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
 			IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+#else
+	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+			IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+			IXGBE_DCA_RXCTRL_DATA_WRO_EN);
+#endif
 
 	/* reset head and tail pointers */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
@@ -1791,7 +1809,7 @@
 		ixgbevf_setup_vfmrqc(adapter);
 
 	/* notify the PF of our intent to use this size of frame */
-	ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+	hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring
@@ -1904,7 +1922,7 @@
 
 	spin_lock_bh(&adapter->mbx_lock);
 
-	hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+	hw->mac.ops.update_xcast_mode(hw, xcast_mode);
 
 	/* reprogram multicast list */
 	hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -1984,7 +2002,7 @@
 		hw->mbx.timeout = 0;
 
 		/* wait for watchdog to come around and bail us out */
-		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+		set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
 	}
 
 	return 0;
@@ -2052,7 +2070,7 @@
 	spin_lock_bh(&adapter->mbx_lock);
 
 	while (api[idx] != ixgbe_mbox_api_unknown) {
-		err = ixgbevf_negotiate_api_version(hw, api[idx]);
+		err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
 		if (!err)
 			break;
 		idx++;
@@ -2749,11 +2767,9 @@
 
 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
 {
-	if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
+	if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
 		return;
 
-	adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
-
 	/* If we're already down or resetting, just bail */
 	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
 	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
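The conversion from adapter->flags to atomic state bits in the hunks above replaces non-atomic |=/&= updates with atomic bitops on the adapter->state word, so a reset request raised from one context cannot be lost to a racing read-modify-write, and the service task consumes each request exactly once via test_and_clear_bit(). A minimal sketch of the pattern (names are illustrative, not the driver's):

	#include <linux/bitops.h>

	enum { EXAMPLE_RESET_REQUESTED, EXAMPLE_QUEUE_RESET_REQUESTED };

	static unsigned long example_state;

	static void example_request_reset(void)
	{
		set_bit(EXAMPLE_RESET_REQUESTED, &example_state);	/* atomic RMW */
	}

	static void example_service_task(void)
	{
		/* atomically test and clear: true at most once per request */
		if (!test_and_clear_bit(EXAMPLE_RESET_REQUESTED, &example_state))
			return;

		/* ... perform the reset ... */
	}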
@@ -2795,7 +2811,7 @@
 		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
 
 		if (qv->rx.ring || qv->tx.ring)
-			eics |= 1 << i;
+			eics |= BIT(i);
 	}
 
 	/* Cause software interrupt to ensure rings are cleaned */
@@ -2821,7 +2837,7 @@
 
 	/* if check for link returns error we will need to reset */
 	if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
-		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
 		link_up = false;
 	}
 
@@ -3122,7 +3138,7 @@
  * handler is registered with the OS, the watchdog timer is started,
  * and the stack is notified that the interface is ready.
  **/
-static int ixgbevf_open(struct net_device *netdev)
+int ixgbevf_open(struct net_device *netdev)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -3205,7 +3221,7 @@
  * needs to be disabled.  A global MAC reset is issued to stop the
  * hardware, and all transmit and receive resources are freed.
  **/
-static int ixgbevf_close(struct net_device *netdev)
+int ixgbevf_close(struct net_device *netdev)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
@@ -3222,11 +3238,10 @@
 {
 	struct net_device *dev = adapter->netdev;
 
-	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+	if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
+				&adapter->state))
 		return;
 
-	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
-
 	/* if interface is down do nothing */
 	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
 	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -3271,9 +3286,18 @@
 		       struct ixgbevf_tx_buffer *first,
 		       u8 *hdr_len)
 {
+	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
 	struct sk_buff *skb = first->skb;
-	u32 vlan_macip_lens, type_tucmd;
-	u32 mss_l4len_idx, l4len;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3286,49 +3310,53 @@
 	if (err < 0)
 		return err;
 
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_checksum_start(skb);
+
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-	if (first->protocol == htons(ETH_P_IP)) {
-		struct iphdr *iph = ip_hdr(skb);
-
-		iph->tot_len = 0;
-		iph->check = 0;
-		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-							 iph->daddr, 0,
-							 IPPROTO_TCP,
-							 0);
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+						  csum_unfold(l4.tcp->check)));
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+		ip.v4->tot_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM |
 				   IXGBE_TX_FLAGS_IPV4;
-	} else if (skb_is_gso_v6(skb)) {
-		ipv6_hdr(skb)->payload_len = 0;
-		tcp_hdr(skb)->check =
-		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-				     &ipv6_hdr(skb)->daddr,
-				     0, IPPROTO_TCP, 0);
+	} else {
+		ip.v6->payload_len = 0;
 		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
 				   IXGBE_TX_FLAGS_CSUM;
 	}
 
-	/* compute header lengths */
-	l4len = tcp_hdrlen(skb);
-	*hdr_len += l4len;
-	*hdr_len = skb_transport_offset(skb) + l4len;
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
 
-	/* update GSO size and bytecount with header size */
+	/* compute length of segmentation header */
+	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+	/* remove payload length from inner checksum */
+	paylen = skb->len - l4_offset;
+	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+
+	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
 	/* mss_l4len_id: use 1 as index for TSO */
-	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
-	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+	mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
 
 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-	vlan_macip_lens = skb_network_header_len(skb);
-	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens = l4.hdr - ip.hdr;
+	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
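The "remove payload length from inner checksum" step above adjusts the TCP pseudo-header checksum so the hardware can account for each segment's own length during TSO. The underlying arithmetic is RFC 1624 one's-complement incremental update; a self-contained sketch of removing a 16-bit field from a checksum (an illustrative helper, not the kernel's csum_replace_by_diff()):

	#include <stdint.h>

	/* Remove "value" from an RFC 1071 checksum "check" (stored complemented):
	 * HC' = ~(~HC + ~m), folding end-around carries.
	 */
	static uint16_t csum16_remove(uint16_t check, uint16_t value)
	{
		uint32_t sum = (uint16_t)~check;

		sum += (uint16_t)~value;		/* adding ~m subtracts m */
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);

		return (uint16_t)~sum;
	}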
@@ -3337,76 +3365,55 @@
 	return 1;
 }
 
+static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+	unsigned int offset = 0;
+
+	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+	return offset == skb_checksum_start_offset(skb);
+}
+
 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			    struct ixgbevf_tx_buffer *first)
 {
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
-	u32 mss_l4len_idx = 0;
 	u32 type_tucmd = 0;
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		u8 l4_hdr = 0;
-		__be16 frag_off;
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		goto no_csum;
 
-		switch (first->protocol) {
-		case htons(ETH_P_IP):
-			vlan_macip_lens |= skb_network_header_len(skb);
-			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
-			l4_hdr = ip_hdr(skb)->protocol;
-			break;
-		case htons(ETH_P_IPV6):
-			vlan_macip_lens |= skb_network_header_len(skb);
-			l4_hdr = ipv6_hdr(skb)->nexthdr;
-			if (likely(skb_network_header_len(skb) ==
-				   sizeof(struct ipv6hdr)))
-				break;
-			ipv6_skip_exthdr(skb, skb_network_offset(skb) +
-					      sizeof(struct ipv6hdr),
-					 &l4_hdr, &frag_off);
-			if (unlikely(frag_off))
-				l4_hdr = NEXTHDR_FRAGMENT;
-			break;
-		default:
+	switch (skb->csum_offset) {
+	case offsetof(struct tcphdr, check):
+		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+		/* fall through */
+	case offsetof(struct udphdr, check):
+		break;
+	case offsetof(struct sctphdr, checksum):
+		/* validate that this is actually an SCTP request */
+		if (((first->protocol == htons(ETH_P_IP)) &&
+		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+		    ((first->protocol == htons(ETH_P_IPV6)) &&
+		     ixgbevf_ipv6_csum_is_sctp(skb))) {
+			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
 			break;
 		}
-
-		switch (l4_hdr) {
-		case IPPROTO_TCP:
-			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-			mss_l4len_idx = tcp_hdrlen(skb) <<
-					IXGBE_ADVTXD_L4LEN_SHIFT;
-			break;
-		case IPPROTO_SCTP:
-			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
-			mss_l4len_idx = sizeof(struct sctphdr) <<
-					IXGBE_ADVTXD_L4LEN_SHIFT;
-			break;
-		case IPPROTO_UDP:
-			mss_l4len_idx = sizeof(struct udphdr) <<
-					IXGBE_ADVTXD_L4LEN_SHIFT;
-			break;
-		default:
-			if (unlikely(net_ratelimit())) {
-				dev_warn(tx_ring->dev,
-					 "partial checksum, l3 proto=%x, l4 proto=%x\n",
-					 first->protocol, l4_hdr);
-			}
-			skb_checksum_help(skb);
-			goto no_csum;
-		}
-
-		/* update TX checksum flag */
-		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+		/* fall through */
+	default:
+		skb_checksum_help(skb);
+		goto no_csum;
 	}
-
+	/* update TX checksum flag */
+	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+	vlan_macip_lens = skb_checksum_start_offset(skb) -
+			  skb_network_offset(skb);
 no_csum:
 	/* vlan_macip_lens: MACLEN, VLAN tag */
 	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
-			    type_tucmd, mss_l4len_idx);
+	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
 }
 
 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
@@ -3442,7 +3449,7 @@
 
 	/* use index 1 context for TSO/FSO/FCOE */
 	if (tx_flags & IXGBE_TX_FLAGS_TSO)
-		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+		olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
 
 	/* Check Context must be set if Tx switch is enabled, which it
 	 * always is for case where virtual functions are running
@@ -3692,19 +3699,23 @@
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct sockaddr *addr = p;
+	int err;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	ether_addr_copy(netdev->dev_addr, addr->sa_data);
-	ether_addr_copy(hw->mac.addr, addr->sa_data);
-
 	spin_lock_bh(&adapter->mbx_lock);
 
-	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+	err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
 
 	spin_unlock_bh(&adapter->mbx_lock);
 
+	if (err)
+		return -EPERM;
+
+	ether_addr_copy(hw->mac.addr, addr->sa_data);
+	ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
 	return 0;
 }
 
@@ -3743,7 +3754,7 @@
 	netdev->mtu = new_mtu;
 
 	/* notify the PF of our intent to use this size of frame */
-	ixgbevf_rlpml_set_vf(hw, max_frame);
+	hw->mac.ops.set_rlpml(hw, max_frame);
 
 	return 0;
 }
@@ -3886,6 +3897,40 @@
 	return stats;
 }
 
+#define IXGBEVF_MAX_MAC_HDR_LEN		127
+#define IXGBEVF_MAX_NETWORK_HDR_LEN	511
+
+static netdev_features_t
+ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
+		       netdev_features_t features)
+{
+	unsigned int network_hdr_len, mac_hdr_len;
+
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len >  IXGBEVF_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM |
+				    NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
+
+	return features;
+}
+
 static const struct net_device_ops ixgbevf_netdev_ops = {
 	.ndo_open		= ixgbevf_open,
 	.ndo_stop		= ixgbevf_close,
@@ -3904,7 +3949,7 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ixgbevf_netpoll,
 #endif
-	.ndo_features_check	= passthru_features_check,
+	.ndo_features_check	= ixgbevf_features_check,
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -4009,26 +4054,37 @@
 	}
 
 	netdev->hw_features = NETIF_F_SG |
-			      NETIF_F_IP_CSUM |
-			      NETIF_F_IPV6_CSUM |
 			      NETIF_F_TSO |
 			      NETIF_F_TSO6 |
-			      NETIF_F_RXCSUM;
+			      NETIF_F_RXCSUM |
+			      NETIF_F_HW_CSUM |
+			      NETIF_F_SCTP_CRC;
 
-	netdev->features = netdev->hw_features |
-			   NETIF_F_HW_VLAN_CTAG_TX |
-			   NETIF_F_HW_VLAN_CTAG_RX |
-			   NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+				      NETIF_F_GSO_GRE_CSUM | \
+				      NETIF_F_GSO_IPIP | \
+				      NETIF_F_GSO_SIT | \
+				      NETIF_F_GSO_UDP_TUNNEL | \
+				      NETIF_F_GSO_UDP_TUNNEL_CSUM)
 
-	netdev->vlan_features |= NETIF_F_TSO |
-				 NETIF_F_TSO6 |
-				 NETIF_F_IP_CSUM |
-				 NETIF_F_IPV6_CSUM |
-				 NETIF_F_SG;
+	netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
+	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+			       IXGBEVF_GSO_PARTIAL_FEATURES;
+
+	netdev->features = netdev->hw_features;
 
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+	netdev->hw_enc_features |= netdev->vlan_features;
+
+	/* set this bit last since it cannot be part of vlan_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
+
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
 	if (IXGBE_REMOVED(hw->hw_addr)) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index dc68fea..61a80da 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -346,3 +346,14 @@
 	.check_for_rst	= ixgbevf_check_for_rst_vf,
 };
 
+/* Mailbox operations when running on Hyper-V.
+ * On Hyper-V, PF/VF communication is not through the
+ * hardware mailbox; this communication is through
+ * a software-mediated path.
+ * Most mailbox operations are no-ops while running on
+ * Hyper-V.
+ */
+const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
+	.init_params	= ixgbevf_init_mbx_params_vf,
+	.check_for_rst	= ixgbevf_check_for_rst_vf,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 61a98f4..e670d3b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -27,6 +27,12 @@
 #include "vf.h"
 #include "ixgbevf.h"
 
+/* On Hyper-V, to reset, we need to read from this offset
+ * in the PCI config space. This is the mechanism used on
+ * Hyper-V to support PF/VF communication.
+ */
+#define IXGBE_HV_RESET_OFFSET           0x201
+
 /**
  *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
  *  @hw: pointer to hardware structure
@@ -126,6 +132,27 @@
 }
 
 /**
+ * Hyper-V variant; the VF/PF communication is through the PCI
+ * config space.
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
+	struct ixgbevf_adapter *adapter = hw->back;
+	int i;
+
+	for (i = 0; i < 6; i++)
+		pci_read_config_byte(adapter->pdev,
+				     (i + IXGBE_HV_RESET_OFFSET),
+				     &hw->mac.perm_addr[i]);
+	return 0;
+#else
+	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
+	return -EOPNOTSUPP;
+#endif
+}
+
+/**
  *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
  *  @hw: pointer to hardware structure
  *
@@ -258,6 +285,11 @@
 	return ret_val;
 }
 
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+	return -EOPNOTSUPP;
+}
+
 /**
  * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
  * @adapter: pointer to the port handle
@@ -408,12 +440,34 @@
 
 	/* if nacked the address was rejected, use "perm_addr" */
 	if (!ret_val &&
-	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
 		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+		return IXGBE_ERR_MBX;
+	}
 
 	return ret_val;
 }
 
+/**
+ *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: Unused in this implementation
+ *
+ * We don't really allow setting the device MAC address. However,
+ * if the address being set is the permanent MAC address, we will
+ * permit that.
+ **/
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+				 u32 vmdq)
+{
+	if (ether_addr_equal(addr, hw->mac.perm_addr))
+		return 0;
+
+	return -EOPNOTSUPP;
+}
+
 static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
 				       u32 *msg, u16 size)
 {
@@ -471,15 +525,22 @@
 }
 
 /**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+					     struct net_device *netdev)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
  *  ixgbevf_update_xcast_mode - Update Multicast mode
  *  @hw: pointer to the HW structure
- *  @netdev: pointer to net device structure
  *  @xcast_mode: new multicast mode
  *
  *  Updates the Multicast Mode of VF.
  **/
-static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
-				     struct net_device *netdev, int xcast_mode)
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 	u32 msgbuf[2];
@@ -511,6 +572,14 @@
 }
 
 /**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
  *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
  *  @hw: pointer to the HW structure
  *  @vlan: 12 bit VLAN ID
@@ -549,6 +618,15 @@
 }
 
 /**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+				  bool vlan_on)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
  *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
  *  @hw: pointer to hardware structure
  *  @speed: Unused in this implementation
@@ -654,11 +732,72 @@
 }
 
 /**
- *  ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+					ixgbe_link_speed *speed,
+					bool *link_up,
+					bool autoneg_wait_to_complete)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	struct ixgbe_mac_info *mac = &hw->mac;
+	u32 links_reg;
+
+	/* If we were hit with a reset drop the link */
+	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+		mac->get_link_status = true;
+
+	if (!mac->get_link_status)
+		goto out;
+
+	/* if link status is down no point in checking to see if pf is up */
+	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+	if (!(links_reg & IXGBE_LINKS_UP))
+		goto out;
+
+	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+	 * before the link status is correct
+	 */
+	if (mac->type == ixgbe_mac_82599_vf) {
+		int i;
+
+		for (i = 0; i < 5; i++) {
+			udelay(100);
+			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+			if (!(links_reg & IXGBE_LINKS_UP))
+				goto out;
+		}
+	}
+
+	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+	case IXGBE_LINKS_SPEED_10G_82599:
+		*speed = IXGBE_LINK_SPEED_10GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_1G_82599:
+		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_100_82599:
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+		break;
+	}
+
+	/* if we passed all the tests above then the link is up and we no
+	 * longer need to check for link
+	 */
+	mac->get_link_status = false;
+
+out:
+	*link_up = !mac->get_link_status;
+	return 0;
+}
+
+/**
+ *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
  *  @hw: pointer to the HW structure
  *  @max_size: value to assign to max frame size
  **/
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
 {
 	u32 msgbuf[2];
 
@@ -668,11 +807,30 @@
 }
 
 /**
- *  ixgbevf_negotiate_api_version - Negotiate supported API version
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+	u32 reg;
+
+	/* If we are on Hyper-V, we implement this functionality
+	 * differently.
+	 */
+	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+	/* CRC == 4 */
+	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+}
+
+/**
+ *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
  *  @hw: pointer to the HW structure
  *  @api: integer containing requested API version
  **/
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
 {
 	int err;
 	u32 msg[3];
@@ -701,6 +859,21 @@
 	return err;
 }
 
+/**
+ *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ *  @hw: pointer to the HW structure
+ *  @api: integer containing requested API version
+ *  Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
+	if (api != ixgbe_mbox_api_10)
+		return IXGBE_ERR_INVALID_ARGUMENT;
+
+	return 0;
+}
+
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 		       unsigned int *default_tc)
 {
@@ -767,11 +940,30 @@
 	.stop_adapter		= ixgbevf_stop_hw_vf,
 	.setup_link		= ixgbevf_setup_mac_link_vf,
 	.check_link		= ixgbevf_check_mac_link_vf,
+	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
 	.set_rar		= ixgbevf_set_rar_vf,
 	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
 	.update_xcast_mode	= ixgbevf_update_xcast_mode,
 	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
 	.set_vfta		= ixgbevf_set_vfta_vf,
+	.set_rlpml		= ixgbevf_set_rlpml_vf,
+};
+
+static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
+	.init_hw		= ixgbevf_init_hw_vf,
+	.reset_hw		= ixgbevf_hv_reset_hw_vf,
+	.start_hw		= ixgbevf_start_hw_vf,
+	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
+	.stop_adapter		= ixgbevf_stop_hw_vf,
+	.setup_link		= ixgbevf_setup_mac_link_vf,
+	.check_link		= ixgbevf_hv_check_mac_link_vf,
+	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
+	.set_rar		= ixgbevf_hv_set_rar_vf,
+	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
+	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
+	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
+	.set_vfta		= ixgbevf_hv_set_vfta_vf,
+	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
 };
 
 const struct ixgbevf_info ixgbevf_82599_vf_info = {
@@ -779,17 +971,37 @@
 	.mac_ops = &ixgbevf_mac_ops,
 };
 
+const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
+	.mac = ixgbe_mac_82599_vf,
+	.mac_ops = &ixgbevf_hv_mac_ops,
+};
+
 const struct ixgbevf_info ixgbevf_X540_vf_info = {
 	.mac = ixgbe_mac_X540_vf,
 	.mac_ops = &ixgbevf_mac_ops,
 };
 
+const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
+	.mac = ixgbe_mac_X540_vf,
+	.mac_ops = &ixgbevf_hv_mac_ops,
+};
+
 const struct ixgbevf_info ixgbevf_X550_vf_info = {
 	.mac = ixgbe_mac_X550_vf,
 	.mac_ops = &ixgbevf_mac_ops,
 };
 
+const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
+	.mac = ixgbe_mac_X550_vf,
+	.mac_ops = &ixgbevf_hv_mac_ops,
+};
+
 const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
 	.mac = ixgbe_mac_X550EM_x_vf,
 	.mac_ops = &ixgbevf_mac_ops,
 };
+
+const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
+	.mac = ixgbe_mac_X550EM_x_vf,
+	.mac_ops = &ixgbevf_hv_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index ef9f773..2cac610 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -51,6 +51,7 @@
 	s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
 	s32 (*stop_adapter)(struct ixgbe_hw *);
 	s32 (*get_bus_info)(struct ixgbe_hw *);
+	s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
 
 	/* Link */
 	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -63,11 +64,12 @@
 	s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
 	s32 (*init_rx_addrs)(struct ixgbe_hw *);
 	s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
-	s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
+	s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
 	s32 (*enable_mc)(struct ixgbe_hw *);
 	s32 (*disable_mc)(struct ixgbe_hw *);
 	s32 (*clear_vfta)(struct ixgbe_hw *);
 	s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+	void (*set_rlpml)(struct ixgbe_hw *, u16);
 };
 
 enum ixgbe_mac_type {
@@ -207,8 +209,6 @@
 
 #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
 
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 		       unsigned int *default_tc);
 int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 3ddf657..836ebd8 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -222,7 +222,7 @@
 	jwrite32f(jme, JME_GHC, jme->reg_ghc);
 }
 
-static inline void
+static void
 jme_reset_mac_processor(struct jme_adapter *jme)
 {
 	static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d74f5f4..1799fe1 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -152,7 +152,7 @@
 	       writel(0x10, &ch->dmac);
 
 	       while (!(readl(&ch->dmas) & DMA_STAT_HALT))
-		       dev->trans_start = jiffies;
+		       netif_trans_update(dev);
 
 	       writel(0, &ch->dmas);
        }
@@ -283,7 +283,7 @@
 	}
 	dma_cache_wback((u32) td, sizeof(*td));
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	spin_unlock_irqrestore(&lp->lock, flags);
 
 	return NETDEV_TX_OK;
@@ -622,7 +622,7 @@
 				&(lp->tx_dma_regs->dmandptr));
 			lp->tx_chain_status = desc_empty;
 			lp->tx_chain_head = lp->tx_chain_tail;
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 		}
 		if (dmas & DMA_STAT_ERR)
 			printk(KERN_ERR "%s: DMA error\n", dev->name);
@@ -811,7 +811,7 @@
 	/* reset ethernet logic */
 	writel(0, &lp->eth_regs->ethintfc);
 	while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 
 	/* Enable Ethernet Interface */
 	writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
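netif_trans_update() replaces the open-coded writes to dev->trans_start here and in the other drivers below; the watchdog timestamp now lives in the per-queue netdev_queue, and the helper's effect is roughly the following sketch (queue 0 only):

	#include <linux/jiffies.h>
	#include <linux/netdevice.h>

	static inline void example_trans_update(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* refresh the Tx watchdog timestamp only when it changes */
		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;
	}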
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index b630ef1..dc82b1b 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -519,7 +519,7 @@
 	byte_offset = CPHYSADDR(skb->data) % 16;
 	ch->skb[ch->dma.desc] = skb;
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	spin_lock_irqsave(&priv->lock, flags);
 	desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
@@ -657,7 +657,7 @@
 	err = ltq_etop_hw_init(dev);
 	if (err)
 		goto err_hw;
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	netif_wake_queue(dev);
 	return;
 
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index b5c6d42..2664827 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -68,7 +68,7 @@
 
 config MVNETA_BM
 	tristate
-	default y if MVNETA=y && MVNETA_BM_ENABLE
+	default y if MVNETA=y && MVNETA_BM_ENABLE!=n
 	default MVNETA_BM_ENABLE
 	select HWBM
 	help
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 577f7ca..a6d26d3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -260,7 +260,6 @@
 
 #define MVNETA_VLAN_TAG_LEN             4
 
-#define MVNETA_CPU_D_CACHE_LINE_SIZE    32
 #define MVNETA_TX_CSUM_DEF_SIZE		1600
 #define MVNETA_TX_CSUM_MAX_SIZE		9800
 #define MVNETA_ACC_MODE_EXT1		1
@@ -300,7 +299,7 @@
 #define MVNETA_RX_PKT_SIZE(mtu) \
 	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
 	      ETH_HLEN + ETH_FCS_LEN,			     \
-	      MVNETA_CPU_D_CACHE_LINE_SIZE)
+	      cache_line_size())
 
 #define IS_TSO_HEADER(txq, addr) \
 	((addr >= txq->tso_hdrs_phys) && \
@@ -2764,9 +2763,6 @@
 	if (rxq->descs == NULL)
 		return -ENOMEM;
 
-	BUG_ON(rxq->descs !=
-	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
-
 	rxq->last_desc = rxq->size - 1;
 
 	/* Set Rx descriptors queue starting address */
@@ -2837,10 +2833,6 @@
 	if (txq->descs == NULL)
 		return -ENOMEM;
 
-	/* Make sure descriptor address is cache line size aligned  */
-	BUG_ON(txq->descs !=
-	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
-
 	txq->last_desc = txq->size - 1;
 
 	/* Set maximum bandwidth for enabled TXQs */
@@ -3050,6 +3042,20 @@
 	return mtu;
 }
 
+static void mvneta_percpu_enable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	disable_percpu_irq(pp->dev->irq);
+}
+
 /* Change the device mtu */
 static int mvneta_change_mtu(struct net_device *dev, int mtu)
 {
@@ -3074,6 +3080,7 @@
 	 * reallocation of the queues
 	 */
 	mvneta_stop_dev(pp);
+	on_each_cpu(mvneta_percpu_disable, pp, true);
 
 	mvneta_cleanup_txqs(pp);
 	mvneta_cleanup_rxqs(pp);
@@ -3097,6 +3104,7 @@
 		return ret;
 	}
 
+	on_each_cpu(mvneta_percpu_enable, pp, true);
 	mvneta_start_dev(pp);
 	mvneta_port_up(pp);
 
@@ -3250,20 +3258,6 @@
 	pp->phy_dev = NULL;
 }
 
-static void mvneta_percpu_enable(void *arg)
-{
-	struct mvneta_port *pp = arg;
-
-	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
-}
-
-static void mvneta_percpu_disable(void *arg)
-{
-	struct mvneta_port *pp = arg;
-
-	disable_percpu_irq(pp->dev->irq);
-}
-
 /* Electing a CPU must be done in an atomic way: it should be done
  * after or before the removal/insertion of a CPU and this function is
  * not reentrant.
@@ -3360,8 +3354,7 @@
 		/* Enable per-CPU interrupts on the CPU that is
 		 * brought up.
 		 */
-		smp_call_function_single(cpu, mvneta_percpu_enable,
-					 pp, true);
+		mvneta_percpu_enable(pp);
 
 		/* Enable per-CPU interrupt on the one CPU we care
 		 * about.
@@ -3393,8 +3386,7 @@
 		/* Disable per-CPU interrupts on the CPU that is
 		 * brought down.
 		 */
-		smp_call_function_single(cpu, mvneta_percpu_disable,
-					 pp, true);
+		mvneta_percpu_disable(pp);
 
 		break;
 	case CPU_DEAD:
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index c797971a..868a957 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -321,7 +321,6 @@
 /* Lbtd 802.3 type */
 #define MVPP2_IP_LBDT_TYPE		0xfffa
 
-#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
 #define MVPP2_TX_CSUM_MAX_SIZE		9800
 
 /* Timeout constants */
@@ -377,7 +376,7 @@
 
 #define MVPP2_RX_PKT_SIZE(mtu) \
 	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
-	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
+	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())
 
 #define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
 #define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
@@ -4493,10 +4492,6 @@
 	if (!aggr_txq->descs)
 		return -ENOMEM;
 
-	/* Make sure descriptor address is cache line size aligned  */
-	BUG_ON(aggr_txq->descs !=
-	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
 	aggr_txq->last_desc = aggr_txq->size - 1;
 
 	/* Aggr TXQ no reset WA */
@@ -4526,9 +4521,6 @@
 	if (!rxq->descs)
 		return -ENOMEM;
 
-	BUG_ON(rxq->descs !=
-	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
 	rxq->last_desc = rxq->size - 1;
 
 	/* Zero occupied and non-occupied counters - direct access */
@@ -4616,10 +4608,6 @@
 	if (!txq->descs)
 		return -ENOMEM;
 
-	/* Make sure descriptor address is cache line size aligned  */
-	BUG_ON(txq->descs !=
-	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
 	txq->last_desc = txq->size - 1;
 
 	/* Set Tx descriptors queue starting address - indirect access */
@@ -6059,8 +6047,10 @@
 
 		/* Map physical Rx queue to port's logical Rx queue */
 		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
-		if (!rxq)
+		if (!rxq) {
+			err = -ENOMEM;
 			goto err_free_percpu;
+		}
 		/* Map this Rx queue to a physical queue */
 		rxq->id = port->first_rxq + queue;
 		rxq->port = port->id;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 7ace07d..54d5154 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -286,12 +286,12 @@
 
 static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
 {
-	return readl(pep->base + offset);
+	return readl_relaxed(pep->base + offset);
 }
 
 static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
 {
-	writel(data, pep->base + offset);
+	writel_relaxed(data, pep->base + offset);
 }
 
 static void abort_dma(struct pxa168_eth_private *pep)
@@ -342,9 +342,9 @@
 		pep->rx_skb[used_rx_desc] = skb;
 
 		/* Return the descriptor to DMA ownership */
-		wmb();
+		dma_wmb();
 		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
-		wmb();
+		dma_wmb();
 
 		/* Move the used descriptor pointer to the next descriptor */
 		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
@@ -794,7 +794,7 @@
 		rx_used_desc = pep->rx_used_desc_q;
 		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
 		cmd_sts = rx_desc->cmd_sts;
-		rmb();
+		dma_rmb();
 		if (cmd_sts & (BUF_OWNED_BY_DMA))
 			break;
 		skb = pep->rx_skb[rx_curr_desc];
@@ -979,8 +979,8 @@
 		return 0;
 
 	pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
-	if (!pep->phy)
-		return -ENODEV;
+	if (IS_ERR(pep->phy))
+		return PTR_ERR(pep->phy);
 
 	err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link,
 				 pep->phy_intf);
@@ -1287,7 +1287,7 @@
 
 	skb_tx_timestamp(skb);
 
-	wmb();
+	dma_wmb();
 	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
 			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
 	wmb();
@@ -1295,7 +1295,7 @@
 
 	stats->tx_bytes += length;
 	stats->tx_packets++;
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
 		/* We handled the current skb, but now we are out of space.*/
 		netif_stop_queue(dev);
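The wmb()/rmb() pairs converted above only have to order CPU accesses against coherent DMA memory, which the lighter dma_wmb()/dma_rmb() barriers guarantee. A minimal sketch of the descriptor hand-off they protect (hypothetical descriptor layout; only the barrier placement mirrors the driver):

	#include <linux/bitops.h>
	#include <linux/types.h>
	#include <asm/barrier.h>

	#define EXAMPLE_OWNED_BY_DMA	BIT(31)

	struct example_desc {
		u32 cmd_sts;		/* ownership bit is handed over last */
		u32 buf_addr;
	};

	static void example_give_to_hw(struct example_desc *d, u32 buf)
	{
		d->buf_addr = buf;
		dma_wmb();			/* publish payload before OWN */
		d->cmd_sts = EXAMPLE_OWNED_BY_DMA;
	}

	static bool example_take_from_hw(const struct example_desc *d)
	{
		if (d->cmd_sts & EXAMPLE_OWNED_BY_DMA)
			return false;		/* still owned by hardware */
		dma_rmb();			/* read payload only after !OWN */
		/* ... consume d->buf_addr ... */
		return true;
	}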
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ec0a221..467138b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2418,7 +2418,7 @@
 	sky2_write32(hw, B0_IMSK, 0);
 	sky2_read32(hw, B0_IMSK);
 
-	dev->trans_start = jiffies;	/* prevent tx timeout */
+	netif_trans_update(dev);	/* prevent tx timeout */
 	napi_disable(&hw->napi);
 	netif_tx_disable(dev);
 
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e0b68af..c984462 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -536,7 +536,6 @@
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_dma *itxd, *txd;
 	struct mtk_tx_buf *tx_buf;
-	unsigned long flags;
 	dma_addr_t mapped_addr;
 	unsigned int nr_frags;
 	int i, n_desc = 1;
@@ -568,11 +567,6 @@
 	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
 		return -ENOMEM;
 
-	/* normally we can rely on the stack not calling this more than once,
-	 * however we have 2 queues running ont he same ring so we need to lock
-	 * the ring access
-	 */
-	spin_lock_irqsave(&eth->page_lock, flags);
 	WRITE_ONCE(itxd->txd1, mapped_addr);
 	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
 	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -609,8 +603,7 @@
 			WRITE_ONCE(txd->txd1, mapped_addr);
 			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
 					       TX_DMA_PLEN0(frag_map_size) |
-					       last_frag * TX_DMA_LS0) |
-					       mac->id);
+					       last_frag * TX_DMA_LS0));
 			WRITE_ONCE(txd->txd4, 0);
 
 			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -632,8 +625,6 @@
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
 				(!nr_frags * TX_DMA_LS0)));
 
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	netdev_sent_queue(dev, skb->len);
 	skb_tx_timestamp(skb);
 
@@ -661,8 +652,6 @@
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
 	} while (itxd != txd);
 
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	return -ENOMEM;
 }
 
@@ -681,7 +670,29 @@
 		nfrags += skb_shinfo(skb)->nr_frags;
 	}
 
-	return DIV_ROUND_UP(nfrags, 2);
+	return nfrags;
+}
+
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		netif_wake_queue(eth->netdev[i]);
+	}
+}
+
+static void mtk_stop_queue(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		netif_stop_queue(eth->netdev[i]);
+	}
 }
 
 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -690,14 +701,22 @@
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct net_device_stats *stats = &dev->stats;
+	unsigned long flags;
 	bool gso = false;
 	int tx_num;
 
+	/* normally we can rely on the stack not calling this more than once,
+	 * however we have 2 queues running on the same ring so we need to lock
+	 * the ring access
+	 */
+	spin_lock_irqsave(&eth->page_lock, flags);
+
 	tx_num = mtk_cal_txd_req(skb);
 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
-		netif_stop_queue(dev);
+		mtk_stop_queue(eth);
 		netif_err(eth, tx_queued, dev,
 			  "Tx Ring full when queue awake!\n");
+		spin_unlock_irqrestore(&eth->page_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -720,15 +739,17 @@
 		goto drop;
 
 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
-		netif_stop_queue(dev);
+		mtk_stop_queue(eth);
 		if (unlikely(atomic_read(&ring->free_count) >
 			     ring->thresh))
-			netif_wake_queue(dev);
+			mtk_wake_queue(eth);
 	}
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 
 	return NETDEV_TX_OK;
 
 drop:
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 	stats->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
@@ -897,13 +918,8 @@
 	if (!total)
 		return 0;
 
-	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->netdev[i] ||
-		    unlikely(!netif_queue_stopped(eth->netdev[i])))
-			continue;
-		if (atomic_read(&ring->free_count) > ring->thresh)
-			netif_wake_queue(eth->netdev[i]);
-	}
+	if (atomic_read(&ring->free_count) > ring->thresh)
+		mtk_wake_queue(eth);
 
 	return total;
 }
@@ -1176,7 +1192,7 @@
 	eth->netdev[mac->id]->stats.tx_errors++;
 	netif_err(eth, tx_err, dev,
 		  "transmit timed out\n");
-	schedule_work(&mac->pending_work);
+	schedule_work(&eth->pending_work);
 }
 
 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
@@ -1413,19 +1429,30 @@
 
 static void mtk_pending_work(struct work_struct *work)
 {
-	struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
-	struct mtk_eth *eth = mac->hw;
-	struct net_device *dev = eth->netdev[mac->id];
-	int err;
+	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+	int err, i;
+	unsigned long restart = 0;
 
 	rtnl_lock();
-	mtk_stop(dev);
 
-	err = mtk_open(dev);
-	if (err) {
-		netif_alert(eth, ifup, dev,
-			    "Driver up/down cycle failed, closing device.\n");
-		dev_close(dev);
+	/* stop all devices to make sure that dma is properly shut down */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		mtk_stop(eth->netdev[i]);
+		__set_bit(i, &restart);
+	}
+
+	/* restart DMA and enable IRQs */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!test_bit(i, &restart))
+			continue;
+		err = mtk_open(eth->netdev[i]);
+		if (err) {
+			netif_alert(eth, ifup, eth->netdev[i],
+			      "Driver up/down cycle failed, closing device.\n");
+			dev_close(eth->netdev[i]);
+		}
 	}
 	rtnl_unlock();
 }
@@ -1435,15 +1462,13 @@
 	int i;
 
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
 		if (!eth->netdev[i])
 			continue;
 
 		unregister_netdev(eth->netdev[i]);
 		free_netdev(eth->netdev[i]);
-		cancel_work_sync(&mac->pending_work);
 	}
+	cancel_work_sync(&eth->pending_work);
 
 	return 0;
 }
@@ -1631,7 +1656,6 @@
 	mac->id = id;
 	mac->hw = eth;
 	mac->of_node = np;
-	INIT_WORK(&mac->pending_work, mtk_pending_work);
 
 	mac->hw_stats = devm_kzalloc(eth->dev,
 				     sizeof(*mac->hw_stats),
@@ -1645,6 +1669,7 @@
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+	eth->netdev[id]->watchdog_timeo = HZ;
 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
 	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
@@ -1678,10 +1703,6 @@
 	struct mtk_eth *eth;
 	int err;
 
-	err = device_reset(&pdev->dev);
-	if (err)
-		return err;
-
 	match = of_match_device(of_mtk_match, &pdev->dev);
 	soc = (struct mtk_soc_data *)match->data;
 
@@ -1736,6 +1757,7 @@
 
 	eth->dev = &pdev->dev;
 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+	INIT_WORK(&eth->pending_work, mtk_pending_work);
 
 	err = mtk_hw_init(eth);
 	if (err)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 48a5292..eed626d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -363,6 +363,7 @@
  * @clk_gp1:		The gmac1 clock
  * @clk_gp2:		The gmac2 clock
  * @mii_bus:		If there is a bus we need to create an instance for it
+ * @pending_work:	The work struct used to reset the DMA ring
  */
 
 struct mtk_eth {
@@ -389,6 +390,7 @@
 	struct clk			*clk_gp1;
 	struct clk			*clk_gp2;
 	struct mii_bus			*mii_bus;
+	struct work_struct		pending_work;
 };
 
 /* struct mtk_mac -	the structure that holds the info about the MACs of the
@@ -398,7 +400,6 @@
  * @hw:			Backpointer to our main datastruture
  * @hw_stats:		Packet statistics counter
  * @phy_dev:		The attached PHY if available
- * @pending_work:	The workqueue used to reset the dma ring
  */
 struct mtk_mac {
 	int				id;
@@ -406,7 +407,6 @@
 	struct mtk_eth			*hw;
 	struct mtk_hw_stats		*hw_stats;
 	struct phy_device		*phy_dev;
-	struct work_struct		pending_work;
 };
 
 /* the struct describing the SoC. these are declared in the soc_xyz.c files */
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 0c51c69..249a458 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -576,41 +576,48 @@
 
 	return res;
 }
-/*
- * Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0.  If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
- */
 
-int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
-		   struct mlx4_buf *buf, gfp_t gfp)
+static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
+				 struct mlx4_buf *buf, gfp_t gfp)
 {
 	dma_addr_t t;
 
+	buf->nbufs        = 1;
+	buf->npages       = 1;
+	buf->page_shift   = get_order(size) + PAGE_SHIFT;
+	buf->direct.buf   =
+		dma_zalloc_coherent(&dev->persist->pdev->dev,
+				    size, &t, gfp);
+	if (!buf->direct.buf)
+		return -ENOMEM;
+
+	buf->direct.map = t;
+
+	while (t & ((1 << buf->page_shift) - 1)) {
+		--buf->page_shift;
+		buf->npages *= 2;
+	}
+
+	return 0;
+}
+
+/* Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0. If the
+ *  requested size is > max_direct, we split the allocation into
+ *  multiple pages, so we don't require too much contiguous memory.
+ */
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+		   struct mlx4_buf *buf, gfp_t gfp)
+{
 	if (size <= max_direct) {
-		buf->nbufs        = 1;
-		buf->npages       = 1;
-		buf->page_shift   = get_order(size) + PAGE_SHIFT;
-		buf->direct.buf   = dma_alloc_coherent(&dev->persist->pdev->dev,
-						       size, &t, gfp);
-		if (!buf->direct.buf)
-			return -ENOMEM;
-
-		buf->direct.map = t;
-
-		while (t & ((1 << buf->page_shift) - 1)) {
-			--buf->page_shift;
-			buf->npages *= 2;
-		}
-
-		memset(buf->direct.buf, 0, size);
+		return mlx4_buf_direct_alloc(dev, size, buf, gfp);
 	} else {
+		dma_addr_t t;
 		int i;
 
-		buf->direct.buf  = NULL;
-		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-		buf->npages      = buf->nbufs;
+		buf->direct.buf = NULL;
+		buf->nbufs	= (size + PAGE_SIZE - 1) / PAGE_SIZE;
+		buf->npages	= buf->nbufs;
 		buf->page_shift  = PAGE_SHIFT;
 		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
 					   gfp);
@@ -619,28 +626,12 @@
 
 		for (i = 0; i < buf->nbufs; ++i) {
 			buf->page_list[i].buf =
-				dma_alloc_coherent(&dev->persist->pdev->dev,
-						   PAGE_SIZE,
-						   &t, gfp);
+				dma_zalloc_coherent(&dev->persist->pdev->dev,
+						    PAGE_SIZE, &t, gfp);
 			if (!buf->page_list[i].buf)
 				goto err_free;
 
 			buf->page_list[i].map = t;
-
-			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
-		}
-
-		if (BITS_PER_LONG == 64) {
-			struct page **pages;
-			pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
-			if (!pages)
-				goto err_free;
-			for (i = 0; i < buf->nbufs; ++i)
-				pages[i] = virt_to_page(buf->page_list[i].buf);
-			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-			kfree(pages);
-			if (!buf->direct.buf)
-				goto err_free;
 		}
 	}
 
@@ -655,15 +646,11 @@
 
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 {
-	int i;
-
-	if (buf->nbufs == 1)
+	if (buf->nbufs == 1) {
 		dma_free_coherent(&dev->persist->pdev->dev, size,
-				  buf->direct.buf,
-				  buf->direct.map);
-	else {
-		if (BITS_PER_LONG == 64)
-			vunmap(buf->direct.buf);
+				  buf->direct.buf, buf->direct.map);
+	} else {
+		int i;
 
 		for (i = 0; i < buf->nbufs; ++i)
 			if (buf->page_list[i].buf)
@@ -789,7 +776,7 @@
 EXPORT_SYMBOL_GPL(mlx4_db_free);
 
 int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
-		       int size, int max_direct)
+		       int size)
 {
 	int err;
 
@@ -799,7 +786,7 @@
 
 	*wqres->db.db = 0;
 
-	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
+	err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL);
 	if (err)
 		goto err_db;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index af975a2..132cea6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -73,22 +73,16 @@
 	 */
 	set_dev_node(&mdev->dev->persist->pdev->dev, node);
 	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
-				cq->buf_size, 2 * PAGE_SIZE);
+				cq->buf_size);
 	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
 	if (err)
 		goto err_cq;
 
-	err = mlx4_en_map_buffer(&cq->wqres.buf);
-	if (err)
-		goto err_res;
-
 	cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
 	*pcq = cq;
 
 	return 0;
 
-err_res:
-	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
 err_cq:
 	kfree(cq);
 	*pcq = NULL;
@@ -177,7 +171,6 @@
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_cq *cq = *pcq;
 
-	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
 	if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
 	    cq->is_tx == RX)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f69584a..c761194 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,7 +337,7 @@
 	case ETH_SS_STATS:
 		return bitmap_iterator_count(&it) +
 			(priv->tx_ring_num * 2) +
-			(priv->rx_ring_num * 2);
+			(priv->rx_ring_num * 3);
 	case ETH_SS_TEST:
 		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
 					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		data[index++] = priv->rx_ring[i]->packets;
 		data[index++] = priv->rx_ring[i]->bytes;
+		data[index++] = priv->rx_ring[i]->dropped;
 	}
 	spin_unlock_bh(&priv->stats_lock);
 
@@ -477,6 +478,8 @@
 				"rx%d_packets", i);
 			sprintf(data + (index++) * ETH_GSTRING_LEN,
 				"rx%d_bytes", i);
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"rx%d_dropped", i);
 		}
 		break;
 	case ETH_SS_PRIV_FLAGS:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index b4b258c..92e0624 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1856,6 +1856,7 @@
 
 	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
 
+	rtnl_lock();
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		mlx4_en_stop_port(dev, 1);
@@ -1863,6 +1864,7 @@
 			en_err(priv, "Failed restarting port %d\n", priv->port);
 	}
 	mutex_unlock(&mdev->state_lock);
+	rtnl_unlock();
 }
 
 static void mlx4_en_clear_stats(struct net_device *dev)
@@ -2355,8 +2357,12 @@
 	}
 
 	/* set offloads */
-	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+				      NETIF_F_RXCSUM |
+				      NETIF_F_TSO | NETIF_F_TSO6 |
+				      NETIF_F_GSO_UDP_TUNNEL |
+				      NETIF_F_GSO_UDP_TUNNEL_CSUM |
+				      NETIF_F_GSO_PARTIAL;
 }
 
 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2365,8 +2371,12 @@
 	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
 						 vxlan_del_task);
 	/* unset offloads */
-	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+					NETIF_F_RXCSUM |
+					NETIF_F_TSO | NETIF_F_TSO6 |
+					NETIF_F_GSO_UDP_TUNNEL |
+					NETIF_F_GSO_UDP_TUNNEL_CSUM |
+					NETIF_F_GSO_PARTIAL);
 
 	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
 				  VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2425,7 +2435,18 @@
 						netdev_features_t features)
 {
 	features = vlan_features_check(skb, features);
-	return vxlan_features_check(skb, features);
+	features = vxlan_features_check(skb, features);
+
+	/* The ConnectX-3 doesn't support outer IPv6 checksums but it does
+	 * support inner IPv6 checksums and segmentation so we need to
+	 * strip that feature if this is an IPv6 encapsulated frame.
+	 */
+	if (skb->encapsulation &&
+	    (skb->ip_summed == CHECKSUM_PARTIAL) &&
+	    (ip_hdr(skb)->version != 4))
+		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+	return features;
 }
 #endif
 
@@ -2907,7 +2928,7 @@
 
 	/* Allocate page for receive rings */
 	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
-				MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+				MLX4_EN_PAGE_SIZE);
 	if (err) {
 		en_err(priv, "Failed to allocate page for rx qps\n");
 		goto out;
@@ -2990,8 +3011,13 @@
 	}
 
 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
-		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-		dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
+		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+				    NETIF_F_GSO_UDP_TUNNEL_CSUM |
+				    NETIF_F_GSO_PARTIAL;
+		dev->features    |= NETIF_F_GSO_UDP_TUNNEL |
+				    NETIF_F_GSO_UDP_TUNNEL_CSUM |
+				    NETIF_F_GSO_PARTIAL;
+		dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
 	mdev->pndev[port] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3904b5f..20b6c2e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -158,6 +158,7 @@
 	u64 in_mod = reset << 8 | port;
 	int err;
 	int i, counter_index;
+	unsigned long sw_rx_dropped = 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		stats->rx_packets += priv->rx_ring[i]->packets;
 		stats->rx_bytes += priv->rx_ring[i]->bytes;
+		sw_rx_dropped += priv->rx_ring[i]->dropped;
 		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
 		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
 		priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@
 					  &mlx4_en_stats->MCAST_prio_1,
 					  NUM_PRIORITIES);
 	stats->collisions = 0;
-	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+			    sw_rx_dropped;
 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
 	stats->rx_over_errors = 0;
 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 02e925d..a6b0db0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -107,37 +107,6 @@
 	return ret;
 }
 
-int mlx4_en_map_buffer(struct mlx4_buf *buf)
-{
-	struct page **pages;
-	int i;
-
-	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-		return 0;
-
-	pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-
-	for (i = 0; i < buf->nbufs; ++i)
-		pages[i] = virt_to_page(buf->page_list[i].buf);
-
-	buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-	kfree(pages);
-	if (!buf->direct.buf)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
-{
-	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-		return;
-
-	vunmap(buf->direct.buf);
-}
-
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
 {
     return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 86bcfe5..c1b3a9c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -61,7 +61,7 @@
 		gfp_t gfp = _gfp;
 
 		if (order)
-			gfp |= __GFP_COMP | __GFP_NOWARN;
+			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
 		page = alloc_pages(gfp, order);
 		if (likely(page))
 			break;
@@ -126,7 +126,9 @@
 			dma_unmap_page(priv->ddev, page_alloc[i].dma,
 				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
 			page = page_alloc[i].page;
-			set_page_count(page, 1);
+			/* Revert changes done by mlx4_alloc_pages */
+			page_ref_sub(page, page_alloc[i].page_size /
+					   priv->frag_info[i].frag_stride - 1);
 			put_page(page);
 		}
 	}
@@ -176,7 +178,9 @@
 		dma_unmap_page(priv->ddev, page_alloc->dma,
 			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
 		page = page_alloc->page;
-		set_page_count(page, 1);
+		/* Revert changes done by mlx4_alloc_pages */
+		page_ref_sub(page, page_alloc->page_size /
+				   priv->frag_info[i].frag_stride - 1);
 		put_page(page);
 		page_alloc->page = NULL;
 	}
@@ -390,17 +394,11 @@
 
 	/* Allocate HW buffers on provided NUMA node */
 	set_dev_node(&mdev->dev->persist->pdev->dev, node);
-	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
-				 ring->buf_size, 2 * PAGE_SIZE);
+	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
 	if (err)
 		goto err_info;
 
-	err = mlx4_en_map_buffer(&ring->wqres.buf);
-	if (err) {
-		en_err(priv, "Failed to map RX buffer\n");
-		goto err_hwq;
-	}
 	ring->buf = ring->wqres.buf.direct.buf;
 
 	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
@@ -408,8 +406,6 @@
 	*pring = ring;
 	return 0;
 
-err_hwq:
-	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 err_info:
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
@@ -513,7 +509,6 @@
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rx_ring *ring = *pring;
 
-	mlx4_en_unmap_buffer(&ring->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
@@ -703,7 +698,7 @@
 
 	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
 		return -1;
-	hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
+	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
 
 	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
 				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
@@ -939,7 +934,7 @@
 		/* GRO not possible, complete processing here */
 		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
 		if (!skb) {
-			priv->stats.rx_dropped++;
+			ring->dropped++;
 			goto next;
 		}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c0d7b72..f6e6157 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -41,6 +41,7 @@
 #include <linux/vmalloc.h>
 #include <linux/tcp.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/moduleparam.h>
 
 #include "mlx4_en.h"
@@ -93,20 +94,13 @@
 
 	/* Allocate HW buffers on provided NUMA node */
 	set_dev_node(&mdev->dev->persist->pdev->dev, node);
-	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
-				 2 * PAGE_SIZE);
+	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
 	if (err) {
 		en_err(priv, "Failed allocating hwq resources\n");
 		goto err_bounce;
 	}
 
-	err = mlx4_en_map_buffer(&ring->wqres.buf);
-	if (err) {
-		en_err(priv, "Failed to map TX buffer\n");
-		goto err_hwq_res;
-	}
-
 	ring->buf = ring->wqres.buf.direct.buf;
 
 	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
@@ -117,7 +111,7 @@
 				    MLX4_RESERVE_ETH_BF_QP);
 	if (err) {
 		en_err(priv, "failed reserving qp for TX ring\n");
-		goto err_map;
+		goto err_hwq_res;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -154,8 +148,6 @@
 
 err_reserve:
 	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
-err_map:
-	mlx4_en_unmap_buffer(&ring->wqres.buf);
 err_hwq_res:
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 err_bounce:
@@ -182,7 +174,6 @@
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
 	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
-	mlx4_en_unmap_buffer(&ring->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 	kfree(ring->bounce_buf);
 	ring->bounce_buf = NULL;
@@ -405,7 +396,6 @@
 	u32 packets = 0;
 	u32 bytes = 0;
 	int factor = priv->cqe_factor;
-	u64 timestamp = 0;
 	int done = 0;
 	int budget = priv->tx_work_limit;
 	u32 last_nr_txbb;
@@ -445,9 +435,12 @@
 		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
 
 		do {
+			u64 timestamp = 0;
+
 			txbbs_skipped += last_nr_txbb;
 			ring_index = (ring_index + last_nr_txbb) & size_mask;
-			if (ring->tx_info[ring_index].ts_requested)
+
+			if (unlikely(ring->tx_info[ring_index].ts_requested))
 				timestamp = mlx4_en_get_cqe_ts(cqe);
 
 			/* free next descriptor */
@@ -918,8 +911,18 @@
 				 tx_ind, fragptr);
 
 	if (skb->encapsulation) {
-		struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
-		if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+		union {
+			struct iphdr *v4;
+			struct ipv6hdr *v6;
+			unsigned char *hdr;
+		} ip;
+		u8 proto;
+
+		ip.hdr = skb_inner_network_header(skb);
+		proto = (ip.v4->version == 4) ? ip.v4->protocol :
+						ip.v6->nexthdr;
+
+		if (proto == IPPROTO_TCP || proto == IPPROTO_UDP)
 			op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
 		else
 			op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 358f723..12c77a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3172,6 +3172,34 @@
 	return 0;
 }
 
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+	struct pci_dev *pdev = dev->persist->pdev;
+	int err = 0;
+
+	mutex_lock(&dev->persist->pci_status_mutex);
+	if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+		err = pci_enable_device(pdev);
+		if (!err)
+			dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+	}
+	mutex_unlock(&dev->persist->pci_status_mutex);
+
+	return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+	struct pci_dev *pdev = dev->persist->pdev;
+
+	mutex_lock(&dev->persist->pci_status_mutex);
+	if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+		pci_disable_device(pdev);
+		dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+	}
+	mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 			 int total_vfs, int *nvfs, struct mlx4_priv *priv,
 			 int reset_flow)
@@ -3582,7 +3610,7 @@
 
 	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
 
-	err = pci_enable_device(pdev);
+	err = mlx4_pci_enable_device(&priv->dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 		return err;
@@ -3715,7 +3743,7 @@
 	pci_release_regions(pdev);
 
 err_disable_pdev:
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(&priv->dev);
 	pci_set_drvdata(pdev, NULL);
 	return err;
 }
@@ -3775,6 +3803,7 @@
 	priv->pci_dev_data = id->driver_data;
 	mutex_init(&dev->persist->device_state_mutex);
 	mutex_init(&dev->persist->interface_state_mutex);
+	mutex_init(&dev->persist->pci_status_mutex);
 
 	ret = devlink_register(devlink, &pdev->dev);
 	if (ret)
@@ -3923,7 +3952,7 @@
 	}
 
 	pci_release_regions(pdev);
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(dev);
 	devlink_unregister(devlink);
 	kfree(dev->persist);
 	devlink_free(devlink);
@@ -4042,7 +4071,7 @@
 	if (state == pci_channel_io_perm_failure)
 		return PCI_ERS_RESULT_DISCONNECT;
 
-	pci_disable_device(pdev);
+	mlx4_pci_disable_device(persist->dev);
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -4050,45 +4079,53 @@
 {
 	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
 	struct mlx4_dev	 *dev  = persist->dev;
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int               ret;
-	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
-	int total_vfs;
+	int err;
 
 	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
-	ret = pci_enable_device(pdev);
-	if (ret) {
-		mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+	err = mlx4_pci_enable_device(dev);
+	if (err) {
+		mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
 	pci_set_master(pdev);
 	pci_restore_state(pdev);
 	pci_save_state(pdev);
+	return PCI_ERS_RESULT_RECOVERED;
+}
 
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+	struct mlx4_dev	 *dev  = persist->dev;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+	int total_vfs;
+	int err;
+
+	mlx4_err(dev, "%s was called\n", __func__);
 	total_vfs = dev->persist->num_vfs;
 	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
 	mutex_lock(&persist->interface_state_mutex);
 	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
-		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
 				    priv, 1);
-		if (ret) {
-			mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
-				 __func__,  ret);
+		if (err) {
+			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+				 __func__,  err);
 			goto end;
 		}
 
-		ret = restore_current_port_types(dev, dev->persist->
+		err = restore_current_port_types(dev, dev->persist->
 						 curr_port_type, dev->persist->
 						 curr_port_poss_type);
-		if (ret)
-			mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+		if (err)
+			mlx4_err(dev, "could not restore original port types (%d)\n", err);
 	}
 end:
 	mutex_unlock(&persist->interface_state_mutex);
 
-	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
 
 static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@
 static const struct pci_error_handlers mlx4_err_handler = {
 	.error_detected = mlx4_pci_err_detected,
 	.slot_reset     = mlx4_pci_slot_reset,
+	.resume		= mlx4_pci_resume,
 };
 
 static struct pci_driver mlx4_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 6aa7397..f2d0920 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1102,7 +1102,7 @@
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_mgm *mgm;
 	u32 members_count;
-	int index, prev;
+	int index = -1, prev;
 	int link = 0;
 	int i;
 	int err;
@@ -1181,7 +1181,7 @@
 		goto out;
 
 out:
-	if (prot == MLX4_PROT_ETH) {
+	if (prot == MLX4_PROT_ETH && index != -1) {
 		/* manage the steering entry for promisc mode */
 		if (new_entry)
 			err = new_steering_entry(dev, port, steer,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ef96831..c9d7fc51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -586,6 +586,8 @@
 	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
 	int			init_port_ref[MLX4_MAX_PORTS + 1];
 	u16			max_mtu[MLX4_MAX_PORTS + 1];
+	u8			pptx;
+	u8			pprx;
 	int			disable_mcast_ref[MLX4_MAX_PORTS + 1];
 	struct mlx4_resource_tracker res_tracker;
 	struct workqueue_struct *comm_wq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d12ab6a..cc84e09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,6 +323,7 @@
 	unsigned long csum_ok;
 	unsigned long csum_none;
 	unsigned long csum_complete;
+	unsigned long dropped;
 	int hwtstamp_rx_filter;
 	cpumask_var_t affinity_mask;
 };
@@ -671,8 +672,6 @@
 		int is_tx, int rss, int qpn, int cqn, int user_prio,
 		struct mlx4_qp_context *context);
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
-int mlx4_en_map_buffer(struct mlx4_buf *buf);
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
 int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
 			    int loopback);
 void mlx4_en_calc_rx_buf(struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 211c650..087b23b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1317,6 +1317,19 @@
 			}
 
 			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+			/* Slave cannot change Global Pause configuration */
+			if (slave != mlx4_master_func_num(dev) &&
+			    ((gen_context->pptx != master->pptx) ||
+			     (gen_context->pprx != master->pprx))) {
+				gen_context->pptx = master->pptx;
+				gen_context->pprx = master->pprx;
+				mlx4_warn(dev,
+					  "denying Global Pause change for slave:%d\n",
+					  slave);
+			} else {
+				master->pptx = gen_context->pptx;
+				master->pprx = gen_context->pprx;
+			}
 			break;
 		case MLX4_SET_PORT_GID_TABLE:
 			/* change to MULTIPLE entries: number of guest's gids
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 4fc45ee..9ea7b58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,10 +2,10 @@
 
 mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o   \
-		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
+		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
 		en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
-		en_txrx.o en_clock.o vxlan.o en_tc.o
+		en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) +=  en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index eb926e1..dcd2df6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -294,6 +294,7 @@
 	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
 	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
 	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
 		return MLX5_CMD_STAT_OK;
 
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -395,6 +396,8 @@
 	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
 	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
 	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
 		*status = MLX5_DRIVER_STATUS_ABORTED;
 		*synd = MLX5_DRIVER_SYND;
 		return -EIO;
@@ -406,178 +409,142 @@
 
 const char *mlx5_command_str(int command)
 {
+#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd
+
 	switch (command) {
-	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
-		return "QUERY_HCA_VPORT_CONTEXT";
-
-	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
-		return "MODIFY_HCA_VPORT_CONTEXT";
-
-	case MLX5_CMD_OP_QUERY_HCA_CAP:
-		return "QUERY_HCA_CAP";
-
-	case MLX5_CMD_OP_SET_HCA_CAP:
-		return "SET_HCA_CAP";
-
-	case MLX5_CMD_OP_QUERY_ADAPTER:
-		return "QUERY_ADAPTER";
-
-	case MLX5_CMD_OP_INIT_HCA:
-		return "INIT_HCA";
-
-	case MLX5_CMD_OP_TEARDOWN_HCA:
-		return "TEARDOWN_HCA";
-
-	case MLX5_CMD_OP_ENABLE_HCA:
-		return "MLX5_CMD_OP_ENABLE_HCA";
-
-	case MLX5_CMD_OP_DISABLE_HCA:
-		return "MLX5_CMD_OP_DISABLE_HCA";
-
-	case MLX5_CMD_OP_QUERY_PAGES:
-		return "QUERY_PAGES";
-
-	case MLX5_CMD_OP_MANAGE_PAGES:
-		return "MANAGE_PAGES";
-
-	case MLX5_CMD_OP_CREATE_MKEY:
-		return "CREATE_MKEY";
-
-	case MLX5_CMD_OP_QUERY_MKEY:
-		return "QUERY_MKEY";
-
-	case MLX5_CMD_OP_DESTROY_MKEY:
-		return "DESTROY_MKEY";
-
-	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
-		return "QUERY_SPECIAL_CONTEXTS";
-
-	case MLX5_CMD_OP_CREATE_EQ:
-		return "CREATE_EQ";
-
-	case MLX5_CMD_OP_DESTROY_EQ:
-		return "DESTROY_EQ";
-
-	case MLX5_CMD_OP_QUERY_EQ:
-		return "QUERY_EQ";
-
-	case MLX5_CMD_OP_CREATE_CQ:
-		return "CREATE_CQ";
-
-	case MLX5_CMD_OP_DESTROY_CQ:
-		return "DESTROY_CQ";
-
-	case MLX5_CMD_OP_QUERY_CQ:
-		return "QUERY_CQ";
-
-	case MLX5_CMD_OP_MODIFY_CQ:
-		return "MODIFY_CQ";
-
-	case MLX5_CMD_OP_CREATE_QP:
-		return "CREATE_QP";
-
-	case MLX5_CMD_OP_DESTROY_QP:
-		return "DESTROY_QP";
-
-	case MLX5_CMD_OP_RST2INIT_QP:
-		return "RST2INIT_QP";
-
-	case MLX5_CMD_OP_INIT2RTR_QP:
-		return "INIT2RTR_QP";
-
-	case MLX5_CMD_OP_RTR2RTS_QP:
-		return "RTR2RTS_QP";
-
-	case MLX5_CMD_OP_RTS2RTS_QP:
-		return "RTS2RTS_QP";
-
-	case MLX5_CMD_OP_SQERR2RTS_QP:
-		return "SQERR2RTS_QP";
-
-	case MLX5_CMD_OP_2ERR_QP:
-		return "2ERR_QP";
-
-	case MLX5_CMD_OP_2RST_QP:
-		return "2RST_QP";
-
-	case MLX5_CMD_OP_QUERY_QP:
-		return "QUERY_QP";
-
-	case MLX5_CMD_OP_MAD_IFC:
-		return "MAD_IFC";
-
-	case MLX5_CMD_OP_INIT2INIT_QP:
-		return "INIT2INIT_QP";
-
-	case MLX5_CMD_OP_CREATE_PSV:
-		return "CREATE_PSV";
-
-	case MLX5_CMD_OP_DESTROY_PSV:
-		return "DESTROY_PSV";
-
-	case MLX5_CMD_OP_CREATE_SRQ:
-		return "CREATE_SRQ";
-
-	case MLX5_CMD_OP_DESTROY_SRQ:
-		return "DESTROY_SRQ";
-
-	case MLX5_CMD_OP_QUERY_SRQ:
-		return "QUERY_SRQ";
-
-	case MLX5_CMD_OP_ARM_RQ:
-		return "ARM_RQ";
-
-	case MLX5_CMD_OP_CREATE_XRC_SRQ:
-		return "CREATE_XRC_SRQ";
-
-	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
-		return "DESTROY_XRC_SRQ";
-
-	case MLX5_CMD_OP_QUERY_XRC_SRQ:
-		return "QUERY_XRC_SRQ";
-
-	case MLX5_CMD_OP_ARM_XRC_SRQ:
-		return "ARM_XRC_SRQ";
-
-	case MLX5_CMD_OP_ALLOC_PD:
-		return "ALLOC_PD";
-
-	case MLX5_CMD_OP_DEALLOC_PD:
-		return "DEALLOC_PD";
-
-	case MLX5_CMD_OP_ALLOC_UAR:
-		return "ALLOC_UAR";
-
-	case MLX5_CMD_OP_DEALLOC_UAR:
-		return "DEALLOC_UAR";
-
-	case MLX5_CMD_OP_ATTACH_TO_MCG:
-		return "ATTACH_TO_MCG";
-
-	case MLX5_CMD_OP_DETTACH_FROM_MCG:
-		return "DETTACH_FROM_MCG";
-
-	case MLX5_CMD_OP_ALLOC_XRCD:
-		return "ALLOC_XRCD";
-
-	case MLX5_CMD_OP_DEALLOC_XRCD:
-		return "DEALLOC_XRCD";
-
-	case MLX5_CMD_OP_ACCESS_REG:
-		return "MLX5_CMD_OP_ACCESS_REG";
-
-	case MLX5_CMD_OP_SET_WOL_ROL:
-		return "SET_WOL_ROL";
-
-	case MLX5_CMD_OP_QUERY_WOL_ROL:
-		return "QUERY_WOL_ROL";
-
-	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
-		return "ADD_VXLAN_UDP_DPORT";
-
-	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
-		return "DELETE_VXLAN_UDP_DPORT";
-
+	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
+	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
+	MLX5_COMMAND_STR_CASE(INIT_HCA);
+	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
+	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
+	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
+	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
+	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
+	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
+	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
+	MLX5_COMMAND_STR_CASE(SET_ISSI);
+	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
+	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
+	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
+	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
+	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
+	MLX5_COMMAND_STR_CASE(CREATE_EQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
+	MLX5_COMMAND_STR_CASE(QUERY_EQ);
+	MLX5_COMMAND_STR_CASE(GEN_EQE);
+	MLX5_COMMAND_STR_CASE(CREATE_CQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
+	MLX5_COMMAND_STR_CASE(QUERY_CQ);
+	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
+	MLX5_COMMAND_STR_CASE(CREATE_QP);
+	MLX5_COMMAND_STR_CASE(DESTROY_QP);
+	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
+	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
+	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
+	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
+	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
+	MLX5_COMMAND_STR_CASE(2ERR_QP);
+	MLX5_COMMAND_STR_CASE(2RST_QP);
+	MLX5_COMMAND_STR_CASE(QUERY_QP);
+	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
+	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
+	MLX5_COMMAND_STR_CASE(CREATE_PSV);
+	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
+	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
+	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
+	MLX5_COMMAND_STR_CASE(ARM_RQ);
+	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
+	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
+	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
+	MLX5_COMMAND_STR_CASE(CREATE_DCT);
+	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
+	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
+	MLX5_COMMAND_STR_CASE(QUERY_DCT);
+	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
+	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
+	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
+	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
+	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
+	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
+	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
+	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
+	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(ALLOC_PD);
+	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
+	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
+	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
+	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
+	MLX5_COMMAND_STR_CASE(ACCESS_REG);
+	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
+	MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
+	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
+	MLX5_COMMAND_STR_CASE(MAD_IFC);
+	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
+	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
+	MLX5_COMMAND_STR_CASE(NOP);
+	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
+	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
+	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
+	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
+	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
+	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
+	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
+	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
+	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
+	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
+	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
+	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
+	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
+	MLX5_COMMAND_STR_CASE(CREATE_TIR);
+	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
+	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
+	MLX5_COMMAND_STR_CASE(QUERY_TIR);
+	MLX5_COMMAND_STR_CASE(CREATE_SQ);
+	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
+	MLX5_COMMAND_STR_CASE(QUERY_SQ);
+	MLX5_COMMAND_STR_CASE(CREATE_RQ);
+	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
+	MLX5_COMMAND_STR_CASE(QUERY_RQ);
+	MLX5_COMMAND_STR_CASE(CREATE_RMP);
+	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
+	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
+	MLX5_COMMAND_STR_CASE(QUERY_RMP);
+	MLX5_COMMAND_STR_CASE(CREATE_TIS);
+	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
+	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
+	MLX5_COMMAND_STR_CASE(QUERY_TIS);
+	MLX5_COMMAND_STR_CASE(CREATE_RQT);
+	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
+	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
+	MLX5_COMMAND_STR_CASE(QUERY_RQT);
+	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
+	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
+	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
+	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
+	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
+	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
+	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
+	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
+	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
+	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
 	default: return "unknown command opcode";
 	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 879e627..e8a6c33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -46,6 +46,9 @@
 #include <linux/rhashtable.h>
 #include "wq.h"
 #include "mlx5_core.h"
+#include "en_stats.h"
+
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
 
 #define MLX5E_MAX_NUM_TC	8
 
@@ -57,12 +60,30 @@
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
 
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x4
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6
+
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE		6  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS	8  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_WQE_SZ			17
+#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
+				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
+#define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_STRIDES_PER_PAGE		(MLX5_MPWRQ_NUM_STRIDES >> \
+						 MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
+				   BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+#define MLX5_UMR_ALIGN				(2048)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
+
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
 
 #define MLX5E_LOG_INDIR_RQT_SIZE       0x7
 #define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
@@ -73,233 +94,70 @@
 
 #define MLX5E_NUM_MAIN_GROUPS 9
 
+static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
+{
+	switch (wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
+			     wq_size / 2);
+	default:
+		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
+			     wq_size / 2);
+	}
+}
+
+static inline int mlx5_min_log_rq_size(int wq_type)
+{
+	switch (wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+	default:
+		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+	}
+}
+
+static inline int mlx5_max_log_rq_size(int wq_type)
+{
+	switch (wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
+	default:
+		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+	}
+}
+
+struct mlx5e_tx_wqe {
+	struct mlx5_wqe_ctrl_seg ctrl;
+	struct mlx5_wqe_eth_seg  eth;
+};
+
+struct mlx5e_rx_wqe {
+	struct mlx5_wqe_srq_next_seg  next;
+	struct mlx5_wqe_data_seg      data;
+};
+
+struct mlx5e_umr_wqe {
+	struct mlx5_wqe_ctrl_seg       ctrl;
+	struct mlx5_wqe_umr_ctrl_seg   uctrl;
+	struct mlx5_mkey_seg           mkc;
+	struct mlx5_wqe_data_seg       data;
+};
+
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
 #define MLX5E_MIN_BW_ALLOC 1   /* Min percentage of BW allocation */
 #endif
 
-static const char vport_strings[][ETH_GSTRING_LEN] = {
-	/* vport statistics */
-	"rx_packets",
-	"rx_bytes",
-	"tx_packets",
-	"tx_bytes",
-	"rx_error_packets",
-	"rx_error_bytes",
-	"tx_error_packets",
-	"tx_error_bytes",
-	"rx_unicast_packets",
-	"rx_unicast_bytes",
-	"tx_unicast_packets",
-	"tx_unicast_bytes",
-	"rx_multicast_packets",
-	"rx_multicast_bytes",
-	"tx_multicast_packets",
-	"tx_multicast_bytes",
-	"rx_broadcast_packets",
-	"rx_broadcast_bytes",
-	"tx_broadcast_packets",
-	"tx_broadcast_bytes",
-
-	/* SW counters */
-	"tso_packets",
-	"tso_bytes",
-	"tso_inner_packets",
-	"tso_inner_bytes",
-	"lro_packets",
-	"lro_bytes",
-	"rx_csum_good",
-	"rx_csum_none",
-	"rx_csum_sw",
-	"tx_csum_offload",
-	"tx_csum_inner",
-	"tx_queue_stopped",
-	"tx_queue_wake",
-	"tx_queue_dropped",
-	"rx_wqe_err",
-};
-
-struct mlx5e_vport_stats {
-	/* HW counters */
-	u64 rx_packets;
-	u64 rx_bytes;
-	u64 tx_packets;
-	u64 tx_bytes;
-	u64 rx_error_packets;
-	u64 rx_error_bytes;
-	u64 tx_error_packets;
-	u64 tx_error_bytes;
-	u64 rx_unicast_packets;
-	u64 rx_unicast_bytes;
-	u64 tx_unicast_packets;
-	u64 tx_unicast_bytes;
-	u64 rx_multicast_packets;
-	u64 rx_multicast_bytes;
-	u64 tx_multicast_packets;
-	u64 tx_multicast_bytes;
-	u64 rx_broadcast_packets;
-	u64 rx_broadcast_bytes;
-	u64 tx_broadcast_packets;
-	u64 tx_broadcast_bytes;
-
-	/* SW counters */
-	u64 tso_packets;
-	u64 tso_bytes;
-	u64 tso_inner_packets;
-	u64 tso_inner_bytes;
-	u64 lro_packets;
-	u64 lro_bytes;
-	u64 rx_csum_good;
-	u64 rx_csum_none;
-	u64 rx_csum_sw;
-	u64 tx_csum_offload;
-	u64 tx_csum_inner;
-	u64 tx_queue_stopped;
-	u64 tx_queue_wake;
-	u64 tx_queue_dropped;
-	u64 rx_wqe_err;
-
-#define NUM_VPORT_COUNTERS     35
-};
-
-static const char pport_strings[][ETH_GSTRING_LEN] = {
-	/* IEEE802.3 counters */
-	"frames_tx",
-	"frames_rx",
-	"check_seq_err",
-	"alignment_err",
-	"octets_tx",
-	"octets_received",
-	"multicast_xmitted",
-	"broadcast_xmitted",
-	"multicast_rx",
-	"broadcast_rx",
-	"in_range_len_errors",
-	"out_of_range_len",
-	"too_long_errors",
-	"symbol_err",
-	"mac_control_tx",
-	"mac_control_rx",
-	"unsupported_op_rx",
-	"pause_ctrl_rx",
-	"pause_ctrl_tx",
-
-	/* RFC2863 counters */
-	"in_octets",
-	"in_ucast_pkts",
-	"in_discards",
-	"in_errors",
-	"in_unknown_protos",
-	"out_octets",
-	"out_ucast_pkts",
-	"out_discards",
-	"out_errors",
-	"in_multicast_pkts",
-	"in_broadcast_pkts",
-	"out_multicast_pkts",
-	"out_broadcast_pkts",
-
-	/* RFC2819 counters */
-	"drop_events",
-	"octets",
-	"pkts",
-	"broadcast_pkts",
-	"multicast_pkts",
-	"crc_align_errors",
-	"undersize_pkts",
-	"oversize_pkts",
-	"fragments",
-	"jabbers",
-	"collisions",
-	"p64octets",
-	"p65to127octets",
-	"p128to255octets",
-	"p256to511octets",
-	"p512to1023octets",
-	"p1024to1518octets",
-	"p1519to2047octets",
-	"p2048to4095octets",
-	"p4096to8191octets",
-	"p8192to10239octets",
-};
-
-#define NUM_IEEE_802_3_COUNTERS		19
-#define NUM_RFC_2863_COUNTERS		13
-#define NUM_RFC_2819_COUNTERS		21
-#define NUM_PPORT_COUNTERS		(NUM_IEEE_802_3_COUNTERS + \
-					 NUM_RFC_2863_COUNTERS + \
-					 NUM_RFC_2819_COUNTERS)
-
-struct mlx5e_pport_stats {
-	__be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
-	__be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
-	__be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
-};
-
-static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
-	"packets",
-	"bytes",
-	"csum_none",
-	"csum_sw",
-	"lro_packets",
-	"lro_bytes",
-	"wqe_err"
-};
-
-struct mlx5e_rq_stats {
-	u64 packets;
-	u64 bytes;
-	u64 csum_none;
-	u64 csum_sw;
-	u64 lro_packets;
-	u64 lro_bytes;
-	u64 wqe_err;
-#define NUM_RQ_STATS 7
-};
-
-static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
-	"packets",
-	"bytes",
-	"tso_packets",
-	"tso_bytes",
-	"tso_inner_packets",
-	"tso_inner_bytes",
-	"csum_offload_inner",
-	"nop",
-	"csum_offload_none",
-	"stopped",
-	"wake",
-	"dropped",
-};
-
-struct mlx5e_sq_stats {
-	/* commonly accessed in data path */
-	u64 packets;
-	u64 bytes;
-	u64 tso_packets;
-	u64 tso_bytes;
-	u64 tso_inner_packets;
-	u64 tso_inner_bytes;
-	u64 csum_offload_inner;
-	u64 nop;
-	/* less likely accessed in data path */
-	u64 csum_offload_none;
-	u64 stopped;
-	u64 wake;
-	u64 dropped;
-#define NUM_SQ_STATS 12
-};
-
-struct mlx5e_stats {
-	struct mlx5e_vport_stats   vport;
-	struct mlx5e_pport_stats   pport;
-};
-
 struct mlx5e_params {
 	u8  log_sq_size;
+	u8  rq_wq_type;
+	u8  mpwqe_log_stride_sz;
+	u8  mpwqe_log_num_strides;
 	u8  log_rq_size;
 	u16 num_channels;
 	u8  num_tc;
+	bool rx_cqe_compress_admin;
+	bool rx_cqe_compress;
 	u16 rx_cq_moderation_usec;
 	u16 rx_cq_moderation_pkts;
 	u16 tx_cq_moderation_usec;
@@ -311,6 +169,7 @@
 	u8  rss_hfunc;
 	u8  toeplitz_hash_key[40];
 	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+	bool vlan_strip_disable;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	struct ieee_ets ets;
 #endif
@@ -331,6 +190,7 @@
 
 enum {
 	MLX5E_RQ_STATE_POST_WQES_ENABLE,
+	MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
 };
 
 struct mlx5e_cq {
@@ -343,32 +203,88 @@
 	struct mlx5e_channel      *channel;
 	struct mlx5e_priv         *priv;
 
+	/* cqe decompression */
+	struct mlx5_cqe64          title;
+	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
+	u8                         mini_arr_idx;
+	u16                        decmprs_left;
+	u16                        decmprs_wqe_counter;
+
 	/* control */
 	struct mlx5_wq_ctrl        wq_ctrl;
 } ____cacheline_aligned_in_smp;
 
+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
+				       struct mlx5_cqe64 *cqe);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
+				  u16 ix);
+
+struct mlx5e_dma_info {
+	struct page	*page;
+	dma_addr_t	addr;
+};
+
 struct mlx5e_rq {
 	/* data path */
 	struct mlx5_wq_ll      wq;
 	u32                    wqe_sz;
 	struct sk_buff       **skb;
+	struct mlx5e_mpw_info *wqe_info;
+	__be32                 mkey_be;
+	__be32                 umr_mkey_be;
 
 	struct device         *pdev;
 	struct net_device     *netdev;
 	struct mlx5e_tstamp   *tstamp;
 	struct mlx5e_rq_stats  stats;
 	struct mlx5e_cq        cq;
+	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+	mlx5e_fp_alloc_wqe     alloc_wqe;
 
 	unsigned long          state;
 	int                    ix;
 
 	/* control */
 	struct mlx5_wq_ctrl    wq_ctrl;
+	u8                     wq_type;
+	u32                    mpwqe_stride_sz;
+	u32                    mpwqe_num_strides;
 	u32                    rqn;
 	struct mlx5e_channel  *channel;
 	struct mlx5e_priv     *priv;
 } ____cacheline_aligned_in_smp;
 
+struct mlx5e_umr_dma_info {
+	__be64                *mtt;
+	__be64                *mtt_no_align;
+	dma_addr_t             mtt_addr;
+	struct mlx5e_dma_info *dma_info;
+};
+
+struct mlx5e_mpw_info {
+	union {
+		struct mlx5e_dma_info     dma_info;
+		struct mlx5e_umr_dma_info umr;
+	};
+	u16 consumed_strides;
+	u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+
+	void (*dma_pre_sync)(struct device *pdev,
+			     struct mlx5e_mpw_info *wi,
+			     u32 wqe_offset, u32 len);
+	void (*add_skb_frag)(struct mlx5e_rq *rq,
+			     struct sk_buff *skb,
+			     struct mlx5e_mpw_info *wi,
+			     u32 page_idx, u32 frag_offset, u32 len);
+	void (*copy_skb_header)(struct device *pdev,
+				struct sk_buff *skb,
+				struct mlx5e_mpw_info *wi,
+				u32 page_idx, u32 offset,
+				u32 headlen);
+	void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
+};
+
 struct mlx5e_tx_wqe_info {
 	u32 num_bytes;
 	u8  num_wqebbs;
@@ -391,6 +307,11 @@
 	MLX5E_SQ_STATE_BF_ENABLE,
 };
 
+struct mlx5e_ico_wqe_info {
+	u8  opcode;
+	u8  num_wqebbs;
+};
+
 struct mlx5e_sq {
 	/* data path */
 
@@ -432,6 +353,7 @@
 	struct mlx5_uar            uar;
 	struct mlx5e_channel      *channel;
 	int                        tc;
+	struct mlx5e_ico_wqe_info *ico_wqe_info;
 } ____cacheline_aligned_in_smp;
 
 static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -448,6 +370,7 @@
 	/* data path */
 	struct mlx5e_rq            rq;
 	struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
+	struct mlx5e_sq            icosq;   /* internal control operations */
 	struct napi_struct         napi;
 	struct device             *pdev;
 	struct net_device         *netdev;
@@ -474,33 +397,7 @@
 	MLX5E_TT_IPV6,
 	MLX5E_TT_ANY,
 	MLX5E_NUM_TT,
-};
-
-#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
-
-enum mlx5e_rqt_ix {
-	MLX5E_INDIRECTION_RQT,
-	MLX5E_SINGLE_RQ_RQT,
-	MLX5E_NUM_RQT,
-};
-
-struct mlx5e_eth_addr_info {
-	u8  addr[ETH_ALEN + 2];
-	u32 tt_vec;
-	struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
-};
-
-#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
-
-struct mlx5e_eth_addr_db {
-	struct hlist_head          netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
-	struct hlist_head          netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
-	struct mlx5e_eth_addr_info broadcast;
-	struct mlx5e_eth_addr_info allmulti;
-	struct mlx5e_eth_addr_info promisc;
-	bool                       broadcast_enabled;
-	bool                       allmulti_enabled;
-	bool                       promisc_enabled;
+	MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
 };
 
 enum {
@@ -509,7 +406,33 @@
 	MLX5E_STATE_DESTROYING,
 };
 
-struct mlx5e_vlan_db {
+struct mlx5e_vxlan_db {
+	spinlock_t			lock; /* protect vxlan table */
+	struct radix_tree_root		tree;
+};
+
+struct mlx5e_l2_rule {
+	u8  addr[ETH_ALEN + 2];
+	struct mlx5_flow_rule *rule;
+};
+
+struct mlx5e_flow_table {
+	int num_groups;
+	struct mlx5_flow_table *t;
+	struct mlx5_flow_group **g;
+};
+
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct mlx5e_tc_table {
+	struct mlx5_flow_table		*t;
+
+	struct rhashtable_params        ht_params;
+	struct rhashtable               ht;
+};
+
+struct mlx5e_vlan_table {
+	struct mlx5e_flow_table		ft;
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	struct mlx5_flow_rule	*active_vlans_rule[VLAN_N_VID];
 	struct mlx5_flow_rule	*untagged_rule;
@@ -517,29 +440,74 @@
 	bool          filter_disabled;
 };
 
-struct mlx5e_vxlan_db {
-	spinlock_t			lock; /* protect vxlan table */
-	struct radix_tree_root		tree;
+struct mlx5e_l2_table {
+	struct mlx5e_flow_table    ft;
+	struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+	struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+	struct mlx5e_l2_rule	   broadcast;
+	struct mlx5e_l2_rule	   allmulti;
+	struct mlx5e_l2_rule	   promisc;
+	bool                       broadcast_enabled;
+	bool                       allmulti_enabled;
+	bool                       promisc_enabled;
 };
 
-struct mlx5e_flow_table {
-	int num_groups;
-	struct mlx5_flow_table		*t;
-	struct mlx5_flow_group		**g;
+/* L3/L4 traffic type classifier */
+struct mlx5e_ttc_table {
+	struct mlx5e_flow_table  ft;
+	struct mlx5_flow_rule	 *rules[MLX5E_NUM_TT];
 };
 
-struct mlx5e_tc_flow_table {
-	struct mlx5_flow_table		*t;
-
-	struct rhashtable_params        ht_params;
-	struct rhashtable               ht;
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+struct arfs_table {
+	struct mlx5e_flow_table  ft;
+	struct mlx5_flow_rule    *default_rule;
+	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
 };
 
-struct mlx5e_flow_tables {
-	struct mlx5_flow_namespace	*ns;
-	struct mlx5e_tc_flow_table	tc;
-	struct mlx5e_flow_table		vlan;
-	struct mlx5e_flow_table		main;
+enum  arfs_type {
+	ARFS_IPV4_TCP,
+	ARFS_IPV6_TCP,
+	ARFS_IPV4_UDP,
+	ARFS_IPV6_UDP,
+	ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+	/* Protect aRFS rules list */
+	spinlock_t                     arfs_lock;
+	struct list_head               rules;
+	int                            last_filter_id;
+	struct workqueue_struct        *wq;
+};
+
+/* NIC prio FTS */
+enum {
+	MLX5E_VLAN_FT_LEVEL = 0,
+	MLX5E_L2_FT_LEVEL,
+	MLX5E_TTC_FT_LEVEL,
+	MLX5E_ARFS_FT_LEVEL
+};
+
+struct mlx5e_flow_steering {
+	struct mlx5_flow_namespace      *ns;
+	struct mlx5e_tc_table           tc;
+	struct mlx5e_vlan_table         vlan;
+	struct mlx5e_l2_table           l2;
+	struct mlx5e_ttc_table          ttc;
+	struct mlx5e_arfs_tables        arfs;
+};
+
+struct mlx5e_direct_tir {
+	u32              tirn;
+	u32              rqtn;
+};
+
+enum {
+	MLX5E_TC_PRIO = 0,
+	MLX5E_NIC_PRIO
 };
 
 struct mlx5e_priv {
@@ -554,19 +522,20 @@
 	u32                        pdn;
 	u32                        tdn;
 	struct mlx5_core_mkey      mkey;
+	struct mlx5_core_mkey      umr_mkey;
 	struct mlx5e_rq            drop_rq;
 
 	struct mlx5e_channel     **channel;
 	u32                        tisn[MLX5E_MAX_NUM_TC];
-	u32                        rqtn[MLX5E_NUM_RQT];
-	u32                        tirn[MLX5E_NUM_TT];
+	u32                        indir_rqtn;
+	u32                        indir_tirn[MLX5E_NUM_INDIR_TIRS];
+	struct mlx5e_direct_tir    direct_tir[MLX5E_MAX_NUM_CHANNELS];
 
-	struct mlx5e_flow_tables   fts;
-	struct mlx5e_eth_addr_db   eth_addr;
-	struct mlx5e_vlan_db       vlan;
+	struct mlx5e_flow_steering fs;
 	struct mlx5e_vxlan_db      vxlan;
 
 	struct mlx5e_params        params;
+	struct workqueue_struct    *wq;
 	struct work_struct         update_carrier_work;
 	struct work_struct         set_rx_mode_work;
 	struct delayed_work        update_stats_work;
@@ -575,18 +544,7 @@
 	struct net_device         *netdev;
 	struct mlx5e_stats         stats;
 	struct mlx5e_tstamp        tstamp;
-};
-
-#define MLX5E_NET_IP_ALIGN 2
-
-struct mlx5e_tx_wqe {
-	struct mlx5_wqe_ctrl_seg ctrl;
-	struct mlx5_wqe_eth_seg  eth;
-};
-
-struct mlx5e_rx_wqe {
-	struct mlx5_wqe_srq_next_seg  next;
-	struct mlx5_wqe_data_seg      data;
+	u16 q_counter;
 };
 
 enum mlx5e_link_mode {
@@ -609,7 +567,7 @@
 	MLX5E_100GBASE_KR4	 = 22,
 	MLX5E_100GBASE_LR4	 = 23,
 	MLX5E_100BASE_TX	 = 24,
-	MLX5E_100BASE_T		 = 25,
+	MLX5E_1000BASE_T	 = 25,
 	MLX5E_10GBASE_T		 = 26,
 	MLX5E_25GBASE_CR	 = 27,
 	MLX5E_25GBASE_KR	 = 28,
@@ -631,14 +589,35 @@
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
+				    struct mlx5_cqe64 *cqe,
+				    u16 byte_cnt,
+				    struct mlx5e_mpw_info *wi,
+				    struct sk_buff *skb);
+void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+					struct mlx5_cqe64 *cqe,
+					u16 byte_cnt,
+					struct mlx5e_mpw_info *wi,
+					struct sk_buff *skb);
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+				struct mlx5e_mpw_info *wi);
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+				    struct mlx5e_mpw_info *wi);
 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
 
 void mlx5e_update_stats(struct mlx5e_priv *priv);
 
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
 void mlx5e_set_rx_mode_work(struct work_struct *work);
 
 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -647,6 +626,7 @@
 void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
 int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
 int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
 			  u16 vid);
@@ -655,16 +635,20 @@
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
 void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+				   u32 *indirection_rqt, int len,
 				   int num_channels);
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-				      struct mlx5e_tx_wqe *wqe, int bf_sz)
+				      struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
 {
 	u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
 
@@ -678,9 +662,9 @@
 	 */
 	wmb();
 	if (bf_sz)
-		__iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+		__iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
 	else
-		mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+		mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
 	/* flush the write-combining mapped buffer */
 	wmb();
 
@@ -701,12 +685,43 @@
 		     MLX5E_MAX_NUM_CHANNELS);
 }
 
+static inline int mlx5e_get_mtt_octw(int npages)
+{
+	return ALIGN(npages, 8) / 2;
+}
+
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
 #endif
 
+#ifndef CONFIG_RFS_ACCEL
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+	return 0;
+}
+
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+	return -ENOTSUPP;
+}
+
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+	return -ENOTSUPP;
+}
+#else
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+			u16 rxq_index, u32 flow_id);
+#endif
+
 u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
 
 #endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
new file mode 100644
index 0000000..3515e78
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -0,0 +1,752 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_RFS_ACCEL
+
+#include <linux/hash.h>
+#include <linux/mlx5/fs.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "en.h"
+
+struct arfs_tuple {
+	__be16 etype;
+	u8     ip_proto;
+	union {
+		__be32 src_ipv4;
+		struct in6_addr src_ipv6;
+	};
+	union {
+		__be32 dst_ipv4;
+		struct in6_addr dst_ipv6;
+	};
+	__be16 src_port;
+	__be16 dst_port;
+};
+
+struct arfs_rule {
+	struct mlx5e_priv	*priv;
+	struct work_struct      arfs_work;
+	struct mlx5_flow_rule   *rule;
+	struct hlist_node	hlist;
+	int			rxq;
+	/* Flow ID passed to ndo_rx_flow_steer */
+	int			flow_id;
+	/* Filter ID returned by ndo_rx_flow_steer */
+	int			filter_id;
+	struct arfs_tuple	tuple;
+};
+
+#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
+	for (i = 0; i < ARFS_NUM_TYPES; i++) \
+		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)
+
+#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
+	for (j = 0; j < ARFS_HASH_SIZE; j++) \
+		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)
+
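+/* Map an aRFS table type to the corresponding RSS traffic type */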
+static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
+{
+	switch (type) {
+	case ARFS_IPV4_TCP:
+		return MLX5E_TT_IPV4_TCP;
+	case ARFS_IPV4_UDP:
+		return MLX5E_TT_IPV4_UDP;
+	case ARFS_IPV6_TCP:
+		return MLX5E_TT_IPV6_TCP;
+	case ARFS_IPV6_UDP:
+		return MLX5E_TT_IPV6_UDP;
+	default:
+		return -EINVAL;
+	}
+}
+
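+/* Point the ttc steering rules back at the RSS (indirection) TIRs so that
+ * incoming traffic bypasses the aRFS flow tables.
+ */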
+static int arfs_disable(struct mlx5e_priv *priv)
+{
+	struct mlx5_flow_destination dest;
+	u32 *tirn = priv->indir_tirn;
+	int err = 0;
+	int tt;
+	int i;
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	for (i = 0; i < ARFS_NUM_TYPES; i++) {
+		dest.tir_num = tirn[i];
+		tt = arfs_get_tt(i);
+		/* Modify ttc rules destination to bypass the aRFS tables */
+		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+						   &dest);
+		if (err) {
+			netdev_err(priv->netdev,
+				   "%s: modify ttc destination failed\n",
+				   __func__);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void arfs_del_rules(struct mlx5e_priv *priv);
+
+int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+	arfs_del_rules(priv);
+
+	return arfs_disable(priv);
+}
+
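+/* Redirect the ttc steering rules to the per-type aRFS flow tables; on
+ * failure fall back to the RSS TIRs via arfs_disable().
+ */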
+int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+	struct mlx5_flow_destination dest;
+	int err = 0;
+	int tt;
+	int i;
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	for (i = 0; i < ARFS_NUM_TYPES; i++) {
+		dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
+		tt = arfs_get_tt(i);
+		/* Modify ttc rules destination to point to the aRFS flow tables */
+		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+						   &dest);
+		if (err) {
+			netdev_err(priv->netdev,
+				   "%s: modify ttc destination failed err=%d\n",
+				   __func__, err);
+			arfs_disable(priv);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void arfs_destroy_table(struct arfs_table *arfs_t)
+{
+	mlx5_del_flow_rule(arfs_t->default_rule);
+	mlx5e_destroy_flow_table(&arfs_t->ft);
+}
+
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+{
+	int i;
+
+	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+		return;
+
+	arfs_del_rules(priv);
+	destroy_workqueue(priv->fs.arfs.wq);
+	for (i = 0; i < ARFS_NUM_TYPES; i++) {
+		if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
+			arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
+	}
+}
+
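+/* Add a catch-all rule that forwards traffic not matched by any aRFS rule
+ * to the RSS TIR of the given traffic type.
+ */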
+static int arfs_add_default_rule(struct mlx5e_priv *priv,
+				 enum arfs_type type)
+{
+	struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+	struct mlx5_flow_destination dest;
+	u8 match_criteria_enable = 0;
+	u32 *tirn = priv->indir_tirn;
+	u32 *match_criteria;
+	u32 *match_value;
+	int err = 0;
+
+	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	if (!match_value || !match_criteria) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		err = -ENOMEM;
+		goto out;
+	}
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	switch (type) {
+	case ARFS_IPV4_TCP:
+		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
+		break;
+	case ARFS_IPV4_UDP:
+		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
+		break;
+	case ARFS_IPV6_TCP:
+		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
+		break;
+	case ARFS_IPV6_UDP:
+		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
+		break;
+	default:
+		err = -EINVAL;
+		goto out;
+	}
+
+	arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable,
+						  match_criteria, match_value,
+						  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+						  MLX5_FS_DEFAULT_FLOW_TAG,
+						  &dest);
+	if (IS_ERR(arfs_t->default_rule)) {
+		err = PTR_ERR(arfs_t->default_rule);
+		arfs_t->default_rule = NULL;
+		netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
+			   __func__, type);
+	}
+out:
+	kvfree(match_criteria);
+	kvfree(match_value);
+	return err;
+}
+
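+/* Each aRFS flow table has two groups: a large group matching the full
+ * 5-tuple and a single-entry group holding the catch-all (default) rule.
+ */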
+#define MLX5E_ARFS_NUM_GROUPS	2
+#define MLX5E_ARFS_GROUP1_SIZE	BIT(12)
+#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
+#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
+				 MLX5E_ARFS_GROUP2_SIZE)
+static int arfs_create_groups(struct mlx5e_flow_table *ft,
+			      enum  arfs_type type)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	void *outer_headers_c;
+	int ix = 0;
+	u32 *in;
+	int err;
+	u8 *mc;
+
+	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
+			sizeof(*ft->g), GFP_KERNEL);
+	in = mlx5_vzalloc(inlen);
+	if (!in || !ft->g) {
+		kvfree(ft->g);
+		kvfree(in);
+		return -ENOMEM;
+	}
+
+	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
+				       outer_headers);
+	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
+	switch (type) {
+	case ARFS_IPV4_TCP:
+	case ARFS_IPV6_TCP:
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
+		break;
+	case ARFS_IPV4_UDP:
+	case ARFS_IPV6_UDP:
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
+		break;
+	default:
+		err = -EINVAL;
+		goto out;
+	}
+
+	switch (type) {
+	case ARFS_IPV4_TCP:
+	case ARFS_IPV4_UDP:
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+		break;
+	case ARFS_IPV6_TCP:
+	case ARFS_IPV6_UDP:
+		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
+		       0xff, 16);
+		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       0xff, 16);
+		break;
+	default:
+		err = -EINVAL;
+		goto out;
+	}
+
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_ARFS_GROUP1_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_ARFS_GROUP2_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err;
+	ft->num_groups++;
+
+	kvfree(in);
+	return 0;
+
+err:
+	err = PTR_ERR(ft->g[ft->num_groups]);
+	ft->g[ft->num_groups] = NULL;
+out:
+	kvfree(in);
+
+	return err;
+}
+
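+/* Create the aRFS flow table for one traffic type along with its flow
+ * groups and default rule.
+ */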
+static int arfs_create_table(struct mlx5e_priv *priv,
+			     enum arfs_type type)
+{
+	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
+	int err;
+
+	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+				       MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL);
+	if (IS_ERR(ft->t)) {
+		err = PTR_ERR(ft->t);
+		ft->t = NULL;
+		return err;
+	}
+
+	err = arfs_create_groups(ft, type);
+	if (err)
+		goto err;
+
+	err = arfs_add_default_rule(priv, type);
+	if (err)
+		goto err;
+
+	return 0;
+err:
+	mlx5e_destroy_flow_table(ft);
+	return err;
+}
+
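+/* Create the per-type aRFS flow tables and the workqueue used to add and
+ * expire steering rules; a no-op when the device does not advertise
+ * NETIF_F_NTUPLE.
+ */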
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+	int err = 0;
+	int i;
+
+	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+		return 0;
+
+	spin_lock_init(&priv->fs.arfs.arfs_lock);
+	INIT_LIST_HEAD(&priv->fs.arfs.rules);
+	priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
+	if (!priv->fs.arfs.wq)
+		return -ENOMEM;
+
+	for (i = 0; i < ARFS_NUM_TYPES; i++) {
+		err = arfs_create_table(priv, i);
+		if (err)
+			goto err;
+	}
+	return 0;
+err:
+	mlx5e_arfs_destroy_tables(priv);
+	return err;
+}
+
+#define MLX5E_ARFS_EXPIRY_QUOTA 60
+
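+/* Scan up to MLX5E_ARFS_EXPIRY_QUOTA rules and free those that the RPS core
+ * reports as expired via rps_may_expire_flow().
+ */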
+static void arfs_may_expire_flow(struct mlx5e_priv *priv)
+{
+	struct arfs_rule *arfs_rule;
+	struct hlist_node *htmp;
+	int quota = 0;
+	int i;
+	int j;
+
+	HLIST_HEAD(del_list);
+	spin_lock_bh(&priv->fs.arfs.arfs_lock);
+	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+		if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+			break;
+		if (!work_pending(&arfs_rule->arfs_work) &&
+		    rps_may_expire_flow(priv->netdev,
+					arfs_rule->rxq, arfs_rule->flow_id,
+					arfs_rule->filter_id)) {
+			hlist_del_init(&arfs_rule->hlist);
+			hlist_add_head(&arfs_rule->hlist, &del_list);
+		}
+	}
+	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
+		if (arfs_rule->rule)
+			mlx5_del_flow_rule(arfs_rule->rule);
+		hlist_del(&arfs_rule->hlist);
+		kfree(arfs_rule);
+	}
+}
+
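+/* Remove all aRFS rules, cancelling any pending rule-add work first */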
+static void arfs_del_rules(struct mlx5e_priv *priv)
+{
+	struct hlist_node *htmp;
+	struct arfs_rule *rule;
+	int i;
+	int j;
+
+	HLIST_HEAD(del_list);
+	spin_lock_bh(&priv->fs.arfs.arfs_lock);
+	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+		hlist_del_init(&rule->hlist);
+		hlist_add_head(&rule->hlist, &del_list);
+	}
+	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
+		cancel_work_sync(&rule->arfs_work);
+		if (rule->rule)
+			mlx5_del_flow_rule(rule->rule);
+		hlist_del(&rule->hlist);
+		kfree(rule);
+	}
+}
+
+static struct hlist_head *
+arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
+		 __be16 dst_port)
+{
+	unsigned long l;
+	int bucket_idx;
+
+	l = (__force unsigned long)src_port |
+	    ((__force unsigned long)dst_port << 2);
+
+	bucket_idx = hash_long(l, ARFS_HASH_SHIFT);
+
+	return &arfs_t->rules_hash[bucket_idx];
+}
+
+static u8 arfs_get_ip_proto(const struct sk_buff *skb)
+{
+	return (skb->protocol == htons(ETH_P_IP)) ?
+		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+}
+
+static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
+					 u8 ip_proto, __be16 etype)
+{
+	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
+		return &arfs->arfs_tables[ARFS_IPV4_TCP];
+	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
+		return &arfs->arfs_tables[ARFS_IPV4_UDP];
+	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
+		return &arfs->arfs_tables[ARFS_IPV6_TCP];
+	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
+		return &arfs->arfs_tables[ARFS_IPV6_UDP];
+
+	return NULL;
+}
+
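+/* Build the 5-tuple match (ethertype, IP addresses and L4 ports) for an
+ * aRFS rule and add it to the matching flow table, steering the flow to
+ * the TIR of its designated RQ.
+ */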
+static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
+					    struct arfs_rule *arfs_rule)
+{
+	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+	struct arfs_tuple *tuple = &arfs_rule->tuple;
+	struct mlx5_flow_rule *rule = NULL;
+	struct mlx5_flow_destination dest;
+	struct arfs_table *arfs_table;
+	u8 match_criteria_enable = 0;
+	struct mlx5_flow_table *ft;
+	u32 *match_criteria;
+	u32 *match_value;
+	int err = 0;
+
+	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	if (!match_value || !match_criteria) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		err = -ENOMEM;
+		goto out;
+	}
+	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+			 outer_headers.ethertype);
+	MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+		 ntohs(tuple->etype));
+	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
+	if (!arfs_table) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	ft = arfs_table->ft.t;
+	if (tuple->ip_proto == IPPROTO_TCP) {
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+				 outer_headers.tcp_dport);
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+				 outer_headers.tcp_sport);
+		MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport,
+			 ntohs(tuple->dst_port));
+		MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport,
+			 ntohs(tuple->src_port));
+	} else {
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+				 outer_headers.udp_dport);
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+				 outer_headers.udp_sport);
+		MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport,
+			 ntohs(tuple->dst_port));
+		MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport,
+			 ntohs(tuple->src_port));
+	}
+	if (tuple->etype == htons(ETH_P_IP)) {
+		memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
+		       &tuple->src_ipv4,
+		       4);
+		memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+		       &tuple->dst_ipv4,
+		       4);
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+	} else {
+		memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+		       &tuple->src_ipv6,
+		       16);
+		memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       &tuple->dst_ipv6,
+		       16);
+		memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+		       0xff,
+		       16);
+		memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       0xff,
+		       16);
+	}
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
+	rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
+				  match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+				  MLX5_FS_DEFAULT_FLOW_TAG,
+				  &dest);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
+			   __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+	}
+
+out:
+	kvfree(match_criteria);
+	kvfree(match_value);
+	return err ? ERR_PTR(err) : rule;
+}
+
+static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
+				struct mlx5_flow_rule *rule, u16 rxq)
+{
+	struct mlx5_flow_destination dst;
+	int err = 0;
+
+	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	dst.tir_num = priv->direct_tir[rxq].tirn;
+	err = mlx5_modify_rule_destination(rule, &dst);
+	if (err)
+		netdev_warn(priv->netdev,
+			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
+}
+
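+/* Deferred work: add the steering rule for a new aRFS entry, or move an
+ * existing rule to its new RQ, then let expired rules be cleaned up.
+ */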
+static void arfs_handle_work(struct work_struct *work)
+{
+	struct arfs_rule *arfs_rule = container_of(work,
+						   struct arfs_rule,
+						   arfs_work);
+	struct mlx5e_priv *priv = arfs_rule->priv;
+	struct mlx5_flow_rule *rule;
+
+	mutex_lock(&priv->state_lock);
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		spin_lock_bh(&priv->fs.arfs.arfs_lock);
+		hlist_del(&arfs_rule->hlist);
+		spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+		mutex_unlock(&priv->state_lock);
+		kfree(arfs_rule);
+		goto out;
+	}
+	mutex_unlock(&priv->state_lock);
+
+	if (!arfs_rule->rule) {
+		rule = arfs_add_rule(priv, arfs_rule);
+		if (IS_ERR(rule))
+			goto out;
+		arfs_rule->rule = rule;
+	} else {
+		arfs_modify_rule_rq(priv, arfs_rule->rule,
+				    arfs_rule->rxq);
+	}
+out:
+	arfs_may_expire_flow(priv);
+}
+
+/* Return the L4 destination port from IPv4/IPv6 packets */
+static __be16 arfs_get_dst_port(const struct sk_buff *skb)
+{
+	char *transport_header;
+
+	transport_header = skb_transport_header(skb);
+	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+		return ((struct tcphdr *)transport_header)->dest;
+	return ((struct udphdr *)transport_header)->dest;
+}
+
+/* Return the L4 source port from IPv4/IPv6 packets */
+static __be16 arfs_get_src_port(const struct sk_buff *skb)
+{
+	char *transport_header;
+
+	transport_header = skb_transport_header(skb);
+	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+		return ((struct tcphdr *)transport_header)->source;
+	return ((struct udphdr *)transport_header)->source;
+}
+
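+/* Allocate an aRFS rule for the flow described by @skb and hash it by its
+ * L4 ports; the steering rule itself is added later from arfs_handle_work().
+ */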
+static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+					 struct arfs_table *arfs_t,
+					 const struct sk_buff *skb,
+					 u16 rxq, u32 flow_id)
+{
+	struct arfs_rule *rule;
+	struct arfs_tuple *tuple;
+
+	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
+	if (!rule)
+		return NULL;
+
+	rule->priv = priv;
+	rule->rxq = rxq;
+	INIT_WORK(&rule->arfs_work, arfs_handle_work);
+
+	tuple = &rule->tuple;
+	tuple->etype = skb->protocol;
+	if (tuple->etype == htons(ETH_P_IP)) {
+		tuple->src_ipv4 = ip_hdr(skb)->saddr;
+		tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+	} else {
+		memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+		       sizeof(struct in6_addr));
+		memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+		       sizeof(struct in6_addr));
+	}
+	tuple->ip_proto = arfs_get_ip_proto(skb);
+	tuple->src_port = arfs_get_src_port(skb);
+	tuple->dst_port = arfs_get_dst_port(skb);
+
+	rule->flow_id = flow_id;
+	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+
+	hlist_add_head(&rule->hlist,
+		       arfs_hash_bucket(arfs_t, tuple->src_port,
+					tuple->dst_port));
+	return rule;
+}
+
+static bool arfs_cmp_ips(struct arfs_tuple *tuple,
+			 const struct sk_buff *skb)
+{
+	if (tuple->etype == htons(ETH_P_IP) &&
+	    tuple->src_ipv4 == ip_hdr(skb)->saddr &&
+	    tuple->dst_ipv4 == ip_hdr(skb)->daddr)
+		return true;
+	if (tuple->etype == htons(ETH_P_IPV6) &&
+	    (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+		     sizeof(struct in6_addr))) &&
+	    (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+		     sizeof(struct in6_addr))))
+		return true;
+	return false;
+}
+
+static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
+					const struct sk_buff *skb)
+{
+	struct arfs_rule *arfs_rule;
+	struct hlist_head *head;
+	__be16 src_port = arfs_get_src_port(skb);
+	__be16 dst_port = arfs_get_dst_port(skb);
+
+	head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+	hlist_for_each_entry(arfs_rule, head, hlist) {
+		if (arfs_rule->tuple.src_port == src_port &&
+		    arfs_rule->tuple.dst_port == dst_port &&
+		    arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+			return arfs_rule;
+		}
+	}
+
+	return NULL;
+}
+
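+/* ndo_rx_flow_steer() callback: look up or allocate the aRFS rule for this
+ * flow, queue work to (re)program its steering entry and return the filter
+ * id that RPS later hands back to rps_may_expire_flow().
+ */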
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+			u16 rxq_index, u32 flow_id)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+	struct arfs_table *arfs_t;
+	struct arfs_rule *arfs_rule;
+
+	if (skb->protocol != htons(ETH_P_IP) &&
+	    skb->protocol != htons(ETH_P_IPV6))
+		return -EPROTONOSUPPORT;
+
+	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+	if (!arfs_t)
+		return -EPROTONOSUPPORT;
+
+	spin_lock_bh(&arfs->arfs_lock);
+	arfs_rule = arfs_find_rule(arfs_t, skb);
+	if (arfs_rule) {
+		if (arfs_rule->rxq == rxq_index) {
+			spin_unlock_bh(&arfs->arfs_lock);
+			return arfs_rule->filter_id;
+		}
+		arfs_rule->rxq = rxq_index;
+	} else {
+		arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
+					    rxq_index, flow_id);
+		if (!arfs_rule) {
+			spin_unlock_bh(&arfs->arfs_lock);
+			return -ENOMEM;
+		}
+	}
+	queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
+	spin_unlock_bh(&arfs->arfs_lock);
+	return arfs_rule->filter_id;
+}
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 2018eeb..847a8f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -93,6 +93,8 @@
 	/* RX HW timestamp */
 	switch (config.rx_filter) {
 	case HWTSTAMP_FILTER_NONE:
+		/* Reset CQE compression to Admin default */
+		mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_admin);
 		break;
 	case HWTSTAMP_FILTER_ALL:
 	case HWTSTAMP_FILTER_SOME:
@@ -108,6 +110,8 @@
 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		/* Disable CQE compression */
+		mlx5e_modify_rx_cqe_compression(priv, false);
 		config.rx_filter = HWTSTAMP_FILTER_ALL;
 		break;
 	default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 3036f27..b2db180 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -174,8 +174,14 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	int i;
 
 	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		pfc->requests[i]    = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
+		pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
+	}
 
 	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 68834b7..fc7dcc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -138,10 +138,10 @@
 	[MLX5E_100BASE_TX]   = {
 		.speed      = 100,
 	},
-	[MLX5E_100BASE_T]    = {
-		.supported  = SUPPORTED_100baseT_Full,
-		.advertised = ADVERTISED_100baseT_Full,
-		.speed      = 100,
+	[MLX5E_1000BASE_T]    = {
+		.supported  = SUPPORTED_1000baseT_Full,
+		.advertised = ADVERTISED_1000baseT_Full,
+		.speed      = 1000,
 	},
 	[MLX5E_10GBASE_T]    = {
 		.supported  = SUPPORTED_10000baseT_Full,
@@ -165,26 +165,112 @@
 	},
 };
 
+static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 pfc_en_tx;
+	u8 pfc_en_rx;
+	int err;
+
+	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
+
+	return err ? 0 : pfc_en_tx | pfc_en_rx;
+}
+
+#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
+#define MLX5E_NUM_RQ_STATS(priv) \
+	(NUM_RQ_STATS * priv->params.num_channels * \
+	 test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_SQ_STATS(priv) \
+	(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
+	 test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
+
 static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
 	switch (sset) {
 	case ETH_SS_STATS:
-		return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
-		       priv->params.num_channels * NUM_RQ_STATS +
-		       priv->params.num_channels * priv->params.num_tc *
-						   NUM_SQ_STATS;
+		return NUM_SW_COUNTERS +
+		       MLX5E_NUM_Q_CNTRS(priv) +
+		       NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+		       MLX5E_NUM_RQ_STATS(priv) +
+		       MLX5E_NUM_SQ_STATS(priv) +
+		       MLX5E_NUM_PFC_COUNTERS(priv);
 	/* fallthrough */
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
+static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+	int i, j, tc, prio, idx = 0;
+	unsigned long pfc_combined;
+
+	/* SW counters */
+	for (i = 0; i < NUM_SW_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+
+	/* Q counters */
+	for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+
+	/* VPORT counters */
+	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       vport_stats_desc[i].name);
+
+	/* PPORT counters */
+	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       pport_802_3_stats_desc[i].name);
+
+	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       pport_2863_stats_desc[i].name);
+
+	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       pport_2819_stats_desc[i].name);
+
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+			sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
+				prio,
+				pport_per_prio_traffic_stats_desc[i].name);
+	}
+
+	pfc_combined = mlx5e_query_pfc_combined(priv);
+	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+			sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
+				prio, pport_per_prio_pfc_stats_desc[i].name);
+		}
+	}
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return;
+
+	/* per channel counters */
+	for (i = 0; i < priv->params.num_channels; i++)
+		for (j = 0; j < NUM_RQ_STATS; j++)
+			sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
+				rq_stats_desc[j].name);
+
+	for (tc = 0; tc < priv->params.num_tc; tc++)
+		for (i = 0; i < priv->params.num_channels; i++)
+			for (j = 0; j < NUM_SQ_STATS; j++)
+				sprintf(data + (idx++) * ETH_GSTRING_LEN,
+					"tx%d_%s",
+					priv->channeltc_to_txq_map[i][tc],
+					sq_stats_desc[j].name);
+}
+
 static void mlx5e_get_strings(struct net_device *dev,
 			      uint32_t stringset, uint8_t *data)
 {
-	int i, j, tc, idx = 0;
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
 	switch (stringset) {
@@ -195,30 +281,7 @@
 		break;
 
 	case ETH_SS_STATS:
-		/* VPORT counters */
-		for (i = 0; i < NUM_VPORT_COUNTERS; i++)
-			strcpy(data + (idx++) * ETH_GSTRING_LEN,
-			       vport_strings[i]);
-
-		/* PPORT counters */
-		for (i = 0; i < NUM_PPORT_COUNTERS; i++)
-			strcpy(data + (idx++) * ETH_GSTRING_LEN,
-			       pport_strings[i]);
-
-		/* per channel counters */
-		for (i = 0; i < priv->params.num_channels; i++)
-			for (j = 0; j < NUM_RQ_STATS; j++)
-				sprintf(data + (idx++) * ETH_GSTRING_LEN,
-					"rx%d_%s", i, rq_stats_strings[j]);
-
-		for (tc = 0; tc < priv->params.num_tc; tc++)
-			for (i = 0; i < priv->params.num_channels; i++)
-				for (j = 0; j < NUM_SQ_STATS; j++)
-					sprintf(data +
-					      (idx++) * ETH_GSTRING_LEN,
-					      "tx%d_%s",
-					      priv->channeltc_to_txq_map[i][tc],
-					      sq_stats_strings[j]);
+		mlx5e_fill_stats_strings(priv, data);
 		break;
 	}
 }
@@ -227,7 +290,8 @@
 				    struct ethtool_stats *stats, u64 *data)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int i, j, tc, idx = 0;
+	int i, j, tc, prio, idx = 0;
+	unsigned long pfc_combined;
 
 	if (!data)
 		return;
@@ -237,33 +301,68 @@
 		mlx5e_update_stats(priv);
 	mutex_unlock(&priv->state_lock);
 
-	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
-		data[idx++] = ((u64 *)&priv->stats.vport)[i];
+	for (i = 0; i < NUM_SW_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+						   sw_stats_desc, i);
 
-	for (i = 0; i < NUM_PPORT_COUNTERS; i++)
-		data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+	for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+						   q_stats_desc, i);
+
+	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+						  vport_stats_desc, i);
+
+	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
+						  pport_802_3_stats_desc, i);
+
+	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
+						  pport_2863_stats_desc, i);
+
+	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
+						  pport_2819_stats_desc, i);
+
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+						 pport_per_prio_traffic_stats_desc, i);
+	}
+
+	pfc_combined = mlx5e_query_pfc_combined(priv);
+	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+							  pport_per_prio_pfc_stats_desc, i);
+		}
+	}
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return;
 
 	/* per channel counters */
 	for (i = 0; i < priv->params.num_channels; i++)
 		for (j = 0; j < NUM_RQ_STATS; j++)
-			data[idx++] = !test_bit(MLX5E_STATE_OPENED,
-						&priv->state) ? 0 :
-				       ((u64 *)&priv->channel[i]->rq.stats)[j];
+			data[idx++] =
+			       MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
+						    rq_stats_desc, j);
 
 	for (tc = 0; tc < priv->params.num_tc; tc++)
 		for (i = 0; i < priv->params.num_channels; i++)
 			for (j = 0; j < NUM_SQ_STATS; j++)
-				data[idx++] = !test_bit(MLX5E_STATE_OPENED,
-							&priv->state) ? 0 :
-				((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+				data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
+								   sq_stats_desc, j);
 }
 
 static void mlx5e_get_ringparam(struct net_device *dev,
 				struct ethtool_ringparam *param)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
+	int rq_wq_type = priv->params.rq_wq_type;
 
-	param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+	param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
 	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
 	param->rx_pending     = 1 << priv->params.log_rq_size;
 	param->tx_pending     = 1 << priv->params.log_sq_size;
@@ -274,6 +373,7 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	bool was_opened;
+	int rq_wq_type = priv->params.rq_wq_type;
 	u16 min_rx_wqes;
 	u8 log_rq_size;
 	u8 log_sq_size;
@@ -289,16 +389,16 @@
 			    __func__);
 		return -EINVAL;
 	}
-	if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+	if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
 		netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
 			    __func__, param->rx_pending,
-			    1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+			    1 << mlx5_min_log_rq_size(rq_wq_type));
 		return -EINVAL;
 	}
-	if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+	if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
 		netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
 			    __func__, param->rx_pending,
-			    1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+			    1 << mlx5_max_log_rq_size(rq_wq_type));
 		return -EINVAL;
 	}
 	if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
@@ -316,8 +416,7 @@
 
 	log_rq_size = order_base_2(param->rx_pending);
 	log_sq_size = order_base_2(param->tx_pending);
-	min_rx_wqes = min_t(u16, param->rx_pending - 1,
-			    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+	min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
 
 	if (log_rq_size == priv->params.log_rq_size &&
 	    log_sq_size == priv->params.log_sq_size &&
@@ -357,6 +456,7 @@
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int ncv = mlx5e_get_max_num_channels(priv->mdev);
 	unsigned int count = ch->combined_count;
+	bool arfs_enabled;
 	bool was_opened;
 	int err = 0;
 
@@ -385,13 +485,27 @@
 	if (was_opened)
 		mlx5e_close_locked(dev);
 
+	arfs_enabled = dev->features & NETIF_F_NTUPLE;
+	if (arfs_enabled)
+		mlx5e_arfs_disable(priv);
+
 	priv->params.num_channels = count;
-	mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+	mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
 				      MLX5E_INDIR_RQT_SIZE, count);
 
 	if (was_opened)
 		err = mlx5e_open_locked(dev);
+	if (err)
+		goto out;
 
+	if (arfs_enabled) {
+		err = mlx5e_arfs_enable(priv);
+		if (err)
+			netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
+				   __func__, err);
+	}
+
+out:
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -499,6 +613,25 @@
 	return 0;
 }
 
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+	u32 max_speed = 0;
+	u32 proto_cap;
+	int err;
+	int i;
+
+	err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
+	if (err)
+		return err;
+
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
+		if (proto_cap & MLX5E_PROT_MASK(i))
+			max_speed = max(max_speed, ptys2ethtool_table[i].speed);
+
+	*speed = max_speed;
+	return 0;
+}
+
 static void get_speed_duplex(struct net_device *netdev,
 			     u32 eth_proto_oper,
 			     struct ethtool_cmd *cmd)
@@ -727,9 +860,8 @@
 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
 	mlx5e_build_tir_ctx_hash(tirc, priv);
 
-	for (i = 0; i < MLX5E_NUM_TT; i++)
-		if (IS_HASHING_TT(i))
-			mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
+	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+		mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen);
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -751,9 +883,11 @@
 	mutex_lock(&priv->state_lock);
 
 	if (indir) {
+		u32 rqtn = priv->indir_rqtn;
+
 		memcpy(priv->params.indirection_rqt, indir,
 		       sizeof(priv->params.indirection_rqt));
-		mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
 	}
 
 	if (key)
@@ -1036,6 +1170,108 @@
 	return mlx5_set_port_wol(mdev, mlx5_wol_mode);
 }
 
+static int mlx5e_set_phys_id(struct net_device *dev,
+			     enum ethtool_phys_id_state state)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u16 beacon_duration;
+
+	if (!MLX5_CAP_GEN(mdev, beacon_led))
+		return -EOPNOTSUPP;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		beacon_duration = MLX5_BEACON_DURATION_INF;
+		break;
+	case ETHTOOL_ID_INACTIVE:
+		beacon_duration = MLX5_BEACON_DURATION_OFF;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return mlx5_set_port_beacon(mdev, beacon_duration);
+}
+
+static int mlx5e_get_module_info(struct net_device *netdev,
+				 struct ethtool_modinfo *modinfo)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *dev = priv->mdev;
+	int size_read = 0;
+	u8 data[4];
+
+	size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
+	if (size_read < 2)
+		return -EIO;
+
+	/* data[0] = identifier byte */
+	switch (data[0]) {
+	case MLX5_MODULE_ID_QSFP:
+		modinfo->type       = ETH_MODULE_SFF_8436;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+		break;
+	case MLX5_MODULE_ID_QSFP_PLUS:
+	case MLX5_MODULE_ID_QSFP28:
+		/* data[1] = revision id */
+		if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
+			modinfo->type       = ETH_MODULE_SFF_8636;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+		} else {
+			modinfo->type       = ETH_MODULE_SFF_8436;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+		}
+		break;
+	case MLX5_MODULE_ID_SFP:
+		modinfo->type       = ETH_MODULE_SFF_8472;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		break;
+	default:
+		netdev_err(priv->netdev, "%s: cable type not recognized: 0x%x\n",
+			   __func__, data[0]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mlx5e_get_module_eeprom(struct net_device *netdev,
+				   struct ethtool_eeprom *ee,
+				   u8 *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int offset = ee->offset;
+	int size_read;
+	int i = 0;
+
+	if (!ee->len)
+		return -EINVAL;
+
+	memset(data, 0, ee->len);
+
+	while (i < ee->len) {
+		size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
+						     data + i);
+
+		if (!size_read)
+			/* Done reading */
+			return 0;
+
+		if (size_read < 0) {
+			netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom failed: 0x%x\n",
+				   __func__, size_read);
+			return 0;
+		}
+
+		i += size_read;
+		offset += size_read;
+	}
+
+	return 0;
+}
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
 	.get_drvinfo       = mlx5e_get_drvinfo,
 	.get_link          = ethtool_op_get_link,
@@ -1060,6 +1296,9 @@
 	.get_pauseparam    = mlx5e_get_pauseparam,
 	.set_pauseparam    = mlx5e_set_pauseparam,
 	.get_ts_info       = mlx5e_get_ts_info,
+	.set_phys_id       = mlx5e_set_phys_id,
 	.get_wol	   = mlx5e_get_wol,
 	.set_wol	   = mlx5e_set_wol,
+	.get_module_info   = mlx5e_get_module_info,
+	.get_module_eeprom = mlx5e_get_module_eeprom,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index d00a242..b327400 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -37,7 +37,10 @@
 #include <linux/mlx5/fs.h>
 #include "en.h"
 
-#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+				  struct mlx5e_l2_rule *ai, int type);
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+				   struct mlx5e_l2_rule *ai);
 
 enum {
 	MLX5E_FULLMATCH = 0,
@@ -58,21 +61,21 @@
 	MLX5E_ACTION_DEL  = 2,
 };
 
-struct mlx5e_eth_addr_hash_node {
+struct mlx5e_l2_hash_node {
 	struct hlist_node          hlist;
 	u8                         action;
-	struct mlx5e_eth_addr_info ai;
+	struct mlx5e_l2_rule ai;
 };
 
-static inline int mlx5e_hash_eth_addr(u8 *addr)
+static inline int mlx5e_hash_l2(u8 *addr)
 {
 	return addr[5];
 }
 
-static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
 {
-	struct mlx5e_eth_addr_hash_node *hn;
-	int ix = mlx5e_hash_eth_addr(addr);
+	struct mlx5e_l2_hash_node *hn;
+	int ix = mlx5e_hash_l2(addr);
 	int found = 0;
 
 	hlist_for_each_entry(hn, &hash[ix], hlist)
@@ -96,371 +99,12 @@
 	hlist_add_head(&hn->hlist, &hash[ix]);
 }
 
-static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
 {
 	hlist_del(&hn->hlist);
 	kfree(hn);
 }
 
-static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
-					       struct mlx5e_eth_addr_info *ai)
-{
-	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
-
-	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
-
-	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
-
-	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
-
-	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);
-
-	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);
-
-	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);
-
-	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);
-
-	if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);
-
-	if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);
-
-	if (ai->tt_vec & BIT(MLX5E_TT_ANY))
-		mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
-}
-
-static int mlx5e_get_eth_addr_type(u8 *addr)
-{
-	if (is_unicast_ether_addr(addr))
-		return MLX5E_UC;
-
-	if ((addr[0] == 0x01) &&
-	    (addr[1] == 0x00) &&
-	    (addr[2] == 0x5e) &&
-	   !(addr[3] &  0x80))
-		return MLX5E_MC_IPV4;
-
-	if ((addr[0] == 0x33) &&
-	    (addr[1] == 0x33))
-		return MLX5E_MC_IPV6;
-
-	return MLX5E_MC_OTHER;
-}
-
-static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
-{
-	int eth_addr_type;
-	u32 ret;
-
-	switch (type) {
-	case MLX5E_FULLMATCH:
-		eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
-		switch (eth_addr_type) {
-		case MLX5E_UC:
-			ret =
-				BIT(MLX5E_TT_IPV4_TCP)       |
-				BIT(MLX5E_TT_IPV6_TCP)       |
-				BIT(MLX5E_TT_IPV4_UDP)       |
-				BIT(MLX5E_TT_IPV6_UDP)       |
-				BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
-				BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
-				BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
-				BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
-				BIT(MLX5E_TT_IPV4)           |
-				BIT(MLX5E_TT_IPV6)           |
-				BIT(MLX5E_TT_ANY)            |
-				0;
-			break;
-
-		case MLX5E_MC_IPV4:
-			ret =
-				BIT(MLX5E_TT_IPV4_UDP)       |
-				BIT(MLX5E_TT_IPV4)           |
-				0;
-			break;
-
-		case MLX5E_MC_IPV6:
-			ret =
-				BIT(MLX5E_TT_IPV6_UDP)       |
-				BIT(MLX5E_TT_IPV6)           |
-				0;
-			break;
-
-		case MLX5E_MC_OTHER:
-			ret =
-				BIT(MLX5E_TT_ANY)            |
-				0;
-			break;
-		}
-
-		break;
-
-	case MLX5E_ALLMULTI:
-		ret =
-			BIT(MLX5E_TT_IPV4_UDP) |
-			BIT(MLX5E_TT_IPV6_UDP) |
-			BIT(MLX5E_TT_IPV4)     |
-			BIT(MLX5E_TT_IPV6)     |
-			BIT(MLX5E_TT_ANY)      |
-			0;
-		break;
-
-	default: /* MLX5E_PROMISC */
-		ret =
-			BIT(MLX5E_TT_IPV4_TCP)       |
-			BIT(MLX5E_TT_IPV6_TCP)       |
-			BIT(MLX5E_TT_IPV4_UDP)       |
-			BIT(MLX5E_TT_IPV6_UDP)       |
-			BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
-			BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
-			BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
-			BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
-			BIT(MLX5E_TT_IPV4)           |
-			BIT(MLX5E_TT_IPV6)           |
-			BIT(MLX5E_TT_ANY)            |
-			0;
-		break;
-	}
-
-	return ret;
-}
-
-static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
-				     struct mlx5e_eth_addr_info *ai,
-				     int type, u32 *mc, u32 *mv)
-{
-	struct mlx5_flow_destination dest;
-	u8 match_criteria_enable = 0;
-	struct mlx5_flow_rule **rule_p;
-	struct mlx5_flow_table *ft = priv->fts.main.t;
-	u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
-				   outer_headers.dmac_47_16);
-	u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
-				   outer_headers.dmac_47_16);
-	u32 *tirn = priv->tirn;
-	u32 tt_vec;
-	int err = 0;
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-
-	switch (type) {
-	case MLX5E_FULLMATCH:
-		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		eth_broadcast_addr(mc_dmac);
-		ether_addr_copy(mv_dmac, ai->addr);
-		break;
-
-	case MLX5E_ALLMULTI:
-		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		mc_dmac[0] = 0x01;
-		mv_dmac[0] = 0x01;
-		break;
-
-	case MLX5E_PROMISC:
-		break;
-	}
-
-	tt_vec = mlx5e_get_tt_vec(ai, type);
-
-	if (tt_vec & BIT(MLX5E_TT_ANY)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_ANY];
-		dest.tir_num = tirn[MLX5E_TT_ANY];
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-		ai->tt_vec |= BIT(MLX5E_TT_ANY);
-	}
-
-	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
-	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
-		dest.tir_num = tirn[MLX5E_TT_IPV4];
-		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-			 ETH_P_IP);
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
-	}
-
-	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
-		dest.tir_num = tirn[MLX5E_TT_IPV6];
-		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-			 ETH_P_IPV6);
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
-	}
-
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
-	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
-
-	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
-		dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
-		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-			 ETH_P_IP);
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
-	}
-
-	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
-		dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
-		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-			 ETH_P_IPV6);
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
-	}
-
-	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
-
-	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
-		dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
-		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-			 ETH_P_IP);
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
-	}
-
-	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
-		dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
-		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-			 ETH_P_IPV6);
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-
-		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
-	}
-
-	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
-
-	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
-		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
-		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-			 ETH_P_IP);
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
-	}
-
-	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
-		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
-		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-			 ETH_P_IPV6);
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
-	}
-
-	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
-
-	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
-		dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
-		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-			 ETH_P_IP);
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
-	}
-
-	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
-		rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
-		dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
-		MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
-			 ETH_P_IPV6);
-		*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
-					     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-					     MLX5_FS_DEFAULT_FLOW_TAG, &dest);
-		if (IS_ERR_OR_NULL(*rule_p))
-			goto err_del_ai;
-		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
-	}
-
-	return 0;
-
-err_del_ai:
-	err = PTR_ERR(*rule_p);
-	*rule_p = NULL;
-	mlx5e_del_eth_addr_from_flow_table(priv, ai);
-
-	return err;
-}
-
-static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
-				   struct mlx5e_eth_addr_info *ai, int type)
-{
-	u32 *match_criteria;
-	u32 *match_value;
-	int err = 0;
-
-	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	if (!match_value || !match_criteria) {
-		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-		err = -ENOMEM;
-		goto add_eth_addr_rule_out;
-	}
-
-	err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
-					match_value);
-
-add_eth_addr_rule_out:
-	kvfree(match_criteria);
-	kvfree(match_value);
-
-	return err;
-}
-
 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 {
 	struct net_device *ndev = priv->netdev;
@@ -472,7 +116,7 @@
 	int i;
 
 	list_size = 0;
-	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
+	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
 		list_size++;
 
 	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -489,7 +133,7 @@
 		return -ENOMEM;
 
 	i = 0;
-	for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
+	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
 		if (i >= list_size)
 			break;
 		vlans[i++] = vlan;
@@ -514,28 +158,28 @@
 				 enum mlx5e_vlan_rule_type rule_type,
 				 u16 vid, u32 *mc, u32 *mv)
 {
-	struct mlx5_flow_table *ft = priv->fts.vlan.t;
+	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
 	struct mlx5_flow_destination dest;
 	u8 match_criteria_enable = 0;
 	struct mlx5_flow_rule **rule_p;
 	int err = 0;
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = priv->fts.main.t;
+	dest.ft = priv->fs.l2.ft.t;
 
 	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
 
 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-		rule_p = &priv->vlan.untagged_rule;
+		rule_p = &priv->fs.vlan.untagged_rule;
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-		rule_p = &priv->vlan.any_vlan_rule;
+		rule_p = &priv->fs.vlan.any_vlan_rule;
 		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
 		break;
 	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
-		rule_p = &priv->vlan.active_vlans_rule[vid];
+		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
 		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
 		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
 		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
@@ -589,22 +233,22 @@
 {
 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-		if (priv->vlan.untagged_rule) {
-			mlx5_del_flow_rule(priv->vlan.untagged_rule);
-			priv->vlan.untagged_rule = NULL;
+		if (priv->fs.vlan.untagged_rule) {
+			mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
+			priv->fs.vlan.untagged_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-		if (priv->vlan.any_vlan_rule) {
-			mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
-			priv->vlan.any_vlan_rule = NULL;
+		if (priv->fs.vlan.any_vlan_rule) {
+			mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
+			priv->fs.vlan.any_vlan_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
 		mlx5e_vport_context_update_vlans(priv);
-		if (priv->vlan.active_vlans_rule[vid]) {
-			mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
-			priv->vlan.active_vlans_rule[vid] = NULL;
+		if (priv->fs.vlan.active_vlans_rule[vid]) {
+			mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
+			priv->fs.vlan.active_vlans_rule[vid] = NULL;
 		}
 		mlx5e_vport_context_update_vlans(priv);
 		break;
@@ -613,10 +257,10 @@
 
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
 {
-	if (!priv->vlan.filter_disabled)
+	if (!priv->fs.vlan.filter_disabled)
 		return;
 
-	priv->vlan.filter_disabled = false;
+	priv->fs.vlan.filter_disabled = false;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
@@ -624,10 +268,10 @@
 
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
 {
-	if (priv->vlan.filter_disabled)
+	if (priv->fs.vlan.filter_disabled)
 		return;
 
-	priv->vlan.filter_disabled = true;
+	priv->fs.vlan.filter_disabled = true;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
 	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
@@ -638,7 +282,7 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
-	set_bit(vid, priv->vlan.active_vlans);
+	set_bit(vid, priv->fs.vlan.active_vlans);
 
 	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
 }
@@ -648,7 +292,7 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
-	clear_bit(vid, priv->vlan.active_vlans);
+	clear_bit(vid, priv->fs.vlan.active_vlans);
 
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
 
@@ -656,21 +300,21 @@
 }
 
 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
-	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
 		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
 
-static void mlx5e_execute_action(struct mlx5e_priv *priv,
-				 struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+				    struct mlx5e_l2_hash_node *hn)
 {
 	switch (hn->action) {
 	case MLX5E_ACTION_ADD:
-		mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
 		hn->action = MLX5E_ACTION_NONE;
 		break;
 
 	case MLX5E_ACTION_DEL:
-		mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
-		mlx5e_del_eth_addr_from_hash(hn);
+		mlx5e_del_l2_flow_rule(priv, &hn->ai);
+		mlx5e_del_l2_from_hash(hn);
 		break;
 	}
 }
@@ -682,14 +326,14 @@
 
 	netif_addr_lock_bh(netdev);
 
-	mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
-				   priv->netdev->dev_addr);
+	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
+			     priv->netdev->dev_addr);
 
 	netdev_for_each_uc_addr(ha, netdev)
-		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
 
 	netdev_for_each_mc_addr(ha, netdev)
-		mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
 
 	netif_addr_unlock_bh(netdev);
 }
@@ -699,17 +343,17 @@
 {
 	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
 	struct net_device *ndev = priv->netdev;
-	struct mlx5e_eth_addr_hash_node *hn;
+	struct mlx5e_l2_hash_node *hn;
 	struct hlist_head *addr_list;
 	struct hlist_node *tmp;
 	int i = 0;
 	int hi;
 
-	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
 
 	if (is_uc) /* Make sure our own address is pushed first */
 		ether_addr_copy(addr_array[i++], ndev->dev_addr);
-	else if (priv->eth_addr.broadcast_enabled)
+	else if (priv->fs.l2.broadcast_enabled)
 		ether_addr_copy(addr_array[i++], ndev->broadcast);
 
 	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -725,7 +369,7 @@
 						 int list_type)
 {
 	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
-	struct mlx5e_eth_addr_hash_node *hn;
+	struct mlx5e_l2_hash_node *hn;
 	u8 (*addr_array)[ETH_ALEN] = NULL;
 	struct hlist_head *addr_list;
 	struct hlist_node *tmp;
@@ -734,12 +378,12 @@
 	int err;
 	int hi;
 
-	size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
+	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
 	max_size = is_uc ?
 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
 
-	addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
 	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
 		size++;
 
@@ -770,7 +414,7 @@
 
 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
 {
-	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+	struct mlx5e_l2_table *ea = &priv->fs.l2;
 
 	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
 	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
@@ -781,26 +425,26 @@
 
 static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
 {
-	struct mlx5e_eth_addr_hash_node *hn;
+	struct mlx5e_l2_hash_node *hn;
 	struct hlist_node *tmp;
 	int i;
 
-	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
-		mlx5e_execute_action(priv, hn);
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+		mlx5e_execute_l2_action(priv, hn);
 
-	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
-		mlx5e_execute_action(priv, hn);
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+		mlx5e_execute_l2_action(priv, hn);
 }
 
 static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
 {
-	struct mlx5e_eth_addr_hash_node *hn;
+	struct mlx5e_l2_hash_node *hn;
 	struct hlist_node *tmp;
 	int i;
 
-	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
 		hn->action = MLX5E_ACTION_DEL;
-	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
 		hn->action = MLX5E_ACTION_DEL;
 
 	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
@@ -814,7 +458,7 @@
 	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
 					       set_rx_mode_work);
 
-	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+	struct mlx5e_l2_table *ea = &priv->fs.l2;
 	struct net_device *ndev = priv->netdev;
 
 	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
@@ -830,27 +474,27 @@
 	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
 
 	if (enable_promisc) {
-		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
-		if (!priv->vlan.filter_disabled)
+		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
+		if (!priv->fs.vlan.filter_disabled)
 			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
 					    0);
 	}
 	if (enable_allmulti)
-		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
 	if (enable_broadcast)
-		mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
 
 	mlx5e_handle_netdev_addr(priv);
 
 	if (disable_broadcast)
-		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
 	if (disable_allmulti)
-		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
 	if (disable_promisc) {
-		if (!priv->vlan.filter_disabled)
+		if (!priv->fs.vlan.filter_disabled)
 			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
 					    0);
-		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
 	}
 
 	ea->promisc_enabled   = promisc_enabled;
@@ -872,224 +516,454 @@
 	ft->num_groups = 0;
 }
 
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
 {
-	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
 }
 
-#define MLX5E_MAIN_GROUP0_SIZE	BIT(3)
-#define MLX5E_MAIN_GROUP1_SIZE	BIT(1)
-#define MLX5E_MAIN_GROUP2_SIZE	BIT(0)
-#define MLX5E_MAIN_GROUP3_SIZE	BIT(14)
-#define MLX5E_MAIN_GROUP4_SIZE	BIT(13)
-#define MLX5E_MAIN_GROUP5_SIZE	BIT(11)
-#define MLX5E_MAIN_GROUP6_SIZE	BIT(2)
-#define MLX5E_MAIN_GROUP7_SIZE	BIT(1)
-#define MLX5E_MAIN_GROUP8_SIZE	BIT(0)
-#define MLX5E_MAIN_TABLE_SIZE	(MLX5E_MAIN_GROUP0_SIZE +\
-				 MLX5E_MAIN_GROUP1_SIZE +\
-				 MLX5E_MAIN_GROUP2_SIZE +\
-				 MLX5E_MAIN_GROUP3_SIZE +\
-				 MLX5E_MAIN_GROUP4_SIZE +\
-				 MLX5E_MAIN_GROUP5_SIZE +\
-				 MLX5E_MAIN_GROUP6_SIZE +\
-				 MLX5E_MAIN_GROUP7_SIZE +\
-				 MLX5E_MAIN_GROUP8_SIZE)
-
-static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
-				      int inlen)
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
 {
-	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-	u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
-				match_criteria.outer_headers.dmac_47_16);
+	mlx5e_destroy_groups(ft);
+	kfree(ft->g);
+	mlx5_destroy_flow_table(ft->t);
+	ft->t = NULL;
+}
+
+static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++) {
+		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
+			mlx5_del_flow_rule(ttc->rules[i]);
+			ttc->rules[i] = NULL;
+		}
+	}
+}
+
+static struct {
+	u16 etype;
+	u8 proto;
+} ttc_rules[] = {
+	[MLX5E_TT_IPV4_TCP] = {
+		.etype = ETH_P_IP,
+		.proto = IPPROTO_TCP,
+	},
+	[MLX5E_TT_IPV6_TCP] = {
+		.etype = ETH_P_IPV6,
+		.proto = IPPROTO_TCP,
+	},
+	[MLX5E_TT_IPV4_UDP] = {
+		.etype = ETH_P_IP,
+		.proto = IPPROTO_UDP,
+	},
+	[MLX5E_TT_IPV6_UDP] = {
+		.etype = ETH_P_IPV6,
+		.proto = IPPROTO_UDP,
+	},
+	[MLX5E_TT_IPV4_IPSEC_AH] = {
+		.etype = ETH_P_IP,
+		.proto = IPPROTO_AH,
+	},
+	[MLX5E_TT_IPV6_IPSEC_AH] = {
+		.etype = ETH_P_IPV6,
+		.proto = IPPROTO_AH,
+	},
+	[MLX5E_TT_IPV4_IPSEC_ESP] = {
+		.etype = ETH_P_IP,
+		.proto = IPPROTO_ESP,
+	},
+	[MLX5E_TT_IPV6_IPSEC_ESP] = {
+		.etype = ETH_P_IPV6,
+		.proto = IPPROTO_ESP,
+	},
+	[MLX5E_TT_IPV4] = {
+		.etype = ETH_P_IP,
+		.proto = 0,
+	},
+	[MLX5E_TT_IPV6] = {
+		.etype = ETH_P_IPV6,
+		.proto = 0,
+	},
+	[MLX5E_TT_ANY] = {
+		.etype = 0,
+		.proto = 0,
+	},
+};
+
+static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
+						      struct mlx5_flow_table *ft,
+						      struct mlx5_flow_destination *dest,
+						      u16 etype,
+						      u8 proto)
+{
+	struct mlx5_flow_rule *rule;
+	u8 match_criteria_enable = 0;
+	u32 *match_criteria;
+	u32 *match_value;
+	int err = 0;
+
+	match_value	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	match_criteria	= mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	if (!match_value || !match_criteria) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (proto) {
+		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
+		MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
+	}
+	if (etype) {
+		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
+	}
+
+	rule = mlx5_add_flow_rule(ft, match_criteria_enable,
+				  match_criteria, match_value,
+				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+				  MLX5_FS_DEFAULT_FLOW_TAG,
+				  dest);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+	}
+out:
+	kvfree(match_criteria);
+	kvfree(match_value);
+	return err ? ERR_PTR(err) : rule;
+}
+
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+{
+	struct mlx5_flow_destination dest;
+	struct mlx5e_ttc_table *ttc;
+	struct mlx5_flow_rule **rules;
+	struct mlx5_flow_table *ft;
+	int tt;
 	int err;
+
+	ttc = &priv->fs.ttc;
+	ft = ttc->ft.t;
+	rules = ttc->rules;
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+		if (tt == MLX5E_TT_ANY)
+			dest.tir_num = priv->direct_tir[0].tirn;
+		else
+			dest.tir_num = priv->indir_tirn[tt];
+		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
+						    ttc_rules[tt].etype,
+						    ttc_rules[tt].proto);
+		if (IS_ERR(rules[tt]))
+			goto del_rules;
+	}
+
+	return 0;
+
+del_rules:
+	err = PTR_ERR(rules[tt]);
+	rules[tt] = NULL;
+	mlx5e_cleanup_ttc_rules(ttc);
+	return err;
+}
+
+#define MLX5E_TTC_NUM_GROUPS	3
+#define MLX5E_TTC_GROUP1_SIZE	BIT(3)
+#define MLX5E_TTC_GROUP2_SIZE	BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE	BIT(0)
+#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
+				 MLX5E_TTC_GROUP2_SIZE +\
+				 MLX5E_TTC_GROUP3_SIZE)
+static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5e_flow_table *ft = &ttc->ft;
 	int ix = 0;
+	u32 *in;
+	int err;
+	u8 *mc;
 
-	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
+			sizeof(*ft->g), GFP_KERNEL);
+	if (!ft->g)
+		return -ENOMEM;
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		kfree(ft->g);
+		return -ENOMEM;
+	}
+
+	/* L4 Group */
+	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_MAIN_GROUP0_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
-	ft->num_groups++;
-
-	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
 	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_MAIN_GROUP1_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
-	ft->num_groups++;
-
-	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_MAIN_GROUP2_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
-	ft->num_groups++;
-
-	memset(in, 0, inlen);
 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
-	eth_broadcast_addr(dmac);
 	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_MAIN_GROUP3_SIZE;
+	ix += MLX5E_TTC_GROUP1_SIZE;
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
 	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
+		goto err;
 	ft->num_groups++;
 
+	/* L3 Group */
+	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_TTC_GROUP2_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err;
+	ft->num_groups++;
+
+	/* Any Group */
 	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-	eth_broadcast_addr(dmac);
 	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_MAIN_GROUP4_SIZE;
+	ix += MLX5E_TTC_GROUP3_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err;
+	ft->num_groups++;
+
+	kvfree(in);
+	return 0;
+
+err:
+	err = PTR_ERR(ft->g[ft->num_groups]);
+	ft->g[ft->num_groups] = NULL;
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+{
+	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+
+	mlx5e_cleanup_ttc_rules(ttc);
+	mlx5e_destroy_flow_table(&ttc->ft);
+}
+
+static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+{
+	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+	struct mlx5e_flow_table *ft = &ttc->ft;
+	int err;
+
+	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+				       MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
+	if (IS_ERR(ft->t)) {
+		err = PTR_ERR(ft->t);
+		ft->t = NULL;
+		return err;
+	}
+
+	err = mlx5e_create_ttc_table_groups(ttc);
+	if (err)
+		goto err;
+
+	err = mlx5e_generate_ttc_table_rules(priv);
+	if (err)
+		goto err;
+
+	return 0;
+err:
+	mlx5e_destroy_flow_table(ft);
+	return err;
+}
+
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+				   struct mlx5e_l2_rule *ai)
+{
+	if (!IS_ERR_OR_NULL(ai->rule)) {
+		mlx5_del_flow_rule(ai->rule);
+		ai->rule = NULL;
+	}
+}
+
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+				  struct mlx5e_l2_rule *ai, int type)
+{
+	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+	struct mlx5_flow_destination dest;
+	u8 match_criteria_enable = 0;
+	u32 *match_criteria;
+	u32 *match_value;
+	int err = 0;
+	u8 *mc_dmac;
+	u8 *mv_dmac;
+
+	match_value    = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+	if (!match_value || !match_criteria) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		err = -ENOMEM;
+		goto add_l2_rule_out;
+	}
+
+	mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+			       outer_headers.dmac_47_16);
+	mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+			       outer_headers.dmac_47_16);
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = priv->fs.ttc.ft.t;
+
+	switch (type) {
+	case MLX5E_FULLMATCH:
+		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		eth_broadcast_addr(mc_dmac);
+		ether_addr_copy(mv_dmac, ai->addr);
+		break;
+
+	case MLX5E_ALLMULTI:
+		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		mc_dmac[0] = 0x01;
+		mv_dmac[0] = 0x01;
+		break;
+
+	case MLX5E_PROMISC:
+		break;
+	}
+
+	ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
+				      match_value,
+				      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+				      MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+	if (IS_ERR(ai->rule)) {
+		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
+			   __func__, mv_dmac);
+		err = PTR_ERR(ai->rule);
+		ai->rule = NULL;
+	}
+
+add_l2_rule_out:
+	kvfree(match_criteria);
+	kvfree(match_value);
+
+	return err;
+}
+
+#define MLX5E_NUM_L2_GROUPS	   3
+#define MLX5E_L2_GROUP1_SIZE	   BIT(0)
+#define MLX5E_L2_GROUP2_SIZE	   BIT(15)
+#define MLX5E_L2_GROUP3_SIZE	   BIT(0)
+#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
+				    MLX5E_L2_GROUP2_SIZE +\
+				    MLX5E_L2_GROUP3_SIZE)
+static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5e_flow_table *ft = &l2_table->ft;
+	int ix = 0;
+	u8 *mc_dmac;
+	u32 *in;
+	int err;
+	u8 *mc;
+
+	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+	if (!ft->g)
+		return -ENOMEM;
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		kfree(ft->g);
+		return -ENOMEM;
+	}
+
+	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
+			       outer_headers.dmac_47_16);
+	/* Flow Group for promiscuous */
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_L2_GROUP1_SIZE;
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
 	if (IS_ERR(ft->g[ft->num_groups]))
 		goto err_destroy_groups;
 	ft->num_groups++;
 
-	memset(in, 0, inlen);
+	/* Flow Group for full match */
+	eth_broadcast_addr(mc_dmac);
 	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	eth_broadcast_addr(dmac);
 	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_MAIN_GROUP5_SIZE;
+	ix += MLX5E_L2_GROUP2_SIZE;
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
 	if (IS_ERR(ft->g[ft->num_groups]))
 		goto err_destroy_groups;
 	ft->num_groups++;
 
-	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
-	dmac[0] = 0x01;
+	/* Flow Group for allmulti */
+	eth_zero_addr(mc_dmac);
+	mc_dmac[0] = 0x01;
 	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_MAIN_GROUP6_SIZE;
+	ix += MLX5E_L2_GROUP3_SIZE;
 	MLX5_SET_CFG(in, end_flow_index, ix - 1);
 	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
 	if (IS_ERR(ft->g[ft->num_groups]))
 		goto err_destroy_groups;
 	ft->num_groups++;
 
-	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-	dmac[0] = 0x01;
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_MAIN_GROUP7_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
-	ft->num_groups++;
-
-	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	dmac[0] = 0x01;
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_MAIN_GROUP8_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
-	ft->num_groups++;
-
+	kvfree(in);
 	return 0;
 
 err_destroy_groups:
 	err = PTR_ERR(ft->g[ft->num_groups]);
 	ft->g[ft->num_groups] = NULL;
 	mlx5e_destroy_groups(ft);
-
-	return err;
-}
-
-static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
-{
-	u32 *in;
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	int err;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	err = __mlx5e_create_main_groups(ft, in, inlen);
-
 	kvfree(in);
+
 	return err;
 }
 
-static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
 {
-	struct mlx5e_flow_table *ft = &priv->fts.main;
+	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
+}
+
+static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
+{
+	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+	struct mlx5e_flow_table *ft = &l2_table->ft;
 	int err;
 
 	ft->num_groups = 0;
-	ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE);
+	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+				       MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);
 
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
 		ft->t = NULL;
 		return err;
 	}
-	ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
-	if (!ft->g) {
-		err = -ENOMEM;
-		goto err_destroy_main_flow_table;
-	}
 
-	err = mlx5e_create_main_groups(ft);
+	err = mlx5e_create_l2_table_groups(l2_table);
 	if (err)
-		goto err_free_g;
+		goto err_destroy_flow_table;
+
 	return 0;
 
-err_free_g:
-	kfree(ft->g);
-
-err_destroy_main_flow_table:
+err_destroy_flow_table:
 	mlx5_destroy_flow_table(ft->t);
 	ft->t = NULL;
 
 	return err;
 }
 
-static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
-{
-	mlx5e_destroy_groups(ft);
-	kfree(ft->g);
-	mlx5_destroy_flow_table(ft->t);
-	ft->t = NULL;
-}
-
-static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
-{
-	mlx5e_destroy_flow_table(&priv->fts.main);
-}
-
 #define MLX5E_NUM_VLAN_GROUPS	2
 #define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
 #define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
 #define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
 				 MLX5E_VLAN_GROUP1_SIZE)
 
-static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
-				      int inlen)
+static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
+					    int inlen)
 {
 	int err;
 	int ix = 0;
@@ -1128,7 +1002,7 @@
 	return err;
 }
 
-static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
+static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
 {
 	u32 *in;
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1138,19 +1012,20 @@
 	if (!in)
 		return -ENOMEM;
 
-	err = __mlx5e_create_vlan_groups(ft, in, inlen);
+	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
 
 	kvfree(in);
 	return err;
 }
 
-static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
 {
-	struct mlx5e_flow_table *ft = &priv->fts.vlan;
+	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
 	int err;
 
 	ft->num_groups = 0;
-	ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE);
+	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+				       MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);
 
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
@@ -1160,65 +1035,90 @@
 	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
 	if (!ft->g) {
 		err = -ENOMEM;
-		goto err_destroy_vlan_flow_table;
+		goto err_destroy_vlan_table;
 	}
 
-	err = mlx5e_create_vlan_groups(ft);
+	err = mlx5e_create_vlan_table_groups(ft);
 	if (err)
 		goto err_free_g;
 
+	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+	if (err)
+		goto err_destroy_vlan_flow_groups;
+
 	return 0;
 
+err_destroy_vlan_flow_groups:
+	mlx5e_destroy_groups(ft);
 err_free_g:
 	kfree(ft->g);
-
-err_destroy_vlan_flow_table:
+err_destroy_vlan_table:
 	mlx5_destroy_flow_table(ft->t);
 	ft->t = NULL;
 
 	return err;
 }
 
-static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
 {
-	mlx5e_destroy_flow_table(&priv->fts.vlan);
+	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
 }
 
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 {
 	int err;
 
-	priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
+	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
 					       MLX5_FLOW_NAMESPACE_KERNEL);
 
-	if (!priv->fts.ns)
+	if (!priv->fs.ns)
 		return -EINVAL;
 
-	err = mlx5e_create_vlan_flow_table(priv);
-	if (err)
-		return err;
+	err = mlx5e_arfs_create_tables(priv);
+	if (err) {
+		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
+			   err);
+		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+	}
 
-	err = mlx5e_create_main_flow_table(priv);
-	if (err)
-		goto err_destroy_vlan_flow_table;
+	err = mlx5e_create_ttc_table(priv);
+	if (err) {
+		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
+			   err);
+		goto err_destroy_arfs_tables;
+	}
 
-	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-	if (err)
-		goto err_destroy_main_flow_table;
+	err = mlx5e_create_l2_table(priv);
+	if (err) {
+		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
+			   err);
+		goto err_destroy_ttc_table;
+	}
+
+	err = mlx5e_create_vlan_table(priv);
+	if (err) {
+		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
+			   err);
+		goto err_destroy_l2_table;
+	}
 
 	return 0;
 
-err_destroy_main_flow_table:
-	mlx5e_destroy_main_flow_table(priv);
-err_destroy_vlan_flow_table:
-	mlx5e_destroy_vlan_flow_table(priv);
+err_destroy_l2_table:
+	mlx5e_destroy_l2_table(priv);
+err_destroy_ttc_table:
+	mlx5e_destroy_ttc_table(priv);
+err_destroy_arfs_tables:
+	mlx5e_arfs_destroy_tables(priv);
 
 	return err;
 }
 
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
 {
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-	mlx5e_destroy_main_flow_table(priv);
-	mlx5e_destroy_vlan_flow_table(priv);
+	mlx5e_destroy_vlan_table(priv);
+	mlx5e_destroy_l2_table(priv);
+	mlx5e_destroy_ttc_table(priv);
+	mlx5e_arfs_destroy_tables(priv);
 }
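
The restructured mlx5e_create_flow_steering() above builds the steering pipeline bottom-up (aRFS, then TTC, then L2, then VLAN) and tears it down in reverse order when a later step fails. As a minimal stand-alone sketch of that goto-unwind idiom, with hypothetical create/destroy stubs standing in for the real mlx5e_* helpers:

/* Sketch only: the create and destroy stubs below are hypothetical
 * stand-ins for mlx5e_create_ttc_table(), mlx5e_create_l2_table(),
 * mlx5e_create_vlan_table() and their destroy counterparts.
 */
#include <stdio.h>

static int create_ttc(void)   { return 0; }	/* pretend success */
static int create_l2(void)    { return 0; }	/* pretend success */
static int create_vlan(void)  { return -1; }	/* pretend this step fails */
static void destroy_ttc(void) { puts("destroy ttc"); }
static void destroy_l2(void)  { puts("destroy l2"); }

static int create_flow_steering(void)
{
	int err;

	err = create_ttc();
	if (err)
		return err;

	err = create_l2();
	if (err)
		goto err_destroy_ttc;

	err = create_vlan();
	if (err)
		goto err_destroy_l2;

	return 0;

err_destroy_l2:
	destroy_l2();
err_destroy_ttc:
	destroy_ttc();
	return err;
}

int main(void)
{
	return create_flow_steering() ? 1 : 0;
}

Each error label undoes exactly the steps that completed before it, so a failure at any point leaves no half-built tables behind; mlx5e_destroy_flow_steering() walks the same chain in reverse for the normal teardown path.
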
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e0adb60..fd43929 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -48,6 +48,7 @@
 	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
 	struct mlx5_wq_param       wq;
 	u16                        max_inline;
+	bool                       icosq;
 };
 
 struct mlx5e_cq_param {
@@ -59,8 +60,10 @@
 struct mlx5e_channel_param {
 	struct mlx5e_rq_param      rq;
 	struct mlx5e_sq_param      sq;
+	struct mlx5e_sq_param      icosq;
 	struct mlx5e_cq_param      rx_cq;
 	struct mlx5e_cq_param      tx_cq;
+	struct mlx5e_cq_param      icosq_cq;
 };
 
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -88,82 +91,15 @@
 	mutex_unlock(&priv->state_lock);
 }
 
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5e_pport_stats *s = &priv->stats.pport;
-	u32 *in;
-	u32 *out;
-	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
-
-	in  = mlx5_vzalloc(sz);
-	out = mlx5_vzalloc(sz);
-	if (!in || !out)
-		goto free_out;
-
-	MLX5_SET(ppcnt_reg, in, local_port, 1);
-
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out,
-			     sz, MLX5_REG_PPCNT, 0, 0);
-	memcpy(s->IEEE_802_3_counters,
-	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
-	       sizeof(s->IEEE_802_3_counters));
-
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out,
-			     sz, MLX5_REG_PPCNT, 0, 0);
-	memcpy(s->RFC_2863_counters,
-	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
-	       sizeof(s->RFC_2863_counters));
-
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out,
-			     sz, MLX5_REG_PPCNT, 0, 0);
-	memcpy(s->RFC_2819_counters,
-	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
-	       sizeof(s->RFC_2819_counters));
-
-free_out:
-	kvfree(in);
-	kvfree(out);
-}
-
-void mlx5e_update_stats(struct mlx5e_priv *priv)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5e_vport_stats *s = &priv->stats.vport;
+	struct mlx5e_sw_stats *s = &priv->stats.sw;
 	struct mlx5e_rq_stats *rq_stats;
 	struct mlx5e_sq_stats *sq_stats;
-	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
-	u32 *out;
-	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
-	u64 tx_offload_none;
+	u64 tx_offload_none = 0;
 	int i, j;
 
-	out = mlx5_vzalloc(outlen);
-	if (!out)
-		return;
-
-	/* Collect firts the SW counters and then HW for consistency */
-	s->rx_packets		= 0;
-	s->rx_bytes		= 0;
-	s->tx_packets		= 0;
-	s->tx_bytes		= 0;
-	s->tso_packets		= 0;
-	s->tso_bytes		= 0;
-	s->tso_inner_packets	= 0;
-	s->tso_inner_bytes	= 0;
-	s->tx_queue_stopped	= 0;
-	s->tx_queue_wake	= 0;
-	s->tx_queue_dropped	= 0;
-	s->tx_csum_inner	= 0;
-	tx_offload_none		= 0;
-	s->lro_packets		= 0;
-	s->lro_bytes		= 0;
-	s->rx_csum_none		= 0;
-	s->rx_csum_sw		= 0;
-	s->rx_wqe_err		= 0;
+	memset(s, 0, sizeof(*s));
 	for (i = 0; i < priv->params.num_channels; i++) {
 		rq_stats = &priv->channel[i]->rq.stats;
 
@@ -173,7 +109,13 @@
 		s->lro_bytes	+= rq_stats->lro_bytes;
 		s->rx_csum_none	+= rq_stats->csum_none;
 		s->rx_csum_sw	+= rq_stats->csum_sw;
+		s->rx_csum_inner += rq_stats->csum_inner;
 		s->rx_wqe_err   += rq_stats->wqe_err;
+		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+		s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
+		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
 
 		for (j = 0; j < priv->params.num_tc; j++) {
 			sq_stats = &priv->channel[i]->sq[j].stats;
@@ -192,7 +134,23 @@
 		}
 	}
 
-	/* HW counters */
+	/* Update calculated offload counters */
+	s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
+	s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
+			     s->rx_csum_sw;
+
+	s->link_down_events = MLX5_GET(ppcnt_reg,
+				priv->stats.pport.phy_counters,
+				counter_set.phys_layer_cntrs.link_down_events);
+}
+
+static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+	struct mlx5_core_dev *mdev = priv->mdev;
+
 	memset(in, 0, sizeof(in));
 
 	MLX5_SET(query_vport_counter_in, in, opcode,
@@ -202,56 +160,69 @@
 
 	memset(out, 0, outlen);
 
-	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+	int prio;
+	void *out;
+	u32 *in;
+
+	in = mlx5_vzalloc(sz);
+	if (!in)
 		goto free_out;
 
-#define MLX5_GET_CTR(p, x) \
-	MLX5_GET64(query_vport_counter_out, p, x)
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
 
-	s->rx_error_packets     =
-		MLX5_GET_CTR(out, received_errors.packets);
-	s->rx_error_bytes       =
-		MLX5_GET_CTR(out, received_errors.octets);
-	s->tx_error_packets     =
-		MLX5_GET_CTR(out, transmit_errors.packets);
-	s->tx_error_bytes       =
-		MLX5_GET_CTR(out, transmit_errors.octets);
+	out = pstats->IEEE_802_3_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 
-	s->rx_unicast_packets   =
-		MLX5_GET_CTR(out, received_eth_unicast.packets);
-	s->rx_unicast_bytes     =
-		MLX5_GET_CTR(out, received_eth_unicast.octets);
-	s->tx_unicast_packets   =
-		MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
-	s->tx_unicast_bytes     =
-		MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
+	out = pstats->RFC_2863_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 
-	s->rx_multicast_packets =
-		MLX5_GET_CTR(out, received_eth_multicast.packets);
-	s->rx_multicast_bytes   =
-		MLX5_GET_CTR(out, received_eth_multicast.octets);
-	s->tx_multicast_packets =
-		MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
-	s->tx_multicast_bytes   =
-		MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
+	out = pstats->RFC_2819_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 
-	s->rx_broadcast_packets =
-		MLX5_GET_CTR(out, received_eth_broadcast.packets);
-	s->rx_broadcast_bytes   =
-		MLX5_GET_CTR(out, received_eth_broadcast.octets);
-	s->tx_broadcast_packets =
-		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
-	s->tx_broadcast_bytes   =
-		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+	out = pstats->phy_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 
-	/* Update calculated offload counters */
-	s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
-	s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
-			       s->rx_csum_sw;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		out = pstats->per_prio_counters[prio];
+		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+		mlx5_core_access_reg(mdev, in, sz, out, sz,
+				     MLX5_REG_PPCNT, 0, 0);
+	}
 
-	mlx5e_update_pport_counters(priv);
 free_out:
-	kvfree(out);
+	kvfree(in);
+}
+
+static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
+{
+	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+
+	if (!priv->q_counter)
+		return;
+
+	mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
+				      &qcnt->rx_out_of_buffer);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+	mlx5e_update_q_counter(priv);
+	mlx5e_update_vport_counters(priv);
+	mlx5e_update_pport_counters(priv);
+	mlx5e_update_sw_counters(priv);
 }
 
 static void mlx5e_update_stats_work(struct work_struct *work)
@@ -262,9 +233,8 @@
 	mutex_lock(&priv->state_lock);
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		mlx5e_update_stats(priv);
-		schedule_delayed_work(dwork,
-				      msecs_to_jiffies(
-					      MLX5E_UPDATE_STATS_INTERVAL));
+		queue_delayed_work(priv->wq, dwork,
+				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
 	}
 	mutex_unlock(&priv->state_lock);
 }
@@ -280,7 +250,7 @@
 	switch (event) {
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
-		schedule_work(&priv->update_carrier_work);
+		queue_work(priv->wq, &priv->update_carrier_work);
 		break;
 
 	default:
@@ -310,6 +280,7 @@
 	struct mlx5_core_dev *mdev = priv->mdev;
 	void *rqc = param->rqc;
 	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+	u32 byte_count;
 	int wq_sz;
 	int err;
 	int i;
@@ -324,32 +295,56 @@
 	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
 
 	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
-	rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
-			       cpu_to_node(c->cpu));
-	if (!rq->skb) {
-		err = -ENOMEM;
-		goto err_rq_wq_destroy;
-	}
 
-	rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
-					     MLX5E_SW2HW_MTU(priv->netdev->mtu);
-	rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+	switch (priv->params.rq_wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
+					    GFP_KERNEL, cpu_to_node(c->cpu));
+		if (!rq->wqe_info) {
+			err = -ENOMEM;
+			goto err_rq_wq_destroy;
+		}
+		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
+		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+
+		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
+		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
+		rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+		byte_count = rq->wqe_sz;
+		break;
+	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+				       cpu_to_node(c->cpu));
+		if (!rq->skb) {
+			err = -ENOMEM;
+			goto err_rq_wq_destroy;
+		}
+		rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+
+		rq->wqe_sz = (priv->params.lro_en) ?
+				priv->params.lro_wqe_sz :
+				MLX5E_SW2HW_MTU(priv->netdev->mtu);
+		rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
+		byte_count = rq->wqe_sz;
+		byte_count |= MLX5_HW_START_PADDING;
+	}
 
 	for (i = 0; i < wq_sz; i++) {
 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
-		u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
 
-		wqe->data.lkey       = c->mkey_be;
-		wqe->data.byte_count =
-			cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+		wqe->data.byte_count = cpu_to_be32(byte_count);
 	}
 
+	rq->wq_type = priv->params.rq_wq_type;
 	rq->pdev    = c->pdev;
 	rq->netdev  = c->netdev;
 	rq->tstamp  = &priv->tstamp;
 	rq->channel = c;
 	rq->ix      = c->ix;
 	rq->priv    = c->priv;
+	rq->mkey_be = c->mkey_be;
+	rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
 
 	return 0;
 
@@ -361,7 +356,14 @@
 
 static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
 {
-	kfree(rq->skb);
+	switch (rq->wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		kfree(rq->wqe_info);
+		break;
+	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		kfree(rq->skb);
+	}
+
 	mlx5_wq_destroy(&rq->wq_ctrl);
 }
 
@@ -390,6 +392,7 @@
 	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
 	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
 	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
+	MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
 	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
 						MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);
@@ -404,7 +407,8 @@
 	return err;
 }
 
-static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
+				 int next_state)
 {
 	struct mlx5e_channel *c = rq->channel;
 	struct mlx5e_priv *priv = c->priv;
@@ -432,6 +436,36 @@
 	return err;
 }
 
+static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+{
+	struct mlx5e_channel *c = rq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *rqc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+	MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+	MLX5_SET(rqc, rqc, vsd, vsd);
+	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
 static void mlx5e_disable_rq(struct mlx5e_rq *rq)
 {
 	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
@@ -458,6 +492,8 @@
 			 struct mlx5e_rq_param *param,
 			 struct mlx5e_rq *rq)
 {
+	struct mlx5e_sq *sq = &c->icosq;
+	u16 pi = sq->pc & sq->wq.sz_m1;
 	int err;
 
 	err = mlx5e_create_rq(c, param, rq);
@@ -468,12 +504,15 @@
 	if (err)
 		goto err_destroy_rq;
 
-	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
 	if (err)
 		goto err_disable_rq;
 
 	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-	mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
+
+	sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
+	sq->ico_wqe_info[pi].num_wqebbs = 1;
+	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
 
 	return 0;
 
@@ -490,7 +529,7 @@
 	clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
 	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
 
-	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+	mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
 	while (!mlx5_wq_ll_is_empty(&rq->wq))
 		msleep(20);
 
@@ -539,7 +578,6 @@
 
 	void *sqc = param->sqc;
 	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
-	int txq_ix;
 	int err;
 
 	err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
@@ -567,8 +605,24 @@
 	if (err)
 		goto err_sq_wq_destroy;
 
-	txq_ix = c->ix + tc * priv->params.num_channels;
-	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+	if (param->icosq) {
+		u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
+		sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
+						wq_sz,
+						GFP_KERNEL,
+						cpu_to_node(c->cpu));
+		if (!sq->ico_wqe_info) {
+			err = -ENOMEM;
+			goto err_free_sq_db;
+		}
+	} else {
+		int txq_ix;
+
+		txq_ix = c->ix + tc * priv->params.num_channels;
+		sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+		priv->txq_to_sq_map[txq_ix] = sq;
+	}
 
 	sq->pdev      = c->pdev;
 	sq->tstamp    = &priv->tstamp;
@@ -577,10 +631,12 @@
 	sq->tc        = tc;
 	sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
 	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
-	priv->txq_to_sq_map[txq_ix] = sq;
 
 	return 0;
 
+err_free_sq_db:
+	mlx5e_free_sq_db(sq);
+
 err_sq_wq_destroy:
 	mlx5_wq_destroy(&sq->wq_ctrl);
 
@@ -595,6 +651,7 @@
 	struct mlx5e_channel *c = sq->channel;
 	struct mlx5e_priv *priv = c->priv;
 
+	kfree(sq->ico_wqe_info);
 	mlx5e_free_sq_db(sq);
 	mlx5_wq_destroy(&sq->wq_ctrl);
 	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
@@ -623,10 +680,10 @@
 
 	memcpy(sqc, param->sqc, sizeof(param->sqc));
 
-	MLX5_SET(sqc,  sqc, tis_num_0,		priv->tisn[sq->tc]);
-	MLX5_SET(sqc,  sqc, cqn,		c->sq[sq->tc].cq.mcq.cqn);
+	MLX5_SET(sqc,  sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
+	MLX5_SET(sqc,  sqc, cqn,		sq->cq.mcq.cqn);
 	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
-	MLX5_SET(sqc,  sqc, tis_lst_sz,		1);
+	MLX5_SET(sqc,  sqc, tis_lst_sz,		param->icosq ? 0 : 1);
 	MLX5_SET(sqc,  sqc, flush_in_error_en,	1);
 
 	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
@@ -701,9 +758,11 @@
 	if (err)
 		goto err_disable_sq;
 
-	set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-	netdev_tx_reset_queue(sq->txq);
-	netif_tx_start_queue(sq->txq);
+	if (sq->txq) {
+		set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+		netdev_tx_reset_queue(sq->txq);
+		netif_tx_start_queue(sq->txq);
+	}
 
 	return 0;
 
@@ -724,15 +783,19 @@
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
-	clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
-	napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
-	netif_tx_disable_queue(sq->txq);
+	if (sq->txq) {
+		clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+		/* prevent netif_tx_wake_queue */
+		napi_synchronize(&sq->channel->napi);
+		netif_tx_disable_queue(sq->txq);
 
-	/* ensure hw is notified of all pending wqes */
-	if (mlx5e_sq_has_room_for(sq, 1))
-		mlx5e_send_nop(sq, true);
+		/* ensure hw is notified of all pending wqes */
+		if (mlx5e_sq_has_room_for(sq, 1))
+			mlx5e_send_nop(sq, true);
 
-	mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+		mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+	}
+
 	while (sq->cc != sq->pc) /* wait till sq is empty */
 		msleep(20);
 
@@ -986,10 +1049,14 @@
 
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
-	err = mlx5e_open_tx_cqs(c, cparam);
+	err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0);
 	if (err)
 		goto err_napi_del;
 
+	err = mlx5e_open_tx_cqs(c, cparam);
+	if (err)
+		goto err_close_icosq_cq;
+
 	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
 			    priv->params.rx_cq_moderation_usec,
 			    priv->params.rx_cq_moderation_pkts);
@@ -998,10 +1065,14 @@
 
 	napi_enable(&c->napi);
 
-	err = mlx5e_open_sqs(c, cparam);
+	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
 	if (err)
 		goto err_disable_napi;
 
+	err = mlx5e_open_sqs(c, cparam);
+	if (err)
+		goto err_close_icosq;
+
 	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
 	if (err)
 		goto err_close_sqs;
@@ -1014,6 +1085,9 @@
 err_close_sqs:
 	mlx5e_close_sqs(c);
 
+err_close_icosq:
+	mlx5e_close_sq(&c->icosq);
+
 err_disable_napi:
 	napi_disable(&c->napi);
 	mlx5e_close_cq(&c->rq.cq);
@@ -1021,6 +1095,9 @@
 err_close_tx_cqs:
 	mlx5e_close_tx_cqs(c);
 
+err_close_icosq_cq:
+	mlx5e_close_cq(&c->icosq.cq);
+
 err_napi_del:
 	netif_napi_del(&c->napi);
 	napi_hash_del(&c->napi);
@@ -1033,9 +1110,11 @@
 {
 	mlx5e_close_rq(&c->rq);
 	mlx5e_close_sqs(c);
+	mlx5e_close_sq(&c->icosq);
 	napi_disable(&c->napi);
 	mlx5e_close_cq(&c->rq.cq);
 	mlx5e_close_tx_cqs(c);
+	mlx5e_close_cq(&c->icosq.cq);
 	netif_napi_del(&c->napi);
 
 	napi_hash_del(&c->napi);
@@ -1050,11 +1129,23 @@
 	void *rqc = param->rqc;
 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 
-	MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
+	switch (priv->params.rq_wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		MLX5_SET(wq, wq, log_wqe_num_of_strides,
+			 priv->params.mpwqe_log_num_strides - 9);
+		MLX5_SET(wq, wq, log_wqe_stride_size,
+			 priv->params.mpwqe_log_stride_sz - 6);
+		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+		break;
+	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+	}
+
 	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
 	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
 	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
 	MLX5_SET(wq, wq, pd,               priv->pdn);
+	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
 
 	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
 	param->wq.linear = 1;
@@ -1069,17 +1160,27 @@
 	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
 }
 
+static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+					struct mlx5e_sq_param *param)
+{
+	void *sqc = param->sqc;
+	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+	MLX5_SET(wq, wq, pd,            priv->pdn);
+
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+}
+
 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
 				 struct mlx5e_sq_param *param)
 {
 	void *sqc = param->sqc;
 	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
 
+	mlx5e_build_sq_param_common(priv, param);
 	MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
-	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
-	MLX5_SET(wq, wq, pd,            priv->pdn);
 
-	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
 	param->max_inline = priv->params.tx_max_inline;
 }
 
@@ -1095,8 +1196,22 @@
 				    struct mlx5e_cq_param *param)
 {
 	void *cqc = param->cqc;
+	u8 log_cq_size;
 
-	MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_rq_size);
+	switch (priv->params.rq_wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		log_cq_size = priv->params.log_rq_size +
+			priv->params.mpwqe_log_num_strides;
+		break;
+	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		log_cq_size = priv->params.log_rq_size;
+	}
+
+	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
+	if (priv->params.rx_cqe_compress) {
+		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+	}
 
 	mlx5e_build_common_cq_param(priv, param);
 }
@@ -1106,25 +1221,52 @@
 {
 	void *cqc = param->cqc;
 
-	MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_sq_size);
+	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
 
 	mlx5e_build_common_cq_param(priv, param);
 }
 
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
-				      struct mlx5e_channel_param *cparam)
+static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
+				     struct mlx5e_cq_param *param,
+				     u8 log_wq_size)
 {
-	memset(cparam, 0, sizeof(*cparam));
+	void *cqc = param->cqc;
+
+	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
+
+	mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
+				    struct mlx5e_sq_param *param,
+				    u8 log_wq_size)
+{
+	void *sqc = param->sqc;
+	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	mlx5e_build_sq_param_common(priv, param);
+
+	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+
+	param->icosq = true;
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
+{
+	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 
 	mlx5e_build_rq_param(priv, &cparam->rq);
 	mlx5e_build_sq_param(priv, &cparam->sq);
+	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
 	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
 	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
 }
 
 static int mlx5e_open_channels(struct mlx5e_priv *priv)
 {
-	struct mlx5e_channel_param cparam;
+	struct mlx5e_channel_param *cparam;
 	int nch = priv->params.num_channels;
 	int err = -ENOMEM;
 	int i;
@@ -1136,12 +1278,15 @@
 	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
 				      sizeof(struct mlx5e_sq *), GFP_KERNEL);
 
-	if (!priv->channel || !priv->txq_to_sq_map)
+	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+
+	if (!priv->channel || !priv->txq_to_sq_map || !cparam)
 		goto err_free_txq_to_sq_map;
 
-	mlx5e_build_channel_param(priv, &cparam);
+	mlx5e_build_channel_param(priv, cparam);
+
 	for (i = 0; i < nch; i++) {
-		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+		err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
 		if (err)
 			goto err_close_channels;
 	}
@@ -1152,6 +1297,7 @@
 			goto err_close_channels;
 	}
 
+	kfree(cparam);
 	return 0;
 
 err_close_channels:
@@ -1161,6 +1307,7 @@
 err_free_txq_to_sq_map:
 	kfree(priv->txq_to_sq_map);
 	kfree(priv->channel);
+	kfree(cparam);
 
 	return err;
 }
@@ -1200,48 +1347,36 @@
 
 	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
 		int ix = i;
+		u32 rqn;
 
 		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
 			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
 
 		ix = priv->params.indirection_rqt[ix];
-		MLX5_SET(rqtc, rqtc, rq_num[i],
-			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-			 priv->channel[ix]->rq.rqn :
-			 priv->drop_rq.rqn);
+		rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+				priv->channel[ix]->rq.rqn :
+				priv->drop_rq.rqn;
+		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
 	}
 }
 
-static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
-				enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
+				      int ix)
 {
+	u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+			priv->channel[ix]->rq.rqn :
+			priv->drop_rq.rqn;
 
-	switch (rqt_ix) {
-	case MLX5E_INDIRECTION_RQT:
-		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
-
-		break;
-
-	default: /* MLX5E_SINGLE_RQ_RQT */
-		MLX5_SET(rqtc, rqtc, rq_num[0],
-			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-			 priv->channel[0]->rq.rqn :
-			 priv->drop_rq.rqn);
-
-		break;
-	}
+	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
 }
 
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 *in;
 	void *rqtc;
 	int inlen;
-	int sz;
 	int err;
-
-	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
+	u32 *in;
 
 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
 	in = mlx5_vzalloc(inlen);
@@ -1253,26 +1388,73 @@
 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
-	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+	if (sz > 1) /* RSS */
+		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+	else
+		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
 
-	err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
+	err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);
 
 	kvfree(in);
+	return err;
+}
+
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn)
+{
+	mlx5_core_destroy_rqt(priv->mdev, rqtn);
+}
+
+static int mlx5e_create_rqts(struct mlx5e_priv *priv)
+{
+	int nch = mlx5e_get_max_num_channels(priv->mdev);
+	u32 *rqtn;
+	int err;
+	int ix;
+
+	/* Indirect RQT */
+	rqtn = &priv->indir_rqtn;
+	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn);
+	if (err)
+		return err;
+
+	/* Direct RQTs */
+	for (ix = 0; ix < nch; ix++) {
+		rqtn = &priv->direct_tir[ix].rqtn;
+		err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn);
+		if (err)
+			goto err_destroy_rqts;
+	}
+
+	return 0;
+
+err_destroy_rqts:
+	for (ix--; ix >= 0; ix--)
+		mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn);
+
+	mlx5e_destroy_rqt(priv, priv->indir_rqtn);
 
 	return err;
 }
 
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_destroy_rqts(struct mlx5e_priv *priv)
+{
+	int nch = mlx5e_get_max_num_channels(priv->mdev);
+	int i;
+
+	for (i = 0; i < nch; i++)
+		mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn);
+
+	mlx5e_destroy_rqt(priv, priv->indir_rqtn);
+}
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 *in;
 	void *rqtc;
 	int inlen;
-	int sz;
+	u32 *in;
 	int err;
 
-	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
-
 	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
 	in = mlx5_vzalloc(inlen);
 	if (!in)
@@ -1281,27 +1463,31 @@
 	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
 
 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
-
-	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+	if (sz > 1) /* RSS */
+		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+	else
+		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
 
 	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
 
-	err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
+	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
 
 	kvfree(in);
 
 	return err;
 }
 
-static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
-{
-	mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
-}
-
 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
 {
-	mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
-	mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+	u32 rqtn;
+	int ix;
+
+	rqtn = priv->indir_rqtn;
+	mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+	for (ix = 0; ix < priv->params.num_channels; ix++) {
+		rqtn = priv->direct_tir[ix].rqtn;
+		mlx5e_redirect_rqt(priv, rqtn, 1, ix);
+	}
 }
 
 static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
@@ -1346,6 +1532,7 @@
 	int inlen;
 	int err;
 	int tt;
+	int ix;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
 	in = mlx5_vzalloc(inlen);
@@ -1357,23 +1544,32 @@
 
 	mlx5e_build_tir_ctx_lro(tirc, priv);
 
-	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
-		err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in,
+					   inlen);
 		if (err)
-			break;
+			goto free_in;
 	}
 
+	for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) {
+		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
+					   in, inlen);
+		if (err)
+			goto free_in;
+	}
+
+free_in:
 	kvfree(in);
 
 	return err;
 }
 
-static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
-						  u32 tirn)
+static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
 {
 	void *in;
 	int inlen;
 	int err;
+	int i;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
 	in = mlx5_vzalloc(inlen);
@@ -1382,46 +1578,70 @@
 
 	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
-	err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
-
-	kvfree(in);
-
-	return err;
-}
-
-static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
-{
-	int err;
-	int i;
-
-	for (i = 0; i < MLX5E_NUM_TT; i++) {
-		err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
-							     priv->tirn[i]);
+	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
+		err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in,
+					   inlen);
 		if (err)
 			return err;
 	}
 
+	for (i = 0; i < priv->params.num_channels; i++) {
+		err = mlx5_core_modify_tir(priv->mdev,
+					   priv->direct_tir[i].tirn, in,
+					   inlen);
+		if (err)
+			return err;
+	}
+
+	kvfree(in);
+
 	return 0;
 }
 
+static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
+	int err;
+
+	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
+	if (err)
+		return err;
+
+	/* Update vport context MTU */
+	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
+	return 0;
+}
+
+static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u16 hw_mtu = 0;
+	int err;
+
+	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+	if (err || !hw_mtu) /* fallback to port oper mtu */
+		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
+}
+
 static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int hw_mtu;
+	u16 mtu;
 	int err;
 
-	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+	err = mlx5e_set_mtu(priv, netdev->mtu);
 	if (err)
 		return err;
 
-	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+	mlx5e_query_mtu(priv, &mtu);
+	if (mtu != netdev->mtu)
+		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+			    __func__, mtu, netdev->mtu);
 
-	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
-		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
-			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
-
-	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+	netdev->mtu = mtu;
 	return 0;
 }
 
@@ -1478,8 +1698,11 @@
 	mlx5e_redirect_rqts(priv);
 	mlx5e_update_carrier(priv);
 	mlx5e_timestamp_init(priv);
+#ifdef CONFIG_RFS_ACCEL
+	priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
+#endif
 
-	schedule_delayed_work(&priv->update_stats_work, 0);
+	queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
 
 	return 0;
 
@@ -1685,7 +1908,8 @@
 		mlx5e_destroy_tis(priv, tc);
 }
 
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+				      enum mlx5e_traffic_types tt)
 {
 	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
 
@@ -1706,19 +1930,8 @@
 	mlx5e_build_tir_ctx_lro(tirc, priv);
 
 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
-
-	switch (tt) {
-	case MLX5E_TT_ANY:
-		MLX5_SET(tirc, tirc, indirect_table,
-			 priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
-		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
-		break;
-	default:
-		MLX5_SET(tirc, tirc, indirect_table,
-			 priv->rqtn[MLX5E_INDIRECTION_RQT]);
-		mlx5e_build_tir_ctx_hash(tirc, priv);
-		break;
-	}
+	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn);
+	mlx5e_build_tir_ctx_hash(tirc, priv);
 
 	switch (tt) {
 	case MLX5E_TT_IPV4_TCP:
@@ -1798,64 +2011,107 @@
 		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
 			 MLX5_HASH_IP);
 		break;
+	default:
+		WARN_ONCE(true,
+			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
 	}
 }
 
-static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+				       u32 rqtn)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 *in;
+	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+	mlx5e_build_tir_ctx_lro(tirc, priv);
+
+	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+	MLX5_SET(tirc, tirc, indirect_table, rqtn);
+	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+	int nch = mlx5e_get_max_num_channels(priv->mdev);
 	void *tirc;
 	int inlen;
+	u32 *tirn;
 	int err;
+	u32 *in;
+	int ix;
+	int tt;
 
 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
 	in = mlx5_vzalloc(inlen);
 	if (!in)
 		return -ENOMEM;
 
-	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+	/* indirect tirs */
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(in, 0, inlen);
+		tirn = &priv->indir_tirn[tt];
+		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+		mlx5e_build_indir_tir_ctx(priv, tirc, tt);
+		err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+		if (err)
+			goto err_destroy_tirs;
+	}
 
-	mlx5e_build_tir_ctx(priv, tirc, tt);
+	/* direct tirs */
+	for (ix = 0; ix < nch; ix++) {
+		memset(in, 0, inlen);
+		tirn = &priv->direct_tir[ix].tirn;
+		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+		mlx5e_build_direct_tir_ctx(priv, tirc,
+					   priv->direct_tir[ix].rqtn);
+		err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+		if (err)
+			goto err_destroy_ch_tirs;
+	}
 
-	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+	kvfree(in);
+
+	return 0;
+
+err_destroy_ch_tirs:
+	for (ix--; ix >= 0; ix--)
+		mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn);
+
+err_destroy_tirs:
+	for (tt--; tt >= 0; tt--)
+		mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]);
 
 	kvfree(in);
 
 	return err;
 }
 
-static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
 {
-	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
-}
-
-static int mlx5e_create_tirs(struct mlx5e_priv *priv)
-{
-	int err;
+	int nch = mlx5e_get_max_num_channels(priv->mdev);
 	int i;
 
-	for (i = 0; i < MLX5E_NUM_TT; i++) {
-		err = mlx5e_create_tir(priv, i);
+	for (i = 0; i < nch; i++)
+		mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn);
+
+	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+		mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]);
+}
+
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
+{
+	int err = 0;
+	int i;
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return 0;
+
+	for (i = 0; i < priv->params.num_channels; i++) {
+		err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
 		if (err)
-			goto err_destroy_tirs;
+			return err;
 	}
 
 	return 0;
-
-err_destroy_tirs:
-	for (i--; i >= 0; i--)
-		mlx5e_destroy_tir(priv, i);
-
-	return err;
-}
-
-static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
-{
-	int i;
-
-	for (i = 0; i < MLX5E_NUM_TT; i++)
-		mlx5e_destroy_tir(priv, i);
 }
 
 static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
@@ -1898,6 +2154,8 @@
 			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
 		case TC_CLSFLOWER_DESTROY:
 			return mlx5e_delete_flower(priv, tc->cls_flower);
+		case TC_CLSFLOWER_STATS:
+			return mlx5e_stats_flower(priv, tc->cls_flower);
 		}
 	default:
 		return -EOPNOTSUPP;
@@ -1914,19 +2172,37 @@
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
 	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 
-	stats->rx_packets = vstats->rx_packets;
-	stats->rx_bytes   = vstats->rx_bytes;
-	stats->tx_packets = vstats->tx_packets;
-	stats->tx_bytes   = vstats->tx_bytes;
-	stats->multicast  = vstats->rx_multicast_packets +
-			    vstats->tx_multicast_packets;
-	stats->tx_errors  = vstats->tx_error_packets;
-	stats->rx_errors  = vstats->rx_error_packets;
-	stats->tx_dropped = vstats->tx_queue_dropped;
-	stats->rx_crc_errors = 0;
-	stats->rx_length_errors = 0;
+	stats->rx_packets = sstats->rx_packets;
+	stats->rx_bytes   = sstats->rx_bytes;
+	stats->tx_packets = sstats->tx_packets;
+	stats->tx_bytes   = sstats->tx_bytes;
+
+	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+	stats->tx_dropped = sstats->tx_queue_dropped;
+
+	stats->rx_length_errors =
+		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
+		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
+	stats->rx_crc_errors =
+		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
+	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
+	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
+	stats->tx_carrier_errors =
+		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
+	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+			   stats->rx_frame_errors;
+	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
+
+	/* vport multicast also counts packets that are dropped due to steering
+	 * or rx out of buffer
+	 */
+	stats->multicast =
+		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
 
 	return stats;
 }
@@ -1935,7 +2211,7 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -1950,71 +2226,180 @@
 	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
 	netif_addr_unlock_bh(netdev);
 
-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 
 	return 0;
 }
 
+#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
+	do {						\
+		if (enable)				\
+			netdev->features |= feature;	\
+		else					\
+			netdev->features &= ~feature;	\
+	} while (0)
+
+typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
+
+static int set_feature_lro(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	int err;
+
+	mutex_lock(&priv->state_lock);
+
+	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+		mlx5e_close_locked(priv->netdev);
+
+	priv->params.lro_en = enable;
+	err = mlx5e_modify_tirs_lro(priv);
+	if (err) {
+		netdev_err(netdev, "lro modify failed, %d\n", err);
+		priv->params.lro_en = !enable;
+	}
+
+	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+		mlx5e_open_locked(priv->netdev);
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (enable)
+		mlx5e_enable_vlan_filter(priv);
+	else
+		mlx5e_disable_vlan_filter(priv);
+
+	return 0;
+}
+
+static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (!enable && mlx5e_tc_num_filters(priv)) {
+		netdev_err(netdev,
+			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int set_feature_rx_all(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	return mlx5_set_port_fcs(mdev, !enable);
+}
+
+static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err;
+
+	mutex_lock(&priv->state_lock);
+
+	priv->params.vlan_strip_disable = !enable;
+	err = mlx5e_modify_rqs_vsd(priv, !enable);
+	if (err)
+		priv->params.vlan_strip_disable = enable;
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int set_feature_arfs(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err;
+
+	if (enable)
+		err = mlx5e_arfs_enable(priv);
+	else
+		err = mlx5e_arfs_disable(priv);
+
+	return err;
+}
+#endif
+
+static int mlx5e_handle_feature(struct net_device *netdev,
+				netdev_features_t wanted_features,
+				netdev_features_t feature,
+				mlx5e_feature_handler feature_handler)
+{
+	netdev_features_t changes = wanted_features ^ netdev->features;
+	bool enable = !!(wanted_features & feature);
+	int err;
+
+	if (!(changes & feature))
+		return 0;
+
+	err = feature_handler(netdev, enable);
+	if (err) {
+		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
+			   enable ? "Enable" : "Disable", feature, err);
+		return err;
+	}
+
+	MLX5E_SET_FEATURE(netdev, feature, enable);
+	return 0;
+}
+
 static int mlx5e_set_features(struct net_device *netdev,
 			      netdev_features_t features)
 {
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	int err = 0;
-	netdev_features_t changes = features ^ netdev->features;
+	int err;
 
-	mutex_lock(&priv->state_lock);
+	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
+				    set_feature_lro);
+	err |= mlx5e_handle_feature(netdev, features,
+				    NETIF_F_HW_VLAN_CTAG_FILTER,
+				    set_feature_vlan_filter);
+	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
+				    set_feature_tc_num_filters);
+	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
+				    set_feature_rx_all);
+	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
+				    set_feature_rx_vlan);
+#ifdef CONFIG_RFS_ACCEL
+	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
+				    set_feature_arfs);
+#endif
 
-	if (changes & NETIF_F_LRO) {
-		bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-
-		if (was_opened)
-			mlx5e_close_locked(priv->netdev);
-
-		priv->params.lro_en = !!(features & NETIF_F_LRO);
-		err = mlx5e_modify_tirs_lro(priv);
-		if (err)
-			mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
-				       err);
-
-		if (was_opened)
-			err = mlx5e_open_locked(priv->netdev);
-	}
-
-	mutex_unlock(&priv->state_lock);
-
-	if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
-		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
-			mlx5e_enable_vlan_filter(priv);
-		else
-			mlx5e_disable_vlan_filter(priv);
-	}
-
-	if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) &&
-	    mlx5e_tc_num_filters(priv)) {
-		netdev_err(netdev,
-			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
-		return -EINVAL;
-	}
-
-	return err;
+	return err ? -EINVAL : 0;
 }
 
+#define MLX5_HW_MIN_MTU 64
+#define MLX5E_MIN_MTU (MLX5_HW_MIN_MTU + ETH_FCS_LEN)
+
 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 	bool was_opened;
-	int max_mtu;
+	u16 max_mtu;
+	u16 min_mtu;
 	int err = 0;
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
 	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+	min_mtu = MLX5E_HW2SW_MTU(MLX5E_MIN_MTU);
 
-	if (new_mtu > max_mtu) {
+	if (new_mtu > max_mtu || new_mtu < min_mtu) {
 		netdev_err(netdev,
-			   "%s: Bad MTU (%d) > (%d) Max\n",
-			   __func__, new_mtu, max_mtu);
+			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
+			   __func__, new_mtu, min_mtu, max_mtu);
 		return -EINVAL;
 	}
 
@@ -2063,6 +2448,21 @@
 					   vlan, qos);
 }
 
+static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
+}
+
+static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
+}
 static int mlx5_vport_link2ifla(u8 esw_link)
 {
 	switch (esw_link) {
@@ -2127,7 +2527,7 @@
 	if (!mlx5e_vxlan_allowed(priv->mdev))
 		return;
 
-	mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+	mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
 }
 
 static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2138,7 +2538,7 @@
 	if (!mlx5e_vxlan_allowed(priv->mdev))
 		return;
 
-	mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+	mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
 }
 
 static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2205,6 +2605,9 @@
 	.ndo_set_features        = mlx5e_set_features,
 	.ndo_change_mtu          = mlx5e_change_mtu,
 	.ndo_do_ioctl            = mlx5e_ioctl,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
+#endif
 };
 
 static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2224,8 +2627,13 @@
 	.ndo_add_vxlan_port      = mlx5e_add_vxlan_port,
 	.ndo_del_vxlan_port      = mlx5e_del_vxlan_port,
 	.ndo_features_check      = mlx5e_features_check,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
+#endif
 	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
 	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
+	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
+	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
 	.ndo_get_vf_config       = mlx5e_get_vf_config,
 	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
 	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
@@ -2283,25 +2691,121 @@
 }
 #endif
 
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+				   u32 *indirection_rqt, int len,
 				   int num_channels)
 {
+	int node = mdev->priv.numa_node;
+	int node_num_of_cores;
 	int i;
 
+	if (node == -1)
+		node = first_online_node;
+
+	node_num_of_cores = cpumask_weight(cpumask_of_node(node));
+
+	if (node_num_of_cores)
+		num_channels = min_t(int, num_channels, node_num_of_cores);
+
 	for (i = 0; i < len; i++)
 		indirection_rqt[i] = i % num_channels;
 }
 
+static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+{
+	return MLX5_CAP_GEN(mdev, striding_rq) &&
+		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+		MLX5_CAP_ETH(mdev, reg_umr_sq);
+}
+
+static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
+{
+	enum pcie_link_width width;
+	enum pci_bus_speed speed;
+	int err = 0;
+
+	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
+	if (err)
+		return err;
+
+	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+		return -EINVAL;
+
+	switch (speed) {
+	case PCIE_SPEED_2_5GT:
+		*pci_bw = 2500 * width;
+		break;
+	case PCIE_SPEED_5_0GT:
+		*pci_bw = 5000 * width;
+		break;
+	case PCIE_SPEED_8_0GT:
+		*pci_bw = 8000 * width;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
+{
+	return (link_speed && pci_bw &&
+		(pci_bw < 40000) && (pci_bw < link_speed));
+}
+
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 				    struct net_device *netdev,
 				    int num_channels)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	u32 link_speed = 0;
+	u32 pci_bw = 0;
 
 	priv->params.log_sq_size           =
 		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
-	priv->params.log_rq_size           =
-		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+	priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
+		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+		MLX5_WQ_TYPE_LINKED_LIST;
+
+	/* set CQE compression */
+	priv->params.rx_cqe_compress_admin = false;
+	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
+	    MLX5_CAP_GEN(mdev, vport_group_manager)) {
+		mlx5e_get_max_linkspeed(mdev, &link_speed);
+		mlx5e_get_pci_bw(mdev, &pci_bw);
+		mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
+			      link_speed, pci_bw);
+		priv->params.rx_cqe_compress_admin =
+			cqe_compress_heuristic(link_speed, pci_bw);
+	}
+
+	priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
+
+	switch (priv->params.rq_wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+		priv->params.mpwqe_log_stride_sz =
+			priv->params.rx_cqe_compress ?
+			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
+			MLX5_MPWRQ_LOG_STRIDE_SIZE;
+		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+			priv->params.mpwqe_log_stride_sz;
+		priv->params.lro_en = true;
+		break;
+	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+	}
+
+	mlx5_core_info(mdev,
+		       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+		       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+		       BIT(priv->params.log_rq_size),
+		       BIT(priv->params.mpwqe_log_stride_sz),
+		       priv->params.rx_cqe_compress_admin);
+
+	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+					    BIT(priv->params.log_rq_size));
 	priv->params.rx_cq_moderation_usec =
 		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
 	priv->params.rx_cq_moderation_pkts =
@@ -2311,15 +2815,13 @@
 	priv->params.tx_cq_moderation_pkts =
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
 	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
-	priv->params.min_rx_wqes           =
-		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
 	priv->params.num_tc                = 1;
 	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
 	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
 			    sizeof(priv->params.toeplitz_hash_key));
 
-	mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
 				      MLX5E_INDIR_RQT_SIZE, num_channels);
 
 	priv->params.lro_wqe_sz            =
@@ -2356,6 +2858,8 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	bool fcs_supported;
+	bool fcs_enabled;
 
 	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
 
@@ -2390,25 +2894,41 @@
 	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
 	if (mlx5e_vxlan_allowed(mdev)) {
-		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
+		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
+					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
+					   NETIF_F_GSO_PARTIAL;
 		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
-		netdev->hw_enc_features |= NETIF_F_RXCSUM;
+		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
 		netdev->hw_enc_features |= NETIF_F_TSO;
 		netdev->hw_enc_features |= NETIF_F_TSO6;
-		netdev->hw_enc_features |= NETIF_F_RXHASH;
 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+					   NETIF_F_GSO_PARTIAL;
+		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
+	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
+
+	if (fcs_supported)
+		netdev->hw_features |= NETIF_F_RXALL;
+
 	netdev->features          = netdev->hw_features;
 	if (!priv->params.lro_en)
 		netdev->features  &= ~NETIF_F_LRO;
 
+	if (fcs_enabled)
+		netdev->features  &= ~NETIF_F_RXALL;
+
 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
 	if (FT_CAP(flow_modify_en) &&
 	    FT_CAP(modify_root) &&
 	    FT_CAP(identified_miss_table_mode) &&
-	    FT_CAP(flow_table_modify))
-		priv->netdev->hw_features      |= NETIF_F_HW_TC;
+	    FT_CAP(flow_table_modify)) {
+		netdev->hw_features      |= NETIF_F_HW_TC;
+#ifdef CONFIG_RFS_ACCEL
+		netdev->hw_features	 |= NETIF_F_NTUPLE;
+#endif
+	}
 
 	netdev->features         |= NETIF_F_HIGHDMA;
 
@@ -2442,6 +2962,61 @@
 	return err;
 }
 
+static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+
+	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
+	if (err) {
+		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
+		priv->q_counter = 0;
+	}
+}
+
+static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
+{
+	if (!priv->q_counter)
+		return;
+
+	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+}
+
+static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_create_mkey_mbox_in *in;
+	struct mlx5_mkey_seg *mkc;
+	int inlen = sizeof(*in);
+	u64 npages =
+		mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	mkc = &in->seg;
+	mkc->status = MLX5_MKEY_STATUS_FREE;
+	mkc->flags = MLX5_PERM_UMR_EN |
+		     MLX5_PERM_LOCAL_READ |
+		     MLX5_PERM_LOCAL_WRITE |
+		     MLX5_ACCESS_MODE_MTT;
+
+	mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+	mkc->flags_pd = cpu_to_be32(priv->pdn);
+	mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
+	mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
+	mkc->log2_page_size = PAGE_SHIFT;
+
+	err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
+				    NULL, NULL);
+
+	kvfree(in);
+
+	return err;
+}
+
 static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 {
 	struct net_device *netdev;
@@ -2467,10 +3042,14 @@
 
 	priv = netdev_priv(netdev);
 
+	priv->wq = create_singlethread_workqueue("mlx5e");
+	if (!priv->wq)
+		goto err_free_netdev;
+
 	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
 	if (err) {
 		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
-		goto err_free_netdev;
+		goto err_destroy_wq;
 	}
 
 	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -2491,10 +3070,16 @@
 		goto err_dealloc_transport_domain;
 	}
 
+	err = mlx5e_create_umr_mkey(priv);
+	if (err) {
+		mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
+		goto err_destroy_mkey;
+	}
+
 	err = mlx5e_create_tises(priv);
 	if (err) {
 		mlx5_core_warn(mdev, "create tises failed, %d\n", err);
-		goto err_destroy_mkey;
+		goto err_destroy_umr_mkey;
 	}
 
 	err = mlx5e_open_drop_rq(priv);
@@ -2503,37 +3088,33 @@
 		goto err_destroy_tises;
 	}
 
-	err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+	err = mlx5e_create_rqts(priv);
 	if (err) {
-		mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+		mlx5_core_warn(mdev, "create rqts failed, %d\n", err);
 		goto err_close_drop_rq;
 	}
 
-	err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-	if (err) {
-		mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
-		goto err_destroy_rqt_indir;
-	}
-
 	err = mlx5e_create_tirs(priv);
 	if (err) {
 		mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
-		goto err_destroy_rqt_single;
+		goto err_destroy_rqts;
 	}
 
-	err = mlx5e_create_flow_tables(priv);
+	err = mlx5e_create_flow_steering(priv);
 	if (err) {
-		mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
 		goto err_destroy_tirs;
 	}
 
-	mlx5e_init_eth_addr(priv);
+	mlx5e_create_q_counter(priv);
+
+	mlx5e_init_l2_addr(priv);
 
 	mlx5e_vxlan_init(priv);
 
 	err = mlx5e_tc_init(priv);
 	if (err)
-		goto err_destroy_flow_tables;
+		goto err_dealloc_q_counters;
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
@@ -2545,28 +3126,29 @@
 		goto err_tc_cleanup;
 	}
 
-	if (mlx5e_vxlan_allowed(mdev))
+	if (mlx5e_vxlan_allowed(mdev)) {
+		rtnl_lock();
 		vxlan_get_rx_port(netdev);
+		rtnl_unlock();
+	}
 
 	mlx5e_enable_async_events(priv);
-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 
 	return priv;
 
 err_tc_cleanup:
 	mlx5e_tc_cleanup(priv);
 
-err_destroy_flow_tables:
-	mlx5e_destroy_flow_tables(priv);
+err_dealloc_q_counters:
+	mlx5e_destroy_q_counter(priv);
+	mlx5e_destroy_flow_steering(priv);
 
 err_destroy_tirs:
 	mlx5e_destroy_tirs(priv);
 
-err_destroy_rqt_single:
-	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-
-err_destroy_rqt_indir:
-	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+err_destroy_rqts:
+	mlx5e_destroy_rqts(priv);
 
 err_close_drop_rq:
 	mlx5e_close_drop_rq(priv);
@@ -2574,6 +3156,9 @@
 err_destroy_tises:
 	mlx5e_destroy_tises(priv);
 
+err_destroy_umr_mkey:
+	mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
+
 err_destroy_mkey:
 	mlx5_core_destroy_mkey(mdev, &priv->mkey);
 
@@ -2586,6 +3171,9 @@
 err_unmap_free_uar:
 	mlx5_unmap_free_uar(mdev, &priv->cq_uar);
 
+err_destroy_wq:
+	destroy_workqueue(priv->wq);
+
 err_free_netdev:
 	free_netdev(netdev);
 
@@ -2599,23 +3187,37 @@
 
 	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
-	schedule_work(&priv->set_rx_mode_work);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 	mlx5e_disable_async_events(priv);
-	flush_scheduled_work();
-	unregister_netdev(netdev);
+	flush_workqueue(priv->wq);
+	if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
+		netif_device_detach(netdev);
+		mutex_lock(&priv->state_lock);
+		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+			mlx5e_close_locked(netdev);
+		mutex_unlock(&priv->state_lock);
+	} else {
+		unregister_netdev(netdev);
+	}
+
 	mlx5e_tc_cleanup(priv);
 	mlx5e_vxlan_cleanup(priv);
-	mlx5e_destroy_flow_tables(priv);
+	mlx5e_destroy_q_counter(priv);
+	mlx5e_destroy_flow_steering(priv);
 	mlx5e_destroy_tirs(priv);
-	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+	mlx5e_destroy_rqts(priv);
 	mlx5e_close_drop_rq(priv);
 	mlx5e_destroy_tises(priv);
+	mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
 	mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
 	mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
 	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
-	free_netdev(netdev);
+	cancel_delayed_work_sync(&priv->update_stats_work);
+	destroy_workqueue(priv->wq);
+
+	if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
+		free_netdev(netdev);
 }
 
 static void *mlx5e_get_netdev(void *vpriv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 58d4e2f..f345679 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -42,13 +42,149 @@
 	return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
 }
 
-static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
-				     struct mlx5e_rx_wqe *wqe, u16 ix)
+static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
+				       void *data)
+{
+	u32 ci = cqcc & cq->wq.sz_m1;
+
+	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
+}
+
+static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
+					 struct mlx5e_cq *cq, u32 cqcc)
+{
+	mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
+	cq->decmprs_left        = be32_to_cpu(cq->title.byte_cnt);
+	cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
+	rq->stats.cqe_compress_blks++;
+}
+
+static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
+{
+	mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
+	cq->mini_arr_idx = 0;
+}
+
+static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+{
+	u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
+	u32 wq_sz = 1 << cq->wq.log_sz;
+	u32 ci = cqcc & cq->wq.sz_m1;
+	u32 ci_top = min_t(u32, wq_sz, ci + n);
+
+	for (; ci < ci_top; ci++, n--) {
+		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+		cqe->op_own = op_own;
+	}
+
+	if (unlikely(ci == wq_sz)) {
+		op_own = !op_own;
+		for (ci = 0; ci < n; ci++) {
+			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+			cqe->op_own = op_own;
+		}
+	}
+}
+
+static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
+					struct mlx5e_cq *cq, u32 cqcc)
+{
+	u16 wqe_cnt_step;
+
+	cq->title.byte_cnt     = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
+	cq->title.check_sum    = cq->mini_arr[cq->mini_arr_idx].checksum;
+	cq->title.op_own      &= 0xf0;
+	cq->title.op_own      |= 0x01 & (cqcc >> cq->wq.log_sz);
+	cq->title.wqe_counter  = cpu_to_be16(cq->decmprs_wqe_counter);
+
+	wqe_cnt_step =
+		rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+		mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
+	cq->decmprs_wqe_counter =
+		(cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
+}
+
+static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
+						struct mlx5e_cq *cq, u32 cqcc)
+{
+	mlx5e_decompress_cqe(rq, cq, cqcc);
+	cq->title.rss_hash_type   = 0;
+	cq->title.rss_hash_result = 0;
+}
+
+static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
+					     struct mlx5e_cq *cq,
+					     int update_owner_only,
+					     int budget_rem)
+{
+	u32 cqcc = cq->wq.cc + update_owner_only;
+	u32 cqe_count;
+	u32 i;
+
+	cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
+
+	for (i = update_owner_only; i < cqe_count;
+	     i++, cq->mini_arr_idx++, cqcc++) {
+		if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
+			mlx5e_read_mini_arr_slot(cq, cqcc);
+
+		mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
+		rq->handle_rx_cqe(rq, &cq->title);
+	}
+	mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
+	cq->wq.cc = cqcc;
+	cq->decmprs_left -= cqe_count;
+	rq->stats.cqe_compress_pkts += cqe_count;
+
+	return cqe_count;
+}
+
+static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
+					      struct mlx5e_cq *cq,
+					      int budget_rem)
+{
+	mlx5e_read_title_slot(rq, cq, cq->wq.cc);
+	mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
+	mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
+	rq->handle_rx_cqe(rq, &cq->title);
+	cq->mini_arr_idx++;
+
+	return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
+}
+
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
+{
+	bool was_opened;
+
+	if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
+		return;
+
+	mutex_lock(&priv->state_lock);
+
+	if (priv->params.rx_cqe_compress == val)
+		goto unlock;
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(priv->netdev);
+
+	priv->params.rx_cqe_compress = val;
+
+	if (was_opened)
+		mlx5e_open_locked(priv->netdev);
+
+unlock:
+	mutex_unlock(&priv->state_lock);
+}
+
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 {
 	struct sk_buff *skb;
 	dma_addr_t dma_addr;
 
-	skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+	skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
 	if (unlikely(!skb))
 		return -ENOMEM;
 
@@ -62,10 +198,9 @@
 	if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
 		goto err_free_skb;
 
-	skb_reserve(skb, MLX5E_NET_IP_ALIGN);
-
 	*((dma_addr_t *)skb->cb) = dma_addr;
-	wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+	wqe->data.addr = cpu_to_be64(dma_addr);
+	wqe->data.lkey = rq->mkey_be;
 
 	rq->skb[ix] = skb;
 
@@ -77,18 +212,389 @@
 	return -ENOMEM;
 }
 
+static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
+{
+	return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+}
+
+static inline void
+mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
+				struct mlx5e_mpw_info *wi,
+				u32 wqe_offset, u32 len)
+{
+	dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
+				len, DMA_FROM_DEVICE);
+}
+
+static inline void
+mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
+				    struct mlx5e_mpw_info *wi,
+				    u32 wqe_offset, u32 len)
+{
+	/* No dma pre sync for fragmented MPWQE */
+}
+
+static inline void
+mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq,
+				struct sk_buff *skb,
+				struct mlx5e_mpw_info *wi,
+				u32 page_idx, u32 frag_offset,
+				u32 len)
+{
+	unsigned int truesize =	ALIGN(len, rq->mpwqe_stride_sz);
+
+	wi->skbs_frags[page_idx]++;
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+			&wi->dma_info.page[page_idx], frag_offset,
+			len, truesize);
+}
+
+static inline void
+mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
+				    struct sk_buff *skb,
+				    struct mlx5e_mpw_info *wi,
+				    u32 page_idx, u32 frag_offset,
+				    u32 len)
+{
+	unsigned int truesize =	ALIGN(len, rq->mpwqe_stride_sz);
+
+	dma_sync_single_for_cpu(rq->pdev,
+				wi->umr.dma_info[page_idx].addr + frag_offset,
+				len, DMA_FROM_DEVICE);
+	wi->skbs_frags[page_idx]++;
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+			wi->umr.dma_info[page_idx].page, frag_offset,
+			len, truesize);
+}
+
+static inline void
+mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
+				   struct sk_buff *skb,
+				   struct mlx5e_mpw_info *wi,
+				   u32 page_idx, u32 offset,
+				   u32 headlen)
+{
+	struct page *page = &wi->dma_info.page[page_idx];
+
+	skb_copy_to_linear_data(skb, page_address(page) + offset,
+				ALIGN(headlen, sizeof(long)));
+}
+
+static inline void
+mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
+				       struct sk_buff *skb,
+				       struct mlx5e_mpw_info *wi,
+				       u32 page_idx, u32 offset,
+				       u32 headlen)
+{
+	u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
+	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
+	unsigned int len;
+
+	 /* Aligning len to sizeof(long) optimizes memcpy performance */
+	len = ALIGN(headlen_pg, sizeof(long));
+	dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
+				DMA_FROM_DEVICE);
+	skb_copy_to_linear_data_offset(skb, 0,
+				       page_address(dma_info->page) + offset,
+				       len);
+	if (unlikely(offset + headlen > PAGE_SIZE)) {
+		dma_info++;
+		headlen_pg = len;
+		len = ALIGN(headlen - headlen_pg, sizeof(long));
+		dma_sync_single_for_cpu(pdev, dma_info->addr, len,
+					DMA_FROM_DEVICE);
+		skb_copy_to_linear_data_offset(skb, headlen_pg,
+					       page_address(dma_info->page),
+					       len);
+	}
+}
+
+static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+{
+	return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+		wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+}
+
+static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
+				struct mlx5e_sq *sq,
+				struct mlx5e_umr_wqe *wqe,
+				u16 ix)
+{
+	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
+	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+	struct mlx5_wqe_data_seg      *dseg = &wqe->data;
+	struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+	u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+
+	memset(wqe, 0, sizeof(*wqe));
+	cseg->opmod_idx_opcode =
+		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+			    MLX5_OPCODE_UMR);
+	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+				      ds_cnt);
+	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
+	cseg->imm       = rq->umr_mkey_be;
+
+	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+	ucseg->klm_octowords =
+		cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+	ucseg->bsf_octowords =
+		cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+	dseg->lkey = sq->mkey_be;
+	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+	struct mlx5e_sq *sq = &rq->channel->icosq;
+	struct mlx5_wq_cyc *wq = &sq->wq;
+	struct mlx5e_umr_wqe *wqe;
+	u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
+	u16 pi;
+
+	/* fill sq edge with nops to avoid wqe wrap around */
+	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+		sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
+		sq->ico_wqe_info[pi].num_wqebbs = 1;
+		mlx5e_send_nop(sq, true);
+	}
+
+	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+	mlx5e_build_umr_wqe(rq, sq, wqe, ix);
+	sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
+	sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
+	sq->pc += num_wqebbs;
+	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
+
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+	 * To avoid copying garbage after the mtt array, we allocate
+	 * a little more.
+	 */
+	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+		     MLX5_UMR_MTT_ALIGNMENT);
+}
+
+static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
+				    struct mlx5e_mpw_info *wi,
+				    int i)
+{
+	struct page *page;
+
+	page = dev_alloc_page();
+	if (unlikely(!page))
+		return -ENOMEM;
+
+	wi->umr.dma_info[i].page = page;
+	wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
+						PCI_DMA_FROMDEVICE);
+	if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
+		put_page(page);
+		return -ENOMEM;
+	}
+	wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);
+
+	return 0;
+}
+
+static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+					   struct mlx5e_rx_wqe *wqe,
+					   u16 ix)
+{
+	struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+	int mtt_sz = mlx5e_get_wqe_mtt_sz();
+	u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+	int i;
+
+	wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
+				   MLX5_MPWRQ_PAGES_PER_WQE,
+				   GFP_ATOMIC);
+	if (unlikely(!wi->umr.dma_info))
+		goto err_out;
+
+	/* We allocate more than mtt_sz as we will align the pointer */
+	wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
+				       GFP_ATOMIC);
+	if (unlikely(!wi->umr.mtt_no_align))
+		goto err_free_umr;
+
+	wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
+	wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
+					  PCI_DMA_TODEVICE);
+	if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
+		goto err_free_mtt;
+
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+		if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
+			goto err_unmap;
+		atomic_add(mlx5e_mpwqe_strides_per_page(rq),
+			   &wi->umr.dma_info[i].page->_count);
+		wi->skbs_frags[i] = 0;
+	}
+
+	wi->consumed_strides = 0;
+	wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
+	wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
+	wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
+	wi->free_wqe     = mlx5e_free_rx_fragmented_mpwqe;
+	wqe->data.lkey = rq->umr_mkey_be;
+	wqe->data.addr = cpu_to_be64(dma_offset);
+
+	return 0;
+
+err_unmap:
+	while (--i >= 0) {
+		dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+		atomic_sub(mlx5e_mpwqe_strides_per_page(rq),
+			   &wi->umr.dma_info[i].page->_count);
+		put_page(wi->umr.dma_info[i].page);
+	}
+	dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+
+err_free_mtt:
+	kfree(wi->umr.mtt_no_align);
+
+err_free_umr:
+	kfree(wi->umr.dma_info);
+
+err_out:
+	return -ENOMEM;
+}
+
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+				    struct mlx5e_mpw_info *wi)
+{
+	int mtt_sz = mlx5e_get_wqe_mtt_sz();
+	int i;
+
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+		dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+		atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i],
+			   &wi->umr.dma_info[i].page->_count);
+		put_page(wi->umr.dma_info[i].page);
+	}
+	dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+	kfree(wi->umr.mtt_no_align);
+	kfree(wi->umr.dma_info);
+}
+
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq = &rq->wq;
+	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+	clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+	rq->stats.mpwqe_frag++;
+
+	/* ensure wqes are visible to device before updating doorbell record */
+	dma_wmb();
+
+	mlx5_wq_ll_update_db_record(wq);
+}
+
+static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
+				       struct mlx5e_rx_wqe *wqe,
+				       u16 ix)
+{
+	struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+	gfp_t gfp_mask;
+	int i;
+
+	gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
+	wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+					     MLX5_MPWRQ_WQE_PAGE_ORDER);
+	if (unlikely(!wi->dma_info.page))
+		return -ENOMEM;
+
+	wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
+					 rq->wqe_sz, PCI_DMA_FROMDEVICE);
+	if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
+		put_page(wi->dma_info.page);
+		return -ENOMEM;
+	}
+
+	/* We split the high-order page into order-0 ones and manage their
+	 * reference counter to minimize the memory held by small skb fragments
+	 */
+	split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+		atomic_add(mlx5e_mpwqe_strides_per_page(rq),
+			   &wi->dma_info.page[i]._count);
+		wi->skbs_frags[i] = 0;
+	}
+
+	wi->consumed_strides = 0;
+	wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
+	wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
+	wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
+	wi->free_wqe     = mlx5e_free_rx_linear_mpwqe;
+	wqe->data.lkey = rq->mkey_be;
+	wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
+
+	return 0;
+}
+
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+				struct mlx5e_mpw_info *wi)
+{
+	int i;
+
+	dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
+		       PCI_DMA_FROMDEVICE);
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+		atomic_sub(mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i],
+			   &wi->dma_info.page[i]._count);
+		put_page(&wi->dma_info.page[i]);
+	}
+}
+
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+	int err;
+
+	err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
+	if (unlikely(err)) {
+		err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
+		if (unlikely(err))
+			return err;
+		set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+		mlx5e_post_umr_wqe(rq, ix);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+#define RQ_CANNOT_POST(rq) \
+		(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
+		 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
 	struct mlx5_wq_ll *wq = &rq->wq;
 
-	if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+	if (unlikely(RQ_CANNOT_POST(rq)))
 		return false;
 
 	while (!mlx5_wq_ll_is_full(wq)) {
 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+		int err;
 
-		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+		err = rq->alloc_wqe(rq, wqe, wq->head);
+		if (unlikely(err)) {
+			if (err != -EBUSY)
+				rq->stats.buff_alloc_err++;
 			break;
+		}
 
 		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
 	}
@@ -101,7 +607,8 @@
 	return !mlx5_wq_ll_is_full(wq);
 }
 
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+				 u32 cqe_bcnt)
 {
 	struct ethhdr	*eth	= (struct ethhdr *)(skb->data);
 	struct iphdr	*ipv4	= (struct iphdr *)(skb->data + ETH_HLEN);
@@ -112,7 +619,7 @@
 	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
 		       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
 
-	u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+	u16 tot_len = cqe_bcnt - ETH_HLEN;
 
 	if (eth->h_proto == htons(ETH_P_IP)) {
 		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
@@ -176,35 +683,43 @@
 
 	if (lro) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else if (likely(is_first_ethertype_ip(skb))) {
+		return;
+	}
+
+	if (is_first_ethertype_ip(skb)) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
 		rq->stats.csum_sw++;
-	} else {
-		goto csum_none;
+		return;
 	}
 
-	return;
-
+	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+		   (cqe->hds_ip_ext & CQE_L4_OK))) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		if (cqe_is_tunneled(cqe)) {
+			skb->csum_level = 1;
+			skb->encapsulation = 1;
+			rq->stats.csum_inner++;
+		}
+		return;
+	}
 csum_none:
 	skb->ip_summed = CHECKSUM_NONE;
 	rq->stats.csum_none++;
 }
 
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+				      u32 cqe_bcnt,
 				      struct mlx5e_rq *rq,
 				      struct sk_buff *skb)
 {
 	struct net_device *netdev = rq->netdev;
-	u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 	struct mlx5e_tstamp *tstamp = rq->tstamp;
 	int lro_num_seg;
 
-	skb_put(skb, cqe_bcnt);
-
 	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
 	if (lro_num_seg > 1) {
-		mlx5e_lro_update_hdr(skb, cqe);
+		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
 		rq->stats.lro_packets++;
 		rq->stats.lro_bytes += cqe_bcnt;
@@ -213,10 +728,6 @@
 	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
 		mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
 
-	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
-
-	skb->protocol = eth_type_trans(skb, netdev);
-
 	skb_record_rx_queue(skb, rq->ix);
 
 	if (likely(netdev->features & NETIF_F_RXHASH))
@@ -227,52 +738,165 @@
 				       be16_to_cpu(cqe->vlan_info));
 
 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
+
+	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
+	skb->protocol = eth_type_trans(skb, netdev);
+}
+
+static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+					 struct mlx5_cqe64 *cqe,
+					 u32 cqe_bcnt,
+					 struct sk_buff *skb)
+{
+	rq->stats.packets++;
+	rq->stats.bytes += cqe_bcnt;
+	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+	napi_gro_receive(rq->cq.napi, skb);
+}
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+	struct mlx5e_rx_wqe *wqe;
+	struct sk_buff *skb;
+	__be16 wqe_counter_be;
+	u16 wqe_counter;
+	u32 cqe_bcnt;
+
+	wqe_counter_be = cqe->wqe_counter;
+	wqe_counter    = be16_to_cpu(wqe_counter_be);
+	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	skb            = rq->skb[wqe_counter];
+	prefetch(skb->data);
+	rq->skb[wqe_counter] = NULL;
+
+	dma_unmap_single(rq->pdev,
+			 *((dma_addr_t *)skb->cb),
+			 rq->wqe_sz,
+			 DMA_FROM_DEVICE);
+
+	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+		rq->stats.wqe_err++;
+		dev_kfree_skb(skb);
+		goto wq_ll_pop;
+	}
+
+	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+	skb_put(skb, cqe_bcnt);
+
+	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+wq_ll_pop:
+	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+		       &wqe->next.next_wqe_index);
+}
+
+static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
+					   struct mlx5_cqe64 *cqe,
+					   struct mlx5e_mpw_info *wi,
+					   u32 cqe_bcnt,
+					   struct sk_buff *skb)
+{
+	u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz);
+	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
+	u32 wqe_offset     = stride_ix * rq->mpwqe_stride_sz;
+	u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
+	u32 page_idx       = wqe_offset >> PAGE_SHIFT;
+	u32 head_page_idx  = page_idx;
+	u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+	u32 frag_offset    = head_offset + headlen;
+	u16 byte_cnt       = cqe_bcnt - headlen;
+
+	if (unlikely(frag_offset >= PAGE_SIZE)) {
+		page_idx++;
+		frag_offset -= PAGE_SIZE;
+	}
+	wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);
+
+	while (byte_cnt) {
+		u32 pg_consumed_bytes =
+			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+
+		wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset,
+				 pg_consumed_bytes);
+		byte_cnt -= pg_consumed_bytes;
+		frag_offset = 0;
+		page_idx++;
+	}
+	/* copy header */
+	wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
+			    headlen);
+	/* skb linear part was allocated with headlen and aligned to long */
+	skb->tail += headlen;
+	skb->len  += headlen;
+}
+
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
+	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
+	struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+	struct mlx5e_rx_wqe  *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+	struct sk_buff *skb;
+	u16 cqe_bcnt;
+
+	wi->consumed_strides += cstrides;
+
+	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+		rq->stats.wqe_err++;
+		goto mpwrq_cqe_out;
+	}
+
+	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+		rq->stats.mpwqe_filler++;
+		goto mpwrq_cqe_out;
+	}
+
+	skb = napi_alloc_skb(rq->cq.napi,
+			     ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
+				   sizeof(long)));
+	if (unlikely(!skb)) {
+		rq->stats.buff_alloc_err++;
+		goto mpwrq_cqe_out;
+	}
+
+	prefetch(skb->data);
+	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+	mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
+	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+mpwrq_cqe_out:
+	if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
+		return;
+
+	wi->free_wqe(rq, wi);
+	mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
 }
 
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-	int work_done;
+	int work_done = 0;
 
-	for (work_done = 0; work_done < budget; work_done++) {
-		struct mlx5e_rx_wqe *wqe;
-		struct mlx5_cqe64 *cqe;
-		struct sk_buff *skb;
-		__be16 wqe_counter_be;
-		u16 wqe_counter;
+	if (cq->decmprs_left)
+		work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
 
-		cqe = mlx5e_get_cqe(cq);
+	for (; work_done < budget; work_done++) {
+		struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);
+
 		if (!cqe)
 			break;
 
-		mlx5_cqwq_pop(&cq->wq);
-
-		wqe_counter_be = cqe->wqe_counter;
-		wqe_counter    = be16_to_cpu(wqe_counter_be);
-		wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
-		skb            = rq->skb[wqe_counter];
-		prefetch(skb->data);
-		rq->skb[wqe_counter] = NULL;
-
-		dma_unmap_single(rq->pdev,
-				 *((dma_addr_t *)skb->cb),
-				 rq->wqe_sz,
-				 DMA_FROM_DEVICE);
-
-		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
-			rq->stats.wqe_err++;
-			dev_kfree_skb(skb);
-			goto wq_ll_pop;
+		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+			work_done +=
+				mlx5e_decompress_cqes_start(rq, cq,
+							    budget - work_done);
+			continue;
 		}
 
-		mlx5e_build_rx_skb(cqe, rq, skb);
-		rq->stats.packets++;
-		rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
-		napi_gro_receive(cq->napi, skb);
+		mlx5_cqwq_pop(&cq->wq);
 
-wq_ll_pop:
-		mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
-			       &wqe->next.next_wqe_index);
+		rq->handle_rx_cqe(rq, cqe);
 	}
 
 	mlx5_cqwq_update_db_record(&cq->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
new file mode 100644
index 0000000..83bc32b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_STATS_H__
+#define __MLX5_EN_STATS_H__
+
+#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
+	(*(u64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
+	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
+	(*(u32 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
+	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+
+#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+
+struct counter_desc {
+	char		name[ETH_GSTRING_LEN];
+	int		offset; /* Byte offset */
+};
+
+struct mlx5e_sw_stats {
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 tso_packets;
+	u64 tso_bytes;
+	u64 tso_inner_packets;
+	u64 tso_inner_bytes;
+	u64 lro_packets;
+	u64 lro_bytes;
+	u64 rx_csum_good;
+	u64 rx_csum_none;
+	u64 rx_csum_sw;
+	u64 rx_csum_inner;
+	u64 tx_csum_offload;
+	u64 tx_csum_inner;
+	u64 tx_queue_stopped;
+	u64 tx_queue_wake;
+	u64 tx_queue_dropped;
+	u64 rx_wqe_err;
+	u64 rx_mpwqe_filler;
+	u64 rx_mpwqe_frag;
+	u64 rx_buff_alloc_err;
+	u64 rx_cqe_compress_blks;
+	u64 rx_cqe_compress_pkts;
+
+	/* Special handling counters */
+	u64 link_down_events;
+};
+
+static const struct counter_desc sw_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) },
+};
+
+struct mlx5e_qcounter_stats {
+	u32 rx_out_of_buffer;
+};
+
+static const struct counter_desc q_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
+};
+
+#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
+#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
+						vstats->query_vport_out, c)
+
+struct mlx5e_vport_stats {
+	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
+};
+
+static const struct counter_desc vport_stats_desc[] = {
+	{ "rx_vport_error_packets",
+		VPORT_COUNTER_OFF(received_errors.packets) },
+	{ "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) },
+	{ "tx_vport_error_packets",
+		VPORT_COUNTER_OFF(transmit_errors.packets) },
+	{ "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) },
+	{ "rx_vport_unicast_packets",
+		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
+	{ "rx_vport_unicast_bytes",
+		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
+	{ "tx_vport_unicast_packets",
+		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
+	{ "tx_vport_unicast_bytes",
+		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
+	{ "rx_vport_multicast_packets",
+		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
+	{ "rx_vport_multicast_bytes",
+		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
+	{ "tx_vport_multicast_packets",
+		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
+	{ "tx_vport_multicast_bytes",
+		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
+	{ "rx_vport_broadcast_packets",
+		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
+	{ "rx_vport_broadcast_bytes",
+		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
+	{ "tx_vport_broadcast_packets",
+		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
+	{ "tx_vport_broadcast_bytes",
+		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+};
+
+#define PPORT_802_3_OFF(c) \
+	MLX5_BYTE_OFF(ppcnt_reg, \
+		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_802_3_GET(pstats, c) \
+	MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
+		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_OFF(c) \
+	MLX5_BYTE_OFF(ppcnt_reg, \
+		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_GET(pstats, c) \
+	MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
+		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_OFF(c) \
+	MLX5_BYTE_OFF(ppcnt_reg, \
+		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_GET(pstats, c) \
+	MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
+		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_OFF(c) \
+	MLX5_BYTE_OFF(ppcnt_reg, \
+		      counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_GET(pstats, prio, c) \
+	MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
+		   counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define NUM_PPORT_PRIO				8
+
+struct mlx5e_pport_stats {
+	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+};
+
+static const struct counter_desc pport_802_3_stats_desc[] = {
+	{ "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+	{ "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) },
+	{ "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+	{ "alignment_err", PPORT_802_3_OFF(a_alignment_errors) },
+	{ "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+	{ "octets_received", PPORT_802_3_OFF(a_octets_received_ok) },
+	{ "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+	{ "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+	{ "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+	{ "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+	{ "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) },
+	{ "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) },
+	{ "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) },
+	{ "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+	{ "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+	{ "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) },
+	{ "unsupported_op_rx",
+		PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+	{ "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+	{ "pause_ctrl_tx",
+		PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+};
+
+static const struct counter_desc pport_2863_stats_desc[] = {
+	{ "in_octets", PPORT_2863_OFF(if_in_octets) },
+	{ "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) },
+	{ "in_discards", PPORT_2863_OFF(if_in_discards) },
+	{ "in_errors", PPORT_2863_OFF(if_in_errors) },
+	{ "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) },
+	{ "out_octets", PPORT_2863_OFF(if_out_octets) },
+	{ "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) },
+	{ "out_discards", PPORT_2863_OFF(if_out_discards) },
+	{ "out_errors", PPORT_2863_OFF(if_out_errors) },
+	{ "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) },
+	{ "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) },
+	{ "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) },
+	{ "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) },
+};
+
+static const struct counter_desc pport_2819_stats_desc[] = {
+	{ "drop_events", PPORT_2819_OFF(ether_stats_drop_events) },
+	{ "octets", PPORT_2819_OFF(ether_stats_octets) },
+	{ "pkts", PPORT_2819_OFF(ether_stats_pkts) },
+	{ "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) },
+	{ "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) },
+	{ "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) },
+	{ "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+	{ "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) },
+	{ "fragments", PPORT_2819_OFF(ether_stats_fragments) },
+	{ "jabbers", PPORT_2819_OFF(ether_stats_jabbers) },
+	{ "collisions", PPORT_2819_OFF(ether_stats_collisions) },
+	{ "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) },
+	{ "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+	{ "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+	{ "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+	{ "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+	{ "p1024to1518octets",
+		PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+	{ "p1519to2047octets",
+		PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+	{ "p2048to4095octets",
+		PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+	{ "p4096to8191octets",
+		PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+	{ "p8192to10239octets",
+		PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+};
+
+static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
+	{ "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) },
+	{ "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) },
+	{ "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) },
+	{ "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) },
+};
+
+static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
+	{ "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+	{ "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+	{ "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+	{ "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+	{ "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+};
+
+struct mlx5e_rq_stats {
+	u64 packets;
+	u64 bytes;
+	u64 csum_sw;
+	u64 csum_inner;
+	u64 csum_none;
+	u64 lro_packets;
+	u64 lro_bytes;
+	u64 wqe_err;
+	u64 mpwqe_filler;
+	u64 mpwqe_frag;
+	u64 buff_alloc_err;
+	u64 cqe_compress_blks;
+	u64 cqe_compress_pkts;
+};
+
+static const struct counter_desc rq_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+};
+
+struct mlx5e_sq_stats {
+	/* commonly accessed in data path */
+	u64 packets;
+	u64 bytes;
+	u64 tso_packets;
+	u64 tso_bytes;
+	u64 tso_inner_packets;
+	u64 tso_inner_bytes;
+	u64 csum_offload_inner;
+	u64 nop;
+	/* less likely accessed in data path */
+	u64 csum_offload_none;
+	u64 stopped;
+	u64 wake;
+	u64 dropped;
+};
+
+static const struct counter_desc sq_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) },
+};
+
+#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
+#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
+#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
+#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
+#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
+#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
+#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
+	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
+#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
+	ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+#define NUM_PPORT_COUNTERS		(NUM_PPORT_802_3_COUNTERS + \
+					 NUM_PPORT_2863_COUNTERS  + \
+					 NUM_PPORT_2819_COUNTERS  + \
+					 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
+					 NUM_PPORT_PRIO)
+#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
+#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
+
+struct mlx5e_stats {
+	struct mlx5e_sw_stats sw;
+	struct mlx5e_qcounter_stats qcnt;
+	struct mlx5e_vport_stats vport;
+	struct mlx5e_pport_stats pport;
+};
+
+#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b3de09f..704c3d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -46,43 +46,65 @@
 	struct mlx5_flow_rule	*rule;
 };
 
-#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024
-#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4
+#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
+#define MLX5E_TC_TABLE_NUM_GROUPS 4
 
 static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
 						u32 *match_c, u32 *match_v,
 						u32 action, u32 flow_tag)
 {
-	struct mlx5_flow_destination dest = {
-		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
-		{.ft = priv->fts.vlan.t},
-	};
+	struct mlx5_core_dev *dev = priv->mdev;
+	struct mlx5_flow_destination dest = { 0 };
+	struct mlx5_fc *counter = NULL;
 	struct mlx5_flow_rule *rule;
 	bool table_created = false;
 
-	if (IS_ERR_OR_NULL(priv->fts.tc.t)) {
-		priv->fts.tc.t =
-			mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0,
-							    MLX5E_TC_FLOW_TABLE_NUM_ENTRIES,
-							    MLX5E_TC_FLOW_TABLE_NUM_GROUPS);
-		if (IS_ERR(priv->fts.tc.t)) {
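+	/* Rules without a forward destination (i.e. drop rules) are given a
+	 * flow counter as their destination so that flower stats can later be
+	 * queried; forwarding rules keep pointing at the vlan flow table.
+	 */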
+	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+		dest.ft = priv->fs.vlan.ft.t;
+	} else {
+		counter = mlx5_fc_create(dev, true);
+		if (IS_ERR(counter))
+			return ERR_CAST(counter);
+
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest.counter = counter;
+	}
+
+	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
+		priv->fs.tc.t =
+			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
+							    MLX5E_TC_PRIO,
+							    MLX5E_TC_TABLE_NUM_ENTRIES,
+							    MLX5E_TC_TABLE_NUM_GROUPS,
+							    0);
+		if (IS_ERR(priv->fs.tc.t)) {
 			netdev_err(priv->netdev,
 				   "Failed to create tc offload table\n");
-			return ERR_CAST(priv->fts.tc.t);
+			rule = ERR_CAST(priv->fs.tc.t);
+			goto err_create_ft;
 		}
 
 		table_created = true;
 	}
 
-	rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS,
+	rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
 				  match_c, match_v,
 				  action, flow_tag,
-				  action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
+				  &dest);
 
-	if (IS_ERR(rule) && table_created) {
-		mlx5_destroy_flow_table(priv->fts.tc.t);
-		priv->fts.tc.t = NULL;
+	if (IS_ERR(rule))
+		goto err_add_rule;
+
+	return rule;
+
+err_add_rule:
+	if (table_created) {
+		mlx5_destroy_flow_table(priv->fs.tc.t);
+		priv->fs.tc.t = NULL;
 	}
+err_create_ft:
+	mlx5_fc_destroy(dev, counter);
 
 	return rule;
 }
@@ -90,11 +112,17 @@
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5_flow_rule *rule)
 {
+	struct mlx5_fc *counter = NULL;
+
+	counter = mlx5_flow_rule_counter(rule);
+
 	mlx5_del_flow_rule(rule);
 
+	mlx5_fc_destroy(priv->mdev, counter);
+
 	if (!mlx5e_tc_num_filters(priv)) {
-		mlx5_destroy_flow_table(priv->fts.tc.t);
-		priv->fts.tc.t = NULL;
+		mlx5_destroy_flow_table(priv->fs.tc.t);
+		priv->fs.tc.t = NULL;
 	}
 }
 
@@ -284,6 +312,9 @@
 
 		if (is_tcf_gact_shot(a)) {
 			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+			if (MLX5_CAP_FLOWTABLE(priv->mdev,
+					       flow_table_properties_nic_receive.flow_counter))
+				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
 			continue;
 		}
 
@@ -310,7 +341,7 @@
 int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 			   struct tc_cls_flower_offload *f)
 {
-	struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
 	u32 *match_c;
 	u32 *match_v;
 	int err = 0;
@@ -376,7 +407,7 @@
 			struct tc_cls_flower_offload *f)
 {
 	struct mlx5e_tc_flow *flow;
-	struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
 
 	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
 				      tc->ht_params);
@@ -392,6 +423,34 @@
 	return 0;
 }
 
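+/* Report the hardware flow counter of an offloaded flower rule (bytes,
+ * packets, last use) back to the TC actions attached to it.
+ */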
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+		       struct tc_cls_flower_offload *f)
+{
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
+	struct mlx5e_tc_flow *flow;
+	struct tc_action *a;
+	struct mlx5_fc *counter;
+	u64 bytes;
+	u64 packets;
+	u64 lastuse;
+
+	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+				      tc->ht_params);
+	if (!flow)
+		return -EINVAL;
+
+	counter = mlx5_flow_rule_counter(flow->rule);
+	if (!counter)
+		return 0;
+
+	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+
+	tc_for_each_action(a, f->exts)
+		tcf_action_stats_update(a, bytes, packets, lastuse);
+
+	return 0;
+}
+
 static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
 	.head_offset = offsetof(struct mlx5e_tc_flow, node),
 	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
@@ -401,7 +460,7 @@
 
 int mlx5e_tc_init(struct mlx5e_priv *priv)
 {
-	struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
 
 	tc->ht_params = mlx5e_tc_flow_ht_params;
 	return rhashtable_init(&tc->ht, &tc->ht_params);
@@ -418,12 +477,12 @@
 
 void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
 {
-	struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
 
 	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
 
-	if (!IS_ERR_OR_NULL(priv->fts.tc.t)) {
-		mlx5_destroy_flow_table(priv->fts.tc.t);
-		priv->fts.tc.t = NULL;
+	if (!IS_ERR_OR_NULL(tc->t)) {
+		mlx5_destroy_flow_table(tc->t);
+		tc->t = NULL;
 	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index d677428..34bf903 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -43,9 +43,12 @@
 int mlx5e_delete_flower(struct mlx5e_priv *priv,
 			struct tc_cls_flower_offload *f);
 
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+		       struct tc_cls_flower_offload *f);
+
 static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
 {
-	return atomic_read(&priv->fts.tc.ht.nelems);
+	return atomic_read(&priv->fs.tc.ht.nelems);
 }
 
 #endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 1ffc7cb..229ab16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -54,10 +54,11 @@
 
 	sq->skb[pi] = NULL;
 	sq->pc++;
+	sq->stats.nop++;
 
 	if (notify_hw) {
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, wqe, 0);
+		mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
 	}
 }
 
@@ -309,7 +310,7 @@
 			bf_sz = wi->num_wqebbs << 3;
 
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
+		mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
 	}
 
 	/* fill sq edge with nops to avoid wqe wrap around */
@@ -387,7 +388,6 @@
 			wi = &sq->wqe_info[ci];
 
 			if (unlikely(!skb)) { /* nop */
-				sq->stats.nop++;
 				sqcc++;
 				continue;
 			}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 9bb4395..c38781f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -49,6 +49,60 @@
 	return cqe;
 }
 
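+/* Poll the internal control operations (ICO) SQ completion queue: advance
+ * the SQ consumer counter past completed NOP/UMR WQEs and, on a UMR
+ * completion, re-post the fragmented MPWQE to the channel's RQ.
+ */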
+static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+{
+	struct mlx5_wq_cyc *wq;
+	struct mlx5_cqe64 *cqe;
+	struct mlx5e_sq *sq;
+	u16 sqcc;
+
+	cqe = mlx5e_get_cqe(cq);
+	if (likely(!cqe))
+		return;
+
+	sq = container_of(cq, struct mlx5e_sq, cq);
+	wq = &sq->wq;
+
+	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+	 * otherwise a cq overrun may occur
+	 */
+	sqcc = sq->cc;
+
+	do {
+		u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
+		struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci];
+
+		mlx5_cqwq_pop(&cq->wq);
+		sqcc += icowi->num_wqebbs;
+
+		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+			WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
+				  cqe->op_own);
+			break;
+		}
+
+		switch (icowi->opcode) {
+		case MLX5_OPCODE_NOP:
+			break;
+		case MLX5_OPCODE_UMR:
+			mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq);
+			break;
+		default:
+			WARN_ONCE(true,
+				  "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+				  icowi->opcode);
+		}
+
+	} while ((cqe = mlx5e_get_cqe(cq)));
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	sq->cc = sqcc;
+}
+
 int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 {
 	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -64,6 +118,9 @@
 
 	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
 	busy |= work_done == budget;
+
+	mlx5e_poll_ico_cq(&c->icosq.cq);
+
 	busy |= mlx5e_post_rx_wqes(&c->rq);
 
 	if (busy)
@@ -80,6 +137,7 @@
 	for (i = 0; i < c->num_tc; i++)
 		mlx5e_cq_arm(&c->sq[i].cq);
 	mlx5e_cq_arm(&c->rq.cq);
+	mlx5e_cq_arm(&c->icosq.cq);
 
 	return work_done;
 }
@@ -89,7 +147,6 @@
 	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
 
 	set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
-	barrier();
 	napi_schedule(cq->napi);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bc3d9f8a..b84a691 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -77,16 +77,20 @@
 	u8                     action;
 	u32                    vport;
 	struct mlx5_flow_rule *flow_rule; /* SRIOV only */
+	/* A flag indicating that mac was added due to mc promiscuous vport */
+	bool mc_promisc;
 };
 
 enum {
 	UC_ADDR_CHANGE = BIT(0),
 	MC_ADDR_CHANGE = BIT(1),
+	PROMISC_CHANGE = BIT(3),
 };
 
 /* Vport context events */
 #define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
-			    MC_ADDR_CHANGE)
+			    MC_ADDR_CHANGE | \
+			    PROMISC_CHANGE)
 
 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
 					u32 events_mask)
@@ -116,6 +120,9 @@
 	if (events_mask & MC_ADDR_CHANGE)
 		MLX5_SET(nic_vport_context, nic_vport_ctx,
 			 event_on_mc_address_change, 1);
+	if (events_mask & PROMISC_CHANGE)
+		MLX5_SET(nic_vport_context, nic_vport_ctx,
+			 event_on_promisc_change, 1);
 
 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 	if (err)
@@ -323,30 +330,45 @@
 
 /* E-Switch FDB */
 static struct mlx5_flow_rule *
-esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
+			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
 {
-	int match_header = MLX5_MATCH_OUTER_HEADERS;
-	struct mlx5_flow_destination dest;
+	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
+			    MLX5_MATCH_OUTER_HEADERS);
 	struct mlx5_flow_rule *flow_rule = NULL;
+	struct mlx5_flow_destination dest;
+	void *mv_misc = NULL;
+	void *mc_misc = NULL;
+	u8 *dmac_v = NULL;
+	u8 *dmac_c = NULL;
 	u32 *match_v;
 	u32 *match_c;
-	u8 *dmac_v;
-	u8 *dmac_c;
 
+	if (rx_rule)
+		match_header |= MLX5_MATCH_MISC_PARAMETERS;
 	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
 	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
 	if (!match_v || !match_c) {
 		pr_warn("FDB: Failed to alloc match parameters\n");
 		goto out;
 	}
+
 	dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
 			      outer_headers.dmac_47_16);
 	dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
 			      outer_headers.dmac_47_16);
 
-	ether_addr_copy(dmac_v, mac);
-	/* Match criteria mask */
-	memset(dmac_c, 0xff, 6);
+	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
+		ether_addr_copy(dmac_v, mac_v);
+		ether_addr_copy(dmac_c, mac_c);
+	}
+
+	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
+		mv_misc  = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
+		mc_misc  = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
+		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
+	}
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport_num = vport;
@@ -373,6 +395,39 @@
 	return flow_rule;
 }
 
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+{
+	u8 mac_c[ETH_ALEN];
+
+	eth_broadcast_addr(mac_c);
+	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
+}
+
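+/* FDB rule that steers any multicast frame (match only the multicast bit
+ * of the destination MAC) to the given vport.
+ */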
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+	u8 mac_c[ETH_ALEN];
+	u8 mac_v[ETH_ALEN];
+
+	eth_zero_addr(mac_c);
+	eth_zero_addr(mac_v);
+	mac_c[0] = 0x01;
+	mac_v[0] = 0x01;
+	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
+}
+
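+/* FDB rule that steers traffic arriving from the uplink (match on
+ * source_port == UPLINK_VPORT only) to the given vport - promiscuous
+ * rx mode.
+ */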
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+	u8 mac_c[ETH_ALEN];
+	u8 mac_v[ETH_ALEN];
+
+	eth_zero_addr(mac_c);
+	eth_zero_addr(mac_v);
+	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
+}
+
 static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -401,34 +456,80 @@
 	memset(flow_group_in, 0, inlen);
 
 	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-	fdb = mlx5_create_flow_table(root_ns, 0, table_size);
+	fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
 	if (IS_ERR_OR_NULL(fdb)) {
 		err = PTR_ERR(fdb);
 		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
 		goto out;
 	}
+	esw->fdb_table.fdb = fdb;
 
+	/* Addresses group: full match on unicast/multicast addresses */
 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
 		 MLX5_MATCH_OUTER_HEADERS);
 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
 	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+	/* Preserve 2 entries for allmulti and promisc rules */
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
 	eth_broadcast_addr(dmac);
-
 	g = mlx5_create_flow_group(fdb, flow_group_in);
 	if (IS_ERR_OR_NULL(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
 		goto out;
 	}
-
 	esw->fdb_table.addr_grp = g;
-	esw->fdb_table.fdb = fdb;
+
+	/* Allmulti group : One rule that forwards any mcast traffic */
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+		 MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
+	eth_zero_addr(dmac);
+	dmac[0] = 0x01;
+	g = mlx5_create_flow_group(fdb, flow_group_in);
+	if (IS_ERR_OR_NULL(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
+		goto out;
+	}
+	esw->fdb_table.allmulti_grp = g;
+
+	/* Promiscuous group:
+	 * One rule that forwards all traffic not matched by the previous groups
+	 */
+	eth_zero_addr(dmac);
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+		 MLX5_MATCH_MISC_PARAMETERS);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+	g = mlx5_create_flow_group(fdb, flow_group_in);
+	if (IS_ERR_OR_NULL(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
+		goto out;
+	}
+	esw->fdb_table.promisc_grp = g;
+
 out:
+	if (err) {
+		if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) {
+			mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
+			esw->fdb_table.allmulti_grp = NULL;
+		}
+		if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) {
+			mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
+			esw->fdb_table.addr_grp = NULL;
+		}
+		if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
+			mlx5_destroy_flow_table(esw->fdb_table.fdb);
+			esw->fdb_table.fdb = NULL;
+		}
+	}
+
 	kfree(flow_group_in);
-	if (err && !IS_ERR_OR_NULL(fdb))
-		mlx5_destroy_flow_table(fdb);
 	return err;
 }
 
@@ -438,10 +539,14 @@
 		return;
 
 	esw_debug(esw->dev, "Destroy FDB Table\n");
+	mlx5_destroy_flow_group(esw->fdb_table.promisc_grp);
+	mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
 	mlx5_destroy_flow_table(esw->fdb_table.fdb);
 	esw->fdb_table.fdb = NULL;
 	esw->fdb_table.addr_grp = NULL;
+	esw->fdb_table.allmulti_grp = NULL;
+	esw->fdb_table.promisc_grp = NULL;
 }
 
 /* E-Switch vport UC/MC lists management */
@@ -511,6 +616,52 @@
 	return 0;
 }
 
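+/* Propagate an added/deleted multicast address to every vport that is in
+ * multicast promiscuous mode (i.e. has an allmulti rule installed), except
+ * the vport the address change originated from.
+ */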
+static void update_allmulti_vports(struct mlx5_eswitch *esw,
+				   struct vport_addr *vaddr,
+				   struct esw_mc_addr *esw_mc)
+{
+	u8 *mac = vaddr->node.addr;
+	u32 vport_idx = 0;
+
+	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
+		struct mlx5_vport *vport = &esw->vports[vport_idx];
+		struct hlist_head *vport_hash = vport->mc_list;
+		struct vport_addr *iter_vaddr =
+					l2addr_hash_find(vport_hash,
+							 mac,
+							 struct vport_addr);
+		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
+		    vaddr->vport == vport_idx)
+			continue;
+		switch (vaddr->action) {
+		case MLX5_ACTION_ADD:
+			if (iter_vaddr)
+				continue;
+			iter_vaddr = l2addr_hash_add(vport_hash, mac,
+						     struct vport_addr,
+						     GFP_KERNEL);
+			if (!iter_vaddr) {
+				esw_warn(esw->dev,
+					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
+					 mac, vport_idx);
+				continue;
+			}
+			iter_vaddr->vport = vport_idx;
+			iter_vaddr->flow_rule =
+					esw_fdb_set_vport_rule(esw,
+							       mac,
+							       vport_idx);
+			break;
+		case MLX5_ACTION_DEL:
+			if (!iter_vaddr)
+				continue;
+			mlx5_del_flow_rule(iter_vaddr->flow_rule);
+			l2addr_hash_del(iter_vaddr);
+			break;
+		}
+	}
+}
+
 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 {
 	struct hlist_head *hash = esw->mc_table;
@@ -531,8 +682,17 @@
 
 	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
 		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
+
+	/* Add this multicast mac to all the mc promiscuous vports */
+	update_allmulti_vports(esw, vaddr, esw_mc);
+
 add:
-	esw_mc->refcnt++;
+	/* If the multicast mac is added as a result of mc promiscuous vport,
+	 * don't increment the multicast ref count
+	 */
+	if (!vaddr->mc_promisc)
+		esw_mc->refcnt++;
+
 	/* Forward MC MAC to vport */
 	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
 	esw_debug(esw->dev,
@@ -568,9 +728,15 @@
 		mlx5_del_flow_rule(vaddr->flow_rule);
 	vaddr->flow_rule = NULL;
 
-	if (--esw_mc->refcnt)
+	/* If the multicast mac is added as a result of mc promiscuous vport,
+	 * don't decrement the multicast ref count.
+	 */
+	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
 		return 0;
 
+	/* Remove this multicast mac from all the mc promiscuous vports */
+	update_allmulti_vports(esw, vaddr, esw_mc);
+
 	if (esw_mc->uplink_rule)
 		mlx5_del_flow_rule(esw_mc->uplink_rule);
 
@@ -643,10 +809,13 @@
 		addr->action = MLX5_ACTION_DEL;
 	}
 
+	if (!vport->enabled)
+		goto out;
+
 	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
 					    mac_list, &size);
 	if (err)
-		return;
+		goto out;
 	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
 		  vport_num, is_uc ? "UC" : "MC", size);
 
@@ -660,6 +829,24 @@
 		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
 		if (addr) {
 			addr->action = MLX5_ACTION_NONE;
+			/* If this mac was previously added because of allmulti
+			 * promiscuous rx mode, it's now converted to be the
+			 * original vport mac.
+			 */
+			if (addr->mc_promisc) {
+				struct esw_mc_addr *esw_mc =
+					l2addr_hash_find(esw->mc_table,
+							 mac_list[i],
+							 struct esw_mc_addr);
+				if (!esw_mc) {
+					esw_warn(esw->dev,
+						 "Failed to find MAC(%pM) in mcast DB\n",
+						 mac_list[i]);
+					continue;
+				}
+				esw_mc->refcnt++;
+				addr->mc_promisc = false;
+			}
 			continue;
 		}
 
@@ -674,13 +861,121 @@
 		addr->vport = vport_num;
 		addr->action = MLX5_ACTION_ADD;
 	}
+out:
 	kfree(mac_list);
 }
 
-static void esw_vport_change_handler(struct work_struct *work)
+/* Sync the vport's multicast list with the e-switch multicast table
+ * (used for mc-promiscuous vports).
+ * Must be called after esw_update_vport_addr_list().
+ */
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
 {
-	struct mlx5_vport *vport =
-		container_of(work, struct mlx5_vport, vport_change_handler);
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	struct l2addr_node *node;
+	struct vport_addr *addr;
+	struct hlist_head *hash;
+	struct hlist_node *tmp;
+	int hi;
+
+	hash = vport->mc_list;
+
+	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
+		u8 *mac = node->addr;
+
+		addr = l2addr_hash_find(hash, mac, struct vport_addr);
+		if (addr) {
+			if (addr->action == MLX5_ACTION_DEL)
+				addr->action = MLX5_ACTION_NONE;
+			continue;
+		}
+		addr = l2addr_hash_add(hash, mac, struct vport_addr,
+				       GFP_KERNEL);
+		if (!addr) {
+			esw_warn(esw->dev,
+				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
+				 mac, vport_num);
+			continue;
+		}
+		addr->vport = vport_num;
+		addr->action = MLX5_ACTION_ADD;
+		addr->mc_promisc = true;
+	}
+}
+
+/* Apply vport rx mode to HW FDB table */
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
+				    bool promisc, bool mc_promisc)
+{
+	struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+
+	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
+		goto promisc;
+
+	if (mc_promisc) {
+		vport->allmulti_rule =
+				esw_fdb_set_vport_allmulti_rule(esw, vport_num);
+		if (!allmulti_addr->uplink_rule)
+			allmulti_addr->uplink_rule =
+				esw_fdb_set_vport_allmulti_rule(esw,
+								UPLINK_VPORT);
+		allmulti_addr->refcnt++;
+	} else if (vport->allmulti_rule) {
+		mlx5_del_flow_rule(vport->allmulti_rule);
+		vport->allmulti_rule = NULL;
+
+		if (--allmulti_addr->refcnt > 0)
+			goto promisc;
+
+		if (allmulti_addr->uplink_rule)
+			mlx5_del_flow_rule(allmulti_addr->uplink_rule);
+		allmulti_addr->uplink_rule = NULL;
+	}
+
+promisc:
+	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
+		return;
+
+	if (promisc) {
+		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
+								     vport_num);
+	} else if (vport->promisc_rule) {
+		mlx5_del_flow_rule(vport->promisc_rule);
+		vport->promisc_rule = NULL;
+	}
+}
+
+/* Sync vport rx mode from vport context */
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
+{
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	int promisc_all = 0;
+	int promisc_uc = 0;
+	int promisc_mc = 0;
+	int err;
+
+	err = mlx5_query_nic_vport_promisc(esw->dev,
+					   vport_num,
+					   &promisc_uc,
+					   &promisc_mc,
+					   &promisc_all);
+	if (err)
+		return;
+	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
+		  vport_num, promisc_all, promisc_mc);
+
+	if (!vport->trusted || !vport->enabled) {
+		promisc_uc = 0;
+		promisc_mc = 0;
+		promisc_all = 0;
+	}
+
+	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
+				(promisc_all || promisc_mc));
+}
+
+static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
+{
 	struct mlx5_core_dev *dev = vport->dev;
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	u8 mac[ETH_ALEN];
@@ -699,6 +994,15 @@
 	if (vport->enabled_events & MC_ADDR_CHANGE) {
 		esw_update_vport_addr_list(esw, vport->vport,
 					   MLX5_NVPRT_LIST_TYPE_MC);
+	}
+
+	if (vport->enabled_events & PROMISC_CHANGE) {
+		esw_update_vport_rx_mode(esw, vport->vport);
+		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
+			esw_update_vport_mc_promisc(esw, vport->vport);
+	}
+
+	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
 		esw_apply_vport_addr_list(esw, vport->vport,
 					  MLX5_NVPRT_LIST_TYPE_MC);
 	}
@@ -709,15 +1013,477 @@
 					     vport->enabled_events);
 }
 
+static void esw_vport_change_handler(struct work_struct *work)
+{
+	struct mlx5_vport *vport =
+		container_of(work, struct mlx5_vport, vport_change_handler);
+	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+	mutex_lock(&esw->state_lock);
+	esw_vport_change_handle_locked(vport);
+	mutex_unlock(&esw->state_lock);
+}
+
+static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+					struct mlx5_vport *vport)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_group *vlan_grp = NULL;
+	struct mlx5_flow_group *drop_grp = NULL;
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *root_ns;
+	struct mlx5_flow_table *acl;
+	void *match_criteria;
+	u32 *flow_group_in;
+	/* The egress acl table contains 2 rules:
+	 * 1) Allow traffic with vlan_tag == vst_vlan_id
+	 * 2) Drop all other traffic.
+	 */
+	int table_size = 2;
+	int err = 0;
+
+	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
+	    !IS_ERR_OR_NULL(vport->egress.acl))
+		return;
+
+	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
+		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
+
+	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+		return;
+	}
+
+	flow_group_in = mlx5_vzalloc(inlen);
+	if (!flow_group_in)
+		return;
+
+	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+	if (IS_ERR_OR_NULL(acl)) {
+		err = PTR_ERR(acl);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR_OR_NULL(vlan_grp)) {
+		err = PTR_ERR(vlan_grp);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR_OR_NULL(drop_grp)) {
+		err = PTR_ERR(drop_grp);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+
+	vport->egress.acl = acl;
+	vport->egress.drop_grp = drop_grp;
+	vport->egress.allowed_vlans_grp = vlan_grp;
+out:
+	kfree(flow_group_in);
+	if (err && !IS_ERR_OR_NULL(vlan_grp))
+		mlx5_destroy_flow_group(vlan_grp);
+	if (err && !IS_ERR_OR_NULL(acl))
+		mlx5_destroy_flow_table(acl);
+}
+
+static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+					   struct mlx5_vport *vport)
+{
+	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+		mlx5_del_flow_rule(vport->egress.allowed_vlan);
+
+	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
+		mlx5_del_flow_rule(vport->egress.drop_rule);
+
+	vport->egress.allowed_vlan = NULL;
+	vport->egress.drop_rule = NULL;
+}
+
+static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+					 struct mlx5_vport *vport)
+{
+	if (IS_ERR_OR_NULL(vport->egress.acl))
+		return;
+
+	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
+
+	esw_vport_cleanup_egress_rules(esw, vport);
+	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
+	mlx5_destroy_flow_group(vport->egress.drop_grp);
+	mlx5_destroy_flow_table(vport->egress.acl);
+	vport->egress.allowed_vlans_grp = NULL;
+	vport->egress.drop_grp = NULL;
+	vport->egress.acl = NULL;
+}
+
+static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+					 struct mlx5_vport *vport)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *root_ns;
+	struct mlx5_flow_table *acl;
+	struct mlx5_flow_group *g;
+	void *match_criteria;
+	u32 *flow_group_in;
+	/* The ingress acl table contains 4 groups
+	 * (2 active rules at the same time -
+	 *      1 allow rule from one of the first 3 groups,
+	 *      1 drop rule from the last group):
+	 * 1) Allow untagged traffic with smac == original mac.
+	 * 2) Allow untagged traffic.
+	 * 3) Allow traffic with smac == original mac.
+	 * 4) Drop all other traffic.
+	 */
+	int table_size = 4;
+	int err = 0;
+
+	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
+	    !IS_ERR_OR_NULL(vport->ingress.acl))
+		return;
+
+	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+		return;
+	}
+
+	flow_group_in = mlx5_vzalloc(inlen);
+	if (!flow_group_in)
+		return;
+
+	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+	if (IS_ERR_OR_NULL(acl)) {
+		err = PTR_ERR(acl);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+	vport->ingress.acl = acl;
+
+	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+	g = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR_OR_NULL(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+	vport->ingress.allow_untagged_spoofchk_grp = g;
+
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+	g = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR_OR_NULL(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+	vport->ingress.allow_untagged_only_grp = g;
+
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
+
+	g = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR_OR_NULL(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+	vport->ingress.allow_spoofchk_only_grp = g;
+
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
+
+	g = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR_OR_NULL(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+	vport->ingress.drop_grp = g;
+
+out:
+	if (err) {
+		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
+			mlx5_destroy_flow_group(
+					vport->ingress.allow_spoofchk_only_grp);
+		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
+			mlx5_destroy_flow_group(
+					vport->ingress.allow_untagged_only_grp);
+		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
+			mlx5_destroy_flow_group(
+				vport->ingress.allow_untagged_spoofchk_grp);
+		if (!IS_ERR_OR_NULL(vport->ingress.acl))
+			mlx5_destroy_flow_table(vport->ingress.acl);
+	}
+
+	kfree(flow_group_in);
+}
+
+static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+					    struct mlx5_vport *vport)
+{
+	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
+		mlx5_del_flow_rule(vport->ingress.drop_rule);
+
+	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+		mlx5_del_flow_rule(vport->ingress.allow_rule);
+
+	vport->ingress.drop_rule = NULL;
+	vport->ingress.allow_rule = NULL;
+}
+
+static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+					  struct mlx5_vport *vport)
+{
+	if (IS_ERR_OR_NULL(vport->ingress.acl))
+		return;
+
+	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
+
+	esw_vport_cleanup_ingress_rules(esw, vport);
+	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
+	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
+	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
+	mlx5_destroy_flow_group(vport->ingress.drop_grp);
+	mlx5_destroy_flow_table(vport->ingress.acl);
+	vport->ingress.acl = NULL;
+	vport->ingress.drop_grp = NULL;
+	vport->ingress.allow_spoofchk_only_grp = NULL;
+	vport->ingress.allow_untagged_only_grp = NULL;
+	vport->ingress.allow_untagged_spoofchk_grp = NULL;
+}
+
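+/* (Re)build the ingress ACL rules of a vport: when VST vlan/qos and/or
+ * spoofchk are configured, allow only untagged traffic and/or traffic whose
+ * source MAC equals the vport MAC, and drop everything else.
+ */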
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+				    struct mlx5_vport *vport)
+{
+	u8 smac[ETH_ALEN];
+	u32 *match_v;
+	u32 *match_c;
+	int err = 0;
+	u8 *smac_v;
+
+	if (vport->spoofchk) {
+		err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
+		if (err) {
+			esw_warn(esw->dev,
+				 "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
+				 vport->vport, err);
+			return err;
+		}
+
+		if (!is_valid_ether_addr(smac)) {
+			mlx5_core_warn(esw->dev,
+				       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+				       vport->vport);
+			return -EPERM;
+		}
+	}
+
+	esw_vport_cleanup_ingress_rules(esw, vport);
+
+	if (!vport->vlan && !vport->qos && !vport->spoofchk) {
+		esw_vport_disable_ingress_acl(esw, vport);
+		return 0;
+	}
+
+	esw_vport_enable_ingress_acl(esw, vport);
+
+	esw_debug(esw->dev,
+		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
+		  vport->vport, vport->vlan, vport->qos);
+
+	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+	if (!match_v || !match_c) {
+		err = -ENOMEM;
+		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+
+	if (vport->vlan || vport->qos)
+		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+
+	if (vport->spoofchk) {
+		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
+		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
+		smac_v = MLX5_ADDR_OF(fte_match_param,
+				      match_v,
+				      outer_headers.smac_47_16);
+		ether_addr_copy(smac_v, smac);
+	}
+
+	vport->ingress.allow_rule =
+		mlx5_add_flow_rule(vport->ingress.acl,
+				   MLX5_MATCH_OUTER_HEADERS,
+				   match_c,
+				   match_v,
+				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+				   0, NULL);
+	if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+		err = PTR_ERR(vport->ingress.allow_rule);
+		pr_warn("vport[%d] configure ingress allow rule failed, err(%d)\n",
+			vport->vport, err);
+		vport->ingress.allow_rule = NULL;
+		goto out;
+	}
+
+	memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+	memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+	vport->ingress.drop_rule =
+		mlx5_add_flow_rule(vport->ingress.acl,
+				   0,
+				   match_c,
+				   match_v,
+				   MLX5_FLOW_CONTEXT_ACTION_DROP,
+				   0, NULL);
+	if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+		err = PTR_ERR(vport->ingress.drop_rule);
+		pr_warn("vport[%d] configure ingress drop rule failed, err(%d)\n",
+			vport->vport, err);
+		vport->ingress.drop_rule = NULL;
+		goto out;
+	}
+
+out:
+	if (err)
+		esw_vport_cleanup_ingress_rules(esw, vport);
+
+	kfree(match_v);
+	kfree(match_c);
+	return err;
+}
+
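+/* (Re)build the egress ACL rules of a vport: when VST vlan/qos is
+ * configured, allow only frames tagged with the configured vlan id and
+ * drop everything else.
+ */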
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+				   struct mlx5_vport *vport)
+{
+	u32 *match_v;
+	u32 *match_c;
+	int err = 0;
+
+	esw_vport_cleanup_egress_rules(esw, vport);
+
+	if (!vport->vlan && !vport->qos) {
+		esw_vport_disable_egress_acl(esw, vport);
+		return 0;
+	}
+
+	esw_vport_enable_egress_acl(esw, vport);
+
+	esw_debug(esw->dev,
+		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
+		  vport->vport, vport->vlan, vport->qos);
+
+	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+	if (!match_v || !match_c) {
+		err = -ENOMEM;
+		esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+
+	/* Allowed vlan rule */
+	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
+	MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
+
+	vport->egress.allowed_vlan =
+		mlx5_add_flow_rule(vport->egress.acl,
+				   MLX5_MATCH_OUTER_HEADERS,
+				   match_c,
+				   match_v,
+				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+				   0, NULL);
+	if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+		err = PTR_ERR(vport->egress.allowed_vlan);
+		pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+			vport->vport, err);
+		vport->egress.allowed_vlan = NULL;
+		goto out;
+	}
+
+	/* Drop others rule (star rule) */
+	memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+	memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+	vport->egress.drop_rule =
+		mlx5_add_flow_rule(vport->egress.acl,
+				   0,
+				   match_c,
+				   match_v,
+				   MLX5_FLOW_CONTEXT_ACTION_DROP,
+				   0, NULL);
+	if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+		err = PTR_ERR(vport->egress.drop_rule);
+		pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
+			vport->vport, err);
+		vport->egress.drop_rule = NULL;
+	}
+out:
+	kfree(match_v);
+	kfree(match_c);
+	return err;
+}
+
 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 			     int enable_events)
 {
 	struct mlx5_vport *vport = &esw->vports[vport_num];
-	unsigned long flags;
 
+	mutex_lock(&esw->state_lock);
 	WARN_ON(vport->enabled);
 
 	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+
+	if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
+		esw_vport_ingress_config(esw, vport);
+		esw_vport_egress_config(esw, vport);
+	}
+
 	mlx5_modify_vport_admin_state(esw->dev,
 				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
 				      vport_num,
@@ -725,53 +1491,32 @@
 
 	/* Sync with current vport context */
 	vport->enabled_events = enable_events;
-	esw_vport_change_handler(&vport->vport_change_handler);
+	esw_vport_change_handle_locked(vport);
 
-	spin_lock_irqsave(&vport->lock, flags);
 	vport->enabled = true;
-	spin_unlock_irqrestore(&vport->lock, flags);
+
+	/* only PF is trusted by default */
+	vport->trusted = (vport_num) ? false : true;
 
 	arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
 
 	esw->enabled_vports++;
 	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
-}
-
-static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
-{
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-	struct l2addr_node *node;
-	struct vport_addr *addr;
-	struct hlist_node *tmp;
-	int hi;
-
-	for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
-		addr = container_of(node, struct vport_addr, node);
-		addr->action = MLX5_ACTION_DEL;
-	}
-	esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC);
-
-	for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
-		addr = container_of(node, struct vport_addr, node);
-		addr->action = MLX5_ACTION_DEL;
-	}
-	esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC);
+	mutex_unlock(&esw->state_lock);
 }
 
 static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 {
 	struct mlx5_vport *vport = &esw->vports[vport_num];
-	unsigned long flags;
 
 	if (!vport->enabled)
 		return;
 
 	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
 	/* Mark this vport as disabled to discard new events */
-	spin_lock_irqsave(&vport->lock, flags);
 	vport->enabled = false;
-	vport->enabled_events = 0;
-	spin_unlock_irqrestore(&vport->lock, flags);
+
+	synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
 
 	mlx5_modify_vport_admin_state(esw->dev,
 				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
@@ -781,9 +1526,19 @@
 	flush_workqueue(esw->work_queue);
 	/* Disable events from this vport */
 	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
-	/* We don't assume VFs will cleanup after themselves */
-	esw_cleanup_vport(esw, vport_num);
+	mutex_lock(&esw->state_lock);
+	/* We don't assume VFs will clean up after themselves.
+	 * Calling the vport change handler while the vport is disabled will
+	 * clean up the vport resources.
+	 */
+	esw_vport_change_handle_locked(vport);
+	vport->enabled_events = 0;
+	if (vport_num) {
+		esw_vport_disable_egress_acl(esw, vport);
+		esw_vport_disable_ingress_acl(esw, vport);
+	}
 	esw->enabled_vports--;
+	mutex_unlock(&esw->state_lock);
 }
 
 /* Public E-Switch API */
@@ -802,6 +1557,12 @@
 		return -ENOTSUPP;
 	}
 
+	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
+		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
+
+	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
+		esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
+
 	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
 
 	esw_disable_vport(esw, 0);
@@ -824,6 +1585,7 @@
 
 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 {
+	struct esw_mc_addr *mc_promisc;
 	int i;
 
 	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
@@ -833,9 +1595,14 @@
 	esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
 		 esw->enabled_vports);
 
+	mc_promisc = esw->mc_promisc;
+
 	for (i = 0; i < esw->total_vports; i++)
 		esw_disable_vport(esw, i);
 
+	if (mc_promisc && mc_promisc->uplink_rule)
+		mlx5_del_flow_rule(mc_promisc->uplink_rule);
+
 	esw_destroy_fdb_table(esw);
 
 	/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
@@ -845,7 +1612,8 @@
 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 {
 	int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
-	int total_vports = 1 + pci_sriov_get_totalvfs(dev->pdev);
+	int total_vports = MLX5_TOTAL_VPORTS(dev);
+	struct esw_mc_addr *mc_promisc;
 	struct mlx5_eswitch *esw;
 	int vport_num;
 	int err;
@@ -874,6 +1642,13 @@
 	}
 	esw->l2_table.size = l2_table_size;
 
+	mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
+	if (!mc_promisc) {
+		err = -ENOMEM;
+		goto abort;
+	}
+	esw->mc_promisc = mc_promisc;
+
 	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
 	if (!esw->work_queue) {
 		err = -ENOMEM;
@@ -887,6 +1662,8 @@
 		goto abort;
 	}
 
+	mutex_init(&esw->state_lock);
+
 	for (vport_num = 0; vport_num < total_vports; vport_num++) {
 		struct mlx5_vport *vport = &esw->vports[vport_num];
 
@@ -894,7 +1671,6 @@
 		vport->dev = dev;
 		INIT_WORK(&vport->vport_change_handler,
 			  esw_vport_change_handler);
-		spin_lock_init(&vport->lock);
 	}
 
 	esw->total_vports = total_vports;
@@ -925,6 +1701,7 @@
 	esw->dev->priv.eswitch = NULL;
 	destroy_workqueue(esw->work_queue);
 	kfree(esw->l2_table.bitmap);
+	kfree(esw->mc_promisc);
 	kfree(esw->vports);
 	kfree(esw);
 }
@@ -942,10 +1719,8 @@
 	}
 
 	vport = &esw->vports[vport_num];
-	spin_lock(&vport->lock);
 	if (vport->enabled)
 		queue_work(esw->work_queue, &vport->vport_change_handler);
-	spin_unlock(&vport->lock);
 }
 
 /* Vport Administration */
@@ -957,12 +1732,22 @@
 			       int vport, u8 mac[ETH_ALEN])
 {
 	int err = 0;
+	struct mlx5_vport *evport;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
 
+	evport = &esw->vports[vport];
+
+	if (evport->spoofchk && !is_valid_ether_addr(mac)) {
+		mlx5_core_warn(esw->dev,
+			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+			       vport);
+		return -EPERM;
+	}
+
 	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
 	if (err) {
 		mlx5_core_warn(esw->dev,
@@ -971,6 +1756,11 @@
 		return err;
 	}
 
+	mutex_lock(&esw->state_lock);
+	if (evport->enabled)
+		err = esw_vport_ingress_config(esw, evport);
+	mutex_unlock(&esw->state_lock);
+
 	return err;
 }
 
@@ -990,6 +1780,7 @@
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 				  int vport, struct ifla_vf_info *ivi)
 {
+	struct mlx5_vport *evport;
 	u16 vlan;
 	u8 qos;
 
@@ -998,6 +1789,8 @@
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
 
+	evport = &esw->vports[vport];
+
 	memset(ivi, 0, sizeof(*ivi));
 	ivi->vf = vport - 1;
 
@@ -1008,7 +1801,7 @@
 	query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
 	ivi->vlan = vlan;
 	ivi->qos = qos;
-	ivi->spoofchk = 0;
+	ivi->spoofchk = evport->spoofchk;
 
 	return 0;
 }
@@ -1016,6 +1809,8 @@
 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
 				int vport, u16 vlan, u8 qos)
 {
+	struct mlx5_vport *evport;
+	int err = 0;
 	int set = 0;
 
 	if (!ESW_ALLOWED(esw))
@@ -1026,7 +1821,72 @@
 	if (vlan || qos)
 		set = 1;
 
-	return modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+	evport = &esw->vports[vport];
+
+	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+	if (err)
+		return err;
+
+	mutex_lock(&esw->state_lock);
+	evport->vlan = vlan;
+	evport->qos = qos;
+	if (evport->enabled) {
+		err = esw_vport_ingress_config(esw, evport);
+		if (err)
+			goto out;
+		err = esw_vport_egress_config(esw, evport);
+	}
+
+out:
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+				    int vport, bool spoofchk)
+{
+	struct mlx5_vport *evport;
+	bool pschk;
+	int err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport))
+		return -EINVAL;
+
+	evport = &esw->vports[vport];
+
+	mutex_lock(&esw->state_lock);
+	pschk = evport->spoofchk;
+	evport->spoofchk = spoofchk;
+	if (evport->enabled)
+		err = esw_vport_ingress_config(esw, evport);
+	if (err)
+		evport->spoofchk = pschk;
+	mutex_unlock(&esw->state_lock);
+
+	return err;
+}
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+				 int vport, bool setting)
+{
+	struct mlx5_vport *evport;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport))
+		return -EINVAL;
+
+	evport = &esw->vports[vport];
+
+	mutex_lock(&esw->state_lock);
+	evport->trusted = setting;
+	if (evport->enabled)
+		esw_vport_change_handle_locked(evport);
+	mutex_unlock(&esw->state_lock);
+
+	return 0;
 }
 
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3416a42..fd68002 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -88,18 +88,40 @@
 	kfree(ptr);                                         \
 })
 
+struct vport_ingress {
+	struct mlx5_flow_table *acl;
+	struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+	struct mlx5_flow_group *allow_spoofchk_only_grp;
+	struct mlx5_flow_group *allow_untagged_only_grp;
+	struct mlx5_flow_group *drop_grp;
+	struct mlx5_flow_rule  *allow_rule;
+	struct mlx5_flow_rule  *drop_rule;
+};
+
+struct vport_egress {
+	struct mlx5_flow_table *acl;
+	struct mlx5_flow_group *allowed_vlans_grp;
+	struct mlx5_flow_group *drop_grp;
+	struct mlx5_flow_rule  *allowed_vlan;
+	struct mlx5_flow_rule  *drop_rule;
+};
+
 struct mlx5_vport {
 	struct mlx5_core_dev    *dev;
 	int                     vport;
 	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
 	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
+	struct mlx5_flow_rule   *promisc_rule;
+	struct mlx5_flow_rule   *allmulti_rule;
 	struct work_struct      vport_change_handler;
 
-	/* This spinlock protects access to vport data, between
-	 * "esw_vport_disable" and ongoing interrupt "mlx5_eswitch_vport_event"
-	 * once vport marked as disabled new interrupts are discarded.
-	 */
-	spinlock_t              lock; /* vport events sync */
+	struct vport_ingress    ingress;
+	struct vport_egress     egress;
+
+	u16                     vlan;
+	u8                      qos;
+	bool                    spoofchk;
+	bool                    trusted;
 	bool                    enabled;
 	u16                     enabled_events;
 };
@@ -113,6 +135,8 @@
 struct mlx5_eswitch_fdb {
 	void *fdb;
 	struct mlx5_flow_group *addr_grp;
+	struct mlx5_flow_group *allmulti_grp;
+	struct mlx5_flow_group *promisc_grp;
 };
 
 struct mlx5_eswitch {
@@ -124,6 +148,11 @@
 	struct mlx5_vport       *vports;
 	int                     total_vports;
 	int                     enabled_vports;
+	/* Synchronize between vport change events
+	 * and async SRIOV admin state changes
+	 */
+	struct mutex            state_lock;
+	struct esw_mc_addr      *mc_promisc;
 };
 
 /* E-Switch API */
@@ -138,6 +167,10 @@
 				 int vport, int link_state);
 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
 				int vport, u16 vlan, u8 qos);
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+				    int vport, bool spoofchk);
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+				 int vport_num, bool setting);
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 				  int vport, struct ifla_vf_info *ivi);
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index f46f1db..a5bb6b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -50,6 +50,10 @@
 		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
 	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
 	MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+	if (ft->vport) {
+		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
+		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
+	}
 
 	memset(out, 0, sizeof(out));
 	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -57,6 +61,7 @@
 }
 
 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+			       u16 vport,
 			       enum fs_flow_table_type type, unsigned int level,
 			       unsigned int log_size, struct mlx5_flow_table
 			       *next_ft, unsigned int *table_id)
@@ -77,6 +82,10 @@
 	MLX5_SET(create_flow_table_in, in, table_type, type);
 	MLX5_SET(create_flow_table_in, in, level, level);
 	MLX5_SET(create_flow_table_in, in, log_size, log_size);
+	if (vport) {
+		MLX5_SET(create_flow_table_in, in, vport_number, vport);
+		MLX5_SET(create_flow_table_in, in, other_vport, 1);
+	}
 
 	memset(out, 0, sizeof(out));
 	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -101,6 +110,10 @@
 		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
 	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
 	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
+	if (ft->vport) {
+		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
+		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
+	}
 
 	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
 					  sizeof(out));
@@ -120,6 +133,10 @@
 		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
 	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
 	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
+	if (ft->vport) {
+		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
+		MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+	}
 	MLX5_SET(modify_flow_table_in, in, modify_field_select,
 		 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
 	if (next_ft) {
@@ -148,6 +165,10 @@
 		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
 	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
 	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
+	if (ft->vport) {
+		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
+		MLX5_SET(create_flow_group_in, in, other_vport, 1);
+	}
 
 	err = mlx5_cmd_exec_check_status(dev, in,
 					 inlen, out,
@@ -174,6 +195,10 @@
 	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
 	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
 	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+	if (ft->vport) {
+		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
+		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
+	}
 
 	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
 					  sizeof(out));
@@ -207,22 +232,29 @@
 	MLX5_SET(set_fte_in, in, table_type, ft->type);
 	MLX5_SET(set_fte_in, in, table_id,   ft->id);
 	MLX5_SET(set_fte_in, in, flow_index, fte->index);
+	if (ft->vport) {
+		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+		MLX5_SET(set_fte_in, in, other_vport, 1);
+	}
 
 	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
 	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
 	MLX5_SET(flow_context, in_flow_context, action, fte->action);
-	MLX5_SET(flow_context, in_flow_context, destination_list_size,
-		 fte->dests_size);
 	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
 				      match_value);
 	memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
 
+	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
 	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+		int list_size = 0;
+
 		list_for_each_entry(dst, &fte->node.children, node.list) {
 			unsigned int id;
 
+			if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+				continue;
+
 			MLX5_SET(dest_format_struct, in_dests, destination_type,
 				 dst->dest_attr.type);
 			if (dst->dest_attr.type ==
@@ -233,8 +265,31 @@
 			}
 			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
 			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+			list_size++;
 		}
+
+		MLX5_SET(flow_context, in_flow_context, destination_list_size,
+			 list_size);
 	}
+
+	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		int list_size = 0;
+
+		list_for_each_entry(dst, &fte->node.children, node.list) {
+			if (dst->dest_attr.type !=
+			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+				continue;
+
+			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+				 dst->dest_attr.counter->id);
+			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+			list_size++;
+		}
+
+		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+			 list_size);
+	}
+
 	memset(out, 0, sizeof(out));
 	err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
 					 sizeof(out));
@@ -254,18 +309,16 @@
 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
 			struct mlx5_flow_table *ft,
 			unsigned group_id,
+			int modify_mask,
 			struct fs_fte *fte)
 {
 	int opmod;
-	int modify_mask;
 	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
 						flow_table_properties_nic_receive.
 						flow_modify_en);
 	if (!atomic_mod_cap)
 		return -ENOTSUPP;
 	opmod = 1;
-	modify_mask = 1 <<
-		MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
 
 	return	mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
 }
@@ -285,8 +338,78 @@
 	MLX5_SET(delete_fte_in, in, table_type, ft->type);
 	MLX5_SET(delete_fte_in, in, table_id, ft->id);
 	MLX5_SET(delete_fte_in, in, flow_index, index);
+	if (ft->vport) {
+		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
+		MLX5_SET(delete_fte_in, in, other_vport, 1);
+	}
 
 	err =  mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
 
 	return err;
 }
+
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
+{
+	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)];
+	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(alloc_flow_counter_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+
+	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+					 sizeof(out));
+	if (err)
+		return err;
+
+	*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+
+	return 0;
+}
+
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
+{
+	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)];
+	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)];
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(dealloc_flow_counter_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
+
+	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+					  sizeof(out));
+}
+
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+		      u64 *packets, u64 *bytes)
+{
+	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+		MLX5_ST_SZ_BYTES(traffic_counter)];
+	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+	void *stats;
+	int err = 0;
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(query_flow_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
+
+	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
+	*packets = MLX5_GET64(traffic_counter, stats, packets);
+	*bytes = MLX5_GET64(traffic_counter, stats, octets);
+
+	return 0;
+}
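The three new firmware commands map one-to-one onto the ALLOC/QUERY/DEALLOC_FLOW_COUNTER opcodes: allocation returns a 16-bit counter id, query reads the traffic_counter (packets/octets) for that id, and free releases it. A rough round-trip sketch, for illustration only; in practice drivers go through the fs_counters.c layer added later in this diff rather than calling these directly, and the function name here is made up.

	static int example_fc_roundtrip(struct mlx5_core_dev *dev)
	{
		u64 packets, bytes;
		u16 counter_id;
		int err;

		err = mlx5_cmd_fc_alloc(dev, &counter_id);
		if (err)
			return err;

		/* a real user would reference counter_id from an FTE whose action
		 * includes MLX5_FLOW_CONTEXT_ACTION_COUNT before querying it
		 */
		err = mlx5_cmd_fc_query(dev, counter_id, &packets, &bytes);
		if (!err)
			mlx5_core_dbg(dev, "counter %d: packets %llu, bytes %llu\n",
				      counter_id, packets, bytes);

		mlx5_cmd_fc_free(dev, counter_id);
		return err;
	}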
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 9814d47..fc4f7b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -34,6 +34,7 @@
 #define _MLX5_FS_CMD_
 
 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+			       u16 vport,
 			       enum fs_flow_table_type type, unsigned int level,
 			       unsigned int log_size, struct mlx5_flow_table
 			       *next_ft, unsigned int *table_id);
@@ -61,6 +62,7 @@
 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
 			struct mlx5_flow_table *ft,
 			unsigned group_id,
+			int modify_mask,
 			struct fs_fte *fte);
 
 int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
@@ -69,4 +71,9 @@
 
 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
 			    struct mlx5_flow_table *ft);
+
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+		      u64 *packets, u64 *bytes);
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5121be4..8b5f0b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -40,18 +40,18 @@
 #define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
 					 sizeof(struct init_tree_node))
 
-#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\
+#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
 		 ...) {.type = FS_TYPE_PRIO,\
 	.min_ft_level = min_level_val,\
-	.max_ft = max_ft_val,\
+	.num_levels = num_levels_val,\
 	.num_leaf_prios = num_prios_val,\
 	.caps = caps_val,\
 	.children = (struct init_tree_node[]) {__VA_ARGS__},\
 	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
 }
 
-#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\
-	ADD_PRIO(num_prios_val, 0, max_ft_val, {},\
+#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
+	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
 		 __VA_ARGS__)\
 
 #define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
@@ -67,17 +67,20 @@
 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
 			       .caps = (long[]) {__VA_ARGS__} }
 
-#define LEFTOVERS_MAX_FT 1
+#define LEFTOVERS_NUM_LEVELS 1
 #define LEFTOVERS_NUM_PRIOS 1
-#define BY_PASS_PRIO_MAX_FT 1
-#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
-			   LEFTOVERS_MAX_FT)
 
-#define KERNEL_MAX_FT 3
-#define KERNEL_NUM_PRIOS 2
-#define KENREL_MIN_LEVEL 2
+#define BY_PASS_PRIO_NUM_LEVELS 1
+#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
+			   LEFTOVERS_NUM_PRIOS)
 
-#define ANCHOR_MAX_FT 1
+/* VLAN, MAC, TTC, aRFS */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 4
+#define KERNEL_NIC_NUM_PRIOS 1
+/* One more level for tc */
+#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+
+#define ANCHOR_NUM_LEVELS 1
 #define ANCHOR_NUM_PRIOS 1
 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
 struct node_caps {
@@ -92,7 +95,7 @@
 	int min_ft_level;
 	int num_leaf_prios;
 	int prio;
-	int max_ft;
+	int num_levels;
 } root_fs = {
 	.type = FS_TYPE_NAMESPACE,
 	.ar_size = 4,
@@ -102,17 +105,20 @@
 					  FS_CAP(flow_table_properties_nic_receive.modify_root),
 					  FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
 					  FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
-			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))),
-		ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {},
-			 ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))),
+			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+						  BY_PASS_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+			 ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+						  KERNEL_NIC_PRIO_NUM_LEVELS))),
 		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
 			 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
 					  FS_CAP(flow_table_properties_nic_receive.modify_root),
 					  FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
 					  FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
-			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))),
+			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
 		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
-			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))),
+			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
 	}
 };
 
@@ -222,19 +228,6 @@
 	return NULL;
 }
 
-static unsigned int find_next_free_level(struct fs_prio *prio)
-{
-	if (!list_empty(&prio->node.children)) {
-		struct mlx5_flow_table *ft;
-
-		ft = list_last_entry(&prio->node.children,
-				     struct mlx5_flow_table,
-				     node.list);
-		return ft->level + 1;
-	}
-	return prio->start_level;
-}
-
 static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
 {
 	unsigned int i;
@@ -351,6 +344,7 @@
 	struct mlx5_flow_group *fg;
 	struct fs_fte *fte;
 	u32	*match_value;
+	int modify_mask;
 	struct mlx5_core_dev *dev = get_dev(node);
 	int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
 	int err;
@@ -374,8 +368,11 @@
 	}
 	if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
 	    --fte->dests_size) {
+		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
 		err = mlx5_cmd_update_fte(dev, ft,
-					  fg->id, fte);
+					  fg->id,
+					  modify_mask,
+					  fte);
 		if (err)
 			pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
 				__func__, fg->id, fte->index);
@@ -464,7 +461,7 @@
 	return fg;
 }
 
-static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
+static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
 						enum fs_flow_table_type table_type)
 {
 	struct mlx5_flow_table *ft;
@@ -476,6 +473,7 @@
 	ft->level = level;
 	ft->node.type = FS_TYPE_FLOW_TABLE;
 	ft->type = table_type;
+	ft->vport = vport;
 	ft->max_fte = max_fte;
 	INIT_LIST_HEAD(&ft->fwd_rules);
 	mutex_init(&ft->lock);
@@ -615,12 +613,13 @@
 	return err;
 }
 
-static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
-					struct mlx5_flow_destination *dest)
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+				 struct mlx5_flow_destination *dest)
 {
 	struct mlx5_flow_table *ft;
 	struct mlx5_flow_group *fg;
 	struct fs_fte *fte;
+	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
 	int err = 0;
 
 	fs_get_obj(fte, rule->node.parent);
@@ -632,7 +631,9 @@
 
 	memcpy(&rule->dest_attr, dest, sizeof(*dest));
 	err = mlx5_cmd_update_fte(get_dev(&ft->node),
-				  ft, fg->id, fte);
+				  ft, fg->id,
+				  modify_mask,
+				  fte);
 	unlock_ref_node(&fte->node);
 
 	return err;
@@ -693,9 +694,23 @@
 	return err;
 }
 
-struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
-					       int prio,
-					       int max_fte)
+static void list_add_flow_table(struct mlx5_flow_table *ft,
+				struct fs_prio *prio)
+{
+	struct list_head *prev = &prio->node.children;
+	struct mlx5_flow_table *iter;
+
+	fs_for_each_ft(iter, prio) {
+		if (iter->level > ft->level)
+			break;
+		prev = &iter->node.list;
+	}
+	list_add(&ft->node.list, prev);
+}
+
+static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+							u16 vport, int prio,
+							int max_fte, u32 level)
 {
 	struct mlx5_flow_table *next_ft = NULL;
 	struct mlx5_flow_table *ft;
@@ -716,12 +731,16 @@
 		err = -EINVAL;
 		goto unlock_root;
 	}
-	if (fs_prio->num_ft == fs_prio->max_ft) {
+	if (level >= fs_prio->num_levels) {
 		err = -ENOSPC;
 		goto unlock_root;
 	}
-
-	ft = alloc_flow_table(find_next_free_level(fs_prio),
+	/* The requested level is relative to this priority's level range,
+	 * so offset it by the priority's start level.
+	 */
+	level += fs_prio->start_level;
+	ft = alloc_flow_table(level,
+			      vport,
 			      roundup_pow_of_two(max_fte),
 			      root->table_type);
 	if (!ft) {
@@ -732,7 +751,7 @@
 	tree_init_node(&ft->node, 1, del_flow_table);
 	log_table_sz = ilog2(ft->max_fte);
 	next_ft = find_next_chained_ft(fs_prio);
-	err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
+	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
 					 log_table_sz, next_ft, &ft->id);
 	if (err)
 		goto free_ft;
@@ -742,7 +761,7 @@
 		goto destroy_ft;
 	lock_ref_node(&fs_prio->node);
 	tree_add_node(&ft->node, &fs_prio->node);
-	list_add_tail(&ft->node.list, &fs_prio->node.children);
+	list_add_flow_table(ft, fs_prio);
 	fs_prio->num_ft++;
 	unlock_ref_node(&fs_prio->node);
 	mutex_unlock(&root->chain_lock);
@@ -756,17 +775,32 @@
 	return ERR_PTR(err);
 }
 
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+					       int prio, int max_fte,
+					       u32 level)
+{
+	return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+}
+
+struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+						     int prio, int max_fte,
+						     u32 level, u16 vport)
+{
+	return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+}
+
 struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
 							    int prio,
 							    int num_flow_table_entries,
-							    int max_num_groups)
+							    int max_num_groups,
+							    u32 level)
 {
 	struct mlx5_flow_table *ft;
 
 	if (max_num_groups > num_flow_table_entries)
 		return ERR_PTR(-EINVAL);
 
-	ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries);
+	ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level);
 	if (IS_ERR(ft))
 		return ft;
 
@@ -850,6 +884,7 @@
 {
 	struct mlx5_flow_table *ft;
 	struct mlx5_flow_rule *rule;
+	int modify_mask = 0;
 	int err;
 
 	rule = alloc_rule(dest);
@@ -865,14 +900,20 @@
 		list_add(&rule->node.list, &fte->node.children);
 	else
 		list_add_tail(&rule->node.list, &fte->node.children);
-	if (dest)
+	if (dest) {
 		fte->dests_size++;
+
+		modify_mask |= dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ?
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) :
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+	}
+
 	if (fte->dests_size == 1 || !dest)
 		err = mlx5_cmd_create_fte(get_dev(&ft->node),
 					  ft, fg->id, fte);
 	else
 		err = mlx5_cmd_update_fte(get_dev(&ft->node),
-					  ft, fg->id, fte);
+					  ft, fg->id, modify_mask, fte);
 	if (err)
 		goto free_rule;
 
@@ -1065,31 +1106,48 @@
 	return rule;
 }
 
-static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
-						  u8 match_criteria_enable,
-						  u32 *match_criteria,
-						  u32 *match_value,
-						  u8 action,
-						  u32 flow_tag,
-						  struct mlx5_flow_destination *dest)
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule)
 {
-	struct mlx5_flow_rule *rule;
-	struct mlx5_flow_group *g;
+	struct mlx5_flow_rule *dst;
+	struct fs_fte *fte;
 
-	g = create_autogroup(ft, match_criteria_enable, match_criteria);
-	if (IS_ERR(g))
-		return (void *)g;
+	fs_get_obj(fte, rule->node.parent);
 
-	rule = add_rule_fg(g, match_value,
-			   action, flow_tag, dest);
-	if (IS_ERR(rule)) {
-		/* Remove assumes refcount > 0 and autogroup creates a group
-		 * with a refcount = 0.
-		 */
-		tree_get_node(&g->node);
-		tree_remove_node(&g->node);
+	fs_for_each_dst(dst, fte) {
+		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+			return dst->dest_attr.counter;
 	}
-	return rule;
+
+	return NULL;
+}
+
+static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
+{
+	if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
+		return !counter;
+
+	if (!counter)
+		return false;
+
+	/* Hardware supports a counter only for the drop action */
+	return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT);
+}
+
+static bool dest_is_valid(struct mlx5_flow_destination *dest,
+			  u32 action,
+			  struct mlx5_flow_table *ft)
+{
+	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
+		return counter_is_valid(dest->counter, action);
+
+	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+		return true;
+
+	if (!dest || ((dest->type ==
+	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
+	    (dest->ft->level <= ft->level)))
+		return false;
+	return true;
 }
 
 static struct mlx5_flow_rule *
@@ -1104,7 +1162,7 @@
 	struct mlx5_flow_group *g;
 	struct mlx5_flow_rule *rule;
 
-	if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest)
+	if (!dest_is_valid(dest, action, ft))
 		return ERR_PTR(-EINVAL);
 
 	nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
@@ -1119,8 +1177,23 @@
 				goto unlock;
 		}
 
-	rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
-				   match_value, action, flow_tag, dest);
+	g = create_autogroup(ft, match_criteria_enable, match_criteria);
+	if (IS_ERR(g)) {
+		rule = (void *)g;
+		goto unlock;
+	}
+
+	rule = add_rule_fg(g, match_value,
+			   action, flow_tag, dest);
+	if (IS_ERR(rule)) {
+		/* Remove assumes refcount > 0 and autogroup creates a group
+		 * with a refcount = 0.
+		 */
+		unlock_ref_node(&ft->node);
+		tree_get_node(&g->node);
+		tree_remove_node(&g->node);
+		return rule;
+	}
 unlock:
 	unlock_ref_node(&ft->node);
 	return rule;
@@ -1288,7 +1361,7 @@
 {
 	struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
 	int prio;
-	static struct fs_prio *fs_prio;
+	struct fs_prio *fs_prio;
 	struct mlx5_flow_namespace *ns;
 
 	if (!root_ns)
@@ -1306,6 +1379,16 @@
 			return &dev->priv.fdb_root_ns->ns;
 		else
 			return NULL;
+	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+		if (dev->priv.esw_egress_root_ns)
+			return &dev->priv.esw_egress_root_ns->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+		if (dev->priv.esw_ingress_root_ns)
+			return &dev->priv.esw_ingress_root_ns->ns;
+		else
+			return NULL;
 	default:
 		return NULL;
 	}
@@ -1323,7 +1406,7 @@
 EXPORT_SYMBOL(mlx5_get_flow_namespace);
 
 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
-				      unsigned prio, int max_ft)
+				      unsigned int prio, int num_levels)
 {
 	struct fs_prio *fs_prio;
 
@@ -1334,7 +1417,7 @@
 	fs_prio->node.type = FS_TYPE_PRIO;
 	tree_init_node(&fs_prio->node, 1, NULL);
 	tree_add_node(&fs_prio->node, &ns->node);
-	fs_prio->max_ft = max_ft;
+	fs_prio->num_levels = num_levels;
 	fs_prio->prio = prio;
 	list_add_tail(&fs_prio->node.list, &ns->node.children);
 
@@ -1365,14 +1448,14 @@
 	return ns;
 }
 
-static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node
-			     *prio_metadata)
+static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
+			     struct init_tree_node *prio_metadata)
 {
 	struct fs_prio *fs_prio;
 	int i;
 
 	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
-		fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft);
+		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
 		if (IS_ERR(fs_prio))
 			return PTR_ERR(fs_prio);
 	}
@@ -1399,7 +1482,7 @@
 				    struct init_tree_node *init_node,
 				    struct fs_node *fs_parent_node,
 				    struct init_tree_node *init_parent_node,
-				    int index)
+				    int prio)
 {
 	int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
 					      flow_table_properties_nic_receive.
@@ -1417,8 +1500,8 @@
 
 		fs_get_obj(fs_ns, fs_parent_node);
 		if (init_node->num_leaf_prios)
-			return create_leaf_prios(fs_ns, init_node);
-		fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft);
+			return create_leaf_prios(fs_ns, prio, init_node);
+		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
 		if (IS_ERR(fs_prio))
 			return PTR_ERR(fs_prio);
 		base = &fs_prio->node;
@@ -1431,11 +1514,16 @@
 	} else {
 		return -EINVAL;
 	}
+	prio = 0;
 	for (i = 0; i < init_node->ar_size; i++) {
 		err = init_root_tree_recursive(dev, &init_node->children[i],
-					       base, init_node, i);
+					       base, init_node, prio);
 		if (err)
 			return err;
+		if (init_node->children[i].type == FS_TYPE_PRIO &&
+		    init_node->children[i].num_leaf_prios) {
+			prio += init_node->children[i].num_leaf_prios;
+		}
 	}
 
 	return 0;
@@ -1491,9 +1579,9 @@
 	struct fs_prio *prio;
 
 	fs_for_each_prio(prio, ns) {
-		 /* This updates prio start_level and max_ft */
+		 /* This updates prio start_level and num_levels */
 		set_prio_attrs_in_prio(prio, acc_level);
-		acc_level += prio->max_ft;
+		acc_level += prio->num_levels;
 	}
 	return acc_level;
 }
@@ -1505,11 +1593,11 @@
 
 	prio->start_level = acc_level;
 	fs_for_each_ns(ns, prio)
-		/* This updates start_level and max_ft of ns's priority descendants */
+		/* This updates start_level and num_levels of ns's priority descendants */
 		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
-	if (!prio->max_ft)
-		prio->max_ft = acc_level_ns - prio->start_level;
-	WARN_ON(prio->max_ft < acc_level_ns - prio->start_level);
+	if (!prio->num_levels)
+		prio->num_levels = acc_level_ns - prio->start_level;
+	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
 }
 
 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
@@ -1520,12 +1608,13 @@
 
 	fs_for_each_prio(prio, ns) {
 		set_prio_attrs_in_prio(prio, start_level);
-		start_level += prio->max_ft;
+		start_level += prio->num_levels;
 	}
 }
 
 #define ANCHOR_PRIO 0
 #define ANCHOR_SIZE 1
+#define ANCHOR_LEVEL 0
 static int create_anchor_flow_table(struct mlx5_core_dev
 							*dev)
 {
@@ -1535,7 +1624,7 @@
 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR);
 	if (!ns)
 		return -EINVAL;
-	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE);
+	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
 	if (IS_ERR(ft)) {
 		mlx5_core_err(dev, "Failed to create last anchor flow table");
 		return PTR_ERR(ft);
@@ -1680,6 +1769,9 @@
 {
 	cleanup_root_ns(dev);
 	cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+	cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
+	cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns);
+	mlx5_cleanup_fc_stats(dev);
 }
 
 static int init_fdb_root_ns(struct mlx5_core_dev *dev)
@@ -1700,20 +1792,69 @@
 	}
 }
 
+static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
+{
+	struct fs_prio *prio;
+
+	dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL);
+	if (!dev->priv.esw_egress_root_ns)
+		return -ENOMEM;
+
+	/* create 1 prio */
+	prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+	if (IS_ERR(prio))
+		return PTR_ERR(prio);
+	else
+		return 0;
+}
+
+static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
+{
+	struct fs_prio *prio;
+
+	dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL);
+	if (!dev->priv.esw_ingress_root_ns)
+		return -ENOMEM;
+
+	/* create 1 prio */
+	prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+	if (IS_ERR(prio))
+		return PTR_ERR(prio);
+	else
+		return 0;
+}
+
 int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
 	int err = 0;
 
+	err = mlx5_init_fc_stats(dev);
+	if (err)
+		return err;
+
 	if (MLX5_CAP_GEN(dev, nic_flow_table)) {
 		err = init_root_ns(dev);
 		if (err)
-			return err;
+			goto err;
 	}
 	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
 		err = init_fdb_root_ns(dev);
 		if (err)
-			cleanup_root_ns(dev);
+			goto err;
+	}
+	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+		err = init_egress_acl_root_ns(dev);
+		if (err)
+			goto err;
+	}
+	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+		err = init_ingress_acl_root_ns(dev);
+		if (err)
+			goto err;
 	}
 
+	return 0;
+err:
+	mlx5_cleanup_fs(dev);
 	return err;
 }
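With this change a priority no longer tracks a flow-table count (max_ft); it reserves a range of levels (num_levels), and callers of mlx5_create_flow_table() pass the level they want inside that range explicitly, with the priority's start_level added internally. A minimal sketch under assumed values; the namespace, priority, table size and level are illustrative only, as is the function name.

	static int example_create_table(struct mlx5_core_dev *dev)
	{
		struct mlx5_flow_namespace *ns;
		struct mlx5_flow_table *ft;

		ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
		if (!ns)
			return -EINVAL;

		/* prio 0, room for 2 FTEs, placed at level 0 of that priority's
		 * reserved level range
		 */
		ft = mlx5_create_flow_table(ns, 0, 2, 0);
		if (IS_ERR(ft))
			return PTR_ERR(ft);

		mlx5_destroy_flow_table(ft);
		return 0;
	}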
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index f37a624..aa41a73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -45,8 +45,10 @@
 };
 
 enum fs_flow_table_type {
-	FS_FT_NIC_RX	 = 0x0,
-	FS_FT_FDB	 = 0X4,
+	FS_FT_NIC_RX          = 0x0,
+	FS_FT_ESW_EGRESS_ACL  = 0x2,
+	FS_FT_ESW_INGRESS_ACL = 0x3,
+	FS_FT_FDB             = 0X4,
 };
 
 enum fs_fte_status {
@@ -79,6 +81,7 @@
 struct mlx5_flow_table {
 	struct fs_node			node;
 	u32				id;
+	u16				vport;
 	unsigned int			max_fte;
 	unsigned int			level;
 	enum fs_flow_table_type		type;
@@ -93,6 +96,28 @@
 	struct list_head		fwd_rules;
 };
 
+struct mlx5_fc_cache {
+	u64 packets;
+	u64 bytes;
+	u64 lastuse;
+};
+
+struct mlx5_fc {
+	struct list_head list;
+
+	/* the last{packets,bytes} members are used to calculate the delta since
+	 * the last reading
+	 */
+	u64 lastpackets;
+	u64 lastbytes;
+
+	u16 id;
+	bool deleted;
+	bool aging;
+
+	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+};
+
 /* Type of children is mlx5_flow_rule */
 struct fs_fte {
 	struct fs_node			node;
@@ -102,12 +127,13 @@
 	u32				index;
 	u32				action;
 	enum fs_fte_status		status;
+	struct mlx5_fc			*counter;
 };
 
 /* Type of children is mlx5_flow_table/namespace */
 struct fs_prio {
 	struct fs_node			node;
-	unsigned int			max_ft;
+	unsigned int			num_levels;
 	unsigned int			start_level;
 	unsigned int			prio;
 	unsigned int			num_ft;
@@ -143,6 +169,9 @@
 	struct mutex			chain_lock;
 };
 
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
+
 int mlx5_init_fs(struct mlx5_core_dev *dev);
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
new file mode 100644
index 0000000..164dc37
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "fs_cmd.h"
+
+#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
+
+/* locking scheme:
+ *
+ * It is the user's responsibility to prevent concurrent or badly ordered
+ * calls to mlx5_fc_create() and mlx5_fc_destroy(), and to avoid accessing a
+ * struct mlx5_fc reference after the counter has been destroyed.
+ * e.g. en_tc.c is protected by the RTNL lock of its caller and will never
+ * dump (access struct mlx5_fc) after a counter is destroyed.
+ *
+ * access to counter list:
+ * - create (user context)
+ *   - mlx5_fc_create() only adds to an addlist to be consumed by
+ *     mlx5_fc_stats_work(). addlist is protected by a spinlock.
+ *   - queue the stats work to do the actual add
+ *
+ * - destroy (user context)
+ *   - mark a counter as deleted
+ *   - queue the stats work to do the actual delete
+ *
+ * - dump (user context)
+ *   user should not call dump after destroy
+ *
+ * - query (single thread workqueue context)
+ *   destroy/dump - no conflict (see destroy)
+ *   query/dump - packets and bytes might be inconsistent (since update is not
+ *                atomic)
+ *   query/create - no conflict (see create)
+ *   since every create/destroy queues the work, the hardware is only queried
+ *   once the required interval has actually elapsed.
+ */
+
+static void mlx5_fc_stats_work(struct work_struct *work)
+{
+	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
+						 priv.fc_stats.work.work);
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	unsigned long now = jiffies;
+	struct mlx5_fc *counter;
+	struct mlx5_fc *tmp;
+	int err = 0;
+
+	spin_lock(&fc_stats->addlist_lock);
+
+	list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
+
+	if (!list_empty(&fc_stats->list))
+		queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
+
+	spin_unlock(&fc_stats->addlist_lock);
+
+	list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
+		struct mlx5_fc_cache *c = &counter->cache;
+		u64 packets;
+		u64 bytes;
+
+		if (counter->deleted) {
+			list_del(&counter->list);
+
+			mlx5_cmd_fc_free(dev, counter->id);
+
+			kfree(counter);
+			continue;
+		}
+
+		if (time_before(now, fc_stats->next_query))
+			continue;
+
+		err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes);
+		if (err) {
+			pr_err("Error querying stats for counter id %d\n",
+			       counter->id);
+			continue;
+		}
+
+		if (packets == c->packets)
+			continue;
+
+		c->lastuse = jiffies;
+		c->packets = packets;
+		c->bytes   = bytes;
+	}
+
+	if (time_after_eq(now, fc_stats->next_query))
+		fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
+}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct mlx5_fc *counter;
+	int err;
+
+	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+	if (!counter)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx5_cmd_fc_alloc(dev, &counter->id);
+	if (err)
+		goto err_out;
+
+	if (aging) {
+		counter->aging = true;
+
+		spin_lock(&fc_stats->addlist_lock);
+		list_add(&counter->list, &fc_stats->addlist);
+		spin_unlock(&fc_stats->addlist_lock);
+
+		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+	}
+
+	return counter;
+
+err_out:
+	kfree(counter);
+
+	return ERR_PTR(err);
+}
+
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+	if (!counter)
+		return;
+
+	if (counter->aging) {
+		counter->deleted = true;
+		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+		return;
+	}
+
+	mlx5_cmd_fc_free(dev, counter->id);
+	kfree(counter);
+}
+
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+	INIT_LIST_HEAD(&fc_stats->list);
+	INIT_LIST_HEAD(&fc_stats->addlist);
+	spin_lock_init(&fc_stats->addlist_lock);
+
+	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
+	if (!fc_stats->wq)
+		return -ENOMEM;
+
+	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
+
+	return 0;
+}
+
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct mlx5_fc *counter;
+	struct mlx5_fc *tmp;
+
+	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
+	destroy_workqueue(dev->priv.fc_stats.wq);
+	dev->priv.fc_stats.wq = NULL;
+
+	list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
+
+	list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
+		list_del(&counter->list);
+
+		mlx5_cmd_fc_free(dev, counter->id);
+
+		kfree(counter);
+	}
+}
+
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+			  u64 *bytes, u64 *packets, u64 *lastuse)
+{
+	struct mlx5_fc_cache c;
+
+	c = counter->cache;
+
+	*bytes = c.bytes - counter->lastbytes;
+	*packets = c.packets - counter->lastpackets;
+	*lastuse = c.lastuse;
+
+	counter->lastbytes = c.bytes;
+	counter->lastpackets = c.packets;
+}
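Tying the new file together: a counter created with aging joins the addlist, the delayed work folds it into the main list and refreshes its cache roughly once per MLX5_FC_STATS_PERIOD, and mlx5_fc_query_cached() returns the delta accumulated since the previous read. A sketch of the intended calling pattern, with create/dump/destroy all in one user context as the locking-scheme comment requires; the flow-rule attachment step is elided and the function name is made up.

	static int example_fc_usage(struct mlx5_core_dev *dev)
	{
		u64 bytes, packets, lastuse;
		struct mlx5_fc *counter;

		counter = mlx5_fc_create(dev, true);	/* aging enabled */
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		/* ... attach the counter to a rule via a
		 * MLX5_FLOW_DESTINATION_TYPE_COUNTER destination ...
		 */

		/* later, e.g. when dumping stats: deltas since the last call */
		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

		mlx5_fc_destroy(dev, counter);
		return 0;
	}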
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index f5deb64..42d16b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -176,11 +176,11 @@
 	case MLX5_HEALTH_SYNDR_EQ_ERR:
 		return "EQ error";
 	case MLX5_HEALTH_SYNDR_EQ_INV:
-		return "Invalid EQ refrenced";
+		return "Invalid EQ referenced";
 	case MLX5_HEALTH_SYNDR_FFSER_ERR:
 		return "FFSER error";
 	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
-		return "High temprature";
+		return "High temperature";
 	default:
 		return "unrecognized error";
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3f3b2fa..6feef7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -48,6 +48,9 @@
 #include <linux/kmod.h>
 #include <linux/delay.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
 #include "mlx5_core.h"
 #include "fs_core.h"
 #ifdef CONFIG_MLX5_CORE_EN
@@ -665,6 +668,12 @@
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
 	struct mlx5_eq *eq, *n;
 
+#ifdef CONFIG_RFS_ACCEL
+	if (dev->rmap) {
+		free_irq_cpu_rmap(dev->rmap);
+		dev->rmap = NULL;
+	}
+#endif
 	spin_lock(&table->lock);
 	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
 		list_del(&eq->list);
@@ -691,6 +700,11 @@
 	INIT_LIST_HEAD(&table->comp_eqs_list);
 	ncomp_vec = table->num_comp_vectors;
 	nent = MLX5_COMP_EQ_SIZE;
+#ifdef CONFIG_RFS_ACCEL
+	dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
+	if (!dev->rmap)
+		return -ENOMEM;
+#endif
 	for (i = 0; i < ncomp_vec; i++) {
 		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 		if (!eq) {
@@ -698,6 +712,10 @@
 			goto clean;
 		}
 
+#ifdef CONFIG_RFS_ACCEL
+		irq_cpu_rmap_add(dev->rmap,
+				 dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector);
+#endif
 		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
 		err = mlx5_create_map_eq(dev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
@@ -966,7 +984,7 @@
 	int err;
 
 	mutex_lock(&dev->intf_state_mutex);
-	if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
 		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
 			 __func__);
 		goto out;
@@ -1133,7 +1151,8 @@
 	if (err)
 		pr_info("failed request module on %s\n", MLX5_IB_MOD);
 
-	dev->interface_state = MLX5_INTERFACE_STATE_UP;
+	clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 out:
 	mutex_unlock(&dev->intf_state_mutex);
 
@@ -1207,7 +1226,7 @@
 	}
 
 	mutex_lock(&dev->intf_state_mutex);
-	if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
 		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
 			 __func__);
 		goto out;
@@ -1241,7 +1260,8 @@
 	mlx5_cmd_cleanup(dev);
 
 out:
-	dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+	set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
 	mutex_unlock(&dev->intf_state_mutex);
 	return err;
 }
@@ -1452,6 +1472,18 @@
 	.resume		= mlx5_pci_resume
 };
 
+static void shutdown(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+	struct mlx5_priv *priv = &dev->priv;
+
+	dev_info(&pdev->dev, "Shutdown was called\n");
+	/* Notify mlx5 clients that the kernel is being shut down */
+	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
+	mlx5_unload_one(dev, priv);
+	mlx5_pci_disable_device(dev);
+}
+
 static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x1011) },			/* Connect-IB */
 	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
@@ -1459,6 +1491,8 @@
 	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
 	{ PCI_VDEVICE(MELLANOX, 0x1015) },			/* ConnectX-4LX */
 	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
+	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5 */
+	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
 	{ 0, }
 };
 
@@ -1469,6 +1503,7 @@
 	.id_table       = mlx5_core_pci_table,
 	.probe          = init_one,
 	.remove         = remove_one,
+	.shutdown	= shutdown,
 	.err_handler	= &mlx5_err_handler,
 	.sriov_configure   = mlx5_core_sriov_configure,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0b0b226..482604b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -42,6 +42,8 @@
 #define DRIVER_VERSION "3.0-1"
 #define DRIVER_RELDATE  "January 2015"
 
+#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
+
 extern int mlx5_core_debug_mask;
 
 #define mlx5_core_dbg(__dev, format, ...)				\
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index ae378c5..3e35611 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -115,6 +115,19 @@
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
 
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
+{
+	u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
+	u32 in[MLX5_ST_SZ_DW(mlcr_reg)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(mlcr_reg, in, local_port, 1);
+	MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
+
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_MLCR, 0, 1);
+}
+
 int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
 			      u32 *proto_cap, int proto_mask)
 {
@@ -247,8 +260,8 @@
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
-				int *max_mtu, int *oper_mtu, u8 port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+				u16 *max_mtu, u16 *oper_mtu, u8 port)
 {
 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -268,7 +281,7 @@
 		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
 }
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
 {
 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,20 +296,96 @@
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
 
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
 			     u8 port)
 {
 	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
 
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
 			      u8 port)
 {
 	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
 
+static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
+{
+	u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
+	u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
+	int module_mapping;
+	int err;
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(pmlp_reg, in, local_port, 1);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+				   MLX5_REG_PMLP, 0, 0);
+	if (err)
+		return err;
+
+	module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
+	*module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+
+	return 0;
+}
+
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+			     u16 offset, u16 size, u8 *data)
+{
+	u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+	u32 in[MLX5_ST_SZ_DW(mcia_reg)];
+	int module_num;
+	u16 i2c_addr;
+	int status;
+	int err;
+	void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+	err = mlx5_query_module_num(dev, &module_num);
+	if (err)
+		return err;
+
+	memset(in, 0, sizeof(in));
+	size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
+
+	if (offset < MLX5_EEPROM_PAGE_LENGTH &&
+	    offset + size > MLX5_EEPROM_PAGE_LENGTH)
+		/* Cross-page read: clamp to the end of the low page (offset 256) */
+		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+
+	i2c_addr = MLX5_I2C_ADDR_LOW;
+	if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
+		i2c_addr = MLX5_I2C_ADDR_HIGH;
+		offset -= MLX5_EEPROM_PAGE_LENGTH;
+	}
+
+	MLX5_SET(mcia_reg, in, l, 0);
+	MLX5_SET(mcia_reg, in, module, module_num);
+	MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
+	MLX5_SET(mcia_reg, in, page_number, 0);
+	MLX5_SET(mcia_reg, in, device_address, offset);
+	MLX5_SET(mcia_reg, in, size, size);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_MCIA, 0, 0);
+	if (err)
+		return err;
+
+	status = MLX5_GET(mcia_reg, out, status);
+	if (status) {
+		mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+			      status);
+		return -EIO;
+	}
+
+	memcpy(data, ptr, size);
+
+	return size;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
+
 static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
 				int pvlc_size,  u8 local_port)
 {
@@ -607,3 +696,52 @@
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
+
+static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
+				  int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(pcmr_reg, in, local_port, 1);
+
+	return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+				    outlen, MLX5_REG_PCMR, 0, 0);
+}
+
+static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+
+	return mlx5_core_access_reg(mdev, in, inlen, out,
+				    sizeof(out), MLX5_REG_PCMR, 0, 1);
+}
+
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
+{
+	u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(pcmr_reg, in, local_port, 1);
+	MLX5_SET(pcmr_reg, in, fcs_chk, enable);
+
+	return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+			 bool *enabled)
+{
+	u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+	/* Default values for firmware that does not support MLX5_REG_PCMR */
+	*supported = false;
+	*enabled = true;
+
+	if (!MLX5_CAP_GEN(mdev, ports_check))
+		return;
+
+	if (mlx5_query_ports_check(mdev, out, sizeof(out)))
+		return;
+
+	*supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap));
+	*enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
+}
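In mlx5_query_module_eeprom() the cable EEPROM is addressed as two 256-byte pages behind two I2C addresses (assuming the usual MLX5_EEPROM_PAGE_LENGTH of 256): a read that starts in the low page is trimmed so it never crosses into the high page, and a read that starts at or beyond offset 256 is redirected to MLX5_I2C_ADDR_HIGH with the offset rebased. For example, a 16-byte read at offset 250 is trimmed to 6 bytes (16 - (250 + 16 - 256)) so it ends exactly at the page boundary, while a read at offset 300 uses the high I2C address with the device offset rebased to 44. Since the function returns the number of bytes actually copied, a caller can simply loop on shorter-than-requested reads.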
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index def2893..b720a27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -538,3 +538,71 @@
 	mlx5_core_destroy_sq(dev, sq->qpn);
 }
 EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
+
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
+{
+	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
+	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*counter_id = MLX5_GET(alloc_q_counter_out, out,
+				       counter_set_id);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
+
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
+{
+	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
+	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(dealloc_q_counter_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
+	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+					  sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
+
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+			      int reset, void *out, int out_size)
+{
+	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+	MLX5_SET(query_q_counter_in, in, clear, reset);
+	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
+	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
+
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+				  u32 *out_of_buffer)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
+	void *out;
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
+	if (!err)
+		*out_of_buffer = MLX5_GET(query_q_counter_out, out,
+					  out_of_buffer);
+
+	kfree(out);
+	return err;
+}
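The new Q-counter helpers follow the familiar alloc/query/dealloc shape: a counter-set id is allocated, attached to queues when they are created, and read back later, with mlx5_core_query_out_of_buffer() as a convenience wrapper for the single out_of_buffer field. A rough sketch; the attachment step depends on the consumer and is only hinted at here, and the function name is illustrative.

	static int example_q_counter(struct mlx5_core_dev *dev)
	{
		u32 out_of_buffer = 0;
		u16 counter_id;
		int err;

		err = mlx5_core_alloc_q_counter(dev, &counter_id);
		if (err)
			return err;

		/* ... pass counter_id when creating RQs/QPs so their drops are
		 * reported into this counter set ...
		 */
		err = mlx5_core_query_out_of_buffer(dev, counter_id, &out_of_buffer);
		if (!err)
			mlx5_core_dbg(dev, "out_of_buffer: %u\n", out_of_buffer);

		mlx5_core_dealloc_q_counter(dev, counter_id);
		return err;
	}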
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 7b24386..d6a3f41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -140,7 +140,7 @@
 	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
 	int err;
 
-	mlx5_core_dbg(dev, "requsted num_vfs %d\n", num_vfs);
+	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
 	if (!mlx5_core_is_pf(dev))
 		return -EPERM;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 8ba080e..5ff8af4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -269,8 +269,10 @@
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
-	iounmap(uar->map);
-	iounmap(uar->bf_map);
+	if (uar->map)
+		iounmap(uar->map);
+	else
+		iounmap(uar->bf_map);
 	mlx5_cmd_free_uar(mdev, uar->index);
 }
 EXPORT_SYMBOL(mlx5_unmap_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index bd51840..b69dadc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -196,6 +196,46 @@
 }
 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
 
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	u32 *out;
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+	if (!err)
+		*mtu = MLX5_GET(query_nic_vport_context_out, out,
+				nic_vport_context.mtu);
+
+	kvfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
+
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	void *in;
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+
+	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+	kvfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
+
 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
 				  u32 vport,
 				  enum mlx5_list_type list_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 9f10df2..f2fd1ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -95,21 +95,22 @@
 	return vxlan;
 }
 
-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_add_port(struct work_struct *work)
 {
+	struct mlx5e_vxlan_work *vxlan_work =
+		container_of(work, struct mlx5e_vxlan_work, work);
+	struct mlx5e_priv *priv = vxlan_work->priv;
 	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+	u16 port = vxlan_work->port;
 	struct mlx5e_vxlan *vxlan;
 	int err;
 
-	err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
-	if (err)
-		return err;
+	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+		goto free_work;
 
 	vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
-	if (!vxlan) {
-		err = -ENOMEM;
+	if (!vxlan)
 		goto err_delete_port;
-	}
 
 	vxlan->udp_port = port;
 
@@ -119,13 +120,14 @@
 	if (err)
 		goto err_free;
 
-	return 0;
+	goto free_work;
 
 err_free:
 	kfree(vxlan);
 err_delete_port:
 	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-	return err;
+free_work:
+	kfree(vxlan_work);
 }
 
 static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@
 	kfree(vxlan);
 }
 
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
-	if (!mlx5e_vxlan_lookup_port(priv, port))
-		return;
+	struct mlx5e_vxlan_work *vxlan_work =
+		container_of(work, struct mlx5e_vxlan_work, work);
+	struct mlx5e_priv *priv = vxlan_work->priv;
+	u16 port = vxlan_work->port;
 
 	__mlx5e_vxlan_core_del_port(priv, port);
+
+	kfree(vxlan_work);
+}
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+			    u16 port, int add)
+{
+	struct mlx5e_vxlan_work *vxlan_work;
+
+	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+	if (!vxlan_work)
+		return;
+
+	if (add)
+		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
+	else
+		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
+
+	vxlan_work->priv = priv;
+	vxlan_work->port = port;
+	vxlan_work->sa_family = sa_family;
+	queue_work(priv->wq, &vxlan_work->work);
 }
 
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
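The VXLAN port add/delete path no longer runs synchronously in the notifier's context: the caller only allocates a small mlx5e_vxlan_work item with GFP_ATOMIC and queues it on priv->wq, and the firmware commands run later from the work functions. A hypothetical caller might look like the sketch below; the real netdev hooks live in en_main.c, which is not part of this hunk, and the function name here is an assumption.

	static void example_add_vxlan_port(struct net_device *netdev,
					   sa_family_t sa_family, __be16 port)
	{
		struct mlx5e_priv *priv = netdev_priv(netdev);

		if (!mlx5e_vxlan_allowed(priv->mdev))
			return;

		mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
	}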
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index a016850..5def12c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -39,6 +39,13 @@
 	u16 udp_port;
 };
 
+struct mlx5e_vxlan_work {
+	struct work_struct	work;
+	struct mlx5e_priv	*priv;
+	sa_family_t		sa_family;
+	u16			port;
+};
+
 static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 {
 	return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
@@ -46,9 +53,10 @@
 }
 
 void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-int  mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
 
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+			    u16 port, int add);
+struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
+
 #endif /* __MLX5_VXLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 2ad7f67..5989f7c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -50,3 +50,11 @@
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called mlxsw_spectrum.
+
+config MLXSW_SPECTRUM_DCB
+	bool "Data Center Bridging (DCB) support"
+	depends on MLXSW_SPECTRUM && DCB
+	default y
+	---help---
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 584cac4..9b5ebf8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_MLXSW_SPECTRUM)	+= mlxsw_spectrum.o
 mlxsw_spectrum-objs		:= spectrum.o spectrum_buffers.o \
 				   spectrum_switchdev.o
+mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB)	+= spectrum_dcb.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f69f628..b0a0b01 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -44,7 +44,7 @@
 #include <linux/seq_file.h>
 #include <linux/u64_stats_sync.h>
 #include <linux/netdevice.h>
-#include <linux/wait.h>
+#include <linux/completion.h>
 #include <linux/skbuff.h>
 #include <linux/etherdevice.h>
 #include <linux/types.h>
@@ -55,6 +55,7 @@
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 #include <asm/byteorder.h>
 #include <net/devlink.h>
 
@@ -73,6 +74,8 @@
 
 static struct dentry *mlxsw_core_dbg_root;
 
+static struct workqueue_struct *mlxsw_wq;
+
 struct mlxsw_core_pcpu_stats {
 	u64			trap_rx_packets[MLXSW_TRAP_ID_MAX];
 	u64			trap_rx_bytes[MLXSW_TRAP_ID_MAX];
@@ -93,11 +96,9 @@
 	struct list_head rx_listener_list;
 	struct list_head event_listener_list;
 	struct {
-		struct sk_buff *resp_skb;
-		u64 tid;
-		wait_queue_head_t wait;
-		bool trans_active;
-		struct mutex lock; /* One EMAD transaction at a time. */
+		atomic64_t tid;
+		struct list_head trans_list;
+		spinlock_t trans_list_lock; /* protects trans_list writes */
 		bool use_emad;
 	} emad;
 	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
@@ -114,6 +115,12 @@
 	/* driver_priv has to be always the last item */
 };
 
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
+{
+	return mlxsw_core->driver_priv;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_priv);
+
 struct mlxsw_rx_listener_item {
 	struct list_head list;
 	struct mlxsw_rx_listener rxl;
@@ -284,7 +291,7 @@
 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
 				   const struct mlxsw_reg_info *reg,
 				   enum mlxsw_core_reg_access_type type,
-				   struct mlxsw_core *mlxsw_core)
+				   u64 tid)
 {
 	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
 	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
@@ -300,7 +307,7 @@
 					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
 	mlxsw_emad_op_tlv_class_set(op_tlv,
 				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
-	mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
 }
 
 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
@@ -322,7 +329,7 @@
 				 const struct mlxsw_reg_info *reg,
 				 char *payload,
 				 enum mlxsw_core_reg_access_type type,
-				 struct mlxsw_core *mlxsw_core)
+				 u64 tid)
 {
 	char *buf;
 
@@ -333,7 +340,7 @@
 	mlxsw_emad_pack_reg_tlv(buf, reg, payload);
 
 	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
-	mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
 
 	mlxsw_emad_construct_eth_hdr(skb);
 }
@@ -370,58 +377,16 @@
 	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
 }
 
-#define MLXSW_EMAD_TIMEOUT_MS 200
-
-static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
-				 struct sk_buff *skb,
-				 const struct mlxsw_tx_info *tx_info)
+static int mlxsw_emad_process_status(char *op_tlv,
+				     enum mlxsw_emad_op_tlv_status *p_status)
 {
-	int err;
-	int ret;
+	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
 
-	mlxsw_core->emad.trans_active = true;
-
-	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
-	if (err) {
-		dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
-			mlxsw_core->emad.tid);
-		dev_kfree_skb(skb);
-		goto trans_inactive_out;
-	}
-
-	ret = wait_event_timeout(mlxsw_core->emad.wait,
-				 !(mlxsw_core->emad.trans_active),
-				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
-	if (!ret) {
-		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
-			 mlxsw_core->emad.tid);
-		err = -EIO;
-		goto trans_inactive_out;
-	}
-
-	return 0;
-
-trans_inactive_out:
-	mlxsw_core->emad.trans_active = false;
-	return err;
-}
-
-static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
-				     char *op_tlv)
-{
-	enum mlxsw_emad_op_tlv_status status;
-	u64 tid;
-
-	status = mlxsw_emad_op_tlv_status_get(op_tlv);
-	tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
-
-	switch (status) {
+	switch (*p_status) {
 	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
 		return 0;
 	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
 	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
-		dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
-			 tid, status, mlxsw_emad_op_tlv_status_str(status));
 		return -EAGAIN;
 	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
 	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
@@ -432,70 +397,150 @@
 	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
 	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
 	default:
-		dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
-			tid, status, mlxsw_emad_op_tlv_status_str(status));
 		return -EIO;
 	}
 }
 
-static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
-					 struct sk_buff *skb)
+static int
+mlxsw_emad_process_status_skb(struct sk_buff *skb,
+			      enum mlxsw_emad_op_tlv_status *p_status)
 {
-	return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
+}
+
+struct mlxsw_reg_trans {
+	struct list_head list;
+	struct list_head bulk_list;
+	struct mlxsw_core *core;
+	struct sk_buff *tx_skb;
+	struct mlxsw_tx_info tx_info;
+	struct delayed_work timeout_dw;
+	unsigned int retries;
+	u64 tid;
+	struct completion completion;
+	atomic_t active;
+	mlxsw_reg_trans_cb_t *cb;
+	unsigned long cb_priv;
+	const struct mlxsw_reg_info *reg;
+	enum mlxsw_core_reg_access_type type;
+	int err;
+	enum mlxsw_emad_op_tlv_status emad_status;
+	struct rcu_head rcu;
+};
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
+{
+	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+
+	mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
 }
 
 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
-			       struct sk_buff *skb,
-			       const struct mlxsw_tx_info *tx_info)
+			       struct mlxsw_reg_trans *trans)
 {
-	struct sk_buff *trans_skb;
-	int n_retry;
+	struct sk_buff *skb;
 	int err;
 
-	n_retry = 0;
-retry:
-	/* We copy the EMAD to a new skb, since we might need
-	 * to retransmit it in case of failure.
-	 */
-	trans_skb = skb_copy(skb, GFP_KERNEL);
-	if (!trans_skb) {
-		err = -ENOMEM;
-		goto out;
+	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	atomic_set(&trans->active, 1);
+	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+	if (err) {
+		dev_kfree_skb(skb);
+		return err;
 	}
-
-	err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
-	if (!err) {
-		struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
-
-		err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
-		if (err)
-			dev_kfree_skb(resp_skb);
-		if (!err || err != -EAGAIN)
-			goto out;
-	}
-	if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
-		goto retry;
-
-out:
-	dev_kfree_skb(skb);
-	mlxsw_core->emad.tid++;
-	return err;
+	mlxsw_emad_trans_timeout_schedule(trans);
+	return 0;
 }
 
+static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
+{
+	struct mlxsw_core *mlxsw_core = trans->core;
+
+	dev_kfree_skb(trans->tx_skb);
+	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+	list_del_rcu(&trans->list);
+	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+	trans->err = err;
+	complete(&trans->completion);
+}
+
+static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
+				      struct mlxsw_reg_trans *trans)
+{
+	int err;
+
+	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
+		trans->retries++;
+		err = mlxsw_emad_transmit(trans->core, trans);
+		if (err == 0)
+			return;
+	} else {
+		err = -EIO;
+	}
+	mlxsw_emad_trans_finish(trans, err);
+}
+
+static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
+{
+	struct mlxsw_reg_trans *trans = container_of(work,
+						     struct mlxsw_reg_trans,
+						     timeout_dw.work);
+
+	if (!atomic_dec_and_test(&trans->active))
+		return;
+
+	mlxsw_emad_transmit_retry(trans->core, trans);
+}
+
+static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
+					struct mlxsw_reg_trans *trans,
+					struct sk_buff *skb)
+{
+	int err;
+
+	if (!atomic_dec_and_test(&trans->active))
+		return;
+
+	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
+	if (err == -EAGAIN) {
+		mlxsw_emad_transmit_retry(mlxsw_core, trans);
+	} else {
+		if (err == 0) {
+			char *op_tlv = mlxsw_emad_op_tlv(skb);
+
+			if (trans->cb)
+				trans->cb(mlxsw_core,
+					  mlxsw_emad_reg_payload(op_tlv),
+					  trans->reg->len, trans->cb_priv);
+		}
+		mlxsw_emad_trans_finish(trans, err);
+	}
+}
+
+/* called with rcu read lock held */
 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
 					void *priv)
 {
 	struct mlxsw_core *mlxsw_core = priv;
+	struct mlxsw_reg_trans *trans;
 
-	if (mlxsw_emad_is_resp(skb) &&
-	    mlxsw_core->emad.trans_active &&
-	    mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
-		mlxsw_core->emad.resp_skb = skb;
-		mlxsw_core->emad.trans_active = false;
-		wake_up(&mlxsw_core->emad.wait);
-	} else {
-		dev_kfree_skb(skb);
+	if (!mlxsw_emad_is_resp(skb))
+		goto free_skb;
+
+	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
+		if (mlxsw_emad_get_tid(skb) == trans->tid) {
+			mlxsw_emad_process_response(mlxsw_core, trans, skb);
+			break;
+		}
 	}
+
+free_skb:
+	dev_kfree_skb(skb);
 }
 
 static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
@@ -522,18 +567,19 @@
 
 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 {
+	u64 tid;
 	int err;
 
 	/* Set the upper 32 bits of the transaction ID field to a random
 	 * number. This allows us to discard EMADs addressed to other
 	 * devices.
 	 */
-	get_random_bytes(&mlxsw_core->emad.tid, 4);
-	mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+	get_random_bytes(&tid, 4);
+	tid <<= 32;
+	atomic64_set(&mlxsw_core->emad.tid, tid);
 
-	init_waitqueue_head(&mlxsw_core->emad.wait);
-	mlxsw_core->emad.trans_active = false;
-	mutex_init(&mlxsw_core->emad.lock);
+	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
+	spin_lock_init(&mlxsw_core->emad.trans_list_lock);
 
 	err = mlxsw_core_rx_listener_register(mlxsw_core,
 					      &mlxsw_emad_rx_listener,
@@ -591,6 +637,59 @@
 	return skb;
 }
 
+static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
+				 const struct mlxsw_reg_info *reg,
+				 char *payload,
+				 enum mlxsw_core_reg_access_type type,
+				 struct mlxsw_reg_trans *trans,
+				 struct list_head *bulk_list,
+				 mlxsw_reg_trans_cb_t *cb,
+				 unsigned long cb_priv, u64 tid)
+{
+	struct sk_buff *skb;
+	int err;
+
+	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+		trans->tid, reg->id, mlxsw_reg_id_str(reg->id),
+		mlxsw_core_reg_access_type_str(type));
+
+	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+	if (!skb)
+		return -ENOMEM;
+
+	list_add_tail(&trans->bulk_list, bulk_list);
+	trans->core = mlxsw_core;
+	trans->tx_skb = skb;
+	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
+	trans->tx_info.is_emad = true;
+	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
+	trans->tid = tid;
+	init_completion(&trans->completion);
+	trans->cb = cb;
+	trans->cb_priv = cb_priv;
+	trans->reg = reg;
+	trans->type = type;
+
+	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
+	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
+
+	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
+	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+	err = mlxsw_emad_transmit(mlxsw_core, trans);
+	if (err)
+		goto err_out;
+	return 0;
+
+err_out:
+	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+	list_del_rcu(&trans->list);
+	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+	list_del(&trans->bulk_list);
+	dev_kfree_skb(trans->tx_skb);
+	return err;
+}
+
 /*****************
  * Core functions
  *****************/
@@ -680,24 +779,6 @@
 	.llseek = seq_lseek
 };
 
-static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
-				    const char *buf, size_t size)
-{
-	__be32 *m = (__be32 *) buf;
-	int i;
-	int count = size / sizeof(__be32);
-
-	for (i = count - 1; i >= 0; i--)
-		if (m[i])
-			break;
-	i++;
-	count = i ? i : 1;
-	for (i = 0; i < count; i += 4)
-		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
-			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
-			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
-}
-
 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
 {
 	spin_lock(&mlxsw_core_driver_list_lock);
@@ -795,8 +876,7 @@
 		return -EINVAL;
 	if (!mlxsw_core->driver->port_split)
 		return -EOPNOTSUPP;
-	return mlxsw_core->driver->port_split(mlxsw_core->driver_priv,
-					      port_index, count);
+	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
 }
 
 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
@@ -808,13 +888,171 @@
 		return -EINVAL;
 	if (!mlxsw_core->driver->port_unsplit)
 		return -EOPNOTSUPP;
-	return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv,
-						port_index);
+	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
+}
+
+static int
+mlxsw_devlink_sb_pool_get(struct devlink *devlink,
+			  unsigned int sb_index, u16 pool_index,
+			  struct devlink_sb_pool_info *pool_info)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+	if (!mlxsw_driver->sb_pool_get)
+		return -EOPNOTSUPP;
+	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
+					 pool_index, pool_info);
+}
+
+static int
+mlxsw_devlink_sb_pool_set(struct devlink *devlink,
+			  unsigned int sb_index, u16 pool_index, u32 size,
+			  enum devlink_sb_threshold_type threshold_type)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+	if (!mlxsw_driver->sb_pool_set)
+		return -EOPNOTSUPP;
+	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
+					 pool_index, size, threshold_type);
+}
+
+static void *__dl_port(struct devlink_port *devlink_port)
+{
+	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
+}
+
+static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
+					  unsigned int sb_index, u16 pool_index,
+					  u32 *p_threshold)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+	if (!mlxsw_driver->sb_port_pool_get)
+		return -EOPNOTSUPP;
+	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
+					      pool_index, p_threshold);
+}
+
+static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
+					  unsigned int sb_index, u16 pool_index,
+					  u32 threshold)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+	if (!mlxsw_driver->sb_port_pool_set)
+		return -EOPNOTSUPP;
+	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
+					      pool_index, threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
+				  unsigned int sb_index, u16 tc_index,
+				  enum devlink_sb_pool_type pool_type,
+				  u16 *p_pool_index, u32 *p_threshold)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+	if (!mlxsw_driver->sb_tc_pool_bind_get)
+		return -EOPNOTSUPP;
+	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
+						 tc_index, pool_type,
+						 p_pool_index, p_threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
+				  unsigned int sb_index, u16 tc_index,
+				  enum devlink_sb_pool_type pool_type,
+				  u16 pool_index, u32 threshold)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+	if (!mlxsw_driver->sb_tc_pool_bind_set)
+		return -EOPNOTSUPP;
+	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
+						 tc_index, pool_type,
+						 pool_index, threshold);
+}
+
+static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
+					 unsigned int sb_index)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+	if (!mlxsw_driver->sb_occ_snapshot)
+		return -EOPNOTSUPP;
+	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
+}
+
+static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
+					  unsigned int sb_index)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+	if (!mlxsw_driver->sb_occ_max_clear)
+		return -EOPNOTSUPP;
+	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
+}
+
+static int
+mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
+				   unsigned int sb_index, u16 pool_index,
+				   u32 *p_cur, u32 *p_max)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+	if (!mlxsw_driver->sb_occ_port_pool_get)
+		return -EOPNOTSUPP;
+	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
+						  pool_index, p_cur, p_max);
+}
+
+static int
+mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
+				      unsigned int sb_index, u16 tc_index,
+				      enum devlink_sb_pool_type pool_type,
+				      u32 *p_cur, u32 *p_max)
+{
+	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+	if (!mlxsw_driver->sb_occ_tc_port_bind_get)
+		return -EOPNOTSUPP;
+	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
+						     sb_index, tc_index,
+						     pool_type, p_cur, p_max);
 }
 
 static const struct devlink_ops mlxsw_devlink_ops = {
-	.port_split	= mlxsw_devlink_port_split,
-	.port_unsplit	= mlxsw_devlink_port_unsplit,
+	.port_split			= mlxsw_devlink_port_split,
+	.port_unsplit			= mlxsw_devlink_port_unsplit,
+	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
+	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
+	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
+	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
+	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
+	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
+	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
+	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
+	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
+	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
 };
 
 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
@@ -880,8 +1118,7 @@
 	if (err)
 		goto err_devlink_register;
 
-	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
-				 mlxsw_bus_info);
+	err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
 	if (err)
 		goto err_driver_init;
 
@@ -892,7 +1129,7 @@
 	return 0;
 
 err_debugfs_init:
-	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+	mlxsw_core->driver->fini(mlxsw_core);
 err_driver_init:
 	devlink_unregister(devlink);
 err_devlink_register:
@@ -918,7 +1155,7 @@
 	struct devlink *devlink = priv_to_devlink(mlxsw_core);
 
 	mlxsw_core_debugfs_fini(mlxsw_core);
-	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+	mlxsw_core->driver->fini(mlxsw_core);
 	devlink_unregister(devlink);
 	mlxsw_emad_fini(mlxsw_core);
 	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
@@ -929,26 +1166,17 @@
 }
 EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
 
-static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
-{
-	return container_of(driver_priv, struct mlxsw_core, driver_priv);
-}
-
-bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
 				  const struct mlxsw_tx_info *tx_info)
 {
-	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
-
 	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
 						  tx_info);
 }
 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
 
-int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
 			    const struct mlxsw_tx_info *tx_info)
 {
-	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
-
 	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
 					     tx_info);
 }
@@ -1108,56 +1336,112 @@
 }
 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
 
+static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
+{
+	return atomic64_inc_return(&mlxsw_core->emad.tid);
+}
+
 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
 				      const struct mlxsw_reg_info *reg,
 				      char *payload,
-				      enum mlxsw_core_reg_access_type type)
+				      enum mlxsw_core_reg_access_type type,
+				      struct list_head *bulk_list,
+				      mlxsw_reg_trans_cb_t *cb,
+				      unsigned long cb_priv)
 {
+	u64 tid = mlxsw_core_tid_get(mlxsw_core);
+	struct mlxsw_reg_trans *trans;
 	int err;
-	char *op_tlv;
-	struct sk_buff *skb;
-	struct mlxsw_tx_info tx_info = {
-		.local_port = MLXSW_PORT_CPU_PORT,
-		.is_emad = true,
-	};
 
-	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
-	if (!skb)
+	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+	if (!trans)
 		return -ENOMEM;
 
-	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
-	mlxsw_core->driver->txhdr_construct(skb, &tx_info);
-
-	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
-		mlxsw_core->emad.tid);
-	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
-
-	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
-	if (!err) {
-		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
-		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
-		       reg->len);
-
-		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
-			mlxsw_core->emad.tid - 1);
-		mlxsw_core_buf_dump_dbg(mlxsw_core,
-					mlxsw_core->emad.resp_skb->data,
-					mlxsw_core->emad.resp_skb->len);
-
-		dev_kfree_skb(mlxsw_core->emad.resp_skb);
+	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
+				    bulk_list, cb, cb_priv, tid);
+	if (err) {
+		kfree(trans);
+		return err;
 	}
+	return 0;
+}
 
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+			  const struct mlxsw_reg_info *reg, char *payload,
+			  struct list_head *bulk_list,
+			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+					  bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_query);
+
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+			  const struct mlxsw_reg_info *reg, char *payload,
+			  struct list_head *bulk_list,
+			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+					  bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_write);
+
+static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
+{
+	struct mlxsw_core *mlxsw_core = trans->core;
+	int err;
+
+	wait_for_completion(&trans->completion);
+	cancel_delayed_work_sync(&trans->timeout_dw);
+	err = trans->err;
+
+	if (trans->retries)
+		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
+			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
+	if (err)
+		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
+			trans->tid, trans->reg->id,
+			mlxsw_reg_id_str(trans->reg->id),
+			mlxsw_core_reg_access_type_str(trans->type),
+			trans->emad_status,
+			mlxsw_emad_op_tlv_status_str(trans->emad_status));
+
+	list_del(&trans->bulk_list);
+	kfree_rcu(trans, rcu);
 	return err;
 }
 
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
+{
+	struct mlxsw_reg_trans *trans;
+	struct mlxsw_reg_trans *tmp;
+	int sum_err = 0;
+	int err;
+
+	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
+		err = mlxsw_reg_trans_wait(trans);
+		if (err && sum_err == 0)
+			sum_err = err; /* first error to be returned */
+	}
+	return sum_err;
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
+
 static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
 				     const struct mlxsw_reg_info *reg,
 				     char *payload,
 				     enum mlxsw_core_reg_access_type type)
 {
+	enum mlxsw_emad_op_tlv_status status;
 	int err, n_retry;
 	char *in_mbox, *out_mbox, *tmp;
 
+	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
+		reg->id, mlxsw_reg_id_str(reg->id),
+		mlxsw_core_reg_access_type_str(type));
+
 	in_mbox = mlxsw_cmd_mbox_alloc();
 	if (!in_mbox)
 		return -ENOMEM;
@@ -1168,7 +1452,8 @@
 		goto free_in_mbox;
 	}
 
-	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
+			       mlxsw_core_tid_get(mlxsw_core));
 	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
 	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
 
@@ -1176,60 +1461,61 @@
 retry:
 	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
 	if (!err) {
-		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
-		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
-			goto retry;
+		err = mlxsw_emad_process_status(out_mbox, &status);
+		if (err) {
+			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+				goto retry;
+			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
+				status, mlxsw_emad_op_tlv_status_str(status));
+		}
 	}
 
 	if (!err)
 		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
 		       reg->len);
 
-	mlxsw_core->emad.tid++;
 	mlxsw_cmd_mbox_free(out_mbox);
 free_in_mbox:
 	mlxsw_cmd_mbox_free(in_mbox);
+	if (err)
+		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
+			reg->id, mlxsw_reg_id_str(reg->id),
+			mlxsw_core_reg_access_type_str(type));
 	return err;
 }
 
+static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
+				     char *payload, size_t payload_len,
+				     unsigned long cb_priv)
+{
+	char *orig_payload = (char *) cb_priv;
+
+	memcpy(orig_payload, payload, payload_len);
+}
+
 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
 				 const struct mlxsw_reg_info *reg,
 				 char *payload,
 				 enum mlxsw_core_reg_access_type type)
 {
-	u64 cur_tid;
+	LIST_HEAD(bulk_list);
 	int err;
 
-	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
-		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
-			reg->id, mlxsw_reg_id_str(reg->id),
-			mlxsw_core_reg_access_type_str(type));
-		return -EINTR;
-	}
-
-	cur_tid = mlxsw_core->emad.tid;
-	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
-		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
-		mlxsw_core_reg_access_type_str(type));
-
 	/* During initialization EMAD interface is not available to us,
 	 * so we default to command interface. We switch to EMAD interface
 	 * after setting the appropriate traps.
 	 */
 	if (!mlxsw_core->emad.use_emad)
-		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
-						payload, type);
-	else
-		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
 						 payload, type);
 
+	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+					 payload, type, &bulk_list,
+					 mlxsw_core_reg_access_cb,
+					 (unsigned long) payload);
 	if (err)
-		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
-			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
-			mlxsw_core_reg_access_type_str(type));
-
-	mutex_unlock(&mlxsw_core->emad.lock);
-	return err;
+		return err;
+	return mlxsw_reg_trans_bulk_wait(&bulk_list);
 }
 
 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
@@ -1358,6 +1644,46 @@
 }
 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
 
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
+			 struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
+			 struct net_device *dev, bool split, u32 split_group)
+{
+	struct devlink *devlink = priv_to_devlink(mlxsw_core);
+	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+	if (split)
+		devlink_port_split_set(devlink_port, split_group);
+	devlink_port_type_eth_set(devlink_port, dev);
+	return devlink_port_register(devlink, devlink_port, local_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_init);
+
+void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
+{
+	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+	devlink_port_unregister(devlink_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_fini);
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+				    const char *buf, size_t size)
+{
+	__be32 *m = (__be32 *) buf;
+	int i;
+	int count = size / sizeof(__be32);
+
+	for (i = count - 1; i >= 0; i--)
+		if (m[i])
+			break;
+	i++;
+	count = i ? i : 1;
+	for (i = 0; i < count; i += 4)
+		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
 		   u32 in_mod, bool out_mbox_direct,
 		   char *in_mbox, size_t in_mbox_size,
@@ -1400,17 +1726,35 @@
 }
 EXPORT_SYMBOL(mlxsw_cmd_exec);
 
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
+{
+	return queue_delayed_work(mlxsw_wq, dwork, delay);
+}
+EXPORT_SYMBOL(mlxsw_core_schedule_dw);
+
 static int __init mlxsw_core_module_init(void)
 {
-	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
-	if (!mlxsw_core_dbg_root)
+	int err;
+
+	mlxsw_wq = create_workqueue(mlxsw_core_driver_name);
+	if (!mlxsw_wq)
 		return -ENOMEM;
+	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+	if (!mlxsw_core_dbg_root) {
+		err = -ENOMEM;
+		goto err_debugfs_create_dir;
+	}
 	return 0;
+
+err_debugfs_create_dir:
+	destroy_workqueue(mlxsw_wq);
+	return err;
 }
 
 static void __exit mlxsw_core_module_exit(void)
 {
 	debugfs_remove_recursive(mlxsw_core_dbg_root);
+	destroy_workqueue(mlxsw_wq);
 }
 
 module_init(mlxsw_core_module_init);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c73d1c0..436bc49 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -43,6 +43,8 @@
 #include <linux/gfp.h>
 #include <linux/types.h>
 #include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/devlink.h>
 
 #include "trap.h"
 #include "reg.h"
@@ -61,6 +63,8 @@
 struct mlxsw_bus;
 struct mlxsw_bus_info;
 
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
+
 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
 
@@ -74,10 +78,9 @@
 	bool is_emad;
 };
 
-bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
 				  const struct mlxsw_tx_info *tx_info);
-
-int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
 			    const struct mlxsw_tx_info *tx_info);
 
 struct mlxsw_rx_listener {
@@ -106,6 +109,19 @@
 					  const struct mlxsw_event_listener *el,
 					  void *priv);
 
+typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
+				  size_t payload_len, unsigned long cb_priv);
+
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+			  const struct mlxsw_reg_info *reg, char *payload,
+			  struct list_head *bulk_list,
+			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+			  const struct mlxsw_reg_info *reg, char *payload,
+			  struct list_head *bulk_list,
+			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list);
+
 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
 		    const struct mlxsw_reg_info *reg, char *payload);
 int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
@@ -131,6 +147,26 @@
 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
 				  u16 lag_id, u8 local_port);
 
+struct mlxsw_core_port {
+	struct devlink_port devlink_port;
+};
+
+static inline void *
+mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
+{
+	/* mlxsw_core_port is ensured to always be the first field in the
+	 * driver's port structure.
+	 */
+	return mlxsw_core_port;
+}
+
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
+			 struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
+			 struct net_device *dev, bool split, u32 split_group);
+void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port);
+
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
+
 #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
 
 struct mlxsw_swid_config {
@@ -183,11 +219,43 @@
 	const char *kind;
 	struct module *owner;
 	size_t priv_size;
-	int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+	int (*init)(struct mlxsw_core *mlxsw_core,
 		    const struct mlxsw_bus_info *mlxsw_bus_info);
-	void (*fini)(void *driver_priv);
-	int (*port_split)(void *driver_priv, u8 local_port, unsigned int count);
-	int (*port_unsplit)(void *driver_priv, u8 local_port);
+	void (*fini)(struct mlxsw_core *mlxsw_core);
+	int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
+			  unsigned int count);
+	int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port);
+	int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
+			   unsigned int sb_index, u16 pool_index,
+			   struct devlink_sb_pool_info *pool_info);
+	int (*sb_pool_set)(struct mlxsw_core *mlxsw_core,
+			   unsigned int sb_index, u16 pool_index, u32 size,
+			   enum devlink_sb_threshold_type threshold_type);
+	int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+				unsigned int sb_index, u16 pool_index,
+				u32 *p_threshold);
+	int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port,
+				unsigned int sb_index, u16 pool_index,
+				u32 threshold);
+	int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+				   unsigned int sb_index, u16 tc_index,
+				   enum devlink_sb_pool_type pool_type,
+				   u16 *p_pool_index, u32 *p_threshold);
+	int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port,
+				   unsigned int sb_index, u16 tc_index,
+				   enum devlink_sb_pool_type pool_type,
+				   u16 pool_index, u32 threshold);
+	int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core,
+			       unsigned int sb_index);
+	int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core,
+				unsigned int sb_index);
+	int (*sb_occ_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+				    unsigned int sb_index, u16 pool_index,
+				    u32 *p_cur, u32 *p_max);
+	int (*sb_occ_tc_port_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+				       unsigned int sb_index, u16 tc_index,
+				       enum devlink_sb_pool_type pool_type,
+				       u32 *p_cur, u32 *p_max);
 	void (*txhdr_construct)(struct sk_buff *skb,
 				const struct mlxsw_tx_info *tx_info);
 	u8 txhdr_len;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index ffe4c03..1977e7a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1805,6 +1805,184 @@
 	}
 }
 
+/* QTCT - QoS Switch Traffic Class Table
+ * -------------------------------------
+ * Configures the mapping between the packet switch priority and the
+ * traffic class on the transmit port.
+ */
+#define MLXSW_REG_QTCT_ID 0x400A
+#define MLXSW_REG_QTCT_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_qtct = {
+	.id = MLXSW_REG_QTCT_ID,
+	.len = MLXSW_REG_QTCT_LEN,
+};
+
+/* reg_qtct_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
+
+/* reg_qtct_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtct, sub_port, 0x00, 8, 8);
+
+/* reg_qtct_switch_prio
+ * Switch priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
+
+/* reg_qtct_tclass
+ * Traffic class.
+ * Default values:
+ * switch_prio 0 : tclass 1
+ * switch_prio 1 : tclass 0
+ * switch_prio i : tclass i, for i > 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
+
+static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
+				       u8 switch_prio, u8 tclass)
+{
+	MLXSW_REG_ZERO(qtct, payload);
+	mlxsw_reg_qtct_local_port_set(payload, local_port);
+	mlxsw_reg_qtct_switch_prio_set(payload, switch_prio);
+	mlxsw_reg_qtct_tclass_set(payload, tclass);
+}
+
+/* QEEC - QoS ETS Element Configuration Register
+ * ---------------------------------------------
+ * Configures the ETS elements.
+ */
+#define MLXSW_REG_QEEC_ID 0x400D
+#define MLXSW_REG_QEEC_LEN 0x1C
+
+static const struct mlxsw_reg_info mlxsw_reg_qeec = {
+	.id = MLXSW_REG_QEEC_ID,
+	.len = MLXSW_REG_QEEC_LEN,
+};
+
+/* reg_qeec_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported.
+ */
+MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_qeec_hr {
+	MLXSW_REG_QEEC_HIERARCY_PORT,
+	MLXSW_REG_QEEC_HIERARCY_GROUP,
+	MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+	MLXSW_REG_QEEC_HIERARCY_TC,
+};
+
+/* reg_qeec_element_hierarchy
+ * 0 - Port
+ * 1 - Group
+ * 2 - Subgroup
+ * 3 - Traffic Class
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qeec, element_hierarchy, 0x04, 16, 4);
+
+/* reg_qeec_element_index
+ * The index of the element in the hierarchy.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
+
+/* reg_qeec_next_element_index
+ * The index of the next (lower) element in the hierarchy.
+ * Access: RW
+ *
+ * Note: Reserved for element_hierarchy 0.
+ */
+MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
+
+enum {
+	MLXSW_REG_QEEC_BYTES_MODE,
+	MLXSW_REG_QEEC_PACKETS_MODE,
+};
+
+/* reg_qeec_pb
+ * Packets or bytes mode.
+ * 0 - Bytes mode
+ * 1 - Packets mode
+ * Access: RW
+ *
+ * Note: Used for max shaper configuration. For Spectrum, packets mode
+ * is supported only for traffic classes of CPU port.
+ */
+MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
+
+/* reg_qeec_mase
+ * Max shaper configuration enable. Enables configuration of the max
+ * shaper on this ETS element.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
+
+/* A large max rate will disable the max shaper. */
+#define MLXSW_REG_QEEC_MAS_DIS	200000000	/* Kbps */
+
+/* reg_qeec_max_shaper_rate
+ * Max shaper information rate.
+ * For CPU port, can only be configured for port hierarchy.
+ * When in bytes mode, value is specified in units of 1000bps.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28);
+
+/* reg_qeec_de
+ * DWRR configuration enable. Enables configuration of the dwrr and
+ * dwrr_weight.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, de, 0x18, 31, 1);
+
+/* reg_qeec_dwrr
+ * Transmission selection algorithm to use on the link going down from
+ * the ETS element.
+ * 0 - Strict priority
+ * 1 - DWRR
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
+
+/* reg_qeec_dwrr_weight
+ * DWRR weight on the link going down from the ETS element. The
+ * percentage of bandwidth guaranteed to an ETS element within
+ * its hierarchy. The sum of all weights across all ETS elements
+ * within one hierarchy should be equal to 100. Reserved when
+ * transmission selection algorithm is strict priority.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
+
+static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
+				       enum mlxsw_reg_qeec_hr hr, u8 index,
+				       u8 next_index)
+{
+	MLXSW_REG_ZERO(qeec, payload);
+	mlxsw_reg_qeec_local_port_set(payload, local_port);
+	mlxsw_reg_qeec_element_hierarchy_set(payload, hr);
+	mlxsw_reg_qeec_element_index_set(payload, index);
+	mlxsw_reg_qeec_next_element_index_set(payload, next_index);
+}
+
 /* PMLP - Ports Module to Local Port Register
  * ------------------------------------------
  * Configures the assignment of modules to local ports.
@@ -2141,6 +2319,145 @@
 	mlxsw_reg_paos_e_set(payload, 1);
 }
 
+/* PFCC - Ports Flow Control Configuration Register
+ * ------------------------------------------------
+ * Configures and retrieves the per port flow control configuration.
+ */
+#define MLXSW_REG_PFCC_ID 0x5007
+#define MLXSW_REG_PFCC_LEN 0x20
+
+static const struct mlxsw_reg_info mlxsw_reg_pfcc = {
+	.id = MLXSW_REG_PFCC_ID,
+	.len = MLXSW_REG_PFCC_LEN,
+};
+
+/* reg_pfcc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8);
+
+/* reg_pfcc_pnat
+ * Port number access type. Determines the way local_port is interpreted:
+ * 0 - Local port number.
+ * 1 - IB / label port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pfcc, pnat, 0x00, 14, 2);
+
+/* reg_pfcc_shl_cap
+ * Send to higher layers capabilities:
+ * 0 - No capability of sending Pause and PFC frames to higher layers.
+ * 1 - Device has capability of sending Pause and PFC frames to higher
+ *     layers.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, shl_cap, 0x00, 1, 1);
+
+/* reg_pfcc_shl_opr
+ * Send to higher layers operation:
+ * 0 - Pause and PFC frames are handled by the port (default).
+ * 1 - Pause and PFC frames are handled by the port and also sent to
+ *     higher layers. Only valid if shl_cap = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, shl_opr, 0x00, 0, 1);
+
+/* reg_pfcc_ppan
+ * Pause policy auto negotiation.
+ * 0 - Disabled. Generate / ignore Pause frames based on pptx / pprtx.
+ * 1 - Enabled. When auto-negotiation is performed, set the Pause policy
+ *     based on the auto-negotiation resolution.
+ * Access: RW
+ *
+ * Note: The auto-negotiation advertisement is set according to pptx and
+ * pprtx. When PFC is set on Tx / Rx, ppan must be set to 0.
+ */
+MLXSW_ITEM32(reg, pfcc, ppan, 0x04, 28, 4);
+
+/* reg_pfcc_prio_mask_tx
+ * Bit per priority indicating if Tx flow control policy should be
+ * updated based on bit pfctx.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pfcc, prio_mask_tx, 0x04, 16, 8);
+
+/* reg_pfcc_prio_mask_rx
+ * Bit per priority indicating if Rx flow control policy should be
+ * updated based on bit pfcrx.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pfcc, prio_mask_rx, 0x04, 0, 8);
+
+/* reg_pfcc_pptx
+ * Admin Pause policy on Tx.
+ * 0 - Never generate Pause frames (default).
+ * 1 - Generate Pause frames according to Rx buffer threshold.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pptx, 0x08, 31, 1);
+
+/* reg_pfcc_aptx
+ * Active (operational) Pause policy on Tx.
+ * 0 - Never generate Pause frames.
+ * 1 - Generate Pause frames according to Rx buffer threshold.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, aptx, 0x08, 30, 1);
+
+/* reg_pfcc_pfctx
+ * Priority based flow control policy on Tx[7:0]. Per-priority bit mask:
+ * 0 - Never generate priority Pause frames on the specified priority
+ *     (default).
+ * 1 - Generate priority Pause frames according to Rx buffer threshold on
+ *     the specified priority.
+ * Access: RW
+ *
+ * Note: pfctx and pptx must be mutually exclusive.
+ */
+MLXSW_ITEM32(reg, pfcc, pfctx, 0x08, 16, 8);
+
+/* reg_pfcc_pprx
+ * Admin Pause policy on Rx.
+ * 0 - Ignore received Pause frames (default).
+ * 1 - Respect received Pause frames.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pprx, 0x0C, 31, 1);
+
+/* reg_pfcc_aprx
+ * Active (operational) Pause policy on Rx.
+ * 0 - Ignore received Pause frames.
+ * 1 - Respect received Pause frames.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1);
+
+/* reg_pfcc_pfcrx
+ * Priority based flow control policy on Rx[7:0]. Per-priority bit mask:
+ * 0 - Ignore incoming priority Pause frames on the specified priority
+ *     (default).
+ * 1 - Respect incoming priority Pause frames on the specified priority.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8);
+
+#define MLXSW_REG_PFCC_ALL_PRIO 0xFF
+
+static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
+{
+	mlxsw_reg_pfcc_prio_mask_tx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
+	mlxsw_reg_pfcc_prio_mask_rx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
+	mlxsw_reg_pfcc_pfctx_set(payload, pfc_en);
+	mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
+}
+
+static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(pfcc, payload);
+	mlxsw_reg_pfcc_local_port_set(payload, local_port);
+}
+
 /* PPCNT - Ports Performance Counters Register
  * -------------------------------------------
  * The PPCNT register retrieves per port performance counters.
@@ -2180,6 +2497,11 @@
  */
 MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
 
+enum mlxsw_reg_ppcnt_grp {
+	MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+	MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
+};
+
 /* reg_ppcnt_grp
  * Performance counter group.
  * Group 63 indicates all groups. Only valid on Set() operation with
@@ -2215,6 +2537,8 @@
  */
 MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
 
+/* Ethernet IEEE 802.3 Counter Group */
+
 /* reg_ppcnt_a_frames_transmitted_ok
  * Access: RO
  */
@@ -2329,15 +2653,145 @@
 MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
 	     0x08 + 0x90, 0, 64);
 
-static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+/* Ethernet Per Priority Group Counters */
+
+/* reg_ppcnt_rx_octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_rx_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_tx_octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_tx_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_rx_pause
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_rx_pause_duration
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_tx_pause
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_tx_pause_duration
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_tx_pause_transition
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
+					enum mlxsw_reg_ppcnt_grp grp,
+					u8 prio_tc)
 {
 	MLXSW_REG_ZERO(ppcnt, payload);
 	mlxsw_reg_ppcnt_swid_set(payload, 0);
 	mlxsw_reg_ppcnt_local_port_set(payload, local_port);
 	mlxsw_reg_ppcnt_pnat_set(payload, 0);
-	mlxsw_reg_ppcnt_grp_set(payload, 0);
+	mlxsw_reg_ppcnt_grp_set(payload, grp);
 	mlxsw_reg_ppcnt_clr_set(payload, 0);
-	mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+	mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
+}
+
+/* PPTB - Port Prio To Buffer Register
+ * -----------------------------------
+ * Configures the switch priority to buffer table.
+ */
+#define MLXSW_REG_PPTB_ID 0x500B
+#define MLXSW_REG_PPTB_LEN 0x0C
+
+static const struct mlxsw_reg_info mlxsw_reg_pptb = {
+	.id = MLXSW_REG_PPTB_ID,
+	.len = MLXSW_REG_PPTB_LEN,
+};
+
+enum {
+	MLXSW_REG_PPTB_MM_UM,
+	MLXSW_REG_PPTB_MM_UNICAST,
+	MLXSW_REG_PPTB_MM_MULTICAST,
+};
+
+/* reg_pptb_mm
+ * Mapping mode.
+ * 0 - Map both unicast and multicast packets to the same buffer.
+ * 1 - Map only unicast packets.
+ * 2 - Map only multicast packets.
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports the first option.
+ */
+MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
+
+/* reg_pptb_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8);
+
+/* reg_pptb_um
+ * Enables the update of the untagged_buf field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, um, 0x00, 8, 1);
+
+/* reg_pptb_pm
+ * Enables the update of the prio_to_buff field.
+ * Bit <i> is a flag for updating the mapping for switch priority <i>.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, pm, 0x00, 0, 8);
+
+/* reg_pptb_prio_to_buff
+ * Mapping of switch priority <i> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff, 0x04, 0x04, 4);
+
+/* reg_pptb_pm_msb
+ * Enables the update of the prio_to_buff field.
+ * Bit <i> is a flag for updating the mapping for switch priority <i+8>.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
+
+/* reg_pptb_untagged_buff
+ * Mapping of untagged frames to one of the allocated receive port buffers.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field must be mapped to buffer 8. Reserved for
+ * Spectrum, as it maps untagged packets based on the default switch priority.
+ */
+MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
+
+#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
+
+static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(pptb, payload);
+	mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
+	mlxsw_reg_pptb_local_port_set(payload, local_port);
+	mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
 }
 
 /* PBMC - Port Buffer Management Control Register
@@ -2346,7 +2800,7 @@
  * allocation for different Prios, and the Pause threshold management.
  */
 #define MLXSW_REG_PBMC_ID 0x500C
-#define MLXSW_REG_PBMC_LEN 0x68
+#define MLXSW_REG_PBMC_LEN 0x6C
 
 static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
 	.id = MLXSW_REG_PBMC_ID,
@@ -2374,6 +2828,8 @@
  */
 MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
 
+#define MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX 11
+
 /* reg_pbmc_buf_lossy
  * The field indicates if the buffer is lossy.
  * 0 - Lossless
@@ -2398,6 +2854,30 @@
  */
 MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
 
+/* reg_pbmc_buf_xoff_threshold
+ * Once the amount of data in the buffer goes above this value, device
+ * starts sending PFC frames for all priorities associated with the
+ * buffer. Units are represented in cells. Reserved in case of lossy
+ * buffer.
+ * Access: RW
+ *
+ * Note: In Spectrum, reserved for buffer[9].
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
+		     0x08, 0x04, false);
+
+/* reg_pbmc_buf_xon_threshold
+ * When the amount of data in the buffer goes below this value, device
+ * stops sending PFC frames for the priorities associated with the
+ * buffer. Units are represented in cells. Reserved in case of lossy
+ * buffer.
+ * Access: RW
+ *
+ * Note: In Spectrum, reserved for buffer[9].
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
+		     0x08, 0x04, false);
+
 static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
 				       u16 xoff_timer_value, u16 xoff_refresh)
 {
@@ -2416,6 +2896,17 @@
 	mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
 }
 
+static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload,
+						       int buf_index, u16 size,
+						       u16 threshold)
+{
+	mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 0);
+	mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+	mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+	mlxsw_reg_pbmc_buf_xoff_threshold_set(payload, buf_index, threshold);
+	mlxsw_reg_pbmc_buf_xon_threshold_set(payload, buf_index, threshold);
+}
+
 /* PSPA - Port Switch Partition Allocation
  * ---------------------------------------
  * Controls the association of a port with a switch partition and enables
@@ -2985,9 +3476,10 @@
 	.len = MLXSW_REG_SBPR_LEN,
 };
 
-enum mlxsw_reg_sbpr_dir {
-	MLXSW_REG_SBPR_DIR_INGRESS,
-	MLXSW_REG_SBPR_DIR_EGRESS,
+/* shared direction enum for SBPR, SBCM, SBPM */
+enum mlxsw_reg_sbxx_dir {
+	MLXSW_REG_SBXX_DIR_INGRESS,
+	MLXSW_REG_SBXX_DIR_EGRESS,
 };
 
 /* reg_sbpr_dir
@@ -3020,7 +3512,7 @@
 MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
 
 static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
-				       enum mlxsw_reg_sbpr_dir dir,
+				       enum mlxsw_reg_sbxx_dir dir,
 				       enum mlxsw_reg_sbpr_mode mode, u32 size)
 {
 	MLXSW_REG_ZERO(sbpr, payload);
@@ -3062,11 +3554,6 @@
  */
 MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
 
-enum mlxsw_reg_sbcm_dir {
-	MLXSW_REG_SBCM_DIR_INGRESS,
-	MLXSW_REG_SBCM_DIR_EGRESS,
-};
-
 /* reg_sbcm_dir
  * Direction.
  * Access: Index
@@ -3079,6 +3566,10 @@
  */
 MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
 
+/* shared max_buff limits for dynamic threshold for SBCM, SBPM */
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14
+
 /* reg_sbcm_max_buff
  * When the pool associated to the port-pg/tclass is configured to
  * static, Maximum buffer size for the limiter configured in cells.
@@ -3099,7 +3590,7 @@
 MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
 
 static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
-				       enum mlxsw_reg_sbcm_dir dir,
+				       enum mlxsw_reg_sbxx_dir dir,
 				       u32 min_buff, u32 max_buff, u8 pool)
 {
 	MLXSW_REG_ZERO(sbcm, payload);
@@ -3111,8 +3602,8 @@
 	mlxsw_reg_sbcm_pool_set(payload, pool);
 }
 
-/* SBPM - Shared Buffer Class Management Register
- * ----------------------------------------------
+/* SBPM - Shared Buffer Port Management Register
+ * ---------------------------------------------
  * The SBPM register configures and retrieves the shared buffer allocation
  * and configuration according to Port-Pool, including the definition
  * of the associated quota.
@@ -3139,17 +3630,33 @@
  */
 MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
 
-enum mlxsw_reg_sbpm_dir {
-	MLXSW_REG_SBPM_DIR_INGRESS,
-	MLXSW_REG_SBPM_DIR_EGRESS,
-};
-
 /* reg_sbpm_dir
  * Direction.
  * Access: Index
  */
 MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
 
+/* reg_sbpm_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24);
+
+/* reg_sbpm_clr
+ * Clear Max Buffer Occupancy.
+ * When this bit is set, max_buff_occupancy field is cleared (and a
+ * new max value is tracked from the time the clear was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1);
+
+/* reg_sbpm_max_buff_occupancy
+ * Maximum value of buffer occupancy in cells monitored. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24);
+
 /* reg_sbpm_min_buff
  * Minimum buffer size for the limiter, in cells.
  * Access: RW
@@ -3170,17 +3677,25 @@
 MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
 
 static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
-				       enum mlxsw_reg_sbpm_dir dir,
+				       enum mlxsw_reg_sbxx_dir dir, bool clr,
 				       u32 min_buff, u32 max_buff)
 {
 	MLXSW_REG_ZERO(sbpm, payload);
 	mlxsw_reg_sbpm_local_port_set(payload, local_port);
 	mlxsw_reg_sbpm_pool_set(payload, pool);
 	mlxsw_reg_sbpm_dir_set(payload, dir);
+	mlxsw_reg_sbpm_clr_set(payload, clr);
 	mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
 	mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
 }
 
+static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy,
+					 u32 *p_max_buff_occupancy)
+{
+	*p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload);
+	*p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload);
+}
+
 /* SBMM - Shared Buffer Multicast Management Register
  * --------------------------------------------------
  * The SBMM register configures and retrieves the shared buffer allocation
@@ -3236,6 +3751,104 @@
 	mlxsw_reg_sbmm_pool_set(payload, pool);
 }
 
+/* SBSR - Shared Buffer Status Register
+ * ------------------------------------
+ * The SBSR register retrieves the shared buffer occupancy according to
+ * Port-Pool. Note that this register enables reading a large amount of data.
+ * It is the user's responsibility to limit the amount of data to ensure the
+ * response can match the maximum transfer unit. In case the response exceeds
+ * the maximum transfer unit, it will be truncated with no special notice.
+ */
+#define MLXSW_REG_SBSR_ID 0xB005
+#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */
+#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_SBSR_REC_MAX_COUNT 120
+#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN +	\
+			    MLXSW_REG_SBSR_REC_LEN *	\
+			    MLXSW_REG_SBSR_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sbsr = {
+	.id = MLXSW_REG_SBSR_ID,
+	.len = MLXSW_REG_SBSR_LEN,
+};
+
+/* reg_sbsr_clr
+ * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
+ * field is cleared (and a new max value is tracked from the time the clear
+ * was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
+
+/* reg_sbsr_ingress_port_mask
+ * Bit vector for all ingress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1);
+
+/* reg_sbsr_pg_buff_mask
+ * Bit vector for all switch priority groups.
+ * Indicates which of the priorities (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other priority
+ * does not change.
+ * Range is 0..cap_max_pg_buffers - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1);
+
+/* reg_sbsr_egress_port_mask
+ * Bit vector for all egress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1);
+
+/* reg_sbsr_tclass_mask
+ * Bit vector for all traffic classes.
+ * Indicates which of the traffic classes (for which the relevant bit is
+ * set) are affected by the set operation. Configuration of any other
+ * traffic class does not change.
+ * Range is 0..cap_max_tclass - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1);
+
+static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr)
+{
+	MLXSW_REG_ZERO(sbsr, payload);
+	mlxsw_reg_sbsr_clr_set(payload, clr);
+}
+
+/* reg_sbsr_rec_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+		     0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false);
+
+/* reg_sbsr_rec_max_buff_occupancy
+ * Maximum monitored value of buffer occupancy, in cells. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+		     0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
+					     u32 *p_buff_occupancy,
+					     u32 *p_max_buff_occupancy)
+{
+	*p_buff_occupancy =
+		mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index);
+	*p_max_buff_occupancy =
+		mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
+}
+
 static inline const char *mlxsw_reg_id_str(u16 reg_id)
 {
 	switch (reg_id) {
@@ -3283,6 +3896,10 @@
 		return "SFMR";
 	case MLXSW_REG_SPVMLR_ID:
 		return "SPVMLR";
+	case MLXSW_REG_QTCT_ID:
+		return "QTCT";
+	case MLXSW_REG_QEEC_ID:
+		return "QEEC";
 	case MLXSW_REG_PMLP_ID:
 		return "PMLP";
 	case MLXSW_REG_PMTU_ID:
@@ -3293,8 +3910,12 @@
 		return "PPAD";
 	case MLXSW_REG_PAOS_ID:
 		return "PAOS";
+	case MLXSW_REG_PFCC_ID:
+		return "PFCC";
 	case MLXSW_REG_PPCNT_ID:
 		return "PPCNT";
+	case MLXSW_REG_PPTB_ID:
+		return "PPTB";
 	case MLXSW_REG_PBMC_ID:
 		return "PBMC";
 	case MLXSW_REG_PSPA_ID:
@@ -3323,6 +3944,8 @@
 		return "SBPM";
 	case MLXSW_REG_SBMM_ID:
 		return "SBMM";
+	case MLXSW_REG_SBSR_ID:
+		return "SBSR";
 	default:
 		return "*UNKNOWN*";
 	}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4afbc3e..4a72737 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -49,7 +49,7 @@
 #include <linux/jiffies.h>
 #include <linux/bitops.h>
 #include <linux/list.h>
-#include <net/devlink.h>
+#include <linux/dcbnl.h>
 #include <net/switchdev.h>
 #include <generated/utsrelease.h>
 
@@ -305,9 +305,9 @@
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
 }
 
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
-					 u8 local_port, u8 *p_module,
-					 u8 *p_width)
+static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+					   u8 local_port, u8 *p_module,
+					   u8 *p_width, u8 *p_lane)
 {
 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
 	int err;
@@ -318,9 +318,20 @@
 		return err;
 	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
 	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
 	return 0;
 }
 
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+					 u8 local_port, u8 *p_module,
+					 u8 *p_width)
+{
+	u8 lane;
+
+	return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
+					       p_width, &lane);
+}
+
 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				    u8 module, u8 width, u8 lane)
 {
@@ -379,7 +390,7 @@
 	u64 len;
 	int err;
 
-	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
+	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
 		return NETDEV_TX_BUSY;
 
 	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
@@ -403,7 +414,7 @@
 	/* Due to a race we might fail here because of a full queue. In that
 	 * unlikely case we simply drop the packet.
 	 */
-	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
+	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
 
 	if (!err) {
 		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
@@ -438,16 +449,89 @@
 	return 0;
 }
 
+static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
+				 bool pause_en, bool pfc_en, u16 delay)
+{
+	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
+
+	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
+			 MLXSW_SP_PAUSE_DELAY;
+
+	if (pause_en || pfc_en)
+		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
+						    pg_size + delay, pg_size);
+	else
+		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
+}
+
+int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
+				 u8 *prio_tc, bool pause_en,
+				 struct ieee_pfc *my_pfc)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
+	u16 delay = !!my_pfc ? my_pfc->delay : 0;
+	char pbmc_pl[MLXSW_REG_PBMC_LEN];
+	int i, j, err;
+
+	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+	if (err)
+		return err;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		bool configure = false;
+		bool pfc = false;
+
+		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
+			if (prio_tc[j] == i) {
+				pfc = pfc_en & BIT(j);
+				configure = true;
+				break;
+			}
+		}
+
+		if (!configure)
+			continue;
+		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
+	}
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+}
+
+static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				      int mtu, bool pause_en)
+{
+	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
+	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
+	struct ieee_pfc *my_pfc;
+	u8 *prio_tc;
+
+	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
+	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
+
+	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
+					    pause_en, my_pfc);
+}
+
 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
 	int err;
 
-	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
 	if (err)
 		return err;
+	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+	if (err)
+		goto err_port_mtu_set;
 	dev->mtu = mtu;
 	return 0;
+
+err_port_mtu_set:
+	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+	return err;
 }
 
 static struct rtnl_link_stats64 *
@@ -861,6 +945,33 @@
 	return 0;
 }
 
+static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
+					    size_t len)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	u8 module, width, lane;
+	int err;
+
+	err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
+					      mlxsw_sp_port->local_port,
+					      &module, &width, &lane);
+	if (err) {
+		netdev_err(dev, "Failed to retrieve module information\n");
+		return err;
+	}
+
+	if (!mlxsw_sp_port->split)
+		err = snprintf(name, len, "p%d", module + 1);
+	else
+		err = snprintf(name, len, "p%ds%d", module + 1,
+			       lane / width);
+
+	if (err >= len)
+		return -EINVAL;
+
+	return 0;
+}
+
 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
 	.ndo_open		= mlxsw_sp_port_open,
 	.ndo_stop		= mlxsw_sp_port_stop,
@@ -877,6 +988,7 @@
 	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
 	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
 	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
+	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
 };
 
 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -897,6 +1009,68 @@
 		sizeof(drvinfo->bus_info));
 }
 
+static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
+					 struct ethtool_pauseparam *pause)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
+	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
+}
+
+static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				   struct ethtool_pauseparam *pause)
+{
+	char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
+	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
+
+	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+			       pfcc_pl);
+}
+
+static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
+					struct ethtool_pauseparam *pause)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	bool pause_en = pause->tx_pause || pause->rx_pause;
+	int err;
+
+	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
+		netdev_err(dev, "PFC already enabled on port\n");
+		return -EINVAL;
+	}
+
+	if (pause->autoneg) {
+		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
+		return -EINVAL;
+	}
+
+	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+	if (err) {
+		netdev_err(dev, "Failed to configure port's headroom\n");
+		return err;
+	}
+
+	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
+	if (err) {
+		netdev_err(dev, "Failed to set PAUSE parameters\n");
+		goto err_port_pause_configure;
+	}
+
+	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
+	mlxsw_sp_port->link.tx_pause = pause->tx_pause;
+
+	return 0;
+
+err_port_pause_configure:
+	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+	return err;
+}
+
 struct mlxsw_sp_port_hw_stats {
 	char str[ETH_GSTRING_LEN];
 	u64 (*getter)(char *payload);
@@ -1032,7 +1206,8 @@
 	int i;
 	int err;
 
-	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
+	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
+			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
 	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
 		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
@@ -1380,6 +1555,8 @@
 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
 	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
+	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
+	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
 	.get_strings		= mlxsw_sp_port_get_strings,
 	.set_phys_id		= mlxsw_sp_port_set_phys_id,
 	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
@@ -1402,12 +1579,112 @@
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
 }
 
+int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
+			  bool dwrr, u8 dwrr_weight)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+			    next_index);
+	mlxsw_reg_qeec_de_set(qeec_pl, true);
+	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
+	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				  enum mlxsw_reg_qeec_hr hr, u8 index,
+				  u8 next_index, u32 maxrate)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+			    next_index);
+	mlxsw_reg_qeec_mase_set(qeec_pl, true);
+	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+			      u8 switch_prio, u8 tclass)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char qtct_pl[MLXSW_REG_QTCT_LEN];
+
+	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
+			    tclass);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
+}
+
+static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	int err, i;
+
+	/* Set up the elements hierarchy, so that each TC is linked to
+	 * one subgroup, and all subgroups are members of the same group.
+	 */
+	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
+				    0);
+	if (err)
+		return err;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+					    0, false, 0);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
+					    false, 0);
+		if (err)
+			return err;
+	}
+
+	/* Make sure the max shaper is disabled in all hierarchies that
+	 * support it.
+	 */
+	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
+					    MLXSW_REG_QEEC_MAS_DIS);
+	if (err)
+		return err;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+						    i, 0,
+						    MLXSW_REG_QEEC_MAS_DIS);
+		if (err)
+			return err;
+	}
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+						    MLXSW_REG_QEEC_HIERARCY_TC,
+						    i, i,
+						    MLXSW_REG_QEEC_MAS_DIS);
+		if (err)
+			return err;
+	}
+
+	/* Map all priorities to traffic class 0. */
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				  bool split, u8 module, u8 width)
 {
-	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
 	struct mlxsw_sp_port *mlxsw_sp_port;
-	struct devlink_port *devlink_port;
 	struct net_device *dev;
 	size_t bytes;
 	int err;
@@ -1460,16 +1737,6 @@
 	 */
 	dev->hard_header_len += MLXSW_TXHDR_LEN;
 
-	devlink_port = &mlxsw_sp_port->devlink_port;
-	if (mlxsw_sp_port->split)
-		devlink_port_split_set(devlink_port, module);
-	err = devlink_port_register(devlink, devlink_port, local_port);
-	if (err) {
-		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
-			mlxsw_sp_port->local_port);
-		goto err_devlink_port_register;
-	}
-
 	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1509,6 +1776,21 @@
 		goto err_port_buffers_init;
 	}
 
+	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_ets_init;
+	}
+
+	/* ETS and buffers must be initialized before DCB. */
+	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_dcb_init;
+	}
+
 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
 	err = register_netdev(dev);
 	if (err) {
@@ -1517,7 +1799,14 @@
 		goto err_register_netdev;
 	}
 
-	devlink_port_type_eth_set(devlink_port, dev);
+	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
+				   mlxsw_sp_port->local_port, dev,
+				   mlxsw_sp_port->split, module);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
+			mlxsw_sp_port->local_port);
+		goto err_core_port_init;
+	}
 
 	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
 	if (err)
@@ -1527,16 +1816,18 @@
 	return 0;
 
 err_port_vlan_init:
+	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
+err_core_port_init:
 	unregister_netdev(dev);
 err_register_netdev:
+err_port_dcb_init:
+err_port_ets_init:
 err_port_buffers_init:
 err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_by_width_set:
 err_port_swid_set:
 err_port_system_port_mapping_set:
-	devlink_port_unregister(&mlxsw_sp_port->devlink_port);
-err_devlink_port_register:
 err_dev_addr_init:
 	free_percpu(mlxsw_sp_port->pcpu_stats);
 err_alloc_stats:
@@ -1590,15 +1881,13 @@
 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
-	struct devlink_port *devlink_port;
 
 	if (!mlxsw_sp_port)
 		return;
 	mlxsw_sp->ports[local_port] = NULL;
-	devlink_port = &mlxsw_sp_port->devlink_port;
-	devlink_port_type_clear(devlink_port);
+	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
-	devlink_port_unregister(devlink_port);
+	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -1659,9 +1948,10 @@
 	return local_port - offset;
 }
 
-static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
+static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
+			       unsigned int count)
 {
-	struct mlxsw_sp *mlxsw_sp = priv;
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
 	u8 module, cur_width, base_port;
@@ -1733,9 +2023,9 @@
 	return err;
 }
 
-static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
+static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
 {
-	struct mlxsw_sp *mlxsw_sp = priv;
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	u8 module, cur_width, base_port;
 	unsigned int count;
@@ -2080,10 +2370,10 @@
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
 }
 
-static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
+static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
 			 const struct mlxsw_bus_info *mlxsw_bus_info)
 {
-	struct mlxsw_sp *mlxsw_sp = priv;
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 	int err;
 
 	mlxsw_sp->core = mlxsw_core;
@@ -2144,6 +2434,7 @@
 
 err_switchdev_init:
 err_lag_init:
+	mlxsw_sp_buffers_fini(mlxsw_sp);
 err_buffers_init:
 err_flood_init:
 	mlxsw_sp_traps_fini(mlxsw_sp);
@@ -2154,11 +2445,12 @@
 	return err;
 }
 
-static void mlxsw_sp_fini(void *priv)
+static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
 {
-	struct mlxsw_sp *mlxsw_sp = priv;
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 
 	mlxsw_sp_switchdev_fini(mlxsw_sp);
+	mlxsw_sp_buffers_fini(mlxsw_sp);
 	mlxsw_sp_traps_fini(mlxsw_sp);
 	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
 	mlxsw_sp_ports_remove(mlxsw_sp);
@@ -2201,16 +2493,26 @@
 };
 
 static struct mlxsw_driver mlxsw_sp_driver = {
-	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
-	.owner			= THIS_MODULE,
-	.priv_size		= sizeof(struct mlxsw_sp),
-	.init			= mlxsw_sp_init,
-	.fini			= mlxsw_sp_fini,
-	.port_split		= mlxsw_sp_port_split,
-	.port_unsplit		= mlxsw_sp_port_unsplit,
-	.txhdr_construct	= mlxsw_sp_txhdr_construct,
-	.txhdr_len		= MLXSW_TXHDR_LEN,
-	.profile		= &mlxsw_sp_config_profile,
+	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
+	.owner				= THIS_MODULE,
+	.priv_size			= sizeof(struct mlxsw_sp),
+	.init				= mlxsw_sp_init,
+	.fini				= mlxsw_sp_fini,
+	.port_split			= mlxsw_sp_port_split,
+	.port_unsplit			= mlxsw_sp_port_unsplit,
+	.sb_pool_get			= mlxsw_sp_sb_pool_get,
+	.sb_pool_set			= mlxsw_sp_sb_pool_set,
+	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
+	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
+	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
+	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
+	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
+	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
+	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
+	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
+	.txhdr_construct		= mlxsw_sp_txhdr_construct,
+	.txhdr_len			= MLXSW_TXHDR_LEN,
+	.profile			= &mlxsw_sp_config_profile,
 };
 
 static int
@@ -2541,11 +2843,11 @@
 	lag->ref_count++;
 	return 0;
 
+err_col_port_enable:
+	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
 err_col_port_add:
 	if (!lag->ref_count)
 		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
-err_col_port_enable:
-	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
 	return err;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4b8abaf..e2c022d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -42,15 +42,15 @@
 #include <linux/bitops.h>
 #include <linux/if_vlan.h>
 #include <linux/list.h>
+#include <linux/dcbnl.h>
 #include <net/switchdev.h>
-#include <net/devlink.h>
 
 #include "port.h"
 #include "core.h"
 
 #define MLXSW_SP_VFID_BASE VLAN_N_VID
 #define MLXSW_SP_VFID_PORT_MAX 512	/* Non-bridged VLAN interfaces */
-#define MLXSW_SP_VFID_BR_MAX 8192	/* Bridged VLAN interfaces */
+#define MLXSW_SP_VFID_BR_MAX 6144	/* Bridged VLAN interfaces */
 #define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX)
 
 #define MLXSW_SP_LAG_MAX 64
@@ -62,6 +62,24 @@
 
 #define MLXSW_SP_PORT_BASE_SPEED 25000	/* Mb/s */
 
+#define MLXSW_SP_BYTES_PER_CELL 96
+
+#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
+#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
+
+/* Maximum delay buffer needed in case of PAUSE frames, in cells.
+ * Assumes 100m cable and maximum MTU.
+ */
+#define MLXSW_SP_PAUSE_DELAY 612
+
+#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */
+
+static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
+{
+	delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
+	return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
+}
+
 struct mlxsw_sp_port;
 
 struct mlxsw_sp_upper {
@@ -100,6 +118,40 @@
 	return fid >= MLXSW_SP_VFID_BASE;
 }
 
+struct mlxsw_sp_sb_pr {
+	enum mlxsw_reg_sbpr_mode mode;
+	u32 size;
+};
+
+struct mlxsw_cp_sb_occ {
+	u32 cur;
+	u32 max;
+};
+
+struct mlxsw_sp_sb_cm {
+	u32 min_buff;
+	u32 max_buff;
+	u8 pool;
+	struct mlxsw_cp_sb_occ occ;
+};
+
+struct mlxsw_sp_sb_pm {
+	u32 min_buff;
+	u32 max_buff;
+	struct mlxsw_cp_sb_occ occ;
+};
+
+#define MLXSW_SP_SB_POOL_COUNT	4
+#define MLXSW_SP_SB_TC_COUNT	8
+
+struct mlxsw_sp_sb {
+	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
+	struct {
+		struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+		struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+	} ports[MLXSW_PORT_MAX_PORTS];
+};
+
 struct mlxsw_sp {
 	struct {
 		struct list_head list;
@@ -130,6 +182,7 @@
 	struct mlxsw_sp_upper master_bridge;
 	struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
 	u8 port_to_module[MLXSW_PORT_MAX_PORTS];
+	struct mlxsw_sp_sb sb;
 };
 
 static inline struct mlxsw_sp_upper *
@@ -148,6 +201,7 @@
 };
 
 struct mlxsw_sp_port {
+	struct mlxsw_core_port core_port; /* must be first */
 	struct net_device *dev;
 	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
 	struct mlxsw_sp *mlxsw_sp;
@@ -166,14 +220,28 @@
 		struct mlxsw_sp_vfid *vfid;
 		u16 vid;
 	} vport;
+	struct {
+		u8 tx_pause:1,
+		   rx_pause:1;
+	} link;
+	struct {
+		struct ieee_ets *ets;
+		struct ieee_maxrate *maxrate;
+		struct ieee_pfc *pfc;
+	} dcb;
 	/* 802.1Q bridge VLANs */
 	unsigned long *active_vlans;
 	unsigned long *untagged_vlans;
 	/* VLAN interfaces */
 	struct list_head vports_list;
-	struct devlink_port devlink_port;
 };
 
+static inline bool
+mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
+}
+
 static inline struct mlxsw_sp_port *
 mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
 {
@@ -245,7 +313,39 @@
 };
 
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+			 unsigned int sb_index, u16 pool_index,
+			 struct devlink_sb_pool_info *pool_info);
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+			 unsigned int sb_index, u16 pool_index, u32 size,
+			 enum devlink_sb_threshold_type threshold_type);
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+			      unsigned int sb_index, u16 pool_index,
+			      u32 *p_threshold);
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+			      unsigned int sb_index, u16 pool_index,
+			      u32 threshold);
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+				 unsigned int sb_index, u16 tc_index,
+				 enum devlink_sb_pool_type pool_type,
+				 u16 *p_pool_index, u32 *p_threshold);
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+				 unsigned int sb_index, u16 tc_index,
+				 enum devlink_sb_pool_type pool_type,
+				 u16 pool_index, u32 threshold);
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+			     unsigned int sb_index);
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+			      unsigned int sb_index);
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+				  unsigned int sb_index, u16 pool_index,
+				  u32 *p_cur, u32 *p_max);
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+				     unsigned int sb_index, u16 tc_index,
+				     enum devlink_sb_pool_type pool_type,
+				     u32 *p_cur, u32 *p_max);
 
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
@@ -265,5 +365,33 @@
 			     bool set, bool only_uc);
 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
+			  bool dwrr, u8 dwrr_weight);
+int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+			      u8 switch_prio, u8 tclass);
+int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
+				 u8 *prio_tc, bool pause_en,
+				 struct ieee_pfc *my_pfc);
+int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				  enum mlxsw_reg_qeec_hr hr, u8 index,
+				  u8 next_index, u32 maxrate);
+
+#ifdef CONFIG_MLXSW_SPECTRUM_DCB
+
+int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+
+#else
+
+static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	return 0;
+}
+
+static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{}
+
+#endif
 
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index d59195e..a3720a0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -34,36 +34,140 @@
 
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/dcbnl.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
 
 #include "spectrum.h"
 #include "core.h"
 #include "port.h"
 #include "reg.h"
 
-struct mlxsw_sp_pb {
-	u8 index;
-	u16 size;
-};
+static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
+						 u8 pool,
+						 enum mlxsw_reg_sbxx_dir dir)
+{
+	return &mlxsw_sp->sb.prs[dir][pool];
+}
 
-#define MLXSW_SP_PB(_index, _size)	\
-	{				\
-		.index = _index,	\
-		.size = _size,		\
+static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
+						 u8 local_port, u8 pg_buff,
+						 enum mlxsw_reg_sbxx_dir dir)
+{
+	return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
+}
+
+static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
+						 u8 local_port, u8 pool,
+						 enum mlxsw_reg_sbxx_dir dir)
+{
+	return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
+}
+
+static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
+				enum mlxsw_reg_sbxx_dir dir,
+				enum mlxsw_reg_sbpr_mode mode, u32 size)
+{
+	char sbpr_pl[MLXSW_REG_SBPR_LEN];
+	struct mlxsw_sp_sb_pr *pr;
+	int err;
+
+	mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+	if (err)
+		return err;
+
+	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+	pr->mode = mode;
+	pr->size = size;
+	return 0;
+}
+
+static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
+				u32 min_buff, u32 max_buff, u8 pool)
+{
+	char sbcm_pl[MLXSW_REG_SBCM_LEN];
+	int err;
+
+	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
+			    min_buff, max_buff, pool);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+	if (err)
+		return err;
+	if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
+		struct mlxsw_sp_sb_cm *cm;
+
+		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
+		cm->min_buff = min_buff;
+		cm->max_buff = max_buff;
+		cm->pool = pool;
 	}
+	return 0;
+}
 
-static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
-	MLXSW_SP_PB(0, 208),
-	MLXSW_SP_PB(1, 208),
-	MLXSW_SP_PB(2, 208),
-	MLXSW_SP_PB(3, 208),
-	MLXSW_SP_PB(4, 208),
-	MLXSW_SP_PB(5, 208),
-	MLXSW_SP_PB(6, 208),
-	MLXSW_SP_PB(7, 208),
-	MLXSW_SP_PB(9, 208),
+static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				u8 pool, enum mlxsw_reg_sbxx_dir dir,
+				u32 min_buff, u32 max_buff)
+{
+	char sbpm_pl[MLXSW_REG_SBPM_LEN];
+	struct mlxsw_sp_sb_pm *pm;
+	int err;
+
+	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
+			    min_buff, max_buff);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
+	if (err)
+		return err;
+
+	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+	pm->min_buff = min_buff;
+	pm->max_buff = max_buff;
+	return 0;
+}
+
+static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
+				    struct list_head *bulk_list)
+{
+	char sbpm_pl[MLXSW_REG_SBPM_LEN];
+
+	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
+	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+				     bulk_list, NULL, 0);
+}
+
+static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
+					char *sbpm_pl, size_t sbpm_pl_len,
+					unsigned long cb_priv)
+{
+	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
+
+	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
+}
+
+static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
+				    struct list_head *bulk_list)
+{
+	char sbpm_pl[MLXSW_REG_SBPM_LEN];
+	struct mlxsw_sp_sb_pm *pm;
+
+	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
+	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+				     bulk_list,
+				     mlxsw_sp_sb_pm_occ_query_cb,
+				     (unsigned long) pm);
+}
+
+static const u16 mlxsw_sp_pbs[] = {
+	[0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
+	[9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
 };
 
 #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+#define MLXSW_SP_PB_UNUSED 8
 
 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
@@ -73,194 +177,206 @@
 	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
 			    0xffff, 0xffff / 2);
 	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
-		const struct mlxsw_sp_pb *pb;
-
-		pb = &mlxsw_sp_pbs[i];
-		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+		if (i == MLXSW_SP_PB_UNUSED)
+			continue;
+		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
 	}
+	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
+					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
 			       MLXSW_REG(pbmc), pbmc_pl);
 }
 
-#define MLXSW_SP_SB_BYTES_PER_CELL 96
+static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	char pptb_pl[MLXSW_REG_PPTB_LEN];
+	int i;
 
-struct mlxsw_sp_sb_pool {
-	u8 pool;
-	enum mlxsw_reg_sbpr_dir dir;
-	enum mlxsw_reg_sbpr_mode mode;
-	u32 size;
-};
+	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0);
+	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
+			       pptb_pl);
+}
 
-#define MLXSW_SP_SB_POOL_INGRESS_SIZE				\
-	((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) /	\
-	 MLXSW_SP_SB_BYTES_PER_CELL)
-#define MLXSW_SP_SB_POOL_EGRESS_SIZE				\
-	((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) /	\
-	 MLXSW_SP_SB_BYTES_PER_CELL)
+static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	int err;
 
-#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size)		\
-	{							\
-		.pool = _pool,					\
-		.dir = _dir,					\
-		.mode = _mode,					\
-		.size = _size,					\
+	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+	if (err)
+		return err;
+	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
+}
+
+#define MLXSW_SP_SB_PR_INGRESS_SIZE				\
+	(15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
+#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
+#define MLXSW_SP_SB_PR_EGRESS_SIZE				\
+	(14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))
+
+#define MLXSW_SP_SB_PR(_mode, _size)	\
+	{				\
+		.mode = _mode,		\
+		.size = _size,		\
 	}
 
-#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size)			\
-	MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS,	\
-			 MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
-
-#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size)			\
-	MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS,	\
-			 MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
-
-static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
-	MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE),
-	MLXSW_SP_SB_POOL_INGRESS(1, 0),
-	MLXSW_SP_SB_POOL_INGRESS(2, 0),
-	MLXSW_SP_SB_POOL_INGRESS(3, 0),
-	MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE),
-	MLXSW_SP_SB_POOL_EGRESS(1, 0),
-	MLXSW_SP_SB_POOL_EGRESS(2, 0),
-	MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
+	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+		       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
+	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+		       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
 };
 
-#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
+#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
 
-static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp)
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
+	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+		       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
+	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+};
+
+#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
+
+static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
+				  enum mlxsw_reg_sbxx_dir dir,
+				  const struct mlxsw_sp_sb_pr *prs,
+				  size_t prs_len)
 {
-	char sbpr_pl[MLXSW_REG_SBPR_LEN];
 	int i;
 	int err;
 
-	for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) {
-		const struct mlxsw_sp_sb_pool *pool;
+	for (i = 0; i < prs_len; i++) {
+		const struct mlxsw_sp_sb_pr *pr;
 
-		pool = &mlxsw_sp_sb_pools[i];
-		mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,
-				    pool->mode, pool->size);
-		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+		pr = &prs[i];
+		err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
+					   pr->mode, pr->size);
 		if (err)
 			return err;
 	}
 	return 0;
 }
 
-struct mlxsw_sp_sb_cm {
-	union {
-		u8 pg;
-		u8 tc;
-	} u;
-	enum mlxsw_reg_sbcm_dir dir;
-	u32 min_buff;
-	u32 max_buff;
-	u8 pool;
-};
+static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
+{
+	int err;
 
-#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool)	\
-	{								\
-		.u.pg = _pg_tc,						\
-		.dir = _dir,						\
-		.min_buff = _min_buff,					\
-		.max_buff = _max_buff,					\
-		.pool = _pool,						\
+	err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
+				     mlxsw_sp_sb_prs_ingress,
+				     MLXSW_SP_SB_PRS_INGRESS_LEN);
+	if (err)
+		return err;
+	return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
+				      mlxsw_sp_sb_prs_egress,
+				      MLXSW_SP_SB_PRS_EGRESS_LEN);
+}
+
+#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
+	{						\
+		.min_buff = _min_buff,			\
+		.max_buff = _max_buff,			\
+		.pool = _pool,				\
 	}
 
-#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff)		\
-	MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS,			\
-		       _min_buff, _max_buff, 0)
-
-#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff)		\
-	MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS,			\
-		       _min_buff, _max_buff, 0)
-
-#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc)				\
-	MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3)
-
-static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
-	MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8),
-	MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
-	MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
-	MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
-	MLXSW_SP_SB_CM_INGRESS(4, 0, 0),
-	MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
-	MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
-	MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
-	MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff),
-	MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
-	MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
-	MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
-	MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
-	MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
-	MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
-	MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
-	MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
-	MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
-	MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
-	MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
-	MLXSW_SP_SB_CM_EGRESS(11, 0, 0),
-	MLXSW_SP_SB_CM_EGRESS(12, 0, 0),
-	MLXSW_SP_SB_CM_EGRESS(13, 0, 0),
-	MLXSW_SP_SB_CM_EGRESS(14, 0, 0),
-	MLXSW_SP_SB_CM_EGRESS(15, 0, 0),
-	MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff),
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
+	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
 };
 
-#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms)
+#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+	MLXSW_SP_SB_CM(0, 0, 0),
+	MLXSW_SP_SB_CM(0, 0, 0),
+	MLXSW_SP_SB_CM(0, 0, 0),
+	MLXSW_SP_SB_CM(0, 0, 0),
+	MLXSW_SP_SB_CM(0, 0, 0),
+	MLXSW_SP_SB_CM(0, 0, 0),
+	MLXSW_SP_SB_CM(0, 0, 0),
+	MLXSW_SP_SB_CM(0, 0, 0),
+	MLXSW_SP_SB_CM(1, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
+
+#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
 
 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30),
-	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31),
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_CPU_PORT_SB_CM,
 };
 
 #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
 	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
 
-static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-				const struct mlxsw_sp_sb_cm *cms,
-				size_t cms_len)
+static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				  enum mlxsw_reg_sbxx_dir dir,
+				  const struct mlxsw_sp_sb_cm *cms,
+				  size_t cms_len)
 {
-	char sbcm_pl[MLXSW_REG_SBCM_LEN];
 	int i;
 	int err;
 
 	for (i = 0; i < cms_len; i++) {
 		const struct mlxsw_sp_sb_cm *cm;
 
+		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
+			continue; /* PG number 8 does not exist, skip it */
 		cm = &cms[i];
-		mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir,
-				    cm->min_buff, cm->max_buff, cm->pool);
-		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
+					   cm->min_buff, cm->max_buff,
+					   cm->pool);
 		if (err)
 			return err;
 	}
@@ -269,105 +385,120 @@
 
 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
-	return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
-				    mlxsw_sp_port->local_port, mlxsw_sp_sb_cms,
-				    MLXSW_SP_SB_CMS_LEN);
+	int err;
+
+	err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+				     mlxsw_sp_port->local_port,
+				     MLXSW_REG_SBXX_DIR_INGRESS,
+				     mlxsw_sp_sb_cms_ingress,
+				     MLXSW_SP_SB_CMS_INGRESS_LEN);
+	if (err)
+		return err;
+	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+				      mlxsw_sp_port->local_port,
+				      MLXSW_REG_SBXX_DIR_EGRESS,
+				      mlxsw_sp_sb_cms_egress,
+				      MLXSW_SP_SB_CMS_EGRESS_LEN);
 }
 
 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
 {
-	return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms,
-				    MLXSW_SP_CPU_PORT_SB_MCS_LEN);
+	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
+				      mlxsw_sp_cpu_port_sb_cms,
+				      MLXSW_SP_CPU_PORT_SB_MCS_LEN);
 }
 
-struct mlxsw_sp_sb_pm {
-	u8 pool;
-	enum mlxsw_reg_sbpm_dir dir;
-	u32 min_buff;
-	u32 max_buff;
-};
-
-#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff)	\
-	{							\
-		.pool = _pool,					\
-		.dir = _dir,					\
-		.min_buff = _min_buff,				\
-		.max_buff = _max_buff,				\
+#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
+	{					\
+		.min_buff = _min_buff,		\
+		.max_buff = _max_buff,		\
 	}
 
-#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff)	\
-	MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS,	\
-		       _min_buff, _max_buff)
-
-#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff)	\
-	MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS,	\
-		       _min_buff, _max_buff)
-
-static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
-	MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff),
-	MLXSW_SP_SB_PM_INGRESS(1, 0, 0),
-	MLXSW_SP_SB_PM_INGRESS(2, 0, 0),
-	MLXSW_SP_SB_PM_INGRESS(3, 0, 0),
-	MLXSW_SP_SB_PM_EGRESS(0, 0, 7),
-	MLXSW_SP_SB_PM_EGRESS(1, 0, 0),
-	MLXSW_SP_SB_PM_EGRESS(2, 0, 0),
-	MLXSW_SP_SB_PM_EGRESS(3, 0, 0),
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
+	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
+	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
 };
 
-#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
 
-static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
+	MLXSW_SP_SB_PM(0, 7),
+	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+};
+
+#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
+
+static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				       enum mlxsw_reg_sbxx_dir dir,
+				       const struct mlxsw_sp_sb_pm *pms,
+				       size_t pms_len)
 {
-	char sbpm_pl[MLXSW_REG_SBPM_LEN];
 	int i;
 	int err;
 
-	for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+	for (i = 0; i < pms_len; i++) {
 		const struct mlxsw_sp_sb_pm *pm;
 
-		pm = &mlxsw_sp_sb_pms[i];
-		mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port,
-				    pm->pool, pm->dir,
-				    pm->min_buff, pm->max_buff);
-		err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
-				      MLXSW_REG(sbpm), sbpm_pl);
+		pm = &pms[i];
+		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
+					   pm->min_buff, pm->max_buff);
 		if (err)
 			return err;
 	}
 	return 0;
 }
 
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	int err;
+
+	err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
+					  mlxsw_sp_port->local_port,
+					  MLXSW_REG_SBXX_DIR_INGRESS,
+					  mlxsw_sp_sb_pms_ingress,
+					  MLXSW_SP_SB_PMS_INGRESS_LEN);
+	if (err)
+		return err;
+	return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
+					   mlxsw_sp_port->local_port,
+					   MLXSW_REG_SBXX_DIR_EGRESS,
+					   mlxsw_sp_sb_pms_egress,
+					   MLXSW_SP_SB_PMS_EGRESS_LEN);
+}
+
 struct mlxsw_sp_sb_mm {
-	u8 prio;
 	u32 min_buff;
 	u32 max_buff;
 	u8 pool;
 };
 
-#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool)	\
-	{							\
-		.prio = _prio,					\
-		.min_buff = _min_buff,				\
-		.max_buff = _max_buff,				\
-		.pool = _pool,					\
+#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
+	{						\
+		.min_buff = _min_buff,			\
+		.max_buff = _max_buff,			\
+		.pool = _pool,				\
 	}
 
 static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
-	MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
-	MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
 };
 
 #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -382,7 +513,7 @@
 		const struct mlxsw_sp_sb_mm *mc;
 
 		mc = &mlxsw_sp_sb_mms[i];
-		mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+		mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
 				    mc->max_buff, mc->pool);
 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
 		if (err)
@@ -391,26 +522,39 @@
 	return 0;
 }
 
+#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)
+
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 {
 	int err;
 
-	err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+	err = mlxsw_sp_sb_prs_init(mlxsw_sp);
 	if (err)
 		return err;
 	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
 	if (err)
 		return err;
 	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+	if (err)
+		return err;
+	return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+				   MLXSW_SP_SB_SIZE,
+				   MLXSW_SP_SB_POOL_COUNT,
+				   MLXSW_SP_SB_POOL_COUNT,
+				   MLXSW_SP_SB_TC_COUNT,
+				   MLXSW_SP_SB_TC_COUNT);
+}
 
-	return err;
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
 }
 
 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
 	int err;
 
-	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
 	if (err)
 		return err;
 	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
@@ -420,3 +564,394 @@
 
 	return err;
 }
+
+static u8 pool_get(u16 pool_index)
+{
+	return pool_index % MLXSW_SP_SB_POOL_COUNT;
+}
+
+static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
+{
+	u16 pool_index;
+
+	pool_index = pool;
+	if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
+		pool_index += MLXSW_SP_SB_POOL_COUNT;
+	return pool_index;
+}
+
+static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
+{
+	return pool_index < MLXSW_SP_SB_POOL_COUNT ?
+	       MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
+}
+
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+			 unsigned int sb_index, u16 pool_index,
+			 struct devlink_sb_pool_info *pool_info)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+	u8 pool = pool_get(pool_index);
+	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+	pool_info->pool_type = dir;
+	pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
+	pool_info->threshold_type = pr->mode;
+	return 0;
+}
+
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+			 unsigned int sb_index, u16 pool_index, u32 size,
+			 enum devlink_sb_threshold_type threshold_type)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+	u8 pool = pool_get(pool_index);
+	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+	enum mlxsw_reg_sbpr_mode mode = threshold_type;
+	u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+
+	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
+}
+
+#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
+
+static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
+				     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
+{
+	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
+		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+	return MLXSW_SP_CELLS_TO_BYTES(max_buff);
+}
+
+static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
+				    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
+				    u32 *p_max_buff)
+{
+	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
+		int val;
+
+		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
+		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
+			return -EINVAL;
+		*p_max_buff = val;
+	} else {
+		*p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
+	}
+	return 0;
+}
+
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+			      unsigned int sb_index, u16 pool_index,
+			      u32 *p_threshold)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port =
+			mlxsw_core_port_driver_priv(mlxsw_core_port);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 local_port = mlxsw_sp_port->local_port;
+	u8 pool = pool_get(pool_index);
+	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+						       pool, dir);
+
+	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
+						 pm->max_buff);
+	return 0;
+}
+
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+			      unsigned int sb_index, u16 pool_index,
+			      u32 threshold)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port =
+			mlxsw_core_port_driver_priv(mlxsw_core_port);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 local_port = mlxsw_sp_port->local_port;
+	u8 pool = pool_get(pool_index);
+	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+	u32 max_buff;
+	int err;
+
+	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+				       threshold, &max_buff);
+	if (err)
+		return err;
+
+	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
+				    0, max_buff);
+}
+
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+				 unsigned int sb_index, u16 tc_index,
+				 enum devlink_sb_pool_type pool_type,
+				 u16 *p_pool_index, u32 *p_threshold)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port =
+			mlxsw_core_port_driver_priv(mlxsw_core_port);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 local_port = mlxsw_sp_port->local_port;
+	u8 pg_buff = tc_index;
+	enum mlxsw_reg_sbxx_dir dir = pool_type;
+	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+						       pg_buff, dir);
+
+	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
+						 cm->max_buff);
+	*p_pool_index = pool_index_get(cm->pool, pool_type);
+	return 0;
+}
+
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+				 unsigned int sb_index, u16 tc_index,
+				 enum devlink_sb_pool_type pool_type,
+				 u16 pool_index, u32 threshold)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port =
+			mlxsw_core_port_driver_priv(mlxsw_core_port);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 local_port = mlxsw_sp_port->local_port;
+	u8 pg_buff = tc_index;
+	enum mlxsw_reg_sbxx_dir dir = pool_type;
+	u8 pool = pool_index;
+	u32 max_buff;
+	int err;
+
+	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+				       threshold, &max_buff);
+	if (err)
+		return err;
+
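+	/* The direction encoded in the pool index must match the binding
+	 * direction; convert back to a per-direction pool number.
+	 */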
+	if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
+		if (pool < MLXSW_SP_SB_POOL_COUNT)
+			return -EINVAL;
+		pool -= MLXSW_SP_SB_POOL_COUNT;
+	} else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
+		return -EINVAL;
+	}
+	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
+				    0, max_buff, pool);
+}
+
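+/* Each port contributes MLXSW_SP_SB_TC_COUNT ingress and MLXSW_SP_SB_TC_COUNT
+ * egress records to an SBSR response, which bounds how many ports can be
+ * queried in a single batch.
+ */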
+#define MASKED_COUNT_MAX \
+	(MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
+
+struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
+	u8 masked_count;
+	u8 local_port_1;
+};
+
+static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
+					char *sbsr_pl, size_t sbsr_pl_len,
+					unsigned long cb_priv)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+	u8 masked_count;
+	u8 local_port;
+	int rec_index = 0;
+	struct mlxsw_sp_sb_cm *cm;
+	int i;
+
+	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
+
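+	/* Records hold all ingress TC entries for the batch first, followed
+	 * by the egress TC entries, hence the two passes over the same ports.
+	 */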
+	masked_count = 0;
+	for (local_port = cb_ctx.local_port_1;
+	     local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+		if (!mlxsw_sp->ports[local_port])
+			continue;
+		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+						MLXSW_REG_SBXX_DIR_INGRESS);
+			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+						  &cm->occ.cur, &cm->occ.max);
+		}
+		if (++masked_count == cb_ctx.masked_count)
+			break;
+	}
+	masked_count = 0;
+	for (local_port = cb_ctx.local_port_1;
+	     local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+		if (!mlxsw_sp->ports[local_port])
+			continue;
+		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+						MLXSW_REG_SBXX_DIR_EGRESS);
+			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+						  &cm->occ.cur, &cm->occ.max);
+		}
+		if (++masked_count == cb_ctx.masked_count)
+			break;
+	}
+}
+
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+			     unsigned int sb_index)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+	unsigned long cb_priv;
+	LIST_HEAD(bulk_list);
+	char *sbsr_pl;
+	u8 masked_count;
+	u8 local_port_1;
+	u8 local_port = 0;
+	int i;
+	int err;
+	int err2;
+
+	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+	if (!sbsr_pl)
+		return -ENOMEM;
+
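+	/* Query ports in batches of at most MASKED_COUNT_MAX so each SBSR
+	 * response has room for all per-TC records of the batch.
+	 */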
+next_batch:
+	local_port++;
+	local_port_1 = local_port;
+	masked_count = 0;
+	mlxsw_reg_sbsr_pack(sbsr_pl, false);
+	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+	}
+	for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+		if (!mlxsw_sp->ports[local_port])
+			continue;
+		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+						       MLXSW_REG_SBXX_DIR_INGRESS,
+						       &bulk_list);
+			if (err)
+				goto out;
+			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+						       MLXSW_REG_SBXX_DIR_EGRESS,
+						       &bulk_list);
+			if (err)
+				goto out;
+		}
+		if (++masked_count == MASKED_COUNT_MAX)
+			goto do_query;
+	}
+
+do_query:
+	cb_ctx.masked_count = masked_count;
+	cb_ctx.local_port_1 = local_port_1;
+	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
+	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
+				    cb_priv);
+	if (err)
+		goto out;
+	if (local_port < MLXSW_PORT_MAX_PORTS)
+		goto next_batch;
+
+out:
+	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+	if (!err)
+		err = err2;
+	kfree(sbsr_pl);
+	return err;
+}
+
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+			      unsigned int sb_index)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+	LIST_HEAD(bulk_list);
+	char *sbsr_pl;
+	unsigned int masked_count;
+	u8 local_port = 0;
+	int i;
+	int err;
+	int err2;
+
+	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+	if (!sbsr_pl)
+		return -ENOMEM;
+
+next_batch:
+	local_port++;
+	masked_count = 0;
+	mlxsw_reg_sbsr_pack(sbsr_pl, true);
+	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+	}
+	for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+		if (!mlxsw_sp->ports[local_port])
+			continue;
+		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+						       MLXSW_REG_SBXX_DIR_INGRESS,
+						       &bulk_list);
+			if (err)
+				goto out;
+			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+						       MLXSW_REG_SBXX_DIR_EGRESS,
+						       &bulk_list);
+			if (err)
+				goto out;
+		}
+		if (++masked_count == MASKED_COUNT_MAX)
+			goto do_query;
+	}
+
+do_query:
+	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+				    &bulk_list, NULL, 0);
+	if (err)
+		goto out;
+	if (local_port < MLXSW_PORT_MAX_PORTS)
+		goto next_batch;
+
+out:
+	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+	if (!err)
+		err = err2;
+	kfree(sbsr_pl);
+	return err;
+}
+
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+				  unsigned int sb_index, u16 pool_index,
+				  u32 *p_cur, u32 *p_max)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port =
+			mlxsw_core_port_driver_priv(mlxsw_core_port);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 local_port = mlxsw_sp_port->local_port;
+	u8 pool = pool_get(pool_index);
+	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+						       pool, dir);
+
+	*p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
+	*p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
+	return 0;
+}
+
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+				     unsigned int sb_index, u16 tc_index,
+				     enum devlink_sb_pool_type pool_type,
+				     u32 *p_cur, u32 *p_max)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port =
+			mlxsw_core_port_driver_priv(mlxsw_core_port);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 local_port = mlxsw_sp_port->local_port;
+	u8 pg_buff = tc_index;
+	enum mlxsw_reg_sbxx_dir dir = pool_type;
+	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+						       pg_buff, dir);
+
+	*p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
+	*p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
new file mode 100644
index 0000000..0b32366
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -0,0 +1,480 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <net/dcbnl.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+static u8 mlxsw_sp_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 mlxsw_sp_dcbnl_setdcbx(struct net_device __always_unused *dev,
+				 u8 mode)
+{
+	return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getets(struct net_device *dev,
+				      struct ieee_ets *ets)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+	memcpy(ets, mlxsw_sp_port->dcb.ets, sizeof(*ets));
+
+	return 0;
+}
+
+static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
+				      struct ieee_ets *ets)
+{
+	struct net_device *dev = mlxsw_sp_port->dev;
+	bool has_ets_tc = false;
+	int i, tx_bw_sum = 0;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			has_ets_tc = true;
+			tx_bw_sum += ets->tc_tx_bw[i];
+			break;
+		default:
+			netdev_err(dev, "Only strict priority and ETS are supported\n");
+			return -EINVAL;
+		}
+
+		if (ets->prio_tc[i] >= IEEE_8021QAZ_MAX_TCS) {
+			netdev_err(dev, "Invalid TC\n");
+			return -EINVAL;
+		}
+	}
+
+	if (has_ets_tc && tx_bw_sum != 100) {
+		netdev_err(dev, "Total ETS bandwidth should equal 100\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
+				     u8 *prio_tc)
+{
+	char pptb_pl[MLXSW_REG_PPTB_LEN];
+	int i;
+
+	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]);
+	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
+			       pptb_pl);
+}
+
+static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg)
+{
+	int i;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		if (prio_tc[i] == pg)
+			return true;
+	return false;
+}
+
+static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+				    u8 *old_prio_tc, u8 *new_prio_tc)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char pbmc_pl[MLXSW_REG_PBMC_LEN];
+	int err, i;
+
+	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+	if (err)
+		return err;
+
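+	/* PGs from the old mapping that no priority uses in the new mapping
+	 * are released by packing them with a zero size.
+	 */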
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		u8 pg = old_prio_tc[i];
+
+		if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg))
+			mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0);
+	}
+
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+}
+
+static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				      struct ieee_ets *ets)
+{
+	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+	struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
+	struct net_device *dev = mlxsw_sp_port->dev;
+	int err;
+
+	/* Create the required PGs, but don't destroy existing ones, as
+	 * traffic is still directed to them.
+	 */
+	err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+					   ets->prio_tc, pause_en,
+					   mlxsw_sp_port->dcb.pfc);
+	if (err) {
+		netdev_err(dev, "Failed to configure port's headroom\n");
+		return err;
+	}
+
+	err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc);
+	if (err) {
+		netdev_err(dev, "Failed to set PG-priority mapping\n");
+		goto err_port_prio_pg_map;
+	}
+
+	err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc,
+				       ets->prio_tc);
+	if (err)
+		netdev_warn(dev, "Failed to remove unused PGs\n");
+
+	return 0;
+
+err_port_prio_pg_map:
+	mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc);
+	return err;
+}
+
+static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
+					struct ieee_ets *ets)
+{
+	struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
+	struct net_device *dev = mlxsw_sp_port->dev;
+	int i, err;
+
+	/* Egress configuration. */
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		bool dwrr = ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
+		u8 weight = ets->tc_tx_bw[i];
+
+		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+					    0, dwrr, weight);
+		if (err) {
+			netdev_err(dev, "Failed to link subgroup ETS element %d to group\n",
+				   i);
+			goto err_port_ets_set;
+		}
+	}
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+						ets->prio_tc[i]);
+		if (err) {
+			netdev_err(dev, "Failed to map prio %d to TC %d\n", i,
+				   ets->prio_tc[i]);
+			goto err_port_prio_tc_set;
+		}
+	}
+
+	/* Ingress configuration. */
+	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, ets);
+	if (err)
+		goto err_port_headroom_set;
+
+	return 0;
+
+err_port_headroom_set:
+	i = IEEE_8021QAZ_MAX_TCS;
+err_port_prio_tc_set:
+	for (i--; i >= 0; i--)
+		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, my_ets->prio_tc[i]);
+	i = IEEE_8021QAZ_MAX_TCS;
+err_port_ets_set:
+	for (i--; i >= 0; i--) {
+		bool dwrr = my_ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
+		u8 weight = my_ets->tc_tx_bw[i];
+
+		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+					    0, dwrr, weight);
+	}
+	return err;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
+				      struct ieee_ets *ets)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err;
+
+	err = mlxsw_sp_port_ets_validate(mlxsw_sp_port, ets);
+	if (err)
+		return err;
+
+	err = __mlxsw_sp_dcbnl_ieee_setets(mlxsw_sp_port, ets);
+	if (err)
+		return err;
+
+	memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+
+	return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
+					  struct ieee_maxrate *maxrate)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+	memcpy(maxrate, mlxsw_sp_port->dcb.maxrate, sizeof(*maxrate));
+
+	return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
+					  struct ieee_maxrate *maxrate)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct ieee_maxrate *my_maxrate = mlxsw_sp_port->dcb.maxrate;
+	int err, i;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+						    i, 0,
+						    maxrate->tc_maxrate[i]);
+		if (err) {
+			netdev_err(dev, "Failed to set maxrate for TC %d\n", i);
+			goto err_port_ets_maxrate_set;
+		}
+	}
+
+	memcpy(mlxsw_sp_port->dcb.maxrate, maxrate, sizeof(*maxrate));
+
+	return 0;
+
+err_port_ets_maxrate_set:
+	for (i--; i >= 0; i--)
+		mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+					      MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+					      i, 0, my_maxrate->tc_maxrate[i]);
+	return err;
+}
+
+static int mlxsw_sp_port_pfc_cnt_get(struct mlxsw_sp_port *mlxsw_sp_port,
+				     u8 prio)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct ieee_pfc *my_pfc = mlxsw_sp_port->dcb.pfc;
+	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+	int err;
+
+	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
+			     MLXSW_REG_PPCNT_PRIO_CNT, prio);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+	if (err)
+		return err;
+
+	my_pfc->requests[prio] = mlxsw_reg_ppcnt_tx_pause_get(ppcnt_pl);
+	my_pfc->indications[prio] = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl);
+
+	return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getpfc(struct net_device *dev,
+				      struct ieee_pfc *pfc)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err, i;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		err = mlxsw_sp_port_pfc_cnt_get(mlxsw_sp_port, i);
+		if (err) {
+			netdev_err(dev, "Failed to get PFC count for priority %d\n",
+				   i);
+			return err;
+		}
+	}
+
+	memcpy(pfc, mlxsw_sp_port->dcb.pfc, sizeof(*pfc));
+
+	return 0;
+}
+
+static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				 struct ieee_pfc *pfc)
+{
+	char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+	mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
+
+	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+			       pfcc_pl);
+}
+
+static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
+				      struct ieee_pfc *pfc)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err;
+
+	if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) {
+		netdev_err(dev, "PAUSE frames already enabled on port\n");
+		return -EINVAL;
+	}
+
+	err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+					   mlxsw_sp_port->dcb.ets->prio_tc,
+					   false, pfc);
+	if (err) {
+		netdev_err(dev, "Failed to configure port's headroom for PFC\n");
+		return err;
+	}
+
+	err = mlxsw_sp_port_pfc_set(mlxsw_sp_port, pfc);
+	if (err) {
+		netdev_err(dev, "Failed to configure PFC\n");
+		goto err_port_pfc_set;
+	}
+
+	memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+
+	return 0;
+
+err_port_pfc_set:
+	__mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+				     mlxsw_sp_port->dcb.ets->prio_tc, false,
+				     mlxsw_sp_port->dcb.pfc);
+	return err;
+}
+
+static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
+	.ieee_getets		= mlxsw_sp_dcbnl_ieee_getets,
+	.ieee_setets		= mlxsw_sp_dcbnl_ieee_setets,
+	.ieee_getmaxrate	= mlxsw_sp_dcbnl_ieee_getmaxrate,
+	.ieee_setmaxrate	= mlxsw_sp_dcbnl_ieee_setmaxrate,
+	.ieee_getpfc		= mlxsw_sp_dcbnl_ieee_getpfc,
+	.ieee_setpfc		= mlxsw_sp_dcbnl_ieee_setpfc,
+
+	.getdcbx		= mlxsw_sp_dcbnl_getdcbx,
+	.setdcbx		= mlxsw_sp_dcbnl_setdcbx,
+};
+
+static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	mlxsw_sp_port->dcb.ets = kzalloc(sizeof(*mlxsw_sp_port->dcb.ets),
+					 GFP_KERNEL);
+	if (!mlxsw_sp_port->dcb.ets)
+		return -ENOMEM;
+
+	mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+
+	return 0;
+}
+
+static void mlxsw_sp_port_ets_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	kfree(mlxsw_sp_port->dcb.ets);
+}
+
+static int mlxsw_sp_port_maxrate_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	int i;
+
+	mlxsw_sp_port->dcb.maxrate = kmalloc(sizeof(*mlxsw_sp_port->dcb.maxrate),
+					     GFP_KERNEL);
+	if (!mlxsw_sp_port->dcb.maxrate)
+		return -ENOMEM;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		mlxsw_sp_port->dcb.maxrate->tc_maxrate[i] = MLXSW_REG_QEEC_MAS_DIS;
+
+	return 0;
+}
+
+static void mlxsw_sp_port_maxrate_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	kfree(mlxsw_sp_port->dcb.maxrate);
+}
+
+static int mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	mlxsw_sp_port->dcb.pfc = kzalloc(sizeof(*mlxsw_sp_port->dcb.pfc),
+					 GFP_KERNEL);
+	if (!mlxsw_sp_port->dcb.pfc)
+		return -ENOMEM;
+
+	mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+	return 0;
+}
+
+static void mlxsw_sp_port_pfc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	kfree(mlxsw_sp_port->dcb.pfc);
+}
+
+int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	int err;
+
+	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
+	if (err)
+		return err;
+	err = mlxsw_sp_port_maxrate_init(mlxsw_sp_port);
+	if (err)
+		goto err_port_maxrate_init;
+	err = mlxsw_sp_port_pfc_init(mlxsw_sp_port);
+	if (err)
+		goto err_port_pfc_init;
+
+	mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;
+
+	return 0;
+
+err_port_pfc_init:
+	mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
+err_port_maxrate_init:
+	mlxsw_sp_port_ets_fini(mlxsw_sp_port);
+	return err;
+}
+
+void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	mlxsw_sp_port_pfc_fini(mlxsw_sp_port);
+	mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
+	mlxsw_sp_port_ets_fini(mlxsw_sp_port);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index e1c74ef..3710f19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -214,7 +214,15 @@
 	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
 			    table_type, range, local_port, set);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+	if (err)
+		goto err_flood_bm_set;
+	else
+		goto buffer_out;
 
+err_flood_bm_set:
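+	/* Undo the earlier unicast flood table write. */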
+	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
+			    table_type, range, local_port, !set);
+	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 buffer_out:
 	kfree(sftr_pl);
 	return err;
@@ -1430,8 +1438,8 @@
 
 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
 {
-	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
-			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
+			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
 }
 
 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 7a60a26..3842eab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -43,7 +43,6 @@
 #include <linux/device.h>
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
-#include <net/devlink.h>
 #include <net/switchdev.h>
 #include <generated/utsrelease.h>
 
@@ -75,11 +74,11 @@
 };
 
 struct mlxsw_sx_port {
+	struct mlxsw_core_port core_port; /* must be first */
 	struct net_device *dev;
 	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
 	struct mlxsw_sx *mlxsw_sx;
 	u8 local_port;
-	struct devlink_port devlink_port;
 };
 
 /* tx_hdr_version
@@ -303,7 +302,7 @@
 	u64 len;
 	int err;
 
-	if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
 		return NETDEV_TX_BUSY;
 
 	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
@@ -321,7 +320,7 @@
 	/* Due to a race we might fail here because of a full queue. In that
 	 * unlikely case we simply drop the packet.
 	 */
-	err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);
 
 	if (!err) {
 		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
@@ -518,7 +517,8 @@
 	int i;
 	int err;
 
-	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
+			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
 	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
 	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
 		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
@@ -955,9 +955,7 @@
 
 static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
 {
-	struct devlink *devlink = priv_to_devlink(mlxsw_sx->core);
 	struct mlxsw_sx_port *mlxsw_sx_port;
-	struct devlink_port *devlink_port;
 	struct net_device *dev;
 	bool usable;
 	int err;
@@ -1011,14 +1009,6 @@
 		goto port_not_usable;
 	}
 
-	devlink_port = &mlxsw_sx_port->devlink_port;
-	err = devlink_port_register(devlink, devlink_port, local_port);
-	if (err) {
-		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n",
-			mlxsw_sx_port->local_port);
-		goto err_devlink_port_register;
-	}
-
 	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
 	if (err) {
 		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1076,11 +1066,19 @@
 		goto err_register_netdev;
 	}
 
-	devlink_port_type_eth_set(devlink_port, dev);
+	err = mlxsw_core_port_init(mlxsw_sx->core, &mlxsw_sx_port->core_port,
+				   mlxsw_sx_port->local_port, dev, false, 0);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
+			mlxsw_sx_port->local_port);
+		goto err_core_port_init;
+	}
 
 	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
 	return 0;
 
+err_core_port_init:
+	unregister_netdev(dev);
 err_register_netdev:
 err_port_mac_learning_mode_set:
 err_port_stp_state_set:
@@ -1089,8 +1087,6 @@
 err_port_speed_set:
 err_port_swid_set:
 err_port_system_port_mapping_set:
-	devlink_port_unregister(&mlxsw_sx_port->devlink_port);
-err_devlink_port_register:
 port_not_usable:
 err_port_module_check:
 err_dev_addr_get:
@@ -1103,15 +1099,12 @@
 static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
 {
 	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
-	struct devlink_port *devlink_port;
 
 	if (!mlxsw_sx_port)
 		return;
-	devlink_port = &mlxsw_sx_port->devlink_port;
-	devlink_port_type_clear(devlink_port);
+	mlxsw_core_port_fini(&mlxsw_sx_port->core_port);
 	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
 	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
-	devlink_port_unregister(devlink_port);
 	free_percpu(mlxsw_sx_port->pcpu_stats);
 	free_netdev(mlxsw_sx_port->dev);
 }
@@ -1454,10 +1447,10 @@
 	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
 }
 
-static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
 			 const struct mlxsw_bus_info *mlxsw_bus_info)
 {
-	struct mlxsw_sx *mlxsw_sx = priv;
+	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
 	int err;
 
 	mlxsw_sx->core = mlxsw_core;
@@ -1504,9 +1497,9 @@
 	return err;
 }
 
-static void mlxsw_sx_fini(void *priv)
+static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
 {
-	struct mlxsw_sx *mlxsw_sx = priv;
+	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
 
 	mlxsw_sx_traps_fini(mlxsw_sx);
 	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index a8522d8..20cb85b 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1354,6 +1354,7 @@
 	struct resource *rxirq_res, *txirq_res, *linkirq_res;
 	int ret = 0;
 	int buff_n;
+	bool inv_mac_addr = false;
 	u32 machigh, maclow;
 
 	/* Initialise a net_device */
@@ -1456,8 +1457,7 @@
 	ndev->dev_addr[5] = maclow & 0xFF;
 
 	if (!is_valid_ether_addr(ndev->dev_addr))
-		dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
-			 "set using ifconfig\n", ndev->name);
+		inv_mac_addr = true;
 
 	/* In order to be efficient memory-wise, we allocate both
 	 * rings in one go.
@@ -1520,6 +1520,9 @@
 	ret = register_netdev(ndev);
 
 	if (ret == 0) {
+		if (inv_mac_addr)
+			dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
+				 ndev->name);
 		dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
 			 ks8695_port_type(ksp), ndev->dev_addr);
 	} else {
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 75dc46c..280e761 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4790,7 +4790,7 @@
 
 	/* Notify the network subsystem that the packet has been sent. */
 	if (dev)
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 }
 
 /**
@@ -4965,7 +4965,7 @@
 		hw_ena_intr(hw);
 	}
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 86ea17e..7066954 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -28,11 +28,12 @@
 #include <linux/skbuff.h>
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
+#include <linux/of_net.h>
 
 #include "enc28j60_hw.h"
 
 #define DRV_NAME	"enc28j60"
-#define DRV_VERSION	"1.01"
+#define DRV_VERSION	"1.02"
 
 #define SPI_OPLEN	1
 
@@ -89,22 +90,26 @@
 {
 	u8 *rx_buf = priv->spi_transfer_buf + 4;
 	u8 *tx_buf = priv->spi_transfer_buf;
-	struct spi_transfer t = {
+	struct spi_transfer tx = {
 		.tx_buf = tx_buf,
+		.len = SPI_OPLEN,
+	};
+	struct spi_transfer rx = {
 		.rx_buf = rx_buf,
-		.len = SPI_OPLEN + len,
+		.len = len,
 	};
 	struct spi_message msg;
 	int ret;
 
 	tx_buf[0] = ENC28J60_READ_BUF_MEM;
-	tx_buf[1] = tx_buf[2] = tx_buf[3] = 0;	/* don't care */
 
 	spi_message_init(&msg);
-	spi_message_add_tail(&t, &msg);
+	spi_message_add_tail(&tx, &msg);
+	spi_message_add_tail(&rx, &msg);
+
 	ret = spi_sync(priv->spi, &msg);
 	if (ret == 0) {
-		memcpy(data, &rx_buf[SPI_OPLEN], len);
+		memcpy(data, rx_buf, len);
 		ret = msg.status;
 	}
 	if (ret && netif_msg_drv(priv))
@@ -1544,6 +1549,7 @@
 {
 	struct net_device *dev;
 	struct enc28j60_net *priv;
+	const void *macaddr;
 	int ret = 0;
 
 	if (netif_msg_drv(&debug))
@@ -1575,7 +1581,12 @@
 		ret = -EIO;
 		goto error_irq;
 	}
-	eth_hw_addr_random(dev);
+
+	macaddr = of_get_mac_address(spi->dev.of_node);
+	if (macaddr)
+		ether_addr_copy(dev->dev_addr, macaddr);
+	else
+		eth_hw_addr_random(dev);
 	enc28j60_set_hw_macaddr(dev);
 
 	/* Board setup must set the relevant edge trigger type;
@@ -1630,9 +1641,16 @@
 	return 0;
 }
 
+static const struct of_device_id enc28j60_dt_ids[] = {
+	{ .compatible = "microchip,enc28j60" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, enc28j60_dt_ids);
+
 static struct spi_driver enc28j60_driver = {
 	.driver = {
-		   .name = DRV_NAME,
+		.name = DRV_NAME,
+		.of_match_table = enc28j60_dt_ids,
 	 },
 	.probe = enc28j60_probe,
 	.remove = enc28j60_remove,
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 7df3183..42e3407 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -874,7 +874,7 @@
 	netif_stop_queue(dev);
 
 	/* save the timestamp */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	/* Remember the skb for deferred processing */
 	priv->tx_skb = skb;
@@ -890,7 +890,7 @@
 	struct encx24j600_priv *priv = netdev_priv(dev);
 
 	netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
-		  jiffies, jiffies - dev->trans_start);
+		  jiffies, jiffies - dev_trans_start(dev));
 
 	dev->stats.tx_errors++;
 	netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 3e67f45..4367dd6 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -376,7 +376,7 @@
 
 	priv->tx_head = TX_NEXT(tx_head);
 
-	ndev->trans_start = jiffies;
+	netif_trans_update(ndev);
 	ret = NETDEV_TX_OK;
 out_unlock:
 	spin_unlock_irq(&priv->txlock);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 270c9ee..6d1a956 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2668,9 +2668,9 @@
 
 	del_timer_sync(&mgp->watchdog_timer);
 	mgp->running = MYRI10GE_ETH_STOPPING;
-	local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
 	for (i = 0; i < mgp->num_slices; i++) {
 		napi_disable(&mgp->ss[i].napi);
+		local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
 		/* Lock the slice to prevent the busy_poll handler from
 		 * accessing it.  Later when we bring the NIC up, myri10ge_open
 		 * resets the slice including this lock.
@@ -2679,8 +2679,8 @@
 			pr_info("Slice %d locked\n", i);
 			mdelay(1);
 		}
+		local_bh_enable();
 	}
-	local_bh_enable();
 	netif_carrier_off(dev);
 
 	netif_tx_stop_all_queues(dev);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 122c2ee..ed89029 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -1904,7 +1904,7 @@
 	spin_unlock_irq(&np->lock);
 	enable_irq(irq);
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 	netif_wake_queue(dev);
 }
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 1bd419d..612c7a4 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -174,7 +174,7 @@
 	/* Try to restart the adaptor. */
 	sonic_init(dev);
 	lp->stats.tx_errors++;
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9ba9758..2874dff 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4021,7 +4021,6 @@
 	unsigned long flags = 0;
 	u16 vlan_tag = 0;
 	struct fifo_info *fifo = NULL;
-	int do_spin_lock = 1;
 	int offload_type;
 	int enable_per_list_interrupt = 0;
 	struct config_param *config = &sp->config;
@@ -4074,7 +4073,6 @@
 					queue += sp->udp_fifo_idx;
 					if (skb->len > 1024)
 						enable_per_list_interrupt = 1;
-					do_spin_lock = 0;
 				}
 			}
 		}
@@ -4084,12 +4082,7 @@
 			[skb->priority & (MAX_TX_FIFOS - 1)];
 	fifo = &mac_control->fifos[queue];
 
-	if (do_spin_lock)
-		spin_lock_irqsave(&fifo->tx_lock, flags);
-	else {
-		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-			return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&fifo->tx_lock, flags);
 
 	if (sp->config.multiq) {
 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 75683fb..e744acc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -59,8 +59,8 @@
 			netdev_warn((nn)->netdev, fmt, ## args);	\
 	} while (0)
 
-/* Max time to wait for NFP to respond on updates (in ms) */
-#define NFP_NET_POLL_TIMEOUT	5000
+/* Max time to wait for NFP to respond on updates (in seconds) */
+#define NFP_NET_POLL_TIMEOUT	5
 
 /* Bar allocation */
 #define NFP_NET_CRTL_BAR	0
@@ -298,6 +298,8 @@
  * @rxds:       Virtual address of FL/RX ring in host memory
  * @dma:        DMA address of the FL/RX ring
  * @size:       Size, in bytes, of the FL/RX ring (needed to free)
+ * @bufsz:	Buffer allocation size for convenience of management routines
+ *		(NOTE: this is in second cache line, do not use on fast path!)
  */
 struct nfp_net_rx_ring {
 	struct nfp_net_r_vector *r_vec;
@@ -319,6 +321,7 @@
 
 	dma_addr_t dma;
 	unsigned int size;
+	unsigned int bufsz;
 } ____cacheline_aligned;
 
 /**
@@ -444,6 +447,10 @@
  * @shared_name:        Name for shared interrupt
  * @me_freq_mhz:        ME clock_freq (MHz)
  * @reconfig_lock:	Protects HW reconfiguration request regs/machinery
+ * @reconfig_posted:	Pending reconfig bits coming from async sources
+ * @reconfig_timer_active:  Timer for reading reconfiguration results is pending
+ * @reconfig_sync_present:  Some thread is performing synchronous reconfig
+ * @reconfig_timer:	Timer for async reading of reconfig results
  * @link_up:            Is the link up?
  * @link_status_lock:	Protects @link_up and ensures atomicity with BAR reading
  * @rx_coalesce_usecs:      RX interrupt moderation usecs delay parameter
@@ -472,6 +479,9 @@
 
 	u32 rx_offset;
 
+	struct nfp_net_tx_ring *tx_rings;
+	struct nfp_net_rx_ring *rx_rings;
+
 #ifdef CONFIG_PCI_IOV
 	unsigned int num_vfs;
 	struct vf_data_storage *vfinfo;
@@ -504,9 +514,6 @@
 	int txd_cnt;
 	int rxd_cnt;
 
-	struct nfp_net_tx_ring tx_rings[NFP_NET_MAX_TX_RINGS];
-	struct nfp_net_rx_ring rx_rings[NFP_NET_MAX_RX_RINGS];
-
 	u8 num_irqs;
 	u8 num_r_vecs;
 	struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS];
@@ -528,6 +535,10 @@
 	spinlock_t link_status_lock;
 
 	spinlock_t reconfig_lock;
+	u32 reconfig_posted;
+	bool reconfig_timer_active;
+	bool reconfig_sync_present;
+	struct timer_list reconfig_timer;
 
 	u32 rx_coalesce_usecs;
 	u32 rx_coalesce_max_frames;
@@ -721,6 +732,7 @@
 void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
 int nfp_net_irqs_alloc(struct nfp_net *nn);
 void nfp_net_irqs_disable(struct nfp_net *nn);
+int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt);
 
 #ifdef CONFIG_NFP_NET_DEBUG
 void nfp_net_debugfs_create(void);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 43c618b..fa47c14 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -80,6 +80,116 @@
 	put_unaligned_le32(reg, fw_ver);
 }
 
+/* Firmware reconfig
+ *
+ * Firmware reconfig may take a while so we have two versions of it -
+ * synchronous and asynchronous (posted).  All synchronous callers are holding
+ * RTNL so we don't have to worry about serializing them.
+ */
+static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
+{
+	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
+	/* ensure update is written before pinging HW */
+	nn_pci_flush(nn);
+	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+}
+
+/* Pass 0 as update to run posted reconfigs. */
+static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
+{
+	update |= nn->reconfig_posted;
+	nn->reconfig_posted = 0;
+
+	nfp_net_reconfig_start(nn, update);
+
+	nn->reconfig_timer_active = true;
+	mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
+}
+
+static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
+{
+	u32 reg;
+
+	reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
+	if (reg == 0)
+		return true;
+	if (reg & NFP_NET_CFG_UPDATE_ERR) {
+		nn_err(nn, "Reconfig error: 0x%08x\n", reg);
+		return true;
+	} else if (last_check) {
+		nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
+		return true;
+	}
+
+	return false;
+}
+
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+	bool timed_out = false;
+
+	/* Poll update field, waiting for NFP to ack the config */
+	while (!nfp_net_reconfig_check_done(nn, timed_out)) {
+		msleep(1);
+		timed_out = time_is_before_eq_jiffies(deadline);
+	}
+
+	if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
+		return -EIO;
+
+	return timed_out ? -EIO : 0;
+}
+
+static void nfp_net_reconfig_timer(unsigned long data)
+{
+	struct nfp_net *nn = (void *)data;
+
+	spin_lock_bh(&nn->reconfig_lock);
+
+	nn->reconfig_timer_active = false;
+
+	/* If sync caller is present it will take over from us */
+	if (nn->reconfig_sync_present)
+		goto done;
+
+	/* Read reconfig status and report errors */
+	nfp_net_reconfig_check_done(nn, true);
+
+	if (nn->reconfig_posted)
+		nfp_net_reconfig_start_async(nn, 0);
+done:
+	spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig_post() - Post async reconfig request
+ * @nn:      NFP Net device to reconfigure
+ * @update:  The value for the update field in the BAR config
+ *
+ * Record FW reconfiguration request.  Reconfiguration will be kicked off
+ * whenever reconfiguration machinery is idle.  Multiple requests can be
+ * merged together!
+ */
+static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
+{
+	spin_lock_bh(&nn->reconfig_lock);
+
+	/* Sync caller will kick off async reconf when it's done, just post */
+	if (nn->reconfig_sync_present) {
+		nn->reconfig_posted |= update;
+		goto done;
+	}
+
+	/* Opportunistically check if the previous command is done */
+	if (!nn->reconfig_timer_active ||
+	    nfp_net_reconfig_check_done(nn, false))
+		nfp_net_reconfig_start_async(nn, update);
+	else
+		nn->reconfig_posted |= update;
+done:
+	spin_unlock_bh(&nn->reconfig_lock);
+}
+
 /**
  * nfp_net_reconfig() - Reconfigure the firmware
  * @nn:      NFP Net device to reconfigure
@@ -93,35 +203,45 @@
  */
 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
 {
-	int cnt, ret = 0;
-	u32 new;
+	bool cancelled_timer = false;
+	u32 pre_posted_requests;
+	int ret;
 
 	spin_lock_bh(&nn->reconfig_lock);
 
-	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
-	/* ensure update is written before pinging HW */
-	nn_pci_flush(nn);
-	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+	nn->reconfig_sync_present = true;
 
-	/* Poll update field, waiting for NFP to ack the config */
-	for (cnt = 0; ; cnt++) {
-		new = nn_readl(nn, NFP_NET_CFG_UPDATE);
-		if (new == 0)
-			break;
-		if (new & NFP_NET_CFG_UPDATE_ERR) {
-			nn_err(nn, "Reconfig error: 0x%08x\n", new);
-			ret = -EIO;
-			break;
-		} else if (cnt >= NFP_NET_POLL_TIMEOUT) {
-			nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n",
-			       update, cnt);
-			ret = -EIO;
-			break;
-		}
-		mdelay(1);
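+	/* Take over any posted (async) reconfig: stop the timer and collect
+	 * requests that were queued before this synchronous update.
+	 */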
+	if (nn->reconfig_timer_active) {
+		del_timer(&nn->reconfig_timer);
+		nn->reconfig_timer_active = false;
+		cancelled_timer = true;
 	}
+	pre_posted_requests = nn->reconfig_posted;
+	nn->reconfig_posted = 0;
 
 	spin_unlock_bh(&nn->reconfig_lock);
+
+	if (cancelled_timer)
+		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+
+	/* Run the posted reconfigs which were issued before we started */
+	if (pre_posted_requests) {
+		nfp_net_reconfig_start(nn, pre_posted_requests);
+		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+	}
+
+	nfp_net_reconfig_start(nn, update);
+	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+
+	spin_lock_bh(&nn->reconfig_lock);
+
+	if (nn->reconfig_posted)
+		nfp_net_reconfig_start_async(nn, 0);
+
+	nn->reconfig_sync_present = false;
+
+	spin_unlock_bh(&nn->reconfig_lock);
+
 	return ret;
 }
 
@@ -347,12 +467,18 @@
 /**
  * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
  * @tx_ring:  TX ring structure
+ * @r_vec:    IRQ vector servicing this ring
+ * @idx:      Ring index
  */
-static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring)
+static void
+nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
+		     struct nfp_net_r_vector *r_vec, unsigned int idx)
 {
-	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
 	struct nfp_net *nn = r_vec->nfp_net;
 
+	tx_ring->idx = idx;
+	tx_ring->r_vec = r_vec;
+
 	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
 	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
 }
@@ -360,12 +486,18 @@
 /**
  * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
  * @rx_ring:  RX ring structure
+ * @r_vec:    IRQ vector servicing this ring
+ * @idx:      Ring index
  */
-static void nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring)
+static void
+nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
+		     struct nfp_net_r_vector *r_vec, unsigned int idx)
 {
-	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
 	struct nfp_net *nn = r_vec->nfp_net;
 
+	rx_ring->idx = idx;
+	rx_ring->r_vec = r_vec;
+
 	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
 	rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
 
@@ -401,16 +533,6 @@
 		r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
 
 		cpumask_set_cpu(r, &r_vec->affinity_mask);
-
-		r_vec->tx_ring = &nn->tx_rings[r];
-		nn->tx_rings[r].idx = r;
-		nn->tx_rings[r].r_vec = r_vec;
-		nfp_net_tx_ring_init(r_vec->tx_ring);
-
-		r_vec->rx_ring = &nn->rx_rings[r];
-		nn->rx_rings[r].idx = r;
-		nn->rx_rings[r].r_vec = r_vec;
-		nfp_net_rx_ring_init(r_vec->rx_ring);
 	}
 }
 
@@ -865,61 +987,59 @@
 }
 
 /**
- * nfp_net_tx_flush() - Free any untransmitted buffers currently on the TX ring
- * @tx_ring:     TX ring structure
+ * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
+ * @nn:		NFP Net device
+ * @tx_ring:	TX ring structure
  *
  * Assumes that the device is stopped
  */
-static void nfp_net_tx_flush(struct nfp_net_tx_ring *tx_ring)
+static void
+nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
 {
-	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
-	struct nfp_net *nn = r_vec->nfp_net;
-	struct pci_dev *pdev = nn->pdev;
 	const struct skb_frag_struct *frag;
 	struct netdev_queue *nd_q;
-	struct sk_buff *skb;
-	int nr_frags;
-	int fidx;
-	int idx;
+	struct pci_dev *pdev = nn->pdev;
 
 	while (tx_ring->rd_p != tx_ring->wr_p) {
+		int nr_frags, fidx, idx;
+		struct sk_buff *skb;
+
 		idx = tx_ring->rd_p % tx_ring->cnt;
-
 		skb = tx_ring->txbufs[idx].skb;
-		if (skb) {
-			nr_frags = skb_shinfo(skb)->nr_frags;
-			fidx = tx_ring->txbufs[idx].fidx;
+		nr_frags = skb_shinfo(skb)->nr_frags;
+		fidx = tx_ring->txbufs[idx].fidx;
 
-			if (fidx == -1) {
-				/* unmap head */
-				dma_unmap_single(&pdev->dev,
-						 tx_ring->txbufs[idx].dma_addr,
-						 skb_headlen(skb),
-						 DMA_TO_DEVICE);
-			} else {
-				/* unmap fragment */
-				frag = &skb_shinfo(skb)->frags[fidx];
-				dma_unmap_page(&pdev->dev,
-					       tx_ring->txbufs[idx].dma_addr,
-					       skb_frag_size(frag),
-					       DMA_TO_DEVICE);
-			}
-
-			/* check for last gather fragment */
-			if (fidx == nr_frags - 1)
-				dev_kfree_skb_any(skb);
-
-			tx_ring->txbufs[idx].dma_addr = 0;
-			tx_ring->txbufs[idx].skb = NULL;
-			tx_ring->txbufs[idx].fidx = -2;
+		if (fidx == -1) {
+			/* unmap head */
+			dma_unmap_single(&pdev->dev,
+					 tx_ring->txbufs[idx].dma_addr,
+					 skb_headlen(skb), DMA_TO_DEVICE);
+		} else {
+			/* unmap fragment */
+			frag = &skb_shinfo(skb)->frags[fidx];
+			dma_unmap_page(&pdev->dev,
+				       tx_ring->txbufs[idx].dma_addr,
+				       skb_frag_size(frag), DMA_TO_DEVICE);
 		}
 
-		memset(&tx_ring->txds[idx], 0, sizeof(tx_ring->txds[idx]));
+		/* check for last gather fragment */
+		if (fidx == nr_frags - 1)
+			dev_kfree_skb_any(skb);
+
+		tx_ring->txbufs[idx].dma_addr = 0;
+		tx_ring->txbufs[idx].skb = NULL;
+		tx_ring->txbufs[idx].fidx = -2;
 
 		tx_ring->qcp_rd_p++;
 		tx_ring->rd_p++;
 	}
 
+	memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
+	tx_ring->wr_p = 0;
+	tx_ring->rd_p = 0;
+	tx_ring->qcp_rd_p = 0;
+	tx_ring->wr_ptr_add = 0;
+
 	nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
 	netdev_tx_reset_queue(nd_q);
 }
@@ -957,25 +1077,27 @@
  * nfp_net_rx_alloc_one() - Allocate and map skb for RX
  * @rx_ring:	RX ring structure of the skb
  * @dma_addr:	Pointer to storage for DMA address (output param)
+ * @fl_bufsz:	size of freelist buffers
  *
 * This function will allocate a new skb and map it for DMA.
  *
  * Return: allocated skb or NULL on failure.
  */
 static struct sk_buff *
-nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr)
+nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
+		     unsigned int fl_bufsz)
 {
 	struct nfp_net *nn = rx_ring->r_vec->nfp_net;
 	struct sk_buff *skb;
 
-	skb = netdev_alloc_skb(nn->netdev, nn->fl_bufsz);
+	skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
 	if (!skb) {
 		nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
 		return NULL;
 	}
 
 	*dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
-				  nn->fl_bufsz, DMA_FROM_DEVICE);
+				   fl_bufsz, DMA_FROM_DEVICE);
 	if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
 		dev_kfree_skb_any(skb);
 		nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
@@ -1020,62 +1142,101 @@
 }
 
 /**
- * nfp_net_rx_flush() - Free any buffers currently on the RX ring
- * @rx_ring:  RX ring to remove buffers from
+ * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
+ * @rx_ring:	RX ring structure
  *
- * Assumes that the device is stopped
+ * Warning: Do *not* call if ring buffers were never put on the FW freelist
+ *	    (i.e. device was not enabled)!
  */
-static void nfp_net_rx_flush(struct nfp_net_rx_ring *rx_ring)
+static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
 {
-	struct nfp_net *nn = rx_ring->r_vec->nfp_net;
+	unsigned int wr_idx, last_idx;
+
+	/* Move the empty entry to the end of the list */
+	wr_idx = rx_ring->wr_p % rx_ring->cnt;
+	last_idx = rx_ring->cnt - 1;
+	rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
+	rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
+	rx_ring->rxbufs[last_idx].dma_addr = 0;
+	rx_ring->rxbufs[last_idx].skb = NULL;
+
+	memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
+	rx_ring->wr_p = 0;
+	rx_ring->rd_p = 0;
+	rx_ring->wr_ptr_add = 0;
+}
+
+/**
+ * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
+ * @nn:		NFP Net device
+ * @rx_ring:	RX ring to remove buffers from
+ *
+ * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
+ * entries.  After device is disabled nfp_net_rx_ring_reset() must be called
+ * to restore required ring geometry.
+ */
+static void
+nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
+{
 	struct pci_dev *pdev = nn->pdev;
-	int idx;
+	unsigned int i;
 
-	while (rx_ring->rd_p != rx_ring->wr_p) {
-		idx = rx_ring->rd_p % rx_ring->cnt;
+	for (i = 0; i < rx_ring->cnt - 1; i++) {
+		/* NULL skb can only happen when initial filling of the ring
+		 * fails to allocate enough buffers and calls here to free
+		 * already allocated ones.
+		 */
+		if (!rx_ring->rxbufs[i].skb)
+			continue;
 
-		if (rx_ring->rxbufs[idx].skb) {
-			dma_unmap_single(&pdev->dev,
-					 rx_ring->rxbufs[idx].dma_addr,
-					 nn->fl_bufsz, DMA_FROM_DEVICE);
-			dev_kfree_skb_any(rx_ring->rxbufs[idx].skb);
-			rx_ring->rxbufs[idx].dma_addr = 0;
-			rx_ring->rxbufs[idx].skb = NULL;
-		}
-
-		memset(&rx_ring->rxds[idx], 0, sizeof(rx_ring->rxds[idx]));
-
-		rx_ring->rd_p++;
+		dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
+				 rx_ring->bufsz, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
+		rx_ring->rxbufs[i].dma_addr = 0;
+		rx_ring->rxbufs[i].skb = NULL;
 	}
 }
 
 /**
- * nfp_net_rx_fill_freelist() - Attempt filling freelist with RX buffers
- * @rx_ring: RX ring to fill
- *
- * Try to fill as many buffers as possible into freelist.  Return
- * number of buffers added.
- *
- * Return: Number of freelist buffers added.
+ * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
+ * @nn:		NFP Net device
+ * @rx_ring:	RX ring to fill with buffers
  */
-static int nfp_net_rx_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+static int
+nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
 {
-	struct sk_buff *skb;
-	dma_addr_t dma_addr;
+	struct nfp_net_rx_buf *rxbufs;
+	unsigned int i;
 
-	while (nfp_net_rx_space(rx_ring)) {
-		skb = nfp_net_rx_alloc_one(rx_ring, &dma_addr);
-		if (!skb) {
-			nfp_net_rx_flush(rx_ring);
+	rxbufs = rx_ring->rxbufs;
+
+	for (i = 0; i < rx_ring->cnt - 1; i++) {
+		rxbufs[i].skb =
+			nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
+					     rx_ring->bufsz);
+		if (!rxbufs[i].skb) {
+			nfp_net_rx_ring_bufs_free(nn, rx_ring);
 			return -ENOMEM;
 		}
-		nfp_net_rx_give_one(rx_ring, skb, dma_addr);
 	}
 
 	return 0;
 }
 
 /**
+ * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @rx_ring: RX ring to fill
+ */
+static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+{
+	unsigned int i;
+
+	for (i = 0; i < rx_ring->cnt - 1; i++)
+		nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
+				    rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
  * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
  * @flags: RX descriptor flags field in CPU byte order
  */
@@ -1240,7 +1401,8 @@
 
 		skb = rx_ring->rxbufs[idx].skb;
 
-		new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr);
+		new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
+					       nn->fl_bufsz);
 		if (!new_skb) {
 			nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
 					    rx_ring->rxbufs[idx].dma_addr);
@@ -1256,23 +1418,25 @@
 
 		nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
 
+		/*         < meta_len >
+		 *  <-- [rx_offset] -->
+		 *  ---------------------------------------------------------
+		 * | [XX] |  metadata  |             packet           | XXXX |
+		 *  ---------------------------------------------------------
+		 *         <---------------- data_len --------------->
+		 *
+		 * The rx_offset is fixed for all packets, the meta_len can vary
+		 * on a packet by packet basis. If rx_offset is set to zero
+		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+		 * buffer and is immediately followed by the packet (no [XX]).
+		 */
 		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
 		data_len = le16_to_cpu(rxd->rxd.data_len);
 
-		if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) {
-			dev_kfree_skb_any(skb);
-			continue;
-		}
-
-		if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) {
-			/* The packet data starts after the metadata */
+		if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
 			skb_reserve(skb, meta_len);
-		} else {
-			/* The packet data starts at a fixed offset */
+		else
 			skb_reserve(skb, nn->rx_offset);
-		}
-
-		/* Adjust the SKB for the dynamic meta data pre-pended */
 		skb_put(skb, data_len - meta_len);
 
 		nfp_net_set_hash(nn->netdev, skb, rxd);
@@ -1349,10 +1513,6 @@
 	struct nfp_net *nn = r_vec->nfp_net;
 	struct pci_dev *pdev = nn->pdev;
 
-	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), 0);
-	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), 0);
-	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), 0);
-
 	kfree(tx_ring->txbufs);
 
 	if (tx_ring->txds)
@@ -1360,11 +1520,6 @@
 				  tx_ring->txds, tx_ring->dma);
 
 	tx_ring->cnt = 0;
-	tx_ring->wr_p = 0;
-	tx_ring->rd_p = 0;
-	tx_ring->qcp_rd_p = 0;
-	tx_ring->wr_ptr_add = 0;
-
 	tx_ring->txbufs = NULL;
 	tx_ring->txds = NULL;
 	tx_ring->dma = 0;
@@ -1374,17 +1529,18 @@
 /**
  * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
  * @tx_ring:   TX Ring structure to allocate
+ * @cnt:       Ring buffer count
  *
  * Return: 0 on success, negative errno otherwise.
  */
-static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring)
+static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
 {
 	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
 	struct nfp_net *nn = r_vec->nfp_net;
 	struct pci_dev *pdev = nn->pdev;
 	int sz;
 
-	tx_ring->cnt = nn->txd_cnt;
+	tx_ring->cnt = cnt;
 
 	tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
 	tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
@@ -1397,11 +1553,6 @@
 	if (!tx_ring->txbufs)
 		goto err_alloc;
 
-	/* Write the DMA address, size and MSI-X info to the device */
-	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), tx_ring->dma);
-	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), ilog2(tx_ring->cnt));
-	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), r_vec->irq_idx);
-
 	netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
 
 	nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
@@ -1415,6 +1566,59 @@
 	return -ENOMEM;
 }
 
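+/* Ring resizing builds a complete "shadow" set of rings first, swaps it in
+ * for the r_vectors, and only then frees the old set, so a failed allocation
+ * leaves the original rings intact.
+ */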
+static struct nfp_net_tx_ring *
+nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
+{
+	struct nfp_net_tx_ring *rings;
+	unsigned int r;
+
+	rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
+	if (!rings)
+		return NULL;
+
+	for (r = 0; r < nn->num_tx_rings; r++) {
+		nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
+
+		if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
+			goto err_free_prev;
+	}
+
+	return rings;
+
+err_free_prev:
+	while (r--)
+		nfp_net_tx_ring_free(&rings[r]);
+	kfree(rings);
+	return NULL;
+}
+
+static struct nfp_net_tx_ring *
+nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+{
+	struct nfp_net_tx_ring *old = nn->tx_rings;
+	unsigned int r;
+
+	for (r = 0; r < nn->num_tx_rings; r++)
+		old[r].r_vec->tx_ring = &rings[r];
+
+	nn->tx_rings = rings;
+	return old;
+}
+
+static void
+nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+{
+	unsigned int r;
+
+	if (!rings)
+		return;
+
+	for (r = 0; r < nn->num_tx_rings; r++)
+		nfp_net_tx_ring_free(&rings[r]);
+
+	kfree(rings);
+}
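The three shadow helpers above follow a prepare/swap/free pattern: a complete replacement set of rings is allocated up front, pointers are swapped while the device is quiesced, and whichever set ends up unused is freed (on a firmware failure the old set is swapped back in, as the later reconfiguration paths show). A minimal userspace analogy of that pointer swap, using plain calloc() instead of the driver's ring allocators:

#include <stdio.h>
#include <stdlib.h>

struct ring { unsigned int cnt; };

/* install the shadow set and return the previously active one */
static struct ring *rings_swap(struct ring **active, struct ring *shadow)
{
	struct ring *old = *active;

	*active = shadow;
	return old;
}

int main(void)
{
	struct ring *active = calloc(4, sizeof(*active));	/* current rings */
	struct ring *shadow = calloc(4, sizeof(*shadow));	/* "prepare" */
	int fw_error = 1;		/* pretend the firmware rejects the new config */

	shadow[0].cnt = 2048;
	shadow = rings_swap(&active, shadow);		/* shadow now holds the old set */
	if (fw_error)
		shadow = rings_swap(&active, shadow);	/* roll back to the old rings */
	free(shadow);					/* whichever set is unused gets freed */
	printf("active ring size: %u\n", active[0].cnt);
	free(active);
	return 0;
}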
+
 /**
  * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
  * @rx_ring:  RX ring to free
@@ -1425,10 +1629,6 @@
 	struct nfp_net *nn = r_vec->nfp_net;
 	struct pci_dev *pdev = nn->pdev;
 
-	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), 0);
-	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), 0);
-	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), 0);
-
 	kfree(rx_ring->rxbufs);
 
 	if (rx_ring->rxds)
@@ -1436,10 +1636,6 @@
 				  rx_ring->rxds, rx_ring->dma);
 
 	rx_ring->cnt = 0;
-	rx_ring->wr_p = 0;
-	rx_ring->rd_p = 0;
-	rx_ring->wr_ptr_add = 0;
-
 	rx_ring->rxbufs = NULL;
 	rx_ring->rxds = NULL;
 	rx_ring->dma = 0;
@@ -1449,17 +1645,22 @@
 /**
  * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
  * @rx_ring:  RX ring to allocate
+ * @fl_bufsz: Size of buffers to allocate
+ * @cnt:      Ring buffer count
  *
  * Return: 0 on success, negative errno otherwise.
  */
-static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring)
+static int
+nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
+		      u32 cnt)
 {
 	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
 	struct nfp_net *nn = r_vec->nfp_net;
 	struct pci_dev *pdev = nn->pdev;
 	int sz;
 
-	rx_ring->cnt = nn->rxd_cnt;
+	rx_ring->cnt = cnt;
+	rx_ring->bufsz = fl_bufsz;
 
 	rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
 	rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
@@ -1472,11 +1673,6 @@
 	if (!rx_ring->rxbufs)
 		goto err_alloc;
 
-	/* Write the DMA address, size and MSI-X info to the device */
-	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), rx_ring->dma);
-	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), ilog2(rx_ring->cnt));
-	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), r_vec->irq_idx);
-
 	nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
 	       rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
 	       rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
@@ -1488,91 +1684,109 @@
 	return -ENOMEM;
 }
 
-static void __nfp_net_free_rings(struct nfp_net *nn, unsigned int n_free)
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
+				u32 buf_cnt)
 {
-	struct nfp_net_r_vector *r_vec;
-	struct msix_entry *entry;
+	struct nfp_net_rx_ring *rings;
+	unsigned int r;
 
-	while (n_free--) {
-		r_vec = &nn->r_vecs[n_free];
-		entry = &nn->irq_entries[r_vec->irq_idx];
+	rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
+	if (!rings)
+		return NULL;
 
-		nfp_net_rx_ring_free(r_vec->rx_ring);
-		nfp_net_tx_ring_free(r_vec->tx_ring);
+	for (r = 0; r < nn->num_rx_rings; r++) {
+		nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
 
-		irq_set_affinity_hint(entry->vector, NULL);
-		free_irq(entry->vector, r_vec);
+		if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
+			goto err_free_prev;
 
-		netif_napi_del(&r_vec->napi);
+		if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
+			goto err_free_ring;
 	}
+
+	return rings;
+
+err_free_prev:
+	while (r--) {
+		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+err_free_ring:
+		nfp_net_rx_ring_free(&rings[r]);
+	}
+	kfree(rings);
+	return NULL;
 }
 
-/**
- * nfp_net_free_rings() - Free all ring resources
- * @nn:      NFP Net device to reconfigure
- */
-static void nfp_net_free_rings(struct nfp_net *nn)
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
 {
-	__nfp_net_free_rings(nn, nn->num_r_vecs);
+	struct nfp_net_rx_ring *old = nn->rx_rings;
+	unsigned int r;
+
+	for (r = 0; r < nn->num_rx_rings; r++)
+		old[r].r_vec->rx_ring = &rings[r];
+
+	nn->rx_rings = rings;
+	return old;
 }
 
-/**
- * nfp_net_alloc_rings() - Allocate resources for RX and TX rings
- * @nn:      NFP Net device to reconfigure
- *
- * Return: 0 on success or negative errno on error.
- */
-static int nfp_net_alloc_rings(struct nfp_net *nn)
+static void
+nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
 {
-	struct nfp_net_r_vector *r_vec;
-	struct msix_entry *entry;
-	int err;
-	int r;
+	unsigned int r;
+
+	if (!rings)
+		return;
 
 	for (r = 0; r < nn->num_r_vecs; r++) {
-		r_vec = &nn->r_vecs[r];
-		entry = &nn->irq_entries[r_vec->irq_idx];
-
-		/* Setup NAPI */
-		netif_napi_add(nn->netdev, &r_vec->napi,
-			       nfp_net_poll, NAPI_POLL_WEIGHT);
-
-		snprintf(r_vec->name, sizeof(r_vec->name),
-			 "%s-rxtx-%d", nn->netdev->name, r);
-		err = request_irq(entry->vector, r_vec->handler, 0,
-				  r_vec->name, r_vec);
-		if (err) {
-			nn_dbg(nn, "Error requesting IRQ %d\n", entry->vector);
-			goto err_napi_del;
-		}
-
-		irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
-
-		nn_dbg(nn, "RV%02d: irq=%03d/%03d\n",
-		       r, entry->vector, entry->entry);
-
-		/* Allocate TX ring resources */
-		err = nfp_net_tx_ring_alloc(r_vec->tx_ring);
-		if (err)
-			goto err_free_irq;
-
-		/* Allocate RX ring resources */
-		err = nfp_net_rx_ring_alloc(r_vec->rx_ring);
-		if (err)
-			goto err_free_tx;
+		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+		nfp_net_rx_ring_free(&rings[r]);
 	}
 
-	return 0;
+	kfree(rings);
+}
 
-err_free_tx:
-	nfp_net_tx_ring_free(r_vec->tx_ring);
-err_free_irq:
+static int
+nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+		       int idx)
+{
+	struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
+	int err;
+
+	r_vec->tx_ring = &nn->tx_rings[idx];
+	nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
+
+	r_vec->rx_ring = &nn->rx_rings[idx];
+	nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
+
+	snprintf(r_vec->name, sizeof(r_vec->name),
+		 "%s-rxtx-%d", nn->netdev->name, idx);
+	err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
+	if (err) {
+		nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
+		return err;
+	}
+	disable_irq(entry->vector);
+
+	/* Setup NAPI */
+	netif_napi_add(nn->netdev, &r_vec->napi,
+		       nfp_net_poll, NAPI_POLL_WEIGHT);
+
+	irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
+
+	nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
+
+	return 0;
+}
+
+static void
+nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
+{
+	struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
+
 	irq_set_affinity_hint(entry->vector, NULL);
-	free_irq(entry->vector, r_vec);
-err_napi_del:
 	netif_napi_del(&r_vec->napi);
-	__nfp_net_free_rings(nn, r);
-	return err;
+	free_irq(entry->vector, r_vec);
 }
 
 /**
@@ -1646,6 +1860,17 @@
 		  get_unaligned_be16(nn->netdev->dev_addr + 4) << 16);
 }
 
+static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
+{
+	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
+	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
+	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
+
+	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
+	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
+	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
+}
+
 /**
  * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
  * @nn:      NFP Net device to reconfigure
@@ -1653,6 +1878,7 @@
 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
 {
 	u32 new_ctrl, update;
+	unsigned int r;
 	int err;
 
 	new_ctrl = nn->ctrl;
@@ -1669,79 +1895,40 @@
 
 	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
 	err = nfp_net_reconfig(nn, update);
-	if (err) {
+	if (err)
 		nn_err(nn, "Could not disable device: %d\n", err);
-		return;
+
+	for (r = 0; r < nn->num_r_vecs; r++) {
+		nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
+		nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
+		nfp_net_vec_clear_ring_data(nn, r);
 	}
 
 	nn->ctrl = new_ctrl;
 }
 
-/**
- * nfp_net_start_vec() - Start ring vector
- * @nn:      NFP Net device structure
- * @r_vec:   Ring vector to be started
- */
-static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
+static void
+nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+			    unsigned int idx)
 {
-	unsigned int irq_vec;
-	int err = 0;
+	/* Write the DMA address, size and MSI-X info to the device */
+	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
+	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
+	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
 
-	irq_vec = nn->irq_entries[r_vec->irq_idx].vector;
-
-	disable_irq(irq_vec);
-
-	err = nfp_net_rx_fill_freelist(r_vec->rx_ring);
-	if (err) {
-		nn_err(nn, "RV%02d: couldn't allocate enough buffers\n",
-		       r_vec->irq_idx);
-		goto out;
-	}
-
-	napi_enable(&r_vec->napi);
-out:
-	enable_irq(irq_vec);
-
-	return err;
+	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
+	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
+	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
 }
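nfp_net_vec_write_ring_data() programs the ring size registers with ilog2() of the descriptor count, so the counts written here are assumed to be powers of two (the ethtool path later in this patch rounds requests with roundup_pow_of_two()). A standalone sketch of that encoding:

#include <stdio.h>

/* integer log2 of a power-of-two count, like the kernel's ilog2() used above */
static unsigned int log2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int cnt;

	for (cnt = 256; cnt <= 4096; cnt <<= 1)
		printf("cnt=%u -> SZ register value %u\n", cnt, log2_u32(cnt));
	return 0;
}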
 
-static int nfp_net_netdev_open(struct net_device *netdev)
+static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
 {
-	struct nfp_net *nn = netdev_priv(netdev);
-	int err, r;
-	u32 update = 0;
-	u32 new_ctrl;
-
-	if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
-		nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
-		return -EBUSY;
-	}
+	u32 new_ctrl, update = 0;
+	unsigned int r;
+	int err;
 
 	new_ctrl = nn->ctrl;
 
-	/* Step 1: Allocate resources for rings and the like
-	 * - Request interrupts
-	 * - Allocate RX and TX ring resources
-	 * - Setup initial RSS table
-	 */
-	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
-				      nn->exn_name, sizeof(nn->exn_name),
-				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
-	if (err)
-		return err;
-
-	err = nfp_net_alloc_rings(nn);
-	if (err)
-		goto err_free_exn;
-
-	err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
-	if (err)
-		goto err_free_rings;
-
-	err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
-	if (err)
-		goto err_free_rings;
-
 	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
 		nfp_net_rss_write_key(nn);
 		nfp_net_rss_write_itbl(nn);
@@ -1756,22 +1943,18 @@
 		update |= NFP_NET_CFG_UPDATE_IRQMOD;
 	}
 
-	/* Step 2: Configure the NFP
-	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
-	 * - Write MAC address (in case it changed)
-	 * - Set the MTU
-	 * - Set the Freelist buffer size
-	 * - Enable the FW
-	 */
+	for (r = 0; r < nn->num_r_vecs; r++)
+		nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
+
 	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
 		  0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
 
 	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
 		  0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
 
-	nfp_net_write_mac_addr(nn, netdev->dev_addr);
+	nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
 
-	nn_writel(nn, NFP_NET_CFG_MTU, netdev->mtu);
+	nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
 	nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
 
 	/* Enable device */
@@ -1784,69 +1967,213 @@
 
 	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
 	err = nfp_net_reconfig(nn, update);
-	if (err)
-		goto err_clear_config;
 
 	nn->ctrl = new_ctrl;
 
+	for (r = 0; r < nn->num_r_vecs; r++)
+		nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
+
 	/* Since reconfiguration requests while NFP is down are ignored we
 	 * have to wipe the entire VXLAN configuration and reinitialize it.
 	 */
 	if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
 		memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
 		memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
-		vxlan_get_rx_port(netdev);
+		vxlan_get_rx_port(nn->netdev);
 	}
 
+	return err;
+}
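The TXRS_ENABLE/RXRS_ENABLE writes in __nfp_net_set_config_and_enable() build a "first n rings" bitmask; the 64-ring case is handled separately, presumably because shifting a 64-bit value by 64 bits is undefined in C. A small standalone sketch of that mask:

#include <stdio.h>
#include <stdint.h>

/* bitmask enabling rings 0..n-1; n == 64 is special-cased because a
 * 64-bit shift by 64 cannot be relied upon in C
 */
static uint64_t ring_enable_mask(unsigned int n)
{
	return n == 64 ? 0xffffffffffffffffULL : ((uint64_t)1 << n) - 1;
}

int main(void)
{
	printf("4 rings  -> %#llx\n", (unsigned long long)ring_enable_mask(4));
	printf("64 rings -> %#llx\n", (unsigned long long)ring_enable_mask(64));
	return 0;
}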
+
+/**
+ * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
+ * @nn:      NFP Net device to reconfigure
+ */
+static int nfp_net_set_config_and_enable(struct nfp_net *nn)
+{
+	int err;
+
+	err = __nfp_net_set_config_and_enable(nn);
+	if (err)
+		nfp_net_clear_config_and_disable(nn);
+
+	return err;
+}
+
+/**
+ * nfp_net_open_stack() - Start the device from stack's perspective
+ * @nn:      NFP Net device to reconfigure
+ */
+static void nfp_net_open_stack(struct nfp_net *nn)
+{
+	unsigned int r;
+
+	for (r = 0; r < nn->num_r_vecs; r++) {
+		napi_enable(&nn->r_vecs[r].napi);
+		enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+	}
+
+	netif_tx_wake_all_queues(nn->netdev);
+
+	enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	nfp_net_read_link_status(nn);
+}
+
+static int nfp_net_netdev_open(struct net_device *netdev)
+{
+	struct nfp_net *nn = netdev_priv(netdev);
+	int err, r;
+
+	if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
+		nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
+		return -EBUSY;
+	}
+
+	/* Step 1: Allocate resources for rings and the like
+	 * - Request interrupts
+	 * - Allocate RX and TX ring resources
+	 * - Setup initial RSS table
+	 */
+	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
+				      nn->exn_name, sizeof(nn->exn_name),
+				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
+	if (err)
+		return err;
+	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
+				      nn->lsc_name, sizeof(nn->lsc_name),
+				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
+	if (err)
+		goto err_free_exn;
+	disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+
+	nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
+			       GFP_KERNEL);
+	if (!nn->rx_rings)
+		goto err_free_lsc;
+	nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
+			       GFP_KERNEL);
+	if (!nn->tx_rings)
+		goto err_free_rx_rings;
+
+	for (r = 0; r < nn->num_r_vecs; r++) {
+		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
+		if (err)
+			goto err_free_prev_vecs;
+
+		err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
+		if (err)
+			goto err_cleanup_vec_p;
+
+		err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
+					    nn->fl_bufsz, nn->rxd_cnt);
+		if (err)
+			goto err_free_tx_ring_p;
+
+		err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
+		if (err)
+			goto err_flush_rx_ring_p;
+	}
+
+	err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
+	if (err)
+		goto err_free_rings;
+
+	err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
+	if (err)
+		goto err_free_rings;
+
+	/* Step 2: Configure the NFP
+	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
+	 * - Write MAC address (in case it changed)
+	 * - Set the MTU
+	 * - Set the Freelist buffer size
+	 * - Enable the FW
+	 */
+	err = nfp_net_set_config_and_enable(nn);
+	if (err)
+		goto err_free_rings;
+
 	/* Step 3: Enable for kernel
 	 * - put some freelist descriptors on each RX ring
 	 * - enable NAPI on each ring
 	 * - enable all TX queues
 	 * - set link state
 	 */
-	for (r = 0; r < nn->num_r_vecs; r++) {
-		err = nfp_net_start_vec(nn, &nn->r_vecs[r]);
-		if (err)
-			goto err_disable_napi;
-	}
-
-	netif_tx_wake_all_queues(netdev);
-
-	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
-				      nn->lsc_name, sizeof(nn->lsc_name),
-				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
-	if (err)
-		goto err_stop_tx;
-	nfp_net_read_link_status(nn);
+	nfp_net_open_stack(nn);
 
 	return 0;
 
-err_stop_tx:
-	netif_tx_disable(netdev);
-	for (r = 0; r < nn->num_r_vecs; r++)
-		nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
-err_disable_napi:
-	while (r--) {
-		napi_disable(&nn->r_vecs[r].napi);
-		nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
-	}
-err_clear_config:
-	nfp_net_clear_config_and_disable(nn);
 err_free_rings:
-	nfp_net_free_rings(nn);
+	r = nn->num_r_vecs;
+err_free_prev_vecs:
+	while (r--) {
+		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
+err_flush_rx_ring_p:
+		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+err_free_tx_ring_p:
+		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+err_cleanup_vec_p:
+		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+	}
+	kfree(nn->tx_rings);
+err_free_rx_rings:
+	kfree(nn->rx_rings);
+err_free_lsc:
+	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
 err_free_exn:
 	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
 	return err;
 }
 
 /**
+ * nfp_net_close_stack() - Quiesce the stack (part of close)
+ * nfp_net_close_stack() - Quiesce the stack (part of close)
+ * @nn:	     NFP Net device to reconfigure
+ */
+static void nfp_net_close_stack(struct nfp_net *nn)
+{
+	unsigned int r;
+
+	disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	netif_carrier_off(nn->netdev);
+	nn->link_up = false;
+
+	for (r = 0; r < nn->num_r_vecs; r++) {
+		disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+		napi_disable(&nn->r_vecs[r].napi);
+	}
+
+	netif_tx_disable(nn->netdev);
+}
+
+/**
+ * nfp_net_close_free_all() - Free all runtime resources
+ * @nn:      NFP Net device to reconfigure
+ */
+static void nfp_net_close_free_all(struct nfp_net *nn)
+{
+	unsigned int r;
+
+	for (r = 0; r < nn->num_r_vecs; r++) {
+		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
+		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+	}
+
+	kfree(nn->rx_rings);
+	kfree(nn->tx_rings);
+
+	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+}
+
+/**
  * nfp_net_netdev_close() - Called when the device is downed
  * @netdev:      netdev structure
  */
 static int nfp_net_netdev_close(struct net_device *netdev)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
-	int r;
 
 	if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
 		nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
@@ -1855,14 +2182,7 @@
 
 	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
 	 */
-	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
-	netif_carrier_off(netdev);
-	nn->link_up = false;
-
-	for (r = 0; r < nn->num_r_vecs; r++)
-		napi_disable(&nn->r_vecs[r].napi);
-
-	netif_tx_disable(netdev);
+	nfp_net_close_stack(nn);
 
 	/* Step 2: Tell NFP
 	 */
@@ -1870,13 +2190,7 @@
 
 	/* Step 3: Free resources
 	 */
-	for (r = 0; r < nn->num_r_vecs; r++) {
-		nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
-		nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
-	}
-
-	nfp_net_free_rings(nn);
-	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+	nfp_net_close_free_all(nn);
 
 	nn_dbg(nn, "%s down", netdev->name);
 	return 0;
@@ -1902,37 +2216,139 @@
 		return;
 
 	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
-	if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN))
-		return;
+	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
 
 	nn->ctrl = new_ctrl;
 }
 
 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
 {
+	unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
 	struct nfp_net *nn = netdev_priv(netdev);
-	u32 tmp;
-
-	nn_dbg(nn, "New MTU = %d\n", new_mtu);
+	struct nfp_net_rx_ring *tmp_rings;
+	int err;
 
 	if (new_mtu < 68 || new_mtu > nn->max_mtu) {
 		nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
 		return -EINVAL;
 	}
 
-	netdev->mtu = new_mtu;
+	old_mtu = netdev->mtu;
+	old_fl_bufsz = nn->fl_bufsz;
+	new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
 
-	/* Freelist buffer size rounded up to the nearest 1K */
-	tmp = new_mtu + ETH_HLEN + VLAN_HLEN + NFP_NET_MAX_PREPEND;
-	nn->fl_bufsz = roundup(tmp, 1024);
-
-	/* restart if running */
-	if (netif_running(netdev)) {
-		nfp_net_netdev_close(netdev);
-		nfp_net_netdev_open(netdev);
+	if (!netif_running(netdev)) {
+		netdev->mtu = new_mtu;
+		nn->fl_bufsz = new_fl_bufsz;
+		return 0;
 	}
 
-	return 0;
+	/* Prepare new rings */
+	tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
+						    nn->rxd_cnt);
+	if (!tmp_rings)
+		return -ENOMEM;
+
+	/* Stop device, swap in new rings, try to start the firmware */
+	nfp_net_close_stack(nn);
+	nfp_net_clear_config_and_disable(nn);
+
+	tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
+	netdev->mtu = new_mtu;
+	nn->fl_bufsz = new_fl_bufsz;
+
+	err = nfp_net_set_config_and_enable(nn);
+	if (err) {
+		const int err_new = err;
+
+		/* Try with old configuration and old rings */
+		tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
+		netdev->mtu = old_mtu;
+		nn->fl_bufsz = old_fl_bufsz;
+
+		err = __nfp_net_set_config_and_enable(nn);
+		if (err)
+			nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
+			       err_new, err);
+	}
+
+	nfp_net_shadow_rx_rings_free(nn, tmp_rings);
+
+	nfp_net_open_stack(nn);
+
+	return err;
+}
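The new freelist buffer size above is computed directly from the MTU plus headroom for the NFP prepend, the Ethernet header and two VLAN tags, instead of the old round-up-to-1K formula. A small sketch of the arithmetic; ETH_HLEN (14) and VLAN_HLEN (4) are the standard kernel values, while the prepend size below is only an illustrative stand-in for NFP_NET_MAX_PREPEND:

#include <stdio.h>

#define ETH_HLEN	14	/* standard Ethernet header length */
#define VLAN_HLEN	4	/* one 802.1Q tag */
#define MAX_PREPEND	64	/* stand-in for NFP_NET_MAX_PREPEND (driver-specific) */

static unsigned int fl_bufsz(unsigned int mtu)
{
	return MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + mtu;
}

int main(void)
{
	printf("MTU 1500 -> freelist buffer %u bytes\n", fl_bufsz(1500));
	printf("MTU 9000 -> freelist buffer %u bytes\n", fl_bufsz(9000));
	return 0;
}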
+
+int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
+{
+	struct nfp_net_tx_ring *tx_rings = NULL;
+	struct nfp_net_rx_ring *rx_rings = NULL;
+	u32 old_rxd_cnt, old_txd_cnt;
+	int err;
+
+	if (!netif_running(nn->netdev)) {
+		nn->rxd_cnt = rxd_cnt;
+		nn->txd_cnt = txd_cnt;
+		return 0;
+	}
+
+	old_rxd_cnt = nn->rxd_cnt;
+	old_txd_cnt = nn->txd_cnt;
+
+	/* Prepare new rings */
+	if (nn->rxd_cnt != rxd_cnt) {
+		rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
+							   rxd_cnt);
+		if (!rx_rings)
+			return -ENOMEM;
+	}
+	if (nn->txd_cnt != txd_cnt) {
+		tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
+		if (!tx_rings) {
+			nfp_net_shadow_rx_rings_free(nn, rx_rings);
+			return -ENOMEM;
+		}
+	}
+
+	/* Stop device, swap in new rings, try to start the firmware */
+	nfp_net_close_stack(nn);
+	nfp_net_clear_config_and_disable(nn);
+
+	if (rx_rings)
+		rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
+	if (tx_rings)
+		tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
+
+	nn->rxd_cnt = rxd_cnt;
+	nn->txd_cnt = txd_cnt;
+
+	err = nfp_net_set_config_and_enable(nn);
+	if (err) {
+		const int err_new = err;
+
+		/* Try with old configuration and old rings */
+		if (rx_rings)
+			rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
+		if (tx_rings)
+			tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
+
+		nn->rxd_cnt = old_rxd_cnt;
+		nn->txd_cnt = old_txd_cnt;
+
+		err = __nfp_net_set_config_and_enable(nn);
+		if (err)
+			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
+			       err_new, err);
+	}
+
+	nfp_net_shadow_rx_rings_free(nn, rx_rings);
+	nfp_net_shadow_tx_rings_free(nn, tx_rings);
+
+	nfp_net_open_stack(nn);
+
+	return err;
 }
 
 static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
@@ -2108,7 +2524,7 @@
 			  be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
 			  be16_to_cpu(nn->vxlan_ports[i]));
 
-	nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
+	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
 }
 
 /**
@@ -2254,6 +2670,9 @@
 	spin_lock_init(&nn->reconfig_lock);
 	spin_lock_init(&nn->link_status_lock);
 
+	setup_timer(&nn->reconfig_timer,
+		    nfp_net_reconfig_timer, (unsigned long)nn);
+
 	return nn;
 }
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 8692003..ad6c4e3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -81,14 +81,10 @@
 
 /**
  * @NFP_NET_TXR_MAX:         Maximum number of TX rings
- * @NFP_NET_TXR_MASK:        Mask for TX rings
  * @NFP_NET_RXR_MAX:         Maximum number of RX rings
- * @NFP_NET_RXR_MASK:        Mask for RX rings
  */
 #define NFP_NET_TXR_MAX                 64
-#define NFP_NET_TXR_MASK                (NFP_NET_TXR_MAX - 1)
 #define NFP_NET_RXR_MAX                 64
-#define NFP_NET_RXR_MASK                (NFP_NET_RXR_MAX - 1)
 
 /**
  * Read/Write config words (0x0000 - 0x002c)
@@ -152,9 +148,9 @@
  * @NFP_NET_CFG_VERSION:     Firmware version number
  * @NFP_NET_CFG_STS:         Status
  * @NFP_NET_CFG_CAP:         Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_MAX_TXRINGS:     Maximum number of TX rings
- * @NFP_NET_MAX_RXRINGS:     Maximum number of RX rings
- * @NFP_NET_MAX_MTU:         Maximum support MTU
+ * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * @NFP_NET_CFG_MAX_MTU:     Maximum supported MTU
  * @NFP_NET_CFG_START_TXQ:   Start Queue Control Queue to use for TX (PF only)
  * @NFP_NET_CFG_START_RXQ:   Start Queue Control Queue to use for RX (PF only)
  *
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 4c97c71..f7c9a5b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -40,8 +40,9 @@
 
 static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
 {
-	struct nfp_net_rx_ring *rx_ring = file->private;
 	int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt;
+	struct nfp_net_r_vector *r_vec = file->private;
+	struct nfp_net_rx_ring *rx_ring;
 	struct nfp_net_rx_desc *rxd;
 	struct sk_buff *skb;
 	struct nfp_net *nn;
@@ -49,9 +50,10 @@
 
 	rtnl_lock();
 
-	if (!rx_ring->r_vec || !rx_ring->r_vec->nfp_net)
+	if (!r_vec->nfp_net || !r_vec->rx_ring)
 		goto out;
-	nn = rx_ring->r_vec->nfp_net;
+	nn = r_vec->nfp_net;
+	rx_ring = r_vec->rx_ring;
 	if (!netif_running(nn->netdev))
 		goto out;
 
@@ -115,7 +117,8 @@
 
 static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
 {
-	struct nfp_net_tx_ring *tx_ring = file->private;
+	struct nfp_net_r_vector *r_vec = file->private;
+	struct nfp_net_tx_ring *tx_ring;
 	struct nfp_net_tx_desc *txd;
 	int d_rd_p, d_wr_p, txd_cnt;
 	struct sk_buff *skb;
@@ -124,9 +127,10 @@
 
 	rtnl_lock();
 
-	if (!tx_ring->r_vec || !tx_ring->r_vec->nfp_net)
+	if (!r_vec->nfp_net || !r_vec->tx_ring)
 		goto out;
-	nn = tx_ring->r_vec->nfp_net;
+	nn = r_vec->nfp_net;
+	tx_ring = r_vec->tx_ring;
 	if (!netif_running(nn->netdev))
 		goto out;
 
@@ -183,7 +187,7 @@
 
 void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
 {
-	static struct dentry *queues, *tx, *rx;
+	struct dentry *queues, *tx, *rx;
 	char int_name[16];
 	int i;
 
@@ -196,7 +200,7 @@
 
 	/* Create queue debugging sub-tree */
 	queues = debugfs_create_dir("queue", nn->debugfs_dir);
-	if (IS_ERR_OR_NULL(nn->debugfs_dir))
+	if (IS_ERR_OR_NULL(queues))
 		return;
 
 	rx = debugfs_create_dir("rx", queues);
@@ -207,13 +211,13 @@
 	for (i = 0; i < nn->num_rx_rings; i++) {
 		sprintf(int_name, "%d", i);
 		debugfs_create_file(int_name, S_IRUSR, rx,
-				    &nn->rx_rings[i], &nfp_rx_q_fops);
+				    &nn->r_vecs[i], &nfp_rx_q_fops);
 	}
 
 	for (i = 0; i < nn->num_tx_rings; i++) {
 		sprintf(int_name, "%d", i);
 		debugfs_create_file(int_name, S_IRUSR, tx,
-				    &nn->tx_rings[i], &nfp_tx_q_fops);
+				    &nn->r_vecs[i], &nfp_tx_q_fops);
 	}
 }
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 9a4084a..ccfef1f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -153,37 +153,25 @@
 	struct nfp_net *nn = netdev_priv(netdev);
 	u32 rxd_cnt, txd_cnt;
 
-	if (netif_running(netdev)) {
-		/* Some NIC drivers allow reconfiguration on the fly,
-		 * some down the interface, change and then up it
-		 * again.  For now we don't allow changes when the
-		 * device is up.
-		 */
-		nn_warn(nn, "Can't change rings while device is up\n");
-		return -EBUSY;
-	}
-
 	/* We don't have separate queues/rings for small/large frames. */
 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
 		return -EINVAL;
 
 	/* Round up to supported values */
 	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
-	rxd_cnt = max_t(u32, rxd_cnt, NFP_NET_MIN_RX_DESCS);
-	rxd_cnt = min_t(u32, rxd_cnt, NFP_NET_MAX_RX_DESCS);
-
 	txd_cnt = roundup_pow_of_two(ring->tx_pending);
-	txd_cnt = max_t(u32, txd_cnt, NFP_NET_MIN_TX_DESCS);
-	txd_cnt = min_t(u32, txd_cnt, NFP_NET_MAX_TX_DESCS);
 
-	if (nn->rxd_cnt != rxd_cnt || nn->txd_cnt != txd_cnt)
-		nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
-		       nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+	if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
+	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
+		return -EINVAL;
 
-	nn->rxd_cnt = rxd_cnt;
-	nn->txd_cnt = txd_cnt;
+	if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
+		return 0;
 
-	return 0;
+	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
+	       nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+
+	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
 }
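With this change nfp_net_set_ringparam() rounds the requested counts up to a power of two and rejects anything outside the supported range with -EINVAL rather than silently clamping, then hands the values to nfp_net_set_ring_size(). A standalone sketch of that validation, with illustrative stand-ins for the driver's NFP_NET_MIN/MAX descriptor limits:

#include <stdio.h>
#include <errno.h>

#define MIN_DESCS	256	/* stand-ins for NFP_NET_MIN/MAX_*_DESCS (driver-specific) */
#define MAX_DESCS	4096

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static int check_ring_size(unsigned int requested)
{
	unsigned int cnt = roundup_pow2(requested);

	if (cnt < MIN_DESCS || cnt > MAX_DESCS)
		return -EINVAL;	/* rejected instead of silently clamped */
	return cnt;
}

int main(void)
{
	printf("requested 1000   -> %d\n", check_ring_size(1000));	/* 1024 */
	printf("requested 100000 -> %d\n", check_ring_size(100000));	/* -EINVAL */
	return 0;
}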
 
 static void nfp_net_get_strings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 9fbc302..adbc47f 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -313,7 +313,8 @@
 {
 	struct netx_eth_priv *priv = netdev_priv(ndev);
 	unsigned int mac4321, mac65;
-	int running, i;
+	int running, i, ret;
+	bool inv_mac_addr = false;
 
 	ndev->netdev_ops = &netx_eth_netdev_ops;
 	ndev->watchdog_timeo = msecs_to_jiffies(5000);
@@ -358,15 +359,18 @@
 	xc_start(priv->xc);
 
 	if (!is_valid_ether_addr(ndev->dev_addr))
-		printk("%s: Invalid ethernet MAC address.  Please "
-		       "set using ifconfig\n", ndev->name);
+		inv_mac_addr = true;
 
 	for (i=2; i<=18; i++)
 		pfifo_push(EMPTY_PTR_FIFO(priv->id),
 			FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
 
-	return register_netdev(ndev);
+	ret = register_netdev(ndev);
+	if (inv_mac_addr)
+		printk("%s: Invalid ethernet MAC address. Please set using ip\n",
+		       ndev->name);
 
+	return ret;
 }
 
 static int netx_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 52d9a94..87b7b81 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -476,7 +476,7 @@
 
 	w90p910_init_desc(dev);
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	ether->cur_tx = 0x0;
 	ether->finish_tx = 0x0;
 	ether->cur_rx = 0x0;
@@ -490,7 +490,7 @@
 	w90p910_trigger_tx(dev);
 	w90p910_trigger_rx(dev);
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 
 	if (netif_queue_stopped(dev))
 		netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 2a55d6d..8d710a3 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -481,7 +481,6 @@
 
 /**
  * struct pch_gbe_tx_ring - tx ring information
- * @tx_lock:	spinlock structs
  * @desc:	pointer to the descriptor ring memory
  * @dma:	physical address of the descriptor ring
  * @size:	length of descriptor ring in bytes
@@ -491,7 +490,6 @@
  * @buffer_info:	array of buffer information structs
  */
 struct pch_gbe_tx_ring {
-	spinlock_t tx_lock;
 	struct pch_gbe_tx_desc *desc;
 	dma_addr_t dma;
 	unsigned int size;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3b98b263b..3cd87a4 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1640,7 +1640,7 @@
 		   cleaned_count);
 	if (cleaned_count > 0)  { /*skip this if nothing cleaned*/
 		/* Recover from running out of Tx resources in xmit_frame */
-		spin_lock(&tx_ring->tx_lock);
+		netif_tx_lock(adapter->netdev);
 		if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
 		{
 			netif_wake_queue(adapter->netdev);
@@ -1652,7 +1652,7 @@
 
 		netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
 			   tx_ring->next_to_clean);
-		spin_unlock(&tx_ring->tx_lock);
+		netif_tx_unlock(adapter->netdev);
 	}
 	return cleaned;
 }
@@ -1805,7 +1805,6 @@
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-	spin_lock_init(&tx_ring->tx_lock);
 
 	for (desNo = 0; desNo < tx_ring->count; desNo++) {
 		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
@@ -2135,15 +2134,9 @@
 {
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
-	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-	}
 	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
 		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		netdev_dbg(netdev,
 			   "Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
 			   tx_ring->next_to_use, tx_ring->next_to_clean);
@@ -2152,7 +2145,6 @@
 
 	/* CRC,ITAG no support */
 	pch_gbe_tx_queue(adapter, tx_ring, skb);
-	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 13d88a6..91be2f0 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1144,7 +1144,7 @@
 	hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
 
 	/* Trigger an immediate transmit demand. */
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 
 	/* Restart the chip's Tx/Rx processes . */
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index fa2db41..fb1d103 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -714,7 +714,7 @@
 	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 		netif_wake_queue (dev);		/* Typical path */
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 }
 
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index ddcfcab..680d8c7 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -98,9 +98,40 @@
 	---help---
 	  This enables the support for ...
 
+config QED_SRIOV
+	bool "QLogic QED 25/40/100Gb SR-IOV support"
+	depends on QED && PCI_IOV
+	default y
+	---help---
+	  This configuration parameter enables Single Root Input Output
+	  Virtualization support for QED devices.
+	  This allows for virtual function acceleration in virtualized
+	  environments.
+
 config QEDE
 	tristate "QLogic QED 25/40/100Gb Ethernet NIC"
 	depends on QED
 	---help---
 	  This enables the support for ...
+
+config QEDE_VXLAN
+	bool "Virtual eXtensible Local Area Network support"
+	default n
+	depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m)
+	---help---
+	  This enables hardware offload support for VXLAN protocol over
+	  qede module. Say Y here if you want to enable hardware offload
+	  support for Virtual eXtensible Local Area Network (VXLAN)
+	  in the driver.
+
+config QEDE_GENEVE
+	bool "Generic Network Virtualization Encapsulation (GENEVE) support"
+	depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m)
+	---help---
+	  This allows one to create GENEVE virtual interfaces that provide
+	  Layer 2 Networks over Layer 3 Networks. GENEVE is often used
+	  to tunnel virtual network infrastructure in virtualized environments.
+	  Say Y here if you want to enable hardware offload support for
+	  Generic Network Virtualization Encapsulation (GENEVE) in the driver.
+
 endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index db80eb1..2b10f1b 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -1015,20 +1015,24 @@
 {
 	int i, v, addr;
 	__le32 *ptr32;
+	int ret;
 
 	addr = base;
 	ptr32 = buf;
 	for (i = 0; i < size / sizeof(u32); i++) {
-		if (netxen_rom_fast_read(adapter, addr, &v) == -1)
-			return -1;
+		ret = netxen_rom_fast_read(adapter, addr, &v);
+		if (ret)
+			return ret;
+
 		*ptr32 = cpu_to_le32(v);
 		ptr32++;
 		addr += sizeof(u32);
 	}
 	if ((char *)buf + size > (char *)ptr32) {
 		__le32 local;
-		if (netxen_rom_fast_read(adapter, addr, &v) == -1)
-			return -1;
+		ret = netxen_rom_fast_read(adapter, addr, &v);
+		if (ret)
+			return ret;
 		local = cpu_to_le32(v);
 		memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
 	}
@@ -1940,7 +1944,7 @@
 				if (adapter->phy_read &&
 				    adapter->phy_read(adapter,
 						      NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-						      &autoneg) != 0)
+						      &autoneg) == 0)
 					adapter->link_autoneg = autoneg;
 			} else
 				goto link_down;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index fd362b6..7a0281a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -852,7 +852,8 @@
 	ptr32 = (__le32 *)&serial_num;
 	offset = NX_FW_SERIAL_NUM_OFFSET;
 	for (i = 0; i < 8; i++) {
-		if (netxen_rom_fast_read(adapter, offset, &val) == -1) {
+		err = netxen_rom_fast_read(adapter, offset, &val);
+		if (err) {
 			dev_err(&pdev->dev, "error reading board info\n");
 			adapter->driver_mismatch = 1;
 			return;
@@ -2285,7 +2286,7 @@
 			goto request_reset;
 		}
 	}
-	adapter->netdev->trans_start = jiffies;
+	netif_trans_update(adapter->netdev);
 	rtnl_unlock();
 	return;
 
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 5c2fd57..d1f157e 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -1,4 +1,6 @@
 obj-$(CONFIG_QED) := qed.o
 
 qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
-	 qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
+	 qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
+	 qed_selftest.o qed_dcbx.o
+qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fcb8e9b..1042f2a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -26,12 +26,14 @@
 #include "qed_hsi.h"
 
 extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.0.0"
+#define DRV_MODULE_VERSION "8.7.1.20"
 
 #define MAX_HWFNS_PER_DEVICE    (4)
 #define NAME_SIZE 16
 #define VER_SIZE 16
 
+#define QED_WFQ_UNIT	100
+
 /* cau states */
 enum qed_coalescing_mode {
 	QED_COAL_MODE_DISABLE,
@@ -74,6 +76,51 @@
 	bool	*b_valid;
 };
 
+enum qed_tunn_mode {
+	QED_MODE_L2GENEVE_TUNN,
+	QED_MODE_IPGENEVE_TUNN,
+	QED_MODE_L2GRE_TUNN,
+	QED_MODE_IPGRE_TUNN,
+	QED_MODE_VXLAN_TUNN,
+};
+
+enum qed_tunn_clss {
+	QED_TUNN_CLSS_MAC_VLAN,
+	QED_TUNN_CLSS_MAC_VNI,
+	QED_TUNN_CLSS_INNER_MAC_VLAN,
+	QED_TUNN_CLSS_INNER_MAC_VNI,
+	MAX_QED_TUNN_CLSS,
+};
+
+struct qed_tunn_start_params {
+	unsigned long	tunn_mode;
+	u16		vxlan_udp_port;
+	u16		geneve_udp_port;
+	u8		update_vxlan_udp_port;
+	u8		update_geneve_udp_port;
+	u8		tunn_clss_vxlan;
+	u8		tunn_clss_l2geneve;
+	u8		tunn_clss_ipgeneve;
+	u8		tunn_clss_l2gre;
+	u8		tunn_clss_ipgre;
+};
+
+struct qed_tunn_update_params {
+	unsigned long	tunn_mode_update_mask;
+	unsigned long	tunn_mode;
+	u16		vxlan_udp_port;
+	u16		geneve_udp_port;
+	u8		update_rx_pf_clss;
+	u8		update_tx_pf_clss;
+	u8		update_vxlan_udp_port;
+	u8		update_geneve_udp_port;
+	u8		tunn_clss_vxlan;
+	u8		tunn_clss_l2geneve;
+	u8		tunn_clss_ipgeneve;
+	u8		tunn_clss_l2gre;
+	u8		tunn_clss_ipgre;
+};
+
 /* The PCI personality is not quite synonymous to protocol ID:
  * 1. All personalities need CORE connections
  * 2. The Ethernet personality may support also the RoCE protocol
@@ -105,6 +152,7 @@
 
 enum QED_FEATURE {
 	QED_PF_L2_QUE,
+	QED_VF,
 	QED_MAX_FEATURES,
 };
 
@@ -192,6 +240,12 @@
 	struct dmae_cmd *p_dmae_cmd;
 };
 
+struct qed_wfq_data {
+	/* when feature is configured for at least 1 vport */
+	u32	min_speed;
+	bool	configured;
+};
+
 struct qed_qm_info {
 	struct init_qm_pq_params	*qm_pq_params;
 	struct init_qm_vport_params	*qm_vport_params;
@@ -212,6 +266,7 @@
 	bool				vport_wfq_en;
 	u8				pf_wfq;
 	u32				pf_rl;
+	struct qed_wfq_data		*wfq_data;
 };
 
 struct storm_stats {
@@ -256,6 +311,8 @@
 	bool				first_on_engine;
 	bool				hw_init_done;
 
+	u8				num_funcs_on_engine;
+
 	/* BAR access */
 	void __iomem			*regview;
 	void __iomem			*doorbells;
@@ -306,8 +363,12 @@
 	/* True if the driver requests for the link */
 	bool				b_drv_link_init;
 
+	struct qed_vf_iov		*vf_iov_info;
+	struct qed_pf_iov		*pf_iov_info;
 	struct qed_mcp_info		*mcp_info;
 
+	struct qed_dcbx_info		*p_dcbx_info;
+
 	struct qed_hw_cid_data		*p_tx_cids;
 	struct qed_hw_cid_data		*p_rx_cids;
 
@@ -322,6 +383,12 @@
 
 	struct qed_simd_fp_handler	simd_proto_handler[64];
 
+#ifdef CONFIG_QED_SRIOV
+	struct workqueue_struct *iov_wq;
+	struct delayed_work iov_task;
+	unsigned long iov_task_flags;
+#endif
+
 	struct z_stream_s		*stream;
 };
 
@@ -430,6 +497,13 @@
 	u8				num_hwfns;
 	struct qed_hwfn			hwfns[MAX_HWFNS_PER_DEVICE];
 
+	/* SRIOV */
+	struct qed_hw_sriov_info *p_iov_info;
+#define IS_QED_SRIOV(cdev)              (!!(cdev)->p_iov_info)
+
+	unsigned long			tunn_mode;
+
+	bool				b_is_vf;
 	u32				drv_type;
 
 	struct qed_eth_stats		*reset_stats;
@@ -459,6 +533,8 @@
 	const struct firmware		*firmware;
 };
 
+#define NUM_OF_VFS(dev)         MAX_NUM_VFS_BB
+#define NUM_OF_L2_QUEUES(dev)	MAX_NUM_L2_QUEUES_BB
 #define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
 #define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB
 
@@ -480,6 +556,10 @@
 
 #define PURE_LB_TC 8
 
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 #define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
 
 /* Other Linux specific common definitions */
@@ -507,6 +587,4 @@
 
 int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
 
-#define QED_ETH_INTERFACE_VERSION       300
-
 #endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index fc767c0..ac284c5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -24,11 +24,13 @@
 #include "qed_hw.h"
 #include "qed_init_ops.h"
 #include "qed_reg_addr.h"
+#include "qed_sriov.h"
 
 /* Max number of connection types in HW (DQ/CDU etc.) */
 #define MAX_CONN_TYPES		PROTOCOLID_COMMON
 #define NUM_TASK_TYPES		2
 #define NUM_TASK_PF_SEGMENTS	4
+#define NUM_TASK_VF_SEGMENTS	1
 
 /* QM constants */
 #define QM_PQ_ELEMENT_SIZE	4 /* in bytes */
@@ -63,10 +65,12 @@
 struct qed_conn_type_cfg {
 	u32 cid_count;
 	u32 cid_start;
+	u32 cids_per_vf;
 };
 
 /* ILT Client configuration, Per connection type (protocol) resources. */
 #define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS       (1 + NUM_TASK_VF_SEGMENTS * 2)
 #define CDUC_BLK		(0)
 
 enum ilt_clients {
@@ -97,6 +101,10 @@
 	/* ILT client blocks for PF */
 	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
 	u32 pf_total_lines;
+
+	/* ILT client blocks for VFs */
+	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+	u32 vf_total_lines;
 };
 
 /* Per Path -
@@ -123,6 +131,11 @@
 	/* computed ILT structure */
 	struct qed_ilt_client_cfg	clients[ILT_CLI_MAX];
 
+	/* total number of VFs for this hwfn -
+	 * ALL VFs are symmetric in terms of HW resources
+	 */
+	u32				vf_count;
+
 	/* Acquired CIDs */
 	struct qed_cid_acquired_map	acquired[MAX_CONN_TYPES];
 
@@ -131,37 +144,60 @@
 	u32				pf_start_line;
 };
 
-static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct qed_cdu_iids {
+	u32 pf_cids;
+	u32 per_vf_cids;
+};
+
+static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
+			     struct qed_cdu_iids *iids)
 {
-	u32 type, pf_cids = 0;
+	u32 type;
 
-	for (type = 0; type < MAX_CONN_TYPES; type++)
-		pf_cids += p_mngr->conn_cfg[type].cid_count;
-
-	return pf_cids;
+	for (type = 0; type < MAX_CONN_TYPES; type++) {
+		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+	}
 }
 
 static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
 			    struct qed_qm_iids *iids)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	int type;
+	u32 vf_cids = 0, type;
 
-	for (type = 0; type < MAX_CONN_TYPES; type++)
+	for (type = 0; type < MAX_CONN_TYPES; type++) {
 		iids->cids += p_mngr->conn_cfg[type].cid_count;
+		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+	}
 
-	DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+	iids->vf_cids += vf_cids * p_mngr->vf_count;
+	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+		   "iids: CIDS %08x vf_cids %08x\n",
+		   iids->cids, iids->vf_cids);
 }
 
 /* set the iids count per protocol */
 static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
 					enum protocol_type type,
-					u32 cid_count)
+					u32 cid_count, u32 vf_cid_cnt)
 {
 	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
 	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
 
 	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+	p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
+}
+
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn		*p_hwfn,
+				enum protocol_type	type,
+				u32			*vf_cid)
+{
+	if (vf_cid)
+		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
 }
 
 static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
@@ -210,10 +246,12 @@
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	struct qed_ilt_client_cfg *p_cli;
 	struct qed_ilt_cli_blk *p_blk;
-	u32 curr_line, total, pf_cids;
+	struct qed_cdu_iids cdu_iids;
 	struct qed_qm_iids qm_iids;
+	u32 curr_line, total, i;
 
 	memset(&qm_iids, 0, sizeof(qm_iids));
+	memset(&cdu_iids, 0, sizeof(cdu_iids));
 
 	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
 
@@ -224,14 +262,16 @@
 	/* CDUC */
 	p_cli = &p_mngr->clients[ILT_CLI_CDUC];
 	curr_line = p_mngr->pf_start_line;
+
+	/* CDUC PF */
 	p_cli->pf_total_lines = 0;
 
 	/* get the counters for the CDUC and QM clients  */
-	pf_cids = qed_cxt_cdu_iids(p_mngr);
+	qed_cxt_cdu_iids(p_mngr, &cdu_iids);
 
 	p_blk = &p_cli->pf_blks[CDUC_BLK];
 
-	total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
 
 	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 			     total, CONN_CXT_SIZE(p_hwfn));
@@ -239,17 +279,36 @@
 	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
 	p_cli->pf_total_lines = curr_line - p_blk->start_line;
 
+	/* CDUC VF */
+	p_blk = &p_cli->vf_blks[CDUC_BLK];
+	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+			     total, CONN_CXT_SIZE(p_hwfn));
+
+	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+	p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+	for (i = 1; i < p_mngr->vf_count; i++)
+		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+				     ILT_CLI_CDUC);
+
 	/* QM */
 	p_cli = &p_mngr->clients[ILT_CLI_QM];
 	p_blk = &p_cli->pf_blks[0];
 
 	qed_cxt_qm_iids(p_hwfn, &qm_iids);
-	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
-				   p_hwfn->qm_info.num_pqs, 0);
+	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+				   qm_iids.vf_cids, 0,
+				   p_hwfn->qm_info.num_pqs,
+				   p_hwfn->qm_info.num_vf_pqs);
 
-	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
-		   "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
-		   qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_ILT,
+		   "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
+		   qm_iids.cids,
+		   qm_iids.vf_cids,
+		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
 
 	qed_ilt_cli_blk_fill(p_cli, p_blk,
 			     curr_line, total * 0x1000,
@@ -358,7 +417,7 @@
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	struct qed_ilt_client_cfg *clients = p_mngr->clients;
 	struct qed_ilt_cli_blk *p_blk;
-	u32 size, i, j;
+	u32 size, i, j, k;
 	int rc;
 
 	size = qed_cxt_ilt_shadow_size(clients);
@@ -383,6 +442,16 @@
 			if (rc != 0)
 				goto ilt_shadow_fail;
 		}
+		for (k = 0; k < p_mngr->vf_count; k++) {
+			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+				u32 lines = clients[i].vf_total_lines * k;
+
+				p_blk = &clients[i].vf_blks[j];
+				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
+				if (rc != 0)
+					goto ilt_shadow_fail;
+			}
+		}
 	}
 
 	return 0;
@@ -467,6 +536,9 @@
 	for (i = 0; i < ILT_CLI_MAX; i++)
 		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
 
+	if (p_hwfn->cdev->p_iov_info)
+		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+
 	/* Set the cxt mangr pointer priori to further allocations */
 	p_hwfn->p_cxt_mngr = p_mngr;
 
@@ -579,8 +651,10 @@
 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
 	params.is_first_pf = p_hwfn->first_on_engine;
 	params.num_pf_cids = iids.cids;
+	params.num_vf_cids = iids.vf_cids;
 	params.start_pq = qm_info->start_pq;
-	params.num_pf_pqs = qm_info->num_pqs;
+	params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+	params.num_vf_pqs = qm_info->num_vf_pqs;
 	params.start_vport = qm_info->start_vport;
 	params.num_vports = qm_info->num_vports;
 	params.pf_wfq = qm_info->pf_wfq;
@@ -610,26 +684,55 @@
 static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	u32 dq_pf_max_cid = 0;
+	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
 
 	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
 
+	dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
+
 	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
 
+	dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
+
 	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
 
+	dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
+
 	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
 
+	dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
+
 	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
 
-	/* 5 - PF */
+	dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
+
 	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+
+	dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
+
+	/* Connection types 6 & 7 are not in use, yet they must be configured
+	 * as the highest possible connection. Not configuring them means the
+	 * defaults will be used, and with a large number of cids a bug may
+	 * occur if the defaults are smaller than dq_pf_max_cid /
+	 * dq_vf_max_cid.
+	 */
+	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
 }
 
 static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
@@ -653,6 +756,38 @@
 	}
 }
 
+static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli;
+	u32 blk_factor;
+
+	/* For simplicity we set the 'block' to be an ILT page */
+	if (p_hwfn->cdev->p_iov_info) {
+		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+		STORE_RT_REG(p_hwfn,
+			     PSWRQ2_REG_VF_BASE_RT_OFFSET,
+			     p_iov->first_vf_in_pf);
+		STORE_RT_REG(p_hwfn,
+			     PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+			     p_iov->first_vf_in_pf + p_iov->total_vfs);
+	}
+
+	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+	if (p_cli->active) {
+		STORE_RT_REG(p_hwfn,
+			     PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+			     blk_factor);
+		STORE_RT_REG(p_hwfn,
+			     PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+			     p_cli->pf_total_lines);
+		STORE_RT_REG(p_hwfn,
+			     PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+			     p_cli->vf_total_lines);
+	}
+}
+
 /* ILT (PSWRQ2) PF */
 static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
 {
@@ -662,6 +797,7 @@
 	u32 line, rt_offst, i;
 
 	qed_ilt_bounds_init(p_hwfn);
+	qed_ilt_vf_bounds_init(p_hwfn);
 
 	p_mngr = p_hwfn->p_cxt_mngr;
 	p_shdw = p_mngr->ilt_shadow;
@@ -839,10 +975,10 @@
 	/* Set the number of required CORE connections */
 	u32 core_cids = 1; /* SPQ */
 
-	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
 
 	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
-				    p_params->num_cons);
+				    p_params->num_cons, 1);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c8e1f5e..234c0fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -51,6 +51,9 @@
 	QED_ELEM_TASK
 };
 
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+				enum protocol_type type, u32 *vf_cid);
+
 /**
  * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
  *
@@ -128,6 +131,16 @@
 void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
 
 /**
+ * @brief Reconfigures QM pf on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
  * @brief qed_cxt_release - Release a cid
  *
  * @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
new file mode 100644
index 0000000..cbf58e1
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -0,0 +1,562 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+
+#define QED_DCBX_MAX_MIB_READ_TRY       (100)
+#define QED_ETH_TYPE_DEFAULT            (0)
+#define QED_ETH_TYPE_ROCE               (0x8915)
+#define QED_UDP_PORT_TYPE_ROCE_V2       (0x12B7)
+#define QED_ETH_TYPE_FCOE               (0x8906)
+#define QED_TCP_PORT_ISCSI              (0xCBC)
+
+#define QED_DCBX_INVALID_PRIORITY       0xFF
+
+/* Get the Traffic Class from the priority-to-TC table; each priority is
+ * represented by a 4-bit field holding its traffic class.
+ */
+#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \
+	((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
+
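A worked example of the QED_DCBX_PRIO2TC() nibble extraction above (priority 0 lives in the table's top nibble, priority 7 in the lowest):

#include <stdio.h>
#include <stdint.h>

/* same extraction as QED_DCBX_PRIO2TC in the patch */
#define PRIO2TC(tbl, prio) ((uint32_t)((tbl) >> ((7 - (prio)) * 4)) & 0x7)

int main(void)
{
	uint32_t tbl = 0x01234567;	/* prio 0 -> TC 0, prio 7 -> TC 7 */
	unsigned int prio;

	for (prio = 0; prio < 8; prio++)
		printf("prio %u -> tc %u\n", prio, (unsigned)PRIO2TC(tbl, prio));
	return 0;
}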
+static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
+	{DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
+	{DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
+	{DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
+	{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
+	{DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+};
+
+static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+	return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+		  DCBX_APP_SF_ETHTYPE);
+}
+
+static bool qed_dcbx_app_port(u32 app_info_bitmap)
+{
+	return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+		  DCBX_APP_SF_PORT);
+}
+
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+	return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+		  proto_id == QED_ETH_TYPE_DEFAULT);
+}
+
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+	return !!(qed_dcbx_app_port(app_info_bitmap) &&
+		  proto_id == QED_TCP_PORT_ISCSI);
+}
+
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+	return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+		  proto_id == QED_ETH_TYPE_FCOE);
+}
+
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+	return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+		  proto_id == QED_ETH_TYPE_ROCE);
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+	return !!(qed_dcbx_app_port(app_info_bitmap) &&
+		  proto_id == QED_UDP_PORT_TYPE_ROCE_V2);
+}
+
+static void
+qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
+{
+	enum dcbx_protocol_type id;
+	int i;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n",
+		   p_data->dcbx_enabled);
+
+	for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+		id = qed_dcbx_app_update[i].id;
+
+		DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+			   "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
+			   qed_dcbx_app_update[i].name, p_data->arr[id].update,
+			   p_data->arr[id].enable, p_data->arr[id].priority,
+			   p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+	}
+}
+
+static void
+qed_dcbx_set_params(struct qed_dcbx_results *p_data,
+		    struct qed_hw_info *p_info,
+		    bool enable,
+		    bool update,
+		    u8 prio,
+		    u8 tc,
+		    enum dcbx_protocol_type type,
+		    enum qed_pci_personality personality)
+{
+	/* PF update ramrod data */
+	p_data->arr[type].update = update;
+	p_data->arr[type].enable = enable;
+	p_data->arr[type].priority = prio;
+	p_data->arr[type].tc = tc;
+
+	/* QM reconf data */
+	if (p_info->personality == personality) {
+		if (personality == QED_PCI_ETH)
+			p_info->non_offload_tc = tc;
+		else
+			p_info->offload_tc = tc;
+	}
+}
+
+/* Update app protocol data and hw_info fields with the TLV info */
+static void
+qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
+			 struct qed_hwfn *p_hwfn,
+			 bool enable,
+			 bool update,
+			 u8 prio, u8 tc, enum dcbx_protocol_type type)
+{
+	struct qed_hw_info *p_info = &p_hwfn->hw_info;
+	enum qed_pci_personality personality;
+	enum dcbx_protocol_type id;
+	char *name;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+		id = qed_dcbx_app_update[i].id;
+
+		if (type != id)
+			continue;
+
+		personality = qed_dcbx_app_update[i].personality;
+		name = qed_dcbx_app_update[i].name;
+
+		qed_dcbx_set_params(p_data, p_info, enable, update,
+				    prio, tc, type, personality);
+	}
+}
+
+static bool
+qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
+			       u32 app_prio_bitmap,
+			       u16 id, enum dcbx_protocol_type *type)
+{
+	if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) {
+		*type = DCBX_PROTOCOL_FCOE;
+	} else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) {
+		*type = DCBX_PROTOCOL_ROCE;
+	} else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
+		*type = DCBX_PROTOCOL_ISCSI;
+	} else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) {
+		*type = DCBX_PROTOCOL_ETH;
+	} else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) {
+		*type = DCBX_PROTOCOL_ROCE_V2;
+	} else {
+		*type = DCBX_MAX_PROTOCOL_TYPE;
+		DP_ERR(p_hwfn,
+		       "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
+		       id, app_prio_bitmap);
+		return false;
+	}
+
+	return true;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static int
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+		     struct qed_dcbx_results *p_data,
+		     struct dcbx_app_priority_entry *p_tbl,
+		     u32 pri_tc_tbl, int count, bool dcbx_enabled)
+{
+	u8 tc, priority, priority_map;
+	enum dcbx_protocol_type type;
+	u16 protocol_id;
+	bool enable;
+	int i;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+
+	/* Parse APP TLV */
+	for (i = 0; i < count; i++) {
+		protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+						DCBX_APP_PROTOCOL_ID);
+		priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry,
+						 DCBX_APP_PRI_MAP);
+		priority = ffs(priority_map) - 1;
+		if (priority < 0) {
+			DP_ERR(p_hwfn, "Invalid priority\n");
+			return -EINVAL;
+		}
+
+		tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
+		if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+						   protocol_id, &type)) {
+			/* ETH always has the enable bit reset, as it gets
+			 * vlan information per packet. For other protocols,
+			 * the bit should be set according to the dcbx_enabled
+			 * indication, but we only got here if there was an
+			 * app tlv for the protocol, so dcbx must be enabled.
+			 */
+			enable = !!(type == DCBX_PROTOCOL_ETH);
+
+			qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+						 priority, tc, type);
+		}
+	}
+
+	/* If the RoCE-V2 TLV is not detected, the driver needs to use the
+	 * RoCE app data for RoCE-v2, not the default app data.
+	 */
+	if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+	    p_data->arr[DCBX_PROTOCOL_ROCE].update) {
+		tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc;
+		priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority;
+		qed_dcbx_update_app_info(p_data, p_hwfn, true, true,
+					 priority, tc, DCBX_PROTOCOL_ROCE_V2);
+	}
+
+	/* Update ramrod protocol data and hw_info fields
+	 * with default info when corresponding APP TLV's are not detected.
+	 * The enabled field has different logic for ethernet: only for
+	 * ethernet should dcb be disabled by default, as the information
+	 * arrives from the OS (unless an explicit app tlv was present).
+	 */
+	tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+	priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+	for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+		if (p_data->arr[type].update)
+			continue;
+
+		enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled;
+		qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+					 priority, tc, type);
+	}
+
+	return 0;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+{
+	struct dcbx_app_priority_feature *p_app;
+	struct dcbx_app_priority_entry *p_tbl;
+	struct qed_dcbx_results data = { 0 };
+	struct dcbx_ets_feature *p_ets;
+	struct qed_hw_info *p_info;
+	u32 pri_tc_tbl, flags;
+	bool dcbx_enabled;
+	int num_entries;
+	int rc = 0;
+
+	/* If the DCBx version is non-zero, then negotiation was
+	 * successfully performed
+	 */
+	flags = p_hwfn->p_dcbx_info->operational.flags;
+	dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+
+	p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+	p_tbl = p_app->app_pri_tbl;
+
+	p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+	pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+	p_info = &p_hwfn->hw_info;
+	num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+	rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+				  num_entries, dcbx_enabled);
+	if (rc)
+		return rc;
+
+	p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+	data.pf_id = p_hwfn->rel_pf_id;
+	data.dcbx_enabled = dcbx_enabled;
+
+	qed_dcbx_dp_protocol(p_hwfn, &data);
+
+	memcpy(&p_hwfn->p_dcbx_info->results, &data,
+	       sizeof(struct qed_dcbx_results));
+
+	return 0;
+}
+
+static int
+qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
+		  struct qed_ptt *p_ptt,
+		  struct qed_dcbx_mib_meta_data *p_data,
+		  enum qed_mib_read_type type)
+{
+	u32 prefix_seq_num, suffix_seq_num;
+	int read_count = 0;
+	int rc = 0;
+
+	/* The data is considered to be valid only if both sequence numbers are
+	 * the same.
+	 */
+	do {
+		if (type == QED_DCBX_REMOTE_LLDP_MIB) {
+			qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+					p_data->addr, p_data->size);
+			prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+			suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+		} else {
+			qed_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+					p_data->addr, p_data->size);
+			prefix_seq_num = p_data->mib->prefix_seq_num;
+			suffix_seq_num = p_data->mib->suffix_seq_num;
+		}
+		read_count++;
+
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_DCB,
+			   "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+			   type, read_count, prefix_seq_num, suffix_seq_num);
+	} while ((prefix_seq_num != suffix_seq_num) &&
+		 (read_count < QED_DCBX_MAX_MIB_READ_TRY));
+
+	if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) {
+		DP_ERR(p_hwfn,
+		       "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+		       type, read_count, prefix_seq_num, suffix_seq_num);
+		rc = -EIO;
+	}
+
+	return rc;
+}
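
The prefix/suffix sequence-number check above is a lock-free consistency
pattern: a copy from shared memory is only trusted once both sequence numbers
match, with a bounded number of retries. A minimal sketch of the same pattern
over a hypothetical shared structure (not the driver code):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_READ_TRY	100

/* Hypothetical block guarded by prefix/suffix sequence numbers, as in
 * qed_dcbx_copy_mib(): the writer bumps both numbers around an update,
 * so a torn read shows a mismatch.
 */
struct seq_guarded_blk {
	uint32_t prefix_seq_num;
	uint8_t payload[64];
	uint32_t suffix_seq_num;
};

static bool copy_consistent(const volatile struct seq_guarded_blk *src,
			    struct seq_guarded_blk *dst)
{
	int tries = 0;

	do {
		memcpy(dst, (const void *)src, sizeof(*dst));
		tries++;
	} while (dst->prefix_seq_num != dst->suffix_seq_num &&
		 tries < MAX_READ_TRY);

	return dst->prefix_seq_num == dst->suffix_seq_num;
}
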
+
+static int
+qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	struct qed_dcbx_mib_meta_data data;
+	int rc = 0;
+
+	memset(&data, 0, sizeof(data));
+	data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+							   lldp_config_params);
+	data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+	data.size = sizeof(struct lldp_config_params_s);
+	qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+	return rc;
+}
+
+static int
+qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
+			      enum qed_mib_read_type type)
+{
+	struct qed_dcbx_mib_meta_data data;
+	int rc = 0;
+
+	memset(&data, 0, sizeof(data));
+	data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+							   lldp_status_params);
+	data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+	data.size = sizeof(struct lldp_status_params_s);
+	rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+	return rc;
+}
+
+static int
+qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
+			      enum qed_mib_read_type type)
+{
+	struct qed_dcbx_mib_meta_data data;
+	int rc = 0;
+
+	memset(&data, 0, sizeof(data));
+	data.addr = p_hwfn->mcp_info->port_addr +
+		    offsetof(struct public_port, operational_dcbx_mib);
+	data.mib = &p_hwfn->p_dcbx_info->operational;
+	data.size = sizeof(struct dcbx_mib);
+	rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+	return rc;
+}
+
+static int
+qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+	struct qed_dcbx_mib_meta_data data;
+	int rc = 0;
+
+	memset(&data, 0, sizeof(data));
+	data.addr = p_hwfn->mcp_info->port_addr +
+		    offsetof(struct public_port, remote_dcbx_mib);
+	data.mib = &p_hwfn->p_dcbx_info->remote;
+	data.size = sizeof(struct dcbx_mib);
+	rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+	return rc;
+}
+
+static int
+qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	struct qed_dcbx_mib_meta_data data;
+	int rc = 0;
+
+	memset(&data, 0, sizeof(data));
+	data.addr = p_hwfn->mcp_info->port_addr +
+		    offsetof(struct public_port, local_admin_dcbx_mib);
+	data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+	data.size = sizeof(struct dcbx_local_params);
+	qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size);
+
+	return rc;
+}
+
+static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+	int rc = -EINVAL;
+
+	switch (type) {
+	case QED_DCBX_OPERATIONAL_MIB:
+		rc = qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+		break;
+	case QED_DCBX_REMOTE_MIB:
+		rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+		break;
+	case QED_DCBX_LOCAL_MIB:
+		rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt);
+		break;
+	case QED_DCBX_REMOTE_LLDP_MIB:
+		rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+		break;
+	case QED_DCBX_LOCAL_LLDP_MIB:
+		rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+		break;
+	default:
+		DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+	}
+
+	return rc;
+}
+
+/* Read updated MIB.
+ * Reconfigure QM and invoke PF update ramrod command if operational MIB
+ * change is detected.
+ */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+	int rc = 0;
+
+	rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+	if (rc)
+		return rc;
+
+	if (type == QED_DCBX_OPERATIONAL_MIB) {
+		rc = qed_dcbx_process_mib_info(p_hwfn);
+		if (!rc) {
+			/* reconfigure tcs of QM queues according
+			 * to negotiation results
+			 */
+			qed_qm_reconf(p_hwfn, p_ptt);
+
+			/* update storm FW with negotiation results */
+			qed_sp_pf_update(p_hwfn);
+		}
+	}
+
+	return rc;
+}
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
+{
+	int rc = 0;
+
+	p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
+	if (!p_hwfn->p_dcbx_info) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to allocate 'struct qed_dcbx_info'\n");
+		rc = -ENOMEM;
+	}
+
+	return rc;
+}
+
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn,
+			struct qed_dcbx_info *p_dcbx_info)
+{
+	kfree(p_hwfn->p_dcbx_info);
+}
+
+static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+					  struct qed_dcbx_results *p_src,
+					  enum dcbx_protocol_type type)
+{
+	p_data->dcb_enable_flag = p_src->arr[type].enable;
+	p_data->dcb_priority = p_src->arr[type].priority;
+	p_data->dcb_tc = p_src->arr[type].tc;
+}
+
+/* Set pf update ramrod command params */
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+				   struct pf_update_ramrod_data *p_dest)
+{
+	struct protocol_dcb_data *p_dcb_data;
+	bool update_flag = false;
+
+	p_dest->pf_id = p_src->pf_id;
+
+	update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
+	p_dest->update_fcoe_dcb_data_flag = update_flag;
+
+	update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
+	p_dest->update_roce_dcb_data_flag = update_flag;
+	update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
+	p_dest->update_roce_dcb_data_flag = update_flag;
+
+	update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
+	p_dest->update_iscsi_dcb_data_flag = update_flag;
+	update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
+	p_dest->update_eth_dcb_data_flag = update_flag;
+
+	p_dcb_data = &p_dest->fcoe_dcb_data;
+	qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
+	p_dcb_data = &p_dest->roce_dcb_data;
+
+	if (p_src->arr[DCBX_PROTOCOL_ROCE].update)
+		qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+					      DCBX_PROTOCOL_ROCE);
+	if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update)
+		qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+					      DCBX_PROTOCOL_ROCE_V2);
+
+	p_dcb_data = &p_dest->iscsi_dcb_data;
+	qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
+	p_dcb_data = &p_dest->eth_dcb_data;
+	qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
new file mode 100644
index 0000000..e7f834d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -0,0 +1,80 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DCBX_H
+#define _QED_DCBX_H
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL    4
+
+enum qed_mib_read_type {
+	QED_DCBX_OPERATIONAL_MIB,
+	QED_DCBX_REMOTE_MIB,
+	QED_DCBX_LOCAL_MIB,
+	QED_DCBX_REMOTE_LLDP_MIB,
+	QED_DCBX_LOCAL_LLDP_MIB
+};
+
+struct qed_dcbx_app_data {
+	bool enable;		/* DCB enabled */
+	bool update;		/* Update indication */
+	u8 priority;		/* Priority */
+	u8 tc;			/* Traffic Class */
+};
+
+struct qed_dcbx_results {
+	bool dcbx_enabled;
+	u8 pf_id;
+	struct qed_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct qed_dcbx_app_metadata {
+	enum dcbx_protocol_type id;
+	char *name;
+	enum qed_pci_personality personality;
+};
+
+#define QED_MFW_GET_FIELD(name, field) \
+	(((name) & (field ## _MASK)) >> (field ## _SHIFT))
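
QED_MFW_GET_FIELD relies on the name##_MASK / name##_SHIFT pairs defined in
the HSI headers: mask the field bits in place, then shift them down to bit 0.
A small standalone example with hypothetical mask/shift values:

#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_ENTRIES_MASK	0x00ff0000	/* hypothetical field layout */
#define DEMO_NUM_ENTRIES_SHIFT	16

#define DEMO_GET_FIELD(val, field) \
	(((val) & (field ## _MASK)) >> (field ## _SHIFT))

int main(void)
{
	uint32_t flags = 0x00050000;	/* 5 encoded in bits 16-23 */

	printf("num entries = %u\n", DEMO_GET_FIELD(flags, DEMO_NUM_ENTRIES));
	return 0;
}
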
+
+struct qed_dcbx_info {
+	struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+	struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+	struct dcbx_local_params local_admin;
+	struct qed_dcbx_results results;
+	struct dcbx_mib operational;
+	struct dcbx_mib remote;
+	u8 dcbx_cap;
+};
+
+struct qed_dcbx_mib_meta_data {
+	struct lldp_config_params_s *lldp_local;
+	struct lldp_status_params_s *lldp_remote;
+	struct dcbx_local_params *local_admin;
+	struct dcbx_mib *mib;
+	size_t size;
+	u32 addr;
+};
+
+/* QED local interface routines */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *,
+			  struct qed_ptt *, enum qed_mib_read_type);
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *);
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+				   struct pf_update_ramrod_data *p_dest);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index b7d100f..089016f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -22,6 +22,7 @@
 #include <linux/qed/qed_if.h>
 #include "qed.h"
 #include "qed_cxt.h"
+#include "qed_dcbx.h"
 #include "qed_dev_api.h"
 #include "qed_hsi.h"
 #include "qed_hw.h"
@@ -30,6 +31,11 @@
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static spinlock_t qm_lock;
+static bool qm_lock_init = false;
 
 /* API common to all protocols */
 enum BAR_ID {
@@ -40,10 +46,14 @@
 static u32 qed_hw_bar_size(struct qed_hwfn	*p_hwfn,
 			   enum BAR_ID		bar_id)
 {
-	u32	bar_reg = (bar_id == BAR_ID_0 ?
-			   PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
-	u32	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+	u32 bar_reg = (bar_id == BAR_ID_0 ?
+		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+	u32 val;
 
+	if (IS_VF(p_hwfn->cdev))
+		return 1 << 17;
+
+	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
 	if (val)
 		return 1 << (val + 15);
 
@@ -105,12 +115,17 @@
 	qm_info->qm_vport_params = NULL;
 	kfree(qm_info->qm_port_params);
 	qm_info->qm_port_params = NULL;
+	kfree(qm_info->wfq_data);
+	qm_info->wfq_data = NULL;
 }
 
 void qed_resc_free(struct qed_dev *cdev)
 {
 	int i;
 
+	if (IS_VF(cdev))
+		return;
+
 	kfree(cdev->fw_data);
 	cdev->fw_data = NULL;
 
@@ -134,20 +149,27 @@
 		qed_eq_free(p_hwfn, p_hwfn->p_eq);
 		qed_consq_free(p_hwfn, p_hwfn->p_consq);
 		qed_int_free(p_hwfn);
+		qed_iov_free(p_hwfn);
 		qed_dmae_info_free(p_hwfn);
+		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
 	}
 }
 
 static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 {
+	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 	struct init_qm_port_params *p_qm_port;
-	u8 num_vports, i, vport_id, num_ports;
 	u16 num_pqs, multi_cos_tcs = 1;
+	u16 num_vfs = 0;
 
+#ifdef CONFIG_QED_SRIOV
+	if (p_hwfn->cdev->p_iov_info)
+		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+#endif
 	memset(qm_info, 0, sizeof(*qm_info));
 
-	num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
 	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
 
 	/* Sanity checking that setup requires legal number of resources */
@@ -175,25 +197,50 @@
 	if (!qm_info->qm_port_params)
 		goto alloc_err;
 
+	qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
+				    GFP_KERNEL);
+	if (!qm_info->wfq_data)
+		goto alloc_err;
+
 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
 
 	/* First init per-TC PQs */
 	for (i = 0; i < multi_cos_tcs; i++) {
-		struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+		struct init_qm_pq_params *params =
+		    &qm_info->qm_pq_params[curr_queue++];
 
-		params->vport_id = vport_id;
-		params->tc_id = p_hwfn->hw_info.non_offload_tc;
-		params->wrr_group = 1;
+		if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
+			params->vport_id = vport_id;
+			params->tc_id = p_hwfn->hw_info.non_offload_tc;
+			params->wrr_group = 1;
+		} else {
+			params->vport_id = vport_id;
+			params->tc_id = p_hwfn->hw_info.offload_tc;
+			params->wrr_group = 1;
+		}
 	}
 
 	/* Then init pure-LB PQ */
-	qm_info->pure_lb_pq = i;
-	qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
-	qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
-	qm_info->qm_pq_params[i].wrr_group = 1;
-	i++;
+	qm_info->pure_lb_pq = curr_queue;
+	qm_info->qm_pq_params[curr_queue].vport_id =
+	    (u8) RESC_START(p_hwfn, QED_VPORT);
+	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
+	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+	curr_queue++;
 
 	qm_info->offload_pq = 0;
+	/* Then init per-VF PQs */
+	vf_offset = curr_queue;
+	for (i = 0; i < num_vfs; i++) {
+		/* First vport is used by the PF */
+		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
+		qm_info->qm_pq_params[curr_queue].tc_id =
+		    p_hwfn->hw_info.non_offload_tc;
+		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+		curr_queue++;
+	}
+
+	qm_info->vf_queues_offset = vf_offset;
 	qm_info->num_pqs = num_pqs;
 	qm_info->num_vports = num_vports;
 
@@ -211,29 +258,91 @@
 
 	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
 
-	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+	qm_info->num_vf_pqs = num_vfs;
+	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+	for (i = 0; i < qm_info->num_vports; i++)
+		qm_info->qm_vport_params[i].vport_wfq = 1;
 
 	qm_info->pf_wfq = 0;
 	qm_info->pf_rl = 0;
 	qm_info->vport_rl_en = 1;
+	qm_info->vport_wfq_en = 1;
 
 	return 0;
 
 alloc_err:
 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
-	kfree(qm_info->qm_pq_params);
-	kfree(qm_info->qm_vport_params);
-	kfree(qm_info->qm_port_params);
-
+	qed_qm_info_free(p_hwfn);
 	return -ENOMEM;
 }
 
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate init tool in QM_PF stage
+ * 5. send an sdm_qm_cmd through the rbc interface to release the QM
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+	bool b_rc;
+	int rc;
+
+	/* qm_info is allocated in qed_init_qm_info() which is already called
+	 * from qed_resc_alloc() or a previous call of qed_qm_reconf().
+	 * The allocated size may change on each init, so we free it before
+	 * the next allocation.
+	 */
+	qed_qm_info_free(p_hwfn);
+
+	/* initialize qed's qm data structure */
+	rc = qed_init_qm_info(p_hwfn);
+	if (rc)
+		return rc;
+
+	/* stop PF's qm queues */
+	spin_lock_bh(&qm_lock);
+	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+				    qm_info->start_pq, qm_info->num_pqs);
+	spin_unlock_bh(&qm_lock);
+	if (!b_rc)
+		return -EINVAL;
+
+	/* clear the QM_PF runtime phase leftovers from previous init */
+	qed_init_clear_rt_data(p_hwfn);
+
+	/* prepare QM portion of runtime array */
+	qed_qm_init_pf(p_hwfn);
+
+	/* activate init tool on runtime array */
+	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+			  p_hwfn->hw_info.hw_mode);
+	if (rc)
+		return rc;
+
+	/* start PF's qm queues */
+	spin_lock_bh(&qm_lock);
+	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+				    qm_info->start_pq, qm_info->num_pqs);
+	spin_unlock_bh(&qm_lock);
+	if (!b_rc)
+		return -EINVAL;
+
+	return 0;
+}
+
 int qed_resc_alloc(struct qed_dev *cdev)
 {
 	struct qed_consq *p_consq;
 	struct qed_eq *p_eq;
 	int i, rc = 0;
 
+	if (IS_VF(cdev))
+		return rc;
+
 	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
 	if (!cdev->fw_data)
 		return -ENOMEM;
@@ -308,6 +417,10 @@
 		if (rc)
 			goto alloc_err;
 
+		rc = qed_iov_alloc(p_hwfn);
+		if (rc)
+			goto alloc_err;
+
 		/* EQ */
 		p_eq = qed_eq_alloc(p_hwfn, 256);
 		if (!p_eq) {
@@ -330,6 +443,14 @@
 				  "Failed to allocate memory for dmae_info structure\n");
 			goto alloc_err;
 		}
+
+		/* DCBX initialization */
+		rc = qed_dcbx_info_alloc(p_hwfn);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to allocate memory for dcbx structure\n");
+			goto alloc_err;
+		}
 	}
 
 	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
@@ -350,6 +471,9 @@
 {
 	int i;
 
+	if (IS_VF(cdev))
+		return;
+
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
@@ -365,14 +489,15 @@
 		       p_hwfn->mcp_info->mfw_mb_length);
 
 		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
 	}
 }
 
 #define FINAL_CLEANUP_POLL_CNT          (100)
 #define FINAL_CLEANUP_POLL_TIME         (10)
 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt,
-		      u16 id)
+		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
 {
 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
 	int rc = -EBUSY;
@@ -380,6 +505,9 @@
 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
 		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
 
+	if (is_vf)
+		id += 0x10;
+
 	command |= X_FINAL_CLEANUP_AGG_INT <<
 		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
 	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
@@ -492,7 +620,9 @@
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 	struct qed_qm_common_rt_init_params params;
 	struct qed_dev *cdev = p_hwfn->cdev;
+	u32 concrete_fid;
 	int rc = 0;
+	u8 vf_id;
 
 	qed_init_cau_rt_data(cdev);
 
@@ -542,6 +672,14 @@
 	qed_wr(p_hwfn, p_ptt, 0x20b4,
 	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
 
+	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
+		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+	}
+	/* pretend to original PF */
+	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
 	return rc;
 }
 
@@ -558,6 +696,7 @@
 
 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt,
+			  struct qed_tunn_start_params *p_tunn,
 			  int hw_mode,
 			  bool b_hw_start,
 			  enum qed_int_mode int_mode,
@@ -574,7 +713,7 @@
 			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
 
 		/* Update rate limit once we'll actually have a link */
-		p_hwfn->qm_info.pf_rl = 100;
+		p_hwfn->qm_info.pf_rl = 100000;
 	}
 
 	qed_cxt_hw_init_pf(p_hwfn);
@@ -603,7 +742,7 @@
 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
 
 	/* Cleanup chip from previous driver if such remains exist */
-	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
 	if (rc != 0)
 		return rc;
 
@@ -625,7 +764,8 @@
 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
 
 		/* send function start command */
-		rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
+				     allow_npar_tx_switch);
 		if (rc)
 			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
 	}
@@ -672,6 +812,7 @@
 }
 
 int qed_hw_init(struct qed_dev *cdev,
+		struct qed_tunn_start_params *p_tunn,
 		bool b_hw_start,
 		enum qed_int_mode int_mode,
 		bool allow_npar_tx_switch,
@@ -680,13 +821,20 @@
 	u32 load_code, param;
 	int rc, mfw_rc, i;
 
-	rc = qed_init_fw_data(cdev, bin_fw_data);
-	if (rc != 0)
-		return rc;
+	if (IS_PF(cdev)) {
+		rc = qed_init_fw_data(cdev, bin_fw_data);
+		if (rc != 0)
+			return rc;
+	}
 
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
+		if (IS_VF(cdev)) {
+			p_hwfn->b_int_enabled = 1;
+			continue;
+		}
+
 		/* Enable DMAE in PXP */
 		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
 
@@ -708,6 +856,11 @@
 		p_hwfn->first_on_engine = (load_code ==
 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
 
+		if (!qm_lock_init) {
+			spin_lock_init(&qm_lock);
+			qm_lock_init = true;
+		}
+
 		switch (load_code) {
 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -724,7 +877,7 @@
 		/* Fall into */
 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
-					    p_hwfn->hw_info.hw_mode,
+					    p_tunn, p_hwfn->hw_info.hw_mode,
 					    b_hw_start, int_mode,
 					    allow_npar_tx_switch);
 			break;
@@ -749,6 +902,20 @@
 			return mfw_rc;
 		}
 
+		/* send DCBX attention request command */
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_DCB,
+			   "sending phony dcbx set command to trigger DCBx attention handling\n");
+		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+				     DRV_MSG_CODE_SET_DCBX,
+				     1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+				     &load_code, &param);
+		if (mfw_rc) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to send DCBX attention request\n");
+			return mfw_rc;
+		}
+
 		p_hwfn->hw_init_done = true;
 	}
 
@@ -811,6 +978,11 @@
 
 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
 
+		if (IS_VF(cdev)) {
+			qed_vf_pf_int_cleanup(p_hwfn);
+			continue;
+		}
+
 		/* mark the hw as uninitialized... */
 		p_hwfn->hw_init_done = false;
 
@@ -842,15 +1014,16 @@
 		usleep_range(1000, 2000);
 	}
 
-	/* Disable DMAE in PXP - in CMT, this should only be done for
-	 * first hw-function, and only after all transactions have
-	 * stopped for all active hw-functions.
-	 */
-	t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
-				   cdev->hwfns[0].p_main_ptt,
-				   false);
-	if (t_rc != 0)
-		rc = t_rc;
+	if (IS_PF(cdev)) {
+		/* Disable DMAE in PXP - in CMT, this should only be done for
+		 * first hw-function, and only after all transactions have
+		 * stopped for all active hw-functions.
+		 */
+		t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+					   cdev->hwfns[0].p_main_ptt, false);
+		if (t_rc != 0)
+			rc = t_rc;
+	}
 
 	return rc;
 }
@@ -861,7 +1034,12 @@
 
 	for_each_hwfn(cdev, j) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
-		struct qed_ptt *p_ptt   = p_hwfn->p_main_ptt;
+		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+		if (IS_VF(cdev)) {
+			qed_vf_pf_int_cleanup(p_hwfn);
+			continue;
+		}
 
 		DP_VERBOSE(p_hwfn,
 			   NETIF_MSG_IFDOWN,
@@ -885,6 +1063,9 @@
 
 void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
 {
+	if (IS_VF(p_hwfn->cdev))
+		return;
+
 	/* Re-open incoming traffic */
 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
@@ -914,6 +1095,13 @@
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
+		if (IS_VF(cdev)) {
+			rc = qed_vf_pf_reset(p_hwfn);
+			if (rc)
+				return rc;
+			continue;
+		}
+
 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
 
 		/* Check for incorrect states */
@@ -1009,13 +1197,19 @@
 static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 {
 	u32 *resc_start = p_hwfn->hw_info.resc_start;
+	u8 num_funcs = p_hwfn->num_funcs_on_engine;
 	u32 *resc_num = p_hwfn->hw_info.resc_num;
 	struct qed_sb_cnt_info sb_cnt_info;
-	int num_funcs, i;
-
-	num_funcs = MAX_NUM_PFS_BB;
+	int i, max_vf_vlan_filters;
 
 	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+
+#ifdef CONFIG_QED_SRIOV
+	max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
+#else
+	max_vf_vlan_filters = 0;
+#endif
+
 	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
 
 	resc_num[QED_SB] = min_t(u32,
@@ -1220,6 +1414,51 @@
 	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
 }
 
+static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	u32 reg_function_hide, tmp, eng_mask;
+	u8 num_funcs;
+
+	num_funcs = MAX_NUM_PFS_BB;
+
+	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+	 * in the other bits are selected.
+	 * Bits 1-15 are for functions 1-15, respectively, and their value is
+	 * '0' only for enabled functions (function 0 always exists and is
+	 * enabled).
+	 * In case of CMT, only the "even" functions are enabled, and thus the
+	 * number of functions for both hwfns is learnt from the same bits.
+	 */
+	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+
+	if (reg_function_hide & 0x1) {
+		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
+			num_funcs = 0;
+			eng_mask = 0xaaaa;
+		} else {
+			num_funcs = 1;
+			eng_mask = 0x5554;
+		}
+
+		/* Get the number of the enabled functions on the engine */
+		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+		while (tmp) {
+			if (tmp & 0x1)
+				num_funcs++;
+			tmp >>= 0x1;
+		}
+	}
+
+	p_hwfn->num_funcs_on_engine = num_funcs;
+
+	DP_VERBOSE(p_hwfn,
+		   NETIF_MSG_PROBE,
+		   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+		   p_hwfn->rel_pf_id,
+		   p_hwfn->abs_pf_id,
+		   p_hwfn->num_funcs_on_engine);
+}
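
A quick userspace walk-through of the counting logic above, with a
hypothetical register value (functions 0, 2 and 4 enabled, so their bits
read as '0'):

#include <stdint.h>
#include <stdio.h>

static unsigned int count_funcs(uint32_t reg_function_hide, uint32_t eng_mask)
{
	unsigned int num_funcs = 1;	/* function 0 is always enabled */
	uint32_t tmp;

	if (!(reg_function_hide & 0x1))
		return num_funcs;	/* hide bits not valid; sketch only */

	tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
	while (tmp) {
		if (tmp & 0x1)
			num_funcs++;
		tmp >>= 1;
	}
	return num_funcs;
}

int main(void)
{
	printf("enabled functions: %u\n", count_funcs(0xffeb, 0x5554));
	return 0;
}
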
+
 static int
 qed_get_hw_info(struct qed_hwfn *p_hwfn,
 		struct qed_ptt *p_ptt,
@@ -1228,6 +1467,13 @@
 	u32 port_mode;
 	int rc;
 
+	/* Since all information is common, only the first hwfn should do this */
+	if (IS_LEAD_HWFN(p_hwfn)) {
+		rc = qed_iov_hw_info(p_hwfn);
+		if (rc)
+			return rc;
+	}
+
 	/* Read the port mode */
 	port_mode = qed_rd(p_hwfn, p_ptt,
 			   CNIG_REG_NW_PORT_MODE_BB_B0);
@@ -1271,6 +1517,8 @@
 		p_hwfn->hw_info.personality = protocol;
 	}
 
+	qed_get_num_funcs(p_hwfn, p_ptt);
+
 	qed_hw_get_resc(p_hwfn);
 
 	return rc;
@@ -1336,6 +1584,9 @@
 	p_hwfn->regview = p_regview;
 	p_hwfn->doorbells = p_doorbells;
 
+	if (IS_VF(p_hwfn->cdev))
+		return qed_vf_hw_prepare(p_hwfn);
+
 	/* Validate that chip access is feasible */
 	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
 		DP_ERR(p_hwfn,
@@ -1387,6 +1638,8 @@
 
 	return rc;
 err2:
+	if (IS_LEAD_HWFN(p_hwfn))
+		qed_iov_free_hw_info(p_hwfn->cdev);
 	qed_mcp_free(p_hwfn);
 err1:
 	qed_hw_hwfn_free(p_hwfn);
@@ -1401,7 +1654,8 @@
 	int rc;
 
 	/* Store the precompiled init data ptrs */
-	qed_init_iro_array(cdev);
+	if (IS_PF(cdev))
+		qed_init_iro_array(cdev);
 
 	/* Initialize the first hwfn - will learn number of hwfns */
 	rc = qed_hw_prepare_single(p_hwfn,
@@ -1433,9 +1687,11 @@
 		 * initialized hwfn 0.
 		 */
 		if (rc) {
-			qed_init_free(p_hwfn);
-			qed_mcp_free(p_hwfn);
-			qed_hw_hwfn_free(p_hwfn);
+			if (IS_PF(cdev)) {
+				qed_init_free(p_hwfn);
+				qed_mcp_free(p_hwfn);
+				qed_hw_hwfn_free(p_hwfn);
+			}
 		}
 	}
 
@@ -1449,10 +1705,17 @@
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
+		if (IS_VF(cdev)) {
+			qed_vf_pf_release(p_hwfn);
+			continue;
+		}
+
 		qed_init_free(p_hwfn);
 		qed_hw_hwfn_free(p_hwfn);
 		qed_mcp_free(p_hwfn);
 	}
+
+	qed_iov_free_hw_info(cdev);
 }
 
 int qed_chain_alloc(struct qed_dev *cdev,
@@ -1593,3 +1856,388 @@
 
 	return 0;
 }
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration, each vport will have
+ * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+					     struct qed_ptt *p_ptt,
+					     u32 min_pf_rate)
+{
+	struct init_qm_vport_params *vport_params;
+	int i;
+
+	vport_params = p_hwfn->qm_info.qm_vport_params;
+
+	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+						min_pf_rate;
+		qed_init_vport_wfq(p_hwfn, p_ptt,
+				   vport_params[i].first_tx_pq_id,
+				   vport_params[i].vport_wfq);
+	}
+}
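
A worked example of the weight calculation above, assuming QED_WFQ_UNIT is
the one-percent granularity implied by the validation messages (the real
value comes from the driver headers):

#include <stdio.h>

#define WFQ_UNIT	100	/* assumed granularity for this sketch */

int main(void)
{
	unsigned int min_pf_rate = 10000;	/* PF min rate, Mbps */
	unsigned int wfq_speed = 2500;		/* vport min rate, Mbps */
	unsigned int vport_wfq = wfq_speed * WFQ_UNIT / min_pf_rate;

	printf("vport_wfq = %u\n", vport_wfq);			/* 25 */
	printf("approx min rate = %u Mbps\n",
	       min_pf_rate * vport_wfq / WFQ_UNIT);		/* 2500 */
	return 0;
}
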
+
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+				       u32 min_pf_rate)
+
+{
+	int i;
+
+	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+					   struct qed_ptt *p_ptt,
+					   u32 min_pf_rate)
+{
+	struct init_qm_vport_params *vport_params;
+	int i;
+
+	vport_params = p_hwfn->qm_info.qm_vport_params;
+
+	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+		qed_init_vport_wfq(p_hwfn, p_ptt,
+				   vport_params[i].first_tx_pq_id,
+				   vport_params[i].vport_wfq);
+	}
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport:
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause other vports [not explicitly configured for
+ *    WFQ] to get rates of less than one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */
+static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+			      u16 vport_id, u32 req_rate,
+			      u32 min_pf_rate)
+{
+	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+	int non_requested_count = 0, req_count = 0, i, num_vports;
+
+	num_vports = p_hwfn->qm_info.num_vports;
+
+	/* Accounting for the vports which are configured for WFQ explicitly */
+	for (i = 0; i < num_vports; i++) {
+		u32 tmp_speed;
+
+		if ((i != vport_id) &&
+		    p_hwfn->qm_info.wfq_data[i].configured) {
+			req_count++;
+			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+			total_req_min_rate += tmp_speed;
+		}
+	}
+
+	/* Include current vport data as well */
+	req_count++;
+	total_req_min_rate += req_rate;
+	non_requested_count = num_vports - req_count;
+
+	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+			   vport_id, req_rate, min_pf_rate);
+		return -EINVAL;
+	}
+
+	if (num_vports > QED_WFQ_UNIT) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Number of vports is greater than %d\n",
+			   QED_WFQ_UNIT);
+		return -EINVAL;
+	}
+
+	if (total_req_min_rate > min_pf_rate) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+			   total_req_min_rate, min_pf_rate);
+		return -EINVAL;
+	}
+
+	total_left_rate	= min_pf_rate - total_req_min_rate;
+
+	left_rate_per_vp = total_left_rate / non_requested_count;
+	if (left_rate_per_vp <  min_pf_rate / QED_WFQ_UNIT) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+			   left_rate_per_vp, min_pf_rate);
+		return -EINVAL;
+	}
+
+	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+	for (i = 0; i < num_vports; i++) {
+		if (p_hwfn->qm_info.wfq_data[i].configured)
+			continue;
+
+		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+	}
+
+	return 0;
+}
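
Running hypothetical numbers through the three checks above (1% floor, sum
below the PF min rate, and a 1% floor for the leftover shared by
unconfigured vports):

#include <stdbool.h>
#include <stdio.h>

#define WFQ_UNIT	100	/* assumed granularity, as above */

int main(void)
{
	unsigned int min_pf_rate = 10000;	/* Mbps */
	unsigned int num_vports = 4;
	unsigned int req_rate = 4000;		/* this vport's request */
	unsigned int other_req = 3000;		/* one other configured vport */
	unsigned int total_req = req_rate + other_req;
	unsigned int left_per_vp = (min_pf_rate - total_req) /
				   (num_vports - 2);
	bool ok = req_rate >= min_pf_rate / WFQ_UNIT &&
		  total_req <= min_pf_rate &&
		  left_per_vp >= min_pf_rate / WFQ_UNIT;

	printf("left per unconfigured vport: %u Mbps, valid: %s\n",
	       left_per_vp, ok ? "yes" : "no");
	return 0;
}
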
+
+static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
+{
+	struct qed_mcp_link_state *p_link;
+	int rc = 0;
+
+	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
+
+	if (!p_link->min_pf_rate) {
+		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+		return rc;
+	}
+
+	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+
+	if (rc == 0)
+		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+						 p_link->min_pf_rate);
+	else
+		DP_NOTICE(p_hwfn,
+			  "Validation failed while configuring min rate\n");
+
+	return rc;
+}
+
+static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
+						 struct qed_ptt *p_ptt,
+						 u32 min_pf_rate)
+{
+	bool use_wfq = false;
+	int rc = 0;
+	u16 i;
+
+	/* Validate all pre configured vports for wfq */
+	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+		u32 rate;
+
+		if (!p_hwfn->qm_info.wfq_data[i].configured)
+			continue;
+
+		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+		use_wfq = true;
+
+		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "WFQ validation failed while configuring min rate\n");
+			break;
+		}
+	}
+
+	if (!rc && use_wfq)
+		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+	else
+		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+	return rc;
+}
+
+/* Main API for qed clients to configure vport min rate.
+ * vp_id - vport id in PF, range [0 - (total_num_vports_per_pf - 1)]
+ * rate - Speed in Mbps to be assigned to a given vport.
+ */
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
+{
+	int i, rc = -EINVAL;
+
+	/* Currently not supported; Might change in future */
+	if (cdev->num_hwfns > 1) {
+		DP_NOTICE(cdev,
+			  "WFQ configuration is not supported for this device\n");
+		return rc;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		struct qed_ptt *p_ptt;
+
+		p_ptt = qed_ptt_acquire(p_hwfn);
+		if (!p_ptt)
+			return -EBUSY;
+
+		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+
+		if (!rc) {
+			qed_ptt_release(p_hwfn, p_ptt);
+			return rc;
+		}
+
+		qed_ptt_release(p_hwfn, p_ptt);
+	}
+
+	return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+{
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		__qed_configure_vp_wfq_on_link_change(p_hwfn,
+						      p_hwfn->p_dpc_ptt,
+						      min_pf_rate);
+	}
+}
+
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_mcp_link_state *p_link,
+				     u8 max_bw)
+{
+	int rc = 0;
+
+	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+	if (!p_link->line_speed && (max_bw != 100))
+		return rc;
+
+	p_link->speed = (p_link->line_speed * max_bw) / 100;
+	p_hwfn->qm_info.pf_rl = p_link->speed;
+
+	/* Since the limiter also affects Tx-switched traffic, we don't want it
+	 * to limit such traffic in case there's no actual limit.
+	 * In that case, set the limit to an imaginary high boundary.
+	 */
+	if (max_bw == 100)
+		p_hwfn->qm_info.pf_rl = 100000;
+
+	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+			    p_hwfn->qm_info.pf_rl);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+		   "Configured MAX bandwidth to be %08x Mb/sec\n",
+		   p_link->speed);
+
+	return rc;
+}
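
The max-bandwidth setting is a simple percentage of line speed, with 100%
mapped to a high "no real limit" value; a quick check with hypothetical
numbers:

#include <stdio.h>

int main(void)
{
	unsigned int line_speed = 25000;	/* Mbps, hypothetical link */
	unsigned int max_bw = 40;		/* percent */
	unsigned int pf_rl = (max_bw == 100) ? 100000
					     : line_speed * max_bw / 100;

	printf("pf_rl = %u Mbps\n", pf_rl);	/* 10000 */
	return 0;
}
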
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
+{
+	int i, rc = -EINVAL;
+
+	if (max_bw < 1 || max_bw > 100) {
+		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
+		return rc;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn	*p_hwfn = &cdev->hwfns[i];
+		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+		struct qed_mcp_link_state *p_link;
+		struct qed_ptt *p_ptt;
+
+		p_link = &p_lead->mcp_info->link_output;
+
+		p_ptt = qed_ptt_acquire(p_hwfn);
+		if (!p_ptt)
+			return -EBUSY;
+
+		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+						      p_link, max_bw);
+
+		qed_ptt_release(p_hwfn, p_ptt);
+
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_mcp_link_state *p_link,
+				     u8 min_bw)
+{
+	int rc = 0;
+
+	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+	p_hwfn->qm_info.pf_wfq = min_bw;
+
+	if (!p_link->line_speed)
+		return rc;
+
+	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+		   "Configured MIN bandwidth to be %d Mb/sec\n",
+		   p_link->min_pf_rate);
+
+	return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
+{
+	int i, rc = -EINVAL;
+
+	if (min_bw < 1 || min_bw > 100) {
+		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
+		return rc;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+		struct qed_mcp_link_state *p_link;
+		struct qed_ptt *p_ptt;
+
+		p_link = &p_lead->mcp_info->link_output;
+
+		p_ptt = qed_ptt_acquire(p_hwfn);
+		if (!p_ptt)
+			return -EBUSY;
+
+		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+						      p_link, min_bw);
+		if (rc) {
+			qed_ptt_release(p_hwfn, p_ptt);
+			return rc;
+		}
+
+		if (p_link->min_pf_rate) {
+			u32 min_rate = p_link->min_pf_rate;
+
+			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
+								   p_ptt,
+								   min_rate);
+		}
+
+		qed_ptt_release(p_hwfn, p_ptt);
+	}
+
+	return rc;
+}
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	struct qed_mcp_link_state *p_link;
+
+	p_link = &p_hwfn->mcp_info->link_output;
+
+	if (p_link->min_pf_rate)
+		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+					       p_link->min_pf_rate);
+
+	memset(p_hwfn->qm_info.wfq_data, 0,
+	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index d6c7ddf..dde364d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -62,6 +62,7 @@
  * @brief qed_hw_init -
  *
  * @param cdev
+ * @param p_tunn
  * @param b_hw_start
  * @param int_mode - interrupt mode [msix, inta, etc.] to use.
  * @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@
  * @return int
  */
 int qed_hw_init(struct qed_dev *cdev,
+		struct qed_tunn_start_params *p_tunn,
 		bool b_hw_start,
 		enum qed_int_mode int_mode,
 		bool allow_npar_tx_switch,
@@ -180,11 +182,15 @@
  * used mostly to write a zeroed buffer to destination address
  * using DMA
  */
-#define QED_DMAE_FLAG_RW_REPL_SRC       0x00000001
-#define QED_DMAE_FLAG_COMPLETION_DST    0x00000008
+#define QED_DMAE_FLAG_RW_REPL_SRC	0x00000001
+#define QED_DMAE_FLAG_VF_SRC		0x00000002
+#define QED_DMAE_FLAG_VF_DST		0x00000004
+#define QED_DMAE_FLAG_COMPLETION_DST	0x00000008
 
 struct qed_dmae_params {
-	u32	flags; /* consists of QED_DMAE_FLAG_* values */
+	u32 flags; /* consists of QED_DMAE_FLAG_* values */
+	u8 src_vfid;
+	u8 dst_vfid;
 };
 
 /**
@@ -207,6 +213,23 @@
 		  u32 flags);
 
 /**
+ * @brief qed_dmae_host2host - copy data from a source address
+ * to a destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param params
+ */
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       dma_addr_t source_addr,
+		       dma_addr_t dest_addr,
+		       u32 size_in_dwords, struct qed_dmae_params *p_params);
+
+/**
  * @brief qed_chain_alloc - Allocate and initialize a chain
  *
  * @param p_hwfn
@@ -280,11 +303,11 @@
  * @param p_hwfn
  * @param p_ptt
  * @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
  *
  * @return int
  */
 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt,
-		      u16 id);
+		      struct qed_ptt *p_ptt, u16 id, bool is_vf);
 
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index a368f5e..9afc15f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -29,9 +29,9 @@
 enum common_event_opcode {
 	COMMON_EVENT_PF_START,
 	COMMON_EVENT_PF_STOP,
-	COMMON_EVENT_RESERVED,
-	COMMON_EVENT_RESERVED2,
-	COMMON_EVENT_RESERVED3,
+	COMMON_EVENT_VF_START,
+	COMMON_EVENT_VF_STOP,
+	COMMON_EVENT_VF_PF_CHANNEL,
 	COMMON_EVENT_RESERVED4,
 	COMMON_EVENT_RESERVED5,
 	COMMON_EVENT_RESERVED6,
@@ -44,9 +44,9 @@
 	COMMON_RAMROD_UNUSED,
 	COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
 	COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
-	COMMON_RAMROD_RESERVED,
-	COMMON_RAMROD_RESERVED2,
-	COMMON_RAMROD_RESERVED3,
+	COMMON_RAMROD_VF_START,
+	COMMON_RAMROD_VF_STOP,
+	COMMON_RAMROD_PF_UPDATE,
 	COMMON_RAMROD_EMPTY,
 	MAX_COMMON_RAMROD_CMD_ID
 };
@@ -573,6 +573,14 @@
 	struct event_ring_next_addr	next_addr;
 };
 
+struct mstorm_non_trigger_vf_zone {
+	struct eth_mstorm_per_queue_stat eth_queue_stat;
+};
+
+struct mstorm_vf_zone {
+	struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
 enum personality_type {
 	BAD_PERSONALITY_TYP,
 	PERSONALITY_RESERVED,
@@ -626,6 +634,59 @@
 	u8				reserved0[4];
 };
 
+/* Data for port update ramrod */
+struct protocol_dcb_data {
+	u8 dcb_enable_flag;
+	u8 dcb_priority;
+	u8 dcb_tc;
+	u8 reserved;
+};
+
+/* tunnel configuration */
+struct pf_update_tunnel_config {
+	u8	update_rx_pf_clss;
+	u8	update_tx_pf_clss;
+	u8	set_vxlan_udp_port_flg;
+	u8	set_geneve_udp_port_flg;
+	u8	tx_enable_vxlan;
+	u8	tx_enable_l2geneve;
+	u8	tx_enable_ipgeneve;
+	u8	tx_enable_l2gre;
+	u8	tx_enable_ipgre;
+	u8	tunnel_clss_vxlan;
+	u8	tunnel_clss_l2geneve;
+	u8	tunnel_clss_ipgeneve;
+	u8	tunnel_clss_l2gre;
+	u8	tunnel_clss_ipgre;
+	__le16	vxlan_udp_port;
+	__le16	geneve_udp_port;
+	__le16	reserved[3];
+};
+
+struct pf_update_ramrod_data {
+	u8 pf_id;
+	u8 update_eth_dcb_data_flag;
+	u8 update_fcoe_dcb_data_flag;
+	u8 update_iscsi_dcb_data_flag;
+	u8 update_roce_dcb_data_flag;
+	u8 update_mf_vlan_flag;
+	__le16 mf_vlan;
+	struct protocol_dcb_data eth_dcb_data;
+	struct protocol_dcb_data fcoe_dcb_data;
+	struct protocol_dcb_data iscsi_dcb_data;
+	struct protocol_dcb_data roce_dcb_data;
+	struct pf_update_tunnel_config	tunnel_config;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+	TUNNEL_CLSS_MAC_VLAN = 0,
+	TUNNEL_CLSS_MAC_VNI,
+	TUNNEL_CLSS_INNER_MAC_VLAN,
+	TUNNEL_CLSS_INNER_MAC_VNI,
+	MAX_TUNNEL_CLSS
+};
+
 enum ports_mode {
 	ENGX2_PORTX1 /* 2 engines x 1 port */,
 	ENGX2_PORTX2 /* 2 engines x 2 ports */,
@@ -635,6 +696,16 @@
 	MAX_PORTS_MODE
 };
 
+struct pstorm_non_trigger_vf_zone {
+	struct eth_pstorm_per_queue_stat eth_queue_stat;
+	struct regpair reserved[2];
+};
+
+struct pstorm_vf_zone {
+	struct pstorm_non_trigger_vf_zone non_trigger;
+	struct regpair reserved[7];
+};
+
 /* Ramrod Header of SPQE */
 struct ramrod_header {
 	__le32	cid /* Slowpath Connection CID */;
@@ -664,6 +735,36 @@
 	struct regpair	preroce_irregular_pkt;
 };
 
+struct ustorm_non_trigger_vf_zone {
+	struct eth_ustorm_per_queue_stat eth_queue_stat;
+	struct regpair vf_pf_msg_addr;
+};
+
+struct ustorm_trigger_vf_zone {
+	u8 vf_pf_msg_valid;
+	u8 reserved[7];
+};
+
+struct ustorm_vf_zone {
+	struct ustorm_non_trigger_vf_zone non_trigger;
+	struct ustorm_trigger_vf_zone trigger;
+};
+
+struct vf_start_ramrod_data {
+	u8 vf_id;
+	u8 enable_flr_ack;
+	__le16 opaque_fid;
+	u8 personality;
+	u8 reserved[3];
+};
+
+struct vf_stop_ramrod_data {
+	u8 vf_id;
+	u8 reserved0;
+	__le16 reserved1;
+	__le32 reserved2;
+};
+
 struct atten_status_block {
 	__le32	atten_bits;
 	__le32	atten_ack;
@@ -990,7 +1091,7 @@
 	PHASE_ENGINE,
 	PHASE_PORT,
 	PHASE_PF,
-	PHASE_RESERVED,
+	PHASE_VF,
 	PHASE_QM_PF,
 	MAX_INIT_PHASES
 };
@@ -1603,6 +1704,19 @@
 			  u16			start_pq,
 			  u16			num_pqs);
 
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt  *p_ptt, u16 dest_port);
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt, bool vxlan_enable);
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+			struct qed_ptt  *p_ptt, bool eth_gre_enable,
+			bool ip_gre_enable);
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt, u16 dest_port);
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt, bool eth_geneve_enable,
+			   bool ip_geneve_enable);
+
 /* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
 #define YSTORM_FLOW_CONTROL_MODE_OFFSET  (IRO[0].base)
 #define YSTORM_FLOW_CONTROL_MODE_SIZE    (IRO[0].size)
@@ -3788,7 +3902,7 @@
 
 #define DRV_MSG_CODE_SET_LLDP                   0x24000000
 #define DRV_MSG_CODE_SET_DCBX                   0x25000000
-
+#define DRV_MSG_CODE_BW_UPDATE_ACK		0x32000000
 #define DRV_MSG_CODE_NIG_DRAIN                  0x30000000
 
 #define DRV_MSG_CODE_INITIATE_FLR               0x02000000
@@ -3808,6 +3922,7 @@
 #define DRV_MSG_CODE_PHY_CORE_WRITE             0x000e0000
 #define DRV_MSG_CODE_SET_VERSION                0x000f0000
 
+#define DRV_MSG_CODE_BIST_TEST                  0x001e0000
 #define DRV_MSG_CODE_SET_LED_MODE               0x00200000
 
 #define DRV_MSG_SEQ_NUMBER_MASK                 0x0000ffff
@@ -3865,6 +3980,18 @@
 #define DRV_MB_PARAM_SET_LED_MODE_ON            0x1
 #define DRV_MB_PARAM_SET_LED_MODE_OFF           0x2
 
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST          0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST         1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST            2
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN            0
+#define DRV_MB_PARAM_BIST_RC_PASSED             1
+#define DRV_MB_PARAM_BIST_RC_FAILED             2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER          3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT      0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK       0x000000FF
+
 	u32 fw_mb_header;
 #define FW_MSG_CODE_MASK                        0xffff0000
 #define FW_MSG_CODE_DRV_LOAD_ENGINE             0x10100000
@@ -5067,4 +5194,8 @@
 	struct hw_set_info	hw_sets[1];
 };
 
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		    u8 pf_id, u16 pf_wfq);
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a95a3e4..0ada7fd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -23,6 +23,7 @@
 #include "qed_hsi.h"
 #include "qed_hw.h"
 #include "qed_reg_addr.h"
+#include "qed_sriov.h"
 
 #define QED_BAR_ACQUIRE_TIMEOUT 1000
 
@@ -236,8 +237,12 @@
 		quota = min_t(size_t, n - done,
 			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
 
-		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
-		hw_offset = qed_ptt_get_bar_addr(p_ptt);
+		if (IS_PF(p_hwfn->cdev)) {
+			qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+			hw_offset = qed_ptt_get_bar_addr(p_ptt);
+		} else {
+			hw_offset = hw_addr + done;
+		}
 
 		dw_count = quota / 4;
 		host_addr = (u32 *)((u8 *)addr + done);
@@ -338,14 +343,25 @@
 	       *(u32 *)&p_ptt->pxp.pretend);
 }
 
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+	u32 concrete_fid = 0;
+
+	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+	return concrete_fid;
+}
+
 /* DMAE */
 static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
 			    const u8 is_src_type_grc,
 			    const u8 is_dst_type_grc,
 			    struct qed_dmae_params *p_params)
 {
+	u16 opcode_b = 0;
 	u32 opcode = 0;
-	u16 opcodeB = 0;
 
 	/* Whether the source is the PCIe or the GRC.
 	 * 0- The source is the PCIe
@@ -387,14 +403,24 @@
 	opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
 		   DMAE_CMD_DST_ADDR_RESET_SHIFT);
 
-	opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
-		    DMAE_CMD_SRC_VF_ID_SHIFT);
+	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
+	if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
+		opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
+		opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+	} else {
+		opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
+			    DMAE_CMD_SRC_VF_ID_SHIFT;
+	}
 
-	opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
-		    DMAE_CMD_DST_VF_ID_SHIFT);
+	if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
+		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+	} else {
+		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+	}
 
 	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
-	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
 }
 
 u32 qed_dmae_idx_to_go_cmd(u8 idx)
@@ -742,6 +768,28 @@
 	return rc;
 }
 
+int
+qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt,
+		   dma_addr_t source_addr,
+		   dma_addr_t dest_addr,
+		   u32 size_in_dwords, struct qed_dmae_params *p_params)
+{
+	int rc;
+
+	mutex_lock(&(p_hwfn->dmae_info.mutex));
+
+	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+				      dest_addr,
+				      QED_DMAE_ADDRESS_HOST_PHYS,
+				      QED_DMAE_ADDRESS_HOST_PHYS,
+				      size_in_dwords, p_params);
+
+	mutex_unlock(&(p_hwfn->dmae_info.mutex));
+
+	return rc;
+}
+
 u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
 		  enum protocol_type proto,
 		  union qed_qm_pq_params *p_params)
@@ -765,6 +813,9 @@
 		break;
 	case PROTOCOLID_ETH:
 		pq_id = p_params->eth.tc;
+		if (p_params->eth.is_vf)
+			pq_id += p_hwfn->qm_info.vf_queues_offset +
+				 p_params->eth.vf_id;
 		break;
 	default:
 		pq_id = 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index e56d433..4367363 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -221,6 +221,16 @@
 			struct qed_ptt *p_ptt);
 
 /**
+ * @brief qed_vfid_to_concrete - build a concrete FID for a
+ *        given VF ID
+ *
+ * @param p_hwfn
+ * @param vfid
+ */
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
+
+/**
  * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
  * this is declared here since other files will require it.
  * @param idx
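
qed_vfid_to_concrete() packs the PF id, the VF id and a vf-valid flag into a single concrete FID via SET_FIELD(). A stand-alone sketch of the same packing pattern follows; the field masks and shifts below are placeholders, not the real PXP_CONCRETE_FID_* values from the HSI headers:

/* Placeholder field layout -- the real PXP_CONCRETE_FID_* definitions
 * live in the HSI headers and may differ.
 */
#define EX_FID_PFID_MASK	0xf
#define EX_FID_PFID_SHIFT	0
#define EX_FID_VFID_MASK	0xff
#define EX_FID_VFID_SHIFT	8
#define EX_FID_VFVALID_MASK	0x1
#define EX_FID_VFVALID_SHIFT	16

#define EX_SET_FIELD(val, name, field)					\
	((val) |= (((field) & EX_FID_##name##_MASK) << EX_FID_##name##_SHIFT))

static u32 example_vfid_to_concrete(u8 pf_id, u8 vfid)
{
	u32 concrete = 0;

	EX_SET_FIELD(concrete, PFID, pf_id);
	EX_SET_FIELD(concrete, VFID, vfid);
	EX_SET_FIELD(concrete, VFVALID, 1);

	return concrete;
}
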
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index f55ebdc..e8a3b9d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -712,6 +712,21 @@
 	return 0;
 }
 
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
+		    u8 pf_id, u16 pf_wfq)
+{
+	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+
+	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+		return -1;
+	}
+
+	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+	return 0;
+}
+
 int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
 		   struct qed_ptt *p_ptt,
 		   u8 pf_id,
@@ -732,6 +747,31 @@
 	return 0;
 }
 
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       u16 first_tx_pq_id[NUM_OF_TCS],
+		       u16 vport_wfq)
+{
+	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+	u8 tc;
+
+	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+		return -1;
+	}
+
+	for (tc = 0; tc < NUM_OF_TCS; tc++) {
+		u16 vport_pq_id = first_tx_pq_id[tc];
+
+		if (vport_pq_id != QM_INVALID_PQ_ID)
+			qed_wr(p_hwfn, p_ptt,
+			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
+			       inc_val);
+	}
+
+	return 0;
+}
+
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
 		      struct qed_ptt *p_ptt,
 		      u8 vport_id,
@@ -788,3 +828,130 @@
 
 	return true;
 }
+
+static void
+qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
+{
+	if (enable)
+		set_bit(bit, var);
+	else
+		clear_bit(bit, var);
+}
+
+#define PRS_ETH_TUNN_FIC_FORMAT	-188897008
+
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt *p_ptt,
+			     u16 dest_port)
+{
+	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt,
+			  bool vxlan_enable)
+{
+	unsigned long reg_val = 0;
+	u8 shift;
+
+	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+
+	if (reg_val)
+		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+		       PRS_ETH_TUNN_FIC_FORMAT);
+
+	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+	       vxlan_enable ? 1 : 0);
+}
+
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+			bool eth_gre_enable, bool ip_gre_enable)
+{
+	unsigned long reg_val = 0;
+	u8 shift;
+
+	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+	if (reg_val)
+		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+		       PRS_ETH_TUNN_FIC_FORMAT);
+
+	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+	       eth_gre_enable ? 1 : 0);
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+	       ip_gre_enable ? 1 : 0);
+}
+
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
+			      u16 dest_port)
+{
+	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt,
+			   bool eth_geneve_enable,
+			   bool ip_geneve_enable)
+{
+	unsigned long reg_val = 0;
+	u8 shift;
+
+	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
+
+	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
+	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
+
+	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+	if (reg_val)
+		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+		       PRS_ETH_TUNN_FIC_FORMAT);
+
+	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+	       eth_geneve_enable ? 1 : 0);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
+
+	/* comp ver */
+	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+
+	/* EDPM with geneve tunnel not supported in BB_B0 */
+	if (QED_IS_BB_B0(p_hwfn->cdev))
+		return;
+
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+	       eth_geneve_enable ? 1 : 0);
+	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+	       ip_geneve_enable ? 1 : 0);
+}
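
Both new WFQ helpers follow the same pattern: translate the caller's weight into a rate-increment value, reject it if it is zero or above QM_WFQ_MAX_INC_VAL, and only then program the per-PF or per-PQ weight register. A hypothetical caller might pre-validate weights before committing any of them; a sketch, where only qed_init_pf_wfq() comes from the patch and the rest is illustrative:

/* Illustrative only: program a WFQ weight for every PF of the device,
 * bailing out before any register write if a weight is obviously invalid.
 */
static int example_set_pf_weights(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  const u16 *weights, u8 num_pfs)
{
	u8 pf_id;
	int rc;

	for (pf_id = 0; pf_id < num_pfs; pf_id++)
		if (!weights[pf_id])	/* zero maps to an invalid increment */
			return -EINVAL;

	for (pf_id = 0; pf_id < num_pfs; pf_id++) {
		rc = qed_init_pf_wfq(p_hwfn, p_ptt, pf_id, weights[pf_id]);
		if (rc)
			return rc;
	}

	return 0;
}
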
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 3269b36..d358c3b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -18,6 +18,7 @@
 #include "qed_hw.h"
 #include "qed_init_ops.h"
 #include "qed_reg_addr.h"
+#include "qed_sriov.h"
 
 #define QED_INIT_MAX_POLL_COUNT 100
 #define QED_INIT_POLL_PERIOD_US 500
@@ -128,6 +129,9 @@
 {
 	struct qed_rt_data *rt_data = &p_hwfn->rt_data;
 
+	if (IS_VF(p_hwfn->cdev))
+		return 0;
+
 	rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
 				   GFP_KERNEL);
 	if (!rt_data->b_valid)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index ffd0acc..09a6ad3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -26,6 +26,8 @@
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
 
 struct qed_pi_info {
 	qed_int_comp_cb_t	comp_cb;
@@ -2513,6 +2515,9 @@
 	u32 sb_offset;
 	u32 pi_offset;
 
+	if (IS_VF(p_hwfn->cdev))
+		return;
+
 	sb_offset = igu_sb_id * PIS_PER_SB;
 	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
 
@@ -2542,8 +2547,9 @@
 	sb_info->sb_ack = 0;
 	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
 
-	qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
-			    sb_info->igu_sb_id, 0, 0);
+	if (IS_PF(p_hwfn->cdev))
+		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+				    sb_info->igu_sb_id, 0, 0);
 }
 
 /**
@@ -2563,8 +2569,10 @@
 	/* Assuming continuous set of IGU SBs dedicated for given PF */
 	if (sb_id == QED_SP_SB_ID)
 		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
-	else
+	else if (IS_PF(p_hwfn->cdev))
 		igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+	else
+		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
 
 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
 		   (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
@@ -2594,9 +2602,16 @@
 	/* The igu address will hold the absolute address that needs to be
 	 * written to for a specific status block
 	 */
-	sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
-					  GTT_BAR0_MAP_REG_IGU_CMD +
-					  (sb_info->igu_sb_id << 3);
+	if (IS_PF(p_hwfn->cdev)) {
+		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+						  GTT_BAR0_MAP_REG_IGU_CMD +
+						  (sb_info->igu_sb_id << 3);
+	} else {
+		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+						  PXP_VF_BAR0_START_IGU +
+						  ((IGU_CMD_INT_ACK_BASE +
+						    sb_info->igu_sb_id) << 3);
+	}
 
 	sb_info->flags |= QED_SB_INFO_INIT;
 
@@ -2750,7 +2765,7 @@
 int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 		       enum qed_int_mode int_mode)
 {
-	int rc;
+	int rc = 0;
 
 	/* Configure AEU signal change to produce attentions */
 	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
@@ -2783,24 +2798,20 @@
 {
 	p_hwfn->b_int_enabled = 0;
 
+	if (IS_VF(p_hwfn->cdev))
+		return;
+
 	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
 }
 
 #define IGU_CLEANUP_SLEEP_LENGTH                (1000)
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
-			    struct qed_ptt *p_ptt,
-			    u32 sb_id,
-			    bool cleanup_set,
-			    u16 opaque_fid
-			    )
+static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt,
+				   u32 sb_id, bool cleanup_set, u16 opaque_fid)
 {
+	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
 	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
 	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
-	u32 data = 0;
-	u32 cmd_ctrl = 0;
-	u32 val = 0;
-	u32 sb_bit = 0;
-	u32 sb_bit_addr = 0;
 
 	/* Set the data field */
 	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
@@ -2845,11 +2856,9 @@
 
 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
 				     struct qed_ptt *p_ptt,
-				     u32 sb_id,
-				     u16 opaque,
-				     bool b_set)
+				     u32 sb_id, u16 opaque, bool b_set)
 {
-	int pi;
+	int pi, i;
 
 	/* Set */
 	if (b_set)
@@ -2858,6 +2867,22 @@
 	/* Clear */
 	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
 
+	/* Wait for the IGU SB to cleanup */
+	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+		u32 val;
+
+		val = qed_rd(p_hwfn, p_ptt,
+			     IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4));
+		if (val & (1 << (sb_id % 32)))
+			usleep_range(10, 20);
+		else
+			break;
+	}
+	if (i == IGU_CLEANUP_SLEEP_LENGTH)
+		DP_NOTICE(p_hwfn,
+			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+			  sb_id);
+
 	/* Clear the CAU for the SB */
 	for (pi = 0; pi < 12; pi++)
 		qed_wr(p_hwfn, p_ptt,
@@ -2866,13 +2891,11 @@
 
 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
 			      struct qed_ptt *p_ptt,
-			      bool b_set,
-			      bool b_slowpath)
+			      bool b_set, bool b_slowpath)
 {
 	u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
 	u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
-	u32 sb_id = 0;
-	u32 val = 0;
+	u32 sb_id = 0, val = 0;
 
 	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
 	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
@@ -2888,14 +2911,14 @@
 						p_hwfn->hw_info.opaque_fid,
 						b_set);
 
-	if (b_slowpath) {
-		sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
-		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
-			   "IGU cleaning slowpath SB [%d]\n", sb_id);
-		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
-						p_hwfn->hw_info.opaque_fid,
-						b_set);
-	}
+	if (!b_slowpath)
+		return;
+
+	sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+		   "IGU cleaning slowpath SB [%d]\n", sb_id);
+	qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+					p_hwfn->hw_info.opaque_fid, b_set);
 }
 
 static u32 qed_int_igu_read_cam_block(struct qed_hwfn	*p_hwfn,
@@ -2935,9 +2958,9 @@
 			 struct qed_ptt *p_ptt)
 {
 	struct qed_igu_info *p_igu_info;
+	u32 val, min_vf = 0, max_vf = 0;
+	u16 sb_id, last_iov_sb_id = 0;
 	struct qed_igu_block *blk;
-	u32 val;
-	u16 sb_id;
 	u16 prev_sb_id = 0xFF;
 
 	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
@@ -2947,12 +2970,19 @@
 
 	p_igu_info = p_hwfn->hw_info.p_igu_info;
 
-	/* Initialize base sb / sb cnt for PFs */
+	/* Initialize base sb / sb cnt for PFs and VFs */
 	p_igu_info->igu_base_sb		= 0xffff;
 	p_igu_info->igu_sb_cnt		= 0;
 	p_igu_info->igu_dsb_id		= 0xffff;
 	p_igu_info->igu_base_sb_iov	= 0xffff;
 
+	if (p_hwfn->cdev->p_iov_info) {
+		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+		min_vf	= p_iov->first_vf_in_pf;
+		max_vf	= p_iov->first_vf_in_pf + p_iov->total_vfs;
+	}
+
 	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
 	     sb_id++) {
 		blk = &p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2986,14 +3016,43 @@
 					(p_igu_info->igu_sb_cnt)++;
 				}
 			}
+		} else {
+			if ((blk->function_id >= min_vf) &&
+			    (blk->function_id < max_vf)) {
+				/* Available for VFs of this PF */
+				if (p_igu_info->igu_base_sb_iov == 0xffff) {
+					p_igu_info->igu_base_sb_iov = sb_id;
+				} else if (last_iov_sb_id != sb_id - 1) {
+					if (!val) {
+						DP_VERBOSE(p_hwfn->cdev,
+							   NETIF_MSG_INTR,
+							   "First uninitialized IGU CAM entry at index 0x%04x\n",
+							   sb_id);
+					} else {
+						DP_NOTICE(p_hwfn->cdev,
+							  "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
+							  p_hwfn->rel_pf_id,
+							  last_iov_sb_id,
+							  sb_id);
+					}
+					break;
+				}
+				blk->status |= QED_IGU_STATUS_FREE;
+				p_hwfn->hw_info.p_igu_info->free_blks++;
+				last_iov_sb_id = sb_id;
+			}
 		}
 	}
+	p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
 
-	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
-		   "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
-		   p_igu_info->igu_base_sb,
-		   p_igu_info->igu_sb_cnt,
-		   p_igu_info->igu_dsb_id);
+	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+		   "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
+		   p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
+		   p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
+		   p_igu_info->igu_dsb_id);
 
 	if (p_igu_info->igu_base_sb == 0xffff ||
 	    p_igu_info->igu_dsb_id == 0xffff ||
@@ -3116,6 +3175,23 @@
 	p_sb_cnt_info->sb_free_blk	= info->free_blks;
 }
 
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+
+	/* Determine origin of SB id */
+	if ((sb_id >= p_info->igu_base_sb) &&
+	    (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
+		return sb_id - p_info->igu_base_sb;
+	} else if ((sb_id >= p_info->igu_base_sb_iov) &&
+		   (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
+		return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+	} else {
+		DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
+		return 0;
+	}
+}
+
 void qed_int_disable_post_isr_release(struct qed_dev *cdev)
 {
 	int i;
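
qed_int_queue_id_from_sb_id() folds the two IGU ranges owned by the function — the PF's own status blocks and the blocks set aside for its VFs — into one flat queue index space: PF SBs map to [0, igu_sb_cnt) and VF SBs continue immediately after them. A worked example of the arithmetic (the numbers are invented):

/* Suppose igu_base_sb = 16, igu_sb_cnt = 8,
 *         igu_base_sb_iov = 64, igu_sb_cnt_iov = 32.
 *
 *   sb_id = 19  -> 19 - 16       = queue 3   (PF range)
 *   sb_id = 70  -> 70 - 64 + 8   = queue 14  (VF range, after the PF queues)
 *   sb_id = 200 -> outside both ranges -> DP_NOTICE() and queue 0
 */
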
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index c57f2e6..20b4686 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -20,6 +20,12 @@
 #define IGU_PF_CONF_ATTN_BIT_EN   (0x1 << 3)    /* attention enable       */
 #define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)    /* single ISR mode enable */
 #define IGU_PF_CONF_SIMD_MODE     (0x1 << 5)    /* simd all ones mode     */
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN        (0x1 << 0)	/* function enable        */
+#define IGU_VF_CONF_MSI_MSIX_EN    (0x1 << 1)	/* MSI/MSIX enable        */
+#define IGU_VF_CONF_SINGLE_ISR_EN  (0x1 << 4)	/* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK    (0xF)	/* Parent PF              */
+#define IGU_VF_CONF_PARENT_SHIFT   5		/* Parent PF              */
 
 /* Igu control commands
  */
@@ -292,26 +298,8 @@
  * @param p_hwfn
  * @param p_ptt
  * @param sb_id		- igu status block id
- * @param cleanup_set	- set(1) / clear(0)
- * @param opaque_fid    - the function for which to perform
- *			cleanup, for example a PF on behalf of
- *			its VFs.
- */
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
-			    struct qed_ptt *p_ptt,
-			    u32 sb_id,
-			    bool cleanup_set,
-			    u16 opaque_fid);
-
-/**
- * @brief Status block cleanup. Should be called for each status
- *        block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param sb_id		- igu status block id
  * @param opaque	- opaque fid of the sb owner.
- * @param cleanup_set	- set(1) / clear(0)
+ * @param b_set		- set(1) / clear(0)
  */
 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
 				     struct qed_ptt *p_ptt,
@@ -365,6 +353,16 @@
 		   struct qed_ptt *p_ptt);
 
 /**
+ * @brief - Returns an Rx queue index appropriate for usage with given SB.
+ *
+ * @param p_hwfn
+ * @param sb_id - absolute index of SB
+ *
+ * @return index of Rx queue
+ */
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
  * @brief - Enable Interrupt & Attention for hw function
  *
  * @param p_hwfn
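
The new IGU_VF_CONF_* fields describe how a PF composes the IGU configuration value it writes on behalf of a VF: enable bits in the low nibble plus the parent PF id starting at bit 5. A minimal sketch of that composition (the register write path itself is omitted here):

/* Build an IGU VF configuration value: function and MSI-X enabled,
 * single-ISR mode, owned by parent PF 'pf_id'.
 */
static u32 example_igu_vf_conf(u8 pf_id)
{
	u32 val = IGU_VF_CONF_FUNC_EN |
		  IGU_VF_CONF_MSI_MSIX_EN |
		  IGU_VF_CONF_SINGLE_ISR_EN;

	val |= (pf_id & IGU_VF_CONF_PARENT_MASK) << IGU_VF_CONF_PARENT_SHIFT;

	return val;
}
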
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 3f35c6c..8fba87dd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -31,137 +31,25 @@
 #include "qed_hsi.h"
 #include "qed_hw.h"
 #include "qed_int.h"
+#include "qed_l2.h"
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
 
-enum qed_rss_caps {
-	QED_RSS_IPV4		= 0x1,
-	QED_RSS_IPV6		= 0x2,
-	QED_RSS_IPV4_TCP	= 0x4,
-	QED_RSS_IPV6_TCP	= 0x8,
-	QED_RSS_IPV4_UDP	= 0x10,
-	QED_RSS_IPV6_UDP	= 0x20,
-};
-
-/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
-#define QED_RSS_IND_TABLE_SIZE 128
-#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
-
-struct qed_rss_params {
-	u8	update_rss_config;
-	u8	rss_enable;
-	u8	rss_eng_id;
-	u8	update_rss_capabilities;
-	u8	update_rss_ind_table;
-	u8	update_rss_key;
-	u8	rss_caps;
-	u8	rss_table_size_log;
-	u16	rss_ind_table[QED_RSS_IND_TABLE_SIZE];
-	u32	rss_key[QED_RSS_KEY_SIZE];
-};
-
-enum qed_filter_opcode {
-	QED_FILTER_ADD,
-	QED_FILTER_REMOVE,
-	QED_FILTER_MOVE,
-	QED_FILTER_REPLACE,     /* Delete all MACs and add new one instead */
-	QED_FILTER_FLUSH,       /* Removes all filters */
-};
-
-enum qed_filter_ucast_type {
-	QED_FILTER_MAC,
-	QED_FILTER_VLAN,
-	QED_FILTER_MAC_VLAN,
-	QED_FILTER_INNER_MAC,
-	QED_FILTER_INNER_VLAN,
-	QED_FILTER_INNER_PAIR,
-	QED_FILTER_INNER_MAC_VNI_PAIR,
-	QED_FILTER_MAC_VNI_PAIR,
-	QED_FILTER_VNI,
-};
-
-struct qed_filter_ucast {
-	enum qed_filter_opcode		opcode;
-	enum qed_filter_ucast_type	type;
-	u8				is_rx_filter;
-	u8				is_tx_filter;
-	u8				vport_to_add_to;
-	u8				vport_to_remove_from;
-	unsigned char			mac[ETH_ALEN];
-	u8				assert_on_error;
-	u16				vlan;
-	u32				vni;
-};
-
-struct qed_filter_mcast {
-	/* MOVE is not supported for multicast */
-	enum qed_filter_opcode	opcode;
-	u8			vport_to_add_to;
-	u8			vport_to_remove_from;
-	u8			num_mc_addrs;
-#define QED_MAX_MC_ADDRS        64
-	unsigned char		mac[QED_MAX_MC_ADDRS][ETH_ALEN];
-};
-
-struct qed_filter_accept_flags {
-	u8	update_rx_mode_config;
-	u8	update_tx_mode_config;
-	u8	rx_accept_filter;
-	u8	tx_accept_filter;
-#define QED_ACCEPT_NONE         0x01
-#define QED_ACCEPT_UCAST_MATCHED        0x02
-#define QED_ACCEPT_UCAST_UNMATCHED      0x04
-#define QED_ACCEPT_MCAST_MATCHED        0x08
-#define QED_ACCEPT_MCAST_UNMATCHED      0x10
-#define QED_ACCEPT_BCAST                0x20
-};
-
-struct qed_sp_vport_update_params {
-	u16				opaque_fid;
-	u8				vport_id;
-	u8				update_vport_active_rx_flg;
-	u8				vport_active_rx_flg;
-	u8				update_vport_active_tx_flg;
-	u8				vport_active_tx_flg;
-	u8				update_approx_mcast_flg;
-	u8				update_accept_any_vlan_flg;
-	u8				accept_any_vlan;
-	unsigned long			bins[8];
-	struct qed_rss_params		*rss_params;
-	struct qed_filter_accept_flags	accept_flags;
-};
-
-enum qed_tpa_mode {
-	QED_TPA_MODE_NONE,
-	QED_TPA_MODE_UNUSED,
-	QED_TPA_MODE_GRO,
-	QED_TPA_MODE_MAX
-};
-
-struct qed_sp_vport_start_params {
-	enum qed_tpa_mode	tpa_mode;
-	bool			remove_inner_vlan;
-	bool			drop_ttl0;
-	u8			max_buffers_per_cqe;
-	u32			concrete_fid;
-	u16			opaque_fid;
-	u8			vport_id;
-	u16			mtu;
-};
 
 #define QED_MAX_SGES_NUM 16
 #define CRC32_POLY 0x1edc6f41
 
-static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
-			      struct qed_sp_vport_start_params *p_params)
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+			   struct qed_sp_vport_start_params *p_params)
 {
 	struct vport_start_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent =  NULL;
 	struct qed_sp_init_data init_data;
+	u8 abs_vport_id = 0;
 	int rc = -EINVAL;
 	u16 rx_mode = 0;
-	u8 abs_vport_id = 0;
 
 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 	if (rc != 0)
@@ -211,6 +99,8 @@
 		break;
 	}
 
+	p_ramrod->tx_switching_en = p_params->tx_switching;
+
 	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
 	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
 						  p_params->concrete_fid);
@@ -218,6 +108,21 @@
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
+int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+		       struct qed_sp_vport_start_params *p_params)
+{
+	if (IS_VF(p_hwfn->cdev)) {
+		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+					     p_params->mtu,
+					     p_params->remove_inner_vlan,
+					     p_params->tpa_mode,
+					     p_params->max_buffers_per_cqe,
+					     p_params->only_untagged);
+	}
+
+	return qed_sp_eth_vport_start(p_hwfn, p_params);
+}
+
 static int
 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
 			struct vport_update_ramrod_data *p_ramrod,
@@ -363,6 +268,38 @@
 }
 
 static void
+qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
+			    struct vport_update_ramrod_data *p_ramrod,
+			    struct qed_sge_tpa_params *p_params)
+{
+	struct eth_vport_tpa_param *p_tpa;
+
+	if (!p_params) {
+		p_ramrod->common.update_tpa_param_flg = 0;
+		p_ramrod->common.update_tpa_en_flg = 0;
+		p_ramrod->common.update_tpa_param_flg = 0;
+		return;
+	}
+
+	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+	p_tpa = &p_ramrod->tpa_param;
+	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+	p_tpa->tpa_max_size = p_params->tpa_max_size;
+	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
+static void
 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
 			struct vport_update_ramrod_data *p_ramrod,
 			struct qed_sp_vport_update_params *p_params)
@@ -383,20 +320,24 @@
 	}
 }
 
-static int
-qed_sp_vport_update(struct qed_hwfn *p_hwfn,
-		    struct qed_sp_vport_update_params *p_params,
-		    enum spq_mode comp_mode,
-		    struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+			struct qed_sp_vport_update_params *p_params,
+			enum spq_mode comp_mode,
+			struct qed_spq_comp_cb *p_comp_data)
 {
 	struct qed_rss_params *p_rss_params = p_params->rss_params;
 	struct vport_update_ramrod_data_cmn *p_cmn;
 	struct qed_sp_init_data init_data;
 	struct vport_update_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
-	u8 abs_vport_id = 0;
+	u8 abs_vport_id = 0, val;
 	int rc = -EINVAL;
 
+	if (IS_VF(p_hwfn->cdev)) {
+		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
+		return rc;
+	}
+
 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 	if (rc != 0)
 		return rc;
@@ -425,6 +366,27 @@
 	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
 	p_cmn->update_accept_any_vlan_flg =
 			p_params->update_accept_any_vlan_flg;
+
+	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
+	val = p_params->update_inner_vlan_removal_flg;
+	p_cmn->update_inner_vlan_removal_en_flg = val;
+
+	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
+	val = p_params->update_default_vlan_enable_flg;
+	p_cmn->update_default_vlan_en_flg = val;
+
+	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
+	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
+
+	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
+
+	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+
+	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
+	val = p_params->update_anti_spoofing_en_flg;
+	p_ramrod->common.update_anti_spoofing_en_flg = val;
+
 	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
 	if (rc) {
 		/* Return spq entry which is taken in qed_sp_init_request()*/
@@ -436,12 +398,11 @@
 	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
 
 	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
-			     u16 opaque_fid,
-			     u8 vport_id)
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
 {
 	struct vport_stop_ramrod_data *p_ramrod;
 	struct qed_sp_init_data init_data;
@@ -449,6 +410,9 @@
 	u8 abs_vport_id = 0;
 	int rc;
 
+	if (IS_VF(p_hwfn->cdev))
+		return qed_vf_pf_vport_stop(p_hwfn);
+
 	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
 	if (rc != 0)
 		return rc;
@@ -470,13 +434,26 @@
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
+static int
+qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
+		       struct qed_filter_accept_flags *p_accept_flags)
+{
+	struct qed_sp_vport_update_params s_params;
+
+	memset(&s_params, 0, sizeof(s_params));
+	memcpy(&s_params.accept_flags, p_accept_flags,
+	       sizeof(struct qed_filter_accept_flags));
+
+	return qed_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
 static int qed_filter_accept_cmd(struct qed_dev *cdev,
 				 u8 vport,
 				 struct qed_filter_accept_flags accept_flags,
 				 u8 update_accept_any_vlan,
 				 u8 accept_any_vlan,
-				enum spq_mode comp_mode,
-				struct qed_spq_comp_cb *p_comp_data)
+				 enum spq_mode comp_mode,
+				 struct qed_spq_comp_cb *p_comp_data)
 {
 	struct qed_sp_vport_update_params vport_update_params;
 	int i, rc;
@@ -493,6 +470,13 @@
 
 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
 
+		if (IS_VF(cdev)) {
+			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
+			if (rc)
+				return rc;
+			continue;
+		}
+
 		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
 					 comp_mode, p_comp_data);
 		if (rc != 0) {
@@ -527,16 +511,14 @@
 	return 0;
 }
 
-static int
-qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
-			    u16 opaque_fid,
-			    u32 cid,
-			    struct qed_queue_start_common_params *params,
-			    u8 stats_id,
-			    u16 bd_max_bytes,
-			    dma_addr_t bd_chain_phys_addr,
-			    dma_addr_t cqe_pbl_addr,
-			    u16 cqe_pbl_size)
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+				u16 opaque_fid,
+				u32 cid,
+				struct qed_queue_start_common_params *params,
+				u8 stats_id,
+				u16 bd_max_bytes,
+				dma_addr_t bd_chain_phys_addr,
+				dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
 {
 	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
@@ -605,8 +587,7 @@
 			  u16 bd_max_bytes,
 			  dma_addr_t bd_chain_phys_addr,
 			  dma_addr_t cqe_pbl_addr,
-			  u16 cqe_pbl_size,
-			  void __iomem **pp_prod)
+			  u16 cqe_pbl_size, void __iomem **pp_prod)
 {
 	struct qed_hw_cid_data *p_rx_cid;
 	u64 init_prod_val = 0;
@@ -614,6 +595,16 @@
 	u8 abs_stats_id = 0;
 	int rc;
 
+	if (IS_VF(p_hwfn->cdev)) {
+		return qed_vf_pf_rxq_start(p_hwfn,
+					   params->queue_id,
+					   params->sb,
+					   params->sb_idx,
+					   bd_max_bytes,
+					   bd_chain_phys_addr,
+					   cqe_pbl_addr, cqe_pbl_size, pp_prod);
+	}
+
 	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
 	if (rc != 0)
 		return rc;
@@ -656,10 +647,59 @@
 	return rc;
 }
 
-static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
-				    u16 rx_queue_id,
-				    bool eq_completion_only,
-				    bool cqe_completion)
+int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+				u16 rx_queue_id,
+				u8 num_rxqs,
+				u8 complete_cqe_flg,
+				u8 complete_event_flg,
+				enum spq_mode comp_mode,
+				struct qed_spq_comp_cb *p_comp_data)
+{
+	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
+	struct qed_spq_entry *p_ent = NULL;
+	struct qed_sp_init_data init_data;
+	struct qed_hw_cid_data *p_rx_cid;
+	u16 qid, abs_rx_q_id = 0;
+	int rc = -EINVAL;
+	u8 i;
+
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.comp_mode = comp_mode;
+	init_data.p_comp_data = p_comp_data;
+
+	for (i = 0; i < num_rxqs; i++) {
+		qid = rx_queue_id + i;
+		p_rx_cid = &p_hwfn->p_rx_cids[qid];
+
+		/* Get SPQ entry */
+		init_data.cid = p_rx_cid->cid;
+		init_data.opaque_fid = p_rx_cid->opaque_fid;
+
+		rc = qed_sp_init_request(p_hwfn, &p_ent,
+					 ETH_RAMROD_RX_QUEUE_UPDATE,
+					 PROTOCOLID_ETH, &init_data);
+		if (rc)
+			return rc;
+
+		p_ramrod = &p_ent->ramrod.rx_queue_update;
+
+		qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+		qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+		p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+		p_ramrod->complete_cqe_flg = complete_cqe_flg;
+		p_ramrod->complete_event_flg = complete_event_flg;
+
+		rc = qed_spq_post(p_hwfn, p_ent, NULL);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+			     u16 rx_queue_id,
+			     bool eq_completion_only, bool cqe_completion)
 {
 	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
 	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
@@ -668,6 +708,9 @@
 	u16 abs_rx_q_id = 0;
 	int rc = -EINVAL;
 
+	if (IS_VF(p_hwfn->cdev))
+		return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
+
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
 	init_data.cid = p_rx_cid->cid;
@@ -703,15 +746,14 @@
 	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
 }
 
-static int
-qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
-			    u16  opaque_fid,
-			    u32  cid,
-			    struct qed_queue_start_common_params *p_params,
-			    u8  stats_id,
-			    dma_addr_t pbl_addr,
-			    u16 pbl_size,
-			    union qed_qm_pq_params *p_pq_params)
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
+				u16  opaque_fid,
+				u32  cid,
+				struct qed_queue_start_common_params *p_params,
+				u8  stats_id,
+				dma_addr_t pbl_addr,
+				u16 pbl_size,
+				union qed_qm_pq_params *p_pq_params)
 {
 	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
@@ -765,14 +807,21 @@
 			  u16 opaque_fid,
 			  struct qed_queue_start_common_params *p_params,
 			  dma_addr_t pbl_addr,
-			  u16 pbl_size,
-			  void __iomem **pp_doorbell)
+			  u16 pbl_size, void __iomem **pp_doorbell)
 {
 	struct qed_hw_cid_data *p_tx_cid;
 	union qed_qm_pq_params pq_params;
 	u8 abs_stats_id = 0;
 	int rc;
 
+	if (IS_VF(p_hwfn->cdev)) {
+		return qed_vf_pf_txq_start(p_hwfn,
+					   p_params->queue_id,
+					   p_params->sb,
+					   p_params->sb_idx,
+					   pbl_addr, pbl_size, pp_doorbell);
+	}
+
 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
 	if (rc)
 		return rc;
@@ -813,14 +862,16 @@
 	return rc;
 }
 
-static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
-				    u16 tx_queue_id)
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
 {
 	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
 	int rc = -EINVAL;
 
+	if (IS_VF(p_hwfn->cdev))
+		return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
 	init_data.cid = p_tx_cid->cid;
@@ -1016,11 +1067,11 @@
 	return 0;
 }
 
-static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
-				   u16 opaque_fid,
-				   struct qed_filter_ucast *p_filter_cmd,
-				   enum spq_mode comp_mode,
-				   struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+			    u16 opaque_fid,
+			    struct qed_filter_ucast *p_filter_cmd,
+			    enum spq_mode comp_mode,
+			    struct qed_spq_comp_cb *p_comp_data)
 {
 	struct vport_filter_update_ramrod_data	*p_ramrod	= NULL;
 	struct qed_spq_entry			*p_ent		= NULL;
@@ -1118,7 +1169,7 @@
 	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
 }
 
-static u8 qed_mcast_bin_from_mac(u8 *mac)
+u8 qed_mcast_bin_from_mac(u8 *mac)
 {
 	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
 				mac, ETH_ALEN);
@@ -1201,11 +1252,10 @@
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-static int
-qed_filter_mcast_cmd(struct qed_dev *cdev,
-		     struct qed_filter_mcast *p_filter_cmd,
-		     enum spq_mode comp_mode,
-		     struct qed_spq_comp_cb *p_comp_data)
+static int qed_filter_mcast_cmd(struct qed_dev *cdev,
+				struct qed_filter_mcast *p_filter_cmd,
+				enum spq_mode comp_mode,
+				struct qed_spq_comp_cb *p_comp_data)
 {
 	int rc = 0;
 	int i;
@@ -1221,8 +1271,10 @@
 
 		u16 opaque_fid;
 
-		if (rc != 0)
-			break;
+		if (IS_VF(cdev)) {
+			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+			continue;
+		}
 
 		opaque_fid = p_hwfn->hw_info.opaque_fid;
 
@@ -1247,8 +1299,10 @@
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 		u16 opaque_fid;
 
-		if (rc != 0)
-			break;
+		if (IS_VF(cdev)) {
+			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+			continue;
+		}
 
 		opaque_fid = p_hwfn->hw_info.opaque_fid;
 
@@ -1257,6 +1311,8 @@
 					     p_filter_cmd,
 					     comp_mode,
 					     p_comp_data);
+		if (rc != 0)
+			break;
 	}
 
 	return rc;
@@ -1265,12 +1321,19 @@
 /* Statistics related code */
 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
 					   u32 *p_addr,
-					   u32 *p_len,
-					   u16 statistics_bin)
+					   u32 *p_len, u16 statistics_bin)
 {
-	*p_addr = BAR0_MAP_REG_PSDM_RAM +
-		  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
-	*p_len = sizeof(struct eth_pstorm_per_queue_stat);
+	if (IS_PF(p_hwfn->cdev)) {
+		*p_addr = BAR0_MAP_REG_PSDM_RAM +
+		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
+	} else {
+		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
+	}
 }
 
 static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
@@ -1285,32 +1348,15 @@
 				       statistics_bin);
 
 	memset(&pstats, 0, sizeof(pstats));
-	qed_memcpy_from(p_hwfn, p_ptt, &pstats,
-			pstats_addr, pstats_len);
+	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
 
-	p_stats->tx_ucast_bytes +=
-		HILO_64_REGPAIR(pstats.sent_ucast_bytes);
-	p_stats->tx_mcast_bytes +=
-		HILO_64_REGPAIR(pstats.sent_mcast_bytes);
-	p_stats->tx_bcast_bytes +=
-		HILO_64_REGPAIR(pstats.sent_bcast_bytes);
-	p_stats->tx_ucast_pkts +=
-		HILO_64_REGPAIR(pstats.sent_ucast_pkts);
-	p_stats->tx_mcast_pkts +=
-		HILO_64_REGPAIR(pstats.sent_mcast_pkts);
-	p_stats->tx_bcast_pkts +=
-		HILO_64_REGPAIR(pstats.sent_bcast_pkts);
-	p_stats->tx_err_drop_pkts +=
-		HILO_64_REGPAIR(pstats.error_drop_pkts);
-}
-
-static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
-					   u32 *p_addr,
-					   u32 *p_len)
-{
-	*p_addr = BAR0_MAP_REG_TSDM_RAM +
-		  TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
-	*p_len = sizeof(struct tstorm_per_port_stat);
+	p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+	p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+	p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+	p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+	p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+	p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+	p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
 }
 
 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1318,14 +1364,23 @@
 				   struct qed_eth_stats *p_stats,
 				   u16 statistics_bin)
 {
-	u32 tstats_addr = 0, tstats_len = 0;
 	struct tstorm_per_port_stat tstats;
+	u32 tstats_addr, tstats_len;
 
-	__qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
+	if (IS_PF(p_hwfn->cdev)) {
+		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+		tstats_len = sizeof(struct tstorm_per_port_stat);
+	} else {
+		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+	}
 
 	memset(&tstats, 0, sizeof(tstats));
-	qed_memcpy_from(p_hwfn, p_ptt, &tstats,
-			tstats_addr, tstats_len);
+	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
 
 	p_stats->mftag_filter_discards +=
 		HILO_64_REGPAIR(tstats.mftag_filter_discard);
@@ -1335,12 +1390,19 @@
 
 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
 					   u32 *p_addr,
-					   u32 *p_len,
-					   u16 statistics_bin)
+					   u32 *p_len, u16 statistics_bin)
 {
-	*p_addr = BAR0_MAP_REG_USDM_RAM +
-		  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
-	*p_len = sizeof(struct eth_ustorm_per_queue_stat);
+	if (IS_PF(p_hwfn->cdev)) {
+		*p_addr = BAR0_MAP_REG_USDM_RAM +
+		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
+	} else {
+		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
+	}
 }
 
 static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
@@ -1355,31 +1417,31 @@
 				       statistics_bin);
 
 	memset(&ustats, 0, sizeof(ustats));
-	qed_memcpy_from(p_hwfn, p_ptt, &ustats,
-			ustats_addr, ustats_len);
+	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
 
-	p_stats->rx_ucast_bytes +=
-		HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
-	p_stats->rx_mcast_bytes +=
-		HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
-	p_stats->rx_bcast_bytes +=
-		HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
-	p_stats->rx_ucast_pkts +=
-		HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
-	p_stats->rx_mcast_pkts +=
-		HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
-	p_stats->rx_bcast_pkts +=
-		HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+	p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+	p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+	p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+	p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+	p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+	p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
 }
 
 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
 					   u32 *p_addr,
-					   u32 *p_len,
-					   u16 statistics_bin)
+					   u32 *p_len, u16 statistics_bin)
 {
-	*p_addr = BAR0_MAP_REG_MSDM_RAM +
-		  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
-	*p_len = sizeof(struct eth_mstorm_per_queue_stat);
+	if (IS_PF(p_hwfn->cdev)) {
+		*p_addr = BAR0_MAP_REG_MSDM_RAM +
+		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
+	} else {
+		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
+	}
 }
 
 static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
@@ -1394,21 +1456,17 @@
 				       statistics_bin);
 
 	memset(&mstats, 0, sizeof(mstats));
-	qed_memcpy_from(p_hwfn, p_ptt, &mstats,
-			mstats_addr, mstats_len);
+	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
 
-	p_stats->no_buff_discards +=
-		HILO_64_REGPAIR(mstats.no_buff_discard);
+	p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
 	p_stats->packet_too_big_discard +=
 		HILO_64_REGPAIR(mstats.packet_too_big_discard);
-	p_stats->ttl0_discard +=
-		HILO_64_REGPAIR(mstats.ttl0_discard);
+	p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
 	p_stats->tpa_coalesced_pkts +=
 		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
 	p_stats->tpa_coalesced_events +=
 		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
-	p_stats->tpa_aborts_num +=
-		HILO_64_REGPAIR(mstats.tpa_aborts_num);
+	p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
 	p_stats->tpa_coalesced_bytes +=
 		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
 }
@@ -1428,16 +1486,16 @@
 			sizeof(port_stats));
 
 	p_stats->rx_64_byte_packets		+= port_stats.pmm.r64;
-	p_stats->rx_127_byte_packets		+= port_stats.pmm.r127;
-	p_stats->rx_255_byte_packets		+= port_stats.pmm.r255;
-	p_stats->rx_511_byte_packets		+= port_stats.pmm.r511;
-	p_stats->rx_1023_byte_packets		+= port_stats.pmm.r1023;
-	p_stats->rx_1518_byte_packets		+= port_stats.pmm.r1518;
-	p_stats->rx_1522_byte_packets		+= port_stats.pmm.r1522;
-	p_stats->rx_2047_byte_packets		+= port_stats.pmm.r2047;
-	p_stats->rx_4095_byte_packets		+= port_stats.pmm.r4095;
-	p_stats->rx_9216_byte_packets		+= port_stats.pmm.r9216;
-	p_stats->rx_16383_byte_packets		+= port_stats.pmm.r16383;
+	p_stats->rx_65_to_127_byte_packets	+= port_stats.pmm.r127;
+	p_stats->rx_128_to_255_byte_packets	+= port_stats.pmm.r255;
+	p_stats->rx_256_to_511_byte_packets	+= port_stats.pmm.r511;
+	p_stats->rx_512_to_1023_byte_packets	+= port_stats.pmm.r1023;
+	p_stats->rx_1024_to_1518_byte_packets	+= port_stats.pmm.r1518;
+	p_stats->rx_1519_to_1522_byte_packets	+= port_stats.pmm.r1522;
+	p_stats->rx_1519_to_2047_byte_packets	+= port_stats.pmm.r2047;
+	p_stats->rx_2048_to_4095_byte_packets	+= port_stats.pmm.r4095;
+	p_stats->rx_4096_to_9216_byte_packets	+= port_stats.pmm.r9216;
+	p_stats->rx_9217_to_16383_byte_packets	+= port_stats.pmm.r16383;
 	p_stats->rx_crc_errors			+= port_stats.pmm.rfcs;
 	p_stats->rx_mac_crtl_frames		+= port_stats.pmm.rxcf;
 	p_stats->rx_pause_frames		+= port_stats.pmm.rxpf;
@@ -1481,44 +1539,49 @@
 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
 				  struct qed_ptt *p_ptt,
 				  struct qed_eth_stats *stats,
-				  u16 statistics_bin)
+				  u16 statistics_bin, bool b_get_port_stats)
 {
 	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
 	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
 	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
 	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
 
-	if (p_hwfn->mcp_info)
+	if (b_get_port_stats && p_hwfn->mcp_info)
 		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
 }
 
 static void _qed_get_vport_stats(struct qed_dev *cdev,
 				 struct qed_eth_stats *stats)
 {
-	u8	fw_vport = 0;
-	int	i;
+	u8 fw_vport = 0;
+	int i;
 
 	memset(stats, 0, sizeof(*stats));
 
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-		struct qed_ptt *p_ptt;
+		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+						    : NULL;
 
-		/* The main vport index is relative first */
-		if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
-			DP_ERR(p_hwfn, "No vport available!\n");
-			continue;
+		if (IS_PF(cdev)) {
+			/* The main vport index is relative first */
+			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+				DP_ERR(p_hwfn, "No vport available!\n");
+				goto out;
+			}
 		}
 
-		p_ptt = qed_ptt_acquire(p_hwfn);
-		if (!p_ptt) {
+		if (IS_PF(cdev) && !p_ptt) {
 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
 			continue;
 		}
 
-		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
+		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+				      IS_PF(cdev) ? true : false);
 
-		qed_ptt_release(p_hwfn, p_ptt);
+out:
+		if (IS_PF(cdev) && p_ptt)
+			qed_ptt_release(p_hwfn, p_ptt);
 	}
 }
 
@@ -1552,10 +1615,11 @@
 		struct eth_mstorm_per_queue_stat mstats;
 		struct eth_ustorm_per_queue_stat ustats;
 		struct eth_pstorm_per_queue_stat pstats;
-		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+						    : NULL;
 		u32 addr = 0, len = 0;
 
-		if (!p_ptt) {
+		if (IS_PF(cdev) && !p_ptt) {
 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
 			continue;
 		}
@@ -1572,7 +1636,8 @@
 		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
 		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
 
-		qed_ptt_release(p_hwfn, p_ptt);
+		if (IS_PF(cdev))
+			qed_ptt_release(p_hwfn, p_ptt);
 	}
 
 	/* PORT statistics are not necessarily reset, so we need to
@@ -1593,32 +1658,61 @@
 
 	info->num_tc = 1;
 
-	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
-		for_each_hwfn(cdev, i)
-			info->num_queues += FEAT_NUM(&cdev->hwfns[i],
-						     QED_PF_L2_QUE);
-		if (cdev->int_params.fp_msix_cnt)
-			info->num_queues = min_t(u8, info->num_queues,
-						 cdev->int_params.fp_msix_cnt);
+	if (IS_PF(cdev)) {
+		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+			for_each_hwfn(cdev, i)
+			    info->num_queues +=
+			    FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+			if (cdev->int_params.fp_msix_cnt)
+				info->num_queues =
+				    min_t(u8, info->num_queues,
+					  cdev->int_params.fp_msix_cnt);
+		} else {
+			info->num_queues = cdev->num_hwfns;
+		}
+
+		info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+		ether_addr_copy(info->port_mac,
+				cdev->hwfns[0].hw_info.hw_mac_addr);
 	} else {
-		info->num_queues = cdev->num_hwfns;
+		qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
+		if (cdev->num_hwfns > 1) {
+			u8 queues = 0;
+
+			qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
+			info->num_queues += queues;
+		}
+
+		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
+					    &info->num_vlan_filters);
+		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
 	}
 
-	info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
-	ether_addr_copy(info->port_mac,
-			cdev->hwfns[0].hw_info.hw_mac_addr);
-
 	qed_fill_dev_info(cdev, &info->common);
 
+	if (IS_VF(cdev))
+		memset(info->common.hw_mac, 0, ETH_ALEN);
+
 	return 0;
 }
 
 static void qed_register_eth_ops(struct qed_dev *cdev,
-				 struct qed_eth_cb_ops *ops,
-				 void *cookie)
+				 struct qed_eth_cb_ops *ops, void *cookie)
 {
-	cdev->protocol_ops.eth	= ops;
-	cdev->ops_cookie	= cookie;
+	cdev->protocol_ops.eth = ops;
+	cdev->ops_cookie = cookie;
+
+	/* For VF, we start bulletin reading */
+	if (IS_VF(cdev))
+		qed_vf_start_iov_wq(cdev);
+}
+
+static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
+{
+	if (IS_PF(cdev))
+		return true;
+
+	return qed_vf_check_mac(&cdev->hwfns[0], mac);
 }
 
 static int qed_start_vport(struct qed_dev *cdev,
@@ -1633,6 +1727,7 @@
 		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
 							QED_TPA_MODE_NONE;
 		start.remove_inner_vlan = params->remove_inner_vlan;
+		start.only_untagged = true;	/* untagged only */
 		start.drop_ttl0 = params->drop_ttl0;
 		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
 		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
@@ -1699,6 +1794,8 @@
 		params->update_vport_active_flg;
 	sp_params.vport_active_rx_flg = params->vport_active_flg;
 	sp_params.vport_active_tx_flg = params->vport_active_flg;
+	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
+	sp_params.tx_switching_flg = params->tx_switching_flg;
 	sp_params.accept_any_vlan = params->accept_any_vlan;
 	sp_params.update_accept_any_vlan_flg =
 		params->update_accept_any_vlan_flg;
@@ -1744,9 +1841,7 @@
 		sp_rss_params.update_rss_capabilities = 1;
 		sp_rss_params.update_rss_ind_table = 1;
 		sp_rss_params.update_rss_key = 1;
-		sp_rss_params.rss_caps = QED_RSS_IPV4 |
-					 QED_RSS_IPV6 |
-					 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+		sp_rss_params.rss_caps = params->rss_params.rss_caps;
 		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
 		memcpy(sp_rss_params.rss_ind_table,
 		       params->rss_params.rss_ind_table,
@@ -1899,6 +1994,39 @@
 	return 0;
 }
 
+static int qed_tunn_configure(struct qed_dev *cdev,
+			      struct qed_tunn_params *tunn_params)
+{
+	struct qed_tunn_update_params tunn_info;
+	int i, rc;
+
+	if (IS_VF(cdev))
+		return 0;
+
+	memset(&tunn_info, 0, sizeof(tunn_info));
+	if (tunn_params->update_vxlan_port == 1) {
+		tunn_info.update_vxlan_udp_port = 1;
+		tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+	}
+
+	if (tunn_params->update_geneve_port == 1) {
+		tunn_info.update_geneve_udp_port = 1;
+		tunn_info.geneve_udp_port = tunn_params->geneve_port;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+
+		rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+					       QED_SPQ_MODE_EBLOCK, NULL);
+
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
 					enum qed_filter_rx_mode_type type)
 {
@@ -2026,10 +2154,18 @@
 				      cqe);
 }
 
+#ifdef CONFIG_QED_SRIOV
+extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+#endif
+
 static const struct qed_eth_ops qed_eth_ops_pass = {
 	.common = &qed_common_ops_pass,
+#ifdef CONFIG_QED_SRIOV
+	.iov = &qed_iov_ops_pass,
+#endif
 	.fill_dev_info = &qed_fill_eth_dev_info,
 	.register_ops = &qed_register_eth_ops,
+	.check_mac = &qed_check_mac,
 	.vport_start = &qed_start_vport,
 	.vport_stop = &qed_stop_vport,
 	.vport_update = &qed_update_vport,
@@ -2041,16 +2177,11 @@
 	.fastpath_stop = &qed_fastpath_stop,
 	.eth_cqe_completion = &qed_fp_cqe_completion,
 	.get_vport_stats = &qed_get_vport_stats,
+	.tunn_config = &qed_tunn_configure,
 };
 
-const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+const struct qed_eth_ops *qed_get_eth_ops(void)
 {
-	if (version != QED_ETH_INTERFACE_VERSION) {
-		pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
-			  version, QED_ETH_INTERFACE_VERSION);
-		return NULL;
-	}
-
 	return &qed_eth_ops_pass;
 }
 EXPORT_SYMBOL(qed_get_eth_ops);
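
The recurring pattern throughout qed_l2.c is a thin dispatch at the top of every L2 operation: a VF never posts ramrods itself, it forwards the request over the VF-to-PF channel (qed_vf_pf_*()), while a PF falls through to the existing ramrod path. A caller therefore never needs to distinguish the two; a sketch, using only functions declared in this patch:

/* Illustrative sketch: tear down vport 0 on every hwfn of a device.
 * On a VF, qed_sp_vport_stop() internally becomes qed_vf_pf_vport_stop();
 * on a PF it builds and posts the VPORT_STOP ramrod itself.
 */
static void example_stop_vports(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
	}
}
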
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
new file mode 100644
index 0000000..0021145
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -0,0 +1,239 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _QED_L2_H
+#define _QED_L2_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_eth_if.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_sp.h"
+
+struct qed_sge_tpa_params {
+	u8 max_buffers_per_cqe;
+
+	u8 update_tpa_en_flg;
+	u8 tpa_ipv4_en_flg;
+	u8 tpa_ipv6_en_flg;
+	u8 tpa_ipv4_tunn_en_flg;
+	u8 tpa_ipv6_tunn_en_flg;
+
+	u8 update_tpa_param_flg;
+	u8 tpa_pkt_split_flg;
+	u8 tpa_hdr_data_split_flg;
+	u8 tpa_gro_consistent_flg;
+	u8 tpa_max_aggs_num;
+	u16 tpa_max_size;
+	u16 tpa_min_size_to_start;
+	u16 tpa_min_size_to_cont;
+};
+
+enum qed_filter_opcode {
+	QED_FILTER_ADD,
+	QED_FILTER_REMOVE,
+	QED_FILTER_MOVE,
+	QED_FILTER_REPLACE,	/* Delete all MACs and add new one instead */
+	QED_FILTER_FLUSH,	/* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+	QED_FILTER_MAC,
+	QED_FILTER_VLAN,
+	QED_FILTER_MAC_VLAN,
+	QED_FILTER_INNER_MAC,
+	QED_FILTER_INNER_VLAN,
+	QED_FILTER_INNER_PAIR,
+	QED_FILTER_INNER_MAC_VNI_PAIR,
+	QED_FILTER_MAC_VNI_PAIR,
+	QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+	enum qed_filter_opcode opcode;
+	enum qed_filter_ucast_type type;
+	u8 is_rx_filter;
+	u8 is_tx_filter;
+	u8 vport_to_add_to;
+	u8 vport_to_remove_from;
+	unsigned char mac[ETH_ALEN];
+	u8 assert_on_error;
+	u16 vlan;
+	u32 vni;
+};
+
+struct qed_filter_mcast {
+	/* MOVE is not supported for multicast */
+	enum qed_filter_opcode opcode;
+	u8 vport_to_add_to;
+	u8 vport_to_remove_from;
+	u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS        64
+	unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+			     u16 rx_queue_id,
+			     bool eq_completion_only, bool cqe_completion);
+
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
+
+enum qed_tpa_mode {
+	QED_TPA_MODE_NONE,
+	QED_TPA_MODE_UNUSED,
+	QED_TPA_MODE_GRO,
+	QED_TPA_MODE_MAX
+};
+
+struct qed_sp_vport_start_params {
+	enum qed_tpa_mode tpa_mode;
+	bool remove_inner_vlan;
+	bool tx_switching;
+	bool only_untagged;
+	bool drop_ttl0;
+	u8 max_buffers_per_cqe;
+	u32 concrete_fid;
+	u16 opaque_fid;
+	u8 vport_id;
+	u16 mtu;
+};
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+			   struct qed_sp_vport_start_params *p_params);
+
+struct qed_rss_params {
+	u8	update_rss_config;
+	u8	rss_enable;
+	u8	rss_eng_id;
+	u8	update_rss_capabilities;
+	u8	update_rss_ind_table;
+	u8	update_rss_key;
+	u8	rss_caps;
+	u8	rss_table_size_log;
+	u16	rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+	u32	rss_key[QED_RSS_KEY_SIZE];
+};
+
+struct qed_filter_accept_flags {
+	u8	update_rx_mode_config;
+	u8	update_tx_mode_config;
+	u8	rx_accept_filter;
+	u8	tx_accept_filter;
+#define QED_ACCEPT_NONE         0x01
+#define QED_ACCEPT_UCAST_MATCHED        0x02
+#define QED_ACCEPT_UCAST_UNMATCHED      0x04
+#define QED_ACCEPT_MCAST_MATCHED        0x08
+#define QED_ACCEPT_MCAST_UNMATCHED      0x10
+#define QED_ACCEPT_BCAST                0x20
+};
+
+struct qed_sp_vport_update_params {
+	u16				opaque_fid;
+	u8				vport_id;
+	u8				update_vport_active_rx_flg;
+	u8				vport_active_rx_flg;
+	u8				update_vport_active_tx_flg;
+	u8				vport_active_tx_flg;
+	u8				update_inner_vlan_removal_flg;
+	u8				inner_vlan_removal_flg;
+	u8				silent_vlan_removal_flg;
+	u8				update_default_vlan_enable_flg;
+	u8				default_vlan_enable_flg;
+	u8				update_default_vlan_flg;
+	u16				default_vlan;
+	u8				update_tx_switching_flg;
+	u8				tx_switching_flg;
+	u8				update_approx_mcast_flg;
+	u8				update_anti_spoofing_en_flg;
+	u8				anti_spoofing_en;
+	u8				update_accept_any_vlan_flg;
+	u8				accept_any_vlan;
+	unsigned long			bins[8];
+	struct qed_rss_params		*rss_params;
+	struct qed_filter_accept_flags	accept_flags;
+	struct qed_sge_tpa_params	*sge_tpa_params;
+};
+
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+			struct qed_sp_vport_update_params *p_params,
+			enum spq_mode comp_mode,
+			struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return int
+ */
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
+
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+			    u16 opaque_fid,
+			    struct qed_filter_ucast *p_filter_cmd,
+			    enum spq_mode comp_mode,
+			    struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_eth_rx_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note At the moment - only used by non-Linux VFs.
+ *
+ * @param p_hwfn
+ * @param rx_queue_id		RX Queue ID
+ * @param num_rxqs		Allows updating multiple RX
+ *				queues, from rx_queue_id to
+ *				(rx_queue_id + num_rxqs)
+ * @param complete_cqe_flg	Post completion to the CQE Ring if set
+ * @param complete_event_flg	Post completion to the Event Ring if set
+ *
+ * @return int
+ */
+
+int
+qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+			    u16 rx_queue_id,
+			    u8 num_rxqs,
+			    u8 complete_cqe_flg,
+			    u8 complete_event_flg,
+			    enum spq_mode comp_mode,
+			    struct qed_spq_comp_cb *p_comp_data);
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+			   struct qed_sp_vport_start_params *p_params);
+
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+				u16 opaque_fid,
+				u32 cid,
+				struct qed_queue_start_common_params *params,
+				u8 stats_id,
+				u16 bd_max_bytes,
+				dma_addr_t bd_chain_phys_addr,
+				dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
+				u16  opaque_fid,
+				u32  cid,
+				struct qed_queue_start_common_params *p_params,
+				u8  stats_id,
+				dma_addr_t pbl_addr,
+				u16 pbl_size,
+				union qed_qm_pq_params *p_pq_params);
+
+u8 qed_mcast_bin_from_mac(u8 *mac);
+
+#endif /* _QED_L2_H */
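
The vport-update structure above pairs every attribute with an update_*_flg selector, so a single ramrod can touch any subset of attributes independently. As a rough illustration only (the helper name, the chosen flags and the trimmed error handling are assumptions, not part of this patch), a caller inside the driver might activate a vport and open its RX filters like this:

/* Illustrative sketch, not from the patch: assumes the usual driver-internal
 * context (p_hwfn, opaque_fid, vport_id) and includes already exist.
 */
#include "qed_l2.h"

static int example_vport_activate(struct qed_hwfn *p_hwfn,
				  u16 opaque_fid, u8 vport_id)
{
	struct qed_sp_vport_update_params params;

	memset(&params, 0, sizeof(params));
	params.opaque_fid = opaque_fid;
	params.vport_id = vport_id;

	/* Only attributes whose update_*_flg is set are changed */
	params.update_vport_active_rx_flg = 1;
	params.vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_tx_flg = 1;

	/* Accept matched unicast and broadcast traffic on the RX side */
	params.accept_flags.update_rx_mode_config = 1;
	params.accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					       QED_ACCEPT_BCAST;

	return qed_sp_vport_update(p_hwfn, &params,
				   QED_SPQ_MODE_EBLOCK, NULL);
}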
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 26d40db..8b22f87 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -24,10 +24,12 @@
 #include <linux/qed/qed_if.h>
 
 #include "qed.h"
+#include "qed_sriov.h"
 #include "qed_sp.h"
 #include "qed_dev_api.h"
 #include "qed_mcp.h"
 #include "qed_hw.h"
+#include "qed_selftest.h"
 
 static char version[] =
 	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -124,7 +126,7 @@
 		goto err1;
 	}
 
-	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 		DP_NOTICE(cdev, "No memory region found in bar #2\n");
 		rc = -EIO;
 		goto err1;
@@ -156,7 +158,7 @@
 	}
 
 	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
-	if (cdev->pci_params.pm_cap == 0)
+	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
 		DP_NOTICE(cdev, "Cannot find power management capability\n");
 
 	rc = qed_set_coherency_mask(cdev);
@@ -174,12 +176,14 @@
 		goto err2;
 	}
 
-	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
-	cdev->db_size = pci_resource_len(cdev->pdev, 2);
-	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
-	if (!cdev->doorbells) {
-		DP_NOTICE(cdev, "Cannot map doorbell space\n");
-		return -ENOMEM;
+	if (IS_PF(cdev)) {
+		cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+		cdev->db_size = pci_resource_len(cdev->pdev, 2);
+		cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+		if (!cdev->doorbells) {
+			DP_NOTICE(cdev, "Cannot map doorbell space\n");
+			return -ENOMEM;
+		}
 	}
 
 	return 0;
@@ -206,20 +210,33 @@
 	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
 	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
 
-	dev_info->fw_major = FW_MAJOR_VERSION;
-	dev_info->fw_minor = FW_MINOR_VERSION;
-	dev_info->fw_rev = FW_REVISION_VERSION;
-	dev_info->fw_eng = FW_ENGINEERING_VERSION;
-	dev_info->mf_mode = cdev->mf_mode;
+	if (IS_PF(cdev)) {
+		dev_info->fw_major = FW_MAJOR_VERSION;
+		dev_info->fw_minor = FW_MINOR_VERSION;
+		dev_info->fw_rev = FW_REVISION_VERSION;
+		dev_info->fw_eng = FW_ENGINEERING_VERSION;
+		dev_info->mf_mode = cdev->mf_mode;
+		dev_info->tx_switching = true;
+	} else {
+		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
+				      &dev_info->fw_minor, &dev_info->fw_rev,
+				      &dev_info->fw_eng);
+	}
 
-	qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+	if (IS_PF(cdev)) {
+		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+		if (ptt) {
+			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
+					    &dev_info->mfw_rev, NULL);
 
-	ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
-	if (ptt) {
-		qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
-				       &dev_info->flash_size);
+			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+					       &dev_info->flash_size);
 
-		qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+		}
+	} else {
+		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
+				    &dev_info->mfw_rev, NULL);
 	}
 
 	return 0;
@@ -256,9 +273,7 @@
 
 /* probing */
 static struct qed_dev *qed_probe(struct pci_dev *pdev,
-				 enum qed_protocol protocol,
-				 u32 dp_module,
-				 u8 dp_level)
+				 struct qed_probe_params *params)
 {
 	struct qed_dev *cdev;
 	int rc;
@@ -267,9 +282,12 @@
 	if (!cdev)
 		goto err0;
 
-	cdev->protocol = protocol;
+	cdev->protocol = params->protocol;
 
-	qed_init_dp(cdev, dp_module, dp_level);
+	if (params->is_vf)
+		cdev->b_is_vf = true;
+
+	qed_init_dp(cdev, params->dp_module, params->dp_level);
 
 	rc = qed_init_pci(cdev, pdev);
 	if (rc) {
@@ -663,6 +681,35 @@
 	return 0;
 }
 
+static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
+{
+	int rc;
+
+	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
+
+	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
+			    &cdev->int_params.in.num_vectors);
+	if (cdev->num_hwfns > 1) {
+		u8 vectors = 0;
+
+		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
+		cdev->int_params.in.num_vectors += vectors;
+	}
+
+	/* We want a minimum of one fastpath vector per vf hwfn */
+	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
+
+	rc = qed_set_int_mode(cdev, true);
+	if (rc)
+		return rc;
+
+	cdev->int_params.fp_msix_base = 0;
+	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
+
+	return 0;
+}
+
 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
 		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
 {
@@ -744,39 +791,62 @@
 static int qed_slowpath_start(struct qed_dev *cdev,
 			      struct qed_slowpath_params *params)
 {
+	struct qed_tunn_start_params tunn_info;
 	struct qed_mcp_drv_version drv_version;
 	const u8 *data = NULL;
 	struct qed_hwfn *hwfn;
-	int rc;
+	int rc = -EINVAL;
 
-	rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
-			      &cdev->pdev->dev);
-	if (rc) {
-		DP_NOTICE(cdev,
-			  "Failed to find fw file - /lib/firmware/%s\n",
-			  QED_FW_FILE_NAME);
+	if (qed_iov_wq_start(cdev))
 		goto err;
+
+	if (IS_PF(cdev)) {
+		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+				      &cdev->pdev->dev);
+		if (rc) {
+			DP_NOTICE(cdev,
+				  "Failed to find fw file - /lib/firmware/%s\n",
+				  QED_FW_FILE_NAME);
+			goto err;
+		}
 	}
 
 	rc = qed_nic_setup(cdev);
 	if (rc)
 		goto err;
 
-	rc = qed_slowpath_setup_int(cdev, params->int_mode);
+	if (IS_PF(cdev))
+		rc = qed_slowpath_setup_int(cdev, params->int_mode);
+	else
+		rc = qed_slowpath_vf_setup_int(cdev);
 	if (rc)
 		goto err1;
 
-	/* Allocate stream for unzipping */
-	rc = qed_alloc_stream_mem(cdev);
-	if (rc) {
-		DP_NOTICE(cdev, "Failed to allocate stream memory\n");
-		goto err2;
+	if (IS_PF(cdev)) {
+		/* Allocate stream for unzipping */
+		rc = qed_alloc_stream_mem(cdev);
+		if (rc) {
+			DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+			goto err2;
+		}
+
+		data = cdev->firmware->data;
 	}
 
-	/* Start the slowpath */
-	data = cdev->firmware->data;
+	memset(&tunn_info, 0, sizeof(tunn_info));
+	tunn_info.tunn_mode |=  1 << QED_MODE_VXLAN_TUNN |
+				1 << QED_MODE_L2GRE_TUNN |
+				1 << QED_MODE_IPGRE_TUNN |
+				1 << QED_MODE_L2GENEVE_TUNN |
+				1 << QED_MODE_IPGENEVE_TUNN;
 
-	rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
+	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
+
+	/* Start the slowpath */
+	rc = qed_hw_init(cdev, &tunn_info, true,
+			 cdev->int_params.out.int_mode,
 			 true, data);
 	if (rc)
 		goto err2;
@@ -784,18 +854,20 @@
 	DP_INFO(cdev,
 		"HW initialization and function start completed successfully\n");
 
-	hwfn = QED_LEADING_HWFN(cdev);
-	drv_version.version = (params->drv_major << 24) |
-			      (params->drv_minor << 16) |
-			      (params->drv_rev << 8) |
-			      (params->drv_eng);
-	strlcpy(drv_version.name, params->name,
-		MCP_DRV_VER_STR_SIZE - 4);
-	rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
-				      &drv_version);
-	if (rc) {
-		DP_NOTICE(cdev, "Failed sending drv version command\n");
-		return rc;
+	if (IS_PF(cdev)) {
+		hwfn = QED_LEADING_HWFN(cdev);
+		drv_version.version = (params->drv_major << 24) |
+				      (params->drv_minor << 16) |
+				      (params->drv_rev << 8) |
+				      (params->drv_eng);
+		strlcpy(drv_version.name, params->name,
+			MCP_DRV_VER_STR_SIZE - 4);
+		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+					      &drv_version);
+		if (rc) {
+			DP_NOTICE(cdev, "Failed sending drv version command\n");
+			return rc;
+		}
 	}
 
 	qed_reset_vport_stats(cdev);
@@ -804,13 +876,17 @@
 
 err2:
 	qed_hw_timers_stop_all(cdev);
-	qed_slowpath_irq_free(cdev);
+	if (IS_PF(cdev))
+		qed_slowpath_irq_free(cdev);
 	qed_free_stream_mem(cdev);
 	qed_disable_msix(cdev);
 err1:
 	qed_resc_free(cdev);
 err:
-	release_firmware(cdev->firmware);
+	if (IS_PF(cdev))
+		release_firmware(cdev->firmware);
+
+	qed_iov_wq_stop(cdev, false);
 
 	return rc;
 }
@@ -820,15 +896,21 @@
 	if (!cdev)
 		return -ENODEV;
 
-	qed_free_stream_mem(cdev);
+	if (IS_PF(cdev)) {
+		qed_free_stream_mem(cdev);
+		qed_sriov_disable(cdev, true);
 
-	qed_nic_stop(cdev);
-	qed_slowpath_irq_free(cdev);
+		qed_nic_stop(cdev);
+		qed_slowpath_irq_free(cdev);
+	}
 
 	qed_disable_msix(cdev);
 	qed_nic_reset(cdev);
 
-	release_firmware(cdev->firmware);
+	qed_iov_wq_stop(cdev, true);
+
+	if (IS_PF(cdev))
+		release_firmware(cdev->firmware);
 
 	return 0;
 }
@@ -902,6 +984,11 @@
 	return rc;
 }
 
+static bool qed_can_link_change(struct qed_dev *cdev)
+{
+	return true;
+}
+
 static int qed_set_link(struct qed_dev *cdev,
 			struct qed_link_params *params)
 {
@@ -913,6 +1000,9 @@
 	if (!cdev)
 		return -ENODEV;
 
+	if (IS_VF(cdev))
+		return 0;
+
 	/* The link should be set only once per PF */
 	hwfn = &cdev->hwfns[0];
 
@@ -944,6 +1034,39 @@
 	}
 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
 		link_params->speed.forced_speed = params->forced_speed;
+	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+			link_params->pause.autoneg = true;
+		else
+			link_params->pause.autoneg = false;
+		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+			link_params->pause.forced_rx = true;
+		else
+			link_params->pause.forced_rx = false;
+		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+			link_params->pause.forced_tx = true;
+		else
+			link_params->pause.forced_tx = false;
+	}
+	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
+		switch (params->loopback_mode) {
+		case QED_LINK_LOOPBACK_INT_PHY:
+			link_params->loopback_mode = PMM_LOOPBACK_INT_PHY;
+			break;
+		case QED_LINK_LOOPBACK_EXT_PHY:
+			link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY;
+			break;
+		case QED_LINK_LOOPBACK_EXT:
+			link_params->loopback_mode = PMM_LOOPBACK_EXT;
+			break;
+		case QED_LINK_LOOPBACK_MAC:
+			link_params->loopback_mode = PMM_LOOPBACK_MAC;
+			break;
+		default:
+			link_params->loopback_mode = PMM_LOOPBACK_NONE;
+			break;
+		}
+	}
 
 	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
 
@@ -991,10 +1114,16 @@
 	memset(if_link, 0, sizeof(*if_link));
 
 	/* Prepare source inputs */
-	memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
-	memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
-	memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
-	       sizeof(link_caps));
+	if (IS_PF(hwfn->cdev)) {
+		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+		memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
+		       sizeof(link_caps));
+	} else {
+		qed_vf_get_link_params(hwfn, &params);
+		qed_vf_get_link_state(hwfn, &link);
+		qed_vf_get_link_caps(hwfn, &link_caps);
+	}
 
 	/* Set the link parameters to pass to protocol driver */
 	if (link.link_up)
@@ -1096,7 +1225,12 @@
 static void qed_get_current_link(struct qed_dev *cdev,
 				 struct qed_link_output *if_link)
 {
+	int i;
+
 	qed_fill_link(&cdev->hwfns[0], if_link);
+
+	for_each_hwfn(cdev, i)
+		qed_inform_vf_link_state(&cdev->hwfns[i]);
 }
 
 void qed_link_update(struct qed_hwfn *hwfn)
@@ -1106,6 +1240,7 @@
 	struct qed_link_output if_link;
 
 	qed_fill_link(hwfn, &if_link);
+	qed_inform_vf_link_state(hwfn);
 
 	if (IS_LEAD_HWFN(hwfn) && cookie)
 		op->link_update(cookie, &if_link);
@@ -1117,6 +1252,9 @@
 	struct qed_ptt *ptt;
 	int i, rc;
 
+	if (IS_VF(cdev))
+		return 0;
+
 	for_each_hwfn(cdev, i) {
 		hwfn = &cdev->hwfns[i];
 		ptt = qed_ptt_acquire(hwfn);
@@ -1150,7 +1288,15 @@
 	return status;
 }
 
+struct qed_selftest_ops qed_selftest_ops_pass = {
+	.selftest_memory = &qed_selftest_memory,
+	.selftest_interrupt = &qed_selftest_interrupt,
+	.selftest_register = &qed_selftest_register,
+	.selftest_clock = &qed_selftest_clock,
+};
+
 const struct qed_common_ops qed_common_ops_pass = {
+	.selftest = &qed_selftest_ops_pass,
 	.probe = &qed_probe,
 	.remove = &qed_remove,
 	.set_power_state = &qed_set_power_state,
@@ -1164,6 +1310,7 @@
 	.sb_release = &qed_sb_release,
 	.simd_handler_config = &qed_simd_handler_config,
 	.simd_handler_clean = &qed_simd_handler_clean,
+	.can_link_change = &qed_can_link_change,
 	.set_link = &qed_set_link,
 	.get_link = &qed_get_current_link,
 	.drain = &qed_drain,
@@ -1172,14 +1319,3 @@
 	.chain_free = &qed_chain_free,
 	.set_led = &qed_set_led,
 };
-
-u32 qed_get_protocol_version(enum qed_protocol protocol)
-{
-	switch (protocol) {
-	case QED_PROTOCOL_ETH:
-		return QED_ETH_INTERFACE_VERSION;
-	default:
-		return 0;
-	}
-}
-EXPORT_SYMBOL(qed_get_protocol_version);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index b89c9a8..1182361 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -15,10 +15,13 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include "qed.h"
+#include "qed_dcbx.h"
 #include "qed_hsi.h"
 #include "qed_hw.h"
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
 #define CHIP_MCP_RESP_ITER_US 10
 
 #define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
@@ -440,6 +443,75 @@
 	return 0;
 }
 
+static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt)
+{
+	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					PUBLIC_PATH);
+	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+				     QED_PATH_ID(p_hwfn));
+	u32 disabled_vfs[VF_MAX_STATIC / 32];
+	int i;
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_SP,
+		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
+		   mfw_path_offsize, path_addr);
+
+	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
+					 path_addr +
+					 offsetof(struct public_path,
+						  mcp_vf_disabled) +
+					 sizeof(u32) * i);
+		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+	}
+
+	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+}
+
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
+{
+	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					PUBLIC_FUNC);
+	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
+	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+				     MCP_PF_ID(p_hwfn));
+	struct qed_mcp_mb_params mb_params;
+	union drv_union_data union_data;
+	int rc;
+	int i;
+
+	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+			   "Acking VFs [%08x,...,%08x] - %08x\n",
+			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+	memset(&mb_params, 0, sizeof(mb_params));
+	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+	memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
+	mb_params.p_data_src = &union_data;
+	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
+		return -EBUSY;
+	}
+
+	/* Clear the ACK bits */
+	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+		qed_wr(p_hwfn, p_ptt,
+		       func_addr +
+		       offsetof(struct public_func, drv_ack_vf_disabled) +
+		       i * sizeof(u32), 0);
+
+	return rc;
+}
+
 static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
 					      struct qed_ptt *p_ptt)
 {
@@ -472,6 +544,7 @@
 				       bool b_reset)
 {
 	struct qed_mcp_link_state *p_link;
+	u8 max_bw, min_bw;
 	u32 status = 0;
 
 	p_link = &p_hwfn->mcp_info->link_output;
@@ -527,17 +600,20 @@
 		p_link->speed = 0;
 	}
 
-	/* Correct speed according to bandwidth allocation */
-	if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
-		p_link->speed = p_link->speed *
-				p_hwfn->mcp_info->func_info.bandwidth_max /
-				100;
-		qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
-			       p_link->speed);
-		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
-			   "Configured MAX bandwidth to be %08x Mb/sec\n",
-			   p_link->speed);
-	}
+	if (p_link->link_up && p_link->speed)
+		p_link->line_speed = p_link->speed;
+	else
+		p_link->line_speed = 0;
+
+	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+	/* Max bandwidth configuration */
+	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
+
+	/* Min bandwidth configuration */
+	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
+	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
 
 	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
 	p_link->an_complete = !!(status &
@@ -648,6 +724,77 @@
 	return 0;
 }
 
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+				  struct public_func *p_shmem_info)
+{
+	struct qed_mcp_function_info *p_info;
+
+	p_info = &p_hwfn->mcp_info->func_info;
+
+	p_info->bandwidth_min = (p_shmem_info->config &
+				 FUNC_MF_CFG_MIN_BW_MASK) >>
+					FUNC_MF_CFG_MIN_BW_SHIFT;
+	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+		DP_INFO(p_hwfn,
+			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
+			p_info->bandwidth_min);
+		p_info->bandwidth_min = 1;
+	}
+
+	p_info->bandwidth_max = (p_shmem_info->config &
+				 FUNC_MF_CFG_MAX_BW_MASK) >>
+					FUNC_MF_CFG_MAX_BW_SHIFT;
+	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+		DP_INFO(p_hwfn,
+			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
+			p_info->bandwidth_max);
+		p_info->bandwidth_max = 100;
+	}
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt,
+				  struct public_func *p_data,
+				  int pfid)
+{
+	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					PUBLIC_FUNC);
+	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+	u32 i, size;
+
+	memset(p_data, 0, sizeof(*p_data));
+
+	size = min_t(u32, sizeof(*p_data),
+		     QED_SECTION_SIZE(mfw_path_offsize));
+	for (i = 0; i < size / sizeof(u32); i++)
+		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+					    func_addr + (i << 2));
+	return size;
+}
+
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt)
+{
+	struct qed_mcp_function_info *p_info;
+	struct public_func shmem_info;
+	u32 resp = 0, param = 0;
+
+	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+			       MCP_PF_ID(p_hwfn));
+
+	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+	p_info = &p_hwfn->mcp_info->func_info;
+
+	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
+	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
+
+	/* Acknowledge the MFW */
+	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+		    &param);
+}
+
 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt)
 {
@@ -676,9 +823,27 @@
 		case MFW_DRV_MSG_LINK_CHANGE:
 			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
 			break;
+		case MFW_DRV_MSG_VF_DISABLED:
+			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
+			break;
+		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+						  QED_DCBX_REMOTE_LLDP_MIB);
+			break;
+		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+						  QED_DCBX_REMOTE_MIB);
+			break;
+		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+						  QED_DCBX_OPERATIONAL_MIB);
+			break;
 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
 			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
 			break;
+		case MFW_DRV_MSG_BW_UPDATE:
+			qed_mcp_update_bw(p_hwfn, p_ptt);
+			break;
 		default:
 			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
 			rc = -EINVAL;
@@ -709,26 +874,42 @@
 	return rc;
 }
 
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
-			u32 *p_mfw_ver)
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt,
+			u32 *p_mfw_ver, u32 *p_running_bundle_id)
 {
-	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
-	struct qed_ptt *p_ptt;
 	u32 global_offsize;
 
-	p_ptt = qed_ptt_acquire(p_hwfn);
-	if (!p_ptt)
-		return -EBUSY;
+	if (IS_VF(p_hwfn->cdev)) {
+		if (p_hwfn->vf_iov_info) {
+			struct pfvf_acquire_resp_tlv *p_resp;
+
+			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+			return 0;
+		} else {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "VF requested MFW version prior to ACQUIRE\n");
+			return -EINVAL;
+		}
+	}
 
 	global_offsize = qed_rd(p_hwfn, p_ptt,
-				SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
-						     public_base,
+				SECTION_OFFSIZE_ADDR(p_hwfn->
+						     mcp_info->public_base,
 						     PUBLIC_GLOBAL));
-	*p_mfw_ver = qed_rd(p_hwfn, p_ptt,
-			    SECTION_ADDR(global_offsize, 0) +
-			    offsetof(struct public_global, mfw_ver));
+	*p_mfw_ver =
+	    qed_rd(p_hwfn, p_ptt,
+		   SECTION_ADDR(global_offsize,
+				0) + offsetof(struct public_global, mfw_ver));
 
-	qed_ptt_release(p_hwfn, p_ptt);
+	if (p_running_bundle_id != NULL) {
+		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
+					      SECTION_ADDR(global_offsize, 0) +
+					      offsetof(struct public_global,
+						       running_bundle_id));
+	}
 
 	return 0;
 }
@@ -739,6 +920,9 @@
 	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
 	struct qed_ptt  *p_ptt;
 
+	if (IS_VF(cdev))
+		return -EINVAL;
+
 	if (!qed_mcp_is_init(p_hwfn)) {
 		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
 		return -EBUSY;
@@ -758,28 +942,6 @@
 	return 0;
 }
 
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
-				  struct qed_ptt *p_ptt,
-				  struct public_func *p_data,
-				  int pfid)
-{
-	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
-					PUBLIC_FUNC);
-	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
-	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
-	u32 i, size;
-
-	memset(p_data, 0, sizeof(*p_data));
-
-	size = min_t(u32, sizeof(*p_data),
-		     QED_SECTION_SIZE(mfw_path_offsize));
-	for (i = 0; i < size / sizeof(u32); i++)
-		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
-					    func_addr + (i << 2));
-
-	return size;
-}
-
 static int
 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
 			struct public_func *p_info,
@@ -818,26 +980,7 @@
 		return -EINVAL;
 	}
 
-
-	info->bandwidth_min = (shmem_info.config &
-			       FUNC_MF_CFG_MIN_BW_MASK) >>
-			      FUNC_MF_CFG_MIN_BW_SHIFT;
-	if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
-		DP_INFO(p_hwfn,
-			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
-			info->bandwidth_min);
-		info->bandwidth_min = 1;
-	}
-
-	info->bandwidth_max = (shmem_info.config &
-			       FUNC_MF_CFG_MAX_BW_MASK) >>
-			      FUNC_MF_CFG_MAX_BW_SHIFT;
-	if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
-		DP_INFO(p_hwfn,
-			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
-			info->bandwidth_max);
-		info->bandwidth_max = 100;
-	}
+	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
 
 	if (shmem_info.mac_upper || shmem_info.mac_lower) {
 		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
@@ -914,6 +1057,9 @@
 {
 	u32 flash_size;
 
+	if (IS_VF(p_hwfn->cdev))
+		return -EINVAL;
+
 	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
 	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
 		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
@@ -924,6 +1070,37 @@
 	return 0;
 }
 
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+	u32 resp = 0, param = 0, rc_param = 0;
+	int rc;
+
+	/* Only the leader hwfn can configure MSI-X; CMT must be taken into account */
+	if (!IS_LEAD_HWFN(p_hwfn))
+		return 0;
+	num *= p_hwfn->cdev->num_hwfns;
+
+	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+			 &resp, &rc_param);
+
+	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
+		rc = -EINVAL;
+	} else {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+			   num, vf_id);
+	}
+
+	return rc;
+}
+
 int
 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
 			 struct qed_ptt *p_ptt,
@@ -938,9 +1115,10 @@
 
 	p_drv_version = &union_data.drv_version;
 	p_drv_version->version = p_ver->version;
+
 	for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
 		val = cpu_to_be32(p_ver->name[i]);
-		*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+		*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
 	}
 
 	memset(&mb_params, 0, sizeof(mb_params));
@@ -979,3 +1157,45 @@
 
 	return rc;
 }
+
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	u32 drv_mb_param = 0, rsp, param;
+	int rc = 0;
+
+	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+			 drv_mb_param, &rsp, &param);
+
+	if (rc)
+		return rc;
+
+	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
+		rc = -EAGAIN;
+
+	return rc;
+}
+
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	u32 drv_mb_param, rsp, param;
+	int rc = 0;
+
+	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+			 drv_mb_param, &rsp, &param);
+
+	if (rc)
+		return rc;
+
+	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
+		rc = -EAGAIN;
+
+	return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 50917a2..6dd59eb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -40,7 +40,15 @@
 struct qed_mcp_link_state {
 	bool    link_up;
 
-	u32     speed; /* In Mb/s */
+	u32	min_pf_rate;
+
+	/* Actual link speed in Mb/s */
+	u32	line_speed;
+
+	/* PF max speed in Mb/s, deduced from line_speed
+	 * according to PF max bandwidth configuration.
+	 */
+	u32     speed;
 	bool    full_duplex;
 
 	bool    an;
@@ -141,13 +149,16 @@
 /**
  * @brief Get the management firmware version value
  *
- * @param cdev       - qed dev pointer
- * @param mfw_ver    - mfw version value
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mfw_ver    - mfw version value
+ * @param p_running_bundle_id	- image id in NVRAM; optional.
  *
- * @return int - 0 - operation was successul.
+ * @return int - 0 - operation was successful.
  */
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
-			u32 *mfw_ver);
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt,
+			u32 *p_mfw_ver, u32 *p_running_bundle_id);
 
 /**
  * @brief Get media type value of the port.
@@ -237,6 +248,28 @@
 		    struct qed_ptt *p_ptt,
 		    enum qed_led_mode mode);
 
+/**
+ * @brief Bist register test
+ *
+ *  @param p_hwfn    - hw function
+ *  @param p_ptt     - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
+			       struct qed_ptt *p_ptt);
+
+/**
+ * @brief Bist clock test
+ *
+ *  @param p_hwfn    - hw function
+ *  @param p_ptt     - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
+			    struct qed_ptt *p_ptt);
+
 /* Using hwfn number (and not pf_num) is required since in CMT mode,
  * same pf_num may be used by two different hwfn
  * TODO - this shouldn't really be in .h file, but until all fields
@@ -360,6 +393,18 @@
 		     struct qed_ptt *p_ptt);
 
 /**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @return int - 0 upon success.
+ */
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt, u32 *vfs_to_ack);
+
+/**
  * @brief - calls during init to read shmem of all function-related info.
  *
  * @param p_hwfn
@@ -389,4 +434,27 @@
  */
 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
 
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute VF id inside the engine
+ * @param num - number of SBs (entries) to request
+ *
+ * @return int
+ */
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_mcp_link_state *p_link,
+				     u8 max_bw);
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_mcp_link_state *p_link,
+				     u8 min_bw);
 #endif
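
qed_mcp_get_mfw_ver() now takes an explicit hwfn/PTT pair instead of a qed_dev, so PF callers own the PTT acquire/release themselves, while VF callers pass a NULL PTT and get the value cached from the ACQUIRE response (see the qed_main.c hunk earlier). A minimal sketch of the PF-side call pattern, with a hypothetical helper name and assuming the usual PTT helpers are in scope:

#include "qed_mcp.h"

/* Sketch only, not part of the patch */
static void example_read_mfw_ver(struct qed_hwfn *p_hwfn, u32 *p_mfw_ver)
{
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt)
		return;

	/* The running bundle id output is optional; NULL skips it */
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, p_mfw_ver, NULL);
	qed_ptt_release(p_hwfn, p_ptt);
}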
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index c15b162..3a6c506 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -39,6 +39,10 @@
 	0x2aae04UL
 #define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER	\
 	0x2aa16cUL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
+	0x2aa118UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE \
+	0x2a0800UL
 #define  BAR0_MAP_REG_MSDM_RAM \
 	0x1d00000UL
 #define  BAR0_MAP_REG_USDM_RAM \
@@ -77,6 +81,8 @@
 	0x2f2eb0UL
 #define  DORQ_REG_PF_DB_ENABLE \
 	0x100508UL
+#define DORQ_REG_VF_USAGE_CNT \
+	0x1009c4UL
 #define  QM_REG_PF_EN \
 	0x2f2ea4UL
 #define  TCFC_REG_STRONG_ENABLE_PF \
@@ -111,6 +117,8 @@
 	0x009778UL
 #define  MISCS_REG_CHIP_METAL \
 	0x009774UL
+#define MISCS_REG_FUNCTION_HIDE \
+	0x0096f0UL
 #define  BRB_REG_HEADER_SIZE \
 	0x340804UL
 #define  BTB_REG_HEADER_SIZE \
@@ -119,6 +127,8 @@
 	0x1c0708UL
 #define  CCFC_REG_ACTIVITY_COUNTER \
 	0x2e8800UL
+#define CCFC_REG_STRONG_ENABLE_VF \
+	0x2e070cUL
 #define  CDU_REG_CID_ADDR_PARAMS	\
 	0x580900UL
 #define  DBG_REG_CLIENT_ENABLE \
@@ -161,6 +171,10 @@
 	0x040200UL
 #define  PBF_REG_INIT \
 	0xd80000UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \
+	0xd806c8UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \
+	0xd806ccUL
 #define  PTU_REG_ATC_INIT_ARRAY \
 	0x560000UL
 #define  PCM_REG_INIT \
@@ -385,6 +399,8 @@
 	0x1d0000UL
 #define  IGU_REG_PF_CONFIGURATION \
 	0x180800UL
+#define IGU_REG_VF_CONFIGURATION \
+	0x180804UL
 #define  MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
 	0x00849cUL
 #define MISC_REG_AEU_AFTER_INVERT_1_IGU	\
@@ -411,6 +427,10 @@
 		0x1 << 0)
 #define  IGU_REG_MAPPING_MEMORY \
 	0x184000UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
+	0x180408UL
+#define IGU_REG_WRITE_DONE_PENDING \
+	0x180900UL
 #define  MISCS_REG_GENERIC_POR_0	\
 	0x0096d4UL
 #define  MCP_REG_NVM_CFG4 \
@@ -427,4 +447,37 @@
 	0x2aae60UL
 #define PGLUE_B_REG_PF_BAR1_SIZE \
 	0x2aae64UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN	0x1f0730UL
+#define PRS_REG_GRE_PROTOCOL		0x1f0734UL
+#define PRS_REG_VXLAN_PORT		0x1f0738UL
+#define PRS_REG_OUTPUT_FORMAT_4_0	0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE		0x501058UL
+
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE		(0x1 << 0)
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT	0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE		(0x1 << 1)
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT	1
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE			(0x1 << 2)
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT		2
+
+#define NIG_REG_VXLAN_PORT		0x50105cUL
+#define PBF_REG_VXLAN_PORT		0xd80518UL
+#define PBF_REG_NGE_PORT		0xd8051cUL
+#define PRS_REG_NGE_PORT		0x1f086cUL
+#define NIG_REG_NGE_PORT		0x508b38UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN	0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN	0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN	0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN	0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN	0x100930UL
+
+#define NIG_REG_NGE_IP_ENABLE			0x508b28UL
+#define NIG_REG_NGE_ETH_ENABLE			0x508b2cUL
+#define NIG_REG_NGE_COMP_VER			0x508b30UL
+#define PBF_REG_NGE_COMP_VER			0xd80524UL
+#define PRS_REG_NGE_COMP_VER			0x1f0878UL
+
+#define QM_REG_WFQPFWEIGHT	0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT	0x2fa000UL
 #endif
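
The NIG_REG_ENC_TYPE_ENABLE_* defines above come in value/shift pairs, so the encapsulation-type enables can be composed as a plain bitmask before a single register write. A sketch of how such a value might be built, assuming the usual register-access helpers (the helper name and chosen flags are illustrative, not from the patch):

#include "qed_hw.h"
#include "qed_reg_addr.h"

/* Sketch only: enable Ethernet-over-GRE and IP-over-GRE classification */
static void example_enable_gre(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 reg_val = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE |
		      NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE;

	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
}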
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
new file mode 100644
index 0000000..a342bfe
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -0,0 +1,76 @@
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+int qed_selftest_memory(struct qed_dev *cdev)
+{
+	int rc = 0, i;
+
+	for_each_hwfn(cdev, i) {
+		rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+int qed_selftest_interrupt(struct qed_dev *cdev)
+{
+	int rc = 0, i;
+
+	for_each_hwfn(cdev, i) {
+		rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+int qed_selftest_register(struct qed_dev *cdev)
+{
+	struct qed_hwfn *p_hwfn;
+	struct qed_ptt *p_ptt;
+	int rc = 0, i;
+
+	/* although performed by MCP, this test is per engine */
+	for_each_hwfn(cdev, i) {
+		p_hwfn = &cdev->hwfns[i];
+		p_ptt = qed_ptt_acquire(p_hwfn);
+		if (!p_ptt) {
+			DP_ERR(p_hwfn, "failed to acquire ptt\n");
+			return -EBUSY;
+		}
+		rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
+		qed_ptt_release(p_hwfn, p_ptt);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+int qed_selftest_clock(struct qed_dev *cdev)
+{
+	struct qed_hwfn *p_hwfn;
+	struct qed_ptt *p_ptt;
+	int rc = 0, i;
+
+	/* although performed by MCP, this test is per engine */
+	for_each_hwfn(cdev, i) {
+		p_hwfn = &cdev->hwfns[i];
+		p_ptt = qed_ptt_acquire(p_hwfn);
+		if (!p_ptt) {
+			DP_ERR(p_hwfn, "failed to acquire ptt\n");
+			return -EBUSY;
+		}
+		rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt);
+		qed_ptt_release(p_hwfn, p_ptt);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
new file mode 100644
index 0000000..50eb0b49
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -0,0 +1,40 @@
+#ifndef _QED_SELFTEST_API_H
+#define _QED_SELFTEST_API_H
+#include <linux/types.h>
+
+/**
+ * @brief qed_selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_memory(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_interrupt(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_register(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_clock(struct qed_dev *cdev);
+#endif
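
These four entry points are exported to upper-layer drivers through the qed_selftest_ops structure registered in qed_main.c above. As a rough sketch of how a consumer might chain them (the helper and how it obtains the ops pointer are assumptions, not part of the patch):

/* Sketch only: run all exported self-tests, stopping at the first failure */
static int example_run_selftests(struct qed_selftest_ops *ops,
				 struct qed_dev *cdev)
{
	int rc;

	rc = ops->selftest_memory(cdev);
	if (rc)
		return rc;

	rc = ops->selftest_interrupt(cdev);
	if (rc)
		return rc;

	rc = ops->selftest_register(cdev);
	if (rc)
		return rc;

	return ops->selftest_clock(cdev);
}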
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index d39f914..ea4e9ce 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -52,6 +52,7 @@
 
 union ramrod_data {
 	struct pf_start_ramrod_data pf_start;
+	struct pf_update_ramrod_data pf_update;
 	struct rx_queue_start_ramrod_data rx_queue_start;
 	struct rx_queue_update_ramrod_data rx_queue_update;
 	struct rx_queue_stop_ramrod_data rx_queue_stop;
@@ -61,6 +62,9 @@
 	struct vport_stop_ramrod_data vport_stop;
 	struct vport_update_ramrod_data vport_update;
 	struct vport_filter_update_ramrod_data vport_filter_update;
+
+	struct vf_start_ramrod_data vf_start;
+	struct vf_stop_ramrod_data vf_stop;
 };
 
 #define EQ_MAX_CREDIT   0xffffffff
@@ -338,13 +342,29 @@
  * to the internal RAM of the UStorm by the Function Start Ramrod.
  *
  * @param p_hwfn
+ * @param p_tunn
  * @param mode
+ * @param allow_npar_tx_switch
  *
  * @return int
  */
 
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
-		    enum qed_mf_mode mode);
+		    struct qed_tunn_start_params *p_tunn,
+		    enum qed_mf_mode mode, bool allow_npar_tx_switch);
+
+/**
+ * @brief qed_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
 
 /**
  * @brief qed_sp_pf_stop - PF Function Stop Ramrod
@@ -362,4 +382,18 @@
 
 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
 
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+			      struct qed_tunn_update_params *p_tunn,
+			      enum spq_mode comp_mode,
+			      struct qed_spq_comp_cb *p_comp_data);
+/**
+ * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
+
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 1c06c37..67f6ce3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -15,11 +15,13 @@
 #include "qed.h"
 #include <linux/qed/qed_chain.h>
 #include "qed_cxt.h"
+#include "qed_dcbx.h"
 #include "qed_hsi.h"
 #include "qed_hw.h"
 #include "qed_int.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
 
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 			struct qed_spq_entry **pp_ent,
@@ -87,8 +89,218 @@
 	return 0;
 }
 
+static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+{
+	switch (type) {
+	case QED_TUNN_CLSS_MAC_VLAN:
+		return TUNNEL_CLSS_MAC_VLAN;
+	case QED_TUNN_CLSS_MAC_VNI:
+		return TUNNEL_CLSS_MAC_VNI;
+	case QED_TUNN_CLSS_INNER_MAC_VLAN:
+		return TUNNEL_CLSS_INNER_MAC_VLAN;
+	case QED_TUNN_CLSS_INNER_MAC_VNI:
+		return TUNNEL_CLSS_INNER_MAC_VNI;
+	default:
+		return TUNNEL_CLSS_MAC_VLAN;
+	}
+}
+
+static void
+qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
+			      struct qed_tunn_update_params *p_src,
+			      struct pf_update_tunnel_config *p_tunn_cfg)
+{
+	unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
+	unsigned long update_mask = p_src->tunn_mode_update_mask;
+	unsigned long tunn_mode = p_src->tunn_mode;
+	unsigned long new_tunn_mode = 0;
+
+	if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
+		if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+			__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+	} else {
+		if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
+			__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+	}
+
+	if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
+		if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+			__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+	} else {
+		if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
+			__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+	}
+
+	if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
+		if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+			__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+	} else {
+		if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
+			__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+	}
+
+	if (p_src->update_geneve_udp_port) {
+		p_tunn_cfg->set_geneve_udp_port_flg = 1;
+		p_tunn_cfg->geneve_udp_port =
+				cpu_to_le16(p_src->geneve_udp_port);
+	}
+
+	if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
+		if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+			__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+	} else {
+		if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+			__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+	}
+
+	if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
+		if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+			__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+	} else {
+		if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+			__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+	}
+
+	p_src->tunn_mode = new_tunn_mode;
+}
+
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+			      struct qed_tunn_update_params *p_src,
+			      struct pf_update_tunnel_config *p_tunn_cfg)
+{
+	unsigned long tunn_mode = p_src->tunn_mode;
+	enum tunnel_clss type;
+
+	qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
+	p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
+	p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+	p_tunn_cfg->tunnel_clss_vxlan  = type;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+	p_tunn_cfg->tunnel_clss_l2gre = type;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+	p_tunn_cfg->tunnel_clss_ipgre = type;
+
+	if (p_src->update_vxlan_udp_port) {
+		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+		p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+	}
+
+	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_l2gre = 1;
+
+	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_ipgre = 1;
+
+	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_vxlan = 1;
+
+	if (p_src->update_geneve_udp_port) {
+		p_tunn_cfg->set_geneve_udp_port_flg = 1;
+		p_tunn_cfg->geneve_udp_port =
+				cpu_to_le16(p_src->geneve_udp_port);
+	}
+
+	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_l2geneve = 1;
+
+	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+	p_tunn_cfg->tunnel_clss_l2geneve = type;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+	p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt,
+				 unsigned long tunn_mode)
+{
+	u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+	u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+		l2gre_enable = 1;
+
+	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+		ipgre_enable = 1;
+
+	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+		vxlan_enable = 1;
+
+	qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+	qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+		l2geneve_enable = 1;
+
+	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+		ipgeneve_enable = 1;
+
+	qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+			      ipgeneve_enable);
+}
+
+static void
+qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
+			     struct qed_tunn_start_params *p_src,
+			     struct pf_start_tunnel_config *p_tunn_cfg)
+{
+	unsigned long tunn_mode;
+	enum tunnel_clss type;
+
+	if (!p_src)
+		return;
+
+	tunn_mode = p_src->tunn_mode;
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+	p_tunn_cfg->tunnel_clss_vxlan = type;
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+	p_tunn_cfg->tunnel_clss_l2gre = type;
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+	p_tunn_cfg->tunnel_clss_ipgre = type;
+
+	if (p_src->update_vxlan_udp_port) {
+		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+		p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+	}
+
+	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_l2gre = 1;
+
+	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_ipgre = 1;
+
+	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_vxlan = 1;
+
+	if (p_src->update_geneve_udp_port) {
+		p_tunn_cfg->set_geneve_udp_port_flg = 1;
+		p_tunn_cfg->geneve_udp_port =
+				cpu_to_le16(p_src->geneve_udp_port);
+	}
+
+	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_l2geneve = 1;
+
+	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+		p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+	p_tunn_cfg->tunnel_clss_l2geneve = type;
+	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+	p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
-		    enum qed_mf_mode mode)
+		    struct qed_tunn_start_params *p_tunn,
+		    enum qed_mf_mode mode, bool allow_npar_tx_switch)
 {
 	struct pf_start_ramrod_data *p_ramrod = NULL;
 	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
@@ -143,16 +355,103 @@
 	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
 		       p_hwfn->p_consq->chain.pbl.p_phys_table);
 
+	qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
+				     &p_ramrod->tunnel_config);
 	p_hwfn->hw_info.personality = PERSONALITY_ETH;
 
+	if (IS_MF_SI(p_hwfn))
+		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+	if (p_hwfn->cdev->p_iov_info) {
+		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+		p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
+		p_ramrod->num_vfs = (u8) p_iov->total_vfs;
+	}
+
 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
 		   sb, sb_index,
 		   p_ramrod->outer_tag);
 
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+	if (p_tunn) {
+		qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+				     p_tunn->tunn_mode);
+		p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+	}
+
+	return rc;
+}
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
+{
+	struct qed_spq_entry *p_ent = NULL;
+	struct qed_sp_init_data init_data;
+	int rc = -EINVAL;
+
+	/* Get SPQ entry */
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = qed_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = QED_SPQ_MODE_CB;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+				 &init_data);
+	if (rc)
+		return rc;
+
+	qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+				      &p_ent->ramrod.pf_update);
+
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
+/* Set pf update ramrod command params */
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+			      struct qed_tunn_update_params *p_tunn,
+			      enum spq_mode comp_mode,
+			      struct qed_spq_comp_cb *p_comp_data)
+{
+	struct qed_spq_entry *p_ent = NULL;
+	struct qed_sp_init_data init_data;
+	int rc = -EINVAL;
+
+	/* Get SPQ entry */
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = qed_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = comp_mode;
+	init_data.p_comp_data = p_comp_data;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+				 &init_data);
+	if (rc)
+		return rc;
+
+	qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
+				      &p_ent->ramrod.pf_update.tunnel_config);
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+	if (rc)
+		return rc;
+
+	if (p_tunn->update_vxlan_udp_port)
+		qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+					p_tunn->vxlan_udp_port);
+	if (p_tunn->update_geneve_udp_port)
+		qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+					 p_tunn->geneve_udp_port);
+
+	qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
+	p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+
+	return rc;
+}
+
 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
 {
 	struct qed_spq_entry *p_ent = NULL;
@@ -173,3 +472,24 @@
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
+{
+	struct qed_spq_entry *p_ent = NULL;
+	struct qed_sp_init_data init_data;
+	int rc;
+
+	/* Get SPQ entry */
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = qed_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+				 &init_data);
+	if (rc)
+		return rc;
+
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
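
qed_sp_pf_update_tunn_cfg() applies only the tunnel settings the caller marks for update; qed_tunn_set_pf_fix_tunn_mode() preserves the cached modes for anything not covered by tunn_mode_update_mask. A minimal sketch of a caller switching the VXLAN UDP port (the helper name and the trimmed error handling are illustrative only, not from the patch):

/* Sketch only: request a VXLAN UDP-port update via the PF-update ramrod */
static int example_set_vxlan_port(struct qed_hwfn *p_hwfn, u16 udp_port)
{
	struct qed_tunn_update_params tunn_params;

	memset(&tunn_params, 0, sizeof(tunn_params));
	tunn_params.update_vxlan_udp_port = 1;
	tunn_params.vxlan_udp_port = udp_port;
	tunn_params.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;

	return qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn_params,
					 QED_SPQ_MODE_EBLOCK, NULL);
}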
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 89469d5..acac662 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -27,6 +27,7 @@
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
 
 /***************************************************************************
 * Structures & Definitions
@@ -242,10 +243,17 @@
 qed_async_event_completion(struct qed_hwfn *p_hwfn,
 			   struct event_ring_entry *p_eqe)
 {
-	DP_NOTICE(p_hwfn,
-		  "Unknown Async completion for protocol: %d\n",
-		   p_eqe->protocol_id);
-	return -EINVAL;
+	switch (p_eqe->protocol_id) {
+	case PROTOCOLID_COMMON:
+		return qed_sriov_eqe_event(p_hwfn,
+					   p_eqe->opcode,
+					   p_eqe->echo, &p_eqe->data);
+	default:
+		DP_NOTICE(p_hwfn,
+			  "Unknown Async completion for protocol: %d\n",
+			  p_eqe->protocol_id);
+		return -EINVAL;
+	}
 }
 
 /***************************************************************************
@@ -379,6 +387,9 @@
 	struct eth_slow_path_rx_cqe *cqe,
 	enum protocol_type protocol)
 {
+	if (IS_VF(p_hwfn->cdev))
+		return 0;
+
 	/* @@@tmp - it's possible we'll eventually want to handle some
 	 * actual commands that can arrive here, but for now this is only
 	 * used to complete the ramrod using the echo value on the cqe
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
new file mode 100644
index 0000000..c325ee8
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -0,0 +1,3613 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/qed/qed_iov_if.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+/* IOV ramrods */
+static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
+			   u32 concrete_vfid, u16 opaque_vfid)
+{
+	struct vf_start_ramrod_data *p_ramrod = NULL;
+	struct qed_spq_entry *p_ent = NULL;
+	struct qed_sp_init_data init_data;
+	int rc = -EINVAL;
+
+	/* Get SPQ entry */
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = qed_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = opaque_vfid;
+	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 COMMON_RAMROD_VF_START,
+				 PROTOCOLID_COMMON, &init_data);
+	if (rc)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.vf_start;
+
+	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+	p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
+
+	p_ramrod->personality = PERSONALITY_ETH;
+
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
+			  u32 concrete_vfid, u16 opaque_vfid)
+{
+	struct vf_stop_ramrod_data *p_ramrod = NULL;
+	struct qed_spq_entry *p_ent = NULL;
+	struct qed_sp_init_data init_data;
+	int rc = -EINVAL;
+
+	/* Get SPQ entry */
+	memset(&init_data, 0, sizeof(init_data));
+	init_data.cid = qed_spq_get_cid(p_hwfn);
+	init_data.opaque_fid = opaque_vfid;
+	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 COMMON_RAMROD_VF_STOP,
+				 PROTOCOLID_COMMON, &init_data);
+	if (rc)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.vf_stop;
+
+	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+			   int rel_vf_id, bool b_enabled_only)
+{
+	if (!p_hwfn->pf_iov_info) {
+		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+		return false;
+	}
+
+	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
+	    (rel_vf_id < 0))
+		return false;
+
+	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+	    b_enabled_only)
+		return false;
+
+	return true;
+}
+
+static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
+					       u16 relative_vf_id,
+					       bool b_enabled_only)
+{
+	struct qed_vf_info *vf = NULL;
+
+	if (!p_hwfn->pf_iov_info) {
+		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+		return NULL;
+	}
+
+	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+	else
+		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
+		       relative_vf_id);
+
+	return vf;
+}
+
+int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+			     int vfid, struct qed_ptt *p_ptt)
+{
+	struct qed_bulletin_content *p_bulletin;
+	int crc_size = sizeof(p_bulletin->crc);
+	struct qed_dmae_params params;
+	struct qed_vf_info *p_vf;
+
+	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!p_vf)
+		return -EINVAL;
+
+	if (!p_vf->vf_bulletin)
+		return -EINVAL;
+
+	p_bulletin = p_vf->bulletin.p_virt;
+
+	/* Increment bulletin board version and compute crc */
+	p_bulletin->version++;
+	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
+				p_vf->bulletin.size - crc_size);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+	/* propagate bulletin board via dmae to vm memory */
+	memset(&params, 0, sizeof(params));
+	params.flags = QED_DMAE_FLAG_VF_DST;
+	params.dst_vfid = p_vf->abs_vf_id;
+	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+				  &params);
+}
+
+static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
+{
+	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
+	int pos = iov->pos;
+
+	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
+	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+	pci_read_config_word(cdev->pdev,
+			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+	pci_read_config_word(cdev->pdev,
+			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
+
+	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+	if (iov->num_vfs) {
+		DP_VERBOSE(cdev,
+			   QED_MSG_IOV,
+			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
+		iov->num_vfs = 0;
+	}
+
+	pci_read_config_word(cdev->pdev,
+			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+	pci_read_config_word(cdev->pdev,
+			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+	pci_read_config_word(cdev->pdev,
+			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+	pci_read_config_dword(cdev->pdev,
+			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+	DP_VERBOSE(cdev,
+		   QED_MSG_IOV,
+		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+		   iov->nres,
+		   iov->cap,
+		   iov->ctrl,
+		   iov->total_vfs,
+		   iov->initial_vfs,
+		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+	/* Some sanity checks */
+	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
+	    iov->total_vfs > NUM_OF_VFS(cdev)) {
+		/* This can happen only due to a bug. In this case we set
+		 * num_vfs to zero to avoid memory corruption in code that
+		 * assumes the maximum number of VFs.
+		 */
+		DP_NOTICE(cdev,
+			  "IOV: Unexpected number of VFs set: %d; setting num_vfs to zero\n",
+			  iov->num_vfs);
+
+		iov->num_vfs = 0;
+		iov->total_vfs = 0;
+	}
+
+	return 0;
+}
+
+static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
+					struct qed_ptt *p_ptt)
+{
+	struct qed_igu_block *p_sb;
+	u16 sb_id;
+	u32 val;
+
+	if (!p_hwfn->hw_info.p_igu_info) {
+		DP_ERR(p_hwfn,
+		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
+		return;
+	}
+
+	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+	     sb_id++) {
+		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
+		    !(p_sb->status & QED_IGU_STATUS_PF)) {
+			val = qed_rd(p_hwfn, p_ptt,
+				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
+			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+			qed_wr(p_hwfn, p_ptt,
+			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
+		}
+	}
+}
+
+static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
+{
+	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+	struct qed_bulletin_content *p_bulletin_virt;
+	dma_addr_t req_p, rply_p, bulletin_p;
+	union pfvf_tlvs *p_reply_virt_addr;
+	union vfpf_tlvs *p_req_virt_addr;
+	u8 idx = 0;
+
+	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+	req_p = p_iov_info->mbx_msg_phys_addr;
+	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+	rply_p = p_iov_info->mbx_reply_phys_addr;
+	p_bulletin_virt = p_iov_info->p_bulletins;
+	bulletin_p = p_iov_info->bulletins_phys;
+	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+		DP_ERR(p_hwfn,
+		       "qed_iov_setup_vfdb called without allocating mem first\n");
+		return;
+	}
+
+	for (idx = 0; idx < p_iov->total_vfs; idx++) {
+		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
+		u32 concrete;
+
+		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+		vf->state = VF_STOPPED;
+		vf->b_init = false;
+
+		vf->bulletin.phys = idx *
+				    sizeof(struct qed_bulletin_content) +
+				    bulletin_p;
+		vf->bulletin.p_virt = p_bulletin_virt + idx;
+		vf->bulletin.size = sizeof(struct qed_bulletin_content);
+
+		vf->relative_vf_id = idx;
+		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
+		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+		vf->concrete_fid = concrete;
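+		/* The opaque FID keeps the PF's low byte and encodes the
+		 * absolute VF ID in bits 8-15.
+		 */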
+		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+				 (vf->abs_vf_id << 8);
+		vf->vport_id = idx + 1;
+	}
+}
+
+static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
+{
+	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+	void **p_v_addr;
+	u16 num_vfs = 0;
+
+	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+
+	/* Allocate PF Mailbox buffer (per-VF) */
+	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				       p_iov_info->mbx_msg_size,
+				       &p_iov_info->mbx_msg_phys_addr,
+				       GFP_KERNEL);
+	if (!*p_v_addr)
+		return -ENOMEM;
+
+	/* Allocate PF Mailbox Reply buffer (per-VF) */
+	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				       p_iov_info->mbx_reply_size,
+				       &p_iov_info->mbx_reply_phys_addr,
+				       GFP_KERNEL);
+	if (!*p_v_addr)
+		return -ENOMEM;
+
+	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
+				     num_vfs;
+	p_v_addr = &p_iov_info->p_bulletins;
+	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				       p_iov_info->bulletins_size,
+				       &p_iov_info->bulletins_phys,
+				       GFP_KERNEL);
+	if (!*p_v_addr)
+		return -ENOMEM;
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "PF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys], Bulletins [%p virt 0x%llx phys]\n",
+		   p_iov_info->mbx_msg_virt_addr,
+		   (u64) p_iov_info->mbx_msg_phys_addr,
+		   p_iov_info->mbx_reply_virt_addr,
+		   (u64) p_iov_info->mbx_reply_phys_addr,
+		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+
+	return 0;
+}
+
+static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
+{
+	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  p_iov_info->mbx_msg_size,
+				  p_iov_info->mbx_msg_virt_addr,
+				  p_iov_info->mbx_msg_phys_addr);
+
+	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  p_iov_info->mbx_reply_size,
+				  p_iov_info->mbx_reply_virt_addr,
+				  p_iov_info->mbx_reply_phys_addr);
+
+	if (p_iov_info->p_bulletins)
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  p_iov_info->bulletins_size,
+				  p_iov_info->p_bulletins,
+				  p_iov_info->bulletins_phys);
+}
+
+int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_pf_iov *p_sriov;
+
+	if (!IS_PF_SRIOV(p_hwfn)) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "No SR-IOV - no need for IOV db\n");
+		return 0;
+	}
+
+	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
+	if (!p_sriov) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+		return -ENOMEM;
+	}
+
+	p_hwfn->pf_iov_info = p_sriov;
+
+	return qed_iov_allocate_vfdb(p_hwfn);
+}
+
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+		return;
+
+	qed_iov_setup_vfdb(p_hwfn);
+	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
+}
+
+void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
+		qed_iov_free_vfdb(p_hwfn);
+		kfree(p_hwfn->pf_iov_info);
+	}
+}
+
+void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+	kfree(cdev->p_iov_info);
+	cdev->p_iov_info = NULL;
+}
+
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+	struct qed_dev *cdev = p_hwfn->cdev;
+	int pos;
+	int rc;
+
+	if (IS_VF(p_hwfn->cdev))
+		return 0;
+
+	/* Learn the PCI configuration */
+	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
+				      PCI_EXT_CAP_ID_SRIOV);
+	if (!pos) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
+		return 0;
+	}
+
+	/* Allocate a new struct for IOV information */
+	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
+	if (!cdev->p_iov_info) {
+		DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
+		return -ENOMEM;
+	}
+	cdev->p_iov_info->pos = pos;
+
+	rc = qed_iov_pci_cfg_info(cdev);
+	if (rc)
+		return rc;
+
+	/* We want PF IOV to be synonymous with the existence of p_iov_info;
+	 * In case the capability is published but there are no VFs, simply
+	 * de-allocate the struct.
+	 */
+	if (!cdev->p_iov_info->total_vfs) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "IOV capabilities, but no VFs are published\n");
+		kfree(cdev->p_iov_info);
+		cdev->p_iov_info = NULL;
+		return 0;
+	}
+
+	/* Calculate the first VF index - this is a bit tricky; Basically,
+	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
+	 * after the first engine's VFs.
+	 */
+	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
+					   p_hwfn->abs_pf_id - 16;
+	if (QED_PATH_ID(p_hwfn))
+		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+		   "First VF in hwfn 0x%08x\n",
+		   cdev->p_iov_info->first_vf_in_pf);
+
+	return 0;
+}
+
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+{
+	/* Check PF supports sriov */
+	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
+	    !IS_PF_SRIOV_ALLOC(p_hwfn))
+		return false;
+
+	/* Check VF validity */
+	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
+		return false;
+
+	return true;
+}
+
+static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
+				      u16 rel_vf_id, u8 to_disable)
+{
+	struct qed_vf_info *vf;
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+		if (!vf)
+			continue;
+
+		vf->to_disable = to_disable;
+	}
+}
+
+void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+{
+	u16 i;
+
+	if (!IS_QED_SRIOV(cdev))
+		return;
+
+	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
+		qed_iov_set_vf_to_disable(cdev, i, to_disable);
+}
+
+static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
+				       struct qed_ptt *p_ptt, u8 abs_vfid)
+{
+	qed_wr(p_hwfn, p_ptt,
+	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
+	       1 << (abs_vfid & 0x1f));
+}
+
+static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+	int i;
+
+	/* Set VF masks and configuration - pretend */
+	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+	/* unpretend */
+	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+	/* iterate over all queues, clear sb consumer */
+	for (i = 0; i < vf->num_sbs; i++)
+		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+						vf->igu_sbs[i],
+						vf->opaque_fid, true);
+}
+
+static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt,
+				   struct qed_vf_info *vf, bool enable)
+{
+	u32 igu_vf_conf;
+
+	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+
+	if (enable)
+		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
+	else
+		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
+
+	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
+
+	/* unpretend */
+	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+}
+
+static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
+				    struct qed_ptt *p_ptt,
+				    struct qed_vf_info *vf)
+{
+	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+	int rc;
+
+	if (vf->to_disable)
+		return 0;
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "Enable internal access for vf %x [abs %x]\n",
+		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
+
+	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
+
+	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
+	if (rc)
+		return rc;
+
+	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+		     p_hwfn->hw_info.hw_mode);
+
+	/* unpretend */
+	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+	if (vf->state != VF_STOPPED) {
+		DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
+			  vf->abs_vf_id);
+		return -EINVAL;
+	}
+
+	/* Start VF */
+	rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+	if (rc)
+		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
+
+	vf->state = VF_FREE;
+
+	return rc;
+}
+
+/**
+ * @brief qed_iov_config_perm_table - configure the permission
+ *      zone table.
+ *      In E4, the queue zone permission table size is 320x9. There
+ *      are 320 VF queues for a single-engine device (256 for a
+ *      dual-engine device), and each entry has the following format:
+ *      {Valid, VF[7:0]}
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ * @param enable
+ */
+static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
+				      struct qed_ptt *p_ptt,
+				      struct qed_vf_info *vf, u8 enable)
+{
+	u32 reg_addr, val;
+	u16 qzone_id = 0;
+	int qid;
+
+	for (qid = 0; qid < vf->num_rxqs; qid++) {
+		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+				&qzone_id);
+
+		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
+		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+		qed_wr(p_hwfn, p_ptt, reg_addr, val);
+	}
+}
+
+static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
+				      struct qed_ptt *p_ptt,
+				      struct qed_vf_info *vf)
+{
+	/* Reset vf in IGU - interrupts are still disabled */
+	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
+
+	/* Permission Table */
+	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
+}
+
+static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt,
+				   struct qed_vf_info *vf, u16 num_rx_queues)
+{
+	struct qed_igu_block *igu_blocks;
+	int qid = 0, igu_id = 0;
+	u32 val = 0;
+
+	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
+
+	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
+		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
+	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+
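+	/* Common IGU CAM line template for this VF: route lines to the VF's
+	 * absolute ID and mark them valid but not PF-owned; the vector number
+	 * is filled in per queue in the loop below.
+	 */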
+	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
+	while ((qid < num_rx_queues) &&
+	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
+		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
+			struct cau_sb_entry sb_entry;
+
+			vf->igu_sbs[qid] = (u16)igu_id;
+			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
+
+			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+			qed_wr(p_hwfn, p_ptt,
+			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
+			       val);
+
+			/* Configure the IGU SB in CAU, which was marked valid above */
+			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+					      p_hwfn->rel_pf_id,
+					      vf->abs_vf_id, 1);
+			qed_dmae_host2grc(p_hwfn, p_ptt,
+					  (u64)(uintptr_t)&sb_entry,
+					  CAU_REG_SB_VAR_MEMORY +
+					  igu_id * sizeof(u64), 2, 0);
+			qid++;
+		}
+		igu_id++;
+	}
+
+	vf->num_sbs = (u8) num_rx_queues;
+
+	return vf->num_sbs;
+}
+
+static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+				    struct qed_ptt *p_ptt,
+				    struct qed_vf_info *vf)
+{
+	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+	int idx, igu_id;
+	u32 addr, val;
+
+	/* Invalidate igu CAM lines and mark them as free */
+	for (idx = 0; idx < vf->num_sbs; idx++) {
+		igu_id = vf->igu_sbs[idx];
+		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+
+		val = qed_rd(p_hwfn, p_ptt, addr);
+		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+		qed_wr(p_hwfn, p_ptt, addr, val);
+
+		p_info->igu_map.igu_blocks[igu_id].status |=
+		    QED_IGU_STATUS_FREE;
+
+		p_hwfn->hw_info.p_igu_info->free_blks++;
+	}
+
+	vf->num_sbs = 0;
+}
+
+static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt,
+				  u16 rel_vf_id, u16 num_rx_queues)
+{
+	u8 num_of_vf_available_chains = 0;
+	struct qed_vf_info *vf = NULL;
+	int rc = 0;
+	u32 cids;
+	u8 i;
+
+	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+	if (!vf) {
+		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+		return -EINVAL;
+	}
+
+	if (vf->b_init) {
+		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
+		return -EINVAL;
+	}
+
+	/* Limit number of queues according to number of CIDs */
+	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
+		   vf->relative_vf_id, num_rx_queues, (u16) cids);
+	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
+
+	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
+							      p_ptt,
+							      vf,
+							      num_rx_queues);
+	if (!num_of_vf_available_chains) {
+		DP_ERR(p_hwfn, "no available igu sbs\n");
+		return -ENOMEM;
+	}
+
+	/* Choose queue number and index ranges */
+	vf->num_rxqs = num_of_vf_available_chains;
+	vf->num_txqs = num_of_vf_available_chains;
+
+	for (i = 0; i < vf->num_rxqs; i++) {
+		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
+							   vf->igu_sbs[i]);
+
+		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+			DP_NOTICE(p_hwfn,
+				  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
+				  vf->relative_vf_id, queue_id);
+			return -EINVAL;
+		}
+
+		/* CIDs are per-VF, so no problem having them 0-based. */
+		vf->vf_queues[i].fw_rx_qid = queue_id;
+		vf->vf_queues[i].fw_tx_qid = queue_id;
+		vf->vf_queues[i].fw_cid = i;
+
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
+			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+	}
+	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+	if (!rc) {
+		vf->b_init = true;
+
+		if (IS_LEAD_HWFN(p_hwfn))
+			p_hwfn->cdev->p_iov_info->num_vfs++;
+	}
+
+	return rc;
+}
+
+static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
+			     u16 vfid,
+			     struct qed_mcp_link_params *params,
+			     struct qed_mcp_link_state *link,
+			     struct qed_mcp_link_capabilities *p_caps)
+{
+	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+						       vfid,
+						       false);
+	struct qed_bulletin_content *p_bulletin;
+
+	if (!p_vf)
+		return;
+
+	p_bulletin = p_vf->bulletin.p_virt;
+	p_bulletin->req_autoneg = params->speed.autoneg;
+	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+	p_bulletin->req_forced_speed = params->speed.forced_speed;
+	p_bulletin->req_autoneg_pause = params->pause.autoneg;
+	p_bulletin->req_forced_rx = params->pause.forced_rx;
+	p_bulletin->req_forced_tx = params->pause.forced_tx;
+	p_bulletin->req_loopback = params->loopback_mode;
+
+	p_bulletin->link_up = link->link_up;
+	p_bulletin->speed = link->speed;
+	p_bulletin->full_duplex = link->full_duplex;
+	p_bulletin->autoneg = link->an;
+	p_bulletin->autoneg_complete = link->an_complete;
+	p_bulletin->parallel_detection = link->parallel_detection;
+	p_bulletin->pfc_enabled = link->pfc_enabled;
+	p_bulletin->partner_adv_speed = link->partner_adv_speed;
+	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+	p_bulletin->partner_adv_pause = link->partner_adv_pause;
+	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+	p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt, u16 rel_vf_id)
+{
+	struct qed_mcp_link_capabilities caps;
+	struct qed_mcp_link_params params;
+	struct qed_mcp_link_state link;
+	struct qed_vf_info *vf = NULL;
+	int rc = 0;
+
+	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+	if (!vf) {
+		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+		return -EINVAL;
+	}
+
+	if (vf->bulletin.p_virt)
+		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
+
+	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
+
+	/* Get the link configuration back in bulletin so
+	 * that when VFs are re-enabled they get the actual
+	 * link configuration.
+	 */
+	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
+	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
+	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
+	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+	if (vf->state != VF_STOPPED) {
+		/* Stopping the VF */
+		rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+
+		if (rc != 0) {
+			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
+			       rc);
+			return rc;
+		}
+
+		vf->state = VF_STOPPED;
+	}
+
+	/* Disabling interrupts and resetting the permission table were done
+	 * during vf-close; however, we could get here without going through
+	 * vf-close.
+	 */
+	/* Disable Interrupts for VF */
+	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+	/* Reset Permission table */
+	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+	vf->num_rxqs = 0;
+	vf->num_txqs = 0;
+	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+	if (vf->b_init) {
+		vf->b_init = false;
+
+		if (IS_LEAD_HWFN(p_hwfn))
+			p_hwfn->cdev->p_iov_info->num_vfs--;
+	}
+
+	return 0;
+}
+
+static bool qed_iov_tlv_supported(u16 tlvtype)
+{
+	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
+{
+	struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+	tl->type = type;
+	tl->length = length;
+
+	/* Offset should keep pointing to next TLV (the end of the last) */
+	*offset += length;
+
+	/* Return a pointer to the start of the added tlv */
+	return *offset - length;
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
+{
+	u16 i = 1, total_length = 0;
+	struct channel_tlv *tlv;
+
+	do {
+		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+		/* output tlv */
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "TLV number %d: type %d, length %d\n",
+			   i, tlv->type, tlv->length);
+
+		if (tlv->type == CHANNEL_TLV_LIST_END)
+			return;
+
+		/* Validate entry - protect against malicious VFs */
+		if (!tlv->length) {
+			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
+			return;
+		}
+
+		total_length += tlv->length;
+
+		if (total_length >= sizeof(struct tlv_buffer_size)) {
+			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
+			return;
+		}
+
+		i++;
+	} while (1);
+}
+
+static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt,
+				  struct qed_vf_info *p_vf,
+				  u16 length, u8 status)
+{
+	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+	struct qed_dmae_params params;
+	u8 eng_vf_id;
+
+	mbx->reply_virt->default_resp.hdr.status = status;
+
+	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+	eng_vf_id = p_vf->abs_vf_id;
+
+	memset(&params, 0, sizeof(struct qed_dmae_params));
+	params.flags = QED_DMAE_FLAG_VF_DST;
+	params.dst_vfid = eng_vf_id;
+
+	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+			   mbx->req_virt->first_tlv.reply_address +
+			   sizeof(u64),
+			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+			   &params);
+
+	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+			   mbx->req_virt->first_tlv.reply_address,
+			   sizeof(u64) / 4, &params);
+
+	REG_WR(p_hwfn,
+	       GTT_BAR0_MAP_REG_USDM_RAM +
+	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+}
+
+static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
+				enum qed_iov_vport_update_flag flag)
+{
+	switch (flag) {
+	case QED_IOV_VP_UPDATE_ACTIVATE:
+		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+	case QED_IOV_VP_UPDATE_VLAN_STRIP:
+		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+	case QED_IOV_VP_UPDATE_TX_SWITCH:
+		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+	case QED_IOV_VP_UPDATE_MCAST:
+		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
+		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+	case QED_IOV_VP_UPDATE_RSS:
+		return CHANNEL_TLV_VPORT_UPDATE_RSS;
+	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+	case QED_IOV_VP_UPDATE_SGE_TPA:
+		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+	default:
+		return 0;
+	}
+}
+
+static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
+					    struct qed_vf_info *p_vf,
+					    struct qed_iov_vf_mbx *p_mbx,
+					    u8 status,
+					    u16 tlvs_mask, u16 tlvs_accepted)
+{
+	struct pfvf_def_resp_tlv *resp;
+	u16 size, total_len, i;
+
+	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+	p_mbx->offset = (u8 *)p_mbx->reply_virt;
+	size = sizeof(struct pfvf_def_resp_tlv);
+	total_len = size;
+
+	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+	/* Prepare response for all extended tlvs if they are found by PF */
+	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
+		if (!(tlvs_mask & (1 << i)))
+			continue;
+
+		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
+				   qed_iov_vport_to_tlv(p_hwfn, i), size);
+
+		if (tlvs_accepted & (1 << i))
+			resp->hdr.status = status;
+		else
+			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV,
+			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
+			   p_vf->relative_vf_id,
+			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+
+		total_len += size;
+	}
+
+	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+		    sizeof(struct channel_list_end_tlv));
+
+	return total_len;
+}
+
+static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt,
+				 struct qed_vf_info *vf_info,
+				 u16 type, u16 length, u8 status)
+{
+	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
+
+	mbx->offset = (u8 *)mbx->reply_virt;
+
+	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
+	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+		    sizeof(struct channel_list_end_tlv));
+
+	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+}
+
+struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+						      u16 relative_vf_id,
+						      bool b_enabled_only)
+{
+	struct qed_vf_info *vf = NULL;
+
+	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+	if (!vf)
+		return NULL;
+
+	return &vf->p_vf_info;
+}
+
+void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+	struct qed_public_vf_info *vf_info;
+
+	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
+
+	if (!vf_info)
+		return;
+
+	/* Clear the VF mac */
+	memset(vf_info->mac, 0, ETH_ALEN);
+}
+
+static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
+			       struct qed_vf_info *p_vf)
+{
+	u32 i;
+
+	p_vf->vf_bulletin = 0;
+	p_vf->vport_instance = 0;
+	p_vf->num_mac_filters = 0;
+	p_vf->num_vlan_filters = 0;
+	p_vf->configured_features = 0;
+
+	/* If VF previously requested less resources, go back to default */
+	p_vf->num_rxqs = p_vf->num_sbs;
+	p_vf->num_txqs = p_vf->num_sbs;
+
+	p_vf->num_active_rxqs = 0;
+
+	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
+		p_vf->vf_queues[i].rxq_active = 0;
+
+	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
+}
+
+static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt,
+				   struct qed_vf_info *vf)
+{
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+	u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+	struct pf_vf_resc *resc = &resp->resc;
+
+	/* Validate FW compatibility */
+	if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
+	    req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
+	    req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
+	    req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+		DP_INFO(p_hwfn,
+			"VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
+			vf->abs_vf_id,
+			req->vfdev_info.fw_major,
+			req->vfdev_info.fw_minor,
+			req->vfdev_info.fw_revision,
+			req->vfdev_info.fw_engineering,
+			FW_MAJOR_VERSION,
+			FW_MINOR_VERSION,
+			FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+		goto out;
+	}
+
+	/* On 100g PFs, prevent old VFs from loading */
+	if ((p_hwfn->cdev->num_hwfns > 1) &&
+	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
+		DP_INFO(p_hwfn,
+			"VF[%d] is running an old driver that doesn't support 100g\n",
+			vf->abs_vf_id);
+		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+		goto out;
+	}
+
+	memset(resp, 0, sizeof(*resp));
+
+	/* Fill in VF info */
+	vf->opaque_fid = req->vfdev_info.opaque_fid;
+	vf->num_mac_filters = 1;
+	vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+
+	vf->vf_bulletin = req->bulletin_addr;
+	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+			    vf->bulletin.size : req->bulletin_size;
+
+	/* fill in pfdev info */
+	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
+	pfdev_info->db_size = 0;
+	pfdev_info->indices_per_sb = PIS_PER_SB;
+
+	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+	if (p_hwfn->cdev->num_hwfns > 1)
+		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+
+	pfdev_info->stats_info.mstats.address =
+	    PXP_VF_BAR0_START_MSDM_ZONE_B +
+	    offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
+	pfdev_info->stats_info.mstats.len =
+	    sizeof(struct eth_mstorm_per_queue_stat);
+
+	pfdev_info->stats_info.ustats.address =
+	    PXP_VF_BAR0_START_USDM_ZONE_B +
+	    offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
+	pfdev_info->stats_info.ustats.len =
+	    sizeof(struct eth_ustorm_per_queue_stat);
+
+	pfdev_info->stats_info.pstats.address =
+	    PXP_VF_BAR0_START_PSDM_ZONE_B +
+	    offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
+	pfdev_info->stats_info.pstats.len =
+	    sizeof(struct eth_pstorm_per_queue_stat);
+
+	pfdev_info->stats_info.tstats.address = 0;
+	pfdev_info->stats_info.tstats.len = 0;
+
+	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+
+	pfdev_info->fw_major = FW_MAJOR_VERSION;
+	pfdev_info->fw_minor = FW_MINOR_VERSION;
+	pfdev_info->fw_rev = FW_REVISION_VERSION;
+	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
+	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
+
+	pfdev_info->dev_type = p_hwfn->cdev->type;
+	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
+
+	resc->num_rxqs = vf->num_rxqs;
+	resc->num_txqs = vf->num_txqs;
+	resc->num_sbs = vf->num_sbs;
+	for (i = 0; i < resc->num_sbs; i++) {
+		resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
+		resc->hw_sbs[i].sb_qid = 0;
+	}
+
+	for (i = 0; i < resc->num_rxqs; i++) {
+		qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
+				(u16 *)&resc->hw_qid[i]);
+		resc->cid[i] = vf->vf_queues[i].fw_cid;
+	}
+
+	resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
+				      req->resc_request.num_mac_filters);
+	resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
+				       req->resc_request.num_vlan_filters);
+
+	/* This isn't really required, as the VF isn't limited, but some VFs
+	 * might actually test this value, so we need to provide it.
+	 */
+	resc->num_mc_filters = req->resc_request.num_mc_filters;
+
+	/* Fill agreed size of bulletin board in response */
+	resp->bulletin_size = vf->bulletin.size;
+	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
+		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
+		   vf->abs_vf_id,
+		   resp->pfdev_info.chip_num,
+		   resp->pfdev_info.db_size,
+		   resp->pfdev_info.indices_per_sb,
+		   resp->pfdev_info.capabilities,
+		   resc->num_rxqs,
+		   resc->num_txqs,
+		   resc->num_sbs,
+		   resc->num_mac_filters,
+		   resc->num_vlan_filters);
+	vf->state = VF_ACQUIRED;
+
+	/* Prepare Response */
+out:
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
+}
+
+static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
+				  struct qed_vf_info *p_vf, bool val)
+{
+	struct qed_sp_vport_update_params params;
+	int rc;
+
+	if (val == p_vf->spoof_chk) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Spoofchk value[%d] is already configured\n", val);
+		return 0;
+	}
+
+	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
+	params.opaque_fid = p_vf->opaque_fid;
+	params.vport_id = p_vf->vport_id;
+	params.update_anti_spoofing_en_flg = 1;
+	params.anti_spoofing_en = val;
+
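+	/* Commit the new spoof-check state only if the vport update succeeds */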
+	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+	if (!rc) {
+		p_vf->spoof_chk = val;
+		p_vf->req_spoofchk_val = p_vf->spoof_chk;
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Spoofchk val[%d] configured\n", val);
+	} else {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+			   val, p_vf->relative_vf_id);
+	}
+
+	return rc;
+}
+
+static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
+					    struct qed_vf_info *p_vf)
+{
+	struct qed_filter_ucast filter;
+	int rc = 0;
+	int i;
+
+	memset(&filter, 0, sizeof(filter));
+	filter.is_rx_filter = 1;
+	filter.is_tx_filter = 1;
+	filter.vport_to_add_to = p_vf->vport_id;
+	filter.opcode = QED_FILTER_ADD;
+
+	/* Reconfigure vlans */
+	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+		if (!p_vf->shadow_config.vlans[i].used)
+			continue;
+
+		filter.type = QED_FILTER_VLAN;
+		filter.vlan = p_vf->shadow_config.vlans[i].vid;
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV,
+			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+			   filter.vlan, p_vf->relative_vf_id);
+		rc = qed_sp_eth_filter_ucast(p_hwfn,
+					     p_vf->opaque_fid,
+					     &filter,
+					     QED_SPQ_MODE_CB, NULL);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
+				  filter.vlan, p_vf->relative_vf_id);
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int
+qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
+				   struct qed_vf_info *p_vf, u64 events)
+{
+	int rc = 0;
+
+	if ((events & (1 << VLAN_ADDR_FORCED)) &&
+	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+	return rc;
+}
+
+static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
+					  struct qed_vf_info *p_vf, u64 events)
+{
+	int rc = 0;
+	struct qed_filter_ucast filter;
+
+	if (!p_vf->vport_instance)
+		return -EINVAL;
+
+	if (events & (1 << MAC_ADDR_FORCED)) {
+		/* Since there's no way [currently] of removing the MAC,
+		 * we can always assume this means we need to force it.
+		 */
+		memset(&filter, 0, sizeof(filter));
+		filter.type = QED_FILTER_MAC;
+		filter.opcode = QED_FILTER_REPLACE;
+		filter.is_rx_filter = 1;
+		filter.is_tx_filter = 1;
+		filter.vport_to_add_to = p_vf->vport_id;
+		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
+
+		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+					     &filter, QED_SPQ_MODE_CB, NULL);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "PF failed to configure MAC for VF\n");
+			return rc;
+		}
+
+		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+	}
+
+	if (events & (1 << VLAN_ADDR_FORCED)) {
+		struct qed_sp_vport_update_params vport_update;
+		u8 removal;
+		int i;
+
+		memset(&filter, 0, sizeof(filter));
+		filter.type = QED_FILTER_VLAN;
+		filter.is_rx_filter = 1;
+		filter.is_tx_filter = 1;
+		filter.vport_to_add_to = p_vf->vport_id;
+		filter.vlan = p_vf->bulletin.p_virt->pvid;
+		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
+					      QED_FILTER_FLUSH;
+
+		/* Send the ramrod */
+		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+					     &filter, QED_SPQ_MODE_CB, NULL);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "PF failed to configure VLAN for VF\n");
+			return rc;
+		}
+
+		/* Update the default-vlan & silent vlan stripping */
+		memset(&vport_update, 0, sizeof(vport_update));
+		vport_update.opaque_fid = p_vf->opaque_fid;
+		vport_update.vport_id = p_vf->vport_id;
+		vport_update.update_default_vlan_enable_flg = 1;
+		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+		vport_update.update_default_vlan_flg = 1;
+		vport_update.default_vlan = filter.vlan;
+
+		vport_update.update_inner_vlan_removal_flg = 1;
+		removal = filter.vlan ? 1
+				      : p_vf->shadow_config.inner_vlan_removal;
+		vport_update.inner_vlan_removal_flg = removal;
+		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+		rc = qed_sp_vport_update(p_hwfn,
+					 &vport_update,
+					 QED_SPQ_MODE_EBLOCK, NULL);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "PF failed to configure VF vport for vlan\n");
+			return rc;
+		}
+
+		/* Update all the Rx queues */
+		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+			u16 qid;
+
+			if (!p_vf->vf_queues[i].rxq_active)
+				continue;
+
+			qid = p_vf->vf_queues[i].fw_rx_qid;
+
+			rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
+							 1, 0, 1,
+							 QED_SPQ_MODE_EBLOCK,
+							 NULL);
+			if (rc) {
+				DP_NOTICE(p_hwfn,
+					  "Failed to send Rx update for queue [0x%04x]\n",
+					  qid);
+				return rc;
+			}
+		}
+
+		if (filter.vlan)
+			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+		else
+			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+	}
+
+	/* If forced features are terminated, we need to re-apply the shadow
+	 * configuration.
+	 */
+	if (events)
+		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+	return rc;
+}
+
+static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
+				       struct qed_ptt *p_ptt,
+				       struct qed_vf_info *vf)
+{
+	struct qed_sp_vport_start_params params = { 0 };
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct vfpf_vport_start_tlv *start;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct qed_vf_info *vf_info;
+	u64 *p_bitmap;
+	int sb_id;
+	int rc;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+	if (!vf_info) {
+		DP_NOTICE(p_hwfn->cdev,
+			  "Failed to get VF info, invalid vfid [%d]\n",
+			  vf->relative_vf_id);
+		return;
+	}
+
+	vf->state = VF_ENABLED;
+	start = &mbx->req_virt->start_vport;
+
+	/* Initialize Status block in CAU */
+	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+		if (!start->sb_addr[sb_id]) {
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "VF[%d] did not fill the address of SB %d\n",
+				   vf->relative_vf_id, sb_id);
+			break;
+		}
+
+		qed_int_cau_conf_sb(p_hwfn, p_ptt,
+				    start->sb_addr[sb_id],
+				    vf->igu_sbs[sb_id],
+				    vf->abs_vf_id, 1);
+	}
+	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+	vf->mtu = start->mtu;
+	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+	/* Take into consideration any configuration forced by the hypervisor;
+	 * if none is configured, use the values supplied by the VF [this is
+	 * still fine for old VFs, since they passed '0' as padding].
+	 */
+	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+		u8 vf_req = start->only_untagged;
+
+		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+	}
+
+	params.tpa_mode = start->tpa_mode;
+	params.remove_inner_vlan = start->inner_vlan_removal;
+	params.tx_switching = true;
+
+	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+	params.drop_ttl0 = false;
+	params.concrete_fid = vf->concrete_fid;
+	params.opaque_fid = vf->opaque_fid;
+	params.vport_id = vf->vport_id;
+	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+	params.mtu = vf->mtu;
+
+	rc = qed_sp_eth_vport_start(p_hwfn, &params);
+	if (rc != 0) {
+		DP_ERR(p_hwfn,
+		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+		status = PFVF_STATUS_FAILURE;
+	} else {
+		vf->vport_instance++;
+
+		/* Force configuration if needed on the newly opened vport */
+		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+
+		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+	}
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+			     sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
+				      struct qed_ptt *p_ptt,
+				      struct qed_vf_info *vf)
+{
+	u8 status = PFVF_STATUS_SUCCESS;
+	int rc;
+
+	vf->vport_instance--;
+	vf->spoof_chk = false;
+
+	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+	if (rc != 0) {
+		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
+		       rc);
+		status = PFVF_STATUS_FAILURE;
+	}
+
+	/* Forget the configuration on the vport */
+	vf->configured_features = 0;
+	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+			     sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+#define TSTORM_QZONE_START   PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START +	\
+				   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
+					  struct qed_ptt *p_ptt,
+					  struct qed_vf_info *vf, u8 status)
+{
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct pfvf_start_queue_resp_tlv *p_tlv;
+	struct vfpf_start_rxq_tlv *req;
+
+	mbx->offset = (u8 *)mbx->reply_virt;
+
+	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
+			    sizeof(*p_tlv));
+	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+		    sizeof(struct channel_list_end_tlv));
+
+	/* Update the TLV with the response */
+	if (status == PFVF_STATUS_SUCCESS) {
+		u16 hw_qid = 0;
+
+		req = &mbx->req_virt->start_rxq;
+		qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
+				&hw_qid);
+
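+		/* Offset of this queue's rx producers within the MSTORM
+		 * queue zone, so the VF can update them directly.
+		 */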
+		p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
+				hw_qid * MSTORM_QZONE_SIZE +
+				offsetof(struct mstorm_eth_queue_zone,
+					 rx_producers);
+	}
+
+	qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_vf_info *vf)
+{
+	struct qed_queue_start_common_params params;
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct vfpf_start_rxq_tlv *req;
+	int rc;
+
+	memset(&params, 0, sizeof(params));
+	req = &mbx->req_virt->start_rxq;
+	params.queue_id =  vf->vf_queues[req->rx_qid].fw_rx_qid;
+	params.vport_id = vf->vport_id;
+	params.sb = req->hw_sb;
+	params.sb_idx = req->sb_index;
+
+	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
+					 vf->vf_queues[req->rx_qid].fw_cid,
+					 &params,
+					 vf->abs_vf_id + 0x10,
+					 req->bd_max_bytes,
+					 req->rxq_addr,
+					 req->cqe_pbl_addr, req->cqe_pbl_size);
+
+	if (rc) {
+		status = PFVF_STATUS_FAILURE;
+	} else {
+		vf->vf_queues[req->rx_qid].rxq_active = true;
+		vf->num_active_rxqs++;
+	}
+
+	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+}
+
+static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_vf_info *vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+	struct qed_queue_start_common_params params;
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	union qed_qm_pq_params pq_params;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct vfpf_start_txq_tlv *req;
+	int rc;
+
+	/* Prepare the parameters which would choose the right PQ */
+	memset(&pq_params, 0, sizeof(pq_params));
+	pq_params.eth.is_vf = 1;
+	pq_params.eth.vf_id = vf->relative_vf_id;
+
+	memset(&params, 0, sizeof(params));
+	req = &mbx->req_virt->start_txq;
+	params.queue_id =  vf->vf_queues[req->tx_qid].fw_tx_qid;
+	params.vport_id = vf->vport_id;
+	params.sb = req->hw_sb;
+	params.sb_idx = req->sb_index;
+
+	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+					 vf->opaque_fid,
+					 vf->vf_queues[req->tx_qid].fw_cid,
+					 &params,
+					 vf->abs_vf_id + 0x10,
+					 req->pbl_addr,
+					 req->pbl_size, &pq_params);
+
+	if (rc)
+		status = PFVF_STATUS_FAILURE;
+	else
+		vf->vf_queues[req->tx_qid].txq_active = true;
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
+			     length, status);
+}
+
+static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
+				struct qed_vf_info *vf,
+				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+{
+	int rc = 0;
+	int qid;
+
+	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+		return -EINVAL;
+
+	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
+		if (vf->vf_queues[qid].rxq_active) {
+			rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+						      vf->vf_queues[qid].
+						      fw_rx_qid, false,
+						      cqe_completion);
+
+			if (rc)
+				return rc;
+		}
+		vf->vf_queues[qid].rxq_active = false;
+		vf->num_active_rxqs--;
+	}
+
+	return rc;
+}
+
+static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
+				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+{
+	int rc = 0;
+	int qid;
+
+	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+		return -EINVAL;
+
+	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
+		if (vf->vf_queues[qid].txq_active) {
+			rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+						      vf->vf_queues[qid].
+						      fw_tx_qid);
+
+			if (rc)
+				return rc;
+		}
+		vf->vf_queues[qid].txq_active = false;
+	}
+	return rc;
+}
+
+static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_vf_info *vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct vfpf_stop_rxqs_tlv *req;
+	int rc;
+
+	/* We allow starting from a qid != 0; in that case we need to make
+	 * sure that qid + num_qs doesn't exceed the actual number of queues
+	 * that exist.
+	 */
+	req = &mbx->req_virt->stop_rxqs;
+	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+				  req->num_rxqs, req->cqe_completion);
+	if (rc)
+		status = PFVF_STATUS_FAILURE;
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+			     length, status);
+}
+
+static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_vf_info *vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct vfpf_stop_txqs_tlv *req;
+	int rc;
+
+	/* We allow starting from a qid != 0; in that case we need to make
+	 * sure that qid + num_qs doesn't exceed the actual number of queues
+	 * that exist.
+	 */
+	req = &mbx->req_virt->stop_txqs;
+	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
+	if (rc)
+		status = PFVF_STATUS_FAILURE;
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+			     length, status);
+}
+
+static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
+				       struct qed_ptt *p_ptt,
+				       struct qed_vf_info *vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct vfpf_update_rxq_tlv *req;
+	u8 status = PFVF_STATUS_SUCCESS;
+	u8 complete_event_flg;
+	u8 complete_cqe_flg;
+	u16 qid;
+	int rc;
+	u8 i;
+
+	req = &mbx->req_virt->update_rxq;
+	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+	for (i = 0; i < req->num_rxqs; i++) {
+		qid = req->rx_qid + i;
+
+		if (!vf->vf_queues[qid].rxq_active) {
+			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn't active!\n",
+				  qid);
+			status = PFVF_STATUS_FAILURE;
+			break;
+		}
+
+		rc = qed_sp_eth_rx_queues_update(p_hwfn,
+						 vf->vf_queues[qid].fw_rx_qid,
+						 1,
+						 complete_cqe_flg,
+						 complete_event_flg,
+						 QED_SPQ_MODE_EBLOCK, NULL);
+
+		if (rc) {
+			status = PFVF_STATUS_FAILURE;
+			break;
+		}
+	}
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+			     length, status);
+}
+
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+			       void *p_tlvs_list, u16 req_type)
+{
+	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+	int len = 0;
+
+	do {
+		if (!p_tlv->length) {
+			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
+			return NULL;
+		}
+
+		if (p_tlv->type == req_type) {
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "Extended tlv type %d, length %d found\n",
+				   p_tlv->type, p_tlv->length);
+			return p_tlv;
+		}
+
+		len += p_tlv->length;
+		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
+		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
+			return NULL;
+		}
+	} while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+	return NULL;
+}
+
+static void
+qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
+			    struct qed_sp_vport_update_params *p_data,
+			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct vfpf_vport_update_activate_tlv *p_act_tlv;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+	if (!p_act_tlv)
+		return;
+
+	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
+}
+
+static void
+qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
+			     struct qed_sp_vport_update_params *p_data,
+			     struct qed_vf_info *p_vf,
+			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+	if (!p_vlan_tlv)
+		return;
+
+	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+	/* Ignore the VF request if we're forcing a vlan */
+	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+		p_data->update_inner_vlan_removal_flg = 1;
+		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+	}
+
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
+			    struct qed_sp_vport_update_params *p_data,
+			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+						   tlv);
+	if (!p_tx_switch_tlv)
+		return;
+
+	p_data->update_tx_switching_flg = 1;
+	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
+}
+
+static void
+qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
+				  struct qed_sp_vport_update_params *p_data,
+				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+
+	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+	if (!p_mcast_tlv)
+		return;
+
+	p_data->update_approx_mcast_flg = 1;
+	memcpy(p_data->bins, p_mcast_tlv->bins,
+	       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
+}
+
+static void
+qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
+			      struct qed_sp_vport_update_params *p_data,
+			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
+	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+
+	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+	if (!p_accept_tlv)
+		return;
+
+	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
+}
+
+static void
+qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
+				  struct qed_sp_vport_update_params *p_data,
+				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+
+	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
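+	/* Copy the reply in two steps - everything past the leading 8 bytes
+	 * first, then the leading 8 bytes which carry the status - so the
+	 * body is in place before the status becomes visible to the VF.
+	 */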
+			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+						     tlv);
+	if (!p_accept_any_vlan)
+		return;
+
+	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+	p_data->update_accept_any_vlan_flg =
+		    p_accept_any_vlan->update_accept_any_vlan_flg;
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+}
+
+static void
+qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
+			    struct qed_vf_info *vf,
+			    struct qed_sp_vport_update_params *p_data,
+			    struct qed_rss_params *p_rss,
+			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+	u16 i, q_idx, max_q_idx;
+	u16 table_size;
+
+	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+	if (!p_rss_tlv) {
+		p_data->rss_params = NULL;
+		return;
+	}
+
+	memset(p_rss, 0, sizeof(struct qed_rss_params));
+
+	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
+				      VFPF_UPDATE_RSS_CONFIG_FLAG);
+	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
+					    VFPF_UPDATE_RSS_CAPS_FLAG);
+	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
+					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
+				   VFPF_UPDATE_RSS_KEY_FLAG);
+
+	p_rss->rss_enable = p_rss_tlv->rss_enable;
+	p_rss->rss_eng_id = vf->relative_vf_id + 1;
+	p_rss->rss_caps = p_rss_tlv->rss_caps;
+	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+	memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
+	       sizeof(p_rss->rss_ind_table));
+	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
+
+	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
+			   (1 << p_rss_tlv->rss_table_size_log));
+
+	max_q_idx = ARRAY_SIZE(vf->vf_queues);
+
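+	/* Translate the VF-relative queue indices of the indirection table
+	 * into FW rx queue ids, falling back to queue 0 for invalid entries.
+	 */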
+	for (i = 0; i < table_size; i++) {
+		u16 index = vf->vf_queues[0].fw_rx_qid;
+
+		q_idx = p_rss->rss_ind_table[i];
+		if (q_idx >= max_q_idx)
+			DP_NOTICE(p_hwfn,
+				  "rss_ind_table[%d] = %d, rxq is out of range\n",
+				  i, q_idx);
+		else if (!vf->vf_queues[q_idx].rxq_active)
+			DP_NOTICE(p_hwfn,
+				  "rss_ind_table[%d] = %d, rxq is not active\n",
+				  i, q_idx);
+		else
+			index = vf->vf_queues[q_idx].fw_rx_qid;
+		p_rss->rss_ind_table[i] = index;
+	}
+
+	p_data->rss_params = p_rss;
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+}
+
+static void
+qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
+				struct qed_vf_info *vf,
+				struct qed_sp_vport_update_params *p_data,
+				struct qed_sge_tpa_params *p_sge_tpa,
+				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+	if (!p_sge_tpa_tlv) {
+		p_data->sge_tpa_params = NULL;
+		return;
+	}
+
+	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
+
+	p_sge_tpa->update_tpa_en_flg =
+	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+	p_sge_tpa->update_tpa_param_flg =
+	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+		VFPF_UPDATE_TPA_PARAM_FLAG);
+
+	p_sge_tpa->tpa_ipv4_en_flg =
+	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+	p_sge_tpa->tpa_ipv6_en_flg =
+	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+	p_sge_tpa->tpa_pkt_split_flg =
+	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+	p_sge_tpa->tpa_hdr_data_split_flg =
+	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+	p_sge_tpa->tpa_gro_consistent_flg =
+	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+	p_data->sge_tpa_params = p_sge_tpa;
+
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
+}
+
+static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
+					struct qed_ptt *p_ptt,
+					struct qed_vf_info *vf)
+{
+	struct qed_sp_vport_update_params params;
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct qed_sge_tpa_params sge_tpa_params;
+	struct qed_rss_params rss_params;
+	u8 status = PFVF_STATUS_SUCCESS;
+	u16 tlvs_mask = 0;
+	u16 length;
+	int rc;
+
+	memset(&params, 0, sizeof(params));
+	params.opaque_fid = vf->opaque_fid;
+	params.vport_id = vf->vport_id;
+	params.rss_params = NULL;
+
+	/* Search for extended tlvs list and update values
+	 * from VF in struct qed_sp_vport_update_params.
+	 */
+	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
+				    mbx, &tlvs_mask);
+	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+					&sge_tpa_params, mbx, &tlvs_mask);
+
+	/* Just log a message if there isn't a single extended tlv in the
+	 * buffer. Once every feature of the vport update ramrod is requested
+	 * by the VF as an extended TLV, an error can be returned in the
+	 * response if no extended TLV is present in the buffer.
+	 */
+	if (!tlvs_mask) {
+		DP_NOTICE(p_hwfn,
+			  "No feature tlvs found for vport update\n");
+		status = PFVF_STATUS_NOT_SUPPORTED;
+		goto out;
+	}
+
+	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+
+	if (rc)
+		status = PFVF_STATUS_FAILURE;
+
+out:
+	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+						  tlvs_mask, tlvs_mask);
+	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+					    struct qed_vf_info *p_vf,
+					    struct qed_filter_ucast *p_params)
+{
+	int i;
+
+	if (p_params->type == QED_FILTER_MAC)
+		return 0;
+
+	/* First remove entries and then add new ones */
+	if (p_params->opcode == QED_FILTER_REMOVE) {
+		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+			if (p_vf->shadow_config.vlans[i].used &&
+			    p_vf->shadow_config.vlans[i].vid ==
+			    p_params->vlan) {
+				p_vf->shadow_config.vlans[i].used = false;
+				break;
+			}
+		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "VF [%d] - Tries to remove a non-existing vlan\n",
+				   p_vf->relative_vf_id);
+			return -EINVAL;
+		}
+	} else if (p_params->opcode == QED_FILTER_REPLACE ||
+		   p_params->opcode == QED_FILTER_FLUSH) {
+		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+			p_vf->shadow_config.vlans[i].used = false;
+	}
+
+	/* In forced mode, we're willing to remove entries - but we don't add
+	 * new ones.
+	 */
+	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+		return 0;
+
+	if (p_params->opcode == QED_FILTER_ADD ||
+	    p_params->opcode == QED_FILTER_REPLACE) {
+		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+			if (p_vf->shadow_config.vlans[i].used)
+				continue;
+
+			p_vf->shadow_config.vlans[i].used = true;
+			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+			break;
+		}
+
+		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "VF [%d] - Tries to configure more than %d vlan filters\n",
+				   p_vf->relative_vf_id,
+				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+		      int vfid, struct qed_filter_ucast *params)
+{
+	struct qed_public_vf_info *vf;
+
+	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+	if (!vf)
+		return -EINVAL;
+
+	/* No real decision to make; Store the configured MAC */
+	if (params->type == QED_FILTER_MAC ||
+	    params->type == QED_FILTER_MAC_VLAN)
+		ether_addr_copy(vf->mac, params->mac);
+
+	return 0;
+}
+
+static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
+					struct qed_ptt *p_ptt,
+					struct qed_vf_info *vf)
+{
+	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct vfpf_ucast_filter_tlv *req;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct qed_filter_ucast params;
+	int rc;
+
+	/* Prepare the unicast filter params */
+	memset(&params, 0, sizeof(struct qed_filter_ucast));
+	req = &mbx->req_virt->ucast_filter;
+	params.opcode = (enum qed_filter_opcode)req->opcode;
+	params.type = (enum qed_filter_ucast_type)req->type;
+
+	params.is_rx_filter = 1;
+	params.is_tx_filter = 1;
+	params.vport_to_remove_from = vf->vport_id;
+	params.vport_to_add_to = vf->vport_id;
+	memcpy(params.mac, req->mac, ETH_ALEN);
+	params.vlan = req->vlan;
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+		   vf->abs_vf_id, params.opcode, params.type,
+		   params.is_rx_filter ? "RX" : "",
+		   params.is_tx_filter ? "TX" : "",
+		   params.vport_to_add_to,
+		   params.mac[0], params.mac[1],
+		   params.mac[2], params.mac[3],
+		   params.mac[4], params.mac[5], params.vlan);
+
+	if (!vf->vport_instance) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV,
+			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+			   vf->abs_vf_id);
+		status = PFVF_STATUS_FAILURE;
+		goto out;
+	}
+
+	/* Update shadow copy of the VF configuration */
+	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
+		status = PFVF_STATUS_FAILURE;
+		goto out;
+	}
+
+	/* Determine if the unicast filtering is acceptable by PF */
+	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+	    (params.type == QED_FILTER_VLAN ||
+	     params.type == QED_FILTER_MAC_VLAN)) {
+		/* Once VLAN is forced or PVID is set, do not allow
+		 * to add/replace any further VLANs.
+		 */
+		if (params.opcode == QED_FILTER_ADD ||
+		    params.opcode == QED_FILTER_REPLACE)
+			status = PFVF_STATUS_FORCED;
+		goto out;
+	}
+
+	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+	    (params.type == QED_FILTER_MAC ||
+	     params.type == QED_FILTER_MAC_VLAN)) {
+		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
+		    (params.opcode != QED_FILTER_ADD &&
+		     params.opcode != QED_FILTER_REPLACE))
+			status = PFVF_STATUS_FORCED;
+		goto out;
+	}
+
+	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
+	if (rc) {
+		status = PFVF_STATUS_FAILURE;
+		goto out;
+	}
+
+	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+				     QED_SPQ_MODE_CB, NULL);
+	if (rc)
+		status = PFVF_STATUS_FAILURE;
+
+out:
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+			     sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
+				       struct qed_ptt *p_ptt,
+				       struct qed_vf_info *vf)
+{
+	int i;
+
+	/* Reset the SBs */
+	for (i = 0; i < vf->num_sbs; i++)
+		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+						vf->igu_sbs[i],
+						vf->opaque_fid, false);
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+			     sizeof(struct pfvf_def_resp_tlv),
+			     PFVF_STATUS_SUCCESS);
+}
+
+static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+	u8 status = PFVF_STATUS_SUCCESS;
+
+	/* Disable Interrupts for VF */
+	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+	/* Reset Permission table */
+	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+			     length, status);
+}
+
+static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt,
+				   struct qed_vf_info *p_vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+
+	qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+			     length, PFVF_STATUS_SUCCESS);
+}
+
+static int
+qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
+			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+	int cnt;
+	u32 val;
+
+	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+
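+	/* While pretending to be this VF, poll the DORQ usage counter until
+	 * it drops to zero (up to 50 iterations of 20ms).
+	 */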
+	for (cnt = 0; cnt < 50; cnt++) {
+		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+		if (!val)
+			break;
+		msleep(20);
+	}
+	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+	if (cnt == 50) {
+		DP_ERR(p_hwfn,
+		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+		       p_vf->abs_vf_id, val);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
+			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+	int i, cnt;
+
+	/* Read initial consumers & producers */
+	for (i = 0; i < MAX_NUM_VOQS; i++) {
+		u32 prod;
+
+		cons[i] = qed_rd(p_hwfn, p_ptt,
+				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+				 i * 0x40);
+		prod = qed_rd(p_hwfn, p_ptt,
+			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+			      i * 0x40);
+		distance[i] = prod - cons[i];
+	}
+
+	/* Wait for consumers to pass the producers */
+	i = 0;
+	for (cnt = 0; cnt < 50; cnt++) {
+		for (; i < MAX_NUM_VOQS; i++) {
+			u32 tmp;
+
+			tmp = qed_rd(p_hwfn, p_ptt,
+				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+				     i * 0x40);
+			if (distance[i] > tmp - cons[i])
+				break;
+		}
+
+		if (i == MAX_NUM_VOQS)
+			break;
+
+		msleep(20);
+	}
+
+	if (cnt == 50) {
+		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+		       p_vf->abs_vf_id, i);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
+			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+	int rc;
+
+	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+	if (rc)
+		return rc;
+
+	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int
+qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
+			       struct qed_ptt *p_ptt,
+			       u16 rel_vf_id, u32 *ack_vfs)
+{
+	struct qed_vf_info *p_vf;
+	int rc = 0;
+
+	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+	if (!p_vf)
+		return 0;
+
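+	/* Only act if this VF has a pending FLR indication set in the bitmap */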
+	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+	    (1ULL << (rel_vf_id % 64))) {
+		u16 vfid = p_vf->abs_vf_id;
+
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "VF[%d] - Handling FLR\n", vfid);
+
+		qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+		/* If VF isn't active, no need for anything but SW */
+		if (!p_vf->b_init)
+			goto cleanup;
+
+		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+		if (rc)
+			goto cleanup;
+
+		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
+		if (rc) {
+			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
+			return rc;
+		}
+
+		/* VF_STOPPED has to be set only after final cleanup
+		 * but prior to re-enabling the VF.
+		 */
+		p_vf->state = VF_STOPPED;
+
+		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+		if (rc) {
+			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
+			       vfid);
+			return rc;
+		}
+cleanup:
+		/* Mark VF for ack and clean pending state */
+		if (p_vf->state == VF_RESET)
+			p_vf->state = VF_STOPPED;
+		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+		    ~(1ULL << (rel_vf_id % 64));
+		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
+		    ~(1ULL << (rel_vf_id % 64));
+	}
+
+	return rc;
+}
+
+int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	u32 ack_vfs[VF_MAX_STATIC / 32];
+	int rc = 0;
+	u16 i;
+
+	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+	/* Since BRB <-> PRS interface can't be tested as part of the flr
+	 * polling due to HW limitations, simply sleep a bit. And since
+	 * there's no need to wait per-vf, do it before looping.
+	 */
+	msleep(100);
+
+	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
+		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+	return rc;
+}
+
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+	u16 i, found = 0;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
+	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "[%08x,...,%08x]: %08x\n",
+			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+	if (!p_hwfn->cdev->p_iov_info) {
+		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
+		return 0;
+	}
+
+	/* Mark VFs */
+	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
+		struct qed_vf_info *p_vf;
+		u8 vfid;
+
+		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
+		if (!p_vf)
+			continue;
+
+		vfid = p_vf->abs_vf_id;
+		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+			u16 rel_vf_id = p_vf->relative_vf_id;
+
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "VF[%d] [rel %d] got FLR-ed\n",
+				   vfid, rel_vf_id);
+
+			p_vf->state = VF_RESET;
+
+			/* No need to lock here, since pending_flr should
+			 * only change here and before ACKing the MFW. Since
+			 * the MFW will not trigger an additional attention
+			 * for VF FLR until we ACK, we're safe.
+			 */
+			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+			found = 1;
+		}
+	}
+
+	return found;
+}
+
+static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
+			     u16 vfid,
+			     struct qed_mcp_link_params *p_params,
+			     struct qed_mcp_link_state *p_link,
+			     struct qed_mcp_link_capabilities *p_caps)
+{
+	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+						       vfid,
+						       false);
+	struct qed_bulletin_content *p_bulletin;
+
+	if (!p_vf)
+		return;
+
+	p_bulletin = p_vf->bulletin.p_virt;
+
+	if (p_params)
+		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+	if (p_link)
+		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+	if (p_caps)
+		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+}
+
+static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
+				    struct qed_ptt *p_ptt, int vfid)
+{
+	struct qed_iov_vf_mbx *mbx;
+	struct qed_vf_info *p_vf;
+	int i;
+
+	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!p_vf)
+		return;
+
+	mbx = &p_vf->vf_mbx;
+
+	/* qed_iov_process_mbx_request */
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+
+	mbx->first_tlv = mbx->req_virt->first_tlv;
+
+	/* check if tlv type is known */
+	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+		switch (mbx->first_tlv.tl.type) {
+		case CHANNEL_TLV_ACQUIRE:
+			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_VPORT_START:
+			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_VPORT_TEARDOWN:
+			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_START_RXQ:
+			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_START_TXQ:
+			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_STOP_RXQS:
+			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_STOP_TXQS:
+			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_UPDATE_RXQ:
+			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_VPORT_UPDATE:
+			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_UCAST_FILTER:
+			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_CLOSE:
+			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_INT_CLEANUP:
+			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+			break;
+		case CHANNEL_TLV_RELEASE:
+			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+			break;
+		}
+	} else {
+		/* unknown TLV - this may belong to a VF driver from the future
+		 * - a version written after this PF driver was written, which
+		 * supports features unknown as of yet. Too bad since we don't
+		 * support them. Or this may be because someone wrote a crappy
+		 * VF driver and is sending garbage over the channel.
+		 */
+		DP_ERR(p_hwfn,
+		       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
+		       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+
+		for (i = 0; i < 20; i++) {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "%x ",
+				   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
+		}
+	}
+}
+
+void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+	u64 add_bit = 1ULL << (vfid % 64);
+
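+	/* pending_events holds one bit per VF, 64 VFs per u64 entry */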
+	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
+						    u64 *events)
+{
+	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+}
+
+static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
+			      u16 abs_vfid, struct regpair *vf_msg)
+{
+	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+	struct qed_vf_info *p_vf;
+
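+	/* The event carries the absolute VF id; convert it to an index that
+	 * is relative to this PF before doing any lookups.
+	 */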
+	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV,
+			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
+			   abs_vfid);
+		return 0;
+	}
+	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+	/* Record the physical address of the request so that the handler
+	 * can later copy the message from it.
+	 */
+	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+
+	/* Mark the event and schedule the workqueue */
+	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
+
+	return 0;
+}
+
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+			u8 opcode, __le16 echo, union event_ring_data *data)
+{
+	switch (opcode) {
+	case COMMON_EVENT_VF_PF_CHANNEL:
+		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
+					  &data->vf_pf_channel.msg_addr);
+	default:
+		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
+			opcode);
+		return -EINVAL;
+	}
+}
+
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+	u16 i;
+
+	if (!p_iov)
+		goto out;
+
+	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
+			return i;
+
+out:
+	return MAX_NUM_VFS;
+}
+
+static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
+			       int vfid)
+{
+	struct qed_dmae_params params;
+	struct qed_vf_info *vf_info;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!vf_info)
+		return -EINVAL;
+
+	memset(&params, 0, sizeof(struct qed_dmae_params));
+	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
+	params.src_vfid = vf_info->abs_vf_id;
+
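+	/* DMA the request from the VF's mailbox (pending_req holds its
+	 * physical address) into the PF's per-VF request buffer; the length
+	 * argument is in dwords.
+	 */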
+	if (qed_dmae_host2host(p_hwfn, ptt,
+			       vf_info->vf_mbx.pending_req,
+			       vf_info->vf_mbx.req_phys,
+			       sizeof(union vfpf_tlvs) / 4, &params)) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Failed to copy message from VF 0x%02x\n", vfid);
+
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
+					    u8 *mac, int vfid)
+{
+	struct qed_vf_info *vf_info;
+	u64 feature;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+	if (!vf_info) {
+		DP_NOTICE(p_hwfn->cdev,
+			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+		return;
+	}
+
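+	/* Publish the forced MAC on the VF's bulletin board */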
+	feature = 1 << MAC_ADDR_FORCED;
+	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+	vf_info->bulletin.p_virt->valid_bitmap |= feature;
+	/* Forced MAC will disable MAC_ADDR */
+	vf_info->bulletin.p_virt->valid_bitmap &=
+				~(1 << VFPF_BULLETIN_MAC_ADDR);
+
+	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+				      u16 pvid, int vfid)
+{
+	struct qed_vf_info *vf_info;
+	u64 feature;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!vf_info) {
+		DP_NOTICE(p_hwfn->cdev,
+			  "Can not set forced vlan, invalid vfid [%d]\n", vfid);
+		return;
+	}
+
+	feature = 1 << VLAN_ADDR_FORCED;
+	vf_info->bulletin.p_virt->pvid = pvid;
+	if (pvid)
+		vf_info->bulletin.p_virt->valid_bitmap |= feature;
+	else
+		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
+{
+	struct qed_vf_info *p_vf_info;
+
+	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!p_vf_info)
+		return false;
+
+	return !!p_vf_info->vport_instance;
+}
+
+bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+{
+	struct qed_vf_info *p_vf_info;
+
+	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!p_vf_info)
+		return true;
+
+	return p_vf_info->state == VF_STOPPED;
+}
+
+static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
+{
+	struct qed_vf_info *vf_info;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!vf_info)
+		return false;
+
+	return vf_info->spoof_chk;
+}
+
+int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+{
+	struct qed_vf_info *vf;
+	int rc = -EINVAL;
+
+	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+		DP_NOTICE(p_hwfn,
+			  "SR-IOV sanity check failed, can't set spoofchk\n");
+		goto out;
+	}
+
+	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!vf)
+		goto out;
+
+	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+		/* After VF VPORT start PF will configure spoof check */
+		vf->req_spoofchk_val = val;
+		rc = 0;
+		goto out;
+	}
+
+	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+	return rc;
+}
+
+static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
+					   u16 rel_vf_id)
+{
+	struct qed_vf_info *p_vf;
+
+	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+	if (!p_vf || !p_vf->bulletin.p_virt)
+		return NULL;
+
+	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+		return NULL;
+
+	return p_vf->bulletin.p_virt->mac;
+}
+
+u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+	struct qed_vf_info *p_vf;
+
+	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+	if (!p_vf || !p_vf->bulletin.p_virt)
+		return 0;
+
+	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+		return 0;
+
+	return p_vf->bulletin.p_virt->pvid;
+}
+
+static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt, int vfid, int val)
+{
+	struct qed_vf_info *vf;
+	u8 abs_vp_id = 0;
+	int rc;
+
+	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+	if (!vf)
+		return -EINVAL;
+
+	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+	if (rc)
+		return rc;
+
+	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+}
+
+int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+{
+	struct qed_vf_info *vf;
+	u8 vport_id;
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+			DP_NOTICE(p_hwfn,
+				  "SR-IOV sanity check failed, can't set min rate\n");
+			return -EINVAL;
+		}
+	}
+
+	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+	vport_id = vf->vport_id;
+
+	return qed_configure_vport_wfq(cdev, vport_id, rate);
+}
+
+static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
+{
+	struct qed_wfq_data *vf_vp_wfq;
+	struct qed_vf_info *vf_info;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!vf_info)
+		return 0;
+
+	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+	if (vf_vp_wfq->configured)
+		return vf_vp_wfq->min_speed;
+	else
+		return 0;
+}
+
+/**
+ * qed_schedule_iov - schedules IOV task for VF and PF
+ * @hwfn: hardware function pointer
+ * @flag: IOV flag for VF/PF
+ */
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
+{
+	smp_mb__before_atomic();
+	set_bit(flag, &hwfn->iov_task_flags);
+	smp_mb__after_atomic();
+	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
+}
+
+void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i)
+	    queue_delayed_work(cdev->hwfns[i].iov_wq,
+			       &cdev->hwfns[i].iov_task, 0);
+}
+
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+	int i, j;
+
+	for_each_hwfn(cdev, i)
+	    if (cdev->hwfns[i].iov_wq)
+		flush_workqueue(cdev->hwfns[i].iov_wq);
+
+	/* Mark VFs for disablement */
+	qed_iov_set_vfs_to_disable(cdev, true);
+
+	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
+		pci_disable_sriov(cdev->pdev);
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+		/* Failure to acquire the ptt in 100g creates an odd error
+		 * where the first engine has already released IOV.
+		 */
+		if (!ptt) {
+			DP_ERR(hwfn, "Failed to acquire ptt\n");
+			return -EBUSY;
+		}
+
+		/* Clean WFQ db and configure equal weight for all vports */
+		qed_clean_wfq_db(hwfn, ptt);
+
+		qed_for_each_vf(hwfn, j) {
+			int k;
+
+			if (!qed_iov_is_valid_vfid(hwfn, j, true))
+				continue;
+
+			/* Wait until VF is disabled before releasing */
+			for (k = 0; k < 100; k++) {
+				if (!qed_iov_is_vf_stopped(hwfn, j))
+					msleep(20);
+				else
+					break;
+			}
+
+			if (k < 100)
+				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
+							  ptt, j);
+			else
+				DP_ERR(hwfn,
+				       "Timeout waiting for VF's FLR to end\n");
+		}
+
+		qed_ptt_release(hwfn, ptt);
+	}
+
+	qed_iov_set_vfs_to_disable(cdev, false);
+
+	return 0;
+}
+
+static int qed_sriov_enable(struct qed_dev *cdev, int num)
+{
+	struct qed_sb_cnt_info sb_cnt_info;
+	int i, j, rc;
+
+	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
+		DP_NOTICE(cdev, "Can start at most %d VFs\n",
+			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
+		return -EINVAL;
+	}
+
+	/* Initialize HW for VF access */
+	for_each_hwfn(cdev, j) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[j];
+		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+		int num_sbs = 0, limit = 16;
+
+		if (!ptt) {
+			DP_ERR(hwfn, "Failed to acquire ptt\n");
+			rc = -EBUSY;
+			goto err;
+		}
+
+		if (IS_MF_DEFAULT(hwfn))
+			limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
+
+		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+		qed_int_get_num_sbs(hwfn, &sb_cnt_info);
+		num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
+
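+		/* Give each of the requested VFs an equal share of the
+		 * available status blocks.
+		 */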
+		for (i = 0; i < num; i++) {
+			if (!qed_iov_is_valid_vfid(hwfn, i, false))
+				continue;
+
+			rc = qed_iov_init_hw_for_vf(hwfn,
+						    ptt, i, num_sbs / num);
+			if (rc) {
+				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
+				qed_ptt_release(hwfn, ptt);
+				goto err;
+			}
+		}
+
+		qed_ptt_release(hwfn, ptt);
+	}
+
+	/* Enable SRIOV PCIe functions */
+	rc = pci_enable_sriov(cdev->pdev, num);
+	if (rc) {
+		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
+		goto err;
+	}
+
+	return num;
+
+err:
+	qed_sriov_disable(cdev, false);
+	return rc;
+}
+
+static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
+{
+	if (!IS_QED_SRIOV(cdev)) {
+		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (num_vfs_param)
+		return qed_sriov_enable(cdev, num_vfs_param);
+	else
+		return qed_sriov_disable(cdev, true);
+}
+
+static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
+{
+	int i;
+
+	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "Cannot set a VF MAC; Sriov is not enabled\n");
+		return -EINVAL;
+	}
+
+	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+		return -EINVAL;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+		struct qed_public_vf_info *vf_info;
+
+		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+		if (!vf_info)
+			continue;
+
+		/* Set the forced MAC, and schedule the IOV task */
+		ether_addr_copy(vf_info->forced_mac, mac);
+		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+	}
+
+	return 0;
+}
+
+static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
+{
+	int i;
+
+	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "Cannot set a VF vlan; Sriov is not enabled\n");
+		return -EINVAL;
+	}
+
+	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "Cannot set VF[%d] vlan (VF is not active)\n", vfid);
+		return -EINVAL;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+		struct qed_public_vf_info *vf_info;
+
+		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+		if (!vf_info)
+			continue;
+
+		/* Set the forced vlan, and schedule the IOV task */
+		vf_info->forced_vlan = vid;
+		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+	}
+
+	return 0;
+}
+
+static int qed_get_vf_config(struct qed_dev *cdev,
+			     int vf_id, struct ifla_vf_info *ivi)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_public_vf_info *vf_info;
+	struct qed_mcp_link_state link;
+	u32 tx_rate;
+
+	/* Sanitize request */
+	if (IS_VF(cdev))
+		return -EINVAL;
+
+	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "VF index [%d] isn't active\n", vf_id);
+		return -EINVAL;
+	}
+
+	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+
+	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+
+	/* Fill information about VF */
+	ivi->vf = vf_id;
+
+	if (is_valid_ether_addr(vf_info->forced_mac))
+		ether_addr_copy(ivi->mac, vf_info->forced_mac);
+	else
+		ether_addr_copy(ivi->mac, vf_info->mac);
+
+	ivi->vlan = vf_info->forced_vlan;
+	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
+	ivi->linkstate = vf_info->link_state;
+	tx_rate = vf_info->tx_rate;
+	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
+	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+
+	return 0;
+}
+
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+	struct qed_mcp_link_capabilities caps;
+	struct qed_mcp_link_params params;
+	struct qed_mcp_link_state link;
+	int i;
+
+	if (!hwfn->pf_iov_info)
+		return;
+
+	/* Update bulletin of all future possible VFs with link configuration */
+	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
+		struct qed_public_vf_info *vf_info;
+
+		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
+		if (!vf_info)
+			continue;
+
+		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+		       sizeof(caps));
+
+		/* Modify link according to the VF's configured link state */
+		switch (vf_info->link_state) {
+		case IFLA_VF_LINK_STATE_DISABLE:
+			link.link_up = false;
+			break;
+		case IFLA_VF_LINK_STATE_ENABLE:
+			link.link_up = true;
+			/* Set speed according to the maximum supported by HW,
+			 * that is 40G for regular devices and 100G for CMT
+			 * mode devices.
+			 */
+			link.speed = (hwfn->cdev->num_hwfns > 1) ?
+				     100000 : 40000;
+		default:
+			/* In auto mode pass PF link image to VF */
+			break;
+		}
+
+		if (link.link_up && vf_info->tx_rate) {
+			struct qed_ptt *ptt;
+			int rate;
+
+			rate = min_t(int, vf_info->tx_rate, link.speed);
+
+			ptt = qed_ptt_acquire(hwfn);
+			if (!ptt) {
+				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
+				return;
+			}
+
+			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
+				vf_info->tx_rate = rate;
+				link.speed = rate;
+			}
+
+			qed_ptt_release(hwfn, ptt);
+		}
+
+		qed_iov_set_link(hwfn, i, &params, &link, &caps);
+	}
+
+	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+}
+
+static int qed_set_vf_link_state(struct qed_dev *cdev,
+				 int vf_id, int link_state)
+{
+	int i;
+
+	/* Sanitize request */
+	if (IS_VF(cdev))
+		return -EINVAL;
+
+	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "VF index [%d] isn't active\n", vf_id);
+		return -EINVAL;
+	}
+
+	/* Handle configuration of link state */
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+		struct qed_public_vf_info *vf;
+
+		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+		if (!vf)
+			continue;
+
+		if (vf->link_state == link_state)
+			continue;
+
+		vf->link_state = link_state;
+		qed_inform_vf_link_state(&cdev->hwfns[i]);
+	}
+
+	return 0;
+}
+
+static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
+{
+	int i, rc = -EINVAL;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
+{
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		struct qed_public_vf_info *vf;
+
+		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+			DP_NOTICE(p_hwfn,
+				  "SR-IOV sanity check failed, can't set tx rate\n");
+			return -EINVAL;
+		}
+
+		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
+
+		vf->tx_rate = rate;
+
+		qed_inform_vf_link_state(p_hwfn);
+	}
+
+	return 0;
+}
+
+static int qed_set_vf_rate(struct qed_dev *cdev,
+			   int vfid, u32 min_rate, u32 max_rate)
+{
+	int rc_min = 0, rc_max = 0;
+
+	if (max_rate)
+		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
+
+	if (min_rate)
+		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
+
+	if (rc_max | rc_min)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
+{
+	u64 events[QED_VF_ARRAY_LENGTH];
+	struct qed_ptt *ptt;
+	int i;
+
+	ptt = qed_ptt_acquire(hwfn);
+	if (!ptt) {
+		DP_VERBOSE(hwfn, QED_MSG_IOV,
+			   "Can't acquire PTT; re-scheduling\n");
+		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
+		return;
+	}
+
+	qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+
+	DP_VERBOSE(hwfn, QED_MSG_IOV,
+		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
+		   events[0], events[1], events[2]);
+
+	qed_for_each_vf(hwfn, i) {
+		/* Skip VFs with no pending messages */
+		if (!(events[i / 64] & (1ULL << (i % 64))))
+			continue;
+
+		DP_VERBOSE(hwfn, QED_MSG_IOV,
+			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
+			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+		/* Copy VF's message to PF's request buffer for that VF */
+		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
+			continue;
+
+		qed_iov_process_mbx_req(hwfn, ptt, i);
+	}
+
+	qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
+{
+	int i;
+
+	qed_for_each_vf(hwfn, i) {
+		struct qed_public_vf_info *info;
+		bool update = false;
+		u8 *mac;
+
+		info = qed_iov_get_public_vf_info(hwfn, i, true);
+		if (!info)
+			continue;
+
+		/* Update data on bulletin board */
+		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
+		if (is_valid_ether_addr(info->forced_mac) &&
+		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
+			DP_VERBOSE(hwfn,
+				   QED_MSG_IOV,
+				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
+				   i,
+				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+			/* Update bulletin board with forced MAC */
+			qed_iov_bulletin_set_forced_mac(hwfn,
+							info->forced_mac, i);
+			update = true;
+		}
+
+		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
+		    info->forced_vlan) {
+			DP_VERBOSE(hwfn,
+				   QED_MSG_IOV,
+				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
+				   info->forced_vlan,
+				   i,
+				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+			qed_iov_bulletin_set_forced_vlan(hwfn,
+							 info->forced_vlan, i);
+			update = true;
+		}
+
+		if (update)
+			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+	}
+}
+
+static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
+{
+	struct qed_ptt *ptt;
+	int i;
+
+	ptt = qed_ptt_acquire(hwfn);
+	if (!ptt) {
+		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
+		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+		return;
+	}
+
+	qed_for_each_vf(hwfn, i)
+	    qed_iov_post_vf_bulletin(hwfn, i, ptt);
+
+	qed_ptt_release(hwfn, ptt);
+}
+
+void qed_iov_pf_task(struct work_struct *work)
+{
+	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+					     iov_task.work);
+	int rc;
+
+	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+		return;
+
+	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
+		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+		if (!ptt) {
+			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+			return;
+		}
+
+		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
+		if (rc)
+			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+
+		qed_ptt_release(hwfn, ptt);
+	}
+
+	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
+		qed_handle_vf_msg(hwfn);
+
+	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+			       &hwfn->iov_task_flags))
+		qed_handle_pf_set_vf_unicast(hwfn);
+
+	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+			       &hwfn->iov_task_flags))
+		qed_handle_bulletin_post(hwfn);
+}
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		if (!cdev->hwfns[i].iov_wq)
+			continue;
+
+		if (schedule_first) {
+			qed_schedule_iov(&cdev->hwfns[i],
+					 QED_IOV_WQ_STOP_WQ_FLAG);
+			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
+		}
+
+		flush_workqueue(cdev->hwfns[i].iov_wq);
+		destroy_workqueue(cdev->hwfns[i].iov_wq);
+	}
+}
+
+int qed_iov_wq_start(struct qed_dev *cdev)
+{
+	char name[NAME_SIZE];
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		/* PFs need a dedicated workqueue only if they support IOV.
+		 * VFs always require one.
+		 */
+		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
+			continue;
+
+		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
+			 cdev->pdev->bus->number,
+			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
+
+		p_hwfn->iov_wq = create_singlethread_workqueue(name);
+		if (!p_hwfn->iov_wq) {
+			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
+			return -ENOMEM;
+		}
+
+		if (IS_PF(cdev))
+			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
+		else
+			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
+	}
+
+	return 0;
+}
+
+const struct qed_iov_hv_ops qed_iov_ops_pass = {
+	.configure = &qed_sriov_configure,
+	.set_mac = &qed_sriov_pf_set_mac,
+	.set_vlan = &qed_sriov_pf_set_vlan,
+	.get_config = &qed_get_vf_config,
+	.set_link_state = &qed_set_vf_link_state,
+	.set_spoof = &qed_spoof_configure,
+	.set_rate = &qed_set_vf_rate,
+};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
new file mode 100644
index 0000000..c8667c6
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -0,0 +1,386 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SRIOV_H
+#define _QED_SRIOV_H
+#include <linux/types.h>
+#include "qed_vf.h"
+#define QED_VF_ARRAY_LENGTH (3)
+
+#define IS_VF(cdev)             ((cdev)->b_is_vf)
+#define IS_PF(cdev)             (!((cdev)->b_is_vf))
+#ifdef CONFIG_QED_SRIOV
+#define IS_PF_SRIOV(p_hwfn)     (!!((p_hwfn)->cdev->p_iov_info))
+#else
+#define IS_PF_SRIOV(p_hwfn)     (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))
+
+#define QED_MAX_VF_CHAINS_PER_PF 16
+#define QED_ETH_VF_NUM_VLAN_FILTERS 2
+
+#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS	\
+	(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
+
+enum qed_iov_vport_update_flag {
+	QED_IOV_VP_UPDATE_ACTIVATE,
+	QED_IOV_VP_UPDATE_VLAN_STRIP,
+	QED_IOV_VP_UPDATE_TX_SWITCH,
+	QED_IOV_VP_UPDATE_MCAST,
+	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
+	QED_IOV_VP_UPDATE_RSS,
+	QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
+	QED_IOV_VP_UPDATE_SGE_TPA,
+	QED_IOV_VP_UPDATE_MAX,
+};
+
+struct qed_public_vf_info {
+	/* These copies will later be reflected in the bulletin board,
+	 * but this copy should be newer.
+	 */
+	u8 forced_mac[ETH_ALEN];
+	u16 forced_vlan;
+	u8 mac[ETH_ALEN];
+
+	/* IFLA_VF_LINK_STATE_<X> */
+	int link_state;
+
+	/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
+	int tx_rate;
+};
+
+/* This struct is part of qed_dev and contains data relevant to all hwfns;
+ * Initialized only if SR-IOV capability is exposed in PCIe config space.
+ */
+struct qed_hw_sriov_info {
+	int pos;		/* capability position */
+	int nres;		/* number of resources */
+	u32 cap;		/* SR-IOV Capabilities */
+	u16 ctrl;		/* SR-IOV Control */
+	u16 total_vfs;		/* total VFs associated with the PF */
+	u16 num_vfs;		/* number of vfs that have been started */
+	u16 initial_vfs;	/* initial VFs associated with the PF */
+	u16 nr_virtfn;		/* number of VFs available */
+	u16 offset;		/* first VF Routing ID offset */
+	u16 stride;		/* following VF stride */
+	u16 vf_device_id;	/* VF device id */
+	u32 pgsz;		/* page size for BAR alignment */
+	u8 link;		/* Function Dependency Link */
+
+	u32 first_vf_in_pf;
+};
+
+/* This mailbox is maintained per VF in its PF and contains all information
+ * required for sending / receiving a message.
+ */
+struct qed_iov_vf_mbx {
+	union vfpf_tlvs *req_virt;
+	dma_addr_t req_phys;
+	union pfvf_tlvs *reply_virt;
+	dma_addr_t reply_phys;
+
+	/* Address in VF where a pending message is located */
+	dma_addr_t pending_req;
+
+	u8 *offset;
+
+	/* saved VF request header */
+	struct vfpf_first_tlv first_tlv;
+};
+
+struct qed_vf_q_info {
+	u16 fw_rx_qid;
+	u16 fw_tx_qid;
+	u8 fw_cid;
+	u8 rxq_active;
+	u8 txq_active;
+};
+
+enum vf_state {
+	VF_FREE = 0,		/* VF ready to be acquired holds no resc */
+	VF_ACQUIRED,		/* VF, acquired, but not initialized */
+	VF_ENABLED,		/* VF, Enabled */
+	VF_RESET,		/* VF, FLR'd, pending cleanup */
+	VF_STOPPED		/* VF, Stopped */
+};
+
+struct qed_vf_vlan_shadow {
+	bool used;
+	u16 vid;
+};
+
+struct qed_vf_shadow_config {
+	/* Shadow copy of all guest vlans */
+	struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+	u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+struct qed_vf_info {
+	struct qed_iov_vf_mbx vf_mbx;
+	enum vf_state state;
+	bool b_init;
+	u8 to_disable;
+
+	struct qed_bulletin bulletin;
+	dma_addr_t vf_bulletin;
+
+	u32 concrete_fid;
+	u16 opaque_fid;
+	u16 mtu;
+
+	u8 vport_id;
+	u8 relative_vf_id;
+	u8 abs_vf_id;
+#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
+					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+					 (p_vf)->abs_vf_id)
+
+	u8 vport_instance;
+	u8 num_rxqs;
+	u8 num_txqs;
+
+	u8 num_sbs;
+
+	u8 num_mac_filters;
+	u8 num_vlan_filters;
+	struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
+	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
+	u8 num_active_rxqs;
+	struct qed_public_vf_info p_vf_info;
+	bool spoof_chk;
+	bool req_spoofchk_val;
+
+	/* Stores the configuration requested by VF */
+	struct qed_vf_shadow_config shadow_config;
+
+	/* A bitfield using bulletin's valid-map bits, used to indicate
+	 * which of the bulletin board features have been configured.
+	 */
+	u64 configured_features;
+#define QED_IOV_CONFIGURED_FEATURES_MASK        ((1 << MAC_ADDR_FORCED) | \
+						 (1 << VLAN_ADDR_FORCED))
+};
+
+/* This structure is part of qed_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct qed_pf_iov {
+	struct qed_vf_info vfs_array[MAX_NUM_VFS];
+	u64 pending_events[QED_VF_ARRAY_LENGTH];
+	u64 pending_flr[QED_VF_ARRAY_LENGTH];
+
+	/* Allocate message address contiguously and split to each VF */
+	void *mbx_msg_virt_addr;
+	dma_addr_t mbx_msg_phys_addr;
+	u32 mbx_msg_size;
+	void *mbx_reply_virt_addr;
+	dma_addr_t mbx_reply_phys_addr;
+	u32 mbx_reply_size;
+	void *p_bulletins;
+	dma_addr_t bulletins_phys;
+	u32 bulletins_size;
+};
+
+enum qed_iov_wq_flag {
+	QED_IOV_WQ_MSG_FLAG,
+	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+	QED_IOV_WQ_STOP_WQ_FLAG,
+	QED_IOV_WQ_FLR_FLAG,
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief - Given a VF index, return index of next [including that] active VF.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
+ */
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief Read sriov related information and allocated resources
+ *  reads from configuration space, shmem, etc.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
+ *
+ * @param p_hwfn
+ * @param p_iov
+ * @param type
+ * @param length
+ *
+ * @return pointer to the newly placed tlv
+ */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
+
+/**
+ * @brief list the types and lengths of the tlvs on the buffer
+ *
+ * @param p_hwfn
+ * @param tlvs_list
+ */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
+
+/**
+ * @brief qed_iov_alloc - allocate sriov related resources
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_iov_setup - setup sriov related resources
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_iov_free - free sriov related resources
+ *
+ * @param p_hwfn
+ */
+void qed_iov_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief free sriov related memory that was allocated during hw_prepare
+ *
+ * @param cdev
+ */
+void qed_iov_free_hw_info(struct qed_dev *cdev);
+
+/**
+ * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+			u8 opcode, __le16 echo, union event_ring_data *data);
+
+/**
+ * @brief Mark structs of vfs that have been FLR-ed.
+ *
+ * @param p_hwfn
+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ *
+ * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ */
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+			       void *p_tlvs_list, u16 req_type);
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
+int qed_iov_wq_start(struct qed_dev *cdev);
+
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
+void qed_vf_start_iov_wq(struct qed_dev *cdev);
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
+#else
+static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
+					     u16 rel_vf_id)
+{
+	return MAX_NUM_VFS;
+}
+
+static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+	return 0;
+}
+
+static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+	return 0;
+}
+
+static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+}
+
+static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+}
+
+static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+				      u8 opcode,
+				      __le16 echo, union event_ring_data *data)
+{
+	return -EINVAL;
+}
+
+static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+				      u32 *disabled_vfs)
+{
+	return 0;
+}
+
+static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+}
+
+static inline int qed_iov_wq_start(struct qed_dev *cdev)
+{
+	return 0;
+}
+
+static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
+				    enum qed_iov_wq_flag flag)
+{
+}
+
+static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+	return 0;
+}
+
+static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+}
+#endif
+
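+/* Iterate over all currently active VFs of a hwfn */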
+#define qed_for_each_vf(_p_hwfn, _i)			  \
+	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
+	     _i < MAX_NUM_VFS;				  \
+	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
new file mode 100644
index 0000000..72e69c0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -0,0 +1,1102 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include "qed.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	void *p_tlv;
+
+	/* This lock is released when we receive PF's response
+	 * in qed_send_msg2pf().
+	 * So, qed_vf_pf_prep() and qed_send_msg2pf()
+	 * must come in sequence.
+	 */
+	mutex_lock(&(p_iov->mutex));
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "preparing to send 0x%04x tlv over vf pf channel\n",
+		   type);
+
+	/* Reset Request offset */
+	p_iov->offset = (u8 *)p_iov->vf2pf_request;
+
+	/* Clear mailbox - both request and reply */
+	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+	/* Init type and length */
+	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
+
+	/* Init first tlv header */
+	((struct vfpf_first_tlv *)p_tlv)->reply_address =
+	    (u64)p_iov->pf2vf_reply_phys;
+
+	return p_tlv;
+}
+
+static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+{
+	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+	struct ustorm_trigger_vf_zone trigger;
+	struct ustorm_vf_zone *zone_data;
+	int rc = 0, time = 100;
+
+	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
+
+	/* output tlvs list */
+	qed_dp_tlv_list(p_hwfn, p_req);
+
+	/* need to add the END TLV to the message size */
+	resp_size += sizeof(struct channel_list_end_tlv);
+
+	/* Send TLVs over HW channel */
+	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+	trigger.vf_pf_msg_valid = 1;
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
+		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
+			     PXP_CONCRETE_FID_PFID),
+		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+		   &zone_data->non_trigger.vf_pf_msg_addr,
+		   *((u32 *)&trigger), &zone_data->trigger);
+
+	REG_WR(p_hwfn,
+	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
+	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+	REG_WR(p_hwfn,
+	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
+	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+	/* The message data must be written first, to prevent trigger before
+	 * data is written.
+	 */
+	wmb();
+
+	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
+
+	/* When the PF is done with the response, it writes back to the
+	 * `done' address. Poll until then.
+	 */
+	while ((!*done) && time) {
+		msleep(25);
+		time--;
+	}
+
+	if (!*done) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "VF <-- PF Timeout [Type %d]\n",
+			   p_req->first_tlv.tl.type);
+		rc = -EBUSY;
+		goto exit;
+	} else {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "PF response: %d [Type %d]\n",
+			   *done, p_req->first_tlv.tl.type);
+	}
+
+exit:
+	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+
+	return rc;
+}
+
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+
+static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+	u8 rx_count = 1, tx_count = 1, num_sbs = 1;
+	u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
+	bool resources_acquired = false;
+	struct vfpf_acquire_tlv *req;
+	int rc = 0, attempts = 0;
+
+	/* clear mailbox and prep first tlv */
+	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+	/* start filling the request */
+	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+	req->resc_request.num_rxqs = rx_count;
+	req->resc_request.num_txqs = tx_count;
+	req->resc_request.num_sbs = num_sbs;
+	req->resc_request.num_mac_filters = num_mac;
+	req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+
+	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
+	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+
+	/* Fill capability field with any non-deprecated config we support */
+	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+
+	/* pf 2 vf bulletin board address */
+	req->bulletin_addr = p_iov->bulletin.phys;
+	req->bulletin_size = p_iov->bulletin.size;
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
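+	/* Negotiate with the PF: if it reports insufficient resources, retry
+	 * with the amounts it recommends, up to VF_ACQUIRE_THRESH attempts.
+	 */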
+	while (!resources_acquired) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV, "attempting to acquire resources\n");
+
+		/* send acquire request */
+		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+		if (rc)
+			return rc;
+
+		/* copy acquire response from buffer to p_hwfn */
+		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
+
+		attempts++;
+
+		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+			/* PF agrees to allocate our resources */
+			if (!(resp->pfdev_info.capabilities &
+			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
+				DP_INFO(p_hwfn,
+					"PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
+				return -EINVAL;
+			}
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
+			resources_acquired = true;
+		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
+			   attempts < VF_ACQUIRE_THRESH) {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "PF unwilling to fulfill resource request. Try PF recommended amount\n");
+
+			/* humble our request */
+			req->resc_request.num_txqs = resp->resc.num_txqs;
+			req->resc_request.num_rxqs = resp->resc.num_rxqs;
+			req->resc_request.num_sbs = resp->resc.num_sbs;
+			req->resc_request.num_mac_filters =
+			    resp->resc.num_mac_filters;
+			req->resc_request.num_vlan_filters =
+			    resp->resc.num_vlan_filters;
+
+			/* Clear response buffer */
+			memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+		} else {
+			DP_ERR(p_hwfn,
+			       "PF returned error %d to VF acquisition request\n",
+			       resp->hdr.status);
+			return -EAGAIN;
+		}
+	}
+
+	/* Update bulletin board size with response from PF */
+	p_iov->bulletin.size = resp->bulletin_size;
+
+	/* get HW info */
+	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
+	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;
+
+	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;
+
+	/* Learn of the possibility of CMT */
+	if (IS_LEAD_HWFN(p_hwfn)) {
+		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
+			DP_NOTICE(p_hwfn, "100g VF\n");
+			p_hwfn->cdev->num_hwfns = 2;
+		}
+	}
+
+	return 0;
+}
+
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+	struct qed_vf_iov *p_iov;
+	u32 reg;
+
+	/* Set number of hwfns - might be overridden once leading hwfn learns
+	 * actual configuration from PF.
+	 */
+	if (IS_LEAD_HWFN(p_hwfn))
+		p_hwfn->cdev->num_hwfns = 1;
+
+	/* Set the doorbell bar. Assumption: regview is set */
+	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+					  PXP_VF_BAR0_START_DQ;
+
+	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
+	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
+
+	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
+	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
+
+	/* Allocate vf sriov info */
+	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
+	if (!p_iov) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate vf2pf msg */
+	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+						  sizeof(union vfpf_tlvs),
+						  &p_iov->vf2pf_request_phys,
+						  GFP_KERNEL);
+	if (!p_iov->vf2pf_request) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to allocate `vf2pf_request' DMA memory\n");
+		goto free_p_iov;
+	}
+
+	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+						sizeof(union pfvf_tlvs),
+						&p_iov->pf2vf_reply_phys,
+						GFP_KERNEL);
+	if (!p_iov->pf2vf_reply) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to allocate `pf2vf_reply' DMA memory\n");
+		goto free_vf2pf_request;
+	}
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
+		   p_iov->vf2pf_request,
+		   (u64) p_iov->vf2pf_request_phys,
+		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
+
+	/* Allocate Bulletin board */
+	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
+	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+						    p_iov->bulletin.size,
+						    &p_iov->bulletin.phys,
+						    GFP_KERNEL);
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
+		   p_iov->bulletin.p_virt,
+		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);
+
+	mutex_init(&p_iov->mutex);
+
+	p_hwfn->vf_iov_info = p_iov;
+
+	p_hwfn->hw_info.personality = QED_PCI_ETH;
+
+	return qed_vf_pf_acquire(p_hwfn);
+
+free_vf2pf_request:
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(union vfpf_tlvs),
+			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
+free_p_iov:
+	kfree(p_iov);
+
+	return -ENOMEM;
+}
+
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+			u8 rx_qid,
+			u16 sb,
+			u8 sb_index,
+			u16 bd_max_bytes,
+			dma_addr_t bd_chain_phys_addr,
+			dma_addr_t cqe_pbl_addr,
+			u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_start_queue_resp_tlv *resp;
+	struct vfpf_start_rxq_tlv *req;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+	req->rx_qid = rx_qid;
+	req->cqe_pbl_addr = cqe_pbl_addr;
+	req->cqe_pbl_size = cqe_pbl_size;
+	req->rxq_addr = bd_chain_phys_addr;
+	req->hw_sb = sb;
+	req->sb_index = sb_index;
+	req->bd_max_bytes = bd_max_bytes;
+	req->stat_id = -1;
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	resp = &p_iov->pf2vf_reply->queue_start;
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+	if (rc)
+		return rc;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		return -EINVAL;
+
+	/* Learn the address of the producer from the response */
+	if (pp_prod) {
+		u64 init_prod_val = 0;
+
+		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
+			   rx_qid, *pp_prod, resp->offset);
+
+		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+				  (u32 *)&init_prod_val);
+	}
+
+	return rc;
+}
+
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct vfpf_stop_rxqs_tlv *req;
+	struct pfvf_def_resp_tlv *resp;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+	req->rx_qid = rx_qid;
+	req->num_rxqs = 1;
+	req->cqe_completion = cqe_completion;
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	resp = &p_iov->pf2vf_reply->default_resp;
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+	if (rc)
+		return rc;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		return -EINVAL;
+
+	return rc;
+}
+
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+			u16 tx_queue_id,
+			u16 sb,
+			u8 sb_index,
+			dma_addr_t pbl_addr,
+			u16 pbl_size, void __iomem **pp_doorbell)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct vfpf_start_txq_tlv *req;
+	struct pfvf_def_resp_tlv *resp;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+	req->tx_qid = tx_queue_id;
+
+	/* Tx */
+	req->pbl_addr = pbl_addr;
+	req->pbl_size = pbl_size;
+	req->hw_sb = sb;
+	req->sb_index = sb_index;
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	resp = &p_iov->pf2vf_reply->default_resp;
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+	if (rc)
+		return rc;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		return -EINVAL;
+
+	if (pp_doorbell) {
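+		/* Derive the doorbell address from the CID the PF assigned */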
+		u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+
+		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+					     qed_db_addr(cid, DQ_DEMS_LEGACY);
+	}
+
+	return rc;
+}
+
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct vfpf_stop_txqs_tlv *req;
+	struct pfvf_def_resp_tlv *resp;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+	req->tx_qid = tx_qid;
+	req->num_txqs = 1;
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	resp = &p_iov->pf2vf_reply->default_resp;
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+	if (rc)
+		return rc;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		return -EINVAL;
+
+	return rc;
+}
+
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+			  u8 vport_id,
+			  u16 mtu,
+			  u8 inner_vlan_removal,
+			  enum qed_tpa_mode tpa_mode,
+			  u8 max_buffers_per_cqe, u8 only_untagged)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct vfpf_vport_start_tlv *req;
+	struct pfvf_def_resp_tlv *resp;
+	int rc, i;
+
+	/* clear mailbox and prep first tlv */
+	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+	req->mtu = mtu;
+	req->vport_id = vport_id;
+	req->inner_vlan_removal = inner_vlan_removal;
+	req->tpa_mode = tpa_mode;
+	req->max_buffers_per_cqe = max_buffers_per_cqe;
+	req->only_untagged = only_untagged;
+
+	/* status blocks */
+	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
+		if (p_hwfn->sbs_info[i])
+			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	resp = &p_iov->pf2vf_reply->default_resp;
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+	if (rc)
+		return rc;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		return -EINVAL;
+
+	return rc;
+}
+
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+		       sizeof(struct vfpf_first_tlv));
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+	if (rc)
+		return rc;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		return -EINVAL;
+
+	return rc;
+}
+
+static bool
+qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
+				  struct qed_sp_vport_update_params *p_data,
+				  u16 tlv)
+{
+	switch (tlv) {
+	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
+		return !!(p_data->update_vport_active_rx_flg ||
+			  p_data->update_vport_active_tx_flg);
+	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
+		return !!p_data->update_tx_switching_flg;
+	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
+		return !!p_data->update_inner_vlan_removal_flg;
+	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
+		return !!p_data->update_accept_any_vlan_flg;
+	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
+		return !!p_data->update_approx_mcast_flg;
+	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
+		return !!(p_data->accept_flags.update_rx_mode_config ||
+			  p_data->accept_flags.update_tx_mode_config);
+	case CHANNEL_TLV_VPORT_UPDATE_RSS:
+		return !!p_data->rss_params;
+	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
+		return !!p_data->sge_tpa_params;
+	default:
+		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
+			tlv);
+		return false;
+	}
+}
+
+static void
+qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
+				  struct qed_sp_vport_update_params *p_data)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_def_resp_tlv *p_resp;
+	u16 tlv;
+
+	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
+		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
+			continue;
+
+		p_resp = (struct pfvf_def_resp_tlv *)
+			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
+						  tlv);
+		if (p_resp && p_resp->hdr.status)
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "TLV[%d] Configuration %s\n",
+				   tlv,
+				   (p_resp->hdr.status == PFVF_STATUS_SUCCESS)
+				   ? "succeeded" : "failed");
+	}
+}
+
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+			   struct qed_sp_vport_update_params *p_params)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct vfpf_vport_update_tlv *req;
+	struct pfvf_def_resp_tlv *resp;
+	u8 update_rx, update_tx;
+	u32 resp_size = 0;
+	u16 size, tlv;
+	int rc;
+
+	resp = &p_iov->pf2vf_reply->default_resp;
+	resp_size = sizeof(*resp);
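+	/* Every extended TLV below adds a default response TLV to the reply */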
+
+	update_rx = p_params->update_vport_active_rx_flg;
+	update_tx = p_params->update_vport_active_tx_flg;
+
+	/* clear mailbox and prep header tlv */
+	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
+	/* Prepare extended tlvs */
+	if (update_rx || update_tx) {
+		struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
+		size = sizeof(struct vfpf_vport_update_activate_tlv);
+		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+					size);
+		resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+		if (update_rx) {
+			p_act_tlv->update_rx = update_rx;
+			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+		}
+
+		if (update_tx) {
+			p_act_tlv->update_tx = update_tx;
+			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+		}
+	}
+
+	if (p_params->update_tx_switching_flg) {
+		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+
+		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+					      tlv, size);
+		resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+	}
+
+	if (p_params->update_approx_mcast_flg) {
+		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+
+		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
+		resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+		memcpy(p_mcast_tlv->bins, p_params->bins,
+		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+	}
+
+	update_rx = p_params->accept_flags.update_rx_mode_config;
+	update_tx = p_params->accept_flags.update_tx_mode_config;
+
+	if (update_rx || update_tx) {
+		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+
+		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+		resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+		if (update_rx) {
+			p_accept_tlv->update_rx_mode = update_rx;
+			p_accept_tlv->rx_accept_filter =
+			    p_params->accept_flags.rx_accept_filter;
+		}
+
+		if (update_tx) {
+			p_accept_tlv->update_tx_mode = update_tx;
+			p_accept_tlv->tx_accept_filter =
+			    p_params->accept_flags.tx_accept_filter;
+		}
+	}
+
+	if (p_params->rss_params) {
+		struct qed_rss_params *rss_params = p_params->rss_params;
+		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+
+		size = sizeof(struct vfpf_vport_update_rss_tlv);
+		p_rss_tlv = qed_add_tlv(p_hwfn,
+					&p_iov->offset,
+					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+		resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+		if (rss_params->update_rss_config)
+			p_rss_tlv->update_rss_flags |=
+			    VFPF_UPDATE_RSS_CONFIG_FLAG;
+		if (rss_params->update_rss_capabilities)
+			p_rss_tlv->update_rss_flags |=
+			    VFPF_UPDATE_RSS_CAPS_FLAG;
+		if (rss_params->update_rss_ind_table)
+			p_rss_tlv->update_rss_flags |=
+			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+		if (rss_params->update_rss_key)
+			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+		p_rss_tlv->rss_enable = rss_params->rss_enable;
+		p_rss_tlv->rss_caps = rss_params->rss_caps;
+		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+		memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
+		       sizeof(rss_params->rss_ind_table));
+		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
+		       sizeof(rss_params->rss_key));
+	}
+
+	if (p_params->update_accept_any_vlan_flg) {
+		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+
+		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+
+		resp_size += sizeof(struct pfvf_def_resp_tlv);
+		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+		p_any_vlan_tlv->update_accept_any_vlan_flg =
+		    p_params->update_accept_any_vlan_flg;
+	}
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+	if (rc)
+		return rc;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		return -EINVAL;
+
+	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+	return rc;
+}
+
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_def_resp_tlv *resp;
+	struct vfpf_first_tlv *req;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	resp = &p_iov->pf2vf_reply->default_resp;
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+	if (rc)
+		return rc;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		return -EAGAIN;
+
+	p_hwfn->b_int_enabled = 0;
+
+	return 0;
+}
+
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_def_resp_tlv *resp;
+	struct vfpf_first_tlv *req;
+	u32 size;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	resp = &p_iov->pf2vf_reply->default_resp;
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
+		rc = -EAGAIN;
+
+	p_hwfn->b_int_enabled = 0;
+
+	if (p_iov->vf2pf_request)
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  sizeof(union vfpf_tlvs),
+				  p_iov->vf2pf_request,
+				  p_iov->vf2pf_request_phys);
+	if (p_iov->pf2vf_reply)
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  sizeof(union pfvf_tlvs),
+				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+
+	if (p_iov->bulletin.p_virt) {
+		size = sizeof(struct qed_bulletin_content);
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  size,
+				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
+	}
+
+	kfree(p_hwfn->vf_iov_info);
+	p_hwfn->vf_iov_info = NULL;
+
+	return rc;
+}
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+			    struct qed_filter_mcast *p_filter_cmd)
+{
+	struct qed_sp_vport_update_params sp_params;
+	int i;
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	sp_params.update_approx_mcast_flg = 1;
+
+	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+			u32 bit;
+
+			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+			__set_bit(bit, sp_params.bins);
+		}
+	}
+
+	qed_vf_pf_vport_update(p_hwfn, &sp_params);
+}
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+			   struct qed_filter_ucast *p_ucast)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct vfpf_ucast_filter_tlv *req;
+	struct pfvf_def_resp_tlv *resp;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
+	req->opcode = (u8) p_ucast->opcode;
+	req->type = (u8) p_ucast->type;
+	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
+	req->vlan = p_ucast->vlan;
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	resp = &p_iov->pf2vf_reply->default_resp;
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+	if (rc)
+		return rc;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		return -EAGAIN;
+
+	return 0;
+}
+
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+	int rc;
+
+	/* clear mailbox and prep first tlv */
+	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+		       sizeof(struct vfpf_first_tlv));
+
+	/* add list termination tlv */
+	qed_add_tlv(p_hwfn, &p_iov->offset,
+		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+	if (rc)
+		return rc;
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+		return -EINVAL;
+
+	return 0;
+}
+
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+	if (!p_iov) {
+		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
+		return 0;
+	}
+
+	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
+{
+	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct qed_bulletin_content shadow;
+	u32 crc, crc_size;
+
+	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+	*p_change = 0;
+
+	/* Need to guarantee PF is not in the middle of writing it */
+	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+	/* If version did not update, no need to do anything */
+	if (shadow.version == p_iov->bulletin_shadow.version)
+		return 0;
+
+	/* Verify the bulletin we see is valid */
+	crc = crc32(0, (u8 *)&shadow + crc_size,
+		    p_iov->bulletin.size - crc_size);
+	if (crc != shadow.crc)
+		return -EAGAIN;
+
+	/* Set the shadow bulletin and process it */
+	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+		   "Read a bulletin update %08x\n", shadow.version);
+
+	*p_change = 1;
+
+	return 0;
+}
+
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+			      struct qed_mcp_link_params *p_params,
+			      struct qed_bulletin_content *p_bulletin)
+{
+	memset(p_params, 0, sizeof(*p_params));
+
+	p_params->speed.autoneg = p_bulletin->req_autoneg;
+	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+	p_params->loopback_mode = p_bulletin->req_loopback;
+}
+
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+			    struct qed_mcp_link_params *params)
+{
+	__qed_vf_get_link_params(p_hwfn, params,
+				 &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+			     struct qed_mcp_link_state *p_link,
+			     struct qed_bulletin_content *p_bulletin)
+{
+	memset(p_link, 0, sizeof(*p_link));
+
+	p_link->link_up = p_bulletin->link_up;
+	p_link->speed = p_bulletin->speed;
+	p_link->full_duplex = p_bulletin->full_duplex;
+	p_link->an = p_bulletin->autoneg;
+	p_link->an_complete = p_bulletin->autoneg_complete;
+	p_link->parallel_detection = p_bulletin->parallel_detection;
+	p_link->pfc_enabled = p_bulletin->pfc_enabled;
+	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+}
+
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+			   struct qed_mcp_link_state *link)
+{
+	__qed_vf_get_link_state(p_hwfn, link,
+				&(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+			    struct qed_mcp_link_capabilities *p_link_caps,
+			    struct qed_bulletin_content *p_bulletin)
+{
+	memset(p_link_caps, 0, sizeof(*p_link_caps));
+	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+			  struct qed_mcp_link_capabilities *p_link_caps)
+{
+	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
+			       &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
+}
+
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+	memcpy(port_mac,
+	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
+}
+
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
+{
+	struct qed_vf_iov *p_vf;
+
+	p_vf = p_hwfn->vf_iov_info;
+	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
+}
+
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+	struct qed_bulletin_content *bulletin;
+
+	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
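+	/* The VF may set any MAC unless the PF forced one via the bulletin */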
+	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+		return true;
+
+	/* Forbid VF from changing a MAC enforced by PF */
+	if (ether_addr_equal(bulletin->mac, mac))
+		return false;
+
+	return false;
+}
+
+bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+				    u8 *dst_mac, u8 *p_is_forced)
+{
+	struct qed_bulletin_content *bulletin;
+
+	bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+		if (p_is_forced)
+			*p_is_forced = 1;
+	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
+		if (p_is_forced)
+			*p_is_forced = 0;
+	} else {
+		return false;
+	}
+
+	ether_addr_copy(dst_mac, bulletin->mac);
+
+	return true;
+}
+
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+			   u16 *fw_major, u16 *fw_minor,
+			   u16 *fw_rev, u16 *fw_eng)
+{
+	struct pf_vf_pfdev_info *info;
+
+	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+	*fw_major = info->fw_major;
+	*fw_minor = info->fw_minor;
+	*fw_rev = info->fw_rev;
+	*fw_eng = info->fw_eng;
+}
+
+static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
+{
+	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
+	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
+	void *cookie = hwfn->cdev->ops_cookie;
+
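+	/* Propagate a PF-forced MAC from the bulletin to the protocol driver */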
+	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
+						      &is_mac_forced);
+	if (is_mac_exist && is_mac_forced && cookie)
+		ops->force_mac(cookie, mac);
+
+	/* Always update link configuration according to bulletin */
+	qed_link_update(hwfn);
+}
+
+void qed_iov_vf_task(struct work_struct *work)
+{
+	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+					     iov_task.work);
+	u8 change = 0;
+
+	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+		return;
+
+	/* Handle bulletin board changes */
+	qed_vf_read_bulletin(hwfn, &change);
+	if (change)
+		qed_handle_bulletin_change(hwfn);
+
+	/* As VF is polling bulletin board, need to constantly re-schedule */
+	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
new file mode 100644
index 0000000..b82fda9
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -0,0 +1,990 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_VF_H
+#define _QED_VF_H
+
+#include "qed_l2.h"
+#include "qed_mcp.h"
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10
+
+struct vf_pf_resc_request {
+	u8 num_rxqs;
+	u8 num_txqs;
+	u8 num_sbs;
+	u8 num_mac_filters;
+	u8 num_vlan_filters;
+	u8 num_mc_filters;
+	u16 padding;
+};
+
+struct hw_sb_info {
+	u16 hw_sb_id;
+	u8 sb_qid;
+	u8 padding[5];
+};
+
+#define TLV_BUFFER_SIZE                 1024
+
+enum {
+	PFVF_STATUS_WAITING,
+	PFVF_STATUS_SUCCESS,
+	PFVF_STATUS_FAILURE,
+	PFVF_STATUS_NOT_SUPPORTED,
+	PFVF_STATUS_NO_RESOURCE,
+	PFVF_STATUS_FORCED,
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+	u16 type;
+	u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate the
+ * response buffer address
+ */
+struct vfpf_first_tlv {
+	struct channel_tlv tl;
+	u32 padding;
+	u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+	struct channel_tlv tl;
+	u8 status;
+	u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+	struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+	struct channel_tlv tl;
+	u8 padding[4];
+};
+
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+
+struct vfpf_acquire_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	struct vf_pf_vfdev_info {
+#define VFPF_ACQUIRE_CAP_OBSOLETE	(1 << 0)
+#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
+		u64 capabilities;
+		u8 fw_major;
+		u8 fw_minor;
+		u8 fw_revision;
+		u8 fw_engineering;
+		u32 driver_version;
+		u16 opaque_fid;	/* ME register value */
+		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
+		u8 padding[5];
+	} vfdev_info;
+
+	struct vf_pf_resc_request resc_request;
+
+	u64 bulletin_addr;
+	u32 bulletin_size;
+	u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+	struct channel_tlv tl;
+
+	u8 update_rss_flags;
+#define VFPF_UPDATE_RSS_CONFIG_FLAG       BIT(0)
+#define VFPF_UPDATE_RSS_CAPS_FLAG         BIT(1)
+#define VFPF_UPDATE_RSS_IND_TABLE_FLAG    BIT(2)
+#define VFPF_UPDATE_RSS_KEY_FLAG          BIT(3)
+
+	u8 rss_enable;
+	u8 rss_caps;
+	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
+	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+	u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
+
+struct pfvf_storm_stats {
+	u32 address;
+	u32 len;
+};
+
+struct pfvf_stats_info {
+	struct pfvf_storm_stats mstats;
+	struct pfvf_storm_stats pstats;
+	struct pfvf_storm_stats tstats;
+	struct pfvf_storm_stats ustats;
+};
+
+struct pfvf_acquire_resp_tlv {
+	struct pfvf_tlv hdr;
+
+	struct pf_vf_pfdev_info {
+		u32 chip_num;
+		u32 mfw_ver;
+
+		u16 fw_major;
+		u16 fw_minor;
+		u16 fw_rev;
+		u16 fw_eng;
+
+		u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
+#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
+/* There are old PF versions where the PF might mistakenly override the
+ * version-based sanity mechanism and allow a VF that can't be supported to
+ * pass the acquisition phase.
+ * To overcome this, PFs now indicate that they're past that point, and new
+ * VFs will fail probe on older PFs that fail to do so.
+ */
+#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)
+
+		u16 db_size;
+		u8 indices_per_sb;
+		u8 os_type;
+
+		/* These should match the PF's qed_dev values */
+		u16 chip_rev;
+		u8 dev_type;
+
+		u8 padding;
+
+		struct pfvf_stats_info stats_info;
+
+		u8 port_mac[ETH_ALEN];
+		u8 padding2[2];
+	} pfdev_info;
+
+	struct pf_vf_resc {
+#define PFVF_MAX_QUEUES_PER_VF		16
+#define PFVF_MAX_SBS_PER_VF		16
+		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+		u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+		u8 num_rxqs;
+		u8 num_txqs;
+		u8 num_sbs;
+		u8 num_mac_filters;
+		u8 num_vlan_filters;
+		u8 num_mc_filters;
+		u8 padding[2];
+	} resc;
+
+	u32 bulletin_size;
+	u32 padding;
+};
+
+struct pfvf_start_queue_resp_tlv {
+	struct pfvf_tlv hdr;
+	u32 offset;		/* offset to consumer/producer of queue */
+	u8 padding[4];
+};
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	/* physical addresses */
+	u64 rxq_addr;
+	u64 deprecated_sge_addr;
+	u64 cqe_pbl_addr;
+
+	u16 cqe_pbl_size;
+	u16 hw_sb;
+	u16 rx_qid;
+	u16 hc_rate;		/* desired interrupts per sec. */
+
+	u16 bd_max_bytes;
+	u16 stat_id;
+	u8 sb_index;
+	u8 padding[3];
+};
+
+struct vfpf_start_txq_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	/* physical addresses */
+	u64 pbl_addr;
+	u16 pbl_size;
+	u16 stat_id;
+	u16 tx_qid;
+	u16 hw_sb;
+
+	u32 flags;		/* VFPF_QUEUE_FLG_X flags */
+	u16 hc_rate;		/* desired interrupts per sec. */
+	u8 sb_index;
+	u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	u16 rx_qid;
+	u8 num_rxqs;
+	u8 cqe_completion;
+	u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	u16 tx_qid;
+	u8 num_txqs;
+	u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+	u16 rx_qid;
+	u8 num_rxqs;
+	u8 flags;
+#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG    BIT(0)
+#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG          BIT(1)
+#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG        BIT(2)
+
+	u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+	u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
+#define VFPF_Q_FILTER_SET_MAC           0x100	/* set/clear */
+
+	u8 mac[ETH_ALEN];
+	u16 vlan_tag;
+
+	u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+	u32 tpa_mode;
+	u16 dep1;
+	u16 mtu;
+
+	u8 vport_id;
+	u8 inner_vlan_removal;
+
+	u8 only_untagged;
+	u8 max_buffers_per_cqe;
+
+	u8 padding[4];
+};
+
+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
+struct vfpf_vport_update_activate_tlv {
+	struct channel_tlv tl;
+	u8 update_rx;
+	u8 update_tx;
+	u8 active_rx;
+	u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+	struct channel_tlv tl;
+	u8 tx_switching;
+	u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+	struct channel_tlv tl;
+	u8 remove_vlan;
+	u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+	struct channel_tlv tl;
+	u8 padding[4];
+
+	u64 bins[8];
+};
+
+struct vfpf_vport_update_accept_param_tlv {
+	struct channel_tlv tl;
+	u8 update_rx_mode;
+	u8 update_tx_mode;
+	u8 rx_accept_filter;
+	u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+	struct channel_tlv tl;
+	u8 update_accept_any_vlan_flg;
+	u8 accept_any_vlan;
+
+	u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+	struct channel_tlv tl;
+
+	u16 sge_tpa_flags;
+#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
+#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
+#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
+#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
+#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)
+
+	u8 update_sge_tpa_flags;
+#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
+#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
+#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)
+
+	u8 max_buffers_per_cqe;
+
+	u16 deprecated_sge_buff_size;
+	u16 tpa_max_size;
+	u16 tpa_min_size_to_start;
+	u16 tpa_min_size_to_cont;
+
+	u8 tpa_max_aggs_num;
+	u8 padding[7];
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+	struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	u8 opcode;
+	u8 type;
+
+	u8 mac[ETH_ALEN];
+
+	u16 vlan;
+	u16 padding[3];
+};
+
+struct tlv_buffer_size {
+	u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+	struct vfpf_first_tlv first_tlv;
+	struct vfpf_acquire_tlv acquire;
+	struct vfpf_start_rxq_tlv start_rxq;
+	struct vfpf_start_txq_tlv start_txq;
+	struct vfpf_stop_rxqs_tlv stop_rxqs;
+	struct vfpf_stop_txqs_tlv stop_txqs;
+	struct vfpf_update_rxq_tlv update_rxq;
+	struct vfpf_vport_start_tlv start_vport;
+	struct vfpf_vport_update_tlv vport_update;
+	struct vfpf_ucast_filter_tlv ucast_filter;
+	struct channel_list_end_tlv list_end;
+	struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+	struct pfvf_def_resp_tlv default_resp;
+	struct pfvf_acquire_resp_tlv acquire_resp;
+	struct tlv_buffer_size tlv_buf_size;
+	struct pfvf_start_queue_resp_tlv queue_start;
+};
+
+enum qed_bulletin_bit {
+	/* Alert the VF that a forced MAC was set by the PF */
+	MAC_ADDR_FORCED = 0,
+	/* Alert the VF that a forced VLAN was set by the PF */
+	VLAN_ADDR_FORCED = 2,
+
+	/* Indicate that `default_only_untagged' contains actual data */
+	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+	/* Alert the VF that a suggested MAC was sent by the PF.
+	 * MAC_ADDR will be disabled if MAC_ADDR_FORCED is set.
+	 */
+	VFPF_BULLETIN_MAC_ADDR = 5
+};
+
+struct qed_bulletin_content {
+	/* crc of structure to ensure it is not in mid-update */
+	u32 crc;
+
+	u32 version;
+
+	/* bitmap indicating which fields hold valid values */
+	u64 valid_bitmap;
+
+	/* used for MAC_ADDR or MAC_ADDR_FORCED */
+	u8 mac[ETH_ALEN];
+
+	/* If valid, 1 => only untagged Rx if no vlan is configured */
+	u8 default_only_untagged;
+	u8 padding;
+
+	/* The following is a 'copy' of qed_mcp_link_state,
+	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
+	 * possible the structs will increase further along the road we cannot
+	 * have it here; Instead we need to have all of its fields.
+	 */
+	u8 req_autoneg;
+	u8 req_autoneg_pause;
+	u8 req_forced_rx;
+	u8 req_forced_tx;
+	u8 padding2[4];
+
+	u32 req_adv_speed;
+	u32 req_forced_speed;
+	u32 req_loopback;
+	u32 padding3;
+
+	u8 link_up;
+	u8 full_duplex;
+	u8 autoneg;
+	u8 autoneg_complete;
+	u8 parallel_detection;
+	u8 pfc_enabled;
+	u8 partner_tx_flow_ctrl_en;
+	u8 partner_rx_flow_ctrl_en;
+	u8 partner_adv_pause;
+	u8 sfp_tx_fault;
+	u8 padding4[6];
+
+	u32 speed;
+	u32 partner_adv_speed;
+
+	u32 capability_speed;
+
+	/* Forced vlan */
+	u16 pvid;
+	u16 padding5;
+};
+
+struct qed_bulletin {
+	dma_addr_t phys;
+	struct qed_bulletin_content *p_virt;
+	u32 size;
+};
+
+enum {
+	CHANNEL_TLV_NONE,	/* ends tlv sequence */
+	CHANNEL_TLV_ACQUIRE,
+	CHANNEL_TLV_VPORT_START,
+	CHANNEL_TLV_VPORT_UPDATE,
+	CHANNEL_TLV_VPORT_TEARDOWN,
+	CHANNEL_TLV_START_RXQ,
+	CHANNEL_TLV_START_TXQ,
+	CHANNEL_TLV_STOP_RXQS,
+	CHANNEL_TLV_STOP_TXQS,
+	CHANNEL_TLV_UPDATE_RXQ,
+	CHANNEL_TLV_INT_CLEANUP,
+	CHANNEL_TLV_CLOSE,
+	CHANNEL_TLV_RELEASE,
+	CHANNEL_TLV_LIST_END,
+	CHANNEL_TLV_UCAST_FILTER,
+	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+	CHANNEL_TLV_VPORT_UPDATE_MCAST,
+	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+	CHANNEL_TLV_VPORT_UPDATE_RSS,
+	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+	CHANNEL_TLV_MAX,
+
+	/* Required for iterating over vport-update tlvs.
+	 * Will break if the vport-update tlvs are not sequential.
+	 */
+	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
+};
+
+/* This data is held in the qed_hwfn structure for VFs only. */
+struct qed_vf_iov {
+	union vfpf_tlvs *vf2pf_request;
+	dma_addr_t vf2pf_request_phys;
+	union pfvf_tlvs *pf2vf_reply;
+	dma_addr_t pf2vf_reply_phys;
+
+	/* Should be taken whenever the mailbox buffers are accessed */
+	struct mutex mutex;
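+	/* Current write position while a TLV chain is being built */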
+	u8 *offset;
+
+	/* Bulletin Board */
+	struct qed_bulletin bulletin;
+	struct qed_bulletin_content bulletin_shadow;
+
+	/* we set aside a copy of the acquire response */
+	struct pfvf_acquire_resp_tlv acquire_resp;
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief Read the VF bulletin and act on it if needed
+ *
+ * @param p_hwfn
+ * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
+
+/**
+ * @brief Get link parameters for VF from qed
+ *
+ * @param p_hwfn
+ * @param params - the link params structure to be filled for the VF
+ */
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+			    struct qed_mcp_link_params *params);
+
+/**
+ * @brief Get link state for VF from qed
+ *
+ * @param p_hwfn
+ * @param link - the link state structure to be filled for the VF
+ */
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+			   struct qed_mcp_link_state *link);
+
+/**
+ * @brief Get link capabilities for VF from qed
+ *
+ * @param p_hwfn
+ * @param p_link_caps - the link capabilities structure to be filled for the VF
+ */
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+			  struct qed_mcp_link_capabilities *p_link_caps);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by qed
+ *
+ *  @param p_hwfn
+ *  @param num_rxqs - allocated RX queues
+ */
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
+
+/**
+ * @brief Get port mac address for VF
+ *
+ * @param p_hwfn
+ * @param port_mac - destination location for port mac
+ */
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
+
+/**
+ * @brief Get number of VLAN filters allocated for VF by qed
+ *
+ *  @param p_hwfn
+ *  @param num_vlan_filters - allocated VLAN filters
+ */
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+				 u8 *num_vlan_filters);
+
+/**
+ * @brief Check if VF can set a MAC address
+ *
+ * @param p_hwfn
+ * @param mac
+ *
+ * @return bool
+ */
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
+
+/**
+ * @brief Set firmware version information in dev_info from VF's acquire response tlv
+ *
+ * @param p_hwfn
+ * @param fw_major
+ * @param fw_minor
+ * @param fw_rev
+ * @param fw_eng
+ */
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+			   u16 *fw_major, u16 *fw_minor,
+			   u16 *fw_rev, u16 *fw_eng);
+
+/**
+ * @brief hw preparation for VF
+ *      sends ACQUIRE message
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - start the RX Queue by sending a message to the PF
+ * @param p_hwfn
+ * @param rx_queue_id           - zero based within the VF
+ * @param sb                    - VF status block for this queue
+ * @param sb_index              - Index within the status block
+ * @param bd_max_bytes          - maximum number of bytes per bd
+ * @param bd_chain_phys_addr    - physical address of bd chain
+ * @param cqe_pbl_addr          - physical address of pbl
+ * @param cqe_pbl_size          - pbl size
+ * @param pp_prod               - pointer to the producer to be
+ *				  used in fastpath
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+			u8 rx_queue_id,
+			u16 sb,
+			u8 sb_index,
+			u16 bd_max_bytes,
+			dma_addr_t bd_chain_phys_addr,
+			dma_addr_t cqe_pbl_addr,
+			u16 cqe_pbl_size, void __iomem **pp_prod);
+
+/**
+ * @brief VF - start the TX queue by sending a message to the
+ *        PF.
+ *
+ * @param p_hwfn
+ * @param tx_queue_id           - zero based within the VF
+ * @param sb                    - status block for this queue
+ * @param sb_index              - index within the status block
+ * @param bd_chain_phys_addr    - physical address of tx chain
+ * @param pp_doorbell           - pointer to the address to which to
+ *                      write the doorbell
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+			u16 tx_queue_id,
+			u16 sb,
+			u8 sb_index,
+			dma_addr_t pbl_addr,
+			u16 pbl_size, void __iomem **pp_doorbell);
+
+/**
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param rx_qid
+ * @param cqe_completion
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+		       u16 rx_qid, bool cqe_completion);
+
+/**
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param tx_qid
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
+
+/**
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return int
+ */
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+			   struct qed_sp_vport_update_params *p_params);
+
+/**
+ *
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - free vf`s memories
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ *        sb_id. For VFs, IGU SBs don't have to be contiguous
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return INLINE u16
+ */
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief qed_vf_pf_vport_start - perform vport start for VF.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param mtu
+ * @param inner_vlan_removal
+ * @param tpa_mode
+ * @param max_buffers_per_cqe
+ * @param only_untagged - default behavior regarding vlan acceptance
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+			  u8 vport_id,
+			  u16 mtu,
+			  u8 inner_vlan_removal,
+			  enum qed_tpa_mode tpa_mode,
+			  u8 max_buffers_per_cqe, u8 only_untagged);
+
+/**
+ * @brief qed_vf_pf_vport_stop - stop the VF's vport
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+			   struct qed_filter_ucast *p_param);
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+			    struct qed_filter_mcast *p_filter_cmd);
+
+/**
+ * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link params in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_params - pointer to a struct to fill with link params
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+			      struct qed_mcp_link_params *p_params,
+			      struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link state in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link state
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+			     struct qed_mcp_link_state *p_link,
+			     struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link capabilities in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link capabilities
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+			    struct qed_mcp_link_capabilities *p_link_caps,
+			    struct qed_bulletin_content *p_bulletin);
+
+void qed_iov_vf_task(struct work_struct *work);
+#else
+static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+					  struct qed_mcp_link_params *params)
+{
+}
+
+static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+					 struct qed_mcp_link_state *link)
+{
+}
+
+static inline void
+qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+		     struct qed_mcp_link_capabilities *p_link_caps)
+{
+}
+
+static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+}
+
+static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+}
+
+static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+					       u8 *num_vlan_filters)
+{
+}
+
+static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+	return false;
+}
+
+static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+					 u16 *fw_major, u16 *fw_minor,
+					 u16 *fw_rev, u16 *fw_eng)
+{
+}
+
+static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+	return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+				      u8 rx_queue_id,
+				      u16 sb,
+				      u8 sb_index,
+				      u16 bd_max_bytes,
+				      dma_addr_t bd_chain_phys_adr,
+				      dma_addr_t cqe_pbl_addr,
+				      u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+	return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+				      u16 tx_queue_id,
+				      u16 sb,
+				      u8 sb_index,
+				      dma_addr_t pbl_addr,
+				      u16 pbl_size, void __iomem **pp_doorbell)
+{
+	return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+				     u16 rx_qid, bool cqe_completion)
+{
+	return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+	return -EINVAL;
+}
+
+static inline int
+qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+		       struct qed_sp_vport_update_params *p_params)
+{
+	return -EINVAL;
+}
+
+static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+	return -EINVAL;
+}
+
+static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+	return -EINVAL;
+}
+
+static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+	return 0;
+}
+
+static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+					u8 vport_id,
+					u16 mtu,
+					u8 inner_vlan_removal,
+					enum qed_tpa_mode tpa_mode,
+					u8 max_buffers_per_cqe,
+					u8 only_untagged)
+{
+	return -EINVAL;
+}
+
+static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+	return -EINVAL;
+}
+
+static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+					 struct qed_filter_ucast *p_param)
+{
+	return -EINVAL;
+}
+
+static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+					  struct qed_filter_mcast *p_filter_cmd)
+{
+}
+
+static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+	return -EINVAL;
+}
+
+static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+					    struct qed_mcp_link_params
+					    *p_params,
+					    struct qed_bulletin_content
+					    *p_bulletin)
+{
+}
+
+static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+					   struct qed_mcp_link_state *p_link,
+					   struct qed_bulletin_content
+					   *p_bulletin)
+{
+}
+
+static inline void
+__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+		       struct qed_mcp_link_capabilities *p_link_caps,
+		       struct qed_bulletin_content *p_bulletin)
+{
+}
+
+static inline void qed_iov_vf_task(struct work_struct *work)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index d023251..47d6b22 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -25,15 +25,13 @@
 
 #define QEDE_MAJOR_VERSION		8
 #define QEDE_MINOR_VERSION		7
-#define QEDE_REVISION_VERSION		0
-#define QEDE_ENGINEERING_VERSION	0
+#define QEDE_REVISION_VERSION		1
+#define QEDE_ENGINEERING_VERSION	20
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "."	\
 		__stringify(QEDE_MINOR_VERSION) "."		\
 		__stringify(QEDE_REVISION_VERSION) "."		\
 		__stringify(QEDE_ENGINEERING_VERSION)
 
-#define QEDE_ETH_INTERFACE_VERSION	300
-
 #define DRV_MODULE_SYM		qede
 
 struct qede_stats {
@@ -61,16 +59,16 @@
 
 	/* port */
 	u64 rx_64_byte_packets;
-	u64 rx_127_byte_packets;
-	u64 rx_255_byte_packets;
-	u64 rx_511_byte_packets;
-	u64 rx_1023_byte_packets;
-	u64 rx_1518_byte_packets;
-	u64 rx_1522_byte_packets;
-	u64 rx_2047_byte_packets;
-	u64 rx_4095_byte_packets;
-	u64 rx_9216_byte_packets;
-	u64 rx_16383_byte_packets;
+	u64 rx_65_to_127_byte_packets;
+	u64 rx_128_to_255_byte_packets;
+	u64 rx_256_to_511_byte_packets;
+	u64 rx_512_to_1023_byte_packets;
+	u64 rx_1024_to_1518_byte_packets;
+	u64 rx_1519_to_1522_byte_packets;
+	u64 rx_1519_to_2047_byte_packets;
+	u64 rx_2048_to_4095_byte_packets;
+	u64 rx_4096_to_9216_byte_packets;
+	u64 rx_9217_to_16383_byte_packets;
 	u64 rx_crc_errors;
 	u64 rx_mac_crtl_frames;
 	u64 rx_pause_frames;
@@ -114,6 +112,10 @@
 	u32				dp_module;
 	u8				dp_level;
 
+	u32 flags;
+#define QEDE_FLAG_IS_VF	BIT(0)
+#define IS_VF(edev)	(!!((edev)->flags & QEDE_FLAG_IS_VF))
+
 	const struct qed_eth_ops	*ops;
 
 	struct qed_dev_eth_info	dev_info;
@@ -156,6 +158,10 @@
 	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
 	struct qede_stats		stats;
+#define QEDE_RSS_INDIR_INITED	BIT(0)
+#define QEDE_RSS_KEY_INITED	BIT(1)
+#define QEDE_RSS_CAPS_INITED	BIT(2)
+	u32 rss_params_inited; /* bit-field to track initialized rss params */
 	struct qed_update_vport_rss_params	rss_params;
 	u16			q_num_rx_buffers; /* Must be a power of two */
 	u16			q_num_tx_buffers; /* Must be a power of two */
@@ -167,6 +173,8 @@
 	bool accept_any_vlan;
 	struct delayed_work		sp_task;
 	unsigned long			sp_flags;
+	u16				vxlan_dst_port;
+	u16				geneve_dst_port;
 };
 
 enum QEDE_STATE {
@@ -286,8 +294,11 @@
 
 #define QEDE_CSUM_ERROR			BIT(0)
 #define QEDE_CSUM_UNNECESSARY		BIT(1)
+#define QEDE_TUNN_CSUM_UNNECESSARY	BIT(2)
 
-#define QEDE_SP_RX_MODE		1
+#define QEDE_SP_RX_MODE			1
+#define QEDE_SP_VXLAN_PORT_CONFIG	2
+#define QEDE_SP_GENEVE_PORT_CONFIG	3
 
 union qede_reload_args {
 	u16 mtu;
@@ -301,6 +312,10 @@
 		 union qede_reload_args *args);
 int qede_change_mtu(struct net_device *dev, int new_mtu);
 void qede_fill_by_demand_stats(struct qede_dev *edev);
+bool qede_has_rx_work(struct qede_rx_queue *rxq);
+int qede_txq_has_work(struct qede_tx_queue *txq);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
+			     u8 count);
 
 #define RX_RING_SIZE_POW	13
 #define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index c49dc10..1bc7535 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -9,6 +9,7 @@
 #include <linux/version.h>
 #include <linux/types.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/string.h>
 #include <linux/pci.h>
@@ -27,6 +28,9 @@
 #define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
 #define QEDE_RQSTAT(stat_name) \
 	 {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+
+#define QEDE_SELFTEST_POLL_COUNT 100
+
 static const struct {
 	u64 offset;
 	char string[ETH_GSTRING_LEN];
@@ -59,16 +63,16 @@
 	QEDE_STAT(tx_bcast_pkts),
 
 	QEDE_PF_STAT(rx_64_byte_packets),
-	QEDE_PF_STAT(rx_127_byte_packets),
-	QEDE_PF_STAT(rx_255_byte_packets),
-	QEDE_PF_STAT(rx_511_byte_packets),
-	QEDE_PF_STAT(rx_1023_byte_packets),
-	QEDE_PF_STAT(rx_1518_byte_packets),
-	QEDE_PF_STAT(rx_1522_byte_packets),
-	QEDE_PF_STAT(rx_2047_byte_packets),
-	QEDE_PF_STAT(rx_4095_byte_packets),
-	QEDE_PF_STAT(rx_9216_byte_packets),
-	QEDE_PF_STAT(rx_16383_byte_packets),
+	QEDE_PF_STAT(rx_65_to_127_byte_packets),
+	QEDE_PF_STAT(rx_128_to_255_byte_packets),
+	QEDE_PF_STAT(rx_256_to_511_byte_packets),
+	QEDE_PF_STAT(rx_512_to_1023_byte_packets),
+	QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
+	QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
+	QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
+	QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
+	QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
+	QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
 	QEDE_PF_STAT(tx_64_byte_packets),
 	QEDE_PF_STAT(tx_65_to_127_byte_packets),
 	QEDE_PF_STAT(tx_128_to_255_byte_packets),
@@ -116,11 +120,39 @@
 
 #define QEDE_NUM_STATS	ARRAY_SIZE(qede_stats_arr)
 
+enum {
+	QEDE_PRI_FLAG_CMT,
+	QEDE_PRI_FLAG_LEN,
+};
+
+static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+	"Coupled-Function",
+};
+
+enum qede_ethtool_tests {
+	QEDE_ETHTOOL_INT_LOOPBACK,
+	QEDE_ETHTOOL_INTERRUPT_TEST,
+	QEDE_ETHTOOL_MEMORY_TEST,
+	QEDE_ETHTOOL_REGISTER_TEST,
+	QEDE_ETHTOOL_CLOCK_TEST,
+	QEDE_ETHTOOL_TEST_MAX
+};
+
+static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
+	"Internal loopback (offline)",
+	"Interrupt (online)\t",
+	"Memory (online)\t\t",
+	"Register (online)\t",
+	"Clock (online)\t\t",
+};
+
 static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
 {
 	int i, j, k;
 
 	for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+		if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+			continue;
 		strcpy(buf + j * ETH_GSTRING_LEN,
 		       qede_stats_arr[i].string);
 		j++;
@@ -139,6 +171,14 @@
 	case ETH_SS_STATS:
 		qede_get_strings_stats(edev, buf);
 		break;
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(buf, qede_private_arr,
+		       ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+		break;
+	case ETH_SS_TEST:
+		memcpy(buf, qede_tests_str_arr,
+		       ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+		break;
 	default:
 		DP_VERBOSE(edev, QED_MSG_DEBUG,
 			   "Unsupported stringset 0x%08x\n", stringset);
@@ -156,8 +196,11 @@
 
 	mutex_lock(&edev->qede_lock);
 
-	for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+	for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
+		if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
+			continue;
 		buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+	}
 
 	for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
 		buf[cnt] = 0;
@@ -176,8 +219,18 @@
 
 	switch (stringset) {
 	case ETH_SS_STATS:
-		return num_stats + QEDE_NUM_RQSTATS;
+		if (IS_VF(edev)) {
+			int i;
 
+			for (i = 0; i < QEDE_NUM_STATS; i++)
+				if (qede_stats_arr[i].pf_only)
+					num_stats--;
+		}
+		return num_stats + QEDE_NUM_RQSTATS;
+	case ETH_SS_PRIV_FLAGS:
+		return QEDE_PRI_FLAG_LEN;
+	case ETH_SS_TEST:
+		return QEDE_ETHTOOL_TEST_MAX;
 	default:
 		DP_VERBOSE(edev, QED_MSG_DEBUG,
 			   "Unsupported stringset 0x%08x\n", stringset);
@@ -185,6 +238,13 @@
 	}
 }
 
+static u32 qede_get_priv_flags(struct net_device *dev)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
+}
+
 static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct qede_dev *edev = netdev_priv(dev);
@@ -217,9 +277,9 @@
 	struct qed_link_params params;
 	u32 speed;
 
-	if (!edev->dev_info.common.is_mf_default) {
+	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
 		DP_INFO(edev,
-			"Link parameters can not be changed in non-default mode\n");
+			"Link settings are not allowed to be changed\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -328,6 +388,12 @@
 	struct qed_link_output current_link;
 	struct qed_link_params link_params;
 
+	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+		DP_INFO(edev,
+			"Link settings are not allowed to be changed\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (!netif_running(dev))
 		return 0;
 
@@ -428,9 +494,9 @@
 	struct qed_link_params params;
 	struct qed_link_output current_link;
 
-	if (!edev->dev_info.common.is_mf_default) {
+	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
 		DP_INFO(edev,
-			"Pause parameters can not be updated in non-default mode\n");
+			"Pause settings are not allowed to be changed\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -569,6 +635,497 @@
 	return 0;
 }
 
+static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+	info->data = RXH_IP_SRC | RXH_IP_DST;
+
+	switch (info->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case UDP_V4_FLOW:
+		if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case UDP_V6_FLOW:
+		if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		break;
+	case IPV4_FLOW:
+	case IPV6_FLOW:
+		break;
+	default:
+		info->data = 0;
+		break;
+	}
+
+	return 0;
+}
+
+static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+			  u32 *rules __always_unused)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = edev->num_rss;
+		return 0;
+	case ETHTOOL_GRXFH:
+		return qede_get_rss_flags(edev, info);
+	default:
+		DP_ERR(edev, "Command parameters not supported\n");
+		return -EOPNOTSUPP;
+	}
+}
+
+static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+	struct qed_update_vport_params vport_update_params;
+	u8 set_caps = 0, clr_caps = 0;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG,
+		   "Set rss flags command parameters: flow type = %d, data = %llu\n",
+		   info->flow_type, info->data);
+
+	switch (info->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		/* For TCP only 4-tuple hash is supported */
+		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+			DP_INFO(edev, "Command parameters not supported\n");
+			return -EINVAL;
+		}
+		return 0;
+	case UDP_V4_FLOW:
+		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
+		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+			set_caps = QED_RSS_IPV4_UDP;
+			DP_VERBOSE(edev, QED_MSG_DEBUG,
+				   "UDP 4-tuple enabled\n");
+		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+			clr_caps = QED_RSS_IPV4_UDP;
+			DP_VERBOSE(edev, QED_MSG_DEBUG,
+				   "UDP 4-tuple disabled\n");
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case UDP_V6_FLOW:
+		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
+		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+			set_caps = QED_RSS_IPV6_UDP;
+			DP_VERBOSE(edev, QED_MSG_DEBUG,
+				   "UDP 4-tuple enabled\n");
+		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+			clr_caps = QED_RSS_IPV6_UDP;
+			DP_VERBOSE(edev, QED_MSG_DEBUG,
+				   "UDP 4-tuple disabled\n");
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case IPV4_FLOW:
+	case IPV6_FLOW:
+		/* For IP only 2-tuple hash is supported */
+		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+			DP_INFO(edev, "Command parameters not supported\n");
+			return -EINVAL;
+		}
+		return 0;
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case SCTP_V6_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IP_USER_FLOW:
+	case ETHER_FLOW:
+		/* RSS is not supported for these protocols */
+		if (info->data) {
+			DP_INFO(edev, "Command parameters not supported\n");
+			return -EINVAL;
+		}
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	/* No action is needed if there is no change in the rss capability */
+	if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
+					   ~clr_caps) | set_caps))
+		return 0;
+
+	/* Update internal configuration */
+	edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
+				    set_caps;
+	edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+
+	/* Re-configure if possible */
+	if (netif_running(edev->ndev)) {
+		memset(&vport_update_params, 0, sizeof(vport_update_params));
+		vport_update_params.update_rss_flg = 1;
+		vport_update_params.vport_id = 0;
+		memcpy(&vport_update_params.rss_params, &edev->rss_params,
+		       sizeof(vport_update_params.rss_params));
+		return edev->ops->vport_update(edev->cdev,
+					       &vport_update_params);
+	}
+
+	return 0;
+}
+
+static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_SRXFH:
+		return qede_set_rss_flags(edev, info);
+	default:
+		DP_INFO(edev, "Command parameters not supported\n");
+		return -EOPNOTSUPP;
+	}
+}
+
+static u32 qede_get_rxfh_indir_size(struct net_device *dev)
+{
+	return QED_RSS_IND_TABLE_SIZE;
+}
+
+static u32 qede_get_rxfh_key_size(struct net_device *dev)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	return sizeof(edev->rss_params.rss_key);
+}
+
+static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	int i;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+
+	if (!indir)
+		return 0;
+
+	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+		indir[i] = edev->rss_params.rss_ind_table[i];
+
+	if (key)
+		memcpy(key, edev->rss_params.rss_key,
+		       qede_get_rxfh_key_size(dev));
+
+	return 0;
+}
+
+static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
+			 const u8 *key, const u8 hfunc)
+{
+	struct qed_update_vport_params vport_update_params;
+	struct qede_dev *edev = netdev_priv(dev);
+	int i;
+
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+		return -EOPNOTSUPP;
+
+	if (!indir && !key)
+		return 0;
+
+	if (indir) {
+		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+			edev->rss_params.rss_ind_table[i] = indir[i];
+		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+	}
+
+	if (key) {
+		memcpy(&edev->rss_params.rss_key, key,
+		       qede_get_rxfh_key_size(dev));
+		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+	}
+
+	if (netif_running(edev->ndev)) {
+		memset(&vport_update_params, 0, sizeof(vport_update_params));
+		vport_update_params.update_rss_flg = 1;
+		vport_update_params.vport_id = 0;
+		memcpy(&vport_update_params.rss_params, &edev->rss_params,
+		       sizeof(vport_update_params.rss_params));
+		return edev->ops->vport_update(edev->cdev,
+					       &vport_update_params);
+	}
+
+	return 0;
+}
+
+/* This function enables the interrupt generation and the NAPI on the device */
+static void qede_netif_start(struct qede_dev *edev)
+{
+	int i;
+
+	if (!netif_running(edev->ndev))
+		return;
+
+	for_each_rss(i) {
+		/* Update and reenable interrupts */
+		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
+		napi_enable(&edev->fp_array[i].napi);
+	}
+}
+
+/* This function disables the NAPI and the interrupt generation on the device */
+static void qede_netif_stop(struct qede_dev *edev)
+{
+	int i;
+
+	for_each_rss(i) {
+		napi_disable(&edev->fp_array[i].napi);
+		/* Disable interrupts */
+		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
+	}
+}
+
+static int qede_selftest_transmit_traffic(struct qede_dev *edev,
+					  struct sk_buff *skb)
+{
+	struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+	struct eth_tx_1st_bd *first_bd;
+	dma_addr_t mapping;
+	int i, idx, val;
+
+	/* Fill the entry in the SW ring and the BDs in the FW ring */
+	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+	txq->sw_tx_ring[idx].skb = skb;
+	first_bd = qed_chain_produce(&txq->tx_pbl);
+	memset(first_bd, 0, sizeof(*first_bd));
+	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+	first_bd->data.bd_flags.bitfields = val;
+
+	/* Map skb linear data for DMA and set in the first BD */
+	mapping = dma_map_single(&edev->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+		DP_NOTICE(edev, "SKB mapping failed\n");
+		return -ENOMEM;
+	}
+	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+	/* update the first BD with the actual num BDs */
+	first_bd->data.nbds = 1;
+	txq->sw_tx_prod++;
+	/* 'next page' entries are counted in the producer value */
+	val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+	txq->tx_db.data.bd_prod = val;
+
+	/* wmb makes sure that the BDs data is updated before updating the
+	 * producer, otherwise FW may read old data from the BDs.
+	 */
+	wmb();
+	barrier();
+	writel(txq->tx_db.raw, txq->doorbell_addr);
+
+	/* mmiowb is needed to synchronize doorbell writes from more than one
+	 * processor. It guarantees that the write arrives at the device before
+	 * the queue lock is released and another start_xmit is called (possibly
+	 * on another CPU). Without this barrier, the next doorbell can bypass
+	 * this doorbell. This is applicable to IA64/Altix systems.
+	 */
+	mmiowb();
+
+	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+		if (qede_txq_has_work(txq))
+			break;
+		usleep_range(100, 200);
+	}
+
+	if (!qede_txq_has_work(txq)) {
+		DP_NOTICE(edev, "Tx completion didn't happen\n");
+		return -1;
+	}
+
+	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+		       BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+	txq->sw_tx_cons++;
+	txq->sw_tx_ring[idx].skb = NULL;
+
+	return 0;
+}
+
+static int qede_selftest_receive_traffic(struct qede_dev *edev)
+{
+	struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
+	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+	struct eth_fast_path_rx_reg_cqe *fp_cqe;
+	struct sw_rx_data *sw_rx_data;
+	union eth_rx_cqe *cqe;
+	u8 *data_ptr;
+	int i;
+
+	/* The packet is expected to be received on rx-queue 0 even though RSS
+	 * is enabled. This is because queue 0 is configured as the default
+	 * queue and the loopback traffic is not IP.
+	 */
+	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+		if (qede_has_rx_work(rxq))
+			break;
+		usleep_range(100, 200);
+	}
+
+	if (!qede_has_rx_work(rxq)) {
+		DP_NOTICE(edev, "Failed to receive the traffic\n");
+		return -1;
+	}
+
+	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	/* Memory barrier to prevent the CPU from doing speculative reads of
+	 * the CQE/BD before reading hw_comp_cons. If the CQE is read before it
+	 * is written by FW, and FW then writes the CQE and SB before the CPU
+	 * reads hw_comp_cons, the CPU would use the stale CQE.
+	 */
+	rmb();
+
+	/* Get the CQE from the completion ring */
+	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+	/* Get the data from the SW ring */
+	sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+	sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+	fp_cqe = &cqe->fast_path_regular;
+	len =  le16_to_cpu(fp_cqe->len_on_first_bd);
+	data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+		     fp_cqe->placement_offset + sw_rx_data->page_offset);
+	for (i = ETH_HLEN; i < len; i++)
+		if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+			DP_NOTICE(edev, "Loopback test failed\n");
+			qede_recycle_rx_bd_ring(rxq, edev, 1);
+			return -1;
+		}
+
+	qede_recycle_rx_bd_ring(rxq, edev, 1);
+
+	return 0;
+}
+
+static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
+{
+	struct qed_link_params link_params;
+	struct sk_buff *skb = NULL;
+	int rc = 0, i;
+	u32 pkt_size;
+	u8 *packet;
+
+	if (!netif_running(edev->ndev)) {
+		DP_NOTICE(edev, "Interface is down\n");
+		return -EINVAL;
+	}
+
+	qede_netif_stop(edev);
+
+	/* Bring up the link in Loopback mode */
+	memset(&link_params, 0, sizeof(link_params));
+	link_params.link_up = true;
+	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+	link_params.loopback_mode = loopback_mode;
+	edev->ops->common->set_link(edev->cdev, &link_params);
+
+	/* Wait for loopback configuration to apply */
+	msleep_interruptible(500);
+
+	/* prepare the loopback packet */
+	pkt_size = edev->ndev->mtu + ETH_HLEN;
+
+	skb = netdev_alloc_skb(edev->ndev, pkt_size);
+	if (!skb) {
+		DP_INFO(edev, "Can't allocate skb\n");
+		rc = -ENOMEM;
+		goto test_loopback_exit;
+	}
+	packet = skb_put(skb, pkt_size);
+	ether_addr_copy(packet, edev->ndev->dev_addr);
+	ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
+	memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
+	for (i = ETH_HLEN; i < pkt_size; i++)
+		packet[i] = (unsigned char)(i & 0xff);
+
+	rc = qede_selftest_transmit_traffic(edev, skb);
+	if (rc)
+		goto test_loopback_exit;
+
+	rc = qede_selftest_receive_traffic(edev);
+	if (rc)
+		goto test_loopback_exit;
+
+	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
+
+test_loopback_exit:
+	dev_kfree_skb(skb);
+
+	/* Bring up the link in Normal mode */
+	memset(&link_params, 0, sizeof(link_params));
+	link_params.link_up = true;
+	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+	link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
+	edev->ops->common->set_link(edev->cdev, &link_params);
+
+	/* Wait for loopback configuration to apply */
+	msleep_interruptible(500);
+
+	qede_netif_start(edev);
+
+	return rc;
+}
+
+static void qede_self_test(struct net_device *dev,
+			   struct ethtool_test *etest, u64 *buf)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG,
+		   "Self-test command parameters: offline = %d, external_lb = %d\n",
+		   (etest->flags & ETH_TEST_FL_OFFLINE),
+		   (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
+
+	memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
+
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		if (qede_selftest_run_loopback(edev,
+					       QED_LINK_LOOPBACK_INT_PHY)) {
+			buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+	}
+
+	if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
+		buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+
+	if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
+		buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+
+	if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
+		buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+
+	if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
+		buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+}
+
 static const struct ethtool_ops qede_ethtool_ops = {
 	.get_settings = qede_get_settings,
 	.set_settings = qede_set_settings,
@@ -584,13 +1141,47 @@
 	.get_strings = qede_get_strings,
 	.set_phys_id = qede_set_phys_id,
 	.get_ethtool_stats = qede_get_ethtool_stats,
+	.get_priv_flags = qede_get_priv_flags,
 	.get_sset_count = qede_get_sset_count,
+	.get_rxnfc = qede_get_rxnfc,
+	.set_rxnfc = qede_set_rxnfc,
+	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
+	.get_rxfh_key_size = qede_get_rxfh_key_size,
+	.get_rxfh = qede_get_rxfh,
+	.set_rxfh = qede_set_rxfh,
+	.get_channels = qede_get_channels,
+	.set_channels = qede_set_channels,
+	.self_test = qede_self_test,
+};
 
+static const struct ethtool_ops qede_vf_ethtool_ops = {
+	.get_settings = qede_get_settings,
+	.get_drvinfo = qede_get_drvinfo,
+	.get_msglevel = qede_get_msglevel,
+	.set_msglevel = qede_set_msglevel,
+	.get_link = qede_get_link,
+	.get_ringparam = qede_get_ringparam,
+	.set_ringparam = qede_set_ringparam,
+	.get_strings = qede_get_strings,
+	.get_ethtool_stats = qede_get_ethtool_stats,
+	.get_priv_flags = qede_get_priv_flags,
+	.get_sset_count = qede_get_sset_count,
+	.get_rxnfc = qede_get_rxnfc,
+	.set_rxnfc = qede_set_rxnfc,
+	.get_rxfh_indir_size = qede_get_rxfh_indir_size,
+	.get_rxfh_key_size = qede_get_rxfh_key_size,
+	.get_rxfh = qede_get_rxfh,
+	.set_rxfh = qede_set_rxfh,
 	.get_channels = qede_get_channels,
 	.set_channels = qede_set_channels,
 };
 
 void qede_set_ethtool_ops(struct net_device *dev)
 {
-	dev->ethtool_ops = &qede_ethtool_ops;
+	struct qede_dev *edev = netdev_priv(dev);
+
+	if (IS_VF(edev))
+		dev->ethtool_ops = &qede_vf_ethtool_ops;
+	else
+		dev->ethtool_ops = &qede_ethtool_ops;
 }
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 518af32..8114541 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,7 +24,12 @@
 #include <linux/netdev_features.h>
 #include <linux/udp.h>
 #include <linux/tcp.h>
+#ifdef CONFIG_QEDE_VXLAN
 #include <net/vxlan.h>
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+#include <net/geneve.h>
+#endif
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
@@ -58,6 +63,7 @@
 #define CHIP_NUM_57980S_100		0x1644
 #define CHIP_NUM_57980S_50		0x1654
 #define CHIP_NUM_57980S_25		0x1656
+#define CHIP_NUM_57980S_IOV		0x1664
 
 #ifndef PCI_DEVICE_ID_NX2_57980E
 #define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
@@ -66,15 +72,22 @@
 #define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
 #define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
 #define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
 #endif
 
+enum qede_pci_private {
+	QEDE_PRIVATE_PF,
+	QEDE_PRIVATE_VF
+};
+
 static const struct pci_device_id qede_pci_tbl[] = {
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
-	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
 	{ 0 }
 };
 
@@ -89,17 +102,87 @@
 				struct qede_rx_queue *rxq);
 static void qede_link_update(void *dev, struct qed_link_output *link);
 
+#ifdef CONFIG_QED_SRIOV
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+
+	if (vlan > 4095) {
+		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
+		   vlan, vf);
+
+	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
+}
+
+static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+
+	DP_VERBOSE(edev, QED_MSG_IOV,
+		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
+		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+
+	if (!is_valid_ether_addr(mac)) {
+		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
+		return -EINVAL;
+	}
+
+	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
+}
+
+static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+{
+	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
+	struct qed_dev_info *qed_info = &edev->dev_info.common;
+	int rc;
+
+	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
+
+	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
+
+	/* Enable/Disable Tx switching for PF */
+	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+		struct qed_update_vport_params params;
+
+		memset(&params, 0, sizeof(params));
+		params.vport_id = 0;
+		params.update_tx_switching_flg = 1;
+		params.tx_switching_flg = num_vfs_param ? 1 : 0;
+		edev->ops->vport_update(edev->cdev, &params);
+	}
+
+	return rc;
+}
+#endif
+
 static struct pci_driver qede_pci_driver = {
 	.name = "qede",
 	.id_table = qede_pci_tbl,
 	.probe = qede_probe,
 	.remove = qede_remove,
+#ifdef CONFIG_QED_SRIOV
+	.sriov_configure = qede_sriov_configure,
+#endif
 };
 
+static void qede_force_mac(void *dev, u8 *mac)
+{
+	struct qede_dev *edev = dev;
+
+	ether_addr_copy(edev->ndev->dev_addr, mac);
+	ether_addr_copy(edev->primary_mac, mac);
+}
+
 static struct qed_eth_cb_ops qede_ll_ops = {
 	{
 		.link_update = qede_link_update,
 	},
+	.force_mac = qede_force_mac,
 };
 
 static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -141,19 +224,10 @@
 int __init qede_init(void)
 {
 	int ret;
-	u32 qed_ver;
 
 	pr_notice("qede_init: %s\n", version);
 
-	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
-	if (qed_ver !=  QEDE_ETH_INTERFACE_VERSION) {
-		pr_notice("Version mismatch [%08x != %08x]\n",
-			  qed_ver,
-			  QEDE_ETH_INTERFACE_VERSION);
-		return -EINVAL;
-	}
-
-	qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+	qed_ops = qed_get_eth_ops();
 	if (!qed_ops) {
 		pr_notice("Failed to get qed ethtool operations\n");
 		return -EINVAL;
@@ -319,6 +393,9 @@
 	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
 		*ipv6_ext = 1;
 
+	if (skb->encapsulation)
+		rc |= XMIT_ENC;
+
 	if (skb_is_gso(skb))
 		rc |= XMIT_LSO;
 
@@ -380,6 +457,16 @@
 	return 0;
 }
 
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+	if (is_encap_pkt)
+		return (skb_inner_transport_header(skb) +
+			inner_tcp_hdrlen(skb) - skb->data);
+	else
+		return (skb_transport_header(skb) +
+			tcp_hdrlen(skb) - skb->data);
+}
+
 /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
 static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -390,8 +477,7 @@
 	if (xmit_type & XMIT_LSO) {
 		int hlen;
 
-		hlen = skb_transport_header(skb) +
-		       tcp_hdrlen(skb) - skb->data;
+		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
 
 		/* linear payload would require its own BD */
 		if (skb_headlen(skb) > hlen)
@@ -421,7 +507,7 @@
 	u8 xmit_type;
 	u16 idx;
 	u16 hlen;
-	bool data_split;
+	bool data_split = false;
 
 	/* Get tx-queue context and netdev index */
 	txq_index = skb_get_queue_mapping(skb);
@@ -499,7 +585,18 @@
 		first_bd->data.bd_flags.bitfields |=
 			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
 
-		first_bd->data.bitfields |= cpu_to_le16(temp);
+		if (xmit_type & XMIT_ENC) {
+			first_bd->data.bd_flags.bitfields |=
+				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+		} else {
+			/* In cases when the OS doesn't request inner offloads
+			 * for a tunnelled packet, we need to override the HW
+			 * tunnel configuration so that packets are treated as
+			 * regular, non-tunnelled packets and no inner offloads
+			 * are done by the hardware.
+			 */
+			first_bd->data.bitfields |= cpu_to_le16(temp);
+		}
 
 		/* If the packet is IPv6 with extension header, indicate that
 		 * to FW and pass few params, since the device cracker doesn't
@@ -515,10 +612,15 @@
 		third_bd->data.lso_mss =
 			cpu_to_le16(skb_shinfo(skb)->gso_size);
 
-		first_bd->data.bd_flags.bitfields |=
-		1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-		hlen = skb_transport_header(skb) +
-		       tcp_hdrlen(skb) - skb->data;
+		if (unlikely(xmit_type & XMIT_ENC)) {
+			first_bd->data.bd_flags.bitfields |=
+				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+			hlen = qede_get_skb_hlen(skb, true);
+		} else {
+			first_bd->data.bd_flags.bitfields |=
+				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+			hlen = qede_get_skb_hlen(skb, false);
+		}
 
 		/* @@@TBD - if will not be removed need to check */
 		third_bd->data.bitfields |=
@@ -644,7 +746,7 @@
 	return NETDEV_TX_OK;
 }
 
-static int qede_txq_has_work(struct qede_tx_queue *txq)
+int qede_txq_has_work(struct qede_tx_queue *txq)
 {
 	u16 hw_bd_cons;
 
@@ -727,7 +829,7 @@
 	return 0;
 }
 
-static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
 {
 	u16 hw_comp_cons, sw_comp_cons;
 
@@ -750,6 +852,12 @@
 	return false;
 }
 
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+	qed_chain_consume(&rxq->rx_bd_ring);
+	rxq->sw_rx_cons++;
+}
+
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
@@ -773,6 +881,21 @@
 	curr_cons->data = NULL;
 }
 
+/* In case of allocation failures, reuse buffers from the consumer index
+ * to produce buffers for the firmware.
+ */
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+			     struct qede_dev *edev, u8 count)
+{
+	struct sw_rx_data *curr_cons;
+
+	for (; count > 0; count--) {
+		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+		qede_reuse_page(edev, rxq, curr_cons);
+		qede_rx_bd_ring_consume(rxq);
+	}
+}
+
 static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 					 struct qede_rx_queue *rxq,
 					 struct sw_rx_data *curr_cons)
@@ -781,8 +904,14 @@
 	curr_cons->page_offset += rxq->rx_buf_seg_size;
 
 	if (curr_cons->page_offset == PAGE_SIZE) {
-		if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+			/* Since we failed to allocate a new buffer, the
+			 * current buffer can be used again.
+			 */
+			curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
 			return -ENOMEM;
+		}
 
 		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
 			       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -852,6 +981,9 @@
 
 	if (csum_flag & QEDE_CSUM_UNNECESSARY)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+		skb->csum_level = 1;
 }
 
 static inline void qede_skb_receive(struct qede_dev *edev,
@@ -901,7 +1033,10 @@
 			   len_on_bd);
 
 	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+		/* Incr page ref count to reuse on allocation failure
+		 * so that it doesn't get freed while freeing SKB.
+		 */
+		atomic_inc(&current_bd->data->_count);
 		goto out;
 	}
 
@@ -915,6 +1050,8 @@
 	return 0;
 
 out:
+	tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+	qede_recycle_rx_bd_ring(rxq, edev, 1);
 	return -ENOMEM;
 }
 
@@ -966,8 +1103,9 @@
 	tpa_info->skb = netdev_alloc_skb(edev->ndev,
 					 le16_to_cpu(cqe->len_on_first_bd));
 	if (unlikely(!tpa_info->skb)) {
+		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
 		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-		return;
+		goto cons_buf;
 	}
 
 	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1128,7 @@
 	/* This is needed in order to enable forwarding support */
 	qede_set_gro_params(edev, tpa_info->skb, cqe);
 
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
 	if (likely(cqe->ext_bd_len_list[0]))
 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
 				   le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1146,6 @@
 	const struct iphdr *iph = ip_hdr(skb);
 	struct tcphdr *th;
 
-	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, sizeof(struct iphdr));
 	th = tcp_hdr(skb);
 
@@ -1022,7 +1160,6 @@
 	struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct tcphdr *th;
 
-	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
 	th = tcp_hdr(skb);
 
@@ -1037,8 +1174,21 @@
 			     struct sk_buff *skb,
 			     u16 vlan_tag)
 {
+	/* FW can send a single MTU-sized packet from the gro flow
+	 * due to aggregation timeout/last segment etc., which
+	 * is not expected to be a gro packet. If an skb has zero
+	 * frags then simply push it up the stack as a non-gso skb.
+	 */
+	if (unlikely(!skb->data_len)) {
+		skb_shinfo(skb)->gso_type = 0;
+		skb_shinfo(skb)->gso_size = 0;
+		goto send_skb;
+	}
+
 #ifdef CONFIG_INET
 	if (skb_shinfo(skb)->gso_size) {
+		skb_set_network_header(skb, 0);
+
 		switch (skb->protocol) {
 		case htons(ETH_P_IP):
 			qede_gro_ip_csum(skb);
@@ -1053,6 +1203,8 @@
 		}
 	}
 #endif
+
+send_skb:
 	skb_record_rx_queue(skb, fp->rss_id);
 	qede_skb_receive(edev, fp, skb, vlan_tag);
 }
@@ -1141,13 +1293,47 @@
 	tpa_info->skb = NULL;
 }
 
-static u8 qede_check_csum(u16 flag)
+static bool qede_tunn_exist(u16 flag)
+{
+	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+	u16 csum_flag = 0;
+	u8 tcsum = 0;
+
+	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+	}
+
+	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+	if (csum_flag & flag)
+		return QEDE_CSUM_ERROR;
+
+	return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
 {
 	u16 csum_flag = 0;
 	u8 csum = 0;
 
-	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
 		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
 			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
 		csum = QEDE_CSUM_UNNECESSARY;
@@ -1162,6 +1348,14 @@
 	return csum;
 }
 
+static u8 qede_check_csum(u16 flag)
+{
+	if (!qede_tunn_exist(flag))
+		return qede_check_notunn_csum(flag);
+	else
+		return qede_check_tunn_csum(flag);
+}
+
 static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
 	struct qede_dev *edev = fp->edev;
@@ -1244,17 +1438,17 @@
 				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
 				  sw_comp_cons, parse_flag);
 			rxq->rx_hw_errors++;
-			qede_reuse_page(edev, rxq, sw_rx_data);
-			goto next_rx;
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+			goto next_cqe;
 		}
 
 		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			DP_NOTICE(edev,
 				  "Build_skb failed, dropping incoming packet\n");
-			qede_reuse_page(edev, rxq, sw_rx_data);
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
 			rxq->rx_alloc_errors++;
-			goto next_rx;
+			goto next_cqe;
 		}
 
 		/* Copy data into SKB */
@@ -1288,11 +1482,22 @@
 			if (unlikely(qede_realloc_rx_buffer(edev, rxq,
 							    sw_rx_data))) {
 				DP_ERR(edev, "Failed to allocate rx buffer\n");
+				/* Incr page ref count to reuse on allocation
+				 * failure so that it doesn't get freed while
+				 * freeing SKB.
+				 */
+
+				atomic_inc(&sw_rx_data->data->_count);
 				rxq->rx_alloc_errors++;
+				qede_recycle_rx_bd_ring(rxq, edev,
+							fp_cqe->bd_num);
+				dev_kfree_skb_any(skb);
 				goto next_cqe;
 			}
 		}
 
+		qede_rx_bd_ring_consume(rxq);
+
 		if (fp_cqe->bd_num != 1) {
 			u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
 			u8 num_frags;
@@ -1303,18 +1508,27 @@
 			     num_frags--) {
 				u16 cur_size = pkt_len > rxq->rx_buf_size ?
 						rxq->rx_buf_size : pkt_len;
-
-				WARN_ONCE(!cur_size,
-					  "Still got %d BDs for mapping jumbo, but length became 0\n",
-					  num_frags);
-
-				if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+				if (unlikely(!cur_size)) {
+					DP_ERR(edev,
+					       "Still got %d BDs for mapping jumbo, but length became 0\n",
+					       num_frags);
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
 					goto next_cqe;
+				}
 
-				rxq->sw_rx_cons++;
+				if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
+					goto next_cqe;
+				}
+
 				sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
 				sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-				qed_chain_consume(&rxq->rx_bd_ring);
+				qede_rx_bd_ring_consume(rxq);
+
 				dma_unmap_page(&edev->pdev->dev,
 					       sw_rx_data->mapping,
 					       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1544,7 @@
 				pkt_len -= cur_size;
 			}
 
-			if (pkt_len)
+			if (unlikely(pkt_len))
 				DP_ERR(edev,
 				       "Mapped all BDs of jumbo, but still have %d bytes\n",
 				       pkt_len);
@@ -1349,10 +1563,6 @@
 		skb_record_rx_queue(skb, fp->rss_id);
 
 		qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
-		qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
-		rxq->sw_rx_cons++;
 next_rx_only:
 		rx_pkt++;
 
@@ -1506,16 +1716,25 @@
 	edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
 
 	edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
-	edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
-	edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
-	edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
-	edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
-	edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
-	edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
-	edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
-	edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
-	edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
-	edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+	edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
+	edev->stats.rx_128_to_255_byte_packets =
+				stats.rx_128_to_255_byte_packets;
+	edev->stats.rx_256_to_511_byte_packets =
+				stats.rx_256_to_511_byte_packets;
+	edev->stats.rx_512_to_1023_byte_packets =
+				stats.rx_512_to_1023_byte_packets;
+	edev->stats.rx_1024_to_1518_byte_packets =
+				stats.rx_1024_to_1518_byte_packets;
+	edev->stats.rx_1519_to_1522_byte_packets =
+				stats.rx_1519_to_1522_byte_packets;
+	edev->stats.rx_1519_to_2047_byte_packets =
+				stats.rx_1519_to_2047_byte_packets;
+	edev->stats.rx_2048_to_4095_byte_packets =
+				stats.rx_2048_to_4095_byte_packets;
+	edev->stats.rx_4096_to_9216_byte_packets =
+				stats.rx_4096_to_9216_byte_packets;
+	edev->stats.rx_9217_to_16383_byte_packets =
+				stats.rx_9217_to_16383_byte_packets;
 	edev->stats.rx_crc_errors = stats.rx_crc_errors;
 	edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
 	edev->stats.rx_pause_frames = stats.rx_pause_frames;
@@ -1589,6 +1808,49 @@
 	return stats;
 }
 
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+			      struct ifla_vf_info *ivi)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	if (!edev->ops)
+		return -EINVAL;
+
+	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
+}
+
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+			    int min_tx_rate, int max_tx_rate)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
+					max_tx_rate);
+}
+
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	if (!edev->ops)
+		return -EINVAL;
+
+	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
+}
+
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+				  int link_state)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	if (!edev->ops)
+		return -EINVAL;
+
+	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
+}
+#endif
+
 static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
 {
 	struct qed_update_vport_params params;
@@ -1830,6 +2092,76 @@
 	edev->accept_any_vlan = false;
 }
 
+#ifdef CONFIG_QEDE_VXLAN
+static void qede_add_vxlan_port(struct net_device *dev,
+				sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (edev->vxlan_dst_port)
+		return;
+
+	edev->vxlan_dst_port = t_port;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+
+	set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_vxlan_port(struct net_device *dev,
+				sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (t_port != edev->vxlan_dst_port)
+		return;
+
+	edev->vxlan_dst_port = 0;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
+
+	set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
+#ifdef CONFIG_QEDE_GENEVE
+static void qede_add_geneve_port(struct net_device *dev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (edev->geneve_dst_port)
+		return;
+
+	edev->geneve_dst_port = t_port;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
+	set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_geneve_port(struct net_device *dev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u16 t_port = ntohs(port);
+
+	if (t_port != edev->geneve_dst_port)
+		return;
+
+	edev->geneve_dst_port = 0;
+
+	DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
+	set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+	schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
 static const struct net_device_ops qede_netdev_ops = {
 	.ndo_open = qede_open,
 	.ndo_stop = qede_close,
@@ -1838,9 +2170,27 @@
 	.ndo_set_mac_address = qede_set_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_change_mtu = qede_change_mtu,
+#ifdef CONFIG_QED_SRIOV
+	.ndo_set_vf_mac = qede_set_vf_mac,
+	.ndo_set_vf_vlan = qede_set_vf_vlan,
+#endif
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
 	.ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QED_SRIOV
+	.ndo_set_vf_link_state = qede_set_vf_link_state,
+	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+	.ndo_get_vf_config = qede_get_vf_config,
+	.ndo_set_vf_rate = qede_set_vf_rate,
+#endif
+#ifdef CONFIG_QEDE_VXLAN
+	.ndo_add_vxlan_port = qede_add_vxlan_port,
+	.ndo_del_vxlan_port = qede_del_vxlan_port,
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+	.ndo_add_geneve_port = qede_add_geneve_port,
+	.ndo_del_geneve_port = qede_del_geneve_port,
+#endif
 };
 
 /* -------------------------------------------------------------------------
@@ -1875,8 +2225,6 @@
 	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
 	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
 
-	DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
-
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
 	memset(&edev->stats, 0, sizeof(edev->stats));
@@ -1913,6 +2261,14 @@
 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		      NETIF_F_TSO | NETIF_F_TSO6;
 
+	/* Encap features*/
+	hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+		       NETIF_F_TSO_ECN;
+	ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+				NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+				NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+				NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
 	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
 			      NETIF_F_HIGHDMA;
 	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2013,6 +2369,8 @@
 {
 	struct qede_dev *edev = container_of(work, struct qede_dev,
 					     sp_task.work);
+	struct qed_dev *cdev = edev->cdev;
+
 	mutex_lock(&edev->qede_lock);
 
 	if (edev->state == QEDE_STATE_OPEN) {
@@ -2020,6 +2378,24 @@
 			qede_config_rx_mode(edev->ndev);
 	}
 
+	if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+		struct qed_tunn_params tunn_params;
+
+		memset(&tunn_params, 0, sizeof(tunn_params));
+		tunn_params.update_vxlan_port = 1;
+		tunn_params.vxlan_port = edev->vxlan_dst_port;
+		qed_ops->tunn_config(cdev, &tunn_params);
+	}
+
+	if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+		struct qed_tunn_params tunn_params;
+
+		memset(&tunn_params, 0, sizeof(tunn_params));
+		tunn_params.update_geneve_port = 1;
+		tunn_params.geneve_port = edev->geneve_dst_port;
+		qed_ops->tunn_config(cdev, &tunn_params);
+	}
+
 	mutex_unlock(&edev->qede_lock);
 }
 
@@ -2027,9 +2403,9 @@
 {
 	struct qed_pf_params pf_params;
 
-	/* 16 rx + 16 tx */
+	/* 64 rx + 64 tx */
 	memset(&pf_params, 0, sizeof(struct qed_pf_params));
-	pf_params.eth_pf_params.num_cons = 32;
+	pf_params.eth_pf_params.num_cons = 128;
 	qed_ops->common->update_pf_params(cdev, &pf_params);
 }
 
@@ -2038,8 +2414,9 @@
 };
 
 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
-			enum qede_probe_mode mode)
+			bool is_vf, enum qede_probe_mode mode)
 {
+	struct qed_probe_params probe_params;
 	struct qed_slowpath_params params;
 	struct qed_dev_eth_info dev_info;
 	struct qede_dev *edev;
@@ -2049,8 +2426,12 @@
 	if (unlikely(dp_level & QED_LEVEL_INFO))
 		pr_notice("Starting qede probe\n");
 
-	cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
-				      dp_module, dp_level);
+	memset(&probe_params, 0, sizeof(probe_params));
+	probe_params.protocol = QED_PROTOCOL_ETH;
+	probe_params.dp_module = dp_module;
+	probe_params.dp_level = dp_level;
+	probe_params.is_vf = is_vf;
+	cdev = qed_ops->common->probe(pdev, &probe_params);
 	if (!cdev) {
 		rc = -ENODEV;
 		goto err0;
@@ -2084,6 +2465,9 @@
 		goto err2;
 	}
 
+	if (is_vf)
+		edev->flags |= QEDE_FLAG_IS_VF;
+
 	qede_init_ndev(edev);
 
 	rc = register_netdev(edev->ndev);
@@ -2115,12 +2499,24 @@
 
 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	bool is_vf = false;
 	u32 dp_module = 0;
 	u8 dp_level = 0;
 
+	switch ((enum qede_pci_private)id->driver_data) {
+	case QEDE_PRIVATE_VF:
+		if (debug & QED_LOG_VERBOSE_MASK)
+			dev_err(&pdev->dev, "Probing a VF\n");
+		is_vf = true;
+		break;
+	default:
+		if (debug & QED_LOG_VERBOSE_MASK)
+			dev_err(&pdev->dev, "Probing a PF\n");
+	}
+
 	qede_config_debug(debug, &dp_module, &dp_level);
 
-	return __qede_probe(pdev, dp_module, dp_level,
+	return __qede_probe(pdev, dp_module, dp_level, is_vf,
 			    QEDE_PROBE_NORMAL);
 }
 
@@ -2257,7 +2653,7 @@
 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
 		struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
 
-		if (replace_buf) {
+		if (replace_buf->data) {
 			dma_unmap_page(&edev->pdev->dev,
 				       dma_unmap_addr(replace_buf, mapping),
 				       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2773,7 @@
 static int qede_alloc_mem_rxq(struct qede_dev *edev,
 			      struct qede_rx_queue *rxq)
 {
-	int i, rc, size, num_allocated;
+	int i, rc, size;
 
 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
@@ -2394,6 +2790,7 @@
 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
 	if (!rxq->sw_rx_ring) {
 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
+		rc = -ENOMEM;
 		goto err;
 	}
 
@@ -2421,26 +2818,16 @@
 	/* Allocate buffers for the Rx ring */
 	for (i = 0; i < rxq->num_rx_buffers; i++) {
 		rc = qede_alloc_rx_buffer(edev, rxq);
-		if (rc)
-			break;
-	}
-	num_allocated = i;
-	if (!num_allocated) {
-		DP_ERR(edev, "Rx buffers allocation failed\n");
-		goto err;
-	} else if (num_allocated < rxq->num_rx_buffers) {
-		DP_NOTICE(edev,
-			  "Allocated less buffers than desired (%d allocated)\n",
-			  num_allocated);
+		if (rc) {
+			DP_ERR(edev,
+			       "Rx buffers allocation failed at index %d\n", i);
+			goto err;
+		}
 	}
 
-	qede_alloc_sge_mem(edev, rxq);
-
-	return 0;
-
+	rc = qede_alloc_sge_mem(edev, rxq);
 err:
-	qede_free_mem_rxq(edev, rxq);
-	return -ENOMEM;
+	return rc;
 }
 
 static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2910,8 @@
 	}
 
 	return 0;
-
 err:
-	qede_free_mem_fp(edev, fp);
-	return -ENOMEM;
+	return rc;
 }
 
 static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2934,13 @@
 		struct qede_fastpath *fp = &edev->fp_array[rss_id];
 
 		rc = qede_alloc_mem_fp(edev, fp);
-		if (rc)
-			break;
-	}
-
-	if (rss_id != QEDE_RSS_CNT(edev)) {
-		/* Failed allocating memory for all the queues */
-		if (!rss_id) {
+		if (rc) {
 			DP_ERR(edev,
-			       "Failed to allocate memory for the leading queue\n");
-			rc = -ENOMEM;
-		} else {
-			DP_NOTICE(edev,
-				  "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
-				  QEDE_RSS_CNT(edev), rss_id);
+			       "Failed to allocate memory for fastpath - rss id = %d\n",
+			       rss_id);
+			qede_free_mem_load(edev);
+			return rc;
 		}
-		edev->num_rss = rss_id;
 	}
 
 	return 0;
@@ -2835,10 +3211,11 @@
 	int rc, tc, i;
 	int vlan_removal_en = 1;
 	struct qed_dev *cdev = edev->cdev;
-	struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
 	struct qed_update_vport_params vport_update_params;
 	struct qed_queue_start_common_params q_params;
+	struct qed_dev_info *qed_info = &edev->dev_info.common;
 	struct qed_start_vport_params start = {0};
+	bool reset_rss_indir = false;
 
 	if (!edev->num_rss) {
 		DP_ERR(edev,
@@ -2930,19 +3307,59 @@
 	vport_update_params.update_vport_active_flg = 1;
 	vport_update_params.vport_active_flg = 1;
 
+	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+	    qed_info->tx_switching) {
+		vport_update_params.update_tx_switching_flg = 1;
+		vport_update_params.tx_switching_flg = 1;
+	}
+
 	/* Fill struct with RSS params */
 	if (QEDE_RSS_CNT(edev) > 1) {
 		vport_update_params.update_rss_flg = 1;
-		for (i = 0; i < 128; i++)
-			rss_params->rss_ind_table[i] =
-			ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
-		netdev_rss_key_fill(rss_params->rss_key,
-				    sizeof(rss_params->rss_key));
+
+		/* Need to validate current RSS config uses valid entries */
+		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+			if (edev->rss_params.rss_ind_table[i] >=
+			    edev->num_rss) {
+				reset_rss_indir = true;
+				break;
+			}
+		}
+
+		if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
+		    reset_rss_indir) {
+			u16 val;
+
+			for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+				u16 indir_val;
+
+				val = QEDE_RSS_CNT(edev);
+				indir_val = ethtool_rxfh_indir_default(i, val);
+				edev->rss_params.rss_ind_table[i] = indir_val;
+			}
+			edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+		}
+
+		if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+			netdev_rss_key_fill(edev->rss_params.rss_key,
+					    sizeof(edev->rss_params.rss_key));
+			edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+		}
+
+		if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+			edev->rss_params.rss_caps = QED_RSS_IPV4 |
+						    QED_RSS_IPV6 |
+						    QED_RSS_IPV4_TCP |
+						    QED_RSS_IPV6_TCP;
+			edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+		}
+
+		memcpy(&vport_update_params.rss_params, &edev->rss_params,
+		       sizeof(vport_update_params.rss_params));
 	} else {
-		memset(rss_params, 0, sizeof(*rss_params));
+		memset(&vport_update_params.rss_params, 0,
+		       sizeof(vport_update_params.rss_params));
 	}
-	memcpy(&vport_update_params.rss_params, rss_params,
-	       sizeof(*rss_params));
 
 	rc = edev->ops->vport_update(cdev, &vport_update_params);
 	if (rc) {
@@ -3124,12 +3541,24 @@
 static int qede_open(struct net_device *ndev)
 {
 	struct qede_dev *edev = netdev_priv(ndev);
+	int rc;
 
 	netif_carrier_off(ndev);
 
 	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
 
-	return qede_load(edev, QEDE_LOAD_NORMAL);
+	rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_QEDE_VXLAN
+	vxlan_get_rx_port(ndev);
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+	geneve_get_rx_port(ndev);
+#endif
+	return 0;
 }
 
 static int qede_close(struct net_device *ndev)
@@ -3180,6 +3609,11 @@
 		return -EFAULT;
 	}
 
+	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+		DP_NOTICE(edev, "qed prevents setting MAC\n");
+		return -EINVAL;
+	}
+
 	ether_addr_copy(ndev->dev_addr, addr->sa_data);
 
 	if (!netif_running(ndev))  {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 55007f1..caf6ddb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 63
-#define QLCNIC_LINUX_VERSIONID  "5.3.63"
+#define _QLCNIC_LINUX_SUBVERSION 64
+#define QLCNIC_LINUX_VERSIONID  "5.3.64"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1205f6f..1c29105 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -3952,8 +3952,14 @@
 
 static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
 {
-	return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
-				PCI_ERS_RESULT_RECOVERED;
+	pci_ers_result_t res;
+
+	rtnl_lock();
+	res = qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
+					 PCI_ERS_RESULT_RECOVERED;
+	rtnl_unlock();
+
+	return res;
 }
 
 static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index cda9e60..0844b7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1417,6 +1417,7 @@
 	struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
 	struct pci_dev *pdev = adapter->pdev;
 	bool extended = false;
+	int ret;
 
 	prev_version = adapter->fw_version;
 	current_version = qlcnic_83xx_get_fw_version(adapter);
@@ -1427,8 +1428,11 @@
 		if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
 			extended = !qlcnic_83xx_extend_md_capab(adapter);
 
-		if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
-			dev_info(&pdev->dev, "Supports FW dump capability\n");
+		ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
+		if (ret)
+			return;
+
+		dev_info(&pdev->dev, "Supports FW dump capability\n");
 
 		/* Once we have minidump template with extended iSCSI dump
 		 * capability, update the minidump capture mask to 0x1f as
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index ef33270..6d31f92 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME  	"qlge"
 #define DRV_STRING 	"QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION	"1.00.00.34"
+#define DRV_VERSION	"1.00.00.35"
 
 #define WQ_ADDR_ALIGN	0x3	/* 4 byte alignment */
 
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b28e73e..83d7210 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4687,7 +4687,7 @@
 	/*
 	 * Set up the operating parameters.
 	 */
-	qdev->workqueue = create_singlethread_workqueue(ndev->name);
+	qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 1ef0393..6e2add9 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -719,7 +719,7 @@
 		qca->stats.ring_full++;
 	}
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	if (qca->spi_thread &&
 	    qca->spi_thread->state != TASK_RUNNING)
@@ -734,7 +734,7 @@
 	struct qcaspi *qca = netdev_priv(dev);
 
 	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
-		    jiffies, jiffies - dev->trans_start);
+		    jiffies, jiffies - dev_trans_start(dev));
 	qca->net_dev->stats.tx_errors++;
 	/* Trigger tx queue flush and QCA7000 reset */
 	qca->sync = QCASPI_SYNC_UNKNOWN;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index d77d60e..5cb9678 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -544,7 +544,7 @@
 	dev->stats.tx_errors++;
 	/* Try to restart the adapter. */
 	hardware_init(dev);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 	dev->stats.tx_errors++;
 }
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 94f08f1..0e62d74 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -345,7 +345,7 @@
 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 
 static int rx_buf_sz = 16383;
-static int use_dac;
+static int use_dac = -1;
 static struct {
 	u32 msg_enable;
 } debug = { -1 };
@@ -8224,20 +8224,6 @@
 		goto err_out_mwi_2;
 	}
 
-	tp->cp_cmd = 0;
-
-	if ((sizeof(dma_addr_t) > 4) &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
-		tp->cp_cmd |= PCIDAC;
-		dev->features |= NETIF_F_HIGHDMA;
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc < 0) {
-			netif_err(tp, probe, dev, "DMA configuration failed\n");
-			goto err_out_free_res_3;
-		}
-	}
-
 	/* ioremap MMIO region */
 	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
 	if (!ioaddr) {
@@ -8253,6 +8239,25 @@
 	/* Identify chip attached to board */
 	rtl8169_get_mac_version(tp, dev, cfg->default_ver);
 
+	tp->cp_cmd = 0;
+
+	if ((sizeof(dma_addr_t) > 4) &&
+	    (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
+			      tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
+	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+
+		/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
+		if (!pci_is_pcie(pdev))
+			tp->cp_cmd |= PCIDAC;
+		dev->features |= NETIF_F_HIGHDMA;
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc < 0) {
+			netif_err(tp, probe, dev, "DMA configuration failed\n");
+			goto err_out_unmap_4;
+		}
+	}
+
 	rtl_init_rxcfg(tp);
 
 	rtl_irq_disable(tp);
@@ -8412,12 +8417,12 @@
 					   &tp->counters_phys_addr, GFP_KERNEL);
 	if (!tp->counters) {
 		rc = -ENOMEM;
-		goto err_out_msi_4;
+		goto err_out_msi_5;
 	}
 
 	rc = register_netdev(dev);
 	if (rc < 0)
-		goto err_out_cnt_5;
+		goto err_out_cnt_6;
 
 	pci_set_drvdata(pdev, dev);
 
@@ -8451,12 +8456,13 @@
 out:
 	return rc;
 
-err_out_cnt_5:
+err_out_cnt_6:
 	dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
 			  tp->counters_phys_addr);
-err_out_msi_4:
+err_out_msi_5:
 	netif_napi_del(&tp->napi);
 	rtl_disable_msi(pdev, tp);
+err_out_unmap_4:
 	iounmap(ioaddr);
 err_out_free_res_3:
 	pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b2160d1..4e5d5e9 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -157,6 +157,7 @@
 	TIC	= 0x0378,
 	TIS	= 0x037C,
 	ISS	= 0x0380,
+	CIE	= 0x0384,	/* R-Car Gen3 only */
 	GCCR	= 0x0390,
 	GMTT	= 0x0394,
 	GPTC	= 0x0398,
@@ -170,6 +171,15 @@
 	GCT0	= 0x03B8,
 	GCT1	= 0x03BC,
 	GCT2	= 0x03C0,
+	GIE	= 0x03CC,	/* R-Car Gen3 only */
+	GID	= 0x03D0,	/* R-Car Gen3 only */
+	DIL	= 0x0440,	/* R-Car Gen3 only */
+	RIE0	= 0x0460,	/* R-Car Gen3 only */
+	RID0	= 0x0464,	/* R-Car Gen3 only */
+	RIE2	= 0x0470,	/* R-Car Gen3 only */
+	RID2	= 0x0474,	/* R-Car Gen3 only */
+	TIE	= 0x0478,	/* R-Car Gen3 only */
+	TID	= 0x047c,	/* R-Car Gen3 only */
 
 	/* E-MAC registers */
 	ECMR	= 0x0500,
@@ -556,6 +566,16 @@
 	ISS_DPS15	= 0x80000000,
 };
 
+/* CIE (R-Car Gen3 only) */
+enum CIE_BIT {
+	CIE_CRIE	= 0x00000001,
+	CIE_CTIE	= 0x00000100,
+	CIE_RQFM	= 0x00010000,
+	CIE_CL0M	= 0x00020000,
+	CIE_RFWL	= 0x00040000,
+	CIE_RFFL	= 0x00080000,
+};
+
 /* GCCR */
 enum GCCR_BIT {
 	GCCR_TCR	= 0x00000003,
@@ -592,6 +612,188 @@
 	GIS_PTMF	= 0x00000004,
 };
 
+/* GIE (R-Car Gen3 only) */
+enum GIE_BIT {
+	GIE_PTCS	= 0x00000001,
+	GIE_PTOS	= 0x00000002,
+	GIE_PTMS0	= 0x00000004,
+	GIE_PTMS1	= 0x00000008,
+	GIE_PTMS2	= 0x00000010,
+	GIE_PTMS3	= 0x00000020,
+	GIE_PTMS4	= 0x00000040,
+	GIE_PTMS5	= 0x00000080,
+	GIE_PTMS6	= 0x00000100,
+	GIE_PTMS7	= 0x00000200,
+	GIE_ATCS0	= 0x00010000,
+	GIE_ATCS1	= 0x00020000,
+	GIE_ATCS2	= 0x00040000,
+	GIE_ATCS3	= 0x00080000,
+	GIE_ATCS4	= 0x00100000,
+	GIE_ATCS5	= 0x00200000,
+	GIE_ATCS6	= 0x00400000,
+	GIE_ATCS7	= 0x00800000,
+	GIE_ATCS8	= 0x01000000,
+	GIE_ATCS9	= 0x02000000,
+	GIE_ATCS10	= 0x04000000,
+	GIE_ATCS11	= 0x08000000,
+	GIE_ATCS12	= 0x10000000,
+	GIE_ATCS13	= 0x20000000,
+	GIE_ATCS14	= 0x40000000,
+	GIE_ATCS15	= 0x80000000,
+};
+
+/* GID (R-Car Gen3 only) */
+enum GID_BIT {
+	GID_PTCD	= 0x00000001,
+	GID_PTOD	= 0x00000002,
+	GID_PTMD0	= 0x00000004,
+	GID_PTMD1	= 0x00000008,
+	GID_PTMD2	= 0x00000010,
+	GID_PTMD3	= 0x00000020,
+	GID_PTMD4	= 0x00000040,
+	GID_PTMD5	= 0x00000080,
+	GID_PTMD6	= 0x00000100,
+	GID_PTMD7	= 0x00000200,
+	GID_ATCD0	= 0x00010000,
+	GID_ATCD1	= 0x00020000,
+	GID_ATCD2	= 0x00040000,
+	GID_ATCD3	= 0x00080000,
+	GID_ATCD4	= 0x00100000,
+	GID_ATCD5	= 0x00200000,
+	GID_ATCD6	= 0x00400000,
+	GID_ATCD7	= 0x00800000,
+	GID_ATCD8	= 0x01000000,
+	GID_ATCD9	= 0x02000000,
+	GID_ATCD10	= 0x04000000,
+	GID_ATCD11	= 0x08000000,
+	GID_ATCD12	= 0x10000000,
+	GID_ATCD13	= 0x20000000,
+	GID_ATCD14	= 0x40000000,
+	GID_ATCD15	= 0x80000000,
+};
+
+/* RIE0 (R-Car Gen3 only) */
+enum RIE0_BIT {
+	RIE0_FRS0	= 0x00000001,
+	RIE0_FRS1	= 0x00000002,
+	RIE0_FRS2	= 0x00000004,
+	RIE0_FRS3	= 0x00000008,
+	RIE0_FRS4	= 0x00000010,
+	RIE0_FRS5	= 0x00000020,
+	RIE0_FRS6	= 0x00000040,
+	RIE0_FRS7	= 0x00000080,
+	RIE0_FRS8	= 0x00000100,
+	RIE0_FRS9	= 0x00000200,
+	RIE0_FRS10	= 0x00000400,
+	RIE0_FRS11	= 0x00000800,
+	RIE0_FRS12	= 0x00001000,
+	RIE0_FRS13	= 0x00002000,
+	RIE0_FRS14	= 0x00004000,
+	RIE0_FRS15	= 0x00008000,
+	RIE0_FRS16	= 0x00010000,
+	RIE0_FRS17	= 0x00020000,
+};
+
+/* RID0 (R-Car Gen3 only) */
+enum RID0_BIT {
+	RID0_FRD0	= 0x00000001,
+	RID0_FRD1	= 0x00000002,
+	RID0_FRD2	= 0x00000004,
+	RID0_FRD3	= 0x00000008,
+	RID0_FRD4	= 0x00000010,
+	RID0_FRD5	= 0x00000020,
+	RID0_FRD6	= 0x00000040,
+	RID0_FRD7	= 0x00000080,
+	RID0_FRD8	= 0x00000100,
+	RID0_FRD9	= 0x00000200,
+	RID0_FRD10	= 0x00000400,
+	RID0_FRD11	= 0x00000800,
+	RID0_FRD12	= 0x00001000,
+	RID0_FRD13	= 0x00002000,
+	RID0_FRD14	= 0x00004000,
+	RID0_FRD15	= 0x00008000,
+	RID0_FRD16	= 0x00010000,
+	RID0_FRD17	= 0x00020000,
+};
+
+/* RIE2 (R-Car Gen3 only) */
+enum RIE2_BIT {
+	RIE2_QFS0	= 0x00000001,
+	RIE2_QFS1	= 0x00000002,
+	RIE2_QFS2	= 0x00000004,
+	RIE2_QFS3	= 0x00000008,
+	RIE2_QFS4	= 0x00000010,
+	RIE2_QFS5	= 0x00000020,
+	RIE2_QFS6	= 0x00000040,
+	RIE2_QFS7	= 0x00000080,
+	RIE2_QFS8	= 0x00000100,
+	RIE2_QFS9	= 0x00000200,
+	RIE2_QFS10	= 0x00000400,
+	RIE2_QFS11	= 0x00000800,
+	RIE2_QFS12	= 0x00001000,
+	RIE2_QFS13	= 0x00002000,
+	RIE2_QFS14	= 0x00004000,
+	RIE2_QFS15	= 0x00008000,
+	RIE2_QFS16	= 0x00010000,
+	RIE2_QFS17	= 0x00020000,
+	RIE2_RFFS	= 0x80000000,
+};
+
+/* RID2 (R-Car Gen3 only) */
+enum RID2_BIT {
+	RID2_QFD0	= 0x00000001,
+	RID2_QFD1	= 0x00000002,
+	RID2_QFD2	= 0x00000004,
+	RID2_QFD3	= 0x00000008,
+	RID2_QFD4	= 0x00000010,
+	RID2_QFD5	= 0x00000020,
+	RID2_QFD6	= 0x00000040,
+	RID2_QFD7	= 0x00000080,
+	RID2_QFD8	= 0x00000100,
+	RID2_QFD9	= 0x00000200,
+	RID2_QFD10	= 0x00000400,
+	RID2_QFD11	= 0x00000800,
+	RID2_QFD12	= 0x00001000,
+	RID2_QFD13	= 0x00002000,
+	RID2_QFD14	= 0x00004000,
+	RID2_QFD15	= 0x00008000,
+	RID2_QFD16	= 0x00010000,
+	RID2_QFD17	= 0x00020000,
+	RID2_RFFD	= 0x80000000,
+};
+
+/* TIE (R-Car Gen3 only) */
+enum TIE_BIT {
+	TIE_FTS0	= 0x00000001,
+	TIE_FTS1	= 0x00000002,
+	TIE_FTS2	= 0x00000004,
+	TIE_FTS3	= 0x00000008,
+	TIE_TFUS	= 0x00000100,
+	TIE_TFWS	= 0x00000200,
+	TIE_MFUS	= 0x00000400,
+	TIE_MFWS	= 0x00000800,
+	TIE_TDPS0	= 0x00010000,
+	TIE_TDPS1	= 0x00020000,
+	TIE_TDPS2	= 0x00040000,
+	TIE_TDPS3	= 0x00080000,
+};
+
+/* TID (R-Car Gen3 only) */
+enum TID_BIT {
+	TID_FTD0	= 0x00000001,
+	TID_FTD1	= 0x00000002,
+	TID_FTD2	= 0x00000004,
+	TID_FTD3	= 0x00000008,
+	TID_TFUD	= 0x00000100,
+	TID_TFWD	= 0x00000200,
+	TID_MFUD	= 0x00000400,
+	TID_MFWD	= 0x00000800,
+	TID_TDPD0	= 0x00010000,
+	TID_TDPD1	= 0x00020000,
+	TID_TDPD2	= 0x00040000,
+	TID_TDPD3	= 0x00080000,
+};
+
 /* ECMR */
 enum ECMR_BIT {
 	ECMR_PRM	= 0x00000001,
@@ -817,6 +1019,8 @@
 	int duplex;
 	int emac_irq;
 	enum ravb_chip_id chip_id;
+	int rx_irqs[NUM_RX_QUEUE];
+	int tx_irqs[NUM_TX_QUEUE];
 
 	unsigned no_avb_link:1;
 	unsigned avb_link_active_low:1;
@@ -841,7 +1045,7 @@
 		 u32 set);
 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
 
-irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
+void ravb_ptp_interrupt(struct net_device *ndev);
 void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev);
 void ravb_ptp_stop(struct net_device *ndev);
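
The Gen3-only registers added to ravb.h come in enable/disable pairs (GIE/GID, RIE0/RID0, RIE2/RID2, TIE/TID), which is what lets ravb_queue_interrupt() and ravb_poll() below mask or unmask a single queue with one write of BIT(q) instead of a locked read-modify-write on a shared enable register. A standalone sketch of the difference, with a plain variable standing in for the hardware register:

	#include <stdio.h>
	#include <stdint.h>

	#define BIT(n)	(1U << (n))

	/* Software model of one interrupt-enable register. */
	static uint32_t hw_enable = BIT(0) | BIT(1);

	/* Gen2 style: read the enable register, clear the bit, write it back. */
	static void gen2_mask_queue(unsigned int q)
	{
		uint32_t val = hw_enable;	/* ravb_read(ndev, RIC0/TIC) */
		hw_enable = val & ~BIT(q);	/* ravb_write(ndev, val, RIC0/TIC) */
	}

	/* Gen3 style: write BIT(q) to the matching disable register (RID0/TID);
	 * the hardware clears the enable bit itself, no read needed. */
	static void gen3_mask_queue(unsigned int q)
	{
		uint32_t written = BIT(q);	/* ravb_write(ndev, BIT(q), RID0/TID) */
		hw_enable &= ~written;		/* effect applied by the hardware */
	}

	int main(void)
	{
		gen2_mask_queue(0);
		printf("after gen2-style mask: %#x\n", hw_enable);	/* 0x2 */

		hw_enable = BIT(0) | BIT(1);
		gen3_mask_queue(1);
		printf("after gen3-style mask: %#x\n", hw_enable);	/* 0x1 */
		return 0;
	}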
 
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4e1a7db..867caf6 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -42,6 +42,16 @@
 		 NETIF_MSG_RX_ERR | \
 		 NETIF_MSG_TX_ERR)
 
+static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
+	"ch0", /* RAVB_BE */
+	"ch1", /* RAVB_NC */
+};
+
+static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
+	"ch18", /* RAVB_BE */
+	"ch19", /* RAVB_NC */
+};
+
 void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
 		 u32 set)
 {
@@ -236,10 +246,9 @@
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
-		/* The size of the buffer should be on 16-byte boundary. */
-		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+		rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  ALIGN(PKT_BUF_SZ, 16),
+					  PKT_BUF_SZ,
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
@@ -365,6 +374,7 @@
 /* Device init function for Ethernet AVB */
 static int ravb_dmac_init(struct net_device *ndev)
 {
+	struct ravb_private *priv = netdev_priv(ndev);
 	int error;
 
 	/* Set CONFIG mode */
@@ -401,6 +411,12 @@
 	ravb_write(ndev, TCCR_TFEN, TCCR);
 
 	/* Interrupt init: */
+	if (priv->chip_id == RCAR_GEN3) {
+		/* Clear DIL.DPLx */
+		ravb_write(ndev, 0, DIL);
+		/* Set queue specific interrupt */
+		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
+	}
 	/* Frame receive */
 	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
 	/* Disable FIFO full warning */
@@ -541,7 +557,7 @@
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 ALIGN(PKT_BUF_SZ, 16),
+					 PKT_BUF_SZ,
 					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
 					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -571,8 +587,7 @@
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 		desc = &priv->rx_ring[q][entry];
-		/* The size of the buffer should be on 16-byte boundary. */
-		desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+		desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
 
 		if (!priv->rx_skb[q][entry]) {
 			skb = netdev_alloc_skb(ndev,
@@ -643,7 +658,7 @@
 }
 
 /* E-MAC interrupt handler */
-static void ravb_emac_interrupt(struct net_device *ndev)
+static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	u32 ecsr, psr;
@@ -669,6 +684,18 @@
 	}
 }
 
+static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = dev_id;
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	spin_lock(&priv->lock);
+	ravb_emac_interrupt_unlocked(ndev);
+	mmiowb();
+	spin_unlock(&priv->lock);
+	return IRQ_HANDLED;
+}
+
 /* Error interrupt handler */
 static void ravb_error_interrupt(struct net_device *ndev)
 {
@@ -695,6 +722,50 @@
 	}
 }
 
+static bool ravb_queue_interrupt(struct net_device *ndev, int q)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	u32 ris0 = ravb_read(ndev, RIS0);
+	u32 ric0 = ravb_read(ndev, RIC0);
+	u32 tis  = ravb_read(ndev, TIS);
+	u32 tic  = ravb_read(ndev, TIC);
+
+	if (((ris0 & ric0) & BIT(q)) || ((tis  & tic)  & BIT(q))) {
+		if (napi_schedule_prep(&priv->napi[q])) {
+			/* Mask RX and TX interrupts */
+			if (priv->chip_id == RCAR_GEN2) {
+				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
+				ravb_write(ndev, tic & ~BIT(q), TIC);
+			} else {
+				ravb_write(ndev, BIT(q), RID0);
+				ravb_write(ndev, BIT(q), TID);
+			}
+			__napi_schedule(&priv->napi[q]);
+		} else {
+			netdev_warn(ndev,
+				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
+				    ris0, ric0);
+			netdev_warn(ndev,
+				    "                    tx status 0x%08x, tx mask 0x%08x.\n",
+				    tis, tic);
+		}
+		return true;
+	}
+	return false;
+}
+
+static bool ravb_timestamp_interrupt(struct net_device *ndev)
+{
+	u32 tis = ravb_read(ndev, TIS);
+
+	if (tis & TIS_TFUF) {
+		ravb_write(ndev, ~TIS_TFUF, TIS);
+		ravb_get_tx_tstamp(ndev);
+		return true;
+	}
+	return false;
+}
+
 static irqreturn_t ravb_interrupt(int irq, void *dev_id)
 {
 	struct net_device *ndev = dev_id;
@@ -708,46 +779,22 @@
 
 	/* Received and transmitted interrupts */
 	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
-		u32 ris0 = ravb_read(ndev, RIS0);
-		u32 ric0 = ravb_read(ndev, RIC0);
-		u32 tis  = ravb_read(ndev, TIS);
-		u32 tic  = ravb_read(ndev, TIC);
 		int q;
 
 		/* Timestamp updated */
-		if (tis & TIS_TFUF) {
-			ravb_write(ndev, ~TIS_TFUF, TIS);
-			ravb_get_tx_tstamp(ndev);
+		if (ravb_timestamp_interrupt(ndev))
 			result = IRQ_HANDLED;
-		}
 
 		/* Network control and best effort queue RX/TX */
 		for (q = RAVB_NC; q >= RAVB_BE; q--) {
-			if (((ris0 & ric0) & BIT(q)) ||
-			    ((tis  & tic)  & BIT(q))) {
-				if (napi_schedule_prep(&priv->napi[q])) {
-					/* Mask RX and TX interrupts */
-					ric0 &= ~BIT(q);
-					tic &= ~BIT(q);
-					ravb_write(ndev, ric0, RIC0);
-					ravb_write(ndev, tic, TIC);
-					__napi_schedule(&priv->napi[q]);
-				} else {
-					netdev_warn(ndev,
-						    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
-						    ris0, ric0);
-					netdev_warn(ndev,
-						    "                    tx status 0x%08x, tx mask 0x%08x.\n",
-						    tis, tic);
-				}
+			if (ravb_queue_interrupt(ndev, q))
 				result = IRQ_HANDLED;
-			}
 		}
 	}
 
 	/* E-MAC status summary */
 	if (iss & ISS_MS) {
-		ravb_emac_interrupt(ndev);
+		ravb_emac_interrupt_unlocked(ndev);
 		result = IRQ_HANDLED;
 	}
 
@@ -757,7 +804,60 @@
 		result = IRQ_HANDLED;
 	}
 
-	if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED)
+	/* gPTP interrupt status summary */
+	if (iss & ISS_CGIS) {
+		ravb_ptp_interrupt(ndev);
+		result = IRQ_HANDLED;
+	}
+
+	mmiowb();
+	spin_unlock(&priv->lock);
+	return result;
+}
+
+/* Timestamp/Error/gPTP interrupt handler */
+static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = dev_id;
+	struct ravb_private *priv = netdev_priv(ndev);
+	irqreturn_t result = IRQ_NONE;
+	u32 iss;
+
+	spin_lock(&priv->lock);
+	/* Get interrupt status */
+	iss = ravb_read(ndev, ISS);
+
+	/* Timestamp updated */
+	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
+		result = IRQ_HANDLED;
+
+	/* Error status summary */
+	if (iss & ISS_ES) {
+		ravb_error_interrupt(ndev);
+		result = IRQ_HANDLED;
+	}
+
+	/* gPTP interrupt status summary */
+	if (iss & ISS_CGIS) {
+		ravb_ptp_interrupt(ndev);
+		result = IRQ_HANDLED;
+	}
+
+	mmiowb();
+	spin_unlock(&priv->lock);
+	return result;
+}
+
+static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
+{
+	struct net_device *ndev = dev_id;
+	struct ravb_private *priv = netdev_priv(ndev);
+	irqreturn_t result = IRQ_NONE;
+
+	spin_lock(&priv->lock);
+
+	/* Network control/Best effort queue RX/TX */
+	if (ravb_queue_interrupt(ndev, q))
 		result = IRQ_HANDLED;
 
 	mmiowb();
@@ -765,6 +865,16 @@
 	return result;
 }
 
+static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
+{
+	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
+}
+
+static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
+{
+	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
+}
+
 static int ravb_poll(struct napi_struct *napi, int budget)
 {
 	struct net_device *ndev = napi->dev;
@@ -804,8 +914,13 @@
 
 	/* Re-enable RX/TX interrupts */
 	spin_lock_irqsave(&priv->lock, flags);
-	ravb_modify(ndev, RIC0, mask, mask);
-	ravb_modify(ndev, TIC,  mask, mask);
+	if (priv->chip_id == RCAR_GEN2) {
+		ravb_modify(ndev, RIC0, mask, mask);
+		ravb_modify(ndev, TIC,  mask, mask);
+	} else {
+		ravb_write(ndev, mask, RIE0);
+		ravb_write(ndev, mask, TIE);
+	}
 	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1208,35 +1323,72 @@
 	.get_ts_info		= ravb_get_ts_info,
 };
 
+static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
+				struct net_device *ndev, struct device *dev,
+				const char *ch)
+{
+	char *name;
+	int error;
+
+	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
+	if (!name)
+		return -ENOMEM;
+	error = request_irq(irq, handler, 0, name, ndev);
+	if (error)
+		netdev_err(ndev, "cannot request IRQ %s\n", name);
+
+	return error;
+}
+
 /* Network device open function for Ethernet AVB */
 static int ravb_open(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	struct platform_device *pdev = priv->pdev;
+	struct device *dev = &pdev->dev;
 	int error;
 
 	napi_enable(&priv->napi[RAVB_BE]);
 	napi_enable(&priv->napi[RAVB_NC]);
 
-	error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
-			    ndev);
-	if (error) {
-		netdev_err(ndev, "cannot request IRQ\n");
-		goto out_napi_off;
-	}
-
-	if (priv->chip_id == RCAR_GEN3) {
-		error = request_irq(priv->emac_irq, ravb_interrupt,
-				    IRQF_SHARED, ndev->name, ndev);
+	if (priv->chip_id == RCAR_GEN2) {
+		error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
+				    ndev->name, ndev);
 		if (error) {
 			netdev_err(ndev, "cannot request IRQ\n");
-			goto out_free_irq;
+			goto out_napi_off;
 		}
+	} else {
+		error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
+				      dev, "ch22:multi");
+		if (error)
+			goto out_napi_off;
+		error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
+				      dev, "ch24:emac");
+		if (error)
+			goto out_free_irq;
+		error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
+				      ndev, dev, "ch0:rx_be");
+		if (error)
+			goto out_free_irq_emac;
+		error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
+				      ndev, dev, "ch18:tx_be");
+		if (error)
+			goto out_free_irq_be_rx;
+		error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
+				      ndev, dev, "ch1:rx_nc");
+		if (error)
+			goto out_free_irq_be_tx;
+		error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
+				      ndev, dev, "ch19:tx_nc");
+		if (error)
+			goto out_free_irq_nc_rx;
 	}
 
 	/* Device init */
 	error = ravb_dmac_init(ndev);
 	if (error)
-		goto out_free_irq2;
+		goto out_free_irq_nc_tx;
 	ravb_emac_init(ndev);
 
 	/* Initialise PTP Clock driver */
@@ -1256,9 +1408,18 @@
 	/* Stop PTP Clock driver */
 	if (priv->chip_id == RCAR_GEN2)
 		ravb_ptp_stop(ndev);
-out_free_irq2:
-	if (priv->chip_id == RCAR_GEN3)
-		free_irq(priv->emac_irq, ndev);
+out_free_irq_nc_tx:
+	if (priv->chip_id == RCAR_GEN2)
+		goto out_free_irq;
+	free_irq(priv->tx_irqs[RAVB_NC], ndev);
+out_free_irq_nc_rx:
+	free_irq(priv->rx_irqs[RAVB_NC], ndev);
+out_free_irq_be_tx:
+	free_irq(priv->tx_irqs[RAVB_BE], ndev);
+out_free_irq_be_rx:
+	free_irq(priv->rx_irqs[RAVB_BE], ndev);
+out_free_irq_emac:
+	free_irq(priv->emac_irq, ndev);
 out_free_irq:
 	free_irq(ndev->irq, ndev);
 out_napi_off:
@@ -1377,11 +1538,11 @@
 
 		/* TAG and timestamp required flag */
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-		skb_tx_timestamp(skb);
 		desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
 		desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
 	}
 
+	skb_tx_timestamp(skb);
 	/* Descriptor type must be set after all the above writes */
 	dma_wmb();
 	desc->die_dt = DT_FEND;
@@ -1506,6 +1667,13 @@
 		priv->phydev = NULL;
 	}
 
+	if (priv->chip_id != RCAR_GEN2) {
+		free_irq(priv->tx_irqs[RAVB_NC], ndev);
+		free_irq(priv->rx_irqs[RAVB_NC], ndev);
+		free_irq(priv->tx_irqs[RAVB_BE], ndev);
+		free_irq(priv->rx_irqs[RAVB_BE], ndev);
+		free_irq(priv->emac_irq, ndev);
+	}
 	free_irq(ndev->irq, ndev);
 
 	napi_disable(&priv->napi[RAVB_NC]);
@@ -1691,6 +1859,9 @@
 	rate = clk_get_rate(clk);
 	clk_put(clk);
 
+	if (!rate)
+		return -EINVAL;
+
 	inc = 1000000000ULL << 20;
 	do_div(inc, rate);
 
@@ -1713,6 +1884,7 @@
 	struct net_device *ndev;
 	int error, irq, q;
 	struct resource *res;
+	int i;
 
 	if (!np) {
 		dev_err(&pdev->dev,
@@ -1782,6 +1954,22 @@
 			goto out_release;
 		}
 		priv->emac_irq = irq;
+		for (i = 0; i < NUM_RX_QUEUE; i++) {
+			irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
+			if (irq < 0) {
+				error = irq;
+				goto out_release;
+			}
+			priv->rx_irqs[i] = irq;
+		}
+		for (i = 0; i < NUM_TX_QUEUE; i++) {
+			irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
+			if (irq < 0) {
+				error = irq;
+				goto out_release;
+			}
+			priv->tx_irqs[i] = irq;
+		}
 	}
 
 	priv->chip_id = chip_id;
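
With the interrupts split per channel, ravb_open() now requests one line for each Gen3 channel and names it via devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch). A small user-space sketch of the resulting names as they would appear in /proc/interrupts; "eth0" is only a stand-in for ndev->name:

	#include <stdio.h>

	/* Channel strings the driver passes to ravb_hook_irq() on Gen3. */
	static const char *const ravb_gen3_irq_names[] = {
		"ch22:multi", "ch24:emac", "ch0:rx_be", "ch18:tx_be",
		"ch1:rx_nc", "ch19:tx_nc",
	};

	int main(void)
	{
		char name[32];
		unsigned int i;

		/* devm_kasprintf() builds names of this shape in the driver. */
		for (i = 0; i < sizeof(ravb_gen3_irq_names) / sizeof(ravb_gen3_irq_names[0]); i++) {
			snprintf(name, sizeof(name), "%s:%s", "eth0",
				 ravb_gen3_irq_names[i]);
			printf("%s\n", name);
		}
		return 0;
	}
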
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 57992cc..eede70e 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -194,7 +194,12 @@
 	priv->ptp.extts[req->index] = on;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
+	if (priv->chip_id == RCAR_GEN2)
+		ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
+	else if (on)
+		ravb_write(ndev, GIE_PTCS, GIE);
+	else
+		ravb_write(ndev, GID_PTCD, GID);
 	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -241,7 +246,10 @@
 		error = ravb_ptp_update_compare(priv, (u32)start_ns);
 		if (!error) {
 			/* Unmask interrupt */
-			ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
+			if (priv->chip_id == RCAR_GEN2)
+				ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
+			else
+				ravb_write(ndev, GIE_PTMS0, GIE);
 		}
 	} else	{
 		spin_lock_irqsave(&priv->lock, flags);
@@ -250,7 +258,10 @@
 		perout->period = 0;
 
 		/* Mask interrupt */
-		ravb_modify(ndev, GIC, GIC_PTME, 0);
+		if (priv->chip_id == RCAR_GEN2)
+			ravb_modify(ndev, GIC, GIC_PTME, 0);
+		else
+			ravb_write(ndev, GID_PTMD0, GID);
 	}
 	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -285,7 +296,7 @@
 };
 
 /* Caller must hold the lock */
-irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
+void ravb_ptp_interrupt(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	u32 gis = ravb_read(ndev, GIS);
@@ -308,12 +319,7 @@
 		}
 	}
 
-	if (gis) {
-		ravb_write(ndev, ~gis, GIS);
-		return IRQ_HANDLED;
-	}
-
-	return IRQ_NONE;
+	ravb_write(ndev, ~gis, GIS);
 }
 
 void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
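
ravb_ptp_interrupt() no longer returns an irqreturn_t: it always acknowledges GIS with a ~gis write and leaves the IRQ_HANDLED decision to the callers, which already test ISS_CGIS. A user-space model of that acknowledge, assuming the usual ravb status-register behaviour (a bit is cleared by writing 0 and left alone by writing 1):

	#include <stdio.h>
	#include <stdint.h>

	/* Model of GIS: writing 0 to a bit clears it, writing 1 leaves it alone. */
	static uint32_t gis_status = 0x5;	/* two gPTP events pending */

	static void gis_ack(uint32_t snapshot)
	{
		/* ravb_write(ndev, ~gis, GIS): only the bits captured in the
		 * snapshot are written as 0, so an event that arrived after the
		 * read stays pending for the next interrupt. */
		gis_status &= ~snapshot;
	}

	int main(void)
	{
		uint32_t gis = gis_status;	/* ravb_read(ndev, GIS) */

		gis_status |= 0x2;		/* a new event races in */
		gis_ack(gis);
		printf("still pending: %#x\n", gis_status);	/* 0x2 */
		return 0;
	}
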
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 004e2d7..04cd39f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -482,7 +482,7 @@
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 
 	/* reset device */
-	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
+	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
 	mdelay(1);
 }
 
@@ -537,11 +537,7 @@
 
 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
 {
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-
-	/* reset device */
-	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
-	mdelay(1);
+	sh_eth_chip_reset(ndev);
 
 	sh_eth_select_mii(ndev);
 }
@@ -725,8 +721,8 @@
 #define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
 static void sh_eth_chip_reset_giga(struct net_device *ndev)
 {
-	int i;
 	u32 mahr[2], malr[2];
+	int i;
 
 	/* save MAHR and MALR */
 	for (i = 0; i < 2; i++) {
@@ -734,9 +730,7 @@
 		mahr[i] = ioread32((void *)GIGA_MAHR(i));
 	}
 
-	/* reset device */
-	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
-	mdelay(1);
+	sh_eth_chip_reset(ndev);
 
 	/* restore MAHR and MALR */
 	for (i = 0; i < 2; i++) {
@@ -899,7 +893,7 @@
 	int cnt = 100;
 
 	while (cnt > 0) {
-		if (!(sh_eth_read(ndev, EDMR) & 0x3))
+		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
 			break;
 		mdelay(1);
 		cnt--;
@@ -1229,7 +1223,7 @@
 	return -ENOMEM;
 }
 
-static int sh_eth_dev_init(struct net_device *ndev, bool start)
+static int sh_eth_dev_init(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int ret;
@@ -1279,10 +1273,8 @@
 		     RFLR);
 
 	sh_eth_modify(ndev, EESR, 0, 0);
-	if (start) {
-		mdp->irq_enabled = true;
-		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
-	}
+	mdp->irq_enabled = true;
+	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 
 	/* PAUSE Prohibition */
 	sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
@@ -1295,8 +1287,7 @@
 	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
 
 	/* E-MAC Interrupt Enable register */
-	if (start)
-		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
 
 	/* Set MAC address */
 	update_mac_address(ndev);
@@ -1309,10 +1300,8 @@
 	if (mdp->cd->tpauser)
 		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
 
-	if (start) {
-		/* Setting the Rx mode will start the Rx process. */
-		sh_eth_write(ndev, EDRRR_R, EDRRR);
-	}
+	/* Setting the Rx mode will start the Rx process. */
+	sh_eth_write(ndev, EDRRR_R, EDRRR);
 
 	return ret;
 }
@@ -2194,17 +2183,13 @@
 				   __func__);
 			return ret;
 		}
-		ret = sh_eth_dev_init(ndev, false);
+		ret = sh_eth_dev_init(ndev);
 		if (ret < 0) {
 			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
 				   __func__);
 			return ret;
 		}
 
-		mdp->irq_enabled = true;
-		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
-		/* Setting the Rx mode will start the Rx process. */
-		sh_eth_write(ndev, EDRRR_R, EDRRR);
 		netif_device_attach(ndev);
 	}
 
@@ -2250,7 +2235,7 @@
 		goto out_free_irq;
 
 	/* device init */
-	ret = sh_eth_dev_init(ndev, true);
+	ret = sh_eth_dev_init(ndev);
 	if (ret)
 		goto out_free_irq;
 
@@ -2303,7 +2288,7 @@
 	}
 
 	/* device init */
-	sh_eth_dev_init(ndev, true);
+	sh_eth_dev_init(ndev);
 
 	netif_start_queue(ndev);
 }
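
Besides folding the duplicated chip-reset sequences into sh_eth_chip_reset() and dropping the "start" flag from sh_eth_dev_init(), the patch replaces the bare 0x3 in sh_eth_check_reset() with the named EDMR_SRST_GETHER mask. A standalone sketch of the same bounded poll; the register model below is fake and only exists to make the loop terminate:

	#include <stdio.h>

	#define EDMR_SRST_GETHER	0x03	/* named mask instead of a bare 0x3 */

	/* Fake EDMR read that drops the soft-reset bits after a few polls. */
	static unsigned int fake_read_edmr(void)
	{
		static int polls;

		return ++polls >= 3 ? 0 : EDMR_SRST_GETHER;
	}

	int main(void)
	{
		int cnt = 100;	/* same bound as sh_eth_check_reset() */

		while (cnt > 0) {
			if (!(fake_read_edmr() & EDMR_SRST_GETHER))
				break;
			cnt--;	/* the driver also does mdelay(1) here */
		}
		printf("reset %s\n", cnt ? "complete" : "timed out");
		return 0;
	}
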
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 8fa4ef3..c62380e 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -394,7 +394,7 @@
 #define DEFAULT_FDR_INIT	0x00000707
 
 /* ARSTR */
-enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
+enum ARSTR_BIT { ARSTR_ARST = 0x00000001, };
 
 /* TSU_FWEN0 */
 enum TSU_FWEN0_BIT {
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 0e758bc..1ca7963 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2727,7 +2727,7 @@
 
 	return ofdpa_port_fib_ipv4(ofdpa_port, trans,
 				   htonl(fib4->dst), fib4->dst_len,
-				   &fib4->fi, fib4->tb_id, 0);
+				   fib4->fi, fib4->tb_id, 0);
 }
 
 static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
@@ -2737,7 +2737,7 @@
 
 	return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
 				   htonl(fib4->dst), fib4->dst_len,
-				   &fib4->fi, fib4->tb_id,
+				   fib4->fi, fib4->tb_id,
 				   OFDPA_OP_FLAG_REMOVE);
 }
 
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index b02eed1..73427e2 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -155,11 +155,11 @@
 	return 0;
 
 err_rx_irq_unmap:
-	while (--i)
+	while (i--)
 		irq_dispose_mapping(priv->rxq[i]->irq_no);
 	i = SXGBE_TX_QUEUES;
 err_tx_irq_unmap:
-	while (--i)
+	while (i--)
 		irq_dispose_mapping(priv->txq[i]->irq_no);
 	irq_dispose_mapping(priv->irq);
 err_drv_remove:
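
The sxgbe fix swaps the pre-decrement unwind loops for post-decrement ones: "while (--i)" never disposes of index 0 and, if i is already 0 when the error path is taken, decrements straight past it. The difference is easy to see in isolation:

	#include <stdio.h>

	int main(void)
	{
		int i;

		/* Old unwind: never visits index 0, and with i == 0 it would
		 * decrement past zero before testing. */
		i = 4;
		printf("while (--i):");
		while (--i)
			printf(" %d", i);	/* 3 2 1 */

		/* Fixed unwind: visits every index below the failing one, 0 included. */
		i = 4;
		printf("\nwhile (i--):");
		while (i--)
			printf(" %d", i);	/* 3 2 1 0 */
		printf("\n");
		return 0;
	}
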
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index ca73366..c2bd537 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -572,7 +572,7 @@
 	if (err)
 		return err;
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 
 	return 0;
@@ -648,7 +648,7 @@
 	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
 	sgiseeq_reset(dev);
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 }
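
This and the following drivers stop writing dev->trans_start directly and call netif_trans_update(dev) instead, so the "last transmit" timestamp is maintained by one helper rather than open-coded in every driver. A generic user-space illustration of that accessor pattern (this is not the kernel helper itself):

	#include <stdio.h>
	#include <time.h>

	struct fake_netdev {
		time_t trans_start;	/* stand-in for the old dev->trans_start */
	};

	/* One helper owns the timestamp, so its storage or logic can change
	 * later without editing every caller. */
	static void fake_trans_update(struct fake_netdev *dev)
	{
		dev->trans_start = time(NULL);
	}

	int main(void)
	{
		struct fake_netdev dev = { 0 };

		fake_trans_update(&dev);	/* was: dev.trans_start = time(NULL); */
		printf("trans_start = %ld\n", (long)dev.trans_start);
		return 0;
	}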
 
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 98d33d4..1681084 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1920,6 +1920,10 @@
 		return 0;
 	}
 
+	if (nic_data->datapath_caps &
+	    1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+		return -EOPNOTSUPP;
+
 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
 		       nic_data->vport_id);
 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
@@ -2923,9 +2927,16 @@
 				      bool replacing)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	u32 flags = spec->flags;
 
 	memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
 
+	/* Remove RSS flag if we don't have an RSS context. */
+	if (flags & EFX_FILTER_FLAG_RX_RSS &&
+	    spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
+	    nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
+		flags &= ~EFX_FILTER_FLAG_RX_RSS;
+
 	if (replacing) {
 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
 			       MC_CMD_FILTER_OP_IN_OP_REPLACE);
@@ -2985,10 +2996,10 @@
 		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
 		       0 : spec->dmaq_id);
 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
-		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+		       (flags & EFX_FILTER_FLAG_RX_RSS) ?
 		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
 		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
-	if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+	if (flags & EFX_FILTER_FLAG_RX_RSS)
 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
 			       spec->rss_context !=
 			       EFX_FILTER_RSS_CONTEXT_DEFAULT ?
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 5eac523..aaa80f1 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -708,7 +708,7 @@
 	mace->eth.dma_ctrl = priv->dma_ctrl;
 
 	meth_add_to_tx_ring(priv, skb);
-	dev->trans_start = jiffies; /* save the timestamp */
+	netif_trans_update(dev); /* save the timestamp */
 
 	/* If TX ring is full, tell the upper layer to stop sending packets */
 	if (meth_tx_full(dev)) {
@@ -756,7 +756,7 @@
 	/* Enable interrupt */
 	spin_unlock_irqrestore(&priv->meth_lock, flags);
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index fd812d2..95001ee4 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1575,7 +1575,7 @@
 
 	spin_unlock_irqrestore(&sis_priv->lock, flags);
 
-	net_dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(net_dev); /* prevent tx timeout */
 
 	/* load Transmit Descriptor Register */
 	sw32(txdp, sis_priv->tx_ring_dma);
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 443f1da..7186b89 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -889,7 +889,7 @@
 		ew32(COMMAND, TxQueued);
 	}
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 	if (!ep->tx_full)
 		netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index a733868..cb49c96 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -499,7 +499,7 @@
 	/* DMA complete IRQ will free buffer and set jiffies */
 #else
 	SMC_PUSH_DATA(lp, buf, len);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	dev_kfree_skb_irq(skb);
 #endif
 	if (!lp->tx_throttle) {
@@ -1189,7 +1189,7 @@
 	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
 	BUG_ON(skb == NULL);
 	dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	dev_kfree_skb_irq(skb);
 	lp->current_tx_skb = NULL;
 	if (lp->pending_tx_skb != NULL)
@@ -1283,7 +1283,7 @@
 		schedule_work(&lp->phy_configure);
 
 	/* We can accept TX packets again */
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 664f596..d496888 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -663,7 +663,7 @@
 	lp->saved_skb = NULL;
 	dev_kfree_skb_any (skb);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	/* we can send another packet */
 	netif_wake_queue(dev);
@@ -1104,7 +1104,7 @@
 	/* "kick" the adaptor */
 	smc_reset( dev->base_addr );
 	smc_enable( dev->base_addr );
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	/* clear anything saved */
 	((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
 	netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 3449893..db3c696 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1172,7 +1172,7 @@
 
     smc->saved_skb = NULL;
     dev_kfree_skb_irq(skb);
-    dev->trans_start = jiffies;
+    netif_trans_update(dev);
     netif_start_queue(dev);
 }
 
@@ -1187,7 +1187,7 @@
 		  inw(ioaddr)&0xff, inw(ioaddr + 2));
     dev->stats.tx_errors++;
     smc_reset(dev);
-    dev->trans_start = jiffies; /* prevent tx timeout */
+    netif_trans_update(dev); /* prevent tx timeout */
     smc->saved_skb = NULL;
     netif_wake_queue(dev);
 }
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index c5ed27c..18ac52d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -619,7 +619,7 @@
 	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
 	smc_special_unlock(&lp->lock, flags);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += len;
 
@@ -1364,7 +1364,7 @@
 		schedule_work(&lp->phy_configure);
 
 	/* We can accept TX packets again */
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b390161..0fb362d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,7 +2,8 @@
 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o	\
 	      chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o	\
 	      dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o	\
-	      mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
+	      mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o	\
+	      dwmac4_dma.o dwmac4_lib.o dwmac4_core.o $(stmmac-y)
 
 # Ordering matters. Generic driver must be last.
 obj-$(CONFIG_STMMAC_PLATFORM)	+= stmmac-platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index f96d257..fc60368 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -41,6 +41,8 @@
 /* Synopsys Core versions */
 #define	DWMAC_CORE_3_40	0x34
 #define	DWMAC_CORE_3_50	0x35
+#define	DWMAC_CORE_4_00	0x40
+#define STMMAC_CHAN0	0	/* Always supported and default for all chips */
 
 #define DMA_TX_SIZE 512
 #define DMA_RX_SIZE 512
@@ -167,6 +169,9 @@
 	unsigned long mtl_rx_fifo_ctrl_active;
 	unsigned long mac_rx_frame_ctrl_fifo;
 	unsigned long mac_gmii_rx_proto_engine;
+	/* TSO */
+	unsigned long tx_tso_frames;
+	unsigned long tx_tso_nfrags;
 };
 
 /* CSR Frequency Access Defines*/
@@ -243,6 +248,7 @@
 	csum_none = 0x2,
 	llc_snap = 0x4,
 	dma_own = 0x8,
+	rx_not_ls = 0x10,
 };
 
 /* Tx status */
@@ -269,6 +275,7 @@
 #define	CORE_PCS_ANE_COMPLETE		(1 << 5)
 #define	CORE_PCS_LINK_STATUS		(1 << 6)
 #define	CORE_RGMII_IRQ			(1 << 7)
+#define CORE_IRQ_MTL_RX_OVERFLOW	BIT(8)
 
 /* Physical Coding Sublayer */
 struct rgmii_adv {
@@ -300,8 +307,10 @@
 	/* 802.3az - Energy-Efficient Ethernet (EEE) */
 	unsigned int eee;
 	unsigned int av;
+	unsigned int tsoen;
 	/* TX and RX csum */
 	unsigned int tx_coe;
+	unsigned int rx_coe;
 	unsigned int rx_coe_type1;
 	unsigned int rx_coe_type2;
 	unsigned int rxfifo_over_2048;
@@ -348,6 +357,10 @@
 	void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
 				 bool csum_flag, int mode, bool tx_own,
 				 bool ls);
+	void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
+				    int len2, bool tx_own, bool ls,
+				    unsigned int tcphdrlen,
+				    unsigned int tcppayloadlen);
 	/* Set/get the owner of the descriptor */
 	void (*set_tx_owner) (struct dma_desc *p);
 	int (*get_tx_owner) (struct dma_desc *p);
@@ -380,6 +393,10 @@
 	 u64(*get_timestamp) (void *desc, u32 ats);
 	/* get rx timestamp status */
 	int (*get_rx_timestamp_status) (void *desc, u32 ats);
+	/* Display ring */
+	void (*display_ring)(void *head, unsigned int size, bool rx);
+	/* set MSS via context descriptor */
+	void (*set_mss)(struct dma_desc *p, unsigned int mss);
 };
 
 extern const struct stmmac_desc_ops enh_desc_ops;
@@ -412,9 +429,15 @@
 	int (*dma_interrupt) (void __iomem *ioaddr,
 			      struct stmmac_extra_stats *x);
 	/* If supported then get the optional core features */
-	unsigned int (*get_hw_feature) (void __iomem *ioaddr);
+	void (*get_hw_feature)(void __iomem *ioaddr,
+			       struct dma_features *dma_cap);
 	/* Program the HW RX Watchdog */
 	void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
+	void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
+	void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
+	void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+	void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+	void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
 };
 
 struct mac_device_info;
@@ -463,6 +486,7 @@
 };
 
 extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
 
 struct mac_link {
 	int port;
@@ -495,7 +519,6 @@
 	const struct stmmac_hwtimestamp *ptp;
 	struct mii_regs mii;	/* MII register Addresses */
 	struct mac_link link;
-	unsigned int synopsys_uid;
 	void __iomem *pcsr;     /* vpointer to device CSRs */
 	int multicast_filter_bins;
 	int unicast_filter_entries;
@@ -504,18 +527,47 @@
 };
 
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
-					int perfect_uc_entries);
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
+					int perfect_uc_entries,
+					int *synopsys_id);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id);
+struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
+				     int perfect_uc_entries, int *synopsys_id);
 
 void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
 			 unsigned int high, unsigned int low);
 void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 			 unsigned int high, unsigned int low);
-
 void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+				unsigned int high, unsigned int low);
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+				unsigned int high, unsigned int low);
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
+
 void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_mode_ops ring_mode_ops;
 extern const struct stmmac_mode_ops chain_mode_ops;
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
 
+/**
+ * stmmac_get_synopsys_id - return the Synopsys ID.
+ * @hwid: hardware version register value
+ * Description: this simple function decodes and returns the Synopsys ID
+ * starting from the HW core register.
+ */
+static inline u32 stmmac_get_synopsys_id(u32 hwid)
+{
+	/* Check Synopsys Id (not available on old chips) */
+	if (likely(hwid)) {
+		u32 uid = ((hwid & 0x0000ff00) >> 8);
+		u32 synid = (hwid & 0x000000ff);
+
+		pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
+			uid, synid);
+
+		return synid;
+	}
+	return 0;
+}
 #endif /* __COMMON_H__ */
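
stmmac_get_synopsys_id() is now the single place that decodes the hardware version word: the user ID sits in bits 15:8 and the Synopsys core ID in bits 7:0, and the core ID is what gets compared against DWMAC_CORE_3_50, DWMAC_CORE_4_00 and friends. The same decode as a standalone program, fed a made-up GMAC_VERSION value:

	#include <stdio.h>
	#include <stdint.h>

	/* Same decode as stmmac_get_synopsys_id(): user ID in bits 15:8,
	 * Synopsys core ID in bits 7:0 of the hardware version register. */
	static uint32_t get_synopsys_id(uint32_t hwid)
	{
		if (hwid) {
			uint32_t uid = (hwid & 0x0000ff00) >> 8;
			uint32_t synid = hwid & 0x000000ff;

			printf("user ID: 0x%x, Synopsys ID: 0x%x\n", uid, synid);
			return synid;
		}
		return 0;	/* old cores do not expose an ID */
	}

	int main(void)
	{
		/* Made-up GMAC_VERSION value; a core ID of 0x35 would match
		 * DWMAC_CORE_3_50. */
		get_synopsys_id(0x00001035);
		return 0;
	}
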
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f0d797a..f13499f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -34,6 +34,9 @@
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
 #define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
 
+#define SYSMGR_FPGAGRP_MODULE_REG  0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
 #define EMAC_SPLITTER_CTRL_REG			0x0
 #define EMAC_SPLITTER_CTRL_SPEED_MASK		0x3
 #define EMAC_SPLITTER_CTRL_SPEED_10		0x2
@@ -89,15 +92,6 @@
 	struct device_node *np_splitter;
 	struct resource res_splitter;
 
-	dwmac->stmmac_rst = devm_reset_control_get(dev,
-						  STMMAC_RESOURCE_NAME);
-	if (IS_ERR(dwmac->stmmac_rst)) {
-		dev_info(dev, "Could not get reset control!\n");
-		if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		dwmac->stmmac_rst = NULL;
-	}
-
 	dwmac->interface = of_get_phy_mode(np);
 
 	sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -142,13 +136,13 @@
 	return 0;
 }
 
-static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
+static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
 {
 	struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
 	int phymode = dwmac->interface;
 	u32 reg_offset = dwmac->reg_offset;
 	u32 reg_shift = dwmac->reg_shift;
-	u32 ctrl, val;
+	u32 ctrl, val, module;
 
 	switch (phymode) {
 	case PHY_INTERFACE_MODE_RGMII:
@@ -171,48 +165,26 @@
 	if (dwmac->splitter_base)
 		val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
 
-	regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
-	ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
-	ctrl |= val << reg_shift;
-
-	if (dwmac->f2h_ptp_ref_clk)
-		ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
-	else
-		ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
-
-	regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
-	return 0;
-}
-
-static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
-{
-	struct socfpga_dwmac	*dwmac = priv;
-
-	/* On socfpga platform exit, assert and hold reset to the
-	 * enet controller - the default state after a hard reset.
-	 */
-	if (dwmac->stmmac_rst)
-		reset_control_assert(dwmac->stmmac_rst);
-}
-
-static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
-{
-	struct socfpga_dwmac	*dwmac = priv;
-	struct net_device *ndev = platform_get_drvdata(pdev);
-	struct stmmac_priv *stpriv = NULL;
-	int ret = 0;
-
-	if (ndev)
-		stpriv = netdev_priv(ndev);
-
 	/* Assert reset to the enet controller before changing the phy mode */
 	if (dwmac->stmmac_rst)
 		reset_control_assert(dwmac->stmmac_rst);
 
-	/* Setup the phy mode in the system manager registers according to
-	 * devicetree configuration
-	 */
-	ret = socfpga_dwmac_setup(dwmac);
+	regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
+	ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
+	ctrl |= val << reg_shift;
+
+	if (dwmac->f2h_ptp_ref_clk) {
+		ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
+		regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+			    &module);
+		module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+		regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+			     module);
+	} else {
+		ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+	}
+
+	regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
 
 	/* Deassert reset for the phy configuration to be sampled by
 	 * the enet controller, and operation to start in requested mode
@@ -220,25 +192,7 @@
 	if (dwmac->stmmac_rst)
 		reset_control_deassert(dwmac->stmmac_rst);
 
-	/* Before the enet controller is suspended, the phy is suspended.
-	 * This causes the phy clock to be gated. The enet controller is
-	 * resumed before the phy, so the clock is still gated "off" when
-	 * the enet controller is resumed. This code makes sure the phy
-	 * is "resumed" before reinitializing the enet controller since
-	 * the enet controller depends on an active phy clock to complete
-	 * a DMA reset. A DMA reset will "time out" if executed
-	 * with no phy clock input on the Synopsys enet controller.
-	 * Verified through Synopsys Case #8000711656.
-	 *
-	 * Note that the phy clock is also gated when the phy is isolated.
-	 * Phy "suspend" and "isolate" controls are located in phy basic
-	 * control register 0, and can be modified by the phy driver
-	 * framework.
-	 */
-	if (stpriv && stpriv->phydev)
-		phy_resume(stpriv->phydev);
-
-	return ret;
+	return 0;
 }
 
 static int socfpga_dwmac_probe(struct platform_device *pdev)
@@ -267,24 +221,59 @@
 		return ret;
 	}
 
-	ret = socfpga_dwmac_setup(dwmac);
-	if (ret) {
-		dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
-		return ret;
-	}
-
 	plat_dat->bsp_priv = dwmac;
-	plat_dat->init = socfpga_dwmac_init;
-	plat_dat->exit = socfpga_dwmac_exit;
 	plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
 
-	ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
-	if (ret)
-		return ret;
+	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (!ret) {
+		struct net_device *ndev = platform_get_drvdata(pdev);
+		struct stmmac_priv *stpriv = netdev_priv(ndev);
 
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+		/* The socfpga driver needs to control the stmmac reset to
+		 * set the phy mode. Create a copy of the core reset handle
+		 * so it can be used by the driver later.
+		 */
+		dwmac->stmmac_rst = stpriv->stmmac_rst;
+
+		ret = socfpga_dwmac_set_phy_mode(dwmac);
+	}
+
+	return ret;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int socfpga_dwmac_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
+
+	socfpga_dwmac_set_phy_mode(priv->plat->bsp_priv);
+
+	/* Before the enet controller is suspended, the phy is suspended.
+	 * This causes the phy clock to be gated. The enet controller is
+	 * resumed before the phy, so the clock is still gated "off" when
+	 * the enet controller is resumed. This code makes sure the phy
+	 * is "resumed" before reinitializing the enet controller since
+	 * the enet controller depends on an active phy clock to complete
+	 * a DMA reset. A DMA reset will "time out" if executed
+	 * with no phy clock input on the Synopsys enet controller.
+	 * Verified through Synopsys Case #8000711656.
+	 *
+	 * Note that the phy clock is also gated when the phy is isolated.
+	 * Phy "suspend" and "isolate" controls are located in phy basic
+	 * control register 0, and can be modified by the phy driver
+	 * framework.
+	 */
+	if (priv->phydev)
+		phy_resume(priv->phydev);
+
+	return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
+					       socfpga_dwmac_resume);
+
 static const struct of_device_id socfpga_dwmac_match[] = {
 	{ .compatible = "altr,socfpga-stmmac" },
 	{ }
@@ -296,7 +285,7 @@
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "socfpga-dwmac",
-		.pm		= &stmmac_pltfr_pm_ops,
+		.pm		= &socfpga_dwmac_pm_ops,
 		.of_match_table = socfpga_dwmac_match,
 	},
 };
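
The socfpga glue no longer installs init/exit callbacks; probe sets the phy mode once, and a dedicated resume handler reprograms the system manager and resumes the phy before delegating to stmmac_resume(). The shape of that wrapper, reduced to a toy program (the function names below are stand-ins, not the stmmac API):

	#include <stdio.h>

	/* Stand-ins for the real steps; only the ordering matters here. */
	static void set_phy_mode(void)	{ printf("reprogram system manager\n"); }
	static void resume_phy(void)	{ printf("ungate phy clock\n"); }
	static int core_resume(void)	{ printf("stmmac core resume\n"); return 0; }

	/* Glue-specific resume: fix up platform state and the phy first, then
	 * let the common driver resume path run. */
	static int glue_resume(void)
	{
		set_phy_mode();
		resume_phy();
		return core_resume();
	}

	int main(void)
	{
		return glue_resume();
	}
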
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index c294117..fb1eb57 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -491,7 +491,8 @@
 };
 
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
-					int perfect_uc_entries)
+					int perfect_uc_entries,
+					int *synopsys_id)
 {
 	struct mac_device_info *mac;
 	u32 hwid = readl(ioaddr + GMAC_VERSION);
@@ -516,7 +517,9 @@
 	mac->link.speed = GMAC_CONTROL_FES;
 	mac->mii.addr = GMAC_MII_ADDR;
 	mac->mii.data = GMAC_MII_DATA;
-	mac->synopsys_uid = hwid;
+
+	/* Get and dump the chip ID */
+	*synopsys_id = stmmac_get_synopsys_id(hwid);
 
 	return mac;
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index da32d60..99074695 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -215,9 +215,40 @@
 	}
 }
 
-static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr)
+static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
+				     struct dma_features *dma_cap)
 {
-	return readl(ioaddr + DMA_HW_FEATURE);
+	u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
+
+	dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
+	dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
+	dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
+	dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
+	dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
+	dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
+	dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
+	dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
+	dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
+	/* MMC */
+	dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
+	/* IEEE 1588-2002 */
+	dma_cap->time_stamp =
+	    (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
+	/* IEEE 1588-2008 */
+	dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
+	/* 802.3az - Energy-Efficient Ethernet (EEE) */
+	dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
+	dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
+	/* TX and RX csum */
+	dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
+	dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
+	dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
+	dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
+	/* TX and RX number of channels */
+	dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
+	dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
+	/* Alternate (enhanced) DESC mode */
+	dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
 }
 
 static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
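
get_hw_feature() now takes a struct dma_features and fills it in, masking and shifting each capability bit out of the raw DMA_HW_FEATURE word instead of handing the raw word back to the core. The extraction pattern in isolation, using three of the capability bits; the literal mask values and the register value in main() are illustrative:

	#include <stdio.h>
	#include <stdint.h>

	/* Single-bit capability masks, mirroring the shifts used in the patch. */
	#define DMA_HW_FEAT_MIISEL	0x00000001	/* bit 0 */
	#define DMA_HW_FEAT_GMIISEL	0x00000002	/* bit 1 */
	#define DMA_HW_FEAT_TXCOESEL	0x00010000	/* bit 16 */

	struct fake_dma_features {
		unsigned int mbps_10_100;
		unsigned int mbps_1000;
		unsigned int tx_coe;
	};

	/* Same pattern as the new get_hw_feature(): mask a capability bit out
	 * of the raw register and shift it down to a 0/1 flag. */
	static void get_hw_feature(uint32_t hw_cap, struct fake_dma_features *cap)
	{
		cap->mbps_10_100 = hw_cap & DMA_HW_FEAT_MIISEL;
		cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
		cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
	}

	int main(void)
	{
		struct fake_dma_features cap;

		get_hw_feature(0x00010003, &cap);	/* made-up register value */
		printf("10/100=%u 1000=%u tx_coe=%u\n",
		       cap.mbps_10_100, cap.mbps_1000, cap.tx_coe);
		return 0;
	}
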
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index f8dd773..6418b2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -173,7 +173,7 @@
 	.get_umac_addr = dwmac100_get_umac_addr,
 };
 
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
 {
 	struct mac_device_info *mac;
 
@@ -192,7 +192,8 @@
 	mac->link.speed = 0;
 	mac->mii.addr = MAC_MII_ADDR;
 	mac->mii.data = MAC_MII_DATA;
-	mac->synopsys_uid = 0;
+	/* Synopsys Id is not available on old chips */
+	*synopsys_id = 0;
 
 	return mac;
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
new file mode 100644
index 0000000..bc50952
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -0,0 +1,255 @@
+/*
+ * DWMAC4 Header file.
+ *
+ * Copyright (C) 2015  STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_H__
+#define __DWMAC4_H__
+
+#include "common.h"
+
+/*  MAC registers */
+#define GMAC_CONFIG			0x00000000
+#define GMAC_PACKET_FILTER		0x00000008
+#define GMAC_HASH_TAB_0_31		0x00000010
+#define GMAC_HASH_TAB_32_63		0x00000014
+#define GMAC_RX_FLOW_CTRL		0x00000090
+#define GMAC_QX_TX_FLOW_CTRL(x)		(0x70 + x * 4)
+#define GMAC_INT_STATUS			0x000000b0
+#define GMAC_INT_EN			0x000000b4
+#define GMAC_AN_CTRL			0x000000e0
+#define GMAC_AN_STATUS			0x000000e4
+#define GMAC_AN_ADV			0x000000e8
+#define GMAC_AN_LPA			0x000000ec
+#define GMAC_PMT			0x000000c0
+#define GMAC_VERSION			0x00000110
+#define GMAC_DEBUG			0x00000114
+#define GMAC_HW_FEATURE0		0x0000011c
+#define GMAC_HW_FEATURE1		0x00000120
+#define GMAC_HW_FEATURE2		0x00000124
+#define GMAC_MDIO_ADDR			0x00000200
+#define GMAC_MDIO_DATA			0x00000204
+#define GMAC_ADDR_HIGH(reg)		(0x300 + reg * 8)
+#define GMAC_ADDR_LOW(reg)		(0x304 + reg * 8)
+
+/* MAC Packet Filtering */
+#define GMAC_PACKET_FILTER_PR		BIT(0)
+#define GMAC_PACKET_FILTER_HMC		BIT(2)
+#define GMAC_PACKET_FILTER_PM		BIT(4)
+
+#define GMAC_MAX_PERFECT_ADDRESSES	128
+
+/* MAC Flow Control RX */
+#define GMAC_RX_FLOW_CTRL_RFE		BIT(0)
+
+/* MAC Flow Control TX */
+#define GMAC_TX_FLOW_CTRL_TFE		BIT(1)
+#define GMAC_TX_FLOW_CTRL_PT_SHIFT	16
+
+/*  MAC Interrupt bitmap*/
+#define GMAC_INT_PMT_EN			BIT(4)
+#define GMAC_INT_LPI_EN			BIT(5)
+
+enum dwmac4_irq_status {
+	time_stamp_irq = 0x00001000,
+	mmc_rx_csum_offload_irq = 0x00000800,
+	mmc_tx_irq = 0x00000400,
+	mmc_rx_irq = 0x00000200,
+	mmc_irq = 0x00000100,
+	pmt_irq = 0x00000010,
+	pcs_ane_irq = 0x00000004,
+	pcs_link_irq = 0x00000002,
+};
+
+/* MAC Auto-Neg bitmap*/
+#define	GMAC_AN_CTRL_RAN		BIT(9)
+#define	GMAC_AN_CTRL_ANE		BIT(12)
+#define GMAC_AN_CTRL_ELE		BIT(14)
+#define GMAC_AN_FD			BIT(5)
+#define GMAC_AN_HD			BIT(6)
+#define GMAC_AN_PSE_MASK		GENMASK(8, 7)
+#define GMAC_AN_PSE_SHIFT		7
+
+/* MAC PMT bitmap */
+enum power_event {
+	pointer_reset =	0x80000000,
+	global_unicast = 0x00000200,
+	wake_up_rx_frame = 0x00000040,
+	magic_frame = 0x00000020,
+	wake_up_frame_en = 0x00000004,
+	magic_pkt_en = 0x00000002,
+	power_down = 0x00000001,
+};
+
+/* MAC Debug bitmap */
+#define GMAC_DEBUG_TFCSTS_MASK		GENMASK(18, 17)
+#define GMAC_DEBUG_TFCSTS_SHIFT		17
+#define GMAC_DEBUG_TFCSTS_IDLE		0
+#define GMAC_DEBUG_TFCSTS_WAIT		1
+#define GMAC_DEBUG_TFCSTS_GEN_PAUSE	2
+#define GMAC_DEBUG_TFCSTS_XFER		3
+#define GMAC_DEBUG_TPESTS		BIT(16)
+#define GMAC_DEBUG_RFCFCSTS_MASK	GENMASK(2, 1)
+#define GMAC_DEBUG_RFCFCSTS_SHIFT	1
+#define GMAC_DEBUG_RPESTS		BIT(0)
+
+/* MAC config */
+#define GMAC_CONFIG_IPC			BIT(27)
+#define GMAC_CONFIG_2K			BIT(22)
+#define GMAC_CONFIG_ACS			BIT(20)
+#define GMAC_CONFIG_BE			BIT(18)
+#define GMAC_CONFIG_JD			BIT(17)
+#define GMAC_CONFIG_JE			BIT(16)
+#define GMAC_CONFIG_PS			BIT(15)
+#define GMAC_CONFIG_FES			BIT(14)
+#define GMAC_CONFIG_DM			BIT(13)
+#define GMAC_CONFIG_DCRS		BIT(9)
+#define GMAC_CONFIG_TE			BIT(1)
+#define GMAC_CONFIG_RE			BIT(0)
+
+/* MAC HW features0 bitmap */
+#define GMAC_HW_FEAT_ADDMAC		BIT(18)
+#define GMAC_HW_FEAT_RXCOESEL		BIT(16)
+#define GMAC_HW_FEAT_TXCOSEL		BIT(14)
+#define GMAC_HW_FEAT_EEESEL		BIT(13)
+#define GMAC_HW_FEAT_TSSEL		BIT(12)
+#define GMAC_HW_FEAT_MMCSEL		BIT(8)
+#define GMAC_HW_FEAT_MGKSEL		BIT(7)
+#define GMAC_HW_FEAT_RWKSEL		BIT(6)
+#define GMAC_HW_FEAT_SMASEL		BIT(5)
+#define GMAC_HW_FEAT_VLHASH		BIT(4)
+#define GMAC_HW_FEAT_PCSSEL		BIT(3)
+#define GMAC_HW_FEAT_HDSEL		BIT(2)
+#define GMAC_HW_FEAT_GMIISEL		BIT(1)
+#define GMAC_HW_FEAT_MIISEL		BIT(0)
+
+/* MAC HW features1 bitmap */
+#define GMAC_HW_FEAT_AVSEL		BIT(20)
+#define GMAC_HW_TSOEN			BIT(18)
+
+/* MAC HW features2 bitmap */
+#define GMAC_HW_FEAT_TXCHCNT		GENMASK(21, 18)
+#define GMAC_HW_FEAT_RXCHCNT		GENMASK(15, 12)
+
+/* MAC HW ADDR regs */
+#define GMAC_HI_DCS			GENMASK(18, 16)
+#define GMAC_HI_DCS_SHIFT		16
+#define GMAC_HI_REG_AE			BIT(31)
+
+/*  MTL registers */
+#define MTL_INT_STATUS			0x00000c20
+#define MTL_INT_Q0			BIT(0)
+
+#define MTL_CHAN_BASE_ADDR		0x00000d00
+#define MTL_CHAN_BASE_OFFSET		0x40
+#define MTL_CHANX_BASE_ADDR(x)		(MTL_CHAN_BASE_ADDR + \
+					(x * MTL_CHAN_BASE_OFFSET))
+
+#define MTL_CHAN_TX_OP_MODE(x)		MTL_CHANX_BASE_ADDR(x)
+#define MTL_CHAN_TX_DEBUG(x)		(MTL_CHANX_BASE_ADDR(x) + 0x8)
+#define MTL_CHAN_INT_CTRL(x)		(MTL_CHANX_BASE_ADDR(x) + 0x2c)
+#define MTL_CHAN_RX_OP_MODE(x)		(MTL_CHANX_BASE_ADDR(x) + 0x30)
+#define MTL_CHAN_RX_DEBUG(x)		(MTL_CHANX_BASE_ADDR(x) + 0x38)
+
+#define MTL_OP_MODE_RSF			BIT(5)
+#define MTL_OP_MODE_TSF			BIT(1)
+
+#define MTL_OP_MODE_TTC_MASK		0x70
+#define MTL_OP_MODE_TTC_SHIFT		4
+
+#define MTL_OP_MODE_TTC_32		0
+#define MTL_OP_MODE_TTC_64		(1 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_96		(2 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_128		(3 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_192		(4 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_256		(5 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_384		(6 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_512		(7 << MTL_OP_MODE_TTC_SHIFT)
+
+#define MTL_OP_MODE_RTC_MASK		0x18
+#define MTL_OP_MODE_RTC_SHIFT		3
+
+#define MTL_OP_MODE_RTC_32		(1 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_64		0
+#define MTL_OP_MODE_RTC_96		(2 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_128		(3 << MTL_OP_MODE_RTC_SHIFT)
+
+/*  MTL debug */
+#define MTL_DEBUG_TXSTSFSTS		BIT(5)
+#define MTL_DEBUG_TXFSTS		BIT(4)
+#define MTL_DEBUG_TWCSTS		BIT(3)
+
+/* MTL debug: Tx FIFO Read Controller Status */
+#define MTL_DEBUG_TRCSTS_MASK		GENMASK(2, 1)
+#define MTL_DEBUG_TRCSTS_SHIFT		1
+#define MTL_DEBUG_TRCSTS_IDLE		0
+#define MTL_DEBUG_TRCSTS_READ		1
+#define MTL_DEBUG_TRCSTS_TXW		2
+#define MTL_DEBUG_TRCSTS_WRITE		3
+#define MTL_DEBUG_TXPAUSED		BIT(0)
+
+/* MAC debug: GMII or MII Transmit Protocol Engine Status */
+#define MTL_DEBUG_RXFSTS_MASK		GENMASK(5, 4)
+#define MTL_DEBUG_RXFSTS_SHIFT		4
+#define MTL_DEBUG_RXFSTS_EMPTY		0
+#define MTL_DEBUG_RXFSTS_BT		1
+#define MTL_DEBUG_RXFSTS_AT		2
+#define MTL_DEBUG_RXFSTS_FULL		3
+#define MTL_DEBUG_RRCSTS_MASK		GENMASK(2, 1)
+#define MTL_DEBUG_RRCSTS_SHIFT		1
+#define MTL_DEBUG_RRCSTS_IDLE		0
+#define MTL_DEBUG_RRCSTS_RDATA		1
+#define MTL_DEBUG_RRCSTS_RSTAT		2
+#define MTL_DEBUG_RRCSTS_FLUSH		3
+#define MTL_DEBUG_RWCSTS		BIT(0)
+
+/*  MTL interrupt */
+#define MTL_RX_OVERFLOW_INT_EN		BIT(24)
+#define MTL_RX_OVERFLOW_INT		BIT(16)
+
+/* Default operating mode of the MAC */
+#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
+			GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
+
+/* To dump the core regs excluding  the Address Registers */
+#define	GMAC_REG_NUM	132
+
+/*  MTL debug */
+#define MTL_DEBUG_TXSTSFSTS		BIT(5)
+#define MTL_DEBUG_TXFSTS		BIT(4)
+#define MTL_DEBUG_TWCSTS		BIT(3)
+
+/* MTL debug: Tx FIFO Read Controller Status */
+#define MTL_DEBUG_TRCSTS_MASK		GENMASK(2, 1)
+#define MTL_DEBUG_TRCSTS_SHIFT		1
+#define MTL_DEBUG_TRCSTS_IDLE		0
+#define MTL_DEBUG_TRCSTS_READ		1
+#define MTL_DEBUG_TRCSTS_TXW		2
+#define MTL_DEBUG_TRCSTS_WRITE		3
+#define MTL_DEBUG_TXPAUSED		BIT(0)
+
+/* MAC debug: GMII or MII Transmit Protocol Engine Status */
+#define MTL_DEBUG_RXFSTS_MASK		GENMASK(5, 4)
+#define MTL_DEBUG_RXFSTS_SHIFT		4
+#define MTL_DEBUG_RXFSTS_EMPTY		0
+#define MTL_DEBUG_RXFSTS_BT		1
+#define MTL_DEBUG_RXFSTS_AT		2
+#define MTL_DEBUG_RXFSTS_FULL		3
+#define MTL_DEBUG_RRCSTS_MASK		GENMASK(2, 1)
+#define MTL_DEBUG_RRCSTS_SHIFT		1
+#define MTL_DEBUG_RRCSTS_IDLE		0
+#define MTL_DEBUG_RRCSTS_RDATA		1
+#define MTL_DEBUG_RRCSTS_RSTAT		2
+#define MTL_DEBUG_RRCSTS_FLUSH		3
+#define MTL_DEBUG_RWCSTS		BIT(0)
+
+extern const struct stmmac_dma_ops dwmac4_dma_ops;
+extern const struct stmmac_dma_ops dwmac410_dma_ops;
+#endif /* __DWMAC4_H__ */
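
In the new dwmac4.h the per-channel MTL registers are generated from a base address plus a 0x40 stride, so MTL_CHAN_TX_OP_MODE(x), MTL_CHAN_RX_OP_MODE(x) and the rest all derive from MTL_CHANX_BASE_ADDR(x). A quick standalone check of the offsets it produces:

	#include <stdio.h>

	/* Same layout as dwmac4.h: per-channel MTL registers at base + 0x40 * chan. */
	#define MTL_CHAN_BASE_ADDR	0x00000d00
	#define MTL_CHAN_BASE_OFFSET	0x40
	#define MTL_CHANX_BASE_ADDR(x)	(MTL_CHAN_BASE_ADDR + \
					 ((x) * MTL_CHAN_BASE_OFFSET))
	#define MTL_CHAN_RX_OP_MODE(x)	(MTL_CHANX_BASE_ADDR(x) + 0x30)

	int main(void)
	{
		int ch;

		for (ch = 0; ch < 3; ch++)
			printf("channel %d: base 0x%04x, rx op mode 0x%04x\n",
			       ch, MTL_CHANX_BASE_ADDR(ch), MTL_CHAN_RX_OP_MODE(ch));
		return 0;
	}
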
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
new file mode 100644
index 0000000..4f7283d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -0,0 +1,407 @@
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.00  has been used for developing this code.
+ *
+ * This only implements the mac core functions for this chip.
+ *
+ * Copyright (C) 2015  STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include "dwmac4.h"
+
+static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 value = readl(ioaddr + GMAC_CONFIG);
+
+	value |= GMAC_CORE_INIT;
+
+	if (mtu > 1500)
+		value |= GMAC_CONFIG_2K;
+	if (mtu > 2000)
+		value |= GMAC_CONFIG_JE;
+
+	writel(value, ioaddr + GMAC_CONFIG);
+
+	/* Mask GMAC interrupts */
+	writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN);
+}
+
+static void dwmac4_dump_regs(struct mac_device_info *hw)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	int i;
+
+	pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr);
+
+	for (i = 0; i < GMAC_REG_NUM; i++) {
+		int offset = i * 4;
+
+		pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+			 offset, readl(ioaddr + offset));
+	}
+}
+
+static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 value = readl(ioaddr + GMAC_CONFIG);
+
+	if (hw->rx_csum)
+		value |= GMAC_CONFIG_IPC;
+	else
+		value &= ~GMAC_CONFIG_IPC;
+
+	writel(value, ioaddr + GMAC_CONFIG);
+
+	value = readl(ioaddr + GMAC_CONFIG);
+
+	return !!(value & GMAC_CONFIG_IPC);
+}
+
+static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	unsigned int pmt = 0;
+
+	if (mode & WAKE_MAGIC) {
+		pr_debug("GMAC: WOL Magic frame\n");
+		pmt |= power_down | magic_pkt_en;
+	}
+	if (mode & WAKE_UCAST) {
+		pr_debug("GMAC: WOL on global unicast\n");
+		pmt |= global_unicast;
+	}
+
+	writel(pmt, ioaddr + GMAC_PMT);
+}
+
+static void dwmac4_set_umac_addr(struct mac_device_info *hw,
+				 unsigned char *addr, unsigned int reg_n)
+{
+	void __iomem *ioaddr = hw->pcsr;
+
+	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+				   GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac4_get_umac_addr(struct mac_device_info *hw,
+				 unsigned char *addr, unsigned int reg_n)
+{
+	void __iomem *ioaddr = hw->pcsr;
+
+	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+				   GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac4_set_filter(struct mac_device_info *hw,
+			      struct net_device *dev)
+{
+	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+	unsigned int value = 0;
+
+	if (dev->flags & IFF_PROMISC) {
+		value = GMAC_PACKET_FILTER_PR;
+	} else if ((dev->flags & IFF_ALLMULTI) ||
+			(netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
+		/* Pass all multi */
+		value = GMAC_PACKET_FILTER_PM;
+		/* Set the 64 bits of the HASH tab. To be updated if a larger
+		 * hash table is used
+		 */
+		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
+		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
+	} else if (!netdev_mc_empty(dev)) {
+		u32 mc_filter[2];
+		struct netdev_hw_addr *ha;
+
+		/* Hash filter for multicast */
+		value = GMAC_PACKET_FILTER_HMC;
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		netdev_for_each_mc_addr(ha, dev) {
+			/* The upper 6 bits of the calculated CRC are used to
+			 * index the content of the Hash Table Reg 0 and 1.
+			 */
+			int bit_nr =
+				(bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
+			/* The most significant bit determines the register
+			 * to use while the other 5 bits determine the bit
+			 * within the selected register
+			 */
+			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
+		}
+		writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
+		writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
+	}
+
+	/* Handle multiple unicast addresses */
+	if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+		/* Switch to promiscuous mode if more than 128 addrs
+		 * are required
+		 */
+		value |= GMAC_PACKET_FILTER_PR;
+	} else if (!netdev_uc_empty(dev)) {
+		int reg = 1;
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_uc_addr(ha, dev) {
+			dwmac4_set_umac_addr(hw, ha->addr, reg);
+			reg++;
+		}
+	}
+
+	writel(value, ioaddr + GMAC_PACKET_FILTER);
+}
+
+static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+			     unsigned int fc, unsigned int pause_time)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 channel = STMMAC_CHAN0;	/* FIXME */
+	unsigned int flow = 0;
+
+	pr_debug("GMAC Flow-Control:\n");
+	if (fc & FLOW_RX) {
+		pr_debug("\tReceive Flow-Control ON\n");
+		flow |= GMAC_RX_FLOW_CTRL_RFE;
+		writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+	}
+	if (fc & FLOW_TX) {
+		pr_debug("\tTransmit Flow-Control ON\n");
+		flow |= GMAC_TX_FLOW_CTRL_TFE;
+		writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+
+		if (duplex) {
+			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
+			flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+		}
+	}
+}
+
+static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart)
+{
+	void __iomem *ioaddr = hw->pcsr;
+
+	/* Enable auto-negotiation and external loopback */
+	u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+
+	if (restart)
+		value |= GMAC_AN_CTRL_RAN;
+
+	writel(value, ioaddr + GMAC_AN_CTRL);
+}
+
+static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 value = readl(ioaddr + GMAC_AN_ADV);
+
+	if (value & GMAC_AN_FD)
+		adv->duplex = DUPLEX_FULL;
+	if (value & GMAC_AN_HD)
+		adv->duplex |= DUPLEX_HALF;
+
+	adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+
+	value = readl(ioaddr + GMAC_AN_LPA);
+
+	if (value & GMAC_AN_FD)
+		adv->lp_duplex = DUPLEX_FULL;
+	if (value & GMAC_AN_HD)
+		adv->lp_duplex = DUPLEX_HALF;
+
+	adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+}
+
+static int dwmac4_irq_status(struct mac_device_info *hw,
+			     struct stmmac_extra_stats *x)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 mtl_int_qx_status;
+	u32 intr_status;
+	int ret = 0;
+
+	intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+	/* Unused events (e.g. MMC interrupts) are not handled. */
+	if ((intr_status & mmc_tx_irq))
+		x->mmc_tx_irq_n++;
+	if (unlikely(intr_status & mmc_rx_irq))
+		x->mmc_rx_irq_n++;
+	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+		x->mmc_rx_csum_offload_irq_n++;
+	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
+	if (unlikely(intr_status & pmt_irq)) {
+		readl(ioaddr + GMAC_PMT);
+		x->irq_receive_pmt_irq_n++;
+	}
+
+	if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
+		readl(ioaddr + GMAC_AN_STATUS);
+		x->irq_pcs_ane_n++;
+	}
+
+	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+	/* Check MTL Interrupt: Currently only one queue is used: Q0. */
+	if (mtl_int_qx_status & MTL_INT_Q0) {
+		/* read Queue 0 Interrupt status */
+		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+
+		if (status & MTL_RX_OVERFLOW_INT) {
+			/*  clear Interrupt */
+			writel(status | MTL_RX_OVERFLOW_INT,
+			       ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+			ret = CORE_IRQ_MTL_RX_OVERFLOW;
+		}
+	}
+
+	return ret;
+}
+
+static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+	u32 value;
+
+	/*  Currently only channel 0 is supported */
+	value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
+
+	if (value & MTL_DEBUG_TXSTSFSTS)
+		x->mtl_tx_status_fifo_full++;
+	if (value & MTL_DEBUG_TXFSTS)
+		x->mtl_tx_fifo_not_empty++;
+	if (value & MTL_DEBUG_TWCSTS)
+		x->mmtl_fifo_ctrl++;
+	if (value & MTL_DEBUG_TRCSTS_MASK) {
+		u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+			     >> MTL_DEBUG_TRCSTS_SHIFT;
+		if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+			x->mtl_tx_fifo_read_ctrl_write++;
+		else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+			x->mtl_tx_fifo_read_ctrl_wait++;
+		else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+			x->mtl_tx_fifo_read_ctrl_read++;
+		else
+			x->mtl_tx_fifo_read_ctrl_idle++;
+	}
+	if (value & MTL_DEBUG_TXPAUSED)
+		x->mac_tx_in_pause++;
+
+	value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
+
+	if (value & MTL_DEBUG_RXFSTS_MASK) {
+		u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+			     >> MTL_DEBUG_RXFSTS_SHIFT;
+
+		if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+			x->mtl_rx_fifo_fill_level_full++;
+		else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+			x->mtl_rx_fifo_fill_above_thresh++;
+		else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+			x->mtl_rx_fifo_fill_below_thresh++;
+		else
+			x->mtl_rx_fifo_fill_level_empty++;
+	}
+	if (value & MTL_DEBUG_RRCSTS_MASK) {
+		u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+			     MTL_DEBUG_RRCSTS_SHIFT;
+
+		if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+			x->mtl_rx_fifo_read_ctrl_flush++;
+		else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+			x->mtl_rx_fifo_read_ctrl_read_data++;
+		else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+			x->mtl_rx_fifo_read_ctrl_status++;
+		else
+			x->mtl_rx_fifo_read_ctrl_idle++;
+	}
+	if (value & MTL_DEBUG_RWCSTS)
+		x->mtl_rx_fifo_ctrl_active++;
+
+	/* GMAC debug */
+	value = readl(ioaddr + GMAC_DEBUG);
+
+	if (value & GMAC_DEBUG_TFCSTS_MASK) {
+		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
+			      >> GMAC_DEBUG_TFCSTS_SHIFT;
+
+		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
+			x->mac_tx_frame_ctrl_xfer++;
+		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
+			x->mac_tx_frame_ctrl_pause++;
+		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
+			x->mac_tx_frame_ctrl_wait++;
+		else
+			x->mac_tx_frame_ctrl_idle++;
+	}
+	if (value & GMAC_DEBUG_TPESTS)
+		x->mac_gmii_tx_proto_engine++;
+	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
+		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
+					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+	if (value & GMAC_DEBUG_RPESTS)
+		x->mac_gmii_rx_proto_engine++;
+}
+
+static const struct stmmac_ops dwmac4_ops = {
+	.core_init = dwmac4_core_init,
+	.rx_ipc = dwmac4_rx_ipc_enable,
+	.dump_regs = dwmac4_dump_regs,
+	.host_irq_status = dwmac4_irq_status,
+	.flow_ctrl = dwmac4_flow_ctrl,
+	.pmt = dwmac4_pmt,
+	.set_umac_addr = dwmac4_set_umac_addr,
+	.get_umac_addr = dwmac4_get_umac_addr,
+	.ctrl_ane = dwmac4_ctrl_ane,
+	.get_adv = dwmac4_get_adv,
+	.debug = dwmac4_debug,
+	.set_filter = dwmac4_set_filter,
+};
+
+struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
+				     int perfect_uc_entries, int *synopsys_id)
+{
+	struct mac_device_info *mac;
+	u32 hwid = readl(ioaddr + GMAC_VERSION);
+
+	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+	mac->pcsr = ioaddr;
+	mac->multicast_filter_bins = mcbins;
+	mac->unicast_filter_entries = perfect_uc_entries;
+	mac->mcast_bits_log2 = 0;
+
+	if (mac->multicast_filter_bins)
+		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+	mac->mac = &dwmac4_ops;
+
+	mac->link.port = GMAC_CONFIG_PS;
+	mac->link.duplex = GMAC_CONFIG_DM;
+	mac->link.speed = GMAC_CONFIG_FES;
+	mac->mii.addr = GMAC_MDIO_ADDR;
+	mac->mii.data = GMAC_MDIO_DATA;
+
+	/* Get and dump the chip ID */
+	*synopsys_id = stmmac_get_synopsys_id(hwid);
+
+	if (*synopsys_id > DWMAC_CORE_4_00)
+		mac->dma = &dwmac410_dma_ops;
+	else
+		mac->dma = &dwmac4_dma_ops;
+
+	return mac;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
new file mode 100644
index 0000000..4ec7397
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -0,0 +1,389 @@
+/*
+ * This contains the functions to handle the descriptors for DesignWare databook
+ * 4.xx.
+ *
+ * Copyright (C) 2015  STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "dwmac4_descs.h"
+
+static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
+				       struct dma_desc *p,
+				       void __iomem *ioaddr)
+{
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+	unsigned int tdes3;
+	int ret = tx_done;
+
+	tdes3 = p->des3;
+
+	/* Get tx owner first */
+	if (unlikely(tdes3 & TDES3_OWN))
+		return tx_dma_own;
+
+	/* Verify tx error by looking at the last segment. */
+	if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
+		return tx_not_ls;
+
+	if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
+		if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
+			x->tx_jabber++;
+		if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
+			x->tx_frame_flushed++;
+		if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
+			x->tx_losscarrier++;
+			stats->tx_carrier_errors++;
+		}
+		if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
+			x->tx_carrier++;
+			stats->tx_carrier_errors++;
+		}
+		if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
+			     (tdes3 & TDES3_EXCESSIVE_COLLISION)))
+			stats->collisions +=
+			    (tdes3 & TDES3_COLLISION_COUNT_MASK)
+			    >> TDES3_COLLISION_COUNT_SHIFT;
+
+		if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
+			x->tx_deferred++;
+
+		if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
+			x->tx_underflow++;
+
+		if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
+			x->tx_ip_header_error++;
+
+		if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
+			x->tx_payload_error++;
+
+		ret = tx_err;
+	}
+
+	if (unlikely(tdes3 & TDES3_DEFERRED))
+		x->tx_deferred++;
+
+	return ret;
+}
+
+static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
+				       struct dma_desc *p)
+{
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+	unsigned int rdes1 = p->des1;
+	unsigned int rdes2 = p->des2;
+	unsigned int rdes3 = p->des3;
+	int message_type;
+	int ret = good_frame;
+
+	if (unlikely(rdes3 & RDES3_OWN))
+		return dma_own;
+
+	/* Verify rx error by looking at the last segment. */
+	if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+		return discard_frame;
+
+	if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
+		if (unlikely(rdes3 & RDES3_GIANT_PACKET))
+			stats->rx_length_errors++;
+		if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
+			x->rx_gmac_overflow++;
+
+		if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
+			x->rx_watchdog++;
+
+		if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
+			x->rx_mii++;
+
+		if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
+			x->rx_crc++;
+			stats->rx_crc_errors++;
+		}
+
+		if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
+			x->dribbling_bit++;
+
+		ret = discard_frame;
+	}
+
+	message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
+
+	if (rdes1 & RDES1_IP_HDR_ERROR)
+		x->ip_hdr_err++;
+	if (rdes1 & RDES1_IP_CSUM_BYPASSED)
+		x->ip_csum_bypassed++;
+	if (rdes1 & RDES1_IPV4_HEADER)
+		x->ipv4_pkt_rcvd++;
+	if (rdes1 & RDES1_IPV6_HEADER)
+		x->ipv6_pkt_rcvd++;
+	if (message_type == RDES_EXT_SYNC)
+		x->rx_msg_type_sync++;
+	else if (message_type == RDES_EXT_FOLLOW_UP)
+		x->rx_msg_type_follow_up++;
+	else if (message_type == RDES_EXT_DELAY_REQ)
+		x->rx_msg_type_delay_req++;
+	else if (message_type == RDES_EXT_DELAY_RESP)
+		x->rx_msg_type_delay_resp++;
+	else if (message_type == RDES_EXT_PDELAY_REQ)
+		x->rx_msg_type_pdelay_req++;
+	else if (message_type == RDES_EXT_PDELAY_RESP)
+		x->rx_msg_type_pdelay_resp++;
+	else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
+		x->rx_msg_type_pdelay_follow_up++;
+	else
+		x->rx_msg_type_ext_no_ptp++;
+
+	if (rdes1 & RDES1_PTP_PACKET_TYPE)
+		x->ptp_frame_type++;
+	if (rdes1 & RDES1_PTP_VER)
+		x->ptp_ver++;
+	if (rdes1 & RDES1_TIMESTAMP_DROPPED)
+		x->timestamp_dropped++;
+
+	if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
+		x->sa_rx_filter_fail++;
+		ret = discard_frame;
+	}
+	if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
+		x->da_rx_filter_fail++;
+		ret = discard_frame;
+	}
+
+	if (rdes2 & RDES2_L3_FILTER_MATCH)
+		x->l3_filter_match++;
+	if (rdes2 & RDES2_L4_FILTER_MATCH)
+		x->l4_filter_match++;
+	if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
+	    >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
+		x->l3_l4_filter_no_match++;
+
+	return ret;
+}
+
+static int dwmac4_rd_get_tx_len(struct dma_desc *p)
+{
+	return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
+}
+
+static int dwmac4_get_tx_owner(struct dma_desc *p)
+{
+	return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
+}
+
+static void dwmac4_set_tx_owner(struct dma_desc *p)
+{
+	p->des3 |= TDES3_OWN;
+}
+
+static void dwmac4_set_rx_owner(struct dma_desc *p)
+{
+	p->des3 |= RDES3_OWN;
+}
+
+static int dwmac4_get_tx_ls(struct dma_desc *p)
+{
+	return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
+}
+
+static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
+{
+	return (p->des3 & RDES3_PACKET_SIZE_MASK);
+}
+
+static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
+{
+	p->des2 |= TDES2_TIMESTAMP_ENABLE;
+}
+
+static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
+{
+	return (p->des3 & TDES3_TIMESTAMP_STATUS)
+		>> TDES3_TIMESTAMP_STATUS_SHIFT;
+}
+
+/* NOTE: For RX, the CTX bit has to be checked before calling this;
+ * a dedicated function should handle TX and another one RX.
+ */
+static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
+{
+	struct dma_desc *p = (struct dma_desc *)desc;
+	u64 ns;
+
+	ns = p->des0;
+	/* convert high/sec time stamp value to nanosecond */
+	ns += p->des1 * 1000000000ULL;
+
+	return ns;
+}
+
+static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
+{
+	struct dma_desc *p = (struct dma_desc *)desc;
+
+	return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
+		>> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+}
+
+static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+				   int mode, int end)
+{
+	p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
+
+	if (!disable_rx_ic)
+		p->des3 |= RDES3_INT_ON_COMPLETION_EN;
+}
+
+static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+	p->des0 = 0;
+	p->des1 = 0;
+	p->des2 = 0;
+	p->des3 = 0;
+}
+
+static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+				      bool csum_flag, int mode, bool tx_own,
+				      bool ls)
+{
+	unsigned int tdes3 = p->des3;
+
+	p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
+
+	if (is_fs)
+		tdes3 |= TDES3_FIRST_DESCRIPTOR;
+	else
+		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+
+	if (likely(csum_flag))
+		tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+	else
+		tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+
+	if (ls)
+		tdes3 |= TDES3_LAST_DESCRIPTOR;
+	else
+		tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+	/* Finally set the OWN bit. Later the DMA will start! */
+	if (tx_own)
+		tdes3 |= TDES3_OWN;
+
+	if (is_fs & tx_own)
+		/* When the OWN bit has to be set for the first descriptor of
+		 * a frame, all the other descriptors for the same frame must
+		 * be set beforehand, to avoid a race condition.
+		 */
+		wmb();
+
+	p->des3 = tdes3;
+}
+
+static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+					  int len1, int len2, bool tx_own,
+					  bool ls, unsigned int tcphdrlen,
+					  unsigned int tcppayloadlen)
+{
+	unsigned int tdes3 = p->des3;
+
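+	/* Pack buffer 1 and buffer 2 lengths into TDES2 */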
+	if (len1)
+		p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
+
+	if (len2)
+		p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
+			    & TDES2_BUFFER2_SIZE_MASK;
+
+	if (is_fs) {
+		tdes3 |= TDES3_FIRST_DESCRIPTOR |
+			 TDES3_TCP_SEGMENTATION_ENABLE |
+			 ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
+			  TDES3_SLOT_NUMBER_MASK) |
+			 ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
+	} else {
+		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+	}
+
+	if (ls)
+		tdes3 |= TDES3_LAST_DESCRIPTOR;
+	else
+		tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+	/* Finally set the OWN bit. Later the DMA will start! */
+	if (tx_own)
+		tdes3 |= TDES3_OWN;
+
+	if (is_fs & tx_own)
+		/* When the OWN bit has to be set for the first descriptor of
+		 * a frame, all the other descriptors for the same frame must
+		 * be set beforehand, to avoid a race condition.
+		 */
+		wmb();
+
+	p->des3 = tdes3;
+}
+
+static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
+{
+	p->des2 = 0;
+	p->des3 = 0;
+}
+
+static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
+{
+	p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
+}
+
+static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
+{
+	struct dma_desc *p = (struct dma_desc *)head;
+	int i;
+
+	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+	for (i = 0; i < size; i++) {
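+		/* Only dump descriptors whose first word has been written */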
+		if (p->des0)
+			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+				i, (unsigned int)virt_to_phys(p),
+				p->des0, p->des1, p->des2, p->des3);
+		p++;
+	}
+}
+
+static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
+{
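+	/* Context descriptor carrying the TCP MSS value for TSO */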
+	p->des0 = 0;
+	p->des1 = 0;
+	p->des2 = mss;
+	p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
+}
+
+const struct stmmac_desc_ops dwmac4_desc_ops = {
+	.tx_status = dwmac4_wrback_get_tx_status,
+	.rx_status = dwmac4_wrback_get_rx_status,
+	.get_tx_len = dwmac4_rd_get_tx_len,
+	.get_tx_owner = dwmac4_get_tx_owner,
+	.set_tx_owner = dwmac4_set_tx_owner,
+	.set_rx_owner = dwmac4_set_rx_owner,
+	.get_tx_ls = dwmac4_get_tx_ls,
+	.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
+	.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
+	.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
+	.get_timestamp = dwmac4_wrback_get_timestamp,
+	.get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
+	.set_tx_ic = dwmac4_rd_set_tx_ic,
+	.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
+	.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
+	.release_tx_desc = dwmac4_release_tx_desc,
+	.init_rx_desc = dwmac4_rd_init_rx_desc,
+	.init_tx_desc = dwmac4_rd_init_tx_desc,
+	.display_ring = dwmac4_display_ring,
+	.set_mss = dwmac4_set_mss_ctxt,
+};
+
+const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
new file mode 100644
index 0000000..0902a2e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -0,0 +1,129 @@
+/*
+ * Header File to describe the DMA descriptors and related definitions
+ * specific to DesignWare databook 4.xx.
+ *
+ * Copyright (C) 2015  STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DESCS_H__
+#define __DWMAC4_DESCS_H__
+
+#include <linux/bitops.h>
+
+/* Normal transmit descriptor defines (without split feature) */
+
+/* TDES2 (read format) */
+#define TDES2_BUFFER1_SIZE_MASK		GENMASK(13, 0)
+#define TDES2_VLAN_TAG_MASK		GENMASK(15, 14)
+#define TDES2_BUFFER2_SIZE_MASK		GENMASK(29, 16)
+#define TDES2_BUFFER2_SIZE_MASK_SHIFT	16
+#define TDES2_TIMESTAMP_ENABLE		BIT(30)
+#define TDES2_INTERRUPT_ON_COMPLETION	BIT(31)
+
+/* TDES3 (read format) */
+#define TDES3_PACKET_SIZE_MASK		GENMASK(14, 0)
+#define TDES3_CHECKSUM_INSERTION_MASK	GENMASK(17, 16)
+#define TDES3_CHECKSUM_INSERTION_SHIFT	16
+#define TDES3_TCP_PKT_PAYLOAD_MASK	GENMASK(17, 0)
+#define TDES3_TCP_SEGMENTATION_ENABLE	BIT(18)
+#define TDES3_HDR_LEN_SHIFT		19
+#define TDES3_SLOT_NUMBER_MASK		GENMASK(22, 19)
+#define TDES3_SA_INSERT_CTRL_MASK	GENMASK(25, 23)
+#define TDES3_CRC_PAD_CTRL_MASK		GENMASK(27, 26)
+
+/* TDES3 (write back format) */
+#define TDES3_IP_HDR_ERROR		BIT(0)
+#define TDES3_DEFERRED			BIT(1)
+#define TDES3_UNDERFLOW_ERROR		BIT(2)
+#define TDES3_EXCESSIVE_DEFERRAL	BIT(3)
+#define TDES3_COLLISION_COUNT_MASK	GENMASK(7, 4)
+#define TDES3_COLLISION_COUNT_SHIFT	4
+#define TDES3_EXCESSIVE_COLLISION	BIT(8)
+#define TDES3_LATE_COLLISION		BIT(9)
+#define TDES3_NO_CARRIER		BIT(10)
+#define TDES3_LOSS_CARRIER		BIT(11)
+#define TDES3_PAYLOAD_ERROR		BIT(12)
+#define TDES3_PACKET_FLUSHED		BIT(13)
+#define TDES3_JABBER_TIMEOUT		BIT(14)
+#define TDES3_ERROR_SUMMARY		BIT(15)
+#define TDES3_TIMESTAMP_STATUS		BIT(17)
+#define TDES3_TIMESTAMP_STATUS_SHIFT	17
+
+/* TDES3 context */
+#define TDES3_CTXT_TCMSSV		BIT(26)
+
+/* TDES3 Common */
+#define TDES3_LAST_DESCRIPTOR		BIT(28)
+#define TDES3_LAST_DESCRIPTOR_SHIFT	28
+#define TDES3_FIRST_DESCRIPTOR		BIT(29)
+#define TDES3_CONTEXT_TYPE		BIT(30)
+
+/* TDES3 used for both formats (read and write back) */
+#define TDES3_OWN			BIT(31)
+#define TDES3_OWN_SHIFT			31
+
+/* Normal receive descriptor defines (without split feature) */
+
+/* RDES0 (write back format) */
+#define RDES0_VLAN_TAG_MASK		GENMASK(15, 0)
+
+/* RDES1 (write back format) */
+#define RDES1_IP_PAYLOAD_TYPE_MASK	GENMASK(2, 0)
+#define RDES1_IP_HDR_ERROR		BIT(3)
+#define RDES1_IPV4_HEADER		BIT(4)
+#define RDES1_IPV6_HEADER		BIT(5)
+#define RDES1_IP_CSUM_BYPASSED		BIT(6)
+#define RDES1_IP_CSUM_ERROR		BIT(7)
+#define RDES1_PTP_MSG_TYPE_MASK		GENMASK(11, 8)
+#define RDES1_PTP_PACKET_TYPE		BIT(12)
+#define RDES1_PTP_VER			BIT(13)
+#define RDES1_TIMESTAMP_AVAILABLE	BIT(14)
+#define RDES1_TIMESTAMP_AVAILABLE_SHIFT	14
+#define RDES1_TIMESTAMP_DROPPED		BIT(15)
+#define RDES1_IP_TYPE1_CSUM_MASK	GENMASK(31, 16)
+
+/* RDES2 (write back format) */
+#define RDES2_L3_L4_HEADER_SIZE_MASK	GENMASK(9, 0)
+#define RDES2_VLAN_FILTER_STATUS	BIT(15)
+#define RDES2_SA_FILTER_FAIL		BIT(16)
+#define RDES2_DA_FILTER_FAIL		BIT(17)
+#define RDES2_HASH_FILTER_STATUS	BIT(18)
+#define RDES2_MAC_ADDR_MATCH_MASK	GENMASK(26, 19)
+#define RDES2_HASH_VALUE_MATCH_MASK	GENMASK(26, 19)
+#define RDES2_L3_FILTER_MATCH		BIT(27)
+#define RDES2_L4_FILTER_MATCH		BIT(28)
+#define RDES2_L3_L4_FILT_NB_MATCH_MASK	GENMASK(27, 26)
+#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT	26
+
+/* RDES3 (write back format) */
+#define RDES3_PACKET_SIZE_MASK		GENMASK(14, 0)
+#define RDES3_ERROR_SUMMARY		BIT(15)
+#define RDES3_PACKET_LEN_TYPE_MASK	GENMASK(18, 16)
+#define RDES3_DRIBBLE_ERROR		BIT(19)
+#define RDES3_RECEIVE_ERROR		BIT(20)
+#define RDES3_OVERFLOW_ERROR		BIT(21)
+#define RDES3_RECEIVE_WATCHDOG		BIT(22)
+#define RDES3_GIANT_PACKET		BIT(23)
+#define RDES3_CRC_ERROR			BIT(24)
+#define RDES3_RDES0_VALID		BIT(25)
+#define RDES3_RDES1_VALID		BIT(26)
+#define RDES3_RDES2_VALID		BIT(27)
+#define RDES3_LAST_DESCRIPTOR		BIT(28)
+#define RDES3_FIRST_DESCRIPTOR		BIT(29)
+#define RDES3_CONTEXT_DESCRIPTOR	BIT(30)
+
+/* RDES3 (read format) */
+#define RDES3_BUFFER1_VALID_ADDR	BIT(24)
+#define RDES3_BUFFER2_VALID_ADDR	BIT(25)
+#define RDES3_INT_ON_COMPLETION_EN	BIT(30)
+
+/* RDES3 used for both formats (read and write back) */
+#define RDES3_OWN			BIT(31)
+
+#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
new file mode 100644
index 0000000..116151c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -0,0 +1,354 @@
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.xx has been used for developing this code.
+ *
+ * This contains the functions to handle the dma.
+ *
+ * Copyright (C) 2015  STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include "dwmac4.h"
+#include "dwmac4_dma.h"
+
+static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+	int i;
+
+	pr_info("dwmac4: Master AXI performs %s burst length\n",
+		(value & DMA_SYS_BUS_FB) ? "fixed" : "any");
+
+	if (axi->axi_lpi_en)
+		value |= DMA_AXI_EN_LPI;
+	if (axi->axi_xit_frm)
+		value |= DMA_AXI_LPI_XIT_FRM;
+
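+	/* Program the maximum number of outstanding AXI write/read requests */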
+	value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
+		 DMA_AXI_WR_OSR_LMT_SHIFT;
+
+	value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
+		 DMA_AXI_RD_OSR_LMT_SHIFT;
+
+	/* Depending on the UNDEF bit the Master AXI will perform any burst
+	 * length according to the BLEN programmed (by default all BLEN are
+	 * set).
+	 */
+	for (i = 0; i < AXI_BLEN; i++) {
+		switch (axi->axi_blen[i]) {
+		case 256:
+			value |= DMA_AXI_BLEN256;
+			break;
+		case 128:
+			value |= DMA_AXI_BLEN128;
+			break;
+		case 64:
+			value |= DMA_AXI_BLEN64;
+			break;
+		case 32:
+			value |= DMA_AXI_BLEN32;
+			break;
+		case 16:
+			value |= DMA_AXI_BLEN16;
+			break;
+		case 8:
+			value |= DMA_AXI_BLEN8;
+			break;
+		case 4:
+			value |= DMA_AXI_BLEN4;
+			break;
+		}
+	}
+
+	writel(value, ioaddr + DMA_SYS_BUS_MODE);
+}
+
+static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
+				    u32 dma_tx_phy, u32 dma_rx_phy,
+				    u32 channel)
+{
+	u32 value;
+
+	/* Set PBL for each channel. Currently the same configuration is
+	 * applied to every channel.
+	 */
+	value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
+	value = value | DMA_BUS_MODE_PBL;
+	writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
+
+	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+	value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
+	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
+
+	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
+	value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
+	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
+
+	/* Mask interrupts by writing to CSR7 */
+	writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
+
+	writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
+	writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+}
+
+static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+			    int aal, u32 dma_tx, u32 dma_rx, int atds)
+{
+	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+	int i;
+
+	/* Set the Fixed burst mode */
+	if (fb)
+		value |= DMA_SYS_BUS_FB;
+
+	/* Mixed Burst has no effect when fb is set */
+	if (mb)
+		value |= DMA_SYS_BUS_MB;
+
+	if (aal)
+		value |= DMA_SYS_BUS_AAL;
+
+	writel(value, ioaddr + DMA_SYS_BUS_MODE);
+
+	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+		dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
+}
+
+static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
+{
+	pr_debug(" Channel %d\n", channel);
+	pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
+		 readl(ioaddr + DMA_CHAN_CONTROL(channel)));
+	pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
+		 readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)));
+	pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
+		 readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)));
+	pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
+		 readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
+		 readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
+		 readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
+		 readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
+		 readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)));
+	pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
+		 readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)));
+	pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
+		 readl(ioaddr + DMA_CHAN_INTR_ENA(channel)));
+	pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
+		 readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)));
+	pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
+		 readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)));
+	pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
+		 readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)));
+	pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
+		 readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)));
+	pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
+		 readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
+		 readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
+		 readl(ioaddr + DMA_CHAN_STATUS(channel)));
+}
+
+static void dwmac4_dump_dma_regs(void __iomem *ioaddr)
+{
+	int i;
+
+	pr_debug(" GMAC4 DMA registers\n");
+
+	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+		_dwmac4_dump_dma_regs(ioaddr, i);
+}
+
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+	int i;
+
+	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+		writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
+}
+
+static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
+				    int rxmode, u32 channel)
+{
+	u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
+
+	/* The following code is only done for channel 0; other channels are
+	 * not yet supported.
+	 */
+	mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+	if (txmode == SF_DMA_MODE) {
+		pr_debug("GMAC: enable TX store and forward mode\n");
+		/* Transmit COE type 2 cannot be done in cut-through mode. */
+		mtl_tx_op |= MTL_OP_MODE_TSF;
+	} else {
+		pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+		mtl_tx_op &= ~MTL_OP_MODE_TSF;
+		mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
+		/* Set the transmit threshold */
+		if (txmode <= 32)
+			mtl_tx_op |= MTL_OP_MODE_TTC_32;
+		else if (txmode <= 64)
+			mtl_tx_op |= MTL_OP_MODE_TTC_64;
+		else if (txmode <= 96)
+			mtl_tx_op |= MTL_OP_MODE_TTC_96;
+		else if (txmode <= 128)
+			mtl_tx_op |= MTL_OP_MODE_TTC_128;
+		else if (txmode <= 192)
+			mtl_tx_op |= MTL_OP_MODE_TTC_192;
+		else if (txmode <= 256)
+			mtl_tx_op |= MTL_OP_MODE_TTC_256;
+		else if (txmode <= 384)
+			mtl_tx_op |= MTL_OP_MODE_TTC_384;
+		else
+			mtl_tx_op |= MTL_OP_MODE_TTC_512;
+	}
+
+	writel(mtl_tx_op, ioaddr +  MTL_CHAN_TX_OP_MODE(channel));
+
+	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+	if (rxmode == SF_DMA_MODE) {
+		pr_debug("GMAC: enable RX store and forward mode\n");
+		mtl_rx_op |= MTL_OP_MODE_RSF;
+	} else {
+		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
+		mtl_rx_op &= ~MTL_OP_MODE_RSF;
+		mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+		if (rxmode <= 32)
+			mtl_rx_op |= MTL_OP_MODE_RTC_32;
+		else if (rxmode <= 64)
+			mtl_rx_op |= MTL_OP_MODE_RTC_64;
+		else if (rxmode <= 96)
+			mtl_rx_op |= MTL_OP_MODE_RTC_96;
+		else
+			mtl_rx_op |= MTL_OP_MODE_RTC_128;
+	}
+
+	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+	/* Enable the MTL RX overflow interrupt */
+	mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
+	writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
+	       ioaddr + MTL_CHAN_INT_CTRL(channel));
+}
+
+static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
+				      int rxmode, int rxfifosz)
+{
+	/* Only Channel 0 is actually configured and used */
+	dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
+}
+
+static void dwmac4_get_hw_feature(void __iomem *ioaddr,
+				  struct dma_features *dma_cap)
+{
+	u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
+
+	/*  MAC HW feature0 */
+	dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
+	dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
+	dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
+	dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
+	dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
+	dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
+	dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
+	dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
+	dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
+	/* MMC */
+	dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
+	/* IEEE 1588-2008 */
+	dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
+	/* 802.3az - Energy-Efficient Ethernet (EEE) */
+	dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
+	/* TX and RX csum */
+	dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
+	dma_cap->rx_coe =  (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
+
+	/* MAC HW feature1 */
+	hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+	dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
+	dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+	/* MAC HW feature2 */
+	hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
+	/* TX and RX number of channels */
+	dma_cap->number_rx_channel =
+		((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
+	dma_cap->number_tx_channel =
+		((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
+
+	/* IEEE 1588-2002 */
+	dma_cap->time_stamp = 0;
+}
+
+/* Enable/disable the TSO feature for a given DMA channel */
+static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+{
+	u32 value;
+
+	if (en) {
+		/* enable TSO */
+		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+		writel(value | DMA_CONTROL_TSE,
+		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
+	} else {
+		/* disable TSO */
+		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+		writel(value & ~DMA_CONTROL_TSE,
+		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
+	}
+}
+
+const struct stmmac_dma_ops dwmac4_dma_ops = {
+	.reset = dwmac4_dma_reset,
+	.init = dwmac4_dma_init,
+	.axi = dwmac4_dma_axi,
+	.dump_regs = dwmac4_dump_dma_regs,
+	.dma_mode = dwmac4_dma_operation_mode,
+	.enable_dma_irq = dwmac4_enable_dma_irq,
+	.disable_dma_irq = dwmac4_disable_dma_irq,
+	.start_tx = dwmac4_dma_start_tx,
+	.stop_tx = dwmac4_dma_stop_tx,
+	.start_rx = dwmac4_dma_start_rx,
+	.stop_rx = dwmac4_dma_stop_rx,
+	.dma_interrupt = dwmac4_dma_interrupt,
+	.get_hw_feature = dwmac4_get_hw_feature,
+	.rx_watchdog = dwmac4_rx_watchdog,
+	.set_rx_ring_len = dwmac4_set_rx_ring_len,
+	.set_tx_ring_len = dwmac4_set_tx_ring_len,
+	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+	.enable_tso = dwmac4_enable_tso,
+};
+
+const struct stmmac_dma_ops dwmac410_dma_ops = {
+	.reset = dwmac4_dma_reset,
+	.init = dwmac4_dma_init,
+	.axi = dwmac4_dma_axi,
+	.dump_regs = dwmac4_dump_dma_regs,
+	.dma_mode = dwmac4_dma_operation_mode,
+	.enable_dma_irq = dwmac410_enable_dma_irq,
+	.disable_dma_irq = dwmac4_disable_dma_irq,
+	.start_tx = dwmac4_dma_start_tx,
+	.stop_tx = dwmac4_dma_stop_tx,
+	.start_rx = dwmac4_dma_start_rx,
+	.stop_rx = dwmac4_dma_stop_rx,
+	.dma_interrupt = dwmac4_dma_interrupt,
+	.get_hw_feature = dwmac4_get_hw_feature,
+	.rx_watchdog = dwmac4_rx_watchdog,
+	.set_rx_ring_len = dwmac4_set_rx_ring_len,
+	.set_tx_ring_len = dwmac4_set_tx_ring_len,
+	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+	.enable_tso = dwmac4_enable_tso,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
new file mode 100644
index 0000000..1b06df7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -0,0 +1,202 @@
+/*
+ * DWMAC4 DMA Header file.
+ *
+ *
+ * Copyright (C) 2007-2015  STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DMA_H__
+#define __DWMAC4_DMA_H__
+
+/* Define the max channel number used for tx (also rx).
+ * dwmac4 accepts up to 8 channels for TX (and also 8 channels for RX).
+ */
+#define DMA_CHANNEL_NB_MAX		1
+
+#define DMA_BUS_MODE			0x00001000
+#define DMA_SYS_BUS_MODE		0x00001004
+#define DMA_STATUS			0x00001008
+#define DMA_DEBUG_STATUS_0		0x0000100c
+#define DMA_DEBUG_STATUS_1		0x00001010
+#define DMA_DEBUG_STATUS_2		0x00001014
+#define DMA_AXI_BUS_MODE		0x00001028
+
+/* DMA Bus Mode bitmap */
+#define DMA_BUS_MODE_SFT_RESET		BIT(0)
+
+/* DMA SYS Bus Mode bitmap */
+#define DMA_BUS_MODE_SPH		BIT(24)
+#define DMA_BUS_MODE_PBL		BIT(16)
+#define DMA_BUS_MODE_PBL_SHIFT		16
+#define DMA_BUS_MODE_RPBL_SHIFT		16
+#define DMA_BUS_MODE_MB			BIT(14)
+#define DMA_BUS_MODE_FB			BIT(0)
+
+/* DMA Interrupt top status */
+#define DMA_STATUS_MAC			BIT(17)
+#define DMA_STATUS_MTL			BIT(16)
+#define DMA_STATUS_CHAN7		BIT(7)
+#define DMA_STATUS_CHAN6		BIT(6)
+#define DMA_STATUS_CHAN5		BIT(5)
+#define DMA_STATUS_CHAN4		BIT(4)
+#define DMA_STATUS_CHAN3		BIT(3)
+#define DMA_STATUS_CHAN2		BIT(2)
+#define DMA_STATUS_CHAN1		BIT(1)
+#define DMA_STATUS_CHAN0		BIT(0)
+
+/* DMA debug status bitmap */
+#define DMA_DEBUG_STATUS_TS_MASK	0xf
+#define DMA_DEBUG_STATUS_RS_MASK	0xf
+
+/* DMA AXI bitmap */
+#define DMA_AXI_EN_LPI			BIT(31)
+#define DMA_AXI_LPI_XIT_FRM		BIT(30)
+#define DMA_AXI_WR_OSR_LMT		GENMASK(27, 24)
+#define DMA_AXI_WR_OSR_LMT_SHIFT	24
+#define DMA_AXI_RD_OSR_LMT		GENMASK(19, 16)
+#define DMA_AXI_RD_OSR_LMT_SHIFT	16
+
+#define DMA_AXI_OSR_MAX			0xf
+#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
+				(DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
+
+#define DMA_SYS_BUS_MB			BIT(14)
+#define DMA_AXI_1KBBE			BIT(13)
+#define DMA_SYS_BUS_AAL			BIT(12)
+#define DMA_AXI_BLEN256			BIT(7)
+#define DMA_AXI_BLEN128			BIT(6)
+#define DMA_AXI_BLEN64			BIT(5)
+#define DMA_AXI_BLEN32			BIT(4)
+#define DMA_AXI_BLEN16			BIT(3)
+#define DMA_AXI_BLEN8			BIT(2)
+#define DMA_AXI_BLEN4			BIT(1)
+#define DMA_SYS_BUS_FB			BIT(0)
+
+#define DMA_BURST_LEN_DEFAULT		(DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
+					DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
+					DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
+					DMA_AXI_BLEN4)
+
+#define DMA_AXI_BURST_LEN_MASK		0x000000FE
+
+/* The following DMA defines are channel-oriented */
+#define DMA_CHAN_BASE_ADDR		0x00001100
+#define DMA_CHAN_BASE_OFFSET		0x80
+#define DMA_CHANX_BASE_ADDR(x)		(DMA_CHAN_BASE_ADDR + \
+					(x * DMA_CHAN_BASE_OFFSET))
+#define DMA_CHAN_REG_NUMBER		17
+
+#define DMA_CHAN_CONTROL(x)		DMA_CHANX_BASE_ADDR(x)
+#define DMA_CHAN_TX_CONTROL(x)		(DMA_CHANX_BASE_ADDR(x) + 0x4)
+#define DMA_CHAN_RX_CONTROL(x)		(DMA_CHANX_BASE_ADDR(x) + 0x8)
+#define DMA_CHAN_TX_BASE_ADDR(x)	(DMA_CHANX_BASE_ADDR(x) + 0x14)
+#define DMA_CHAN_RX_BASE_ADDR(x)	(DMA_CHANX_BASE_ADDR(x) + 0x1c)
+#define DMA_CHAN_TX_END_ADDR(x)		(DMA_CHANX_BASE_ADDR(x) + 0x20)
+#define DMA_CHAN_RX_END_ADDR(x)		(DMA_CHANX_BASE_ADDR(x) + 0x28)
+#define DMA_CHAN_TX_RING_LEN(x)		(DMA_CHANX_BASE_ADDR(x) + 0x2c)
+#define DMA_CHAN_RX_RING_LEN(x)		(DMA_CHANX_BASE_ADDR(x) + 0x30)
+#define DMA_CHAN_INTR_ENA(x)		(DMA_CHANX_BASE_ADDR(x) + 0x34)
+#define DMA_CHAN_RX_WATCHDOG(x)		(DMA_CHANX_BASE_ADDR(x) + 0x38)
+#define DMA_CHAN_SLOT_CTRL_STATUS(x)	(DMA_CHANX_BASE_ADDR(x) + 0x3c)
+#define DMA_CHAN_CUR_TX_DESC(x)		(DMA_CHANX_BASE_ADDR(x) + 0x44)
+#define DMA_CHAN_CUR_RX_DESC(x)		(DMA_CHANX_BASE_ADDR(x) + 0x4c)
+#define DMA_CHAN_CUR_TX_BUF_ADDR(x)	(DMA_CHANX_BASE_ADDR(x) + 0x54)
+#define DMA_CHAN_CUR_RX_BUF_ADDR(x)	(DMA_CHANX_BASE_ADDR(x) + 0x5c)
+#define DMA_CHAN_STATUS(x)		(DMA_CHANX_BASE_ADDR(x) + 0x60)
+
+/* DMA Control X */
+#define DMA_CONTROL_MSS_MASK		GENMASK(13, 0)
+
+/* DMA Tx Channel X Control register defines */
+#define DMA_CONTROL_TSE			BIT(12)
+#define DMA_CONTROL_OSP			BIT(4)
+#define DMA_CONTROL_ST			BIT(0)
+
+/* DMA Rx Channel X Control register defines */
+#define DMA_CONTROL_SR			BIT(0)
+
+/* Interrupt status per channel */
+#define DMA_CHAN_STATUS_REB		GENMASK(21, 19)
+#define DMA_CHAN_STATUS_REB_SHIFT	19
+#define DMA_CHAN_STATUS_TEB		GENMASK(18, 16)
+#define DMA_CHAN_STATUS_TEB_SHIFT	16
+#define DMA_CHAN_STATUS_NIS		BIT(15)
+#define DMA_CHAN_STATUS_AIS		BIT(14)
+#define DMA_CHAN_STATUS_CDE		BIT(13)
+#define DMA_CHAN_STATUS_FBE		BIT(12)
+#define DMA_CHAN_STATUS_ERI		BIT(11)
+#define DMA_CHAN_STATUS_ETI		BIT(10)
+#define DMA_CHAN_STATUS_RWT		BIT(9)
+#define DMA_CHAN_STATUS_RPS		BIT(8)
+#define DMA_CHAN_STATUS_RBU		BIT(7)
+#define DMA_CHAN_STATUS_RI		BIT(6)
+#define DMA_CHAN_STATUS_TBU		BIT(2)
+#define DMA_CHAN_STATUS_TPS		BIT(1)
+#define DMA_CHAN_STATUS_TI		BIT(0)
+
+/* Interrupt enable bits per channel */
+#define DMA_CHAN_INTR_ENA_NIE		BIT(16)
+#define DMA_CHAN_INTR_ENA_AIE		BIT(15)
+#define DMA_CHAN_INTR_ENA_NIE_4_10	BIT(15)
+#define DMA_CHAN_INTR_ENA_AIE_4_10	BIT(14)
+#define DMA_CHAN_INTR_ENA_CDE		BIT(13)
+#define DMA_CHAN_INTR_ENA_FBE		BIT(12)
+#define DMA_CHAN_INTR_ENA_ERE		BIT(11)
+#define DMA_CHAN_INTR_ENA_ETE		BIT(10)
+#define DMA_CHAN_INTR_ENA_RWE		BIT(9)
+#define DMA_CHAN_INTR_ENA_RSE		BIT(8)
+#define DMA_CHAN_INTR_ENA_RBUE		BIT(7)
+#define DMA_CHAN_INTR_ENA_RIE		BIT(6)
+#define DMA_CHAN_INTR_ENA_TBUE		BIT(2)
+#define DMA_CHAN_INTR_ENA_TSE		BIT(1)
+#define DMA_CHAN_INTR_ENA_TIE		BIT(0)
+
+#define DMA_CHAN_INTR_NORMAL		(DMA_CHAN_INTR_ENA_NIE | \
+					 DMA_CHAN_INTR_ENA_RIE | \
+					 DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL		(DMA_CHAN_INTR_ENA_AIE | \
+					 DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.00 */
+#define DMA_CHAN_INTR_DEFAULT_MASK	(DMA_CHAN_INTR_NORMAL | \
+					 DMA_CHAN_INTR_ABNORMAL)
+
+#define DMA_CHAN_INTR_NORMAL_4_10	(DMA_CHAN_INTR_ENA_NIE_4_10 | \
+					 DMA_CHAN_INTR_ENA_RIE | \
+					 DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL_4_10	(DMA_CHAN_INTR_ENA_AIE_4_10 | \
+					 DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.10a */
+#define DMA_CHAN_INTR_DEFAULT_MASK_4_10	(DMA_CHAN_INTR_NORMAL_4_10 | \
+					 DMA_CHAN_INTR_ABNORMAL_4_10)
+
+/* channel 0 specific fields */
+#define DMA_CHAN0_DBG_STAT_TPS		GENMASK(15, 12)
+#define DMA_CHAN0_DBG_STAT_TPS_SHIFT	12
+#define DMA_CHAN0_DBG_STAT_RPS		GENMASK(11, 8)
+#define DMA_CHAN0_DBG_STAT_RPS_SHIFT	8
+
+int dwmac4_dma_reset(void __iomem *ioaddr);
+void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr);
+void dwmac4_dma_start_tx(void __iomem *ioaddr);
+void dwmac4_dma_stop_tx(void __iomem *ioaddr);
+void dwmac4_dma_start_rx(void __iomem *ioaddr);
+void dwmac4_dma_stop_rx(void __iomem *ioaddr);
+int dwmac4_dma_interrupt(void __iomem *ioaddr,
+			 struct stmmac_extra_stats *x);
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
+void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+
+#endif /* __DWMAC4_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
new file mode 100644
index 0000000..c7326d5
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2007-2015  STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "common.h"
+#include "dwmac4_dma.h"
+#include "dwmac4.h"
+
+int dwmac4_dma_reset(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + DMA_BUS_MODE);
+	int limit;
+
+	/* DMA SW reset */
+	value |= DMA_BUS_MODE_SFT_RESET;
+	writel(value, ioaddr + DMA_BUS_MODE);
+	limit = 10;
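+	/* Poll for the reset bit to self-clear (at most 10 x 10 ms) */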
+	while (limit--) {
+		if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+			break;
+		mdelay(10);
+	}
+
+	if (limit < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+{
+	writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
+}
+
+void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+{
+	writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
+}
+
+void dwmac4_dma_start_tx(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+	value |= DMA_CONTROL_ST;
+	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+	value = readl(ioaddr + GMAC_CONFIG);
+	value |= GMAC_CONFIG_TE;
+	writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_stop_tx(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+	value &= ~DMA_CONTROL_ST;
+	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+	value = readl(ioaddr + GMAC_CONFIG);
+	value &= ~GMAC_CONFIG_TE;
+	writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_start_rx(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+	value |= DMA_CONTROL_SR;
+
+	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+	value = readl(ioaddr + GMAC_CONFIG);
+	value |= GMAC_CONFIG_RE;
+	writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_stop_rx(void __iomem *ioaddr)
+{
+	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+	value &= ~DMA_CONTROL_SR;
+	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+	value = readl(ioaddr + GMAC_CONFIG);
+	value &= ~GMAC_CONFIG_RE;
+	writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
+{
+	writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
+}
+
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
+{
+	writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
+}
+
+void dwmac4_enable_dma_irq(void __iomem *ioaddr)
+{
+	writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
+	       DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+void dwmac410_enable_dma_irq(void __iomem *ioaddr)
+{
+	writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
+	       ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+void dwmac4_disable_dma_irq(void __iomem *ioaddr)
+{
+	writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+int dwmac4_dma_interrupt(void __iomem *ioaddr,
+			 struct stmmac_extra_stats *x)
+{
+	int ret = 0;
+
+	u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
+
+	/* ABNORMAL interrupts */
+	if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
+		if (unlikely(intr_status & DMA_CHAN_STATUS_RBU))
+			x->rx_buf_unav_irq++;
+		if (unlikely(intr_status & DMA_CHAN_STATUS_RPS))
+			x->rx_process_stopped_irq++;
+		if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
+			x->rx_watchdog_irq++;
+		if (unlikely(intr_status & DMA_CHAN_STATUS_ETI))
+			x->tx_early_irq++;
+		if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
+			x->tx_process_stopped_irq++;
+			ret = tx_hard_error;
+		}
+		if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) {
+			x->fatal_bus_error_irq++;
+			ret = tx_hard_error;
+		}
+	}
+	/* TX/RX NORMAL interrupts */
+	if (likely(intr_status & DMA_CHAN_STATUS_NIS)) {
+		x->normal_irq_n++;
+		if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
+			u32 value;
+
+			value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+			/* to schedule NAPI on real RIE event. */
+			if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
+				x->rx_normal_irq_n++;
+				ret |= handle_rx;
+			}
+		}
+		if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
+			x->tx_normal_irq_n++;
+			ret |= handle_tx;
+		}
+		if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
+			x->rx_early_irq++;
+	}
+
+	/* Clear the interrupt by writing a logic 1 to the chanX interrupt
+	 * status [21-0], except the reserved bits [5-3]
+	 */
+	writel((intr_status & 0x3fffc7),
+	       ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+
+	return ret;
+}
+
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+				unsigned int high, unsigned int low)
+{
+	unsigned long data;
+
+	data = (addr[5] << 8) | addr[4];
+	/* For the MAC Addr registers we have to set the Address Enable (AE)
+	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+	 * is RO.
+	 */
+	data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT);
+	writel(data | GMAC_HI_REG_AE, ioaddr + high);
+	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	writel(data, ioaddr + low);
+}
+
+/* Enable/disable MAC RX/TX */
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
+{
+	u32 value = readl(ioaddr + GMAC_CONFIG);
+
+	if (enable)
+		value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
+	else
+		value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);
+
+	writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+				unsigned int high, unsigned int low)
+{
+	unsigned int hi_addr, lo_addr;
+
+	/* Read the MAC address from the hardware */
+	hi_addr = readl(ioaddr + high);
+	lo_addr = readl(ioaddr + low);
+
+	/* Extract the MAC address from the high and low words */
+	addr[0] = lo_addr & 0xff;
+	addr[1] = (lo_addr >> 8) & 0xff;
+	addr[2] = (lo_addr >> 16) & 0xff;
+	addr[3] = (lo_addr >> 24) & 0xff;
+	addr[4] = hi_addr & 0xff;
+	addr[5] = (hi_addr >> 8) & 0xff;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index cfb018c..38f19c9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -411,6 +411,26 @@
 	}
 }
 
+static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
+{
+	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+	int i;
+
+	pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
+
+	for (i = 0; i < size; i++) {
+		u64 x;
+
+		x = *(u64 *)ep;
+		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+			i, (unsigned int)virt_to_phys(ep),
+			(unsigned int)x, (unsigned int)(x >> 32),
+			ep->basic.des2, ep->basic.des3);
+		ep++;
+	}
+	pr_info("\n");
+}
+
 const struct stmmac_desc_ops enh_desc_ops = {
 	.tx_status = enh_desc_get_tx_status,
 	.rx_status = enh_desc_get_rx_status,
@@ -430,4 +450,5 @@
 	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
 	.get_timestamp = enh_desc_get_timestamp,
 	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
+	.display_ring = enh_desc_display_ring,
 };
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 192c249..38a1a56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -35,6 +35,10 @@
 						 * current value.*/
 #define MMC_CNTRL_PRESET		0x10
 #define MMC_CNTRL_FULL_HALF_PRESET	0x20
+
+#define MMC_GMAC4_OFFSET		0x700
+#define MMC_GMAC3_X_OFFSET		0x100
+
 struct stmmac_counters {
 	unsigned int mmc_tx_octetcount_gb;
 	unsigned int mmc_tx_framecount_gb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 3f20bb1..ce9aa79 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -28,12 +28,12 @@
 
 /* MAC Management Counters register offset */
 
-#define MMC_CNTRL		0x00000100	/* MMC Control */
-#define MMC_RX_INTR		0x00000104	/* MMC RX Interrupt */
-#define MMC_TX_INTR		0x00000108	/* MMC TX Interrupt */
-#define MMC_RX_INTR_MASK	0x0000010c	/* MMC Interrupt Mask */
-#define MMC_TX_INTR_MASK	0x00000110	/* MMC Interrupt Mask */
-#define MMC_DEFAULT_MASK		0xffffffff
+#define MMC_CNTRL		0x00	/* MMC Control */
+#define MMC_RX_INTR		0x04	/* MMC RX Interrupt */
+#define MMC_TX_INTR		0x08	/* MMC TX Interrupt */
+#define MMC_RX_INTR_MASK	0x0c	/* MMC Interrupt Mask */
+#define MMC_TX_INTR_MASK	0x10	/* MMC Interrupt Mask */
+#define MMC_DEFAULT_MASK	0xffffffff
 
 /* MMC TX counter registers */
 
@@ -41,115 +41,115 @@
  * _GB register stands for good and bad frames
  * _G is for good only.
  */
-#define MMC_TX_OCTETCOUNT_GB		0x00000114
-#define MMC_TX_FRAMECOUNT_GB		0x00000118
-#define MMC_TX_BROADCASTFRAME_G		0x0000011c
-#define MMC_TX_MULTICASTFRAME_G		0x00000120
-#define MMC_TX_64_OCTETS_GB		0x00000124
-#define MMC_TX_65_TO_127_OCTETS_GB	0x00000128
-#define MMC_TX_128_TO_255_OCTETS_GB	0x0000012c
-#define MMC_TX_256_TO_511_OCTETS_GB	0x00000130
-#define MMC_TX_512_TO_1023_OCTETS_GB	0x00000134
-#define MMC_TX_1024_TO_MAX_OCTETS_GB	0x00000138
-#define MMC_TX_UNICAST_GB		0x0000013c
-#define MMC_TX_MULTICAST_GB		0x00000140
-#define MMC_TX_BROADCAST_GB		0x00000144
-#define MMC_TX_UNDERFLOW_ERROR		0x00000148
-#define MMC_TX_SINGLECOL_G		0x0000014c
-#define MMC_TX_MULTICOL_G		0x00000150
-#define MMC_TX_DEFERRED			0x00000154
-#define MMC_TX_LATECOL			0x00000158
-#define MMC_TX_EXESSCOL			0x0000015c
-#define MMC_TX_CARRIER_ERROR		0x00000160
-#define MMC_TX_OCTETCOUNT_G		0x00000164
-#define MMC_TX_FRAMECOUNT_G		0x00000168
-#define MMC_TX_EXCESSDEF		0x0000016c
-#define MMC_TX_PAUSE_FRAME		0x00000170
-#define MMC_TX_VLAN_FRAME_G		0x00000174
+#define MMC_TX_OCTETCOUNT_GB		0x14
+#define MMC_TX_FRAMECOUNT_GB		0x18
+#define MMC_TX_BROADCASTFRAME_G		0x1c
+#define MMC_TX_MULTICASTFRAME_G		0x20
+#define MMC_TX_64_OCTETS_GB		0x24
+#define MMC_TX_65_TO_127_OCTETS_GB	0x28
+#define MMC_TX_128_TO_255_OCTETS_GB	0x2c
+#define MMC_TX_256_TO_511_OCTETS_GB	0x30
+#define MMC_TX_512_TO_1023_OCTETS_GB	0x34
+#define MMC_TX_1024_TO_MAX_OCTETS_GB	0x38
+#define MMC_TX_UNICAST_GB		0x3c
+#define MMC_TX_MULTICAST_GB		0x40
+#define MMC_TX_BROADCAST_GB		0x44
+#define MMC_TX_UNDERFLOW_ERROR		0x48
+#define MMC_TX_SINGLECOL_G		0x4c
+#define MMC_TX_MULTICOL_G		0x50
+#define MMC_TX_DEFERRED			0x54
+#define MMC_TX_LATECOL			0x58
+#define MMC_TX_EXESSCOL			0x5c
+#define MMC_TX_CARRIER_ERROR		0x60
+#define MMC_TX_OCTETCOUNT_G		0x64
+#define MMC_TX_FRAMECOUNT_G		0x68
+#define MMC_TX_EXCESSDEF		0x6c
+#define MMC_TX_PAUSE_FRAME		0x70
+#define MMC_TX_VLAN_FRAME_G		0x74
 
 /* MMC RX counter registers */
-#define MMC_RX_FRAMECOUNT_GB		0x00000180
-#define MMC_RX_OCTETCOUNT_GB		0x00000184
-#define MMC_RX_OCTETCOUNT_G		0x00000188
-#define MMC_RX_BROADCASTFRAME_G		0x0000018c
-#define MMC_RX_MULTICASTFRAME_G		0x00000190
-#define MMC_RX_CRC_ERROR		0x00000194
-#define MMC_RX_ALIGN_ERROR		0x00000198
-#define MMC_RX_RUN_ERROR		0x0000019C
-#define MMC_RX_JABBER_ERROR		0x000001A0
-#define MMC_RX_UNDERSIZE_G		0x000001A4
-#define MMC_RX_OVERSIZE_G		0x000001A8
-#define MMC_RX_64_OCTETS_GB		0x000001AC
-#define MMC_RX_65_TO_127_OCTETS_GB	0x000001b0
-#define MMC_RX_128_TO_255_OCTETS_GB	0x000001b4
-#define MMC_RX_256_TO_511_OCTETS_GB	0x000001b8
-#define MMC_RX_512_TO_1023_OCTETS_GB	0x000001bc
-#define MMC_RX_1024_TO_MAX_OCTETS_GB	0x000001c0
-#define MMC_RX_UNICAST_G		0x000001c4
-#define MMC_RX_LENGTH_ERROR		0x000001c8
-#define MMC_RX_AUTOFRANGETYPE		0x000001cc
-#define MMC_RX_PAUSE_FRAMES		0x000001d0
-#define MMC_RX_FIFO_OVERFLOW		0x000001d4
-#define MMC_RX_VLAN_FRAMES_GB		0x000001d8
-#define MMC_RX_WATCHDOG_ERROR		0x000001dc
+#define MMC_RX_FRAMECOUNT_GB		0x80
+#define MMC_RX_OCTETCOUNT_GB		0x84
+#define MMC_RX_OCTETCOUNT_G		0x88
+#define MMC_RX_BROADCASTFRAME_G		0x8c
+#define MMC_RX_MULTICASTFRAME_G		0x90
+#define MMC_RX_CRC_ERROR		0x94
+#define MMC_RX_ALIGN_ERROR		0x98
+#define MMC_RX_RUN_ERROR		0x9C
+#define MMC_RX_JABBER_ERROR		0xA0
+#define MMC_RX_UNDERSIZE_G		0xA4
+#define MMC_RX_OVERSIZE_G		0xA8
+#define MMC_RX_64_OCTETS_GB		0xAC
+#define MMC_RX_65_TO_127_OCTETS_GB	0xb0
+#define MMC_RX_128_TO_255_OCTETS_GB	0xb4
+#define MMC_RX_256_TO_511_OCTETS_GB	0xb8
+#define MMC_RX_512_TO_1023_OCTETS_GB	0xbc
+#define MMC_RX_1024_TO_MAX_OCTETS_GB	0xc0
+#define MMC_RX_UNICAST_G		0xc4
+#define MMC_RX_LENGTH_ERROR		0xc8
+#define MMC_RX_AUTOFRANGETYPE		0xcc
+#define MMC_RX_PAUSE_FRAMES		0xd0
+#define MMC_RX_FIFO_OVERFLOW		0xd4
+#define MMC_RX_VLAN_FRAMES_GB		0xd8
+#define MMC_RX_WATCHDOG_ERROR		0xdc
 /* IPC*/
-#define MMC_RX_IPC_INTR_MASK		0x00000200
-#define MMC_RX_IPC_INTR			0x00000208
+#define MMC_RX_IPC_INTR_MASK		0x100
+#define MMC_RX_IPC_INTR			0x108
 /* IPv4*/
-#define MMC_RX_IPV4_GD			0x00000210
-#define MMC_RX_IPV4_HDERR		0x00000214
-#define MMC_RX_IPV4_NOPAY		0x00000218
-#define MMC_RX_IPV4_FRAG		0x0000021C
-#define MMC_RX_IPV4_UDSBL		0x00000220
+#define MMC_RX_IPV4_GD			0x110
+#define MMC_RX_IPV4_HDERR		0x114
+#define MMC_RX_IPV4_NOPAY		0x118
+#define MMC_RX_IPV4_FRAG		0x11C
+#define MMC_RX_IPV4_UDSBL		0x120
 
-#define MMC_RX_IPV4_GD_OCTETS		0x00000250
-#define MMC_RX_IPV4_HDERR_OCTETS	0x00000254
-#define MMC_RX_IPV4_NOPAY_OCTETS	0x00000258
-#define MMC_RX_IPV4_FRAG_OCTETS		0x0000025c
-#define MMC_RX_IPV4_UDSBL_OCTETS	0x00000260
+#define MMC_RX_IPV4_GD_OCTETS		0x150
+#define MMC_RX_IPV4_HDERR_OCTETS	0x154
+#define MMC_RX_IPV4_NOPAY_OCTETS	0x158
+#define MMC_RX_IPV4_FRAG_OCTETS		0x15c
+#define MMC_RX_IPV4_UDSBL_OCTETS	0x160
 
 /* IPV6*/
-#define MMC_RX_IPV6_GD_OCTETS		0x00000264
-#define MMC_RX_IPV6_HDERR_OCTETS	0x00000268
-#define MMC_RX_IPV6_NOPAY_OCTETS	0x0000026c
+#define MMC_RX_IPV6_GD_OCTETS		0x164
+#define MMC_RX_IPV6_HDERR_OCTETS	0x168
+#define MMC_RX_IPV6_NOPAY_OCTETS	0x16c
 
-#define MMC_RX_IPV6_GD			0x00000224
-#define MMC_RX_IPV6_HDERR		0x00000228
-#define MMC_RX_IPV6_NOPAY		0x0000022c
+#define MMC_RX_IPV6_GD			0x124
+#define MMC_RX_IPV6_HDERR		0x128
+#define MMC_RX_IPV6_NOPAY		0x12c
 
 /* Protocols*/
-#define MMC_RX_UDP_GD			0x00000230
-#define MMC_RX_UDP_ERR			0x00000234
-#define MMC_RX_TCP_GD			0x00000238
-#define MMC_RX_TCP_ERR			0x0000023c
-#define MMC_RX_ICMP_GD			0x00000240
-#define MMC_RX_ICMP_ERR			0x00000244
+#define MMC_RX_UDP_GD			0x130
+#define MMC_RX_UDP_ERR			0x134
+#define MMC_RX_TCP_GD			0x138
+#define MMC_RX_TCP_ERR			0x13c
+#define MMC_RX_ICMP_GD			0x140
+#define MMC_RX_ICMP_ERR			0x144
 
-#define MMC_RX_UDP_GD_OCTETS		0x00000270
-#define MMC_RX_UDP_ERR_OCTETS		0x00000274
-#define MMC_RX_TCP_GD_OCTETS		0x00000278
-#define MMC_RX_TCP_ERR_OCTETS		0x0000027c
-#define MMC_RX_ICMP_GD_OCTETS		0x00000280
-#define MMC_RX_ICMP_ERR_OCTETS		0x00000284
+#define MMC_RX_UDP_GD_OCTETS		0x170
+#define MMC_RX_UDP_ERR_OCTETS		0x174
+#define MMC_RX_TCP_GD_OCTETS		0x178
+#define MMC_RX_TCP_ERR_OCTETS		0x17c
+#define MMC_RX_ICMP_GD_OCTETS		0x180
+#define MMC_RX_ICMP_ERR_OCTETS		0x184
 
-void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
+void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
 {
-	u32 value = readl(ioaddr + MMC_CNTRL);
+	u32 value = readl(mmcaddr + MMC_CNTRL);
 
 	value |= (mode & 0x3F);
 
-	writel(value, ioaddr + MMC_CNTRL);
+	writel(value, mmcaddr + MMC_CNTRL);
 
 	pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
 		 MMC_CNTRL, value);
 }
 
 /* To mask all interrupts. */
-void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
+void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
 {
-	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
-	writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
-	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
 }
 
 /* This reads the MAC core counters (if actually supported).
@@ -157,111 +157,116 @@
  * counter after a read. So all the fields of the mmc struct
  * have to be incremented.
  */
-void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
+void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
 {
-	mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB);
-	mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB);
-	mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G);
-	mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G);
-	mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB);
+	mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB);
+	mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB);
+	mmc->mmc_tx_broadcastframe_g += readl(mmcaddr +
+					      MMC_TX_BROADCASTFRAME_G);
+	mmc->mmc_tx_multicastframe_g += readl(mmcaddr +
+					      MMC_TX_MULTICASTFRAME_G);
+	mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB);
 	mmc->mmc_tx_65_to_127_octets_gb +=
-	    readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB);
+	    readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB);
 	mmc->mmc_tx_128_to_255_octets_gb +=
-	    readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB);
+	    readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB);
 	mmc->mmc_tx_256_to_511_octets_gb +=
-	    readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB);
+	    readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB);
 	mmc->mmc_tx_512_to_1023_octets_gb +=
-	    readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB);
+	    readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB);
 	mmc->mmc_tx_1024_to_max_octets_gb +=
-	    readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
-	mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB);
-	mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB);
-	mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB);
-	mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR);
-	mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G);
-	mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G);
-	mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED);
-	mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL);
-	mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL);
-	mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR);
-	mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G);
-	mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G);
-	mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF);
-	mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME);
-	mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G);
+	    readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
+	mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB);
+	mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB);
+	mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB);
+	mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR);
+	mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G);
+	mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G);
+	mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED);
+	mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL);
+	mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL);
+	mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR);
+	mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G);
+	mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G);
+	mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
+	mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
+	mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
 
 	/* MMC RX counter registers */
-	mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB);
-	mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB);
-	mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
-	mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
-	mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
-	mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR);
-	mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
-	mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
-	mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
-	mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G);
-	mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G);
-	mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB);
+	mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
+	mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB);
+	mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G);
+	mmc->mmc_rx_broadcastframe_g += readl(mmcaddr +
+					      MMC_RX_BROADCASTFRAME_G);
+	mmc->mmc_rx_multicastframe_g += readl(mmcaddr +
+					      MMC_RX_MULTICASTFRAME_G);
+	mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR);
+	mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR);
+	mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR);
+	mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR);
+	mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G);
+	mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G);
+	mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB);
 	mmc->mmc_rx_65_to_127_octets_gb +=
-	    readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB);
+	    readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB);
 	mmc->mmc_rx_128_to_255_octets_gb +=
-	    readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB);
+	    readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB);
 	mmc->mmc_rx_256_to_511_octets_gb +=
-	    readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB);
+	    readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB);
 	mmc->mmc_rx_512_to_1023_octets_gb +=
-	    readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB);
+	    readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB);
 	mmc->mmc_rx_1024_to_max_octets_gb +=
-	    readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
-	mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G);
-	mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR);
-	mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE);
-	mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES);
-	mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW);
-	mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB);
-	mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR);
+	    readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
+	mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G);
+	mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR);
+	mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE);
+	mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES);
+	mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
+	mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
+	mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
 	/* IPC */
-	mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK);
-	mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR);
+	mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
+	mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
 	/* IPv4 */
-	mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD);
-	mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR);
-	mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY);
-	mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG);
-	mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL);
+	mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
+	mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
+	mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY);
+	mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG);
+	mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL);
 
-	mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS);
+	mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS);
 	mmc->mmc_rx_ipv4_hderr_octets +=
-	    readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS);
+	    readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS);
 	mmc->mmc_rx_ipv4_nopay_octets +=
-	    readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS);
-	mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS);
+	    readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS);
+	mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr +
+					      MMC_RX_IPV4_FRAG_OCTETS);
 	mmc->mmc_rx_ipv4_udsbl_octets +=
-	    readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS);
+	    readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS);
 
 	/* IPV6 */
-	mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS);
+	mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS);
 	mmc->mmc_rx_ipv6_hderr_octets +=
-	    readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS);
+	    readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS);
 	mmc->mmc_rx_ipv6_nopay_octets +=
-	    readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS);
+	    readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS);
 
-	mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD);
-	mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR);
-	mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY);
+	mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD);
+	mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR);
+	mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY);
 
 	/* Protocols */
-	mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD);
-	mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR);
-	mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD);
-	mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR);
-	mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD);
-	mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR);
+	mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD);
+	mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR);
+	mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD);
+	mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR);
+	mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD);
+	mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR);
 
-	mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS);
-	mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS);
-	mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS);
-	mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS);
-	mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS);
-	mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS);
+	mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS);
+	mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS);
+	mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS);
+	mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS);
+	mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
+	mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
 }
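
The reads above now take an mmcaddr that already points at the start of the MMC block, which is why every counter offset in this file dropped its 0x100 base (0x114 became 0x14, 0x180 became 0x80, and so on). A minimal sketch of that arithmetic, assuming MMC_GMAC3_X_OFFSET is the 0x100 block base that stmmac_mmc_setup() adds later in this series; any names not visible in this diff are illustrative only:

#include <linux/io.h>

/* Assumed values, consistent with the 0x100 shift visible in this hunk. */
#define MMC_GMAC3_X_OFFSET	0x100	/* MMC block base inside the MAC register space */
#define MMC_TX_OCTETCOUNT_GB	0x14	/* was 0x114 when expressed from the MAC base */

static u32 example_read_tx_octets(void __iomem *ioaddr)
{
	void __iomem *mmcaddr = ioaddr + MMC_GMAC3_X_OFFSET;

	/* Same register as the old readl(ioaddr + 0x114). */
	return readl(mmcaddr + MMC_TX_OCTETCOUNT_GB);
}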
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index e13228f..2beacd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -199,11 +199,6 @@
 {
 	unsigned int tdes1 = p->des1;
 
-	if (mode == STMMAC_CHAIN_MODE)
-		norm_set_tx_desc_len_on_chain(p, len);
-	else
-		norm_set_tx_desc_len_on_ring(p, len);
-
 	if (is_fs)
 		tdes1 |= TDES1_FIRST_SEGMENT;
 	else
@@ -217,10 +212,15 @@
 	if (ls)
 		tdes1 |= TDES1_LAST_SEGMENT;
 
-	if (tx_own)
-		tdes1 |= TDES0_OWN;
-
 	p->des1 = tdes1;
+
+	if (mode == STMMAC_CHAIN_MODE)
+		norm_set_tx_desc_len_on_chain(p, len);
+	else
+		norm_set_tx_desc_len_on_ring(p, len);
+
+	if (tx_own)
+		p->des0 |= TDES0_OWN;
 }
 
 static void ndesc_set_tx_ic(struct dma_desc *p)
@@ -279,6 +279,26 @@
 		return 1;
 }
 
+static void ndesc_display_ring(void *head, unsigned int size, bool rx)
+{
+	struct dma_desc *p = (struct dma_desc *)head;
+	int i;
+
+	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+	for (i = 0; i < size; i++) {
+		u64 x;
+
+		x = *(u64 *)p;
+		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+			i, (unsigned int)virt_to_phys(p),
+			(unsigned int)x, (unsigned int)(x >> 32),
+			p->des2, p->des3);
+		p++;
+	}
+	pr_info("\n");
+}
+
 const struct stmmac_desc_ops ndesc_ops = {
 	.tx_status = ndesc_get_tx_status,
 	.rx_status = ndesc_get_rx_status,
@@ -297,4 +317,5 @@
 	.get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
 	.get_timestamp = ndesc_get_timestamp,
 	.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
+	.display_ring = ndesc_display_ring,
 };
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 8bbab97..59ae608 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,7 +24,7 @@
 #define __STMMAC_H__
 
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
-#define DRV_MODULE_VERSION	"Oct_2015"
+#define DRV_MODULE_VERSION	"Jan_2016"
 
 #include <linux/clk.h>
 #include <linux/stmmac.h>
@@ -67,6 +67,7 @@
 	spinlock_t tx_lock;
 	bool tx_path_in_lpi_mode;
 	struct timer_list txtimer;
+	bool tso;
 
 	struct dma_desc *dma_rx	____cacheline_aligned_in_smp;
 	struct dma_extended_desc *dma_erx;
@@ -128,6 +129,10 @@
 	int use_riwt;
 	int irq_wake;
 	spinlock_t ptp_lock;
+	void __iomem *mmcaddr;
+	u32 rx_tail_addr;
+	u32 tx_tail_addr;
+	u32 mss;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dbgfs_dir;
@@ -143,9 +148,9 @@
 
 int stmmac_ptp_register(struct stmmac_priv *priv);
 void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_resume(struct net_device *ndev);
-int stmmac_suspend(struct net_device *ndev);
-int stmmac_dvr_remove(struct net_device *ndev);
+int stmmac_resume(struct device *dev);
+int stmmac_suspend(struct device *dev);
+int stmmac_dvr_remove(struct device *dev);
 int stmmac_dvr_probe(struct device *device,
 		     struct plat_stmmacenet_data *plat_dat,
 		     struct stmmac_resources *res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 3c7928e..e2b98b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -161,6 +161,9 @@
 	STMMAC_STAT(mtl_rx_fifo_ctrl_active),
 	STMMAC_STAT(mac_rx_frame_ctrl_fifo),
 	STMMAC_STAT(mac_gmii_rx_proto_engine),
+	/* TSO */
+	STMMAC_STAT(tx_tso_frames),
+	STMMAC_STAT(tx_tso_nfrags),
 };
 #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
 
@@ -499,14 +502,14 @@
 	int i, j = 0;
 
 	/* Update the DMA HW counters for dwmac10/100 */
-	if (!priv->plat->has_gmac)
+	if (priv->hw->dma->dma_diagnostic_fr)
 		priv->hw->dma->dma_diagnostic_fr(&dev->stats,
 						 (void *) &priv->xstats,
 						 priv->ioaddr);
 	else {
 		/* If supported, for new GMAC chips expose the MMC counters */
 		if (priv->dma_cap.rmon) {
-			dwmac_mmc_read(priv->ioaddr, &priv->mmc);
+			dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
 
 			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
 				char *p;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4c5ce98..eac45d0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -56,6 +56,7 @@
 #include "dwmac1000.h"
 
 #define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
+#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
 
 /* Module parameters */
 #define TX_TIMEO	5000
@@ -278,7 +279,6 @@
  */
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
-	char *phy_bus_name = priv->plat->phy_bus_name;
 	unsigned long flags;
 	bool ret = false;
 
@@ -289,10 +289,6 @@
 	    (priv->pcs == STMMAC_PCS_RTBI))
 		goto out;
 
-	/* Never init EEE in case of a switch is attached */
-	if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
-		goto out;
-
 	/* MAC core supports the EEE feature. */
 	if (priv->dma_cap.eee) {
 		int tx_lpi_timer = priv->tx_lpi_timer;
@@ -726,13 +722,15 @@
 			new_state = 1;
 			switch (phydev->speed) {
 			case 1000:
-				if (likely(priv->plat->has_gmac))
+				if (likely((priv->plat->has_gmac) ||
+					   (priv->plat->has_gmac4)))
 					ctrl &= ~priv->hw->link.port;
 				stmmac_hw_fix_mac_speed(priv);
 				break;
 			case 100:
 			case 10:
-				if (priv->plat->has_gmac) {
+				if (likely((priv->plat->has_gmac) ||
+					   (priv->plat->has_gmac4))) {
 					ctrl |= priv->hw->link.port;
 					if (phydev->speed == SPEED_100) {
 						ctrl |= priv->hw->link.speed;
@@ -772,10 +770,16 @@
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	/* At this stage, it could be needed to setup the EEE or adjust some
-	 * MAC related HW registers.
-	 */
-	priv->eee_enabled = stmmac_eee_init(priv);
+	if (phydev->is_pseudo_fixed_link)
+		/* Stop the PHY layer from calling the adjust_link hook when
+		 * a switch is attached to the stmmac driver.
+		 */
+		phydev->irq = PHY_IGNORE_INTERRUPT;
+	else
+		/* At this stage, init the EEE if supported.
+		 * Never called in case of fixed_link.
+		 */
+		priv->eee_enabled = stmmac_eee_init(priv);
 }
 
 /**
@@ -827,12 +831,8 @@
 		phydev = of_phy_connect(dev, priv->plat->phy_node,
 					&stmmac_adjust_link, 0, interface);
 	} else {
-		if (priv->plat->phy_bus_name)
-			snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
-				 priv->plat->phy_bus_name, priv->plat->bus_id);
-		else
-			snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
-				 priv->plat->bus_id);
+		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+			 priv->plat->bus_id);
 
 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 			 priv->plat->phy_addr);
@@ -870,11 +870,6 @@
 		return -ENODEV;
 	}
 
-	/* If attached to a switch, there is no reason to poll phy handler */
-	if (priv->plat->phy_bus_name)
-		if (!strcmp(priv->plat->phy_bus_name, "fixed"))
-			phydev->irq = PHY_IGNORE_INTERRUPT;
-
 	pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
 		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
 
@@ -883,53 +878,22 @@
 	return 0;
 }
 
-/**
- * stmmac_display_ring - display ring
- * @head: pointer to the head of the ring passed.
- * @size: size of the ring.
- * @extend_desc: to verify if extended descriptors are used.
- * Description: display the control/status and buffer descriptors.
- */
-static void stmmac_display_ring(void *head, int size, int extend_desc)
-{
-	int i;
-	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
-	struct dma_desc *p = (struct dma_desc *)head;
-
-	for (i = 0; i < size; i++) {
-		u64 x;
-		if (extend_desc) {
-			x = *(u64 *) ep;
-			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
-				i, (unsigned int)virt_to_phys(ep),
-				(unsigned int)x, (unsigned int)(x >> 32),
-				ep->basic.des2, ep->basic.des3);
-			ep++;
-		} else {
-			x = *(u64 *) p;
-			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
-				i, (unsigned int)virt_to_phys(p),
-				(unsigned int)x, (unsigned int)(x >> 32),
-				p->des2, p->des3);
-			p++;
-		}
-		pr_info("\n");
-	}
-}
-
 static void stmmac_display_rings(struct stmmac_priv *priv)
 {
+	void *head_rx, *head_tx;
+
 	if (priv->extend_desc) {
-		pr_info("Extended RX descriptor ring:\n");
-		stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1);
-		pr_info("Extended TX descriptor ring:\n");
-		stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
+		head_rx = (void *)priv->dma_erx;
+		head_tx = (void *)priv->dma_etx;
 	} else {
-		pr_info("RX descriptor ring:\n");
-		stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0);
-		pr_info("TX descriptor ring:\n");
-		stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
+		head_rx = (void *)priv->dma_rx;
+		head_tx = (void *)priv->dma_tx;
 	}
+
+	/* Display Rx ring */
+	priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+	/* Display Tx ring */
+	priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
 }
 
 static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1008,7 +972,10 @@
 		return -EINVAL;
 	}
 
-	p->des2 = priv->rx_skbuff_dma[i];
+	if (priv->synopsys_id >= DWMAC_CORE_4_00)
+		p->des0 = priv->rx_skbuff_dma[i];
+	else
+		p->des2 = priv->rx_skbuff_dma[i];
 
 	if ((priv->hw->mode->init_desc3) &&
 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -1099,7 +1066,16 @@
 			p = &((priv->dma_etx + i)->basic);
 		else
 			p = priv->dma_tx + i;
-		p->des2 = 0;
+
+		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+			p->des0 = 0;
+			p->des1 = 0;
+			p->des2 = 0;
+			p->des3 = 0;
+		} else {
+			p->des2 = 0;
+		}
+
 		priv->tx_skbuff_dma[i].buf = 0;
 		priv->tx_skbuff_dma[i].map_as_page = false;
 		priv->tx_skbuff_dma[i].len = 0;
@@ -1362,9 +1338,13 @@
 						 priv->tx_skbuff_dma[entry].len,
 						 DMA_TO_DEVICE);
 			priv->tx_skbuff_dma[entry].buf = 0;
+			priv->tx_skbuff_dma[entry].len = 0;
 			priv->tx_skbuff_dma[entry].map_as_page = false;
 		}
-		priv->hw->mode->clean_desc3(priv, p);
+
+		if (priv->hw->mode->clean_desc3)
+			priv->hw->mode->clean_desc3(priv, p);
+
 		priv->tx_skbuff_dma[entry].last_segment = false;
 		priv->tx_skbuff_dma[entry].is_jumbo = false;
 
@@ -1487,41 +1467,23 @@
 static void stmmac_mmc_setup(struct stmmac_priv *priv)
 {
 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
-	    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-	dwmac_mmc_intr_all_mask(priv->ioaddr);
+	if (priv->synopsys_id >= DWMAC_CORE_4_00)
+		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
+	else
+		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
+
+	dwmac_mmc_intr_all_mask(priv->mmcaddr);
 
 	if (priv->dma_cap.rmon) {
-		dwmac_mmc_ctrl(priv->ioaddr, mode);
+		dwmac_mmc_ctrl(priv->mmcaddr, mode);
 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
 	} else
 		pr_info(" No MAC Management Counters available\n");
 }
 
 /**
- * stmmac_get_synopsys_id - return the SYINID.
- * @priv: driver private structure
- * Description: this simple function is to decode and return the SYINID
- * starting from the HW core register.
- */
-static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
-{
-	u32 hwid = priv->hw->synopsys_uid;
-
-	/* Check Synopsys Id (not available on old chips) */
-	if (likely(hwid)) {
-		u32 uid = ((hwid & 0x0000ff00) >> 8);
-		u32 synid = (hwid & 0x000000ff);
-
-		pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
-			uid, synid);
-
-		return synid;
-	}
-	return 0;
-}
-
-/**
  * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
  * @priv: driver private structure
  * Description: select the Enhanced/Alternate or Normal descriptors.
@@ -1558,51 +1520,15 @@
  */
 static int stmmac_get_hw_features(struct stmmac_priv *priv)
 {
-	u32 hw_cap = 0;
+	u32 ret = 0;
 
 	if (priv->hw->dma->get_hw_feature) {
-		hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
-
-		priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
-		priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
-		priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
-		priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
-		priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
-		priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
-		priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
-		priv->dma_cap.pmt_remote_wake_up =
-		    (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
-		priv->dma_cap.pmt_magic_frame =
-		    (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
-		/* MMC */
-		priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
-		/* IEEE 1588-2002 */
-		priv->dma_cap.time_stamp =
-		    (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
-		/* IEEE 1588-2008 */
-		priv->dma_cap.atime_stamp =
-		    (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
-		/* 802.3az - Energy-Efficient Ethernet (EEE) */
-		priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
-		priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
-		/* TX and RX csum */
-		priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
-		priv->dma_cap.rx_coe_type1 =
-		    (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
-		priv->dma_cap.rx_coe_type2 =
-		    (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
-		priv->dma_cap.rxfifo_over_2048 =
-		    (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
-		/* TX and RX number of channels */
-		priv->dma_cap.number_rx_channel =
-		    (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
-		priv->dma_cap.number_tx_channel =
-		    (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
-		/* Alternate (enhanced) DESC mode */
-		priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+		priv->hw->dma->get_hw_feature(priv->ioaddr,
+					      &priv->dma_cap);
+		ret = 1;
 	}
 
-	return hw_cap;
+	return ret;
 }
 
 /**
@@ -1658,8 +1584,19 @@
 	priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
 			    aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
 
-	if ((priv->synopsys_id >= DWMAC_CORE_3_50) &&
-	    (priv->plat->axi && priv->hw->dma->axi))
+	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+		priv->rx_tail_addr = priv->dma_rx_phy +
+			    (DMA_RX_SIZE * sizeof(struct dma_desc));
+		priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
+					       STMMAC_CHAN0);
+
+		priv->tx_tail_addr = priv->dma_tx_phy +
+			    (DMA_TX_SIZE * sizeof(struct dma_desc));
+		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+					       STMMAC_CHAN0);
+	}
+
+	if (priv->plat->axi && priv->hw->dma->axi)
 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
 
 	return ret;
@@ -1739,7 +1676,10 @@
 	}
 
 	/* Enable the MAC Rx/Tx */
-	stmmac_set_mac(priv->ioaddr, true);
+	if (priv->synopsys_id >= DWMAC_CORE_4_00)
+		stmmac_dwmac4_set_mac(priv->ioaddr, true);
+	else
+		stmmac_set_mac(priv->ioaddr, true);
 
 	/* Set the HW DMA mode and the COE */
 	stmmac_dma_operation_mode(priv);
@@ -1777,6 +1717,18 @@
 	if (priv->pcs && priv->hw->mac->ctrl_ane)
 		priv->hw->mac->ctrl_ane(priv->hw, 0);
 
+	/*  set TX ring length */
+	if (priv->hw->dma->set_tx_ring_len)
+		priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+					       (DMA_TX_SIZE - 1));
+	/*  set RX ring length */
+	if (priv->hw->dma->set_rx_ring_len)
+		priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+					       (DMA_RX_SIZE - 1));
+	/* Enable TSO */
+	if (priv->tso)
+		priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
+
 	return 0;
 }
 
@@ -1942,6 +1894,239 @@
 }
 
 /**
+ *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
+ *  @priv: driver private structure
+ *  @des: buffer start address
+ *  @total_len: total length to fill in descriptors
+ *  @last_segment: condition for the last descriptor
+ *  Description:
+ *  This function fills the descriptors and requests new descriptors as
+ *  needed, according to the buffer length to fill.
+ */
+static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+				 int total_len, bool last_segment)
+{
+	struct dma_desc *desc;
+	int tmp_len;
+	u32 buff_size;
+
+	tmp_len = total_len;
+
+	while (tmp_len > 0) {
+		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+		desc = priv->dma_tx + priv->cur_tx;
+
+		desc->des0 = des + (total_len - tmp_len);
+		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+			    TSO_MAX_BUFF_SIZE : tmp_len;
+
+		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
+			0, 1,
+			(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+			0, 0);
+
+		tmp_len -= TSO_MAX_BUFF_SIZE;
+	}
+}
+
+/**
+ *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
+ *  @skb : the socket buffer
+ *  @dev : device pointer
+ *  Description: this is the transmit function that is called on TSO frames
+ *  (support available on GMAC4 and newer chips).
+ *  The diagram below shows the ring programming in case of TSO frames:
+ *
+ *  First Descriptor
+ *   --------
+ *   | DES0 |---> buffer1 = L2/L3/L4 header
+ *   | DES1 |---> TCP Payload (can continue on next descr...)
+ *   | DES2 |---> buffer 1 and 2 len
+ *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
+ *   --------
+ *	|
+ *     ...
+ *	|
+ *   --------
+ *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
+ *   | DES1 | --|
+ *   | DES2 | --> buffer 1 and 2 len
+ *   | DES3 |
+ *   --------
+ *
+ * The mss is fixed when TSO is enabled, so the TDES3 ctx field only needs
+ * programming when the mss changes.
+ */
+static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	u32 pay_len, mss;
+	int tmp_pay_len = 0;
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int nfrags = skb_shinfo(skb)->nr_frags;
+	unsigned int first_entry, des;
+	struct dma_desc *desc, *first, *mss_desc = NULL;
+	u8 proto_hdr_len;
+	int i;
+
+	spin_lock(&priv->tx_lock);
+
+	/* Compute header lengths */
+	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+	/* Desc availability based on threshold should be enough safe */
+	if (unlikely(stmmac_tx_avail(priv) <
+		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
+		if (!netif_queue_stopped(dev)) {
+			netif_stop_queue(dev);
+			/* This is a hard error, log it. */
+			pr_err("%s: Tx Ring full when queue awake\n", __func__);
+		}
+		spin_unlock(&priv->tx_lock);
+		return NETDEV_TX_BUSY;
+	}
+
+	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
+
+	mss = skb_shinfo(skb)->gso_size;
+
+	/* set new MSS value if needed */
+	if (mss != priv->mss) {
+		mss_desc = priv->dma_tx + priv->cur_tx;
+		priv->hw->desc->set_mss(mss_desc, mss);
+		priv->mss = mss;
+		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+	}
+
+	if (netif_msg_tx_queued(priv)) {
+		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
+		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
+			skb->data_len);
+	}
+
+	first_entry = priv->cur_tx;
+
+	desc = priv->dma_tx + first_entry;
+	first = desc;
+
+	/* first descriptor: fill Headers on Buf1 */
+	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
+			     DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->device, des))
+		goto dma_map_err;
+
+	priv->tx_skbuff_dma[first_entry].buf = des;
+	priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+	priv->tx_skbuff[first_entry] = skb;
+
+	first->des0 = des;
+
+	/* Fill start of payload in buff2 of first descriptor */
+	if (pay_len)
+		first->des1 =  des + proto_hdr_len;
+
+	/* If needed take extra descriptors to fill the remaining payload */
+	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+
+	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
+
+	/* Prepare fragments */
+	for (i = 0; i < nfrags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		des = skb_frag_dma_map(priv->device, frag, 0,
+				       skb_frag_size(frag),
+				       DMA_TO_DEVICE);
+
+		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
+				     (i == nfrags - 1));
+
+		priv->tx_skbuff_dma[priv->cur_tx].buf = des;
+		priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
+		priv->tx_skbuff[priv->cur_tx] = NULL;
+		priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
+	}
+
+	priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
+
+	priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+
+	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+		if (netif_msg_hw(priv))
+			pr_debug("%s: stop transmitted packets\n", __func__);
+		netif_stop_queue(dev);
+	}
+
+	dev->stats.tx_bytes += skb->len;
+	priv->xstats.tx_tso_frames++;
+	priv->xstats.tx_tso_nfrags += nfrags;
+
+	/* Manage tx mitigation */
+	priv->tx_count_frames += nfrags + 1;
+	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+		mod_timer(&priv->txtimer,
+			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
+	} else {
+		priv->tx_count_frames = 0;
+		priv->hw->desc->set_tx_ic(desc);
+		priv->xstats.tx_set_ic_bit++;
+	}
+
+	if (!priv->hwts_tx_en)
+		skb_tx_timestamp(skb);
+
+	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+		     priv->hwts_tx_en)) {
+		/* declare that device is doing timestamping */
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		priv->hw->desc->enable_tx_timestamp(first);
+	}
+
+	/* Complete the first descriptor before granting the DMA */
+	priv->hw->desc->prepare_tso_tx_desc(first, 1,
+			proto_hdr_len,
+			pay_len,
+			1, priv->tx_skbuff_dma[first_entry].last_segment,
+			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+
+	/* If context desc is used to change MSS */
+	if (mss_desc)
+		priv->hw->desc->set_tx_owner(mss_desc);
+
+	/* The own bit must be the latest setting done when preparing the
+	 * descriptor, and then a barrier is needed to make sure that
+	 * all is coherent before granting the DMA engine.
+	 */
+	smp_wmb();
+
+	if (netif_msg_pktdata(priv)) {
+		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+			__func__, priv->cur_tx, priv->dirty_tx, first_entry,
+			priv->cur_tx, first, nfrags);
+
+		priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
+					     0);
+
+		pr_info(">>> frame to be transmitted: ");
+		print_pkt(skb->data, skb_headlen(skb));
+	}
+
+	netdev_sent_queue(dev, skb->len);
+
+	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+				       STMMAC_CHAN0);
+
+	spin_unlock(&priv->tx_lock);
+	return NETDEV_TX_OK;
+
+dma_map_err:
+	spin_unlock(&priv->tx_lock);
+	dev_err(priv->device, "Tx dma map failed\n");
+	dev_kfree_skb(skb);
+	priv->dev->stats.tx_dropped++;
+	return NETDEV_TX_OK;
+}
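
For reference, the descriptor-availability test at the top of stmmac_tso_xmit() uses the same TSO_MAX_BUFF_SIZE chunking that stmmac_tso_allocator() performs on the payload. A standalone sketch of that estimate (illustrative helper, not driver code):

#include <linux/sizes.h>

#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Number of data descriptors the driver budgets for one TSO frame:
 * the payload after the L2/L3/L4 headers is cut into TSO_MAX_BUFF_SIZE
 * pieces, plus one descriptor for the header buffer. A context
 * descriptor for a changed MSS is handled separately.
 */
static unsigned int example_tso_desc_budget(unsigned int skb_len,
					    unsigned int proto_hdr_len)
{
	return (skb_len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1;
}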
+
+/**
  *  stmmac_xmit - Tx entry point of the driver
  *  @skb : the socket buffer
  *  @dev : device pointer
@@ -1958,6 +2143,13 @@
 	unsigned int entry, first_entry;
 	struct dma_desc *desc, *first;
 	unsigned int enh_desc;
+	unsigned int des;
+
+	/* Manage oversized TCP frames for GMAC4 device */
+	if (skb_is_gso(skb) && priv->tso) {
+		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			return stmmac_tso_xmit(skb, dev);
+	}
 
 	spin_lock(&priv->tx_lock);
 
@@ -1993,7 +2185,8 @@
 	if (enh_desc)
 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
 
-	if (unlikely(is_jumbo)) {
+	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
+					 DWMAC_CORE_4_00)) {
 		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
 		if (unlikely(entry < 0))
 			goto dma_map_err;
@@ -2011,13 +2204,21 @@
 		else
 			desc = priv->dma_tx + entry;
 
-		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
-					      DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, desc->des2))
+		des = skb_frag_dma_map(priv->device, frag, 0, len,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->device, des))
 			goto dma_map_err; /* should reuse desc w/o issues */
 
 		priv->tx_skbuff[entry] = NULL;
-		priv->tx_skbuff_dma[entry].buf = desc->des2;
+
+		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+			desc->des0 = des;
+			priv->tx_skbuff_dma[entry].buf = desc->des0;
+		} else {
+			desc->des2 = des;
+			priv->tx_skbuff_dma[entry].buf = desc->des2;
+		}
+
 		priv->tx_skbuff_dma[entry].map_as_page = true;
 		priv->tx_skbuff_dma[entry].len = len;
 		priv->tx_skbuff_dma[entry].last_segment = last_segment;
@@ -2032,16 +2233,18 @@
 	priv->cur_tx = entry;
 
 	if (netif_msg_pktdata(priv)) {
+		void *tx_head;
+
 		pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
 			 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
 			 entry, first, nfrags);
 
 		if (priv->extend_desc)
-			stmmac_display_ring((void *)priv->dma_etx,
-					    DMA_TX_SIZE, 1);
+			tx_head = (void *)priv->dma_etx;
 		else
-			stmmac_display_ring((void *)priv->dma_tx,
-					    DMA_TX_SIZE, 0);
+			tx_head = (void *)priv->dma_tx;
+
+		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
 
 		pr_debug(">>> frame to be transmitted: ");
 		print_pkt(skb->data, skb->len);
@@ -2080,12 +2283,19 @@
 	if (likely(!is_jumbo)) {
 		bool last_segment = (nfrags == 0);
 
-		first->des2 = dma_map_single(priv->device, skb->data,
-					     nopaged_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, first->des2))
+		des = dma_map_single(priv->device, skb->data,
+				     nopaged_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->device, des))
 			goto dma_map_err;
 
-		priv->tx_skbuff_dma[first_entry].buf = first->des2;
+		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+			first->des0 = des;
+			priv->tx_skbuff_dma[first_entry].buf = first->des0;
+		} else {
+			first->des2 = des;
+			priv->tx_skbuff_dma[first_entry].buf = first->des2;
+		}
+
 		priv->tx_skbuff_dma[first_entry].len = nopaged_len;
 		priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
 
@@ -2109,7 +2319,12 @@
 	}
 
 	netdev_sent_queue(dev, skb->len);
-	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+
+	if (priv->synopsys_id < DWMAC_CORE_4_00)
+		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+	else
+		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+					       STMMAC_CHAN0);
 
 	spin_unlock(&priv->tx_lock);
 	return NETDEV_TX_OK;
@@ -2191,9 +2406,15 @@
 				dev_kfree_skb(skb);
 				break;
 			}
-			p->des2 = priv->rx_skbuff_dma[entry];
 
-			priv->hw->mode->refill_desc3(priv, p);
+			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+				p->des0 = priv->rx_skbuff_dma[entry];
+				p->des1 = 0;
+			} else {
+				p->des2 = priv->rx_skbuff_dma[entry];
+			}
+			if (priv->hw->mode->refill_desc3)
+				priv->hw->mode->refill_desc3(priv, p);
 
 			if (priv->rx_zeroc_thresh > 0)
 				priv->rx_zeroc_thresh--;
@@ -2201,9 +2422,13 @@
 			if (netif_msg_rx_status(priv))
 				pr_debug("\trefill entry #%d\n", entry);
 		}
-
 		wmb();
-		priv->hw->desc->set_rx_owner(p);
+
+		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
+		else
+			priv->hw->desc->set_rx_owner(p);
+
 		wmb();
 
 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
@@ -2226,13 +2451,15 @@
 	int coe = priv->hw->rx_csum;
 
 	if (netif_msg_rx_status(priv)) {
+		void *rx_head;
+
 		pr_debug("%s: descriptor ring:\n", __func__);
 		if (priv->extend_desc)
-			stmmac_display_ring((void *)priv->dma_erx,
-					    DMA_RX_SIZE, 1);
+			rx_head = (void *)priv->dma_erx;
 		else
-			stmmac_display_ring((void *)priv->dma_rx,
-					    DMA_RX_SIZE, 0);
+			rx_head = (void *)priv->dma_rx;
+
+		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
 	}
 	while (count < limit) {
 		int status;
@@ -2282,11 +2509,23 @@
 		} else {
 			struct sk_buff *skb;
 			int frame_len;
+			unsigned int des;
+
+			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+				des = p->des0;
+			else
+				des = p->des2;
 
 			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
 
-			/*  check if frame_len fits the preallocated memory */
+			/*  If frame length is greater than skb buffer size
+			 *  (preallocated during init) then the packet is
+			 *  ignored
+			 */
 			if (frame_len > priv->dma_buf_sz) {
+				pr_err("%s: len %d larger than size (%d)\n",
+				       priv->dev->name, frame_len,
+				       priv->dma_buf_sz);
 				priv->dev->stats.rx_length_errors++;
 				break;
 			}
@@ -2299,14 +2538,19 @@
 
 			if (netif_msg_rx_status(priv)) {
 				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
-					 p, entry, p->des2);
+					p, entry, des);
 				if (frame_len > ETH_FRAME_LEN)
 					pr_debug("\tframe size %d, COE: %d\n",
 						 frame_len, status);
 			}
 
-			if (unlikely((frame_len < priv->rx_copybreak) ||
-				     stmmac_rx_threshold_count(priv))) {
+			/* Zero-copy is always used, for all sizes, in case of
+			 * GMAC4 because the used descriptors always need to be
+			 * refilled.
+			 */
+			if (unlikely(!priv->plat->has_gmac4 &&
+				     ((frame_len < priv->rx_copybreak) ||
+				     stmmac_rx_threshold_count(priv)))) {
 				skb = netdev_alloc_skb_ip_align(priv->dev,
 								frame_len);
 				if (unlikely(!skb)) {
@@ -2458,7 +2702,7 @@
 		return -EBUSY;
 	}
 
-	if (priv->plat->enh_desc)
+	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
 		max_mtu = JUMBO_LEN;
 	else
 		max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
@@ -2472,6 +2716,7 @@
 	}
 
 	dev->mtu = new_mtu;
+
 	netdev_update_features(dev);
 
 	return 0;
@@ -2496,6 +2741,14 @@
 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
 		features &= ~NETIF_F_CSUM_MASK;
 
+	/* Disable tso if asked by ethtool */
+	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+		if (features & NETIF_F_TSO)
+			priv->tso = true;
+		else
+			priv->tso = false;
+	}
+
 	return features;
 }
 
@@ -2542,7 +2795,7 @@
 	}
 
 	/* To handle GMAC own interrupts */
-	if (priv->plat->has_gmac) {
+	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
 		int status = priv->hw->mac->host_irq_status(priv->hw,
 							    &priv->xstats);
 		if (unlikely(status)) {
@@ -2551,6 +2804,10 @@
 				priv->tx_path_in_lpi_mode = true;
 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
 				priv->tx_path_in_lpi_mode = false;
+			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+				priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+							priv->rx_tail_addr,
+							STMMAC_CHAN0);
 		}
 	}
 
@@ -2623,15 +2880,14 @@
 			x = *(u64 *) ep;
 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
 				   i, (unsigned int)virt_to_phys(ep),
-				   (unsigned int)x, (unsigned int)(x >> 32),
+				   ep->basic.des0, ep->basic.des1,
 				   ep->basic.des2, ep->basic.des3);
 			ep++;
 		} else {
 			x = *(u64 *) p;
 			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
 				   i, (unsigned int)virt_to_phys(ep),
-				   (unsigned int)x, (unsigned int)(x >> 32),
-				   p->des2, p->des3);
+				   p->des0, p->des1, p->des2, p->des3);
 			p++;
 		}
 		seq_printf(seq, "\n");
@@ -2714,10 +2970,15 @@
 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
-	seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
-		   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
-	seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
-		   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
+			   (priv->dma_cap.rx_coe) ? "Y" : "N");
+	} else {
+		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+	}
 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
@@ -2826,27 +3087,35 @@
 		priv->dev->priv_flags |= IFF_UNICAST_FLT;
 		mac = dwmac1000_setup(priv->ioaddr,
 				      priv->plat->multicast_filter_bins,
-				      priv->plat->unicast_filter_entries);
+				      priv->plat->unicast_filter_entries,
+				      &priv->synopsys_id);
+	} else if (priv->plat->has_gmac4) {
+		priv->dev->priv_flags |= IFF_UNICAST_FLT;
+		mac = dwmac4_setup(priv->ioaddr,
+				   priv->plat->multicast_filter_bins,
+				   priv->plat->unicast_filter_entries,
+				   &priv->synopsys_id);
 	} else {
-		mac = dwmac100_setup(priv->ioaddr);
+		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
 	}
 	if (!mac)
 		return -ENOMEM;
 
 	priv->hw = mac;
 
-	/* Get and dump the chip ID */
-	priv->synopsys_id = stmmac_get_synopsys_id(priv);
-
 	/* To use the chained or ring mode */
-	if (chain_mode) {
-		priv->hw->mode = &chain_mode_ops;
-		pr_info(" Chain mode enabled\n");
-		priv->mode = STMMAC_CHAIN_MODE;
+	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+		priv->hw->mode = &dwmac4_ring_mode_ops;
 	} else {
-		priv->hw->mode = &ring_mode_ops;
-		pr_info(" Ring mode enabled\n");
-		priv->mode = STMMAC_RING_MODE;
+		if (chain_mode) {
+			priv->hw->mode = &chain_mode_ops;
+			pr_info(" Chain mode enabled\n");
+			priv->mode = STMMAC_CHAIN_MODE;
+		} else {
+			priv->hw->mode = &ring_mode_ops;
+			pr_info(" Ring mode enabled\n");
+			priv->mode = STMMAC_RING_MODE;
+		}
 	}
 
 	/* Get the HW capability (new GMAC newer than 3.50a) */
@@ -2868,6 +3137,9 @@
 		else
 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
 
+		/* In case of GMAC4 rx_coe is from HW cap register. */
+		priv->plat->rx_coe = priv->dma_cap.rx_coe;
+
 		if (priv->dma_cap.rx_coe_type2)
 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
 		else if (priv->dma_cap.rx_coe_type1)
@@ -2876,13 +3148,17 @@
 	} else
 		pr_info(" No HW DMA feature register supported");
 
-	/* To use alternate (extended) or normal descriptor structures */
-	stmmac_selec_desc_mode(priv);
+	/* To use alternate (extended), normal or GMAC4 descriptor structures */
+	if (priv->synopsys_id >= DWMAC_CORE_4_00)
+		priv->hw->desc = &dwmac4_desc_ops;
+	else
+		stmmac_selec_desc_mode(priv);
 
 	if (priv->plat->rx_coe) {
 		priv->hw->rx_csum = priv->plat->rx_coe;
-		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
-			priv->plat->rx_coe);
+		pr_info(" RX Checksum Offload Engine supported\n");
+		if (priv->synopsys_id < DWMAC_CORE_4_00)
+			pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
 	}
 	if (priv->plat->tx_coe)
 		pr_info(" TX Checksum insertion supported\n");
@@ -2892,6 +3168,9 @@
 		device_set_wakeup_capable(priv->device, 1);
 	}
 
+	if (priv->dma_cap.tsoen)
+		pr_info(" TSO supported\n");
+
 	return 0;
 }
 
@@ -2995,6 +3274,12 @@
 
 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 			    NETIF_F_RXCSUM;
+
+	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+		ndev->hw_features |= NETIF_F_TSO;
+		priv->tso = true;
+		pr_info(" TSO feature enabled\n");
+	}
 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
@@ -3070,12 +3355,13 @@
 
 /**
  * stmmac_dvr_remove
- * @ndev: net device pointer
+ * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
  * changes the link status, releases the DMA descriptor rings.
  */
-int stmmac_dvr_remove(struct net_device *ndev)
+int stmmac_dvr_remove(struct device *dev)
 {
+	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
 
 	pr_info("%s:\n\tremoving driver", __func__);
@@ -3101,13 +3387,14 @@
 
 /**
  * stmmac_suspend - suspend callback
- * @ndev: net device pointer
+ * @dev: device pointer
  * Description: this is the function to suspend the device and it is called
  * by the platform driver to stop the network queue, release the resources,
  * program the PMT register (for WoL), clean and release driver resources.
  */
-int stmmac_suspend(struct net_device *ndev)
+int stmmac_suspend(struct device *dev)
 {
+	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
 	unsigned long flags;
 
@@ -3150,12 +3437,13 @@
 
 /**
  * stmmac_resume - resume callback
- * @ndev: net device pointer
+ * @dev: device pointer
  * Description: when resume this function is invoked to setup the DMA and CORE
  * in a usable state.
  */
-int stmmac_resume(struct net_device *ndev)
+int stmmac_resume(struct device *dev)
 {
+	struct net_device *ndev = dev_get_drvdata(dev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
 	unsigned long flags;
 
@@ -3189,6 +3477,11 @@
 	priv->dirty_rx = 0;
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
+	/* reset private mss value to force mss context settings at
+	 * next tso xmit (only used for gmac4).
+	 */
+	priv->mss = 0;
+
 	stmmac_clear_descriptors(priv);
 
 	stmmac_hw_setup(ndev, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ea76129..3f83c36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -37,6 +37,18 @@
 #define MII_BUSY 0x00000001
 #define MII_WRITE 0x00000002
 
+/* GMAC4 defines */
+#define MII_GMAC4_GOC_SHIFT		2
+#define MII_GMAC4_WRITE			(1 << MII_GMAC4_GOC_SHIFT)
+#define MII_GMAC4_READ			(3 << MII_GMAC4_GOC_SHIFT)
+
+#define MII_PHY_ADDR_GMAC4_SHIFT	21
+#define MII_PHY_ADDR_GMAC4_MASK		GENMASK(25, 21)
+#define MII_PHY_REG_GMAC4_SHIFT		16
+#define MII_PHY_REG_GMAC4_MASK		GENMASK(20, 16)
+#define MII_CSR_CLK_GMAC4_SHIFT		8
+#define MII_CSR_CLK_GMAC4_MASK		GENMASK(11, 8)
+
 static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
 {
 	unsigned long curr;
@@ -124,6 +136,80 @@
 }
 
 /**
+ * stmmac_mdio_read_gmac4
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 25-21
+ * @phyreg: MII addr reg bits 20-16
+ * Description: it reads data from the MII register of GMAC4 from within
+ * the phy device.
+ */
+static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+	struct net_device *ndev = bus->priv;
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	unsigned int mii_address = priv->hw->mii.addr;
+	unsigned int mii_data = priv->hw->mii.data;
+	int data;
+	u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+		     (MII_PHY_ADDR_GMAC4_MASK)) |
+		     ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+		     (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ;
+
+	value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
+		 << MII_CSR_CLK_GMAC4_SHIFT);
+
+	if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+		return -EBUSY;
+
+	writel(value, priv->ioaddr + mii_address);
+
+	if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+		return -EBUSY;
+
+	/* Read the data from the MII data register */
+	data = (int)readl(priv->ioaddr + mii_data);
+
+	return data;
+}
+
+/**
+ * stmmac_mdio_write_gmac4
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 25-21
+ * @phyreg: MII addr reg bits 20-16
+ * @phydata: phy data
+ * Description: it writes the data into the MII register of GMAC4 from within
+ * the device.
+ */
+static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg,
+				   u16 phydata)
+{
+	struct net_device *ndev = bus->priv;
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	unsigned int mii_address = priv->hw->mii.addr;
+	unsigned int mii_data = priv->hw->mii.data;
+
+	u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+		     (MII_PHY_ADDR_GMAC4_MASK)) |
+		     ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+		     (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE;
+
+	value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
+		 << MII_CSR_CLK_GMAC4_SHIFT);
+
+	/* Wait until any existing MII operation is complete */
+	if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+		return -EBUSY;
+
+	/* Set the MII address register to write */
+	writel(phydata, priv->ioaddr + mii_data);
+	writel(value, priv->ioaddr + mii_address);
+
+	/* Wait until any existing MII operation is complete */
+	return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
+}
+
+/**
  * stmmac_mdio_reset
  * @bus: points to the mii_bus structure
  * Description: reset the MII bus
@@ -180,9 +266,11 @@
 
 	/* This is a workaround for problems with the STE101P PHY.
 	 * It doesn't complete its reset until at least one clock cycle
-	 * on MDC, so perform a dummy mdio read.
+	 * on MDC, so perform a dummy mdio read. To be updated for GMAC4
+	 * if needed.
 	 */
-	writel(0, priv->ioaddr + mii_address);
+	if (!priv->plat->has_gmac4)
+		writel(0, priv->ioaddr + mii_address);
 #endif
 	return 0;
 }
@@ -198,20 +286,12 @@
 	struct mii_bus *new_bus;
 	struct stmmac_priv *priv = netdev_priv(ndev);
 	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
-	int addr, found;
 	struct device_node *mdio_node = priv->plat->mdio_node;
+	int addr, found;
 
 	if (!mdio_bus_data)
 		return 0;
 
-	if (IS_ENABLED(CONFIG_OF)) {
-		if (mdio_node) {
-			netdev_dbg(ndev, "FOUND MDIO subnode\n");
-		} else {
-			netdev_warn(ndev, "No MDIO subnode found\n");
-		}
-	}
-
 	new_bus = mdiobus_alloc();
 	if (new_bus == NULL)
 		return -ENOMEM;
@@ -225,8 +305,14 @@
 #endif
 
 	new_bus->name = "stmmac";
-	new_bus->read = &stmmac_mdio_read;
-	new_bus->write = &stmmac_mdio_write;
+	if (priv->plat->has_gmac4) {
+		new_bus->read = &stmmac_mdio_read_gmac4;
+		new_bus->write = &stmmac_mdio_write_gmac4;
+	} else {
+		new_bus->read = &stmmac_mdio_read;
+		new_bus->write = &stmmac_mdio_write;
+	}
+
 	new_bus->reset = &stmmac_mdio_reset;
 	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
 		 new_bus->name, priv->plat->bus_id);
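
Both GMAC4 helpers above compose the MII address word from the same fields; a minimal sketch of that packing, using only the masks and shifts defined in this hunk (the helper name is illustrative, and the mask is applied before the shift exactly as in the driver code):

#include <linux/bitops.h>
#include <linux/types.h>

#define MII_BUSY			0x00000001
#define MII_GMAC4_GOC_SHIFT		2
#define MII_GMAC4_READ			(3 << MII_GMAC4_GOC_SHIFT)
#define MII_PHY_ADDR_GMAC4_SHIFT	21
#define MII_PHY_ADDR_GMAC4_MASK		GENMASK(25, 21)
#define MII_PHY_REG_GMAC4_SHIFT		16
#define MII_PHY_REG_GMAC4_MASK		GENMASK(20, 16)
#define MII_CSR_CLK_GMAC4_SHIFT		8
#define MII_CSR_CLK_GMAC4_MASK		GENMASK(11, 8)

/* Illustrative only: build the GMAC4 MII address word for a read. */
static u32 example_gmac4_mii_addr(int phyaddr, int phyreg, u32 clk_csr)
{
	u32 value;

	value  = (phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) & MII_PHY_ADDR_GMAC4_MASK;
	value |= (phyreg << MII_PHY_REG_GMAC4_SHIFT) & MII_PHY_REG_GMAC4_MASK;
	value |= MII_GMAC4_READ;
	value |= MII_BUSY | ((clk_csr & MII_CSR_CLK_GMAC4_MASK)
		 << MII_CSR_CLK_GMAC4_SHIFT);

	return value;
}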
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index ae43887..56c8a23 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -231,30 +231,10 @@
  */
 static void stmmac_pci_remove(struct pci_dev *pdev)
 {
-	struct net_device *ndev = pci_get_drvdata(pdev);
-
-	stmmac_dvr_remove(ndev);
+	stmmac_dvr_remove(&pdev->dev);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int stmmac_pci_suspend(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct net_device *ndev = pci_get_drvdata(pdev);
-
-	return stmmac_suspend(ndev);
-}
-
-static int stmmac_pci_resume(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct net_device *ndev = pci_get_drvdata(pdev);
-
-	return stmmac_resume(ndev);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 
 #define STMMAC_VENDOR_ID 0x700
 #define STMMAC_QUARK_ID  0x0937
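
The PM simplification above works because stmmac_suspend()/stmmac_resume() now take a struct device * and fetch the net_device with dev_get_drvdata(). Any bus glue that stores the net_device as drvdata (as the PCI and platform glue here do) can reuse the core callbacks directly; a minimal sketch, with illustrative names:

#include <linux/device.h>
#include <linux/pm.h>

int stmmac_suspend(struct device *dev);
int stmmac_resume(struct device *dev);

/* Illustrative: no per-bus suspend/resume wrappers are needed any more. */
static SIMPLE_DEV_PM_OPS(example_stmmac_pm_ops, stmmac_suspend, stmmac_resume);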
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index dcbd2a1..409db91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -132,6 +132,69 @@
 }
 
 /**
+ * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
+ * @plat: driver data platform structure
+ * @np: device tree node
+ * @dev: device pointer
+ * Description:
+ * The mdio bus will be allocated in case a phy transceiver is on board;
+ * it will be NULL if a fixed-link is configured.
+ * If the "snps,dwmac-mdio" sub-node is present, the mdio bus will be
+ * allocated in any case (for DSA, mdio must be registered even if
+ * fixed-link is used).
+ * The table below summarizes the supported configurations:
+ *	-------------------------------
+ *	snps,phy-addr	|     Y
+ *	-------------------------------
+ *	phy-handle	|     Y
+ *	-------------------------------
+ *	fixed-link	|     N
+ *	-------------------------------
+ *	snps,dwmac-mdio	|
+ *	  even if	|     Y
+ *	fixed-link	|
+ *	-------------------------------
+ *
+ * It returns 0 in case of success, -ENODEV otherwise.
+ */
+static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
+			 struct device_node *np, struct device *dev)
+{
+	bool mdio = true;
+
+	/* If phy-handle property is passed from DT, use it as the PHY */
+	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+	if (plat->phy_node)
+		dev_dbg(dev, "Found phy-handle subnode\n");
+
+	/* If phy-handle is not specified, check if we have a fixed-phy */
+	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+		if ((of_phy_register_fixed_link(np) < 0))
+			return -ENODEV;
+
+		dev_dbg(dev, "Found fixed-link subnode\n");
+		plat->phy_node = of_node_get(np);
+		mdio = false;
+	}
+
+	/* If snps,dwmac-mdio is passed from DT, always register the MDIO */
+	for_each_child_of_node(np, plat->mdio_node) {
+		if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio"))
+			break;
+	}
+
+	if (plat->mdio_node) {
+		dev_dbg(dev, "Found MDIO subnode\n");
+		mdio = true;
+	}
+
+	if (mdio)
+		plat->mdio_bus_data =
+			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
+				     GFP_KERNEL);
+	return 0;
+}
+
+/**
  * stmmac_probe_config_dt - parse device-tree driver parameters
  * @pdev: platform_device structure
  * @plat: driver data platform structure
@@ -146,7 +209,6 @@
 	struct device_node *np = pdev->dev.of_node;
 	struct plat_stmmacenet_data *plat;
 	struct stmmac_dma_cfg *dma_cfg;
-	struct device_node *child_node = NULL;
 
 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
 	if (!plat)
@@ -166,36 +228,15 @@
 	/* Default to phy auto-detection */
 	plat->phy_addr = -1;
 
-	/* If we find a phy-handle property, use it as the PHY */
-	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
-
-	/* If phy-handle is not specified, check if we have a fixed-phy */
-	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
-		if ((of_phy_register_fixed_link(np) < 0))
-			return ERR_PTR(-ENODEV);
-
-		plat->phy_node = of_node_get(np);
-	}
-
-	for_each_child_of_node(np, child_node)
-		if (of_device_is_compatible(child_node,	"snps,dwmac-mdio")) {
-			plat->mdio_node = child_node;
-			break;
-		}
-
 	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
 	 * and warn of its use. Remove this when phy node support is added.
 	 */
 	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
 		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
 
-	if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node)
-		plat->mdio_bus_data = NULL;
-	else
-		plat->mdio_bus_data =
-			devm_kzalloc(&pdev->dev,
-				     sizeof(struct stmmac_mdio_bus_data),
-				     GFP_KERNEL);
+	/* Configure the PHY using all supported device-tree properties */
+	if (stmmac_dt_phy(plat, np, &pdev->dev))
+		return ERR_PTR(-ENODEV);
 
 	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
 
@@ -243,6 +284,13 @@
 		plat->pmt = 1;
 	}
 
+	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
+	    of_device_is_compatible(np, "snps,dwmac-4.10a")) {
+		plat->has_gmac4 = 1;
+		plat->pmt = 1;
+		plat->tso_en = of_property_read_bool(np, "snps,tso");
+	}
+
 	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
 		of_device_is_compatible(np, "snps,dwmac-3.710")) {
 		plat->enh_desc = 1;
@@ -338,7 +386,7 @@
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
-	int ret = stmmac_dvr_remove(ndev);
+	int ret = stmmac_dvr_remove(&pdev->dev);
 
 	if (priv->plat->exit)
 		priv->plat->exit(pdev, priv->plat->bsp_priv);
@@ -362,7 +410,7 @@
 	struct stmmac_priv *priv = netdev_priv(ndev);
 	struct platform_device *pdev = to_platform_device(dev);
 
-	ret = stmmac_suspend(ndev);
+	ret = stmmac_suspend(dev);
 	if (priv->plat->exit)
 		priv->plat->exit(pdev, priv->plat->bsp_priv);
 
@@ -385,7 +433,7 @@
 	if (priv->plat->init)
 		priv->plat->init(pdev, priv->plat->bsp_priv);
 
-	return stmmac_resume(ndev);
+	return stmmac_resume(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
 
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 9cc4564..a2371aa 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6431,7 +6431,7 @@
 
 static void niu_netif_stop(struct niu *np)
 {
-	np->dev->trans_start = jiffies;	/* prevent tx timeout */
+	netif_trans_update(np->dev);	/* prevent tx timeout */
 
 	niu_disable_napi(np);
 
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 2437227..d6ad0fb 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -226,7 +226,7 @@
 
 static inline void gem_netif_stop(struct gem *gp)
 {
-	gp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	netif_trans_update(gp->dev);	/* prevent tx timeout */
 	napi_disable(&gp->napi);
 	netif_tx_disable(gp->dev);
 }
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index af11ed1..158213c 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -949,7 +949,7 @@
 
 	if (status_change) {
 		if (phydev->link) {
-			lp->ndev->trans_start = jiffies;
+			netif_trans_update(lp->ndev);
 			dwceqos_link_up(lp);
 		} else {
 			dwceqos_link_down(lp);
@@ -2203,7 +2203,7 @@
 	netdev_sent_queue(ndev, skb->len);
 	spin_unlock_bh(&lp->tx_lock);
 
-	ndev->trans_start = jiffies;
+	netif_trans_update(ndev);
 	return 0;
 
 tx_error:
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 14c9d1b..7452b5f 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1610,7 +1610,6 @@
  * o NETDEV_TX_BUSY Cannot transmit packet, try later
  *   Usually a bug, means queue start/stop flow control is broken in
  *   the driver. Note: the driver must NOT put the skb in its DMA ring.
- * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
  */
 static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
 				   struct net_device *ndev)
@@ -1630,12 +1629,7 @@
 
 	ENTER;
 	local_irq_save(flags);
-	if (!spin_trylock(&priv->tx_lock)) {
-		local_irq_restore(flags);
-		DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
-		    BDX_DRV_NAME, ndev->name);
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock(&priv->tx_lock);
 
 	/* build tx descriptor */
 	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
@@ -1707,7 +1701,7 @@
 
 #endif
 #ifdef BDX_LLTX
-	ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+	netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */
 #endif
 	ndev->stats.tx_packets++;
 	ndev->stats.tx_bytes += skb->len;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 42fdfd4..4b08a2f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -367,7 +367,6 @@
 	spinlock_t			lock;
 	struct platform_device		*pdev;
 	struct net_device		*ndev;
-	struct device_node		*phy_node;
 	struct napi_struct		napi_rx;
 	struct napi_struct		napi_tx;
 	struct device			*dev;
@@ -381,7 +380,6 @@
 	u32				coal_intvl;
 	u32				bus_freq_mhz;
 	int				rx_packet_max;
-	int				host_port;
 	struct clk			*clk;
 	u8				mac_addr[ETH_ALEN];
 	struct cpsw_slave		*slaves;
@@ -531,21 +529,18 @@
 			int slave_port = cpsw_get_slave_port(priv,	\
 						slave->slave_num);	\
 			cpsw_ale_add_mcast(priv->ale, addr,		\
-				1 << slave_port | 1 << priv->host_port,	\
+				1 << slave_port | ALE_PORT_HOST,	\
 				ALE_VLAN, slave->port_vlan, 0);		\
 		} else {						\
 			cpsw_ale_add_mcast(priv->ale, addr,		\
-				ALE_ALL_PORTS << priv->host_port,	\
+				ALE_ALL_PORTS,				\
 				0, 0, 0);				\
 		}							\
 	} while (0)
 
 static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
 {
-	if (priv->host_port == 0)
-		return slave_num + 1;
-	else
-		return slave_num;
+	return slave_num + 1;
 }
 
 static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
@@ -602,8 +597,7 @@
 			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
 
 			/* Clear all mcast from ALE */
-			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
-						 priv->host_port, -1);
+			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
 
 			/* Flood All Unicast Packets to Host port */
 			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -648,8 +642,7 @@
 	cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
 
 	/* Clear all mcast from ALE */
-	cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
-				 vid);
+	cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid);
 
 	if (!netdev_mc_empty(ndev)) {
 		struct netdev_hw_addr *ha;
@@ -1092,7 +1085,7 @@
 		struct cpsw_priv *priv, struct cpsw_slave *slave,
 		u32 slave_port)
 {
-	u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
 
 	if (priv->version == CPSW_VERSION_1)
 		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
@@ -1103,7 +1096,7 @@
 	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
 			   port_mask, ALE_VLAN, slave->port_vlan, 0);
 	cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
-		priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
+		HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan);
 }
 
 static void soft_reset_slave(struct cpsw_slave *slave)
@@ -1148,31 +1141,39 @@
 		cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
 				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
-	if (priv->phy_node)
-		slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+	if (slave->data->phy_node) {
+		slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
 				 &cpsw_adjust_link, 0, slave->data->phy_if);
-	else
+		if (!slave->phy) {
+			dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
+				slave->data->phy_node->full_name,
+				slave->slave_num);
+			return;
+		}
+	} else {
 		slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
 				 &cpsw_adjust_link, slave->data->phy_if);
-	if (IS_ERR(slave->phy)) {
-		dev_err(priv->dev, "phy %s not found on slave %d\n",
-			slave->data->phy_id, slave->slave_num);
-		slave->phy = NULL;
-	} else {
-		phy_attached_info(slave->phy);
-
-		phy_start(slave->phy);
-
-		/* Configure GMII_SEL register */
-		cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
-			     slave->slave_num);
+		if (IS_ERR(slave->phy)) {
+			dev_err(priv->dev,
+				"phy \"%s\" not found on slave %d, err %ld\n",
+				slave->data->phy_id, slave->slave_num,
+				PTR_ERR(slave->phy));
+			slave->phy = NULL;
+			return;
+		}
 	}
+
+	phy_attached_info(slave->phy);
+
+	phy_start(slave->phy);
+
+	/* Configure GMII_SEL register */
+	cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
 }
 
 static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
 {
 	const int vlan = priv->data.default_vlan;
-	const int port = priv->host_port;
 	u32 reg;
 	int i;
 	int unreg_mcast_mask;
@@ -1190,9 +1191,9 @@
 	else
 		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
 
-	cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
-			  ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
-			  unreg_mcast_mask << port);
+	cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS,
+			  ALE_ALL_PORTS, ALE_ALL_PORTS,
+			  unreg_mcast_mask);
 }
 
 static void cpsw_init_host_port(struct cpsw_priv *priv)
@@ -1205,7 +1206,7 @@
 	cpsw_ale_start(priv->ale);
 
 	/* switch to vlan unaware mode */
-	cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+	cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
 			     CPSW_ALE_VLAN_AWARE);
 	control_reg = readl(&priv->regs->control);
 	control_reg |= CPSW_VLAN_AWARE;
@@ -1219,14 +1220,14 @@
 		     &priv->host_port_regs->cpdma_tx_pri_map);
 	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
 
-	cpsw_ale_control_set(priv->ale, priv->host_port,
+	cpsw_ale_control_set(priv->ale, HOST_PORT_NUM,
 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 
 	if (!priv->data.dual_emac) {
-		cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+		cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
 				   0, 0);
 		cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
-				   1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
 	}
 }
 
@@ -1251,12 +1252,12 @@
 	int i, ret;
 	u32 reg;
 
+	pm_runtime_get_sync(&priv->pdev->dev);
+
 	if (!cpsw_common_res_usage_state(priv))
 		cpsw_intr_disable(priv);
 	netif_carrier_off(ndev);
 
-	pm_runtime_get_sync(&priv->pdev->dev);
-
 	reg = priv->version;
 
 	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
@@ -1273,8 +1274,7 @@
 		cpsw_add_default_vlan(priv);
 	else
 		cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
-				  ALE_ALL_PORTS << priv->host_port,
-				  ALE_ALL_PORTS << priv->host_port, 0, 0);
+				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
 
 	if (!cpsw_common_res_usage_state(priv)) {
 		struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
@@ -1389,7 +1389,7 @@
 	struct cpsw_priv *priv = netdev_priv(ndev);
 	int ret;
 
-	ndev->trans_start = jiffies;
+	netif_trans_update(ndev);
 
 	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
 		cpsw_err(priv, tx_err, "packet pad failed\n");
@@ -1620,9 +1620,9 @@
 		flags = ALE_VLAN;
 	}
 
-	cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
+	cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
 			   flags, vid);
-	cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
+	cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM,
 			   flags, vid);
 
 	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
@@ -1666,12 +1666,12 @@
 	}
 
 	ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
-				unreg_mcast_mask << priv->host_port);
+				unreg_mcast_mask);
 	if (ret != 0)
 		return ret;
 
 	ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
-				 priv->host_port, ALE_VLAN, vid);
+				 HOST_PORT_NUM, ALE_VLAN, vid);
 	if (ret != 0)
 		goto clean_vid;
 
@@ -1683,7 +1683,7 @@
 
 clean_vlan_ucast:
 	cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
-			    priv->host_port, ALE_VLAN, vid);
+			   HOST_PORT_NUM, ALE_VLAN, vid);
 clean_vid:
 	cpsw_ale_del_vlan(priv->ale, vid, 0);
 	return ret;
@@ -1738,7 +1738,7 @@
 		return ret;
 
 	ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
-				 priv->host_port, ALE_VLAN, vid);
+				 HOST_PORT_NUM, ALE_VLAN, vid);
 	if (ret != 0)
 		return ret;
 
@@ -1940,12 +1940,11 @@
 	slave->port_vlan = data->dual_emac_res_vlan;
 }
 
-static int cpsw_probe_dt(struct cpsw_priv *priv,
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
 			 struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
 	struct device_node *slave_node;
-	struct cpsw_platform_data *data = &priv->data;
 	int i = 0, ret;
 	u32 prop;
 
@@ -2033,25 +2032,21 @@
 		if (strcmp(slave_node->name, "slave"))
 			continue;
 
-		priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
+		slave_data->phy_node = of_parse_phandle(slave_node,
+							"phy-handle", 0);
 		parp = of_get_property(slave_node, "phy_id", &lenp);
-		if (of_phy_is_fixed_link(slave_node)) {
-			struct device_node *phy_node;
-			struct phy_device *phy_dev;
-
+		if (slave_data->phy_node) {
+			dev_dbg(&pdev->dev,
+				"slave[%d] using phy-handle=\"%s\"\n",
+				i, slave_data->phy_node->full_name);
+		} else if (of_phy_is_fixed_link(slave_node)) {
 			/* In the case of a fixed PHY, the DT node associated
 			 * to the PHY is the Ethernet MAC DT node.
 			 */
 			ret = of_phy_register_fixed_link(slave_node);
 			if (ret)
 				return ret;
-			phy_node = of_node_get(slave_node);
-			phy_dev = of_phy_find_device(phy_node);
-			if (!phy_dev)
-				return -ENODEV;
-			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-				 PHY_ID_FMT, phy_dev->mdio.bus->id,
-				 phy_dev->mdio.addr);
+			slave_data->phy_node = of_node_get(slave_node);
 		} else if (parp) {
 			u32 phyid;
 			struct device_node *mdio_node;
@@ -2072,7 +2067,9 @@
 			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
 				 PHY_ID_FMT, mdio->name, phyid);
 		} else {
-			dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
+			dev_err(&pdev->dev,
+				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
+				i);
 			goto no_phy_slave;
 		}
 		slave_data->phy_if = of_get_phy_mode(slave_node);
@@ -2152,7 +2149,6 @@
 	priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
 
 	priv_sl2->regs = priv->regs;
-	priv_sl2->host_port = priv->host_port;
 	priv_sl2->host_port_regs = priv->host_port_regs;
 	priv_sl2->wr_regs = priv->wr_regs;
 	priv_sl2->hw_stats = priv->hw_stats;
@@ -2275,7 +2271,7 @@
 	/* Select default pin state */
 	pinctrl_pm_select_default_state(&pdev->dev);
 
-	if (cpsw_probe_dt(priv, pdev)) {
+	if (cpsw_probe_dt(&priv->data, pdev)) {
 		dev_err(&pdev->dev, "cpsw: platform data missing\n");
 		ret = -ENODEV;
 		goto clean_runtime_disable_ret;
@@ -2321,7 +2317,6 @@
 		goto clean_runtime_disable_ret;
 	}
 	priv->regs = ss_regs;
-	priv->host_port = HOST_PORT_NUM;
 
 	/* Need to enable clocks with runtime PM api to access module
 	 * registers
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 442a703..e50afd1 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -18,6 +18,7 @@
 #include <linux/phy.h>
 
 struct cpsw_slave_data {
+	struct device_node *phy_node;
 	char		phy_id[MII_BUS_ID_SIZE];
 	int		phy_if;
 	u8		mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5d9abed..f56d66e 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1512,7 +1512,10 @@
 
 	/* TODO: Add phy read and write and private statistics get feature */
 
-	return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+	if (priv->phydev)
+		return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+	else
+		return -EOPNOTSUPP;
 }
 
 static int match_first_device(struct device *dev, void *data)
@@ -1878,8 +1881,6 @@
 		pdata->hw_ram_addr = auxdata->hw_ram_addr;
 	}
 
-	pdev->dev.platform_data = pdata;
-
 	return  pdata;
 }
 
@@ -2101,6 +2102,7 @@
 	cpdma_ctlr_destroy(priv->dma);
 
 	unregister_netdev(ndev);
+	pm_runtime_disable(&pdev->dev);
 	free_netdev(ndev);
 
 	return 0;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1d0942c..3251666 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1272,7 +1272,7 @@
 	if (ret)
 		goto drop;
 
-	ndev->trans_start = jiffies;
+	netif_trans_update(ndev);
 
 	/* Check Tx pool count & stop subqueue if needed */
 	desc_count = knav_pool_count(netcp->tx_pool);
@@ -1788,7 +1788,7 @@
 
 	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
 	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
-	ndev->trans_start = jiffies;
+	netif_trans_update(ndev);
 	netif_tx_wake_all_queues(ndev);
 }
 
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index a274cd4..5617033 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1007,7 +1007,7 @@
 	tlan_reset_lists(dev);
 	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
 	tlan_reset_adapter(dev);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 
 }
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 298e059..922a443 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -1883,7 +1883,7 @@
 
 
 	/* Save the timestamp. */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 
 #ifdef TILE_NET_PARANOIA
@@ -2026,7 +2026,7 @@
 {
 	PDEBUG("tile_net_tx_timeout()\n");
 	PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
-	       jiffies - dev->trans_start);
+	       jiffies - dev_trans_start(dev));
 
 	/* XXX: ISSUE: This doesn't seem useful for us. */
 	netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 13214a6..743b182 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1622,7 +1622,7 @@
 			continue;
 
 		/* copy hw scan info */
-		memcpy(target->hwinfo, scan_info, scan_info->size);
+		memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
 		target->essid_len = strnlen(scan_info->essid,
 					    sizeof(scan_info->essid));
 		target->rate_len = 0;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 67610270..36a6e8b 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -705,7 +705,7 @@
 	wmb();
 	descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
 
-	card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
+	netif_trans_update(card->netdev); /* set netdev watchdog timer */
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 520cf50..01a7714 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1314,7 +1314,8 @@
 	data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
 					   GFP_KERNEL);
 	if (!data->txring) {
-		pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
+		pci_free_consistent(NULL, rxring_size, data->rxring,
+				    data->rxdma);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 2b7550c..9d14731 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1758,7 +1758,7 @@
 
 	spin_unlock_bh(&rp->lock);
 
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 	netif_wake_queue(dev);
 
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
index f98b91d..1981e88c 100644
--- a/drivers/net/ethernet/wiznet/Kconfig
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -69,4 +69,18 @@
 	  Performance may decrease compared to explicitly selected bus mode.
 endchoice
 
+config WIZNET_W5100_SPI
+	tristate "WIZnet W5100/W5200/W5500 Ethernet support for SPI mode"
+	depends on WIZNET_BUS_ANY && WIZNET_W5100
+	depends on SPI
+	---help---
+	  In SPI mode the host system accesses registers using the SPI
+	  protocol (mode 0) on the SPI bus.
+
+	  Performance decreases compared to the other bus interface modes.
+	  In W5100 SPI mode, burst READ/WRITE processing is not provided.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called w5100-spi.
+
 endif # NET_VENDOR_WIZNET
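A minimal usage sketch for the new SPI binding: the driver matches on the SPI modalias entries from w5100_spi_ids[] ("w5100", "w5200", "w5500"). The bus number, clock rate, and IRQ below are made-up example values, and most boards would describe the device in device tree instead:

	/* needs <linux/spi/spi.h>; example values only */
	static struct spi_board_info w5500_board_info __initdata = {
		.modalias	= "w5500",	/* matched against w5100_spi_ids[] */
		.max_speed_hz	= 10 * 1000 * 1000,
		.bus_num	= 0,		/* example bus */
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
		.irq		= 42,		/* example interrupt line */
	};

	/* called from board init code: */
	spi_register_board_info(&w5500_board_info, 1);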
diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile
index c614535..1e05e1a 100644
--- a/drivers/net/ethernet/wiznet/Makefile
+++ b/drivers/net/ethernet/wiznet/Makefile
@@ -1,2 +1,3 @@
 obj-$(CONFIG_WIZNET_W5100) += w5100.o
+obj-$(CONFIG_WIZNET_W5100_SPI) += w5100-spi.o
 obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
new file mode 100644
index 0000000..93a2d3c
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -0,0 +1,466 @@
+/*
+ * Ethernet driver for the WIZnet W5100/W5200/W5500 chip.
+ *
+ * Copyright (C) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ * Datasheet:
+ * http://www.wiznet.co.kr/wp-content/uploads/wiznethome/Chip/W5100/Document/W5100_Datasheet_v1.2.6.pdf
+ * http://wiznethome.cafe24.com/wp-content/uploads/wiznethome/Chip/W5200/Documents/W5200_DS_V140E.pdf
+ * http://wizwiki.net/wiki/lib/exe/fetch.php?media=products:w5500:w5500_ds_v106e_141230.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/spi/spi.h>
+
+#include "w5100.h"
+
+#define W5100_SPI_WRITE_OPCODE 0xf0
+#define W5100_SPI_READ_OPCODE 0x0f
+
+static int w5100_spi_read(struct net_device *ndev, u32 addr)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	u8 cmd[3] = { W5100_SPI_READ_OPCODE, addr >> 8, addr & 0xff };
+	u8 data;
+	int ret;
+
+	ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+	return ret ? ret : data;
+}
+
+static int w5100_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	u8 cmd[4] = { W5100_SPI_WRITE_OPCODE, addr >> 8, addr & 0xff, data};
+
+	return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5100_spi_read16(struct net_device *ndev, u32 addr)
+{
+	u16 data;
+	int ret;
+
+	ret = w5100_spi_read(ndev, addr);
+	if (ret < 0)
+		return ret;
+	data = ret << 8;
+	ret = w5100_spi_read(ndev, addr + 1);
+
+	return ret < 0 ? ret : data | ret;
+}
+
+static int w5100_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+	int ret;
+
+	ret = w5100_spi_write(ndev, addr, data >> 8);
+	if (ret)
+		return ret;
+
+	return w5100_spi_write(ndev, addr + 1, data & 0xff);
+}
+
+static int w5100_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+			      int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		int ret = w5100_spi_read(ndev, addr + i);
+
+		if (ret < 0)
+			return ret;
+		buf[i] = ret;
+	}
+
+	return 0;
+}
+
+static int w5100_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+			       int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		int ret = w5100_spi_write(ndev, addr + i, buf[i]);
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static const struct w5100_ops w5100_spi_ops = {
+	.may_sleep = true,
+	.chip_id = W5100,
+	.read = w5100_spi_read,
+	.write = w5100_spi_write,
+	.read16 = w5100_spi_read16,
+	.write16 = w5100_spi_write16,
+	.readbulk = w5100_spi_readbulk,
+	.writebulk = w5100_spi_writebulk,
+};
+
+#define W5200_SPI_WRITE_OPCODE 0x80
+
+struct w5200_spi_priv {
+	/* Serialize access to cmd_buf */
+	struct mutex cmd_lock;
+
+	/* DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 */
+	u8 cmd_buf[4] ____cacheline_aligned;
+};
+
+static struct w5200_spi_priv *w5200_spi_priv(struct net_device *ndev)
+{
+	return w5100_ops_priv(ndev);
+}
+
+static int w5200_spi_init(struct net_device *ndev)
+{
+	struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+
+	mutex_init(&spi_priv->cmd_lock);
+
+	return 0;
+}
+
+static int w5200_spi_read(struct net_device *ndev, u32 addr)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 1 };
+	u8 data;
+	int ret;
+
+	ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+	return ret ? ret : data;
+}
+
+static int w5200_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	u8 cmd[5] = { addr >> 8, addr & 0xff, W5200_SPI_WRITE_OPCODE, 1, data };
+
+	return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5200_spi_read16(struct net_device *ndev, u32 addr)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 2 };
+	__be16 data;
+	int ret;
+
+	ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data));
+
+	return ret ? ret : be16_to_cpu(data);
+}
+
+static int w5200_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	u8 cmd[6] = {
+		addr >> 8, addr & 0xff,
+		W5200_SPI_WRITE_OPCODE, 2,
+		data >> 8, data & 0xff
+	};
+
+	return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5200_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+			      int len)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+	struct spi_transfer xfer[] = {
+		{
+			.tx_buf = spi_priv->cmd_buf,
+			.len = sizeof(spi_priv->cmd_buf),
+		},
+		{
+			.rx_buf = buf,
+			.len = len,
+		},
+	};
+	int ret;
+
+	mutex_lock(&spi_priv->cmd_lock);
+
+	spi_priv->cmd_buf[0] = addr >> 8;
+	spi_priv->cmd_buf[1] = addr;
+	spi_priv->cmd_buf[2] = len >> 8;
+	spi_priv->cmd_buf[3] = len;
+	ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+	mutex_unlock(&spi_priv->cmd_lock);
+
+	return ret;
+}
+
+static int w5200_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+			       int len)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+	struct spi_transfer xfer[] = {
+		{
+			.tx_buf = spi_priv->cmd_buf,
+			.len = sizeof(spi_priv->cmd_buf),
+		},
+		{
+			.tx_buf = buf,
+			.len = len,
+		},
+	};
+	int ret;
+
+	mutex_lock(&spi_priv->cmd_lock);
+
+	spi_priv->cmd_buf[0] = addr >> 8;
+	spi_priv->cmd_buf[1] = addr;
+	spi_priv->cmd_buf[2] = W5200_SPI_WRITE_OPCODE | (len >> 8);
+	spi_priv->cmd_buf[3] = len;
+	ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+	mutex_unlock(&spi_priv->cmd_lock);
+
+	return ret;
+}
+
+static const struct w5100_ops w5200_ops = {
+	.may_sleep = true,
+	.chip_id = W5200,
+	.read = w5200_spi_read,
+	.write = w5200_spi_write,
+	.read16 = w5200_spi_read16,
+	.write16 = w5200_spi_write16,
+	.readbulk = w5200_spi_readbulk,
+	.writebulk = w5200_spi_writebulk,
+	.init = w5200_spi_init,
+};
+
+#define W5500_SPI_BLOCK_SELECT(addr) (((addr) >> 16) & 0x1f)
+#define W5500_SPI_READ_CONTROL(addr) (W5500_SPI_BLOCK_SELECT(addr) << 3)
+#define W5500_SPI_WRITE_CONTROL(addr)	\
+	((W5500_SPI_BLOCK_SELECT(addr) << 3) | BIT(2))
+
+struct w5500_spi_priv {
+	/* Serialize access to cmd_buf */
+	struct mutex cmd_lock;
+
+	/* DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 */
+	u8 cmd_buf[3] ____cacheline_aligned;
+};
+
+static struct w5500_spi_priv *w5500_spi_priv(struct net_device *ndev)
+{
+	return w5100_ops_priv(ndev);
+}
+
+static int w5500_spi_init(struct net_device *ndev)
+{
+	struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+
+	mutex_init(&spi_priv->cmd_lock);
+
+	return 0;
+}
+
+static int w5500_spi_read(struct net_device *ndev, u32 addr)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	u8 cmd[3] = {
+		addr >> 8,
+		addr,
+		W5500_SPI_READ_CONTROL(addr)
+	};
+	u8 data;
+	int ret;
+
+	ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+	return ret ? ret : data;
+}
+
+static int w5500_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	u8 cmd[4] = {
+		addr >> 8,
+		addr,
+		W5500_SPI_WRITE_CONTROL(addr),
+		data
+	};
+
+	return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5500_spi_read16(struct net_device *ndev, u32 addr)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	u8 cmd[3] = {
+		addr >> 8,
+		addr,
+		W5500_SPI_READ_CONTROL(addr)
+	};
+	__be16 data;
+	int ret;
+
+	ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data));
+
+	return ret ? ret : be16_to_cpu(data);
+}
+
+static int w5500_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	u8 cmd[5] = {
+		addr >> 8,
+		addr,
+		W5500_SPI_WRITE_CONTROL(addr),
+		data >> 8,
+		data
+	};
+
+	return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5500_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+			      int len)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+	struct spi_transfer xfer[] = {
+		{
+			.tx_buf = spi_priv->cmd_buf,
+			.len = sizeof(spi_priv->cmd_buf),
+		},
+		{
+			.rx_buf = buf,
+			.len = len,
+		},
+	};
+	int ret;
+
+	mutex_lock(&spi_priv->cmd_lock);
+
+	spi_priv->cmd_buf[0] = addr >> 8;
+	spi_priv->cmd_buf[1] = addr;
+	spi_priv->cmd_buf[2] = W5500_SPI_READ_CONTROL(addr);
+	ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+	mutex_unlock(&spi_priv->cmd_lock);
+
+	return ret;
+}
+
+static int w5500_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+			       int len)
+{
+	struct spi_device *spi = to_spi_device(ndev->dev.parent);
+	struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+	struct spi_transfer xfer[] = {
+		{
+			.tx_buf = spi_priv->cmd_buf,
+			.len = sizeof(spi_priv->cmd_buf),
+		},
+		{
+			.tx_buf = buf,
+			.len = len,
+		},
+	};
+	int ret;
+
+	mutex_lock(&spi_priv->cmd_lock);
+
+	spi_priv->cmd_buf[0] = addr >> 8;
+	spi_priv->cmd_buf[1] = addr;
+	spi_priv->cmd_buf[2] = W5500_SPI_WRITE_CONTROL(addr);
+	ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+	mutex_unlock(&spi_priv->cmd_lock);
+
+	return ret;
+}
+
+static const struct w5100_ops w5500_ops = {
+	.may_sleep = true,
+	.chip_id = W5500,
+	.read = w5500_spi_read,
+	.write = w5500_spi_write,
+	.read16 = w5500_spi_read16,
+	.write16 = w5500_spi_write16,
+	.readbulk = w5500_spi_readbulk,
+	.writebulk = w5500_spi_writebulk,
+	.init = w5500_spi_init,
+};
+
+static int w5100_spi_probe(struct spi_device *spi)
+{
+	const struct spi_device_id *id = spi_get_device_id(spi);
+	const struct w5100_ops *ops;
+	int priv_size;
+	const void *mac = of_get_mac_address(spi->dev.of_node);
+
+	switch (id->driver_data) {
+	case W5100:
+		ops = &w5100_spi_ops;
+		priv_size = 0;
+		break;
+	case W5200:
+		ops = &w5200_ops;
+		priv_size = sizeof(struct w5200_spi_priv);
+		break;
+	case W5500:
+		ops = &w5500_ops;
+		priv_size = sizeof(struct w5500_spi_priv);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL);
+}
+
+static int w5100_spi_remove(struct spi_device *spi)
+{
+	return w5100_remove(&spi->dev);
+}
+
+static const struct spi_device_id w5100_spi_ids[] = {
+	{ "w5100", W5100 },
+	{ "w5200", W5200 },
+	{ "w5500", W5500 },
+	{}
+};
+MODULE_DEVICE_TABLE(spi, w5100_spi_ids);
+
+static struct spi_driver w5100_spi_driver = {
+	.driver		= {
+		.name	= "w5100",
+		.pm	= &w5100_pm_ops,
+	},
+	.probe		= w5100_spi_probe,
+	.remove		= w5100_spi_remove,
+	.id_table	= w5100_spi_ids,
+};
+module_spi_driver(w5100_spi_driver);
+
+MODULE_DESCRIPTION("WIZnet W5100/W5200/W5500 Ethernet driver for SPI mode");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 8b282d0..4f6255c 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -27,6 +27,8 @@
 #include <linux/irq.h>
 #include <linux/gpio.h>
 
+#include "w5100.h"
+
 #define DRV_NAME	"w5100"
 #define DRV_VERSION	"2012-04-04"
 
@@ -36,7 +38,7 @@
 MODULE_LICENSE("GPL");
 
 /*
- * Registers
+ * W5100/W5200/W5500 common registers
  */
 #define W5100_COMMON_REGS	0x0000
 #define W5100_MR		0x0000 /* Mode Register */
@@ -46,55 +48,115 @@
 #define   MR_IND		  0x01 /* Indirect mode */
 #define W5100_SHAR		0x0009 /* Source MAC address */
 #define W5100_IR		0x0015 /* Interrupt Register */
-#define W5100_IMR		0x0016 /* Interrupt Mask Register */
-#define   IR_S0			  0x01 /* S0 interrupt */
-#define W5100_RTR		0x0017 /* Retry Time-value Register */
-#define   RTR_DEFAULT		  2000 /* =0x07d0 (2000) */
-#define W5100_RMSR		0x001a /* Receive Memory Size */
-#define W5100_TMSR		0x001b /* Transmit Memory Size */
 #define W5100_COMMON_REGS_LEN	0x0040
 
-#define W5100_S0_REGS		0x0400
-#define W5100_S0_MR		0x0400 /* S0 Mode Register */
-#define   S0_MR_MACRAW		  0x04 /* MAC RAW mode (promiscuous) */
-#define   S0_MR_MACRAW_MF	  0x44 /* MAC RAW mode (filtered) */
-#define W5100_S0_CR		0x0401 /* S0 Command Register */
+#define W5100_Sn_MR		0x0000 /* Sn Mode Register */
+#define W5100_Sn_CR		0x0001 /* Sn Command Register */
+#define W5100_Sn_IR		0x0002 /* Sn Interrupt Register */
+#define W5100_Sn_SR		0x0003 /* Sn Status Register */
+#define W5100_Sn_TX_FSR		0x0020 /* Sn Transmit free memory size */
+#define W5100_Sn_TX_RD		0x0022 /* Sn Transmit memory read pointer */
+#define W5100_Sn_TX_WR		0x0024 /* Sn Transmit memory write pointer */
+#define W5100_Sn_RX_RSR		0x0026 /* Sn Receive free memory size */
+#define W5100_Sn_RX_RD		0x0028 /* Sn Receive memory read pointer */
+
+#define S0_REGS(priv)		((priv)->s0_regs)
+
+#define W5100_S0_MR(priv)	(S0_REGS(priv) + W5100_Sn_MR)
+#define   S0_MR_MACRAW		  0x04 /* MAC RAW mode */
+#define   S0_MR_MF		  0x40 /* MAC Filter for W5100 and W5200 */
+#define   W5500_S0_MR_MF	  0x80 /* MAC Filter for W5500 */
+#define W5100_S0_CR(priv)	(S0_REGS(priv) + W5100_Sn_CR)
 #define   S0_CR_OPEN		  0x01 /* OPEN command */
 #define   S0_CR_CLOSE		  0x10 /* CLOSE command */
 #define   S0_CR_SEND		  0x20 /* SEND command */
 #define   S0_CR_RECV		  0x40 /* RECV command */
-#define W5100_S0_IR		0x0402 /* S0 Interrupt Register */
+#define W5100_S0_IR(priv)	(S0_REGS(priv) + W5100_Sn_IR)
 #define   S0_IR_SENDOK		  0x10 /* complete sending */
 #define   S0_IR_RECV		  0x04 /* receiving data */
-#define W5100_S0_SR		0x0403 /* S0 Status Register */
+#define W5100_S0_SR(priv)	(S0_REGS(priv) + W5100_Sn_SR)
 #define   S0_SR_MACRAW		  0x42 /* mac raw mode */
-#define W5100_S0_TX_FSR		0x0420 /* S0 Transmit free memory size */
-#define W5100_S0_TX_RD		0x0422 /* S0 Transmit memory read pointer */
-#define W5100_S0_TX_WR		0x0424 /* S0 Transmit memory write pointer */
-#define W5100_S0_RX_RSR		0x0426 /* S0 Receive free memory size */
-#define W5100_S0_RX_RD		0x0428 /* S0 Receive memory read pointer */
+#define W5100_S0_TX_FSR(priv)	(S0_REGS(priv) + W5100_Sn_TX_FSR)
+#define W5100_S0_TX_RD(priv)	(S0_REGS(priv) + W5100_Sn_TX_RD)
+#define W5100_S0_TX_WR(priv)	(S0_REGS(priv) + W5100_Sn_TX_WR)
+#define W5100_S0_RX_RSR(priv)	(S0_REGS(priv) + W5100_Sn_RX_RSR)
+#define W5100_S0_RX_RD(priv)	(S0_REGS(priv) + W5100_Sn_RX_RD)
+
 #define W5100_S0_REGS_LEN	0x0040
 
+/*
+ * W5100 and W5200 common registers
+ */
+#define W5100_IMR		0x0016 /* Interrupt Mask Register */
+#define   IR_S0			  0x01 /* S0 interrupt */
+#define W5100_RTR		0x0017 /* Retry Time-value Register */
+#define   RTR_DEFAULT		  2000 /* =0x07d0 (2000) */
+
+/*
+ * W5100 specific register and memory
+ */
+#define W5100_RMSR		0x001a /* Receive Memory Size */
+#define W5100_TMSR		0x001b /* Transmit Memory Size */
+
+#define W5100_S0_REGS		0x0400
+
 #define W5100_TX_MEM_START	0x4000
-#define W5100_TX_MEM_END	0x5fff
-#define W5100_TX_MEM_MASK	0x1fff
+#define W5100_TX_MEM_SIZE	0x2000
 #define W5100_RX_MEM_START	0x6000
-#define W5100_RX_MEM_END	0x7fff
-#define W5100_RX_MEM_MASK	0x1fff
+#define W5100_RX_MEM_SIZE	0x2000
+
+/*
+ * W5200 specific register and memory
+ */
+#define W5200_S0_REGS		0x4000
+
+#define W5200_Sn_RXMEM_SIZE(n)	(0x401e + (n) * 0x0100) /* Sn RX Memory Size */
+#define W5200_Sn_TXMEM_SIZE(n)	(0x401f + (n) * 0x0100) /* Sn TX Memory Size */
+
+#define W5200_TX_MEM_START	0x8000
+#define W5200_TX_MEM_SIZE	0x4000
+#define W5200_RX_MEM_START	0xc000
+#define W5200_RX_MEM_SIZE	0x4000
+
+/*
+ * W5500 specific register and memory
+ *
+ * W5500 registers and memory are organized into multiple blocks.  Each one
+ * is selected by a 16-bit offset address and 5 block select bits, so we
+ * encode both into a 32-bit address (the lower 16 bits are the offset
+ * address and the upper 16 bits hold the block select bits).
+ */
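For example, under this encoding the socket 0 register block at W5500_S0_REGS (0x10000) is block 1 at offset 0. A small sketch of the split, using hypothetical helper macros (the driver's w5100-spi.c uses an equivalent W5500_SPI_BLOCK_SELECT()):

	/* hypothetical helpers, mirroring the encoding described above */
	#define W5500_ADDR_OFFSET(addr)	((addr) & 0xffff)
	#define W5500_ADDR_BLOCK(addr)	(((addr) >> 16) & 0x1f)

	/* e.g. 0x1001e (Socket 0 RX memory size register):
	 *   W5500_ADDR_BLOCK(0x1001e)  == 0x1     -> socket 0 register block
	 *   W5500_ADDR_OFFSET(0x1001e) == 0x001e  -> register offset
	 */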
+#define W5500_SIMR		0x0018 /* Socket Interrupt Mask Register */
+#define W5500_RTR		0x0019 /* Retry Time-value Register */
+
+#define W5500_S0_REGS		0x10000
+
+#define W5500_Sn_RXMEM_SIZE(n)	\
+		(0x1001e + (n) * 0x40000) /* Sn RX Memory Size */
+#define W5500_Sn_TXMEM_SIZE(n)	\
+		(0x1001f + (n) * 0x40000) /* Sn TX Memory Size */
+
+#define W5500_TX_MEM_START	0x20000
+#define W5500_TX_MEM_SIZE	0x04000
+#define W5500_RX_MEM_START	0x30000
+#define W5500_RX_MEM_SIZE	0x04000
 
 /*
  * Device driver private data structure
  */
+
 struct w5100_priv {
-	void __iomem *base;
-	spinlock_t reg_lock;
-	bool indirect;
-	u8   (*read)(struct w5100_priv *priv, u16 addr);
-	void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
-	u16  (*read16)(struct w5100_priv *priv, u16 addr);
-	void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
-	void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
-	void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+	const struct w5100_ops *ops;
+
+	/* Socket 0 register offset address */
+	u32 s0_regs;
+	/* Socket 0 TX buffer offset address and size */
+	u32 s0_tx_buf;
+	u16 s0_tx_buf_size;
+	/* Socket 0 RX buffer offset address and size */
+	u32 s0_rx_buf;
+	u16 s0_rx_buf_size;
+
 	int irq;
 	int link_irq;
 	int link_gpio;
@@ -103,6 +165,13 @@
 	struct net_device *ndev;
 	bool promisc;
 	u32 msg_enable;
+
+	struct workqueue_struct *xfer_wq;
+	struct work_struct rx_work;
+	struct sk_buff *tx_skb;
+	struct work_struct tx_work;
+	struct work_struct setrx_work;
+	struct work_struct restart_work;
 };
 
 /************************************************************************
@@ -111,63 +180,122 @@
  *
  ***********************************************************************/
 
+struct w5100_mmio_priv {
+	void __iomem *base;
+	/* Serialize access in indirect address mode */
+	spinlock_t reg_lock;
+};
+
+static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev)
+{
+	return w5100_ops_priv(dev);
+}
+
+static inline void __iomem *w5100_mmio(struct net_device *ndev)
+{
+	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
+
+	return mmio_priv->base;
+}
+
 /*
  * In direct address mode host system can directly access W5100 registers
  * after mapping to Memory-Mapped I/O space.
  *
  * 0x8000 bytes are required for memory space.
  */
-static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
+static inline int w5100_read_direct(struct net_device *ndev, u32 addr)
 {
-	return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+	return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
 }
 
-static inline void w5100_write_direct(struct w5100_priv *priv,
-				      u16 addr, u8 data)
+static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
+				       u8 data)
 {
-	iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+	iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
+
+	return 0;
 }
 
-static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
+static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
+{
+	__w5100_write_direct(ndev, addr, data);
+	mmiowb();
+
+	return 0;
+}
+
+static int w5100_read16_direct(struct net_device *ndev, u32 addr)
 {
 	u16 data;
-	data  = w5100_read_direct(priv, addr) << 8;
-	data |= w5100_read_direct(priv, addr + 1);
+	data  = w5100_read_direct(ndev, addr) << 8;
+	data |= w5100_read_direct(ndev, addr + 1);
 	return data;
 }
 
-static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
+static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
 {
-	w5100_write_direct(priv, addr, data >> 8);
-	w5100_write_direct(priv, addr + 1, data);
+	__w5100_write_direct(ndev, addr, data >> 8);
+	__w5100_write_direct(ndev, addr + 1, data);
+	mmiowb();
+
+	return 0;
 }
 
-static void w5100_readbuf_direct(struct w5100_priv *priv,
-				 u16 offset, u8 *buf, int len)
+static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf,
+				 int len)
 {
-	u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
 	int i;
 
-	for (i = 0; i < len; i++, addr++) {
-		if (unlikely(addr > W5100_RX_MEM_END))
-			addr = W5100_RX_MEM_START;
-		*buf++ = w5100_read_direct(priv, addr);
-	}
+	for (i = 0; i < len; i++, addr++)
+		*buf++ = w5100_read_direct(ndev, addr);
+
+	return 0;
 }
 
-static void w5100_writebuf_direct(struct w5100_priv *priv,
-				  u16 offset, u8 *buf, int len)
+static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
+				  const u8 *buf, int len)
 {
-	u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
 	int i;
 
-	for (i = 0; i < len; i++, addr++) {
-		if (unlikely(addr > W5100_TX_MEM_END))
-			addr = W5100_TX_MEM_START;
-		w5100_write_direct(priv, addr, *buf++);
-	}
+	for (i = 0; i < len; i++, addr++)
+		__w5100_write_direct(ndev, addr, *buf++);
+
+	mmiowb();
+
+	return 0;
 }
 
+static int w5100_mmio_init(struct net_device *ndev)
+{
+	struct platform_device *pdev = to_platform_device(ndev->dev.parent);
+	struct w5100_priv *priv = netdev_priv(ndev);
+	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
+	struct resource *mem;
+
+	spin_lock_init(&mmio_priv->reg_lock);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(mmio_priv->base))
+		return PTR_ERR(mmio_priv->base);
+
+	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
+
+	return 0;
+}
+
+static const struct w5100_ops w5100_mmio_direct_ops = {
+	.chip_id = W5100,
+	.read = w5100_read_direct,
+	.write = w5100_write_direct,
+	.read16 = w5100_read16_direct,
+	.write16 = w5100_write16_direct,
+	.readbulk = w5100_readbulk_direct,
+	.writebulk = w5100_writebulk_direct,
+	.init = w5100_mmio_init,
+};
+
 /*
  * In indirect address mode host system indirectly accesses registers by
  * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
@@ -179,139 +307,290 @@
 #define W5100_IDM_AR		0x01   /* Indirect Mode Address Register */
 #define W5100_IDM_DR		0x03   /* Indirect Mode Data Register */
 
-static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
+static int w5100_read_indirect(struct net_device *ndev, u32 addr)
 {
+	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
 	unsigned long flags;
 	u8 data;
 
-	spin_lock_irqsave(&priv->reg_lock, flags);
-	w5100_write16_direct(priv, W5100_IDM_AR, addr);
-	mmiowb();
-	data = w5100_read_direct(priv, W5100_IDM_DR);
-	spin_unlock_irqrestore(&priv->reg_lock, flags);
+	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+	w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+	data = w5100_read_direct(ndev, W5100_IDM_DR);
+	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
 
 	return data;
 }
 
-static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
+static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data)
 {
+	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->reg_lock, flags);
-	w5100_write16_direct(priv, W5100_IDM_AR, addr);
-	mmiowb();
-	w5100_write_direct(priv, W5100_IDM_DR, data);
-	mmiowb();
-	spin_unlock_irqrestore(&priv->reg_lock, flags);
+	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+	w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+	w5100_write_direct(ndev, W5100_IDM_DR, data);
+	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+	return 0;
 }
 
-static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
+static int w5100_read16_indirect(struct net_device *ndev, u32 addr)
 {
+	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
 	unsigned long flags;
 	u16 data;
 
-	spin_lock_irqsave(&priv->reg_lock, flags);
-	w5100_write16_direct(priv, W5100_IDM_AR, addr);
-	mmiowb();
-	data  = w5100_read_direct(priv, W5100_IDM_DR) << 8;
-	data |= w5100_read_direct(priv, W5100_IDM_DR);
-	spin_unlock_irqrestore(&priv->reg_lock, flags);
+	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+	w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+	data  = w5100_read_direct(ndev, W5100_IDM_DR) << 8;
+	data |= w5100_read_direct(ndev, W5100_IDM_DR);
+	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
 
 	return data;
 }
 
-static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
+static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data)
 {
+	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->reg_lock, flags);
-	w5100_write16_direct(priv, W5100_IDM_AR, addr);
-	mmiowb();
-	w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
-	w5100_write_direct(priv, W5100_IDM_DR, data);
-	mmiowb();
-	spin_unlock_irqrestore(&priv->reg_lock, flags);
+	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+	w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+	__w5100_write_direct(ndev, W5100_IDM_DR, data >> 8);
+	w5100_write_direct(ndev, W5100_IDM_DR, data);
+	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+	return 0;
 }
 
-static void w5100_readbuf_indirect(struct w5100_priv *priv,
-				   u16 offset, u8 *buf, int len)
+static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
+				   int len)
 {
-	u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&priv->reg_lock, flags);
-	w5100_write16_direct(priv, W5100_IDM_AR, addr);
-	mmiowb();
+	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+	w5100_write16_direct(ndev, W5100_IDM_AR, addr);
 
-	for (i = 0; i < len; i++, addr++) {
-		if (unlikely(addr > W5100_RX_MEM_END)) {
-			addr = W5100_RX_MEM_START;
-			w5100_write16_direct(priv, W5100_IDM_AR, addr);
-			mmiowb();
-		}
-		*buf++ = w5100_read_direct(priv, W5100_IDM_DR);
-	}
+	for (i = 0; i < len; i++)
+		*buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
+
 	mmiowb();
-	spin_unlock_irqrestore(&priv->reg_lock, flags);
+	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+	return 0;
 }
 
-static void w5100_writebuf_indirect(struct w5100_priv *priv,
-				    u16 offset, u8 *buf, int len)
+static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
+				    const u8 *buf, int len)
 {
-	u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+	struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&priv->reg_lock, flags);
-	w5100_write16_direct(priv, W5100_IDM_AR, addr);
-	mmiowb();
+	spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+	w5100_write16_direct(ndev, W5100_IDM_AR, addr);
 
-	for (i = 0; i < len; i++, addr++) {
-		if (unlikely(addr > W5100_TX_MEM_END)) {
-			addr = W5100_TX_MEM_START;
-			w5100_write16_direct(priv, W5100_IDM_AR, addr);
-			mmiowb();
-		}
-		w5100_write_direct(priv, W5100_IDM_DR, *buf++);
-	}
+	for (i = 0; i < len; i++)
+		__w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
+
 	mmiowb();
-	spin_unlock_irqrestore(&priv->reg_lock, flags);
+	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+	return 0;
 }
 
+static int w5100_reset_indirect(struct net_device *ndev)
+{
+	w5100_write_direct(ndev, W5100_MR, MR_RST);
+	mdelay(5);
+	w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND);
+
+	return 0;
+}
+
+static const struct w5100_ops w5100_mmio_indirect_ops = {
+	.chip_id = W5100,
+	.read = w5100_read_indirect,
+	.write = w5100_write_indirect,
+	.read16 = w5100_read16_indirect,
+	.write16 = w5100_write16_indirect,
+	.readbulk = w5100_readbulk_indirect,
+	.writebulk = w5100_writebulk_indirect,
+	.init = w5100_mmio_init,
+	.reset = w5100_reset_indirect,
+};
+
 #if defined(CONFIG_WIZNET_BUS_DIRECT)
-#define w5100_read	w5100_read_direct
-#define w5100_write	w5100_write_direct
-#define w5100_read16	w5100_read16_direct
-#define w5100_write16	w5100_write16_direct
-#define w5100_readbuf	w5100_readbuf_direct
-#define w5100_writebuf	w5100_writebuf_direct
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+	return w5100_read_direct(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+	return w5100_write_direct(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+	return w5100_read16_direct(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+	return w5100_write16_direct(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+	return w5100_readbulk_direct(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+			   int len)
+{
+	return w5100_writebulk_direct(priv->ndev, addr, buf, len);
+}
 
 #elif defined(CONFIG_WIZNET_BUS_INDIRECT)
-#define w5100_read	w5100_read_indirect
-#define w5100_write	w5100_write_indirect
-#define w5100_read16	w5100_read16_indirect
-#define w5100_write16	w5100_write16_indirect
-#define w5100_readbuf	w5100_readbuf_indirect
-#define w5100_writebuf	w5100_writebuf_indirect
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+	return w5100_read_indirect(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+	return w5100_write_indirect(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+	return w5100_read16_indirect(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+	return w5100_write16_indirect(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+	return w5100_readbulk_indirect(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+			   int len)
+{
+	return w5100_writebulk_indirect(priv->ndev, addr, buf, len);
+}
 
 #else /* CONFIG_WIZNET_BUS_ANY */
-#define w5100_read	priv->read
-#define w5100_write	priv->write
-#define w5100_read16	priv->read16
-#define w5100_write16	priv->write16
-#define w5100_readbuf	priv->readbuf
-#define w5100_writebuf	priv->writebuf
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+	return priv->ops->read(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+	return priv->ops->write(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+	return priv->ops->read16(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+	return priv->ops->write16(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+	return priv->ops->readbulk(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+			   int len)
+{
+	return priv->ops->writebulk(priv->ndev, addr, buf, len);
+}
+
 #endif
 
+static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len)
+{
+	u32 addr;
+	int remain = 0;
+	int ret;
+	const u32 mem_start = priv->s0_rx_buf;
+	const u16 mem_size = priv->s0_rx_buf_size;
+
+	offset %= mem_size;
+	addr = mem_start + offset;
+
+	if (offset + len > mem_size) {
+		remain = (offset + len) % mem_size;
+		len = mem_size - offset;
+	}
+
+	ret = w5100_readbulk(priv, addr, buf, len);
+	if (ret || !remain)
+		return ret;
+
+	return w5100_readbulk(priv, mem_start, buf + len, remain);
+}
+
+static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf,
+			  int len)
+{
+	u32 addr;
+	int ret;
+	int remain = 0;
+	const u32 mem_start = priv->s0_tx_buf;
+	const u16 mem_size = priv->s0_tx_buf_size;
+
+	offset %= mem_size;
+	addr = mem_start + offset;
+
+	if (offset + len > mem_size) {
+		remain = (offset + len) % mem_size;
+		len = mem_size - offset;
+	}
+
+	ret = w5100_writebulk(priv, addr, buf, len);
+	if (ret || !remain)
+		return ret;
+
+	return w5100_writebulk(priv, mem_start, buf + len, remain);
+}
+
+static int w5100_reset(struct w5100_priv *priv)
+{
+	if (priv->ops->reset)
+		return priv->ops->reset(priv->ndev);
+
+	w5100_write(priv, W5100_MR, MR_RST);
+	mdelay(5);
+	w5100_write(priv, W5100_MR, MR_PB);
+
+	return 0;
+}
+
 static int w5100_command(struct w5100_priv *priv, u16 cmd)
 {
-	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+	unsigned long timeout;
 
-	w5100_write(priv, W5100_S0_CR, cmd);
-	mmiowb();
+	w5100_write(priv, W5100_S0_CR(priv), cmd);
 
-	while (w5100_read(priv, W5100_S0_CR) != 0) {
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (w5100_read(priv, W5100_S0_CR(priv)) != 0) {
 		if (time_after(jiffies, timeout))
 			return -EIO;
 		cpu_relax();
@@ -323,47 +602,124 @@
 static void w5100_write_macaddr(struct w5100_priv *priv)
 {
 	struct net_device *ndev = priv->ndev;
-	int i;
 
-	for (i = 0; i < ETH_ALEN; i++)
-		w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
-	mmiowb();
+	w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN);
 }
 
-static void w5100_hw_reset(struct w5100_priv *priv)
+static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask)
 {
-	w5100_write_direct(priv, W5100_MR, MR_RST);
-	mmiowb();
-	mdelay(5);
-	w5100_write_direct(priv, W5100_MR, priv->indirect ?
-				  MR_PB | MR_AI | MR_IND :
-				  MR_PB);
-	mmiowb();
-	w5100_write(priv, W5100_IMR, 0);
-	w5100_write_macaddr(priv);
+	u32 imr;
 
+	if (priv->ops->chip_id == W5500)
+		imr = W5500_SIMR;
+	else
+		imr = W5100_IMR;
+
+	w5100_write(priv, imr, mask);
+}
+
+static void w5100_enable_intr(struct w5100_priv *priv)
+{
+	w5100_socket_intr_mask(priv, IR_S0);
+}
+
+static void w5100_disable_intr(struct w5100_priv *priv)
+{
+	w5100_socket_intr_mask(priv, 0);
+}
+
+static void w5100_memory_configure(struct w5100_priv *priv)
+{
 	/* Configure 16K of internal memory
 	 * as 8K RX buffer and 8K TX buffer
 	 */
 	w5100_write(priv, W5100_RMSR, 0x03);
 	w5100_write(priv, W5100_TMSR, 0x03);
-	mmiowb();
+}
+
+static void w5200_memory_configure(struct w5100_priv *priv)
+{
+	int i;
+
+	/* Configure internal RX memory as 16K RX buffer and
+	 * internal TX memory as 16K TX buffer
+	 */
+	w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10);
+	w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10);
+
+	for (i = 1; i < 8; i++) {
+		w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0);
+		w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0);
+	}
+}
+
+static void w5500_memory_configure(struct w5100_priv *priv)
+{
+	int i;
+
+	/* Configure internal RX memory as 16K RX buffer and
+	 * internal TX memory as 16K TX buffer
+	 */
+	w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10);
+	w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10);
+
+	for (i = 1; i < 8; i++) {
+		w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0);
+		w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0);
+	}
+}
+
+static int w5100_hw_reset(struct w5100_priv *priv)
+{
+	u32 rtr;
+
+	w5100_reset(priv);
+
+	w5100_disable_intr(priv);
+	w5100_write_macaddr(priv);
+
+	switch (priv->ops->chip_id) {
+	case W5100:
+		w5100_memory_configure(priv);
+		rtr = W5100_RTR;
+		break;
+	case W5200:
+		w5200_memory_configure(priv);
+		rtr = W5100_RTR;
+		break;
+	case W5500:
+		w5500_memory_configure(priv);
+		rtr = W5500_RTR;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (w5100_read16(priv, rtr) != RTR_DEFAULT)
+		return -ENODEV;
+
+	return 0;
 }
 
 static void w5100_hw_start(struct w5100_priv *priv)
 {
-	w5100_write(priv, W5100_S0_MR, priv->promisc ?
-			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
-	mmiowb();
+	u8 mode = S0_MR_MACRAW;
+
+	if (!priv->promisc) {
+		if (priv->ops->chip_id == W5500)
+			mode |= W5500_S0_MR_MF;
+		else
+			mode |= S0_MR_MF;
+	}
+
+	w5100_write(priv, W5100_S0_MR(priv), mode);
 	w5100_command(priv, S0_CR_OPEN);
-	w5100_write(priv, W5100_IMR, IR_S0);
-	mmiowb();
+	w5100_enable_intr(priv);
 }
 
 static void w5100_hw_close(struct w5100_priv *priv)
 {
-	w5100_write(priv, W5100_IMR, 0);
-	mmiowb();
+	w5100_disable_intr(priv);
 	w5100_command(priv, S0_CR_CLOSE);
 }
 
@@ -412,20 +768,17 @@
 }
 
 static void w5100_get_regs(struct net_device *ndev,
-			   struct ethtool_regs *regs, void *_buf)
+			   struct ethtool_regs *regs, void *buf)
 {
 	struct w5100_priv *priv = netdev_priv(ndev);
-	u8 *buf = _buf;
-	u16 i;
 
 	regs->version = 1;
-	for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
-		*buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
-	for (i = 0; i < W5100_S0_REGS_LEN; i++)
-		*buf++ = w5100_read(priv, W5100_S0_REGS + i);
+	w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN);
+	buf += W5100_COMMON_REGS_LEN;
+	w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN);
 }
 
-static void w5100_tx_timeout(struct net_device *ndev)
+static void w5100_restart(struct net_device *ndev)
 {
 	struct w5100_priv *priv = netdev_priv(ndev);
 
@@ -433,74 +786,138 @@
 	w5100_hw_reset(priv);
 	w5100_hw_start(priv);
 	ndev->stats.tx_errors++;
-	ndev->trans_start = jiffies;
+	netif_trans_update(ndev);
 	netif_wake_queue(ndev);
 }
 
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static void w5100_restart_work(struct work_struct *work)
+{
+	struct w5100_priv *priv = container_of(work, struct w5100_priv,
+					       restart_work);
+
+	w5100_restart(priv->ndev);
+}
+
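+/* When ops->may_sleep is set the register accessors may block, so paths
+ * that must not sleep (the tx watchdog, ndo_start_xmit, ndo_set_rx_mode,
+ * the interrupt handler's RX kick) hand the chip access off to the
+ * restart/tx/rx/setrx work items instead of touching it directly.
+ */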
+static void w5100_tx_timeout(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	if (priv->ops->may_sleep)
+		schedule_work(&priv->restart_work);
+	else
+		w5100_restart(ndev);
+}
+
+static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
 {
 	struct w5100_priv *priv = netdev_priv(ndev);
 	u16 offset;
 
-	netif_stop_queue(ndev);
-
-	offset = w5100_read16(priv, W5100_S0_TX_WR);
+	offset = w5100_read16(priv, W5100_S0_TX_WR(priv));
 	w5100_writebuf(priv, offset, skb->data, skb->len);
-	w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
-	mmiowb();
+	w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len);
 	ndev->stats.tx_bytes += skb->len;
 	ndev->stats.tx_packets++;
 	dev_kfree_skb(skb);
 
 	w5100_command(priv, S0_CR_SEND);
+}
+
+static void w5100_tx_work(struct work_struct *work)
+{
+	struct w5100_priv *priv = container_of(work, struct w5100_priv,
+					       tx_work);
+	struct sk_buff *skb = priv->tx_skb;
+
+	priv->tx_skb = NULL;
+
+	if (WARN_ON(!skb))
+		return;
+	w5100_tx_skb(priv->ndev, skb);
+}
+
+static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+
+	if (priv->ops->may_sleep) {
+		WARN_ON(priv->tx_skb);
+		priv->tx_skb = skb;
+		queue_work(priv->xfer_wq, &priv->tx_work);
+	} else {
+		w5100_tx_skb(ndev, skb);
+	}
 
 	return NETDEV_TX_OK;
 }
 
+static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
+	u16 rx_len;
+	u16 offset;
+	u8 header[2];
+	u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv));
+
+	if (rx_buf_len == 0)
+		return NULL;
+
+	offset = w5100_read16(priv, W5100_S0_RX_RD(priv));
+	w5100_readbuf(priv, offset, header, 2);
+	rx_len = get_unaligned_be16(header) - 2;
+
+	skb = netdev_alloc_skb_ip_align(ndev, rx_len);
+	if (unlikely(!skb)) {
+		w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len);
+		w5100_command(priv, S0_CR_RECV);
+		ndev->stats.rx_dropped++;
+		return NULL;
+	}
+
+	skb_put(skb, rx_len);
+	w5100_readbuf(priv, offset + 2, skb->data, rx_len);
+	w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len);
+	w5100_command(priv, S0_CR_RECV);
+	skb->protocol = eth_type_trans(skb, ndev);
+
+	ndev->stats.rx_packets++;
+	ndev->stats.rx_bytes += rx_len;
+
+	return skb;
+}
+
+static void w5100_rx_work(struct work_struct *work)
+{
+	struct w5100_priv *priv = container_of(work, struct w5100_priv,
+					       rx_work);
+	struct sk_buff *skb;
+
+	while ((skb = w5100_rx_skb(priv->ndev)))
+		netif_rx_ni(skb);
+
+	w5100_enable_intr(priv);
+}
+
 static int w5100_napi_poll(struct napi_struct *napi, int budget)
 {
 	struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
-	struct net_device *ndev = priv->ndev;
-	struct sk_buff *skb;
 	int rx_count;
-	u16 rx_len;
-	u16 offset;
-	u8 header[2];
 
 	for (rx_count = 0; rx_count < budget; rx_count++) {
-		u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
-		if (rx_buf_len == 0)
+		struct sk_buff *skb = w5100_rx_skb(priv->ndev);
+
+		if (skb)
+			netif_receive_skb(skb);
+		else
 			break;
-
-		offset = w5100_read16(priv, W5100_S0_RX_RD);
-		w5100_readbuf(priv, offset, header, 2);
-		rx_len = get_unaligned_be16(header) - 2;
-
-		skb = netdev_alloc_skb_ip_align(ndev, rx_len);
-		if (unlikely(!skb)) {
-			w5100_write16(priv, W5100_S0_RX_RD,
-					    offset + rx_buf_len);
-			w5100_command(priv, S0_CR_RECV);
-			ndev->stats.rx_dropped++;
-			return -ENOMEM;
-		}
-
-		skb_put(skb, rx_len);
-		w5100_readbuf(priv, offset + 2, skb->data, rx_len);
-		w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
-		mmiowb();
-		w5100_command(priv, S0_CR_RECV);
-		skb->protocol = eth_type_trans(skb, ndev);
-
-		netif_receive_skb(skb);
-		ndev->stats.rx_packets++;
-		ndev->stats.rx_bytes += rx_len;
 	}
 
 	if (rx_count < budget) {
 		napi_complete(napi);
-		w5100_write(priv, W5100_IMR, IR_S0);
-		mmiowb();
+		w5100_enable_intr(priv);
 	}
 
 	return rx_count;
@@ -511,11 +928,10 @@
 	struct net_device *ndev = ndev_instance;
 	struct w5100_priv *priv = netdev_priv(ndev);
 
-	int ir = w5100_read(priv, W5100_S0_IR);
+	int ir = w5100_read(priv, W5100_S0_IR(priv));
 	if (!ir)
 		return IRQ_NONE;
-	w5100_write(priv, W5100_S0_IR, ir);
-	mmiowb();
+	w5100_write(priv, W5100_S0_IR(priv), ir);
 
 	if (ir & S0_IR_SENDOK) {
 		netif_dbg(priv, tx_done, ndev, "tx done\n");
@@ -523,11 +939,12 @@
 	}
 
 	if (ir & S0_IR_RECV) {
-		if (napi_schedule_prep(&priv->napi)) {
-			w5100_write(priv, W5100_IMR, 0);
-			mmiowb();
+		w5100_disable_intr(priv);
+
+		if (priv->ops->may_sleep)
+			queue_work(priv->xfer_wq, &priv->rx_work);
+		else if (napi_schedule_prep(&priv->napi))
 			__napi_schedule(&priv->napi);
-		}
 	}
 
 	return IRQ_HANDLED;
@@ -551,6 +968,14 @@
 	return IRQ_HANDLED;
 }
 
+static void w5100_setrx_work(struct work_struct *work)
+{
+	struct w5100_priv *priv = container_of(work, struct w5100_priv,
+					       setrx_work);
+
+	w5100_hw_start(priv);
+}
+
 static void w5100_set_rx_mode(struct net_device *ndev)
 {
 	struct w5100_priv *priv = netdev_priv(ndev);
@@ -558,7 +983,11 @@
 
 	if (priv->promisc != set_promisc) {
 		priv->promisc = set_promisc;
-		w5100_hw_start(priv);
+
+		if (priv->ops->may_sleep)
+			schedule_work(&priv->setrx_work);
+		else
+			w5100_hw_start(priv);
 	}
 }
 
@@ -620,95 +1049,100 @@
 	.ndo_change_mtu		= eth_change_mtu,
 };
 
-static int w5100_hw_probe(struct platform_device *pdev)
+static int w5100_mmio_probe(struct platform_device *pdev)
 {
 	struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
-	struct net_device *ndev = platform_get_drvdata(pdev);
-	struct w5100_priv *priv = netdev_priv(ndev);
-	const char *name = netdev_name(ndev);
+	const void *mac_addr = NULL;
 	struct resource *mem;
-	int mem_size;
+	const struct w5100_ops *ops;
 	int irq;
-	int ret;
 
-	if (data && is_valid_ether_addr(data->mac_addr)) {
-		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
-	} else {
-		eth_hw_addr_random(ndev);
-	}
+	if (data && is_valid_ether_addr(data->mac_addr))
+		mac_addr = data->mac_addr;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(&pdev->dev, mem);
-	if (IS_ERR(priv->base))
-		return PTR_ERR(priv->base);
-
-	mem_size = resource_size(mem);
-
-	spin_lock_init(&priv->reg_lock);
-	priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
-	if (priv->indirect) {
-		priv->read     = w5100_read_indirect;
-		priv->write    = w5100_write_indirect;
-		priv->read16   = w5100_read16_indirect;
-		priv->write16  = w5100_write16_indirect;
-		priv->readbuf  = w5100_readbuf_indirect;
-		priv->writebuf = w5100_writebuf_indirect;
-	} else {
-		priv->read     = w5100_read_direct;
-		priv->write    = w5100_write_direct;
-		priv->read16   = w5100_read16_direct;
-		priv->write16  = w5100_write16_direct;
-		priv->readbuf  = w5100_readbuf_direct;
-		priv->writebuf = w5100_writebuf_direct;
-	}
-
-	w5100_hw_reset(priv);
-	if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
-		return -ENODEV;
+	if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
+		ops = &w5100_mmio_indirect_ops;
+	else
+		ops = &w5100_mmio_direct_ops;
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
 		return irq;
-	ret = request_irq(irq, w5100_interrupt,
-			  IRQ_TYPE_LEVEL_LOW, name, ndev);
-	if (ret < 0)
-		return ret;
-	priv->irq = irq;
 
-	priv->link_gpio = data ? data->link_gpio : -EINVAL;
-	if (gpio_is_valid(priv->link_gpio)) {
-		char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
-		if (!link_name)
-			return -ENOMEM;
-		snprintf(link_name, 16, "%s-link", name);
-		priv->link_irq = gpio_to_irq(priv->link_gpio);
-		if (request_any_context_irq(priv->link_irq, w5100_detect_link,
-				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-				link_name, priv->ndev) < 0)
-			priv->link_gpio = -EINVAL;
-	}
-
-	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
-	return 0;
+	return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv),
+			   mac_addr, irq, data ? data->link_gpio : -EINVAL);
 }
 
-static int w5100_probe(struct platform_device *pdev)
+static int w5100_mmio_remove(struct platform_device *pdev)
+{
+	return w5100_remove(&pdev->dev);
+}
+
+void *w5100_ops_priv(const struct net_device *ndev)
+{
+	return netdev_priv(ndev) +
+	       ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN);
+}
+EXPORT_SYMBOL_GPL(w5100_ops_priv);
+
+int w5100_probe(struct device *dev, const struct w5100_ops *ops,
+		int sizeof_ops_priv, const void *mac_addr, int irq,
+		int link_gpio)
 {
 	struct w5100_priv *priv;
 	struct net_device *ndev;
 	int err;
+	size_t alloc_size;
 
-	ndev = alloc_etherdev(sizeof(*priv));
+	alloc_size = sizeof(*priv);
+	if (sizeof_ops_priv) {
+		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
+		alloc_size += sizeof_ops_priv;
+	}
+	alloc_size += NETDEV_ALIGN - 1;
+
+	ndev = alloc_etherdev(alloc_size);
 	if (!ndev)
 		return -ENOMEM;
-	SET_NETDEV_DEV(ndev, &pdev->dev);
-	platform_set_drvdata(pdev, ndev);
+	SET_NETDEV_DEV(ndev, dev);
+	dev_set_drvdata(dev, ndev);
 	priv = netdev_priv(ndev);
+
+	switch (ops->chip_id) {
+	case W5100:
+		priv->s0_regs = W5100_S0_REGS;
+		priv->s0_tx_buf = W5100_TX_MEM_START;
+		priv->s0_tx_buf_size = W5100_TX_MEM_SIZE;
+		priv->s0_rx_buf = W5100_RX_MEM_START;
+		priv->s0_rx_buf_size = W5100_RX_MEM_SIZE;
+		break;
+	case W5200:
+		priv->s0_regs = W5200_S0_REGS;
+		priv->s0_tx_buf = W5200_TX_MEM_START;
+		priv->s0_tx_buf_size = W5200_TX_MEM_SIZE;
+		priv->s0_rx_buf = W5200_RX_MEM_START;
+		priv->s0_rx_buf_size = W5200_RX_MEM_SIZE;
+		break;
+	case W5500:
+		priv->s0_regs = W5500_S0_REGS;
+		priv->s0_tx_buf = W5500_TX_MEM_START;
+		priv->s0_tx_buf_size = W5500_TX_MEM_SIZE;
+		priv->s0_rx_buf = W5500_RX_MEM_START;
+		priv->s0_rx_buf_size = W5500_RX_MEM_SIZE;
+		break;
+	default:
+		err = -EINVAL;
+		goto err_register;
+	}
+
 	priv->ndev = ndev;
+	priv->ops = ops;
+	priv->irq = irq;
+	priv->link_gpio = link_gpio;
 
 	ndev->netdev_ops = &w5100_netdev_ops;
 	ndev->ethtool_ops = &w5100_ethtool_ops;
-	ndev->watchdog_timeo = HZ;
 	netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
 
 	/* This chip doesn't support VLAN packets with normal MTU,
@@ -720,22 +1154,76 @@
 	if (err < 0)
 		goto err_register;
 
-	err = w5100_hw_probe(pdev);
-	if (err < 0)
-		goto err_hw_probe;
+	priv->xfer_wq = create_workqueue(netdev_name(ndev));
+	if (!priv->xfer_wq) {
+		err = -ENOMEM;
+		goto err_wq;
+	}
+
+	INIT_WORK(&priv->rx_work, w5100_rx_work);
+	INIT_WORK(&priv->tx_work, w5100_tx_work);
+	INIT_WORK(&priv->setrx_work, w5100_setrx_work);
+	INIT_WORK(&priv->restart_work, w5100_restart_work);
+
+	if (mac_addr)
+		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+	else
+		eth_hw_addr_random(ndev);
+
+	if (priv->ops->init) {
+		err = priv->ops->init(priv->ndev);
+		if (err)
+			goto err_hw;
+	}
+
+	err = w5100_hw_reset(priv);
+	if (err)
+		goto err_hw;
+
+	if (ops->may_sleep) {
+		err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
+					   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+					   netdev_name(ndev), ndev);
+	} else {
+		err = request_irq(priv->irq, w5100_interrupt,
+				  IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
+	}
+	if (err)
+		goto err_hw;
+
+	if (gpio_is_valid(priv->link_gpio)) {
+		char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL);
+
+		if (!link_name) {
+			err = -ENOMEM;
+			goto err_gpio;
+		}
+		snprintf(link_name, 16, "%s-link", netdev_name(ndev));
+		priv->link_irq = gpio_to_irq(priv->link_gpio);
+		if (request_any_context_irq(priv->link_irq, w5100_detect_link,
+					    IRQF_TRIGGER_RISING |
+					    IRQF_TRIGGER_FALLING,
+					    link_name, priv->ndev) < 0)
+			priv->link_gpio = -EINVAL;
+	}
 
 	return 0;
 
-err_hw_probe:
+err_gpio:
+	free_irq(priv->irq, ndev);
+err_hw:
+	destroy_workqueue(priv->xfer_wq);
+err_wq:
 	unregister_netdev(ndev);
 err_register:
 	free_netdev(ndev);
 	return err;
 }
+EXPORT_SYMBOL_GPL(w5100_probe);
 
-static int w5100_remove(struct platform_device *pdev)
+int w5100_remove(struct device *dev)
 {
-	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct net_device *ndev = dev_get_drvdata(dev);
 	struct w5100_priv *priv = netdev_priv(ndev);
 
 	w5100_hw_reset(priv);
@@ -743,16 +1231,21 @@
 	if (gpio_is_valid(priv->link_gpio))
 		free_irq(priv->link_irq, ndev);
 
+	flush_work(&priv->setrx_work);
+	flush_work(&priv->restart_work);
+	flush_workqueue(priv->xfer_wq);
+	destroy_workqueue(priv->xfer_wq);
+
 	unregister_netdev(ndev);
 	free_netdev(ndev);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(w5100_remove);
 
 #ifdef CONFIG_PM_SLEEP
 static int w5100_suspend(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct net_device *ndev = dev_get_drvdata(dev);
 	struct w5100_priv *priv = netdev_priv(ndev);
 
 	if (netif_running(ndev)) {
@@ -766,8 +1259,7 @@
 
 static int w5100_resume(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct net_device *ndev = dev_get_drvdata(dev);
 	struct w5100_priv *priv = netdev_priv(ndev);
 
 	if (netif_running(ndev)) {
@@ -783,15 +1275,15 @@
 }
 #endif /* CONFIG_PM_SLEEP */
 
-static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+EXPORT_SYMBOL_GPL(w5100_pm_ops);
 
-static struct platform_driver w5100_driver = {
+static struct platform_driver w5100_mmio_driver = {
 	.driver		= {
 		.name	= DRV_NAME,
 		.pm	= &w5100_pm_ops,
 	},
-	.probe		= w5100_probe,
-	.remove		= w5100_remove,
+	.probe		= w5100_mmio_probe,
+	.remove		= w5100_mmio_remove,
 };
-
-module_platform_driver(w5100_driver);
+module_platform_driver(w5100_mmio_driver);
diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h
new file mode 100644
index 0000000..17983a3
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100.h
@@ -0,0 +1,37 @@
+/*
+ * Ethernet driver for the WIZnet W5100 chip.
+ *
+ * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+enum {
+	W5100,
+	W5200,
+	W5500,
+};
+
+struct w5100_ops {
+	bool may_sleep;
+	int chip_id;
+	int (*read)(struct net_device *ndev, u32 addr);
+	int (*write)(struct net_device *ndev, u32 addr, u8 data);
+	int (*read16)(struct net_device *ndev, u32 addr);
+	int (*write16)(struct net_device *ndev, u32 addr, u16 data);
+	int (*readbulk)(struct net_device *ndev, u32 addr, u8 *buf, int len);
+	int (*writebulk)(struct net_device *ndev, u32 addr, const u8 *buf,
+			 int len);
+	int (*reset)(struct net_device *ndev);
+	int (*init)(struct net_device *ndev);
+};
+
+void *w5100_ops_priv(const struct net_device *ndev);
+
+int w5100_probe(struct device *dev, const struct w5100_ops *ops,
+		int sizeof_ops_priv, const void *mac_addr, int irq,
+		int link_gpio);
+int w5100_remove(struct device *dev);
+
+extern const struct dev_pm_ops w5100_pm_ops;
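Aside, not part of the patch: the header above turns w5100.c into a small library that bus front ends plug into. A purely hypothetical glue driver could wire its accessors in roughly as sketched below; every w51xx_example_* identifier is invented for illustration, and only the w5100_* symbols come from the header.

/* Hypothetical bus glue for the w5100 core library (illustration only). */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/string.h>

#include "w5100.h"

static int w51xx_example_read(struct net_device *ndev, u32 addr)
{
	return 0;		/* bus-specific 8-bit register read */
}

static int w51xx_example_write(struct net_device *ndev, u32 addr, u8 data)
{
	return 0;		/* bus-specific 8-bit register write */
}

static int w51xx_example_read16(struct net_device *ndev, u32 addr)
{
	return 0;		/* bus-specific 16-bit register read */
}

static int w51xx_example_write16(struct net_device *ndev, u32 addr, u16 data)
{
	return 0;		/* bus-specific 16-bit register write */
}

static int w51xx_example_readbulk(struct net_device *ndev, u32 addr,
				  u8 *buf, int len)
{
	memset(buf, 0, len);	/* bulk read of the socket buffer window */
	return 0;
}

static int w51xx_example_writebulk(struct net_device *ndev, u32 addr,
				   const u8 *buf, int len)
{
	return 0;		/* bulk write of the socket buffer window */
}

static const struct w5100_ops w51xx_example_ops = {
	.may_sleep	= true,		/* accessors are allowed to block */
	.chip_id	= W5500,
	.read		= w51xx_example_read,
	.write		= w51xx_example_write,
	.read16		= w51xx_example_read16,
	.write16	= w51xx_example_write16,
	.readbulk	= w51xx_example_readbulk,
	.writebulk	= w51xx_example_writebulk,
};

static int w51xx_example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* no bus-private area, random MAC address, no link GPIO */
	return w5100_probe(&pdev->dev, &w51xx_example_ops, 0, NULL, irq,
			   -EINVAL);
}

static int w51xx_example_remove(struct platform_device *pdev)
{
	return w5100_remove(&pdev->dev);
}

static struct platform_driver w51xx_example_driver = {
	.driver	= {
		.name	= "w51xx-example",
		.pm	= &w5100_pm_ops,
	},
	.probe	= w51xx_example_probe,
	.remove	= w51xx_example_remove,
};
module_platform_driver(w51xx_example_driver);

MODULE_LICENSE("GPL");
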
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 8da7b93..0b37ce9 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -362,7 +362,7 @@
 	w5300_hw_reset(priv);
 	w5300_hw_start(priv);
 	ndev->stats.tx_errors++;
-	ndev->trans_start = jiffies;
+	netif_trans_update(ndev);
 	netif_wake_queue(ndev);
 }
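
Aside, not part of the patch: the ndev->trans_start = jiffies writes replaced here and in the following drivers go through a new helper. To the best of my recollection it is defined in <linux/netdevice.h> roughly as below; treat the exact body as an assumption rather than a quote.

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* Update the per-queue timestamp the tx watchdog looks at. */
	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}

dev_trans_start(), used in the fjes hunk further down, reads the most recent of those per-queue timestamps back (again, to the best of my knowledge).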
 
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 5a1068d..7397087 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -584,7 +584,7 @@
 		dev_err(&ndev->dev, "Error setting TEMAC options\n");
 
 	/* Init Driver variable */
-	ndev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(ndev); /* prevent tx timeout */
 }
 
 static void temac_adjust_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4684644..8c7f5be 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -508,7 +508,7 @@
 	axienet_set_multicast_list(ndev);
 	axienet_setoptions(ndev, lp->options);
 
-	ndev->trans_start = jiffies;
+	netif_trans_update(ndev);
 }
 
 /**
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index e324b30..3cee84a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -531,7 +531,7 @@
 	}
 
 	/* To exclude tx timeout */
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 
 	/* We're all ready to go. Start the queue */
 	netif_wake_queue(dev);
@@ -563,7 +563,7 @@
 			dev->stats.tx_bytes += lp->deferred_skb->len;
 			dev_kfree_skb_irq(lp->deferred_skb);
 			lp->deferred_skb = NULL;
-			dev->trans_start = jiffies; /* prevent tx timeout */
+			netif_trans_update(dev); /* prevent tx timeout */
 			netif_wake_queue(dev);
 		}
 	}
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index d56f869..7b44968 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1199,7 +1199,7 @@
 	struct net_device *dev = local->dev;
     /* reset the card */
     do_reset(dev,1);
-    dev->trans_start = jiffies; /* prevent tx timeout */
+    netif_trans_update(dev); /* prevent tx timeout */
     netif_wake_queue(dev);
 }
 
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b103adb..0dbafed 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -179,6 +179,8 @@
 
 	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
 		info->v1i.vlan_id[i] = vlan_id[i];
+
+	info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
 }
 
 void
@@ -214,6 +216,7 @@
 	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
 	struct fjes_device_command_param param;
 	struct ep_share_mem_info *buf_pair;
+	unsigned long flags;
 	size_t mem_size;
 	int result;
 	int epidx;
@@ -262,10 +265,12 @@
 			if (result)
 				return result;
 
+			spin_lock_irqsave(&hw->rx_status_lock, flags);
 			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
 					    fjes_support_mtu[0]);
 			fjes_hw_setup_epbuf(&buf_pair->rx, mac,
 					    fjes_support_mtu[0]);
+			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 		}
 	}
 
@@ -327,6 +332,7 @@
 	INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
 
 	mutex_init(&hw->hw_info.lock);
+	spin_lock_init(&hw->rx_status_lock);
 
 	hw->max_epid = fjes_hw_get_max_epid(hw);
 	hw->my_epid = fjes_hw_get_my_epid(hw);
@@ -734,6 +740,7 @@
 void fjes_hw_raise_epstop(struct fjes_hw *hw)
 {
 	enum ep_partner_status status;
+	unsigned long flags;
 	int epidx;
 
 	for (epidx = 0; epidx < hw->max_epid; epidx++) {
@@ -753,8 +760,10 @@
 		set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
 		set_bit(epidx, &hw->txrx_stop_req_bit);
 
+		spin_lock_irqsave(&hw->rx_status_lock, flags);
 		hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
 				FJES_RX_STOP_REQ_REQUEST;
+		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 	}
 }
 
@@ -810,7 +819,8 @@
 {
 	union ep_buffer_info *info = epbh->info;
 
-	return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu));
+	return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
+		info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
 }
 
 bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
@@ -863,6 +873,9 @@
 {
 	union ep_buffer_info *info = epbh->info;
 
+	if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+		return true;
+
 	if (info->v1i.count_max == 0)
 		return true;
 
@@ -932,6 +945,7 @@
 
 	struct fjes_adapter *adapter;
 	struct net_device *netdev;
+	unsigned long flags;
 
 	ulong unshare_bit = 0;
 	ulong share_bit = 0;
@@ -1024,8 +1038,10 @@
 			continue;
 
 		if (test_bit(epidx, &share_bit)) {
+			spin_lock_irqsave(&hw->rx_status_lock, flags);
 			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
 					    netdev->dev_addr, netdev->mtu);
+			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
 			mutex_lock(&hw->hw_info.lock);
 
@@ -1069,10 +1085,14 @@
 
 			mutex_unlock(&hw->hw_info.lock);
 
-			if (ret == 0)
+			if (ret == 0) {
+				spin_lock_irqsave(&hw->rx_status_lock, flags);
 				fjes_hw_setup_epbuf(
 					&hw->ep_shm_info[epidx].tx,
 					netdev->dev_addr, netdev->mtu);
+				spin_unlock_irqrestore(&hw->rx_status_lock,
+						       flags);
+			}
 		}
 
 		if (test_bit(epidx, &irq_bit)) {
@@ -1080,9 +1100,11 @@
 						REG_ICTL_MASK_TXRX_STOP_REQ);
 
 			set_bit(epidx, &hw->txrx_stop_req_bit);
+			spin_lock_irqsave(&hw->rx_status_lock, flags);
 			hw->ep_shm_info[epidx].tx.
 				info->v1i.rx_status |=
 					FJES_RX_STOP_REQ_REQUEST;
+			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 			set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
 		}
 	}
@@ -1098,6 +1120,7 @@
 {
 	struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
 	struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
+	unsigned long flags;
 
 	ulong remain_bit;
 	int epid_bit;
@@ -1105,9 +1128,12 @@
 	while ((remain_bit = hw->epstop_req_bit)) {
 		for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
 			if (remain_bit & 1) {
+				spin_lock_irqsave(&hw->rx_status_lock, flags);
 				hw->ep_shm_info[epid_bit].
 					tx.info->v1i.rx_status |=
 						FJES_RX_STOP_REQ_DONE;
+				spin_unlock_irqrestore(&hw->rx_status_lock,
+						       flags);
 
 				clear_bit(epid_bit, &hw->epstop_req_bit);
 				set_bit(epid_bit,
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
index 6d57b89..1445ac9 100644
--- a/drivers/net/fjes/fjes_hw.h
+++ b/drivers/net/fjes/fjes_hw.h
@@ -33,9 +33,9 @@
 #define EP_BUFFER_SUPPORT_VLAN_MAX 4
 #define EP_BUFFER_INFO_SIZE 4096
 
-#define FJES_DEVICE_RESET_TIMEOUT  ((17 + 1) * 3) /* sec */
-#define FJES_COMMAND_REQ_TIMEOUT  (5 + 1) /* sec */
-#define FJES_COMMAND_REQ_BUFF_TIMEOUT	(8 * 3) /* sec */
+#define FJES_DEVICE_RESET_TIMEOUT  ((17 + 1) * 3 * 8) /* sec */
+#define FJES_COMMAND_REQ_TIMEOUT  ((5 + 1) * 3 * 8) /* sec */
+#define FJES_COMMAND_REQ_BUFF_TIMEOUT	(60 * 3) /* sec */
 #define FJES_COMMAND_EPSTOP_WAIT_TIMEOUT	(1) /* sec */
 
 #define FJES_CMD_REQ_ERR_INFO_PARAM  (0x0001)
@@ -57,6 +57,7 @@
 #define FJES_RX_STOP_REQ_DONE		(0x1)
 #define FJES_RX_STOP_REQ_REQUEST	(0x2)
 #define FJES_RX_POLL_WORK		(0x4)
+#define FJES_RX_MTU_CHANGING_DONE	(0x8)
 
 #define EP_BUFFER_SIZE \
 	(((sizeof(union ep_buffer_info) + (128 * (64 * 1024))) \
@@ -299,6 +300,8 @@
 	u8 *base;
 
 	struct fjes_hw_info hw_info;
+
+	spinlock_t rx_status_lock; /* spinlock for rx_status */
 };
 
 int fjes_hw_init(struct fjes_hw *);
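
Aside, not part of the patch: rx_status_lock, added above, guards every read-modify-write of v1i.rx_status. The pattern the following hunks repeat could be factored into a helper like this purely hypothetical sketch (fjes_rx_status_set is not something the patch adds; the driver open-codes the sequence):

#include <linux/spinlock.h>

#include "fjes_hw.h"

/* Hypothetical helper showing the locking pattern used for rx_status. */
static void fjes_rx_status_set(struct fjes_hw *hw, int epidx, u8 bits)
{
	unsigned long flags;

	spin_lock_irqsave(&hw->rx_status_lock, flags);
	hw->ep_shm_info[epidx].tx.info->v1i.rx_status |= bits;
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}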
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 0ddb54f..86c331b 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -29,7 +29,7 @@
 #include "fjes.h"
 
 #define MAJ 1
-#define MIN 0
+#define MIN 1
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
 #define DRV_NAME	"fjes"
 char fjes_driver_name[] = DRV_NAME;
@@ -290,6 +290,7 @@
 {
 	struct fjes_adapter *adapter = netdev_priv(netdev);
 	struct fjes_hw *hw = &adapter->hw;
+	unsigned long flags;
 	int epidx;
 
 	netif_tx_stop_all_queues(netdev);
@@ -299,13 +300,18 @@
 
 	napi_disable(&adapter->napi);
 
+	spin_lock_irqsave(&hw->rx_status_lock, flags);
 	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 		if (epidx == hw->my_epid)
 			continue;
 
-		adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status &=
-			~FJES_RX_POLL_WORK;
+		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+		    EP_PARTNER_SHARED)
+			adapter->hw.ep_shm_info[epidx]
+				   .tx.info->v1i.rx_status &=
+				~FJES_RX_POLL_WORK;
 	}
+	spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
 	fjes_free_irq(adapter);
 
@@ -330,6 +336,7 @@
 	struct net_device *netdev = adapter->netdev;
 	struct ep_share_mem_info *buf_pair;
 	struct fjes_hw *hw = &adapter->hw;
+	unsigned long flags;
 	int result;
 	int epidx;
 
@@ -371,8 +378,10 @@
 
 		buf_pair = &hw->ep_shm_info[epidx];
 
+		spin_lock_irqsave(&hw->rx_status_lock, flags);
 		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
 				    netdev->mtu);
+		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
 		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
 			mutex_lock(&hw->hw_info.lock);
@@ -402,6 +411,7 @@
 	struct ep_share_mem_info *buf_pair;
 	struct fjes_hw *hw = &adapter->hw;
 	bool reset_flag = false;
+	unsigned long flags;
 	int result;
 	int epidx;
 
@@ -418,8 +428,10 @@
 
 		buf_pair = &hw->ep_shm_info[epidx];
 
+		spin_lock_irqsave(&hw->rx_status_lock, flags);
 		fjes_hw_setup_epbuf(&buf_pair->tx,
 				    netdev->dev_addr, netdev->mtu);
+		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
 		clear_bit(epidx, &hw->txrx_stop_req_bit);
 	}
@@ -459,7 +471,7 @@
 	int i;
 
 	if (((long)jiffies -
-		(long)(netdev->trans_start)) > FJES_TX_TX_STALL_TIMEOUT) {
+		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
 		netif_wake_queue(netdev);
 		return;
 	}
@@ -481,6 +493,9 @@
 
 			info = adapter->hw.ep_shm_info[epid].tx.info;
 
+			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+				return;
+
 			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
 					 info->v1i.count_max)) {
 				all_queue_available = 0;
@@ -549,7 +564,8 @@
 		if ((hw->ep_shm_info[epid].tx_status_work ==
 		     FJES_TX_DELAY_SEND_PENDING) &&
 		    (pstatus == EP_PARTNER_SHARED) &&
-		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status)) {
+		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
+		      FJES_RX_POLL_WORK)) {
 			fjes_hw_raise_interrupt(hw, epid,
 						REG_ICTL_MASK_RX_DATA);
 		}
@@ -653,7 +669,7 @@
 				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
 			/* version is NOT 0 */
 			adapter->stats64.tx_carrier_errors += 1;
-			hw->ep_shm_info[my_epid].net_stats
+			hw->ep_shm_info[dest_epid].net_stats
 						.tx_carrier_errors += 1;
 
 			ret = NETDEV_TX_OK;
@@ -661,9 +677,9 @@
 				&adapter->hw.ep_shm_info[dest_epid].rx,
 				netdev->mtu)) {
 			adapter->stats64.tx_dropped += 1;
-			hw->ep_shm_info[my_epid].net_stats.tx_dropped += 1;
+			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
 			adapter->stats64.tx_errors += 1;
-			hw->ep_shm_info[my_epid].net_stats.tx_errors += 1;
+			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
 
 			ret = NETDEV_TX_OK;
 		} else if (vlan &&
@@ -694,15 +710,15 @@
 					    (long)adapter->tx_start_jiffies) >=
 					    FJES_TX_RETRY_TIMEOUT) {
 					adapter->stats64.tx_fifo_errors += 1;
-					hw->ep_shm_info[my_epid].net_stats
+					hw->ep_shm_info[dest_epid].net_stats
 								.tx_fifo_errors += 1;
 					adapter->stats64.tx_errors += 1;
-					hw->ep_shm_info[my_epid].net_stats
+					hw->ep_shm_info[dest_epid].net_stats
 								.tx_errors += 1;
 
 					ret = NETDEV_TX_OK;
 				} else {
-					netdev->trans_start = jiffies;
+					netif_trans_update(netdev);
 					netif_tx_stop_queue(cur_queue);
 
 					if (!work_pending(&adapter->tx_stall_task))
@@ -714,10 +730,10 @@
 			} else {
 				if (!is_multi) {
 					adapter->stats64.tx_packets += 1;
-					hw->ep_shm_info[my_epid].net_stats
+					hw->ep_shm_info[dest_epid].net_stats
 								.tx_packets += 1;
 					adapter->stats64.tx_bytes += len;
-					hw->ep_shm_info[my_epid].net_stats
+					hw->ep_shm_info[dest_epid].net_stats
 								.tx_bytes += len;
 				}
 
@@ -759,9 +775,12 @@
 
 static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
 {
+	struct fjes_adapter *adapter = netdev_priv(netdev);
 	bool running = netif_running(netdev);
-	int ret = 0;
-	int idx;
+	struct fjes_hw *hw = &adapter->hw;
+	unsigned long flags;
+	int ret = -EINVAL;
+	int idx, epidx;
 
 	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
 		if (new_mtu <= fjes_support_mtu[idx]) {
@@ -769,19 +788,58 @@
 			if (new_mtu == netdev->mtu)
 				return 0;
 
-			if (running)
-				fjes_close(netdev);
-
-			netdev->mtu = new_mtu;
-
-			if (running)
-				ret = fjes_open(netdev);
-
-			return ret;
+			ret = 0;
+			break;
 		}
 	}
 
-	return -EINVAL;
+	if (ret)
+		return ret;
+
+	if (running) {
+		spin_lock_irqsave(&hw->rx_status_lock, flags);
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			if (epidx == hw->my_epid)
+				continue;
+			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
+				~FJES_RX_MTU_CHANGING_DONE;
+		}
+		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+
+		netif_tx_stop_all_queues(netdev);
+		netif_carrier_off(netdev);
+		cancel_work_sync(&adapter->tx_stall_task);
+		napi_disable(&adapter->napi);
+
+		msleep(1000);
+
+		netif_tx_stop_all_queues(netdev);
+	}
+
+	netdev->mtu = new_mtu;
+
+	if (running) {
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			if (epidx == hw->my_epid)
+				continue;
+
+			spin_lock_irqsave(&hw->rx_status_lock, flags);
+			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
+					    netdev->dev_addr,
+					    netdev->mtu);
+
+			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
+				FJES_RX_MTU_CHANGING_DONE;
+			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+		}
+
+		netif_tx_wake_all_queues(netdev);
+		netif_carrier_on(netdev);
+		napi_enable(&adapter->napi);
+		napi_schedule(&adapter->napi);
+	}
+
+	return ret;
 }
 
 static int fjes_vlan_rx_add_vid(struct net_device *netdev,
@@ -825,6 +883,7 @@
 {
 	struct fjes_hw *hw = &adapter->hw;
 	enum ep_partner_status status;
+	unsigned long flags;
 
 	status = fjes_hw_get_partner_ep_status(hw, src_epid);
 	switch (status) {
@@ -834,8 +893,10 @@
 		break;
 	case EP_PARTNER_WAITING:
 		if (src_epid < hw->my_epid) {
+			spin_lock_irqsave(&hw->rx_status_lock, flags);
 			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
 				FJES_RX_STOP_REQ_DONE;
+			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
 			clear_bit(src_epid, &hw->txrx_stop_req_bit);
 			set_bit(src_epid, &adapter->unshare_watch_bitmask);
@@ -861,14 +922,17 @@
 {
 	struct fjes_hw *hw = &adapter->hw;
 	enum ep_partner_status status;
+	unsigned long flags;
 
 	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
 
 	status = fjes_hw_get_partner_ep_status(hw, src_epid);
 	switch (status) {
 	case EP_PARTNER_WAITING:
+		spin_lock_irqsave(&hw->rx_status_lock, flags);
 		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
 				FJES_RX_STOP_REQ_DONE;
+		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 		clear_bit(src_epid, &hw->txrx_stop_req_bit);
 		/* fall through */
 	case EP_PARTNER_UNSHARE:
@@ -1001,13 +1065,17 @@
 	size_t frame_len;
 	void *frame;
 
+	spin_lock(&hw->rx_status_lock);
 	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 		if (epidx == hw->my_epid)
 			continue;
 
-		adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |=
-			FJES_RX_POLL_WORK;
+		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+		    EP_PARTNER_SHARED)
+			adapter->hw.ep_shm_info[epidx]
+				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
 	}
+	spin_unlock(&hw->rx_status_lock);
 
 	while (work_done < budget) {
 		prefetch(&adapter->hw);
@@ -1065,13 +1133,17 @@
 		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
 			napi_reschedule(napi);
 		} else {
+			spin_lock(&hw->rx_status_lock);
 			for (epidx = 0; epidx < hw->max_epid; epidx++) {
 				if (epidx == hw->my_epid)
 					continue;
-				adapter->hw.ep_shm_info[epidx]
-					   .tx.info->v1i.rx_status &=
+				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+				    EP_PARTNER_SHARED)
+					adapter->hw.ep_shm_info[epidx].tx
+						   .info->v1i.rx_status &=
 						~FJES_RX_POLL_WORK;
 			}
+			spin_unlock(&hw->rx_status_lock);
 
 			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
 		}
@@ -1129,7 +1201,7 @@
 
 	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
 	hw->hw_res.start = res->start;
-	hw->hw_res.size = res->end - res->start + 1;
+	hw->hw_res.size = resource_size(res);
 	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
 	err = fjes_hw_init(&adapter->hw);
 	if (err)
@@ -1203,7 +1275,7 @@
 	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
 	netdev->netdev_ops = &fjes_netdev_ops;
 	fjes_set_ethtool_ops(netdev);
-	netdev->mtu = fjes_support_mtu[0];
+	netdev->mtu = fjes_support_mtu[3];
 	netdev->flags |= IFF_BROADCAST;
 	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
 }
@@ -1240,6 +1312,7 @@
 	int max_epid, my_epid, epidx;
 	int stop_req, stop_req_done;
 	ulong unshare_watch_bitmask;
+	unsigned long flags;
 	int wait_time = 0;
 	int is_shared;
 	int ret;
@@ -1292,8 +1365,10 @@
 			}
 			mutex_unlock(&hw->hw_info.lock);
 
+			spin_lock_irqsave(&hw->rx_status_lock, flags);
 			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
 					    netdev->dev_addr, netdev->mtu);
+			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
 
 			clear_bit(epidx, &hw->txrx_stop_req_bit);
 			clear_bit(epidx, &unshare_watch_bitmask);
@@ -1331,9 +1406,12 @@
 				}
 				mutex_unlock(&hw->hw_info.lock);
 
+				spin_lock_irqsave(&hw->rx_status_lock, flags);
 				fjes_hw_setup_epbuf(
 					&hw->ep_shm_info[epidx].tx,
 					netdev->dev_addr, netdev->mtu);
+				spin_unlock_irqrestore(&hw->rx_status_lock,
+						       flags);
 
 				clear_bit(epidx, &hw->txrx_stop_req_bit);
 				clear_bit(epidx, &unshare_watch_bitmask);
@@ -1341,8 +1419,11 @@
 			}
 
 			if (test_bit(epidx, &unshare_watch_bitmask)) {
+				spin_lock_irqsave(&hw->rx_status_lock, flags);
 				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
 						~FJES_RX_STOP_REQ_DONE;
+				spin_unlock_irqrestore(&hw->rx_status_lock,
+						       flags);
 			}
 		}
 	}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index bc16889..a6dc11c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -87,7 +87,6 @@
 	struct socket		*sock;
 	struct rcu_head		rcu;
 	int			refcnt;
-	struct udp_offload	udp_offloads;
 	struct hlist_head	vni_list[VNI_HASH_SIZE];
 	u32			flags;
 };
@@ -409,14 +408,6 @@
 	struct net *net = sock_net(sk);
 	sa_family_t sa_family = geneve_get_sk_family(gs);
 	__be16 port = inet_sk(sk)->inet_sport;
-	int err;
-
-	if (sa_family == AF_INET) {
-		err = udp_add_offload(sock_net(sk), &gs->udp_offloads);
-		if (err)
-			pr_warn("geneve: udp_add_offload failed with status %d\n",
-				err);
-	}
 
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
@@ -432,9 +423,9 @@
 	return sizeof(*gh) + gh->opt_len * 4;
 }
 
-static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
-					   struct sk_buff *skb,
-					   struct udp_offload *uoff)
+static struct sk_buff **geneve_gro_receive(struct sock *sk,
+					   struct sk_buff **head,
+					   struct sk_buff *skb)
 {
 	struct sk_buff *p, **pp = NULL;
 	struct genevehdr *gh, *gh2;
@@ -495,8 +486,8 @@
 	return pp;
 }
 
-static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
-			       struct udp_offload *uoff)
+static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
+			       int nhoff)
 {
 	struct genevehdr *gh;
 	struct packet_offload *ptype;
@@ -504,8 +495,6 @@
 	int gh_len;
 	int err = -ENOSYS;
 
-	udp_tunnel_gro_complete(skb, nhoff);
-
 	gh = (struct genevehdr *)(skb->data + nhoff);
 	gh_len = geneve_hlen(gh);
 	type = gh->proto_type;
@@ -516,6 +505,9 @@
 		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
 
 	rcu_read_unlock();
+
+	skb_set_inner_mac_header(skb, nhoff + gh_len);
+
 	return err;
 }
 
@@ -545,14 +537,14 @@
 		INIT_HLIST_HEAD(&gs->vni_list[h]);
 
 	/* Initialize the geneve udp offloads structure */
-	gs->udp_offloads.port = port;
-	gs->udp_offloads.callbacks.gro_receive  = geneve_gro_receive;
-	gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
 	geneve_notify_add_rx_port(gs);
 
 	/* Mark socket as an encapsulation socket */
+	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
 	tunnel_cfg.sk_user_data = gs;
 	tunnel_cfg.encap_type = 1;
+	tunnel_cfg.gro_receive = geneve_gro_receive;
+	tunnel_cfg.gro_complete = geneve_gro_complete;
 	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
 	tunnel_cfg.encap_destroy = NULL;
 	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
@@ -576,9 +568,6 @@
 	}
 
 	rcu_read_unlock();
-
-	if (sa_family == AF_INET)
-		udp_del_offload(&gs->udp_offloads);
 }
 
 static void __geneve_sock_release(struct geneve_sock *gs)
@@ -708,16 +697,12 @@
 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err)) {
-		kfree_skb(skb);
+	if (unlikely(err))
 		goto free_rt;
-	}
 
-	skb = udp_tunnel_handle_offloads(skb, udp_sum);
-	if (IS_ERR(skb)) {
-		err = PTR_ERR(skb);
+	err = udp_tunnel_handle_offloads(skb, udp_sum);
+	if (err)
 		goto free_rt;
-	}
 
 	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
 	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -745,16 +730,12 @@
 	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
 			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err)) {
-		kfree_skb(skb);
+	if (unlikely(err))
 		goto free_dst;
-	}
 
-	skb = udp_tunnel_handle_offloads(skb, udp_sum);
-	if (IS_ERR(skb)) {
-		err = PTR_ERR(skb);
+	err = udp_tunnel_handle_offloads(skb, udp_sum);
+	if (err)
 		goto free_dst;
-	}
 
 	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
 	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -949,7 +930,7 @@
 		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
 				       info->options_len, opts, flags, xnet);
 		if (unlikely(err))
-			goto err;
+			goto tx_error;
 
 		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
 		ttl = key->ttl;
@@ -958,7 +939,7 @@
 		err = geneve_build_skb(rt, skb, 0, geneve->vni,
 				       0, NULL, flags, xnet);
 		if (unlikely(err))
-			goto err;
+			goto tx_error;
 
 		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
 		ttl = geneve->ttl;
@@ -976,7 +957,7 @@
 
 tx_error:
 	dev_kfree_skb(skb);
-err:
+
 	if (err == -ELOOP)
 		dev->stats.collisions++;
 	else if (err == -ENETUNREACH)
@@ -1038,7 +1019,7 @@
 					info->options_len, opts,
 					flags, xnet);
 		if (unlikely(err))
-			goto err;
+			goto tx_error;
 
 		prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
 		ttl = key->ttl;
@@ -1047,7 +1028,7 @@
 		err = geneve6_build_skb(dst, skb, 0, geneve->vni,
 					0, NULL, flags, xnet);
 		if (unlikely(err))
-			goto err;
+			goto tx_error;
 
 		prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
 					   iip, skb);
@@ -1066,7 +1047,7 @@
 
 tx_error:
 	dev_kfree_skb(skb);
-err:
+
 	if (err == -ELOOP)
 		dev->stats.collisions++;
 	else if (err == -ENETUNREACH)
@@ -1192,7 +1173,7 @@
  * supply the listening GENEVE udp ports. Callers are expected
  * to implement the ndo_add_geneve_port.
  */
-void geneve_get_rx_port(struct net_device *dev)
+static void geneve_push_rx_ports(struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
@@ -1201,6 +1182,9 @@
 	struct sock *sk;
 	__be16 port;
 
+	if (!dev->netdev_ops->ndo_add_geneve_port)
+		return;
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(gs, &gn->sock_list, list) {
 		sk = gs->sock->sk;
@@ -1210,7 +1194,6 @@
 	}
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL_GPL(geneve_get_rx_port);
 
 /* Initialize the device structure. */
 static void geneve_setup(struct net_device *dev)
@@ -1558,6 +1541,21 @@
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
 
+static int geneve_netdevice_event(struct notifier_block *unused,
+				  unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (event == NETDEV_OFFLOAD_PUSH_GENEVE)
+		geneve_push_rx_ports(dev);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block geneve_notifier_block __read_mostly = {
+	.notifier_call = geneve_netdevice_event,
+};
+
 static __net_init int geneve_init_net(struct net *net)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
@@ -1610,11 +1608,18 @@
 	if (rc)
 		goto out1;
 
-	rc = rtnl_link_register(&geneve_link_ops);
+	rc = register_netdevice_notifier(&geneve_notifier_block);
 	if (rc)
 		goto out2;
 
+	rc = rtnl_link_register(&geneve_link_ops);
+	if (rc)
+		goto out3;
+
 	return 0;
+
+out3:
+	unregister_netdevice_notifier(&geneve_notifier_block);
 out2:
 	unregister_pernet_subsys(&geneve_net_ops);
 out1:
@@ -1625,6 +1630,7 @@
 static void __exit geneve_cleanup_module(void)
 {
 	rtnl_link_unregister(&geneve_link_ops);
+	unregister_netdevice_notifier(&geneve_notifier_block);
 	unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
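
Aside, not part of the patch: geneve now registers its GRO callbacks per UDP socket through setup_udp_tunnel_sock() instead of a global udp_add_offload(). A hedged sketch of that wiring for an arbitrary tunnel driver follows, assuming only the udp_tunnel_sock_cfg fields and callback signatures visible in the hunk above; the my_* names are placeholders.

#include <linux/skbuff.h>
#include <net/udp_tunnel.h>

static struct sk_buff **my_gro_receive(struct sock *sk, struct sk_buff **head,
				       struct sk_buff *skb)
{
	return NULL;	/* tunnel-specific GRO matching would go here */
}

static int my_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	return 0;	/* finish tunnel-specific GRO here */
}

static int my_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	return 0;	/* decapsulate; 0 means the skb was consumed */
}

static void my_tunnel_setup_sock(struct net *net, struct socket *sock,
				 void *priv)
{
	struct udp_tunnel_sock_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.sk_user_data = priv;
	cfg.encap_type = 1;
	cfg.encap_rcv = my_encap_recv;
	cfg.gro_receive = my_gro_receive;	/* struct sock * is now passed in */
	cfg.gro_complete = my_gro_complete;
	setup_udp_tunnel_sock(net, sock, &cfg);
}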
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
new file mode 100644
index 0000000..4e976a0
--- /dev/null
+++ b/drivers/net/gtp.c
@@ -0,0 +1,1375 @@
+/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
+ *
+ * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
+ * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * Author: Harald Welte <hwelte@sysmocom.de>
+ *	   Pablo Neira Ayuso <pablo@netfilter.org>
+ *	   Andreas Schultz <aschultz@travelping.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <linux/rculist.h>
+#include <linux/jhash.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/file.h>
+#include <linux/gtp.h>
+
+#include <net/net_namespace.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/icmp.h>
+#include <net/xfrm.h>
+#include <net/genetlink.h>
+#include <net/netns/generic.h>
+#include <net/gtp.h>
+
+/* An active session for the subscriber. */
+struct pdp_ctx {
+	struct hlist_node	hlist_tid;
+	struct hlist_node	hlist_addr;
+
+	union {
+		u64		tid;
+		struct {
+			u64	tid;
+			u16	flow;
+		} v0;
+		struct {
+			u32	i_tei;
+			u32	o_tei;
+		} v1;
+	} u;
+	u8			gtp_version;
+	u16			af;
+
+	struct in_addr		ms_addr_ip4;
+	struct in_addr		sgsn_addr_ip4;
+
+	atomic_t		tx_seq;
+	struct rcu_head		rcu_head;
+};
+
+/* One instance of the GTP device. */
+struct gtp_dev {
+	struct list_head	list;
+
+	struct socket		*sock0;
+	struct socket		*sock1u;
+
+	struct net		*net;
+	struct net_device	*dev;
+
+	unsigned int		hash_size;
+	struct hlist_head	*tid_hash;
+	struct hlist_head	*addr_hash;
+};
+
+static int gtp_net_id __read_mostly;
+
+struct gtp_net {
+	struct list_head gtp_dev_list;
+};
+
+static u32 gtp_h_initval;
+
+static inline u32 gtp0_hashfn(u64 tid)
+{
+	u32 *tid32 = (u32 *) &tid;
+	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
+}
+
+static inline u32 gtp1u_hashfn(u32 tid)
+{
+	return jhash_1word(tid, gtp_h_initval);
+}
+
+static inline u32 ipv4_hashfn(__be32 ip)
+{
+	return jhash_1word((__force u32)ip, gtp_h_initval);
+}
+
+/* Resolve a PDP context structure based on the 64bit TID. */
+static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
+{
+	struct hlist_head *head;
+	struct pdp_ctx *pdp;
+
+	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
+
+	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
+		if (pdp->gtp_version == GTP_V0 &&
+		    pdp->u.v0.tid == tid)
+			return pdp;
+	}
+	return NULL;
+}
+
+/* Resolve a PDP context structure based on the 32bit TEI. */
+static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
+{
+	struct hlist_head *head;
+	struct pdp_ctx *pdp;
+
+	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
+
+	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
+		if (pdp->gtp_version == GTP_V1 &&
+		    pdp->u.v1.i_tei == tid)
+			return pdp;
+	}
+	return NULL;
+}
+
+/* Resolve a PDP context based on IPv4 address of MS. */
+static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
+{
+	struct hlist_head *head;
+	struct pdp_ctx *pdp;
+
+	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];
+
+	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
+		if (pdp->af == AF_INET &&
+		    pdp->ms_addr_ip4.s_addr == ms_addr)
+			return pdp;
+	}
+
+	return NULL;
+}
+
+static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
+				  unsigned int hdrlen)
+{
+	struct iphdr *iph;
+
+	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
+		return false;
+
+	iph = (struct iphdr *)(skb->data + hdrlen);
+
+	return iph->saddr == pctx->ms_addr_ip4.s_addr;
+}
+
+/* Check if the inner IP source address in this packet is assigned to any
+ * existing mobile subscriber.
+ */
+static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
+			     unsigned int hdrlen)
+{
+	switch (ntohs(skb->protocol)) {
+	case ETH_P_IP:
+		return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
+	}
+	return false;
+}
+
+/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
+static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
+			       bool xnet)
+{
+	unsigned int hdrlen = sizeof(struct udphdr) +
+			      sizeof(struct gtp0_header);
+	struct gtp0_header *gtp0;
+	struct pdp_ctx *pctx;
+	int ret = 0;
+
+	if (!pskb_may_pull(skb, hdrlen))
+		return -1;
+
+	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+	if ((gtp0->flags >> 5) != GTP_V0)
+		return 1;
+
+	if (gtp0->type != GTP_TPDU)
+		return 1;
+
+	rcu_read_lock();
+	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
+	if (!pctx) {
+		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+		ret = -1;
+		goto out_rcu;
+	}
+
+	if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
+		netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
+		ret = -1;
+		goto out_rcu;
+	}
+	rcu_read_unlock();
+
+	/* Get rid of the GTP + UDP headers. */
+	return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+out_rcu:
+	rcu_read_unlock();
+	return ret;
+}
+
+static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
+				bool xnet)
+{
+	unsigned int hdrlen = sizeof(struct udphdr) +
+			      sizeof(struct gtp1_header);
+	struct gtp1_header *gtp1;
+	struct pdp_ctx *pctx;
+	int ret = 0;
+
+	if (!pskb_may_pull(skb, hdrlen))
+		return -1;
+
+	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+
+	if ((gtp1->flags >> 5) != GTP_V1)
+		return 1;
+
+	if (gtp1->type != GTP_TPDU)
+		return 1;
+
+	/* From 29.060: "This field shall be present if and only if any one or
+	 * more of the S, PN and E flags are set.".
+	 *
+	 * If any one of these flags is set, the optional 4-byte field
+	 * (sequence number, N-PDU number, next extension header type) is
+	 * present and must be accounted for in the header length.
+	 */
+	if (gtp1->flags & GTP1_F_MASK)
+		hdrlen += 4;
+
+	/* Make sure the header is large enough, including any extensions. */
+	if (!pskb_may_pull(skb, hdrlen))
+		return -1;
+
+	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+
+	rcu_read_lock();
+	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
+	if (!pctx) {
+		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+		ret = -1;
+		goto out_rcu;
+	}
+
+	if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
+		netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
+		ret = -1;
+		goto out_rcu;
+	}
+	rcu_read_unlock();
+
+	/* Get rid of the GTP + UDP headers. */
+	return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+out_rcu:
+	rcu_read_unlock();
+	return ret;
+}
+
+static void gtp_encap_disable(struct gtp_dev *gtp)
+{
+	if (gtp->sock0 && gtp->sock0->sk) {
+		udp_sk(gtp->sock0->sk)->encap_type = 0;
+		rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
+	}
+	if (gtp->sock1u && gtp->sock1u->sk) {
+		udp_sk(gtp->sock1u->sk)->encap_type = 0;
+		rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
+	}
+
+	gtp->sock0 = NULL;
+	gtp->sock1u = NULL;
+}
+
+static void gtp_encap_destroy(struct sock *sk)
+{
+	struct gtp_dev *gtp;
+
+	gtp = rcu_dereference_sk_user_data(sk);
+	if (gtp)
+		gtp_encap_disable(gtp);
+}
+
+/* UDP encapsulation receive handler. See net/ipv4/udp.c.
+ * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
+ */
+static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+	struct pcpu_sw_netstats *stats;
+	struct gtp_dev *gtp;
+	bool xnet;
+	int ret;
+
+	gtp = rcu_dereference_sk_user_data(sk);
+	if (!gtp)
+		return 1;
+
+	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+
+	xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+
+	switch (udp_sk(sk)->encap_type) {
+	case UDP_ENCAP_GTP0:
+		netdev_dbg(gtp->dev, "received GTP0 packet\n");
+		ret = gtp0_udp_encap_recv(gtp, skb, xnet);
+		break;
+	case UDP_ENCAP_GTP1U:
+		netdev_dbg(gtp->dev, "received GTP1U packet\n");
+		ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
+		break;
+	default:
+		ret = -1; /* Shouldn't happen. */
+	}
+
+	switch (ret) {
+	case 1:
+		netdev_dbg(gtp->dev, "pass up to the process\n");
+		return 1;
+	case 0:
+		netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
+		break;
+	case -1:
+		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
+		kfree_skb(skb);
+		return 0;
+	}
+
+	/* Now that the UDP and the GTP header have been removed, set up the
+	 * new network header. This is required by the upper layer to
+	 * calculate the transport header.
+	 */
+	skb_reset_network_header(skb);
+
+	skb->dev = gtp->dev;
+
+	stats = this_cpu_ptr(gtp->dev->tstats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+	u64_stats_update_end(&stats->syncp);
+
+	netif_rx(skb);
+
+	return 0;
+}
+
+static int gtp_dev_init(struct net_device *dev)
+{
+	struct gtp_dev *gtp = netdev_priv(dev);
+
+	gtp->dev = dev;
+
+	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void gtp_dev_uninit(struct net_device *dev)
+{
+	struct gtp_dev *gtp = netdev_priv(dev);
+
+	gtp_encap_disable(gtp);
+	free_percpu(dev->tstats);
+}
+
+static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
+					   const struct sock *sk, __be32 daddr)
+{
+	memset(fl4, 0, sizeof(*fl4));
+	fl4->flowi4_oif		= sk->sk_bound_dev_if;
+	fl4->daddr		= daddr;
+	fl4->saddr		= inet_sk(sk)->inet_saddr;
+	fl4->flowi4_tos		= RT_CONN_FLAGS(sk);
+	fl4->flowi4_proto	= sk->sk_protocol;
+
+	return ip_route_output_key(net, fl4);
+}
+
+static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+{
+	int payload_len = skb->len;
+	struct gtp0_header *gtp0;
+
+	gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0));
+
+	gtp0->flags	= 0x1e; /* v0, GTP-non-prime. */
+	gtp0->type	= GTP_TPDU;
+	gtp0->length	= htons(payload_len);
+	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
+	gtp0->flow	= htons(pctx->u.v0.flow);
+	gtp0->number	= 0xff;
+	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
+	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
+}
+
+static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+{
+	int payload_len = skb->len;
+	struct gtp1_header *gtp1;
+
+	gtp1 = (struct gtp1_header *) skb_push(skb, sizeof(*gtp1));
+
+	/* Bits    8  7  6  5  4  3  2	1
+	 *	  +--+--+--+--+--+--+--+--+
+	 *	  |version |PT| 1| E| S|PN|
+	 *	  +--+--+--+--+--+--+--+--+
+	 *	    0  0  1  1	1  0  0  0
+	 */
+	gtp1->flags	= 0x38; /* v1, GTP-non-prime. */
+	gtp1->type	= GTP_TPDU;
+	gtp1->length	= htons(payload_len);
+	gtp1->tid	= htonl(pctx->u.v1.o_tei);
+
+	/* TODO: Support for extension header, sequence number and N-PDU.
+	 *	 Update the length field if any of them is available.
+	 */
+}
+
+struct gtp_pktinfo {
+	struct sock		*sk;
+	struct iphdr		*iph;
+	struct flowi4		fl4;
+	struct rtable		*rt;
+	struct pdp_ctx		*pctx;
+	struct net_device	*dev;
+	__be16			gtph_port;
+};
+
+static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
+{
+	switch (pktinfo->pctx->gtp_version) {
+	case GTP_V0:
+		pktinfo->gtph_port = htons(GTP0_PORT);
+		gtp0_push_header(skb, pktinfo->pctx);
+		break;
+	case GTP_V1:
+		pktinfo->gtph_port = htons(GTP1U_PORT);
+		gtp1_push_header(skb, pktinfo->pctx);
+		break;
+	}
+}
+
+static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
+					struct sock *sk, struct iphdr *iph,
+					struct pdp_ctx *pctx, struct rtable *rt,
+					struct flowi4 *fl4,
+					struct net_device *dev)
+{
+	pktinfo->sk	= sk;
+	pktinfo->iph	= iph;
+	pktinfo->pctx	= pctx;
+	pktinfo->rt	= rt;
+	pktinfo->fl4	= *fl4;
+	pktinfo->dev	= dev;
+}
+
+static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+			     struct gtp_pktinfo *pktinfo)
+{
+	struct gtp_dev *gtp = netdev_priv(dev);
+	struct pdp_ctx *pctx;
+	struct rtable *rt;
+	struct flowi4 fl4;
+	struct iphdr *iph;
+	struct sock *sk;
+	__be16 df;
+	int mtu;
+
+	/* Read the IP destination address and resolve the PDP context.
+	 * Prepend PDP header with TEI/TID from PDP ctx.
+	 */
+	iph = ip_hdr(skb);
+	pctx = ipv4_pdp_find(gtp, iph->daddr);
+	if (!pctx) {
+		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
+			   &iph->daddr);
+		return -ENOENT;
+	}
+	netdev_dbg(dev, "found PDP context %p\n", pctx);
+
+	switch (pctx->gtp_version) {
+	case GTP_V0:
+		if (gtp->sock0)
+			sk = gtp->sock0->sk;
+		else
+			sk = NULL;
+		break;
+	case GTP_V1:
+		if (gtp->sock1u)
+			sk = gtp->sock1u->sk;
+		else
+			sk = NULL;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	if (!sk) {
+		netdev_dbg(dev, "no userspace socket is available, skip\n");
+		return -ENOENT;
+	}
+
+	rt = ip4_route_output_gtp(sock_net(sk), &fl4, sk,
+				  pctx->sgsn_addr_ip4.s_addr);
+	if (IS_ERR(rt)) {
+		netdev_dbg(dev, "no route to SGSN %pI4\n",
+			   &pctx->sgsn_addr_ip4.s_addr);
+		dev->stats.tx_carrier_errors++;
+		goto err;
+	}
+
+	if (rt->dst.dev == dev) {
+		netdev_dbg(dev, "circular route to SGSN %pI4\n",
+			   &pctx->sgsn_addr_ip4.s_addr);
+		dev->stats.collisions++;
+		goto err_rt;
+	}
+
+	skb_dst_drop(skb);
+
+	/* This is similar to tnl_update_pmtu(). */
+	df = iph->frag_off;
+	if (df) {
+		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
+			sizeof(struct iphdr) - sizeof(struct udphdr);
+		switch (pctx->gtp_version) {
+		case GTP_V0:
+			mtu -= sizeof(struct gtp0_header);
+			break;
+		case GTP_V1:
+			mtu -= sizeof(struct gtp1_header);
+			break;
+		}
+	} else {
+		mtu = dst_mtu(&rt->dst);
+	}
+
+	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+
+	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
+	    mtu < ntohs(iph->tot_len)) {
+		netdev_dbg(dev, "packet too big, fragmentation needed\n");
+		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+			  htonl(mtu));
+		goto err_rt;
+	}
+
+	gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
+	gtp_push_header(skb, pktinfo);
+
+	return 0;
+err_rt:
+	ip_rt_put(rt);
+err:
+	return -EBADMSG;
+}
+
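+/* Transmit path: resolve the PDP context from the inner IP destination,
+ * push the version-specific GTP header and hand the result to the UDP
+ * tunnel infrastructure, which encapsulates it towards the SGSN address
+ * stored in the context.
+ */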
+static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	unsigned int proto = ntohs(skb->protocol);
+	struct gtp_pktinfo pktinfo;
+	int err;
+
+	/* Ensure there is sufficient headroom. */
+	if (skb_cow_head(skb, dev->needed_headroom))
+		goto tx_err;
+
+	skb_reset_inner_headers(skb);
+
+	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
+	rcu_read_lock();
+	switch (proto) {
+	case ETH_P_IP:
+		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+	rcu_read_unlock();
+
+	if (err < 0)
+		goto tx_err;
+
+	switch (proto) {
+	case ETH_P_IP:
+		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
+			   &pktinfo.iph->saddr, &pktinfo.iph->daddr);
+		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
+				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
+				    pktinfo.iph->tos,
+				    ip4_dst_hoplimit(&pktinfo.rt->dst),
+				    htons(IP_DF),
+				    pktinfo.gtph_port, pktinfo.gtph_port,
+				    true, false);
+		break;
+	}
+
+	return NETDEV_TX_OK;
+tx_err:
+	dev->stats.tx_errors++;
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops gtp_netdev_ops = {
+	.ndo_init		= gtp_dev_init,
+	.ndo_uninit		= gtp_dev_uninit,
+	.ndo_start_xmit		= gtp_dev_xmit,
+	.ndo_get_stats64	= ip_tunnel_get_stats64,
+};
+
+static void gtp_link_setup(struct net_device *dev)
+{
+	dev->netdev_ops		= &gtp_netdev_ops;
+	dev->destructor		= free_netdev;
+
+	dev->hard_header_len = 0;
+	dev->addr_len = 0;
+
+	/* Zero header length. */
+	dev->type = ARPHRD_NONE;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+
+	dev->priv_flags	|= IFF_NO_QUEUE;
+	dev->features	|= NETIF_F_LLTX;
+	netif_keep_dst(dev);
+
+	/* Assume the largest header, i.e. GTPv0. */
+	dev->needed_headroom	= LL_MAX_HEADER +
+				  sizeof(struct iphdr) +
+				  sizeof(struct udphdr) +
+				  sizeof(struct gtp0_header);
+}
+
+static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
+static void gtp_hashtable_free(struct gtp_dev *gtp);
+static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+			    int fd_gtp0, int fd_gtp1, struct net *src_net);
+
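+/* Rough picture of device creation: a control daemon (e.g. one using the
+ * libgtpnl helpers) opens two UDP sockets bound to the GTPv0 and GTPv1-U
+ * ports (3386 and 2152) and passes both file descriptors via IFLA_GTP_FD0
+ * and IFLA_GTP_FD1 when creating the link; IFLA_GTP_PDP_HASHSIZE is
+ * optional and defaults to 1024 entries below.
+ */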
+static int gtp_newlink(struct net *src_net, struct net_device *dev,
+			struct nlattr *tb[], struct nlattr *data[])
+{
+	int hashsize, err, fd0, fd1;
+	struct gtp_dev *gtp;
+	struct gtp_net *gn;
+
+	if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
+		return -EINVAL;
+
+	gtp = netdev_priv(dev);
+
+	fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
+	fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
+
+	err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+	if (err < 0)
+		goto out_err;
+
+	if (!data[IFLA_GTP_PDP_HASHSIZE])
+		hashsize = 1024;
+	else
+		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
+
+	err = gtp_hashtable_new(gtp, hashsize);
+	if (err < 0)
+		goto out_encap;
+
+	err = register_netdevice(dev);
+	if (err < 0) {
+		netdev_dbg(dev, "failed to register new netdev %d\n", err);
+		goto out_hashtable;
+	}
+
+	gn = net_generic(dev_net(dev), gtp_net_id);
+	list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+
+	netdev_dbg(dev, "registered new GTP interface\n");
+
+	return 0;
+
+out_hashtable:
+	gtp_hashtable_free(gtp);
+out_encap:
+	gtp_encap_disable(gtp);
+out_err:
+	return err;
+}
+
+static void gtp_dellink(struct net_device *dev, struct list_head *head)
+{
+	struct gtp_dev *gtp = netdev_priv(dev);
+
+	gtp_encap_disable(gtp);
+	gtp_hashtable_free(gtp);
+	list_del_rcu(&gtp->list);
+	unregister_netdevice_queue(dev, head);
+}
+
+static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
+	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
+	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
+	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
+};
+
+static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	if (!data)
+		return -EINVAL;
+
+	return 0;
+}
+
+static size_t gtp_get_size(const struct net_device *dev)
+{
+	return nla_total_size(sizeof(__u32));	/* IFLA_GTP_PDP_HASHSIZE */
+}
+
+static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct gtp_dev *gtp = netdev_priv(dev);
+
+	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops gtp_link_ops __read_mostly = {
+	.kind		= "gtp",
+	.maxtype	= IFLA_GTP_MAX,
+	.policy		= gtp_policy,
+	.priv_size	= sizeof(struct gtp_dev),
+	.setup		= gtp_link_setup,
+	.validate	= gtp_validate,
+	.newlink	= gtp_newlink,
+	.dellink	= gtp_dellink,
+	.get_size	= gtp_get_size,
+	.fill_info	= gtp_fill_info,
+};
+
+static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
+{
+	struct net *net;
+
+	/* Examine the link attributes and figure out which network namespace
+	 * we are talking about.
+	 */
+	if (tb[GTPA_NET_NS_FD])
+		net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
+	else
+		net = get_net(src_net);
+
+	return net;
+}
+
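+/* PDP contexts are indexed by two hash tables: addr_hash is keyed by the
+ * mobile subscriber IP address (used on transmit and for GTPA_MS_ADDRESS
+ * lookups), tid_hash by the GTPv0 TID or GTPv1 incoming TEID (used by the
+ * receive path and the netlink lookups).
+ */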
+static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
+{
+	int i;
+
+	gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+	if (gtp->addr_hash == NULL)
+		return -ENOMEM;
+
+	gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+	if (gtp->tid_hash == NULL)
+		goto err1;
+
+	gtp->hash_size = hsize;
+
+	for (i = 0; i < hsize; i++) {
+		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
+		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
+	}
+	return 0;
+err1:
+	kfree(gtp->addr_hash);
+	return -ENOMEM;
+}
+
+static void gtp_hashtable_free(struct gtp_dev *gtp)
+{
+	struct pdp_ctx *pctx;
+	int i;
+
+	for (i = 0; i < gtp->hash_size; i++) {
+		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
+			hlist_del_rcu(&pctx->hlist_tid);
+			hlist_del_rcu(&pctx->hlist_addr);
+			kfree_rcu(pctx, rcu_head);
+		}
+	}
+	synchronize_rcu();
+	kfree(gtp->addr_hash);
+	kfree(gtp->tid_hash);
+}
+
+static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+			    int fd_gtp0, int fd_gtp1, struct net *src_net)
+{
+	struct udp_tunnel_sock_cfg tuncfg = {NULL};
+	struct socket *sock0, *sock1u;
+	int err;
+
+	netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);
+
+	sock0 = sockfd_lookup(fd_gtp0, &err);
+	if (sock0 == NULL) {
+		netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
+		return -ENOENT;
+	}
+
+	if (sock0->sk->sk_protocol != IPPROTO_UDP) {
+		netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
+		err = -EINVAL;
+		goto err1;
+	}
+
+	sock1u = sockfd_lookup(fd_gtp1, &err);
+	if (sock1u == NULL) {
+		netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
+		err = -ENOENT;
+		goto err1;
+	}
+
+	if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
+		netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
+		err = -EINVAL;
+		goto err2;
+	}
+
+	netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);
+
+	gtp->sock0 = sock0;
+	gtp->sock1u = sock1u;
+	gtp->net = src_net;
+
+	tuncfg.sk_user_data = gtp;
+	tuncfg.encap_rcv = gtp_encap_recv;
+	tuncfg.encap_destroy = gtp_encap_destroy;
+
+	tuncfg.encap_type = UDP_ENCAP_GTP0;
+	setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);
+
+	tuncfg.encap_type = UDP_ENCAP_GTP1U;
+	setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);
+
+	err = 0;
+err2:
+	sockfd_put(sock1u);
+err1:
+	sockfd_put(sock0);
+	return err;
+}
+
+static struct net_device *gtp_find_dev(struct net *net, int ifindex)
+{
+	struct gtp_net *gn = net_generic(net, gtp_net_id);
+	struct gtp_dev *gtp;
+
+	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+		if (ifindex == gtp->dev->ifindex)
+			return gtp->dev;
+	}
+	return NULL;
+}
+
+static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
+{
+	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
+	pctx->af = AF_INET;
+	pctx->sgsn_addr_ip4.s_addr =
+		nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]);
+	pctx->ms_addr_ip4.s_addr =
+		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+	switch (pctx->gtp_version) {
+	case GTP_V0:
+		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
+		 * label needs to be the same for uplink and downlink packets,
+		 * so let's annotate this.
+		 */
+		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
+		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
+		break;
+	case GTP_V1:
+		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
+		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
+		break;
+	default:
+		break;
+	}
+}
+
+static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
+{
+	struct gtp_dev *gtp = netdev_priv(dev);
+	u32 hash_ms, hash_tid = 0;
+	struct pdp_ctx *pctx;
+	bool found = false;
+	__be32 ms_addr;
+
+	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+
+	hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
+		if (pctx->ms_addr_ip4.s_addr == ms_addr) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found) {
+		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
+		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
+			return -EOPNOTSUPP;
+
+		ipv4_pdp_fill(pctx, info);
+
+		if (pctx->gtp_version == GTP_V0)
+			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
+				   pctx->u.v0.tid, pctx);
+		else if (pctx->gtp_version == GTP_V1)
+			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
+				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+
+		return 0;
+
+	}
+
+	pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
+	if (pctx == NULL)
+		return -ENOMEM;
+
+	ipv4_pdp_fill(pctx, info);
+	atomic_set(&pctx->tx_seq, 0);
+
+	switch (pctx->gtp_version) {
+	case GTP_V0:
+		/* TS 09.60: "The flow label identifies unambiguously a GTP
+		 * flow.". We use the tid for this instead; I cannot find a
+		 * situation in which this doesn't unambiguously identify the
+		 * PDP context.
+		 */
+		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
+		break;
+	case GTP_V1:
+		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
+		break;
+	}
+
+	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
+	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
+
+	switch (pctx->gtp_version) {
+	case GTP_V0:
+		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
+			   pctx->u.v0.tid, &pctx->sgsn_addr_ip4,
+			   &pctx->ms_addr_ip4, pctx);
+		break;
+	case GTP_V1:
+		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
+			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
+			   &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx);
+		break;
+	}
+
+	return 0;
+}
+
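+/* GTP_CMD_NEWPDP requests must carry GTPA_VERSION, GTPA_LINK,
+ * GTPA_SGSN_ADDRESS and GTPA_MS_ADDRESS, plus GTPA_TID and GTPA_FLOW for
+ * GTPv0 or GTPA_I_TEI and GTPA_O_TEI for GTPv1.
+ */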
+static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_device *dev;
+	struct net *net;
+
+	if (!info->attrs[GTPA_VERSION] ||
+	    !info->attrs[GTPA_LINK] ||
+	    !info->attrs[GTPA_SGSN_ADDRESS] ||
+	    !info->attrs[GTPA_MS_ADDRESS])
+		return -EINVAL;
+
+	switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+	case GTP_V0:
+		if (!info->attrs[GTPA_TID] ||
+		    !info->attrs[GTPA_FLOW])
+			return -EINVAL;
+		break;
+	case GTP_V1:
+		if (!info->attrs[GTPA_I_TEI] ||
+		    !info->attrs[GTPA_O_TEI])
+			return -EINVAL;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+	if (IS_ERR(net))
+		return PTR_ERR(net);
+
+	/* Check if there's an existing gtpX device to configure */
+	dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+	if (dev == NULL) {
+		put_net(net);
+		return -ENODEV;
+	}
+	put_net(net);
+
+	return ipv4_pdp_add(dev, info);
+}
+
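+/* GTP_CMD_DELPDP removes a context looked up by GTPA_TID (GTPv0) or
+ * GTPA_I_TEI (GTPv1) on the device given by GTPA_LINK.
+ */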
+static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net_device *dev;
+	struct pdp_ctx *pctx;
+	struct gtp_dev *gtp;
+	struct net *net;
+
+	if (!info->attrs[GTPA_VERSION] ||
+	    !info->attrs[GTPA_LINK])
+		return -EINVAL;
+
+	net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+	if (IS_ERR(net))
+		return PTR_ERR(net);
+
+	/* Check if there's an existing gtpX device to configure */
+	dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+	if (dev == NULL) {
+		put_net(net);
+		return -ENODEV;
+	}
+	put_net(net);
+
+	gtp = netdev_priv(dev);
+
+	switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+	case GTP_V0:
+		if (!info->attrs[GTPA_TID])
+			return -EINVAL;
+		pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
+		break;
+	case GTP_V1:
+		if (!info->attrs[GTPA_I_TEI])
+			return -EINVAL;
+		pctx = gtp1_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_I_TEI]));
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (pctx == NULL)
+		return -ENOENT;
+
+	if (pctx->gtp_version == GTP_V0)
+		netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
+			   pctx->u.v0.tid, pctx);
+	else if (pctx->gtp_version == GTP_V1)
+		netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
+			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+
+	hlist_del_rcu(&pctx->hlist_tid);
+	hlist_del_rcu(&pctx->hlist_addr);
+	kfree_rcu(pctx, rcu_head);
+
+	return 0;
+}
+
+static struct genl_family gtp_genl_family = {
+	.id		= GENL_ID_GENERATE,
+	.name		= "gtp",
+	.version	= 0,
+	.hdrsize	= 0,
+	.maxattr	= GTPA_MAX,
+	.netnsok	= true,
+};
+
+static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
+			      u32 type, struct pdp_ctx *pctx)
+{
+	void *genlh;
+
+	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
+			    type);
+	if (genlh == NULL)
+		goto nlmsg_failure;
+
+	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
+	    nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) ||
+	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
+		goto nla_put_failure;
+
+	switch (pctx->gtp_version) {
+	case GTP_V0:
+		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
+		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
+			goto nla_put_failure;
+		break;
+	case GTP_V1:
+		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
+		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
+			goto nla_put_failure;
+		break;
+	}
+	genlmsg_end(skb, genlh);
+	return 0;
+
+nlmsg_failure:
+nla_put_failure:
+	genlmsg_cancel(skb, genlh);
+	return -EMSGSIZE;
+}
+
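+/* GTP_CMD_GETPDP looks up a single context by GTPv0 TID, GTPv1 incoming
+ * TEID or mobile subscriber address, and unicasts it back to the requester.
+ */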
+static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+	struct pdp_ctx *pctx = NULL;
+	struct net_device *dev;
+	struct sk_buff *skb2;
+	struct gtp_dev *gtp;
+	u32 gtp_version;
+	struct net *net;
+	int err;
+
+	if (!info->attrs[GTPA_VERSION] ||
+	    !info->attrs[GTPA_LINK])
+		return -EINVAL;
+
+	gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
+	switch (gtp_version) {
+	case GTP_V0:
+	case GTP_V1:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+	if (IS_ERR(net))
+		return PTR_ERR(net);
+
+	/* Check if there's an existing gtpX device to configure */
+	dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+	if (dev == NULL) {
+		put_net(net);
+		return -ENODEV;
+	}
+	put_net(net);
+
+	gtp = netdev_priv(dev);
+
+	rcu_read_lock();
+	if (gtp_version == GTP_V0 &&
+	    info->attrs[GTPA_TID]) {
+		u64 tid = nla_get_u64(info->attrs[GTPA_TID]);
+
+		pctx = gtp0_pdp_find(gtp, tid);
+	} else if (gtp_version == GTP_V1 &&
+		 info->attrs[GTPA_I_TEI]) {
+		u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);
+
+		pctx = gtp1_pdp_find(gtp, tid);
+	} else if (info->attrs[GTPA_MS_ADDRESS]) {
+		__be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+		pctx = ipv4_pdp_find(gtp, ip);
+	}
+
+	if (pctx == NULL) {
+		err = -ENOENT;
+		goto err_unlock;
+	}
+
+	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	if (skb2 == NULL) {
+		err = -ENOMEM;
+		goto err_unlock;
+	}
+
+	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
+				 info->snd_seq, info->nlhdr->nlmsg_type, pctx);
+	if (err < 0)
+		goto err_unlock_free;
+
+	rcu_read_unlock();
+	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);
+
+err_unlock_free:
+	kfree_skb(skb2);
+err_unlock:
+	rcu_read_unlock();
+	return err;
+}
+
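+/* Dump resumption state kept in the netlink callback: args[0] is the hash
+ * bucket to continue from, args[1] the last dumped tid, args[2] the last
+ * visited device and args[4] flags a completed dump.
+ */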
+static int gtp_genl_dump_pdp(struct sk_buff *skb,
+				struct netlink_callback *cb)
+{
+	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+	struct net *net = sock_net(skb->sk);
+	struct gtp_net *gn = net_generic(net, gtp_net_id);
+	unsigned long tid = cb->args[1];
+	int i, k = cb->args[0], ret;
+	struct pdp_ctx *pctx;
+
+	if (cb->args[4])
+		return 0;
+
+	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+		if (last_gtp && last_gtp != gtp)
+			continue;
+		else
+			last_gtp = NULL;
+
+		for (i = k; i < gtp->hash_size; i++) {
+			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
+				if (tid && tid != pctx->u.tid)
+					continue;
+				else
+					tid = 0;
+
+				ret = gtp_genl_fill_info(skb,
+							 NETLINK_CB(cb->skb).portid,
+							 cb->nlh->nlmsg_seq,
+							 cb->nlh->nlmsg_type, pctx);
+				if (ret < 0) {
+					cb->args[0] = i;
+					cb->args[1] = pctx->u.tid;
+					cb->args[2] = (unsigned long)gtp;
+					goto out;
+				}
+			}
+		}
+	}
+	cb->args[4] = 1;
+out:
+	return skb->len;
+}
+
+static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
+	[GTPA_LINK]		= { .type = NLA_U32, },
+	[GTPA_VERSION]		= { .type = NLA_U32, },
+	[GTPA_TID]		= { .type = NLA_U64, },
+	[GTPA_SGSN_ADDRESS]	= { .type = NLA_U32, },
+	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
+	[GTPA_FLOW]		= { .type = NLA_U16, },
+	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
+	[GTPA_I_TEI]		= { .type = NLA_U32, },
+	[GTPA_O_TEI]		= { .type = NLA_U32, },
+};
+
+static const struct genl_ops gtp_genl_ops[] = {
+	{
+		.cmd = GTP_CMD_NEWPDP,
+		.doit = gtp_genl_new_pdp,
+		.policy = gtp_genl_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = GTP_CMD_DELPDP,
+		.doit = gtp_genl_del_pdp,
+		.policy = gtp_genl_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = GTP_CMD_GETPDP,
+		.doit = gtp_genl_get_pdp,
+		.dumpit = gtp_genl_dump_pdp,
+		.policy = gtp_genl_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+};
+
+static int __net_init gtp_net_init(struct net *net)
+{
+	struct gtp_net *gn = net_generic(net, gtp_net_id);
+
+	INIT_LIST_HEAD(&gn->gtp_dev_list);
+	return 0;
+}
+
+static void __net_exit gtp_net_exit(struct net *net)
+{
+	struct gtp_net *gn = net_generic(net, gtp_net_id);
+	struct gtp_dev *gtp;
+	LIST_HEAD(list);
+
+	rtnl_lock();
+	list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+		gtp_dellink(gtp->dev, &list);
+
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
+}
+
+static struct pernet_operations gtp_net_ops = {
+	.init	= gtp_net_init,
+	.exit	= gtp_net_exit,
+	.id	= &gtp_net_id,
+	.size	= sizeof(struct gtp_net),
+};
+
+static int __init gtp_init(void)
+{
+	int err;
+
+	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
+
+	err = rtnl_link_register(&gtp_link_ops);
+	if (err < 0)
+		goto error_out;
+
+	err = genl_register_family_with_ops(&gtp_genl_family, gtp_genl_ops);
+	if (err < 0)
+		goto unreg_rtnl_link;
+
+	err = register_pernet_subsys(&gtp_net_ops);
+	if (err < 0)
+		goto unreg_genl_family;
+
+	pr_info("GTP module loaded (pdp ctx size %Zd bytes)\n",
+		sizeof(struct pdp_ctx));
+	return 0;
+
+unreg_genl_family:
+	genl_unregister_family(&gtp_genl_family);
+unreg_rtnl_link:
+	rtnl_link_unregister(&gtp_link_ops);
+error_out:
+	pr_err("error loading GTP module\n");
+	return err;
+}
+late_initcall(gtp_init);
+
+static void __exit gtp_fini(void)
+{
+	unregister_pernet_subsys(&gtp_net_ops);
+	genl_unregister_family(&gtp_genl_family);
+	rtnl_link_unregister(&gtp_link_ops);
+
+	pr_info("GTP module unloaded\n");
+}
+module_exit(gtp_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
+MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("gtp");
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 72c9f1f..78dbc44 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -635,10 +635,10 @@
 
 #ifdef __i386__
 #include <asm/msr.h>
-#define GETTICK(x)                                                \
-({                                                                \
-	if (cpu_has_tsc)                                          \
-		x = (unsigned int)rdtsc();		  \
+#define GETTICK(x)						\
+({								\
+	if (boot_cpu_has(X86_FEATURE_TSC))			\
+		x = (unsigned int)rdtsc();			\
 })
 #else /* __i386__ */
 #define GETTICK(x)
@@ -780,8 +780,10 @@
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
-	if (bc->skb)
-		return NETDEV_TX_LOCKED;
+	if (bc->skb) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
 	/* strip KISS byte */
 	if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) {
 		dev_kfree_skb(skb);
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 49fe59b..4bad0b8 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -412,8 +412,10 @@
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
-	if (sm->skb)
-		return NETDEV_TX_LOCKED;
+	if (sm->skb) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
 	netif_stop_queue(dev);
 	sm->skb = skb;
 	return NETDEV_TX_OK;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 85828f1..1dfe230 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -519,7 +519,7 @@
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += actual;
 
-	ax->dev->trans_start = jiffies;
+	netif_trans_update(ax->dev);
 	ax->xleft = count - actual;
 	ax->xhead = ax->xbuff + actual;
 }
@@ -542,7 +542,7 @@
 		 * May be we must check transmitter timeout here ?
 		 *      14 Oct 1994 Dmitry Gorodchanin.
 		 */
-		if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
+		if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
 			/* 20 sec timeout not reached */
 			return NETDEV_TX_BUSY;
 		}
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index ce88df3..b808316 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1669,7 +1669,7 @@
 		dev_kfree_skb(skb_del);
 	}
 	skb_queue_tail(&scc->tx_queue, skb);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	
 
 	/*
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1a4729c..aaff07c 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -601,7 +601,7 @@
 		return ax25_ip_xmit(skb);
 
 	skb_queue_tail(&yp->send_queue, skb);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 8b3bd8e..c270c5a 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -158,7 +158,7 @@
 };
 
 struct rndis_device {
-	struct netvsc_device *net_dev;
+	struct net_device *ndev;
 
 	enum rndis_device_state state;
 	bool link_state;
@@ -202,6 +202,8 @@
 int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
 int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
 
+void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
+
 #define NVSP_INVALID_PROTOCOL_VERSION	((u32)0xFFFFFFFF)
 
 #define NVSP_PROTOCOL_VERSION_1		2
@@ -641,10 +643,18 @@
 	u32 event;
 };
 
+struct garp_wrk {
+	struct work_struct dwrk;
+	struct net_device *netdev;
+	struct netvsc_device *netvsc_dev;
+};
+
 /* The context of the netvsc device  */
 struct net_device_context {
 	/* point back to our device context */
 	struct hv_device *device_ctx;
+	/* netvsc_device */
+	struct netvsc_device *nvdev;
 	/* reconfigure work */
 	struct delayed_work dwork;
 	/* last reconfig time */
@@ -656,6 +666,7 @@
 
 	struct work_struct work;
 	u32 msg_enable; /* debug level */
+	struct garp_wrk gwrk;
 
 	struct netvsc_stats __percpu *tx_stats;
 	struct netvsc_stats __percpu *rx_stats;
@@ -663,17 +674,17 @@
 	/* Ethtool settings */
 	u8 duplex;
 	u32 speed;
+
+	/* the device is going away */
+	bool start_remove;
 };
 
 /* Per netvsc device */
 struct netvsc_device {
-	struct hv_device *dev;
-
 	u32 nvsp_version;
 
 	atomic_t num_outstanding_sends;
 	wait_queue_head_t wait_drain;
-	bool start_remove;
 	bool destroy;
 
 	/* Receive buffer allocated by us but manages by NetVSP */
@@ -699,8 +710,6 @@
 	struct nvsp_message revoke_packet;
 	/* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
 
-	struct net_device *ndev;
-
 	struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
 	u32 send_table[VRSS_SEND_TAB_SIZE];
 	u32 max_chn;
@@ -723,13 +732,15 @@
 	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
 	u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-	/* The net device context */
-	struct net_device_context *nd_ctx;
-
 	/* 1: allocated, serial number is valid. 0: not allocated */
 	u32 vf_alloc;
 	/* Serial number of the VF to team with */
 	u32 vf_serial;
+	atomic_t open_cnt;
+	/* State to manage the associated VF interface. */
+	bool vf_inject;
+	struct net_device *vf_netdev;
+	atomic_t vf_use_cnt;
 };
 
 /* NdisInitialize message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index ec313fc..719cb35 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -33,11 +33,36 @@
 
 #include "hyperv_net.h"
 
+/*
+ * Switch the data path from the synthetic interface to the VF
+ * interface.
+ */
+void netvsc_switch_datapath(struct net_device *ndev, bool vf)
+{
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct hv_device *dev = net_device_ctx->device_ctx;
+	struct netvsc_device *nv_dev = net_device_ctx->nvdev;
+	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
 
-static struct netvsc_device *alloc_net_device(struct hv_device *device)
+	memset(init_pkt, 0, sizeof(struct nvsp_message));
+	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
+	if (vf)
+		init_pkt->msg.v4_msg.active_dp.active_datapath =
+			NVSP_DATAPATH_VF;
+	else
+		init_pkt->msg.v4_msg.active_dp.active_datapath =
+			NVSP_DATAPATH_SYNTHETIC;
+
+	vmbus_sendpacket(dev->channel, init_pkt,
+			       sizeof(struct nvsp_message),
+			       (unsigned long)init_pkt,
+			       VM_PKT_DATA_INBAND, 0);
+}
+
+
+static struct netvsc_device *alloc_net_device(void)
 {
 	struct netvsc_device *net_device;
-	struct net_device *ndev = hv_get_drvdata(device);
 
 	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
 	if (!net_device)
@@ -50,14 +75,15 @@
 	}
 
 	init_waitqueue_head(&net_device->wait_drain);
-	net_device->start_remove = false;
 	net_device->destroy = false;
-	net_device->dev = device;
-	net_device->ndev = ndev;
+	atomic_set(&net_device->open_cnt, 0);
+	atomic_set(&net_device->vf_use_cnt, 0);
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
-	hv_set_drvdata(device, net_device);
+	net_device->vf_netdev = NULL;
+	net_device->vf_inject = false;
+
 	return net_device;
 }
 
@@ -69,9 +95,10 @@
 
 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 {
-	struct netvsc_device *net_device;
+	struct net_device *ndev = hv_get_drvdata(device);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *net_device = net_device_ctx->nvdev;
 
-	net_device = hv_get_drvdata(device);
 	if (net_device && net_device->destroy)
 		net_device = NULL;
 
@@ -80,9 +107,9 @@
 
 static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
 {
-	struct netvsc_device *net_device;
-
-	net_device = hv_get_drvdata(device);
+	struct net_device *ndev = hv_get_drvdata(device);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *net_device = net_device_ctx->nvdev;
 
 	if (!net_device)
 		goto get_in_err;
@@ -96,11 +123,13 @@
 }
 
 
-static int netvsc_destroy_buf(struct netvsc_device *net_device)
+static int netvsc_destroy_buf(struct hv_device *device)
 {
 	struct nvsp_message *revoke_packet;
 	int ret = 0;
-	struct net_device *ndev = net_device->ndev;
+	struct net_device *ndev = hv_get_drvdata(device);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *net_device = net_device_ctx->nvdev;
 
 	/*
 	 * If we got a section count, it means we received a
@@ -118,7 +147,7 @@
 		revoke_packet->msg.v1_msg.
 		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
 
-		ret = vmbus_sendpacket(net_device->dev->channel,
+		ret = vmbus_sendpacket(device->channel,
 				       revoke_packet,
 				       sizeof(struct nvsp_message),
 				       (unsigned long)revoke_packet,
@@ -136,8 +165,8 @@
 
 	/* Teardown the gpadl on the vsp end */
 	if (net_device->recv_buf_gpadl_handle) {
-		ret = vmbus_teardown_gpadl(net_device->dev->channel,
-			   net_device->recv_buf_gpadl_handle);
+		ret = vmbus_teardown_gpadl(device->channel,
+					   net_device->recv_buf_gpadl_handle);
 
 		/* If we failed here, we might as well return and have a leak
 		 * rather than continue and a bugchk
@@ -178,7 +207,7 @@
 		revoke_packet->msg.v1_msg.revoke_send_buf.id =
 			NETVSC_SEND_BUFFER_ID;
 
-		ret = vmbus_sendpacket(net_device->dev->channel,
+		ret = vmbus_sendpacket(device->channel,
 				       revoke_packet,
 				       sizeof(struct nvsp_message),
 				       (unsigned long)revoke_packet,
@@ -194,7 +223,7 @@
 	}
 	/* Teardown the gpadl on the vsp end */
 	if (net_device->send_buf_gpadl_handle) {
-		ret = vmbus_teardown_gpadl(net_device->dev->channel,
+		ret = vmbus_teardown_gpadl(device->channel,
 					   net_device->send_buf_gpadl_handle);
 
 		/* If we failed here, we might as well return and have a leak
@@ -229,7 +258,7 @@
 	net_device = get_outbound_net_device(device);
 	if (!net_device)
 		return -ENODEV;
-	ndev = net_device->ndev;
+	ndev = hv_get_drvdata(device);
 
 	node = cpu_to_node(device->channel->target_cpu);
 	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
@@ -406,7 +435,7 @@
 	goto exit;
 
 cleanup:
-	netvsc_destroy_buf(net_device);
+	netvsc_destroy_buf(device);
 
 exit:
 	return ret;
@@ -419,6 +448,7 @@
 			      struct nvsp_message *init_packet,
 			      u32 nvsp_ver)
 {
+	struct net_device *ndev = hv_get_drvdata(device);
 	int ret;
 	unsigned long t;
 
@@ -452,8 +482,7 @@
 	/* NVSPv2 or later: Send NDIS config */
 	memset(init_packet, 0, sizeof(struct nvsp_message));
 	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
-	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
-						       ETH_HLEN;
+	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
 	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
 
 	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
@@ -473,7 +502,6 @@
 	struct netvsc_device *net_device;
 	struct nvsp_message *init_packet;
 	int ndis_version;
-	struct net_device *ndev;
 	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
 		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
 	int i, num_ver = 4; /* number of different NVSP versions */
@@ -481,7 +509,6 @@
 	net_device = get_outbound_net_device(device);
 	if (!net_device)
 		return -ENODEV;
-	ndev = net_device->ndev;
 
 	init_packet = &net_device->channel_init_pkt;
 
@@ -537,9 +564,9 @@
 	return ret;
 }
 
-static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
+static void netvsc_disconnect_vsp(struct hv_device *device)
 {
-	netvsc_destroy_buf(net_device);
+	netvsc_destroy_buf(device);
 }
 
 /*
@@ -547,24 +574,13 @@
  */
 int netvsc_device_remove(struct hv_device *device)
 {
-	struct netvsc_device *net_device;
-	unsigned long flags;
+	struct net_device *ndev = hv_get_drvdata(device);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *net_device = net_device_ctx->nvdev;
 
-	net_device = hv_get_drvdata(device);
+	netvsc_disconnect_vsp(device);
 
-	netvsc_disconnect_vsp(net_device);
-
-	/*
-	 * Since we have already drained, we don't need to busy wait
-	 * as was done in final_release_stor_device()
-	 * Note that we cannot set the ext pointer to NULL until
-	 * we have drained - to drain the outgoing packets, we need to
-	 * allow incoming packets.
-	 */
-
-	spin_lock_irqsave(&device->channel->inbound_lock, flags);
-	hv_set_drvdata(device, NULL);
-	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+	net_device_ctx->nvdev = NULL;
 
 	/*
 	 * At this point, no one should be accessing net_device
@@ -612,12 +628,11 @@
 {
 	struct nvsp_message *nvsp_packet;
 	struct hv_netvsc_packet *nvsc_packet;
-	struct net_device *ndev;
+	struct net_device *ndev = hv_get_drvdata(device);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 	u32 send_index;
 	struct sk_buff *skb;
 
-	ndev = net_device->ndev;
-
 	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
 			(packet->offset8 << 3));
 
@@ -662,7 +677,7 @@
 			wake_up(&net_device->wait_drain);
 
 		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
-		    !net_device->start_remove &&
+		    !net_device_ctx->start_remove &&
 		    (hv_ringbuf_avail_percent(&channel->outbound) >
 		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
 				netif_tx_wake_queue(netdev_get_tx_queue(
@@ -746,6 +761,7 @@
 }
 
 static inline int netvsc_send_pkt(
+	struct hv_device *device,
 	struct hv_netvsc_packet *packet,
 	struct netvsc_device *net_device,
 	struct hv_page_buffer **pb,
@@ -754,7 +770,7 @@
 	struct nvsp_message nvmsg;
 	u16 q_idx = packet->q_idx;
 	struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
-	struct net_device *ndev = net_device->ndev;
+	struct net_device *ndev = hv_get_drvdata(device);
 	u64 req_id;
 	int ret;
 	struct hv_page_buffer *pgbuf;
@@ -949,7 +965,8 @@
 	}
 
 	if (msd_send) {
-		m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
+		m_ret = netvsc_send_pkt(device, msd_send, net_device,
+					NULL, msd_skb);
 
 		if (m_ret != 0) {
 			netvsc_free_send_slot(net_device,
@@ -960,7 +977,7 @@
 
 send_now:
 	if (cur_send)
-		ret = netvsc_send_pkt(cur_send, net_device, pb, skb);
+		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
 
 	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
 		netvsc_free_send_slot(net_device, section_index);
@@ -976,9 +993,7 @@
 	struct nvsp_message recvcompMessage;
 	int retries = 0;
 	int ret;
-	struct net_device *ndev;
-
-	ndev = net_device->ndev;
+	struct net_device *ndev = hv_get_drvdata(device);
 
 	recvcompMessage.hdr.msg_type =
 				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
@@ -1025,11 +1040,9 @@
 	u32 status = NVSP_STAT_SUCCESS;
 	int i;
 	int count = 0;
-	struct net_device *ndev;
+	struct net_device *ndev = hv_get_drvdata(device);
 	void *data;
 
-	ndev = net_device->ndev;
-
 	/*
 	 * All inbound packets other than send completion should be xfer page
 	 * packet
@@ -1085,14 +1098,13 @@
 			      struct nvsp_message *nvmsg)
 {
 	struct netvsc_device *nvscdev;
-	struct net_device *ndev;
+	struct net_device *ndev = hv_get_drvdata(hdev);
 	int i;
 	u32 count, *tab;
 
 	nvscdev = get_outbound_net_device(hdev);
 	if (!nvscdev)
 		return;
-	ndev = nvscdev->ndev;
 
 	count = nvmsg->msg.v5_msg.send_table.count;
 	if (count != VRSS_SEND_TAB_SIZE) {
@@ -1151,7 +1163,7 @@
 	net_device = get_inbound_net_device(device);
 	if (!net_device)
 		return;
-	ndev = net_device->ndev;
+	ndev = hv_get_drvdata(device);
 	buffer = get_per_channel_state(channel);
 
 	do {
@@ -1224,30 +1236,19 @@
  */
 int netvsc_device_add(struct hv_device *device, void *additional_info)
 {
-	int ret = 0;
+	int i, ret = 0;
 	int ring_size =
 	((struct netvsc_device_info *)additional_info)->ring_size;
 	struct netvsc_device *net_device;
-	struct net_device *ndev;
+	struct net_device *ndev = hv_get_drvdata(device);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 
-	net_device = alloc_net_device(device);
+	net_device = alloc_net_device();
 	if (!net_device)
 		return -ENOMEM;
 
 	net_device->ring_size = ring_size;
 
-	/*
-	 * Coming into this function, struct net_device * is
-	 * registered as the driver private data.
-	 * In alloc_net_device(), we register struct netvsc_device *
-	 * as the driver private data and stash away struct net_device *
-	 * in struct netvsc_device *.
-	 */
-	ndev = net_device->ndev;
-
-	/* Add netvsc_device context to netvsc_device */
-	net_device->nd_ctx = netdev_priv(ndev);
-
 	/* Initialize the NetVSC channel extension */
 	init_completion(&net_device->channel_init_wait);
 
@@ -1266,7 +1267,19 @@
 	/* Channel is opened */
 	pr_info("hv_netvsc channel opened successfully\n");
 
-	net_device->chn_table[0] = device->channel;
+	/* If we're reopening the device we may have multiple queues; fill the
+	 * chn_table with the default channel so it is used before the
+	 * subchannels are opened.
+	 */
+	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
+		net_device->chn_table[i] = device->channel;
+
+	/* Writing nvdev pointer unlocks netvsc_send(); make sure chn_table is
+	 * populated.
+	 */
+	wmb();
+
+	net_device_ctx->nvdev = net_device;
 
 	/* Connect with the NetVsp */
 	ret = netvsc_connect_vsp(device);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index b8121eb..6a69b5c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -67,18 +67,19 @@
 {
 	struct net_device_context *ndevctx =
 		container_of(w, struct net_device_context, work);
-	struct netvsc_device *nvdev;
+	struct hv_device *device_obj = ndevctx->device_ctx;
+	struct net_device *ndev = hv_get_drvdata(device_obj);
+	struct netvsc_device *nvdev = ndevctx->nvdev;
 	struct rndis_device *rdev;
 
-	nvdev = hv_get_drvdata(ndevctx->device_ctx);
-	if (nvdev == NULL || nvdev->ndev == NULL)
+	if (!nvdev)
 		return;
 
 	rdev = nvdev->extension;
 	if (rdev == NULL)
 		return;
 
-	if (nvdev->ndev->flags & IFF_PROMISC)
+	if (ndev->flags & IFF_PROMISC)
 		rndis_filter_set_packet_filter(rdev,
 			NDIS_PACKET_TYPE_PROMISCUOUS);
 	else
@@ -99,7 +100,7 @@
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct hv_device *device_obj = net_device_ctx->device_ctx;
-	struct netvsc_device *nvdev;
+	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 	struct rndis_device *rdev;
 	int ret = 0;
 
@@ -114,7 +115,6 @@
 
 	netif_tx_wake_all_queues(net);
 
-	nvdev = hv_get_drvdata(device_obj);
 	rdev = nvdev->extension;
 	if (!rdev->link_state)
 		netif_carrier_on(net);
@@ -126,7 +126,7 @@
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct hv_device *device_obj = net_device_ctx->device_ctx;
-	struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
+	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 	int ret;
 	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
 	struct vmbus_channel *chn;
@@ -205,8 +205,7 @@
 			void *accel_priv, select_queue_fallback_t fallback)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
-	struct hv_device *hdev =  net_device_ctx->device_ctx;
-	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
+	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
 	u32 hash;
 	u16 q_idx = 0;
 
@@ -580,7 +579,6 @@
 	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
 	struct net_device *net;
 	struct net_device_context *ndev_ctx;
-	struct netvsc_device *net_device;
 	struct netvsc_reconfig *event;
 	unsigned long flags;
 
@@ -590,8 +588,7 @@
 	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
 		return;
 
-	net_device = hv_get_drvdata(device_obj);
-	net = net_device->ndev;
+	net = hv_get_drvdata(device_obj);
 
 	if (!net || net->reg_state != NETREG_REGISTERED)
 		return;
@@ -610,42 +607,24 @@
 	schedule_delayed_work(&ndev_ctx->dwork, 0);
 }
 
-/*
- * netvsc_recv_callback -  Callback when we receive a packet from the
- * "wire" on the specified device.
- */
-int netvsc_recv_callback(struct hv_device *device_obj,
+
+static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
 				struct hv_netvsc_packet *packet,
-				void **data,
 				struct ndis_tcp_ip_checksum_info *csum_info,
-				struct vmbus_channel *channel,
-				u16 vlan_tci)
+				void *data, u16 vlan_tci)
 {
-	struct net_device *net;
-	struct net_device_context *net_device_ctx;
 	struct sk_buff *skb;
-	struct netvsc_stats *rx_stats;
 
-	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
-	if (!net || net->reg_state != NETREG_REGISTERED) {
-		return NVSP_STAT_FAIL;
-	}
-	net_device_ctx = netdev_priv(net);
-	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
-
-	/* Allocate a skb - TODO direct I/O to pages? */
 	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
-	if (unlikely(!skb)) {
-		++net->stats.rx_dropped;
-		return NVSP_STAT_FAIL;
-	}
+	if (!skb)
+		return skb;
 
 	/*
 	 * Copy to skb. This copy is needed here since the memory pointed by
 	 * hv_netvsc_packet cannot be deallocated
 	 */
-	memcpy(skb_put(skb, packet->total_data_buflen), *data,
-		packet->total_data_buflen);
+	memcpy(skb_put(skb, packet->total_data_buflen), data,
+	       packet->total_data_buflen);
 
 	skb->protocol = eth_type_trans(skb, net);
 	if (csum_info) {
@@ -663,6 +642,74 @@
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 				       vlan_tci);
 
+	return skb;
+}
+
+/*
+ * netvsc_recv_callback -  Callback when we receive a packet from the
+ * "wire" on the specified device.
+ */
+int netvsc_recv_callback(struct hv_device *device_obj,
+				struct hv_netvsc_packet *packet,
+				void **data,
+				struct ndis_tcp_ip_checksum_info *csum_info,
+				struct vmbus_channel *channel,
+				u16 vlan_tci)
+{
+	struct net_device *net = hv_get_drvdata(device_obj);
+	struct net_device_context *net_device_ctx = netdev_priv(net);
+	struct sk_buff *skb;
+	struct sk_buff *vf_skb;
+	struct netvsc_stats *rx_stats;
+	struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
+	u32 bytes_recvd = packet->total_data_buflen;
+	int ret = 0;
+
+	if (!net || net->reg_state != NETREG_REGISTERED)
+		return NVSP_STAT_FAIL;
+
+	if (READ_ONCE(netvsc_dev->vf_inject)) {
+		atomic_inc(&netvsc_dev->vf_use_cnt);
+		if (!READ_ONCE(netvsc_dev->vf_inject)) {
+			/*
+			 * We raced; just move on.
+			 */
+			atomic_dec(&netvsc_dev->vf_use_cnt);
+			goto vf_injection_done;
+		}
+
+		/*
+		 * Inject this packet into the VF interface.
+		 * On Hyper-V, multicast and broadcast packets
+		 * are only delivered on the synthetic interface
+		 * (after subjecting these to policy filters on
+		 * the host). Deliver these via the VF interface
+		 * in the guest.
+		 */
+		vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
+					       csum_info, *data, vlan_tci);
+		if (vf_skb != NULL) {
+			++netvsc_dev->vf_netdev->stats.rx_packets;
+			netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
+			netif_receive_skb(vf_skb);
+		} else {
+			++net->stats.rx_dropped;
+			ret = NVSP_STAT_FAIL;
+		}
+		atomic_dec(&netvsc_dev->vf_use_cnt);
+		return ret;
+	}
+
+vf_injection_done:
+	net_device_ctx = netdev_priv(net);
+	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
+
+	/* Allocate a skb - TODO direct I/O to pages? */
+	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
+	if (unlikely(!skb)) {
+		++net->stats.rx_dropped;
+		return NVSP_STAT_FAIL;
+	}
 	skb_record_rx_queue(skb, channel->
 			    offermsg.offer.sub_channel_index);
 
@@ -692,8 +739,7 @@
 				struct ethtool_channels *channel)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct hv_device *dev = net_device_ctx->device_ctx;
-	struct netvsc_device *nvdev = hv_get_drvdata(dev);
+	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 
 	if (nvdev) {
 		channel->max_combined	= nvdev->max_chn;
@@ -706,14 +752,14 @@
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct hv_device *dev = net_device_ctx->device_ctx;
-	struct netvsc_device *nvdev = hv_get_drvdata(dev);
+	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 	struct netvsc_device_info device_info;
 	u32 num_chn;
 	u32 max_chn;
 	int ret = 0;
 	bool recovering = false;
 
-	if (!nvdev || nvdev->destroy)
+	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
 		return -ENODEV;
 
 	num_chn = nvdev->num_chn;
@@ -742,14 +788,11 @@
 		goto out;
 
  do_set:
-	nvdev->start_remove = true;
+	net_device_ctx->start_remove = true;
 	rndis_filter_device_remove(dev);
 
 	nvdev->num_chn = channels->combined_count;
 
-	net_device_ctx->device_ctx = dev;
-	hv_set_drvdata(dev, net);
-
 	memset(&device_info, 0, sizeof(device_info));
 	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
 	device_info.ring_size = ring_size;
@@ -764,7 +807,7 @@
 		goto recover;
 	}
 
-	nvdev = hv_get_drvdata(dev);
+	nvdev = net_device_ctx->nvdev;
 
 	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
 	if (ret) {
@@ -786,6 +829,9 @@
 
  out:
 	netvsc_open(net);
+	net_device_ctx->start_remove = false;
+	/* We may have missed link change notifications */
+	schedule_delayed_work(&net_device_ctx->dwork, 0);
 
 	return ret;
 
@@ -854,14 +900,14 @@
 static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 {
 	struct net_device_context *ndevctx = netdev_priv(ndev);
-	struct hv_device *hdev =  ndevctx->device_ctx;
-	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+	struct netvsc_device *nvdev = ndevctx->nvdev;
+	struct hv_device *hdev = ndevctx->device_ctx;
 	struct netvsc_device_info device_info;
 	int limit = ETH_DATA_LEN;
 	u32 num_chn;
 	int ret = 0;
 
-	if (nvdev == NULL || nvdev->destroy)
+	if (ndevctx->start_remove || !nvdev || nvdev->destroy)
 		return -ENODEV;
 
 	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
@@ -876,14 +922,11 @@
 
 	num_chn = nvdev->num_chn;
 
-	nvdev->start_remove = true;
+	ndevctx->start_remove = true;
 	rndis_filter_device_remove(hdev);
 
 	ndev->mtu = mtu;
 
-	ndevctx->device_ctx = hdev;
-	hv_set_drvdata(hdev, ndev);
-
 	memset(&device_info, 0, sizeof(device_info));
 	device_info.ring_size = ring_size;
 	device_info.num_chn = num_chn;
@@ -892,6 +935,10 @@
 
 out:
 	netvsc_open(ndev);
+	ndevctx->start_remove = false;
+
+	/* We may have missed link change notifications */
+	schedule_delayed_work(&ndevctx->dwork, 0);
 
 	return ret;
 }
@@ -1004,18 +1051,22 @@
  */
 static void netvsc_link_change(struct work_struct *w)
 {
-	struct net_device_context *ndev_ctx;
-	struct net_device *net;
+	struct net_device_context *ndev_ctx =
+		container_of(w, struct net_device_context, dwork.work);
+	struct hv_device *device_obj = ndev_ctx->device_ctx;
+	struct net_device *net = hv_get_drvdata(device_obj);
 	struct netvsc_device *net_device;
 	struct rndis_device *rdev;
 	struct netvsc_reconfig *event = NULL;
 	bool notify = false, reschedule = false;
 	unsigned long flags, next_reconfig, delay;
 
-	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
-	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+	rtnl_lock();
+	if (ndev_ctx->start_remove)
+		goto out_unlock;
+
+	net_device = ndev_ctx->nvdev;
 	rdev = net_device->extension;
-	net = net_device->ndev;
 
 	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
 	if (time_is_after_jiffies(next_reconfig)) {
@@ -1026,7 +1077,7 @@
 		delay = next_reconfig - jiffies;
 		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
 		schedule_delayed_work(&ndev_ctx->dwork, delay);
-		return;
+		goto out_unlock;
 	}
 	ndev_ctx->last_reconfig = jiffies;
 
@@ -1040,9 +1091,7 @@
 	spin_unlock_irqrestore(&ndev_ctx->lock, flags);
 
 	if (!event)
-		return;
-
-	rtnl_lock();
+		goto out_unlock;
 
 	switch (event->event) {
 		/* Only the following events are possible due to the check in
@@ -1074,7 +1123,7 @@
 			netif_tx_stop_all_queues(net);
 			event->event = RNDIS_STATUS_MEDIA_CONNECT;
 			spin_lock_irqsave(&ndev_ctx->lock, flags);
-			list_add_tail(&event->list, &ndev_ctx->reconfig_events);
+			list_add(&event->list, &ndev_ctx->reconfig_events);
 			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
 			reschedule = true;
 		}
@@ -1091,6 +1140,11 @@
 	 */
 	if (reschedule)
 		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+
+	return;
+
+out_unlock:
+	rtnl_unlock();
 }
 
 static void netvsc_free_netdev(struct net_device *netdev)
@@ -1102,6 +1156,192 @@
 	free_netdev(netdev);
 }
 
+static void netvsc_notify_peers(struct work_struct *wrk)
+{
+	struct garp_wrk *gwrk;
+
+	gwrk = container_of(wrk, struct garp_wrk, dwrk);
+
+	netdev_notify_peers(gwrk->netdev);
+
+	atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
+}
+
+static struct net_device *get_netvsc_net_device(char *mac)
+{
+	struct net_device *dev, *found = NULL;
+	int rtnl_locked;
+
+	rtnl_locked = rtnl_trylock();
+
+	for_each_netdev(&init_net, dev) {
+		if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
+			if (dev->netdev_ops != &device_ops)
+				continue;
+			found = dev;
+			break;
+		}
+	}
+	if (rtnl_locked)
+		rtnl_unlock();
+
+	return found;
+}
+
+static int netvsc_register_vf(struct net_device *vf_netdev)
+{
+	struct net_device *ndev;
+	struct net_device_context *net_device_ctx;
+	struct netvsc_device *netvsc_dev;
+	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+
+	if (eth_ops == NULL || eth_ops == &ethtool_ops)
+		return NOTIFY_DONE;
+
+	/*
+	 * We will use the MAC address to locate the synthetic interface to
+	 * associate with the VF interface. If we don't find a matching
+	 * synthetic interface, move on.
+	 */
+	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+	if (!ndev)
+		return NOTIFY_DONE;
+
+	net_device_ctx = netdev_priv(ndev);
+	netvsc_dev = net_device_ctx->nvdev;
+	if (netvsc_dev == NULL)
+		return NOTIFY_DONE;
+
+	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
+	/*
+	 * Take a reference on the module.
+	 */
+	try_module_get(THIS_MODULE);
+	netvsc_dev->vf_netdev = vf_netdev;
+	return NOTIFY_OK;
+}
+
+
+static int netvsc_vf_up(struct net_device *vf_netdev)
+{
+	struct net_device *ndev;
+	struct netvsc_device *netvsc_dev;
+	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+	struct net_device_context *net_device_ctx;
+
+	if (eth_ops == &ethtool_ops)
+		return NOTIFY_DONE;
+
+	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+	if (!ndev)
+		return NOTIFY_DONE;
+
+	net_device_ctx = netdev_priv(ndev);
+	netvsc_dev = net_device_ctx->nvdev;
+
+	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+		return NOTIFY_DONE;
+
+	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
+	netvsc_dev->vf_inject = true;
+
+	/*
+	 * Open the device before switching data path.
+	 */
+	rndis_filter_open(net_device_ctx->device_ctx);
+
+	/*
+	 * notify the host to switch the data path.
+	 */
+	netvsc_switch_datapath(ndev, true);
+	netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);
+
+	netif_carrier_off(ndev);
+
+	/*
+	 * Now notify peers. We are scheduling work to
+	 * notify peers; take a reference to prevent
+	 * the VF interface from vanishing.
+	 */
+	atomic_inc(&netvsc_dev->vf_use_cnt);
+	net_device_ctx->gwrk.netdev = vf_netdev;
+	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
+	schedule_work(&net_device_ctx->gwrk.dwrk);
+
+	return NOTIFY_OK;
+}
+
+
+static int netvsc_vf_down(struct net_device *vf_netdev)
+{
+	struct net_device *ndev;
+	struct netvsc_device *netvsc_dev;
+	struct net_device_context *net_device_ctx;
+	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+
+	if (eth_ops == &ethtool_ops)
+		return NOTIFY_DONE;
+
+	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+	if (!ndev)
+		return NOTIFY_DONE;
+
+	net_device_ctx = netdev_priv(ndev);
+	netvsc_dev = net_device_ctx->nvdev;
+
+	if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+		return NOTIFY_DONE;
+
+	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
+	netvsc_dev->vf_inject = false;
+	/*
+	 * Wait for currently active users to
+	 * drain out.
+	 */
+
+	while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
+		udelay(50);
+	netvsc_switch_datapath(ndev, false);
+	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
+	rndis_filter_close(net_device_ctx->device_ctx);
+	netif_carrier_on(ndev);
+	/*
+	 * Notify peers.
+	 */
+	atomic_inc(&netvsc_dev->vf_use_cnt);
+	net_device_ctx->gwrk.netdev = ndev;
+	net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
+	schedule_work(&net_device_ctx->gwrk.dwrk);
+
+	return NOTIFY_OK;
+}
+
+
+static int netvsc_unregister_vf(struct net_device *vf_netdev)
+{
+	struct net_device *ndev;
+	struct netvsc_device *netvsc_dev;
+	const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+	struct net_device_context *net_device_ctx;
+
+	if (eth_ops == &ethtool_ops)
+		return NOTIFY_DONE;
+
+	ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+	if (!ndev)
+		return NOTIFY_DONE;
+
+	net_device_ctx = netdev_priv(ndev);
+	netvsc_dev = net_device_ctx->nvdev;
+	if (netvsc_dev == NULL)
+		return NOTIFY_DONE;
+	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
+
+	netvsc_dev->vf_netdev = NULL;
+	module_put(THIS_MODULE);
+	return NOTIFY_OK;
+}
+
 static int netvsc_probe(struct hv_device *dev,
 			const struct hv_vmbus_device_id *dev_id)
 {
@@ -1138,8 +1378,12 @@
 	}
 
 	hv_set_drvdata(dev, net);
+
+	net_device_ctx->start_remove = false;
+
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 	INIT_WORK(&net_device_ctx->work, do_set_multicast);
+	INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
 
 	spin_lock_init(&net_device_ctx->lock);
 	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
@@ -1168,7 +1412,7 @@
 	}
 	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
-	nvdev = hv_get_drvdata(dev);
+	nvdev = net_device_ctx->nvdev;
 	netif_set_real_num_tx_queues(net, nvdev->num_chn);
 	netif_set_real_num_rx_queues(net, nvdev->num_chn);
 
@@ -1190,17 +1434,24 @@
 	struct net_device_context *ndev_ctx;
 	struct netvsc_device *net_device;
 
-	net_device = hv_get_drvdata(dev);
-	net = net_device->ndev;
+	net = hv_get_drvdata(dev);
 
 	if (net == NULL) {
 		dev_err(&dev->device, "No net device to remove\n");
 		return 0;
 	}
 
-	net_device->start_remove = true;
 
 	ndev_ctx = netdev_priv(net);
+	net_device = ndev_ctx->nvdev;
+
+	/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
+	 * removing the device.
+	 */
+	rtnl_lock();
+	ndev_ctx->start_remove = true;
+	rtnl_unlock();
+
 	cancel_delayed_work_sync(&ndev_ctx->dwork);
 	cancel_work_sync(&ndev_ctx->work);
 
@@ -1215,6 +1466,8 @@
 	 */
 	rndis_filter_device_remove(dev);
 
+	hv_set_drvdata(dev, NULL);
+
 	netvsc_free_netdev(net);
 	return 0;
 }
@@ -1235,19 +1488,58 @@
 	.remove = netvsc_remove,
 };
 
+
+/*
+ * On Hyper-V, every VF interface is matched with a corresponding
+ * synthetic interface. The synthetic interface is presented first
+ * to the guest. When the corresponding VF instance is registered,
+ * we will take care of switching the data path.
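+ * NETDEV_REGISTER records the VF netdev, NETDEV_UP enables packet
+ * injection and switches the host data path to the VF, NETDEV_DOWN drains
+ * in-flight users and switches back to the synthetic path, and
+ * NETDEV_UNREGISTER drops the association again.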
+ */
+static int netvsc_netdev_event(struct notifier_block *this,
+			       unsigned long event, void *ptr)
+{
+	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		return netvsc_register_vf(event_dev);
+	case NETDEV_UNREGISTER:
+		return netvsc_unregister_vf(event_dev);
+	case NETDEV_UP:
+		return netvsc_vf_up(event_dev);
+	case NETDEV_DOWN:
+		return netvsc_vf_down(event_dev);
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block netvsc_netdev_notifier = {
+	.notifier_call = netvsc_netdev_event,
+};
+
 static void __exit netvsc_drv_exit(void)
 {
+	unregister_netdevice_notifier(&netvsc_netdev_notifier);
 	vmbus_driver_unregister(&netvsc_drv);
 }
 
 static int __init netvsc_drv_init(void)
 {
+	int ret;
+
 	if (ring_size < RING_SIZE_MIN) {
 		ring_size = RING_SIZE_MIN;
 		pr_info("Increased ring_size to %d (min allowed)\n",
 			ring_size);
 	}
-	return vmbus_driver_register(&netvsc_drv);
+	ret = vmbus_driver_register(&netvsc_drv);
+
+	if (ret)
+		return ret;
+
+	register_netdevice_notifier(&netvsc_netdev_notifier);
+	return 0;
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c4e1e04..97c292b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -126,11 +126,7 @@
 static void dump_rndis_message(struct hv_device *hv_dev,
 			struct rndis_message *rndis_msg)
 {
-	struct net_device *netdev;
-	struct netvsc_device *net_device;
-
-	net_device = hv_get_drvdata(hv_dev);
-	netdev = net_device->ndev;
+	struct net_device *netdev = hv_get_drvdata(hv_dev);
 
 	switch (rndis_msg->ndis_msg_type) {
 	case RNDIS_MSG_PACKET:
@@ -211,6 +207,7 @@
 	struct hv_netvsc_packet *packet;
 	struct hv_page_buffer page_buf[2];
 	struct hv_page_buffer *pb = page_buf;
+	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 
 	/* Setup the packet to send it */
 	packet = &req->pkt;
@@ -236,7 +233,7 @@
 			pb[0].len;
 	}
 
-	ret = netvsc_send(dev->net_dev->dev, packet, NULL, &pb, NULL);
+	ret = netvsc_send(net_device_ctx->device_ctx, packet, NULL, &pb, NULL);
 	return ret;
 }
 
@@ -262,9 +259,7 @@
 	struct rndis_request *request = NULL;
 	bool found = false;
 	unsigned long flags;
-	struct net_device *ndev;
-
-	ndev = dev->net_dev->ndev;
+	struct net_device *ndev = dev->ndev;
 
 	spin_lock_irqsave(&dev->request_lock, flags);
 	list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -355,6 +350,7 @@
 	struct ndis_pkt_8021q_info *vlan;
 	struct ndis_tcp_ip_checksum_info *csum_info;
 	u16 vlan_tci = 0;
+	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 
 	rndis_pkt = &msg->msg.pkt;
 
@@ -368,7 +364,7 @@
 	 * should be the data packet size plus the trailer padding size
 	 */
 	if (pkt->total_data_buflen < rndis_pkt->data_len) {
-		netdev_err(dev->net_dev->ndev, "rndis message buffer "
+		netdev_err(dev->ndev, "rndis message buffer "
 			   "overflow detected (got %u, min %u)"
 			   "...dropping this message!\n",
 			   pkt->total_data_buflen, rndis_pkt->data_len);
@@ -390,7 +386,7 @@
 	}
 
 	csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
-	return netvsc_recv_callback(dev->net_dev->dev, pkt, data,
+	return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data,
 				    csum_info, channel, vlan_tci);
 }
 
@@ -399,10 +395,11 @@
 				void **data,
 				struct vmbus_channel *channel)
 {
-	struct netvsc_device *net_dev = hv_get_drvdata(dev);
+	struct net_device *ndev = hv_get_drvdata(dev);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *net_dev = net_device_ctx->nvdev;
 	struct rndis_device *rndis_dev;
 	struct rndis_message *rndis_msg;
-	struct net_device *ndev;
 	int ret = 0;
 
 	if (!net_dev) {
@@ -410,8 +407,6 @@
 		goto exit;
 	}
 
-	ndev = net_dev->ndev;
-
 	/* Make sure the rndis device state is initialized */
 	if (!net_dev->extension) {
 		netdev_err(ndev, "got rndis message but no rndis device - "
@@ -430,7 +425,7 @@
 
 	rndis_msg = *data;
 
-	if (netif_msg_rx_err(net_dev->nd_ctx))
+	if (netif_msg_rx_err(net_device_ctx))
 		dump_rndis_message(dev, rndis_msg);
 
 	switch (rndis_msg->ndis_msg_type) {
@@ -550,9 +545,10 @@
 
 int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
 {
-	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+	struct net_device *ndev = hv_get_drvdata(hdev);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 	struct rndis_device *rdev = nvdev->extension;
-	struct net_device *ndev = nvdev->ndev;
 	struct rndis_request *request;
 	struct rndis_set_request *set;
 	struct rndis_config_parameter_info *cpi;
@@ -629,9 +625,10 @@
 rndis_filter_set_offload_params(struct hv_device *hdev,
 				struct ndis_offload_params *req_offloads)
 {
-	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+	struct net_device *ndev = hv_get_drvdata(hdev);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 	struct rndis_device *rdev = nvdev->extension;
-	struct net_device *ndev = nvdev->ndev;
 	struct rndis_request *request;
 	struct rndis_set_request *set;
 	struct ndis_offload_params *offload_params;
@@ -703,7 +700,7 @@
 
 static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
 {
-	struct net_device *ndev = rdev->net_dev->ndev;
+	struct net_device *ndev = rdev->ndev;
 	struct rndis_request *request;
 	struct rndis_set_request *set;
 	struct rndis_set_complete *set_complete;
@@ -799,9 +796,7 @@
 	u32 status;
 	int ret;
 	unsigned long t;
-	struct net_device *ndev;
-
-	ndev = dev->net_dev->ndev;
+	struct net_device *ndev = dev->ndev;
 
 	request = get_rndis_request(dev, RNDIS_MSG_SET,
 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
@@ -856,7 +851,8 @@
 	u32 status;
 	int ret;
 	unsigned long t;
-	struct netvsc_device *nvdev = dev->net_dev;
+	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
+	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 
 	request = get_rndis_request(dev, RNDIS_MSG_INIT,
 			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -879,7 +875,6 @@
 		goto cleanup;
 	}
 
-
 	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
 
 	if (t == 0) {
@@ -910,8 +905,9 @@
 {
 	struct rndis_request *request;
 	struct rndis_halt_request *halt;
-	struct netvsc_device *nvdev = dev->net_dev;
-	struct hv_device *hdev = nvdev->dev;
+	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
+	struct netvsc_device *nvdev = net_device_ctx->nvdev;
+	struct hv_device *hdev = net_device_ctx->device_ctx;
 	ulong flags;
 
 	/* Attempt to do a rndis device halt */
@@ -979,13 +975,14 @@
 
 static void netvsc_sc_open(struct vmbus_channel *new_sc)
 {
-	struct netvsc_device *nvscdev;
+	struct net_device *ndev =
+		hv_get_drvdata(new_sc->primary_channel->device_obj);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *nvscdev = net_device_ctx->nvdev;
 	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
 	int ret;
 	unsigned long flags;
 
-	nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
-
 	if (chn_index >= nvscdev->num_chn)
 		return;
 
@@ -1010,6 +1007,8 @@
 				  void *additional_info)
 {
 	int ret;
+	struct net_device *net = hv_get_drvdata(dev);
+	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct netvsc_device *net_device;
 	struct rndis_device *rndis_device;
 	struct netvsc_device_info *device_info = additional_info;
@@ -1040,16 +1039,15 @@
 		return ret;
 	}
 
-
 	/* Initialize the rndis device */
-	net_device = hv_get_drvdata(dev);
+	net_device = net_device_ctx->nvdev;
 	net_device->max_chn = 1;
 	net_device->num_chn = 1;
 
 	spin_lock_init(&net_device->sc_lock);
 
 	net_device->extension = rndis_device;
-	rndis_device->net_dev = net_device;
+	rndis_device->ndev = net;
 
 	/* Send the rndis initialization message */
 	ret = rndis_filter_init_device(rndis_device);
@@ -1063,8 +1061,8 @@
 	ret = rndis_filter_query_device(rndis_device,
 					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
 					&mtu, &size);
-	if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu)
-		net_device->ndev->mtu = mtu;
+	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
+		net->mtu = mtu;
 
 	/* Get the mac address */
 	ret = rndis_filter_query_device_mac(rndis_device);
@@ -1198,7 +1196,9 @@
 
 void rndis_filter_device_remove(struct hv_device *dev)
 {
-	struct netvsc_device *net_dev = hv_get_drvdata(dev);
+	struct net_device *ndev = hv_get_drvdata(dev);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *net_dev = net_device_ctx->nvdev;
 	struct rndis_device *rndis_dev = net_dev->extension;
 	unsigned long t;
 
@@ -1224,20 +1224,30 @@
 
 int rndis_filter_open(struct hv_device *dev)
 {
-	struct netvsc_device *net_device = hv_get_drvdata(dev);
+	struct net_device *ndev = hv_get_drvdata(dev);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *net_device = net_device_ctx->nvdev;
 
 	if (!net_device)
 		return -EINVAL;
 
+	if (atomic_inc_return(&net_device->open_cnt) != 1)
+		return 0;
+
 	return rndis_filter_open_device(net_device->extension);
 }
 
 int rndis_filter_close(struct hv_device *dev)
 {
-	struct netvsc_device *nvdev = hv_get_drvdata(dev);
+	struct net_device *ndev = hv_get_drvdata(dev);
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
+	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 
 	if (!nvdev)
 		return -EINVAL;
 
+	if (atomic_dec_return(&nvdev->open_cnt) != 0)
+		return 0;
+
 	return rndis_filter_close_device(nvdev->extension);
 }
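The recurring change throughout rndis_filter.c is the drvdata layering: hv_get_drvdata() now yields the struct net_device, and everything else hangs off its private context. A condensed sketch of the lookup, using the field names from the hunks above (demo_get_nvdev() itself is hypothetical):

static struct netvsc_device *demo_get_nvdev(struct hv_device *hdev)
{
	struct net_device *ndev = hv_get_drvdata(hdev);
	struct net_device_context *ctx;

	if (!ndev)
		return NULL;

	ctx = netdev_priv(ndev);	/* per-netdev driver context */
	return ctx->nvdev;		/* NULL until setup has completed */
}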
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 89154c0..b82e39d 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1030,6 +1030,7 @@
 	if (ret) {
 		dev_err(&lp->spi->dev,
 			"upload firmware failed with %d\n", ret);
+		release_firmware(fw);
 		return ret;
 	}
 
@@ -1037,6 +1038,7 @@
 	if (ret) {
 		dev_err(&lp->spi->dev,
 			"verify firmware failed with %d\n", ret);
+		release_firmware(fw);
 		return ret;
 	}
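Both adf7242 hunks plug the same leak: the blob obtained with request_firmware() was not released on the upload/verify error paths. A minimal sketch of the balanced pattern, assuming hypothetical demo_upload()/demo_verify() steps:

#include <linux/firmware.h>

/* demo_upload()/demo_verify() stand in for the driver-specific steps */
static int demo_upload(struct device *dev, const u8 *data, size_t size);
static int demo_verify(struct device *dev, const u8 *data, size_t size);

static int demo_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "demo.bin", dev);
	if (ret)
		return ret;

	ret = demo_upload(dev, fw->data, fw->size);
	if (!ret)
		ret = demo_verify(dev, fw->data, fw->size);

	release_firmware(fw);		/* released on success and on failure */
	return ret;
}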
 
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index cb9e9fe..9f10da6 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1340,7 +1340,7 @@
 	.t_off_to_aack = 80,
 	.t_off_to_tx_on = 80,
 	.t_off_to_sleep = 35,
-	.t_sleep_to_off = 210,
+	.t_sleep_to_off = 1000,
 	.t_frame = 4096,
 	.t_p_ack = 545,
 	.rssi_base_val = -91,
@@ -1355,7 +1355,7 @@
 	.t_off_to_aack = 110,
 	.t_off_to_tx_on = 110,
 	.t_off_to_sleep = 35,
-	.t_sleep_to_off = 380,
+	.t_sleep_to_off = 1000,
 	.t_frame = 4096,
 	.t_p_ack = 545,
 	.rssi_base_val = -91,
@@ -1370,7 +1370,7 @@
 	.t_off_to_aack = 200,
 	.t_off_to_tx_on = 200,
 	.t_off_to_sleep = 35,
-	.t_sleep_to_off = 380,
+	.t_sleep_to_off = 1000,
 	.t_frame = 4096,
 	.t_p_ack = 545,
 	.rssi_base_val = -100,
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index b1cd865..52c9051 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -3,6 +3,8 @@
  *
  * Written 2013 by Werner Almesberger <werner@almesberger.net>
  *
+ * Copyright (c) 2015 - 2016 Stefan Schmidt <stefan@datenfreihafen.org>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation, version 2
@@ -472,6 +474,76 @@
 	return -EINVAL;
 }
 
+#define ATUSB_MAX_ED_LEVELS 0xF
+static const s32 atusb_ed_levels[ATUSB_MAX_ED_LEVELS + 1] = {
+	-9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
+	-7100, -6900, -6700, -6500, -6300, -6100,
+};
+
+static int
+atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca)
+{
+	struct atusb *atusb = hw->priv;
+	u8 val;
+
+	/* mapping 802.15.4 to driver spec */
+	switch (cca->mode) {
+	case NL802154_CCA_ENERGY:
+		val = 1;
+		break;
+	case NL802154_CCA_CARRIER:
+		val = 2;
+		break;
+	case NL802154_CCA_ENERGY_CARRIER:
+		switch (cca->opt) {
+		case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
+			val = 3;
+			break;
+		case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
+			val = 0;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return atusb_write_subreg(atusb, SR_CCA_MODE, val);
+}
+
+static int
+atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
+{
+	struct atusb *atusb = hw->priv;
+	u32 i;
+
+	for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+		if (hw->phy->supported.cca_ed_levels[i] == mbm)
+			return atusb_write_subreg(atusb, SR_CCA_ED_THRES, i);
+	}
+
+	return -EINVAL;
+}
+
+static int
+atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries)
+{
+	struct atusb *atusb = hw->priv;
+	int ret;
+
+	ret = atusb_write_subreg(atusb, SR_MIN_BE, min_be);
+	if (ret)
+		return ret;
+
+	ret = atusb_write_subreg(atusb, SR_MAX_BE, max_be);
+	if (ret)
+		return ret;
+
+	return atusb_write_subreg(atusb, SR_MAX_CSMA_RETRIES, retries);
+}
+
 static int
 atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
 {
@@ -508,6 +580,9 @@
 	.stop			= atusb_stop,
 	.set_hw_addr_filt	= atusb_set_hw_addr_filt,
 	.set_txpower		= atusb_set_txpower,
+	.set_cca_mode		= atusb_set_cca_mode,
+	.set_cca_ed_level	= atusb_set_cca_ed_level,
+	.set_csma_params	= atusb_set_csma_params,
 	.set_promiscuous_mode	= atusb_set_promiscuous_mode,
 };
 
@@ -636,9 +711,20 @@
 
 	hw->parent = &usb_dev->dev;
 	hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
-		    IEEE802154_HW_PROMISCUOUS;
+		    IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
 
-	hw->phy->flags = WPAN_PHY_FLAG_TXPOWER;
+	hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
+			 WPAN_PHY_FLAG_CCA_MODE;
+
+	hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+		BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+	hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+		BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+	hw->phy->supported.cca_ed_levels = atusb_ed_levels;
+	hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
+
+	hw->phy->cca.mode = NL802154_CCA_ENERGY;
 
 	hw->phy->current_page = 0;
 	hw->phy->current_channel = 11;	/* reset default */
@@ -647,6 +733,7 @@
 	hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
 	hw->phy->transmit_power = hw->phy->supported.tx_powers[0];
 	ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+	hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7];
 
 	atusb_command(atusb, ATUSB_RF_RESET, 0);
 	atusb_get_and_show_chip(atusb);
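The probe hunk ties the new CCA pieces together: the driver advertises the supported modes and the ED-level table, then seeds defaults that index into that same table (index 7 of atusb_ed_levels is -7700 mBm, i.e. -77 dBm). A condensed view of that wiring, gathered into one hypothetical helper; the fields and constants are the ones used in the hunks above:

static void demo_advertise_cca(struct ieee802154_hw *hw)
{
	hw->phy->flags |= WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE;

	hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
				       BIT(NL802154_CCA_CARRIER) |
				       BIT(NL802154_CCA_ENERGY_CARRIER);
	hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
				      BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
	hw->phy->supported.cca_ed_levels = atusb_ed_levels;
	hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);

	/* defaults must be drawn from the advertised tables */
	hw->phy->cca.mode = NL802154_CCA_ENERGY;
	hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7]; /* -7700 mBm */
}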
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 764a2bd..f446db8 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -61,6 +61,7 @@
 #define REG_TXBCON0	0x1A
 #define REG_TXNCON	0x1B  /* Transmit Normal FIFO Control */
 #define BIT_TXNTRIG	BIT(0)
+#define BIT_TXNSECEN	BIT(1)
 #define BIT_TXNACKREQ	BIT(2)
 
 #define REG_TXG1CON	0x1C
@@ -85,10 +86,13 @@
 #define REG_INTSTAT	0x31  /* Interrupt Status */
 #define BIT_TXNIF	BIT(0)
 #define BIT_RXIF	BIT(3)
+#define BIT_SECIF	BIT(4)
+#define BIT_SECIGNORE	BIT(7)
 
 #define REG_INTCON	0x32  /* Interrupt Control */
 #define BIT_TXNIE	BIT(0)
 #define BIT_RXIE	BIT(3)
+#define BIT_SECIE	BIT(4)
 
 #define REG_GPIO	0x33  /* GPIO */
 #define REG_TRISGPIO	0x34  /* GPIO direction */
@@ -548,6 +552,9 @@
 	u8 val = BIT_TXNTRIG;
 	int ret;
 
+	if (ieee802154_is_secen(fc))
+		val |= BIT_TXNSECEN;
+
 	if (ieee802154_is_ackreq(fc))
 		val |= BIT_TXNACKREQ;
 
@@ -616,7 +623,7 @@
 
 	/* Clear TXNIE and RXIE. Enable interrupts */
 	return regmap_update_bits(devrec->regmap_short, REG_INTCON,
-				  BIT_TXNIE | BIT_RXIE, 0);
+				  BIT_TXNIE | BIT_RXIE | BIT_SECIE, 0);
 }
 
 static void mrf24j40_stop(struct ieee802154_hw *hw)
@@ -1025,6 +1032,11 @@
 
 	enable_irq(devrec->spi->irq);
 
+	/* Ignore Rx security decryption */
+	if (intstat & BIT_SECIF)
+		regmap_write_async(devrec->regmap_short, REG_SECCON0,
+				   BIT_SECIGNORE);
+
 	/* Check for TX complete */
 	if (intstat & BIT_TXNIF)
 		ieee802154_xmit_complete(devrec->hw, devrec->tx_skb, false);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index cc56fac..66c0eea 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -196,6 +196,7 @@
 
 #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST	| \
 		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6	| \
+		      NETIF_F_GSO_ENCAP_ALL 				| \
 		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX		| \
 		      NETIF_F_HW_VLAN_STAG_TX)
 
@@ -224,6 +225,8 @@
 	dev->tx_queue_len = TX_Q_LIMIT;
 
 	dev->features |= IFB_FEATURES;
+	dev->hw_features |= dev->features;
+	dev->hw_enc_features |= dev->features;
 	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
 					       NETIF_F_HW_VLAN_STAG_TX);
 
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 57941d3..1c4d395 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -113,6 +113,7 @@
 {
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
 	const struct net_device *phy_dev = ipvlan->phy_dev;
+	struct ipvl_port *port = ipvlan->port;
 
 	dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
 		     (phy_dev->state & IPVLAN_STATE_MASK);
@@ -128,6 +129,8 @@
 	if (!ipvlan->pcpu_stats)
 		return -ENOMEM;
 
+	port->count += 1;
+
 	return 0;
 }
 
@@ -481,27 +484,21 @@
 
 	dev->priv_flags |= IFF_IPVLAN_SLAVE;
 
-	port->count += 1;
 	err = register_netdevice(dev);
 	if (err < 0)
-		goto ipvlan_destroy_port;
+		return err;
 
 	err = netdev_upper_dev_link(phy_dev, dev);
-	if (err)
-		goto ipvlan_destroy_port;
+	if (err) {
+		unregister_netdevice(dev);
+		return err;
+	}
 
 	list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
 	ipvlan_set_port_mode(port, mode);
 
 	netif_stacked_transfer_operstate(phy_dev, dev);
 	return 0;
-
-ipvlan_destroy_port:
-	port->count -= 1;
-	if (!port->count)
-		ipvlan_port_destroy(phy_dev);
-
-	return err;
 }
 
 static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
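The ipvlan change moves the port reference count into the ndo_init hook, which runs from register_netdevice(), so a failed registration is unwound through the normal uninit path and the link-create function no longer needs its own destroy label. Condensed view of the resulting flow, assuming the matching decrement lives in the uninit path as the hunks imply:

	err = register_netdevice(dev);		/* ndo_init bumps port->count */
	if (err < 0)
		return err;			/* unwind happens via uninit */

	err = netdev_upper_dev_link(phy_dev, dev);
	if (err) {
		unregister_netdevice(dev);	/* likewise unwinds via uninit */
		return err;
	}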
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index a2c227b..e070e12 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -394,12 +394,5 @@
 	  To compile it as a module, choose M here: the module will be called
 	  mcs7780.
 
-config SH_IRDA
-	tristate "SuperH IrDA driver"
-	depends on IRDA
-	depends on (ARCH_SHMOBILE || COMPILE_TEST) && HAS_IOMEM
-	help
-	  Say Y here if your want to enable SuperH IrDA devices.
-
 endmenu
 
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index be8ab5b..4c34443 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -19,7 +19,6 @@
 obj-$(CONFIG_PXA_FICP)	        += pxaficp_ir.o
 obj-$(CONFIG_MCS_FIR)	        += mcs7780.o
 obj-$(CONFIG_AU1000_FIR)	+= au1k_ir.o
-obj-$(CONFIG_SH_IRDA)		+= sh_irda.o
 # SIR drivers
 obj-$(CONFIG_IRTTY_SIR)		+= irtty-sir.o	sir-dev.o
 obj-$(CONFIG_BFIN_SIR)		+= bfin_sir.o
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 64bb44d..c285eaf 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1427,7 +1427,7 @@
 		/* Check for empty frame */
 		if (!skb->len) {
 			ali_ircc_change_speed(self, speed); 
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			spin_unlock_irqrestore(&self->lock, flags);
 			dev_kfree_skb(skb);
 			return NETDEV_TX_OK;
@@ -1533,7 +1533,7 @@
 	/* Restore bank register */
 	switch_bank(iobase, BANK0);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	spin_unlock_irqrestore(&self->lock, flags);
 	dev_kfree_skb(skb);
 
@@ -1946,7 +1946,7 @@
 		/* Check for empty frame */
 		if (!skb->len) {
 			ali_ircc_change_speed(self, speed); 
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			spin_unlock_irqrestore(&self->lock, flags);
 			dev_kfree_skb(skb);
 			return NETDEV_TX_OK;
@@ -1966,7 +1966,7 @@
 	/* Turn on transmit finished interrupt. Will fire immediately!  */
 	outb(UART_IER_THRI, iobase+UART_IER); 
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	spin_unlock_irqrestore(&self->lock, flags);
 
 	dev_kfree_skb(skb);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 303c4bd..be5bb0b 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -531,7 +531,7 @@
 	bfin_sir_dma_tx_chars(dev);
 #endif
 	bfin_sir_enable_tx(port);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 }
 
 static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 25f2196..a198946 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -429,7 +429,7 @@
 			 * do an extra memcpy and increment packet counters...
 			 * Jean II */
 			irda_usb_change_speed_xbofs(self);
-			netdev->trans_start = jiffies;
+			netif_trans_update(netdev);
 			/* Will netif_wake_queue() in callback */
 			goto drop;
 		}
@@ -526,7 +526,7 @@
 		netdev->stats.tx_packets++;
                 netdev->stats.tx_bytes += skb->len;
 		
-		netdev->trans_start = jiffies;
+		netif_trans_update(netdev);
 	}
 	spin_unlock_irqrestore(&self->lock, flags);
 	
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index dc0dbd8..9ef13d8 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1399,7 +1399,7 @@
 				 * to make sure packets gets through the
 				 * proper xmit handler - Jean II */
 			}
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			spin_unlock_irqrestore(&self->lock, flags);
 			dev_kfree_skb(skb);
 			return NETDEV_TX_OK;
@@ -1424,7 +1424,7 @@
 	/* Restore bank register */
 	outb(bank, iobase+BSR);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	spin_unlock_irqrestore(&self->lock, flags);
 
 	dev_kfree_skb(skb);
@@ -1470,7 +1470,7 @@
 				 * the speed change has been done.
 				 * Jean II */
 			}
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			spin_unlock_irqrestore(&self->lock, flags);
 			dev_kfree_skb(skb);
 			return NETDEV_TX_OK;
@@ -1553,7 +1553,7 @@
 	/* Restore bank register */
 	outb(bank, iobase+BSR);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	spin_unlock_irqrestore(&self->lock, flags);
 	dev_kfree_skb(skb);
 
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
deleted file mode 100644
index c96b46b..0000000
--- a/drivers/net/irda/sh_irda.c
+++ /dev/null
@@ -1,875 +0,0 @@
-/*
- * SuperH IrDA Driver
- *
- * Copyright (C) 2010 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * Based on sh_sir.c
- * Copyright (C) 2009 Renesas Solutions Corp.
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * CAUTION
- *
- * This driver is very simple.
- * So, it doesn't have below support now
- *  - MIR/FIR support
- *  - DMA transfer support
- *  - FIFO mode support
- */
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <net/irda/wrapper.h>
-#include <net/irda/irda_device.h>
-
-#define DRIVER_NAME "sh_irda"
-
-#define __IRDARAM_LEN	0x1039
-
-#define IRTMR		0x1F00 /* Transfer mode */
-#define IRCFR		0x1F02 /* Configuration */
-#define IRCTR		0x1F04 /* IR control */
-#define IRTFLR		0x1F20 /* Transmit frame length */
-#define IRTCTR		0x1F22 /* Transmit control */
-#define IRRFLR		0x1F40 /* Receive frame length */
-#define IRRCTR		0x1F42 /* Receive control */
-#define SIRISR		0x1F60 /* SIR-UART mode interrupt source */
-#define SIRIMR		0x1F62 /* SIR-UART mode interrupt mask */
-#define SIRICR		0x1F64 /* SIR-UART mode interrupt clear */
-#define SIRBCR		0x1F68 /* SIR-UART mode baud rate count */
-#define MFIRISR		0x1F70 /* MIR/FIR mode interrupt source */
-#define MFIRIMR		0x1F72 /* MIR/FIR mode interrupt mask */
-#define MFIRICR		0x1F74 /* MIR/FIR mode interrupt clear */
-#define CRCCTR		0x1F80 /* CRC engine control */
-#define CRCIR		0x1F86 /* CRC engine input data */
-#define CRCCR		0x1F8A /* CRC engine calculation */
-#define CRCOR		0x1F8E /* CRC engine output data */
-#define FIFOCP		0x1FC0 /* FIFO current pointer */
-#define FIFOFP		0x1FC2 /* FIFO follow pointer */
-#define FIFORSMSK	0x1FC4 /* FIFO receive status mask */
-#define FIFORSOR	0x1FC6 /* FIFO receive status OR */
-#define FIFOSEL		0x1FC8 /* FIFO select */
-#define FIFORS		0x1FCA /* FIFO receive status */
-#define FIFORFL		0x1FCC /* FIFO receive frame length */
-#define FIFORAMCP	0x1FCE /* FIFO RAM current pointer */
-#define FIFORAMFP	0x1FD0 /* FIFO RAM follow pointer */
-#define BIFCTL		0x1FD2 /* BUS interface control */
-#define IRDARAM		0x0000 /* IrDA buffer RAM */
-#define IRDARAM_LEN	__IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
-
-/* IRTMR */
-#define TMD_MASK	(0x3 << 14) /* Transfer Mode */
-#define TMD_SIR		(0x0 << 14)
-#define TMD_MIR		(0x3 << 14)
-#define TMD_FIR		(0x2 << 14)
-
-#define FIFORIM		(1 << 8) /* FIFO receive interrupt mask */
-#define MIM		(1 << 4) /* MIR/FIR Interrupt Mask */
-#define SIM		(1 << 0) /* SIR Interrupt Mask */
-#define xIM_MASK	(FIFORIM | MIM | SIM)
-
-/* IRCFR */
-#define RTO_SHIFT	8 /* shift for Receive Timeout */
-#define RTO		(0x3 << RTO_SHIFT)
-
-/* IRTCTR */
-#define ARMOD		(1 << 15) /* Auto-Receive Mode */
-#define TE		(1 <<  0) /* Transmit Enable */
-
-/* IRRFLR */
-#define RFL_MASK	(0x1FFF) /* mask for Receive Frame Length */
-
-/* IRRCTR */
-#define RE		(1 <<  0) /* Receive Enable */
-
-/*
- * SIRISR,  SIRIMR,  SIRICR,
- * MFIRISR, MFIRIMR, MFIRICR
- */
-#define FRE		(1 << 15) /* Frame Receive End */
-#define TROV		(1 << 11) /* Transfer Area Overflow */
-#define xIR_9		(1 << 9)
-#define TOT		xIR_9     /* for SIR     Timeout */
-#define ABTD		xIR_9     /* for MIR/FIR Abort Detection */
-#define xIR_8		(1 << 8)
-#define FER		xIR_8     /* for SIR     Framing Error */
-#define CRCER		xIR_8     /* for MIR/FIR CRC error */
-#define FTE		(1 << 7)  /* Frame Transmit End */
-#define xIR_MASK	(FRE | TROV | xIR_9 | xIR_8 | FTE)
-
-/* SIRBCR */
-#define BRC_MASK	(0x3F) /* mask for Baud Rate Count */
-
-/* CRCCTR */
-#define CRC_RST		(1 << 15) /* CRC Engine Reset */
-#define CRC_CT_MASK	0x0FFF    /* mask for CRC Engine Input Data Count */
-
-/* CRCIR */
-#define CRC_IN_MASK	0x0FFF    /* mask for CRC Engine Input Data */
-
-/************************************************************************
-
-
-			enum / structure
-
-
-************************************************************************/
-enum sh_irda_mode {
-	SH_IRDA_NONE = 0,
-	SH_IRDA_SIR,
-	SH_IRDA_MIR,
-	SH_IRDA_FIR,
-};
-
-struct sh_irda_self;
-struct sh_irda_xir_func {
-	int (*xir_fre)	(struct sh_irda_self *self);
-	int (*xir_trov)	(struct sh_irda_self *self);
-	int (*xir_9)	(struct sh_irda_self *self);
-	int (*xir_8)	(struct sh_irda_self *self);
-	int (*xir_fte)	(struct sh_irda_self *self);
-};
-
-struct sh_irda_self {
-	void __iomem		*membase;
-	unsigned int		irq;
-	struct platform_device	*pdev;
-
-	struct net_device	*ndev;
-
-	struct irlap_cb		*irlap;
-	struct qos_info		qos;
-
-	iobuff_t		tx_buff;
-	iobuff_t		rx_buff;
-
-	enum sh_irda_mode	mode;
-	spinlock_t		lock;
-
-	struct sh_irda_xir_func	*xir_func;
-};
-
-/************************************************************************
-
-
-			common function
-
-
-************************************************************************/
-static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&self->lock, flags);
-	iowrite16(data, self->membase + offset);
-	spin_unlock_irqrestore(&self->lock, flags);
-}
-
-static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
-{
-	unsigned long flags;
-	u16 ret;
-
-	spin_lock_irqsave(&self->lock, flags);
-	ret = ioread16(self->membase + offset);
-	spin_unlock_irqrestore(&self->lock, flags);
-
-	return ret;
-}
-
-static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
-			       u16 mask, u16 data)
-{
-	unsigned long flags;
-	u16 old, new;
-
-	spin_lock_irqsave(&self->lock, flags);
-	old = ioread16(self->membase + offset);
-	new = (old & ~mask) | data;
-	if (old != new)
-		iowrite16(data, self->membase + offset);
-	spin_unlock_irqrestore(&self->lock, flags);
-}
-
-/************************************************************************
-
-
-			mode function
-
-
-************************************************************************/
-/*=====================================
- *
- *		common
- *
- *=====================================*/
-static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
-{
-	struct device *dev = &self->ndev->dev;
-
-	sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
-	dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
-}
-
-static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
-{
-	struct device *dev = &self->ndev->dev;
-
-	if (SH_IRDA_SIR != self->mode)
-		interval = 0;
-
-	if (interval < 0 || interval > 2) {
-		dev_err(dev, "unsupported timeout interval\n");
-		return -EINVAL;
-	}
-
-	sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
-	return 0;
-}
-
-static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
-{
-	struct device *dev = &self->ndev->dev;
-	u16 val;
-
-	if (baudrate < 0)
-		return 0;
-
-	if (SH_IRDA_SIR != self->mode) {
-		dev_err(dev, "it is not SIR mode\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Baud rate (bits/s) =
-	 *   (48 MHz / 26) / (baud rate counter value + 1) x 16
-	 */
-	val = (48000000 / 26 / 16 / baudrate) - 1;
-	dev_dbg(dev, "baudrate = %d,  val = 0x%02x\n", baudrate, val);
-
-	sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);
-
-	return 0;
-}
-
-static int sh_irda_get_rcv_length(struct sh_irda_self *self)
-{
-	return RFL_MASK & sh_irda_read(self, IRRFLR);
-}
-
-/*=====================================
- *
- *		NONE MODE
- *
- *=====================================*/
-static int sh_irda_xir_fre(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-	dev_err(dev, "none mode: frame recv\n");
-	return 0;
-}
-
-static int sh_irda_xir_trov(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-	dev_err(dev, "none mode: buffer ram over\n");
-	return 0;
-}
-
-static int sh_irda_xir_9(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-	dev_err(dev, "none mode: time over\n");
-	return 0;
-}
-
-static int sh_irda_xir_8(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-	dev_err(dev, "none mode: framing error\n");
-	return 0;
-}
-
-static int sh_irda_xir_fte(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-	dev_err(dev, "none mode: frame transmit end\n");
-	return 0;
-}
-
-static struct sh_irda_xir_func sh_irda_xir_func = {
-	.xir_fre	= sh_irda_xir_fre,
-	.xir_trov	= sh_irda_xir_trov,
-	.xir_9		= sh_irda_xir_9,
-	.xir_8		= sh_irda_xir_8,
-	.xir_fte	= sh_irda_xir_fte,
-};
-
-/*=====================================
- *
- *		MIR/FIR MODE
- *
- * MIR/FIR are not supported now
- *=====================================*/
-static struct sh_irda_xir_func sh_irda_mfir_func = {
-	.xir_fre	= sh_irda_xir_fre,
-	.xir_trov	= sh_irda_xir_trov,
-	.xir_9		= sh_irda_xir_9,
-	.xir_8		= sh_irda_xir_8,
-	.xir_fte	= sh_irda_xir_fte,
-};
-
-/*=====================================
- *
- *		SIR MODE
- *
- *=====================================*/
-static int sh_irda_sir_fre(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-	u16 data16;
-	u8  *data = (u8 *)&data16;
-	int len = sh_irda_get_rcv_length(self);
-	int i, j;
-
-	if (len > IRDARAM_LEN)
-		len = IRDARAM_LEN;
-
-	dev_dbg(dev, "frame recv length = %d\n", len);
-
-	for (i = 0; i < len; i++) {
-		j = i % 2;
-		if (!j)
-			data16 = sh_irda_read(self, IRDARAM + i);
-
-		async_unwrap_char(self->ndev, &self->ndev->stats,
-				  &self->rx_buff, data[j]);
-	}
-	self->ndev->last_rx = jiffies;
-
-	sh_irda_rcv_ctrl(self, 1);
-
-	return 0;
-}
-
-static int sh_irda_sir_trov(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-
-	dev_err(dev, "buffer ram over\n");
-	sh_irda_rcv_ctrl(self, 1);
-	return 0;
-}
-
-static int sh_irda_sir_tot(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-
-	dev_err(dev, "time over\n");
-	sh_irda_set_baudrate(self, 9600);
-	sh_irda_rcv_ctrl(self, 1);
-	return 0;
-}
-
-static int sh_irda_sir_fer(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-
-	dev_err(dev, "framing error\n");
-	sh_irda_rcv_ctrl(self, 1);
-	return 0;
-}
-
-static int sh_irda_sir_fte(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-
-	dev_dbg(dev, "frame transmit end\n");
-	netif_wake_queue(self->ndev);
-
-	return 0;
-}
-
-static struct sh_irda_xir_func sh_irda_sir_func = {
-	.xir_fre	= sh_irda_sir_fre,
-	.xir_trov	= sh_irda_sir_trov,
-	.xir_9		= sh_irda_sir_tot,
-	.xir_8		= sh_irda_sir_fer,
-	.xir_fte	= sh_irda_sir_fte,
-};
-
-static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
-{
-	struct device *dev = &self->ndev->dev;
-	struct sh_irda_xir_func	*func;
-	const char *name;
-	u16 data;
-
-	switch (mode) {
-	case SH_IRDA_SIR:
-		name	= "SIR";
-		data	= TMD_SIR;
-		func	= &sh_irda_sir_func;
-		break;
-	case SH_IRDA_MIR:
-		name	= "MIR";
-		data	= TMD_MIR;
-		func	= &sh_irda_mfir_func;
-		break;
-	case SH_IRDA_FIR:
-		name	= "FIR";
-		data	= TMD_FIR;
-		func	= &sh_irda_mfir_func;
-		break;
-	default:
-		name	= "NONE";
-		data	= 0;
-		func	= &sh_irda_xir_func;
-		break;
-	}
-
-	self->mode = mode;
-	self->xir_func = func;
-	sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
-
-	dev_dbg(dev, "switch to %s mode", name);
-}
-
-/************************************************************************
-
-
-			irq function
-
-
-************************************************************************/
-static void sh_irda_set_irq_mask(struct sh_irda_self *self)
-{
-	u16 tmr_hole;
-	u16 xir_reg;
-
-	/* set all mask */
-	sh_irda_update_bits(self, IRTMR,   xIM_MASK, xIM_MASK);
-	sh_irda_update_bits(self, SIRIMR,  xIR_MASK, xIR_MASK);
-	sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);
-
-	/* clear irq */
-	sh_irda_update_bits(self, SIRICR,  xIR_MASK, xIR_MASK);
-	sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);
-
-	switch (self->mode) {
-	case SH_IRDA_SIR:
-		tmr_hole	= SIM;
-		xir_reg		= SIRIMR;
-		break;
-	case SH_IRDA_MIR:
-	case SH_IRDA_FIR:
-		tmr_hole	= MIM;
-		xir_reg		= MFIRIMR;
-		break;
-	default:
-		tmr_hole	= 0;
-		xir_reg		= 0;
-		break;
-	}
-
-	/* open mask */
-	if (xir_reg) {
-		sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
-		sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
-	}
-}
-
-static irqreturn_t sh_irda_irq(int irq, void *dev_id)
-{
-	struct sh_irda_self *self = dev_id;
-	struct sh_irda_xir_func	*func = self->xir_func;
-	u16 isr = sh_irda_read(self, SIRISR);
-
-	/* clear irq */
-	sh_irda_write(self, SIRICR, isr);
-
-	if (isr & FRE)
-		func->xir_fre(self);
-	if (isr & TROV)
-		func->xir_trov(self);
-	if (isr & xIR_9)
-		func->xir_9(self);
-	if (isr & xIR_8)
-		func->xir_8(self);
-	if (isr & FTE)
-		func->xir_fte(self);
-
-	return IRQ_HANDLED;
-}
-
-/************************************************************************
-
-
-			CRC function
-
-
-************************************************************************/
-static void sh_irda_crc_reset(struct sh_irda_self *self)
-{
-	sh_irda_write(self, CRCCTR, CRC_RST);
-}
-
-static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
-{
-	sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
-}
-
-static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
-{
-	return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
-}
-
-static u16 sh_irda_crc_out(struct sh_irda_self *self)
-{
-	return sh_irda_read(self, CRCOR);
-}
-
-static int sh_irda_crc_init(struct sh_irda_self *self)
-{
-	struct device *dev = &self->ndev->dev;
-	int ret = -EIO;
-	u16 val;
-
-	sh_irda_crc_reset(self);
-
-	sh_irda_crc_add(self, 0xCC);
-	sh_irda_crc_add(self, 0xF5);
-	sh_irda_crc_add(self, 0xF1);
-	sh_irda_crc_add(self, 0xA7);
-
-	val = sh_irda_crc_cnt(self);
-	if (4 != val) {
-		dev_err(dev, "CRC count error %x\n", val);
-		goto crc_init_out;
-	}
-
-	val = sh_irda_crc_out(self);
-	if (0x51DF != val) {
-		dev_err(dev, "CRC result error%x\n", val);
-		goto crc_init_out;
-	}
-
-	ret = 0;
-
-crc_init_out:
-
-	sh_irda_crc_reset(self);
-	return ret;
-}
-
-/************************************************************************
-
-
-			iobuf function
-
-
-************************************************************************/
-static void sh_irda_remove_iobuf(struct sh_irda_self *self)
-{
-	kfree(self->rx_buff.head);
-
-	self->tx_buff.head = NULL;
-	self->tx_buff.data = NULL;
-	self->rx_buff.head = NULL;
-	self->rx_buff.data = NULL;
-}
-
-static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
-{
-	if (self->rx_buff.head ||
-	    self->tx_buff.head) {
-		dev_err(&self->ndev->dev, "iobuff has already existed.");
-		return -EINVAL;
-	}
-
-	/* rx_buff */
-	self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
-	if (!self->rx_buff.head)
-		return -ENOMEM;
-
-	self->rx_buff.truesize	= rxsize;
-	self->rx_buff.in_frame	= FALSE;
-	self->rx_buff.state	= OUTSIDE_FRAME;
-	self->rx_buff.data	= self->rx_buff.head;
-
-	/* tx_buff */
-	self->tx_buff.head	= self->membase + IRDARAM;
-	self->tx_buff.truesize	= IRDARAM_LEN;
-
-	return 0;
-}
-
-/************************************************************************
-
-
-			net_device_ops function
-
-
-************************************************************************/
-static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
-	struct sh_irda_self *self = netdev_priv(ndev);
-	struct device *dev = &self->ndev->dev;
-	int speed = irda_get_next_speed(skb);
-	int ret;
-
-	dev_dbg(dev, "hard xmit\n");
-
-	netif_stop_queue(ndev);
-	sh_irda_rcv_ctrl(self, 0);
-
-	ret = sh_irda_set_baudrate(self, speed);
-	if (ret < 0)
-		goto sh_irda_hard_xmit_end;
-
-	self->tx_buff.len = 0;
-	if (skb->len) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&self->lock, flags);
-		self->tx_buff.len = async_wrap_skb(skb,
-						   self->tx_buff.head,
-						   self->tx_buff.truesize);
-		spin_unlock_irqrestore(&self->lock, flags);
-
-		if (self->tx_buff.len > self->tx_buff.truesize)
-			self->tx_buff.len = self->tx_buff.truesize;
-
-		sh_irda_write(self, IRTFLR, self->tx_buff.len);
-		sh_irda_write(self, IRTCTR, ARMOD | TE);
-	} else
-		goto sh_irda_hard_xmit_end;
-
-	dev_kfree_skb(skb);
-
-	return 0;
-
-sh_irda_hard_xmit_end:
-	sh_irda_set_baudrate(self, 9600);
-	netif_wake_queue(self->ndev);
-	sh_irda_rcv_ctrl(self, 1);
-	dev_kfree_skb(skb);
-
-	return ret;
-
-}
-
-static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
-{
-	/*
-	 * FIXME
-	 *
-	 * This function is needed for irda framework.
-	 * But nothing to do now
-	 */
-	return 0;
-}
-
-static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
-{
-	struct sh_irda_self *self = netdev_priv(ndev);
-
-	return &self->ndev->stats;
-}
-
-static int sh_irda_open(struct net_device *ndev)
-{
-	struct sh_irda_self *self = netdev_priv(ndev);
-	int err;
-
-	pm_runtime_get_sync(&self->pdev->dev);
-	err = sh_irda_crc_init(self);
-	if (err)
-		goto open_err;
-
-	sh_irda_set_mode(self, SH_IRDA_SIR);
-	sh_irda_set_timeout(self, 2);
-	sh_irda_set_baudrate(self, 9600);
-
-	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
-	if (!self->irlap) {
-		err = -ENODEV;
-		goto open_err;
-	}
-
-	netif_start_queue(ndev);
-	sh_irda_rcv_ctrl(self, 1);
-	sh_irda_set_irq_mask(self);
-
-	dev_info(&ndev->dev, "opened\n");
-
-	return 0;
-
-open_err:
-	pm_runtime_put_sync(&self->pdev->dev);
-
-	return err;
-}
-
-static int sh_irda_stop(struct net_device *ndev)
-{
-	struct sh_irda_self *self = netdev_priv(ndev);
-
-	/* Stop IrLAP */
-	if (self->irlap) {
-		irlap_close(self->irlap);
-		self->irlap = NULL;
-	}
-
-	netif_stop_queue(ndev);
-	pm_runtime_put_sync(&self->pdev->dev);
-
-	dev_info(&ndev->dev, "stopped\n");
-
-	return 0;
-}
-
-static const struct net_device_ops sh_irda_ndo = {
-	.ndo_open		= sh_irda_open,
-	.ndo_stop		= sh_irda_stop,
-	.ndo_start_xmit		= sh_irda_hard_xmit,
-	.ndo_do_ioctl		= sh_irda_ioctl,
-	.ndo_get_stats		= sh_irda_stats,
-};
-
-/************************************************************************
-
-
-			platform_driver function
-
-
-************************************************************************/
-static int sh_irda_probe(struct platform_device *pdev)
-{
-	struct net_device *ndev;
-	struct sh_irda_self *self;
-	struct resource *res;
-	int irq;
-	int err = -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	irq = platform_get_irq(pdev, 0);
-	if (!res || irq < 0) {
-		dev_err(&pdev->dev, "Not enough platform resources.\n");
-		goto exit;
-	}
-
-	ndev = alloc_irdadev(sizeof(*self));
-	if (!ndev)
-		goto exit;
-
-	self = netdev_priv(ndev);
-	self->membase = ioremap_nocache(res->start, resource_size(res));
-	if (!self->membase) {
-		err = -ENXIO;
-		dev_err(&pdev->dev, "Unable to ioremap.\n");
-		goto err_mem_1;
-	}
-
-	err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
-	if (err)
-		goto err_mem_2;
-
-	self->pdev = pdev;
-	pm_runtime_enable(&pdev->dev);
-
-	irda_init_max_qos_capabilies(&self->qos);
-
-	ndev->netdev_ops	= &sh_irda_ndo;
-	ndev->irq		= irq;
-
-	self->ndev			= ndev;
-	self->qos.baud_rate.bits	&= IR_9600; /* FIXME */
-	self->qos.min_turn_time.bits	= 1; /* 10 ms or more */
-	spin_lock_init(&self->lock);
-
-	irda_qos_bits_to_value(&self->qos);
-
-	err = register_netdev(ndev);
-	if (err)
-		goto err_mem_4;
-
-	platform_set_drvdata(pdev, ndev);
-	err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self);
-	if (err) {
-		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
-		goto err_mem_4;
-	}
-
-	dev_info(&pdev->dev, "SuperH IrDA probed\n");
-
-	goto exit;
-
-err_mem_4:
-	pm_runtime_disable(&pdev->dev);
-	sh_irda_remove_iobuf(self);
-err_mem_2:
-	iounmap(self->membase);
-err_mem_1:
-	free_netdev(ndev);
-exit:
-	return err;
-}
-
-static int sh_irda_remove(struct platform_device *pdev)
-{
-	struct net_device *ndev = platform_get_drvdata(pdev);
-	struct sh_irda_self *self = netdev_priv(ndev);
-
-	if (!self)
-		return 0;
-
-	unregister_netdev(ndev);
-	pm_runtime_disable(&pdev->dev);
-	sh_irda_remove_iobuf(self);
-	iounmap(self->membase);
-	free_netdev(ndev);
-
-	return 0;
-}
-
-static int sh_irda_runtime_nop(struct device *dev)
-{
-	/* Runtime PM callback shared between ->runtime_suspend()
-	 * and ->runtime_resume(). Simply returns success.
-	 *
-	 * This driver re-initializes all registers after
-	 * pm_runtime_get_sync() anyway so there is no need
-	 * to save and restore registers here.
-	 */
-	return 0;
-}
-
-static const struct dev_pm_ops sh_irda_pm_ops = {
-	.runtime_suspend	= sh_irda_runtime_nop,
-	.runtime_resume		= sh_irda_runtime_nop,
-};
-
-static struct platform_driver sh_irda_driver = {
-	.probe	= sh_irda_probe,
-	.remove	= sh_irda_remove,
-	.driver	= {
-		.name	= DRIVER_NAME,
-		.pm	= &sh_irda_pm_ops,
-	},
-};
-
-module_platform_driver(sh_irda_driver);
-
-MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
-MODULE_DESCRIPTION("SuperH IrDA driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index b455ffe..dcf92ba 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -862,7 +862,7 @@
 	spin_lock_irqsave(&self->lock, flags);
 	smsc_ircc_sir_start(self);
 	smsc_ircc_change_speed(self, self->io.speed);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	netif_wake_queue(dev);
 	spin_unlock_irqrestore(&self->lock, flags);
 }
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 83cc48a..42da094 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -718,7 +718,7 @@
 
 	stir->netdev->stats.tx_packets++;
 	stir->netdev->stats.tx_bytes += skb->len;
-	stir->netdev->trans_start = jiffies;
+	netif_trans_update(stir->netdev);
 	pr_debug("send %d (%d)\n", skb->len, wraplen);
 
 	if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 6960d4c..ca4442a 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -774,7 +774,7 @@
 		/* Check for empty frame */
 		if (!skb->len) {
 			via_ircc_change_speed(self, speed);
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			dev_kfree_skb(skb);
 			return NETDEV_TX_OK;
 		} else
@@ -821,7 +821,7 @@
 	RXStart(iobase, OFF);
 	TXStart(iobase, ON);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	spin_unlock_irqrestore(&self->lock, flags);
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
@@ -849,7 +849,7 @@
 	if ((speed != self->io.speed) && (speed != -1)) {
 		if (!skb->len) {
 			via_ircc_change_speed(self, speed);
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			dev_kfree_skb(skb);
 			return NETDEV_TX_OK;
 		} else
@@ -869,7 +869,7 @@
 	via_ircc_dma_xmit(self, iobase);
 //F01   }
 //F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	dev_kfree_skb(skb);
 	spin_unlock_irqrestore(&self->lock, flags);
 	return NETDEV_TX_OK;
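All of the IrDA hunks above are the same mechanical conversion: open-coded writes of jiffies to dev->trans_start become netif_trans_update(dev), which touches the per-queue trans_start that the TX-timeout watchdog actually inspects. Roughly what the helper does at this point in the tree (a sketch, not a verbatim copy of the header):

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* avoid dirtying the cacheline when nothing changed */
	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}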
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 84d3e5c..460740c 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -85,7 +85,7 @@
  * @tfm: crypto struct, key storage
  */
 struct macsec_key {
-	u64 id;
+	u8 id[MACSEC_KEYID_LEN];
 	struct crypto_aead *tfm;
 };
 
@@ -880,12 +880,12 @@
 	macsec_skb_cb(skb)->valid = false;
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
 	if (!req) {
 		kfree_skb(skb);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	hdr = (struct macsec_eth_header *)skb->data;
@@ -905,7 +905,7 @@
 		skb = skb_unshare(skb, GFP_ATOMIC);
 		if (!skb) {
 			aead_request_free(req);
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 		}
 	} else {
 		/* integrity only: all headers + data authenticated */
@@ -921,14 +921,14 @@
 	dev_hold(dev);
 	ret = crypto_aead_decrypt(req);
 	if (ret == -EINPROGRESS) {
-		return NULL;
+		return ERR_PTR(ret);
 	} else if (ret != 0) {
 		/* decryption/authentication failed
 		 * 10.6 if validateFrames is disabled, deliver anyway
 		 */
 		if (ret != -EBADMSG) {
 			kfree_skb(skb);
-			skb = NULL;
+			skb = ERR_PTR(ret);
 		}
 	} else {
 		macsec_skb_cb(skb)->valid = true;
@@ -1146,8 +1146,10 @@
 	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
 		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
 
-	if (!skb) {
-		macsec_rxsa_put(rx_sa);
+	if (IS_ERR(skb)) {
+		/* the decrypt callback needs the reference */
+		if (PTR_ERR(skb) != -EINPROGRESS)
+			macsec_rxsa_put(rx_sa);
 		rcu_read_unlock();
 		*pskb = NULL;
 		return RX_HANDLER_CONSUMED;
@@ -1161,7 +1163,8 @@
 			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 	macsec_reset_skb(skb, secy->netdev);
 
-	macsec_rxsa_put(rx_sa);
+	if (rx_sa)
+		macsec_rxsa_put(rx_sa);
 	count_rx(dev, skb->len);
 
 	rcu_read_unlock();
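macsec_decrypt() stops signalling failure with a bare NULL and uses the kernel's ERR_PTR encoding instead, so the caller can tell -EINPROGRESS (async crypto still running, the completion callback keeps the SA reference) apart from hard errors. A minimal sketch of the idiom with hypothetical demo_* helpers:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_do_crypto(struct sk_buff *skb);	/* hypothetical */
static void demo_drop_refs(void);		/* hypothetical cleanup */

static struct sk_buff *demo_decrypt(struct sk_buff *skb)
{
	int ret = demo_do_crypto(skb);

	if (ret)
		return ERR_PTR(ret);	/* -EINPROGRESS or a real error */
	return skb;
}

static rx_handler_result_t demo_handle(struct sk_buff *skb)
{
	skb = demo_decrypt(skb);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			demo_drop_refs();	/* async path keeps the ref */
		return RX_HANDLER_CONSUMED;
	}
	/* ... continue with the decrypted skb ... */
	return RX_HANDLER_PASS;
}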
@@ -1405,9 +1408,10 @@
 	return (__force sci_t)nla_get_u64(nla);
 }
 
-static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value)
+static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
+		       int padattr)
 {
-	return nla_put_u64(skb, attrtype, (__force u64)value);
+	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
 }
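nla_put_u64() can leave a 64-bit attribute unaligned on some architectures; the conversion to nla_put_u64_64bit() threads a dedicated *_PAD attribute through every 64-bit put so the netlink core can insert alignment padding. The nla_put_sci() wrapper above shows the shape; the stats helpers further down repeat it for each counter, for example:

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK, MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;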
 
 static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
@@ -1526,7 +1530,8 @@
 	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
 	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
 	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
-	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_U64 },
+	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
+				   .len = MACSEC_KEYID_LEN, },
 	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
 				 .len = MACSEC_MAX_KEY_LEN, },
 };
@@ -1573,6 +1578,9 @@
 			return false;
 	}
 
+	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
+		return false;
+
 	return true;
 }
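The key identifier becomes a fixed-size binary blob instead of a u64: the policy caps the attribute length, and the validate helpers insist on the exact size before the bytes are copied with nla_memcpy(). A condensed sketch of that pattern, assuming the uapi MACSEC_SA_ATTR_MAX bound and a hypothetical demo_sa_policy name:

static const struct nla_policy demo_sa_policy[MACSEC_SA_ATTR_MAX + 1] = {
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN },
};

	/* NLA_BINARY only enforces a maximum, so check the exact length */
	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;
	nla_memcpy(sa->key.id, attrs[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);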
 
@@ -1622,8 +1630,9 @@
 	}
 
 	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
-	if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len,
-		       secy->icv_len)) {
+	if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+				 secy->key_len, secy->icv_len)) {
+		kfree(rx_sa);
 		rtnl_unlock();
 		return -ENOMEM;
 	}
@@ -1637,7 +1646,7 @@
 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
 		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
 
-	rx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]);
+	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
 	rx_sa->sc = rx_sc;
 	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
 
@@ -1718,6 +1727,9 @@
 			return false;
 	}
 
+	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
+		return false;
+
 	return true;
 }
 
@@ -1768,11 +1780,12 @@
 	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
 	if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
 				 secy->key_len, secy->icv_len)) {
+		kfree(tx_sa);
 		rtnl_unlock();
 		return -ENOMEM;
 	}
 
-	tx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]);
+	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
 
 	spin_lock_bh(&tx_sa->lock);
 	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
@@ -2131,16 +2144,36 @@
 		sum.InPktsUnusedSA    += tmp.InPktsUnusedSA;
 	}
 
-	if (nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, sum.InOctetsValidated) ||
-	    nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, sum.InOctetsDecrypted) ||
-	    nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, sum.InPktsUnchecked) ||
-	    nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, sum.InPktsDelayed) ||
-	    nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
-	    nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
-	    nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, sum.InPktsLate) ||
-	    nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
-	    nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
-	    nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
+	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
+			      sum.InOctetsValidated,
+			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
+			      sum.InOctetsDecrypted,
+			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
+			      sum.InPktsUnchecked,
+			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
+			      sum.InPktsDelayed,
+			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
+			      sum.InPktsOK,
+			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
+			      sum.InPktsInvalid,
+			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
+			      sum.InPktsLate,
+			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
+			      sum.InPktsNotValid,
+			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+			      sum.InPktsNotUsingSA,
+			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
+			      sum.InPktsUnusedSA,
+			      MACSEC_RXSC_STATS_ATTR_PAD))
 		return -EMSGSIZE;
 
 	return 0;
@@ -2169,10 +2202,18 @@
 		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
 	}
 
-	if (nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
-	    nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted) ||
-	    nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, sum.OutOctetsProtected) ||
-	    nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, sum.OutOctetsEncrypted))
+	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
+			      sum.OutPktsProtected,
+			      MACSEC_TXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+			      sum.OutPktsEncrypted,
+			      MACSEC_TXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
+			      sum.OutOctetsProtected,
+			      MACSEC_TXSC_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
+			      sum.OutOctetsEncrypted,
+			      MACSEC_TXSC_STATS_ATTR_PAD))
 		return -EMSGSIZE;
 
 	return 0;
@@ -2205,14 +2246,30 @@
 		sum.InPktsOverrun    += tmp.InPktsOverrun;
 	}
 
-	if (nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, sum.OutPktsUntagged) ||
-	    nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, sum.InPktsUntagged) ||
-	    nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, sum.OutPktsTooLong) ||
-	    nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, sum.InPktsNoTag) ||
-	    nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, sum.InPktsBadTag) ||
-	    nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, sum.InPktsUnknownSCI) ||
-	    nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, sum.InPktsNoSCI) ||
-	    nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, sum.InPktsOverrun))
+	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
+			      sum.OutPktsUntagged,
+			      MACSEC_SECY_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
+			      sum.InPktsUntagged,
+			      MACSEC_SECY_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
+			      sum.OutPktsTooLong,
+			      MACSEC_SECY_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
+			      sum.InPktsNoTag,
+			      MACSEC_SECY_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
+			      sum.InPktsBadTag,
+			      MACSEC_SECY_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
+			      sum.InPktsUnknownSCI,
+			      MACSEC_SECY_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
+			      sum.InPktsNoSCI,
+			      MACSEC_SECY_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
+			      sum.InPktsOverrun,
+			      MACSEC_SECY_STATS_ATTR_PAD))
 		return -EMSGSIZE;
 
 	return 0;
@@ -2226,8 +2283,11 @@
 	if (!secy_nest)
 		return 1;
 
-	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
-	    nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
+			MACSEC_SECY_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
+			      MACSEC_DEFAULT_CIPHER_ID,
+			      MACSEC_SECY_ATTR_PAD) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2268,7 +2328,7 @@
 	if (!hdr)
 		return -EMSGSIZE;
 
-	rtnl_lock();
+	genl_dump_check_consistent(cb, hdr, &macsec_fam);
 
 	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
 		goto nla_put_failure;
@@ -2312,7 +2372,7 @@
 
 		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
 		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
-		    nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, tx_sa->key.id) ||
+		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
 		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
 			nla_nest_cancel(skb, txsa_nest);
 			nla_nest_cancel(skb, txsa_list);
@@ -2353,7 +2413,8 @@
 		}
 
 		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
-		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci)) {
+		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
+				MACSEC_RXSC_ATTR_PAD)) {
 			nla_nest_cancel(skb, rxsc_nest);
 			nla_nest_cancel(skb, rxsc_list);
 			goto nla_put_failure;
@@ -2413,7 +2474,7 @@
 
 			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
 			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
-			    nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, rx_sa->key.id) ||
+			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
 			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
 				nla_nest_cancel(skb, rxsa_nest);
 				nla_nest_cancel(skb, rxsc_nest);
@@ -2429,18 +2490,17 @@
 
 	nla_nest_end(skb, rxsc_list);
 
-	rtnl_unlock();
-
 	genlmsg_end(skb, hdr);
 
 	return 0;
 
 nla_put_failure:
-	rtnl_unlock();
 	genlmsg_cancel(skb, hdr);
 	return -EMSGSIZE;
 }
 
+static int macsec_generation = 1; /* protected by RTNL */
+
 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
@@ -2450,6 +2510,10 @@
 	dev_idx = cb->args[0];
 
 	d = 0;
+	rtnl_lock();
+
+	cb->seq = macsec_generation;
+
 	for_each_netdev(net, dev) {
 		struct macsec_secy *secy;
 
@@ -2467,6 +2531,7 @@
 	}
 
 done:
+	rtnl_unlock();
 	cb->args[0] = d;
 	return skb->len;
 }
@@ -2826,7 +2891,7 @@
 static void macsec_setup(struct net_device *dev)
 {
 	ether_setup(dev);
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->netdev_ops = &macsec_netdev_ops;
 	dev->destructor = macsec_free_netdev;
 
@@ -2920,10 +2985,14 @@
 	struct net_device *real_dev = macsec->real_dev;
 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
 
+	macsec_generation++;
+
 	unregister_netdevice_queue(dev, head);
 	list_del_rcu(&macsec->secys);
-	if (list_empty(&rxd->secys))
+	if (list_empty(&rxd->secys)) {
 		netdev_rx_handler_unregister(real_dev);
+		kfree(rxd);
+	}
 
 	macsec_del_dev(macsec);
 }
@@ -2945,8 +3014,10 @@
 
 		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
 						 rxd);
-		if (err < 0)
+		if (err < 0) {
+			kfree(rxd);
 			return err;
+		}
 	}
 
 	list_add_tail_rcu(&macsec->secys, &rxd->secys);
@@ -3066,6 +3137,8 @@
 	if (err < 0)
 		goto del_dev;
 
+	macsec_generation++;
+
 	dev_hold(real_dev);
 
 	return 0;
@@ -3079,7 +3152,7 @@
 
 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
 {
-	u64 csid = DEFAULT_CIPHER_ID;
+	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
 	u8 icv_len = DEFAULT_ICV_LEN;
 	int flag;
 	bool es, scb, sci;
@@ -3094,8 +3167,8 @@
 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
 
 	switch (csid) {
-	case DEFAULT_CIPHER_ID:
-	case DEFAULT_CIPHER_ALT:
+	case MACSEC_DEFAULT_CIPHER_ID:
+	case MACSEC_DEFAULT_CIPHER_ALT:
 		if (icv_len < MACSEC_MIN_ICV_LEN ||
 		    icv_len > MACSEC_MAX_ICV_LEN)
 			return -EINVAL;
@@ -3129,8 +3202,8 @@
 	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
 		return -EINVAL;
 
-	if ((data[IFLA_MACSEC_PROTECT] &&
-	     nla_get_u8(data[IFLA_MACSEC_PROTECT])) &&
+	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
+	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
 	    !data[IFLA_MACSEC_WINDOW])
 		return -EINVAL;
 
@@ -3145,9 +3218,9 @@
 static size_t macsec_get_size(const struct net_device *dev)
 {
 	return 0 +
-		nla_total_size(8) + /* SCI */
+		nla_total_size_64bit(8) + /* SCI */
 		nla_total_size(1) + /* ICV_LEN */
-		nla_total_size(8) + /* CIPHER_SUITE */
+		nla_total_size_64bit(8) + /* CIPHER_SUITE */
 		nla_total_size(4) + /* WINDOW */
 		nla_total_size(1) + /* ENCODING_SA */
 		nla_total_size(1) + /* ENCRYPT */
@@ -3166,9 +3239,11 @@
 	struct macsec_secy *secy = &macsec_priv(dev)->secy;
 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 
-	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
+	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
+			IFLA_MACSEC_PAD) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
-	    nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
+			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
 	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
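
The macsec netlink changes above follow the 64-bit attribute alignment convention: every nla_put_u64() becomes nla_put_u64_64bit() with a dedicated pad attribute, and the size estimates switch to nla_total_size_64bit(). A minimal sketch of the pattern, using hypothetical FOO_ATTR_* ids rather than the macsec ones:

#include <net/netlink.h>

enum {
	FOO_ATTR_UNSPEC,
	FOO_ATTR_BYTES,	/* u64 counter */
	FOO_ATTR_PAD,	/* carries no data, only alignment filler */
	__FOO_ATTR_MAX,
};

static int foo_fill_stats(struct sk_buff *skb, u64 bytes)
{
	/* The pad attribute lets the netlink core insert filler so the
	 * u64 payload stays 8-byte aligned on architectures that need it.
	 */
	if (nla_put_u64_64bit(skb, FOO_ATTR_BYTES, bytes, FOO_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

The matching size estimate reserves nla_total_size_64bit(sizeof(u64)) per counter instead of nla_total_size(8).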
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 2bcf1f3..cb01023 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -795,6 +795,7 @@
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	const struct net_device *lowerdev = vlan->lowerdev;
+	struct macvlan_port *port = vlan->port;
 
 	dev->state		= (dev->state & ~MACVLAN_STATE_MASK) |
 				  (lowerdev->state & MACVLAN_STATE_MASK);
@@ -812,6 +813,8 @@
 	if (!vlan->pcpu_stats)
 		return -ENOMEM;
 
+	port->count += 1;
+
 	return 0;
 }
 
@@ -1312,10 +1315,9 @@
 			return err;
 	}
 
-	port->count += 1;
 	err = register_netdevice(dev);
 	if (err < 0)
-		goto destroy_port;
+		return err;
 
 	dev->priv_flags |= IFF_MACVLAN;
 	err = netdev_upper_dev_link(lowerdev, dev);
@@ -1330,10 +1332,6 @@
 
 unregister_netdev:
 	unregister_netdevice(dev);
-destroy_port:
-	port->count -= 1;
-	if (!port->count)
-		macvlan_port_destroy(lowerdev);
 
 	return err;
 }
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 95394ed..bd67209 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -129,7 +129,18 @@
 static DEFINE_IDR(minor_idr);
 
 #define GOODCOPY_LEN 128
-static struct class *macvtap_class;
+static const void *macvtap_net_namespace(struct device *d)
+{
+	struct net_device *dev = to_net_dev(d->parent);
+	return dev_net(dev);
+}
+
+static struct class macvtap_class = {
+	.name = "macvtap",
+	.owner = THIS_MODULE,
+	.ns_type = &net_ns_type_operations,
+	.namespace = macvtap_net_namespace,
+};
 static struct cdev macvtap_cdev;
 
 static const struct proto_ops macvtap_socket_ops;
@@ -373,7 +384,7 @@
 			goto wake_up;
 		}
 
-		kfree_skb(skb);
+		consume_skb(skb);
 		while (segs) {
 			struct sk_buff *nskb = segs->next;
 
@@ -1278,10 +1289,12 @@
 	struct device *classdev;
 	dev_t devt;
 	int err;
+	char tap_name[IFNAMSIZ];
 
 	if (dev->rtnl_link_ops != &macvtap_link_ops)
 		return NOTIFY_DONE;
 
+	snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
 	vlan = netdev_priv(dev);
 
 	switch (event) {
@@ -1295,16 +1308,24 @@
 			return notifier_from_errno(err);
 
 		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
-		classdev = device_create(macvtap_class, &dev->dev, devt,
-					 dev, "tap%d", dev->ifindex);
+		classdev = device_create(&macvtap_class, &dev->dev, devt,
+					 dev, tap_name);
 		if (IS_ERR(classdev)) {
 			macvtap_free_minor(vlan);
 			return notifier_from_errno(PTR_ERR(classdev));
 		}
+		err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
+					tap_name);
+		if (err)
+			return notifier_from_errno(err);
 		break;
 	case NETDEV_UNREGISTER:
+		/* vlan->minor == 0 if NETDEV_REGISTER above failed */
+		if (vlan->minor == 0)
+			break;
+		sysfs_remove_link(&dev->dev.kobj, tap_name);
 		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
-		device_destroy(macvtap_class, devt);
+		device_destroy(&macvtap_class, devt);
 		macvtap_free_minor(vlan);
 		break;
 	}
@@ -1330,11 +1351,9 @@
 	if (err)
 		goto out2;
 
-	macvtap_class = class_create(THIS_MODULE, "macvtap");
-	if (IS_ERR(macvtap_class)) {
-		err = PTR_ERR(macvtap_class);
+	err = class_register(&macvtap_class);
+	if (err)
 		goto out3;
-	}
 
 	err = register_netdevice_notifier(&macvtap_notifier_block);
 	if (err)
@@ -1349,7 +1368,7 @@
 out5:
 	unregister_netdevice_notifier(&macvtap_notifier_block);
 out4:
-	class_unregister(macvtap_class);
+	class_unregister(&macvtap_class);
 out3:
 	cdev_del(&macvtap_cdev);
 out2:
@@ -1363,7 +1382,7 @@
 {
 	rtnl_link_unregister(&macvtap_link_ops);
 	unregister_netdevice_notifier(&macvtap_notifier_block);
-	class_unregister(macvtap_class);
+	class_unregister(&macvtap_class);
 	cdev_del(&macvtap_cdev);
 	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
 	idr_destroy(&minor_idr);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index b3ffaee..f279a89 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -359,27 +359,25 @@
 	 * in the FIFO. In such cases, the FIFO enters an error mode it
 	 * cannot recover from by software.
 	 */
-	if (phydev->drv->phy_id == ATH8030_PHY_ID) {
-		if (phydev->state == PHY_NOLINK) {
-			if (priv->gpiod_reset && !priv->phy_reset) {
-				struct at803x_context context;
+	if (phydev->state == PHY_NOLINK) {
+		if (priv->gpiod_reset && !priv->phy_reset) {
+			struct at803x_context context;
 
-				at803x_context_save(phydev, &context);
+			at803x_context_save(phydev, &context);
 
-				gpiod_set_value(priv->gpiod_reset, 1);
-				msleep(1);
-				gpiod_set_value(priv->gpiod_reset, 0);
-				msleep(1);
+			gpiod_set_value(priv->gpiod_reset, 1);
+			msleep(1);
+			gpiod_set_value(priv->gpiod_reset, 0);
+			msleep(1);
 
-				at803x_context_restore(phydev, &context);
+			at803x_context_restore(phydev, &context);
 
-				phydev_dbg(phydev, "%s(): phy was reset\n",
-					   __func__);
-				priv->phy_reset = true;
-			}
-		} else {
-			priv->phy_reset = false;
+			phydev_dbg(phydev, "%s(): phy was reset\n",
+				   __func__);
+			priv->phy_reset = true;
 		}
+	} else {
+		priv->phy_reset = false;
 	}
 }
 
@@ -391,7 +389,6 @@
 	.phy_id_mask		= 0xffffffef,
 	.probe			= at803x_probe,
 	.config_init		= at803x_config_init,
-	.link_change_notify	= at803x_link_change_notify,
 	.set_wol		= at803x_set_wol,
 	.get_wol		= at803x_get_wol,
 	.suspend		= at803x_suspend,
@@ -427,7 +424,6 @@
 	.phy_id_mask		= 0xffffffef,
 	.probe			= at803x_probe,
 	.config_init		= at803x_config_init,
-	.link_change_notify	= at803x_link_change_notify,
 	.set_wol		= at803x_set_wol,
 	.get_wol		= at803x_get_wol,
 	.suspend		= at803x_suspend,
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b881a7b1..9636da0 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -339,6 +339,8 @@
 	BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
 	BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
 	BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
+	BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
+	BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
 	BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
 	BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"),
 	BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"),
@@ -348,6 +350,8 @@
 	{ PHY_ID_BCM7250, 0xfffffff0, },
 	{ PHY_ID_BCM7364, 0xfffffff0, },
 	{ PHY_ID_BCM7366, 0xfffffff0, },
+	{ PHY_ID_BCM7346, 0xfffffff0, },
+	{ PHY_ID_BCM7362, 0xfffffff0, },
 	{ PHY_ID_BCM7425, 0xfffffff0, },
 	{ PHY_ID_BCM7429, 0xfffffff0, },
 	{ PHY_ID_BCM7439, 0xfffffff0, },
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index fc07a88..9050f21 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -328,7 +328,7 @@
 		return ERR_PTR(ret);
 
 	phy = get_phy_device(fmb->mii_bus, phy_addr, false);
-	if (!phy || IS_ERR(phy)) {
+	if (IS_ERR(phy)) {
 		fixed_phy_del(phy_addr);
 		return ERR_PTR(-EINVAL);
 	}
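
With get_phy_device() and mdiobus_scan() reporting failures via ERR_PTR() instead of NULL, callers only need the IS_ERR()/PTR_ERR() pair, as in the fixed_phy hunk above and the mdio_bus changes further down. A sketch of the resulting calling convention, with a hypothetical foo_register_one():

#include <linux/err.h>
#include <linux/phy.h>

static int foo_register_one(struct mii_bus *bus, int addr)
{
	struct phy_device *phydev;
	int err;

	phydev = get_phy_device(bus, addr, false);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);	/* no separate NULL check needed */

	err = phy_device_register(phydev);
	if (err)
		phy_device_free(phydev);

	return err;
}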
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index f607837..b9fde1b 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -80,23 +80,15 @@
 
 static int lxt970_config_intr(struct phy_device *phydev)
 {
-	int err;
-
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
+		return phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
 	else
-		err = phy_write(phydev, MII_LXT970_IER, 0);
-
-	return err;
+		return phy_write(phydev, MII_LXT970_IER, 0);
 }
 
 static int lxt970_config_init(struct phy_device *phydev)
 {
-	int err;
-
-	err = phy_write(phydev, MII_LXT970_CONFIG, 0);
-
-	return err;
+	return phy_write(phydev, MII_LXT970_CONFIG, 0);
 }
 
 
@@ -112,14 +104,10 @@
 
 static int lxt971_config_intr(struct phy_device *phydev)
 {
-	int err;
-
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
+		return phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
 	else
-		err = phy_write(phydev, MII_LXT971_IER, 0);
-
-	return err;
+		return phy_write(phydev, MII_LXT971_IER, 0);
 }
 
 /*
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 308ade0..5c81d6f 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -45,13 +45,7 @@
 	struct mdio_mux_parent_bus *pb = cb->parent;
 	int r;
 
-	/* In theory multiple mdio_mux could be stacked, thus creating
-	 * more than a single level of nesting.  But in practice,
-	 * SINGLE_DEPTH_NESTING will cover the vast majority of use
-	 * cases.  We use it, instead of trying to handle the general
-	 * case.
-	 */
-	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
+	mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
 	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
 	if (r)
 		goto out;
@@ -76,7 +70,7 @@
 
 	int r;
 
-	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
+	mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
 	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
 	if (r)
 		goto out;
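
MDIO_MUTEX_MUX here, and MDIO_MUTEX_NESTED in mdio_bus.c below, replace the generic SINGLE_DEPTH_NESTING so lockdep can tell a mux switch apart from a plain nested bus access. These are just named lockdep subclasses of the same mdio_lock; a sketch of the kind of enum this relies on (the real definition lives in the MDIO headers, which are not part of these hunks):

/* Sketch only: distinct lockdep subclasses for the shared mdio_lock. */
enum foo_mdio_mutex_lock_class {
	MDIO_MUTEX_NORMAL,	/* ordinary mdiobus_read()/mdiobus_write() */
	MDIO_MUTEX_NESTED,	/* access nested under another bus's lock */
	MDIO_MUTEX_MUX,		/* mux switch function touching the parent bus */
};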
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 0cba64f..09deef4 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -333,7 +333,7 @@
 			struct phy_device *phydev;
 
 			phydev = mdiobus_scan(bus, i);
-			if (IS_ERR(phydev)) {
+			if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) {
 				err = PTR_ERR(phydev);
 				goto error;
 			}
@@ -419,7 +419,7 @@
 	int err;
 
 	phydev = get_phy_device(bus, addr, false);
-	if (IS_ERR(phydev) || phydev == NULL)
+	if (IS_ERR(phydev))
 		return phydev;
 
 	/*
@@ -431,7 +431,7 @@
 	err = phy_device_register(phydev);
 	if (err) {
 		phy_device_free(phydev);
-		return NULL;
+		return ERR_PTR(-ENODEV);
 	}
 
 	return phydev;
@@ -457,7 +457,7 @@
 
 	BUG_ON(in_interrupt());
 
-	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
 	retval = bus->read(bus, addr, regnum);
 	mutex_unlock(&bus->mdio_lock);
 
@@ -509,7 +509,7 @@
 
 	BUG_ON(in_interrupt());
 
-	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
 	err = bus->write(bus, addr, regnum, val);
 	mutex_unlock(&bus->mdio_lock);
 
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 4516c8a..5a8fefc 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -726,7 +726,7 @@
 static struct phy_driver ksphy_driver[] = {
 {
 	.phy_id		= PHY_ID_KS8737,
-	.phy_id_mask	= 0x00fffff0,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KS8737",
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -781,7 +781,7 @@
 	.resume		= genphy_resume,
 }, {
 	.phy_id		= PHY_ID_KSZ8041,
-	.phy_id_mask	= 0x00fffff0,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8041",
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
 				| SUPPORTED_Asym_Pause),
@@ -800,7 +800,7 @@
 	.resume		= genphy_resume,
 }, {
 	.phy_id		= PHY_ID_KSZ8041RNLI,
-	.phy_id_mask	= 0x00fffff0,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8041RNLI",
 	.features	= PHY_BASIC_FEATURES |
 			  SUPPORTED_Pause | SUPPORTED_Asym_Pause,
@@ -819,7 +819,7 @@
 	.resume		= genphy_resume,
 }, {
 	.phy_id		= PHY_ID_KSZ8051,
-	.phy_id_mask	= 0x00fffff0,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8051",
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
 				| SUPPORTED_Asym_Pause),
@@ -857,7 +857,7 @@
 }, {
 	.phy_id		= PHY_ID_KSZ8081,
 	.name		= "Micrel KSZ8081 or KSZ8091",
-	.phy_id_mask	= 0x00fffff0,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz8081_type,
@@ -875,7 +875,7 @@
 }, {
 	.phy_id		= PHY_ID_KSZ8061,
 	.name		= "Micrel KSZ8061",
-	.phy_id_mask	= 0x00fffff0,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
@@ -909,7 +909,7 @@
 	.write_mmd_indirect = ksz9021_wr_mmd_phyreg,
 }, {
 	.phy_id		= PHY_ID_KSZ9031,
-	.phy_id_mask	= 0x00fffff0,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ9031 Gigabit PHY",
 	.features	= (PHY_GBIT_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -926,7 +926,7 @@
 	.resume		= genphy_resume,
 }, {
 	.phy_id		= PHY_ID_KSZ8873MLL,
-	.phy_id_mask	= 0x00fffff0,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ8873MLL Switch",
 	.features	= (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
 	.flags		= PHY_HAS_MAGICANEG,
@@ -940,7 +940,7 @@
 	.resume		= genphy_resume,
 }, {
 	.phy_id		= PHY_ID_KSZ886X,
-	.phy_id_mask	= 0x00fffff0,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
 	.name		= "Micrel KSZ886X Switch",
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -962,17 +962,17 @@
 
 static struct mdio_device_id __maybe_unused micrel_tbl[] = {
 	{ PHY_ID_KSZ9021, 0x000ffffe },
-	{ PHY_ID_KSZ9031, 0x00fffff0 },
+	{ PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
 	{ PHY_ID_KSZ8001, 0x00ffffff },
-	{ PHY_ID_KS8737, 0x00fffff0 },
+	{ PHY_ID_KS8737, MICREL_PHY_ID_MASK },
 	{ PHY_ID_KSZ8021, 0x00ffffff },
 	{ PHY_ID_KSZ8031, 0x00ffffff },
-	{ PHY_ID_KSZ8041, 0x00fffff0 },
-	{ PHY_ID_KSZ8051, 0x00fffff0 },
-	{ PHY_ID_KSZ8061, 0x00fffff0 },
-	{ PHY_ID_KSZ8081, 0x00fffff0 },
-	{ PHY_ID_KSZ8873MLL, 0x00fffff0 },
-	{ PHY_ID_KSZ886X, 0x00fffff0 },
+	{ PHY_ID_KSZ8041, MICREL_PHY_ID_MASK },
+	{ PHY_ID_KSZ8051, MICREL_PHY_ID_MASK },
+	{ PHY_ID_KSZ8061, MICREL_PHY_ID_MASK },
+	{ PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
+	{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
+	{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
 	{ }
 };
 
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 5590b9c..c5dc2c36 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -362,6 +362,60 @@
 }
 EXPORT_SYMBOL(phy_ethtool_sset);
 
+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+			      const struct ethtool_link_ksettings *cmd)
+{
+	u8 autoneg = cmd->base.autoneg;
+	u8 duplex = cmd->base.duplex;
+	u32 speed = cmd->base.speed;
+	u32 advertising;
+
+	if (cmd->base.phy_address != phydev->mdio.addr)
+		return -EINVAL;
+
+	ethtool_convert_link_mode_to_legacy_u32(&advertising,
+						cmd->link_modes.advertising);
+
+	/* We make sure that we don't pass unsupported values into the PHY */
+	advertising &= phydev->supported;
+
+	/* Verify the settings we care about. */
+	if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
+		return -EINVAL;
+
+	if (autoneg == AUTONEG_ENABLE && advertising == 0)
+		return -EINVAL;
+
+	if (autoneg == AUTONEG_DISABLE &&
+	    ((speed != SPEED_1000 &&
+	      speed != SPEED_100 &&
+	      speed != SPEED_10) ||
+	     (duplex != DUPLEX_HALF &&
+	      duplex != DUPLEX_FULL)))
+		return -EINVAL;
+
+	phydev->autoneg = autoneg;
+
+	phydev->speed = speed;
+
+	phydev->advertising = advertising;
+
+	if (autoneg == AUTONEG_ENABLE)
+		phydev->advertising |= ADVERTISED_Autoneg;
+	else
+		phydev->advertising &= ~ADVERTISED_Autoneg;
+
+	phydev->duplex = duplex;
+
+	phydev->mdix = cmd->base.eth_tp_mdix_ctrl;
+
+	/* Restart the PHY */
+	phy_start_aneg(phydev);
+
+	return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_set);
+
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 {
 	cmd->supported = phydev->supported;
@@ -385,6 +439,33 @@
 }
 EXPORT_SYMBOL(phy_ethtool_gset);
 
+int phy_ethtool_ksettings_get(struct phy_device *phydev,
+			      struct ethtool_link_ksettings *cmd)
+{
+	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+						phydev->supported);
+
+	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+						phydev->advertising);
+
+	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+						phydev->lp_advertising);
+
+	cmd->base.speed = phydev->speed;
+	cmd->base.duplex = phydev->duplex;
+	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
+		cmd->base.port = PORT_BNC;
+	else
+		cmd->base.port = PORT_MII;
+
+	cmd->base.phy_address = phydev->mdio.addr;
+	cmd->base.autoneg = phydev->autoneg;
+	cmd->base.eth_tp_mdix_ctrl = phydev->mdix;
+
+	return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_get);
+
 /**
  * phy_mii_ioctl - generic PHY MII ioctl interface
  * @phydev: the phy_device struct
@@ -790,9 +871,11 @@
 		break;
 	case PHY_HALTED:
 		/* make sure interrupts are re-enabled for the PHY */
-		err = phy_enable_interrupts(phydev);
-		if (err < 0)
-			break;
+		if (phydev->irq != PHY_POLL) {
+			err = phy_enable_interrupts(phydev);
+			if (err < 0)
+				break;
+		}
 
 		phydev->state = PHY_RESUMING;
 		do_resume = true;
@@ -1266,3 +1349,27 @@
 		phydev->drv->get_wol(phydev, wol);
 }
 EXPORT_SYMBOL(phy_ethtool_get_wol);
+
+int phy_ethtool_get_link_ksettings(struct net_device *ndev,
+				   struct ethtool_link_ksettings *cmd)
+{
+	struct phy_device *phydev = ndev->phydev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_ksettings_get(phydev, cmd);
+}
+EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);
+
+int phy_ethtool_set_link_ksettings(struct net_device *ndev,
+				   const struct ethtool_link_ksettings *cmd)
+{
+	struct phy_device *phydev = ndev->phydev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_ksettings_set(phydev, cmd);
+}
+EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
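
The two exported wrappers above let any MAC driver that keeps its PHY in ndev->phydev hand the new link_ksettings interface straight to ethtool. A minimal sketch of the hookup, with a hypothetical foo_ethtool_ops that is not part of this patch:

#include <linux/ethtool.h>
#include <linux/phy.h>

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/* assigned once the PHY is connected, e.g. in the driver's probe path:
 *	ndev->ethtool_ops = &foo_ethtool_ops;
 */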
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e551f3a..307f72a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -34,6 +34,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 #include <linux/of.h>
+#include <linux/gpio/consumer.h>
 
 #include <asm/irq.h>
 
@@ -529,7 +530,7 @@
 
 	/* If the phy_id is mostly Fs, there is no device there */
 	if ((phy_id & 0x1fffffff) == 0x1fffffff)
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
 }
@@ -1123,8 +1124,9 @@
  */
 int genphy_setup_forced(struct phy_device *phydev)
 {
-	int ctl = 0;
+	int ctl = phy_read(phydev, MII_BMCR);
 
+	ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN;
 	phydev->pause = 0;
 	phydev->asym_pause = 0;
 
@@ -1569,9 +1571,16 @@
 	struct device_driver *drv = phydev->mdio.dev.driver;
 	struct phy_driver *phydrv = to_phy_driver(drv);
 	int err = 0;
+	struct gpio_descs *reset_gpios;
 
 	phydev->drv = phydrv;
 
+	/* take phy out of reset */
+	reset_gpios = devm_gpiod_get_array_optional(dev, "reset",
+						    GPIOD_OUT_LOW);
+	if (IS_ERR(reset_gpios))
+		return PTR_ERR(reset_gpios);
+
 	/* Disable the interrupt if the PHY doesn't support it
 	 * but the interrupt is still a valid one
 	 */
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index b5d50d4..93ffedf 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -441,7 +441,7 @@
 		return -ENOMEM;
 
 	mutex_init(&ks->lock);
-	ks->spi = spi_dev_get(spi);
+	ks->spi = spi;
 	ks->chip = &ks8995_chip[variant];
 
 	if (ks->spi->dev.of_node) {
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index f572b31..8dedafa 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -46,6 +46,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/file.h>
 #include <asm/unaligned.h>
 #include <net/slhc_vj.h>
 #include <linux/atomic.h>
@@ -183,6 +184,12 @@
 #endif /* CONFIG_PPP_MULTILINK */
 };
 
+struct ppp_config {
+	struct file *file;
+	s32 unit;
+	bool ifname_is_set;
+};
+
 /*
  * SMP locking issues:
  * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
@@ -269,8 +276,7 @@
 static void ppp_ccp_closed(struct ppp *ppp);
 static struct compressor *find_compressor(int type);
 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit,
-					struct file *file, int *retp);
+static int ppp_create_interface(struct net *net, struct file *file, int *unit);
 static void init_ppp_file(struct ppp_file *pf, int kind);
 static void ppp_destroy_interface(struct ppp *ppp);
 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
@@ -282,6 +288,7 @@
 static int unit_set(struct idr *p, void *ptr, int n);
 static void unit_put(struct idr *p, int n);
 static void *unit_find(struct idr *p, int n);
+static void ppp_setup(struct net_device *dev);
 
 static const struct net_device_ops ppp_netdev_ops;
 
@@ -853,12 +860,12 @@
 		/* Create a new ppp unit */
 		if (get_user(unit, p))
 			break;
-		ppp = ppp_create_interface(net, unit, file, &err);
-		if (!ppp)
+		err = ppp_create_interface(net, file, &unit);
+		if (err < 0)
 			break;
-		file->private_data = &ppp->file;
+
 		err = -EFAULT;
-		if (put_user(ppp->file.index, p))
+		if (put_user(unit, p))
 			break;
 		err = 0;
 		break;
@@ -960,6 +967,188 @@
 	.size = sizeof(struct ppp_net),
 };
 
+static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
+{
+	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+	int ret;
+
+	mutex_lock(&pn->all_ppp_mutex);
+
+	if (unit < 0) {
+		ret = unit_get(&pn->units_idr, ppp);
+		if (ret < 0)
+			goto err;
+	} else {
+		/* Caller asked for a specific unit number. Fail with -EEXIST
+		 * if unavailable. For backward compatibility, return -EEXIST
+		 * too if idr allocation fails; this makes pppd retry without
+		 * requesting a specific unit number.
+		 */
+		if (unit_find(&pn->units_idr, unit)) {
+			ret = -EEXIST;
+			goto err;
+		}
+		ret = unit_set(&pn->units_idr, ppp, unit);
+		if (ret < 0) {
+			/* Rewrite error for backward compatibility */
+			ret = -EEXIST;
+			goto err;
+		}
+	}
+	ppp->file.index = ret;
+
+	if (!ifname_is_set)
+		snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
+
+	ret = register_netdevice(ppp->dev);
+	if (ret < 0)
+		goto err_unit;
+
+	atomic_inc(&ppp_unit_count);
+
+	mutex_unlock(&pn->all_ppp_mutex);
+
+	return 0;
+
+err_unit:
+	unit_put(&pn->units_idr, ppp->file.index);
+err:
+	mutex_unlock(&pn->all_ppp_mutex);
+
+	return ret;
+}
+
+static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
+			     const struct ppp_config *conf)
+{
+	struct ppp *ppp = netdev_priv(dev);
+	int indx;
+	int err;
+
+	ppp->dev = dev;
+	ppp->ppp_net = src_net;
+	ppp->mru = PPP_MRU;
+	ppp->owner = conf->file;
+
+	init_ppp_file(&ppp->file, INTERFACE);
+	ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+
+	for (indx = 0; indx < NUM_NP; ++indx)
+		ppp->npmode[indx] = NPMODE_PASS;
+	INIT_LIST_HEAD(&ppp->channels);
+	spin_lock_init(&ppp->rlock);
+	spin_lock_init(&ppp->wlock);
+#ifdef CONFIG_PPP_MULTILINK
+	ppp->minseq = -1;
+	skb_queue_head_init(&ppp->mrq);
+#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+	ppp->pass_filter = NULL;
+	ppp->active_filter = NULL;
+#endif /* CONFIG_PPP_FILTER */
+
+	err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
+	if (err < 0)
+		return err;
+
+	conf->file->private_data = &ppp->file;
+
+	return 0;
+}
+
+static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
+	[IFLA_PPP_DEV_FD]	= { .type = NLA_S32 },
+};
+
+static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	if (!data)
+		return -EINVAL;
+
+	if (!data[IFLA_PPP_DEV_FD])
+		return -EINVAL;
+	if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
+		return -EBADF;
+
+	return 0;
+}
+
+static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
+			  struct nlattr *tb[], struct nlattr *data[])
+{
+	struct ppp_config conf = {
+		.unit = -1,
+		.ifname_is_set = true,
+	};
+	struct file *file;
+	int err;
+
+	file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
+	if (!file)
+		return -EBADF;
+
+	/* rtnl_lock is already held here, but ppp_create_interface() locks
+	 * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
+	 * possible deadlock due to lock order inversion, at the cost of
+	 * pushing the problem back to userspace.
+	 */
+	if (!mutex_trylock(&ppp_mutex)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (file->f_op != &ppp_device_fops || file->private_data) {
+		err = -EBADF;
+		goto out_unlock;
+	}
+
+	conf.file = file;
+	err = ppp_dev_configure(src_net, dev, &conf);
+
+out_unlock:
+	mutex_unlock(&ppp_mutex);
+out:
+	fput(file);
+
+	return err;
+}
+
+static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
+{
+	unregister_netdevice_queue(dev, head);
+}
+
+static size_t ppp_nl_get_size(const struct net_device *dev)
+{
+	return 0;
+}
+
+static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	return 0;
+}
+
+static struct net *ppp_nl_get_link_net(const struct net_device *dev)
+{
+	struct ppp *ppp = netdev_priv(dev);
+
+	return ppp->ppp_net;
+}
+
+static struct rtnl_link_ops ppp_link_ops __read_mostly = {
+	.kind		= "ppp",
+	.maxtype	= IFLA_PPP_MAX,
+	.policy		= ppp_nl_policy,
+	.priv_size	= sizeof(struct ppp),
+	.setup		= ppp_setup,
+	.validate	= ppp_nl_validate,
+	.newlink	= ppp_nl_newlink,
+	.dellink	= ppp_nl_dellink,
+	.get_size	= ppp_nl_get_size,
+	.fill_info	= ppp_nl_fill_info,
+	.get_link_net	= ppp_nl_get_link_net,
+};
+
 #define PPP_MAJOR	108
 
 /* Called at boot time if ppp is compiled into the kernel,
@@ -988,11 +1177,19 @@
 		goto out_chrdev;
 	}
 
+	err = rtnl_link_register(&ppp_link_ops);
+	if (err) {
+		pr_err("failed to register rtnetlink PPP handler\n");
+		goto out_class;
+	}
+
 	/* not a big deal if we fail here :-) */
 	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
 
 	return 0;
 
+out_class:
+	class_destroy(ppp_class);
 out_chrdev:
 	unregister_chrdev(PPP_MAJOR, "ppp");
 out_net:
@@ -2732,102 +2929,42 @@
  * or if there is already a unit with the requested number.
  * unit == -1 means allocate a new number.
  */
-static struct ppp *ppp_create_interface(struct net *net, int unit,
-					struct file *file, int *retp)
+static int ppp_create_interface(struct net *net, struct file *file, int *unit)
 {
+	struct ppp_config conf = {
+		.file = file,
+		.unit = *unit,
+		.ifname_is_set = false,
+	};
+	struct net_device *dev;
 	struct ppp *ppp;
-	struct ppp_net *pn;
-	struct net_device *dev = NULL;
-	int ret = -ENOMEM;
-	int i;
+	int err;
 
 	dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
-	if (!dev)
-		goto out1;
-
-	pn = ppp_pernet(net);
-
-	ppp = netdev_priv(dev);
-	ppp->dev = dev;
-	ppp->mru = PPP_MRU;
-	init_ppp_file(&ppp->file, INTERFACE);
-	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
-	ppp->owner = file;
-	for (i = 0; i < NUM_NP; ++i)
-		ppp->npmode[i] = NPMODE_PASS;
-	INIT_LIST_HEAD(&ppp->channels);
-	spin_lock_init(&ppp->rlock);
-	spin_lock_init(&ppp->wlock);
-#ifdef CONFIG_PPP_MULTILINK
-	ppp->minseq = -1;
-	skb_queue_head_init(&ppp->mrq);
-#endif /* CONFIG_PPP_MULTILINK */
-#ifdef CONFIG_PPP_FILTER
-	ppp->pass_filter = NULL;
-	ppp->active_filter = NULL;
-#endif /* CONFIG_PPP_FILTER */
-
-	/*
-	 * drum roll: don't forget to set
-	 * the net device is belong to
-	 */
+	if (!dev) {
+		err = -ENOMEM;
+		goto err;
+	}
 	dev_net_set(dev, net);
+	dev->rtnl_link_ops = &ppp_link_ops;
 
 	rtnl_lock();
-	mutex_lock(&pn->all_ppp_mutex);
 
-	if (unit < 0) {
-		unit = unit_get(&pn->units_idr, ppp);
-		if (unit < 0) {
-			ret = unit;
-			goto out2;
-		}
-	} else {
-		ret = -EEXIST;
-		if (unit_find(&pn->units_idr, unit))
-			goto out2; /* unit already exists */
-		/*
-		 * if caller need a specified unit number
-		 * lets try to satisfy him, otherwise --
-		 * he should better ask us for new unit number
-		 *
-		 * NOTE: yes I know that returning EEXIST it's not
-		 * fair but at least pppd will ask us to allocate
-		 * new unit in this case so user is happy :)
-		 */
-		unit = unit_set(&pn->units_idr, ppp, unit);
-		if (unit < 0)
-			goto out2;
-	}
+	err = ppp_dev_configure(net, dev, &conf);
+	if (err < 0)
+		goto err_dev;
+	ppp = netdev_priv(dev);
+	*unit = ppp->file.index;
 
-	/* Initialize the new ppp unit */
-	ppp->file.index = unit;
-	sprintf(dev->name, "ppp%d", unit);
-
-	ret = register_netdevice(dev);
-	if (ret != 0) {
-		unit_put(&pn->units_idr, unit);
-		netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
-			   dev->name, ret);
-		goto out2;
-	}
-
-	ppp->ppp_net = net;
-
-	atomic_inc(&ppp_unit_count);
-	mutex_unlock(&pn->all_ppp_mutex);
 	rtnl_unlock();
 
-	*retp = 0;
-	return ppp;
+	return 0;
 
-out2:
-	mutex_unlock(&pn->all_ppp_mutex);
+err_dev:
 	rtnl_unlock();
 	free_netdev(dev);
-out1:
-	*retp = ret;
-	return NULL;
+err:
+	return err;
 }
 
 /*
@@ -3016,6 +3153,7 @@
 	/* should never happen */
 	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
 		pr_err("PPP: removing module but units remain!\n");
+	rtnl_link_unregister(&ppp_link_ops);
 	unregister_chrdev(PPP_MAJOR, "ppp");
 	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
 	class_destroy(ppp_class);
@@ -3074,4 +3212,5 @@
 EXPORT_SYMBOL(ppp_unregister_compressor);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
+MODULE_ALIAS_RTNL_LINK("ppp");
 MODULE_ALIAS("devname:ppp");
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 9cfe6ae..a31f461 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -179,11 +179,7 @@
 	unsigned long flags;
 	int add_num = 1;
 
-	local_irq_save(flags);
-	if (!spin_trylock(&rnet->tx_lock)) {
-		local_irq_restore(flags);
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&rnet->tx_lock, flags);
 
 	if (is_multicast_ether_addr(eth->h_dest))
 		add_num = nets[rnet->mport->id].nact;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index a17d86a..9ed6d1c 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -407,7 +407,7 @@
 	set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
 	actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
 #ifdef SL_CHECK_TRANSMIT
-	sl->dev->trans_start = jiffies;
+	netif_trans_update(sl->dev);
 #endif
 	sl->xleft = count - actual;
 	sl->xhead = sl->xbuff + actual;
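
slip is one of several drivers in this series converted from open-coding dev->trans_start = jiffies to netif_trans_update(); the same substitution appears in the USB network drivers below. The helper hides where the timestamp lives; roughly, it refreshes the per-queue value that the tx watchdog compares against, along the lines of this sketch (not the exact header definition):

#include <linux/netdevice.h>

static inline void foo_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* avoid dirtying the cacheline if the timestamp is already current */
	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}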
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 26c64d2..a0f64cb 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1198,6 +1198,9 @@
 		goto err_dev_open;
 	}
 
+	dev_uc_sync_multiple(port_dev, dev);
+	dev_mc_sync_multiple(port_dev, dev);
+
 	err = vlan_vids_add_by_dev(port_dev, dev);
 	if (err) {
 		netdev_err(dev, "Failed to add vlan ids to device %s\n",
@@ -1261,6 +1264,8 @@
 	vlan_vids_del_by_dev(port_dev, dev);
 
 err_vids_add:
+	dev_uc_unsync(port_dev, dev);
+	dev_mc_unsync(port_dev, dev);
 	dev_close(port_dev);
 
 err_dev_open:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index afdf950..425e983 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -131,6 +131,17 @@
 
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
+struct tun_pcpu_stats {
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct u64_stats_sync syncp;
+	u32 rx_dropped;
+	u32 tx_dropped;
+	u32 rx_frame_errors;
+};
+
 /* A tun_file connects an open character device to a tuntap netdevice. It
  * also contains all socket related structures (except sock_fprog and tap_filter)
  * to serve as one transmit queue for tuntap device. The sock_fprog and
@@ -205,6 +216,7 @@
 	struct list_head disabled;
 	void *security;
 	u32 flow_count;
+	struct tun_pcpu_stats __percpu *pcpu_stats;
 };
 
 #ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -622,7 +634,9 @@
 
 	/* Re-attach the filter to persist device */
 	if (!skip_filter && (tun->filter_attached == true)) {
+		lock_sock(tfile->socket.sk);
 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+		release_sock(tfile->socket.sk);
 		if (!err)
 			goto out;
 	}
@@ -819,7 +833,8 @@
 	if (txq >= numqueues)
 		goto drop;
 
-	if (numqueues == 1) {
+#ifdef CONFIG_RPS
+	if (numqueues == 1 && static_key_false(&rps_needed)) {
 		/* Select queue was not called for the skbuff, so we extract the
 		 * RPS hash and save it into the flow_table here.
 		 */
@@ -834,6 +849,7 @@
 				tun_flow_save_rps_rxhash(e, rxhash);
 		}
 	}
+#endif
 
 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
@@ -860,7 +876,8 @@
 		goto drop;
 
 	if (skb->sk && sk_fullsock(skb->sk)) {
-		sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
+		sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
+				  &skb_shinfo(skb)->tx_flags);
 		sw_tx_timestamp(skb);
 	}
 
@@ -883,7 +900,7 @@
 	return NETDEV_TX_OK;
 
 drop:
-	dev->stats.tx_dropped++;
+	this_cpu_inc(tun->pcpu_stats->tx_dropped);
 	skb_tx_error(skb);
 	kfree_skb(skb);
 	rcu_read_unlock();
@@ -946,6 +963,43 @@
 	tun->align = new_hr;
 }
 
+static struct rtnl_link_stats64 *
+tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
+	struct tun_struct *tun = netdev_priv(dev);
+	struct tun_pcpu_stats *p;
+	int i;
+
+	for_each_possible_cpu(i) {
+		u64 rxpackets, rxbytes, txpackets, txbytes;
+		unsigned int start;
+
+		p = per_cpu_ptr(tun->pcpu_stats, i);
+		do {
+			start = u64_stats_fetch_begin(&p->syncp);
+			rxpackets	= p->rx_packets;
+			rxbytes		= p->rx_bytes;
+			txpackets	= p->tx_packets;
+			txbytes		= p->tx_bytes;
+		} while (u64_stats_fetch_retry(&p->syncp, start));
+
+		stats->rx_packets	+= rxpackets;
+		stats->rx_bytes		+= rxbytes;
+		stats->tx_packets	+= txpackets;
+		stats->tx_bytes		+= txbytes;
+
+		/* u32 counters */
+		rx_dropped	+= p->rx_dropped;
+		rx_frame_errors	+= p->rx_frame_errors;
+		tx_dropped	+= p->tx_dropped;
+	}
+	stats->rx_dropped  = rx_dropped;
+	stats->rx_frame_errors = rx_frame_errors;
+	stats->tx_dropped = tx_dropped;
+	return stats;
+}
+
 static const struct net_device_ops tun_netdev_ops = {
 	.ndo_uninit		= tun_net_uninit,
 	.ndo_open		= tun_net_open,
@@ -958,6 +1012,7 @@
 	.ndo_poll_controller	= tun_poll_controller,
 #endif
 	.ndo_set_rx_headroom	= tun_set_headroom,
+	.ndo_get_stats64	= tun_net_get_stats64,
 };
 
 static const struct net_device_ops tap_netdev_ops = {
@@ -976,6 +1031,7 @@
 #endif
 	.ndo_features_check	= passthru_features_check,
 	.ndo_set_rx_headroom	= tun_set_headroom,
+	.ndo_get_stats64	= tun_net_get_stats64,
 };
 
 static void tun_flow_init(struct tun_struct *tun)
@@ -1014,7 +1070,6 @@
 		/* Zero header length */
 		dev->type = ARPHRD_NONE;
 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
-		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
 		break;
 
 	case IFF_TAP:
@@ -1026,7 +1081,6 @@
 
 		eth_hw_addr_random(dev);
 
-		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
 		break;
 	}
 }
@@ -1102,6 +1156,7 @@
 	size_t total_len = iov_iter_count(from);
 	size_t len = total_len, align = tun->align, linear;
 	struct virtio_net_hdr gso = { 0 };
+	struct tun_pcpu_stats *stats;
 	int good_linear;
 	int copylen;
 	bool zerocopy = false;
@@ -1176,7 +1231,7 @@
 	skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
 	if (IS_ERR(skb)) {
 		if (PTR_ERR(skb) != -EAGAIN)
-			tun->dev->stats.rx_dropped++;
+			this_cpu_inc(tun->pcpu_stats->rx_dropped);
 		return PTR_ERR(skb);
 	}
 
@@ -1191,7 +1246,7 @@
 	}
 
 	if (err) {
-		tun->dev->stats.rx_dropped++;
+		this_cpu_inc(tun->pcpu_stats->rx_dropped);
 		kfree_skb(skb);
 		return -EFAULT;
 	}
@@ -1199,7 +1254,7 @@
 	if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
 					  tun16_to_cpu(tun, gso.csum_offset))) {
-			tun->dev->stats.rx_frame_errors++;
+			this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
 			kfree_skb(skb);
 			return -EINVAL;
 		}
@@ -1216,7 +1271,7 @@
 				pi.proto = htons(ETH_P_IPV6);
 				break;
 			default:
-				tun->dev->stats.rx_dropped++;
+				this_cpu_inc(tun->pcpu_stats->rx_dropped);
 				kfree_skb(skb);
 				return -EINVAL;
 			}
@@ -1244,7 +1299,7 @@
 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 			break;
 		default:
-			tun->dev->stats.rx_frame_errors++;
+			this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
 			kfree_skb(skb);
 			return -EINVAL;
 		}
@@ -1254,7 +1309,7 @@
 
 		skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
 		if (skb_shinfo(skb)->gso_size == 0) {
-			tun->dev->stats.rx_frame_errors++;
+			this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
 			kfree_skb(skb);
 			return -EINVAL;
 		}
@@ -1277,8 +1332,12 @@
 	rxhash = skb_get_hash(skb);
 	netif_rx_ni(skb);
 
-	tun->dev->stats.rx_packets++;
-	tun->dev->stats.rx_bytes += len;
+	stats = get_cpu_ptr(tun->pcpu_stats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_packets++;
+	stats->rx_bytes += len;
+	u64_stats_update_end(&stats->syncp);
+	put_cpu_ptr(stats);
 
 	tun_flow_update(tun, rxhash, tfile);
 	return total_len;
@@ -1307,6 +1366,7 @@
 			    struct iov_iter *iter)
 {
 	struct tun_pi pi = { 0, skb->protocol };
+	struct tun_pcpu_stats *stats;
 	ssize_t total;
 	int vlan_offset = 0;
 	int vlan_hlen = 0;
@@ -1407,8 +1467,13 @@
 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
 
 done:
-	tun->dev->stats.tx_packets++;
-	tun->dev->stats.tx_bytes += skb->len + vlan_hlen;
+	/* caller is in process context */
+	stats = get_cpu_ptr(tun->pcpu_stats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->tx_packets++;
+	stats->tx_bytes += skb->len + vlan_hlen;
+	u64_stats_update_end(&stats->syncp);
+	put_cpu_ptr(tun->pcpu_stats);
 
 	return total;
 }
@@ -1466,6 +1531,7 @@
 	struct tun_struct *tun = netdev_priv(dev);
 
 	BUG_ON(!(list_empty(&tun->disabled)));
+	free_percpu(tun->pcpu_stats);
 	tun_flow_uninit(tun);
 	security_tun_dev_free_security(tun->security);
 	free_netdev(dev);
@@ -1480,6 +1546,8 @@
 
 	dev->ethtool_ops = &tun_ethtool_ops;
 	dev->destructor = tun_free_netdev;
+	/* We prefer our own queue length */
+	dev->tx_queue_len = TUN_READQ_SIZE;
 }
 
 /* Trivial set of netlink ops to allow deleting tun or tap
@@ -1712,11 +1780,17 @@
 		tun->filter_attached = false;
 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
 
+		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
+		if (!tun->pcpu_stats) {
+			err = -ENOMEM;
+			goto err_free_dev;
+		}
+
 		spin_lock_init(&tun->lock);
 
 		err = security_tun_dev_alloc_security(&tun->security);
 		if (err < 0)
-			goto err_free_dev;
+			goto err_free_stat;
 
 		tun_net_init(dev);
 		tun_flow_init(tun);
@@ -1724,7 +1798,7 @@
 		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
 				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
 				   NETIF_F_HW_VLAN_STAG_TX;
-		dev->features = dev->hw_features;
+		dev->features = dev->hw_features | NETIF_F_LLTX;
 		dev->vlan_features = dev->features &
 				     ~(NETIF_F_HW_VLAN_CTAG_TX |
 				       NETIF_F_HW_VLAN_STAG_TX);
@@ -1760,6 +1834,8 @@
 err_free_flow:
 	tun_flow_uninit(tun);
 	security_tun_dev_free_security(tun->security);
+err_free_stat:
+	free_percpu(tun->pcpu_stats);
 err_free_dev:
 	free_netdev(dev);
 	return err;
@@ -1822,7 +1898,9 @@
 
 	for (i = 0; i < n; i++) {
 		tfile = rtnl_dereference(tun->tfiles[i]);
+		lock_sock(tfile->socket.sk);
 		sk_detach_filter(tfile->socket.sk);
+		release_sock(tfile->socket.sk);
 	}
 
 	tun->filter_attached = false;
@@ -1835,7 +1913,9 @@
 
 	for (i = 0; i < tun->numqueues; i++) {
 		tfile = rtnl_dereference(tun->tfiles[i]);
+		lock_sock(tfile->socket.sk);
 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+		release_sock(tfile->socket.sk);
 		if (ret) {
 			tun_detach_filter(tun, i);
 			return ret;
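
tun's switch from dev->stats to per-CPU counters follows the usual pattern: netdev_alloc_pcpu_stats() at create time, u64_stats_update_begin()/end() around the 64-bit counters on the hot paths, this_cpu_inc() for the plain u32 error counters, and a fold loop in ndo_get_stats64. A minimal sketch of the update side, with a hypothetical foo_pcpu_stats standing in for tun_pcpu_stats:

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct foo_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;		/* u32: bumped with this_cpu_inc(), no syncp */
};

static void foo_count_rx(struct foo_pcpu_stats __percpu *pcpu, unsigned int len)
{
	struct foo_pcpu_stats *stats = get_cpu_ptr(pcpu);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(pcpu);
}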
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 0c5c22b..7de5ab5 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -66,7 +66,7 @@
 	 * buffer.
 	 */
 	if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
-		offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32);
+		offset = ((rx->remaining + 1) & 0xfffe);
 		rx->header = get_unaligned_le32(skb->data + offset);
 		offset = 0;
 
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 4e2b26a..d9ca05d 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -376,7 +376,7 @@
 	catc->tx_idx = !catc->tx_idx;
 	catc->tx_ptr = 0;
 
-	catc->netdev->trans_start = jiffies;
+	netif_trans_update(catc->netdev);
 	return status;
 }
 
@@ -389,7 +389,7 @@
 	if (status == -ECONNRESET) {
 		dev_dbg(&urb->dev->dev, "Tx Reset.\n");
 		urb->status = 0;
-		catc->netdev->trans_start = jiffies;
+		netif_trans_update(catc->netdev);
 		catc->netdev->stats.tx_errors++;
 		clear_bit(TX_RUNNING, &catc->flags);
 		netif_wake_queue(catc->netdev);
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d9..96a5028 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info,
 	},
-	/* Huawei E3372 fails unless NDP comes after the IP packets */
-	{ USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+	/* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+	 * (12d1:157d), are known to fail unless the NDP is placed
+	 * after the IP packets.  Applying the quirk to all Huawei
+	 * devices is broader than necessary, but harmless.
+	 */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
 	},
 	/* default entry */
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 86ba30b..2fb31ed 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1626,6 +1626,13 @@
 	  .driver_info = (unsigned long) &wwan_info,
 	},
 
+	/* Telit LE910 V2 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x0036,
+		USB_CLASS_COMM,
+		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&wwan_noarp_info,
+	},
+
 	/* DW5812 LTE Verizon Mobile Broadband Card
 	 * Unlike DW5550 this device requires FLAG_NOARP
 	 */
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index f64b25c..770212b 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -938,7 +938,7 @@
 
 	dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name);
 	kaweth->stats.tx_errors++;
-	net->trans_start = jiffies;
+	netif_trans_update(net);
 
 	usb_unlink_urb(kaweth->tx_urb);
 }
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f20890e..6a9d474 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -269,6 +269,7 @@
 	struct lan78xx_net *dev;
 	enum skb_state state;
 	size_t length;
+	int num_of_packet;
 };
 
 struct usb_context {
@@ -1803,7 +1804,34 @@
 
 static void lan78xx_link_status_change(struct net_device *net)
 {
-	/* nothing to do */
+	struct phy_device *phydev = net->phydev;
+	int ret, temp;
+
+	/* In forced 100 Mbps full/half-duplex mode, the chip may fail to set
+	 * the mode correctly when the cable is switched between a long
+	 * (~50 m or more) and a short one.  As a workaround, force 10 Mbps
+	 * before forcing 100 Mbps.
+	 */
+	if (!phydev->autoneg && (phydev->speed == 100)) {
+		/* disable phy interrupt */
+		temp = phy_read(phydev, LAN88XX_INT_MASK);
+		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+		temp = phy_read(phydev, MII_BMCR);
+		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+		temp |= BMCR_SPEED100;
+		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+		/* clear any interrupt left pending by the workaround */
+		temp = phy_read(phydev, LAN88XX_INT_STS);
+
+		/* re-enable the phy interrupt */
+		temp = phy_read(phydev, LAN88XX_INT_MASK);
+		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+	}
 }
 
 static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2464,7 +2492,7 @@
 	struct lan78xx_net *dev = entry->dev;
 
 	if (urb->status == 0) {
-		dev->net->stats.tx_packets++;
+		dev->net->stats.tx_packets += entry->num_of_packet;
 		dev->net->stats.tx_bytes += entry->length;
 	} else {
 		dev->net->stats.tx_errors++;
@@ -2681,10 +2709,11 @@
 		return;
 	}
 
-	skb->protocol = eth_type_trans(skb, dev->net);
 	dev->net->stats.rx_packets++;
 	dev->net->stats.rx_bytes += skb->len;
 
+	skb->protocol = eth_type_trans(skb, dev->net);
+
 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
 		  skb->len + sizeof(struct ethhdr), skb->protocol);
 	memset(skb->cb, 0, sizeof(struct skb_data));
@@ -2934,13 +2963,16 @@
 
 	skb_totallen = 0;
 	pkt_cnt = 0;
+	count = 0;
+	length = 0;
 	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
 		if (skb_is_gso(skb)) {
 			if (pkt_cnt) {
 				/* handle previous packets first */
 				break;
 			}
-			length = skb->len;
+			count = 1;
+			length = skb->len - TX_OVERHEAD;
 			skb2 = skb_dequeue(tqp);
 			goto gso_skb;
 		}
@@ -2961,14 +2993,13 @@
 	for (count = pos = 0; count < pkt_cnt; count++) {
 		skb2 = skb_dequeue(tqp);
 		if (skb2) {
+			length += (skb2->len - TX_OVERHEAD);
 			memcpy(skb->data + pos, skb2->data, skb2->len);
 			pos += roundup(skb2->len, sizeof(u32));
 			dev_kfree_skb(skb2);
 		}
 	}
 
-	length = skb_totallen;
-
 gso_skb:
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
 	if (!urb) {
@@ -2980,6 +3011,7 @@
 	entry->urb = urb;
 	entry->dev = dev;
 	entry->length = length;
+	entry->num_of_packet = count;
 
 	spin_lock_irqsave(&dev->txq.lock, flags);
 	ret = usb_autopm_get_interface_async(dev->intf);
@@ -3013,7 +3045,7 @@
 	ret = usb_submit_urb(urb, GFP_ATOMIC);
 	switch (ret) {
 	case 0:
-		dev->net->trans_start = jiffies;
+		netif_trans_update(dev->net);
 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
 			netif_stop_queue(dev->net);
@@ -3697,7 +3729,7 @@
 				usb_free_urb(res);
 				usb_autopm_put_interface_async(dev->intf);
 			} else {
-				dev->net->trans_start = jiffies;
+				netif_trans_update(dev->net);
 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
 			}
 		}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f840802..36cd7f0 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -411,7 +411,7 @@
 	int ret;
 
 	read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
-	data[0] = 0xc9;
+	data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
 	data[1] = 0;
 	if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
 		data[1] |= 0x20;	/* set full duplex */
@@ -497,7 +497,7 @@
 		pkt_len = buf[count - 3] << 8;
 		pkt_len += buf[count - 4];
 		pkt_len &= 0xfff;
-		pkt_len -= 8;
+		pkt_len -= 4;
 	}
 
 	/*
@@ -528,7 +528,7 @@
 goon:
 	usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
 			  usb_rcvbulkpipe(pegasus->usb, 1),
-			  pegasus->rx_skb->data, PEGASUS_MTU + 8,
+			  pegasus->rx_skb->data, PEGASUS_MTU,
 			  read_bulk_callback, pegasus);
 	rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
 	if (rx_status == -ENODEV)
@@ -569,7 +569,7 @@
 	}
 	usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
 			  usb_rcvbulkpipe(pegasus->usb, 1),
-			  pegasus->rx_skb->data, PEGASUS_MTU + 8,
+			  pegasus->rx_skb->data, PEGASUS_MTU,
 			  read_bulk_callback, pegasus);
 try_again:
 	status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
@@ -615,7 +615,7 @@
 		break;
 	}
 
-	net->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(net); /* prevent tx timeout */
 	netif_wake_queue(net);
 }
 
@@ -823,7 +823,7 @@
 
 	usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
 			  usb_rcvbulkpipe(pegasus->usb, 1),
-			  pegasus->rx_skb->data, PEGASUS_MTU + 8,
+			  pegasus->rx_skb->data, PEGASUS_MTU,
 			  read_bulk_callback, pegasus);
 	if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
 		if (res == -ENODEV)
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 1bfe0fc..22e1a9a 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -38,7 +38,7 @@
  * HEADS UP:  this handshaking isn't all that robust.  This driver
  * gets confused easily if you unplug one end of the cable then
  * try to connect it again; you'll need to restart both ends. The
- * "naplink" software (used by some PlayStation/2 deveopers) does
+ * "naplink" software (used by some PlayStation/2 developers) does
  * the handshaking much better!   Also, sometimes this hardware
  * seems to get wedged under load.  Prolific docs are weak, and
  * don't identify differences between PL2301 and PL2302, much less
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 7d717c6..9d1fce8 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -844,6 +844,7 @@
 	{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
+	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d1f78c2..3f9f6ed 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3366,7 +3366,7 @@
 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
 
 	ocp_data = FIFO_EMPTY_1FB | ROK_EXIT_LPM;
-	if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER)
+	if (tp->version == RTL_VER_04 && tp->udev->speed < USB_SPEED_SUPER)
 		ocp_data |= LPM_TIMER_500MS;
 	else
 		ocp_data |= LPM_TIMER_500US;
@@ -4211,6 +4211,7 @@
 
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
 		tp->coalesce = COALESCE_SUPER;
 		break;
 	case USB_SPEED_HIGH:
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index d37b7dc..7c72bfa 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -451,7 +451,7 @@
 	if (status)
 		dev_info(&urb->dev->dev, "%s: Tx status %d\n",
 			 dev->netdev->name, status);
-	dev->netdev->trans_start = jiffies;
+	netif_trans_update(dev->netdev);
 	netif_wake_queue(dev->netdev);
 }
 
@@ -694,7 +694,7 @@
 	} else {
 		netdev->stats.tx_packets++;
 		netdev->stats.tx_bytes += skb->len;
-		netdev->trans_start = jiffies;
+		netif_trans_update(netdev);
 	}
 
 	return NETDEV_TX_OK;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 30033db..9af9799 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/of_net.h>
 #include "smsc75xx.h"
 
 #define SMSC_CHIPNAME			"smsc75xx"
@@ -98,9 +99,11 @@
 	ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
 		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 		 0, index, &buf, 4);
-	if (unlikely(ret < 0))
+	if (unlikely(ret < 0)) {
 		netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
 			    index, ret);
+		return ret;
+	}
 
 	le32_to_cpus(&buf);
 	*data = buf;
@@ -761,6 +764,15 @@
 
 static void smsc75xx_init_mac_address(struct usbnet *dev)
 {
+	const u8 *mac_addr;
+
+	/* maybe the boot loader passed the MAC address in devicetree */
+	mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+	if (mac_addr) {
+		memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+		return;
+	}
+
 	/* try reading mac address from EEPROM */
 	if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
 			dev->net->dev_addr) == 0) {
@@ -772,7 +784,7 @@
 		}
 	}
 
-	/* no eeprom, or eeprom values are invalid. generate random MAC */
+	/* no useful static MAC address found. generate a random one */
 	eth_hw_addr_random(dev->net);
 	netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 66b3ab9..d9d2806 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/of_net.h>
 #include "smsc95xx.h"
 
 #define SMSC_CHIPNAME			"smsc95xx"
@@ -91,9 +92,11 @@
 	ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
 		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 		 0, index, &buf, 4);
-	if (unlikely(ret < 0))
+	if (unlikely(ret < 0)) {
 		netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
 			    index, ret);
+		return ret;
+	}
 
 	le32_to_cpus(&buf);
 	*data = buf;
@@ -765,6 +768,15 @@
 
 static void smsc95xx_init_mac_address(struct usbnet *dev)
 {
+	const u8 *mac_addr;
+
+	/* maybe the boot loader passed the MAC address in devicetree */
+	mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+	if (mac_addr) {
+		memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+		return;
+	}
+
 	/* try reading mac address from EEPROM */
 	if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
 			dev->net->dev_addr) == 0) {
@@ -775,7 +787,7 @@
 		}
 	}
 
-	/* no eeprom, or eeprom values are invalid. generate random MAC */
+	/* no useful static MAC address found. generate a random one */
 	eth_hw_addr_random(dev->net);
 	netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1079812..61ba464 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -356,6 +356,7 @@
 		dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
 		break;
 	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
 		/*
 		 * Don't take the default 5ms qlen for super speed HC to
 		 * save memory, and iperf tests show 2.5ms qlen can
@@ -1415,7 +1416,7 @@
 			  "tx: submit urb err %d\n", retval);
 		break;
 	case 0:
-		net->trans_start = jiffies;
+		netif_trans_update(net);
 		__usbnet_queue_skb(&dev->txq, skb, tx_start);
 		if (dev->txq.qlen >= TX_QLEN (dev))
 			netif_stop_queue (net);
@@ -1844,7 +1845,7 @@
 				usb_free_urb(res);
 				usb_autopm_put_interface_async(dev->intf);
 			} else {
-				dev->net->trans_start = jiffies;
+				netif_trans_update(dev->net);
 				__skb_queue_tail(&dev->txq, skb);
 			}
 		}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 4f30a6a..f37a6e6 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -312,10 +312,9 @@
 	.ndo_set_rx_headroom	= veth_set_rx_headroom,
 };
 
-#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
-		       NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
-		       NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |	    \
-		       NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO	|   \
+#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
+		       NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+		       NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
 		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
 		       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
 
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b2348f6..db8022a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1152,12 +1152,16 @@
 		union Vmxnet3_GenericDesc *gdesc)
 {
 	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
-		/* typical case: TCP/UDP over IP and both csums are correct */
-		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
-							VMXNET3_RCD_CSUM_OK) {
+		if (gdesc->rcd.v4 &&
+		    (le32_to_cpu(gdesc->dword[3]) &
+		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-			BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
+			BUG_ON(gdesc->rcd.frg);
+		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
 			BUG_ON(gdesc->rcd.frg);
 		} else {
 			if (gdesc->rcd.csum) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 729c344..c482539 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040700
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9a9fabb..dff0884 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -42,12 +42,9 @@
 #define DRV_NAME	"vrf"
 #define DRV_VERSION	"1.0"
 
-#define vrf_master_get_rcu(dev) \
-	((struct net_device *)rcu_dereference(dev->rx_handler_data))
-
 struct net_vrf {
-	struct rtable           *rth;
-	struct rt6_info		*rt6;
+	struct rtable __rcu	*rth;
+	struct rt6_info	__rcu	*rt6;
 	u32                     tb_id;
 };
 
@@ -60,125 +57,12 @@
 	struct u64_stats_sync	syncp;
 };
 
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
-	return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
-	return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
-	/* TO-DO: return max ethernet size? */
-	return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
-	/* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
-	return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
-	.family		= AF_INET,
-	.local_out	= vrf_ip_local_out,
-	.check		= vrf_ip_check,
-	.mtu		= vrf_v4_mtu,
-	.destroy	= vrf_dst_destroy,
-	.default_advmss	= vrf_default_advmss,
-};
-
-/* neighbor handling is done with actual device; do not want
- * to flip skb->dev for those ndisc packets. This really fails
- * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
- * a start.
- */
-#if IS_ENABLED(CONFIG_IPV6)
-static bool check_ipv6_frame(const struct sk_buff *skb)
-{
-	const struct ipv6hdr *ipv6h;
-	struct ipv6hdr _ipv6h;
-	bool rc = true;
-
-	ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
-	if (!ipv6h)
-		goto out;
-
-	if (ipv6h->nexthdr == NEXTHDR_ICMP) {
-		const struct icmp6hdr *icmph;
-		struct icmp6hdr _icmph;
-
-		icmph = skb_header_pointer(skb, sizeof(_ipv6h),
-					   sizeof(_icmph), &_icmph);
-		if (!icmph)
-			goto out;
-
-		switch (icmph->icmp6_type) {
-		case NDISC_ROUTER_SOLICITATION:
-		case NDISC_ROUTER_ADVERTISEMENT:
-		case NDISC_NEIGHBOUR_SOLICITATION:
-		case NDISC_NEIGHBOUR_ADVERTISEMENT:
-		case NDISC_REDIRECT:
-			rc = false;
-			break;
-		}
-	}
-
-out:
-	return rc;
-}
-#else
-static bool check_ipv6_frame(const struct sk_buff *skb)
-{
-	return false;
-}
-#endif
-
-static bool is_ip_rx_frame(struct sk_buff *skb)
-{
-	switch (skb->protocol) {
-	case htons(ETH_P_IP):
-		return true;
-	case htons(ETH_P_IPV6):
-		return check_ipv6_frame(skb);
-	}
-	return false;
-}
-
 static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
 {
 	vrf_dev->stats.tx_errors++;
 	kfree_skb(skb);
 }
 
-/* note: already called with rcu_read_lock */
-static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
-{
-	struct sk_buff *skb = *pskb;
-
-	if (is_ip_rx_frame(skb)) {
-		struct net_device *dev = vrf_master_get_rcu(skb->dev);
-		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
-		u64_stats_update_begin(&dstats->syncp);
-		dstats->rx_pkts++;
-		dstats->rx_bytes += skb->len;
-		u64_stats_update_end(&dstats->syncp);
-
-		skb->dev = dev;
-
-		return RX_HANDLER_ANOTHER;
-	}
-	return RX_HANDLER_PASS;
-}
-
 static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
 						 struct rtnl_link_stats64 *stats)
 {
@@ -349,46 +233,6 @@
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
-	return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
-	.family		= AF_INET6,
-	.local_out	= ip6_local_out,
-	.check		= vrf_ip6_check,
-	.mtu		= vrf_v4_mtu,
-	.destroy	= vrf_dst_destroy,
-	.default_advmss	= vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
-	vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
-						     sizeof(struct rt6_info),
-						     0,
-						     SLAB_HWCACHE_ALIGN,
-						     NULL);
-
-	if (!vrf_dst_ops6.kmem_cachep)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-	kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
-	skb->dev->stats.rx_errors++;
-	kfree_skb(skb);
-	return 0;
-}
-
 /* modelled after ip6_finish_output2 */
 static int vrf_finish_output6(struct net *net, struct sock *sk,
 			      struct sk_buff *skb)
@@ -429,67 +273,46 @@
 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+/* holding rtnl */
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
-	dst_destroy(&vrf->rt6->dst);
-	free_percpu(vrf->rt6->rt6i_pcpu);
-	vrf->rt6 = NULL;
+	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
+
+	rcu_assign_pointer(vrf->rt6, NULL);
+
+	if (rt6)
+		dst_release(&rt6->dst);
 }
 
 static int vrf_rt6_create(struct net_device *dev)
 {
 	struct net_vrf *vrf = netdev_priv(dev);
-	struct dst_entry *dst;
+	struct net *net = dev_net(dev);
+	struct fib6_table *rt6i_table;
 	struct rt6_info *rt6;
-	int cpu;
 	int rc = -ENOMEM;
 
-	rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
-			DST_OBSOLETE_NONE,
-			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	rt6i_table = fib6_new_table(net, vrf->tb_id);
+	if (!rt6i_table)
+		goto out;
+
+	rt6 = ip6_dst_alloc(net, dev,
+			    DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
 	if (!rt6)
 		goto out;
 
-	dst = &rt6->dst;
+	dst_hold(&rt6->dst);
 
-	rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
-	if (!rt6->rt6i_pcpu) {
-		dst_destroy(dst);
-		goto out;
-	}
-	for_each_possible_cpu(cpu) {
-		struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
-		*p =  NULL;
-	}
-
-	memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
-	INIT_LIST_HEAD(&rt6->rt6i_siblings);
-	INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
-	rt6->dst.input	= vrf_input6;
+	rt6->rt6i_table = rt6i_table;
 	rt6->dst.output	= vrf_output6;
+	rcu_assign_pointer(vrf->rt6, rt6);
 
-	rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
-	atomic_set(&rt6->dst.__refcnt, 2);
-
-	vrf->rt6 = rt6;
 	rc = 0;
 out:
 	return rc;
 }
 #else
-static int init_dst_ops6_kmem_cachep(void)
-{
-	return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
 }
 
@@ -557,38 +380,35 @@
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+/* holding rtnl */
+static void vrf_rtable_release(struct net_vrf *vrf)
 {
-	struct dst_entry *dst = (struct dst_entry *)vrf->rth;
+	struct rtable *rth = rtnl_dereference(vrf->rth);
 
-	dst_destroy(dst);
-	vrf->rth = NULL;
+	rcu_assign_pointer(vrf->rth, NULL);
+
+	if (rth)
+		dst_release(&rth->dst);
 }
 
-static struct rtable *vrf_rtable_create(struct net_device *dev)
+static int vrf_rtable_create(struct net_device *dev)
 {
 	struct net_vrf *vrf = netdev_priv(dev);
 	struct rtable *rth;
 
-	rth = dst_alloc(&vrf_dst_ops, dev, 2,
-			DST_OBSOLETE_NONE,
-			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
-	if (rth) {
-		rth->dst.output	= vrf_output;
-		rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
-		rth->rt_flags	= 0;
-		rth->rt_type	= RTN_UNICAST;
-		rth->rt_is_input = 0;
-		rth->rt_iif	= 0;
-		rth->rt_pmtu	= 0;
-		rth->rt_gateway	= 0;
-		rth->rt_uses_gateway = 0;
-		rth->rt_table_id = vrf->tb_id;
-		INIT_LIST_HEAD(&rth->rt_uncached);
-		rth->rt_uncached_list = NULL;
-	}
+	if (!fib_new_table(dev_net(dev), vrf->tb_id))
+		return -ENOMEM;
 
-	return rth;
+	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
+	if (!rth)
+		return -ENOMEM;
+
+	rth->dst.output	= vrf_output;
+	rth->rt_table_id = vrf->tb_id;
+
+	rcu_assign_pointer(vrf->rth, rth);
+
+	return 0;
 }
 
 /**************************** device handling ********************/
@@ -617,28 +437,14 @@
 {
 	int ret;
 
-	/* register the packet handler for slave ports */
-	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
-	if (ret) {
-		netdev_err(port_dev,
-			   "Device %s failed to register rx_handler\n",
-			   port_dev->name);
-		goto out_fail;
-	}
-
 	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
 	if (ret < 0)
-		goto out_unregister;
+		return ret;
 
 	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
 	cycle_netdev(port_dev);
 
 	return 0;
-
-out_unregister:
-	netdev_rx_handler_unregister(port_dev);
-out_fail:
-	return ret;
 }
 
 static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
@@ -655,8 +461,6 @@
 	netdev_upper_dev_unlink(port_dev, dev);
 	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
 
-	netdev_rx_handler_unregister(port_dev);
-
 	cycle_netdev(port_dev);
 
 	return 0;
@@ -673,8 +477,8 @@
 	struct net_device *port_dev;
 	struct list_head *iter;
 
-	vrf_rtable_destroy(vrf);
-	vrf_rt6_destroy(vrf);
+	vrf_rtable_release(vrf);
+	vrf_rt6_release(vrf);
 
 	netdev_for_each_lower_dev(dev, port_dev, iter)
 		vrf_del_slave(dev, port_dev);
@@ -692,8 +496,7 @@
 		goto out_nomem;
 
 	/* create the default dst which points back to us */
-	vrf->rth = vrf_rtable_create(dev);
-	if (!vrf->rth)
+	if (vrf_rtable_create(dev) != 0)
 		goto out_stats;
 
 	if (vrf_rt6_create(dev) != 0)
@@ -704,7 +507,7 @@
 	return 0;
 
 out_rth:
-	vrf_rtable_destroy(vrf);
+	vrf_rtable_release(vrf);
 out_stats:
 	free_percpu(dev->dstats);
 	dev->dstats = NULL;
@@ -736,8 +539,13 @@
 	if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
 		struct net_vrf *vrf = netdev_priv(dev);
 
-		rth = vrf->rth;
-		atomic_inc(&rth->dst.__refcnt);
+		rcu_read_lock();
+
+		rth = rcu_dereference(vrf->rth);
+		if (likely(rth))
+			dst_hold(&rth->dst);
+
+		rcu_read_unlock();
 	}
 
 	return rth;
@@ -759,6 +567,8 @@
 
 	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
 	fl4->flowi4_iif = LOOPBACK_IFINDEX;
+	/* make sure oif is set to VRF device for lookup */
+	fl4->flowi4_oif = dev->ifindex;
 	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
 	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
 			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
@@ -779,19 +589,116 @@
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
+/* neighbor handling is done with actual device; do not want
+ * to flip skb->dev for those ndisc packets. This really fails
+ * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
+ * a start.
+ */
+static bool ipv6_ndisc_frame(const struct sk_buff *skb)
+{
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
+	bool rc = false;
+
+	if (iph->nexthdr == NEXTHDR_ICMP) {
+		const struct icmp6hdr *icmph;
+		struct icmp6hdr _icmph;
+
+		icmph = skb_header_pointer(skb, sizeof(*iph),
+					   sizeof(_icmph), &_icmph);
+		if (!icmph)
+			goto out;
+
+		switch (icmph->icmp6_type) {
+		case NDISC_ROUTER_SOLICITATION:
+		case NDISC_ROUTER_ADVERTISEMENT:
+		case NDISC_NEIGHBOUR_SOLICITATION:
+		case NDISC_NEIGHBOUR_ADVERTISEMENT:
+		case NDISC_REDIRECT:
+			rc = true;
+			break;
+		}
+	}
+
+out:
+	return rc;
+}
+
+static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+				   struct sk_buff *skb)
+{
+	/* if packet is NDISC keep the ingress interface */
+	if (!ipv6_ndisc_frame(skb)) {
+		skb->dev = vrf_dev;
+		skb->skb_iif = vrf_dev->ifindex;
+
+		skb_push(skb, skb->mac_len);
+		dev_queue_xmit_nit(skb, vrf_dev);
+		skb_pull(skb, skb->mac_len);
+
+		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
+	}
+
+	return skb;
+}
+
+#else
+static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+				   struct sk_buff *skb)
+{
+	return skb;
+}
+#endif
+
+static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
+				  struct sk_buff *skb)
+{
+	skb->dev = vrf_dev;
+	skb->skb_iif = vrf_dev->ifindex;
+
+	skb_push(skb, skb->mac_len);
+	dev_queue_xmit_nit(skb, vrf_dev);
+	skb_pull(skb, skb->mac_len);
+
+	return skb;
+}
+
+/* called with rcu lock held */
+static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
+				  struct sk_buff *skb,
+				  u16 proto)
+{
+	switch (proto) {
+	case AF_INET:
+		return vrf_ip_rcv(vrf_dev, skb);
+	case AF_INET6:
+		return vrf_ip6_rcv(vrf_dev, skb);
+	}
+
+	return skb;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
 static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
 					 const struct flowi6 *fl6)
 {
-	struct rt6_info *rt = NULL;
+	struct dst_entry *dst = NULL;
 
 	if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
 		struct net_vrf *vrf = netdev_priv(dev);
+		struct rt6_info *rt;
 
-		rt = vrf->rt6;
-		atomic_inc(&rt->dst.__refcnt);
+		rcu_read_lock();
+
+		rt = rcu_dereference(vrf->rt6);
+		if (likely(rt)) {
+			dst = &rt->dst;
+			dst_hold(dst);
+		}
+
+		rcu_read_unlock();
 	}
 
-	return (struct dst_entry *)rt;
+	return dst;
 }
 #endif
 
@@ -799,6 +706,7 @@
 	.l3mdev_fib_table	= vrf_fib_table,
 	.l3mdev_get_rtable	= vrf_get_rtable,
 	.l3mdev_get_saddr	= vrf_get_saddr,
+	.l3mdev_l3_rcv		= vrf_l3_rcv,
 #if IS_ENABLED(CONFIG_IPV6)
 	.l3mdev_get_rt6_dst	= vrf_get_rt6_dst,
 #endif
@@ -946,19 +854,6 @@
 {
 	int rc;
 
-	vrf_dst_ops.kmem_cachep =
-		kmem_cache_create("vrf_ip_dst_cache",
-				  sizeof(struct rtable), 0,
-				  SLAB_HWCACHE_ALIGN,
-				  NULL);
-
-	if (!vrf_dst_ops.kmem_cachep)
-		return -ENOMEM;
-
-	rc = init_dst_ops6_kmem_cachep();
-	if (rc != 0)
-		goto error2;
-
 	register_netdevice_notifier(&vrf_notifier_block);
 
 	rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +864,10 @@
 
 error:
 	unregister_netdevice_notifier(&vrf_notifier_block);
-	free_dst_ops6_kmem_cachep();
-error2:
-	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
 	return rc;
 }
 
-static void __exit vrf_cleanup_module(void)
-{
-	rtnl_link_unregister(&vrf_link_ops);
-	unregister_netdevice_notifier(&vrf_notifier_block);
-	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
-	free_dst_ops6_kmem_cachep();
-}
-
 module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
 MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
 MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1c0fa36..25ab6bf 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -98,7 +98,6 @@
 
 /* salt for hash table */
 static u32 vxlan_salt __read_mostly;
-static struct workqueue_struct *vxlan_wq;
 
 static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
 {
@@ -551,16 +550,15 @@
 	return vh;
 }
 
-static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
-					  struct sk_buff *skb,
-					  struct udp_offload *uoff)
+static struct sk_buff **vxlan_gro_receive(struct sock *sk,
+					  struct sk_buff **head,
+					  struct sk_buff *skb)
 {
 	struct sk_buff *p, **pp = NULL;
 	struct vxlanhdr *vh, *vh2;
 	unsigned int hlen, off_vx;
 	int flush = 1;
-	struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
-					     udp_offloads);
+	struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
 	__be32 flags;
 	struct gro_remcsum grc;
 
@@ -613,11 +611,11 @@
 	return pp;
 }
 
-static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
-			      struct udp_offload *uoff)
+static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 {
-	udp_tunnel_gro_complete(skb, nhoff);
-
+	/* Sets 'skb->inner_mac_header' since we are always called with
+	 * 'skb->encapsulation' set.
+	 */
 	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 
@@ -629,13 +627,6 @@
 	struct net *net = sock_net(sk);
 	sa_family_t sa_family = vxlan_get_sk_family(vs);
 	__be16 port = inet_sk(sk)->inet_sport;
-	int err;
-
-	if (sa_family == AF_INET) {
-		err = udp_add_offload(net, &vs->udp_offloads);
-		if (err)
-			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
-	}
 
 	rcu_read_lock();
 	for_each_netdev_rcu(net, dev) {
@@ -662,9 +653,6 @@
 							    port);
 	}
 	rcu_read_unlock();
-
-	if (sa_family == AF_INET)
-		udp_del_offload(&vs->udp_offloads);
 }
 
 /* Add new entry to forwarding table -- assumes lock held */
@@ -1050,14 +1038,14 @@
 	return false;
 }
 
-static void __vxlan_sock_release(struct vxlan_sock *vs)
+static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
 {
 	struct vxlan_net *vn;
 
 	if (!vs)
-		return;
+		return false;
 	if (!atomic_dec_and_test(&vs->refcnt))
-		return;
+		return false;
 
 	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
 	spin_lock(&vn->sock_lock);
@@ -1065,14 +1053,28 @@
 	vxlan_notify_del_rx_port(vs);
 	spin_unlock(&vn->sock_lock);
 
-	queue_work(vxlan_wq, &vs->del_work);
+	return true;
 }
 
 static void vxlan_sock_release(struct vxlan_dev *vxlan)
 {
-	__vxlan_sock_release(vxlan->vn4_sock);
+	bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
 #if IS_ENABLED(CONFIG_IPV6)
-	__vxlan_sock_release(vxlan->vn6_sock);
+	bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
+#endif
+
+	synchronize_net();
+
+	if (ipv4) {
+		udp_tunnel_sock_release(vxlan->vn4_sock->sock);
+		kfree(vxlan->vn4_sock);
+	}
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (ipv6) {
+		udp_tunnel_sock_release(vxlan->vn6_sock->sock);
+		kfree(vxlan->vn6_sock);
+	}
 #endif
 }
 
@@ -1192,6 +1194,45 @@
 	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
 }
 
+static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
+				__be16 *protocol,
+				struct sk_buff *skb, u32 vxflags)
+{
+	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
+
+	/* Need to have Next Protocol set for interfaces in GPE mode. */
+	if (!gpe->np_applied)
+		return false;
+	/* "The initial version is 0. If a receiver does not support the
+	 * version indicated it MUST drop the packet."
+	 */
+	if (gpe->version != 0)
+		return false;
+	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
+	 * processing MUST occur." However, we don't implement OAM
+	 * processing, thus drop the packet.
+	 */
+	if (gpe->oam_flag)
+		return false;
+
+	switch (gpe->next_protocol) {
+	case VXLAN_GPE_NP_IPV4:
+		*protocol = htons(ETH_P_IP);
+		break;
+	case VXLAN_GPE_NP_IPV6:
+		*protocol = htons(ETH_P_IPV6);
+		break;
+	case VXLAN_GPE_NP_ETHERNET:
+		*protocol = htons(ETH_P_TEB);
+		break;
+	default:
+		return false;
+	}
+
+	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
+	return true;
+}
+
 static bool vxlan_set_mac(struct vxlan_dev *vxlan,
 			  struct vxlan_sock *vs,
 			  struct sk_buff *skb)
@@ -1257,9 +1298,11 @@
 	struct vxlanhdr unparsed;
 	struct vxlan_metadata _md;
 	struct vxlan_metadata *md = &_md;
+	__be16 protocol = htons(ETH_P_TEB);
+	bool raw_proto = false;
 	void *oiph;
 
-	/* Need Vxlan and inner Ethernet header to be present */
+	/* Need UDP and VXLAN headers to be present */
 	if (!pskb_may_pull(skb, VXLAN_HLEN))
 		return 1;
 
@@ -1283,9 +1326,18 @@
 	if (!vxlan)
 		goto drop;
 
-	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB),
-				 !net_eq(vxlan->net, dev_net(vxlan->dev))))
-		goto drop;
+	/* For backwards compatibility, only allow reserved fields to be
+	 * used by VXLAN extensions if explicitly requested.
+	 */
+	if (vs->flags & VXLAN_F_GPE) {
+		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
+			goto drop;
+		raw_proto = true;
+	}
+
+	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
+				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
+		goto drop;
 
 	if (vxlan_collect_metadata(vs)) {
 		__be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
@@ -1304,14 +1356,14 @@
 		memset(md, 0, sizeof(*md));
 	}
 
-	/* For backwards compatibility, only allow reserved fields to be
-	 * used by VXLAN extensions if explicitly requested.
-	 */
 	if (vs->flags & VXLAN_F_REMCSUM_RX)
 		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
 			goto drop;
 	if (vs->flags & VXLAN_F_GBP)
 		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
+	/* Note that GBP and GPE can never be active together. This is
+	 * ensured in vxlan_dev_configure.
+	 */
 
 	if (unparsed.vx_flags || unparsed.vx_vni) {
 		/* If there are any unprocessed flags remaining treat
@@ -1325,8 +1377,14 @@
 		goto drop;
 	}
 
-	if (!vxlan_set_mac(vxlan, vs, skb))
-		goto drop;
+	if (!raw_proto) {
+		if (!vxlan_set_mac(vxlan, vs, skb))
+			goto drop;
+	} else {
+		skb_reset_mac_header(skb);
+		skb->dev = vxlan->dev;
+		skb->pkt_type = PACKET_HOST;
+	}
 
 	oiph = skb_network_header(skb);
 	skb_reset_network_header(skb);
@@ -1685,6 +1743,27 @@
 	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
 }
 
+static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
+			       __be16 protocol)
+{
+	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;
+
+	gpe->np_applied = 1;
+
+	switch (protocol) {
+	case htons(ETH_P_IP):
+		gpe->next_protocol = VXLAN_GPE_NP_IPV4;
+		return 0;
+	case htons(ETH_P_IPV6):
+		gpe->next_protocol = VXLAN_GPE_NP_IPV6;
+		return 0;
+	case htons(ETH_P_TEB):
+		gpe->next_protocol = VXLAN_GPE_NP_ETHERNET;
+		return 0;
+	}
+	return -EPFNOSUPPORT;
+}
+
 static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
 			   int iphdr_len, __be32 vni,
 			   struct vxlan_metadata *md, u32 vxflags,
@@ -1694,6 +1773,7 @@
 	int min_headroom;
 	int err;
 	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+	__be16 inner_protocol = htons(ETH_P_TEB);
 
 	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
 	    skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1712,18 +1792,16 @@
 
 	/* Need space for new headers (invalidates iph ptr) */
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err)) {
-		kfree_skb(skb);
-		return err;
-	}
+	if (unlikely(err))
+		goto out_free;
 
 	skb = vlan_hwaccel_push_inside(skb);
 	if (WARN_ON(!skb))
 		return -ENOMEM;
 
-	skb = iptunnel_handle_offloads(skb, type);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
+	err = iptunnel_handle_offloads(skb, type);
+	if (err)
+		goto out_free;
 
 	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 	vxh->vx_flags = VXLAN_HF_VNI;
@@ -1744,9 +1822,19 @@
 
 	if (vxflags & VXLAN_F_GBP)
 		vxlan_build_gbp_hdr(vxh, vxflags, md);
+	if (vxflags & VXLAN_F_GPE) {
+		err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
+		if (err < 0)
+			goto out_free;
+		inner_protocol = skb->protocol;
+	}
 
-	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+	skb_set_inner_protocol(skb, inner_protocol);
 	return 0;
+
+out_free:
+	kfree_skb(skb);
+	return err;
 }
 
 static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
@@ -2106,9 +2194,17 @@
 	info = skb_tunnel_info(skb);
 
 	skb_reset_mac_header(skb);
-	eth = eth_hdr(skb);
 
-	if ((vxlan->flags & VXLAN_F_PROXY)) {
+	if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+		if (info && info->mode & IP_TUNNEL_INFO_TX)
+			vxlan_xmit_one(skb, dev, NULL, false);
+		else
+			kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (vxlan->flags & VXLAN_F_PROXY) {
+		eth = eth_hdr(skb);
 		if (ntohs(eth->h_proto) == ETH_P_ARP)
 			return arp_reduce(dev, skb);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -2123,18 +2219,10 @@
 				    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
 					return neigh_reduce(dev, skb);
 		}
-		eth = eth_hdr(skb);
 #endif
 	}
 
-	if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
-		if (info && info->mode & IP_TUNNEL_INFO_TX)
-			vxlan_xmit_one(skb, dev, NULL, false);
-		else
-			kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
-
+	eth = eth_hdr(skb);
 	f = vxlan_find_mac(vxlan, eth->h_dest);
 	did_rsc = false;
 
@@ -2404,7 +2492,7 @@
 	return 0;
 }
 
-static const struct net_device_ops vxlan_netdev_ops = {
+static const struct net_device_ops vxlan_netdev_ether_ops = {
 	.ndo_init		= vxlan_init,
 	.ndo_uninit		= vxlan_uninit,
 	.ndo_open		= vxlan_open,
@@ -2421,6 +2509,17 @@
 	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
 };
 
+static const struct net_device_ops vxlan_netdev_raw_ops = {
+	.ndo_init		= vxlan_init,
+	.ndo_uninit		= vxlan_uninit,
+	.ndo_open		= vxlan_open,
+	.ndo_stop		= vxlan_stop,
+	.ndo_start_xmit		= vxlan_xmit,
+	.ndo_get_stats64	= ip_tunnel_get_stats64,
+	.ndo_change_mtu		= vxlan_change_mtu,
+	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
+};
+
 /* Info for udev, that this is a virtual tunnel endpoint */
 static struct device_type vxlan_type = {
 	.name = "vxlan",
@@ -2430,7 +2529,7 @@
  * supply the listening VXLAN udp ports. Callers are expected
  * to implement the ndo_add_vxlan_port.
  */
-void vxlan_get_rx_port(struct net_device *dev)
+static void vxlan_push_rx_ports(struct net_device *dev)
 {
 	struct vxlan_sock *vs;
 	struct net *net = dev_net(dev);
@@ -2439,6 +2538,9 @@
 	__be16 port;
 	unsigned int i;
 
+	if (!dev->netdev_ops->ndo_add_vxlan_port)
+		return;
+
 	spin_lock(&vn->sock_lock);
 	for (i = 0; i < PORT_HASH_SIZE; ++i) {
 		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
@@ -2450,7 +2552,6 @@
 	}
 	spin_unlock(&vn->sock_lock);
 }
-EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
 
 /* Initialize the device structure. */
 static void vxlan_setup(struct net_device *dev)
@@ -2461,7 +2562,6 @@
 	eth_hw_addr_random(dev);
 	ether_setup(dev);
 
-	dev->netdev_ops = &vxlan_netdev_ops;
 	dev->destructor = free_netdev;
 	SET_NETDEV_DEVTYPE(dev, &vxlan_type);
 
@@ -2476,8 +2576,7 @@
 	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
 	netif_keep_dst(dev);
-	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
-	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	INIT_LIST_HEAD(&vxlan->next);
 	spin_lock_init(&vxlan->hash_lock);
@@ -2496,6 +2595,23 @@
 		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
 }
 
+static void vxlan_ether_setup(struct net_device *dev)
+{
+	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	dev->netdev_ops = &vxlan_netdev_ether_ops;
+}
+
+static void vxlan_raw_setup(struct net_device *dev)
+{
+	dev->header_ops = NULL;
+	dev->type = ARPHRD_NONE;
+	dev->hard_header_len = 0;
+	dev->addr_len = 0;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+	dev->netdev_ops = &vxlan_netdev_raw_ops;
+}
+
 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
 	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
 	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
@@ -2522,6 +2638,7 @@
 	[IFLA_VXLAN_REMCSUM_TX]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_REMCSUM_RX]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_GBP]	= { .type = NLA_FLAG, },
+	[IFLA_VXLAN_GPE]	= { .type = NLA_FLAG, },
 	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
 };
 
@@ -2574,13 +2691,6 @@
 	.get_link	= ethtool_op_get_link,
 };
 
-static void vxlan_del_work(struct work_struct *work)
-{
-	struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
-	udp_tunnel_sock_release(vs->sock);
-	kfree_rcu(vs, rcu);
-}
-
 static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
 					__be16 port, u32 flags)
 {
@@ -2626,8 +2736,6 @@
 	for (h = 0; h < VNI_HASH_SIZE; ++h)
 		INIT_HLIST_HEAD(&vs->vni_list[h]);
 
-	INIT_WORK(&vs->del_work, vxlan_del_work);
-
 	sock = vxlan_create_sock(net, ipv6, port, flags);
 	if (IS_ERR(sock)) {
 		pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
@@ -2640,21 +2748,19 @@
 	atomic_set(&vs->refcnt, 1);
 	vs->flags = (flags & VXLAN_F_RCV_FLAGS);
 
-	/* Initialize the vxlan udp offloads structure */
-	vs->udp_offloads.port = port;
-	vs->udp_offloads.callbacks.gro_receive  = vxlan_gro_receive;
-	vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
-
 	spin_lock(&vn->sock_lock);
 	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
 	vxlan_notify_add_rx_port(vs);
 	spin_unlock(&vn->sock_lock);
 
 	/* Mark socket as an encapsulation socket. */
+	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
 	tunnel_cfg.sk_user_data = vs;
 	tunnel_cfg.encap_type = 1;
 	tunnel_cfg.encap_rcv = vxlan_rcv;
 	tunnel_cfg.encap_destroy = NULL;
+	tunnel_cfg.gro_receive = vxlan_gro_receive;
+	tunnel_cfg.gro_complete = vxlan_gro_complete;
 
 	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 
@@ -2722,6 +2828,21 @@
 	__be16 default_port = vxlan->cfg.dst_port;
 	struct net_device *lowerdev = NULL;
 
+	if (conf->flags & VXLAN_F_GPE) {
+		if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
+			return -EINVAL;
+		/* For now, allow GPE only together with COLLECT_METADATA.
+		 * This can be relaxed later; in such case, the other side
+		 * This can be relaxed later; in such a case, the other side
+		 */
+		if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
+			return -EINVAL;
+
+		vxlan_raw_setup(dev);
+	} else {
+		vxlan_ether_setup(dev);
+	}
+
 	vxlan->net = src_net;
 
 	dst->remote_vni = conf->vni;
@@ -2783,8 +2904,12 @@
 	dev->needed_headroom = needed_headroom;
 
 	memcpy(&vxlan->cfg, conf, sizeof(*conf));
-	if (!vxlan->cfg.dst_port)
-		vxlan->cfg.dst_port = default_port;
+	if (!vxlan->cfg.dst_port) {
+		if (conf->flags & VXLAN_F_GPE)
+			vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
+		else
+			vxlan->cfg.dst_port = default_port;
+	}
 	vxlan->flags |= conf->flags;
 
 	if (!vxlan->cfg.age_interval)
@@ -2955,6 +3080,9 @@
 	if (data[IFLA_VXLAN_GBP])
 		conf.flags |= VXLAN_F_GBP;
 
+	if (data[IFLA_VXLAN_GPE])
+		conf.flags |= VXLAN_F_GPE;
+
 	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
 		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
 
@@ -2971,6 +3099,10 @@
 	case -EEXIST:
 		pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
 		break;
+
+	case -EINVAL:
+		pr_info("unsupported combination of extensions\n");
+		break;
 	}
 
 	return err;
@@ -3098,6 +3230,10 @@
 	    nla_put_flag(skb, IFLA_VXLAN_GBP))
 		goto nla_put_failure;
 
+	if (vxlan->flags & VXLAN_F_GPE &&
+	    nla_put_flag(skb, IFLA_VXLAN_GPE))
+		goto nla_put_failure;
+
 	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
 	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
 		goto nla_put_failure;
@@ -3151,20 +3287,22 @@
 	unregister_netdevice_many(&list_kill);
 }
 
-static int vxlan_lowerdev_event(struct notifier_block *unused,
-				unsigned long event, void *ptr)
+static int vxlan_netdevice_event(struct notifier_block *unused,
+				 unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
 
 	if (event == NETDEV_UNREGISTER)
 		vxlan_handle_lowerdev_unregister(vn, dev);
+	else if (event == NETDEV_OFFLOAD_PUSH_VXLAN)
+		vxlan_push_rx_ports(dev);
 
 	return NOTIFY_DONE;
 }
 
 static struct notifier_block vxlan_notifier_block __read_mostly = {
-	.notifier_call = vxlan_lowerdev_event,
+	.notifier_call = vxlan_netdevice_event,
 };
 
 static __net_init int vxlan_init_net(struct net *net)
@@ -3218,10 +3356,6 @@
 {
 	int rc;
 
-	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
-	if (!vxlan_wq)
-		return -ENOMEM;
-
 	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
 
 	rc = register_pernet_subsys(&vxlan_net_ops);
@@ -3242,7 +3376,6 @@
 out2:
 	unregister_pernet_subsys(&vxlan_net_ops);
 out1:
-	destroy_workqueue(vxlan_wq);
 	return rc;
 }
 late_initcall(vxlan_init_module);
@@ -3251,7 +3384,6 @@
 {
 	rtnl_link_unregister(&vxlan_link_ops);
 	unregister_netdevice_notifier(&vxlan_notifier_block);
-	destroy_workqueue(vxlan_wq);
 	unregister_pernet_subsys(&vxlan_net_ops);
 	/* rcu_barrier() is called by netns */
 }
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 848ea6a..b87fe0a 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -739,7 +739,7 @@
 		chan->netdev->stats.rx_dropped++;
 		return NULL;
 	}
-	chan->netdev->trans_start = jiffies;
+	netif_trans_update(chan->netdev);
 	return skb_put(chan->rx_skb, size);
 }
 
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 69b994f..3c9cbf9 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -831,7 +831,7 @@
 		DMA_OWN | TX_STP | TX_ENP);
 	dev->stats.tx_packets++;
 	dev->stats.tx_bytes += len;
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 }
 
 /*
@@ -1389,7 +1389,7 @@
 						DMA_OWN | TX_STP | TX_ENP);
 					dev->stats.tx_packets++;
 					dev->stats.tx_bytes += skb->len;
-					dev->trans_start = jiffies;
+					netif_trans_update(dev);
 				} else {
 					/* Or do it through dma */
 					memcpy(card->tx_dma_handle_host,
@@ -2258,7 +2258,7 @@
 	    card->card_no, port->index);
 	fst_issue_cmd(port, ABORTTX);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	netif_wake_queue(dev);
 	port->start = 0;
 }
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index bb33b24..299140c 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -2105,7 +2105,7 @@
     sc->lmc_device->stats.tx_errors++;
     sc->extra_stats.tx_ProcTimeout++; /* -baz */
 
-    dev->trans_start = jiffies; /* prevent tx timeout */
+    netif_trans_update(dev); /* prevent tx timeout */
 
 bug_out:
 
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 8fef8d8..d98c7e5 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -860,9 +860,9 @@
 
 	outb( inb( dev->base_addr + CSR0 ) | TR_REQ,  dev->base_addr + CSR0 );
 #ifdef CONFIG_SBNI_MULTILINE
-	nl->master->trans_start = jiffies;
+	netif_trans_update(nl->master);
 #else
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 #endif
 }
 
@@ -889,10 +889,10 @@
 	nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
 #ifdef CONFIG_SBNI_MULTILINE
 	netif_start_queue( nl->master );
-	nl->master->trans_start = jiffies;
+	netif_trans_update(nl->master);
 #else
 	netif_start_queue( dev );
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 #endif
 }
 
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index a9970f1..bb74f4b 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -334,7 +334,7 @@
 	d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
 		  i2400m, net_dev, skb);
 	/* FIXME: check eth hdr, only IPv4 is routed by the device as of now */
-	net_dev->trans_start = jiffies;
+	netif_trans_update(net_dev);
 	i2400m_tx_prep_header(skb);
 	d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
 		 skb, skb->len);
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 15f057e..70ecd82 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -440,7 +440,7 @@
 			rx_status.rate_idx = rate;
 
 			rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
-			rx_status.band = IEEE80211_BAND_2GHZ;
+			rx_status.band = NL80211_BAND_2GHZ;
 
 			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
 			ieee80211_rx_irqsafe(dev, skb);
@@ -1894,7 +1894,7 @@
 
 	priv->channel = 1;
 
-	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+	dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 
 	err = ieee80211_register_hw(dev);
 	if (err) {
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 3b343c6..8aded24 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1471,12 +1471,12 @@
 	memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
 	memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
 
-	ar->band.band = IEEE80211_BAND_2GHZ;
+	ar->band.band = NL80211_BAND_2GHZ;
 	ar->band.channels = ar->channels;
 	ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
 	ar->band.bitrates = ar->rates;
 	ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
-	ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band;
+	ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &ar->band;
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 65ef483..da7a7c8 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -185,7 +185,7 @@
 	bool bt_ant_diversity;
 
 	int last_rssi;
-	struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+	struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
 };
 
 static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index edf3629..9fb8d74 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -411,7 +411,8 @@
 
 	lockdep_assert_held(&ar_pci->ce_lock);
 
-	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+	if ((pipe->id != 5) &&
+	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
 		return -ENOSPC;
 
 	desc->addr = __cpu_to_le32(paddr);
@@ -425,6 +426,19 @@
 	return 0;
 }
 
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
+{
+	struct ath10k *ar = pipe->ar;
+	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int write_index = dest_ring->write_index;
+	u32 ctrl_addr = pipe->ctrl_addr;
+
+	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
+	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+	dest_ring->write_index = write_index;
+}
+
 int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
 {
 	struct ath10k *ar = pipe->ar;
@@ -444,14 +458,10 @@
  */
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 					 void **per_transfer_contextp,
-					 u32 *bufferp,
-					 unsigned int *nbytesp,
-					 unsigned int *transfer_idp,
-					 unsigned int *flagsp)
+					 unsigned int *nbytesp)
 {
 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
-	struct ath10k *ar = ce_state->ar;
 	unsigned int sw_index = dest_ring->sw_index;
 
 	struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -476,21 +486,17 @@
 	desc->nbytes = 0;
 
 	/* Return data from completed destination descriptor */
-	*bufferp = __le32_to_cpu(sdesc.addr);
 	*nbytesp = nbytes;
-	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
-
-	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
-		*flagsp = CE_RECV_FLAG_SWAPPED;
-	else
-		*flagsp = 0;
 
 	if (per_transfer_contextp)
 		*per_transfer_contextp =
 			dest_ring->per_transfer_context[sw_index];
 
-	/* sanity */
-	dest_ring->per_transfer_context[sw_index] = NULL;
+	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+	 * So update the transfer context for all CEs except CE5.
+	 */
+	if (ce_state->id != 5)
+		dest_ring->per_transfer_context[sw_index] = NULL;
 
 	/* Update sw_index */
 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@@ -501,10 +507,7 @@
 
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
-				  u32 *bufferp,
-				  unsigned int *nbytesp,
-				  unsigned int *transfer_idp,
-				  unsigned int *flagsp)
+				  unsigned int *nbytesp)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -513,8 +516,7 @@
 	spin_lock_bh(&ar_pci->ce_lock);
 	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
 						   per_transfer_contextp,
-						   bufferp, nbytesp,
-						   transfer_idp, flagsp);
+						   nbytesp);
 	spin_unlock_bh(&ar_pci->ce_lock);
 
 	return ret;
@@ -1048,11 +1050,11 @@
 	 *
 	 * For the lack of a better place do the check here.
 	 */
-	BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
+	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
 		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-	BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
+	BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
 		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-	BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
+	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
 		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
 
 	ce_state->ar = ar;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 47b734c..dfc0986 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -22,7 +22,7 @@
 
 /* Maximum number of Copy Engine's supported */
 #define CE_COUNT_MAX 12
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
 
 /* Descriptor rings must be aligned to this boundary */
 #define CE_DESC_RING_ALIGN	8
@@ -166,6 +166,7 @@
 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
 int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
 int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
 
 /* recv flags */
 /* Data is byte-swapped */
@@ -177,10 +178,7 @@
  */
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
 				  void **per_transfer_contextp,
-				  u32 *bufferp,
-				  unsigned int *nbytesp,
-				  unsigned int *transfer_idp,
-				  unsigned int *flagsp);
+				  unsigned int *nbytesp);
 /*
  * Supply data for the next completed unprocessed send descriptor.
  * Pops 1 completed send buffer from Source ring.
@@ -212,10 +210,7 @@
 
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 					 void **per_transfer_contextp,
-					 u32 *bufferp,
-					 unsigned int *nbytesp,
-					 unsigned int *transfer_idp,
-					 unsigned int *flagsp);
+					 unsigned int *nbytesp);
 
 /*
  * Support clean shutdown by allowing the caller to cancel
@@ -413,9 +408,11 @@
 
 /* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
 #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
-	(((int)(toidx)-(int)(fromidx)) & (nentries_mask))
+	(((int)(toidx) - (int)(fromidx)) & (nentries_mask))
 
 #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
+		(((idx) + (num)) & (nentries_mask))
 
 #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
 				ar->regs->ce_wrap_intr_sum_host_msi_lsb
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index c84c2d3..e94cb87 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -60,10 +60,9 @@
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
 		.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+		.cal_data_len = 2116,
 		.fw = {
 			.dir = QCA988X_HW_2_0_FW_DIR,
-			.fw = QCA988X_HW_2_0_FW_FILE,
-			.otp = QCA988X_HW_2_0_OTP_FILE,
 			.board = QCA988X_HW_2_0_BOARD_DATA_FILE,
 			.board_size = QCA988X_BOARD_DATA_SZ,
 			.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
@@ -78,10 +77,9 @@
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 8124,
 		.fw = {
 			.dir = QCA6174_HW_2_1_FW_DIR,
-			.fw = QCA6174_HW_2_1_FW_FILE,
-			.otp = QCA6174_HW_2_1_OTP_FILE,
 			.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
 			.board_size = QCA6174_BOARD_DATA_SZ,
 			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -97,10 +95,9 @@
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
 		.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+		.cal_data_len = 8124,
 		.fw = {
 			.dir = QCA6174_HW_2_1_FW_DIR,
-			.fw = QCA6174_HW_2_1_FW_FILE,
-			.otp = QCA6174_HW_2_1_OTP_FILE,
 			.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
 			.board_size = QCA6174_BOARD_DATA_SZ,
 			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -116,10 +113,9 @@
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
 		.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+		.cal_data_len = 8124,
 		.fw = {
 			.dir = QCA6174_HW_3_0_FW_DIR,
-			.fw = QCA6174_HW_3_0_FW_FILE,
-			.otp = QCA6174_HW_3_0_OTP_FILE,
 			.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
 			.board_size = QCA6174_BOARD_DATA_SZ,
 			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -135,11 +131,10 @@
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
 		.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+		.cal_data_len = 8124,
 		.fw = {
 			/* uses same binaries as hw3.0 */
 			.dir = QCA6174_HW_3_0_FW_DIR,
-			.fw = QCA6174_HW_3_0_FW_FILE,
-			.otp = QCA6174_HW_3_0_OTP_FILE,
 			.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
 			.board_size = QCA6174_BOARD_DATA_SZ,
 			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -156,15 +151,12 @@
 		.channel_counters_freq_hz = 150000,
 		.max_probe_resp_desc_thres = 24,
 		.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
-		.num_msdu_desc = 1424,
-		.qcache_active_peers = 50,
 		.tx_chain_mask = 0xf,
 		.rx_chain_mask = 0xf,
 		.max_spatial_stream = 4,
+		.cal_data_len = 12064,
 		.fw = {
 			.dir = QCA99X0_HW_2_0_FW_DIR,
-			.fw = QCA99X0_HW_2_0_FW_FILE,
-			.otp = QCA99X0_HW_2_0_OTP_FILE,
 			.board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
 			.board_size = QCA99X0_BOARD_DATA_SZ,
 			.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
@@ -179,10 +171,9 @@
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 8124,
 		.fw = {
 			.dir = QCA9377_HW_1_0_FW_DIR,
-			.fw = QCA9377_HW_1_0_FW_FILE,
-			.otp = QCA9377_HW_1_0_OTP_FILE,
 			.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
 			.board_size = QCA9377_BOARD_DATA_SZ,
 			.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
@@ -197,10 +188,9 @@
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
 		.max_probe_resp_desc_thres = 0,
+		.cal_data_len = 8124,
 		.fw = {
 			.dir = QCA9377_HW_1_0_FW_DIR,
-			.fw = QCA9377_HW_1_0_FW_FILE,
-			.otp = QCA9377_HW_1_0_OTP_FILE,
 			.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
 			.board_size = QCA9377_BOARD_DATA_SZ,
 			.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
@@ -217,15 +207,12 @@
 		.channel_counters_freq_hz = 125000,
 		.max_probe_resp_desc_thres = 24,
 		.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
-		.num_msdu_desc = 2500,
-		.qcache_active_peers = 35,
 		.tx_chain_mask = 0x3,
 		.rx_chain_mask = 0x3,
 		.max_spatial_stream = 2,
+		.cal_data_len = 12064,
 		.fw = {
 			.dir = QCA4019_HW_1_0_FW_DIR,
-			.fw = QCA4019_HW_1_0_FW_FILE,
-			.otp = QCA4019_HW_1_0_OTP_FILE,
 			.board = QCA4019_HW_1_0_BOARD_DATA_FILE,
 			.board_size = QCA4019_BOARD_DATA_SZ,
 			.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
@@ -274,7 +261,7 @@
 	int i;
 
 	for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
-		if (test_bit(i, ar->fw_features)) {
+		if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) {
 			if (len > 0)
 				len += scnprintf(buf + len, buf_len - len, ",");
 
@@ -466,18 +453,18 @@
 	return ret;
 }
 
-static int ath10k_download_cal_file(struct ath10k *ar)
+static int ath10k_download_cal_file(struct ath10k *ar,
+				    const struct firmware *file)
 {
 	int ret;
 
-	if (!ar->cal_file)
+	if (!file)
 		return -ENOENT;
 
-	if (IS_ERR(ar->cal_file))
-		return PTR_ERR(ar->cal_file);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
 
-	ret = ath10k_download_board_data(ar, ar->cal_file->data,
-					 ar->cal_file->size);
+	ret = ath10k_download_board_data(ar, file->data, file->size);
 	if (ret) {
 		ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
 		return ret;
@@ -488,7 +475,7 @@
 	return 0;
 }
 
-static int ath10k_download_cal_dt(struct ath10k *ar)
+static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
 {
 	struct device_node *node;
 	int data_len;
@@ -502,13 +489,12 @@
 		 */
 		return -ENOENT;
 
-	if (!of_get_property(node, "qcom,ath10k-calibration-data",
-			     &data_len)) {
+	if (!of_get_property(node, dt_name, &data_len)) {
 		/* The calibration data node is optional */
 		return -ENOENT;
 	}
 
-	if (data_len != QCA988X_CAL_DATA_LEN) {
+	if (data_len != ar->hw_params.cal_data_len) {
 		ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
 			    data_len);
 		ret = -EMSGSIZE;
@@ -521,8 +507,7 @@
 		goto out;
 	}
 
-	ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
-					data, data_len);
+	ret = of_property_read_u8_array(node, dt_name, data, data_len);
 	if (ret) {
 		ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
 			    ret);
@@ -553,7 +538,8 @@
 
 	address = ar->hw_params.patch_load_addr;
 
-	if (!ar->otp_data || !ar->otp_len) {
+	if (!ar->normal_mode_fw.fw_file.otp_data ||
+	    !ar->normal_mode_fw.fw_file.otp_len) {
 		ath10k_warn(ar,
 			    "failed to retrieve board id because of invalid otp\n");
 		return -ENODATA;
@@ -561,9 +547,11 @@
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
 		   "boot upload otp to 0x%x len %zd for board id\n",
-		   address, ar->otp_len);
+		   address, ar->normal_mode_fw.fw_file.otp_len);
 
-	ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+	ret = ath10k_bmi_fast_download(ar, address,
+				       ar->normal_mode_fw.fw_file.otp_data,
+				       ar->normal_mode_fw.fw_file.otp_len);
 	if (ret) {
 		ath10k_err(ar, "could not write otp for board id check: %d\n",
 			   ret);
@@ -601,7 +589,9 @@
 	u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
 	int ret;
 
-	ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
+	ret = ath10k_download_board_data(ar,
+					 ar->running_fw->board_data,
+					 ar->running_fw->board_len);
 	if (ret) {
 		ath10k_err(ar, "failed to download board data: %d\n", ret);
 		return ret;
@@ -609,16 +599,20 @@
 
 	/* OTP is optional */
 
-	if (!ar->otp_data || !ar->otp_len) {
+	if (!ar->running_fw->fw_file.otp_data ||
+	    !ar->running_fw->fw_file.otp_len) {
 		ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
-			    ar->otp_data, ar->otp_len);
+			    ar->running_fw->fw_file.otp_data,
+			    ar->running_fw->fw_file.otp_len);
 		return 0;
 	}
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
-		   address, ar->otp_len);
+		   address, ar->running_fw->fw_file.otp_len);
 
-	ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+	ret = ath10k_bmi_fast_download(ar, address,
+				       ar->running_fw->fw_file.otp_data,
+				       ar->running_fw->fw_file.otp_len);
 	if (ret) {
 		ath10k_err(ar, "could not write otp (%d)\n", ret);
 		return ret;
@@ -633,7 +627,7 @@
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
 
 	if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
-				   ar->fw_features)) &&
+				   ar->running_fw->fw_file.fw_features)) &&
 	    result != 0) {
 		ath10k_err(ar, "otp calibration failed: %d", result);
 		return -EINVAL;
@@ -642,46 +636,32 @@
 	return 0;
 }
 
-static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_download_fw(struct ath10k *ar)
 {
 	u32 address, data_len;
-	const char *mode_name;
 	const void *data;
 	int ret;
 
 	address = ar->hw_params.patch_load_addr;
 
-	switch (mode) {
-	case ATH10K_FIRMWARE_MODE_NORMAL:
-		data = ar->firmware_data;
-		data_len = ar->firmware_len;
-		mode_name = "normal";
-		ret = ath10k_swap_code_seg_configure(ar,
-						     ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
-		if (ret) {
-			ath10k_err(ar, "failed to configure fw code swap: %d\n",
-				   ret);
-			return ret;
-		}
-		break;
-	case ATH10K_FIRMWARE_MODE_UTF:
-		data = ar->testmode.utf_firmware_data;
-		data_len = ar->testmode.utf_firmware_len;
-		mode_name = "utf";
-		break;
-	default:
-		ath10k_err(ar, "unknown firmware mode: %d\n", mode);
-		return -EINVAL;
+	data = ar->running_fw->fw_file.firmware_data;
+	data_len = ar->running_fw->fw_file.firmware_len;
+
+	ret = ath10k_swap_code_seg_configure(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to configure fw code swap: %d\n",
+			   ret);
+		return ret;
 	}
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
-		   "boot uploading firmware image %p len %d mode %s\n",
-		   data, data_len, mode_name);
+		   "boot uploading firmware image %p len %d\n",
+		   data, data_len);
 
 	ret = ath10k_bmi_fast_download(ar, address, data, data_len);
 	if (ret) {
-		ath10k_err(ar, "failed to download %s firmware: %d\n",
-			   mode_name, ret);
+		ath10k_err(ar, "failed to download firmware: %d\n",
+			   ret);
 		return ret;
 	}
 
@@ -690,34 +670,30 @@
 
 static void ath10k_core_free_board_files(struct ath10k *ar)
 {
-	if (!IS_ERR(ar->board))
-		release_firmware(ar->board);
+	if (!IS_ERR(ar->normal_mode_fw.board))
+		release_firmware(ar->normal_mode_fw.board);
 
-	ar->board = NULL;
-	ar->board_data = NULL;
-	ar->board_len = 0;
+	ar->normal_mode_fw.board = NULL;
+	ar->normal_mode_fw.board_data = NULL;
+	ar->normal_mode_fw.board_len = 0;
 }
 
 static void ath10k_core_free_firmware_files(struct ath10k *ar)
 {
-	if (!IS_ERR(ar->otp))
-		release_firmware(ar->otp);
-
-	if (!IS_ERR(ar->firmware))
-		release_firmware(ar->firmware);
+	if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware))
+		release_firmware(ar->normal_mode_fw.fw_file.firmware);
 
 	if (!IS_ERR(ar->cal_file))
 		release_firmware(ar->cal_file);
 
 	ath10k_swap_code_seg_release(ar);
 
-	ar->otp = NULL;
-	ar->otp_data = NULL;
-	ar->otp_len = 0;
+	ar->normal_mode_fw.fw_file.otp_data = NULL;
+	ar->normal_mode_fw.fw_file.otp_len = 0;
 
-	ar->firmware = NULL;
-	ar->firmware_data = NULL;
-	ar->firmware_len = 0;
+	ar->normal_mode_fw.fw_file.firmware = NULL;
+	ar->normal_mode_fw.fw_file.firmware_data = NULL;
+	ar->normal_mode_fw.fw_file.firmware_len = 0;
 
 	ar->cal_file = NULL;
 }
@@ -726,6 +702,14 @@
 {
 	char filename[100];
 
+	/* pre-cal-<bus>-<id>.bin */
+	scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin",
+		  ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+	ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+	if (!IS_ERR(ar->pre_cal_file))
+		goto success;
+
 	/* cal-<bus>-<id>.bin */
 	scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
 		  ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
@@ -734,7 +718,7 @@
 	if (IS_ERR(ar->cal_file))
 		/* calibration file is optional, don't print any warnings */
 		return PTR_ERR(ar->cal_file);
-
+success:
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
 		   ATH10K_FW_DIR, filename);
 
@@ -748,14 +732,14 @@
 		return -EINVAL;
 	}
 
-	ar->board = ath10k_fetch_fw_file(ar,
-					 ar->hw_params.fw.dir,
-					 ar->hw_params.fw.board);
-	if (IS_ERR(ar->board))
-		return PTR_ERR(ar->board);
+	ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+							ar->hw_params.fw.dir,
+							ar->hw_params.fw.board);
+	if (IS_ERR(ar->normal_mode_fw.board))
+		return PTR_ERR(ar->normal_mode_fw.board);
 
-	ar->board_data = ar->board->data;
-	ar->board_len = ar->board->size;
+	ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
+	ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
 
 	return 0;
 }
@@ -815,8 +799,8 @@
 				   "boot found board data for '%s'",
 				   boardname);
 
-			ar->board_data = board_ie_data;
-			ar->board_len = board_ie_len;
+			ar->normal_mode_fw.board_data = board_ie_data;
+			ar->normal_mode_fw.board_len = board_ie_len;
 
 			ret = 0;
 			goto out;
@@ -849,12 +833,14 @@
 	const u8 *data;
 	int ret, ie_id;
 
-	ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
-	if (IS_ERR(ar->board))
-		return PTR_ERR(ar->board);
+	ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+							ar->hw_params.fw.dir,
+							filename);
+	if (IS_ERR(ar->normal_mode_fw.board))
+		return PTR_ERR(ar->normal_mode_fw.board);
 
-	data = ar->board->data;
-	len = ar->board->size;
+	data = ar->normal_mode_fw.board->data;
+	len = ar->normal_mode_fw.board->size;
 
 	/* magic has an extra null byte of padding */
 	magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
@@ -921,7 +907,7 @@
 	}
 
 out:
-	if (!ar->board_data || !ar->board_len) {
+	if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) {
 		ath10k_err(ar,
 			   "failed to fetch board data for %s from %s/%s\n",
 			   boardname, ar->hw_params.fw.dir, filename);
@@ -989,51 +975,8 @@
 	return 0;
 }
 
-static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
-{
-	int ret = 0;
-
-	if (ar->hw_params.fw.fw == NULL) {
-		ath10k_err(ar, "firmware file not defined\n");
-		return -EINVAL;
-	}
-
-	ar->firmware = ath10k_fetch_fw_file(ar,
-					    ar->hw_params.fw.dir,
-					    ar->hw_params.fw.fw);
-	if (IS_ERR(ar->firmware)) {
-		ret = PTR_ERR(ar->firmware);
-		ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
-		goto err;
-	}
-
-	ar->firmware_data = ar->firmware->data;
-	ar->firmware_len = ar->firmware->size;
-
-	/* OTP may be undefined. If so, don't fetch it at all */
-	if (ar->hw_params.fw.otp == NULL)
-		return 0;
-
-	ar->otp = ath10k_fetch_fw_file(ar,
-				       ar->hw_params.fw.dir,
-				       ar->hw_params.fw.otp);
-	if (IS_ERR(ar->otp)) {
-		ret = PTR_ERR(ar->otp);
-		ath10k_err(ar, "could not fetch otp (%d)\n", ret);
-		goto err;
-	}
-
-	ar->otp_data = ar->otp->data;
-	ar->otp_len = ar->otp->size;
-
-	return 0;
-
-err:
-	ath10k_core_free_firmware_files(ar);
-	return ret;
-}
-
-static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+				     struct ath10k_fw_file *fw_file)
 {
 	size_t magic_len, len, ie_len;
 	int ie_id, i, index, bit, ret;
@@ -1042,15 +985,17 @@
 	__le32 *timestamp, *version;
 
 	/* first fetch the firmware file (firmware-*.bin) */
-	ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
-	if (IS_ERR(ar->firmware)) {
+	fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+						 name);
+	if (IS_ERR(fw_file->firmware)) {
 		ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
-			   ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
-		return PTR_ERR(ar->firmware);
+			   ar->hw_params.fw.dir, name,
+			   PTR_ERR(fw_file->firmware));
+		return PTR_ERR(fw_file->firmware);
 	}
 
-	data = ar->firmware->data;
-	len = ar->firmware->size;
+	data = fw_file->firmware->data;
+	len = fw_file->firmware->size;
 
 	/* magic also includes the null byte, check that as well */
 	magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
@@ -1093,15 +1038,15 @@
 
 		switch (ie_id) {
 		case ATH10K_FW_IE_FW_VERSION:
-			if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+			if (ie_len > sizeof(fw_file->fw_version) - 1)
 				break;
 
-			memcpy(ar->hw->wiphy->fw_version, data, ie_len);
-			ar->hw->wiphy->fw_version[ie_len] = '\0';
+			memcpy(fw_file->fw_version, data, ie_len);
+			fw_file->fw_version[ie_len] = '\0';
 
 			ath10k_dbg(ar, ATH10K_DBG_BOOT,
 				   "found fw version %s\n",
-				    ar->hw->wiphy->fw_version);
+				    fw_file->fw_version);
 			break;
 		case ATH10K_FW_IE_TIMESTAMP:
 			if (ie_len != sizeof(u32))
@@ -1128,21 +1073,21 @@
 					ath10k_dbg(ar, ATH10K_DBG_BOOT,
 						   "Enabling feature bit: %i\n",
 						   i);
-					__set_bit(i, ar->fw_features);
+					__set_bit(i, fw_file->fw_features);
 				}
 			}
 
 			ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
-					ar->fw_features,
-					sizeof(ar->fw_features));
+					ar->running_fw->fw_file.fw_features,
+					sizeof(fw_file->fw_features));
 			break;
 		case ATH10K_FW_IE_FW_IMAGE:
 			ath10k_dbg(ar, ATH10K_DBG_BOOT,
 				   "found fw image ie (%zd B)\n",
 				   ie_len);
 
-			ar->firmware_data = data;
-			ar->firmware_len = ie_len;
+			fw_file->firmware_data = data;
+			fw_file->firmware_len = ie_len;
 
 			break;
 		case ATH10K_FW_IE_OTP_IMAGE:
@@ -1150,8 +1095,8 @@
 				   "found otp image ie (%zd B)\n",
 				   ie_len);
 
-			ar->otp_data = data;
-			ar->otp_len = ie_len;
+			fw_file->otp_data = data;
+			fw_file->otp_len = ie_len;
 
 			break;
 		case ATH10K_FW_IE_WMI_OP_VERSION:
@@ -1160,10 +1105,10 @@
 
 			version = (__le32 *)data;
 
-			ar->wmi.op_version = le32_to_cpup(version);
+			fw_file->wmi_op_version = le32_to_cpup(version);
 
 			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
-				   ar->wmi.op_version);
+				   fw_file->wmi_op_version);
 			break;
 		case ATH10K_FW_IE_HTT_OP_VERSION:
 			if (ie_len != sizeof(u32))
@@ -1171,17 +1116,17 @@
 
 			version = (__le32 *)data;
 
-			ar->htt.op_version = le32_to_cpup(version);
+			fw_file->htt_op_version = le32_to_cpup(version);
 
 			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
-				   ar->htt.op_version);
+				   fw_file->htt_op_version);
 			break;
 		case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
 			ath10k_dbg(ar, ATH10K_DBG_BOOT,
 				   "found fw code swap image ie (%zd B)\n",
 				   ie_len);
-			ar->swap.firmware_codeswap_data = data;
-			ar->swap.firmware_codeswap_len = ie_len;
+			fw_file->codeswap_data = data;
+			fw_file->codeswap_len = ie_len;
 			break;
 		default:
 			ath10k_warn(ar, "Unknown FW IE: %u\n",
@@ -1196,7 +1141,8 @@
 		data += ie_len;
 	}
 
-	if (!ar->firmware_data || !ar->firmware_len) {
+	if (!fw_file->firmware_data ||
+	    !fw_file->firmware_len) {
 		ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
 			    ar->hw_params.fw.dir, name);
 		ret = -ENOMEDIUM;
@@ -1220,35 +1166,32 @@
 	ar->fw_api = 5;
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE,
+					       &ar->normal_mode_fw.fw_file);
 	if (ret == 0)
 		goto success;
 
 	ar->fw_api = 4;
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE,
+					       &ar->normal_mode_fw.fw_file);
 	if (ret == 0)
 		goto success;
 
 	ar->fw_api = 3;
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE,
+					       &ar->normal_mode_fw.fw_file);
 	if (ret == 0)
 		goto success;
 
 	ar->fw_api = 2;
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
-	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
-	if (ret == 0)
-		goto success;
-
-	ar->fw_api = 1;
-	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
-
-	ret = ath10k_core_fetch_firmware_api_1(ar);
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE,
+					       &ar->normal_mode_fw.fw_file);
 	if (ret)
 		return ret;
 
@@ -1258,11 +1201,77 @@
 	return 0;
 }
 
+static int ath10k_core_pre_cal_download(struct ath10k *ar)
+{
+	int ret;
+
+	ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
+	if (ret == 0) {
+		ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
+		goto success;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot did not find a pre calibration file, try DT next: %d\n",
+		   ret);
+
+	ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+	if (ret) {
+		ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "unable to load pre cal data from DT: %d\n", ret);
+		return ret;
+	}
+	ar->cal_mode = ATH10K_PRE_CAL_MODE_DT;
+
+success:
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+		   ath10k_cal_mode_str(ar->cal_mode));
+
+	return 0;
+}
+
+static int ath10k_core_pre_cal_config(struct ath10k *ar)
+{
+	int ret;
+
+	ret = ath10k_core_pre_cal_download(ar);
+	if (ret) {
+		ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "failed to load pre cal data: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_core_get_board_id_from_otp(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to get board id: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_download_and_run_otp(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to run otp: %d\n", ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "pre cal configuration done successfully\n");
+
+	return 0;
+}
+
 static int ath10k_download_cal_data(struct ath10k *ar)
 {
 	int ret;
 
-	ret = ath10k_download_cal_file(ar);
+	ret = ath10k_core_pre_cal_config(ar);
+	if (ret == 0)
+		return 0;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "pre cal download procedure failed, try cal file: %d\n",
+		   ret);
+
+	ret = ath10k_download_cal_file(ar, ar->cal_file);
 	if (ret == 0) {
 		ar->cal_mode = ATH10K_CAL_MODE_FILE;
 		goto done;
@@ -1272,7 +1281,7 @@
 		   "boot did not find a calibration file, try DT next: %d\n",
 		   ret);
 
-	ret = ath10k_download_cal_dt(ar);
+	ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
 	if (ret == 0) {
 		ar->cal_mode = ATH10K_CAL_MODE_DT;
 		goto done;
@@ -1420,15 +1429,17 @@
 
 static int ath10k_core_init_firmware_features(struct ath10k *ar)
 {
-	if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
-	    !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+	struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+	if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
+	    !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
 		ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
 		return -EINVAL;
 	}
 
-	if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+	if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
 		ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
-			   ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
+			   ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version);
 		return -EINVAL;
 	}
 
@@ -1440,7 +1451,7 @@
 		break;
 	case ATH10K_CRYPT_MODE_SW:
 		if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
-			      ar->fw_features)) {
+			      fw_file->fw_features)) {
 			ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
 			return -EINVAL;
 		}
@@ -1459,7 +1470,7 @@
 
 	if (rawmode) {
 		if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
-			      ar->fw_features)) {
+			      fw_file->fw_features)) {
 			ath10k_err(ar, "rawmode = 1 requires support from firmware");
 			return -EINVAL;
 		}
@@ -1484,19 +1495,19 @@
 	/* Backwards compatibility for firmwares without
 	 * ATH10K_FW_IE_WMI_OP_VERSION.
 	 */
-	if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
-		if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+	if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+		if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
 			if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
-				     ar->fw_features))
-				ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+				     fw_file->fw_features))
+				fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
 			else
-				ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+				fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
 		} else {
-			ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+			fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
 		}
 	}
 
-	switch (ar->wmi.op_version) {
+	switch (fw_file->wmi_op_version) {
 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
 		ar->max_num_peers = TARGET_NUM_PEERS;
 		ar->max_num_stations = TARGET_NUM_STATIONS;
@@ -1509,7 +1520,7 @@
 	case ATH10K_FW_WMI_OP_VERSION_10_1:
 	case ATH10K_FW_WMI_OP_VERSION_10_2:
 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
-		if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+		if (ath10k_peer_stats_enabled(ar)) {
 			ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
 			ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
 		} else {
@@ -1538,9 +1549,15 @@
 		ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
 		ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
 		ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
-		ar->htt.max_num_pending_tx = ar->hw_params.num_msdu_desc;
-		ar->fw_stats_req_mask = WMI_STAT_PEER;
+		ar->fw_stats_req_mask = WMI_10_4_STAT_PEER |
+					WMI_10_4_STAT_PEER_EXTD;
 		ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
+
+		if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+			     fw_file->fw_features))
+			ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC;
+		else
+			ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
 	case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1551,18 +1568,18 @@
 	/* Backwards compatibility for firmwares without
 	 * ATH10K_FW_IE_HTT_OP_VERSION.
 	 */
-	if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
-		switch (ar->wmi.op_version) {
+	if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+		switch (fw_file->wmi_op_version) {
 		case ATH10K_FW_WMI_OP_VERSION_MAIN:
-			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+			fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
 			break;
 		case ATH10K_FW_WMI_OP_VERSION_10_1:
 		case ATH10K_FW_WMI_OP_VERSION_10_2:
 		case ATH10K_FW_WMI_OP_VERSION_10_2_4:
-			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+			fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
 			break;
 		case ATH10K_FW_WMI_OP_VERSION_TLV:
-			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+			fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
 			break;
 		case ATH10K_FW_WMI_OP_VERSION_10_4:
 		case ATH10K_FW_WMI_OP_VERSION_UNSET:
@@ -1575,14 +1592,18 @@
 	return 0;
 }
 
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+		      const struct ath10k_fw_components *fw)
 {
 	int status;
+	u32 val;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
 	clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
 
+	ar->running_fw = fw;
+
 	ath10k_bmi_start(ar);
 
 	if (ath10k_init_configure_target(ar)) {
@@ -1601,7 +1622,7 @@
 	 * to set the clock source once the target is initialized.
 	 */
 	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
-		     ar->fw_features)) {
+		     ar->running_fw->fw_file.fw_features)) {
 		status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
 		if (status) {
 			ath10k_err(ar, "could not write to skip_clock_init: %d\n",
@@ -1610,7 +1631,7 @@
 		}
 	}
 
-	status = ath10k_download_fw(ar, mode);
+	status = ath10k_download_fw(ar);
 	if (status)
 		goto err;
 
@@ -1698,6 +1719,20 @@
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
 		   ar->hw->wiphy->fw_version);
 
+	if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
+		val = 0;
+		if (ath10k_peer_stats_enabled(ar))
+			val = WMI_10_4_PEER_STATS;
+
+		status = ath10k_mac_ext_resource_config(ar, val);
+		if (status) {
+			ath10k_err(ar,
+				   "failed to send ext resource cfg command : %d\n",
+				   status);
+			goto err_hif_stop;
+		}
+	}
+
 	status = ath10k_wmi_cmd_init(ar);
 	if (status) {
 		ath10k_err(ar, "could not send WMI init command (%d)\n",
@@ -1832,13 +1867,27 @@
 		goto err_power_down;
 	}
 
+	BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) !=
+		     sizeof(ar->normal_mode_fw.fw_file.fw_version));
+	memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version,
+	       sizeof(ar->hw->wiphy->fw_version));
+
 	ath10k_debug_print_hwfw_info(ar);
 
+	ret = ath10k_core_pre_cal_download(ar);
+	if (ret) {
+		/* pre calibration data download is not necessary
+		 * for all the chipsets. Ignore failures and continue.
+		 */
+		ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "could not load pre cal data: %d\n", ret);
+	}
+
 	ret = ath10k_core_get_board_id_from_otp(ar);
 	if (ret && ret != -EOPNOTSUPP) {
 		ath10k_err(ar, "failed to get board id from otp: %d\n",
 			   ret);
-		return ret;
+		goto err_free_firmware_files;
 	}
 
 	ret = ath10k_core_fetch_board_file(ar);
@@ -1865,7 +1914,8 @@
 
 	mutex_lock(&ar->conf_mutex);
 
-	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+				&ar->normal_mode_fw);
 	if (ret) {
 		ath10k_err(ar, "could not init core (%d)\n", ret);
 		goto err_unlock;
@@ -2048,7 +2098,9 @@
 
 	mutex_init(&ar->conf_mutex);
 	spin_lock_init(&ar->data_lock);
+	spin_lock_init(&ar->txqs_lock);
 
+	INIT_LIST_HEAD(&ar->txqs);
 	INIT_LIST_HEAD(&ar->peers);
 	init_waitqueue_head(&ar->peer_mapping_wq);
 	init_waitqueue_head(&ar->htt.empty_tx_wq);
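
The reworked ath10k_core_start() above takes the firmware component set explicitly and records it as ar->running_fw, so the boot path no longer peeks at mode-specific fields. A minimal sketch of how a caller could pick between the two sets; only the normal-mode call is visible in this file, and the UTF branch is an assumption based on the testmode fields added in core.h:

	static int example_boot(struct ath10k *ar, bool utf)
	{
		const struct ath10k_fw_components *fw;
		enum ath10k_firmware_mode mode;

		if (utf) {
			/* assumed: filled in by testmode before starting */
			fw = &ar->testmode.utf_mode_fw;
			mode = ATH10K_FIRMWARE_MODE_UTF;
		} else {
			/* fetched once at probe time */
			fw = &ar->normal_mode_fw;
			mode = ATH10K_FIRMWARE_MODE_NORMAL;
		}

		/* ath10k_core_start() stores fw as ar->running_fw and only
		 * reads through it afterwards.
		 */
		return ath10k_core_start(ar, mode, fw);
	}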
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index a62b62a..1379054 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -44,8 +44,8 @@
 
 #define ATH10K_SCAN_ID 0
 #define WMI_READY_TIMEOUT (5 * HZ)
-#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
-#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
 #define ATH10K_NUM_CHANS 39
 
 /* Antenna noise floor */
@@ -98,6 +98,7 @@
 	u8 eid;
 	u16 msdu_id;
 	struct ieee80211_vif *vif;
+	struct ieee80211_txq *txq;
 } __packed;
 
 struct ath10k_skb_rxcb {
@@ -138,7 +139,6 @@
 };
 
 struct ath10k_wmi {
-	enum ath10k_fw_wmi_op_version op_version;
 	enum ath10k_htc_ep_id eid;
 	struct completion service_ready;
 	struct completion unified_ready;
@@ -297,6 +297,9 @@
 
 struct ath10k_peer {
 	struct list_head list;
+	struct ieee80211_vif *vif;
+	struct ieee80211_sta *sta;
+
 	int vdev_id;
 	u8 addr[ETH_ALEN];
 	DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
@@ -305,6 +308,12 @@
 	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
 };
 
+struct ath10k_txq {
+	struct list_head list;
+	unsigned long num_fw_queued;
+	unsigned long num_push_allowed;
+};
+
 struct ath10k_sta {
 	struct ath10k_vif *arvif;
 
@@ -313,6 +322,7 @@
 	u32 bw;
 	u32 nss;
 	u32 smps;
+	u16 peer_id;
 
 	struct work_struct update_wk;
 
@@ -323,7 +333,7 @@
 #endif
 };
 
-#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
 
 enum ath10k_beacon_state {
 	ATH10K_BEACON_SCHEDULED = 0,
@@ -335,6 +345,7 @@
 	struct list_head list;
 
 	u32 vdev_id;
+	u16 peer_id;
 	enum wmi_vdev_type vdev_type;
 	enum wmi_vdev_subtype vdev_subtype;
 	u32 beacon_interval;
@@ -549,12 +560,17 @@
 
 	/* Bluetooth coexistence enabled */
 	ATH10K_FLAG_BTCOEX,
+
+	/* Per Station statistics service */
+	ATH10K_FLAG_PEER_STATS,
 };
 
 enum ath10k_cal_mode {
 	ATH10K_CAL_MODE_FILE,
 	ATH10K_CAL_MODE_OTP,
 	ATH10K_CAL_MODE_DT,
+	ATH10K_PRE_CAL_MODE_FILE,
+	ATH10K_PRE_CAL_MODE_DT,
 };
 
 enum ath10k_crypt_mode {
@@ -573,6 +589,10 @@
 		return "otp";
 	case ATH10K_CAL_MODE_DT:
 		return "dt";
+	case ATH10K_PRE_CAL_MODE_FILE:
+		return "pre-cal-file";
+	case ATH10K_PRE_CAL_MODE_DT:
+		return "pre-cal-dt";
 	}
 
 	return "unknown";
@@ -606,6 +626,34 @@
 	ATH10K_TX_PAUSE_MAX,
 };
 
+struct ath10k_fw_file {
+	const struct firmware *firmware;
+
+	char fw_version[ETHTOOL_FWVERS_LEN];
+
+	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
+	enum ath10k_fw_wmi_op_version wmi_op_version;
+	enum ath10k_fw_htt_op_version htt_op_version;
+
+	const void *firmware_data;
+	size_t firmware_len;
+
+	const void *otp_data;
+	size_t otp_len;
+
+	const void *codeswap_data;
+	size_t codeswap_len;
+};
+
+struct ath10k_fw_components {
+	const struct firmware *board;
+	const void *board_data;
+	size_t board_len;
+
+	struct ath10k_fw_file fw_file;
+};
+
 struct ath10k {
 	struct ath_common ath_common;
 	struct ieee80211_hw *hw;
@@ -631,8 +679,6 @@
 	/* protected by conf_mutex */
 	bool ani_enabled;
 
-	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
-
 	bool p2p;
 
 	struct {
@@ -680,39 +726,31 @@
 		/* The padding bytes' location is different on various chips */
 		enum ath10k_hw_4addr_pad hw_4addr_pad;
 
-		u32 num_msdu_desc;
-		u32 qcache_active_peers;
 		u32 tx_chain_mask;
 		u32 rx_chain_mask;
 		u32 max_spatial_stream;
+		u32 cal_data_len;
 
 		struct ath10k_hw_params_fw {
 			const char *dir;
-			const char *fw;
-			const char *otp;
 			const char *board;
 			size_t board_size;
 			size_t board_ext_size;
 		} fw;
 	} hw_params;
 
-	const struct firmware *board;
-	const void *board_data;
-	size_t board_len;
+	/* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
+	struct ath10k_fw_components normal_mode_fw;
 
-	const struct firmware *otp;
-	const void *otp_data;
-	size_t otp_len;
+	/* READ-ONLY images of the running firmware, which can be either
+	 * normal or UTF. Do not modify, release etc!
+	 */
+	const struct ath10k_fw_components *running_fw;
 
-	const struct firmware *firmware;
-	const void *firmware_data;
-	size_t firmware_len;
-
+	const struct firmware *pre_cal_file;
 	const struct firmware *cal_file;
 
 	struct {
-		const void *firmware_codeswap_data;
-		size_t firmware_codeswap_len;
 		struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
 	} swap;
 
@@ -744,7 +782,7 @@
 	} scan;
 
 	struct {
-		struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+		struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
 	} mac;
 
 	/* should never be NULL; needed for regular htt rx */
@@ -756,6 +794,9 @@
 	/* current operating channel definition */
 	struct cfg80211_chan_def chandef;
 
+	/* currently configured operating channel in firmware */
+	struct ieee80211_channel *tgt_oper_chan;
+
 	unsigned long long free_vdev_map;
 	struct ath10k_vif *monitor_arvif;
 	bool monitor;
@@ -786,9 +827,13 @@
 
 	/* protects shared structure data */
 	spinlock_t data_lock;
+	/* protects: ar->txqs, artxq->list */
+	spinlock_t txqs_lock;
 
+	struct list_head txqs;
 	struct list_head arvifs;
 	struct list_head peers;
+	struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
 	wait_queue_head_t peer_mapping_wq;
 
 	/* protected by conf_mutex */
@@ -851,13 +896,8 @@
 
 	struct {
 		/* protected by conf_mutex */
-		const struct firmware *utf;
-		char utf_version[32];
-		const void *utf_firmware_data;
-		size_t utf_firmware_len;
-		DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
-		enum ath10k_fw_wmi_op_version orig_wmi_op_version;
-		enum ath10k_fw_wmi_op_version op_version;
+		struct ath10k_fw_components utf_mode_fw;
+
 		/* protected by data_lock */
 		bool utf_monitor;
 	} testmode;
@@ -876,6 +916,15 @@
 	u8 drv_priv[0] __aligned(sizeof(void *));
 };
 
+static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
+{
+	if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) &&
+	    test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+		return true;
+
+	return false;
+}
+
 struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 				  enum ath10k_bus bus,
 				  enum ath10k_hw_rev hw_rev,
@@ -884,8 +933,11 @@
 void ath10k_core_get_fw_features_str(struct ath10k *ar,
 				     char *buf,
 				     size_t max_len);
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+				     struct ath10k_fw_file *fw_file);
 
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+		      const struct ath10k_fw_components *fw_components);
 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
 void ath10k_core_stop(struct ath10k *ar);
 int ath10k_core_register(struct ath10k *ar, u32 chip_id);
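
Because ath10k_core_fetch_firmware_api_n() now parses into a caller-supplied struct ath10k_fw_file rather than into struct ath10k itself, other users can fetch an image into their own descriptor. A hedged sketch of such a caller; the "utf-2.bin" file name and the helper itself are illustrative assumptions, not taken from this diff:

	static int example_fetch_utf(struct ath10k *ar)
	{
		struct ath10k_fw_file *fw_file = &ar->testmode.utf_mode_fw.fw_file;
		int ret;

		ret = ath10k_core_fetch_firmware_api_n(ar, "utf-2.bin", fw_file);
		if (ret) {
			ath10k_err(ar, "failed to fetch utf firmware: %d\n", ret);
			return ret;
		}

		/* fw_version, fw_features, wmi/htt op versions and the image
		 * pointers are now filled in from the IE container.
		 */
		return 0;
	}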
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 076d29b..e251155 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -126,7 +126,9 @@
 
 void ath10k_debug_print_hwfw_info(struct ath10k *ar)
 {
+	const struct firmware *firmware;
 	char fw_features[128] = {};
+	u32 crc = 0;
 
 	ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
 
@@ -143,11 +145,15 @@
 		    config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
 		    config_enabled(CONFIG_NL80211_TESTMODE));
 
+	firmware = ar->normal_mode_fw.fw_file.firmware;
+	if (firmware)
+		crc = crc32_le(0, firmware->data, firmware->size);
+
 	ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n",
 		    ar->hw->wiphy->fw_version,
 		    ar->fw_api,
 		    fw_features,
-		    crc32_le(0, ar->firmware->data, ar->firmware->size));
+		    crc);
 }
 
 void ath10k_debug_print_board_info(struct ath10k *ar)
@@ -163,7 +169,8 @@
 	ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
 		    ar->bd_api,
 		    boardinfo,
-		    crc32_le(0, ar->board->data, ar->board->size));
+		    crc32_le(0, ar->normal_mode_fw.board->data,
+			     ar->normal_mode_fw.board->size));
 }
 
 void ath10k_debug_print_boot_info(struct ath10k *ar)
@@ -171,8 +178,8 @@
 	ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n",
 		    ar->htt.target_version_major,
 		    ar->htt.target_version_minor,
-		    ar->wmi.op_version,
-		    ar->htt.op_version,
+		    ar->normal_mode_fw.fw_file.wmi_op_version,
+		    ar->normal_mode_fw.fw_file.htt_op_version,
 		    ath10k_cal_mode_str(ar->cal_mode),
 		    ar->max_num_stations,
 		    test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
@@ -319,7 +326,7 @@
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_fw_stats stats = {};
-	bool is_start, is_started, is_end, peer_stats_svc;
+	bool is_start, is_started, is_end;
 	size_t num_peers;
 	size_t num_vdevs;
 	int ret;
@@ -346,13 +353,11 @@
 	 *  b) consume stat update events until another one with pdev stats is
 	 *     delivered which is treated as end-of-data and is itself discarded
 	 */
-
-	peer_stats_svc = test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map);
-	if (peer_stats_svc)
+	if (ath10k_peer_stats_enabled(ar))
 		ath10k_sta_update_rx_duration(ar, &stats.peers);
 
 	if (ar->debug.fw_stats_done) {
-		if (!peer_stats_svc)
+		if (!ath10k_peer_stats_enabled(ar))
 			ath10k_warn(ar, "received unsolicited stats update event\n");
 
 		goto free;
@@ -1447,7 +1452,7 @@
 		goto err;
 	}
 
-	buf = vmalloc(QCA988X_CAL_DATA_LEN);
+	buf = vmalloc(ar->hw_params.cal_data_len);
 	if (!buf) {
 		ret = -ENOMEM;
 		goto err;
@@ -1462,7 +1467,7 @@
 	}
 
 	ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
-				   QCA988X_CAL_DATA_LEN);
+				   ar->hw_params.cal_data_len);
 	if (ret) {
 		ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
 		goto err_vfree;
@@ -1487,10 +1492,11 @@
 					  char __user *user_buf,
 					  size_t count, loff_t *ppos)
 {
+	struct ath10k *ar = file->private_data;
 	void *buf = file->private_data;
 
 	return simple_read_from_buffer(user_buf, count, ppos,
-				       buf, QCA988X_CAL_DATA_LEN);
+				       buf, ar->hw_params.cal_data_len);
 }
 
 static int ath10k_debug_cal_data_release(struct inode *inode,
@@ -2019,7 +2025,12 @@
 		goto out;
 	}
 
-	if (filter && (filter != ar->debug.pktlog_filter)) {
+	if (filter == ar->debug.pktlog_filter) {
+		ret = count;
+		goto out;
+	}
+
+	if (filter) {
 		ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
 		if (ret) {
 			ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
@@ -2114,7 +2125,7 @@
 	struct ath10k *ar = file->private_data;
 	char buf[32];
 	size_t buf_size;
-	int ret = 0;
+	int ret;
 	bool val;
 
 	buf_size = min(count, (sizeof(buf) - 1));
@@ -2134,8 +2145,10 @@
 		goto exit;
 	}
 
-	if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val))
+	if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) {
+		ret = count;
 		goto exit;
+	}
 
 	if (val)
 		set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
@@ -2174,6 +2187,75 @@
 	.open = simple_open
 };
 
+static ssize_t ath10k_write_peer_stats(struct file *file,
+				       const char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	size_t buf_size;
+	int ret;
+	bool val;
+
+	buf_size = min(count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, ubuf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+
+	if (strtobool(buf, &val) != 0)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_RESTARTED) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) {
+		ret = count;
+		goto exit;
+	}
+
+	if (val)
+		set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+	else
+		clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
+	ath10k_info(ar, "restarting firmware due to Peer stats change");
+
+	queue_work(ar->workqueue, &ar->restart_work);
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf,
+				      size_t count, loff_t *ppos)
+
+{
+	char buf[32];
+	struct ath10k *ar = file->private_data;
+	int len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+			test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags));
+	mutex_unlock(&ar->conf_mutex);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_stats = {
+	.read = ath10k_read_peer_stats,
+	.write = ath10k_write_peer_stats,
+	.open = simple_open
+};
+
 static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
 					      char __user *user_buf,
 					      size_t count, loff_t *ppos)
@@ -2191,23 +2273,28 @@
 
 	len += scnprintf(buf + len, buf_len - len,
 			 "firmware-N.bin\t\t%08x\n",
-			 crc32_le(0, ar->firmware->data, ar->firmware->size));
+			 crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data,
+				  ar->normal_mode_fw.fw_file.firmware->size));
 	len += scnprintf(buf + len, buf_len - len,
 			 "athwlan\t\t\t%08x\n",
-			 crc32_le(0, ar->firmware_data, ar->firmware_len));
+			 crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data,
+				  ar->normal_mode_fw.fw_file.firmware_len));
 	len += scnprintf(buf + len, buf_len - len,
 			 "otp\t\t\t%08x\n",
-			 crc32_le(0, ar->otp_data, ar->otp_len));
+			 crc32_le(0, ar->normal_mode_fw.fw_file.otp_data,
+				  ar->normal_mode_fw.fw_file.otp_len));
 	len += scnprintf(buf + len, buf_len - len,
 			 "codeswap\t\t%08x\n",
-			 crc32_le(0, ar->swap.firmware_codeswap_data,
-				  ar->swap.firmware_codeswap_len));
+			 crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data,
+				  ar->normal_mode_fw.fw_file.codeswap_len));
 	len += scnprintf(buf + len, buf_len - len,
 			 "board-N.bin\t\t%08x\n",
-			 crc32_le(0, ar->board->data, ar->board->size));
+			 crc32_le(0, ar->normal_mode_fw.board->data,
+				  ar->normal_mode_fw.board->size));
 	len += scnprintf(buf + len, buf_len - len,
 			 "board\t\t\t%08x\n",
-			 crc32_le(0, ar->board_data, ar->board_len));
+			 crc32_le(0, ar->normal_mode_fw.board_data,
+				  ar->normal_mode_fw.board_len));
 
 	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
 
@@ -2337,6 +2424,11 @@
 		debugfs_create_file("btcoex", S_IRUGO | S_IWUSR,
 				    ar->debug.debugfs_phy, ar, &fops_btcoex);
 
+	if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+		debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR,
+				    ar->debug.debugfs_phy, ar,
+				    &fops_peer_stats);
+
 	debugfs_create_file("fw_checksums", S_IRUSR,
 			    ar->debug.debugfs_phy, ar, &fops_fw_checksums);
 
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 6206edd..75c89e3 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -57,7 +57,7 @@
 };
 
 /* FIXME: How to calculate the buffer size sanely? */
-#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
 
 extern unsigned int ath10k_debug_mask;
 
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index e70aa38..cc82718 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -297,10 +297,10 @@
 #define ATH10K_NUM_CONTROL_TX_BUFFERS 2
 #define ATH10K_HTC_MAX_LEN 4096
 #define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
-#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
 #define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
 					sizeof(struct ath10k_htc_hdr))
-#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
 
 struct ath10k_htc_ep {
 	struct ath10k_htc *htc;
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 7561f22..130cd95 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -149,7 +149,7 @@
 	memset(&conn_resp, 0, sizeof(conn_resp));
 
 	conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
-	conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+	conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
 
 	/* connect to control service */
 	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
@@ -183,7 +183,7 @@
 		8 + /* llc snap */
 		2; /* ip4 dscp or ip6 priority */
 
-	switch (ar->htt.op_version) {
+	switch (ar->running_fw->fw_file.htt_op_version) {
 	case ATH10K_FW_HTT_OP_VERSION_10_4:
 		ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
 		ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
@@ -208,7 +208,7 @@
 	return 0;
 }
 
-#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
 
 static int ath10k_htt_verify_version(struct ath10k_htt *htt)
 {
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 13391ea..911c535 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/dmapool.h>
 #include <linux/hashtable.h>
+#include <linux/kfifo.h>
 #include <net/mac80211.h>
 
 #include "htc.h"
@@ -1461,15 +1462,23 @@
 	struct htt_tx_mode_switch_record records[0];
 } __packed;
 
+struct htt_channel_change {
+	u8 pad[3];
+	__le32 freq;
+	__le32 center_freq1;
+	__le32 center_freq2;
+	__le32 phymode;
+} __packed;
+
 union htt_rx_pn_t {
 	/* WEP: 24-bit PN */
 	u32 pn24;
 
 	/* TKIP or CCMP: 48-bit PN */
-	u_int64_t pn48;
+	u64 pn48;
 
 	/* WAPI: 128-bit PN */
-	u_int64_t pn128[2];
+	u64 pn128[2];
 };
 
 struct htt_cmd {
@@ -1511,16 +1520,22 @@
 		struct htt_tx_fetch_ind tx_fetch_ind;
 		struct htt_tx_fetch_confirm tx_fetch_confirm;
 		struct htt_tx_mode_switch_ind tx_mode_switch_ind;
+		struct htt_channel_change chan_change;
 	};
 } __packed;
 
 /*** host side structures follow ***/
 
 struct htt_tx_done {
-	u32 msdu_id;
-	bool discard;
-	bool no_ack;
-	bool success;
+	u16 msdu_id;
+	u16 status;
+};
+
+enum htt_tx_compl_state {
+	HTT_TX_COMPL_STATE_NONE,
+	HTT_TX_COMPL_STATE_ACK,
+	HTT_TX_COMPL_STATE_NOACK,
+	HTT_TX_COMPL_STATE_DISCARD,
 };
 
 struct htt_peer_map_event {
@@ -1547,7 +1562,6 @@
 	u8 target_version_major;
 	u8 target_version_minor;
 	struct completion target_version_received;
-	enum ath10k_fw_htt_op_version op_version;
 	u8 max_num_amsdu;
 	u8 max_num_ampdu;
 
@@ -1641,17 +1655,20 @@
 	struct idr pending_tx;
 	wait_queue_head_t empty_tx_wq;
 
+	/* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
+	DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
+
 	/* set if host-fw communication goes haywire
 	 * used to avoid further failures */
 	bool rx_confused;
-	struct tasklet_struct rx_replenish_task;
+	atomic_t num_mpdus_ready;
 
 	/* This is used to group tx/rx completions separately and process them
 	 * in batches to reduce cache stalls */
 	struct tasklet_struct txrx_compl_task;
-	struct sk_buff_head tx_compl_q;
 	struct sk_buff_head rx_compl_q;
 	struct sk_buff_head rx_in_ord_compl_q;
+	struct sk_buff_head tx_fetch_ind_q;
 
 	/* rx_status template */
 	struct ieee80211_rx_status rx_status;
@@ -1667,10 +1684,13 @@
 	} txbuf;
 
 	struct {
+		bool enabled;
 		struct htt_q_state *vaddr;
 		dma_addr_t paddr;
+		u16 num_push_allowed;
 		u16 num_peers;
 		u16 num_tids;
+		enum htt_tx_mode_switch_mode mode;
 		enum htt_q_depth_type type;
 	} tx_q_state;
 };
@@ -1715,7 +1735,7 @@
 
 /* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
  * aggregated traffic more nicely. */
-#define ATH10K_HTT_MAX_NUM_REFILL 16
+#define ATH10K_HTT_MAX_NUM_REFILL 100
 
 /*
  * DMA_MAP expects the buffer to be an integral number of cache lines.
@@ -1743,7 +1763,8 @@
 void ath10k_htt_rx_free(struct ath10k_htt *htt);
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
 int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
@@ -1752,8 +1773,23 @@
 				u8 max_subfrms_ampdu,
 				u8 max_subfrms_amsdu);
 void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+			     __le32 token,
+			     __le16 fetch_seq_num,
+			     struct htt_tx_fetch_record *records,
+			     size_t num_records);
 
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_sync(struct ath10k *ar);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+				   bool is_presp);
+
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
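
The txdone_fifo declared above is intended for lock-free use with exactly one producer (the HTT tx completion indication) and one consumer (the txrx tasklet), which is why plain kfifo_put()/kfifo_get() suffice. A rough sketch of that pattern, assuming the fifo has already been sized with kfifo_alloc() (the element count used by the driver is not shown in this hunk):

	/* producer: single writer, so kfifo_put() needs no locking */
	static void example_tx_compl(struct ath10k_htt *htt,
				     struct htt_tx_done done)
	{
		if (!kfifo_put(&htt->txdone_fifo, done))
			ath10k_warn(htt->ar, "txdone fifo full, msdu_id %d\n",
				    done.msdu_id);
	}

	/* consumer: single reader, drained from the txrx tasklet */
	static void example_txdone_drain(struct ath10k_htt *htt)
	{
		struct htt_tx_done done = {};

		while (kfifo_get(&htt->txdone_fifo, &done))
			ath10k_txrx_tx_unref(htt, &done);
	}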
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index ae9b686..cc979a4 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -31,6 +31,8 @@
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
 
+#define HTT_RX_RING_REFILL_RESCHED_MS 5
+
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
 static void ath10k_htt_txrx_compl_task(unsigned long ptr);
 
@@ -192,7 +194,8 @@
 		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
 			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
 	} else if (num_deficit > 0) {
-		tasklet_schedule(&htt->rx_replenish_task);
+		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
 	}
 	spin_unlock_bh(&htt->rx_ring.lock);
 }
@@ -223,12 +226,11 @@
 void ath10k_htt_rx_free(struct ath10k_htt *htt)
 {
 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
-	tasklet_kill(&htt->rx_replenish_task);
 	tasklet_kill(&htt->txrx_compl_task);
 
-	skb_queue_purge(&htt->tx_compl_q);
 	skb_queue_purge(&htt->rx_compl_q);
 	skb_queue_purge(&htt->rx_in_ord_compl_q);
+	skb_queue_purge(&htt->tx_fetch_ind_q);
 
 	ath10k_htt_rx_ring_free(htt);
 
@@ -281,7 +283,6 @@
 
 /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
-				   u8 **fw_desc, int *fw_desc_len,
 				   struct sk_buff_head *amsdu)
 {
 	struct ath10k *ar = htt->ar;
@@ -323,48 +324,6 @@
 			return -EIO;
 		}
 
-		/*
-		 * Copy the FW rx descriptor for this MSDU from the rx
-		 * indication message into the MSDU's netbuf. HL uses the
-		 * same rx indication message definition as LL, and simply
-		 * appends new info (fields from the HW rx desc, and the
-		 * MSDU payload itself). So, the offset into the rx
-		 * indication message only has to account for the standard
-		 * offset of the per-MSDU FW rx desc info within the
-		 * message, and how many bytes of the per-MSDU FW rx desc
-		 * info have already been consumed. (And the endianness of
-		 * the host, since for a big-endian host, the rx ind
-		 * message contents, including the per-MSDU rx desc bytes,
-		 * were byteswapped during upload.)
-		 */
-		if (*fw_desc_len > 0) {
-			rx_desc->fw_desc.info0 = **fw_desc;
-			/*
-			 * The target is expected to only provide the basic
-			 * per-MSDU rx descriptors. Just to be sure, verify
-			 * that the target has not attached extension data
-			 * (e.g. LRO flow ID).
-			 */
-
-			/* or more, if there's extension data */
-			(*fw_desc)++;
-			(*fw_desc_len)--;
-		} else {
-			/*
-			 * When an oversized AMSDU happened, FW will lost
-			 * some of MSDU status - in this case, the FW
-			 * descriptors provided will be less than the
-			 * actual MSDUs inside this MPDU. Mark the FW
-			 * descriptors so that it will still deliver to
-			 * upper stack, if no CRC error for this MPDU.
-			 *
-			 * FIX THIS - the FW descriptors are actually for
-			 * MSDUs in the end of this A-MSDU instead of the
-			 * beginning.
-			 */
-			rx_desc->fw_desc.info0 = 0;
-		}
-
 		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
 					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
 					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
@@ -423,13 +382,6 @@
 	return msdu_chaining;
 }
 
-static void ath10k_htt_rx_replenish_task(unsigned long ptr)
-{
-	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
-
-	ath10k_htt_rx_msdu_buff_replenish(htt);
-}
-
 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
 					       u32 paddr)
 {
@@ -563,12 +515,10 @@
 	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
 	hash_init(htt->rx_ring.skb_table);
 
-	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
-		     (unsigned long)htt);
-
-	skb_queue_head_init(&htt->tx_compl_q);
 	skb_queue_head_init(&htt->rx_compl_q);
 	skb_queue_head_init(&htt->rx_in_ord_compl_q);
+	skb_queue_head_init(&htt->tx_fetch_ind_q);
+	atomic_set(&htt->num_mpdus_ready, 0);
 
 	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
 		     (unsigned long)htt);
@@ -860,6 +810,8 @@
 		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
 	if (!ch)
 		ch = ath10k_htt_rx_h_any_channel(ar);
+	if (!ch)
+		ch = ar->tgt_oper_chan;
 	spin_unlock_bh(&ar->data_lock);
 
 	if (!ch)
@@ -979,7 +931,7 @@
 	*status = *rx_status;
 
 	ath10k_dbg(ar, ATH10K_DBG_DATA,
-		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
 		   skb,
 		   skb->len,
 		   ieee80211_get_SA(hdr),
@@ -1014,7 +966,7 @@
 	int len = ieee80211_hdrlen(hdr->frame_control);
 
 	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
-		      ar->fw_features))
+		      ar->running_fw->fw_file.fw_features))
 		len = round_up(len, 4);
 
 	return len;
@@ -1076,20 +1028,25 @@
 	hdr = (void *)msdu->data;
 
 	/* Tail */
-	skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
+	if (status->flag & RX_FLAG_IV_STRIPPED)
+		skb_trim(msdu, msdu->len -
+			 ath10k_htt_rx_crypto_tail_len(ar, enctype));
 
 	/* MMIC */
-	if (!ieee80211_has_morefrags(hdr->frame_control) &&
+	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+	    !ieee80211_has_morefrags(hdr->frame_control) &&
 	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
 		skb_trim(msdu, msdu->len - 8);
 
 	/* Head */
-	hdr_len = ieee80211_hdrlen(hdr->frame_control);
-	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+	if (status->flag & RX_FLAG_IV_STRIPPED) {
+		hdr_len = ieee80211_hdrlen(hdr->frame_control);
+		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
 
-	memmove((void *)msdu->data + crypto_len,
-		(void *)msdu->data, hdr_len);
-	skb_pull(msdu, crypto_len);
+		memmove((void *)msdu->data + crypto_len,
+			(void *)msdu->data, hdr_len);
+		skb_pull(msdu, crypto_len);
+	}
 }
 
 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
@@ -1343,6 +1300,7 @@
 	bool has_tkip_err;
 	bool has_peer_idx_invalid;
 	bool is_decrypted;
+	bool is_mgmt;
 	u32 attention;
 
 	if (skb_queue_empty(amsdu))
@@ -1351,6 +1309,9 @@
 	first = skb_peek(amsdu);
 	rxd = (void *)first->data - sizeof(*rxd);
 
+	is_mgmt = !!(rxd->attention.flags &
+		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+
 	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
 		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
 
@@ -1392,6 +1353,7 @@
 			  RX_FLAG_MMIC_ERROR |
 			  RX_FLAG_DECRYPTED |
 			  RX_FLAG_IV_STRIPPED |
+			  RX_FLAG_ONLY_MONITOR |
 			  RX_FLAG_MMIC_STRIPPED);
 
 	if (has_fcs_err)
@@ -1400,10 +1362,21 @@
 	if (has_tkip_err)
 		status->flag |= RX_FLAG_MMIC_ERROR;
 
-	if (is_decrypted)
-		status->flag |= RX_FLAG_DECRYPTED |
-				RX_FLAG_IV_STRIPPED |
-				RX_FLAG_MMIC_STRIPPED;
+	/* Firmware reports all necessary management frames via WMI already.
+	 * They are not reported to monitor interfaces at all so pass the ones
+	 * coming via HTT to monitor interfaces instead. This simplifies
+	 * matters a lot.
+	 */
+	if (is_mgmt)
+		status->flag |= RX_FLAG_ONLY_MONITOR;
+
+	if (is_decrypted) {
+		status->flag |= RX_FLAG_DECRYPTED;
+
+		if (likely(!is_mgmt))
+			status->flag |= RX_FLAG_IV_STRIPPED |
+					RX_FLAG_MMIC_STRIPPED;
+	}
 
 	skb_queue_walk(amsdu, msdu) {
 		ath10k_htt_rx_h_csum_offload(msdu);
@@ -1416,6 +1389,8 @@
 		 */
 		if (!is_decrypted)
 			continue;
+		if (is_mgmt)
+			continue;
 
 		hdr = (void *)msdu->data;
 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
@@ -1516,14 +1491,6 @@
 					struct sk_buff_head *amsdu,
 					struct ieee80211_rx_status *rx_status)
 {
-	struct sk_buff *msdu;
-	struct htt_rx_desc *rxd;
-	bool is_mgmt;
-	bool has_fcs_err;
-
-	msdu = skb_peek(amsdu);
-	rxd = (void *)msdu->data - sizeof(*rxd);
-
 	/* FIXME: It might be a good idea to do some fuzzy-testing to drop
 	 * invalid/dangerous frames.
 	 */
@@ -1533,23 +1500,6 @@
 		return false;
 	}
 
-	is_mgmt = !!(rxd->attention.flags &
-		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
-	has_fcs_err = !!(rxd->attention.flags &
-			 __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
-
-	/* Management frames are handled via WMI events. The pros of such
-	 * approach is that channel is explicitly provided in WMI events
-	 * whereas HTT doesn't provide channel information for Rxed frames.
-	 *
-	 * However some firmware revisions don't report corrupted frames via
-	 * WMI so don't drop them.
-	 */
-	if (is_mgmt && !has_fcs_err) {
-		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
-		return false;
-	}
-
 	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
 		return false;
@@ -1571,25 +1521,49 @@
 	__skb_queue_purge(amsdu);
 }
 
-static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
-				  struct htt_rx_indication *rx)
+static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
-	struct ieee80211_rx_status *rx_status = &htt->rx_status;
-	struct htt_rx_indication_mpdu_range *mpdu_ranges;
+	static struct ieee80211_rx_status rx_status;
 	struct sk_buff_head amsdu;
+	int ret;
+
+	__skb_queue_head_init(&amsdu);
+
+	spin_lock_bh(&htt->rx_ring.lock);
+	if (htt->rx_confused) {
+		spin_unlock_bh(&htt->rx_ring.lock);
+		return -EIO;
+	}
+	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
+	spin_unlock_bh(&htt->rx_ring.lock);
+
+	if (ret < 0) {
+		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+		__skb_queue_purge(&amsdu);
+		/* FIXME: It's probably a good idea to reboot the
+		 * device instead of leaving it inoperable.
+		 */
+		htt->rx_confused = true;
+		return ret;
+	}
+
+	ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
+	ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+	ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
+	ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
+	ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
+
+	return 0;
+}
+
+static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
+				      struct htt_rx_indication *rx)
+{
+	struct ath10k *ar = htt->ar;
+	struct htt_rx_indication_mpdu_range *mpdu_ranges;
 	int num_mpdu_ranges;
-	int fw_desc_len;
-	u8 *fw_desc;
-	int i, ret, mpdu_count = 0;
-
-	lockdep_assert_held(&htt->rx_ring.lock);
-
-	if (htt->rx_confused)
-		return;
-
-	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
-	fw_desc = (u8 *)&rx->fw_desc;
+	int i, mpdu_count = 0;
 
 	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
 			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
@@ -1603,80 +1577,19 @@
 	for (i = 0; i < num_mpdu_ranges; i++)
 		mpdu_count += mpdu_ranges[i].mpdu_count;
 
-	while (mpdu_count--) {
-		__skb_queue_head_init(&amsdu);
-		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
-					      &fw_desc_len, &amsdu);
-		if (ret < 0) {
-			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
-			__skb_queue_purge(&amsdu);
-			/* FIXME: It's probably a good idea to reboot the
-			 * device instead of leaving it inoperable.
-			 */
-			htt->rx_confused = true;
-			break;
-		}
+	atomic_add(mpdu_count, &htt->num_mpdus_ready);
 
-		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
-		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
-		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
-		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
-		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
-	}
-
-	tasklet_schedule(&htt->rx_replenish_task);
+	tasklet_schedule(&htt->txrx_compl_task);
 }
 
-static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
-				       struct htt_rx_fragment_indication *frag)
+static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
 {
-	struct ath10k *ar = htt->ar;
-	struct ieee80211_rx_status *rx_status = &htt->rx_status;
-	struct sk_buff_head amsdu;
-	int ret;
-	u8 *fw_desc;
-	int fw_desc_len;
+	atomic_inc(&htt->num_mpdus_ready);
 
-	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
-	fw_desc = (u8 *)frag->fw_msdu_rx_desc;
-
-	__skb_queue_head_init(&amsdu);
-
-	spin_lock_bh(&htt->rx_ring.lock);
-	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
-				      &amsdu);
-	spin_unlock_bh(&htt->rx_ring.lock);
-
-	tasklet_schedule(&htt->rx_replenish_task);
-
-	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
-
-	if (ret) {
-		ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
-			    ret);
-		__skb_queue_purge(&amsdu);
-		return;
-	}
-
-	if (skb_queue_len(&amsdu) != 1) {
-		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
-		__skb_queue_purge(&amsdu);
-		return;
-	}
-
-	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
-	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
-	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
-	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
-
-	if (fw_desc_len > 0) {
-		ath10k_dbg(ar, ATH10K_DBG_HTT,
-			   "expecting more fragmented rx in one indication %d\n",
-			   fw_desc_len);
-	}
+	tasklet_schedule(&htt->txrx_compl_task);
 }
 
-static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
 				       struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -1688,19 +1601,19 @@
 
 	switch (status) {
 	case HTT_DATA_TX_STATUS_NO_ACK:
-		tx_done.no_ack = true;
+		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
 		break;
 	case HTT_DATA_TX_STATUS_OK:
-		tx_done.success = true;
+		tx_done.status = HTT_TX_COMPL_STATE_ACK;
 		break;
 	case HTT_DATA_TX_STATUS_DISCARD:
 	case HTT_DATA_TX_STATUS_POSTPONE:
 	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
-		tx_done.discard = true;
+		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
 		break;
 	default:
 		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
-		tx_done.discard = true;
+		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
 		break;
 	}
 
@@ -1710,7 +1623,20 @@
 	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
 		msdu_id = resp->data_tx_completion.msdus[i];
 		tx_done.msdu_id = __le16_to_cpu(msdu_id);
-		ath10k_txrx_tx_unref(htt, &tx_done);
+
+		/* kfifo_put: In practice firmware shouldn't fire off per-CE
+		 * interrupt and main interrupt (MSI/-X range case) for the same
+		 * HTC service so it should be safe to use kfifo_put w/o lock.
+		 *
+		 * From kfifo_put() documentation:
+		 *  Note that with only one concurrent reader and one concurrent
+		 *  writer, you don't need extra locking to use these macros.
+		 */
+		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
+				    tx_done.msdu_id, tx_done.status);
+			ath10k_txrx_tx_unref(htt, &tx_done);
+		}
 	}
 }
 
@@ -1978,11 +1904,324 @@
 			return;
 		}
 	}
-
-	tasklet_schedule(&htt->rx_replenish_task);
+	ath10k_htt_rx_msdu_buff_replenish(htt);
 }
 
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
+						   const __le32 *resp_ids,
+						   int num_resp_ids)
+{
+	int i;
+	u32 resp_id;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
+		   num_resp_ids);
+
+	for (i = 0; i < num_resp_ids; i++) {
+		resp_id = le32_to_cpu(resp_ids[i]);
+
+		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
+			   resp_id);
+
+		/* TODO: free resp_id */
+	}
+}
+
+static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_txq *txq;
+	struct htt_resp *resp = (struct htt_resp *)skb->data;
+	struct htt_tx_fetch_record *record;
+	size_t len;
+	size_t max_num_bytes;
+	size_t max_num_msdus;
+	size_t num_bytes;
+	size_t num_msdus;
+	const __le32 *resp_ids;
+	u16 num_records;
+	u16 num_resp_ids;
+	u16 peer_id;
+	u8 tid;
+	int ret;
+	int i;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
+
+	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
+		return;
+	}
+
+	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
+	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
+
+	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
+	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
+
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
+		return;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
+		   num_records, num_resp_ids,
+		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
+
+	if (!ar->htt.tx_q_state.enabled) {
+		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
+		return;
+	}
+
+	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
+		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
+		return;
+	}
+
+	rcu_read_lock();
+
+	for (i = 0; i < num_records; i++) {
+		record = &resp->tx_fetch_ind.records[i];
+		peer_id = MS(le16_to_cpu(record->info),
+			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
+		tid = MS(le16_to_cpu(record->info),
+			 HTT_TX_FETCH_RECORD_INFO_TID);
+		max_num_msdus = le16_to_cpu(record->num_msdus);
+		max_num_bytes = le32_to_cpu(record->num_bytes);
+
+		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
+			   i, peer_id, tid, max_num_msdus, max_num_bytes);
+
+		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+				    peer_id, tid);
+			continue;
+		}
+
+		spin_lock_bh(&ar->data_lock);
+		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+		spin_unlock_bh(&ar->data_lock);
+
+		/* It is okay to release the lock and use txq because RCU read
+		 * lock is held.
+		 */
+
+		if (unlikely(!txq)) {
+			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+				    peer_id, tid);
+			continue;
+		}
+
+		num_msdus = 0;
+		num_bytes = 0;
+
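+		/* Push frames until either of the firmware-provided limits
+		 * (msdus or bytes) is reached, then report how much was
+		 * actually queued back in the fetch record.
+		 */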
+		while (num_msdus < max_num_msdus &&
+		       num_bytes < max_num_bytes) {
+			ret = ath10k_mac_tx_push_txq(hw, txq);
+			if (ret < 0)
+				break;
+
+			num_msdus++;
+			num_bytes += ret;
+		}
+
+		record->num_msdus = cpu_to_le16(num_msdus);
+		record->num_bytes = cpu_to_le32(num_bytes);
+
+		ath10k_htt_tx_txq_recalc(hw, txq);
+	}
+
+	rcu_read_unlock();
+
+	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
+	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
+
+	ret = ath10k_htt_tx_fetch_resp(ar,
+				       resp->tx_fetch_ind.token,
+				       resp->tx_fetch_ind.fetch_seq_num,
+				       resp->tx_fetch_ind.records,
+				       num_records);
+	if (unlikely(ret)) {
+		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
+			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
+		/* FIXME: request fw restart */
+	}
+
+	ath10k_htt_tx_txq_sync(ar);
+}
+
+static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
+					   struct sk_buff *skb)
+{
+	const struct htt_resp *resp = (void *)skb->data;
+	size_t len;
+	int num_resp_ids;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
+
+	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
+		return;
+	}
+
+	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
+	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
+
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
+		return;
+	}
+
+	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
+					       resp->tx_fetch_confirm.resp_ids,
+					       num_resp_ids);
+}
+
+static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
+					     struct sk_buff *skb)
+{
+	const struct htt_resp *resp = (void *)skb->data;
+	const struct htt_tx_mode_switch_record *record;
+	struct ieee80211_txq *txq;
+	struct ath10k_txq *artxq;
+	size_t len;
+	size_t num_records;
+	enum htt_tx_mode_switch_mode mode;
+	bool enable;
+	u16 info0;
+	u16 info1;
+	u16 threshold;
+	u16 peer_id;
+	u8 tid;
+	int i;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
+
+	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
+		return;
+	}
+
+	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
+	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
+
+	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
+	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
+	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
+	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT,
+		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
+		   info0, info1, enable, num_records, mode, threshold);
+
+	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
+
+	if (unlikely(skb->len < len)) {
+		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
+		return;
+	}
+
+	switch (mode) {
+	case HTT_TX_MODE_SWITCH_PUSH:
+	case HTT_TX_MODE_SWITCH_PUSH_PULL:
+		break;
+	default:
+		ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
+			    mode);
+		return;
+	}
+
+	if (!enable)
+		return;
+
+	ar->htt.tx_q_state.enabled = enable;
+	ar->htt.tx_q_state.mode = mode;
+	ar->htt.tx_q_state.num_push_allowed = threshold;
+
+	rcu_read_lock();
+
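+	/* Each record carries a per-sta/tid push budget (num_max_msdus)
+	 * that the tx push path compares against num_fw_queued.
+	 */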
+	for (i = 0; i < num_records; i++) {
+		record = &resp->tx_mode_switch_ind.records[i];
+		info0 = le16_to_cpu(record->info0);
+		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
+		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
+
+		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+				    peer_id, tid);
+			continue;
+		}
+
+		spin_lock_bh(&ar->data_lock);
+		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+		spin_unlock_bh(&ar->data_lock);
+
+		/* It is okay to release the lock and use txq because RCU read
+		 * lock is held.
+		 */
+
+		if (unlikely(!txq)) {
+			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+				    peer_id, tid);
+			continue;
+		}
+
+		spin_lock_bh(&ar->htt.tx_lock);
+		artxq = (void *)txq->drv_priv;
+		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
+		spin_unlock_bh(&ar->htt.tx_lock);
+	}
+
+	rcu_read_unlock();
+
+	ath10k_mac_tx_push_pending(ar);
+}
+
+static inline enum nl80211_band phy_mode_to_band(u32 phy_mode)
+{
+	enum nl80211_band band;
+
+	switch (phy_mode) {
+	case MODE_11A:
+	case MODE_11NA_HT20:
+	case MODE_11NA_HT40:
+	case MODE_11AC_VHT20:
+	case MODE_11AC_VHT40:
+	case MODE_11AC_VHT80:
+		band = NL80211_BAND_5GHZ;
+		break;
+	case MODE_11G:
+	case MODE_11B:
+	case MODE_11GONLY:
+	case MODE_11NG_HT20:
+	case MODE_11NG_HT40:
+	case MODE_11AC_VHT20_2G:
+	case MODE_11AC_VHT40_2G:
+	case MODE_11AC_VHT80_2G:
+	default:
+		band = NL80211_BAND_2GHZ;
+	}
+
+	return band;
+}
+
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+	bool release;
+
+	release = ath10k_htt_t2h_msg_handler(ar, skb);
+
+	/* Free the indication buffer */
+	if (release)
+		dev_kfree_skb_any(skb);
+}
+
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
 	struct htt_resp *resp = (struct htt_resp *)skb->data;
@@ -1998,8 +2237,7 @@
 	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
 		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
 			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
-		dev_kfree_skb_any(skb);
-		return;
+		return true;
 	}
 	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
 
@@ -2011,9 +2249,8 @@
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_RX_IND:
-		skb_queue_tail(&htt->rx_compl_q, skb);
-		tasklet_schedule(&htt->txrx_compl_task);
-		return;
+		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
+		break;
 	case HTT_T2H_MSG_TYPE_PEER_MAP: {
 		struct htt_peer_map_event ev = {
 			.vdev_id = resp->peer_map.vdev_id,
@@ -2034,28 +2271,33 @@
 		struct htt_tx_done tx_done = {};
 		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
 
-		tx_done.msdu_id =
-			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
 
 		switch (status) {
 		case HTT_MGMT_TX_STATUS_OK:
-			tx_done.success = true;
+			tx_done.status = HTT_TX_COMPL_STATE_ACK;
 			break;
 		case HTT_MGMT_TX_STATUS_RETRY:
-			tx_done.no_ack = true;
+			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
 			break;
 		case HTT_MGMT_TX_STATUS_DROP:
-			tx_done.discard = true;
+			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
 			break;
 		}
 
-		ath10k_txrx_tx_unref(htt, &tx_done);
+		status = ath10k_txrx_tx_unref(htt, &tx_done);
+		if (!status) {
+			spin_lock_bh(&htt->tx_lock);
+			ath10k_htt_tx_mgmt_dec_pending(htt);
+			spin_unlock_bh(&htt->tx_lock);
+		}
+		ath10k_mac_tx_push_pending(ar);
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
-		skb_queue_tail(&htt->tx_compl_q, skb);
+		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
 		tasklet_schedule(&htt->txrx_compl_task);
-		return;
+		break;
 	case HTT_T2H_MSG_TYPE_SEC_IND: {
 		struct ath10k *ar = htt->ar;
 		struct htt_security_indication *ev = &resp->security_indication;
@@ -2071,7 +2313,7 @@
 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
 		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
 				skb->data, skb->len);
-		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+		ath10k_htt_rx_frag_handler(htt);
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_TEST:
@@ -2111,18 +2353,39 @@
 	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
 		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
 		tasklet_schedule(&htt->txrx_compl_task);
-		return;
+		return false;
 	}
 	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
 		break;
-	case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
+	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
+		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
+		u32 freq = __le32_to_cpu(resp->chan_change.freq);
+
+		ar->tgt_oper_chan =
+			__ieee80211_get_channel(ar->hw->wiphy, freq);
+		ath10k_dbg(ar, ATH10K_DBG_HTT,
+			   "htt chan change freq %u phymode %s\n",
+			   freq, ath10k_wmi_phymode_str(phymode));
 		break;
+	}
 	case HTT_T2H_MSG_TYPE_AGGR_CONF:
 		break;
-	case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
+		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
+
+		if (!tx_fetch_ind) {
+			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
+			break;
+		}
+		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
+		tasklet_schedule(&htt->txrx_compl_task);
+		break;
+	}
 	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
+		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
+		break;
 	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
-		/* TODO: Implement pull-push logic */
+		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
 		break;
 	case HTT_T2H_MSG_TYPE_EN_STATS:
 	default:
@@ -2132,9 +2395,7 @@
 				skb->data, skb->len);
 		break;
 	};
-
-	/* Free the indication buffer */
-	dev_kfree_skb_any(skb);
+	return true;
 }
 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
 
@@ -2150,40 +2411,47 @@
 {
 	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
 	struct ath10k *ar = htt->ar;
-	struct sk_buff_head tx_q;
-	struct sk_buff_head rx_q;
+	struct htt_tx_done tx_done = {};
 	struct sk_buff_head rx_ind_q;
-	struct htt_resp *resp;
+	struct sk_buff_head tx_ind_q;
 	struct sk_buff *skb;
 	unsigned long flags;
+	int num_mpdus;
 
-	__skb_queue_head_init(&tx_q);
-	__skb_queue_head_init(&rx_q);
 	__skb_queue_head_init(&rx_ind_q);
-
-	spin_lock_irqsave(&htt->tx_compl_q.lock, flags);
-	skb_queue_splice_init(&htt->tx_compl_q, &tx_q);
-	spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags);
-
-	spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
-	skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
-	spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);
+	__skb_queue_head_init(&tx_ind_q);
 
 	spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
 	skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
 	spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
 
-	while ((skb = __skb_dequeue(&tx_q))) {
-		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+
+	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
+	 * From kfifo_get() documentation:
+	 *  Note that with only one concurrent reader and one concurrent writer,
+	 *  you don't need extra locking to use these macros.
+	 */
+	while (kfifo_get(&htt->txdone_fifo, &tx_done))
+		ath10k_txrx_tx_unref(htt, &tx_done);
+
+	while ((skb = __skb_dequeue(&tx_ind_q))) {
+		ath10k_htt_rx_tx_fetch_ind(ar, skb);
 		dev_kfree_skb_any(skb);
 	}
 
-	while ((skb = __skb_dequeue(&rx_q))) {
-		resp = (struct htt_resp *)skb->data;
-		spin_lock_bh(&htt->rx_ring.lock);
-		ath10k_htt_rx_handler(htt, &resp->rx_ind);
-		spin_unlock_bh(&htt->rx_ring.lock);
-		dev_kfree_skb_any(skb);
+	ath10k_mac_tx_push_pending(ar);
+
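+	/* Process only the MPDUs that were already marked ready when this
+	 * tasklet run started.
+	 */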
+	num_mpdus = atomic_read(&htt->num_mpdus_ready);
+
+	while (num_mpdus) {
+		if (ath10k_htt_rx_handle_amsdu(htt))
+			break;
+
+		num_mpdus--;
+		atomic_dec(&htt->num_mpdus_ready);
 	}
 
 	while ((skb = __skb_dequeue(&rx_ind_q))) {
@@ -2192,4 +2460,6 @@
 		spin_unlock_bh(&htt->rx_ring.lock);
 		dev_kfree_skb_any(skb);
 	}
+
+	ath10k_htt_rx_msdu_buff_replenish(htt);
 }
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 95acb72..6269c61 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -22,53 +22,183 @@
 #include "txrx.h"
 #include "debug.h"
 
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
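+/* Encode a queue byte count into the compact HTT tx queue state format.
+ * The encoded value approximates count ~= factor * 128 * 8^exp; 0xff is
+ * returned when the count is too large to represent, and factor is kept
+ * at least 1 so a non-empty queue is never reported as empty.
+ */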
+static u8 ath10k_htt_tx_txq_calc_size(size_t count)
 {
-	if (limit_mgmt_desc)
-		htt->num_pending_mgmt_tx--;
+	int exp;
+	int factor;
+
+	exp = 0;
+	factor = count >> 7;
+
+	while (factor >= 64 && exp < 4) {
+		factor >>= 3;
+		exp++;
+	}
+
+	if (exp == 4)
+		return 0xff;
+
+	if (count > 0)
+		factor = max(1, factor);
+
+	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
+	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
+}
+
+static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+				       struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_sta *arsta;
+	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
+	unsigned long frame_cnt;
+	unsigned long byte_cnt;
+	int idx;
+	u32 bit;
+	u16 peer_id;
+	u8 tid;
+	u8 count;
+
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	if (!ar->htt.tx_q_state.enabled)
+		return;
+
+	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+		return;
+
+	if (txq->sta) {
+		arsta = (void *)txq->sta->drv_priv;
+		peer_id = arsta->peer_id;
+	} else {
+		peer_id = arvif->peer_id;
+	}
+
+	tid = txq->tid;
+	bit = BIT(peer_id % 32);
+	idx = peer_id / 32;
+
+	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+	count = ath10k_htt_tx_txq_calc_size(byte_cnt);
+
+	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
+			    peer_id, tid);
+		return;
+	}
+
+	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
+	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
+	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
+		   peer_id, tid, count);
+}
+
+static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+	u32 seq;
+	size_t size;
+
+	lockdep_assert_held(&ar->htt.tx_lock);
+
+	if (!ar->htt.tx_q_state.enabled)
+		return;
+
+	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+		return;
+
+	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
+	seq++;
+	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
+		   seq);
+
+	size = sizeof(*ar->htt.tx_q_state.vaddr);
+	dma_sync_single_for_device(ar->dev,
+				   ar->htt.tx_q_state.paddr,
+				   size,
+				   DMA_TO_DEVICE);
+}
+
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	__ath10k_htt_tx_txq_recalc(hw, txq);
+	spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+	spin_lock_bh(&ar->htt.tx_lock);
+	__ath10k_htt_tx_txq_sync(ar);
+	spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	__ath10k_htt_tx_txq_recalc(hw, txq);
+	__ath10k_htt_tx_txq_sync(ar);
+	spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+	lockdep_assert_held(&htt->tx_lock);
 
 	htt->num_pending_tx--;
 	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
 		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 }
 
-static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
-				      bool limit_mgmt_desc)
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
 {
-	spin_lock_bh(&htt->tx_lock);
-	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
-	spin_unlock_bh(&htt->tx_lock);
-}
+	lockdep_assert_held(&htt->tx_lock);
 
-static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
-				     bool limit_mgmt_desc, bool is_probe_resp)
-{
-	struct ath10k *ar = htt->ar;
-	int ret = 0;
-
-	spin_lock_bh(&htt->tx_lock);
-
-	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
-		ret = -EBUSY;
-		goto exit;
-	}
-
-	if (limit_mgmt_desc) {
-		if (is_probe_resp && (htt->num_pending_mgmt_tx >
-		    ar->hw_params.max_probe_resp_desc_thres)) {
-			ret = -EBUSY;
-			goto exit;
-		}
-		htt->num_pending_mgmt_tx++;
-	}
+	if (htt->num_pending_tx >= htt->max_num_pending_tx)
+		return -EBUSY;
 
 	htt->num_pending_tx++;
 	if (htt->num_pending_tx == htt->max_num_pending_tx)
 		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 
-exit:
-	spin_unlock_bh(&htt->tx_lock);
-	return ret;
+	return 0;
+}
+
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+				   bool is_presp)
+{
+	struct ath10k *ar = htt->ar;
+
+	lockdep_assert_held(&htt->tx_lock);
+
+	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
+		return 0;
+
+	if (is_presp &&
+	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
+		return -EBUSY;
+
+	htt->num_pending_mgmt_tx++;
+
+	return 0;
+}
+
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
+{
+	lockdep_assert_held(&htt->tx_lock);
+
+	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
+		return;
+
+	htt->num_pending_mgmt_tx--;
 }
 
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
@@ -137,7 +267,8 @@
 	struct ath10k *ar = htt->ar;
 	size_t size;
 
-	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+		      ar->running_fw->fw_file.fw_features))
 		return;
 
 	size = sizeof(*htt->tx_q_state.vaddr);
@@ -152,7 +283,8 @@
 	size_t size;
 	int ret;
 
-	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+		      ar->running_fw->fw_file.fw_features))
 		return 0;
 
 	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
@@ -209,8 +341,18 @@
 		goto free_frag_desc;
 	}
 
+	size = roundup_pow_of_two(htt->max_num_pending_tx);
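+	/* The txdone fifo must be able to hold one tx completion entry per
+	 * pending msdu; kfifo sizes are rounded up to a power of two.
+	 */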
+	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
+	if (ret) {
+		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
+		goto free_txq;
+	}
+
 	return 0;
 
+free_txq:
+	ath10k_htt_tx_free_txq(htt);
+
 free_frag_desc:
 	ath10k_htt_tx_free_cont_frag_desc(htt);
 
@@ -234,8 +376,8 @@
 
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
 
-	tx_done.discard = 1;
 	tx_done.msdu_id = msdu_id;
+	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
 
 	ath10k_txrx_tx_unref(htt, &tx_done);
 
@@ -258,6 +400,8 @@
 
 	ath10k_htt_tx_free_txq(htt);
 	ath10k_htt_tx_free_cont_frag_desc(htt);
+	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
+	kfifo_free(&htt->txdone_fifo);
 }
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -371,7 +515,8 @@
 	info |= SM(htt->tx_q_state.type,
 		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
 
-	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+		     ar->running_fw->fw_file.fw_features))
 		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
 
 	cfg = &cmd->frag_desc_bank_cfg;
@@ -535,6 +680,55 @@
 	return 0;
 }
 
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+			     __le32 token,
+			     __le16 fetch_seq_num,
+			     struct htt_tx_fetch_record *records,
+			     size_t num_records)
+{
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	const u16 resp_id = 0;
+	int len = 0;
+	int ret;
+
+	/* Response IDs are echoed back only for host driver convenience
+	 * purposes. They aren't used for anything in the driver yet so use 0.
+	 */
+
+	len += sizeof(cmd->hdr);
+	len += sizeof(cmd->tx_fetch_resp);
+	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
+
+	skb = ath10k_htc_alloc_skb(ar, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, len);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
+	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
+	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
+	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
+	cmd->tx_fetch_resp.token = token;
+
+	memcpy(cmd->tx_fetch_resp.records, records,
+	       sizeof(records[0]) * num_records);
+
+	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
+		goto err_free_skb;
+	}
+
+	return 0;
+
+err_free_skb:
+	dev_kfree_skb_any(skb);
+
+	return ret;
+}
+
 static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -576,20 +770,6 @@
 	int msdu_id = -1;
 	int res;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
-	bool limit_mgmt_desc = false;
-	bool is_probe_resp = false;
-
-	if (ar->hw_params.max_probe_resp_desc_thres) {
-		limit_mgmt_desc = true;
-
-		if (ieee80211_is_probe_resp(hdr->frame_control))
-			is_probe_resp = true;
-	}
-
-	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
-
-	if (res)
-		goto err;
 
 	len += sizeof(cmd->hdr);
 	len += sizeof(cmd->mgmt_tx);
@@ -598,7 +778,7 @@
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	spin_unlock_bh(&htt->tx_lock);
 	if (res < 0)
-		goto err_tx_dec;
+		goto err;
 
 	msdu_id = res;
 
@@ -649,8 +829,6 @@
 	spin_lock_bh(&htt->tx_lock);
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
-	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 err:
 	return res;
 }
@@ -677,26 +855,12 @@
 	u32 frags_paddr = 0;
 	u32 txbuf_paddr;
 	struct htt_msdu_ext_desc *ext_desc = NULL;
-	bool limit_mgmt_desc = false;
-	bool is_probe_resp = false;
-
-	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
-	    ar->hw_params.max_probe_resp_desc_thres) {
-		limit_mgmt_desc = true;
-
-		if (ieee80211_is_probe_resp(hdr->frame_control))
-			is_probe_resp = true;
-	}
-
-	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
-	if (res)
-		goto err;
 
 	spin_lock_bh(&htt->tx_lock);
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	spin_unlock_bh(&htt->tx_lock);
 	if (res < 0)
-		goto err_tx_dec;
+		goto err;
 
 	msdu_id = res;
 
@@ -862,11 +1026,7 @@
 err_unmap_msdu:
 	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 err_free_msdu_id:
-	spin_lock_bh(&htt->tx_lock);
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
-	spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
-	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 err:
 	return res;
 }
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index f0cfbc7..aedd898 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -35,8 +35,6 @@
 #define QCA988X_HW_2_0_VERSION		0x4100016c
 #define QCA988X_HW_2_0_CHIP_ID_REV	0x2
 #define QCA988X_HW_2_0_FW_DIR		ATH10K_FW_DIR "/QCA988X/hw2.0"
-#define QCA988X_HW_2_0_FW_FILE		"firmware.bin"
-#define QCA988X_HW_2_0_OTP_FILE		"otp.bin"
 #define QCA988X_HW_2_0_BOARD_DATA_FILE	"board.bin"
 #define QCA988X_HW_2_0_PATCH_LOAD_ADDR	0x1234
 
@@ -76,14 +74,10 @@
 };
 
 #define QCA6174_HW_2_1_FW_DIR		"ath10k/QCA6174/hw2.1"
-#define QCA6174_HW_2_1_FW_FILE		"firmware.bin"
-#define QCA6174_HW_2_1_OTP_FILE		"otp.bin"
 #define QCA6174_HW_2_1_BOARD_DATA_FILE	"board.bin"
 #define QCA6174_HW_2_1_PATCH_LOAD_ADDR	0x1234
 
 #define QCA6174_HW_3_0_FW_DIR		"ath10k/QCA6174/hw3.0"
-#define QCA6174_HW_3_0_FW_FILE		"firmware.bin"
-#define QCA6174_HW_3_0_OTP_FILE		"otp.bin"
 #define QCA6174_HW_3_0_BOARD_DATA_FILE	"board.bin"
 #define QCA6174_HW_3_0_PATCH_LOAD_ADDR	0x1234
 
@@ -94,23 +88,17 @@
 #define QCA99X0_HW_2_0_DEV_VERSION     0x01000000
 #define QCA99X0_HW_2_0_CHIP_ID_REV     0x1
 #define QCA99X0_HW_2_0_FW_DIR          ATH10K_FW_DIR "/QCA99X0/hw2.0"
-#define QCA99X0_HW_2_0_FW_FILE         "firmware.bin"
-#define QCA99X0_HW_2_0_OTP_FILE        "otp.bin"
 #define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
 #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR	0x1234
 
 /* QCA9377 1.0 definitions */
 #define QCA9377_HW_1_0_FW_DIR          ATH10K_FW_DIR "/QCA9377/hw1.0"
-#define QCA9377_HW_1_0_FW_FILE         "firmware.bin"
-#define QCA9377_HW_1_0_OTP_FILE        "otp.bin"
 #define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
 #define QCA9377_HW_1_0_PATCH_LOAD_ADDR	0x1234
 
 /* QCA4019 1.0 definitions */
 #define QCA4019_HW_1_0_DEV_VERSION     0x01000000
 #define QCA4019_HW_1_0_FW_DIR          ATH10K_FW_DIR "/QCA4019/hw1.0"
-#define QCA4019_HW_1_0_FW_FILE         "firmware.bin"
-#define QCA4019_HW_1_0_OTP_FILE        "otp.bin"
 #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
 #define QCA4019_HW_1_0_PATCH_LOAD_ADDR  0x1234
 
@@ -134,8 +122,6 @@
 
 #define REG_DUMP_COUNT_QCA988X 60
 
-#define QCA988X_CAL_DATA_LEN		2116
-
 struct ath10k_fw_ie {
 	__le32 id;
 	__le32 len;
@@ -431,10 +417,14 @@
 #define TARGET_10_4_ACTIVE_PEERS		0
 
 #define TARGET_10_4_NUM_QCACHE_PEERS_MAX	512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS		50
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC	35
 #define TARGET_10_4_NUM_OFFLOAD_PEERS		0
 #define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS	0
 #define TARGET_10_4_NUM_PEER_KEYS		2
 #define TARGET_10_4_TGT_NUM_TIDS		((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_NUM_MSDU_DESC		(1024 + 400)
+#define TARGET_10_4_NUM_MSDU_DESC_PFC		2500
 #define TARGET_10_4_AST_SKID_LIMIT		32
 
 /* 100 ms for video, best-effort, and background */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 78999c9..0e24f9e 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -157,6 +157,26 @@
 	return 1;
 }
 
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
+{
+	enum wmi_host_platform_type platform_type;
+	int ret;
+
+	if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
+		platform_type = WMI_HOST_PLATFORM_LOW_PERF;
+	else
+		platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
+
+	ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
+
+	if (ret && ret != -EOPNOTSUPP) {
+		ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 /**********/
 /* Crypto */
 /**********/
@@ -449,10 +469,10 @@
 	lockdep_assert_held(&ar->conf_mutex);
 
 	list_for_each_entry(peer, &ar->peers, list) {
-		if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+		if (ether_addr_equal(peer->addr, arvif->vif->addr))
 			continue;
 
-		if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+		if (ether_addr_equal(peer->addr, arvif->bssid))
 			continue;
 
 		if (peer->keys[key->keyidx] == key)
@@ -482,7 +502,7 @@
 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
 
 	switch (chandef->chan->band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		switch (chandef->width) {
 		case NL80211_CHAN_WIDTH_20_NOHT:
 			if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
@@ -505,7 +525,7 @@
 			break;
 		}
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		switch (chandef->width) {
 		case NL80211_CHAN_WIDTH_20_NOHT:
 			phymode = MODE_11A;
@@ -618,10 +638,15 @@
 	*def = &conf->def;
 }
 
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+static int ath10k_peer_create(struct ath10k *ar,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_sta *sta,
+			      u32 vdev_id,
+			      const u8 *addr,
 			      enum wmi_peer_type peer_type)
 {
 	struct ath10k_vif *arvif;
+	struct ath10k_peer *peer;
 	int num_peers = 0;
 	int ret;
 
@@ -650,6 +675,22 @@
 		return ret;
 	}
 
+	spin_lock_bh(&ar->data_lock);
+
+	peer = ath10k_peer_find(ar, vdev_id, addr);
+	if (!peer) {
+		ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
+			    addr, vdev_id);
+		ath10k_wmi_peer_delete(ar, vdev_id, addr);
+		spin_unlock_bh(&ar->data_lock);
+		return -ENOENT;
+	}
+
+	peer->vif = vif;
+	peer->sta = sta;
+
+	spin_unlock_bh(&ar->data_lock);
+
 	ar->num_peers++;
 
 	return 0;
@@ -731,6 +772,7 @@
 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
 {
 	struct ath10k_peer *peer, *tmp;
+	int peer_id;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
@@ -742,6 +784,11 @@
 		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
 			    peer->addr, vdev_id);
 
+		for_each_set_bit(peer_id, peer->peer_ids,
+				 ATH10K_MAX_NUM_PEER_IDS) {
+			ar->peer_map[peer_id] = NULL;
+		}
+
 		list_del(&peer->list);
 		kfree(peer);
 		ar->num_peers--;
@@ -1725,7 +1772,7 @@
 
 	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
 	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
-		      ar->fw_features)) {
+		      ar->running_fw->fw_file.fw_features)) {
 		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
 			    arvif->vdev_id);
 		enable_ps = false;
@@ -2013,7 +2060,8 @@
 	}
 
 	if (sta->mfp &&
-	    test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, ar->fw_features)) {
+	    test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
+		     ar->running_fw->fw_file.fw_features)) {
 		arg->peer_flags |= ar->wmi.peer_flags->pmf;
 	}
 }
@@ -2028,7 +2076,7 @@
 	struct cfg80211_chan_def def;
 	const struct ieee80211_supported_band *sband;
 	const struct ieee80211_rate *rates;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	u32 ratemask;
 	u8 rate;
 	int i;
@@ -2088,7 +2136,7 @@
 	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct cfg80211_chan_def def;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
 	int i, n;
@@ -2312,7 +2360,7 @@
 	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct cfg80211_chan_def def;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	const u16 *vht_mcs_mask;
 	u8 ampdu_factor;
 
@@ -2330,7 +2378,7 @@
 
 	arg->peer_flags |= ar->wmi.peer_flags->vht;
 
-	if (def.chan->band == IEEE80211_BAND_2GHZ)
+	if (def.chan->band == NL80211_BAND_2GHZ)
 		arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
 
 	arg->peer_vht_caps = vht_cap->cap;
@@ -2399,7 +2447,7 @@
 
 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
 {
-	return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+	return sta->supp_rates[NL80211_BAND_2GHZ] >>
 	       ATH10K_MAC_FIRST_OFDM_RATE_IDX;
 }
 
@@ -2410,7 +2458,7 @@
 {
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct cfg80211_chan_def def;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
@@ -2423,7 +2471,7 @@
 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
 
 	switch (band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		if (sta->vht_cap.vht_supported &&
 		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
@@ -2443,7 +2491,7 @@
 		}
 
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		/*
 		 * Check VHT first.
 		 */
@@ -2821,7 +2869,7 @@
 {
 	struct ieee80211_hw *hw = ar->hw;
 	struct ieee80211_supported_band **bands;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_channel *channel;
 	struct wmi_scan_chan_list_arg arg = {0};
 	struct wmi_channel_arg *ch;
@@ -2833,7 +2881,7 @@
 	lockdep_assert_held(&ar->conf_mutex);
 
 	bands = hw->wiphy->bands;
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		if (!bands[band])
 			continue;
 
@@ -2852,7 +2900,7 @@
 		return -ENOMEM;
 
 	ch = arg.channels;
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		if (!bands[band])
 			continue;
 
@@ -2890,7 +2938,7 @@
 			/* FIXME: why use only legacy modes, why not any
 			 * HT/VHT modes? Would that even make any
 			 * difference? */
-			if (channel->band == IEEE80211_BAND_2GHZ)
+			if (channel->band == NL80211_BAND_2GHZ)
 				ch->mode = MODE_11G;
 			else
 				ch->mode = MODE_11A;
@@ -2994,6 +3042,13 @@
 /* TX handlers */
 /***************/
 
+enum ath10k_mac_tx_path {
+	ATH10K_MAC_TX_HTT,
+	ATH10K_MAC_TX_HTT_MGMT,
+	ATH10K_MAC_TX_WMI_MGMT,
+	ATH10K_MAC_TX_UNKNOWN,
+};
+
 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
 {
 	lockdep_assert_held(&ar->htt.tx_lock);
@@ -3153,7 +3208,8 @@
 	 */
 	if (ar->htt.target_version_major < 3 &&
 	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
-	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+		      ar->running_fw->fw_file.fw_features))
 		return ATH10K_HW_TXRX_MGMT;
 
 	/* Workaround:
@@ -3271,6 +3327,28 @@
 	}
 }
 
+static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_txq *txq,
+				    struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+	cb->flags = 0;
+	if (!ath10k_tx_h_use_hwcrypto(vif, skb))
+		cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+
+	if (ieee80211_is_mgmt(hdr->frame_control))
+		cb->flags |= ATH10K_SKB_F_MGMT;
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		cb->flags |= ATH10K_SKB_F_QOS;
+
+	cb->vif = vif;
+	cb->txq = txq;
+}
+
 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
 {
 	/* FIXME: Not really sure since when the behaviour changed. At some
@@ -3281,7 +3359,7 @@
 	 */
 	return (ar->htt.target_version_major >= 3 &&
 		ar->htt.target_version_minor >= 4 &&
-		ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
+		ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
 }
 
 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
@@ -3306,26 +3384,50 @@
 	return ret;
 }
 
-static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode,
-			  struct sk_buff *skb)
+static enum ath10k_mac_tx_path
+ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
+			   struct sk_buff *skb,
+			   enum ath10k_hw_txrx_mode txmode)
 {
-	struct ath10k_htt *htt = &ar->htt;
-	int ret = 0;
-
 	switch (txmode) {
 	case ATH10K_HW_TXRX_RAW:
 	case ATH10K_HW_TXRX_NATIVE_WIFI:
 	case ATH10K_HW_TXRX_ETHERNET:
-		ret = ath10k_htt_tx(htt, txmode, skb);
-		break;
+		return ATH10K_MAC_TX_HTT;
 	case ATH10K_HW_TXRX_MGMT:
 		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
-			     ar->fw_features))
-			ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+			     ar->running_fw->fw_file.fw_features))
+			return ATH10K_MAC_TX_WMI_MGMT;
 		else if (ar->htt.target_version_major >= 3)
-			ret = ath10k_htt_tx(htt, txmode, skb);
+			return ATH10K_MAC_TX_HTT;
 		else
-			ret = ath10k_htt_mgmt_tx(htt, skb);
+			return ATH10K_MAC_TX_HTT_MGMT;
+	}
+
+	return ATH10K_MAC_TX_UNKNOWN;
+}
+
+static int ath10k_mac_tx_submit(struct ath10k *ar,
+				enum ath10k_hw_txrx_mode txmode,
+				enum ath10k_mac_tx_path txpath,
+				struct sk_buff *skb)
+{
+	struct ath10k_htt *htt = &ar->htt;
+	int ret = -EINVAL;
+
+	switch (txpath) {
+	case ATH10K_MAC_TX_HTT:
+		ret = ath10k_htt_tx(htt, txmode, skb);
+		break;
+	case ATH10K_MAC_TX_HTT_MGMT:
+		ret = ath10k_htt_mgmt_tx(htt, skb);
+		break;
+	case ATH10K_MAC_TX_WMI_MGMT:
+		ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+		break;
+	case ATH10K_MAC_TX_UNKNOWN:
+		WARN_ON_ONCE(1);
+		ret = -EINVAL;
 		break;
 	}
 
@@ -3334,6 +3436,64 @@
 			    ret);
 		ieee80211_free_txskb(ar->hw, skb);
 	}
+
+	return ret;
+}
+
+/* This function consumes the sk_buff regardless of the return value, so as
+ * far as the caller is concerned no freeing is necessary afterwards.
+ */
+static int ath10k_mac_tx(struct ath10k *ar,
+			 struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta,
+			 enum ath10k_hw_txrx_mode txmode,
+			 enum ath10k_mac_tx_path txpath,
+			 struct sk_buff *skb)
+{
+	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	int ret;
+
+	/* We should disable CCK RATE due to P2P */
+	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+	switch (txmode) {
+	case ATH10K_HW_TXRX_MGMT:
+	case ATH10K_HW_TXRX_NATIVE_WIFI:
+		ath10k_tx_h_nwifi(hw, skb);
+		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+		ath10k_tx_h_seq_no(vif, skb);
+		break;
+	case ATH10K_HW_TXRX_ETHERNET:
+		ath10k_tx_h_8023(skb);
+		break;
+	case ATH10K_HW_TXRX_RAW:
+		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+			WARN_ON_ONCE(1);
+			ieee80211_free_txskb(hw, skb);
+			return -ENOTSUPP;
+		}
+	}
+
+	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+		if (!ath10k_mac_tx_frm_has_freq(ar)) {
+			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+				   skb);
+
+			skb_queue_tail(&ar->offchan_tx_queue, skb);
+			ieee80211_queue_work(hw, &ar->offchan_tx_work);
+			return 0;
+		}
+	}
+
+	ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to submit frame: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
 }
 
 void ath10k_offchan_tx_purge(struct ath10k *ar)
@@ -3354,12 +3514,13 @@
 	struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
 	struct ath10k_peer *peer;
 	struct ath10k_vif *arvif;
+	enum ath10k_hw_txrx_mode txmode;
+	enum ath10k_mac_tx_path txpath;
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_vif *vif;
 	struct ieee80211_sta *sta;
 	struct sk_buff *skb;
 	const u8 *peer_addr;
-	enum ath10k_hw_txrx_mode txmode;
 	int vdev_id;
 	int ret;
 	unsigned long time_left;
@@ -3396,7 +3557,8 @@
 				   peer_addr, vdev_id);
 
 		if (!peer) {
-			ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+			ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
+						 peer_addr,
 						 WMI_PEER_TYPE_DEFAULT);
 			if (ret)
 				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
@@ -3423,8 +3585,14 @@
 		}
 
 		txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+		txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
 
-		ath10k_mac_tx(ar, txmode, skb);
+		ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+		if (ret) {
+			ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
+				    ret);
+			/* not serious */
+		}
 
 		time_left =
 		wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
@@ -3476,6 +3644,175 @@
 	}
 }
 
+static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
+{
+	struct ath10k_txq *artxq;
+
+	if (!txq)
+		return;
+
+	artxq = (void *)txq->drv_priv;
+	INIT_LIST_HEAD(&artxq->list);
+}
+
+static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
+{
+	struct ath10k_txq *artxq;
+	struct ath10k_skb_cb *cb;
+	struct sk_buff *msdu;
+	int msdu_id;
+
+	if (!txq)
+		return;
+
+	artxq = (void *)txq->drv_priv;
+
+	spin_lock_bh(&ar->txqs_lock);
+	if (!list_empty(&artxq->list))
+		list_del_init(&artxq->list);
+	spin_unlock_bh(&ar->txqs_lock);
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
+		cb = ATH10K_SKB_CB(msdu);
+		if (cb->txq == txq)
+			cb->txq = NULL;
+	}
+	spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+					    u16 peer_id,
+					    u8 tid)
+{
+	struct ath10k_peer *peer;
+
+	lockdep_assert_held(&ar->data_lock);
+
+	peer = ar->peer_map[peer_id];
+	if (!peer)
+		return NULL;
+
+	if (peer->sta)
+		return peer->sta->txq[tid];
+	else if (peer->vif)
+		return peer->vif->txq;
+	else
+		return NULL;
+}
+
+static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
+				   struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+	/* No need to get locks */
+
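+	/* Pushing is unrestricted in push mode; otherwise it is allowed only
+	 * while still under the global or the per-queue budget granted by
+	 * firmware.
+	 */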
+	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
+		return true;
+
+	if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
+		return true;
+
+	if (artxq->num_fw_queued < artxq->num_push_allowed)
+		return true;
+
+	return false;
+}
+
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+			   struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_htt *htt = &ar->htt;
+	struct ath10k_txq *artxq = (void *)txq->drv_priv;
+	struct ieee80211_vif *vif = txq->vif;
+	struct ieee80211_sta *sta = txq->sta;
+	enum ath10k_hw_txrx_mode txmode;
+	enum ath10k_mac_tx_path txpath;
+	struct sk_buff *skb;
+	size_t skb_len;
+	int ret;
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	ret = ath10k_htt_tx_inc_pending(htt);
+	spin_unlock_bh(&ar->htt.tx_lock);
+
+	if (ret)
+		return ret;
+
+	skb = ieee80211_tx_dequeue(hw, txq);
+	if (!skb) {
+		spin_lock_bh(&ar->htt.tx_lock);
+		ath10k_htt_tx_dec_pending(htt);
+		spin_unlock_bh(&ar->htt.tx_lock);
+
+		return -ENOENT;
+	}
+
+	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+
+	skb_len = skb->len;
+	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+
+	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+	if (unlikely(ret)) {
+		ath10k_warn(ar, "failed to push frame: %d\n", ret);
+
+		spin_lock_bh(&ar->htt.tx_lock);
+		ath10k_htt_tx_dec_pending(htt);
+		spin_unlock_bh(&ar->htt.tx_lock);
+
+		return ret;
+	}
+
+	spin_lock_bh(&ar->htt.tx_lock);
+	artxq->num_fw_queued++;
+	spin_unlock_bh(&ar->htt.tx_lock);
+
+	return skb_len;
+}
+
+void ath10k_mac_tx_push_pending(struct ath10k *ar)
+{
+	struct ieee80211_hw *hw = ar->hw;
+	struct ieee80211_txq *txq;
+	struct ath10k_txq *artxq;
+	struct ath10k_txq *last;
+	int ret;
+	int max;
+
+	spin_lock_bh(&ar->txqs_lock);
+	rcu_read_lock();
+
+	last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
+	while (!list_empty(&ar->txqs)) {
+		artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+		txq = container_of((void *)artxq, struct ieee80211_txq,
+				   drv_priv);
+
+		/* Prevent aggressive sta/tid taking over tx queue */
+		max = 16;
+		ret = 0;
+		while (ath10k_mac_tx_can_push(hw, txq) && max--) {
+			ret = ath10k_mac_tx_push_txq(hw, txq);
+			if (ret < 0)
+				break;
+		}
+
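+		/* Rotate this queue to the tail unless it drained completely
+		 * (-ENOENT); one full pass ends once the snapshotted last
+		 * entry has been serviced.
+		 */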
+		list_del_init(&artxq->list);
+		if (ret != -ENOENT)
+			list_add_tail(&artxq->list, &ar->txqs);
+
+		ath10k_htt_tx_txq_update(hw, txq);
+
+		if (artxq == last || (ret < 0 && ret != -ENOENT))
+			break;
+	}
+
+	rcu_read_unlock();
+	spin_unlock_bh(&ar->txqs_lock);
+}
+
 /************/
 /* Scanning */
 /************/
@@ -3531,7 +3868,7 @@
 		goto out;
 	}
 
-	ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+	ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
 	if (ret == 0) {
 		ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
 		ret = -ETIMEDOUT;
@@ -3611,7 +3948,7 @@
 	if (ret)
 		return ret;
 
-	ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+	ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
 	if (ret == 0) {
 		ret = ath10k_scan_stop(ar);
 		if (ret)
@@ -3638,66 +3975,86 @@
 /* mac80211 callbacks */
 /**********************/
 
-static void ath10k_tx(struct ieee80211_hw *hw,
-		      struct ieee80211_tx_control *control,
-		      struct sk_buff *skb)
+static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
+			     struct ieee80211_tx_control *control,
+			     struct sk_buff *skb)
 {
 	struct ath10k *ar = hw->priv;
-	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+	struct ath10k_htt *htt = &ar->htt;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_vif *vif = info->control.vif;
 	struct ieee80211_sta *sta = control->sta;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_txq *txq = NULL;
+	struct ieee80211_hdr *hdr = (void *)skb->data;
 	enum ath10k_hw_txrx_mode txmode;
+	enum ath10k_mac_tx_path txpath;
+	bool is_htt;
+	bool is_mgmt;
+	bool is_presp;
+	int ret;
 
-	/* We should disable CCK RATE due to P2P */
-	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
-		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
 
 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+	is_htt = (txpath == ATH10K_MAC_TX_HTT ||
+		  txpath == ATH10K_MAC_TX_HTT_MGMT);
+	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
 
-	skb_cb->flags = 0;
-	if (!ath10k_tx_h_use_hwcrypto(vif, skb))
-		skb_cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+	if (is_htt) {
+		spin_lock_bh(&ar->htt.tx_lock);
+		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
 
-	if (ieee80211_is_mgmt(hdr->frame_control))
-		skb_cb->flags |= ATH10K_SKB_F_MGMT;
-
-	if (ieee80211_is_data_qos(hdr->frame_control))
-		skb_cb->flags |= ATH10K_SKB_F_QOS;
-
-	skb_cb->vif = vif;
-
-	switch (txmode) {
-	case ATH10K_HW_TXRX_MGMT:
-	case ATH10K_HW_TXRX_NATIVE_WIFI:
-		ath10k_tx_h_nwifi(hw, skb);
-		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
-		ath10k_tx_h_seq_no(vif, skb);
-		break;
-	case ATH10K_HW_TXRX_ETHERNET:
-		ath10k_tx_h_8023(skb);
-		break;
-	case ATH10K_HW_TXRX_RAW:
-		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
-			WARN_ON_ONCE(1);
-			ieee80211_free_txskb(hw, skb);
+		ret = ath10k_htt_tx_inc_pending(htt);
+		if (ret) {
+			ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
+				    ret);
+			spin_unlock_bh(&ar->htt.tx_lock);
+			ieee80211_free_txskb(ar->hw, skb);
 			return;
 		}
-	}
 
-	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
-		if (!ath10k_mac_tx_frm_has_freq(ar)) {
-			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
-				   skb);
-
-			skb_queue_tail(&ar->offchan_tx_queue, skb);
-			ieee80211_queue_work(hw, &ar->offchan_tx_work);
+		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+		if (ret) {
+			ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
+				   ret);
+			ath10k_htt_tx_dec_pending(htt);
+			spin_unlock_bh(&ar->htt.tx_lock);
+			ieee80211_free_txskb(ar->hw, skb);
 			return;
 		}
+		spin_unlock_bh(&ar->htt.tx_lock);
 	}
 
-	ath10k_mac_tx(ar, txmode, skb);
+	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
+		if (is_htt) {
+			spin_lock_bh(&ar->htt.tx_lock);
+			ath10k_htt_tx_dec_pending(htt);
+			if (is_mgmt)
+				ath10k_htt_tx_mgmt_dec_pending(htt);
+			spin_unlock_bh(&ar->htt.tx_lock);
+		}
+		return;
+	}
+}
+
+static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
+					struct ieee80211_txq *txq)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+	spin_lock_bh(&ar->txqs_lock);
+	if (list_empty(&artxq->list))
+		list_add_tail(&artxq->list, &ar->txqs);
+	spin_unlock_bh(&ar->txqs_lock);
+
+	if (ath10k_mac_tx_can_push(hw, txq))
+		tasklet_schedule(&ar->htt.txrx_compl_task);
+
+	ath10k_htt_tx_txq_update(hw, txq);
 }
 
 /* Must not be called with conf_mutex held as workers can use that also. */
@@ -3919,14 +4276,14 @@
 	vht_cap = ath10k_create_vht_cap(ar);
 
 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
-		band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
 		band->ht_cap = ht_cap;
 
 		/* Enable the VHT support at 2.4 GHz */
 		band->vht_cap = vht_cap;
 	}
 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
-		band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
 		band->ht_cap = ht_cap;
 		band->vht_cap = vht_cap;
 	}
@@ -4021,7 +4378,8 @@
 		goto err_off;
 	}
 
-	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+				&ar->normal_mode_fw);
 	if (ret) {
 		ath10k_err(ar, "Could not init core: %d\n", ret);
 		goto err_power_down;
@@ -4079,7 +4437,7 @@
 	}
 
 	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
-		     ar->fw_features)) {
+		     ar->running_fw->fw_file.fw_features)) {
 		ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
 							  WMI_CCA_DETECT_LEVEL_AUTO,
 							  WMI_CCA_DETECT_MARGIN_AUTO);
@@ -4100,7 +4458,7 @@
 
 	ar->ani_enabled = true;
 
-	if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+	if (ath10k_peer_stats_enabled(ar)) {
 		param = ar->wmi.pdev_param->peer_stats_update_period;
 		ret = ath10k_wmi_pdev_set_param(ar, param,
 						PEER_DEFAULT_STATS_UPDATE_PERIOD);
@@ -4313,6 +4671,7 @@
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_peer *peer;
 	enum wmi_sta_powersave_param param;
 	int ret = 0;
 	u32 value;
@@ -4325,6 +4684,7 @@
 	mutex_lock(&ar->conf_mutex);
 
 	memset(arvif, 0, sizeof(*arvif));
+	ath10k_mac_txq_init(vif->txq);
 
 	arvif->ar = ar;
 	arvif->vif = vif;
@@ -4489,7 +4849,10 @@
 		goto err_vdev_delete;
 	}
 
-	if (ar->cfg_tx_chainmask) {
+	/* Configuring the number of spatial streams for a monitor interface
+	 * causes a target assert in qca9888 and qca6174.
+	 */
+	if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
 		u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
 
 		vdev_param = ar->wmi.vdev_param->nss;
@@ -4505,13 +4868,31 @@
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
-		ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
-					 WMI_PEER_TYPE_DEFAULT);
+		ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
+					 vif->addr, WMI_PEER_TYPE_DEFAULT);
 		if (ret) {
 			ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
 				    arvif->vdev_id, ret);
 			goto err_vdev_delete;
 		}
+
+		spin_lock_bh(&ar->data_lock);
+
+		peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
+		if (!peer) {
+			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+				    vif->addr, arvif->vdev_id);
+			spin_unlock_bh(&ar->data_lock);
+			ret = -ENOENT;
+			goto err_peer_delete;
+		}
+
+		arvif->peer_id = find_first_bit(peer->peer_ids,
+						ATH10K_MAX_NUM_PEER_IDS);
+
+		spin_unlock_bh(&ar->data_lock);
+	} else {
+		arvif->peer_id = HTT_INVALID_PEERID;
 	}
 
 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
@@ -4622,7 +5003,9 @@
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	struct ath10k_peer *peer;
 	int ret;
+	int i;
 
 	cancel_work_sync(&arvif->ap_csa_work);
 	cancel_delayed_work_sync(&arvif->connection_loss_work);
@@ -4676,7 +5059,22 @@
 		spin_unlock_bh(&ar->data_lock);
 	}
 
+	spin_lock_bh(&ar->data_lock);
+	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+		peer = ar->peer_map[i];
+		if (!peer)
+			continue;
+
+		if (peer->vif == vif) {
+			ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
+				    vif->addr, arvif->vdev_id);
+			peer->vif = NULL;
+		}
+	}
+	spin_unlock_bh(&ar->data_lock);
+
 	ath10k_peer_cleanup(ar, arvif->vdev_id);
+	ath10k_mac_txq_unref(ar, vif->txq);
 
 	if (vif->type == NL80211_IFTYPE_MONITOR) {
 		ar->monitor_arvif = NULL;
@@ -4689,6 +5087,8 @@
 	ath10k_mac_vif_tx_unlock_all(arvif);
 	spin_unlock_bh(&ar->htt.tx_lock);
 
+	ath10k_mac_txq_unref(ar, vif->txq);
+
 	mutex_unlock(&ar->conf_mutex);
 }
 
@@ -5218,7 +5618,7 @@
 	struct ath10k_sta *arsta;
 	struct ieee80211_sta *sta;
 	struct cfg80211_chan_def def;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
 	u32 changed, bw, nss, smps;
@@ -5393,13 +5793,18 @@
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k_peer *peer;
 	int ret = 0;
+	int i;
 
 	if (old_state == IEEE80211_STA_NOTEXIST &&
 	    new_state == IEEE80211_STA_NONE) {
 		memset(arsta, 0, sizeof(*arsta));
 		arsta->arvif = arvif;
 		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+
+		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+			ath10k_mac_txq_init(sta->txq[i]);
 	}
 
 	/* cancel must be done outside the mutex to avoid deadlock */
@@ -5434,8 +5839,8 @@
 		if (sta->tdls)
 			peer_type = WMI_PEER_TYPE_TDLS;
 
-		ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
-					 peer_type);
+		ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
+					 sta->addr, peer_type);
 		if (ret) {
 			ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
 				    sta->addr, arvif->vdev_id, ret);
@@ -5443,6 +5848,24 @@
 			goto exit;
 		}
 
+		spin_lock_bh(&ar->data_lock);
+
+		peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+		if (!peer) {
+			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+				    vif->addr, arvif->vdev_id);
+			spin_unlock_bh(&ar->data_lock);
+			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+			ath10k_mac_dec_num_stations(arvif, sta);
+			ret = -ENOENT;
+			goto exit;
+		}
+
+		arsta->peer_id = find_first_bit(peer->peer_ids,
+						ATH10K_MAX_NUM_PEER_IDS);
+
+		spin_unlock_bh(&ar->data_lock);
+
 		if (!sta->tdls)
 			goto exit;
 
@@ -5505,6 +5928,23 @@
 
 		ath10k_mac_dec_num_stations(arvif, sta);
 
+		spin_lock_bh(&ar->data_lock);
+		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+			peer = ar->peer_map[i];
+			if (!peer)
+				continue;
+
+			if (peer->sta == sta) {
+				ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n",
+					    sta->addr, arvif->vdev_id);
+				peer->sta = NULL;
+			}
+		}
+		spin_unlock_bh(&ar->data_lock);
+
+		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+			ath10k_mac_txq_unref(ar, sta->txq[i]);
+
 		if (!sta->tdls)
 			goto exit;
 
@@ -5751,7 +6191,7 @@
 	return ret;
 }
 
-#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+#define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
 
 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
 				    struct ieee80211_vif *vif,
@@ -5815,7 +6255,7 @@
 		goto exit;
 	}
 
-	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
 	if (ret == 0) {
 		ath10k_warn(ar, "failed to switch to channel for roc scan\n");
 
@@ -5977,14 +6417,14 @@
 
 	mutex_lock(&ar->conf_mutex);
 
-	sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+	sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
 	if (sband && idx >= sband->n_channels) {
 		idx -= sband->n_channels;
 		sband = NULL;
 	}
 
 	if (!sband)
-		sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+		sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
 
 	if (!sband || idx >= sband->n_channels) {
 		ret = -ENOENT;
@@ -6007,7 +6447,7 @@
 
 static bool
 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
-					enum ieee80211_band band,
+					enum nl80211_band band,
 					const struct cfg80211_bitrate_mask *mask)
 {
 	int num_rates = 0;
@@ -6026,7 +6466,7 @@
 
 static bool
 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
-				       enum ieee80211_band band,
+				       enum nl80211_band band,
 				       const struct cfg80211_bitrate_mask *mask,
 				       int *nss)
 {
@@ -6075,7 +6515,7 @@
 
 static int
 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
-					enum ieee80211_band band,
+					enum nl80211_band band,
 					const struct cfg80211_bitrate_mask *mask,
 					u8 *rate, u8 *nss)
 {
@@ -6176,7 +6616,7 @@
 
 static bool
 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
-				enum ieee80211_band band,
+				enum nl80211_band band,
 				const struct cfg80211_bitrate_mask *mask)
 {
 	int i;
@@ -6228,7 +6668,7 @@
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct cfg80211_chan_def def;
 	struct ath10k *ar = arvif->ar;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
 	u8 rate;
@@ -6379,6 +6819,32 @@
 	return 0;
 }
 
+static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   u64 tsf)
+{
+	struct ath10k *ar = hw->priv;
+	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+	u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
+	int ret;
+
+	/* Workaround:
+	 *
+	 * The given tsf argument is an absolute TSF value, but the firmware
+	 * accepts only an offset relative to the current TSF.
+	 *
+	 * get_tsf is used to derive that offset; however, since
+	 * ath10k_get_tsf is not implemented properly, it always returns 0.
+	 * Luckily all current callers of set_tsf also build the absolute tsf
+	 * as get_tsf() + tsf_delta, so the final offset passed to firmware is
+	 * still arithmetically correct.
+	 */
+	tsf_offset = tsf - ath10k_get_tsf(hw, vif);
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+					vdev_param, tsf_offset);
+	if (ret && ret != -EOPNOTSUPP)
+		ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
+}
+
 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif,
 			       struct ieee80211_ampdu_params *params)
@@ -6450,7 +6916,13 @@
 			def = &vifs[0].new_ctx->def;
 
 		ar->rx_channel = def->chan;
-	} else if (ctx && ath10k_mac_num_chanctxs(ar) == 0) {
+	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
+		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
+		/* During a driver restart caused by a firmware assert, mac80211
+		 * already has a valid channel context for this radio, so channel
+		 * context iteration returns num_chanctx > 0. Fix up rx_channel
+		 * while the restart is in progress.
+		 */
 		ar->rx_channel = ctx->def.chan;
 	} else {
 		ar->rx_channel = NULL;
@@ -6807,7 +7279,8 @@
 }
 
 static const struct ieee80211_ops ath10k_ops = {
-	.tx				= ath10k_tx,
+	.tx				= ath10k_mac_op_tx,
+	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
 	.start				= ath10k_start,
 	.stop				= ath10k_stop,
 	.config				= ath10k_config,
@@ -6834,6 +7307,7 @@
 	.set_bitrate_mask		= ath10k_mac_op_set_bitrate_mask,
 	.sta_rc_update			= ath10k_sta_rc_update,
 	.get_tsf			= ath10k_get_tsf,
+	.set_tsf			= ath10k_set_tsf,
 	.ampdu_action			= ath10k_ampdu_action,
 	.get_et_sset_count		= ath10k_debug_get_et_sset_count,
 	.get_et_stats			= ath10k_debug_get_et_stats,
@@ -6857,7 +7331,7 @@
 };
 
 #define CHAN2G(_channel, _freq, _flags) { \
-	.band			= IEEE80211_BAND_2GHZ, \
+	.band			= NL80211_BAND_2GHZ, \
 	.hw_value		= (_channel), \
 	.center_freq		= (_freq), \
 	.flags			= (_flags), \
@@ -6866,7 +7340,7 @@
 }
 
 #define CHAN5G(_channel, _freq, _flags) { \
-	.band			= IEEE80211_BAND_5GHZ, \
+	.band			= NL80211_BAND_5GHZ, \
 	.hw_value		= (_channel), \
 	.center_freq		= (_freq), \
 	.flags			= (_flags), \
@@ -7186,13 +7660,13 @@
 			goto err_free;
 		}
 
-		band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
 		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
 		band->channels = channels;
 		band->n_bitrates = ath10k_g_rates_size;
 		band->bitrates = ath10k_g_rates;
 
-		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
 	}
 
 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
@@ -7204,12 +7678,12 @@
 			goto err_free;
 		}
 
-		band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
 		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
 		band->channels = channels;
 		band->n_bitrates = ath10k_a_rates_size;
 		band->bitrates = ath10k_a_rates;
-		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
 	}
 
 	ath10k_mac_setup_ht_vht_cap(ar);
@@ -7222,7 +7696,7 @@
 	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
 	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
 
-	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
 		ar->hw->wiphy->interface_modes |=
 			BIT(NL80211_IFTYPE_P2P_DEVICE) |
 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -7262,6 +7736,7 @@
 
 	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
 	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+	ar->hw->txq_data_size = sizeof(struct ath10k_txq);
 
 	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
 
@@ -7286,7 +7761,8 @@
 	ar->hw->wiphy->max_remain_on_channel_duration = 5000;
 
 	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
-	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+				   NL80211_FEATURE_AP_SCAN;
 
 	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
 
@@ -7310,7 +7786,7 @@
 	 */
 	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
 
-	switch (ar->wmi.op_version) {
+	switch (ar->running_fw->fw_file.wmi_op_version) {
 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
 		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
 		ar->hw->wiphy->n_iface_combinations =
@@ -7395,8 +7871,8 @@
 		ar->dfs_detector->exit(ar->dfs_detector);
 
 err_free:
-	kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
-	kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
 
 	SET_IEEE80211_DEV(ar->hw, NULL);
 	return ret;
@@ -7409,8 +7885,8 @@
 	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
 		ar->dfs_detector->exit(ar->dfs_detector);
 
-	kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
-	kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
 
 	SET_IEEE80211_DEV(ar->hw, NULL);
 }
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 5309158..1bd29ec 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -75,6 +75,13 @@
 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar);
+void ath10k_mac_tx_push_pending(struct ath10k *ar);
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+			   struct ieee80211_txq *txq);
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+					    u16 peer_id,
+					    u8 tid);
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
 
 static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
 {
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index b3cff1d..8133d7b 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -33,12 +33,6 @@
 #include "ce.h"
 #include "pci.h"
 
-enum ath10k_pci_irq_mode {
-	ATH10K_PCI_IRQ_AUTO = 0,
-	ATH10K_PCI_IRQ_LEGACY = 1,
-	ATH10K_PCI_IRQ_MSI = 2,
-};
-
 enum ath10k_pci_reset_mode {
 	ATH10K_PCI_RESET_AUTO = 0,
 	ATH10K_PCI_RESET_WARM_ONLY = 1,
@@ -745,10 +739,7 @@
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-	if (ar_pci->num_msi_intrs > 1)
-		return "msi-x";
-
-	if (ar_pci->num_msi_intrs == 1)
+	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
 		return "msi";
 
 	return "legacy";
@@ -809,7 +800,8 @@
 	spin_lock_bh(&ar_pci->ce_lock);
 	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
 	spin_unlock_bh(&ar_pci->ce_lock);
-	while (num--) {
+
+	while (num >= 0) {
 		ret = __ath10k_pci_rx_post_buf(pipe);
 		if (ret) {
 			if (ret == -ENOSPC)
@@ -819,6 +811,7 @@
 				  ATH10K_PCI_RX_POST_RETRY_MS);
 			break;
 		}
+		num--;
 	}
 }
 
@@ -870,10 +863,8 @@
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret = 0;
-	u32 buf;
+	u32 *buf;
 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
-	unsigned int id;
-	unsigned int flags;
 	struct ath10k_ce_pipe *ce_diag;
 	/* Host buffer address in CE space */
 	u32 ce_data;
@@ -909,7 +900,7 @@
 		nbytes = min_t(unsigned int, remaining_bytes,
 			       DIAG_TRANSFER_LIMIT);
 
-		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+		ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
 		if (ret != 0)
 			goto done;
 
@@ -940,9 +931,10 @@
 		}
 
 		i = 0;
-		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
-							    &completed_nbytes,
-							    &id, &flags) != 0) {
+		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+							    (void **)&buf,
+							    &completed_nbytes)
+								!= 0) {
 			mdelay(1);
 
 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -956,7 +948,7 @@
 			goto done;
 		}
 
-		if (buf != ce_data) {
+		if (*buf != ce_data) {
 			ret = -EIO;
 			goto done;
 		}
@@ -1026,10 +1018,8 @@
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret = 0;
-	u32 buf;
+	u32 *buf;
 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
-	unsigned int id;
-	unsigned int flags;
 	struct ath10k_ce_pipe *ce_diag;
 	void *data_buf = NULL;
 	u32 ce_data;	/* Host buffer address in CE space */
@@ -1078,7 +1068,7 @@
 		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
 
 		/* Set up to receive directly into Target(!) address */
-		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+		ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
 		if (ret != 0)
 			goto done;
 
@@ -1103,9 +1093,10 @@
 		}
 
 		i = 0;
-		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
-							    &completed_nbytes,
-							    &id, &flags) != 0) {
+		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+							    (void **)&buf,
+							    &completed_nbytes)
+								!= 0) {
 			mdelay(1);
 
 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1119,7 +1110,7 @@
 			goto done;
 		}
 
-		if (buf != address) {
+		if (*buf != address) {
 			ret = -EIO;
 			goto done;
 		}
@@ -1181,15 +1172,11 @@
 	struct sk_buff *skb;
 	struct sk_buff_head list;
 	void *transfer_context;
-	u32 ce_data;
 	unsigned int nbytes, max_nbytes;
-	unsigned int transfer_id;
-	unsigned int flags;
 
 	__skb_queue_head_init(&list);
 	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
-					     &ce_data, &nbytes, &transfer_id,
-					     &flags) == 0) {
+					     &nbytes) == 0) {
 		skb = transfer_context;
 		max_nbytes = skb->len + skb_tailroom(skb);
 		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
@@ -1218,6 +1205,63 @@
 	ath10k_pci_rx_post_pipe(pipe_info);
 }
 
+static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
+					 void (*callback)(struct ath10k *ar,
+							  struct sk_buff *skb))
+{
+	struct ath10k *ar = ce_state->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
+	struct sk_buff *skb;
+	struct sk_buff_head list;
+	void *transfer_context;
+	unsigned int nbytes, max_nbytes, nentries;
+	int orig_len;
+
+	/* No need to acquire ce_lock for CE5, since this is the only place CE5
+	 * is processed other than init and deinit. Before releasing CE5
+	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
+	 */
+	__skb_queue_head_init(&list);
+	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
+						    &nbytes) == 0) {
+		skb = transfer_context;
+		max_nbytes = skb->len + skb_tailroom(skb);
+
+		if (unlikely(max_nbytes < nbytes)) {
+			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+				    nbytes, max_nbytes);
+			continue;
+		}
+
+		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+					max_nbytes, DMA_FROM_DEVICE);
+		skb_put(skb, nbytes);
+		__skb_queue_tail(&list, skb);
+	}
+
+	nentries = skb_queue_len(&list);
+	while ((skb = __skb_dequeue(&list))) {
+		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+			   ce_state->id, skb->len);
+		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+				skb->data, skb->len);
+
+		orig_len = skb->len;
+		callback(ar, skb);
+		skb_push(skb, orig_len - skb->len);
+		skb_reset_tail_pointer(skb);
+		skb_trim(skb, 0);
+
+		/* let device gain the buffer again */
+		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+					   skb->len + skb_tailroom(skb),
+					   DMA_FROM_DEVICE);
+	}
+	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
+}
+
 /* Called by lower (CE) layer when data is received from the Target. */
 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
 {
@@ -1274,7 +1318,7 @@
 	 */
 	ath10k_ce_per_engine_service(ce_state->ar, 4);
 
-	ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
 }
 
 int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -1449,13 +1493,8 @@
 void ath10k_pci_kill_tasklet(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int i;
 
 	tasklet_kill(&ar_pci->intr_tq);
-	tasklet_kill(&ar_pci->msi_fw_err);
-
-	for (i = 0; i < CE_COUNT; i++)
-		tasklet_kill(&ar_pci->pipe_info[i].intr);
 
 	del_timer_sync(&ar_pci->rx_post_retry);
 }
@@ -1571,10 +1610,8 @@
 static void ath10k_pci_irq_sync(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int i;
 
-	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
-		synchronize_irq(ar_pci->pdev->irq + i);
+	synchronize_irq(ar_pci->pdev->irq);
 }
 
 static void ath10k_pci_irq_enable(struct ath10k *ar)
@@ -1835,13 +1872,10 @@
 {
 	struct ath10k *ar = ce_state->ar;
 	struct bmi_xfer *xfer;
-	u32 ce_data;
 	unsigned int nbytes;
-	unsigned int transfer_id;
-	unsigned int flags;
 
-	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
-					  &nbytes, &transfer_id, &flags))
+	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+					  &nbytes))
 		return;
 
 	if (WARN_ON_ONCE(!xfer))
@@ -2546,65 +2580,6 @@
 #endif
 };
 
-static void ath10k_pci_ce_tasklet(unsigned long ptr)
-{
-	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
-	struct ath10k_pci *ar_pci = pipe->ar_pci;
-
-	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
-}
-
-static void ath10k_msi_err_tasklet(unsigned long data)
-{
-	struct ath10k *ar = (struct ath10k *)data;
-
-	if (!ath10k_pci_has_fw_crashed(ar)) {
-		ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
-		return;
-	}
-
-	ath10k_pci_irq_disable(ar);
-	ath10k_pci_fw_crashed_clear(ar);
-	ath10k_pci_fw_crashed_dump(ar);
-}
-
-/*
- * Handler for a per-engine interrupt on a PARTICULAR CE.
- * This is used in cases where each CE has a private MSI interrupt.
- */
-static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
-{
-	struct ath10k *ar = arg;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
-
-	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
-		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
-			    ce_id);
-		return IRQ_HANDLED;
-	}
-
-	/*
-	 * NOTE: We are able to derive ce_id from irq because we
-	 * use a one-to-one mapping for CE's 0..5.
-	 * CE's 6 & 7 do not use interrupts at all.
-	 *
-	 * This mapping must be kept in sync with the mapping
-	 * used by firmware.
-	 */
-	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
-{
-	struct ath10k *ar = arg;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	tasklet_schedule(&ar_pci->msi_fw_err);
-	return IRQ_HANDLED;
-}
-
 /*
  * Top-level interrupt handler for all PCI interrupts from a Target.
  * When a block of MSI interrupts is allocated, this top-level handler
@@ -2622,7 +2597,7 @@
 		return IRQ_NONE;
 	}
 
-	if (ar_pci->num_msi_intrs == 0) {
+	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
 		if (!ath10k_pci_irq_pending(ar))
 			return IRQ_NONE;
 
@@ -2649,43 +2624,10 @@
 	ath10k_ce_per_engine_service_any(ar);
 
 	/* Re-enable legacy irq that was disabled in the irq handler */
-	if (ar_pci->num_msi_intrs == 0)
+	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
 		ath10k_pci_enable_legacy_irq(ar);
 }
 
-static int ath10k_pci_request_irq_msix(struct ath10k *ar)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int ret, i;
-
-	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
-			  ath10k_pci_msi_fw_handler,
-			  IRQF_SHARED, "ath10k_pci", ar);
-	if (ret) {
-		ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
-			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
-		return ret;
-	}
-
-	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
-		ret = request_irq(ar_pci->pdev->irq + i,
-				  ath10k_pci_per_engine_handler,
-				  IRQF_SHARED, "ath10k_pci", ar);
-		if (ret) {
-			ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
-				    ar_pci->pdev->irq + i, ret);
-
-			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
-				free_irq(ar_pci->pdev->irq + i, ar);
-
-			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -2724,41 +2666,28 @@
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-	switch (ar_pci->num_msi_intrs) {
-	case 0:
+	switch (ar_pci->oper_irq_mode) {
+	case ATH10K_PCI_IRQ_LEGACY:
 		return ath10k_pci_request_irq_legacy(ar);
-	case 1:
+	case ATH10K_PCI_IRQ_MSI:
 		return ath10k_pci_request_irq_msi(ar);
 	default:
-		return ath10k_pci_request_irq_msix(ar);
+		return -EINVAL;
 	}
 }
 
 static void ath10k_pci_free_irq(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int i;
 
-	/* There's at least one interrupt irregardless whether its legacy INTR
-	 * or MSI or MSI-X */
-	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
-		free_irq(ar_pci->pdev->irq + i, ar);
+	free_irq(ar_pci->pdev->irq, ar);
 }
 
 void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int i;
 
 	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
-	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
-		     (unsigned long)ar);
-
-	for (i = 0; i < CE_COUNT; i++) {
-		ar_pci->pipe_info[i].ar_pci = ar_pci;
-		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
-			     (unsigned long)&ar_pci->pipe_info[i]);
-	}
 }
 
 static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -2772,20 +2701,9 @@
 		ath10k_info(ar, "limiting irq mode to: %d\n",
 			    ath10k_pci_irq_mode);
 
-	/* Try MSI-X */
-	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
-		ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
-		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
-					   ar_pci->num_msi_intrs);
-		if (ret > 0)
-			return 0;
-
-		/* fall-through */
-	}
-
 	/* Try MSI */
 	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
-		ar_pci->num_msi_intrs = 1;
+		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
 		ret = pci_enable_msi(ar_pci->pdev);
 		if (ret == 0)
 			return 0;
@@ -2801,7 +2719,7 @@
 	 * This write might get lost if target has NOT written BAR.
 	 * For now, fix the race by repeating the write in below
 	 * synchronization checking. */
-	ar_pci->num_msi_intrs = 0;
+	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
 
 	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
 			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
@@ -2819,8 +2737,8 @@
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-	switch (ar_pci->num_msi_intrs) {
-	case 0:
+	switch (ar_pci->oper_irq_mode) {
+	case ATH10K_PCI_IRQ_LEGACY:
 		ath10k_pci_deinit_irq_legacy(ar);
 		break;
 	default:
@@ -2858,7 +2776,7 @@
 		if (val & FW_IND_INITIALIZED)
 			break;
 
-		if (ar_pci->num_msi_intrs == 0)
+		if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
 			/* Fix potential race by repeating CORE_BASE writes */
 			ath10k_pci_enable_legacy_irq(ar);
 
@@ -3136,8 +3054,8 @@
 		goto err_sleep;
 	}
 
-	ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
-		    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
+	ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
+		    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
 		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
 
 	ret = ath10k_pci_request_irq(ar);
@@ -3255,7 +3173,6 @@
 MODULE_LICENSE("Dual BSD/GPL");
 
 /* QCA988x 2.0 firmware files */
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 249c73a..959dc32 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -148,9 +148,6 @@
 
 	/* protects compl_free and num_send_allowed */
 	spinlock_t pipe_lock;
-
-	struct ath10k_pci *ar_pci;
-	struct tasklet_struct intr;
 };
 
 struct ath10k_pci_supp_chip {
@@ -164,6 +161,12 @@
 	int (*get_num_banks)(struct ath10k *ar);
 };
 
+enum ath10k_pci_irq_mode {
+	ATH10K_PCI_IRQ_AUTO = 0,
+	ATH10K_PCI_IRQ_LEGACY = 1,
+	ATH10K_PCI_IRQ_MSI = 2,
+};
+
 struct ath10k_pci {
 	struct pci_dev *pdev;
 	struct device *dev;
@@ -171,14 +174,10 @@
 	void __iomem *mem;
 	size_t mem_len;
 
-	/*
-	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
-	 * interrupts.
-	 */
-	int num_msi_intrs;
+	/* Operating interrupt mode */
+	enum ath10k_pci_irq_mode oper_irq_mode;
 
 	struct tasklet_struct intr_tq;
-	struct tasklet_struct msi_fw_err;
 
 	struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
 
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index 3ca3fae..0c5f586 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -134,27 +134,17 @@
 	return seg_info;
 }
 
-int ath10k_swap_code_seg_configure(struct ath10k *ar,
-				   enum ath10k_swap_code_seg_bin_type type)
+int ath10k_swap_code_seg_configure(struct ath10k *ar)
 {
 	int ret;
 	struct ath10k_swap_code_seg_info *seg_info = NULL;
 
-	switch (type) {
-	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
-		if (!ar->swap.firmware_swap_code_seg_info)
-			return 0;
-
-		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
-		seg_info = ar->swap.firmware_swap_code_seg_info;
-		break;
-	default:
-	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
-	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
-		ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
-			    type);
+	if (!ar->swap.firmware_swap_code_seg_info)
 		return 0;
-	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+
+	seg_info = ar->swap.firmware_swap_code_seg_info;
 
 	ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
 				      &seg_info->seg_hw_info,
@@ -171,8 +161,13 @@
 void ath10k_swap_code_seg_release(struct ath10k *ar)
 {
 	ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
-	ar->swap.firmware_codeswap_data = NULL;
-	ar->swap.firmware_codeswap_len = 0;
+
+	/* FIXME: these two assignments look to be in the wrong place! Shouldn't
+	 * they be in ath10k_core_free_firmware_files() like the rest?
+	 */
+	ar->normal_mode_fw.fw_file.codeswap_data = NULL;
+	ar->normal_mode_fw.fw_file.codeswap_len = 0;
+
 	ar->swap.firmware_swap_code_seg_info = NULL;
 }
 
@@ -180,20 +175,23 @@
 {
 	int ret;
 	struct ath10k_swap_code_seg_info *seg_info;
+	const void *codeswap_data;
+	size_t codeswap_len;
 
-	if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+	codeswap_data = ar->normal_mode_fw.fw_file.codeswap_data;
+	codeswap_len = ar->normal_mode_fw.fw_file.codeswap_len;
+
+	if (!codeswap_len || !codeswap_data)
 		return 0;
 
-	seg_info = ath10k_swap_code_seg_alloc(ar,
-					      ar->swap.firmware_codeswap_len);
+	seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len);
 	if (!seg_info) {
 		ath10k_err(ar, "failed to allocate fw code swap segment\n");
 		return -ENOMEM;
 	}
 
 	ret = ath10k_swap_code_seg_fill(ar, seg_info,
-					ar->swap.firmware_codeswap_data,
-					ar->swap.firmware_codeswap_len);
+					codeswap_data, codeswap_len);
 
 	if (ret) {
 		ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index 5c89952..36991c7 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -39,12 +39,6 @@
 	struct ath10k_swap_code_seg_tail tail;
 } __packed;
 
-enum ath10k_swap_code_seg_bin_type {
-	 ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
-	 ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
-	 ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
-};
-
 struct ath10k_swap_code_seg_hw_info {
 	/* Swap binary image size */
 	__le32 swap_size;
@@ -64,8 +58,7 @@
 	dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
 };
 
-int ath10k_swap_code_seg_configure(struct ath10k *ar,
-				   enum ath10k_swap_code_seg_bin_type type);
+int ath10k_swap_code_seg_configure(struct ath10k *ar);
 void ath10k_swap_code_seg_release(struct ath10k *ar);
 int ath10k_swap_code_seg_init(struct ath10k *ar);
 
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 361f143..8e24099 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -438,7 +438,7 @@
 	((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
 #define HI_DEV_LPL_TYPE_GET(_devix) \
 	(HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
-	 (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
+	 (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2)))
 
 #define HOST_INTEREST_SMPS_IS_ALLOWED() \
 	((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 1d5a2fd..120f423 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -139,127 +139,8 @@
 	return cfg80211_testmode_reply(skb);
 }
 
-static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar)
-{
-	size_t len, magic_len, ie_len;
-	struct ath10k_fw_ie *hdr;
-	char filename[100];
-	__le32 *version;
-	const u8 *data;
-	int ie_id, ret;
-
-	snprintf(filename, sizeof(filename), "%s/%s",
-		 ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE);
-
-	/* load utf firmware image */
-	ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
-	if (ret) {
-		ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
-			    filename, ret);
-		return ret;
-	}
-
-	data = ar->testmode.utf->data;
-	len = ar->testmode.utf->size;
-
-	/* FIXME: call release_firmware() in error cases */
-
-	/* magic also includes the null byte, check that as well */
-	magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
-
-	if (len < magic_len) {
-		ath10k_err(ar, "utf firmware file is too small to contain magic\n");
-		ret = -EINVAL;
-		goto err;
-	}
-
-	if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
-		ath10k_err(ar, "invalid firmware magic\n");
-		ret = -EINVAL;
-		goto err;
-	}
-
-	/* jump over the padding */
-	magic_len = ALIGN(magic_len, 4);
-
-	len -= magic_len;
-	data += magic_len;
-
-	/* loop elements */
-	while (len > sizeof(struct ath10k_fw_ie)) {
-		hdr = (struct ath10k_fw_ie *)data;
-
-		ie_id = le32_to_cpu(hdr->id);
-		ie_len = le32_to_cpu(hdr->len);
-
-		len -= sizeof(*hdr);
-		data += sizeof(*hdr);
-
-		if (len < ie_len) {
-			ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
-				   ie_id, len, ie_len);
-			ret = -EINVAL;
-			goto err;
-		}
-
-		switch (ie_id) {
-		case ATH10K_FW_IE_FW_VERSION:
-			if (ie_len > sizeof(ar->testmode.utf_version) - 1)
-				break;
-
-			memcpy(ar->testmode.utf_version, data, ie_len);
-			ar->testmode.utf_version[ie_len] = '\0';
-
-			ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
-				   "testmode found fw utf version %s\n",
-				   ar->testmode.utf_version);
-			break;
-		case ATH10K_FW_IE_TIMESTAMP:
-			/* ignore timestamp, but don't warn about it either */
-			break;
-		case ATH10K_FW_IE_FW_IMAGE:
-			ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
-				   "testmode found fw image ie (%zd B)\n",
-				   ie_len);
-
-			ar->testmode.utf_firmware_data = data;
-			ar->testmode.utf_firmware_len = ie_len;
-			break;
-		case ATH10K_FW_IE_WMI_OP_VERSION:
-			if (ie_len != sizeof(u32))
-				break;
-			version = (__le32 *)data;
-			ar->testmode.op_version = le32_to_cpup(version);
-			ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n",
-				   ar->testmode.op_version);
-			break;
-		default:
-			ath10k_warn(ar, "Unknown testmode FW IE: %u\n",
-				    le32_to_cpu(hdr->id));
-			break;
-		}
-		/* jump over the padding */
-		ie_len = ALIGN(ie_len, 4);
-
-		len -= ie_len;
-		data += ie_len;
-	}
-
-	if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) {
-		ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n");
-		ret = -EINVAL;
-		goto err;
-	}
-
-	return 0;
-
-err:
-	release_firmware(ar->testmode.utf);
-
-	return ret;
-}
-
-static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
+					      struct ath10k_fw_file *fw_file)
 {
 	char filename[100];
 	int ret;
@@ -268,7 +149,7 @@
 		 ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
 
 	/* load utf firmware image */
-	ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
+	ret = request_firmware(&fw_file->firmware, filename, ar->dev);
 	if (ret) {
 		ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
 			    filename, ret);
@@ -281,24 +162,27 @@
 	 * correct WMI interface.
 	 */
 
-	ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
-	ar->testmode.utf_firmware_data = ar->testmode.utf->data;
-	ar->testmode.utf_firmware_len = ar->testmode.utf->size;
+	fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+	fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+	fw_file->firmware_data = fw_file->firmware->data;
+	fw_file->firmware_len = fw_file->firmware->size;
 
 	return 0;
 }
 
 static int ath10k_tm_fetch_firmware(struct ath10k *ar)
 {
+	struct ath10k_fw_components *utf_mode_fw;
 	int ret;
 
-	ret = ath10k_tm_fetch_utf_firmware_api_2(ar);
+	ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE,
+					       &ar->testmode.utf_mode_fw.fw_file);
 	if (ret == 0) {
 		ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
-		return 0;
+		goto out;
 	}
 
-	ret = ath10k_tm_fetch_utf_firmware_api_1(ar);
+	ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file);
 	if (ret) {
 		ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret);
 		return ret;
@@ -306,6 +190,21 @@
 
 	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1");
 
+out:
+	utf_mode_fw = &ar->testmode.utf_mode_fw;
+
+	/* Use the same board data file as the normal firmware uses (but
+	 * it's still "owned" by normal_mode_fw, so we shouldn't free it).
+	 */
+	utf_mode_fw->board_data = ar->normal_mode_fw.board_data;
+	utf_mode_fw->board_len = ar->normal_mode_fw.board_len;
+
+	if (!utf_mode_fw->fw_file.otp_data) {
+		ath10k_info(ar, "utf.bin didn't contain otp binary, taking it from the normal mode firmware");
+		utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data;
+		utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len;
+	}
+
 	return 0;
 }
 
@@ -329,7 +228,7 @@
 		goto err;
 	}
 
-	if (WARN_ON(ar->testmode.utf != NULL)) {
+	if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) {
 		/* utf image is already downloaded, it shouldn't be */
 		ret = -EEXIST;
 		goto err;
@@ -344,27 +243,19 @@
 	spin_lock_bh(&ar->data_lock);
 	ar->testmode.utf_monitor = true;
 	spin_unlock_bh(&ar->data_lock);
-	BUILD_BUG_ON(sizeof(ar->fw_features) !=
-		     sizeof(ar->testmode.orig_fw_features));
-
-	memcpy(ar->testmode.orig_fw_features, ar->fw_features,
-	       sizeof(ar->fw_features));
-	ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
-	memset(ar->fw_features, 0, sizeof(ar->fw_features));
-
-	ar->wmi.op_version = ar->testmode.op_version;
 
 	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
-		   ar->wmi.op_version);
+		   ar->testmode.utf_mode_fw.fw_file.wmi_op_version);
 
 	ret = ath10k_hif_power_up(ar);
 	if (ret) {
 		ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
 		ar->state = ATH10K_STATE_OFF;
-		goto err_fw_features;
+		goto err_release_utf_mode_fw;
 	}
 
-	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
+				&ar->testmode.utf_mode_fw);
 	if (ret) {
 		ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
 		ar->state = ATH10K_STATE_OFF;
@@ -373,8 +264,8 @@
 
 	ar->state = ATH10K_STATE_UTF;
 
-	if (strlen(ar->testmode.utf_version) > 0)
-		ver = ar->testmode.utf_version;
+	if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0)
+		ver = ar->testmode.utf_mode_fw.fw_file.fw_version;
 	else
 		ver = "API 1";
 
@@ -387,14 +278,9 @@
 err_power_down:
 	ath10k_hif_power_down(ar);
 
-err_fw_features:
-	/* return the original firmware features */
-	memcpy(ar->fw_features, ar->testmode.orig_fw_features,
-	       sizeof(ar->fw_features));
-	ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
-
-	release_firmware(ar->testmode.utf);
-	ar->testmode.utf = NULL;
+err_release_utf_mode_fw:
+	release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+	ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
 
 err:
 	mutex_unlock(&ar->conf_mutex);
@@ -415,13 +301,8 @@
 
 	spin_unlock_bh(&ar->data_lock);
 
-	/* return the original firmware features */
-	memcpy(ar->fw_features, ar->testmode.orig_fw_features,
-	       sizeof(ar->fw_features));
-	ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
-
-	release_firmware(ar->testmode.utf);
-	ar->testmode.utf = NULL;
+	release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+	ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
 
 	ar->state = ATH10K_STATE_OFF;
 }
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index c9223e9..3abb97f 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -20,7 +20,7 @@
 #define ATH10K_QUIET_PERIOD_MIN         25
 #define ATH10K_QUIET_START_OFFSET       10
 #define ATH10K_HWMON_NAME_LEN           15
-#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
 #define ATH10K_THERMAL_THROTTLE_MAX     100
 
 struct ath10k_thermal {
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index fbfb608..576e7c4 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -49,25 +49,25 @@
 	spin_unlock_bh(&ar->data_lock);
 }
 
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
-			  const struct htt_tx_done *tx_done)
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+			 const struct htt_tx_done *tx_done)
 {
 	struct ath10k *ar = htt->ar;
 	struct device *dev = ar->dev;
 	struct ieee80211_tx_info *info;
+	struct ieee80211_txq *txq;
 	struct ath10k_skb_cb *skb_cb;
+	struct ath10k_txq *artxq;
 	struct sk_buff *msdu;
-	bool limit_mgmt_desc = false;
 
 	ath10k_dbg(ar, ATH10K_DBG_HTT,
-		   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
-		   tx_done->msdu_id, !!tx_done->discard,
-		   !!tx_done->no_ack, !!tx_done->success);
+		   "htt tx completion msdu_id %u status %d\n",
+		   tx_done->msdu_id, tx_done->status);
 
 	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
 		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
 			    tx_done->msdu_id);
-		return;
+		return -EINVAL;
 	}
 
 	spin_lock_bh(&htt->tx_lock);
@@ -76,17 +76,18 @@
 		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
 			    tx_done->msdu_id);
 		spin_unlock_bh(&htt->tx_lock);
-		return;
+		return -ENOENT;
 	}
 
 	skb_cb = ATH10K_SKB_CB(msdu);
+	txq = skb_cb->txq;
+	artxq = (void *)txq->drv_priv;
 
-	if (unlikely(skb_cb->flags & ATH10K_SKB_F_MGMT) &&
-	    ar->hw_params.max_probe_resp_desc_thres)
-		limit_mgmt_desc = true;
+	if (txq)
+		artxq->num_fw_queued--;
 
 	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
-	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+	ath10k_htt_tx_dec_pending(htt);
 	if (htt->num_pending_tx == 0)
 		wake_up(&htt->empty_tx_wq);
 	spin_unlock_bh(&htt->tx_lock);
@@ -99,22 +100,24 @@
 	memset(&info->status, 0, sizeof(info->status));
 	trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
 
-	if (tx_done->discard) {
+	if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
 		ieee80211_free_txskb(htt->ar->hw, msdu);
-		return;
+		return 0;
 	}
 
 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
 		info->flags |= IEEE80211_TX_STAT_ACK;
 
-	if (tx_done->no_ack)
+	if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
 		info->flags &= ~IEEE80211_TX_STAT_ACK;
 
-	if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+	if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
+	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
 
 	ieee80211_tx_status(htt->ar->hw, msdu);
 	/* we do not own the msdu anymore */
+	return 0;
 }
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
@@ -127,7 +130,7 @@
 	list_for_each_entry(peer, &ar->peers, list) {
 		if (peer->vdev_id != vdev_id)
 			continue;
-		if (memcmp(peer->addr, addr, ETH_ALEN))
+		if (!ether_addr_equal(peer->addr, addr))
 			continue;
 
 		return peer;
@@ -163,7 +166,7 @@
 
 			(mapped == expect_mapped ||
 			 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
-		}), 3*HZ);
+		}), 3 * HZ);
 
 	if (time_left == 0)
 		return -ETIMEDOUT;
@@ -187,6 +190,13 @@
 	struct ath10k *ar = htt->ar;
 	struct ath10k_peer *peer;
 
+	if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+		ath10k_warn(ar,
+			    "received htt peer map event with idx out of bounds: %hu\n",
+			    ev->peer_id);
+		return;
+	}
+
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
 	if (!peer) {
@@ -203,6 +213,7 @@
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
 		   ev->vdev_id, ev->addr, ev->peer_id);
 
+	ar->peer_map[ev->peer_id] = peer;
 	set_bit(ev->peer_id, peer->peer_ids);
 exit:
 	spin_unlock_bh(&ar->data_lock);
@@ -214,6 +225,13 @@
 	struct ath10k *ar = htt->ar;
 	struct ath10k_peer *peer;
 
+	if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+		ath10k_warn(ar,
+			    "received htt peer unmap event with idx out of bounds: %hu\n",
+			    ev->peer_id);
+		return;
+	}
+
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
 	if (!peer) {
@@ -225,6 +243,7 @@
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
 		   peer->vdev_id, peer->addr, ev->peer_id);
 
+	ar->peer_map[ev->peer_id] = NULL;
 	clear_bit(ev->peer_id, peer->peer_ids);
 
 	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index a90e09f..e7ea1ae 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,8 +19,8 @@
 
 #include "htt.h"
 
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
-			  const struct htt_tx_done *tx_done);
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+			 const struct htt_tx_done *tx_done);
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
 				     const u8 *addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 32ab34e..7fb00dc 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -186,6 +186,9 @@
 							u8 enable,
 							u32 detect_level,
 							u32 detect_margin);
+	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
+					       enum wmi_host_platform_type type,
+					       u32 fw_feature_bitmap);
 	int (*get_vdev_subtype)(struct ath10k *ar,
 				enum wmi_vdev_subtype subtype);
 };
@@ -1330,6 +1333,26 @@
 }
 
 static inline int
+ath10k_wmi_ext_resource_config(struct ath10k *ar,
+			       enum wmi_host_platform_type type,
+			       u32 fw_feature_bitmap)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->ext_resource_config)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->ext_resource_config(ar, type,
+					       fw_feature_bitmap);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->ext_resource_cfg_cmdid);
+}
+
+static inline int
 ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
 {
 	if (!ar->wmi.ops->get_vdev_subtype)
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 1085932..e09337e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3409,6 +3409,7 @@
 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static const struct wmi_ops wmi_tlv_ops = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index dd67859..b8aa600 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -968,8 +968,8 @@
 
 #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
 	((svc_id) < (len) && \
-	 __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
-	 BIT((svc_id)%(sizeof(u32))))
+	 __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+	 BIT((svc_id) % (sizeof(u32))))
 
 #define SVCMAP(x, y, len) \
 	do { \
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 7026138..621019f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -705,6 +705,7 @@
 	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
 	.pdev_bss_chan_info_request_cmdid =
 			WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+	.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
 };
 
 /* MAIN WMI VDEV param map */
@@ -780,6 +781,7 @@
 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 /* 10.X WMI VDEV param map */
@@ -855,6 +857,7 @@
 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -929,6 +932,7 @@
 	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
 	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT,
 };
 
 static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@@ -1004,6 +1008,7 @@
 	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
 	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
 	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+	.set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
 };
 
 static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -1803,7 +1808,7 @@
 			ret = -ESHUTDOWN;
 
 		(ret != -EAGAIN);
-	}), 3*HZ);
+	}), 3 * HZ);
 
 	if (ret)
 		dev_kfree_skb_any(skb);
@@ -2099,34 +2104,6 @@
 	return 0;
 }
 
-static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
-{
-	enum ieee80211_band band;
-
-	switch (phy_mode) {
-	case MODE_11A:
-	case MODE_11NA_HT20:
-	case MODE_11NA_HT40:
-	case MODE_11AC_VHT20:
-	case MODE_11AC_VHT40:
-	case MODE_11AC_VHT80:
-		band = IEEE80211_BAND_5GHZ;
-		break;
-	case MODE_11G:
-	case MODE_11B:
-	case MODE_11GONLY:
-	case MODE_11NG_HT20:
-	case MODE_11NG_HT40:
-	case MODE_11AC_VHT20_2G:
-	case MODE_11AC_VHT40_2G:
-	case MODE_11AC_VHT80_2G:
-	default:
-		band = IEEE80211_BAND_2GHZ;
-	}
-
-	return band;
-}
-
 /* If keys are configured, HW decrypts all frames
  * with protected bit set. Mark such frames as decrypted.
  */
@@ -2167,10 +2144,13 @@
 	struct wmi_mgmt_rx_event_v1 *ev_v1;
 	struct wmi_mgmt_rx_event_v2 *ev_v2;
 	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+	struct wmi_mgmt_rx_ext_info *ext_info;
 	size_t pull_len;
 	u32 msdu_len;
+	u32 len;
 
-	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
+		     ar->running_fw->fw_file.fw_features)) {
 		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
 		ev_hdr = &ev_v2->hdr.v1;
 		pull_len = sizeof(*ev_v2);
@@ -2195,6 +2175,12 @@
 	if (skb->len < msdu_len)
 		return -EPROTO;
 
+	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+		memcpy(&arg->ext_info, ext_info,
+		       sizeof(struct wmi_mgmt_rx_ext_info));
+	}
 	/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
 	 * trailer with credit update. Trim the excess garbage.
 	 */
@@ -2211,6 +2197,8 @@
 	struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
 	size_t pull_len;
 	u32 msdu_len;
+	struct wmi_mgmt_rx_ext_info *ext_info;
+	u32 len;
 
 	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
 	ev_hdr = &ev->hdr;
@@ -2231,6 +2219,13 @@
 	if (skb->len < msdu_len)
 		return -EPROTO;
 
+	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+		memcpy(&arg->ext_info, ext_info,
+		       sizeof(struct wmi_mgmt_rx_ext_info));
+	}
+
 	/* Make sure bytes added for padding are removed. */
 	skb_trim(skb, msdu_len);
 
@@ -2281,14 +2276,19 @@
 	if (rx_status & WMI_RX_STATUS_ERR_MIC)
 		status->flag |= RX_FLAG_MMIC_ERROR;
 
+	if (rx_status & WMI_RX_STATUS_EXT_INFO) {
+		status->mactime =
+			__le64_to_cpu(arg.ext_info.rx_mac_timestamp);
+		status->flag |= RX_FLAG_MACTIME_END;
+	}
 	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
 	 * MODE_11B. This means phy_mode is not a reliable source for the band
 	 * of mgmt rx.
 	 */
 	if (channel >= 1 && channel <= 14) {
-		status->band = IEEE80211_BAND_2GHZ;
+		status->band = NL80211_BAND_2GHZ;
 	} else if (channel >= 36 && channel <= 165) {
-		status->band = IEEE80211_BAND_5GHZ;
+		status->band = NL80211_BAND_5GHZ;
 	} else {
 		/* Shouldn't happen unless list of advertised channels to
 		 * mac80211 has been changed.
@@ -2298,7 +2298,7 @@
 		return 0;
 	}
 
-	if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
+	if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
 		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
 
 	sband = &ar->mac.sbands[status->band];
@@ -2310,6 +2310,12 @@
 	hdr = (struct ieee80211_hdr *)skb->data;
 	fc = le16_to_cpu(hdr->frame_control);
 
+	/* Firmware is guaranteed to report all essential management frames via
+	 * WMI while it can deliver some extra via HTT. Since there can be
+	 * duplicates, split the reporting wrt monitor/sniffing.
+	 */
+	status->flag |= RX_FLAG_SKIP_MONITOR;
+
 	ath10k_wmi_handle_wep_reauth(ar, skb, status);
 
 	/* FW delivers WEP Shared Auth frame with Protected Bit set and
@@ -2351,7 +2357,7 @@
 	struct ieee80211_supported_band *sband;
 	int band, ch, idx = 0;
 
-	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
 		sband = ar->hw->wiphy->bands[band];
 		if (!sband)
 			continue;
@@ -2612,6 +2618,16 @@
 	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
 }
 
+static void
+ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
+				struct ath10k_fw_stats_peer *dst)
+{
+	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+	dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+}
+
 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
 					    struct sk_buff *skb,
 					    struct ath10k_fw_stats *stats)
@@ -2865,11 +2881,8 @@
 		const struct wmi_10_2_4_ext_peer_stats *src;
 		struct ath10k_fw_stats_peer *dst;
 		int stats_len;
-		bool ext_peer_stats_support;
 
-		ext_peer_stats_support = test_bit(WMI_SERVICE_PEER_STATS,
-						  ar->wmi.svc_map);
-		if (ext_peer_stats_support)
+		if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
 			stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
 		else
 			stats_len = sizeof(struct wmi_10_2_4_peer_stats);
@@ -2886,7 +2899,7 @@
 
 		dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
 
-		if (ext_peer_stats_support)
+		if (ath10k_peer_stats_enabled(ar))
 			dst->rx_duration = __le32_to_cpu(src->rx_duration);
 		/* FIXME: expose 10.2 specific values */
 
@@ -2905,6 +2918,7 @@
 	u32 num_pdev_ext_stats;
 	u32 num_vdev_stats;
 	u32 num_peer_stats;
+	u32 stats_id;
 	int i;
 
 	if (!skb_pull(skb, sizeof(*ev)))
@@ -2914,6 +2928,7 @@
 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
 	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+	stats_id = __le32_to_cpu(ev->stats_id);
 
 	for (i = 0; i < num_pdev_stats; i++) {
 		const struct wmi_10_4_pdev_stats *src;
@@ -2953,22 +2968,28 @@
 	/* fw doesn't implement vdev stats */
 
 	for (i = 0; i < num_peer_stats; i++) {
-		const struct wmi_10_4_peer_stats *src;
+		const struct wmi_10_4_peer_extd_stats *src;
 		struct ath10k_fw_stats_peer *dst;
+		int stats_len;
+		bool extd_peer_stats = !!(stats_id & WMI_10_4_STAT_PEER_EXTD);
+
+		if (extd_peer_stats)
+			stats_len = sizeof(struct wmi_10_4_peer_extd_stats);
+		else
+			stats_len = sizeof(struct wmi_10_4_peer_stats);
 
 		src = (void *)skb->data;
-		if (!skb_pull(skb, sizeof(*src)))
+		if (!skb_pull(skb, stats_len))
 			return -EPROTO;
 
 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
 		if (!dst)
 			continue;
 
-		ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
-		dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
-		dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
-		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+		ath10k_wmi_10_4_pull_peer_stats(&src->common, dst);
 		/* FIXME: expose 10.4 specific values */
+		if (extd_peer_stats)
+			dst->rx_duration = __le32_to_cpu(src->rx_duration);
 
 		list_add_tail(&dst->list, &stats->peers);
 	}
@@ -4584,10 +4605,6 @@
 	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
 			arg.service_map, arg.service_map_len);
 
-	/* only manually set fw features when not using FW IE format */
-	if (ar->fw_api == 1 && ar->fw_version_build > 636)
-		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
-
 	if (ar->num_rf_chains > ar->max_spatial_stream) {
 		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
 			    ar->num_rf_chains, ar->max_spatial_stream);
@@ -4617,10 +4634,16 @@
 	}
 
 	if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+		if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+			     ar->running_fw->fw_file.fw_features))
+			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
+					       ar->max_num_vdevs;
+		else
+			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+					       ar->max_num_vdevs;
+
 		ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
 				    ar->max_num_vdevs;
-		ar->num_active_peers = ar->hw_params.qcache_active_peers +
-				       ar->max_num_vdevs;
 		ar->num_tids = ar->num_active_peers * 2;
 		ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
 	}
@@ -5517,7 +5540,8 @@
 
 	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
 	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
-	if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+
+	if (ath10k_peer_stats_enabled(ar)) {
 		config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
 		config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
 	} else {
@@ -5579,7 +5603,7 @@
 	    test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
 		features |= WMI_10_2_COEX_GPIO;
 
-	if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+	if (ath10k_peer_stats_enabled(ar))
 		features |= WMI_10_2_PEER_STATS;
 
 	cmd->resource_config.feature_mask = __cpu_to_le32(features);
@@ -5800,9 +5824,8 @@
 		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
 
 		for (i = 0; i < arg->n_bssids; i++)
-			memcpy(&bssids->bssid_list[i],
-			       arg->bssids[i].bssid,
-			       ETH_ALEN);
+			ether_addr_copy(bssids->bssid_list[i].addr,
+					arg->bssids[i].bssid);
 
 		ptr += sizeof(*bssids);
 		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
@@ -7484,6 +7507,28 @@
 	return -ENOTSUPP;
 }
 
+static struct sk_buff *
+ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
+				    enum wmi_host_platform_type type,
+				    u32 fw_feature_bitmap)
+{
+	struct wmi_ext_resource_config_10_4_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
+	cmd->host_platform_config = __cpu_to_le32(type);
+	cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi ext resource config host type %d firmware feature bitmap %08x\n",
+		   type, fw_feature_bitmap);
+	return skb;
+}
+
 static const struct wmi_ops wmi_ops = {
 	.rx = ath10k_wmi_op_rx,
 	.map_svc = wmi_main_svc_map,
@@ -7810,6 +7855,7 @@
 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
 	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
+	.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
 
 	/* shared with 10.2 */
 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
@@ -7819,7 +7865,7 @@
 
 int ath10k_wmi_attach(struct ath10k *ar)
 {
-	switch (ar->wmi.op_version) {
+	switch (ar->running_fw->fw_file.wmi_op_version) {
 	case ATH10K_FW_WMI_OP_VERSION_10_4:
 		ar->wmi.ops = &wmi_10_4_ops;
 		ar->wmi.cmd = &wmi_10_4_cmd_map;
@@ -7861,7 +7907,7 @@
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
 	case ATH10K_FW_WMI_OP_VERSION_MAX:
 		ath10k_err(ar, "unsupported WMI op version: %d\n",
-			   ar->wmi.op_version);
+			   ar->running_fw->fw_file.wmi_op_version);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4d3cbc4..db25535 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -180,6 +180,9 @@
 	WMI_SERVICE_MESH_NON_11S,
 	WMI_SERVICE_PEER_STATS,
 	WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+	WMI_SERVICE_TX_MODE_PUSH_ONLY,
+	WMI_SERVICE_TX_MODE_PUSH_PULL,
+	WMI_SERVICE_TX_MODE_DYNAMIC,
 
 	/* keep last */
 	WMI_SERVICE_MAX,
@@ -302,6 +305,9 @@
 	WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
 	WMI_10_4_SERVICE_PEER_STATS,
 	WMI_10_4_SERVICE_MESH_11S,
+	WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+	WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+	WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
 };
 
 static inline char *wmi_service_name(int service_id)
@@ -396,6 +402,9 @@
 	SVCSTR(WMI_SERVICE_MESH_NON_11S);
 	SVCSTR(WMI_SERVICE_PEER_STATS);
 	SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+	SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
+	SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
+	SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
 	default:
 		return NULL;
 	}
@@ -405,8 +414,8 @@
 
 #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
 	((svc_id) < (len) && \
-	 __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
-	 BIT((svc_id)%(sizeof(u32))))
+	 __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+	 BIT((svc_id) % (sizeof(u32))))
 
 #define SVCMAP(x, y, len) \
 	do { \
@@ -643,6 +652,12 @@
 	       WMI_SERVICE_PEER_STATS, len);
 	SVCMAP(WMI_10_4_SERVICE_MESH_11S,
 	       WMI_SERVICE_MESH_11S, len);
+	SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+	       WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
+	SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+	       WMI_SERVICE_TX_MODE_PUSH_PULL, len);
+	SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
+	       WMI_SERVICE_TX_MODE_DYNAMIC, len);
 }
 
 #undef SVCMAP
@@ -816,6 +831,7 @@
 	u32 set_cca_params_cmdid;
 	u32 pdev_bss_chan_info_request_cmdid;
 	u32 pdev_enable_adaptive_cca_cmdid;
+	u32 ext_resource_cfg_cmdid;
 };
 
 /*
@@ -1308,7 +1324,7 @@
 	WMI_10X_PDEV_TPC_CONFIG_EVENTID,
 
 	WMI_10X_GPIO_INPUT_EVENTID,
-	WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+	WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1,
 };
 
 enum wmi_10_2_cmd_id {
@@ -2041,8 +2057,8 @@
 	struct wlan_host_mem_req mem_reqs[0];
 } __packed;
 
-#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
-#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5 * HZ)
 
 struct wmi_ready_event {
 	__le32 sw_version;
@@ -2660,13 +2676,43 @@
 	 */
 	__le32 iphdr_pad_config;
 
-	/* qwrap configuration
+	/* qwrap configuration (bits 15-0)
 	 * 1  - This is qwrap configuration
 	 * 0  - This is not qwrap
+	 *
+	 * Bits 31-16 are alloc_frag_desc_for_data_pkt (1 enables, 0 disables).
+	 * In order to get ack-RSSI reporting and to specify the tx-rate for
+	 * individual frames, this option must be enabled.  This uses an extra
+	 * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it.
 	 */
 	__le32 qwrap_config;
 } __packed;
 
+/**
+ * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags
+ * @WMI_10_4_LTEU_SUPPORT: LTEU config
+ * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config
+ * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan
+ * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan
+ * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats
+ * @WMI_10_4_PEER_STATS: Per station stats
+ */
+enum wmi_10_4_feature_mask {
+	WMI_10_4_LTEU_SUPPORT			= BIT(0),
+	WMI_10_4_COEX_GPIO_SUPPORT		= BIT(1),
+	WMI_10_4_AUX_RADIO_SPECTRAL_INTF	= BIT(2),
+	WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF	= BIT(3),
+	WMI_10_4_BSS_CHANNEL_INFO_64		= BIT(4),
+	WMI_10_4_PEER_STATS			= BIT(5),
+};
+
+struct wmi_ext_resource_config_10_4_cmd {
+	/* contains enum wmi_host_platform_type */
+	__le32 host_platform_config;
+	/* see enum wmi_10_4_feature_mask */
+	__le32 fw_feature_bitmap;
+};
+
 /* structure describing host memory chunk. */
 struct host_memory_chunk {
 	/* id of the request that is passed up in service ready */
@@ -3037,11 +3083,17 @@
 	u8 buf[0];
 } __packed;
 
+struct wmi_mgmt_rx_ext_info {
+	__le64 rx_mac_timestamp;
+} __packed __aligned(4);
+
 #define WMI_RX_STATUS_OK			0x00
 #define WMI_RX_STATUS_ERR_CRC			0x01
 #define WMI_RX_STATUS_ERR_DECRYPT		0x08
 #define WMI_RX_STATUS_ERR_MIC			0x10
 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS	0x20
+/* Extension data at the end of mgmt frame */
+#define WMI_RX_STATUS_EXT_INFO		0x40
 
 #define PHY_ERROR_GEN_SPECTRAL_SCAN		0x26
 #define PHY_ERROR_GEN_FALSE_RADAR_EXT		0x24
@@ -4072,6 +4124,13 @@
 	WMI_STAT_VDEV_RATE = BIT(5),
 };
 
+enum wmi_10_4_stats_id {
+	WMI_10_4_STAT_PEER		= BIT(0),
+	WMI_10_4_STAT_AP		= BIT(1),
+	WMI_10_4_STAT_INST		= BIT(2),
+	WMI_10_4_STAT_PEER_EXTD		= BIT(3),
+};
+
 struct wlan_inst_rssi_args {
 	__le16 cfg_retry_count;
 	__le16 retry_count;
@@ -4271,6 +4330,15 @@
 	__le32 peer_rssi_changed;
 } __packed;
 
+struct wmi_10_4_peer_extd_stats {
+	struct wmi_10_4_peer_stats common;
+	struct wmi_mac_addr peer_macaddr;
+	__le32 inactive_time;
+	__le32 peer_chain_rssi;
+	__le32 rx_duration;
+	__le32 reserved[10];
+} __packed;
+
 struct wmi_10_2_pdev_ext_stats {
 	__le32 rx_rssi_comb;
 	__le32 rx_rssi[4];
@@ -4336,14 +4404,14 @@
 /*
  * Indicates that AP VDEV uses hidden ssid. only valid for
  *  AP/GO */
-#define WMI_VDEV_START_HIDDEN_SSID  (1<<0)
+#define WMI_VDEV_START_HIDDEN_SSID  (1 << 0)
 /*
  * Indicates if robust management frame/management frame
  *  protection is enabled. For GO/AP vdevs, it indicates that
  *  it may support station/client associations with RMF enabled.
  *  For STA/client vdevs, it indicates that sta will
  *  associate with AP with RMF enabled. */
-#define WMI_VDEV_START_PMF_ENABLED  (1<<1)
+#define WMI_VDEV_START_PMF_ENABLED  (1 << 1)
 
 struct wmi_p2p_noa_descriptor {
 	__le32 type_count; /* 255: continuous schedule, 0: reserved */
@@ -4582,6 +4650,7 @@
 	u32 meru_vc;
 	u32 rx_decap_type;
 	u32 bw_nss_ratemask;
+	u32 set_tsf;
 };
 
 #define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -4838,6 +4907,7 @@
 	WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
 	WMI_10X_VDEV_PARAM_VHT_SGIMASK,
 	WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+	WMI_10X_VDEV_PARAM_TSF_INCREMENT,
 };
 
 enum wmi_10_4_vdev_param {
@@ -4907,6 +4977,12 @@
 	WMI_10_4_VDEV_PARAM_MERU_VC,
 	WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
 	WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+	WMI_10_4_VDEV_PARAM_SENSOR_AP,
+	WMI_10_4_VDEV_PARAM_BEACON_RATE,
+	WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS,
+	WMI_10_4_VDEV_PARAM_STA_KICKOUT,
+	WMI_10_4_VDEV_PARAM_CAPABILITIES,
+	WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
 };
 
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
@@ -5281,7 +5357,7 @@
 #define WMI_UAPSD_AC_TYPE_TRIG 1
 
 #define WMI_UAPSD_AC_BIT_MASK(ac, type) \
-	((type ==  WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+	((type ==  WMI_UAPSD_AC_TYPE_DELI) ? (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
 
 enum wmi_sta_ps_param_uapsd {
 	WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
@@ -5696,7 +5772,7 @@
 	 * the rates are filled from least significant byte to most
 	 * significant byte.
 	 */
-	__le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+	__le32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
 } __packed;
 
 struct wmi_rate_set_arg {
@@ -6116,6 +6192,7 @@
 	__le32 phy_mode;
 	__le32 buf_len;
 	__le32 status; /* %WMI_RX_STATUS_ */
+	struct wmi_mgmt_rx_ext_info ext_info;
 };
 
 struct wmi_ch_info_ev_arg {
@@ -6401,6 +6478,11 @@
 	__le32 cca_detect_margin;
 } __packed;
 
+enum wmi_host_platform_type {
+	WMI_HOST_PLATFORM_HIGH_PERF,
+	WMI_HOST_PLATFORM_LOW_PERF,
+};
+
 struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 8e02b38..77100d4 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -233,7 +233,7 @@
 	mutex_lock(&ar->conf_mutex);
 
 	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
-			      ar->fw_features))) {
+			      ar->running_fw->fw_file.fw_features))) {
 		ret = 1;
 		goto exit;
 	}
@@ -285,7 +285,7 @@
 	mutex_lock(&ar->conf_mutex);
 
 	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
-			      ar->fw_features))) {
+			      ar->running_fw->fw_file.fw_features))) {
 		ret = 1;
 		goto exit;
 	}
@@ -325,7 +325,8 @@
 
 int ath10k_wow_init(struct ath10k *ar)
 {
-	if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+	if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+		      ar->running_fw->fw_file.fw_features))
 		return 0;
 
 	if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 38be270..0624333 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -279,7 +279,7 @@
 		if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
 			ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
 		return;
-	} else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
+	} else if (ah->ah_current_channel->band == NL80211_BAND_2GHZ) {
 		/* beacon RSSI is low. In B/G mode turn off OFDM weak signal
 		 * detect and zero firstep level to maximize CCK sensitivity */
 		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ba12f7f..67fedb6 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1265,10 +1265,10 @@
 	void __iomem		*iobase;	/* address of the device */
 	struct mutex		lock;		/* dev-level lock */
 	struct ieee80211_hw	*hw;		/* IEEE 802.11 common */
-	struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+	struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
 	struct ieee80211_channel channels[ATH_CHAN_MAX];
-	struct ieee80211_rate	rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
-	s8			rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+	struct ieee80211_rate	rates[NUM_NL80211_BANDS][AR5K_MAX_RATES];
+	s8			rate_idx[NUM_NL80211_BANDS][AR5K_MAX_RATES];
 	enum nl80211_iftype	opmode;
 
 #ifdef CONFIG_ATH5K_DEBUG
@@ -1532,7 +1532,7 @@
 
 /* Protocol Control Unit Functions */
 /* Helpers */
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
+int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
 		int len, struct ieee80211_rate *rate, bool shortpre);
 unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
 unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
@@ -1611,7 +1611,7 @@
 
 /* PHY functions */
 /* Misc PHY functions */
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band);
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band);
 int ath5k_hw_phy_disable(struct ath5k_hw *ah);
 /* Gain_F optimization */
 enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 66b6366..233054b 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -152,7 +152,7 @@
 	ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
 			0xffffffff;
 	ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
-			IEEE80211_BAND_5GHZ);
+			NL80211_BAND_5GHZ);
 
 	/* Try to identify radio chip based on its srev */
 	switch (ah->ah_radio_5ghz_revision & 0xf0) {
@@ -160,14 +160,14 @@
 		ah->ah_radio = AR5K_RF5111;
 		ah->ah_single_chip = false;
 		ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
-							IEEE80211_BAND_2GHZ);
+							NL80211_BAND_2GHZ);
 		break;
 	case AR5K_SREV_RAD_5112:
 	case AR5K_SREV_RAD_2112:
 		ah->ah_radio = AR5K_RF5112;
 		ah->ah_single_chip = false;
 		ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
-							IEEE80211_BAND_2GHZ);
+							NL80211_BAND_2GHZ);
 		break;
 	case AR5K_SREV_RAD_2413:
 		ah->ah_radio = AR5K_RF2413;
@@ -204,7 +204,7 @@
 			ah->ah_radio = AR5K_RF5111;
 			ah->ah_single_chip = false;
 			ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
-							IEEE80211_BAND_2GHZ);
+							NL80211_BAND_2GHZ);
 		} else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
 			   ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
 			   ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3d946d8..d98fd42 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -268,15 +268,15 @@
  * Returns true for the channel numbers used.
  */
 #ifdef CONFIG_ATH5K_TEST_CHANNELS
-static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
 {
 	return true;
 }
 
 #else
-static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
 {
-	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
+	if (band == NL80211_BAND_2GHZ && chan <= 14)
 		return true;
 
 	return	/* UNII 1,2 */
@@ -297,18 +297,18 @@
 		unsigned int mode, unsigned int max)
 {
 	unsigned int count, size, freq, ch;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	switch (mode) {
 	case AR5K_MODE_11A:
 		/* 1..220, but 2GHz frequencies are filtered by check_channel */
 		size = 220;
-		band = IEEE80211_BAND_5GHZ;
+		band = NL80211_BAND_5GHZ;
 		break;
 	case AR5K_MODE_11B:
 	case AR5K_MODE_11G:
 		size = 26;
-		band = IEEE80211_BAND_2GHZ;
+		band = NL80211_BAND_2GHZ;
 		break;
 	default:
 		ATH5K_WARN(ah, "bad mode, not copying channels\n");
@@ -363,13 +363,13 @@
 	int max_c, count_c = 0;
 	int i;
 
-	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
+	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < NUM_NL80211_BANDS);
 	max_c = ARRAY_SIZE(ah->channels);
 
 	/* 2GHz band */
-	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
-	sband->band = IEEE80211_BAND_2GHZ;
-	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];
+	sband = &ah->sbands[NL80211_BAND_2GHZ];
+	sband->band = NL80211_BAND_2GHZ;
+	sband->bitrates = &ah->rates[NL80211_BAND_2GHZ][0];
 
 	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
 		/* G mode */
@@ -381,7 +381,7 @@
 		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
 					AR5K_MODE_11G, max_c);
 
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+		hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
 		count_c = sband->n_channels;
 		max_c -= count_c;
 	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
@@ -407,7 +407,7 @@
 		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
 					AR5K_MODE_11B, max_c);
 
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+		hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
 		count_c = sband->n_channels;
 		max_c -= count_c;
 	}
@@ -415,9 +415,9 @@
 
 	/* 5GHz band, A mode */
 	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
-		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
-		sband->band = IEEE80211_BAND_5GHZ;
-		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];
+		sband = &ah->sbands[NL80211_BAND_5GHZ];
+		sband->band = NL80211_BAND_5GHZ;
+		sband->bitrates = &ah->rates[NL80211_BAND_5GHZ][0];
 
 		memcpy(sband->bitrates, &ath5k_rates[4],
 		       sizeof(struct ieee80211_rate) * 8);
@@ -427,7 +427,7 @@
 		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
 					AR5K_MODE_11A, max_c);
 
-		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+		hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
 	}
 	ath5k_setup_rate_idx(ah, sband);
 
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 654a1e3..929d7cc 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -1043,14 +1043,14 @@
 
 	BUG_ON(!ah->sbands);
 
-	for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+	for (b = 0; b < NUM_NL80211_BANDS; b++) {
 		struct ieee80211_supported_band *band = &ah->sbands[b];
 		char bname[6];
 		switch (band->band) {
-		case IEEE80211_BAND_2GHZ:
+		case NL80211_BAND_2GHZ:
 			strcpy(bname, "2 GHz");
 			break;
-		case IEEE80211_BAND_5GHZ:
+		case NL80211_BAND_5GHZ:
 			strcpy(bname, "5 GHz");
 			break;
 		default:
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 803030f..6a2a1685 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -77,7 +77,7 @@
 	/* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
 	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
 	/* HP Compaq C700 (nitrousnrg@gmail.com) */
-	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
+	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
 	/* LiteOn AR5BXB63 (magooz@salug.it) */
 	{ ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
 	/* IBM-specific AR5212 (all others) */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index bf29da5..fc47b70 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -110,7 +110,7 @@
  * bwmodes.
  */
 int
-ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
 		int len, struct ieee80211_rate *rate, bool shortpre)
 {
 	int sifs, preamble, plcp_bits, sym_time;
@@ -221,7 +221,7 @@
 	case AR5K_BWMODE_DEFAULT:
 		sifs = AR5K_INIT_SIFS_DEFAULT_BG;
 	default:
-		if (channel->band == IEEE80211_BAND_5GHZ)
+		if (channel->band == NL80211_BAND_5GHZ)
 			sifs = AR5K_INIT_SIFS_DEFAULT_A;
 		break;
 	}
@@ -279,7 +279,7 @@
 	struct ieee80211_rate *rate;
 	unsigned int i;
 	/* 802.11g covers both OFDM and CCK */
-	u8 band = IEEE80211_BAND_2GHZ;
+	u8 band = NL80211_BAND_2GHZ;
 
 	/* Write rate duration table */
 	for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 0fce1c7..641b13a 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -75,13 +75,13 @@
 /**
  * ath5k_hw_radio_revision() - Get the PHY Chip revision
  * @ah: The &struct ath5k_hw
- * @band: One of enum ieee80211_band
+ * @band: One of enum nl80211_band
  *
  * Returns the revision number of a 2GHz, 5GHz or single chip
  * radio.
  */
 u16
-ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band)
 {
 	unsigned int i;
 	u32 srev;
@@ -91,10 +91,10 @@
 	 * Set the radio chip access register
 	 */
 	switch (band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
 		break;
 	default:
@@ -138,11 +138,11 @@
 	u16 freq = channel->center_freq;
 
 	/* Check if the channel is in our supported range */
-	if (channel->band == IEEE80211_BAND_2GHZ) {
+	if (channel->band == NL80211_BAND_2GHZ) {
 		if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
 		    (freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
 			return true;
-	} else if (channel->band == IEEE80211_BAND_5GHZ)
+	} else if (channel->band == NL80211_BAND_5GHZ)
 		if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
 		    (freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
 			return true;
@@ -743,7 +743,7 @@
 /**
  * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
  * @ah: The &struct ath5k_hw
- * @band: One of enum ieee80211_band
+ * @band: One of enum nl80211_band
  *
  * Write initial RF gain table to set the RF sensitivity.
  *
@@ -751,7 +751,7 @@
  * with Gain_F calibration
  */
 static int
-ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum nl80211_band band)
 {
 	const struct ath5k_ini_rfgain *ath5k_rfg;
 	unsigned int i, size, index;
@@ -786,7 +786,7 @@
 		return -EINVAL;
 	}
 
-	index = (band == IEEE80211_BAND_2GHZ) ? 1 : 0;
+	index = (band == NL80211_BAND_2GHZ) ? 1 : 0;
 
 	for (i = 0; i < size; i++) {
 		AR5K_REG_WAIT(i);
@@ -917,7 +917,7 @@
 	}
 
 	/* Set Output and Driver bias current (OB/DB) */
-	if (channel->band == IEEE80211_BAND_2GHZ) {
+	if (channel->band == NL80211_BAND_2GHZ) {
 
 		if (channel->hw_value == AR5K_MODE_11B)
 			ee_mode = AR5K_EEPROM_MODE_11B;
@@ -944,7 +944,7 @@
 						AR5K_RF_DB_2GHZ, true);
 
 	/* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */
-	} else if ((channel->band == IEEE80211_BAND_5GHZ) ||
+	} else if ((channel->band == NL80211_BAND_5GHZ) ||
 			(ah->ah_radio == AR5K_RF5111)) {
 
 		/* For 11a, Turbo and XR we need to choose
@@ -1145,7 +1145,7 @@
 	}
 
 	if (ah->ah_radio == AR5K_RF5413 &&
-	channel->band == IEEE80211_BAND_2GHZ) {
+	channel->band == NL80211_BAND_2GHZ) {
 
 		ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE,
 									true);
@@ -1270,7 +1270,7 @@
 	 */
 	data0 = data1 = 0;
 
-	if (channel->band == IEEE80211_BAND_2GHZ) {
+	if (channel->band == NL80211_BAND_2GHZ) {
 		/* Map 2GHz channel to 5GHz Atheros channel ID */
 		ret = ath5k_hw_rf5111_chan2athchan(
 			ieee80211_frequency_to_channel(channel->center_freq),
@@ -1446,7 +1446,7 @@
 			"channel frequency (%u MHz) out of supported "
 			"band range\n",
 			channel->center_freq);
-			return -EINVAL;
+		return -EINVAL;
 	}
 
 	/*
@@ -1919,7 +1919,7 @@
 	/* Convert current frequency to fbin value (the same way channels
 	 * are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale
 	 * up by 2 so we can compare it later */
-	if (channel->band == IEEE80211_BAND_2GHZ) {
+	if (channel->band == NL80211_BAND_2GHZ) {
 		chan_fbin = (channel->center_freq - 2300) * 10;
 		freq_band = AR5K_EEPROM_BAND_2GHZ;
 	} else {
@@ -1983,7 +1983,7 @@
 			symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
 			break;
 		default:
-			if (channel->band == IEEE80211_BAND_5GHZ) {
+			if (channel->band == NL80211_BAND_5GHZ) {
 				/* Both sample_freq and chip_freq are 40MHz */
 				spur_delta_phase = (spur_offset << 17) / 25;
 				spur_freq_sigma_delta =
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index ddaad71..beda11c 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -559,7 +559,7 @@
 int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
 {
 	struct ieee80211_channel *channel = ah->ah_current_channel;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_rate *rate;
 	u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
@@ -596,10 +596,10 @@
 	 *
 	 * Also we have different lowest rate for 802.11a
 	 */
-	if (channel->band == IEEE80211_BAND_5GHZ)
-		band = IEEE80211_BAND_5GHZ;
+	if (channel->band == NL80211_BAND_5GHZ)
+		band = NL80211_BAND_5GHZ;
 	else
-		band = IEEE80211_BAND_2GHZ;
+		band = NL80211_BAND_2GHZ;
 
 	switch (ah->ah_bwmode) {
 	case AR5K_BWMODE_5MHZ:
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 99e62f9..56d7925 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -634,7 +634,7 @@
 		ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
 			AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
 			AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
-			usleep_range(2000, 2500);
+		usleep_range(2000, 2500);
 	} else {
 		ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
 			AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -699,7 +699,7 @@
 		ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
 			AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
 			AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
-			usleep_range(2000, 2500);
+		usleep_range(2000, 2500);
 	} else {
 		if (ath5k_get_bus_type(ah) == ATH_AHB)
 			ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -752,7 +752,7 @@
 			clock = AR5K_PHY_PLL_RF5111;		/*Zero*/
 		}
 
-		if (channel->band == IEEE80211_BAND_2GHZ) {
+		if (channel->band == NL80211_BAND_2GHZ) {
 			mode |= AR5K_PHY_MODE_FREQ_2GHZ;
 			clock |= AR5K_PHY_PLL_44MHZ;
 
@@ -771,7 +771,7 @@
 				else
 					mode |= AR5K_PHY_MODE_MOD_DYN;
 			}
-		} else if (channel->band == IEEE80211_BAND_5GHZ) {
+		} else if (channel->band == NL80211_BAND_5GHZ) {
 			mode |= (AR5K_PHY_MODE_FREQ_5GHZ |
 				 AR5K_PHY_MODE_MOD_OFDM);
 
@@ -906,7 +906,7 @@
 		u32 data;
 		ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
 				AR5K_PHY_CCKTXCTL);
-		if (channel->band == IEEE80211_BAND_5GHZ)
+		if (channel->band == NL80211_BAND_5GHZ)
 			data = 0xffb81020;
 		else
 			data = 0xffb80d20;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 7f3f94f..4e11ba0 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -34,7 +34,7 @@
 }
 
 #define CHAN2G(_channel, _freq, _flags) {   \
-	.band           = IEEE80211_BAND_2GHZ,  \
+	.band           = NL80211_BAND_2GHZ,  \
 	.hw_value       = (_channel),           \
 	.center_freq    = (_freq),              \
 	.flags          = (_flags),             \
@@ -43,7 +43,7 @@
 }
 
 #define CHAN5G(_channel, _flags) {		    \
-	.band           = IEEE80211_BAND_5GHZ,      \
+	.band           = NL80211_BAND_5GHZ,      \
 	.hw_value       = (_channel),               \
 	.center_freq    = 5000 + (5 * (_channel)),  \
 	.flags          = (_flags),                 \
@@ -2583,7 +2583,7 @@
 }
 #endif
 
-static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
+static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum nl80211_band band,
 			    bool ht_enable)
 {
 	struct ath6kl_htcap *htcap = &vif->htcap[band];
@@ -2594,7 +2594,7 @@
 	if (ht_enable) {
 		/* Set default ht capabilities */
 		htcap->ht_enable = true;
-		htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ?
+		htcap->cap_info = (band == NL80211_BAND_2GHZ) ?
 				   ath6kl_g_htcap : ath6kl_a_htcap;
 		htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
 	} else /* Disable ht */
@@ -2609,7 +2609,7 @@
 	struct wiphy *wiphy = vif->ar->wiphy;
 	int band, ret = 0;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		if (!wiphy->bands[band])
 			continue;
 
@@ -3530,7 +3530,7 @@
 				       struct regulatory_request *request)
 {
 	struct ath6kl *ar = wiphy_priv(wiphy);
-	u32 rates[IEEE80211_NUM_BANDS];
+	u32 rates[NUM_NL80211_BANDS];
 	int ret, i;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
@@ -3555,7 +3555,7 @@
 	 * changed.
 	 */
 
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+	for (i = 0; i < NUM_NL80211_BANDS; i++)
 		if (wiphy->bands[i])
 			rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
 
@@ -3791,8 +3791,8 @@
 	vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
 	vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
 	vif->bg_scan_period = 0;
-	vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true;
-	vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
+	vif->htcap[NL80211_BAND_2GHZ].ht_enable = true;
+	vif->htcap[NL80211_BAND_5GHZ].ht_enable = true;
 
 	memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
 	if (fw_vif_idx != 0) {
@@ -3943,9 +3943,9 @@
 	wiphy->available_antennas_rx = ar->hw.rx_ant;
 
 	if (band_2gig)
-		wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+		wiphy->bands[NL80211_BAND_2GHZ] = &ath6kl_band_2ghz;
 	if (band_5gig)
-		wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+		wiphy->bands[NL80211_BAND_5GHZ] = &ath6kl_band_5ghz;
 
 	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 5f3acfe..713a571 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -623,7 +623,7 @@
 	struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
 	struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
 	struct aggr_info *aggr_cntxt;
-	struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS];
+	struct ath6kl_htcap htcap[NUM_NL80211_BANDS];
 
 	struct timer_list disconnect_timer;
 	struct timer_list sched_scan_timer;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index a5e1de7..631c3a0 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1584,6 +1584,11 @@
 	if (len < sizeof(*ev))
 		return -EINVAL;
 
+	if (vif->nw_type != INFRA_NETWORK ||
+	    !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
+		      vif->ar->fw_capabilities))
+		return -EOPNOTSUPP;
+
 	if (vif->sme_state != SME_CONNECTED)
 		return -ENOTCONN;
 
@@ -2043,7 +2048,7 @@
 	sc->no_cck = cpu_to_le32(no_cck);
 	sc->num_ch = num_chan;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		sband = ar->wiphy->bands[band];
 
 		if (!sband)
@@ -2765,10 +2770,10 @@
 	memset(&ratemask, 0, sizeof(ratemask));
 
 	/* only check 2.4 and 5 GHz bands, skip the rest */
-	for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
+	for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
 		/* copy legacy rate mask */
 		ratemask[band] = mask->control[band].legacy;
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			ratemask[band] =
 				mask->control[band].legacy << 4;
 
@@ -2794,9 +2799,9 @@
 		if (mode == WMI_RATES_MODE_11A ||
 		    mode == WMI_RATES_MODE_11A_HT20 ||
 		    mode == WMI_RATES_MODE_11A_HT40)
-			band = IEEE80211_BAND_5GHZ;
+			band = NL80211_BAND_5GHZ;
 		else
-			band = IEEE80211_BAND_2GHZ;
+			band = NL80211_BAND_2GHZ;
 		cmd->ratemask[mode] = cpu_to_le64(ratemask[band]);
 	}
 
@@ -2817,10 +2822,10 @@
 	memset(&ratemask, 0, sizeof(ratemask));
 
 	/* only check 2.4 and 5 GHz bands, skip the rest */
-	for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
+	for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
 		/* copy legacy rate mask */
 		ratemask[band] = mask->control[band].legacy;
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			ratemask[band] =
 				mask->control[band].legacy << 4;
 
@@ -2844,9 +2849,9 @@
 		if (mode == WMI_RATES_MODE_11A ||
 		    mode == WMI_RATES_MODE_11A_HT20 ||
 		    mode == WMI_RATES_MODE_11A_HT40)
-			band = IEEE80211_BAND_5GHZ;
+			band = NL80211_BAND_5GHZ;
 		else
-			band = IEEE80211_BAND_2GHZ;
+			band = NL80211_BAND_2GHZ;
 		cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
 	}
 
@@ -3169,7 +3174,7 @@
 }
 
 int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
-			     enum ieee80211_band band,
+			     enum nl80211_band band,
 			     struct ath6kl_htcap *htcap)
 {
 	struct sk_buff *skb;
@@ -3182,7 +3187,7 @@
 	cmd = (struct wmi_set_htcap_cmd *) skb->data;
 
 	/*
-	 * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely
+	 * NOTE: Band in firmware matches enum nl80211_band, it is unlikely
 	 * this will be changed in firmware. If at all there is any change in
 	 * band value, the host needs to be fixed.
 	 */
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 05d25a9..3af464a 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -2628,7 +2628,7 @@
 int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
 				 u8 keep_alive_intvl);
 int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
-			     enum ieee80211_band band,
+			     enum nl80211_band band,
 			     struct ath6kl_htcap *htcap);
 int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
 
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 8f87930..1b271b9 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -274,6 +274,9 @@
 	};
 	static const int inc[4] = { 0, 100, 0, 0 };
 
+	memset(&mask_m, 0, sizeof(int8_t) * 123);
+	memset(&mask_p, 0, sizeof(int8_t) * 123);
+
 	cur_bin = -6000;
 	upper = bin + 100;
 	lower = bin - 100;
@@ -424,14 +427,9 @@
 	int tmp, new;
 	int i;
 
-	int8_t mask_m[123];
-	int8_t mask_p[123];
 	int cur_bb_spur;
 	bool is2GHz = IS_CHAN_2GHZ(chan);
 
-	memset(&mask_m, 0, sizeof(int8_t) * 123);
-	memset(&mask_p, 0, sizeof(int8_t) * 123);
-
 	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
 		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
 		if (AR_NO_SPUR == cur_bb_spur)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index db66245..53d7445 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -178,14 +178,9 @@
 	int i;
 	struct chan_centers centers;
 
-	int8_t mask_m[123];
-	int8_t mask_p[123];
 	int cur_bb_spur;
 	bool is2GHz = IS_CHAN_2GHZ(chan);
 
-	memset(&mask_m, 0, sizeof(int8_t) * 123);
-	memset(&mask_p, 0, sizeof(int8_t) * 123);
-
 	ath9k_hw_get_channel_centers(ah, chan, &centers);
 	freq = centers.synth_center;
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index c38399b..c07866a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -331,7 +331,7 @@
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
 	{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -351,7 +351,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
 	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
 	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
 	{0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
 	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 0c39199..518e649 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1203,12 +1203,12 @@
 static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
 {
 	int offset[8] = {0}, total = 0, test;
-	int agc_out, i, peak_detect_threshold;
+	int agc_out, i, peak_detect_threshold = 0;
 
 	if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
 		peak_detect_threshold = 8;
-	else
-		peak_detect_threshold = 0;
+	else if (AR_SREV_9561(ah))
+		peak_detect_threshold = 11;
 
 	/*
 	 * Turn off LNA/SW.
@@ -1249,17 +1249,14 @@
 		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
 			      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
 
-	if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
-	    AR_SREV_9561(ah)) {
-		if (is_2g)
-			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
-				      AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
-				      peak_detect_threshold);
-		else
-			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
-				      AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
-				      peak_detect_threshold);
-	}
+	if (is_2g)
+		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+			      AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
+			      peak_detect_threshold);
+	else
+		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+			      AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
+			      peak_detect_threshold);
 
 	for (i = 6; i > 0; i--) {
 		offset[i] = BIT(i - 1);
@@ -1311,9 +1308,6 @@
 	struct ath9k_hw_cal_data *caldata = ah->caldata;
 	int i;
 
-	if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
-		return;
-
 	if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
 		return;
 
@@ -1641,14 +1635,12 @@
 
 skip_tx_iqcal:
 	if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
-		if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
-		    AR_SREV_9561(ah)) {
-			for (i = 0; i < AR9300_MAX_CHAINS; i++) {
-				if (!(ah->rxchainmask & (1 << i)))
-					continue;
-				ar9003_hw_manual_peak_cal(ah, i,
-							  IS_CHAN_2GHZ(chan));
-			}
+		for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+			if (!(ah->rxchainmask & (1 << i)))
+				continue;
+
+			ar9003_hw_manual_peak_cal(ah, i,
+						  IS_CHAN_2GHZ(chan));
 		}
 
 		/*
@@ -1709,7 +1701,7 @@
 	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
 	struct ath_hw_ops *ops = ath9k_hw_ops(ah);
 
-	if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9565(ah))
+	if (AR_SREV_9003_PCOEM(ah))
 		priv_ops->init_cal = ar9003_hw_init_cal_pcoem;
 	else
 		priv_ops->init_cal = ar9003_hw_init_cal_soc;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 54ed2f7..f680982 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3590,8 +3590,8 @@
 		else
 			gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485;
 
-		ath9k_hw_cfg_output(ah, gpio,
-				    AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
+		ath9k_hw_gpio_request_out(ah, gpio, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
 	}
 
 	value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
@@ -4097,16 +4097,16 @@
 		REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
 			      AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
 
-	therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
+	therm_on = thermometer == 0;
 	REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
 		      AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
 	if (pCap->chip_chainmask & BIT(1)) {
-		therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
+		therm_on = thermometer == 1;
 		REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
 			      AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
 	}
 	if (pCap->chip_chainmask & BIT(2)) {
-		therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
+		therm_on = thermometer == 2;
 		REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
 			      AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
 	}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index af5ee41..0fe9c83 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -427,21 +427,34 @@
 	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
 
 	if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
-		ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
-		ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
-		ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
-		ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+		ath9k_hw_gpio_request_out(ah, 3, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+		ath9k_hw_gpio_request_out(ah, 2, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+		ath9k_hw_gpio_request_out(ah, 1, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+		ath9k_hw_gpio_request_out(ah, 0, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
 	} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
-		ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
-		ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
-		ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
-		ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
-		ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+		ath9k_hw_gpio_request_out(ah, 3, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+		ath9k_hw_gpio_request_out(ah, 2, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+		ath9k_hw_gpio_request_out(ah, 1, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+		ath9k_hw_gpio_request_out(ah, 0, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+		ath9k_hw_gpio_request_out(ah, 5, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
 	} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
-		ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
-		ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
-		ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
-		ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+		ath9k_hw_gpio_request_out(ah, 3, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+		ath9k_hw_gpio_request_out(ah, 2, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+		ath9k_hw_gpio_request_out(ah, 1, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+		ath9k_hw_gpio_request_out(ah, 0, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
 	} else
 		return;
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 06c1ca6..be14a8e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1337,11 +1337,11 @@
 				chan->channel,
 				aniState->mrcCCK ? "on" : "off",
 				is_on ? "on" : "off");
-		if (is_on)
-			ah->stats.ast_ani_ccklow++;
-		else
-			ah->stats.ast_ani_cckhigh++;
-		aniState->mrcCCK = is_on;
+			if (is_on)
+				ah->stats.ast_ani_ccklow++;
+			else
+				ah->stats.ast_ani_cckhigh++;
+			aniState->mrcCCK = is_on;
 		}
 	break;
 	}
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index 2c42ff0..29479af 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -40,7 +40,7 @@
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
-	{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
 	{0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -59,7 +59,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
 	{0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
 	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 2154efcd..c4a6ffa 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -345,7 +345,7 @@
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
-	{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
 	{0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -364,7 +364,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
 	{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
 	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index b995ffe..2eb163f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -245,7 +245,7 @@
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
 	{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -265,7 +265,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
 	{0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
 	{0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
 	{0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982},
 	{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1b6b4d0..b00dd64 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -59,7 +59,7 @@
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280},
 	{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -79,7 +79,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
 	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
 	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
 	{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
 	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
index dc3adda..0f8745e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -239,7 +239,7 @@
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280},
 	{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -259,7 +259,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
 	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
 	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
 	{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
 	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index ce83ce4..bdf6f10 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1026,7 +1026,7 @@
 	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
 	{0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
-	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
 	{0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
@@ -1044,7 +1044,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
 	{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
 	{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
 	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index c0b90da..924ae6b 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -988,7 +988,7 @@
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+	{0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220},
 	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1008,7 +1008,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
 	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
 	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
 	{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
 	{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
 	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index 148562a..67edf34 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -83,7 +83,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
 	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
 	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
 	{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
 	{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
 	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 10d4a6c..35c1bbb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -347,7 +347,7 @@
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
+	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
 	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
index c3a47ea..db05107 100644
--- a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
@@ -220,7 +220,7 @@
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003a6, 0x000003a6},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+	{0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220},
 	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 5d4629f..f4c9bef 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -1290,7 +1290,7 @@
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
 	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
 	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+	{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
 	{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
 	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
 	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1310,7 +1310,7 @@
 	{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
 	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
 	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
 	{0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
 	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
 	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 5294595..93b3793 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -813,7 +813,6 @@
 #ifdef CONFIG_MAC80211_LEDS
 void ath_init_leds(struct ath_softc *sc);
 void ath_deinit_leds(struct ath_softc *sc);
-void ath_fill_led_pin(struct ath_softc *sc);
 #else
 static inline void ath_init_leds(struct ath_softc *sc)
 {
@@ -822,9 +821,6 @@
 static inline void ath_deinit_leds(struct ath_softc *sc)
 {
 }
-static inline void ath_fill_led_pin(struct ath_softc *sc)
-{
-}
 #endif
 
 /************************/
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 5a084d9..618c9df 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -15,6 +15,8 @@
  */
 
 #include <linux/export.h>
+#include <linux/types.h>
+#include <linux/ath9k_platform.h>
 #include "hw.h"
 
 enum ath_bt_mode {
@@ -34,6 +36,8 @@
 	u8 bt_priority_time;
 	u8 bt_first_slot_time;
 	bool bt_hold_rx_clear;
+	u8 wl_active_time;
+	u8 wl_qc_time;
 };
 
 static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
@@ -65,31 +69,71 @@
 		.bt_priority_time = 2,
 		.bt_first_slot_time = 5,
 		.bt_hold_rx_clear = true,
+		.wl_active_time = 0x20,
+		.wl_qc_time = 0x20,
 	};
 	bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
+	u8 time_extend = ath_bt_config.bt_time_extend;
+	u8 first_slot_time = ath_bt_config.bt_first_slot_time;
 
 	if (AR_SREV_9300_20_OR_LATER(ah))
 		rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
 
+	if (AR_SREV_SOC(ah)) {
+		first_slot_time = 0x1d;
+		time_extend = 0xa;
+
+		btcoex_hw->bt_coex_mode3 =
+			SM(ath_bt_config.wl_active_time, AR_BT_WL_ACTIVE_TIME) |
+			SM(ath_bt_config.wl_qc_time, AR_BT_WL_QC_TIME);
+
+		btcoex_hw->bt_coex_mode2 =
+			AR_BT_PROTECT_BT_AFTER_WAKEUP |
+			AR_BT_PHY_ERR_BT_COLL_ENABLE;
+	}
+
 	btcoex_hw->bt_coex_mode =
 		(btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) |
-		SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) |
+		SM(time_extend, AR_BT_TIME_EXTEND) |
 		SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) |
 		SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) |
 		SM(ath_bt_config.bt_mode, AR_BT_MODE) |
 		SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) |
 		SM(rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) |
 		SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) |
-		SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) |
+		SM(first_slot_time, AR_BT_FIRST_SLOT_TIME) |
 		SM(qnum, AR_BT_QCU_THRESH);
 
-	btcoex_hw->bt_coex_mode2 =
+	btcoex_hw->bt_coex_mode2 |=
 		SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
 		SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
 		AR_BT_DISABLE_BT_ANT;
 }
 EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
 
+static void ath9k_hw_btcoex_pin_init(struct ath_hw *ah, u8 wlanactive_gpio,
+				     u8 btactive_gpio, u8 btpriority_gpio)
+{
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+	struct ath9k_platform_data *pdata = ah->dev->platform_data;
+
+	if (btcoex_hw->scheme != ATH_BTCOEX_CFG_2WIRE &&
+	    btcoex_hw->scheme != ATH_BTCOEX_CFG_3WIRE)
+		return;
+
+	/* bt priority GPIO will be ignored by 2 wire scheme */
+	if (pdata && (pdata->bt_active_pin || pdata->bt_priority_pin ||
+		      pdata->wlan_active_pin)) {
+		btcoex_hw->btactive_gpio = pdata->bt_active_pin;
+		btcoex_hw->wlanactive_gpio = pdata->wlan_active_pin;
+		btcoex_hw->btpriority_gpio = pdata->bt_priority_pin;
+	} else {
+		btcoex_hw->btactive_gpio = btactive_gpio;
+		btcoex_hw->wlanactive_gpio = wlanactive_gpio;
+		btcoex_hw->btpriority_gpio = btpriority_gpio;
+	}
+}
+
 void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -107,19 +151,19 @@
 		btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
 	} else if (AR_SREV_9300_20_OR_LATER(ah)) {
 		btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
-		btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
-		btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
-		btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300;
-	} else if (AR_SREV_9280_20_OR_LATER(ah)) {
-		btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280;
-		btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280;
 
-		if (AR_SREV_9285(ah)) {
+		ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9300,
+					 ATH_BTACTIVE_GPIO_9300,
+					 ATH_BTPRIORITY_GPIO_9300);
+	} else if (AR_SREV_9280_20_OR_LATER(ah)) {
+		if (AR_SREV_9285(ah))
 			btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
-			btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9285;
-		} else {
+		else
 			btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
-		}
+
+		ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9280,
+					 ATH_BTACTIVE_GPIO_9280,
+					 ATH_BTPRIORITY_GPIO_9285);
 	}
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_init_scheme);
@@ -137,12 +181,14 @@
 		    AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
 
 	/* Set input mux for bt_active to gpio pin */
-	REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
-		      AR_GPIO_INPUT_MUX1_BT_ACTIVE,
-		      btcoex_hw->btactive_gpio);
+	if (!AR_SREV_SOC(ah))
+		REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+			      AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+			      btcoex_hw->btactive_gpio);
 
 	/* Configure the desired gpio port for input */
-	ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
+	ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio,
+				 "ath9k-btactive");
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire);
 
@@ -157,21 +203,33 @@
 
 	/* Set input mux for bt_priority_async and
 	 *                  bt_active_async to GPIO pins */
-	REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
-			AR_GPIO_INPUT_MUX1_BT_ACTIVE,
-			btcoex_hw->btactive_gpio);
-
-	REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
-			AR_GPIO_INPUT_MUX1_BT_PRIORITY,
-			btcoex_hw->btpriority_gpio);
+	if (!AR_SREV_SOC(ah)) {
+		REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+			      AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+			      btcoex_hw->btactive_gpio);
+		REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+			      AR_GPIO_INPUT_MUX1_BT_PRIORITY,
+			      btcoex_hw->btpriority_gpio);
+	}
 
 	/* Configure the desired GPIO ports for input */
-
-	ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
-	ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio);
+	ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio,
+				 "ath9k-btactive");
+	ath9k_hw_gpio_request_in(ah, btcoex_hw->btpriority_gpio,
+				 "ath9k-btpriority");
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire);
 
+void ath9k_hw_btcoex_deinit(struct ath_hw *ah)
+{
+	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+
+	ath9k_hw_gpio_free(ah, btcoex_hw->btactive_gpio);
+	ath9k_hw_gpio_free(ah, btcoex_hw->btpriority_gpio);
+	ath9k_hw_gpio_free(ah, btcoex_hw->wlanactive_gpio);
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_deinit);
+
 void ath9k_hw_btcoex_init_mci(struct ath_hw *ah)
 {
 	ah->btcoex_hw.mci.ready = false;
@@ -201,8 +259,9 @@
 	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
 
 	/* Configure the desired GPIO port for TX_FRAME output */
-	ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
-			    AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
+	ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio,
+				  "ath9k-wlanactive",
+				  AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
 }
 
 /*
@@ -247,13 +306,13 @@
 				 txprio_shift[i-1]);
 		}
 	}
+
 	/* Last WLAN weight has to be adjusted wrt tx priority */
 	if (concur_tx) {
 		btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]);
 		btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type]
 						      << txprio_shift[i-1]);
 	}
-
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
 
@@ -268,9 +327,14 @@
 	 * Program coex mode and weight registers to
 	 * enable coex 3-wire
 	 */
+	if (AR_SREV_SOC(ah))
+		REG_CLR_BIT(ah, AR_BT_COEX_MODE2, AR_BT_PHY_ERR_BT_COLL_ENABLE);
+
 	REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
 	REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
 
+	if (AR_SREV_SOC(ah))
+		REG_WRITE(ah, AR_BT_COEX_MODE3, btcoex->bt_coex_mode3);
 
 	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
@@ -281,8 +345,6 @@
 	} else
 		REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
 
-
-
 	if (AR_SREV_9271(ah)) {
 		val = REG_READ(ah, 0x50040);
 		val &= 0xFFFFFEFF;
@@ -292,8 +354,9 @@
 	REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
 	REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
 
-	ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
-			    AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
+	ath9k_hw_gpio_request_out(ah, btcoex->wlanactive_gpio,
+				  "ath9k-wlanactive",
+				  AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
 }
 
 static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
@@ -339,7 +402,8 @@
 		break;
 	}
 
-	if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) {
+	if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI &&
+	    !AR_SREV_SOC(ah)) {
 		REG_RMW(ah, AR_GPIO_PDPU,
 			(0x2 << (btcoex_hw->btactive_gpio * 2)),
 			(0x3 << (btcoex_hw->btactive_gpio * 2)));
@@ -364,8 +428,8 @@
 	if (!AR_SREV_9300_20_OR_LATER(ah))
 		ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
 
-	ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
-			AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+	ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio,
+				  NULL, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
 
 	if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) {
 		REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index cd2f0a2..1bdfa84 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -115,6 +115,7 @@
 	u32 bt_coex_mode; 	/* Register setting for AR_BT_COEX_MODE */
 	u32 bt_coex_weights; 	/* Register setting for AR_BT_COEX_WEIGHT */
 	u32 bt_coex_mode2; 	/* Register setting for AR_BT_COEX_MODE2 */
+	u32 bt_coex_mode3;	/* Register setting for AR_BT_COEX_MODE3 */
 	u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
 	u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
 	u8 tx_prio[ATH_BTCOEX_STOMP_MAX];
@@ -123,6 +124,7 @@
 void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
 void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
 void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
+void ath9k_hw_btcoex_deinit(struct ath_hw *ah);
 void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
 void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
 void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 37f6d66..0f71146 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -145,14 +145,14 @@
 }
 
 static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
-				   enum ieee80211_band band,
+				   enum nl80211_band band,
 				   int16_t *nft)
 {
 	switch (band) {
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5);
 		break;
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2);
 		break;
 	default:
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 319cb5f..e56bafc 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -107,9 +107,9 @@
 	struct ieee80211_channel *chan;
 	int i, j;
 
-	sband = &common->sbands[IEEE80211_BAND_2GHZ];
+	sband = &common->sbands[NL80211_BAND_2GHZ];
 	if (!sband->n_channels)
-		sband = &common->sbands[IEEE80211_BAND_5GHZ];
+		sband = &common->sbands[NL80211_BAND_5GHZ];
 
 	chan = &sband->channels[0];
 	for (i = 0; i < ATH9K_NUM_CHANCTX; i++) {
@@ -1333,9 +1333,9 @@
 	struct ieee80211_channel *chan;
 	int i;
 
-	sband = &common->sbands[IEEE80211_BAND_2GHZ];
+	sband = &common->sbands[NL80211_BAND_2GHZ];
 	if (!sband->n_channels)
-		sband = &common->sbands[IEEE80211_BAND_5GHZ];
+		sband = &common->sbands[NL80211_BAND_5GHZ];
 
 	chan = &sband->channels[0];
 
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
index a006c14..8b4f7fd 100644
--- a/drivers/net/wireless/ath/ath9k/common-init.c
+++ b/drivers/net/wireless/ath/ath9k/common-init.c
@@ -19,14 +19,14 @@
 #include "common.h"
 
 #define CHAN2G(_freq, _idx)  { \
-	.band = IEEE80211_BAND_2GHZ, \
+	.band = NL80211_BAND_2GHZ, \
 	.center_freq = (_freq), \
 	.hw_value = (_idx), \
 	.max_power = 20, \
 }
 
 #define CHAN5G(_freq, _idx) { \
-	.band = IEEE80211_BAND_5GHZ, \
+	.band = NL80211_BAND_5GHZ, \
 	.center_freq = (_freq), \
 	.hw_value = (_idx), \
 	.max_power = 20, \
@@ -139,12 +139,12 @@
 
 		memcpy(channels, ath9k_2ghz_chantable,
 		       sizeof(ath9k_2ghz_chantable));
-		common->sbands[IEEE80211_BAND_2GHZ].channels = channels;
-		common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
-		common->sbands[IEEE80211_BAND_2GHZ].n_channels =
+		common->sbands[NL80211_BAND_2GHZ].channels = channels;
+		common->sbands[NL80211_BAND_2GHZ].band = NL80211_BAND_2GHZ;
+		common->sbands[NL80211_BAND_2GHZ].n_channels =
 			ARRAY_SIZE(ath9k_2ghz_chantable);
-		common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
-		common->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+		common->sbands[NL80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+		common->sbands[NL80211_BAND_2GHZ].n_bitrates =
 			ARRAY_SIZE(ath9k_legacy_rates);
 	}
 
@@ -156,13 +156,13 @@
 
 		memcpy(channels, ath9k_5ghz_chantable,
 		       sizeof(ath9k_5ghz_chantable));
-		common->sbands[IEEE80211_BAND_5GHZ].channels = channels;
-		common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
-		common->sbands[IEEE80211_BAND_5GHZ].n_channels =
+		common->sbands[NL80211_BAND_5GHZ].channels = channels;
+		common->sbands[NL80211_BAND_5GHZ].band = NL80211_BAND_5GHZ;
+		common->sbands[NL80211_BAND_5GHZ].n_channels =
 			ARRAY_SIZE(ath9k_5ghz_chantable);
-		common->sbands[IEEE80211_BAND_5GHZ].bitrates =
+		common->sbands[NL80211_BAND_5GHZ].bitrates =
 			ath9k_legacy_rates + 4;
-		common->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+		common->sbands[NL80211_BAND_5GHZ].n_bitrates =
 			ARRAY_SIZE(ath9k_legacy_rates) - 4;
 	}
 	return 0;
@@ -236,9 +236,9 @@
 
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
 		ath9k_cmn_setup_ht_cap(ah,
-			&common->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+			&common->sbands[NL80211_BAND_2GHZ].ht_cap);
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
 		ath9k_cmn_setup_ht_cap(ah,
-			&common->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+			&common->sbands[NL80211_BAND_5GHZ].ht_cap);
 }
 EXPORT_SYMBOL(ath9k_cmn_reload_chainmask);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index e8c6994..b80e08b 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -173,7 +173,7 @@
 			   struct ieee80211_rx_status *rxs)
 {
 	struct ieee80211_supported_band *sband;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	unsigned int i = 0;
 	struct ath_hw *ah = common->ah;
 
@@ -305,7 +305,7 @@
 	ichan->channel = chan->center_freq;
 	ichan->chan = chan;
 
-	if (chan->band == IEEE80211_BAND_5GHZ)
+	if (chan->band == NL80211_BAND_5GHZ)
 		flags |= CHANNEL_5GHZ;
 
 	switch (chandef->width) {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 6de64cf..c56e40f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -916,10 +916,21 @@
 	struct ath_softc *sc = inode->i_private;
 	unsigned int len = 0;
 	u8 *buf;
-	int i;
+	int i, j = 0;
 	unsigned long num_regs, regdump_len, max_reg_offset;
+	const struct reg_hole {
+		u32 start;
+		u32 end;
+	} reg_hole_list[] = {
+		{0x0200, 0x07fc},
+		{0x0c00, 0x0ffc},
+		{0x2000, 0x3ffc},
+		{0x4100, 0x6ffc},
+		{0x705c, 0x7ffc},
+		{0x0000, 0x0000}
+	};
 
-	max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
+	max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x8800 : 0xb500;
 	num_regs = max_reg_offset / 4 + 1;
 	regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
 	buf = vmalloc(regdump_len);
@@ -927,9 +938,16 @@
 		return -ENOMEM;
 
 	ath9k_ps_wakeup(sc);
-	for (i = 0; i < num_regs; i++)
+	for (i = 0; i < num_regs; i++) {
+		if (reg_hole_list[j].start == i << 2) {
+			i = reg_hole_list[j].end >> 2;
+			j++;
+			continue;
+		}
+
 		len += scnprintf(buf + len, regdump_len - len,
 			"0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
+	}
 	ath9k_ps_restore(sc);
 
 	file->private_data = buf;
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index c2ca57a..b66cfa9 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -139,7 +139,7 @@
 	}
 
 	if (IS_OFDM_RATE(rs->rs_rate)) {
-		if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ)
+		if (ah->curchan->chan->band == NL80211_BAND_2GHZ)
 			rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++;
 		else
 			rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++;
@@ -173,7 +173,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_rx_rate_stats *rstats;
 	struct ieee80211_sta *sta = an->sta;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	u32 len = 0, size = 4096;
 	char *buf;
 	size_t retval;
@@ -206,7 +206,7 @@
 	len += scnprintf(buf + len, size - len, "\n");
 
 legacy:
-	if (band == IEEE80211_BAND_2GHZ) {
+	if (band == NL80211_BAND_2GHZ) {
 		PRINT_CCK_RATE("CCK-1M/LP", 0, false);
 		PRINT_CCK_RATE("CCK-2M/LP", 1, false);
 		PRINT_CCK_RATE("CCK-5.5M/LP", 2, false);
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
index 22b3cc4..d2ff0fc 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.c
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -212,7 +212,7 @@
 		struct ieee80211_tx_rate *rates = info->status.rates;
 
 		rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
-		if (info->band == IEEE80211_BAND_2GHZ &&
+		if (info->band == NL80211_BAND_2GHZ &&
 		    !(rate->flags & IEEE80211_RATE_ERP_G))
 			phy = WLAN_RC_PHY_CCK;
 		else
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 2847067..490f74d 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -21,6 +21,33 @@
 /********************************/
 
 #ifdef CONFIG_MAC80211_LEDS
+
+void ath_fill_led_pin(struct ath_softc *sc)
+{
+	struct ath_hw *ah = sc->sc_ah;
+
+	/* Set default led pin if invalid */
+	if (ah->led_pin < 0) {
+		if (AR_SREV_9287(ah))
+			ah->led_pin = ATH_LED_PIN_9287;
+		else if (AR_SREV_9485(ah))
+			ah->led_pin = ATH_LED_PIN_9485;
+		else if (AR_SREV_9300(ah))
+			ah->led_pin = ATH_LED_PIN_9300;
+		else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+			ah->led_pin = ATH_LED_PIN_9462;
+		else
+			ah->led_pin = ATH_LED_PIN_DEF;
+	}
+
+	/* Configure gpio for output */
+	ath9k_hw_gpio_request_out(ah, ah->led_pin, "ath9k-led",
+				  AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+	/* LED off, active low */
+	ath9k_hw_set_gpio(ah, ah->led_pin, ah->config.led_active_high ? 0 : 1);
+}
+
 static void ath_led_brightness(struct led_classdev *led_cdev,
 			       enum led_brightness brightness)
 {
@@ -40,6 +67,8 @@
 
 	ath_led_brightness(&sc->led_cdev, LED_OFF);
 	led_classdev_unregister(&sc->led_cdev);
+
+	ath9k_hw_gpio_free(sc->sc_ah, sc->sc_ah->led_pin);
 }
 
 void ath_init_leds(struct ath_softc *sc)
@@ -49,6 +78,8 @@
 	if (AR_SREV_9100(sc->sc_ah))
 		return;
 
+	ath_fill_led_pin(sc);
+
 	if (!ath9k_led_blink)
 		sc->led_cdev.default_trigger =
 			ieee80211_get_radio_led_name(sc->hw);
@@ -64,37 +95,6 @@
 
 	sc->led_registered = true;
 }
-
-void ath_fill_led_pin(struct ath_softc *sc)
-{
-	struct ath_hw *ah = sc->sc_ah;
-
-	if (AR_SREV_9100(ah))
-		return;
-
-	if (ah->led_pin >= 0) {
-		if (!((1 << ah->led_pin) & AR_GPIO_OE_OUT_MASK))
-			ath9k_hw_request_gpio(ah, ah->led_pin, "ath9k-led");
-		return;
-	}
-
-	if (AR_SREV_9287(ah))
-		ah->led_pin = ATH_LED_PIN_9287;
-	else if (AR_SREV_9485(sc->sc_ah))
-		ah->led_pin = ATH_LED_PIN_9485;
-	else if (AR_SREV_9300(sc->sc_ah))
-		ah->led_pin = ATH_LED_PIN_9300;
-	else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
-		ah->led_pin = ATH_LED_PIN_9462;
-	else
-		ah->led_pin = ATH_LED_PIN_DEF;
-
-	/* Configure gpio 1 for output */
-	ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-
-	/* LED off, active low */
-	ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1);
-}
 #endif
 
 /*******************/
@@ -402,6 +402,13 @@
 
 	if (ath9k_hw_mci_is_enabled(ah))
 		ath_mci_cleanup(sc);
+	else {
+		enum ath_btcoex_scheme scheme = ath9k_hw_get_btcoex_scheme(ah);
+
+		if (scheme == ATH_BTCOEX_CFG_2WIRE ||
+		    scheme == ATH_BTCOEX_CFG_3WIRE)
+			ath9k_hw_btcoex_deinit(sc->sc_ah);
+	}
 }
 
 int ath9k_init_btcoex(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 8cbf490..e1c338c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -527,7 +527,7 @@
 				    struct sk_buff *skb)
 {
 	struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
-	int index = 0, i = 0, len = skb->len;
+	int index = 0, i, len = skb->len;
 	int rx_remain_len, rx_pkt_len;
 	u16 pool_index = 0;
 	u8 *ptr;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 2aabcbd..ecb848b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -253,17 +253,19 @@
 	ath9k_led_brightness(&priv->led_cdev, LED_OFF);
 	led_classdev_unregister(&priv->led_cdev);
 	cancel_work_sync(&priv->led_work);
+
+	ath9k_hw_gpio_free(priv->ah, priv->ah->led_pin);
 }
 
 
 void ath9k_configure_leds(struct ath9k_htc_priv *priv)
 {
 	/* Configure gpio 1 for output */
-	ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
-			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+	ath9k_hw_gpio_request_out(priv->ah, priv->ah->led_pin,
+				  "ath9k-led",
+				  AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
 	/* LED off, active low */
 	ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
-
 }
 
 void ath9k_init_leds(struct ath9k_htc_priv *priv)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 8647ab7..c148c6c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -262,11 +262,11 @@
 	__be32 tmpval[8];
 	int i, ret;
 
-       for (i = 0; i < count; i++) {
-	       tmpaddr[i] = cpu_to_be32(addr[i]);
-       }
+	for (i = 0; i < count; i++) {
+		tmpaddr[i] = cpu_to_be32(addr[i]);
+	}
 
-       ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+	ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
 			   (u8 *)tmpaddr , sizeof(u32) * count,
 			   (u8 *)tmpval, sizeof(u32) * count,
 			   100);
@@ -275,9 +275,9 @@
 			"Multiple REGISTER READ FAILED (count: %d)\n", count);
 	}
 
-       for (i = 0; i < count; i++) {
-	       val[i] = be32_to_cpu(tmpval[i]);
-       }
+	for (i = 0; i < count; i++) {
+		val[i] = be32_to_cpu(tmpval[i]);
+	}
 }
 
 static void ath9k_regwrite_multi(struct ath_common *common)
@@ -765,11 +765,11 @@
 		sizeof(struct htc_frame_hdr) + 4;
 
 	if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			&common->sbands[IEEE80211_BAND_2GHZ];
+		hw->wiphy->bands[NL80211_BAND_2GHZ] =
+			&common->sbands[NL80211_BAND_2GHZ];
 	if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
-		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			&common->sbands[IEEE80211_BAND_5GHZ];
+		hw->wiphy->bands[NL80211_BAND_5GHZ] =
+			&common->sbands[NL80211_BAND_5GHZ];
 
 	ath9k_cmn_reload_chainmask(ah);
 
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 639294a..a553c91 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -246,7 +246,7 @@
 	struct ieee80211_conf *conf = &common->hw->conf;
 	bool fastcc;
 	struct ieee80211_channel *channel = hw->conf.chandef.chan;
-	struct ath9k_hw_cal_data *caldata = NULL;
+	struct ath9k_hw_cal_data *caldata;
 	enum htc_phymode mode;
 	__be16 htc_mode;
 	u8 cmd_rsp;
@@ -274,10 +274,7 @@
 		priv->ah->curchan->channel,
 		channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
 		fastcc);
-
-	if (!fastcc)
-		caldata = &priv->caldata;
-
+	caldata = fastcc ? NULL : &priv->caldata;
 	ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
 	if (ret) {
 		ath_err(common,
@@ -1770,8 +1767,8 @@
 	memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask));
 
 	tmask.vif_index = avp->index;
-	tmask.band = IEEE80211_BAND_2GHZ;
-	tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_2GHZ].legacy);
+	tmask.band = NL80211_BAND_2GHZ;
+	tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_2GHZ].legacy);
 
 	WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
 	if (ret) {
@@ -1781,8 +1778,8 @@
 		goto out;
 	}
 
-	tmask.band = IEEE80211_BAND_5GHZ;
-	tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_5GHZ].legacy);
+	tmask.band = NL80211_BAND_5GHZ;
+	tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_5GHZ].legacy);
 
 	WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
 	if (ret) {
@@ -1793,8 +1790,8 @@
 	}
 
 	ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n",
-		mask->control[IEEE80211_BAND_2GHZ].legacy,
-		mask->control[IEEE80211_BAND_5GHZ].legacy);
+		mask->control[NL80211_BAND_2GHZ].legacy,
+		mask->control[NL80211_BAND_5GHZ].legacy);
 out:
 	return ret;
 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index cc9648f..f333ef1 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -494,7 +494,7 @@
 		if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI)
 			rate->flags |= IEEE80211_TX_RC_SHORT_GI;
 	} else {
-		if (cur_conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+		if (cur_conf->chandef.chan->band == NL80211_BAND_5GHZ)
 			rate->idx += 4; /* No CCK rates */
 	}
 
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e7a3101..8b2895f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1582,8 +1582,10 @@
 		if (!(gpio_mask & 1))
 			continue;
 
-		ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+		ath9k_hw_gpio_request_out(ah, i, NULL,
+					  AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
 		ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
+		ath9k_hw_gpio_free(ah, i);
 	}
 }
 
@@ -1958,7 +1960,7 @@
 	ath9k_hw_init_qos(ah);
 
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
-		ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+		ath9k_hw_gpio_request_in(ah, ah->rfkill_gpio, "ath9k-rfkill");
 
 	ath9k_hw_init_global_settings(ah);
 
@@ -2385,6 +2387,61 @@
 	}
 }
 
+static void ath9k_gpio_cap_init(struct ath_hw *ah)
+{
+	struct ath9k_hw_capabilities *pCap = &ah->caps;
+
+	if (AR_SREV_9271(ah)) {
+		pCap->num_gpio_pins = AR9271_NUM_GPIO;
+		pCap->gpio_mask = AR9271_GPIO_MASK;
+	} else if (AR_DEVID_7010(ah)) {
+		pCap->num_gpio_pins = AR7010_NUM_GPIO;
+		pCap->gpio_mask = AR7010_GPIO_MASK;
+	} else if (AR_SREV_9287(ah)) {
+		pCap->num_gpio_pins = AR9287_NUM_GPIO;
+		pCap->gpio_mask = AR9287_GPIO_MASK;
+	} else if (AR_SREV_9285(ah)) {
+		pCap->num_gpio_pins = AR9285_NUM_GPIO;
+		pCap->gpio_mask = AR9285_GPIO_MASK;
+	} else if (AR_SREV_9280(ah)) {
+		pCap->num_gpio_pins = AR9280_NUM_GPIO;
+		pCap->gpio_mask = AR9280_GPIO_MASK;
+	} else if (AR_SREV_9300(ah)) {
+		pCap->num_gpio_pins = AR9300_NUM_GPIO;
+		pCap->gpio_mask = AR9300_GPIO_MASK;
+	} else if (AR_SREV_9330(ah)) {
+		pCap->num_gpio_pins = AR9330_NUM_GPIO;
+		pCap->gpio_mask = AR9330_GPIO_MASK;
+	} else if (AR_SREV_9340(ah)) {
+		pCap->num_gpio_pins = AR9340_NUM_GPIO;
+		pCap->gpio_mask = AR9340_GPIO_MASK;
+	} else if (AR_SREV_9462(ah)) {
+		pCap->num_gpio_pins = AR9462_NUM_GPIO;
+		pCap->gpio_mask = AR9462_GPIO_MASK;
+	} else if (AR_SREV_9485(ah)) {
+		pCap->num_gpio_pins = AR9485_NUM_GPIO;
+		pCap->gpio_mask = AR9485_GPIO_MASK;
+	} else if (AR_SREV_9531(ah)) {
+		pCap->num_gpio_pins = AR9531_NUM_GPIO;
+		pCap->gpio_mask = AR9531_GPIO_MASK;
+	} else if (AR_SREV_9550(ah)) {
+		pCap->num_gpio_pins = AR9550_NUM_GPIO;
+		pCap->gpio_mask = AR9550_GPIO_MASK;
+	} else if (AR_SREV_9561(ah)) {
+		pCap->num_gpio_pins = AR9561_NUM_GPIO;
+		pCap->gpio_mask = AR9561_GPIO_MASK;
+	} else if (AR_SREV_9565(ah)) {
+		pCap->num_gpio_pins = AR9565_NUM_GPIO;
+		pCap->gpio_mask = AR9565_GPIO_MASK;
+	} else if (AR_SREV_9580(ah)) {
+		pCap->num_gpio_pins = AR9580_NUM_GPIO;
+		pCap->gpio_mask = AR9580_GPIO_MASK;
+	} else {
+		pCap->num_gpio_pins = AR_NUM_GPIO;
+		pCap->gpio_mask = AR_GPIO_MASK;
+	}
+}
+
 int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 {
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2478,20 +2535,7 @@
 	else
 		pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
 
-	if (AR_SREV_9271(ah))
-		pCap->num_gpio_pins = AR9271_NUM_GPIO;
-	else if (AR_DEVID_7010(ah))
-		pCap->num_gpio_pins = AR7010_NUM_GPIO;
-	else if (AR_SREV_9300_20_OR_LATER(ah))
-		pCap->num_gpio_pins = AR9300_NUM_GPIO;
-	else if (AR_SREV_9287_11_OR_LATER(ah))
-		pCap->num_gpio_pins = AR9287_NUM_GPIO;
-	else if (AR_SREV_9285_12_OR_LATER(ah))
-		pCap->num_gpio_pins = AR9285_NUM_GPIO;
-	else if (AR_SREV_9280_20_OR_LATER(ah))
-		pCap->num_gpio_pins = AR928X_NUM_GPIO;
-	else
-		pCap->num_gpio_pins = AR_NUM_GPIO;
+	ath9k_gpio_cap_init(ah);
 
 	if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
 		pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
@@ -2612,8 +2656,7 @@
 /* GPIO / RFKILL / Antennae */
 /****************************/
 
-static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
-					 u32 gpio, u32 type)
+static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
 {
 	int addr;
 	u32 gpio_shift, tmp;
@@ -2627,8 +2670,8 @@
 
 	gpio_shift = (gpio % 6) * 5;
 
-	if (AR_SREV_9280_20_OR_LATER(ah)
-	    || (addr != AR_GPIO_OUTPUT_MUX1)) {
+	if (AR_SREV_9280_20_OR_LATER(ah) ||
+	    (addr != AR_GPIO_OUTPUT_MUX1)) {
 		REG_RMW(ah, addr, (type << gpio_shift),
 			(0x1f << gpio_shift));
 	} else {
@@ -2640,107 +2683,145 @@
 	}
 }
 
-void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
+/* The BSP is expected to set the corresponding MUX register correctly.
+ */
+static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
+				  const char *label)
 {
-	u32 gpio_shift;
+	if (ah->caps.gpio_requested & BIT(gpio))
+		return;
 
-	BUG_ON(gpio >= ah->caps.num_gpio_pins);
+	/* may have been requested by the BSP; free it anyway */
+	gpio_free(gpio);
+
+	if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
+		return;
+
+	ah->caps.gpio_requested |= BIT(gpio);
+}
+
+static void ath9k_hw_gpio_cfg_wmac(struct ath_hw *ah, u32 gpio, bool out,
+				   u32 ah_signal_type)
+{
+	u32 gpio_set, gpio_shift = gpio;
 
 	if (AR_DEVID_7010(ah)) {
-		gpio_shift = gpio;
-		REG_RMW(ah, AR7010_GPIO_OE,
-			(AR7010_GPIO_OE_AS_INPUT << gpio_shift),
-			(AR7010_GPIO_OE_MASK << gpio_shift));
-		return;
-	}
+		gpio_set = out ?
+			AR7010_GPIO_OE_AS_OUTPUT : AR7010_GPIO_OE_AS_INPUT;
+		REG_RMW(ah, AR7010_GPIO_OE, gpio_set << gpio_shift,
+			AR7010_GPIO_OE_MASK << gpio_shift);
+	} else if (AR_SREV_SOC(ah)) {
+		gpio_set = out ? 1 : 0;
+		REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+			gpio_set << gpio_shift);
+	} else {
+		gpio_shift = gpio << 1;
+		gpio_set = out ?
+			AR_GPIO_OE_OUT_DRV_ALL : AR_GPIO_OE_OUT_DRV_NO;
+		REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+			AR_GPIO_OE_OUT_DRV << gpio_shift);
 
-	gpio_shift = gpio << 1;
-	REG_RMW(ah,
-		AR_GPIO_OE_OUT,
-		(AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
-		(AR_GPIO_OE_OUT_DRV << gpio_shift));
+		if (out)
+			ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
+	}
 }
-EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
+
+static void ath9k_hw_gpio_request(struct ath_hw *ah, u32 gpio, bool out,
+				  const char *label, u32 ah_signal_type)
+{
+	WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+	if (BIT(gpio) & ah->caps.gpio_mask)
+		ath9k_hw_gpio_cfg_wmac(ah, gpio, out, ah_signal_type);
+	else if (AR_SREV_SOC(ah))
+		ath9k_hw_gpio_cfg_soc(ah, gpio, out, label);
+	else
+		WARN_ON(1);
+}
+
+void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label)
+{
+	ath9k_hw_gpio_request(ah, gpio, false, label, 0);
+}
+EXPORT_SYMBOL(ath9k_hw_gpio_request_in);
+
+void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label,
+			       u32 ah_signal_type)
+{
+	ath9k_hw_gpio_request(ah, gpio, true, label, ah_signal_type);
+}
+EXPORT_SYMBOL(ath9k_hw_gpio_request_out);
+
+void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio)
+{
+	if (!AR_SREV_SOC(ah))
+		return;
+
+	WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+	if (ah->caps.gpio_requested & BIT(gpio)) {
+		gpio_free(gpio);
+		ah->caps.gpio_requested &= ~BIT(gpio);
+	}
+}
+EXPORT_SYMBOL(ath9k_hw_gpio_free);
 
 u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
 {
+	u32 val = 0xffffffff;
+
 #define MS_REG_READ(x, y) \
-	(MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))
+	(MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & BIT(y))
 
-	if (gpio >= ah->caps.num_gpio_pins)
-		return 0xffffffff;
+	WARN_ON(gpio >= ah->caps.num_gpio_pins);
 
-	if (AR_DEVID_7010(ah)) {
-		u32 val;
-		val = REG_READ(ah, AR7010_GPIO_IN);
-		return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
-	} else if (AR_SREV_9300_20_OR_LATER(ah))
-		return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
-			AR_GPIO_BIT(gpio)) != 0;
-	else if (AR_SREV_9271(ah))
-		return MS_REG_READ(AR9271, gpio) != 0;
-	else if (AR_SREV_9287_11_OR_LATER(ah))
-		return MS_REG_READ(AR9287, gpio) != 0;
-	else if (AR_SREV_9285_12_OR_LATER(ah))
-		return MS_REG_READ(AR9285, gpio) != 0;
-	else if (AR_SREV_9280_20_OR_LATER(ah))
-		return MS_REG_READ(AR928X, gpio) != 0;
-	else
-		return MS_REG_READ(AR, gpio) != 0;
+	if (BIT(gpio) & ah->caps.gpio_mask) {
+		if (AR_SREV_9271(ah))
+			val = MS_REG_READ(AR9271, gpio);
+		else if (AR_SREV_9287(ah))
+			val = MS_REG_READ(AR9287, gpio);
+		else if (AR_SREV_9285(ah))
+			val = MS_REG_READ(AR9285, gpio);
+		else if (AR_SREV_9280(ah))
+			val = MS_REG_READ(AR928X, gpio);
+		else if (AR_DEVID_7010(ah))
+			val = REG_READ(ah, AR7010_GPIO_IN) & BIT(gpio);
+		else if (AR_SREV_9300_20_OR_LATER(ah))
+			val = REG_READ(ah, AR_GPIO_IN) & BIT(gpio);
+		else
+			val = MS_REG_READ(AR, gpio);
+	} else if (BIT(gpio) & ah->caps.gpio_requested) {
+		val = gpio_get_value(gpio) & BIT(gpio);
+	} else {
+		WARN_ON(1);
+	}
+
+	return val;
 }
 EXPORT_SYMBOL(ath9k_hw_gpio_get);
 
-void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
-			 u32 ah_signal_type)
-{
-	u32 gpio_shift;
-
-	if (AR_DEVID_7010(ah)) {
-		gpio_shift = gpio;
-		REG_RMW(ah, AR7010_GPIO_OE,
-			(AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
-			(AR7010_GPIO_OE_MASK << gpio_shift));
-		return;
-	}
-
-	ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
-	gpio_shift = 2 * gpio;
-	REG_RMW(ah,
-		AR_GPIO_OE_OUT,
-		(AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
-		(AR_GPIO_OE_OUT_DRV << gpio_shift));
-}
-EXPORT_SYMBOL(ath9k_hw_cfg_output);
-
 void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
 {
-	if (AR_DEVID_7010(ah)) {
-		val = val ? 0 : 1;
-		REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio),
-			AR_GPIO_BIT(gpio));
-		return;
-	}
+	WARN_ON(gpio >= ah->caps.num_gpio_pins);
 
-	if (AR_SREV_9271(ah))
-		val = ~val;
-
-	if ((1 << gpio) & AR_GPIO_OE_OUT_MASK)
-		REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
-			AR_GPIO_BIT(gpio));
+	if (AR_DEVID_7010(ah) || AR_SREV_9271(ah))
+		val = !val;
 	else
-		gpio_set_value(gpio, val & 1);
+		val = !!val;
+
+	if (BIT(gpio) & ah->caps.gpio_mask) {
+		u32 out_addr = AR_DEVID_7010(ah) ?
+			AR7010_GPIO_OUT : AR_GPIO_IN_OUT;
+
+		REG_RMW(ah, out_addr, val << gpio, BIT(gpio));
+	} else if (BIT(gpio) & ah->caps.gpio_requested) {
+		gpio_set_value(gpio, val);
+	} else {
+		WARN_ON(1);
+	}
 }
 EXPORT_SYMBOL(ath9k_hw_set_gpio);
 
-void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label)
-{
-	if (gpio >= ah->caps.num_gpio_pins)
-		return;
-
-	gpio_request_one(gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW, label);
-}
-EXPORT_SYMBOL(ath9k_hw_request_gpio);
-
 void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
 {
 	REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
@@ -2833,8 +2914,7 @@
 {
 	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
 	struct ieee80211_channel *channel;
-	int chan_pwr, new_pwr, max_gain;
-	int ant_gain, ant_reduction = 0;
+	int chan_pwr, new_pwr;
 
 	if (!chan)
 		return;
@@ -2842,15 +2922,10 @@
 	channel = chan->chan;
 	chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
 	new_pwr = min_t(int, chan_pwr, reg->power_limit);
-	max_gain = chan_pwr - new_pwr + channel->max_antenna_gain * 2;
-
-	ant_gain = get_antenna_gain(ah, chan);
-	if (ant_gain > max_gain)
-		ant_reduction = ant_gain - max_gain;
 
 	ah->eep_ops->set_txpower(ah, chan,
 				 ath9k_regd_get_ctl(reg, chan),
-				 ant_reduction, new_pwr, test);
+				 get_antenna_gain(ah, chan), new_pwr, test);
 }
 
 void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 831a544..9cbca12 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -160,7 +160,6 @@
 #define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA        0x1e
 
 #define AR_GPIOD_MASK               0x00001FFF
-#define AR_GPIO_BIT(_gpio)          (1 << (_gpio))
 
 #define BASE_ACTIVATE_DELAY         100
 #define RTC_PLL_SETTLE_DELAY        (AR_SREV_9340(ah) ? 1000 : 100)
@@ -301,6 +300,8 @@
 	u8 max_txchains;
 	u8 max_rxchains;
 	u8 num_gpio_pins;
+	u32 gpio_mask;
+	u32 gpio_requested;
 	u8 rx_hp_qdepth;
 	u8 rx_lp_qdepth;
 	u8 rx_status_len;
@@ -1019,12 +1020,12 @@
 u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
 
 /* GPIO / RFKILL / Antennae */
-void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
+void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label);
+void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label,
+			       u32 ah_signal_type);
+void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio);
 u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
-void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
-			 u32 ah_signal_type);
 void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
-void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label);
 void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
 
 /* General Operation */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 1c226d6..2ee8624 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -49,6 +49,10 @@
 module_param_named(blink, ath9k_led_blink, int, 0444);
 MODULE_PARM_DESC(blink, "Enable LED blink on activity");
 
+static int ath9k_led_active_high = -1;
+module_param_named(led_active_high, ath9k_led_active_high, int, 0444);
+MODULE_PARM_DESC(led_active_high, "Invert LED polarity");
+
 static int ath9k_btcoex_enable;
 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
@@ -477,7 +481,7 @@
 static int ath9k_eeprom_request(struct ath_softc *sc, const char *name)
 {
 	struct ath9k_eeprom_ctx ec;
-	struct ath_hw *ah = ah = sc->sc_ah;
+	struct ath_hw *ah = sc->sc_ah;
 	int err;
 
 	/* try to load the EEPROM content asynchronously */
@@ -600,6 +604,9 @@
 	if (ret)
 		return ret;
 
+	if (ath9k_led_active_high != -1)
+		ah->config.led_active_high = ath9k_led_active_high == 1;
+
 	/*
 	 * Enable WLAN/BT RX Antenna diversity only when:
 	 *
@@ -660,7 +667,6 @@
 
 	ath9k_cmn_init_crypto(sc->sc_ah);
 	ath9k_init_misc(sc);
-	ath_fill_led_pin(sc);
 	ath_chanctx_init(sc);
 	ath9k_offchannel_init(sc);
 
@@ -706,9 +712,9 @@
 	struct ath9k_channel *curchan = ah->curchan;
 
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
-		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
+		ath9k_init_band_txpower(sc, NL80211_BAND_2GHZ);
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
-		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
+		ath9k_init_band_txpower(sc, NL80211_BAND_5GHZ);
 
 	ah->curchan = curchan;
 }
@@ -880,11 +886,11 @@
 	sc->ant_tx = hw->wiphy->available_antennas_tx;
 
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			&common->sbands[IEEE80211_BAND_2GHZ];
+		hw->wiphy->bands[NL80211_BAND_2GHZ] =
+			&common->sbands[NL80211_BAND_2GHZ];
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
-		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			&common->sbands[IEEE80211_BAND_5GHZ];
+		hw->wiphy->bands[NL80211_BAND_5GHZ] =
+			&common->sbands[NL80211_BAND_5GHZ];
 
 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
 	ath9k_set_mcc_capab(sc, hw);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3aed43a..8b63988 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -718,12 +718,9 @@
 	if (!ath_complete_reset(sc, false))
 		ah->reset_power_on = false;
 
-	if (ah->led_pin >= 0) {
-		ath9k_hw_cfg_output(ah, ah->led_pin,
-				    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+	if (ah->led_pin >= 0)
 		ath9k_hw_set_gpio(ah, ah->led_pin,
 				  (ah->config.led_active_high) ? 1 : 0);
-	}
 
 	/*
 	 * Reset key cache to sane defaults (all entries cleared) instead of
@@ -867,11 +864,9 @@
 
 	spin_lock_bh(&sc->sc_pcu_lock);
 
-	if (ah->led_pin >= 0) {
+	if (ah->led_pin >= 0)
 		ath9k_hw_set_gpio(ah, ah->led_pin,
 				  (ah->config.led_active_high) ? 0 : 1);
-		ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
-	}
 
 	ath_prepare_reset(sc);
 
@@ -1938,14 +1933,14 @@
 	if (idx == 0)
 		ath_update_survey_stats(sc);
 
-	sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+	sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
 	if (sband && idx >= sband->n_channels) {
 		idx -= sband->n_channels;
 		sband = NULL;
 	}
 
 	if (!sband)
-		sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+		sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
 
 	if (!sband || idx >= sband->n_channels) {
 		spin_unlock_bh(&common->cc_lock);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index e6fef1b..7cdaf40 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,6 +28,16 @@
 	{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
 	{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI   */
 	{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
+
+#ifdef CONFIG_ATH9K_PCOEM
+	/* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+			 0x0029,
+			 PCI_VENDOR_ID_ATHEROS,
+			 0x2096),
+	  .driver_data = ATH9K_PCI_LED_ACT_HI },
+#endif
+
 	{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
 
 #ifdef CONFIG_ATH9K_PCOEM
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index c8d35fe..9272ca9 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -985,6 +985,10 @@
 #define AR_SREV_9561(_ah) \
 	(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
 
+#define AR_SREV_SOC(_ah) \
+	(AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(_ah) || \
+	 AR_SREV_9561(_ah))
+
 /* NOTE: When adding chips newer than Peacock, add chip check here */
 #define AR_SREV_9580_10_OR_LATER(_ah) \
 	(AR_SREV_9580(_ah))
@@ -1104,14 +1108,46 @@
 
 #define AR_PCIE_PHY_REG3			 0x18c08
 
+/* Define the correct GPIO numbers and MASK bits to indicate the WMAC
+ * GPIO resource.
+ * The SOC chips (AR9340, AR9531, AR9550, AR9561) may access all GPIOs,
+ * relying on the gpiolib framework. The SOC AR9330 is restricted to
+ * the WMAC GPIOs only, as it shares the same design as the old chips.
+ */
 #define AR_NUM_GPIO                              14
-#define AR928X_NUM_GPIO                          10
+#define AR9280_NUM_GPIO                          10
 #define AR9285_NUM_GPIO                          12
-#define AR9287_NUM_GPIO                          11
+#define AR9287_NUM_GPIO                          10
 #define AR9271_NUM_GPIO                          16
-#define AR9300_NUM_GPIO                          17
+#define AR9300_NUM_GPIO                          16
+#define AR9330_NUM_GPIO				 16
+#define AR9340_NUM_GPIO				 23
+#define AR9462_NUM_GPIO				 10
+#define AR9485_NUM_GPIO				 12
+#define AR9531_NUM_GPIO				 18
+#define AR9550_NUM_GPIO				 24
+#define AR9561_NUM_GPIO				 23
+#define AR9565_NUM_GPIO				 12
+#define AR9580_NUM_GPIO				 16
 #define AR7010_NUM_GPIO                          16
 
+#define AR_GPIO_MASK				 0x00003FFF
+#define AR9271_GPIO_MASK			 0x0000FFFF
+#define AR9280_GPIO_MASK			 0x000003FF
+#define AR9285_GPIO_MASK			 0x00000FFF
+#define AR9287_GPIO_MASK			 0x000003FF
+#define AR9300_GPIO_MASK			 0x0000F4FF
+#define AR9330_GPIO_MASK			 0x0000F4FF
+#define AR9340_GPIO_MASK			 0x0000000F
+#define AR9462_GPIO_MASK			 0x000003FF
+#define AR9485_GPIO_MASK			 0x00000FFF
+#define AR9531_GPIO_MASK			 0x0000000F
+#define AR9550_GPIO_MASK			 0x0000000F
+#define AR9561_GPIO_MASK			 0x0000000F
+#define AR9565_GPIO_MASK			 0x00000FFF
+#define AR9580_GPIO_MASK			 0x0000F4FF
+#define AR7010_GPIO_MASK			 0x0000FFFF
+
 #define AR_GPIO_IN_OUT                           (AR_SREV_9340(ah) ? 0x4028 : 0x4048)
 #define AR_GPIO_IN_VAL                           0x0FFFC000
 #define AR_GPIO_IN_VAL_S                         14
@@ -1132,8 +1168,6 @@
 
 #define AR_GPIO_OE_OUT                           (AR_SREV_9340(ah) ? 0x4030 : \
 						  (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
-#define AR_GPIO_OE_OUT_MASK			 (AR_SREV_9550_OR_LATER(ah) ? \
-						  0x0000000F : 0xFFFFFFFF)
 #define AR_GPIO_OE_OUT_DRV                       0x3
 #define AR_GPIO_OE_OUT_DRV_NO                    0x0
 #define AR_GPIO_OE_OUT_DRV_LOW                   0x1
@@ -1858,15 +1892,33 @@
 
 #define AR9300_BT_WGHT             0xcccc4444
 
-#define AR_BT_COEX_MODE2           0x817c
-#define AR_BT_BCN_MISS_THRESH      0x000000ff
-#define AR_BT_BCN_MISS_THRESH_S    0
-#define AR_BT_BCN_MISS_CNT         0x0000ff00
-#define AR_BT_BCN_MISS_CNT_S       8
-#define AR_BT_HOLD_RX_CLEAR        0x00010000
-#define AR_BT_HOLD_RX_CLEAR_S      16
-#define AR_BT_DISABLE_BT_ANT       0x00100000
-#define AR_BT_DISABLE_BT_ANT_S     20
+#define AR_BT_COEX_MODE2		0x817c
+#define AR_BT_BCN_MISS_THRESH		0x000000ff
+#define AR_BT_BCN_MISS_THRESH_S		0
+#define AR_BT_BCN_MISS_CNT		0x0000ff00
+#define AR_BT_BCN_MISS_CNT_S		8
+#define AR_BT_HOLD_RX_CLEAR		0x00010000
+#define AR_BT_HOLD_RX_CLEAR_S		16
+#define AR_BT_PROTECT_BT_AFTER_WAKEUP	0x00080000
+#define AR_BT_PROTECT_BT_AFTER_WAKEUP_S 19
+#define AR_BT_DISABLE_BT_ANT		0x00100000
+#define AR_BT_DISABLE_BT_ANT_S		20
+#define AR_BT_QUIET_2_WIRE		0x00200000
+#define AR_BT_QUIET_2_WIRE_S		21
+#define AR_BT_WL_ACTIVE_MODE		0x00c00000
+#define AR_BT_WL_ACTIVE_MODE_S		22
+#define AR_BT_WL_TXRX_SEPARATE		0x01000000
+#define AR_BT_WL_TXRX_SEPARATE_S	24
+#define AR_BT_RS_DISCARD_EXTEND		0x02000000
+#define AR_BT_RS_DISCARD_EXTEND_S	25
+#define AR_BT_TSF_BT_ACTIVE_CTRL	0x0c000000
+#define AR_BT_TSF_BT_ACTIVE_CTRL_S	26
+#define AR_BT_TSF_BT_PRIORITY_CTRL	0x30000000
+#define AR_BT_TSF_BT_PRIORITY_CTRL_S	28
+#define AR_BT_INTERRUPT_ENABLE		0x40000000
+#define AR_BT_INTERRUPT_ENABLE_S	30
+#define AR_BT_PHY_ERR_BT_COLL_ENABLE	0x80000000
+#define AR_BT_PHY_ERR_BT_COLL_ENABLE_S	31
 
 #define AR_TXSIFS              0x81d0
 #define AR_TXSIFS_TIME         0x000000FF
@@ -1875,6 +1927,16 @@
 #define AR_TXSIFS_ACK_SHIFT    0x00007000
 #define AR_TXSIFS_ACK_SHIFT_S  12
 
+#define AR_BT_COEX_MODE3			0x81d4
+#define AR_BT_WL_ACTIVE_TIME			0x000000ff
+#define AR_BT_WL_ACTIVE_TIME_S			0
+#define AR_BT_WL_QC_TIME			0x0000ff00
+#define AR_BT_WL_QC_TIME_S			8
+#define AR_BT_ALLOW_CONCURRENT_ACCESS		0x000f0000
+#define AR_BT_ALLOW_CONCURRENT_ACCESS_S		16
+#define AR_BT_AGC_SATURATION_CNT_ENABLE		0x00100000
+#define AR_BT_AGC_SATURATION_CNT_ENABLE_S	20
+
 #define AR_TXOP_X          0x81ec
 #define AR_TXOP_X_VAL      0x000000FF
 
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index c9cb2aa..d38e50f 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -55,11 +55,26 @@
 	return j << 2;
 }
 
+static u32 ath9k_rng_delay_get(u32 fail_stats)
+{
+	u32 delay;
+
+	if (fail_stats < 100)
+		delay = 10;
+	else if (fail_stats < 105)
+		delay = 1000;
+	else
+		delay = 10000;
+
+	return delay;
+}
+
 static int ath9k_rng_kthread(void *data)
 {
 	int bytes_read;
 	struct ath_softc *sc = data;
 	u32 *rng_buf;
+	u32 delay, fail_stats = 0;
 
 	rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
 	if (!rng_buf)
@@ -69,10 +84,13 @@
 		bytes_read = ath9k_rng_data_read(sc, rng_buf,
 						 ATH9K_RNG_BUF_SIZE);
 		if (unlikely(!bytes_read)) {
-			msleep_interruptible(10);
+			delay = ath9k_rng_delay_get(++fail_stats);
+			msleep_interruptible(delay);
 			continue;
 		}
 
+		fail_stats = 0;
+
 		/* sleep until entropy bits under write_wakeup_threshold */
 		add_hwgenerator_randomness((void *)rng_buf, bytes_read,
 					   ATH9K_RNG_ENTROPY(bytes_read));
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fe795fc..8ddd604 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1112,7 +1112,7 @@
 				bool is_2ghz;
 				struct modal_eep_header *pmodal;
 
-				is_2ghz = info->band == IEEE80211_BAND_2GHZ;
+				is_2ghz = info->band == NL80211_BAND_2GHZ;
 				pmodal = &eep->modalHeader[is_2ghz];
 				power_ht40delta = pmodal->ht40PowerIncForPdadc;
 			} else {
@@ -1236,7 +1236,7 @@
 
 		/* legacy rates */
 		rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
-		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
+		if ((tx_info->band == NL80211_BAND_2GHZ) &&
 		    !(rate->flags & IEEE80211_RATE_ERP_G))
 			phy = WLAN_RC_PHY_CCK;
 		else
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index a2f00570..7d4a72d 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -48,7 +48,7 @@
 	if (conf_is_ht40(&ar->hw->conf))
 		val = 0x010a;
 	else {
-		if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+		if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
 			val = 0x105;
 		else
 			val = 0x104;
@@ -66,7 +66,7 @@
 		rts_rate = 0x1da;
 		cts_rate = 0x10a;
 	} else {
-		if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
+		if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
 			/* 11 mbit CCK */
 			rts_rate = 033;
 			cts_rate = 003;
@@ -93,7 +93,7 @@
 		return 0;
 	}
 
-	if ((ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) ||
+	if ((ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) ||
 	    vif->bss_conf.use_short_slot)
 		slottime = 9;
 
@@ -120,7 +120,7 @@
 	basic |= (vif->bss_conf.basic_rates & 0xff0) << 4;
 	rcu_read_unlock();
 
-	if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+	if (ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
 		mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */
 	else
 		mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */
@@ -512,10 +512,10 @@
 		chains = AR9170_TX_PHY_TXCHAIN_1;
 
 	switch (channel->band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		power = ar->power_2G_ofdm[0] & 0x3f;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		power = ar->power_5G_leg[0] & 0x3f;
 		break;
 	default:
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4d1527a..ffb22a0 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1666,7 +1666,7 @@
 			return err;
 	}
 
-	for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+	for (b = 0; b < NUM_NL80211_BANDS; b++) {
 		band = ar->hw->wiphy->bands[b];
 
 		if (!band)
@@ -1941,13 +1941,13 @@
 	}
 
 	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
-		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
 			&carl9170_band_2GHz;
 		chans += carl9170_band_2GHz.n_channels;
 		bands++;
 	}
 	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
-		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
 			&carl9170_band_5GHz;
 		chans += carl9170_band_5GHz.n_channels;
 		bands++;
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index dca6df1..34d9fd7 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -540,11 +540,11 @@
 	return carl9170_regwrite_result();
 }
 
-static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
+static int carl9170_init_phy(struct ar9170 *ar, enum nl80211_band band)
 {
 	int i, err;
 	u32 val;
-	bool is_2ghz = band == IEEE80211_BAND_2GHZ;
+	bool is_2ghz = band == NL80211_BAND_2GHZ;
 	bool is_40mhz = conf_is_ht40(&ar->hw->conf);
 
 	carl9170_regwrite_begin(ar);
@@ -1125,13 +1125,13 @@
 	u8 f, tmp;
 
 	switch (channel->band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		f = channel->center_freq - 2300;
 		cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
 		i = AR5416_NUM_2G_CAL_PIERS - 1;
 		break;
 
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		f = (channel->center_freq - 4800) / 5;
 		cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
 		i = AR5416_NUM_5G_CAL_PIERS - 1;
@@ -1158,12 +1158,12 @@
 			int j;
 
 			switch (channel->band) {
-			case IEEE80211_BAND_2GHZ:
+			case NL80211_BAND_2GHZ:
 				cal_pier_data = &ar->eeprom.
 					cal_pier_data_2G[chain][idx];
 				break;
 
-			case IEEE80211_BAND_5GHZ:
+			case NL80211_BAND_5GHZ:
 				cal_pier_data = &ar->eeprom.
 					cal_pier_data_5G[chain][idx];
 				break;
@@ -1340,7 +1340,7 @@
 		/* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
 		return;
 
-	if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
+	if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
 		modes = mode_list_2ghz;
 		nr_modes = ARRAY_SIZE(mode_list_2ghz);
 	} else {
@@ -1607,7 +1607,7 @@
 		return err;
 
 	err = carl9170_init_rf_banks_0_7(ar,
-					 channel->band == IEEE80211_BAND_5GHZ);
+					 channel->band == NL80211_BAND_5GHZ);
 	if (err)
 		return err;
 
@@ -1621,7 +1621,7 @@
 		return err;
 
 	err = carl9170_init_rf_bank4_pwr(ar,
-					 channel->band == IEEE80211_BAND_5GHZ,
+					 channel->band == NL80211_BAND_5GHZ,
 					 channel->center_freq, bw);
 	if (err)
 		return err;
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index d66533c..0c34c87 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -417,7 +417,7 @@
 
 			return -EINVAL;
 		}
-		if (status->band == IEEE80211_BAND_2GHZ)
+		if (status->band == NL80211_BAND_2GHZ)
 			status->rate_idx += 4;
 		break;
 
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index ae86a600..2bf04c9 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -720,12 +720,12 @@
 			/* +1 dBm for HT40 */
 			*tpc += 2;
 
-			if (info->band == IEEE80211_BAND_2GHZ)
+			if (info->band == NL80211_BAND_2GHZ)
 				txpower = ar->power_2G_ht40;
 			else
 				txpower = ar->power_5G_ht40;
 		} else {
-			if (info->band == IEEE80211_BAND_2GHZ)
+			if (info->band == NL80211_BAND_2GHZ)
 				txpower = ar->power_2G_ht20;
 			else
 				txpower = ar->power_5G_ht20;
@@ -734,7 +734,7 @@
 		*phyrate = txrate->idx;
 		*tpc += txpower[idx & 7];
 	} else {
-		if (info->band == IEEE80211_BAND_2GHZ) {
+		if (info->band == NL80211_BAND_2GHZ) {
 			if (idx < 4)
 				txpower = ar->power_2G_cck;
 			else
@@ -797,7 +797,7 @@
 		 * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
 		 */
 	} else {
-		if (info->band == IEEE80211_BAND_2GHZ) {
+		if (info->band == NL80211_BAND_2GHZ) {
 			if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
 				tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
 			else
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 06ea6cc..7e15ed9 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -336,12 +336,12 @@
 			      struct ath_regulatory *reg,
 			      enum nl80211_reg_initiator initiator)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_channel *ch;
 	unsigned int i;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		if (!wiphy->bands[band])
 			continue;
 		sband = wiphy->bands[band];
@@ -374,7 +374,7 @@
 {
 	struct ieee80211_supported_band *sband;
 
-	sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+	sband = wiphy->bands[NL80211_BAND_2GHZ];
 	if (!sband)
 		return;
 
@@ -402,10 +402,10 @@
 	struct ieee80211_channel *ch;
 	unsigned int i;
 
-	if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+	if (!wiphy->bands[NL80211_BAND_5GHZ])
 		return;
 
-	sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+	sband = wiphy->bands[NL80211_BAND_5GHZ];
 
 	for (i = 0; i < sband->n_channels; i++) {
 		ch = &sband->channels[i];
@@ -772,7 +772,7 @@
 EXPORT_SYMBOL(ath_regd_init);
 
 u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
-			  enum ieee80211_band band)
+			  enum nl80211_band band)
 {
 	if (!reg->regpair ||
 	    (reg->country_code == CTRY_DEFAULT &&
@@ -794,9 +794,9 @@
 	}
 
 	switch (band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		return reg->regpair->reg_2ghz_ctl;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		return reg->regpair->reg_5ghz_ctl;
 	default:
 		return NO_CTL;
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 37f53bd..565d307 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -255,7 +255,7 @@
 		  void (*reg_notifier)(struct wiphy *wiphy,
 				       struct regulatory_request *request));
 u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
-			  enum ieee80211_band band);
+			  enum nl80211_band band);
 void ath_reg_notifier_apply(struct wiphy *wiphy,
 			    struct regulatory_request *request,
 			    struct ath_regulatory *reg);
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
index ef44a2da..2a6bb62 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.c
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -33,9 +33,7 @@
 	char buf[3];
 
 	list_for_each_entry(vif_priv, &wcn->vif_list, list) {
-			vif = container_of((void *)vif_priv,
-				   struct ieee80211_vif,
-				   drv_priv);
+			vif = wcn36xx_priv_to_vif(vif_priv);
 			if (NL80211_IFTYPE_STATION == vif->type) {
 				if (vif_priv->pw_state == WCN36XX_BMPS)
 					buf[0] = '1';
@@ -70,9 +68,7 @@
 	case 'Y':
 	case '1':
 		list_for_each_entry(vif_priv, &wcn->vif_list, list) {
-			vif = container_of((void *)vif_priv,
-				   struct ieee80211_vif,
-				   drv_priv);
+			vif = wcn36xx_priv_to_vif(vif_priv);
 			if (NL80211_IFTYPE_STATION == vif->type) {
 				wcn36xx_enable_keep_alive_null_packet(wcn, vif);
 				wcn36xx_pmc_enter_bmps_state(wcn, vif);
@@ -83,9 +79,7 @@
 	case 'N':
 	case '0':
 		list_for_each_entry(vif_priv, &wcn->vif_list, list) {
-			vif = container_of((void *)vif_priv,
-				   struct ieee80211_vif,
-				   drv_priv);
+			vif = wcn36xx_priv_to_vif(vif_priv);
 			if (NL80211_IFTYPE_STATION == vif->type)
 				wcn36xx_pmc_exit_bmps_state(wcn, vif);
 		}
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index b947de0..658bfb8 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -48,12 +48,15 @@
 
 #define WCN36XX_HAL_IPV4_ADDR_LEN       4
 
-#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_STA_INVALID_IDX 0xFF
 #define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
 
 /* Default Beacon template size */
 #define BEACON_TEMPLATE_SIZE 0x180
 
+/* Minimum PVM size that the FW expects. See comment in smd.c for details. */
+#define TIM_MIN_PVM_SIZE 6
+
 /* Param Change Bitmap sent to HAL */
 #define PARAM_BCN_INTERVAL_CHANGED                      (1 << 0)
 #define PARAM_SHORT_PREAMBLE_CHANGED                 (1 << 1)
@@ -2884,11 +2887,14 @@
 struct wcn36xx_hal_send_beacon_req_msg {
 	struct wcn36xx_hal_msg_header header;
 
+	/* length of the template + 6. Only qcom knows why */
+	u32 beacon_length6;
+
 	/* length of the template. */
 	u32 beacon_length;
 
 	/* Beacon data. */
-	u8 beacon[BEACON_TEMPLATE_SIZE];
+	u8 beacon[BEACON_TEMPLATE_SIZE - sizeof(u32)];
 
 	u8 bssid[ETH_ALEN];
 
@@ -4261,9 +4267,9 @@
 	u8 data_offset;
 
 	u32 mc_addr_count;
-	u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
+	u8 mc_addr[WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS][ETH_ALEN];
 	u8 bss_index;
-};
+} __packed;
 
 struct wcn36xx_hal_set_pkt_filter_rsp_msg {
 	struct wcn36xx_hal_msg_header header;
@@ -4317,7 +4323,7 @@
 struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
 	struct wcn36xx_hal_msg_header header;
 	struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
-};
+} __packed;
 
 struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
 	struct wcn36xx_hal_msg_header header;
@@ -4383,6 +4389,45 @@
 	RTT = 20,
 	RATECTRL = 21,
 	WOW = 22,
+	WLAN_ROAM_SCAN_OFFLOAD = 23,
+	SPECULATIVE_PS_POLL = 24,
+	SCAN_SCH = 25,
+	IBSS_HEARTBEAT_OFFLOAD = 26,
+	WLAN_SCAN_OFFLOAD = 27,
+	WLAN_PERIODIC_TX_PTRN = 28,
+	ADVANCE_TDLS = 29,
+	BATCH_SCAN = 30,
+	FW_IN_TX_PATH = 31,
+	EXTENDED_NSOFFLOAD_SLOT = 32,
+	CH_SWITCH_V1 = 33,
+	HT40_OBSS_SCAN = 34,
+	UPDATE_CHANNEL_LIST = 35,
+	WLAN_MCADDR_FLT = 36,
+	WLAN_CH144 = 37,
+	NAN = 38,
+	TDLS_SCAN_COEXISTENCE = 39,
+	LINK_LAYER_STATS_MEAS = 40,
+	MU_MIMO = 41,
+	EXTENDED_SCAN = 42,
+	DYNAMIC_WMM_PS = 43,
+	MAC_SPOOFED_SCAN = 44,
+	BMU_ERROR_GENERIC_RECOVERY = 45,
+	DISA = 46,
+	FW_STATS = 47,
+	WPS_PRBRSP_TMPL = 48,
+	BCN_IE_FLT_DELTA = 49,
+	TDLS_OFF_CHANNEL = 51,
+	RTT3 = 52,
+	MGMT_FRAME_LOGGING = 53,
+	ENHANCED_TXBD_COMPLETION = 54,
+	LOGGING_ENHANCEMENT = 55,
+	EXT_SCAN_ENHANCED = 56,
+	MEMORY_DUMP_SUPPORTED = 57,
+	PER_PKT_STATS_SUPPORTED = 58,
+	EXT_LL_STAT = 60,
+	WIFI_CONFIG = 61,
+	ANTENNA_DIVERSITY_SELECTION = 62,
+
 	MAX_FEATURE_SUPPORTED = 128,
 };
 
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index a27279c..a920d70 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -26,14 +26,14 @@
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 
 #define CHAN2G(_freq, _idx) { \
-	.band = IEEE80211_BAND_2GHZ, \
+	.band = NL80211_BAND_2GHZ, \
 	.center_freq = (_freq), \
 	.hw_value = (_idx), \
 	.max_power = 25, \
 }
 
 #define CHAN5G(_freq, _idx) { \
-	.band = IEEE80211_BAND_5GHZ, \
+	.band = NL80211_BAND_5GHZ, \
 	.center_freq = (_freq), \
 	.hw_value = (_idx), \
 	.max_power = 25, \
@@ -201,7 +201,45 @@
 	"BCN_FILTER",			/* 19 */
 	"RTT",				/* 20 */
 	"RATECTRL",			/* 21 */
-	"WOW"				/* 22 */
+	"WOW",				/* 22 */
+	"WLAN_ROAM_SCAN_OFFLOAD",	/* 23 */
+	"SPECULATIVE_PS_POLL",		/* 24 */
+	"SCAN_SCH",			/* 25 */
+	"IBSS_HEARTBEAT_OFFLOAD",	/* 26 */
+	"WLAN_SCAN_OFFLOAD",		/* 27 */
+	"WLAN_PERIODIC_TX_PTRN",	/* 28 */
+	"ADVANCE_TDLS",			/* 29 */
+	"BATCH_SCAN",			/* 30 */
+	"FW_IN_TX_PATH",		/* 31 */
+	"EXTENDED_NSOFFLOAD_SLOT",	/* 32 */
+	"CH_SWITCH_V1",			/* 33 */
+	"HT40_OBSS_SCAN",		/* 34 */
+	"UPDATE_CHANNEL_LIST",		/* 35 */
+	"WLAN_MCADDR_FLT",		/* 36 */
+	"WLAN_CH144",			/* 37 */
+	"NAN",				/* 38 */
+	"TDLS_SCAN_COEXISTENCE",	/* 39 */
+	"LINK_LAYER_STATS_MEAS",	/* 40 */
+	"MU_MIMO",			/* 41 */
+	"EXTENDED_SCAN",		/* 42 */
+	"DYNAMIC_WMM_PS",		/* 43 */
+	"MAC_SPOOFED_SCAN",		/* 44 */
+	"BMU_ERROR_GENERIC_RECOVERY",	/* 45 */
+	"DISA",				/* 46 */
+	"FW_STATS",			/* 47 */
+	"WPS_PRBRSP_TMPL",		/* 48 */
+	"BCN_IE_FLT_DELTA",		/* 49 */
+	"TDLS_OFF_CHANNEL",		/* 51 */
+	"RTT3",				/* 52 */
+	"MGMT_FRAME_LOGGING",		/* 53 */
+	"ENHANCED_TXBD_COMPLETION",	/* 54 */
+	"LOGGING_ENHANCEMENT",		/* 55 */
+	"EXT_SCAN_ENHANCED",		/* 56 */
+	"MEMORY_DUMP_SUPPORTED",	/* 57 */
+	"PER_PKT_STATS_SUPPORTED",	/* 58 */
+	"EXT_LL_STAT",			/* 60 */
+	"WIFI_CONFIG",			/* 61 */
+	"ANTENNA_DIVERSITY_SELECTION",	/* 62 */
 };
 
 static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
@@ -287,6 +325,7 @@
 	}
 
 	wcn36xx_detect_chip_version(wcn);
+	wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST, 1);
 
 	/* DMA channel initialization */
 	ret = wcn36xx_dxe_init(wcn);
@@ -346,9 +385,7 @@
 		wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
 			    ch);
 		list_for_each_entry(tmp, &wcn->vif_list, list) {
-			vif = container_of((void *)tmp,
-					   struct ieee80211_vif,
-					   drv_priv);
+			vif = wcn36xx_priv_to_vif(tmp);
 			wcn36xx_smd_switch_channel(wcn, vif, ch);
 		}
 	}
@@ -356,15 +393,57 @@
 	return 0;
 }
 
-#define WCN36XX_SUPPORTED_FILTERS (0)
-
 static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
 				     unsigned int changed,
 				     unsigned int *total, u64 multicast)
 {
+	struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp;
+	struct wcn36xx *wcn = hw->priv;
+	struct wcn36xx_vif *tmp;
+	struct ieee80211_vif *vif = NULL;
+
 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
 
-	*total &= WCN36XX_SUPPORTED_FILTERS;
+	*total &= FIF_ALLMULTI;
+
+	fp = (void *)(unsigned long)multicast;
+	list_for_each_entry(tmp, &wcn->vif_list, list) {
+		vif = wcn36xx_priv_to_vif(tmp);
+
+		/* FW handles MC filtering only when connected as STA */
+		if (*total & FIF_ALLMULTI)
+			wcn36xx_smd_set_mc_list(wcn, vif, NULL);
+		else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc)
+			wcn36xx_smd_set_mc_list(wcn, vif, fp);
+	}
+	kfree(fp);
+}
+
+static u64 wcn36xx_prepare_multicast(struct ieee80211_hw *hw,
+				     struct netdev_hw_addr_list *mc_list)
+{
+	struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp;
+	struct netdev_hw_addr *ha;
+
+	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac prepare multicast list\n");
+	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
+	if (!fp) {
+		wcn36xx_err("Out of memory setting filters.\n");
+		return 0;
+	}
+
+	fp->mc_addr_count = 0;
+	/* update multicast filtering parameters */
+	if (netdev_hw_addr_list_count(mc_list) <=
+	    WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS) {
+		netdev_hw_addr_list_for_each(ha, mc_list) {
+			memcpy(fp->mc_addr[fp->mc_addr_count],
+					ha->addr, ETH_ALEN);
+			fp->mc_addr_count++;
+		}
+	}
+
+	return (u64)(unsigned long)fp;
 }
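
For context: mac80211 calls prepare_multicast() first and passes its u64 return value back as the "multicast" argument of configure_filter(), which is how the kzalloc()ed list survives between the two callbacks and why configure_filter() is the one that kfree()s it. A minimal sketch of filling such a list, assuming only the structures as changed in hal.h above (the helper name and the addrs parameter are illustrative):

/* Sketch only. The [MAX][ETH_ALEN] dimension swap in hal.h is what makes
 * fp->mc_addr[i] a single 6-byte MAC address slot.
 */
static void example_fill_mc_list(struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp,
				 const u8 (*addrs)[ETH_ALEN], int n)
{
	int i;

	fp->mc_addr_count = 0;
	for (i = 0; i < n && i < WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS; i++) {
		memcpy(fp->mc_addr[fp->mc_addr_count], addrs[i], ETH_ALEN);
		fp->mc_addr_count++;
	}
}
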
 
 static void wcn36xx_tx(struct ieee80211_hw *hw,
@@ -375,7 +454,7 @@
 	struct wcn36xx_sta *sta_priv = NULL;
 
 	if (control->sta)
-		sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+		sta_priv = wcn36xx_sta_to_priv(control->sta);
 
 	if (wcn36xx_start_tx(wcn, sta_priv, skb))
 		ieee80211_free_txskb(wcn->hw, skb);
@@ -387,8 +466,8 @@
 			   struct ieee80211_key_conf *key_conf)
 {
 	struct wcn36xx *wcn = hw->priv;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
-	struct wcn36xx_sta *sta_priv = vif_priv->sta;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+	struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
 	int ret = 0;
 	u8 key[WLAN_MAX_KEY_LEN];
 
@@ -473,6 +552,7 @@
 		break;
 	case DISABLE_KEY:
 		if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+			vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
 			wcn36xx_smd_remove_bsskey(wcn,
 				vif_priv->encrypt_type,
 				key_conf->keyidx);
@@ -516,11 +596,11 @@
 }
 
 static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
-					 enum ieee80211_band band)
+					 enum nl80211_band band)
 {
 	int i, size;
 	u16 *rates_table;
-	struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+	struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
 	u32 rates = sta->supp_rates[band];
 
 	memset(&sta_priv->supported_rates, 0,
@@ -529,7 +609,7 @@
 
 	size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
 	rates_table = sta_priv->supported_rates.dsss_rates;
-	if (band == IEEE80211_BAND_2GHZ) {
+	if (band == NL80211_BAND_2GHZ) {
 		for (i = 0; i < size; i++) {
 			if (rates & 0x01) {
 				rates_table[i] = wcn_2ghz_rates[i].hw_value;
@@ -590,7 +670,7 @@
 	struct sk_buff *skb = NULL;
 	u16 tim_off, tim_len;
 	enum wcn36xx_hal_link_state link_state;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
 		    vif, changed);
@@ -620,7 +700,7 @@
 
 		if (!is_zero_ether_addr(bss_conf->bssid)) {
 			vif_priv->is_joining = true;
-			vif_priv->bss_index = 0xff;
+			vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
 			wcn36xx_smd_join(wcn, bss_conf->bssid,
 					 vif->addr, WCN36XX_HW_CHANNEL(wcn));
 			wcn36xx_smd_config_bss(wcn, vif, NULL,
@@ -628,6 +708,7 @@
 		} else {
 			vif_priv->is_joining = false;
 			wcn36xx_smd_delete_bss(wcn, vif);
+			vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
 		}
 	}
 
@@ -655,6 +736,7 @@
 				     vif->addr,
 				     bss_conf->aid);
 
+			vif_priv->sta_assoc = true;
 			rcu_read_lock();
 			sta = ieee80211_find_sta(vif, bss_conf->bssid);
 			if (!sta) {
@@ -663,7 +745,7 @@
 				rcu_read_unlock();
 				goto out;
 			}
-			sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+			sta_priv = wcn36xx_sta_to_priv(sta);
 
 			wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
 
@@ -686,6 +768,7 @@
 				    bss_conf->bssid,
 				    vif->addr,
 				    bss_conf->aid);
+			vif_priv->sta_assoc = false;
 			wcn36xx_smd_set_link_st(wcn,
 						bss_conf->bssid,
 						vif->addr,
@@ -713,7 +796,7 @@
 
 		if (bss_conf->enable_beacon) {
 			vif_priv->dtim_period = bss_conf->dtim_period;
-			vif_priv->bss_index = 0xff;
+			vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
 			wcn36xx_smd_config_bss(wcn, vif, NULL,
 					       vif->addr, false);
 			skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
@@ -734,9 +817,9 @@
 			wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
 						link_state);
 		} else {
+			wcn36xx_smd_delete_bss(wcn, vif);
 			wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
 						WCN36XX_HAL_LINK_IDLE_STATE);
-			wcn36xx_smd_delete_bss(wcn, vif);
 		}
 	}
 out:
@@ -757,7 +840,7 @@
 				     struct ieee80211_vif *vif)
 {
 	struct wcn36xx *wcn = hw->priv;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
 
 	list_del(&vif_priv->list);
@@ -768,7 +851,7 @@
 				 struct ieee80211_vif *vif)
 {
 	struct wcn36xx *wcn = hw->priv;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
 		    vif, vif->type);
@@ -792,13 +875,12 @@
 			   struct ieee80211_sta *sta)
 {
 	struct wcn36xx *wcn = hw->priv;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
-	struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+	struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
 		    vif, sta->addr);
 
 	spin_lock_init(&sta_priv->ampdu_lock);
-	vif_priv->sta = sta_priv;
 	sta_priv->vif = vif_priv;
 	/*
 	 * For STA mode HW will be configured on BSS_CHANGED_ASSOC because
@@ -817,14 +899,12 @@
 			      struct ieee80211_sta *sta)
 {
 	struct wcn36xx *wcn = hw->priv;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
-	struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+	struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
 
 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
 		    vif, sta->addr, sta_priv->sta_index);
 
 	wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
-	vif_priv->sta = NULL;
 	sta_priv->vif = NULL;
 	return 0;
 }
@@ -860,7 +940,7 @@
 		    struct ieee80211_ampdu_params *params)
 {
 	struct wcn36xx *wcn = hw->priv;
-	struct wcn36xx_sta *sta_priv = NULL;
+	struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(params->sta);
 	struct ieee80211_sta *sta = params->sta;
 	enum ieee80211_ampdu_mlme_action action = params->action;
 	u16 tid = params->tid;
@@ -869,8 +949,6 @@
 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
 		    action, tid);
 
-	sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
-
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
 		sta_priv->tid = tid;
@@ -923,6 +1001,7 @@
 	.resume			= wcn36xx_resume,
 #endif
 	.config			= wcn36xx_config,
+	.prepare_multicast	= wcn36xx_prepare_multicast,
 	.configure_filter       = wcn36xx_configure_filter,
 	.tx			= wcn36xx_tx,
 	.set_key		= wcn36xx_set_key,
@@ -958,8 +1037,8 @@
 		BIT(NL80211_IFTYPE_ADHOC) |
 		BIT(NL80211_IFTYPE_MESH_POINT);
 
-	wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
-	wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+	wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz;
+	wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
 
 	wcn->hw->wiphy->cipher_suites = cipher_suites;
 	wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 28b515c..589fe5f 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -22,7 +22,7 @@
 				 struct ieee80211_vif *vif)
 {
 	int ret = 0;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 	/* TODO: Make sure the TX chain clean */
 	ret = wcn36xx_smd_enter_bmps(wcn, vif);
 	if (!ret) {
@@ -42,7 +42,7 @@
 int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
 				struct ieee80211_vif *vif)
 {
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
 	if (WCN36XX_BMPS != vif_priv->pw_state) {
 		wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 74f56a8..e8b630c 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -104,11 +104,11 @@
 		struct ieee80211_sta *sta,
 		struct wcn36xx_hal_config_bss_params *bss_params)
 {
-	if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+	if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn))
 		bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
 	else if (sta && sta->ht_cap.ht_supported)
 		bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
-	else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+	else if (sta && (sta->supp_rates[NL80211_BAND_2GHZ] & 0x7f))
 		bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
 	else
 		bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
@@ -191,16 +191,16 @@
 		struct ieee80211_sta *sta,
 		struct wcn36xx_hal_config_sta_params *sta_params)
 {
-	struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
-	struct wcn36xx_sta *priv_sta = NULL;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+	struct wcn36xx_sta *sta_priv = NULL;
 	if (vif->type == NL80211_IFTYPE_ADHOC ||
 	    vif->type == NL80211_IFTYPE_AP ||
 	    vif->type == NL80211_IFTYPE_MESH_POINT) {
 		sta_params->type = 1;
-		sta_params->sta_index = 0xFF;
+		sta_params->sta_index = WCN36XX_HAL_STA_INVALID_IDX;
 	} else {
 		sta_params->type = 0;
-		sta_params->sta_index = 1;
+		sta_params->sta_index = vif_priv->self_sta_index;
 	}
 
 	sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
@@ -215,7 +215,7 @@
 	else
 		memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
 
-	sta_params->encrypt_type = priv_vif->encrypt_type;
+	sta_params->encrypt_type = vif_priv->encrypt_type;
 	sta_params->short_preamble_supported = true;
 
 	sta_params->rifs_mode = 0;
@@ -224,21 +224,21 @@
 	sta_params->uapsd = 0;
 	sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
 	sta_params->max_ampdu_duration = 0;
-	sta_params->bssid_index = priv_vif->bss_index;
+	sta_params->bssid_index = vif_priv->bss_index;
 	sta_params->p2p = 0;
 
 	if (sta) {
-		priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+		sta_priv = wcn36xx_sta_to_priv(sta);
 		if (NL80211_IFTYPE_STATION == vif->type)
 			memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
 		else
 			memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
 		sta_params->wmm_enabled = sta->wme;
 		sta_params->max_sp_len = sta->max_sp;
-		sta_params->aid = priv_sta->aid;
+		sta_params->aid = sta_priv->aid;
 		wcn36xx_smd_set_sta_ht_params(sta, sta_params);
-		memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
-			sizeof(priv_sta->supported_rates));
+		memcpy(&sta_params->supported_rates, &sta_priv->supported_rates,
+			sizeof(sta_priv->supported_rates));
 	} else {
 		wcn36xx_set_default_rates(&sta_params->supported_rates);
 		wcn36xx_smd_set_sta_default_ht_params(sta_params);
@@ -271,6 +271,16 @@
 	return ret;
 }
 
+static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
+			 enum wcn36xx_hal_host_msg_type msg_type,
+			 size_t msg_size)
+{
+	memset(hdr, 0, msg_size + sizeof(*hdr));
+	hdr->msg_type = msg_type;
+	hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
+	hdr->len = msg_size + sizeof(*hdr);
+}
+
 #define INIT_HAL_MSG(msg_body, type) \
 	do {								\
 		memset(&msg_body, 0, sizeof(msg_body));			\
@@ -302,22 +312,6 @@
 	return 0;
 }
 
-static int wcn36xx_smd_rsp_status_check_v2(struct wcn36xx *wcn, void *buf,
-					     size_t len)
-{
-	struct wcn36xx_fw_msg_status_rsp_v2 *rsp;
-
-	if (len < sizeof(struct wcn36xx_hal_msg_header) + sizeof(*rsp))
-		return wcn36xx_smd_rsp_status_check(buf, len);
-
-	rsp = buf + sizeof(struct wcn36xx_hal_msg_header);
-
-	if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
-		return rsp->status;
-
-	return 0;
-}
-
 int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
 {
 	struct nv_data *nv_d;
@@ -726,7 +720,7 @@
 					size_t len)
 {
 	struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
-	struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
 	if (len < sizeof(*rsp))
 		return -EINVAL;
@@ -743,8 +737,8 @@
 		    "hal add sta self status %d self_sta_index %d dpu_index %d\n",
 		    rsp->status, rsp->self_sta_index, rsp->dpu_index);
 
-	priv_vif->self_sta_index = rsp->self_sta_index;
-	priv_vif->self_dpu_desc_index = rsp->dpu_index;
+	vif_priv->self_sta_index = rsp->self_sta_index;
+	vif_priv->self_dpu_desc_index = rsp->dpu_index;
 
 	return 0;
 }
@@ -949,17 +943,32 @@
 	memcpy(&v1->mac, orig->mac, ETH_ALEN);
 	v1->aid = orig->aid;
 	v1->type = orig->type;
+	v1->short_preamble_supported = orig->short_preamble_supported;
 	v1->listen_interval = orig->listen_interval;
+	v1->wmm_enabled = orig->wmm_enabled;
 	v1->ht_capable = orig->ht_capable;
-
+	v1->tx_channel_width_set = orig->tx_channel_width_set;
+	v1->rifs_mode = orig->rifs_mode;
+	v1->lsig_txop_protection = orig->lsig_txop_protection;
 	v1->max_ampdu_size = orig->max_ampdu_size;
 	v1->max_ampdu_density = orig->max_ampdu_density;
 	v1->sgi_40mhz = orig->sgi_40mhz;
 	v1->sgi_20Mhz = orig->sgi_20Mhz;
-
+	v1->rmf = orig->rmf;
+	v1->encrypt_type = orig->encrypt_type;
+	v1->action = orig->action;
+	v1->uapsd = orig->uapsd;
+	v1->max_sp_len = orig->max_sp_len;
+	v1->green_field_capable = orig->green_field_capable;
+	v1->mimo_ps = orig->mimo_ps;
+	v1->delayed_ba_support = orig->delayed_ba_support;
+	v1->max_ampdu_duration = orig->max_ampdu_duration;
+	v1->dsss_cck_mode_40mhz = orig->dsss_cck_mode_40mhz;
 	memcpy(&v1->supported_rates, &orig->supported_rates,
 	       sizeof(orig->supported_rates));
 	v1->sta_index = orig->sta_index;
+	v1->bssid_index = orig->bssid_index;
+	v1->p2p = orig->p2p;
 }
 
 static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
@@ -969,7 +978,7 @@
 {
 	struct wcn36xx_hal_config_sta_rsp_msg *rsp;
 	struct config_sta_rsp_params *params;
-	struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+	struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
 
 	if (len < sizeof(*rsp))
 		return -EINVAL;
@@ -1170,12 +1179,13 @@
 
 static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
 				      struct ieee80211_vif *vif,
+				      struct ieee80211_sta *sta,
 				      void *buf,
 				      size_t len)
 {
 	struct wcn36xx_hal_config_bss_rsp_msg *rsp;
 	struct wcn36xx_hal_config_bss_rsp_params *params;
-	struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
 	if (len < sizeof(*rsp))
 		return -EINVAL;
@@ -1198,14 +1208,15 @@
 		    params->bss_bcast_sta_idx, params->mac,
 		    params->tx_mgmt_power, params->ucast_dpu_signature);
 
-	priv_vif->bss_index = params->bss_index;
+	vif_priv->bss_index = params->bss_index;
 
-	if (priv_vif->sta) {
-		priv_vif->sta->bss_sta_index =  params->bss_sta_index;
-		priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+	if (sta) {
+		struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
+		sta_priv->bss_sta_index = params->bss_sta_index;
+		sta_priv->bss_dpu_desc_index = params->dpu_desc_index;
 	}
 
-	priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
+	vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature;
 
 	return 0;
 }
@@ -1217,7 +1228,7 @@
 	struct wcn36xx_hal_config_bss_req_msg msg;
 	struct wcn36xx_hal_config_bss_params *bss;
 	struct wcn36xx_hal_config_sta_params *sta_params;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 	int ret = 0;
 
 	mutex_lock(&wcn->hal_mutex);
@@ -1329,6 +1340,7 @@
 	}
 	ret = wcn36xx_smd_config_bss_rsp(wcn,
 					 vif,
+					 sta,
 					 wcn->hal_buf,
 					 wcn->hal_rsp_len);
 	if (ret) {
@@ -1343,13 +1355,13 @@
 int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 {
 	struct wcn36xx_hal_delete_bss_req_msg msg_body;
-	struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 	int ret = 0;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
 
-	msg_body.bss_index = priv_vif->bss_index;
+	msg_body.bss_index = vif_priv->bss_index;
 
 	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
 
@@ -1375,26 +1387,47 @@
 			    u16 p2p_off)
 {
 	struct wcn36xx_hal_send_beacon_req_msg msg_body;
-	int ret = 0;
+	int ret = 0, pad, pvm_len;
 
 	mutex_lock(&wcn->hal_mutex);
 	INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
 
-	/* TODO need to find out why this is needed? */
-	msg_body.beacon_length = skb_beacon->len + 6;
+	pvm_len = skb_beacon->data[tim_off + 1] - 3;
+	pad = TIM_MIN_PVM_SIZE - pvm_len;
 
-	if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
-		memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
-		memcpy(&(msg_body.beacon[4]), skb_beacon->data,
-		       skb_beacon->len);
-	} else {
+	/* Padding is irrelevant to mesh mode since tim_off is always 0. */
+	if (vif->type == NL80211_IFTYPE_MESH_POINT)
+		pad = 0;
+
+	msg_body.beacon_length = skb_beacon->len + pad;
+	/* TODO need to find out why + 6 is needed */
+	msg_body.beacon_length6 = msg_body.beacon_length + 6;
+
+	if (msg_body.beacon_length > BEACON_TEMPLATE_SIZE) {
 		wcn36xx_err("Beacon is too big: beacon size=%d\n",
 			      msg_body.beacon_length);
 		ret = -ENOMEM;
 		goto out;
 	}
+	memcpy(msg_body.beacon, skb_beacon->data, skb_beacon->len);
 	memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
 
+	if (pad > 0) {
+		/*
+		 * The wcn36xx FW has a fixed size for the PVM in the TIM. If
+		 * given the beacon template from mac80211 with a PVM shorter
+	 * than the FW expects, it will overwrite the data after the
+		 * TIM.
+		 */
+		wcn36xx_dbg(WCN36XX_DBG_HAL, "Pad TIM PVM. %d bytes at %d\n",
+			    pad, pvm_len);
+		memmove(&msg_body.beacon[tim_off + 5 + pvm_len + pad],
+			&msg_body.beacon[tim_off + 5 + pvm_len],
+			skb_beacon->len - (tim_off + 5 + pvm_len));
+		memset(&msg_body.beacon[tim_off + 5 + pvm_len], 0, pad);
+		msg_body.beacon[tim_off + 1] += pad;
+	}
+
 	/* TODO need to find out why this is needed? */
 	if (vif->type == NL80211_IFTYPE_MESH_POINT)
 		/* mesh beacon don't need this, so push further down */
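
To make the padding arithmetic concrete, a worked example with assumed numbers; the TIM layout (element ID, length, DTIM count, DTIM period, bitmap control, then the partial virtual bitmap at tim_off + 5) is standard 802.11, and TIM_MIN_PVM_SIZE is the new define from hal.h:

/* Hypothetical template whose TIM carries a 1-byte partial virtual bitmap:
 *
 *   skb_beacon->data[tim_off + 1] == 4   (3 fixed TIM octets + 1 PVM octet)
 *   pvm_len = 4 - 3 = 1
 *   pad     = TIM_MIN_PVM_SIZE - pvm_len = 6 - 1 = 5
 *
 * The memmove() shifts everything after the PVM up by 5 bytes, the gap is
 * zero-filled, and the TIM length byte becomes 4 + 5 = 9, so the firmware's
 * fixed-size PVM write no longer clobbers the element that follows.
 */
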
@@ -1598,8 +1631,7 @@
 		wcn36xx_err("Sending hal_remove_bsskey failed\n");
 		goto out;
 	}
-	ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
-					      wcn->hal_rsp_len);
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
 	if (ret) {
 		wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
 		goto out;
@@ -1612,7 +1644,7 @@
 int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 {
 	struct wcn36xx_hal_enter_bmps_req_msg msg_body;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 	int ret = 0;
 
 	mutex_lock(&wcn->hal_mutex);
@@ -1641,8 +1673,8 @@
 
 int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
 {
-	struct wcn36xx_hal_enter_bmps_req_msg msg_body;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_hal_exit_bmps_req_msg msg_body;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 	int ret = 0;
 
 	mutex_lock(&wcn->hal_mutex);
@@ -1703,7 +1735,7 @@
 			       int packet_type)
 {
 	struct wcn36xx_hal_keep_alive_req_msg msg_body;
-	struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 	int ret = 0;
 
 	mutex_lock(&wcn->hal_mutex);
@@ -1944,6 +1976,17 @@
 	return ret;
 }
 
+static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len)
+{
+	struct wcn36xx_hal_trigger_ba_rsp_msg *rsp;
+
+	if (len < sizeof(*rsp))
+		return -EINVAL;
+
+	rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf;
+	return rsp->status;
+}
+
 int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
 {
 	struct wcn36xx_hal_trigger_ba_req_msg msg_body;
@@ -1968,8 +2011,7 @@
 		wcn36xx_err("Sending hal_trigger_ba failed\n");
 		goto out;
 	}
-	ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
-						wcn->hal_rsp_len);
+	ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len);
 	if (ret) {
 		wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
 		goto out;
@@ -2006,9 +2048,7 @@
 		list_for_each_entry(tmp, &wcn->vif_list, list) {
 			wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
 				    tmp->bss_index);
-			vif = container_of((void *)tmp,
-						 struct ieee80211_vif,
-						 drv_priv);
+			vif = wcn36xx_priv_to_vif(tmp);
 			ieee80211_connection_loss(vif);
 		}
 		return 0;
@@ -2023,9 +2063,7 @@
 		if (tmp->bss_index == rsp->bss_index) {
 			wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
 				    rsp->bss_index);
-			vif = container_of((void *)tmp,
-						 struct ieee80211_vif,
-						 drv_priv);
+			vif = wcn36xx_priv_to_vif(tmp);
 			ieee80211_connection_loss(vif);
 			return 0;
 		}
@@ -2041,25 +2079,24 @@
 {
 	struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
 	struct wcn36xx_vif *tmp;
-	struct ieee80211_sta *sta = NULL;
+	struct ieee80211_sta *sta;
 
 	if (len != sizeof(*rsp)) {
 		wcn36xx_warn("Corrupted delete sta indication\n");
 		return -EIO;
 	}
 
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "delete station indication %pM index %d\n",
+		    rsp->addr2, rsp->sta_id);
+
 	list_for_each_entry(tmp, &wcn->vif_list, list) {
-		if (sta && (tmp->sta->sta_index == rsp->sta_id)) {
-			sta = container_of((void *)tmp->sta,
-						 struct ieee80211_sta,
-						 drv_priv);
-			wcn36xx_dbg(WCN36XX_DBG_HAL,
-				    "delete station indication %pM index %d\n",
-				    rsp->addr2,
-				    rsp->sta_id);
+		rcu_read_lock();
+		sta = ieee80211_find_sta(wcn36xx_priv_to_vif(tmp), rsp->addr2);
+		if (sta)
 			ieee80211_report_low_ack(sta, 0);
+		rcu_read_unlock();
+		if (sta)
 			return 0;
-		}
 	}
 
 	wcn36xx_warn("STA with addr %pM and index %d not found\n",
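
A note on the RCU discipline in the rewritten lookup above: ieee80211_find_sta() must be called under rcu_read_lock() and the pointer it returns is only valid while that lock is held, which is why the dereference happens inside the locked region and only a NULL test remains after the unlock. The pattern, written out as a sketch:

/* Sketch of the pattern used above; only the pointer value, never the
 * pointee, is inspected after rcu_read_unlock().
 */
rcu_read_lock();
sta = ieee80211_find_sta(wcn36xx_priv_to_vif(tmp), rsp->addr2);
if (sta)
	ieee80211_report_low_ack(sta, 0);	/* dereference while under RCU */
rcu_read_unlock();
if (sta)
	return 0;
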
@@ -2100,6 +2137,46 @@
 	mutex_unlock(&wcn->hal_mutex);
 	return ret;
 }
+
+int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
+			    struct ieee80211_vif *vif,
+			    struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp)
+{
+	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+	struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
+	int ret = 0;
+
+	mutex_lock(&wcn->hal_mutex);
+
+	msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *)
+		   wcn->hal_buf;
+	init_hal_msg(&msg_body->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ,
+		     sizeof(msg_body->mc_addr_list));
+
+	/* An empty list means all mc traffic will be received */
+	if (fp)
+		memcpy(&msg_body->mc_addr_list, fp,
+		       sizeof(msg_body->mc_addr_list));
+	else
+		msg_body->mc_addr_list.mc_addr_count = 0;
+
+	msg_body->mc_addr_list.bss_index = vif_priv->bss_index;
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
+	if (ret) {
+		wcn36xx_err("Sending HAL_8023_MULTICAST_LIST failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("HAL_8023_MULTICAST_LIST rsp failed err=%d\n", ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
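
One design note, hedged since the changelog does not spell it out: unlike INIT_HAL_MSG(), which zeroes and sizes a message held in a stack variable, the new init_hal_msg() helper takes a header pointer plus the body length, so it fits messages that are built directly in wcn->hal_buf, as the multicast-list request above is (presumably to keep the large address list off the stack). In shorthand:

/* hdr points at the start of the message inside wcn->hal_buf and msg_size
 * is the body length only, so after the call:
 *
 *   header.len == sizeof(msg_body->header) + sizeof(msg_body->mc_addr_list)
 *
 * and the whole span starting at the header has been zeroed by the helper.
 */
msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *)wcn->hal_buf;
init_hal_msg(&msg_body->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ,
	     sizeof(msg_body->mc_addr_list));
ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
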
 static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
 {
 	struct wcn36xx_hal_msg_header *msg_header = buf;
@@ -2141,6 +2218,7 @@
 	case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
 	case WCN36XX_HAL_CH_SWITCH_RSP:
 	case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+	case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
 		memcpy(wcn->hal_buf, buf, len);
 		wcn->hal_rsp_len = len;
 		complete(&wcn->hal_rsp_compl);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 8361f9e..d74d781 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -44,15 +44,6 @@
 	u32	status;
 } __packed;
 
-/* wcn3620 returns this for tigger_ba */
-
-struct wcn36xx_fw_msg_status_rsp_v2 {
-	u8	bss_id[6];
-	u32	status __packed;
-	u16	count_following_candidates __packed;
-	/* candidate list follows */
-};
-
 struct wcn36xx_hal_ind_msg {
 	struct list_head list;
 	u8 *msg;
@@ -136,4 +127,7 @@
 int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
 
 int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
+			    struct ieee80211_vif *vif,
+			    struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp);
 #endif	/* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 9bec823..1f34c2e 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,7 +57,7 @@
 		       RX_FLAG_MMIC_STRIPPED |
 		       RX_FLAG_DECRYPTED;
 
-	wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
+	wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag);
 
 	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
 
@@ -102,9 +102,7 @@
 	struct wcn36xx_vif *vif_priv = NULL;
 	struct ieee80211_vif *vif = NULL;
 	list_for_each_entry(vif_priv, &wcn->vif_list, list) {
-			vif = container_of((void *)vif_priv,
-				   struct ieee80211_vif,
-				   drv_priv);
+			vif = wcn36xx_priv_to_vif(vif_priv);
 			if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
 				return vif_priv;
 	}
@@ -167,9 +165,7 @@
 	 */
 	if (sta_priv) {
 		__vif_priv = sta_priv->vif;
-		vif = container_of((void *)__vif_priv,
-				   struct ieee80211_vif,
-				   drv_priv);
+		vif = wcn36xx_priv_to_vif(__vif_priv);
 
 		bd->dpu_sign = sta_priv->ucast_dpu_sign;
 		if (vif->type == NL80211_IFTYPE_STATION) {
@@ -225,7 +221,7 @@
 
 	/* default rate for unicast */
 	if (ieee80211_is_mgmt(hdr->frame_control))
-		bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+		bd->bd_rate = (WCN36XX_BAND(wcn) == NL80211_BAND_5GHZ) ?
 			WCN36XX_BD_RATE_CTRL :
 			WCN36XX_BD_RATE_MGMT;
 	else if (ieee80211_is_ctl(hdr->frame_control))
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 7b41e83..7433d67 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -125,10 +125,10 @@
  */
 struct wcn36xx_vif {
 	struct list_head list;
-	struct wcn36xx_sta *sta;
 	u8 dtim_period;
 	enum ani_ed_type encrypt_type;
 	bool is_joining;
+	bool sta_assoc;
 	struct wcn36xx_hal_mac_ssid ssid;
 
 	/* Power management */
@@ -263,4 +263,22 @@
 	return container_of((void *)sta_priv, struct ieee80211_sta, drv_priv);
 }
 
+static inline
+struct wcn36xx_vif *wcn36xx_vif_to_priv(struct ieee80211_vif *vif)
+{
+	return (struct wcn36xx_vif *) vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wcn36xx_priv_to_vif(struct wcn36xx_vif *vif_priv)
+{
+	return container_of((void *) vif_priv, struct ieee80211_vif, drv_priv);
+}
+
+static inline
+struct wcn36xx_sta *wcn36xx_sta_to_priv(struct ieee80211_sta *sta)
+{
+	return (struct wcn36xx_sta *)sta->drv_priv;
+}
+
 #endif	/* _WCN36XX_H_ */
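
The three accessors added at the end of wcn36xx.h encode the usual mac80211 convention: the driver's private state is embedded in ieee80211_vif / ieee80211_sta as drv_priv, so each conversion is a constant pointer offset and the pair of helpers round-trips to the same object. A minimal sketch (the vif variable is assumed to exist):

/* drv_priv lives inside ieee80211_vif, so no allocation is involved and
 * converting there and back yields the original pointer.
 */
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct ieee80211_vif *again = wcn36xx_priv_to_vif(vif_priv);

WARN_ON(again != vif);	/* never fires: both are views of the same object */
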
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index fdf63d5..11b544b 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -18,6 +18,7 @@
 wil6210-y += wil_platform.o
 wil6210-y += ethtool.o
 wil6210-y += wil_crash_dump.o
+wil6210-y += p2p.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 11f1bb8..0fb3a79 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -18,8 +18,10 @@
 #include "wil6210.h"
 #include "wmi.h"
 
+#define WIL_MAX_ROC_DURATION_MS 5000
+
 #define CHAN60G(_channel, _flags) {				\
-	.band			= IEEE80211_BAND_60GHZ,		\
+	.band			= NL80211_BAND_60GHZ,		\
 	.center_freq		= 56160 + (2160 * (_channel)),	\
 	.hw_value		= (_channel),			\
 	.flags			= (_flags),			\
@@ -76,12 +78,24 @@
 		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
 		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
 	},
+	[NL80211_IFTYPE_P2P_DEVICE] = {
+		.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+		BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+	},
 };
 
 static const u32 wil_cipher_suites[] = {
 	WLAN_CIPHER_SUITE_GCMP,
 };
 
+static const char * const key_usage_str[] = {
+	[WMI_KEY_USE_PAIRWISE]	= "PTK",
+	[WMI_KEY_USE_RX_GROUP]	= "RX_GTK",
+	[WMI_KEY_USE_TX_GROUP]	= "TX_GTK",
+};
+
 int wil_iftype_nl2wmi(enum nl80211_iftype type)
 {
 	static const struct {
@@ -113,7 +127,7 @@
 		.interval_usec = 0,
 	};
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_notify_req_done_event evt;
 	} __packed reply;
 	struct wil_net_stats *stats = &wil->sta[cid].stats;
@@ -226,13 +240,82 @@
 	return rc;
 }
 
+static struct wireless_dev *
+wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
+		       unsigned char name_assign_type,
+		       enum nl80211_iftype type,
+		       u32 *flags, struct vif_params *params)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	struct net_device *ndev = wil_to_ndev(wil);
+	struct wireless_dev *p2p_wdev;
+
+	wil_dbg_misc(wil, "%s()\n", __func__);
+
+	if (type != NL80211_IFTYPE_P2P_DEVICE) {
+		wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (wil->p2p_wdev) {
+		wil_err(wil, "%s: P2P_DEVICE interface already created\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL);
+	if (!p2p_wdev)
+		return ERR_PTR(-ENOMEM);
+
+	p2p_wdev->iftype = type;
+	p2p_wdev->wiphy = wiphy;
+	/* use our primary ethernet address */
+	ether_addr_copy(p2p_wdev->address, ndev->perm_addr);
+
+	wil->p2p_wdev = p2p_wdev;
+
+	return p2p_wdev;
+}
+
+static int wil_cfg80211_del_iface(struct wiphy *wiphy,
+				  struct wireless_dev *wdev)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+	wil_dbg_misc(wil, "%s()\n", __func__);
+
+	if (wdev != wil->p2p_wdev) {
+		wil_err(wil, "%s: delete of incorrect interface 0x%p\n",
+			__func__, wdev);
+		return -EINVAL;
+	}
+
+	wil_p2p_wdev_free(wil);
+
+	return 0;
+}
+
 static int wil_cfg80211_change_iface(struct wiphy *wiphy,
 				     struct net_device *ndev,
 				     enum nl80211_iftype type, u32 *flags,
 				     struct vif_params *params)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-	struct wireless_dev *wdev = wil->wdev;
+	struct wireless_dev *wdev = wil_to_wdev(wil);
+	int rc;
+
+	wil_dbg_misc(wil, "%s() type=%d\n", __func__, type);
+
+	if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) {
+		wil_dbg_misc(wil, "interface is up. resetting...\n");
+		mutex_lock(&wil->mutex);
+		__wil_down(wil);
+		rc = __wil_up(wil);
+		mutex_unlock(&wil->mutex);
+
+		if (rc)
+			return rc;
+	}
 
 	switch (type) {
 	case NL80211_IFTYPE_STATION:
@@ -260,7 +343,7 @@
 			     struct cfg80211_scan_request *request)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-	struct wireless_dev *wdev = wil->wdev;
+	struct wireless_dev *wdev = request->wdev;
 	struct {
 		struct wmi_start_scan_cmd cmd;
 		u16 chnl[4];
@@ -268,6 +351,9 @@
 	uint i, n;
 	int rc;
 
+	wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
+		     __func__, wdev, wdev->iftype);
+
 	if (wil->scan_request) {
 		wil_err(wil, "Already scanning\n");
 		return -EAGAIN;
@@ -277,6 +363,7 @@
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -288,6 +375,20 @@
 		return -EBUSY;
 	}
 
+	/* scan on P2P_DEVICE is handled as p2p search */
+	if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+		wil->scan_request = request;
+		wil->radio_wdev = wdev;
+		rc = wil_p2p_search(wil, request);
+		if (rc) {
+			wil->radio_wdev = wil_to_wdev(wil);
+			wil->scan_request = NULL;
+		}
+		return rc;
+	}
+
+	(void)wil_p2p_stop_discovery(wil);
+
 	wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
 	wil_dbg_misc(wil, "SSID count: %d", request->n_ssids);
 
@@ -313,6 +414,7 @@
 	mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
 
 	memset(&cmd, 0, sizeof(cmd));
+	cmd.cmd.scan_type = WMI_ACTIVE_SCAN;
 	cmd.cmd.num_channels = 0;
 	n = min(request->n_channels, 4U);
 	for (i = 0; i < n; i++) {
@@ -340,12 +442,19 @@
 	if (rc)
 		goto out;
 
+	if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
+		cmd.cmd.discovery_mode = 1;
+		wil_dbg_misc(wil, "active scan with discovery_mode=1\n");
+	}
+
+	wil->radio_wdev = wdev;
 	rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
 			cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
 
 out:
 	if (rc) {
 		del_timer_sync(&wil->scan_timer);
+		wil->radio_wdev = wil_to_wdev(wil);
 		wil->scan_request = NULL;
 	}
 
@@ -390,6 +499,7 @@
 		print_hex_dump(KERN_INFO, "  SSID: ", DUMP_PREFIX_OFFSET,
 			       16, 1, sme->ssid, sme->ssid_len, true);
 	wil_info(wil, "  Privacy: %s\n", sme->privacy ? "secure" : "open");
+	wil_info(wil, "  PBSS: %d\n", sme->pbss);
 	wil_print_crypto(wil, &sme->crypto);
 }
 
@@ -404,7 +514,9 @@
 	const u8 *rsn_eid;
 	int ch;
 	int rc = 0;
+	enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
 
+	wil_dbg_misc(wil, "%s()\n", __func__);
 	wil_print_connect_params(wil, sme);
 
 	if (test_bit(wil_status_fwconnecting, wil->status) ||
@@ -422,14 +534,12 @@
 	if (sme->privacy && !rsn_eid)
 		wil_info(wil, "WSC connection\n");
 
-	if (sme->pbss) {
-		wil_err(wil, "connect - PBSS not yet supported\n");
-		return -EOPNOTSUPP;
-	}
+	if (sme->pbss)
+		bss_type = IEEE80211_BSS_TYPE_PBSS;
 
 	bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
 			       sme->ssid, sme->ssid_len,
-			       IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+			       bss_type, IEEE80211_PRIVACY_ANY);
 	if (!bss) {
 		wil_err(wil, "Unable to find BSS\n");
 		return -ENOENT;
@@ -568,10 +678,20 @@
 	struct ieee80211_mgmt *mgmt_frame = (void *)buf;
 	struct wmi_sw_tx_req_cmd *cmd;
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_sw_tx_complete_event evt;
 	} __packed evt;
 
+	/* Note: the "wait" parameter is not currently supported. User-space
+	 * must call remain_on_channel before mgmt_tx, or listen on a channel
+	 * some other way (AP/PCP or connected station).
+	 * In addition we need to check whether the specified "chan" argument
+	 * differs from the currently "listened" channel and fail if it does.
+	 */
+
+	wil_dbg_misc(wil, "%s()\n", __func__);
+	print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
+
 	cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
 	if (!cmd) {
 		rc = -ENOMEM;
@@ -598,7 +718,7 @@
 				    struct cfg80211_chan_def *chandef)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-	struct wireless_dev *wdev = wil->wdev;
+	struct wireless_dev *wdev = wil_to_wdev(wil);
 
 	wdev->preset_chandef = *chandef;
 
@@ -608,22 +728,19 @@
 static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
 					       bool pairwise)
 {
-	struct wireless_dev *wdev = wil->wdev;
+	struct wireless_dev *wdev = wil_to_wdev(wil);
 	enum wmi_key_usage rc;
-	static const char * const key_usage_str[] = {
-		[WMI_KEY_USE_PAIRWISE]	= "WMI_KEY_USE_PAIRWISE",
-		[WMI_KEY_USE_RX_GROUP]	= "WMI_KEY_USE_RX_GROUP",
-		[WMI_KEY_USE_TX_GROUP]	= "WMI_KEY_USE_TX_GROUP",
-	};
 
 	if (pairwise) {
 		rc = WMI_KEY_USE_PAIRWISE;
 	} else {
 		switch (wdev->iftype) {
 		case NL80211_IFTYPE_STATION:
+		case NL80211_IFTYPE_P2P_CLIENT:
 			rc = WMI_KEY_USE_RX_GROUP;
 			break;
 		case NL80211_IFTYPE_AP:
+		case NL80211_IFTYPE_P2P_GO:
 			rc = WMI_KEY_USE_TX_GROUP;
 			break;
 		default:
@@ -638,20 +755,86 @@
 	return rc;
 }
 
+static struct wil_tid_crypto_rx_single *
+wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
+		    enum wmi_key_usage key_usage, const u8 *mac_addr)
+{
+	int cid = -EINVAL;
+	int tid = 0;
+	struct wil_sta_info *s;
+	struct wil_tid_crypto_rx *c;
+
+	if (key_usage == WMI_KEY_USE_TX_GROUP)
+		return NULL; /* not needed */
+
+	/* supplicant provides Rx group key in STA mode with NULL MAC address */
+	if (mac_addr)
+		cid = wil_find_cid(wil, mac_addr);
+	else if (key_usage == WMI_KEY_USE_RX_GROUP)
+		cid = wil_find_cid_by_idx(wil, 0);
+	if (cid < 0) {
+		wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
+			key_usage_str[key_usage], key_index);
+		return ERR_PTR(cid);
+	}
+
+	s = &wil->sta[cid];
+	if (key_usage == WMI_KEY_USE_PAIRWISE)
+		c = &s->tid_crypto_rx[tid];
+	else
+		c = &s->group_crypto_rx;
+
+	return &c->key_id[key_index];
+}
+
 static int wil_cfg80211_add_key(struct wiphy *wiphy,
 				struct net_device *ndev,
 				u8 key_index, bool pairwise,
 				const u8 *mac_addr,
 				struct key_params *params)
 {
+	int rc;
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
+	struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
+								  key_index,
+								  key_usage,
+								  mac_addr);
 
-	wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
-		     pairwise ? "PTK" : "GTK");
+	wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
+		     mac_addr, key_usage_str[key_usage], key_index,
+		     params->seq_len, params->seq);
 
-	return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
-				  params->key, key_usage);
+	if (IS_ERR(cc)) {
+		wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
+			__func__, mac_addr, key_usage_str[key_usage], key_index,
+			params->seq_len, params->seq);
+		return -EINVAL;
+	}
+
+	if (cc)
+		cc->key_set = false;
+
+	if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
+		wil_err(wil,
+			"Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n",
+			params->seq_len, __func__, mac_addr,
+			key_usage_str[key_usage], key_index,
+			params->seq_len, params->seq);
+		return -EINVAL;
+	}
+
+	rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
+				params->key, key_usage);
+	if ((rc == 0) && cc) {
+		if (params->seq)
+			memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
+		else
+			memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+		cc->key_set = true;
+	}
+
+	return rc;
 }
 
 static int wil_cfg80211_del_key(struct wiphy *wiphy,
@@ -661,9 +844,20 @@
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
+	struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
+								  key_index,
+								  key_usage,
+								  mac_addr);
 
-	wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
-		     pairwise ? "PTK" : "GTK");
+	wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
+		     key_usage_str[key_usage], key_index);
+
+	if (IS_ERR(cc))
+		wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
+			 mac_addr, key_usage_str[key_usage], key_index);
+
+	if (!IS_ERR_OR_NULL(cc))
+		cc->key_set = false;
 
 	return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
 }
@@ -674,6 +868,9 @@
 					u8 key_index, bool unicast,
 					bool multicast)
 {
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+	wil_dbg_misc(wil, "%s: entered\n", __func__);
 	return 0;
 }
 
@@ -686,16 +883,19 @@
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	int rc;
 
-	/* TODO: handle duration */
-	wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration);
+	wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
+		     __func__, chan->center_freq, duration, wdev->iftype);
 
-	rc = wmi_set_channel(wil, chan->hw_value);
+	rc = wil_p2p_listen(wil, duration, chan, cookie);
 	if (rc)
 		return rc;
 
-	rc = wmi_rxon(wil, true);
+	wil->radio_wdev = wdev;
 
-	return rc;
+	cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
+				  GFP_KERNEL);
+
+	return 0;
 }
 
 static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -703,13 +903,10 @@
 					u64 cookie)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-	int rc;
 
-	wil_info(wil, "%s()\n", __func__);
+	wil_dbg_misc(wil, "%s()\n", __func__);
 
-	rc = wmi_rxon(wil, false);
-
-	return rc;
+	return wil_p2p_cancel_listen(wil, cookie);
 }
 
 /**
@@ -852,12 +1049,22 @@
 				  const u8 *ssid, size_t ssid_len, u32 privacy,
 				  int bi, u8 chan,
 				  struct cfg80211_beacon_data *bcon,
-				  u8 hidden_ssid)
+				  u8 hidden_ssid, u32 pbss)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	int rc;
 	struct wireless_dev *wdev = ndev->ieee80211_ptr;
 	u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+	u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO);
+
+	if (pbss)
+		wmi_nettype = WMI_NETTYPE_P2P;
+
+	wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go);
+	if (is_go && !pbss) {
+		wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__);
+		return -ENOTSUPP;
+	}
 
 	wil_set_recovery_state(wil, fw_recovery_idle);
 
@@ -879,10 +1086,11 @@
 	wil->privacy = privacy;
 	wil->channel = chan;
 	wil->hidden_ssid = hidden_ssid;
+	wil->pbss = pbss;
 
 	netif_carrier_on(ndev);
 
-	rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
+	rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go);
 	if (rc)
 		goto err_pcp_start;
 
@@ -928,7 +1136,8 @@
 					    wdev->ssid_len, privacy,
 					    wdev->beacon_interval,
 					    wil->channel, bcon,
-					    wil->hidden_ssid);
+					    wil->hidden_ssid,
+					    wil->pbss);
 	} else {
 		rc = _wil_cfg80211_set_ies(wiphy, bcon);
 	}
@@ -954,11 +1163,6 @@
 		return -EINVAL;
 	}
 
-	if (info->pbss) {
-		wil_err(wil, "AP: PBSS not yet supported\n");
-		return -EOPNOTSUPP;
-	}
-
 	switch (info->hidden_ssid) {
 	case NL80211_HIDDEN_SSID_NOT_IN_USE:
 		hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
@@ -984,6 +1188,7 @@
 		     info->hidden_ssid);
 	wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
 		     info->dtim_period);
+	wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
 	print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
 			     info->ssid, info->ssid_len);
 	wil_print_bcon_data(bcon);
@@ -992,7 +1197,7 @@
 	rc = _wil_cfg80211_start_ap(wiphy, ndev,
 				    info->ssid, info->ssid_len, info->privacy,
 				    info->beacon_interval, channel->hw_value,
-				    bcon, hidden_ssid);
+				    bcon, hidden_ssid, info->pbss);
 
 	return rc;
 }
@@ -1139,7 +1344,26 @@
 	return 0;
 }
 
+static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
+					 struct wireless_dev *wdev)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+	wil_dbg_misc(wil, "%s: entered\n", __func__);
+	return 0;
+}
+
+static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
+					 struct wireless_dev *wdev)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+	wil_dbg_misc(wil, "%s: entered\n", __func__);
+}
+
 static struct cfg80211_ops wil_cfg80211_ops = {
+	.add_virtual_intf = wil_cfg80211_add_iface,
+	.del_virtual_intf = wil_cfg80211_del_iface,
 	.scan = wil_cfg80211_scan,
 	.connect = wil_cfg80211_connect,
 	.disconnect = wil_cfg80211_disconnect,
@@ -1160,20 +1384,25 @@
 	.del_station = wil_cfg80211_del_station,
 	.probe_client = wil_cfg80211_probe_client,
 	.change_bss = wil_cfg80211_change_bss,
+	/* P2P device */
+	.start_p2p_device = wil_cfg80211_start_p2p_device,
+	.stop_p2p_device = wil_cfg80211_stop_p2p_device,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)
 {
 	wiphy->max_scan_ssids = 1;
 	wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
+	wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS;
 	wiphy->max_num_pmkids = 0 /* TODO: */;
 	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 				 BIT(NL80211_IFTYPE_AP) |
+				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+				 BIT(NL80211_IFTYPE_P2P_GO) |
+				 BIT(NL80211_IFTYPE_P2P_DEVICE) |
 				 BIT(NL80211_IFTYPE_MONITOR);
-	/* TODO: enable P2P when integrated with supplicant:
-	 * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
-	 */
 	wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
 			WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
 	dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
 		__func__, wiphy->flags);
@@ -1182,7 +1411,7 @@
 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
 
-	wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+	wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz;
 
 	/* TODO: figure this out */
 	wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
@@ -1241,3 +1470,18 @@
 	wiphy_free(wdev->wiphy);
 	kfree(wdev);
 }
+
+void wil_p2p_wdev_free(struct wil6210_priv *wil)
+{
+	struct wireless_dev *p2p_wdev;
+
+	mutex_lock(&wil->p2p_wdev_mutex);
+	p2p_wdev = wil->p2p_wdev;
+	if (p2p_wdev) {
+		wil->p2p_wdev = NULL;
+		wil->radio_wdev = wil_to_wdev(wil);
+		cfg80211_unregister_wdev(p2p_wdev);
+		kfree(p2p_wdev);
+	}
+	mutex_unlock(&wil->p2p_wdev_mutex);
+}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 3bbe73b..b338a09 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -37,6 +37,7 @@
 	doff_x32 = 1,
 	doff_ulong = 2,
 	doff_io32 = 3,
+	doff_u8 = 4
 };
 
 /* offset to "wil" */
@@ -346,6 +347,10 @@
 							 tbl[i].mode, dbg,
 							 base + tbl[i].off);
 			break;
+		case doff_u8:
+			f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
+					      base + tbl[i].off);
+			break;
 		default:
 			f = ERR_PTR(-EINVAL);
 		}
@@ -821,13 +826,13 @@
 				  size_t len, loff_t *ppos)
 {
 	struct wil6210_priv *wil = file->private_data;
-	struct wil6210_mbox_hdr_wmi *wmi;
+	struct wmi_cmd_hdr *wmi;
 	void *cmd;
-	int cmdlen = len - sizeof(struct wil6210_mbox_hdr_wmi);
+	int cmdlen = len - sizeof(struct wmi_cmd_hdr);
 	u16 cmdid;
 	int rc, rc1;
 
-	if (cmdlen <= 0)
+	if (cmdlen < 0)
 		return -EINVAL;
 
 	wmi = kmalloc(len, GFP_KERNEL);
@@ -840,8 +845,8 @@
 		return rc;
 	}
 
-	cmd = &wmi[1];
-	cmdid = le16_to_cpu(wmi->id);
+	cmd = (cmdlen > 0) ? &wmi[1] : NULL;
+	cmdid = le16_to_cpu(wmi->command_id);
 
 	rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
 	kfree(wmi);
@@ -985,7 +990,7 @@
 		.interval_usec = 0,
 	};
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_notify_req_done_event evt;
 	} __packed reply;
 
@@ -1333,6 +1338,34 @@
 		   r->ssn_last_drop);
 }
 
+static void wil_print_rxtid_crypto(struct seq_file *s, int tid,
+				   struct wil_tid_crypto_rx *c)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
+
+		if (cc->key_set)
+			goto has_keys;
+	}
+	return;
+
+has_keys:
+	if (tid < WIL_STA_TID_NUM)
+		seq_printf(s, "  [%2d] PN", tid);
+	else
+		seq_puts(s, "  [GR] PN");
+
+	for (i = 0; i < 4; i++) {
+		struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
+
+		seq_printf(s, " [%i%s]%6phN", i, cc->key_set ? "+" : "-",
+			   cc->pn);
+	}
+	seq_puts(s, "\n");
+}
+
 static int wil_sta_debugfs_show(struct seq_file *s, void *data)
 __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
 {
@@ -1360,18 +1393,25 @@
 			spin_lock_bh(&p->tid_rx_lock);
 			for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
 				struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
+				struct wil_tid_crypto_rx *c =
+						&p->tid_crypto_rx[tid];
 
 				if (r) {
-					seq_printf(s, "[%2d] ", tid);
+					seq_printf(s, "  [%2d] ", tid);
 					wil_print_rxtid(s, r);
 				}
+
+				wil_print_rxtid_crypto(s, tid, c);
 			}
+			wil_print_rxtid_crypto(s, WIL_STA_TID_NUM,
+					       &p->group_crypto_rx);
 			spin_unlock_bh(&p->tid_rx_lock);
 			seq_printf(s,
-				   "Rx invalid frame: non-data %lu, short %lu, large %lu\n",
+				   "Rx invalid frame: non-data %lu, short %lu, large %lu, replay %lu\n",
 				   p->stats.rx_non_data_frame,
 				   p->stats.rx_short_frame,
-				   p->stats.rx_large_frame);
+				   p->stats.rx_large_frame,
+				   p->stats.rx_replay);
 
 			seq_puts(s, "Rx/MCS:");
 			for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
@@ -1487,6 +1527,7 @@
 	WIL_FIELD(hw_version,	S_IRUGO,		doff_x32),
 	WIL_FIELD(recovery_count, S_IRUGO,		doff_u32),
 	WIL_FIELD(ap_isolate,	S_IRUGO,		doff_u32),
+	WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR,	doff_u8),
 	{},
 };
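
For reference, the new doff_u8 case means a table entry such as the discovery_mode line above boils down to a debugfs_create_u8() call on the field inside struct wil6210_priv. Roughly, assuming WIL_FIELD() expands to the field name plus its offsetof() as the other doff_* cases suggest:

/* Approximate expansion of the discovery_mode table entry. */
debugfs_create_u8("discovery_mode", S_IRUGO | S_IWUSR, dbg,
		  (u8 *)wil + offsetof(struct wil6210_priv, discovery_mode));
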
 
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 4f2ffa5..fe66b2b 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -391,12 +391,14 @@
 	wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
 
 	if (isr & ISR_MISC_FW_ERROR) {
+		wil->recovery_state = fw_recovery_pending;
 		wil_fw_core_dump(wil);
 		wil_notify_fw_error(wil);
 		isr &= ~ISR_MISC_FW_ERROR;
-		if (wil->platform_ops.notify_crash) {
+		if (wil->platform_ops.notify) {
 			wil_err(wil, "notify platform driver about FW crash");
-			wil->platform_ops.notify_crash(wil->platform_handle);
+			wil->platform_ops.notify(wil->platform_handle,
+						 WIL_PLATFORM_EVT_FW_CRASH);
 		} else {
 			wil_fw_error_recovery(wil);
 		}
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index f7f9486..6303800 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -161,13 +161,20 @@
 
 int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
 {
+	int ret;
+
 	switch (cmd) {
 	case WIL_IOCTL_MEMIO:
-		return wil_ioc_memio_dword(wil, data);
+		ret = wil_ioc_memio_dword(wil, data);
+		break;
 	case WIL_IOCTL_MEMIO_BLOCK:
-		return wil_ioc_memio_block(wil, data);
+		ret = wil_ioc_memio_block(wil, data);
+		break;
 	default:
 		wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
 		return -ENOIOCTLCMD;
 	}
+
+	wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
+	return ret;
 }
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 78ba6e0..8d4e884 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -27,6 +27,11 @@
 module_param(debug_fw, bool, S_IRUGO);
 MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
 
+static bool oob_mode;
+module_param(oob_mode, bool, S_IRUGO);
+MODULE_PARM_DESC(oob_mode,
+		 " enable out of the box (OOB) mode in FW, for diagnostics and certification");
+
 bool no_fw_recovery;
 module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@@ -149,7 +154,7 @@
 	might_sleep();
 	wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
 		     sta->status);
-
+	/* inform upper/lower layers */
 	if (sta->status != wil_sta_unused) {
 		if (!from_event)
 			wmi_disconnect_sta(wil, sta->addr, reason_code, true);
@@ -165,7 +170,7 @@
 		}
 		sta->status = wil_sta_unused;
 	}
-
+	/* reorder buffers */
 	for (i = 0; i < WIL_STA_TID_NUM; i++) {
 		struct wil_tid_ampdu_rx *r;
 
@@ -177,10 +182,15 @@
 
 		spin_unlock_bh(&sta->tid_rx_lock);
 	}
+	/* crypto context */
+	memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
+	memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
+	/* release vrings */
 	for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
 		if (wil->vring2cid_tid[i][0] == cid)
 			wil_vring_fini_tx(wil, i);
 	}
+	/* statistics */
 	memset(&sta->stats, 0, sizeof(sta->stats));
 }
 
@@ -300,6 +310,11 @@
 	wake_up_interruptible(&wil->wq);
 }
 
+bool wil_is_recovery_blocked(struct wil6210_priv *wil)
+{
+	return no_fw_recovery && (wil->recovery_state == fw_recovery_pending);
+}
+
 static void wil_fw_error_worker(struct work_struct *work)
 {
 	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
@@ -440,9 +455,8 @@
 
 	mutex_init(&wil->mutex);
 	mutex_init(&wil->wmi_mutex);
-	mutex_init(&wil->back_rx_mutex);
-	mutex_init(&wil->back_tx_mutex);
 	mutex_init(&wil->probe_client_mutex);
+	mutex_init(&wil->p2p_wdev_mutex);
 
 	init_completion(&wil->wmi_ready);
 	init_completion(&wil->wmi_call);
@@ -450,17 +464,15 @@
 	wil->bcast_vring = -1;
 	setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
 	setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
+	setup_timer(&wil->p2p.discovery_timer, wil_p2p_discovery_timer_fn,
+		    (ulong)wil);
 
 	INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
 	INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
 	INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
-	INIT_WORK(&wil->back_rx_worker, wil_back_rx_worker);
-	INIT_WORK(&wil->back_tx_worker, wil_back_tx_worker);
 	INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker);
 
 	INIT_LIST_HEAD(&wil->pending_wmi_ev);
-	INIT_LIST_HEAD(&wil->back_rx_pending);
-	INIT_LIST_HEAD(&wil->back_tx_pending);
 	INIT_LIST_HEAD(&wil->probe_client_pending);
 	spin_lock_init(&wil->wmi_ev_lock);
 	init_waitqueue_head(&wil->wq);
@@ -514,16 +526,14 @@
 
 	wil_set_recovery_state(wil, fw_recovery_idle);
 	del_timer_sync(&wil->scan_timer);
+	del_timer_sync(&wil->p2p.discovery_timer);
 	cancel_work_sync(&wil->disconnect_worker);
 	cancel_work_sync(&wil->fw_error_worker);
+	cancel_work_sync(&wil->p2p.discovery_expired_work);
 	mutex_lock(&wil->mutex);
 	wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
 	mutex_unlock(&wil->mutex);
 	wmi_event_flush(wil);
-	wil_back_rx_flush(wil);
-	cancel_work_sync(&wil->back_rx_worker);
-	wil_back_tx_flush(wil);
-	cancel_work_sync(&wil->back_tx_worker);
 	wil_probe_client_flush(wil);
 	cancel_work_sync(&wil->probe_client_worker);
 	destroy_workqueue(wil->wq_service);
@@ -542,6 +552,16 @@
 	wil_w(wil, RGF_USER_USER_CPU_0, 1);
 }
 
+static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
+{
+	wil_info(wil, "%s: enable=%d\n", __func__, enable);
+	if (enable) {
+		wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+	} else {
+		wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+	}
+}
+
 static int wil_target_reset(struct wil6210_priv *wil)
 {
 	int delay = 0;
@@ -637,6 +657,7 @@
 static int wil_get_bl_info(struct wil6210_priv *wil)
 {
 	struct net_device *ndev = wil_to_ndev(wil);
+	struct wiphy *wiphy = wil_to_wiphy(wil);
 	union {
 		struct bl_dedicated_registers_v0 bl0;
 		struct bl_dedicated_registers_v1 bl1;
@@ -681,6 +702,7 @@
 	}
 
 	ether_addr_copy(ndev->perm_addr, mac);
+	ether_addr_copy(wiphy->perm_addr, mac);
 	if (!is_valid_ether_addr(ndev->dev_addr))
 		ether_addr_copy(ndev->dev_addr, mac);
 
@@ -767,6 +789,15 @@
 	if (wil->hw_version == HW_VER_UNKNOWN)
 		return -ENODEV;
 
+	if (wil->platform_ops.notify) {
+		rc = wil->platform_ops.notify(wil->platform_handle,
+					      WIL_PLATFORM_EVT_PRE_RESET);
+		if (rc)
+			wil_err(wil,
+				"%s: PRE_RESET platform notify failed, rc %d\n",
+				__func__, rc);
+	}
+
 	set_bit(wil_status_resetting, wil->status);
 
 	cancel_work_sync(&wil->disconnect_worker);
@@ -807,6 +838,7 @@
 	if (rc)
 		return rc;
 
+	wil_set_oob_mode(wil, oob_mode);
 	if (load_fw) {
 		wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
 			 WIL_FW2_NAME);
@@ -846,8 +878,27 @@
 
 		/* we just started MAC, wait for FW ready */
 		rc = wil_wait_for_fw_ready(wil);
-		if (rc == 0) /* check FW is responsive */
-			rc = wmi_echo(wil);
+		if (rc)
+			return rc;
+
+		/* check FW is responsive */
+		rc = wmi_echo(wil);
+		if (rc) {
+			wil_err(wil, "%s: wmi_echo failed, rc %d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		if (wil->platform_ops.notify) {
+			rc = wil->platform_ops.notify(wil->platform_handle,
+						      WIL_PLATFORM_EVT_FW_RDY);
+			if (rc) {
+				wil_err(wil,
+					"%s: FW_RDY notify failed, rc %d\n",
+					__func__, rc);
+				rc = 0;
+			}
+		}
 	}
 
 	return rc;
@@ -954,6 +1005,8 @@
 	}
 	wil_enable_irq(wil);
 
+	(void)wil_p2p_stop_discovery(wil);
+
 	if (wil->scan_request) {
 		wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
 			     wil->scan_request);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index ecc3c1b..09840975 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -60,11 +60,7 @@
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
 
-	int ret = wil_ioctl(wil, ifr->ifr_data, cmd);
-
-	wil_dbg_misc(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
-
-	return ret;
+	return wil_ioctl(wil, ifr->ifr_data, cmd);
 }
 
 static const struct net_device_ops wil_netdev_ops = {
@@ -149,6 +145,7 @@
 
 	wil = wdev_to_wil(wdev);
 	wil->wdev = wdev;
+	wil->radio_wdev = wdev;
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
 
@@ -160,7 +157,7 @@
 
 	wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
 	/* default monitor channel */
-	ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+	ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
 	cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
 
 	ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
new file mode 100644
index 0000000..2c1b895
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define P2P_WILDCARD_SSID "DIRECT-"
+#define P2P_DMG_SOCIAL_CHANNEL 2
+#define P2P_SEARCH_DURATION_MS 500
+#define P2P_DEFAULT_BI 100
+
+void wil_p2p_discovery_timer_fn(ulong x)
+{
+	struct wil6210_priv *wil = (void *)x;
+
+	wil_dbg_misc(wil, "%s\n", __func__);
+
+	schedule_work(&wil->p2p.discovery_expired_work);
+}
+
+int wil_p2p_search(struct wil6210_priv *wil,
+		   struct cfg80211_scan_request *request)
+{
+	int rc;
+	struct wil_p2p_info *p2p = &wil->p2p;
+
+	wil_dbg_misc(wil, "%s: channel %d\n",
+		     __func__, P2P_DMG_SOCIAL_CHANNEL);
+
+	mutex_lock(&wil->mutex);
+
+	if (p2p->discovery_started) {
+		wil_err(wil, "%s: search failed. discovery already ongoing\n",
+			__func__);
+		rc = -EBUSY;
+		goto out;
+	}
+
+	rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
+	if (rc) {
+		wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+		goto out;
+	}
+
+	rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+	if (rc) {
+		wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+		goto out_stop;
+	}
+
+	/* Set application IE to probe request and probe response */
+	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ,
+			request->ie_len, request->ie);
+	if (rc) {
+		wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n",
+			__func__);
+		goto out_stop;
+	}
+
+	/* supplicant doesn't provide Probe Response IEs; as a workaround,
+	 * re-use the Probe Request IEs
+	 */
+	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
+			request->ie_len, request->ie);
+	if (rc) {
+		wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n",
+			__func__);
+		goto out_stop;
+	}
+
+	rc = wmi_start_search(wil);
+	if (rc) {
+		wil_err(wil, "%s: wmi_start_search failed\n", __func__);
+		goto out_stop;
+	}
+
+	p2p->discovery_started = 1;
+	INIT_WORK(&p2p->discovery_expired_work, wil_p2p_search_expired);
+	mod_timer(&p2p->discovery_timer,
+		  jiffies + msecs_to_jiffies(P2P_SEARCH_DURATION_MS));
+
+out_stop:
+	if (rc)
+		wmi_stop_discovery(wil);
+
+out:
+	mutex_unlock(&wil->mutex);
+	return rc;
+}
+
+int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
+		   struct ieee80211_channel *chan, u64 *cookie)
+{
+	struct wil_p2p_info *p2p = &wil->p2p;
+	u8 channel = P2P_DMG_SOCIAL_CHANNEL;
+	int rc;
+
+	if (chan)
+		channel = chan->hw_value;
+
+	wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
+
+	mutex_lock(&wil->mutex);
+
+	if (p2p->discovery_started) {
+		wil_err(wil, "%s: discovery already ongoing\n", __func__);
+		rc = -EBUSY;
+		goto out;
+	}
+
+	rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI);
+	if (rc) {
+		wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+		goto out;
+	}
+
+	rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+	if (rc) {
+		wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+		goto out_stop;
+	}
+
+	rc = wmi_start_listen(wil);
+	if (rc) {
+		wil_err(wil, "%s: wmi_start_listen failed\n", __func__);
+		goto out_stop;
+	}
+
+	memcpy(&p2p->listen_chan, chan, sizeof(*chan));
+	*cookie = ++p2p->cookie;
+
+	p2p->discovery_started = 1;
+	INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
+	mod_timer(&p2p->discovery_timer,
+		  jiffies + msecs_to_jiffies(duration));
+
+out_stop:
+	if (rc)
+		wmi_stop_discovery(wil);
+
+out:
+	mutex_unlock(&wil->mutex);
+	return rc;
+}
+
+u8 wil_p2p_stop_discovery(struct wil6210_priv *wil)
+{
+	struct wil_p2p_info *p2p = &wil->p2p;
+	u8 started = p2p->discovery_started;
+
+	if (p2p->discovery_started) {
+		del_timer_sync(&p2p->discovery_timer);
+		p2p->discovery_started = 0;
+		wmi_stop_discovery(wil);
+	}
+
+	return started;
+}
+
+int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
+{
+	struct wil_p2p_info *p2p = &wil->p2p;
+	u8 started;
+
+	mutex_lock(&wil->mutex);
+
+	if (cookie != p2p->cookie) {
+		wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
+			 __func__, p2p->cookie, cookie);
+		mutex_unlock(&wil->mutex);
+		return -ENOENT;
+	}
+
+	started = wil_p2p_stop_discovery(wil);
+
+	mutex_unlock(&wil->mutex);
+
+	if (!started) {
+		wil_err(wil, "%s: listen not started\n", __func__);
+		return -ENOENT;
+	}
+
+	mutex_lock(&wil->p2p_wdev_mutex);
+	cfg80211_remain_on_channel_expired(wil->radio_wdev,
+					   p2p->cookie,
+					   &p2p->listen_chan,
+					   GFP_KERNEL);
+	wil->radio_wdev = wil->wdev;
+	mutex_unlock(&wil->p2p_wdev_mutex);
+	return 0;
+}
+
+void wil_p2p_listen_expired(struct work_struct *work)
+{
+	struct wil_p2p_info *p2p = container_of(work,
+			struct wil_p2p_info, discovery_expired_work);
+	struct wil6210_priv *wil = container_of(p2p,
+			struct wil6210_priv, p2p);
+	u8 started;
+
+	wil_dbg_misc(wil, "%s()\n", __func__);
+
+	mutex_lock(&wil->mutex);
+	started = wil_p2p_stop_discovery(wil);
+	mutex_unlock(&wil->mutex);
+
+	if (started) {
+		mutex_lock(&wil->p2p_wdev_mutex);
+		cfg80211_remain_on_channel_expired(wil->radio_wdev,
+						   p2p->cookie,
+						   &p2p->listen_chan,
+						   GFP_KERNEL);
+		wil->radio_wdev = wil->wdev;
+		mutex_unlock(&wil->p2p_wdev_mutex);
+	}
+
+}
+
+void wil_p2p_search_expired(struct work_struct *work)
+{
+	struct wil_p2p_info *p2p = container_of(work,
+			struct wil_p2p_info, discovery_expired_work);
+	struct wil6210_priv *wil = container_of(p2p,
+			struct wil6210_priv, p2p);
+	u8 started;
+
+	wil_dbg_misc(wil, "%s()\n", __func__);
+
+	mutex_lock(&wil->mutex);
+	started = wil_p2p_stop_discovery(wil);
+	mutex_unlock(&wil->mutex);
+
+	if (started) {
+		mutex_lock(&wil->p2p_wdev_mutex);
+		cfg80211_scan_done(wil->scan_request, 0);
+		wil->scan_request = NULL;
+		wil->radio_wdev = wil->wdev;
+		mutex_unlock(&wil->p2p_wdev_mutex);
+	}
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index e36f2a0..aeb72c4 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -275,6 +275,7 @@
 	pci_disable_device(pdev);
 	if (wil->platform_ops.uninit)
 		wil->platform_ops.uninit(wil->platform_handle);
+	wil_p2p_wdev_free(wil);
 	wil_if_free(wil);
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 32031e7..19ed127 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -291,35 +291,15 @@
 	return min(max_agg_size, req_agg_wsize);
 }
 
-/* Block Ack - Rx side (recipient */
+/* Block Ack - Rx side (recipient) */
 int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
 			 u8 dialog_token, __le16 ba_param_set,
 			 __le16 ba_timeout, __le16 ba_seq_ctrl)
-{
-	struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);
-
-	if (!req)
-		return -ENOMEM;
-
-	req->cidxtid = cidxtid;
-	req->dialog_token = dialog_token;
-	req->ba_param_set = le16_to_cpu(ba_param_set);
-	req->ba_timeout = le16_to_cpu(ba_timeout);
-	req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);
-
-	mutex_lock(&wil->back_rx_mutex);
-	list_add_tail(&req->list, &wil->back_rx_pending);
-	mutex_unlock(&wil->back_rx_mutex);
-
-	queue_work(wil->wq_service, &wil->back_rx_worker);
-
-	return 0;
-}
-
-static void wil_back_rx_handle(struct wil6210_priv *wil,
-			       struct wil_back_rx *req)
 __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 {
+	u16 param_set = le16_to_cpu(ba_param_set);
+	u16 agg_timeout = le16_to_cpu(ba_timeout);
+	u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
 	struct wil_sta_info *sta;
 	u8 cid, tid;
 	u16 agg_wsize = 0;
@@ -328,34 +308,35 @@
 	 * bits 2..5: TID
 	 * bits 6..15: buffer size
 	 */
-	u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
-	bool agg_amsdu = !!(req->ba_param_set & BIT(0));
-	int ba_policy = req->ba_param_set & BIT(1);
-	u16 agg_timeout = req->ba_timeout;
+	u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
+	bool agg_amsdu = !!(param_set & BIT(0));
+	int ba_policy = param_set & BIT(1);
 	u16 status = WLAN_STATUS_SUCCESS;
-	u16 ssn = req->ba_seq_ctrl >> 4;
+	u16 ssn = seq_ctrl >> 4;
 	struct wil_tid_ampdu_rx *r;
-	int rc;
+	int rc = 0;
 
 	might_sleep();
-	parse_cidxtid(req->cidxtid, &cid, &tid);
+	parse_cidxtid(cidxtid, &cid, &tid);
 
 	/* sanity checks */
 	if (cid >= WIL6210_MAX_CID) {
 		wil_err(wil, "BACK: invalid CID %d\n", cid);
-		return;
+		rc = -EINVAL;
+		goto out;
 	}
 
 	sta = &wil->sta[cid];
 	if (sta->status != wil_sta_connected) {
 		wil_err(wil, "BACK: CID %d not connected\n", cid);
-		return;
+		rc = -EINVAL;
+		goto out;
 	}
 
 	wil_dbg_wmi(wil,
 		    "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
-		    cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
-		    agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);
+		    cid, sta->addr, tid, req_agg_wsize, agg_timeout,
+		    agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);
 
 	/* apply policies */
 	if (ba_policy) {
@@ -365,10 +346,13 @@
 	if (status == WLAN_STATUS_SUCCESS)
 		agg_wsize = wil_agg_size(wil, req_agg_wsize);
 
-	rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
+	rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
 			       agg_amsdu, agg_wsize, agg_timeout);
-	if (rc || (status != WLAN_STATUS_SUCCESS))
-		return;
+	if (rc || (status != WLAN_STATUS_SUCCESS)) {
+		wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n",
+			__func__, rc, status);
+		goto out;
+	}
 
 	/* apply */
 	r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
@@ -376,143 +360,37 @@
 	wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
 	sta->tid_rx[tid] = r;
 	spin_unlock_bh(&sta->tid_rx_lock);
+
+out:
+	return rc;
 }
 
-void wil_back_rx_flush(struct wil6210_priv *wil)
+/* BACK - Tx side (originator) */
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
 {
-	struct wil_back_rx *evt, *t;
-
-	wil_dbg_misc(wil, "%s()\n", __func__);
-
-	mutex_lock(&wil->back_rx_mutex);
-
-	list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
-		list_del(&evt->list);
-		kfree(evt);
-	}
-
-	mutex_unlock(&wil->back_rx_mutex);
-}
-
-/* Retrieve next ADDBA request from the pending list */
-static struct list_head *next_back_rx(struct wil6210_priv *wil)
-{
-	struct list_head *ret = NULL;
-
-	mutex_lock(&wil->back_rx_mutex);
-
-	if (!list_empty(&wil->back_rx_pending)) {
-		ret = wil->back_rx_pending.next;
-		list_del(ret);
-	}
-
-	mutex_unlock(&wil->back_rx_mutex);
-
-	return ret;
-}
-
-void wil_back_rx_worker(struct work_struct *work)
-{
-	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
-						back_rx_worker);
-	struct wil_back_rx *evt;
-	struct list_head *lh;
-
-	while ((lh = next_back_rx(wil)) != NULL) {
-		evt = list_entry(lh, struct wil_back_rx, list);
-
-		wil_back_rx_handle(wil, evt);
-		kfree(evt);
-	}
-}
-
-/* BACK - Tx (originator) side */
-static void wil_back_tx_handle(struct wil6210_priv *wil,
-			       struct wil_back_tx *req)
-{
-	struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
-	int rc;
+	u8 agg_wsize = wil_agg_size(wil, wsize);
+	u16 agg_timeout = 0;
+	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+	int rc = 0;
 
 	if (txdata->addba_in_progress) {
 		wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
-			     req->ringid);
-		return;
+			     ringid);
+		goto out;
 	}
 	if (txdata->agg_wsize) {
 		wil_dbg_misc(wil,
-			     "ADDBA for vring[%d] already established wsize %d\n",
-			     req->ringid, txdata->agg_wsize);
-		return;
+			     "ADDBA for vring[%d] already done for wsize %d\n",
+			     ringid, txdata->agg_wsize);
+		goto out;
 	}
 	txdata->addba_in_progress = true;
-	rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
-	if (rc)
+	rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout);
+	if (rc) {
+		wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc);
 		txdata->addba_in_progress = false;
-}
-
-static struct list_head *next_back_tx(struct wil6210_priv *wil)
-{
-	struct list_head *ret = NULL;
-
-	mutex_lock(&wil->back_tx_mutex);
-
-	if (!list_empty(&wil->back_tx_pending)) {
-		ret = wil->back_tx_pending.next;
-		list_del(ret);
 	}
 
-	mutex_unlock(&wil->back_tx_mutex);
-
-	return ret;
-}
-
-void wil_back_tx_worker(struct work_struct *work)
-{
-	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
-						 back_tx_worker);
-	struct wil_back_tx *evt;
-	struct list_head *lh;
-
-	while ((lh = next_back_tx(wil)) != NULL) {
-		evt = list_entry(lh, struct wil_back_tx, list);
-
-		wil_back_tx_handle(wil, evt);
-		kfree(evt);
-	}
-}
-
-void wil_back_tx_flush(struct wil6210_priv *wil)
-{
-	struct wil_back_tx *evt, *t;
-
-	wil_dbg_misc(wil, "%s()\n", __func__);
-
-	mutex_lock(&wil->back_tx_mutex);
-
-	list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
-		list_del(&evt->list);
-		kfree(evt);
-	}
-
-	mutex_unlock(&wil->back_tx_mutex);
-}
-
-int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
-{
-	struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);
-
-	if (!req)
-		return -ENOMEM;
-
-	req->ringid = ringid;
-	req->agg_wsize = wil_agg_size(wil, wsize);
-	req->agg_timeout = 0;
-
-	mutex_lock(&wil->back_tx_mutex);
-	list_add_tail(&req->list, &wil->back_tx_pending);
-	mutex_unlock(&wil->back_tx_mutex);
-
-	queue_work(wil->wq_service, &wil->back_tx_worker);
-
-	return 0;
+out:
+	return rc;
 }
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index e59239d..c4db2a9 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -37,39 +37,40 @@
 #endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
 
 DECLARE_EVENT_CLASS(wil6210_wmi,
-	TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+	TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
 
 	TP_ARGS(wmi, buf, buf_len),
 
 	TP_STRUCT__entry(
 		__field(u8, mid)
-		__field(u16, id)
-		__field(u32, timestamp)
+		__field(u16, command_id)
+		__field(u32, fw_timestamp)
 		__field(u16, buf_len)
 		__dynamic_array(u8, buf, buf_len)
 	),
 
 	TP_fast_assign(
 		__entry->mid = wmi->mid;
-		__entry->id = le16_to_cpu(wmi->id);
-		__entry->timestamp = le32_to_cpu(wmi->timestamp);
+		__entry->command_id = le16_to_cpu(wmi->command_id);
+		__entry->fw_timestamp = le32_to_cpu(wmi->fw_timestamp);
 		__entry->buf_len = buf_len;
 		memcpy(__get_dynamic_array(buf), buf, buf_len);
 	),
 
 	TP_printk(
 		"MID %d id 0x%04x len %d timestamp %d",
-		__entry->mid, __entry->id, __entry->buf_len, __entry->timestamp
+		__entry->mid, __entry->command_id, __entry->buf_len,
+		__entry->fw_timestamp
 	)
 );
 
 DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
-	TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+	TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
 	TP_ARGS(wmi, buf, buf_len)
 );
 
 DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
-	TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+	TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
 	TP_ARGS(wmi, buf, buf_len)
 );
 
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 6af2090..f260b232 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -549,6 +549,60 @@
 	return rc;
 }
 
+/**
+ * reverse_memcmp - Compare two areas of memory, in reverse order
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ *
+ * Cut'n'paste from original memcmp (see lib/string.c)
+ * with minimal modifications
+ */
+static int reverse_memcmp(const void *cs, const void *ct, size_t count)
+{
+	const unsigned char *su1, *su2;
+	int res = 0;
+
+	for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
+	     --su1, --su2, count--) {
+		res = *su1 - *su2;
+		if (res)
+			break;
+	}
+	return res;
+}
+
+static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+	int cid = wil_rxdesc_cid(d);
+	int tid = wil_rxdesc_tid(d);
+	int key_id = wil_rxdesc_key_id(d);
+	int mc = wil_rxdesc_mcast(d);
+	struct wil_sta_info *s = &wil->sta[cid];
+	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
+				      &s->tid_crypto_rx[tid];
+	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
+	const u8 *pn = (u8 *)&d->mac.pn_15_0;
+
+	if (!cc->key_set) {
+		wil_err_ratelimited(wil,
+				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
+				    cid, tid, mc, key_id);
+		return -EINVAL;
+	}
+
+	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
+		wil_err_ratelimited(wil,
+				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
+				    cid, tid, mc, key_id, pn, cc->pn);
+		return -EINVAL;
+	}
+	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
+
+	return 0;
+}
+
 /*
  * Pass Rx packet to the netif. Update statistics.
  * Called in softirq context (NAPI poll).
@@ -561,6 +615,7 @@
 	unsigned int len = skb->len;
 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
 	int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+	int security = wil_rxdesc_security(d);
 	struct ethhdr *eth = (void *)skb->data;
 	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
 	 * is not suitable, need to look at data
@@ -586,6 +641,13 @@
 
 	skb_orphan(skb);
 
+	if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
+		rc = GRO_DROP;
+		dev_kfree_skb(skb);
+		stats->rx_replay++;
+		goto stats;
+	}
+
 	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
 		if (mcast) {
 			/* send multicast frames both to higher layers in
@@ -627,6 +689,7 @@
 		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
 			     len, gro_res_str[rc]);
 	}
+stats:
 	/* statistics. rc set to GRO_NORMAL for AP bridging */
 	if (unlikely(rc == GRO_DROP)) {
 		ndev->stats.rx_dropped++;
@@ -757,7 +820,7 @@
 		},
 	};
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_vring_cfg_done_event cmd;
 	} __packed reply;
 	struct vring *vring = &wil->vring_tx[id];
@@ -834,7 +897,7 @@
 		},
 	};
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_vring_cfg_done_event cmd;
 	} __packed reply;
 	struct vring *vring = &wil->vring_tx[id];
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index ee7c7b4..fcdffaa 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -480,6 +480,16 @@
 	return WIL_GET_BITS(d->mac.d0, 28, 31);
 }
 
+static inline int wil_rxdesc_key_id(struct vring_rx_desc *d)
+{
+	return WIL_GET_BITS(d->mac.d1, 4, 5);
+}
+
+static inline int wil_rxdesc_security(struct vring_rx_desc *d)
+{
+	return WIL_GET_BITS(d->mac.d1, 7, 7);
+}
+
 static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d)
 {
 	return WIL_GET_BITS(d->mac.d1, 8, 9);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 8427d68..4d699ea4 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -22,6 +22,7 @@
 #include <net/cfg80211.h>
 #include <linux/timex.h>
 #include <linux/types.h>
+#include "wmi.h"
 #include "wil_platform.h"
 
 extern bool no_fw_recovery;
@@ -131,6 +132,7 @@
 /* registers - FW addresses */
 #define RGF_USER_USAGE_1		(0x880004)
 #define RGF_USER_USAGE_6		(0x880018)
+	#define BIT_USER_OOB_MODE		BIT(31)
 #define RGF_USER_HW_MACHINE_STATE	(0x8801dc)
 	#define HW_MACHINE_BOOT_DONE	(0x3fffffd)
 #define RGF_USER_USER_CPU_0		(0x8801e0)
@@ -334,29 +336,11 @@
 /* max. value for wil6210_mbox_hdr.len */
 #define MAX_MBOXITEM_SIZE   (240)
 
-/**
- * struct wil6210_mbox_hdr_wmi - WMI header
- *
- * @mid: MAC ID
- *	00 - default, created by FW
- *	01..0f - WiFi ports, driver to create
- *	10..fe - debug
- *	ff - broadcast
- * @id: command/event ID
- * @timestamp: FW fills for events, free-running msec timer
- */
-struct wil6210_mbox_hdr_wmi {
-	u8 mid;
-	u8 reserved;
-	__le16 id;
-	__le32 timestamp;
-} __packed;
-
 struct pending_wmi_event {
 	struct list_head list;
 	struct {
 		struct wil6210_mbox_hdr hdr;
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		u8 data[0];
 	} __packed event;
 };
@@ -455,6 +439,29 @@
 	bool first_time; /* is it 1-st time this buffer used? */
 };
 
+/**
+ * struct wil_tid_crypto_rx_single - TID crypto information (Rx).
+ *
+ * @pn: GCMP PN for the session
+ * @key_set: valid key present
+ */
+struct wil_tid_crypto_rx_single {
+	u8 pn[IEEE80211_GCMP_PN_LEN];
+	bool key_set;
+};
+
+struct wil_tid_crypto_rx {
+	struct wil_tid_crypto_rx_single key_id[4];
+};
+
+struct wil_p2p_info {
+	struct ieee80211_channel listen_chan;
+	u8 discovery_started;
+	u64 cookie;
+	struct timer_list discovery_timer; /* listen/search duration */
+	struct work_struct discovery_expired_work; /* listen/search expire */
+};
+
 enum wil_sta_status {
 	wil_sta_unused = 0,
 	wil_sta_conn_pending = 1,
@@ -474,6 +481,7 @@
 	unsigned long	rx_non_data_frame;
 	unsigned long	rx_short_frame;
 	unsigned long	rx_large_frame;
+	unsigned long	rx_replay;
 	u16 last_mcs_rx;
 	u64 rx_per_mcs[WIL_MCS_MAX + 1];
 };
@@ -495,6 +503,8 @@
 	spinlock_t tid_rx_lock; /* guarding tid_rx array */
 	unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
 	unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+	struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
+	struct wil_tid_crypto_rx group_crypto_rx;
 };
 
 enum {
@@ -507,24 +517,6 @@
 	hw_capability_last
 };
 
-struct wil_back_rx {
-	struct list_head list;
-	/* request params, converted to CPU byte order - what we asked for */
-	u8 cidxtid;
-	u8 dialog_token;
-	u16 ba_param_set;
-	u16 ba_timeout;
-	u16 ba_seq_ctrl;
-};
-
-struct wil_back_tx {
-	struct list_head list;
-	/* request params, converted to CPU byte order - what we asked for */
-	u8 ringid;
-	u8 agg_wsize;
-	u16 agg_timeout;
-};
-
 struct wil_probe_client_req {
 	struct list_head list;
 	u64 cookie;
@@ -595,13 +587,6 @@
 	spinlock_t wmi_ev_lock;
 	struct napi_struct napi_rx;
 	struct napi_struct napi_tx;
-	/* BACK */
-	struct list_head back_rx_pending;
-	struct mutex back_rx_mutex; /* protect @back_rx_pending */
-	struct work_struct back_rx_worker;
-	struct list_head back_tx_pending;
-	struct mutex back_tx_mutex; /* protect @back_tx_pending */
-	struct work_struct back_tx_worker;
 	/* keep alive */
 	struct list_head probe_client_pending;
 	struct mutex probe_client_mutex; /* protect @probe_client_pending */
@@ -622,11 +607,21 @@
 	/* debugfs */
 	struct dentry *debug;
 	struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+	u8 discovery_mode;
 
 	void *platform_handle;
 	struct wil_platform_ops platform_ops;
 
 	struct pmc_ctx pmc;
+
+	bool pbss;
+
+	struct wil_p2p_info p2p;
+
+	/* P2P_DEVICE vif */
+	struct wireless_dev *p2p_wdev;
+	struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
+	struct wireless_dev *radio_wdev;
 };
 
 #define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -722,6 +717,7 @@
 int wil_reset(struct wil6210_priv *wil, bool no_fw);
 void wil_fw_error_recovery(struct wil6210_priv *wil);
 void wil_set_recovery_state(struct wil6210_priv *wil, int state);
+bool wil_is_recovery_blocked(struct wil6210_priv *wil);
 int wil_up(struct wil6210_priv *wil);
 int __wil_up(struct wil6210_priv *wil);
 int wil_down(struct wil6210_priv *wil);
@@ -752,7 +748,6 @@
 int wmi_echo(struct wil6210_priv *wil);
 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
-int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
 int wmi_rxon(struct wil6210_priv *wil, bool on);
 int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
 int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
@@ -765,11 +760,7 @@
 int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
 			 u8 dialog_token, __le16 ba_param_set,
 			 __le16 ba_timeout, __le16 ba_seq_ctrl);
-void wil_back_rx_worker(struct work_struct *work);
-void wil_back_rx_flush(struct wil6210_priv *wil);
 int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
-void wil_back_tx_worker(struct work_struct *work);
-void wil_back_tx_flush(struct wil6210_priv *wil);
 
 void wil6210_clear_irq(struct wil6210_priv *wil);
 int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
@@ -779,6 +770,24 @@
 void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
 void wil_disable_irq(struct wil6210_priv *wil);
 void wil_enable_irq(struct wil6210_priv *wil);
+
+/* P2P */
+void wil_p2p_discovery_timer_fn(ulong x);
+int wil_p2p_search(struct wil6210_priv *wil,
+		   struct cfg80211_scan_request *request);
+int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
+		   struct ieee80211_channel *chan, u64 *cookie);
+u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
+int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
+void wil_p2p_listen_expired(struct work_struct *work);
+void wil_p2p_search_expired(struct work_struct *work);
+
+/* WMI for P2P */
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
+int wmi_start_listen(struct wil6210_priv *wil);
+int wmi_start_search(struct wil6210_priv *wil);
+int wmi_stop_discovery(struct wil6210_priv *wil);
+
 int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			 struct cfg80211_mgmt_tx_params *params,
 			 u64 *cookie);
@@ -790,10 +799,11 @@
 
 struct wireless_dev *wil_cfg80211_init(struct device *dev);
 void wil_wdev_free(struct wil6210_priv *wil);
+void wil_p2p_wdev_free(struct wil6210_priv *wil);
 
 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
 int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
-		  u8 chan, u8 hidden_ssid);
+		  u8 chan, u8 hidden_ssid, u8 is_go);
 int wmi_pcp_stop(struct wil6210_priv *wil);
 void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
 			u16 reason_code, bool from_event);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 9a949d9..33d4a34 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -19,6 +19,12 @@
 
 struct device;
 
+enum wil_platform_event {
+	WIL_PLATFORM_EVT_FW_CRASH = 0,
+	WIL_PLATFORM_EVT_PRE_RESET = 1,
+	WIL_PLATFORM_EVT_FW_RDY = 2,
+};
+
 /**
  * struct wil_platform_ops - wil platform module calls from this
  * driver to platform driver
@@ -28,7 +34,7 @@
 	int (*suspend)(void *handle);
 	int (*resume)(void *handle);
 	void (*uninit)(void *handle);
-	int (*notify_crash)(void *handle);
+	int (*notify)(void *handle, enum wil_platform_event evt);
 };
 
 /**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 493e721..6ca28c3 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -176,7 +176,7 @@
 {
 	struct {
 		struct wil6210_mbox_hdr hdr;
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 	} __packed cmd = {
 		.hdr = {
 			.type = WIL_MBOX_HDR_TYPE_WMI,
@@ -185,7 +185,7 @@
 		},
 		.wmi = {
 			.mid = 0,
-			.id = cpu_to_le16(cmdid),
+			.command_id = cpu_to_le16(cmdid),
 		},
 	};
 	struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
@@ -333,7 +333,7 @@
 	}
 
 	ch_no = data->info.channel + 1;
-	freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
+	freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
 	channel = ieee80211_get_channel(wiphy, freq);
 	signal = data->info.sqi;
 	d_status = le16_to_cpu(data->info.status);
@@ -368,6 +368,8 @@
 		wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf,
 				 ie_len, true);
 
+		wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+
 		bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
 						d_len, signal, GFP_KERNEL);
 		if (bss) {
@@ -378,8 +380,10 @@
 			wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
 		}
 	} else {
-		cfg80211_rx_mgmt(wil->wdev, freq, signal,
+		mutex_lock(&wil->p2p_wdev_mutex);
+		cfg80211_rx_mgmt(wil->radio_wdev, freq, signal,
 				 (void *)rx_mgmt_frame, d_len, 0);
+		mutex_unlock(&wil->p2p_wdev_mutex);
 	}
 }
 
@@ -406,7 +410,10 @@
 			     wil->scan_request, aborted);
 
 		del_timer_sync(&wil->scan_timer);
+		mutex_lock(&wil->p2p_wdev_mutex);
 		cfg80211_scan_done(wil->scan_request, aborted);
+		wil->radio_wdev = wil->wdev;
+		mutex_unlock(&wil->p2p_wdev_mutex);
 		wil->scan_request = NULL;
 	} else {
 		wil_err(wil, "SCAN_COMPLETE while not scanning\n");
@@ -487,6 +494,14 @@
 			return;
 		}
 		del_timer_sync(&wil->connect_timer);
+	} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+		   (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+		if (wil->sta[evt->cid].status != wil_sta_unused) {
+			wil_err(wil, "%s: AP: Invalid status %d for CID %d\n",
+				__func__, wil->sta[evt->cid].status, evt->cid);
+			mutex_unlock(&wil->mutex);
+			return;
+		}
 	}
 
 	/* FIXME FW can transmit only ucast frames to peer */
@@ -648,7 +663,7 @@
 static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
 			      int len)
 {
-	struct wmi_vring_ba_status_event *evt = d;
+	struct wmi_ba_status_event *evt = d;
 	struct vring_tx_data *txdata;
 
 	wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
@@ -834,10 +849,10 @@
 		      offsetof(struct wil6210_mbox_ring_desc, sync), 0);
 		/* indicate */
 		if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
-		    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
-			struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
-			u16 id = le16_to_cpu(wmi->id);
-			u32 tstamp = le32_to_cpu(wmi->timestamp);
+		    (len >= sizeof(struct wmi_cmd_hdr))) {
+			struct wmi_cmd_hdr *wmi = &evt->event.wmi;
+			u16 id = le16_to_cpu(wmi->command_id);
+			u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
 			spin_lock_irqsave(&wil->wmi_ev_lock, flags);
 			if (wil->reply_id && wil->reply_id == id) {
 				if (wil->reply_buf) {
@@ -947,7 +962,7 @@
 }
 
 int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
-		  u8 chan, u8 hidden_ssid)
+		  u8 chan, u8 hidden_ssid, u8 is_go)
 {
 	int rc;
 
@@ -958,9 +973,10 @@
 		.channel = chan - 1,
 		.pcp_max_assoc_sta = max_assoc_sta,
 		.hidden_ssid = hidden_ssid,
+		.is_go = is_go,
 	};
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_pcp_started_event evt;
 	} __packed reply;
 
@@ -1014,7 +1030,7 @@
 {
 	int rc;
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_set_ssid_cmd cmd;
 	} __packed reply;
 	int len; /* reply.cmd.ssid_len in CPU order */
@@ -1047,7 +1063,7 @@
 {
 	int rc;
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_set_pcp_channel_cmd cmd;
 	} __packed reply;
 
@@ -1064,14 +1080,86 @@
 	return 0;
 }
 
-int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi)
 {
+	int rc;
 	struct wmi_p2p_cfg_cmd cmd = {
-		.discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD,
+		.discovery_mode = WMI_DISCOVERY_MODE_PEER2PEER,
+		.bcon_interval = cpu_to_le16(bi),
 		.channel = channel - 1,
 	};
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_p2p_cfg_done_event evt;
+	} __packed reply;
 
-	return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
+	wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n");
+
+	rc = wmi_call(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd),
+		      WMI_P2P_CFG_DONE_EVENTID, &reply, sizeof(reply), 300);
+	if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "P2P_CFG failed. status %d\n", reply.evt.status);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int wmi_start_listen(struct wil6210_priv *wil)
+{
+	int rc;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_listen_started_event evt;
+	} __packed reply;
+
+	wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n");
+
+	rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
+		      WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 300);
+	if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "device failed to start listen. status %d\n",
+			reply.evt.status);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int wmi_start_search(struct wil6210_priv *wil)
+{
+	int rc;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_search_started_event evt;
+	} __packed reply;
+
+	wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n");
+
+	rc = wmi_call(wil, WMI_START_SEARCH_CMDID, NULL, 0,
+		      WMI_SEARCH_STARTED_EVENTID, &reply, sizeof(reply), 300);
+	if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+		wil_err(wil, "device failed to start search. status %d\n",
+			reply.evt.status);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int wmi_stop_discovery(struct wil6210_priv *wil)
+{
+	int rc;
+
+	wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n");
+
+	rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
+		      WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 100);
+
+	if (rc)
+		wil_err(wil, "Failed to stop discovery\n");
+
+	return rc;
 }
 
 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
@@ -1155,7 +1243,7 @@
 {
 	int rc;
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_listen_started_event evt;
 	} __packed reply;
 
@@ -1192,7 +1280,7 @@
 		.host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh),
 	};
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_cfg_rx_chain_done_event evt;
 	} __packed evt;
 	int rc;
@@ -1246,7 +1334,7 @@
 		.measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
 	};
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_temp_sense_done_event evt;
 	} __packed reply;
 
@@ -1272,7 +1360,7 @@
 		.disconnect_reason = cpu_to_le16(reason),
 	};
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_disconnect_event evt;
 	} __packed reply;
 
@@ -1364,7 +1452,7 @@
 		.ba_timeout = cpu_to_le16(timeout),
 	};
 	struct {
-		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_cmd_hdr wmi;
 		struct wmi_rcp_addba_resp_sent_event evt;
 	} __packed reply;
 
@@ -1420,10 +1508,10 @@
 	u16 len = le16_to_cpu(hdr->len);
 
 	if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
-	    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
-		struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+	    (len >= sizeof(struct wmi_cmd_hdr))) {
+		struct wmi_cmd_hdr *wmi = (void *)(&hdr[1]);
 		void *evt_data = (void *)(&wmi[1]);
-		u16 id = le16_to_cpu(wmi->id);
+		u16 id = le16_to_cpu(wmi->command_id);
 
 		wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
 			    id, wil->reply_id);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 6e90e78..29865e0 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
- * Copyright (c) 2006-2012 Wilocity .
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -17,187 +17,197 @@
 
 /*
  * This file contains the definitions of the WMI protocol specified in the
- * Wireless Module Interface (WMI) for the Wilocity
- * MARLON 60 Gigabit wireless solution.
+ * Wireless Module Interface (WMI) for the Qualcomm
+ * 60 GHz wireless solution.
  * It includes definitions of all the commands and events.
  * Commands are messages from the host to the WM.
  * Events are messages from the WM to the host.
+ *
+ * This is an automatically generated file.
  */
 
 #ifndef __WILOCITY_WMI_H__
 #define __WILOCITY_WMI_H__
 
 /* General */
-#define WILOCITY_MAX_ASSOC_STA (8)
-#define WILOCITY_DEFAULT_ASSOC_STA (1)
-#define WMI_MAC_LEN		(6)
-#define WMI_PROX_RANGE_NUM	(3)
-#define WMI_MAX_LOSS_DMG_BEACONS	(32)
+#define WMI_MAX_ASSOC_STA		(8)
+#define WMI_DEFAULT_ASSOC_STA		(1)
+#define WMI_MAC_LEN			(6)
+#define WMI_PROX_RANGE_NUM		(3)
+#define WMI_MAX_LOSS_DMG_BEACONS	(20)
+
+/* Mailbox interface
+ * used for commands and events
+ */
+enum wmi_mid {
+	MID_DEFAULT		= 0x00,
+	FIRST_DBG_MID_ID	= 0x10,
+	LAST_DBG_MID_ID		= 0xFE,
+	MID_BROADCAST		= 0xFF,
+};
+
+/* WMI_CMD_HDR */
+struct wmi_cmd_hdr {
+	u8 mid;
+	u8 reserved;
+	__le16 command_id;
+	__le32 fw_timestamp;
+} __packed;
 
 /* List of Commands */
 enum wmi_command_id {
-	WMI_CONNECT_CMDID		= 0x0001,
-	WMI_DISCONNECT_CMDID		= 0x0003,
-	WMI_DISCONNECT_STA_CMDID	= 0x0004,
-	WMI_START_SCAN_CMDID		= 0x0007,
-	WMI_SET_BSS_FILTER_CMDID	= 0x0009,
-	WMI_SET_PROBED_SSID_CMDID	= 0x000a,
-	WMI_SET_LISTEN_INT_CMDID	= 0x000b,
-	WMI_BCON_CTRL_CMDID		= 0x000f,
-	WMI_ADD_CIPHER_KEY_CMDID	= 0x0016,
-	WMI_DELETE_CIPHER_KEY_CMDID	= 0x0017,
-	WMI_SET_APPIE_CMDID		= 0x003f,
-	WMI_SET_WSC_STATUS_CMDID	= 0x0041,
-	WMI_PXMT_RANGE_CFG_CMDID	= 0x0042,
-	WMI_PXMT_SNR2_RANGE_CFG_CMDID	= 0x0043,
-/*	WMI_FAST_MEM_ACC_MODE_CMDID	= 0x0300, */
-	WMI_MEM_READ_CMDID		= 0x0800,
-	WMI_MEM_WR_CMDID		= 0x0801,
-	WMI_ECHO_CMDID			= 0x0803,
-	WMI_DEEP_ECHO_CMDID		= 0x0804,
-	WMI_CONFIG_MAC_CMDID		= 0x0805,
-	WMI_CONFIG_PHY_DEBUG_CMDID	= 0x0806,
-	WMI_ADD_DEBUG_TX_PCKT_CMDID	= 0x0808,
-	WMI_PHY_GET_STATISTICS_CMDID	= 0x0809,
-	WMI_FS_TUNE_CMDID		= 0x080a,
-	WMI_CORR_MEASURE_CMDID		= 0x080b,
-	WMI_READ_RSSI_CMDID		= 0x080c,
-	WMI_TEMP_SENSE_CMDID		= 0x080e,
-	WMI_DC_CALIB_CMDID		= 0x080f,
-	WMI_SEND_TONE_CMDID		= 0x0810,
-	WMI_IQ_TX_CALIB_CMDID		= 0x0811,
-	WMI_IQ_RX_CALIB_CMDID		= 0x0812,
-	WMI_SET_UCODE_IDLE_CMDID	= 0x0813,
-	WMI_SET_WORK_MODE_CMDID		= 0x0815,
-	WMI_LO_LEAKAGE_CALIB_CMDID	= 0x0816,
-	WMI_MARLON_R_READ_CMDID		= 0x0818,
-	WMI_MARLON_R_WRITE_CMDID	= 0x0819,
-	WMI_MARLON_R_TXRX_SEL_CMDID	= 0x081a,
-	MAC_IO_STATIC_PARAMS_CMDID	= 0x081b,
-	MAC_IO_DYNAMIC_PARAMS_CMDID	= 0x081c,
-	WMI_SILENT_RSSI_CALIB_CMDID	= 0x081d,
-	WMI_RF_RX_TEST_CMDID		= 0x081e,
-	WMI_CFG_RX_CHAIN_CMDID		= 0x0820,
-	WMI_VRING_CFG_CMDID		= 0x0821,
-	WMI_BCAST_VRING_CFG_CMDID	= 0x0822,
-	WMI_VRING_BA_EN_CMDID		= 0x0823,
-	WMI_VRING_BA_DIS_CMDID		= 0x0824,
-	WMI_RCP_ADDBA_RESP_CMDID	= 0x0825,
-	WMI_RCP_DELBA_CMDID		= 0x0826,
-	WMI_SET_SSID_CMDID		= 0x0827,
-	WMI_GET_SSID_CMDID		= 0x0828,
-	WMI_SET_PCP_CHANNEL_CMDID	= 0x0829,
-	WMI_GET_PCP_CHANNEL_CMDID	= 0x082a,
-	WMI_SW_TX_REQ_CMDID		= 0x082b,
-	WMI_READ_MAC_RXQ_CMDID		= 0x0830,
-	WMI_READ_MAC_TXQ_CMDID		= 0x0831,
-	WMI_WRITE_MAC_RXQ_CMDID		= 0x0832,
-	WMI_WRITE_MAC_TXQ_CMDID		= 0x0833,
-	WMI_WRITE_MAC_XQ_FIELD_CMDID	= 0x0834,
-	WMI_MLME_PUSH_CMDID		= 0x0835,
-	WMI_BEAMFORMING_MGMT_CMDID	= 0x0836,
-	WMI_BF_TXSS_MGMT_CMDID		= 0x0837,
-	WMI_BF_SM_MGMT_CMDID		= 0x0838,
-	WMI_BF_RXSS_MGMT_CMDID		= 0x0839,
-	WMI_BF_TRIG_CMDID		= 0x083A,
-	WMI_SET_SECTORS_CMDID		= 0x0849,
-	WMI_MAINTAIN_PAUSE_CMDID	= 0x0850,
-	WMI_MAINTAIN_RESUME_CMDID	= 0x0851,
-	WMI_RS_MGMT_CMDID		= 0x0852,
-	WMI_RF_MGMT_CMDID		= 0x0853,
-	WMI_THERMAL_THROTTLING_CTRL_CMDID	= 0x0854,
-	WMI_THERMAL_THROTTLING_GET_STATUS_CMDID	= 0x0855,
+	WMI_CONNECT_CMDID			= 0x01,
+	WMI_DISCONNECT_CMDID			= 0x03,
+	WMI_DISCONNECT_STA_CMDID		= 0x04,
+	WMI_START_SCAN_CMDID			= 0x07,
+	WMI_SET_BSS_FILTER_CMDID		= 0x09,
+	WMI_SET_PROBED_SSID_CMDID		= 0x0A,
+	WMI_SET_LISTEN_INT_CMDID		= 0x0B,
+	WMI_BCON_CTRL_CMDID			= 0x0F,
+	WMI_ADD_CIPHER_KEY_CMDID		= 0x16,
+	WMI_DELETE_CIPHER_KEY_CMDID		= 0x17,
+	WMI_PCP_CONF_CMDID			= 0x18,
+	WMI_SET_APPIE_CMDID			= 0x3F,
+	WMI_SET_WSC_STATUS_CMDID		= 0x41,
+	WMI_PXMT_RANGE_CFG_CMDID		= 0x42,
+	WMI_PXMT_SNR2_RANGE_CFG_CMDID		= 0x43,
+	WMI_MEM_READ_CMDID			= 0x800,
+	WMI_MEM_WR_CMDID			= 0x801,
+	WMI_ECHO_CMDID				= 0x803,
+	WMI_DEEP_ECHO_CMDID			= 0x804,
+	WMI_CONFIG_MAC_CMDID			= 0x805,
+	WMI_CONFIG_PHY_DEBUG_CMDID		= 0x806,
+	WMI_ADD_DEBUG_TX_PCKT_CMDID		= 0x808,
+	WMI_PHY_GET_STATISTICS_CMDID		= 0x809,
+	WMI_FS_TUNE_CMDID			= 0x80A,
+	WMI_CORR_MEASURE_CMDID			= 0x80B,
+	WMI_READ_RSSI_CMDID			= 0x80C,
+	WMI_TEMP_SENSE_CMDID			= 0x80E,
+	WMI_DC_CALIB_CMDID			= 0x80F,
+	WMI_SEND_TONE_CMDID			= 0x810,
+	WMI_IQ_TX_CALIB_CMDID			= 0x811,
+	WMI_IQ_RX_CALIB_CMDID			= 0x812,
+	WMI_SET_UCODE_IDLE_CMDID		= 0x813,
+	WMI_SET_WORK_MODE_CMDID			= 0x815,
+	WMI_LO_LEAKAGE_CALIB_CMDID		= 0x816,
+	WMI_MARLON_R_READ_CMDID			= 0x818,
+	WMI_MARLON_R_WRITE_CMDID		= 0x819,
+	WMI_MARLON_R_TXRX_SEL_CMDID		= 0x81A,
+	MAC_IO_STATIC_PARAMS_CMDID		= 0x81B,
+	MAC_IO_DYNAMIC_PARAMS_CMDID		= 0x81C,
+	WMI_SILENT_RSSI_CALIB_CMDID		= 0x81D,
+	WMI_RF_RX_TEST_CMDID			= 0x81E,
+	WMI_CFG_RX_CHAIN_CMDID			= 0x820,
+	WMI_VRING_CFG_CMDID			= 0x821,
+	WMI_BCAST_VRING_CFG_CMDID		= 0x822,
+	WMI_VRING_BA_EN_CMDID			= 0x823,
+	WMI_VRING_BA_DIS_CMDID			= 0x824,
+	WMI_RCP_ADDBA_RESP_CMDID		= 0x825,
+	WMI_RCP_DELBA_CMDID			= 0x826,
+	WMI_SET_SSID_CMDID			= 0x827,
+	WMI_GET_SSID_CMDID			= 0x828,
+	WMI_SET_PCP_CHANNEL_CMDID		= 0x829,
+	WMI_GET_PCP_CHANNEL_CMDID		= 0x82A,
+	WMI_SW_TX_REQ_CMDID			= 0x82B,
+	WMI_READ_MAC_RXQ_CMDID			= 0x830,
+	WMI_READ_MAC_TXQ_CMDID			= 0x831,
+	WMI_WRITE_MAC_RXQ_CMDID			= 0x832,
+	WMI_WRITE_MAC_TXQ_CMDID			= 0x833,
+	WMI_WRITE_MAC_XQ_FIELD_CMDID		= 0x834,
+	WMI_MLME_PUSH_CMDID			= 0x835,
+	WMI_BEAMFORMING_MGMT_CMDID		= 0x836,
+	WMI_BF_TXSS_MGMT_CMDID			= 0x837,
+	WMI_BF_SM_MGMT_CMDID			= 0x838,
+	WMI_BF_RXSS_MGMT_CMDID			= 0x839,
+	WMI_BF_TRIG_CMDID			= 0x83A,
+	WMI_SET_SECTORS_CMDID			= 0x849,
+	WMI_MAINTAIN_PAUSE_CMDID		= 0x850,
+	WMI_MAINTAIN_RESUME_CMDID		= 0x851,
+	WMI_RS_MGMT_CMDID			= 0x852,
+	WMI_RF_MGMT_CMDID			= 0x853,
+	WMI_THERMAL_THROTTLING_CTRL_CMDID	= 0x854,
+	WMI_THERMAL_THROTTLING_GET_STATUS_CMDID	= 0x855,
+	WMI_OTP_READ_CMDID			= 0x856,
+	WMI_OTP_WRITE_CMDID			= 0x857,
 	/* Performance monitoring commands */
-	WMI_BF_CTRL_CMDID		= 0x0862,
-	WMI_NOTIFY_REQ_CMDID		= 0x0863,
-	WMI_GET_STATUS_CMDID		= 0x0864,
-	WMI_UNIT_TEST_CMDID		= 0x0900,
-	WMI_HICCUP_CMDID		= 0x0901,
-	WMI_FLASH_READ_CMDID		= 0x0902,
-	WMI_FLASH_WRITE_CMDID		= 0x0903,
-	WMI_SECURITY_UNIT_TEST_CMDID	= 0x0904,
-	/*P2P*/
-	WMI_P2P_CFG_CMDID		= 0x0910,
-	WMI_PORT_ALLOCATE_CMDID		= 0x0911,
-	WMI_PORT_DELETE_CMDID		= 0x0912,
-	WMI_POWER_MGMT_CFG_CMDID	= 0x0913,
-	WMI_START_LISTEN_CMDID		= 0x0914,
-	WMI_START_SEARCH_CMDID		= 0x0915,
-	WMI_DISCOVERY_START_CMDID	= 0x0916,
-	WMI_DISCOVERY_STOP_CMDID	= 0x0917,
-	WMI_PCP_START_CMDID		= 0x0918,
-	WMI_PCP_STOP_CMDID		= 0x0919,
-	WMI_GET_PCP_FACTOR_CMDID	= 0x091b,
-
-	WMI_SET_MAC_ADDRESS_CMDID	= 0xf003,
-	WMI_ABORT_SCAN_CMDID		= 0xf007,
-	WMI_SET_PMK_CMDID		= 0xf028,
-
-	WMI_SET_PROMISCUOUS_MODE_CMDID	= 0xf041,
-	WMI_GET_PMK_CMDID		= 0xf048,
-	WMI_SET_PASSPHRASE_CMDID	= 0xf049,
-	WMI_SEND_ASSOC_RES_CMDID	= 0xf04a,
-	WMI_SET_ASSOC_REQ_RELAY_CMDID	= 0xf04b,
-	WMI_EAPOL_TX_CMDID		= 0xf04c,
-	WMI_MAC_ADDR_REQ_CMDID		= 0xf04d,
-	WMI_FW_VER_CMDID		= 0xf04e,
-	WMI_PMC_CMDID			= 0xf04f,
+	WMI_BF_CTRL_CMDID			= 0x862,
+	WMI_NOTIFY_REQ_CMDID			= 0x863,
+	WMI_GET_STATUS_CMDID			= 0x864,
+	WMI_UNIT_TEST_CMDID			= 0x900,
+	WMI_HICCUP_CMDID			= 0x901,
+	WMI_FLASH_READ_CMDID			= 0x902,
+	WMI_FLASH_WRITE_CMDID			= 0x903,
+	/* P2P */
+	WMI_P2P_CFG_CMDID			= 0x910,
+	WMI_PORT_ALLOCATE_CMDID			= 0x911,
+	WMI_PORT_DELETE_CMDID			= 0x912,
+	WMI_POWER_MGMT_CFG_CMDID		= 0x913,
+	WMI_START_LISTEN_CMDID			= 0x914,
+	WMI_START_SEARCH_CMDID			= 0x915,
+	WMI_DISCOVERY_START_CMDID		= 0x916,
+	WMI_DISCOVERY_STOP_CMDID		= 0x917,
+	WMI_PCP_START_CMDID			= 0x918,
+	WMI_PCP_STOP_CMDID			= 0x919,
+	WMI_GET_PCP_FACTOR_CMDID		= 0x91B,
+	WMI_SET_MAC_ADDRESS_CMDID		= 0xF003,
+	WMI_ABORT_SCAN_CMDID			= 0xF007,
+	WMI_SET_PROMISCUOUS_MODE_CMDID		= 0xF041,
+	WMI_GET_PMK_CMDID			= 0xF048,
+	WMI_SET_PASSPHRASE_CMDID		= 0xF049,
+	WMI_SEND_ASSOC_RES_CMDID		= 0xF04A,
+	WMI_SET_ASSOC_REQ_RELAY_CMDID		= 0xF04B,
+	WMI_MAC_ADDR_REQ_CMDID			= 0xF04D,
+	WMI_FW_VER_CMDID			= 0xF04E,
+	WMI_PMC_CMDID				= 0xF04F,
 };
 
-/*
- * Commands data structures
- */
-
-/*
- * WMI_CONNECT_CMDID
- */
+/* WMI_CONNECT_CMDID */
 enum wmi_network_type {
 	WMI_NETTYPE_INFRA		= 0x01,
 	WMI_NETTYPE_ADHOC		= 0x02,
 	WMI_NETTYPE_ADHOC_CREATOR	= 0x04,
 	WMI_NETTYPE_AP			= 0x10,
 	WMI_NETTYPE_P2P			= 0x20,
-	WMI_NETTYPE_WBE			= 0x40, /* PCIE over 60g */
+	/* PCIE over 60g */
+	WMI_NETTYPE_WBE			= 0x40,
 };
 
 enum wmi_dot11_auth_mode {
-	WMI_AUTH11_OPEN			= 0x01,
-	WMI_AUTH11_SHARED		= 0x02,
-	WMI_AUTH11_LEAP			= 0x04,
-	WMI_AUTH11_WSC			= 0x08,
+	WMI_AUTH11_OPEN		= 0x01,
+	WMI_AUTH11_SHARED	= 0x02,
+	WMI_AUTH11_LEAP		= 0x04,
+	WMI_AUTH11_WSC		= 0x08,
 };
 
 enum wmi_auth_mode {
-	WMI_AUTH_NONE			= 0x01,
-	WMI_AUTH_WPA			= 0x02,
-	WMI_AUTH_WPA2			= 0x04,
-	WMI_AUTH_WPA_PSK		= 0x08,
-	WMI_AUTH_WPA2_PSK		= 0x10,
-	WMI_AUTH_WPA_CCKM		= 0x20,
-	WMI_AUTH_WPA2_CCKM		= 0x40,
+	WMI_AUTH_NONE		= 0x01,
+	WMI_AUTH_WPA		= 0x02,
+	WMI_AUTH_WPA2		= 0x04,
+	WMI_AUTH_WPA_PSK	= 0x08,
+	WMI_AUTH_WPA2_PSK	= 0x10,
+	WMI_AUTH_WPA_CCKM	= 0x20,
+	WMI_AUTH_WPA2_CCKM	= 0x40,
 };
 
 enum wmi_crypto_type {
-	WMI_CRYPT_NONE			= 0x01,
-	WMI_CRYPT_WEP			= 0x02,
-	WMI_CRYPT_TKIP			= 0x04,
-	WMI_CRYPT_AES			= 0x08,
-	WMI_CRYPT_AES_GCMP		= 0x20,
+	WMI_CRYPT_NONE		= 0x01,
+	WMI_CRYPT_AES_GCMP	= 0x20,
 };
 
 enum wmi_connect_ctrl_flag_bits {
-	WMI_CONNECT_ASSOC_POLICY_USER		= 0x0001,
-	WMI_CONNECT_SEND_REASSOC		= 0x0002,
-	WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER	= 0x0004,
-	WMI_CONNECT_PROFILE_MATCH_DONE		= 0x0008,
-	WMI_CONNECT_IGNORE_AAC_BEACON		= 0x0010,
-	WMI_CONNECT_CSA_FOLLOW_BSS		= 0x0020,
-	WMI_CONNECT_DO_WPA_OFFLOAD		= 0x0040,
-	WMI_CONNECT_DO_NOT_DEAUTH		= 0x0080,
+	WMI_CONNECT_ASSOC_POLICY_USER		= 0x01,
+	WMI_CONNECT_SEND_REASSOC		= 0x02,
+	WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER	= 0x04,
+	WMI_CONNECT_PROFILE_MATCH_DONE		= 0x08,
+	WMI_CONNECT_IGNORE_AAC_BEACON		= 0x10,
+	WMI_CONNECT_CSA_FOLLOW_BSS		= 0x20,
+	WMI_CONNECT_DO_WPA_OFFLOAD		= 0x40,
+	WMI_CONNECT_DO_NOT_DEAUTH		= 0x80,
 };
 
-#define WMI_MAX_SSID_LEN    (32)
+#define WMI_MAX_SSID_LEN	(32)
 
+/* WMI_CONNECT_CMDID */
 struct wmi_connect_cmd {
 	u8 network_type;
 	u8 dot11_auth_mode;
@@ -216,31 +226,17 @@
 	u8 reserved1[2];
 } __packed;
 
-/*
- * WMI_DISCONNECT_STA_CMDID
- */
+/* WMI_DISCONNECT_STA_CMDID */
 struct wmi_disconnect_sta_cmd {
 	u8 dst_mac[WMI_MAC_LEN];
 	__le16 disconnect_reason;
 } __packed;
 
-/*
- * WMI_SET_PMK_CMDID
- */
-
-#define WMI_MIN_KEY_INDEX	(0)
 #define WMI_MAX_KEY_INDEX	(3)
 #define WMI_MAX_KEY_LEN		(32)
 #define WMI_PASSPHRASE_LEN	(64)
-#define WMI_PMK_LEN		(32)
 
-struct  wmi_set_pmk_cmd {
-	u8 pmk[WMI_PMK_LEN];
-} __packed;
-
-/*
- * WMI_SET_PASSPHRASE_CMDID
- */
+/* WMI_SET_PASSPHRASE_CMDID */
 struct wmi_set_passphrase_cmd {
 	u8 ssid[WMI_MAX_SSID_LEN];
 	u8 passphrase[WMI_PASSPHRASE_LEN];
@@ -248,36 +244,34 @@
 	u8 passphrase_len;
 } __packed;
 
-/*
- * WMI_ADD_CIPHER_KEY_CMDID
- */
+/* WMI_ADD_CIPHER_KEY_CMDID */
 enum wmi_key_usage {
-	WMI_KEY_USE_PAIRWISE	= 0,
-	WMI_KEY_USE_RX_GROUP	= 1,
-	WMI_KEY_USE_TX_GROUP	= 2,
+	WMI_KEY_USE_PAIRWISE	= 0x00,
+	WMI_KEY_USE_RX_GROUP	= 0x01,
+	WMI_KEY_USE_TX_GROUP	= 0x02,
 };
 
 struct wmi_add_cipher_key_cmd {
 	u8 key_index;
 	u8 key_type;
-	u8 key_usage;		/* enum wmi_key_usage */
+	/* enum wmi_key_usage */
+	u8 key_usage;
 	u8 key_len;
-	u8 key_rsc[8];		/* key replay sequence counter */
+	/* key replay sequence counter */
+	u8 key_rsc[8];
 	u8 key[WMI_MAX_KEY_LEN];
-	u8 key_op_ctrl;		/* Additional Key Control information */
+	/* Additional Key Control information */
+	u8 key_op_ctrl;
 	u8 mac[WMI_MAC_LEN];
 } __packed;
 
-/*
- * WMI_DELETE_CIPHER_KEY_CMDID
- */
+/* WMI_DELETE_CIPHER_KEY_CMDID */
 struct wmi_delete_cipher_key_cmd {
 	u8 key_index;
 	u8 mac[WMI_MAC_LEN];
 } __packed;
 
-/*
- * WMI_START_SCAN_CMDID
+/* WMI_START_SCAN_CMDID
  *
  * Start L1 scan operation
  *
@@ -286,146 +280,142 @@
  * - WMI_SCAN_COMPLETE_EVENTID
  */
 enum wmi_scan_type {
-	WMI_LONG_SCAN		= 0,
-	WMI_SHORT_SCAN		= 1,
-	WMI_PBC_SCAN		= 2,
-	WMI_DIRECT_SCAN		= 3,
-	WMI_ACTIVE_SCAN		= 4,
+	WMI_ACTIVE_SCAN		= 0x00,
+	WMI_SHORT_SCAN		= 0x01,
+	WMI_PASSIVE_SCAN	= 0x02,
+	WMI_DIRECT_SCAN		= 0x03,
+	WMI_LONG_SCAN		= 0x04,
 };
 
+/* WMI_START_SCAN_CMDID */
 struct wmi_start_scan_cmd {
-	u8 direct_scan_mac_addr[6];
-	u8 reserved[2];
-	__le32 home_dwell_time;	/* Max duration in the home channel(ms) */
-	__le32 force_scan_interval;	/* Time interval between scans (ms)*/
-	u8 scan_type;		/* wmi_scan_type */
-	u8 num_channels;		/* how many channels follow */
+	u8 direct_scan_mac_addr[WMI_MAC_LEN];
+	/* DMG Beacon frame is transmitted during active scanning */
+	u8 discovery_mode;
+	/* reserved */
+	u8 reserved;
+	/* Max duration in the home channel(ms) */
+	__le32 dwell_time;
+	/* Time interval between scans (ms) */
+	__le32 force_scan_interval;
+	/* enum wmi_scan_type */
+	u8 scan_type;
+	/* how many channels follow */
+	u8 num_channels;
+	/* channels ID's:
+	 * 0 - 58320 MHz
+	 * 1 - 60480 MHz
+	 * 2 - 62640 MHz
+	 */
 	struct {
 		u8 channel;
 		u8 reserved;
-	} channel_list[0];	/* channels ID's */
-				/* 0 - 58320 MHz */
-				/* 1 - 60480 MHz */
-				/* 2 - 62640 MHz */
+	} channel_list[0];
 } __packed;
 
-/*
- * WMI_SET_PROBED_SSID_CMDID
- */
+/* WMI_SET_PROBED_SSID_CMDID */
 #define MAX_PROBED_SSID_INDEX	(3)
 
 enum wmi_ssid_flag {
-	WMI_SSID_FLAG_DISABLE	= 0,	/* disables entry */
-	WMI_SSID_FLAG_SPECIFIC	= 1,	/* probes specified ssid */
-	WMI_SSID_FLAG_ANY	= 2,	/* probes for any ssid */
+	/* disables entry */
+	WMI_SSID_FLAG_DISABLE	= 0x00,
+	/* probes specified ssid */
+	WMI_SSID_FLAG_SPECIFIC	= 0x01,
+	/* probes for any ssid */
+	WMI_SSID_FLAG_ANY	= 0x02,
 };
 
 struct wmi_probed_ssid_cmd {
-	u8 entry_index;			/* 0 to MAX_PROBED_SSID_INDEX */
-	u8 flag;			/* enum wmi_ssid_flag */
+	/* 0 to MAX_PROBED_SSID_INDEX */
+	u8 entry_index;
+	/* enum wmi_ssid_flag */
+	u8 flag;
 	u8 ssid_len;
 	u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
-/*
- * WMI_SET_APPIE_CMDID
+/* WMI_SET_APPIE_CMDID
  * Add Application specified IE to a management frame
  */
-#define WMI_MAX_IE_LEN		(1024)
+#define WMI_MAX_IE_LEN	(1024)
 
-/*
- * Frame Types
- */
+/* Frame Types */
 enum wmi_mgmt_frame_type {
-	WMI_FRAME_BEACON	= 0,
-	WMI_FRAME_PROBE_REQ	= 1,
-	WMI_FRAME_PROBE_RESP	= 2,
-	WMI_FRAME_ASSOC_REQ	= 3,
-	WMI_FRAME_ASSOC_RESP	= 4,
-	WMI_NUM_MGMT_FRAME,
+	WMI_FRAME_BEACON	= 0x00,
+	WMI_FRAME_PROBE_REQ	= 0x01,
+	WMI_FRAME_PROBE_RESP	= 0x02,
+	WMI_FRAME_ASSOC_REQ	= 0x03,
+	WMI_FRAME_ASSOC_RESP	= 0x04,
+	WMI_NUM_MGMT_FRAME	= 0x05,
 };
 
 struct wmi_set_appie_cmd {
-	u8 mgmt_frm_type;	/* enum wmi_mgmt_frame_type */
+	/* enum wmi_mgmt_frame_type */
+	u8 mgmt_frm_type;
 	u8 reserved;
-	__le16 ie_len;	/* Length of the IE to be added to MGMT frame */
+	/* Length of the IE to be added to MGMT frame */
+	__le16 ie_len;
 	u8 ie_info[0];
 } __packed;
 
-/*
- * WMI_PXMT_RANGE_CFG_CMDID
- */
+/* WMI_PXMT_RANGE_CFG_CMDID */
 struct wmi_pxmt_range_cfg_cmd {
 	u8 dst_mac[WMI_MAC_LEN];
 	__le16 range;
 } __packed;
 
-/*
- * WMI_PXMT_SNR2_RANGE_CFG_CMDID
- */
+/* WMI_PXMT_SNR2_RANGE_CFG_CMDID */
 struct wmi_pxmt_snr2_range_cfg_cmd {
-	s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+	s8 snr2range_arr[2];
 } __packed;
 
-/*
- * WMI_RF_MGMT_CMDID
- */
+/* WMI_RF_MGMT_CMDID */
 enum wmi_rf_mgmt_type {
-	WMI_RF_MGMT_W_DISABLE	= 0,
-	WMI_RF_MGMT_W_ENABLE	= 1,
-	WMI_RF_MGMT_GET_STATUS	= 2,
+	WMI_RF_MGMT_W_DISABLE	= 0x00,
+	WMI_RF_MGMT_W_ENABLE	= 0x01,
+	WMI_RF_MGMT_GET_STATUS	= 0x02,
 };
 
+/* WMI_RF_MGMT_CMDID */
 struct wmi_rf_mgmt_cmd {
 	__le32 rf_mgmt_type;
 } __packed;
 
-/*
- * WMI_THERMAL_THROTTLING_CTRL_CMDID
- */
+/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
 #define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH	(0xFFFFFFFF)
 
+/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
 struct wmi_thermal_throttling_ctrl_cmd {
 	__le32 time_on_usec;
 	__le32 time_off_usec;
 	__le32 max_txop_length_usec;
 } __packed;
 
-/*
- * WMI_RF_RX_TEST_CMDID
- */
+/* WMI_RF_RX_TEST_CMDID */
 struct wmi_rf_rx_test_cmd {
 	__le32 sector;
 } __packed;
 
-/*
- * WMI_CORR_MEASURE_CMDID
- */
+/* WMI_CORR_MEASURE_CMDID */
 struct wmi_corr_measure_cmd {
-	s32 freq_mhz;
+	__le32 freq_mhz;
 	__le32 length_samples;
 	__le32 iterations;
 } __packed;
 
-/*
- * WMI_SET_SSID_CMDID
- */
+/* WMI_SET_SSID_CMDID */
 struct wmi_set_ssid_cmd {
 	__le32 ssid_len;
 	u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
-/*
- * WMI_SET_PCP_CHANNEL_CMDID
- */
+/* WMI_SET_PCP_CHANNEL_CMDID */
 struct wmi_set_pcp_channel_cmd {
 	u8 channel;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_BCON_CTRL_CMDID
- */
+/* WMI_BCON_CTRL_CMDID */
 struct wmi_bcon_ctrl_cmd {
 	__le16 bcon_interval;
 	__le16 frag_num;
@@ -434,214 +424,192 @@
 	u8 pcp_max_assoc_sta;
 	u8 disable_sec_offload;
 	u8 disable_sec;
+	u8 hidden_ssid;
+	u8 is_go;
+	u8 reserved[2];
 } __packed;
 
-/******* P2P ***********/
-
-/*
- * WMI_PORT_ALLOCATE_CMDID
- */
+/* WMI_PORT_ALLOCATE_CMDID */
 enum wmi_port_role {
-	WMI_PORT_STA		= 0,
-	WMI_PORT_PCP		= 1,
-	WMI_PORT_AP		= 2,
-	WMI_PORT_P2P_DEV	= 3,
-	WMI_PORT_P2P_CLIENT	= 4,
-	WMI_PORT_P2P_GO		= 5,
+	WMI_PORT_STA		= 0x00,
+	WMI_PORT_PCP		= 0x01,
+	WMI_PORT_AP		= 0x02,
+	WMI_PORT_P2P_DEV	= 0x03,
+	WMI_PORT_P2P_CLIENT	= 0x04,
+	WMI_PORT_P2P_GO		= 0x05,
 };
 
+/* WMI_PORT_ALLOCATE_CMDID */
 struct wmi_port_allocate_cmd {
 	u8 mac[WMI_MAC_LEN];
 	u8 port_role;
 	u8 mid;
 } __packed;
 
-/*
- * WMI_PORT_DELETE_CMDID
- */
-struct wmi_delete_port_cmd {
+/* WMI_PORT_DELETE_CMDID */
+struct wmi_port_delete_cmd {
 	u8 mid;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_P2P_CFG_CMDID
- */
+/* WMI_P2P_CFG_CMDID */
 enum wmi_discovery_mode {
-	WMI_DISCOVERY_MODE_NON_OFFLOAD	= 0,
-	WMI_DISCOVERY_MODE_OFFLOAD	= 1,
-	WMI_DISCOVERY_MODE_PEER2PEER	= 2,
+	WMI_DISCOVERY_MODE_NON_OFFLOAD	= 0x00,
+	WMI_DISCOVERY_MODE_OFFLOAD	= 0x01,
+	WMI_DISCOVERY_MODE_PEER2PEER	= 0x02,
 };
 
 struct wmi_p2p_cfg_cmd {
-	u8 discovery_mode;	/* wmi_discovery_mode */
+	/* enum wmi_discovery_mode */
+	u8 discovery_mode;
 	u8 channel;
-	__le16 bcon_interval; /* base to listen/search duration calculation */
+	/* base for listen/search duration calculation */
+	__le16 bcon_interval;
 } __packed;
 
-/*
- * WMI_POWER_MGMT_CFG_CMDID
- */
+/* WMI_POWER_MGMT_CFG_CMDID */
 enum wmi_power_source_type {
-	WMI_POWER_SOURCE_BATTERY	= 0,
-	WMI_POWER_SOURCE_OTHER		= 1,
+	WMI_POWER_SOURCE_BATTERY	= 0x00,
+	WMI_POWER_SOURCE_OTHER		= 0x01,
 };
 
 struct wmi_power_mgmt_cfg_cmd {
-	u8 power_source;	/* wmi_power_source_type */
+	/* enum wmi_power_source_type */
+	u8 power_source;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_PCP_START_CMDID
- */
-
-enum wmi_hidden_ssid {
-	WMI_HIDDEN_SSID_DISABLED	= 0,
-	WMI_HIDDEN_SSID_SEND_EMPTY	= 1,
-	WMI_HIDDEN_SSID_CLEAR	= 2,
-};
-
+/* WMI_PCP_START_CMDID */
 struct wmi_pcp_start_cmd {
 	__le16 bcon_interval;
 	u8 pcp_max_assoc_sta;
 	u8 hidden_ssid;
-	u8 reserved0[8];
+	u8 is_go;
+	u8 reserved0[7];
 	u8 network_type;
 	u8 channel;
 	u8 disable_sec_offload;
 	u8 disable_sec;
 } __packed;
 
-/*
- * WMI_SW_TX_REQ_CMDID
- */
+/* WMI_SW_TX_REQ_CMDID */
 struct wmi_sw_tx_req_cmd {
 	u8 dst_mac[WMI_MAC_LEN];
 	__le16 len;
 	u8 payload[0];
 } __packed;
 
-/*
- * WMI_VRING_CFG_CMDID
- */
-
 struct wmi_sw_ring_cfg {
 	__le64 ring_mem_base;
 	__le16 ring_size;
 	__le16 max_mpdu_size;
 } __packed;
 
+/* wmi_vring_cfg_schd */
 struct wmi_vring_cfg_schd {
 	__le16 priority;
 	__le16 timeslot_us;
 } __packed;
 
 enum wmi_vring_cfg_encap_trans_type {
-	WMI_VRING_ENC_TYPE_802_3		= 0,
-	WMI_VRING_ENC_TYPE_NATIVE_WIFI		= 1,
+	WMI_VRING_ENC_TYPE_802_3	= 0x00,
+	WMI_VRING_ENC_TYPE_NATIVE_WIFI	= 0x01,
 };
 
 enum wmi_vring_cfg_ds_cfg {
-	WMI_VRING_DS_PBSS			= 0,
-	WMI_VRING_DS_STATION			= 1,
-	WMI_VRING_DS_AP				= 2,
-	WMI_VRING_DS_ADDR4			= 3,
+	WMI_VRING_DS_PBSS	= 0x00,
+	WMI_VRING_DS_STATION	= 0x01,
+	WMI_VRING_DS_AP		= 0x02,
+	WMI_VRING_DS_ADDR4	= 0x03,
 };
 
 enum wmi_vring_cfg_nwifi_ds_trans_type {
-	WMI_NWIFI_TX_TRANS_MODE_NO		= 0,
-	WMI_NWIFI_TX_TRANS_MODE_AP2PBSS		= 1,
-	WMI_NWIFI_TX_TRANS_MODE_STA2PBSS	= 2,
+	WMI_NWIFI_TX_TRANS_MODE_NO		= 0x00,
+	WMI_NWIFI_TX_TRANS_MODE_AP2PBSS		= 0x01,
+	WMI_NWIFI_TX_TRANS_MODE_STA2PBSS	= 0x02,
 };
 
 enum wmi_vring_cfg_schd_params_priority {
-	WMI_SCH_PRIO_REGULAR			= 0,
-	WMI_SCH_PRIO_HIGH			= 1,
+	WMI_SCH_PRIO_REGULAR	= 0x00,
+	WMI_SCH_PRIO_HIGH	= 0x01,
 };
 
-#define CIDXTID_CID_POS (0)
-#define CIDXTID_CID_LEN (4)
-#define CIDXTID_CID_MSK (0xF)
-#define CIDXTID_TID_POS (4)
-#define CIDXTID_TID_LEN (4)
-#define CIDXTID_TID_MSK (0xF0)
+#define CIDXTID_CID_POS				(0)
+#define CIDXTID_CID_LEN				(4)
+#define CIDXTID_CID_MSK				(0xF)
+#define CIDXTID_TID_POS				(4)
+#define CIDXTID_TID_LEN				(4)
+#define CIDXTID_TID_MSK				(0xF0)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS	(0)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN	(1)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK	(0x1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_POS		(1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_LEN		(1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_MSK		(0x2)
+#define VRING_CFG_TO_RESOLUTION_VALUE_POS	(0)
+#define VRING_CFG_TO_RESOLUTION_VALUE_LEN	(6)
+#define VRING_CFG_TO_RESOLUTION_VALUE_MSK	(0x3F)
 
 struct wmi_vring_cfg {
 	struct wmi_sw_ring_cfg tx_sw_ring;
-	u8 ringid;				/* 0-23 vrings */
-
+	/* 0-23 vrings */
+	u8 ringid;
 	u8 cidxtid;
-
 	u8 encap_trans_type;
-	u8 ds_cfg;				/* 802.3 DS cfg */
+	/* 802.3 DS cfg */
+	u8 ds_cfg;
 	u8 nwifi_ds_trans_type;
-
-	#define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
-	#define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
-	#define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
-	#define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
-	#define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
-	#define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
 	u8 mac_ctrl;
-
-	#define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
-	#define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
-	#define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
 	u8 to_resolution;
 	u8 agg_max_wsize;
 	struct wmi_vring_cfg_schd schd_params;
 } __packed;
 
 enum wmi_vring_cfg_cmd_action {
-	WMI_VRING_CMD_ADD			= 0,
-	WMI_VRING_CMD_MODIFY			= 1,
-	WMI_VRING_CMD_DELETE			= 2,
+	WMI_VRING_CMD_ADD	= 0x00,
+	WMI_VRING_CMD_MODIFY	= 0x01,
+	WMI_VRING_CMD_DELETE	= 0x02,
 };
 
+/* WMI_VRING_CFG_CMDID */
 struct wmi_vring_cfg_cmd {
 	__le32 action;
 	struct wmi_vring_cfg vring_cfg;
 } __packed;
 
-/*
- * WMI_BCAST_VRING_CFG_CMDID
- */
 struct wmi_bcast_vring_cfg {
 	struct wmi_sw_ring_cfg tx_sw_ring;
-	u8 ringid;				/* 0-23 vrings */
+	/* 0-23 vrings */
+	u8 ringid;
 	u8 encap_trans_type;
-	u8 ds_cfg;				/* 802.3 DS cfg */
+	/* 802.3 DS cfg */
+	u8 ds_cfg;
 	u8 nwifi_ds_trans_type;
 } __packed;
 
+/* WMI_BCAST_VRING_CFG_CMDID */
 struct wmi_bcast_vring_cfg_cmd {
 	__le32 action;
 	struct wmi_bcast_vring_cfg vring_cfg;
 } __packed;
 
-/*
- * WMI_VRING_BA_EN_CMDID
- */
+/* WMI_VRING_BA_EN_CMDID */
 struct wmi_vring_ba_en_cmd {
 	u8 ringid;
 	u8 agg_max_wsize;
 	__le16 ba_timeout;
 	u8 amsdu;
+	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_VRING_BA_DIS_CMDID
- */
+/* WMI_VRING_BA_DIS_CMDID */
 struct wmi_vring_ba_dis_cmd {
 	u8 ringid;
 	u8 reserved;
 	__le16 reason;
 } __packed;
 
-/*
- * WMI_NOTIFY_REQ_CMDID
- */
+/* WMI_NOTIFY_REQ_CMDID */
 struct wmi_notify_req_cmd {
 	u8 cid;
 	u8 year;
@@ -654,102 +622,100 @@
 	u8 miliseconds;
 } __packed;
 
-/*
- * WMI_CFG_RX_CHAIN_CMDID
- */
+/* WMI_CFG_RX_CHAIN_CMDID */
 enum wmi_sniffer_cfg_mode {
-	WMI_SNIFFER_OFF				= 0,
-	WMI_SNIFFER_ON				= 1,
+	WMI_SNIFFER_OFF	= 0x00,
+	WMI_SNIFFER_ON	= 0x01,
 };
 
 enum wmi_sniffer_cfg_phy_info_mode {
-	WMI_SNIFFER_PHY_INFO_DISABLED		= 0,
-	WMI_SNIFFER_PHY_INFO_ENABLED		= 1,
+	WMI_SNIFFER_PHY_INFO_DISABLED	= 0x00,
+	WMI_SNIFFER_PHY_INFO_ENABLED	= 0x01,
 };
 
 enum wmi_sniffer_cfg_phy_support {
-	WMI_SNIFFER_CP				= 0,
-	WMI_SNIFFER_DP				= 1,
-	WMI_SNIFFER_BOTH_PHYS			= 2,
+	WMI_SNIFFER_CP		= 0x00,
+	WMI_SNIFFER_DP		= 0x01,
+	WMI_SNIFFER_BOTH_PHYS	= 0x02,
 };
 
+/* wmi_sniffer_cfg */
 struct wmi_sniffer_cfg {
-	__le32 mode;		/* enum wmi_sniffer_cfg_mode */
-	__le32 phy_info_mode;	/* enum wmi_sniffer_cfg_phy_info_mode */
-	__le32 phy_support;	/* enum wmi_sniffer_cfg_phy_support */
+	/* enum wmi_sniffer_cfg_mode */
+	__le32 mode;
+	/* enum wmi_sniffer_cfg_phy_info_mode */
+	__le32 phy_info_mode;
+	/* enum wmi_sniffer_cfg_phy_support */
+	__le32 phy_support;
 	u8 channel;
 	u8 reserved[3];
 } __packed;
 
 enum wmi_cfg_rx_chain_cmd_action {
-	WMI_RX_CHAIN_ADD			= 0,
-	WMI_RX_CHAIN_DEL			= 1,
+	WMI_RX_CHAIN_ADD	= 0x00,
+	WMI_RX_CHAIN_DEL	= 0x01,
 };
 
 enum wmi_cfg_rx_chain_cmd_decap_trans_type {
-	WMI_DECAP_TYPE_802_3			= 0,
-	WMI_DECAP_TYPE_NATIVE_WIFI		= 1,
-	WMI_DECAP_TYPE_NONE			= 2,
+	WMI_DECAP_TYPE_802_3		= 0x00,
+	WMI_DECAP_TYPE_NATIVE_WIFI	= 0x01,
+	WMI_DECAP_TYPE_NONE		= 0x02,
 };
 
 enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
-	WMI_NWIFI_RX_TRANS_MODE_NO		= 0,
-	WMI_NWIFI_RX_TRANS_MODE_PBSS2AP		= 1,
-	WMI_NWIFI_RX_TRANS_MODE_PBSS2STA	= 2,
+	WMI_NWIFI_RX_TRANS_MODE_NO		= 0x00,
+	WMI_NWIFI_RX_TRANS_MODE_PBSS2AP		= 0x01,
+	WMI_NWIFI_RX_TRANS_MODE_PBSS2STA	= 0x02,
 };
 
 enum wmi_cfg_rx_chain_cmd_reorder_type {
-	WMI_RX_HW_REORDER = 0,
-	WMI_RX_SW_REORDER = 1,
+	WMI_RX_HW_REORDER	= 0x00,
+	WMI_RX_SW_REORDER	= 0x01,
 };
 
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS	(0)
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN	(1)
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK	(0x1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS		(1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN		(1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK		(0x2)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS		(0)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN		(1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK		(0x1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS		(1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN		(1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK		(0x2)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS			(0)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN			(1)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK			(0x1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS		(1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN		(1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK		(0x2)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS		(0)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN		(1)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK		(0x1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_POS			(1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_LEN			(1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_MSK			(0x2)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_POS		(2)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_LEN		(1)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_MSK		(0x4)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_POS		(3)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_LEN		(1)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_MSK		(0x8)
+
+/* WMI_CFG_RX_CHAIN_CMDID */
 struct wmi_cfg_rx_chain_cmd {
 	__le32 action;
 	struct wmi_sw_ring_cfg rx_sw_ring;
 	u8 mid;
 	u8 decap_trans_type;
-
-	#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
-	#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
-	#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
-	#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
-	#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
-	#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
 	u8 l2_802_3_offload_ctrl;
-
-	#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
-	#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
-	#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
-	#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
-	#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
-	#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
 	u8 l2_nwifi_offload_ctrl;
-
 	u8 vlan_id;
 	u8 nwifi_ds_trans_type;
-
-	#define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
-	#define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
-	#define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
-	#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
-	#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
-	#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
 	u8 l3_l4_ctrl;
-
-	#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
-	#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
-	#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
-	#define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
-	#define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
-	#define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
-	#define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
-	#define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
-	#define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
-	#define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
-	#define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
-	#define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
 	u8 ring_ctrl;
-
 	__le16 prefetch_thrsh;
 	__le16 wb_thrsh;
 	__le32 itr_value;
@@ -757,31 +723,27 @@
 	u8 reorder_type;
 	u8 reserved;
 	struct wmi_sniffer_cfg sniffer_cfg;
+	__le16 max_rx_pl_per_desc;
 } __packed;
 
-/*
- * WMI_RCP_ADDBA_RESP_CMDID
- */
+/* WMI_RCP_ADDBA_RESP_CMDID */
 struct wmi_rcp_addba_resp_cmd {
 	u8 cidxtid;
 	u8 dialog_token;
 	__le16 status_code;
-	__le16 ba_param_set;	/* ieee80211_ba_parameterset field to send */
+	/* ieee80211_ba_parameterset field to send */
+	__le16 ba_param_set;
 	__le16 ba_timeout;
 } __packed;
 
-/*
- * WMI_RCP_DELBA_CMDID
- */
+/* WMI_RCP_DELBA_CMDID */
 struct wmi_rcp_delba_cmd {
 	u8 cidxtid;
 	u8 reserved;
 	__le16 reason;
 } __packed;
 
-/*
- * WMI_RCP_ADDBA_REQ_CMDID
- */
+/* WMI_RCP_ADDBA_REQ_CMDID */
 struct wmi_rcp_addba_req_cmd {
 	u8 cidxtid;
 	u8 dialog_token;
@@ -792,32 +754,16 @@
 	__le16 ba_seq_ctrl;
 } __packed;
 
-/*
- * WMI_SET_MAC_ADDRESS_CMDID
- */
+/* WMI_SET_MAC_ADDRESS_CMDID */
 struct wmi_set_mac_address_cmd {
 	u8 mac[WMI_MAC_LEN];
 	u8 reserved[2];
 } __packed;
 
-/*
-* WMI_EAPOL_TX_CMDID
-*/
-struct wmi_eapol_tx_cmd {
-	u8 dst_mac[WMI_MAC_LEN];
-	__le16 eapol_len;
-	u8 eapol[0];
-} __packed;
-
-/*
- * WMI_ECHO_CMDID
- *
+/* WMI_ECHO_CMDID
  * Check FW is alive
- *
  * WMI_DEEP_ECHO_CMDID
- *
  * Check FW and ucode are alive
- *
  * Returned event: WMI_ECHO_RSP_EVENTID
  * same event for both commands
  */
@@ -825,70 +771,79 @@
 	__le32 value;
 } __packed;
 
-/*
- * WMI_TEMP_SENSE_CMDID
+/* WMI_OTP_READ_CMDID */
+struct wmi_otp_read_cmd {
+	__le32 addr;
+	__le32 size;
+	__le32 values;
+} __packed;
+
+/* WMI_OTP_WRITE_CMDID */
+struct wmi_otp_write_cmd {
+	__le32 addr;
+	__le32 size;
+	__le32 values;
+} __packed;
+
+/* WMI_TEMP_SENSE_CMDID
  *
  * Measure MAC and radio temperatures
+ *
+ * Possible modes for temperature measurement
  */
-
-/* Possible modes for temperature measurement */
 enum wmi_temperature_measure_mode {
-	TEMPERATURE_USE_OLD_VALUE	= 0x1,
-	TEMPERATURE_MEASURE_NOW		= 0x2,
+	TEMPERATURE_USE_OLD_VALUE	= 0x01,
+	TEMPERATURE_MEASURE_NOW		= 0x02,
 };
 
+/* WMI_TEMP_SENSE_CMDID */
 struct wmi_temp_sense_cmd {
 	__le32 measure_baseband_en;
 	__le32 measure_rf_en;
 	__le32 measure_mode;
 } __packed;
 
-/*
- * WMI_PMC_CMDID
- */
-enum wmi_pmc_op_e {
-	WMI_PMC_ALLOCATE = 0,
-	WMI_PMC_RELEASE = 1,
+enum wmi_pmc_op {
+	WMI_PMC_ALLOCATE	= 0x00,
+	WMI_PMC_RELEASE		= 0x01,
 };
 
+/* WMI_PMC_CMDID */
 struct wmi_pmc_cmd {
-	u8 op;		/* enum wmi_pmc_cmd_op_type */
+	/* enum wmi_pmc_op */
+	u8 op;
 	u8 reserved;
 	__le16 ring_size;
 	__le64 mem_base;
 } __packed;
 
-/*
- * WMI Events
- */
-
-/*
+/* WMI Events
  * List of Events (target to host)
  */
 enum wmi_event_id {
 	WMI_READY_EVENTID			= 0x1001,
 	WMI_CONNECT_EVENTID			= 0x1002,
 	WMI_DISCONNECT_EVENTID			= 0x1003,
-	WMI_SCAN_COMPLETE_EVENTID		= 0x100a,
-	WMI_REPORT_STATISTICS_EVENTID		= 0x100b,
+	WMI_SCAN_COMPLETE_EVENTID		= 0x100A,
+	WMI_REPORT_STATISTICS_EVENTID		= 0x100B,
 	WMI_RD_MEM_RSP_EVENTID			= 0x1800,
 	WMI_FW_READY_EVENTID			= 0x1801,
-	WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID	= 0x0200,
+	WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID	= 0x200,
 	WMI_ECHO_RSP_EVENTID			= 0x1803,
-	WMI_FS_TUNE_DONE_EVENTID		= 0x180a,
-	WMI_CORR_MEASURE_EVENTID		= 0x180b,
-	WMI_READ_RSSI_EVENTID			= 0x180c,
-	WMI_TEMP_SENSE_DONE_EVENTID		= 0x180e,
-	WMI_DC_CALIB_DONE_EVENTID		= 0x180f,
+	WMI_FS_TUNE_DONE_EVENTID		= 0x180A,
+	WMI_CORR_MEASURE_EVENTID		= 0x180B,
+	WMI_READ_RSSI_EVENTID			= 0x180C,
+	WMI_TEMP_SENSE_DONE_EVENTID		= 0x180E,
+	WMI_DC_CALIB_DONE_EVENTID		= 0x180F,
 	WMI_IQ_TX_CALIB_DONE_EVENTID		= 0x1811,
 	WMI_IQ_RX_CALIB_DONE_EVENTID		= 0x1812,
 	WMI_SET_WORK_MODE_DONE_EVENTID		= 0x1815,
 	WMI_LO_LEAKAGE_CALIB_DONE_EVENTID	= 0x1816,
 	WMI_MARLON_R_READ_DONE_EVENTID		= 0x1818,
 	WMI_MARLON_R_WRITE_DONE_EVENTID		= 0x1819,
-	WMI_MARLON_R_TXRX_SEL_DONE_EVENTID	= 0x181a,
-	WMI_SILENT_RSSI_CALIB_DONE_EVENTID	= 0x181d,
-	WMI_RF_RX_TEST_DONE_EVENTID		= 0x181e,
+	WMI_MARLON_R_TXRX_SEL_DONE_EVENTID	= 0x181A,
+	WMI_SILENT_RSSI_CALIB_DONE_EVENTID	= 0x181D,
+	WMI_RF_RX_TEST_DONE_EVENTID		= 0x181E,
 	WMI_CFG_RX_CHAIN_DONE_EVENTID		= 0x1820,
 	WMI_VRING_CFG_DONE_EVENTID		= 0x1821,
 	WMI_BA_STATUS_EVENTID			= 0x1823,
@@ -896,15 +851,13 @@
 	WMI_RCP_ADDBA_RESP_SENT_EVENTID		= 0x1825,
 	WMI_DELBA_EVENTID			= 0x1826,
 	WMI_GET_SSID_EVENTID			= 0x1828,
-	WMI_GET_PCP_CHANNEL_EVENTID		= 0x182a,
-	WMI_SW_TX_COMPLETE_EVENTID		= 0x182b,
-
+	WMI_GET_PCP_CHANNEL_EVENTID		= 0x182A,
+	WMI_SW_TX_COMPLETE_EVENTID		= 0x182B,
 	WMI_READ_MAC_RXQ_EVENTID		= 0x1830,
 	WMI_READ_MAC_TXQ_EVENTID		= 0x1831,
 	WMI_WRITE_MAC_RXQ_EVENTID		= 0x1832,
 	WMI_WRITE_MAC_TXQ_EVENTID		= 0x1833,
 	WMI_WRITE_MAC_XQ_FIELD_EVENTID		= 0x1834,
-
 	WMI_BEAMFORMING_MGMT_DONE_EVENTID	= 0x1836,
 	WMI_BF_TXSS_MGMT_DONE_EVENTID		= 0x1837,
 	WMI_BF_RXSS_MGMT_DONE_EVENTID		= 0x1839,
@@ -914,20 +867,18 @@
 	WMI_BF_SM_MGMT_DONE_EVENTID		= 0x1838,
 	WMI_RX_MGMT_PACKET_EVENTID		= 0x1840,
 	WMI_TX_MGMT_PACKET_EVENTID		= 0x1841,
-
+	WMI_OTP_READ_RESULT_EVENTID		= 0x1856,
 	/* Performance monitoring events */
 	WMI_DATA_PORT_OPEN_EVENTID		= 0x1860,
 	WMI_WBE_LINK_DOWN_EVENTID		= 0x1861,
-
 	WMI_BF_CTRL_DONE_EVENTID		= 0x1862,
 	WMI_NOTIFY_REQ_DONE_EVENTID		= 0x1863,
 	WMI_GET_STATUS_DONE_EVENTID		= 0x1864,
 	WMI_VRING_EN_EVENTID			= 0x1865,
-
 	WMI_UNIT_TEST_EVENTID			= 0x1900,
 	WMI_FLASH_READ_DONE_EVENTID		= 0x1902,
 	WMI_FLASH_WRITE_DONE_EVENTID		= 0x1903,
-	/*P2P*/
+	/* P2P */
 	WMI_P2P_CFG_DONE_EVENTID		= 0x1910,
 	WMI_PORT_ALLOCATED_EVENTID		= 0x1911,
 	WMI_PORT_DELETED_EVENTID		= 0x1912,
@@ -937,49 +888,42 @@
 	WMI_DISCOVERY_STOPPED_EVENTID		= 0x1917,
 	WMI_PCP_STARTED_EVENTID			= 0x1918,
 	WMI_PCP_STOPPED_EVENTID			= 0x1919,
-	WMI_PCP_FACTOR_EVENTID			= 0x191a,
+	WMI_PCP_FACTOR_EVENTID			= 0x191A,
 	WMI_SET_CHANNEL_EVENTID			= 0x9000,
 	WMI_ASSOC_REQ_EVENTID			= 0x9001,
 	WMI_EAPOL_RX_EVENTID			= 0x9002,
 	WMI_MAC_ADDR_RESP_EVENTID		= 0x9003,
 	WMI_FW_VER_EVENTID			= 0x9004,
+	WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID	= 0x9005,
 };
 
-/*
- * Events data structures
- */
-
+/* Events data structures */
 enum wmi_fw_status {
-	WMI_FW_STATUS_SUCCESS,
-	WMI_FW_STATUS_FAILURE,
+	WMI_FW_STATUS_SUCCESS	= 0x00,
+	WMI_FW_STATUS_FAILURE	= 0x01,
 };
 
-/*
- * WMI_RF_MGMT_STATUS_EVENTID
- */
+/* WMI_RF_MGMT_STATUS_EVENTID */
 enum wmi_rf_status {
-	WMI_RF_ENABLED			= 0,
-	WMI_RF_DISABLED_HW		= 1,
-	WMI_RF_DISABLED_SW		= 2,
-	WMI_RF_DISABLED_HW_SW		= 3,
+	WMI_RF_ENABLED		= 0x00,
+	WMI_RF_DISABLED_HW	= 0x01,
+	WMI_RF_DISABLED_SW	= 0x02,
+	WMI_RF_DISABLED_HW_SW	= 0x03,
 };
 
+/* WMI_RF_MGMT_STATUS_EVENTID */
 struct wmi_rf_mgmt_status_event {
 	__le32 rf_status;
 } __packed;
 
-/*
- * WMI_THERMAL_THROTTLING_STATUS_EVENTID
- */
+/* WMI_THERMAL_THROTTLING_STATUS_EVENTID */
 struct wmi_thermal_throttling_status_event {
 	__le32 time_on_usec;
 	__le32 time_off_usec;
 	__le32 max_txop_length_usec;
 } __packed;
 
-/*
- * WMI_GET_STATUS_DONE_EVENTID
- */
+/* WMI_GET_STATUS_DONE_EVENTID */
 struct wmi_get_status_done_event {
 	__le32 is_associated;
 	u8 cid;
@@ -995,9 +939,7 @@
 	__le32 is_secured;
 } __packed;
 
-/*
- * WMI_FW_VER_EVENTID
- */
+/* WMI_FW_VER_EVENTID */
 struct wmi_fw_ver_event {
 	u8 major;
 	u8 minor;
@@ -1005,9 +947,7 @@
 	__le16 build;
 } __packed;
 
-/*
-* WMI_MAC_ADDR_RESP_EVENTID
-*/
+/* WMI_MAC_ADDR_RESP_EVENTID */
 struct wmi_mac_addr_resp_event {
 	u8 mac[WMI_MAC_LEN];
 	u8 auth_mode;
@@ -1015,42 +955,38 @@
 	__le32 offload_mode;
 } __packed;
 
-/*
-* WMI_EAPOL_RX_EVENTID
-*/
+/* WMI_EAPOL_RX_EVENTID */
 struct wmi_eapol_rx_event {
 	u8 src_mac[WMI_MAC_LEN];
 	__le16 eapol_len;
 	u8 eapol[0];
 } __packed;
 
-/*
-* WMI_READY_EVENTID
-*/
+/* WMI_READY_EVENTID */
 enum wmi_phy_capability {
-	WMI_11A_CAPABILITY		= 1,
-	WMI_11G_CAPABILITY		= 2,
-	WMI_11AG_CAPABILITY		= 3,
-	WMI_11NA_CAPABILITY		= 4,
-	WMI_11NG_CAPABILITY		= 5,
-	WMI_11NAG_CAPABILITY		= 6,
-	WMI_11AD_CAPABILITY		= 7,
-	WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+	WMI_11A_CAPABILITY		= 0x01,
+	WMI_11G_CAPABILITY		= 0x02,
+	WMI_11AG_CAPABILITY		= 0x03,
+	WMI_11NA_CAPABILITY		= 0x04,
+	WMI_11NG_CAPABILITY		= 0x05,
+	WMI_11NAG_CAPABILITY		= 0x06,
+	WMI_11AD_CAPABILITY		= 0x07,
+	WMI_11N_CAPABILITY_OFFSET	= 0x03,
 };
 
 struct wmi_ready_event {
 	__le32 sw_version;
 	__le32 abi_version;
 	u8 mac[WMI_MAC_LEN];
-	u8 phy_capability;		/* enum wmi_phy_capability */
+	/* enum wmi_phy_capability */
+	u8 phy_capability;
 	u8 numof_additional_mids;
 } __packed;
 
-/*
- * WMI_NOTIFY_REQ_DONE_EVENTID
- */
+/* WMI_NOTIFY_REQ_DONE_EVENTID */
 struct wmi_notify_req_done_event {
-	__le32 status; /* beamforming status, 0: fail; 1: OK; 2: retrying */
+	/* beamforming status, 0: fail; 1: OK; 2: retrying */
+	__le32 status;
 	__le64 tsf;
 	__le32 snr_val;
 	__le32 tx_tpt;
@@ -1066,9 +1002,7 @@
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_CONNECT_EVENTID
- */
+/* WMI_CONNECT_EVENTID */
 struct wmi_connect_event {
 	u8 channel;
 	u8 reserved0;
@@ -1082,68 +1016,103 @@
 	u8 assoc_resp_len;
 	u8 cid;
 	u8 reserved2[3];
+	/* not in use */
 	u8 assoc_info[0];
 } __packed;
 
-/*
- * WMI_DISCONNECT_EVENTID
- */
+/* WMI_DISCONNECT_EVENTID */
 enum wmi_disconnect_reason {
-	WMI_DIS_REASON_NO_NETWORK_AVAIL		= 1,
-	WMI_DIS_REASON_LOST_LINK		= 2, /* bmiss */
-	WMI_DIS_REASON_DISCONNECT_CMD		= 3,
-	WMI_DIS_REASON_BSS_DISCONNECTED		= 4,
-	WMI_DIS_REASON_AUTH_FAILED		= 5,
-	WMI_DIS_REASON_ASSOC_FAILED		= 6,
-	WMI_DIS_REASON_NO_RESOURCES_AVAIL	= 7,
-	WMI_DIS_REASON_CSERV_DISCONNECT		= 8,
-	WMI_DIS_REASON_INVALID_PROFILE		= 10,
-	WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH	= 11,
-	WMI_DIS_REASON_PROFILE_MISMATCH		= 12,
-	WMI_DIS_REASON_CONNECTION_EVICTED	= 13,
-	WMI_DIS_REASON_IBSS_MERGE		= 14,
+	WMI_DIS_REASON_NO_NETWORK_AVAIL		= 0x01,
+	/* bmiss */
+	WMI_DIS_REASON_LOST_LINK		= 0x02,
+	WMI_DIS_REASON_DISCONNECT_CMD		= 0x03,
+	WMI_DIS_REASON_BSS_DISCONNECTED		= 0x04,
+	WMI_DIS_REASON_AUTH_FAILED		= 0x05,
+	WMI_DIS_REASON_ASSOC_FAILED		= 0x06,
+	WMI_DIS_REASON_NO_RESOURCES_AVAIL	= 0x07,
+	WMI_DIS_REASON_CSERV_DISCONNECT		= 0x08,
+	WMI_DIS_REASON_INVALID_PROFILE		= 0x0A,
+	WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH	= 0x0B,
+	WMI_DIS_REASON_PROFILE_MISMATCH		= 0x0C,
+	WMI_DIS_REASON_CONNECTION_EVICTED	= 0x0D,
+	WMI_DIS_REASON_IBSS_MERGE		= 0x0E,
 };
 
 struct wmi_disconnect_event {
-	__le16 protocol_reason_status;	/* reason code, see 802.11 spec. */
-	u8 bssid[WMI_MAC_LEN];		/* set if known */
-	u8 disconnect_reason;		/* see wmi_disconnect_reason */
-	u8 assoc_resp_len;	/* not used */
-	u8 assoc_info[0];	/* not used */
+	/* reason code, see 802.11 spec. */
+	__le16 protocol_reason_status;
+	/* set if known */
+	u8 bssid[WMI_MAC_LEN];
+	/* see enum wmi_disconnect_reason */
+	u8 disconnect_reason;
+	/* last assoc req may be passed to host - not in use */
+	u8 assoc_resp_len;
+	/* last assoc req may be passed to host - not in use */
+	u8 assoc_info[0];
 } __packed;
 
-/*
- * WMI_SCAN_COMPLETE_EVENTID
- */
+/* WMI_SCAN_COMPLETE_EVENTID */
 enum scan_status {
-	WMI_SCAN_SUCCESS	= 0,
-	WMI_SCAN_FAILED		= 1,
-	WMI_SCAN_ABORTED	= 2,
-	WMI_SCAN_REJECTED	= 3,
+	WMI_SCAN_SUCCESS	= 0x00,
+	WMI_SCAN_FAILED		= 0x01,
+	WMI_SCAN_ABORTED	= 0x02,
+	WMI_SCAN_REJECTED	= 0x03,
+	WMI_SCAN_ABORT_REJECTED	= 0x04,
 };
 
 struct wmi_scan_complete_event {
-	__le32 status;	/* scan_status */
+	/* enum scan_status */
+	__le32 status;
 } __packed;
 
-/*
- * WMI_BA_STATUS_EVENTID
- */
-enum wmi_vring_ba_status {
-	WMI_BA_AGREED			= 0,
-	WMI_BA_NON_AGREED		= 1,
-	/* BA_EN in middle of teardown flow */
-	WMI_BA_TD_WIP			= 2,
-	/* BA_DIS or BA_EN in middle of BA SETUP flow */
-	WMI_BA_SETUP_WIP		= 3,
-	/* BA_EN when the BA session is already active */
-	WMI_BA_SESSION_ACTIVE		= 4,
-	/* BA_DIS when the BA session is not active */
-	WMI_BA_SESSION_NOT_ACTIVE	= 5,
+/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
+enum wmi_acs_info_bitmask {
+	WMI_ACS_INFO_BITMASK_BEACON_FOUND	= 0x01,
+	WMI_ACS_INFO_BITMASK_BUSY_TIME		= 0x02,
+	WMI_ACS_INFO_BITMASK_TX_TIME		= 0x04,
+	WMI_ACS_INFO_BITMASK_RX_TIME		= 0x08,
+	WMI_ACS_INFO_BITMASK_NOISE		= 0x10,
 };
 
-struct wmi_vring_ba_status_event {
-	__le16 status; /* enum wmi_vring_ba_status */
+struct scan_acs_info {
+	u8 channel;
+	u8 beacon_found;
+	/* msec */
+	__le16 busy_time;
+	__le16 tx_time;
+	__le16 rx_time;
+	u8 noise;
+	u8 reserved[3];
+} __packed;
+
+struct wmi_acs_passive_scan_complete_event {
+	__le32 dwell_time;
+	/* bitmask (enum wmi_acs_info_bitmask) of the valid fields within
+	 * each channel info entry, in struct field order
+	 */
+	__le16 filled;
+	u8 num_scanned_channels;
+	u8 reserved;
+	struct scan_acs_info scan_info_list[0];
+} __packed;
+
+/* WMI_BA_STATUS_EVENTID */
+enum wmi_vring_ba_status {
+	WMI_BA_AGREED			= 0x00,
+	WMI_BA_NON_AGREED		= 0x01,
+	/* BA_EN in middle of teardown flow */
+	WMI_BA_TD_WIP			= 0x02,
+	/* BA_DIS or BA_EN in middle of BA SETUP flow */
+	WMI_BA_SETUP_WIP		= 0x03,
+	/* BA_EN when the BA session is already active */
+	WMI_BA_SESSION_ACTIVE		= 0x04,
+	/* BA_DIS when the BA session is not active */
+	WMI_BA_SESSION_NOT_ACTIVE	= 0x05,
+};
+
+struct wmi_ba_status_event {
+	/* enum wmi_vring_ba_status */
+	__le16 status;
 	u8 reserved[2];
 	u8 ringid;
 	u8 agg_wsize;
@@ -1151,18 +1120,14 @@
 	u8 amsdu;
 } __packed;
 
-/*
- * WMI_DELBA_EVENTID
- */
+/* WMI_DELBA_EVENTID */
 struct wmi_delba_event {
 	u8 cidxtid;
 	u8 from_initiator;
 	__le16 reason;
 } __packed;
 
-/*
- * WMI_VRING_CFG_DONE_EVENTID
- */
+/* WMI_VRING_CFG_DONE_EVENTID */
 struct wmi_vring_cfg_done_event {
 	u8 ringid;
 	u8 status;
@@ -1170,174 +1135,151 @@
 	__le32 tx_vring_tail_ptr;
 } __packed;
 
-/*
- * WMI_RCP_ADDBA_RESP_SENT_EVENTID
- */
+/* WMI_RCP_ADDBA_RESP_SENT_EVENTID */
 struct wmi_rcp_addba_resp_sent_event {
 	u8 cidxtid;
 	u8 reserved;
 	__le16 status;
 } __packed;
 
-/*
- * WMI_RCP_ADDBA_REQ_EVENTID
- */
+/* WMI_RCP_ADDBA_REQ_EVENTID */
 struct wmi_rcp_addba_req_event {
 	u8 cidxtid;
 	u8 dialog_token;
-	__le16 ba_param_set;	/* ieee80211_ba_parameterset as it received */
+	/* ieee80211_ba_parameterset as it was received */
+	__le16 ba_param_set;
 	__le16 ba_timeout;
-	__le16 ba_seq_ctrl;	/* ieee80211_ba_seqstrl field as it received */
+	/* ieee80211_ba_seqstrl field as it was received */
+	__le16 ba_seq_ctrl;
 } __packed;
 
-/*
- * WMI_CFG_RX_CHAIN_DONE_EVENTID
- */
+/* WMI_CFG_RX_CHAIN_DONE_EVENTID */
 enum wmi_cfg_rx_chain_done_event_status {
-	WMI_CFG_RX_CHAIN_SUCCESS	= 1,
+	WMI_CFG_RX_CHAIN_SUCCESS	= 0x01,
 };
 
 struct wmi_cfg_rx_chain_done_event {
-	__le32 rx_ring_tail_ptr;	/* Rx V-Ring Tail pointer */
+	/* V-Ring Tail pointer */
+	__le32 rx_ring_tail_ptr;
 	__le32 status;
 } __packed;
 
-/*
- * WMI_WBE_LINK_DOWN_EVENTID
- */
+/* WMI_WBE_LINK_DOWN_EVENTID */
 enum wmi_wbe_link_down_event_reason {
-	WMI_WBE_REASON_USER_REQUEST	= 0,
-	WMI_WBE_REASON_RX_DISASSOC	= 1,
-	WMI_WBE_REASON_BAD_PHY_LINK	= 2,
+	WMI_WBE_REASON_USER_REQUEST	= 0x00,
+	WMI_WBE_REASON_RX_DISASSOC	= 0x01,
+	WMI_WBE_REASON_BAD_PHY_LINK	= 0x02,
 };
 
+/* WMI_WBE_LINK_DOWN_EVENTID */
 struct wmi_wbe_link_down_event {
 	u8 cid;
 	u8 reserved[3];
 	__le32 reason;
 } __packed;
 
-/*
- * WMI_DATA_PORT_OPEN_EVENTID
- */
+/* WMI_DATA_PORT_OPEN_EVENTID */
 struct wmi_data_port_open_event {
 	u8 cid;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_VRING_EN_EVENTID
- */
+/* WMI_VRING_EN_EVENTID */
 struct wmi_vring_en_event {
 	u8 vring_index;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_GET_PCP_CHANNEL_EVENTID
- */
+/* WMI_GET_PCP_CHANNEL_EVENTID */
 struct wmi_get_pcp_channel_event {
 	u8 channel;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_P2P_CFG_DONE_EVENTID
- */
+/* WMI_P2P_CFG_DONE_EVENTID */
 struct wmi_p2p_cfg_done_event {
-	u8 status;	/* wmi_fw_status */
+	/* wmi_fw_status */
+	u8 status;
 	u8 reserved[3];
 } __packed;
 
-/*
-* WMI_PORT_ALLOCATED_EVENTID
-*/
+/* WMI_PORT_ALLOCATED_EVENTID */
 struct wmi_port_allocated_event {
-	u8 status;	/* wmi_fw_status */
+	/* wmi_fw_status */
+	u8 status;
 	u8 reserved[3];
 } __packed;
 
-/*
-* WMI_PORT_DELETED_EVENTID
-*/
+/* WMI_PORT_DELETED_EVENTID */
 struct wmi_port_deleted_event {
-	u8 status;	/* wmi_fw_status */
+	/* wmi_fw_status */
+	u8 status;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_LISTEN_STARTED_EVENTID
- */
+/* WMI_LISTEN_STARTED_EVENTID */
 struct wmi_listen_started_event {
-	u8 status;	/* wmi_fw_status */
+	/* wmi_fw_status */
+	u8 status;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_SEARCH_STARTED_EVENTID
- */
+/* WMI_SEARCH_STARTED_EVENTID */
 struct wmi_search_started_event {
-	u8 status;	/* wmi_fw_status */
+	/* wmi_fw_status */
+	u8 status;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_PCP_STARTED_EVENTID
- */
+/* WMI_PCP_STARTED_EVENTID */
 struct wmi_pcp_started_event {
-	u8 status;	/* wmi_fw_status */
+	/* wmi_fw_status */
+	u8 status;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_PCP_FACTOR_EVENTID
- */
+/* WMI_PCP_FACTOR_EVENTID */
 struct wmi_pcp_factor_event {
 	__le32 pcp_factor;
 } __packed;
 
-/*
- * WMI_SW_TX_COMPLETE_EVENTID
- */
 enum wmi_sw_tx_status {
-	WMI_TX_SW_STATUS_SUCCESS		= 0,
-	WMI_TX_SW_STATUS_FAILED_NO_RESOURCES	= 1,
-	WMI_TX_SW_STATUS_FAILED_TX		= 2,
+	WMI_TX_SW_STATUS_SUCCESS		= 0x00,
+	WMI_TX_SW_STATUS_FAILED_NO_RESOURCES	= 0x01,
+	WMI_TX_SW_STATUS_FAILED_TX		= 0x02,
 };
 
+/* WMI_SW_TX_COMPLETE_EVENTID */
 struct wmi_sw_tx_complete_event {
-	u8 status;	/* enum wmi_sw_tx_status */
+	/* enum wmi_sw_tx_status */
+	u8 status;
 	u8 reserved[3];
 } __packed;
 
-/*
- * WMI_CORR_MEASURE_EVENTID
- */
+/* WMI_CORR_MEASURE_EVENTID */
 struct wmi_corr_measure_event {
-	s32 i;
-	s32 q;
-	s32 image_i;
-	s32 image_q;
+	/* signed */
+	__le32 i;
+	/* signed */
+	__le32 q;
+	/* signed */
+	__le32 image_i;
+	/* signed */
+	__le32 image_q;
 } __packed;
 
-/*
- * WMI_READ_RSSI_EVENTID
- */
+/* WMI_READ_RSSI_EVENTID */
 struct wmi_read_rssi_event {
 	__le32 ina_rssi_adc_dbm;
 } __packed;
 
-/*
- * WMI_GET_SSID_EVENTID
- */
+/* WMI_GET_SSID_EVENTID */
 struct wmi_get_ssid_event {
 	__le32 ssid_len;
 	u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
-/*
- * WMI_RX_MGMT_PACKET_EVENTID
- */
+/* wmi_rx_mgmt_info */
 struct wmi_rx_mgmt_info {
 	u8 mcs;
 	s8 snr;
@@ -1346,39 +1288,65 @@
 	__le16 stype;
 	__le16 status;
 	__le32 len;
+	/* Not resolved when == 0xFFFFFFFF  ==> Broadcast to all MIDS */
 	u8 qid;
+	/* Not resolved when == 0xFFFFFFFF  ==> Broadcast to all MIDS */
 	u8 mid;
 	u8 cid;
-	u8 channel;	/* From Radio MNGR */
+	/* From Radio MNGR */
+	u8 channel;
 } __packed;
 
-/*
- * WMI_TX_MGMT_PACKET_EVENTID
- */
+/* wmi_otp_read_write_cmd */
+struct wmi_otp_read_write_cmd {
+	__le32 addr;
+	__le32 size;
+	u8 values[0];
+} __packed;
+
+/* WMI_OTP_READ_RESULT_EVENTID */
+struct wmi_otp_read_result_event {
+	u8 payload[0];
+} __packed;
+
+/* WMI_TX_MGMT_PACKET_EVENTID */
 struct wmi_tx_mgmt_packet_event {
 	u8 payload[0];
 } __packed;
 
+/* WMI_RX_MGMT_PACKET_EVENTID */
 struct wmi_rx_mgmt_packet_event {
 	struct wmi_rx_mgmt_info info;
 	u8 payload[0];
 } __packed;
 
-/*
- * WMI_ECHO_RSP_EVENTID
- */
-struct wmi_echo_event {
+/* WMI_ECHO_RSP_EVENTID */
+struct wmi_echo_rsp_event {
 	__le32 echoed_value;
 } __packed;
 
-/*
- * WMI_TEMP_SENSE_DONE_EVENTID
+/* WMI_TEMP_SENSE_DONE_EVENTID
  *
  * Measure MAC and radio temperatures
  */
 struct wmi_temp_sense_done_event {
+	/* Temperature times 1000 (divide the value by 1000 to get the
+	 * actual temperature)
+	 */
 	__le32 baseband_t1000;
+	/* Temperature times 1000 (divide the value by 1000 to get the
+	 * actual temperature)
+	 */
 	__le32 rf_t1000;
 } __packed;
 
+#define WMI_SCAN_DWELL_TIME_MS	(100)
+#define WMI_SURVEY_TIMEOUT_MS	(10000)
+
+enum wmi_hidden_ssid {
+	WMI_HIDDEN_SSID_DISABLED	= 0x00,
+	WMI_HIDDEN_SSID_SEND_EMPTY	= 0x10,
+	WMI_HIDDEN_SSID_CLEAR		= 0xFE,
+};
+
 #endif /* __WILOCITY_WMI_H__ */
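For reference, the CIDXTID_*, VRING_CFG_*, L2_*, L3_L4_* and RING_CTRL_* defines that this patch hoists from inside the structs to file scope all follow the same shift-and-mask convention. A minimal sketch of packing and parsing the cidxtid byte carried in the vring and block-ack structures (illustrative only, not part of the patch; the helper names are hypothetical):

	/* illustrative helpers -- hypothetical names, not part of this patch */
	static inline u8 wmi_mk_cidxtid(u8 cid, u8 tid)
	{
		return ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
		       ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
	}

	static inline void wmi_parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
	{
		*cid = (cidxtid & CIDXTID_CID_MSK) >> CIDXTID_CID_POS;
		*tid = (cidxtid & CIDXTID_TID_MSK) >> CIDXTID_TID_POS;
	}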
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 1efb1d6..7c10804 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -1547,7 +1547,7 @@
 		channel = el[2];
 
 exit:
-	return ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+	return ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
 }
 
 static void at76_rx_tasklet(unsigned long param)
@@ -1590,7 +1590,7 @@
 	rx_status.signal = buf->rssi;
 	rx_status.flag |= RX_FLAG_DECRYPTED;
 	rx_status.flag |= RX_FLAG_IV_STRIPPED;
-	rx_status.band = IEEE80211_BAND_2GHZ;
+	rx_status.band = NL80211_BAND_2GHZ;
 	rx_status.freq = at76_guess_freq(priv);
 
 	at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
@@ -2359,7 +2359,7 @@
 	priv->hw->wiphy->max_scan_ssids = 1;
 	priv->hw->wiphy->max_scan_ie_len = 0;
 	priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
-	priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band;
+	priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = &at76_supported_band;
 	ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
 	ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC);
 	priv->hw->max_signal = 100;
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 6a1f03c..8f8f37f 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -2434,7 +2434,7 @@
 
 			/* Values in MHz -> * 10^5 * 10 */
 			range->freq[k].m = 100000 *
-			 ieee80211_channel_to_frequency(i, IEEE80211_BAND_2GHZ);
+			 ieee80211_channel_to_frequency(i, NL80211_BAND_2GHZ);
 			range->freq[k++].e = 1;
 		}
 		range->num_frequency = k;
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 0365524..d7d42f0 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -992,9 +992,9 @@
 
 /**
  * b43_current_band - Returns the currently used band.
- * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ.
+ * Returns one of NL80211_BAND_2GHZ and NL80211_BAND_5GHZ.
  */
-static inline enum ieee80211_band b43_current_band(struct b43_wl *wl)
+static inline enum nl80211_band b43_current_band(struct b43_wl *wl)
 {
 	return wl->hw->conf.chandef.chan->band;
 }
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 72380af..4ee5c58 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -187,7 +187,7 @@
 #define b43_g_ratetable_size	12
 
 #define CHAN2G(_channel, _freq, _flags) {			\
-	.band			= IEEE80211_BAND_2GHZ,		\
+	.band			= NL80211_BAND_2GHZ,		\
 	.center_freq		= (_freq),			\
 	.hw_value		= (_channel),			\
 	.flags			= (_flags),			\
@@ -216,7 +216,7 @@
 #undef CHAN2G
 
 #define CHAN4G(_channel, _flags) {				\
-	.band			= IEEE80211_BAND_5GHZ,		\
+	.band			= NL80211_BAND_5GHZ,		\
 	.center_freq		= 4000 + (5 * (_channel)),	\
 	.hw_value		= (_channel),			\
 	.flags			= (_flags),			\
@@ -224,7 +224,7 @@
 	.max_power		= 30,				\
 }
 #define CHAN5G(_channel, _flags) {				\
-	.band			= IEEE80211_BAND_5GHZ,		\
+	.band			= NL80211_BAND_5GHZ,		\
 	.center_freq		= 5000 + (5 * (_channel)),	\
 	.hw_value		= (_channel),			\
 	.flags			= (_flags),			\
@@ -323,7 +323,7 @@
 #undef CHAN5G
 
 static struct ieee80211_supported_band b43_band_5GHz_nphy = {
-	.band		= IEEE80211_BAND_5GHZ,
+	.band		= NL80211_BAND_5GHZ,
 	.channels	= b43_5ghz_nphy_chantable,
 	.n_channels	= ARRAY_SIZE(b43_5ghz_nphy_chantable),
 	.bitrates	= b43_a_ratetable,
@@ -331,7 +331,7 @@
 };
 
 static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = {
-	.band		= IEEE80211_BAND_5GHZ,
+	.band		= NL80211_BAND_5GHZ,
 	.channels	= b43_5ghz_nphy_chantable_limited,
 	.n_channels	= ARRAY_SIZE(b43_5ghz_nphy_chantable_limited),
 	.bitrates	= b43_a_ratetable,
@@ -339,7 +339,7 @@
 };
 
 static struct ieee80211_supported_band b43_band_5GHz_aphy = {
-	.band		= IEEE80211_BAND_5GHZ,
+	.band		= NL80211_BAND_5GHZ,
 	.channels	= b43_5ghz_aphy_chantable,
 	.n_channels	= ARRAY_SIZE(b43_5ghz_aphy_chantable),
 	.bitrates	= b43_a_ratetable,
@@ -347,7 +347,7 @@
 };
 
 static struct ieee80211_supported_band b43_band_2GHz = {
-	.band		= IEEE80211_BAND_2GHZ,
+	.band		= NL80211_BAND_2GHZ,
 	.channels	= b43_2ghz_chantable,
 	.n_channels	= ARRAY_SIZE(b43_2ghz_chantable),
 	.bitrates	= b43_g_ratetable,
@@ -355,7 +355,7 @@
 };
 
 static struct ieee80211_supported_band b43_band_2ghz_limited = {
-	.band		= IEEE80211_BAND_2GHZ,
+	.band		= NL80211_BAND_2GHZ,
 	.channels	= b43_2ghz_chantable,
 	.n_channels	= b43_2ghz_chantable_limited_size,
 	.bitrates	= b43_g_ratetable,
@@ -717,7 +717,7 @@
 {
 	/* slot_time is in usec. */
 	/* This test used to exit for all but a G PHY. */
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 		return;
 	b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
 	/* Shared memory location 0x0010 is the slot time and should be
@@ -3880,12 +3880,12 @@
 	mutex_unlock(&wl->mutex);
 }
 
-static const char *band_to_string(enum ieee80211_band band)
+static const char *band_to_string(enum nl80211_band band)
 {
 	switch (band) {
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		return "5";
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		return "2.4";
 	default:
 		break;
@@ -3903,10 +3903,10 @@
 	u32 tmp;
 
 	switch (chan->band) {
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		gmode = false;
 		break;
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		gmode = true;
 		break;
 	default:
@@ -5294,16 +5294,16 @@
 		     phy->radio_rev == 9;
 
 	if (have_2ghz_phy)
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = limited_2g ?
+		hw->wiphy->bands[NL80211_BAND_2GHZ] = limited_2g ?
 			&b43_band_2ghz_limited : &b43_band_2GHz;
 	if (dev->phy.type == B43_PHYTYPE_N) {
 		if (have_5ghz_phy)
-			hw->wiphy->bands[IEEE80211_BAND_5GHZ] = limited_5g ?
+			hw->wiphy->bands[NL80211_BAND_5GHZ] = limited_5g ?
 				&b43_band_5GHz_nphy_limited :
 				&b43_band_5GHz_nphy;
 	} else {
 		if (have_5ghz_phy)
-			hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
+			hw->wiphy->bands[NL80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
 	}
 
 	dev->phy.supports_2ghz = have_2ghz_phy;
@@ -5680,11 +5680,12 @@
 	INIT_WORK(&wl->firmware_load, b43_request_firmware);
 	schedule_work(&wl->firmware_load);
 
-bcma_out:
 	return err;
 
 bcma_err_wireless_exit:
 	ieee80211_free_hw(wl->hw);
+bcma_out:
+	kfree(dev);
 	return err;
 }
 
@@ -5712,8 +5713,8 @@
 	b43_rng_exit(wl);
 
 	b43_leds_unregister(wl);
-
 	ieee80211_free_hw(wl->hw);
+	kfree(wldev->dev);
 }
 
 static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@
 
 	b43_leds_unregister(wl);
 	b43_wireless_exit(dev, wl);
+	kfree(dev);
 }
 
 static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/broadcom/b43/phy_ac.c b/drivers/net/wireless/broadcom/b43/phy_ac.c
index e75633d..52f8aba 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ac.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ac.c
@@ -61,7 +61,7 @@
 
 static unsigned int b43_phy_ac_op_get_default_chan(struct b43_wldev *dev)
 {
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		return 11;
 	return 36;
 }
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
index ec2b9c5..85f2ca9 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
@@ -436,7 +436,7 @@
 	 * firmware from sending ghost packets.
 	 */
 	channelcookie = new_channel;
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 		channelcookie |= B43_SHM_SH_CHAN_5GHZ;
 	/* FIXME: set 40Mhz flag if required */
 	if (0)
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c
index bd68945..718c90e 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.c
@@ -568,7 +568,7 @@
 	} else {
 		b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
 
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 			for (i = 0; i < 3; i++)
 				b43_phy_write(dev, cmd_regs[i], 0x32);
 		}
@@ -643,7 +643,7 @@
 	u16 freq = dev->phy.chandef->chan->center_freq;
 	int i, c;
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		for (c = 0; c < 3; c++) {
 			target[c] = sprom->core_pwr_info[c].maxpwr_2g;
 			a1[c] = sprom->core_pwr_info[c].pa_2g[0];
@@ -777,7 +777,7 @@
 				const struct b43_phy_ht_channeltab_e_phy *e,
 				struct ieee80211_channel *new_channel)
 {
-	if (new_channel->band == IEEE80211_BAND_5GHZ) {
+	if (new_channel->band == NL80211_BAND_5GHZ) {
 		/* Switch to 2 GHz for a moment to access B-PHY regs */
 		b43_phy_mask(dev, B43_PHY_HT_BANDCTL, ~B43_PHY_HT_BANDCTL_5GHZ);
 
@@ -805,7 +805,7 @@
 	} else {
 		b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN,
 				      B43_PHY_HT_CLASS_CTL_OFDM_EN);
-		if (new_channel->band == IEEE80211_BAND_2GHZ)
+		if (new_channel->band == NL80211_BAND_2GHZ)
 			b43_phy_mask(dev, B43_PHY_HT_TEST, ~0x840);
 	}
 
@@ -916,7 +916,7 @@
 	if (0) /* TODO: condition */
 		; /* TODO: PHY op on reg 0x217 */
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 		b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
 	else
 		b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN,
@@ -1005,7 +1005,7 @@
 	b43_phy_ht_classifier(dev, 0, 0);
 	b43_phy_ht_read_clip_detection(dev, clip_state);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		b43_phy_ht_bphy_init(dev);
 
 	b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0),
@@ -1077,7 +1077,7 @@
 	enum nl80211_channel_type channel_type =
 		cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		if ((new_channel < 1) || (new_channel > 14))
 			return -EINVAL;
 	} else {
@@ -1089,7 +1089,7 @@
 
 static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev)
 {
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		return 11;
 	return 36;
 }
diff --git a/drivers/net/wireless/broadcom/b43/phy_lcn.c b/drivers/net/wireless/broadcom/b43/phy_lcn.c
index 97461cc..63bd29f 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lcn.c
@@ -108,7 +108,7 @@
 /* wlc_radio_2064_init */
 static void b43_radio_2064_init(struct b43_wldev *dev)
 {
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		b43_radio_write(dev, 0x09c, 0x0020);
 		b43_radio_write(dev, 0x105, 0x0008);
 	} else {
@@ -535,7 +535,7 @@
 	b43_mac_suspend(dev);
 
 	if (!dev->phy.lcn->hw_pwr_ctl_capable) {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			tx_gains.gm_gain = 4;
 			tx_gains.pga_gain = 12;
 			tx_gains.pad_gain = 12;
@@ -720,7 +720,7 @@
 	else
 		B43_WARN_ON(1);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		b43_phy_lcn_tx_pwr_ctl_init(dev);
 
 	b43_switch_channel(dev, dev->phy.channel);
@@ -779,7 +779,7 @@
 	enum nl80211_channel_type channel_type =
 		cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		if ((new_channel < 1) || (new_channel > 14))
 			return -EINVAL;
 	} else {
@@ -791,7 +791,7 @@
 
 static unsigned int b43_phy_lcn_op_get_default_chan(struct b43_wldev *dev)
 {
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		return 1;
 	return 36;
 }
diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
index 058a9f2..6922cbb 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
@@ -46,7 +46,7 @@
 
 static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev)
 {
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		return 1;
 	return 36;
 }
@@ -91,7 +91,7 @@
 	u32 ofdmpo;
 	int i;
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		lpphy->tx_isolation_med_band = sprom->tri2g;
 		lpphy->bx_arch = sprom->bxa2g;
 		lpphy->rx_pwr_offset = sprom->rxpo2g;
@@ -174,7 +174,7 @@
 
 	B43_WARN_ON(dev->phy.rev >= 2);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		isolation = lpphy->tx_isolation_med_band;
 	else if (freq <= 5320)
 		isolation = lpphy->tx_isolation_low_band;
@@ -238,7 +238,7 @@
 	b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB,
 			0xFF00, lpphy->rx_pwr_offset);
 	if ((sprom->boardflags_lo & B43_BFL_FEM) &&
-	   ((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+	   ((b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ||
 	   (sprom->boardflags_hi & B43_BFH_PAREF))) {
 		ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28);
 		ssb_pmu_set_ldo_paref(&bus->chipco, true);
@@ -280,7 +280,7 @@
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xC0FF, 0x0900);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A);
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00);
-	} else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ ||
+	} else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ ||
 		   (dev->dev->board_type == SSB_BOARD_BU4312) ||
 		   (dev->phy.rev == 0 && (sprom->boardflags_lo & B43_BFL_FEM))) {
 		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001);
@@ -326,7 +326,7 @@
 		//FIXME the Broadcom driver caches & delays this HF write!
 		b43_hf_write(dev, b43_hf_read(dev) | B43_HF_PR45960W);
 	}
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x8000);
 		b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0040);
 		b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0xA400);
@@ -466,7 +466,7 @@
 		b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40);
 	}
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40);
 		b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00);
 		b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6);
@@ -547,7 +547,7 @@
 		b43_radio_write(dev, B2062_S_BG_CTL1,
 			(b43_radio_read(dev, B2062_N_COMM2) >> 1) | 0x80);
 	}
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		b43_radio_set(dev, B2062_N_TSSI_CTL0, 0x1);
 	else
 		b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1);
@@ -746,7 +746,7 @@
 		lpphy->crs_sys_disable = false;
 
 	if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 			b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL,
 					0xFF1F, 0x60);
 		else
@@ -807,7 +807,7 @@
 	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
 	if (dev->phy.rev >= 2) {
 		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
 			b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
 		}
@@ -823,7 +823,7 @@
 	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
 	if (dev->phy.rev >= 2) {
 		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
 			b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
 		}
@@ -951,7 +951,7 @@
 			0xFBFF, ext_lna << 10);
 	b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain);
 	b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF0, high_gain);
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		tmp = (gain >> 2) & 0x3;
 		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
 				0xE7FF, tmp<<11);
@@ -1344,7 +1344,7 @@
 	if (dev->phy.rev >= 2) {
 		lpphy_rev2plus_rc_calib(dev);
 	} else if (!lpphy->rc_cap) {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 			lpphy_rev0_1_rc_calib(dev);
 	} else {
 		lpphy_set_rc_cap(dev);
@@ -1548,7 +1548,7 @@
 {
 	struct lpphy_tx_gains gains;
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		gains.gm = 4;
 		gains.pad = 12;
 		gains.pga = 12;
@@ -1902,7 +1902,7 @@
 
 	lpphy_set_trsw_over(dev, tx, rx);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
 		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
 				0xFFF7, pa << 3);
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 9f0bcf3..a5557d7 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -105,9 +105,9 @@
 
 static inline bool b43_nphy_ipa(struct b43_wldev *dev)
 {
-	enum ieee80211_band band = b43_current_band(dev->wl);
-	return ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
-		(dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ));
+	enum nl80211_band band = b43_current_band(dev->wl);
+	return ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
+		(dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ));
 }
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
@@ -357,7 +357,7 @@
 			break;
 		case N_INTC_OVERRIDE_PA:
 			tmp = 0x0030;
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+			if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 				val = value << 5;
 			else
 				val = value << 4;
@@ -365,7 +365,7 @@
 			b43_phy_set(dev, reg, 0x1000);
 			break;
 		case N_INTC_OVERRIDE_EXT_LNA_PU:
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+			if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 				tmp = 0x0001;
 				tmp2 = 0x0004;
 				val = value;
@@ -378,7 +378,7 @@
 			b43_phy_mask(dev, reg, ~tmp2);
 			break;
 		case N_INTC_OVERRIDE_EXT_LNA_GAIN:
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+			if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 				tmp = 0x0002;
 				tmp2 = 0x0008;
 				val = value << 1;
@@ -465,7 +465,7 @@
 			}
 			break;
 		case N_INTC_OVERRIDE_PA:
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+			if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 				tmp = 0x0020;
 				val = value << 5;
 			} else {
@@ -475,7 +475,7 @@
 			b43_phy_maskset(dev, reg, ~tmp, val);
 			break;
 		case N_INTC_OVERRIDE_EXT_LNA_PU:
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+			if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 				tmp = 0x0001;
 				val = value;
 			} else {
@@ -485,7 +485,7 @@
 			b43_phy_maskset(dev, reg, ~tmp, val);
 			break;
 		case N_INTC_OVERRIDE_EXT_LNA_GAIN:
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+			if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 				tmp = 0x0002;
 				val = value << 1;
 			} else {
@@ -600,7 +600,7 @@
 		b43_nphy_stay_in_carrier_search(dev, 1);
 
 	if (nphy->gain_boost) {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			gain[0] = 6;
 			gain[1] = 6;
 		} else {
@@ -736,7 +736,7 @@
 	switch (phy->radio_rev) {
 	case 0 ... 4:
 	case 6:
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, 0x3f);
 			b43_radio_write(dev, R2057_CP_KPD_IDAC, 0x3f);
 			b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, 0x8);
@@ -751,7 +751,7 @@
 	case 9: /* e.g. PHY rev 16 */
 		b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x20);
 		b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x18);
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 			b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x38);
 			b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x0f);
 
@@ -775,7 +775,7 @@
 		break;
 	}
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		u16 txmix2g_tune_boost_pu = 0;
 		u16 pad2g_tune_pus = 0;
 
@@ -1135,7 +1135,7 @@
 {
 	struct b43_phy *phy = &dev->phy;
 	struct ssb_sprom *sprom = dev->dev->bus_sprom;
-	enum ieee80211_band band = b43_current_band(dev->wl);
+	enum nl80211_band band = b43_current_band(dev->wl);
 	u16 offset;
 	u8 i;
 	u16 bias, cbias;
@@ -1152,10 +1152,10 @@
 		 dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
 
 	b43_chantab_radio_2056_upload(dev, e);
-	b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
+	b2056_upload_syn_pll_cp2(dev, band == NL80211_BAND_5GHZ);
 
 	if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
-	    b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	    b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
 		if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
@@ -1168,21 +1168,21 @@
 		}
 	}
 	if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
-	    b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	    b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
 		b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
 	}
 	if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
-	    b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+	    b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
 		b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
 		b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
 	}
 
-	if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
+	if (dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) {
 		for (i = 0; i < 2; i++) {
 			offset = i ? B2056_TX1 : B2056_TX0;
 			if (dev->phy.rev >= 5) {
@@ -1244,7 +1244,7 @@
 			}
 			b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
 		}
-	} else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
+	} else if (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ) {
 		u16 freq = phy->chandef->chan->center_freq;
 		if (freq < 5100) {
 			paa_boost = 0xA;
@@ -1501,7 +1501,7 @@
 		/* Follow wl, not specs. Do not force uploading all regs */
 		b2055_upload_inittab(dev, 0, 0);
 	} else {
-		bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ;
+		bool ghz5 = b43_current_band(dev->wl) == NL80211_BAND_5GHZ;
 		b2055_upload_inittab(dev, ghz5, 0);
 	}
 	b43_radio_init2055_post(dev);
@@ -1785,7 +1785,7 @@
 				b43_phy_maskset(dev, reg, 0xFFC3, 0);
 
 				if (rssi_type == N_RSSI_W1)
-					val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8;
+					val = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 4 : 8;
 				else if (rssi_type == N_RSSI_W2)
 					val = 16;
 				else
@@ -1813,12 +1813,12 @@
 
 				if (rssi_type != N_RSSI_IQ &&
 				    rssi_type != N_RSSI_TBD) {
-					enum ieee80211_band band =
+					enum nl80211_band band =
 						b43_current_band(dev->wl);
 
 					if (dev->phy.rev < 7) {
 						if (b43_nphy_ipa(dev))
-							val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+							val = (band == NL80211_BAND_5GHZ) ? 0xC : 0xE;
 						else
 							val = 0x11;
 						reg = (i == 0) ? B2056_TX0 : B2056_TX1;
@@ -2120,7 +2120,7 @@
 						     1, 0, false);
 		b43_nphy_rf_ctl_override_rev7(dev, 0x80, 1, 0, false, 0);
 		b43_nphy_rf_ctl_override_rev7(dev, 0x40, 1, 0, false, 0);
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 			b43_nphy_rf_ctl_override_rev7(dev, 0x20, 0, 0, false,
 						      0);
 			b43_nphy_rf_ctl_override_rev7(dev, 0x10, 1, 0, false,
@@ -2136,7 +2136,7 @@
 		b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
 		b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
 		b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 			b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
 			b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
 		} else {
@@ -2257,7 +2257,7 @@
 		b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
 
 	/* Store for future configuration */
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
 		rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
 	} else {
@@ -2289,7 +2289,7 @@
 	rssical_phy_regs[11] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y);
 
 	/* Remember for which channel we store configuration */
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		nphy->rssical_chanspec_2G.center_freq = phy->chandef->chan->center_freq;
 	else
 		nphy->rssical_chanspec_5G.center_freq = phy->chandef->chan->center_freq;
@@ -2336,7 +2336,7 @@
 	b43_nphy_read_clip_detection(dev, clip_state);
 	b43_nphy_write_clip_detection(dev, clip_off);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 		override = 0x140;
 	else
 		override = 0x110;
@@ -2629,7 +2629,7 @@
 	b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
 
 	if (nphy->gain_boost) {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ &&
 		    b43_is_40mhz(dev))
 			code = 4;
 		else
@@ -2688,7 +2688,7 @@
 		~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
 		0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
 }
 
@@ -2803,7 +2803,7 @@
 	scap_val = b43_radio_read(dev, R2057_RCCAL_SCAP_VAL);
 
 	if (b43_nphy_ipa(dev)) {
-		bool ghz2 = b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ;
+		bool ghz2 = b43_current_band(dev->wl) == NL80211_BAND_2GHZ;
 
 		switch (phy->radio_rev) {
 		case 5:
@@ -2831,7 +2831,7 @@
 				bcap_val_11b[core] = bcap_val;
 				lpf_ofdm_20mhz[core] = 4;
 				lpf_11b[core] = 1;
-				if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+				if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 					scap_val_11n_20[core] = 0xc;
 					bcap_val_11n_20[core] = 0xc;
 					scap_val_11n_40[core] = 0xa;
@@ -2982,7 +2982,7 @@
 			conv = 0x7f;
 			filt = 0xee;
 		}
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			for (core = 0; core < 2; core++) {
 				if (core == 0) {
 					b43_radio_write(dev, 0x5F, bias);
@@ -2998,7 +2998,7 @@
 	}
 
 	if (b43_nphy_ipa(dev)) {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
 			    phy->radio_rev == 6) {
 				for (core = 0; core < 2; core++) {
@@ -3221,7 +3221,7 @@
 					 ARRAY_SIZE(rx2tx_events));
 	}
 
-	tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+	tmp16 = (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ?
 		0x2 : 0x9C40;
 	b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
 
@@ -3240,7 +3240,7 @@
 	b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
 	b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		pdet_range = sprom->fem.ghz2.pdet_range;
 	else
 		pdet_range = sprom->fem.ghz5.pdet_range;
@@ -3249,7 +3249,7 @@
 	switch (pdet_range) {
 	case 3:
 		if (!(dev->phy.rev >= 4 &&
-		      b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+		      b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
 			break;
 		/* FALL THROUGH */
 	case 0:
@@ -3261,7 +3261,7 @@
 		break;
 	case 2:
 		if (dev->phy.rev >= 6) {
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+			if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 				vmid[3] = 0x94;
 			else
 				vmid[3] = 0x8e;
@@ -3277,7 +3277,7 @@
 		break;
 	case 4:
 	case 5:
-		if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) != NL80211_BAND_2GHZ) {
 			if (pdet_range == 4) {
 				vmid[3] = 0x8e;
 				tmp16 = 0x96;
@@ -3322,9 +3322,9 @@
 	/* N PHY WAR TX Chain Update with hw_phytxchain as argument */
 
 	if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
-	     b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+	     b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ||
 	    (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
-	     b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+	     b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
 		tmp32 = 0x00088888;
 	else
 		tmp32 = 0x88888888;
@@ -3333,7 +3333,7 @@
 	b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
 
 	if (dev->phy.rev == 4 &&
-	    b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+	    b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 		b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
 				0x70);
 		b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
@@ -3376,7 +3376,7 @@
 		delays1[5] = 0x14;
 	}
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+	if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ &&
 	    nphy->band5g_pwrgain) {
 		b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
 		b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
@@ -3451,7 +3451,7 @@
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = phy->n;
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 		b43_nphy_classifier(dev, 1, 0);
 	else
 		b43_nphy_classifier(dev, 1, 1);
@@ -3586,7 +3586,7 @@
 		gain = (target.pad[core]) | (target.pga[core] << 4) |
 			(target.txgm[core] << 8);
 
-		indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+		indx = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ?
 			1 : 0;
 		for (i = 0; i < 9; i++)
 			if (tbl_iqcal_gainparams[indx][i][0] == gain)
@@ -3614,7 +3614,7 @@
 	struct b43_phy_n *nphy = dev->phy.n;
 	u8 i;
 	u16 bmask, val, tmp;
-	enum ieee80211_band band = b43_current_band(dev->wl);
+	enum nl80211_band band = b43_current_band(dev->wl);
 
 	if (nphy->hang_avoid)
 		b43_nphy_stay_in_carrier_search(dev, 1);
@@ -3679,7 +3679,7 @@
 		}
 		b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
 
-		if (band == IEEE80211_BAND_5GHZ) {
+		if (band == NL80211_BAND_5GHZ) {
 			if (phy->rev >= 19) {
 				/* TODO */
 			} else if (phy->rev >= 7) {
@@ -3770,7 +3770,7 @@
 		txpi[0] = 72;
 		txpi[1] = 72;
 	} else {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			txpi[0] = sprom->txpid2g[0];
 			txpi[1] = sprom->txpid2g[1];
 		} else if (freq >= 4900 && freq < 5100) {
@@ -3868,7 +3868,7 @@
 	} else if (phy->rev >= 7) {
 		for (core = 0; core < 2; core++) {
 			r = core ? 0x190 : 0x170;
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+			if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 				b43_radio_write(dev, r + 0x5, 0x5);
 				b43_radio_write(dev, r + 0x9, 0xE);
 				if (phy->rev != 5)
@@ -3892,7 +3892,7 @@
 			b43_radio_write(dev, r + 0xC, 0);
 		}
 	} else {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 			b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
 		else
 			b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
@@ -3909,7 +3909,7 @@
 			b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
 			b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
 			b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+			if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 				b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
 						0x5);
 				if (phy->rev != 5)
@@ -4098,7 +4098,7 @@
 		b0[0] = b0[1] = 5612;
 		b1[0] = b1[1] = -1393;
 	} else {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			for (c = 0; c < 2; c++) {
 				idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g;
 				target[c] = sprom->core_pwr_info[c].maxpwr_2g;
@@ -4153,11 +4153,11 @@
 			for (c = 0; c < 2; c++) {
 				r = c ? 0x190 : 0x170;
 				if (b43_nphy_ipa(dev))
-					b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 0xE : 0xC);
+					b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ? 0xE : 0xC);
 			}
 		} else {
 			if (b43_nphy_ipa(dev)) {
-				tmp = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+				tmp = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 0xC : 0xE;
 				b43_radio_write(dev,
 					B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp);
 				b43_radio_write(dev,
@@ -4267,13 +4267,13 @@
 		} else if (phy->rev >= 7) {
 			pga_gain = (table[i] >> 24) & 0xf;
 			pad_gain = (table[i] >> 19) & 0x1f;
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+			if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 				rfpwr_offset = rf_pwr_offset_table[pad_gain];
 			else
 				rfpwr_offset = rf_pwr_offset_table[pga_gain];
 		} else {
 			pga_gain = (table[i] >> 24) & 0xF;
-			if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+			if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 				rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
 			else
 				rfpwr_offset = 0; /* FIXME */
@@ -4288,7 +4288,7 @@
 static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
 {
 	struct b43_phy_n *nphy = dev->phy.n;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	u16 tmp;
 
 	if (!enable) {
@@ -4300,12 +4300,12 @@
 		if (dev->phy.rev >= 7) {
 			tmp = 0x1480;
 		} else if (dev->phy.rev >= 3) {
-			if (band == IEEE80211_BAND_5GHZ)
+			if (band == NL80211_BAND_5GHZ)
 				tmp = 0x600;
 			else
 				tmp = 0x480;
 		} else {
-			if (band == IEEE80211_BAND_5GHZ)
+			if (band == NL80211_BAND_5GHZ)
 				tmp = 0x180;
 			else
 				tmp = 0x120;
@@ -4734,7 +4734,7 @@
 	u16 *rssical_radio_regs = NULL;
 	u16 *rssical_phy_regs = NULL;
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		if (!nphy->rssical_chanspec_2G.center_freq)
 			return;
 		rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
@@ -4804,7 +4804,7 @@
 		save[off + 7] = b43_radio_read(dev, r + R2057_TX0_TSSIG);
 		save[off + 8] = b43_radio_read(dev, r + R2057_TX0_TSSI_MISC1);
 
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 			b43_radio_write(dev, r + R2057_TX0_TX_SSI_MASTER, 0xA);
 			b43_radio_write(dev, r + R2057_TX0_IQCAL_VCM_HG, 0x43);
 			b43_radio_write(dev, r + R2057_TX0_IQCAL_IDAC, 0x55);
@@ -4864,7 +4864,7 @@
 		save[offset + 9] = b43_radio_read(dev, B2055_XOMISC);
 		save[offset + 10] = b43_radio_read(dev, B2055_PLL_LFC1);
 
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 			b43_radio_write(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
 			b43_radio_write(dev, tmp | B2055_CAL_LPOCTL, 0x40);
 			b43_radio_write(dev, tmp | B2055_CAL_TS, 0x55);
@@ -5005,7 +5005,7 @@
 		b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
 					      tbl_tx_filter_coef_rev4[3]);
 	} else {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+		if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 			b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
 						      tbl_tx_filter_coef_rev4[5]);
 		if (dev->phy.channel == 14)
@@ -5185,7 +5185,7 @@
 							      false, 0);
 			} else if (phy->rev == 7) {
 				b43_radio_maskset(dev, R2057_OVR_REG0, 1 << 4, 1 << 4);
-				if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+				if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 					b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE0, ~1, 0);
 					b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE1, ~1, 0);
 				} else {
@@ -5210,7 +5210,7 @@
 		b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
 		regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
 		regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+		if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 			tmp = 0x0180;
 		else
 			tmp = 0x0120;
@@ -5233,7 +5233,7 @@
 	if (nphy->hang_avoid)
 		b43_nphy_stay_in_carrier_search(dev, 1);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
 		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
 		iqcal_chanspec = &nphy->iqcal_chanspec_2G;
@@ -5304,7 +5304,7 @@
 	u16 *txcal_radio_regs = NULL;
 	struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		if (!nphy->iqcal_chanspec_2G.center_freq)
 			return;
 		table = nphy->cal_cache.txcal_coeffs_2G;
@@ -5332,7 +5332,7 @@
 	if (dev->phy.rev < 2)
 		b43_nphy_tx_iq_workaround(dev);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
 		rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
 	} else {
@@ -5422,7 +5422,7 @@
 
 	phy6or5x = dev->phy.rev >= 6 ||
 		(dev->phy.rev == 5 && nphy->ipa2g_on &&
-		b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
+		b43_current_band(dev->wl) == NL80211_BAND_2GHZ);
 	if (phy6or5x) {
 		if (b43_is_40mhz(dev)) {
 			b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
@@ -5657,7 +5657,7 @@
 	u16 tmp[6];
 	u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna;
 	u32 real, imag;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	u8 use;
 	u16 cur_hpf;
@@ -5712,18 +5712,18 @@
 		band = b43_current_band(dev->wl);
 
 		if (nphy->rxcalparams & 0xFF000000) {
-			if (band == IEEE80211_BAND_5GHZ)
+			if (band == NL80211_BAND_5GHZ)
 				b43_phy_write(dev, rfctl[0], 0x140);
 			else
 				b43_phy_write(dev, rfctl[0], 0x110);
 		} else {
-			if (band == IEEE80211_BAND_5GHZ)
+			if (band == NL80211_BAND_5GHZ)
 				b43_phy_write(dev, rfctl[0], 0x180);
 			else
 				b43_phy_write(dev, rfctl[0], 0x120);
 		}
 
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			b43_phy_write(dev, rfctl[1], 0x148);
 		else
 			b43_phy_write(dev, rfctl[1], 0x114);
@@ -5919,7 +5919,7 @@
 #if 0
 	/* Some extra gains */
 	hw_gain = 6; /* N-PHY specific */
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		hw_gain += sprom->antenna_gain.a0;
 	else
 		hw_gain += sprom->antenna_gain.a1;
@@ -6043,7 +6043,7 @@
 	u8 tx_pwr_state;
 	struct nphy_txgains target;
 	u16 tmp;
-	enum ieee80211_band tmp2;
+	enum nl80211_band tmp2;
 	bool do_rssi_cal;
 
 	u16 clip[2];
@@ -6051,7 +6051,7 @@
 
 	if ((dev->phy.rev >= 3) &&
 	   (sprom->boardflags_lo & B43_BFL_EXTLNA) &&
-	   (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
+	   (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)) {
 		switch (dev->dev->bus_type) {
 #ifdef CONFIG_B43_BCMA
 		case B43_BUS_BCMA:
@@ -6170,7 +6170,7 @@
 
 	b43_nphy_classifier(dev, 0, 0);
 	b43_nphy_read_clip_detection(dev, clip);
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		b43_nphy_bphy_init(dev);
 
 	tx_pwr_state = nphy->txpwrctrl;
@@ -6187,7 +6187,7 @@
 
 	do_rssi_cal = false;
 	if (phy->rev >= 3) {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 			do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq;
 		else
 			do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq;
@@ -6201,7 +6201,7 @@
 	}
 
 	if (!((nphy->measure_hold & 0x6) != 0)) {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 			do_cal = !nphy->iqcal_chanspec_2G.center_freq;
 		else
 			do_cal = !nphy->iqcal_chanspec_5G.center_freq;
@@ -6291,7 +6291,7 @@
 	int ch = new_channel->hw_value;
 	u16 tmp16;
 
-	if (new_channel->band == IEEE80211_BAND_5GHZ) {
+	if (new_channel->band == NL80211_BAND_5GHZ) {
 		/* Switch to 2 GHz for a moment to access B43_PHY_B_BBCFG */
 		b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
 
@@ -6302,7 +6302,7 @@
 			    B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX);
 		b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
 		b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
-	} else if (new_channel->band == IEEE80211_BAND_2GHZ) {
+	} else if (new_channel->band == NL80211_BAND_2GHZ) {
 		b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
 		tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
 		b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
@@ -6319,7 +6319,7 @@
 		b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
 	} else {
 		b43_nphy_classifier(dev, 2, 2);
-		if (new_channel->band == IEEE80211_BAND_2GHZ)
+		if (new_channel->band == NL80211_BAND_2GHZ)
 			b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
 	}
 
@@ -6449,7 +6449,7 @@
 			&(tabent_r7->phy_regs) : &(tabent_r7_2g->phy_regs);
 
 		if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
-			tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 2 : 0;
+			tmp = (channel->band == NL80211_BAND_5GHZ) ? 2 : 0;
 			b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE0, ~2, tmp);
 			b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE1, ~2, tmp);
 		}
@@ -6457,12 +6457,12 @@
 		b43_radio_2057_setup(dev, tabent_r7, tabent_r7_2g);
 		b43_nphy_channel_setup(dev, phy_regs, channel);
 	} else if (phy->rev >= 3) {
-		tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0;
+		tmp = (channel->band == NL80211_BAND_5GHZ) ? 4 : 0;
 		b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
 		b43_radio_2056_setup(dev, tabent_r3);
 		b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel);
 	} else {
-		tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050;
+		tmp = (channel->band == NL80211_BAND_5GHZ) ? 0x0020 : 0x0050;
 		b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
 		b43_radio_2055_setup(dev, tabent_r2);
 		b43_nphy_channel_setup(dev, &(tabent_r2->phy_regs), channel);
@@ -6692,7 +6692,7 @@
 	enum nl80211_channel_type channel_type =
 		cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		if ((new_channel < 1) || (new_channel > 14))
 			return -EINVAL;
 	} else {
@@ -6705,7 +6705,7 @@
 
 static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev)
 {
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 		return 1;
 	return 36;
 }
diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.c b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
index cff187c..ce01e16 100644
--- a/drivers/net/wireless/broadcom/b43/tables_lpphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
@@ -560,7 +560,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(b2062_init_tab); i++) {
 		e = &b2062_init_tab[i];
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			if (!(e->flags & B206X_FLAG_G))
 				continue;
 			b43_radio_write(dev, e->offset, e->value_g);
@@ -579,7 +579,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(b2063_init_tab); i++) {
 		e = &b2063_init_tab[i];
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+		if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 			if (!(e->flags & B206X_FLAG_G))
 				continue;
 			b43_radio_write(dev, e->offset, e->value_g);
@@ -2379,12 +2379,12 @@
 	tmp |= data.pga << 8;
 	tmp |= data.gm;
 	if (dev->phy.rev >= 3) {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+		if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 			tmp |= 0x10 << 24;
 		else
 			tmp |= 0x70 << 24;
 	} else {
-		if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+		if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 			tmp |= 0x14 << 24;
 		else
 			tmp |= 0x7F << 24;
@@ -2423,7 +2423,7 @@
 		    (sprom->boardflags_lo & B43_BFL_HGPA))
 			lpphy_write_gain_table_bulk(dev, 0, 128,
 					lpphy_rev0_nopa_tx_gain_table);
-		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 			lpphy_write_gain_table_bulk(dev, 0, 128,
 					lpphy_rev0_2ghz_tx_gain_table);
 		else
@@ -2435,7 +2435,7 @@
 		    (sprom->boardflags_lo & B43_BFL_HGPA))
 			lpphy_write_gain_table_bulk(dev, 0, 128,
 					lpphy_rev1_nopa_tx_gain_table);
-		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 			lpphy_write_gain_table_bulk(dev, 0, 128,
 					lpphy_rev1_2ghz_tx_gain_table);
 		else
@@ -2446,7 +2446,7 @@
 		if (sprom->boardflags_hi & B43_BFH_NOPA)
 			lpphy_write_gain_table_bulk(dev, 0, 128,
 					lpphy_rev2_nopa_tx_gain_table);
-		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
 			lpphy_write_gain_table_bulk(dev, 0, 128,
 					lpphy_rev2_2ghz_tx_gain_table);
 		else
diff --git a/drivers/net/wireless/broadcom/b43/tables_nphy.c b/drivers/net/wireless/broadcom/b43/tables_nphy.c
index b2f0d24..44e0957 100644
--- a/drivers/net/wireless/broadcom/b43/tables_nphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_nphy.c
@@ -3502,7 +3502,7 @@
 		{ 0x2, 0x18, 0x2 }, /* Core 1 */
 	};
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 		antswlut = sprom->fem.ghz5.antswlut;
 	else
 		antswlut = sprom->fem.ghz2.antswlut;
@@ -3566,7 +3566,7 @@
 	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 	u8 antswlut;
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+	if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
 		antswlut = sprom->fem.ghz5.antswlut;
 	else
 		antswlut = sprom->fem.ghz2.antswlut;
@@ -3651,7 +3651,7 @@
 {
 	struct b43_phy *phy = &dev->phy;
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		switch (phy->rev) {
 		case 17:
 			if (phy->radio_rev == 14)
@@ -3698,17 +3698,17 @@
 const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
-	enum ieee80211_band band = b43_current_band(dev->wl);
+	enum nl80211_band band = b43_current_band(dev->wl);
 	struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
 	if (dev->phy.rev < 3)
 		return b43_ntab_tx_gain_rev0_1_2;
 
 	/* rev 3+ */
-	if ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
-	    (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)) {
+	if ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
+	    (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)) {
 		return b43_nphy_get_ipa_gain_table(dev);
-	} else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+	} else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
 		switch (phy->rev) {
 		case 6:
 		case 5:
@@ -3746,7 +3746,7 @@
 {
 	struct b43_phy *phy = &dev->phy;
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		switch (phy->rev) {
 		case 17:
 			if (phy->radio_rev == 14)
diff --git a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
index e347b8d..704ef1b 100644
--- a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
+++ b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
@@ -701,7 +701,7 @@
 
 	b43_phy_lcn_upload_static_tables(dev);
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+	if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
 		if (sprom->boardflags_lo & B43_BFL_FEM)
 			b43_phy_lcn_load_tx_gain_tab(dev,
 				b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0);
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index 426dc13..f620126 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -803,7 +803,7 @@
 	chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
 	switch (chanstat & B43_RX_CHAN_PHYTYPE) {
 	case B43_PHYTYPE_A:
-		status.band = IEEE80211_BAND_5GHZ;
+		status.band = NL80211_BAND_5GHZ;
 		B43_WARN_ON(1);
 		/* FIXME: We don't really know which value the "chanid" contains.
 		 *        So the following assignment might be wrong. */
@@ -811,7 +811,7 @@
 			ieee80211_channel_to_frequency(chanid, status.band);
 		break;
 	case B43_PHYTYPE_G:
-		status.band = IEEE80211_BAND_2GHZ;
+		status.band = NL80211_BAND_2GHZ;
 		/* Somewhere between 478.104 and 508.1084 firmware for G-PHY
 		 * has been modified to be compatible with N-PHY and others.
 		 */
@@ -826,9 +826,9 @@
 		/* chanid is the SHM channel cookie. Which is the plain
 		 * channel number in b43. */
 		if (chanstat & B43_RX_CHAN_5GHZ)
-			status.band = IEEE80211_BAND_5GHZ;
+			status.band = NL80211_BAND_5GHZ;
 		else
-			status.band = IEEE80211_BAND_2GHZ;
+			status.band = NL80211_BAND_2GHZ;
 		status.freq =
 			ieee80211_channel_to_frequency(chanid, status.band);
 		break;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index afc1fb3..83770d2 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1056,7 +1056,7 @@
 	b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
 	dur = ieee80211_generic_frame_duration(dev->wl->hw,
 					       dev->wl->vif,
-					       IEEE80211_BAND_2GHZ,
+					       NL80211_BAND_2GHZ,
 					       size,
 					       rate);
 	/* Write PLCP in two parts and timing for packet transfer */
@@ -1122,7 +1122,7 @@
 					 IEEE80211_STYPE_PROBE_RESP);
 	dur = ieee80211_generic_frame_duration(dev->wl->hw,
 					       dev->wl->vif,
-					       IEEE80211_BAND_2GHZ,
+					       NL80211_BAND_2GHZ,
 					       *dest_size,
 					       rate);
 	hdr->duration_id = dur;
@@ -2719,7 +2719,7 @@
 
 	/* Switch the PHY mode (if necessary). */
 	switch (conf->chandef.chan->band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		if (phy->type == B43legacy_PHYTYPE_B)
 			new_phymode = B43legacy_PHYMODE_B;
 		else
@@ -2792,7 +2792,7 @@
 static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates)
 {
 	struct ieee80211_supported_band *sband =
-		dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+		dev->wl->hw->wiphy->bands[NL80211_BAND_2GHZ];
 	struct ieee80211_rate *rate;
 	int i;
 	u16 basic, direct, offset, basic_offset, rateptr;
@@ -3630,13 +3630,13 @@
 
 	phy->possible_phymodes = 0;
 	if (have_bphy) {
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+		hw->wiphy->bands[NL80211_BAND_2GHZ] =
 			&b43legacy_band_2GHz_BPHY;
 		phy->possible_phymodes |= B43legacy_PHYMODE_B;
 	}
 
 	if (have_gphy) {
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+		hw->wiphy->bands[NL80211_BAND_2GHZ] =
 			&b43legacy_band_2GHz_GPHY;
 		phy->possible_phymodes |= B43legacy_PHYMODE_G;
 	}
diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c
index 34bf3f0..35ccf40 100644
--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c
+++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c
@@ -565,7 +565,7 @@
 	switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) {
 	case B43legacy_PHYTYPE_B:
 	case B43legacy_PHYTYPE_G:
-		status.band = IEEE80211_BAND_2GHZ;
+		status.band = NL80211_BAND_2GHZ;
 		status.freq = chanid + 2400;
 		break;
 	default:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 6af658e..d1bc51f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -321,7 +321,8 @@
 	if (pktbuf->len == 0)
 		return -ENODATA;
 
-	*ifp = tmp_if;
+	if (ifp != NULL)
+		*ifp = tmp_if;
 	return 0;
 }
 
@@ -351,6 +352,12 @@
 {
 }
 
+static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
+				       struct sk_buff *skb)
+{
+	brcmf_fws_rxreorder(ifp, skb);
+}
+
 int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
 {
 	struct brcmf_bcdc *bcdc;
@@ -372,6 +379,7 @@
 	drvr->proto->configure_addr_mode = brcmf_proto_bcdc_configure_addr_mode;
 	drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer;
 	drvr->proto->add_tdls_peer = brcmf_proto_bcdc_add_tdls_peer;
+	drvr->proto->rxreorder = brcmf_proto_bcdc_rxreorder;
 	drvr->proto->pd = bcdc;
 
 	drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index da0cdd3..2fc0597 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -250,7 +250,7 @@
 				    u32 addr, u8 regsz, void *data, bool write)
 {
 	struct sdio_func *func;
-	int ret;
+	int ret = -EINVAL;
 
 	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
 		  write, fn, addr, regsz);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 8e02a47..2b24654 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -216,7 +216,9 @@
 		      int prec);
 
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
-void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event);
+/* Receive async event packet from firmware. Callee disposes of rxp. */
+void brcmf_rx_event(struct device *dev, struct sk_buff *rxp);
 
 /* Indication from bus module regarding presence/insertion of dongle. */
 int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index d5c2a27..d0631b6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -144,7 +144,7 @@
 #define wl_a_rates_size		(wl_g_rates_size - 4)
 
 #define CHAN2G(_channel, _freq) {				\
-	.band			= IEEE80211_BAND_2GHZ,		\
+	.band			= NL80211_BAND_2GHZ,		\
 	.center_freq		= (_freq),			\
 	.hw_value		= (_channel),			\
 	.flags			= IEEE80211_CHAN_DISABLED,	\
@@ -153,7 +153,7 @@
 }
 
 #define CHAN5G(_channel) {					\
-	.band			= IEEE80211_BAND_5GHZ,		\
+	.band			= NL80211_BAND_5GHZ,		\
 	.center_freq		= 5000 + (5 * (_channel)),	\
 	.hw_value		= (_channel),			\
 	.flags			= IEEE80211_CHAN_DISABLED,	\
@@ -181,13 +181,13 @@
  * above is added to the band during setup.
  */
 static const struct ieee80211_supported_band __wl_band_2ghz = {
-	.band = IEEE80211_BAND_2GHZ,
+	.band = NL80211_BAND_2GHZ,
 	.bitrates = wl_g_rates,
 	.n_bitrates = wl_g_rates_size,
 };
 
 static const struct ieee80211_supported_band __wl_band_5ghz = {
-	.band = IEEE80211_BAND_5GHZ,
+	.band = NL80211_BAND_5GHZ,
 	.bitrates = wl_a_rates,
 	.n_bitrates = wl_a_rates_size,
 };
@@ -250,6 +250,20 @@
 	struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
 };
 
+static u8 nl80211_band_to_fwil(enum nl80211_band band)
+{
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		return WLC_BAND_2G;
+	case NL80211_BAND_5GHZ:
+		return WLC_BAND_5G;
+	default:
+		WARN_ON(1);
+		break;
+	}
+	return 0;
+}
+
 static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
 			       struct cfg80211_chan_def *ch)
 {
@@ -292,13 +306,13 @@
 		WARN_ON_ONCE(1);
 	}
 	switch (ch->chan->band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		ch_inf.band = BRCMU_CHAN_BAND_2G;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		ch_inf.band = BRCMU_CHAN_BAND_5G;
 		break;
-	case IEEE80211_BAND_60GHZ:
+	case NL80211_BAND_60GHZ:
 	default:
 		WARN_ON_ONCE(1);
 	}
@@ -1796,6 +1810,50 @@
 	return type;
 }
 
+static void brcmf_set_join_pref(struct brcmf_if *ifp,
+				struct cfg80211_bss_selection *bss_select)
+{
+	struct brcmf_join_pref_params join_pref_params[2];
+	enum nl80211_band band;
+	int err, i = 0;
+
+	join_pref_params[i].len = 2;
+	join_pref_params[i].rssi_gain = 0;
+
+	if (bss_select->behaviour != NL80211_BSS_SELECT_ATTR_BAND_PREF)
+		brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_ASSOC_PREFER, WLC_BAND_AUTO);
+
+	switch (bss_select->behaviour) {
+	case __NL80211_BSS_SELECT_ATTR_INVALID:
+		brcmf_c_set_joinpref_default(ifp);
+		return;
+	case NL80211_BSS_SELECT_ATTR_BAND_PREF:
+		join_pref_params[i].type = BRCMF_JOIN_PREF_BAND;
+		band = bss_select->param.band_pref;
+		join_pref_params[i].band = nl80211_band_to_fwil(band);
+		i++;
+		break;
+	case NL80211_BSS_SELECT_ATTR_RSSI_ADJUST:
+		join_pref_params[i].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+		band = bss_select->param.adjust.band;
+		join_pref_params[i].band = nl80211_band_to_fwil(band);
+		join_pref_params[i].rssi_gain = bss_select->param.adjust.delta;
+		i++;
+		break;
+	case NL80211_BSS_SELECT_ATTR_RSSI:
+	default:
+		break;
+	}
+	join_pref_params[i].type = BRCMF_JOIN_PREF_RSSI;
+	join_pref_params[i].len = 2;
+	join_pref_params[i].rssi_gain = 0;
+	join_pref_params[i].band = 0;
+	err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+				       sizeof(join_pref_params));
+	if (err)
+		brcmf_err("Set join_pref error (%d)\n", err);
+}
+
 static s32
 brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
 		       struct cfg80211_connect_params *sme)
@@ -1952,6 +2010,8 @@
 		ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
 	}
 
+	brcmf_set_join_pref(ifp, &sme->bss_select);
+
 	err  = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
 					 join_params_size);
 	kfree(ext_join_params);
@@ -2679,9 +2739,9 @@
 	channel = bi->ctl_ch;
 
 	if (channel <= CH_MAX_2G_CHANNEL)
-		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+		band = wiphy->bands[NL80211_BAND_2GHZ];
 	else
-		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+		band = wiphy->bands[NL80211_BAND_5GHZ];
 
 	freq = ieee80211_channel_to_frequency(channel, band->band);
 	notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -2788,9 +2848,9 @@
 	cfg->d11inf.decchspec(&ch);
 
 	if (ch.band == BRCMU_CHAN_BAND_2G)
-		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+		band = wiphy->bands[NL80211_BAND_2GHZ];
 	else
-		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+		band = wiphy->bands[NL80211_BAND_5GHZ];
 
 	freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
 	cfg->channel = freq;
@@ -3608,7 +3668,8 @@
 	if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
 		wowl_config |= BRCMF_WOWL_UNASSOC;
 
-	brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear", strlen("clear"));
+	brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear",
+				 sizeof(struct brcmf_wowl_wakeind_le));
 	brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config);
 	brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
 	brcmf_bus_wowl_config(cfg->pub->bus_if, true);
@@ -5215,9 +5276,9 @@
 	cfg->d11inf.decchspec(&ch);
 
 	if (ch.band == BRCMU_CHAN_BAND_2G)
-		band = wiphy->bands[IEEE80211_BAND_2GHZ];
+		band = wiphy->bands[NL80211_BAND_2GHZ];
 	else
-		band = wiphy->bands[IEEE80211_BAND_5GHZ];
+		band = wiphy->bands[NL80211_BAND_5GHZ];
 
 	freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
 	notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -5707,11 +5768,11 @@
 	}
 
 	wiphy = cfg_to_wiphy(cfg);
-	band = wiphy->bands[IEEE80211_BAND_2GHZ];
+	band = wiphy->bands[NL80211_BAND_2GHZ];
 	if (band)
 		for (i = 0; i < band->n_channels; i++)
 			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
-	band = wiphy->bands[IEEE80211_BAND_5GHZ];
+	band = wiphy->bands[NL80211_BAND_5GHZ];
 	if (band)
 		for (i = 0; i < band->n_channels; i++)
 			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
@@ -5722,9 +5783,9 @@
 		cfg->d11inf.decchspec(&ch);
 
 		if (ch.band == BRCMU_CHAN_BAND_2G) {
-			band = wiphy->bands[IEEE80211_BAND_2GHZ];
+			band = wiphy->bands[NL80211_BAND_2GHZ];
 		} else if (ch.band == BRCMU_CHAN_BAND_5G) {
-			band = wiphy->bands[IEEE80211_BAND_5GHZ];
+			band = wiphy->bands[NL80211_BAND_5GHZ];
 		} else {
 			brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
 			continue;
@@ -5839,7 +5900,7 @@
 			return err;
 		}
 
-		band = cfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
+		band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ];
 		list = (struct brcmf_chanspec_list *)pbuf;
 		num_chan = le32_to_cpu(list->count);
 		for (i = 0; i < num_chan; i++) {
@@ -5871,11 +5932,11 @@
 	band = WLC_BAND_2G;
 	err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
 	if (!err) {
-		bw_cap[IEEE80211_BAND_2GHZ] = band;
+		bw_cap[NL80211_BAND_2GHZ] = band;
 		band = WLC_BAND_5G;
 		err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
 		if (!err) {
-			bw_cap[IEEE80211_BAND_5GHZ] = band;
+			bw_cap[NL80211_BAND_5GHZ] = band;
 			return;
 		}
 		WARN_ON(1);
@@ -5890,14 +5951,14 @@
 
 	switch (mimo_bwcap) {
 	case WLC_N_BW_40ALL:
-		bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
+		bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
 		/* fall-thru */
 	case WLC_N_BW_20IN2G_40IN5G:
-		bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
+		bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
 		/* fall-thru */
 	case WLC_N_BW_20ALL:
-		bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
-		bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+		bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
+		bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
 		break;
 	default:
 		brcmf_err("invalid mimo_bw_cap value\n");
@@ -5938,7 +5999,7 @@
 	__le16 mcs_map;
 
 	/* not allowed in 2.4G band */
-	if (band->band == IEEE80211_BAND_2GHZ)
+	if (band->band == NL80211_BAND_2GHZ)
 		return;
 
 	band->vht_cap.vht_supported = true;
@@ -5997,8 +6058,8 @@
 		brcmf_get_bwcap(ifp, bw_cap);
 	}
 	brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
-		  nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
-		  bw_cap[IEEE80211_BAND_5GHZ]);
+		  nmode, vhtmode, bw_cap[NL80211_BAND_2GHZ],
+		  bw_cap[NL80211_BAND_5GHZ]);
 
 	err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
 	if (err) {
@@ -6279,6 +6340,10 @@
 	wiphy->n_cipher_suites = ARRAY_SIZE(brcmf_cipher_suites);
 	if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
 		wiphy->n_cipher_suites--;
+	wiphy->bss_select_support = BIT(NL80211_BSS_SELECT_ATTR_RSSI) |
+				    BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) |
+				    BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST);
+
 	wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
 			WIPHY_FLAG_OFFCHAN_TX |
 			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -6321,7 +6386,7 @@
 			}
 
 			band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
-			wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+			wiphy->bands[NL80211_BAND_2GHZ] = band;
 		}
 		if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
 			band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
@@ -6338,7 +6403,7 @@
 			}
 
 			band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
-			wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+			wiphy->bands[NL80211_BAND_5GHZ] = band;
 		}
 	}
 	err = brcmf_setup_wiphybands(wiphy);
@@ -6604,13 +6669,13 @@
 			kfree(wiphy->iface_combinations[i].limits);
 	}
 	kfree(wiphy->iface_combinations);
-	if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
-		kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
-		kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
+	if (wiphy->bands[NL80211_BAND_2GHZ]) {
+		kfree(wiphy->bands[NL80211_BAND_2GHZ]->channels);
+		kfree(wiphy->bands[NL80211_BAND_2GHZ]);
 	}
-	if (wiphy->bands[IEEE80211_BAND_5GHZ]) {
-		kfree(wiphy->bands[IEEE80211_BAND_5GHZ]->channels);
-		kfree(wiphy->bands[IEEE80211_BAND_5GHZ]);
+	if (wiphy->bands[NL80211_BAND_5GHZ]) {
+		kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels);
+		kfree(wiphy->bands[NL80211_BAND_5GHZ]);
 	}
 	wiphy_free(wiphy);
 }
@@ -6698,8 +6763,8 @@
 	 * cfg80211 here that we do and have it decide we can enable
 	 * it. But first check if device does support 2G operation.
 	 */
-	if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
-		cap = &wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap;
+	if (wiphy->bands[NL80211_BAND_2GHZ]) {
+		cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap.cap;
 		*cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 	}
 	err = wiphy_register(wiphy);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 9e909e3..3e15d64 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -38,7 +38,7 @@
 #define BRCMF_DEFAULT_SCAN_CHANNEL_TIME	40
 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME	40
 
-/* boost value for RSSI_DELTA in preferred join selection */
+/* default boost value for RSSI_DELTA in preferred join selection */
 #define BRCMF_JOIN_PREF_RSSI_BOOST	8
 
 #define BRCMF_DEFAULT_TXGLOM_SIZE	32  /* max tx frames in glom chain */
@@ -83,11 +83,31 @@
 static struct brcmfmac_platform_data *brcmfmac_pdata;
 struct brcmf_mp_global_t brcmf_mp_global;
 
+void brcmf_c_set_joinpref_default(struct brcmf_if *ifp)
+{
+	struct brcmf_join_pref_params join_pref_params[2];
+	int err;
+
+	/* Setup join_pref to select target by RSSI (boost on 5GHz) */
+	join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+	join_pref_params[0].len = 2;
+	join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+	join_pref_params[0].band = WLC_BAND_5G;
+
+	join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+	join_pref_params[1].len = 2;
+	join_pref_params[1].rssi_gain = 0;
+	join_pref_params[1].band = 0;
+	err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+				       sizeof(join_pref_params));
+	if (err)
+		brcmf_err("Set join_pref error (%d)\n", err);
+}
+
 int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
 {
 	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
 	u8 buf[BRCMF_DCMD_SMLEN];
-	struct brcmf_join_pref_params join_pref_params[2];
 	struct brcmf_rev_info_le revinfo;
 	struct brcmf_rev_info *ri;
 	char *ptr;
@@ -154,19 +174,7 @@
 		goto done;
 	}
 
-	/* Setup join_pref to select target by RSSI(with boost on 5GHz) */
-	join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
-	join_pref_params[0].len = 2;
-	join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
-	join_pref_params[0].band = WLC_BAND_5G;
-	join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
-	join_pref_params[1].len = 2;
-	join_pref_params[1].rssi_gain = 0;
-	join_pref_params[1].band = 0;
-	err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
-				       sizeof(join_pref_params));
-	if (err)
-		brcmf_err("Set join_pref error (%d)\n", err);
+	brcmf_c_set_joinpref_default(ifp);
 
 	/* Setup event_msgs, enable E_IF */
 	err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index ff825cd..b590499 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -40,19 +40,6 @@
 
 #define MAX_WAIT_FOR_8021X_TX			msecs_to_jiffies(950)
 
-/* AMPDU rx reordering definitions */
-#define BRCMF_RXREORDER_FLOWID_OFFSET		0
-#define BRCMF_RXREORDER_MAXIDX_OFFSET		2
-#define BRCMF_RXREORDER_FLAGS_OFFSET		4
-#define BRCMF_RXREORDER_CURIDX_OFFSET		6
-#define BRCMF_RXREORDER_EXPIDX_OFFSET		8
-
-#define BRCMF_RXREORDER_DEL_FLOW		0x01
-#define BRCMF_RXREORDER_FLUSH_ALL		0x02
-#define BRCMF_RXREORDER_CURIDX_VALID		0x04
-#define BRCMF_RXREORDER_EXPIDX_VALID		0x08
-#define BRCMF_RXREORDER_NEW_HOLE		0x10
-
 #define BRCMF_BSSIDX_INVALID			-1
 
 char *brcmf_ifname(struct brcmf_if *ifp)
@@ -313,15 +300,9 @@
 
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
 {
-	skb->dev = ifp->ndev;
-	skb->protocol = eth_type_trans(skb, skb->dev);
-
 	if (skb->pkt_type == PACKET_MULTICAST)
 		ifp->stats.multicast++;
 
-	/* Process special event packets */
-	brcmf_fweh_process_skb(ifp->drvr, skb);
-
 	if (!(ifp->ndev->flags & IFF_UP)) {
 		brcmu_pkt_buf_free_skb(skb);
 		return;
@@ -341,226 +322,60 @@
 		netif_rx_ni(skb);
 }
 
-static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
-					 u8 start, u8 end,
-					 struct sk_buff_head *skb_list)
+static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
+			    struct brcmf_if **ifp)
 {
-	/* initialize return list */
-	__skb_queue_head_init(skb_list);
+	int ret;
 
-	if (rfi->pend_pkts == 0) {
-		brcmf_dbg(INFO, "no packets in reorder queue\n");
-		return;
+	/* process and remove protocol-specific header */
+	ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
+
+	if (ret || !(*ifp) || !(*ifp)->ndev) {
+		if (ret != -ENODATA && *ifp)
+			(*ifp)->stats.rx_errors++;
+		brcmu_pkt_buf_free_skb(skb);
+		return -ENODATA;
 	}
 
-	do {
-		if (rfi->pktslots[start]) {
-			__skb_queue_tail(skb_list, rfi->pktslots[start]);
-			rfi->pktslots[start] = NULL;
-		}
-		start++;
-		if (start > rfi->max_idx)
-			start = 0;
-	} while (start != end);
-	rfi->pend_pkts -= skb_queue_len(skb_list);
+	skb->protocol = eth_type_trans(skb, (*ifp)->ndev);
+	return 0;
 }
 
-static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
-					 struct sk_buff *pkt)
-{
-	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
-	struct brcmf_ampdu_rx_reorder *rfi;
-	struct sk_buff_head reorder_list;
-	struct sk_buff *pnext;
-	u8 flags;
-	u32 buf_size;
-
-	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
-	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
-
-	/* validate flags and flow id */
-	if (flags == 0xFF) {
-		brcmf_err("invalid flags...so ignore this packet\n");
-		brcmf_netif_rx(ifp, pkt);
-		return;
-	}
-
-	rfi = ifp->drvr->reorder_flows[flow_id];
-	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
-		brcmf_dbg(INFO, "flow-%d: delete\n",
-			  flow_id);
-
-		if (rfi == NULL) {
-			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
-				  flow_id);
-			brcmf_netif_rx(ifp, pkt);
-			return;
-		}
-
-		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
-					     &reorder_list);
-		/* add the last packet */
-		__skb_queue_tail(&reorder_list, pkt);
-		kfree(rfi);
-		ifp->drvr->reorder_flows[flow_id] = NULL;
-		goto netif_rx;
-	}
-	/* from here on we need a flow reorder instance */
-	if (rfi == NULL) {
-		buf_size = sizeof(*rfi);
-		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
-
-		buf_size += (max_idx + 1) * sizeof(pkt);
-
-		/* allocate space for flow reorder info */
-		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
-			  flow_id, max_idx);
-		rfi = kzalloc(buf_size, GFP_ATOMIC);
-		if (rfi == NULL) {
-			brcmf_err("failed to alloc buffer\n");
-			brcmf_netif_rx(ifp, pkt);
-			return;
-		}
-
-		ifp->drvr->reorder_flows[flow_id] = rfi;
-		rfi->pktslots = (struct sk_buff **)(rfi+1);
-		rfi->max_idx = max_idx;
-	}
-	if (flags & BRCMF_RXREORDER_NEW_HOLE)  {
-		if (rfi->pend_pkts) {
-			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
-						     rfi->exp_idx,
-						     &reorder_list);
-			WARN_ON(rfi->pend_pkts);
-		} else {
-			__skb_queue_head_init(&reorder_list);
-		}
-		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
-		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
-		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
-		rfi->pktslots[rfi->cur_idx] = pkt;
-		rfi->pend_pkts++;
-		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
-			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
-	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
-		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
-		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
-
-		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
-			/* still in the current hole */
-			/* enqueue the current on the buffer chain */
-			if (rfi->pktslots[cur_idx] != NULL) {
-				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
-				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
-				rfi->pktslots[cur_idx] = NULL;
-			}
-			rfi->pktslots[cur_idx] = pkt;
-			rfi->pend_pkts++;
-			rfi->cur_idx = cur_idx;
-			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
-				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);
-
-			/* can return now as there is no reorder
-			 * list to process.
-			 */
-			return;
-		}
-		if (rfi->exp_idx == cur_idx) {
-			if (rfi->pktslots[cur_idx] != NULL) {
-				brcmf_dbg(INFO, "error buffer pending..free it\n");
-				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
-				rfi->pktslots[cur_idx] = NULL;
-			}
-			rfi->pktslots[cur_idx] = pkt;
-			rfi->pend_pkts++;
-
-			/* got the expected one. flush from current to expected
-			 * and update expected
-			 */
-			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
-				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);
-
-			rfi->cur_idx = cur_idx;
-			rfi->exp_idx = exp_idx;
-
-			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
-						     &reorder_list);
-			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
-				  flow_id, skb_queue_len(&reorder_list),
-				  rfi->pend_pkts);
-		} else {
-			u8 end_idx;
-
-			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
-				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
-				  cur_idx, exp_idx);
-			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
-				end_idx = rfi->exp_idx;
-			else
-				end_idx = exp_idx;
-
-			/* flush pkts first */
-			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
-						     &reorder_list);
-
-			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
-				__skb_queue_tail(&reorder_list, pkt);
-			} else {
-				rfi->pktslots[cur_idx] = pkt;
-				rfi->pend_pkts++;
-			}
-			rfi->exp_idx = exp_idx;
-			rfi->cur_idx = cur_idx;
-		}
-	} else {
-		/* explicity window move updating the expected index */
-		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
-
-		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
-			  flow_id, flags, rfi->exp_idx, exp_idx);
-		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
-			end_idx =  rfi->exp_idx;
-		else
-			end_idx =  exp_idx;
-
-		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
-					     &reorder_list);
-		__skb_queue_tail(&reorder_list, pkt);
-		/* set the new expected idx */
-		rfi->exp_idx = exp_idx;
-	}
-netif_rx:
-	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
-		__skb_unlink(pkt, &reorder_list);
-		brcmf_netif_rx(ifp, pkt);
-	}
-}
-
-void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
 {
 	struct brcmf_if *ifp;
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_pub *drvr = bus_if->drvr;
-	struct brcmf_skb_reorder_data *rd;
-	int ret;
 
 	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
 
-	/* process and remove protocol-specific header */
-	ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp);
-
-	if (ret || !ifp || !ifp->ndev) {
-		if (ret != -ENODATA && ifp)
-			ifp->stats.rx_errors++;
-		brcmu_pkt_buf_free_skb(skb);
+	if (brcmf_rx_hdrpull(drvr, skb, &ifp))
 		return;
-	}
 
-	rd = (struct brcmf_skb_reorder_data *)skb->cb;
-	if (rd->reorder)
-		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
-	else
+	if (brcmf_proto_is_reorder_skb(skb)) {
+		brcmf_proto_rxreorder(ifp, skb);
+	} else {
+		/* Process special event packets */
+		if (handle_event)
+			brcmf_fweh_process_skb(ifp->drvr, skb);
+
 		brcmf_netif_rx(ifp, skb);
+	}
+}
+
+void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
+{
+	struct brcmf_if *ifp;
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pub *drvr = bus_if->drvr;
+
+	brcmf_dbg(EVENT, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
+
+	if (brcmf_rx_hdrpull(drvr, skb, &ifp))
+		return;
+
+	brcmf_fweh_process_skb(ifp->drvr, skb);
+	brcmu_pkt_buf_free_skb(skb);
 }
 
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 7bdb6fe..647d3cc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -208,10 +208,6 @@
 	u8 ipv6addr_idx;
 };
 
-struct brcmf_skb_reorder_data {
-	u8 *reorder;
-};
-
 int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
 
 /* Return pointer to interface name */
@@ -227,6 +223,7 @@
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
 void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
+void brcmf_c_set_joinpref_default(struct brcmf_if *ifp);
 int __init brcmf_core_init(void);
 void __exit brcmf_core_exit(void);
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 7269056..c7c1e99 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -29,6 +29,7 @@
 #define BRCMF_FW_MAX_NVRAM_SIZE			64000
 #define BRCMF_FW_NVRAM_DEVPATH_LEN		19	/* devpath0=pcie/1/4/ */
 #define BRCMF_FW_NVRAM_PCIEDEV_LEN		10	/* pcie/1/4/ + \0 */
+#define BRCMF_FW_DEFAULT_BOARDREV		"boardrev=0xff"
 
 enum nvram_parser_state {
 	IDLE,
@@ -51,6 +52,7 @@
  * @entry: start position of key,value entry.
  * @multi_dev_v1: detect pcie multi device v1 (compressed).
  * @multi_dev_v2: detect pcie multi device v2.
+ * @boardrev_found: nvram contains boardrev information.
  */
 struct nvram_parser {
 	enum nvram_parser_state state;
@@ -63,6 +65,7 @@
 	u32 entry;
 	bool multi_dev_v1;
 	bool multi_dev_v2;
+	bool boardrev_found;
 };
 
 /**
@@ -125,6 +128,8 @@
 			nvp->multi_dev_v1 = true;
 		if (strncmp(&nvp->data[nvp->entry], "pcie/", 5) == 0)
 			nvp->multi_dev_v2 = true;
+		if (strncmp(&nvp->data[nvp->entry], "boardrev", 8) == 0)
+			nvp->boardrev_found = true;
 	} else if (!is_nvram_char(c) || c == ' ') {
 		brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
 			  nvp->line, nvp->column);
@@ -284,6 +289,8 @@
 	while (i < nvp->nvram_len) {
 		if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
 			i += 2;
+			if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0)
+				nvp->boardrev_found = true;
 			while (nvp->nvram[i] != 0) {
 				nvram[j] = nvp->nvram[i];
 				i++;
@@ -335,6 +342,8 @@
 	while (i < nvp->nvram_len - len) {
 		if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
 			i += len;
+			if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0)
+				nvp->boardrev_found = true;
 			while (nvp->nvram[i] != 0) {
 				nvram[j] = nvp->nvram[i];
 				i++;
@@ -356,6 +365,18 @@
 	nvp->nvram_len = 0;
 }
 
+static void brcmf_fw_add_defaults(struct nvram_parser *nvp)
+{
+	if (nvp->boardrev_found)
+		return;
+
+	memcpy(&nvp->nvram[nvp->nvram_len], &BRCMF_FW_DEFAULT_BOARDREV,
+	       strlen(BRCMF_FW_DEFAULT_BOARDREV));
+	nvp->nvram_len += strlen(BRCMF_FW_DEFAULT_BOARDREV);
+	nvp->nvram[nvp->nvram_len] = '\0';
+	nvp->nvram_len++;
+}
+
 /* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a fil
  * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
  * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
@@ -377,16 +398,21 @@
 		if (nvp.state == END)
 			break;
 	}
-	if (nvp.multi_dev_v1)
+	if (nvp.multi_dev_v1) {
+		nvp.boardrev_found = false;
 		brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
-	else if (nvp.multi_dev_v2)
+	} else if (nvp.multi_dev_v2) {
+		nvp.boardrev_found = false;
 		brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
+	}
 
 	if (nvp.nvram_len == 0) {
 		kfree(nvp.nvram);
 		return NULL;
 	}
 
+	brcmf_fw_add_defaults(&nvp);
+
 	pad = nvp.nvram_len;
 	*new_length = roundup(nvp.nvram_len + 1, 4);
 	while (pad != *new_length) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index d414fbb..b390561 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -371,6 +371,7 @@
 	int i, err;
 	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
 
+	memset(eventmask, 0, sizeof(eventmask));
 	for (i = 0; i < BRCMF_E_LAST; i++) {
 		if (ifp->drvr->fweh.evt_handler[i]) {
 			brcmf_dbg(EVENT, "enable event %s\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index 6b72df1..3a9a76d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -78,6 +78,7 @@
 #define BRCMF_C_SET_SCAN_CHANNEL_TIME		185
 #define BRCMF_C_SET_SCAN_UNASSOC_TIME		187
 #define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON	201
+#define BRCMF_C_SET_ASSOC_PREFER		205
 #define BRCMF_C_GET_VALID_CHANNELS		217
 #define BRCMF_C_GET_KEY_PRIMARY			235
 #define BRCMF_C_SET_KEY_PRIMARY			236
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index f82c9ab..5b30922 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -92,6 +92,19 @@
 };
 #undef BRCMF_FWS_TLV_DEF
 
+/* AMPDU rx reordering definitions */
+#define BRCMF_RXREORDER_FLOWID_OFFSET		0
+#define BRCMF_RXREORDER_MAXIDX_OFFSET		2
+#define BRCMF_RXREORDER_FLAGS_OFFSET		4
+#define BRCMF_RXREORDER_CURIDX_OFFSET		6
+#define BRCMF_RXREORDER_EXPIDX_OFFSET		8
+
+#define BRCMF_RXREORDER_DEL_FLOW		0x01
+#define BRCMF_RXREORDER_FLUSH_ALL		0x02
+#define BRCMF_RXREORDER_CURIDX_VALID		0x04
+#define BRCMF_RXREORDER_EXPIDX_VALID		0x08
+#define BRCMF_RXREORDER_NEW_HOLE		0x10
+
 #ifdef DEBUG
 /*
  * brcmf_fws_tlv_names - array of tlv names.
@@ -1614,6 +1627,202 @@
 	return 0;
 }
 
+static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
+					 u8 start, u8 end,
+					 struct sk_buff_head *skb_list)
+{
+	/* initialize return list */
+	__skb_queue_head_init(skb_list);
+
+	if (rfi->pend_pkts == 0) {
+		brcmf_dbg(INFO, "no packets in reorder queue\n");
+		return;
+	}
+
+	do {
+		if (rfi->pktslots[start]) {
+			__skb_queue_tail(skb_list, rfi->pktslots[start]);
+			rfi->pktslots[start] = NULL;
+		}
+		start++;
+		if (start > rfi->max_idx)
+			start = 0;
+	} while (start != end);
+	rfi->pend_pkts -= skb_queue_len(skb_list);
+}
+
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
+{
+	u8 *reorder_data;
+	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
+	struct brcmf_ampdu_rx_reorder *rfi;
+	struct sk_buff_head reorder_list;
+	struct sk_buff *pnext;
+	u8 flags;
+	u32 buf_size;
+
+	reorder_data = ((struct brcmf_skb_reorder_data *)pkt->cb)->reorder;
+	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
+	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
+
+	/* validate flags and flow id */
+	if (flags == 0xFF) {
+		brcmf_err("invalid flags...so ignore this packet\n");
+		brcmf_netif_rx(ifp, pkt);
+		return;
+	}
+
+	rfi = ifp->drvr->reorder_flows[flow_id];
+	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
+		brcmf_dbg(INFO, "flow-%d: delete\n",
+			  flow_id);
+
+		if (rfi == NULL) {
+			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
+				  flow_id);
+			brcmf_netif_rx(ifp, pkt);
+			return;
+		}
+
+		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
+					     &reorder_list);
+		/* add the last packet */
+		__skb_queue_tail(&reorder_list, pkt);
+		kfree(rfi);
+		ifp->drvr->reorder_flows[flow_id] = NULL;
+		goto netif_rx;
+	}
+	/* from here on we need a flow reorder instance */
+	if (rfi == NULL) {
+		buf_size = sizeof(*rfi);
+		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+
+		buf_size += (max_idx + 1) * sizeof(pkt);
+
+		/* allocate space for flow reorder info */
+		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
+			  flow_id, max_idx);
+		rfi = kzalloc(buf_size, GFP_ATOMIC);
+		if (rfi == NULL) {
+			brcmf_err("failed to alloc buffer\n");
+			brcmf_netif_rx(ifp, pkt);
+			return;
+		}
+
+		ifp->drvr->reorder_flows[flow_id] = rfi;
+		rfi->pktslots = (struct sk_buff **)(rfi + 1);
+		rfi->max_idx = max_idx;
+	}
+	if (flags & BRCMF_RXREORDER_NEW_HOLE)  {
+		if (rfi->pend_pkts) {
+			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
+						     rfi->exp_idx,
+						     &reorder_list);
+			WARN_ON(rfi->pend_pkts);
+		} else {
+			__skb_queue_head_init(&reorder_list);
+		}
+		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+		rfi->pktslots[rfi->cur_idx] = pkt;
+		rfi->pend_pkts++;
+		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
+			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
+	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
+		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
+			/* still in the current hole */
+			/* enqueue the current on the buffer chain */
+			if (rfi->pktslots[cur_idx] != NULL) {
+				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
+				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+				rfi->pktslots[cur_idx] = NULL;
+			}
+			rfi->pktslots[cur_idx] = pkt;
+			rfi->pend_pkts++;
+			rfi->cur_idx = cur_idx;
+			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
+				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+			/* can return now as there is no reorder
+			 * list to process.
+			 */
+			return;
+		}
+		if (rfi->exp_idx == cur_idx) {
+			if (rfi->pktslots[cur_idx] != NULL) {
+				brcmf_dbg(INFO, "error buffer pending..free it\n");
+				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+				rfi->pktslots[cur_idx] = NULL;
+			}
+			rfi->pktslots[cur_idx] = pkt;
+			rfi->pend_pkts++;
+
+			/* got the expected one. flush from current to expected
+			 * and update expected
+			 */
+			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
+				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+			rfi->cur_idx = cur_idx;
+			rfi->exp_idx = exp_idx;
+
+			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
+						     &reorder_list);
+			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
+				  flow_id, skb_queue_len(&reorder_list),
+				  rfi->pend_pkts);
+		} else {
+			u8 end_idx;
+
+			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
+				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
+				  cur_idx, exp_idx);
+			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+				end_idx = rfi->exp_idx;
+			else
+				end_idx = exp_idx;
+
+			/* flush pkts first */
+			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+						     &reorder_list);
+
+			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
+				__skb_queue_tail(&reorder_list, pkt);
+			} else {
+				rfi->pktslots[cur_idx] = pkt;
+				rfi->pend_pkts++;
+			}
+			rfi->exp_idx = exp_idx;
+			rfi->cur_idx = cur_idx;
+		}
+	} else {
+		/* explicit window move, updating the expected index */
+		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
+			  flow_id, flags, rfi->exp_idx, exp_idx);
+		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+			end_idx = rfi->exp_idx;
+		else
+			end_idx = exp_idx;
+
+		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+					     &reorder_list);
+		__skb_queue_tail(&reorder_list, pkt);
+		/* set the new expected idx */
+		rfi->exp_idx = exp_idx;
+	}
+netif_rx:
+	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
+		__skb_unlink(pkt, &reorder_list);
+		brcmf_netif_rx(ifp, pkt);
+	}
+}
+
 void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
 {
 	struct brcmf_skb_reorder_data *rd;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index a36bac1..ef0ad85 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -29,5 +29,6 @@
 void brcmf_fws_del_interface(struct brcmf_if *ifp);
 void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
 void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
 
 #endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 9229667..68f1ce0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -20,6 +20,7 @@
 
 #include <linux/types.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 
 #include <brcmu_utils.h>
 #include <brcmu_wifi.h>
@@ -526,6 +527,9 @@
 	return -ENODEV;
 }
 
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+}
 
 static void
 brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
@@ -1075,28 +1079,13 @@
 }
 
 
-static void
-brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
-		    u8 ifidx)
-{
-	struct brcmf_if *ifp;
-
-	ifp = brcmf_get_ifp(msgbuf->drvr, ifidx);
-	if (!ifp || !ifp->ndev) {
-		brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
-		brcmu_pkt_buf_free_skb(skb);
-		return;
-	}
-	brcmf_netif_rx(ifp, skb);
-}
-
-
 static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
 {
 	struct msgbuf_rx_event *event;
 	u32 idx;
 	u16 buflen;
 	struct sk_buff *skb;
+	struct brcmf_if *ifp;
 
 	event = (struct msgbuf_rx_event *)buf;
 	idx = le32_to_cpu(event->msg.request_id);
@@ -1116,7 +1105,19 @@
 
 	skb_trim(skb, buflen);
 
-	brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx);
+	ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
+	if (!ifp || !ifp->ndev) {
+		brcmf_err("Received pkt for invalid ifidx %d\n",
+			  event->msg.ifidx);
+		goto exit;
+	}
+
+	skb->protocol = eth_type_trans(skb, ifp->ndev);
+
+	brcmf_fweh_process_skb(ifp->drvr, skb);
+
+exit:
+	brcmu_pkt_buf_free_skb(skb);
 }
 
 
@@ -1128,6 +1129,7 @@
 	u16 data_offset;
 	u16 buflen;
 	u32 idx;
+	struct brcmf_if *ifp;
 
 	brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);
 
@@ -1148,7 +1150,14 @@
 
 	skb_trim(skb, buflen);
 
-	brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx);
+	ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
+	if (!ifp || !ifp->ndev) {
+		brcmf_err("Received pkt for invalid ifidx %d\n",
+			  rx_complete->msg.ifidx);
+		brcmu_pkt_buf_free_skb(skb);
+		return;
+	}
+	brcmf_netif_rx(ifp, skb);
 }
 
 
@@ -1460,6 +1469,7 @@
 	drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
 	drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
 	drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
+	drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
 	drvr->proto->pd = msgbuf;
 
 	init_waitqueue_head(&msgbuf->ioctl_resp_wait);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index b5a49e5..a70cda6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1266,7 +1266,7 @@
 brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
 {
 	struct brcmf_p2p_info *p2p = &cfg->p2p;
-	struct brcmf_if *ifp = cfg->escan_info.ifp;
+	struct brcmf_if *ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
 
 	if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
 	    (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
@@ -1430,8 +1430,8 @@
 
 	freq = ieee80211_channel_to_frequency(ch.chnum,
 					      ch.band == BRCMU_CHAN_BAND_2G ?
-					      IEEE80211_BAND_2GHZ :
-					      IEEE80211_BAND_5GHZ);
+					      NL80211_BAND_2GHZ :
+					      NL80211_BAND_5GHZ);
 
 	wdev = &ifp->vif->wdev;
 	cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0);
@@ -1900,8 +1900,8 @@
 	mgmt_frame_len = e->datalen - sizeof(*rxframe);
 	freq = ieee80211_channel_to_frequency(ch.chnum,
 					      ch.band == BRCMU_CHAN_BAND_2G ?
-					      IEEE80211_BAND_2GHZ :
-					      IEEE80211_BAND_5GHZ);
+					      NL80211_BAND_2GHZ :
+					      NL80211_BAND_5GHZ);
 
 	cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index d55119d..57531f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -22,6 +22,9 @@
 	ADDR_DIRECT
 };
 
+struct brcmf_skb_reorder_data {
+	u8 *reorder;
+};
 
 struct brcmf_proto {
 	int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws,
@@ -38,6 +41,7 @@
 			    u8 peer[ETH_ALEN]);
 	void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
 			      u8 peer[ETH_ALEN]);
+	void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
 	void *pd;
 };
 
@@ -91,6 +95,18 @@
 {
 	drvr->proto->add_tdls_peer(drvr, ifidx, peer);
 }
+static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
+{
+	struct brcmf_skb_reorder_data *rd;
 
+	rd = (struct brcmf_skb_reorder_data *)skb->cb;
+	return !!rd->reorder;
+}
+
+static inline void
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+	ifp->drvr->proto->rxreorder(ifp, skb);
+}
 
 #endif /* BRCMFMAC_PROTO_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 43fd3f4..4252fa8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -535,9 +535,6 @@
 
 #define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
 
-/* Retry count for register access failures */
-static const uint retry_limit = 2;
-
 /* Limit on rounding up frames */
 static const uint max_roundup = 512;
 
@@ -1297,6 +1294,17 @@
 	return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
 }
 
+static inline bool brcmf_sdio_fromevntchan(u8 *swheader)
+{
+	u32 hdrvalue;
+	u8 ret;
+
+	hdrvalue = *(u32 *)swheader;
+	ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT);
+
+	return (ret == SDPCM_EVENT_CHANNEL);
+}
+
 static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
 			      struct brcmf_sdio_hdrinfo *rd,
 			      enum brcmf_sdio_frmtype type)
@@ -1644,7 +1652,11 @@
 					   pfirst->len, pfirst->next,
 					   pfirst->prev);
 			skb_unlink(pfirst, &bus->glom);
-			brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+			if (brcmf_sdio_fromevntchan(pfirst->data))
+				brcmf_rx_event(bus->sdiodev->dev, pfirst);
+			else
+				brcmf_rx_frame(bus->sdiodev->dev, pfirst,
+					       false);
 			bus->sdcnt.rxglompkts++;
 		}
 
@@ -1970,18 +1982,19 @@
 		__skb_trim(pkt, rd->len);
 		skb_pull(pkt, rd->dat_offset);
 
+		if (pkt->len == 0)
+			brcmu_pkt_buf_free_skb(pkt);
+		else if (rd->channel == SDPCM_EVENT_CHANNEL)
+			brcmf_rx_event(bus->sdiodev->dev, pkt);
+		else
+			brcmf_rx_frame(bus->sdiodev->dev, pkt,
+				       false);
+
 		/* prepare the descriptor for the next read */
 		rd->len = rd->len_nxtfrm << 4;
 		rd->len_nxtfrm = 0;
 		/* treat all packet as event if we don't know */
 		rd->channel = SDPCM_EVENT_CHANNEL;
-
-		if (pkt->len == 0) {
-			brcmu_pkt_buf_free_skb(pkt);
-			continue;
-		}
-
-		brcmf_rx_frame(bus->sdiodev->dev, pkt);
 	}
 
 	rxcount = maxframes - rxleft;
@@ -3261,7 +3274,7 @@
 					const struct firmware *fw,
 					void *nvram, u32 nvlen)
 {
-	int bcmerror = -EFAULT;
+	int bcmerror;
 	u32 rstvec;
 
 	sdio_claim_host(bus->sdiodev->func[1]);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 869eb82..98b15a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -514,7 +514,7 @@
 
 	if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
 		skb_put(skb, urb->actual_length);
-		brcmf_rx_frame(devinfo->dev, skb);
+		brcmf_rx_frame(devinfo->dev, skb, true);
 		brcmf_usb_rx_refill(devinfo, req);
 	} else {
 		brcmu_pkt_buf_free_skb(skb);
@@ -1368,7 +1368,9 @@
 
 	devinfo->ifnum = desc->bInterfaceNumber;
 
-	if (usb->speed == USB_SPEED_SUPER)
+	if (usb->speed == USB_SPEED_SUPER_PLUS)
+		brcmf_dbg(USB, "Broadcom super speed plus USB WLAN interface detected\n");
+	else if (usb->speed == USB_SPEED_SUPER)
 		brcmf_dbg(USB, "Broadcom super speed USB WLAN interface detected\n");
 	else if (usb->speed == USB_SPEED_HIGH)
 		brcmf_dbg(USB, "Broadcom high speed USB WLAN interface detected\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index 38bd589..3a03287 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -636,7 +636,7 @@
 	struct ieee80211_channel *ch;
 	int i;
 
-	sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+	sband = wiphy->bands[NL80211_BAND_5GHZ];
 	if (!sband)
 		return;
 
@@ -666,7 +666,7 @@
 	const struct ieee80211_reg_rule *rule;
 	int band, i;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		sband = wiphy->bands[band];
 		if (!sband)
 			continue;
@@ -710,7 +710,7 @@
 		brcms_reg_apply_beaconing_flags(wiphy, request->initiator);
 
 	/* Disable radio if all channels disallowed by regulatory */
-	for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; !ch_found && band < NUM_NL80211_BANDS; band++) {
 		sband = wiphy->bands[band];
 		if (!sband)
 			continue;
@@ -755,9 +755,9 @@
 					      &sup_chan);
 
 		if (band_idx == BAND_2G_INDEX)
-			sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+			sband = wiphy->bands[NL80211_BAND_2GHZ];
 		else
-			sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+			sband = wiphy->bands[NL80211_BAND_5GHZ];
 
 		for (i = 0; i < sband->n_channels; i++) {
 			ch = &sband->channels[i];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 61ae276..7c2a9a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -49,7 +49,7 @@
 	FIF_PSPOLL)
 
 #define CHAN2GHZ(channel, freqency, chflags)  { \
-	.band = IEEE80211_BAND_2GHZ, \
+	.band = NL80211_BAND_2GHZ, \
 	.center_freq = (freqency), \
 	.hw_value = (channel), \
 	.flags = chflags, \
@@ -58,7 +58,7 @@
 }
 
 #define CHAN5GHZ(channel, chflags)  { \
-	.band = IEEE80211_BAND_5GHZ, \
+	.band = NL80211_BAND_5GHZ, \
 	.center_freq = 5000 + 5*(channel), \
 	.hw_value = (channel), \
 	.flags = chflags, \
@@ -217,7 +217,7 @@
 };
 
 static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
-	.band = IEEE80211_BAND_2GHZ,
+	.band = NL80211_BAND_2GHZ,
 	.channels = brcms_2ghz_chantable,
 	.n_channels = ARRAY_SIZE(brcms_2ghz_chantable),
 	.bitrates = legacy_ratetable,
@@ -238,7 +238,7 @@
 };
 
 static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
-	.band = IEEE80211_BAND_5GHZ,
+	.band = NL80211_BAND_5GHZ,
 	.channels = brcms_5ghz_nphy_chantable,
 	.n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable),
 	.bitrates = legacy_ratetable + BRCMS_LEGACY_5G_RATE_OFFSET,
@@ -1026,8 +1026,8 @@
 	int has_5g = 0;
 	u16 phy_type;
 
-	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
-	hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+	hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+	hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
 
 	phy_type = brcms_c_get_phy_type(wl->wlc, 0);
 	if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
@@ -1038,7 +1038,7 @@
 			band->ht_cap.mcs.rx_mask[1] = 0;
 			band->ht_cap.mcs.rx_highest = cpu_to_le16(72);
 		}
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+		hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
 	} else {
 		return -EPERM;
 	}
@@ -1049,7 +1049,7 @@
 		if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
 			band = &wlc->bandstate[BAND_5G_INDEX]->band;
 			*band = brcms_band_5GHz_nphy_template;
-			hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+			hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
 		} else {
 			return -EPERM;
 		}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 218cbc8..e16ee60 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -7076,7 +7076,7 @@
 	channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
 
 	rx_status->band =
-		channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+		channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
 	rx_status->freq =
 		ieee80211_channel_to_frequency(channel, rx_status->band);
 
@@ -7143,7 +7143,7 @@
 		 * a subset of the 2.4G rates. See bitrates field
 		 * of brcms_band_5GHz_nphy (in mac80211_if.c).
 		 */
-		if (rx_status->band == IEEE80211_BAND_5GHZ)
+		if (rx_status->band == NL80211_BAND_5GHZ)
 			rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;
 
 		/* Determine short preamble and rate_idx */
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index d2353f6..55456f7 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -2026,7 +2026,7 @@
 	} else {
 		*payloadLen = cpu_to_le16(len - sizeof(etherHead));
 
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 
 		/* copy data into airo dma buffer */
 		memcpy(sendbuf, buffer, len);
@@ -2107,7 +2107,7 @@
 
 	i = 0;
 	if ( status == SUCCESS ) {
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 		for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
 	} else {
 		priv->fids[fid] &= 0xffff;
@@ -2174,7 +2174,7 @@
 
 	i = MAX_FIDS / 2;
 	if ( status == SUCCESS ) {
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 		for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
 	} else {
 		priv->fids[fid] &= 0xffff;
@@ -5836,7 +5836,7 @@
 	ch = le16_to_cpu(status_rid.channel);
 	if((ch > 0) && (ch < 15)) {
 		fwrq->m = 100000 *
-			ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ);
+			ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ);
 		fwrq->e = 1;
 	} else {
 		fwrq->m = ch;
@@ -6894,7 +6894,7 @@
 	for(i = 0; i < 14; i++) {
 		range->freq[k].i = i + 1; /* List index */
 		range->freq[k].m = 100000 *
-		     ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ);
+		     ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ);
 		range->freq[k++].e = 1;	/* Values in MHz -> * 10^5 * 10 */
 	}
 	range->num_frequency = k;
@@ -7302,7 +7302,7 @@
 	iwe.cmd = SIOCGIWFREQ;
 	iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
 	iwe.u.freq.m = 100000 *
-	      ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ);
+	      ieee80211_channel_to_frequency(iwe.u.freq.m, NL80211_BAND_2GHZ);
 	iwe.u.freq.e = 1;
 	current_ev = iwe_stream_add_event(info, current_ev, end_buf,
 					  &iwe, IW_EV_FREQ_LEN);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index f93a7f7..bfa542c 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -1913,7 +1913,7 @@
 	if (geo->bg_channels) {
 		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
 
-		bg_band->band = IEEE80211_BAND_2GHZ;
+		bg_band->band = NL80211_BAND_2GHZ;
 		bg_band->n_channels = geo->bg_channels;
 		bg_band->channels = kcalloc(geo->bg_channels,
 					    sizeof(struct ieee80211_channel),
@@ -1924,7 +1924,7 @@
 		}
 		/* translate geo->bg to bg_band.channels */
 		for (i = 0; i < geo->bg_channels; i++) {
-			bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+			bg_band->channels[i].band = NL80211_BAND_2GHZ;
 			bg_band->channels[i].center_freq = geo->bg[i].freq;
 			bg_band->channels[i].hw_value = geo->bg[i].channel;
 			bg_band->channels[i].max_power = geo->bg[i].max_power;
@@ -1945,7 +1945,7 @@
 		bg_band->bitrates = ipw2100_bg_rates;
 		bg_band->n_bitrates = RATE_COUNT;
 
-		wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+		wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
 	}
 
 	wdev->wiphy->cipher_suites = ipw_cipher_suites;
@@ -2954,7 +2954,7 @@
 
 		/* A packet was processed by the hardware, so update the
 		 * watchdog */
-		priv->net_dev->trans_start = jiffies;
+		netif_trans_update(priv->net_dev);
 
 		break;
 
@@ -3521,7 +3521,7 @@
 static ssize_t show_pci(struct device *d, struct device_attribute *attr,
 			char *buf)
 {
-	struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev);
+	struct pci_dev *pci_dev = to_pci_dev(d);
 	char *out = buf;
 	int i, j;
 	u32 val;
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ed0adaf..5adb7ce 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -7707,7 +7707,7 @@
 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
 
 	/* We received data from the HW, so stop the watchdog */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	/* We only process data packets if the
 	 * interface is open */
@@ -7770,7 +7770,7 @@
 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
 
 	/* We received data from the HW, so stop the watchdog */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	/* We only process data packets if the
 	 * interface is open */
@@ -7952,7 +7952,7 @@
 		return;
 
 	/* We received data from the HW, so stop the watchdog */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
 		dev->stats.rx_errors++;
@@ -11359,7 +11359,7 @@
 	if (geo->bg_channels) {
 		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
 
-		bg_band->band = IEEE80211_BAND_2GHZ;
+		bg_band->band = NL80211_BAND_2GHZ;
 		bg_band->n_channels = geo->bg_channels;
 		bg_band->channels = kcalloc(geo->bg_channels,
 					    sizeof(struct ieee80211_channel),
@@ -11370,7 +11370,7 @@
 		}
 		/* translate geo->bg to bg_band.channels */
 		for (i = 0; i < geo->bg_channels; i++) {
-			bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+			bg_band->channels[i].band = NL80211_BAND_2GHZ;
 			bg_band->channels[i].center_freq = geo->bg[i].freq;
 			bg_band->channels[i].hw_value = geo->bg[i].channel;
 			bg_band->channels[i].max_power = geo->bg[i].max_power;
@@ -11391,14 +11391,14 @@
 		bg_band->bitrates = ipw2200_bg_rates;
 		bg_band->n_bitrates = ipw2200_num_bg_rates;
 
-		wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+		wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
 	}
 
 	/* fill-out priv->ieee->a_band */
 	if (geo->a_channels) {
 		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
 
-		a_band->band = IEEE80211_BAND_5GHZ;
+		a_band->band = NL80211_BAND_5GHZ;
 		a_band->n_channels = geo->a_channels;
 		a_band->channels = kcalloc(geo->a_channels,
 					   sizeof(struct ieee80211_channel),
@@ -11409,7 +11409,7 @@
 		}
 		/* translate geo->a to a_band.channels */
 		for (i = 0; i < geo->a_channels; i++) {
-			a_band->channels[i].band = IEEE80211_BAND_5GHZ;
+			a_band->channels[i].band = NL80211_BAND_5GHZ;
 			a_band->channels[i].center_freq = geo->a[i].freq;
 			a_band->channels[i].hw_value = geo->a[i].channel;
 			a_band->channels[i].max_power = geo->a[i].max_power;
@@ -11430,7 +11430,7 @@
 		a_band->bitrates = ipw2200_a_rates;
 		a_band->n_bitrates = ipw2200_num_a_rates;
 
-		wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
+		wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
 	}
 
 	wdev->wiphy->cipher_suites = ipw_cipher_suites;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index af1b3e6..466912e 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1547,7 +1547,7 @@
 }
 
 static int
-il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
+il3945_get_channels_for_scan(struct il_priv *il, enum nl80211_band band,
 			     u8 is_active, u8 n_probes,
 			     struct il3945_scan_channel *scan_ch,
 			     struct ieee80211_vif *vif)
@@ -1618,7 +1618,7 @@
 		/* scan_pwr_info->tpc.dsp_atten; */
 
 		/*scan_pwr_info->tpc.tx_gain; */
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
 		else {
 			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
@@ -2534,7 +2534,7 @@
 	};
 	struct il3945_scan_cmd *scan;
 	u8 n_probes = 0;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	bool is_active = false;
 	int ret;
 	u16 len;
@@ -2615,14 +2615,14 @@
 	/* flags + rate selection */
 
 	switch (il->scan_band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
 		scan->tx_cmd.rate = RATE_1M_PLCP;
-		band = IEEE80211_BAND_2GHZ;
+		band = NL80211_BAND_2GHZ;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		scan->tx_cmd.rate = RATE_6M_PLCP;
-		band = IEEE80211_BAND_5GHZ;
+		band = NL80211_BAND_5GHZ;
 		break;
 	default:
 		IL_WARN("Invalid scan band\n");
@@ -3507,7 +3507,7 @@
 
 	il->ieee_channels = NULL;
 	il->ieee_rates = NULL;
-	il->band = IEEE80211_BAND_2GHZ;
+	il->band = NL80211_BAND_2GHZ;
 
 	il->iw_mode = NL80211_IFTYPE_STATION;
 	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
@@ -3582,13 +3582,13 @@
 	/* Default value; 4 EDCA QOS priorities */
 	hw->queues = 4;
 
-	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
-		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-		    &il->bands[IEEE80211_BAND_2GHZ];
+	if (il->bands[NL80211_BAND_2GHZ].n_channels)
+		il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+		    &il->bands[NL80211_BAND_2GHZ];
 
-	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
-		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-		    &il->bands[IEEE80211_BAND_5GHZ];
+	if (il->bands[NL80211_BAND_5GHZ].n_channels)
+		il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+		    &il->bands[NL80211_BAND_5GHZ];
 
 	il_leds_init(il);
 
@@ -3761,7 +3761,7 @@
 		goto out_release_irq;
 	}
 
-	il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5]);
+	il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
 	il3945_setup_deferred_work(il);
 	il3945_setup_handlers(il);
 	il_power_initialize(il);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index 76b0729..03ad9b8 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -97,7 +97,7 @@
 #define RATE_RETRY_TH		15
 
 static u8
-il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
+il3945_get_rate_idx_by_rssi(s32 rssi, enum nl80211_band band)
 {
 	u32 idx = 0;
 	u32 table_size = 0;
@@ -107,11 +107,11 @@
 		rssi = IL_MIN_RSSI_VAL;
 
 	switch (band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		tpt_table = il3945_tpt_table_g;
 		table_size = ARRAY_SIZE(il3945_tpt_table_g);
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		tpt_table = il3945_tpt_table_a;
 		table_size = ARRAY_SIZE(il3945_tpt_table_a);
 		break;
@@ -380,7 +380,7 @@
 
 	il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
 	/* For 5 GHz band it start at IL_FIRST_OFDM_RATE */
-	if (sband->band == IEEE80211_BAND_5GHZ) {
+	if (sband->band == NL80211_BAND_5GHZ) {
 		rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
 		il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
 	}
@@ -541,7 +541,7 @@
 
 static u16
 il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
-			 enum ieee80211_band band)
+			 enum nl80211_band band)
 {
 	u8 high = RATE_INVALID;
 	u8 low = RATE_INVALID;
@@ -549,7 +549,7 @@
 
 	/* 802.11A walks to the next literal adjacent rate in
 	 * the rate table */
-	if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+	if (unlikely(band == NL80211_BAND_5GHZ)) {
 		int i;
 		u32 mask;
 
@@ -657,14 +657,14 @@
 
 	/* get user max rate if set */
 	max_rate_idx = txrc->max_rate_idx;
-	if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
+	if (sband->band == NL80211_BAND_5GHZ && max_rate_idx != -1)
 		max_rate_idx += IL_FIRST_OFDM_RATE;
 	if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
 		max_rate_idx = -1;
 
 	idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
 
-	if (sband->band == IEEE80211_BAND_5GHZ)
+	if (sband->band == NL80211_BAND_5GHZ)
 		rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
 
 	spin_lock_irqsave(&rs_sta->lock, flags);
@@ -806,7 +806,7 @@
 
 out:
 
-	if (sband->band == IEEE80211_BAND_5GHZ) {
+	if (sband->band == NL80211_BAND_5GHZ) {
 		if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
 			idx = IL_FIRST_OFDM_RATE;
 		rs_sta->last_txrate_idx = idx;
@@ -935,7 +935,7 @@
 
 	rs_sta->tgg = 0;
 	switch (il->band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		/* TODO: this always does G, not a regression */
 		if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) {
 			rs_sta->tgg = 1;
@@ -943,7 +943,7 @@
 		} else
 			rs_sta->expected_tpt = il3945_expected_tpt_g;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		rs_sta->expected_tpt = il3945_expected_tpt_a;
 		break;
 	default:
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 93bdf68..7bcedbb 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -255,13 +255,13 @@
 	int next_rate = il3945_get_prev_ieee_rate(rate);
 
 	switch (il->band) {
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		if (rate == RATE_12M_IDX)
 			next_rate = RATE_9M_IDX;
 		else if (rate == RATE_6M_IDX)
 			next_rate = RATE_6M_IDX;
 		break;
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
 		    il_is_associated(il)) {
 			if (rate == RATE_11M_IDX)
@@ -349,7 +349,7 @@
 
 	/* Fill the MRR chain with some info about on-chip retransmissions */
 	rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
-	if (info->band == IEEE80211_BAND_5GHZ)
+	if (info->band == NL80211_BAND_5GHZ)
 		rate_idx -= IL_FIRST_OFDM_RATE;
 
 	fail = tx_resp->failure_frame;
@@ -554,14 +554,14 @@
 	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
 	rx_status.band =
 	    (rx_hdr->
-	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
-	    IEEE80211_BAND_5GHZ;
+	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
+	    NL80211_BAND_5GHZ;
 	rx_status.freq =
 	    ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
 					   rx_status.band);
 
 	rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
-	if (rx_status.band == IEEE80211_BAND_5GHZ)
+	if (rx_status.band == NL80211_BAND_5GHZ)
 		rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
 
 	rx_status.antenna =
@@ -1409,7 +1409,7 @@
 
 	chan = le16_to_cpu(il->active.channel);
 
-	txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+	txpower.band = (il->band == NL80211_BAND_5GHZ) ? 0 : 1;
 	ch_info = il_get_channel_info(il, il->band, chan);
 	if (!ch_info) {
 		IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
@@ -2310,7 +2310,7 @@
 
 		il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
 				(il->band ==
-				 IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
+				 NL80211_BAND_5GHZ) ? RATE_6M_PLCP :
 				RATE_1M_PLCP);
 		il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
 
@@ -2343,7 +2343,7 @@
 	}
 
 	switch (il->band) {
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		D_RATE("Select A mode rate scale\n");
 		/* If one of the following CCK rates is used,
 		 * have it fall back to the 6M OFDM rate */
@@ -2359,7 +2359,7 @@
 		    il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
 		break;
 
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		D_RATE("Select B/G mode rate scale\n");
 		/* If an OFDM rate is used, have it fall back to the
 		 * 1M CCK rates */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index b75f4ef..a91d170 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -457,7 +457,7 @@
 }
 
 int
-il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
 {
 	int idx = 0;
 	int band_offset = 0;
@@ -468,7 +468,7 @@
 		return idx;
 		/* Legacy rate format, search for match in table */
 	} else {
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			band_offset = IL_FIRST_OFDM_RATE;
 		for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
 			if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
@@ -688,8 +688,8 @@
 	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
 	rx_status.band =
 	    (phy_res->
-	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
-	    IEEE80211_BAND_5GHZ;
+	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
+	    NL80211_BAND_5GHZ;
 	rx_status.freq =
 	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
 					   rx_status.band);
@@ -766,7 +766,7 @@
 
 static int
 il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
-			     enum ieee80211_band band, u8 is_active,
+			     enum nl80211_band band, u8 is_active,
 			     u8 n_probes, struct il_scan_channel *scan_ch)
 {
 	struct ieee80211_channel *chan;
@@ -822,7 +822,7 @@
 		 * power level:
 		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
 		 */
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
 		else
 			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -870,7 +870,7 @@
 	u32 rate_flags = 0;
 	u16 cmd_len;
 	u16 rx_chain = 0;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	u8 n_probes = 0;
 	u8 rx_ant = il->hw_params.valid_rx_ant;
 	u8 rate;
@@ -944,7 +944,7 @@
 	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 
 	switch (il->scan_band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
 		chan_mod =
 		    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
@@ -956,7 +956,7 @@
 			rate_flags = RATE_MCS_CCK_MSK;
 		}
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		rate = RATE_6M_PLCP;
 		break;
 	default:
@@ -1590,7 +1590,7 @@
 	    || rate_idx > RATE_COUNT_LEGACY)
 		rate_idx = rate_lowest_index(&il->bands[info->band], sta);
 	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-	if (info->band == IEEE80211_BAND_5GHZ)
+	if (info->band == NL80211_BAND_5GHZ)
 		rate_idx += IL_FIRST_OFDM_RATE;
 	/* Get PLCP rate for tx_cmd->rate_n_flags */
 	rate_plcp = il_rates[rate_idx].plcp;
@@ -3051,7 +3051,7 @@
 	}
 	/* Set up the rate scaling to start at selected rate, fall back
 	 * all the way down to 1M in IEEE order, and then spin on 1M */
-	if (il->band == IEEE80211_BAND_5GHZ)
+	if (il->band == NL80211_BAND_5GHZ)
 		r = RATE_6M_IDX;
 	else
 		r = RATE_1M_IDX;
@@ -5553,6 +5553,7 @@
 
 	il4965_prepare_card_hw(il);
 	if (!il->hw_ready) {
+		il_dealloc_bcast_stations(il);
 		IL_ERR("HW not ready\n");
 		return -EIO;
 	}
@@ -5564,6 +5565,7 @@
 		set_bit(S_RFKILL, &il->status);
 		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
 
+		il_dealloc_bcast_stations(il);
 		il_enable_rfkill_int(il);
 		IL_WARN("Radio disabled by HW RF Kill switch\n");
 		return 0;
@@ -5577,6 +5579,7 @@
 	ret = il4965_hw_nic_init(il);
 	if (ret) {
 		IL_ERR("Unable to init nic\n");
+		il_dealloc_bcast_stations(il);
 		return ret;
 	}
 
@@ -5787,12 +5790,12 @@
 
 	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
 
-	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
-		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-		    &il->bands[IEEE80211_BAND_2GHZ];
-	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
-		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-		    &il->bands[IEEE80211_BAND_5GHZ];
+	if (il->bands[NL80211_BAND_2GHZ].n_channels)
+		il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+		    &il->bands[NL80211_BAND_2GHZ];
+	if (il->bands[NL80211_BAND_5GHZ].n_channels)
+		il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+		    &il->bands[NL80211_BAND_5GHZ];
 
 	il_leds_init(il);
 
@@ -6365,7 +6368,7 @@
 
 	il->ieee_channels = NULL;
 	il->ieee_rates = NULL;
-	il->band = IEEE80211_BAND_2GHZ;
+	il->band = NL80211_BAND_2GHZ;
 
 	il->iw_mode = NL80211_IFTYPE_STATION;
 	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
@@ -6477,7 +6480,7 @@
 	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
 	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
 	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
-	il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+	il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);
 
 	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
 
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index bac60b2..a867ae7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -549,7 +549,7 @@
  */
 static int
 il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
-				enum ieee80211_band band,
+				enum nl80211_band band,
 				struct il_scale_tbl_info *tbl, int *rate_idx)
 {
 	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
@@ -574,7 +574,7 @@
 	/* legacy rate format */
 	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
 		if (il4965_num_of_ant == 1) {
-			if (band == IEEE80211_BAND_5GHZ)
+			if (band == NL80211_BAND_5GHZ)
 				tbl->lq_type = LQ_A;
 			else
 				tbl->lq_type = LQ_G;
@@ -743,7 +743,7 @@
 	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
 		switch_to_legacy = 1;
 		scale_idx = rs_ht_to_legacy[scale_idx];
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+		if (lq_sta->band == NL80211_BAND_5GHZ)
 			tbl->lq_type = LQ_A;
 		else
 			tbl->lq_type = LQ_G;
@@ -762,7 +762,7 @@
 	/* Mask with station rate restriction */
 	if (is_legacy(tbl->lq_type)) {
 		/* supp_rates has no CCK bits in A mode */
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+		if (lq_sta->band == NL80211_BAND_5GHZ)
 			rate_mask =
 			    (u16) (rate_mask &
 				   (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
@@ -851,7 +851,7 @@
 	table = &lq_sta->lq;
 	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
 	il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
-	if (il->band == IEEE80211_BAND_5GHZ)
+	if (il->band == NL80211_BAND_5GHZ)
 		rs_idx -= IL_FIRST_OFDM_RATE;
 	mac_flags = info->status.rates[0].flags;
 	mac_idx = info->status.rates[0].idx;
@@ -864,7 +864,7 @@
 		 * mac80211 HT idx is always zero-idxed; we need to move
 		 * HT OFDM rates after CCK rates in 2.4 GHz band
 		 */
-		if (il->band == IEEE80211_BAND_2GHZ)
+		if (il->band == NL80211_BAND_2GHZ)
 			mac_idx += IL_FIRST_OFDM_RATE;
 	}
 	/* Here we actually compare this rate to the latest LQ command */
@@ -1816,7 +1816,7 @@
 
 	/* mask with station rate restriction */
 	if (is_legacy(tbl->lq_type)) {
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+		if (lq_sta->band == NL80211_BAND_5GHZ)
 			/* supp_rates has no CCK bits in A mode */
 			rate_scale_idx_msk =
 			    (u16) (rate_mask &
@@ -2212,7 +2212,7 @@
 	/* Get max rate if user set max rate */
 	if (lq_sta) {
 		lq_sta->max_rate_idx = txrc->max_rate_idx;
-		if (sband->band == IEEE80211_BAND_5GHZ &&
+		if (sband->band == NL80211_BAND_5GHZ &&
 		    lq_sta->max_rate_idx != -1)
 			lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
 		if (lq_sta->max_rate_idx < 0 ||
@@ -2258,11 +2258,11 @@
 	} else {
 		/* Check for invalid rates */
 		if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
-		    (sband->band == IEEE80211_BAND_5GHZ &&
+		    (sband->band == NL80211_BAND_5GHZ &&
 		     rate_idx < IL_FIRST_OFDM_RATE))
 			rate_idx = rate_lowest_index(sband, sta);
 		/* On valid 5 GHz rate, adjust idx */
-		else if (sband->band == IEEE80211_BAND_5GHZ)
+		else if (sband->band == NL80211_BAND_5GHZ)
 			rate_idx -= IL_FIRST_OFDM_RATE;
 		info->control.rates[0].flags = 0;
 	}
@@ -2362,7 +2362,7 @@
 
 	/* Set last_txrate_idx to lowest rate */
 	lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
-	if (sband->band == IEEE80211_BAND_5GHZ)
+	if (sband->band == NL80211_BAND_5GHZ)
 		lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
 	lq_sta->is_agg = 0;
 
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index fe47db9..c3c638e 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -1267,7 +1267,7 @@
 	     "TX Power requested while scanning!\n"))
 		return -EAGAIN;
 
-	band = il->band == IEEE80211_BAND_2GHZ;
+	band = il->band == NL80211_BAND_2GHZ;
 
 	is_ht40 = iw4965_is_ht40_channel(il->active.flags);
 
@@ -1480,7 +1480,7 @@
 	u8 switch_count;
 	u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval);
 	struct ieee80211_vif *vif = il->vif;
-	band = (il->band == IEEE80211_BAND_2GHZ);
+	band = (il->band == NL80211_BAND_2GHZ);
 
 	if (WARN_ON_ONCE(vif == NULL))
 		return -EIO;
@@ -1918,7 +1918,7 @@
 	 * Force use of chains B and C for scan RX on 5 GHz band
 	 * because the device has off-channel reception on chain A.
 	 */
-	.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+	.scan_rx_antennas[NL80211_BAND_5GHZ] = ANT_BC,
 
 	.eeprom_size = IL4965_EEPROM_IMG_SIZE,
 	.num_of_queues = IL49_NUM_QUEUES,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h
index e432715..527e8b5 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.h
+++ b/drivers/net/wireless/intel/iwlegacy/4965.h
@@ -68,7 +68,7 @@
 void il4965_rx_replenish_now(struct il_priv *il);
 void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
 int il4965_rxq_stop(struct il_priv *il);
-int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
 void il4965_rx_handle(struct il_priv *il);
 
 /* tx */
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index eb5cb60..eb24b92 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -723,10 +723,9 @@
 	sz = il->cfg->eeprom_size;
 	D_EEPROM("NVM size = %d\n", sz);
 	il->eeprom = kzalloc(sz, GFP_KERNEL);
-	if (!il->eeprom) {
-		ret = -ENOMEM;
-		goto alloc_err;
-	}
+	if (!il->eeprom)
+		return -ENOMEM;
+
 	e = (__le16 *) il->eeprom;
 
 	il->ops->apm_init(il);
@@ -778,7 +777,6 @@
 		il_eeprom_free(il);
 	/* Reset chip to save power until we load uCode during "up". */
 	il_apm_stop(il);
-alloc_err:
 	return ret;
 }
 EXPORT_SYMBOL(il_eeprom_init);
@@ -862,7 +860,7 @@
  * Does not set up a command, or touch hardware.
  */
 static int
-il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
+il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
 		      const struct il_eeprom_channel *eeprom_ch,
 		      u8 clear_ht40_extension_channel)
 {
@@ -947,7 +945,7 @@
 			ch_info->channel = eeprom_ch_idx[ch];
 			ch_info->band =
 			    (band ==
-			     1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+			     1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 
 			/* permanently store EEPROM's channel regulatory flags
 			 *   and max power in channel info database. */
@@ -1005,14 +1003,14 @@
 
 	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
 	for (band = 6; band <= 7; band++) {
-		enum ieee80211_band ieeeband;
+		enum nl80211_band ieeeband;
 
 		il_init_band_reference(il, band, &eeprom_ch_count,
 				       &eeprom_ch_info, &eeprom_ch_idx);
 
 		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
 		ieeeband =
-		    (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+		    (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 
 		/* Loop through each band adding each of the channels */
 		for (ch = 0; ch < eeprom_ch_count; ch++) {
@@ -1050,19 +1048,19 @@
  * Based on band and channel number.
  */
 const struct il_channel_info *
-il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
+il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
 		    u16 channel)
 {
 	int i;
 
 	switch (band) {
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		for (i = 14; i < il->channel_count; i++) {
 			if (il->channel_info[i].channel == channel)
 				return &il->channel_info[i];
 		}
 		break;
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		if (channel >= 1 && channel <= 14)
 			return &il->channel_info[channel - 1];
 		break;
@@ -1459,7 +1457,7 @@
 	clear_bit(S_SCAN_HW, &il->status);
 
 	D_SCAN("Scan on %sGHz took %dms\n",
-	       (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+	       (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
 	       jiffies_to_msecs(jiffies - il->scan_start));
 
 	queue_work(il->workqueue, &il->scan_completed);
@@ -1477,10 +1475,10 @@
 EXPORT_SYMBOL(il_setup_rx_scan_handlers);
 
 u16
-il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
 			 u8 n_probes)
 {
-	if (band == IEEE80211_BAND_5GHZ)
+	if (band == NL80211_BAND_5GHZ)
 		return IL_ACTIVE_DWELL_TIME_52 +
 		    IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
 	else
@@ -1490,14 +1488,14 @@
 EXPORT_SYMBOL(il_get_active_dwell_time);
 
 u16
-il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
 			  struct ieee80211_vif *vif)
 {
 	u16 value;
 
 	u16 passive =
 	    (band ==
-	     IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
+	     NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
 	    IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
 	    IL_PASSIVE_DWELL_TIME_52;
 
@@ -1522,10 +1520,10 @@
 il_init_scan_params(struct il_priv *il)
 {
 	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
-	if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
-		il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
-	if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
-		il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+	if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
+		il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+	if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
+		il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
 }
 EXPORT_SYMBOL(il_init_scan_params);
 
@@ -2005,7 +2003,7 @@
 	il_set_ht_add_station(il, sta_id, sta);
 
 	/* 3945 only */
-	rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+	rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
 	/* Turn on both antennas for the station... */
 	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
 
@@ -2794,8 +2792,10 @@
 	il_tx_queue_unmap(il, txq_id);
 
 	/* De-alloc array of command/tx buffers */
-	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
-		kfree(txq->cmd[i]);
+	if (txq->cmd) {
+		for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+			kfree(txq->cmd[i]);
+	}
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
@@ -2873,8 +2873,10 @@
 	il_cmd_queue_unmap(il);
 
 	/* De-alloc array of command/tx buffers */
-	for (i = 0; i <= TFD_CMD_SLOTS; i++)
-		kfree(txq->cmd[i]);
+	if (txq->cmd) {
+		for (i = 0; i <= TFD_CMD_SLOTS; i++)
+			kfree(txq->cmd[i]);
+	}
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
@@ -3080,7 +3082,9 @@
 		kfree(txq->cmd[i]);
 out_free_arrays:
 	kfree(txq->meta);
+	txq->meta = NULL;
 	kfree(txq->cmd);
+	txq->cmd = NULL;
 
 	return -ENOMEM;
 }
@@ -3378,7 +3382,7 @@
 static void
 il_init_ht_hw_capab(const struct il_priv *il,
 		    struct ieee80211_sta_ht_cap *ht_info,
-		    enum ieee80211_band band)
+		    enum nl80211_band band)
 {
 	u16 max_bit_rate = 0;
 	u8 rx_chains_num = il->hw_params.rx_chains_num;
@@ -3439,8 +3443,8 @@
 	int i = 0;
 	s8 max_tx_power = 0;
 
-	if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
-	    il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+	if (il->bands[NL80211_BAND_2GHZ].n_bitrates ||
+	    il->bands[NL80211_BAND_5GHZ].n_bitrates) {
 		D_INFO("Geography modes already initialized.\n");
 		set_bit(S_GEO_CONFIGURED, &il->status);
 		return 0;
@@ -3461,23 +3465,23 @@
 	}
 
 	/* 5.2GHz channels start after the 2.4GHz channels */
-	sband = &il->bands[IEEE80211_BAND_5GHZ];
+	sband = &il->bands[NL80211_BAND_5GHZ];
 	sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
 	/* just OFDM */
 	sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
 	sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
 
 	if (il->cfg->sku & IL_SKU_N)
-		il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+		il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);
 
-	sband = &il->bands[IEEE80211_BAND_2GHZ];
+	sband = &il->bands[NL80211_BAND_2GHZ];
 	sband->channels = channels;
 	/* OFDM & CCK */
 	sband->bitrates = rates;
 	sband->n_bitrates = RATE_COUNT_LEGACY;
 
 	if (il->cfg->sku & IL_SKU_N)
-		il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+		il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);
 
 	il->ieee_channels = channels;
 	il->ieee_rates = rates;
@@ -3528,7 +3532,7 @@
 	il->tx_power_user_lmt = max_tx_power;
 	il->tx_power_next = max_tx_power;
 
-	if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+	if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 &&
 	    (il->cfg->sku & IL_SKU_A)) {
 		IL_INFO("Incorrectly detected BG card as ABG. "
 			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
@@ -3537,8 +3541,8 @@
 	}
 
 	IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
-		il->bands[IEEE80211_BAND_2GHZ].n_channels,
-		il->bands[IEEE80211_BAND_5GHZ].n_channels);
+		il->bands[NL80211_BAND_2GHZ].n_channels,
+		il->bands[NL80211_BAND_5GHZ].n_channels);
 
 	set_bit(S_GEO_CONFIGURED, &il->status);
 
@@ -3559,7 +3563,7 @@
 EXPORT_SYMBOL(il_free_geos);
 
 static bool
-il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+il_is_channel_extension(struct il_priv *il, enum nl80211_band band,
 			u16 channel, u8 extension_chan_offset)
 {
 	const struct il_channel_info *ch_info;
@@ -3922,14 +3926,14 @@
 
 /* Return valid, unused, channel for a passive scan to reset the RF */
 u8
-il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
 {
 	const struct il_channel_info *ch_info;
 	int i;
 	u8 channel = 0;
 	u8 min, max;
 
-	if (band == IEEE80211_BAND_5GHZ) {
+	if (band == NL80211_BAND_5GHZ) {
 		min = 14;
 		max = il->channel_count;
 	} else {
@@ -3961,14 +3965,14 @@
 int
 il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
 {
-	enum ieee80211_band band = ch->band;
+	enum nl80211_band band = ch->band;
 	u16 channel = ch->hw_value;
 
 	if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
 		return 0;
 
 	il->staging.channel = cpu_to_le16(channel);
-	if (band == IEEE80211_BAND_5GHZ)
+	if (band == NL80211_BAND_5GHZ)
 		il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
 	else
 		il->staging.flags |= RXON_FLG_BAND_24G_MSK;
@@ -3982,10 +3986,10 @@
 EXPORT_SYMBOL(il_set_rxon_channel);
 
 void
-il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
 		      struct ieee80211_vif *vif)
 {
-	if (band == IEEE80211_BAND_5GHZ) {
+	if (band == NL80211_BAND_5GHZ) {
 		il->staging.flags &=
 		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
 		      RXON_FLG_CCK_MSK);
@@ -5411,7 +5415,7 @@
 
 	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
 		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
-		if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+		if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ)
 			il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
 		else
 			il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index ce52cf1..726ede3 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -432,7 +432,7 @@
 int il_init_channel_map(struct il_priv *il);
 void il_free_channel_map(struct il_priv *il);
 const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
-						  enum ieee80211_band band,
+						  enum nl80211_band band,
 						  u16 channel);
 
 #define IL_NUM_SCAN_RATES         (2)
@@ -497,7 +497,7 @@
 
 	u8 group_idx;		/* 0-4, maps channel to group1/2/3/4/5 */
 	u8 band_idx;		/* 0-4, maps channel to band1/2/3/4/5 */
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	/* HT40 channel info */
 	s8 ht40_max_power_avg;	/* (dBm) regul. eeprom, normal Tx, any rate */
@@ -811,7 +811,7 @@
  * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
  * @max_stations:
  * @ht40_channel: is 40MHz width possible in band 2.4
- * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
+ * BIT(NL80211_BAND_5GHZ) BIT(NL80211_BAND_5GHZ)
  * @sw_crypto: 0 for hw, 1 for sw
  * @max_xxx_size: for ucode uses
  * @ct_kill_threshold: temperature threshold
@@ -1141,13 +1141,13 @@
 	struct list_head free_frames;
 	int frames_count;
 
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int alloc_rxb_page;
 
 	void (*handlers[IL_CN_MAX]) (struct il_priv *il,
 				     struct il_rx_buf *rxb);
 
-	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+	struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
 
 	/* spectrum measurement report caching */
 	struct il_spectrum_notification measure_report;
@@ -1176,10 +1176,10 @@
 	unsigned long scan_start;
 	unsigned long scan_start_tsf;
 	void *scan_cmd;
-	enum ieee80211_band scan_band;
+	enum nl80211_band scan_band;
 	struct cfg80211_scan_request *scan_request;
 	struct ieee80211_vif *scan_vif;
-	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+	u8 scan_tx_ant[NUM_NL80211_BANDS];
 	u8 mgmt_tx_ant;
 
 	/* spinlock */
@@ -1479,7 +1479,7 @@
 static inline u8
 il_is_channel_a_band(const struct il_channel_info *ch_info)
 {
-	return ch_info->band == IEEE80211_BAND_5GHZ;
+	return ch_info->band == NL80211_BAND_5GHZ;
 }
 
 static inline int
@@ -1673,7 +1673,7 @@
 	/* params not likely to change within a device family */
 	struct il_base_params *base_params;
 	/* params likely to change within a device family */
-	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+	u8 scan_rx_antennas[NUM_NL80211_BANDS];
 	enum il_led_mode led_mode;
 
 	int eeprom_size;
@@ -1707,9 +1707,9 @@
 int il_check_rxon_cmd(struct il_priv *il);
 int il_full_rxon_required(struct il_priv *il);
 int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
-void il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+void il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
 			   struct ieee80211_vif *vif);
-u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+u8 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band);
 void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
 bool il_is_ht40_tx_allowed(struct il_priv *il,
 			   struct ieee80211_sta_ht_cap *ht_cap);
@@ -1793,9 +1793,9 @@
 u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
 		      const u8 *ta, const u8 *ie, int ie_len, int left);
 void il_setup_rx_scan_handlers(struct il_priv *il);
-u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+u16 il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
 			     u8 n_probes);
-u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+u16 il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
 			      struct ieee80211_vif *vif);
 void il_setup_scan_deferred_work(struct il_priv *il);
 void il_cancel_scan_deferred_work(struct il_priv *il);
@@ -1955,7 +1955,7 @@
 }
 
 static inline const struct ieee80211_supported_band *
-il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+il_get_hw_mode(struct il_priv *il, enum nl80211_band band)
 {
 	return il->hw->wiphy->bands[band];
 }
@@ -2813,7 +2813,7 @@
 	u8 action_counter;	/* # mode-switch actions tried */
 	u8 is_green;
 	u8 is_dup;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	/* The following are bitmaps of rates; RATE_6M_MASK, etc. */
 	u32 supp_rates;
diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c
index 908b9f4..6fc6b7f 100644
--- a/drivers/net/wireless/intel/iwlegacy/debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/debug.c
@@ -544,7 +544,7 @@
 		return -ENOMEM;
 	}
 
-	supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+	supp_band = il_get_hw_mode(il, NL80211_BAND_2GHZ);
 	if (supp_band) {
 		channels = supp_band->channels;
 
@@ -571,7 +571,7 @@
 				      flags & IEEE80211_CHAN_NO_IR ?
 				      "passive only" : "active/passive");
 	}
-	supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+	supp_band = il_get_hw_mode(il, NL80211_BAND_5GHZ);
 	if (supp_band) {
 		channels = supp_band->channels;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 16c4f38..492035f 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -88,16 +88,6 @@
 	  If unsure, don't enable this option, as some programs might
 	  expect incoming broadcasts for their normal operations.
 
-config IWLWIFI_UAPSD
-	bool "enable U-APSD by default"
-	depends on IWLMVM
-	help
-	  Say Y here to enable U-APSD by default. This may cause
-	  interoperability problems with some APs, manifesting in lower than
-	  expected throughput due to those APs not enabling aggregation
-
-	  If unsure, say N.
-
 config IWLWIFI_PCIE_RTPM
        bool "Enable runtime power management mode for PCIe devices"
        depends on IWLMVM && PM
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index 9de277c..b79e387 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -158,7 +158,7 @@
 			 struct iwl_rxon_context *ctx);
 void iwl_set_flags_for_band(struct iwl_priv *priv,
 			    struct iwl_rxon_context *ctx,
-			    enum ieee80211_band band,
+			    enum nl80211_band band,
 			    struct ieee80211_vif *vif);
 
 /* uCode */
@@ -186,7 +186,7 @@
 				u8 flags, bool clear);
 
 static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
-			struct iwl_priv *priv, enum ieee80211_band band)
+			struct iwl_priv *priv, enum nl80211_band band)
 {
 	return priv->hw->wiphy->bands[band];
 }
@@ -198,7 +198,7 @@
 #endif
 
 /* rx */
-int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
 void iwl_setup_rx_handlers(struct iwl_priv *priv);
 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
 
@@ -258,7 +258,7 @@
 int __must_check iwl_scan_initiate(struct iwl_priv *priv,
 				   struct ieee80211_vif *vif,
 				   enum iwl_scan_type scan_type,
-				   enum ieee80211_band band);
+				   enum nl80211_band band);
 
 /* For faster active scanning, scan will move to the next channel if fewer than
  * PLCP_QUIET_THRESH packets are heard on this channel within
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index 74c5161..f6591c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -335,7 +335,7 @@
 	if (!buf)
 		return -ENOMEM;
 
-	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
+	supp_band = iwl_get_hw_mode(priv, NL80211_BAND_2GHZ);
 	if (supp_band) {
 		channels = supp_band->channels;
 
@@ -358,7 +358,7 @@
 					IEEE80211_CHAN_NO_IR ?
 					"passive only" : "active/passive");
 	}
-	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
+	supp_band = iwl_get_hw_mode(priv, NL80211_BAND_5GHZ);
 	if (supp_band) {
 		channels = supp_band->channels;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 1a7ead7..8148df6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -677,7 +677,7 @@
 
 	struct iwl_hw_params hw_params;
 
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	u8 valid_contexts;
 
 	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
@@ -722,11 +722,11 @@
 	unsigned long scan_start;
 	unsigned long scan_start_tsf;
 	void *scan_cmd;
-	enum ieee80211_band scan_band;
+	enum nl80211_band scan_band;
 	struct cfg80211_scan_request *scan_request;
 	struct ieee80211_vif *scan_vif;
 	enum iwl_scan_type scan_type;
-	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+	u8 scan_tx_ant[NUM_NL80211_BANDS];
 	u8 mgmt_tx_ant;
 
 	/* max number of station keys */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index cc13c04..f21732e 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -420,7 +420,7 @@
 		.data = { &cmd, },
 	};
 
-	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+	cmd.band = priv->band == NL80211_BAND_2GHZ;
 	ch = ch_switch->chandef.chan->hw_value;
 	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
 		      ctx->active.channel, ch);
@@ -588,7 +588,7 @@
 
 	hcmd.data[0] = cmd;
 
-	cmd->band = priv->band == IEEE80211_BAND_2GHZ;
+	cmd->band = priv->band == NL80211_BAND_2GHZ;
 	ch = ch_switch->chandef.chan->hw_value;
 	IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
 		      ctx->active.channel, ch);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 1799469..8dda52a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -94,7 +94,7 @@
 	iwl_tt_handler(priv);
 }
 
-int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
 {
 	int idx = 0;
 	int band_offset = 0;
@@ -105,7 +105,7 @@
 		return idx;
 	/* Legacy rate format, search for match in table */
 	} else {
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			band_offset = IWL_FIRST_OFDM_RATE;
 		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
 			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
@@ -878,7 +878,7 @@
 	int i;
 	u8 ind = ant;
 
-	if (priv->band == IEEE80211_BAND_2GHZ &&
+	if (priv->band == NL80211_BAND_2GHZ &&
 	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
 		return 0;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index c63ea79..8c07194 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -202,12 +202,12 @@
 
 	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
 
-	if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			&priv->nvm_data->bands[IEEE80211_BAND_2GHZ];
-	if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
-		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			&priv->nvm_data->bands[IEEE80211_BAND_5GHZ];
+	if (priv->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+		priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+			&priv->nvm_data->bands[NL80211_BAND_2GHZ];
+	if (priv->nvm_data->bands[NL80211_BAND_5GHZ].n_channels)
+		priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+			&priv->nvm_data->bands[NL80211_BAND_5GHZ];
 
 	hw->wiphy->hw_version = priv->trans->hw_id;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 85628127..37b32a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -262,7 +262,7 @@
 	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
 
 	/* In mac80211, rates for 5 GHz start at 0 */
-	if (info->band == IEEE80211_BAND_5GHZ)
+	if (info->band == NL80211_BAND_5GHZ)
 		rate += IWL_FIRST_OFDM_RATE;
 	else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
 		rate_flags |= RATE_MCS_CCK_MSK;
@@ -1071,7 +1071,7 @@
 
 static void iwl_setup_deferred_work(struct iwl_priv *priv)
 {
-	priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+	priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
 
 	INIT_WORK(&priv->restart, iwl_bg_restart);
 	INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
@@ -1117,7 +1117,7 @@
 
 	INIT_LIST_HEAD(&priv->calib_results);
 
-	priv->band = IEEE80211_BAND_2GHZ;
+	priv->band = NL80211_BAND_2GHZ;
 
 	priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index ee75055..b95c2d7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -599,7 +599,7 @@
  * fill "search" or "active" tx mode table.
  */
 static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
-				    enum ieee80211_band band,
+				    enum nl80211_band band,
 				    struct iwl_scale_tbl_info *tbl,
 				    int *rate_idx)
 {
@@ -624,7 +624,7 @@
 	/* legacy rate format */
 	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
 		if (num_of_ant == 1) {
-			if (band == IEEE80211_BAND_5GHZ)
+			if (band == NL80211_BAND_5GHZ)
 				tbl->lq_type = LQ_A;
 			else
 				tbl->lq_type = LQ_G;
@@ -802,7 +802,7 @@
 	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
 		switch_to_legacy = 1;
 		scale_index = rs_ht_to_legacy[scale_index];
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+		if (lq_sta->band == NL80211_BAND_5GHZ)
 			tbl->lq_type = LQ_A;
 		else
 			tbl->lq_type = LQ_G;
@@ -821,7 +821,7 @@
 	/* Mask with station rate restriction */
 	if (is_legacy(tbl->lq_type)) {
 		/* supp_rates has no CCK bits in A mode */
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+		if (lq_sta->band == NL80211_BAND_5GHZ)
 			rate_mask  = (u16)(rate_mask &
 			   (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
 		else
@@ -939,7 +939,7 @@
 	table = &lq_sta->lq;
 	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
 	rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
-	if (priv->band == IEEE80211_BAND_5GHZ)
+	if (priv->band == NL80211_BAND_5GHZ)
 		rs_index -= IWL_FIRST_OFDM_RATE;
 	mac_flags = info->status.rates[0].flags;
 	mac_index = info->status.rates[0].idx;
@@ -952,7 +952,7 @@
 		 * mac80211 HT index is always zero-indexed; we need to move
 		 * HT OFDM rates after CCK rates in 2.4 GHz band
 		 */
-		if (priv->band == IEEE80211_BAND_2GHZ)
+		if (priv->band == NL80211_BAND_2GHZ)
 			mac_index += IWL_FIRST_OFDM_RATE;
 	}
 	/* Here we actually compare this rate to the latest LQ command */
@@ -2284,7 +2284,7 @@
 
 	/* mask with station rate restriction */
 	if (is_legacy(tbl->lq_type)) {
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+		if (lq_sta->band == NL80211_BAND_5GHZ)
 			/* supp_rates has no CCK bits in A mode */
 			rate_scale_index_msk = (u16) (rate_mask &
 				(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
@@ -2721,7 +2721,7 @@
 	/* Get max rate if user set max rate */
 	if (lq_sta) {
 		lq_sta->max_rate_idx = txrc->max_rate_idx;
-		if ((sband->band == IEEE80211_BAND_5GHZ) &&
+		if ((sband->band == NL80211_BAND_5GHZ) &&
 		    (lq_sta->max_rate_idx != -1))
 			lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
 		if ((lq_sta->max_rate_idx < 0) ||
@@ -2763,11 +2763,11 @@
 	} else {
 		/* Check for invalid rates */
 		if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
-				((sband->band == IEEE80211_BAND_5GHZ) &&
+				((sband->band == NL80211_BAND_5GHZ) &&
 				 (rate_idx < IWL_FIRST_OFDM_RATE)))
 			rate_idx = rate_lowest_index(sband, sta);
 		/* On valid 5 GHz rate, adjust index */
-		else if (sband->band == IEEE80211_BAND_5GHZ)
+		else if (sband->band == NL80211_BAND_5GHZ)
 			rate_idx -= IWL_FIRST_OFDM_RATE;
 		info->control.rates[0].flags = 0;
 	}
@@ -2880,7 +2880,7 @@
 
 	/* Set last_txrate_idx to lowest rate */
 	lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
-	if (sband->band == IEEE80211_BAND_5GHZ)
+	if (sband->band == NL80211_BAND_5GHZ)
 		lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
 	lq_sta->is_agg = 0;
 #ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
index c5fe445..50c1e95 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
@@ -355,7 +355,7 @@
 	u8 action_counter;	/* # mode-switch actions tried */
 	u8 is_green;
 	u8 is_dup;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
 	u32 supp_rates;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 52ab1e0..dfa2041 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -686,7 +686,7 @@
 
 	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-	ieee80211_rx_napi(priv->hw, skb, priv->napi);
+	ieee80211_rx_napi(priv->hw, NULL, skb, priv->napi);
 }
 
 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -834,7 +834,7 @@
 	/* rx_status carries information about the packet to mac80211 */
 	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
 	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+				NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 	rx_status.freq =
 		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
 					       rx_status.band);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 2d47cb2..b228552 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -719,7 +719,7 @@
 void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
 			 struct iwl_rxon_context *ctx)
 {
-	enum ieee80211_band band = ch->band;
+	enum nl80211_band band = ch->band;
 	u16 channel = ch->hw_value;
 
 	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
@@ -727,7 +727,7 @@
 		return;
 
 	ctx->staging.channel = cpu_to_le16(channel);
-	if (band == IEEE80211_BAND_5GHZ)
+	if (band == NL80211_BAND_5GHZ)
 		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
 	else
 		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
@@ -740,10 +740,10 @@
 
 void iwl_set_flags_for_band(struct iwl_priv *priv,
 			    struct iwl_rxon_context *ctx,
-			    enum ieee80211_band band,
+			    enum nl80211_band band,
 			    struct ieee80211_vif *vif)
 {
-	if (band == IEEE80211_BAND_5GHZ) {
+	if (band == NL80211_BAND_5GHZ) {
 		ctx->staging.flags &=
 		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
 		      | RXON_FLG_CCK_MSK);
@@ -1476,7 +1476,7 @@
 
 	iwlagn_set_rxon_chain(priv, ctx);
 
-	if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
+	if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ))
 		ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
 	else
 		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 81a2ddb..d01766f 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -312,7 +312,7 @@
 		       scan_notif->tsf_high, scan_notif->status);
 
 	IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
-		       (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+		       (priv->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
 		       jiffies_to_msecs(jiffies - priv->scan_start));
 
 	/*
@@ -362,9 +362,9 @@
 }
 
 static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
-				     enum ieee80211_band band, u8 n_probes)
+				     enum nl80211_band band, u8 n_probes)
 {
-	if (band == IEEE80211_BAND_5GHZ)
+	if (band == NL80211_BAND_5GHZ)
 		return IWL_ACTIVE_DWELL_TIME_52 +
 			IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
 	else
@@ -431,9 +431,9 @@
 }
 
 static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
-				      enum ieee80211_band band)
+				      enum nl80211_band band)
 {
-	u16 passive = (band == IEEE80211_BAND_2GHZ) ?
+	u16 passive = (band == NL80211_BAND_2GHZ) ?
 	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
 	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
 
@@ -442,7 +442,7 @@
 
 /* Return valid, unused, channel for a passive scan to reset the RF */
 static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
-					enum ieee80211_band band)
+					enum nl80211_band band)
 {
 	struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
 	struct iwl_rxon_context *ctx;
@@ -470,7 +470,7 @@
 
 static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
 					  struct ieee80211_vif *vif,
-					  enum ieee80211_band band,
+					  enum nl80211_band band,
 					  struct iwl_scan_channel *scan_ch)
 {
 	const struct ieee80211_supported_band *sband;
@@ -492,7 +492,7 @@
 			cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
 		/* Set txpower levels to defaults */
 		scan_ch->dsp_atten = 110;
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
 		else
 			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -505,7 +505,7 @@
 
 static int iwl_get_channels_for_scan(struct iwl_priv *priv,
 				     struct ieee80211_vif *vif,
-				     enum ieee80211_band band,
+				     enum nl80211_band band,
 				     u8 is_active, u8 n_probes,
 				     struct iwl_scan_channel *scan_ch)
 {
@@ -553,7 +553,7 @@
 		 * power level:
 		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
 		 */
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
 		else
 			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -636,7 +636,7 @@
 	u32 rate_flags = 0;
 	u16 cmd_len = 0;
 	u16 rx_chain = 0;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	u8 n_probes = 0;
 	u8 rx_ant = priv->nvm_data->valid_rx_ant;
 	u8 rate;
@@ -750,7 +750,7 @@
 	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 
 	switch (priv->scan_band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
 		chan_mod = le32_to_cpu(
 			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
@@ -771,7 +771,7 @@
 		    priv->lib->bt_params->advanced_bt_coexist)
 			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		rate = IWL_RATE_6M_PLCP;
 		break;
 	default:
@@ -809,7 +809,7 @@
 
 	band = priv->scan_band;
 
-	if (band == IEEE80211_BAND_2GHZ &&
+	if (band == NL80211_BAND_2GHZ &&
 	    priv->lib->bt_params &&
 	    priv->lib->bt_params->advanced_bt_coexist) {
 		/* transmit 2.4 GHz probes only on first antenna */
@@ -925,16 +925,16 @@
 void iwl_init_scan_params(struct iwl_priv *priv)
 {
 	u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1;
-	if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
-		priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
-	if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
-		priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+	if (!priv->scan_tx_ant[NL80211_BAND_5GHZ])
+		priv->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+	if (!priv->scan_tx_ant[NL80211_BAND_2GHZ])
+		priv->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
 }
 
 int __must_check iwl_scan_initiate(struct iwl_priv *priv,
 				   struct ieee80211_vif *vif,
 				   enum iwl_scan_type scan_type,
-				   enum ieee80211_band band)
+				   enum nl80211_band band)
 {
 	int ret;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 8e9768a..de6ec9b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -579,7 +579,7 @@
 
 	/* Set up the rate scaling to start at selected rate, fall back
 	 * all the way down to 1M in IEEE order, and then spin on 1M */
-	if (priv->band == IEEE80211_BAND_5GHZ)
+	if (priv->band == NL80211_BAND_5GHZ)
 		r = IWL_RATE_6M_INDEX;
 	else if (ctx && ctx->vif && ctx->vif->p2p)
 		r = IWL_RATE_6M_INDEX;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 59e2001..4b97371 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -81,7 +81,7 @@
 		tx_flags |= TX_CMD_FLG_TSF_MSK;
 	else if (ieee80211_is_back_req(fc))
 		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-	else if (info->band == IEEE80211_BAND_2GHZ &&
+	else if (info->band == NL80211_BAND_2GHZ &&
 		 priv->lib->bt_params &&
 		 priv->lib->bt_params->advanced_bt_coexist &&
 		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
@@ -177,7 +177,7 @@
 		rate_idx = rate_lowest_index(
 				&priv->nvm_data->bands[info->band], sta);
 	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-	if (info->band == IEEE80211_BAND_5GHZ)
+	if (info->band == NL80211_BAND_5GHZ)
 		rate_idx += IWL_FIRST_OFDM_RATE;
 	/* Get PLCP rate for tx_cmd->rate_n_flags */
 	rate_plcp = iwl_rates[rate_idx].plcp;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
index a90dbab..5c2aae6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
@@ -34,10 +34,6 @@
 #define IWL1000_UCODE_API_MAX 5
 #define IWL100_UCODE_API_MAX 5
 
-/* Oldest version we won't warn about */
-#define IWL1000_UCODE_API_OK 5
-#define IWL100_UCODE_API_OK 5
-
 /* Lowest firmware API version supported */
 #define IWL1000_UCODE_API_MIN 1
 #define IWL100_UCODE_API_MIN 5
@@ -68,7 +64,7 @@
 static const struct iwl_ht_params iwl1000_ht_params = {
 	.ht_greenfield_support = true,
 	.use_rts_for_aggregation = true, /* use rts/cts protection */
-	.ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+	.ht40_bands = BIT(NL80211_BAND_2GHZ),
 };
 
 static const struct iwl_eeprom_params iwl1000_eeprom_params = {
@@ -86,7 +82,6 @@
 #define IWL_DEVICE_1000						\
 	.fw_name_pre = IWL1000_FW_PRE,				\
 	.ucode_api_max = IWL1000_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL1000_UCODE_API_OK,			\
 	.ucode_api_min = IWL1000_UCODE_API_MIN,			\
 	.device_family = IWL_DEVICE_FAMILY_1000,		\
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
@@ -112,7 +107,6 @@
 #define IWL_DEVICE_100						\
 	.fw_name_pre = IWL100_FW_PRE,				\
 	.ucode_api_max = IWL100_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL100_UCODE_API_OK,			\
 	.ucode_api_min = IWL100_UCODE_API_MIN,			\
 	.device_family = IWL_DEVICE_FAMILY_100,			\
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
@@ -136,5 +130,5 @@
 	IWL_DEVICE_100,
 };
 
-MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
+MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
index a6da959..2e823bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
@@ -36,12 +36,6 @@
 #define IWL105_UCODE_API_MAX 6
 #define IWL135_UCODE_API_MAX 6
 
-/* Oldest version we won't warn about */
-#define IWL2030_UCODE_API_OK 6
-#define IWL2000_UCODE_API_OK 6
-#define IWL105_UCODE_API_OK 6
-#define IWL135_UCODE_API_OK 6
-
 /* Lowest firmware API version supported */
 #define IWL2030_UCODE_API_MIN 5
 #define IWL2000_UCODE_API_MIN 5
@@ -95,7 +89,7 @@
 static const struct iwl_ht_params iwl2000_ht_params = {
 	.ht_greenfield_support = true,
 	.use_rts_for_aggregation = true, /* use rts/cts protection */
-	.ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+	.ht40_bands = BIT(NL80211_BAND_2GHZ),
 };
 
 static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
@@ -114,7 +108,6 @@
 #define IWL_DEVICE_2000						\
 	.fw_name_pre = IWL2000_FW_PRE,				\
 	.ucode_api_max = IWL2000_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL2000_UCODE_API_OK,			\
 	.ucode_api_min = IWL2000_UCODE_API_MIN,			\
 	.device_family = IWL_DEVICE_FAMILY_2000,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
@@ -142,7 +135,6 @@
 #define IWL_DEVICE_2030						\
 	.fw_name_pre = IWL2030_FW_PRE,				\
 	.ucode_api_max = IWL2030_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL2030_UCODE_API_OK,			\
 	.ucode_api_min = IWL2030_UCODE_API_MIN,			\
 	.device_family = IWL_DEVICE_FAMILY_2030,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
@@ -163,7 +155,6 @@
 #define IWL_DEVICE_105						\
 	.fw_name_pre = IWL105_FW_PRE,				\
 	.ucode_api_max = IWL105_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL105_UCODE_API_OK,			\
 	.ucode_api_min = IWL105_UCODE_API_MIN,			\
 	.device_family = IWL_DEVICE_FAMILY_105,			\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
@@ -191,7 +182,6 @@
 #define IWL_DEVICE_135						\
 	.fw_name_pre = IWL135_FW_PRE,				\
 	.ucode_api_max = IWL135_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL135_UCODE_API_OK,			\
 	.ucode_api_min = IWL135_UCODE_API_MIN,			\
 	.device_family = IWL_DEVICE_FAMILY_135,			\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
@@ -210,7 +200,7 @@
 	.ht_params = &iwl2000_ht_params,
 };
 
-MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
-MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
-MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
index 8b5afde..4c3e3cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
@@ -34,10 +34,6 @@
 #define IWL5000_UCODE_API_MAX 5
 #define IWL5150_UCODE_API_MAX 2
 
-/* Oldest version we won't warn about */
-#define IWL5000_UCODE_API_OK 5
-#define IWL5150_UCODE_API_OK 2
-
 /* Lowest firmware API version supported */
 #define IWL5000_UCODE_API_MIN 1
 #define IWL5150_UCODE_API_MIN 1
@@ -66,7 +62,7 @@
 
 static const struct iwl_ht_params iwl5000_ht_params = {
 	.ht_greenfield_support = true,
-	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_eeprom_params iwl5000_eeprom_params = {
@@ -84,7 +80,6 @@
 #define IWL_DEVICE_5000						\
 	.fw_name_pre = IWL5000_FW_PRE,				\
 	.ucode_api_max = IWL5000_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL5000_UCODE_API_OK,			\
 	.ucode_api_min = IWL5000_UCODE_API_MIN,			\
 	.device_family = IWL_DEVICE_FAMILY_5000,		\
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
@@ -132,7 +127,6 @@
 	.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
 	.fw_name_pre = IWL5000_FW_PRE,
 	.ucode_api_max = IWL5000_UCODE_API_MAX,
-	.ucode_api_ok = IWL5000_UCODE_API_OK,
 	.ucode_api_min = IWL5000_UCODE_API_MIN,
 	.device_family = IWL_DEVICE_FAMILY_5000,
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,
@@ -149,7 +143,6 @@
 #define IWL_DEVICE_5150						\
 	.fw_name_pre = IWL5150_FW_PRE,				\
 	.ucode_api_max = IWL5150_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL5150_UCODE_API_OK,			\
 	.ucode_api_min = IWL5150_UCODE_API_MIN,			\
 	.device_family = IWL_DEVICE_FAMILY_5150,		\
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
@@ -174,5 +167,5 @@
 	IWL_DEVICE_5150,
 };
 
-MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
+MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
index 0b4ba78..5a7b7e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
@@ -36,13 +36,6 @@
 #define IWL6000G2_UCODE_API_MAX 6
 #define IWL6035_UCODE_API_MAX 6
 
-/* Oldest version we won't warn about */
-#define IWL6000_UCODE_API_OK 4
-#define IWL6000G2_UCODE_API_OK 5
-#define IWL6050_UCODE_API_OK 5
-#define IWL6000G2B_UCODE_API_OK 6
-#define IWL6035_UCODE_API_OK 6
-
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
 #define IWL6050_UCODE_API_MIN 4
@@ -117,7 +110,7 @@
 static const struct iwl_ht_params iwl6000_ht_params = {
 	.ht_greenfield_support = true,
 	.use_rts_for_aggregation = true, /* use rts/cts protection */
-	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_eeprom_params iwl6000_eeprom_params = {
@@ -136,7 +129,6 @@
 #define IWL_DEVICE_6005						\
 	.fw_name_pre = IWL6005_FW_PRE,				\
 	.ucode_api_max = IWL6000G2_UCODE_API_MAX,		\
-	.ucode_api_ok = IWL6000G2_UCODE_API_OK,			\
 	.ucode_api_min = IWL6000G2_UCODE_API_MIN,		\
 	.device_family = IWL_DEVICE_FAMILY_6005,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
@@ -191,7 +183,6 @@
 #define IWL_DEVICE_6030						\
 	.fw_name_pre = IWL6030_FW_PRE,				\
 	.ucode_api_max = IWL6000G2_UCODE_API_MAX,		\
-	.ucode_api_ok = IWL6000G2B_UCODE_API_OK,		\
 	.ucode_api_min = IWL6000G2_UCODE_API_MIN,		\
 	.device_family = IWL_DEVICE_FAMILY_6030,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
@@ -228,7 +219,6 @@
 #define IWL_DEVICE_6035						\
 	.fw_name_pre = IWL6030_FW_PRE,				\
 	.ucode_api_max = IWL6035_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL6035_UCODE_API_OK,			\
 	.ucode_api_min = IWL6035_UCODE_API_MIN,			\
 	.device_family = IWL_DEVICE_FAMILY_6030,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
@@ -282,7 +272,6 @@
 #define IWL_DEVICE_6000i					\
 	.fw_name_pre = IWL6000_FW_PRE,				\
 	.ucode_api_max = IWL6000_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL6000_UCODE_API_OK,			\
 	.ucode_api_min = IWL6000_UCODE_API_MIN,			\
 	.device_family = IWL_DEVICE_FAMILY_6000i,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
@@ -370,7 +359,6 @@
 	.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
 	.fw_name_pre = IWL6000_FW_PRE,
 	.ucode_api_max = IWL6000_UCODE_API_MAX,
-	.ucode_api_ok = IWL6000_UCODE_API_OK,
 	.ucode_api_min = IWL6000_UCODE_API_MIN,
 	.device_family = IWL_DEVICE_FAMILY_6000,
 	.max_inst_size = IWL60_RTC_INST_SIZE,
@@ -383,7 +371,7 @@
 	.led_mode = IWL_LED_BLINK,
 };
 
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index fc475ce..abd2904 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -76,16 +76,10 @@
 #define IWL7265D_UCODE_API_MAX	21
 #define IWL3168_UCODE_API_MAX	21
 
-/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK	13
-#define IWL7265_UCODE_API_OK	13
-#define IWL7265D_UCODE_API_OK	13
-#define IWL3168_UCODE_API_OK	20
-
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN	13
-#define IWL7265_UCODE_API_MIN	13
-#define IWL7265D_UCODE_API_MIN	13
+#define IWL7260_UCODE_API_MIN	16
+#define IWL7265_UCODE_API_MIN	16
+#define IWL7265D_UCODE_API_MIN	16
 #define IWL3168_UCODE_API_MIN	20
 
 /* NVM versions */
@@ -162,7 +156,7 @@
 
 static const struct iwl_ht_params iwl7000_ht_params = {
 	.stbc = true,
-	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 #define IWL_DEVICE_7000_COMMON					\
@@ -179,25 +173,21 @@
 #define IWL_DEVICE_7000						\
 	IWL_DEVICE_7000_COMMON,					\
 	.ucode_api_max = IWL7260_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL7260_UCODE_API_OK,			\
 	.ucode_api_min = IWL7260_UCODE_API_MIN
 
 #define IWL_DEVICE_7005						\
 	IWL_DEVICE_7000_COMMON,					\
 	.ucode_api_max = IWL7265_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL7265_UCODE_API_OK,			\
 	.ucode_api_min = IWL7265_UCODE_API_MIN
 
 #define IWL_DEVICE_3008						\
 	IWL_DEVICE_7000_COMMON,					\
 	.ucode_api_max = IWL3168_UCODE_API_MAX,			\
-	.ucode_api_ok = IWL3168_UCODE_API_OK,			\
 	.ucode_api_min = IWL3168_UCODE_API_MIN
 
 #define IWL_DEVICE_7005D					\
 	IWL_DEVICE_7000_COMMON,					\
 	.ucode_api_max = IWL7265D_UCODE_API_MAX,		\
-	.ucode_api_ok = IWL7265D_UCODE_API_OK,			\
 	.ucode_api_min = IWL7265D_UCODE_API_MIN
 
 const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -297,7 +287,7 @@
 static const struct iwl_ht_params iwl7265_ht_params = {
 	.stbc = true,
 	.ldpc = true,
-	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 const struct iwl_cfg iwl3165_2ac_cfg = {
@@ -388,8 +378,8 @@
 	.dccm_len = IWL7265_DCCM_LEN,
 };
 
-MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK));
-MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
-MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
+MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 97be104..2d20556 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -73,12 +73,8 @@
 #define IWL8000_UCODE_API_MAX	21
 #define IWL8265_UCODE_API_MAX	21
 
-/* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK	13
-#define IWL8265_UCODE_API_OK	20
-
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN	13
+#define IWL8000_UCODE_API_MIN	16
 #define IWL8265_UCODE_API_MIN	20
 
 /* NVM versions */
@@ -93,7 +89,7 @@
 #define IWL8260_SMEM_OFFSET		0x400000
 #define IWL8260_SMEM_LEN		0x68000
 
-#define IWL8000_FW_PRE "iwlwifi-8000"
+#define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
 	IWL8000_FW_PRE "-" __stringify(api) ".ucode"
 
@@ -128,7 +124,7 @@
 static const struct iwl_ht_params iwl8000_ht_params = {
 	.stbc = true,
 	.ldpc = true,
-	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_tt_params iwl8000_tt_params = {
@@ -175,19 +171,16 @@
 #define IWL_DEVICE_8000							\
 	IWL_DEVICE_8000_COMMON,						\
 	.ucode_api_max = IWL8000_UCODE_API_MAX,				\
-	.ucode_api_ok = IWL8000_UCODE_API_OK,				\
 	.ucode_api_min = IWL8000_UCODE_API_MIN				\
 
 #define IWL_DEVICE_8260							\
 	IWL_DEVICE_8000_COMMON,						\
 	.ucode_api_max = IWL8000_UCODE_API_MAX,				\
-	.ucode_api_ok = IWL8000_UCODE_API_OK,				\
 	.ucode_api_min = IWL8000_UCODE_API_MIN				\
 
 #define IWL_DEVICE_8265							\
 	IWL_DEVICE_8000_COMMON,						\
 	.ucode_api_max = IWL8265_UCODE_API_MAX,				\
-	.ucode_api_ok = IWL8265_UCODE_API_OK,				\
 	.ucode_api_min = IWL8265_UCODE_API_MIN				\
 
 const struct iwl_cfg iwl8260_2n_cfg = {
@@ -259,5 +252,5 @@
 	.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
 };
 
-MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK));
+MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index 318b1dc..b9aca37 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -57,11 +57,8 @@
 /* Highest firmware API version supported */
 #define IWL9000_UCODE_API_MAX	21
 
-/* Oldest version we won't warn about */
-#define IWL9000_UCODE_API_OK	13
-
 /* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN	13
+#define IWL9000_UCODE_API_MIN	16
 
 /* NVM versions */
 #define IWL9000_NVM_VERSION		0x0a1d
@@ -96,7 +93,7 @@
 static const struct iwl_ht_params iwl9000_ht_params = {
 	.stbc = true,
 	.ldpc = true,
-	.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
 };
 
 static const struct iwl_tt_params iwl9000_tt_params = {
@@ -122,7 +119,6 @@
 
 #define IWL_DEVICE_9000							\
 	.ucode_api_max = IWL9000_UCODE_API_MAX,				\
-	.ucode_api_ok = IWL9000_UCODE_API_OK,				\
 	.ucode_api_min = IWL9000_UCODE_API_MIN,				\
 	.device_family = IWL_DEVICE_FAMILY_8000,			\
 	.max_inst_size = IWL60_RTC_INST_SIZE,				\
@@ -137,14 +133,15 @@
 	.dccm2_len = IWL9000_DCCM2_LEN,					\
 	.smem_offset = IWL9000_SMEM_OFFSET,				\
 	.smem_len = IWL9000_SMEM_LEN,					\
+	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,		\
 	.thermal_params = &iwl9000_tt_params,				\
 	.apmg_not_supported = true,					\
 	.mq_rx_supported = true,					\
 	.vht_mu_mimo_supported = true,					\
 	.mac_addr_from_csr = true
 
-const struct iwl_cfg iwl9260_2ac_cfg = {
-		.name = "Intel(R) Dual Band Wireless AC 9260",
+const struct iwl_cfg iwl9560_2ac_cfg = {
+		.name = "Intel(R) Dual Band Wireless AC 9560",
 		.fw_name_pre = IWL9000_FW_PRE,
 		IWL_DEVICE_9000,
 		.ht_params = &iwl9000_ht_params,
@@ -163,4 +160,4 @@
 		.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 3e4d346..7206798 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -131,6 +131,8 @@
 #define IWL_MAX_WD_TIMEOUT	120000
 
 #define IWL_DEFAULT_MAX_TX_POWER 22
+#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+				 NETIF_F_TSO | NETIF_F_TSO6)
 
 /* Antenna presence definitions */
 #define	ANT_NONE	0x0
@@ -183,7 +185,7 @@
  * @stbc: support Tx STBC and 1*SS Rx STBC
  * @ldpc: support Tx/Rx with LDPC
  * @use_rts_for_aggregation: use rts/cts protection for HT traffic
- * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
+ * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40
  */
 struct iwl_ht_params {
 	enum ieee80211_smps_mode smps_mode;
@@ -277,8 +279,6 @@
  *	(.ucode) will be added to filename before loading from disk. The
  *	filename is constructed as fw_name_pre<api>.ucode.
  * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- *	without a warning, for use in transitions
  * @ucode_api_min: Lowest version of uCode API supported by driver.
  * @max_inst_size: The maximal length of the fw inst section
  * @max_data_size: The maximal length of the fw data section
@@ -324,7 +324,6 @@
 	const char *name;
 	const char *fw_name_pre;
 	const unsigned int ucode_api_max;
-	const unsigned int ucode_api_ok;
 	const unsigned int ucode_api_min;
 	const enum iwl_device_family device_family;
 	const u32 max_data_size;
@@ -439,7 +438,7 @@
 extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg;
 extern const struct iwl_cfg iwl5165_2ac_cfg;
 #endif /* CONFIG_IWLMVM */
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index f899666..ff18b06 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -179,6 +179,8 @@
 		kfree(drv->fw.dbg_conf_tlv[i]);
 	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
 		kfree(drv->fw.dbg_trigger_tlv[i]);
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
+		kfree(drv->fw.dbg_mem_tlv[i]);
 
 	for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
 		iwl_free_fw_img(drv, drv->fw.img + i);
@@ -238,19 +240,6 @@
 	snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
 		 name_pre, tag);
 
-	/*
-	 * Starting 8000B - FW name format has changed. This overwrites the
-	 * previous name and uses the new format.
-	 */
-	if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-		char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
-
-		if (rev_step != 'A')
-			snprintf(drv->firmware_name,
-				 sizeof(drv->firmware_name), "%s%c-%s.ucode",
-				 name_pre, rev_step, tag);
-	}
-
 	IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
 		       (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
 				? "EXPERIMENTAL " : "",
@@ -297,6 +286,7 @@
 	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
 };
 
 /*
@@ -1041,6 +1031,37 @@
 			iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
 			gscan_capa = true;
 			break;
+		case IWL_UCODE_TLV_FW_MEM_SEG: {
+			struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
+				(void *)tlv_data;
+			u32 type;
+
+			if (tlv_len != (sizeof(*dbg_mem)))
+				goto invalid_tlv_len;
+
+			type = le32_to_cpu(dbg_mem->data_type);
+			drv->fw.dbg_dynamic_mem = true;
+
+			if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
+				IWL_ERR(drv,
+					"Skip unknown dbg mem segment: %u\n",
+					dbg_mem->data_type);
+				break;
+			}
+
+			if (pieces->dbg_mem_tlv[type]) {
+				IWL_ERR(drv,
+					"Ignore duplicate mem segment: %u\n",
+					dbg_mem->data_type);
+				break;
+			}
+
+			IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
+				       dbg_mem->data_type);
+
+			pieces->dbg_mem_tlv[type] = dbg_mem;
+			break;
+			}
 		default:
 			IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
 			break;
@@ -1060,11 +1081,18 @@
 		return -EINVAL;
 	}
 
-	if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
-		 !gscan_capa,
-		 "GSCAN is supported but capabilities TLV is unavailable\n"))
+	/*
+	 * If ucode advertises that it supports GSCAN but GSCAN
+	 * capabilities TLV is not present, or if it has an old format,
+	 * warn and continue without GSCAN.
+	 */
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+	    !gscan_capa) {
+		IWL_DEBUG_INFO(drv,
+			       "GSCAN is supported but capabilities TLV is unavailable\n");
 		__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
 			    capa->_capa);
+	}
 
 	return 0;
 
@@ -1199,7 +1227,6 @@
 	int err;
 	struct iwl_firmware_pieces *pieces;
 	const unsigned int api_max = drv->cfg->ucode_api_max;
-	unsigned int api_ok = drv->cfg->ucode_api_ok;
 	const unsigned int api_min = drv->cfg->ucode_api_min;
 	size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
 	u32 api_ver;
@@ -1212,20 +1239,12 @@
 			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
 	fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
 
-	if (!api_ok)
-		api_ok = api_max;
-
 	pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
 	if (!pieces)
 		return;
 
-	if (!ucode_raw) {
-		if (drv->fw_index <= api_ok)
-			IWL_ERR(drv,
-				"request for firmware file '%s' failed.\n",
-				drv->firmware_name);
+	if (!ucode_raw)
 		goto try_again;
-	}
 
 	IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
 		       drv->firmware_name, ucode_raw->size);
@@ -1267,19 +1286,6 @@
 				api_max, api_ver);
 			goto try_again;
 		}
-
-		if (api_ver < api_ok) {
-			if (api_ok != api_max)
-				IWL_ERR(drv, "Firmware has old API version, "
-					"expected v%u through v%u, got v%u.\n",
-					api_ok, api_max, api_ver);
-			else
-				IWL_ERR(drv, "Firmware has old API version, "
-					"expected v%u, got v%u.\n",
-					api_max, api_ver);
-			IWL_ERR(drv, "New firmware can be obtained from "
-				      "http://www.intellinuxwireless.org/.\n");
-		}
 	}
 
 	/*
@@ -1368,6 +1374,17 @@
 		}
 	}
 
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
+		if (pieces->dbg_mem_tlv[i]) {
+			drv->fw.dbg_mem_tlv[i] =
+				kmemdup(pieces->dbg_mem_tlv[i],
+					sizeof(*drv->fw.dbg_mem_tlv[i]),
+					GFP_KERNEL);
+			if (!drv->fw.dbg_mem_tlv[i])
+				goto out_free_fw;
+		}
+	}
+
 	/* Now that we can no longer fail, copy information */
 
 	/*
@@ -1560,9 +1577,7 @@
 	.power_level = IWL_POWER_INDEX_1,
 	.d0i3_disable = true,
 	.d0i3_entry_delay = 1000,
-#ifndef CONFIG_IWLWIFI_UAPSD
-	.uapsd_disable = true,
-#endif /* CONFIG_IWLWIFI_UAPSD */
+	.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
 	/* the rest are 0 by default */
 };
 IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1681,12 +1696,9 @@
 MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
 
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
-		   bool, S_IRUGO | S_IWUSR);
-#ifdef CONFIG_IWLWIFI_UAPSD
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
-#else
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)");
-#endif
+		   uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(uapsd_disable,
+		 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
 
 /*
  * set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index c15f5be..bf1b69a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -390,10 +390,10 @@
 				int n_channels, s8 max_txpower_avg)
 {
 	int ch_idx;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
-		IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+		NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
 
 	for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
 		struct ieee80211_channel *chan = &data->channels[ch_idx];
@@ -526,7 +526,7 @@
 
 static void iwl_mod_ht40_chan_info(struct device *dev,
 				   struct iwl_nvm_data *data, int n_channels,
-				   enum ieee80211_band band, u16 channel,
+				   enum nl80211_band band, u16 channel,
 				   const struct iwl_eeprom_channel *eeprom_ch,
 				   u8 clear_ht40_extension_channel)
 {
@@ -548,7 +548,7 @@
 	IWL_DEBUG_EEPROM(dev,
 			 "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
 			 channel,
-			 band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+			 band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
 			 CHECK_AND_PRINT(IBSS),
 			 CHECK_AND_PRINT(ACTIVE),
 			 CHECK_AND_PRINT(RADAR),
@@ -606,8 +606,8 @@
 			n_channels++;
 
 			channel->hw_value = eeprom_ch_array[ch_idx];
-			channel->band = (band == 1) ? IEEE80211_BAND_2GHZ
-						    : IEEE80211_BAND_5GHZ;
+			channel->band = (band == 1) ? NL80211_BAND_2GHZ
+						    : NL80211_BAND_5GHZ;
 			channel->center_freq =
 				ieee80211_channel_to_frequency(
 					channel->hw_value, channel->band);
@@ -677,15 +677,15 @@
 
 	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
 	for (band = 6; band <= 7; band++) {
-		enum ieee80211_band ieeeband;
+		enum nl80211_band ieeeband;
 
 		iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
 					&eeprom_ch_count, &eeprom_ch_info,
 					&eeprom_ch_array);
 
 		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
-		ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ
-				       : IEEE80211_BAND_5GHZ;
+		ieeeband = (band == 6) ? NL80211_BAND_2GHZ
+				       : NL80211_BAND_5GHZ;
 
 		/* Loop through each band adding each of the channels */
 		for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
@@ -708,7 +708,7 @@
 
 int iwl_init_sband_channels(struct iwl_nvm_data *data,
 			    struct ieee80211_supported_band *sband,
-			    int n_channels, enum ieee80211_band band)
+			    int n_channels, enum nl80211_band band)
 {
 	struct ieee80211_channel *chan = &data->channels[0];
 	int n = 0, idx = 0;
@@ -734,7 +734,7 @@
 void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
 			  struct iwl_nvm_data *data,
 			  struct ieee80211_sta_ht_cap *ht_info,
-			  enum ieee80211_band band,
+			  enum nl80211_band band,
 			  u8 tx_chains, u8 rx_chains)
 {
 	int max_bit_rate = 0;
@@ -813,22 +813,22 @@
 	int n_used = 0;
 	struct ieee80211_supported_band *sband;
 
-	sband = &data->bands[IEEE80211_BAND_2GHZ];
-	sband->band = IEEE80211_BAND_2GHZ;
+	sband = &data->bands[NL80211_BAND_2GHZ];
+	sband->band = NL80211_BAND_2GHZ;
 	sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
 	sband->n_bitrates = N_RATES_24;
 	n_used += iwl_init_sband_channels(data, sband, n_channels,
-					  IEEE80211_BAND_2GHZ);
-	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+					  NL80211_BAND_2GHZ);
+	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
 			     data->valid_tx_ant, data->valid_rx_ant);
 
-	sband = &data->bands[IEEE80211_BAND_5GHZ];
-	sband->band = IEEE80211_BAND_5GHZ;
+	sband = &data->bands[NL80211_BAND_5GHZ];
+	sband->band = NL80211_BAND_5GHZ;
 	sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
 	sband->n_bitrates = N_RATES_52;
 	n_used += iwl_init_sband_channels(data, sband, n_channels,
-					  IEEE80211_BAND_5GHZ);
-	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+					  NL80211_BAND_5GHZ);
+	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
 			     data->valid_tx_ant, data->valid_rx_ant);
 
 	if (n_channels != n_used)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index ad2b834..53f39a3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -98,7 +98,7 @@
 	s8 max_tx_pwr_half_dbm;
 
 	bool lar_enabled;
-	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+	struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
 	struct ieee80211_channel channels[];
 };
 
@@ -133,12 +133,12 @@
 
 int iwl_init_sband_channels(struct iwl_nvm_data *data,
 			    struct ieee80211_supported_band *sband,
-			    int n_channels, enum ieee80211_band band);
+			    int n_channels, enum nl80211_band band);
 
 void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
 			  struct iwl_nvm_data *data,
 			  struct ieee80211_sta_ht_cap *ht_info,
-			  enum ieee80211_band band,
+			  enum nl80211_band band,
 			  u8 tx_chains, u8 rx_chains);
 
 #endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
index 8425e1a..09b7ea2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
@@ -105,6 +105,7 @@
 	IWL_FW_ERROR_DUMP_RB = 11,
 	IWL_FW_ERROR_DUMP_PAGING = 12,
 	IWL_FW_ERROR_DUMP_RADIO_REG = 13,
+	IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
 
 	IWL_FW_ERROR_DUMP_MAX,
 };
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 15ec4e2..37dc09e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -142,6 +142,7 @@
 	IWL_UCODE_TLV_FW_DBG_CONF	= 39,
 	IWL_UCODE_TLV_FW_DBG_TRIGGER	= 40,
 	IWL_UCODE_TLV_FW_GSCAN_CAPA	= 50,
+	IWL_UCODE_TLV_FW_MEM_SEG	= 51,
 };
 
 struct iwl_ucode_tlv {
@@ -245,7 +246,6 @@
 
 /**
  * enum iwl_ucode_tlv_api - ucode api
- * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *	longer than the passive one, which is essential for fragmented scan.
  * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
@@ -260,12 +260,11 @@
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
 enum iwl_ucode_tlv_api {
-	IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
 	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= (__force iwl_ucode_tlv_api_t)8,
 	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE	= (__force iwl_ucode_tlv_api_t)9,
 	IWL_UCODE_TLV_API_WIDE_CMD_HDR		= (__force iwl_ucode_tlv_api_t)14,
 	IWL_UCODE_TLV_API_LQ_SS_PARAMS		= (__force iwl_ucode_tlv_api_t)18,
-	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
+	IWL_UCODE_TLV_API_NEW_VERSION           = (__force iwl_ucode_tlv_api_t)20,
 	IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY	= (__force iwl_ucode_tlv_api_t)24,
 	IWL_UCODE_TLV_API_TX_POWER_CHAIN	= (__force iwl_ucode_tlv_api_t)27,
 
@@ -324,6 +323,9 @@
  * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
  * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
  *	regular image.
+ * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ *	memory addresses from the firmware.
+ * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
  *
  * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
@@ -361,6 +363,8 @@
 	IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT	= (__force iwl_ucode_tlv_capa_t)75,
 	IWL_UCODE_TLV_CAPA_CTDP_SUPPORT			= (__force iwl_ucode_tlv_capa_t)76,
 	IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED		= (__force iwl_ucode_tlv_capa_t)77,
+	IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG	= (__force iwl_ucode_tlv_capa_t)80,
+	IWL_UCODE_TLV_CAPA_LQM_SUPPORT			= (__force iwl_ucode_tlv_capa_t)81,
 
 	NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
@@ -491,6 +495,37 @@
 };
 
 /**
+ * enum iwl_fw_mem_seg_type - data types for dumping on error
+ *
+ * @FW_DBG_MEM_SMEM: the data type is SMEM
+ * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC
+ * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC
+ */
+enum iwl_fw_dbg_mem_seg_type {
+	FW_DBG_MEM_DCCM_LMAC = 0,
+	FW_DBG_MEM_DCCM_UMAC,
+	FW_DBG_MEM_SMEM,
+
+	/* Must be last */
+	FW_DBG_MEM_MAX,
+};
+
+/**
+ * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
+ *
+ * @data_type: enum %iwl_fw_mem_seg_type
+ * @ofs: the memory segment offset
+ * @len: the memory segment length, in bytes
+ *
+ * This is parsed from the IWL_UCODE_TLV_FW_MEM_SEG TLV
+ */
+struct iwl_fw_dbg_mem_seg_tlv {
+	__le32 data_type;
+	__le32 ofs;
+	__le32 len;
+} __packed;
+
+/**
  * struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
  *
  * @version: version of the TLV - currently 0
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
index 2942571..e461d63 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -286,6 +286,8 @@
 	struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
 	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
+	bool dbg_dynamic_mem;
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 	u8 dbg_dest_reg_num;
 	struct iwl_gscan_capabilities gscan_capa;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index d1a5dd1..6c5c2f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -92,6 +92,11 @@
 	IWL_AMSDU_12K = 2,
 };
 
+enum iwl_uapsd_disable {
+	IWL_DISABLE_UAPSD_BSS		= BIT(0),
+	IWL_DISABLE_UAPSD_P2P_CLIENT	= BIT(1),
+};
+
 /**
  * struct iwl_mod_params
  *
@@ -109,7 +114,8 @@
  * @debug_level: levels are IWL_DL_*
  * @ant_coupling: antenna coupling in dB, default = 0
  * @nvm_file: specifies a external NVM file
- * @uapsd_disable: disable U-APSD, default = 1
+ * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
+ *	IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
  * @d0i3_disable: disable d0i3, default = 1,
  * @d0i3_entry_delay: time to wait after no refs are taken before
  *	entering D0i3 (in msecs)
@@ -131,7 +137,7 @@
 #endif
 	int ant_coupling;
 	char *nvm_file;
-	bool uapsd_disable;
+	u32 uapsd_disable;
 	bool d0i3_disable;
 	unsigned int d0i3_entry_delay;
 	bool lar_disable;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 93a6895..14743c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -308,7 +308,7 @@
 
 		channel->hw_value = nvm_chan[ch_idx];
 		channel->band = (ch_idx < num_2ghz_channels) ?
-				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+				NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 		channel->center_freq =
 			ieee80211_channel_to_frequency(
 				channel->hw_value, channel->band);
@@ -320,7 +320,7 @@
 		 * is not used in mvm, and is used for backwards compatibility
 		 */
 		channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
-		is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+		is_5ghz = channel->band == NL80211_BAND_5GHZ;
 
 		/* don't put limitations in case we're using LAR */
 		if (!lar_supported)
@@ -439,22 +439,22 @@
 				&ch_section[NVM_CHANNELS_FAMILY_8000],
 				lar_supported);
 
-	sband = &data->bands[IEEE80211_BAND_2GHZ];
-	sband->band = IEEE80211_BAND_2GHZ;
+	sband = &data->bands[NL80211_BAND_2GHZ];
+	sband->band = NL80211_BAND_2GHZ;
 	sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
 	sband->n_bitrates = N_RATES_24;
 	n_used += iwl_init_sband_channels(data, sband, n_channels,
-					  IEEE80211_BAND_2GHZ);
-	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+					  NL80211_BAND_2GHZ);
+	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
 			     tx_chains, rx_chains);
 
-	sband = &data->bands[IEEE80211_BAND_5GHZ];
-	sband->band = IEEE80211_BAND_5GHZ;
+	sband = &data->bands[NL80211_BAND_5GHZ];
+	sband->band = NL80211_BAND_5GHZ;
 	sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
 	sband->n_bitrates = N_RATES_52;
 	n_used += iwl_init_sband_channels(data, sband, n_channels,
-					  IEEE80211_BAND_5GHZ);
-	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+					  NL80211_BAND_5GHZ);
+	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
 			     tx_chains, rx_chains);
 	if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
 		iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
@@ -781,7 +781,7 @@
 	struct ieee80211_regdomain *regd;
 	int size_of_regd;
 	struct ieee80211_reg_rule *rule;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int center_freq, prev_center_freq = 0;
 	int valid_rules = 0;
 	bool new_rule;
@@ -809,7 +809,7 @@
 	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
 		ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
 		band = (ch_idx < NUM_2GHZ_CHANNELS) ?
-		       IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+		       NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 		center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
 							     band);
 		new_rule = false;
@@ -857,7 +857,7 @@
 		IWL_DEBUG_DEV(dev, IWL_DL_LAR,
 			      "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
 			      center_freq,
-			      band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+			      band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
 			      CHECK_AND_PRINT_I(VALID),
 			      CHECK_AND_PRINT_I(ACTIVE),
 			      CHECK_AND_PRINT_I(RADAR),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index c46e596..6c1d20d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -345,6 +347,16 @@
 #define TXF_READ_MODIFY_DATA		(0xa00448)
 #define TXF_READ_MODIFY_ADDR		(0xa0044c)
 
+/* UMAC Internal Tx Fifo */
+#define TXF_CPU2_FIFO_ITEM_CNT		(0xA00538)
+#define TXF_CPU2_WR_PTR		(0xA00514)
+#define TXF_CPU2_RD_PTR		(0xA00510)
+#define TXF_CPU2_FENCE_PTR		(0xA00518)
+#define TXF_CPU2_LOCK_FENCE		(0xA00524)
+#define TXF_CPU2_NUM			(0xA0053C)
+#define TXF_CPU2_READ_MODIFY_DATA	(0xA00548)
+#define TXF_CPU2_READ_MODIFY_ADDR	(0xA0054C)
+
 /* Radio registers access */
 #define RSP_RADIO_CMD			(0xa02804)
 #define RSP_RADIO_RDDAT			(0xa02814)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 91d74b3..fa4ab4b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -519,7 +521,7 @@
 
 struct iwl_trans_txq_scd_cfg {
 	u8 fifo;
-	s8 sta_id;
+	u8 sta_id;
 	u8 tid;
 	bool aggregate;
 	int frame_limit;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 23e7e29..2e06dfc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
 iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o coex.o coex_legacy.o
+iwlmvm-y += power.o coex.o
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 2e098f8..a63f5bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -378,7 +378,7 @@
 	chanctx_conf = rcu_dereference(vif->chanctx_conf);
 
 	if (!chanctx_conf ||
-	     chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+	     chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
 		rcu_read_unlock();
 		return BT_COEX_INVALID_LUT;
 	}
@@ -411,9 +411,6 @@
 	struct iwl_bt_coex_cmd bt_cmd = {};
 	u32 mode;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_send_bt_init_conf_old(mvm);
-
 	lockdep_assert_held(&mvm->mutex);
 
 	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -540,7 +537,7 @@
 
 	/* If channel context is invalid or not on 2.4GHz .. */
 	if ((!chanctx_conf ||
-	     chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
+	     chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) {
 		if (vif->type == NL80211_IFTYPE_STATION) {
 			/* ... relax constraints and disable rssi events */
 			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
@@ -728,12 +725,6 @@
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-		iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
-		return;
-	}
-
 	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
 	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
 	IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
@@ -755,12 +746,6 @@
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	int ret;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-		iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
-		return;
-	}
-
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Ignore updates if we are in force mode */
@@ -807,9 +792,6 @@
 	struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
 	enum iwl_bt_coex_lut_type lut_type;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
-
 	if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
 		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
 
@@ -834,9 +816,6 @@
 	struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
 	enum iwl_bt_coex_lut_type lut_type;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
-
 	if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
 		return true;
 
@@ -864,9 +843,6 @@
 	if (ant & mvm->cfg->non_shared_ant)
 		return true;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
 	return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
 		BT_HIGH_TRAFFIC;
 }
@@ -877,21 +853,15 @@
 	if (mvm->cfg->bt_shared_single_ant)
 		return true;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
 	return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
 }
 
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
-				    enum ieee80211_band band)
+				    enum nl80211_band band)
 {
 	u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
-
-	if (band != IEEE80211_BAND_2GHZ)
+	if (band != NL80211_BAND_2GHZ)
 		return false;
 
 	return bt_activity >= BT_LOW_TRAFFIC;
@@ -903,7 +873,7 @@
 	__le16 fc = hdr->frame_control;
 	bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
 
-	if (info->band != IEEE80211_BAND_2GHZ)
+	if (info->band != NL80211_BAND_2GHZ)
 		return 0;
 
 	if (unlikely(mvm->bt_tx_prio))
@@ -937,12 +907,6 @@
 
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
-	if (!fw_has_api(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-		iwl_mvm_bt_coex_vif_change_old(mvm);
-		return;
-	}
-
 	iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
@@ -955,12 +919,6 @@
 	u8 __maybe_unused lower_bound, upper_bound;
 	u8 lut;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-		iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
-		return;
-	}
-
 	if (!iwl_mvm_bt_is_plcr_supported(mvm))
 		return;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
deleted file mode 100644
index 0150457..0000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
+++ /dev/null
@@ -1,1315 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- *  Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/ieee80211.h>
-#include <linux/etherdevice.h>
-#include <net/mac80211.h>
-
-#include "fw-api-coex.h"
-#include "iwl-modparams.h"
-#include "mvm.h"
-#include "iwl-debug.h"
-
-#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant)			\
-	[(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) |	\
-		   ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
-
-static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
-	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
-		       BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
-	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
-		       BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
-	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
-		       BT_COEX_PRIO_TBL_PRIO_LOW, 0),
-	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
-		       BT_COEX_PRIO_TBL_PRIO_LOW, 1),
-	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
-		       BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
-	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
-		       BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
-	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
-		       BT_COEX_PRIO_TBL_DISABLED, 0),
-	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
-		       BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
-	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
-		       BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
-	EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
-		       BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
-	0, 0, 0, 0, 0, 0,
-};
-
-#undef EVENT_PRIO_ANT
-
-static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
-{
-	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-		return 0;
-
-	return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
-				    sizeof(struct iwl_bt_coex_prio_tbl_cmd),
-				    &iwl_bt_prio_tbl);
-}
-
-static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
-	cpu_to_le32(0xf0f0f0f0), /* 50% */
-	cpu_to_le32(0xc0c0c0c0), /* 25% */
-	cpu_to_le32(0xfcfcfcfc), /* 75% */
-	cpu_to_le32(0xfefefefe), /* 87.5% */
-};
-
-static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-	{
-		cpu_to_le32(0x40000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0x44000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0x40000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0x44000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0xc0004000),
-		cpu_to_le32(0xf0005000),
-		cpu_to_le32(0xc0004000),
-		cpu_to_le32(0xf0005000),
-	},
-	{
-		cpu_to_le32(0x40000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0x44000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0x40000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0x44000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0xc0004000),
-		cpu_to_le32(0xf0005000),
-		cpu_to_le32(0xc0004000),
-		cpu_to_le32(0xf0005000),
-	},
-	{
-		cpu_to_le32(0x40000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0x44000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0x40000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0x44000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0xc0004000),
-		cpu_to_le32(0xf0005000),
-		cpu_to_le32(0xc0004000),
-		cpu_to_le32(0xf0005000),
-	},
-};
-
-static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-	{
-		/* Tight */
-		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xaeaaaaaa),
-		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xcc00ff28),
-		cpu_to_le32(0x0000aaaa),
-		cpu_to_le32(0xcc00aaaa),
-		cpu_to_le32(0x0000aaaa),
-		cpu_to_le32(0xc0004000),
-		cpu_to_le32(0x00004000),
-		cpu_to_le32(0xf0005000),
-		cpu_to_le32(0xf0005000),
-	},
-	{
-		/* Loose */
-		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xcc00ff28),
-		cpu_to_le32(0x0000aaaa),
-		cpu_to_le32(0xcc00aaaa),
-		cpu_to_le32(0x0000aaaa),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0x00000000),
-		cpu_to_le32(0xf0005000),
-		cpu_to_le32(0xf0005000),
-	},
-	{
-		/* Tx Tx disabled */
-		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xeeaaaaaa),
-		cpu_to_le32(0xaaaaaaaa),
-		cpu_to_le32(0xcc00ff28),
-		cpu_to_le32(0x0000aaaa),
-		cpu_to_le32(0xcc00aaaa),
-		cpu_to_le32(0x0000aaaa),
-		cpu_to_le32(0xc0004000),
-		cpu_to_le32(0xc0004000),
-		cpu_to_le32(0xf0005000),
-		cpu_to_le32(0xf0005000),
-	},
-};
-
-/* 20MHz / 40MHz below / 40Mhz above*/
-static const __le64 iwl_ci_mask[][3] = {
-	/* dummy entry for channel 0 */
-	{cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
-	{
-		cpu_to_le64(0x0000001FFFULL),
-		cpu_to_le64(0x0ULL),
-		cpu_to_le64(0x00007FFFFFULL),
-	},
-	{
-		cpu_to_le64(0x000000FFFFULL),
-		cpu_to_le64(0x0ULL),
-		cpu_to_le64(0x0003FFFFFFULL),
-	},
-	{
-		cpu_to_le64(0x000003FFFCULL),
-		cpu_to_le64(0x0ULL),
-		cpu_to_le64(0x000FFFFFFCULL),
-	},
-	{
-		cpu_to_le64(0x00001FFFE0ULL),
-		cpu_to_le64(0x0ULL),
-		cpu_to_le64(0x007FFFFFE0ULL),
-	},
-	{
-		cpu_to_le64(0x00007FFF80ULL),
-		cpu_to_le64(0x00007FFFFFULL),
-		cpu_to_le64(0x01FFFFFF80ULL),
-	},
-	{
-		cpu_to_le64(0x0003FFFC00ULL),
-		cpu_to_le64(0x0003FFFFFFULL),
-		cpu_to_le64(0x0FFFFFFC00ULL),
-	},
-	{
-		cpu_to_le64(0x000FFFF000ULL),
-		cpu_to_le64(0x000FFFFFFCULL),
-		cpu_to_le64(0x3FFFFFF000ULL),
-	},
-	{
-		cpu_to_le64(0x007FFF8000ULL),
-		cpu_to_le64(0x007FFFFFE0ULL),
-		cpu_to_le64(0xFFFFFF8000ULL),
-	},
-	{
-		cpu_to_le64(0x01FFFE0000ULL),
-		cpu_to_le64(0x01FFFFFF80ULL),
-		cpu_to_le64(0xFFFFFE0000ULL),
-	},
-	{
-		cpu_to_le64(0x0FFFF00000ULL),
-		cpu_to_le64(0x0FFFFFFC00ULL),
-		cpu_to_le64(0x0ULL),
-	},
-	{
-		cpu_to_le64(0x3FFFC00000ULL),
-		cpu_to_le64(0x3FFFFFF000ULL),
-		cpu_to_le64(0x0)
-	},
-	{
-		cpu_to_le64(0xFFFE000000ULL),
-		cpu_to_le64(0xFFFFFF8000ULL),
-		cpu_to_le64(0x0)
-	},
-	{
-		cpu_to_le64(0xFFF8000000ULL),
-		cpu_to_le64(0xFFFFFE0000ULL),
-		cpu_to_le64(0x0)
-	},
-	{
-		cpu_to_le64(0xFFC0000000ULL),
-		cpu_to_le64(0x0ULL),
-		cpu_to_le64(0x0ULL)
-	},
-};
-
-enum iwl_bt_kill_msk {
-	BT_KILL_MSK_DEFAULT,
-	BT_KILL_MSK_NEVER,
-	BT_KILL_MSK_ALWAYS,
-	BT_KILL_MSK_MAX,
-};
-
-static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
-	[BT_KILL_MSK_DEFAULT] = 0xfffffc00,
-	[BT_KILL_MSK_NEVER] = 0xffffffff,
-	[BT_KILL_MSK_ALWAYS] = 0,
-};
-
-static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
-	{
-		BT_KILL_MSK_ALWAYS,
-		BT_KILL_MSK_ALWAYS,
-		BT_KILL_MSK_ALWAYS,
-	},
-	{
-		BT_KILL_MSK_NEVER,
-		BT_KILL_MSK_NEVER,
-		BT_KILL_MSK_NEVER,
-	},
-	{
-		BT_KILL_MSK_NEVER,
-		BT_KILL_MSK_NEVER,
-		BT_KILL_MSK_NEVER,
-	},
-	{
-		BT_KILL_MSK_DEFAULT,
-		BT_KILL_MSK_NEVER,
-		BT_KILL_MSK_DEFAULT,
-	},
-};
-
-static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
-	{
-		BT_KILL_MSK_ALWAYS,
-		BT_KILL_MSK_ALWAYS,
-		BT_KILL_MSK_ALWAYS,
-	},
-	{
-		BT_KILL_MSK_ALWAYS,
-		BT_KILL_MSK_ALWAYS,
-		BT_KILL_MSK_ALWAYS,
-	},
-	{
-		BT_KILL_MSK_ALWAYS,
-		BT_KILL_MSK_ALWAYS,
-		BT_KILL_MSK_ALWAYS,
-	},
-	{
-		BT_KILL_MSK_DEFAULT,
-		BT_KILL_MSK_ALWAYS,
-		BT_KILL_MSK_DEFAULT,
-	},
-};
-
-struct corunning_block_luts {
-	u8 range;
-	__le32 lut20[BT_COEX_CORUN_LUT_SIZE];
-};
-
-/*
- * Ranges for the antenna coupling calibration / co-running block LUT:
- *		LUT0: [ 0, 12[
- *		LUT1: [12, 20[
- *		LUT2: [20, 21[
- *		LUT3: [21, 23[
- *		LUT4: [23, 27[
- *		LUT5: [27, 30[
- *		LUT6: [30, 32[
- *		LUT7: [32, 33[
- *		LUT8: [33, - [
- */
-static const struct corunning_block_luts antenna_coupling_ranges[] = {
-	{
-		.range = 0,
-		.lut20 = {
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-		},
-	},
-	{
-		.range = 12,
-		.lut20 = {
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-		},
-	},
-	{
-		.range = 20,
-		.lut20 = {
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-		},
-	},
-	{
-		.range = 21,
-		.lut20 = {
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-		},
-	},
-	{
-		.range = 23,
-		.lut20 = {
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-		},
-	},
-	{
-		.range = 27,
-		.lut20 = {
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-		},
-	},
-	{
-		.range = 30,
-		.lut20 = {
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-		},
-	},
-	{
-		.range = 32,
-		.lut20 = {
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-		},
-	},
-	{
-		.range = 33,
-		.lut20 = {
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-			cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
-		},
-	},
-};
-
-static enum iwl_bt_coex_lut_type
-iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
-{
-	struct ieee80211_chanctx_conf *chanctx_conf;
-	enum iwl_bt_coex_lut_type ret;
-	u16 phy_ctx_id;
-
-	/*
-	 * Checking that we hold mvm->mutex is a good idea, but the rate
-	 * control can't acquire the mutex since it runs in Tx path.
-	 * So this is racy in that case, but in the worst case, the AMPDU
-	 * size limit will be wrong for a short time which is not a big
-	 * issue.
-	 */
-
-	rcu_read_lock();
-
-	chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
-	if (!chanctx_conf ||
-	    chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-		rcu_read_unlock();
-		return BT_COEX_INVALID_LUT;
-	}
-
-	ret = BT_COEX_TX_DIS_LUT;
-
-	if (mvm->cfg->bt_shared_single_ant) {
-		rcu_read_unlock();
-		return ret;
-	}
-
-	phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
-
-	if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id)
-		ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut);
-	else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id)
-		ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut);
-	/* else - default = TX TX disallowed */
-
-	rcu_read_unlock();
-
-	return ret;
-}
-
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
-{
-	struct iwl_bt_coex_cmd_old *bt_cmd;
-	struct iwl_host_cmd cmd = {
-		.id = BT_CONFIG,
-		.len = { sizeof(*bt_cmd), },
-		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-	};
-	int ret;
-	u32 flags;
-
-	ret = iwl_send_bt_prio_tbl(mvm);
-	if (ret)
-		return ret;
-
-	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-	if (!bt_cmd)
-		return -ENOMEM;
-	cmd.data[0] = bt_cmd;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
-		switch (mvm->bt_force_ant_mode) {
-		case BT_FORCE_ANT_AUTO:
-			flags = BT_COEX_AUTO_OLD;
-			break;
-		case BT_FORCE_ANT_BT:
-			flags = BT_COEX_BT_OLD;
-			break;
-		case BT_FORCE_ANT_WIFI:
-			flags = BT_COEX_WIFI_OLD;
-			break;
-		default:
-			WARN_ON(1);
-			flags = 0;
-		}
-
-		bt_cmd->flags = cpu_to_le32(flags);
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE);
-		goto send_cmd;
-	}
-
-	bt_cmd->max_kill = 5;
-	bt_cmd->bt4_antenna_isolation_thr =
-		IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS;
-	bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
-	bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
-	bt_cmd->bt4_tx_rx_max_freq0 = 15;
-	bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
-	bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
-
-	flags = iwlwifi_mod_params.bt_coex_active ?
-			BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD;
-	bt_cmd->flags = cpu_to_le32(flags);
-
-	bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
-					    BT_VALID_BT_PRIO_BOOST |
-					    BT_VALID_MAX_KILL |
-					    BT_VALID_3W_TMRS |
-					    BT_VALID_KILL_ACK |
-					    BT_VALID_KILL_CTS |
-					    BT_VALID_REDUCED_TX_POWER |
-					    BT_VALID_LUT |
-					    BT_VALID_WIFI_RX_SW_PRIO_BOOST |
-					    BT_VALID_WIFI_TX_SW_PRIO_BOOST |
-					    BT_VALID_ANT_ISOLATION |
-					    BT_VALID_ANT_ISOLATION_THRS |
-					    BT_VALID_TXTX_DELTA_FREQ_THRS |
-					    BT_VALID_TXRX_MAX_FREQ_0 |
-					    BT_VALID_SYNC_TO_SCO |
-					    BT_VALID_TTC |
-					    BT_VALID_RRC);
-
-	if (IWL_MVM_BT_COEX_SYNC2SCO)
-		bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
-
-	if (iwl_mvm_bt_is_plcr_supported(mvm)) {
-		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-						     BT_VALID_CORUN_LUT_40);
-		bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
-	}
-
-	if (IWL_MVM_BT_COEX_MPLUT) {
-		bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
-	}
-
-	if (IWL_MVM_BT_COEX_TTC)
-		bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
-
-	if (iwl_mvm_bt_is_rrc_supported(mvm))
-		bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
-
-	if (mvm->cfg->bt_shared_single_ant)
-		memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
-		       sizeof(iwl_single_shared_ant));
-	else
-		memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
-		       sizeof(iwl_combined_lookup));
-
-	/* Take first Co-running block LUT to get started */
-	memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
-	       sizeof(bt_cmd->bt4_corun_lut20));
-	memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
-	       sizeof(bt_cmd->bt4_corun_lut40));
-
-	memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
-	       sizeof(iwl_bt_prio_boost));
-	bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
-	bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
-
-send_cmd:
-	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
-	memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
-
-	ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-	kfree(bt_cmd);
-	return ret;
-}
-
-static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm)
-{
-	struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old;
-	u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
-	u32 ag = le32_to_cpu(notif->bt_activity_grading);
-	struct iwl_bt_coex_cmd_old *bt_cmd;
-	u8 ack_kill_msk, cts_kill_msk;
-	struct iwl_host_cmd cmd = {
-		.id = BT_CONFIG,
-		.data[0] = &bt_cmd,
-		.len = { sizeof(*bt_cmd), },
-		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-	};
-	int ret = 0;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut];
-	cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut];
-
-	if (mvm->bt_ack_kill_msk[0] == ack_kill_msk &&
-	    mvm->bt_cts_kill_msk[0] == cts_kill_msk)
-		return 0;
-
-	mvm->bt_ack_kill_msk[0] = ack_kill_msk;
-	mvm->bt_cts_kill_msk[0] = cts_kill_msk;
-
-	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-	if (!bt_cmd)
-		return -ENOMEM;
-	cmd.data[0] = bt_cmd;
-	bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-
-	bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]);
-	bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]);
-	bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
-					     BT_VALID_KILL_ACK |
-					     BT_VALID_KILL_CTS);
-
-	ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-	kfree(bt_cmd);
-	return ret;
-}
-
-static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
-				       bool enable)
-{
-	struct iwl_bt_coex_cmd_old *bt_cmd;
-	/* Send ASYNC since this can be sent from an atomic context */
-	struct iwl_host_cmd cmd = {
-		.id = BT_CONFIG,
-		.len = { sizeof(*bt_cmd), },
-		.dataflags = { IWL_HCMD_DFL_DUP, },
-		.flags = CMD_ASYNC,
-	};
-	struct iwl_mvm_sta *mvmsta;
-	int ret;
-
-	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
-	if (!mvmsta)
-		return 0;
-
-	/* nothing to do */
-	if (mvmsta->bt_reduced_txpower == enable)
-		return 0;
-
-	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
-	if (!bt_cmd)
-		return -ENOMEM;
-	cmd.data[0] = bt_cmd;
-	bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-
-	bt_cmd->valid_bit_msk =
-		cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
-	bt_cmd->bt_reduced_tx_power = sta_id;
-
-	if (enable)
-		bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
-
-	IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
-		       enable ? "en" : "dis", sta_id);
-
-	mvmsta->bt_reduced_txpower = enable;
-
-	ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-	kfree(bt_cmd);
-	return ret;
-}
-
-struct iwl_bt_iterator_data {
-	struct iwl_bt_coex_profile_notif_old *notif;
-	struct iwl_mvm *mvm;
-	struct ieee80211_chanctx_conf *primary;
-	struct ieee80211_chanctx_conf *secondary;
-	bool primary_ll;
-};
-
-static inline
-void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
-				       struct ieee80211_vif *vif,
-				       bool enable, int rssi)
-{
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-	mvmvif->bf_data.last_bt_coex_event = rssi;
-	mvmvif->bf_data.bt_coex_max_thold =
-		enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
-	mvmvif->bf_data.bt_coex_min_thold =
-		enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
-}
-
-/* must be called under rcu_read_lock */
-static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
-				      struct ieee80211_vif *vif)
-{
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_bt_iterator_data *data = _data;
-	struct iwl_mvm *mvm = data->mvm;
-	struct ieee80211_chanctx_conf *chanctx_conf;
-	enum ieee80211_smps_mode smps_mode;
-	u32 bt_activity_grading;
-	int ave_rssi;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	switch (vif->type) {
-	case NL80211_IFTYPE_STATION:
-		/* default smps_mode for BSS / P2P client is AUTOMATIC */
-		smps_mode = IEEE80211_SMPS_AUTOMATIC;
-		break;
-	case NL80211_IFTYPE_AP:
-		if (!mvmvif->ap_ibss_active)
-			return;
-		break;
-	default:
-		return;
-	}
-
-	chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
-	/* If channel context is invalid or not on 2.4GHz .. */
-	if ((!chanctx_conf ||
-	     chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
-		if (vif->type == NL80211_IFTYPE_STATION) {
-			/* ... relax constraints and disable rssi events */
-			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-					    smps_mode);
-			iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
-						    false);
-			iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-		}
-		return;
-	}
-
-	bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
-	if (bt_activity_grading >= BT_HIGH_TRAFFIC)
-		smps_mode = IEEE80211_SMPS_STATIC;
-	else if (bt_activity_grading >= BT_LOW_TRAFFIC)
-		smps_mode = vif->type == NL80211_IFTYPE_AP ?
-				IEEE80211_SMPS_OFF :
-				IEEE80211_SMPS_DYNAMIC;
-
-	/* relax SMPS contraints for next association */
-	if (!vif->bss_conf.assoc)
-		smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
-	if (mvmvif->phy_ctxt &&
-	    data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
-		smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
-	IWL_DEBUG_COEX(data->mvm,
-		       "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
-		       mvmvif->id, data->notif->bt_status, bt_activity_grading,
-		       smps_mode);
-
-	if (vif->type == NL80211_IFTYPE_STATION)
-		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-				    smps_mode);
-
-	/* low latency is always primary */
-	if (iwl_mvm_vif_low_latency(mvmvif)) {
-		data->primary_ll = true;
-
-		data->secondary = data->primary;
-		data->primary = chanctx_conf;
-	}
-
-	if (vif->type == NL80211_IFTYPE_AP) {
-		if (!mvmvif->ap_ibss_active)
-			return;
-
-		if (chanctx_conf == data->primary)
-			return;
-
-		if (!data->primary_ll) {
-			/*
-			 * downgrade the current primary no matter what its
-			 * type is.
-			 */
-			data->secondary = data->primary;
-			data->primary = chanctx_conf;
-		} else {
-			/* there is low latency vif - we will be secondary */
-			data->secondary = chanctx_conf;
-		}
-		return;
-	}
-
-	/*
-	 * STA / P2P Client, try to be primary if first vif. If we are in low
-	 * latency mode, we are already in primary and just don't do much
-	 */
-	if (!data->primary || data->primary == chanctx_conf)
-		data->primary = chanctx_conf;
-	else if (!data->secondary)
-		/* if secondary is not NULL, it might be a GO */
-		data->secondary = chanctx_conf;
-
-	/*
-	 * don't reduce the Tx power if one of these is true:
-	 *  we are in LOOSE
-	 *  single share antenna product
-	 *  BT is active
-	 *  we are associated
-	 */
-	if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
-	    mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
-	    !data->notif->bt_status) {
-		iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
-		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-		return;
-	}
-
-	/* try to get the avg rssi from fw */
-	ave_rssi = mvmvif->bf_data.ave_beacon_signal;
-
-	/* if the RSSI isn't valid, fake it is very low */
-	if (!ave_rssi)
-		ave_rssi = -100;
-	if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
-		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
-			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-	} else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
-		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
-			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-	}
-
-	/* Begin to monitor the RSSI: it may influence the reduced Tx power */
-	iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
-}
-
-static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
-{
-	struct iwl_bt_iterator_data data = {
-		.mvm = mvm,
-		.notif = &mvm->last_bt_notif_old,
-	};
-	struct iwl_bt_coex_ci_cmd_old cmd = {};
-	u8 ci_bw_idx;
-
-	/* Ignore updates if we are in force mode */
-	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-		return;
-
-	rcu_read_lock();
-	ieee80211_iterate_active_interfaces_atomic(
-					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-					iwl_mvm_bt_notif_iterator, &data);
-
-	if (data.primary) {
-		struct ieee80211_chanctx_conf *chan = data.primary;
-
-		if (WARN_ON(!chan->def.chan)) {
-			rcu_read_unlock();
-			return;
-		}
-
-		if (chan->def.width < NL80211_CHAN_WIDTH_40) {
-			ci_bw_idx = 0;
-			cmd.co_run_bw_primary = 0;
-		} else {
-			cmd.co_run_bw_primary = 1;
-			if (chan->def.center_freq1 >
-			    chan->def.chan->center_freq)
-				ci_bw_idx = 2;
-			else
-				ci_bw_idx = 1;
-		}
-
-		cmd.bt_primary_ci =
-			iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-		cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
-	}
-
-	if (data.secondary) {
-		struct ieee80211_chanctx_conf *chan = data.secondary;
-
-		if (WARN_ON(!data.secondary->def.chan)) {
-			rcu_read_unlock();
-			return;
-		}
-
-		if (chan->def.width < NL80211_CHAN_WIDTH_40) {
-			ci_bw_idx = 0;
-			cmd.co_run_bw_secondary = 0;
-		} else {
-			cmd.co_run_bw_secondary = 1;
-			if (chan->def.center_freq1 >
-			    chan->def.chan->center_freq)
-				ci_bw_idx = 2;
-			else
-				ci_bw_idx = 1;
-		}
-
-		cmd.bt_secondary_ci =
-			iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-		cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
-	}
-
-	rcu_read_unlock();
-
-	/* Don't spam the fw with the same command over and over */
-	if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) {
-		if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
-					 sizeof(cmd), &cmd))
-			IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
-		memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd));
-	}
-
-	if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
-		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-				  struct iwl_rx_cmd_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
-
-	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-	IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
-		       notif->bt_status ? "ON" : "OFF");
-	IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
-	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
-	IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
-		       le32_to_cpu(notif->primary_ch_lut));
-	IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
-		       le32_to_cpu(notif->secondary_ch_lut));
-	IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
-		       le32_to_cpu(notif->bt_activity_grading));
-	IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
-		       notif->bt_agg_traffic_load);
-
-	/* remember this notification for future use: rssi fluctuations */
-	memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
-
-	iwl_mvm_bt_coex_notif_handle(mvm);
-}
-
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
-				     struct ieee80211_vif *vif)
-{
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_bt_iterator_data *data = _data;
-	struct iwl_mvm *mvm = data->mvm;
-
-	struct ieee80211_sta *sta;
-	struct iwl_mvm_sta *mvmsta;
-
-	struct ieee80211_chanctx_conf *chanctx_conf;
-
-	rcu_read_lock();
-	chanctx_conf = rcu_dereference(vif->chanctx_conf);
-	/* If channel context is invalid or not on 2.4GHz - don't count it */
-	if (!chanctx_conf ||
-	    chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-		rcu_read_unlock();
-		return;
-	}
-	rcu_read_unlock();
-
-	if (vif->type != NL80211_IFTYPE_STATION ||
-	    mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-		return;
-
-	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
-					lockdep_is_held(&mvm->mutex));
-
-	/* This can happen if the station has been removed right now */
-	if (IS_ERR_OR_NULL(sta))
-		return;
-
-	mvmsta = iwl_mvm_sta_from_mac80211(sta);
-}
-
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-			       enum ieee80211_rssi_event_data rssi_event)
-{
-	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	struct iwl_bt_iterator_data data = {
-		.mvm = mvm,
-	};
-	int ret;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	/* Ignore updates if we are in force mode */
-	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-		return;
-
-	/*
-	 * Rssi update while not associated - can happen since the statistics
-	 * are handled asynchronously
-	 */
-	if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-		return;
-
-	/* No BT - reports should be disabled */
-	if (!mvm->last_bt_notif_old.bt_status)
-		return;
-
-	IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
-		       rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
-
-	/*
-	 * Check if rssi is good enough for reduced Tx power, but not in loose
-	 * scheme.
-	 */
-	if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
-	    iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
-		ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
-						  false);
-	else
-		ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
-
-	if (ret)
-		IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
-	ieee80211_iterate_active_interfaces_atomic(
-		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-		iwl_mvm_bt_rssi_iterator, &data);
-
-	if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
-		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000)
-#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT	(1200)
-
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
-				    struct ieee80211_sta *sta)
-{
-	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	enum iwl_bt_coex_lut_type lut_type;
-
-	if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
-	    BT_HIGH_TRAFFIC)
-		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-	if (mvm->last_bt_notif_old.ttc_enabled)
-		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-	lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-
-	if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
-		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-	/* tight coex, high bt traffic, reduce AGG time limit */
-	return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
-}
-
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
-					 struct ieee80211_sta *sta)
-{
-	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	enum iwl_bt_coex_lut_type lut_type;
-
-	if (mvm->last_bt_notif_old.ttc_enabled)
-		return true;
-
-	if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
-	    BT_HIGH_TRAFFIC)
-		return true;
-
-	/*
-	 * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
-	 * since BT is already killed.
-	 * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
-	 * we Tx.
-	 * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
-	 */
-	lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-	return lut_type != BT_COEX_LOOSE_LUT;
-}
-
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
-{
-	u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-	return ag < BT_HIGH_TRAFFIC;
-}
-
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
-					enum ieee80211_band band)
-{
-	u32 bt_activity =
-		le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-
-	if (band != IEEE80211_BAND_2GHZ)
-		return false;
-
-	return bt_activity >= BT_LOW_TRAFFIC;
-}
-
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
-{
-	iwl_mvm_bt_coex_notif_handle(mvm);
-}
-
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-				       struct iwl_rx_cmd_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	u32 ant_isolation = le32_to_cpup((void *)pkt->data);
-	u8 __maybe_unused lower_bound, upper_bound;
-	u8 lut;
-
-	struct iwl_bt_coex_cmd_old *bt_cmd;
-	struct iwl_host_cmd cmd = {
-		.id = BT_CONFIG,
-		.len = { sizeof(*bt_cmd), },
-		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-	};
-
-	if (!iwl_mvm_bt_is_plcr_supported(mvm))
-		return;
-
-	lockdep_assert_held(&mvm->mutex);
-
-	/* Ignore updates if we are in force mode */
-	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-		return;
-
-	if (ant_isolation ==  mvm->last_ant_isol)
-		return;
-
-	for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
-		if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
-			break;
-
-	lower_bound = antenna_coupling_ranges[lut].range;
-
-	if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
-		upper_bound = antenna_coupling_ranges[lut + 1].range;
-	else
-		upper_bound = antenna_coupling_ranges[lut].range;
-
-	IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
-		       ant_isolation, lower_bound, upper_bound, lut);
-
-	mvm->last_ant_isol = ant_isolation;
-
-	if (mvm->last_corun_lut == lut)
-		return;
-
-	mvm->last_corun_lut = lut;
-
-	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-	if (!bt_cmd)
-		return;
-	cmd.data[0] = bt_cmd;
-
-	bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-	bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
-					     BT_VALID_CORUN_LUT_20 |
-					     BT_VALID_CORUN_LUT_40);
-
-	/* For the moment, use the same LUT for 20GHz and 40GHz */
-	memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
-	       sizeof(bt_cmd->bt4_corun_lut20));
-
-	memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
-	       sizeof(bt_cmd->bt4_corun_lut40));
-
-	if (iwl_mvm_send_cmd(mvm, &cmd))
-		IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
-
-	kfree(bt_cmd);
-}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 4b560e4..b96b1c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -75,7 +75,6 @@
 #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT	(10 * USEC_PER_MSEC)
 #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT	(2 * 1024) /* defined in TU */
 #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT	(40 * 1024) /* defined in TU */
-#define IWL_MVM_P2P_UAPSD_STANDALONE		0
 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE	0
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index c1a3131..e3561bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -723,7 +723,7 @@
 		return -EIO;
 	}
 
-	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
+	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
 	if (ret)
 		return ret;
 	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 1400445..fb96bc0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -724,9 +724,9 @@
 
 		ret = kstrtou32(data, 10, &value);
 		if (ret == 0 && value) {
-			enum ieee80211_band band = (cmd->channel_num <= 14) ?
-						   IEEE80211_BAND_2GHZ :
-						   IEEE80211_BAND_5GHZ;
+			enum nl80211_band band = (cmd->channel_num <= 14) ?
+						   NL80211_BAND_2GHZ :
+						   NL80211_BAND_5GHZ;
 			struct ieee80211_channel chn = {
 				.band = band,
 				.center_freq = ieee80211_channel_to_frequency(
@@ -1425,6 +1425,89 @@
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
+static const char * const chanwidths[] = {
+	[NL80211_CHAN_WIDTH_20_NOHT] = "noht",
+	[NL80211_CHAN_WIDTH_20] = "ht20",
+	[NL80211_CHAN_WIDTH_40] = "ht40",
+	[NL80211_CHAN_WIDTH_80] = "vht80",
+	[NL80211_CHAN_WIDTH_80P80] = "vht80p80",
+	[NL80211_CHAN_WIDTH_160] = "vht160",
+};
+
+static bool iwl_mvm_lqm_notif_wait(struct iwl_notif_wait_data *notif_wait,
+				   struct iwl_rx_packet *pkt, void *data)
+{
+	struct ieee80211_vif *vif = data;
+	struct iwl_mvm *mvm =
+		container_of(notif_wait, struct iwl_mvm, notif_wait);
+	struct iwl_link_qual_msrmnt_notif *report = (void *)pkt->data;
+	u32 num_of_stations = le32_to_cpu(report->number_of_stations);
+	int i;
+
+	IWL_INFO(mvm, "LQM report:\n");
+	IWL_INFO(mvm, "\tstatus: %d\n", report->status);
+	IWL_INFO(mvm, "\tmacID: %d\n", le32_to_cpu(report->mac_id));
+	IWL_INFO(mvm, "\ttx_frame_dropped: %d\n",
+		 le32_to_cpu(report->tx_frame_dropped));
+	IWL_INFO(mvm, "\ttime_in_measurement_window: %d us\n",
+		 le32_to_cpu(report->time_in_measurement_window));
+	IWL_INFO(mvm, "\ttotal_air_time_other_stations: %d\n",
+		 le32_to_cpu(report->total_air_time_other_stations));
+	IWL_INFO(mvm, "\tchannel_freq: %d\n",
+		 vif->bss_conf.chandef.center_freq1);
+	IWL_INFO(mvm, "\tchannel_width: %s\n",
+		 chanwidths[vif->bss_conf.chandef.width]);
+	IWL_INFO(mvm, "\tnumber_of_stations: %d\n", num_of_stations);
+	for (i = 0; i < num_of_stations; i++)
+		IWL_INFO(mvm, "\t\tsta[%d]: %d\n", i,
+			 report->frequent_stations_air_time[i]);
+
+	return true;
+}
+
+static ssize_t iwl_dbgfs_lqm_send_cmd_write(struct ieee80211_vif *vif,
+					    char *buf, size_t count,
+					    loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	struct iwl_notification_wait wait_lqm_notif;
+	static u16 lqm_notif[] = {
+		WIDE_ID(MAC_CONF_GROUP,
+			LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF)
+	};
+	int err;
+	u32 duration;
+	u32 timeout;
+
+	if (sscanf(buf, "%d,%d", &duration, &timeout) != 2)
+		return -EINVAL;
+
+	iwl_init_notification_wait(&mvm->notif_wait, &wait_lqm_notif,
+				   lqm_notif, ARRAY_SIZE(lqm_notif),
+				   iwl_mvm_lqm_notif_wait, vif);
+	mutex_lock(&mvm->mutex);
+	err = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
+				   duration, timeout);
+	mutex_unlock(&mvm->mutex);
+
+	if (err) {
+		IWL_ERR(mvm, "Failed to send lqm cmd (err=%d)\n", err);
+		iwl_remove_notification(&mvm->notif_wait, &wait_lqm_notif);
+		return err;
+	}
+
+	/* wait for 2 * timeout (safety guard) and convert to jiffies */
+	timeout = msecs_to_jiffies((timeout * 2) / 1000);
+
+	err = iwl_wait_notification(&mvm->notif_wait, &wait_lqm_notif,
+				    timeout);
+	if (err)
+		IWL_ERR(mvm, "Getting lqm notif timed out\n");
+
+	return count;
+}
+
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
 	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1449,6 +1532,7 @@
 MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
+MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -1488,6 +1572,7 @@
 				 S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
 				 S_IRUSR | S_IWUSR);
+	MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
 
 	if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
 	    mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a43b392..362a546 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -65,6 +65,7 @@
  *****************************************************************************/
 #include <linux/vmalloc.h>
 #include <linux/ieee80211.h>
+#include <linux/netdevice.h>
 
 #include "mvm.h"
 #include "fw-dbg.h"
@@ -463,69 +464,11 @@
 	return pos;
 }
 
-static
-int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif,
-			       char *buf, int pos, int bufsz)
-{
-	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
-
-	BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
-	BT_MBOX_PRINT(0, LE_PROF1, false);
-	BT_MBOX_PRINT(0, LE_PROF2, false);
-	BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
-	BT_MBOX_PRINT(0, CHL_SEQ_N, false);
-	BT_MBOX_PRINT(0, INBAND_S, false);
-	BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
-	BT_MBOX_PRINT(0, LE_SCAN, false);
-	BT_MBOX_PRINT(0, LE_ADV, false);
-	BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
-	BT_MBOX_PRINT(0, OPEN_CON_1, true);
-
-	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
-
-	BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
-	BT_MBOX_PRINT(1, IP_SR, false);
-	BT_MBOX_PRINT(1, LE_MSTR, false);
-	BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
-	BT_MBOX_PRINT(1, MSG_TYPE, false);
-	BT_MBOX_PRINT(1, SSN, true);
-
-	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
-
-	BT_MBOX_PRINT(2, SNIFF_ACT, false);
-	BT_MBOX_PRINT(2, PAG, false);
-	BT_MBOX_PRINT(2, INQUIRY, false);
-	BT_MBOX_PRINT(2, CONN, false);
-	BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
-	BT_MBOX_PRINT(2, DISC, false);
-	BT_MBOX_PRINT(2, SCO_TX_ACT, false);
-	BT_MBOX_PRINT(2, SCO_RX_ACT, false);
-	BT_MBOX_PRINT(2, ESCO_RE_TX, false);
-	BT_MBOX_PRINT(2, SCO_DURATION, true);
-
-	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
-
-	BT_MBOX_PRINT(3, SCO_STATE, false);
-	BT_MBOX_PRINT(3, SNIFF_STATE, false);
-	BT_MBOX_PRINT(3, A2DP_STATE, false);
-	BT_MBOX_PRINT(3, ACL_STATE, false);
-	BT_MBOX_PRINT(3, MSTR_STATE, false);
-	BT_MBOX_PRINT(3, OBX_STATE, false);
-	BT_MBOX_PRINT(3, OPEN_CON_2, false);
-	BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
-	BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
-	BT_MBOX_PRINT(3, INBAND_P, false);
-	BT_MBOX_PRINT(3, MSG_TYPE_2, false);
-	BT_MBOX_PRINT(3, SSN_2, false);
-	BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
-
-	return pos;
-}
-
 static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
 				       size_t count, loff_t *ppos)
 {
 	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
 	char *buf;
 	int ret, pos = 0, bufsz = sizeof(char) * 1024;
 
@@ -535,52 +478,24 @@
 
 	mutex_lock(&mvm->mutex);
 
-	if (!fw_has_api(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-		struct iwl_bt_coex_profile_notif_old *notif =
-			&mvm->last_bt_notif_old;
+	pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
 
-		pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz);
-
-		pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
-				 notif->bt_ci_compliance);
-		pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
-				 le32_to_cpu(notif->primary_ch_lut));
-		pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
-				 le32_to_cpu(notif->secondary_ch_lut));
-		pos += scnprintf(buf+pos,
-				 bufsz-pos, "bt_activity_grading = %d\n",
-				 le32_to_cpu(notif->bt_activity_grading));
-		pos += scnprintf(buf+pos, bufsz-pos,
-				 "antenna isolation = %d CORUN LUT index = %d\n",
-				 mvm->last_ant_isol, mvm->last_corun_lut);
-		pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
-				 notif->rrc_enabled);
-		pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
-				 notif->ttc_enabled);
-	} else {
-		struct iwl_bt_coex_profile_notif *notif =
-			&mvm->last_bt_notif;
-
-		pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
-
-		pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
-				 notif->bt_ci_compliance);
-		pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
-				 le32_to_cpu(notif->primary_ch_lut));
-		pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
-				 le32_to_cpu(notif->secondary_ch_lut));
-		pos += scnprintf(buf+pos,
-				 bufsz-pos, "bt_activity_grading = %d\n",
-				 le32_to_cpu(notif->bt_activity_grading));
-		pos += scnprintf(buf+pos, bufsz-pos,
-				 "antenna isolation = %d CORUN LUT index = %d\n",
-				 mvm->last_ant_isol, mvm->last_corun_lut);
-		pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
-				 (notif->ttc_rrc_status >> 4) & 0xF);
-		pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
-				 notif->ttc_rrc_status & 0xF);
-	}
+	pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
+			 notif->bt_ci_compliance);
+	pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
+			 le32_to_cpu(notif->primary_ch_lut));
+	pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
+			 le32_to_cpu(notif->secondary_ch_lut));
+	pos += scnprintf(buf + pos,
+			 bufsz - pos, "bt_activity_grading = %d\n",
+			 le32_to_cpu(notif->bt_activity_grading));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "antenna isolation = %d CORUN LUT index = %d\n",
+			 mvm->last_ant_isol, mvm->last_corun_lut);
+	pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
+			 (notif->ttc_rrc_status >> 4) & 0xF);
+	pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
+			 notif->ttc_rrc_status & 0xF);
 
 	pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
 			 IWL_MVM_BT_COEX_SYNC2SCO);
@@ -602,44 +517,20 @@
 				     size_t count, loff_t *ppos)
 {
 	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
 	char buf[256];
 	int bufsz = sizeof(buf);
 	int pos = 0;
 
 	mutex_lock(&mvm->mutex);
 
-	if (!fw_has_api(&mvm->fw->ucode_capa,
-			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
-		struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
-
-		pos += scnprintf(buf+pos, bufsz-pos,
-				 "Channel inhibition CMD\n");
-		pos += scnprintf(buf+pos, bufsz-pos,
-			       "\tPrimary Channel Bitmap 0x%016llx\n",
-			       le64_to_cpu(cmd->bt_primary_ci));
-		pos += scnprintf(buf+pos, bufsz-pos,
-			       "\tSecondary Channel Bitmap 0x%016llx\n",
-			       le64_to_cpu(cmd->bt_secondary_ci));
-
-		pos += scnprintf(buf+pos, bufsz-pos,
-				 "BT Configuration CMD - 0=default, 1=never, 2=always\n");
-		pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
-				 mvm->bt_ack_kill_msk[0]);
-		pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
-				 mvm->bt_cts_kill_msk[0]);
-
-	} else {
-		struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
-
-		pos += scnprintf(buf+pos, bufsz-pos,
-				 "Channel inhibition CMD\n");
-		pos += scnprintf(buf+pos, bufsz-pos,
-			       "\tPrimary Channel Bitmap 0x%016llx\n",
-			       le64_to_cpu(cmd->bt_primary_ci));
-		pos += scnprintf(buf+pos, bufsz-pos,
-			       "\tSecondary Channel Bitmap 0x%016llx\n",
-			       le64_to_cpu(cmd->bt_secondary_ci));
-	}
+	pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "\tPrimary Channel Bitmap 0x%016llx\n",
+			 le64_to_cpu(cmd->bt_primary_ci));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "\tSecondary Channel Bitmap 0x%016llx\n",
+			 le64_to_cpu(cmd->bt_secondary_ci));
 
 	mutex_unlock(&mvm->mutex);
 
@@ -990,8 +881,10 @@
 	struct iwl_rss_config_cmd cmd = {
 		.flags = cpu_to_le32(IWL_RSS_ENABLE),
 		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+			     IWL_RSS_HASH_TYPE_IPV4_UDP |
 			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
 			     IWL_RSS_HASH_TYPE_IPV6_TCP |
+			     IWL_RSS_HASH_TYPE_IPV6_UDP |
 			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
 	};
 	int ret, i, num_repeats, nbytes = count / 2;
@@ -1015,7 +908,7 @@
 	memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
 	       ARRAY_SIZE(cmd.indirection_table) % nbytes);
 
-	memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
 
 	mutex_lock(&mvm->mutex);
 	ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
index 7a16e55..4c086d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
@@ -268,12 +268,25 @@
 	IWL_RX_MPDU_AMSDU_LAST_SUBFRAME		= 0x80,
 };
 
+enum iwl_rx_l3_proto_values {
+	IWL_RX_L3_TYPE_NONE,
+	IWL_RX_L3_TYPE_IPV4,
+	IWL_RX_L3_TYPE_IPV4_FRAG,
+	IWL_RX_L3_TYPE_IPV6_FRAG,
+	IWL_RX_L3_TYPE_IPV6,
+	IWL_RX_L3_TYPE_IPV6_IN_IPV4,
+	IWL_RX_L3_TYPE_ARP,
+	IWL_RX_L3_TYPE_EAPOL,
+};
+
+#define IWL_RX_L3_PROTO_POS 4
+
 enum iwl_rx_l3l4_flags {
 	IWL_RX_L3L4_IP_HDR_CSUM_OK		= BIT(0),
 	IWL_RX_L3L4_TCP_UDP_CSUM_OK		= BIT(1),
 	IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH		= BIT(2),
 	IWL_RX_L3L4_TCP_ACK			= BIT(3),
-	IWL_RX_L3L4_L3_PROTO_MASK		= 0xf << 4,
+	IWL_RX_L3L4_L3_PROTO_MASK		= 0xf << IWL_RX_L3_PROTO_POS,
 	IWL_RX_L3L4_L4_PROTO_MASK		= 0xf << 8,
 	IWL_RX_L3L4_RSS_HASH_MASK		= 0xf << 12,
 };
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index ba3f0bb..dadcccd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -193,11 +194,41 @@
 #define IWL_BAR_DFAULT_RETRY_LIMIT		60
 #define IWL_LOW_RETRY_LIMIT			7
 
+/**
+ * enum iwl_tx_offload_assist_flags_pos -  set %iwl_tx_cmd offload_assist values
+ * @TX_CMD_OFFLD_IP_HDR_OFFSET: offset to start of IP header (in words)
+ *	from mac header end. For normal case it is 4 words for SNAP.
+ *	note: tx_cmd, mac header and pad are not counted in the offset.
+ *	This is used to help the offload in case there is tunneling such as
+ *	IPv6 in IPv4; in that case the IP header offset should point to the
+ *	inner IP header, and the IPv4 checksum of the external header should
+ *	be calculated by the driver.
+ * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
+ * @TX_CMD_OFFLD_L3_EN: enable IP header checksum
+ * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
+ *	field. Doesn't include the pad.
+ * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for
+ *	alignment
+ * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU
+ */
+enum iwl_tx_offload_assist_flags_pos {
+	TX_CMD_OFFLD_IP_HDR =		0,
+	TX_CMD_OFFLD_L4_EN =		6,
+	TX_CMD_OFFLD_L3_EN =		7,
+	TX_CMD_OFFLD_MH_SIZE =		8,
+	TX_CMD_OFFLD_PAD =		13,
+	TX_CMD_OFFLD_AMSDU =		14,
+};
+
+#define IWL_TX_CMD_OFFLD_MH_MASK	0x1f
+#define IWL_TX_CMD_OFFLD_IP_HDR_MASK	0x3f
+
 /* TODO: complete documentation for try_cnt and btkill_cnt */
 /**
  * struct iwl_tx_cmd - TX command struct to FW
  * ( TX_CMD = 0x1c )
  * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
  * @tx_flags: combination of TX_CMD_FLG_*
  * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
  *	cleared. Combination of RATE_MCS_*
@@ -231,7 +262,7 @@
  */
 struct iwl_tx_cmd {
 	__le16 len;
-	__le16 next_frame_len;
+	__le16 offload_assist;
 	__le32 tx_flags;
 	struct {
 		u8 try_cnt;
@@ -255,7 +286,7 @@
 	__le16 reserved4;
 	u8 payload[0];
 	struct ieee80211_hdr hdr[0];
-} __packed; /* TX_CMD_API_S_VER_3 */
+} __packed; /* TX_CMD_API_S_VER_6 */
 
 /*
  * TX response related data
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 4a0fc47..60eed84 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -80,12 +80,39 @@
 #include "fw-api-stats.h"
 #include "fw-api-tof.h"
 
-/* Tx queue numbers */
+/* Tx queue numbers for non-DQA mode */
 enum {
 	IWL_MVM_OFFCHANNEL_QUEUE = 8,
 	IWL_MVM_CMD_QUEUE = 9,
 };
 
+/*
+ * DQA queue numbers
+ *
+ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
+ * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
+ * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
+ *	that we are never left without the possibility to connect to an AP.
+ * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
+ *	Each MGMT queue is mapped to a single STA
+ *	MGMT frames are frames that return true on ieee80211_is_mgmt()
+ * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
+ * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
+ *	DATA frames are intended for !ieee80211_is_mgmt() frames, but if
+ *	the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
+ *	as well
+ * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
+ */
+enum iwl_mvm_dqa_txq {
+	IWL_MVM_DQA_CMD_QUEUE = 0,
+	IWL_MVM_DQA_GCAST_QUEUE = 3,
+	IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
+	IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
+	IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+	IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+	IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+};
+
 enum iwl_mvm_tx_fifo {
 	IWL_MVM_TX_FIFO_BK = 0,
 	IWL_MVM_TX_FIFO_BE,
@@ -279,6 +306,11 @@
 /* Please keep this enum *SORTED* by hex value.
  * Needed for binary search, otherwise a warning will be triggered.
  */
+enum iwl_mac_conf_subcmd_ids {
+	LINK_QUALITY_MEASUREMENT_CMD = 0x1,
+	LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
+};
+
 enum iwl_phy_ops_subcmd_ids {
 	CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
 	CTDP_CONFIG_CMD = 0x03,
@@ -287,6 +319,10 @@
 	DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
 };
 
+enum iwl_system_subcmd_ids {
+	SHARED_MEM_CFG_CMD = 0x0,
+};
+
 enum iwl_data_path_subcmd_ids {
 	UPDATE_MU_GROUPS_CMD = 0x1,
 	TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
@@ -302,6 +338,8 @@
 enum {
 	LEGACY_GROUP = 0x0,
 	LONG_GROUP = 0x1,
+	SYSTEM_GROUP = 0x2,
+	MAC_CONF_GROUP = 0x3,
 	PHY_OPS_GROUP = 0x4,
 	DATA_PATH_GROUP = 0x5,
 	PROT_OFFLOAD_GROUP = 0xb,
@@ -1923,6 +1961,7 @@
 
 #define TX_FIFO_MAX_NUM		8
 #define RX_FIFO_MAX_NUM		2
+#define TX_FIFO_INTERNAL_MAX_NUM	6
 
 /**
  * Shared memory configuration information from the FW
@@ -1940,6 +1979,12 @@
  * @page_buff_addr: used by UMAC and performance debug (page miss analysis),
  *	when paging is not supported this should be 0
  * @page_buff_size: size of %page_buff_addr
+ * @rxfifo_addr: Start address of rxFifo
+ * @internal_txfifo_addr: start address of internalFifo
+ * @internal_txfifo_size: internal fifos' size
+ *
+ * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
+ *	 set, the last 3 members don't exist.
  */
 struct iwl_shared_mem_cfg {
 	__le32 shared_mem_addr;
@@ -1951,7 +1996,10 @@
 	__le32 rxfifo_size[RX_FIFO_MAX_NUM];
 	__le32 page_buff_addr;
 	__le32 page_buff_size;
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
+	__le32 rxfifo_addr;
+	__le32 internal_txfifo_addr;
+	__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
 
 /**
  * VHT MU-MIMO group configuration
@@ -2002,4 +2050,60 @@
 	u8 data[MAX_STORED_BEACON_SIZE];
 } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
 
+#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
+
+enum iwl_lqm_cmd_operatrions {
+	LQM_CMD_OPERATION_START_MEASUREMENT = 0x01,
+	LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02,
+};
+
+enum iwl_lqm_status {
+	LQM_STATUS_SUCCESS = 0,
+	LQM_STATUS_TIMEOUT = 1,
+	LQM_STATUS_ABORT = 2,
+};
+
+/**
+ * Link Quality Measurement command
+ * @cmd_operation: command operation to be performed (start or stop)
+ *	as defined above.
+ * @mac_id: MAC ID the measurement applies to.
+ * @measurement_time: time of the total measurement to be performed, in uSec.
+ * @timeout: maximum time allowed until a response is sent, in uSec.
+ */
+struct iwl_link_qual_msrmnt_cmd {
+	__le32 cmd_operation;
+	__le32 mac_id;
+	__le32 measurement_time;
+	__le32 timeout;
+} __packed /* LQM_CMD_API_S_VER_1 */;
+
+/**
+ * Link Quality Measurement notification
+ *
+ * @frequent_stations_air_time: an array containing the total air time
+ *	(in uSec) used by the most frequently transmitting stations.
+ * @number_of_stations: the number of unique stations included in the array
+ *	(a number between 0 and 16)
+ * @total_air_time_other_stations: the total air time (uSec) used by all the
+ *	stations which are not included in the above report.
+ * @time_in_measurement_window: the total time in uSec in which a measurement
+ *	took place.
+ * @tx_frame_dropped: the number of TX frames dropped due to retry limit during
+ *	measurement
+ * @mac_id: MAC ID the measurement applies to.
+ * @status: return status. may be one of the LQM_STATUS_* defined above.
+ * @reserved: reserved.
+ */
+struct iwl_link_qual_msrmnt_notif {
+	__le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT];
+	__le32 number_of_stations;
+	__le32 total_air_time_other_stations;
+	__le32 time_in_measurement_window;
+	__le32 tx_frame_dropped;
+	__le32 mac_id;
+	__le32 status;
+	__le32 reserved[3];
+} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
+
 #endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 4856eac..e25171f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -265,6 +265,65 @@
 		*dump_data = iwl_fw_error_next_data(*dump_data);
 	}
 
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+		/* Pull UMAC internal TXF data from all TXFs */
+		for (i = 0;
+		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+		     i++) {
+			/* Mark the number of TXF we're pulling now */
+			iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
+
+			fifo_hdr = (void *)(*dump_data)->data;
+			fifo_data = (void *)fifo_hdr->data;
+			fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
+
+			/* No need to try to read the data if the length is 0 */
+			if (fifo_len == 0)
+				continue;
+
+			/* Add a TLV for the internal FIFOs */
+			(*dump_data)->type =
+				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
+			(*dump_data)->len =
+				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+			fifo_hdr->fifo_num = cpu_to_le32(i);
+			fifo_hdr->available_bytes =
+				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+								TXF_CPU2_FIFO_ITEM_CNT));
+			fifo_hdr->wr_ptr =
+				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+								TXF_CPU2_WR_PTR));
+			fifo_hdr->rd_ptr =
+				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+								TXF_CPU2_RD_PTR));
+			fifo_hdr->fence_ptr =
+				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+								TXF_CPU2_FENCE_PTR));
+			fifo_hdr->fence_mode =
+				cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+								TXF_CPU2_LOCK_FENCE));
+
+			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
+			iwl_trans_write_prph(mvm->trans,
+					     TXF_CPU2_READ_MODIFY_ADDR,
+					     TXF_CPU2_WR_PTR);
+
+			/* Dummy-read to advance the read pointer to head */
+			iwl_trans_read_prph(mvm->trans,
+					    TXF_CPU2_READ_MODIFY_DATA);
+
+			/* Read FIFO */
+			fifo_len /= sizeof(u32); /* Size in DWORDS */
+			for (j = 0; j < fifo_len; j++)
+				fifo_data[j] =
+					iwl_trans_read_prph(mvm->trans,
+							    TXF_CPU2_READ_MODIFY_DATA);
+			*dump_data = iwl_fw_error_next_data(*dump_data);
+		}
+	}
+
 	iwl_trans_release_nic_access(mvm->trans, &flags);
 }
 
@@ -429,9 +488,11 @@
 	struct iwl_fw_error_dump_trigger_desc *dump_trig;
 	struct iwl_mvm_dump_ptrs *fw_error_dump;
 	u32 sram_len, sram_ofs;
+	struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
+		mvm->fw->dbg_mem_tlv;
 	u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
-	u32 smem_len = mvm->cfg->smem_len;
-	u32 sram2_len = mvm->cfg->dccm2_len;
+	u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
+	u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
 	bool monitor_dump_only = false;
 	int i;
 
@@ -494,6 +555,22 @@
 					 sizeof(struct iwl_fw_error_dump_fifo);
 		}
 
+		if (fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+			for (i = 0;
+			     i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
+			     i++) {
+				if (!mem_cfg->internal_txfifo_size[i])
+					continue;
+
+				/* Add header info */
+				fifo_data_len +=
+					mem_cfg->internal_txfifo_size[i] +
+					sizeof(*dump_data) +
+					sizeof(struct iwl_fw_error_dump_fifo);
+			}
+		}
+
 		/* Make room for PRPH registers */
 		for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
 			/* The range includes both boundaries */
@@ -511,7 +588,6 @@
 
 	file_len = sizeof(*dump_file) +
 		   sizeof(*dump_data) * 2 +
-		   sram_len + sizeof(*dump_mem) +
 		   fifo_data_len +
 		   prph_len +
 		   radio_len +
@@ -525,8 +601,16 @@
 	if (sram2_len)
 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
 
+	/* Make room for MEM segments */
+	for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
+		if (fw_dbg_mem[i])
+			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+				le32_to_cpu(fw_dbg_mem[i]->len);
+	}
+
 	/* Make room for fw's virtual image pages, if it exists */
-	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
+	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+	    mvm->fw_paging_db[0].fw_paging_block)
 		file_len += mvm->num_of_paging_blk *
 			(sizeof(*dump_data) +
 			 sizeof(struct iwl_fw_error_dump_paging) +
@@ -550,6 +634,9 @@
 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
 			    mvm->fw_dump_desc->len;
 
+	if (!mvm->fw->dbg_dynamic_mem)
+		file_len += sram_len + sizeof(*dump_mem);
+
 	dump_file = vzalloc(file_len);
 	if (!dump_file) {
 		kfree(fw_error_dump);
@@ -599,16 +686,36 @@
 	if (monitor_dump_only)
 		goto dump_trans_data;
 
-	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-	dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
-	dump_mem = (void *)dump_data->data;
-	dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
-	dump_mem->offset = cpu_to_le32(sram_ofs);
-	iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
-				 sram_len);
+	if (!mvm->fw->dbg_dynamic_mem) {
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+		dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
+		dump_mem = (void *)dump_data->data;
+		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+		dump_mem->offset = cpu_to_le32(sram_ofs);
+		iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
+					 sram_len);
+		dump_data = iwl_fw_error_next_data(dump_data);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
+		if (fw_dbg_mem[i]) {
+			u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
+			u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
+
+			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+			dump_data->len = cpu_to_le32(len +
+					sizeof(*dump_mem));
+			dump_mem = (void *)dump_data->data;
+			dump_mem->type = fw_dbg_mem[i]->data_type;
+			dump_mem->offset = cpu_to_le32(ofs);
+			iwl_trans_read_mem_bytes(mvm->trans, ofs,
+						 dump_mem->data,
+						 len);
+			dump_data = iwl_fw_error_next_data(dump_data);
+		}
+	}
 
 	if (smem_len) {
-		dump_data = iwl_fw_error_next_data(dump_data);
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
 		dump_mem = (void *)dump_data->data;
@@ -616,10 +723,10 @@
 		dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
 		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
 					 dump_mem->data, smem_len);
+		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
 	if (sram2_len) {
-		dump_data = iwl_fw_error_next_data(dump_data);
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
 		dump_mem = (void *)dump_data->data;
@@ -627,11 +734,11 @@
 		dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
 		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
 					 dump_mem->data, sram2_len);
+		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
 	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
-		dump_data = iwl_fw_error_next_data(dump_data);
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
 					     sizeof(*dump_mem));
@@ -640,16 +747,17 @@
 		dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
 		iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
 					 dump_mem->data, IWL8260_ICCM_LEN);
+		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
 	/* Dump fw's virtual image */
-	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
+	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+	    mvm->fw_paging_db[0].fw_paging_block) {
 		for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
 			struct iwl_fw_error_dump_paging *paging;
 			struct page *pages =
 				mvm->fw_paging_db[i].fw_paging_block;
 
-			dump_data = iwl_fw_error_next_data(dump_data);
 			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
 			dump_data->len = cpu_to_le32(sizeof(*paging) +
 						     PAGING_BLOCK_SIZE);
@@ -657,10 +765,10 @@
 			paging->index = cpu_to_le32(i);
 			memcpy(paging->data, page_address(pages),
 			       PAGING_BLOCK_SIZE);
+			dump_data = iwl_fw_error_next_data(dump_data);
 		}
 	}
 
-	dump_data = iwl_fw_error_next_data(dump_data);
 	if (prph_len)
 		iwl_dump_prph(mvm->trans, &dump_data);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 594cd0d..b70f453 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -64,6 +64,7 @@
  *
  *****************************************************************************/
 #include <net/mac80211.h>
+#include <linux/netdevice.h>
 
 #include "iwl-trans.h"
 #include "iwl-op-mode.h"
@@ -114,14 +115,18 @@
 	struct iwl_rss_config_cmd cmd = {
 		.flags = cpu_to_le32(IWL_RSS_ENABLE),
 		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+			     IWL_RSS_HASH_TYPE_IPV4_UDP |
 			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
 			     IWL_RSS_HASH_TYPE_IPV6_TCP |
+			     IWL_RSS_HASH_TYPE_IPV6_UDP |
 			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
 	};
 
+	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
 	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
-		cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
-	memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+		cmd.indirection_table[i] =
+			1 + (i % (mvm->trans->num_rx_queues - 1));
+	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
 
 	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
 }
@@ -144,9 +149,11 @@
 
 		__free_pages(mvm->fw_paging_db[i].fw_paging_block,
 			     get_order(mvm->fw_paging_db[i].fw_paging_size));
+		mvm->fw_paging_db[i].fw_paging_block = NULL;
 	}
 	kfree(mvm->trans->paging_download_buf);
 	mvm->trans->paging_download_buf = NULL;
+	mvm->trans->paging_db = NULL;
 
 	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 }
@@ -174,8 +181,12 @@
 		}
 	}
 
-	if (sec_idx >= IWL_UCODE_SECTION_MAX) {
-		IWL_ERR(mvm, "driver didn't find paging image\n");
+	/*
+	 * If paging is enabled there should be at least 2 more sections left
+	 * (one for CSS and one for Paging data)
+	 */
+	if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
+		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
 		iwl_free_fw_paging(mvm);
 		return -EINVAL;
 	}
@@ -410,7 +421,9 @@
 		goto exit;
 	}
 
-	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+	/* Add an extra page for headers */
+	mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
+						  FW_PAGING_SIZE,
 						  GFP_KERNEL);
 	if (!mvm->trans->paging_download_buf) {
 		ret = -ENOMEM;
@@ -641,7 +654,10 @@
 	 */
 
 	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
-	mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
+	if (iwl_mvm_is_dqa_supported(mvm))
+		mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+	else
+		mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
 
 	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
 		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@ -788,17 +804,22 @@
 static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 {
 	struct iwl_host_cmd cmd = {
-		.id = SHARED_MEM_CFG,
 		.flags = CMD_WANT_SKB,
 		.data = { NULL, },
 		.len = { 0, },
 	};
-	struct iwl_rx_packet *pkt;
 	struct iwl_shared_mem_cfg *mem_cfg;
+	struct iwl_rx_packet *pkt;
 	u32 i;
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+	else
+		cmd.id = SHARED_MEM_CFG;
+
 	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
 		return;
 
@@ -824,6 +845,25 @@
 		le32_to_cpu(mem_cfg->page_buff_addr);
 	mvm->shared_mem_cfg.page_buff_size =
 		le32_to_cpu(mem_cfg->page_buff_size);
+
+	/* new API has more data */
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+		mvm->shared_mem_cfg.rxfifo_addr =
+			le32_to_cpu(mem_cfg->rxfifo_addr);
+		mvm->shared_mem_cfg.internal_txfifo_addr =
+			le32_to_cpu(mem_cfg->internal_txfifo_addr);
+
+		BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+			     sizeof(mem_cfg->internal_txfifo_size));
+
+		for (i = 0;
+		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+		     i++)
+			mvm->shared_mem_cfg.internal_txfifo_size[i] =
+				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+	}
+
 	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 
 	iwl_free_resp(&cmd);
@@ -942,7 +982,7 @@
 		goto error;
 
 	/* Add all the PHY contexts */
-	chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
+	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
 	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
 	for (i = 0; i < NUM_PHY_CTX; i++) {
 		/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index e885db3..456067b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -252,10 +252,14 @@
 		.exclude_vif = exclude_vif,
 		.used_hw_queues =
 			BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
-			BIT(mvm->aux_queue) |
-			BIT(IWL_MVM_CMD_QUEUE),
+			BIT(mvm->aux_queue),
 	};
 
+	if (iwl_mvm_is_dqa_supported(mvm))
+		data.used_hw_queues |= BIT(IWL_MVM_DQA_CMD_QUEUE);
+	else
+		data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE);
+
 	lockdep_assert_held(&mvm->mutex);
 
 	/* mark all VIF used hw queues */
@@ -425,12 +429,17 @@
 		return 0;
 	}
 
-	/* Find available queues, and allocate them to the ACs */
+	/*
+	 * Find available queues, and allocate them to the ACs. When in
+	 * DQA-mode they aren't really used, and this is done only so the
+	 * mac80211 ieee80211_check_queues() function won't fail
+	 */
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		u8 queue = find_first_zero_bit(&used_hw_queues,
 					       mvm->first_agg_queue);
 
-		if (queue >= mvm->first_agg_queue) {
+		if (!iwl_mvm_is_dqa_supported(mvm) &&
+		    queue >= mvm->first_agg_queue) {
 			IWL_ERR(mvm, "Failed to allocate queue\n");
 			ret = -EIO;
 			goto exit_fail;
@@ -442,13 +451,19 @@
 
 	/* Allocate the CAB queue for softAP and GO interfaces */
 	if (vif->type == NL80211_IFTYPE_AP) {
-		u8 queue = find_first_zero_bit(&used_hw_queues,
-					       mvm->first_agg_queue);
+		u8 queue;
 
-		if (queue >= mvm->first_agg_queue) {
-			IWL_ERR(mvm, "Failed to allocate cab queue\n");
-			ret = -EIO;
-			goto exit_fail;
+		if (!iwl_mvm_is_dqa_supported(mvm)) {
+			queue = find_first_zero_bit(&used_hw_queues,
+						    mvm->first_agg_queue);
+
+			if (queue >= mvm->first_agg_queue) {
+				IWL_ERR(mvm, "Failed to allocate cab queue\n");
+				ret = -EIO;
+				goto exit_fail;
+			}
+		} else {
+			queue = IWL_MVM_DQA_GCAST_QUEUE;
 		}
 
 		vif->cab_queue = queue;
@@ -495,6 +510,10 @@
 				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 		/* fall through */
 	default:
+		/* If DQA is supported - queues will be enabled when needed */
+		if (iwl_mvm_is_dqa_supported(mvm))
+			break;
+
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
 			iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
 					      vif->hw_queue[ac],
@@ -523,6 +542,14 @@
 				    IWL_MAX_TID_COUNT, 0);
 		/* fall through */
 	default:
+		/*
+		 * If DQA is supported - queues were already disabled, since in
+		 * DQA-mode the queues are a property of the STA and not of the
+		 * vif, and at this point the STA was already deleted
+		 */
+		if (iwl_mvm_is_dqa_supported(mvm))
+			break;
+
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
 			iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
 					    vif->hw_queue[ac],
@@ -532,7 +559,7 @@
 
 static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
 			      struct ieee80211_vif *vif,
-			      enum ieee80211_band band,
+			      enum nl80211_band band,
 			      u8 *cck_rates, u8 *ofdm_rates)
 {
 	struct ieee80211_supported_band *sband;
@@ -703,7 +730,7 @@
 	rcu_read_lock();
 	chanctx = rcu_dereference(vif->chanctx_conf);
 	iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
-					    : IEEE80211_BAND_2GHZ,
+					    : NL80211_BAND_2GHZ,
 			  &cck_ack_rates, &ofdm_ack_rates);
 	rcu_read_unlock();
 
@@ -1038,7 +1065,7 @@
 		cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
 			    RATE_MCS_ANT_POS);
 
-	if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) {
+	if (info->band == NL80211_BAND_5GHZ || vif->p2p) {
 		rate = IWL_FIRST_OFDM_RATE;
 	} else {
 		rate = IWL_FIRST_CCK_RATE;
@@ -1489,7 +1516,7 @@
 	rx_status.device_timestamp = le32_to_cpu(sb->system_time);
 	rx_status.band =
 		(sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
-				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+				NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 	rx_status.freq =
 		ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
 					       rx_status.band);
@@ -1499,5 +1526,5 @@
 	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
 
 	/* pass it as regular rx to mac80211 */
-	ieee80211_rx_napi(mvm->hw, skb, NULL);
+	ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 76e649c..5ace468 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -550,18 +550,18 @@
 	else
 		mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
 
-	if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
-	if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
-		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+	if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+		hw->wiphy->bands[NL80211_BAND_2GHZ] =
+			&mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+	if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
+		hw->wiphy->bands[NL80211_BAND_5GHZ] =
+			&mvm->nvm_data->bands[NL80211_BAND_5GHZ];
 
 		if (fw_has_capa(&mvm->fw->ucode_capa,
 				IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
 		    fw_has_api(&mvm->fw->ucode_capa,
 			       IWL_UCODE_TLV_API_LQ_SS_PARAMS))
-			hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
+			hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
 				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
 	}
 
@@ -665,12 +665,13 @@
 	}
 
 	hw->netdev_features |= mvm->cfg->features;
-	if (!iwl_mvm_is_csum_supported(mvm))
-		hw->netdev_features &= ~NETIF_F_RXCSUM;
-
-	if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
-		hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-			NETIF_F_TSO | NETIF_F_TSO6;
+	if (!iwl_mvm_is_csum_supported(mvm)) {
+		hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
+					 NETIF_F_RXCSUM);
+		/* We may support SW TX CSUM */
+		if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
+			hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
+	}
 
 	ret = ieee80211_register_hw(mvm->hw);
 	if (ret)
@@ -992,6 +993,7 @@
 	iwl_mvm_reset_phy_ctxts(mvm);
 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
 	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+	memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
 	memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
 	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -1147,6 +1149,8 @@
 	/* the fw is stopped, the aux sta is dead: clean up driver state */
 	iwl_mvm_del_aux_sta(mvm);
 
+	iwl_free_fw_paging(mvm);
+
 	/*
 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
 	 * won't be called in this case).
@@ -1178,6 +1182,7 @@
 
 	flush_work(&mvm->d0i3_exit_work);
 	flush_work(&mvm->async_handlers_wk);
+	flush_work(&mvm->add_stream_wk);
 	cancel_delayed_work_sync(&mvm->fw_dump_wk);
 	iwl_mvm_free_fw_dump_desc(mvm);
 
@@ -1821,6 +1826,11 @@
 	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
 
+	if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
+	    mvmvif->lqm_active)
+		iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
+				     0, 0);
+
 	/*
 	 * If we're not associated yet, take the (new) BSSID before associating
 	 * so the firmware knows. If we're already associated, then use the old
@@ -2340,7 +2350,8 @@
 		return;
 	}
 
-	if (iwlwifi_mod_params.uapsd_disable) {
+	if (!vif->p2p &&
+	    (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
 		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
 		return;
 	}
@@ -2376,6 +2387,22 @@
 				    peer_addr, action);
 }
 
+static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
+					     struct iwl_mvm_sta *mvm_sta)
+{
+	struct iwl_mvm_tid_data *tid_data;
+	struct sk_buff *skb;
+	int i;
+
+	spin_lock_bh(&mvm_sta->lock);
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+		tid_data = &mvm_sta->tid_data[i];
+		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+			ieee80211_free_txskb(mvm->hw, skb);
+	}
+	spin_unlock_bh(&mvm_sta->lock);
+}
+
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_sta *sta,
@@ -2396,6 +2423,33 @@
 	/* if a STA is being removed, reuse its ID */
 	flush_work(&mvm->sta_drained_wk);
 
+	/*
+	 * If we are in a STA removal flow and in DQA mode:
+	 *
+	 * This is after the sync_rcu part, so the queues have already been
+	 * flushed. No more TXs on their way in mac80211's path, and no more in
+	 * the queues.
+	 * Also, we won't be getting any new TX frames for this station.
+	 * What we might have are deferred TX frames that need to be taken care
+	 * of.
+	 *
+	 * Drop any still-queued deferred-frame before removing the STA, and
+	 * make sure the worker is no longer handling frames for this STA.
+	 */
+	if (old_state == IEEE80211_STA_NONE &&
+	    new_state == IEEE80211_STA_NOTEXIST &&
+	    iwl_mvm_is_dqa_supported(mvm)) {
+		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+		iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
+		flush_work(&mvm->add_stream_wk);
+
+		/*
+		 * No need to make sure deferred TX indication is off since the
+		 * worker will already remove it if it was on
+		 */
+	}
+
 	mutex_lock(&mvm->mutex);
 	if (old_state == IEEE80211_STA_NOTEXIST &&
 	    new_state == IEEE80211_STA_NONE) {
@@ -2859,7 +2913,7 @@
 			cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
 		.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
 		/* Set the channel info data */
-		.channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
+		.channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
 			PHY_BAND_24 : PHY_BAND_5,
 		.channel_info.channel = channel->hw_value,
 		.channel_info.width = PHY_VHT_CHANNEL_MODE20,
@@ -3628,6 +3682,11 @@
 
 		break;
 	case NL80211_IFTYPE_STATION:
+		if (mvmvif->lqm_active)
+			iwl_mvm_send_lqm_cmd(vif,
+					     LQM_CMD_OPERATION_STOP_MEASUREMENT,
+					     0, 0);
+
 		/* Schedule the time event to a bit before beacon 1,
 		 * to make sure we're in the new channel when the
 		 * GO/AP arrives.
@@ -3727,6 +3786,10 @@
 	if (!vif || vif->type != NL80211_IFTYPE_STATION)
 		return;
 
+	/* Make sure we're done with the deferred traffic before flushing */
+	if (iwl_mvm_is_dqa_supported(mvm))
+		flush_work(&mvm->add_stream_wk);
+
 	mutex_lock(&mvm->mutex);
 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 9abbc93..85800ba 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -208,7 +208,7 @@
 };
 
 #define IWL_CONN_MAX_LISTEN_INTERVAL	10
-#define IWL_UAPSD_MAX_SP		IEEE80211_WMM_IE_STA_QOSINFO_SP_2
+#define IWL_UAPSD_MAX_SP		IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 enum iwl_dbgfs_pm_mask {
@@ -453,6 +453,12 @@
 
 	/* TCP Checksum Offload */
 	netdev_features_t features;
+
+	/*
+	 * link quality measurement - used to check whether this interface
+	 * is in the middle of a link quality measurement
+	 */
+	bool lqm_active;
 };
 
 static inline struct iwl_mvm_vif *
@@ -602,6 +608,9 @@
 	u32 rxfifo_size[RX_FIFO_MAX_NUM];
 	u32 page_buff_addr;
 	u32 page_buff_size;
+	u32 rxfifo_addr;
+	u32 internal_txfifo_addr;
+	u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
 };
 
 struct iwl_mvm {
@@ -656,10 +665,17 @@
 		/* Map to HW queue */
 		u32 hw_queue_to_mac80211;
 		u8 hw_queue_refcount;
+		u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
+		/*
+		 * This is to mark that queue is reserved for a STA but not yet
+		 * allocated. This is needed to make sure we have at least one
+		 * available queue to use when adding a new STA
+		 */
 		bool setup_reserved;
 		u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
 	} queue_info[IWL_MAX_HW_QUEUES];
 	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
+	struct work_struct add_stream_wk; /* To add streams to queues */
 	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
 
 	const char *nvm_file_name;
@@ -679,11 +695,11 @@
 	struct iwl_rx_phy_info last_phy_info;
 	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
 	struct work_struct sta_drained_wk;
+	unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
 	unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
 	atomic_t pending_frames[IWL_MVM_STATION_COUNT];
 	u32 tfd_drained[IWL_MVM_STATION_COUNT];
 	u8 rx_ba_sessions;
-	u32 secret_key[IWL_RSS_HASH_KEY_CNT];
 
 	/* configured by mac80211 */
 	u32 rts_threshold;
@@ -694,6 +710,7 @@
 	struct iwl_mcast_filter_cmd *mcast_filter_cmd;
 	enum iwl_mvm_scan_type scan_type;
 	enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
+	struct timer_list scan_timer;
 
 	/* max number of simultaneous scans the FW supports */
 	unsigned int max_scans;
@@ -1063,7 +1080,8 @@
 {
 	return fw_has_capa(&mvm->fw->ucode_capa,
 			   IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
-		IWL_MVM_P2P_UAPSD_STANDALONE;
+		!(iwlwifi_mod_params.uapsd_disable &
+		  IWL_DISABLE_UAPSD_P2P_CLIENT);
 }
 
 static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
@@ -1115,9 +1133,9 @@
 
 /* Utils */
 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
-					enum ieee80211_band band);
+					enum nl80211_band band);
 void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
-			       enum ieee80211_band band,
+			       enum nl80211_band band,
 			       struct ieee80211_tx_rate *r);
 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
@@ -1297,6 +1315,7 @@
 int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
+void iwl_mvm_scan_timeout(unsigned long data);
 
 /* Scheduled scan */
 void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -1449,26 +1468,10 @@
 bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
 bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
-				    enum ieee80211_band band);
+				    enum nl80211_band band);
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 			   struct ieee80211_tx_info *info, u8 ac);
 
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-				  struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-			       enum ieee80211_rssi_event_data);
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
-				    struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
-					 struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
-					enum ieee80211_band band);
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-				       struct iwl_rx_cmd_buffer *rxb);
-
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void
@@ -1634,4 +1637,10 @@
 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     const char *errmsg);
 
+/* Link Quality Measurement */
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+			 enum iwl_lqm_cmd_operatrions operation,
+			 u32 duration, u32 timeout);
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
+
 #endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5e8ab79..8bfb8e0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -292,7 +292,7 @@
 	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
 		   RX_HANDLER_ASYNC_LOCKED),
 	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
-		       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED),
+		       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
 	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
 		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
 
@@ -421,6 +421,21 @@
 /* Please keep this array *SORTED* by hex value.
  * Access is done through binary search
  */
+static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
+	HCMD_NAME(SHARED_MEM_CFG_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+	HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD),
+	HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
 static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
 	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
 	HCMD_NAME(CTDP_CONFIG_CMD),
@@ -449,6 +464,8 @@
 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
 	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
 	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
+	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
 	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
 	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
 	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
@@ -562,6 +579,7 @@
 	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
 	INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
 
 	spin_lock_init(&mvm->d0i3_tx_lock);
 	spin_lock_init(&mvm->refs_lock);
@@ -601,7 +619,10 @@
 	trans_cfg.command_groups = iwl_mvm_groups;
 	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
 
-	trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+	if (iwl_mvm_is_dqa_supported(mvm))
+		trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+	else
+		trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
 	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
 	trans_cfg.scd_set_active = true;
 
@@ -707,8 +728,8 @@
 
 	iwl_mvm_tof_init(mvm);
 
-	/* init RSS hash key */
-	get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));
+	setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout,
+		    (unsigned long)mvm);
 
 	return op_mode;
 
@@ -761,10 +782,13 @@
 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
 		kfree(mvm->nvm_sections[i].data);
 
-	iwl_free_fw_paging(mvm);
-
 	iwl_mvm_tof_clean(mvm);
 
+	del_timer_sync(&mvm->scan_timer);
+
+	mutex_destroy(&mvm->mutex);
+	mutex_destroy(&mvm->d0i3_suspend_mutex);
+
 	ieee80211_free_hw(mvm->hw);
 }
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 6e6a56f..9513883 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -147,7 +147,7 @@
 	u8 active_cnt, idle_cnt;
 
 	/* Set the channel info data */
-	cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+	cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
 	      PHY_BAND_24 : PHY_BAND_5);
 
 	cmd->ci.channel = chandef->chan->hw_value;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index f313910..7b1f6ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -227,7 +227,7 @@
 			cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
 	}
 
-	cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+	cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
 
 	if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
 	    cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 61d0a8cd..81dd2f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -829,7 +829,7 @@
 
 /* Convert a ucode rate into an rs_rate object */
 static int rs_rate_from_ucode_rate(const u32 ucode_rate,
-				   enum ieee80211_band band,
+				   enum nl80211_band band,
 				   struct rs_rate *rate)
 {
 	u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
@@ -848,7 +848,7 @@
 	if (!(ucode_rate & RATE_MCS_HT_MSK) &&
 	    !(ucode_rate & RATE_MCS_VHT_MSK)) {
 		if (num_of_ant == 1) {
-			if (band == IEEE80211_BAND_5GHZ)
+			if (band == NL80211_BAND_5GHZ)
 				rate->type = LQ_LEGACY_A;
 			else
 				rate->type = LQ_LEGACY_G;
@@ -1043,7 +1043,7 @@
 		return;
 	} else if (is_siso(rate)) {
 		/* Downgrade to Legacy if we were in SISO */
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+		if (lq_sta->band == NL80211_BAND_5GHZ)
 			rate->type = LQ_LEGACY_A;
 		else
 			rate->type = LQ_LEGACY_G;
@@ -1850,7 +1850,7 @@
 	rate->ant = column->ant;
 
 	if (column->mode == RS_LEGACY) {
-		if (lq_sta->band == IEEE80211_BAND_5GHZ)
+		if (lq_sta->band == NL80211_BAND_5GHZ)
 			rate->type = LQ_LEGACY_A;
 		else
 			rate->type = LQ_LEGACY_G;
@@ -2020,7 +2020,7 @@
 }
 
 static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-			   struct rs_rate *rate, enum ieee80211_band band)
+			   struct rs_rate *rate, enum nl80211_band band)
 {
 	int index = rate->index;
 	bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
@@ -2126,7 +2126,7 @@
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	struct ieee80211_vif *vif = mvm_sta->vif;
 	struct ieee80211_chanctx_conf *chanctx_conf;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct iwl_rate_scale_data *window;
 	struct rs_rate *rate = &tbl->rate;
 	enum tpc_action action;
@@ -2148,7 +2148,7 @@
 	rcu_read_lock();
 	chanctx_conf = rcu_dereference(vif->chanctx_conf);
 	if (WARN_ON(!chanctx_conf))
-		band = IEEE80211_NUM_BANDS;
+		band = NUM_NL80211_BANDS;
 	else
 		band = chanctx_conf->def.chan->band;
 	rcu_read_unlock();
@@ -2606,7 +2606,7 @@
 		rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
 	else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
 		rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
-	else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+	else if (lq_sta->band == NL80211_BAND_5GHZ)
 		rate->type = LQ_LEGACY_A;
 	else
 		rate->type = LQ_LEGACY_G;
@@ -2623,7 +2623,7 @@
 	} else {
 		lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
 
-		if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+		if (lq_sta->band == NL80211_BAND_5GHZ) {
 			lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
 			lq_sta->optimal_nentries =
 				ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
@@ -2679,7 +2679,7 @@
 static void rs_get_initial_rate(struct iwl_mvm *mvm,
 				struct ieee80211_sta *sta,
 				struct iwl_lq_sta *lq_sta,
-				enum ieee80211_band band,
+				enum nl80211_band band,
 				struct rs_rate *rate)
 {
 	int i, nentries;
@@ -2714,7 +2714,7 @@
 	rate->index = find_first_bit(&lq_sta->active_legacy_rate,
 				     BITS_PER_LONG);
 
-	if (band == IEEE80211_BAND_5GHZ) {
+	if (band == NL80211_BAND_5GHZ) {
 		rate->type = LQ_LEGACY_A;
 		initial_rates = rs_optimal_rates_5ghz_legacy;
 		nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
@@ -2814,7 +2814,7 @@
 static void rs_initialize_lq(struct iwl_mvm *mvm,
 			     struct ieee80211_sta *sta,
 			     struct iwl_lq_sta *lq_sta,
-			     enum ieee80211_band band,
+			     enum nl80211_band band,
 			     bool init)
 {
 	struct iwl_scale_tbl_info *tbl;
@@ -3097,7 +3097,7 @@
  * Called after adding a new station to initialize rate scaling
  */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  enum ieee80211_band band, bool init)
+			  enum nl80211_band band, bool init)
 {
 	int i, j;
 	struct ieee80211_hw *hw = mvm->hw;
@@ -3203,7 +3203,7 @@
 #ifdef CONFIG_MAC80211_DEBUGFS
 static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
 					    struct iwl_lq_cmd *lq_cmd,
-					    enum ieee80211_band band,
+					    enum nl80211_band band,
 					    u32 ucode_rate)
 {
 	struct rs_rate rate;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index bdb6f2d..90d046f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -305,7 +305,7 @@
 	bool stbc_capable;      /* Tx STBC is supported by chip and Rx by STA */
 	bool bfer_capable;      /* Remote supports beamformee and we BFer */
 
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
 	unsigned long active_legacy_rate;
@@ -358,7 +358,7 @@
 
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  enum ieee80211_band band, bool init);
+			  enum nl80211_band band, bool init);
 
 /* Notify RS about Tx status */
 void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 485cfc1..263e8a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -131,7 +131,7 @@
 				fraglen, rxb->truesize);
 	}
 
-	ieee80211_rx_napi(mvm->hw, skb, napi);
+	ieee80211_rx_napi(mvm->hw, NULL, skb, napi);
 }
 
 /*
@@ -319,7 +319,7 @@
 	rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
 	rx_status->band =
 		(phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
-				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+				NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 	rx_status->freq =
 		ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
 					       rx_status->band);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 9a54f2d..651604d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -210,7 +210,7 @@
 	if (iwl_mvm_check_pn(mvm, skb, queue, sta))
 		kfree_skb(skb);
 	else
-		ieee80211_rx_napi(mvm->hw, skb, napi);
+		ieee80211_rx_napi(mvm->hw, NULL, skb, napi);
 }
 
 static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
@@ -294,10 +294,15 @@
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+	u16 flags = le16_to_cpu(desc->l3l4_flags);
+	u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+			  IWL_RX_L3_PROTO_POS);
 
 	if (mvmvif->features & NETIF_F_RXCSUM &&
-	    desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
-	    desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK))
+	    flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+	    (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+	     l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+	     l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
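The checksum change above relaxes the RX offload check for IPv6: IPv6 has no
header checksum, so the hardware can never set the IP-header-checksum-OK bit
for such frames, and only the TCP/UDP result is meaningful. A condensed,
standalone restatement of the new condition (the flag names below are made up
for illustration; the real bits come from the iwlwifi RX descriptor):

#define HW_L4_CSUM_OK		0x01	/* TCP/UDP checksum verified (hypothetical) */
#define HW_L3_CSUM_OK		0x02	/* IPv4 header checksum verified (hypothetical) */
#define L3_TYPE_IPV6		6	/* hypothetical L3 type codes */
#define L3_TYPE_IPV6_FRAG	7

static bool rx_csum_verified(unsigned int flags, unsigned int l3_type)
{
	bool is_ipv6 = l3_type == L3_TYPE_IPV6 || l3_type == L3_TYPE_IPV6_FRAG;

	/* IPv6 carries no header checksum, so don't demand the L3 bit */
	return (flags & HW_L4_CSUM_OK) &&
	       ((flags & HW_L3_CSUM_OK) || is_ipv6);
}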
 
@@ -451,8 +456,8 @@
 
 	rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
 	rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
-	rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ :
-					       IEEE80211_BAND_2GHZ;
+	rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
+					       NL80211_BAND_2GHZ;
 	rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
 							 rx_status->band);
 	iwl_mvm_get_signal_strength(mvm, desc, rx_status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 09eb72c..6f609dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -70,6 +70,7 @@
 
 #include "mvm.h"
 #include "fw-api-scan.h"
+#include "iwl-io.h"
 
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
@@ -162,16 +163,16 @@
 	return cpu_to_le16(rx_chain);
 }
 
-static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
+static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
 {
-	if (band == IEEE80211_BAND_2GHZ)
+	if (band == NL80211_BAND_2GHZ)
 		return cpu_to_le32(PHY_BAND_24);
 	else
 		return cpu_to_le32(PHY_BAND_5);
 }
 
 static inline __le32
-iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
+iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
 			  bool no_cck)
 {
 	u32 tx_ant;
@@ -181,7 +182,7 @@
 				     mvm->scan_last_antenna_idx);
 	tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
 
-	if (band == IEEE80211_BAND_2GHZ && !no_cck)
+	if (band == NL80211_BAND_2GHZ && !no_cck)
 		return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
 				   tx_ant);
 	else
@@ -398,6 +399,10 @@
 		ieee80211_scan_completed(mvm->hw,
 				scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+		del_timer(&mvm->scan_timer);
+	} else {
+		IWL_ERR(mvm,
+			"got scan complete notification but no scan is running\n");
 	}
 
 	mvm->last_ebs_successful =
@@ -586,14 +591,14 @@
 	tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
 					 TX_CMD_FLG_BT_DIS);
 	tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
-							   IEEE80211_BAND_2GHZ,
+							   NL80211_BAND_2GHZ,
 							   no_cck);
 	tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
 
 	tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
 					 TX_CMD_FLG_BT_DIS);
 	tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
-							   IEEE80211_BAND_5GHZ,
+							   NL80211_BAND_5GHZ,
 							   no_cck);
 	tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
 }
@@ -690,19 +695,19 @@
 
 	/* Insert ds parameter set element on 2.4 GHz band */
 	newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
-						 ies->ies[IEEE80211_BAND_2GHZ],
-						 ies->len[IEEE80211_BAND_2GHZ],
+						 ies->ies[NL80211_BAND_2GHZ],
+						 ies->len[NL80211_BAND_2GHZ],
 						 pos);
 	params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
 	params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
 	pos = newpos;
 
-	memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
-	       ies->len[IEEE80211_BAND_5GHZ]);
+	memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
+	       ies->len[NL80211_BAND_5GHZ]);
 	params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
 	params->preq.band_data[1].len =
-		cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
-	pos += ies->len[IEEE80211_BAND_5GHZ];
+		cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
+	pos += ies->len[NL80211_BAND_5GHZ];
 
 	memcpy(pos, ies->common_ies, ies->common_ie_len);
 	params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
@@ -916,10 +921,10 @@
 	unsigned int rates = 0;
 	int i;
 
-	band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+	band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
 	for (i = 0; i < band->n_bitrates; i++)
 		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
-	band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+	band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
 	for (i = 0; i < band->n_bitrates; i++)
 		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
 
@@ -934,8 +939,8 @@
 	struct iwl_scan_config *scan_config;
 	struct ieee80211_supported_band *band;
 	int num_channels =
-		mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
-		mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+		mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
+		mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
 	int ret, i, j = 0, cmd_size;
 	struct iwl_host_cmd cmd = {
 		.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
@@ -961,6 +966,7 @@
 					 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
 					 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
 					 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+					 SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
 					 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
 					 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
 					 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
@@ -988,10 +994,10 @@
 				     IWL_CHANNEL_FLAG_EBS_ADD |
 				     IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
 
-	band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+	band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
 	for (i = 0; i < band->n_channels; i++, j++)
 		scan_config->channel_array[j] = band->channels[i].hw_value;
-	band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+	band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
 	for (i = 0; i < band->n_channels; i++, j++)
 		scan_config->channel_array[j] = band->channels[i].hw_value;
 
@@ -1216,6 +1222,18 @@
 	return -EIO;
 }
 
+#define SCAN_TIMEOUT (16 * HZ)
+
+void iwl_mvm_scan_timeout(unsigned long data)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)data;
+
+	IWL_ERR(mvm, "regular scan timed out\n");
+
+	del_timer(&mvm->scan_timer);
+	iwl_force_nmi(mvm->trans);
+}
+
 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			   struct cfg80211_scan_request *req,
 			   struct ieee80211_scan_ies *ies)
@@ -1295,6 +1313,8 @@
 	mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
 	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
 
+	mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT);
+
 	return 0;
 }
 
@@ -1412,6 +1432,7 @@
 	if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
 		ieee80211_scan_completed(mvm->hw, aborted);
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+		del_timer(&mvm->scan_timer);
 	} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
 		ieee80211_sched_scan_stopped(mvm->hw);
 		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@@ -1607,6 +1628,7 @@
 		 * to release the scan reference here.
 		 */
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+		del_timer(&mvm->scan_timer);
 		if (notify)
 			ieee80211_scan_completed(mvm->hw, true);
 	} else if (notify) {
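The scan.c hunks above add a firmware watchdog for regular scans: a timer is
armed with mod_timer() when the scan request is sent, cancelled with
del_timer() on every completion and abort path, and iwl_mvm_scan_timeout()
forces an NMI if the firmware never reports completion. A minimal,
self-contained sketch of the same pattern, using the old-style timer API this
patch uses (all names here are hypothetical, not part of the change):

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

#define MY_SCAN_TIMEOUT (16 * HZ)

struct my_scanner {
	struct timer_list watchdog;
};

static void my_scan_timeout(unsigned long data)
{
	struct my_scanner *s = (struct my_scanner *)data;

	/* firmware never reported completion - trigger recovery here */
	pr_err("scanner %p: scan timed out\n", s);
}

static void my_scanner_init(struct my_scanner *s)
{
	setup_timer(&s->watchdog, my_scan_timeout, (unsigned long)s);
}

static void my_scan_start(struct my_scanner *s)
{
	/* arm the watchdog when the scan request goes out */
	mod_timer(&s->watchdog, jiffies + MY_SCAN_TIMEOUT);
}

static void my_scan_done(struct my_scanner *s)
{
	/* every completion/abort path must cancel the watchdog */
	del_timer(&s->watchdog);
}

The same arm/cancel discipline appears on every completed-scan path touched by
this patch; missing one would turn a benign race into a forced NMI.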
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index c2def12..443a428 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -193,7 +193,7 @@
 		}
 	}
 
-	if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
+	if (sta) {
 		BUILD_BUG_ON(sizeof(sf_full_timeout) !=
 			     sizeof(__le32) * SF_NUM_SCENARIO *
 			     SF_NUM_TIMEOUT_TYPES);
@@ -220,9 +220,6 @@
 	struct ieee80211_sta *sta;
 	int ret = 0;
 
-	if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
-		sf_cmd.state = cpu_to_le32(new_state);
-
 	if (mvm->cfg->disable_dummy_notification)
 		sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
 
@@ -235,8 +232,7 @@
 
 	switch (new_state) {
 	case SF_UNINIT:
-		if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
-			iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+		iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
 		break;
 	case SF_FULL_ON:
 		if (sta_id == IWL_MVM_STATION_COUNT) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ef99942..12614b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -111,7 +111,7 @@
 
 /* send station add/update command to firmware */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			   bool update)
+			   bool update, unsigned int flags)
 {
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
@@ -126,9 +126,12 @@
 	u32 status;
 	u32 agg_size = 0, mpdu_dens = 0;
 
-	if (!update) {
+	if (!update || (flags & STA_MODIFY_QUEUES)) {
 		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
 		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+		if (flags & STA_MODIFY_QUEUES)
+			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
 	}
 
 	switch (sta->bandwidth) {
@@ -274,6 +277,211 @@
 		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 }
 
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+				   struct ieee80211_sta *sta, u8 ac, int tid,
+				   struct ieee80211_hdr *hdr)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
+		.sta_id = mvmsta->sta_id,
+		.tid = tid,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+	unsigned int wdg_timeout =
+		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+	u8 mac_queue = mvmsta->vif->hw_queue[ac];
+	int queue = -1;
+	int ssn;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/*
+	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+	 * exists
+	 */
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
+						IWL_MVM_DQA_MAX_MGMT_QUEUE);
+		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+					    queue);
+
+		/* If no such queue is found, we'll use a DATA queue instead */
+	}
+
+	if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+		queue = mvmsta->reserved_queue;
+		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+	}
+
+	if (queue < 0)
+		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+						IWL_MVM_DQA_MAX_DATA_QUEUE);
+	if (queue >= 0)
+		mvm->queue_info[queue].setup_reserved = false;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* TODO: support shared queues for same RA */
+	if (queue < 0)
+		return -ENOSPC;
+
+	/*
+	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
+	 * as aggregatable.
+	 * Mark all DATA queues as allowed to be aggregated at some point
+	 */
+	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
+			    queue, mvmsta->sta_id, tid);
+
+	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
+			   wdg_timeout);
+
+	spin_lock_bh(&mvmsta->lock);
+	mvmsta->tid_data[tid].txq_id = queue;
+	mvmsta->tfd_queue_msk |= BIT(queue);
+
+	if (mvmsta->reserved_queue == queue)
+		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+	spin_unlock_bh(&mvmsta->lock);
+
+	return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+}
+
+static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
+{
+	if (tid == IWL_MAX_TID_COUNT)
+		return IEEE80211_AC_VO; /* MGMT */
+
+	return tid_to_mac80211_ac[tid];
+}
+
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+				       struct ieee80211_sta *sta, int tid)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff_head deferred_tx;
+	u8 mac_queue;
+	bool no_queue = false; /* Marks if there is a problem with the queue */
+	u8 ac;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	skb = skb_peek(&tid_data->deferred_tx_frames);
+	if (!skb)
+		return;
+	hdr = (void *)skb->data;
+
+	ac = iwl_mvm_tid_to_ac_queue(tid);
+	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+		IWL_ERR(mvm,
+			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+			mvmsta->sta_id, tid);
+
+		/*
+		 * Mark queue as problematic so later the deferred traffic is
+		 * freed, as we can do nothing with it
+		 */
+		no_queue = true;
+	}
+
+	__skb_queue_head_init(&deferred_tx);
+
+	/* Disable bottom-halves when entering TX path */
+	local_bh_disable();
+	spin_lock(&mvmsta->lock);
+	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+	spin_unlock(&mvmsta->lock);
+
+	while ((skb = __skb_dequeue(&deferred_tx)))
+		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+			ieee80211_free_txskb(mvm->hw, skb);
+	local_bh_enable();
+
+	/* Wake queue */
+	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+					   add_stream_wk);
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long deferred_tid_traffic;
+	int sta_id, tid;
+
+	mutex_lock(&mvm->mutex);
+
+	/* Go over all stations with deferred traffic */
+	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+			 IWL_MVM_STATION_COUNT) {
+		clear_bit(sta_id, mvm->sta_deferred_frames);
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+		for_each_set_bit(tid, &deferred_tid_traffic,
+				 IWL_MAX_TID_COUNT + 1)
+			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+	}
+
+	mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+				      struct ieee80211_sta *sta,
+				      enum nl80211_iftype vif_type)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	int queue;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/* Make sure we have free resources for this STA */
+	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].setup_reserved)
+		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+	else
+		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+						IWL_MVM_DQA_MAX_DATA_QUEUE);
+	if (queue < 0) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "No available queues for new station\n");
+		return -ENOSPC;
+	}
+	mvm->queue_info[queue].setup_reserved = true;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	mvmsta->reserved_queue = queue;
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+			    queue, mvmsta->sta_id);
+
+	return 0;
+}
+
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		    struct ieee80211_vif *vif,
 		    struct ieee80211_sta *sta)
@@ -314,18 +522,29 @@
 		ret = iwl_mvm_tdls_sta_init(mvm, sta);
 		if (ret)
 			return ret;
-	} else {
+	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
 		for (i = 0; i < IEEE80211_NUM_ACS; i++)
 			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
 				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
 	}
 
 	/* for HW restart - reset everything but the sequence number */
-	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
 		u16 seq = mvm_sta->tid_data[i].seq_number;
 		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
 		mvm_sta->tid_data[i].seq_number = seq;
+
+		if (!iwl_mvm_is_dqa_supported(mvm))
+			continue;
+
+		/*
+		 * Mark all queues for this STA as unallocated and defer TX
+		 * frames until the queue is allocated
+		 */
+		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
 	}
+	mvm_sta->deferred_traffic_tid_map = 0;
 	mvm_sta->agg_tids = 0;
 
 	if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -338,7 +557,14 @@
 		mvm_sta->dup_data = dup_data;
 	}
 
-	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
+						 ieee80211_vif_type_p2p(vif));
+		if (ret)
+			goto err;
+	}
+
+	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
 	if (ret)
 		goto err;
 
@@ -364,7 +590,7 @@
 		       struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta)
 {
-	return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
 }
 
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
@@ -509,6 +735,26 @@
 	mutex_unlock(&mvm->mutex);
 }
 
+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       struct iwl_mvm_sta *mvm_sta)
+{
+	int ac;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+			continue;
+
+		ac = iwl_mvm_tid_to_ac_queue(i);
+		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+				    vif->hw_queue[ac], i, 0);
+		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+	}
+}
+
 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		   struct ieee80211_vif *vif,
 		   struct ieee80211_sta *sta)
@@ -537,6 +783,10 @@
 			return ret;
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
+		/* If DQA is supported - the queues can be disabled now */
+		if (iwl_mvm_is_dqa_supported(mvm))
+			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
 		/* if we are associated - we can't remove the AP STA now */
 		if (vif->bss_conf.assoc)
 			return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1a8f69a..e3efdcd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -80,6 +80,60 @@
 struct iwl_mvm_vif;
 
 /**
+ * DOC: DQA - Dynamic Queue Allocation - introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in the
+ * iwlwifi driver to allow dynamic allocation of queues on demand, rather than
+ * allocating them statically ahead of time. Ideally, we would like one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ *	TXQ #0 - command queue
+ *	TXQ #1 - aux frames
+ *	TXQ #2 - P2P device frames
+ *	TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ *	TXQ #4 - BSS DATA frames queue
+ *	TXQ #5-8 - Non-QoS and MGMT frames queue pool
+ *	TXQ #9 - P2P GO/SoftAP probe responses
+ *	TXQ #10-31 - DATA frames queue pool
+ * The queues are dynamically taken from either the MGMT frames queue pool or
+ * the DATA frames one. See %iwl_mvm_dqa_txq for more information on each
+ * queue.
+ *
+ * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
+ * until a queue is allocated for it, and only then can it be TXed. Therefore,
+ * it is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
+ * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
+ *
+ * For convenience, MGMT is considered as if it has TID=8, and goes to the MGMT
+ * queues in the pool. If there is no longer a free MGMT queue to allocate, a
+ * queue will be allocated from the DATA pool instead. Since QoS NDPs can create
+ * a problem for aggregations, they too will use a MGMT queue.
+ *
+ * When adding a STA, a DATA queue is reserved for it so that it can TX from
+ * it. If no free queue is available to reserve, the STA addition will fail.
+ *
+ * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
+ * new RA/TID comes in for an existing STA, one of the STA's queues will become
+ * shared and will serve more than the single TID (but always for the same RA!).
+ *
+ * When an RA/TID needs to become aggregated, no new queue has to be
+ * allocated; the existing queue is simply marked as aggregated via the
+ * ADD_STA command. Note, however, that a shared queue cannot be aggregated;
+ * only after the other TIDs become inactive and are removed can the queue
+ * be reconfigured and aggregated.
+ *
+ * When removing a station, its queues are returned to the pool for reuse. Here
+ * we also need to make sure that we are synced with the worker thread that TXes
+ * the deferred frames so we don't get into a situation where the queues are
+ * removed and then the worker puts deferred frames onto the released queues or
+ * tries to allocate new queues for a STA we don't need anymore.
+ */
+
+/**
  * DOC: station table - introduction
  *
 * The station table is a list of data structures that represent the stations.
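The DQA description above boils down to a defer-then-flush pattern: a frame
for an RA/TID that has no TXQ yet is parked on a per-TID skb list, and a
worker later allocates the queue and drains the backlog. A compressed sketch
of just that pattern (hypothetical types and a caller-supplied transmit
callback; the real driver spreads this logic across sta.c and tx.c later in
this patch):

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct tid_stream {
	spinlock_t lock;
	struct sk_buff_head deferred;	/* frames parked until a TXQ exists */
	int txq;			/* -1 until the worker allocates one */
};

static void stream_init(struct tid_stream *st)
{
	spin_lock_init(&st->lock);
	__skb_queue_head_init(&st->deferred);
	st->txq = -1;
}

/* TX path: no queue yet - park the frame and kick the allocation worker */
static void stream_defer(struct tid_stream *st, struct sk_buff *skb,
			 struct work_struct *alloc_wk)
{
	spin_lock_bh(&st->lock);
	__skb_queue_tail(&st->deferred, skb);
	spin_unlock_bh(&st->lock);

	schedule_work(alloc_wk);
}

/* Worker: splice the backlog out under the lock, then transmit it */
static void stream_flush(struct tid_stream *st,
			 void (*tx)(struct sk_buff *skb, int txq))
{
	struct sk_buff_head pending;
	struct sk_buff *skb;

	__skb_queue_head_init(&pending);

	spin_lock_bh(&st->lock);
	skb_queue_splice_init(&st->deferred, &pending);
	spin_unlock_bh(&st->lock);

	while ((skb = __skb_dequeue(&pending)))
		tx(skb, st->txq);
}

Splicing under the lock and transmitting outside it is the same trick the real
worker uses to keep the station lock hold time short while draining in order.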
@@ -253,6 +307,7 @@
 
 /**
  * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
  * @seq_number: the next WiFi sequence number to use
  * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
  *	This is basically (last acked packet++).
@@ -260,7 +315,7 @@
  *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
  * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session
+ * @txq_id: Tx queue used by the BA session / DQA
  * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
  *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
  *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -268,6 +323,7 @@
  * @tx_time: medium time consumed by this A-MPDU
  */
 struct iwl_mvm_tid_data {
+	struct sk_buff_head deferred_tx_frames;
 	u16 seq_number;
 	u16 next_reclaimed;
 	/* The rest is Tx AGG related */
@@ -316,7 +372,10 @@
  *	we need to signal the EOSP
  * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
  * and from Tx response flow, it needs a spinlock.
- * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+ * @reserved_queue: the queue reserved for this STA for DQA purposes
+ *	Every STA is given one reserved queue to allow it to operate. If no
+ *	such queue can be guaranteed, the STA addition will fail.
  * @tx_protection: reference counter for controlling the Tx protection.
  * @tt_tx_protection: is thermal throttling enable Tx protection?
  * @disable_tx: is tx to this STA disabled?
@@ -329,6 +388,7 @@
  *	the BA window. To be used for UAPSD only.
  * @ptk_pn: per-queue PTK PN data structures
  * @dup_data: per queue duplicate packet detection data
+ * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
  *
  * When mac80211 creates a station it reserves some space (hw->sta_data_size)
  * in the structure for use by driver. This structure is placed in that
@@ -345,12 +405,16 @@
 	bool bt_reduced_txpower;
 	bool next_status_eosp;
 	spinlock_t lock;
-	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
+	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
 	struct iwl_lq_sta lq_sta;
 	struct ieee80211_vif *vif;
 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
 	struct iwl_mvm_rxq_dup_data *dup_data;
 
+	u16 deferred_traffic_tid_map;
+
+	u8 reserved_queue;
+
 	/* Temporary, until the new TLC will control the Tx protection */
 	s8 tx_protection;
 	bool tt_tx_protection;
@@ -378,8 +442,18 @@
 	u32 tfd_queue_msk;
 };
 
+/**
+ * Send the STA info to the FW.
+ *
+ * @mvm: the iwl_mvm* to use
+ * @sta: the STA
+ * @update: this is true if the FW is being updated about a STA it already knows
+ *	about. Otherwise (if this is a new STA), this should be false.
+ * @flags: if update==true, this marks what is being changed via ORs of values
+ *	from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			   bool update);
+			   bool update, unsigned int flags);
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		    struct ieee80211_vif *vif,
 		    struct ieee80211_sta *sta);
@@ -459,5 +533,6 @@
 				       struct iwl_mvm_vif *mvmvif,
 				       bool disable);
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 
 #endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 18711c5..9f160fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -444,7 +444,7 @@
 	}
 
 	if (chandef) {
-		cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+		cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
 			       PHY_BAND_24 : PHY_BAND_5);
 		cmd.ci.channel = chandef->chan->hw_value;
 		cmd.ci.width = iwl_mvm_get_channel_width(chandef);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f1f2825..eb3f460 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -204,20 +204,11 @@
 	if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
 		return;
 
-	/*
-	 * We are now handling a temperature notification from the firmware
-	 * in ASYNC and hold the mutex. thermal_notify_framework will call
-	 * us back through get_temp() which ought to send a SYNC command to
-	 * the firmware and hence to take the mutex.
-	 * Avoid the deadlock by unlocking the mutex here.
-	 */
 	if (mvm->tz_device.tzone) {
 		struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
 
-		mutex_unlock(&mvm->mutex);
 		thermal_notify_framework(tz_dev->tzone,
 					 tz_dev->fw_trips_index[ths_crossed]);
-		mutex_lock(&mvm->mutex);
 	}
 #endif /* CONFIG_THERMAL */
 }
@@ -796,9 +787,6 @@
 {
 	struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
 
-	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
-		return -EBUSY;
-
 	*state = mvm->cooling_dev.cur_state;
 
 	return 0;
@@ -813,9 +801,6 @@
 	if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
 		return -EIO;
 
-	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
-		return -EBUSY;
-
 	mutex_lock(&mvm->mutex);
 
 	if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 75870e6..c53aa0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -67,6 +67,7 @@
 #include <linux/etherdevice.h>
 #include <linux/tcp.h>
 #include <net/ip.h>
+#include <net/ipv6.h>
 
 #include "iwl-trans.h"
 #include "iwl-eeprom-parse.h"
@@ -98,6 +99,111 @@
 				    addr, tid, ssn);
 }
 
+#define OPT_HDR(type, skb, off) \
+	(type *)(skb_network_header(skb) + (off))
+
+static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+			    struct ieee80211_hdr *hdr,
+			    struct ieee80211_tx_info *info,
+			    struct iwl_tx_cmd *tx_cmd)
+{
+#if IS_ENABLED(CONFIG_INET)
+	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+	u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
+	u8 protocol = 0;
+
+	/*
+	 * Do not compute checksum if already computed or if transport will
+	 * compute it
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
+		return;
+
+	/* We do not expect to be requested to csum stuff we do not support */
+	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
+		      (skb->protocol != htons(ETH_P_IP) &&
+		       skb->protocol != htons(ETH_P_IPV6)),
+		      "No support for requested checksum\n")) {
+		skb_checksum_help(skb);
+		return;
+	}
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		protocol = ip_hdr(skb)->protocol;
+	} else {
+#if IS_ENABLED(CONFIG_IPV6)
+		struct ipv6hdr *ipv6h =
+			(struct ipv6hdr *)skb_network_header(skb);
+		unsigned int off = sizeof(*ipv6h);
+
+		protocol = ipv6h->nexthdr;
+		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+			/* only supported extension headers */
+			if (protocol != NEXTHDR_ROUTING &&
+			    protocol != NEXTHDR_HOP &&
+			    protocol != NEXTHDR_DEST &&
+			    protocol != NEXTHDR_FRAGMENT) {
+				skb_checksum_help(skb);
+				return;
+			}
+
+			if (protocol == NEXTHDR_FRAGMENT) {
+				struct frag_hdr *hp =
+					OPT_HDR(struct frag_hdr, skb, off);
+
+				protocol = hp->nexthdr;
+				off += sizeof(struct frag_hdr);
+			} else {
+				struct ipv6_opt_hdr *hp =
+					OPT_HDR(struct ipv6_opt_hdr, skb, off);
+
+				protocol = hp->nexthdr;
+				off += ipv6_optlen(hp);
+			}
+		}
+		/* if we get here - protocol now should be TCP/UDP */
+#endif
+	}
+
+	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+		WARN_ON_ONCE(1);
+		skb_checksum_help(skb);
+		return;
+	}
+
+	/* enable L4 csum */
+	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+	/*
+	 * Set offset to IP header (snap).
+	 * We don't support tunneling so no need to take care of inner header.
+	 * Size is in words.
+	 */
+	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
+	if (skb->protocol == htons(ETH_P_IP) &&
+	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
+		ip_hdr(skb)->check = 0;
+		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+	}
+
+	/* reset UDP/TCP header csum */
+	if (protocol == IPPROTO_TCP)
+		tcp_hdr(skb)->check = 0;
+	else
+		udp_hdr(skb)->check = 0;
+
+	/* mac header len should include IV, size is in words */
+	if (info->control.hw_key)
+		mh_len += info->control.hw_key->iv_len;
+	mh_len /= 2;
+	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+	tx_cmd->offload_assist = cpu_to_le16(offload_assist);
+#endif
+}
+
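A worked example of the offload_assist packing done by iwl_mvm_tx_csum()
above, with illustrative numbers (bit positions left symbolic):

/*
 * Example (values illustrative, not taken from a capture):
 *
 *   802.11 QoS data MAC header        = 26 bytes
 * + CCMP IV (info->control.hw_key)    =  8 bytes
 *   --------------------------------------------
 *   mh_len = 34 bytes -> 34 / 2 = 17 two-byte "words" in the MH_SIZE field
 *
 * The IP-header offset written above is 4 words = 8 bytes, i.e. the SNAP/LLC
 * header sitting between the MAC header and the IP header. L4_EN is set once
 * the protocol is known to be TCP or UDP; L3_EN (IPv4 header checksum) is set
 * only for A-MSDUs, as the code notes.
 */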
 /*
  * Sets most of the Tx cmd's fields
  */
@@ -105,6 +211,7 @@
 			struct iwl_tx_cmd *tx_cmd,
 			struct ieee80211_tx_info *info, u8 sta_id)
 {
+	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	__le16 fc = hdr->frame_control;
 	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
@@ -126,6 +233,9 @@
 		u8 *qc = ieee80211_get_qos_ctl(hdr);
 		tx_cmd->tid_tspec = qc[0] & 0xf;
 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+			tx_cmd->offload_assist |=
+				cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
 	} else if (ieee80211_is_back_req(fc)) {
 		struct ieee80211_bar *bar = (void *)skb->data;
 		u16 control = le16_to_cpu(bar->control);
@@ -185,10 +295,16 @@
 	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
 	/* Total # bytes to be transmitted */
 	tx_cmd->len = cpu_to_le16((u16)skb->len +
-		(uintptr_t)info->driver_data[0]);
-	tx_cmd->next_frame_len = 0;
+		(uintptr_t)skb_info->driver_data[0]);
 	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
 	tx_cmd->sta_id = sta_id;
+
+	/* padding is inserted later in transport */
+	if (ieee80211_hdrlen(fc) % 4 &&
+	    !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
+		tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
+
+	iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
 }
 
 /*
@@ -244,7 +360,7 @@
 				&mvm->nvm_data->bands[info->band], sta);
 
 	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-	if (info->band == IEEE80211_BAND_5GHZ)
+	if (info->band == NL80211_BAND_5GHZ)
 		rate_idx += IWL_FIRST_OFDM_RATE;
 
 	/* For 2.4 GHZ band, check that there is no need to remap */
@@ -257,7 +373,7 @@
 		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
 				     mvm->mgmt_last_antenna_idx);
 
-	if (info->band == IEEE80211_BAND_2GHZ &&
+	if (info->band == NL80211_BAND_2GHZ &&
 	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
 		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
 	else
@@ -327,10 +443,11 @@
  */
 static struct iwl_device_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
-		      int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
+		      struct ieee80211_tx_info *info, int hdrlen,
+		      struct ieee80211_sta *sta, u8 sta_id)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 	struct iwl_device_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;
 
@@ -350,10 +467,10 @@
 
 	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
 
-	memset(&info->status, 0, sizeof(info->status));
-	memset(info->driver_data, 0, sizeof(info->driver_data));
+	memset(&skb_info->status, 0, sizeof(skb_info->status));
+	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
 
-	info->driver_data[1] = dev_cmd;
+	skb_info->driver_data[1] = dev_cmd;
 
 	return dev_cmd;
 }
@@ -361,22 +478,25 @@
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info info;
 	struct iwl_device_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;
 	u8 sta_id;
 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
-	if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
+	memcpy(&info, skb->cb, sizeof(info));
+
+	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
 		return -1;
 
-	if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
-			 (!info->control.vif ||
-			  info->hw_queue != info->control.vif->cab_queue)))
+	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
+			 (!info.control.vif ||
+			  info.hw_queue != info.control.vif->cab_queue)))
 		return -1;
 
 	/* This holds the amsdu headers length */
-	info->driver_data[0] = (void *)(uintptr_t)0;
+	skb_info->driver_data[0] = (void *)(uintptr_t)0;
 
 	/*
 	 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
@@ -385,7 +505,7 @@
 	 * and hence needs to be sent on the aux queue
 	 */
 	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
-	    info->control.vif->type == NL80211_IFTYPE_STATION)
+	    info.control.vif->type == NL80211_IFTYPE_STATION)
 		IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
 
 	/*
@@ -398,14 +518,14 @@
 	 * AUX station.
 	 */
 	sta_id = mvm->aux_sta.sta_id;
-	if (info->control.vif) {
+	if (info.control.vif) {
 		struct iwl_mvm_vif *mvmvif =
-			iwl_mvm_vif_from_mac80211(info->control.vif);
+			iwl_mvm_vif_from_mac80211(info.control.vif);
 
-		if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-		    info->control.vif->type == NL80211_IFTYPE_AP)
+		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+		    info.control.vif->type == NL80211_IFTYPE_AP)
 			sta_id = mvmvif->bcast_sta.sta_id;
-		else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+		else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
 			 is_multicast_ether_addr(hdr->addr1)) {
 			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
 
@@ -414,19 +534,18 @@
 		}
 	}
 
-	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
+	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue);
 
-	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
+	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
 	if (!dev_cmd)
 		return -1;
 
-	/* From now on, we cannot access info->control */
 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
 	/* Copy MAC header from skb into command buffer */
 	memcpy(tx_cmd->hdr, hdr, hdrlen);
 
-	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
+	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) {
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 		return -1;
 	}
@@ -445,11 +564,11 @@
 
 #ifdef CONFIG_INET
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+			  struct ieee80211_tx_info *info,
 			  struct ieee80211_sta *sta,
 			  struct sk_buff_head *mpdus_skb)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
 	struct sk_buff *tmp, *next;
@@ -459,6 +578,7 @@
 	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
 	u16 amsdu_add, snap_ip_tcp, pad, i = 0;
 	unsigned int dbg_max_amsdu_len;
+	netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
 	u8 *qc, tid, txf;
 
 	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
@@ -478,6 +598,19 @@
 	}
 
 	/*
+	 * Do not build AMSDU for IPv6 with extension headers.
+	 * Ask the stack to segment and checksum the generated MPDUs for us.
+	 */
+	if (skb->protocol == htons(ETH_P_IPV6) &&
+	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+	    IPPROTO_TCP) {
+		num_subframes = 1;
+		pad = 0;
+		netdev_features &= ~NETIF_F_CSUM_MASK;
+		goto segment;
+	}
+
+	/*
 	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
 	 * during a BA session.
 	 */
@@ -544,6 +677,8 @@
 
 	/* This skb fits in one single A-MSDU */
 	if (num_subframes * mss >= tcp_payload_len) {
+		struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+
 		/*
 		 * Compute the length of all the data added for the A-MSDU.
 		 * This will be used to compute the length to write in the TX
@@ -552,11 +687,10 @@
 		 * already had one set of SNAP / IP / TCP headers.
 		 */
 		num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-		info = IEEE80211_SKB_CB(skb);
 		amsdu_add = num_subframes * sizeof(struct ethhdr) +
 			(num_subframes - 1) * (snap_ip_tcp + pad);
 		/* This holds the amsdu headers length */
-		info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
+		skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
 
 		__skb_queue_tail(mpdus_skb, skb);
 		return 0;
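The amsdu_add value computed above (and stashed in driver_data[0]) tells the
firmware how much header overhead A-MSDU building adds on top of skb->len:
one ethernet-style subframe header per subframe, plus one extra SNAP/IP/TCP
header set for every subframe after the first. A small worked example under
simplifying assumptions (IPv4, no IP or TCP options, no subframe padding;
numbers are illustrative only):

#include <linux/if_ether.h>

static unsigned int example_amsdu_overhead(void)
{
	unsigned int snap_ip_tcp = 8 + 20 + 20;	/* SNAP + IPv4 hdr + TCP hdr */
	unsigned int pad = 0;			/* assume already 4-byte aligned */
	unsigned int num_subframes = 3;

	/* 3 * 14 + 2 * 48 = 138 bytes of added headers */
	return num_subframes * sizeof(struct ethhdr) +
	       (num_subframes - 1) * (snap_ip_tcp + pad);
}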
@@ -570,7 +704,7 @@
 	skb_shinfo(skb)->gso_size = num_subframes * mss;
 	memcpy(cb, skb->cb, sizeof(cb));
 
-	next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
+	next = skb_gso_segment(skb, netdev_features);
 	skb_shinfo(skb)->gso_size = mss;
 	if (WARN_ON_ONCE(IS_ERR(next)))
 		return -EINVAL;
@@ -596,11 +730,14 @@
 			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
 
 		if (tcp_payload_len > mss) {
+			struct ieee80211_tx_info *skb_info =
+				IEEE80211_SKB_CB(tmp);
+
 			num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-			info = IEEE80211_SKB_CB(tmp);
 			amsdu_add = num_subframes * sizeof(struct ethhdr) +
 				(num_subframes - 1) * (snap_ip_tcp + pad);
-			info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
+			skb_info->driver_data[0] =
+				(void *)(uintptr_t)amsdu_add;
 			skb_shinfo(tmp)->gso_size = mss;
 		} else {
 			qc = ieee80211_get_qos_ctl((void *)tmp->data);
@@ -622,6 +759,7 @@
 }
 #else /* CONFIG_INET */
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+			  struct ieee80211_tx_info *info,
 			  struct ieee80211_sta *sta,
 			  struct sk_buff_head *mpdus_skb)
 {
@@ -632,14 +770,43 @@
 }
 #endif
 
+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvm_sta, u8 tid,
+				  struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	u8 mac_queue = info->hw_queue;
+	struct sk_buff_head *deferred_tx_frames;
+
+	lockdep_assert_held(&mvm_sta->lock);
+
+	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+	skb_queue_tail(deferred_tx_frames, skb);
+
+	/*
+	 * The first deferred frame should've stopped the MAC queues, so we
+	 * should never get a second deferred frame for the RA/TID.
+	 */
+	if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
+		  "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
+		  skb_queue_len(deferred_tx_frames))) {
+		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+		schedule_work(&mvm->add_stream_wk);
+	}
+}
+
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
 static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+			   struct ieee80211_tx_info *info,
 			   struct ieee80211_sta *sta)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_mvm_sta *mvmsta;
 	struct iwl_device_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;
@@ -647,7 +814,7 @@
 	u16 seq_number = 0;
 	u8 tid = IWL_MAX_TID_COUNT;
 	u8 txq_id = info->hw_queue;
-	bool is_data_qos = false, is_ampdu = false;
+	bool is_ampdu = false;
 	int hdrlen;
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -660,7 +827,8 @@
 	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
 		return -1;
 
-	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
+	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
+					sta, mvmsta->sta_id);
 	if (!dev_cmd)
 		goto drop;
 
@@ -687,8 +855,15 @@
 		seq_number &= IEEE80211_SCTL_SEQ;
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 		hdr->seq_ctrl |= cpu_to_le16(seq_number);
-		is_data_qos = true;
 		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+	} else if (iwl_mvm_is_dqa_supported(mvm) &&
+		   (ieee80211_is_qos_nullfunc(fc) ||
+		    ieee80211_is_nullfunc(fc))) {
+		/*
+		 * nullfunc frames should go to the MGMT queue regardless of QoS
+		 */
+		tid = IWL_MAX_TID_COUNT;
+		txq_id = mvmsta->tid_data[tid].txq_id;
 	}
 
 	/* Copy MAC header from skb into command buffer */
@@ -709,13 +884,30 @@
 		txq_id = mvmsta->tid_data[tid].txq_id;
 	}
 
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		if (unlikely(mvmsta->tid_data[tid].txq_id ==
+			     IEEE80211_INVAL_HW_QUEUE)) {
+			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+			/*
+			 * The frame is now deferred, and the worker scheduled
+			 * will re-allocate it, so we can free it for now.
+			 */
+			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+			spin_unlock(&mvmsta->lock);
+			return 0;
+		}
+
+		txq_id = mvmsta->tid_data[tid].txq_id;
+	}
+
 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
 		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
 
 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
 		goto drop_unlock_sta;
 
-	if (is_data_qos && !ieee80211_has_morefrags(fc))
+	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
 		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
 
 	spin_unlock(&mvmsta->lock);
@@ -736,7 +928,8 @@
 		   struct ieee80211_sta *sta)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info info;
 	struct sk_buff_head mpdus_skbs;
 	unsigned int payload_len;
 	int ret;
@@ -747,21 +940,23 @@
 	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
 		return -1;
 
+	memcpy(&info, skb->cb, sizeof(info));
+
 	/* This holds the amsdu headers length */
-	info->driver_data[0] = (void *)(uintptr_t)0;
+	skb_info->driver_data[0] = (void *)(uintptr_t)0;
 
 	if (!skb_is_gso(skb))
-		return iwl_mvm_tx_mpdu(mvm, skb, sta);
+		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
 
 	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
 		tcp_hdrlen(skb) + skb->data_len;
 
 	if (payload_len <= skb_shinfo(skb)->gso_size)
-		return iwl_mvm_tx_mpdu(mvm, skb, sta);
+		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
 
 	__skb_queue_head_init(&mpdus_skbs);
 
-	ret = iwl_mvm_tx_tso(mvm, skb, sta, &mpdus_skbs);
+	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
 	if (ret)
 		return ret;
 
@@ -771,7 +966,7 @@
 	while (!skb_queue_empty(&mpdus_skbs)) {
 		skb = __skb_dequeue(&mpdus_skbs);
 
-		ret = iwl_mvm_tx_mpdu(mvm, skb, sta);
+		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
 		if (ret) {
 			__skb_queue_purge(&mpdus_skbs);
 			return ret;
@@ -870,7 +1065,7 @@
 #endif /* CONFIG_IWLWIFI_DEBUG */
 
 void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
-			       enum ieee80211_band band,
+			       enum nl80211_band band,
 			       struct ieee80211_tx_rate *r)
 {
 	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 53cdc57..f0ffd62 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -217,14 +217,14 @@
 };
 
 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
-					enum ieee80211_band band)
+					enum nl80211_band band)
 {
 	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
 	int idx;
 	int band_offset = 0;
 
 	/* Legacy rate format, search for match in table */
-	if (band == IEEE80211_BAND_5GHZ)
+	if (band == NL80211_BAND_5GHZ)
 		band_offset = IWL_FIRST_OFDM_RATE;
 	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
 		if (fw_rate_idx_to_plcp[idx] == rate)
@@ -491,98 +491,12 @@
 	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
 }
 
-static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
-{
-	struct iwl_trans *trans = mvm->trans;
-	struct iwl_error_event_table_v1 table;
-	u32 base;
-
-	base = mvm->error_event_table;
-	if (mvm->cur_ucode == IWL_UCODE_INIT) {
-		if (!base)
-			base = mvm->fw->init_errlog_ptr;
-	} else {
-		if (!base)
-			base = mvm->fw->inst_errlog_ptr;
-	}
-
-	if (base < 0x800000) {
-		IWL_ERR(mvm,
-			"Not valid error log pointer 0x%08X for %s uCode\n",
-			base,
-			(mvm->cur_ucode == IWL_UCODE_INIT)
-					? "Init" : "RT");
-		return;
-	}
-
-	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
-	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
-		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
-		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
-			mvm->status, table.valid);
-	}
-
-	/* Do not change this output - scripts rely on it */
-
-	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
-
-	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
-				      table.data1, table.data2, table.data3,
-				      table.blink2, table.ilink1, table.ilink2,
-				      table.bcon_time, table.gp1, table.gp2,
-				      table.gp3, table.ucode_ver, 0,
-				      table.hw_ver, table.brd_ver);
-	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
-		desc_lookup(table.error_id));
-	IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
-	IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
-	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
-	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
-	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
-	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
-	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
-	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
-	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
-	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
-	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
-	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
-	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
-	IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
-	IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
-	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
-	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
-	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
-	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
-	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
-	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
-	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
-	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
-	IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
-	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
-	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
-	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
-	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
-	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
-	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
-	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
-	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
-
-	if (mvm->support_umac_log)
-		iwl_mvm_dump_umac_error_log(mvm);
-}
-
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 {
 	struct iwl_trans *trans = mvm->trans;
 	struct iwl_error_event_table table;
 	u32 base;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
-		iwl_mvm_dump_nic_error_log_old(mvm);
-		return;
-	}
-
 	base = mvm->error_event_table;
 	if (mvm->cur_ucode == IWL_UCODE_INIT) {
 		if (!base)
@@ -694,6 +608,8 @@
 	mvm->queue_info[queue].hw_queue_refcount++;
 	if (mvm->queue_info[queue].hw_queue_refcount > 1)
 		enable_queue = false;
+	else
+		mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
 	mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
 
 	IWL_DEBUG_TX_QUEUES(mvm,
@@ -779,6 +695,8 @@
 		return;
 	}
 
+	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+
 	/* Make sure queue info is correct even though we overwrite it */
 	WARN(mvm->queue_info[queue].hw_queue_refcount ||
 	     mvm->queue_info[queue].tid_bitmap ||
@@ -1079,3 +997,74 @@
 out:
 	ieee80211_connection_loss(vif);
 }
+
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+			 enum iwl_lqm_cmd_operatrions operation,
+			 u32 duration, u32 timeout)
+{
+	struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_link_qual_msrmnt_cmd cmd = {
+		.cmd_operation = cpu_to_le32(operation),
+		.mac_id = cpu_to_le32(mvm_vif->id),
+		.measurement_time = cpu_to_le32(duration),
+		.timeout = cpu_to_le32(timeout),
+	};
+	u32 cmdid =
+		iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0);
+	int ret;
+
+	if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_LQM_SUPPORT))
+		return -EOPNOTSUPP;
+
+	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+		return -EINVAL;
+
+	switch (operation) {
+	case LQM_CMD_OPERATION_START_MEASUREMENT:
+		if (iwl_mvm_lqm_active(mvm_vif->mvm))
+			return -EBUSY;
+		if (!vif->bss_conf.assoc)
+			return -EINVAL;
+		mvm_vif->lqm_active = true;
+		break;
+	case LQM_CMD_OPERATION_STOP_MEASUREMENT:
+		if (!iwl_mvm_lqm_active(mvm_vif->mvm))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd),
+				   &cmd);
+
+	/* command failed - roll back lqm_active state */
+	if (ret) {
+		mvm_vif->lqm_active =
+			operation == LQM_CMD_OPERATION_STOP_MEASUREMENT;
+	}
+
+	return ret;
+}
+
+static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac,
+					struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+	bool *lqm_active = _data;
+
+	*lqm_active = *lqm_active || mvm_vif->lqm_active;
+}
+
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&mvm->mutex);
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_lqm_active_iterator, &ret);
+
+	return ret;
+}
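iwl_mvm_send_lqm_cmd() above enforces a small state machine: a measurement can
only be started on an associated, non-P2P station interface when none is
already running, a stop is only valid while one runs, and lqm_active is rolled
back if the firmware command fails. A hypothetical caller (for example a
debugfs hook, which is not part of this patch) would use it roughly as below;
the duration and timeout units are whatever the firmware API defines:

static int start_link_quality_measurement(struct ieee80211_vif *vif)
{
	int ret;

	ret = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
				   100, 500);
	if (ret == -EBUSY)
		pr_debug("LQM measurement already in progress\n");

	return ret;
}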
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 05b9685..de42066 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -479,21 +479,33 @@
 	{IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
+	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl5165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9560_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
 	{0}
@@ -651,10 +663,8 @@
 	/* The PCI device starts with a reference taken and we are
 	 * supposed to release it here.  But to simplify the
 	 * interaction with the opmode, we don't do it now, but let
-	 * the opmode release it when it's ready.  To account for this
-	 * reference, we start with ref_count set to 1.
+	 * the opmode release it when it's ready.
 	 */
-	trans_pcie->ref_count = 1;
 
 	return 0;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index dadafbd..9ce4ec6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -348,7 +348,7 @@
 struct iwl_trans_pcie {
 	struct iwl_rxq *rxq;
 	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
-	struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
+	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
 	struct iwl_rb_allocator rba;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
@@ -403,10 +403,6 @@
 	bool cmd_hold_nic_awake;
 	bool ref_cmd_in_flight;
 
-	/* protect ref counter */
-	spinlock_t ref_lock;
-	u32 ref_count;
-
 	dma_addr_t fw_mon_phys;
 	struct page *fw_mon_page;
 	u32 fw_mon_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 4be3c35..7f8a232 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -210,8 +210,12 @@
 	if (trans->cfg->mq_rx_supported)
 		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
 			       rxq->write_actual);
-	else
-		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+	/*
+	 * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A for the
+	 * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will
+	 * not wake the NIC.
+	 */
+	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
 }
 
 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -908,6 +912,8 @@
 	allocator_pool_size = trans->num_rx_queues *
 		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
 	num_alloc = queue_size + allocator_pool_size;
+	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
+		     ARRAY_SIZE(trans_pcie->rx_pool));
 	for (i = 0; i < num_alloc; i++) {
 		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
 
@@ -1805,7 +1811,7 @@
 	struct msix_entry *entry = dev_id;
 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
 	struct iwl_trans *trans = trans_pcie->trans;
-	struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats;
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
 	u32 inta_fh, inta_hw;
 
 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index eb39c7e..ee081c22 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -732,8 +732,8 @@
 	 */
 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
 	if (val & (BIT(1) | BIT(17))) {
-		IWL_INFO(trans,
-			 "can't access the RSA semaphore it is write protected\n");
+		IWL_DEBUG_INFO(trans,
+			       "can't access the RSA semaphore it is write protected\n");
 		return 0;
 	}
 
@@ -1321,6 +1321,7 @@
 	 * after this call.
 	 */
 	iwl_pcie_reset_ict(trans);
+	iwl_enable_interrupts(trans);
 
 	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
@@ -1434,7 +1435,7 @@
 	int ret, i;
 
 	if (trans->cfg->mq_rx_supported) {
-		max_vector = min_t(u32, (num_possible_cpus() + 1),
+		max_vector = min_t(u32, (num_possible_cpus() + 2),
 				   IWL_MAX_RX_HW_QUEUES);
 		for (i = 0; i < max_vector; i++)
 			trans_pcie->msix_entries[i].entry = i;
@@ -1465,7 +1466,7 @@
 
 	ret = pci_enable_msi(pdev);
 	if (ret) {
-		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
+		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
 		/* enable rfkill interrupt: hw bug w/a */
 		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
 		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -1499,8 +1500,8 @@
 			IWL_ERR(trans_pcie->trans,
 				"Error allocating IRQ %d\n", i);
 			for (j = 0; j < i; j++)
-				free_irq(trans_pcie->msix_entries[i].vector,
-					 &trans_pcie->msix_entries[i]);
+				free_irq(trans_pcie->msix_entries[j].vector,
+					 &trans_pcie->msix_entries[j]);
 			pci_disable_msix(pdev);
 			return ret;
 		}
@@ -1694,6 +1695,7 @@
 	}
 
 	free_percpu(trans_pcie->tso_hdr_page);
+	mutex_destroy(&trans_pcie->mutex);
 	iwl_trans_free(trans);
 }
 
@@ -2014,38 +2016,32 @@
 void iwl_trans_pcie_ref(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
 
 	if (iwlwifi_mod_params.d0i3_disable)
 		return;
 
-	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
-	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
-	trans_pcie->ref_count++;
 	pm_runtime_get(&trans_pcie->pci_dev->dev);
-	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+
+#ifdef CONFIG_PM
+	IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+		      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
 }
 
 void iwl_trans_pcie_unref(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
 
 	if (iwlwifi_mod_params.d0i3_disable)
 		return;
 
-	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
-	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
-	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
-		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
-		return;
-	}
-	trans_pcie->ref_count--;
-
 	pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
 	pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
 
-	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+#ifdef CONFIG_PM
+	IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+		      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
 }
 
 static const char *get_csr_string(int cmd)
@@ -2793,7 +2789,6 @@
 	trans_pcie->trans = trans;
 	spin_lock_init(&trans_pcie->irq_lock);
 	spin_lock_init(&trans_pcie->reg_lock);
-	spin_lock_init(&trans_pcie->ref_lock);
 	mutex_init(&trans_pcie->mutex);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 	trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 16ad820..e1f7a3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -596,6 +596,28 @@
 	}
 }
 
+static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->reg_lock);
+
+	if (trans_pcie->ref_cmd_in_flight) {
+		trans_pcie->ref_cmd_in_flight = false;
+		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
+		iwl_trans_pcie_unref(trans);
+	}
+
+	if (!trans->cfg->base_params->apmg_wake_up_wa)
+		return;
+	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+		return;
+
+	trans_pcie->cmd_hold_nic_awake = false;
+	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
 /*
  * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
  */
@@ -620,6 +642,20 @@
 		}
 		iwl_pcie_txq_free_tfd(trans, txq);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+
+		if (q->read_ptr == q->write_ptr) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+			if (txq_id != trans_pcie->cmd_queue) {
+				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+					      q->id);
+				iwl_trans_pcie_unref(trans);
+			} else {
+				iwl_pcie_clear_cmd_in_flight(trans);
+			}
+			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+		}
 	}
 	txq->active = false;
 
@@ -1148,29 +1184,6 @@
 	return 0;
 }
 
-static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	lockdep_assert_held(&trans_pcie->reg_lock);
-
-	if (trans_pcie->ref_cmd_in_flight) {
-		trans_pcie->ref_cmd_in_flight = false;
-		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
-		iwl_trans_pcie_unref(trans);
-	}
-
-	if (trans->cfg->base_params->apmg_wake_up_wa) {
-		if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
-			return 0;
-
-		trans_pcie->cmd_hold_nic_awake = false;
-		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
-					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-	}
-	return 0;
-}
-
 /*
  * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
  *
@@ -2197,6 +2210,7 @@
 	__le16 fc;
 	u8 hdr_len;
 	u16 wifi_seq;
+	bool amsdu;
 
 	txq = &trans_pcie->txq[txq_id];
 	q = &txq->q;
@@ -2288,11 +2302,18 @@
 	 */
 	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
 	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
-	tb1_len = ALIGN(len, 4);
-
-	/* Tell NIC about any 2-byte padding after MAC header */
-	if (tb1_len != len)
-		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+	/* do not align A-MSDU to dword as the subframe header aligns it */
+	amsdu = ieee80211_is_data_qos(fc) &&
+		(*ieee80211_get_qos_ctl(hdr) &
+		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+	if (trans_pcie->sw_csum_tx || !amsdu) {
+		tb1_len = ALIGN(len, 4);
+		/* Tell NIC about any 2-byte padding after MAC header */
+		if (tb1_len != len)
+			tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+	} else {
+		tb1_len = len;
+	}
 
 	/* The first TB points to the scratchbuf data - min_copy bytes */
 	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
@@ -2310,8 +2331,7 @@
 		goto out_err;
 	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
 
-	if (ieee80211_is_data_qos(fc) &&
-	    (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) {
+	if (amsdu) {
 		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
 						     out_meta, dev_cmd,
 						     tb1_len)))
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 515aa3f..a8a9bd8 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -1794,7 +1794,7 @@
 		netif_wake_queue(dev);
 		return -1;
 	}
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	/* Since we did not wait for command completion, the card continues
 	 * to process on the background and we will finish handling when
diff --git a/drivers/net/wireless/intersil/orinoco/cfg.c b/drivers/net/wireless/intersil/orinoco/cfg.c
index 0f6ea31..7aa4706 100644
--- a/drivers/net/wireless/intersil/orinoco/cfg.c
+++ b/drivers/net/wireless/intersil/orinoco/cfg.c
@@ -60,14 +60,14 @@
 		if (priv->channel_mask & (1 << i)) {
 			priv->channels[i].center_freq =
 				ieee80211_channel_to_frequency(i + 1,
-							   IEEE80211_BAND_2GHZ);
+							   NL80211_BAND_2GHZ);
 			channels++;
 		}
 	}
 	priv->band.channels = priv->channels;
 	priv->band.n_channels = channels;
 
-	wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+	wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 
 	i = 0;
@@ -175,7 +175,7 @@
 	if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
 		return -EINVAL;
 
-	if (chandef->chan->band != IEEE80211_BAND_2GHZ)
+	if (chandef->chan->band != NL80211_BAND_2GHZ)
 		return -EINVAL;
 
 	channel = ieee80211_frequency_to_channel(chandef->chan->center_freq);
diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
index e27e328..61af5a2 100644
--- a/drivers/net/wireless/intersil/orinoco/hw.c
+++ b/drivers/net/wireless/intersil/orinoco/hw.c
@@ -1193,7 +1193,7 @@
 		goto out;
 
 	}
-	freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+	freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
 
  out:
 	orinoco_unlock(priv, &flags);
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 7b5c554..7afe200 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -1794,7 +1794,7 @@
 			printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
 			       dev->name, err);
 		} else
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 	}
 
 	orinoco_unlock_irq(priv);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index f2cd513..56f109b 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -1275,7 +1275,7 @@
 		goto busy;
 	}
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	stats->tx_bytes += skb->len;
 	goto ok;
 
diff --git a/drivers/net/wireless/intersil/orinoco/scan.c b/drivers/net/wireless/intersil/orinoco/scan.c
index 2c66166..d0ceb06 100644
--- a/drivers/net/wireless/intersil/orinoco/scan.c
+++ b/drivers/net/wireless/intersil/orinoco/scan.c
@@ -111,7 +111,7 @@
 	}
 
 	freq = ieee80211_channel_to_frequency(
-		le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ);
+		le16_to_cpu(bss->a.channel), NL80211_BAND_2GHZ);
 	channel = ieee80211_get_channel(wiphy, freq);
 	if (!channel) {
 		printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
@@ -148,7 +148,7 @@
 	ie_len = len - sizeof(*bss);
 	ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
 	chan = ie ? ie[2] : 0;
-	freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ);
+	freq = ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ);
 	channel = ieee80211_get_channel(wiphy, freq);
 
 	timestamp = le64_to_cpu(bss->timestamp);
diff --git a/drivers/net/wireless/intersil/p54/eeprom.c b/drivers/net/wireless/intersil/p54/eeprom.c
index 2fe713e..d4c73d3 100644
--- a/drivers/net/wireless/intersil/p54/eeprom.c
+++ b/drivers/net/wireless/intersil/p54/eeprom.c
@@ -76,14 +76,14 @@
 	u16 data;
 	int index;
 	int max_power;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 };
 
 struct p54_channel_list {
 	struct p54_channel_entry *channels;
 	size_t entries;
 	size_t max_entries;
-	size_t band_channel_num[IEEE80211_NUM_BANDS];
+	size_t band_channel_num[NUM_NL80211_BANDS];
 };
 
 static int p54_get_band_from_freq(u16 freq)
@@ -91,10 +91,10 @@
 	/* FIXME: sync these values with the 802.11 spec */
 
 	if ((freq >= 2412) && (freq <= 2484))
-		return IEEE80211_BAND_2GHZ;
+		return NL80211_BAND_2GHZ;
 
 	if ((freq >= 4920) && (freq <= 5825))
-		return IEEE80211_BAND_5GHZ;
+		return NL80211_BAND_5GHZ;
 
 	return -1;
 }
@@ -124,16 +124,16 @@
 
 static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
 				  struct ieee80211_supported_band *band_entry,
-				  enum ieee80211_band band)
+				  enum nl80211_band band)
 {
 	/* TODO: generate rate array dynamically */
 
 	switch (band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		band_entry->bitrates = p54_bgrates;
 		band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates);
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		band_entry->bitrates = p54_arates;
 		band_entry->n_bitrates = ARRAY_SIZE(p54_arates);
 		break;
@@ -147,7 +147,7 @@
 static int p54_generate_band(struct ieee80211_hw *dev,
 			     struct p54_channel_list *list,
 			     unsigned int *chan_num,
-			     enum ieee80211_band band)
+			     enum nl80211_band band)
 {
 	struct p54_common *priv = dev->priv;
 	struct ieee80211_supported_band *tmp, *old;
@@ -206,7 +206,7 @@
 
 	if (j == 0) {
 		wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
-			  (band == IEEE80211_BAND_2GHZ) ? 2 : 5);
+			  (band == NL80211_BAND_2GHZ) ? 2 : 5);
 
 		ret = -ENODATA;
 		goto err_out;
@@ -396,7 +396,7 @@
 	     p54_compare_channels, NULL);
 
 	k = 0;
-	for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
+	for (i = 0, j = 0; i < NUM_NL80211_BANDS; i++) {
 		if (p54_generate_band(dev, list, &k, i) == 0)
 			j++;
 	}
@@ -573,10 +573,10 @@
 		for (i = 0; i < entries; i++) {
 			u16 freq = 0;
 			switch (i) {
-			case IEEE80211_BAND_2GHZ:
+			case NL80211_BAND_2GHZ:
 				freq = 2437;
 				break;
-			case IEEE80211_BAND_5GHZ:
+			case NL80211_BAND_5GHZ:
 				freq = 5240;
 				break;
 			}
@@ -902,11 +902,11 @@
 	if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
 		p54_init_xbow_synth(priv);
 	if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
-		dev->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			priv->band_table[IEEE80211_BAND_2GHZ];
+		dev->wiphy->bands[NL80211_BAND_2GHZ] =
+			priv->band_table[NL80211_BAND_2GHZ];
 	if (!(synth & PDR_SYNTH_5_GHZ_DISABLED))
-		dev->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			priv->band_table[IEEE80211_BAND_5GHZ];
+		dev->wiphy->bands[NL80211_BAND_5GHZ] =
+			priv->band_table[NL80211_BAND_5GHZ];
 	if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED)
 		priv->rx_diversity_mask = 3;
 	if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED)
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index 7805864..d5a3bf9 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -477,7 +477,7 @@
 		p54_set_edcf(priv);
 	}
 	if (changed & BSS_CHANGED_BASIC_RATES) {
-		if (dev->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+		if (dev->conf.chandef.chan->band == NL80211_BAND_5GHZ)
 			priv->basic_rate_mask = (info->basic_rates << 4);
 		else
 			priv->basic_rate_mask = info->basic_rates;
@@ -829,7 +829,7 @@
 	struct p54_common *priv = dev->priv;
 	unsigned int i;
 
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+	for (i = 0; i < NUM_NL80211_BANDS; i++)
 		kfree(priv->band_table[i]);
 
 	kfree(priv->iq_autocal);
diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h
index 40b401e..529939e 100644
--- a/drivers/net/wireless/intersil/p54/p54.h
+++ b/drivers/net/wireless/intersil/p54/p54.h
@@ -223,7 +223,7 @@
 	struct p54_cal_database *curve_data;
 	struct p54_cal_database *output_limit;
 	struct p54_cal_database *rssi_db;
-	struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS];
+	struct ieee80211_supported_band *band_table[NUM_NL80211_BANDS];
 
 	/* BBP/MAC state */
 	u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 24e5ff9..1af7da0 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -353,7 +353,7 @@
 	rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
 	if (hdr->rate & 0x10)
 		rx_status->flag |= RX_FLAG_SHORTPRE;
-	if (priv->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+	if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
 		rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
 	else
 		rx_status->rate_idx = rate;
@@ -867,7 +867,7 @@
 	for (i = 0; i < nrates && ridx < 8; i++) {
 		/* we register the rates in perfect order */
 		rate = info->control.rates[i].idx;
-		if (info->band == IEEE80211_BAND_5GHZ)
+		if (info->band == NL80211_BAND_5GHZ)
 			rate += 4;
 
 		/* store the count we actually calculated for TX status */
diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c
index 333c1a2..6700387 100644
--- a/drivers/net/wireless/intersil/prism54/isl_38xx.c
+++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/delay.h>
+#include <linux/ktime.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -113,7 +114,7 @@
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
 	u32 counter = 0;
-	struct timeval current_time;
+	struct timespec64 current_ts64;
 	DEBUG(SHOW_FUNCTION_CALLS, "isl38xx trigger device\n");
 #endif
 
@@ -121,22 +122,22 @@
 	if (asleep) {
 		/* device is in powersave, trigger the device for wakeup */
 #if VERBOSE > SHOW_ERROR_MESSAGES
-		do_gettimeofday(&current_time);
-		DEBUG(SHOW_TRACING, "%08li.%08li Device wakeup triggered\n",
-		      current_time.tv_sec, (long)current_time.tv_usec);
+		ktime_get_real_ts64(&current_ts64);
+		DEBUG(SHOW_TRACING, "%lld.%09ld Device wakeup triggered\n",
+		      (s64)current_ts64.tv_sec, current_ts64.tv_nsec);
 
-		DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
-		      current_time.tv_sec, (long)current_time.tv_usec,
+		DEBUG(SHOW_TRACING, "%lld.%09ld Device register read %08x\n",
+		      (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
 		      readl(device_base + ISL38XX_CTRL_STAT_REG));
 #endif
 
 		reg = readl(device_base + ISL38XX_INT_IDENT_REG);
 		if (reg == 0xabadface) {
 #if VERBOSE > SHOW_ERROR_MESSAGES
-			do_gettimeofday(&current_time);
+			ktime_get_real_ts64(&current_ts64);
 			DEBUG(SHOW_TRACING,
-			      "%08li.%08li Device register abadface\n",
-			      current_time.tv_sec, (long)current_time.tv_usec);
+			      "%lld.%09ld Device register abadface\n",
+			      (s64)current_ts64.tv_sec, current_ts64.tv_nsec);
 #endif
 			/* read the Device Status Register until Sleepmode bit is set */
 			while (reg = readl(device_base + ISL38XX_CTRL_STAT_REG),
@@ -149,13 +150,13 @@
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
 			DEBUG(SHOW_TRACING,
-			      "%08li.%08li Device register read %08x\n",
-			      current_time.tv_sec, (long)current_time.tv_usec,
+			      "%lld.%09ld Device register read %08x\n",
+			      (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
 			      readl(device_base + ISL38XX_CTRL_STAT_REG));
-			do_gettimeofday(&current_time);
+			ktime_get_real_ts64(&current_ts64);
 			DEBUG(SHOW_TRACING,
-			      "%08li.%08li Device asleep counter %i\n",
-			      current_time.tv_sec, (long)current_time.tv_usec,
+			      "%lld.%09ld Device asleep counter %i\n",
+			      (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
 			      counter);
 #endif
 		}
@@ -168,9 +169,9 @@
 
 		/* perform another read on the Device Status Register */
 		reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
-		do_gettimeofday(&current_time);
-		DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
-		      current_time.tv_sec, (long)current_time.tv_usec, reg);
+		ktime_get_real_ts64(&current_ts64);
+		DEBUG(SHOW_TRACING, "%lld.%09ld Device register read %08x\n",
+		      (s64)current_ts64.tv_sec, current_ts64.tv_nsec, reg);
 #endif
 	} else {
 		/* device is (still) awake  */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index e85e073..9ed0ed1 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -255,14 +255,14 @@
 static struct net_device *hwsim_mon; /* global monitor netdev */
 
 #define CHAN2G(_freq)  { \
-	.band = IEEE80211_BAND_2GHZ, \
+	.band = NL80211_BAND_2GHZ, \
 	.center_freq = (_freq), \
 	.hw_value = (_freq), \
 	.max_power = 20, \
 }
 
 #define CHAN5G(_freq) { \
-	.band = IEEE80211_BAND_5GHZ, \
+	.band = NL80211_BAND_5GHZ, \
 	.center_freq = (_freq), \
 	.hw_value = (_freq), \
 	.max_power = 20, \
@@ -479,7 +479,7 @@
 	struct list_head list;
 	struct ieee80211_hw *hw;
 	struct device *dev;
-	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+	struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
 	struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
 	struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
 	struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
@@ -1030,7 +1030,7 @@
 	data->pending_cookie++;
 	cookie = data->pending_cookie;
 	info->rate_driver_data[0] = (void *)cookie;
-	if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, cookie))
+	if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
 		goto nla_put_failure;
 
 	genlmsg_end(skb, msg_head);
@@ -1909,6 +1909,7 @@
 		/* send probes */
 		for (i = 0; i < req->n_ssids; i++) {
 			struct sk_buff *probe;
+			struct ieee80211_mgmt *mgmt;
 
 			probe = ieee80211_probereq_get(hwsim->hw,
 						       hwsim->scan_addr,
@@ -1918,6 +1919,10 @@
 			if (!probe)
 				continue;
 
+			mgmt = (struct ieee80211_mgmt *) probe->data;
+			memcpy(mgmt->da, req->bssid, ETH_ALEN);
+			memcpy(mgmt->bssid, req->bssid, ETH_ALEN);
+
 			if (req->ie_len)
 				memcpy(skb_put(probe, req->ie_len), req->ie,
 				       req->ie_len);
@@ -2342,7 +2347,7 @@
 	u8 addr[ETH_ALEN];
 	struct mac80211_hwsim_data *data;
 	struct ieee80211_hw *hw;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
 	int idx;
 
@@ -2471,16 +2476,16 @@
 		sizeof(hwsim_channels_5ghz));
 	memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
 
-	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
 		struct ieee80211_supported_band *sband = &data->bands[band];
 		switch (band) {
-		case IEEE80211_BAND_2GHZ:
+		case NL80211_BAND_2GHZ:
 			sband->channels = data->channels_2ghz;
 			sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz);
 			sband->bitrates = data->rates;
 			sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
 			break;
-		case IEEE80211_BAND_5GHZ:
+		case NL80211_BAND_5GHZ:
 			sband->channels = data->channels_5ghz;
 			sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
 			sband->bitrates = data->rates + 4;
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 66e1c73..39f2246 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -148,6 +148,7 @@
 	HWSIM_ATTR_RADIO_NAME,
 	HWSIM_ATTR_NO_VIF,
 	HWSIM_ATTR_FREQ,
+	HWSIM_ATTR_PAD,
 	__HWSIM_ATTR_MAX,
 };
 #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 2eea76a..776b44b 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -23,7 +23,7 @@
 
 
 #define CHAN2G(_channel, _freq, _flags) {        \
-	.band             = IEEE80211_BAND_2GHZ, \
+	.band             = NL80211_BAND_2GHZ, \
 	.center_freq      = (_freq),             \
 	.hw_value         = (_channel),          \
 	.flags            = (_flags),            \
@@ -639,7 +639,7 @@
 		if (chan_no != -1) {
 			struct wiphy *wiphy = priv->wdev->wiphy;
 			int freq = ieee80211_channel_to_frequency(chan_no,
-							IEEE80211_BAND_2GHZ);
+							NL80211_BAND_2GHZ);
 			struct ieee80211_channel *channel =
 				ieee80211_get_channel(wiphy, freq);
 
@@ -1266,7 +1266,7 @@
 {
 	struct cfg80211_scan_request *creq = NULL;
 	int i, n_channels = ieee80211_get_num_supported_channels(wiphy);
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
 		       n_channels * sizeof(void *),
@@ -1281,7 +1281,7 @@
 
 	/* Scan all available channels */
 	i = 0;
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		int j;
 
 		if (!wiphy->bands[band])
@@ -2200,7 +2200,7 @@
 	if (lbs_mesh_activated(priv))
 		wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT);
 
-	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
+	wdev->wiphy->bands[NL80211_BAND_2GHZ] = &lbs_band_2ghz;
 
 	/*
 	 * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 4ddd0e5..301170c 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -743,7 +743,7 @@
 	struct cmd_ds_802_11d_domain_info cmd;
 	struct mrvl_ie_domain_param_set *domain = &cmd.domain;
 	struct ieee80211_country_ie_triplet *t;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_channel *ch;
 	u8 num_triplet = 0;
 	u8 num_parsed_chan = 0;
@@ -777,7 +777,7 @@
 	 * etc.
 	 */
 	for (band = 0;
-	     (band < IEEE80211_NUM_BANDS) && (num_triplet < MAX_11D_TRIPLETS);
+	     (band < NUM_NL80211_BANDS) && (num_triplet < MAX_11D_TRIPLETS);
 	     band++) {
 
 		if (!bands[band])
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index a47f0ac..0bf8916 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -570,7 +570,7 @@
 	if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
 		stats.flag |= RX_FLAG_FAILED_FCS_CRC;
 	stats.freq = priv->cur_freq;
-	stats.band = IEEE80211_BAND_2GHZ;
+	stats.band = NL80211_BAND_2GHZ;
 	stats.signal = prxpd->snr;
 	priv->noise = prxpd->nf;
 	/* Marvell rate index has a hole at value 4 */
@@ -642,7 +642,7 @@
 	priv->band.bitrates = priv->rates;
 	priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
 	priv->band.channels = priv->channels;
-	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+	hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 	hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 09578c6..a74cc43 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -59,7 +59,10 @@
 								  skb->len);
 			}
 
-			ret = mwifiex_recv_packet(priv, rx_skb);
+			if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+				ret = mwifiex_uap_recv_packet(priv, rx_skb);
+			else
+				ret = mwifiex_recv_packet(priv, rx_skb);
 			if (ret == -1)
 				mwifiex_dbg(priv->adapter, ERROR,
 					    "Rx of A-MSDU failed");
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index bb7235e..ff948a9 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -474,7 +474,7 @@
 	u8 no_of_parsed_chan = 0;
 	u8 first_chan = 0, next_chan = 0, max_pwr = 0;
 	u8 i, flag = 0;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_channel *ch;
 	struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
@@ -1410,7 +1410,7 @@
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 	struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx);
 
@@ -1586,7 +1586,7 @@
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 	u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct mwifiex_adapter *adapter = priv->adapter;
 
 	if (!priv->media_connected) {
@@ -1600,11 +1600,11 @@
 	memset(bitmap_rates, 0, sizeof(bitmap_rates));
 
 	/* Fill HR/DSSS rates. */
-	if (band == IEEE80211_BAND_2GHZ)
+	if (band == NL80211_BAND_2GHZ)
 		bitmap_rates[0] = mask->control[band].legacy & 0x000f;
 
 	/* Fill OFDM rates */
-	if (band == IEEE80211_BAND_2GHZ)
+	if (band == NL80211_BAND_2GHZ)
 		bitmap_rates[1] = (mask->control[band].legacy & 0x0ff0) >> 4;
 	else
 		bitmap_rates[1] = mask->control[band].legacy;
@@ -1771,7 +1771,7 @@
 	} else {
 		struct ieee80211_sta_ht_cap *ht_info;
 		int rx_mcs_supp;
-		enum ieee80211_band band;
+		enum nl80211_band band;
 
 		if ((tx_ant == 0x1 && rx_ant == 0x1)) {
 			adapter->user_dev_mcs_support = HT_STREAM_1X1;
@@ -1785,7 +1785,7 @@
 						MWIFIEX_11AC_MCS_MAP_2X2;
 		}
 
-		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		for (band = 0; band < NUM_NL80211_BANDS; band++) {
 			if (!adapter->wiphy->bands[band])
 				continue;
 
@@ -1997,7 +1997,7 @@
 	struct cfg80211_bss *bss;
 	int ie_len;
 	u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	if (mwifiex_get_bss_info(priv, &bss_info))
 		return -1;
@@ -2271,7 +2271,7 @@
 	int index = 0, i;
 	u8 config_bands = 0;
 
-	if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+	if (params->chandef.chan->band == NL80211_BAND_2GHZ) {
 		if (!params->basic_rates) {
 			config_bands = BAND_B | BAND_G;
 		} else {
@@ -2859,18 +2859,18 @@
 	mwifiex_init_priv_params(priv, dev);
 	priv->netdev = dev;
 
-	mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv);
+	mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv);
 	if (adapter->is_hw_11ac_capable)
 		mwifiex_setup_vht_caps(
-			&wiphy->bands[IEEE80211_BAND_2GHZ]->vht_cap, priv);
+			&wiphy->bands[NL80211_BAND_2GHZ]->vht_cap, priv);
 
 	if (adapter->config_bands & BAND_A)
 		mwifiex_setup_ht_caps(
-			&wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv);
+			&wiphy->bands[NL80211_BAND_5GHZ]->ht_cap, priv);
 
 	if ((adapter->config_bands & BAND_A) && adapter->is_hw_11ac_capable)
 		mwifiex_setup_vht_caps(
-			&wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv);
+			&wiphy->bands[NL80211_BAND_5GHZ]->vht_cap, priv);
 
 	dev_net_set(dev, wiphy_net(wiphy));
 	dev->ieee80211_ptr = &priv->wdev;
@@ -3272,8 +3272,11 @@
 
 	for (i = 0; i < adapter->priv_num; i++) {
 		priv = adapter->priv[i];
-		if (priv && priv->netdev)
+		if (priv && priv->netdev) {
 			mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+			if (netif_carrier_ok(priv->netdev))
+				netif_carrier_off(priv->netdev);
+		}
 	}
 
 	for (i = 0; i < retry_num; i++) {
@@ -3341,13 +3344,20 @@
 	struct mwifiex_ds_wakeup_reason wakeup_reason;
 	struct cfg80211_wowlan_wakeup wakeup_report;
 	int i;
+	bool report_wakeup_reason = true;
 
 	for (i = 0; i < adapter->priv_num; i++) {
 		priv = adapter->priv[i];
-		if (priv && priv->netdev)
+		if (priv && priv->netdev) {
+			if (!netif_carrier_ok(priv->netdev))
+				netif_carrier_on(priv->netdev);
 			mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
+		}
 	}
 
+	if (!wiphy->wowlan_config)
+		goto done;
+
 	priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
 	mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD,
 				  &wakeup_reason);
@@ -3380,19 +3390,20 @@
 		if (wiphy->wowlan_config->n_patterns)
 			wakeup_report.pattern_idx = 1;
 		break;
-	case CONTROL_FRAME_MATCHED:
-		break;
-	case	MANAGEMENT_FRAME_MATCHED:
+	case GTK_REKEY_FAILURE:
+		if (wiphy->wowlan_config->gtk_rekey_failure)
+			wakeup_report.gtk_rekey_failure = true;
 		break;
 	default:
+		report_wakeup_reason = false;
 		break;
 	}
 
-	if ((wakeup_reason.hs_wakeup_reason > 0) &&
-	    (wakeup_reason.hs_wakeup_reason <= 7))
+	if (report_wakeup_reason)
 		cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report,
 					      GFP_KERNEL);
 
+done:
 	if (adapter->nd_info) {
 		for (i = 0 ; i < adapter->nd_info->n_matches ; i++)
 			kfree(adapter->nd_info->matches[i]);
@@ -3410,6 +3421,16 @@
 
 	device_set_wakeup_enable(adapter->dev, enabled);
 }
+
+static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
+				  struct cfg80211_gtk_rekey_data *data)
+{
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+	return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
+				HostCmd_ACT_GEN_SET, 0, data, true);
+}
+
 #endif
 
 static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq)
@@ -3801,7 +3822,7 @@
 	struct ieee80211_channel *chan;
 	u8 second_chan_offset;
 	enum nl80211_channel_type chan_type;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int freq;
 	int ret = -ENODATA;
 
@@ -3932,6 +3953,7 @@
 	.suspend = mwifiex_cfg80211_suspend,
 	.resume = mwifiex_cfg80211_resume,
 	.set_wakeup = mwifiex_cfg80211_set_wakeup,
+	.set_rekey_data = mwifiex_set_rekey_data,
 #endif
 	.set_coalesce = mwifiex_cfg80211_set_coalesce,
 	.tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
@@ -3948,7 +3970,8 @@
 #ifdef CONFIG_PM
 static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
 	.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
-		WIPHY_WOWLAN_NET_DETECT,
+		WIPHY_WOWLAN_NET_DETECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+		WIPHY_WOWLAN_GTK_REKEY_FAILURE,
 	.n_patterns = MWIFIEX_MEF_MAX_FILTERS,
 	.pattern_min_len = 1,
 	.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
@@ -4031,11 +4054,11 @@
 				 BIT(NL80211_IFTYPE_P2P_GO) |
 				 BIT(NL80211_IFTYPE_AP);
 
-	wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+	wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
 	if (adapter->config_bands & BAND_A)
-		wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+		wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
 	else
-		wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+		wiphy->bands[NL80211_BAND_5GHZ] = NULL;
 
 	if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
 		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
@@ -4086,6 +4109,7 @@
 
 	wiphy->features |= NL80211_FEATURE_HT_IBSS |
 			   NL80211_FEATURE_INACTIVITY_TIMER |
+			   NL80211_FEATURE_LOW_PRIORITY_SCAN |
 			   NL80211_FEATURE_NEED_OBSS_SCAN;
 
 	if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index 09fae27..1ff2205 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -322,9 +322,9 @@
 		return cfp;
 
 	if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
-		sband = priv->wdev.wiphy->bands[IEEE80211_BAND_2GHZ];
+		sband = priv->wdev.wiphy->bands[NL80211_BAND_2GHZ];
 	else
-		sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ];
+		sband = priv->wdev.wiphy->bands[NL80211_BAND_5GHZ];
 
 	if (!sband) {
 		mwifiex_dbg(priv->adapter, ERROR,
@@ -399,15 +399,15 @@
 	int i;
 
 	if (radio_type) {
-		sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+		sband = wiphy->bands[NL80211_BAND_5GHZ];
 		if (WARN_ON_ONCE(!sband))
 			return 0;
-		rate_mask = request->rates[IEEE80211_BAND_5GHZ];
+		rate_mask = request->rates[NL80211_BAND_5GHZ];
 	} else {
-		sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+		sband = wiphy->bands[NL80211_BAND_2GHZ];
 		if (WARN_ON_ONCE(!sband))
 			return 0;
-		rate_mask = request->rates[IEEE80211_BAND_2GHZ];
+		rate_mask = request->rates[NL80211_BAND_2GHZ];
 	}
 
 	num_rates = 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index a12adee..6bc2011 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -105,6 +105,47 @@
 }
 
 /*
+ * This function returns a command to the command free queue.
+ *
+ * The function also calls the completion callback if required, before
+ * cleaning the command node and re-inserting it into the free queue.
+ */
+static void
+mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
+			     struct cmd_ctrl_node *cmd_node)
+{
+	unsigned long flags;
+
+	if (!cmd_node)
+		return;
+
+	if (cmd_node->wait_q_enabled)
+		mwifiex_complete_cmd(adapter, cmd_node);
+	/* Clean the node */
+	mwifiex_clean_cmd_node(adapter, cmd_node);
+
+	/* Insert node into cmd_free_q */
+	spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
+	list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
+	spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
+}
+
+/* This function reuses a command node. */
+void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
+			      struct cmd_ctrl_node *cmd_node)
+{
+	struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data;
+
+	mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+
+	atomic_dec(&adapter->cmd_pending);
+	mwifiex_dbg(adapter, CMD,
+		    "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
+		le16_to_cpu(host_cmd->command),
+		atomic_read(&adapter->cmd_pending));
+}
+
+/*
  * This function sends a host command to the firmware.
  *
  * The function copies the host command into the driver command
@@ -614,47 +655,6 @@
 }
 
 /*
- * This function returns a command to the command free queue.
- *
- * The function also calls the completion callback if required, before
- * cleaning the command node and re-inserting it into the free queue.
- */
-void
-mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
-			     struct cmd_ctrl_node *cmd_node)
-{
-	unsigned long flags;
-
-	if (!cmd_node)
-		return;
-
-	if (cmd_node->wait_q_enabled)
-		mwifiex_complete_cmd(adapter, cmd_node);
-	/* Clean the node */
-	mwifiex_clean_cmd_node(adapter, cmd_node);
-
-	/* Insert node into cmd_free_q */
-	spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
-	list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
-	spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
-}
-
-/* This function reuses a command node. */
-void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
-			      struct cmd_ctrl_node *cmd_node)
-{
-	struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data;
-
-	mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-
-	atomic_dec(&adapter->cmd_pending);
-	mwifiex_dbg(adapter, CMD,
-		    "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
-		le16_to_cpu(host_cmd->command),
-		atomic_read(&adapter->cmd_pending));
-}
-
-/*
  * This function queues a command to the command pending queue.
  *
  * This in effect adds the command to the command list to be executed.
@@ -991,6 +991,23 @@
 		adapter->if_ops.card_reset(adapter);
 }
 
+void
+mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter)
+{
+	struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
+	unsigned long flags;
+
+	/* Cancel all pending scan command */
+	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+	list_for_each_entry_safe(cmd_node, tmp_node,
+				 &adapter->scan_pending_q, list) {
+		list_del(&cmd_node->list);
+		cmd_node->wait_q_enabled = false;
+		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+	}
+	spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+}
+
 /*
  * This function cancels all the pending commands.
  *
@@ -1009,9 +1026,9 @@
 	spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
 	/* Cancel current cmd */
 	if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
-		adapter->curr_cmd->wait_q_enabled = false;
 		adapter->cmd_wait_q.status = -1;
 		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+		adapter->curr_cmd->wait_q_enabled = false;
 		/* no recycle probably wait for response */
 	}
 	/* Cancel all pending command */
@@ -1029,16 +1046,7 @@
 	spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
 	spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
 
-	/* Cancel all pending scan command */
-	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-	list_for_each_entry_safe(cmd_node, tmp_node,
-				 &adapter->scan_pending_q, list) {
-		list_del(&cmd_node->list);
-
-		cmd_node->wait_q_enabled = false;
-		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-	}
-	spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+	mwifiex_cancel_pending_scan_cmd(adapter);
 
 	if (adapter->scan_processing) {
 		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
@@ -1070,9 +1078,8 @@
 void
 mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 {
-	struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
+	struct cmd_ctrl_node *cmd_node = NULL;
 	unsigned long cmd_flags;
-	unsigned long scan_pending_q_flags;
 	struct mwifiex_private *priv;
 	int i;
 
@@ -1094,17 +1101,7 @@
 		mwifiex_recycle_cmd_node(adapter, cmd_node);
 	}
 
-	/* Cancel all pending scan command */
-	spin_lock_irqsave(&adapter->scan_pending_q_lock,
-			  scan_pending_q_flags);
-	list_for_each_entry_safe(cmd_node, tmp_node,
-				 &adapter->scan_pending_q, list) {
-		list_del(&cmd_node->list);
-		cmd_node->wait_q_enabled = false;
-		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-	}
-	spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-			       scan_pending_q_flags);
+	mwifiex_cancel_pending_scan_cmd(adapter);
 
 	if (adapter->scan_processing) {
 		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index c134cf8..8e4145a 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -372,6 +372,7 @@
 #define HostCmd_CMD_COALESCE_CFG                      0x010a
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
+#define HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG             0x010f
 #define HostCmd_CMD_11AC_CFG			      0x0112
 #define HostCmd_CMD_HS_WAKEUP_REASON                  0x0116
 #define HostCmd_CMD_TDLS_CONFIG                       0x0100
@@ -619,6 +620,7 @@
 	MAGIC_PATTERN_MATCHED,
 	CONTROL_FRAME_MATCHED,
 	MANAGEMENT_FRAME_MATCHED,
+	GTK_REKEY_FAILURE,
 	RESERVED
 };
 
@@ -2183,6 +2185,14 @@
 	u16  wakeup_reason;
 } __packed;
 
+struct host_cmd_ds_gtk_rekey_params {
+	__le16 action;
+	u8 kck[NL80211_KCK_LEN];
+	u8 kek[NL80211_KEK_LEN];
+	__le32 replay_ctr_low;
+	__le32 replay_ctr_high;
+} __packed;
+
 struct host_cmd_ds_command {
 	__le16 command;
 	__le16 size;
@@ -2256,6 +2266,7 @@
 		struct host_cmd_ds_multi_chan_policy mc_policy;
 		struct host_cmd_ds_robust_coex coex;
 		struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
+		struct host_cmd_ds_gtk_rekey_params rekey;
 	} params;
 } __packed;
 
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 517653b..78c532f 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -317,7 +317,7 @@
 	for (i = 0; i < dev->num_tx_queues; i++)
 		netdev_get_tx_queue(dev, i)->trans_start = jiffies;
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 }
 
 /*
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 3cfa946..8b67a55 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -702,6 +702,13 @@
 		priv->scan_aborting = true;
 	}
 
+	if (priv->sched_scanning) {
+		mwifiex_dbg(priv->adapter, INFO,
+			    "aborting bgscan on ndo_stop\n");
+		mwifiex_stop_bg_scan(priv);
+		cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+	}
+
 	return 0;
 }
 
@@ -753,13 +760,6 @@
 
 	mwifiex_queue_main_work(priv->adapter);
 
-	if (priv->sched_scanning) {
-		mwifiex_dbg(priv->adapter, INFO,
-			    "aborting bgscan on ndo_stop\n");
-		mwifiex_stop_bg_scan(priv);
-		cfg80211_sched_scan_stopped(priv->wdev.wiphy);
-	}
-
 	return 0;
 }
 
@@ -1074,12 +1074,14 @@
 			     priv->netdev->name, priv->num_tx_timeout);
 	}
 
-	if (adapter->iface_type == MWIFIEX_SDIO) {
-		p += sprintf(p, "\n=== SDIO register dump===\n");
+	if (adapter->iface_type == MWIFIEX_SDIO ||
+	    adapter->iface_type == MWIFIEX_PCIE) {
+		p += sprintf(p, "\n=== %s register dump===\n",
+			     adapter->iface_type == MWIFIEX_SDIO ?
+							"SDIO" : "PCIE");
 		if (adapter->if_ops.reg_dump)
 			p += adapter->if_ops.reg_dump(adapter, p);
 	}
-
 	p += sprintf(p, "\n=== more debug information\n");
 	debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
 	if (debug_info) {
@@ -1432,7 +1434,7 @@
 	struct mwifiex_private *priv = NULL;
 	int i;
 
-	if (down_interruptible(sem))
+	if (down_trylock(sem))
 		goto exit_sem_err;
 
 	if (!adapter)
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index aafc4ab..0207af0 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -37,6 +37,17 @@
 #include <linux/idr.h>
 #include <linux/inetdevice.h>
 #include <linux/devcoredump.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
 
 #include "decl.h"
 #include "ioctl.h"
@@ -100,8 +111,8 @@
 #define SCAN_BEACON_ENTRY_PAD			6
 
 #define MWIFIEX_PASSIVE_SCAN_CHAN_TIME	110
-#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME	30
-#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME	30
+#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME	40
+#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME	40
 #define MWIFIEX_DEF_SCAN_CHAN_GAP_TIME  50
 
 #define SCAN_RSSI(RSSI)					(0x100 - ((u8)(RSSI)))
@@ -1019,6 +1030,8 @@
 int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
 
 int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
+int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
+			    struct sk_buff *skb);
 
 int mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
 				struct sk_buff *skb);
@@ -1040,9 +1053,8 @@
 int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter);
 void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter);
 void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
+void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter);
 
-void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
-				  struct cmd_ctrl_node *cmd_node);
 void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
 			      struct cmd_ctrl_node *cmd_node);
 
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index de36438..0c7937e 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -190,7 +190,6 @@
 
 	if (ent->driver_data) {
 		struct mwifiex_pcie_device *data = (void *)ent->driver_data;
-		card->pcie.firmware = data->firmware;
 		card->pcie.reg = data->reg;
 		card->pcie.blksz_fw_dl = data->blksz_fw_dl;
 		card->pcie.tx_buf_size = data->tx_buf_size;
@@ -269,6 +268,11 @@
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		.driver_data = (unsigned long)&mwifiex_pcie8997,
 	},
+	{
+		PCIE_VENDOR_ID_V2_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		.driver_data = (unsigned long)&mwifiex_pcie8997,
+	},
 	{},
 };
 
@@ -2351,6 +2355,47 @@
 	return 0;
 }
 
+/* Function to dump PCIE scratch registers in case of FW crash
+ */
+static int
+mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
+{
+	char *p = drv_buf;
+	char buf[256], *ptr;
+	int i;
+	u32 value;
+	struct pcie_service_card *card = adapter->card;
+	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+	int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG,
+				  PCIE_SCRATCH_13_REG,
+				  PCIE_SCRATCH_14_REG};
+
+	if (!p)
+		return 0;
+
+	mwifiex_dbg(adapter, MSG, "PCIE register dump start\n");
+
+	if (mwifiex_read_reg(adapter, reg->fw_status, &value)) {
+		mwifiex_dbg(adapter, ERROR, "failed to read firmware status");
+		return 0;
+	}
+
+	ptr = buf;
+	mwifiex_dbg(adapter, MSG, "pcie scratch register:");
+	for (i = 0; i < ARRAY_SIZE(pcie_scratch_reg); i++) {
+		mwifiex_read_reg(adapter, pcie_scratch_reg[i], &value);
+		ptr += sprintf(ptr, "reg:0x%x, value=0x%x\n",
+			       pcie_scratch_reg[i], value);
+	}
+
+	mwifiex_dbg(adapter, MSG, "%s\n", buf);
+	p += sprintf(p, "%s\n", buf);
+
+	mwifiex_dbg(adapter, MSG, "PCIE register dump end\n");
+
+	return p - drv_buf;
+}
+
 /* This function read/write firmware */
 static enum rdwr_status
 mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
@@ -2759,6 +2804,68 @@
 }
 
 /*
+ * This function gets the firmware name for downloading by revision id.
+ *
+ * Read the revision id register to get the revision id.
+ */
+static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
+{
+	int revision_id = 0;
+	int version;
+	struct pcie_service_card *card = adapter->card;
+
+	switch (card->dev->device) {
+	case PCIE_DEVICE_ID_MARVELL_88W8766P:
+		strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME);
+		break;
+	case PCIE_DEVICE_ID_MARVELL_88W8897:
+		mwifiex_write_reg(adapter, 0x0c58, 0x80c00000);
+		mwifiex_read_reg(adapter, 0x0c58, &revision_id);
+		revision_id &= 0xff00;
+		switch (revision_id) {
+		case PCIE8897_A0:
+			strcpy(adapter->fw_name, PCIE8897_A0_FW_NAME);
+			break;
+		case PCIE8897_B0:
+			strcpy(adapter->fw_name, PCIE8897_B0_FW_NAME);
+			break;
+		default:
+			strcpy(adapter->fw_name, PCIE8897_DEFAULT_FW_NAME);
+			break;
+		}
+		break;
+	case PCIE_DEVICE_ID_MARVELL_88W8997:
+		mwifiex_read_reg(adapter, 0x0c48, &revision_id);
+		mwifiex_read_reg(adapter, 0x0cd0, &version);
+		version &= 0x7;
+		switch (revision_id) {
+		case PCIE8997_V2:
+			if (version == CHIP_VER_PCIEUSB)
+				strcpy(adapter->fw_name,
+				       PCIEUSB8997_FW_NAME_V2);
+			else
+				strcpy(adapter->fw_name,
+				       PCIEUART8997_FW_NAME_V2);
+			break;
+		case PCIE8997_Z:
+			if (version == CHIP_VER_PCIEUSB)
+				strcpy(adapter->fw_name,
+				       PCIEUSB8997_FW_NAME_Z);
+			else
+				strcpy(adapter->fw_name,
+				       PCIEUART8997_FW_NAME_Z);
+			break;
+		default:
+			strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME);
+			break;
+		}
+	default:
+		break;
+	}
+}
+
+/*
  * This function registers the PCIE device.
  *
  * PCIE IRQ is claimed, block size is set and driver data is initialized.
@@ -2778,8 +2885,8 @@
 	adapter->tx_buf_size = card->pcie.tx_buf_size;
 	adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl;
 	adapter->num_mem_types = card->pcie.num_mem_types;
-	strcpy(adapter->fw_name, card->pcie.firmware);
 	adapter->ext_scan = card->pcie.can_ext_scan;
+	mwifiex_pcie_get_fw_name(adapter);
 
 	return 0;
 }
@@ -2850,6 +2957,7 @@
 	.cleanup_mpa_buf =		NULL,
 	.init_fw_port =			mwifiex_pcie_init_fw_port,
 	.clean_pcie_ring =		mwifiex_clean_pcie_ring_buf,
+	.reg_dump =			mwifiex_pcie_reg_dump,
 	.device_dump =			mwifiex_pcie_device_dump,
 };
 
@@ -2907,6 +3015,3 @@
 MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
 MODULE_VERSION(PCIE_VERSION);
 MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
-MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
-MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index 29e58ce..5770b43 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -31,13 +31,26 @@
 
 #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
 #define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
-#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin"
+#define PCIE8897_A0_FW_NAME "mrvl/pcie8897_uapsta_a0.bin"
+#define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin"
+#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcieuart8997_combo_v2.bin"
+#define PCIEUART8997_FW_NAME_Z "mrvl/pcieuart8997_combo.bin"
+#define PCIEUART8997_FW_NAME_V2 "mrvl/pcieuart8997_combo_v2.bin"
+#define PCIEUSB8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin"
+#define PCIEUSB8997_FW_NAME_V2 "mrvl/pcieusb8997_combo_v2.bin"
 
 #define PCIE_VENDOR_ID_MARVELL              (0x11ab)
+#define PCIE_VENDOR_ID_V2_MARVELL           (0x1b4b)
 #define PCIE_DEVICE_ID_MARVELL_88W8766P		(0x2b30)
 #define PCIE_DEVICE_ID_MARVELL_88W8897		(0x2b38)
 #define PCIE_DEVICE_ID_MARVELL_88W8997		(0x2b42)
 
+#define PCIE8897_A0	0x1100
+#define PCIE8897_B0	0x1200
+#define PCIE8997_Z	0x0
+#define PCIE8997_V2	0x471
+#define CHIP_VER_PCIEUSB	0x2
+
 /* Constants for Buffer Descriptor (BD) rings */
 #define MWIFIEX_MAX_TXRX_BD			0x20
 #define MWIFIEX_TXBD_MASK			0x3F
@@ -65,6 +78,8 @@
 #define PCIE_SCRATCH_10_REG				0xCE8
 #define PCIE_SCRATCH_11_REG				0xCEC
 #define PCIE_SCRATCH_12_REG				0xCF0
+#define PCIE_SCRATCH_13_REG				0xCF8
+#define PCIE_SCRATCH_14_REG				0xCFC
 #define PCIE_RD_DATA_PTR_Q0_Q1                          0xC08C
 #define PCIE_WR_DATA_PTR_Q0_Q1                          0xC05C
 
@@ -263,7 +278,6 @@
 };
 
 struct mwifiex_pcie_device {
-	const char *firmware;
 	const struct mwifiex_pcie_card_reg *reg;
 	u16 blksz_fw_dl;
 	u16 tx_buf_size;
@@ -274,7 +288,6 @@
 };
 
 static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
-	.firmware       = PCIE8766_DEFAULT_FW_NAME,
 	.reg            = &mwifiex_reg_8766,
 	.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
 	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
@@ -283,7 +296,6 @@
 };
 
 static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
-	.firmware       = PCIE8897_DEFAULT_FW_NAME,
 	.reg            = &mwifiex_reg_8897,
 	.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
 	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
@@ -294,7 +306,6 @@
 };
 
 static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
-	.firmware       = PCIE8997_DEFAULT_FW_NAME,
 	.reg            = &mwifiex_reg_8997,
 	.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
 	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 489f7a9..bc5e52c 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -76,6 +76,39 @@
 	{ 0x00, 0x0f, 0xac, 0x04 },	/* AES  */
 };
 
+static void
+_dbg_security_flags(int log_level, const char *func, const char *desc,
+		    struct mwifiex_private *priv,
+		    struct mwifiex_bssdescriptor *bss_desc)
+{
+	_mwifiex_dbg(priv->adapter, log_level,
+		     "info: %s: %s:\twpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\tEncMode=%#x privacy=%#x\n",
+		     func, desc,
+		     bss_desc->bcn_wpa_ie ?
+		     bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0,
+		     bss_desc->bcn_rsn_ie ?
+		     bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0,
+		     priv->sec_info.wep_enabled ? "e" : "d",
+		     priv->sec_info.wpa_enabled ? "e" : "d",
+		     priv->sec_info.wpa2_enabled ? "e" : "d",
+		     priv->sec_info.encryption_mode,
+		     bss_desc->privacy);
+}
+#define dbg_security_flags(mask, desc, priv, bss_desc) \
+	_dbg_security_flags(MWIFIEX_DBG_##mask, __func__, desc, priv, bss_desc)
+
+static bool
+has_ieee_hdr(struct ieee_types_generic *ie, u8 key)
+{
+	return (ie && ie->ieee_hdr.element_id == key);
+}
+
+static bool
+has_vendor_hdr(struct ieee_types_vendor_specific *ie, u8 key)
+{
+	return (ie && ie->vend_hdr.element_id == key);
+}
+
 /*
  * This function parses a given IE for a given OUI.
  *
@@ -121,8 +154,7 @@
 	struct ie_body *iebody;
 	u8 ret = MWIFIEX_OUI_NOT_PRESENT;
 
-	if (((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).
-					ieee_hdr.element_id == WLAN_EID_RSN))) {
+	if (has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN)) {
 		iebody = (struct ie_body *)
 			 (((u8 *) bss_desc->bcn_rsn_ie->data) +
 			  RSN_GTK_OUI_OFFSET);
@@ -148,9 +180,7 @@
 	struct ie_body *iebody;
 	u8 ret = MWIFIEX_OUI_NOT_PRESENT;
 
-	if (((bss_desc->bcn_wpa_ie) &&
-	     ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id ==
-	      WLAN_EID_VENDOR_SPECIFIC))) {
+	if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
 		iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
 		oui = &mwifiex_wpa_oui[cipher][0];
 		ret = mwifiex_search_oui_in_ie(iebody, oui);
@@ -180,11 +210,8 @@
 		    struct mwifiex_bssdescriptor *bss_desc)
 {
 	if (priv->sec_info.wapi_enabled &&
-	    (bss_desc->bcn_wapi_ie &&
-	     ((*(bss_desc->bcn_wapi_ie)).ieee_hdr.element_id ==
-			WLAN_EID_BSS_AC_ACCESS_DELAY))) {
+	    has_ieee_hdr(bss_desc->bcn_wapi_ie, WLAN_EID_BSS_AC_ACCESS_DELAY))
 		return true;
-	}
 	return false;
 }
 
@@ -197,12 +224,9 @@
 		      struct mwifiex_bssdescriptor *bss_desc)
 {
 	if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
-	    !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) ||
-		((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
-		 WLAN_EID_VENDOR_SPECIFIC)) &&
-	    ((!bss_desc->bcn_rsn_ie) ||
-		((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
-		 WLAN_EID_RSN)) &&
+	    !priv->sec_info.wpa2_enabled &&
+	    !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+	    !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
 	    !priv->sec_info.encryption_mode && !bss_desc->privacy) {
 		return true;
 	}
@@ -233,29 +257,14 @@
 		   struct mwifiex_bssdescriptor *bss_desc)
 {
 	if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled &&
-	    !priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) &&
-	    ((*(bss_desc->bcn_wpa_ie)).
-	     vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC))
+	    !priv->sec_info.wpa2_enabled &&
+	    has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)
 	   /*
 	    * Privacy bit may NOT be set in some APs like
 	    * LinkSys WRT54G && bss_desc->privacy
 	    */
 	 ) {
-		mwifiex_dbg(priv->adapter, INFO,
-			    "info: %s: WPA:\t"
-			    "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
-			    "EncMode=%#x privacy=%#x\n", __func__,
-			    (bss_desc->bcn_wpa_ie) ?
-			    (*bss_desc->bcn_wpa_ie).
-			    vend_hdr.element_id : 0,
-			    (bss_desc->bcn_rsn_ie) ?
-			    (*bss_desc->bcn_rsn_ie).
-			    ieee_hdr.element_id : 0,
-			    (priv->sec_info.wep_enabled) ? "e" : "d",
-			    (priv->sec_info.wpa_enabled) ? "e" : "d",
-			    (priv->sec_info.wpa2_enabled) ? "e" : "d",
-			    priv->sec_info.encryption_mode,
-			    bss_desc->privacy);
+		dbg_security_flags(INFO, "WPA", priv, bss_desc);
 		return true;
 	}
 	return false;
@@ -269,30 +278,14 @@
 mwifiex_is_bss_wpa2(struct mwifiex_private *priv,
 		    struct mwifiex_bssdescriptor *bss_desc)
 {
-	if (!priv->sec_info.wep_enabled &&
-	    !priv->sec_info.wpa_enabled &&
+	if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
 	    priv->sec_info.wpa2_enabled &&
-	    ((bss_desc->bcn_rsn_ie) &&
-	     ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id == WLAN_EID_RSN))) {
+	    has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN)) {
 		/*
 		 * Privacy bit may NOT be set in some APs like
 		 * LinkSys WRT54G && bss_desc->privacy
 		 */
-		mwifiex_dbg(priv->adapter, INFO,
-			    "info: %s: WPA2:\t"
-			    "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
-			    "EncMode=%#x privacy=%#x\n", __func__,
-			    (bss_desc->bcn_wpa_ie) ?
-			    (*bss_desc->bcn_wpa_ie).
-			    vend_hdr.element_id : 0,
-			    (bss_desc->bcn_rsn_ie) ?
-			    (*bss_desc->bcn_rsn_ie).
-			    ieee_hdr.element_id : 0,
-			    (priv->sec_info.wep_enabled) ? "e" : "d",
-			    (priv->sec_info.wpa_enabled) ? "e" : "d",
-			    (priv->sec_info.wpa2_enabled) ? "e" : "d",
-			    priv->sec_info.encryption_mode,
-			    bss_desc->privacy);
+		dbg_security_flags(INFO, "WPA2", priv, bss_desc);
 		return true;
 	}
 	return false;
@@ -308,11 +301,8 @@
 {
 	if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
 	    !priv->sec_info.wpa2_enabled &&
-	    ((!bss_desc->bcn_wpa_ie) ||
-	     ((*(bss_desc->bcn_wpa_ie)).
-	      vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
-	    ((!bss_desc->bcn_rsn_ie) ||
-	     ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
+	    !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+	    !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
 	    !priv->sec_info.encryption_mode && bss_desc->privacy) {
 		return true;
 	}
@@ -329,25 +319,10 @@
 {
 	if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
 	    !priv->sec_info.wpa2_enabled &&
-	    ((!bss_desc->bcn_wpa_ie) ||
-	     ((*(bss_desc->bcn_wpa_ie)).
-	      vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
-	    ((!bss_desc->bcn_rsn_ie) ||
-	     ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
+	    !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+	    !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
 	    priv->sec_info.encryption_mode && bss_desc->privacy) {
-		mwifiex_dbg(priv->adapter, INFO,
-			    "info: %s: dynamic\t"
-			    "WEP: wpa_ie=%#x wpa2_ie=%#x\t"
-			    "EncMode=%#x privacy=%#x\n",
-			    __func__,
-			    (bss_desc->bcn_wpa_ie) ?
-			    (*bss_desc->bcn_wpa_ie).
-			    vend_hdr.element_id : 0,
-			    (bss_desc->bcn_rsn_ie) ?
-			    (*bss_desc->bcn_rsn_ie).
-			    ieee_hdr.element_id : 0,
-			    priv->sec_info.encryption_mode,
-			    bss_desc->privacy);
+		dbg_security_flags(INFO, "dynamic", priv, bss_desc);
 		return true;
 	}
 	return false;
@@ -460,18 +435,7 @@
 		}
 
 		/* Security doesn't match */
-		mwifiex_dbg(adapter, ERROR,
-			    "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t"
-			    "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n",
-			    __func__,
-			    (bss_desc->bcn_wpa_ie) ?
-			    (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0,
-			    (bss_desc->bcn_rsn_ie) ?
-			    (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0,
-			    (priv->sec_info.wep_enabled) ? "e" : "d",
-			    (priv->sec_info.wpa_enabled) ? "e" : "d",
-			    (priv->sec_info.wpa2_enabled) ? "e" : "d",
-			    priv->sec_info.encryption_mode, bss_desc->privacy);
+		dbg_security_flags(ERROR, "failed", priv, bss_desc);
 		return -1;
 	}
 
@@ -494,13 +458,13 @@
 							*scan_chan_list,
 				 u8 filtered_scan)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_channel *ch;
 	struct mwifiex_adapter *adapter = priv->adapter;
 	int chan_idx = 0, i;
 
-	for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
+	for (band = 0; (band < NUM_NL80211_BANDS) ; band++) {
 
 		if (!priv->wdev.wiphy->bands[band])
 			continue;
@@ -534,11 +498,13 @@
 					&= ~MWIFIEX_PASSIVE_SCAN;
 			scan_chan_list[chan_idx].chan_number =
 							(u32) ch->hw_value;
+
+			scan_chan_list[chan_idx].chan_scan_mode_bitmap
+					|= MWIFIEX_DISABLE_CHAN_FILT;
+
 			if (filtered_scan) {
 				scan_chan_list[chan_idx].max_scan_time =
 				cpu_to_le16(adapter->specific_scan_time);
-				scan_chan_list[chan_idx].chan_scan_mode_bitmap
-					|= MWIFIEX_DISABLE_CHAN_FILT;
 			}
 			chan_idx++;
 		}
@@ -557,13 +523,13 @@
 				   struct mwifiex_chan_scan_param_set
 						*scan_chan_list)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_channel *ch;
 	struct mwifiex_adapter *adapter = priv->adapter;
 	int chan_idx = 0, i;
 
-	for (band = 0; (band < IEEE80211_NUM_BANDS); band++) {
+	for (band = 0; (band < NUM_NL80211_BANDS); band++) {
 		if (!priv->wdev.wiphy->bands[band])
 			continue;
 
@@ -655,8 +621,6 @@
 	int ret = 0;
 	struct mwifiex_chan_scan_param_set *tmp_chan_list;
 	struct mwifiex_chan_scan_param_set *start_chan;
-	struct cmd_ctrl_node *cmd_node, *tmp_node;
-	unsigned long flags;
 	u32 tlv_idx, rates_size, cmd_no;
 	u32 total_scan_time;
 	u32 done_early;
@@ -813,16 +777,7 @@
 			    sizeof(struct mwifiex_ie_types_header) + rates_size;
 
 		if (ret) {
-			spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-			list_for_each_entry_safe(cmd_node, tmp_node,
-						 &adapter->scan_pending_q,
-						 list) {
-				list_del(&cmd_node->list);
-				cmd_node->wait_q_enabled = false;
-				mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-			}
-			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-					       flags);
+			mwifiex_cancel_pending_scan_cmd(adapter);
 			break;
 		}
 	}
@@ -912,14 +867,11 @@
 		/* Set the BSS type scan filter, use Adapter setting if
 		   unset */
 		scan_cfg_out->bss_mode =
-			(user_scan_in->bss_mode ? (u8) user_scan_in->
-			 bss_mode : (u8) adapter->scan_mode);
+			(u8)(user_scan_in->bss_mode ?: adapter->scan_mode);
 
 		/* Set the number of probes to send, use Adapter setting
 		   if unset */
-		num_probes =
-			(user_scan_in->num_probes ? user_scan_in->
-			 num_probes : adapter->scan_probes);
+		num_probes = user_scan_in->num_probes ?: adapter->scan_probes;
 
 		/*
 		 * Set the BSSID filter to the incoming configuration,
@@ -1094,28 +1046,24 @@
 		     chan_idx++) {
 
 			channel = user_scan_in->chan_list[chan_idx].chan_number;
-			(scan_chan_list + chan_idx)->chan_number = channel;
+			scan_chan_list[chan_idx].chan_number = channel;
 
 			radio_type =
 				user_scan_in->chan_list[chan_idx].radio_type;
-			(scan_chan_list + chan_idx)->radio_type = radio_type;
+			scan_chan_list[chan_idx].radio_type = radio_type;
 
 			scan_type = user_scan_in->chan_list[chan_idx].scan_type;
 
 			if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
-				(scan_chan_list +
-				 chan_idx)->chan_scan_mode_bitmap
+				scan_chan_list[chan_idx].chan_scan_mode_bitmap
 					|= (MWIFIEX_PASSIVE_SCAN |
 					    MWIFIEX_HIDDEN_SSID_REPORT);
 			else
-				(scan_chan_list +
-				 chan_idx)->chan_scan_mode_bitmap
+				scan_chan_list[chan_idx].chan_scan_mode_bitmap
 					&= ~MWIFIEX_PASSIVE_SCAN;
 
-			if (*filtered_scan)
-				(scan_chan_list +
-				 chan_idx)->chan_scan_mode_bitmap
-					|= MWIFIEX_DISABLE_CHAN_FILT;
+			scan_chan_list[chan_idx].chan_scan_mode_bitmap
+				|= MWIFIEX_DISABLE_CHAN_FILT;
 
 			if (user_scan_in->chan_list[chan_idx].scan_time) {
 				scan_dur = (u16) user_scan_in->
@@ -1129,9 +1077,9 @@
 					scan_dur = adapter->active_scan_time;
 			}
 
-			(scan_chan_list + chan_idx)->min_scan_time =
+			scan_chan_list[chan_idx].min_scan_time =
 				cpu_to_le16(scan_dur);
-			(scan_chan_list + chan_idx)->max_scan_time =
+			scan_chan_list[chan_idx].max_scan_time =
 				cpu_to_le16(scan_dur);
 		}
 
@@ -1991,12 +1939,13 @@
 static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 {
 	struct mwifiex_adapter *adapter = priv->adapter;
-	struct cmd_ctrl_node *cmd_node, *tmp_node;
+	struct cmd_ctrl_node *cmd_node;
 	unsigned long flags;
 
 	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
 	if (list_empty(&adapter->scan_pending_q)) {
 		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
 		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
 		adapter->scan_processing = false;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
@@ -2018,13 +1967,10 @@
 		}
 	} else if ((priv->scan_aborting && !priv->scan_request) ||
 		   priv->scan_block) {
-		list_for_each_entry_safe(cmd_node, tmp_node,
-					 &adapter->scan_pending_q, list) {
-			list_del(&cmd_node->list);
-			mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-		}
 		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
 
+		mwifiex_cancel_pending_scan_cmd(adapter);
+
 		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
 		adapter->scan_processing = false;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index b2c839a..099722e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -73,6 +73,66 @@
 	{"EXTLAST", NULL, 0, 0xFE},
 };
 
+static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+	{ .compatible = "marvell,sd8897" },
+	{ .compatible = "marvell,sd8997" },
+	{ }
+};
+
+static irqreturn_t mwifiex_wake_irq_wifi(int irq, void *priv)
+{
+	struct mwifiex_plt_wake_cfg *cfg = priv;
+
+	if (cfg->irq_wifi >= 0) {
+		pr_info("%s: wake by wifi\n", __func__);
+		cfg->wake_by_wifi = true;
+		disable_irq_nosync(irq);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* This function parses the device tree node using the mmc subnode
+ * devicetree API. The device node is saved in card->plt_of_node.
+ * If the device tree node exists and includes an interrupts attribute,
+ * this function will also request the platform specific wakeup interrupt.
+ */
+static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
+{
+	struct mwifiex_plt_wake_cfg *cfg;
+	int ret;
+
+	if (!dev->of_node ||
+	    !of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) {
+		pr_err("sdio platform data not available\n");
+		return -1;
+	}
+
+	card->plt_of_node = dev->of_node;
+	card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg),
+					  GFP_KERNEL);
+	cfg = card->plt_wake_cfg;
+	if (cfg && card->plt_of_node) {
+		cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0);
+		if (!cfg->irq_wifi) {
+			dev_err(dev, "failed to parse irq_wifi from device tree\n");
+		} else {
+			ret = devm_request_irq(dev, cfg->irq_wifi,
+					       mwifiex_wake_irq_wifi,
+					       IRQF_TRIGGER_LOW,
+					       "wifi_wake", cfg);
+			if (ret) {
+				dev_err(dev,
+					"Failed to request irq_wifi %d (%d)\n",
+					cfg->irq_wifi, ret);
+			}
+			disable_irq(cfg->irq_wifi);
+		}
+	}
+
+	return 0;
+}
+
 /*
  * SDIO probe.
  *
@@ -127,6 +187,9 @@
 		return -EIO;
 	}
 
+	/* Device tree node parsing and platform specific configuration */
+	mwifiex_sdio_probe_of(&func->dev, card);
+
 	if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
 			     MWIFIEX_SDIO)) {
 		pr_err("%s: add card failed\n", __func__);
@@ -183,6 +246,13 @@
 	mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
 			  MWIFIEX_SYNC_CMD);
 
+	/* Disable platform specific wakeup interrupt */
+	if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) {
+		disable_irq_wake(card->plt_wake_cfg->irq_wifi);
+		if (!card->plt_wake_cfg->wake_by_wifi)
+			disable_irq(card->plt_wake_cfg->irq_wifi);
+	}
+
 	return 0;
 }
 
@@ -262,6 +332,13 @@
 
 	adapter = card->adapter;
 
+	/* Enable platform specific wakeup interrupt */
+	if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) {
+		card->plt_wake_cfg->wake_by_wifi = false;
+		enable_irq(card->plt_wake_cfg->irq_wifi);
+		enable_irq_wake(card->plt_wake_cfg->irq_wifi);
+	}
+
 	/* Enable the Host Sleep */
 	if (!mwifiex_enable_hs(adapter)) {
 		mwifiex_dbg(adapter, ERROR,
@@ -1026,13 +1103,12 @@
 		offset += txlen;
 	} while (true);
 
-	sdio_release_host(card->func);
-
 	mwifiex_dbg(adapter, MSG,
 		    "info: FW download over, size %d bytes\n", offset);
 
 	ret = 0;
 done:
+	sdio_release_host(card->func);
 	kfree(fwbuf);
 	return ret;
 }
@@ -1123,8 +1199,8 @@
 				    __func__, pkt_len, blk_size);
 			break;
 		}
-		skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
-							 GFP_KERNEL | GFP_DMA);
+
+		skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, GFP_KERNEL);
 		if (!skb_deaggr)
 			break;
 		skb_put(skb_deaggr, pkt_len);
@@ -1373,8 +1449,7 @@
 
 			/* copy pkt to deaggr buf */
 			skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
-								 GFP_KERNEL |
-								 GFP_DMA);
+								 GFP_KERNEL);
 			if (!skb_deaggr) {
 				mwifiex_dbg(adapter, ERROR, "skb allocation failure\t"
 					    "drop pkt len=%d type=%d\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index b9fbc5cf..db837f1 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -154,6 +154,11 @@
 	a->mpa_rx.start_port = 0;					\
 } while (0)
 
+struct mwifiex_plt_wake_cfg {
+	int irq_wifi;
+	bool wake_by_wifi;
+};
+
 /* data structure for SDIO MPA TX */
 struct mwifiex_sdio_mpa_tx {
 	/* multiport tx aggregation buffer pointer */
@@ -237,6 +242,8 @@
 struct sdio_mmc_card {
 	struct sdio_func *func;
 	struct mwifiex_adapter *adapter;
+	struct device_node *plt_of_node;
+	struct mwifiex_plt_wake_cfg *plt_wake_cfg;
 
 	const char *firmware;
 	const struct mwifiex_sdio_card_reg *reg;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 30f1526..e436574 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1558,6 +1558,30 @@
 	return 0;
 }
 
+static int mwifiex_cmd_gtk_rekey_offload(struct mwifiex_private *priv,
+					 struct host_cmd_ds_command *cmd,
+					 u16 cmd_action,
+					 struct cfg80211_gtk_rekey_data *data)
+{
+	struct host_cmd_ds_gtk_rekey_params *rekey = &cmd->params.rekey;
+	u64 rekey_ctr;
+
+	cmd->command = cpu_to_le16(HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG);
+	cmd->size = cpu_to_le16(sizeof(*rekey) + S_DS_GEN);
+
+	rekey->action = cpu_to_le16(cmd_action);
+	if (cmd_action == HostCmd_ACT_GEN_SET) {
+		memcpy(rekey->kek, data->kek, NL80211_KEK_LEN);
+		memcpy(rekey->kck, data->kck, NL80211_KCK_LEN);
+		rekey_ctr = be64_to_cpup((__be64 *)data->replay_ctr);
+		rekey->replay_ctr_low = cpu_to_le32((u32)rekey_ctr);
+		rekey->replay_ctr_high =
+			cpu_to_le32((u32)((u64)rekey_ctr >> 32));
+	}
+
+	return 0;
+}
+
 static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
 			 struct host_cmd_ds_command *cmd,
@@ -2094,6 +2118,10 @@
 		ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action,
 					      data_buf);
 		break;
+	case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
+		ret = mwifiex_cmd_gtk_rekey_offload(priv, cmd_ptr, cmd_action,
+						    data_buf);
+		break;
 	default:
 		mwifiex_dbg(priv->adapter, ERROR,
 			    "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2134,6 +2162,7 @@
 	enum state_11d_t state_11d;
 	struct mwifiex_ds_11n_tx_cfg tx_cfg;
 	u8 sdio_sp_rx_aggr_enable;
+	u32 data;
 
 	if (first_sta) {
 		if (priv->adapter->iface_type == MWIFIEX_PCIE) {
@@ -2154,9 +2183,16 @@
 		 * The cal-data can be read from device tree and/or
 		 * a configuration file and downloaded to firmware.
 		 */
-		adapter->dt_node =
-				of_find_node_by_name(NULL, "marvell_cfgdata");
-		if (adapter->dt_node) {
+		if (priv->adapter->iface_type == MWIFIEX_SDIO &&
+		    adapter->dev->of_node) {
+			adapter->dt_node = adapter->dev->of_node;
+			if (of_property_read_u32(adapter->dt_node,
+						 "marvell,wakeup-pin",
+						 &data) == 0) {
+				pr_debug("Wakeup pin = 0x%x\n", data);
+				adapter->hs_cfg.gpio = data;
+			}
+
 			ret = mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node,
 						      "marvell,caldata");
 			if (ret)
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index d96523e..d18c797 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -44,7 +44,6 @@
 mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
 			      struct host_cmd_ds_command *resp)
 {
-	struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
 	struct mwifiex_adapter *adapter = priv->adapter;
 	struct host_cmd_ds_802_11_ps_mode_enh *pm;
 	unsigned long flags;
@@ -71,17 +70,7 @@
 		break;
 	case HostCmd_CMD_802_11_SCAN:
 	case HostCmd_CMD_802_11_SCAN_EXT:
-		/* Cancel all pending scan command */
-		spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-		list_for_each_entry_safe(cmd_node, tmp_node,
-					 &adapter->scan_pending_q, list) {
-			list_del(&cmd_node->list);
-			spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-					       flags);
-			mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-			spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-		}
-		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+		mwifiex_cancel_pending_scan_cmd(adapter);
 
 		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
 		adapter->scan_processing = false;
@@ -1244,6 +1233,8 @@
 	case HostCmd_CMD_ROBUST_COEX:
 		ret = mwifiex_ret_robust_coex(priv, resp, data_buf);
 		break;
+	case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
+		break;
 	default:
 		mwifiex_dbg(adapter, ERROR,
 			    "CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 070bce4..0104108 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -147,6 +147,9 @@
 	mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 	if (netif_carrier_ok(priv->netdev))
 		netif_carrier_off(priv->netdev);
+
+	mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
+			 HostCmd_ACT_GEN_REMOVE, 0, NULL, false);
 }
 
 static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index d5c56eb..8e08626 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -146,6 +146,7 @@
 	size_t beacon_ie_len;
 	struct mwifiex_bss_priv *bss_priv = (void *)bss->priv;
 	const struct cfg80211_bss_ies *ies;
+	int ret;
 
 	rcu_read_lock();
 	ies = rcu_dereference(bss->ies);
@@ -189,7 +190,48 @@
 	if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_SPECTRUM_MGMT)
 		bss_desc->sensed_11h = true;
 
-	return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
+	ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
+	if (ret)
+		return ret;
+
+	/* Update HT40 capability based on current channel information */
+	if (bss_desc->bcn_ht_oper && bss_desc->bcn_ht_cap) {
+		u8 ht_param = bss_desc->bcn_ht_oper->ht_param;
+		u8 radio = mwifiex_band_to_radio_type(bss_desc->bss_band);
+		struct ieee80211_supported_band *sband =
+						priv->wdev.wiphy->bands[radio];
+		int freq = ieee80211_channel_to_frequency(bss_desc->channel,
+							  radio);
+		struct ieee80211_channel *chan =
+			ieee80211_get_channel(priv->adapter->wiphy, freq);
+
+		switch (ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+		case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+			if (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) {
+				sband->ht_cap.cap &=
+					~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+				sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+			} else {
+				sband->ht_cap.cap |=
+					IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+					IEEE80211_HT_CAP_SGI_40;
+			}
+			break;
+		case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+			if (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) {
+				sband->ht_cap.cap &=
+					~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+				sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+			} else {
+				sband->ht_cap.cap |=
+					IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+					IEEE80211_HT_CAP_SGI_40;
+			}
+			break;
+		}
+	}
+
+	return 0;
 }
 
 void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
@@ -509,7 +551,8 @@
 
 	if (priv && priv->sched_scanning) {
 #ifdef CONFIG_PM
-		if (!priv->wdev.wiphy->wowlan_config->nd_config) {
+		if (priv->wdev.wiphy->wowlan_config &&
+		    !priv->wdev.wiphy->wowlan_config->nd_config) {
 #endif
 			mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
 			mwifiex_stop_bg_scan(priv);
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 1506496..df9704de0 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -285,7 +285,7 @@
 	else
 		usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
 
-	/* find the minmum bandwith between AP/TDLS peers */
+	/* find the minimum bandwidth between AP/TDLS peers */
 	vht_cap = &sta_ptr->tdls_cap.vhtcap;
 	supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
 	peer_supp_chwd_set =
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index bf6182b..abdd0cf7 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -297,6 +297,13 @@
 		goto done;
 
 	mwifiex_set_trans_start(priv->netdev);
+
+	if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+		atomic_dec_return(&adapter->pending_bridged_pkts);
+
+	if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+		goto done;
+
 	if (!status) {
 		priv->stats.tx_packets++;
 		priv->stats.tx_bytes += tx_info->pkt_len;
@@ -306,12 +313,6 @@
 		priv->stats.tx_errors++;
 	}
 
-	if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
-		atomic_dec_return(&adapter->pending_bridged_pkts);
-
-	if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
-		goto done;
-
 	if (aggr)
 		/* For skb_aggr, do not wake up tx queue */
 		goto done;
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 16d95b2..f79d00d 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -694,7 +694,7 @@
 	struct mwifiex_ie_list *ap_ie = cmd_buf;
 	struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
 
-	if (!ap_ie || !ap_ie->len || !ap_ie->ie_list)
+	if (!ap_ie || !ap_ie->len)
 		return -1;
 
 	*ie_size += le16_to_cpu(ap_ie->len) +
@@ -816,7 +816,7 @@
 						     chandef.chan->center_freq);
 
 	/* Set appropriate bands */
-	if (chandef.chan->band == IEEE80211_BAND_2GHZ) {
+	if (chandef.chan->band == NL80211_BAND_2GHZ) {
 		bss_cfg->band_cfg = BAND_CONFIG_BG;
 		config_bands = BAND_B | BAND_G;
 
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 52f7981..666e91a 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -102,6 +102,7 @@
 	int hdr_chop;
 	struct ethhdr *p_ethhdr;
 	struct mwifiex_sta_node *src_node;
+	int index;
 
 	uap_rx_pd = (struct uap_rxpd *)(skb->data);
 	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
@@ -208,10 +209,15 @@
 	}
 
 	__net_timestamp(skb);
+
+	index = mwifiex_1d_to_wmm_queue[skb->priority];
+	atomic_inc(&priv->wmm_tx_pending[index]);
 	mwifiex_wmm_add_buf_txqueue(priv, skb);
 	atomic_inc(&adapter->tx_pending);
 	atomic_inc(&adapter->pending_bridged_pkts);
 
+	mwifiex_queue_main_work(priv->adapter);
+
 	return;
 }
 
@@ -263,6 +269,96 @@
 	return mwifiex_process_rx_packet(priv, skb);
 }
 
+int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
+			    struct sk_buff *skb)
+{
+	struct mwifiex_adapter *adapter = priv->adapter;
+	struct mwifiex_sta_node *src_node;
+	struct ethhdr *p_ethhdr;
+	struct sk_buff *skb_uap;
+	struct mwifiex_txinfo *tx_info;
+
+	if (!skb)
+		return -1;
+
+	p_ethhdr = (void *)skb->data;
+	src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
+	if (src_node) {
+		src_node->stats.last_rx = jiffies;
+		src_node->stats.rx_bytes += skb->len;
+		src_node->stats.rx_packets++;
+	}
+
+	skb->dev = priv->netdev;
+	skb->protocol = eth_type_trans(skb, priv->netdev);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* This is required only in case of 11n and USB/PCIE, as we allocate
+	 * a 4K buffer only if it is 11n (to be able to receive 4K
+	 * AMSDU packets). In case of SD we allocate buffers based
+	 * on the size of the packet, hence this is not needed.
+	 *
+	 * Modify the truesize here because our allocation for each
+	 * skb is 4K but we only receive 2K packets, and this causes
+	 * the kernel to start dropping packets when the application
+	 * has allocated its buffer based on the 2K size; i.e.
+	 * if a 64K packet is received (in IP fragments) and the
+	 * application allocates 64K to receive it, the packet
+	 * would almost double up because we place each 1.5K
+	 * fragment in a 4K buffer and pass it up. As soon as the
+	 * 64K limit is hit, the kernel will start to drop the rest
+	 * of the fragments. Currently we fail the Filesndl-ht.scr
+	 * script for UDP, hence this fix.
+	 */
+	if ((adapter->iface_type == MWIFIEX_USB ||
+	     adapter->iface_type == MWIFIEX_PCIE) &&
+	    (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
+		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
+
+	if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
+	    mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
+		if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
+			skb_uap =
+			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+		else
+			skb_uap = skb_copy(skb, GFP_ATOMIC);
+
+		if (likely(skb_uap)) {
+			tx_info = MWIFIEX_SKB_TXCB(skb_uap);
+			memset(tx_info, 0, sizeof(*tx_info));
+			tx_info->bss_num = priv->bss_num;
+			tx_info->bss_type = priv->bss_type;
+			tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+			__net_timestamp(skb_uap);
+			mwifiex_wmm_add_buf_txqueue(priv, skb_uap);
+			atomic_inc(&adapter->tx_pending);
+			atomic_inc(&adapter->pending_bridged_pkts);
+			if ((atomic_read(&adapter->pending_bridged_pkts) >=
+					MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
+				mwifiex_dbg(adapter, ERROR,
+					    "Tx: Bridge packet limit reached. Drop packet!\n");
+				mwifiex_uap_cleanup_tx_queues(priv);
+			}
+
+		} else {
+			mwifiex_dbg(adapter, ERROR, "failed to allocate skb_uap\n");
+		}
+
+		mwifiex_queue_main_work(adapter);
+		/* Don't forward Intra-BSS unicast packet to upper layer */
+		if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest))
+			return 0;
+	}
+
+	/* Forward multicast/broadcast packet to upper layer */
+	if (in_interrupt())
+		netif_rx(skb);
+	else
+		netif_rx_ni(skb);
+
+	return 0;
+}
+
 /*
  * This function processes the packet received on AP interface.
  *
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 0510861..0857575 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -995,7 +995,8 @@
 {
 	int ret = 0;
 	u8 *firmware = fw->fw_buf, *recv_buff;
-	u32 retries = USB8XXX_FW_MAX_RETRY, dlen;
+	u32 retries = USB8XXX_FW_MAX_RETRY + 1;
+	u32 dlen;
 	u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
 	struct fw_data *fwdata;
 	struct fw_sync_header sync_fw;
@@ -1017,8 +1018,10 @@
 
 	/* Allocate memory for receive */
 	recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
-	if (!recv_buff)
+	if (!recv_buff) {
+		ret = -ENOMEM;
 		goto cleanup;
+	}
 
 	do {
 		/* Send pseudo data to check winner status first */
@@ -1041,7 +1044,7 @@
 		}
 
 		/* If the send/receive fails or CRC occurs then retry */
-		while (retries--) {
+		while (--retries) {
 			u8 *buf = (u8 *)fwdata;
 			u32 len = FW_DATA_XMIT_SIZE;
 
@@ -1101,7 +1104,7 @@
 				continue;
 			}
 
-			retries = USB8XXX_FW_MAX_RETRY;
+			retries = USB8XXX_FW_MAX_RETRY + 1;
 			break;
 		}
 		fw_seqnum++;
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 088429d..b1b400b 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -346,20 +346,20 @@
 #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
 
 static const struct ieee80211_channel mwl8k_channels_24[] = {
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_24[] = {
@@ -379,10 +379,10 @@
 };
 
 static const struct ieee80211_channel mwl8k_channels_50[] = {
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_50[] = {
@@ -1010,11 +1010,11 @@
 	}
 
 	if (rxd->channel > 14) {
-		status->band = IEEE80211_BAND_5GHZ;
+		status->band = NL80211_BAND_5GHZ;
 		if (!(status->flag & RX_FLAG_HT))
 			status->rate_idx -= 5;
 	} else {
-		status->band = IEEE80211_BAND_2GHZ;
+		status->band = NL80211_BAND_2GHZ;
 	}
 	status->freq = ieee80211_channel_to_frequency(rxd->channel,
 						      status->band);
@@ -1118,11 +1118,11 @@
 		status->flag |= RX_FLAG_HT;
 
 	if (rxd->channel > 14) {
-		status->band = IEEE80211_BAND_5GHZ;
+		status->band = NL80211_BAND_5GHZ;
 		if (!(status->flag & RX_FLAG_HT))
 			status->rate_idx -= 5;
 	} else {
-		status->band = IEEE80211_BAND_2GHZ;
+		status->band = NL80211_BAND_2GHZ;
 	}
 	status->freq = ieee80211_channel_to_frequency(rxd->channel,
 						      status->band);
@@ -2300,13 +2300,13 @@
 	BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
 	memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
 
-	priv->band_24.band = IEEE80211_BAND_2GHZ;
+	priv->band_24.band = NL80211_BAND_2GHZ;
 	priv->band_24.channels = priv->channels_24;
 	priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
 	priv->band_24.bitrates = priv->rates_24;
 	priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
 
-	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
+	hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band_24;
 }
 
 static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
@@ -2319,13 +2319,13 @@
 	BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
 	memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
 
-	priv->band_50.band = IEEE80211_BAND_5GHZ;
+	priv->band_50.band = NL80211_BAND_5GHZ;
 	priv->band_50.channels = priv->channels_50;
 	priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
 	priv->band_50.bitrates = priv->rates_50;
 	priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
 
-	hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
+	hw->wiphy->bands[NL80211_BAND_5GHZ] = &priv->band_50;
 }
 
 /*
@@ -2876,9 +2876,9 @@
 	cmd->header.length = cpu_to_le16(sizeof(*cmd));
 	cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST);
 
-	if (channel->band == IEEE80211_BAND_2GHZ)
+	if (channel->band == NL80211_BAND_2GHZ)
 		cmd->band = cpu_to_le16(0x1);
-	else if (channel->band == IEEE80211_BAND_5GHZ)
+	else if (channel->band == NL80211_BAND_5GHZ)
 		cmd->band = cpu_to_le16(0x4);
 
 	cmd->channel = cpu_to_le16(channel->hw_value);
@@ -3067,7 +3067,7 @@
 	struct ieee80211_supported_band *sband;
 	int band, ch, idx = 0;
 
-	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
 		sband = priv->hw->wiphy->bands[band];
 		if (!sband)
 			continue;
@@ -3149,9 +3149,9 @@
 	cmd->action = cpu_to_le16(MWL8K_CMD_SET);
 	cmd->current_channel = channel->hw_value;
 
-	if (channel->band == IEEE80211_BAND_2GHZ)
+	if (channel->band == NL80211_BAND_2GHZ)
 		cmd->channel_flags |= cpu_to_le32(0x00000001);
-	else if (channel->band == IEEE80211_BAND_5GHZ)
+	else if (channel->band == NL80211_BAND_5GHZ)
 		cmd->channel_flags |= cpu_to_le32(0x00000004);
 
 	if (!priv->sw_scan_start) {
@@ -4094,10 +4094,10 @@
 	memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
 	cmd->stn_id = cpu_to_le16(sta->aid);
 	cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
-	if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
-		rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+	if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
+		rates = sta->supp_rates[NL80211_BAND_2GHZ];
 	else
-		rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+		rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
 	cmd->legacy_rates = cpu_to_le32(rates);
 	if (sta->ht_cap.ht_supported) {
 		cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
@@ -4529,10 +4529,10 @@
 	p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
 	p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
 		((sta->ht_cap.ampdu_density & 7) << 2);
-	if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
-		rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+	if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
+		rates = sta->supp_rates[NL80211_BAND_2GHZ];
 	else
-		rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+		rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
 	legacy_rate_mask_to_array(p->legacy_rates, rates);
 	memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
 	p->interop = 1;
@@ -5010,11 +5010,11 @@
 			goto out;
 		}
 
-		if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
-			ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
+		if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
+			ap_legacy_rates = ap->supp_rates[NL80211_BAND_2GHZ];
 		} else {
 			ap_legacy_rates =
-				ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+				ap->supp_rates[NL80211_BAND_5GHZ] << 5;
 		}
 		memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
 
@@ -5042,7 +5042,7 @@
 					idx--;
 
 				if (hw->conf.chandef.chan->band ==
-				    IEEE80211_BAND_2GHZ)
+				    NL80211_BAND_2GHZ)
 					rate = mwl8k_rates_24[idx].hw_value;
 				else
 					rate = mwl8k_rates_50[idx].hw_value;
@@ -5116,7 +5116,7 @@
 		if (idx)
 			idx--;
 
-		if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+		if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
 			rate = mwl8k_rates_24[idx].hw_value;
 		else
 			rate = mwl8k_rates_50[idx].hw_value;
@@ -5388,7 +5388,7 @@
 	struct ieee80211_supported_band *sband;
 
 	if (priv->ap_fw) {
-		sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+		sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
 
 		if (sband && idx >= sband->n_channels) {
 			idx -= sband->n_channels;
@@ -5396,7 +5396,7 @@
 		}
 
 		if (!sband)
-			sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+			sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
 
 		if (!sband || idx >= sband->n_channels)
 			return -ENOENT;
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index 26190fd..8fa78d7 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -469,7 +469,7 @@
 }
 
 #define CHAN2G(_idx, _freq) {			\
-	.band = IEEE80211_BAND_2GHZ,		\
+	.band = NL80211_BAND_2GHZ,		\
 	.center_freq = (_freq),			\
 	.hw_value = (_idx),			\
 	.max_power = 30,			\
@@ -563,7 +563,7 @@
 {
 	dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
 				     GFP_KERNEL);
-	dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g;
+	dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g;
 
 	WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
 		ARRAY_SIZE(mt76_channels_2ghz));
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 7fa0128..bf3f0a3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -777,7 +777,7 @@
 	u8 offset1;
 	u8 offset2;
 
-	if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+	if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
 		rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
 		offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
 		offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
@@ -1174,7 +1174,7 @@
 	    container_of(led_cdev, struct rt2x00_led, led_dev);
 	unsigned int enabled = brightness != LED_OFF;
 	unsigned int bg_mode =
-	    (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+	    (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
 	unsigned int polarity =
 		rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
 				   EEPROM_FREQ_LED_POLARITY);
@@ -1741,7 +1741,7 @@
 	u8 led_ctrl, led_g_mode, led_r_mode;
 
 	rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
-	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+	if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
 		rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1);
 		rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1);
 	} else {
@@ -1844,7 +1844,7 @@
 		    rt2x00_has_cap_bt_coexist(rt2x00dev)) {
 			rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
 			rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
-				rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+				rt2x00dev->curr_band == NL80211_BAND_5GHZ);
 			rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B);
 		} else {
 			rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
@@ -3451,7 +3451,7 @@
 	 * Matching Delta value   -4   -3   -2   -1    0   +1   +2   +3   +4
 	 * Example TSSI bounds  0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
 	 */
-	if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+	if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
 		rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
 		tssi_bounds[0] = rt2x00_get_field16(eeprom,
 					EEPROM_TSSI_BOUND_BG1_MINUS4);
@@ -3546,7 +3546,7 @@
 }
 
 static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
-				      enum ieee80211_band band)
+				      enum nl80211_band band)
 {
 	u16 eeprom;
 	u8 comp_en;
@@ -3562,7 +3562,7 @@
 	    !test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
 		return 0;
 
-	if (band == IEEE80211_BAND_2GHZ) {
+	if (band == NL80211_BAND_2GHZ) {
 		comp_en = rt2x00_get_field16(eeprom,
 				 EEPROM_TXPOWER_DELTA_ENABLE_2G);
 		if (comp_en) {
@@ -3611,7 +3611,7 @@
 }
 
 static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
-				   enum ieee80211_band band, int power_level,
+				   enum nl80211_band band, int power_level,
 				   u8 txpower, int delta)
 {
 	u16 eeprom;
@@ -3639,7 +3639,7 @@
 		rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
 				   &eeprom);
 
-		if (band == IEEE80211_BAND_2GHZ)
+		if (band == NL80211_BAND_2GHZ)
 			eirp_txpower_criterion = rt2x00_get_field16(eeprom,
 						 EEPROM_EIRP_MAX_TX_POWER_2GHZ);
 		else
@@ -3686,7 +3686,7 @@
 	u16 eeprom;
 	u32 regs[TX_PWR_CFG_IDX_COUNT];
 	unsigned int offset;
-	enum ieee80211_band band = chan->band;
+	enum nl80211_band band = chan->band;
 	int delta;
 	int i;
 
@@ -3697,7 +3697,7 @@
 	/* calculate temperature compensation delta */
 	delta = rt2800_get_gain_calibration_delta(rt2x00dev);
 
-	if (band == IEEE80211_BAND_5GHZ)
+	if (band == NL80211_BAND_5GHZ)
 		offset = 16;
 	else
 		offset = 0;
@@ -4055,7 +4055,7 @@
 	for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++)
 		rt2x00_dbg(rt2x00dev,
 			   "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
-			   (band == IEEE80211_BAND_5GHZ) ? '5' : '2',
+			   (band == NL80211_BAND_5GHZ) ? '5' : '2',
 			   (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
 								'4' : '2',
 			   (i > TX_PWR_CFG_9_IDX) ?
@@ -4081,7 +4081,7 @@
 	u16 eeprom;
 	u32 reg, offset;
 	int i, is_rate_b, delta, power_ctrl;
-	enum ieee80211_band band = chan->band;
+	enum nl80211_band band = chan->band;
 
 	/*
 	 * Calculate HT40 compensation. For 40MHz we need to add or subtract
@@ -4436,7 +4436,7 @@
 {
 	u8 vgc;
 
-	if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+	if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
 		if (rt2x00_rt(rt2x00dev, RT3070) ||
 		    rt2x00_rt(rt2x00dev, RT3071) ||
 		    rt2x00_rt(rt2x00dev, RT3090) ||
@@ -4511,7 +4511,7 @@
 	case RT3572:
 	case RT3593:
 		if (qual->rssi > -65) {
-			if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+			if (rt2x00dev->curr_band == NL80211_BAND_2GHZ)
 				vgc += 0x20;
 			else
 				vgc += 0x10;
@@ -7492,6 +7492,10 @@
 	if (!rt2x00_is_usb(rt2x00dev))
 		ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
 
+	/* Set MFP if HW crypto is disabled. */
+	if (rt2800_hwcrypt_disabled(rt2x00dev))
+		ieee80211_hw_set(rt2x00dev->hw, MFP_CAPABLE);
+
 	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
 	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
 				rt2800_eeprom_addr(rt2x00dev,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 6418620..f68d492 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -38,6 +38,7 @@
 #include <linux/kfifo.h>
 #include <linux/hrtimer.h>
 #include <linux/average.h>
+#include <linux/usb.h>
 
 #include <net/mac80211.h>
 
@@ -752,8 +753,8 @@
 	 * IEEE80211 control structure.
 	 */
 	struct ieee80211_hw *hw;
-	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-	enum ieee80211_band curr_band;
+	struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
+	enum nl80211_band curr_band;
 	int curr_freq;
 
 	/*
@@ -1002,6 +1003,8 @@
 
 	/* Extra TX headroom required for alignment purposes. */
 	unsigned int extra_tx_headroom;
+
+	struct usb_anchor *anchor;
 };
 
 struct rt2x00_bar_list_entry {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 5639ed8..4e0c565 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -911,7 +911,7 @@
 			      const int value)
 {
 	/* XXX: this assumption about the band is wrong for 802.11j */
-	entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+	entry->band = channel <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 	entry->center_freq = ieee80211_channel_to_frequency(channel,
 							    entry->band);
 	entry->hw_value = value;
@@ -975,13 +975,13 @@
 	 * Channels: 2.4 GHz
 	 */
 	if (spec->supported_bands & SUPPORT_BAND_2GHZ) {
-		rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_channels = 14;
-		rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_bitrates = num_rates;
-		rt2x00dev->bands[IEEE80211_BAND_2GHZ].channels = channels;
-		rt2x00dev->bands[IEEE80211_BAND_2GHZ].bitrates = rates;
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-		    &rt2x00dev->bands[IEEE80211_BAND_2GHZ];
-		memcpy(&rt2x00dev->bands[IEEE80211_BAND_2GHZ].ht_cap,
+		rt2x00dev->bands[NL80211_BAND_2GHZ].n_channels = 14;
+		rt2x00dev->bands[NL80211_BAND_2GHZ].n_bitrates = num_rates;
+		rt2x00dev->bands[NL80211_BAND_2GHZ].channels = channels;
+		rt2x00dev->bands[NL80211_BAND_2GHZ].bitrates = rates;
+		hw->wiphy->bands[NL80211_BAND_2GHZ] =
+		    &rt2x00dev->bands[NL80211_BAND_2GHZ];
+		memcpy(&rt2x00dev->bands[NL80211_BAND_2GHZ].ht_cap,
 		       &spec->ht, sizeof(spec->ht));
 	}
 
@@ -991,15 +991,15 @@
 	 * Channels: OFDM, UNII, HiperLAN2.
 	 */
 	if (spec->supported_bands & SUPPORT_BAND_5GHZ) {
-		rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_channels =
+		rt2x00dev->bands[NL80211_BAND_5GHZ].n_channels =
 		    spec->num_channels - 14;
-		rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_bitrates =
+		rt2x00dev->bands[NL80211_BAND_5GHZ].n_bitrates =
 		    num_rates - 4;
-		rt2x00dev->bands[IEEE80211_BAND_5GHZ].channels = &channels[14];
-		rt2x00dev->bands[IEEE80211_BAND_5GHZ].bitrates = &rates[4];
-		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-		    &rt2x00dev->bands[IEEE80211_BAND_5GHZ];
-		memcpy(&rt2x00dev->bands[IEEE80211_BAND_5GHZ].ht_cap,
+		rt2x00dev->bands[NL80211_BAND_5GHZ].channels = &channels[14];
+		rt2x00dev->bands[NL80211_BAND_5GHZ].bitrates = &rates[4];
+		hw->wiphy->bands[NL80211_BAND_5GHZ] =
+		    &rt2x00dev->bands[NL80211_BAND_5GHZ];
+		memcpy(&rt2x00dev->bands[NL80211_BAND_5GHZ].ht_cap,
 		       &spec->ht, sizeof(spec->ht));
 	}
 
@@ -1016,11 +1016,11 @@
 	if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
 		ieee80211_unregister_hw(rt2x00dev->hw);
 
-	if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) {
-		kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
-		kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->bitrates);
-		rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
-		rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+	if (likely(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ])) {
+		kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels);
+		kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates);
+		rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+		rt2x00dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
 	}
 
 	kfree(rt2x00dev->spec.channels_info);
@@ -1422,11 +1422,14 @@
 	cancel_work_sync(&rt2x00dev->intf_work);
 	cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
 	cancel_work_sync(&rt2x00dev->sleep_work);
+#ifdef CONFIG_RT2X00_LIB_USB
 	if (rt2x00_is_usb(rt2x00dev)) {
+		usb_kill_anchored_urbs(rt2x00dev->anchor);
 		hrtimer_cancel(&rt2x00dev->txstatus_timer);
 		cancel_work_sync(&rt2x00dev->rxdone_work);
 		cancel_work_sync(&rt2x00dev->txdone_work);
 	}
+#endif
 	if (rt2x00dev->workqueue)
 		destroy_workqueue(rt2x00dev->workqueue);
 
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 7627af6..7cf26c6 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -171,8 +171,11 @@
 {
 	struct rt2x00_async_read_data *rd = urb->context;
 	if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
-		if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+		usb_anchor_urb(urb, rd->rt2x00dev->anchor);
+		if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
+			usb_unanchor_urb(urb);
 			kfree(rd);
+		}
 	} else
 		kfree(rd);
 }
@@ -206,8 +209,11 @@
 	usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
 			     (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
 			     rt2x00usb_register_read_async_cb, rd);
-	if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+	usb_anchor_urb(urb, rt2x00dev->anchor);
+	if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
+		usb_unanchor_urb(urb);
 		kfree(rd);
+	}
 	usb_free_urb(urb);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);
@@ -313,8 +319,10 @@
 			  entry->skb->data, length,
 			  rt2x00usb_interrupt_txdone, entry);
 
+	usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
 	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
 	if (status) {
+		usb_unanchor_urb(entry_priv->urb);
 		if (status == -ENODEV)
 			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -402,8 +410,10 @@
 			  entry->skb->data, entry->skb->len,
 			  rt2x00usb_interrupt_rxdone, entry);
 
+	usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
 	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
 	if (status) {
+		usb_unanchor_urb(entry_priv->urb);
 		if (status == -ENODEV)
 			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -818,6 +828,13 @@
 	if (retval)
 		goto exit_free_reg;
 
+	rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
+					sizeof(struct usb_anchor),
+					GFP_KERNEL);
+	if (!rt2x00dev->anchor) {
+		retval = -ENOMEM;
+		goto exit_free_reg;
+	}
+
+	init_usb_anchor(rt2x00dev->anchor);
 	return 0;
 
 exit_free_reg:
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 24a3436..03013eb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -252,9 +252,9 @@
 	    container_of(led_cdev, struct rt2x00_led, led_dev);
 	unsigned int enabled = brightness != LED_OFF;
 	unsigned int a_mode =
-	    (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+	    (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ);
 	unsigned int bg_mode =
-	    (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+	    (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
 
 	if (led->type == LED_TYPE_RADIO) {
 		rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
@@ -643,12 +643,12 @@
 	case ANTENNA_HW_DIVERSITY:
 		rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
 		rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
-				  (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ));
+				  (rt2x00dev->curr_band != NL80211_BAND_5GHZ));
 		break;
 	case ANTENNA_A:
 		rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
 		rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
-		if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+		if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
 			rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
 		else
 			rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
@@ -657,7 +657,7 @@
 	default:
 		rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
 		rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
-		if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+		if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
 			rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
 		else
 			rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -808,7 +808,7 @@
 	BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
 	       ant->tx == ANTENNA_SW_DIVERSITY);
 
-	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+	if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
 		sel = antenna_sel_a;
 		lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
 	} else {
@@ -822,9 +822,9 @@
 	rt2x00mmio_register_read(rt2x00dev, PHY_CSR0, &reg);
 
 	rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
-			   rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+			   rt2x00dev->curr_band == NL80211_BAND_2GHZ);
 	rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
-			   rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+			   rt2x00dev->curr_band == NL80211_BAND_5GHZ);
 
 	rt2x00mmio_register_write(rt2x00dev, PHY_CSR0, reg);
 
@@ -846,7 +846,7 @@
 	u16 eeprom;
 	short lna_gain = 0;
 
-	if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+	if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) {
 		if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
 			lna_gain += 14;
 
@@ -1048,7 +1048,7 @@
 	/*
 	 * Determine r17 bounds.
 	 */
-	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+	if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
 		low_bound = 0x28;
 		up_bound = 0x48;
 		if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
@@ -2077,7 +2077,7 @@
 		return 0;
 	}
 
-	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+	if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
 		if (lna == 3 || lna == 2)
 			offset += 10;
 	}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index 7bbc869..c1397a6 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -197,9 +197,9 @@
 	   container_of(led_cdev, struct rt2x00_led, led_dev);
 	unsigned int enabled = brightness != LED_OFF;
 	unsigned int a_mode =
-	    (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+	    (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ);
 	unsigned int bg_mode =
-	    (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+	    (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
 
 	if (led->type == LED_TYPE_RADIO) {
 		rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
@@ -593,13 +593,13 @@
 	case ANTENNA_HW_DIVERSITY:
 		rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
 		temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
-		       (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
+		       (rt2x00dev->curr_band != NL80211_BAND_5GHZ);
 		rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
 		break;
 	case ANTENNA_A:
 		rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
 		rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
-		if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+		if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
 			rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
 		else
 			rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
@@ -608,7 +608,7 @@
 	default:
 		rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
 		rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
-		if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+		if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
 			rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
 		else
 			rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -704,7 +704,7 @@
 	BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
 	       ant->tx == ANTENNA_SW_DIVERSITY);
 
-	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+	if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
 		sel = antenna_sel_a;
 		lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
 	} else {
@@ -718,9 +718,9 @@
 	rt2x00usb_register_read(rt2x00dev, PHY_CSR0, &reg);
 
 	rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
-			   (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ));
+			   (rt2x00dev->curr_band == NL80211_BAND_2GHZ));
 	rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
-			   (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ));
+			   (rt2x00dev->curr_band == NL80211_BAND_5GHZ));
 
 	rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
 
@@ -736,7 +736,7 @@
 	u16 eeprom;
 	short lna_gain = 0;
 
-	if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+	if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) {
 		if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
 			lna_gain += 14;
 
@@ -923,7 +923,7 @@
 	/*
 	 * Determine r17 bounds.
 	 */
-	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+	if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
 		low_bound = 0x28;
 		up_bound = 0x48;
 
@@ -1657,7 +1657,7 @@
 		return 0;
 	}
 
-	if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+	if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
 		if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
 			if (lna == 3 || lna == 2)
 				offset += 10;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index a43a16f..e895a84 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -526,7 +526,7 @@
 		 * ieee80211_generic_frame_duration
 		 */
 		duration = ieee80211_generic_frame_duration(dev, priv->vif,
-					IEEE80211_BAND_2GHZ, skb->len,
+					NL80211_BAND_2GHZ, skb->len,
 					ieee80211_get_tx_rate(dev, info));
 
 		frame_duration =  priv->ack_time + le16_to_cpu(duration);
@@ -1018,6 +1018,8 @@
 		dma_addr_t *mapping;
 		entry = priv->rx_ring + priv->rx_ring_sz*i;
 		if (!skb) {
+			pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
+					priv->rx_ring, priv->rx_ring_dma);
 			wiphy_err(dev->wiphy, "Cannot allocate RX skb\n");
 			return -ENOMEM;
 		}
@@ -1028,6 +1030,8 @@
 
 		if (pci_dma_mapping_error(priv->pdev, *mapping)) {
 			kfree_skb(skb);
+			pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
+					priv->rx_ring, priv->rx_ring_dma);
 			wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n");
 			return -ENOMEM;
 		}
@@ -1529,7 +1533,7 @@
 		priv->ack_time =
 			le16_to_cpu(ieee80211_generic_frame_duration(dev,
 					priv->vif,
-					IEEE80211_BAND_2GHZ, 10,
+					NL80211_BAND_2GHZ, 10,
 					&priv->rates[0])) - 10;
 
 		rtl8180_conf_erp(dev, info);
@@ -1736,7 +1740,7 @@
 	if (err) {
 		printk(KERN_ERR "%s (rtl8180): Cannot obtain PCI resources\n",
 		       pci_name(pdev));
-		return err;
+		goto err_disable_dev;
 	}
 
 	io_addr = pci_resource_start(pdev, 0);
@@ -1795,12 +1799,12 @@
 	memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels));
 	memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
 
-	priv->band.band = IEEE80211_BAND_2GHZ;
+	priv->band.band = NL80211_BAND_2GHZ;
 	priv->band.channels = priv->channels;
 	priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
 	priv->band.bitrates = priv->rates;
 	priv->band.n_bitrates = 4;
-	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+	dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 
 	ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
 	ieee80211_hw_set(dev, RX_INCLUDES_FCS);
@@ -1938,6 +1942,8 @@
 
  err_free_reg:
 	pci_release_regions(pdev);
+
+ err_disable_dev:
 	pci_disable_device(pdev);
 	return err;
 }
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index b7f72f9..231f84d 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1470,12 +1470,12 @@
 	memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
 	priv->map = (struct rtl818x_csr *)0xFF00;
 
-	priv->band.band = IEEE80211_BAND_2GHZ;
+	priv->band.band = NL80211_BAND_2GHZ;
 	priv->band.channels = priv->channels;
 	priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
 	priv->band.bitrates = priv->rates;
 	priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
-	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+	dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 
 
 	ieee80211_hw_set(dev, RX_INCLUDES_FCS);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
index abdff45..f2ce8c9 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
@@ -1,7 +1,7 @@
 /*
  * RTL8XXXU mac80211 USB driver
  *
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
  *
  * Portions, notably calibration code:
  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
@@ -91,33 +91,33 @@
 };
 
 static struct ieee80211_channel rtl8xxxu_channels_2g[] = {
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2412,
 	  .hw_value = 1, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2417,
 	  .hw_value = 2, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2422,
 	  .hw_value = 3, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2427,
 	  .hw_value = 4, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2432,
 	  .hw_value = 5, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2437,
 	  .hw_value = 6, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2442,
 	  .hw_value = 7, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2447,
 	  .hw_value = 8, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2452,
 	  .hw_value = 9, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2457,
 	  .hw_value = 10, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2462,
 	  .hw_value = 11, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2467,
 	  .hw_value = 12, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2472,
 	  .hw_value = 13, .max_power = 30 },
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2484,
 	  .hw_value = 14, .max_power = 30 }
 };
 
@@ -128,7 +128,7 @@
 	.n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
 };
 
-static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
+static struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = {
 	{0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
 	{0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
 	{0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
@@ -184,6 +184,104 @@
 	{0xffff, 0xff},
 };
 
+static struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = {
+	{0x011, 0xeb}, {0x012, 0x07}, {0x014, 0x75}, {0x303, 0xa7},
+	{0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00},
+	{0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+	{0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05},
+	{0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01},
+	{0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00},
+	{0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f},
+	{0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00},
+	{0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f},
+	{0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66},
+	{0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, {0x4cd, 0xff},
+	{0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, {0x502, 0x2f},
+	{0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, {0x506, 0x5e},
+	{0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, {0x50a, 0x5e},
+	{0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, {0x50e, 0x00},
+	{0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, {0x516, 0x0a},
+	{0x525, 0x4f}, {0x540, 0x12}, {0x541, 0x64}, {0x550, 0x10},
+	{0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50}, {0x55d, 0xff},
+	{0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, {0x620, 0xff},
+	{0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff}, {0x624, 0xff},
+	{0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff}, {0x638, 0x50},
+	{0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, {0x63f, 0x0e},
+	{0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xc8},
+	{0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65},
+	{0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70a, 0x65},
+	{0x70b, 0x87},
+	{0xffff, 0xff},
+};
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+static struct rtl8xxxu_power_base rtl8188r_power_base = {
+	.reg_0e00 = 0x06080808,
+	.reg_0e04 = 0x00040406,
+	.reg_0e08 = 0x00000000,
+	.reg_086c = 0x00000000,
+
+	.reg_0e10 = 0x04060608,
+	.reg_0e14 = 0x00020204,
+	.reg_0e18 = 0x04060608,
+	.reg_0e1c = 0x00020204,
+
+	.reg_0830 = 0x06080808,
+	.reg_0834 = 0x00040406,
+	.reg_0838 = 0x00000000,
+	.reg_086c_2 = 0x00000000,
+
+	.reg_083c = 0x04060608,
+	.reg_0848 = 0x00020204,
+	.reg_084c = 0x04060608,
+	.reg_0868 = 0x00020204,
+};
+
+static struct rtl8xxxu_power_base rtl8192c_power_base = {
+	.reg_0e00 = 0x07090c0c,
+	.reg_0e04 = 0x01020405,
+	.reg_0e08 = 0x00000000,
+	.reg_086c = 0x00000000,
+
+	.reg_0e10 = 0x0b0c0c0e,
+	.reg_0e14 = 0x01030506,
+	.reg_0e18 = 0x0b0c0d0e,
+	.reg_0e1c = 0x01030509,
+
+	.reg_0830 = 0x07090c0c,
+	.reg_0834 = 0x01020405,
+	.reg_0838 = 0x00000000,
+	.reg_086c_2 = 0x00000000,
+
+	.reg_083c = 0x0b0c0d0e,
+	.reg_0848 = 0x01030509,
+	.reg_084c = 0x0b0c0d0e,
+	.reg_0868 = 0x01030509,
+};
+#endif
+
+static struct rtl8xxxu_power_base rtl8723a_power_base = {
+	.reg_0e00 = 0x0a0c0c0c,
+	.reg_0e04 = 0x02040608,
+	.reg_0e08 = 0x00000000,
+	.reg_086c = 0x00000000,
+
+	.reg_0e10 = 0x0a0c0d0e,
+	.reg_0e14 = 0x02040608,
+	.reg_0e18 = 0x0a0c0d0e,
+	.reg_0e1c = 0x02040608,
+
+	.reg_0830 = 0x0a0c0c0c,
+	.reg_0834 = 0x02040608,
+	.reg_0838 = 0x00000000,
+	.reg_086c_2 = 0x00000000,
+
+	.reg_083c = 0x0a0c0d0e,
+	.reg_0848 = 0x02040608,
+	.reg_084c = 0x0a0c0d0e,
+	.reg_0868 = 0x02040608,
+};
+
 static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
 	{0x800, 0x80040000}, {0x804, 0x00000003},
 	{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
@@ -580,6 +678,138 @@
 	{0xffff, 0xffffffff},
 };
 
+static struct rtl8xxxu_reg32val rtl8192eu_phy_init_table[] = {
+	{0x800, 0x80040000}, {0x804, 0x00000003},
+	{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+	{0x810, 0x10001331}, {0x814, 0x020c3d10},
+	{0x818, 0x02220385}, {0x81c, 0x00000000},
+	{0x820, 0x01000100}, {0x824, 0x00390204},
+	{0x828, 0x01000100}, {0x82c, 0x00390204},
+	{0x830, 0x32323232}, {0x834, 0x30303030},
+	{0x838, 0x30303030}, {0x83c, 0x30303030},
+	{0x840, 0x00010000}, {0x844, 0x00010000},
+	{0x848, 0x28282828}, {0x84c, 0x28282828},
+	{0x850, 0x00000000}, {0x854, 0x00000000},
+	{0x858, 0x009a009a}, {0x85c, 0x01000014},
+	{0x860, 0x66f60000}, {0x864, 0x061f0000},
+	{0x868, 0x30303030}, {0x86c, 0x30303030},
+	{0x870, 0x00000000}, {0x874, 0x55004200},
+	{0x878, 0x08080808}, {0x87c, 0x00000000},
+	{0x880, 0xb0000c1c}, {0x884, 0x00000001},
+	{0x888, 0x00000000}, {0x88c, 0xcc0000c0},
+	{0x890, 0x00000800}, {0x894, 0xfffffffe},
+	{0x898, 0x40302010}, {0x900, 0x00000000},
+	{0x904, 0x00000023}, {0x908, 0x00000000},
+	{0x90c, 0x81121313}, {0x910, 0x806c0001},
+	{0x914, 0x00000001}, {0x918, 0x00000000},
+	{0x91c, 0x00010000}, {0x924, 0x00000001},
+	{0x928, 0x00000000}, {0x92c, 0x00000000},
+	{0x930, 0x00000000}, {0x934, 0x00000000},
+	{0x938, 0x00000000}, {0x93c, 0x00000000},
+	{0x940, 0x00000000}, {0x944, 0x00000000},
+	{0x94c, 0x00000008}, {0xa00, 0x00d0c7c8},
+	{0xa04, 0x81ff000c}, {0xa08, 0x8c838300},
+	{0xa0c, 0x2e68120f}, {0xa10, 0x95009b78},
+	{0xa14, 0x1114d028}, {0xa18, 0x00881117},
+	{0xa1c, 0x89140f00}, {0xa20, 0x1a1b0000},
+	{0xa24, 0x090e1317}, {0xa28, 0x00000204},
+	{0xa2c, 0x00d30000}, {0xa70, 0x101fff00},
+	{0xa74, 0x00000007}, {0xa78, 0x00000900},
+	{0xa7c, 0x225b0606}, {0xa80, 0x218075b1},
+	{0xb38, 0x00000000}, {0xc00, 0x48071d40},
+	{0xc04, 0x03a05633}, {0xc08, 0x000000e4},
+	{0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000},
+	{0xc14, 0x40000100}, {0xc18, 0x08800000},
+	{0xc1c, 0x40000100}, {0xc20, 0x00000000},
+	{0xc24, 0x00000000}, {0xc28, 0x00000000},
+	{0xc2c, 0x00000000}, {0xc30, 0x69e9ac47},
+	{0xc34, 0x469652af}, {0xc38, 0x49795994},
+	{0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f},
+	{0xc44, 0x000100b7}, {0xc48, 0xec020107},
+	{0xc4c, 0x007f037f},
+#ifdef EXT_PA_8192EU
+	/* External PA or external LNA */
+	{0xc50, 0x00340220},
+#else
+	{0xc50, 0x00340020},
+#endif
+	{0xc54, 0x0080801f},
+#ifdef EXT_PA_8192EU
+	/* External PA or external LNA */
+	{0xc58, 0x00000220},
+#else
+	{0xc58, 0x00000020},
+#endif
+	{0xc5c, 0x00248492}, {0xc60, 0x00000000},
+	{0xc64, 0x7112848b}, {0xc68, 0x47c00bff},
+	{0xc6c, 0x00000036}, {0xc70, 0x00000600},
+	{0xc74, 0x02013169}, {0xc78, 0x0000001f},
+	{0xc7c, 0x00b91612},
+#ifdef EXT_PA_8192EU
+	/* External PA or external LNA */
+	{0xc80, 0x2d4000b5},
+#else
+	{0xc80, 0x40000100},
+#endif
+	{0xc84, 0x21f60000},
+#ifdef EXT_PA_8192EU
+	/* External PA or external LNA */
+	{0xc88, 0x2d4000b5},
+#else
+	{0xc88, 0x40000100},
+#endif
+	{0xc8c, 0xa0e40000}, {0xc90, 0x00121820},
+	{0xc94, 0x00000000}, {0xc98, 0x00121820},
+	{0xc9c, 0x00007f7f}, {0xca0, 0x00000000},
+	{0xca4, 0x000300a0}, {0xca8, 0x00000000},
+	{0xcac, 0x00000000}, {0xcb0, 0x00000000},
+	{0xcb4, 0x00000000}, {0xcb8, 0x00000000},
+	{0xcbc, 0x28000000}, {0xcc0, 0x00000000},
+	{0xcc4, 0x00000000}, {0xcc8, 0x00000000},
+	{0xccc, 0x00000000}, {0xcd0, 0x00000000},
+	{0xcd4, 0x00000000}, {0xcd8, 0x64b22427},
+	{0xcdc, 0x00766932}, {0xce0, 0x00222222},
+	{0xce4, 0x00040000}, {0xce8, 0x77644302},
+	{0xcec, 0x2f97d40c}, {0xd00, 0x00080740},
+	{0xd04, 0x00020403}, {0xd08, 0x0000907f},
+	{0xd0c, 0x20010201}, {0xd10, 0xa0633333},
+	{0xd14, 0x3333bc43}, {0xd18, 0x7a8f5b6b},
+	{0xd1c, 0x0000007f}, {0xd2c, 0xcc979975},
+	{0xd30, 0x00000000}, {0xd34, 0x80608000},
+	{0xd38, 0x00000000}, {0xd3c, 0x00127353},
+	{0xd40, 0x00000000}, {0xd44, 0x00000000},
+	{0xd48, 0x00000000}, {0xd4c, 0x00000000},
+	{0xd50, 0x6437140a}, {0xd54, 0x00000000},
+	{0xd58, 0x00000282}, {0xd5c, 0x30032064},
+	{0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+	{0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+	{0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+	{0xd78, 0x000e3c24}, {0xd80, 0x01081008},
+	{0xd84, 0x00000800}, {0xd88, 0xf0b50000},
+	{0xe00, 0x30303030}, {0xe04, 0x30303030},
+	{0xe08, 0x03903030}, {0xe10, 0x30303030},
+	{0xe14, 0x30303030}, {0xe18, 0x30303030},
+	{0xe1c, 0x30303030}, {0xe28, 0x00000000},
+	{0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f},
+	{0xe38, 0x02140102}, {0xe3c, 0x681604c2},
+	{0xe40, 0x01007c00}, {0xe44, 0x01004800},
+	{0xe48, 0xfb000000}, {0xe4c, 0x000028d1},
+	{0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f},
+	{0xe58, 0x02140102}, {0xe5c, 0x28160d05},
+	{0xe60, 0x00000008}, {0xe68, 0x0fc05656},
+	{0xe6c, 0x03c09696}, {0xe70, 0x03c09696},
+	{0xe74, 0x0c005656}, {0xe78, 0x0c005656},
+	{0xe7c, 0x0c005656}, {0xe80, 0x0c005656},
+	{0xe84, 0x03c09696}, {0xe88, 0x0c005656},
+	{0xe8c, 0x03c09696}, {0xed0, 0x03c09696},
+	{0xed4, 0x03c09696}, {0xed8, 0x03c09696},
+	{0xedc, 0x0000d6d6}, {0xee0, 0x0000d6d6},
+	{0xeec, 0x0fc01616}, {0xee4, 0xb0000c1c},
+	{0xee8, 0x00000001}, {0xf14, 0x00000003},
+	{0xf4c, 0x00000000}, {0xf00, 0x00000300},
+	{0xffff, 0xffffffff},
+};
+
 static struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = {
 	{0xc78, 0x7b000001}, {0xc78, 0x7b010001},
 	{0xc78, 0x7b020001}, {0xc78, 0x7b030001},
@@ -819,6 +1049,144 @@
 	{0xffff, 0xffffffff}
 };
 
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_std_table[] = {
+	{0xc78, 0xfb000001}, {0xc78, 0xfb010001},
+	{0xc78, 0xfb020001}, {0xc78, 0xfb030001},
+	{0xc78, 0xfb040001}, {0xc78, 0xfb050001},
+	{0xc78, 0xfa060001}, {0xc78, 0xf9070001},
+	{0xc78, 0xf8080001}, {0xc78, 0xf7090001},
+	{0xc78, 0xf60a0001}, {0xc78, 0xf50b0001},
+	{0xc78, 0xf40c0001}, {0xc78, 0xf30d0001},
+	{0xc78, 0xf20e0001}, {0xc78, 0xf10f0001},
+	{0xc78, 0xf0100001}, {0xc78, 0xef110001},
+	{0xc78, 0xee120001}, {0xc78, 0xed130001},
+	{0xc78, 0xec140001}, {0xc78, 0xeb150001},
+	{0xc78, 0xea160001}, {0xc78, 0xe9170001},
+	{0xc78, 0xe8180001}, {0xc78, 0xe7190001},
+	{0xc78, 0xc81a0001}, {0xc78, 0xc71b0001},
+	{0xc78, 0xc61c0001}, {0xc78, 0x071d0001},
+	{0xc78, 0x061e0001}, {0xc78, 0x051f0001},
+	{0xc78, 0x04200001}, {0xc78, 0x03210001},
+	{0xc78, 0xaa220001}, {0xc78, 0xa9230001},
+	{0xc78, 0xa8240001}, {0xc78, 0xa7250001},
+	{0xc78, 0xa6260001}, {0xc78, 0x85270001},
+	{0xc78, 0x84280001}, {0xc78, 0x83290001},
+	{0xc78, 0x252a0001}, {0xc78, 0x242b0001},
+	{0xc78, 0x232c0001}, {0xc78, 0x222d0001},
+	{0xc78, 0x672e0001}, {0xc78, 0x662f0001},
+	{0xc78, 0x65300001}, {0xc78, 0x64310001},
+	{0xc78, 0x63320001}, {0xc78, 0x62330001},
+	{0xc78, 0x61340001}, {0xc78, 0x45350001},
+	{0xc78, 0x44360001}, {0xc78, 0x43370001},
+	{0xc78, 0x42380001}, {0xc78, 0x41390001},
+	{0xc78, 0x403a0001}, {0xc78, 0x403b0001},
+	{0xc78, 0x403c0001}, {0xc78, 0x403d0001},
+	{0xc78, 0x403e0001}, {0xc78, 0x403f0001},
+	{0xc78, 0xfb400001}, {0xc78, 0xfb410001},
+	{0xc78, 0xfb420001}, {0xc78, 0xfb430001},
+	{0xc78, 0xfb440001}, {0xc78, 0xfb450001},
+	{0xc78, 0xfa460001}, {0xc78, 0xf9470001},
+	{0xc78, 0xf8480001}, {0xc78, 0xf7490001},
+	{0xc78, 0xf64a0001}, {0xc78, 0xf54b0001},
+	{0xc78, 0xf44c0001}, {0xc78, 0xf34d0001},
+	{0xc78, 0xf24e0001}, {0xc78, 0xf14f0001},
+	{0xc78, 0xf0500001}, {0xc78, 0xef510001},
+	{0xc78, 0xee520001}, {0xc78, 0xed530001},
+	{0xc78, 0xec540001}, {0xc78, 0xeb550001},
+	{0xc78, 0xea560001}, {0xc78, 0xe9570001},
+	{0xc78, 0xe8580001}, {0xc78, 0xe7590001},
+	{0xc78, 0xe65a0001}, {0xc78, 0xe55b0001},
+	{0xc78, 0xe45c0001}, {0xc78, 0xe35d0001},
+	{0xc78, 0xe25e0001}, {0xc78, 0xe15f0001},
+	{0xc78, 0x8a600001}, {0xc78, 0x89610001},
+	{0xc78, 0x88620001}, {0xc78, 0x87630001},
+	{0xc78, 0x86640001}, {0xc78, 0x85650001},
+	{0xc78, 0x84660001}, {0xc78, 0x83670001},
+	{0xc78, 0x82680001}, {0xc78, 0x6b690001},
+	{0xc78, 0x6a6a0001}, {0xc78, 0x696b0001},
+	{0xc78, 0x686c0001}, {0xc78, 0x676d0001},
+	{0xc78, 0x666e0001}, {0xc78, 0x656f0001},
+	{0xc78, 0x64700001}, {0xc78, 0x63710001},
+	{0xc78, 0x62720001}, {0xc78, 0x61730001},
+	{0xc78, 0x49740001}, {0xc78, 0x48750001},
+	{0xc78, 0x47760001}, {0xc78, 0x46770001},
+	{0xc78, 0x45780001}, {0xc78, 0x44790001},
+	{0xc78, 0x437a0001}, {0xc78, 0x427b0001},
+	{0xc78, 0x417c0001}, {0xc78, 0x407d0001},
+	{0xc78, 0x407e0001}, {0xc78, 0x407f0001},
+	{0xc50, 0x00040022}, {0xc50, 0x00040020},
+	{0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_highpa_table[] = {
+	{0xc78, 0xfa000001}, {0xc78, 0xf9010001},
+	{0xc78, 0xf8020001}, {0xc78, 0xf7030001},
+	{0xc78, 0xf6040001}, {0xc78, 0xf5050001},
+	{0xc78, 0xf4060001}, {0xc78, 0xf3070001},
+	{0xc78, 0xf2080001}, {0xc78, 0xf1090001},
+	{0xc78, 0xf00a0001}, {0xc78, 0xef0b0001},
+	{0xc78, 0xee0c0001}, {0xc78, 0xed0d0001},
+	{0xc78, 0xec0e0001}, {0xc78, 0xeb0f0001},
+	{0xc78, 0xea100001}, {0xc78, 0xe9110001},
+	{0xc78, 0xe8120001}, {0xc78, 0xe7130001},
+	{0xc78, 0xe6140001}, {0xc78, 0xe5150001},
+	{0xc78, 0xe4160001}, {0xc78, 0xe3170001},
+	{0xc78, 0xe2180001}, {0xc78, 0xe1190001},
+	{0xc78, 0x8a1a0001}, {0xc78, 0x891b0001},
+	{0xc78, 0x881c0001}, {0xc78, 0x871d0001},
+	{0xc78, 0x861e0001}, {0xc78, 0x851f0001},
+	{0xc78, 0x84200001}, {0xc78, 0x83210001},
+	{0xc78, 0x82220001}, {0xc78, 0x6a230001},
+	{0xc78, 0x69240001}, {0xc78, 0x68250001},
+	{0xc78, 0x67260001}, {0xc78, 0x66270001},
+	{0xc78, 0x65280001}, {0xc78, 0x64290001},
+	{0xc78, 0x632a0001}, {0xc78, 0x622b0001},
+	{0xc78, 0x612c0001}, {0xc78, 0x602d0001},
+	{0xc78, 0x472e0001}, {0xc78, 0x462f0001},
+	{0xc78, 0x45300001}, {0xc78, 0x44310001},
+	{0xc78, 0x43320001}, {0xc78, 0x42330001},
+	{0xc78, 0x41340001}, {0xc78, 0x40350001},
+	{0xc78, 0x40360001}, {0xc78, 0x40370001},
+	{0xc78, 0x40380001}, {0xc78, 0x40390001},
+	{0xc78, 0x403a0001}, {0xc78, 0x403b0001},
+	{0xc78, 0x403c0001}, {0xc78, 0x403d0001},
+	{0xc78, 0x403e0001}, {0xc78, 0x403f0001},
+	{0xc78, 0xfa400001}, {0xc78, 0xf9410001},
+	{0xc78, 0xf8420001}, {0xc78, 0xf7430001},
+	{0xc78, 0xf6440001}, {0xc78, 0xf5450001},
+	{0xc78, 0xf4460001}, {0xc78, 0xf3470001},
+	{0xc78, 0xf2480001}, {0xc78, 0xf1490001},
+	{0xc78, 0xf04a0001}, {0xc78, 0xef4b0001},
+	{0xc78, 0xee4c0001}, {0xc78, 0xed4d0001},
+	{0xc78, 0xec4e0001}, {0xc78, 0xeb4f0001},
+	{0xc78, 0xea500001}, {0xc78, 0xe9510001},
+	{0xc78, 0xe8520001}, {0xc78, 0xe7530001},
+	{0xc78, 0xe6540001}, {0xc78, 0xe5550001},
+	{0xc78, 0xe4560001}, {0xc78, 0xe3570001},
+	{0xc78, 0xe2580001}, {0xc78, 0xe1590001},
+	{0xc78, 0x8a5a0001}, {0xc78, 0x895b0001},
+	{0xc78, 0x885c0001}, {0xc78, 0x875d0001},
+	{0xc78, 0x865e0001}, {0xc78, 0x855f0001},
+	{0xc78, 0x84600001}, {0xc78, 0x83610001},
+	{0xc78, 0x82620001}, {0xc78, 0x6a630001},
+	{0xc78, 0x69640001}, {0xc78, 0x68650001},
+	{0xc78, 0x67660001}, {0xc78, 0x66670001},
+	{0xc78, 0x65680001}, {0xc78, 0x64690001},
+	{0xc78, 0x636a0001}, {0xc78, 0x626b0001},
+	{0xc78, 0x616c0001}, {0xc78, 0x606d0001},
+	{0xc78, 0x476e0001}, {0xc78, 0x466f0001},
+	{0xc78, 0x45700001}, {0xc78, 0x44710001},
+	{0xc78, 0x43720001}, {0xc78, 0x42730001},
+	{0xc78, 0x41740001}, {0xc78, 0x40750001},
+	{0xc78, 0x40760001}, {0xc78, 0x40770001},
+	{0xc78, 0x40780001}, {0xc78, 0x40790001},
+	{0xc78, 0x407a0001}, {0xc78, 0x407b0001},
+	{0xc78, 0x407c0001}, {0xc78, 0x407d0001},
+	{0xc78, 0x407e0001}, {0xc78, 0x407f0001},
+	{0xc50, 0x00040222}, {0xc50, 0x00040220},
+	{0xffff, 0xffffffff}
+};
+
 static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
 	{0x00, 0x00030159}, {0x01, 0x00031284},
 	{0x02, 0x00098000}, {0x03, 0x00039c63},
@@ -963,6 +1331,7 @@
 	{0xff, 0xffffffff}
 };
 
+#ifdef CONFIG_RTL8XXXU_UNTESTED
 static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
 	{0x00, 0x00030159}, {0x01, 0x00031284},
 	{0x02, 0x00098000}, {0x03, 0x00018c63},
@@ -1211,6 +1580,153 @@
 	{0x00, 0x00030159},
 	{0xff, 0xffffffff}
 };
+#endif
+
+static struct rtl8xxxu_rfregval rtl8192eu_radioa_init_table[] = {
+	{0x7f, 0x00000082}, {0x81, 0x0003fc00},
+	{0x00, 0x00030000}, {0x08, 0x00008400},
+	{0x18, 0x00000407}, {0x19, 0x00000012},
+	{0x1b, 0x00000064}, {0x1e, 0x00080009},
+	{0x1f, 0x00000880}, {0x2f, 0x0001a060},
+	{0x3f, 0x00000000}, {0x42, 0x000060c0},
+	{0x57, 0x000d0000}, {0x58, 0x000be180},
+	{0x67, 0x00001552}, {0x83, 0x00000000},
+	{0xb0, 0x000ff9f1}, {0xb1, 0x00055418},
+	{0xb2, 0x0008cc00}, {0xb4, 0x00043083},
+	{0xb5, 0x00008166}, {0xb6, 0x0000803e},
+	{0xb7, 0x0001c69f}, {0xb8, 0x0000407f},
+	{0xb9, 0x00080001}, {0xba, 0x00040001},
+	{0xbb, 0x00000400}, {0xbf, 0x000c0000},
+	{0xc2, 0x00002400}, {0xc3, 0x00000009},
+	{0xc4, 0x00040c91}, {0xc5, 0x00099999},
+	{0xc6, 0x000000a3}, {0xc7, 0x00088820},
+	{0xc8, 0x00076c06}, {0xc9, 0x00000000},
+	{0xca, 0x00080000}, {0xdf, 0x00000180},
+	{0xef, 0x000001a0}, {0x51, 0x00069545},
+	{0x52, 0x0007e45e}, {0x53, 0x00000071},
+	{0x56, 0x00051ff3}, {0x35, 0x000000a8},
+	{0x35, 0x000001e2}, {0x35, 0x000002a8},
+	{0x36, 0x00001c24}, {0x36, 0x00009c24},
+	{0x36, 0x00011c24}, {0x36, 0x00019c24},
+	{0x18, 0x00000c07}, {0x5a, 0x00048000},
+	{0x19, 0x000739d0},
+#ifdef EXT_PA_8192EU
+	/* External PA or external LNA */
+	{0x34, 0x0000a093}, {0x34, 0x0000908f},
+	{0x34, 0x0000808c}, {0x34, 0x0000704d},
+	{0x34, 0x0000604a}, {0x34, 0x00005047},
+	{0x34, 0x0000400a}, {0x34, 0x00003007},
+	{0x34, 0x00002004}, {0x34, 0x00001001},
+	{0x34, 0x00000000},
+#else
+	/* Regular */
+	{0x34, 0x0000add7}, {0x34, 0x00009dd4},
+	{0x34, 0x00008dd1}, {0x34, 0x00007dce},
+	{0x34, 0x00006dcb}, {0x34, 0x00005dc8},
+	{0x34, 0x00004dc5}, {0x34, 0x000034cc},
+	{0x34, 0x0000244f}, {0x34, 0x0000144c},
+	{0x34, 0x00000014},
+#endif
+	{0x00, 0x00030159},
+	{0x84, 0x00068180},
+	{0x86, 0x0000014e},
+	{0x87, 0x00048e00},
+	{0x8e, 0x00065540},
+	{0x8f, 0x00088000},
+	{0xef, 0x000020a0},
+#ifdef EXT_PA_8192EU
+	/* External PA or external LNA */
+	{0x3b, 0x000f07b0},
+#else
+	{0x3b, 0x000f02b0},
+#endif
+	{0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0},
+	{0x3b, 0x000cf060}, {0x3b, 0x000b0090},
+	{0x3b, 0x000a0080}, {0x3b, 0x00090080},
+	{0x3b, 0x0008f780},
+#ifdef EXT_PA_8192EU
+	/* External PA or external LNA */
+	{0x3b, 0x000787b0},
+#else
+	{0x3b, 0x00078730},
+#endif
+	{0x3b, 0x00060fb0}, {0x3b, 0x0005ffa0},
+	{0x3b, 0x00040620}, {0x3b, 0x00037090},
+	{0x3b, 0x00020080}, {0x3b, 0x0001f060},
+	{0x3b, 0x0000ffb0}, {0xef, 0x000000a0},
+	{0xfe, 0x00000000}, {0x18, 0x0000fc07},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0x1e, 0x00000001}, {0x1f, 0x00080000},
+	{0x00, 0x00033e70},
+	{0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192eu_radiob_init_table[] = {
+	{0x7f, 0x00000082}, {0x81, 0x0003fc00},
+	{0x00, 0x00030000}, {0x08, 0x00008400},
+	{0x18, 0x00000407}, {0x19, 0x00000012},
+	{0x1b, 0x00000064}, {0x1e, 0x00080009},
+	{0x1f, 0x00000880}, {0x2f, 0x0001a060},
+	{0x3f, 0x00000000}, {0x42, 0x000060c0},
+	{0x57, 0x000d0000}, {0x58, 0x000be180},
+	{0x67, 0x00001552}, {0x7f, 0x00000082},
+	{0x81, 0x0003f000}, {0x83, 0x00000000},
+	{0xdf, 0x00000180}, {0xef, 0x000001a0},
+	{0x51, 0x00069545}, {0x52, 0x0007e42e},
+	{0x53, 0x00000071}, {0x56, 0x00051ff3},
+	{0x35, 0x000000a8}, {0x35, 0x000001e0},
+	{0x35, 0x000002a8}, {0x36, 0x00001ca8},
+	{0x36, 0x00009c24}, {0x36, 0x00011c24},
+	{0x36, 0x00019c24}, {0x18, 0x00000c07},
+	{0x5a, 0x00048000}, {0x19, 0x000739d0},
+#ifdef EXT_PA_8192EU
+	/* External PA or external LNA */
+	{0x34, 0x0000a093}, {0x34, 0x0000908f},
+	{0x34, 0x0000808c}, {0x34, 0x0000704d},
+	{0x34, 0x0000604a}, {0x34, 0x00005047},
+	{0x34, 0x0000400a}, {0x34, 0x00003007},
+	{0x34, 0x00002004}, {0x34, 0x00001001},
+	{0x34, 0x00000000},
+#else
+	{0x34, 0x0000add7}, {0x34, 0x00009dd4},
+	{0x34, 0x00008dd1}, {0x34, 0x00007dce},
+	{0x34, 0x00006dcb}, {0x34, 0x00005dc8},
+	{0x34, 0x00004dc5}, {0x34, 0x000034cc},
+	{0x34, 0x0000244f}, {0x34, 0x0000144c},
+	{0x34, 0x00000014},
+#endif
+	{0x00, 0x00030159}, {0x84, 0x00068180},
+	{0x86, 0x000000ce}, {0x87, 0x00048a00},
+	{0x8e, 0x00065540}, {0x8f, 0x00088000},
+	{0xef, 0x000020a0},
+#ifdef EXT_PA_8192EU
+	/* External PA or external LNA */
+	{0x3b, 0x000f07b0},
+#else
+	{0x3b, 0x000f02b0},
+#endif
+
+	{0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0},
+	{0x3b, 0x000cf060}, {0x3b, 0x000b0090},
+	{0x3b, 0x000a0080}, {0x3b, 0x00090080},
+	{0x3b, 0x0008f780},
+#ifdef EXT_PA_8192EU
+	/* External PA or external LNA */
+	{0x3b, 0x000787b0},
+#else
+	{0x3b, 0x00078730},
+#endif
+	{0x3b, 0x00060fb0}, {0x3b, 0x0005ffa0},
+	{0x3b, 0x00040620}, {0x3b, 0x00037090},
+	{0x3b, 0x00020080}, {0x3b, 0x0001f060},
+	{0x3b, 0x0000ffb0}, {0xef, 0x000000a0},
+	{0x00, 0x00010159}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0x1e, 0x00000001},
+	{0x1f, 0x00080000}, {0x00, 0x00033e70},
+	{0xff, 0xffffffff}
+};
 
 static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
 	{	/* RF_A */
@@ -1231,7 +1747,7 @@
 	},
 };
 
-static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
+static const u32 rtl8xxxu_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
 	REG_OFDM0_XA_RX_IQ_IMBALANCE,
 	REG_OFDM0_XB_RX_IQ_IMBALANCE,
 	REG_OFDM0_ENERGY_CCA_THRES,
@@ -1450,7 +1966,7 @@
 				enum rtl8xxxu_rfpath path, u8 reg, u32 data)
 {
 	int ret, retval;
-	u32 dataaddr;
+	u32 dataaddr, val32;
 
 	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_WRITE)
 		dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
@@ -1459,6 +1975,12 @@
 	data &= FPGA0_LSSI_PARM_DATA_MASK;
 	dataaddr = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) | data;
 
+	if (priv->rtl_chip == RTL8192E) {
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+		val32 &= ~0x20000;
+		rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+	}
+
 	/* Use XB for path B */
 	ret = rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].lssiparm, dataaddr);
 	if (ret != sizeof(dataaddr))
@@ -1468,6 +1990,12 @@
 
 	udelay(1);
 
+	if (priv->rtl_chip == RTL8192E) {
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+		val32 |= 0x20000;
+		rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+	}
+
 	return retval;
 }
 
@@ -1552,7 +2080,7 @@
 	rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
 }
 
-static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv)
 {
 	u8 val8;
 	u32 val32;
@@ -1574,7 +2102,7 @@
 	val32 &= ~OFDM_RF_PATH_TX_MASK;
 	if (priv->tx_paths == 2)
 		val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B;
-	else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c)
+	else if (priv->rtl_chip == RTL8192C || priv->rtl_chip == RTL8191C)
 		val32 |= OFDM_RF_PATH_TX_B;
 	else
 		val32 |= OFDM_RF_PATH_TX_A;
@@ -1596,13 +2124,11 @@
 	rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
 }
 
-static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv)
 {
 	u8 sps0;
 	u32 val32;
 
-	rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
-
 	sps0 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
 
 	/* RF RX code for preamble power saving */
@@ -1676,7 +2202,10 @@
 	return group;
 }
 
-static int rtl8723b_channel_to_group(int channel)
+/*
+ * Valid for rtl8723bu and rtl8192eu
+ */
+static int rtl8xxxu_gen2_channel_to_group(int channel)
 {
 	int group;
 
@@ -1694,7 +2223,7 @@
 	return group;
 }
 
-static void rtl8723au_config_channel(struct ieee80211_hw *hw)
+static void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
 {
 	struct rtl8xxxu_priv *priv = hw->priv;
 	u32 val32, rsr;
@@ -1816,7 +2345,7 @@
 	}
 }
 
-static void rtl8723bu_config_channel(struct ieee80211_hw *hw)
+static void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
 {
 	struct rtl8xxxu_priv *priv = hw->priv;
 	u32 val32, rsr;
@@ -1947,8 +2476,9 @@
 }
 
 static void
-rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
 {
+	struct rtl8xxxu_power_base *power_base = priv->power_base;
 	u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
 	u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
 	u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
@@ -1957,11 +2487,22 @@
 
 	group = rtl8723a_channel_to_group(channel);
 
-	cck[0] = priv->cck_tx_power_index_A[group];
-	cck[1] = priv->cck_tx_power_index_B[group];
+	cck[0] = priv->cck_tx_power_index_A[group] - 1;
+	cck[1] = priv->cck_tx_power_index_B[group] - 1;
+
+	if (priv->hi_pa) {
+		if (cck[0] > 0x20)
+			cck[0] = 0x20;
+		if (cck[1] > 0x20)
+			cck[1] = 0x20;
+	}
 
 	ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
 	ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+	if (ofdm[0])
+		ofdm[0] -= 1;
+	if (ofdm[1])
+		ofdm[1] -= 1;
 
 	ofdmbase[0] = ofdm[0] +	priv->ofdm_tx_power_index_diff[group].a;
 	ofdmbase[1] = ofdm[1] +	priv->ofdm_tx_power_index_diff[group].b;
@@ -2017,27 +2558,39 @@
 		ofdmbase[0] << 16 | ofdmbase[0] << 24;
 	ofdm_b = ofdmbase[1] | ofdmbase[1] << 8 |
 		ofdmbase[1] << 16 | ofdmbase[1] << 24;
-	rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm_a);
-	rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm_b);
 
-	rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm_a);
-	rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm_b);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06,
+			 ofdm_a + power_base->reg_0e00);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06,
+			 ofdm_b + power_base->reg_0830);
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24,
+			 ofdm_a + power_base->reg_0e04);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24,
+			 ofdm_b + power_base->reg_0834);
 
 	mcs_a = mcsbase[0] | mcsbase[0] << 8 |
 		mcsbase[0] << 16 | mcsbase[0] << 24;
 	mcs_b = mcsbase[1] | mcsbase[1] << 8 |
 		mcsbase[1] << 16 | mcsbase[1] << 24;
 
-	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs_a);
-	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs_b);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00,
+			 mcs_a + power_base->reg_0e10);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00,
+			 mcs_b + power_base->reg_083c);
 
-	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs_a);
-	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs_b);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04,
+			 mcs_a + power_base->reg_0e14);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04,
+			 mcs_b + power_base->reg_0848);
 
-	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs_a);
-	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs_b);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08,
+			 mcs_a + power_base->reg_0e18);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08,
+			 mcs_b + power_base->reg_084c);
 
-	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12,
+			 mcs_a + power_base->reg_0e1c);
 	for (i = 0; i < 3; i++) {
 		if (i != 2)
 			val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
@@ -2045,7 +2598,8 @@
 			val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
 		rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
 	}
-	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12,
+			 mcs_b + power_base->reg_0868);
 	for (i = 0; i < 3; i++) {
 		if (i != 2)
 			val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
@@ -2063,7 +2617,7 @@
 	int group, tx_idx;
 
 	tx_idx = 0;
-	group = rtl8723b_channel_to_group(channel);
+	group = rtl8xxxu_gen2_channel_to_group(channel);
 
 	cck = priv->cck_tx_power_index_B[group];
 	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
@@ -2094,6 +2648,82 @@
 	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
 }
 
+static void
+rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+	u32 val32, ofdm, mcs;
+	u8 cck, ofdmbase, mcsbase;
+	int group, tx_idx;
+
+	tx_idx = 0;
+	group = rtl8xxxu_gen2_channel_to_group(channel);
+
+	cck = priv->cck_tx_power_index_A[group];
+
+	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+	val32 &= 0xffff00ff;
+	val32 |= (cck << 8);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+	val32 &= 0xff;
+	val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+	ofdmbase = priv->ht40_1s_tx_power_index_A[group];
+	ofdmbase += priv->ofdm_tx_power_diff[tx_idx].a;
+	ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24;
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm);
+
+	mcsbase = priv->ht40_1s_tx_power_index_A[group];
+	if (ht40)
+		mcsbase += priv->ht40_tx_power_diff[tx_idx++].a;
+	else
+		mcsbase += priv->ht20_tx_power_diff[tx_idx++].a;
+	mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs);
+
+	if (priv->tx_paths > 1) {
+		cck = priv->cck_tx_power_index_B[group];
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK1_55_MCS32);
+		val32 &= 0xff;
+		val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+		rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK1_55_MCS32, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+		val32 &= 0xffffff00;
+		val32 |= cck;
+		rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+		ofdmbase = priv->ht40_1s_tx_power_index_B[group];
+		ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b;
+		ofdm = ofdmbase | ofdmbase << 8 |
+			ofdmbase << 16 | ofdmbase << 24;
+
+		rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm);
+		rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm);
+
+		mcsbase = priv->ht40_1s_tx_power_index_B[group];
+		if (ht40)
+			mcsbase += priv->ht40_tx_power_diff[tx_idx++].b;
+		else
+			mcsbase += priv->ht20_tx_power_diff[tx_idx++].b;
+		mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+		rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs);
+		rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs);
+		rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs);
+		rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs);
+	}
+}
+
 static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
 				  enum nl80211_iftype linktype)
 {
@@ -2199,11 +2829,11 @@
 	if (val32 & SYS_CFG_BT_FUNC) {
 		if (priv->chip_cut >= 3) {
 			sprintf(priv->chip_name, "8723BU");
-			priv->rtlchip = 0x8723b;
+			priv->rtl_chip = RTL8723B;
 		} else {
 			sprintf(priv->chip_name, "8723AU");
 			priv->usb_interrupts = 1;
-			priv->rtlchip = 0x8723a;
+			priv->rtl_chip = RTL8723A;
 		}
 
 		priv->rf_paths = 1;
@@ -2221,19 +2851,20 @@
 	} else if (val32 & SYS_CFG_TYPE_ID) {
 		bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
 		bonding &= HPON_FSM_BONDING_MASK;
-		if (priv->chip_cut >= 3) {
+		if (priv->fops->tx_desc_size ==
+		    sizeof(struct rtl8xxxu_txdesc40)) {
 			if (bonding == HPON_FSM_BONDING_1T2R) {
 				sprintf(priv->chip_name, "8191EU");
 				priv->rf_paths = 2;
 				priv->rx_paths = 2;
 				priv->tx_paths = 1;
-				priv->rtlchip = 0x8191e;
+				priv->rtl_chip = RTL8191E;
 			} else {
 				sprintf(priv->chip_name, "8192EU");
 				priv->rf_paths = 2;
 				priv->rx_paths = 2;
 				priv->tx_paths = 2;
-				priv->rtlchip = 0x8192e;
+				priv->rtl_chip = RTL8192E;
 			}
 		} else if (bonding == HPON_FSM_BONDING_1T2R) {
 			sprintf(priv->chip_name, "8191CU");
@@ -2241,14 +2872,14 @@
 			priv->rx_paths = 2;
 			priv->tx_paths = 1;
 			priv->usb_interrupts = 1;
-			priv->rtlchip = 0x8191c;
+			priv->rtl_chip = RTL8191C;
 		} else {
 			sprintf(priv->chip_name, "8192CU");
 			priv->rf_paths = 2;
 			priv->rx_paths = 2;
 			priv->tx_paths = 2;
 			priv->usb_interrupts = 1;
-			priv->rtlchip = 0x8192c;
+			priv->rtl_chip = RTL8192C;
 		}
 		priv->has_wifi = 1;
 	} else {
@@ -2256,15 +2887,15 @@
 		priv->rf_paths = 1;
 		priv->rx_paths = 1;
 		priv->tx_paths = 1;
-		priv->rtlchip = 0x8188c;
+		priv->rtl_chip = RTL8188C;
 		priv->usb_interrupts = 1;
 		priv->has_wifi = 1;
 	}
 
-	switch (priv->rtlchip) {
-	case 0x8188e:
-	case 0x8192e:
-	case 0x8723b:
+	switch (priv->rtl_chip) {
+	case RTL8188E:
+	case RTL8192E:
+	case RTL8723B:
 		switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
 		case SYS_CFG_VENDOR_ID_TSMC:
 			sprintf(priv->chip_vendor, "TSMC");
@@ -2375,6 +3006,9 @@
 		priv->has_xtalk = 1;
 		priv->xtalk = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
 	}
+
+	priv->power_base = &rtl8723a_power_base;
+
 	dev_info(&priv->udev->dev, "Vendor: %.7s\n",
 		 efuse->vendor_name);
 	dev_info(&priv->udev->dev, "Product: %.41s\n",
@@ -2507,9 +3141,14 @@
 	dev_info(&priv->udev->dev, "Product: %.20s\n",
 		 efuse->device_name);
 
+	priv->power_base = &rtl8192c_power_base;
+
 	if (efuse->rf_regulatory & 0x20) {
 		sprintf(priv->chip_name, "8188RU");
+		priv->rtl_chip = RTL8188R;
 		priv->hi_pa = 1;
+		priv->no_pape = 1;
+		priv->power_base = &rtl8188r_power_base;
 	}
 
 	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
@@ -2541,6 +3180,43 @@
 
 	ether_addr_copy(priv->mac_addr, efuse->mac_addr);
 
+	memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
+	       sizeof(efuse->tx_power_index_A.cck_base));
+	memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base,
+	       sizeof(efuse->tx_power_index_B.cck_base));
+
+	memcpy(priv->ht40_1s_tx_power_index_A,
+	       efuse->tx_power_index_A.ht40_base,
+	       sizeof(efuse->tx_power_index_A.ht40_base));
+	memcpy(priv->ht40_1s_tx_power_index_B,
+	       efuse->tx_power_index_B.ht40_base,
+	       sizeof(efuse->tx_power_index_B.ht40_base));
+
+	priv->ht20_tx_power_diff[0].a =
+		efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
+	priv->ht20_tx_power_diff[0].b =
+		efuse->tx_power_index_B.ht20_ofdm_1s_diff.b;
+
+	priv->ht40_tx_power_diff[0].a = 0;
+	priv->ht40_tx_power_diff[0].b = 0;
+
+	for (i = 1; i < RTL8723B_TX_COUNT; i++) {
+		priv->ofdm_tx_power_diff[i].a =
+			efuse->tx_power_index_A.pwr_diff[i - 1].ofdm;
+		priv->ofdm_tx_power_diff[i].b =
+			efuse->tx_power_index_B.pwr_diff[i - 1].ofdm;
+
+		priv->ht20_tx_power_diff[i].a =
+			efuse->tx_power_index_A.pwr_diff[i - 1].ht20;
+		priv->ht20_tx_power_diff[i].b =
+			efuse->tx_power_index_B.pwr_diff[i - 1].ht20;
+
+		priv->ht40_tx_power_diff[i].a =
+			efuse->tx_power_index_A.pwr_diff[i - 1].ht40;
+		priv->ht40_tx_power_diff[i].b =
+			efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
+	}
+
 	priv->has_xtalk = 1;
 	priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
 
@@ -2562,10 +3238,6 @@
 				 raw[i + 6], raw[i + 7]);
 		}
 	}
-	/*
-	 * Temporarily disable 8192eu support
-	 */
-	return -EINVAL;
 	return 0;
 }
 
@@ -2814,7 +3486,7 @@
 	/*
 	 * Init H2C command
 	 */
-	if (priv->rtlchip == 0x8723b)
+	if (priv->rtl_chip == RTL8723B)
 		rtl8xxxu_write8(priv, REG_HMTFR, 0x0f);
 exit:
 	return ret;
@@ -2997,7 +3669,7 @@
 
 	if (!priv->vendor_umc)
 		fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
-	else if (priv->chip_cut || priv->rtlchip == 0x8192c)
+	else if (priv->chip_cut || priv->rtl_chip == RTL8192C)
 		fw_name = "rtlwifi/rtl8192cufw_B.bin";
 	else
 		fw_name = "rtlwifi/rtl8192cufw_A.bin";
@@ -3052,9 +3724,9 @@
 {
 	u32 val32;
 
-	val32 = rtl8xxxu_read32(priv, 0x64);
+	val32 = rtl8xxxu_read32(priv, REG_PAD_CTRL1);
 	val32 &= ~(BIT(20) | BIT(24));
-	rtl8xxxu_write32(priv, 0x64, val32);
+	rtl8xxxu_write32(priv, REG_PAD_CTRL1, val32);
 
 	val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
 	val32 &= ~BIT(4);
@@ -3087,8 +3759,9 @@
 }
 
 static int
-rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
+rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv)
 {
+	struct rtl8xxxu_reg8val *array = priv->fops->mactable;
 	int i, ret;
 	u16 reg;
 	u8 val;
@@ -3103,12 +3776,13 @@
 		ret = rtl8xxxu_write8(priv, reg, val);
 		if (ret != 1) {
 			dev_warn(&priv->udev->dev,
-				 "Failed to initialize MAC\n");
+				 "Failed to initialize MAC "
+				 "(reg: %04x, val %02x)\n", reg, val);
 			return -EAGAIN;
 		}
 	}
 
-	if (priv->rtlchip != 0x8723b)
+	if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
 		rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
 
 	return 0;
@@ -3140,50 +3814,30 @@
 	return 0;
 }
 
-/*
- * Most of this is black magic retrieved from the old rtl8723au driver
- */
-static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv)
 {
 	u8 val8, ldoa15, ldov12d, lpldo, ldohci12;
 	u16 val16;
 	u32 val32;
 
-	/*
-	 * Todo: The vendor driver maintains a table of PHY register
-	 *       addresses, which is initialized here. Do we need this?
-	 */
+	val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+	udelay(2);
+	val8 |= AFE_PLL_320_ENABLE;
+	rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+	udelay(2);
 
-	if (priv->rtlchip == 0x8723b) {
-		val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
-		val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB |
-			SYS_FUNC_DIO_RF;
-		rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+	rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
+	udelay(2);
 
-		rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
-	} else {
-		val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
-		udelay(2);
-		val8 |= AFE_PLL_320_ENABLE;
-		rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
-		udelay(2);
+	val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+	val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+	rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
 
-		rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
-		udelay(2);
-
-		val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
-		val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
-		rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
-	}
-
-	if (priv->rtlchip != 0x8723b) {
-		/* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */
-		val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
-		val32 &= ~AFE_XTAL_RF_GATE;
-		if (priv->has_bluetooth)
-			val32 &= ~AFE_XTAL_BT_GATE;
-		rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
-	}
+	val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+	val32 &= ~AFE_XTAL_RF_GATE;
+	if (priv->has_bluetooth)
+		val32 &= ~AFE_XTAL_BT_GATE;
+	rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
 
 	/* 6. 0x1f[7:0] = 0x07 */
 	val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
@@ -3193,21 +3847,86 @@
 		rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table);
 	else if (priv->tx_paths == 2)
 		rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table);
-	else if (priv->rtlchip == 0x8723b) {
-		/*
-		 * Why?
-		 */
-		rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3);
-		rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80);
-		rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table);
-	} else
+	else
 		rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table);
 
-
-	if (priv->rtlchip == 0x8188c && priv->hi_pa &&
+	if (priv->rtl_chip == RTL8188R && priv->hi_pa &&
 	    priv->vendor_umc && priv->chip_cut == 1)
 		rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50);
 
+	if (priv->hi_pa)
+		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
+	else
+		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
+
+	ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
+	ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
+	ldohci12 = 0x57;
+	lpldo = 1;
+	val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15;
+	rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
+}
+
+static void rtl8723bu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u16 val16;
+
+	val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+	val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+	rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+	rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+
+	/* 6. 0x1f[7:0] = 0x07 */
+	val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+	rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+	/* Why? */
+	rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3);
+	rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80);
+	rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table);
+
+	rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table);
+}
+
+static void rtl8192eu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u16 val16;
+
+	val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+	val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+	rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+	/* 6. 0x1f[7:0] = 0x07 */
+	val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+	rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+	val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+	val16 |= (SYS_FUNC_USBA | SYS_FUNC_USBD | SYS_FUNC_DIO_RF |
+		  SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB);
+	rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+	val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+	rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+	rtl8xxxu_init_phy_regs(priv, rtl8192eu_phy_init_table);
+
+	if (priv->hi_pa)
+		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8192eu_highpa_table);
+	else
+		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8192eu_std_table);
+}
+
+/*
+ * Most of this is black magic retrieved from the old rtl8723au driver
+ */
+static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u32 val32;
+
+	priv->fops->init_phy_bb(priv);
+
 	if (priv->tx_paths == 1 && priv->rx_paths == 2) {
 		/*
 		 * For 1T2R boards, patch the registers.
@@ -3225,8 +3944,10 @@
 		rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32);
 
 		val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
-		val32 &= 0xff000000;
-		val32 |= 0x45000000;
+		val32 &= ~CCK0_AFE_RX_MASK;
+		val32 &= 0x00ffffff;
+		val32 |= 0x40000000;
+		val32 |= CCK0_AFE_RX_ANT_B;
 		rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
 
 		val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
@@ -3266,13 +3987,6 @@
 		rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
 	}
 
-	if (priv->rtlchip == 0x8723b)
-		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table);
-	else if (priv->hi_pa)
-		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
-	else
-		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
-
 	if (priv->has_xtalk) {
 		val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
 
@@ -3283,16 +3997,8 @@
 		rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
 	}
 
-	if (priv->rtlchip != 0x8723bu) {
-		ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
-		ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
-		ldohci12 = 0x57;
-		lpldo = 1;
-		val32 = (lpldo << 24) | (ldohci12 << 16) |
-			(ldov12d << 8) | ldoa15;
-
-		rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
-	}
+	if (priv->rtl_chip == RTL8192E)
+		rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x000f81fb);
 
 	return 0;
 }
@@ -3410,6 +4116,77 @@
 	return 0;
 }
 
+static int rtl8723au_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+	int ret;
+
+	ret = rtl8xxxu_init_phy_rf(priv, rtl8723au_radioa_1t_init_table, RF_A);
+
+	/* Reduce 80M spur */
+	rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
+	rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+	rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
+	rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+
+	return ret;
+}
+
+static int rtl8723bu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+	int ret;
+
+	ret = rtl8xxxu_init_phy_rf(priv, rtl8723bu_radioa_1t_init_table, RF_A);
+	/*
+	 * PHY LCK
+	 */
+	rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01);
+	msleep(200);
+	rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0);
+
+	return ret;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+static int rtl8192cu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+	struct rtl8xxxu_rfregval *rftable;
+	int ret;
+
+	if (priv->rtl_chip == RTL8188R) {
+		rftable = rtl8188ru_radioa_1t_highpa_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+	} else if (priv->rf_paths == 1) {
+		rftable = rtl8192cu_radioa_1t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+	} else {
+		rftable = rtl8192cu_radioa_2t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+		if (ret)
+			goto exit;
+		rftable = rtl8192cu_radiob_2t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
+	}
+
+exit:
+	return ret;
+}
+#endif
+
+static int rtl8192eu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+	int ret;
+
+	ret = rtl8xxxu_init_phy_rf(priv, rtl8192eu_radioa_init_table, RF_A);
+	if (ret)
+		goto exit;
+
+	ret = rtl8xxxu_init_phy_rf(priv, rtl8192eu_radiob_init_table, RF_B);
+
+exit:
+	return ret;
+}
+
 static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
 {
 	int ret = -EBUSY;
@@ -3818,8 +4595,8 @@
 	return false;
 }
 
-static bool rtl8723bu_simularity_compare(struct rtl8xxxu_priv *priv,
-					 int result[][8], int c1, int c2)
+static bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
+					     int result[][8], int c1, int c2)
 {
 	u32 i, j, diff, simubitmap, bound = 0;
 	int candidate[2] = {-1, -1};	/* for path A and path B */
@@ -4389,21 +5166,194 @@
 	return result;
 }
 
-#ifdef RTL8723BU_PATH_B
-static int rtl8723bu_iqk_path_b(struct rtl8xxxu_priv *priv)
+static int rtl8192eu_iqk_path_a(struct rtl8xxxu_priv *priv)
 {
-	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, path_sel;
+	u32 reg_eac, reg_e94, reg_e9c;
 	int result = 0;
 
-	path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+	/*
+	 * TX IQK
+	 * PA/PAD controlled by 0x0
+	 */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00180);
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
 
-	val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
-	val32 &= 0x000000ff;
-	rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+	/* Path A IQK setting */
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
 
-	/* One shot, path B LOK & IQK */
-	rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002);
-	rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000);
+	rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82140303);
+	rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x68160000);
+
+	/* LO calibration setting */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+	/* One shot, path A LOK & IQK */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+	mdelay(10);
+
+	/* Check failed */
+	reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+	reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+	reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+	if (!(reg_eac & BIT(28)) &&
+	    ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+	    ((reg_e9c & 0x03ff0000) != 0x00420000))
+		result |= 0x01;
+
+	return result;
+}
+
+static int rtl8192eu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+	u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32;
+	int result = 0;
+
+	/* Leave IQK mode */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00);
+
+	/* Enable path A PA in TX IQK mode */
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf117b);
+
+	/* PA/PAD control by 0x56, and set = 0x0 */
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00980);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000);
+
+	/* Enter IQK mode */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+	/* TX IQK setting */
+	rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+	rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	/* path-A IQK setting */
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+	rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+	rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x68160c1f);
+
+	/* LO calibration setting */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+	/* One shot, path A LOK & IQK */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+	mdelay(10);
+
+	/* Check failed */
+	reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+	reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+	reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+	if (!(reg_eac & BIT(28)) &&
+	    ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+	    ((reg_e9c & 0x03ff0000) != 0x00420000)) {
+		result |= 0x01;
+	} else {
+		/* PA/PAD controlled by 0x0 */
+		rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+		rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+		goto out;
+	}
+
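+	/*
+	 * Feed the path A TX IQK result back into the TX IQK register so
+	 * the RX calibration below runs against it.
+	 */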
+	val32 = 0x80007c00 |
+		(reg_e94 & 0x03ff0000) | ((reg_e9c >> 16) & 0x03ff);
+	rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+	/* Modify RX IQK mode table */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7ffa);
+
+	/* PA/PAD control by 0x56, and set = 0x0 */
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00980);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000);
+
+	/* Enter IQK mode */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+	/* IQK setting */
+	rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	/* Path A IQK setting */
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c);
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+	rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+	rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c1f);
+
+	/* LO calibration setting */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a891);
+
+	/* One shot, path A LOK & IQK */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+	mdelay(10);
+
+	reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+	reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+
+	if (!(reg_eac & BIT(27)) &&
+	    ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+	    ((reg_eac & 0x03ff0000) != 0x00360000))
+		result |= 0x02;
+	else
+		dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n",
+			 __func__);
+
+out:
+	return result;
+}
+
+static int rtl8192eu_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+	u32 reg_eac, reg_eb4, reg_ebc;
+	int result = 0;
+
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00180);
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+	/* Path B IQK setting */
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+	rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x821403e2);
+	rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x68160000);
+
+	/* LO calibration setting */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00492911);
+
+	/* One shot, path B LOK & IQK */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
 
 	mdelay(1);
 
@@ -4411,27 +5361,138 @@
 	reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
 	reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
 	reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
-	reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
-	reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
 
 	if (!(reg_eac & BIT(31)) &&
 	    ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
 	    ((reg_ebc & 0x03ff0000) != 0x00420000))
 		result |= 0x01;
 	else
+		dev_warn(&priv->udev->dev, "%s: Path B IQK failed!\n",
+			 __func__);
+
+	return result;
+}
+
+static int rtl8192eu_rx_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, val32;
+	int result = 0;
+
+	/* Leave IQK mode */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+	/* Enable path B PA in TX IQK mode */
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_WE_LUT, 0x800a0);
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_RCK_OS, 0x30000);
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G1, 0x0000f);
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G2, 0xf117b);
+
+	/* PA/PAD control by 0x56, and set = 0x0 */
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00980);
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_56, 0x51000);
+
+	/* Enter IQK mode */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+	/* TX IQK setting */
+	rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+	rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	/* Path B IQK setting */
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+	rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82160c1f);
+	rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x68160c1f);
+
+	/* LO calibration setting */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+	/* One shot, path B LOK & IQK */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+	mdelay(10);
+
+	/* Check failed */
+	reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+	reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+	reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+
+	if (!(reg_eac & BIT(31)) &&
+	    ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+	    ((reg_ebc & 0x03ff0000) != 0x00420000)) {
+		result |= 0x01;
+	} else {
+		/*
+		 * PA/PAD controlled by 0x0
+		 * Vendor driver restores RF_A here which I believe is a bug
+		 */
+		rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+		rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x180);
 		goto out;
+	}
+
+	val32 = 0x80007c00 |
+		(reg_eb4 & 0x03ff0000) | ((reg_ebc >> 16) & 0x03ff);
+	rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+	/* Modify RX IQK mode table */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_WE_LUT, 0x800a0);
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_RCK_OS, 0x30000);
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G1, 0x0000f);
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G2, 0xf7ffa);
+
+	/* PA/PAD control by 0x56, and set = 0x0 */
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00980);
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_56, 0x51000);
+
+	/* Enter IQK mode */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+	/* IQK setting */
+	rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	/* Path B IQK setting */
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x18008c1c);
+
+	rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+	rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c1f);
+
+	/* LO calibration setting */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a891);
+
+	/* One shot, path B LOK & IQK */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+	mdelay(10);
+
+	reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+	reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+	reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+	rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x180);
 
 	if (!(reg_eac & BIT(30)) &&
-	    (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) &&
-	    (((reg_ecc & 0x03ff0000) >> 16) != 0x36))
+	    ((reg_ec4 & 0x03ff0000) != 0x01320000) &&
+	    ((reg_ecc & 0x03ff0000) != 0x00360000))
 		result |= 0x02;
 	else
 		dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
 			 __func__);
+
 out:
 	return result;
 }
-#endif
 
 static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
 				     int result[][8], int t)
@@ -4497,9 +5558,12 @@
 	rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
 	rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
 
-	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
-	val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
-	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+	if (!priv->no_pape) {
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+		val32 |= (FPGA0_RF_PAPE |
+			  (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+		rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+	}
 
 	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
 	val32 &= ~BIT(10);
@@ -4692,20 +5756,6 @@
 	rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
 	rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
 
-#ifdef RTL8723BU_PATH_B
-	/* Set RF mode to standby Path B */
-	if (priv->tx_paths > 1)
-		rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x10000);
-#endif
-
-#if 0
-	/* Page B init */
-	rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x0f600000);
-
-	if (priv->tx_paths > 1)
-		rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x0f600000);
-#endif
-
 	/*
 	 * RX IQ calibration setting for 8723B D cut large current issue
 	 * when leaving IPS
@@ -4735,12 +5785,6 @@
 			val32 &= 0x000000ff;
 			rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
 
-#if 0 /* Only needed in restore case, we may need this when going to suspend */
-			priv->RFCalibrateInfo.TxLOK[RF_A] =
-				rtl8xxxu_read_rfreg(priv, RF_A,
-						    RF6052_REG_TXM_IDAC);
-#endif
-
 			val32 = rtl8xxxu_read32(priv,
 						REG_TX_POWER_BEFORE_IQK_A);
 			result[t][0] = (val32 >> 16) & 0x3ff;
@@ -4863,6 +5907,192 @@
 	}
 }
 
+static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+				      int result[][8], int t)
+{
+	struct device *dev = &priv->udev->dev;
+	u32 i, val32;
+	int path_a_ok, path_b_ok;
+	int retry = 2;
+	const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+		REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+		REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+		REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+		REG_TX_OFDM_BBON, REG_TX_TO_RX,
+		REG_TX_TO_TX, REG_RX_CCK,
+		REG_RX_OFDM, REG_RX_WAIT_RIFS,
+		REG_RX_TO_RX, REG_STANDBY,
+		REG_SLEEP, REG_PMPD_ANAEN
+	};
+	const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+		REG_TXPAUSE, REG_BEACON_CTRL,
+		REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+	};
+	const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+		REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+		REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+		REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+		REG_FPGA0_XB_RF_INT_OE, REG_CCK0_AFE_SETTING
+	};
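+	/*
+	 * Save the current RX initial gain so it can be restored once
+	 * calibration has completed.
+	 */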
+	u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff;
+	u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff;
+
+	/*
+	 * Note: IQ calibration must be performed after loading
+	 *       PHY_REG.txt and radio_a/radio_b.txt
+	 */
+
+	if (t == 0) {
+		/* Save ADDA parameters, turn Path A ADDA on */
+		rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+				   RTL8XXXU_ADDA_REGS);
+		rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+		rtl8xxxu_save_regs(priv, iqk_bb_regs,
+				   priv->bb_backup, RTL8XXXU_BB_REGS);
+	}
+
+	rtl8xxxu_path_adda_on(priv, adda_regs, true);
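+	/* Last argument true selects path A, false selects path B */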
+
+	/* MAC settings */
+	rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+	val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+	val32 |= 0x0f000000;
+	rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+	rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+	rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+	rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22208200);
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+	val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+	val32 |= BIT(10);
+	rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+	val32 |= BIT(10);
+	rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+	rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+	rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	for (i = 0; i < retry; i++) {
+		path_a_ok = rtl8192eu_iqk_path_a(priv);
+		if (path_a_ok == 0x01) {
+			val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_BEFORE_IQK_A);
+			result[t][0] = (val32 >> 16) & 0x3ff;
+			val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_AFTER_IQK_A);
+			result[t][1] = (val32 >> 16) & 0x3ff;
+
+			break;
+		}
+	}
+
+	if (!path_a_ok)
+		dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
+
+	for (i = 0; i < retry; i++) {
+		path_a_ok = rtl8192eu_rx_iqk_path_a(priv);
+		if (path_a_ok == 0x03) {
+			val32 = rtl8xxxu_read32(priv,
+						REG_RX_POWER_BEFORE_IQK_A_2);
+			result[t][2] = (val32 >> 16) & 0x3ff;
+			val32 = rtl8xxxu_read32(priv,
+						REG_RX_POWER_AFTER_IQK_A_2);
+			result[t][3] = (val32 >> 16) & 0x3ff;
+
+			break;
+		}
+	}
+
+	if (!path_a_ok)
+		dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
+
+	if (priv->rf_paths > 1) {
+		/* Path A into standby */
+		rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+		rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000);
+		rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+		/* Turn Path B ADDA on */
+		rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+		rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+		rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+		rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+		for (i = 0; i < retry; i++) {
+			path_b_ok = rtl8192eu_iqk_path_b(priv);
+			if (path_b_ok == 0x01) {
+				val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+				result[t][4] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+				result[t][5] = (val32 >> 16) & 0x3ff;
+				break;
+			}
+		}
+
+		if (!path_b_ok)
+			dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+
+		for (i = 0; i < retry; i++) {
+			path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
+			if (path_b_ok == 0x03) {
+				val32 = rtl8xxxu_read32(priv,
+							REG_RX_POWER_BEFORE_IQK_B_2);
+				result[t][6] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv,
+							REG_RX_POWER_AFTER_IQK_B_2);
+				result[t][7] = (val32 >> 16) & 0x3ff;
+				break;
+			}
+		}
+
+		if (!path_b_ok)
+			dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__);
+	}
+
+	/* Back to BB mode, load original value */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+	if (t) {
+		/* Reload ADDA power saving parameters */
+		rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+				      RTL8XXXU_ADDA_REGS);
+
+		/* Reload MAC parameters */
+		rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+		/* Reload BB parameters */
+		rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+				      priv->bb_backup, RTL8XXXU_BB_REGS);
+
+		/* Restore RX initial gain */
+		val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+		val32 &= 0xffffff00;
+		rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50);
+		rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc);
+
+		if (priv->rf_paths > 1) {
+			val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1);
+			val32 &= 0xffffff00;
+			rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+					 val32 | 0x50);
+			rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+					 val32 | xb_agc);
+		}
+
+		/* Load 0xe30 IQC default value */
+		rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+		rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+	}
+}
+
 static void rtl8xxxu_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start)
 {
 	struct h2c_cmd h2c;
@@ -4877,7 +6107,7 @@
 	rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_wlan_calibration));
 }
 
-static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
 {
 	struct device *dev = &priv->udev->dev;
 	int result[4][8];	/* last is final result */
@@ -4975,7 +6205,7 @@
 		rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
 					   candidate, (reg_ec4 == 0));
 
-	rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+	rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
 			   priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
 
 	rtl8xxxu_prepare_calibrate(priv, 0);
@@ -5007,7 +6237,8 @@
 		rtl8723bu_phy_iqcalibrate(priv, result, i);
 
 		if (i == 1) {
-			simu = rtl8723bu_simularity_compare(priv, result, 0, 1);
+			simu = rtl8xxxu_gen2_simularity_compare(priv,
+								result, 0, 1);
 			if (simu) {
 				candidate = 0;
 				break;
@@ -5015,13 +6246,15 @@
 		}
 
 		if (i == 2) {
-			simu = rtl8723bu_simularity_compare(priv, result, 0, 2);
+			simu = rtl8xxxu_gen2_simularity_compare(priv,
+								result, 0, 2);
 			if (simu) {
 				candidate = 0;
 				break;
 			}
 
-			simu = rtl8723bu_simularity_compare(priv, result, 1, 2);
+			simu = rtl8xxxu_gen2_simularity_compare(priv,
+								result, 1, 2);
 			if (simu) {
 				candidate = 1;
 			} else {
@@ -5080,7 +6313,7 @@
 		rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
 					   candidate, (reg_ec4 == 0));
 
-	rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+	rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
 			   priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
 
 	rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control);
@@ -5096,18 +6329,105 @@
 	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
 	rtl8xxxu_write_rfreg(priv, RF_A, 0x43, 0x300bd);
 
-	if (priv->rf_paths > 1) {
-		dev_dbg(dev, "%s: beware 2T not yet supported\n", __func__);
-#ifdef RTL8723BU_PATH_B
-		if (RF_Path == 0x0)	//S1
-			ODM_SetIQCbyRFpath(pDM_Odm, 0);
-		else	//S0
-			ODM_SetIQCbyRFpath(pDM_Odm, 1);
-#endif
-	}
+	if (priv->rf_paths > 1)
+		dev_dbg(dev, "%s: 8723BU 2T not supported\n", __func__);
+
 	rtl8xxxu_prepare_calibrate(priv, 0);
 }
 
+static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	int result[4][8];	/* last is final result */
+	int i, candidate;
+	bool path_a_ok, path_b_ok;
+	u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+	u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+	bool simu;
+
+	memset(result, 0, sizeof(result));
+	candidate = -1;
+
+	path_a_ok = false;
+	path_b_ok = false;
+
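+	/*
+	 * Run the calibration up to three times and pick the first run
+	 * whose results match a later run, as judged by
+	 * rtl8xxxu_gen2_simularity_compare().
+	 */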
+	for (i = 0; i < 3; i++) {
+		rtl8192eu_phy_iqcalibrate(priv, result, i);
+
+		if (i == 1) {
+			simu = rtl8xxxu_gen2_simularity_compare(priv,
+								result, 0, 1);
+			if (simu) {
+				candidate = 0;
+				break;
+			}
+		}
+
+		if (i == 2) {
+			simu = rtl8xxxu_gen2_simularity_compare(priv,
+								result, 0, 2);
+			if (simu) {
+				candidate = 0;
+				break;
+			}
+
+			simu = rtl8xxxu_gen2_simularity_compare(priv,
+								result, 1, 2);
+			if (simu)
+				candidate = 1;
+			else
+				candidate = 3;
+		}
+	}
+
+	for (i = 0; i < 4; i++) {
+		reg_e94 = result[i][0];
+		reg_e9c = result[i][1];
+		reg_ea4 = result[i][2];
+		reg_eac = result[i][3];
+		reg_eb4 = result[i][4];
+		reg_ebc = result[i][5];
+		reg_ec4 = result[i][6];
+		reg_ecc = result[i][7];
+	}
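+	/*
+	 * The loop above only keeps the values from the final run; they
+	 * are replaced below once a candidate (or the default) is chosen.
+	 */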
+
+	if (candidate >= 0) {
+		reg_e94 = result[candidate][0];
+		priv->rege94 =  reg_e94;
+		reg_e9c = result[candidate][1];
+		priv->rege9c = reg_e9c;
+		reg_ea4 = result[candidate][2];
+		reg_eac = result[candidate][3];
+		reg_eb4 = result[candidate][4];
+		priv->regeb4 = reg_eb4;
+		reg_ebc = result[candidate][5];
+		priv->regebc = reg_ebc;
+		reg_ec4 = result[candidate][6];
+		reg_ecc = result[candidate][7];
+		dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+		dev_dbg(dev,
+			"%s: e94=%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
+			"ecc=%x\n", __func__, reg_e94, reg_e9c,
+			reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+		path_a_ok = true;
+		path_b_ok = true;
+	} else {
+		reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+		reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+	}
+
+	if (reg_e94 && candidate >= 0)
+		rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+					   candidate, (reg_ea4 == 0));
+
+	if (priv->rf_paths > 1)
+		rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+					   candidate, (reg_ec4 == 0));
+
+	rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
+			   priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+}
+
 static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
 {
 	u32 val32;
@@ -5231,7 +6551,7 @@
 static int rtl8xxxu_active_to_emu(struct rtl8xxxu_priv *priv)
 {
 	u8 val8;
-	int count, ret;
+	int count, ret = 0;
 
 	/* Start of rtl8723AU_card_enable_flow */
 	/* Act to Cardemu sequence*/
@@ -5281,7 +6601,7 @@
 	u8 val8;
 	u16 val16;
 	u32 val32;
-	int count, ret;
+	int count, ret = 0;
 
 	/* Turn off RF */
 	rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
@@ -5292,9 +6612,9 @@
 	rtl8xxxu_write16(priv, REG_GPIO_INTM, val16);
 
 	/* Release WLON reset 0x04[16]= 1*/
-	val32 = rtl8xxxu_read32(priv, REG_GPIO_INTM);
+	val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
 	val32 |= APS_FSMCO_WLON_RESET;
-	rtl8xxxu_write32(priv, REG_GPIO_INTM, val32);
+	rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
 
 	/* 0x0005[1] = 1 turn off MAC by HW state machine*/
 	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
@@ -5338,7 +6658,7 @@
 {
 	u8 val8;
 	u8 val32;
-	int count, ret;
+	int count, ret = 0;
 
 	rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
 
@@ -5756,6 +7076,50 @@
 	return retval;
 }
 
+static void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv)
+{
+	/* Fix USB interface interference issue */
+	rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+	rtl8xxxu_write8(priv, 0xfe41, 0x8d);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+	/*
+	 * This sets TXDMA_OFFSET_DROP_DATA_EN (bit 9) as well as bits
+	 * 8 and 5, for which I have found no documentation.
+	 */
+	rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
+
+	/*
+	 * Work around excessive protocol errors on the USB bus.
+	 * This must not be done for 8188/8192 UMC A cut parts.
+	 */
+	if (!(!priv->chip_cut && priv->vendor_umc)) {
+		rtl8xxxu_write8(priv, 0xfe40, 0xe6);
+		rtl8xxxu_write8(priv, 0xfe41, 0x94);
+		rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+		rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+		rtl8xxxu_write8(priv, 0xfe41, 0x19);
+		rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+		rtl8xxxu_write8(priv, 0xfe40, 0xe5);
+		rtl8xxxu_write8(priv, 0xfe41, 0x91);
+		rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+		rtl8xxxu_write8(priv, 0xfe40, 0xe2);
+		rtl8xxxu_write8(priv, 0xfe41, 0x81);
+		rtl8xxxu_write8(priv, 0xfe42, 0x80);
+	}
+}
+
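+/*
+ * The newer generation parts presumably only need the TXDMA drop-data
+ * workaround and not the 0xfe4x vendor command sequence above.
+ */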
+static void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv)
+{
+	u32 val32;
+
+	val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK);
+	val32 |= TXDMA_OFFSET_DROP_DATA_EN;
+	rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32);
+}
+
 static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
 {
 	u8 val8;
@@ -5952,10 +7316,12 @@
 		CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE;
 	rtl8xxxu_write16(priv, REG_CR, val16);
 
+	rtl8xxxu_write8(priv, 0xfe10, 0x19);
+
 	/*
 	 * Workaround for 8188RU LNA power leakage problem.
 	 */
-	if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+	if (priv->rtl_chip == RTL8188R) {
 		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
 		val32 &= ~BIT(1);
 		rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
@@ -5965,6 +7331,41 @@
 
 #endif
 
+/*
+ * This is presumably needed for the 8723bu as well.
+ */
+static void rtl8192e_crystal_afe_adjust(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u32 val32;
+
+	/*
+	 * 40Mhz crystal source, MAC 0x28[2]=0
+	 */
+	val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+	val8 &= 0xfb;
+	rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+
+	val32 = rtl8xxxu_read32(priv, REG_AFE_CTRL4);
+	val32 &= 0xfffffc7f;
+	rtl8xxxu_write32(priv, REG_AFE_CTRL4, val32);
+
+	/*
+	 * 92e AFE parameter
+	 * AFE PLL KVCO selection, MAC 0x28[6]=1
+	 */
+	val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+	val8 &= 0xbf;
+	rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+
+	/*
+	 * AFE PLL KVCO selection, MAC 0x78[21]=0
+	 */
+	val32 = rtl8xxxu_read32(priv, REG_AFE_CTRL4);
+	val32 &= 0xffdfffff;
+	rtl8xxxu_write32(priv, REG_AFE_CTRL4, val32);
+}
+
 static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
 {
 	u16 val16;
@@ -5987,6 +7388,10 @@
 		rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0x83);
 	}
 
+	/*
+	 * Adjust AFE before enabling PLL
+	 */
+	rtl8192e_crystal_afe_adjust(priv);
 	rtl8192e_disabled_to_emu(priv);
 
 	ret = rtl8192e_emu_to_active(priv);
@@ -6020,7 +7425,7 @@
 	/*
 	 * Workaround for 8188RU LNA power leakage problem.
 	 */
-	if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+	if (priv->rtl_chip == RTL8188R) {
 		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
 		val32 |= BIT(1);
 		rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
@@ -6075,7 +7480,7 @@
 	val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
 	rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
 
-	rtl8xxxu_write16(priv, REG_CR, 0x0000);
+	rtl8xxxu_write8(priv, REG_CR, 0x0000);
 
 	rtl8xxxu_active_to_lps(priv);
 
@@ -6092,7 +7497,15 @@
 	rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
 
 	rtl8723bu_active_to_emu(priv);
-	rtl8xxxu_emu_to_disabled(priv);
+
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 |= BIT(3); /* APS_FSMCO_HW_SUSPEND */
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */
+	val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+	val8 |= BIT(0);
+	rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
 }
 
 #ifdef NEED_PS_TDMA
@@ -6112,6 +7525,43 @@
 }
 #endif
 
+static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
+{
+	u32 val32;
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
+	val8 |= BIT(5);
+	rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
+
+	/*
+	 * WLAN action by PTA
+	 */
+	rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+
+	val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
+	val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+	rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
+	val32 |= (BIT(0) | BIT(1));
+	rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
+
+	rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77);
+
+	val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+	val32 &= ~BIT(24);
+	val32 |= BIT(23);
+	rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+	/*
+	 * Fix external switch Main->S1, Aux->S0
+	 */
+	val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+	val8 &= ~BIT(0);
+	rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+}
+
 static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
 {
 	struct h2c_cmd h2c;
@@ -6219,12 +7669,10 @@
 	rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan));
 }
 
-static void rtl8723b_disable_rf(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv)
 {
 	u32 val32;
 
-	rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
-
 	val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
 	val32 &= ~(BIT(22) | BIT(23));
 	rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
@@ -6272,11 +7720,64 @@
 	rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
 }
 
+static void rtl8xxxu_old_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u32 val32;
+
+	if (priv->ep_tx_normal_queue)
+		val8 = TX_PAGE_NUM_NORM_PQ;
+	else
+		val8 = 0;
+
+	rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
+
+	val32 = (TX_PAGE_NUM_PUBQ << RQPN_PUB_PQ_SHIFT) | RQPN_LOAD;
+
+	if (priv->ep_tx_high_queue)
+		val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
+	if (priv->ep_tx_low_queue)
+		val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
+
+	rtl8xxxu_write32(priv, REG_RQPN, val32);
+}
+
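+/*
+ * Reserve TX buffer pages per queue using the per-chip counts from fops;
+ * whatever the high/low/normal queues do not claim goes to the public
+ * queue. Chips without these counts use the _old_ variant above with the
+ * fixed TX_PAGE_NUM_* constants.
+ */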
+static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
+{
+	struct rtl8xxxu_fileops *fops = priv->fops;
+	u32 hq, lq, nq, eq, pubq;
+	u32 val32;
+
+	hq = 0;
+	lq = 0;
+	nq = 0;
+	eq = 0;
+	pubq = 0;
+
+	if (priv->ep_tx_high_queue)
+		hq = fops->page_num_hi;
+	if (priv->ep_tx_low_queue)
+		lq = fops->page_num_lo;
+	if (priv->ep_tx_normal_queue)
+		nq = fops->page_num_norm;
+
+	val32 = (nq << RQPN_NPQ_SHIFT) | (eq << RQPN_EPQ_SHIFT);
+	rtl8xxxu_write32(priv, REG_RQPN_NPQ, val32);
+
+	pubq = fops->total_page_num - hq - lq - nq;
+
+	val32 = RQPN_LOAD;
+	val32 |= (hq << RQPN_HI_PQ_SHIFT);
+	val32 |= (lq << RQPN_LO_PQ_SHIFT);
+	val32 |= (pubq << RQPN_PUB_PQ_SHIFT);
+
+	rtl8xxxu_write32(priv, REG_RQPN, val32);
+}
+
 static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
 {
 	struct rtl8xxxu_priv *priv = hw->priv;
 	struct device *dev = &priv->udev->dev;
-	struct rtl8xxxu_rfregval *rftable;
 	bool macpower;
 	int ret;
 	u8 val8;
@@ -6301,6 +7802,95 @@
 		goto exit;
 	}
 
+	if (!macpower) {
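+		/*
+		 * The per-queue page reservation only needs to be set up
+		 * again after a full power cycle.
+		 */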
+		if (priv->fops->total_page_num)
+			rtl8xxxu_init_queue_reserved_page(priv);
+		else
+			rtl8xxxu_old_init_queue_reserved_page(priv);
+	}
+
+	ret = rtl8xxxu_init_queue_priority(priv);
+	dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	/*
+	 * Set RX page boundary
+	 */
+	rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, priv->fops->trxff_boundary);
+
+	ret = rtl8xxxu_download_firmware(priv);
+	dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+	ret = rtl8xxxu_start_firmware(priv);
+	dev_dbg(dev, "%s: start_firmware %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	if (priv->fops->phy_init_antenna_selection)
+		priv->fops->phy_init_antenna_selection(priv);
+
+	ret = rtl8xxxu_init_mac(priv);
+
+	dev_dbg(dev, "%s: init_mac %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	ret = rtl8xxxu_init_phy_bb(priv);
+	dev_dbg(dev, "%s: init_phy_bb %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	ret = priv->fops->init_phy_rf(priv);
+	if (ret)
+		goto exit;
+
+	/* RFSW Control - clear bit 14 ?? */
+	if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
+		rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
+
+	val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
+		FPGA0_RF_ANTSWB |
+		((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB) << FPGA0_RF_BD_CTRL_SHIFT);
+	if (!priv->no_pape) {
+		val32 |= (FPGA0_RF_PAPE |
+			  (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+	}
+	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+
+	/* 0x860[6:5]= 00 - why? - this sets antenna B */
+	if (priv->rtl_chip != RTL8192E)
+		rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66f60210);
+
+	if (!macpower) {
+		/*
+		 * Set TX buffer boundary
+		 */
+		if (priv->rtl_chip == RTL8192E)
+			val8 = TX_TOTAL_PAGE_NUM_8192E + 1;
+		else
+			val8 = TX_TOTAL_PAGE_NUM + 1;
+
+		if (priv->rtl_chip == RTL8723B)
+			val8 -= 1;
+
+		rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
+		rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
+		rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8);
+		rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8);
+		rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8);
+	}
+
+	/*
+	 * The vendor drivers set PBP for all devices, except 8192e.
+	 * There is no explanation for this in any of the sources.
+	 */
+	val8 = (priv->fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
+		(priv->fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
+	if (priv->rtl_chip != RTL8192E)
+		rtl8xxxu_write8(priv, REG_PBP, val8);
+
 	dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
 	if (!macpower) {
 		ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
@@ -6310,10 +7900,15 @@
 		}
 
 		/*
+		 * Chip specific quirks
+		 */
+		priv->fops->usb_quirks(priv);
+
+		/*
 		 * Presumably this is for 8188EU as well
 		 * Enable TX report and TX report timer
 		 */
-		if (priv->rtlchip == 0x8723bu) {
+		if (priv->rtl_chip == RTL8723B) {
 			val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
 			val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
 			rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
@@ -6329,203 +7924,21 @@
 		}
 	}
 
-	ret = rtl8xxxu_download_firmware(priv);
-	dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret);
-	if (ret)
-		goto exit;
-	ret = rtl8xxxu_start_firmware(priv);
-	dev_dbg(dev, "%s: start_fiwmare %i\n", __func__, ret);
-	if (ret)
-		goto exit;
-
-	/* Solve too many protocol error on USB bus */
-	/* Can't do this for 8188/8192 UMC A cut parts */
-	if (priv->rtlchip == 0x8723a ||
-	    ((priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c ||
-	      priv->rtlchip == 0x8188c) &&
-	     (priv->chip_cut || !priv->vendor_umc))) {
-		rtl8xxxu_write8(priv, 0xfe40, 0xe6);
-		rtl8xxxu_write8(priv, 0xfe41, 0x94);
-		rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
-		rtl8xxxu_write8(priv, 0xfe40, 0xe0);
-		rtl8xxxu_write8(priv, 0xfe41, 0x19);
-		rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
-		rtl8xxxu_write8(priv, 0xfe40, 0xe5);
-		rtl8xxxu_write8(priv, 0xfe41, 0x91);
-		rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
-		rtl8xxxu_write8(priv, 0xfe40, 0xe2);
-		rtl8xxxu_write8(priv, 0xfe41, 0x81);
-		rtl8xxxu_write8(priv, 0xfe42, 0x80);
-	}
-
-	if (priv->rtlchip == 0x8192e) {
-		rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
-		rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
-	}
-
-	if (priv->fops->phy_init_antenna_selection)
-		priv->fops->phy_init_antenna_selection(priv);
-
-	if (priv->rtlchip == 0x8723b)
-		ret = rtl8xxxu_init_mac(priv, rtl8723b_mac_init_table);
-	else
-		ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
-
-	dev_dbg(dev, "%s: init_mac %i\n", __func__, ret);
-	if (ret)
-		goto exit;
-
-	ret = rtl8xxxu_init_phy_bb(priv);
-	dev_dbg(dev, "%s: init_phy_bb %i\n", __func__, ret);
-	if (ret)
-		goto exit;
-
-	switch(priv->rtlchip) {
-	case 0x8723a:
-		rftable = rtl8723au_radioa_1t_init_table;
-		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
-		break;
-	case 0x8723b:
-		rftable = rtl8723bu_radioa_1t_init_table;
-		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
-		/*
-		 * PHY LCK
-		 */
-		rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0);
-		rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01);
-		msleep(200);
-		rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0);
-		break;
-	case 0x8188c:
-		if (priv->hi_pa)
-			rftable = rtl8188ru_radioa_1t_highpa_table;
-		else
-			rftable = rtl8192cu_radioa_1t_init_table;
-		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
-		break;
-	case 0x8191c:
-		rftable = rtl8192cu_radioa_1t_init_table;
-		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
-		break;
-	case 0x8192c:
-		rftable = rtl8192cu_radioa_2t_init_table;
-		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
-		if (ret)
-			break;
-		rftable = rtl8192cu_radiob_2t_init_table;
-		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-	if (ret)
-		goto exit;
-
-	/*
-	 * Chip specific quirks
-	 */
-	if (priv->rtlchip == 0x8723a) {
-		/* Fix USB interface interference issue */
-		rtl8xxxu_write8(priv, 0xfe40, 0xe0);
-		rtl8xxxu_write8(priv, 0xfe41, 0x8d);
-		rtl8xxxu_write8(priv, 0xfe42, 0x80);
-		rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
-
-		/* Reduce 80M spur */
-		rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
-		rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
-		rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
-		rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
-	} else {
-		val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK);
-		val32 |= TXDMA_OFFSET_DROP_DATA_EN;
-		rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32);
-	}
-
-	if (!macpower) {
-		if (priv->ep_tx_normal_queue)
-			val8 = TX_PAGE_NUM_NORM_PQ;
-		else
-			val8 = 0;
-
-		rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
-
-		val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD;
-
-		if (priv->ep_tx_high_queue)
-			val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
-		if (priv->ep_tx_low_queue)
-			val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
-
-		rtl8xxxu_write32(priv, REG_RQPN, val32);
-
-		/*
-		 * Set TX buffer boundary
-		 */
-		val8 = TX_TOTAL_PAGE_NUM + 1;
-
-		if (priv->rtlchip == 0x8723b)
-			val8 -= 1;
-
-		rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
-		rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
-		rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8);
-		rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8);
-		rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8);
-	}
-
-	ret = rtl8xxxu_init_queue_priority(priv);
-	dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
-	if (ret)
-		goto exit;
-
-	/* RFSW Control - clear bit 14 ?? */
-	if (priv->rtlchip != 0x8723b)
-		rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
-	/* 0x07000760 */
-	val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
-		FPGA0_RF_ANTSWB | FPGA0_RF_PAPE |
-		((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) <<
-		 FPGA0_RF_BD_CTRL_SHIFT);
-	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
-	/* 0x860[6:5]= 00 - why? - this sets antenna B */
-	rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210);
-
-	priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A,
-						  RF6052_REG_MODE_AG);
-
-	/*
-	 * Set RX page boundary
-	 */
-	if (priv->rtlchip == 0x8723b)
-		rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f);
-	else
-		rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
-	/*
-	 * Transfer page size is always 128
-	 */
-	if (priv->rtlchip == 0x8723b)
-		val8 = (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_RX_SHIFT) |
-			(PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_TX_SHIFT);
-	else
-		val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
-			(PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
-	rtl8xxxu_write8(priv, REG_PBP, val8);
-
 	/*
 	 * Unit in 8 bytes, not obvious what it is used for
 	 */
 	rtl8xxxu_write8(priv, REG_RX_DRVINFO_SZ, 4);
 
-	/*
-	 * Enable all interrupts - not obvious USB needs to do this
-	 */
-	rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
-	rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+	if (priv->rtl_chip == RTL8192E) {
+		rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
+		rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
+	} else {
+		/*
+		 * Enable all interrupts - not obvious USB needs to do this
+		 */
+		rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
+		rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+	}
 
 	rtl8xxxu_set_mac(priv);
 	rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
@@ -6600,7 +8013,7 @@
 	/*
 	 * Initialize burst parameters
 	 */
-	if (priv->rtlchip == 0x8723b) {
+	if (priv->rtl_chip == RTL8723B) {
 		/*
 		 * For USB high speed set 512B packets
 		 */
@@ -6651,9 +8064,11 @@
 	priv->fops->set_tx_power(priv, 1, false);
 
 	/* Let the 8051 take control of antenna setting */
-	val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
-	val8 |= LEDCFG2_DPDT_SELECT;
-	rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+	if (priv->rtl_chip != RTL8192E) {
+		val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+		val8 |= LEDCFG2_DPDT_SELECT;
+		rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+	}
 
 	rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, 0xff);
 
@@ -6665,6 +8080,20 @@
 	if (priv->fops->init_statistics)
 		priv->fops->init_statistics(priv);
 
+	if (priv->rtl_chip == RTL8192E) {
+		/*
+		 * 0x4c6[3] 1: RTS BW = Data BW
+		 * 0: RTS BW depends on CCA / secondary CCA result.
+		 */
+		val8 = rtl8xxxu_read8(priv, REG_QUEUE_CTRL);
+		val8 &= ~BIT(3);
+		rtl8xxxu_write8(priv, REG_QUEUE_CTRL, val8);
+		/*
+		 * Reset USB mode switch setting
+		 */
+		rtl8xxxu_write8(priv, REG_ACLK_MON, 0x00);
+	}
+
 	rtl8723a_phy_lc_calibrate(priv);
 
 	priv->fops->phy_iq_calibrate(priv);
@@ -6672,7 +8101,7 @@
 	/*
 	 * This should enable thermal meter
 	 */
-	if (priv->fops->has_s0s1)
+	if (priv->fops->tx_desc_size == sizeof(struct rtl8xxxu_txdesc40))
 		rtl8xxxu_write_rfreg(priv,
 				     RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
 	else
@@ -6682,7 +8111,7 @@
 	val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT);
 	rtl8xxxu_write8(priv, REG_NAV_UPPER, val8);
 
-	if (priv->rtlchip == 0x8723a) {
+	if (priv->rtl_chip == RTL8723A) {
 		/*
 		 * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test,
 		 * but we need to find root cause.
@@ -6693,6 +8122,8 @@
 			val32 |= FPGA_RF_MODE_CCK;
 			rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
 		}
+	} else if (priv->rtl_chip == RTL8192E) {
+		rtl8xxxu_write8(priv, REG_USB_HRPWM, 0x00);
 	}
 
 	val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL);
@@ -6700,17 +8131,20 @@
 	/* ack for xmit mgmt frames. */
 	rtl8xxxu_write32(priv, REG_FWHW_TXQ_CTRL, val32);
 
+	if (priv->rtl_chip == RTL8192E) {
+		/*
+		 * Fix LDPC rx hang issue.
+		 */
+		val32 = rtl8xxxu_read32(priv, REG_AFE_MISC);
+		rtl8xxxu_write8(priv, REG_8192E_LDOV12_CTRL, 0x75);
+		val32 &= 0xfff00fff;
+		val32 |= 0x0007e000;
+		rtl8xxxu_write32(priv, REG_AFE_MISC, val32);
+	}
 exit:
 	return ret;
 }
 
-static void rtl8xxxu_disable_device(struct ieee80211_hw *hw)
-{
-	struct rtl8xxxu_priv *priv = hw->priv;
-
-	priv->fops->power_off(priv);
-}
-
 static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
 			       struct ieee80211_key_conf *key, const u8 *mac)
 {
@@ -6775,8 +8209,8 @@
 	rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
 }
 
-static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv,
-				       u32 ramask, int sgi)
+static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+				      u32 ramask, int sgi)
 {
 	struct h2c_cmd h2c;
 
@@ -6795,8 +8229,8 @@
 	rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ramask));
 }
 
-static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv,
-				       u32 ramask, int sgi)
+static void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
+					   u32 ramask, int sgi)
 {
 	struct h2c_cmd h2c;
 	u8 bw = 0;
@@ -6821,8 +8255,8 @@
 	rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
 }
 
-static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv,
-				     u8 macid, bool connect)
+static void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
+					 u8 macid, bool connect)
 {
 	struct h2c_cmd h2c;
 
@@ -6838,8 +8272,8 @@
 	rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss));
 }
 
-static void rtl8723bu_report_connect(struct rtl8xxxu_priv *priv,
-				     u8 macid, bool connect)
+static void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
+					 u8 macid, bool connect)
 {
 	struct h2c_cmd h2c;
 
@@ -7014,7 +8448,7 @@
  * format. The descriptor checksum is still only calculated over the
  * initial 32 bytes of the descriptor!
  */
-static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc)
+static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_txdesc32 *tx_desc)
 {
 	__le16 *ptr = (__le16 *)tx_desc;
 	u16 csum = 0;
@@ -7026,7 +8460,7 @@
 	 */
 	tx_desc->csum = cpu_to_le16(0);
 
-	for (i = 0; i < (sizeof(struct rtl8723au_tx_desc) / sizeof(u16)); i++)
+	for (i = 0; i < (sizeof(struct rtl8xxxu_txdesc32) / sizeof(u16)); i++)
 		csum = csum ^ le16_to_cpu(ptr[i]);
 
 	tx_desc->csum |= cpu_to_le16(csum);
@@ -7164,8 +8598,8 @@
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
 	struct rtl8xxxu_priv *priv = hw->priv;
-	struct rtl8723au_tx_desc *tx_desc;
-	struct rtl8723bu_tx_desc *tx_desc40;
+	struct rtl8xxxu_txdesc32 *tx_desc;
+	struct rtl8xxxu_txdesc40 *tx_desc40;
 	struct rtl8xxxu_tx_urb *tx_urb;
 	struct ieee80211_sta *sta = NULL;
 	struct ieee80211_vif *vif = tx_info->control.vif;
@@ -7210,7 +8644,7 @@
 	if (control && control->sta)
 		sta = control->sta;
 
-	tx_desc = (struct rtl8723au_tx_desc *)skb_push(skb, tx_desc_size);
+	tx_desc = (struct rtl8xxxu_txdesc32 *)skb_push(skb, tx_desc_size);
 
 	memset(tx_desc, 0, tx_desc_size);
 	tx_desc->pkt_size = cpu_to_le16(pktlen);
@@ -7267,37 +8701,35 @@
 			tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
 
 		tx_desc->txdw3 =
-			cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723A);
+			cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
 
 		if (ampdu_enable)
-			tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A);
+			tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
 		else
-			tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A);
+			tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
 
 		if (ieee80211_is_mgmt(hdr->frame_control)) {
 			tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
 			tx_desc->txdw4 |=
-				cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723A);
+				cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
 			tx_desc->txdw5 |=
-				cpu_to_le32(6 <<
-					    TXDESC_RETRY_LIMIT_SHIFT_8723A);
+				cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
 			tx_desc->txdw5 |=
-				cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723A);
+				cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
 		}
 
 		if (ieee80211_is_data_qos(hdr->frame_control))
-			tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS_8723A);
+			tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
 
 		if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
 		    (sta && vif && vif->bss_conf.use_short_preamble))
-			tx_desc->txdw4 |=
-				cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723A);
+			tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
 
 		if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
 		    (ieee80211_is_data_qos(hdr->frame_control) &&
 		     sta && sta->ht_cap.cap &
 		     (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
-			tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI);
+			tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
 		}
 
 		if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
@@ -7307,46 +8739,43 @@
 			 */
 			tx_desc->txdw4 |=
 				cpu_to_le32(DESC_RATE_24M <<
-					    TXDESC_RTS_RATE_SHIFT_8723A);
+					    TXDESC32_RTS_RATE_SHIFT);
 			tx_desc->txdw4 |=
-				cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A);
-			tx_desc->txdw4 |=
-				cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A);
+				cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
+			tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
 		}
 	} else {
-		tx_desc40 = (struct rtl8723bu_tx_desc *)tx_desc;
+		tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc;
 
 		tx_desc40->txdw4 = cpu_to_le32(rate);
 		if (ieee80211_is_data(hdr->frame_control)) {
 			tx_desc->txdw4 |=
 				cpu_to_le32(0x1f <<
-					    TXDESC_DATA_RATE_FB_SHIFT_8723B);
+					    TXDESC40_DATA_RATE_FB_SHIFT);
 		}
 
 		tx_desc40->txdw9 =
-			cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723B);
+			cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
 
 		if (ampdu_enable)
-			tx_desc40->txdw2 |=
-				cpu_to_le32(TXDESC_AGG_ENABLE_8723B);
+			tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
 		else
-			tx_desc40->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B);
+			tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
 
 		if (ieee80211_is_mgmt(hdr->frame_control)) {
 			tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value);
 			tx_desc40->txdw3 |=
-				cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723B);
+				cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
 			tx_desc40->txdw4 |=
-				cpu_to_le32(6 <<
-					    TXDESC_RETRY_LIMIT_SHIFT_8723B);
+				cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
 			tx_desc40->txdw4 |=
-				cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723B);
+				cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
 		}
 
 		if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
 		    (sta && vif && vif->bss_conf.use_short_preamble))
 			tx_desc40->txdw5 |=
-				cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723B);
+				cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
 
 		if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
 			/*
@@ -7355,11 +8784,9 @@
 			 */
 			tx_desc->txdw4 |=
 				cpu_to_le32(DESC_RATE_24M <<
-					    TXDESC_RTS_RATE_SHIFT_8723B);
-			tx_desc->txdw3 |=
-				cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723B);
-			tx_desc->txdw3 |=
-				cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723B);
+					    TXDESC40_RTS_RATE_SHIFT);
+			tx_desc->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
+			tx_desc->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
 		}
 	}
 
@@ -7499,15 +8926,22 @@
 	}
 }
 
-static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv,
+static int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv,
 				   struct sk_buff *skb,
 				   struct ieee80211_rx_status *rx_status)
 {
-	struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data;
+	struct rtl8xxxu_rxdesc16 *rx_desc =
+		(struct rtl8xxxu_rxdesc16 *)skb->data;
 	struct rtl8723au_phy_stats *phy_stats;
+	__le32 *_rx_desc_le = (__le32 *)skb->data;
+	u32 *_rx_desc = (u32 *)skb->data;
 	int drvinfo_sz, desc_shift;
+	int i;
 
-	skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc));
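+	/* Convert the RX descriptor words to host byte order in place */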
+	for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32)); i++)
+		_rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+
+	skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16));
 
 	phy_stats = (struct rtl8723au_phy_stats *)skb->data;
 
@@ -7539,16 +8973,22 @@
 	return RX_TYPE_DATA_PKT;
 }
 
-static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv,
+static int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv,
 				   struct sk_buff *skb,
 				   struct ieee80211_rx_status *rx_status)
 {
-	struct rtl8723bu_rx_desc *rx_desc =
-		(struct rtl8723bu_rx_desc *)skb->data;
+	struct rtl8xxxu_rxdesc24 *rx_desc =
+		(struct rtl8xxxu_rxdesc24 *)skb->data;
 	struct rtl8723au_phy_stats *phy_stats;
+	__le32 *_rx_desc_le = (__le32 *)skb->data;
+	u32 *_rx_desc = (u32 *)skb->data;
 	int drvinfo_sz, desc_shift;
+	int i;
 
-	skb_pull(skb, sizeof(struct rtl8723bu_rx_desc));
+	for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc24) / sizeof(u32)); i++)
+		_rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+
+	skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc24));
 
 	phy_stats = (struct rtl8723au_phy_stats *)skb->data;
 
@@ -7640,12 +9080,7 @@
 	struct sk_buff *skb = (struct sk_buff *)urb->context;
 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
 	struct device *dev = &priv->udev->dev;
-	__le32 *_rx_desc_le = (__le32 *)skb->data;
-	u32 *_rx_desc = (u32 *)skb->data;
-	int rx_type, i;
-
-	for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++)
-		_rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+	int rx_type;
 
 	skb_put(skb, urb->actual_length);
 
@@ -7684,14 +9119,15 @@
 {
 	struct sk_buff *skb;
 	int skb_size;
-	int ret;
+	int ret, rx_desc_sz;
 
-	skb_size = sizeof(struct rtl8xxxu_rx_desc) + RTL_RX_BUFFER_SIZE;
+	rx_desc_sz = priv->fops->rx_desc_size;
+	skb_size = rx_desc_sz + RTL_RX_BUFFER_SIZE;
 	skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 
-	memset(skb->data, 0, sizeof(struct rtl8xxxu_rx_desc));
+	memset(skb->data, 0, rx_desc_sz);
 	usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data,
 			  skb_size, rtl8xxxu_rx_complete, skb);
 	usb_anchor_urb(&rx_urb->urb, &priv->rx_anchor);
@@ -8161,6 +9597,8 @@
 	if (priv->usb_interrupts)
 		usb_kill_anchored_urbs(&priv->int_anchor);
 
+	rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
 	priv->fops->disable_rf(priv);
 
 	/*
@@ -8293,6 +9731,10 @@
 		if (id->idProduct == 0x7811)
 			untested = 0;
 		break;
+	case 0x050d:
+		if (id->idProduct == 0x1004)
+			untested = 0;
+		break;
 	default:
 		break;
 	}
@@ -8385,7 +9827,7 @@
 		dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n");
 		sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 	}
-	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+	hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
 
 	hw->wiphy->rts_threshold = 2347;
 
@@ -8421,13 +9863,14 @@
 	hw = usb_get_intfdata(interface);
 	priv = hw->priv;
 
-	rtl8xxxu_disable_device(hw);
+	ieee80211_unregister_hw(hw);
+
+	priv->fops->power_off(priv);
+
 	usb_set_intfdata(interface, NULL);
 
 	dev_info(&priv->udev->dev, "disconnecting\n");
 
-	ieee80211_unregister_hw(hw);
-
 	kfree(priv->fw_data);
 	mutex_destroy(&priv->usb_buf_mutex);
 	mutex_destroy(&priv->h2c_mutex);
@@ -8443,22 +9886,30 @@
 	.power_off = rtl8xxxu_power_off,
 	.reset_8051 = rtl8xxxu_reset_8051,
 	.llt_init = rtl8xxxu_init_llt_table,
-	.phy_iq_calibrate = rtl8723au_phy_iq_calibrate,
-	.config_channel = rtl8723au_config_channel,
-	.parse_rx_desc = rtl8723au_parse_rx_desc,
-	.enable_rf = rtl8723a_enable_rf,
-	.disable_rf = rtl8723a_disable_rf,
-	.set_tx_power = rtl8723a_set_tx_power,
-	.update_rate_mask = rtl8723au_update_rate_mask,
-	.report_connect = rtl8723au_report_connect,
+	.init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
+	.init_phy_rf = rtl8723au_init_phy_rf,
+	.phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
+	.config_channel = rtl8xxxu_gen1_config_channel,
+	.parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+	.enable_rf = rtl8xxxu_gen1_enable_rf,
+	.disable_rf = rtl8xxxu_gen1_disable_rf,
+	.usb_quirks = rtl8xxxu_gen1_usb_quirks,
+	.set_tx_power = rtl8xxxu_gen1_set_tx_power,
+	.update_rate_mask = rtl8xxxu_update_rate_mask,
+	.report_connect = rtl8xxxu_gen1_report_connect,
 	.writeN_block_size = 1024,
 	.mbox_ext_reg = REG_HMBOX_EXT_0,
 	.mbox_ext_width = 2,
-	.tx_desc_size = sizeof(struct rtl8723au_tx_desc),
+	.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
+	.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
 	.adda_1t_init = 0x0b1b25a0,
 	.adda_1t_path_on = 0x0bdb25a0,
 	.adda_2t_path_on_a = 0x04db25a4,
 	.adda_2t_path_on_b = 0x0b1b25a4,
+	.trxff_boundary = 0x27ff,
+	.pbp_rx = PBP_PAGE_SIZE_128,
+	.pbp_tx = PBP_PAGE_SIZE_128,
+	.mactable = rtl8xxxu_gen1_mac_init_table,
 };
 
 static struct rtl8xxxu_fileops rtl8723bu_fops = {
@@ -8468,26 +9919,34 @@
 	.power_off = rtl8723bu_power_off,
 	.reset_8051 = rtl8723bu_reset_8051,
 	.llt_init = rtl8xxxu_auto_llt_table,
+	.init_phy_bb = rtl8723bu_init_phy_bb,
+	.init_phy_rf = rtl8723bu_init_phy_rf,
 	.phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection,
 	.phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
-	.config_channel = rtl8723bu_config_channel,
-	.parse_rx_desc = rtl8723bu_parse_rx_desc,
+	.config_channel = rtl8xxxu_gen2_config_channel,
+	.parse_rx_desc = rtl8xxxu_parse_rxdesc24,
 	.init_aggregation = rtl8723bu_init_aggregation,
 	.init_statistics = rtl8723bu_init_statistics,
 	.enable_rf = rtl8723b_enable_rf,
-	.disable_rf = rtl8723b_disable_rf,
+	.disable_rf = rtl8xxxu_gen2_disable_rf,
+	.usb_quirks = rtl8xxxu_gen2_usb_quirks,
 	.set_tx_power = rtl8723b_set_tx_power,
-	.update_rate_mask = rtl8723bu_update_rate_mask,
-	.report_connect = rtl8723bu_report_connect,
+	.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
+	.report_connect = rtl8xxxu_gen2_report_connect,
 	.writeN_block_size = 1024,
 	.mbox_ext_reg = REG_HMBOX_EXT0_8723B,
 	.mbox_ext_width = 4,
-	.tx_desc_size = sizeof(struct rtl8723bu_tx_desc),
+	.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
+	.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
 	.has_s0s1 = 1,
 	.adda_1t_init = 0x01c00014,
 	.adda_1t_path_on = 0x01c00014,
 	.adda_2t_path_on_a = 0x01c00014,
 	.adda_2t_path_on_b = 0x01c00014,
+	.trxff_boundary = 0x3f7f,
+	.pbp_rx = PBP_PAGE_SIZE_256,
+	.pbp_tx = PBP_PAGE_SIZE_256,
+	.mactable = rtl8723b_mac_init_table,
 };
 
 #ifdef CONFIG_RTL8XXXU_UNTESTED
@@ -8499,22 +9958,30 @@
 	.power_off = rtl8xxxu_power_off,
 	.reset_8051 = rtl8xxxu_reset_8051,
 	.llt_init = rtl8xxxu_init_llt_table,
-	.phy_iq_calibrate = rtl8723au_phy_iq_calibrate,
-	.config_channel = rtl8723au_config_channel,
-	.parse_rx_desc = rtl8723au_parse_rx_desc,
-	.enable_rf = rtl8723a_enable_rf,
-	.disable_rf = rtl8723a_disable_rf,
-	.set_tx_power = rtl8723a_set_tx_power,
-	.update_rate_mask = rtl8723au_update_rate_mask,
-	.report_connect = rtl8723au_report_connect,
+	.init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
+	.init_phy_rf = rtl8192cu_init_phy_rf,
+	.phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
+	.config_channel = rtl8xxxu_gen1_config_channel,
+	.parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+	.enable_rf = rtl8xxxu_gen1_enable_rf,
+	.disable_rf = rtl8xxxu_gen1_disable_rf,
+	.usb_quirks = rtl8xxxu_gen1_usb_quirks,
+	.set_tx_power = rtl8xxxu_gen1_set_tx_power,
+	.update_rate_mask = rtl8xxxu_update_rate_mask,
+	.report_connect = rtl8xxxu_gen1_report_connect,
 	.writeN_block_size = 128,
 	.mbox_ext_reg = REG_HMBOX_EXT_0,
 	.mbox_ext_width = 2,
-	.tx_desc_size = sizeof(struct rtl8723au_tx_desc),
+	.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
+	.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
 	.adda_1t_init = 0x0b1b25a0,
 	.adda_1t_path_on = 0x0bdb25a0,
 	.adda_2t_path_on_a = 0x04db25a4,
 	.adda_2t_path_on_b = 0x0b1b25a4,
+	.trxff_boundary = 0x27ff,
+	.pbp_rx = PBP_PAGE_SIZE_128,
+	.pbp_tx = PBP_PAGE_SIZE_128,
+	.mactable = rtl8xxxu_gen1_mac_init_table,
 };
 
 #endif
@@ -8526,23 +9993,33 @@
 	.power_off = rtl8xxxu_power_off,
 	.reset_8051 = rtl8xxxu_reset_8051,
 	.llt_init = rtl8xxxu_auto_llt_table,
-	.phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
-	.config_channel = rtl8723bu_config_channel,
-	.parse_rx_desc = rtl8723bu_parse_rx_desc,
-	.enable_rf = rtl8723b_enable_rf,
-	.disable_rf = rtl8723b_disable_rf,
-	.set_tx_power = rtl8723b_set_tx_power,
-	.update_rate_mask = rtl8723au_update_rate_mask,
-	.report_connect = rtl8723au_report_connect,
+	.init_phy_bb = rtl8192eu_init_phy_bb,
+	.init_phy_rf = rtl8192eu_init_phy_rf,
+	.phy_iq_calibrate = rtl8192eu_phy_iq_calibrate,
+	.config_channel = rtl8xxxu_gen2_config_channel,
+	.parse_rx_desc = rtl8xxxu_parse_rxdesc24,
+	.enable_rf = rtl8192e_enable_rf,
+	.disable_rf = rtl8xxxu_gen2_disable_rf,
+	.usb_quirks = rtl8xxxu_gen2_usb_quirks,
+	.set_tx_power = rtl8192e_set_tx_power,
+	.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
+	.report_connect = rtl8xxxu_gen2_report_connect,
 	.writeN_block_size = 128,
 	.mbox_ext_reg = REG_HMBOX_EXT0_8723B,
 	.mbox_ext_width = 4,
-	.tx_desc_size = sizeof(struct rtl8723au_tx_desc),
-	.has_s0s1 = 1,
+	.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
+	.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
+	.has_s0s1 = 0,
 	.adda_1t_init = 0x0fc01616,
 	.adda_1t_path_on = 0x0fc01616,
 	.adda_2t_path_on_a = 0x0fc01616,
 	.adda_2t_path_on_b = 0x0fc01616,
+	.trxff_boundary = 0x3cff,
+	.mactable = rtl8192e_mac_init_table,
+	.total_page_num = TX_TOTAL_PAGE_NUM_8192E,
+	.page_num_hi = TX_PAGE_NUM_HI_PQ_8192E,
+	.page_num_lo = TX_PAGE_NUM_LO_PQ_8192E,
+	.page_num_norm = TX_PAGE_NUM_NORM_PQ_8192E,
 };
 
 static struct usb_device_id dev_table[] = {
@@ -8567,6 +10044,9 @@
 /* Tested by Larry Finger */
 {USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff),
 	.driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Andrea Merello */
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
 /* Currently untested 8188 series devices */
 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
 	.driver_info = (unsigned long)&rtl8192cu_fops},
@@ -8651,8 +10131,6 @@
 /* Currently untested 8192 series devices */
 {USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0950, 0xff, 0xff, 0xff),
 	.driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
-	.driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2102, 0xff, 0xff, 0xff),
 	.driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2103, 0xff, 0xff, 0xff),
@@ -8708,6 +10186,7 @@
 	.probe = rtl8xxxu_probe,
 	.disconnect = rtl8xxxu_disconnect,
 	.id_table = dev_table,
+	.no_dynamic_id = 1,
 	.disable_hub_initiated_lpm = 1,
 };
 
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 7b73654..3e2643c 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -42,12 +42,18 @@
 #define REALTEK_USB_CMD_IDX		0x00
 
 #define TX_TOTAL_PAGE_NUM		0xf8
+#define TX_TOTAL_PAGE_NUM_8192E		0xf3
 /* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
 #define TX_PAGE_NUM_PUBQ		0xe7
 #define TX_PAGE_NUM_HI_PQ		0x0c
 #define TX_PAGE_NUM_LO_PQ		0x02
 #define TX_PAGE_NUM_NORM_PQ		0x02
 
+#define TX_PAGE_NUM_PUBQ_8192E		0xe7
+#define TX_PAGE_NUM_HI_PQ_8192E		0x08
+#define TX_PAGE_NUM_LO_PQ_8192E		0x0c
+#define TX_PAGE_NUM_NORM_PQ_8192E	0x00
+
 #define RTL_FW_PAGE_SIZE		4096
 #define RTL8XXXU_FIRMWARE_POLL_MAX	1000
 
@@ -65,13 +71,37 @@
 #define EFUSE_BT_MAP_LEN_8723A		1024
 #define EFUSE_MAX_WORD_UNIT		4
 
+enum rtl8xxxu_rtl_chip {
+	RTL8192S = 0x81920,
+	RTL8191S = 0x81910,
+	RTL8192C = 0x8192c,
+	RTL8191C = 0x8191c,
+	RTL8188C = 0x8188c,
+	RTL8188R = 0x81889,
+	RTL8192D = 0x8192d,
+	RTL8723A = 0x8723a,
+	RTL8188E = 0x8188e,
+	RTL8812  = 0x88120,
+	RTL8821  = 0x88210,
+	RTL8192E = 0x8192e,
+	RTL8191E = 0x8191e,
+	RTL8723B = 0x8723b,
+	RTL8814A = 0x8814a,
+	RTL8881A = 0x8881a,
+	RTL8821B = 0x8821b,
+	RTL8822B = 0x8822b,
+	RTL8703B = 0x8703b,
+	RTL8195A = 0x8195a,
+	RTL8188F = 0x8188f
+};
+
 enum rtl8xxxu_rx_type {
 	RX_TYPE_DATA_PKT = 0,
 	RX_TYPE_C2H = 1,
 	RX_TYPE_ERROR = -1
 };
 
-struct rtl8xxxu_rx_desc {
+struct rtl8xxxu_rxdesc16 {
 #ifdef __LITTLE_ENDIAN
 	u32 pktlen:14;
 	u32 crc32:1;
@@ -207,7 +237,7 @@
 #endif
 };
 
-struct rtl8723bu_rx_desc {
+struct rtl8xxxu_rxdesc24 {
 #ifdef __LITTLE_ENDIAN
 	u32 pktlen:14;
 	u32 crc32:1;
@@ -332,7 +362,7 @@
 	__le32 tsfl;
 };
 
-struct rtl8723au_tx_desc {
+struct rtl8xxxu_txdesc32 {
 	__le16 pkt_size;
 	u8 pkt_offset;
 	u8 txdw0;
@@ -346,7 +376,7 @@
 	__le16 txdw7;
 };
 
-struct rtl8723bu_tx_desc {
+struct rtl8xxxu_txdesc40 {
 	__le16 pkt_size;
 	u8 pkt_offset;
 	u8 txdw0;
@@ -422,10 +452,10 @@
  * aggregation enable and break respectively. For 8723bu, bits 0-7 are macid.
  */
 #define TXDESC_PKT_OFFSET_SZ		0
-#define TXDESC_AGG_ENABLE_8723A		BIT(5)
-#define TXDESC_AGG_BREAK_8723A		BIT(6)
-#define TXDESC_MACID_SHIFT_8723B	0
-#define TXDESC_MACID_MASK_8723B		0x00f0
+#define TXDESC32_AGG_ENABLE		BIT(5)
+#define TXDESC32_AGG_BREAK		BIT(6)
+#define TXDESC40_MACID_SHIFT		0
+#define TXDESC40_MACID_MASK		0x00f0
 #define TXDESC_QUEUE_SHIFT		8
 #define TXDESC_QUEUE_MASK		0x1f00
 #define TXDESC_QUEUE_BK			0x2
@@ -437,9 +467,9 @@
 #define TXDESC_QUEUE_MGNT		0x12
 #define TXDESC_QUEUE_CMD		0x13
 #define TXDESC_QUEUE_MAX		(TXDESC_QUEUE_CMD + 1)
-#define TXDESC_RDG_NAV_EXT_8723B	BIT(13)
-#define TXDESC_LSIG_TXOP_ENABLE_8723B	BIT(14)
-#define TXDESC_PIFS_8723B		BIT(15)
+#define TXDESC40_RDG_NAV_EXT		BIT(13)
+#define TXDESC40_LSIG_TXOP_ENABLE	BIT(14)
+#define TXDESC40_PIFS			BIT(15)
 
 #define DESC_RATE_ID_SHIFT		16
 #define DESC_RATE_ID_MASK		0xf
@@ -451,71 +481,71 @@
 #define TXDESC_HWPC			BIT(31)
 
 /* Word 2 */
-#define TXDESC_PAID_SHIFT_8723B		0
-#define TXDESC_PAID_MASK_8723B		0x1ff
-#define TXDESC_CCA_RTS_SHIFT_8723B	10
-#define TXDESC_CCA_RTS_MASK_8723B	0xc00
-#define TXDESC_AGG_ENABLE_8723B		BIT(12)
-#define TXDESC_RDG_ENABLE_8723B		BIT(13)
-#define TXDESC_AGG_BREAK_8723B		BIT(16)
-#define TXDESC_MORE_FRAG_8723B		BIT(17)
-#define TXDESC_RAW_8723B		BIT(18)
-#define TXDESC_ACK_REPORT_8723A		BIT(19)
-#define TXDESC_SPE_RPT_8723B		BIT(19)
+#define TXDESC40_PAID_SHIFT		0
+#define TXDESC40_PAID_MASK		0x1ff
+#define TXDESC40_CCA_RTS_SHIFT		10
+#define TXDESC40_CCA_RTS_MASK		0xc00
+#define TXDESC40_AGG_ENABLE		BIT(12)
+#define TXDESC40_RDG_ENABLE		BIT(13)
+#define TXDESC40_AGG_BREAK		BIT(16)
+#define TXDESC40_MORE_FRAG		BIT(17)
+#define TXDESC40_RAW			BIT(18)
+#define TXDESC32_ACK_REPORT		BIT(19)
+#define TXDESC40_SPE_RPT		BIT(19)
 #define TXDESC_AMPDU_DENSITY_SHIFT	20
-#define TXDESC_BT_INT_8723B		BIT(23)
-#define TXDESC_GID_8723B		BIT(24)
+#define TXDESC40_BT_INT			BIT(23)
+#define TXDESC40_GID_SHIFT		24
 
 /* Word 3 */
-#define TXDESC_USE_DRIVER_RATE_8723B	BIT(8)
-#define TXDESC_CTS_SELF_ENABLE_8723B	BIT(11)
-#define TXDESC_RTS_CTS_ENABLE_8723B	BIT(12)
-#define TXDESC_HW_RTS_ENABLE_8723B	BIT(13)
-#define TXDESC_SEQ_SHIFT_8723A		16
-#define TXDESC_SEQ_MASK_8723A		0x0fff0000
+#define TXDESC40_USE_DRIVER_RATE	BIT(8)
+#define TXDESC40_CTS_SELF_ENABLE	BIT(11)
+#define TXDESC40_RTS_CTS_ENABLE		BIT(12)
+#define TXDESC40_HW_RTS_ENABLE		BIT(13)
+#define TXDESC32_SEQ_SHIFT		16
+#define TXDESC32_SEQ_MASK		0x0fff0000
 
 /* Word 4 */
-#define TXDESC_RTS_RATE_SHIFT_8723A	0
-#define TXDESC_RTS_RATE_MASK_8723A	0x3f
-#define TXDESC_QOS_8723A		BIT(6)
-#define TXDESC_HW_SEQ_ENABLE_8723A	BIT(7)
-#define TXDESC_USE_DRIVER_RATE_8723A	BIT(8)
+#define TXDESC32_RTS_RATE_SHIFT		0
+#define TXDESC32_RTS_RATE_MASK		0x3f
+#define TXDESC32_QOS			BIT(6)
+#define TXDESC32_HW_SEQ_ENABLE		BIT(7)
+#define TXDESC32_USE_DRIVER_RATE	BIT(8)
 #define TXDESC_DISABLE_DATA_FB		BIT(10)
-#define TXDESC_CTS_SELF_ENABLE_8723A	BIT(11)
-#define TXDESC_RTS_CTS_ENABLE_8723A	BIT(12)
-#define TXDESC_HW_RTS_ENABLE_8723A	BIT(13)
+#define TXDESC32_CTS_SELF_ENABLE	BIT(11)
+#define TXDESC32_RTS_CTS_ENABLE		BIT(12)
+#define TXDESC32_HW_RTS_ENABLE		BIT(13)
 #define TXDESC_PRIME_CH_OFF_LOWER	BIT(20)
 #define TXDESC_PRIME_CH_OFF_UPPER	BIT(21)
-#define TXDESC_SHORT_PREAMBLE_8723A	BIT(24)
+#define TXDESC32_SHORT_PREAMBLE		BIT(24)
 #define TXDESC_DATA_BW			BIT(25)
 #define TXDESC_RTS_DATA_BW		BIT(27)
 #define TXDESC_RTS_PRIME_CH_OFF_LOWER	BIT(28)
 #define TXDESC_RTS_PRIME_CH_OFF_UPPER	BIT(29)
-#define TXDESC_DATA_RATE_FB_SHIFT_8723B	8
-#define TXDESC_DATA_RATE_FB_MASK_8723B	0x00001f00
-#define TXDESC_RETRY_LIMIT_ENABLE_8723B	BIT(17)
-#define TXDESC_RETRY_LIMIT_SHIFT_8723B	18
-#define TXDESC_RETRY_LIMIT_MASK_8723B	0x00fc0000
-#define TXDESC_RTS_RATE_SHIFT_8723B	24
-#define TXDESC_RTS_RATE_MASK_8723B	0x3f000000
+#define TXDESC40_DATA_RATE_FB_SHIFT	8
+#define TXDESC40_DATA_RATE_FB_MASK	0x00001f00
+#define TXDESC40_RETRY_LIMIT_ENABLE	BIT(17)
+#define TXDESC40_RETRY_LIMIT_SHIFT	18
+#define TXDESC40_RETRY_LIMIT_MASK	0x00fc0000
+#define TXDESC40_RTS_RATE_SHIFT		24
+#define TXDESC40_RTS_RATE_MASK		0x3f000000
 
 /* Word 5 */
-#define TXDESC_SHORT_PREAMBLE_8723B	BIT(4)
-#define TXDESC_SHORT_GI			BIT(6)
+#define TXDESC40_SHORT_PREAMBLE		BIT(4)
+#define TXDESC32_SHORT_GI		BIT(6)
 #define TXDESC_CCX_TAG			BIT(7)
-#define TXDESC_RETRY_LIMIT_ENABLE_8723A	BIT(17)
-#define TXDESC_RETRY_LIMIT_SHIFT_8723A	18
-#define TXDESC_RETRY_LIMIT_MASK_8723A	0x00fc0000
+#define TXDESC32_RETRY_LIMIT_ENABLE	BIT(17)
+#define TXDESC32_RETRY_LIMIT_SHIFT	18
+#define TXDESC32_RETRY_LIMIT_MASK	0x00fc0000
 
 /* Word 6 */
 #define TXDESC_MAX_AGG_SHIFT		11
 
 /* Word 8 */
-#define TXDESC_HW_SEQ_ENABLE_8723B	BIT(15)
+#define TXDESC40_HW_SEQ_ENABLE		BIT(15)
 
 /* Word 9 */
-#define TXDESC_SEQ_SHIFT_8723B		12
-#define TXDESC_SEQ_MASK_8723B		0x00fff000
+#define TXDESC40_SEQ_SHIFT		12
+#define TXDESC40_SEQ_MASK		0x00fff000
 
 struct phy_rx_agc_info {
 #ifdef __LITTLE_ENDIAN
@@ -600,6 +630,31 @@
 };
 
 /*
+ * 8723au/8192cu/8188ru required base power index offset tables.
+ */
+struct rtl8xxxu_power_base {
+	u32 reg_0e00;
+	u32 reg_0e04;
+	u32 reg_0e08;
+	u32 reg_086c;
+
+	u32 reg_0e10;
+	u32 reg_0e14;
+	u32 reg_0e18;
+	u32 reg_0e1c;
+
+	u32 reg_0830;
+	u32 reg_0834;
+	u32 reg_0838;
+	u32 reg_086c_2;
+
+	u32 reg_083c;
+	u32 reg_0848;
+	u32 reg_084c;
+	u32 reg_0868;
+};
+
+/*
  * The 8723au has 3 channel groups: 1-3, 4-9, and 10-14
  */
 struct rtl8723au_idx {
@@ -763,55 +818,49 @@
 	u8 cck_base[6];
 	u8 ht40_base[5];
 	struct rtl8723au_idx ht20_ofdm_1s_diff;
-	struct rtl8723au_idx ht40_ht20_2s_diff;
-	struct rtl8723au_idx ofdm_cck_2s_diff; /* not used */
-	struct rtl8723au_idx ht40_ht20_3s_diff;
-	struct rtl8723au_idx ofdm_cck_3s_diff; /* not used */
-	struct rtl8723au_idx ht40_ht20_4s_diff;
-	struct rtl8723au_idx ofdm_cck_4s_diff; /* not used */
+	struct rtl8723bu_pwr_idx pwr_diff[3];
+	u8 dummy5g[24]; /* max channel group (14) + power diff offset (10) */
 };
 
 struct rtl8192eu_efuse {
 	__le16 rtl_id;
 	u8 res0[0x0e];
 	struct rtl8192eu_efuse_tx_power tx_power_index_A;	/* 0x10 */
-	struct rtl8192eu_efuse_tx_power tx_power_index_B;	/* 0x22 */
-	struct rtl8192eu_efuse_tx_power tx_power_index_C;	/* 0x34 */
-	struct rtl8192eu_efuse_tx_power tx_power_index_D;	/* 0x46 */
-	u8 res1[0x60];
+	struct rtl8192eu_efuse_tx_power tx_power_index_B;	/* 0x3a */
+	u8 res2[0x54];
 	u8 channel_plan;		/* 0xb8 */
 	u8 xtal_k;
 	u8 thermal_meter;
 	u8 iqk_lck;
 	u8 pa_type;			/* 0xbc */
 	u8 lna_type_2g;			/* 0xbd */
-	u8 res2[1];
+	u8 res3[1];
 	u8 lna_type_5g;			/* 0xbf */
-	u8 res13[1];
+	u8 res4[1];
 	u8 rf_board_option;
 	u8 rf_feature_option;
 	u8 rf_bt_setting;
 	u8 eeprom_version;
 	u8 eeprom_customer_id;
-	u8 res3[3];
+	u8 res5[3];
 	u8 rf_antenna_option;		/* 0xc9 */
-	u8 res4[6];
+	u8 res6[6];
 	u8 vid;				/* 0xd0 */
-	u8 res5[1];
+	u8 res7[1];
 	u8 pid;				/* 0xd2 */
-	u8 res6[1];
+	u8 res8[1];
 	u8 usb_optional_function;
-	u8 res7[2];
-	u8 mac_addr[ETH_ALEN];		/* 0xd7 */
-	u8 res8[2];
-	u8 vendor_name[7];
 	u8 res9[2];
-	u8 device_name[0x0b];		/* 0xe8 */
+	u8 mac_addr[ETH_ALEN];		/* 0xd7 */
 	u8 res10[2];
+	u8 vendor_name[7];
+	u8 res11[2];
+	u8 device_name[0x0b];		/* 0xe8 */
+	u8 res12[2];
 	u8 serial[0x0b];		/* 0xf5 */
-	u8 res11[0x30];
+	u8 res13[0x30];
 	u8 unknown[0x0d];		/* 0x130 */
-	u8 res12[0xc3];
+	u8 res14[0xc3];
 };
 
 struct rtl8xxxu_reg8val {
@@ -1177,6 +1226,7 @@
 	struct rtl8723au_idx ofdm_tx_power_diff[RTL8723B_TX_COUNT];
 	struct rtl8723au_idx ht20_tx_power_diff[RTL8723B_TX_COUNT];
 	struct rtl8723au_idx ht40_tx_power_diff[RTL8723B_TX_COUNT];
+	struct rtl8xxxu_power_base *power_base;
 	u32 chip_cut:4;
 	u32 rom_rev:4;
 	u32 is_multi_func:1;
@@ -1204,7 +1254,6 @@
 	u8 rf_paths;
 	u8 rx_paths;
 	u8 tx_paths;
-	u32 rf_mode_ag[2];
 	u32 rege94;
 	u32 rege9c;
 	u32 regeb4;
@@ -1236,8 +1285,9 @@
 	u32 mac_backup[RTL8XXXU_MAC_REGS];
 	u32 bb_backup[RTL8XXXU_BB_REGS];
 	u32 bb_recovery_backup[RTL8XXXU_BB_REGS];
-	u32 rtlchip;
+	enum rtl8xxxu_rtl_chip rtl_chip;
 	u8 pi_enabled:1;
+	u8 no_pape:1;
 	u8 int_buf[USB_INTR_CONTENT_LENGTH];
 };
 
@@ -1260,6 +1310,8 @@
 	void (*power_off) (struct rtl8xxxu_priv *priv);
 	void (*reset_8051) (struct rtl8xxxu_priv *priv);
 	int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page);
+	void (*init_phy_bb) (struct rtl8xxxu_priv *priv);
+	int (*init_phy_rf) (struct rtl8xxxu_priv *priv);
 	void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv);
 	void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv);
 	void (*config_channel) (struct ieee80211_hw *hw);
@@ -1269,6 +1321,7 @@
 	void (*init_statistics) (struct rtl8xxxu_priv *priv);
 	void (*enable_rf) (struct rtl8xxxu_priv *priv);
 	void (*disable_rf) (struct rtl8xxxu_priv *priv);
+	void (*usb_quirks) (struct rtl8xxxu_priv *priv);
 	void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
 			      bool ht40);
 	void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
@@ -1279,9 +1332,18 @@
 	u16 mbox_ext_reg;
 	char mbox_ext_width;
 	char tx_desc_size;
+	char rx_desc_size;
 	char has_s0s1;
 	u32 adda_1t_init;
 	u32 adda_1t_path_on;
 	u32 adda_2t_path_on_a;
 	u32 adda_2t_path_on_b;
+	u16 trxff_boundary;
+	u8 pbp_rx;
+	u8 pbp_tx;
+	struct rtl8xxxu_reg8val *mactable;
+	u8 total_page_num;
+	u8 page_num_hi;
+	u8 page_num_lo;
+	u8 page_num_norm;
 };
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index e545e84..b0e0c64 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -109,6 +109,9 @@
 #define  AFE_XTAL_GATE_DIG		BIT(17)
 #define  AFE_XTAL_BT_GATE		BIT(20)
 
+/*
+ * 0x0028 is also known as REG_AFE_CTRL2 on 8723bu/8192eu
+ */
 #define REG_AFE_PLL_CTRL		0x0028
 #define  AFE_PLL_ENABLE			BIT(0)
 #define  AFE_PLL_320_ENABLE		BIT(1)
@@ -192,6 +195,7 @@
 						   control */
 #define  MULTI_GPS_FUNC_EN		BIT(22)	/* GPS function enable */
 
+#define REG_AFE_CTRL4			0x0078	/* 8192eu/8723bu */
 #define REG_LDO_SW_CTRL			0x007c	/* 8192eu */
 
 #define REG_MCU_FW_DL			0x0080
@@ -383,7 +387,7 @@
 #define REG_RQPN			0x0200
 #define  RQPN_HI_PQ_SHIFT		0
 #define  RQPN_LO_PQ_SHIFT		8
-#define  RQPN_NORM_PQ_SHIFT		16
+#define  RQPN_PUB_PQ_SHIFT		16
 #define  RQPN_LOAD			BIT(31)
 
 #define REG_FIFOPAGE			0x0204
@@ -417,13 +421,20 @@
 
 /*  spec version 11 */
 /* 0x0400 ~ 0x047F	Protocol Configuration */
-#define REG_VOQ_INFORMATION		0x0400
-#define REG_VIQ_INFORMATION		0x0404
-#define REG_BEQ_INFORMATION		0x0408
-#define REG_BKQ_INFORMATION		0x040c
-#define REG_MGQ_INFORMATION		0x0410
-#define REG_HGQ_INFORMATION		0x0414
-#define REG_BCNQ_INFORMATION		0x0418
+/* 8192c, 8192d */
+#define REG_VOQ_INFO			0x0400
+#define REG_VIQ_INFO			0x0404
+#define REG_BEQ_INFO			0x0408
+#define REG_BKQ_INFO			0x040c
+/* 8188e, 8723a, 8812a, 8821a, 8192e, 8723b */
+#define REG_Q0_INFO			0x400
+#define REG_Q1_INFO			0x404
+#define REG_Q2_INFO			0x408
+#define REG_Q3_INFO			0x40c
+
+#define REG_MGQ_INFO			0x0410
+#define REG_HGQ_INFO			0x0414
+#define REG_BCNQ_INFO			0x0418
 
 #define REG_CPU_MGQ_INFORMATION		0x041c
 #define REG_FWHW_TXQ_CTRL		0x0420
@@ -494,6 +505,9 @@
 #define REG_DATA_SUBCHANNEL		0x0483
 /* 8723au */
 #define REG_INIDATA_RATE_SEL		0x0484
+/* MACID_SLEEP_1/3 for 8723b, 8192e, 8812a, 8821a */
+#define REG_MACID_SLEEP_3_8732B		0x0484
+#define REG_MACID_SLEEP_1_8732B		0x0488
 
 #define REG_POWER_STATUS		0x04a4
 #define REG_POWER_STAGE1		0x04b4
@@ -502,12 +516,20 @@
 #define REG_PKT_VO_VI_LIFE_TIME		0x04c0
 #define REG_PKT_BE_BK_LIFE_TIME		0x04c2
 #define REG_STBC_SETTING		0x04c4
+#define REG_QUEUE_CTRL			0x04c6
 #define REG_HT_SINGLE_AMPDU_8723B	0x04c7
 #define REG_PROT_MODE_CTRL		0x04c8
 #define REG_MAX_AGGR_NUM		0x04ca
 #define REG_RTS_MAX_AGGR_NUM		0x04cb
 #define REG_BAR_MODE_CTRL		0x04cc
 #define REG_RA_TRY_RATE_AGG_LMT		0x04cf
+/* MACID_DROP for 8723a */
+#define REG_MACID_DROP_8732A		0x04d0
+/* EARLY_MODE_CONTROL 8188e */
+#define REG_EARLY_MODE_CONTROL_8188E	0x04d0
+/* MACID_SLEEP_2 for 8723b, 8192e, 8812a, 8821a */
+#define REG_MACID_SLEEP_2_8732B		0x04d0
+#define REG_MACID_SLEEP			0x04d4
 #define REG_NQOS_SEQ			0x04dc
 #define REG_QOS_SEQ			0x04de
 #define REG_NEED_CPU_HANDLE		0x04e0
@@ -860,6 +882,10 @@
 #define  CCK0_SIDEBAND			BIT(4)
 
 #define REG_CCK0_AFE_SETTING		0x0a04
+#define  CCK0_AFE_RX_MASK		0x0f000000
+#define  CCK0_AFE_RX_ANT_AB		BIT(24)
+#define  CCK0_AFE_RX_ANT_A		0
+#define  CCK0_AFE_RX_ANT_B		(BIT(24) | BIT(26))
 
 #define REG_CONFIG_ANT_A		0x0b68
 #define REG_CONFIG_ANT_B		0x0b6c
@@ -1026,6 +1052,7 @@
 #define  USB_HIMR_ROK			BIT(0)	/*  Receive DMA OK Interrupt */
 
 #define REG_USB_SPECIAL_OPTION		0xfe55
+#define REG_USB_HRPWM			0xfe58
 #define REG_USB_DMA_AGG_TO		0xfe5b
 #define REG_USB_AGG_TO			0xfe5c
 #define REG_USB_AGG_TH			0xfe5d
@@ -1111,6 +1138,7 @@
 #define RF6052_REG_T_METER_8723B	0x42
 #define RF6052_REG_UNKNOWN_43		0x43
 #define RF6052_REG_UNKNOWN_55		0x55
+#define RF6052_REG_UNKNOWN_56		0x56
 #define RF6052_REG_S0S1			0xb0
 #define RF6052_REG_UNKNOWN_DF		0xdf
 #define RF6052_REG_UNKNOWN_ED		0xed
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 0517a4f..c74eb13 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -131,7 +131,7 @@
 };
 
 static const struct ieee80211_supported_band rtl_band_2ghz = {
-	.band = IEEE80211_BAND_2GHZ,
+	.band = NL80211_BAND_2GHZ,
 
 	.channels = rtl_channeltable_2g,
 	.n_channels = ARRAY_SIZE(rtl_channeltable_2g),
@@ -143,7 +143,7 @@
 };
 
 static struct ieee80211_supported_band rtl_band_5ghz = {
-	.band = IEEE80211_BAND_5GHZ,
+	.band = NL80211_BAND_5GHZ,
 
 	.channels = rtl_channeltable_5g,
 	.n_channels = ARRAY_SIZE(rtl_channeltable_5g),
@@ -197,7 +197,7 @@
 
 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 
-	/*hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+	/*hw->wiphy->bands[NL80211_BAND_2GHZ]
 	 *base on ant_num
 	 *rx_mask: RX mask
 	 *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7
@@ -328,26 +328,26 @@
 	    rtlhal->bandset == BAND_ON_BOTH) {
 		/* 1: 2.4 G bands */
 		/* <1> use  mac->bands as mem for hw->wiphy->bands */
-		sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+		sband = &(rtlmac->bands[NL80211_BAND_2GHZ]);
 
-		/* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+		/* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
 		 * to default value(1T1R) */
-		memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
+		memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]), &rtl_band_2ghz,
 				sizeof(struct ieee80211_supported_band));
 
 		/* <3> init ht cap base on ant_num */
 		_rtl_init_hw_ht_capab(hw, &sband->ht_cap);
 
 		/* <4> set mac->sband to wiphy->sband */
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+		hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
 
 		/* 2: 5 G bands */
 		/* <1> use  mac->bands as mem for hw->wiphy->bands */
-		sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+		sband = &(rtlmac->bands[NL80211_BAND_5GHZ]);
 
-		/* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+		/* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
 		 * to default value(1T1R) */
-		memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz,
+		memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]), &rtl_band_5ghz,
 				sizeof(struct ieee80211_supported_band));
 
 		/* <3> init ht cap base on ant_num */
@@ -355,15 +355,15 @@
 
 		_rtl_init_hw_vht_capab(hw, &sband->vht_cap);
 		/* <4> set mac->sband to wiphy->sband */
-		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+		hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
 	} else {
 		if (rtlhal->current_bandtype == BAND_ON_2_4G) {
 			/* <1> use  mac->bands as mem for hw->wiphy->bands */
-			sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+			sband = &(rtlmac->bands[NL80211_BAND_2GHZ]);
 
-			/* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+			/* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
 			 * to default value(1T1R) */
-			memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]),
+			memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]),
 			       &rtl_band_2ghz,
 			       sizeof(struct ieee80211_supported_band));
 
@@ -371,14 +371,14 @@
 			_rtl_init_hw_ht_capab(hw, &sband->ht_cap);
 
 			/* <4> set mac->sband to wiphy->sband */
-			hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+			hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
 		} else if (rtlhal->current_bandtype == BAND_ON_5G) {
 			/* <1> use  mac->bands as mem for hw->wiphy->bands */
-			sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+			sband = &(rtlmac->bands[NL80211_BAND_5GHZ]);
 
-			/* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+			/* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
 			 * to default value(1T1R) */
-			memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]),
+			memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]),
 			       &rtl_band_5ghz,
 			       sizeof(struct ieee80211_supported_band));
 
@@ -387,7 +387,7 @@
 
 			_rtl_init_hw_vht_capab(hw, &sband->vht_cap);
 			/* <4> set mac->sband to wiphy->sband */
-			hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+			hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
 		} else {
 			RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n",
 				 rtlhal->current_bandtype);
@@ -861,7 +861,7 @@
 
 /* mac80211's rate_idx is like this:
  *
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
+ * 2.4G band:rx_status->band == NL80211_BAND_2GHZ
  *
  * B/G rate:
  * (rx_status->flag & RX_FLAG_HT) = 0,
@@ -871,7 +871,7 @@
  * (rx_status->flag & RX_FLAG_HT) = 1,
  * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
  *
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
+ * 5G band:rx_status->band == NL80211_BAND_5GHZ
  * A rate:
  * (rx_status->flag & RX_FLAG_HT) = 0,
  * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
@@ -958,7 +958,7 @@
 		return rate_idx;
 	}
 	if (false == isht) {
-		if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
+		if (NL80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
 			switch (desc_rate) {
 			case DESC_RATE1M:
 				rate_idx = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 4514568..a30af6c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -70,83 +70,83 @@
 	if (level_num == 2) {
 		if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
 		    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "BT Rssi pre state = LOW\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "BT Rssi pre state = LOW\n");
 			if (btrssi >= (rssi_thresh +
 				       BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
 				btrssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state switch to High\n");
 			} else {
 				btrssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state stay at Low\n");
 			}
 		} else {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "BT Rssi pre state = HIGH\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "BT Rssi pre state = HIGH\n");
 			if (btrssi < rssi_thresh) {
 				btrssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state switch to Low\n");
 			} else {
 				btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "BT Rssi thresh error!!\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "BT Rssi thresh error!!\n");
 			return coex_sta->pre_bt_rssi_state;
 		}
 
 		if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
 		    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "BT Rssi pre state = LOW\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "BT Rssi pre state = LOW\n");
 			if (btrssi >= (rssi_thresh +
 				      BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
 				btrssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state switch to Medium\n");
 			} else {
 				btrssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_bt_rssi_state ==
 			    BTC_RSSI_STATE_MEDIUM) ||
 			   (coex_sta->pre_bt_rssi_state ==
 			    BTC_RSSI_STATE_STAY_MEDIUM)) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "[BTCoex], BT Rssi pre state = MEDIUM\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "[BTCoex], BT Rssi pre state = MEDIUM\n");
 			if (btrssi >= (rssi_thresh1 +
 				       BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
 				btrssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state switch to High\n");
 			} else if (btrssi < rssi_thresh) {
 				btrssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state switch to Low\n");
 			} else {
 				btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state stay at Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state stay at Medium\n");
 			}
 		} else {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "BT Rssi pre state = HIGH\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "BT Rssi pre state = HIGH\n");
 			if (btrssi < rssi_thresh1) {
 				btrssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state switch to Medium\n");
 			} else {
 				btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "BT Rssi state stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "BT Rssi state stay at High\n");
 			}
 		}
 	}
@@ -173,32 +173,28 @@
 			if (wifirssi >= (rssi_thresh +
 					 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
 				wifirssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state switch to High\n");
 			} else {
 				wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state stay at Low\n");
 			}
 		} else {
 			if (wifirssi < rssi_thresh) {
 				wifirssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state switch to Low\n");
 			} else {
 				wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-				  "wifi RSSI thresh error!!\n");
+			btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+				    "wifi RSSI thresh error!!\n");
 			return coex_sta->pre_wifi_rssi_state[index];
 		}
 
@@ -209,14 +205,12 @@
 			if (wifirssi >= (rssi_thresh +
 					 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
 				wifirssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state switch to Medium\n");
 			} else {
 				wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_wifi_rssi_state[index] ==
 			    BTC_RSSI_STATE_MEDIUM) ||
@@ -225,31 +219,26 @@
 			if (wifirssi >= (rssi_thresh1 +
 					 BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
 				wifirssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state switch to High\n");
 			} else if (wifirssi < rssi_thresh) {
 				wifirssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state switch to Low\n");
 			} else {
 				wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state stay at Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state stay at Medium\n");
 			}
 		} else {
 			if (wifirssi < rssi_thresh1) {
 				wifirssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state switch to Medium\n");
 			} else {
 				wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "wifi RSSI state stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "wifi RSSI state stay at High\n");
 			}
 		}
 	}
@@ -284,26 +273,26 @@
 		bt_disabled = false;
 		btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
 				   &bt_disabled);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-			  "[BTCoex], BT is enabled !!\n");
+		btc_alg_dbg(ALGO_BT_MONITOR,
+			    "[BTCoex], BT is enabled !!\n");
 	} else {
 		bt_disable_cnt++;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-			  "[BTCoex], bt all counters = 0, %d times!!\n",
-			  bt_disable_cnt);
+		btc_alg_dbg(ALGO_BT_MONITOR,
+			    "[BTCoex], bt all counters = 0, %d times!!\n",
+			    bt_disable_cnt);
 		if (bt_disable_cnt >= 2) {
 			bt_disabled = true;
 			btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
 					   &bt_disabled);
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-				  "[BTCoex], BT is disabled !!\n");
+			btc_alg_dbg(ALGO_BT_MONITOR,
+				    "[BTCoex], BT is disabled !!\n");
 		}
 	}
 	if (pre_bt_disabled != bt_disabled) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-			  "[BTCoex], BT is from %s to %s!!\n",
-			  (pre_bt_disabled ? "disabled" : "enabled"),
-			  (bt_disabled ? "disabled" : "enabled"));
+		btc_alg_dbg(ALGO_BT_MONITOR,
+			    "[BTCoex], BT is from %s to %s!!\n",
+			    (pre_bt_disabled ? "disabled" : "enabled"),
+			    (bt_disabled ? "disabled" : "enabled"));
 		pre_bt_disabled = bt_disabled;
 	}
 }
@@ -499,12 +488,12 @@
 	coex_sta->low_priority_tx = reg_lp_tx;
 	coex_sta->low_priority_rx = reg_lp_rx;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-		  "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-		  reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-		  "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-		  reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+	btc_alg_dbg(ALGO_BT_MONITOR,
+		    "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+		    reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+	btc_alg_dbg(ALGO_BT_MONITOR,
+		    "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+		    reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
 
 	/* reset counter */
 	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -518,9 +507,9 @@
 
 	h2c_parameter[0] |= BIT0;	/* trigger */
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -592,8 +581,8 @@
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
 
 	if (!bt_link_info->bt_link_exist) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "No BT link exists!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "No BT link exists!!!\n");
 		return algorithm;
 	}
 
@@ -608,27 +597,27 @@
 
 	if (numdiffprofile == 1) {
 		if (bt_link_info->sco_exist) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "SCO only\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "SCO only\n");
 			algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
 		} else {
 			if (bt_link_info->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "HID only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "HID only\n");
 				algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
 			} else if (bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "A2DP only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "A2DP only\n");
 				algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
 			} else if (bt_link_info->pan_exist) {
 				if (bt_hson) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "PAN(HS) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "PAN(HS) only\n");
 					algorithm =
 						BT_8192E_2ANT_COEX_ALGO_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "PAN(EDR) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "PAN(EDR) only\n");
 					algorithm =
 						BT_8192E_2ANT_COEX_ALGO_PANEDR;
 				}
@@ -637,21 +626,21 @@
 	} else if (numdiffprofile == 2) {
 		if (bt_link_info->sco_exist) {
 			if (bt_link_info->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "SCO + HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "SCO + HID\n");
 				algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
 			} else if (bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "SCO + A2DP ==> SCO\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "SCO + A2DP ==> SCO\n");
 				algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (bt_link_info->pan_exist) {
 				if (bt_hson) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "SCO + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "SCO + PAN(HS)\n");
 					algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "SCO + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "SCO + PAN(EDR)\n");
 					algorithm =
 						BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
 				}
@@ -660,38 +649,38 @@
 			if (bt_link_info->hid_exist &&
 			    bt_link_info->a2dp_exist) {
 				if (stack_info->num_of_hid >= 2) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "HID*2 + A2DP\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "HID*2 + A2DP\n");
 					algorithm =
 					BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "HID + A2DP\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "HID + A2DP\n");
 					algorithm =
 					    BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
 				}
 			} else if (bt_link_info->hid_exist &&
 				   bt_link_info->pan_exist) {
 				if (bt_hson) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "HID + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "HID + PAN(HS)\n");
 					algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "HID + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "HID + PAN(EDR)\n");
 					algorithm =
 					    BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (bt_link_info->pan_exist &&
 				   bt_link_info->a2dp_exist) {
 				if (bt_hson) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "A2DP + PAN(HS)\n");
 					algorithm =
 					    BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "A2DP + PAN(EDR)\n");
 					algorithm =
 					    BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
 				}
@@ -701,30 +690,30 @@
 		if (bt_link_info->sco_exist) {
 			if (bt_link_info->hid_exist &&
 			    bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "SCO + HID + A2DP ==> HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "SCO + HID + A2DP ==> HID\n");
 				algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (bt_link_info->hid_exist &&
 				   bt_link_info->pan_exist) {
 				if (bt_hson) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "SCO + HID + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "SCO + HID + PAN(HS)\n");
 					algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "SCO + HID + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "SCO + HID + PAN(EDR)\n");
 					algorithm =
 						BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
 				}
 			} else if (bt_link_info->pan_exist &&
 				   bt_link_info->a2dp_exist) {
 				if (bt_hson) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "SCO + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "SCO + A2DP + PAN(HS)\n");
 					algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "SCO + A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "SCO + A2DP + PAN(EDR)\n");
 					algorithm =
 					    BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
 				}
@@ -734,13 +723,13 @@
 			    bt_link_info->pan_exist &&
 			    bt_link_info->a2dp_exist) {
 				if (bt_hson) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "HID + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "HID + A2DP + PAN(HS)\n");
 					algorithm =
 					    BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "HID + A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "HID + A2DP + PAN(EDR)\n");
 					algorithm =
 					BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
 				}
@@ -752,12 +741,12 @@
 			    bt_link_info->pan_exist &&
 			    bt_link_info->a2dp_exist) {
 				if (bt_hson) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "ErrorSCO+HID+A2DP+PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "ErrorSCO+HID+A2DP+PAN(HS)\n");
 
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "SCO+HID+A2DP+PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "SCO+HID+A2DP+PAN(EDR)\n");
 					algorithm =
 					    BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
 				}
@@ -778,10 +767,10 @@
 	 */
 	h2c_parameter[0] = dac_swinglvl;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
@@ -793,9 +782,9 @@
 
 	h2c_parameter[0] = dec_btpwr_lvl;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
-		  dec_btpwr_lvl, h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
+		    dec_btpwr_lvl, h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
@@ -803,15 +792,15 @@
 static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
 				      bool force_exec, u8 dec_btpwr_lvl)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s Dec BT power level = %d\n",
-		  (force_exec ? "force to" : ""), dec_btpwr_lvl);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s Dec BT power level = %d\n",
+		    (force_exec ? "force to" : ""), dec_btpwr_lvl);
 	coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
-			  coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+			    coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
 	}
 	halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
 
@@ -828,10 +817,10 @@
 	if (enable_autoreport)
 		h2c_parameter[0] |= BIT0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-		  (enable_autoreport ? "Enabled!!" : "Disabled!!"),
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+		    (enable_autoreport ? "Enabled!!" : "Disabled!!"),
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
 }
@@ -840,17 +829,17 @@
 					  bool force_exec,
 					  bool enable_autoreport)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s BT Auto report = %s\n",
-		  (force_exec ? "force to" : ""),
-		  ((enable_autoreport) ? "Enabled" : "Disabled"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s BT Auto report = %s\n",
+		    (force_exec ? "force to" : ""),
+		    ((enable_autoreport) ? "Enabled" : "Disabled"));
 	coex_dm->cur_bt_auto_report = enable_autoreport;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
-			  coex_dm->pre_bt_auto_report,
-			  coex_dm->cur_bt_auto_report);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+			    coex_dm->pre_bt_auto_report,
+			    coex_dm->cur_bt_auto_report);
 
 		if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
 			return;
@@ -864,16 +853,16 @@
 static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
 					    bool force_exec, u8 fw_dac_swinglvl)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s set FW Dac Swing level = %d\n",
-		  (force_exec ? "force to" : ""), fw_dac_swinglvl);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s set FW Dac Swing level = %d\n",
+		    (force_exec ? "force to" : ""), fw_dac_swinglvl);
 	coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
-			  coex_dm->pre_fw_dac_swing_lvl,
-			  coex_dm->cur_fw_dac_swing_lvl);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+			    coex_dm->pre_fw_dac_swing_lvl,
+			    coex_dm->cur_fw_dac_swing_lvl);
 
 		if (coex_dm->pre_fw_dac_swing_lvl ==
 		    coex_dm->cur_fw_dac_swing_lvl)
@@ -891,8 +880,8 @@
 {
 	if (rx_rf_shrink_on) {
 		/* Shrink RF Rx LPF corner */
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Shrink RF Rx LPF corner!!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Shrink RF Rx LPF corner!!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
 					  0xfffff, 0xffffc);
 	} else {
@@ -900,8 +889,8 @@
 		 * After initialized, we can use coex_dm->btRf0x1eBackup
 		 */
 		if (btcoexist->initilized) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-				  "[BTCoex], Resume RF Rx LPF corner!!\n");
+			btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+				    "[BTCoex], Resume RF Rx LPF corner!!\n");
 			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
 						  0xfffff,
 						  coex_dm->bt_rf0x1e_backup);
@@ -912,17 +901,17 @@
 static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
 				      bool force_exec, bool rx_rf_shrink_on)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn Rx RF Shrink = %s\n",
-		  (force_exec ? "force to" : ""),
-		  ((rx_rf_shrink_on) ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn Rx RF Shrink = %s\n",
+		    (force_exec ? "force to" : ""),
+		    ((rx_rf_shrink_on) ? "ON" : "OFF"));
 	coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
-			  coex_dm->pre_rf_rx_lpf_shrink,
-			  coex_dm->cur_rf_rx_lpf_shrink);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+			    coex_dm->pre_rf_rx_lpf_shrink,
+			    coex_dm->cur_rf_rx_lpf_shrink);
 
 		if (coex_dm->pre_rf_rx_lpf_shrink ==
 		    coex_dm->cur_rf_rx_lpf_shrink)
@@ -939,8 +928,8 @@
 {
 	u8 val = (u8)level;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], Write SwDacSwing = 0x%x\n", level);
 	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
 }
 
@@ -958,22 +947,22 @@
 				     bool force_exec, bool dac_swingon,
 				     u32 dac_swinglvl)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
-		  (force_exec ? "force to" : ""),
-		  ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
+		    (force_exec ? "force to" : ""),
+		    ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
 	coex_dm->cur_dac_swing_on = dac_swingon;
 	coex_dm->cur_dac_swing_lvl = dac_swinglvl;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
-			  coex_dm->pre_dac_swing_on,
-			  coex_dm->pre_dac_swing_lvl);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
-			  coex_dm->cur_dac_swing_on,
-			  coex_dm->cur_dac_swing_lvl);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
+			    coex_dm->pre_dac_swing_on,
+			    coex_dm->pre_dac_swing_lvl);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
+			    coex_dm->cur_dac_swing_on,
+			    coex_dm->cur_dac_swing_lvl);
 
 		if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
 		    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -991,8 +980,8 @@
 {
 	/* BB AGC Gain Table */
 	if (agc_table_en) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], BB Agc Table On!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], BB Agc Table On!\n");
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
@@ -1000,8 +989,8 @@
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], BB Agc Table Off!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], BB Agc Table Off!\n");
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -1014,16 +1003,17 @@
 static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
 				     bool force_exec, bool agc_table_en)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s %s Agc Table\n",
-		  (force_exec ? "force to" : ""),
-		  ((agc_table_en) ? "Enable" : "Disable"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s %s Agc Table\n",
+		    (force_exec ? "force to" : ""),
+		    ((agc_table_en) ? "Enable" : "Disable"));
 	coex_dm->cur_agc_table_en = agc_table_en;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
-			  coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+			    coex_dm->pre_agc_table_en,
+			    coex_dm->cur_agc_table_en);
 
 		if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
 			return;
@@ -1037,20 +1027,20 @@
 					   u32 val0x6c0, u32 val0x6c4,
 					   u32 val0x6c8, u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
 	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -1059,30 +1049,30 @@
 				       u32 val0x6c0, u32 val0x6c4,
 				       u32 val0x6c8, u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
-		  (force_exec ? "force to" : ""), val0x6c0);
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
-		  val0x6c4, val0x6c8, val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
+		    (force_exec ? "force to" : ""), val0x6c0);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+		    val0x6c4, val0x6c8, val0x6cc);
 	coex_dm->cur_val0x6c0 = val0x6c0;
 	coex_dm->cur_val0x6c4 = val0x6c4;
 	coex_dm->cur_val0x6c8 = val0x6c8;
 	coex_dm->cur_val0x6cc = val0x6cc;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
-			  coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
-			  coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x,\n",
-			  coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
-			  coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
+			    coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
+			    coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
+			    coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
+			    coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
 
 		if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
 		    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1136,9 +1126,9 @@
 	if (enable)
 		h2c_parameter[0] |= BIT0; /* function enable */
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -1146,18 +1136,18 @@
 static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
 					  bool force_exec, bool enable)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s turn Ignore WlanAct %s\n",
-		  (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s turn Ignore WlanAct %s\n",
+		    (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
 	coex_dm->cur_ignore_wlan_act = enable;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], bPreIgnoreWlanAct = %d ",
-			  coex_dm->pre_ignore_wlan_act);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "bCurIgnoreWlanAct = %d!!\n",
-			  coex_dm->cur_ignore_wlan_act);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], bPreIgnoreWlanAct = %d ",
+			    coex_dm->pre_ignore_wlan_act);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "bCurIgnoreWlanAct = %d!!\n",
+			    coex_dm->cur_ignore_wlan_act);
 
 		if (coex_dm->pre_ignore_wlan_act ==
 		    coex_dm->cur_ignore_wlan_act)
@@ -1185,11 +1175,11 @@
 	coex_dm->ps_tdma_para[3] = byte4;
 	coex_dm->ps_tdma_para[4] = byte5;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
-		  h2c_parameter[0],
-		  h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
-		  h2c_parameter[3] << 8 | h2c_parameter[4]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+		    h2c_parameter[0],
+		    h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+		    h2c_parameter[3] << 8 | h2c_parameter[4]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
@@ -1213,20 +1203,20 @@
 static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
 				    bool force_exec, bool turn_on, u8 type)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s turn %s PS TDMA, type=%d\n",
-		  (force_exec ? "force to" : ""),
-		  (turn_on ? "ON" : "OFF"), type);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+		    (force_exec ? "force to" : ""),
+		    (turn_on ? "ON" : "OFF"), type);
 	coex_dm->cur_ps_tdma_on = turn_on;
 	coex_dm->cur_ps_tdma = type;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
-			  coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
-			  coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+			    coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+			    coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
 
 		if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
 		    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1353,8 +1343,8 @@
 	u8 mimops = BTC_MIMO_PS_DYNAMIC;
 	u32 disra_mask = 0x0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], REAL set SS Type = %d\n", sstype);
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], REAL set SS Type = %d\n", sstype);
 
 	disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
 						   coex_dm->curra_masktype);
@@ -1386,9 +1376,9 @@
 static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
 					  bool force_exec, u8 new_sstype)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], %s Switch SS Type = %d\n",
-		  (force_exec ? "force to" : ""), new_sstype);
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], %s Switch SS Type = %d\n",
+		    (force_exec ? "force to" : ""), new_sstype);
 	coex_dm->cur_sstype = new_sstype;
 
 	if (!force_exec) {
@@ -1469,8 +1459,8 @@
 		btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
 				   &low_pwr_disable);
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi non-connected idle!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi non-connected idle!!\n");
 
 		if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
 		     coex_dm->bt_status) ||
@@ -1506,8 +1496,8 @@
 					   BTC_SET_ACT_DISABLE_LOW_POWER,
 					   &low_pwr_disable);
 
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Wifi connected + BT non connected-idle!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Wifi connected + BT non connected-idle!!\n");
 
 			halbtc8192e2ant_switch_sstype(btcoexist,
 						      NORMAL_EXEC, 2);
@@ -1534,8 +1524,8 @@
 
 			if (bt_hson)
 				return false;
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Wifi connected + BT connected-idle!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Wifi connected + BT connected-idle!!\n");
 
 			halbtc8192e2ant_switch_sstype(btcoexist,
 						      NORMAL_EXEC, 2);
@@ -1560,12 +1550,12 @@
 					   &low_pwr_disable);
 
 			if (wifi_busy) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "Wifi Connected-Busy + BT Busy!!\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "Wifi Connected-Busy + BT Busy!!\n");
 				common = false;
 			} else {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "Wifi Connected-Idle + BT Busy!!\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "Wifi Connected-Idle + BT Busy!!\n");
 
 				halbtc8192e2ant_switch_sstype(btcoexist,
 							      NORMAL_EXEC, 1);
@@ -1592,9 +1582,8 @@
 			  int result)
 {
 	if (tx_pause) {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 1\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 1\n");
 
 		if (coex_dm->cur_ps_tdma == 71) {
 			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1689,9 +1678,8 @@
 			}
 		}
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 0\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 0\n");
 		if (coex_dm->cur_ps_tdma == 5) {
 			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						true, 71);
@@ -1795,9 +1783,8 @@
 			  int result)
 {
 	if (tx_pause) {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 1\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 1\n");
 		if (coex_dm->cur_ps_tdma == 1) {
 			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						true, 6);
@@ -1886,9 +1873,8 @@
 			}
 		}
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 0\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 0\n");
 		if (coex_dm->cur_ps_tdma == 5) {
 			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						true, 2);
@@ -1983,9 +1969,8 @@
 			  int result)
 {
 	if (tx_pause) {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 1\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 1\n");
 		if (coex_dm->cur_ps_tdma == 1) {
 			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						true, 7);
@@ -2074,9 +2059,8 @@
 			}
 		}
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 0\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 0\n");
 		if (coex_dm->cur_ps_tdma == 5) {
 			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						true, 3);
@@ -2178,13 +2162,13 @@
 	int result;
 	u8 retry_cnt = 0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], TdmaDurationAdjust()\n");
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], TdmaDurationAdjust()\n");
 
 	if (!coex_dm->auto_tdma_adjust) {
 		coex_dm->auto_tdma_adjust = true;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], first run TdmaDurationAdjust()!!\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], first run TdmaDurationAdjust()!!\n");
 		if (sco_hid) {
 			if (tx_pause) {
 				if (max_interval == 1) {
@@ -2288,11 +2272,11 @@
 	} else {
 		/* accquire the BT TRx retry count from BT_Info byte2 */
 		retry_cnt = coex_sta->bt_retry_cnt;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], retry_cnt = %d\n", retry_cnt);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
-			  up, dn, m, n, wait_cnt);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], retry_cnt = %d\n", retry_cnt);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+			    up, dn, m, n, wait_cnt);
 		result = 0;
 		wait_cnt++;
 		/* no retry in the last 2-second duration */
@@ -2309,9 +2293,8 @@
 				up = 0;
 				dn = 0;
 				result = 1;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_TRACE_FW_DETAIL,
-					  "[BTCoex]Increase wifi duration!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+					    "[BTCoex]Increase wifi duration!!\n");
 			}
 		} else if (retry_cnt <= 3) {
 			up--;
@@ -2334,9 +2317,8 @@
 				dn = 0;
 				wait_cnt = 0;
 				result = -1;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_TRACE_FW_DETAIL,
-					  "Reduce wifi duration for retry<3\n");
+				btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+					    "Reduce wifi duration for retry<3\n");
 			}
 		} else {
 			if (wait_cnt == 1)
@@ -2352,12 +2334,12 @@
 			dn = 0;
 			wait_cnt = 0;
 			result = -1;
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "Decrease wifi duration for retryCounter>3!!\n");
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "Decrease wifi duration for retryCounter>3!!\n");
 		}
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], max Interval = %d\n", max_interval);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], max Interval = %d\n", max_interval);
 		if (max_interval == 1)
 			btc8192e_int1(btcoexist, tx_pause, result);
 		else if (max_interval == 2)
@@ -2373,11 +2355,11 @@
 	if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
 		bool scan = false, link = false, roam = false;
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], PsTdma type dismatch!!!, ");
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "curPsTdma=%d, recordPsTdma=%d\n",
-			  coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], PsTdma type dismatch!!!, ");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "curPsTdma=%d, recordPsTdma=%d\n",
+			    coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
 
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2388,9 +2370,8 @@
 						true,
 						coex_dm->tdma_adj_type);
 		else
-			BTC_PRINT(BTC_MSG_ALGORITHM,
-				  ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
 	}
 }
 
@@ -2594,8 +2575,8 @@
 	     btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
 	    (wifirssi_state == BTC_RSSI_STATE_LOW ||
 	     wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
 		long_dist = true;
 	}
 	if (long_dist) {
@@ -3100,105 +3081,105 @@
 {
 	u8 algorithm = 0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], RunCoexistMechanism()===>\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], RunCoexistMechanism()===>\n");
 
 	if (btcoexist->manual_control) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], return for Manual CTRL <===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], return for Manual CTRL <===\n");
 		return;
 	}
 
 	if (coex_sta->under_ips) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], wifi is under IPS !!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], wifi is under IPS !!!\n");
 		return;
 	}
 
 	algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
 	if (coex_sta->c2h_bt_inquiry_page &&
 	    (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BT is under inquiry/page scan !!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BT is under inquiry/page scan !!\n");
 		halbtc8192e2ant_action_bt_inquiry(btcoexist);
 		return;
 	}
 
 	coex_dm->cur_algorithm = algorithm;
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
 
 	if (halbtc8192e2ant_is_common_action(btcoexist)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Action 2-Ant common.\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Action 2-Ant common\n");
 		coex_dm->auto_tdma_adjust = false;
 	} else {
 		if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
-				  coex_dm->pre_algorithm,
-				  coex_dm->cur_algorithm);
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+				    coex_dm->pre_algorithm,
+				    coex_dm->cur_algorithm);
 			coex_dm->auto_tdma_adjust = false;
 		}
 		switch (coex_dm->cur_algorithm) {
 		case BT_8192E_2ANT_COEX_ALGO_SCO:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = SCO.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = SCO\n");
 			halbtc8192e2ant_action_sco(btcoexist);
 			break;
 		case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = SCO+PAN(EDR).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
 			halbtc8192e2ant_action_sco_pan(btcoexist);
 			break;
 		case BT_8192E_2ANT_COEX_ALGO_HID:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = HID.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = HID\n");
 			halbtc8192e2ant_action_hid(btcoexist);
 			break;
 		case BT_8192E_2ANT_COEX_ALGO_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = A2DP\n");
 			halbtc8192e2ant_action_a2dp(btcoexist);
 			break;
 		case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
 			halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
 			break;
 		case BT_8192E_2ANT_COEX_ALGO_PANEDR:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = PAN(EDR).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = PAN(EDR)\n");
 			halbtc8192e2ant_action_pan_edr(btcoexist);
 			break;
 		case BT_8192E_2ANT_COEX_ALGO_PANHS:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = HS mode.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = HS mode\n");
 			halbtc8192e2ant_action_pan_hs(btcoexist);
 			break;
 		case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = PAN+A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = PAN+A2DP\n");
 			halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
 			break;
 		case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
 			halbtc8192e2ant_action_pan_edr_hid(btcoexist);
 			break;
 		case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
 			btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
 			break;
 		case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = HID+A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = HID+A2DP\n");
 			halbtc8192e2ant_action_hid_a2dp(btcoexist);
 			break;
 		default:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "Action 2-Ant, algorithm = unknown!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "Action 2-Ant, algorithm = unknown!!\n");
 			/* halbtc8192e2ant_coex_alloff(btcoexist); */
 			break;
 		}
@@ -3212,8 +3193,8 @@
 	u16 u16tmp = 0;
 	u8 u8tmp = 0;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-		  "[BTCoex], 2Ant Init HW Config!!\n");
+	btc_iface_dbg(INTF_INIT,
+		      "[BTCoex], 2Ant Init HW Config!!\n");
 
 	if (backup) {
 		/* backup rf 0x1e value */
@@ -3296,8 +3277,8 @@
 
 void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-		  "[BTCoex], Coex Mechanism Init!!\n");
+	btc_iface_dbg(INTF_INIT,
+		      "[BTCoex], Coex Mechanism Init!!\n");
 	halbtc8192e2ant_init_coex_dm(btcoexist);
 }
 
@@ -3525,13 +3506,13 @@
 void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_IPS_ENTER == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], IPS ENTER notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], IPS ENTER notify\n");
 		coex_sta->under_ips = true;
 		halbtc8192e2ant_coex_alloff(btcoexist);
 	} else if (BTC_IPS_LEAVE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], IPS LEAVE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], IPS LEAVE notify\n");
 		coex_sta->under_ips = false;
 	}
 }
@@ -3539,12 +3520,12 @@
 void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_LPS_ENABLE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], LPS ENABLE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], LPS ENABLE notify\n");
 		coex_sta->under_lps = true;
 	} else if (BTC_LPS_DISABLE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], LPS DISABLE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], LPS DISABLE notify\n");
 		coex_sta->under_lps = false;
 	}
 }
@@ -3552,21 +3533,21 @@
 void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_SCAN_START == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN START notify\n");
 	else if (BTC_SCAN_FINISH == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN FINISH notify\n");
 }
 
 void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_ASSOCIATE_START == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT START notify\n");
 	else if (BTC_ASSOCIATE_FINISH == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT FINISH notify\n");
 }
 
 void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3582,11 +3563,11 @@
 		return;
 
 	if (BTC_MEDIA_CONNECT == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA connect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA connect notify\n");
 	else
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA disconnect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA disconnect notify\n");
 
 	/* only 2.4G we need to inform bt the chnl mask */
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -3606,10 +3587,10 @@
 	coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
 	coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x66 = 0x%x\n",
-		  h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
-		  h2c_parameter[2]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x66 = 0x%x\n",
+		    h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+		    h2c_parameter[2]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -3618,8 +3599,8 @@
 					      u8 type)
 {
 	if (type == BTC_PACKET_DHCP)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], DHCP Packet notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], DHCP Packet notify\n");
 }
 
 void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3637,19 +3618,19 @@
 		rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
 	coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-		  "[BTCoex], Bt info[%d], length=%d, hex data = [",
-		  rsp_source, length);
+	btc_iface_dbg(INTF_NOTIFY,
+		      "[BTCoex], Bt info[%d], length=%d, hex data = [",
+		      rsp_source, length);
 	for (i = 0; i < length; i++) {
 		coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
 		if (i == 1)
 			bt_info = tmp_buf[i];
 		if (i == length-1)
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x]\n", tmp_buf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x]\n", tmp_buf[i]);
 		else
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x, ", tmp_buf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x, ", tmp_buf[i]);
 	}
 
 	if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
@@ -3666,8 +3647,8 @@
 		 * because bt is reset and loss of the info.
 		 */
 		if ((coex_sta->bt_info_ext & BIT1)) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "bit1, send wifi BW&Chnl to BT!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "bit1, send wifi BW&Chnl to BT!!\n");
 			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
 					   &wifi_connected);
 			if (wifi_connected)
@@ -3683,8 +3664,8 @@
 		if ((coex_sta->bt_info_ext & BIT3)) {
 			if (!btcoexist->manual_control &&
 			    !btcoexist->stop_coex_dm) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "bit3, BT NOT ignore Wlan active!\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "bit3, BT NOT ignore Wlan active!\n");
 				halbtc8192e2ant_IgnoreWlanAct(btcoexist,
 							      FORCE_EXEC,
 							      false);
@@ -3742,25 +3723,25 @@
 
 	if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) {
 		coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BT Non-Connected idle!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BT Non-Connected idle!!!\n");
 	} else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
 		coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
 	} else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
 		   (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
 		coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
 	} else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) {
 		coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
 	} else {
 		coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
 	}
 
 	if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3788,7 +3769,7 @@
 
 void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+	btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
 
 	halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
 	ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3801,29 +3782,29 @@
 	struct btc_board_info *board_info = &btcoexist->board_info;
 	struct btc_stack_info *stack_info = &btcoexist->stack_info;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "=======================Periodical=======================\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "=======================Periodical=======================\n");
 	if (dis_ver_info_cnt <= 5) {
 		dis_ver_info_cnt += 1;
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "************************************************\n");
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-			  board_info->pg_ant_num, board_info->btdm_ant_num,
-			  board_info->btdm_ant_pos);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "BT stack/ hci ext ver = %s / %d\n",
-			  ((stack_info->profile_notified) ? "Yes" : "No"),
-			  stack_info->hci_version);
+		btc_iface_dbg(INTF_INIT,
+			      "************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+			      board_info->pg_ant_num, board_info->btdm_ant_num,
+			      board_info->btdm_ant_pos);
+		btc_iface_dbg(INTF_INIT,
+			      "BT stack/ hci ext ver = %s / %d\n",
+			      ((stack_info->profile_notified) ? "Yes" : "No"),
+			      stack_info->hci_version);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
 				   &bt_patch_ver);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-			  glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
-			  fw_ver, bt_patch_ver, bt_patch_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+			      glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+			      fw_ver, bt_patch_ver, bt_patch_ver);
+		btc_iface_dbg(INTF_INIT,
+			      "************************************************\n");
 	}
 
 #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 7e239d3..16add42 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -74,28 +74,28 @@
 			if (bt_rssi >= rssi_thresh +
 					BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
 				bt_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to High\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Low\n");
 			}
 		} else {
 			if (bt_rssi < rssi_thresh) {
 				bt_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Low\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "[BTCoex], BT Rssi thresh error!!\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "[BTCoex], BT Rssi thresh error!!\n");
 			return coex_sta->pre_bt_rssi_state;
 		}
 
@@ -104,12 +104,12 @@
 			if (bt_rssi >= rssi_thresh +
 					BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
 				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Medium\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_bt_rssi_state ==
 					BTC_RSSI_STATE_MEDIUM) ||
@@ -118,26 +118,26 @@
 			if (bt_rssi >= rssi_thresh1 +
 					BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
 				bt_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to High\n");
 			} else if (bt_rssi < rssi_thresh) {
 				bt_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Low\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Medium\n");
 			}
 		} else {
 			if (bt_rssi < rssi_thresh1) {
 				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Medium\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at High\n");
 			}
 		}
 	}
@@ -165,32 +165,28 @@
 			if (wifi_rssi >= rssi_thresh +
 					BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
 				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to High\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Low\n");
 			}
 		} else {
 			if (wifi_rssi < rssi_thresh) {
 				wifi_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Low\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-				  "[BTCoex], wifi RSSI thresh error!!\n");
+			btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+				    "[BTCoex], wifi RSSI thresh error!!\n");
 			return coex_sta->pre_wifi_rssi_state[index];
 		}
 
@@ -201,14 +197,12 @@
 			if (wifi_rssi >= rssi_thresh +
 					 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
 				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Medium\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_wifi_rssi_state[index] ==
 						BTC_RSSI_STATE_MEDIUM) ||
@@ -217,31 +211,26 @@
 			if (wifi_rssi >= rssi_thresh1 +
 					 BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
 				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to High\n");
 			} else if (wifi_rssi < rssi_thresh) {
 				wifi_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Low\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Medium\n");
 			}
 		} else {
 			if (wifi_rssi < rssi_thresh1) {
 				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Medium\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at High\n");
 			}
 		}
 	}
@@ -435,9 +424,9 @@
 
 	h2c_parameter[0] |= BIT0;	/* trigger*/
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -532,8 +521,8 @@
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
 	if (!bt_link_info->bt_link_exist) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], No BT link exists!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], No BT link exists!!!\n");
 		return algorithm;
 	}
 
@@ -548,27 +537,27 @@
 
 	if (numdiffprofile == 1) {
 		if (bt_link_info->sco_exist) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], BT Profile = SCO only\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], BT Profile = SCO only\n");
 			algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
 		} else {
 			if (bt_link_info->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = HID only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = HID only\n");
 				algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
 			} else if (bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = A2DP only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = A2DP only\n");
 				algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
 			} else if (bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = PAN(HS) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = PAN(HS) only\n");
 					algorithm =
 						BT_8723B_1ANT_COEX_ALGO_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = PAN(EDR) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = PAN(EDR) only\n");
 					algorithm =
 						BT_8723B_1ANT_COEX_ALGO_PANEDR;
 				}
@@ -577,21 +566,21 @@
 	} else if (numdiffprofile == 2) {
 		if (bt_link_info->sco_exist) {
 			if (bt_link_info->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = SCO + HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = SCO + HID\n");
 				algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
 			} else if (bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
 				algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
 			} else if (bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + PAN(HS)\n");
 					algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
 				}
@@ -599,32 +588,32 @@
 		} else {
 			if (bt_link_info->hid_exist &&
 			    bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = HID + A2DP\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = HID + A2DP\n");
 				algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
 			} else if (bt_link_info->hid_exist &&
 				   bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = HID + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = HID + PAN(HS)\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = HID + PAN(EDR)\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (bt_link_info->pan_exist &&
 				   bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
 				}
@@ -634,31 +623,31 @@
 		if (bt_link_info->sco_exist) {
 			if (bt_link_info->hid_exist &&
 			    bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
 				algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
 			} else if (bt_link_info->hid_exist &&
 				   bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (bt_link_info->pan_exist &&
 				   bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
 					algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
 				}
@@ -668,13 +657,13 @@
 			    bt_link_info->pan_exist &&
 			    bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
 				}
@@ -686,11 +675,11 @@
 			    bt_link_info->pan_exist &&
 			    bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
 					algorithm =
 					    BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
 				}
@@ -717,9 +706,9 @@
 		h2c_parameter[5] = 0xf9;  /*MCS5 or OFDM36 */
 	}
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set WiFi Low-Penalty Retry: %s",
-		  (low_penalty_ra ? "ON!!" : "OFF!!"));
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set WiFi Low-Penalty Retry: %s",
+		    (low_penalty_ra ? "ON!!" : "OFF!!"));
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
@@ -743,20 +732,20 @@
 					   u32 val0x6c0, u32 val0x6c4,
 					   u32 val0x6c8, u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
 	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -765,10 +754,10 @@
 				       u32 val0x6c4, u32 val0x6c8,
 				       u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
-		  (force_exec ? "force to" : ""),
-		  val0x6c0, val0x6c4, val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
+		    (force_exec ? "force to" : ""),
+		    val0x6c0, val0x6c4, val0x6cc);
 	coex_dm->cur_val0x6c0 = val0x6c0;
 	coex_dm->cur_val0x6c4 = val0x6c4;
 	coex_dm->cur_val0x6c8 = val0x6c8;
@@ -839,9 +828,9 @@
 	if (enable)
 		h2c_parameter[0] |= BIT0;	/* function enable */
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -849,16 +838,16 @@
 static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
 					    bool force_exec, bool enable)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s turn Ignore WlanAct %s\n",
-		  (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s turn Ignore WlanAct %s\n",
+		    (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
 	coex_dm->cur_ignore_wlan_act = enable;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
-			  coex_dm->pre_ignore_wlan_act,
-			  coex_dm->cur_ignore_wlan_act);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+			    coex_dm->pre_ignore_wlan_act,
+			    coex_dm->cur_ignore_wlan_act);
 
 		if (coex_dm->pre_ignore_wlan_act ==
 		    coex_dm->cur_ignore_wlan_act)
@@ -882,8 +871,8 @@
 
 	if (ap_enable) {
 		if ((byte1 & BIT4) && !(byte1 & BIT5)) {
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "[BTCoex], FW for 1Ant AP mode\n");
+			btc_iface_dbg(INTF_NOTIFY,
+				      "[BTCoex], FW for 1Ant AP mode\n");
 			real_byte1 &= ~BIT4;
 			real_byte1 |= BIT5;
 
@@ -904,13 +893,13 @@
 	coex_dm->ps_tdma_para[3] = byte4;
 	coex_dm->ps_tdma_para[4] = real_byte5;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
-		  h2c_parameter[0],
-		  h2c_parameter[1] << 24 |
-		  h2c_parameter[2] << 16 |
-		  h2c_parameter[3] << 8 |
-		  h2c_parameter[4]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+		    h2c_parameter[0],
+		    h2c_parameter[1] << 24 |
+		    h2c_parameter[2] << 16 |
+		    h2c_parameter[3] << 8 |
+		    h2c_parameter[4]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
@@ -929,22 +918,22 @@
 				    bool force_exec,
 				    u8 lps_val, u8 rpwm_val)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
-		  (force_exec ? "force to" : ""), lps_val, rpwm_val);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+		    (force_exec ? "force to" : ""), lps_val, rpwm_val);
 	coex_dm->cur_lps = lps_val;
 	coex_dm->cur_rpwm = rpwm_val;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
-			  coex_dm->cur_lps, coex_dm->cur_rpwm);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
+			    coex_dm->cur_lps, coex_dm->cur_rpwm);
 
 		if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
 		    (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
-				  coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
+				    coex_dm->pre_rpwm, coex_dm->cur_rpwm);
 
 			return;
 		}
@@ -958,8 +947,8 @@
 static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
 					 bool low_penalty_ra)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-		  "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+	btc_alg_dbg(ALGO_BT_MONITOR,
+		    "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
 
 	halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
 }
@@ -1174,13 +1163,13 @@
 
 	if (!force_exec) {
 		if (coex_dm->cur_ps_tdma_on)
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], ******** TDMA(on, %d) *********\n",
-				  coex_dm->cur_ps_tdma);
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], ******** TDMA(on, %d) *********\n",
+				    coex_dm->cur_ps_tdma);
 		else
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], ******** TDMA(off, %d) ********\n",
-				  coex_dm->cur_ps_tdma);
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], ******** TDMA(off, %d) ********\n",
+				    coex_dm->cur_ps_tdma);
 
 		if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
 		    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1394,45 +1383,45 @@
 
 	if (!wifi_connected &&
 	    BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
 		halbtc8723b1ant_sw_mechanism(btcoexist, false);
 		commom = true;
 	} else if (wifi_connected &&
 		   (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
 		    coex_dm->bt_status)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi connected + BT non connected-idle!!\n");
 		halbtc8723b1ant_sw_mechanism(btcoexist, false);
 		commom = true;
 	} else if (!wifi_connected &&
 		   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
 		    coex_dm->bt_status)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
 		halbtc8723b1ant_sw_mechanism(btcoexist, false);
 		commom = true;
 	} else if (wifi_connected &&
 		   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
 		    coex_dm->bt_status)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi connected + BT connected-idle!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi connected + BT connected-idle!!\n");
 		halbtc8723b1ant_sw_mechanism(btcoexist, false);
 		commom = true;
 	} else if (!wifi_connected &&
 		   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
 		    coex_dm->bt_status)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
 		halbtc8723b1ant_sw_mechanism(btcoexist, false);
 		commom = true;
 	} else {
 		if (wifi_busy)
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
 		else
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
 
 		commom = false;
 	}
@@ -1451,8 +1440,8 @@
 	u8 retry_count = 0, bt_info_ext;
 	bool wifi_busy = false;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], TdmaDurationAdjustForAcl()\n");
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], TdmaDurationAdjustForAcl()\n");
 
 	if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
 		wifi_busy = true;
@@ -1481,8 +1470,8 @@
 
 	if (!coex_dm->auto_tdma_adjust) {
 		coex_dm->auto_tdma_adjust = true;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], first run TdmaDurationAdjust()!!\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], first run TdmaDurationAdjust()!!\n");
 
 		halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
 		coex_dm->tdma_adj_type = 2;
@@ -1513,9 +1502,8 @@
 				up = 0;
 				dn = 0;
 				result = 1;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_TRACE_FW_DETAIL,
-					  "[BTCoex], Increase wifi duration!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+					    "[BTCoex], Increase wifi duration!!\n");
 			}
 		} else if (retry_count <= 3) {
 			up--;
@@ -1538,9 +1526,8 @@
 				dn = 0;
 				wait_count = 0;
 				result = -1;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_TRACE_FW_DETAIL,
-					  "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+					    "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
 			}
 		} else {
 			if (wait_count == 1)
@@ -1556,8 +1543,8 @@
 			dn = 0;
 			wait_count = 0;
 			result = -1;
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
 		}
 
 		if (result == -1) {
@@ -1602,9 +1589,9 @@
 			}
 		} else {	  /*no change */
 			/*if busy / idle change */
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex],********* TDMA(on, %d) ********\n",
-				  coex_dm->cur_ps_tdma);
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex],********* TDMA(on, %d) ********\n",
+				    coex_dm->cur_ps_tdma);
 		}
 
 		if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
@@ -2010,15 +1997,15 @@
 	bool scan = false, link = false, roam = false;
 	bool under_4way = false, ap_enable = false;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], CoexForWifiConnect()===>\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], CoexForWifiConnect()===>\n");
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
 			   &under_4way);
 	if (under_4way) {
 		halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
 		return;
 	}
 
@@ -2032,8 +2019,8 @@
 		else
 			halbtc8723b1ant_action_wifi_connected_special_packet(
 								     btcoexist);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
 		return;
 	}
 
@@ -2102,58 +2089,58 @@
 	if (!halbtc8723b1ant_is_common_action(btcoexist)) {
 		switch (coex_dm->cur_algorithm) {
 		case BT_8723B_1ANT_COEX_ALGO_SCO:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = SCO.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = SCO\n");
 			halbtc8723b1ant_action_sco(btcoexist);
 			break;
 		case BT_8723B_1ANT_COEX_ALGO_HID:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = HID.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = HID\n");
 			halbtc8723b1ant_action_hid(btcoexist);
 			break;
 		case BT_8723B_1ANT_COEX_ALGO_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = A2DP\n");
 			halbtc8723b1ant_action_a2dp(btcoexist);
 			break;
 		case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = A2DP+PAN(HS).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
 			halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
 			break;
 		case BT_8723B_1ANT_COEX_ALGO_PANEDR:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = PAN(EDR).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = PAN(EDR)\n");
 			halbtc8723b1ant_action_pan_edr(btcoexist);
 			break;
 		case BT_8723B_1ANT_COEX_ALGO_PANHS:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = HS mode.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = HS mode\n");
 			halbtc8723b1ant_action_pan_hs(btcoexist);
 			break;
 		case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = PAN+A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = PAN+A2DP\n");
 			halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
 			break;
 		case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = PAN(EDR)+HID.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
 			halbtc8723b1ant_action_pan_edr_hid(btcoexist);
 			break;
 		case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = HID+A2DP+PAN.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
 			btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
 			break;
 		case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = HID+A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = HID+A2DP\n");
 			halbtc8723b1ant_action_hid_a2dp(btcoexist);
 			break;
 		default:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = coexist All Off!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = coexist All Off!!\n");
 			break;
 		}
 		coex_dm->pre_algorithm = coex_dm->cur_algorithm;
@@ -2171,24 +2158,24 @@
 	u32 wifi_link_status = 0;
 	u32 num_of_wifi_link = 0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], RunCoexistMechanism()===>\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], RunCoexistMechanism()===>\n");
 
 	if (btcoexist->manual_control) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
 		return;
 	}
 
 	if (btcoexist->stop_coex_dm) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
 		return;
 	}
 
 	if (coex_sta->under_ips) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], wifi is under IPS !!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], wifi is under IPS !!!\n");
 		return;
 	}
 
@@ -2267,8 +2254,8 @@
 	if (!wifi_connected) {
 		bool scan = false, link = false, roam = false;
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], wifi is non connected-idle !!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], wifi is non connected-idle !!!\n");
 
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2305,8 +2292,8 @@
 	u8 u8tmp = 0;
 	u32 cnt_bt_cal_chk = 0;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-		  "[BTCoex], 1Ant Init HW Config!!\n");
+	btc_iface_dbg(INTF_INIT,
+		      "[BTCoex], 1Ant Init HW Config!!\n");
 
 	if (backup) {/* backup rf 0x1e value */
 		coex_dm->backup_arfr_cnt1 =
@@ -2333,14 +2320,14 @@
 		u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
 		cnt_bt_cal_chk++;
 		if (u32tmp & BIT0) {
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-				  "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
-				  cnt_bt_cal_chk);
+			btc_iface_dbg(INTF_INIT,
+				      "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
+				      cnt_bt_cal_chk);
 			mdelay(50);
 		} else {
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-				  "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
-				  cnt_bt_cal_chk);
+			btc_iface_dbg(INTF_INIT,
+				      "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
+				      cnt_bt_cal_chk);
 			break;
 		}
 	}
@@ -2383,8 +2370,8 @@
 
 void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-		  "[BTCoex], Coex Mechanism Init!!\n");
+	btc_iface_dbg(INTF_INIT,
+		      "[BTCoex], Coex Mechanism Init!!\n");
 
 	btcoexist->stop_coex_dm = false;
 
@@ -2677,8 +2664,8 @@
 		return;
 
 	if (BTC_IPS_ENTER == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], IPS ENTER notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], IPS ENTER notify\n");
 		coex_sta->under_ips = true;
 
 		halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
@@ -2689,8 +2676,8 @@
 						     NORMAL_EXEC, 0);
 		halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
 	} else if (BTC_IPS_LEAVE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], IPS LEAVE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], IPS LEAVE notify\n");
 		coex_sta->under_ips = false;
 
 		halbtc8723b1ant_init_hw_config(btcoexist, false);
@@ -2705,12 +2692,12 @@
 		return;
 
 	if (BTC_LPS_ENABLE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], LPS ENABLE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], LPS ENABLE notify\n");
 		coex_sta->under_lps = true;
 	} else if (BTC_LPS_DISABLE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], LPS DISABLE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], LPS DISABLE notify\n");
 		coex_sta->under_lps = false;
 	}
 }
@@ -2753,15 +2740,15 @@
 	}
 
 	if (BTC_SCAN_START == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN START notify\n");
 		if (!wifi_connected)	/* non-connected scan */
 			btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
 		else	/* wifi is connected */
 			btc8723b1ant_action_wifi_conn_scan(btcoexist);
 	} else if (BTC_SCAN_FINISH == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN FINISH notify\n");
 		if (!wifi_connected)	/* non-connected scan */
 			btc8723b1ant_action_wifi_not_conn(btcoexist);
 		else
@@ -2802,12 +2789,12 @@
 	}
 
 	if (BTC_ASSOCIATE_START == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT START notify\n");
 		btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist);
 	} else if (BTC_ASSOCIATE_FINISH == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT FINISH notify\n");
 
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
 				   &wifi_connected);
@@ -2830,11 +2817,11 @@
 		return;
 
 	if (BTC_MEDIA_CONNECT == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA connect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA connect notify\n");
 	else
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA disconnect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA disconnect notify\n");
 
 	/* only 2.4G we need to inform bt the chnl mask */
 	btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -2855,10 +2842,10 @@
 	coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
 	coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x66 = 0x%x\n",
-		  h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
-		  h2c_parameter[2]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x66 = 0x%x\n",
+		    h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+		    h2c_parameter[2]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -2900,8 +2887,8 @@
 
 	if (BTC_PACKET_DHCP == type ||
 	    BTC_PACKET_EAPOL == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], special Packet(%d) notify\n", type);
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], special Packet(%d) notify\n", type);
 		halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
 	}
 }
@@ -2921,19 +2908,19 @@
 		rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
 	coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-		  "[BTCoex], Bt info[%d], length=%d, hex data = [",
-		  rsp_source, length);
+	btc_iface_dbg(INTF_NOTIFY,
+		      "[BTCoex], Bt info[%d], length=%d, hex data = [",
+		      rsp_source, length);
 	for (i = 0; i < length; i++) {
 		coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
 		if (i == 1)
 			bt_info = tmp_buf[i];
 		if (i == length - 1)
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x]\n", tmp_buf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x]\n", tmp_buf[i]);
 		else
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x, ", tmp_buf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x, ", tmp_buf[i]);
 	}
 
 	if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
@@ -2950,8 +2937,8 @@
 		 * because bt is reset and loss of the info.
 		 */
 		if (coex_sta->bt_info_ext & BIT1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
 			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
 					   &wifi_connected);
 			if (wifi_connected)
@@ -2965,8 +2952,8 @@
 		if (coex_sta->bt_info_ext & BIT3) {
 			if (!btcoexist->manual_control &&
 			    !btcoexist->stop_coex_dm) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
 				halbtc8723b1ant_ignore_wlan_act(btcoexist,
 								FORCE_EXEC,
 								false);
@@ -3021,30 +3008,30 @@
 
 	if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) {
 		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
 	/* connection exists but no busy */
 	} else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
 		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
 	} else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
 		(bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
 		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
 	} else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
 		if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
 			coex_dm->auto_tdma_adjust = false;
 
 		coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
 	} else {
 		coex_dm->bt_status =
 			BT_8723B_1ANT_BT_STATUS_MAX;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
 	}
 
 	if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3060,7 +3047,7 @@
 
 void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+	btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
 
 	btcoexist->stop_coex_dm = true;
 
@@ -3078,11 +3065,11 @@
 
 void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n");
+	btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Pnp notify\n");
 
 	if (BTC_WIFI_PNP_SLEEP == pnp_state) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], Pnp notify to SLEEP\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], Pnp notify to SLEEP\n");
 		btcoexist->stop_coex_dm = true;
 		halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false,
 					   true);
@@ -3092,8 +3079,8 @@
 		halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
 		halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
 	} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], Pnp notify to WAKE UP\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], Pnp notify to WAKE UP\n");
 		btcoexist->stop_coex_dm = false;
 		halbtc8723b1ant_init_hw_config(btcoexist, false);
 		halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3103,8 +3090,8 @@
 
 void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], *****************Coex DM Reset****************\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], *****************Coex DM Reset****************\n");
 
 	halbtc8723b1ant_init_hw_config(btcoexist, false);
 	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -3119,31 +3106,31 @@
 	static u8 dis_ver_info_cnt;
 	u32 fw_ver = 0, bt_patch_ver = 0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], ==========================Periodical===========================\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], ==========================Periodical===========================\n");
 
 	if (dis_ver_info_cnt <= 5) {
 		dis_ver_info_cnt += 1;
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], ****************************************************************\n");
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-			  board_info->pg_ant_num, board_info->btdm_ant_num,
-			  board_info->btdm_ant_pos);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-			  ((stack_info->profile_notified) ? "Yes" : "No"),
-			  stack_info->hci_version);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], ****************************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+			      board_info->pg_ant_num, board_info->btdm_ant_num,
+			      board_info->btdm_ant_pos);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+			      stack_info->profile_notified ? "Yes" : "No",
+			      stack_info->hci_version);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
 				   &bt_patch_ver);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-			  glcoex_ver_date_8723b_1ant,
-			  glcoex_ver_8723b_1ant, fw_ver,
-			  bt_patch_ver, bt_patch_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], ****************************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+			      glcoex_ver_date_8723b_1ant,
+			      glcoex_ver_8723b_1ant, fw_ver,
+			      bt_patch_ver, bt_patch_ver);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], ****************************************************************\n");
 	}
 
 #if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index c43ab59..5f488ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -72,32 +72,28 @@
 			if (bt_rssi >= rssi_thresh +
 				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				bt_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to High\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Low\n");
 			}
 		} else {
 			if (bt_rssi < rssi_thresh) {
 				bt_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Low\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "[BTCoex], BT Rssi thresh error!!\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "[BTCoex], BT Rssi thresh error!!\n");
 			return coex_sta->pre_bt_rssi_state;
 		}
 
@@ -106,14 +102,12 @@
 			if (bt_rssi >= rssi_thresh +
 				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Medium\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_bt_rssi_state ==
 						BTC_RSSI_STATE_MEDIUM) ||
@@ -122,31 +116,26 @@
 			if (bt_rssi >= rssi_thresh1 +
 				       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				bt_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to High\n");
 			} else if (bt_rssi < rssi_thresh) {
 				bt_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Low\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "stay at Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Medium\n");
 			}
 		} else {
 			if (bt_rssi < rssi_thresh1) {
 				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Medium\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state "
-					  "stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at High\n");
 			}
 		}
 	}
@@ -173,36 +162,28 @@
 			if (wifi_rssi >= rssi_thresh +
 					 BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to High\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Low\n");
 			}
 		} else {
 			if (wifi_rssi < rssi_thresh) {
 				wifi_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Low\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-				  "[BTCoex], wifi RSSI thresh error!!\n");
+			btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+				    "[BTCoex], wifi RSSI thresh error!!\n");
 			return coex_sta->pre_wifi_rssi_state[index];
 		}
 
@@ -213,16 +194,12 @@
 			if (wifi_rssi >= rssi_thresh +
 					BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Medium\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_wifi_rssi_state[index] ==
 						BTC_RSSI_STATE_MEDIUM) ||
@@ -231,36 +208,26 @@
 			if (wifi_rssi >= rssi_thresh1 +
 					 BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
 				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to High\n");
 			} else if (wifi_rssi < rssi_thresh) {
 				wifi_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Low\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "stay at Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Medium\n");
 			}
 		} else {
 			if (wifi_rssi < rssi_thresh1) {
 				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Medium\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state "
-					  "stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at High\n");
 			}
 		}
 	}
@@ -292,12 +259,12 @@
 	coex_sta->low_priority_tx = reg_lp_tx;
 	coex_sta->low_priority_rx = reg_lp_rx;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-		  "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
-		  reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-		  "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
-		  reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+	btc_alg_dbg(ALGO_BT_MONITOR,
+		    "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+		    reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+	btc_alg_dbg(ALGO_BT_MONITOR,
+		    "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+		    reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
 
 	/* reset counter */
 	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -311,9 +278,9 @@
 
 	h2c_parameter[0] |= BIT0;	/* trigger */
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -427,8 +394,8 @@
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
 	if (!bt_link_info->bt_link_exist) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], No BT link exists!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], No BT link exists!!!\n");
 		return algorithm;
 	}
 
@@ -443,27 +410,27 @@
 
 	if (num_of_diff_profile == 1) {
 		if (bt_link_info->sco_exist) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], SCO only\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], SCO only\n");
 			algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
 		} else {
 			if (bt_link_info->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], HID only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], HID only\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
 			} else if (bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], A2DP only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], A2DP only\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
 			} else if (bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], PAN(HS) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], PAN(HS) only\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], PAN(EDR) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], PAN(EDR) only\n");
 					algorithm =
 						BT_8723B_2ANT_COEX_ALGO_PANEDR;
 				}
@@ -472,21 +439,21 @@
 	} else if (num_of_diff_profile == 2) {
 		if (bt_link_info->sco_exist) {
 			if (bt_link_info->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], SCO + HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], SCO + HID\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], SCO + A2DP ==> SCO\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], SCO + A2DP ==> SCO\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + PAN(HS)\n");
 					algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + PAN(EDR)\n");
 					algorithm =
 					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				}
@@ -494,31 +461,31 @@
 		} else {
 			if (bt_link_info->hid_exist &&
 			    bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], HID + A2DP\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], HID + A2DP\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
 			} else if (bt_link_info->hid_exist &&
 				   bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + PAN(HS)\n");
 					algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + PAN(EDR)\n");
 					algorithm =
 					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (bt_link_info->pan_exist &&
 				   bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], A2DP + PAN(HS)\n");
 					algorithm =
 					    BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex],A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex],A2DP + PAN(EDR)\n");
 					algorithm =
 					    BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
 				}
@@ -528,37 +495,32 @@
 		if (bt_link_info->sco_exist) {
 			if (bt_link_info->hid_exist &&
 			    bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], SCO + HID + A2DP"
-					  " ==> HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], SCO + HID + A2DP ==> HID\n");
 				algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (bt_link_info->hid_exist &&
 				   bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + HID + "
-						  "PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + HID + PAN(HS)\n");
 					algorithm =
 					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + HID + "
-						  "PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + HID + PAN(EDR)\n");
 					algorithm =
 					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (bt_link_info->pan_exist &&
 				   bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + A2DP + "
-						  "PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + A2DP + PAN(HS)\n");
 					algorithm =
 					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + A2DP + "
-						  "PAN(EDR) ==> HID\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
 					algorithm =
 					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				}
@@ -568,15 +530,13 @@
 			    bt_link_info->pan_exist &&
 			    bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + A2DP + "
-						  "PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + A2DP + PAN(HS)\n");
 					algorithm =
 					    BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + A2DP + "
-						  "PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + A2DP + PAN(EDR)\n");
 					algorithm =
 					BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
 				}
@@ -588,13 +548,11 @@
 			    bt_link_info->pan_exist &&
 			    bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], Error!!! SCO + HID"
-						  " + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + HID + A2DP +"
-						  " PAN(EDR)==>PAN(EDR)+HID\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
 					algorithm =
 					    BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
 				}
@@ -624,17 +582,15 @@
 	if (wifi_connected) {
 		if (bt_hs_on) {
 			if (bt_hs_rssi > 37) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-					  "[BTCoex], Need to decrease bt "
-					  "power for HS mode!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW,
+					    "[BTCoex], Need to decrease bt power for HS mode!!\n");
 				ret = true;
 			}
 		} else {
 			if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
 			    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-					  "[BTCoex], Need to decrease bt "
-					  "power for Wifi is connected!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW,
+					    "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
 				ret = true;
 			}
 		}
@@ -653,10 +609,10 @@
 	 */
 	h2c_parameter[0] = dac_swing_lvl;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
@@ -671,9 +627,9 @@
 	if (dec_bt_pwr)
 		h2c_parameter[0] |= BIT1;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
-		  (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+		    (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
@@ -681,15 +637,15 @@
 static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
 				    bool force_exec, bool dec_bt_pwr)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s Dec BT power = %s\n",
-		  (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s Dec BT power = %s\n",
+		    force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF");
 	coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
-			  coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+			    coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
 
 		if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
 			return;
@@ -702,17 +658,16 @@
 static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
 					  bool force_exec, u8 fw_dac_swing_lvl)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s set FW Dac Swing level = %d\n",
-		  (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s set FW Dac Swing level = %d\n",
+		    (force_exec ? "force to" : ""), fw_dac_swing_lvl);
 	coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], preFwDacSwingLvl=%d, "
-			  "curFwDacSwingLvl=%d\n",
-			  coex_dm->pre_fw_dac_swing_lvl,
-			  coex_dm->cur_fw_dac_swing_lvl);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+			    coex_dm->pre_fw_dac_swing_lvl,
+			    coex_dm->cur_fw_dac_swing_lvl);
 
 		if (coex_dm->pre_fw_dac_swing_lvl ==
 		   coex_dm->cur_fw_dac_swing_lvl)
@@ -729,16 +684,16 @@
 {
 	if (rx_rf_shrink_on) {
 		/* Shrink RF Rx LPF corner */
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Shrink RF Rx LPF corner!!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Shrink RF Rx LPF corner!!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
 					  0xfffff, 0xffffc);
 	} else {
 		/* Resume RF Rx LPF corner */
 		/* After initialized, we can use coex_dm->btRf0x1eBackup */
 		if (btcoexist->initilized) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-				  "[BTCoex], Resume RF Rx LPF corner!!\n");
+			btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+				    "[BTCoex], Resume RF Rx LPF corner!!\n");
 			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
 						  0xfffff,
 						  coex_dm->bt_rf0x1e_backup);
@@ -749,18 +704,17 @@
 static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
 				   bool force_exec, bool rx_rf_shrink_on)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn Rx RF Shrink = %s\n",
-		  (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
-		  "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn Rx RF Shrink = %s\n",
+		    (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
+						     "ON" : "OFF"));
 	coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], bPreRfRxLpfShrink=%d, "
-			  "bCurRfRxLpfShrink=%d\n",
-			  coex_dm->pre_rf_rx_lpf_shrink,
-			  coex_dm->cur_rf_rx_lpf_shrink);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+			    coex_dm->pre_rf_rx_lpf_shrink,
+			    coex_dm->cur_rf_rx_lpf_shrink);
 
 		if (coex_dm->pre_rf_rx_lpf_shrink ==
 		    coex_dm->cur_rf_rx_lpf_shrink)
@@ -788,9 +742,9 @@
 		h2c_parameter[5] = 0xf9;  /*MCS5 or OFDM36*/
 	}
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set WiFi Low-Penalty Retry: %s",
-		  (low_penalty_ra ? "ON!!" : "OFF!!"));
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set WiFi Low-Penalty Retry: %s",
+		    (low_penalty_ra ? "ON!!" : "OFF!!"));
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
@@ -799,18 +753,17 @@
 					bool force_exec, bool low_penalty_ra)
 {
 	/*return; */
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn LowPenaltyRA = %s\n",
-		  (force_exec ? "force to" : ""), (low_penalty_ra ?
-		  "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn LowPenaltyRA = %s\n",
+		    (force_exec ? "force to" : ""), (low_penalty_ra ?
+						     "ON" : "OFF"));
 	coex_dm->cur_low_penalty_ra = low_penalty_ra;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], bPreLowPenaltyRa=%d, "
-			  "bCurLowPenaltyRa=%d\n",
-			  coex_dm->pre_low_penalty_ra,
-			  coex_dm->cur_low_penalty_ra);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+			    coex_dm->pre_low_penalty_ra,
+			    coex_dm->cur_low_penalty_ra);
 
 		if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
 			return;
@@ -824,8 +777,8 @@
 					   u32 level)
 {
 	u8 val = (u8) level;
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], Write SwDacSwing = 0x%x\n", level);
 	btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
 }
 
@@ -843,20 +796,20 @@
 				   bool force_exec, bool dac_swing_on,
 				   u32 dac_swing_lvl)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
-		  (force_exec ? "force to" : ""),
-		  (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+		    (force_exec ? "force to" : ""),
+		    (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
 	coex_dm->cur_dac_swing_on = dac_swing_on;
 	coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
-			  " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
-			  coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
-			  coex_dm->cur_dac_swing_on,
-			  coex_dm->cur_dac_swing_lvl);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+			    coex_dm->pre_dac_swing_on,
+			    coex_dm->pre_dac_swing_lvl,
+			    coex_dm->cur_dac_swing_on,
+			    coex_dm->cur_dac_swing_lvl);
 
 		if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
 		    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -877,8 +830,8 @@
 
 	/*  BB AGC Gain Table */
 	if (agc_table_en) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], BB Agc Table On!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], BB Agc Table On!\n");
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
@@ -887,8 +840,8 @@
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], BB Agc Table Off!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], BB Agc Table Off!\n");
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
 		btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -901,15 +854,15 @@
 	/* RF Gain */
 	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
 	if (agc_table_en) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Agc Table On!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Agc Table On!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
 					  0xfffff, 0x38fff);
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
 					  0xfffff, 0x38ffe);
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Agc Table Off!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Agc Table Off!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
 					  0xfffff, 0x380c3);
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
@@ -920,15 +873,15 @@
 	btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
 
 	if (agc_table_en) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Agc Table On!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Agc Table On!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
 					  0xfffff, 0x38fff);
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
 					  0xfffff, 0x38ffe);
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Agc Table Off!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Agc Table Off!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
 					  0xfffff, 0x380c3);
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
@@ -946,16 +899,17 @@
 static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
 				   bool force_exec, bool agc_table_en)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s %s Agc Table\n",
-		  (force_exec ? "force to" : ""),
-		  (agc_table_en ? "Enable" : "Disable"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s %s Agc Table\n",
+		    (force_exec ? "force to" : ""),
+		    (agc_table_en ? "Enable" : "Disable"));
 	coex_dm->cur_agc_table_en = agc_table_en;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
-			  coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+			    coex_dm->pre_agc_table_en,
+			    coex_dm->cur_agc_table_en);
 
 		if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
 			return;
@@ -969,20 +923,20 @@
 					u32 val0x6c0, u32 val0x6c4,
 					u32 val0x6c8, u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
 	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -991,29 +945,24 @@
 				    u32 val0x6c4, u32 val0x6c8,
 				    u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
-		  " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
-		  (force_exec ? "force to" : ""), val0x6c0,
-		  val0x6c4, val0x6c8, val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+		    force_exec ? "force to" : "",
+		    val0x6c0, val0x6c4, val0x6c8, val0x6cc);
 	coex_dm->cur_val0x6c0 = val0x6c0;
 	coex_dm->cur_val0x6c4 = val0x6c4;
 	coex_dm->cur_val0x6c8 = val0x6c8;
 	coex_dm->cur_val0x6cc = val0x6cc;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], preVal0x6c0=0x%x, "
-			  "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
-			  "preVal0x6cc=0x%x !!\n",
-			  coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
-			  coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], curVal0x6c0=0x%x, "
-			  "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
-			  "curVal0x6cc=0x%x !!\n",
-			  coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
-			  coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+			    coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+			    coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+			    coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+			    coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
 
 		if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
 		    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1099,9 +1048,9 @@
 	if (enable)
 		h2c_parameter[0] |= BIT0;/* function enable*/
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set FW for BT Ignore Wlan_Act, "
-		  "FW write 0x63=0x%x\n", h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -1109,17 +1058,16 @@
 static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
 					 bool force_exec, bool enable)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s turn Ignore WlanAct %s\n",
-		  (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s turn Ignore WlanAct %s\n",
+		    (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
 	coex_dm->cur_ignore_wlan_act = enable;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], bPreIgnoreWlanAct = %d, "
-			  "bCurIgnoreWlanAct = %d!!\n",
-			  coex_dm->pre_ignore_wlan_act,
-			  coex_dm->cur_ignore_wlan_act);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+			    coex_dm->pre_ignore_wlan_act,
+			    coex_dm->cur_ignore_wlan_act);
 
 		if (coex_dm->pre_ignore_wlan_act ==
 		    coex_dm->cur_ignore_wlan_act)
@@ -1147,11 +1095,11 @@
 	coex_dm->ps_tdma_para[3] = byte4;
 	coex_dm->ps_tdma_para[4] = byte5;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
-		  h2c_parameter[0],
-		  h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
-		  h2c_parameter[3] << 8 | h2c_parameter[4]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+		    h2c_parameter[0],
+		    h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+		    h2c_parameter[3] << 8 | h2c_parameter[4]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
@@ -1203,7 +1151,6 @@
 
 		/* Force GNT_BT to low */
 		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
-		btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
 
 		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
 			/* tell firmware "no antenna inverse" */
@@ -1211,19 +1158,25 @@
 			h2c_parameter[1] = 1;  /* ext switch type */
 			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
 						h2c_parameter);
+			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
 		} else {
 			/* tell firmware "antenna inverse" */
 			h2c_parameter[0] = 1;
 			h2c_parameter[1] = 1;  /* ext switch type */
 			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
 						h2c_parameter);
+			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
 		}
 	}
 
 	/* ext switch setting */
 	if (use_ext_switch) {
 		/* fixed internal switch S1->WiFi, S0->BT */
-		btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
+			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+		else
+			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+
 		switch (antpos_type) {
 		case BTC_ANT_WIFI_AT_MAIN:
 			/* ext switch main at wifi */
@@ -1255,20 +1208,20 @@
 static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
 				 bool turn_on, u8 type)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s turn %s PS TDMA, type=%d\n",
-		  (force_exec ? "force to" : ""),
-		  (turn_on ? "ON" : "OFF"), type);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+		    (force_exec ? "force to" : ""),
+		    (turn_on ? "ON" : "OFF"), type);
 	coex_dm->cur_ps_tdma_on = turn_on;
 	coex_dm->cur_ps_tdma = type;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
-			  coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
-			  coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+			    coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+			    coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
 
 		if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
 		    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1466,8 +1419,8 @@
 		btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
 				   &low_pwr_disable);
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi non-connected idle!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi non-connected idle!!\n");
 
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
 					  0x0);
@@ -1490,9 +1443,8 @@
 					   BTC_SET_ACT_DISABLE_LOW_POWER,
 					   &low_pwr_disable);
 
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi connected + "
-				  "BT non connected-idle!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi connected + BT non connected-idle!!\n");
 
 			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
 						  0xfffff, 0x0);
@@ -1518,9 +1470,8 @@
 
 			if (bt_hs_on)
 				return false;
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi connected + "
-				  "BT connected-idle!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi connected + BT connected-idle!!\n");
 
 			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
 						  0xfffff, 0x0);
@@ -1544,17 +1495,15 @@
 					   &low_pwr_disable);
 
 			if (wifi_busy) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], Wifi Connected-Busy + "
-					  "BT Busy!!\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
 				common = false;
 			} else {
 				if (bt_hs_on)
 					return false;
 
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], Wifi Connected-Idle + "
-					  "BT Busy!!\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
 
 				btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
 							  0x1, 0xfffff, 0x0);
@@ -1592,9 +1541,8 @@
 {
 	/* Set PS TDMA for max interval == 1 */
 	if (tx_pause) {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 1\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 1\n");
 
 		if (coex_dm->cur_ps_tdma == 71) {
 			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1690,9 +1638,8 @@
 			}
 		}
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 0\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 0\n");
 		if (coex_dm->cur_ps_tdma == 5) {
 			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
 			coex_dm->tdma_adj_type = 71;
@@ -1790,9 +1737,8 @@
 {
 	/* Set PS TDMA for max interval == 2 */
 	if (tx_pause) {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 1\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 1\n");
 		if (coex_dm->cur_ps_tdma == 1) {
 			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
 			coex_dm->tdma_adj_type = 6;
@@ -1873,9 +1819,8 @@
 			}
 		}
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 0\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 0\n");
 		if (coex_dm->cur_ps_tdma == 5) {
 			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
 			coex_dm->tdma_adj_type = 2;
@@ -1963,9 +1908,8 @@
 {
 	/* Set PS TDMA for max interval == 3 */
 	if (tx_pause) {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 1\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 1\n");
 		if (coex_dm->cur_ps_tdma == 1) {
 			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
 			coex_dm->tdma_adj_type = 7;
@@ -2046,9 +1990,8 @@
 			}
 		}
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM,
-			  ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 0\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 0\n");
 		if (coex_dm->cur_ps_tdma == 5) {
 			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
 			coex_dm->tdma_adj_type = 3;
@@ -2140,13 +2083,13 @@
 	s32 result;
 	u8 retry_count = 0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], TdmaDurationAdjust()\n");
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], TdmaDurationAdjust()\n");
 
 	if (!coex_dm->auto_tdma_adjust) {
 		coex_dm->auto_tdma_adjust = true;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], first run TdmaDurationAdjust()!!\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], first run TdmaDurationAdjust()!!\n");
 		if (sco_hid) {
 			if (tx_pause) {
 				if (max_interval == 1) {
@@ -2250,11 +2193,11 @@
 	} else {
 		/*accquire the BT TRx retry count from BT_Info byte2*/
 		retry_count = coex_sta->bt_retry_cnt;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], retry_count = %d\n", retry_count);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
-			  up, dn, m, n, wait_count);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], retry_count = %d\n", retry_count);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+			    up, dn, m, n, wait_count);
 		result = 0;
 		wait_count++;
 		 /* no retry in the last 2-second duration*/
@@ -2271,10 +2214,8 @@
 				up = 0;
 				dn = 0;
 				result = 1;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_TRACE_FW_DETAIL,
-					  "[BTCoex], Increase wifi "
-					  "duration!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+					    "[BTCoex], Increase wifi duration!!\n");
 			} /* <=3 retry in the last 2-second duration*/
 		} else if (retry_count <= 3) {
 			up--;
@@ -2297,10 +2238,8 @@
 				dn = 0;
 				wait_count = 0;
 				result = -1;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_TRACE_FW_DETAIL,
-					  "[BTCoex], Decrease wifi duration "
-					  "for retry_counter<3!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+					    "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
 			}
 		} else {
 			if (wait_count == 1)
@@ -2316,13 +2255,12 @@
 			dn = 0;
 			wait_count = 0;
 			result = -1;
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], Decrease wifi duration "
-				  "for retry_counter>3!!\n");
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
 		}
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], max Interval = %d\n", max_interval);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], max Interval = %d\n", max_interval);
 		if (max_interval == 1)
 			set_tdma_int1(btcoexist, tx_pause, result);
 		else if (max_interval == 2)
@@ -2336,10 +2274,9 @@
 	 */
 	if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
 		bool scan = false, link = false, roam = false;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], PsTdma type dismatch!!!, "
-			  "curPsTdma=%d, recordPsTdma=%d\n",
-			  coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+			    coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
 
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2349,9 +2286,8 @@
 			btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
 					     coex_dm->tdma_adj_type);
 		else
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], roaming/link/scan is under"
-				  " progress, will adjust next time!!!\n");
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
 	}
 }
 
@@ -2989,27 +2925,26 @@
 {
 	u8 algorithm = 0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], RunCoexistMechanism()===>\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], RunCoexistMechanism()===>\n");
 
 	if (btcoexist->manual_control) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], RunCoexistMechanism(), "
-			  "return for Manual CTRL <===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
 		return;
 	}
 
 	if (coex_sta->under_ips) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], wifi is under IPS !!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], wifi is under IPS !!!\n");
 		return;
 	}
 
 	algorithm = btc8723b2ant_action_algorithm(btcoexist);
 	if (coex_sta->c2h_bt_inquiry_page &&
 	    (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BT is under inquiry/page scan !!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BT is under inquiry/page scan !!\n");
 		btc8723b2ant_action_bt_inquiry(btcoexist);
 		return;
 	} else {
@@ -3021,84 +2956,75 @@
 	}
 
 	coex_dm->cur_algorithm = algorithm;
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
-		  coex_dm->cur_algorithm);
+	btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
+		    coex_dm->cur_algorithm);
 
 	if (btc8723b2ant_is_common_action(btcoexist)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Action 2-Ant common.\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Action 2-Ant common\n");
 		coex_dm->auto_tdma_adjust = false;
 	} else {
 		if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], preAlgorithm=%d, "
-				  "curAlgorithm=%d\n", coex_dm->pre_algorithm,
-				  coex_dm->cur_algorithm);
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
+				    coex_dm->pre_algorithm,
+				    coex_dm->cur_algorithm);
 			coex_dm->auto_tdma_adjust = false;
 		}
 		switch (coex_dm->cur_algorithm) {
 		case BT_8723B_2ANT_COEX_ALGO_SCO:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = SCO\n");
 			btc8723b2ant_action_sco(btcoexist);
 			break;
 		case BT_8723B_2ANT_COEX_ALGO_HID:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = HID\n");
 			btc8723b2ant_action_hid(btcoexist);
 			break;
 		case BT_8723B_2ANT_COEX_ALGO_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, "
-				  "algorithm = A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
 			btc8723b2ant_action_a2dp(btcoexist);
 			break;
 		case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, "
-				  "algorithm = A2DP+PAN(HS).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
 			btc8723b2ant_action_a2dp_pan_hs(btcoexist);
 			break;
 		case BT_8723B_2ANT_COEX_ALGO_PANEDR:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, "
-				  "algorithm = PAN(EDR).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
 			btc8723b2ant_action_pan_edr(btcoexist);
 			break;
 		case BT_8723B_2ANT_COEX_ALGO_PANHS:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, "
-				  "algorithm = HS mode.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
 			btc8723b2ant_action_pan_hs(btcoexist);
-				break;
+			break;
 		case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, "
-				  "algorithm = PAN+A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
 			btc8723b2ant_action_pan_edr_a2dp(btcoexist);
 			break;
 		case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, "
-				  "algorithm = PAN(EDR)+HID.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
 			btc8723b2ant_action_pan_edr_hid(btcoexist);
 			break;
 		case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, "
-				  "algorithm = HID+A2DP+PAN.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
 			btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
 			break;
 		case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, "
-				  "algorithm = HID+A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
 			btc8723b2ant_action_hid_a2dp(btcoexist);
 			break;
 		default:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, "
-				  "algorithm = coexist All Off!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
 			btc8723b2ant_coex_alloff(btcoexist);
 			break;
 		}
@@ -3126,8 +3052,8 @@
 {
 	u8 u8tmp = 0;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-		  "[BTCoex], 2Ant Init HW Config!!\n");
+	btc_iface_dbg(INTF_INIT,
+		      "[BTCoex], 2Ant Init HW Config!!\n");
 	coex_dm->bt_rf0x1e_backup =
 		btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
 
@@ -3152,8 +3078,8 @@
 
 void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-		  "[BTCoex], Coex Mechanism Init!!\n");
+	btc_iface_dbg(INTF_INIT,
+		      "[BTCoex], Coex Mechanism Init!!\n");
 	btc8723b2ant_init_coex_dm(btcoexist);
 }
 
@@ -3388,15 +3314,15 @@
 void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_IPS_ENTER == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], IPS ENTER notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], IPS ENTER notify\n");
 		coex_sta->under_ips = true;
 		btc8723b2ant_wifioff_hwcfg(btcoexist);
 		btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
 		btc8723b2ant_coex_alloff(btcoexist);
 	} else if (BTC_IPS_LEAVE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], IPS LEAVE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], IPS LEAVE notify\n");
 		coex_sta->under_ips = false;
 		ex_btc8723b2ant_init_hwconfig(btcoexist);
 		btc8723b2ant_init_coex_dm(btcoexist);
@@ -3407,12 +3333,12 @@
 void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_LPS_ENABLE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], LPS ENABLE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], LPS ENABLE notify\n");
 		coex_sta->under_lps = true;
 	} else if (BTC_LPS_DISABLE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], LPS DISABLE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], LPS DISABLE notify\n");
 		coex_sta->under_lps = false;
 	}
 }
@@ -3420,21 +3346,21 @@
 void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_SCAN_START == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN START notify\n");
 	else if (BTC_SCAN_FINISH == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN FINISH notify\n");
 }
 
 void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_ASSOCIATE_START == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT START notify\n");
 	else if (BTC_ASSOCIATE_FINISH == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT FINISH notify\n");
 }
 
 void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3445,11 +3371,11 @@
 	u8 wifi_central_chnl;
 
 	if (BTC_MEDIA_CONNECT == type)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA connect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA connect notify\n");
 	else
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA disconnect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA disconnect notify\n");
 
 	/* only 2.4G we need to inform bt the chnl mask */
 	btcoexist->btc_get(btcoexist,
@@ -3470,10 +3396,10 @@
 	coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
 	coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x66=0x%x\n",
-		  h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
-		  h2c_parameter[2]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x66=0x%x\n",
+		    h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+		    h2c_parameter[2]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -3482,8 +3408,8 @@
 					   u8 type)
 {
 	if (type == BTC_PACKET_DHCP)
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], DHCP Packet notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], DHCP Packet notify\n");
 }
 
 void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3501,25 +3427,24 @@
 		rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
 	coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-		  "[BTCoex], Bt info[%d], length=%d, hex data=[",
-		  rsp_source, length);
+	btc_iface_dbg(INTF_NOTIFY,
+		      "[BTCoex], Bt info[%d], length=%d, hex data=[",
+		      rsp_source, length);
 	for (i = 0; i < length; i++) {
 		coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
 		if (i == 1)
 			bt_info = tmpbuf[i];
 		if (i == length-1)
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x]\n", tmpbuf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x]\n", tmpbuf[i]);
 		else
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x, ", tmpbuf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x, ", tmpbuf[i]);
 	}
 
 	if (btcoexist->manual_control) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), "
-			  "return for Manual CTRL<===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
 		return;
 	}
 
@@ -3537,9 +3462,8 @@
 		     because bt is reset and loss of the info.
 		 */
 		if ((coex_sta->bt_info_ext & BIT1)) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], BT ext info bit1 check,"
-				  " send wifi BW&Chnl to BT!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
 			btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
 					   &wifi_connected);
 			if (wifi_connected)
@@ -3553,9 +3477,8 @@
 		}
 
 		if ((coex_sta->bt_info_ext & BIT3)) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], BT ext info bit3 check, "
-				  "set BT NOT to ignore Wlan active!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
 			btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
 						     false);
 		} else {
@@ -3608,28 +3531,26 @@
 
 	if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
 		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), "
-			  "BT Non-Connected idle!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
 	/* connection exists but no busy */
 	} else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
 		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
 	} else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
 		   (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
 		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
 	} else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) {
 		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
 	} else {
 		coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), "
-			  "BT Non-Defined state!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
 	}
 
 	if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3652,7 +3573,7 @@
 
 void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+	btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
 
 	btc8723b2ant_wifioff_hwcfg(btcoexist);
 	btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -3666,33 +3587,31 @@
 	static u8 dis_ver_info_cnt;
 	u32 fw_ver = 0, bt_patch_ver = 0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], =========================="
-		  "Periodical===========================\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], ==========================Periodical===========================\n");
 
 	if (dis_ver_info_cnt <= 5) {
 		dis_ver_info_cnt += 1;
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], ****************************"
-			  "************************************\n");
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], Ant PG Num/ Ant Mech/ "
-			  "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
-			  board_info->btdm_ant_num, board_info->btdm_ant_pos);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-			  ((stack_info->profile_notified) ? "Yes" : "No"),
-			  stack_info->hci_version);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], ****************************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+			      board_info->pg_ant_num,
+			      board_info->btdm_ant_num,
+			      board_info->btdm_ant_pos);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+			      stack_info->profile_notified ? "Yes" : "No",
+			      stack_info->hci_version);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
 				   &bt_patch_ver);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-			  glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
-			  fw_ver, bt_patch_ver, bt_patch_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], *****************************"
-			  "***********************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+			      glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+			      fw_ver, bt_patch_ver, bt_patch_ver);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], ****************************************************************\n");
 	}
 
 #if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 9cecf17..3ce47c7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -76,28 +76,28 @@
 			if (bt_rssi >= (rssi_thresh +
 					BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
 				bt_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to High\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Low\n");
 			}
 		} else {
 			if (bt_rssi < rssi_thresh) {
 				bt_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Low\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "[BTCoex], BT Rssi thresh error!!\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "[BTCoex], BT Rssi thresh error!!\n");
 			return coex_sta->pre_bt_rssi_state;
 		}
 
@@ -106,12 +106,12 @@
 			if (bt_rssi >= (rssi_thresh +
 					BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
 				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Medium\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_bt_rssi_state ==
 			   BTC_RSSI_STATE_MEDIUM) ||
@@ -120,26 +120,26 @@
 			if (bt_rssi >= (rssi_thresh1 +
 					BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
 				bt_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to High\n");
 			} else if (bt_rssi < rssi_thresh) {
 				bt_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Low\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Medium\n");
 			}
 		} else {
 			if (bt_rssi < rssi_thresh1) {
 				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Medium\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at High\n");
 			}
 		}
 	}
@@ -165,32 +165,28 @@
 			if (wifi_rssi >=
 			    (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
 				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to High\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Low\n");
 			}
 		} else {
 			if (wifi_rssi < rssi_thresh) {
 				wifi_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Low\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-				  "[BTCoex], wifi RSSI thresh error!!\n");
+			btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+				    "[BTCoex], wifi RSSI thresh error!!\n");
 			return coex_sta->pre_wifi_rssi_state[index];
 		}
 
@@ -201,14 +197,12 @@
 			if (wifi_rssi >=
 			    (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
 				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Medium\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_wifi_rssi_state[index] ==
 			BTC_RSSI_STATE_MEDIUM) ||
@@ -218,31 +212,26 @@
 			    (rssi_thresh1 +
 			     BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
 				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to High\n");
 			} else if (wifi_rssi < rssi_thresh) {
 				wifi_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Low\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Medium\n");
 			}
 		} else {
 			if (wifi_rssi < rssi_thresh1) {
 				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Medium\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at High\n");
 			}
 		}
 	}
@@ -431,9 +420,9 @@
 
 	h2c_parameter[0] |= BIT0;	/* trigger*/
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -504,8 +493,8 @@
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
 	if (!bt_link_info->bt_link_exist) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], No BT link exists!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], No BT link exists!!!\n");
 		return algorithm;
 	}
 
@@ -520,26 +509,26 @@
 
 	if (num_of_diff_profile == 1) {
 		if (bt_link_info->sco_exist) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], BT Profile = SCO only\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], BT Profile = SCO only\n");
 			algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
 		} else {
 			if (bt_link_info->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = HID only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = HID only\n");
 				algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
 			} else if (bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = A2DP only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = A2DP only\n");
 				algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP;
 			} else if (bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = PAN(HS) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = PAN(HS) only\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = PAN(EDR) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = PAN(EDR) only\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR;
 				}
 			}
@@ -547,50 +536,50 @@
 	} else if (num_of_diff_profile == 2) {
 		if (bt_link_info->sco_exist) {
 			if (bt_link_info->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = SCO + HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = SCO + HID\n");
 				algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
 			} else if (bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
 				algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
 			} else if (bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + PAN(HS)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
 				}
 			}
 		} else {
 			if (bt_link_info->hid_exist &&
 			    bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = HID + A2DP\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = HID + A2DP\n");
 				algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
 			} else if (bt_link_info->hid_exist &&
 				   bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = HID + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = HID + PAN(HS)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = HID + PAN(EDR)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (bt_link_info->pan_exist &&
 				   bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP;
 				}
 			}
@@ -599,29 +588,29 @@
 		if (bt_link_info->sco_exist) {
 			if (bt_link_info->hid_exist &&
 			    bt_link_info->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
 				algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
 			} else if (bt_link_info->hid_exist &&
 				bt_link_info->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (bt_link_info->pan_exist &&
 				bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
 				}
 			}
@@ -630,12 +619,12 @@
 			    bt_link_info->pan_exist &&
 			    bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
 				}
 			}
@@ -646,12 +635,12 @@
 			    bt_link_info->pan_exist &&
 			    bt_link_info->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
 
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
 					algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
 				}
 			}
@@ -670,10 +659,10 @@
 	if (enable_auto_report)
 		h2c_parameter[0] |= BIT0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-		  (enable_auto_report ? "Enabled!!" : "Disabled!!"),
-		   h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+		    (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
 }
@@ -682,17 +671,16 @@
 					   bool force_exec,
 					   bool enable_auto_report)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM,
-		  ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
-		  (force_exec ? "force to" : ""), ((enable_auto_report) ?
-		  "Enabled" : "Disabled"));
+	btc_alg_dbg(ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
+		    (force_exec ? "force to" : ""), ((enable_auto_report) ?
+						     "Enabled" : "Disabled"));
 	coex_dm->cur_bt_auto_report = enable_auto_report;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
-			  coex_dm->pre_bt_auto_report,
-			  coex_dm->cur_bt_auto_report);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+			    coex_dm->pre_bt_auto_report,
+			    coex_dm->cur_bt_auto_report);
 
 		if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
 			return;
@@ -718,9 +706,9 @@
 		h2c_parameter[5] = 0xf9;	/*MCS5 or OFDM36*/
 	}
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set WiFi Low-Penalty Retry: %s",
-		  (low_penalty_ra ? "ON!!" : "OFF!!"));
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set WiFi Low-Penalty Retry: %s",
+		    (low_penalty_ra ? "ON!!" : "OFF!!"));
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
@@ -743,20 +731,20 @@
 					   u32 val0x6c0, u32 val0x6c4,
 					   u32 val0x6c8, u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
 	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -764,10 +752,10 @@
 				       bool force_exec, u32 val0x6c0,
 				       u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
-		  (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
-		  val0x6c8, val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+		    (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
+		    val0x6c8, val0x6cc);
 	coex_dm->cur_val_0x6c0 = val0x6c0;
 	coex_dm->cur_val_0x6c4 = val0x6c4;
 	coex_dm->cur_val_0x6c8 = val0x6c8;
@@ -839,9 +827,9 @@
 	if (enable)
 		h2c_parameter[0] |= BIT0;	/* function enable*/
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -849,16 +837,16 @@
 static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
 					    bool force_exec, bool enable)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s turn Ignore WlanAct %s\n",
-		  (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s turn Ignore WlanAct %s\n",
+		    (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
 	coex_dm->cur_ignore_wlan_act = enable;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
-			  coex_dm->pre_ignore_wlan_act,
-			  coex_dm->cur_ignore_wlan_act);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+			    coex_dm->pre_ignore_wlan_act,
+			    coex_dm->cur_ignore_wlan_act);
 
 		if (coex_dm->pre_ignore_wlan_act ==
 		    coex_dm->cur_ignore_wlan_act)
@@ -887,13 +875,13 @@
 	coex_dm->ps_tdma_para[3] = byte4;
 	coex_dm->ps_tdma_para[4] = byte5;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
-		  h2c_parameter[0],
-		  h2c_parameter[1]<<24 |
-		  h2c_parameter[2]<<16 |
-		  h2c_parameter[3]<<8 |
-		  h2c_parameter[4]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+		    h2c_parameter[0],
+		    h2c_parameter[1] << 24 |
+		    h2c_parameter[2] << 16 |
+		    h2c_parameter[3] << 8 |
+		    h2c_parameter[4]);
 	btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
 
@@ -910,22 +898,22 @@
 static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
 				     bool force_exec, u8 lps_val, u8 rpwm_val)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
-		  (force_exec ? "force to" : ""), lps_val, rpwm_val);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+		    (force_exec ? "force to" : ""), lps_val, rpwm_val);
 	coex_dm->cur_lps = lps_val;
 	coex_dm->cur_rpwm = rpwm_val;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
-			  coex_dm->cur_lps, coex_dm->cur_rpwm);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
+			    coex_dm->cur_lps, coex_dm->cur_rpwm);
 
 		if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
 		    (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
-				  coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
+				    coex_dm->pre_rpwm, coex_dm->cur_rpwm);
 
 			return;
 		}
@@ -939,8 +927,8 @@
 static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
 					 bool low_penalty_ra)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-		  "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+	btc_alg_dbg(ALGO_BT_MONITOR,
+		    "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
 
 	halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
 }
@@ -1036,13 +1024,13 @@
 
 	if (!force_exec) {
 		if (coex_dm->cur_ps_tdma_on) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], ********** TDMA(on, %d) **********\n",
-				  coex_dm->cur_ps_tdma);
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], ********** TDMA(on, %d) **********\n",
+				    coex_dm->cur_ps_tdma);
 		} else {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], ********** TDMA(off, %d) **********\n",
-				  coex_dm->cur_ps_tdma);
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], ********** TDMA(off, %d) **********\n",
+				    coex_dm->cur_ps_tdma);
 		}
 		if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
 		    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1253,50 +1241,50 @@
 	if (!wifi_connected &&
 	    BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
 	    coex_dm->bt_status) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
 		halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
 		common = true;
 	} else if (wifi_connected &&
 		   (BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
 		    coex_dm->bt_status)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi connected + BT non connected-idle!!\n");
 		halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
 		common = true;
 	} else if (!wifi_connected &&
 		   (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
 		    coex_dm->bt_status)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
 		halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
 		common = true;
 	} else if (wifi_connected &&
 		   (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
 		   coex_dm->bt_status)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi connected + BT connected-idle!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi connected + BT connected-idle!!\n");
 		halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
 		common = true;
 	} else if (!wifi_connected &&
 		   (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE !=
 		    coex_dm->bt_status)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
 		halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
 		common = true;
 	} else {
 		if (wifi_busy) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
 		} else {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
 		}
 
 		common = false;
@@ -1313,8 +1301,8 @@
 	long			result;
 	u8			retry_count = 0, bt_info_ext;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], TdmaDurationAdjustForAcl()\n");
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], TdmaDurationAdjustForAcl()\n");
 
 	if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
 	     wifi_status) ||
@@ -1342,8 +1330,8 @@
 
 	if (!coex_dm->auto_tdma_adjust) {
 		coex_dm->auto_tdma_adjust = true;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], first run TdmaDurationAdjust()!!\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], first run TdmaDurationAdjust()!!\n");
 
 		halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
 		coex_dm->tdma_adj_type = 2;
@@ -1378,9 +1366,8 @@
 				up = 0;
 				dn = 0;
 				result = 1;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_TRACE_FW_DETAIL,
-					  "[BTCoex], Increase wifi duration!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+					    "[BTCoex], Increase wifi duration!!\n");
 			}
 		} else if (retry_count <= 3) {
 			/* <=3 retry in the last 2-second duration*/
@@ -1410,9 +1397,8 @@
 				dn = 0;
 				wait_count = 0;
 				result = -1;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_TRACE_FW_DETAIL,
-					  "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+					    "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
 			}
 		} else {
 			/* retry count > 3, if retry count > 3 happens once,
@@ -1433,8 +1419,8 @@
 			dn = 0;
 			wait_count = 0;
 			result = -1;
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
 		}
 
 		if (result == -1) {
@@ -1479,9 +1465,9 @@
 			}
 		} else {
 			/*no change*/
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], ********** TDMA(on, %d) **********\n",
-				coex_dm->cur_ps_tdma);
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], ********** TDMA(on, %d) **********\n",
+				    coex_dm->cur_ps_tdma);
 		}
 
 		if (coex_dm->cur_ps_tdma != 1 &&
@@ -1603,27 +1589,27 @@
 		bt_disabled = false;
 		btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
 				   &bt_disabled);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-			  "[BTCoex], BT is enabled !!\n");
+		btc_alg_dbg(ALGO_BT_MONITOR,
+			    "[BTCoex], BT is enabled !!\n");
 	} else {
 		bt_disable_cnt++;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-			  "[BTCoex], bt all counters = 0, %d times!!\n",
-			  bt_disable_cnt);
+		btc_alg_dbg(ALGO_BT_MONITOR,
+			    "[BTCoex], bt all counters = 0, %d times!!\n",
+			    bt_disable_cnt);
 		if (bt_disable_cnt >= 2) {
 			bt_disabled = true;
 			btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
 					   &bt_disabled);
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-				  "[BTCoex], BT is disabled !!\n");
+			btc_alg_dbg(ALGO_BT_MONITOR,
+				    "[BTCoex], BT is disabled !!\n");
 			halbtc8821a1ant_action_wifi_only(btcoexist);
 		}
 	}
 	if (pre_bt_disabled != bt_disabled) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-			  "[BTCoex], BT is from %s to %s!!\n",
-			(pre_bt_disabled ? "disabled" : "enabled"),
-			(bt_disabled ? "disabled" : "enabled"));
+		btc_alg_dbg(ALGO_BT_MONITOR,
+			    "[BTCoex], BT is from %s to %s!!\n",
+			    (pre_bt_disabled ? "disabled" : "enabled"),
+			    (bt_disabled ? "disabled" : "enabled"));
 		pre_bt_disabled = bt_disabled;
 		if (bt_disabled) {
 			btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
@@ -1897,15 +1883,15 @@
 	bool	scan = false, link = false, roam = false;
 	bool	under_4way = false;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], CoexForWifiConnect()===>\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], CoexForWifiConnect()===>\n");
 
 	btcoexist->btc_get(btcoexist,
 		 BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way);
 	if (under_4way) {
 		btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
 		return;
 	}
 
@@ -1914,8 +1900,8 @@
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
 	if (scan || link || roam) {
 		halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
 		return;
 	}
 
@@ -1976,58 +1962,58 @@
 	if (!halbtc8821a1ant_is_common_action(btcoexist)) {
 		switch (coex_dm->cur_algorithm) {
 		case BT_8821A_1ANT_COEX_ALGO_SCO:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = SCO.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = SCO\n");
 			halbtc8821a1ant_action_sco(btcoexist);
 			break;
 		case BT_8821A_1ANT_COEX_ALGO_HID:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = HID.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = HID\n");
 			halbtc8821a1ant_action_hid(btcoexist);
 			break;
 		case BT_8821A_1ANT_COEX_ALGO_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = A2DP\n");
 			halbtc8821a1ant_action_a2dp(btcoexist);
 			break;
 		case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = A2DP+PAN(HS).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
 			halbtc8821a1ant_action_a2dp_pan_hs(btcoexist);
 			break;
 		case BT_8821A_1ANT_COEX_ALGO_PANEDR:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = PAN(EDR).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = PAN(EDR)\n");
 			halbtc8821a1ant_action_pan_edr(btcoexist);
 			break;
 		case BT_8821A_1ANT_COEX_ALGO_PANHS:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = HS mode.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = HS mode\n");
 			halbtc8821a1ant_action_pan_hs(btcoexist);
 			break;
 		case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = PAN+A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = PAN+A2DP\n");
 			halbtc8821a1ant_action_pan_edr_a2dp(btcoexist);
 			break;
 		case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = PAN(EDR)+HID.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
 			halbtc8821a1ant_action_pan_edr_hid(btcoexist);
 			break;
 		case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = HID+A2DP+PAN.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
 			btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist);
 			break;
 		case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = HID+A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = HID+A2DP\n");
 			halbtc8821a1ant_action_hid_a2dp(btcoexist);
 			break;
 		default:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action algorithm = coexist All Off!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action algorithm = coexist All Off!!\n");
 			/*halbtc8821a1ant_coex_all_off(btcoexist);*/
 			break;
 		}
@@ -2045,31 +2031,31 @@
 	u8	wifi_rssi_state = BTC_RSSI_STATE_HIGH;
 	bool	wifi_under_5g = false;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], RunCoexistMechanism()===>\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], RunCoexistMechanism()===>\n");
 
 	if (btcoexist->manual_control) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
 		return;
 	}
 
 	if (btcoexist->stop_coex_dm) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
 		return;
 	}
 
 	if (coex_sta->under_ips) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], wifi is under IPS !!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], wifi is under IPS !!!\n");
 		return;
 	}
 
 	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
 	if (wifi_under_5g) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
 		halbtc8821a1ant_coex_under_5g(btcoexist);
 		return;
 	}
@@ -2135,8 +2121,8 @@
 	if (!wifi_connected) {
 		bool	scan = false, link = false, roam = false;
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], wifi is non connected-idle !!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], wifi is non connected-idle !!!\n");
 
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2168,8 +2154,8 @@
 	u8	u1_tmp = 0;
 	bool	wifi_under_5g = false;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-		  "[BTCoex], 1Ant Init HW Config!!\n");
+	btc_iface_dbg(INTF_INIT,
+		      "[BTCoex], 1Ant Init HW Config!!\n");
 
 	if (back_up) {
 		coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
@@ -2220,8 +2206,8 @@
 
 void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-		  "[BTCoex], Coex Mechanism Init!!\n");
+	btc_iface_dbg(INTF_INIT,
+		      "[BTCoex], Coex Mechanism Init!!\n");
 
 	btcoexist->stop_coex_dm = false;
 
@@ -2515,8 +2501,8 @@
 		return;
 
 	if (BTC_IPS_ENTER == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], IPS ENTER notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], IPS ENTER notify\n");
 		coex_sta->under_ips = true;
 		halbtc8821a1ant_set_ant_path(btcoexist,
 					     BTC_ANT_PATH_BT, false, true);
@@ -2525,8 +2511,8 @@
 		halbtc8821a1ant_coex_table_with_type(btcoexist,
 						     NORMAL_EXEC, 0);
 	} else if (BTC_IPS_LEAVE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], IPS LEAVE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], IPS LEAVE notify\n");
 		coex_sta->under_ips = false;
 
 		halbtc8821a1ant_run_coexist_mechanism(btcoexist);
@@ -2539,12 +2525,12 @@
 		return;
 
 	if (BTC_LPS_ENABLE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], LPS ENABLE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], LPS ENABLE notify\n");
 		coex_sta->under_Lps = true;
 	} else if (BTC_LPS_DISABLE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], LPS DISABLE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], LPS DISABLE notify\n");
 		coex_sta->under_Lps = false;
 	}
 }
@@ -2574,8 +2560,8 @@
 	}
 
 	if (BTC_SCAN_START == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN START notify\n");
 		if (!wifi_connected) {
 			/* non-connected scan*/
 			btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
@@ -2584,8 +2570,8 @@
 			halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
 		}
 	} else if (BTC_SCAN_FINISH == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN FINISH notify\n");
 		if (!wifi_connected) {
 			/* non-connected scan*/
 			halbtc8821a1ant_action_wifi_not_connected(btcoexist);
@@ -2614,12 +2600,12 @@
 	}
 
 	if (BTC_ASSOCIATE_START == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT START notify\n");
 		btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
 	} else if (BTC_ASSOCIATE_FINISH == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT FINISH notify\n");
 
 		btcoexist->btc_get(btcoexist,
 			 BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
@@ -2645,11 +2631,11 @@
 		return;
 
 	if (BTC_MEDIA_CONNECT == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA connect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA connect notify\n");
 	} else {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA disconnect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA disconnect notify\n");
 	}
 
 	/* only 2.4G we need to inform bt the chnl mask*/
@@ -2672,9 +2658,11 @@
 	coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
 	coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x66 = 0x%x\n",
-		  h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x66 = 0x%x\n",
+		    h2c_parameter[0] << 16 |
+		    h2c_parameter[1] << 8 |
+		    h2c_parameter[2]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -2702,8 +2690,8 @@
 
 	if (BTC_PACKET_DHCP == type ||
 	    BTC_PACKET_EAPOL == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], special Packet(%d) notify\n", type);
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], special Packet(%d) notify\n", type);
 		btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
 	}
 }
@@ -2727,19 +2715,19 @@
 		rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
 	coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-		  "[BTCoex], Bt info[%d], length = %d, hex data = [",
-		  rsp_source, length);
+	btc_iface_dbg(INTF_NOTIFY,
+		      "[BTCoex], Bt info[%d], length = %d, hex data = [",
+		      rsp_source, length);
 	for (i = 0; i < length; i++) {
 		coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
 		if (i == 1)
 			bt_info = tmp_buf[i];
 		if (i == length-1) {
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x]\n", tmp_buf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x]\n", tmp_buf[i]);
 		} else {
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x, ", tmp_buf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x, ", tmp_buf[i]);
 		}
 	}
 
@@ -2756,8 +2744,8 @@
 		/* Here we need to resend some wifi info to BT*/
 		/* because bt is reset and loss of the info.*/
 		if (coex_sta->bt_info_ext & BIT1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
 			btcoexist->btc_get(btcoexist,
 					   BTC_GET_BL_WIFI_CONNECTED,
 					   &wifi_connected);
@@ -2773,8 +2761,8 @@
 		if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) {
 			if (!btcoexist->manual_control &&
 			    !btcoexist->stop_coex_dm) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
 				halbtc8821a1ant_ignore_wlan_act(btcoexist,
 								FORCE_EXEC,
 								false);
@@ -2782,8 +2770,8 @@
 		}
 #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
 		if (!(coex_sta->bt_info_ext & BIT4)) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
 			halbtc8821a1ant_bt_auto_report(btcoexist,
 						       FORCE_EXEC, true);
 		}
@@ -2828,28 +2816,28 @@
 
 	if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
 		coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
 	} else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
 		/* connection exists but no busy*/
 		coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
 	} else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) ||
 		(bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
 		coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
 	} else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) {
 		if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
 			coex_dm->auto_tdma_adjust = false;
 		coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
 	} else {
 		coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
 	}
 
 	if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -2866,8 +2854,8 @@
 
 void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-		  "[BTCoex], Halt notify\n");
+	btc_iface_dbg(INTF_NOTIFY,
+		      "[BTCoex], Halt notify\n");
 
 	btcoexist->stop_coex_dm = true;
 
@@ -2885,20 +2873,20 @@
 
 void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-		  "[BTCoex], Pnp notify\n");
+	btc_iface_dbg(INTF_NOTIFY,
+		      "[BTCoex], Pnp notify\n");
 
 	if (BTC_WIFI_PNP_SLEEP == pnp_state) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], Pnp notify to SLEEP\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], Pnp notify to SLEEP\n");
 		btcoexist->stop_coex_dm = true;
 		halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
 		halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
 						 0x0, 0x0);
 		halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
 	} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], Pnp notify to WAKE UP\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], Pnp notify to WAKE UP\n");
 		btcoexist->stop_coex_dm = false;
 		halbtc8821a1ant_init_hw_config(btcoexist, false);
 		halbtc8821a1ant_init_coex_dm(btcoexist);
@@ -2914,33 +2902,33 @@
 	struct btc_board_info *board_info = &btcoexist->board_info;
 	struct btc_stack_info *stack_info = &btcoexist->stack_info;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], ==========================Periodical===========================\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], ==========================Periodical===========================\n");
 
 	if (dis_ver_info_cnt <= 5) {
 		dis_ver_info_cnt += 1;
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], ****************************************************************\n");
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-			  board_info->pg_ant_num,
-			  board_info->btdm_ant_num,
-			  board_info->btdm_ant_pos);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-			  ((stack_info->profile_notified) ? "Yes" : "No"),
-			  stack_info->hci_version);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], ****************************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+			      board_info->pg_ant_num,
+			      board_info->btdm_ant_num,
+			      board_info->btdm_ant_pos);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+			      stack_info->profile_notified ? "Yes" : "No",
+			      stack_info->hci_version);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
 				   &bt_patch_ver);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-			glcoex_ver_date_8821a_1ant,
-			glcoex_ver_8821a_1ant,
-			fw_ver, bt_patch_ver,
-			bt_patch_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], ****************************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+			      glcoex_ver_date_8821a_1ant,
+			      glcoex_ver_8821a_1ant,
+			      fw_ver, bt_patch_ver,
+			      bt_patch_ver);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], ****************************************************************\n");
 	}
 
 #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 044d914..81f843b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -80,28 +80,28 @@
 				   BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT;
 			if (bt_rssi >= tmp) {
 				bt_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to High\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Low\n");
 			}
 		} else {
 			if (bt_rssi < rssi_thresh) {
 				bt_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Low\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-				  "[BTCoex], BT Rssi thresh error!!\n");
+			btc_alg_dbg(ALGO_BT_RSSI_STATE,
+				    "[BTCoex], BT Rssi thresh error!!\n");
 			return coex_sta->pre_bt_rssi_state;
 		}
 
@@ -110,12 +110,12 @@
 			if (bt_rssi >=
 			    (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
 				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Medium\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_bt_rssi_state ==
 			   BTC_RSSI_STATE_MEDIUM) ||
@@ -125,26 +125,26 @@
 			    (rssi_thresh1 +
 			     BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
 				bt_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to High\n");
 			} else if (bt_rssi < rssi_thresh) {
 				bt_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Low\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Low\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at Medium\n");
 			}
 		} else {
 			if (bt_rssi < rssi_thresh1) {
 				bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state switch to Medium\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state switch to Medium\n");
 			} else {
 				bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
-					  "[BTCoex], BT Rssi state stay at High\n");
+				btc_alg_dbg(ALGO_BT_RSSI_STATE,
+					    "[BTCoex], BT Rssi state stay at High\n");
 			}
 		}
 	}
@@ -171,32 +171,28 @@
 			if (wifi_rssi >=
 			    (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
 				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to High\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Low\n");
 			}
 		} else {
 			if (wifi_rssi < rssi_thresh) {
 				wifi_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Low\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at High\n");
 			}
 		}
 	} else if (level_num == 3) {
 		if (rssi_thresh > rssi_thresh1) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
-				  "[BTCoex], wifi RSSI thresh error!!\n");
+			btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+				    "[BTCoex], wifi RSSI thresh error!!\n");
 			return coex_sta->pre_wifi_rssi_state[index];
 		}
 
@@ -207,14 +203,12 @@
 			if (wifi_rssi >=
 			    (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
 				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Medium\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Low\n");
 			}
 		} else if ((coex_sta->pre_wifi_rssi_state[index] ==
 			   BTC_RSSI_STATE_MEDIUM) ||
@@ -223,31 +217,26 @@
 			if (wifi_rssi >= (rssi_thresh1 +
 			    BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
 				wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to High\n");
 			} else if (wifi_rssi < rssi_thresh) {
 				wifi_rssi_state = BTC_RSSI_STATE_LOW;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Low\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Low\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at Medium\n");
 			}
 		} else {
 			if (wifi_rssi < rssi_thresh1) {
 				wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state switch to Medium\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state switch to Medium\n");
 			} else {
 				wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_WIFI_RSSI_STATE,
-					  "[BTCoex], wifi RSSI state stay at High\n");
+				btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+					    "[BTCoex], wifi RSSI state stay at High\n");
 			}
 		}
 	}
@@ -279,26 +268,26 @@
 		bt_disabled = false;
 		btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
 				   &bt_disabled);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-			  "[BTCoex], BT is enabled !!\n");
+		btc_alg_dbg(ALGO_BT_MONITOR,
+			    "[BTCoex], BT is enabled !!\n");
 	} else {
 		bt_disable_cnt++;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-			  "[BTCoex], bt all counters = 0, %d times!!\n",
-			  bt_disable_cnt);
+		btc_alg_dbg(ALGO_BT_MONITOR,
+			    "[BTCoex], bt all counters = 0, %d times!!\n",
+			    bt_disable_cnt);
 		if (bt_disable_cnt >= 2) {
 			bt_disabled = true;
 			btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
 					   &bt_disabled);
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-				  "[BTCoex], BT is disabled !!\n");
+			btc_alg_dbg(ALGO_BT_MONITOR,
+				    "[BTCoex], BT is disabled !!\n");
 		}
 	}
 	if (pre_bt_disabled != bt_disabled) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-			  "[BTCoex], BT is from %s to %s!!\n",
-			  (pre_bt_disabled ? "disabled" : "enabled"),
-			  (bt_disabled ? "disabled" : "enabled"));
+		btc_alg_dbg(ALGO_BT_MONITOR,
+			    "[BTCoex], BT is from %s to %s!!\n",
+			    (pre_bt_disabled ? "disabled" : "enabled"),
+			    (bt_disabled ? "disabled" : "enabled"));
 		pre_bt_disabled = bt_disabled;
 	}
 }
@@ -324,12 +313,12 @@
 	coex_sta->low_priority_tx = reg_lp_tx;
 	coex_sta->low_priority_rx = reg_lp_rx;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-		  "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-		  reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
-		  "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-		  reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+	btc_alg_dbg(ALGO_BT_MONITOR,
+		    "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+		    reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+	btc_alg_dbg(ALGO_BT_MONITOR,
+		    "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+		    reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
 
 	/* reset counter */
 	btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -343,9 +332,9 @@
 
 	h2c_parameter[0] |= BIT0;	/* trigger */
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -368,8 +357,8 @@
 		stack_info->bt_link_exist = coex_sta->bt_link_exist;
 
 	if (!coex_sta->bt_link_exist) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], No profile exists!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], No profile exists!!!\n");
 		return algorithm;
 	}
 
@@ -384,26 +373,26 @@
 
 	if (num_of_diff_profile == 1) {
 		if (coex_sta->sco_exist) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], SCO only\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], SCO only\n");
 			algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
 		} else {
 			if (coex_sta->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], HID only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], HID only\n");
 				algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
 			} else if (coex_sta->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], A2DP only\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], A2DP only\n");
 				algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
 			} else if (coex_sta->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], PAN(HS) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], PAN(HS) only\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], PAN(EDR) only\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], PAN(EDR) only\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR;
 				}
 			}
@@ -411,50 +400,50 @@
 	} else if (num_of_diff_profile == 2) {
 		if (coex_sta->sco_exist) {
 			if (coex_sta->hid_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], SCO + HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], SCO + HID\n");
 				algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (coex_sta->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], SCO + A2DP ==> SCO\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], SCO + A2DP ==> SCO\n");
 				algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (coex_sta->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + PAN(HS)\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + PAN(EDR)\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
 				}
 			}
 		} else {
 			if (coex_sta->hid_exist &&
 			    coex_sta->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], HID + A2DP\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], HID + A2DP\n");
 				algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
 			} else if (coex_sta->hid_exist &&
 				coex_sta->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + PAN(HS)\n");
 					algorithm =  BT_8821A_2ANT_COEX_ALGO_HID;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + PAN(EDR)\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (coex_sta->pan_exist &&
 				coex_sta->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], A2DP + PAN(HS)\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], A2DP + PAN(EDR)\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
 				}
 			}
@@ -463,29 +452,29 @@
 		if (coex_sta->sco_exist) {
 			if (coex_sta->hid_exist &&
 			    coex_sta->a2dp_exist) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-					  "[BTCoex], SCO + HID + A2DP ==> HID\n");
+				btc_alg_dbg(ALGO_TRACE,
+					    "[BTCoex], SCO + HID + A2DP ==> HID\n");
 				algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
 			} else if (coex_sta->hid_exist &&
 				coex_sta->pan_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + HID + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + HID + PAN(HS)\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + HID + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + HID + PAN(EDR)\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
 				}
 			} else if (coex_sta->pan_exist &&
 				   coex_sta->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + A2DP + PAN(HS)\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
 				}
 			}
@@ -494,12 +483,12 @@
 			    coex_sta->pan_exist &&
 			    coex_sta->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + A2DP + PAN(HS)\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], HID + A2DP + PAN(EDR)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], HID + A2DP + PAN(EDR)\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
 				}
 			}
@@ -510,12 +499,12 @@
 			    coex_sta->pan_exist &&
 			    coex_sta->a2dp_exist) {
 				if (bt_hs_on) {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
 
 				} else {
-					BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-						  "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+					btc_alg_dbg(ALGO_TRACE,
+						    "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
 					algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
 				}
 			}
@@ -544,15 +533,15 @@
 	if (wifi_connected) {
 		if (bt_hs_on) {
 			if (bt_hs_rssi > 37) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-					  "[BTCoex], Need to decrease bt power for HS mode!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW,
+					    "[BTCoex], Need to decrease bt power for HS mode!!\n");
 				ret = true;
 			}
 		} else {
 			if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
 			    (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-					  "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW,
+					    "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
 				ret = true;
 			}
 		}
@@ -570,10 +559,10 @@
 	 */
 	h2c_parameter[0] = dac_swing_lvl;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
@@ -588,9 +577,9 @@
 	if (dec_bt_pwr)
 		h2c_parameter[0] |= BIT1;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
-		  (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
+		    (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
@@ -598,16 +587,16 @@
 static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
 				       bool force_exec, bool dec_bt_pwr)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s Dec BT power = %s\n",
-		  (force_exec ? "force to" : ""),
-		  ((dec_bt_pwr) ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s Dec BT power = %s\n",
+		    (force_exec ? "force to" : ""),
+		    ((dec_bt_pwr) ? "ON" : "OFF"));
 	coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
-			  coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
+			    coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
 
 		if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
 			return;
@@ -627,10 +616,10 @@
 	if (bt_lna_cons_on)
 		h2c_parameter[1] |= BIT0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
-		  (bt_lna_cons_on ? "ON!!" : "OFF!!"),
-		  h2c_parameter[0]<<8|h2c_parameter[1]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
+		    bt_lna_cons_on ? "ON!!" : "OFF!!",
+		    h2c_parameter[0] << 8 | h2c_parameter[1]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
 }
@@ -638,17 +627,17 @@
 static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
 				       bool force_exec, bool bt_lna_cons_on)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s BT Constrain = %s\n",
-		  (force_exec ? "force" : ""),
-		  ((bt_lna_cons_on) ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s BT Constrain = %s\n",
+		    (force_exec ? "force" : ""),
+		    ((bt_lna_cons_on) ? "ON" : "OFF"));
 	coex_dm->cur_bt_lna_constrain = bt_lna_cons_on;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
-			  coex_dm->pre_bt_lna_constrain,
-			  coex_dm->cur_bt_lna_constrain);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
+			    coex_dm->pre_bt_lna_constrain,
+			    coex_dm->cur_bt_lna_constrain);
 
 		if (coex_dm->pre_bt_lna_constrain ==
 		    coex_dm->cur_bt_lna_constrain)
@@ -669,10 +658,10 @@
 
 	h2c_parameter[1] = bt_psd_mode;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
-		  h2c_parameter[1],
-		  h2c_parameter[0]<<8|h2c_parameter[1]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
+		    h2c_parameter[1],
+		    h2c_parameter[0] << 8 | h2c_parameter[1]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
 }
@@ -680,15 +669,15 @@
 static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
 					    bool force_exec, u8 bt_psd_mode)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s BT PSD mode = 0x%x\n",
-		  (force_exec ? "force" : ""), bt_psd_mode);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s BT PSD mode = 0x%x\n",
+		    (force_exec ? "force" : ""), bt_psd_mode);
 	coex_dm->cur_bt_psd_mode = bt_psd_mode;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
-			  coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
+			    coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
 
 		if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode)
 			return;
@@ -709,10 +698,10 @@
 	if (enable_auto_report)
 		h2c_parameter[0] |= BIT0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-		  (enable_auto_report ? "Enabled!!" : "Disabled!!"),
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+		    (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+		    h2c_parameter[0]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
 }
@@ -721,17 +710,17 @@
 					   bool force_exec,
 					   bool enable_auto_report)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s BT Auto report = %s\n",
-		  (force_exec ? "force to" : ""),
-		  ((enable_auto_report) ? "Enabled" : "Disabled"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s BT Auto report = %s\n",
+		    (force_exec ? "force to" : ""),
+		    ((enable_auto_report) ? "Enabled" : "Disabled"));
 	coex_dm->cur_bt_auto_report = enable_auto_report;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
-			  coex_dm->pre_bt_auto_report,
-			  coex_dm->cur_bt_auto_report);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+			    coex_dm->pre_bt_auto_report,
+			    coex_dm->cur_bt_auto_report);
 
 		if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
 			return;
@@ -746,16 +735,16 @@
 					     bool force_exec,
 					     u8 fw_dac_swing_lvl)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s set FW Dac Swing level = %d\n",
-		  (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s set FW Dac Swing level = %d\n",
+		    (force_exec ? "force to" : ""), fw_dac_swing_lvl);
 	coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
-			  coex_dm->pre_fw_dac_swing_lvl,
-			  coex_dm->cur_fw_dac_swing_lvl);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
+			    coex_dm->pre_fw_dac_swing_lvl,
+			    coex_dm->cur_fw_dac_swing_lvl);
 
 		if (coex_dm->pre_fw_dac_swing_lvl ==
 		    coex_dm->cur_fw_dac_swing_lvl)
@@ -773,8 +762,8 @@
 {
 	if (rx_rf_shrink_on) {
 		/* Shrink RF Rx LPF corner */
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], Shrink RF Rx LPF corner!!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], Shrink RF Rx LPF corner!!\n");
 		btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
 					  0xfffff, 0xffffc);
 	} else {
@@ -782,8 +771,8 @@
 		 * After initialized, we can use coex_dm->bt_rf0x1e_backup
 		 */
 		if (btcoexist->initilized) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-				  "[BTCoex], Resume RF Rx LPF corner!!\n");
+			btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+				    "[BTCoex], Resume RF Rx LPF corner!!\n");
 			btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
 						  0x1e, 0xfffff,
 						   coex_dm->bt_rf0x1e_backup);
@@ -794,17 +783,17 @@
 static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
 				     bool force_exec, bool rx_rf_shrink_on)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn Rx RF Shrink = %s\n",
-		  (force_exec ? "force to" : ""),
-		  ((rx_rf_shrink_on) ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn Rx RF Shrink = %s\n",
+		    (force_exec ? "force to" : ""),
+		    ((rx_rf_shrink_on) ? "ON" : "OFF"));
 	coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
-			  coex_dm->pre_rf_rx_lpf_shrink,
-			  coex_dm->cur_rf_rx_lpf_shrink);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
+			    coex_dm->pre_rf_rx_lpf_shrink,
+			    coex_dm->cur_rf_rx_lpf_shrink);
 
 		if (coex_dm->pre_rf_rx_lpf_shrink ==
 		    coex_dm->cur_rf_rx_lpf_shrink)
@@ -835,9 +824,9 @@
 		h2c_parameter[5] = 0xf9;
 	}
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set WiFi Low-Penalty Retry: %s",
-		  (low_penalty_ra ? "ON!!" : "OFF!!"));
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set WiFi Low-Penalty Retry: %s",
+		    (low_penalty_ra ? "ON!!" : "OFF!!"));
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
@@ -846,17 +835,17 @@
 					   bool force_exec, bool low_penalty_ra)
 {
 	/*return;*/
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn LowPenaltyRA = %s\n",
-		  (force_exec ? "force to" : ""),
-		  ((low_penalty_ra) ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn LowPenaltyRA = %s\n",
+		    (force_exec ? "force to" : ""),
+		    ((low_penalty_ra) ? "ON" : "OFF"));
 	coex_dm->cur_low_penalty_ra = low_penalty_ra;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
-			  coex_dm->pre_low_penalty_ra,
-			  coex_dm->cur_low_penalty_ra);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
+			    coex_dm->pre_low_penalty_ra,
+			    coex_dm->cur_low_penalty_ra);
 
 		if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
 			return;
@@ -872,8 +861,8 @@
 {
 	u8 val = (u8)level;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], Write SwDacSwing = 0x%x\n", level);
 	btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
 }
 
@@ -891,21 +880,21 @@
 				      bool force_exec, bool dac_swing_on,
 				      u32 dac_swing_lvl)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
-		  (force_exec ? "force to" : ""),
-		  ((dac_swing_on) ? "ON" : "OFF"),
-		  dac_swing_lvl);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
+		    (force_exec ? "force to" : ""),
+		    ((dac_swing_on) ? "ON" : "OFF"),
+		    dac_swing_lvl);
 	coex_dm->cur_dac_swing_on = dac_swing_on;
 	coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
-			  coex_dm->pre_dac_swing_on,
-			  coex_dm->pre_dac_swing_lvl,
-			  coex_dm->cur_dac_swing_on,
-			  coex_dm->cur_dac_swing_lvl);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
+			    coex_dm->pre_dac_swing_on,
+			    coex_dm->pre_dac_swing_lvl,
+			    coex_dm->cur_dac_swing_on,
+			    coex_dm->cur_dac_swing_lvl);
 
 		if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
 		    (coex_dm->pre_dac_swing_lvl ==
@@ -924,12 +913,12 @@
 					     bool adc_back_off)
 {
 	if (adc_back_off) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], BB BackOff Level On!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], BB BackOff Level On!\n");
 		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-			  "[BTCoex], BB BackOff Level Off!\n");
+		btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+			    "[BTCoex], BB BackOff Level Off!\n");
 		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
 	}
 }
@@ -937,16 +926,17 @@
 static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist,
 					 bool force_exec, bool adc_back_off)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s turn AdcBackOff = %s\n",
-		  (force_exec ? "force to" : ""),
-		  ((adc_back_off) ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s turn AdcBackOff = %s\n",
+		    (force_exec ? "force to" : ""),
+		    ((adc_back_off) ? "ON" : "OFF"));
 	coex_dm->cur_adc_back_off = adc_back_off;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
-			  coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
+			    coex_dm->pre_adc_back_off,
+			    coex_dm->cur_adc_back_off);
 
 		if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
 			return;
@@ -960,20 +950,20 @@
 					   u32 val0x6c0, u32 val0x6c4,
 					   u32 val0x6c8, u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
 	btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
-		  "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+		    "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
 	btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -981,28 +971,28 @@
 				       bool force_exec, u32 val0x6c0,
 				       u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
-		  "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
-		  (force_exec ? "force to" : ""),
-		  val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+	btc_alg_dbg(ALGO_TRACE_SW,
+		    "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+		    (force_exec ? "force to" : ""),
+		    val0x6c0, val0x6c4, val0x6c8, val0x6cc);
 	coex_dm->cur_val0x6c0 = val0x6c0;
 	coex_dm->cur_val0x6c4 = val0x6c4;
 	coex_dm->cur_val0x6c8 = val0x6c8;
 	coex_dm->cur_val0x6cc = val0x6cc;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
-			  coex_dm->pre_val0x6c0,
-			  coex_dm->pre_val0x6c4,
-			  coex_dm->pre_val0x6c8,
-			  coex_dm->pre_val0x6cc);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
-			  "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
-			  coex_dm->cur_val0x6c0,
-			  coex_dm->cur_val0x6c4,
-			  coex_dm->cur_val0x6c8,
-			  coex_dm->cur_val0x6cc);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
+			    coex_dm->pre_val0x6c0,
+			    coex_dm->pre_val0x6c4,
+			    coex_dm->pre_val0x6c8,
+			    coex_dm->pre_val0x6cc);
+		btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+			    "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
+			    coex_dm->cur_val0x6c0,
+			    coex_dm->cur_val0x6c4,
+			    coex_dm->cur_val0x6c8,
+			    coex_dm->cur_val0x6cc);
 
 		if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
 		    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1027,9 +1017,9 @@
 	if (enable)
 		h2c_parameter[0] |= BIT0;/* function enable */
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-		  h2c_parameter[0]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+		    h2c_parameter[0]);
 
 	btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
 }
@@ -1037,16 +1027,16 @@
 static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
 					    bool force_exec, bool enable)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s turn Ignore WlanAct %s\n",
-		  (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s turn Ignore WlanAct %s\n",
+		    (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
 	coex_dm->cur_ignore_wlan_act = enable;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
-			  coex_dm->pre_ignore_wlan_act,
-			  coex_dm->cur_ignore_wlan_act);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+			    coex_dm->pre_ignore_wlan_act,
+			    coex_dm->cur_ignore_wlan_act);
 
 		if (coex_dm->pre_ignore_wlan_act ==
 		    coex_dm->cur_ignore_wlan_act)
@@ -1075,13 +1065,13 @@
 	coex_dm->ps_tdma_para[3] = byte4;
 	coex_dm->ps_tdma_para[4] = byte5;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
-		  h2c_parameter[0],
-		  h2c_parameter[1]<<24|
-		  h2c_parameter[2]<<16|
-		  h2c_parameter[3]<<8|
-		  h2c_parameter[4]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+		    h2c_parameter[0],
+		    h2c_parameter[1] << 24 |
+		    h2c_parameter[2] << 16 |
+		    h2c_parameter[3] << 8 |
+		    h2c_parameter[4]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
@@ -1175,20 +1165,20 @@
 static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
 				    bool force_exec, bool turn_on, u8 type)
 {
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], %s turn %s PS TDMA, type = %d\n",
-		  (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
-		  type);
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], %s turn %s PS TDMA, type = %d\n",
+		    (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
+		    type);
 	coex_dm->cur_ps_tdma_on = turn_on;
 	coex_dm->cur_ps_tdma = type;
 
 	if (!force_exec) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
-			  coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
-			  coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
+			    coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
+			    coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
 
 		if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
 		    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1374,8 +1364,8 @@
 		btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
 				   &low_pwr_disable);
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi IPS + BT IPS!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi IPS + BT IPS!!\n");
 
 		halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
 		halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1392,13 +1382,13 @@
 				   &low_pwr_disable);
 
 		if (wifi_busy) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi Busy + BT IPS!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi Busy + BT IPS!!\n");
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						false, 1);
 		} else {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi LPS + BT IPS!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi LPS + BT IPS!!\n");
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						false, 1);
 		}
@@ -1416,8 +1406,8 @@
 		btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
 				   &low_pwr_disable);
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi IPS + BT LPS!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi IPS + BT LPS!!\n");
 
 		halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
 		halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1433,13 +1423,13 @@
 			BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
 
 		if (wifi_busy) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi Busy + BT LPS!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi Busy + BT LPS!!\n");
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						false, 1);
 		} else {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi LPS + BT LPS!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi LPS + BT LPS!!\n");
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						false, 1);
 		}
@@ -1458,8 +1448,8 @@
 		btcoexist->btc_set(btcoexist,
 			BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Wifi IPS + BT Busy!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Wifi IPS + BT Busy!!\n");
 
 		halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
 		halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1478,12 +1468,12 @@
 				   &low_pwr_disable);
 
 		if (wifi_busy) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi Busy + BT Busy!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi Busy + BT Busy!!\n");
 			common = false;
 		} else {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Wifi LPS + BT Busy!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Wifi LPS + BT Busy!!\n");
 			halbtc8821a2ant_ps_tdma(btcoexist,
 						NORMAL_EXEC, true, 21);
 
@@ -1505,8 +1495,8 @@
 			   int result)
 {
 	if (tx_pause) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 1\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 1\n");
 
 		if (coex_dm->cur_ps_tdma == 71) {
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1601,8 +1591,8 @@
 			}
 		}
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 0\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 0\n");
 		if (coex_dm->cur_ps_tdma == 5) {
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						true, 71);
@@ -1706,8 +1696,8 @@
 			   int result)
 {
 	if (tx_pause) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 1\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 1\n");
 		if (coex_dm->cur_ps_tdma == 1) {
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						true, 6);
@@ -1796,8 +1786,8 @@
 			}
 		}
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 0\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 0\n");
 		if (coex_dm->cur_ps_tdma == 5) {
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						true, 2);
@@ -1892,8 +1882,8 @@
 			   int result)
 {
 	if (tx_pause) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 1\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 1\n");
 		if (coex_dm->cur_ps_tdma == 1) {
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						true, 7);
@@ -1982,8 +1972,8 @@
 			}
 		}
 	} else {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], TxPause = 0\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], TxPause = 0\n");
 		if (coex_dm->cur_ps_tdma == 5) {
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
 						true, 3);
@@ -2085,13 +2075,13 @@
 	int		result;
 	u8		retry_count = 0;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
-		  "[BTCoex], TdmaDurationAdjust()\n");
+	btc_alg_dbg(ALGO_TRACE_FW,
+		    "[BTCoex], TdmaDurationAdjust()\n");
 
 	if (coex_dm->reset_tdma_adjust) {
 		coex_dm->reset_tdma_adjust = false;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], first run TdmaDurationAdjust()!!\n");
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], first run TdmaDurationAdjust()!!\n");
 		if (sco_hid) {
 			if (tx_pause) {
 				if (max_interval == 1) {
@@ -2195,11 +2185,11 @@
 	} else {
 		/* accquire the BT TRx retry count from BT_Info byte2 */
 		retry_count = coex_sta->bt_retry_cnt;
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], retry_count = %d\n", retry_count);
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
-			  (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], retry_count = %d\n", retry_count);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
+			    (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
 		result = 0;
 		wait_count++;
 
@@ -2220,9 +2210,8 @@
 				up = 0;
 				dn = 0;
 				result = 1;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_TRACE_FW_DETAIL,
-					  "[BTCoex], Increase wifi duration!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+					    "[BTCoex], Increase wifi duration!!\n");
 			}
 		} else if (retry_count <= 3) {
 			/* <=3 retry in the last 2-second duration */
@@ -2251,9 +2240,8 @@
 				dn = 0;
 				wait_count = 0;
 				result = -1;
-				BTC_PRINT(BTC_MSG_ALGORITHM,
-					  ALGO_TRACE_FW_DETAIL,
-					  "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+				btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+					    "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
 			}
 		} else {
 			/* retry count > 3, if retry count > 3 happens once,
@@ -2274,12 +2262,12 @@
 			dn = 0;
 			wait_count = 0;
 			result = -1;
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
 		}
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], max Interval = %d\n", max_interval);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], max Interval = %d\n", max_interval);
 		if (max_interval == 1)
 			btc8821a2_int1(btcoexist, tx_pause, result);
 		else if (max_interval == 2)
@@ -2295,9 +2283,9 @@
 	if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
 		bool	scan = false, link = false, roam = false;
 
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-			  "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
-			  coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+		btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
+			    coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
 
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
 		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2307,8 +2295,8 @@
 			halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
 						coex_dm->tdma_adj_type);
 		} else {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
-				  "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+			btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
 		}
 	}
 
@@ -3183,8 +3171,8 @@
 	u8	algorithm = 0;
 
 	if (btcoexist->manual_control) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Manual control!!!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Manual control!!!\n");
 		return;
 	}
 
@@ -3192,8 +3180,8 @@
 		BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
 
 	if (wifi_under_5g) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
 		halbtc8821a2ant_coex_under_5g(btcoexist);
 		return;
 	}
@@ -3201,81 +3189,82 @@
 	algorithm = halbtc8821a2ant_action_algorithm(btcoexist);
 	if (coex_sta->c2h_bt_inquiry_page &&
 	    (BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], BT is under inquiry/page scan !!\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], BT is under inquiry/page scan !!\n");
 		halbtc8821a2ant_bt_inquiry_page(btcoexist);
 		return;
 	}
 
 	coex_dm->cur_algorithm = algorithm;
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
 
 	if (halbtc8821a2ant_is_common_action(btcoexist)) {
-		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-			  "[BTCoex], Action 2-Ant common.\n");
+		btc_alg_dbg(ALGO_TRACE,
+			    "[BTCoex], Action 2-Ant common\n");
 		coex_dm->reset_tdma_adjust = true;
 	} else {
 		if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
-			coex_dm->pre_algorithm, coex_dm->cur_algorithm);
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
+				    coex_dm->pre_algorithm,
+				    coex_dm->cur_algorithm);
 			coex_dm->reset_tdma_adjust = true;
 		}
 		switch (coex_dm->cur_algorithm) {
 		case BT_8821A_2ANT_COEX_ALGO_SCO:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = SCO\n");
 			halbtc8821a2ant_action_sco(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_HID:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = HID\n");
 			halbtc8821a2ant_action_hid(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
 			halbtc8821a2ant_action_a2dp(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
 			halbtc8821a2ant_action_a2dp_pan_hs(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_PANEDR:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
 			halbtc8821a2ant_action_pan_edr(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_PANHS:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = HS mode.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
 			halbtc8821a2ant_action_pan_hs(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
 			halbtc8821a2ant_action_pan_edr_a2dp(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
 			halbtc8821a2ant_action_pan_edr_hid(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
 			btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist);
 			break;
 		case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
 			halbtc8821a2ant_action_hid_a2dp(btcoexist);
 			break;
 		default:
-			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-				  "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+			btc_alg_dbg(ALGO_TRACE,
+				    "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
 			halbtc8821a2ant_coex_all_off(btcoexist);
 			break;
 		}
@@ -3294,8 +3283,8 @@
 {
 	u8 u1tmp = 0;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-		  "[BTCoex], 2Ant Init HW Config!!\n");
+	btc_iface_dbg(INTF_INIT,
+		      "[BTCoex], 2Ant Init HW Config!!\n");
 
 	/* backup rf 0x1e value */
 	coex_dm->bt_rf0x1e_backup =
@@ -3328,8 +3317,8 @@
 	struct btc_coexist *btcoexist
 	)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-		  "[BTCoex], Coex Mechanism Init!!\n");
+	btc_iface_dbg(INTF_INIT,
+		      "[BTCoex], Coex Mechanism Init!!\n");
 
 	halbtc8821a2ant_init_coex_dm(btcoexist);
 }
@@ -3574,13 +3563,13 @@
 void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_IPS_ENTER == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], IPS ENTER notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], IPS ENTER notify\n");
 		coex_sta->under_ips = true;
 		halbtc8821a2ant_coex_all_off(btcoexist);
 	} else if (BTC_IPS_LEAVE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], IPS LEAVE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], IPS LEAVE notify\n");
 		coex_sta->under_ips = false;
 		/*halbtc8821a2ant_init_coex_dm(btcoexist);*/
 	}
@@ -3589,12 +3578,12 @@
 void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_LPS_ENABLE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], LPS ENABLE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], LPS ENABLE notify\n");
 		coex_sta->under_lps = true;
 	} else if (BTC_LPS_DISABLE == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], LPS DISABLE notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], LPS DISABLE notify\n");
 		coex_sta->under_lps = false;
 	}
 }
@@ -3602,22 +3591,22 @@
 void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_SCAN_START == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN START notify\n");
 	} else if (BTC_SCAN_FINISH == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], SCAN FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], SCAN FINISH notify\n");
 	}
 }
 
 void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
 	if (BTC_ASSOCIATE_START == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT START notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT START notify\n");
 	} else if (BTC_ASSOCIATE_FINISH == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], CONNECT FINISH notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], CONNECT FINISH notify\n");
 	}
 }
 
@@ -3629,11 +3618,11 @@
 	u8	wifi_central_chnl;
 
 	if (BTC_MEDIA_CONNECT == type) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA connect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA connect notify\n");
 	} else {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], MEDIA disconnect notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], MEDIA disconnect notify\n");
 	}
 
 	/* only 2.4G we need to inform bt the chnl mask*/
@@ -3654,9 +3643,11 @@
 	coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
 	coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
-		  "[BTCoex], FW write 0x66 = 0x%x\n",
-		h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+	btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+		    "[BTCoex], FW write 0x66 = 0x%x\n",
+		    h2c_parameter[0] << 16 |
+		    h2c_parameter[1] << 8 |
+		    h2c_parameter[2]);
 
 	btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -3664,8 +3655,8 @@
 void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
 					      u8 type) {
 	if (type == BTC_PACKET_DHCP) {
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-			  "[BTCoex], DHCP Packet notify\n");
+		btc_iface_dbg(INTF_NOTIFY,
+			      "[BTCoex], DHCP Packet notify\n");
 	}
 }
 
@@ -3685,19 +3676,19 @@
 		rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
 	coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-		  "[BTCoex], Bt info[%d], length = %d, hex data = [",
-		  rsp_source, length);
+	btc_iface_dbg(INTF_NOTIFY,
+		      "[BTCoex], Bt info[%d], length = %d, hex data = [",
+		      rsp_source, length);
 	for (i = 0; i < length; i++) {
 		coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
 		if (i == 1)
 			bt_info = tmp_buf[i];
 		if (i == length-1) {
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x]\n", tmp_buf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x]\n", tmp_buf[i]);
 		} else {
-			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-				  "0x%02x, ", tmp_buf[i]);
+			btc_iface_dbg(INTF_NOTIFY,
+				      "0x%02x, ", tmp_buf[i]);
 		}
 	}
 
@@ -3823,8 +3814,8 @@
 
 void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
 {
-	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
-		  "[BTCoex], Halt notify\n");
+	btc_iface_dbg(INTF_NOTIFY,
+		      "[BTCoex], Halt notify\n");
 
 	halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
 	ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3837,31 +3828,31 @@
 	struct btc_board_info *board_info = &btcoexist->board_info;
 	struct btc_stack_info *stack_info = &btcoexist->stack_info;
 
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "[BTCoex], ==========================Periodical===========================\n");
+	btc_alg_dbg(ALGO_TRACE,
+		    "[BTCoex], ==========================Periodical===========================\n");
 
 	if (dis_ver_info_cnt <= 5) {
 		dis_ver_info_cnt += 1;
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], ****************************************************************\n");
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-			  board_info->pg_ant_num,
-			  board_info->btdm_ant_num,
-			  board_info->btdm_ant_pos);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-			  ((stack_info->profile_notified) ? "Yes" : "No"),
-			  stack_info->hci_version);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], ****************************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+			      board_info->pg_ant_num,
+			      board_info->btdm_ant_num,
+			      board_info->btdm_ant_pos);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+			      stack_info->profile_notified ? "Yes" : "No",
+			      stack_info->hci_version);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
 				   &bt_patch_ver);
 		btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-			  glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
-			  fw_ver, bt_patch_ver, bt_patch_ver);
-		BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
-			  "[BTCoex], ****************************************************************\n");
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+			      glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
+			      fw_ver, bt_patch_ver, bt_patch_ver);
+		btc_iface_dbg(INTF_INIT,
+			      "[BTCoex], ****************************************************************\n");
 	}
 
 	halbtc8821a2ant_query_bt_info(btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index b2791c8..b660c21 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -141,8 +141,8 @@
 
 	if (rtlphy->current_channel != 0)
 		chnl = rtlphy->current_channel;
-	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
-		  "static halbtc_get_wifi_central_chnl:%d\n", chnl);
+	btc_alg_dbg(ALGO_TRACE,
+		    "static halbtc_get_wifi_central_chnl:%d\n", chnl);
 	return chnl;
 }
 
@@ -965,13 +965,38 @@
 	}
 }
 
-void exhalbtc_set_ant_num(u8 type, u8 ant_num)
+void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
 {
 	if (BT_COEX_ANT_TYPE_PG == type) {
 		gl_bt_coexist.board_info.pg_ant_num = ant_num;
 		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+		/* The antenna position:
+		 * Main (default) or Aux for pgAntNum=2 && btdmAntNum =1.
+		 * The antenna position should be determined by
+		 * auto-detect mechanism.
+		 * The following is assumed to be Main,
+		 * and must be modified
+		 * when the auto-detect mechanism is ready.
+		 */
+		if ((gl_bt_coexist.board_info.pg_ant_num == 2) &&
+		    (gl_bt_coexist.board_info.btdm_ant_num == 1))
+			gl_bt_coexist.board_info.btdm_ant_pos =
+						       BTC_ANTENNA_AT_MAIN_PORT;
+		else
+			gl_bt_coexist.board_info.btdm_ant_pos =
+						       BTC_ANTENNA_AT_MAIN_PORT;
 	} else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
 		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+		gl_bt_coexist.board_info.btdm_ant_pos =
+						       BTC_ANTENNA_AT_MAIN_PORT;
+	} else if (type == BT_COEX_ANT_TYPE_DETECTED) {
+		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+		if (rtlpriv->cfg->mod_params->ant_sel == 1)
+			gl_bt_coexist.board_info.btdm_ant_pos =
+				BTC_ANTENNA_AT_AUX_PORT;
+		else
+			gl_bt_coexist.board_info.btdm_ant_pos =
+				BTC_ANTENNA_AT_MAIN_PORT;
 	}
 }
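
The new BT_COEX_ANT_TYPE_DETECTED branch selects the antenna port from the
ant_sel module parameter: 1 routes the BT/DM antenna to the auxiliary port,
anything else stays on the main port, and the PG/antenna-diversity cases keep
the main port as before.  A stand-alone sketch of that decision, using
hypothetical names rather than the driver's structures:

#include <stdio.h>

/*
 * Illustrative model (not driver code) of the antenna-position choice:
 * once auto-detection is reported, ant_sel=1 moves the BT/DM antenna to
 * the auxiliary port; everything else keeps the main port.
 */
enum ant_pos { ANT_AT_MAIN_PORT, ANT_AT_AUX_PORT };

static enum ant_pos pick_ant_pos(int detected, int ant_sel)
{
	if (detected && ant_sel == 1)
		return ANT_AT_AUX_PORT;
	return ANT_AT_MAIN_PORT;	/* PG / antdiv cases, or ant_sel != 1 */
}

int main(void)
{
	printf("detected, ant_sel=1 -> %s\n",
	       pick_ant_pos(1, 1) == ANT_AT_AUX_PORT ? "aux" : "main");
	printf("detected, ant_sel=2 -> %s\n",
	       pick_ant_pos(1, 2) == ANT_AT_AUX_PORT ? "aux" : "main");
	return 0;
}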
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index 0a903ea..3cbe34c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -116,12 +116,17 @@
 #define		WIFI_P2P_GO_CONNECTED			BIT3
 #define		WIFI_P2P_GC_CONNECTED			BIT4
 
-#define	BTC_PRINT(dbgtype, dbgflag, printstr, ...)		\
-	do {							\
-		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
-			printk(printstr, ##__VA_ARGS__);	\
-		}						\
-	} while (0)
+#define	btc_alg_dbg(dbgflag, fmt, ...)					\
+do {									\
+	if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbgflag))	\
+		printk(KERN_DEBUG fmt, ##__VA_ARGS__);			\
+} while (0)
+#define	btc_iface_dbg(dbgflag, fmt, ...)				\
+do {									\
+	if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbgflag))	\
+		printk(KERN_DEBUG fmt, ##__VA_ARGS__);			\
+} while (0)
+
 
 #define	BTC_RSSI_HIGH(_rssi_)	\
 	((_rssi_ == BTC_RSSI_STATE_HIGH ||	\
@@ -535,7 +540,7 @@
 void exhalbtc_update_min_bt_rssi(char bt_rssi);
 void exhalbtc_set_bt_exist(bool bt_exist);
 void exhalbtc_set_chip_type(u8 chip_type);
-void exhalbtc_set_ant_num(u8 type, u8 ant_num);
+void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
 void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
 void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
 				  u8 *rssi_wifi, u8 *rssi_bt);
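
The btc_alg_dbg()/btc_iface_dbg() pair keeps BTC_PRINT's two-level filter but
bakes the message category into the macro name, so each call site passes only
the fine-grained flag and a KERN_DEBUG-level message.  A minimal user-space
model of that gating, with illustrative names only:

#include <stdio.h>

/* Stand-alone model of the per-category debug macros: one flag word per
 * message category, and the macro prints only when the requested sub-flag
 * is enabled for that category.
 */
enum { MSG_ALGORITHM, MSG_INTERFACE, MSG_MAX };

static unsigned int dbg_type[MSG_MAX] = {
	[MSG_ALGORITHM] = 0x1,	/* trace flag enabled */
	[MSG_INTERFACE] = 0x0,	/* all interface messages off */
};

#define alg_dbg(flag, fmt, ...)					\
do {								\
	if (dbg_type[MSG_ALGORITHM] & (flag))			\
		printf(fmt, ##__VA_ARGS__);			\
} while (0)

#define iface_dbg(flag, fmt, ...)				\
do {								\
	if (dbg_type[MSG_INTERFACE] & (flag))			\
		printf(fmt, ##__VA_ARGS__);			\
} while (0)

int main(void)
{
	alg_dbg(0x1, "algorithm trace: action = %d\n", 2);	/* printed */
	iface_dbg(0x1, "interface notify\n");			/* filtered out */
	return 0;
}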
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index b9b0cb7..d3fd921 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -72,7 +72,10 @@
 		 __func__, bt_type);
 	exhalbtc_set_chip_type(bt_type);
 
-	exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
+	if (rtlpriv->cfg->mod_params->ant_sel == 1)
+		exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_DETECTED, 1);
+	else
+		exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
 }
 
 void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 283d608..1ac41b8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -359,30 +359,28 @@
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
 	bool find_buddy_priv = false;
-	struct rtl_priv *tpriv = NULL;
+	struct rtl_priv *tpriv;
 	struct rtl_pci_priv *tpcipriv = NULL;
 
 	if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
 		list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
 				    list) {
-			if (tpriv) {
-				tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
-				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
-					 "pcipriv->ndis_adapter.funcnumber %x\n",
-					pcipriv->ndis_adapter.funcnumber);
-				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
-					 "tpcipriv->ndis_adapter.funcnumber %x\n",
-					tpcipriv->ndis_adapter.funcnumber);
+			tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
+			RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+				 "pcipriv->ndis_adapter.funcnumber %x\n",
+				pcipriv->ndis_adapter.funcnumber);
+			RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+				 "tpcipriv->ndis_adapter.funcnumber %x\n",
+				tpcipriv->ndis_adapter.funcnumber);
 
-				if ((pcipriv->ndis_adapter.busnumber ==
-				     tpcipriv->ndis_adapter.busnumber) &&
-				    (pcipriv->ndis_adapter.devnumber ==
-				    tpcipriv->ndis_adapter.devnumber) &&
-				    (pcipriv->ndis_adapter.funcnumber !=
-				    tpcipriv->ndis_adapter.funcnumber)) {
-					find_buddy_priv = true;
-					break;
-				}
+			if ((pcipriv->ndis_adapter.busnumber ==
+			     tpcipriv->ndis_adapter.busnumber) &&
+			    (pcipriv->ndis_adapter.devnumber ==
+			    tpcipriv->ndis_adapter.devnumber) &&
+			    (pcipriv->ndis_adapter.funcnumber !=
+			    tpcipriv->ndis_adapter.funcnumber)) {
+				find_buddy_priv = true;
+				break;
 			}
 		}
 	}
@@ -1213,7 +1211,8 @@
 	/*Tx/Rx related var */
 	_rtl_pci_init_trx_var(hw);
 
-	/*IBSS*/ mac->beacon_interval = 100;
+	/*IBSS*/
+	mac->beacon_interval = 100;
 
 	/*AMPDU*/
 	mac->min_space_cfg = 0;
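
The de-indented buddy-device lookup above relies on list_for_each_entry()
never producing a NULL cursor: each iteration derives the entry from a live
list node via container_of(), so the removed NULL test could never fire.  A
self-contained model of that iterator, with local definitions standing in for
the kernel's list helpers:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Kernel-style iterator: the cursor is always a real containing object. */
#define list_for_each_entry(pos, head, member)				  \
	for (pos = container_of((head)->next, __typeof__(*pos), member); \
	     &pos->member != (head);					  \
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

struct item { int id; struct list_head list; };

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct item a = { .id = 1 }, b = { .id = 2 };
	struct item *pos;

	list_add_tail(&a.list, &head);
	list_add_tail(&b.list, &head);

	list_for_each_entry(pos, &head, list)
		printf("item %d\n", pos->id);	/* pos is never NULL here */
	return 0;
}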
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 5be3411..3524441 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -154,13 +154,13 @@
 static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
 					   enum nl80211_reg_initiator initiator)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_supported_band *sband;
 	const struct ieee80211_reg_rule *reg_rule;
 	struct ieee80211_channel *ch;
 	unsigned int i;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 
 		if (!wiphy->bands[band])
 			continue;
@@ -210,9 +210,9 @@
 	struct ieee80211_channel *ch;
 	const struct ieee80211_reg_rule *reg_rule;
 
-	if (!wiphy->bands[IEEE80211_BAND_2GHZ])
+	if (!wiphy->bands[NL80211_BAND_2GHZ])
 		return;
-	sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+	sband = wiphy->bands[NL80211_BAND_2GHZ];
 
 	/*
 	 *If no country IE has been received always enable active scan
@@ -262,10 +262,10 @@
 	struct ieee80211_channel *ch;
 	unsigned int i;
 
-	if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+	if (!wiphy->bands[NL80211_BAND_5GHZ])
 		return;
 
-	sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+	sband = wiphy->bands[NL80211_BAND_5GHZ];
 
 	for (i = 0; i < sband->n_channels; i++) {
 		ch = &sband->channels[i];
@@ -301,12 +301,12 @@
 
 static void _rtl_dump_channel_map(struct wiphy *wiphy)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_channel *ch;
 	unsigned int i;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		if (!wiphy->bands[band])
 			continue;
 		sband = wiphy->bands[band];
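
From here on the drivers switch from the IEEE80211_BAND_* constants to the
shared nl80211_band enum, so band loops run to NUM_NL80211_BANDS and skip
entries the wiphy leaves NULL.  A reduced sketch of that iteration pattern,
using stand-in types instead of the real wiphy:

#include <stdio.h>

/* Stand-ins for the nl80211 band enum and a wiphy-like band array. */
enum band { BAND_2GHZ, BAND_5GHZ, BAND_60GHZ, NUM_BANDS };

struct supported_band {
	const char *name;
	int n_channels;
};

int main(void)
{
	static struct supported_band b2 = { "2.4 GHz", 14 };
	static struct supported_band b5 = { "5 GHz", 25 };
	struct supported_band *bands[NUM_BANDS] = {
		[BAND_2GHZ] = &b2,
		[BAND_5GHZ] = &b5,
		/* 60 GHz left NULL: unsupported, so the loop skips it */
	};
	enum band band;

	for (band = 0; band < NUM_BANDS; band++) {
		if (!bands[band])
			continue;
		printf("%s: %d channels\n", bands[band]->name,
		       bands[band]->n_channels);
	}
	return 0;
}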
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index ce4da9d..db9a782 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -1137,7 +1137,7 @@
 	} else {
 		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
 			 "Schedule TxPowerTracking !!\n");
-				dm_txpower_track_cb_therm(hw);
+		dm_txpower_track_cb_therm(hw);
 		rtlpriv->dm.tm_trigger = 0;
 	}
 }
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index a2bb02c..416a9ba 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -1903,8 +1903,7 @@
 	} else {
 		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
 	}
-RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
-
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
 }
 
 static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 03cbe4c..316be5f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -240,7 +240,7 @@
 	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
 	falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
 
-	 ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
+	ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
 	falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
 	falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 24eff8e..35e6bf7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -368,7 +368,7 @@
 	status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
 	status->rate = (u8)GET_RX_DESC_RXMCS(pdesc);
 	status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
-		status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+	status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
 	status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate);
 
 	status->macid = GET_RX_DESC_MACID(pdesc);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index 4b4612f..881821f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -645,7 +645,7 @@
 				 rtlpriv->psc.state_inap);
 			ppsc->last_sleep_jiffies = jiffies;
 			_rtl92se_phy_set_rf_sleep(hw);
-	    break;
+			break;
 	default:
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 			 "switch case not processed\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
index 00a0531..44de695 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
@@ -134,9 +134,9 @@
 	if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
 		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
 			"Need to decrease bt power\n");
-			rtlpriv->btcoexist.cstate |=
-			BT_COEX_STATE_DEC_BT_POWER;
-			return true;
+		rtlpriv->btcoexist.cstate |=
+		BT_COEX_STATE_DEC_BT_POWER;
+		return true;
 	}
 
 	rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index c983d2f..5a3df91 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -2684,6 +2684,7 @@
 					      bool auto_load_fail, u8 *hwinfo)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
 	u8 value;
 	u32 tmpu_32;
 
@@ -2702,6 +2703,10 @@
 		rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
 	}
 
+	/* override ant_num / ant_path */
+	if (mod_params->ant_sel)
+		rtlpriv->btcoexist.btc_info.ant_num =
+			(mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
 }
 
 void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
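
The override above maps ant_sel onto the antenna count read from EFUSE: 0
(the default) keeps the EFUSE value, 1 forces the two-antenna setting, and
any other non-zero value forces the single-antenna one.  A stand-alone model
of that mapping, with illustrative names:

#include <stdio.h>

/*
 * Sketch of the ant_sel override (not the driver's code): the EFUSE result
 * is only replaced when the module parameter is non-zero.
 */
enum ant_cfg { ANT_X1 = 1, ANT_X2 = 2 };

static enum ant_cfg apply_ant_sel(enum ant_cfg efuse_ant, int ant_sel)
{
	if (!ant_sel)
		return efuse_ant;	/* default: trust EFUSE */
	return ant_sel == 1 ? ANT_X2 : ANT_X1;
}

int main(void)
{
	printf("ant_sel=0: %d antenna(s)\n", apply_ant_sel(ANT_X2, 0));
	printf("ant_sel=1: %d antenna(s)\n", apply_ant_sel(ANT_X1, 1));
	printf("ant_sel=2: %d antenna(s)\n", apply_ant_sel(ANT_X2, 2));
	return 0;
}

Leaving 0 as the default keeps the existing EFUSE-driven behaviour for users
who never set the parameter.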
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index b7b73cb..445f681 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -1723,8 +1723,8 @@
 
 	/* Allen 20131125 */
 	tmp = (reg_eac & 0x03FF0000) >> 16;
-		if ((tmp & 0x200) > 0)
-			tmp = 0x400 - tmp;
+	if ((tmp & 0x200) > 0)
+		tmp = 0x400 - tmp;
 	/* if Tx is OK, check whether Rx is OK */
 	if (!(reg_eac & BIT(27)) &&
 	    (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
@@ -2301,8 +2301,7 @@
 	} else {
 		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
 	}
-RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
-
+	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
 }
 
 static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
@@ -2606,8 +2605,7 @@
 					 "IPS Set eRf nic enable\n");
 				rtstatus = rtl_ps_enable_nic(hw);
 			} while (!rtstatus && (initializecount < 10));
-				RT_CLEAR_PS_LEVEL(ppsc,
-						  RT_RF_OFF_LEVL_HALT_NIC);
+			RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
 		} else {
 			RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
 				 "Set ERFON sleeped:%d ms\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
index 5ed4492..97f5a03 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
@@ -303,8 +303,8 @@
 					   [chnlgroup][index + (rf ? 8 : 0)] &
 					      (0x7f << (i * 8))) >> (i * 8));
 
-					if (pwr_diff_limit[i] > pwr_diff)
-						pwr_diff_limit[i] = pwr_diff;
+				if (pwr_diff_limit[i] > pwr_diff)
+					pwr_diff_limit[i] = pwr_diff;
 			}
 
 			customer_limit = (pwr_diff_limit[3] << 24) |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index a78eaed..2101793 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -273,6 +273,7 @@
 	.msi_support = false,
 	.disable_watchdog = false,
 	.debug = DBG_EMERG,
+	.ant_sel = 0,
 };
 
 static struct rtl_hal_cfg rtl8723be_hal_cfg = {
@@ -394,6 +395,7 @@
 module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
 		   bool, 0444);
+module_param_named(ant_sel, rtl8723be_mod_params.ant_sel, int, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
@@ -402,6 +404,7 @@
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 MODULE_PARM_DESC(disable_watchdog,
 		 "Set to 1 to disable the watchdog (default 0)\n");
+MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 95dcbff..17a6817 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -1957,9 +1957,9 @@
 				rtldm->swing_idx_ofdm_base[p] =
 					rtldm->swing_idx_ofdm[p];
 
-			RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-				 "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
-				 rtldm->thermalvalue, thermal_value);
+		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+			 "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
+			 rtldm->thermalvalue, thermal_value);
 		/*Record last Power Tracking Thermal Value*/
 		rtldm->thermalvalue = thermal_value;
 	}
@@ -2488,9 +2488,9 @@
 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
 			rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
 
-			RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-				 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
-				 rtldm->thermalvalue, thermal_value);
+		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+			 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+			 rtldm->thermalvalue, thermal_value);
 		/*Record last Power Tracking Thermal Value*/
 		rtldm->thermalvalue = thermal_value;
 	}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index fe900ba..71e4dd9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2315,14 +2315,14 @@
 
 	pci_read_config_byte(rtlpci->pdev, 0x34, &cap_pointer);
 	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
-		 "PCI configration 0x34 = 0x%2x\n", cap_pointer);
+		 "PCI configuration 0x34 = 0x%2x\n", cap_pointer);
 
 	do {
 		pci_read_config_word(rtlpci->pdev, cap_pointer, &cap_hdr);
 		cap_id = cap_hdr & 0xFF;
 
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
-			 "in pci configration, cap_pointer%x = %x\n",
+			 "in pci configuration, cap_pointer%x = %x\n",
 			  cap_pointer, cap_id);
 
 		if (cap_id == 0x01) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 74165b3..0c3b9ce 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -418,9 +418,9 @@
 			out = 0x16A; /* -3 dB */
 		}
 	} else {
-	    u32 swing = 0, swing_a = 0, swing_b = 0;
+		u32 swing = 0, swing_a = 0, swing_b = 0;
 
-	    if (band == BAND_ON_2_4G) {
+		if (band == BAND_ON_2_4G) {
 			if (reg_swing_2g == auto_temp) {
 				efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing);
 				swing = (swing == 0xFF) ? 0x00 : swing;
@@ -514,7 +514,7 @@
 
 	RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
 		 "<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out);
-	 return out;
+	return out;
 }
 
 void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
@@ -959,7 +959,7 @@
 static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start,
 						u8 end, u8 base_val)
 {
-	char i = 0;
+	int i;
 	u8 temp_value = 0;
 	u32 temp_data = 0;
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 554d814..11d9c23 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1359,7 +1359,7 @@
 	u32 tx_ss_num;
 	u32 rx_ss_num;
 
-	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+	struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
 	struct ieee80211_hw *hw;
 	struct ieee80211_vif *vif;
 	enum nl80211_iftype opmode;
@@ -2246,6 +2246,9 @@
 
 	/* default 0: 1 means do not disable interrupts */
 	bool int_clear;
+
+	/* select antenna */
+	int ant_sel;
 };
 
 struct rtl_hal_usbint_cfg {
@@ -2867,7 +2870,7 @@
 	(ppsc->cur_ps_level |= _ps_flg)
 
 #define container_of_dwork_rtl(x, y, z) \
-	container_of(container_of(x, struct delayed_work, work), y, z)
+	container_of(to_delayed_work(x), y, z)
 
 #define FILL_OCTET_STRING(_os, _octet, _len)	\
 		(_os).octet = (u8 *)(_octet);		\
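
container_of_dwork_rtl() and the work handlers later in this series now go
through to_delayed_work(), which is simply a typed wrapper around the same
container_of() arithmetic over the embedded work member.  A small user-space
model of that equivalence, with local struct definitions in place of the
kernel's:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Local stand-ins for the kernel's work structures. */
struct work_struct { int pending; };
struct delayed_work {
	struct work_struct work;
	unsigned long expires;
};

/* Same shape as the kernel helper: recover the enclosing delayed_work. */
static struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

int main(void)
{
	struct delayed_work dwork = { .expires = 42 };
	struct work_struct *w = &dwork.work;

	/* Both expressions yield the same enclosing object (prints 1). */
	printf("%d\n", to_delayed_work(w) == &dwork);
	printf("%d\n", container_of(w, struct delayed_work, work) == &dwork);
	return 0;
}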
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index a13d1f2..569918c 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1291,7 +1291,7 @@
 		return 0;
 
 	dsconfig = 1000 *
-		ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+		ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
 
 	len = sizeof(config);
 	ret = rndis_query_oid(usbdev,
@@ -3476,7 +3476,7 @@
 	priv->band.n_channels = ARRAY_SIZE(rndis_channels);
 	priv->band.bitrates = priv->rates;
 	priv->band.n_bitrates = ARRAY_SIZE(rndis_rates);
-	wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+	wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 	wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
 
 	memcpy(priv->cipher_suites, rndis_cipher_suites,
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 4df992d..dbb2389 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -20,84 +20,84 @@
 #include "rsi_common.h"
 
 static const struct ieee80211_channel rsi_2ghz_channels[] = {
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2412,
 	  .hw_value = 1 }, /* Channel 1 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2417,
 	  .hw_value = 2 }, /* Channel 2 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2422,
 	  .hw_value = 3 }, /* Channel 3 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2427,
 	  .hw_value = 4 }, /* Channel 4 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2432,
 	  .hw_value = 5 }, /* Channel 5 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2437,
 	  .hw_value = 6 }, /* Channel 6 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2442,
 	  .hw_value = 7 }, /* Channel 7 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2447,
 	  .hw_value = 8 }, /* Channel 8 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2452,
 	  .hw_value = 9 }, /* Channel 9 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2457,
 	  .hw_value = 10 }, /* Channel 10 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2462,
 	  .hw_value = 11 }, /* Channel 11 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2467,
 	  .hw_value = 12 }, /* Channel 12 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2472,
 	  .hw_value = 13 }, /* Channel 13 */
-	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+	{ .band = NL80211_BAND_2GHZ, .center_freq = 2484,
 	  .hw_value = 14 }, /* Channel 14 */
 };
 
 static const struct ieee80211_channel rsi_5ghz_channels[] = {
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5180,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5180,
 	  .hw_value = 36,  }, /* Channel 36 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5200,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5200,
 	  .hw_value = 40, }, /* Channel 40 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5220,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5220,
 	  .hw_value = 44, }, /* Channel 44 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5240,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5240,
 	  .hw_value = 48, }, /* Channel 48 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5260,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5260,
 	  .hw_value = 52, }, /* Channel 52 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5280,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5280,
 	  .hw_value = 56, }, /* Channel 56 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5300,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5300,
 	  .hw_value = 60, }, /* Channel 60 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5320,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5320,
 	  .hw_value = 64, }, /* Channel 64 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5500,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5500,
 	  .hw_value = 100, }, /* Channel 100 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5520,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5520,
 	  .hw_value = 104, }, /* Channel 104 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5540,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5540,
 	  .hw_value = 108, }, /* Channel 108 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5560,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5560,
 	  .hw_value = 112, }, /* Channel 112 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5580,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5580,
 	  .hw_value = 116, }, /* Channel 116 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5600,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5600,
 	  .hw_value = 120, }, /* Channel 120 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5620,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5620,
 	  .hw_value = 124, }, /* Channel 124 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5640,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5640,
 	  .hw_value = 128, }, /* Channel 128 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5660,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5660,
 	  .hw_value = 132, }, /* Channel 132 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5680,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5680,
 	  .hw_value = 136, }, /* Channel 136 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5700,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5700,
 	  .hw_value = 140, }, /* Channel 140 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5745,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5745,
 	  .hw_value = 149, }, /* Channel 149 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5765,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5765,
 	  .hw_value = 153, }, /* Channel 153 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5785,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5785,
 	  .hw_value = 157, }, /* Channel 157 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5805,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5805,
 	  .hw_value = 161, }, /* Channel 161 */
-	{ .band = IEEE80211_BAND_5GHZ, .center_freq = 5825,
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5825,
 	  .hw_value = 165, }, /* Channel 165 */
 };
 
@@ -150,12 +150,12 @@
 	struct ieee80211_supported_band *sbands = &adapter->sbands[band];
 	void *channels = NULL;
 
-	if (band == IEEE80211_BAND_2GHZ) {
+	if (band == NL80211_BAND_2GHZ) {
 		channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
 		memcpy(channels,
 		       rsi_2ghz_channels,
 		       sizeof(rsi_2ghz_channels));
-		sbands->band = IEEE80211_BAND_2GHZ;
+		sbands->band = NL80211_BAND_2GHZ;
 		sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
 		sbands->bitrates = rsi_rates;
 		sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
@@ -164,7 +164,7 @@
 		memcpy(channels,
 		       rsi_5ghz_channels,
 		       sizeof(rsi_5ghz_channels));
-		sbands->band = IEEE80211_BAND_5GHZ;
+		sbands->band = NL80211_BAND_5GHZ;
 		sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
 		sbands->bitrates = &rsi_rates[4];
 		sbands->n_bitrates = ARRAY_SIZE(rsi_rates) - 4;
@@ -775,7 +775,7 @@
 {
 	struct rsi_hw *adapter = hw->priv;
 	struct rsi_common *common = adapter->priv;
-	enum ieee80211_band band = hw->conf.chandef.chan->band;
+	enum nl80211_band band = hw->conf.chandef.chan->band;
 
 	mutex_lock(&common->mutex);
 	common->fixedrate_mask[band] = 0;
@@ -999,8 +999,8 @@
 
 	mutex_lock(&common->mutex);
 	/* Resetting all the fields to default values */
-	common->bitrate_mask[IEEE80211_BAND_2GHZ] = 0;
-	common->bitrate_mask[IEEE80211_BAND_5GHZ] = 0;
+	common->bitrate_mask[NL80211_BAND_2GHZ] = 0;
+	common->bitrate_mask[NL80211_BAND_5GHZ] = 0;
 	common->min_rate = 0xffff;
 	common->vif_info[0].is_ht = false;
 	common->vif_info[0].sgi = false;
@@ -1070,8 +1070,8 @@
 	hw->max_rate_tries = MAX_RETRIES;
 
 	hw->max_tx_aggregation_subframes = 6;
-	rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ);
-	rsi_register_rates_channels(adapter, IEEE80211_BAND_5GHZ);
+	rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+	rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
 	hw->rate_control_algorithm = "AARF";
 
 	SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
@@ -1087,10 +1087,10 @@
 
 	wiphy->available_antennas_rx = 1;
 	wiphy->available_antennas_tx = 1;
-	wiphy->bands[IEEE80211_BAND_2GHZ] =
-		&adapter->sbands[IEEE80211_BAND_2GHZ];
-	wiphy->bands[IEEE80211_BAND_5GHZ] =
-		&adapter->sbands[IEEE80211_BAND_5GHZ];
+	wiphy->bands[NL80211_BAND_2GHZ] =
+		&adapter->sbands[NL80211_BAND_2GHZ];
+	wiphy->bands[NL80211_BAND_5GHZ] =
+		&adapter->sbands[NL80211_BAND_5GHZ];
 
 	status = ieee80211_register_hw(hw);
 	if (status)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index e43b59d..40658b6 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -210,7 +210,7 @@
  */
 static void rsi_set_default_parameters(struct rsi_common *common)
 {
-	common->band = IEEE80211_BAND_2GHZ;
+	common->band = NL80211_BAND_2GHZ;
 	common->channel_width = BW_20MHZ;
 	common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
 	common->channel = 1;
@@ -655,7 +655,7 @@
 	vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold);
 	vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6);
 
-	if (common->band == IEEE80211_BAND_5GHZ) {
+	if (common->band == NL80211_BAND_5GHZ) {
 		vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6);
 		if (conf_is_ht40(&common->priv->hw->conf)) {
 			vap_caps->default_ctrl_rate |=
@@ -872,7 +872,7 @@
 	else
 		common->channel_width = BW_40MHZ;
 
-	if (common->band == IEEE80211_BAND_2GHZ) {
+	if (common->band == NL80211_BAND_2GHZ) {
 		if (common->channel_width)
 			common->endpoint = EP_2GHZ_40MHZ;
 		else
@@ -1046,7 +1046,7 @@
 	if (common->channel_width == BW_40MHZ)
 		auto_rate->desc_word[7] |= cpu_to_le16(1);
 
-	if (band == IEEE80211_BAND_2GHZ) {
+	if (band == NL80211_BAND_2GHZ) {
 		min_rate = RSI_RATE_1;
 		rate_table_offset = 0;
 	} else {
diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c
index 702593f..02920c9 100644
--- a/drivers/net/wireless/rsi/rsi_91x_pkt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c
@@ -27,22 +27,24 @@
 int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
 {
 	struct rsi_hw *adapter = common->priv;
-	struct ieee80211_hdr *tmp_hdr = NULL;
+	struct ieee80211_hdr *tmp_hdr;
 	struct ieee80211_tx_info *info;
 	struct skb_info *tx_params;
-	struct ieee80211_bss_conf *bss = NULL;
-	int status = -EINVAL;
+	struct ieee80211_bss_conf *bss;
+	int status;
 	u8 ieee80211_size = MIN_802_11_HDR_LEN;
-	u8 extnd_size = 0;
+	u8 extnd_size;
 	__le16 *frame_desc;
-	u16 seq_num = 0;
+	u16 seq_num;
 
 	info = IEEE80211_SKB_CB(skb);
 	bss = &info->control.vif->bss_conf;
 	tx_params = (struct skb_info *)info->driver_data;
 
-	if (!bss->assoc)
+	if (!bss->assoc) {
+		status = -EINVAL;
 		goto err;
+	}
 
 	tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
 	seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
@@ -123,15 +125,15 @@
 		      struct sk_buff *skb)
 {
 	struct rsi_hw *adapter = common->priv;
-	struct ieee80211_hdr *wh = NULL;
+	struct ieee80211_hdr *wh;
 	struct ieee80211_tx_info *info;
-	struct ieee80211_bss_conf *bss = NULL;
+	struct ieee80211_bss_conf *bss;
 	struct ieee80211_hw *hw = adapter->hw;
 	struct ieee80211_conf *conf = &hw->conf;
 	struct skb_info *tx_params;
 	int status = -E2BIG;
-	__le16 *msg = NULL;
-	u8 extnd_size = 0;
+	__le16 *msg;
+	u8 extnd_size;
 	u8 vap_id = 0;
 
 	info = IEEE80211_SKB_CB(skb);
@@ -182,7 +184,7 @@
 	if (wh->addr1[0] & BIT(0))
 		msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);
 
-	if (common->band == IEEE80211_BAND_2GHZ)
+	if (common->band == NL80211_BAND_2GHZ)
 		msg[4] = cpu_to_le16(RSI_11B_MODE);
 	else
 		msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
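
rsi_send_data_pkt() above now leaves its locals uninitialized until first use
and assigns the error code in the branch that detects the failure, instead of
pre-loading status with -EINVAL.  A tiny sketch of that goto-based error-path
style, with placeholder logic only:

#include <stdio.h>
#include <errno.h>

/* Placeholder sketch (not the driver function): set the status at the
 * point of failure and fall through a single exit label.
 */
static int send_pkt(int assoc, int len)
{
	int status;

	if (!assoc) {
		status = -EINVAL;
		goto err;
	}
	if (len <= 0) {
		status = -E2BIG;
		goto err;
	}

	/* ... build and queue the frame here ... */
	status = 0;
err:
	return status;
}

int main(void)
{
	printf("not associated: %d\n", send_pkt(0, 100));
	printf("ok: %d\n", send_pkt(1, 100));
	return 0;
}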
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 5baed94..dcd0957 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -211,7 +211,7 @@
 	struct ieee80211_hw *hw;
 	struct ieee80211_vif *vifs[RSI_MAX_VIFS];
 	struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
-	struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+	struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
 
 	struct device *device;
 	u8 sc_nvifs;
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 0e51e27..dc478ce 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -102,7 +102,7 @@
 
 
 #define CHAN2G(_channel, _freq, _flags) {			\
-	.band			= IEEE80211_BAND_2GHZ,		\
+	.band			= NL80211_BAND_2GHZ,		\
 	.center_freq		= (_freq),			\
 	.hw_value		= (_channel),			\
 	.flags			= (_flags),			\
@@ -111,7 +111,7 @@
 }
 
 #define CHAN5G(_channel, _flags) {				\
-	.band			= IEEE80211_BAND_5GHZ,		\
+	.band			= NL80211_BAND_5GHZ,		\
 	.center_freq	= 5000 + (5 * (_channel)),		\
 	.hw_value		= (_channel),			\
 	.flags			= (_flags),			\
@@ -311,12 +311,12 @@
 
 	hw->sta_data_size = sizeof(struct cw1200_sta_priv);
 
-	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
+	hw->wiphy->bands[NL80211_BAND_2GHZ] = &cw1200_band_2ghz;
 	if (have_5ghz)
-		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;
+		hw->wiphy->bands[NL80211_BAND_5GHZ] = &cw1200_band_5ghz;
 
 	/* Channel params have to be cleared before registering wiphy again */
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
 		if (!sband)
 			continue;
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index bff81b8..9837881 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -402,7 +402,7 @@
 	}
 	wsm = (struct wsm_tx *)frame.skb->data;
 	scan.max_tx_rate = wsm->max_tx_rate;
-	scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+	scan.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
 		WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
 	if (priv->join_status == CW1200_JOIN_STATUS_STA ||
 	    priv->join_status == CW1200_JOIN_STATUS_IBSS) {
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index d0ddcde..daf06a4 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -1278,7 +1278,7 @@
 	join.dtim_period = priv->join_dtim_period;
 
 	join.channel_number = priv->channel->hw_value;
-	join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+	join.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
 		WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
 
 	memcpy(join.bssid, bssid, sizeof(join.bssid));
@@ -1462,7 +1462,7 @@
 	};
 
 	if (priv->channel) {
-		start.band = priv->channel->band == IEEE80211_BAND_5GHZ ?
+		start.band = priv->channel->band == NL80211_BAND_5GHZ ?
 			     WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
 		start.channel_number = priv->channel->hw_value;
 	} else {
@@ -2315,7 +2315,7 @@
 	struct wsm_start start = {
 		.mode = priv->vif->p2p ?
 				WSM_START_MODE_P2P_GO : WSM_START_MODE_AP,
-		.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+		.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
 				WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G,
 		.channel_number = priv->channel->hw_value,
 		.beacon_interval = conf->beacon_int,
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index d28bd49..3d17028 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -1079,7 +1079,7 @@
 
 	hdr->band = ((arg->channel_number & 0xff00) ||
 		     (arg->channel_number > 14)) ?
-			IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+			NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
 	hdr->freq = ieee80211_channel_to_frequency(
 			arg->channel_number,
 			hdr->band);
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 9e0ca30..680d60e 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -849,9 +849,9 @@
 
 	/* Disable unsupported frequency bands */
 	if (!(priv->wsm_caps.fw_cap & 0x1))
-		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+		priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
 	if (!(priv->wsm_caps.fw_cap & 0x2))
-		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+		priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
 
 	priv->firmware_ready = 1;
 	wake_up(&priv->wsm_startup_done);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index cd47779..56384a4e 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1482,7 +1482,7 @@
 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 					 BIT(NL80211_IFTYPE_ADHOC);
 	wl->hw->wiphy->max_scan_ssids = 1;
-	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
+	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wl1251_band_2ghz;
 
 	wl->hw->queues = 4;
 
diff --git a/drivers/net/wireless/ti/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c
index b9e27b9..fa01b0a 100644
--- a/drivers/net/wireless/ti/wl1251/ps.c
+++ b/drivers/net/wireless/ti/wl1251/ps.c
@@ -32,7 +32,7 @@
 	struct delayed_work *dwork;
 	struct wl1251 *wl;
 
-	dwork = container_of(work, struct delayed_work, work);
+	dwork = to_delayed_work(work);
 	wl = container_of(dwork, struct wl1251, elp_work);
 
 	wl1251_debug(DEBUG_PSM, "elp work");
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index cde0eaf..a27d4c2 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -53,7 +53,7 @@
 
 	memset(status, 0, sizeof(struct ieee80211_rx_status));
 
-	status->band = IEEE80211_BAND_2GHZ;
+	status->band = NL80211_BAND_2GHZ;
 	status->mactime = desc->timestamp;
 
 	/*
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index a0d6ccc..58b9d3c 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -469,8 +469,8 @@
 };
 
 static const u8 *wl12xx_band_rate_to_idx[] = {
-	[IEEE80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
-	[IEEE80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
+	[NL80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
+	[NL80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
 };
 
 enum wl12xx_hw_rates {
@@ -1827,8 +1827,8 @@
 	wl->fw_status_priv_len = 0;
 	wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
 	wl->ofdm_only_ap = true;
-	wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
-	wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
+	wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl12xx_ht_cap);
+	wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ, &wl12xx_ht_cap);
 	wl12xx_conf_init(wl);
 
 	if (!fref_param) {
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index ebed13a..8d47539 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -27,7 +27,7 @@
 static int wl1271_get_scan_channels(struct wl1271 *wl,
 				    struct cfg80211_scan_request *req,
 				    struct basic_scan_channel_params *channels,
-				    enum ieee80211_band band, bool passive)
+				    enum nl80211_band band, bool passive)
 {
 	struct conf_scan_settings *c = &wl->conf.scan;
 	int i, j;
@@ -92,7 +92,7 @@
 #define WL1271_NOTHING_TO_SCAN 1
 
 static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-			    enum ieee80211_band band,
+			    enum nl80211_band band,
 			    bool passive, u32 basic_rate)
 {
 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
@@ -144,12 +144,12 @@
 	cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
 	cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
 
-	if (band == IEEE80211_BAND_2GHZ)
+	if (band == NL80211_BAND_2GHZ)
 		cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
 	else
 		cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
 
-	if (wl->scan.ssid_len && wl->scan.ssid) {
+	if (wl->scan.ssid_len) {
 		cmd->params.ssid_len = wl->scan.ssid_len;
 		memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
 	}
@@ -218,7 +218,7 @@
 void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int ret = 0;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	u32 rate, mask;
 
 	switch (wl->scan.state) {
@@ -226,7 +226,7 @@
 		break;
 
 	case WL1271_SCAN_STATE_2GHZ_ACTIVE:
-		band = IEEE80211_BAND_2GHZ;
+		band = NL80211_BAND_2GHZ;
 		mask = wlvif->bitrate_masks[band];
 		if (wl->scan.req->no_cck) {
 			mask &= ~CONF_TX_CCK_RATES;
@@ -243,7 +243,7 @@
 		break;
 
 	case WL1271_SCAN_STATE_2GHZ_PASSIVE:
-		band = IEEE80211_BAND_2GHZ;
+		band = NL80211_BAND_2GHZ;
 		mask = wlvif->bitrate_masks[band];
 		if (wl->scan.req->no_cck) {
 			mask &= ~CONF_TX_CCK_RATES;
@@ -263,7 +263,7 @@
 		break;
 
 	case WL1271_SCAN_STATE_5GHZ_ACTIVE:
-		band = IEEE80211_BAND_5GHZ;
+		band = NL80211_BAND_5GHZ;
 		rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
 		ret = wl1271_scan_send(wl, wlvif, band, false, rate);
 		if (ret == WL1271_NOTHING_TO_SCAN) {
@@ -274,7 +274,7 @@
 		break;
 
 	case WL1271_SCAN_STATE_5GHZ_PASSIVE:
-		band = IEEE80211_BAND_5GHZ;
+		band = NL80211_BAND_5GHZ;
 		rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
 		ret = wl1271_scan_send(wl, wlvif, band, true, rate);
 		if (ret == WL1271_NOTHING_TO_SCAN) {
@@ -378,7 +378,7 @@
 	wl12xx_adjust_channels(cfg, cfg_channels);
 
 	if (!force_passive && cfg->active[0]) {
-		u8 band = IEEE80211_BAND_2GHZ;
+		u8 band = NL80211_BAND_2GHZ;
 		ret = wl12xx_cmd_build_probe_req(wl, wlvif,
 						 wlvif->role_id, band,
 						 req->ssids[0].ssid,
@@ -395,7 +395,7 @@
 	}
 
 	if (!force_passive && cfg->active[1]) {
-		u8 band = IEEE80211_BAND_5GHZ;
+		u8 band = NL80211_BAND_5GHZ;
 		ret = wl12xx_cmd_build_probe_req(wl, wlvif,
 						 wlvif->role_id, band,
 						 req->ssids[0].ssid,
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
index a8d176d..63e95ba 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.c
+++ b/drivers/net/wireless/ti/wl18xx/cmd.c
@@ -48,10 +48,10 @@
 	cmd->stop_tx = ch_switch->block_tx;
 
 	switch (ch_switch->chandef.chan->band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		cmd->band = WLCORE_BAND_2_4GHZ;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		cmd->band = WLCORE_BAND_5GHZ;
 		break;
 	default:
@@ -187,7 +187,7 @@
 
 	cmd->role_id = wlvif->role_id;
 	cmd->channel = wlvif->channel;
-	if (wlvif->band == IEEE80211_BAND_5GHZ)
+	if (wlvif->band == NL80211_BAND_5GHZ)
 		cmd->band = WLCORE_BAND_5GHZ;
 	cmd->bandwidth = wlcore_get_native_channel_type(wlvif->channel_type);
 
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index ff6e46d..ef81184 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -64,13 +64,13 @@
 					  u8 sync_band)
 {
 	struct sk_buff *skb;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int freq;
 
 	if (sync_band == WLCORE_BAND_5GHZ)
-		band = IEEE80211_BAND_5GHZ;
+		band = NL80211_BAND_5GHZ;
 	else
-		band = IEEE80211_BAND_2GHZ;
+		band = NL80211_BAND_2GHZ;
 
 	freq = ieee80211_channel_to_frequency(sync_channel, band);
 
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 1bf26cc..ae47c79 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -137,8 +137,8 @@
 };
 
 static const u8 *wl18xx_band_rate_to_idx[] = {
-	[IEEE80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
-	[IEEE80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
+	[NL80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
+	[NL80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
 };
 
 enum wl18xx_hw_rates {
@@ -1302,12 +1302,12 @@
 		wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
 
 		/* sanity check - we don't support this */
-		if (WARN_ON(wlvif->band != IEEE80211_BAND_5GHZ))
+		if (WARN_ON(wlvif->band != NL80211_BAND_5GHZ))
 			return 0;
 
 		return CONF_TX_RATE_USE_WIDE_CHAN;
 	} else if (wl18xx_is_mimo_supported(wl) &&
-		   wlvif->band == IEEE80211_BAND_2GHZ) {
+		   wlvif->band == NL80211_BAND_2GHZ) {
 		wl1271_debug(DEBUG_ACX, "using MIMO rate mask");
 		/*
 		 * we don't care about HT channel here - if a peer doesn't
@@ -1996,24 +1996,24 @@
 		 * siso40.
 		 */
 		if (wl18xx_is_mimo_supported(wl))
-			wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+			wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
 					  &wl18xx_mimo_ht_cap_2ghz);
 		else
-			wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+			wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
 					  &wl18xx_siso40_ht_cap_2ghz);
 
 		/* 5Ghz is always wide */
-		wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+		wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
 				  &wl18xx_siso40_ht_cap_5ghz);
 	} else if (priv->conf.ht.mode == HT_MODE_WIDE) {
-		wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+		wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
 				  &wl18xx_siso40_ht_cap_2ghz);
-		wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+		wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
 				  &wl18xx_siso40_ht_cap_5ghz);
 	} else if (priv->conf.ht.mode == HT_MODE_SISO20) {
-		wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+		wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
 				  &wl18xx_siso20_ht_cap);
-		wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+		wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
 				  &wl18xx_siso20_ht_cap);
 	}
 
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index bc15aa2..4e522154 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -110,7 +110,7 @@
 
 	/* TODO: per-band ies? */
 	if (cmd->active[0]) {
-		u8 band = IEEE80211_BAND_2GHZ;
+		u8 band = NL80211_BAND_2GHZ;
 		ret = wl12xx_cmd_build_probe_req(wl, wlvif,
 				 cmd->role_id, band,
 				 req->ssids ? req->ssids[0].ssid : NULL,
@@ -127,7 +127,7 @@
 	}
 
 	if (cmd->active[1] || cmd->dfs) {
-		u8 band = IEEE80211_BAND_5GHZ;
+		u8 band = NL80211_BAND_5GHZ;
 		ret = wl12xx_cmd_build_probe_req(wl, wlvif,
 				 cmd->role_id, band,
 				 req->ssids ? req->ssids[0].ssid : NULL,
@@ -253,7 +253,7 @@
 	cmd->terminate_on_report = 0;
 
 	if (cmd->active[0]) {
-		u8 band = IEEE80211_BAND_2GHZ;
+		u8 band = NL80211_BAND_2GHZ;
 		ret = wl12xx_cmd_build_probe_req(wl, wlvif,
 				 cmd->role_id, band,
 				 req->ssids ? req->ssids[0].ssid : NULL,
@@ -270,7 +270,7 @@
 	}
 
 	if (cmd->active[1] || cmd->dfs) {
-		u8 band = IEEE80211_BAND_5GHZ;
+		u8 band = NL80211_BAND_5GHZ;
 		ret = wl12xx_cmd_build_probe_req(wl, wlvif,
 				 cmd->role_id, band,
 				 req->ssids ? req->ssids[0].ssid : NULL,
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 3406ffb..ebaf66e 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -43,7 +43,7 @@
 
 	if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
 		rate->idx = fw_rate;
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			rate->idx -= CONF_HW_RATE_INDEX_6MBPS;
 		rate->flags = 0;
 	} else {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index f01d24b..3315356 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -423,7 +423,7 @@
 
 static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
 				     struct wl12xx_vif *wlvif,
-				     enum ieee80211_band band,
+				     enum nl80211_band band,
 				     int channel)
 {
 	struct wl12xx_cmd_role_start *cmd;
@@ -438,7 +438,7 @@
 	wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
 
 	cmd->role_id = wlvif->dev_role_id;
-	if (band == IEEE80211_BAND_5GHZ)
+	if (band == NL80211_BAND_5GHZ)
 		cmd->band = WLCORE_BAND_5GHZ;
 	cmd->channel = channel;
 
@@ -524,7 +524,7 @@
 	wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
 
 	cmd->role_id = wlvif->role_id;
-	if (wlvif->band == IEEE80211_BAND_5GHZ)
+	if (wlvif->band == NL80211_BAND_5GHZ)
 		cmd->band = WLCORE_BAND_5GHZ;
 	cmd->channel = wlvif->channel;
 	cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
@@ -693,10 +693,10 @@
 	cmd->ap.local_rates = cpu_to_le32(supported_rates);
 
 	switch (wlvif->band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		cmd->band = WLCORE_BAND_2_4GHZ;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		cmd->band = WLCORE_BAND_5GHZ;
 		break;
 	default:
@@ -773,7 +773,7 @@
 	wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
 
 	cmd->role_id = wlvif->role_id;
-	if (wlvif->band == IEEE80211_BAND_5GHZ)
+	if (wlvif->band == NL80211_BAND_5GHZ)
 		cmd->band = WLCORE_BAND_5GHZ;
 	cmd->channel = wlvif->channel;
 	cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
@@ -1164,7 +1164,7 @@
 	}
 
 	rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
-	if (band == IEEE80211_BAND_2GHZ)
+	if (band == NL80211_BAND_2GHZ)
 		ret = wl1271_cmd_template_set(wl, role_id,
 					      template_id_2_4,
 					      skb->data, skb->len, 0, rate);
@@ -1195,7 +1195,7 @@
 	wl1271_debug(DEBUG_SCAN, "set ap probe request template");
 
 	rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
-	if (wlvif->band == IEEE80211_BAND_2GHZ)
+	if (wlvif->band == NL80211_BAND_2GHZ)
 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
 					      CMD_TEMPL_CFG_PROBE_REQ_2_4,
 					      skb->data, skb->len, 0, rate);
@@ -1628,19 +1628,19 @@
 	return ret;
 }
 
-static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
+static int wlcore_get_reg_conf_ch_idx(enum nl80211_band band, u16 ch)
 {
 	/*
 	 * map the given band/channel to the respective predefined
 	 * bit expected by the fw
 	 */
 	switch (band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		/* channels 1..14 are mapped to 0..13 */
 		if (ch >= 1 && ch <= 14)
 			return ch - 1;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		switch (ch) {
 		case 8 ... 16:
 			/* channels 8,12,16 are mapped to 18,19,20 */
@@ -1670,7 +1670,7 @@
 }
 
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
-				     enum ieee80211_band band)
+				     enum nl80211_band band)
 {
 	int ch_bit_idx = 0;
 
@@ -1699,7 +1699,7 @@
 
 	memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap));
 
-	for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) {
+	for (b = NL80211_BAND_2GHZ; b <= NL80211_BAND_5GHZ; b++) {
 		band = wiphy->bands[b];
 		for (i = 0; i < band->n_channels; i++) {
 			struct ieee80211_channel *channel = &band->channels[i];
@@ -1851,7 +1851,7 @@
 }
 
 static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-			  u8 role_id, enum ieee80211_band band, u8 channel)
+			  u8 role_id, enum nl80211_band band, u8 channel)
 {
 	struct wl12xx_cmd_roc *cmd;
 	int ret = 0;
@@ -1870,10 +1870,10 @@
 	cmd->role_id = role_id;
 	cmd->channel = channel;
 	switch (band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		cmd->band = WLCORE_BAND_2_4GHZ;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		cmd->band = WLCORE_BAND_5GHZ;
 		break;
 	default:
@@ -1925,7 +1925,7 @@
 }
 
 int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
-	       enum ieee80211_band band, u8 channel)
+	       enum nl80211_band band, u8 channel)
 {
 	int ret = 0;
 
@@ -1995,7 +1995,7 @@
 
 /* start dev role and roc on its channel */
 int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-		     enum ieee80211_band band, int channel)
+		     enum nl80211_band band, int channel)
 {
 	int ret;
 
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index e28e2f23..52c3b48 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -40,7 +40,7 @@
 int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-		     enum ieee80211_band band, int channel);
+		     enum nl80211_band band, int channel);
 int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
 int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
@@ -83,14 +83,14 @@
 int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			      u8 hlid);
 int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
-	       enum ieee80211_band band, u8 channel);
+	       enum nl80211_band band, u8 channel);
 int wl12xx_croc(struct wl1271 *wl, u8 role_id);
 int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			struct ieee80211_sta *sta, u8 hlid);
 int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			   u8 hlid);
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
-				     enum ieee80211_band band);
+				     enum nl80211_band band);
 int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
 int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			   u8 feature, u8 enable, u8 value);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index dde3620..10fd24c 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -243,7 +243,7 @@
 	struct delayed_work *dwork;
 	struct wl1271 *wl;
 
-	dwork = container_of(work, struct delayed_work, work);
+	dwork = to_delayed_work(work);
 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
 
 	mutex_lock(&wl->mutex);
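to_delayed_work() is a container_of() wrapper from <linux/workqueue.h>, so the conversions in this file are behaviour-preserving and simply more readable. A minimal sketch of the pattern, with a hypothetical callback name:

#include <linux/workqueue.h>

/* Recover the delayed_work from the work callback argument;
 * to_delayed_work(work) == container_of(work, struct delayed_work, work).
 */
static void example_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* the enclosing object is then found the usual way, e.g.
	 * container_of(dwork, struct wl1271, tx_watchdog_work)
	 */
	(void)dwork;
}
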
@@ -1930,7 +1930,7 @@
 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
 		wlcore_enable_interrupts(wl);
 
-	wl->band = IEEE80211_BAND_2GHZ;
+	wl->band = NL80211_BAND_2GHZ;
 
 	wl->rx_counter = 0;
 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
@@ -2011,7 +2011,7 @@
 	struct wl12xx_vif *wlvif;
 	int ret;
 
-	dwork = container_of(work, struct delayed_work, work);
+	dwork = to_delayed_work(work);
 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
 	wl = wlvif->wl;
 
@@ -2047,7 +2047,7 @@
 	struct ieee80211_vif *vif;
 	struct wl12xx_vif *wlvif;
 
-	dwork = container_of(work, struct delayed_work, work);
+	dwork = to_delayed_work(work);
 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
 	wl = wlvif->wl;
 
@@ -2076,7 +2076,7 @@
 	unsigned long time_spare;
 	int ret;
 
-	dwork = container_of(work, struct delayed_work, work);
+	dwork = to_delayed_work(work);
 	wlvif = container_of(dwork, struct wl12xx_vif,
 			     pending_auth_complete_work);
 	wl = wlvif->wl;
@@ -2240,8 +2240,8 @@
 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
 	}
 
-	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
-	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
 
 	/*
@@ -2330,7 +2330,7 @@
 	 * 11a channels if not supported
 	 */
 	if (!wl->enable_11a)
-		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
+		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
 
 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
 		     wl->enable_11a ? "" : "not ");
@@ -5588,7 +5588,7 @@
 	struct wl1271 *wl;
 	int ret;
 
-	dwork = container_of(work, struct delayed_work, work);
+	dwork = to_delayed_work(work);
 	wl = container_of(dwork, struct wl1271, roc_complete_work);
 
 	ret = wlcore_roc_completed(wl);
@@ -5871,7 +5871,7 @@
 };
 
 
-u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
 {
 	u8 idx;
 
@@ -6096,21 +6096,21 @@
 	 * We keep local copies of the band structs because we need to
 	 * modify them on a per-device basis.
 	 */
-	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
 	       sizeof(wl1271_band_2ghz));
-	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
-	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
+	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
+	       &wl->ht_cap[NL80211_BAND_2GHZ],
 	       sizeof(*wl->ht_cap));
-	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
 	       sizeof(wl1271_band_5ghz));
-	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
-	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
+	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
+	       &wl->ht_cap[NL80211_BAND_5GHZ],
 	       sizeof(*wl->ht_cap));
 
-	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-		&wl->bands[IEEE80211_BAND_2GHZ];
-	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-		&wl->bands[IEEE80211_BAND_5GHZ];
+	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+		&wl->bands[NL80211_BAND_2GHZ];
+	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+		&wl->bands[NL80211_BAND_5GHZ];
 
 	/*
 	 * allow 4 queues per mac address we support +
@@ -6205,7 +6205,7 @@
 	wl->channel = 0;
 	wl->rx_counter = 0;
 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
-	wl->band = IEEE80211_BAND_2GHZ;
+	wl->band = NL80211_BAND_2GHZ;
 	wl->channel_type = NL80211_CHAN_NO_HT;
 	wl->flags = 0;
 	wl->sg_enabled = true;
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 4cd316e..b36133b 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -38,7 +38,7 @@
 	struct wl12xx_vif *wlvif;
 	int ret;
 
-	dwork = container_of(work, struct delayed_work, work);
+	dwork = to_delayed_work(work);
 	wl = container_of(dwork, struct wl1271, elp_work);
 
 	wl1271_debug(DEBUG_PSM, "elp work");
@@ -202,7 +202,7 @@
 		 * enable beacon early termination.
 		 * Not relevant for 5GHz and for high rates.
 		 */
-		if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+		if ((wlvif->band == NL80211_BAND_2GHZ) &&
 		    (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
 			ret = wl1271_acx_bet_enable(wl, wlvif, true);
 			if (ret < 0)
@@ -213,7 +213,7 @@
 		wl1271_debug(DEBUG_PSM, "leaving psm");
 
 		/* disable beacon early termination */
-		if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+		if ((wlvif->band == NL80211_BAND_2GHZ) &&
 		    (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
 			ret = wl1271_acx_bet_enable(wl, wlvif, false);
 			if (ret < 0)
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 34e7e93..c9bd294 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -64,9 +64,9 @@
 	memset(status, 0, sizeof(struct ieee80211_rx_status));
 
 	if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
-		status->band = IEEE80211_BAND_2GHZ;
+		status->band = NL80211_BAND_2GHZ;
 	else
-		status->band = IEEE80211_BAND_5GHZ;
+		status->band = NL80211_BAND_5GHZ;
 
 	status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band);
 
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index f5a7087c..57c05656 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -146,7 +146,7 @@
 } __packed;
 
 int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
-u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+u8 wl1271_rate_to_idx(int rate, enum nl80211_band band);
 int wl1271_rx_filter_enable(struct wl1271 *wl,
 			    int index, bool enable,
 			    struct wl12xx_rx_filter *filter);
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 1e3d51c..2334364 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -38,7 +38,7 @@
 	struct wl12xx_vif *wlvif;
 	int ret;
 
-	dwork = container_of(work, struct delayed_work, work);
+	dwork = to_delayed_work(work);
 	wl = container_of(dwork, struct wl1271, scan_complete_work);
 
 	wl1271_debug(DEBUG_SCAN, "Scanning complete");
@@ -164,7 +164,7 @@
 		struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
 		u32 delta_per_probe;
 
-		if (band == IEEE80211_BAND_5GHZ)
+		if (band == NL80211_BAND_5GHZ)
 			delta_per_probe = c->dwell_time_delta_per_probe_5;
 		else
 			delta_per_probe = c->dwell_time_delta_per_probe;
@@ -215,7 +215,7 @@
 			channels[j].channel = req_channels[i]->hw_value;
 
 			if (n_pactive_ch &&
-			    (band == IEEE80211_BAND_2GHZ) &&
+			    (band == NL80211_BAND_2GHZ) &&
 			    (channels[j].channel >= 12) &&
 			    (channels[j].channel <= 14) &&
 			    (flags & IEEE80211_CHAN_NO_IR) &&
@@ -266,7 +266,7 @@
 					 n_channels,
 					 n_ssids,
 					 cfg->channels_2,
-					 IEEE80211_BAND_2GHZ,
+					 NL80211_BAND_2GHZ,
 					 false, true, 0,
 					 MAX_CHANNELS_2GHZ,
 					 &n_pactive_ch,
@@ -277,7 +277,7 @@
 					 n_channels,
 					 n_ssids,
 					 cfg->channels_2,
-					 IEEE80211_BAND_2GHZ,
+					 NL80211_BAND_2GHZ,
 					 false, false,
 					 cfg->passive[0],
 					 MAX_CHANNELS_2GHZ,
@@ -289,7 +289,7 @@
 					 n_channels,
 					 n_ssids,
 					 cfg->channels_5,
-					 IEEE80211_BAND_5GHZ,
+					 NL80211_BAND_5GHZ,
 					 false, true, 0,
 					 wl->max_channels_5,
 					 &n_pactive_ch,
@@ -300,7 +300,7 @@
 					 n_channels,
 					 n_ssids,
 					 cfg->channels_5,
-					 IEEE80211_BAND_5GHZ,
+					 NL80211_BAND_5GHZ,
 					 true, true,
 					 cfg->passive[1],
 					 wl->max_channels_5,
@@ -312,7 +312,7 @@
 					 n_channels,
 					 n_ssids,
 					 cfg->channels_5,
-					 IEEE80211_BAND_5GHZ,
+					 NL80211_BAND_5GHZ,
 					 false, false,
 					 cfg->passive[1] + cfg->dfs,
 					 wl->max_channels_5,
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f0ac361..c1b8e4e 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -453,7 +453,7 @@
 }
 
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
-				enum ieee80211_band rate_band)
+				enum nl80211_band rate_band)
 {
 	struct ieee80211_supported_band *band;
 	u32 enabled_rates = 0;
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 79cb3ff..e2ba62d 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -246,9 +246,9 @@
 void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl12xx_tx_reset(struct wl1271 *wl);
 void wl1271_tx_flush(struct wl1271 *wl);
-u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
-				enum ieee80211_band rate_band);
+				enum nl80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 		      struct sk_buff *skb, struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 72c31a8..8f28aa0 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -342,7 +342,7 @@
 	struct wl12xx_vif *sched_vif;
 
 	/* The current band */
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	struct completion *elp_compl;
 	struct delayed_work elp_work;
@@ -517,7 +517,7 @@
 			      struct wl1271_station *wl_sta, bool in_conn);
 
 static inline void
-wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
+wlcore_set_ht_cap(struct wl1271 *wl, enum nl80211_band band,
 		  struct ieee80211_sta_ht_cap *ht_cap)
 {
 	memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 27c5687..5c4199f 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -392,7 +392,7 @@
 	u8 ssid_len;
 
 	/* The current band */
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int channel;
 	enum nl80211_channel_type channel_type;
 
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index d5c371d..13fd734 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1287,7 +1287,7 @@
 		printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
 		       dev->name, rc);
 	else {
-		dev->trans_start = jiffies; /* prevent tx timeout */
+		netif_trans_update(dev); /* prevent tx timeout */
 		netif_wake_queue(dev);
 	}
 }
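netif_trans_update() refreshes the transmit timestamp that the netdev watchdog checks, replacing direct writes of jiffies to dev->trans_start. A minimal sketch of the recovery path above, assuming <linux/netdevice.h> (the function name is made up):

#include <linux/netdevice.h>

static void example_tx_timeout(struct net_device *dev)
{
	/* after the hardware has been reset: */
	netif_trans_update(dev);	/* was: dev->trans_start = jiffies; */
	netif_wake_queue(dev);
}
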
@@ -1454,7 +1454,7 @@
 	struct wl3501_card *this = netdev_priv(dev);
 
 	wrqu->freq.m = 100000 *
-		ieee80211_channel_to_frequency(this->chan, IEEE80211_BAND_2GHZ);
+		ieee80211_channel_to_frequency(this->chan, NL80211_BAND_2GHZ);
 	wrqu->freq.e = 1;
 	return 0;
 }
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 6f5c793..dea049b 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -845,7 +845,7 @@
 	usb_unlink_urb(zd->tx_urb);
 	dev->stats.tx_errors++;
 	/* Restart the timeout to quiet the watchdog: */
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 }
 
 static int zd1201_set_mac_address(struct net_device *dev, void *p)
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index e539d9b..3e37a04 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1068,7 +1068,7 @@
 	}
 
 	stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
-	stats.band = IEEE80211_BAND_2GHZ;
+	stats.band = NL80211_BAND_2GHZ;
 	stats.signal = zd_check_signal(hw, status->signal_strength);
 
 	rate = zd_rx_rate(buffer, status);
@@ -1395,7 +1395,7 @@
 	mac->band.n_channels = ARRAY_SIZE(zd_channels);
 	mac->band.channels = mac->channels;
 
-	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
+	hw->wiphy->bands[NL80211_BAND_2GHZ] = &mac->band;
 
 	ieee80211_hw_set(hw, MFP_CAPABLE);
 	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile
index e346e81..11e02be 100644
--- a/drivers/net/xen-netback/Makefile
+++ b/drivers/net/xen-netback/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
 
-xen-netback-y := netback.o xenbus.o interface.o
+xen-netback-y := netback.o xenbus.o interface.o hash.o
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index f44b388..84d6cbd 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -220,6 +220,35 @@
 
 #define XEN_NETBK_MCAST_MAX 64
 
+#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
+#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
+#define XEN_NETBK_HASH_TAG_SIZE 40
+
+struct xenvif_hash_cache_entry {
+	struct list_head link;
+	struct rcu_head rcu;
+	u8 tag[XEN_NETBK_HASH_TAG_SIZE];
+	unsigned int len;
+	u32 val;
+	int seq;
+};
+
+struct xenvif_hash_cache {
+	spinlock_t lock;
+	struct list_head list;
+	unsigned int count;
+	atomic_t seq;
+};
+
+struct xenvif_hash {
+	unsigned int alg;
+	u32 flags;
+	u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
+	u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+	unsigned int size;
+	struct xenvif_hash_cache cache;
+};
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t          domid;
@@ -251,6 +280,8 @@
 	unsigned int num_queues; /* active queues, resource allocated */
 	unsigned int stalled_queues;
 
+	struct xenvif_hash hash;
+
 	struct xenbus_watch credit_watch;
 	struct xenbus_watch mcast_ctrl_watch;
 
@@ -260,6 +291,11 @@
 	struct dentry *xenvif_dbg_root;
 #endif
 
+	struct xen_netif_ctrl_back_ring ctrl;
+	struct task_struct *ctrl_task;
+	wait_queue_head_t ctrl_wq;
+	unsigned int ctrl_irq;
+
 	/* Miscellaneous private stuff. */
 	struct net_device *dev;
 };
@@ -285,10 +321,15 @@
 int xenvif_init_queue(struct xenvif_queue *queue);
 void xenvif_deinit_queue(struct xenvif_queue *queue);
 
-int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
-		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
-		   unsigned int rx_evtchn);
-void xenvif_disconnect(struct xenvif *vif);
+int xenvif_connect_data(struct xenvif_queue *queue,
+			unsigned long tx_ring_ref,
+			unsigned long rx_ring_ref,
+			unsigned int tx_evtchn,
+			unsigned int rx_evtchn);
+void xenvif_disconnect_data(struct xenvif *vif);
+int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+			unsigned int evtchn);
+void xenvif_disconnect_ctrl(struct xenvif *vif);
 void xenvif_free(struct xenvif *vif);
 
 int xenvif_xenbus_init(void);
@@ -300,10 +341,10 @@
 void xenvif_wake_queue(struct xenvif_queue *queue);
 
 /* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
-int xenvif_map_frontend_rings(struct xenvif_queue *queue,
-			      grant_ref_t tx_ring_ref,
-			      grant_ref_t rx_ring_ref);
+void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+				   grant_ref_t tx_ring_ref,
+				   grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
@@ -318,6 +359,8 @@
 
 int xenvif_dealloc_kthread(void *data);
 
+int xenvif_ctrl_kthread(void *data);
+
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
 void xenvif_carrier_on(struct xenvif *vif);
@@ -341,6 +384,7 @@
 extern unsigned int rx_drain_timeout_msecs;
 extern unsigned int rx_stall_timeout_msecs;
 extern unsigned int xenvif_max_queues;
+extern unsigned int xenvif_hash_cache_size;
 
 #ifdef CONFIG_DEBUG_FS
 extern struct dentry *xen_netback_dbg_root;
@@ -354,4 +398,18 @@
 bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
 void xenvif_mcast_addr_list_free(struct xenvif *vif);
 
+/* Hash */
+void xenvif_init_hash(struct xenvif *vif);
+void xenvif_deinit_hash(struct xenvif *vif);
+
+u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
+u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
+u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
+u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
+u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
+u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
+			    u32 off);
+
+void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
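The hash cache declared above is an RCU-protected list bounded by xenvif_hash_cache_size; every insertion (and every lookup hit) stamps the entry with a per-vif sequence counter, and when the list overflows the entry with the smallest sequence number, i.e. the least recently used one, is dropped. A standalone toy model of that eviction rule, with made-up names:

#include <stdio.h>

/* Toy model: the eviction victim is the in-use entry with the
 * smallest sequence number, i.e. the least recently touched one.
 */
struct toy_entry {
	unsigned int seq;
	int in_use;
};

static int pick_victim(const struct toy_entry *e, int n)
{
	int victim = -1, i;

	for (i = 0; i < n; i++) {
		if (!e[i].in_use)
			continue;
		if (victim < 0 || e[i].seq < e[victim].seq)
			victim = i;
	}
	return victim;
}

int main(void)
{
	struct toy_entry cache[] = { { 5, 1 }, { 2, 1 }, { 9, 1 } };

	printf("evict index %d\n", pick_victim(cache, 3));	/* prints 1 */
	return 0;
}
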
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
new file mode 100644
index 0000000..392e392
--- /dev/null
+++ b/drivers/net/xen-netback/hash.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2016 Citrix Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define XEN_NETIF_DEFINE_TOEPLITZ
+
+#include "common.h"
+#include <linux/vmalloc.h>
+#include <linux/rculist.h>
+
+static void xenvif_del_hash(struct rcu_head *rcu)
+{
+	struct xenvif_hash_cache_entry *entry;
+
+	entry = container_of(rcu, struct xenvif_hash_cache_entry, rcu);
+
+	kfree(entry);
+}
+
+static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
+			    unsigned int len, u32 val)
+{
+	struct xenvif_hash_cache_entry *new, *entry, *oldest;
+	unsigned long flags;
+	bool found;
+
+	new = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!new)
+		return;
+
+	memcpy(new->tag, tag, len);
+	new->len = len;
+	new->val = val;
+
+	spin_lock_irqsave(&vif->hash.cache.lock, flags);
+
+	found = false;
+	oldest = NULL;
+	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+		/* Make sure we don't add duplicate entries */
+		if (entry->len == len &&
+		    memcmp(entry->tag, tag, len) == 0)
+			found = true;
+		if (!oldest || entry->seq < oldest->seq)
+			oldest = entry;
+	}
+
+	if (!found) {
+		new->seq = atomic_inc_return(&vif->hash.cache.seq);
+		list_add_rcu(&new->link, &vif->hash.cache.list);
+
+		if (++vif->hash.cache.count > xenvif_hash_cache_size) {
+			list_del_rcu(&oldest->link);
+			vif->hash.cache.count--;
+			call_rcu(&oldest->rcu, xenvif_del_hash);
+		}
+	}
+
+	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
+
+	if (found)
+		kfree(new);
+}
+
+static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
+			   unsigned int len)
+{
+	u32 val;
+
+	val = xen_netif_toeplitz_hash(vif->hash.key,
+				      sizeof(vif->hash.key),
+				      data, len);
+
+	if (xenvif_hash_cache_size != 0)
+		xenvif_add_hash(vif, data, len, val);
+
+	return val;
+}
+
+static void xenvif_flush_hash(struct xenvif *vif)
+{
+	struct xenvif_hash_cache_entry *entry;
+	unsigned long flags;
+
+	if (xenvif_hash_cache_size == 0)
+		return;
+
+	spin_lock_irqsave(&vif->hash.cache.lock, flags);
+
+	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+		list_del_rcu(&entry->link);
+		vif->hash.cache.count--;
+		call_rcu(&entry->rcu, xenvif_del_hash);
+	}
+
+	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
+}
+
+static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
+			    unsigned int len)
+{
+	struct xenvif_hash_cache_entry *entry;
+	u32 val;
+	bool found;
+
+	if (len >= XEN_NETBK_HASH_TAG_SIZE)
+		return 0;
+
+	if (xenvif_hash_cache_size == 0)
+		return xenvif_new_hash(vif, data, len);
+
+	rcu_read_lock();
+
+	found = false;
+
+	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+		if (entry->len == len &&
+		    memcmp(entry->tag, data, len) == 0) {
+			val = entry->val;
+			entry->seq = atomic_inc_return(&vif->hash.cache.seq);
+			found = true;
+			break;
+		}
+	}
+
+	rcu_read_unlock();
+
+	if (!found)
+		val = xenvif_new_hash(vif, data, len);
+
+	return val;
+}
+
+void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
+{
+	struct flow_keys flow;
+	u32 hash = 0;
+	enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
+	u32 flags = vif->hash.flags;
+	bool has_tcp_hdr;
+
+	/* Quick rejection test: If the network protocol doesn't
+	 * correspond to any enabled hash type then there's no point
+	 * in parsing the packet header.
+	 */
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+			     XEN_NETIF_CTRL_HASH_TYPE_IPV4))
+			break;
+
+		goto done;
+
+	case htons(ETH_P_IPV6):
+		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
+			     XEN_NETIF_CTRL_HASH_TYPE_IPV6))
+			break;
+
+		goto done;
+
+	default:
+		goto done;
+	}
+
+	memset(&flow, 0, sizeof(flow));
+	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+		goto done;
+
+	has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
+		      !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		if (has_tcp_hdr &&
+		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
+			u8 data[12];
+
+			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
+			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
+			memcpy(&data[8], &flow.ports.src, 2);
+			memcpy(&data[10], &flow.ports.dst, 2);
+
+			hash = xenvif_find_hash(vif, data, sizeof(data));
+			type = PKT_HASH_TYPE_L4;
+		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
+			u8 data[8];
+
+			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
+			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
+
+			hash = xenvif_find_hash(vif, data, sizeof(data));
+			type = PKT_HASH_TYPE_L3;
+		}
+
+		break;
+
+	case htons(ETH_P_IPV6):
+		if (has_tcp_hdr &&
+		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
+			u8 data[36];
+
+			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
+			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
+			memcpy(&data[32], &flow.ports.src, 2);
+			memcpy(&data[34], &flow.ports.dst, 2);
+
+			hash = xenvif_find_hash(vif, data, sizeof(data));
+			type = PKT_HASH_TYPE_L4;
+		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
+			u8 data[32];
+
+			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
+			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
+
+			hash = xenvif_find_hash(vif, data, sizeof(data));
+			type = PKT_HASH_TYPE_L3;
+		}
+
+		break;
+	}
+
+done:
+	if (type == PKT_HASH_TYPE_NONE)
+		skb_clear_hash(skb);
+	else
+		__skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
+}
+
+u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
+{
+	switch (alg) {
+	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
+	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
+		break;
+
+	default:
+		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+	}
+
+	vif->hash.alg = alg;
+
+	return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
+{
+	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+		return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
+
+	*flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
+		 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+		 XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
+		 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+
+	return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
+{
+	if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
+		      XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+		      XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
+		      XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
+		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+	vif->hash.flags = flags;
+
+	return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
+{
+	u8 *key = vif->hash.key;
+	struct gnttab_copy copy_op = {
+		.source.u.ref = gref,
+		.source.domid = vif->domid,
+		.dest.u.gmfn = virt_to_gfn(key),
+		.dest.domid = DOMID_SELF,
+		.dest.offset = xen_offset_in_page(key),
+		.len = len,
+		.flags = GNTCOPY_source_gref
+	};
+
+	if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
+		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+	if (len != 0) {
+		gnttab_batch_copy(&copy_op, 1);
+
+		if (copy_op.status != GNTST_okay)
+			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+	}
+
+	/* Clear any remaining key octets */
+	if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
+		memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);
+
+	xenvif_flush_hash(vif);
+
+	return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
+{
+	if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
+		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+	vif->hash.size = size;
+	memset(vif->hash.mapping, 0, sizeof(u32) * size);
+
+	return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
+			    u32 off)
+{
+	u32 *mapping = &vif->hash.mapping[off];
+	struct gnttab_copy copy_op = {
+		.source.u.ref = gref,
+		.source.domid = vif->domid,
+		.dest.u.gmfn = virt_to_gfn(mapping),
+		.dest.domid = DOMID_SELF,
+		.dest.offset = xen_offset_in_page(mapping),
+		.len = len * sizeof(u32),
+		.flags = GNTCOPY_source_gref
+	};
+
+	if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+	while (len-- != 0)
+		if (mapping[off++] >= vif->num_queues)
+			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+	if (len != 0) {
+		gnttab_batch_copy(&copy_op, 1);
+
+		if (copy_op.status != GNTST_okay)
+			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+	}
+
+	return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+void xenvif_init_hash(struct xenvif *vif)
+{
+	if (xenvif_hash_cache_size == 0)
+		return;
+
+	spin_lock_init(&vif->hash.cache.lock);
+	INIT_LIST_HEAD(&vif->hash.cache.list);
+}
+
+void xenvif_deinit_hash(struct xenvif *vif)
+{
+	xenvif_flush_hash(vif);
+}
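xen_netif_toeplitz_hash(), pulled in by defining XEN_NETIF_DEFINE_TOEPLITZ before including the netif headers, computes the standard Toeplitz hash over the flow tuples built in xenvif_set_skb_hash(). A self-contained reference implementation of the same algorithm, for illustration only (this is not the driver's copy):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Toeplitz hash: for every set bit of the input, XOR in the 32-bit
 * window of the key that starts at that bit position, then slide the
 * window one bit per input bit.
 */
static uint32_t toeplitz_hash(const uint8_t *key, size_t keylen,
			      const uint8_t *data, size_t datalen)
{
	uint32_t window = 0, hash = 0;
	size_t i;
	unsigned int bit;

	for (i = 0; i < 4 && i < keylen; i++)
		window |= (uint32_t)key[i] << (24 - 8 * i);

	for (i = 0; i < datalen; i++) {
		for (bit = 0; bit < 8; bit++) {
			if (data[i] & (0x80 >> bit))
				hash ^= window;

			window <<= 1;
			if (i + 4 < keylen && (key[i + 4] & (0x80 >> bit)))
				window |= 1;
		}
	}
	return hash;
}

int main(void)
{
	/* 40-byte key and a 12-byte IPv4/TCP tuple; values are arbitrary */
	uint8_t key[40] = { 0x6d, 0x5a };
	uint8_t tuple[12] = { 192, 168, 0, 1, 10, 0, 0, 2, 0x04, 0xd2, 0x00, 0x50 };

	printf("hash = 0x%08x\n",
	       toeplitz_hash(key, sizeof(key), tuple, sizeof(tuple)));
	return 0;
}
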
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f5231a2..1c7f49b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -128,6 +128,15 @@
 	return IRQ_HANDLED;
 }
 
+irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
+{
+	struct xenvif *vif = dev_id;
+
+	wake_up(&vif->ctrl_wq);
+
+	return IRQ_HANDLED;
+}
+
 int xenvif_queue_stopped(struct xenvif_queue *queue)
 {
 	struct net_device *dev = queue->vif->dev;
@@ -142,6 +151,33 @@
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
+static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+			       void *accel_priv,
+			       select_queue_fallback_t fallback)
+{
+	struct xenvif *vif = netdev_priv(dev);
+	unsigned int size = vif->hash.size;
+
+	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
+		u16 index = fallback(dev, skb) % dev->real_num_tx_queues;
+
+		/* Make sure there is no hash information in the socket
+		 * buffer otherwise it would be incorrectly forwarded
+		 * to the frontend.
+		 */
+		skb_clear_hash(skb);
+
+		return index;
+	}
+
+	xenvif_set_skb_hash(vif, skb);
+
+	if (size == 0)
+		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
+
+	return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
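xenvif_select_queue() above steers each skb either by the kernel's fallback queue selection (when no hash algorithm has been negotiated) or by looking the computed hash up in the frontend-supplied indirection table. A standalone toy model of that final mapping step, with made-up names and sizes:

#include <stdint.h>
#include <stdio.h>

/* mapping[] and its size come from the frontend via the control ring;
 * when no table is configured the raw hash picks a queue directly.
 */
static unsigned int pick_queue(uint32_t hash, const uint32_t *mapping,
			       unsigned int mapping_size,
			       unsigned int num_tx_queues)
{
	if (mapping_size == 0)
		return hash % num_tx_queues;

	return mapping[hash % mapping_size];
}

int main(void)
{
	uint32_t mapping[4] = { 0, 1, 1, 0 };

	printf("queue = %u\n", pick_queue(0xdeadbeef, mapping, 4, 2));
	return 0;
}
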
@@ -386,6 +422,7 @@
 };
 
 static const struct net_device_ops xenvif_netdev_ops = {
+	.ndo_select_queue = xenvif_select_queue,
 	.ndo_start_xmit	= xenvif_start_xmit,
 	.ndo_get_stats	= xenvif_get_stats,
 	.ndo_open	= xenvif_open,
@@ -527,9 +564,69 @@
 	rtnl_unlock();
 }
 
-int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
-		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
-		   unsigned int rx_evtchn)
+int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+			unsigned int evtchn)
+{
+	struct net_device *dev = vif->dev;
+	void *addr;
+	struct xen_netif_ctrl_sring *shared;
+	struct task_struct *task;
+	int err = -ENOMEM;
+
+	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+				     &ring_ref, 1, &addr);
+	if (err)
+		goto err;
+
+	shared = (struct xen_netif_ctrl_sring *)addr;
+	BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
+
+	init_waitqueue_head(&vif->ctrl_wq);
+
+	err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
+						    xenvif_ctrl_interrupt,
+						    0, dev->name, vif);
+	if (err < 0)
+		goto err_unmap;
+
+	vif->ctrl_irq = err;
+
+	xenvif_init_hash(vif);
+
+	task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
+			      "%s-control", dev->name);
+	if (IS_ERR(task)) {
+		pr_warn("Could not allocate kthread for %s\n", dev->name);
+		err = PTR_ERR(task);
+		goto err_deinit;
+	}
+
+	get_task_struct(task);
+	vif->ctrl_task = task;
+
+	wake_up_process(vif->ctrl_task);
+
+	return 0;
+
+err_deinit:
+	xenvif_deinit_hash(vif);
+	unbind_from_irqhandler(vif->ctrl_irq, vif);
+	vif->ctrl_irq = 0;
+
+err_unmap:
+	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+				vif->ctrl.sring);
+	vif->ctrl.sring = NULL;
+
+err:
+	return err;
+}
+
+int xenvif_connect_data(struct xenvif_queue *queue,
+			unsigned long tx_ring_ref,
+			unsigned long rx_ring_ref,
+			unsigned int tx_evtchn,
+			unsigned int rx_evtchn)
 {
 	struct task_struct *task;
 	int err = -ENOMEM;
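xenvif_connect_ctrl() above acquires its resources in order (map the ring, bind the event channel, initialise the hash state, start the kthread), and the error labels release only what was already acquired, in reverse order. A standalone toy of that unwinding idiom, with stand-in resources:

#include <stdio.h>
#include <stdlib.h>

/* Toy of the goto-based unwind: each failure path frees exactly the
 * resources acquired before it, newest first. malloc() stands in for
 * mapping the ring and binding the event channel.
 */
static int toy_connect(void)
{
	void *ring, *irq;
	int err = -1;

	ring = malloc(16);
	if (!ring)
		goto err;

	irq = malloc(16);
	if (!irq)
		goto err_unmap;

	/* success: in the real driver both handles are kept in the vif */
	free(irq);
	free(ring);
	return 0;

err_unmap:
	free(ring);
err:
	return err;
}

int main(void)
{
	printf("toy_connect() = %d\n", toy_connect());
	return 0;
}
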
@@ -538,7 +635,8 @@
 	BUG_ON(queue->task);
 	BUG_ON(queue->dealloc_task);
 
-	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
+	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
+					     rx_ring_ref);
 	if (err < 0)
 		goto err;
 
@@ -614,7 +712,7 @@
 	unbind_from_irqhandler(queue->tx_irq, queue);
 	queue->tx_irq = 0;
 err_unmap:
-	xenvif_unmap_frontend_rings(queue);
+	xenvif_unmap_frontend_data_rings(queue);
 	netif_napi_del(&queue->napi);
 err:
 	module_put(THIS_MODULE);
@@ -634,7 +732,7 @@
 	rtnl_unlock();
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_disconnect_data(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
 	unsigned int num_queues = vif->num_queues;
@@ -668,12 +766,34 @@
 			queue->tx_irq = 0;
 		}
 
-		xenvif_unmap_frontend_rings(queue);
+		xenvif_unmap_frontend_data_rings(queue);
 	}
 
 	xenvif_mcast_addr_list_free(vif);
 }
 
+void xenvif_disconnect_ctrl(struct xenvif *vif)
+{
+	if (vif->ctrl_task) {
+		kthread_stop(vif->ctrl_task);
+		put_task_struct(vif->ctrl_task);
+		vif->ctrl_task = NULL;
+	}
+
+	xenvif_deinit_hash(vif);
+
+	if (vif->ctrl_irq) {
+		unbind_from_irqhandler(vif->ctrl_irq, vif);
+		vif->ctrl_irq = 0;
+	}
+
+	if (vif->ctrl.sring) {
+		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+					vif->ctrl.sring);
+		vif->ctrl.sring = NULL;
+	}
+}
+
 /* Reverse the relevant parts of xenvif_init_queue().
  * Used for queue teardown from xenvif_free(), and on the
  * error handling paths in xenbus.c:connect().
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index b42f260..edbae0b 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -89,6 +89,11 @@
  */
 #define XEN_NETBACK_TX_COPY_LEN 128
 
+/* This is the maximum number of flows in the hash cache. */
+#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
+unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
+module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
+MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
 
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 			       u8 status);
@@ -163,6 +168,8 @@
 	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
 	if (skb_is_gso(skb))
 		needed++;
+	if (skb->sw_hash)
+		needed++;
 
 	do {
 		prod = queue->rx.sring->req_prod;
@@ -280,6 +287,8 @@
 	struct xenvif_rx_meta *meta;
 	int head;
 	int gso_type;
+	int protocol;
+	int hash_present;
 
 	struct page *page;
 };
@@ -326,8 +335,15 @@
 	npo->copy_off += *len;
 	info->meta->size += *len;
 
+	if (!info->head)
+		return;
+
 	/* Leave a gap for the GSO descriptor. */
-	if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+	if ((1 << info->gso_type) & queue->vif->gso_mask)
+		queue->rx.req_cons++;
+
+	/* Leave a gap for the hash extra segment. */
+	if (info->hash_present)
 		queue->rx.req_cons++;
 
 	info->head = 0; /* There must be something in this buffer now */
@@ -362,6 +378,11 @@
 		.npo = npo,
 		.head = *head,
 		.gso_type = XEN_NETIF_GSO_TYPE_NONE,
+		/* xenvif_set_skb_hash() will have either set a s/w
+		 * hash or cleared the hash depending on
+		 * whether the frontend wants a hash for this skb.
+		 */
+		.hash_present = skb->sw_hash,
 	};
 	unsigned long bytes;
 
@@ -550,6 +571,7 @@
 
 static void xenvif_rx_action(struct xenvif_queue *queue)
 {
+	struct xenvif *vif = queue->vif;
 	s8 status;
 	u16 flags;
 	struct xen_netif_rx_response *resp;
@@ -585,9 +607,10 @@
 	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
 
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
+		struct xen_netif_extra_info *extra = NULL;
 
 		if ((1 << queue->meta[npo.meta_cons].gso_type) &
-		    queue->vif->gso_prefix_mask) {
+		    vif->gso_prefix_mask) {
 			resp = RING_GET_RESPONSE(&queue->rx,
 						 queue->rx.rsp_prod_pvt++);
 
@@ -605,7 +628,7 @@
 		queue->stats.tx_bytes += skb->len;
 		queue->stats.tx_packets++;
 
-		status = xenvif_check_gop(queue->vif,
+		status = xenvif_check_gop(vif,
 					  XENVIF_RX_CB(skb)->meta_slots_used,
 					  &npo);
 
@@ -627,21 +650,57 @@
 					flags);
 
 		if ((1 << queue->meta[npo.meta_cons].gso_type) &
-		    queue->vif->gso_mask) {
-			struct xen_netif_extra_info *gso =
-				(struct xen_netif_extra_info *)
+		    vif->gso_mask) {
+			extra = (struct xen_netif_extra_info *)
 				RING_GET_RESPONSE(&queue->rx,
 						  queue->rx.rsp_prod_pvt++);
 
 			resp->flags |= XEN_NETRXF_extra_info;
 
-			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
-			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
-			gso->u.gso.pad = 0;
-			gso->u.gso.features = 0;
+			extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+			extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+			extra->u.gso.pad = 0;
+			extra->u.gso.features = 0;
 
-			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-			gso->flags = 0;
+			extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+			extra->flags = 0;
+		}
+
+		if (skb->sw_hash) {
+			/* Since the skb got here via xenvif_select_queue()
+			 * we know the hash has been re-calculated according
+			 * to a configuration set by the frontend, so it is
+			 * legitimate to pass it back to the frontend.
+			 */
+			if (resp->flags & XEN_NETRXF_extra_info)
+				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+			else
+				resp->flags |= XEN_NETRXF_extra_info;
+
+			extra = (struct xen_netif_extra_info *)
+				RING_GET_RESPONSE(&queue->rx,
+						  queue->rx.rsp_prod_pvt++);
+
+			extra->u.hash.algorithm =
+				XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
+
+			if (skb->l4_hash)
+				extra->u.hash.type =
+					skb->protocol == htons(ETH_P_IP) ?
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+			else
+				extra->u.hash.type =
+					skb->protocol == htons(ETH_P_IP) ?
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
+					_XEN_NETIF_CTRL_HASH_TYPE_IPV6;
+
+			*(uint32_t *)extra->u.hash.value =
+				skb_get_hash_raw(skb);
+
+			extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+			extra->flags = 0;
 		}
 
 		xenvif_add_frag_responses(queue, status,
@@ -711,6 +770,7 @@
 		if (cons == end)
 			break;
 		RING_COPY_REQUEST(&queue->tx, cons++, txp);
+		extra_count = 0; /* only the first frag can have extras */
 	} while (1);
 	queue->tx.req_cons = cons;
 }
@@ -1450,6 +1510,33 @@
 			}
 		}
 
+		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
+			struct xen_netif_extra_info *extra;
+			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
+
+			switch (extra->u.hash.type) {
+			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
+			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
+				type = PKT_HASH_TYPE_L3;
+				break;
+
+			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
+			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
+				type = PKT_HASH_TYPE_L4;
+				break;
+
+			default:
+				break;
+			}
+
+			if (type != PKT_HASH_TYPE_NONE)
+				skb_set_hash(skb,
+					     *(u32 *)extra->u.hash.value,
+					     type);
+		}
+
 		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
 
 		__skb_put(skb, data_len);
@@ -1925,7 +2012,7 @@
 	return queue->dealloc_cons != queue->dealloc_prod;
 }
 
-void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
+void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
 {
 	if (queue->tx.sring)
 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
@@ -1935,9 +2022,9 @@
 					queue->rx.sring);
 }
 
-int xenvif_map_frontend_rings(struct xenvif_queue *queue,
-			      grant_ref_t tx_ring_ref,
-			      grant_ref_t rx_ring_ref)
+int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+				   grant_ref_t tx_ring_ref,
+				   grant_ref_t rx_ring_ref)
 {
 	void *addr;
 	struct xen_netif_tx_sring *txs;
@@ -1964,7 +2051,7 @@
 	return 0;
 
 err:
-	xenvif_unmap_frontend_rings(queue);
+	xenvif_unmap_frontend_data_rings(queue);
 	return err;
 }
 
@@ -2163,6 +2250,135 @@
 	return 0;
 }
 
+static void make_ctrl_response(struct xenvif *vif,
+			       const struct xen_netif_ctrl_request *req,
+			       u32 status, u32 data)
+{
+	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
+	struct xen_netif_ctrl_response rsp = {
+		.id = req->id,
+		.type = req->type,
+		.status = status,
+		.data = data,
+	};
+
+	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
+	vif->ctrl.rsp_prod_pvt = ++idx;
+}
+
+static void push_ctrl_response(struct xenvif *vif)
+{
+	int notify;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
+	if (notify)
+		notify_remote_via_irq(vif->ctrl_irq);
+}
+
+static void process_ctrl_request(struct xenvif *vif,
+				 const struct xen_netif_ctrl_request *req)
+{
+	u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
+	u32 data = 0;
+
+	switch (req->type) {
+	case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
+		status = xenvif_set_hash_alg(vif, req->data[0]);
+		break;
+
+	case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
+		status = xenvif_get_hash_flags(vif, &data);
+		break;
+
+	case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
+		status = xenvif_set_hash_flags(vif, req->data[0]);
+		break;
+
+	case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
+		status = xenvif_set_hash_key(vif, req->data[0],
+					     req->data[1]);
+		break;
+
+	case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
+		status = XEN_NETIF_CTRL_STATUS_SUCCESS;
+		data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
+		break;
+
+	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
+		status = xenvif_set_hash_mapping_size(vif,
+						      req->data[0]);
+		break;
+
+	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
+		status = xenvif_set_hash_mapping(vif, req->data[0],
+						 req->data[1],
+						 req->data[2]);
+		break;
+
+	default:
+		break;
+	}
+
+	make_ctrl_response(vif, req, status, data);
+	push_ctrl_response(vif);
+}
+
+static void xenvif_ctrl_action(struct xenvif *vif)
+{
+	for (;;) {
+		RING_IDX req_prod, req_cons;
+
+		req_prod = vif->ctrl.sring->req_prod;
+		req_cons = vif->ctrl.req_cons;
+
+		/* Make sure we can see requests before we process them. */
+		rmb();
+
+		if (req_cons == req_prod)
+			break;
+
+		while (req_cons != req_prod) {
+			struct xen_netif_ctrl_request req;
+
+			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
+			req_cons++;
+
+			process_ctrl_request(vif, &req);
+		}
+
+		vif->ctrl.req_cons = req_cons;
+		vif->ctrl.sring->req_event = req_cons + 1;
+	}
+}
+
+static bool xenvif_ctrl_work_todo(struct xenvif *vif)
+{
+	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
+		return 1;
+
+	return 0;
+}
+
+int xenvif_ctrl_kthread(void *data)
+{
+	struct xenvif *vif = data;
+
+	for (;;) {
+		wait_event_interruptible(vif->ctrl_wq,
+					 xenvif_ctrl_work_todo(vif) ||
+					 kthread_should_stop());
+		if (kthread_should_stop())
+			break;
+
+		while (xenvif_ctrl_work_todo(vif))
+			xenvif_ctrl_action(vif);
+
+		cond_resched();
+	}
+
+	return 0;
+}
+
 static int __init netback_init(void)
 {
 	int rc = 0;
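xenvif_ctrl_action() above follows the usual Xen shared-ring consumer pattern: snapshot the producer index, issue a read barrier so request contents are observed no earlier than the index, consume everything up to it, then publish the new consumer index and re-arm req_event. A standalone model of that ordering using C11 atomics in place of the ring macros (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 8	/* power of two, like a ring page */

struct toy_ring {
	_Atomic unsigned int req_prod;	/* advanced by the producer */
	unsigned int req_cons;		/* private to the consumer */
	int req[RING_SIZE];
};

/* Consume every request published so far; the acquire load plays the
 * role of the rmb() after reading req_prod in xenvif_ctrl_action().
 */
static int toy_consume(struct toy_ring *r)
{
	unsigned int prod = atomic_load_explicit(&r->req_prod,
						 memory_order_acquire);
	int handled = 0;

	while (r->req_cons != prod) {
		int req = r->req[r->req_cons++ & (RING_SIZE - 1)];

		printf("handled request %d\n", req);
		handled++;
	}
	return handled;
}

int main(void)
{
	struct toy_ring r = { .req = { 10, 11, 12 } };

	atomic_store_explicit(&r.req_prod, 3, memory_order_release);
	toy_consume(&r);
	return 0;
}
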
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index bd182cd..6a31f26 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -38,7 +38,8 @@
 	const char *hotplug_script;
 };
 
-static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
+static int connect_data_rings(struct backend_info *be,
+			      struct xenvif_queue *queue);
 static void connect(struct backend_info *be);
 static int read_xenbus_vif_flags(struct backend_info *be);
 static int backend_create_xenvif(struct backend_info *be);
@@ -367,6 +368,12 @@
 	if (err)
 		pr_debug("Error writing multi-queue-max-queues\n");
 
+	err = xenbus_printf(XBT_NIL, dev->nodename,
+			    "feature-ctrl-ring",
+			    "%u", true);
+	if (err)
+		pr_debug("Error writing feature-ctrl-ring\n");
+
 	script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
 	if (IS_ERR(script)) {
 		err = PTR_ERR(script);
@@ -457,7 +464,8 @@
 #ifdef CONFIG_DEBUG_FS
 		xenvif_debugfs_delif(be->vif);
 #endif /* CONFIG_DEBUG_FS */
-		xenvif_disconnect(be->vif);
+		xenvif_disconnect_data(be->vif);
+		xenvif_disconnect_ctrl(be->vif);
 	}
 }
 
@@ -825,6 +833,48 @@
 	kfree(str);
 }
 
+static int connect_ctrl_ring(struct backend_info *be)
+{
+	struct xenbus_device *dev = be->dev;
+	struct xenvif *vif = be->vif;
+	unsigned int val;
+	grant_ref_t ring_ref;
+	unsigned int evtchn;
+	int err;
+
+	err = xenbus_gather(XBT_NIL, dev->otherend,
+			    "ctrl-ring-ref", "%u", &val, NULL);
+	if (err)
+		goto done; /* The frontend does not have a control ring */
+
+	ring_ref = val;
+
+	err = xenbus_gather(XBT_NIL, dev->otherend,
+			    "event-channel-ctrl", "%u", &val, NULL);
+	if (err) {
+		xenbus_dev_fatal(dev, err,
+				 "reading %s/event-channel-ctrl",
+				 dev->otherend);
+		goto fail;
+	}
+
+	evtchn = val;
+
+	err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
+	if (err) {
+		xenbus_dev_fatal(dev, err,
+				 "mapping shared-frame %u port %u",
+				 ring_ref, evtchn);
+		goto fail;
+	}
+
+done:
+	return 0;
+
+fail:
+	return err;
+}
+
 static void connect(struct backend_info *be)
 {
 	int err;
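connect_ctrl_ring() above only proceeds when the frontend has published a control ring: the backend advertises feature-ctrl-ring (see the xenbus_printf() earlier in this file) and then reads ctrl-ring-ref and event-channel-ctrl from the frontend's xenstore directory. A hypothetical sketch of the frontend side of that handshake; only the two key names are taken from the code above, everything else is assumed:

#include <xen/xenbus.h>
#include <xen/grant_table.h>

static int example_publish_ctrl_ring(struct xenbus_device *dev,
				     grant_ref_t ring_ref,
				     unsigned int evtchn)
{
	int err;

	/* tell the backend which grant reference backs the control ring */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "ctrl-ring-ref", "%u", ring_ref);
	if (err)
		return err;

	/* and which event channel it should bind for notifications */
	return xenbus_printf(XBT_NIL, dev->nodename,
			     "event-channel-ctrl", "%u", evtchn);
}
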
@@ -861,6 +911,12 @@
 	xen_register_watchers(dev, be->vif);
 	read_xenbus_vif_flags(be);
 
+	err = connect_ctrl_ring(be);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "connecting control ring");
+		return;
+	}
+
 	/* Use the number of queues requested by the frontend */
 	be->vif->queues = vzalloc(requested_num_queues *
 				  sizeof(struct xenvif_queue));
@@ -896,11 +952,12 @@
 		queue->remaining_credit = credit_bytes;
 		queue->credit_usec = credit_usec;
 
-		err = connect_rings(be, queue);
+		err = connect_data_rings(be, queue);
 		if (err) {
-			/* connect_rings() cleans up after itself on failure,
-			 * but we need to clean up after xenvif_init_queue() here,
-			 * and also clean up any previously initialised queues.
+			/* connect_data_rings() cleans up after itself on
+			 * failure, but we need to clean up after
+			 * xenvif_init_queue() here, and also clean up any
+			 * previously initialised queues.
 			 */
 			xenvif_deinit_queue(queue);
 			be->vif->num_queues = queue_index;
@@ -935,15 +992,17 @@
 
 err:
 	if (be->vif->num_queues > 0)
-		xenvif_disconnect(be->vif); /* Clean up existing queues */
+		xenvif_disconnect_data(be->vif); /* Clean up existing queues */
 	vfree(be->vif->queues);
 	be->vif->queues = NULL;
 	be->vif->num_queues = 0;
+	xenvif_disconnect_ctrl(be->vif);
 	return;
 }
 
 
-static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
+static int connect_data_rings(struct backend_info *be,
+			      struct xenvif_queue *queue)
 {
 	struct xenbus_device *dev = be->dev;
 	unsigned int num_queues = queue->vif->num_queues;
@@ -1007,8 +1066,8 @@
 	}
 
 	/* Map the shared frame, irq etc. */
-	err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
-			     tx_evtchn, rx_evtchn);
+	err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
+				  tx_evtchn, rx_evtchn);
 	if (err) {
 		xenbus_dev_fatal(dev, err,
 				 "mapping shared-frames %lu/%lu port tx %u rx %u",
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 7437c9d..ea8321a 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -5,16 +5,6 @@
 menu "Near Field Communication (NFC) devices"
 	depends on NFC
 
-config NFC_PN533
-	tristate "NXP PN533 USB driver"
-	depends on USB
-	help
-	  NXP PN533 USB driver.
-	  This driver provides support for NFC NXP PN533 devices.
-
-	  Say Y here to compile support for PN533 devices into the
-	  kernel or say M to compile it as module (pn533).
-
 config NFC_WILINK
 	tristate "Texas Instruments NFC WiLink driver"
 	depends on TI_ST && NFC_NCI
@@ -70,6 +60,7 @@
 
 source "drivers/nfc/fdp/Kconfig"
 source "drivers/nfc/pn544/Kconfig"
+source "drivers/nfc/pn533/Kconfig"
 source "drivers/nfc/microread/Kconfig"
 source "drivers/nfc/nfcmrvl/Kconfig"
 source "drivers/nfc/st21nfca/Kconfig"
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 0a99e67..bab8ef0 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_NFC_FDP)		+= fdp/
 obj-$(CONFIG_NFC_PN544)		+= pn544/
 obj-$(CONFIG_NFC_MICROREAD)	+= microread/
-obj-$(CONFIG_NFC_PN533)		+= pn533.o
+obj-$(CONFIG_NFC_PN533)		+= pn533/
 obj-$(CONFIG_NFC_WILINK)	+= nfcwilink.o
 obj-$(CONFIG_NFC_MEI_PHY)	+= mei_phy.o
 obj-$(CONFIG_NFC_SIM)		+= nfcsim.o
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index ccb07a1..e44a7a2 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -102,7 +102,8 @@
 	if (r)
 		return r;
 
-	return nci_get_conn_info_by_id(ndev, 0);
+	return nci_get_conn_info_by_dest_type_params(ndev,
+						     FDP_PATCH_CONN_DEST, NULL);
 }
 
 static inline int fdp_nci_get_versions(struct nci_dev *ndev)
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 11520f4..36099e5 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -418,7 +418,6 @@
 static struct i2c_driver nxp_nci_i2c_driver = {
 	.driver = {
 		   .name = NXP_NCI_I2C_DRIVER_NAME,
-		   .owner  = THIS_MODULE,
 		   .acpi_match_table = ACPI_PTR(acpi_id),
 		   .of_match_table = of_match_ptr(of_nxp_nci_i2c_match),
 		  },
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
deleted file mode 100644
index bb3d5ea..0000000
--- a/drivers/nfc/pn533.c
+++ /dev/null
@@ -1,3313 +0,0 @@
-/*
- * Copyright (C) 2011 Instituto Nokia de Tecnologia
- * Copyright (C) 2012-2013 Tieto Poland
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/usb.h>
-#include <linux/nfc.h>
-#include <linux/netdevice.h>
-#include <net/nfc/nfc.h>
-
-#define VERSION "0.2"
-
-#define PN533_VENDOR_ID 0x4CC
-#define PN533_PRODUCT_ID 0x2533
-
-#define SCM_VENDOR_ID 0x4E6
-#define SCL3711_PRODUCT_ID 0x5591
-
-#define SONY_VENDOR_ID         0x054c
-#define PASORI_PRODUCT_ID      0x02e1
-
-#define ACS_VENDOR_ID 0x072f
-#define ACR122U_PRODUCT_ID 0x2200
-
-#define PN533_DEVICE_STD     0x1
-#define PN533_DEVICE_PASORI  0x2
-#define PN533_DEVICE_ACR122U 0x3
-
-#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
-			     NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
-			     NFC_PROTO_NFC_DEP_MASK |\
-			     NFC_PROTO_ISO14443_B_MASK)
-
-#define PN533_NO_TYPE_B_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
-				   NFC_PROTO_MIFARE_MASK | \
-				   NFC_PROTO_FELICA_MASK | \
-				   NFC_PROTO_ISO14443_MASK | \
-				   NFC_PROTO_NFC_DEP_MASK)
-
-static const struct usb_device_id pn533_table[] = {
-	{ USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID),
-	  .driver_info = PN533_DEVICE_STD },
-	{ USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID),
-	  .driver_info = PN533_DEVICE_STD },
-	{ USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID),
-	  .driver_info = PN533_DEVICE_PASORI },
-	{ USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID),
-	  .driver_info = PN533_DEVICE_ACR122U },
-	{ }
-};
-MODULE_DEVICE_TABLE(usb, pn533_table);
-
-/* How much time we spend listening for initiators */
-#define PN533_LISTEN_TIME 2
-/* Delay between each poll frame (ms) */
-#define PN533_POLL_INTERVAL 10
-
-/* Standard pn533 frame definitions (standard and extended)*/
-#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \
-					+ 2) /* data[0] TFI, data[1] CC */
-#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/
-
-#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \
-					+ 2) /* data[0] TFI, data[1] CC */
-
-#define PN533_CMD_DATAEXCH_DATA_MAXLEN	262
-#define PN533_CMD_DATAFRAME_MAXLEN	240	/* max data length (send) */
-
-/*
- * Max extended frame payload len, excluding TFI and CC
- * which are already in PN533_FRAME_HEADER_LEN.
- */
-#define PN533_STD_FRAME_MAX_PAYLOAD_LEN 263
-
-#define PN533_STD_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2),
-				  Postamble (1) */
-#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
-#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
-/* Half start code (3), LEN (4) should be 0xffff for extended frame */
-#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \
-					&& (hdr)->datalen_checksum == 0xFF)
-#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)])
-
-/* start of frame */
-#define PN533_STD_FRAME_SOF 0x00FF
-
-/* standard frame identifier: in/out/error */
-#define PN533_STD_FRAME_IDENTIFIER(f) (f->data[0]) /* TFI */
-#define PN533_STD_FRAME_DIR_OUT 0xD4
-#define PN533_STD_FRAME_DIR_IN 0xD5
-
-/* ACS ACR122 pn533 frame definitions */
-#define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \
-					  + 2)
-#define PN533_ACR122_TX_FRAME_TAIL_LEN 0
-#define PN533_ACR122_RX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_rx_frame) \
-					  + 2)
-#define PN533_ACR122_RX_FRAME_TAIL_LEN 2
-#define PN533_ACR122_FRAME_MAX_PAYLOAD_LEN PN533_STD_FRAME_MAX_PAYLOAD_LEN
-
-/* CCID messages types */
-#define PN533_ACR122_PC_TO_RDR_ICCPOWERON 0x62
-#define PN533_ACR122_PC_TO_RDR_ESCAPE 0x6B
-
-#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83
-
-/* PN533 Commands */
-#define PN533_FRAME_CMD(f) (f->data[1])
-
-#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
-#define PN533_CMD_RF_CONFIGURATION 0x32
-#define PN533_CMD_IN_DATA_EXCHANGE 0x40
-#define PN533_CMD_IN_COMM_THRU     0x42
-#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
-#define PN533_CMD_IN_ATR 0x50
-#define PN533_CMD_IN_RELEASE 0x52
-#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
-
-#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
-#define PN533_CMD_TG_GET_DATA 0x86
-#define PN533_CMD_TG_SET_DATA 0x8e
-#define PN533_CMD_TG_SET_META_DATA 0x94
-#define PN533_CMD_UNDEF 0xff
-
-#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
-
-/* PN533 Return codes */
-#define PN533_CMD_RET_MASK 0x3F
-#define PN533_CMD_MI_MASK 0x40
-#define PN533_CMD_RET_SUCCESS 0x00
-
-struct pn533;
-
-typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg,
-					struct sk_buff *resp);
-
-/* structs for pn533 commands */
-
-/* PN533_CMD_GET_FIRMWARE_VERSION */
-struct pn533_fw_version {
-	u8 ic;
-	u8 ver;
-	u8 rev;
-	u8 support;
-};
-
-/* PN533_CMD_RF_CONFIGURATION */
-#define PN533_CFGITEM_RF_FIELD    0x01
-#define PN533_CFGITEM_TIMING      0x02
-#define PN533_CFGITEM_MAX_RETRIES 0x05
-#define PN533_CFGITEM_PASORI      0x82
-
-#define PN533_CFGITEM_RF_FIELD_AUTO_RFCA 0x2
-#define PN533_CFGITEM_RF_FIELD_ON        0x1
-#define PN533_CFGITEM_RF_FIELD_OFF       0x0
-
-#define PN533_CONFIG_TIMING_102 0xb
-#define PN533_CONFIG_TIMING_204 0xc
-#define PN533_CONFIG_TIMING_409 0xd
-#define PN533_CONFIG_TIMING_819 0xe
-
-#define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00
-#define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF
-
-struct pn533_config_max_retries {
-	u8 mx_rty_atr;
-	u8 mx_rty_psl;
-	u8 mx_rty_passive_act;
-} __packed;
-
-struct pn533_config_timing {
-	u8 rfu;
-	u8 atr_res_timeout;
-	u8 dep_timeout;
-} __packed;
-
-/* PN533_CMD_IN_LIST_PASSIVE_TARGET */
-
-/* felica commands opcode */
-#define PN533_FELICA_OPC_SENSF_REQ 0
-#define PN533_FELICA_OPC_SENSF_RES 1
-/* felica SENSF_REQ parameters */
-#define PN533_FELICA_SENSF_SC_ALL 0xFFFF
-#define PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE 0
-#define PN533_FELICA_SENSF_RC_SYSTEM_CODE 1
-#define PN533_FELICA_SENSF_RC_ADVANCED_PROTOCOL 2
-
-/* type B initiator_data values */
-#define PN533_TYPE_B_AFI_ALL_FAMILIES 0
-#define PN533_TYPE_B_POLL_METHOD_TIMESLOT 0
-#define PN533_TYPE_B_POLL_METHOD_PROBABILISTIC 1
-
-union pn533_cmd_poll_initdata {
-	struct {
-		u8 afi;
-		u8 polling_method;
-	} __packed type_b;
-	struct {
-		u8 opcode;
-		__be16 sc;
-		u8 rc;
-		u8 tsn;
-	} __packed felica;
-};
-
-/* Poll modulations */
-enum {
-	PN533_POLL_MOD_106KBPS_A,
-	PN533_POLL_MOD_212KBPS_FELICA,
-	PN533_POLL_MOD_424KBPS_FELICA,
-	PN533_POLL_MOD_106KBPS_JEWEL,
-	PN533_POLL_MOD_847KBPS_B,
-	PN533_LISTEN_MOD,
-
-	__PN533_POLL_MOD_AFTER_LAST,
-};
-#define PN533_POLL_MOD_MAX (__PN533_POLL_MOD_AFTER_LAST - 1)
-
-struct pn533_poll_modulations {
-	struct {
-		u8 maxtg;
-		u8 brty;
-		union pn533_cmd_poll_initdata initiator_data;
-	} __packed data;
-	u8 len;
-};
-
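-/*
- * brty selects the baud rate/framing used by InListPassiveTarget:
- * 0 = 106 kbps type A, 1 = 212 kbps FeliCa, 2 = 424 kbps FeliCa,
- * 4 = 106 kbps Jewel, 8 = 847 kbps type B.  len is the number of
- * bytes of .data actually copied into the poll frame (see
- * pn533_alloc_poll_in_frame()); the listen-only entry has len 0.
- */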
-static const struct pn533_poll_modulations poll_mod[] = {
-	[PN533_POLL_MOD_106KBPS_A] = {
-		.data = {
-			.maxtg = 1,
-			.brty = 0,
-		},
-		.len = 2,
-	},
-	[PN533_POLL_MOD_212KBPS_FELICA] = {
-		.data = {
-			.maxtg = 1,
-			.brty = 1,
-			.initiator_data.felica = {
-				.opcode = PN533_FELICA_OPC_SENSF_REQ,
-				.sc = PN533_FELICA_SENSF_SC_ALL,
-				.rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
-				.tsn = 0x03,
-			},
-		},
-		.len = 7,
-	},
-	[PN533_POLL_MOD_424KBPS_FELICA] = {
-		.data = {
-			.maxtg = 1,
-			.brty = 2,
-			.initiator_data.felica = {
-				.opcode = PN533_FELICA_OPC_SENSF_REQ,
-				.sc = PN533_FELICA_SENSF_SC_ALL,
-				.rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
-				.tsn = 0x03,
-			},
-		 },
-		.len = 7,
-	},
-	[PN533_POLL_MOD_106KBPS_JEWEL] = {
-		.data = {
-			.maxtg = 1,
-			.brty = 4,
-		},
-		.len = 2,
-	},
-	[PN533_POLL_MOD_847KBPS_B] = {
-		.data = {
-			.maxtg = 1,
-			.brty = 8,
-			.initiator_data.type_b = {
-				.afi = PN533_TYPE_B_AFI_ALL_FAMILIES,
-				.polling_method =
-					PN533_TYPE_B_POLL_METHOD_TIMESLOT,
-			},
-		},
-		.len = 3,
-	},
-	[PN533_LISTEN_MOD] = {
-		.len = 0,
-	},
-};
-
-/* PN533_CMD_IN_ATR */
-
-struct pn533_cmd_activate_response {
-	u8 status;
-	u8 nfcid3t[10];
-	u8 didt;
-	u8 bst;
-	u8 brt;
-	u8 to;
-	u8 ppt;
-	/* optional */
-	u8 gt[];
-} __packed;
-
-struct pn533_cmd_jump_dep_response {
-	u8 status;
-	u8 tg;
-	u8 nfcid3t[10];
-	u8 didt;
-	u8 bst;
-	u8 brt;
-	u8 to;
-	u8 ppt;
-	/* optional */
-	u8 gt[];
-} __packed;
-
-
-/* PN533_TG_INIT_AS_TARGET */
-#define PN533_INIT_TARGET_PASSIVE 0x1
-#define PN533_INIT_TARGET_DEP 0x2
-
-#define PN533_INIT_TARGET_RESP_FRAME_MASK 0x3
-#define PN533_INIT_TARGET_RESP_ACTIVE     0x1
-#define PN533_INIT_TARGET_RESP_DEP        0x4
-
-enum  pn533_protocol_type {
-	PN533_PROTO_REQ_ACK_RESP = 0,
-	PN533_PROTO_REQ_RESP
-};
-
-struct pn533 {
-	struct usb_device *udev;
-	struct usb_interface *interface;
-	struct nfc_dev *nfc_dev;
-	u32 device_type;
-	enum pn533_protocol_type protocol_type;
-
-	struct urb *out_urb;
-	struct urb *in_urb;
-
-	struct sk_buff_head resp_q;
-	struct sk_buff_head fragment_skb;
-
-	struct workqueue_struct	*wq;
-	struct work_struct cmd_work;
-	struct work_struct cmd_complete_work;
-	struct delayed_work poll_work;
-	struct work_struct mi_rx_work;
-	struct work_struct mi_tx_work;
-	struct work_struct mi_tm_rx_work;
-	struct work_struct mi_tm_tx_work;
-	struct work_struct tg_work;
-	struct work_struct rf_work;
-
-	struct list_head cmd_queue;
-	struct pn533_cmd *cmd;
-	u8 cmd_pending;
-	struct mutex cmd_lock;  /* protects cmd queue */
-
-	void *cmd_complete_mi_arg;
-	void *cmd_complete_dep_arg;
-
-	struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
-	u8 poll_mod_count;
-	u8 poll_mod_curr;
-	u8 poll_dep;
-	u32 poll_protocols;
-	u32 listen_protocols;
-	struct timer_list listen_timer;
-	int cancel_listen;
-
-	u8 *gb;
-	size_t gb_len;
-
-	u8 tgt_available_prots;
-	u8 tgt_active_prot;
-	u8 tgt_mode;
-
-	struct pn533_frame_ops *ops;
-};
-
-struct pn533_cmd {
-	struct list_head queue;
-	u8 code;
-	int status;
-	struct sk_buff *req;
-	struct sk_buff *resp;
-	int resp_len;
-	pn533_send_async_complete_t  complete_cb;
-	void *complete_cb_context;
-};
-
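-/*
- * On the wire a standard frame looks roughly like this (see the
- * definitions above): preamble, start code 0x00FF, LEN, LCS (checksum
- * of LEN), then LEN data bytes starting with TFI (0xD4 out / 0xD5 in)
- * and the command code, followed by DCS (checksum of the data) and a
- * postamble.  An extended frame replaces LEN/LCS with 0xFF 0xFF plus a
- * 16-bit length and its own checksum.
- */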
-struct pn533_std_frame {
-	u8 preamble;
-	__be16 start_frame;
-	u8 datalen;
-	u8 datalen_checksum;
-	u8 data[];
-} __packed;
-
-struct pn533_ext_frame {	/* Extended Information frame */
-	u8 preamble;
-	__be16 start_frame;
-	__be16 eif_flag;	/* fixed to 0xFFFF */
-	__be16 datalen;
-	u8 datalen_checksum;
-	u8 data[];
-} __packed;
-
-struct pn533_frame_ops {
-	void (*tx_frame_init)(void *frame, u8 cmd_code);
-	void (*tx_frame_finish)(void *frame);
-	void (*tx_update_payload_len)(void *frame, int len);
-	int tx_header_len;
-	int tx_tail_len;
-
-	bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev);
-	int (*rx_frame_size)(void *frame);
-	int rx_header_len;
-	int rx_tail_len;
-
-	int max_payload_len;
-	u8 (*get_cmd_code)(void *frame);
-};
-
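-/*
- * The ACR122U tunnels pn533 frames inside CCID messages: a TX frame is
- * a PC_to_RDR_Escape CCID header plus a pseudo-APDU header (class 0xFF),
- * with the usual TFI/command bytes carried in data[]; responses come
- * back wrapped in an RDR_to_PC_Escape header (type 0x83).
- */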
-struct pn533_acr122_ccid_hdr {
-	u8 type;
-	u32 datalen;
-	u8 slot;
-	u8 seq;
-	u8 params[3]; /* 3 msg specific bytes or status, error and 1 specific
-			 byte for response msg */
-	u8 data[]; /* payload */
-} __packed;
-
-struct pn533_acr122_apdu_hdr {
-	u8 class;
-	u8 ins;
-	u8 p1;
-	u8 p2;
-} __packed;
-
-struct pn533_acr122_tx_frame {
-	struct pn533_acr122_ccid_hdr ccid;
-	struct pn533_acr122_apdu_hdr apdu;
-	u8 datalen;
-	u8 data[]; /* pn533 frame: TFI ... */
-} __packed;
-
-struct pn533_acr122_rx_frame {
-	struct pn533_acr122_ccid_hdr ccid;
-	u8 data[]; /* pn533 frame : TFI ... */
-} __packed;
-
-static void pn533_acr122_tx_frame_init(void *_frame, u8 cmd_code)
-{
-	struct pn533_acr122_tx_frame *frame = _frame;
-
-	frame->ccid.type = PN533_ACR122_PC_TO_RDR_ESCAPE;
-	frame->ccid.datalen = sizeof(frame->apdu) + 1; /* sizeof(apdu_hdr) +
-							  sizeof(datalen) */
-	frame->ccid.slot = 0;
-	frame->ccid.seq = 0;
-	frame->ccid.params[0] = 0;
-	frame->ccid.params[1] = 0;
-	frame->ccid.params[2] = 0;
-
-	frame->data[0] = PN533_STD_FRAME_DIR_OUT;
-	frame->data[1] = cmd_code;
-	frame->datalen = 2;  /* data[0] + data[1] */
-
-	frame->apdu.class = 0xFF;
-	frame->apdu.ins = 0;
-	frame->apdu.p1 = 0;
-	frame->apdu.p2 = 0;
-}
-
-static void pn533_acr122_tx_frame_finish(void *_frame)
-{
-	struct pn533_acr122_tx_frame *frame = _frame;
-
-	frame->ccid.datalen += frame->datalen;
-}
-
-static void pn533_acr122_tx_update_payload_len(void *_frame, int len)
-{
-	struct pn533_acr122_tx_frame *frame = _frame;
-
-	frame->datalen += len;
-}
-
-static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
-{
-	struct pn533_acr122_rx_frame *frame = _frame;
-
-	if (frame->ccid.type != 0x83)
-		return false;
-
-	if (!frame->ccid.datalen)
-		return false;
-
-	if (frame->data[frame->ccid.datalen - 2] == 0x63)
-		return false;
-
-	return true;
-}
-
-static int pn533_acr122_rx_frame_size(void *frame)
-{
-	struct pn533_acr122_rx_frame *f = frame;
-
-	/* f->ccid.datalen already includes tail length */
-	return sizeof(struct pn533_acr122_rx_frame) + f->ccid.datalen;
-}
-
-static u8 pn533_acr122_get_cmd_code(void *frame)
-{
-	struct pn533_acr122_rx_frame *f = frame;
-
-	return PN533_FRAME_CMD(f);
-}
-
-static struct pn533_frame_ops pn533_acr122_frame_ops = {
-	.tx_frame_init = pn533_acr122_tx_frame_init,
-	.tx_frame_finish = pn533_acr122_tx_frame_finish,
-	.tx_update_payload_len = pn533_acr122_tx_update_payload_len,
-	.tx_header_len = PN533_ACR122_TX_FRAME_HEADER_LEN,
-	.tx_tail_len = PN533_ACR122_TX_FRAME_TAIL_LEN,
-
-	.rx_is_frame_valid = pn533_acr122_is_rx_frame_valid,
-	.rx_header_len = PN533_ACR122_RX_FRAME_HEADER_LEN,
-	.rx_tail_len = PN533_ACR122_RX_FRAME_TAIL_LEN,
-	.rx_frame_size = pn533_acr122_rx_frame_size,
-
-	.max_payload_len = PN533_ACR122_FRAME_MAX_PAYLOAD_LEN,
-	.get_cmd_code = pn533_acr122_get_cmd_code,
-};
-
-/* The rule: value(high byte) + value(low byte) + checksum = 0 */
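-/*
- * For example, a 16-bit length of 0x01FE gives a checksum of 0x01,
- * since (0x01 + 0xFE + 0x01) & 0xFF == 0.
- */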
-static inline u8 pn533_ext_checksum(u16 value)
-{
-	return ~(u8)(((value & 0xFF00) >> 8) + (u8)(value & 0xFF)) + 1;
-}
-
-/* The rule: value + checksum = 0 */
-static inline u8 pn533_std_checksum(u8 value)
-{
-	return ~value + 1;
-}
-
-/* The rule: sum(data elements) + checksum = 0 */
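-/*
- * For example, the two-byte payload { 0xD4, 0x02 } (TFI plus the
- * GetFirmwareVersion command) gives a checksum of 0x2A, since
- * (0xD4 + 0x02 + 0x2A) & 0xFF == 0.
- */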
-static u8 pn533_std_data_checksum(u8 *data, int datalen)
-{
-	u8 sum = 0;
-	int i;
-
-	for (i = 0; i < datalen; i++)
-		sum += data[i];
-
-	return pn533_std_checksum(sum);
-}
-
-static void pn533_std_tx_frame_init(void *_frame, u8 cmd_code)
-{
-	struct pn533_std_frame *frame = _frame;
-
-	frame->preamble = 0;
-	frame->start_frame = cpu_to_be16(PN533_STD_FRAME_SOF);
-	PN533_STD_FRAME_IDENTIFIER(frame) = PN533_STD_FRAME_DIR_OUT;
-	PN533_FRAME_CMD(frame) = cmd_code;
-	frame->datalen = 2;
-}
-
-static void pn533_std_tx_frame_finish(void *_frame)
-{
-	struct pn533_std_frame *frame = _frame;
-
-	frame->datalen_checksum = pn533_std_checksum(frame->datalen);
-
-	PN533_STD_FRAME_CHECKSUM(frame) =
-		pn533_std_data_checksum(frame->data, frame->datalen);
-
-	PN533_STD_FRAME_POSTAMBLE(frame) = 0;
-}
-
-static void pn533_std_tx_update_payload_len(void *_frame, int len)
-{
-	struct pn533_std_frame *frame = _frame;
-
-	frame->datalen += len;
-}
-
-static bool pn533_std_rx_frame_is_valid(void *_frame, struct pn533 *dev)
-{
-	u8 checksum;
-	struct pn533_std_frame *stdf = _frame;
-
-	if (stdf->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
-		return false;
-
-	if (likely(!PN533_STD_IS_EXTENDED(stdf))) {
-		/* Standard frame code */
-		dev->ops->rx_header_len = PN533_STD_FRAME_HEADER_LEN;
-
-		checksum = pn533_std_checksum(stdf->datalen);
-		if (checksum != stdf->datalen_checksum)
-			return false;
-
-		checksum = pn533_std_data_checksum(stdf->data, stdf->datalen);
-		if (checksum != PN533_STD_FRAME_CHECKSUM(stdf))
-			return false;
-	} else {
-		/* Extended */
-		struct pn533_ext_frame *eif = _frame;
-
-		dev->ops->rx_header_len = PN533_EXT_FRAME_HEADER_LEN;
-
-		checksum = pn533_ext_checksum(be16_to_cpu(eif->datalen));
-		if (checksum != eif->datalen_checksum)
-			return false;
-
-		/* check data checksum */
-		checksum = pn533_std_data_checksum(eif->data,
-						   be16_to_cpu(eif->datalen));
-		if (checksum != PN533_EXT_FRAME_CHECKSUM(eif))
-			return false;
-	}
-
-	return true;
-}
-
-static bool pn533_std_rx_frame_is_ack(struct pn533_std_frame *frame)
-{
-	if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
-		return false;
-
-	if (frame->datalen != 0 || frame->datalen_checksum != 0xFF)
-		return false;
-
-	return true;
-}
-
-static inline int pn533_std_rx_frame_size(void *frame)
-{
-	struct pn533_std_frame *f = frame;
-
-	/* check for Extended Information frame */
-	if (PN533_STD_IS_EXTENDED(f)) {
-		struct pn533_ext_frame *eif = frame;
-
-		return sizeof(struct pn533_ext_frame)
-			+ be16_to_cpu(eif->datalen) + PN533_STD_FRAME_TAIL_LEN;
-	}
-
-	return sizeof(struct pn533_std_frame) + f->datalen +
-	       PN533_STD_FRAME_TAIL_LEN;
-}
-
-static u8 pn533_std_get_cmd_code(void *frame)
-{
-	struct pn533_std_frame *f = frame;
-	struct pn533_ext_frame *eif = frame;
-
-	if (PN533_STD_IS_EXTENDED(f))
-		return PN533_FRAME_CMD(eif);
-	else
-		return PN533_FRAME_CMD(f);
-}
-
-static struct pn533_frame_ops pn533_std_frame_ops = {
-	.tx_frame_init = pn533_std_tx_frame_init,
-	.tx_frame_finish = pn533_std_tx_frame_finish,
-	.tx_update_payload_len = pn533_std_tx_update_payload_len,
-	.tx_header_len = PN533_STD_FRAME_HEADER_LEN,
-	.tx_tail_len = PN533_STD_FRAME_TAIL_LEN,
-
-	.rx_is_frame_valid = pn533_std_rx_frame_is_valid,
-	.rx_frame_size = pn533_std_rx_frame_size,
-	.rx_header_len = PN533_STD_FRAME_HEADER_LEN,
-	.rx_tail_len = PN533_STD_FRAME_TAIL_LEN,
-
-	.max_payload_len =  PN533_STD_FRAME_MAX_PAYLOAD_LEN,
-	.get_cmd_code = pn533_std_get_cmd_code,
-};
-
-static bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame)
-{
-	return (dev->ops->get_cmd_code(frame) ==
-				PN533_CMD_RESPONSE(dev->cmd->code));
-}
-
-static void pn533_recv_response(struct urb *urb)
-{
-	struct pn533 *dev = urb->context;
-	struct pn533_cmd *cmd = dev->cmd;
-	u8 *in_frame;
-
-	cmd->status = urb->status;
-
-	switch (urb->status) {
-	case 0:
-		break; /* success */
-	case -ECONNRESET:
-	case -ENOENT:
-		dev_dbg(&dev->interface->dev,
-			"The urb has been canceled (status %d)\n",
-			urb->status);
-		goto sched_wq;
-	case -ESHUTDOWN:
-	default:
-		nfc_err(&dev->interface->dev,
-			"Urb failure (status %d)\n", urb->status);
-		goto sched_wq;
-	}
-
-	in_frame = dev->in_urb->transfer_buffer;
-
-	dev_dbg(&dev->interface->dev, "Received a frame\n");
-	print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
-			     dev->ops->rx_frame_size(in_frame), false);
-
-	if (!dev->ops->rx_is_frame_valid(in_frame, dev)) {
-		nfc_err(&dev->interface->dev, "Received an invalid frame\n");
-		cmd->status = -EIO;
-		goto sched_wq;
-	}
-
-	if (!pn533_rx_frame_is_cmd_response(dev, in_frame)) {
-		nfc_err(&dev->interface->dev,
-			"It is not the response to the last command\n");
-		cmd->status = -EIO;
-		goto sched_wq;
-	}
-
-sched_wq:
-	queue_work(dev->wq, &dev->cmd_complete_work);
-}
-
-static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
-{
-	dev->in_urb->complete = pn533_recv_response;
-
-	return usb_submit_urb(dev->in_urb, flags);
-}
-
-static void pn533_recv_ack(struct urb *urb)
-{
-	struct pn533 *dev = urb->context;
-	struct pn533_cmd *cmd = dev->cmd;
-	struct pn533_std_frame *in_frame;
-	int rc;
-
-	cmd->status = urb->status;
-
-	switch (urb->status) {
-	case 0:
-		break; /* success */
-	case -ECONNRESET:
-	case -ENOENT:
-		dev_dbg(&dev->interface->dev,
-			"The urb has been stopped (status %d)\n",
-			urb->status);
-		goto sched_wq;
-	case -ESHUTDOWN:
-	default:
-		nfc_err(&dev->interface->dev,
-			"Urb failure (status %d)\n", urb->status);
-		goto sched_wq;
-	}
-
-	in_frame = dev->in_urb->transfer_buffer;
-
-	if (!pn533_std_rx_frame_is_ack(in_frame)) {
-		nfc_err(&dev->interface->dev, "Received an invalid ack\n");
-		cmd->status = -EIO;
-		goto sched_wq;
-	}
-
-	rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
-	if (rc) {
-		nfc_err(&dev->interface->dev,
-			"usb_submit_urb failed with result %d\n", rc);
-		cmd->status = rc;
-		goto sched_wq;
-	}
-
-	return;
-
-sched_wq:
-	queue_work(dev->wq, &dev->cmd_complete_work);
-}
-
-static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
-{
-	dev->in_urb->complete = pn533_recv_ack;
-
-	return usb_submit_urb(dev->in_urb, flags);
-}
-
-static int pn533_send_ack(struct pn533 *dev, gfp_t flags)
-{
-	u8 ack[PN533_STD_FRAME_ACK_SIZE] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
-	/* spec 7.1.1.3:  Preamble, SoPC (2), ACK Code (2), Postamble */
-	int rc;
-
-	dev->out_urb->transfer_buffer = ack;
-	dev->out_urb->transfer_buffer_length = sizeof(ack);
-	rc = usb_submit_urb(dev->out_urb, flags);
-
-	return rc;
-}
-
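-/*
- * Submit the out urb carrying the prepared frame and immediately arm the
- * in urb: either directly for the response (PN533_PROTO_REQ_RESP) or for
- * the ACK frame first (PN533_PROTO_REQ_ACK_RESP), in which case the
- * response urb is submitted from pn533_recv_ack().
- */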
-static int __pn533_send_frame_async(struct pn533 *dev,
-					struct sk_buff *out,
-					struct sk_buff *in,
-					int in_len)
-{
-	int rc;
-
-	dev->out_urb->transfer_buffer = out->data;
-	dev->out_urb->transfer_buffer_length = out->len;
-
-	dev->in_urb->transfer_buffer = in->data;
-	dev->in_urb->transfer_buffer_length = in_len;
-
-	print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
-			     out->data, out->len, false);
-
-	rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
-	if (rc)
-		return rc;
-
-	if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
-		/* request for response for sent packet directly */
-		rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
-		if (rc)
-			goto error;
-	} else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
-		/* request for ACK if that's the case */
-		rc = pn533_submit_urb_for_ack(dev, GFP_KERNEL);
-		if (rc)
-			goto error;
-	}
-
-	return 0;
-
-error:
-	usb_unlink_urb(dev->out_urb);
-	return rc;
-}
-
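-/*
- * Wrap the payload already placed in the skb: push room for the frame
- * header, reserve the tail, then let the frame ops fill in the header,
- * account for the payload length and compute checksums/postamble.
- */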
-static void pn533_build_cmd_frame(struct pn533 *dev, u8 cmd_code,
-				  struct sk_buff *skb)
-{
-	/* payload is already there, just update datalen */
-	int payload_len = skb->len;
-	struct pn533_frame_ops *ops = dev->ops;
-
-
-	skb_push(skb, ops->tx_header_len);
-	skb_put(skb, ops->tx_tail_len);
-
-	ops->tx_frame_init(skb->data, cmd_code);
-	ops->tx_update_payload_len(skb->data, payload_len);
-	ops->tx_frame_finish(skb->data);
-}
-
-static int pn533_send_async_complete(struct pn533 *dev)
-{
-	struct pn533_cmd *cmd = dev->cmd;
-	int status = cmd->status;
-
-	struct sk_buff *req = cmd->req;
-	struct sk_buff *resp = cmd->resp;
-
-	int rc;
-
-	dev_kfree_skb(req);
-
-	if (status < 0) {
-		rc = cmd->complete_cb(dev, cmd->complete_cb_context,
-				      ERR_PTR(status));
-		dev_kfree_skb(resp);
-		goto done;
-	}
-
-	skb_put(resp, dev->ops->rx_frame_size(resp->data));
-	skb_pull(resp, dev->ops->rx_header_len);
-	skb_trim(resp, resp->len - dev->ops->rx_tail_len);
-
-	rc = cmd->complete_cb(dev, cmd->complete_cb_context, resp);
-
-done:
-	kfree(cmd);
-	dev->cmd = NULL;
-	return rc;
-}
-
-static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
-			      struct sk_buff *req, struct sk_buff *resp,
-			      int resp_len,
-			      pn533_send_async_complete_t complete_cb,
-			      void *complete_cb_context)
-{
-	struct pn533_cmd *cmd;
-	int rc = 0;
-
-	dev_dbg(&dev->interface->dev, "Sending command 0x%x\n", cmd_code);
-
-	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
-	cmd->code = cmd_code;
-	cmd->req = req;
-	cmd->resp = resp;
-	cmd->resp_len = resp_len;
-	cmd->complete_cb = complete_cb;
-	cmd->complete_cb_context = complete_cb_context;
-
-	pn533_build_cmd_frame(dev, cmd_code, req);
-
-	mutex_lock(&dev->cmd_lock);
-
-	if (!dev->cmd_pending) {
-		rc = __pn533_send_frame_async(dev, req, resp, resp_len);
-		if (rc)
-			goto error;
-
-		dev->cmd_pending = 1;
-		dev->cmd = cmd;
-		goto unlock;
-	}
-
-	dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x\n",
-		__func__, cmd_code);
-
-	INIT_LIST_HEAD(&cmd->queue);
-	list_add_tail(&cmd->queue, &dev->cmd_queue);
-
-	goto unlock;
-
-error:
-	kfree(cmd);
-unlock:
-	mutex_unlock(&dev->cmd_lock);
-	return rc;
-}
-
-static int pn533_send_data_async(struct pn533 *dev, u8 cmd_code,
-				 struct sk_buff *req,
-				 pn533_send_async_complete_t complete_cb,
-				 void *complete_cb_context)
-{
-	struct sk_buff *resp;
-	int rc;
-	int  resp_len = dev->ops->rx_header_len +
-			dev->ops->max_payload_len +
-			dev->ops->rx_tail_len;
-
-	resp = nfc_alloc_recv_skb(resp_len, GFP_KERNEL);
-	if (!resp)
-		return -ENOMEM;
-
-	rc = __pn533_send_async(dev, cmd_code, req, resp, resp_len, complete_cb,
-				complete_cb_context);
-	if (rc)
-		dev_kfree_skb(resp);
-
-	return rc;
-}
-
-static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code,
-				struct sk_buff *req,
-				pn533_send_async_complete_t complete_cb,
-				void *complete_cb_context)
-{
-	struct sk_buff *resp;
-	int rc;
-	int  resp_len = dev->ops->rx_header_len +
-			dev->ops->max_payload_len +
-			dev->ops->rx_tail_len;
-
-	resp = alloc_skb(resp_len, GFP_KERNEL);
-	if (!resp)
-		return -ENOMEM;
-
-	rc = __pn533_send_async(dev, cmd_code, req, resp, resp_len, complete_cb,
-				complete_cb_context);
-	if (rc)
-		dev_kfree_skb(resp);
-
-	return rc;
-}
-
-/*
- * pn533_send_cmd_direct_async
- *
 * The function sends a priority cmd directly to the chip, bypassing the cmd
 * queue. It's intended to be used by the chaining mechanism for received
 * responses, where the host has to request every single chunk of data before
 * scheduling the next cmd from the queue.
- */
-static int pn533_send_cmd_direct_async(struct pn533 *dev, u8 cmd_code,
-				       struct sk_buff *req,
-				       pn533_send_async_complete_t complete_cb,
-				       void *complete_cb_context)
-{
-	struct sk_buff *resp;
-	struct pn533_cmd *cmd;
-	int rc;
-	int resp_len = dev->ops->rx_header_len +
-		       dev->ops->max_payload_len +
-		       dev->ops->rx_tail_len;
-
-	resp = alloc_skb(resp_len, GFP_KERNEL);
-	if (!resp)
-		return -ENOMEM;
-
-	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
-	if (!cmd) {
-		dev_kfree_skb(resp);
-		return -ENOMEM;
-	}
-
-	cmd->code = cmd_code;
-	cmd->req = req;
-	cmd->resp = resp;
-	cmd->resp_len = resp_len;
-	cmd->complete_cb = complete_cb;
-	cmd->complete_cb_context = complete_cb_context;
-
-	pn533_build_cmd_frame(dev, cmd_code, req);
-
-	rc = __pn533_send_frame_async(dev, req, resp, resp_len);
-	if (rc < 0) {
-		dev_kfree_skb(resp);
-		kfree(cmd);
-	} else {
-		dev->cmd = cmd;
-	}
-
-	return rc;
-}
-
-static void pn533_wq_cmd_complete(struct work_struct *work)
-{
-	struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work);
-	int rc;
-
-	rc = pn533_send_async_complete(dev);
-	if (rc != -EINPROGRESS)
-		queue_work(dev->wq, &dev->cmd_work);
-}
-
-static void pn533_wq_cmd(struct work_struct *work)
-{
-	struct pn533 *dev = container_of(work, struct pn533, cmd_work);
-	struct pn533_cmd *cmd;
-	int rc;
-
-	mutex_lock(&dev->cmd_lock);
-
-	if (list_empty(&dev->cmd_queue)) {
-		dev->cmd_pending = 0;
-		mutex_unlock(&dev->cmd_lock);
-		return;
-	}
-
-	cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
-
-	list_del(&cmd->queue);
-
-	mutex_unlock(&dev->cmd_lock);
-
-	rc = __pn533_send_frame_async(dev, cmd->req, cmd->resp, cmd->resp_len);
-	if (rc < 0) {
-		dev_kfree_skb(cmd->req);
-		dev_kfree_skb(cmd->resp);
-		kfree(cmd);
-		return;
-	}
-
-	dev->cmd = cmd;
-}
-
-struct pn533_sync_cmd_response {
-	struct sk_buff *resp;
-	struct completion done;
-};
-
-static int pn533_send_sync_complete(struct pn533 *dev, void *_arg,
-				    struct sk_buff *resp)
-{
-	struct pn533_sync_cmd_response *arg = _arg;
-
-	arg->resp = resp;
-	complete(&arg->done);
-
-	return 0;
-}
-
-/*  pn533_send_cmd_sync
- *
- *  Please note the req parameter is freed inside the function to
- *  limit the number of return value interpretations by the caller.
- *
- *  1. negative in case of error during TX path -> req should be freed
- *
- *  2. negative in case of error during RX path -> req should not be freed
- *     as it's been already freed at the beginning of the RX path by
- *     async_complete_cb.
- *
- *  3. valid pointer in case of successful RX path
- *
- *  A caller has to check the return value with the IS_ERR macro. If
- *  IS_ERR() returns false, the returned pointer is valid.
- */
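-/*
- * Typical use (see pn533_activate_target_nfcdep() below): build an skb
- * with pn533_alloc_skb(), append the command parameters, call
- * pn533_send_cmd_sync() and check the result with IS_ERR() before
- * reading the response data.
- */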
-static struct sk_buff *pn533_send_cmd_sync(struct pn533 *dev, u8 cmd_code,
-					       struct sk_buff *req)
-{
-	int rc;
-	struct pn533_sync_cmd_response arg;
-
-	init_completion(&arg.done);
-
-	rc = pn533_send_cmd_async(dev, cmd_code, req,
-				  pn533_send_sync_complete, &arg);
-	if (rc) {
-		dev_kfree_skb(req);
-		return ERR_PTR(rc);
-	}
-
-	wait_for_completion(&arg.done);
-
-	return arg.resp;
-}
-
-static void pn533_send_complete(struct urb *urb)
-{
-	struct pn533 *dev = urb->context;
-
-	switch (urb->status) {
-	case 0:
-		break; /* success */
-	case -ECONNRESET:
-	case -ENOENT:
-		dev_dbg(&dev->interface->dev,
-			"The urb has been stopped (status %d)\n",
-			urb->status);
-		break;
-	case -ESHUTDOWN:
-	default:
-		nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
-			urb->status);
-	}
-}
-
-static void pn533_abort_cmd(struct pn533 *dev, gfp_t flags)
-{
-	/* ACR122U does not support any command which aborts the last
-	 * issued command, i.e. ACK for the standard PN533. Additionally,
-	 * it behaves strangely, sending broken or incorrect responses,
-	 * when we cancel an urb before the chip has sent its response.
-	 */
-	if (dev->device_type == PN533_DEVICE_ACR122U)
-		return;
-
-	/* An ack will cancel the last issued command */
-	pn533_send_ack(dev, flags);
-
-	/* cancel the urb request */
-	usb_kill_urb(dev->in_urb);
-}
-
-static struct sk_buff *pn533_alloc_skb(struct pn533 *dev, unsigned int size)
-{
-	struct sk_buff *skb;
-
-	skb = alloc_skb(dev->ops->tx_header_len +
-			size +
-			dev->ops->tx_tail_len, GFP_KERNEL);
-
-	if (skb)
-		skb_reserve(skb, dev->ops->tx_header_len);
-
-	return skb;
-}
-
-struct pn533_target_type_a {
-	__be16 sens_res;
-	u8 sel_res;
-	u8 nfcid_len;
-	u8 nfcid_data[];
-} __packed;
-
-
-#define PN533_TYPE_A_SENS_RES_NFCID1(x) ((u8)((be16_to_cpu(x) & 0x00C0) >> 6))
-#define PN533_TYPE_A_SENS_RES_SSD(x) ((u8)((be16_to_cpu(x) & 0x001F) >> 0))
-#define PN533_TYPE_A_SENS_RES_PLATCONF(x) ((u8)((be16_to_cpu(x) & 0x0F00) >> 8))
-
-#define PN533_TYPE_A_SENS_RES_SSD_JEWEL 0x00
-#define PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL 0x0C
-
-#define PN533_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5)
-#define PN533_TYPE_A_SEL_CASCADE(x) (((x) & 0x04) >> 2)
-
-#define PN533_TYPE_A_SEL_PROT_MIFARE 0
-#define PN533_TYPE_A_SEL_PROT_ISO14443 1
-#define PN533_TYPE_A_SEL_PROT_DEP 2
-#define PN533_TYPE_A_SEL_PROT_ISO14443_DEP 3
-
-static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
-							int target_data_len)
-{
-	u8 ssd;
-	u8 platconf;
-
-	if (target_data_len < sizeof(struct pn533_target_type_a))
-		return false;
-
-	/* The length check of nfcid[] and ats[] is not being performed because
-	   the values are not being used */
-
-	/* Requirement 4.6.3.3 from NFC Forum Digital Spec */
-	ssd = PN533_TYPE_A_SENS_RES_SSD(type_a->sens_res);
-	platconf = PN533_TYPE_A_SENS_RES_PLATCONF(type_a->sens_res);
-
-	if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
-	     platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) ||
-	    (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
-	     platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL))
-		return false;
-
-	/* Requirements 4.8.2.1, 4.8.2.3, 4.8.2.5 and 4.8.2.7 from NFC Forum */
-	if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
-		return false;
-
-	return true;
-}
-
-static int pn533_target_found_type_a(struct nfc_target *nfc_tgt, u8 *tgt_data,
-							int tgt_data_len)
-{
-	struct pn533_target_type_a *tgt_type_a;
-
-	tgt_type_a = (struct pn533_target_type_a *)tgt_data;
-
-	if (!pn533_target_type_a_is_valid(tgt_type_a, tgt_data_len))
-		return -EPROTO;
-
-	switch (PN533_TYPE_A_SEL_PROT(tgt_type_a->sel_res)) {
-	case PN533_TYPE_A_SEL_PROT_MIFARE:
-		nfc_tgt->supported_protocols = NFC_PROTO_MIFARE_MASK;
-		break;
-	case PN533_TYPE_A_SEL_PROT_ISO14443:
-		nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK;
-		break;
-	case PN533_TYPE_A_SEL_PROT_DEP:
-		nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
-		break;
-	case PN533_TYPE_A_SEL_PROT_ISO14443_DEP:
-		nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK |
-							NFC_PROTO_NFC_DEP_MASK;
-		break;
-	}
-
-	nfc_tgt->sens_res = be16_to_cpu(tgt_type_a->sens_res);
-	nfc_tgt->sel_res = tgt_type_a->sel_res;
-	nfc_tgt->nfcid1_len = tgt_type_a->nfcid_len;
-	memcpy(nfc_tgt->nfcid1, tgt_type_a->nfcid_data, nfc_tgt->nfcid1_len);
-
-	return 0;
-}
-
-struct pn533_target_felica {
-	u8 pol_res;
-	u8 opcode;
-	u8 nfcid2[NFC_NFCID2_MAXSIZE];
-	u8 pad[8];
-	/* optional */
-	u8 syst_code[];
-} __packed;
-
-#define PN533_FELICA_SENSF_NFCID2_DEP_B1 0x01
-#define PN533_FELICA_SENSF_NFCID2_DEP_B2 0xFE
-
-static bool pn533_target_felica_is_valid(struct pn533_target_felica *felica,
-							int target_data_len)
-{
-	if (target_data_len < sizeof(struct pn533_target_felica))
-		return false;
-
-	if (felica->opcode != PN533_FELICA_OPC_SENSF_RES)
-		return false;
-
-	return true;
-}
-
-static int pn533_target_found_felica(struct nfc_target *nfc_tgt, u8 *tgt_data,
-							int tgt_data_len)
-{
-	struct pn533_target_felica *tgt_felica;
-
-	tgt_felica = (struct pn533_target_felica *)tgt_data;
-
-	if (!pn533_target_felica_is_valid(tgt_felica, tgt_data_len))
-		return -EPROTO;
-
-	if ((tgt_felica->nfcid2[0] == PN533_FELICA_SENSF_NFCID2_DEP_B1) &&
-	    (tgt_felica->nfcid2[1] == PN533_FELICA_SENSF_NFCID2_DEP_B2))
-		nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
-	else
-		nfc_tgt->supported_protocols = NFC_PROTO_FELICA_MASK;
-
-	memcpy(nfc_tgt->sensf_res, &tgt_felica->opcode, 9);
-	nfc_tgt->sensf_res_len = 9;
-
-	memcpy(nfc_tgt->nfcid2, tgt_felica->nfcid2, NFC_NFCID2_MAXSIZE);
-	nfc_tgt->nfcid2_len = NFC_NFCID2_MAXSIZE;
-
-	return 0;
-}
-
-struct pn533_target_jewel {
-	__be16 sens_res;
-	u8 jewelid[4];
-} __packed;
-
-static bool pn533_target_jewel_is_valid(struct pn533_target_jewel *jewel,
-							int target_data_len)
-{
-	u8 ssd;
-	u8 platconf;
-
-	if (target_data_len < sizeof(struct pn533_target_jewel))
-		return false;
-
-	/* Requirement 4.6.3.3 from NFC Forum Digital Spec */
-	ssd = PN533_TYPE_A_SENS_RES_SSD(jewel->sens_res);
-	platconf = PN533_TYPE_A_SENS_RES_PLATCONF(jewel->sens_res);
-
-	if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
-	     platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) ||
-	    (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
-	     platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL))
-		return false;
-
-	return true;
-}
-
-static int pn533_target_found_jewel(struct nfc_target *nfc_tgt, u8 *tgt_data,
-							int tgt_data_len)
-{
-	struct pn533_target_jewel *tgt_jewel;
-
-	tgt_jewel = (struct pn533_target_jewel *)tgt_data;
-
-	if (!pn533_target_jewel_is_valid(tgt_jewel, tgt_data_len))
-		return -EPROTO;
-
-	nfc_tgt->supported_protocols = NFC_PROTO_JEWEL_MASK;
-	nfc_tgt->sens_res = be16_to_cpu(tgt_jewel->sens_res);
-	nfc_tgt->nfcid1_len = 4;
-	memcpy(nfc_tgt->nfcid1, tgt_jewel->jewelid, nfc_tgt->nfcid1_len);
-
-	return 0;
-}
-
-struct pn533_type_b_prot_info {
-	u8 bitrate;
-	u8 fsci_type;
-	u8 fwi_adc_fo;
-} __packed;
-
-#define PN533_TYPE_B_PROT_FCSI(x) (((x) & 0xF0) >> 4)
-#define PN533_TYPE_B_PROT_TYPE(x) (((x) & 0x0F) >> 0)
-#define PN533_TYPE_B_PROT_TYPE_RFU_MASK 0x8
-
-struct pn533_type_b_sens_res {
-	u8 opcode;
-	u8 nfcid[4];
-	u8 appdata[4];
-	struct pn533_type_b_prot_info prot_info;
-} __packed;
-
-#define PN533_TYPE_B_OPC_SENSB_RES 0x50
-
-struct pn533_target_type_b {
-	struct pn533_type_b_sens_res sensb_res;
-	u8 attrib_res_len;
-	u8 attrib_res[];
-} __packed;
-
-static bool pn533_target_type_b_is_valid(struct pn533_target_type_b *type_b,
-							int target_data_len)
-{
-	if (target_data_len < sizeof(struct pn533_target_type_b))
-		return false;
-
-	if (type_b->sensb_res.opcode != PN533_TYPE_B_OPC_SENSB_RES)
-		return false;
-
-	if (PN533_TYPE_B_PROT_TYPE(type_b->sensb_res.prot_info.fsci_type) &
-						PN533_TYPE_B_PROT_TYPE_RFU_MASK)
-		return false;
-
-	return true;
-}
-
-static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data,
-							int tgt_data_len)
-{
-	struct pn533_target_type_b *tgt_type_b;
-
-	tgt_type_b = (struct pn533_target_type_b *)tgt_data;
-
-	if (!pn533_target_type_b_is_valid(tgt_type_b, tgt_data_len))
-		return -EPROTO;
-
-	nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
-
-	return 0;
-}
-
-static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
-			      int tgdata_len)
-{
-	struct nfc_target nfc_tgt;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s: modulation=%d\n",
-		__func__, dev->poll_mod_curr);
-
-	if (tg != 1)
-		return -EPROTO;
-
-	memset(&nfc_tgt, 0, sizeof(struct nfc_target));
-
-	switch (dev->poll_mod_curr) {
-	case PN533_POLL_MOD_106KBPS_A:
-		rc = pn533_target_found_type_a(&nfc_tgt, tgdata, tgdata_len);
-		break;
-	case PN533_POLL_MOD_212KBPS_FELICA:
-	case PN533_POLL_MOD_424KBPS_FELICA:
-		rc = pn533_target_found_felica(&nfc_tgt, tgdata, tgdata_len);
-		break;
-	case PN533_POLL_MOD_106KBPS_JEWEL:
-		rc = pn533_target_found_jewel(&nfc_tgt, tgdata, tgdata_len);
-		break;
-	case PN533_POLL_MOD_847KBPS_B:
-		rc = pn533_target_found_type_b(&nfc_tgt, tgdata, tgdata_len);
-		break;
-	default:
-		nfc_err(&dev->interface->dev,
-			"Unknown current poll modulation\n");
-		return -EPROTO;
-	}
-
-	if (rc)
-		return rc;
-
-	if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
-		dev_dbg(&dev->interface->dev,
-			"The Tg found doesn't have the desired protocol\n");
-		return -EAGAIN;
-	}
-
-	dev_dbg(&dev->interface->dev,
-		"Target found - supported protocols: 0x%x\n",
-		nfc_tgt.supported_protocols);
-
-	dev->tgt_available_prots = nfc_tgt.supported_protocols;
-
-	nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
-
-	return 0;
-}
-
-static inline void pn533_poll_next_mod(struct pn533 *dev)
-{
-	dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count;
-}
-
-static void pn533_poll_reset_mod_list(struct pn533 *dev)
-{
-	dev->poll_mod_count = 0;
-}
-
-static void pn533_poll_add_mod(struct pn533 *dev, u8 mod_index)
-{
-	dev->poll_mod_active[dev->poll_mod_count] =
-		(struct pn533_poll_modulations *)&poll_mod[mod_index];
-	dev->poll_mod_count++;
-}
-
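-/*
- * Build the list of modulations to cycle through: type A covers MIFARE,
- * ISO14443 and DEP, both FeliCa rates cover FeliCa and DEP, plus Jewel
- * and type B when requested, and the listen-only entry when target-mode
- * protocols were asked for.
- */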
-static void pn533_poll_create_mod_list(struct pn533 *dev,
-				       u32 im_protocols, u32 tm_protocols)
-{
-	pn533_poll_reset_mod_list(dev);
-
-	if ((im_protocols & NFC_PROTO_MIFARE_MASK) ||
-	    (im_protocols & NFC_PROTO_ISO14443_MASK) ||
-	    (im_protocols & NFC_PROTO_NFC_DEP_MASK))
-		pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A);
-
-	if (im_protocols & NFC_PROTO_FELICA_MASK ||
-	    im_protocols & NFC_PROTO_NFC_DEP_MASK) {
-		pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA);
-		pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA);
-	}
-
-	if (im_protocols & NFC_PROTO_JEWEL_MASK)
-		pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL);
-
-	if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
-		pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B);
-
-	if (tm_protocols)
-		pn533_poll_add_mod(dev, PN533_LISTEN_MOD);
-}
-
-static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp)
-{
-	u8 nbtg, tg, *tgdata;
-	int rc, tgdata_len;
-
-	/* Toggle the DEP polling */
-	dev->poll_dep = 1;
-
-	nbtg = resp->data[0];
-	tg = resp->data[1];
-	tgdata = &resp->data[2];
-	tgdata_len = resp->len - 2;  /* nbtg + tg */
-
-	if (nbtg) {
-		rc = pn533_target_found(dev, tg, tgdata, tgdata_len);
-
-		/* We must stop the poll after a valid target found */
-		if (rc == 0) {
-			pn533_poll_reset_mod_list(dev);
-			return 0;
-		}
-	}
-
-	return -EAGAIN;
-}
-
-static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
-{
-	struct sk_buff *skb;
-	u8 *felica, *nfcid3, *gb;
-
-	u8 *gbytes = dev->gb;
-	size_t gbytes_len = dev->gb_len;
-
-	u8 felica_params[18] = {0x1, 0xfe, /* DEP */
-				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, /* random */
-				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-				0xff, 0xff}; /* System code */
-
-	u8 mifare_params[6] = {0x1, 0x1, /* SENS_RES */
-			       0x0, 0x0, 0x0,
-			       0x40}; /* SEL_RES for DEP */
-
-	unsigned int skb_len = 36 + /* mode (1), mifare (6),
-				       felica (18), nfcid3 (10), gb_len (1) */
-			       gbytes_len +
-			       1;  /* len Tk*/
-
-	skb = pn533_alloc_skb(dev, skb_len);
-	if (!skb)
-		return NULL;
-
-	/* DEP support only */
-	*skb_put(skb, 1) = PN533_INIT_TARGET_DEP;
-
-	/* MIFARE params */
-	memcpy(skb_put(skb, 6), mifare_params, 6);
-
-	/* Felica params */
-	felica = skb_put(skb, 18);
-	memcpy(felica, felica_params, 18);
-	get_random_bytes(felica + 2, 6);
-
-	/* NFCID3 */
-	nfcid3 = skb_put(skb, 10);
-	memset(nfcid3, 0, 10);
-	memcpy(nfcid3, felica, 8);
-
-	/* General bytes */
-	*skb_put(skb, 1) = gbytes_len;
-
-	gb = skb_put(skb, gbytes_len);
-	memcpy(gb, gbytes, gbytes_len);
-
-	/* Len Tk */
-	*skb_put(skb, 1) = 0;
-
-	return skb;
-}
-
-#define PN533_CMD_DATAEXCH_HEAD_LEN 1
-#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
-static void pn533_wq_tm_mi_recv(struct work_struct *work);
-static struct sk_buff *pn533_build_response(struct pn533 *dev);
-
-static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
-				      struct sk_buff *resp)
-{
-	struct sk_buff *skb;
-	u8 status, ret, mi;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	if (IS_ERR(resp)) {
-		skb_queue_purge(&dev->resp_q);
-		return PTR_ERR(resp);
-	}
-
-	status = resp->data[0];
-
-	ret = status & PN533_CMD_RET_MASK;
-	mi = status & PN533_CMD_MI_MASK;
-
-	skb_pull(resp, sizeof(status));
-
-	if (ret != PN533_CMD_RET_SUCCESS) {
-		rc = -EIO;
-		goto error;
-	}
-
-	skb_queue_tail(&dev->resp_q, resp);
-
-	if (mi) {
-		queue_work(dev->wq, &dev->mi_tm_rx_work);
-		return -EINPROGRESS;
-	}
-
-	skb = pn533_build_response(dev);
-	if (!skb) {
-		rc = -EIO;
-		goto error;
-	}
-
-	return nfc_tm_data_received(dev->nfc_dev, skb);
-
-error:
-	nfc_tm_deactivated(dev->nfc_dev);
-	dev->tgt_mode = 0;
-	skb_queue_purge(&dev->resp_q);
-	dev_kfree_skb(resp);
-
-	return rc;
-}
-
-static void pn533_wq_tm_mi_recv(struct work_struct *work)
-{
-	struct pn533 *dev = container_of(work, struct pn533, mi_tm_rx_work);
-	struct sk_buff *skb;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	skb = pn533_alloc_skb(dev, 0);
-	if (!skb)
-		return;
-
-	rc = pn533_send_cmd_direct_async(dev,
-					PN533_CMD_TG_GET_DATA,
-					skb,
-					pn533_tm_get_data_complete,
-					NULL);
-
-	if (rc < 0)
-		dev_kfree_skb(skb);
-
-	return;
-}
-
-static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
-				  struct sk_buff *resp);
-static void pn533_wq_tm_mi_send(struct work_struct *work)
-{
-	struct pn533 *dev = container_of(work, struct pn533, mi_tm_tx_work);
-	struct sk_buff *skb;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	/* Grab the first skb in the queue */
-	skb = skb_dequeue(&dev->fragment_skb);
-	if (skb == NULL) {	/* No more data */
-		/* Reset the queue for future use */
-		skb_queue_head_init(&dev->fragment_skb);
-		goto error;
-	}
-
-	/* last entry - remove MI bit */
-	if (skb_queue_len(&dev->fragment_skb) == 0) {
-		rc = pn533_send_cmd_direct_async(dev, PN533_CMD_TG_SET_DATA,
-					skb, pn533_tm_send_complete, NULL);
-	} else
-		rc = pn533_send_cmd_direct_async(dev,
-					PN533_CMD_TG_SET_META_DATA,
-					skb, pn533_tm_send_complete, NULL);
-
-	if (rc == 0) /* success */
-		return;
-
-	dev_err(&dev->interface->dev,
-		"Error %d when trying to perform the set meta data exchange", rc);
-
-	dev_kfree_skb(skb);
-
-error:
-	pn533_send_ack(dev, GFP_KERNEL);
-	queue_work(dev->wq, &dev->cmd_work);
-}
-
-static void pn533_wq_tg_get_data(struct work_struct *work)
-{
-	struct pn533 *dev = container_of(work, struct pn533, tg_work);
-	struct sk_buff *skb;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	skb = pn533_alloc_skb(dev, 0);
-	if (!skb)
-		return;
-
-	rc = pn533_send_data_async(dev, PN533_CMD_TG_GET_DATA, skb,
-				   pn533_tm_get_data_complete, NULL);
-
-	if (rc < 0)
-		dev_kfree_skb(skb);
-
-	return;
-}
-
-#define ATR_REQ_GB_OFFSET 17
-static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
-{
-	u8 mode, *cmd, comm_mode = NFC_COMM_PASSIVE, *gb;
-	size_t gb_len;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	if (resp->len < ATR_REQ_GB_OFFSET + 1)
-		return -EINVAL;
-
-	mode = resp->data[0];
-	cmd = &resp->data[1];
-
-	dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
-		mode, resp->len);
-
-	if ((mode & PN533_INIT_TARGET_RESP_FRAME_MASK) ==
-	    PN533_INIT_TARGET_RESP_ACTIVE)
-		comm_mode = NFC_COMM_ACTIVE;
-
-	if ((mode & PN533_INIT_TARGET_RESP_DEP) == 0)  /* Only DEP supported */
-		return -EOPNOTSUPP;
-
-	gb = cmd + ATR_REQ_GB_OFFSET;
-	gb_len = resp->len - (ATR_REQ_GB_OFFSET + 1);
-
-	rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
-			      comm_mode, gb, gb_len);
-	if (rc < 0) {
-		nfc_err(&dev->interface->dev,
-			"Error when signaling target activation\n");
-		return rc;
-	}
-
-	dev->tgt_mode = 1;
-	queue_work(dev->wq, &dev->tg_work);
-
-	return 0;
-}
-
-static void pn533_listen_mode_timer(unsigned long data)
-{
-	struct pn533 *dev = (struct pn533 *)data;
-
-	dev_dbg(&dev->interface->dev, "Listen mode timeout\n");
-
-	dev->cancel_listen = 1;
-
-	pn533_poll_next_mod(dev);
-
-	queue_delayed_work(dev->wq, &dev->poll_work,
-			   msecs_to_jiffies(PN533_POLL_INTERVAL));
-}
-
-static int pn533_rf_complete(struct pn533 *dev, void *arg,
-			     struct sk_buff *resp)
-{
-	int rc = 0;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	if (IS_ERR(resp)) {
-		rc = PTR_ERR(resp);
-
-		nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
-
-		return rc;
-	}
-
-	queue_delayed_work(dev->wq, &dev->poll_work,
-			   msecs_to_jiffies(PN533_POLL_INTERVAL));
-
-	dev_kfree_skb(resp);
-	return rc;
-}
-
-static void pn533_wq_rf(struct work_struct *work)
-{
-	struct pn533 *dev = container_of(work, struct pn533, rf_work);
-	struct sk_buff *skb;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	skb = pn533_alloc_skb(dev, 2);
-	if (!skb)
-		return;
-
-	*skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD;
-	*skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
-
-	rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb,
-				  pn533_rf_complete, NULL);
-	if (rc < 0) {
-		dev_kfree_skb(skb);
-		nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
-	}
-
-	return;
-}
-
-static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
-				   struct sk_buff *resp)
-{
-	struct pn533_cmd_jump_dep_response *rsp;
-	struct nfc_target nfc_target;
-	u8 target_gt_len;
-	int rc;
-
-	if (IS_ERR(resp))
-		return PTR_ERR(resp);
-
-	rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
-
-	rc = rsp->status & PN533_CMD_RET_MASK;
-	if (rc != PN533_CMD_RET_SUCCESS) {
-		/* No target found, turn the radio off */
-		queue_work(dev->wq, &dev->rf_work);
-
-		dev_kfree_skb(resp);
-		return 0;
-	}
-
-	dev_dbg(&dev->interface->dev, "Creating new target\n");
-
-	nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
-	nfc_target.nfcid1_len = 10;
-	memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
-	rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
-	if (rc)
-		goto error;
-
-	dev->tgt_available_prots = 0;
-	dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
-
-	/* ATR_RES general bytes are located at offset 17 */
-	target_gt_len = resp->len - 17;
-	rc = nfc_set_remote_general_bytes(dev->nfc_dev,
-					  rsp->gt, target_gt_len);
-	if (!rc) {
-		rc = nfc_dep_link_is_up(dev->nfc_dev,
-					dev->nfc_dev->targets[0].idx,
-					0, NFC_RF_INITIATOR);
-
-		if (!rc)
-			pn533_poll_reset_mod_list(dev);
-	}
-error:
-	dev_kfree_skb(resp);
-	return rc;
-}
-
-#define PASSIVE_DATA_LEN 5
-static int pn533_poll_dep(struct nfc_dev *nfc_dev)
-{
-	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-	struct sk_buff *skb;
-	int rc, skb_len;
-	u8 *next, nfcid3[NFC_NFCID3_MAXSIZE];
-	u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
-
-	dev_dbg(&dev->interface->dev, "%s", __func__);
-
-	if (!dev->gb) {
-		dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
-
-		if (!dev->gb || !dev->gb_len) {
-			dev->poll_dep = 0;
-			queue_work(dev->wq, &dev->rf_work);
-		}
-	}
-
-	skb_len = 3 + dev->gb_len; /* ActPass + BR + Next */
-	skb_len += PASSIVE_DATA_LEN;
-
-	/* NFCID3 */
-	skb_len += NFC_NFCID3_MAXSIZE;
-	nfcid3[0] = 0x1;
-	nfcid3[1] = 0xfe;
-	get_random_bytes(nfcid3 + 2, 6);
-
-	skb = pn533_alloc_skb(dev, skb_len);
-	if (!skb)
-		return -ENOMEM;
-
-	*skb_put(skb, 1) = 0x01;  /* Active */
-	*skb_put(skb, 1) = 0x02;  /* 424 kbps */
-
-	next = skb_put(skb, 1);  /* Next */
-	*next = 0;
-
-	/* Copy passive data */
-	memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
-	*next |= 1;
-
-	/* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
-	memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
-	       NFC_NFCID3_MAXSIZE);
-	*next |= 2;
-
-	memcpy(skb_put(skb, dev->gb_len), dev->gb, dev->gb_len);
-	*next |= 4; /* We have some Gi */
-
-	rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
-				  pn533_poll_dep_complete, NULL);
-
-	if (rc < 0)
-		dev_kfree_skb(skb);
-
-	return rc;
-}
-
-static int pn533_poll_complete(struct pn533 *dev, void *arg,
-			       struct sk_buff *resp)
-{
-	struct pn533_poll_modulations *cur_mod;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	if (IS_ERR(resp)) {
-		rc = PTR_ERR(resp);
-
-		nfc_err(&dev->interface->dev, "%s  Poll complete error %d\n",
-			__func__, rc);
-
-		if (rc == -ENOENT) {
-			if (dev->poll_mod_count != 0)
-				return rc;
-			else
-				goto stop_poll;
-		} else if (rc < 0) {
-			nfc_err(&dev->interface->dev,
-				"Error %d when running poll\n", rc);
-			goto stop_poll;
-		}
-	}
-
-	cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
-
-	if (cur_mod->len == 0) { /* Target mode */
-		del_timer(&dev->listen_timer);
-		rc = pn533_init_target_complete(dev, resp);
-		goto done;
-	}
-
-	/* Initiator mode */
-	rc = pn533_start_poll_complete(dev, resp);
-	if (!rc)
-		goto done;
-
-	if (!dev->poll_mod_count) {
-		dev_dbg(&dev->interface->dev, "Polling has been stopped\n");
-		goto done;
-	}
-
-	pn533_poll_next_mod(dev);
-	/* No target found, turn the radio off */
-	queue_work(dev->wq, &dev->rf_work);
-
-done:
-	dev_kfree_skb(resp);
-	return rc;
-
-stop_poll:
-	nfc_err(&dev->interface->dev, "Polling operation has been stopped\n");
-
-	pn533_poll_reset_mod_list(dev);
-	dev->poll_protocols = 0;
-	return rc;
-}
-
-static struct sk_buff *pn533_alloc_poll_in_frame(struct pn533 *dev,
-					struct pn533_poll_modulations *mod)
-{
-	struct sk_buff *skb;
-
-	skb = pn533_alloc_skb(dev, mod->len);
-	if (!skb)
-		return NULL;
-
-	memcpy(skb_put(skb, mod->len), &mod->data, mod->len);
-
-	return skb;
-}
-
-static int pn533_send_poll_frame(struct pn533 *dev)
-{
-	struct pn533_poll_modulations *mod;
-	struct sk_buff *skb;
-	int rc;
-	u8 cmd_code;
-
-	mod = dev->poll_mod_active[dev->poll_mod_curr];
-
-	dev_dbg(&dev->interface->dev, "%s mod len %d\n",
-		__func__, mod->len);
-
-	if (dev->poll_dep)  {
-		dev->poll_dep = 0;
-		return pn533_poll_dep(dev->nfc_dev);
-	}
-
-	if (mod->len == 0) {  /* Listen mode */
-		cmd_code = PN533_CMD_TG_INIT_AS_TARGET;
-		skb = pn533_alloc_poll_tg_frame(dev);
-	} else {  /* Polling mode */
-		cmd_code =  PN533_CMD_IN_LIST_PASSIVE_TARGET;
-		skb = pn533_alloc_poll_in_frame(dev, mod);
-	}
-
-	if (!skb) {
-		nfc_err(&dev->interface->dev, "Failed to allocate skb\n");
-		return -ENOMEM;
-	}
-
-	rc = pn533_send_cmd_async(dev, cmd_code, skb, pn533_poll_complete,
-				  NULL);
-	if (rc < 0) {
-		dev_kfree_skb(skb);
-		nfc_err(&dev->interface->dev, "Polling loop error %d\n", rc);
-	}
-
-	return rc;
-}
-
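-/*
- * Poll worker: cancel a pending listen if requested, send the frame for
- * the current modulation and, when entering listen mode with other
- * modulations pending, re-arm the listen timer so polling resumes later.
- */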
-static void pn533_wq_poll(struct work_struct *work)
-{
-	struct pn533 *dev = container_of(work, struct pn533, poll_work.work);
-	struct pn533_poll_modulations *cur_mod;
-	int rc;
-
-	cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
-
-	dev_dbg(&dev->interface->dev,
-		"%s cancel_listen %d modulation len %d\n",
-		__func__, dev->cancel_listen, cur_mod->len);
-
-	if (dev->cancel_listen == 1) {
-		dev->cancel_listen = 0;
-		pn533_abort_cmd(dev, GFP_ATOMIC);
-	}
-
-	rc = pn533_send_poll_frame(dev);
-	if (rc)
-		return;
-
-	if (cur_mod->len == 0 && dev->poll_mod_count > 1)
-		mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
-
-	return;
-}
-
-static int pn533_start_poll(struct nfc_dev *nfc_dev,
-			    u32 im_protocols, u32 tm_protocols)
-{
-	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-	struct pn533_poll_modulations *cur_mod;
-	u8 rand_mod;
-	int rc;
-
-	dev_dbg(&dev->interface->dev,
-		"%s: im protocols 0x%x tm protocols 0x%x\n",
-		__func__, im_protocols, tm_protocols);
-
-	if (dev->tgt_active_prot) {
-		nfc_err(&dev->interface->dev,
-			"Cannot poll with a target already activated\n");
-		return -EBUSY;
-	}
-
-	if (dev->tgt_mode) {
-		nfc_err(&dev->interface->dev,
-			"Cannot poll while already being activated\n");
-		return -EBUSY;
-	}
-
-	if (tm_protocols) {
-		dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
-		if (dev->gb == NULL)
-			tm_protocols = 0;
-	}
-
-	pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
-	dev->poll_protocols = im_protocols;
-	dev->listen_protocols = tm_protocols;
-
-	/* Do not always start polling from the same modulation */
-	get_random_bytes(&rand_mod, sizeof(rand_mod));
-	rand_mod %= dev->poll_mod_count;
-	dev->poll_mod_curr = rand_mod;
-
-	cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
-
-	rc = pn533_send_poll_frame(dev);
-
-	/* Start listen timer */
-	if (!rc && cur_mod->len == 0 && dev->poll_mod_count > 1)
-		mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
-
-	return rc;
-}
-
-static void pn533_stop_poll(struct nfc_dev *nfc_dev)
-{
-	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-
-	del_timer(&dev->listen_timer);
-
-	if (!dev->poll_mod_count) {
-		dev_dbg(&dev->interface->dev,
-			"Polling operation was not running\n");
-		return;
-	}
-
-	pn533_abort_cmd(dev, GFP_KERNEL);
-	flush_delayed_work(&dev->poll_work);
-	pn533_poll_reset_mod_list(dev);
-}
-
-static int pn533_activate_target_nfcdep(struct pn533 *dev)
-{
-	struct pn533_cmd_activate_response *rsp;
-	u16 gt_len;
-	int rc;
-	struct sk_buff *skb;
-	struct sk_buff *resp;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
-	if (!skb)
-		return -ENOMEM;
-
-	*skb_put(skb, sizeof(u8)) = 1; /* TG */
-	*skb_put(skb, sizeof(u8)) = 0; /* Next */
-
-	resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_ATR, skb);
-	if (IS_ERR(resp))
-		return PTR_ERR(resp);
-
-	rsp = (struct pn533_cmd_activate_response *)resp->data;
-	rc = rsp->status & PN533_CMD_RET_MASK;
-	if (rc != PN533_CMD_RET_SUCCESS) {
-		nfc_err(&dev->interface->dev,
-			"Target activation failed (error 0x%x)\n", rc);
-		dev_kfree_skb(resp);
-		return -EIO;
-	}
-
-	/* ATR_RES general bytes are located at offset 16 */
-	gt_len = resp->len - 16;
-	rc = nfc_set_remote_general_bytes(dev->nfc_dev, rsp->gt, gt_len);
-
-	dev_kfree_skb(resp);
-	return rc;
-}
-
-static int pn533_activate_target(struct nfc_dev *nfc_dev,
-				 struct nfc_target *target, u32 protocol)
-{
-	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s: protocol=%u\n", __func__, protocol);
-
-	if (dev->poll_mod_count) {
-		nfc_err(&dev->interface->dev,
-			"Cannot activate while polling\n");
-		return -EBUSY;
-	}
-
-	if (dev->tgt_active_prot) {
-		nfc_err(&dev->interface->dev,
-			"There is already an active target\n");
-		return -EBUSY;
-	}
-
-	if (!dev->tgt_available_prots) {
-		nfc_err(&dev->interface->dev,
-			"There is no available target to activate\n");
-		return -EINVAL;
-	}
-
-	if (!(dev->tgt_available_prots & (1 << protocol))) {
-		nfc_err(&dev->interface->dev,
-			"Target doesn't support requested proto %u\n",
-			protocol);
-		return -EINVAL;
-	}
-
-	if (protocol == NFC_PROTO_NFC_DEP) {
-		rc = pn533_activate_target_nfcdep(dev);
-		if (rc) {
-			nfc_err(&dev->interface->dev,
-				"Activating target with DEP failed %d\n", rc);
-			return rc;
-		}
-	}
-
-	dev->tgt_active_prot = protocol;
-	dev->tgt_available_prots = 0;
-
-	return 0;
-}
-
-static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
-				    struct nfc_target *target, u8 mode)
-{
-	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-	struct sk_buff *skb;
-	struct sk_buff *resp;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	if (!dev->tgt_active_prot) {
-		nfc_err(&dev->interface->dev, "There is no active target\n");
-		return;
-	}
-
-	dev->tgt_active_prot = 0;
-	skb_queue_purge(&dev->resp_q);
-
-	skb = pn533_alloc_skb(dev, sizeof(u8));
-	if (!skb)
-		return;
-
-	*skb_put(skb, 1) = 1; /* TG*/
-
-	resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_RELEASE, skb);
-	if (IS_ERR(resp))
-		return;
-
-	rc = resp->data[0] & PN533_CMD_RET_MASK;
-	if (rc != PN533_CMD_RET_SUCCESS)
-		nfc_err(&dev->interface->dev,
-			"Error 0x%x when releasing the target\n", rc);
-
-	dev_kfree_skb(resp);
-	return;
-}
-
-
-static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
-					 struct sk_buff *resp)
-{
-	struct pn533_cmd_jump_dep_response *rsp;
-	u8 target_gt_len;
-	int rc;
-	u8 active = *(u8 *)arg;
-
-	kfree(arg);
-
-	if (IS_ERR(resp))
-		return PTR_ERR(resp);
-
-	if (dev->tgt_available_prots &&
-	    !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
-		nfc_err(&dev->interface->dev,
-			"The target does not support DEP\n");
-		rc =  -EINVAL;
-		goto error;
-	}
-
-	rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
-
-	rc = rsp->status & PN533_CMD_RET_MASK;
-	if (rc != PN533_CMD_RET_SUCCESS) {
-		nfc_err(&dev->interface->dev,
-			"Bringing DEP link up failed (error 0x%x)\n", rc);
-		goto error;
-	}
-
-	if (!dev->tgt_available_prots) {
-		struct nfc_target nfc_target;
-
-		dev_dbg(&dev->interface->dev, "Creating new target\n");
-
-		nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
-		nfc_target.nfcid1_len = 10;
-		memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
-		rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
-		if (rc)
-			goto error;
-
-		dev->tgt_available_prots = 0;
-	}
-
-	dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
-
-	/* ATR_RES general bytes are located at offset 17 */
-	target_gt_len = resp->len - 17;
-	rc = nfc_set_remote_general_bytes(dev->nfc_dev,
-					  rsp->gt, target_gt_len);
-	if (rc == 0)
-		rc = nfc_dep_link_is_up(dev->nfc_dev,
-					dev->nfc_dev->targets[0].idx,
-					!active, NFC_RF_INITIATOR);
-
-error:
-	dev_kfree_skb(resp);
-	return rc;
-}
-
-static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf);
-static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
-			     u8 comm_mode, u8 *gb, size_t gb_len)
-{
-	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-	struct sk_buff *skb;
-	int rc, skb_len;
-	u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
-	u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	if (dev->poll_mod_count) {
-		nfc_err(&dev->interface->dev,
-			"Cannot bring the DEP link up while polling\n");
-		return -EBUSY;
-	}
-
-	if (dev->tgt_active_prot) {
-		nfc_err(&dev->interface->dev,
-			"There is already an active target\n");
-		return -EBUSY;
-	}
-
-	skb_len = 3 + gb_len; /* ActPass + BR + Next */
-	skb_len += PASSIVE_DATA_LEN;
-
-	/* NFCID3 */
-	skb_len += NFC_NFCID3_MAXSIZE;
-	if (target && !target->nfcid2_len) {
-		nfcid3[0] = 0x1;
-		nfcid3[1] = 0xfe;
-		get_random_bytes(nfcid3 + 2, 6);
-	}
-
-	skb = pn533_alloc_skb(dev, skb_len);
-	if (!skb)
-		return -ENOMEM;
-
-	*skb_put(skb, 1) = !comm_mode;  /* ActPass */
-	*skb_put(skb, 1) = 0x02;  /* 424 kbps */
-
-	next = skb_put(skb, 1);  /* Next */
-	*next = 0;
-
-	/* Copy passive data */
-	memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
-	*next |= 1;
-
-	/* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
-	if (target && target->nfcid2_len)
-		memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2,
-		       target->nfcid2_len);
-	else
-		memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
-		       NFC_NFCID3_MAXSIZE);
-	*next |= 2;
-
-	if (gb != NULL && gb_len > 0) {
-		memcpy(skb_put(skb, gb_len), gb, gb_len);
-		*next |= 4; /* We have some Gi */
-	} else {
-		*next = 0;
-	}
-
-	arg = kmalloc(sizeof(*arg), GFP_KERNEL);
-	if (!arg) {
-		dev_kfree_skb(skb);
-		return -ENOMEM;
-	}
-
-	*arg = !comm_mode;
-
-	pn533_rf_field(dev->nfc_dev, 0);
-
-	rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
-				  pn533_in_dep_link_up_complete, arg);
-
-	if (rc < 0) {
-		dev_kfree_skb(skb);
-		kfree(arg);
-	}
-
-	return rc;
-}
-
-static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
-{
-	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	pn533_poll_reset_mod_list(dev);
-
-	if (dev->tgt_mode || dev->tgt_active_prot)
-		pn533_abort_cmd(dev, GFP_KERNEL);
-
-	dev->tgt_active_prot = 0;
-	dev->tgt_mode = 0;
-
-	skb_queue_purge(&dev->resp_q);
-
-	return 0;
-}
-
-struct pn533_data_exchange_arg {
-	data_exchange_cb_t cb;
-	void *cb_context;
-};
-
-static struct sk_buff *pn533_build_response(struct pn533 *dev)
-{
-	struct sk_buff *skb, *tmp, *t;
-	unsigned int skb_len = 0, tmp_len = 0;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	if (skb_queue_empty(&dev->resp_q))
-		return NULL;
-
-	if (skb_queue_len(&dev->resp_q) == 1) {
-		skb = skb_dequeue(&dev->resp_q);
-		goto out;
-	}
-
-	skb_queue_walk_safe(&dev->resp_q, tmp, t)
-		skb_len += tmp->len;
-
-	dev_dbg(&dev->interface->dev, "%s total length %d\n",
-		__func__, skb_len);
-
-	skb = alloc_skb(skb_len, GFP_KERNEL);
-	if (skb == NULL)
-		goto out;
-
-	skb_put(skb, skb_len);
-
-	skb_queue_walk_safe(&dev->resp_q, tmp, t) {
-		memcpy(skb->data + tmp_len, tmp->data, tmp->len);
-		tmp_len += tmp->len;
-	}
-
-out:
-	skb_queue_purge(&dev->resp_q);
-
-	return skb;
-}
-
-static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
-					struct sk_buff *resp)
-{
-	struct pn533_data_exchange_arg *arg = _arg;
-	struct sk_buff *skb;
-	int rc = 0;
-	u8 status, ret, mi;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	if (IS_ERR(resp)) {
-		rc = PTR_ERR(resp);
-		goto _error;
-	}
-
-	status = resp->data[0];
-	ret = status & PN533_CMD_RET_MASK;
-	mi = status & PN533_CMD_MI_MASK;
-
-	skb_pull(resp, sizeof(status));
-
-	if (ret != PN533_CMD_RET_SUCCESS) {
-		nfc_err(&dev->interface->dev,
-			"Exchanging data failed (error 0x%x)\n", ret);
-		rc = -EIO;
-		goto error;
-	}
-
-	skb_queue_tail(&dev->resp_q, resp);
-
-	if (mi) {
-		dev->cmd_complete_mi_arg = arg;
-		queue_work(dev->wq, &dev->mi_rx_work);
-		return -EINPROGRESS;
-	}
-
-	/* Prepare for the next round */
-	if (skb_queue_len(&dev->fragment_skb) > 0) {
-		dev->cmd_complete_dep_arg = arg;
-		queue_work(dev->wq, &dev->mi_tx_work);
-
-		return -EINPROGRESS;
-	}
-
-	skb = pn533_build_response(dev);
-	if (!skb) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	arg->cb(arg->cb_context, skb, 0);
-	kfree(arg);
-	return 0;
-
-error:
-	dev_kfree_skb(resp);
-_error:
-	skb_queue_purge(&dev->resp_q);
-	arg->cb(arg->cb_context, NULL, rc);
-	kfree(arg);
-	return rc;
-}
-
-/* Split the Tx skb into small chunks */
-static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
-{
-	struct sk_buff *frag;
-	int  frag_size;
-
-	do {
-		/* Remaining size */
-		if (skb->len > PN533_CMD_DATAFRAME_MAXLEN)
-			frag_size = PN533_CMD_DATAFRAME_MAXLEN;
-		else
-			frag_size = skb->len;
-
-		/* Allocate and reserve */
-		frag = pn533_alloc_skb(dev, frag_size);
-		if (!frag) {
-			skb_queue_purge(&dev->fragment_skb);
-			break;
-		}
-
-		if (!dev->tgt_mode) {
-			/* Reserve the TG/MI byte */
-			skb_reserve(frag, 1);
-
-			/* MI + TG */
-			if (frag_size  == PN533_CMD_DATAFRAME_MAXLEN)
-				*skb_push(frag, sizeof(u8)) =
-							(PN533_CMD_MI_MASK | 1);
-			else
-				*skb_push(frag, sizeof(u8)) =  1; /* TG */
-		}
-
-		memcpy(skb_put(frag, frag_size), skb->data, frag_size);
-
-		/* Reduce the size of incoming buffer */
-		skb_pull(skb, frag_size);
-
-		/* Add this to skb_queue */
-		skb_queue_tail(&dev->fragment_skb, frag);
-
-	} while (skb->len > 0);
-
-	dev_kfree_skb(skb);
-
-	return skb_queue_len(&dev->fragment_skb);
-}
-
-static int pn533_transceive(struct nfc_dev *nfc_dev,
-			    struct nfc_target *target, struct sk_buff *skb,
-			    data_exchange_cb_t cb, void *cb_context)
-{
-	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-	struct pn533_data_exchange_arg *arg = NULL;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	if (!dev->tgt_active_prot) {
-		nfc_err(&dev->interface->dev,
-			"Can't exchange data if there is no active target\n");
-		rc = -EINVAL;
-		goto error;
-	}
-
-	arg = kmalloc(sizeof(*arg), GFP_KERNEL);
-	if (!arg) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	arg->cb = cb;
-	arg->cb_context = cb_context;
-
-	switch (dev->device_type) {
-	case PN533_DEVICE_PASORI:
-		if (dev->tgt_active_prot == NFC_PROTO_FELICA) {
-			rc = pn533_send_data_async(dev, PN533_CMD_IN_COMM_THRU,
-						   skb,
-						   pn533_data_exchange_complete,
-						   arg);
-
-			break;
-		}
-	default:
-		/* jumbo frame ? */
-		if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
-			rc = pn533_fill_fragment_skbs(dev, skb);
-			if (rc <= 0)
-				goto error;
-
-			skb = skb_dequeue(&dev->fragment_skb);
-			if (!skb) {
-				rc = -EIO;
-				goto error;
-			}
-		} else {
-			*skb_push(skb, sizeof(u8)) =  1; /* TG */
-		}
-
-		rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
-					   skb, pn533_data_exchange_complete,
-					   arg);
-
-		break;
-	}
-
-	if (rc < 0) /* rc from send_async */
-		goto error;
-
-	return 0;
-
-error:
-	kfree(arg);
-	dev_kfree_skb(skb);
-	return rc;
-}
-
-static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
-				  struct sk_buff *resp)
-{
-	u8 status;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	if (IS_ERR(resp))
-		return PTR_ERR(resp);
-
-	status = resp->data[0];
-
-	/* Prepare for the next round */
-	if (skb_queue_len(&dev->fragment_skb) > 0) {
-		queue_work(dev->wq, &dev->mi_tm_tx_work);
-		return -EINPROGRESS;
-	}
-	dev_kfree_skb(resp);
-
-	if (status != 0) {
-		nfc_tm_deactivated(dev->nfc_dev);
-
-		dev->tgt_mode = 0;
-
-		return 0;
-	}
-
-	queue_work(dev->wq, &dev->tg_work);
-
-	return 0;
-}
-
-static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
-{
-	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	/* let's split in multiple chunks if size's too big */
-	if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
-		rc = pn533_fill_fragment_skbs(dev, skb);
-		if (rc <= 0)
-			goto error;
-
-		/* get the first skb */
-		skb = skb_dequeue(&dev->fragment_skb);
-		if (!skb) {
-			rc = -EIO;
-			goto error;
-		}
-
-		rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_META_DATA, skb,
-						pn533_tm_send_complete, NULL);
-	} else {
-		/* Send th skb */
-		rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
-						pn533_tm_send_complete, NULL);
-	}
-
-error:
-	if (rc < 0) {
-		dev_kfree_skb(skb);
-		skb_queue_purge(&dev->fragment_skb);
-	}
-
-	return rc;
-}
-
-static void pn533_wq_mi_recv(struct work_struct *work)
-{
-	struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
-	struct sk_buff *skb;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
-	if (!skb)
-		goto error;
-
-	switch (dev->device_type) {
-	case PN533_DEVICE_PASORI:
-		if (dev->tgt_active_prot == NFC_PROTO_FELICA) {
-			rc = pn533_send_cmd_direct_async(dev,
-						PN533_CMD_IN_COMM_THRU,
-						skb,
-						pn533_data_exchange_complete,
-						 dev->cmd_complete_mi_arg);
-
-			break;
-		}
-	default:
-		*skb_put(skb, sizeof(u8)) =  1; /*TG*/
-
-		rc = pn533_send_cmd_direct_async(dev,
-						 PN533_CMD_IN_DATA_EXCHANGE,
-						 skb,
-						 pn533_data_exchange_complete,
-						 dev->cmd_complete_mi_arg);
-
-		break;
-	}
-
-	if (rc == 0) /* success */
-		return;
-
-	nfc_err(&dev->interface->dev,
-		"Error %d when trying to perform data_exchange\n", rc);
-
-	dev_kfree_skb(skb);
-	kfree(dev->cmd_complete_mi_arg);
-
-error:
-	pn533_send_ack(dev, GFP_KERNEL);
-	queue_work(dev->wq, &dev->cmd_work);
-}
-
-static void pn533_wq_mi_send(struct work_struct *work)
-{
-	struct pn533 *dev = container_of(work, struct pn533, mi_tx_work);
-	struct sk_buff *skb;
-	int rc;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	/* Grab the first skb in the queue */
-	skb = skb_dequeue(&dev->fragment_skb);
-
-	if (skb == NULL) {	/* No more data */
-		/* Reset the queue for future use */
-		skb_queue_head_init(&dev->fragment_skb);
-		goto error;
-	}
-
-	switch (dev->device_type) {
-	case PN533_DEVICE_PASORI:
-		if (dev->tgt_active_prot != NFC_PROTO_FELICA) {
-			rc = -EIO;
-			break;
-		}
-
-		rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_COMM_THRU,
-						 skb,
-						 pn533_data_exchange_complete,
-						 dev->cmd_complete_dep_arg);
-
-		break;
-
-	default:
-		/* Still some fragments? */
-		rc = pn533_send_cmd_direct_async(dev,PN533_CMD_IN_DATA_EXCHANGE,
-						 skb,
-						 pn533_data_exchange_complete,
-						 dev->cmd_complete_dep_arg);
-
-		break;
-	}
-
-	if (rc == 0) /* success */
-		return;
-
-	nfc_err(&dev->interface->dev,
-		"Error %d when trying to perform data_exchange\n", rc);
-
-	dev_kfree_skb(skb);
-	kfree(dev->cmd_complete_dep_arg);
-
-error:
-	pn533_send_ack(dev, GFP_KERNEL);
-	queue_work(dev->wq, &dev->cmd_work);
-}
-
-static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
-								u8 cfgdata_len)
-{
-	struct sk_buff *skb;
-	struct sk_buff *resp;
-	int skb_len;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */
-
-	skb = pn533_alloc_skb(dev, skb_len);
-	if (!skb)
-		return -ENOMEM;
-
-	*skb_put(skb, sizeof(cfgitem)) = cfgitem;
-	memcpy(skb_put(skb, cfgdata_len), cfgdata, cfgdata_len);
-
-	resp = pn533_send_cmd_sync(dev, PN533_CMD_RF_CONFIGURATION, skb);
-	if (IS_ERR(resp))
-		return PTR_ERR(resp);
-
-	dev_kfree_skb(resp);
-	return 0;
-}
-
-static int pn533_get_firmware_version(struct pn533 *dev,
-				      struct pn533_fw_version *fv)
-{
-	struct sk_buff *skb;
-	struct sk_buff *resp;
-
-	skb = pn533_alloc_skb(dev, 0);
-	if (!skb)
-		return -ENOMEM;
-
-	resp = pn533_send_cmd_sync(dev, PN533_CMD_GET_FIRMWARE_VERSION, skb);
-	if (IS_ERR(resp))
-		return PTR_ERR(resp);
-
-	fv->ic = resp->data[0];
-	fv->ver = resp->data[1];
-	fv->rev = resp->data[2];
-	fv->support = resp->data[3];
-
-	dev_kfree_skb(resp);
-	return 0;
-}
-
-static int pn533_pasori_fw_reset(struct pn533 *dev)
-{
-	struct sk_buff *skb;
-	struct sk_buff *resp;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	skb = pn533_alloc_skb(dev, sizeof(u8));
-	if (!skb)
-		return -ENOMEM;
-
-	*skb_put(skb, sizeof(u8)) = 0x1;
-
-	resp = pn533_send_cmd_sync(dev, 0x18, skb);
-	if (IS_ERR(resp))
-		return PTR_ERR(resp);
-
-	dev_kfree_skb(resp);
-
-	return 0;
-}
-
-struct pn533_acr122_poweron_rdr_arg {
-	int rc;
-	struct completion done;
-};
-
-static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
-{
-	struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
-
-	dev_dbg(&urb->dev->dev, "%s\n", __func__);
-
-	print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
-		       urb->transfer_buffer, urb->transfer_buffer_length,
-		       false);
-
-	arg->rc = urb->status;
-	complete(&arg->done);
-}
-
-static int pn533_acr122_poweron_rdr(struct pn533 *dev)
-{
-	/* Power on th reader (CCID cmd) */
-	u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
-		      0, 0, 0, 0, 0, 0, 3, 0, 0};
-	u8 buf[255];
-	int rc;
-	void *cntx;
-	struct pn533_acr122_poweron_rdr_arg arg;
-
-	dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
-	init_completion(&arg.done);
-	cntx = dev->in_urb->context;  /* backup context */
-
-	dev->in_urb->transfer_buffer = buf;
-	dev->in_urb->transfer_buffer_length = 255;
-	dev->in_urb->complete = pn533_acr122_poweron_rdr_resp;
-	dev->in_urb->context = &arg;
-
-	dev->out_urb->transfer_buffer = cmd;
-	dev->out_urb->transfer_buffer_length = sizeof(cmd);
-
-	print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
-		       cmd, sizeof(cmd), false);
-
-	rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
-	if (rc) {
-		nfc_err(&dev->interface->dev,
-			"Reader power on cmd error %d\n", rc);
-		return rc;
-	}
-
-	rc =  usb_submit_urb(dev->in_urb, GFP_KERNEL);
-	if (rc) {
-		nfc_err(&dev->interface->dev,
-			"Can't submit reader poweron cmd response %d\n", rc);
-		return rc;
-	}
-
-	wait_for_completion(&arg.done);
-	dev->in_urb->context = cntx; /* restore context */
-
-	return arg.rc;
-}
-
-static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
-{
-	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-	u8 rf_field = !!rf;
-	int rc;
-
-	rf_field |= PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
-
-	rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
-				     (u8 *)&rf_field, 1);
-	if (rc) {
-		nfc_err(&dev->interface->dev, "Error on setting RF field\n");
-		return rc;
-	}
-
-	return rc;
-}
-
-static int pn533_dev_up(struct nfc_dev *nfc_dev)
-{
-	return pn533_rf_field(nfc_dev, 1);
-}
-
-static int pn533_dev_down(struct nfc_dev *nfc_dev)
-{
-	return pn533_rf_field(nfc_dev, 0);
-}
-
-static struct nfc_ops pn533_nfc_ops = {
-	.dev_up = pn533_dev_up,
-	.dev_down = pn533_dev_down,
-	.dep_link_up = pn533_dep_link_up,
-	.dep_link_down = pn533_dep_link_down,
-	.start_poll = pn533_start_poll,
-	.stop_poll = pn533_stop_poll,
-	.activate_target = pn533_activate_target,
-	.deactivate_target = pn533_deactivate_target,
-	.im_transceive = pn533_transceive,
-	.tm_send = pn533_tm_send,
-};
-
-static int pn533_setup(struct pn533 *dev)
-{
-	struct pn533_config_max_retries max_retries;
-	struct pn533_config_timing timing;
-	u8 pasori_cfg[3] = {0x08, 0x01, 0x08};
-	int rc;
-
-	switch (dev->device_type) {
-	case PN533_DEVICE_STD:
-	case PN533_DEVICE_PASORI:
-	case PN533_DEVICE_ACR122U:
-		max_retries.mx_rty_atr = 0x2;
-		max_retries.mx_rty_psl = 0x1;
-		max_retries.mx_rty_passive_act =
-			PN533_CONFIG_MAX_RETRIES_NO_RETRY;
-
-		timing.rfu = PN533_CONFIG_TIMING_102;
-		timing.atr_res_timeout = PN533_CONFIG_TIMING_102;
-		timing.dep_timeout = PN533_CONFIG_TIMING_204;
-
-		break;
-
-	default:
-		nfc_err(&dev->interface->dev, "Unknown device type %d\n",
-			dev->device_type);
-		return -EINVAL;
-	}
-
-	rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
-				     (u8 *)&max_retries, sizeof(max_retries));
-	if (rc) {
-		nfc_err(&dev->interface->dev,
-			"Error on setting MAX_RETRIES config\n");
-		return rc;
-	}
-
-
-	rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
-				     (u8 *)&timing, sizeof(timing));
-	if (rc) {
-		nfc_err(&dev->interface->dev, "Error on setting RF timings\n");
-		return rc;
-	}
-
-	switch (dev->device_type) {
-	case PN533_DEVICE_STD:
-		break;
-
-	case PN533_DEVICE_PASORI:
-		pn533_pasori_fw_reset(dev);
-
-		rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
-					     pasori_cfg, 3);
-		if (rc) {
-			nfc_err(&dev->interface->dev,
-				"Error while settings PASORI config\n");
-			return rc;
-		}
-
-		pn533_pasori_fw_reset(dev);
-
-		break;
-	}
-
-	return 0;
-}
-
-static int pn533_probe(struct usb_interface *interface,
-			const struct usb_device_id *id)
-{
-	struct pn533_fw_version fw_ver;
-	struct pn533 *dev;
-	struct usb_host_interface *iface_desc;
-	struct usb_endpoint_descriptor *endpoint;
-	int in_endpoint = 0;
-	int out_endpoint = 0;
-	int rc = -ENOMEM;
-	int i;
-	u32 protocols;
-
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		return -ENOMEM;
-
-	dev->udev = usb_get_dev(interface_to_usbdev(interface));
-	dev->interface = interface;
-	mutex_init(&dev->cmd_lock);
-
-	iface_desc = interface->cur_altsetting;
-	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
-		endpoint = &iface_desc->endpoint[i].desc;
-
-		if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint))
-			in_endpoint = endpoint->bEndpointAddress;
-
-		if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint))
-			out_endpoint = endpoint->bEndpointAddress;
-	}
-
-	if (!in_endpoint || !out_endpoint) {
-		nfc_err(&interface->dev,
-			"Could not find bulk-in or bulk-out endpoint\n");
-		rc = -ENODEV;
-		goto error;
-	}
-
-	dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
-	dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
-
-	if (!dev->in_urb || !dev->out_urb)
-		goto error;
-
-	usb_fill_bulk_urb(dev->in_urb, dev->udev,
-			  usb_rcvbulkpipe(dev->udev, in_endpoint),
-			  NULL, 0, NULL, dev);
-	usb_fill_bulk_urb(dev->out_urb, dev->udev,
-			  usb_sndbulkpipe(dev->udev, out_endpoint),
-			  NULL, 0, pn533_send_complete, dev);
-
-	INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
-	INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
-	INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv);
-	INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send);
-	INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
-	INIT_WORK(&dev->mi_tm_rx_work, pn533_wq_tm_mi_recv);
-	INIT_WORK(&dev->mi_tm_tx_work, pn533_wq_tm_mi_send);
-	INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll);
-	INIT_WORK(&dev->rf_work, pn533_wq_rf);
-	dev->wq = alloc_ordered_workqueue("pn533", 0);
-	if (dev->wq == NULL)
-		goto error;
-
-	init_timer(&dev->listen_timer);
-	dev->listen_timer.data = (unsigned long) dev;
-	dev->listen_timer.function = pn533_listen_mode_timer;
-
-	skb_queue_head_init(&dev->resp_q);
-	skb_queue_head_init(&dev->fragment_skb);
-
-	INIT_LIST_HEAD(&dev->cmd_queue);
-
-	usb_set_intfdata(interface, dev);
-
-	dev->ops = &pn533_std_frame_ops;
-
-	dev->protocol_type = PN533_PROTO_REQ_ACK_RESP;
-	dev->device_type = id->driver_info;
-	switch (dev->device_type) {
-	case PN533_DEVICE_STD:
-		protocols = PN533_ALL_PROTOCOLS;
-		break;
-
-	case PN533_DEVICE_PASORI:
-		protocols = PN533_NO_TYPE_B_PROTOCOLS;
-		break;
-
-	case PN533_DEVICE_ACR122U:
-		protocols = PN533_NO_TYPE_B_PROTOCOLS;
-		dev->ops = &pn533_acr122_frame_ops;
-		dev->protocol_type = PN533_PROTO_REQ_RESP,
-
-		rc = pn533_acr122_poweron_rdr(dev);
-		if (rc < 0) {
-			nfc_err(&dev->interface->dev,
-				"Couldn't poweron the reader (error %d)\n", rc);
-			goto destroy_wq;
-		}
-		break;
-
-	default:
-		nfc_err(&dev->interface->dev, "Unknown device type %d\n",
-			dev->device_type);
-		rc = -EINVAL;
-		goto destroy_wq;
-	}
-
-	memset(&fw_ver, 0, sizeof(fw_ver));
-	rc = pn533_get_firmware_version(dev, &fw_ver);
-	if (rc < 0)
-		goto destroy_wq;
-
-	nfc_info(&dev->interface->dev,
-		 "NXP PN5%02X firmware ver %d.%d now attached\n",
-		 fw_ver.ic, fw_ver.ver, fw_ver.rev);
-
-
-	dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
-					   dev->ops->tx_header_len +
-					   PN533_CMD_DATAEXCH_HEAD_LEN,
-					   dev->ops->tx_tail_len);
-	if (!dev->nfc_dev) {
-		rc = -ENOMEM;
-		goto destroy_wq;
-	}
-
-	nfc_set_parent_dev(dev->nfc_dev, &interface->dev);
-	nfc_set_drvdata(dev->nfc_dev, dev);
-
-	rc = nfc_register_device(dev->nfc_dev);
-	if (rc)
-		goto free_nfc_dev;
-
-	rc = pn533_setup(dev);
-	if (rc)
-		goto unregister_nfc_dev;
-
-	return 0;
-
-unregister_nfc_dev:
-	nfc_unregister_device(dev->nfc_dev);
-
-free_nfc_dev:
-	nfc_free_device(dev->nfc_dev);
-
-destroy_wq:
-	destroy_workqueue(dev->wq);
-error:
-	usb_free_urb(dev->in_urb);
-	usb_free_urb(dev->out_urb);
-	usb_put_dev(dev->udev);
-	kfree(dev);
-	return rc;
-}
-
-static void pn533_disconnect(struct usb_interface *interface)
-{
-	struct pn533 *dev;
-	struct pn533_cmd *cmd, *n;
-
-	dev = usb_get_intfdata(interface);
-	usb_set_intfdata(interface, NULL);
-
-	nfc_unregister_device(dev->nfc_dev);
-	nfc_free_device(dev->nfc_dev);
-
-	usb_kill_urb(dev->in_urb);
-	usb_kill_urb(dev->out_urb);
-
-	flush_delayed_work(&dev->poll_work);
-	destroy_workqueue(dev->wq);
-
-	skb_queue_purge(&dev->resp_q);
-
-	del_timer(&dev->listen_timer);
-
-	list_for_each_entry_safe(cmd, n, &dev->cmd_queue, queue) {
-		list_del(&cmd->queue);
-		kfree(cmd);
-	}
-
-	usb_free_urb(dev->in_urb);
-	usb_free_urb(dev->out_urb);
-	kfree(dev);
-
-	nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
-}
-
-static struct usb_driver pn533_driver = {
-	.name =		"pn533",
-	.probe =	pn533_probe,
-	.disconnect =	pn533_disconnect,
-	.id_table =	pn533_table,
-};
-
-module_usb_driver(pn533_driver);
-
-MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
-MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
-MODULE_AUTHOR("Waldemar Rymarkiewicz <waldemar.rymarkiewicz@tieto.com>");
-MODULE_DESCRIPTION("PN533 usb driver ver " VERSION);
-MODULE_VERSION(VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/pn533/Kconfig b/drivers/nfc/pn533/Kconfig
new file mode 100644
index 0000000..d94122d
--- /dev/null
+++ b/drivers/nfc/pn533/Kconfig
@@ -0,0 +1,27 @@
+config NFC_PN533
+	tristate
+	help
+	  NXP PN533 core driver.
+	  This driver provides core functionality for NXP PN533 NFC devices.
+
+config NFC_PN533_USB
+	tristate "NFC PN533 device support (USB)"
+	depends on USB
+	select NFC_PN533
+	---help---
+	  This module adds support for the NXP pn533 USB interface.
+	  Select this if your platform is using the USB bus.
+
+	  If you choose to build a module, it'll be called pn533_usb.
+	  Say N if unsure.
+
+config NFC_PN533_I2C
+	tristate "NFC PN533 device support (I2C)"
+	depends on I2C
+	select NFC_PN533
+	---help---
+	  This module adds support for the NXP pn533 I2C interface.
+	  Select this if your platform is using the I2C bus.
+
+	  If you choose to build a module, it'll be called pn533_i2c.
+	  Say N if unsure.
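+
+# Example .config fragment when building the I2C transport as a module
+# (illustrative only; NFC_PN533 is pulled in automatically via "select"):
+#   CONFIG_NFC_PN533=m
+#   CONFIG_NFC_PN533_I2C=m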
diff --git a/drivers/nfc/pn533/Makefile b/drivers/nfc/pn533/Makefile
new file mode 100644
index 0000000..51d24c6
--- /dev/null
+++ b/drivers/nfc/pn533/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for PN533 NFC driver
+#
+pn533_usb-objs  = usb.o
+pn533_i2c-objs  = i2c.o
+
+obj-$(CONFIG_NFC_PN533)     += pn533.o
+obj-$(CONFIG_NFC_PN533_USB) += pn533_usb.o
+obj-$(CONFIG_NFC_PN533_I2C) += pn533_i2c.o
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
new file mode 100644
index 0000000..1dc8924
--- /dev/null
+++ b/drivers/nfc/pn533/i2c.c
@@ -0,0 +1,281 @@
+/*
+ * Driver for NXP PN533 NFC Chip - I2C transport layer
+ *
+ * Copyright (C) 2011 Instituto Nokia de Tecnologia
+ * Copyright (C) 2012-2013 Tieto Poland
+ * Copyright (C) 2016 HALE electronic
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <net/nfc/nfc.h>
+#include "pn533.h"
+
+#define VERSION "0.1"
+
+#define PN533_I2C_DRIVER_NAME "pn533_i2c"
+
+struct pn533_i2c_phy {
+	struct i2c_client *i2c_dev;
+	struct pn533 *priv;
+
+	bool aborted;
+
+	int hard_fault;		/*
+				 * < 0 if hardware error occurred (e.g. i2c err)
+				 * and prevents normal operation.
+				 */
+};
+
+static int pn533_i2c_send_ack(struct pn533 *dev, gfp_t flags)
+{
+	struct pn533_i2c_phy *phy = dev->phy;
+	struct i2c_client *client = phy->i2c_dev;
+	u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+	/* spec 6.2.1.3:  Preamble, SoPC (2), ACK Code (2), Postamble */
+	int rc;
+
+	rc = i2c_master_send(client, ack, 6);
+
+	return rc;
+}
+
+static int pn533_i2c_send_frame(struct pn533 *dev,
+				struct sk_buff *out)
+{
+	struct pn533_i2c_phy *phy = dev->phy;
+	struct i2c_client *client = phy->i2c_dev;
+	int rc;
+
+	if (phy->hard_fault != 0)
+		return phy->hard_fault;
+
+	if (phy->priv == NULL)
+		phy->priv = dev;
+
+	phy->aborted = false;
+
+	print_hex_dump_debug("PN533_i2c TX: ", DUMP_PREFIX_NONE, 16, 1,
+			     out->data, out->len, false);
+
+	rc = i2c_master_send(client, out->data, out->len);
+
+	if (rc == -EREMOTEIO) { /* Retry, chip was in power down */
+		usleep_range(6000, 10000);
+		rc = i2c_master_send(client, out->data, out->len);
+	}
+
+	if (rc >= 0) {
+		if (rc != out->len)
+			rc = -EREMOTEIO;
+		else
+			rc = 0;
+	}
+
+	return rc;
+}
+
+static void pn533_i2c_abort_cmd(struct pn533 *dev, gfp_t flags)
+{
+	struct pn533_i2c_phy *phy = dev->phy;
+
+	phy->aborted = true;
+
+	/* An ack will cancel the last issued command */
+	pn533_i2c_send_ack(dev, flags);
+
+	/* schedule cmd_complete_work to finish current command execution */
+	pn533_recv_frame(phy->priv, NULL, -ENOENT);
+}
+
+static int pn533_i2c_read(struct pn533_i2c_phy *phy, struct sk_buff **skb)
+{
+	struct i2c_client *client = phy->i2c_dev;
+	int len = PN533_EXT_FRAME_HEADER_LEN +
+		  PN533_STD_FRAME_MAX_PAYLOAD_LEN +
+		  PN533_STD_FRAME_TAIL_LEN + 1;
+	int r;
+
+	*skb = alloc_skb(len, GFP_KERNEL);
+	if (*skb == NULL)
+		return -ENOMEM;
+
+	r = i2c_master_recv(client, skb_put(*skb, len), len);
+	if (r != len) {
+		nfc_err(&client->dev, "cannot read. r=%d len=%d\n", r, len);
+		kfree_skb(*skb);
+		return -EREMOTEIO;
+	}
+
+	if (!((*skb)->data[0] & 0x01)) {
+		nfc_err(&client->dev, "READY flag not set");
+		kfree_skb(*skb);
+		return -EBUSY;
+	}
+
+	/* remove READY byte */
+	skb_pull(*skb, 1);
+	/* trim to frame size */
+	skb_trim(*skb, phy->priv->ops->rx_frame_size((*skb)->data));
+
+	return 0;
+}
+
+static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data)
+{
+	struct pn533_i2c_phy *phy = data;
+	struct i2c_client *client;
+	struct sk_buff *skb = NULL;
+	int r;
+
+	if (!phy || irq != phy->i2c_dev->irq) {
+		WARN_ON_ONCE(1);
+		return IRQ_NONE;
+	}
+
+	client = phy->i2c_dev;
+	dev_dbg(&client->dev, "IRQ\n");
+
+	if (phy->hard_fault != 0)
+		return IRQ_HANDLED;
+
+	r = pn533_i2c_read(phy, &skb);
+	if (r == -EREMOTEIO) {
+		phy->hard_fault = r;
+
+		pn533_recv_frame(phy->priv, NULL, -EREMOTEIO);
+
+		return IRQ_HANDLED;
+	} else if ((r == -ENOMEM) || (r == -EBADMSG) || (r == -EBUSY)) {
+		return IRQ_HANDLED;
+	}
+
+	if (!phy->aborted)
+		pn533_recv_frame(phy->priv, skb, 0);
+
+	return IRQ_HANDLED;
+}
+
+static struct pn533_phy_ops i2c_phy_ops = {
+	.send_frame = pn533_i2c_send_frame,
+	.send_ack = pn533_i2c_send_ack,
+	.abort_cmd = pn533_i2c_abort_cmd,
+};
+
+
+static int pn533_i2c_probe(struct i2c_client *client,
+			       const struct i2c_device_id *id)
+{
+	struct pn533_i2c_phy *phy;
+	struct pn533 *priv;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+	dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
+		return -ENODEV;
+	}
+
+	phy = devm_kzalloc(&client->dev, sizeof(struct pn533_i2c_phy),
+			   GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->i2c_dev = client;
+	i2c_set_clientdata(client, phy);
+
+	r = request_threaded_irq(client->irq, NULL, pn533_i2c_irq_thread_fn,
+				 IRQF_TRIGGER_FALLING |
+				 IRQF_SHARED | IRQF_ONESHOT,
+				 PN533_I2C_DRIVER_NAME, phy);
+
+	if (r < 0)
+		nfc_err(&client->dev, "Unable to register IRQ handler\n");
+
+	priv = pn533_register_device(PN533_DEVICE_PN532,
+				     PN533_NO_TYPE_B_PROTOCOLS,
+				     PN533_PROTO_REQ_ACK_RESP,
+				     phy, &i2c_phy_ops, NULL,
+				     &phy->i2c_dev->dev,
+				     &client->dev);
+
+	if (IS_ERR(priv)) {
+		r = PTR_ERR(priv);
+		goto err_register;
+	}
+
+	phy->priv = priv;
+
+	return 0;
+
+err_register:
+	free_irq(client->irq, phy);
+
+	return r;
+}
+
+static int pn533_i2c_remove(struct i2c_client *client)
+{
+	struct pn533_i2c_phy *phy = i2c_get_clientdata(client);
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	pn533_unregister_device(phy->priv);
+
+	free_irq(client->irq, phy);
+
+	return 0;
+}
+
+static const struct of_device_id of_pn533_i2c_match[] = {
+	{ .compatible = "nxp,pn533-i2c", },
+	{ .compatible = "nxp,pn532-i2c", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_pn533_i2c_match);
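+
+/*
+ * Example device tree node matched by the table above (a sketch only; the
+ * I2C address and interrupt wiring are board specific and purely
+ * illustrative):
+ *
+ *	nfc@24 {
+ *		compatible = "nxp,pn532-i2c";
+ *		reg = <0x24>;
+ *		interrupt-parent = <&gpio1>;
+ *		interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
+ *	};
+ */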
+
+static struct i2c_device_id pn533_i2c_id_table[] = {
+	{ PN533_I2C_DRIVER_NAME, 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, pn533_i2c_id_table);
+
+static struct i2c_driver pn533_i2c_driver = {
+	.driver = {
+		   .name = PN533_I2C_DRIVER_NAME,
+		   .owner = THIS_MODULE,
+		   .of_match_table = of_match_ptr(of_pn533_i2c_match),
+		  },
+	.probe = pn533_i2c_probe,
+	.id_table = pn533_i2c_id_table,
+	.remove = pn533_i2c_remove,
+};
+
+module_i2c_driver(pn533_i2c_driver);
+
+MODULE_AUTHOR("Michael Thalmeier <michael.thalmeier@hale.at>");
+MODULE_DESCRIPTION("PN533 I2C driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
new file mode 100644
index 0000000..d9c5583
--- /dev/null
+++ b/drivers/nfc/pn533/pn533.c
@@ -0,0 +1,2685 @@
+/*
+ * Driver for NXP PN533 NFC Chip - core functions
+ *
+ * Copyright (C) 2011 Instituto Nokia de Tecnologia
+ * Copyright (C) 2012-2013 Tieto Poland
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <net/nfc/nfc.h>
+#include "pn533.h"
+
+#define VERSION "0.3"
+
+/* How much time we spend listening for initiators */
+#define PN533_LISTEN_TIME 2
+/* Delay between each poll frame (ms) */
+#define PN533_POLL_INTERVAL 10
+
+/* structs for pn533 commands */
+
+/* PN533_CMD_GET_FIRMWARE_VERSION */
+struct pn533_fw_version {
+	u8 ic;
+	u8 ver;
+	u8 rev;
+	u8 support;
+};
+
+/* PN533_CMD_RF_CONFIGURATION */
+#define PN533_CFGITEM_RF_FIELD    0x01
+#define PN533_CFGITEM_TIMING      0x02
+#define PN533_CFGITEM_MAX_RETRIES 0x05
+#define PN533_CFGITEM_PASORI      0x82
+
+#define PN533_CFGITEM_RF_FIELD_AUTO_RFCA 0x2
+#define PN533_CFGITEM_RF_FIELD_ON        0x1
+#define PN533_CFGITEM_RF_FIELD_OFF       0x0
+
+#define PN533_CONFIG_TIMING_102 0xb
+#define PN533_CONFIG_TIMING_204 0xc
+#define PN533_CONFIG_TIMING_409 0xd
+#define PN533_CONFIG_TIMING_819 0xe
+
+#define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00
+#define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF
+
+struct pn533_config_max_retries {
+	u8 mx_rty_atr;
+	u8 mx_rty_psl;
+	u8 mx_rty_passive_act;
+} __packed;
+
+struct pn533_config_timing {
+	u8 rfu;
+	u8 atr_res_timeout;
+	u8 dep_timeout;
+} __packed;
+
+/* PN533_CMD_IN_LIST_PASSIVE_TARGET */
+
+/* felica commands opcode */
+#define PN533_FELICA_OPC_SENSF_REQ 0
+#define PN533_FELICA_OPC_SENSF_RES 1
+/* felica SENSF_REQ parameters */
+#define PN533_FELICA_SENSF_SC_ALL 0xFFFF
+#define PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE 0
+#define PN533_FELICA_SENSF_RC_SYSTEM_CODE 1
+#define PN533_FELICA_SENSF_RC_ADVANCED_PROTOCOL 2
+
+/* type B initiator_data values */
+#define PN533_TYPE_B_AFI_ALL_FAMILIES 0
+#define PN533_TYPE_B_POLL_METHOD_TIMESLOT 0
+#define PN533_TYPE_B_POLL_METHOD_PROBABILISTIC 1
+
+union pn533_cmd_poll_initdata {
+	struct {
+		u8 afi;
+		u8 polling_method;
+	} __packed type_b;
+	struct {
+		u8 opcode;
+		__be16 sc;
+		u8 rc;
+		u8 tsn;
+	} __packed felica;
+};
+
+struct pn533_poll_modulations {
+	struct {
+		u8 maxtg;
+		u8 brty;
+		union pn533_cmd_poll_initdata initiator_data;
+	} __packed data;
+	u8 len;
+};
+
+static const struct pn533_poll_modulations poll_mod[] = {
+	[PN533_POLL_MOD_106KBPS_A] = {
+		.data = {
+			.maxtg = 1,
+			.brty = 0,
+		},
+		.len = 2,
+	},
+	[PN533_POLL_MOD_212KBPS_FELICA] = {
+		.data = {
+			.maxtg = 1,
+			.brty = 1,
+			.initiator_data.felica = {
+				.opcode = PN533_FELICA_OPC_SENSF_REQ,
+				.sc = PN533_FELICA_SENSF_SC_ALL,
+				.rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
+				.tsn = 0x03,
+			},
+		},
+		.len = 7,
+	},
+	[PN533_POLL_MOD_424KBPS_FELICA] = {
+		.data = {
+			.maxtg = 1,
+			.brty = 2,
+			.initiator_data.felica = {
+				.opcode = PN533_FELICA_OPC_SENSF_REQ,
+				.sc = PN533_FELICA_SENSF_SC_ALL,
+				.rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
+				.tsn = 0x03,
+			},
+		 },
+		.len = 7,
+	},
+	[PN533_POLL_MOD_106KBPS_JEWEL] = {
+		.data = {
+			.maxtg = 1,
+			.brty = 4,
+		},
+		.len = 2,
+	},
+	[PN533_POLL_MOD_847KBPS_B] = {
+		.data = {
+			.maxtg = 1,
+			.brty = 8,
+			.initiator_data.type_b = {
+				.afi = PN533_TYPE_B_AFI_ALL_FAMILIES,
+				.polling_method =
+					PN533_TYPE_B_POLL_METHOD_TIMESLOT,
+			},
+		},
+		.len = 3,
+	},
+	[PN533_LISTEN_MOD] = {
+		.len = 0,
+	},
+};
+
+/* PN533_CMD_IN_ATR */
+
+struct pn533_cmd_activate_response {
+	u8 status;
+	u8 nfcid3t[10];
+	u8 didt;
+	u8 bst;
+	u8 brt;
+	u8 to;
+	u8 ppt;
+	/* optional */
+	u8 gt[];
+} __packed;
+
+struct pn533_cmd_jump_dep_response {
+	u8 status;
+	u8 tg;
+	u8 nfcid3t[10];
+	u8 didt;
+	u8 bst;
+	u8 brt;
+	u8 to;
+	u8 ppt;
+	/* optional */
+	u8 gt[];
+} __packed;
+
+
+/* PN533_TG_INIT_AS_TARGET */
+#define PN533_INIT_TARGET_PASSIVE 0x1
+#define PN533_INIT_TARGET_DEP 0x2
+
+#define PN533_INIT_TARGET_RESP_FRAME_MASK 0x3
+#define PN533_INIT_TARGET_RESP_ACTIVE     0x1
+#define PN533_INIT_TARGET_RESP_DEP        0x4
+
+/* The rule: value(high byte) + value(low byte) + checksum = 0 */
+static inline u8 pn533_ext_checksum(u16 value)
+{
+	return ~(u8)(((value & 0xFF00) >> 8) + (u8)(value & 0xFF)) + 1;
+}
+
+/* The rule: value + checksum = 0 */
+static inline u8 pn533_std_checksum(u8 value)
+{
+	return ~value + 1;
+}
+
+/* The rule: sum(data elements) + checksum = 0 */
+static u8 pn533_std_data_checksum(u8 *data, int datalen)
+{
+	u8 sum = 0;
+	int i;
+
+	for (i = 0; i < datalen; i++)
+		sum += data[i];
+
+	return pn533_std_checksum(sum);
+}
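+
+/*
+ * Worked example of the rules above (values are illustrative only): a std
+ * frame with LEN = 0x02 carries LCS = 0xfe (0x02 + 0xfe == 0x00 mod 256),
+ * while an extended frame with LEN = 0x0102 carries 0xfd as its length
+ * checksum (0x01 + 0x02 + 0xfd == 0x00 mod 256).
+ */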
+
+static void pn533_std_tx_frame_init(void *_frame, u8 cmd_code)
+{
+	struct pn533_std_frame *frame = _frame;
+
+	frame->preamble = 0;
+	frame->start_frame = cpu_to_be16(PN533_STD_FRAME_SOF);
+	PN533_STD_FRAME_IDENTIFIER(frame) = PN533_STD_FRAME_DIR_OUT;
+	PN533_FRAME_CMD(frame) = cmd_code;
+	frame->datalen = 2;
+}
+
+static void pn533_std_tx_frame_finish(void *_frame)
+{
+	struct pn533_std_frame *frame = _frame;
+
+	frame->datalen_checksum = pn533_std_checksum(frame->datalen);
+
+	PN533_STD_FRAME_CHECKSUM(frame) =
+		pn533_std_data_checksum(frame->data, frame->datalen);
+
+	PN533_STD_FRAME_POSTAMBLE(frame) = 0;
+}
+
+static void pn533_std_tx_update_payload_len(void *_frame, int len)
+{
+	struct pn533_std_frame *frame = _frame;
+
+	frame->datalen += len;
+}
+
+static bool pn533_std_rx_frame_is_valid(void *_frame, struct pn533 *dev)
+{
+	u8 checksum;
+	struct pn533_std_frame *stdf = _frame;
+
+	if (stdf->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
+		return false;
+
+	if (likely(!PN533_STD_IS_EXTENDED(stdf))) {
+		/* Standard frame code */
+		dev->ops->rx_header_len = PN533_STD_FRAME_HEADER_LEN;
+
+		checksum = pn533_std_checksum(stdf->datalen);
+		if (checksum != stdf->datalen_checksum)
+			return false;
+
+		checksum = pn533_std_data_checksum(stdf->data, stdf->datalen);
+		if (checksum != PN533_STD_FRAME_CHECKSUM(stdf))
+			return false;
+	} else {
+		/* Extended */
+		struct pn533_ext_frame *eif = _frame;
+
+		dev->ops->rx_header_len = PN533_EXT_FRAME_HEADER_LEN;
+
+		checksum = pn533_ext_checksum(be16_to_cpu(eif->datalen));
+		if (checksum != eif->datalen_checksum)
+			return false;
+
+		/* check data checksum */
+		checksum = pn533_std_data_checksum(eif->data,
+						   be16_to_cpu(eif->datalen));
+		if (checksum != PN533_EXT_FRAME_CHECKSUM(eif))
+			return false;
+	}
+
+	return true;
+}
+
+bool pn533_rx_frame_is_ack(void *_frame)
+{
+	struct pn533_std_frame *frame = _frame;
+
+	if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
+		return false;
+
+	if (frame->datalen != 0 || frame->datalen_checksum != 0xFF)
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(pn533_rx_frame_is_ack);
+
+static inline int pn533_std_rx_frame_size(void *frame)
+{
+	struct pn533_std_frame *f = frame;
+
+	/* check for Extended Information frame */
+	if (PN533_STD_IS_EXTENDED(f)) {
+		struct pn533_ext_frame *eif = frame;
+
+		return sizeof(struct pn533_ext_frame)
+			+ be16_to_cpu(eif->datalen) + PN533_STD_FRAME_TAIL_LEN;
+	}
+
+	return sizeof(struct pn533_std_frame) + f->datalen +
+	       PN533_STD_FRAME_TAIL_LEN;
+}
+
+static u8 pn533_std_get_cmd_code(void *frame)
+{
+	struct pn533_std_frame *f = frame;
+	struct pn533_ext_frame *eif = frame;
+
+	if (PN533_STD_IS_EXTENDED(f))
+		return PN533_FRAME_CMD(eif);
+	else
+		return PN533_FRAME_CMD(f);
+}
+
+bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame)
+{
+	return (dev->ops->get_cmd_code(frame) ==
+				PN533_CMD_RESPONSE(dev->cmd->code));
+}
+EXPORT_SYMBOL_GPL(pn533_rx_frame_is_cmd_response);
+
+
+static struct pn533_frame_ops pn533_std_frame_ops = {
+	.tx_frame_init = pn533_std_tx_frame_init,
+	.tx_frame_finish = pn533_std_tx_frame_finish,
+	.tx_update_payload_len = pn533_std_tx_update_payload_len,
+	.tx_header_len = PN533_STD_FRAME_HEADER_LEN,
+	.tx_tail_len = PN533_STD_FRAME_TAIL_LEN,
+
+	.rx_is_frame_valid = pn533_std_rx_frame_is_valid,
+	.rx_frame_size = pn533_std_rx_frame_size,
+	.rx_header_len = PN533_STD_FRAME_HEADER_LEN,
+	.rx_tail_len = PN533_STD_FRAME_TAIL_LEN,
+
+	.max_payload_len =  PN533_STD_FRAME_MAX_PAYLOAD_LEN,
+	.get_cmd_code = pn533_std_get_cmd_code,
+};
+
+static void pn533_build_cmd_frame(struct pn533 *dev, u8 cmd_code,
+				  struct sk_buff *skb)
+{
+	/* payload is already there, just update datalen */
+	int payload_len = skb->len;
+	struct pn533_frame_ops *ops = dev->ops;
+
+
+	skb_push(skb, ops->tx_header_len);
+	skb_put(skb, ops->tx_tail_len);
+
+	ops->tx_frame_init(skb->data, cmd_code);
+	ops->tx_update_payload_len(skb->data, payload_len);
+	ops->tx_frame_finish(skb->data);
+}
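+
+/*
+ * Example frame built by pn533_build_cmd_frame() for GET_FIRMWARE_VERSION
+ * with an empty payload (a sketch, assuming the usual 0xd4 host-to-chip
+ * direction byte and command code 0x02 from pn533.h):
+ *
+ *	00 00 ff 02 fe d4 02 2a 00
+ *	preamble, SoF (00 ff), LEN = 2 (TFI + CMD), LCS = 0xfe,
+ *	TFI = 0xd4, CMD = 0x02, DCS = 0x2a, postamble
+ */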
+
+static int pn533_send_async_complete(struct pn533 *dev)
+{
+	struct pn533_cmd *cmd = dev->cmd;
+	int status = cmd->status;
+
+	struct sk_buff *req = cmd->req;
+	struct sk_buff *resp = cmd->resp;
+
+	int rc;
+
+	dev_kfree_skb(req);
+
+	if (status < 0) {
+		rc = cmd->complete_cb(dev, cmd->complete_cb_context,
+				      ERR_PTR(status));
+		dev_kfree_skb(resp);
+		goto done;
+	}
+
+	skb_pull(resp, dev->ops->rx_header_len);
+	skb_trim(resp, resp->len - dev->ops->rx_tail_len);
+
+	rc = cmd->complete_cb(dev, cmd->complete_cb_context, resp);
+
+done:
+	kfree(cmd);
+	dev->cmd = NULL;
+	return rc;
+}
+
+static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
+			      struct sk_buff *req,
+			      pn533_send_async_complete_t complete_cb,
+			      void *complete_cb_context)
+{
+	struct pn533_cmd *cmd;
+	int rc = 0;
+
+	dev_dbg(dev->dev, "Sending command 0x%x\n", cmd_code);
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->code = cmd_code;
+	cmd->req = req;
+	cmd->complete_cb = complete_cb;
+	cmd->complete_cb_context = complete_cb_context;
+
+	pn533_build_cmd_frame(dev, cmd_code, req);
+
+	mutex_lock(&dev->cmd_lock);
+
+	if (!dev->cmd_pending) {
+		rc = dev->phy_ops->send_frame(dev, req);
+		if (rc)
+			goto error;
+
+		dev->cmd_pending = 1;
+		dev->cmd = cmd;
+		goto unlock;
+	}
+
+	dev_dbg(dev->dev, "%s Queueing command 0x%x\n",
+		__func__, cmd_code);
+
+	INIT_LIST_HEAD(&cmd->queue);
+	list_add_tail(&cmd->queue, &dev->cmd_queue);
+
+	goto unlock;
+
+error:
+	kfree(cmd);
+unlock:
+	mutex_unlock(&dev->cmd_lock);
+	return rc;
+}
+
+static int pn533_send_data_async(struct pn533 *dev, u8 cmd_code,
+				 struct sk_buff *req,
+				 pn533_send_async_complete_t complete_cb,
+				 void *complete_cb_context)
+{
+	int rc;
+
+	rc = __pn533_send_async(dev, cmd_code, req, complete_cb,
+				complete_cb_context);
+
+	return rc;
+}
+
+static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code,
+				struct sk_buff *req,
+				pn533_send_async_complete_t complete_cb,
+				void *complete_cb_context)
+{
+	int rc;
+
+	rc = __pn533_send_async(dev, cmd_code, req, complete_cb,
+				complete_cb_context);
+
+	return rc;
+}
+
+/*
+ * pn533_send_cmd_direct_async
+ *
+ * The function sends a priority cmd directly to the chip, omitting the cmd
+ * queue. It's intended to be used by the chaining mechanism for received
+ * responses, where the host has to request every single chunk of data before
+ * scheduling the next cmd from the queue.
+ */
+static int pn533_send_cmd_direct_async(struct pn533 *dev, u8 cmd_code,
+				       struct sk_buff *req,
+				       pn533_send_async_complete_t complete_cb,
+				       void *complete_cb_context)
+{
+	struct pn533_cmd *cmd;
+	int rc;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->code = cmd_code;
+	cmd->req = req;
+	cmd->complete_cb = complete_cb;
+	cmd->complete_cb_context = complete_cb_context;
+
+	pn533_build_cmd_frame(dev, cmd_code, req);
+
+	rc = dev->phy_ops->send_frame(dev, req);
+	if (rc < 0)
+		kfree(cmd);
+	else
+		dev->cmd = cmd;
+
+	return rc;
+}
+
+static void pn533_wq_cmd_complete(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work);
+	int rc;
+
+	rc = pn533_send_async_complete(dev);
+	if (rc != -EINPROGRESS)
+		queue_work(dev->wq, &dev->cmd_work);
+}
+
+static void pn533_wq_cmd(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, cmd_work);
+	struct pn533_cmd *cmd;
+	int rc;
+
+	mutex_lock(&dev->cmd_lock);
+
+	if (list_empty(&dev->cmd_queue)) {
+		dev->cmd_pending = 0;
+		mutex_unlock(&dev->cmd_lock);
+		return;
+	}
+
+	cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
+
+	list_del(&cmd->queue);
+
+	mutex_unlock(&dev->cmd_lock);
+
+	rc = dev->phy_ops->send_frame(dev, cmd->req);
+	if (rc < 0) {
+		dev_kfree_skb(cmd->req);
+		kfree(cmd);
+		return;
+	}
+
+	dev->cmd = cmd;
+}
+
+struct pn533_sync_cmd_response {
+	struct sk_buff *resp;
+	struct completion done;
+};
+
+static int pn533_send_sync_complete(struct pn533 *dev, void *_arg,
+				    struct sk_buff *resp)
+{
+	struct pn533_sync_cmd_response *arg = _arg;
+
+	arg->resp = resp;
+	complete(&arg->done);
+
+	return 0;
+}
+
+/*  pn533_send_cmd_sync
+ *
+ *  Please note the req parameter is freed inside the function to
+ *  limit the number of return value interpretations by the caller.
+ *
+ *  1. negative in case of error during TX path -> req should be freed
+ *
+ *  2. negative in case of error during RX path -> req should not be freed
+ *     as it's been already freed at the beginning of RX path by
+ *     async_complete_cb.
+ *
+ *  3. valid pointer in case of a successful RX path
+ *
+ *  A caller has to check the return value with the IS_ERR macro. If the test
+ *  passes, the returned pointer is valid.
+ *
+ */
+static struct sk_buff *pn533_send_cmd_sync(struct pn533 *dev, u8 cmd_code,
+					       struct sk_buff *req)
+{
+	int rc;
+	struct pn533_sync_cmd_response arg;
+
+	init_completion(&arg.done);
+
+	rc = pn533_send_cmd_async(dev, cmd_code, req,
+				  pn533_send_sync_complete, &arg);
+	if (rc) {
+		dev_kfree_skb(req);
+		return ERR_PTR(rc);
+	}
+
+	wait_for_completion(&arg.done);
+
+	return arg.resp;
+}
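+
+/*
+ * Typical caller pattern for the sync helper (a sketch only; the command
+ * code is just an example):
+ *
+ *	resp = pn533_send_cmd_sync(dev, PN533_CMD_GET_FIRMWARE_VERSION, skb);
+ *	if (IS_ERR(resp))
+ *		return PTR_ERR(resp);
+ *	... parse resp->data ...
+ *	dev_kfree_skb(resp);
+ */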
+
+static struct sk_buff *pn533_alloc_skb(struct pn533 *dev, unsigned int size)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(dev->ops->tx_header_len +
+			size +
+			dev->ops->tx_tail_len, GFP_KERNEL);
+
+	if (skb)
+		skb_reserve(skb, dev->ops->tx_header_len);
+
+	return skb;
+}
+
+struct pn533_target_type_a {
+	__be16 sens_res;
+	u8 sel_res;
+	u8 nfcid_len;
+	u8 nfcid_data[];
+} __packed;
+
+
+#define PN533_TYPE_A_SENS_RES_NFCID1(x) ((u8)((be16_to_cpu(x) & 0x00C0) >> 6))
+#define PN533_TYPE_A_SENS_RES_SSD(x) ((u8)((be16_to_cpu(x) & 0x001F) >> 0))
+#define PN533_TYPE_A_SENS_RES_PLATCONF(x) ((u8)((be16_to_cpu(x) & 0x0F00) >> 8))
+
+#define PN533_TYPE_A_SENS_RES_SSD_JEWEL 0x00
+#define PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL 0x0C
+
+#define PN533_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5)
+#define PN533_TYPE_A_SEL_CASCADE(x) (((x) & 0x04) >> 2)
+
+#define PN533_TYPE_A_SEL_PROT_MIFARE 0
+#define PN533_TYPE_A_SEL_PROT_ISO14443 1
+#define PN533_TYPE_A_SEL_PROT_DEP 2
+#define PN533_TYPE_A_SEL_PROT_ISO14443_DEP 3
+
+static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
+							int target_data_len)
+{
+	u8 ssd;
+	u8 platconf;
+
+	if (target_data_len < sizeof(struct pn533_target_type_a))
+		return false;
+
+	/*
+	 * The length checks of nfcid[] and ats[] are not being performed because
+	 * the values are not being used
+	 */
+
+	/* Requirement 4.6.3.3 from NFC Forum Digital Spec */
+	ssd = PN533_TYPE_A_SENS_RES_SSD(type_a->sens_res);
+	platconf = PN533_TYPE_A_SENS_RES_PLATCONF(type_a->sens_res);
+
+	if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
+	     platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) ||
+	    (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
+	     platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL))
+		return false;
+
+	/* Requirements 4.8.2.1, 4.8.2.3, 4.8.2.5 and 4.8.2.7 from NFC Forum */
+	if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
+		return false;
+
+	return true;
+}
+
+static int pn533_target_found_type_a(struct nfc_target *nfc_tgt, u8 *tgt_data,
+							int tgt_data_len)
+{
+	struct pn533_target_type_a *tgt_type_a;
+
+	tgt_type_a = (struct pn533_target_type_a *)tgt_data;
+
+	if (!pn533_target_type_a_is_valid(tgt_type_a, tgt_data_len))
+		return -EPROTO;
+
+	switch (PN533_TYPE_A_SEL_PROT(tgt_type_a->sel_res)) {
+	case PN533_TYPE_A_SEL_PROT_MIFARE:
+		nfc_tgt->supported_protocols = NFC_PROTO_MIFARE_MASK;
+		break;
+	case PN533_TYPE_A_SEL_PROT_ISO14443:
+		nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK;
+		break;
+	case PN533_TYPE_A_SEL_PROT_DEP:
+		nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+		break;
+	case PN533_TYPE_A_SEL_PROT_ISO14443_DEP:
+		nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK |
+							NFC_PROTO_NFC_DEP_MASK;
+		break;
+	}
+
+	nfc_tgt->sens_res = be16_to_cpu(tgt_type_a->sens_res);
+	nfc_tgt->sel_res = tgt_type_a->sel_res;
+	nfc_tgt->nfcid1_len = tgt_type_a->nfcid_len;
+	memcpy(nfc_tgt->nfcid1, tgt_type_a->nfcid_data, nfc_tgt->nfcid1_len);
+
+	return 0;
+}
+
+struct pn533_target_felica {
+	u8 pol_res;
+	u8 opcode;
+	u8 nfcid2[NFC_NFCID2_MAXSIZE];
+	u8 pad[8];
+	/* optional */
+	u8 syst_code[];
+} __packed;
+
+#define PN533_FELICA_SENSF_NFCID2_DEP_B1 0x01
+#define PN533_FELICA_SENSF_NFCID2_DEP_B2 0xFE
+
+static bool pn533_target_felica_is_valid(struct pn533_target_felica *felica,
+							int target_data_len)
+{
+	if (target_data_len < sizeof(struct pn533_target_felica))
+		return false;
+
+	if (felica->opcode != PN533_FELICA_OPC_SENSF_RES)
+		return false;
+
+	return true;
+}
+
+static int pn533_target_found_felica(struct nfc_target *nfc_tgt, u8 *tgt_data,
+							int tgt_data_len)
+{
+	struct pn533_target_felica *tgt_felica;
+
+	tgt_felica = (struct pn533_target_felica *)tgt_data;
+
+	if (!pn533_target_felica_is_valid(tgt_felica, tgt_data_len))
+		return -EPROTO;
+
+	if ((tgt_felica->nfcid2[0] == PN533_FELICA_SENSF_NFCID2_DEP_B1) &&
+	    (tgt_felica->nfcid2[1] == PN533_FELICA_SENSF_NFCID2_DEP_B2))
+		nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+	else
+		nfc_tgt->supported_protocols = NFC_PROTO_FELICA_MASK;
+
+	memcpy(nfc_tgt->sensf_res, &tgt_felica->opcode, 9);
+	nfc_tgt->sensf_res_len = 9;
+
+	memcpy(nfc_tgt->nfcid2, tgt_felica->nfcid2, NFC_NFCID2_MAXSIZE);
+	nfc_tgt->nfcid2_len = NFC_NFCID2_MAXSIZE;
+
+	return 0;
+}
+
+struct pn533_target_jewel {
+	__be16 sens_res;
+	u8 jewelid[4];
+} __packed;
+
+static bool pn533_target_jewel_is_valid(struct pn533_target_jewel *jewel,
+							int target_data_len)
+{
+	u8 ssd;
+	u8 platconf;
+
+	if (target_data_len < sizeof(struct pn533_target_jewel))
+		return false;
+
+	/* Requirement 4.6.3.3 from NFC Forum Digital Spec */
+	ssd = PN533_TYPE_A_SENS_RES_SSD(jewel->sens_res);
+	platconf = PN533_TYPE_A_SENS_RES_PLATCONF(jewel->sens_res);
+
+	if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
+	     platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) ||
+	    (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
+	     platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL))
+		return false;
+
+	return true;
+}
+
+static int pn533_target_found_jewel(struct nfc_target *nfc_tgt, u8 *tgt_data,
+							int tgt_data_len)
+{
+	struct pn533_target_jewel *tgt_jewel;
+
+	tgt_jewel = (struct pn533_target_jewel *)tgt_data;
+
+	if (!pn533_target_jewel_is_valid(tgt_jewel, tgt_data_len))
+		return -EPROTO;
+
+	nfc_tgt->supported_protocols = NFC_PROTO_JEWEL_MASK;
+	nfc_tgt->sens_res = be16_to_cpu(tgt_jewel->sens_res);
+	nfc_tgt->nfcid1_len = 4;
+	memcpy(nfc_tgt->nfcid1, tgt_jewel->jewelid, nfc_tgt->nfcid1_len);
+
+	return 0;
+}
+
+struct pn533_type_b_prot_info {
+	u8 bitrate;
+	u8 fsci_type;
+	u8 fwi_adc_fo;
+} __packed;
+
+#define PN533_TYPE_B_PROT_FCSI(x) (((x) & 0xF0) >> 4)
+#define PN533_TYPE_B_PROT_TYPE(x) (((x) & 0x0F) >> 0)
+#define PN533_TYPE_B_PROT_TYPE_RFU_MASK 0x8
+
+struct pn533_type_b_sens_res {
+	u8 opcode;
+	u8 nfcid[4];
+	u8 appdata[4];
+	struct pn533_type_b_prot_info prot_info;
+} __packed;
+
+#define PN533_TYPE_B_OPC_SENSB_RES 0x50
+
+struct pn533_target_type_b {
+	struct pn533_type_b_sens_res sensb_res;
+	u8 attrib_res_len;
+	u8 attrib_res[];
+} __packed;
+
+static bool pn533_target_type_b_is_valid(struct pn533_target_type_b *type_b,
+							int target_data_len)
+{
+	if (target_data_len < sizeof(struct pn533_target_type_b))
+		return false;
+
+	if (type_b->sensb_res.opcode != PN533_TYPE_B_OPC_SENSB_RES)
+		return false;
+
+	if (PN533_TYPE_B_PROT_TYPE(type_b->sensb_res.prot_info.fsci_type) &
+						PN533_TYPE_B_PROT_TYPE_RFU_MASK)
+		return false;
+
+	return true;
+}
+
+static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data,
+							int tgt_data_len)
+{
+	struct pn533_target_type_b *tgt_type_b;
+
+	tgt_type_b = (struct pn533_target_type_b *)tgt_data;
+
+	if (!pn533_target_type_b_is_valid(tgt_type_b, tgt_data_len))
+		return -EPROTO;
+
+	nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
+
+	return 0;
+}
+
+static void pn533_poll_reset_mod_list(struct pn533 *dev);
+static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
+			      int tgdata_len)
+{
+	struct nfc_target nfc_tgt;
+	int rc;
+
+	dev_dbg(dev->dev, "%s: modulation=%d\n",
+		__func__, dev->poll_mod_curr);
+
+	if (tg != 1)
+		return -EPROTO;
+
+	memset(&nfc_tgt, 0, sizeof(struct nfc_target));
+
+	switch (dev->poll_mod_curr) {
+	case PN533_POLL_MOD_106KBPS_A:
+		rc = pn533_target_found_type_a(&nfc_tgt, tgdata, tgdata_len);
+		break;
+	case PN533_POLL_MOD_212KBPS_FELICA:
+	case PN533_POLL_MOD_424KBPS_FELICA:
+		rc = pn533_target_found_felica(&nfc_tgt, tgdata, tgdata_len);
+		break;
+	case PN533_POLL_MOD_106KBPS_JEWEL:
+		rc = pn533_target_found_jewel(&nfc_tgt, tgdata, tgdata_len);
+		break;
+	case PN533_POLL_MOD_847KBPS_B:
+		rc = pn533_target_found_type_b(&nfc_tgt, tgdata, tgdata_len);
+		break;
+	default:
+		nfc_err(dev->dev,
+			"Unknown current poll modulation\n");
+		return -EPROTO;
+	}
+
+	if (rc)
+		return rc;
+
+	if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
+		dev_dbg(dev->dev,
+			"The Tg found doesn't have the desired protocol\n");
+		return -EAGAIN;
+	}
+
+	dev_dbg(dev->dev,
+		"Target found - supported protocols: 0x%x\n",
+		nfc_tgt.supported_protocols);
+
+	dev->tgt_available_prots = nfc_tgt.supported_protocols;
+
+	pn533_poll_reset_mod_list(dev);
+	nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
+
+	return 0;
+}
+
+static inline void pn533_poll_next_mod(struct pn533 *dev)
+{
+	dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count;
+}
+
+static void pn533_poll_reset_mod_list(struct pn533 *dev)
+{
+	dev->poll_mod_count = 0;
+}
+
+static void pn533_poll_add_mod(struct pn533 *dev, u8 mod_index)
+{
+	dev->poll_mod_active[dev->poll_mod_count] =
+		(struct pn533_poll_modulations *)&poll_mod[mod_index];
+	dev->poll_mod_count++;
+}
+
+static void pn533_poll_create_mod_list(struct pn533 *dev,
+				       u32 im_protocols, u32 tm_protocols)
+{
+	pn533_poll_reset_mod_list(dev);
+
+	if ((im_protocols & NFC_PROTO_MIFARE_MASK) ||
+	    (im_protocols & NFC_PROTO_ISO14443_MASK) ||
+	    (im_protocols & NFC_PROTO_NFC_DEP_MASK))
+		pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A);
+
+	if (im_protocols & NFC_PROTO_FELICA_MASK ||
+	    im_protocols & NFC_PROTO_NFC_DEP_MASK) {
+		pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA);
+		pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA);
+	}
+
+	if (im_protocols & NFC_PROTO_JEWEL_MASK)
+		pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL);
+
+	if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
+		pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B);
+
+	if (tm_protocols)
+		pn533_poll_add_mod(dev, PN533_LISTEN_MOD);
+}
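+
+/*
+ * For example, im_protocols = FELICA | JEWEL with no tm_protocols yields the
+ * list 212 kbps FeliCa, 424 kbps FeliCa, 106 kbps Jewel; any tm_protocols
+ * bit appends the listen-mode entry at the end.
+ */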
+
+static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp)
+{
+	u8 nbtg, tg, *tgdata;
+	int rc, tgdata_len;
+
+	/* Toggle the DEP polling */
+	if (dev->poll_protocols & NFC_PROTO_NFC_DEP_MASK)
+		dev->poll_dep = 1;
+
+	nbtg = resp->data[0];
+	tg = resp->data[1];
+	tgdata = &resp->data[2];
+	tgdata_len = resp->len - 2;  /* nbtg + tg */
+
+	if (nbtg) {
+		rc = pn533_target_found(dev, tg, tgdata, tgdata_len);
+
+		/* We must stop the poll after a valid target found */
+		if (rc == 0)
+			return 0;
+	}
+
+	return -EAGAIN;
+}
+
+static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
+{
+	struct sk_buff *skb;
+	u8 *felica, *nfcid3, *gb;
+
+	u8 *gbytes = dev->gb;
+	size_t gbytes_len = dev->gb_len;
+
+	u8 felica_params[18] = {0x1, 0xfe, /* DEP */
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, /* random */
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0xff, 0xff}; /* System code */
+
+	u8 mifare_params[6] = {0x1, 0x1, /* SENS_RES */
+			       0x0, 0x0, 0x0,
+			       0x40}; /* SEL_RES for DEP */
+
+	unsigned int skb_len = 36 + /*
+				     * mode (1), mifare (6),
+				     * felica (18), nfcid3 (10), gb_len (1)
+				     */
+			       gbytes_len +
+			       1;  /* len Tk*/
+
+	skb = pn533_alloc_skb(dev, skb_len);
+	if (!skb)
+		return NULL;
+
+	/* DEP support only */
+	*skb_put(skb, 1) = PN533_INIT_TARGET_DEP;
+
+	/* MIFARE params */
+	memcpy(skb_put(skb, 6), mifare_params, 6);
+
+	/* Felica params */
+	felica = skb_put(skb, 18);
+	memcpy(felica, felica_params, 18);
+	get_random_bytes(felica + 2, 6);
+
+	/* NFCID3 */
+	nfcid3 = skb_put(skb, 10);
+	memset(nfcid3, 0, 10);
+	memcpy(nfcid3, felica, 8);
+
+	/* General bytes */
+	*skb_put(skb, 1) = gbytes_len;
+
+	gb = skb_put(skb, gbytes_len);
+	memcpy(gb, gbytes, gbytes_len);
+
+	/* Len Tk */
+	*skb_put(skb, 1) = 0;
+
+	return skb;
+}
+
+static void pn533_wq_tm_mi_recv(struct work_struct *work);
+static struct sk_buff *pn533_build_response(struct pn533 *dev);
+
+static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
+				      struct sk_buff *resp)
+{
+	struct sk_buff *skb;
+	u8 status, ret, mi;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (IS_ERR(resp)) {
+		skb_queue_purge(&dev->resp_q);
+		return PTR_ERR(resp);
+	}
+
+	status = resp->data[0];
+
+	ret = status & PN533_CMD_RET_MASK;
+	mi = status & PN533_CMD_MI_MASK;
+
+	skb_pull(resp, sizeof(status));
+
+	if (ret != PN533_CMD_RET_SUCCESS) {
+		rc = -EIO;
+		goto error;
+	}
+
+	skb_queue_tail(&dev->resp_q, resp);
+
+	if (mi) {
+		queue_work(dev->wq, &dev->mi_tm_rx_work);
+		return -EINPROGRESS;
+	}
+
+	skb = pn533_build_response(dev);
+	if (!skb) {
+		rc = -EIO;
+		goto error;
+	}
+
+	return nfc_tm_data_received(dev->nfc_dev, skb);
+
+error:
+	nfc_tm_deactivated(dev->nfc_dev);
+	dev->tgt_mode = 0;
+	skb_queue_purge(&dev->resp_q);
+	dev_kfree_skb(resp);
+
+	return rc;
+}
+
+static void pn533_wq_tm_mi_recv(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, mi_tm_rx_work);
+	struct sk_buff *skb;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	skb = pn533_alloc_skb(dev, 0);
+	if (!skb)
+		return;
+
+	rc = pn533_send_cmd_direct_async(dev,
+					PN533_CMD_TG_GET_DATA,
+					skb,
+					pn533_tm_get_data_complete,
+					NULL);
+
+	if (rc < 0)
+		dev_kfree_skb(skb);
+}
+
+static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
+				  struct sk_buff *resp);
+static void pn533_wq_tm_mi_send(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, mi_tm_tx_work);
+	struct sk_buff *skb;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	/* Grab the first skb in the queue */
+	skb = skb_dequeue(&dev->fragment_skb);
+	if (skb == NULL) {	/* No more data */
+		/* Reset the queue for future use */
+		skb_queue_head_init(&dev->fragment_skb);
+		goto error;
+	}
+
+	/* last entry - remove MI bit */
+	if (skb_queue_len(&dev->fragment_skb) == 0) {
+		rc = pn533_send_cmd_direct_async(dev, PN533_CMD_TG_SET_DATA,
+					skb, pn533_tm_send_complete, NULL);
+	} else {
+		rc = pn533_send_cmd_direct_async(dev,
+					PN533_CMD_TG_SET_META_DATA,
+					skb, pn533_tm_send_complete, NULL);
+	}
+
+	if (rc == 0) /* success */
+		return;
+
+	dev_err(dev->dev,
+		"Error %d when trying to perform set meta data exchange\n", rc);
+
+	dev_kfree_skb(skb);
+
+error:
+	dev->phy_ops->send_ack(dev, GFP_KERNEL);
+	queue_work(dev->wq, &dev->cmd_work);
+}
+
+static void pn533_wq_tg_get_data(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, tg_work);
+	struct sk_buff *skb;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	skb = pn533_alloc_skb(dev, 0);
+	if (!skb)
+		return;
+
+	rc = pn533_send_data_async(dev, PN533_CMD_TG_GET_DATA, skb,
+				   pn533_tm_get_data_complete, NULL);
+
+	if (rc < 0)
+		dev_kfree_skb(skb);
+}
+
+#define ATR_REQ_GB_OFFSET 17
+static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
+{
+	u8 mode, *cmd, comm_mode = NFC_COMM_PASSIVE, *gb;
+	size_t gb_len;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (resp->len < ATR_REQ_GB_OFFSET + 1)
+		return -EINVAL;
+
+	mode = resp->data[0];
+	cmd = &resp->data[1];
+
+	dev_dbg(dev->dev, "Target mode 0x%x len %d\n",
+		mode, resp->len);
+
+	if ((mode & PN533_INIT_TARGET_RESP_FRAME_MASK) ==
+	    PN533_INIT_TARGET_RESP_ACTIVE)
+		comm_mode = NFC_COMM_ACTIVE;
+
+	if ((mode & PN533_INIT_TARGET_RESP_DEP) == 0)  /* Only DEP supported */
+		return -EOPNOTSUPP;
+
+	gb = cmd + ATR_REQ_GB_OFFSET;
+	gb_len = resp->len - (ATR_REQ_GB_OFFSET + 1);
+
+	rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
+			      comm_mode, gb, gb_len);
+	if (rc < 0) {
+		nfc_err(dev->dev,
+			"Error when signaling target activation\n");
+		return rc;
+	}
+
+	dev->tgt_mode = 1;
+	queue_work(dev->wq, &dev->tg_work);
+
+	return 0;
+}
+
+static void pn533_listen_mode_timer(unsigned long data)
+{
+	struct pn533 *dev = (struct pn533 *)data;
+
+	dev_dbg(dev->dev, "Listen mode timeout\n");
+
+	dev->cancel_listen = 1;
+
+	pn533_poll_next_mod(dev);
+
+	queue_delayed_work(dev->wq, &dev->poll_work,
+			   msecs_to_jiffies(PN533_POLL_INTERVAL));
+}
+
+static int pn533_rf_complete(struct pn533 *dev, void *arg,
+			     struct sk_buff *resp)
+{
+	int rc = 0;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+
+		nfc_err(dev->dev, "RF setting error %d\n", rc);
+
+		return rc;
+	}
+
+	queue_delayed_work(dev->wq, &dev->poll_work,
+			   msecs_to_jiffies(PN533_POLL_INTERVAL));
+
+	dev_kfree_skb(resp);
+	return rc;
+}
+
+static void pn533_wq_rf(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, rf_work);
+	struct sk_buff *skb;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	skb = pn533_alloc_skb(dev, 2);
+	if (!skb)
+		return;
+
+	*skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD;
+	*skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
+
+	rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb,
+				  pn533_rf_complete, NULL);
+	if (rc < 0) {
+		dev_kfree_skb(skb);
+		nfc_err(dev->dev, "RF setting error %d\n", rc);
+	}
+}
+
+static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
+				   struct sk_buff *resp)
+{
+	struct pn533_cmd_jump_dep_response *rsp;
+	struct nfc_target nfc_target;
+	u8 target_gt_len;
+	int rc;
+
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
+
+	rc = rsp->status & PN533_CMD_RET_MASK;
+	if (rc != PN533_CMD_RET_SUCCESS) {
+		/* No target found, turn radio off */
+		queue_work(dev->wq, &dev->rf_work);
+
+		dev_kfree_skb(resp);
+		return 0;
+	}
+
+	dev_dbg(dev->dev, "Creating new target");
+
+	nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+	nfc_target.nfcid1_len = 10;
+	memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
+	rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+	if (rc)
+		goto error;
+
+	dev->tgt_available_prots = 0;
+	dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+	/* ATR_RES general bytes are located at offset 17 */
+	target_gt_len = resp->len - 17;
+	rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+					  rsp->gt, target_gt_len);
+	if (!rc) {
+		rc = nfc_dep_link_is_up(dev->nfc_dev,
+					dev->nfc_dev->targets[0].idx,
+					0, NFC_RF_INITIATOR);
+
+		if (!rc)
+			pn533_poll_reset_mod_list(dev);
+	}
+error:
+	dev_kfree_skb(resp);
+	return rc;
+}
+
+#define PASSIVE_DATA_LEN 5
+static int pn533_poll_dep(struct nfc_dev *nfc_dev)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	struct sk_buff *skb;
+	int rc, skb_len;
+	u8 *next, nfcid3[NFC_NFCID3_MAXSIZE];
+	u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
+
+	dev_dbg(dev->dev, "%s", __func__);
+
+	if (!dev->gb) {
+		dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
+
+		if (!dev->gb || !dev->gb_len) {
+			dev->poll_dep = 0;
+			queue_work(dev->wq, &dev->rf_work);
+		}
+	}
+
+	skb_len = 3 + dev->gb_len; /* ActPass + BR + Next */
+	skb_len += PASSIVE_DATA_LEN;
+
+	/* NFCID3 */
+	skb_len += NFC_NFCID3_MAXSIZE;
+	nfcid3[0] = 0x1;
+	nfcid3[1] = 0xfe;
+	get_random_bytes(nfcid3 + 2, 6);
+
+	skb = pn533_alloc_skb(dev, skb_len);
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, 1) = 0x01;  /* Active */
+	*skb_put(skb, 1) = 0x02;  /* 424 kbps */
+
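+	/*
+	 * 'Next' is a bitmask of the optional fields appended below:
+	 * 1 = passive initiator data, 2 = NFCID3, 4 = general bytes (Gi).
+	 */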
+	next = skb_put(skb, 1);  /* Next */
+	*next = 0;
+
+	/* Copy passive data */
+	memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+	*next |= 1;
+
+	/* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
+	memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
+	       NFC_NFCID3_MAXSIZE);
+	*next |= 2;
+
+	memcpy(skb_put(skb, dev->gb_len), dev->gb, dev->gb_len);
+	*next |= 4; /* We have some Gi */
+
+	rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
+				  pn533_poll_dep_complete, NULL);
+
+	if (rc < 0)
+		dev_kfree_skb(skb);
+
+	return rc;
+}
+
+static int pn533_poll_complete(struct pn533 *dev, void *arg,
+			       struct sk_buff *resp)
+{
+	struct pn533_poll_modulations *cur_mod;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+
+		nfc_err(dev->dev, "%s  Poll complete error %d\n",
+			__func__, rc);
+
+		if (rc == -ENOENT) {
+			if (dev->poll_mod_count != 0)
+				return rc;
+			goto stop_poll;
+		} else if (rc < 0) {
+			nfc_err(dev->dev,
+				"Error %d when running poll\n", rc);
+			goto stop_poll;
+		}
+	}
+
+	cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+	if (cur_mod->len == 0) { /* Target mode */
+		del_timer(&dev->listen_timer);
+		rc = pn533_init_target_complete(dev, resp);
+		goto done;
+	}
+
+	/* Initiator mode */
+	rc = pn533_start_poll_complete(dev, resp);
+	if (!rc)
+		goto done;
+
+	if (!dev->poll_mod_count) {
+		dev_dbg(dev->dev, "Polling has been stopped\n");
+		goto done;
+	}
+
+	pn533_poll_next_mod(dev);
+	/* No target found, turn radio off */
+	queue_work(dev->wq, &dev->rf_work);
+
+done:
+	dev_kfree_skb(resp);
+	return rc;
+
+stop_poll:
+	nfc_err(dev->dev, "Polling operation has been stopped\n");
+
+	pn533_poll_reset_mod_list(dev);
+	dev->poll_protocols = 0;
+	return rc;
+}
+
+static struct sk_buff *pn533_alloc_poll_in_frame(struct pn533 *dev,
+					struct pn533_poll_modulations *mod)
+{
+	struct sk_buff *skb;
+
+	skb = pn533_alloc_skb(dev, mod->len);
+	if (!skb)
+		return NULL;
+
+	memcpy(skb_put(skb, mod->len), &mod->data, mod->len);
+
+	return skb;
+}
+
+static int pn533_send_poll_frame(struct pn533 *dev)
+{
+	struct pn533_poll_modulations *mod;
+	struct sk_buff *skb;
+	int rc;
+	u8 cmd_code;
+
+	mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+	dev_dbg(dev->dev, "%s mod len %d\n",
+		__func__, mod->len);
+
+	if ((dev->poll_protocols & NFC_PROTO_NFC_DEP_MASK) && dev->poll_dep)  {
+		dev->poll_dep = 0;
+		return pn533_poll_dep(dev->nfc_dev);
+	}
+
+	if (mod->len == 0) {  /* Listen mode */
+		cmd_code = PN533_CMD_TG_INIT_AS_TARGET;
+		skb = pn533_alloc_poll_tg_frame(dev);
+	} else {  /* Polling mode */
+		cmd_code =  PN533_CMD_IN_LIST_PASSIVE_TARGET;
+		skb = pn533_alloc_poll_in_frame(dev, mod);
+	}
+
+	if (!skb) {
+		nfc_err(dev->dev, "Failed to allocate skb\n");
+		return -ENOMEM;
+	}
+
+	rc = pn533_send_cmd_async(dev, cmd_code, skb, pn533_poll_complete,
+				  NULL);
+	if (rc < 0) {
+		dev_kfree_skb(skb);
+		nfc_err(dev->dev, "Polling loop error %d\n", rc);
+	}
+
+	return rc;
+}
+
+static void pn533_wq_poll(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, poll_work.work);
+	struct pn533_poll_modulations *cur_mod;
+	int rc;
+
+	cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+	dev_dbg(dev->dev,
+		"%s cancel_listen %d modulation len %d\n",
+		__func__, dev->cancel_listen, cur_mod->len);
+
+	if (dev->cancel_listen == 1) {
+		dev->cancel_listen = 0;
+		dev->phy_ops->abort_cmd(dev, GFP_ATOMIC);
+	}
+
+	rc = pn533_send_poll_frame(dev);
+	if (rc)
+		return;
+
+	if (cur_mod->len == 0 && dev->poll_mod_count > 1)
+		mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
+}
+
+static int pn533_start_poll(struct nfc_dev *nfc_dev,
+			    u32 im_protocols, u32 tm_protocols)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	struct pn533_poll_modulations *cur_mod;
+	u8 rand_mod;
+	int rc;
+
+	dev_dbg(dev->dev,
+		"%s: im protocols 0x%x tm protocols 0x%x\n",
+		__func__, im_protocols, tm_protocols);
+
+	if (dev->tgt_active_prot) {
+		nfc_err(dev->dev,
+			"Cannot poll with a target already activated\n");
+		return -EBUSY;
+	}
+
+	if (dev->tgt_mode) {
+		nfc_err(dev->dev,
+			"Cannot poll while already being activated\n");
+		return -EBUSY;
+	}
+
+	if (tm_protocols) {
+		dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
+		if (dev->gb == NULL)
+			tm_protocols = 0;
+	}
+
+	pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
+	dev->poll_protocols = im_protocols;
+	dev->listen_protocols = tm_protocols;
+
+	/* Do not always start polling from the same modulation */
+	get_random_bytes(&rand_mod, sizeof(rand_mod));
+	rand_mod %= dev->poll_mod_count;
+	dev->poll_mod_curr = rand_mod;
+
+	cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+	rc = pn533_send_poll_frame(dev);
+
+	/* Start listen timer */
+	if (!rc && cur_mod->len == 0 && dev->poll_mod_count > 1)
+		mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
+
+	return rc;
+}
+
+static void pn533_stop_poll(struct nfc_dev *nfc_dev)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+
+	del_timer(&dev->listen_timer);
+
+	if (!dev->poll_mod_count) {
+		dev_dbg(dev->dev,
+			"Polling operation was not running\n");
+		return;
+	}
+
+	dev->phy_ops->abort_cmd(dev, GFP_KERNEL);
+	flush_delayed_work(&dev->poll_work);
+	pn533_poll_reset_mod_list(dev);
+}
+
+static int pn533_activate_target_nfcdep(struct pn533 *dev)
+{
+	struct pn533_cmd_activate_response *rsp;
+	u16 gt_len;
+	int rc;
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, sizeof(u8)) = 1; /* TG */
+	*skb_put(skb, sizeof(u8)) = 0; /* Next */
+
+	resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_ATR, skb);
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	rsp = (struct pn533_cmd_activate_response *)resp->data;
+	rc = rsp->status & PN533_CMD_RET_MASK;
+	if (rc != PN533_CMD_RET_SUCCESS) {
+		nfc_err(dev->dev,
+			"Target activation failed (error 0x%x)\n", rc);
+		dev_kfree_skb(resp);
+		return -EIO;
+	}
+
+	/* ATR_RES general bytes are located at offset 16 */
+	gt_len = resp->len - 16;
+	rc = nfc_set_remote_general_bytes(dev->nfc_dev, rsp->gt, gt_len);
+
+	dev_kfree_skb(resp);
+	return rc;
+}
+
+static int pn533_activate_target(struct nfc_dev *nfc_dev,
+				 struct nfc_target *target, u32 protocol)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	int rc;
+
+	dev_dbg(dev->dev, "%s: protocol=%u\n", __func__, protocol);
+
+	if (dev->poll_mod_count) {
+		nfc_err(dev->dev,
+			"Cannot activate while polling\n");
+		return -EBUSY;
+	}
+
+	if (dev->tgt_active_prot) {
+		nfc_err(dev->dev,
+			"There is already an active target\n");
+		return -EBUSY;
+	}
+
+	if (!dev->tgt_available_prots) {
+		nfc_err(dev->dev,
+			"There is no available target to activate\n");
+		return -EINVAL;
+	}
+
+	if (!(dev->tgt_available_prots & (1 << protocol))) {
+		nfc_err(dev->dev,
+			"Target doesn't support requested proto %u\n",
+			protocol);
+		return -EINVAL;
+	}
+
+	if (protocol == NFC_PROTO_NFC_DEP) {
+		rc = pn533_activate_target_nfcdep(dev);
+		if (rc) {
+			nfc_err(dev->dev,
+				"Activating target with DEP failed %d\n", rc);
+			return rc;
+		}
+	}
+
+	dev->tgt_active_prot = protocol;
+	dev->tgt_available_prots = 0;
+
+	return 0;
+}
+
+static int pn533_deactivate_target_complete(struct pn533 *dev, void *arg,
+			     struct sk_buff *resp)
+{
+	int rc = 0;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+
+		nfc_err(dev->dev, "Target release error %d\n", rc);
+
+		return rc;
+	}
+
+	rc = resp->data[0] & PN533_CMD_RET_MASK;
+	if (rc != PN533_CMD_RET_SUCCESS)
+		nfc_err(dev->dev,
+			"Error 0x%x when releasing the target\n", rc);
+
+	dev_kfree_skb(resp);
+	return rc;
+}
+
+static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
+				    struct nfc_target *target, u8 mode)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	struct sk_buff *skb;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (!dev->tgt_active_prot) {
+		nfc_err(dev->dev, "There is no active target\n");
+		return;
+	}
+
+	dev->tgt_active_prot = 0;
+	skb_queue_purge(&dev->resp_q);
+
+	skb = pn533_alloc_skb(dev, sizeof(u8));
+	if (!skb)
+		return;
+
+	*skb_put(skb, 1) = 1; /* TG*/
+
+	rc = pn533_send_cmd_async(dev, PN533_CMD_IN_RELEASE, skb,
+				  pn533_deactivate_target_complete, NULL);
+	if (rc < 0) {
+		dev_kfree_skb(skb);
+		nfc_err(dev->dev, "Target release error %d\n", rc);
+	}
+}
+
+
+static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
+					 struct sk_buff *resp)
+{
+	struct pn533_cmd_jump_dep_response *rsp;
+	u8 target_gt_len;
+	int rc;
+	u8 active = *(u8 *)arg;
+
+	kfree(arg);
+
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	if (dev->tgt_available_prots &&
+	    !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
+		nfc_err(dev->dev,
+			"The target does not support DEP\n");
+		rc =  -EINVAL;
+		goto error;
+	}
+
+	rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
+
+	rc = rsp->status & PN533_CMD_RET_MASK;
+	if (rc != PN533_CMD_RET_SUCCESS) {
+		nfc_err(dev->dev,
+			"Bringing DEP link up failed (error 0x%x)\n", rc);
+		goto error;
+	}
+
+	if (!dev->tgt_available_prots) {
+		struct nfc_target nfc_target;
+
+		dev_dbg(dev->dev, "Creating new target\n");
+
+		nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+		nfc_target.nfcid1_len = 10;
+		memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
+		rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+		if (rc)
+			goto error;
+
+		dev->tgt_available_prots = 0;
+	}
+
+	dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+	/* ATR_RES general bytes are located at offset 17 */
+	target_gt_len = resp->len - 17;
+	rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+					  rsp->gt, target_gt_len);
+	if (rc == 0)
+		rc = nfc_dep_link_is_up(dev->nfc_dev,
+					dev->nfc_dev->targets[0].idx,
+					!active, NFC_RF_INITIATOR);
+
+error:
+	dev_kfree_skb(resp);
+	return rc;
+}
+
+static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf);
+static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
+			     u8 comm_mode, u8 *gb, size_t gb_len)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	struct sk_buff *skb;
+	int rc, skb_len;
+	u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
+	u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (dev->poll_mod_count) {
+		nfc_err(dev->dev,
+			"Cannot bring the DEP link up while polling\n");
+		return -EBUSY;
+	}
+
+	if (dev->tgt_active_prot) {
+		nfc_err(dev->dev,
+			"There is already an active target\n");
+		return -EBUSY;
+	}
+
+	skb_len = 3 + gb_len; /* ActPass + BR + Next */
+	skb_len += PASSIVE_DATA_LEN;
+
+	/* NFCID3 */
+	skb_len += NFC_NFCID3_MAXSIZE;
+	if (target && !target->nfcid2_len) {
+		nfcid3[0] = 0x1;
+		nfcid3[1] = 0xfe;
+		get_random_bytes(nfcid3 + 2, 6);
+	}
+
+	skb = pn533_alloc_skb(dev, skb_len);
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, 1) = !comm_mode;  /* ActPass */
+	*skb_put(skb, 1) = 0x02;  /* 424 kbps */
+
+	next = skb_put(skb, 1);  /* Next */
+	*next = 0;
+
+	/* Copy passive data */
+	memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+	*next |= 1;
+
+	/* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
+	if (target && target->nfcid2_len)
+		memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2,
+		       target->nfcid2_len);
+	else
+		memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
+		       NFC_NFCID3_MAXSIZE);
+	*next |= 2;
+
+	if (gb != NULL && gb_len > 0) {
+		memcpy(skb_put(skb, gb_len), gb, gb_len);
+		*next |= 4; /* We have some Gi */
+	} else {
+		*next = 0;
+	}
+
+	arg = kmalloc(sizeof(*arg), GFP_KERNEL);
+	if (!arg) {
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	*arg = !comm_mode;
+
+	pn533_rf_field(dev->nfc_dev, 0);
+
+	rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
+				  pn533_in_dep_link_up_complete, arg);
+
+	if (rc < 0) {
+		dev_kfree_skb(skb);
+		kfree(arg);
+	}
+
+	return rc;
+}
+
+static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	pn533_poll_reset_mod_list(dev);
+
+	if (dev->tgt_mode || dev->tgt_active_prot)
+		dev->phy_ops->abort_cmd(dev, GFP_KERNEL);
+
+	dev->tgt_active_prot = 0;
+	dev->tgt_mode = 0;
+
+	skb_queue_purge(&dev->resp_q);
+
+	return 0;
+}
+
+struct pn533_data_exchange_arg {
+	data_exchange_cb_t cb;
+	void *cb_context;
+};
+
+static struct sk_buff *pn533_build_response(struct pn533 *dev)
+{
+	struct sk_buff *skb, *tmp, *t;
+	unsigned int skb_len = 0, tmp_len = 0;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (skb_queue_empty(&dev->resp_q))
+		return NULL;
+
+	if (skb_queue_len(&dev->resp_q) == 1) {
+		skb = skb_dequeue(&dev->resp_q);
+		goto out;
+	}
+
+	skb_queue_walk_safe(&dev->resp_q, tmp, t)
+		skb_len += tmp->len;
+
+	dev_dbg(dev->dev, "%s total length %d\n",
+		__func__, skb_len);
+
+	skb = alloc_skb(skb_len, GFP_KERNEL);
+	if (skb == NULL)
+		goto out;
+
+	skb_put(skb, skb_len);
+
+	skb_queue_walk_safe(&dev->resp_q, tmp, t) {
+		memcpy(skb->data + tmp_len, tmp->data, tmp->len);
+		tmp_len += tmp->len;
+	}
+
+out:
+	skb_queue_purge(&dev->resp_q);
+
+	return skb;
+}
+
+static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
+					struct sk_buff *resp)
+{
+	struct pn533_data_exchange_arg *arg = _arg;
+	struct sk_buff *skb;
+	int rc = 0;
+	u8 status, ret, mi;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (IS_ERR(resp)) {
+		rc = PTR_ERR(resp);
+		goto _error;
+	}
+
+	status = resp->data[0];
+	ret = status & PN533_CMD_RET_MASK;
+	mi = status & PN533_CMD_MI_MASK;
+
+	skb_pull(resp, sizeof(status));
+
+	if (ret != PN533_CMD_RET_SUCCESS) {
+		nfc_err(dev->dev,
+			"Exchanging data failed (error 0x%x)\n", ret);
+		rc = -EIO;
+		goto error;
+	}
+
+	skb_queue_tail(&dev->resp_q, resp);
+
+	if (mi) {
+		dev->cmd_complete_mi_arg = arg;
+		queue_work(dev->wq, &dev->mi_rx_work);
+		return -EINPROGRESS;
+	}
+
+	/* Prepare for the next round */
+	if (skb_queue_len(&dev->fragment_skb) > 0) {
+		dev->cmd_complete_dep_arg = arg;
+		queue_work(dev->wq, &dev->mi_tx_work);
+
+		return -EINPROGRESS;
+	}
+
+	skb = pn533_build_response(dev);
+	if (!skb) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	arg->cb(arg->cb_context, skb, 0);
+	kfree(arg);
+	return 0;
+
+error:
+	dev_kfree_skb(resp);
+_error:
+	skb_queue_purge(&dev->resp_q);
+	arg->cb(arg->cb_context, NULL, rc);
+	kfree(arg);
+	return rc;
+}
+
+/*
+ * Receive an incoming pn533 frame. skb contains only header and payload.
+ * If skb == NULL, it is a notification that the link below is dead.
+ */
+void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status)
+{
+	if (!dev->cmd)
+		goto sched_wq;
+
+	dev->cmd->status = status;
+
+	if (status != 0) {
+		dev_dbg(dev->dev, "%s: Error received: %d\n", __func__, status);
+		goto sched_wq;
+	}
+
+	if (skb == NULL) {
+		pr_err("NULL Frame -> link is dead\n");
+		goto sched_wq;
+	}
+
+	if (pn533_rx_frame_is_ack(skb->data)) {
+		dev_dbg(dev->dev, "%s: Received ACK frame\n", __func__);
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, skb->data,
+			     dev->ops->rx_frame_size(skb->data), false);
+
+	if (!dev->ops->rx_is_frame_valid(skb->data, dev)) {
+		nfc_err(dev->dev, "Received an invalid frame\n");
+		dev->cmd->status = -EIO;
+	} else if (!pn533_rx_frame_is_cmd_response(dev, skb->data)) {
+		nfc_err(dev->dev, "It it not the response to the last command\n");
+		dev->cmd->status = -EIO;
+	}
+
+	dev->cmd->resp = skb;
+
+sched_wq:
+	queue_work(dev->wq, &dev->cmd_complete_work);
+}
+EXPORT_SYMBOL(pn533_recv_frame);
+
+/* Split the Tx skb into small chunks */
+static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
+{
+	struct sk_buff *frag;
+	int  frag_size;
+
+	do {
+		/* Remaining size */
+		if (skb->len > PN533_CMD_DATAFRAME_MAXLEN)
+			frag_size = PN533_CMD_DATAFRAME_MAXLEN;
+		else
+			frag_size = skb->len;
+
+		/* Allocate and reserve */
+		frag = pn533_alloc_skb(dev, frag_size);
+		if (!frag) {
+			skb_queue_purge(&dev->fragment_skb);
+			break;
+		}
+
+		if (!dev->tgt_mode) {
+			/* Reserve the TG/MI byte */
+			skb_reserve(frag, 1);
+
+			/* MI + TG */
+			if (frag_size  == PN533_CMD_DATAFRAME_MAXLEN)
+				*skb_push(frag, sizeof(u8)) =
+							(PN533_CMD_MI_MASK | 1);
+			else
+				*skb_push(frag, sizeof(u8)) =  1; /* TG */
+		}
+
+		memcpy(skb_put(frag, frag_size), skb->data, frag_size);
+
+		/* Reduce the size of incoming buffer */
+		skb_pull(skb, frag_size);
+
+		/* Add this to skb_queue */
+		skb_queue_tail(&dev->fragment_skb, frag);
+
+	} while (skb->len > 0);
+
+	dev_kfree_skb(skb);
+
+	return skb_queue_len(&dev->fragment_skb);
+}
+
+static int pn533_transceive(struct nfc_dev *nfc_dev,
+			    struct nfc_target *target, struct sk_buff *skb,
+			    data_exchange_cb_t cb, void *cb_context)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	struct pn533_data_exchange_arg *arg = NULL;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (!dev->tgt_active_prot) {
+		nfc_err(dev->dev,
+			"Can't exchange data if there is no active target\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	arg = kmalloc(sizeof(*arg), GFP_KERNEL);
+	if (!arg) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	arg->cb = cb;
+	arg->cb_context = cb_context;
+
+	switch (dev->device_type) {
+	case PN533_DEVICE_PASORI:
+		if (dev->tgt_active_prot == NFC_PROTO_FELICA) {
+			rc = pn533_send_data_async(dev, PN533_CMD_IN_COMM_THRU,
+						   skb,
+						   pn533_data_exchange_complete,
+						   arg);
+
+			break;
+		}
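+		/* non-FeliCa targets fall through to the generic path */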
+	default:
+		/* jumbo frame ? */
+		if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
+			rc = pn533_fill_fragment_skbs(dev, skb);
+			if (rc <= 0)
+				goto error;
+
+			skb = skb_dequeue(&dev->fragment_skb);
+			if (!skb) {
+				rc = -EIO;
+				goto error;
+			}
+		} else {
+			*skb_push(skb, sizeof(u8)) =  1; /* TG */
+		}
+
+		rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
+					   skb, pn533_data_exchange_complete,
+					   arg);
+
+		break;
+	}
+
+	if (rc < 0) /* rc from send_async */
+		goto error;
+
+	return 0;
+
+error:
+	kfree(arg);
+	dev_kfree_skb(skb);
+	return rc;
+}
+
+static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
+				  struct sk_buff *resp)
+{
+	u8 status;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	status = resp->data[0];
+
+	/* Prepare for the next round */
+	if (skb_queue_len(&dev->fragment_skb) > 0) {
+		queue_work(dev->wq, &dev->mi_tm_tx_work);
+		return -EINPROGRESS;
+	}
+	dev_kfree_skb(resp);
+
+	if (status != 0) {
+		nfc_tm_deactivated(dev->nfc_dev);
+
+		dev->tgt_mode = 0;
+
+		return 0;
+	}
+
+	queue_work(dev->wq, &dev->tg_work);
+
+	return 0;
+}
+
+static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	/* split into multiple chunks if the size is too big */
+	if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
+		rc = pn533_fill_fragment_skbs(dev, skb);
+		if (rc <= 0)
+			goto error;
+
+		/* get the first skb */
+		skb = skb_dequeue(&dev->fragment_skb);
+		if (!skb) {
+			rc = -EIO;
+			goto error;
+		}
+
+		rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_META_DATA, skb,
+						pn533_tm_send_complete, NULL);
+	} else {
+		/* Send the skb */
+		rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
+						pn533_tm_send_complete, NULL);
+	}
+
+error:
+	if (rc < 0) {
+		dev_kfree_skb(skb);
+		skb_queue_purge(&dev->fragment_skb);
+	}
+
+	return rc;
+}
+
+static void pn533_wq_mi_recv(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
+	struct sk_buff *skb;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
+	if (!skb)
+		goto error;
+
+	switch (dev->device_type) {
+	case PN533_DEVICE_PASORI:
+		if (dev->tgt_active_prot == NFC_PROTO_FELICA) {
+			rc = pn533_send_cmd_direct_async(dev,
+						PN533_CMD_IN_COMM_THRU,
+						skb,
+						pn533_data_exchange_complete,
+						 dev->cmd_complete_mi_arg);
+
+			break;
+		}
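+		/* non-FeliCa targets fall through to the generic path */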
+	default:
+		*skb_put(skb, sizeof(u8)) =  1; /*TG*/
+
+		rc = pn533_send_cmd_direct_async(dev,
+						 PN533_CMD_IN_DATA_EXCHANGE,
+						 skb,
+						 pn533_data_exchange_complete,
+						 dev->cmd_complete_mi_arg);
+
+		break;
+	}
+
+	if (rc == 0) /* success */
+		return;
+
+	nfc_err(dev->dev,
+		"Error %d when trying to perform data_exchange\n", rc);
+
+	dev_kfree_skb(skb);
+	kfree(dev->cmd_complete_mi_arg);
+
+error:
+	dev->phy_ops->send_ack(dev, GFP_KERNEL);
+	queue_work(dev->wq, &dev->cmd_work);
+}
+
+static void pn533_wq_mi_send(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, mi_tx_work);
+	struct sk_buff *skb;
+	int rc;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	/* Grab the first skb in the queue */
+	skb = skb_dequeue(&dev->fragment_skb);
+
+	if (skb == NULL) {	/* No more data */
+		/* Reset the queue for future use */
+		skb_queue_head_init(&dev->fragment_skb);
+		goto error;
+	}
+
+	switch (dev->device_type) {
+	case PN533_DEVICE_PASORI:
+		if (dev->tgt_active_prot != NFC_PROTO_FELICA) {
+			rc = -EIO;
+			break;
+		}
+
+		rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_COMM_THRU,
+						 skb,
+						 pn533_data_exchange_complete,
+						 dev->cmd_complete_dep_arg);
+
+		break;
+
+	default:
+		/* Still some fragments? */
+		rc = pn533_send_cmd_direct_async(dev,
+						 PN533_CMD_IN_DATA_EXCHANGE,
+						 skb,
+						 pn533_data_exchange_complete,
+						 dev->cmd_complete_dep_arg);
+
+		break;
+	}
+
+	if (rc == 0) /* success */
+		return;
+
+	nfc_err(dev->dev,
+		"Error %d when trying to perform data_exchange\n", rc);
+
+	dev_kfree_skb(skb);
+	kfree(dev->cmd_complete_dep_arg);
+
+error:
+	dev->phy_ops->send_ack(dev, GFP_KERNEL);
+	queue_work(dev->wq, &dev->cmd_work);
+}
+
+static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
+								u8 cfgdata_len)
+{
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+	int skb_len;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */
+
+	skb = pn533_alloc_skb(dev, skb_len);
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, sizeof(cfgitem)) = cfgitem;
+	memcpy(skb_put(skb, cfgdata_len), cfgdata, cfgdata_len);
+
+	resp = pn533_send_cmd_sync(dev, PN533_CMD_RF_CONFIGURATION, skb);
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	dev_kfree_skb(resp);
+	return 0;
+}
+
+static int pn533_get_firmware_version(struct pn533 *dev,
+				      struct pn533_fw_version *fv)
+{
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+
+	skb = pn533_alloc_skb(dev, 0);
+	if (!skb)
+		return -ENOMEM;
+
+	resp = pn533_send_cmd_sync(dev, PN533_CMD_GET_FIRMWARE_VERSION, skb);
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	fv->ic = resp->data[0];
+	fv->ver = resp->data[1];
+	fv->rev = resp->data[2];
+	fv->support = resp->data[3];
+
+	dev_kfree_skb(resp);
+	return 0;
+}
+
+static int pn533_pasori_fw_reset(struct pn533 *dev)
+{
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+
+	dev_dbg(dev->dev, "%s\n", __func__);
+
+	skb = pn533_alloc_skb(dev, sizeof(u8));
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, sizeof(u8)) = 0x1;
+
+	resp = pn533_send_cmd_sync(dev, 0x18, skb);
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	dev_kfree_skb(resp);
+
+	return 0;
+}
+
+static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	u8 rf_field = !!rf;
+	int rc;
+
+	rf_field |= PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
+
+	rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
+				     (u8 *)&rf_field, 1);
+	if (rc) {
+		nfc_err(dev->dev, "Error on setting RF field\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+static int pn532_sam_configuration(struct nfc_dev *nfc_dev)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+	struct sk_buff *skb;
+	struct sk_buff *resp;
+
+	skb = pn533_alloc_skb(dev, 1);
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, 1) = 0x01;
+
+	resp = pn533_send_cmd_sync(dev, PN533_CMD_SAM_CONFIGURATION, skb);
+	if (IS_ERR(resp))
+		return PTR_ERR(resp);
+
+	dev_kfree_skb(resp);
+	return 0;
+}
+
+static int pn533_dev_up(struct nfc_dev *nfc_dev)
+{
+	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+
+	if (dev->device_type == PN533_DEVICE_PN532) {
+		int rc = pn532_sam_configuration(nfc_dev);
+
+		if (rc)
+			return rc;
+	}
+
+	return pn533_rf_field(nfc_dev, 1);
+}
+
+static int pn533_dev_down(struct nfc_dev *nfc_dev)
+{
+	return pn533_rf_field(nfc_dev, 0);
+}
+
+static struct nfc_ops pn533_nfc_ops = {
+	.dev_up = pn533_dev_up,
+	.dev_down = pn533_dev_down,
+	.dep_link_up = pn533_dep_link_up,
+	.dep_link_down = pn533_dep_link_down,
+	.start_poll = pn533_start_poll,
+	.stop_poll = pn533_stop_poll,
+	.activate_target = pn533_activate_target,
+	.deactivate_target = pn533_deactivate_target,
+	.im_transceive = pn533_transceive,
+	.tm_send = pn533_tm_send,
+};
+
+static int pn533_setup(struct pn533 *dev)
+{
+	struct pn533_config_max_retries max_retries;
+	struct pn533_config_timing timing;
+	u8 pasori_cfg[3] = {0x08, 0x01, 0x08};
+	int rc;
+
+	switch (dev->device_type) {
+	case PN533_DEVICE_STD:
+	case PN533_DEVICE_PASORI:
+	case PN533_DEVICE_ACR122U:
+	case PN533_DEVICE_PN532:
+		max_retries.mx_rty_atr = 0x2;
+		max_retries.mx_rty_psl = 0x1;
+		max_retries.mx_rty_passive_act =
+			PN533_CONFIG_MAX_RETRIES_NO_RETRY;
+
+		timing.rfu = PN533_CONFIG_TIMING_102;
+		timing.atr_res_timeout = PN533_CONFIG_TIMING_102;
+		timing.dep_timeout = PN533_CONFIG_TIMING_204;
+
+		break;
+
+	default:
+		nfc_err(dev->dev, "Unknown device type %d\n",
+			dev->device_type);
+		return -EINVAL;
+	}
+
+	rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
+				     (u8 *)&max_retries, sizeof(max_retries));
+	if (rc) {
+		nfc_err(dev->dev,
+			"Error on setting MAX_RETRIES config\n");
+		return rc;
+	}
+
+
+	rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
+				     (u8 *)&timing, sizeof(timing));
+	if (rc) {
+		nfc_err(dev->dev, "Error on setting RF timings\n");
+		return rc;
+	}
+
+	switch (dev->device_type) {
+	case PN533_DEVICE_STD:
+	case PN533_DEVICE_PN532:
+		break;
+
+	case PN533_DEVICE_PASORI:
+		pn533_pasori_fw_reset(dev);
+
+		rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
+					     pasori_cfg, 3);
+		if (rc) {
+			nfc_err(dev->dev,
+				"Error while settings PASORI config\n");
+			return rc;
+		}
+
+		pn533_pasori_fw_reset(dev);
+
+		break;
+	}
+
+	return 0;
+}
+
+struct pn533 *pn533_register_device(u32 device_type,
+				u32 protocols,
+				enum pn533_protocol_type protocol_type,
+				void *phy,
+				struct pn533_phy_ops *phy_ops,
+				struct pn533_frame_ops *fops,
+				struct device *dev,
+				struct device *parent)
+{
+	struct pn533_fw_version fw_ver;
+	struct pn533 *priv;
+	int rc = -ENOMEM;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return ERR_PTR(-ENOMEM);
+
+	priv->phy = phy;
+	priv->phy_ops = phy_ops;
+	priv->dev = dev;
+	if (fops != NULL)
+		priv->ops = fops;
+	else
+		priv->ops = &pn533_std_frame_ops;
+
+	priv->protocol_type = protocol_type;
+	priv->device_type = device_type;
+
+	mutex_init(&priv->cmd_lock);
+
+	INIT_WORK(&priv->cmd_work, pn533_wq_cmd);
+	INIT_WORK(&priv->cmd_complete_work, pn533_wq_cmd_complete);
+	INIT_WORK(&priv->mi_rx_work, pn533_wq_mi_recv);
+	INIT_WORK(&priv->mi_tx_work, pn533_wq_mi_send);
+	INIT_WORK(&priv->tg_work, pn533_wq_tg_get_data);
+	INIT_WORK(&priv->mi_tm_rx_work, pn533_wq_tm_mi_recv);
+	INIT_WORK(&priv->mi_tm_tx_work, pn533_wq_tm_mi_send);
+	INIT_DELAYED_WORK(&priv->poll_work, pn533_wq_poll);
+	INIT_WORK(&priv->rf_work, pn533_wq_rf);
+	priv->wq = alloc_ordered_workqueue("pn533", 0);
+	if (priv->wq == NULL)
+		goto error;
+
+	init_timer(&priv->listen_timer);
+	priv->listen_timer.data = (unsigned long) priv;
+	priv->listen_timer.function = pn533_listen_mode_timer;
+
+	skb_queue_head_init(&priv->resp_q);
+	skb_queue_head_init(&priv->fragment_skb);
+
+	INIT_LIST_HEAD(&priv->cmd_queue);
+
+	memset(&fw_ver, 0, sizeof(fw_ver));
+	rc = pn533_get_firmware_version(priv, &fw_ver);
+	if (rc < 0)
+		goto destroy_wq;
+
+	nfc_info(dev, "NXP PN5%02X firmware ver %d.%d now attached\n",
+		 fw_ver.ic, fw_ver.ver, fw_ver.rev);
+
+
+	priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
+					   priv->ops->tx_header_len +
+					   PN533_CMD_DATAEXCH_HEAD_LEN,
+					   priv->ops->tx_tail_len);
+	if (!priv->nfc_dev) {
+		rc = -ENOMEM;
+		goto destroy_wq;
+	}
+
+	nfc_set_parent_dev(priv->nfc_dev, parent);
+	nfc_set_drvdata(priv->nfc_dev, priv);
+
+	rc = nfc_register_device(priv->nfc_dev);
+	if (rc)
+		goto free_nfc_dev;
+
+	rc = pn533_setup(priv);
+	if (rc)
+		goto unregister_nfc_dev;
+
+	return priv;
+
+unregister_nfc_dev:
+	nfc_unregister_device(priv->nfc_dev);
+
+free_nfc_dev:
+	nfc_free_device(priv->nfc_dev);
+
+destroy_wq:
+	destroy_workqueue(priv->wq);
+error:
+	kfree(priv);
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(pn533_register_device);
+
+void pn533_unregister_device(struct pn533 *priv)
+{
+	struct pn533_cmd *cmd, *n;
+
+	nfc_unregister_device(priv->nfc_dev);
+	nfc_free_device(priv->nfc_dev);
+
+	flush_delayed_work(&priv->poll_work);
+	destroy_workqueue(priv->wq);
+
+	skb_queue_purge(&priv->resp_q);
+
+	del_timer(&priv->listen_timer);
+
+	list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
+		list_del(&cmd->queue);
+		kfree(cmd);
+	}
+
+	kfree(priv);
+}
+EXPORT_SYMBOL_GPL(pn533_unregister_device);
+
+
+MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
+MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
+MODULE_AUTHOR("Waldemar Rymarkiewicz <waldemar.rymarkiewicz@tieto.com>");
+MODULE_DESCRIPTION("PN533 driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h
new file mode 100644
index 0000000..553c7d1
--- /dev/null
+++ b/drivers/nfc/pn533/pn533.h
@@ -0,0 +1,238 @@
+/*
+ * Driver for NXP PN533 NFC Chip
+ *
+ * Copyright (C) 2011 Instituto Nokia de Tecnologia
+ * Copyright (C) 2012-2013 Tieto Poland
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define PN533_DEVICE_STD     0x1
+#define PN533_DEVICE_PASORI  0x2
+#define PN533_DEVICE_ACR122U 0x3
+#define PN533_DEVICE_PN532   0x4
+
+#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
+			     NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
+			     NFC_PROTO_NFC_DEP_MASK |\
+			     NFC_PROTO_ISO14443_B_MASK)
+
+#define PN533_NO_TYPE_B_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+				   NFC_PROTO_MIFARE_MASK | \
+				   NFC_PROTO_FELICA_MASK | \
+				   NFC_PROTO_ISO14443_MASK | \
+				   NFC_PROTO_NFC_DEP_MASK)
+
+/* Standard pn533 frame definitions (standard and extended)*/
+#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \
+					+ 2) /* data[0] TFI, data[1] CC */
+#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/
+
+#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \
+					+ 2) /* data[0] TFI, data[1] CC */
+
+#define PN533_CMD_DATAEXCH_HEAD_LEN 1
+#define PN533_CMD_DATAEXCH_DATA_MAXLEN	262
+#define PN533_CMD_DATAFRAME_MAXLEN	240	/* max data length (send) */
+
+/*
+ * Max extended frame payload len, excluding TFI and CC
+ * which are already in PN533_FRAME_HEADER_LEN.
+ */
+#define PN533_STD_FRAME_MAX_PAYLOAD_LEN 263
+
+
+/* Preamble (1), SoPC (2), ACK Code (2), Postamble (1) */
+#define PN533_STD_FRAME_ACK_SIZE 6
+#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
+#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
+/* Half start code (3), LEN (4) should be 0xffff for extended frame */
+#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \
+					&& (hdr)->datalen_checksum == 0xFF)
+#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)])
+
+/* start of frame */
+#define PN533_STD_FRAME_SOF 0x00FF
+
+/* standard frame identifier: in/out/error */
+#define PN533_STD_FRAME_IDENTIFIER(f) (f->data[0]) /* TFI */
+#define PN533_STD_FRAME_DIR_OUT 0xD4
+#define PN533_STD_FRAME_DIR_IN 0xD5
+
+/* PN533 Commands */
+#define PN533_FRAME_CMD(f) (f->data[1])
+
+#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
+#define PN533_CMD_SAM_CONFIGURATION 0x14
+#define PN533_CMD_RF_CONFIGURATION 0x32
+#define PN533_CMD_IN_DATA_EXCHANGE 0x40
+#define PN533_CMD_IN_COMM_THRU     0x42
+#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
+#define PN533_CMD_IN_ATR 0x50
+#define PN533_CMD_IN_RELEASE 0x52
+#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
+
+#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
+#define PN533_CMD_TG_GET_DATA 0x86
+#define PN533_CMD_TG_SET_DATA 0x8e
+#define PN533_CMD_TG_SET_META_DATA 0x94
+#define PN533_CMD_UNDEF 0xff
+
+#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
+
+/* PN533 Return codes */
+#define PN533_CMD_RET_MASK 0x3F
+#define PN533_CMD_MI_MASK 0x40
+#define PN533_CMD_RET_SUCCESS 0x00
+
+
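+/*
+ * REQ_ACK_RESP: the chip acknowledges each command with an ACK frame
+ * before sending the response (standard PN533 behaviour).
+ * REQ_RESP: the response follows the command directly, with no ACK
+ * frame in between (e.g. ACR122U).
+ */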
+enum pn533_protocol_type {
+	PN533_PROTO_REQ_ACK_RESP = 0,
+	PN533_PROTO_REQ_RESP
+};
+
+/* Poll modulations */
+enum {
+	PN533_POLL_MOD_106KBPS_A,
+	PN533_POLL_MOD_212KBPS_FELICA,
+	PN533_POLL_MOD_424KBPS_FELICA,
+	PN533_POLL_MOD_106KBPS_JEWEL,
+	PN533_POLL_MOD_847KBPS_B,
+	PN533_LISTEN_MOD,
+
+	__PN533_POLL_MOD_AFTER_LAST,
+};
+#define PN533_POLL_MOD_MAX (__PN533_POLL_MOD_AFTER_LAST - 1)
+
+struct pn533_std_frame {
+	u8 preamble;
+	__be16 start_frame;
+	u8 datalen;
+	u8 datalen_checksum;
+	u8 data[];
+} __packed;
+
+struct pn533_ext_frame {	/* Extended Information frame */
+	u8 preamble;
+	__be16 start_frame;
+	__be16 eif_flag;	/* fixed to 0xFFFF */
+	__be16 datalen;
+	u8 datalen_checksum;
+	u8 data[];
+} __packed;
+
+struct pn533 {
+	struct nfc_dev *nfc_dev;
+	u32 device_type;
+	enum pn533_protocol_type protocol_type;
+
+	struct sk_buff_head resp_q;
+	struct sk_buff_head fragment_skb;
+
+	struct workqueue_struct	*wq;
+	struct work_struct cmd_work;
+	struct work_struct cmd_complete_work;
+	struct delayed_work poll_work;
+	struct work_struct mi_rx_work;
+	struct work_struct mi_tx_work;
+	struct work_struct mi_tm_rx_work;
+	struct work_struct mi_tm_tx_work;
+	struct work_struct tg_work;
+	struct work_struct rf_work;
+
+	struct list_head cmd_queue;
+	struct pn533_cmd *cmd;
+	u8 cmd_pending;
+	struct mutex cmd_lock;  /* protects cmd queue */
+
+	void *cmd_complete_mi_arg;
+	void *cmd_complete_dep_arg;
+
+	struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
+	u8 poll_mod_count;
+	u8 poll_mod_curr;
+	u8 poll_dep;
+	u32 poll_protocols;
+	u32 listen_protocols;
+	struct timer_list listen_timer;
+	int cancel_listen;
+
+	u8 *gb;
+	size_t gb_len;
+
+	u8 tgt_available_prots;
+	u8 tgt_active_prot;
+	u8 tgt_mode;
+
+	struct pn533_frame_ops *ops;
+
+	struct device *dev;
+	void *phy;
+	struct pn533_phy_ops *phy_ops;
+};
+
+typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg,
+					struct sk_buff *resp);
+
+struct pn533_cmd {
+	struct list_head queue;
+	u8 code;
+	int status;
+	struct sk_buff *req;
+	struct sk_buff *resp;
+	pn533_send_async_complete_t  complete_cb;
+	void *complete_cb_context;
+};
+
+
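+/*
+ * Frame format hooks: build and finalize outgoing frames of a given
+ * layout (standard, extended or ACR122 CCID-wrapped) and validate,
+ * size and parse incoming ones.
+ */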
+struct pn533_frame_ops {
+	void (*tx_frame_init)(void *frame, u8 cmd_code);
+	void (*tx_frame_finish)(void *frame);
+	void (*tx_update_payload_len)(void *frame, int len);
+	int tx_header_len;
+	int tx_tail_len;
+
+	bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev);
+	bool (*rx_frame_is_ack)(void *frame);
+	int (*rx_frame_size)(void *frame);
+	int rx_header_len;
+	int rx_tail_len;
+
+	int max_payload_len;
+	u8 (*get_cmd_code)(void *frame);
+};
+
+
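+/*
+ * Transport hooks provided by the physical layer (e.g. USB):
+ * send_frame transmits a complete frame, send_ack sends an ACK frame
+ * and abort_cmd cancels the command currently being processed.
+ */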
+struct pn533_phy_ops {
+	int (*send_frame)(struct pn533 *priv,
+			  struct sk_buff *out);
+	int (*send_ack)(struct pn533 *dev, gfp_t flags);
+	void (*abort_cmd)(struct pn533 *priv, gfp_t flags);
+};
+
+
+struct pn533 *pn533_register_device(u32 device_type,
+				u32 protocols,
+				enum pn533_protocol_type protocol_type,
+				void *phy,
+				struct pn533_phy_ops *phy_ops,
+				struct pn533_frame_ops *fops,
+				struct device *dev,
+				struct device *parent);
+
+void pn533_unregister_device(struct pn533 *priv);
+void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status);
+
+bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame);
+bool pn533_rx_frame_is_ack(void *_frame);
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
new file mode 100644
index 0000000..8ca0603
--- /dev/null
+++ b/drivers/nfc/pn533/usb.c
@@ -0,0 +1,597 @@
+/*
+ * Driver for NXP PN533 NFC Chip - USB transport layer
+ *
+ * Copyright (C) 2011 Instituto Nokia de Tecnologia
+ * Copyright (C) 2012-2013 Tieto Poland
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <net/nfc/nfc.h>
+#include "pn533.h"
+
+#define VERSION "0.1"
+
+#define PN533_VENDOR_ID 0x4CC
+#define PN533_PRODUCT_ID 0x2533
+
+#define SCM_VENDOR_ID 0x4E6
+#define SCL3711_PRODUCT_ID 0x5591
+
+#define SONY_VENDOR_ID         0x054c
+#define PASORI_PRODUCT_ID      0x02e1
+
+#define ACS_VENDOR_ID 0x072f
+#define ACR122U_PRODUCT_ID 0x2200
+
+static const struct usb_device_id pn533_usb_table[] = {
+	{ USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID),
+	  .driver_info = PN533_DEVICE_STD },
+	{ USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID),
+	  .driver_info = PN533_DEVICE_STD },
+	{ USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID),
+	  .driver_info = PN533_DEVICE_PASORI },
+	{ USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID),
+	  .driver_info = PN533_DEVICE_ACR122U },
+	{ }
+};
+MODULE_DEVICE_TABLE(usb, pn533_usb_table);
+
+struct pn533_usb_phy {
+	struct usb_device *udev;
+	struct usb_interface *interface;
+
+	struct urb *out_urb;
+	struct urb *in_urb;
+
+	struct pn533 *priv;
+};
+
+static void pn533_recv_response(struct urb *urb)
+{
+	struct pn533_usb_phy *phy = urb->context;
+	struct sk_buff *skb = NULL;
+
+	if (!urb->status) {
+		skb = alloc_skb(urb->actual_length, GFP_KERNEL);
+		if (!skb) {
+			nfc_err(&phy->udev->dev, "failed to alloc memory\n");
+		} else {
+			memcpy(skb_put(skb, urb->actual_length),
+			       urb->transfer_buffer, urb->actual_length);
+		}
+	}
+
+	pn533_recv_frame(phy->priv, skb, urb->status);
+}
+
+static int pn533_submit_urb_for_response(struct pn533_usb_phy *phy, gfp_t flags)
+{
+	phy->in_urb->complete = pn533_recv_response;
+
+	return usb_submit_urb(phy->in_urb, flags);
+}
+
+static void pn533_recv_ack(struct urb *urb)
+{
+	struct pn533_usb_phy *phy = urb->context;
+	struct pn533 *priv = phy->priv;
+	struct pn533_cmd *cmd = priv->cmd;
+	struct pn533_std_frame *in_frame;
+	int rc;
+
+	cmd->status = urb->status;
+
+	switch (urb->status) {
+	case 0:
+		break; /* success */
+	case -ECONNRESET:
+	case -ENOENT:
+		dev_dbg(&phy->udev->dev,
+			"The urb has been stopped (status %d)\n",
+			urb->status);
+		goto sched_wq;
+	case -ESHUTDOWN:
+	default:
+		nfc_err(&phy->udev->dev,
+			"Urb failure (status %d)\n", urb->status);
+		goto sched_wq;
+	}
+
+	in_frame = phy->in_urb->transfer_buffer;
+
+	if (!pn533_rx_frame_is_ack(in_frame)) {
+		nfc_err(&phy->udev->dev, "Received an invalid ack\n");
+		cmd->status = -EIO;
+		goto sched_wq;
+	}
+
+	rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+	if (rc) {
+		nfc_err(&phy->udev->dev,
+			"usb_submit_urb failed with result %d\n", rc);
+		cmd->status = rc;
+		goto sched_wq;
+	}
+
+	return;
+
+sched_wq:
+	queue_work(priv->wq, &priv->cmd_complete_work);
+}
+
+static int pn533_submit_urb_for_ack(struct pn533_usb_phy *phy, gfp_t flags)
+{
+	phy->in_urb->complete = pn533_recv_ack;
+
+	return usb_submit_urb(phy->in_urb, flags);
+}
+
+static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
+{
+	struct pn533_usb_phy *phy = dev->phy;
+	u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+	/* spec 7.1.1.3:  Preamble, SoPC (2), ACK Code (2), Postamble */
+	int rc;
+
+	phy->out_urb->transfer_buffer = ack;
+	phy->out_urb->transfer_buffer_length = sizeof(ack);
+	rc = usb_submit_urb(phy->out_urb, flags);
+
+	return rc;
+}
+
+static int pn533_usb_send_frame(struct pn533 *dev,
+				struct sk_buff *out)
+{
+	struct pn533_usb_phy *phy = dev->phy;
+	int rc;
+
+	if (phy->priv == NULL)
+		phy->priv = dev;
+
+	phy->out_urb->transfer_buffer = out->data;
+	phy->out_urb->transfer_buffer_length = out->len;
+
+	print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
+			     out->data, out->len, false);
+
+	rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+	if (rc)
+		return rc;
+
+	if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
+		/* request for response for sent packet directly */
+		rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+		if (rc)
+			goto error;
+	} else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
+		/* request for ACK if that's the case */
+		rc = pn533_submit_urb_for_ack(phy, GFP_KERNEL);
+		if (rc)
+			goto error;
+	}
+
+	return 0;
+
+error:
+	usb_unlink_urb(phy->out_urb);
+	return rc;
+}
+
+static void pn533_usb_abort_cmd(struct pn533 *dev, gfp_t flags)
+{
+	struct pn533_usb_phy *phy = dev->phy;
+
+	/* ACR122U does not support any command that aborts the last
+	 * issued command (i.e. an ACK for the standard PN533). Additionally,
+	 * it behaves strangely, sending broken or incorrect responses,
+	 * if we cancel the urb before the chip has sent its response.
+	 */
+	if (dev->device_type == PN533_DEVICE_ACR122U)
+		return;
+
+	/* An ack will cancel the last issued command */
+	pn533_usb_send_ack(dev, flags);
+
+	/* cancel the urb request */
+	usb_kill_urb(phy->in_urb);
+}
+
+/* ACR122-specific structs and functions */
+
+/* ACS ACR122 pn533 frame definitions */
+#define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \
+					  + 2)
+#define PN533_ACR122_TX_FRAME_TAIL_LEN 0
+#define PN533_ACR122_RX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_rx_frame) \
+					  + 2)
+#define PN533_ACR122_RX_FRAME_TAIL_LEN 2
+#define PN533_ACR122_FRAME_MAX_PAYLOAD_LEN PN533_STD_FRAME_MAX_PAYLOAD_LEN
+
+/* CCID messages types */
+#define PN533_ACR122_PC_TO_RDR_ICCPOWERON 0x62
+#define PN533_ACR122_PC_TO_RDR_ESCAPE 0x6B
+
+#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83
+
+
+struct pn533_acr122_ccid_hdr {
+	u8 type;
+	u32 datalen;
+	u8 slot;
+	u8 seq;
+
+	/*
+	 * 3 message-specific bytes, or status, error and 1 specific
+	 * byte for a response message
+	 */
+	u8 params[3];
+	u8 data[]; /* payload */
+} __packed;
+
+struct pn533_acr122_apdu_hdr {
+	u8 class;
+	u8 ins;
+	u8 p1;
+	u8 p2;
+} __packed;
+
+struct pn533_acr122_tx_frame {
+	struct pn533_acr122_ccid_hdr ccid;
+	struct pn533_acr122_apdu_hdr apdu;
+	u8 datalen;
+	u8 data[]; /* pn533 frame: TFI ... */
+} __packed;
+
+struct pn533_acr122_rx_frame {
+	struct pn533_acr122_ccid_hdr ccid;
+	u8 data[]; /* pn533 frame : TFI ... */
+} __packed;
+
+static void pn533_acr122_tx_frame_init(void *_frame, u8 cmd_code)
+{
+	struct pn533_acr122_tx_frame *frame = _frame;
+
+	frame->ccid.type = PN533_ACR122_PC_TO_RDR_ESCAPE;
+	/* sizeof(apdu_hdr) + sizeof(datalen) */
+	frame->ccid.datalen = sizeof(frame->apdu) + 1;
+	frame->ccid.slot = 0;
+	frame->ccid.seq = 0;
+	frame->ccid.params[0] = 0;
+	frame->ccid.params[1] = 0;
+	frame->ccid.params[2] = 0;
+
+	frame->data[0] = PN533_STD_FRAME_DIR_OUT;
+	frame->data[1] = cmd_code;
+	frame->datalen = 2;  /* data[0] + data[1] */
+
+	frame->apdu.class = 0xFF;
+	frame->apdu.ins = 0;
+	frame->apdu.p1 = 0;
+	frame->apdu.p2 = 0;
+}
+
+static void pn533_acr122_tx_frame_finish(void *_frame)
+{
+	struct pn533_acr122_tx_frame *frame = _frame;
+
+	frame->ccid.datalen += frame->datalen;
+}
+
+static void pn533_acr122_tx_update_payload_len(void *_frame, int len)
+{
+	struct pn533_acr122_tx_frame *frame = _frame;
+
+	frame->datalen += len;
+}
+
+static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
+{
+	struct pn533_acr122_rx_frame *frame = _frame;
+
+	if (frame->ccid.type != PN533_ACR122_RDR_TO_PC_ESCAPE)
+		return false;
+
+	if (!frame->ccid.datalen)
+		return false;
+
+	if (frame->data[frame->ccid.datalen - 2] == 0x63)
+		return false;
+
+	return true;
+}
+
+static int pn533_acr122_rx_frame_size(void *frame)
+{
+	struct pn533_acr122_rx_frame *f = frame;
+
+	/* f->ccid.datalen already includes tail length */
+	return sizeof(struct pn533_acr122_rx_frame) + f->ccid.datalen;
+}
+
+static u8 pn533_acr122_get_cmd_code(void *frame)
+{
+	struct pn533_acr122_rx_frame *f = frame;
+
+	return PN533_FRAME_CMD(f);
+}
+
+static struct pn533_frame_ops pn533_acr122_frame_ops = {
+	.tx_frame_init = pn533_acr122_tx_frame_init,
+	.tx_frame_finish = pn533_acr122_tx_frame_finish,
+	.tx_update_payload_len = pn533_acr122_tx_update_payload_len,
+	.tx_header_len = PN533_ACR122_TX_FRAME_HEADER_LEN,
+	.tx_tail_len = PN533_ACR122_TX_FRAME_TAIL_LEN,
+
+	.rx_is_frame_valid = pn533_acr122_is_rx_frame_valid,
+	.rx_header_len = PN533_ACR122_RX_FRAME_HEADER_LEN,
+	.rx_tail_len = PN533_ACR122_RX_FRAME_TAIL_LEN,
+	.rx_frame_size = pn533_acr122_rx_frame_size,
+
+	.max_payload_len = PN533_ACR122_FRAME_MAX_PAYLOAD_LEN,
+	.get_cmd_code = pn533_acr122_get_cmd_code,
+};
+
+struct pn533_acr122_poweron_rdr_arg {
+	int rc;
+	struct completion done;
+};
+
+static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
+{
+	struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
+
+	dev_dbg(&urb->dev->dev, "%s\n", __func__);
+
+	print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
+		       urb->transfer_buffer, urb->transfer_buffer_length,
+		       false);
+
+	arg->rc = urb->status;
+	complete(&arg->done);
+}
+
+static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
+{
+	/* Power on the reader (CCID cmd) */
+	u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
+		      0, 0, 0, 0, 0, 0, 3, 0, 0};
+	int rc;
+	void *cntx;
+	struct pn533_acr122_poweron_rdr_arg arg;
+
+	dev_dbg(&phy->udev->dev, "%s\n", __func__);
+
+	init_completion(&arg.done);
+	cntx = phy->in_urb->context;  /* backup context */
+
+	phy->in_urb->complete = pn533_acr122_poweron_rdr_resp;
+	phy->in_urb->context = &arg;
+
+	phy->out_urb->transfer_buffer = cmd;
+	phy->out_urb->transfer_buffer_length = sizeof(cmd);
+
+	print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
+		       cmd, sizeof(cmd), false);
+
+	rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+	if (rc) {
+		nfc_err(&phy->udev->dev,
+			"Reader power on cmd error %d\n", rc);
+		return rc;
+	}
+
+	rc =  usb_submit_urb(phy->in_urb, GFP_KERNEL);
+	if (rc) {
+		nfc_err(&phy->udev->dev,
+			"Can't submit reader poweron cmd response %d\n", rc);
+		return rc;
+	}
+
+	wait_for_completion(&arg.done);
+	phy->in_urb->context = cntx; /* restore context */
+
+	return arg.rc;
+}
+
+static void pn533_send_complete(struct urb *urb)
+{
+	struct pn533_usb_phy *phy = urb->context;
+
+	switch (urb->status) {
+	case 0:
+		break; /* success */
+	case -ECONNRESET:
+	case -ENOENT:
+		dev_dbg(&phy->udev->dev,
+			"The urb has been stopped (status %d)\n",
+			urb->status);
+		break;
+	case -ESHUTDOWN:
+	default:
+		nfc_err(&phy->udev->dev,
+			"Urb failure (status %d)\n",
+			urb->status);
+	}
+}
+
+static struct pn533_phy_ops usb_phy_ops = {
+	.send_frame = pn533_usb_send_frame,
+	.send_ack = pn533_usb_send_ack,
+	.abort_cmd = pn533_usb_abort_cmd,
+};
+
+static int pn533_usb_probe(struct usb_interface *interface,
+			const struct usb_device_id *id)
+{
+	struct pn533 *priv;
+	struct pn533_usb_phy *phy;
+	struct usb_host_interface *iface_desc;
+	struct usb_endpoint_descriptor *endpoint;
+	int in_endpoint = 0;
+	int out_endpoint = 0;
+	int rc = -ENOMEM;
+	int i;
+	u32 protocols;
+	enum pn533_protocol_type protocol_type = PN533_PROTO_REQ_ACK_RESP;
+	struct pn533_frame_ops *fops = NULL;
+	unsigned char *in_buf;
+	int in_buf_len = PN533_EXT_FRAME_HEADER_LEN +
+			 PN533_STD_FRAME_MAX_PAYLOAD_LEN +
+			 PN533_STD_FRAME_TAIL_LEN;
+
+	phy = devm_kzalloc(&interface->dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	in_buf = kzalloc(in_buf_len, GFP_KERNEL);
+	if (!in_buf) {
+		rc = -ENOMEM;
+		goto out_free_phy;
+	}
+
+	phy->udev = usb_get_dev(interface_to_usbdev(interface));
+	phy->interface = interface;
+
+	iface_desc = interface->cur_altsetting;
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+		endpoint = &iface_desc->endpoint[i].desc;
+
+		if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint))
+			in_endpoint = endpoint->bEndpointAddress;
+
+		if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint))
+			out_endpoint = endpoint->bEndpointAddress;
+	}
+
+	if (!in_endpoint || !out_endpoint) {
+		nfc_err(&interface->dev,
+			"Could not find bulk-in or bulk-out endpoint\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	phy->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+	phy->out_urb = usb_alloc_urb(0, GFP_KERNEL);
+
+	if (!phy->in_urb || !phy->out_urb)
+		goto error;
+
+	usb_fill_bulk_urb(phy->in_urb, phy->udev,
+			  usb_rcvbulkpipe(phy->udev, in_endpoint),
+			  in_buf, in_buf_len, NULL, phy);
+
+	usb_fill_bulk_urb(phy->out_urb, phy->udev,
+			  usb_sndbulkpipe(phy->udev, out_endpoint),
+			  NULL, 0, pn533_send_complete, phy);
+
+
+	switch (id->driver_info) {
+	case PN533_DEVICE_STD:
+		protocols = PN533_ALL_PROTOCOLS;
+		break;
+
+	case PN533_DEVICE_PASORI:
+		protocols = PN533_NO_TYPE_B_PROTOCOLS;
+		break;
+
+	case PN533_DEVICE_ACR122U:
+		protocols = PN533_NO_TYPE_B_PROTOCOLS;
+		fops = &pn533_acr122_frame_ops;
+		protocol_type = PN533_PROTO_REQ_RESP;
+
+		rc = pn533_acr122_poweron_rdr(phy);
+		if (rc < 0) {
+			nfc_err(&interface->dev,
+				"Couldn't poweron the reader (error %d)\n", rc);
+			goto error;
+		}
+		break;
+
+	default:
+		nfc_err(&interface->dev, "Unknown device type %lu\n",
+			id->driver_info);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	priv = pn533_register_device(id->driver_info, protocols, protocol_type,
+					phy, &usb_phy_ops, fops,
+					&phy->udev->dev, &interface->dev);
+
+	if (IS_ERR(priv)) {
+		rc = PTR_ERR(priv);
+		goto error;
+	}
+
+	phy->priv = priv;
+
+	usb_set_intfdata(interface, phy);
+
+	return 0;
+
+error:
+	usb_free_urb(phy->in_urb);
+	usb_free_urb(phy->out_urb);
+	usb_put_dev(phy->udev);
+	kfree(in_buf);
+out_free_phy:
+	kfree(phy);
+	return rc;
+}
+
+static void pn533_usb_disconnect(struct usb_interface *interface)
+{
+	struct pn533_usb_phy *phy = usb_get_intfdata(interface);
+
+	if (!phy)
+		return;
+
+	pn533_unregister_device(phy->priv);
+
+	usb_set_intfdata(interface, NULL);
+
+	usb_kill_urb(phy->in_urb);
+	usb_kill_urb(phy->out_urb);
+
+	kfree(phy->in_urb->transfer_buffer);
+	usb_free_urb(phy->in_urb);
+	usb_free_urb(phy->out_urb);
+
+	nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
+}
+
+static struct usb_driver pn533_usb_driver = {
+	.name =		"pn533_usb",
+	.probe =	pn533_usb_probe,
+	.disconnect =	pn533_usb_disconnect,
+	.id_table =	pn533_usb_table,
+};
+
+module_usb_driver(pn533_usb_driver);
+
+MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
+MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
+MODULE_AUTHOR("Waldemar Rymarkiewicz <waldemar.rymarkiewicz@tieto.com>");
+MODULE_DESCRIPTION("PN533 USB driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 45d0e66..f837c39 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -1106,7 +1106,6 @@
 static struct i2c_driver pn544_hci_i2c_driver = {
 	.driver = {
 		   .name = PN544_HCI_I2C_DRIVER_NAME,
-		   .owner  = THIS_MODULE,
 		   .of_match_table = of_match_ptr(of_pn544_i2c_match),
 		   .acpi_match_table = ACPI_PTR(pn544_hci_i2c_acpi_match),
 		  },
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 8a56b5c..9dfae0e 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -42,7 +42,7 @@
 
 #define ST_NCI_I2C_DRIVER_NAME "st_nci_i2c"
 
-#define ST_NCI_GPIO_NAME_RESET "clf_reset"
+#define ST_NCI_GPIO_NAME_RESET "reset"
 
 struct st_nci_i2c_phy {
 	struct i2c_client *i2c_dev;
@@ -211,19 +211,9 @@
 static int st_nci_i2c_acpi_request_resources(struct i2c_client *client)
 {
 	struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
-	const struct acpi_device_id *id;
 	struct gpio_desc *gpiod_reset;
-	struct device *dev;
-
-	if (!client)
-		return -EINVAL;
-
-	dev = &client->dev;
-
-	/* Match the struct device against a given list of ACPI IDs */
-	id = acpi_match_device(dev->driver->acpi_match_table, dev);
-	if (!id)
-		return -ENODEV;
+	struct device *dev = &client->dev;
+	u8 tmp;
 
 	/* Get RESET GPIO from ACPI */
 	gpiod_reset = devm_gpiod_get_index(dev, ST_NCI_GPIO_NAME_RESET, 1,
@@ -237,10 +227,18 @@
 
 	phy->irq_polarity = irq_get_trigger_type(client->irq);
 
-	phy->se_status.is_ese_present =
-				device_property_present(dev, "ese-present");
-	phy->se_status.is_uicc_present =
-				device_property_present(dev, "uicc-present");
+	phy->se_status.is_ese_present = false;
+	phy->se_status.is_uicc_present = false;
+
+	if (device_property_present(dev, "ese-present")) {
+		device_property_read_u8(dev, "ese-present", &tmp);
+		phy->se_status.is_ese_present = tmp;
+	}
+
+	if (device_property_present(dev, "uicc-present")) {
+		device_property_read_u8(dev, "uicc-present", &tmp);
+		phy->se_status.is_uicc_present = tmp;
+	}
 
 	return 0;
 }
@@ -416,7 +414,6 @@
 
 static struct i2c_driver st_nci_i2c_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = ST_NCI_I2C_DRIVER_NAME,
 		.of_match_table = of_match_ptr(of_st_nci_i2c_match),
 		.acpi_match_table = ACPI_PTR(st_nci_i2c_acpi_match),
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index a53e5df..56f2112 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -113,8 +113,6 @@
 
 	{NCI_HCI_IDENTITY_MGMT_GATE, NCI_HCI_INVALID_PIPE,
 					ST_NCI_HOST_CONTROLLER_ID},
-	{NCI_HCI_LOOPBACK_GATE, NCI_HCI_INVALID_PIPE,
-					ST_NCI_HOST_CONTROLLER_ID},
 
 	/* Secure element pipes are created by secure element host */
 	{ST_NCI_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
@@ -222,7 +220,7 @@
 		 */
 		dm_pipe_info = (struct st_nci_pipe_info *)skb_pipe_info->data;
 		if (dm_pipe_info->dst_gate_id == ST_NCI_APDU_READER_GATE &&
-		    dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) {
+		    dm_pipe_info->src_host_id == ST_NCI_UICC_HOST_ID) {
 			pr_err("Unexpected apdu_reader pipe on host %x\n",
 			       dm_pipe_info->src_host_id);
 			kfree_skb(skb_pipe_info);
@@ -385,9 +383,6 @@
 	case ST_NCI_CONNECTIVITY_GATE:
 		st_nci_hci_connectivity_event_received(ndev, host, event, skb);
 	break;
-	case NCI_HCI_LOOPBACK_GATE:
-		st_nci_hci_loopback_event_received(ndev, event, skb);
-	break;
 	}
 }
 EXPORT_SYMBOL_GPL(st_nci_hci_event_received);
@@ -520,7 +515,7 @@
 	 * Same for eSE.
 	 */
 	r = st_nci_control_se(ndev, se_idx, ST_NCI_SE_MODE_ON);
-	if (r == ST_NCI_HCI_HOST_ID_ESE) {
+	if (r == ST_NCI_ESE_HOST_ID) {
 		st_nci_se_get_atr(ndev);
 		r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
 				ST_NCI_EVT_SE_SOFT_RESET, NULL, 0);
@@ -600,10 +595,12 @@
 	 * HCI will be used here only for proprietary commands.
 	 */
 	if (test_bit(ST_NCI_FACTORY_MODE, &info->flags))
-		r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
+		r = nci_nfcee_mode_set(ndev,
+				       ndev->hci_dev->conn_info->dest_params->id,
 				       NCI_NFCEE_DISABLE);
 	else
-		r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
+		r = nci_nfcee_mode_set(ndev,
+				       ndev->hci_dev->conn_info->dest_params->id,
 				       NCI_NFCEE_ENABLE);
 
 free_dest_params:
@@ -629,17 +626,10 @@
 	if (test_bit(ST_NCI_FACTORY_MODE, &info->flags))
 		return 0;
 
-	if (info->se_info.se_status->is_ese_present &&
-	    info->se_info.se_status->is_uicc_present) {
+	if (info->se_info.se_status->is_uicc_present)
 		white_list[wl_size++] = ST_NCI_UICC_HOST_ID;
+	if (info->se_info.se_status->is_ese_present)
 		white_list[wl_size++] = ST_NCI_ESE_HOST_ID;
-	} else if (!info->se_info.se_status->is_ese_present &&
-		   info->se_info.se_status->is_uicc_present) {
-		white_list[wl_size++] = ST_NCI_UICC_HOST_ID;
-	} else if (info->se_info.se_status->is_ese_present &&
-		   !info->se_info.se_status->is_uicc_present) {
-		white_list[wl_size++] = ST_NCI_ESE_HOST_ID;
-	}
 
 	if (wl_size) {
 		r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
@@ -672,7 +662,7 @@
 	pr_debug("\n");
 
 	switch (se_idx) {
-	case ST_NCI_HCI_HOST_ID_ESE:
+	case ST_NCI_ESE_HOST_ID:
 		info->se_info.cb = cb;
 		info->se_info.cb_context = cb_context;
 		mod_timer(&info->se_info.bwi_timer, jiffies +
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 821dfa9..89e341e 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -43,7 +43,7 @@
 
 #define ST_NCI_SPI_DRIVER_NAME "st_nci_spi"
 
-#define ST_NCI_GPIO_NAME_RESET "clf_reset"
+#define ST_NCI_GPIO_NAME_RESET "reset"
 
 struct st_nci_spi_phy {
 	struct spi_device *spi_dev;
@@ -226,19 +226,9 @@
 static int st_nci_spi_acpi_request_resources(struct spi_device *spi_dev)
 {
 	struct st_nci_spi_phy *phy = spi_get_drvdata(spi_dev);
-	const struct acpi_device_id *id;
 	struct gpio_desc *gpiod_reset;
-	struct device *dev;
-
-	if (!spi_dev)
-		return -EINVAL;
-
-	dev = &spi_dev->dev;
-
-	/* Match the struct device against a given list of ACPI IDs */
-	id = acpi_match_device(dev->driver->acpi_match_table, dev);
-	if (!id)
-		return -ENODEV;
+	struct device *dev = &spi_dev->dev;
+	u8 tmp;
 
 	/* Get RESET GPIO from ACPI */
 	gpiod_reset = devm_gpiod_get_index(dev, ST_NCI_GPIO_NAME_RESET, 1,
@@ -252,10 +242,18 @@
 
 	phy->irq_polarity = irq_get_trigger_type(spi_dev->irq);
 
-	phy->se_status.is_ese_present =
-				device_property_present(dev, "ese-present");
-	phy->se_status.is_uicc_present =
-				device_property_present(dev, "uicc-present");
+	phy->se_status.is_ese_present = false;
+	phy->se_status.is_uicc_present = false;
+
+	if (device_property_present(dev, "ese-present")) {
+		device_property_read_u8(dev, "ese-present", &tmp);
+		phy->se_status.is_ese_present = tmp;
+	}
+
+	if (device_property_present(dev, "uicc-present")) {
+		device_property_read_u8(dev, "uicc-present", &tmp);
+		phy->se_status.is_uicc_present = tmp;
+	}
 
 	return 0;
 }
diff --git a/drivers/nfc/st-nci/st-nci.h b/drivers/nfc/st-nci/st-nci.h
index 8b9f77b..afaf138 100644
--- a/drivers/nfc/st-nci/st-nci.h
+++ b/drivers/nfc/st-nci/st-nci.h
@@ -32,7 +32,6 @@
  * sequence of at most 32 characters.
  */
 #define ST_NCI_ESE_MAX_LENGTH  33
-#define ST_NCI_HCI_HOST_ID_ESE 0xc0
 
 #define ST_NCI_DEVICE_MGNT_GATE		0x01
 
@@ -93,8 +92,7 @@
  *	white list).
  * @HCI_DM_FIELD_GENERATOR: Allow to generate different kind of RF
  *	technology. When using this command to anti-collision is done.
- * @HCI_LOOPBACK: Allow to echo a command and test the Dh to CLF
- *	connectivity.
+ * @LOOPBACK: Allow to echo a command and test the Dh to CLF connectivity.
  * @HCI_DM_VDC_MEASUREMENT_VALUE: Allow to measure the field applied on the
  *	CLF antenna. A value between 0 and 0x0f is returned. 0 is maximum.
  * @HCI_DM_FWUPD_START: Allow to put CLF into firmware update mode. It is a
@@ -116,7 +114,7 @@
 	HCI_DM_RESET,
 	HCI_GET_PARAM,
 	HCI_DM_FIELD_GENERATOR,
-	HCI_LOOPBACK,
+	LOOPBACK,
 	HCI_DM_FWUPD_START,
 	HCI_DM_FWUPD_END,
 	HCI_DM_VDC_MEASUREMENT_VALUE,
@@ -124,17 +122,11 @@
 	MANUFACTURER_SPECIFIC,
 };
 
-struct st_nci_vendor_info {
-	struct completion req_completion;
-	struct sk_buff *rx_skb;
-};
-
 struct st_nci_info {
 	struct llt_ndlc *ndlc;
 	unsigned long flags;
 
 	struct st_nci_se_info se_info;
-	struct st_nci_vendor_info vendor_info;
 };
 
 void st_nci_remove(struct nci_dev *ndev);
@@ -156,8 +148,6 @@
 void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
 						struct sk_buff *skb);
 
-void st_nci_hci_loopback_event_received(struct nci_dev *ndev, u8 event,
-					struct sk_buff *skb);
 int st_nci_vendor_cmds_init(struct nci_dev *ndev);
 
 #endif /* __LOCAL_ST_NCI_H_ */
diff --git a/drivers/nfc/st-nci/vendor_cmds.c b/drivers/nfc/st-nci/vendor_cmds.c
index b5debce..1a836c7 100644
--- a/drivers/nfc/st-nci/vendor_cmds.c
+++ b/drivers/nfc/st-nci/vendor_cmds.c
@@ -333,62 +333,28 @@
 	return r;
 }
 
-void st_nci_hci_loopback_event_received(struct nci_dev *ndev, u8 event,
-					struct sk_buff *skb)
-{
-	struct st_nci_info *info = nci_get_drvdata(ndev);
-
-	switch (event) {
-	case ST_NCI_EVT_POST_DATA:
-		info->vendor_info.rx_skb = skb;
-	break;
-	default:
-		nfc_err(&ndev->nfc_dev->dev, "Unexpected event on loopback gate\n");
-	}
-	complete(&info->vendor_info.req_completion);
-}
-EXPORT_SYMBOL(st_nci_hci_loopback_event_received);
-
-static int st_nci_hci_loopback(struct nfc_dev *dev, void *data,
-			       size_t data_len)
+static int st_nci_loopback(struct nfc_dev *dev, void *data,
+			   size_t data_len)
 {
 	int r;
-	struct sk_buff *msg;
+	struct sk_buff *msg, *skb;
 	struct nci_dev *ndev = nfc_get_drvdata(dev);
-	struct st_nci_info *info = nci_get_drvdata(ndev);
 
 	if (data_len <= 0)
 		return -EPROTO;
 
-	reinit_completion(&info->vendor_info.req_completion);
-	info->vendor_info.rx_skb = NULL;
+	r = nci_nfcc_loopback(ndev, data, data_len, &skb);
+	if (r < 0)
+		return r;
 
-	r = nci_hci_send_event(ndev, NCI_HCI_LOOPBACK_GATE,
-			       ST_NCI_EVT_POST_DATA, data, data_len);
-	if (r != data_len) {
-		r = -EPROTO;
-		goto exit;
-	}
-
-	wait_for_completion_interruptible(&info->vendor_info.req_completion);
-
-	if (!info->vendor_info.rx_skb ||
-	    info->vendor_info.rx_skb->len != data_len) {
-		r = -EPROTO;
-		goto exit;
-	}
-
-	msg = nfc_vendor_cmd_alloc_reply_skb(ndev->nfc_dev,
-					ST_NCI_VENDOR_OUI,
-					HCI_LOOPBACK,
-					info->vendor_info.rx_skb->len);
+	msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
+					     LOOPBACK, skb->len);
 	if (!msg) {
 		r = -ENOMEM;
 		goto free_skb;
 	}
 
-	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, info->vendor_info.rx_skb->len,
-		    info->vendor_info.rx_skb->data)) {
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
 		kfree_skb(msg);
 		r = -ENOBUFS;
 		goto free_skb;
@@ -396,8 +362,7 @@
 
 	r = nfc_vendor_cmd_reply(msg);
 free_skb:
-	kfree_skb(info->vendor_info.rx_skb);
-exit:
+	kfree_skb(skb);
 	return r;
 }
 
@@ -485,8 +450,8 @@
 	},
 	{
 		.vendor_id = ST_NCI_VENDOR_OUI,
-		.subcmd = HCI_LOOPBACK,
-		.doit = st_nci_hci_loopback,
+		.subcmd = LOOPBACK,
+		.doit = st_nci_loopback,
 	},
 	{
 		.vendor_id = ST_NCI_VENDOR_OUI,
@@ -507,9 +472,6 @@
 
 int st_nci_vendor_cmds_init(struct nci_dev *ndev)
 {
-	struct st_nci_info *info = nci_get_drvdata(ndev);
-
-	init_completion(&info->vendor_info.req_completion);
 	return nfc_set_vendor_cmds(ndev->nfc_dev, st_nci_vendor_cmds,
 				   sizeof(st_nci_vendor_cmds));
 }
diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
index dd8b150..dacb916 100644
--- a/drivers/nfc/st21nfca/core.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -176,7 +176,7 @@
 		 */
 		info = (struct st21nfca_pipe_info *) skb_pipe_info->data;
 		if (info->dst_gate_id == ST21NFCA_APDU_READER_GATE &&
-			info->src_host_id != ST21NFCA_ESE_HOST_ID) {
+			info->src_host_id == NFC_HCI_UICC_HOST_ID) {
 			pr_err("Unexpected apdu_reader pipe on host %x\n",
 				info->src_host_id);
 			kfree_skb(skb_pipe_info);
@@ -262,17 +262,10 @@
 	int wl_size = 0;
 	int r;
 
-	if (info->se_status->is_ese_present &&
-		info->se_status->is_uicc_present) {
+	if (info->se_status->is_uicc_present)
 		white_list[wl_size++] = NFC_HCI_UICC_HOST_ID;
+	if (info->se_status->is_ese_present)
 		white_list[wl_size++] = ST21NFCA_ESE_HOST_ID;
-	} else if (!info->se_status->is_ese_present &&
-			 info->se_status->is_uicc_present) {
-		white_list[wl_size++] = NFC_HCI_UICC_HOST_ID;
-	} else if (info->se_status->is_ese_present &&
-			!info->se_status->is_uicc_present) {
-		white_list[wl_size++] = ST21NFCA_ESE_HOST_ID;
-	}
 
 	if (wl_size) {
 		r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 1f44a15..5a82f55 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -62,7 +62,7 @@
 
 #define ST21NFCA_HCI_I2C_DRIVER_NAME "st21nfca_hci_i2c"
 
-#define ST21NFCA_GPIO_NAME_EN "clf_enable"
+#define ST21NFCA_GPIO_NAME_EN "enable"
 
 struct st21nfca_i2c_phy {
 	struct i2c_client *i2c_dev;
@@ -507,34 +507,34 @@
 static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client)
 {
 	struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
-	const struct acpi_device_id *id;
 	struct gpio_desc *gpiod_ena;
-	struct device *dev;
-
-	if (!client)
-		return -EINVAL;
-
-	dev = &client->dev;
-
-	/* Match the struct device against a given list of ACPI IDs */
-	id = acpi_match_device(dev->driver->acpi_match_table, dev);
-	if (!id)
-		return -ENODEV;
+	struct device *dev = &client->dev;
+	u8 tmp;
 
 	/* Get EN GPIO from ACPI */
 	gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1,
 					 GPIOD_OUT_LOW);
-	if (!IS_ERR(gpiod_ena))
-		phy->gpio_ena = desc_to_gpio(gpiod_ena);
+	if (IS_ERR(gpiod_ena)) {
+		nfc_err(dev, "Unable to get ENABLE GPIO\n");
+		return -ENODEV;
+	}
 
 	phy->gpio_ena = desc_to_gpio(gpiod_ena);
 
 	phy->irq_polarity = irq_get_trigger_type(client->irq);
 
-	phy->se_status.is_ese_present =
-				device_property_present(dev, "ese-present");
-	phy->se_status.is_uicc_present =
-				device_property_present(dev, "uicc-present");
+	phy->se_status.is_ese_present = false;
+	phy->se_status.is_uicc_present = false;
+
+	if (device_property_present(dev, "ese-present")) {
+		device_property_read_u8(dev, "ese-present", &tmp);
+		phy->se_status.is_ese_present = tmp;
+	}
+
+	if (device_property_present(dev, "uicc-present")) {
+		device_property_read_u8(dev, "uicc-present", &tmp);
+		phy->se_status.is_uicc_present = tmp;
+	}
 
 	return 0;
 }
@@ -721,7 +721,6 @@
 
 static struct i2c_driver st21nfca_hci_i2c_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = ST21NFCA_HCI_I2C_DRIVER_NAME,
 		.of_match_table = of_match_ptr(of_st21nfca_i2c_match),
 		.acpi_match_table = ACPI_PTR(st21nfca_hci_i2c_acpi_match),
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index bd56a16..3a98563 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -32,8 +32,6 @@
 #define ST21NFCA_EVT_CONNECTIVITY		0x10
 #define ST21NFCA_EVT_TRANSACTION		0x12
 
-#define ST21NFCA_ESE_HOST_ID			0xc0
-
 #define ST21NFCA_SE_TO_HOT_PLUG			1000
 /* Connectivity pipe only */
 #define ST21NFCA_SE_COUNT_PIPE_UICC		0x01
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index c32cbb5..f068b65 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1204,7 +1204,7 @@
 {
 	struct btt *btt = bdev->bd_disk->private_data;
 
-	btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
 	page_endio(page, rw & WRITE, 0);
 	return 0;
 }
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index fc82743..19f822d 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -407,7 +407,7 @@
 	[ND_CMD_IMPLEMENTED] = { },
 	[ND_CMD_SMART] = {
 		.out_num = 2,
-		.out_sizes = { 4, 8, },
+		.out_sizes = { 4, 128, },
 	},
 	[ND_CMD_SMART_THRESHOLD] = {
 		.out_num = 2,
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 79646d0..182a93f 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -417,8 +417,8 @@
 		set_badblock(bb, start_sector, num_sectors);
 }
 
-static void namespace_add_poison(struct list_head *poison_list,
-		struct badblocks *bb, struct resource *res)
+static void badblocks_populate(struct list_head *poison_list,
+		struct badblocks *bb, const struct resource *res)
 {
 	struct nd_poison *pl;
 
@@ -460,36 +460,35 @@
 }
 
 /**
- * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks
- * @ndns:	the namespace containing poison ranges
- * @bb:		badblocks instance to populate
- * @offset:	offset at the start of the namespace before 'sector 0'
+ * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
+ * @region: parent region of the range to interrogate
+ * @bb: badblocks instance to populate
+ * @res: resource range to consider
  *
- * The poison list generated during NFIT initialization may contain multiple,
- * possibly overlapping ranges in the SPA (System Physical Address) space.
- * Compare each of these ranges to the namespace currently being initialized,
- * and add badblocks to the gendisk for all matching sub-ranges
+ * The poison list generated during bus initialization may contain
+ * multiple, possibly overlapping physical address ranges.  Compare each
+ * of these ranges to the resource range currently being initialized,
+ * and add badblocks entries for all matching sub-ranges
  */
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
-		struct badblocks *bb, resource_size_t offset)
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+		struct badblocks *bb, const struct resource *res)
 {
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
 	struct nvdimm_bus *nvdimm_bus;
 	struct list_head *poison_list;
-	struct resource res = {
-		.start = nsio->res.start + offset,
-		.end = nsio->res.end,
-	};
 
-	nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent);
+	if (!is_nd_pmem(&nd_region->dev)) {
+		dev_WARN_ONCE(&nd_region->dev, 1,
+				"%s only valid for pmem regions\n", __func__);
+		return;
+	}
+	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
 	poison_list = &nvdimm_bus->poison_list;
 
 	nvdimm_bus_lock(&nvdimm_bus->dev);
-	namespace_add_poison(poison_list, bb, &res);
+	badblocks_populate(poison_list, bb, res);
 	nvdimm_bus_unlock(&nvdimm_bus->dev);
 }
-EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison);
+EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
 
 static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
 {
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 1799bd9..875c524 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -266,8 +266,8 @@
 int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
 		char *name);
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
-		struct badblocks *bb, resource_size_t offset);
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+		struct badblocks *bb, const struct resource *res);
 int nd_blk_region_init(struct nd_region *nd_region);
 void __nd_iostat_start(struct bio *bio, unsigned long *start);
 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 254d3bc..e071e21 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -376,7 +376,7 @@
 	} else {
 		/* from init we validate */
 		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
-			return -EINVAL;
+			return -ENODEV;
 	}
 
 	if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ca5721c..92f5365 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -99,10 +99,24 @@
 		if (unlikely(bad_pmem))
 			rc = -EIO;
 		else {
-			memcpy_from_pmem(mem + off, pmem_addr, len);
+			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
 			flush_dcache_page(page);
 		}
 	} else {
+		/*
+		 * Note that we write the data both before and after
+		 * clearing poison.  The write before clear poison
+		 * handles situations where the latest written data is
+		 * preserved and the clear poison operation simply marks
+		 * the address range as valid without changing the data.
+		 * In this case application software can assume that an
+		 * interrupted write will either return the new good
+		 * data or an error.
+		 *
+		 * However, if pmem_clear_poison() leaves the data in an
+		 * indeterminate state we need to perform the write
+		 * after clear poison.
+		 */
 		flush_dcache_page(page);
 		memcpy_to_pmem(pmem_addr, mem + off, len);
 		if (unlikely(bad_pmem)) {
@@ -151,7 +165,7 @@
 	struct pmem_device *pmem = bdev->bd_disk->private_data;
 	int rc;
 
-	rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
 	if (rw & WRITE)
 		wmb_pmem();
 
@@ -244,7 +258,9 @@
 static int pmem_attach_disk(struct device *dev,
 		struct nd_namespace_common *ndns, struct pmem_device *pmem)
 {
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	int nid = dev_to_node(dev);
+	struct resource bb_res;
 	struct gendisk *disk;
 
 	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
@@ -271,8 +287,17 @@
 	devm_exit_badblocks(dev, &pmem->bb);
 	if (devm_init_badblocks(dev, &pmem->bb))
 		return -ENOMEM;
-	nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
+	bb_res.start = nsio->res.start + pmem->data_offset;
+	bb_res.end = nsio->res.end;
+	if (is_nd_pfn(dev)) {
+		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 
+		bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
+		bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+	}
+	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
+			&bb_res);
 	disk->bb = &pmem->bb;
 	add_disk(disk);
 	revalidate_disk(disk);
@@ -295,7 +320,7 @@
 
 		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
 			return -EIO;
-		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
+		return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
 	} else {
 		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
 		wmb_pmem();
@@ -372,10 +397,17 @@
 	 */
 	start += start_pad;
 	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
-	if (nd_pfn->mode == PFN_MODE_PMEM)
-		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+	if (nd_pfn->mode == PFN_MODE_PMEM) {
+		unsigned long memmap_size;
+
+		/*
+		 * vmemmap_populate_hugepages() allocates the memmap array in
+		 * PMD_SIZE chunks.
+		 */
+		memmap_size = ALIGN(64 * npfns, PMD_SIZE);
+		offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
 			- start;
-	else if (nd_pfn->mode == PFN_MODE_RAM)
+	} else if (nd_pfn->mode == PFN_MODE_RAM)
 		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
 	else
 		goto err;
@@ -553,7 +585,7 @@
 	ndns->rw_bytes = pmem_rw_bytes;
 	if (devm_init_badblocks(dev, &pmem->bb))
 		return -ENOMEM;
-	nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
+	nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
 
 	if (is_nd_btt(dev)) {
 		/* btt allocates its own request_queue */
@@ -595,14 +627,25 @@
 {
 	struct pmem_device *pmem = dev_get_drvdata(dev);
 	struct nd_namespace_common *ndns = pmem->ndns;
+	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	struct resource res = {
+		.start = nsio->res.start + pmem->data_offset,
+		.end = nsio->res.end,
+	};
 
 	if (event != NVDIMM_REVALIDATE_POISON)
 		return;
 
-	if (is_nd_btt(dev))
-		nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
-	else
-		nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
+	if (is_nd_pfn(dev)) {
+		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+		res.start += __le32_to_cpu(pfn_sb->start_pad);
+		res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+	}
+
+	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
 }
 
 MODULE_ALIAS("pmem");
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index c894841..d296fc3 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -18,7 +18,7 @@
 	depends on NVME_CORE
 	---help---
 	  This adds support for the SG_IO ioctl on the NVMe character
-	  and block devices nodes, as well a a translation for a small
+	  and block device nodes, as well as a translation for a small
 	  number of selected SCSI commands to NVMe commands to the NVMe
 	  driver.  If you don't know what this means you probably want
 	  to say N here, unless you run a distro that abuses the SCSI
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 643f457..2de248b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -58,6 +58,55 @@
 
 static struct class *nvme_class;
 
+bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+		enum nvme_ctrl_state new_state)
+{
+	enum nvme_ctrl_state old_state = ctrl->state;
+	bool changed = false;
+
+	spin_lock_irq(&ctrl->lock);
+	switch (new_state) {
+	case NVME_CTRL_LIVE:
+		switch (old_state) {
+		case NVME_CTRL_RESETTING:
+			changed = true;
+			/* FALLTHRU */
+		default:
+			break;
+		}
+		break;
+	case NVME_CTRL_RESETTING:
+		switch (old_state) {
+		case NVME_CTRL_NEW:
+		case NVME_CTRL_LIVE:
+			changed = true;
+			/* FALLTHRU */
+		default:
+			break;
+		}
+		break;
+	case NVME_CTRL_DELETING:
+		switch (old_state) {
+		case NVME_CTRL_LIVE:
+		case NVME_CTRL_RESETTING:
+			changed = true;
+			/* FALLTHRU */
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	spin_unlock_irq(&ctrl->lock);
+
+	if (changed)
+		ctrl->state = new_state;
+
+	return changed;
+}
+EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
+
 static void nvme_free_ns(struct kref *kref)
 {
 	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
@@ -138,6 +187,111 @@
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
+static inline void nvme_setup_flush(struct nvme_ns *ns,
+		struct nvme_command *cmnd)
+{
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->common.opcode = nvme_cmd_flush;
+	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+}
+
+static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+		struct nvme_command *cmnd)
+{
+	struct nvme_dsm_range *range;
+	struct page *page;
+	int offset;
+	unsigned int nr_bytes = blk_rq_bytes(req);
+
+	range = kmalloc(sizeof(*range), GFP_ATOMIC);
+	if (!range)
+		return BLK_MQ_RQ_QUEUE_BUSY;
+
+	range->cattr = cpu_to_le32(0);
+	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
+	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->dsm.opcode = nvme_cmd_dsm;
+	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+	cmnd->dsm.nr = 0;
+	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+	req->completion_data = range;
+	page = virt_to_page(range);
+	offset = offset_in_page(range);
+	blk_add_request_payload(req, page, offset, sizeof(*range));
+
+	/*
+	 * we set __data_len back to the size of the area to be discarded
+	 * on disk. This allows us to report completion on the full amount
+	 * of blocks described by the request.
+	 */
+	req->__data_len = nr_bytes;
+
+	return 0;
+}
+
+static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
+		struct nvme_command *cmnd)
+{
+	u16 control = 0;
+	u32 dsmgmt = 0;
+
+	if (req->cmd_flags & REQ_FUA)
+		control |= NVME_RW_FUA;
+	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+		control |= NVME_RW_LR;
+
+	if (req->cmd_flags & REQ_RAHEAD)
+		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+	cmnd->rw.command_id = req->tag;
+	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+	if (ns->ms) {
+		switch (ns->pi_type) {
+		case NVME_NS_DPS_PI_TYPE3:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD;
+			break;
+		case NVME_NS_DPS_PI_TYPE1:
+		case NVME_NS_DPS_PI_TYPE2:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD |
+					NVME_RW_PRINFO_PRCHK_REF;
+			cmnd->rw.reftag = cpu_to_le32(
+					nvme_block_nr(ns, blk_rq_pos(req)));
+			break;
+		}
+		if (!blk_integrity_rq(req))
+			control |= NVME_RW_PRINFO_PRACT;
+	}
+
+	cmnd->rw.control = cpu_to_le16(control);
+	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+}
+
+int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+		struct nvme_command *cmd)
+{
+	int ret = 0;
+
+	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+		memcpy(cmd, req->cmd, sizeof(*cmd));
+	else if (req->cmd_flags & REQ_FLUSH)
+		nvme_setup_flush(ns, cmd);
+	else if (req->cmd_flags & REQ_DISCARD)
+		ret = nvme_setup_discard(ns, req, cmd);
+	else
+		nvme_setup_rw(ns, req, cmd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_setup_cmd);
+
 /*
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
@@ -894,6 +1048,8 @@
 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 		struct request_queue *q)
 {
+	bool vwc = false;
+
 	if (ctrl->max_hw_sectors) {
 		u32 max_segments =
 			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
@@ -903,9 +1059,10 @@
 	}
 	if (ctrl->stripe_size)
 		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
-	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
-		blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
+	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+		vwc = true;
+	blk_queue_write_cache(q, vwc, vwc);
 }
 
 /*
@@ -1272,7 +1429,7 @@
 	if (nvme_revalidate_disk(ns->disk))
 		goto out_free_disk;
 
-	list_add_tail(&ns->list, &ctrl->namespaces);
+	list_add_tail_rcu(&ns->list, &ctrl->namespaces);
 	kref_get(&ctrl->kref);
 	if (ns->type == NVME_NS_LIGHTNVM)
 		return;
@@ -1295,6 +1452,8 @@
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
+	lockdep_assert_held(&ns->ctrl->namespaces_mutex);
+
 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 		return;
 
@@ -1307,9 +1466,8 @@
 		blk_mq_abort_requeue_list(ns->queue);
 		blk_cleanup_queue(ns->queue);
 	}
-	mutex_lock(&ns->ctrl->namespaces_mutex);
 	list_del_init(&ns->list);
-	mutex_unlock(&ns->ctrl->namespaces_mutex);
+	synchronize_rcu();
 	nvme_put_ns(ns);
 }
 
@@ -1361,7 +1519,7 @@
 	return ret;
 }
 
-static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
+static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
 {
 	struct nvme_ns *ns, *next;
 	unsigned i;
@@ -1377,11 +1535,16 @@
 	}
 }
 
-void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
+static void nvme_scan_work(struct work_struct *work)
 {
+	struct nvme_ctrl *ctrl =
+		container_of(work, struct nvme_ctrl, scan_work);
 	struct nvme_id_ctrl *id;
 	unsigned nn;
 
+	if (ctrl->state != NVME_CTRL_LIVE)
+		return;
+
 	if (nvme_identify_ctrl(ctrl, &id))
 		return;
 
@@ -1392,23 +1555,86 @@
 		if (!nvme_scan_ns_list(ctrl, nn))
 			goto done;
 	}
-	__nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
+	nvme_scan_ns_sequential(ctrl, nn);
  done:
 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
 	mutex_unlock(&ctrl->namespaces_mutex);
 	kfree(id);
+
+	if (ctrl->ops->post_scan)
+		ctrl->ops->post_scan(ctrl);
 }
-EXPORT_SYMBOL_GPL(nvme_scan_namespaces);
+
+void nvme_queue_scan(struct nvme_ctrl *ctrl)
+{
+	/*
+	 * Do not queue new scan work when a controller is reset during
+	 * removal.
+	 */
+	if (ctrl->state == NVME_CTRL_LIVE)
+		schedule_work(&ctrl->scan_work);
+}
+EXPORT_SYMBOL_GPL(nvme_queue_scan);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns, *next;
 
+	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
 		nvme_ns_remove(ns);
+	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
 
+static void nvme_async_event_work(struct work_struct *work)
+{
+	struct nvme_ctrl *ctrl =
+		container_of(work, struct nvme_ctrl, async_event_work);
+
+	spin_lock_irq(&ctrl->lock);
+	while (ctrl->event_limit > 0) {
+		int aer_idx = --ctrl->event_limit;
+
+		spin_unlock_irq(&ctrl->lock);
+		ctrl->ops->submit_async_event(ctrl, aer_idx);
+		spin_lock_irq(&ctrl->lock);
+	}
+	spin_unlock_irq(&ctrl->lock);
+}
+
+void nvme_complete_async_event(struct nvme_ctrl *ctrl,
+		struct nvme_completion *cqe)
+{
+	u16 status = le16_to_cpu(cqe->status) >> 1;
+	u32 result = le32_to_cpu(cqe->result);
+
+	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+		++ctrl->event_limit;
+		schedule_work(&ctrl->async_event_work);
+	}
+
+	if (status != NVME_SC_SUCCESS)
+		return;
+
+	switch (result & 0xff07) {
+	case NVME_AER_NOTICE_NS_CHANGED:
+		dev_info(ctrl->device, "rescanning\n");
+		nvme_queue_scan(ctrl);
+		break;
+	default:
+		dev_warn(ctrl->device, "async event result %08x\n", result);
+	}
+}
+EXPORT_SYMBOL_GPL(nvme_complete_async_event);
+
+void nvme_queue_async_events(struct nvme_ctrl *ctrl)
+{
+	ctrl->event_limit = NVME_NR_AERS;
+	schedule_work(&ctrl->async_event_work);
+}
+EXPORT_SYMBOL_GPL(nvme_queue_async_events);
+
 static DEFINE_IDA(nvme_instance_ida);
 
 static int nvme_set_instance(struct nvme_ctrl *ctrl)
@@ -1440,6 +1666,10 @@
 
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
 {
+	flush_work(&ctrl->async_event_work);
+	flush_work(&ctrl->scan_work);
+	nvme_remove_namespaces(ctrl);
+
 	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
 
 	spin_lock(&dev_list_lock);
@@ -1475,12 +1705,16 @@
 {
 	int ret;
 
+	ctrl->state = NVME_CTRL_NEW;
+	spin_lock_init(&ctrl->lock);
 	INIT_LIST_HEAD(&ctrl->namespaces);
 	mutex_init(&ctrl->namespaces_mutex);
 	kref_init(&ctrl->kref);
 	ctrl->dev = dev;
 	ctrl->ops = ops;
 	ctrl->quirks = quirks;
+	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
+	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
 
 	ret = nvme_set_instance(ctrl);
 	if (ret)
@@ -1520,8 +1754,8 @@
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
 		if (!kref_get_unless_zero(&ns->kref))
 			continue;
 
@@ -1538,7 +1772,7 @@
 
 		nvme_put_ns(ns);
 	}
-	mutex_unlock(&ctrl->namespaces_mutex);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
 
@@ -1546,8 +1780,8 @@
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
 		spin_lock_irq(ns->queue->queue_lock);
 		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
 		spin_unlock_irq(ns->queue->queue_lock);
@@ -1555,7 +1789,7 @@
 		blk_mq_cancel_requeue_work(ns->queue);
 		blk_mq_stop_hw_queues(ns->queue);
 	}
-	mutex_unlock(&ctrl->namespaces_mutex);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(nvme_stop_queues);
 
@@ -1563,13 +1797,13 @@
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
 		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
 		blk_mq_start_stopped_hw_queues(ns->queue, true);
 		blk_mq_kick_requeue_list(ns->queue);
 	}
-	mutex_unlock(&ctrl->namespaces_mutex);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
 
@@ -1607,9 +1841,9 @@
 
 void nvme_core_exit(void)
 {
-	unregister_blkdev(nvme_major, "nvme");
 	class_destroy(nvme_class);
 	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+	unregister_blkdev(nvme_major, "nvme");
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 9461dd6..a0af055 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -367,8 +367,8 @@
 		ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
 				(struct nvme_command *)&c, entries, len);
 		if (ret) {
-			dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
-									ret);
+			dev_err(ns->ctrl->device,
+				"L2P table transfer failed (%d)\n", ret);
 			ret = -EIO;
 			goto out;
 		}
@@ -387,41 +387,16 @@
 	return ret;
 }
 
-static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
-						int nr_dst_blks, u8 *dst_blks,
-						int nr_src_blks, u8 *src_blks)
-{
-	int blk, offset, pl, blktype;
-
-	for (blk = 0; blk < nr_dst_blks; blk++) {
-		offset = blk * nvmdev->plane_mode;
-		blktype = src_blks[offset];
-
-		/* Bad blocks on any planes take precedence over other types */
-		for (pl = 0; pl < nvmdev->plane_mode; pl++) {
-			if (src_blks[offset + pl] &
-					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
-				blktype = src_blks[offset + pl];
-				break;
-			}
-		}
-
-		dst_blks[blk] = blktype;
-	}
-}
-
 static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
-				int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
-				void *priv)
+								u8 *blks)
 {
 	struct request_queue *q = nvmdev->q;
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	struct nvme_nvm_command c = {};
 	struct nvme_nvm_bb_tbl *bb_tbl;
-	u8 *dst_blks = NULL;
-	int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
-	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
+	int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
+	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
 	int ret = 0;
 
 	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
@@ -432,54 +407,43 @@
 	if (!bb_tbl)
 		return -ENOMEM;
 
-	dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
-	if (!dst_blks) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
 								bb_tbl, tblsz);
 	if (ret) {
-		dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
+		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
 		ret = -EIO;
 		goto out;
 	}
 
 	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
 		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
-		dev_err(ctrl->dev, "bbt format mismatch\n");
+		dev_err(ctrl->device, "bbt format mismatch\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (le16_to_cpu(bb_tbl->verid) != 1) {
 		ret = -EINVAL;
-		dev_err(ctrl->dev, "bbt version not supported\n");
+		dev_err(ctrl->device, "bbt version not supported\n");
 		goto out;
 	}
 
-	if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
+	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
 		ret = -EINVAL;
-		dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
-				le32_to_cpu(bb_tbl->tblks), nr_src_blks);
+		dev_err(ctrl->device,
+				"bbt unsuspected blocks returned (%u!=%u)",
+				le32_to_cpu(bb_tbl->tblks), nr_blks);
 		goto out;
 	}
 
-	nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
-						nr_src_blks, bb_tbl->blk);
-
-	ppa = dev_to_generic_addr(nvmdev, ppa);
-	ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
-
+	memcpy(blks, bb_tbl->blk, nvmdev->blks_per_lun * nvmdev->plane_mode);
 out:
-	kfree(dst_blks);
 	kfree(bb_tbl);
 	return ret;
 }
 
-static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
-								int type)
+static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
+							int nr_ppas, int type)
 {
 	struct nvme_ns *ns = nvmdev->q->queuedata;
 	struct nvme_nvm_command c = {};
@@ -487,14 +451,15 @@
 
 	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
 	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
-	c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
-	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
+	c.set_bb.spba = cpu_to_le64(ppas->ppa);
+	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
 	c.set_bb.value = type;
 
 	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
 								NULL, 0);
 	if (ret)
-		dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
+		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
+									ret);
 	return ret;
 }
 
@@ -504,8 +469,9 @@
 	c->ph_rw.opcode = rqd->opcode;
 	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
 	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
+	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
 	c->ph_rw.control = cpu_to_le16(rqd->flags);
-	c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);
+	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
 
 	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
 		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
@@ -576,7 +542,7 @@
 	c.erase.opcode = NVM_OP_ERASE;
 	c.erase.nsid = cpu_to_le32(ns->ns_id);
 	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
-	c.erase.length = cpu_to_le16(rqd->nr_pages - 1);
+	c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
 
 	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
 }
@@ -601,10 +567,10 @@
 	return dma_pool_alloc(pool, mem_flags, dma_handler);
 }
 
-static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
+static void nvme_nvm_dev_dma_free(void *pool, void *addr,
 							dma_addr_t dma_handler)
 {
-	dma_pool_free(pool, ppa_list, dma_handler);
+	dma_pool_free(pool, addr, dma_handler);
 }
 
 static struct nvm_dev_ops nvme_nvm_dev_ops = {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f846da4..114b928 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -67,7 +67,16 @@
 	NVME_QUIRK_DISCARD_ZEROES		= (1 << 2),
 };
 
+enum nvme_ctrl_state {
+	NVME_CTRL_NEW,
+	NVME_CTRL_LIVE,
+	NVME_CTRL_RESETTING,
+	NVME_CTRL_DELETING,
+};
+
 struct nvme_ctrl {
+	enum nvme_ctrl_state state;
+	spinlock_t lock;
 	const struct nvme_ctrl_ops *ops;
 	struct request_queue *admin_q;
 	struct device *dev;
@@ -84,7 +93,7 @@
 	char serial[20];
 	char model[40];
 	char firmware_rev[8];
-	int cntlid;
+	u16 cntlid;
 
 	u32 ctrl_config;
 
@@ -99,6 +108,8 @@
 	u32 vs;
 	bool subsystem;
 	unsigned long quirks;
+	struct work_struct scan_work;
+	struct work_struct async_event_work;
 };
 
 /*
@@ -136,9 +147,10 @@
 	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
 	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
-	bool (*io_incapable)(struct nvme_ctrl *ctrl);
 	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
 	void (*free_ctrl)(struct nvme_ctrl *ctrl);
+	void (*post_scan)(struct nvme_ctrl *ctrl);
+	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
 };
 
 static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
@@ -150,17 +162,6 @@
 	return val & NVME_CSTS_RDY;
 }
 
-static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
-{
-	u32 val = 0;
-
-	if (ctrl->ops->io_incapable(ctrl))
-		return true;
-	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
-		return true;
-	return val & NVME_CSTS_CFS;
-}
-
 static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
 {
 	if (!ctrl->subsystem)
@@ -173,57 +174,20 @@
 	return (sector >> (ns->lba_shift - 9));
 }
 
-static inline void nvme_setup_flush(struct nvme_ns *ns,
-		struct nvme_command *cmnd)
+static inline unsigned nvme_map_len(struct request *rq)
 {
-	memset(cmnd, 0, sizeof(*cmnd));
-	cmnd->common.opcode = nvme_cmd_flush;
-	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+	if (rq->cmd_flags & REQ_DISCARD)
+		return sizeof(struct nvme_dsm_range);
+	else
+		return blk_rq_bytes(rq);
 }
 
-static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
-		struct nvme_command *cmnd)
+static inline void nvme_cleanup_cmd(struct request *req)
 {
-	u16 control = 0;
-	u32 dsmgmt = 0;
-
-	if (req->cmd_flags & REQ_FUA)
-		control |= NVME_RW_FUA;
-	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
-		control |= NVME_RW_LR;
-
-	if (req->cmd_flags & REQ_RAHEAD)
-		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
-
-	memset(cmnd, 0, sizeof(*cmnd));
-	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
-	cmnd->rw.command_id = req->tag;
-	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
-	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-
-	if (ns->ms) {
-		switch (ns->pi_type) {
-		case NVME_NS_DPS_PI_TYPE3:
-			control |= NVME_RW_PRINFO_PRCHK_GUARD;
-			break;
-		case NVME_NS_DPS_PI_TYPE1:
-		case NVME_NS_DPS_PI_TYPE2:
-			control |= NVME_RW_PRINFO_PRCHK_GUARD |
-					NVME_RW_PRINFO_PRCHK_REF;
-			cmnd->rw.reftag = cpu_to_le32(
-					nvme_block_nr(ns, blk_rq_pos(req)));
-			break;
-		}
-		if (!blk_integrity_rq(req))
-			control |= NVME_RW_PRINFO_PRACT;
-	}
-
-	cmnd->rw.control = cpu_to_le16(control);
-	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+	if (req->cmd_flags & REQ_DISCARD)
+		kfree(req->completion_data);
 }
 
-
 static inline int nvme_error_status(u16 status)
 {
 	switch (status & 0x7ff) {
@@ -242,6 +206,8 @@
 		(jiffies - req->start_time) < req->timeout;
 }
 
+bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+		enum nvme_ctrl_state new_state);
 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
 int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -251,9 +217,14 @@
 void nvme_put_ctrl(struct nvme_ctrl *ctrl);
 int nvme_init_identify(struct nvme_ctrl *ctrl);
 
-void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
+void nvme_queue_scan(struct nvme_ctrl *ctrl);
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
+#define NVME_NR_AERS	1
+void nvme_complete_async_event(struct nvme_ctrl *ctrl,
+		struct nvme_completion *cqe);
+void nvme_queue_async_events(struct nvme_ctrl *ctrl);
+
 void nvme_stop_queues(struct nvme_ctrl *ctrl);
 void nvme_start_queues(struct nvme_ctrl *ctrl);
 void nvme_kill_queues(struct nvme_ctrl *ctrl);
@@ -261,6 +232,8 @@
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, unsigned int flags);
 void nvme_requeue_req(struct request *req);
+int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+		struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 24ccda3..0f093f1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -54,8 +54,7 @@
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
  */
-#define NVME_NR_AEN_COMMANDS	1
-#define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
+#define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AERS)
 
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
@@ -92,9 +91,7 @@
 	struct msix_entry *entry;
 	void __iomem *bar;
 	struct work_struct reset_work;
-	struct work_struct scan_work;
 	struct work_struct remove_work;
-	struct work_struct async_work;
 	struct timer_list watchdog_timer;
 	struct mutex shutdown_lock;
 	bool subsystem;
@@ -102,11 +99,6 @@
 	dma_addr_t cmb_dma_addr;
 	u64 cmb_size;
 	u32 cmbsz;
-	unsigned long flags;
-
-#define NVME_CTRL_RESETTING    0
-#define NVME_CTRL_REMOVING     1
-
 	struct nvme_ctrl ctrl;
 	struct completion ioq_wait;
 };
@@ -271,40 +263,6 @@
 	return 0;
 }
 
-static void nvme_queue_scan(struct nvme_dev *dev)
-{
-	/*
-	 * Do not queue new scan work when a controller is reset during
-	 * removal.
-	 */
-	if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
-		return;
-	queue_work(nvme_workq, &dev->scan_work);
-}
-
-static void nvme_complete_async_event(struct nvme_dev *dev,
-		struct nvme_completion *cqe)
-{
-	u16 status = le16_to_cpu(cqe->status) >> 1;
-	u32 result = le32_to_cpu(cqe->result);
-
-	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
-		++dev->ctrl.event_limit;
-		queue_work(nvme_workq, &dev->async_work);
-	}
-
-	if (status != NVME_SC_SUCCESS)
-		return;
-
-	switch (result & 0xff07) {
-	case NVME_AER_NOTICE_NS_CHANGED:
-		dev_info(dev->ctrl.device, "rescanning\n");
-		nvme_queue_scan(dev);
-	default:
-		dev_warn(dev->ctrl.device, "async event result %08x\n", result);
-	}
-}
-
 /**
  * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
  * @nvmeq: The queue to use
@@ -334,16 +292,11 @@
 	return (__le64 **)(iod->sg + req->nr_phys_segments);
 }
 
-static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, unsigned size,
+		struct nvme_dev *dev)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
 	int nseg = rq->nr_phys_segments;
-	unsigned size;
-
-	if (rq->cmd_flags & REQ_DISCARD)
-		size = sizeof(struct nvme_dsm_range);
-	else
-		size = blk_rq_bytes(rq);
 
 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
 		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -368,6 +321,8 @@
 	__le64 **list = iod_list(req);
 	dma_addr_t prp_dma = iod->first_dma;
 
+	nvme_cleanup_cmd(req);
+
 	if (iod->npages == 0)
 		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
 	for (i = 0; i < iod->npages; i++) {
@@ -529,7 +484,7 @@
 }
 
 static int nvme_map_data(struct nvme_dev *dev, struct request *req,
-		struct nvme_command *cmnd)
+		unsigned size, struct nvme_command *cmnd)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct request_queue *q = req->q;
@@ -546,7 +501,7 @@
 	if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
 		goto out;
 
-	if (!nvme_setup_prps(dev, req, blk_rq_bytes(req)))
+	if (!nvme_setup_prps(dev, req, size))
 		goto out_unmap;
 
 	ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -596,37 +551,6 @@
 }
 
 /*
- * We reuse the small pool to allocate the 16-byte range here as it is not
- * worth having a special pool for these or additional cases to handle freeing
- * the iod.
- */
-static int nvme_setup_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
-		struct request *req, struct nvme_command *cmnd)
-{
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_dsm_range *range;
-
-	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
-						&iod->first_dma);
-	if (!range)
-		return BLK_MQ_RQ_QUEUE_BUSY;
-	iod_list(req)[0] = (__le64 *)range;
-	iod->npages = 0;
-
-	range->cattr = cpu_to_le32(0);
-	range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
-	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
-
-	memset(cmnd, 0, sizeof(*cmnd));
-	cmnd->dsm.opcode = nvme_cmd_dsm;
-	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
-	cmnd->dsm.nr = 0;
-	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
-	return BLK_MQ_RQ_QUEUE_OK;
-}
-
-/*
  * NOTE: ns is NULL when called on the admin queue.
  */
 static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -637,6 +561,7 @@
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *req = bd->rq;
 	struct nvme_command cmnd;
+	unsigned map_len;
 	int ret = BLK_MQ_RQ_QUEUE_OK;
 
 	/*
@@ -652,23 +577,17 @@
 		}
 	}
 
-	ret = nvme_init_iod(req, dev);
+	map_len = nvme_map_len(req);
+	ret = nvme_init_iod(req, map_len, dev);
 	if (ret)
 		return ret;
 
-	if (req->cmd_flags & REQ_DISCARD) {
-		ret = nvme_setup_discard(nvmeq, ns, req, &cmnd);
-	} else {
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV)
-			memcpy(&cmnd, req->cmd, sizeof(cmnd));
-		else if (req->cmd_flags & REQ_FLUSH)
-			nvme_setup_flush(ns, &cmnd);
-		else
-			nvme_setup_rw(ns, req, &cmnd);
+	ret = nvme_setup_cmd(ns, req, &cmnd);
+	if (ret)
+		goto out;
 
-		if (req->nr_phys_segments)
-			ret = nvme_map_data(dev, req, &cmnd);
-	}
+	if (req->nr_phys_segments)
+		ret = nvme_map_data(dev, req, map_len, &cmnd);
 
 	if (ret)
 		goto out;
@@ -764,7 +683,7 @@
 		 */
 		if (unlikely(nvmeq->qid == 0 &&
 				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
-			nvme_complete_async_event(nvmeq->dev, &cqe);
+			nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
 			continue;
 		}
 
@@ -833,21 +752,18 @@
 	return 0;
 }
 
-static void nvme_async_event_work(struct work_struct *work)
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
 {
-	struct nvme_dev *dev = container_of(work, struct nvme_dev, async_work);
+	struct nvme_dev *dev = to_nvme_dev(ctrl);
 	struct nvme_queue *nvmeq = dev->queues[0];
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
+	c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;
 
 	spin_lock_irq(&nvmeq->q_lock);
-	while (dev->ctrl.event_limit > 0) {
-		c.common.command_id = NVME_AQ_BLKMQ_DEPTH +
-			--dev->ctrl.event_limit;
-		__nvme_submit_cmd(nvmeq, &c);
-	}
+	__nvme_submit_cmd(nvmeq, &c);
 	spin_unlock_irq(&nvmeq->q_lock);
 }
 
@@ -939,7 +855,7 @@
 	 * cancellation error. All outstanding requests are completed on
 	 * shutdown, so we return BLK_EH_HANDLED.
 	 */
-	if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
+	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
 		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, disable controller\n",
 			 req->tag, nvmeq->qid);
@@ -1003,16 +919,15 @@
 	return BLK_EH_RESET_TIMER;
 }
 
-static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
+static void nvme_cancel_io(struct request *req, void *data, bool reserved)
 {
-	struct nvme_queue *nvmeq = data;
 	int status;
 
 	if (!blk_mq_request_started(req))
 		return;
 
-	dev_dbg_ratelimited(nvmeq->dev->ctrl.device,
-		 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
+	dev_dbg_ratelimited(((struct nvme_dev *) data)->ctrl.device,
+				"Cancelling I/O %d", req->tag);
 
 	status = NVME_SC_ABORT_REQ;
 	if (blk_queue_dying(req->q))
@@ -1069,14 +984,6 @@
 	return 0;
 }
 
-static void nvme_clear_queue(struct nvme_queue *nvmeq)
-{
-	spin_lock_irq(&nvmeq->q_lock);
-	if (nvmeq->tags && *nvmeq->tags)
-		blk_mq_all_tag_busy_iter(*nvmeq->tags, nvme_cancel_queue_ios, nvmeq);
-	spin_unlock_irq(&nvmeq->q_lock);
-}
-
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
 	struct nvme_queue *nvmeq = dev->queues[0];
@@ -1350,22 +1257,44 @@
 	return result;
 }
 
+static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
+{
+
+	/* If true, indicates loss of adapter communication, possibly by an
+	 * NVMe Subsystem reset.
+	 */
+	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
+
+	/* If there is a reset ongoing, we shouldn't reset again. */
+	if (work_busy(&dev->reset_work))
+		return false;
+
+	/* We shouldn't reset unless the controller is on fatal error state
+	 * _or_ if we lost the communication with it.
+	 */
+	if (!(csts & NVME_CSTS_CFS) && !nssro)
+		return false;
+
+	/* If PCI error recovery process is happening, we cannot reset or
+	 * the recovery mechanism will surely fail.
+	 */
+	if (pci_channel_offline(to_pci_dev(dev->dev)))
+		return false;
+
+	return true;
+}
+
 static void nvme_watchdog_timer(unsigned long data)
 {
 	struct nvme_dev *dev = (struct nvme_dev *)data;
 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
-	/*
-	 * Skip controllers currently under reset.
-	 */
-	if (!work_pending(&dev->reset_work) && !work_busy(&dev->reset_work) &&
-	    ((csts & NVME_CSTS_CFS) ||
-	     (dev->subsystem && (csts & NVME_CSTS_NSSRO)))) {
-		if (queue_work(nvme_workq, &dev->reset_work)) {
+	/* Skip controllers under certain specific conditions. */
+	if (nvme_should_reset(dev, csts)) {
+		if (queue_work(nvme_workq, &dev->reset_work))
 			dev_warn(dev->dev,
 				"Failed status: 0x%x, reset controller.\n",
 				csts);
-		}
 		return;
 	}
 
@@ -1478,8 +1407,7 @@
 	if (result > 0) {
 		dev_err(dev->ctrl.device,
 			"Could not set queue count (%d)\n", result);
-		nr_io_queues = 0;
-		result = 0;
+		return 0;
 	}
 
 	if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
@@ -1513,7 +1441,9 @@
 	 * If we enable msix early due to not intx, disable it again before
 	 * setting up the full range we need.
 	 */
-	if (!pdev->irq)
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
+	else if (pdev->msix_enabled)
 		pci_disable_msix(pdev);
 
 	for (i = 0; i < nr_io_queues; i++)
@@ -1550,8 +1480,9 @@
 	return result;
 }
 
-static void nvme_set_irq_hints(struct nvme_dev *dev)
+static void nvme_pci_post_scan(struct nvme_ctrl *ctrl)
 {
+	struct nvme_dev *dev = to_nvme_dev(ctrl);
 	struct nvme_queue *nvmeq;
 	int i;
 
@@ -1566,16 +1497,6 @@
 	}
 }
 
-static void nvme_dev_scan(struct work_struct *work)
-{
-	struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
-
-	if (!dev->tagset.tags)
-		return;
-	nvme_scan_namespaces(&dev->ctrl);
-	nvme_set_irq_hints(dev);
-}
-
 static void nvme_del_queue_end(struct request *req, int error)
 {
 	struct nvme_queue *nvmeq = req->end_io_data;
@@ -1591,7 +1512,13 @@
 	if (!error) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&nvmeq->q_lock, flags);
+		/*
+		 * We might be called with the AQ q_lock held
+		 * and the I/O queue q_lock should always
+		 * nest inside the AQ one.
+		 */
+		spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
+					SINGLE_DEPTH_NESTING);
 		nvme_process_cq(nvmeq);
 		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
 	}
@@ -1683,7 +1610,6 @@
 		nvme_free_queues(dev, dev->online_queues);
 	}
 
-	nvme_queue_scan(dev);
 	return 0;
 }
 
@@ -1696,7 +1622,6 @@
 	if (pci_enable_device_mem(pdev))
 		return result;
 
-	dev->entry[0].vector = pdev->irq;
 	pci_set_master(pdev);
 
 	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
@@ -1709,13 +1634,18 @@
 	}
 
 	/*
-	 * Some devices don't advertse INTx interrupts, pre-enable a single
-	 * MSIX vec for setup. We'll adjust this later.
+	 * Some devices and/or platforms don't advertise or work with INTx
+	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+	 * adjust this later.
 	 */
-	if (!pdev->irq) {
-		result = pci_enable_msix(pdev, dev->entry, 1);
-		if (result < 0)
-			goto disable;
+	if (pci_enable_msix(pdev, dev->entry, 1)) {
+		pci_enable_msi(pdev);
+		dev->entry[0].vector = pdev->irq;
+	}
+
+	if (!dev->entry[0].vector) {
+		result = -ENODEV;
+		goto disable;
 	}
 
 	cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1792,8 +1722,8 @@
 	}
 	nvme_pci_disable(dev);
 
-	for (i = dev->queue_count - 1; i >= 0; i--)
-		nvme_clear_queue(dev->queues[i]);
+	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
+	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
 	mutex_unlock(&dev->shutdown_lock);
 }
 
@@ -1849,7 +1779,7 @@
 	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
 	int result = -ENODEV;
 
-	if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
+	if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
 		goto out;
 
 	/*
@@ -1859,7 +1789,8 @@
 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
 		nvme_dev_disable(dev, false);
 
-	set_bit(NVME_CTRL_RESETTING, &dev->flags);
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+		goto out;
 
 	result = nvme_pci_enable(dev);
 	if (result)
@@ -1882,8 +1813,14 @@
 	if (result)
 		goto out;
 
-	dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
-	queue_work(nvme_workq, &dev->async_work);
+	/*
+	 * A controller that can not execute IO typically requires user
+	 * intervention to correct. For such degraded controllers, the driver
+	 * should not submit commands the user did not request, so skip
+	 * registering for asynchronous event notification on this condition.
+	 */
+	if (dev->online_queues > 1)
+		nvme_queue_async_events(&dev->ctrl);
 
 	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
 
@@ -1893,13 +1830,20 @@
 	 */
 	if (dev->online_queues < 2) {
 		dev_warn(dev->ctrl.device, "IO queues not created\n");
+		nvme_kill_queues(&dev->ctrl);
 		nvme_remove_namespaces(&dev->ctrl);
 	} else {
 		nvme_start_queues(&dev->ctrl);
 		nvme_dev_add(dev);
 	}
 
-	clear_bit(NVME_CTRL_RESETTING, &dev->flags);
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
+		dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+		goto out;
+	}
+
+	if (dev->online_queues > 1)
+		nvme_queue_scan(&dev->ctrl);
 	return;
 
  out:
@@ -1947,13 +1891,6 @@
 	return 0;
 }
 
-static bool nvme_pci_io_incapable(struct nvme_ctrl *ctrl)
-{
-	struct nvme_dev *dev = to_nvme_dev(ctrl);
-
-	return !dev->bar || dev->online_queues < 2;
-}
-
 static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 {
 	return nvme_reset(to_nvme_dev(ctrl));
@@ -1964,9 +1901,10 @@
 	.reg_read32		= nvme_pci_reg_read32,
 	.reg_write32		= nvme_pci_reg_write32,
 	.reg_read64		= nvme_pci_reg_read64,
-	.io_incapable		= nvme_pci_io_incapable,
 	.reset_ctrl		= nvme_pci_reset_ctrl,
 	.free_ctrl		= nvme_pci_free_ctrl,
+	.post_scan		= nvme_pci_post_scan,
+	.submit_async_event	= nvme_pci_submit_async_event,
 };
 
 static int nvme_dev_map(struct nvme_dev *dev)
@@ -2018,10 +1956,8 @@
 	if (result)
 		goto free;
 
-	INIT_WORK(&dev->scan_work, nvme_dev_scan);
 	INIT_WORK(&dev->reset_work, nvme_reset_work);
 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
-	INIT_WORK(&dev->async_work, nvme_async_event_work);
 	setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
 		(unsigned long)dev);
 	mutex_init(&dev->shutdown_lock);
@@ -2078,16 +2014,12 @@
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
 
-	del_timer_sync(&dev->watchdog_timer);
+	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
-	set_bit(NVME_CTRL_REMOVING, &dev->flags);
 	pci_set_drvdata(pdev, NULL);
-	flush_work(&dev->async_work);
-	flush_work(&dev->scan_work);
-	nvme_remove_namespaces(&dev->ctrl);
+	flush_work(&dev->reset_work);
 	nvme_uninit_ctrl(&dev->ctrl);
 	nvme_dev_disable(dev, true);
-	flush_work(&dev->reset_work);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
 	nvme_release_cmb(dev);
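
The reset and remove paths above now go through the controller state machine (NVME_CTRL_RESETTING, NVME_CTRL_LIVE, NVME_CTRL_DELETING) instead of ad-hoc bit flags; nvme_change_ctrl_state() validates and applies a transition atomically, so, for example, a reset can no longer be started on a controller that is already being deleted. A rough sketch of that kind of guarded transition, not the driver's actual state table:

    enum ctrl_state { CTRL_NEW, CTRL_LIVE, CTRL_RESETTING, CTRL_DELETING };

    struct ctrl {
            spinlock_t lock;
            enum ctrl_state state;
    };

    /* Returns true only if the transition was legal and has been applied. */
    static bool change_ctrl_state(struct ctrl *ctrl, enum ctrl_state new_state)
    {
            bool changed = false;

            spin_lock_irq(&ctrl->lock);
            switch (new_state) {
            case CTRL_RESETTING:
                    /* only a new or live controller may enter reset */
                    changed = (ctrl->state == CTRL_NEW ||
                               ctrl->state == CTRL_LIVE);
                    break;
            case CTRL_LIVE:
                    changed = (ctrl->state == CTRL_RESETTING);
                    break;
            case CTRL_DELETING:
                    changed = (ctrl->state != CTRL_DELETING);
                    break;
            default:
                    break;
            }
            if (changed)
                    ctrl->state = new_state;
            spin_unlock_irq(&ctrl->lock);

            return changed;
    }
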
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 8ba19bb..2bb3c57 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -94,7 +94,7 @@
 	if (ret)
 		goto close_banks;
 
-	while (val_size) {
+	while (val_size >= reg_size) {
 		if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
 			/* fill up non-data register */
 			*buf = 0;
@@ -103,7 +103,7 @@
 		}
 
 		buf++;
-		val_size--;
+		val_size -= reg_size;
 		offset += reg_size;
 	}
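
The change above makes the copy loop consume one whole register per iteration: with the old "val_size--" the loop ran once per requested byte while advancing the output pointer by a full register each time, overrunning the destination buffer whenever the request was not a multiple of the register size. The corrected stride handling in isolation (the OCOTP special-casing of non-data registers is omitted; names are illustrative):

    /*
     * Copy whole registers while at least reg_size bytes of the request
     * remain; a trailing partial register is simply not copied.
     */
    static void copy_regs(u32 *dst, size_t val_size, size_t reg_size,
                          u32 (*read_reg)(unsigned int offset))
    {
            unsigned int offset = 0;

            while (val_size >= reg_size) {
                    *dst++ = read_reg(offset);
                    val_size -= reg_size;   /* a register consumed, not a byte */
                    offset += reg_size;
            }
    }
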
 
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index e2a4841..b3bec3a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -112,4 +112,7 @@
 	  While this option is selected automatically when needed, you can
 	  enable it manually to improve device tree unit test coverage.
 
+config OF_NUMA
+	bool
+
 endif # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 156c072..bee3fa9 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -14,5 +14,6 @@
 obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
 obj-$(CONFIG_OF_RESOLVE)  += resolver.o
 obj-$(CONFIG_OF_OVERLAY) += overlay.o
+obj-$(CONFIG_OF_NUMA) += of_numa.o
 
 obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 91a469d..0a553c0 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -4,6 +4,7 @@
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
+#include <linux/pci.h>
 #include <linux/pci_regs.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -673,121 +674,6 @@
 }
 EXPORT_SYMBOL(of_get_address);
 
-#ifdef PCI_IOBASE
-struct io_range {
-	struct list_head list;
-	phys_addr_t start;
-	resource_size_t size;
-};
-
-static LIST_HEAD(io_range_list);
-static DEFINE_SPINLOCK(io_range_lock);
-#endif
-
-/*
- * Record the PCI IO range (expressed as CPU physical address + size).
- * Return a negative value if an error has occured, zero otherwise
- */
-int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
-{
-	int err = 0;
-
-#ifdef PCI_IOBASE
-	struct io_range *range;
-	resource_size_t allocated_size = 0;
-
-	/* check if the range hasn't been previously recorded */
-	spin_lock(&io_range_lock);
-	list_for_each_entry(range, &io_range_list, list) {
-		if (addr >= range->start && addr + size <= range->start + size) {
-			/* range already registered, bail out */
-			goto end_register;
-		}
-		allocated_size += range->size;
-	}
-
-	/* range not registed yet, check for available space */
-	if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
-		/* if it's too big check if 64K space can be reserved */
-		if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
-			err = -E2BIG;
-			goto end_register;
-		}
-
-		size = SZ_64K;
-		pr_warn("Requested IO range too big, new size set to 64K\n");
-	}
-
-	/* add the range to the list */
-	range = kzalloc(sizeof(*range), GFP_ATOMIC);
-	if (!range) {
-		err = -ENOMEM;
-		goto end_register;
-	}
-
-	range->start = addr;
-	range->size = size;
-
-	list_add_tail(&range->list, &io_range_list);
-
-end_register:
-	spin_unlock(&io_range_lock);
-#endif
-
-	return err;
-}
-
-phys_addr_t pci_pio_to_address(unsigned long pio)
-{
-	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
-
-#ifdef PCI_IOBASE
-	struct io_range *range;
-	resource_size_t allocated_size = 0;
-
-	if (pio > IO_SPACE_LIMIT)
-		return address;
-
-	spin_lock(&io_range_lock);
-	list_for_each_entry(range, &io_range_list, list) {
-		if (pio >= allocated_size && pio < allocated_size + range->size) {
-			address = range->start + pio - allocated_size;
-			break;
-		}
-		allocated_size += range->size;
-	}
-	spin_unlock(&io_range_lock);
-#endif
-
-	return address;
-}
-
-unsigned long __weak pci_address_to_pio(phys_addr_t address)
-{
-#ifdef PCI_IOBASE
-	struct io_range *res;
-	resource_size_t offset = 0;
-	unsigned long addr = -1;
-
-	spin_lock(&io_range_lock);
-	list_for_each_entry(res, &io_range_list, list) {
-		if (address >= res->start && address < res->start + res->size) {
-			addr = address - res->start + offset;
-			break;
-		}
-		offset += res->size;
-	}
-	spin_unlock(&io_range_lock);
-
-	return addr;
-#else
-	if (address > IO_SPACE_LIMIT)
-		return (unsigned long)-1;
-
-	return (unsigned long) address;
-#endif
-}
-
 static int __of_address_to_resource(struct device_node *dev,
 		const __be32 *addrp, u64 size, unsigned int flags,
 		const char *name, struct resource *r)
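
The helpers deleted above (pci_register_io_range(), pci_pio_to_address() and pci_address_to_pio()) kept a global list of PCI I/O ranges and translated between CPU physical addresses and logical port numbers by stacking the registered ranges back to back; this hunk only removes them, and the equivalent bookkeeping presumably moves into the PCI core elsewhere in this series. A simplified sketch of the translation idea, using a plain array rather than the kernel's locked list:

    struct io_range {
            unsigned long long start;       /* CPU physical base */
            unsigned long size;
    };

    /*
     * Logical ports are assigned by concatenation: the Nth range covers
     * ports [sum of earlier sizes, sum of earlier sizes + size).
     */
    static long long pio_to_phys(const struct io_range *ranges, int nr,
                                 unsigned long pio)
    {
            unsigned long offset = 0;
            int i;

            for (i = 0; i < nr; i++) {
                    if (pio >= offset && pio < offset + ranges[i].size)
                            return ranges[i].start + (pio - offset);
                    offset += ranges[i].size;
            }
            return -1;      /* not a registered port */
    }
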
diff --git a/drivers/of/device.c b/drivers/of/device.c
index e5f47ce..fd5cfad 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -88,7 +88,7 @@
 	int ret;
 	bool coherent;
 	unsigned long offset;
-	struct iommu_ops *iommu;
+	const struct iommu_ops *iommu;
 
 	/*
 	 * Set default coherent_dma_mask to 32 bit.  Drivers are expected to
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 8453f08..e051e1b 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -41,8 +41,8 @@
 	return -EINVAL;
 }
 
-static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
-				   u32 addr)
+static void of_mdiobus_register_phy(struct mii_bus *mdio,
+				    struct device_node *child, u32 addr)
 {
 	struct phy_device *phy;
 	bool is_c45;
@@ -56,8 +56,8 @@
 		phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
 	else
 		phy = get_phy_device(mdio, addr, is_c45);
-	if (IS_ERR_OR_NULL(phy))
-		return 1;
+	if (IS_ERR(phy))
+		return;
 
 	rc = irq_of_parse_and_map(child, 0);
 	if (rc > 0) {
@@ -81,25 +81,22 @@
 	if (rc) {
 		phy_device_free(phy);
 		of_node_put(child);
-		return 1;
+		return;
 	}
 
 	dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
 		child->name, addr);
-
-	return 0;
 }
 
-static int of_mdiobus_register_device(struct mii_bus *mdio,
-				      struct device_node *child,
-				      u32 addr)
+static void of_mdiobus_register_device(struct mii_bus *mdio,
+				       struct device_node *child, u32 addr)
 {
 	struct mdio_device *mdiodev;
 	int rc;
 
 	mdiodev = mdio_device_create(mdio, addr);
 	if (IS_ERR(mdiodev))
-		return 1;
+		return;
 
 	/* Associate the OF node with the device structure so it
 	 * can be looked up later.
@@ -112,13 +109,11 @@
 	if (rc) {
 		mdio_device_free(mdiodev);
 		of_node_put(child);
-		return 1;
+		return;
 	}
 
 	dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
 		child->name, addr);
-
-	return 0;
 }
 
 int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
@@ -214,6 +209,10 @@
 	bool scanphys = false;
 	int addr, rc;
 
+	/* Do not continue if the node is disabled */
+	if (!of_device_is_available(np))
+		return -ENODEV;
+
 	/* Mask out all PHYs from auto probing.  Instead the PHYs listed in
 	 * the device tree are populated after the bus has been registered */
 	mdio->phy_mask = ~0;
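
The new of_device_is_available() check refuses to register an MDIO bus whose node carries status = "disabled"; the same idea applies one level down, where only enabled child nodes should produce devices. A small sketch of that pattern, with the actual registration elided:

    #include <linux/of.h>

    static int register_enabled_children(struct device_node *np)
    {
            struct device_node *child;

            if (!of_device_is_available(np))
                    return -ENODEV;         /* whole bus disabled in the DT */

            /* iterates only children whose status is "okay" or absent */
            for_each_available_child_of_node(np, child) {
                    /* ... create and register a device for 'child' ... */
            }
            return 0;
    }
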
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
new file mode 100644
index 0000000..0f2784b
--- /dev/null
+++ b/drivers/of/of_numa.c
@@ -0,0 +1,211 @@
+/*
+ * OF NUMA Parsing support.
+ *
+ * Copyright (C) 2015 - 2016 Cavium Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/nodemask.h>
+
+#include <asm/numa.h>
+
+/* define default numa node to 0 */
+#define DEFAULT_NODE 0
+
+/*
+ * Even though we connect cpus to numa domains later in SMP
+ * init, we need to know the node ids now for all cpus.
+ */
+static void __init of_numa_parse_cpu_nodes(void)
+{
+	u32 nid;
+	int r;
+	struct device_node *cpus;
+	struct device_node *np = NULL;
+
+	cpus = of_find_node_by_path("/cpus");
+	if (!cpus)
+		return;
+
+	for_each_child_of_node(cpus, np) {
+		/* Skip things that are not CPUs */
+		if (of_node_cmp(np->type, "cpu") != 0)
+			continue;
+
+		r = of_property_read_u32(np, "numa-node-id", &nid);
+		if (r)
+			continue;
+
+		pr_debug("NUMA: CPU on %u\n", nid);
+		if (nid >= MAX_NUMNODES)
+			pr_warn("NUMA: Node id %u exceeds maximum value\n",
+				nid);
+		else
+			node_set(nid, numa_nodes_parsed);
+	}
+}
+
+static int __init of_numa_parse_memory_nodes(void)
+{
+	struct device_node *np = NULL;
+	struct resource rsrc;
+	u32 nid;
+	int r = 0;
+
+	for (;;) {
+		np = of_find_node_by_type(np, "memory");
+		if (!np)
+			break;
+
+		r = of_property_read_u32(np, "numa-node-id", &nid);
+		if (r == -EINVAL)
+			/*
+			 * property doesn't exist if -EINVAL, continue
+			 * looking for more memory nodes with
+			 * "numa-node-id" property
+			 */
+			continue;
+		else if (r)
+			/* some other error */
+			break;
+
+		r = of_address_to_resource(np, 0, &rsrc);
+		if (r) {
+			pr_err("NUMA: bad reg property in memory node\n");
+			break;
+		}
+
+		pr_debug("NUMA:  base = %llx len = %llx, node = %u\n",
+			 rsrc.start, rsrc.end - rsrc.start + 1, nid);
+
+		r = numa_add_memblk(nid, rsrc.start,
+				    rsrc.end - rsrc.start + 1);
+		if (r)
+			break;
+	}
+	of_node_put(np);
+
+	return r;
+}
+
+static int __init of_numa_parse_distance_map_v1(struct device_node *map)
+{
+	const __be32 *matrix;
+	int entry_count;
+	int i;
+
+	pr_info("NUMA: parsing numa-distance-map-v1\n");
+
+	matrix = of_get_property(map, "distance-matrix", NULL);
+	if (!matrix) {
+		pr_err("NUMA: No distance-matrix property in distance-map\n");
+		return -EINVAL;
+	}
+
+	entry_count = of_property_count_u32_elems(map, "distance-matrix");
+	if (entry_count <= 0) {
+		pr_err("NUMA: Invalid distance-matrix\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i + 2 < entry_count; i += 3) {
+		u32 nodea, nodeb, distance;
+
+		nodea = of_read_number(matrix, 1);
+		matrix++;
+		nodeb = of_read_number(matrix, 1);
+		matrix++;
+		distance = of_read_number(matrix, 1);
+		matrix++;
+
+		numa_set_distance(nodea, nodeb, distance);
+		pr_debug("NUMA:  distance[node%d -> node%d] = %d\n",
+			 nodea, nodeb, distance);
+
+		/* Set default distance of node B->A same as A->B */
+		if (nodeb > nodea)
+			numa_set_distance(nodeb, nodea, distance);
+	}
+
+	return 0;
+}
+
+static int __init of_numa_parse_distance_map(void)
+{
+	int ret = 0;
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL,
+				     "numa-distance-map-v1");
+	if (np)
+		ret = of_numa_parse_distance_map_v1(np);
+
+	of_node_put(np);
+	return ret;
+}
+
+int of_node_to_nid(struct device_node *device)
+{
+	struct device_node *np;
+	u32 nid;
+	int r = -ENODATA;
+
+	np = of_node_get(device);
+
+	while (np) {
+		struct device_node *parent;
+
+		r = of_property_read_u32(np, "numa-node-id", &nid);
+		/*
+		 * -EINVAL indicates the property was not found, and
+		 *  we walk up the tree trying to find a parent with a
+		 *  "numa-node-id".  Any other type of error indicates
+		 *  a bad device tree and we give up.
+		 */
+		if (r != -EINVAL)
+			break;
+
+		parent = of_get_parent(np);
+		of_node_put(np);
+		np = parent;
+	}
+	if (np && r)
+		pr_warn("NUMA: Invalid \"numa-node-id\" property in node %s\n",
+			np->name);
+	of_node_put(np);
+
+	if (!r) {
+		if (nid >= MAX_NUMNODES)
+			pr_warn("NUMA: Node id %u exceeds maximum value\n",
+				nid);
+		else
+			return nid;
+	}
+
+	return NUMA_NO_NODE;
+}
+EXPORT_SYMBOL(of_node_to_nid);
+
+int __init of_numa_init(void)
+{
+	int r;
+
+	of_numa_parse_cpu_nodes();
+	r = of_numa_parse_memory_nodes();
+	if (r)
+		return r;
+	return of_numa_parse_distance_map();
+}
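
of_node_to_nid() walks from the given node towards the root until it finds a "numa-node-id" property, so devices inherit the node of their parent bus. A typical consumer would use the result (directly, or via dev_to_node()) to place allocations close to the device; a minimal, hypothetical example:

    #include <linux/of.h>
    #include <linux/slab.h>

    /* Allocate a driver context on the NUMA node nearest to the device. */
    static void *alloc_ctx_near(struct device_node *np, size_t size)
    {
            int nid = of_node_to_nid(np);   /* NUMA_NO_NODE if nothing found */

            return kzalloc_node(size, GFP_KERNEL, nid);
    }
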
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 8d103e4..16e8daf 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -297,19 +297,37 @@
 static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *lookup,
 				 struct device_node *np)
 {
+	const struct of_dev_auxdata *auxdata;
 	struct resource res;
+	int compatible = 0;
 
 	if (!lookup)
 		return NULL;
 
-	for(; lookup->compatible != NULL; lookup++) {
-		if (!of_device_is_compatible(np, lookup->compatible))
+	auxdata = lookup;
+	for (; auxdata->compatible; auxdata++) {
+		if (!of_device_is_compatible(np, auxdata->compatible))
 			continue;
+		compatible++;
 		if (!of_address_to_resource(np, 0, &res))
-			if (res.start != lookup->phys_addr)
+			if (res.start != auxdata->phys_addr)
 				continue;
-		pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
-		return lookup;
+		pr_debug("%s: devname=%s\n", np->full_name, auxdata->name);
+		return auxdata;
+	}
+
+	if (!compatible)
+		return NULL;
+
+	/* Try compatible match if no phys_addr and name are specified */
+	auxdata = lookup;
+	for (; auxdata->compatible; auxdata++) {
+		if (!of_device_is_compatible(np, auxdata->compatible))
+			continue;
+		if (!auxdata->phys_addr && !auxdata->name) {
+			pr_debug("%s: compatible match\n", np->full_name);
+			return auxdata;
+		}
 	}
 
 	return NULL;
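
The second pass added above lets an auxdata entry that specifies only a compatible string, with no unit address and no name, serve as a fallback when none of the address-specific entries matched. What such a lookup table looks like, with made-up names and addresses; it would then be passed as the lookup argument of of_platform_populate():

    #include <linux/of_platform.h>

    static struct of_dev_auxdata example_auxdata[] __initdata = {
            /* exact match: compatible plus the device's unit address */
            OF_DEV_AUXDATA("vendor,uart", 0x10000000, "uart.0", NULL),
            /* compatible-only entry, picked up by the new second pass */
            { .compatible = "vendor,codec" },
            { /* sentinel */ }
    };
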
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index e986e6e..c1ebbfb 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1692,13 +1692,7 @@
 
 #if IS_BUILTIN(CONFIG_I2C_MUX)
 
-struct unittest_i2c_mux_data {
-	int nchans;
-	struct i2c_adapter *adap[];
-};
-
-static int unittest_i2c_mux_select_chan(struct i2c_adapter *adap,
-			       void *client, u32 chan)
+static int unittest_i2c_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
 {
 	return 0;
 }
@@ -1706,11 +1700,11 @@
 static int unittest_i2c_mux_probe(struct i2c_client *client,
 		const struct i2c_device_id *id)
 {
-	int ret, i, nchans, size;
+	int ret, i, nchans;
 	struct device *dev = &client->dev;
 	struct i2c_adapter *adap = to_i2c_adapter(dev->parent);
 	struct device_node *np = client->dev.of_node, *child;
-	struct unittest_i2c_mux_data *stm;
+	struct i2c_mux_core *muxc;
 	u32 reg, max_reg;
 
 	dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
@@ -1734,25 +1728,20 @@
 		return -EINVAL;
 	}
 
-	size = offsetof(struct unittest_i2c_mux_data, adap[nchans]);
-	stm = devm_kzalloc(dev, size, GFP_KERNEL);
-	if (!stm) {
-		dev_err(dev, "Out of memory\n");
+	muxc = i2c_mux_alloc(adap, dev, nchans, 0, 0,
+			     unittest_i2c_mux_select_chan, NULL);
+	if (!muxc)
 		return -ENOMEM;
-	}
-	stm->nchans = nchans;
 	for (i = 0; i < nchans; i++) {
-		stm->adap[i] = i2c_add_mux_adapter(adap, dev, client,
-				0, i, 0, unittest_i2c_mux_select_chan, NULL);
-		if (!stm->adap[i]) {
+		ret = i2c_mux_add_adapter(muxc, 0, i, 0);
+		if (ret) {
 			dev_err(dev, "Failed to register mux #%d\n", i);
-			for (i--; i >= 0; i--)
-				i2c_del_mux_adapter(stm->adap[i]);
+			i2c_mux_del_adapters(muxc);
 			return -ENODEV;
 		}
 	}
 
-	i2c_set_clientdata(client, stm);
+	i2c_set_clientdata(client, muxc);
 
 	return 0;
 };
@@ -1761,12 +1750,10 @@
 {
 	struct device *dev = &client->dev;
 	struct device_node *np = client->dev.of_node;
-	struct unittest_i2c_mux_data *stm = i2c_get_clientdata(client);
-	int i;
+	struct i2c_mux_core *muxc = i2c_get_clientdata(client);
 
 	dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
-	for (i = stm->nchans - 1; i >= 0; i--)
-		i2c_del_mux_adapter(stm->adap[i]);
+	i2c_mux_del_adapters(muxc);
 	return 0;
 }
 
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b48ac630..a0e5260 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -239,8 +239,8 @@
 {
 	struct inode *root_inode;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = OPROFILEFS_MAGIC;
 	sb->s_op = &s_ops;
 	sb->s_time_gran = 1;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 209292e..56389be 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -83,6 +83,9 @@
 config PCI_ATS
 	bool
 
+config PCI_ECAM
+	bool
+
 config PCI_IOV
 	bool "PCI IOV support"
 	depends on PCI
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 2154092..1fa6925 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -55,6 +55,8 @@
 
 obj-$(CONFIG_PCI_STUB) += pci-stub.o
 
+obj-$(CONFIG_PCI_ECAM) += ecam.o
+
 obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
 
 obj-$(CONFIG_OF) += of.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 01b9d0a..d11cdbb 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -275,6 +275,19 @@
 }
 EXPORT_SYMBOL(pci_write_vpd);
 
+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev:	pci device struct
+ * @len:	size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+	if (!dev->vpd || !dev->vpd->ops)
+		return -ENODEV;
+	return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
+
 #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
 
 /**
@@ -498,9 +511,23 @@
 	return ret ? ret : count;
 }
 
+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
+{
+	struct pci_vpd *vpd = dev->vpd;
+
+	if (len == 0 || len > PCI_VPD_MAX_SIZE)
+		return -EIO;
+
+	vpd->valid = 1;
+	vpd->len = len;
+
+	return 0;
+}
+
 static const struct pci_vpd_ops pci_vpd_ops = {
 	.read = pci_vpd_read,
 	.write = pci_vpd_write,
+	.set_size = pci_vpd_set_size,
 };
 
 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@
 	return ret;
 }
 
+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+	struct pci_dev *tdev = pci_get_slot(dev->bus,
+					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+	int ret;
+
+	if (!tdev)
+		return -ENODEV;
+
+	ret = pci_set_vpd_size(tdev, len);
+	pci_dev_put(tdev);
+	return ret;
+}
+
 static const struct pci_vpd_ops pci_vpd_f0_ops = {
 	.read = pci_vpd_f0_read,
 	.write = pci_vpd_f0_write,
+	.set_size = pci_vpd_f0_set_size,
 };
 
 int pci_vpd_init(struct pci_dev *dev)
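
pci_set_vpd_size() exists for devices whose VPD lacks a proper end tag, so the core cannot size it by parsing; a quirk or driver that knows the real length simply pins it. A hedged example of a driver doing so from probe (the device and the 1 KB figure are made up):

    #include <linux/pci.h>

    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int ret;

            /* this device's VPD is known to be 1 KB but has no end tag */
            ret = pci_set_vpd_size(pdev, 1024);
            if (ret)
                    dev_warn(&pdev->dev, "could not set VPD size: %d\n", ret);

            return 0;
    }
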
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 6c9f546..dd7cdbe 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -294,7 +294,7 @@
 
 	dev->match_driver = true;
 	retval = device_attach(&dev->dev);
-	if (retval < 0) {
+	if (retval < 0 && retval != -EPROBE_DEFER) {
 		dev_warn(&dev->dev, "device attach failed (%d)\n", retval);
 		pci_proc_detach_device(dev);
 		pci_remove_sysfs_dev_files(dev);
@@ -324,7 +324,9 @@
 	}
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
-		BUG_ON(!dev->is_added);
+		/* Skip if device attach failed */
+		if (!dev->is_added)
+			continue;
 		child = dev->subordinate;
 		if (child)
 			pci_bus_add_devices(child);
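
A device_attach() return of -EPROBE_DEFER is not a failure: the driver asked to be retried once a resource it depends on shows up, and while it waits dev->is_added legitimately stays clear, which is why the BUG_ON above becomes a plain skip. A generic sketch of a probe that defers, with hypothetical names:

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk = devm_clk_get(&pdev->dev, "core");

            /*
             * A clock provider that has not probed yet yields -EPROBE_DEFER;
             * returning it asks the driver core to retry this probe later.
             */
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* ... remainder of probe ... */
            return 0;
    }
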
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
new file mode 100644
index 0000000..f9832ad
--- /dev/null
+++ b/drivers/pci/ecam.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "ecam.h"
+
+/*
+ * On 64-bit systems, we do a single ioremap for the whole config space
+ * since we have enough virtual address range available.  On 32-bit, we
+ * ioremap the config space for each bus individually.
+ */
+static const bool per_bus_mapping = !config_enabled(CONFIG_64BIT);
+
+/*
+ * Create a PCI config space window
+ *  - reserve mem region
+ *  - alloc struct pci_config_window with space for all mappings
+ *  - ioremap the config space
+ */
+struct pci_config_window *pci_ecam_create(struct device *dev,
+		struct resource *cfgres, struct resource *busr,
+		struct pci_ecam_ops *ops)
+{
+	struct pci_config_window *cfg;
+	unsigned int bus_range, bus_range_max, bsz;
+	struct resource *conflict;
+	int i, err;
+
+	if (busr->start > busr->end)
+		return ERR_PTR(-EINVAL);
+
+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg)
+		return ERR_PTR(-ENOMEM);
+
+	cfg->ops = ops;
+	cfg->busr.start = busr->start;
+	cfg->busr.end = busr->end;
+	cfg->busr.flags = IORESOURCE_BUS;
+	bus_range = resource_size(&cfg->busr);
+	bus_range_max = resource_size(cfgres) >> ops->bus_shift;
+	if (bus_range > bus_range_max) {
+		bus_range = bus_range_max;
+		cfg->busr.end = busr->start + bus_range - 1;
+		dev_warn(dev, "ECAM area %pR can only accommodate %pR (reduced from %pR desired)\n",
+			 cfgres, &cfg->busr, busr);
+	}
+	bsz = 1 << ops->bus_shift;
+
+	cfg->res.start = cfgres->start;
+	cfg->res.end = cfgres->end;
+	cfg->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+	cfg->res.name = "PCI ECAM";
+
+	conflict = request_resource_conflict(&iomem_resource, &cfg->res);
+	if (conflict) {
+		err = -EBUSY;
+		dev_err(dev, "can't claim ECAM area %pR: address conflict with %s %pR\n",
+			&cfg->res, conflict->name, conflict);
+		goto err_exit;
+	}
+
+	if (per_bus_mapping) {
+		cfg->winp = kcalloc(bus_range, sizeof(*cfg->winp), GFP_KERNEL);
+		if (!cfg->winp)
+			goto err_exit_malloc;
+		for (i = 0; i < bus_range; i++) {
+			cfg->winp[i] = ioremap(cfgres->start + i * bsz, bsz);
+			if (!cfg->winp[i])
+				goto err_exit_iomap;
+		}
+	} else {
+		cfg->win = ioremap(cfgres->start, bus_range * bsz);
+		if (!cfg->win)
+			goto err_exit_iomap;
+	}
+
+	if (ops->init) {
+		err = ops->init(dev, cfg);
+		if (err)
+			goto err_exit;
+	}
+	dev_info(dev, "ECAM at %pR for %pR\n", &cfg->res, &cfg->busr);
+	return cfg;
+
+err_exit_iomap:
+	dev_err(dev, "ECAM ioremap failed\n");
+err_exit_malloc:
+	err = -ENOMEM;
+err_exit:
+	pci_ecam_free(cfg);
+	return ERR_PTR(err);
+}
+
+void pci_ecam_free(struct pci_config_window *cfg)
+{
+	int i;
+
+	if (per_bus_mapping) {
+		if (cfg->winp) {
+			for (i = 0; i < resource_size(&cfg->busr); i++)
+				if (cfg->winp[i])
+					iounmap(cfg->winp[i]);
+			kfree(cfg->winp);
+		}
+	} else {
+		if (cfg->win)
+			iounmap(cfg->win);
+	}
+	if (cfg->res.parent)
+		release_resource(&cfg->res);
+	kfree(cfg);
+}
+
+/*
+ * Function to implement the pci_ops ->map_bus method
+ */
+void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
+			       int where)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	unsigned int devfn_shift = cfg->ops->bus_shift - 8;
+	unsigned int busn = bus->number;
+	void __iomem *base;
+
+	if (busn < cfg->busr.start || busn > cfg->busr.end)
+		return NULL;
+
+	busn -= cfg->busr.start;
+	if (per_bus_mapping)
+		base = cfg->winp[busn];
+	else
+		base = cfg->win + (busn << cfg->ops->bus_shift);
+	return base + (devfn << devfn_shift) + where;
+}
+
+/* ECAM ops */
+struct pci_ecam_ops pci_generic_ecam_ops = {
+	.bus_shift	= 20,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
+		.read		= pci_generic_config_read,
+		.write		= pci_generic_config_write,
+	}
+};
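
With bus_shift = 20 this is the standard ECAM layout: 1 MB of config space per bus and 4 KB per function, so the offset computed by pci_ecam_map_bus() reduces to the arithmetic below, shown as a standalone helper purely for illustration (the CAM variant used elsewhere in this series has bus_shift = 16):

    /* ECAM: offset = (bus << 20) | (devfn << 12) | where */
    static unsigned long ecam_offset(unsigned int bus, unsigned int devfn,
                                     unsigned int where)
    {
            const unsigned int bus_shift = 20;
            const unsigned int devfn_shift = bus_shift - 8;  /* 12 */

            return ((unsigned long)bus << bus_shift) |
                   (devfn << devfn_shift) | where;
    }
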
diff --git a/drivers/pci/ecam.h b/drivers/pci/ecam.h
new file mode 100644
index 0000000..9878beb
--- /dev/null
+++ b/drivers/pci/ecam.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+#ifndef DRIVERS_PCI_ECAM_H
+#define DRIVERS_PCI_ECAM_H
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+/*
+ * struct to hold pci ops and bus shift of the config window
+ * for a PCI controller.
+ */
+struct pci_config_window;
+struct pci_ecam_ops {
+	unsigned int			bus_shift;
+	struct pci_ops			pci_ops;
+	int				(*init)(struct device *,
+						struct pci_config_window *);
+};
+
+/*
+ * struct to hold the mappings of a config space window. This
+ * is expected to be used as sysdata for PCI controllers that
+ * use ECAM.
+ */
+struct pci_config_window {
+	struct resource			res;
+	struct resource			busr;
+	void				*priv;
+	struct pci_ecam_ops		*ops;
+	union {
+		void __iomem		*win;	/* 64-bit single mapping */
+		void __iomem		**winp; /* 32-bit per-bus mapping */
+	};
+};
+
+/* create and free pci_config_window */
+struct pci_config_window *pci_ecam_create(struct device *dev,
+		struct resource *cfgres, struct resource *busr,
+		struct pci_ecam_ops *ops);
+void pci_ecam_free(struct pci_config_window *cfg);
+
+/* map_bus when ->sysdata is an instance of pci_config_window */
+void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
+			       int where);
+/* default ECAM ops */
+extern struct pci_ecam_ops pci_generic_ecam_ops;
+
+#ifdef CONFIG_PCI_HOST_GENERIC
+/* for DT-based PCI controllers that support ECAM */
+int pci_host_common_probe(struct platform_device *pdev,
+			  struct pci_ecam_ops *ops);
+#endif
+#endif
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 7a0780d..5d2374e 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -69,14 +69,17 @@
 	  There are 3 internal PCI controllers available with a single
 	  built-in EHCI/OHCI host controller present on each one.
 
-config PCI_RCAR_GEN2_PCIE
+config PCIE_RCAR
 	bool "Renesas R-Car PCIe controller"
 	depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
+	select PCI_MSI
+	select PCI_MSI_IRQ_DOMAIN
 	help
-	  Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
+	  Say Y here if you want PCIe controller support on R-Car SoCs.
 
 config PCI_HOST_COMMON
 	bool
+	select PCI_ECAM
 
 config PCI_HOST_GENERIC
 	bool "Generic PCI host controller"
@@ -231,4 +234,15 @@
 	help
 	  Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
 
+config PCIE_ARMADA_8K
+	bool "Marvell Armada-8K PCIe controller"
+	depends on ARCH_MVEBU
+	select PCIE_DW
+	select PCIEPORTBUS
+	help
+	  Say Y here if you want to enable PCIe controller support on
+	  Armada-8K SoCs. The PCIe controller on Armada-8K is based on
+	  DesignWare hardware, so this driver re-uses the DesignWare
+	  core functions.
+
 endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index d85b5fa..9c8698e 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -7,7 +7,7 @@
 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
 obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
 obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
-obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
+obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
 obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
 obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
@@ -28,3 +28,4 @@
 obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
 obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
 obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
+obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 2ca3a1f..f441130 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -142,13 +142,13 @@
 
 static void dra7xx_pcie_host_init(struct pcie_port *pp)
 {
-	dw_pcie_setup_rc(pp);
-
 	pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
 	pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
 	pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
 	pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
 
+	dw_pcie_setup_rc(pp);
+
 	dra7xx_pcie_establish_link(pp);
 	if (IS_ENABLED(CONFIG_PCI_MSI))
 		dw_pcie_msi_init(pp);
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index e9f850f..8cba7ab 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -22,27 +22,21 @@
 #include <linux/of_pci.h>
 #include <linux/platform_device.h>
 
-#include "pci-host-common.h"
+#include "../ecam.h"
 
-static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
-{
-	pci_free_resource_list(&pci->resources);
-}
-
-static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
+static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
+		       struct list_head *resources, struct resource **bus_range)
 {
 	int err, res_valid = 0;
-	struct device *dev = pci->host.dev.parent;
 	struct device_node *np = dev->of_node;
 	resource_size_t iobase;
 	struct resource_entry *win;
 
-	err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
-					       &iobase);
+	err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
 	if (err)
 		return err;
 
-	resource_list_for_each_entry(win, &pci->resources) {
+	resource_list_for_each_entry(win, resources) {
 		struct resource *parent, *res = win->res;
 
 		switch (resource_type(res)) {
@@ -60,7 +54,7 @@
 			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
 			break;
 		case IORESOURCE_BUS:
-			pci->cfg.bus_range = res;
+			*bus_range = res;
 		default:
 			continue;
 		}
@@ -79,65 +73,60 @@
 	return 0;
 
 out_release_res:
-	gen_pci_release_of_pci_ranges(pci);
 	return err;
 }
 
-static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
+static void gen_pci_unmap_cfg(void *ptr)
+{
+	pci_ecam_free((struct pci_config_window *)ptr);
+}
+
+static struct pci_config_window *gen_pci_init(struct device *dev,
+		struct list_head *resources, struct pci_ecam_ops *ops)
 {
 	int err;
-	u8 bus_max;
-	resource_size_t busn;
-	struct resource *bus_range;
-	struct device *dev = pci->host.dev.parent;
-	struct device_node *np = dev->of_node;
-	u32 sz = 1 << pci->cfg.ops->bus_shift;
+	struct resource cfgres;
+	struct resource *bus_range = NULL;
+	struct pci_config_window *cfg;
 
-	err = of_address_to_resource(np, 0, &pci->cfg.res);
+	/* Parse our PCI ranges and request their resources */
+	err = gen_pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
+	if (err)
+		goto err_out;
+
+	err = of_address_to_resource(dev->of_node, 0, &cfgres);
 	if (err) {
 		dev_err(dev, "missing \"reg\" property\n");
-		return err;
+		goto err_out;
 	}
 
-	/* Limit the bus-range to fit within reg */
-	bus_max = pci->cfg.bus_range->start +
-		  (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1;
-	pci->cfg.bus_range->end = min_t(resource_size_t,
-					pci->cfg.bus_range->end, bus_max);
-
-	pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range),
-				    sizeof(*pci->cfg.win), GFP_KERNEL);
-	if (!pci->cfg.win)
-		return -ENOMEM;
-
-	/* Map our Configuration Space windows */
-	if (!devm_request_mem_region(dev, pci->cfg.res.start,
-				     resource_size(&pci->cfg.res),
-				     "Configuration Space"))
-		return -ENOMEM;
-
-	bus_range = pci->cfg.bus_range;
-	for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
-		u32 idx = busn - bus_range->start;
-
-		pci->cfg.win[idx] = devm_ioremap(dev,
-						 pci->cfg.res.start + idx * sz,
-						 sz);
-		if (!pci->cfg.win[idx])
-			return -ENOMEM;
+	cfg = pci_ecam_create(dev, &cfgres, bus_range, ops);
+	if (IS_ERR(cfg)) {
+		err = PTR_ERR(cfg);
+		goto err_out;
 	}
 
-	return 0;
+	err = devm_add_action(dev, gen_pci_unmap_cfg, cfg);
+	if (err) {
+		gen_pci_unmap_cfg(cfg);
+		goto err_out;
+	}
+	return cfg;
+
+err_out:
+	pci_free_resource_list(resources);
+	return ERR_PTR(err);
 }
 
 int pci_host_common_probe(struct platform_device *pdev,
-			  struct gen_pci *pci)
+			  struct pci_ecam_ops *ops)
 {
-	int err;
 	const char *type;
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct pci_bus *bus, *child;
+	struct pci_config_window *cfg;
+	struct list_head resources;
 
 	type = of_get_property(np, "device_type", NULL);
 	if (!type || strcmp(type, "pci")) {
@@ -147,29 +136,18 @@
 
 	of_pci_check_probe_only();
 
-	pci->host.dev.parent = dev;
-	INIT_LIST_HEAD(&pci->host.windows);
-	INIT_LIST_HEAD(&pci->resources);
-
-	/* Parse our PCI ranges and request their resources */
-	err = gen_pci_parse_request_of_pci_ranges(pci);
-	if (err)
-		return err;
-
 	/* Parse and map our Configuration Space windows */
-	err = gen_pci_parse_map_cfg_windows(pci);
-	if (err) {
-		gen_pci_release_of_pci_ranges(pci);
-		return err;
-	}
+	INIT_LIST_HEAD(&resources);
+	cfg = gen_pci_init(dev, &resources, ops);
+	if (IS_ERR(cfg))
+		return PTR_ERR(cfg);
 
 	/* Do not reassign resources if probe only */
 	if (!pci_has_flag(PCI_PROBE_ONLY))
 		pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
 
-
-	bus = pci_scan_root_bus(dev, pci->cfg.bus_range->start,
-				&pci->cfg.ops->ops, pci, &pci->resources);
+	bus = pci_scan_root_bus(dev, cfg->busr.start, &ops->pci_ops, cfg,
+				&resources);
 	if (!bus) {
 		dev_err(dev, "Scanning rootbus failed");
 		return -ENODEV;
diff --git a/drivers/pci/host/pci-host-common.h b/drivers/pci/host/pci-host-common.h
deleted file mode 100644
index 09f3fa0..0000000
--- a/drivers/pci/host/pci-host-common.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- * Copyright (C) 2014 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#ifndef _PCI_HOST_COMMON_H
-#define _PCI_HOST_COMMON_H
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-struct gen_pci_cfg_bus_ops {
-	u32 bus_shift;
-	struct pci_ops ops;
-};
-
-struct gen_pci_cfg_windows {
-	struct resource				res;
-	struct resource				*bus_range;
-	void __iomem				**win;
-
-	struct gen_pci_cfg_bus_ops		*ops;
-};
-
-struct gen_pci {
-	struct pci_host_bridge			host;
-	struct gen_pci_cfg_windows		cfg;
-	struct list_head			resources;
-};
-
-int pci_host_common_probe(struct platform_device *pdev,
-			  struct gen_pci *pci);
-
-#endif /* _PCI_HOST_COMMON_H */
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index e8aa78f..6eaceab 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -25,41 +25,12 @@
 #include <linux/of_pci.h>
 #include <linux/platform_device.h>
 
-#include "pci-host-common.h"
+#include "../ecam.h"
 
-static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
-					     unsigned int devfn,
-					     int where)
-{
-	struct gen_pci *pci = bus->sysdata;
-	resource_size_t idx = bus->number - pci->cfg.bus_range->start;
-
-	return pci->cfg.win[idx] + ((devfn << 8) | where);
-}
-
-static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
+static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
 	.bus_shift	= 16,
-	.ops		= {
-		.map_bus	= gen_pci_map_cfg_bus_cam,
-		.read		= pci_generic_config_read,
-		.write		= pci_generic_config_write,
-	}
-};
-
-static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
-					      unsigned int devfn,
-					      int where)
-{
-	struct gen_pci *pci = bus->sysdata;
-	resource_size_t idx = bus->number - pci->cfg.bus_range->start;
-
-	return pci->cfg.win[idx] + ((devfn << 12) | where);
-}
-
-static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
-	.bus_shift	= 20,
-	.ops		= {
-		.map_bus	= gen_pci_map_cfg_bus_ecam,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
 		.read		= pci_generic_config_read,
 		.write		= pci_generic_config_write,
 	}
@@ -70,25 +41,22 @@
 	  .data = &gen_pci_cfg_cam_bus_ops },
 
 	{ .compatible = "pci-host-ecam-generic",
-	  .data = &gen_pci_cfg_ecam_bus_ops },
+	  .data = &pci_generic_ecam_ops },
 
 	{ },
 };
+
 MODULE_DEVICE_TABLE(of, gen_pci_of_match);
 
 static int gen_pci_probe(struct platform_device *pdev)
 {
-	struct device *dev = &pdev->dev;
 	const struct of_device_id *of_id;
-	struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+	struct pci_ecam_ops *ops;
 
-	if (!pci)
-		return -ENOMEM;
+	of_id = of_match_node(gen_pci_of_match, pdev->dev.of_node);
+	ops = (struct pci_ecam_ops *)of_id->data;
 
-	of_id = of_match_node(gen_pci_of_match, dev->of_node);
-	pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
-
-	return pci_host_common_probe(pdev, pci);
+	return pci_host_common_probe(pdev, ops);
 }
 
 static struct platform_driver gen_pci_driver = {
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index ed651ba..58f7eeb 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -553,6 +553,8 @@
 		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
 		/* Choose the function to be read. (See comment above) */
 		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
+		/* Make sure the function was chosen before we start reading. */
+		mb();
 		/* Read from that function's config space. */
 		switch (size) {
 		case 1:
@@ -565,6 +567,11 @@
 			*val = readl(addr);
 			break;
 		}
+		/*
+		 * Make sure the read was done before we release the spinlock
+		 * allowing consecutive reads/writes.
+		 */
+		mb();
 		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
 	} else {
 		dev_err(&hpdev->hbus->hdev->device,
@@ -592,6 +599,8 @@
 		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
 		/* Choose the function to be written. (See comment above) */
 		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
+		/* Make sure the function was chosen before we start writing. */
+		wmb();
 		/* Write to that function's config space. */
 		switch (size) {
 		case 1:
@@ -604,6 +613,11 @@
 			writel(val, addr);
 			break;
 		}
+		/*
+		 * Make sure the write was done before we release the spinlock
+		 * allowing consecutive reads/writes.
+		 */
+		mb();
 		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
 	} else {
 		dev_err(&hpdev->hbus->hdev->device,
@@ -2268,11 +2282,6 @@
 
 	hbus = hv_get_drvdata(hdev);
 
-	ret = hv_send_resources_released(hdev);
-	if (ret)
-		dev_err(&hdev->device,
-			"Couldn't send resources released packet(s)\n");
-
 	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
 	init_completion(&comp_pkt.host_event);
 	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
@@ -2295,6 +2304,11 @@
 		pci_unlock_rescan_remove();
 	}
 
+	ret = hv_send_resources_released(hdev);
+	if (ret)
+		dev_err(&hdev->device,
+			"Couldn't send resources released packet(s)\n");
+
 	vmbus_close(hdev->channel);
 
 	/* Delete any children which might still exist. */
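
The barriers added in this file make the ordering around the shared config window explicit: the slot-select write must reach the device before the config data access, and that access must be complete before the spinlock is dropped and another CPU reuses the window. The same indexed-register idiom in general form, with hypothetical register names:

    #include <linux/io.h>

    static u32 indexed_read(void __iomem *index_reg, void __iomem *data_reg,
                            u32 idx)
    {
            writel(idx, index_reg); /* select which register to expose */
            mb();                   /* selection must land before the read */
            return readl(data_reg); /* read the selected register */
    }
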
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index eb5a275..b741a36 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -19,6 +19,7 @@
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/module.h>
 #include <linux/of_gpio.h>
+#include <linux/of_device.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
@@ -31,19 +32,29 @@
 
 #define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)
 
+enum imx6_pcie_variants {
+	IMX6Q,
+	IMX6SX,
+	IMX6QP,
+};
+
 struct imx6_pcie {
-	struct gpio_desc	*reset_gpio;
+	int			reset_gpio;
+	bool			gpio_active_high;
 	struct clk		*pcie_bus;
 	struct clk		*pcie_phy;
+	struct clk		*pcie_inbound_axi;
 	struct clk		*pcie;
 	struct pcie_port	pp;
 	struct regmap		*iomuxc_gpr;
+	enum imx6_pcie_variants variant;
 	void __iomem		*mem_base;
 	u32			tx_deemph_gen1;
 	u32			tx_deemph_gen2_3p5db;
 	u32			tx_deemph_gen2_6db;
 	u32			tx_swing_full;
 	u32			tx_swing_low;
+	int			link_gen;
 };
 
 /* PCIe Root Complex registers (memory-mapped) */
@@ -236,39 +247,95 @@
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
 	u32 val, gpr1, gpr12;
 
-	/*
-	 * If the bootloader already enabled the link we need some special
-	 * handling to get the core back into a state where it is safe to
-	 * touch it for configuration.  As there is no dedicated reset signal
-	 * wired up for MX6QDL, we need to manually force LTSSM into "detect"
-	 * state before completely disabling LTSSM, which is a prerequisite
-	 * for core configuration.
-	 *
-	 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
-	 * indication that the bootloader activated the link.
-	 */
-	regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
-	regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
-
-	if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
-	    (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
-		val = readl(pp->dbi_base + PCIE_PL_PFLR);
-		val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
-		val |= PCIE_PL_PFLR_FORCE_LINK;
-		writel(val, pp->dbi_base + PCIE_PL_PFLR);
-
+	switch (imx6_pcie->variant) {
+	case IMX6SX:
 		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
-				IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+		/* Force PCIe PHY reset */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
+		break;
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_SW_RST,
+				   IMX6Q_GPR1_PCIE_SW_RST);
+		break;
+	case IMX6Q:
+		/*
+		 * If the bootloader already enabled the link we need some
+		 * special handling to get the core back into a state where
+		 * it is safe to touch it for configuration.  As there is
+		 * no dedicated reset signal wired up for MX6QDL, we need
+		 * to manually force LTSSM into "detect" state before
+		 * completely disabling LTSSM, which is a prerequisite for
+		 * core configuration.
+		 *
+		 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we
+		 * have a strong indication that the bootloader activated
+		 * the link.
+		 */
+		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
+		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+
+		if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
+		    (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
+			val = readl(pp->dbi_base + PCIE_PL_PFLR);
+			val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
+			val |= PCIE_PL_PFLR_FORCE_LINK;
+			writel(val, pp->dbi_base + PCIE_PL_PFLR);
+
+			regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+					   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+		}
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+		break;
 	}
 
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-			IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-			IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
-
 	return 0;
 }
 
+static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+{
+	struct pcie_port *pp = &imx6_pcie->pp;
+	int ret = 0;
+
+	switch (imx6_pcie->variant) {
+	case IMX6SX:
+		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
+		if (ret) {
+			dev_err(pp->dev, "unable to enable pcie_axi clock\n");
+			break;
+		}
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
+		break;
+	case IMX6QP: 		/* FALLTHROUGH */
+	case IMX6Q:
+		/* power up core phy and enable ref clock */
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+		/*
+		 * the async reset input need ref clock to sync internally,
+		 * when the ref clock comes after reset, internal synced
+		 * reset time is too short, cannot meet the requirement.
+		 * add one ~10us delay here.
+		 */
+		udelay(10);
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+		break;
+	}
+
+	return ret;
+}
+
 static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
 {
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
@@ -292,43 +359,60 @@
 		goto err_pcie;
 	}
 
-	/* power up core phy and enable ref clock */
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-			IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
-	/*
-	 * the async reset input need ref clock to sync internally,
-	 * when the ref clock comes after reset, internal synced
-	 * reset time is too short, cannot meet the requirement.
-	 * add one ~10us delay here.
-	 */
-	udelay(10);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-			IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
+	if (ret) {
+		dev_err(pp->dev, "unable to enable pcie ref clock\n");
+		goto err_ref_clk;
+	}
 
 	/* allow the clocks to stabilize */
 	usleep_range(200, 500);
 
 	/* Some boards don't have PCIe reset GPIO. */
-	if (imx6_pcie->reset_gpio) {
-		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+					imx6_pcie->gpio_active_high);
 		msleep(100);
-		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+					!imx6_pcie->gpio_active_high);
 	}
+
+	switch (imx6_pcie->variant) {
+	case IMX6SX:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
+		break;
+	case IMX6QP:
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+				   IMX6Q_GPR1_PCIE_SW_RST, 0);
+
+		usleep_range(200, 500);
+		break;
+	case IMX6Q:		/* Nothing to do */
+		break;
+	}
+
 	return 0;
 
+err_ref_clk:
+	clk_disable_unprepare(imx6_pcie->pcie);
 err_pcie:
 	clk_disable_unprepare(imx6_pcie->pcie_bus);
 err_pcie_bus:
 	clk_disable_unprepare(imx6_pcie->pcie_phy);
 err_pcie_phy:
 	return ret;
-
 }
 
 static void imx6_pcie_init_phy(struct pcie_port *pp)
 {
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
 
+	if (imx6_pcie->variant == IMX6SX)
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+				   IMX6SX_GPR12_PCIE_RX_EQ_2);
+
 	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
 			IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
 
@@ -417,11 +501,15 @@
 		goto err_reset_phy;
 	}
 
-	/* Allow Gen2 mode after the link is up. */
-	tmp = readl(pp->dbi_base + PCIE_RC_LCR);
-	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
-	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
-	writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+	if (imx6_pcie->link_gen == 2) {
+		/* Allow Gen2 mode after the link is up. */
+		tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
+		writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+	} else {
+		dev_info(pp->dev, "Link: Gen2 disabled\n");
+	}
 
 	/*
 	 * Start Directed Speed Change so the best possible speed both link
@@ -445,8 +533,7 @@
 	}
 
 	tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
-	dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
-
+	dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
 	return 0;
 
 err_reset_phy:
@@ -523,6 +610,7 @@
 {
 	struct imx6_pcie *imx6_pcie;
 	struct pcie_port *pp;
+	struct device_node *np = pdev->dev.of_node;
 	struct resource *dbi_base;
 	struct device_node *node = pdev->dev.of_node;
 	int ret;
@@ -534,6 +622,9 @@
 	pp = &imx6_pcie->pp;
 	pp->dev = &pdev->dev;
 
+	imx6_pcie->variant =
+		(enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);
+
 	/* Added for PCI abort handling */
 	hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
 		"imprecise external abort");
@@ -544,8 +635,20 @@
 		return PTR_ERR(pp->dbi_base);
 
 	/* Fetch GPIOs */
-	imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
-							GPIOD_OUT_LOW);
+	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+	imx6_pcie->gpio_active_high = of_property_read_bool(np,
+						"reset-gpio-active-high");
+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+		ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+				imx6_pcie->gpio_active_high ?
+					GPIOF_OUT_INIT_HIGH :
+					GPIOF_OUT_INIT_LOW,
+				"PCIe reset");
+		if (ret) {
+			dev_err(&pdev->dev, "unable to get reset gpio\n");
+			return ret;
+		}
+	}
 
 	/* Fetch clocks */
 	imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
@@ -569,6 +672,16 @@
 		return PTR_ERR(imx6_pcie->pcie);
 	}
 
+	if (imx6_pcie->variant == IMX6SX) {
+		imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
+							   "pcie_inbound_axi");
+		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
+			dev_err(&pdev->dev,
+				"pcie_incbound_axi clock missing or invalid\n");
+			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
+		}
+	}
+
 	/* Grab GPR config register range */
 	imx6_pcie->iomuxc_gpr =
 		 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
@@ -598,6 +711,12 @@
 				 &imx6_pcie->tx_swing_low))
 		imx6_pcie->tx_swing_low = 127;
 
+	/* Limit link speed */
+	ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
+				   &imx6_pcie->link_gen);
+	if (ret)
+		imx6_pcie->link_gen = 1;
+
 	ret = imx6_add_pcie_port(pp, pdev);
 	if (ret < 0)
 		return ret;
@@ -615,7 +734,9 @@
 }
 
 static const struct of_device_id imx6_pcie_of_match[] = {
-	{ .compatible = "fsl,imx6q-pcie", },
+	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
+	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
+	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
 	{},
 };
 MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
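
Storing the variant in the .data field of the OF match table and retrieving it with of_device_get_match_data() is the usual way to handle per-SoC differences without re-comparing compatible strings in probe. A condensed sketch of the pattern, with illustrative names:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    enum my_variant { VARIANT_A, VARIANT_B };

    static const struct of_device_id my_of_match[] = {
            { .compatible = "vendor,chip-a", .data = (void *)VARIANT_A },
            { .compatible = "vendor,chip-b", .data = (void *)VARIANT_B },
            { }
    };

    static int my_probe(struct platform_device *pdev)
    {
            enum my_variant variant = (enum my_variant)(uintptr_t)
                    of_device_get_match_data(&pdev->dev);

            if (variant == VARIANT_B) {
                    /* apply VARIANT_B-specific setup here */
            }

            return 0;
    }
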
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index 6153853..4151509 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -14,6 +14,7 @@
 
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
+#include <linux/irqreturn.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_pci.h>
@@ -53,6 +54,21 @@
 #define IRQ_STATUS			0x184
 #define MSI_IRQ_OFFSET			4
 
+/* Error IRQ bits */
+#define ERR_AER		BIT(5)	/* ECRC error */
+#define ERR_AXI		BIT(4)	/* AXI tag lookup fatal error */
+#define ERR_CORR	BIT(3)	/* Correctable error */
+#define ERR_NONFATAL	BIT(2)	/* Non-fatal error */
+#define ERR_FATAL	BIT(1)	/* Fatal error */
+#define ERR_SYS		BIT(0)	/* System (fatal, non-fatal, or correctable) */
+#define ERR_IRQ_ALL	(ERR_AER | ERR_AXI | ERR_CORR | \
+			 ERR_NONFATAL | ERR_FATAL | ERR_SYS)
+#define ERR_FATAL_IRQ	(ERR_FATAL | ERR_AXI)
+#define ERR_IRQ_STATUS_RAW		0x1c0
+#define ERR_IRQ_STATUS			0x1c4
+#define ERR_IRQ_ENABLE_SET		0x1c8
+#define ERR_IRQ_ENABLE_CLR		0x1cc
+
 /* Config space registers */
 #define DEBUG0				0x728
 
@@ -243,6 +259,28 @@
 	writel(offset, ks_pcie->va_app_base + IRQ_EOI);
 }
 
+void ks_dw_pcie_enable_error_irq(void __iomem *reg_base)
+{
+	writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET);
+}
+
+irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
+					void __iomem *reg_base)
+{
+	u32 status;
+
+	status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
+	if (!status)
+		return IRQ_NONE;
+
+	if (status & ERR_FATAL_IRQ)
+		dev_err(dev, "fatal error (status %#010x)\n", status);
+
+	/* Ack the IRQ; status bits are RW1C */
+	writel(status, reg_base + ERR_IRQ_STATUS);
+	return IRQ_HANDLED;
+}
+
 static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
 {
 }
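
ks_dw_pcie_handle_error_irq() above follows the usual pattern for write-1-to-clear (RW1C) status registers: read the status, act on the bits of interest, then write the same value back to acknowledge exactly those bits. A generic sketch of the idiom (the Keystone code reads a RAW register and acks through a separate status register; the sketch collapses the two):

    #include <linux/io.h>
    #include <linux/irqreturn.h>

    static irqreturn_t handle_rw1c_status(void __iomem *status_reg, u32 mask)
    {
            u32 status = readl(status_reg) & mask;

            if (!status)
                    return IRQ_NONE;        /* not our interrupt */

            /* ... handle the individual status bits ... */

            writel(status, status_reg);     /* RW1C: writing 1 clears a bit */
            return IRQ_HANDLED;
    }
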
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index b71f55b..6b8301e 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -15,6 +15,7 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/module.h>
 #include <linux/msi.h>
@@ -159,7 +160,7 @@
 static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
 					   char *controller, int *num_irqs)
 {
-	int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
+	int temp, max_host_irqs, legacy = 1, *host_irqs;
 	struct device *dev = ks_pcie->pp.dev;
 	struct device_node *np_pcie = dev->of_node, **np_temp;
 
@@ -180,11 +181,15 @@
 	*np_temp = of_find_node_by_name(np_pcie, controller);
 	if (!(*np_temp)) {
 		dev_err(dev, "Node for %s is absent\n", controller);
-		goto out;
+		return -EINVAL;
 	}
+
 	temp = of_irq_count(*np_temp);
-	if (!temp)
-		goto out;
+	if (!temp) {
+		dev_err(dev, "No IRQ entries in %s\n", controller);
+		return -EINVAL;
+	}
+
 	if (temp > max_host_irqs)
 		dev_warn(dev, "Too many %s interrupts defined %u\n",
 			(legacy ? "legacy" : "MSI"), temp);
@@ -198,12 +203,13 @@
 		if (!host_irqs[temp])
 			break;
 	}
+
 	if (temp) {
 		*num_irqs = temp;
-		ret = 0;
+		return 0;
 	}
-out:
-	return ret;
+
+	return -EINVAL;
 }
 
 static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
@@ -226,6 +232,9 @@
 							 ks_pcie);
 		}
 	}
+
+	if (ks_pcie->error_irq > 0)
+		ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base);
 }
 
 /*
@@ -289,6 +298,14 @@
 	.scan_bus = ks_dw_pcie_v3_65_scan_bus,
 };
 
+static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
+{
+	struct keystone_pcie *ks_pcie = priv;
+
+	return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev,
+					   ks_pcie->va_app_base);
+}
+
 static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
 			 struct platform_device *pdev)
 {
@@ -309,6 +326,22 @@
 			return ret;
 	}
 
+	/*
+	 * Index 0 is the platform interrupt for the error interrupt
+	 * from the RC.  This is optional.
+	 */
+	ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
+	if (ks_pcie->error_irq <= 0)
+		dev_info(&pdev->dev, "no error IRQ defined\n");
+	else {
+		if (request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
+				IRQF_SHARED, "pcie-error-irq", ks_pcie) < 0) {
+			dev_err(&pdev->dev, "failed to request error IRQ %d\n",
+				ks_pcie->error_irq);
+			return ret;
+		}
+	}
+
 	pp->root_bus_nr = -1;
 	pp->ops = &keystone_pcie_host_ops;
 	ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
@@ -317,7 +350,7 @@
 		return ret;
 	}
 
-	return ret;
+	return 0;
 }
 
 static const struct of_device_id ks_pcie_of_match[] = {
@@ -346,7 +379,7 @@
 	struct resource *res;
 	void __iomem *reg_p;
 	struct phy *phy;
-	int ret = 0;
+	int ret;
 
 	ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
 				GFP_KERNEL);
@@ -376,6 +409,7 @@
 	devm_release_mem_region(dev, res->start, resource_size(res));
 
 	pp->dev = dev;
+	ks_pcie->np = dev->of_node;
 	platform_set_drvdata(pdev, ks_pcie);
 	ks_pcie->clk = devm_clk_get(dev, "pcie");
 	if (IS_ERR(ks_pcie->clk)) {
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
index f0944e8..a5b0cb2 100644
--- a/drivers/pci/host/pci-keystone.h
+++ b/drivers/pci/host/pci-keystone.h
@@ -29,6 +29,9 @@
 	int			msi_host_irqs[MAX_MSI_HOST_IRQS];
 	struct			device_node *msi_intc_np;
 	struct irq_domain	*legacy_irq_domain;
+	struct device_node	*np;
+
+	int error_irq;
 
 	/* Application register space */
 	void __iomem		*va_app_base;
@@ -42,6 +45,9 @@
 /* Keystone specific PCI controller APIs */
 void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
 void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
+void ks_dw_pcie_enable_error_irq(void __iomem *reg_base);
+irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
+					void __iomem *reg_base);
 int  ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
 			struct device_node *msi_intc_np);
 int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 53b79c5..6b451df 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1003,6 +1003,7 @@
 		pcie->msi->dev = &pcie->pdev->dev;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int mvebu_pcie_suspend(struct device *dev)
 {
 	struct mvebu_pcie *pcie;
@@ -1031,6 +1032,7 @@
 
 	return 0;
 }
+#endif
 
 static void mvebu_pcie_port_clk_put(void *data)
 {
@@ -1298,9 +1300,8 @@
 };
 MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
 
-static struct dev_pm_ops mvebu_pcie_pm_ops = {
-	.suspend_noirq = mvebu_pcie_suspend,
-	.resume_noirq = mvebu_pcie_resume,
+static const struct dev_pm_ops mvebu_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
 };
 
 static struct platform_driver mvebu_pcie_driver = {
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 68d1f41..c388468 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -295,6 +295,7 @@
 	struct reset_control *afi_rst;
 	struct reset_control *pcie_xrst;
 
+	bool legacy_phy;
 	struct phy *phy;
 
 	struct tegra_msi msi;
@@ -311,11 +312,14 @@
 
 struct tegra_pcie_port {
 	struct tegra_pcie *pcie;
+	struct device_node *np;
 	struct list_head list;
 	struct resource regs;
 	void __iomem *base;
 	unsigned int index;
 	unsigned int lanes;
+
+	struct phy **phys;
 };
 
 struct tegra_pcie_bus {
@@ -860,6 +864,128 @@
 	return 0;
 }
 
+static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	u32 value;
+
+	/* disable TX/RX data */
+	value = pads_readl(pcie, PADS_CTL);
+	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
+	pads_writel(pcie, value, PADS_CTL);
+
+	/* override IDDQ */
+	value = pads_readl(pcie, PADS_CTL);
+	value |= PADS_CTL_IDDQ_1L;
+	pads_writel(pcie, value, PADS_CTL);
+
+	/* reset PLL */
+	value = pads_readl(pcie, soc->pads_pll_ctl);
+	value &= ~PADS_PLL_CTL_RST_B4SM;
+	pads_writel(pcie, value, soc->pads_pll_ctl);
+
+	usleep_range(20, 100);
+
+	return 0;
+}
+
+static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
+{
+	struct device *dev = port->pcie->dev;
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < port->lanes; i++) {
+		err = phy_power_on(port->phys[i]);
+		if (err < 0) {
+			dev_err(dev, "failed to power on PHY#%u: %d\n", i,
+				err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
+{
+	struct device *dev = port->pcie->dev;
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < port->lanes; i++) {
+		err = phy_power_off(port->phys[i]);
+		if (err < 0) {
+			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
+				err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
+{
+	struct tegra_pcie_port *port;
+	int err;
+
+	if (pcie->legacy_phy) {
+		if (pcie->phy)
+			err = phy_power_on(pcie->phy);
+		else
+			err = tegra_pcie_phy_enable(pcie);
+
+		if (err < 0)
+			dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+
+		return err;
+	}
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		err = tegra_pcie_port_phy_power_on(port);
+		if (err < 0) {
+			dev_err(pcie->dev,
+				"failed to power on PCIe port %u PHY: %d\n",
+				port->index, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
+{
+	struct tegra_pcie_port *port;
+	int err;
+
+	if (pcie->legacy_phy) {
+		if (pcie->phy)
+			err = phy_power_off(pcie->phy);
+		else
+			err = tegra_pcie_phy_disable(pcie);
+
+		if (err < 0)
+			dev_err(pcie->dev, "failed to power off PHY: %d\n",
+				err);
+
+		return err;
+	}
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		err = tegra_pcie_port_phy_power_off(port);
+		if (err < 0) {
+			dev_err(pcie->dev,
+				"failed to power off PCIe port %u PHY: %d\n",
+				port->index, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
 {
 	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
@@ -899,13 +1025,9 @@
 		afi_writel(pcie, value, AFI_FUSE);
 	}
 
-	if (!pcie->phy)
-		err = tegra_pcie_phy_enable(pcie);
-	else
-		err = phy_power_on(pcie->phy);
-
+	err = tegra_pcie_phy_power_on(pcie);
 	if (err < 0) {
-		dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+		dev_err(pcie->dev, "failed to power on PHY(s): %d\n", err);
 		return err;
 	}
 
@@ -942,9 +1064,9 @@
 
 	/* TODO: disable and unprepare clocks? */
 
-	err = phy_power_off(pcie->phy);
+	err = tegra_pcie_phy_power_off(pcie);
 	if (err < 0)
-		dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
+		dev_err(pcie->dev, "failed to power off PHY(s): %d\n", err);
 
 	reset_control_assert(pcie->pcie_xrst);
 	reset_control_assert(pcie->afi_rst);
@@ -1049,6 +1171,100 @@
 	return 0;
 }
 
+static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
+{
+	int err;
+
+	pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
+	if (IS_ERR(pcie->phy)) {
+		err = PTR_ERR(pcie->phy);
+		dev_err(pcie->dev, "failed to get PHY: %d\n", err);
+		return err;
+	}
+
+	err = phy_init(pcie->phy);
+	if (err < 0) {
+		dev_err(pcie->dev, "failed to initialize PHY: %d\n", err);
+		return err;
+	}
+
+	pcie->legacy_phy = true;
+
+	return 0;
+}
+
+static struct phy *devm_of_phy_optional_get_index(struct device *dev,
+						  struct device_node *np,
+						  const char *consumer,
+						  unsigned int index)
+{
+	struct phy *phy;
+	char *name;
+
+	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
+	if (!name)
+		return ERR_PTR(-ENOMEM);
+
+	phy = devm_of_phy_get(dev, np, name);
+	kfree(name);
+
+	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
+		phy = NULL;
+
+	return phy;
+}
+
+static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
+{
+	struct device *dev = port->pcie->dev;
+	struct phy *phy;
+	unsigned int i;
+	int err;
+
+	port->phys = devm_kcalloc(dev, port->lanes, sizeof(*port->phys), GFP_KERNEL);
+	if (!port->phys)
+		return -ENOMEM;
+
+	for (i = 0; i < port->lanes; i++) {
+		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
+		if (IS_ERR(phy)) {
+			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
+				PTR_ERR(phy));
+			return PTR_ERR(phy);
+		}
+
+		err = phy_init(phy);
+		if (err < 0) {
+			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
+				err);
+			return err;
+		}
+
+		port->phys[i] = phy;
+	}
+
+	return 0;
+}
+
+static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	struct device_node *np = pcie->dev->of_node;
+	struct tegra_pcie_port *port;
+	int err;
+
+	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
+		return tegra_pcie_phys_get_legacy(pcie);
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		err = tegra_pcie_port_get_phys(port);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
 static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
 {
 	struct platform_device *pdev = to_platform_device(pcie->dev);
@@ -1067,16 +1283,9 @@
 		return err;
 	}
 
-	pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
-	if (IS_ERR(pcie->phy)) {
-		err = PTR_ERR(pcie->phy);
-		dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
-		return err;
-	}
-
-	err = phy_init(pcie->phy);
+	err = tegra_pcie_phys_get(pcie);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
+		dev_err(&pdev->dev, "failed to get PHYs: %d\n", err);
 		return err;
 	}
 
@@ -1752,6 +1961,7 @@
 		rp->index = index;
 		rp->lanes = value;
 		rp->pcie = pcie;
+		rp->np = port;
 
 		rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
 		if (IS_ERR(rp->base))
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index d71935cb..540d030 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -13,18 +13,7 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 
-#include "pci-host-common.h"
-
-/* Mapping is standard ECAM */
-static void __iomem *thunder_ecam_map_bus(struct pci_bus *bus,
-					  unsigned int devfn,
-					  int where)
-{
-	struct gen_pci *pci = bus->sysdata;
-	resource_size_t idx = bus->number - pci->cfg.bus_range->start;
-
-	return pci->cfg.win[idx] + ((devfn << 12) | where);
-}
+#include "../ecam.h"
 
 static void set_val(u32 v, int where, int size, u32 *val)
 {
@@ -99,7 +88,7 @@
 static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
 				       int where, int size, u32 *val)
 {
-	struct gen_pci *pci = bus->sysdata;
+	struct pci_config_window *cfg = bus->sysdata;
 	int where_a = where & ~3;
 	void __iomem *addr;
 	u32 node_bits;
@@ -129,7 +118,7 @@
 	 * the config space access window.  Since we are working with
 	 * the high-order 32 bits, shift everything down by 32 bits.
 	 */
-	node_bits = (pci->cfg.res.start >> 32) & (1 << 12);
+	node_bits = (cfg->res.start >> 32) & (1 << 12);
 
 	v |= node_bits;
 	set_val(v, where, size, val);
@@ -358,36 +347,24 @@
 	return pci_generic_config_write(bus, devfn, where, size, val);
 }
 
-static struct gen_pci_cfg_bus_ops thunder_ecam_bus_ops = {
+static struct pci_ecam_ops pci_thunder_ecam_ops = {
 	.bus_shift	= 20,
-	.ops		= {
-		.map_bus        = thunder_ecam_map_bus,
+	.pci_ops	= {
+		.map_bus        = pci_ecam_map_bus,
 		.read           = thunder_ecam_config_read,
 		.write          = thunder_ecam_config_write,
 	}
 };
 
 static const struct of_device_id thunder_ecam_of_match[] = {
-	{ .compatible = "cavium,pci-host-thunder-ecam",
-	  .data = &thunder_ecam_bus_ops },
-
+	{ .compatible = "cavium,pci-host-thunder-ecam" },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, thunder_ecam_of_match);
 
 static int thunder_ecam_probe(struct platform_device *pdev)
 {
-	struct device *dev = &pdev->dev;
-	const struct of_device_id *of_id;
-	struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
-
-	if (!pci)
-		return -ENOMEM;
-
-	of_id = of_match_node(thunder_ecam_of_match, dev->of_node);
-	pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
-
-	return pci_host_common_probe(pdev, pci);
+	return pci_host_common_probe(pdev, &pci_thunder_ecam_ops);
 }
 
 static struct platform_driver thunder_ecam_driver = {
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index cabb92a..9b8ab94 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -20,34 +20,22 @@
 #include <linux/of_pci.h>
 #include <linux/platform_device.h>
 
-#include "pci-host-common.h"
+#include "../ecam.h"
 
 #define PEM_CFG_WR 0x28
 #define PEM_CFG_RD 0x30
 
 struct thunder_pem_pci {
-	struct gen_pci	gen_pci;
 	u32		ea_entry[3];
 	void __iomem	*pem_reg_base;
 };
 
-static void __iomem *thunder_pem_map_bus(struct pci_bus *bus,
-					 unsigned int devfn, int where)
-{
-	struct gen_pci *pci = bus->sysdata;
-	resource_size_t idx = bus->number - pci->cfg.bus_range->start;
-
-	return pci->cfg.win[idx] + ((devfn << 16) | where);
-}
-
 static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
 				   int where, int size, u32 *val)
 {
 	u64 read_val;
-	struct thunder_pem_pci *pem_pci;
-	struct gen_pci *pci = bus->sysdata;
-
-	pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci);
+	struct pci_config_window *cfg = bus->sysdata;
+	struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv;
 
 	if (devfn != 0 || where >= 2048) {
 		*val = ~0;
@@ -132,17 +120,17 @@
 static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn,
 				   int where, int size, u32 *val)
 {
-	struct gen_pci *pci = bus->sysdata;
+	struct pci_config_window *cfg = bus->sysdata;
 
-	if (bus->number < pci->cfg.bus_range->start ||
-	    bus->number > pci->cfg.bus_range->end)
+	if (bus->number < cfg->busr.start ||
+	    bus->number > cfg->busr.end)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
 	/*
 	 * The first device on the bus is the PEM PCIe bridge.
 	 * Special case its config access.
 	 */
-	if (bus->number == pci->cfg.bus_range->start)
+	if (bus->number == cfg->busr.start)
 		return thunder_pem_bridge_read(bus, devfn, where, size, val);
 
 	return pci_generic_config_read(bus, devfn, where, size, val);
@@ -153,11 +141,11 @@
  * reserved bits, this makes the code simpler and is OK as the bits
  * are not affected by writing zeros to them.
  */
-static u32 thunder_pem_bridge_w1c_bits(int where)
+static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned)
 {
 	u32 w1c_bits = 0;
 
-	switch (where & ~3) {
+	switch (where_aligned) {
 	case 0x04: /* Command/Status */
 	case 0x1c: /* Base and I/O Limit/Secondary Status */
 		w1c_bits = 0xff000000;
@@ -184,15 +172,36 @@
 	return w1c_bits;
 }
 
+/* Some bits must be written to one so they appear to be read-only. */
+static u32 thunder_pem_bridge_w1_bits(u64 where_aligned)
+{
+	u32 w1_bits;
+
+	switch (where_aligned) {
+	case 0x1c: /* I/O Base / I/O Limit, Secondary Status */
+		/* Force 32-bit I/O addressing. */
+		w1_bits = 0x0101;
+		break;
+	case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */
+		/* Force 64-bit addressing */
+		w1_bits = 0x00010001;
+		break;
+	default:
+		w1_bits = 0;
+		break;
+	}
+	return w1_bits;
+}
+
 static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
 				    int where, int size, u32 val)
 {
-	struct gen_pci *pci = bus->sysdata;
-	struct thunder_pem_pci *pem_pci;
+	struct pci_config_window *cfg = bus->sysdata;
+	struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv;
 	u64 write_val, read_val;
+	u64 where_aligned = where & ~3ull;
 	u32 mask = 0;
 
-	pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci);
 
 	if (devfn != 0 || where >= 2048)
 		return PCIBIOS_DEVICE_NOT_FOUND;
@@ -205,8 +214,7 @@
 	 */
 	switch (size) {
 	case 1:
-		read_val = where & ~3ull;
-		writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+		writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
 		read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
 		read_val >>= 32;
 		mask = ~(0xff << (8 * (where & 3)));
@@ -215,8 +223,7 @@
 		val |= (u32)read_val;
 		break;
 	case 2:
-		read_val = where & ~3ull;
-		writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+		writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
 		read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
 		read_val >>= 32;
 		mask = ~(0xffff << (8 * (where & 3)));
@@ -244,11 +251,17 @@
 	}
 
 	/*
+	 * Some bits must be read-only with value of one.  Since the
+	 * access method allows these to be cleared if a zero is
+	 * written, force them to one before writing.
+	 */
+	val |= thunder_pem_bridge_w1_bits(where_aligned);
+
+	/*
 	 * Low order bits are the config address, the high order 32
 	 * bits are the data to be written.
 	 */
-	write_val = where & ~3ull;
-	write_val |= (((u64)val) << 32);
+	write_val = (((u64)val) << 32) | where_aligned;
 	writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);
 	return PCIBIOS_SUCCESSFUL;
 }
@@ -256,53 +269,38 @@
 static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
 				    int where, int size, u32 val)
 {
-	struct gen_pci *pci = bus->sysdata;
+	struct pci_config_window *cfg = bus->sysdata;
 
-	if (bus->number < pci->cfg.bus_range->start ||
-	    bus->number > pci->cfg.bus_range->end)
+	if (bus->number < cfg->busr.start ||
+	    bus->number > cfg->busr.end)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	/*
 	 * The first device on the bus is the PEM PCIe bridge.
 	 * Special case its config access.
 	 */
-	if (bus->number == pci->cfg.bus_range->start)
+	if (bus->number == cfg->busr.start)
 		return thunder_pem_bridge_write(bus, devfn, where, size, val);
 
 
 	return pci_generic_config_write(bus, devfn, where, size, val);
 }
 
-static struct gen_pci_cfg_bus_ops thunder_pem_bus_ops = {
-	.bus_shift	= 24,
-	.ops		= {
-		.map_bus	= thunder_pem_map_bus,
-		.read		= thunder_pem_config_read,
-		.write		= thunder_pem_config_write,
-	}
-};
-
-static const struct of_device_id thunder_pem_of_match[] = {
-	{ .compatible = "cavium,pci-host-thunder-pem",
-	  .data = &thunder_pem_bus_ops },
-
-	{ },
-};
-MODULE_DEVICE_TABLE(of, thunder_pem_of_match);
-
-static int thunder_pem_probe(struct platform_device *pdev)
+static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg)
 {
-	struct device *dev = &pdev->dev;
-	const struct of_device_id *of_id;
 	resource_size_t bar4_start;
 	struct resource *res_pem;
 	struct thunder_pem_pci *pem_pci;
+	struct platform_device *pdev;
+
+	/* Only OF support for now */
+	if (!dev->of_node)
+		return -EINVAL;
 
 	pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
 	if (!pem_pci)
 		return -ENOMEM;
 
-	of_id = of_match_node(thunder_pem_of_match, dev->of_node);
-	pem_pci->gen_pci.cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
+	pdev = to_platform_device(dev);
 
 	/*
 	 * The second register range is the PEM bridge to the PCIe
@@ -330,7 +328,29 @@
 	pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
 	pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
 
-	return pci_host_common_probe(pdev, &pem_pci->gen_pci);
+	cfg->priv = pem_pci;
+	return 0;
+}
+
+static struct pci_ecam_ops pci_thunder_pem_ops = {
+	.bus_shift	= 24,
+	.init		= thunder_pem_init,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
+		.read		= thunder_pem_config_read,
+		.write		= thunder_pem_config_write,
+	}
+};
+
+static const struct of_device_id thunder_pem_of_match[] = {
+	{ .compatible = "cavium,pci-host-thunder-pem" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, thunder_pem_of_match);
+
+static int thunder_pem_probe(struct platform_device *pdev)
+{
+	return pci_host_common_probe(pdev, &pci_thunder_pem_ops);
 }
 
 static struct platform_driver thunder_pem_driver = {
diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c
new file mode 100644
index 0000000..5572356
--- /dev/null
+++ b/drivers/pci/host/pcie-armada8k.c
@@ -0,0 +1,262 @@
+/*
+ * PCIe host controller driver for Marvell Armada-8K SoCs
+ *
+ * Armada-8K PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2016 Marvell Technology Group Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+
+#include "pcie-designware.h"
+
+struct armada8k_pcie {
+	void __iomem *base;
+	struct clk *clk;
+	struct pcie_port pp;
+};
+
+#define PCIE_VENDOR_REGS_OFFSET		0x8000
+
+#define PCIE_GLOBAL_CONTROL_REG		0x0
+#define PCIE_APP_LTSSM_EN		BIT(2)
+#define PCIE_DEVICE_TYPE_SHIFT		4
+#define PCIE_DEVICE_TYPE_MASK		0xF
+#define PCIE_DEVICE_TYPE_RC		0x4 /* Root complex */
+
+#define PCIE_GLOBAL_STATUS_REG		0x8
+#define PCIE_GLB_STS_RDLH_LINK_UP	BIT(1)
+#define PCIE_GLB_STS_PHY_LINK_UP	BIT(9)
+
+#define PCIE_GLOBAL_INT_CAUSE1_REG	0x1C
+#define PCIE_GLOBAL_INT_MASK1_REG	0x20
+#define PCIE_INT_A_ASSERT_MASK		BIT(9)
+#define PCIE_INT_B_ASSERT_MASK		BIT(10)
+#define PCIE_INT_C_ASSERT_MASK		BIT(11)
+#define PCIE_INT_D_ASSERT_MASK		BIT(12)
+
+#define PCIE_ARCACHE_TRC_REG		0x50
+#define PCIE_AWCACHE_TRC_REG		0x54
+#define PCIE_ARUSER_REG			0x5C
+#define PCIE_AWUSER_REG			0x60
+/*
+ * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
+ * allocate
+ */
+#define ARCACHE_DEFAULT_VALUE		0x3511
+#define AWCACHE_DEFAULT_VALUE		0x5311
+
+#define DOMAIN_OUTER_SHAREABLE		0x2
+#define AX_USER_DOMAIN_MASK		0x3
+#define AX_USER_DOMAIN_SHIFT		4
+
+#define to_armada8k_pcie(x)	container_of(x, struct armada8k_pcie, pp)
+
+static int armada8k_pcie_link_up(struct pcie_port *pp)
+{
+	struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+	u32 reg;
+	u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
+
+	reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG);
+
+	if ((reg & mask) == mask)
+		return 1;
+
+	dev_dbg(pp->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
+	return 0;
+}
+
+static void armada8k_pcie_establish_link(struct pcie_port *pp)
+{
+	struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+	void __iomem *base = pcie->base;
+	u32 reg;
+
+	if (!dw_pcie_link_up(pp)) {
+		/* Disable LTSSM state machine to enable configuration */
+		reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+		reg &= ~(PCIE_APP_LTSSM_EN);
+		writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+	}
+
+	/* Set the device to root complex mode */
+	reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+	reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
+	reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
+	writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+
+	/* Set the PCIe master AxCache attributes */
+	writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG);
+	writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG);
+
+	/* Set the PCIe master AxDomain attributes */
+	reg = readl(base + PCIE_ARUSER_REG);
+	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+	writel(reg, base + PCIE_ARUSER_REG);
+
+	reg = readl(base + PCIE_AWUSER_REG);
+	reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+	reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+	writel(reg, base + PCIE_AWUSER_REG);
+
+	/* Enable INT A-D interrupts */
+	reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG);
+	reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
+	       PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
+	writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG);
+
+	if (!dw_pcie_link_up(pp)) {
+		/* Configuration done. Start LTSSM */
+		reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+		reg |= PCIE_APP_LTSSM_EN;
+		writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+	}
+
+	/* Wait until the link becomes active again */
+	if (dw_pcie_wait_for_link(pp))
+		dev_err(pp->dev, "Link not up after reconfiguration\n");
+}
+
+static void armada8k_pcie_host_init(struct pcie_port *pp)
+{
+	dw_pcie_setup_rc(pp);
+	armada8k_pcie_establish_link(pp);
+}
+
+static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
+{
+	struct pcie_port *pp = arg;
+	struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+	void __iomem *base = pcie->base;
+	u32 val;
+
+	/*
+	 * Interrupts are directly handled by the device driver of the
+	 * PCI device. However, they are also latched into the PCIe
+	 * controller, so we simply discard them.
+	 */
+	val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG);
+	writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG);
+
+	return IRQ_HANDLED;
+}
+
+static struct pcie_host_ops armada8k_pcie_host_ops = {
+	.link_up = armada8k_pcie_link_up,
+	.host_init = armada8k_pcie_host_init,
+};
+
+static int armada8k_add_pcie_port(struct pcie_port *pp,
+				  struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	pp->root_bus_nr = -1;
+	pp->ops = &armada8k_pcie_host_ops;
+
+	pp->irq = platform_get_irq(pdev, 0);
+	if (pp->irq <= 0) {
+		dev_err(dev, "failed to get irq for port\n");
+		return -ENODEV;
+	}
+
+	ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
+			       IRQF_SHARED, "armada8k-pcie", pp);
+	if (ret) {
+		dev_err(dev, "failed to request irq %d\n", pp->irq);
+		return ret;
+	}
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "failed to initialize host: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int armada8k_pcie_probe(struct platform_device *pdev)
+{
+	struct armada8k_pcie *pcie;
+	struct pcie_port *pp;
+	struct device *dev = &pdev->dev;
+	struct resource *base;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pcie->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(pcie->clk))
+		return PTR_ERR(pcie->clk);
+
+	clk_prepare_enable(pcie->clk);
+
+	pp = &pcie->pp;
+	pp->dev = dev;
+	platform_set_drvdata(pdev, pcie);
+
+	/* Get the dw-pcie unit configuration/control registers base. */
+	base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+	pp->dbi_base = devm_ioremap_resource(dev, base);
+	if (IS_ERR(pp->dbi_base)) {
+		dev_err(dev, "couldn't remap regs base %p\n", base);
+		ret = PTR_ERR(pp->dbi_base);
+		goto fail;
+	}
+
+	pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET;
+
+	ret = armada8k_add_pcie_port(pp, pdev);
+	if (ret)
+		goto fail;
+
+	return 0;
+
+fail:
+	if (!IS_ERR(pcie->clk))
+		clk_disable_unprepare(pcie->clk);
+
+	return ret;
+}
+
+static const struct of_device_id armada8k_pcie_of_match[] = {
+	{ .compatible = "marvell,armada8k-pcie", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
+
+static struct platform_driver armada8k_pcie_driver = {
+	.probe		= armada8k_pcie_probe,
+	.driver = {
+		.name	= "armada8k-pcie",
+		.of_match_table = of_match_ptr(armada8k_pcie_of_match),
+	},
+};
+
+module_platform_driver(armada8k_pcie_driver);
+
+MODULE_DESCRIPTION("Armada 8k PCIe host controller driver");
+MODULE_AUTHOR("Yehuda Yitshak <yehuday@marvell.com>");
+MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index a4cccd3..aafd766 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -434,7 +434,6 @@
 	struct platform_device *pdev = to_platform_device(pp->dev);
 	struct pci_bus *bus, *child;
 	struct resource *cfg_res;
-	u32 val;
 	int i, ret;
 	LIST_HEAD(res);
 	struct resource_entry *win;
@@ -544,25 +543,6 @@
 	if (pp->ops->host_init)
 		pp->ops->host_init(pp);
 
-	/*
-	 * If the platform provides ->rd_other_conf, it means the platform
-	 * uses its own address translation component rather than ATU, so
-	 * we should not program the ATU here.
-	 */
-	if (!pp->ops->rd_other_conf)
-		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
-					  PCIE_ATU_TYPE_MEM, pp->mem_base,
-					  pp->mem_bus_addr, pp->mem_size);
-
-	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
-
-	/* program correct class for RC */
-	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
-
-	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
-	val |= PORT_LOGIC_SPEED_CHANGE;
-	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
-
 	pp->root_bus_nr = pp->busn->start;
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
 		bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
@@ -728,8 +708,6 @@
 void dw_pcie_setup_rc(struct pcie_port *pp)
 {
 	u32 val;
-	u32 membase;
-	u32 memlimit;
 
 	/* set the number of lanes */
 	dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
@@ -788,18 +766,31 @@
 	val |= 0x00010100;
 	dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
 
-	/* setup memory base, memory limit */
-	membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
-	memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
-	val = memlimit | membase;
-	dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
-
 	/* setup command register */
 	dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
 	val &= 0xffff0000;
 	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
 		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
 	dw_pcie_writel_rc(pp, val, PCI_COMMAND);
+
+	/*
+	 * If the platform provides ->rd_other_conf, it means the platform
+	 * uses its own address translation component rather than ATU, so
+	 * we should not program the ATU here.
+	 */
+	if (!pp->ops->rd_other_conf)
+		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
+					  PCIE_ATU_TYPE_MEM, pp->mem_base,
+					  pp->mem_bus_addr, pp->mem_size);
+
+	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+
+	/* program correct class for RC */
+	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+
+	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+	val |= PORT_LOGIC_SPEED_CHANGE;
+	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
 }
 
 MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 5139e64..3479d30 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -819,7 +819,7 @@
 
 	err = nwl_pcie_bridge_init(pcie);
 	if (err) {
-		dev_err(pcie->dev, "HW Initalization failed\n");
+		dev_err(pcie->dev, "HW Initialization failed\n");
 		return err;
 	}
 
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 2f6d3a1..f6221d7 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -138,6 +138,8 @@
 	char *table;
 
 	size = ibm_get_table_from_acpi(&table);
+	if (size < 0)
+		return NULL;
 	des = (union apci_descriptor *)table;
 	if (memcmp(des->header.sig, "aPCI", 4) != 0)
 		goto ibm_slot_done;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index e982010..d319a9c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -636,7 +636,7 @@
 	u8 *data = (u8 *) buf;
 
 	/* Several chips lock up trying to read undefined config space */
-	if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
+	if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
 		size = dev->cfg_size;
 	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
 		size = 128;
@@ -1008,6 +1008,9 @@
 	if (i >= PCI_ROM_RESOURCE)
 		return -ENODEV;
 
+	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
+		return -EINVAL;
+
 	if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
 		WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
 			current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
@@ -1024,10 +1027,6 @@
 	pci_resource_to_user(pdev, i, res, &start, &end);
 	vma->vm_pgoff += start >> PAGE_SHIFT;
 	mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
-
-	if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start))
-		return -EINVAL;
-
 	return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
 }
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 25e0327..c8b4dbd 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2228,7 +2228,7 @@
 
 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
 {
-	unsigned long flags = IORESOURCE_PCI_FIXED;
+	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
 
 	switch (prop) {
 	case PCI_EA_P_MEM:
@@ -2389,7 +2389,7 @@
 	return offset + ent_size;
 }
 
-/* Enhanced Allocation Initalization */
+/* Enhanced Allocation Initialization */
 void pci_ea_init(struct pci_dev *dev)
 {
 	int ea;
@@ -2547,7 +2547,7 @@
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
  * @dev: the PCI device
  */
-static int pci_std_enable_acs(struct pci_dev *dev)
+static void pci_std_enable_acs(struct pci_dev *dev)
 {
 	int pos;
 	u16 cap;
@@ -2555,7 +2555,7 @@
 
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
 	if (!pos)
-		return -ENODEV;
+		return;
 
 	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
 	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
@@ -2573,8 +2573,6 @@
 	ctrl |= (cap & PCI_ACS_UF);
 
 	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
-
-	return 0;
 }
 
 /**
@@ -2586,10 +2584,10 @@
 	if (!pci_acs_enable)
 		return;
 
-	if (!pci_std_enable_acs(dev))
+	if (!pci_dev_specific_enable_acs(dev))
 		return;
 
-	pci_dev_specific_enable_acs(dev);
+	pci_std_enable_acs(dev);
 }
 
 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
@@ -3021,6 +3019,121 @@
 }
 EXPORT_SYMBOL(pci_request_regions_exclusive);
 
+#ifdef PCI_IOBASE
+struct io_range {
+	struct list_head list;
+	phys_addr_t start;
+	resource_size_t size;
+};
+
+static LIST_HEAD(io_range_list);
+static DEFINE_SPINLOCK(io_range_lock);
+#endif
+
+/*
+ * Record the PCI IO range (expressed as CPU physical address + size).
+ * Return a negative value if an error has occurred, zero otherwise
+ */
+int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
+{
+	int err = 0;
+
+#ifdef PCI_IOBASE
+	struct io_range *range;
+	resource_size_t allocated_size = 0;
+
+	/* check if the range hasn't been previously recorded */
+	spin_lock(&io_range_lock);
+	list_for_each_entry(range, &io_range_list, list) {
+		if (addr >= range->start &&
+		    addr + size <= range->start + range->size) {
+			/* range already registered, bail out */
+			goto end_register;
+		}
+		allocated_size += range->size;
+	}
+
+	/* range not registered yet, check for available space */
+	if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
+		/* if it's too big, check whether 64K of space can be reserved */
+		if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
+			err = -E2BIG;
+			goto end_register;
+		}
+
+		size = SZ_64K;
+		pr_warn("Requested IO range too big, new size set to 64K\n");
+	}
+
+	/* add the range to the list */
+	range = kzalloc(sizeof(*range), GFP_ATOMIC);
+	if (!range) {
+		err = -ENOMEM;
+		goto end_register;
+	}
+
+	range->start = addr;
+	range->size = size;
+
+	list_add_tail(&range->list, &io_range_list);
+
+end_register:
+	spin_unlock(&io_range_lock);
+#endif
+
+	return err;
+}
+
+phys_addr_t pci_pio_to_address(unsigned long pio)
+{
+	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
+
+#ifdef PCI_IOBASE
+	struct io_range *range;
+	resource_size_t allocated_size = 0;
+
+	if (pio > IO_SPACE_LIMIT)
+		return address;
+
+	spin_lock(&io_range_lock);
+	list_for_each_entry(range, &io_range_list, list) {
+		if (pio >= allocated_size && pio < allocated_size + range->size) {
+			address = range->start + pio - allocated_size;
+			break;
+		}
+		allocated_size += range->size;
+	}
+	spin_unlock(&io_range_lock);
+#endif
+
+	return address;
+}
+
+unsigned long __weak pci_address_to_pio(phys_addr_t address)
+{
+#ifdef PCI_IOBASE
+	struct io_range *res;
+	resource_size_t offset = 0;
+	unsigned long addr = -1;
+
+	spin_lock(&io_range_lock);
+	list_for_each_entry(res, &io_range_list, list) {
+		if (address >= res->start && address < res->start + res->size) {
+			addr = address - res->start + offset;
+			break;
+		}
+		offset += res->size;
+	}
+	spin_unlock(&io_range_lock);
+
+	return addr;
+#else
+	if (address > IO_SPACE_LIMIT)
+		return (unsigned long)-1;
+
+	return (unsigned long) address;
+#endif
+}
+
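A rough sketch, under stated assumptions, of how a host bridge driver might use these weak helpers when it claims an I/O window (the example_setup_io() name is made up for illustration; real callers typically follow this with pci_remap_iospace(), documented just below):

	/*
	 * Illustrative sketch only (assumes <linux/pci.h>, <linux/ioport.h>
	 * and <linux/device.h>): record a CPU-physical I/O window with the
	 * PCI core and report the logical port number assigned to it.
	 */
	static int example_setup_io(struct device *dev, struct resource *io_res)
	{
		int err;

		err = pci_register_io_range(io_res->start, resource_size(io_res));
		if (err)
			return err;

		dev_dbg(dev, "I/O window starts at logical port %#lx\n",
			pci_address_to_pio(io_res->start));
		return 0;
	}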
 /**
  *	pci_remap_iospace - Remap the memory mapped I/O space
  *	@res: Resource describing the I/O space
@@ -4578,6 +4691,37 @@
 	return 0;
 }
 
+/**
+ * pci_add_dma_alias - Add a DMA devfn alias for a device
+ * @dev: the PCI device for which alias is added
+ * @devfn: alias slot and function
+ *
+ * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
+ * It should be called early, preferably from a PCI header fixup quirk.
+ */
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
+{
+	if (!dev->dma_alias_mask)
+		dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
+					      sizeof(long), GFP_KERNEL);
+	if (!dev->dma_alias_mask) {
+		dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
+		return;
+	}
+
+	set_bit(devfn, dev->dma_alias_mask);
+	dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
+		 PCI_SLOT(devfn), PCI_FUNC(devfn));
+}
+
+bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
+{
+	return (dev1->dma_alias_mask &&
+		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
+	       (dev2->dma_alias_mask &&
+		test_bit(dev1->devfn, dev2->dma_alias_mask));
+}
+
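As the kernel-doc above describes, pci_add_dma_alias() is intended to be called from an early header fixup so the alias is already known when DMA mapping and IOMMU code walk the device's requester IDs. A minimal sketch of such a quirk (the 0x1234 device ID and the 08.0 alias devfn are hypothetical, purely illustrative; the real conversions are the func0/func1 and MIC x200 quirks later in this series):

	/* Hypothetical device whose DMA also appears with requester ID 08.0. */
	static void quirk_example_dma_alias(struct pci_dev *pdev)
	{
		pci_add_dma_alias(pdev, PCI_DEVFN(0x08, 0));
	}
	DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1234,
				 quirk_example_dma_alias);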
 bool pci_device_is_present(struct pci_dev *pdev)
 {
 	u32 v;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d0fb934..a814bbb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -97,6 +97,7 @@
 struct pci_vpd_ops {
 	ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
 	ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+	int (*set_size)(struct pci_dev *dev, size_t len);
 };
 
 struct pci_vpd {
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 72db7f4..22ca641 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -81,3 +81,17 @@
 config PCIE_PME
 	def_bool y
 	depends on PCIEPORTBUS && PM
+
+config PCIE_DPC
+	tristate "PCIe Downstream Port Containment support"
+	depends on PCIEPORTBUS
+	default n
+	help
+	  This enables PCI Express Downstream Port Containment (DPC)
+	  driver support.  DPC events from Root and Downstream ports
+	  will be handled by the DPC driver.  If your system doesn't
+	  have this capability or you do not want to use this feature,
+	  it is safe to answer N.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called pcie-dpc.
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 00c62df..b24525b 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -14,3 +14,5 @@
 obj-$(CONFIG_PCIEAER)		+= aer/
 
 obj-$(CONFIG_PCIE_PME) += pme.o
+
+obj-$(CONFIG_PCIE_DPC) += pcie-dpc.o
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
new file mode 100644
index 0000000..ab552f1
--- /dev/null
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -0,0 +1,163 @@
+/*
+ * PCI Express Downstream Port Containment services driver
+ * Copyright (C) 2016 Intel Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pcieport_if.h>
+
+struct dpc_dev {
+	struct pcie_device	*dev;
+	struct work_struct 	work;
+	int 			cap_pos;
+};
+
+static void dpc_wait_link_inactive(struct pci_dev *pdev)
+{
+	unsigned long timeout = jiffies + HZ;
+	u16 lnk_status;
+
+	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+	while (lnk_status & PCI_EXP_LNKSTA_DLLLA &&
+					!time_after(jiffies, timeout)) {
+		msleep(10);
+		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+	}
+	if (lnk_status & PCI_EXP_LNKSTA_DLLLA)
+		dev_warn(&pdev->dev, "Link state not disabled for DPC event\n");
+}
+
+static void interrupt_event_handler(struct work_struct *work)
+{
+	struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
+	struct pci_dev *dev, *temp, *pdev = dpc->dev->port;
+	struct pci_bus *parent = pdev->subordinate;
+
+	pci_lock_rescan_remove();
+	list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
+					 bus_list) {
+		pci_dev_get(dev);
+		pci_stop_and_remove_bus_device(dev);
+		pci_dev_put(dev);
+	}
+	pci_unlock_rescan_remove();
+
+	dpc_wait_link_inactive(pdev);
+	pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS,
+		PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT);
+}
+
+static irqreturn_t dpc_irq(int irq, void *context)
+{
+	struct dpc_dev *dpc = (struct dpc_dev *)context;
+	struct pci_dev *pdev = dpc->dev->port;
+	u16 status, source;
+
+	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
+	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID,
+			     &source);
+	if (!status)
+		return IRQ_NONE;
+
+	dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n",
+		status, source);
+
+	if (status & PCI_EXP_DPC_STATUS_TRIGGER) {
+		u16 reason = (status >> 1) & 0x3;
+
+		dev_warn(&dpc->dev->device, "DPC %s triggered, remove downstream devices\n",
+			 (reason == 0) ? "unmasked uncorrectable error" :
+			 (reason == 1) ? "ERR_NONFATAL" :
+			 (reason == 2) ? "ERR_FATAL" : "extended error");
+		schedule_work(&dpc->work);
+	}
+	return IRQ_HANDLED;
+}
+
+#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
+static int dpc_probe(struct pcie_device *dev)
+{
+	struct dpc_dev *dpc;
+	struct pci_dev *pdev = dev->port;
+	int status;
+	u16 ctl, cap;
+
+	dpc = kzalloc(sizeof(*dpc), GFP_KERNEL);
+	if (!dpc)
+		return -ENOMEM;
+
+	dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
+	dpc->dev = dev;
+	INIT_WORK(&dpc->work, interrupt_event_handler);
+	set_service_data(dev, dpc);
+
+	status = request_irq(dev->irq, dpc_irq, IRQF_SHARED, "pcie-dpc", dpc);
+	if (status) {
+		dev_warn(&dev->device, "request IRQ%d failed: %d\n", dev->irq,
+			 status);
+		goto out;
+	}
+
+	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
+	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
+
+	ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
+	pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
+
+	dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
+		cap & 0xf, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
+		FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
+		FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), (cap >> 8) & 0xf,
+		FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
+	return status;
+ out:
+	kfree(dpc);
+	return status;
+}
+
+static void dpc_remove(struct pcie_device *dev)
+{
+	struct dpc_dev *dpc = get_service_data(dev);
+	struct pci_dev *pdev = dev->port;
+	u16 ctl;
+
+	pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
+	ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN);
+	pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
+
+	free_irq(dev->irq, dpc);
+	kfree(dpc);
+}
+
+static struct pcie_port_service_driver dpcdriver = {
+	.name		= "dpc",
+	.port_type	= PCI_EXP_TYPE_ROOT_PORT | PCI_EXP_TYPE_DOWNSTREAM,
+	.service	= PCIE_PORT_SERVICE_DPC,
+	.probe		= dpc_probe,
+	.remove		= dpc_remove,
+};
+
+static int __init dpc_service_init(void)
+{
+	return pcie_port_service_register(&dpcdriver);
+}
+
+static void __exit dpc_service_exit(void)
+{
+	pcie_port_service_unregister(&dpcdriver);
+}
+
+MODULE_DESCRIPTION("PCI Express Downstream Port Containment driver");
+MODULE_AUTHOR("Keith Busch <keith.busch@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1");
+
+module_init(dpc_service_init);
+module_exit(dpc_service_exit);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d525548..587aef3 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -11,14 +11,14 @@
 
 #include <linux/compiler.h>
 
-#define PCIE_PORT_DEVICE_MAXSERVICES   4
+#define PCIE_PORT_DEVICE_MAXSERVICES   5
 /*
  * According to the PCI Express Base Specification 2.0, the indices of
  * the MSI-X table entries used by port services must not exceed 31
  */
 #define PCIE_PORT_MAX_MSIX_ENTRIES	32
 
-#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
+#define get_descriptor_id(type, service) (((type - 4) << 8) | service)
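A quick worked example of the widened encoding (assuming PCIE_PORT_SERVICE_DPC is the fifth service bit, 0x10, in pcieport_if.h, which is not part of this hunk): a root port has pci_pcie_type() == 4, so its DPC service device gets get_descriptor_id(4, 0x10) = (0 << 8) | 0x10 = 0x010, and with the "%s:pcie%03x" name format used in portdrv_core.c below it shows up as, say, "0000:00:1c.0:pcie010". The old << 4 shift left only four bits for the service mask, which a fifth service bit would have overflowed into the type field.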
 
 extern struct bus_type pcie_port_bus_type;
 int pcie_port_device_register(struct pci_dev *dev);
@@ -67,17 +67,14 @@
 #endif /* !CONFIG_PCIE_PME */
 
 #ifdef CONFIG_ACPI
-int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+void pcie_port_acpi_setup(struct pci_dev *port, int *mask);
 
-static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask)
 {
-	return pcie_port_acpi_setup(port, mask);
+	pcie_port_acpi_setup(port, mask);
 }
 #else /* !CONFIG_ACPI */
-static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
-{
-	return 0;
-}
+static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask){}
 #endif /* !CONFIG_ACPI */
 
 #endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index b4d2894..6b8c2f1 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -32,32 +32,30 @@
  * NOTE: It turns out that we cannot do that for individual port services
  * separately, because that would make some systems work incorrectly.
  */
-int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
+void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
 {
 	struct acpi_pci_root *root;
 	acpi_handle handle;
 	u32 flags;
 
 	if (acpi_pci_disabled)
-		return 0;
+		return;
 
 	handle = acpi_find_root_bridge_handle(port);
 	if (!handle)
-		return -EINVAL;
+		return;
 
 	root = acpi_pci_find_root(handle);
 	if (!root)
-		return -ENODEV;
+		return;
 
 	flags = root->osc_control_set;
 
-	*srv_mask = PCIE_PORT_SERVICE_VC;
+	*srv_mask = PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_DPC;
 	if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
 		*srv_mask |= PCIE_PORT_SERVICE_HP;
 	if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
 		*srv_mask |= PCIE_PORT_SERVICE_PME;
 	if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
 		*srv_mask |= PCIE_PORT_SERVICE_AER;
-
-	return 0;
 }
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 88122dc..32d4d0a 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -254,38 +254,28 @@
 static int get_port_device_capability(struct pci_dev *dev)
 {
 	int services = 0;
-	u32 reg32;
 	int cap_mask = 0;
-	int err;
 
 	if (pcie_ports_disabled)
 		return 0;
 
 	cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
-			| PCIE_PORT_SERVICE_VC;
+			| PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_DPC;
 	if (pci_aer_available())
 		cap_mask |= PCIE_PORT_SERVICE_AER;
 
-	if (pcie_ports_auto) {
-		err = pcie_port_platform_notify(dev, &cap_mask);
-		if (err)
-			return 0;
-	}
+	if (pcie_ports_auto)
+		pcie_port_platform_notify(dev, &cap_mask);
 
 	/* Hot-Plug Capable */
-	if ((cap_mask & PCIE_PORT_SERVICE_HP) &&
-	    pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT) {
-		pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32);
-		if (reg32 & PCI_EXP_SLTCAP_HPC) {
-			services |= PCIE_PORT_SERVICE_HP;
-			/*
-			 * Disable hot-plug interrupts in case they have been
-			 * enabled by the BIOS and the hot-plug service driver
-			 * is not loaded.
-			 */
-			pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
-				PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
-		}
+	if ((cap_mask & PCIE_PORT_SERVICE_HP) && dev->is_hotplug_bridge) {
+		services |= PCIE_PORT_SERVICE_HP;
+		/*
+		 * Disable hot-plug interrupts in case they have been enabled
+		 * by the BIOS and the hot-plug service driver is not loaded.
+		 */
+		pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
+			  PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
 	}
 	/* AER capable */
 	if ((cap_mask & PCIE_PORT_SERVICE_AER)
@@ -311,6 +301,8 @@
 		 */
 		pcie_pme_interrupt_enable(dev, false);
 	}
+	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC))
+		services |= PCIE_PORT_SERVICE_DPC;
 
 	return services;
 }
@@ -338,7 +330,7 @@
 	device = &pcie->device;
 	device->bus = &pcie_port_bus_type;
 	device->release = release_pcie_device;	/* callback to free pcie dev */
-	dev_set_name(device, "%s:pcie%02x",
+	dev_set_name(device, "%s:pcie%03x",
 		     pci_name(pdev),
 		     get_descriptor_id(pci_pcie_type(pdev), service));
 	device->parent = &pdev->dev;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8004f67..8e3ef72 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -179,9 +179,6 @@
 	u16 orig_cmd;
 	struct pci_bus_region region, inverted_region;
 
-	if (dev->non_compliant_bars)
-		return 0;
-
 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
 
 	/* No printks while decoding is disabled! */
@@ -322,6 +319,9 @@
 {
 	unsigned int pos, reg;
 
+	if (dev->non_compliant_bars)
+		return;
+
 	for (pos = 0; pos < howmany; pos++) {
 		struct resource *res = &dev->resource[pos];
 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
@@ -1537,6 +1537,7 @@
 	pcibios_release_device(pci_dev);
 	pci_bus_put(pci_dev->bus);
 	kfree(pci_dev->driver_override);
+	kfree(pci_dev->dma_alias_mask);
 	kfree(pci_dev);
 }
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8e67802..ee72ebe 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3150,6 +3150,39 @@
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
 			 quirk_broken_intx_masking);
 
+/*
+ * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking;
+ * DisINTx can be set, but the interrupt status bit is non-functional.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1,
+			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2,
+			 quirk_broken_intx_masking);
+
 static void quirk_no_bus_reset(struct pci_dev *dev)
 {
 	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
@@ -3185,6 +3218,29 @@
 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
 			       PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
 
+/*
+ * Thunderbolt controllers with broken MSI hotplug signaling:
+ * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part
+ * of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge).
+ */
+static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
+{
+	if (pdev->is_hotplug_bridge &&
+	    (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
+	     pdev->revision <= 1))
+		pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
+			quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
+			quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
+			quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+			quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
+			quirk_thunderbolt_hotplug_msi);
+
 #ifdef CONFIG_ACPI
 /*
  * Apple: Shutdown Cactus Ridge Thunderbolt controller.
@@ -3232,7 +3288,8 @@
 	acpi_execute_simple_method(SXIO, NULL, 0);
 	acpi_execute_simple_method(SXLV, NULL, 0);
 }
-DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1547,
+DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
+			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
 			       quirk_apple_poweroff_thunderbolt);
 
 /*
@@ -3266,9 +3323,11 @@
 	if (!nhi)
 		goto out;
 	if (nhi->vendor != PCI_VENDOR_ID_INTEL
-			|| (nhi->device != 0x1547 && nhi->device != 0x156c)
-			|| nhi->subsystem_vendor != 0x2222
-			|| nhi->subsystem_device != 0x1111)
+		    || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
+			nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+			nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
+		    || nhi->subsystem_vendor != 0x2222
+		    || nhi->subsystem_device != 0x1111)
 		goto out;
 	dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
 	device_pm_wait_for_dev(&dev->dev, &nhi->dev);
@@ -3276,9 +3335,14 @@
 	pci_dev_put(nhi);
 	pci_dev_put(sibling);
 }
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1547,
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+			       PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
 			       quirk_apple_wait_for_thunderbolt);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x156d,
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+			       quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
 			       quirk_apple_wait_for_thunderbolt);
 #endif
 
@@ -3610,10 +3674,8 @@
 
 static void quirk_dma_func0_alias(struct pci_dev *dev)
 {
-	if (PCI_FUNC(dev->devfn) != 0) {
-		dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
-		dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
-	}
+	if (PCI_FUNC(dev->devfn) != 0)
+		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 }
 
 /*
@@ -3626,10 +3688,8 @@
 
 static void quirk_dma_func1_alias(struct pci_dev *dev)
 {
-	if (PCI_FUNC(dev->devfn) != 1) {
-		dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1);
-		dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
-	}
+	if (PCI_FUNC(dev->devfn) != 1)
+		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
 }
 
 /*
@@ -3695,13 +3755,8 @@
 	const struct pci_device_id *id;
 
 	id = pci_match_id(fixed_dma_alias_tbl, dev);
-	if (id) {
-		dev->dma_alias_devfn = id->driver_data;
-		dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
-		dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
-			 PCI_SLOT(dev->dma_alias_devfn),
-			 PCI_FUNC(dev->dma_alias_devfn));
-	}
+	if (id)
+		pci_add_dma_alias(dev, id->driver_data);
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
@@ -3734,6 +3789,21 @@
 DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
 
 /*
+ * MIC x200 NTB forwards PCIe traffic using multiple alien RIDs. They have to
+ * be added as aliases to the DMA device in order to allow buffer access
+ * when the IOMMU is enabled. The following devfns have to match the
+ * RIT-LUT table programmed in the EEPROM.
+ */
+static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
+{
+	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
+	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
+	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
+
+/*
  * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
  * class code.  Fix it.
  */
@@ -3936,6 +4006,55 @@
 	return acs_flags & ~flags ? 0 : 1;
 }
 
+/*
+ * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in
+ * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2,
+ * 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and
+ * control registers whereas the PCIe spec packs them into words (Rev 3.0,
+ * 7.16 ACS Extended Capability).  The bit definitions are correct, but the
+ * control register is at offset 8 instead of 6 and we should probably use
+ * dword accesses to them.  This applies to the following PCI Device IDs, as
+ * found in volume 1 of the datasheet[2]:
+ *
+ * 0xa110-0xa11f Sunrise Point-H PCI Express Root Port #{0-16}
+ * 0xa167-0xa16a Sunrise Point-H PCI Express Root Port #{17-20}
+ *
+ * N.B. This doesn't fix what lspci shows.
+ *
+ * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
+ * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
+ */
+static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
+{
+	return pci_is_pcie(dev) &&
+		pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
+		((dev->device & ~0xf) == 0xa110 ||
+		 (dev->device >= 0xa167 && dev->device <= 0xa16a));
+}
+
+#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
+
+static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
+{
+	int pos;
+	u32 cap, ctrl;
+
+	if (!pci_quirk_intel_spt_pch_acs_match(dev))
+		return -ENOTTY;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+	if (!pos)
+		return -ENOTTY;
+
+	/* see pci_acs_flags_enabled() */
+	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
+	acs_flags &= (cap | PCI_ACS_EC);
+
+	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
+
+	return acs_flags & ~ctrl ? 0 : 1;
+}
+
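The control-register offset in these quirks follows directly from the dword
packing described in the comment above: in the PCIe spec the ACS capability
register sits at offset 0x04 (PCI_ACS_CAP) and the control register at offset
0x06, but with Sunrise Point using a full dword per register the control ends
up one dword later:

	INTEL_SPT_ACS_CTRL = PCI_ACS_CAP + 4 = 0x04 + 0x04 = 0x08

hence the pci_read_config_dword() accesses at pos + PCI_ACS_CAP and
pos + INTEL_SPT_ACS_CTRL here and in the enable path added further down.
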
 static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
 {
 	/*
@@ -4024,6 +4143,7 @@
 	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
 	/* Intel PCH root ports */
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
+	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
 	{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
 	{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
 	/* Cavium ThunderX */
@@ -4159,16 +4279,44 @@
 	return 0;
 }
 
+static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
+{
+	int pos;
+	u32 cap, ctrl;
+
+	if (!pci_quirk_intel_spt_pch_acs_match(dev))
+		return -ENOTTY;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+	if (!pos)
+		return -ENOTTY;
+
+	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
+	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
+
+	ctrl |= (cap & PCI_ACS_SV);
+	ctrl |= (cap & PCI_ACS_RR);
+	ctrl |= (cap & PCI_ACS_CR);
+	ctrl |= (cap & PCI_ACS_UF);
+
+	pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
+
+	dev_info(&dev->dev, "Intel SPT PCH root port ACS workaround enabled\n");
+
+	return 0;
+}
+
 static const struct pci_dev_enable_acs {
 	u16 vendor;
 	u16 device;
 	int (*enable_acs)(struct pci_dev *dev);
 } pci_dev_enable_acs[] = {
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
+	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs },
 	{ 0 }
 };
 
-void pci_dev_specific_enable_acs(struct pci_dev *dev)
+int pci_dev_specific_enable_acs(struct pci_dev *dev)
 {
 	const struct pci_dev_enable_acs *i;
 	int ret;
@@ -4180,9 +4328,11 @@
 		     i->device == (u16)PCI_ANY_ID)) {
 			ret = i->enable_acs(dev);
 			if (ret >= 0)
-				return;
+				return ret;
 		}
 	}
+
+	return -ENOTTY;
 }
 
 /*
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index a20ce7d..33e0f03 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -40,11 +40,15 @@
 	 * If the device is broken and uses an alias requester ID for
 	 * DMA, iterate over that too.
 	 */
-	if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) {
-		ret = fn(pdev, PCI_DEVID(pdev->bus->number,
-					 pdev->dma_alias_devfn), data);
-		if (ret)
-			return ret;
+	if (unlikely(pdev->dma_alias_mask)) {
+		u8 devfn;
+
+		for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) {
+			ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn),
+				 data);
+			if (ret)
+				return ret;
+		}
 	}
 
 	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
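
Callers consume this walk through pci_for_each_dma_alias(). A hedged usage
sketch, with a hypothetical callback, shows the shape of the interface:

/* Hypothetical callback: note every requester ID the device may use. */
static int record_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	u16 *last = data;

	*last = alias;		/* alias is PCI_DEVID(bus, devfn) */
	return 0;		/* a non-zero return stops the iteration */
}

static void map_all_aliases(struct pci_dev *pdev)
{
	u16 last = 0;

	pci_for_each_dma_alias(pdev, record_alias, &last);
}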
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 4c2fa05..944674e 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -56,6 +56,7 @@
 	int	stschg_irq;	/* card-status-change irq */
 	int	card_irq;	/* card irq */
 	int	eject_irq;	/* db1200/pb1200 have these */
+	int	insert_gpio;	/* db1000 carddetect gpio */
 
 #define BOARD_TYPE_DEFAULT	0	/* most boards */
 #define BOARD_TYPE_DB1200	1	/* IRQs aren't gpios */
@@ -83,7 +84,7 @@
 /* carddetect gpio: low-active */
 static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
 {
-	return !gpio_get_value(irq_to_gpio(sock->insert_irq));
+	return !gpio_get_value(sock->insert_gpio);
 }
 
 static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@@ -457,9 +458,15 @@
 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
 	sock->card_irq = r ? r->start : 0;
 
-	/* insert: irq which triggers on card insertion/ejection */
+	/* insert: irq which triggers on card insertion/ejection
+	 * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
+	 */
 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
 	sock->insert_irq = r ? r->start : -1;
+	if (sock->board_type == BOARD_TYPE_DEFAULT) {
+		sock->insert_gpio = r ? r->start : -1;
+		sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
+	}
 
 	/* stschg: irq which trigger on card status change (optional) */
 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
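
For reference, the board side of this contract would look roughly as follows:
on DB1000-class boards the platform device supplies a GPIO number in the
IORESOURCE_IRQ resource named "insert", and the probe code above converts it
with gpio_to_irq(). The GPIO number below is a placeholder, not a real board
value.

/* Hypothetical DB1000-style resource; 0 stands in for the carddetect GPIO. */
static struct resource db1000_pcmcia_resources[] = {
	{
		.name	= "insert",
		.start	= 0,	/* carddetect GPIO, not an IRQ */
		.end	= 0,
		.flags	= IORESOURCE_IRQ,
	},
};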
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 32346b5..f2d01d4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -737,8 +737,19 @@
 			break;
 		case CPU_PM_EXIT:
 		case CPU_PM_ENTER_FAILED:
-			 /* Restore and enable the counter */
-			armpmu_start(event, PERF_EF_RELOAD);
+			 /*
+			  * Restore and enable the counter.
+			  * armpmu_start() indirectly calls
+			  *
+			  * perf_event_update_userpage()
+			  *
+			  * which requires RCU read locking to be functional.
+			  * Wrap the call in RCU_NONIDLE() so the RCU
+			  * subsystem knows this CPU is not idle, from an
+			  * RCU perspective, for the duration of the
+			  * armpmu_start() call.
+			  */
+			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
 			break;
 		default:
 			break;
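
RCU_NONIDLE() is the general-purpose bracket for this situation: it makes RCU
treat the CPU as non-idle for exactly the wrapped expression. Any other
RCU-dependent call made from this CPU PM notifier would be wrapped the same
way, for example (hypothetical):

	RCU_NONIDLE(perf_event_update_userpage(event));
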
@@ -836,6 +847,14 @@
 	if (!platform_get_irq(cpu_pmu->plat_device, 0))
 		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
+	/*
+	 * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
+	 * big.LITTLE). This is not an uncore PMU, and we have taken ctx
+	 * sharing into account (e.g. with our pmu::filter_match callback and
+	 * pmu::event_init group validation).
+	 */
+	cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
+
 	return 0;
 
 out_unregister:
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 26566db..27e5f6e 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -421,4 +421,6 @@
 	  Enable this to support the Broadcom Cygnus PCIe PHY.
 	  If unsure, say N.
 
+source "drivers/phy/tegra/Kconfig"
+
 endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 24596a9..d4f06e6 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -52,3 +52,5 @@
 obj-$(CONFIG_PHY_BRCMSTB_SATA)		+= phy-brcmstb-sata.o
 obj-$(CONFIG_PHY_PISTACHIO_USB)		+= phy-pistachio-usb.o
 obj-$(CONFIG_PHY_CYGNUS_PCIE)		+= phy-bcm-cygnus-pcie.o
+
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index e7e574d..b72e9a3 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -141,7 +141,7 @@
 		if (phy_provider->dev->of_node == node)
 			return phy_provider;
 
-		for_each_child_of_node(phy_provider->dev->of_node, child)
+		for_each_child_of_node(phy_provider->children, child)
 			if (child == node)
 				return phy_provider;
 	}
@@ -811,24 +811,59 @@
 /**
  * __of_phy_provider_register() - create/register phy provider with the framework
  * @dev: struct device of the phy provider
+ * @children: device node containing children (if different from dev->of_node)
  * @owner: the module owner containing of_xlate
  * @of_xlate: function pointer to obtain phy instance from phy provider
  *
  * Creates struct phy_provider from dev and of_xlate function pointer.
  * This is used in the case of dt boot for finding the phy instance from
  * phy provider.
+ *
+ * If the PHY provider doesn't nest children directly but uses a separate
+ * child node to contain the individual children, the @children parameter
+ * can be used to override the default. If NULL, the default (dev->of_node)
+ * will be used. If non-NULL, the device node must be a child (or further
+ * descendant) of dev->of_node. Otherwise an ERR_PTR()-encoded -EINVAL
+ * error code is returned.
  */
 struct phy_provider *__of_phy_provider_register(struct device *dev,
-	struct module *owner, struct phy * (*of_xlate)(struct device *dev,
-	struct of_phandle_args *args))
+	struct device_node *children, struct module *owner,
+	struct phy * (*of_xlate)(struct device *dev,
+				 struct of_phandle_args *args))
 {
 	struct phy_provider *phy_provider;
 
+	/*
+	 * If specified, the device node containing the children must itself
+	 * be the provider's device node or a child (or further descendant)
+	 * thereof.
+	 */
+	if (children) {
+		struct device_node *parent = of_node_get(children), *next;
+
+		while (parent) {
+			if (parent == dev->of_node)
+				break;
+
+			next = of_get_parent(parent);
+			of_node_put(parent);
+			parent = next;
+		}
+
+		if (!parent)
+			return ERR_PTR(-EINVAL);
+
+		of_node_put(parent);
+	} else {
+		children = dev->of_node;
+	}
+
 	phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
 	if (!phy_provider)
 		return ERR_PTR(-ENOMEM);
 
 	phy_provider->dev = dev;
+	phy_provider->children = of_node_get(children);
 	phy_provider->owner = owner;
 	phy_provider->of_xlate = of_xlate;
 
@@ -854,8 +889,9 @@
  * on the devres data, then, devres data is freed.
  */
 struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
-	struct module *owner, struct phy * (*of_xlate)(struct device *dev,
-	struct of_phandle_args *args))
+	struct device_node *children, struct module *owner,
+	struct phy * (*of_xlate)(struct device *dev,
+				 struct of_phandle_args *args))
 {
 	struct phy_provider **ptr, *phy_provider;
 
@@ -863,7 +899,8 @@
 	if (!ptr)
 		return ERR_PTR(-ENOMEM);
 
-	phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
+	phy_provider = __of_phy_provider_register(dev, children, owner,
+						  of_xlate);
 	if (!IS_ERR(phy_provider)) {
 		*ptr = phy_provider;
 		devres_add(dev, ptr);
@@ -888,6 +925,7 @@
 
 	mutex_lock(&phy_provider_mutex);
 	list_del(&phy_provider->list);
+	of_node_put(phy_provider->children);
 	kfree(phy_provider);
 	mutex_unlock(&phy_provider_mutex);
 }
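
Taken together, the new @children parameter lets a provider whose PHY nodes
sit under an intermediate device-tree node register against that node rather
than its own. A hedged usage sketch, calling the __devm variant directly
(driver names and the "pads" child node are hypothetical):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

/* Hypothetical translation callback; a real provider would look up the
 * struct phy matching args->np among its children. */
static struct phy *foo_phy_xlate(struct device *dev,
				 struct of_phandle_args *args)
{
	return ERR_PTR(-ENODEV);
}

static int foo_phy_probe(struct platform_device *pdev)
{
	struct device_node *pads;
	struct phy_provider *provider;

	/* PHY nodes live under a "pads" child rather than directly
	 * under the provider's own node. */
	pads = of_get_child_by_name(pdev->dev.of_node, "pads");

	provider = __devm_of_phy_provider_register(&pdev->dev, pads,
						   THIS_MODULE,
						   foo_phy_xlate);
	of_node_put(pads);	/* the provider holds its own reference */

	return PTR_ERR_OR_ZERO(provider);
}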
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c
index 77e2d02..793ecb6 100644
--- a/drivers/phy/phy-rockchip-dp.c
+++ b/drivers/phy/phy-rockchip-dp.c
@@ -86,6 +86,9 @@
 	if (!np)
 		return -ENODEV;
 
+	if (!dev->parent || !dev->parent->of_node)
+		return -ENODEV;
+
 	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
 	if (IS_ERR(dp))
 		return -ENOMEM;
@@ -104,9 +107,9 @@
 		return ret;
 	}
 
-	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+	dp->grf = syscon_node_to_regmap(dev->parent->of_node);
 	if (IS_ERR(dp->grf)) {
-		dev_err(dev, "rk3288-dp needs rockchip,grf property\n");
+		dev_err(dev, "rk3288-dp needs the General Register Files syscon\n");
 		return PTR_ERR(dp->grf);
 	}
 
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index 887b4c2..6ebcf3e 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -176,7 +176,10 @@
 	struct regmap *grf;
 	unsigned int reg_offset;
 
-	grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+	if (!dev->parent || !dev->parent->of_node)
+		return -ENODEV;
+
+	grf = syscon_node_to_regmap(dev->parent->of_node);
 	if (IS_ERR(grf)) {
 		dev_err(dev, "Missing rockchip,grf property\n");
 		return PTR_ERR(grf);
diff --git a/drivers/phy/tegra/Kconfig b/drivers/phy/tegra/Kconfig
new file mode 100644
index 0000000..a3b1de9
--- /dev/null
+++ b/drivers/phy/tegra/Kconfig
@@ -0,0 +1,8 @@
+config PHY_TEGRA_XUSB
+	tristate "NVIDIA Tegra XUSB pad controller driver"
+	depends on ARCH_TEGRA
+	help
+	  Choose this option if you have an NVIDIA Tegra SoC with an XUSB
+	  pad controller.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called phy-tegra-xusb.
diff --git a/drivers/phy/tegra/Makefile b/drivers/phy/tegra/Makefile
new file mode 100644
index 0000000..8985892
--- /dev/null
+++ b/drivers/phy/tegra/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_PHY_TEGRA_XUSB) += phy-tegra-xusb.o
+
+phy-tegra-xusb-y += xusb.o
+phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_124_SOC) += xusb-tegra124.o
+phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_132_SOC) += xusb-tegra124.o
+phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_210_SOC) += xusb-tegra210.o
diff --git a/drivers/phy/tegra/xusb-tegra124.c b/drivers/phy/tegra/xusb-tegra124.c
new file mode 100644
index 0000000..1199572
--- /dev/null
+++ b/drivers/phy/tegra/xusb-tegra124.c
@@ -0,0 +1,1752 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "xusb.h"
+
+#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(x) ((x) ? 15 : 0)
+#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK 0x3f
+#define FUSE_SKU_CALIB_HS_IREF_CAP_SHIFT 13
+#define FUSE_SKU_CALIB_HS_IREF_CAP_MASK 0x3
+#define FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_SHIFT 11
+#define FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_MASK 0x3
+#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT 7
+#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK 0xf
+
+#define XUSB_PADCTL_USB2_PORT_CAP 0x008
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_SHIFT(x) ((x) * 4)
+#define XUSB_PADCTL_USB2_PORT_CAP_PORT_CAP_MASK 0x3
+#define XUSB_PADCTL_USB2_PORT_CAP_DISABLED 0x0
+#define XUSB_PADCTL_USB2_PORT_CAP_HOST 0x1
+#define XUSB_PADCTL_USB2_PORT_CAP_DEVICE 0x2
+#define XUSB_PADCTL_USB2_PORT_CAP_OTG 0x3
+
+#define XUSB_PADCTL_SS_PORT_MAP 0x014
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(x) (1 << (((x) * 4) + 3))
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_SHIFT(x) ((x) * 4)
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(x) (0x7 << ((x) * 4))
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 4))
+#define XUSB_PADCTL_SS_PORT_MAP_PORT_MAP_MASK 0x7
+
+#define XUSB_PADCTL_ELPG_PROGRAM 0x01c
+#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN (1 << 26)
+#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY (1 << 25)
+#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN (1 << 24)
+#define XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_VCORE_DOWN(x) (1 << (18 + (x) * 4))
+#define XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN_EARLY(x) \
+							(1 << (17 + (x) * 4))
+#define XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(x) (1 << (16 + (x) * 4))
+
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1 0x040
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET (1 << 19)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK (0xf << 12)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST (1 << 1)
+
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2 0x044
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN (1 << 6)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN (1 << 5)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL (1 << 4)
+
+#define XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(x) (0x058 + (x) * 4)
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_SHIFT 24
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_MASK 0xff
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_VAL 0x24
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT 16
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_MASK 0x3f
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT 8
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_MASK 0x3f
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_SHIFT 8
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_MASK 0xffff
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_VAL 0xf070
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_SHIFT 4
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_MASK 0xf
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_VAL 0xf
+
+#define XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(x) (0x068 + (x) * 4)
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT 24
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_MASK 0x1f
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT 16
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_MASK 0x7f
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_VAL 0x002008ee
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL2(x) ((x) < 2 ? 0x078 + (x) * 4 : \
+					       0x0f8 + (x) * 4)
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_SHIFT 28
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_MASK 0x3
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_VAL 0x1
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL5(x) ((x) < 2 ? 0x090 + (x) * 4 : \
+					       0x11c + (x) * 4)
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL5_RX_QEYE_EN (1 << 8)
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL6(x) ((x) < 2 ? 0x098 + (x) * 4 : \
+					       0x128 + (x) * 4)
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT 24
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_G_Z_MASK 0x3f
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_TAP_MASK 0x1f
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_AMP_MASK 0x7f
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT 16
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK 0xff
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_G_Z 0x21
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_TAP 0x32
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_AMP 0x33
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_CTLE_Z 0x48
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_LATCH_G_Z 0xa1
+
+#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x0a0 + (x) * 4)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI (1 << 21)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 (1 << 20)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD (1 << 19)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_SHIFT 14
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_MASK 0x3
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_VAL(x) ((x) ? 0x0 : 0x3)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_SHIFT 6
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_MASK 0x3f
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_VAL 0x0e
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT 0
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK 0x3f
+
+#define XUSB_PADCTL_USB2_OTG_PADX_CTL1(x) (0x0ac + (x) * 4)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_SHIFT 9
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_MASK 0x3
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT 3
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK 0x7
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR (1 << 2)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_FORCE_POWERUP (1 << 1)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_FORCE_POWERUP (1 << 0)
+
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x0b8
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD (1 << 12)
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT 2
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK 0x7
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL 0x5
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT 0
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK 0x3
+
+#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x0c0 + (x) * 4)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_SHIFT 12
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_MASK 0x7
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_SHIFT 8
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_MASK 0x7
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_SHIFT 4
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_MASK 0x7
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_SHIFT 0
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_MASK 0x7
+
+#define XUSB_PADCTL_HSIC_PADX_CTL1(x) (0x0c8 + (x) * 4)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE (1 << 10)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_RPU_DATA (1 << 9)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_RPD_STROBE (1 << 8)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA (1 << 7)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_ZI (1 << 5)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_RX (1 << 4)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_TRX (1 << 3)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_TX (1 << 2)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_AUTO_TERM_EN (1 << 0)
+
+#define XUSB_PADCTL_HSIC_PADX_CTL2(x) (0x0d0 + (x) * 4)
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT 4
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK 0x7
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT 0
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK 0x7
+
+#define XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL 0x0e0
+#define XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL_STRB_TRIM_MASK 0x1f
+
+#define XUSB_PADCTL_USB3_PAD_MUX 0x134
+#define XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x)))
+#define XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (6 + (x)))
+
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1 0x138
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET (1 << 27)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE (1 << 24)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_SHIFT 20
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_MASK 0x3
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD (1 << 3)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST (1 << 1)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ (1 << 0)
+
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2 0x13c
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_SHIFT 20
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_MASK 0xf
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_SHIFT 16
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_MASK 0xf
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TCLKOUT_EN (1 << 12)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TXCLKREF_SEL (1 << 4)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_SHIFT 0
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_MASK 0x7
+
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL3 0x140
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL3_RCAL_BYPASS (1 << 7)
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1 0x148
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD (1 << 1)
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ (1 << 0)
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL2 0x14c
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL5 0x158
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL6 0x15c
+
+struct tegra124_xusb_fuse_calibration {
+	u32 hs_curr_level[3];
+	u32 hs_iref_cap;
+	u32 hs_term_range_adj;
+	u32 hs_squelch_level;
+};
+
+struct tegra124_xusb_padctl {
+	struct tegra_xusb_padctl base;
+
+	struct tegra124_xusb_fuse_calibration fuse;
+};
+
+static inline struct tegra124_xusb_padctl *
+to_tegra124_xusb_padctl(struct tegra_xusb_padctl *padctl)
+{
+	return container_of(padctl, struct tegra124_xusb_padctl, base);
+}
+
+static int tegra124_xusb_padctl_enable(struct tegra_xusb_padctl *padctl)
+{
+	u32 value;
+
+	mutex_lock(&padctl->lock);
+
+	if (padctl->enable++ > 0)
+		goto out;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+out:
+	mutex_unlock(&padctl->lock);
+	return 0;
+}
+
+static int tegra124_xusb_padctl_disable(struct tegra_xusb_padctl *padctl)
+{
+	u32 value;
+
+	mutex_lock(&padctl->lock);
+
+	if (WARN_ON(padctl->enable == 0))
+		goto out;
+
+	if (--padctl->enable > 0)
+		goto out;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+out:
+	mutex_unlock(&padctl->lock);
+	return 0;
+}
+
+static int tegra124_usb3_save_context(struct tegra_xusb_padctl *padctl,
+				      unsigned int index)
+{
+	struct tegra_xusb_usb3_port *port;
+	struct tegra_xusb_lane *lane;
+	u32 value, offset;
+
+	port = tegra_xusb_find_usb3_port(padctl, index);
+	if (!port)
+		return -ENODEV;
+
+	port->context_saved = true;
+	lane = port->base.lane;
+
+	if (lane->pad == padctl->pcie)
+		offset = XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL6(lane->index);
+	else
+		offset = XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL6;
+
+	value = padctl_readl(padctl, offset);
+	value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
+		   XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
+	value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_TAP <<
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
+	padctl_writel(padctl, value, offset);
+
+	value = padctl_readl(padctl, offset) >>
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
+	port->tap1 = value & XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_TAP_MASK;
+
+	value = padctl_readl(padctl, offset);
+	value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
+		   XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
+	value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_AMP <<
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
+	padctl_writel(padctl, value, offset);
+
+	value = padctl_readl(padctl, offset) >>
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
+	port->amp = value & XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_AMP_MASK;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(index));
+	value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_MASK <<
+		    XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
+		   (XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_MASK <<
+		    XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT));
+	value |= (port->tap1 <<
+		  XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
+		 (port->amp <<
+		  XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(index));
+
+	value = padctl_readl(padctl, offset);
+	value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
+		   XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
+	value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_LATCH_G_Z <<
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
+	padctl_writel(padctl, value, offset);
+
+	value = padctl_readl(padctl, offset);
+	value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
+		   XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
+	value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_G_Z <<
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
+	padctl_writel(padctl, value, offset);
+
+	value = padctl_readl(padctl, offset) >>
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
+	port->ctle_g = value &
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_G_Z_MASK;
+
+	value = padctl_readl(padctl, offset);
+	value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
+		   XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
+	value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_CTLE_Z <<
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
+	padctl_writel(padctl, value, offset);
+
+	value = padctl_readl(padctl, offset) >>
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
+	port->ctle_z = value &
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_G_Z_MASK;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
+	value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_MASK <<
+		    XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
+		   (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_MASK <<
+		    XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT));
+	value |= (port->ctle_g <<
+		  XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
+		 (port->ctle_z <<
+		  XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
+
+	return 0;
+}
+
+static int tegra124_hsic_set_idle(struct tegra_xusb_padctl *padctl,
+				  unsigned int index, bool idle)
+{
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+	if (idle)
+		value |= XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA |
+			 XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE;
+	else
+		value &= ~(XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA |
+			   XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE);
+
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+	return 0;
+}
+
+#define TEGRA124_LANE(_name, _offset, _shift, _mask, _type)		\
+	{								\
+		.name = _name,						\
+		.offset = _offset,					\
+		.shift = _shift,					\
+		.mask = _mask,						\
+		.num_funcs = ARRAY_SIZE(tegra124_##_type##_functions),	\
+		.funcs = tegra124_##_type##_functions,			\
+	}
+
+static const char * const tegra124_usb2_functions[] = {
+	"snps",
+	"xusb",
+	"uart",
+};
+
+static const struct tegra_xusb_lane_soc tegra124_usb2_lanes[] = {
+	TEGRA124_LANE("usb2-0", 0x004,  0, 0x3, usb2),
+	TEGRA124_LANE("usb2-1", 0x004,  2, 0x3, usb2),
+	TEGRA124_LANE("usb2-2", 0x004,  4, 0x3, usb2),
+};
+
+static struct tegra_xusb_lane *
+tegra124_usb2_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+			 unsigned int index)
+{
+	struct tegra_xusb_usb2_lane *usb2;
+	int err;
+
+	usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
+	if (!usb2)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&usb2->base.list);
+	usb2->base.soc = &pad->soc->lanes[index];
+	usb2->base.index = index;
+	usb2->base.pad = pad;
+	usb2->base.np = np;
+
+	err = tegra_xusb_lane_parse_dt(&usb2->base, np);
+	if (err < 0) {
+		kfree(usb2);
+		return ERR_PTR(err);
+	}
+
+	return &usb2->base;
+}
+
+static void tegra124_usb2_lane_remove(struct tegra_xusb_lane *lane)
+{
+	struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
+
+	kfree(usb2);
+}
+
+static const struct tegra_xusb_lane_ops tegra124_usb2_lane_ops = {
+	.probe = tegra124_usb2_lane_probe,
+	.remove = tegra124_usb2_lane_remove,
+};
+
+static int tegra124_usb2_phy_init(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra124_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra124_usb2_phy_exit(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra124_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra124_usb2_phy_power_on(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
+	struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	struct tegra124_xusb_padctl *priv;
+	struct tegra_xusb_usb2_port *port;
+	unsigned int index = lane->index;
+	u32 value;
+	int err;
+
+	port = tegra_xusb_find_usb2_port(padctl, index);
+	if (!port) {
+		dev_err(&phy->dev, "no port found for USB2 lane %u\n", index);
+		return -ENODEV;
+	}
+
+	priv = to_tegra124_xusb_padctl(padctl);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+	value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK <<
+		    XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
+		   (XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK <<
+		    XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT));
+	value |= (priv->fuse.hs_squelch_level <<
+		  XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
+		 (XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL <<
+		  XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
+	value &= ~(XUSB_PADCTL_USB2_PORT_CAP_PORT_CAP_MASK <<
+		   XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_SHIFT(index));
+	value |= XUSB_PADCTL_USB2_PORT_CAP_HOST <<
+		XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_SHIFT(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+	value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK <<
+		    XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT) |
+		   (XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_MASK <<
+		    XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_SHIFT) |
+		   (XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_MASK <<
+		    XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_SHIFT) |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI);
+	value |= (priv->fuse.hs_curr_level[index] +
+		  usb2->hs_curr_level_offset) <<
+		XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT;
+	value |= XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_VAL <<
+		XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_SHIFT;
+	value |= XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_VAL(index) <<
+		XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+	value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK <<
+		    XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
+		   (XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_MASK <<
+		    XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_SHIFT) |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_FORCE_POWERUP |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_FORCE_POWERUP);
+	value |= (priv->fuse.hs_term_range_adj <<
+		  XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
+		 (priv->fuse.hs_iref_cap <<
+		  XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+
+	err = regulator_enable(port->supply);
+	if (err)
+		return err;
+
+	mutex_lock(&pad->lock);
+
+	if (pad->enable++ > 0)
+		goto out;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+	value &= ~XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+out:
+	mutex_unlock(&pad->lock);
+	return 0;
+}
+
+static int tegra124_usb2_phy_power_off(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	struct tegra_xusb_usb2_port *port;
+	u32 value;
+
+	port = tegra_xusb_find_usb2_port(padctl, lane->index);
+	if (!port) {
+		dev_err(&phy->dev, "no port found for USB2 lane %u\n",
+			lane->index);
+		return -ENODEV;
+	}
+
+	mutex_lock(&pad->lock);
+
+	if (WARN_ON(pad->enable == 0))
+		goto out;
+
+	if (--pad->enable > 0)
+		goto out;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+	value |= XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+out:
+	regulator_disable(port->supply);
+	mutex_unlock(&pad->lock);
+	return 0;
+}
+
+static const struct phy_ops tegra124_usb2_phy_ops = {
+	.init = tegra124_usb2_phy_init,
+	.exit = tegra124_usb2_phy_exit,
+	.power_on = tegra124_usb2_phy_power_on,
+	.power_off = tegra124_usb2_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra124_usb2_pad_probe(struct tegra_xusb_padctl *padctl,
+			const struct tegra_xusb_pad_soc *soc,
+			struct device_node *np)
+{
+	struct tegra_xusb_usb2_pad *usb2;
+	struct tegra_xusb_pad *pad;
+	int err;
+
+	usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
+	if (!usb2)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&usb2->lock);
+
+	pad = &usb2->base;
+	pad->ops = &tegra124_usb2_lane_ops;
+	pad->soc = soc;
+
+	err = tegra_xusb_pad_init(pad, padctl, np);
+	if (err < 0) {
+		kfree(usb2);
+		goto out;
+	}
+
+	err = tegra_xusb_pad_register(pad, &tegra124_usb2_phy_ops);
+	if (err < 0)
+		goto unregister;
+
+	dev_set_drvdata(&pad->dev, pad);
+
+	return pad;
+
+unregister:
+	device_unregister(&pad->dev);
+out:
+	return ERR_PTR(err);
+}
+
+static void tegra124_usb2_pad_remove(struct tegra_xusb_pad *pad)
+{
+	struct tegra_xusb_usb2_pad *usb2 = to_usb2_pad(pad);
+
+	kfree(usb2);
+}
+
+static const struct tegra_xusb_pad_ops tegra124_usb2_ops = {
+	.probe = tegra124_usb2_pad_probe,
+	.remove = tegra124_usb2_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra124_usb2_pad = {
+	.name = "usb2",
+	.num_lanes = ARRAY_SIZE(tegra124_usb2_lanes),
+	.lanes = tegra124_usb2_lanes,
+	.ops = &tegra124_usb2_ops,
+};
+
+static const char * const tegra124_ulpi_functions[] = {
+	"snps",
+	"xusb",
+};
+
+static const struct tegra_xusb_lane_soc tegra124_ulpi_lanes[] = {
+	TEGRA124_LANE("ulpi-0", 0x004, 12, 0x1, ulpi),
+};
+
+static struct tegra_xusb_lane *
+tegra124_ulpi_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+			 unsigned int index)
+{
+	struct tegra_xusb_ulpi_lane *ulpi;
+	int err;
+
+	ulpi = kzalloc(sizeof(*ulpi), GFP_KERNEL);
+	if (!ulpi)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&ulpi->base.list);
+	ulpi->base.soc = &pad->soc->lanes[index];
+	ulpi->base.index = index;
+	ulpi->base.pad = pad;
+	ulpi->base.np = np;
+
+	err = tegra_xusb_lane_parse_dt(&ulpi->base, np);
+	if (err < 0) {
+		kfree(ulpi);
+		return ERR_PTR(err);
+	}
+
+	return &ulpi->base;
+}
+
+static void tegra124_ulpi_lane_remove(struct tegra_xusb_lane *lane)
+{
+	struct tegra_xusb_ulpi_lane *ulpi = to_ulpi_lane(lane);
+
+	kfree(ulpi);
+}
+
+static const struct tegra_xusb_lane_ops tegra124_ulpi_lane_ops = {
+	.probe = tegra124_ulpi_lane_probe,
+	.remove = tegra124_ulpi_lane_remove,
+};
+
+static int tegra124_ulpi_phy_init(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra124_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra124_ulpi_phy_exit(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra124_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra124_ulpi_phy_power_on(struct phy *phy)
+{
+	return 0;
+}
+
+static int tegra124_ulpi_phy_power_off(struct phy *phy)
+{
+	return 0;
+}
+
+static const struct phy_ops tegra124_ulpi_phy_ops = {
+	.init = tegra124_ulpi_phy_init,
+	.exit = tegra124_ulpi_phy_exit,
+	.power_on = tegra124_ulpi_phy_power_on,
+	.power_off = tegra124_ulpi_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra124_ulpi_pad_probe(struct tegra_xusb_padctl *padctl,
+			const struct tegra_xusb_pad_soc *soc,
+			struct device_node *np)
+{
+	struct tegra_xusb_ulpi_pad *ulpi;
+	struct tegra_xusb_pad *pad;
+	int err;
+
+	ulpi = kzalloc(sizeof(*ulpi), GFP_KERNEL);
+	if (!ulpi)
+		return ERR_PTR(-ENOMEM);
+
+	pad = &ulpi->base;
+	pad->ops = &tegra124_ulpi_lane_ops;
+	pad->soc = soc;
+
+	err = tegra_xusb_pad_init(pad, padctl, np);
+	if (err < 0) {
+		kfree(ulpi);
+		goto out;
+	}
+
+	err = tegra_xusb_pad_register(pad, &tegra124_ulpi_phy_ops);
+	if (err < 0)
+		goto unregister;
+
+	dev_set_drvdata(&pad->dev, pad);
+
+	return pad;
+
+unregister:
+	device_unregister(&pad->dev);
+out:
+	return ERR_PTR(err);
+}
+
+static void tegra124_ulpi_pad_remove(struct tegra_xusb_pad *pad)
+{
+	struct tegra_xusb_ulpi_pad *ulpi = to_ulpi_pad(pad);
+
+	kfree(ulpi);
+}
+
+static const struct tegra_xusb_pad_ops tegra124_ulpi_ops = {
+	.probe = tegra124_ulpi_pad_probe,
+	.remove = tegra124_ulpi_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra124_ulpi_pad = {
+	.name = "ulpi",
+	.num_lanes = ARRAY_SIZE(tegra124_ulpi_lanes),
+	.lanes = tegra124_ulpi_lanes,
+	.ops = &tegra124_ulpi_ops,
+};
+
+static const char * const tegra124_hsic_functions[] = {
+	"snps",
+	"xusb",
+};
+
+static const struct tegra_xusb_lane_soc tegra124_hsic_lanes[] = {
+	TEGRA124_LANE("hsic-0", 0x004, 14, 0x1, hsic),
+	TEGRA124_LANE("hsic-1", 0x004, 15, 0x1, hsic),
+};
+
+static struct tegra_xusb_lane *
+tegra124_hsic_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+			 unsigned int index)
+{
+	struct tegra_xusb_hsic_lane *hsic;
+	int err;
+
+	hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
+	if (!hsic)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&hsic->base.list);
+	hsic->base.soc = &pad->soc->lanes[index];
+	hsic->base.index = index;
+	hsic->base.pad = pad;
+	hsic->base.np = np;
+
+	err = tegra_xusb_lane_parse_dt(&hsic->base, np);
+	if (err < 0) {
+		kfree(hsic);
+		return ERR_PTR(err);
+	}
+
+	return &hsic->base;
+}
+
+static void tegra124_hsic_lane_remove(struct tegra_xusb_lane *lane)
+{
+	struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
+
+	kfree(hsic);
+}
+
+static const struct tegra_xusb_lane_ops tegra124_hsic_lane_ops = {
+	.probe = tegra124_hsic_lane_probe,
+	.remove = tegra124_hsic_lane_remove,
+};
+
+static int tegra124_hsic_phy_init(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra124_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra124_hsic_phy_exit(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra124_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra124_hsic_phy_power_on(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
+	struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	unsigned int index = lane->index;
+	u32 value;
+	int err;
+
+	err = regulator_enable(pad->supply);
+	if (err)
+		return err;
+
+	padctl_writel(padctl, hsic->strobe_trim,
+		      XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+	if (hsic->auto_term)
+		value |= XUSB_PADCTL_HSIC_PAD_CTL1_AUTO_TERM_EN;
+	else
+		value &= ~XUSB_PADCTL_HSIC_PAD_CTL1_AUTO_TERM_EN;
+
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+	value &= ~((XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_MASK <<
+		    XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_SHIFT) |
+		   (XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_MASK <<
+		    XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_SHIFT) |
+		   (XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_MASK <<
+		    XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_SHIFT) |
+		   (XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_MASK <<
+		    XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_SHIFT));
+	value |= (hsic->tx_rtune_n <<
+		  XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_SHIFT) |
+		(hsic->tx_rtune_p <<
+		  XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_SHIFT) |
+		(hsic->tx_rslew_n <<
+		 XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_SHIFT) |
+		(hsic->tx_rslew_p <<
+		 XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL2(index));
+	value &= ~((XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK <<
+		    XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
+		   (XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK <<
+		    XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT));
+	value |= (hsic->rx_strobe_trim <<
+		  XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
+		(hsic->rx_data_trim <<
+		 XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL2(index));
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+	value &= ~(XUSB_PADCTL_HSIC_PAD_CTL1_RPD_STROBE |
+		   XUSB_PADCTL_HSIC_PAD_CTL1_RPU_DATA |
+		   XUSB_PADCTL_HSIC_PAD_CTL1_PD_RX |
+		   XUSB_PADCTL_HSIC_PAD_CTL1_PD_ZI |
+		   XUSB_PADCTL_HSIC_PAD_CTL1_PD_TRX |
+		   XUSB_PADCTL_HSIC_PAD_CTL1_PD_TX);
+	value |= XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA |
+		 XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE;
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+	return 0;
+}
+
+static int tegra124_hsic_phy_power_off(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	unsigned int index = lane->index;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+	value |= XUSB_PADCTL_HSIC_PAD_CTL1_PD_RX |
+		 XUSB_PADCTL_HSIC_PAD_CTL1_PD_ZI |
+		 XUSB_PADCTL_HSIC_PAD_CTL1_PD_TRX |
+		 XUSB_PADCTL_HSIC_PAD_CTL1_PD_TX;
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+	regulator_disable(pad->supply);
+
+	return 0;
+}
+
+static const struct phy_ops tegra124_hsic_phy_ops = {
+	.init = tegra124_hsic_phy_init,
+	.exit = tegra124_hsic_phy_exit,
+	.power_on = tegra124_hsic_phy_power_on,
+	.power_off = tegra124_hsic_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra124_hsic_pad_probe(struct tegra_xusb_padctl *padctl,
+			const struct tegra_xusb_pad_soc *soc,
+			struct device_node *np)
+{
+	struct tegra_xusb_hsic_pad *hsic;
+	struct tegra_xusb_pad *pad;
+	int err;
+
+	hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
+	if (!hsic)
+		return ERR_PTR(-ENOMEM);
+
+	pad = &hsic->base;
+	pad->ops = &tegra124_hsic_lane_ops;
+	pad->soc = soc;
+
+	err = tegra_xusb_pad_init(pad, padctl, np);
+	if (err < 0) {
+		kfree(hsic);
+		goto out;
+	}
+
+	err = tegra_xusb_pad_register(pad, &tegra124_hsic_phy_ops);
+	if (err < 0)
+		goto unregister;
+
+	dev_set_drvdata(&pad->dev, pad);
+
+	return pad;
+
+unregister:
+	device_unregister(&pad->dev);
+out:
+	return ERR_PTR(err);
+}
+
+static void tegra124_hsic_pad_remove(struct tegra_xusb_pad *pad)
+{
+	struct tegra_xusb_hsic_pad *hsic = to_hsic_pad(pad);
+
+	kfree(hsic);
+}
+
+static const struct tegra_xusb_pad_ops tegra124_hsic_ops = {
+	.probe = tegra124_hsic_pad_probe,
+	.remove = tegra124_hsic_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra124_hsic_pad = {
+	.name = "hsic",
+	.num_lanes = ARRAY_SIZE(tegra124_hsic_lanes),
+	.lanes = tegra124_hsic_lanes,
+	.ops = &tegra124_hsic_ops,
+};
+
+static const char * const tegra124_pcie_functions[] = {
+	"pcie",
+	"usb3-ss",
+	"sata",
+};
+
+static const struct tegra_xusb_lane_soc tegra124_pcie_lanes[] = {
+	TEGRA124_LANE("pcie-0", 0x134, 16, 0x3, pcie),
+	TEGRA124_LANE("pcie-1", 0x134, 18, 0x3, pcie),
+	TEGRA124_LANE("pcie-2", 0x134, 20, 0x3, pcie),
+	TEGRA124_LANE("pcie-3", 0x134, 22, 0x3, pcie),
+	TEGRA124_LANE("pcie-4", 0x134, 24, 0x3, pcie),
+};
+
+static struct tegra_xusb_lane *
+tegra124_pcie_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+			 unsigned int index)
+{
+	struct tegra_xusb_pcie_lane *pcie;
+	int err;
+
+	pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&pcie->base.list);
+	pcie->base.soc = &pad->soc->lanes[index];
+	pcie->base.index = index;
+	pcie->base.pad = pad;
+	pcie->base.np = np;
+
+	err = tegra_xusb_lane_parse_dt(&pcie->base, np);
+	if (err < 0) {
+		kfree(pcie);
+		return ERR_PTR(err);
+	}
+
+	return &pcie->base;
+}
+
+static void tegra124_pcie_lane_remove(struct tegra_xusb_lane *lane)
+{
+	struct tegra_xusb_pcie_lane *pcie = to_pcie_lane(lane);
+
+	kfree(pcie);
+}
+
+static const struct tegra_xusb_lane_ops tegra124_pcie_lane_ops = {
+	.probe = tegra124_pcie_lane_probe,
+	.remove = tegra124_pcie_lane_remove,
+};
+
+static int tegra124_pcie_phy_init(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra124_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra124_pcie_phy_exit(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra124_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra124_pcie_phy_power_on(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	unsigned long timeout;
+	int err = -ETIMEDOUT;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+	value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL2);
+	value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN |
+		 XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN |
+		 XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL2);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+	value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+
+	timeout = jiffies + msecs_to_jiffies(50);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+		if (value & XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET) {
+			err = 0;
+			break;
+		}
+
+		usleep_range(100, 200);
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+	value |= XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+	return err;
+}
+
+static int tegra124_pcie_phy_power_off(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+	value &= ~XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+	value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+
+	return 0;
+}
+
+static const struct phy_ops tegra124_pcie_phy_ops = {
+	.init = tegra124_pcie_phy_init,
+	.exit = tegra124_pcie_phy_exit,
+	.power_on = tegra124_pcie_phy_power_on,
+	.power_off = tegra124_pcie_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra124_pcie_pad_probe(struct tegra_xusb_padctl *padctl,
+			const struct tegra_xusb_pad_soc *soc,
+			struct device_node *np)
+{
+	struct tegra_xusb_pcie_pad *pcie;
+	struct tegra_xusb_pad *pad;
+	int err;
+
+	pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return ERR_PTR(-ENOMEM);
+
+	pad = &pcie->base;
+	pad->ops = &tegra124_pcie_lane_ops;
+	pad->soc = soc;
+
+	err = tegra_xusb_pad_init(pad, padctl, np);
+	if (err < 0) {
+		kfree(pcie);
+		goto out;
+	}
+
+	err = tegra_xusb_pad_register(pad, &tegra124_pcie_phy_ops);
+	if (err < 0)
+		goto unregister;
+
+	dev_set_drvdata(&pad->dev, pad);
+
+	return pad;
+
+unregister:
+	device_unregister(&pad->dev);
+out:
+	return ERR_PTR(err);
+}
+
+static void tegra124_pcie_pad_remove(struct tegra_xusb_pad *pad)
+{
+	struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(pad);
+
+	kfree(pcie);
+}
+
+static const struct tegra_xusb_pad_ops tegra124_pcie_ops = {
+	.probe = tegra124_pcie_pad_probe,
+	.remove = tegra124_pcie_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra124_pcie_pad = {
+	.name = "pcie",
+	.num_lanes = ARRAY_SIZE(tegra124_pcie_lanes),
+	.lanes = tegra124_pcie_lanes,
+	.ops = &tegra124_pcie_ops,
+};
+
+static const struct tegra_xusb_lane_soc tegra124_sata_lanes[] = {
+	TEGRA124_LANE("sata-0", 0x134, 26, 0x3, pcie),
+};
+
+static struct tegra_xusb_lane *
+tegra124_sata_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+			 unsigned int index)
+{
+	struct tegra_xusb_sata_lane *sata;
+	int err;
+
+	sata = kzalloc(sizeof(*sata), GFP_KERNEL);
+	if (!sata)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&sata->base.list);
+	sata->base.soc = &pad->soc->lanes[index];
+	sata->base.index = index;
+	sata->base.pad = pad;
+	sata->base.np = np;
+
+	err = tegra_xusb_lane_parse_dt(&sata->base, np);
+	if (err < 0) {
+		kfree(sata);
+		return ERR_PTR(err);
+	}
+
+	return &sata->base;
+}
+
+static void tegra124_sata_lane_remove(struct tegra_xusb_lane *lane)
+{
+	struct tegra_xusb_sata_lane *sata = to_sata_lane(lane);
+
+	kfree(sata);
+}
+
+static const struct tegra_xusb_lane_ops tegra124_sata_lane_ops = {
+	.probe = tegra124_sata_lane_probe,
+	.remove = tegra124_sata_lane_remove,
+};
+
+static int tegra124_sata_phy_init(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra124_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra124_sata_phy_exit(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra124_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra124_sata_phy_power_on(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	unsigned long timeout;
+	int err = -ETIMEDOUT;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+	value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD;
+	value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+	value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD;
+	value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+	value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+	value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+	timeout = jiffies + msecs_to_jiffies(50);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+		if (value & XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET) {
+			err = 0;
+			break;
+		}
+
+		usleep_range(100, 200);
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+	value |= XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+	return err;
+}
+
+static int tegra124_sata_phy_power_off(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+	value &= ~XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+	value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+	value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+	value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD;
+	value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+	value |= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD;
+	value |= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ;
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+
+	return 0;
+}
+
+static const struct phy_ops tegra124_sata_phy_ops = {
+	.init = tegra124_sata_phy_init,
+	.exit = tegra124_sata_phy_exit,
+	.power_on = tegra124_sata_phy_power_on,
+	.power_off = tegra124_sata_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra124_sata_pad_probe(struct tegra_xusb_padctl *padctl,
+			const struct tegra_xusb_pad_soc *soc,
+			struct device_node *np)
+{
+	struct tegra_xusb_sata_pad *sata;
+	struct tegra_xusb_pad *pad;
+	int err;
+
+	sata = kzalloc(sizeof(*sata), GFP_KERNEL);
+	if (!sata)
+		return ERR_PTR(-ENOMEM);
+
+	pad = &sata->base;
+	pad->ops = &tegra124_sata_lane_ops;
+	pad->soc = soc;
+
+	err = tegra_xusb_pad_init(pad, padctl, np);
+	if (err < 0) {
+		kfree(sata);
+		goto out;
+	}
+
+	err = tegra_xusb_pad_register(pad, &tegra124_sata_phy_ops);
+	if (err < 0)
+		goto unregister;
+
+	dev_set_drvdata(&pad->dev, pad);
+
+	return pad;
+
+unregister:
+	device_unregister(&pad->dev);
+out:
+	return ERR_PTR(err);
+}
+
+static void tegra124_sata_pad_remove(struct tegra_xusb_pad *pad)
+{
+	struct tegra_xusb_sata_pad *sata = to_sata_pad(pad);
+
+	kfree(sata);
+}
+
+static const struct tegra_xusb_pad_ops tegra124_sata_ops = {
+	.probe = tegra124_sata_pad_probe,
+	.remove = tegra124_sata_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra124_sata_pad = {
+	.name = "sata",
+	.num_lanes = ARRAY_SIZE(tegra124_sata_lanes),
+	.lanes = tegra124_sata_lanes,
+	.ops = &tegra124_sata_ops,
+};
+
+static const struct tegra_xusb_pad_soc *tegra124_pads[] = {
+	&tegra124_usb2_pad,
+	&tegra124_ulpi_pad,
+	&tegra124_hsic_pad,
+	&tegra124_pcie_pad,
+	&tegra124_sata_pad,
+};
+
+static int tegra124_usb2_port_enable(struct tegra_xusb_port *port)
+{
+	return 0;
+}
+
+static void tegra124_usb2_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra124_usb2_port_map(struct tegra_xusb_port *port)
+{
+	return tegra_xusb_find_lane(port->padctl, "usb2", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra124_usb2_port_ops = {
+	.enable = tegra124_usb2_port_enable,
+	.disable = tegra124_usb2_port_disable,
+	.map = tegra124_usb2_port_map,
+};
+
+static int tegra124_ulpi_port_enable(struct tegra_xusb_port *port)
+{
+	return 0;
+}
+
+static void tegra124_ulpi_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra124_ulpi_port_map(struct tegra_xusb_port *port)
+{
+	return tegra_xusb_find_lane(port->padctl, "ulpi", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra124_ulpi_port_ops = {
+	.enable = tegra124_ulpi_port_enable,
+	.disable = tegra124_ulpi_port_disable,
+	.map = tegra124_ulpi_port_map,
+};
+
+static int tegra124_hsic_port_enable(struct tegra_xusb_port *port)
+{
+	return 0;
+}
+
+static void tegra124_hsic_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra124_hsic_port_map(struct tegra_xusb_port *port)
+{
+	return tegra_xusb_find_lane(port->padctl, "hsic", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra124_hsic_port_ops = {
+	.enable = tegra124_hsic_port_enable,
+	.disable = tegra124_hsic_port_disable,
+	.map = tegra124_hsic_port_map,
+};
+
+static int tegra124_usb3_port_enable(struct tegra_xusb_port *port)
+{
+	struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
+	struct tegra_xusb_padctl *padctl = port->padctl;
+	struct tegra_xusb_lane *lane = usb3->base.lane;
+	unsigned int index = port->index, offset;
+	int ret = 0;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+
+	if (!usb3->internal)
+		value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
+	else
+		value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
+
+	value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
+	value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, usb3->port);
+	padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+
+	/*
+	 * TODO: move this code into the PCIe/SATA PHY ->power_on() callbacks
+	 * and conditionalize based on mux function? This seems to work, but
+	 * might not be the exact proper sequence.
+	 */
+	value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
+	value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_MASK <<
+		    XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_SHIFT) |
+		   (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_MASK <<
+		    XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_SHIFT) |
+		   (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_MASK <<
+		    XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_SHIFT));
+	value |= (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_VAL <<
+		  XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_SHIFT) |
+		 (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_VAL <<
+		  XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_SHIFT) |
+		 (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_VAL <<
+		  XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_SHIFT);
+
+	if (usb3->context_saved) {
+		value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_MASK <<
+			    XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
+			   (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_MASK <<
+			    XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT));
+		value |= (usb3->ctle_g <<
+			  XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
+			 (usb3->ctle_z <<
+			  XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT);
+	}
+
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
+
+	value = XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_VAL;
+
+	if (usb3->context_saved) {
+		value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_MASK <<
+			    XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
+			   (XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_MASK <<
+			    XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT));
+		value |= (usb3->tap1 <<
+			  XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
+			 (usb3->amp <<
+			  XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT);
+	}
+
+	padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(index));
+
+	if (lane->pad == padctl->pcie)
+		offset = XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL2(lane->index);
+	else
+		offset = XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL2;
+
+	value = padctl_readl(padctl, offset);
+	value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_MASK <<
+		   XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_SHIFT);
+	value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_VAL <<
+		XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_SHIFT;
+	padctl_writel(padctl, value, offset);
+
+	if (lane->pad == padctl->pcie)
+		offset = XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL5(lane->index);
+	else
+		offset = XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL5;
+
+	value = padctl_readl(padctl, offset);
+	value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL5_RX_QEYE_EN;
+	padctl_writel(padctl, value, offset);
+
+	/* Enable SATA PHY when SATA lane is used */
+	if (lane->pad == padctl->sata) {
+		value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+		value &= ~(XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_MASK <<
+			   XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_SHIFT);
+		value |= 0x2 <<
+			XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_SHIFT;
+		padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+		value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL2);
+		value &= ~((XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_MASK <<
+			    XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_SHIFT) |
+			   (XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_MASK <<
+			    XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_SHIFT) |
+			   (XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_MASK <<
+			    XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_SHIFT) |
+			   XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TCLKOUT_EN);
+		value |= (0x7 <<
+			  XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_SHIFT) |
+			 (0x8 <<
+			  XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_SHIFT) |
+			 (0x8 <<
+			  XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_SHIFT) |
+			 XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TXCLKREF_SEL;
+		padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL2);
+
+		value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL3);
+		value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL3_RCAL_BYPASS;
+		padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL3);
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_VCORE_DOWN(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN_EARLY(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	return ret;
+}
+
+static void tegra124_usb3_port_disable(struct tegra_xusb_port *port)
+{
+	struct tegra_xusb_padctl *padctl = port->padctl;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value |= XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN_EARLY(port->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value |= XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(port->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	usleep_range(250, 350);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+	value |= XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_VCORE_DOWN(port->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+	value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(port->index);
+	value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(port->index, 0x7);
+	padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+}
+
+static const struct tegra_xusb_lane_map tegra124_usb3_map[] = {
+	{ 0, "pcie", 0 },
+	{ 1, "pcie", 1 },
+	{ 1, "sata", 0 },
+	{ 0, NULL,   0 },
+};
+
+static struct tegra_xusb_lane *
+tegra124_usb3_port_map(struct tegra_xusb_port *port)
+{
+	return tegra_xusb_port_find_lane(port, tegra124_usb3_map, "usb3-ss");
+}
+
+static const struct tegra_xusb_port_ops tegra124_usb3_port_ops = {
+	.enable = tegra124_usb3_port_enable,
+	.disable = tegra124_usb3_port_disable,
+	.map = tegra124_usb3_port_map,
+};
+
+static int
+tegra124_xusb_read_fuse_calibration(struct tegra124_xusb_fuse_calibration *fuse)
+{
+	unsigned int i;
+	int err;
+	u32 value;
+
+	err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(fuse->hs_curr_level); i++) {
+		fuse->hs_curr_level[i] =
+			(value >> FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(i)) &
+			FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK;
+	}
+	fuse->hs_iref_cap =
+		(value >> FUSE_SKU_CALIB_HS_IREF_CAP_SHIFT) &
+		FUSE_SKU_CALIB_HS_IREF_CAP_MASK;
+	fuse->hs_term_range_adj =
+		(value >> FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT) &
+		FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK;
+	fuse->hs_squelch_level =
+		(value >> FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_SHIFT) &
+		FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_MASK;
+
+	return 0;
+}
+
+static struct tegra_xusb_padctl *
+tegra124_xusb_padctl_probe(struct device *dev,
+			   const struct tegra_xusb_padctl_soc *soc)
+{
+	struct tegra124_xusb_padctl *padctl;
+	int err;
+
+	padctl = devm_kzalloc(dev, sizeof(*padctl), GFP_KERNEL);
+	if (!padctl)
+		return ERR_PTR(-ENOMEM);
+
+	padctl->base.dev = dev;
+	padctl->base.soc = soc;
+
+	err = tegra124_xusb_read_fuse_calibration(&padctl->fuse);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	return &padctl->base;
+}
+
+static void tegra124_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
+{
+}
+
+static const struct tegra_xusb_padctl_ops tegra124_xusb_padctl_ops = {
+	.probe = tegra124_xusb_padctl_probe,
+	.remove = tegra124_xusb_padctl_remove,
+	.usb3_save_context = tegra124_usb3_save_context,
+	.hsic_set_idle = tegra124_hsic_set_idle,
+};
+
+const struct tegra_xusb_padctl_soc tegra124_xusb_padctl_soc = {
+	.num_pads = ARRAY_SIZE(tegra124_pads),
+	.pads = tegra124_pads,
+	.ports = {
+		.usb2 = {
+			.ops = &tegra124_usb2_port_ops,
+			.count = 3,
+		},
+		.ulpi = {
+			.ops = &tegra124_ulpi_port_ops,
+			.count = 1,
+		},
+		.hsic = {
+			.ops = &tegra124_hsic_port_ops,
+			.count = 2,
+		},
+		.usb3 = {
+			.ops = &tegra124_usb3_port_ops,
+			.count = 2,
+		},
+	},
+	.ops = &tegra124_xusb_padctl_ops,
+};
+EXPORT_SYMBOL_GPL(tegra124_xusb_padctl_soc);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra 124 XUSB Pad Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
new file mode 100644
index 0000000..9d0689e
--- /dev/null
+++ b/drivers/phy/tegra/xusb-tegra210.c
@@ -0,0 +1,2045 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "xusb.h"
+
+#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(x) \
+					((x) ? (11 + ((x) - 1) * 6) : 0)
+#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK 0x3f
+#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT 7
+#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK 0xf
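
The HS_CURR_LEVEL fuse fields are packed irregularly: pad 0 starts at bit 0, while pads 1 and up sit at 6-bit intervals starting at bit 11, leaving room for the term-range-adjust field at bits 10:7. A worked sketch of the shift macro and the extraction it implies:

	/*
	 * FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(x):
	 *   x = 0 -> 0,  x = 1 -> 11,  x = 2 -> 17,  x = 3 -> 23
	 * so each pad's level is (value >> shift) & 0x3f.
	 */
	hs_curr_level = (value >> FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(2)) &
			FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK;	/* pad 2: bits 22:17 */
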
+
+#define FUSE_USB_CALIB_EXT_RPD_CTRL_SHIFT 0
+#define FUSE_USB_CALIB_EXT_RPD_CTRL_MASK 0x1f
+
+#define XUSB_PADCTL_USB2_PAD_MUX 0x004
+#define XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_SHIFT 16
+#define XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_MASK 0x3
+#define XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_XUSB 0x1
+#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT 18
+#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_MASK 0x3
+#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_XUSB 0x1
+
+#define XUSB_PADCTL_USB2_PORT_CAP 0x008
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(x) (0x1 << ((x) * 4))
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(x) (0x3 << ((x) * 4))
+
+#define XUSB_PADCTL_SS_PORT_MAP 0x014
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(x) (1 << (((x) * 5) + 4))
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_SHIFT(x) ((x) * 5)
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(x) (0x7 << ((x) * 5))
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 5))
+
+#define XUSB_PADCTL_ELPG_PROGRAM1 0x024
+#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN (1 << 31)
+#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY (1 << 30)
+#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN (1 << 29)
+#define XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(x) (1 << (2 + (x) * 3))
+#define XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(x) \
+							(1 << (1 + (x) * 3))
+#define XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(x) (1 << ((x) * 3))
+
+#define XUSB_PADCTL_USB3_PAD_MUX 0x028
+#define XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x)))
+#define XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (8 + (x)))
+
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(x) (0x084 + (x) * 0x40)
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT 7
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK 0x3
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18 (1 << 6)
+
+#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x088 + (x) * 0x40)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI (1 << 29)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 (1 << 27)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD (1 << 26)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT 0
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK 0x3f
+
+#define XUSB_PADCTL_USB2_OTG_PADX_CTL1(x) (0x08c + (x) * 0x40)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_SHIFT 26
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_MASK 0x1f
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT 3
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK 0xf
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR (1 << 2)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_OVRD (1 << 1)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_OVRD (1 << 0)
+
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x284
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD (1 << 11)
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT 3
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK 0x7
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL 0x7
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT 0
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK 0x7
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_VAL 0x2
+
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1 0x288
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_PD_TRK (1 << 26)
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_SHIFT 19
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_MASK 0x7f
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_VAL 0x0a
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_SHIFT 12
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_MASK 0x7f
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_VAL 0x1e
+
+#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x300 + (x) * 0x20)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE (1 << 18)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA1 (1 << 17)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA0 (1 << 16)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPD_STROBE (1 << 15)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 (1 << 14)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 (1 << 13)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_STROBE (1 << 9)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA1 (1 << 8)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA0 (1 << 7)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_STROBE (1 << 6)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA1 (1 << 5)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA0 (1 << 4)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_STROBE (1 << 3)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA1 (1 << 2)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA0 (1 << 1)
+
+#define XUSB_PADCTL_HSIC_PADX_CTL1(x) (0x304 + (x) * 0x20)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_SHIFT 0
+#define XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_MASK 0xf
+
+#define XUSB_PADCTL_HSIC_PADX_CTL2(x) (0x308 + (x) * 0x20)
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT 8
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK 0xf
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT 0
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK 0xff
+
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL 0x340
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_PD_TRK (1 << 19)
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_SHIFT 12
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_MASK 0x7f
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_VAL 0x0a
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_SHIFT 5
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_MASK 0x7f
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_VAL 0x1e
+
+#define XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL 0x344
+
+#define XUSB_PADCTL_UPHY_PLL_P0_CTL1 0x360
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT 20
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_MASK 0xff
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_USB_VAL 0x19
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SATA_VAL 0x1e
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_SHIFT 16
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_MASK 0x3
+#define XUSB_PADCTL_UPHY_PLL_CTL1_LOCKDET_STATUS (1 << 15)
+#define XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD (1 << 4)
+#define XUSB_PADCTL_UPHY_PLL_CTL1_ENABLE (1 << 3)
+#define XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_SHIFT 1
+#define XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_MASK 0x3
+#define XUSB_PADCTL_UPHY_PLL_CTL1_IDDQ (1 << 0)
+
+#define XUSB_PADCTL_UPHY_PLL_P0_CTL2 0x364
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT 4
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_MASK 0xffffff
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_VAL 0x136
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD (1 << 2)
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE (1 << 1)
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN (1 << 0)
+
+#define XUSB_PADCTL_UPHY_PLL_P0_CTL4 0x36c
+#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_EN (1 << 15)
+#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT 12
+#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_MASK 0x3
+#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_USB_VAL 0x2
+#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SATA_VAL 0x0
+#define XUSB_PADCTL_UPHY_PLL_CTL4_REFCLKBUF_EN (1 << 8)
+#define XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_SHIFT 4
+#define XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_MASK 0xf
+
+#define XUSB_PADCTL_UPHY_PLL_P0_CTL5 0x370
+#define XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT 16
+#define XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_MASK 0xff
+#define XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_VAL 0x2a
+
+#define XUSB_PADCTL_UPHY_PLL_P0_CTL8 0x37c
+#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE (1 << 31)
+#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD (1 << 15)
+#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN (1 << 13)
+#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN (1 << 12)
+
+#define XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL1(x) (0x460 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_SHIFT 20
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_MASK 0x3
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_VAL 0x1
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_TERM_EN BIT(18)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_MODE_OVRD BIT(13)
+
+#define XUSB_PADCTL_UPHY_PLL_S0_CTL1 0x860
+
+#define XUSB_PADCTL_UPHY_PLL_S0_CTL2 0x864
+
+#define XUSB_PADCTL_UPHY_PLL_S0_CTL4 0x86c
+
+#define XUSB_PADCTL_UPHY_PLL_S0_CTL5 0x870
+
+#define XUSB_PADCTL_UPHY_PLL_S0_CTL8 0x87c
+
+#define XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL1 0x960
+
+#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(x) (0xa60 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT 16
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_MASK 0x3
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_VAL 0x2
+
+#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(x) (0xa64 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT 0
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_MASK 0xffff
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_VAL 0x00fc
+
+#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL3(x) (0xa68 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL3_RX_DFE_VAL 0xc0077f1f
+
+#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(x) (0xa6c + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT 16
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_MASK 0xffff
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_VAL 0x01c7
+
+#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(x) (0xa74 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL 0xfcf01368
+
+struct tegra210_xusb_fuse_calibration {
+	u32 hs_curr_level[4];
+	u32 hs_term_range_adj;
+	u32 rpd_ctrl;
+};
+
+struct tegra210_xusb_padctl {
+	struct tegra_xusb_padctl base;
+
+	struct tegra210_xusb_fuse_calibration fuse;
+};
+
+static inline struct tegra210_xusb_padctl *
+to_tegra210_xusb_padctl(struct tegra_xusb_padctl *padctl)
+{
+	return container_of(padctl, struct tegra210_xusb_padctl, base);
+}
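
to_tegra210_xusb_padctl() recovers the SoC-specific state from the generic struct tegra_xusb_padctl embedded as its base member, via container_of(). A minimal sketch of how such a helper is typically used; the function name here is illustrative, the real users appear further down in this file:

	static u32 example_hs_term_range_adj(struct tegra_xusb_padctl *padctl)
	{
		/* walk back from the embedded base to the enclosing allocation */
		struct tegra210_xusb_padctl *priv = to_tegra210_xusb_padctl(padctl);

		return priv->fuse.hs_term_range_adj;
	}
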
+
+/* must be called under padctl->lock */
+static int tegra210_pex_uphy_enable(struct tegra_xusb_padctl *padctl)
+{
+	struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(padctl->pcie);
+	unsigned long timeout;
+	u32 value;
+	int err;
+
+	if (pcie->enable > 0) {
+		pcie->enable++;
+		return 0;
+	}
+
+	err = clk_prepare_enable(pcie->pll);
+	if (err < 0)
+		return err;
+
+	err = reset_control_deassert(pcie->rst);
+	if (err < 0)
+		goto disable;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+	value &= ~(XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_MASK <<
+		   XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_VAL <<
+		 XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL5);
+	value &= ~(XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_MASK <<
+		   XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_VAL <<
+		 XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL5);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
+	value &= ~((XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_MASK <<
+		    XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT) |
+		   (XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_MASK <<
+		    XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_SHIFT));
+	value |= (XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_USB_VAL <<
+		  XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT) |
+		 XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+	value &= ~((XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_MASK <<
+		    XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_SHIFT) |
+		   (XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_MASK <<
+		    XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT));
+	value |= XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_USB_VAL <<
+		 XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_IDDQ;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+	value &= ~(XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_MASK <<
+		   XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+	usleep_range(10, 20);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL4_REFCLKBUF_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+		if (value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE)
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	if (time_after_eq(jiffies, timeout)) {
+		err = -ETIMEDOUT;
+		goto reset;
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+		if (!(value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE))
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	if (time_after_eq(jiffies, timeout)) {
+		err = -ETIMEDOUT;
+		goto reset;
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL1_ENABLE;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+		if (value & XUSB_PADCTL_UPHY_PLL_CTL1_LOCKDET_STATUS)
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	if (time_after_eq(jiffies, timeout)) {
+		err = -ETIMEDOUT;
+		goto reset;
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN |
+		 XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+		if (value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE)
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	if (time_after_eq(jiffies, timeout)) {
+		err = -ETIMEDOUT;
+		goto reset;
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+		if (!(value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE))
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	if (time_after_eq(jiffies, timeout)) {
+		err = -ETIMEDOUT;
+		goto reset;
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+
+	tegra210_xusb_pll_hw_control_enable();
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+
+	usleep_range(10, 20);
+
+	tegra210_xusb_pll_hw_sequence_start();
+
+	pcie->enable++;
+
+	return 0;
+
+reset:
+	reset_control_assert(pcie->rst);
+disable:
+	clk_disable_unprepare(pcie->pll);
+	return err;
+}
+
+static void tegra210_pex_uphy_disable(struct tegra_xusb_padctl *padctl)
+{
+	struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(padctl->pcie);
+
+	mutex_lock(&padctl->lock);
+
+	if (WARN_ON(pcie->enable == 0))
+		goto unlock;
+
+	if (--pcie->enable > 0)
+		goto unlock;
+
+	reset_control_assert(pcie->rst);
+	clk_disable_unprepare(pcie->pll);
+
+unlock:
+	mutex_unlock(&padctl->lock);
+}
+
+/* must be called under padctl->lock */
+static int tegra210_sata_uphy_enable(struct tegra_xusb_padctl *padctl, bool usb)
+{
+	struct tegra_xusb_sata_pad *sata = to_sata_pad(padctl->sata);
+	unsigned long timeout;
+	u32 value;
+	int err;
+
+	if (sata->enable > 0) {
+		sata->enable++;
+		return 0;
+	}
+
+	err = clk_prepare_enable(sata->pll);
+	if (err < 0)
+		return err;
+
+	err = reset_control_deassert(sata->rst);
+	if (err < 0)
+		goto disable;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+	value &= ~(XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_MASK <<
+		   XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_VAL <<
+		 XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL5);
+	value &= ~(XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_MASK <<
+		   XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_VAL <<
+		 XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL5);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
+	value &= ~((XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_MASK <<
+		    XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT) |
+		   (XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_MASK <<
+		    XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_SHIFT));
+	value |= XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_EN;
+
+	if (usb)
+		value |= (XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_USB_VAL <<
+			  XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT);
+	else
+		value |= (XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SATA_VAL <<
+			  XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT);
+
+	/* XXX PLL0_XDIGCLK_EN */
+	/*
+	value &= ~(1 << 19);
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
+	*/
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+	value &= ~((XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_MASK <<
+		    XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_SHIFT) |
+		   (XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_MASK <<
+		    XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT));
+
+	if (usb)
+		value |= XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_USB_VAL <<
+			 XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT;
+	else
+		value |= XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SATA_VAL <<
+			 XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT;
+
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_IDDQ;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+	value &= ~(XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_MASK <<
+		   XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+	usleep_range(10, 20);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL4_REFCLKBUF_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+		if (value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE)
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	if (time_after_eq(jiffies, timeout)) {
+		err = -ETIMEDOUT;
+		goto reset;
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+		if (!(value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE))
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	if (time_after_eq(jiffies, timeout)) {
+		err = -ETIMEDOUT;
+		goto reset;
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL1_ENABLE;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+		if (value & XUSB_PADCTL_UPHY_PLL_CTL1_LOCKDET_STATUS)
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	if (time_after_eq(jiffies, timeout)) {
+		err = -ETIMEDOUT;
+		goto reset;
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+	value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN |
+		 XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+		if (value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE)
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	if (time_after_eq(jiffies, timeout)) {
+		err = -ETIMEDOUT;
+		goto reset;
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+
+	while (time_before(jiffies, timeout)) {
+		value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+		if (!(value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE))
+			break;
+
+		usleep_range(10, 20);
+	}
+
+	if (time_after_eq(jiffies, timeout)) {
+		err = -ETIMEDOUT;
+		goto reset;
+	}
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+
+	tegra210_sata_pll_hw_control_enable();
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+	value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+
+	usleep_range(10, 20);
+
+	tegra210_sata_pll_hw_sequence_start();
+
+	sata->enable++;
+
+	return 0;
+
+reset:
+	reset_control_assert(sata->rst);
+disable:
+	clk_disable_unprepare(sata->pll);
+	return err;
+}
+
+static void tegra210_sata_uphy_disable(struct tegra_xusb_padctl *padctl)
+{
+	struct tegra_xusb_sata_pad *sata = to_sata_pad(padctl->sata);
+
+	mutex_lock(&padctl->lock);
+
+	if (WARN_ON(sata->enable == 0))
+		goto unlock;
+
+	if (--sata->enable > 0)
+		goto unlock;
+
+	reset_control_assert(sata->rst);
+	clk_disable_unprepare(sata->pll);
+
+unlock:
+	mutex_unlock(&padctl->lock);
+}
+
+static int tegra210_xusb_padctl_enable(struct tegra_xusb_padctl *padctl)
+{
+	u32 value;
+
+	mutex_lock(&padctl->lock);
+
+	if (padctl->enable++ > 0)
+		goto out;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+out:
+	mutex_unlock(&padctl->lock);
+	return 0;
+}
+
+static int tegra210_xusb_padctl_disable(struct tegra_xusb_padctl *padctl)
+{
+	u32 value;
+
+	mutex_lock(&padctl->lock);
+
+	if (WARN_ON(padctl->enable == 0))
+		goto out;
+
+	if (--padctl->enable > 0)
+		goto out;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN;
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+out:
+	mutex_unlock(&padctl->lock);
+	return 0;
+}
+
+static int tegra210_hsic_set_idle(struct tegra_xusb_padctl *padctl,
+				  unsigned int index, bool idle)
+{
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+
+	value &= ~(XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA0 |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA1 |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_RPD_STROBE);
+
+	if (idle)
+		value |= XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 |
+			 XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 |
+			 XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE;
+	else
+		value &= ~(XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 |
+			   XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 |
+			   XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE);
+
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+
+	return 0;
+}
+
+static int tegra210_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
+					 unsigned int index, bool enable)
+{
+	struct tegra_xusb_port *port;
+	struct tegra_xusb_lane *lane;
+	u32 value, offset;
+
+	port = tegra_xusb_find_port(padctl, "usb3", index);
+	if (!port)
+		return -ENODEV;
+
+	lane = port->lane;
+
+	if (lane->pad == padctl->pcie)
+		offset = XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL1(lane->index);
+	else
+		offset = XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL1;
+
+	value = padctl_readl(padctl, offset);
+
+	value &= ~((XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_MASK <<
+		    XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_SHIFT) |
+		   XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_TERM_EN |
+		   XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_MODE_OVRD);
+
+	if (!enable) {
+		value |= (XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_VAL <<
+			  XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_SHIFT) |
+			 XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_TERM_EN |
+			 XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_MODE_OVRD;
+	}
+
+	padctl_writel(padctl, value, offset);
+
+	return 0;
+}
+
+#define TEGRA210_LANE(_name, _offset, _shift, _mask, _type)		\
+	{								\
+		.name = _name,						\
+		.offset = _offset,					\
+		.shift = _shift,					\
+		.mask = _mask,						\
+		.num_funcs = ARRAY_SIZE(tegra210_##_type##_functions),	\
+		.funcs = tegra210_##_type##_functions,			\
+	}
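
The TEGRA210_LANE() entries below are shorthand for struct tegra_xusb_lane_soc initializers; for illustration, one invocation expands to:

	/* TEGRA210_LANE("usb2-0", 0x004, 0, 0x3, usb2) expands to: */
	{
		.name = "usb2-0",
		.offset = 0x004,	/* pad mux register */
		.shift = 0,		/* lane's bit position in that register */
		.mask = 0x3,
		.num_funcs = ARRAY_SIZE(tegra210_usb2_functions),
		.funcs = tegra210_usb2_functions,
	}
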
+
+static const char *tegra210_usb2_functions[] = {
+	"snps",
+	"xusb",
+	"uart",
+};
+
+static const struct tegra_xusb_lane_soc tegra210_usb2_lanes[] = {
+	TEGRA210_LANE("usb2-0", 0x004,  0, 0x3, usb2),
+	TEGRA210_LANE("usb2-1", 0x004,  2, 0x3, usb2),
+	TEGRA210_LANE("usb2-2", 0x004,  4, 0x3, usb2),
+	TEGRA210_LANE("usb2-3", 0x004,  6, 0x3, usb2),
+};
+
+static struct tegra_xusb_lane *
+tegra210_usb2_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+			 unsigned int index)
+{
+	struct tegra_xusb_usb2_lane *usb2;
+	int err;
+
+	usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
+	if (!usb2)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&usb2->base.list);
+	usb2->base.soc = &pad->soc->lanes[index];
+	usb2->base.index = index;
+	usb2->base.pad = pad;
+	usb2->base.np = np;
+
+	err = tegra_xusb_lane_parse_dt(&usb2->base, np);
+	if (err < 0) {
+		kfree(usb2);
+		return ERR_PTR(err);
+	}
+
+	return &usb2->base;
+}
+
+static void tegra210_usb2_lane_remove(struct tegra_xusb_lane *lane)
+{
+	struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
+
+	kfree(usb2);
+}
+
+static const struct tegra_xusb_lane_ops tegra210_usb2_lane_ops = {
+	.probe = tegra210_usb2_lane_probe,
+	.remove = tegra210_usb2_lane_remove,
+};
+
+static int tegra210_usb2_phy_init(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
+	value &= ~(XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_MASK <<
+		   XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT);
+	value |= XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_XUSB <<
+		 XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_PAD_MUX);
+
+	return tegra210_xusb_padctl_enable(padctl);
+}
+
+static int tegra210_usb2_phy_exit(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra210_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra210_usb2_phy_power_on(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
+	struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	struct tegra210_xusb_padctl *priv;
+	struct tegra_xusb_usb2_port *port;
+	unsigned int index = lane->index;
+	u32 value;
+	int err;
+
+	port = tegra_xusb_find_usb2_port(padctl, index);
+	if (!port) {
+		dev_err(&phy->dev, "no port found for USB2 lane %u\n", index);
+		return -ENODEV;
+	}
+
+	priv = to_tegra210_xusb_padctl(padctl);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+	value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK <<
+		    XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
+		   (XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK <<
+		    XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT));
+	value |= (XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL <<
+		  XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT);
+
+	if (tegra_sku_info.revision < TEGRA_REVISION_A02)
+		value |=
+			(XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_VAL <<
+			XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT);
+
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
+	value &= ~XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(index);
+	value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+	value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK <<
+		    XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT) |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI);
+	value |= (priv->fuse.hs_curr_level[index] +
+		  usb2->hs_curr_level_offset) <<
+		 XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+	value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK <<
+		    XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
+		   (XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_MASK <<
+		    XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_SHIFT) |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_OVRD |
+		   XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_OVRD);
+	value |= (priv->fuse.hs_term_range_adj <<
+		  XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
+		 (priv->fuse.rpd_ctrl <<
+		  XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+
+	value = padctl_readl(padctl,
+			     XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
+	value &= ~(XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK <<
+		   XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT);
+	value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
+	padctl_writel(padctl, value,
+		      XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
+
+	err = regulator_enable(port->supply);
+	if (err)
+		return err;
+
+	mutex_lock(&padctl->lock);
+
+	if (pad->enable > 0) {
+		pad->enable++;
+		mutex_unlock(&padctl->lock);
+		return 0;
+	}
+
+	err = clk_prepare_enable(pad->clk);
+	if (err)
+		goto disable_regulator;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+	value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_MASK <<
+		    XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_SHIFT) |
+		   (XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_MASK <<
+		    XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_SHIFT));
+	value |= (XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_VAL <<
+		  XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_SHIFT) |
+		 (XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_VAL <<
+		  XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+	value &= ~XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+	udelay(1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+	value &= ~XUSB_PADCTL_USB2_BIAS_PAD_CTL1_PD_TRK;
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+
+	udelay(50);
+
+	clk_disable_unprepare(pad->clk);
+
+	pad->enable++;
+	mutex_unlock(&padctl->lock);
+
+	return 0;
+
+disable_regulator:
+	regulator_disable(port->supply);
+	mutex_unlock(&padctl->lock);
+	return err;
+}
+
+static int tegra210_usb2_phy_power_off(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	struct tegra_xusb_usb2_port *port;
+	u32 value;
+
+	port = tegra_xusb_find_usb2_port(padctl, lane->index);
+	if (!port) {
+		dev_err(&phy->dev, "no port found for USB2 lane %u\n",
+			lane->index);
+		return -ENODEV;
+	}
+
+	mutex_lock(&padctl->lock);
+
+	if (WARN_ON(pad->enable == 0))
+		goto out;
+
+	if (--pad->enable > 0)
+		goto out;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+	value |= XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+out:
+	regulator_disable(port->supply);
+	mutex_unlock(&padctl->lock);
+	return 0;
+}
+
+static const struct phy_ops tegra210_usb2_phy_ops = {
+	.init = tegra210_usb2_phy_init,
+	.exit = tegra210_usb2_phy_exit,
+	.power_on = tegra210_usb2_phy_power_on,
+	.power_off = tegra210_usb2_phy_power_off,
+	.owner = THIS_MODULE,
+};
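
These phy_ops plug into the generic PHY framework, so a consumer (typically the XUSB host controller) drives the pad through the standard API rather than calling these functions directly. A hedged sketch of such a consumer, where the device pointer and the "usb2-0" phandle name are assumptions:

	struct phy *phy;
	int err;

	phy = devm_phy_get(dev, "usb2-0");	/* assumed consumer device/name */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	err = phy_init(phy);			/* -> tegra210_usb2_phy_init() */
	if (err < 0)
		return err;

	err = phy_power_on(phy);		/* -> tegra210_usb2_phy_power_on() */
	if (err < 0) {
		phy_exit(phy);
		return err;
	}
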
+
+static struct tegra_xusb_pad *
+tegra210_usb2_pad_probe(struct tegra_xusb_padctl *padctl,
+			const struct tegra_xusb_pad_soc *soc,
+			struct device_node *np)
+{
+	struct tegra_xusb_usb2_pad *usb2;
+	struct tegra_xusb_pad *pad;
+	int err;
+
+	usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
+	if (!usb2)
+		return ERR_PTR(-ENOMEM);
+
+	pad = &usb2->base;
+	pad->ops = &tegra210_usb2_lane_ops;
+	pad->soc = soc;
+
+	err = tegra_xusb_pad_init(pad, padctl, np);
+	if (err < 0) {
+		kfree(usb2);
+		goto out;
+	}
+
+	usb2->clk = devm_clk_get(&pad->dev, "trk");
+	if (IS_ERR(usb2->clk)) {
+		err = PTR_ERR(usb2->clk);
+		dev_err(&pad->dev, "failed to get trk clock: %d\n", err);
+		goto unregister;
+	}
+
+	err = tegra_xusb_pad_register(pad, &tegra210_usb2_phy_ops);
+	if (err < 0)
+		goto unregister;
+
+	dev_set_drvdata(&pad->dev, pad);
+
+	return pad;
+
+unregister:
+	device_unregister(&pad->dev);
+out:
+	return ERR_PTR(err);
+}
+
+static void tegra210_usb2_pad_remove(struct tegra_xusb_pad *pad)
+{
+	struct tegra_xusb_usb2_pad *usb2 = to_usb2_pad(pad);
+
+	kfree(usb2);
+}
+
+static const struct tegra_xusb_pad_ops tegra210_usb2_ops = {
+	.probe = tegra210_usb2_pad_probe,
+	.remove = tegra210_usb2_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra210_usb2_pad = {
+	.name = "usb2",
+	.num_lanes = ARRAY_SIZE(tegra210_usb2_lanes),
+	.lanes = tegra210_usb2_lanes,
+	.ops = &tegra210_usb2_ops,
+};
+
+static const char *tegra210_hsic_functions[] = {
+	"snps",
+	"xusb",
+};
+
+static const struct tegra_xusb_lane_soc tegra210_hsic_lanes[] = {
+	TEGRA210_LANE("hsic-0", 0x004, 14, 0x1, hsic),
+};
+
+static struct tegra_xusb_lane *
+tegra210_hsic_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+			 unsigned int index)
+{
+	struct tegra_xusb_hsic_lane *hsic;
+	int err;
+
+	hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
+	if (!hsic)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&hsic->base.list);
+	hsic->base.soc = &pad->soc->lanes[index];
+	hsic->base.index = index;
+	hsic->base.pad = pad;
+	hsic->base.np = np;
+
+	err = tegra_xusb_lane_parse_dt(&hsic->base, np);
+	if (err < 0) {
+		kfree(hsic);
+		return ERR_PTR(err);
+	}
+
+	return &hsic->base;
+}
+
+static void tegra210_hsic_lane_remove(struct tegra_xusb_lane *lane)
+{
+	struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
+
+	kfree(hsic);
+}
+
+static const struct tegra_xusb_lane_ops tegra210_hsic_lane_ops = {
+	.probe = tegra210_hsic_lane_probe,
+	.remove = tegra210_hsic_lane_remove,
+};
+
+static int tegra210_hsic_phy_init(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
+	value &= ~(XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_MASK <<
+		   XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_SHIFT);
+	value |= XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_XUSB <<
+		 XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_USB2_PAD_MUX);
+
+	return tegra210_xusb_padctl_enable(padctl);
+}
+
+static int tegra210_hsic_phy_exit(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra210_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra210_hsic_phy_power_on(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
+	struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	struct tegra210_xusb_padctl *priv;
+	unsigned int index = lane->index;
+	u32 value;
+	int err;
+
+	priv = to_tegra210_xusb_padctl(padctl);
+
+	err = regulator_enable(pad->supply);
+	if (err)
+		return err;
+
+	padctl_writel(padctl, hsic->strobe_trim,
+		      XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+	value &= ~(XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_MASK <<
+		   XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_SHIFT);
+	value |= (hsic->tx_rtune_p <<
+		  XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL2(index));
+	value &= ~((XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK <<
+		    XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
+		   (XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK <<
+		    XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT));
+	value |= (hsic->rx_strobe_trim <<
+		  XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
+		 (hsic->rx_data_trim <<
+		  XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL2(index));
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+	value &= ~(XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA0 |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA1 |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA0 |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA1 |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_STROBE |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA0 |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA1 |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_STROBE |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA0 |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA1 |
+		   XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_STROBE);
+	value |= XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 |
+		 XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 |
+		 XUSB_PADCTL_HSIC_PAD_CTL0_RPD_STROBE;
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+
+	err = clk_prepare_enable(pad->clk);
+	if (err)
+		goto disable;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
+	value &= ~((XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_MASK <<
+		    XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_SHIFT) |
+		   (XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_MASK <<
+		    XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_SHIFT));
+	value |= (XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_VAL <<
+		  XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_SHIFT) |
+		 (XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_VAL <<
+		  XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_SHIFT);
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
+
+	udelay(1);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
+	value &= ~XUSB_PADCTL_HSIC_PAD_TRK_CTL_PD_TRK;
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
+
+	udelay(50);
+
+	clk_disable_unprepare(pad->clk);
+
+	return 0;
+
+disable:
+	regulator_disable(pad->supply);
+	return err;
+}
+
+static int tegra210_hsic_phy_power_off(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	unsigned int index = lane->index;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+	value |= XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA0 |
+		 XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA1 |
+		 XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_STROBE |
+		 XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA0 |
+		 XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA1 |
+		 XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_STROBE |
+		 XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA0 |
+		 XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA1 |
+		 XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_STROBE;
+	padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+
+	regulator_disable(pad->supply);
+
+	return 0;
+}
+
+static const struct phy_ops tegra210_hsic_phy_ops = {
+	.init = tegra210_hsic_phy_init,
+	.exit = tegra210_hsic_phy_exit,
+	.power_on = tegra210_hsic_phy_power_on,
+	.power_off = tegra210_hsic_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra210_hsic_pad_probe(struct tegra_xusb_padctl *padctl,
+			const struct tegra_xusb_pad_soc *soc,
+			struct device_node *np)
+{
+	struct tegra_xusb_hsic_pad *hsic;
+	struct tegra_xusb_pad *pad;
+	int err;
+
+	hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
+	if (!hsic)
+		return ERR_PTR(-ENOMEM);
+
+	pad = &hsic->base;
+	pad->ops = &tegra210_hsic_lane_ops;
+	pad->soc = soc;
+
+	err = tegra_xusb_pad_init(pad, padctl, np);
+	if (err < 0) {
+		kfree(hsic);
+		goto out;
+	}
+
+	hsic->clk = devm_clk_get(&pad->dev, "trk");
+	if (IS_ERR(hsic->clk)) {
+		err = PTR_ERR(hsic->clk);
+		dev_err(&pad->dev, "failed to get trk clock: %d\n", err);
+		goto unregister;
+	}
+
+	err = tegra_xusb_pad_register(pad, &tegra210_hsic_phy_ops);
+	if (err < 0)
+		goto unregister;
+
+	dev_set_drvdata(&pad->dev, pad);
+
+	return pad;
+
+unregister:
+	device_unregister(&pad->dev);
+out:
+	return ERR_PTR(err);
+}
+
+static void tegra210_hsic_pad_remove(struct tegra_xusb_pad *pad)
+{
+	struct tegra_xusb_hsic_pad *hsic = to_hsic_pad(pad);
+
+	kfree(hsic);
+}
+
+static const struct tegra_xusb_pad_ops tegra210_hsic_ops = {
+	.probe = tegra210_hsic_pad_probe,
+	.remove = tegra210_hsic_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra210_hsic_pad = {
+	.name = "hsic",
+	.num_lanes = ARRAY_SIZE(tegra210_hsic_lanes),
+	.lanes = tegra210_hsic_lanes,
+	.ops = &tegra210_hsic_ops,
+};
+
+static const char *tegra210_pcie_functions[] = {
+	"pcie-x1",
+	"usb3-ss",
+	"sata",
+	"pcie-x4",
+};
+
+static const struct tegra_xusb_lane_soc tegra210_pcie_lanes[] = {
+	TEGRA210_LANE("pcie-0", 0x028, 12, 0x3, pcie),
+	TEGRA210_LANE("pcie-1", 0x028, 14, 0x3, pcie),
+	TEGRA210_LANE("pcie-2", 0x028, 16, 0x3, pcie),
+	TEGRA210_LANE("pcie-3", 0x028, 18, 0x3, pcie),
+	TEGRA210_LANE("pcie-4", 0x028, 20, 0x3, pcie),
+	TEGRA210_LANE("pcie-5", 0x028, 22, 0x3, pcie),
+	TEGRA210_LANE("pcie-6", 0x028, 24, 0x3, pcie),
+};
+
+static struct tegra_xusb_lane *
+tegra210_pcie_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+			 unsigned int index)
+{
+	struct tegra_xusb_pcie_lane *pcie;
+	int err;
+
+	pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&pcie->base.list);
+	pcie->base.soc = &pad->soc->lanes[index];
+	pcie->base.index = index;
+	pcie->base.pad = pad;
+	pcie->base.np = np;
+
+	err = tegra_xusb_lane_parse_dt(&pcie->base, np);
+	if (err < 0) {
+		kfree(pcie);
+		return ERR_PTR(err);
+	}
+
+	return &pcie->base;
+}
+
+static void tegra210_pcie_lane_remove(struct tegra_xusb_lane *lane)
+{
+	struct tegra_xusb_pcie_lane *pcie = to_pcie_lane(lane);
+
+	kfree(pcie);
+}
+
+static const struct tegra_xusb_lane_ops tegra210_pcie_lane_ops = {
+	.probe = tegra210_pcie_lane_probe,
+	.remove = tegra210_pcie_lane_remove,
+};
+
+static int tegra210_pcie_phy_init(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra210_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra210_pcie_phy_exit(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra210_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra210_pcie_phy_power_on(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	u32 value;
+	int err;
+
+	mutex_lock(&padctl->lock);
+
+	err = tegra210_pex_uphy_enable(padctl);
+	if (err < 0)
+		goto unlock;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+	value |= XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+unlock:
+	mutex_unlock(&padctl->lock);
+	return err;
+}
+
+static int tegra210_pcie_phy_power_off(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+	value &= ~XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+	tegra210_pex_uphy_disable(padctl);
+
+	return 0;
+}
+
+static const struct phy_ops tegra210_pcie_phy_ops = {
+	.init = tegra210_pcie_phy_init,
+	.exit = tegra210_pcie_phy_exit,
+	.power_on = tegra210_pcie_phy_power_on,
+	.power_off = tegra210_pcie_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra210_pcie_pad_probe(struct tegra_xusb_padctl *padctl,
+			const struct tegra_xusb_pad_soc *soc,
+			struct device_node *np)
+{
+	struct tegra_xusb_pcie_pad *pcie;
+	struct tegra_xusb_pad *pad;
+	int err;
+
+	pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return ERR_PTR(-ENOMEM);
+
+	pad = &pcie->base;
+	pad->ops = &tegra210_pcie_lane_ops;
+	pad->soc = soc;
+
+	err = tegra_xusb_pad_init(pad, padctl, np);
+	if (err < 0) {
+		kfree(pcie);
+		goto out;
+	}
+
+	pcie->pll = devm_clk_get(&pad->dev, "pll");
+	if (IS_ERR(pcie->pll)) {
+		err = PTR_ERR(pcie->pll);
+		dev_err(&pad->dev, "failed to get PLL: %d\n", err);
+		goto unregister;
+	}
+
+	pcie->rst = devm_reset_control_get(&pad->dev, "phy");
+	if (IS_ERR(pcie->rst)) {
+		err = PTR_ERR(pcie->rst);
+		dev_err(&pad->dev, "failed to get PCIe pad reset: %d\n", err);
+		goto unregister;
+	}
+
+	err = tegra_xusb_pad_register(pad, &tegra210_pcie_phy_ops);
+	if (err < 0)
+		goto unregister;
+
+	dev_set_drvdata(&pad->dev, pad);
+
+	return pad;
+
+unregister:
+	device_unregister(&pad->dev);
+out:
+	return ERR_PTR(err);
+}
+
+static void tegra210_pcie_pad_remove(struct tegra_xusb_pad *pad)
+{
+	struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(pad);
+
+	kfree(pcie);
+}
+
+static const struct tegra_xusb_pad_ops tegra210_pcie_ops = {
+	.probe = tegra210_pcie_pad_probe,
+	.remove = tegra210_pcie_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra210_pcie_pad = {
+	.name = "pcie",
+	.num_lanes = ARRAY_SIZE(tegra210_pcie_lanes),
+	.lanes = tegra210_pcie_lanes,
+	.ops = &tegra210_pcie_ops,
+};
+
+static const struct tegra_xusb_lane_soc tegra210_sata_lanes[] = {
+	TEGRA210_LANE("sata-0", 0x028, 30, 0x3, pcie),
+};
+
+static struct tegra_xusb_lane *
+tegra210_sata_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+			 unsigned int index)
+{
+	struct tegra_xusb_sata_lane *sata;
+	int err;
+
+	sata = kzalloc(sizeof(*sata), GFP_KERNEL);
+	if (!sata)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&sata->base.list);
+	sata->base.soc = &pad->soc->lanes[index];
+	sata->base.index = index;
+	sata->base.pad = pad;
+	sata->base.np = np;
+
+	err = tegra_xusb_lane_parse_dt(&sata->base, np);
+	if (err < 0) {
+		kfree(sata);
+		return ERR_PTR(err);
+	}
+
+	return &sata->base;
+}
+
+static void tegra210_sata_lane_remove(struct tegra_xusb_lane *lane)
+{
+	struct tegra_xusb_sata_lane *sata = to_sata_lane(lane);
+
+	kfree(sata);
+}
+
+static const struct tegra_xusb_lane_ops tegra210_sata_lane_ops = {
+	.probe = tegra210_sata_lane_probe,
+	.remove = tegra210_sata_lane_remove,
+};
+
+static int tegra210_sata_phy_init(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra210_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra210_sata_phy_exit(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+	return tegra210_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra210_sata_phy_power_on(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	u32 value;
+	int err;
+
+	mutex_lock(&padctl->lock);
+
+	err = tegra210_sata_uphy_enable(padctl, false);
+	if (err < 0)
+		goto unlock;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+	value |= XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+unlock:
+	mutex_unlock(&padctl->lock);
+	return err;
+}
+
+static int tegra210_sata_phy_power_off(struct phy *phy)
+{
+	struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	u32 value;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+	value &= ~XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
+	padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+	tegra210_sata_uphy_disable(lane->pad->padctl);
+
+	return 0;
+}
+
+static const struct phy_ops tegra210_sata_phy_ops = {
+	.init = tegra210_sata_phy_init,
+	.exit = tegra210_sata_phy_exit,
+	.power_on = tegra210_sata_phy_power_on,
+	.power_off = tegra210_sata_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra210_sata_pad_probe(struct tegra_xusb_padctl *padctl,
+			const struct tegra_xusb_pad_soc *soc,
+			struct device_node *np)
+{
+	struct tegra_xusb_sata_pad *sata;
+	struct tegra_xusb_pad *pad;
+	int err;
+
+	sata = kzalloc(sizeof(*sata), GFP_KERNEL);
+	if (!sata)
+		return ERR_PTR(-ENOMEM);
+
+	pad = &sata->base;
+	pad->ops = &tegra210_sata_lane_ops;
+	pad->soc = soc;
+
+	err = tegra_xusb_pad_init(pad, padctl, np);
+	if (err < 0) {
+		kfree(sata);
+		goto out;
+	}
+
+	sata->rst = devm_reset_control_get(&pad->dev, "phy");
+	if (IS_ERR(sata->rst)) {
+		err = PTR_ERR(sata->rst);
+		dev_err(&pad->dev, "failed to get SATA pad reset: %d\n", err);
+		goto unregister;
+	}
+
+	err = tegra_xusb_pad_register(pad, &tegra210_sata_phy_ops);
+	if (err < 0)
+		goto unregister;
+
+	dev_set_drvdata(&pad->dev, pad);
+
+	return pad;
+
+unregister:
+	device_unregister(&pad->dev);
+out:
+	return ERR_PTR(err);
+}
+
+static void tegra210_sata_pad_remove(struct tegra_xusb_pad *pad)
+{
+	struct tegra_xusb_sata_pad *sata = to_sata_pad(pad);
+
+	kfree(sata);
+}
+
+static const struct tegra_xusb_pad_ops tegra210_sata_ops = {
+	.probe = tegra210_sata_pad_probe,
+	.remove = tegra210_sata_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra210_sata_pad = {
+	.name = "sata",
+	.num_lanes = ARRAY_SIZE(tegra210_sata_lanes),
+	.lanes = tegra210_sata_lanes,
+	.ops = &tegra210_sata_ops,
+};
+
+static const struct tegra_xusb_pad_soc * const tegra210_pads[] = {
+	&tegra210_usb2_pad,
+	&tegra210_hsic_pad,
+	&tegra210_pcie_pad,
+	&tegra210_sata_pad,
+};
+
+static int tegra210_usb2_port_enable(struct tegra_xusb_port *port)
+{
+	return 0;
+}
+
+static void tegra210_usb2_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra210_usb2_port_map(struct tegra_xusb_port *port)
+{
+	return tegra_xusb_find_lane(port->padctl, "usb2", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra210_usb2_port_ops = {
+	.enable = tegra210_usb2_port_enable,
+	.disable = tegra210_usb2_port_disable,
+	.map = tegra210_usb2_port_map,
+};
+
+static int tegra210_hsic_port_enable(struct tegra_xusb_port *port)
+{
+	return 0;
+}
+
+static void tegra210_hsic_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra210_hsic_port_map(struct tegra_xusb_port *port)
+{
+	return tegra_xusb_find_lane(port->padctl, "hsic", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra210_hsic_port_ops = {
+	.enable = tegra210_hsic_port_enable,
+	.disable = tegra210_hsic_port_disable,
+	.map = tegra210_hsic_port_map,
+};
+
+static int tegra210_usb3_port_enable(struct tegra_xusb_port *port)
+{
+	struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
+	struct tegra_xusb_padctl *padctl = port->padctl;
+	struct tegra_xusb_lane *lane = usb3->base.lane;
+	unsigned int index = port->index;
+	u32 value;
+	int err;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+
+	if (!usb3->internal)
+		value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
+	else
+		value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
+
+	value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
+	value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, usb3->port);
+	padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+
+	/*
+	 * TODO: move this code into the PCIe/SATA PHY ->power_on() callbacks
+	 * and conditionalize based on mux function? This seems to work, but
+	 * might not be the exact proper sequence.
+	 */
+	err = regulator_enable(usb3->supply);
+	if (err < 0)
+		return err;
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(index));
+	value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_MASK <<
+		   XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT);
+	value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_VAL <<
+		 XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(index));
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(index));
+	value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_MASK <<
+		   XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT);
+	value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_VAL <<
+		 XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(index));
+
+	padctl_writel(padctl, XUSB_PADCTL_UPHY_USB3_PAD_ECTL3_RX_DFE_VAL,
+		      XUSB_PADCTL_UPHY_USB3_PADX_ECTL3(index));
+
+	value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(index));
+	value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_MASK <<
+		   XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT);
+	value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_VAL <<
+		 XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT;
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(index));
+
+	padctl_writel(padctl, XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL,
+		      XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(index));
+
+	if (lane->pad == padctl->sata)
+		err = tegra210_sata_uphy_enable(padctl, true);
+	else
+		err = tegra210_pex_uphy_enable(padctl);
+
+	if (err) {
+		dev_err(&port->dev, "%s: failed to enable UPHY: %d\n",
+			__func__, err);
+		return err;
+	}
+
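+	/* exit ELPG: restore core power, then release the clamps */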
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+	return 0;
+}
+
+static void tegra210_usb3_port_disable(struct tegra_xusb_port *port)
+{
+	struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
+	struct tegra_xusb_padctl *padctl = port->padctl;
+	struct tegra_xusb_lane *lane = port->lane;
+	unsigned int index = port->index;
+	u32 value;
+
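+	/* enter ELPG: engage the clamps, then remove core power */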
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+	usleep_range(100, 200);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+	usleep_range(250, 350);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+	value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(index);
+	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+	if (lane->pad == padctl->sata)
+		tegra210_sata_uphy_disable(padctl);
+	else
+		tegra210_pex_uphy_disable(padctl);
+
+	regulator_disable(usb3->supply);
+
+	value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+	value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
+	value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, 0x7);
+	padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+}
+
+static const struct tegra_xusb_lane_map tegra210_usb3_map[] = {
+	{ 0, "pcie", 6 },
+	{ 1, "pcie", 5 },
+	{ 2, "pcie", 0 },
+	{ 2, "pcie", 3 },
+	{ 3, "pcie", 4 },
+	{ 3, "pcie", 4 },
+	{ 0, NULL,   0 }
+};
+
+static struct tegra_xusb_lane *
+tegra210_usb3_port_map(struct tegra_xusb_port *port)
+{
+	return tegra_xusb_port_find_lane(port, tegra210_usb3_map, "usb3-ss");
+}
+
+static const struct tegra_xusb_port_ops tegra210_usb3_port_ops = {
+	.enable = tegra210_usb3_port_enable,
+	.disable = tegra210_usb3_port_disable,
+	.map = tegra210_usb3_port_map,
+};
+
+static int
+tegra210_xusb_read_fuse_calibration(struct tegra210_xusb_fuse_calibration *fuse)
+{
+	unsigned int i;
+	u32 value;
+	int err;
+
+	err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
+	if (err < 0)
+		return err;
+
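+	/* per-pad HS current levels come from the SKU calibration fuse */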
+	for (i = 0; i < ARRAY_SIZE(fuse->hs_curr_level); i++) {
+		fuse->hs_curr_level[i] =
+			(value >> FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(i)) &
+			FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK;
+	}
+
+	fuse->hs_term_range_adj =
+		(value >> FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT) &
+		FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK;
+
+	err = tegra_fuse_readl(TEGRA_FUSE_USB_CALIB_EXT_0, &value);
+	if (err < 0)
+		return err;
+
+	fuse->rpd_ctrl =
+		(value >> FUSE_USB_CALIB_EXT_RPD_CTRL_SHIFT) &
+		FUSE_USB_CALIB_EXT_RPD_CTRL_MASK;
+
+	return 0;
+}
+
+static struct tegra_xusb_padctl *
+tegra210_xusb_padctl_probe(struct device *dev,
+			   const struct tegra_xusb_padctl_soc *soc)
+{
+	struct tegra210_xusb_padctl *padctl;
+	int err;
+
+	padctl = devm_kzalloc(dev, sizeof(*padctl), GFP_KERNEL);
+	if (!padctl)
+		return ERR_PTR(-ENOMEM);
+
+	padctl->base.dev = dev;
+	padctl->base.soc = soc;
+
+	err = tegra210_xusb_read_fuse_calibration(&padctl->fuse);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	return &padctl->base;
+}
+
+static void tegra210_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
+{
+}
+
+static const struct tegra_xusb_padctl_ops tegra210_xusb_padctl_ops = {
+	.probe = tegra210_xusb_padctl_probe,
+	.remove = tegra210_xusb_padctl_remove,
+	.usb3_set_lfps_detect = tegra210_usb3_set_lfps_detect,
+	.hsic_set_idle = tegra210_hsic_set_idle,
+};
+
+const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc = {
+	.num_pads = ARRAY_SIZE(tegra210_pads),
+	.pads = tegra210_pads,
+	.ports = {
+		.usb2 = {
+			.ops = &tegra210_usb2_port_ops,
+			.count = 4,
+		},
+		.hsic = {
+			.ops = &tegra210_hsic_port_ops,
+			.count = 1,
+		},
+		.usb3 = {
+			.ops = &tegra210_usb3_port_ops,
+			.count = 4,
+		},
+	},
+	.ops = &tegra210_xusb_padctl_ops,
+};
+EXPORT_SYMBOL_GPL(tegra210_xusb_padctl_soc);
+
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_DESCRIPTION("NVIDIA Tegra 210 XUSB Pad Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
new file mode 100644
index 0000000..ec83dfd
--- /dev/null
+++ b/drivers/phy/tegra/xusb.c
@@ -0,0 +1,1021 @@
+/*
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "xusb.h"
+
+static struct phy *tegra_xusb_pad_of_xlate(struct device *dev,
+					   struct of_phandle_args *args)
+{
+	struct tegra_xusb_pad *pad = dev_get_drvdata(dev);
+	struct phy *phy = NULL;
+	unsigned int i;
+
+	if (args->args_count != 0)
+		return ERR_PTR(-EINVAL);
+
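+	/* find the lane whose device node matches the phandle argument */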
+	for (i = 0; i < pad->soc->num_lanes; i++) {
+		if (!pad->lanes[i])
+			continue;
+
+		if (pad->lanes[i]->dev.of_node == args->np) {
+			phy = pad->lanes[i];
+			break;
+		}
+	}
+
+	if (!phy)
+		phy = ERR_PTR(-ENODEV);
+
+	return phy;
+}
+
+static const struct of_device_id tegra_xusb_padctl_of_match[] = {
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+	{
+		.compatible = "nvidia,tegra124-xusb-padctl",
+		.data = &tegra124_xusb_padctl_soc,
+	},
+#endif
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+	{
+		.compatible = "nvidia,tegra210-xusb-padctl",
+		.data = &tegra210_xusb_padctl_soc,
+	},
+#endif
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
+
+static struct device_node *
+tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
+{
+	/*
+	 * of_find_node_by_name() drops a reference, so make sure to grab one.
+	 */
+	struct device_node *np = of_node_get(padctl->dev->of_node);
+
+	np = of_find_node_by_name(np, "pads");
+	if (np)
+		np = of_find_node_by_name(np, name);
+
+	return np;
+}
+
+static struct device_node *
+tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index)
+{
+	/*
+	 * of_find_node_by_name() drops a reference, so make sure to grab one.
+	 */
+	struct device_node *np = of_node_get(pad->dev.of_node);
+
+	np = of_find_node_by_name(np, "lanes");
+	if (!np)
+		return NULL;
+
+	return of_find_node_by_name(np, pad->soc->lanes[index].name);
+}
+
+int tegra_xusb_lane_lookup_function(struct tegra_xusb_lane *lane,
+				    const char *function)
+{
+	unsigned int i;
+
+	for (i = 0; i < lane->soc->num_funcs; i++)
+		if (strcmp(function, lane->soc->funcs[i]) == 0)
+			return i;
+
+	return -EINVAL;
+}
+
+int tegra_xusb_lane_parse_dt(struct tegra_xusb_lane *lane,
+			     struct device_node *np)
+{
+	struct device *dev = &lane->pad->dev;
+	const char *function;
+	int err;
+
+	err = of_property_read_string(np, "nvidia,function", &function);
+	if (err < 0)
+		return err;
+
+	err = tegra_xusb_lane_lookup_function(lane, function);
+	if (err < 0) {
+		dev_err(dev, "invalid function \"%s\" for lane \"%s\"\n",
+			function, np->name);
+		return err;
+	}
+
+	lane->function = err;
+
+	return 0;
+}
+
+static void tegra_xusb_lane_destroy(struct phy *phy)
+{
+	if (phy) {
+		struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+		lane->pad->ops->remove(lane);
+		phy_destroy(phy);
+	}
+}
+
+static void tegra_xusb_pad_release(struct device *dev)
+{
+	struct tegra_xusb_pad *pad = to_tegra_xusb_pad(dev);
+
+	pad->soc->ops->remove(pad);
+}
+
+static struct device_type tegra_xusb_pad_type = {
+	.release = tegra_xusb_pad_release,
+};
+
+int tegra_xusb_pad_init(struct tegra_xusb_pad *pad,
+			struct tegra_xusb_padctl *padctl,
+			struct device_node *np)
+{
+	int err;
+
+	device_initialize(&pad->dev);
+	INIT_LIST_HEAD(&pad->list);
+	pad->dev.parent = padctl->dev;
+	pad->dev.type = &tegra_xusb_pad_type;
+	pad->dev.of_node = np;
+	pad->padctl = padctl;
+
+	err = dev_set_name(&pad->dev, "%s", pad->soc->name);
+	if (err < 0)
+		goto unregister;
+
+	err = device_add(&pad->dev);
+	if (err < 0)
+		goto unregister;
+
+	return 0;
+
+unregister:
+	device_unregister(&pad->dev);
+	return err;
+}
+
+int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
+			    const struct phy_ops *ops)
+{
+	struct device_node *children;
+	struct phy *lane;
+	unsigned int i;
+	int err;
+
+	children = of_find_node_by_name(pad->dev.of_node, "lanes");
+	if (!children)
+		return -ENODEV;
+
+	pad->lanes = devm_kcalloc(&pad->dev, pad->soc->num_lanes, sizeof(lane),
+				  GFP_KERNEL);
+	if (!pad->lanes) {
+		of_node_put(children);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < pad->soc->num_lanes; i++) {
+		struct device_node *np = tegra_xusb_pad_find_phy_node(pad, i);
+		struct tegra_xusb_lane *lane;
+
+		/* skip disabled lanes */
+		if (!np || !of_device_is_available(np)) {
+			of_node_put(np);
+			continue;
+		}
+
+		pad->lanes[i] = phy_create(&pad->dev, np, ops);
+		if (IS_ERR(pad->lanes[i])) {
+			err = PTR_ERR(pad->lanes[i]);
+			of_node_put(np);
+			goto remove;
+		}
+
+		lane = pad->ops->probe(pad, np, i);
+		if (IS_ERR(lane)) {
+			phy_destroy(pad->lanes[i]);
+			err = PTR_ERR(lane);
+			goto remove;
+		}
+
+		list_add_tail(&lane->list, &pad->padctl->lanes);
+		phy_set_drvdata(pad->lanes[i], lane);
+	}
+
+	pad->provider = of_phy_provider_register_full(&pad->dev, children,
+						      tegra_xusb_pad_of_xlate);
+	if (IS_ERR(pad->provider)) {
+		err = PTR_ERR(pad->provider);
+		goto remove;
+	}
+
+	return 0;
+
+remove:
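+	/* destroy any lanes created before the failure */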
+	while (i--)
+		tegra_xusb_lane_destroy(pad->lanes[i]);
+
+	of_node_put(children);
+
+	return err;
+}
+
+void tegra_xusb_pad_unregister(struct tegra_xusb_pad *pad)
+{
+	unsigned int i = pad->soc->num_lanes;
+
+	of_phy_provider_unregister(pad->provider);
+
+	while (i--)
+		tegra_xusb_lane_destroy(pad->lanes[i]);
+
+	device_unregister(&pad->dev);
+}
+
+static struct tegra_xusb_pad *
+tegra_xusb_pad_create(struct tegra_xusb_padctl *padctl,
+		      const struct tegra_xusb_pad_soc *soc)
+{
+	struct tegra_xusb_pad *pad;
+	struct device_node *np;
+	int err;
+
+	np = tegra_xusb_find_pad_node(padctl, soc->name);
+	if (!np || !of_device_is_available(np))
+		return NULL;
+
+	pad = soc->ops->probe(padctl, soc, np);
+	if (IS_ERR(pad)) {
+		err = PTR_ERR(pad);
+		dev_err(padctl->dev, "failed to create pad %s: %d\n",
+			soc->name, err);
+		return ERR_PTR(err);
+	}
+
+	/* XXX move this into ->probe() to avoid string comparison */
+	if (strcmp(soc->name, "pcie") == 0)
+		padctl->pcie = pad;
+
+	if (strcmp(soc->name, "sata") == 0)
+		padctl->sata = pad;
+
+	if (strcmp(soc->name, "usb2") == 0)
+		padctl->usb2 = pad;
+
+	if (strcmp(soc->name, "ulpi") == 0)
+		padctl->ulpi = pad;
+
+	if (strcmp(soc->name, "hsic") == 0)
+		padctl->hsic = pad;
+
+	return pad;
+}
+
+static void __tegra_xusb_remove_pads(struct tegra_xusb_padctl *padctl)
+{
+	struct tegra_xusb_pad *pad, *tmp;
+
+	list_for_each_entry_safe_reverse(pad, tmp, &padctl->pads, list) {
+		list_del(&pad->list);
+		tegra_xusb_pad_unregister(pad);
+	}
+}
+
+static void tegra_xusb_remove_pads(struct tegra_xusb_padctl *padctl)
+{
+	mutex_lock(&padctl->lock);
+	__tegra_xusb_remove_pads(padctl);
+	mutex_unlock(&padctl->lock);
+}
+
+static void tegra_xusb_lane_program(struct tegra_xusb_lane *lane)
+{
+	struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+	const struct tegra_xusb_lane_soc *soc = lane->soc;
+	u32 value;
+
+	/* choose function */
+	value = padctl_readl(padctl, soc->offset);
+	value &= ~(soc->mask << soc->shift);
+	value |= lane->function << soc->shift;
+	padctl_writel(padctl, value, soc->offset);
+}
+
+static void tegra_xusb_pad_program(struct tegra_xusb_pad *pad)
+{
+	unsigned int i;
+
+	for (i = 0; i < pad->soc->num_lanes; i++) {
+		struct tegra_xusb_lane *lane;
+
+		if (pad->lanes[i]) {
+			lane = phy_get_drvdata(pad->lanes[i]);
+			tegra_xusb_lane_program(lane);
+		}
+	}
+}
+
+static int tegra_xusb_setup_pads(struct tegra_xusb_padctl *padctl)
+{
+	struct tegra_xusb_pad *pad;
+	unsigned int i;
+
+	mutex_lock(&padctl->lock);
+
+	for (i = 0; i < padctl->soc->num_pads; i++) {
+		const struct tegra_xusb_pad_soc *soc = padctl->soc->pads[i];
+		int err;
+
+		pad = tegra_xusb_pad_create(padctl, soc);
+		if (IS_ERR(pad)) {
+			err = PTR_ERR(pad);
+			dev_err(padctl->dev, "failed to create pad %s: %d\n",
+				soc->name, err);
+			__tegra_xusb_remove_pads(padctl);
+			mutex_unlock(&padctl->lock);
+			return err;
+		}
+
+		if (!pad)
+			continue;
+
+		list_add_tail(&pad->list, &padctl->pads);
+	}
+
+	list_for_each_entry(pad, &padctl->pads, list)
+		tegra_xusb_pad_program(pad);
+
+	mutex_unlock(&padctl->lock);
+	return 0;
+}
+
+static bool tegra_xusb_lane_check(struct tegra_xusb_lane *lane,
+				  const char *function)
+{
+	const char *func = lane->soc->funcs[lane->function];
+
+	return strcmp(function, func) == 0;
+}
+
+struct tegra_xusb_lane *tegra_xusb_find_lane(struct tegra_xusb_padctl *padctl,
+					     const char *type,
+					     unsigned int index)
+{
+	struct tegra_xusb_lane *lane, *hit = ERR_PTR(-ENODEV);
+	char *name;
+
+	name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+	if (!name)
+		return ERR_PTR(-ENOMEM);
+
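+	/* SoC lane names are of the form "<type>-<index>", e.g. "pcie-0" */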
+	list_for_each_entry(lane, &padctl->lanes, list) {
+		if (strcmp(lane->soc->name, name) == 0) {
+			hit = lane;
+			break;
+		}
+	}
+
+	kfree(name);
+	return hit;
+}
+
+struct tegra_xusb_lane *
+tegra_xusb_port_find_lane(struct tegra_xusb_port *port,
+			  const struct tegra_xusb_lane_map *map,
+			  const char *function)
+{
+	struct tegra_xusb_lane *lane, *match = ERR_PTR(-ENODEV);
+
+	for (; map->type; map++) {
+		if (port->index != map->port)
+			continue;
+
+		lane = tegra_xusb_find_lane(port->padctl, map->type,
+					    map->index);
+		if (IS_ERR(lane))
+			continue;
+
+		if (!tegra_xusb_lane_check(lane, function))
+			continue;
+
+		if (!IS_ERR(match))
+			dev_err(&port->dev, "conflicting match: %s-%u / %s\n",
+				map->type, map->index, match->soc->name);
+		else
+			match = lane;
+	}
+
+	return match;
+}
+
+static struct device_node *
+tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
+			  unsigned int index)
+{
+	/*
+	 * of_find_node_by_name() drops a reference, so make sure to grab one.
+	 */
+	struct device_node *np = of_node_get(padctl->dev->of_node);
+
+	np = of_find_node_by_name(np, "ports");
+	if (np) {
+		char *name;
+
+		name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+		np = of_find_node_by_name(np, name);
+		kfree(name);
+	}
+
+	return np;
+}
+
+struct tegra_xusb_port *
+tegra_xusb_find_port(struct tegra_xusb_padctl *padctl, const char *type,
+		     unsigned int index)
+{
+	struct tegra_xusb_port *port;
+	struct device_node *np;
+
+	np = tegra_xusb_find_port_node(padctl, type, index);
+	if (!np)
+		return NULL;
+
+	list_for_each_entry(port, &padctl->ports, list) {
+		if (np == port->dev.of_node) {
+			of_node_put(np);
+			return port;
+		}
+	}
+
+	of_node_put(np);
+
+	return NULL;
+}
+
+struct tegra_xusb_usb2_port *
+tegra_xusb_find_usb2_port(struct tegra_xusb_padctl *padctl, unsigned int index)
+{
+	struct tegra_xusb_port *port;
+
+	port = tegra_xusb_find_port(padctl, "usb2", index);
+	if (port)
+		return to_usb2_port(port);
+
+	return NULL;
+}
+
+struct tegra_xusb_usb3_port *
+tegra_xusb_find_usb3_port(struct tegra_xusb_padctl *padctl, unsigned int index)
+{
+	struct tegra_xusb_port *port;
+
+	port = tegra_xusb_find_port(padctl, "usb3", index);
+	if (port)
+		return to_usb3_port(port);
+
+	return NULL;
+}
+
+static void tegra_xusb_port_release(struct device *dev)
+{
+}
+
+static struct device_type tegra_xusb_port_type = {
+	.release = tegra_xusb_port_release,
+};
+
+static int tegra_xusb_port_init(struct tegra_xusb_port *port,
+				struct tegra_xusb_padctl *padctl,
+				struct device_node *np,
+				const char *name,
+				unsigned int index)
+{
+	int err;
+
+	INIT_LIST_HEAD(&port->list);
+	port->padctl = padctl;
+	port->index = index;
+
+	device_initialize(&port->dev);
+	port->dev.type = &tegra_xusb_port_type;
+	port->dev.of_node = of_node_get(np);
+	port->dev.parent = padctl->dev;
+
+	err = dev_set_name(&port->dev, "%s-%u", name, index);
+	if (err < 0)
+		goto unregister;
+
+	err = device_add(&port->dev);
+	if (err < 0)
+		goto unregister;
+
+	return 0;
+
+unregister:
+	device_unregister(&port->dev);
+	return err;
+}
+
+static void tegra_xusb_port_unregister(struct tegra_xusb_port *port)
+{
+	device_unregister(&port->dev);
+}
+
+static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
+{
+	struct tegra_xusb_port *port = &usb2->base;
+	struct device_node *np = port->dev.of_node;
+
+	usb2->internal = of_property_read_bool(np, "nvidia,internal");
+
+	usb2->supply = devm_regulator_get(&port->dev, "vbus");
+	if (IS_ERR(usb2->supply))
+		return PTR_ERR(usb2->supply);
+
+	return 0;
+}
+
+static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
+				    unsigned int index)
+{
+	struct tegra_xusb_usb2_port *usb2;
+	struct device_node *np;
+	int err = 0;
+
+	/*
+	 * USB2 ports don't require additional properties, but if the port is
+	 * marked as disabled there is no reason to register it.
+	 */
+	np = tegra_xusb_find_port_node(padctl, "usb2", index);
+	if (!np || !of_device_is_available(np))
+		goto out;
+
+	usb2 = devm_kzalloc(padctl->dev, sizeof(*usb2), GFP_KERNEL);
+	if (!usb2) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = tegra_xusb_port_init(&usb2->base, padctl, np, "usb2", index);
+	if (err < 0)
+		goto out;
+
+	usb2->base.ops = padctl->soc->ports.usb2.ops;
+
+	usb2->base.lane = usb2->base.ops->map(&usb2->base);
+	if (IS_ERR(usb2->base.lane)) {
+		err = PTR_ERR(usb2->base.lane);
+		goto out;
+	}
+
+	err = tegra_xusb_usb2_port_parse_dt(usb2);
+	if (err < 0) {
+		tegra_xusb_port_unregister(&usb2->base);
+		goto out;
+	}
+
+	list_add_tail(&usb2->base.list, &padctl->ports);
+
+out:
+	of_node_put(np);
+	return err;
+}
+
+static int tegra_xusb_ulpi_port_parse_dt(struct tegra_xusb_ulpi_port *ulpi)
+{
+	struct tegra_xusb_port *port = &ulpi->base;
+	struct device_node *np = port->dev.of_node;
+
+	ulpi->internal = of_property_read_bool(np, "nvidia,internal");
+
+	return 0;
+}
+
+static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl,
+				    unsigned int index)
+{
+	struct tegra_xusb_ulpi_port *ulpi;
+	struct device_node *np;
+	int err = 0;
+
+	np = tegra_xusb_find_port_node(padctl, "ulpi", index);
+	if (!np || !of_device_is_available(np))
+		goto out;
+
+	ulpi = devm_kzalloc(padctl->dev, sizeof(*ulpi), GFP_KERNEL);
+	if (!ulpi) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = tegra_xusb_port_init(&ulpi->base, padctl, np, "ulpi", index);
+	if (err < 0)
+		goto out;
+
+	ulpi->base.ops = padctl->soc->ports.ulpi.ops;
+
+	ulpi->base.lane = ulpi->base.ops->map(&ulpi->base);
+	if (IS_ERR(ulpi->base.lane)) {
+		err = PTR_ERR(ulpi->base.lane);
+		goto out;
+	}
+
+	err = tegra_xusb_ulpi_port_parse_dt(ulpi);
+	if (err < 0) {
+		tegra_xusb_port_unregister(&ulpi->base);
+		goto out;
+	}
+
+	list_add_tail(&ulpi->base.list, &padctl->ports);
+
+out:
+	of_node_put(np);
+	return err;
+}
+
+static int tegra_xusb_hsic_port_parse_dt(struct tegra_xusb_hsic_port *hsic)
+{
+	/* XXX */
+	return 0;
+}
+
+static int tegra_xusb_add_hsic_port(struct tegra_xusb_padctl *padctl,
+				    unsigned int index)
+{
+	struct tegra_xusb_hsic_port *hsic;
+	struct device_node *np;
+	int err = 0;
+
+	np = tegra_xusb_find_port_node(padctl, "hsic", index);
+	if (!np || !of_device_is_available(np))
+		goto out;
+
+	hsic = devm_kzalloc(padctl->dev, sizeof(*hsic), GFP_KERNEL);
+	if (!hsic) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = tegra_xusb_port_init(&hsic->base, padctl, np, "hsic", index);
+	if (err < 0)
+		goto out;
+
+	hsic->base.ops = padctl->soc->ports.hsic.ops;
+
+	hsic->base.lane = hsic->base.ops->map(&hsic->base);
+	if (IS_ERR(hsic->base.lane)) {
+		err = PTR_ERR(hsic->base.lane);
+		goto out;
+	}
+
+	err = tegra_xusb_hsic_port_parse_dt(hsic);
+	if (err < 0) {
+		tegra_xusb_port_unregister(&hsic->base);
+		goto out;
+	}
+
+	list_add_tail(&hsic->base.list, &padctl->ports);
+
+out:
+	of_node_put(np);
+	return err;
+}
+
+static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3)
+{
+	struct tegra_xusb_port *port = &usb3->base;
+	struct device_node *np = port->dev.of_node;
+	u32 value;
+	int err;
+
+	err = of_property_read_u32(np, "nvidia,usb2-companion", &value);
+	if (err < 0) {
+		dev_err(&port->dev,
+			"failed to read nvidia,usb2-companion: %d\n", err);
+		return err;
+	}
+
+	usb3->port = value;
+
+	usb3->internal = of_property_read_bool(np, "nvidia,internal");
+
+	usb3->supply = devm_regulator_get(&port->dev, "vbus");
+	if (IS_ERR(usb3->supply))
+		return PTR_ERR(usb3->supply);
+
+	return 0;
+}
+
+static int tegra_xusb_add_usb3_port(struct tegra_xusb_padctl *padctl,
+				    unsigned int index)
+{
+	struct tegra_xusb_usb3_port *usb3;
+	struct device_node *np;
+	int err = 0;
+
+	/*
+	 * If there is no supplemental configuration in the device tree the
+	 * port is unusable. But it is valid to configure only a single port,
+	 * hence return 0 instead of an error to allow ports to be optional.
+	 */
+	np = tegra_xusb_find_port_node(padctl, "usb3", index);
+	if (!np || !of_device_is_available(np))
+		goto out;
+
+	usb3 = devm_kzalloc(padctl->dev, sizeof(*usb3), GFP_KERNEL);
+	if (!usb3) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = tegra_xusb_port_init(&usb3->base, padctl, np, "usb3", index);
+	if (err < 0)
+		goto out;
+
+	usb3->base.ops = padctl->soc->ports.usb3.ops;
+
+	usb3->base.lane = usb3->base.ops->map(&usb3->base);
+	if (IS_ERR(usb3->base.lane)) {
+		err = PTR_ERR(usb3->base.lane);
+		goto out;
+	}
+
+	err = tegra_xusb_usb3_port_parse_dt(usb3);
+	if (err < 0) {
+		tegra_xusb_port_unregister(&usb3->base);
+		goto out;
+	}
+
+	list_add_tail(&usb3->base.list, &padctl->ports);
+
+out:
+	of_node_put(np);
+	return err;
+}
+
+static void __tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
+{
+	struct tegra_xusb_port *port, *tmp;
+
+	list_for_each_entry_safe_reverse(port, tmp, &padctl->ports, list) {
+		list_del(&port->list);
+		tegra_xusb_port_unregister(port);
+	}
+}
+
+static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
+{
+	struct tegra_xusb_port *port;
+	unsigned int i;
+	int err = 0;
+
+	mutex_lock(&padctl->lock);
+
+	for (i = 0; i < padctl->soc->ports.usb2.count; i++) {
+		err = tegra_xusb_add_usb2_port(padctl, i);
+		if (err < 0)
+			goto remove_ports;
+	}
+
+	for (i = 0; i < padctl->soc->ports.ulpi.count; i++) {
+		err = tegra_xusb_add_ulpi_port(padctl, i);
+		if (err < 0)
+			goto remove_ports;
+	}
+
+	for (i = 0; i < padctl->soc->ports.hsic.count; i++) {
+		err = tegra_xusb_add_hsic_port(padctl, i);
+		if (err < 0)
+			goto remove_ports;
+	}
+
+	for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+		err = tegra_xusb_add_usb3_port(padctl, i);
+		if (err < 0)
+			goto remove_ports;
+	}
+
+	list_for_each_entry(port, &padctl->ports, list) {
+		err = port->ops->enable(port);
+		if (err < 0)
+			dev_err(padctl->dev, "failed to enable port %s: %d\n",
+				dev_name(&port->dev), err);
+	}
+
+	goto unlock;
+
+remove_ports:
+	__tegra_xusb_remove_ports(padctl);
+unlock:
+	mutex_unlock(&padctl->lock);
+	return err;
+}
+
+static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
+{
+	mutex_lock(&padctl->lock);
+	__tegra_xusb_remove_ports(padctl);
+	mutex_unlock(&padctl->lock);
+}
+
+static int tegra_xusb_padctl_probe(struct platform_device *pdev)
+{
+	struct device_node *np = of_node_get(pdev->dev.of_node);
+	const struct tegra_xusb_padctl_soc *soc;
+	struct tegra_xusb_padctl *padctl;
+	const struct of_device_id *match;
+	struct resource *res;
+	int err;
+
+	/* for backwards compatibility with old device trees */
+	np = of_find_node_by_name(np, "pads");
+	if (!np) {
+		dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n");
+		return tegra_xusb_padctl_legacy_probe(pdev);
+	}
+
+	of_node_put(np);
+
+	match = of_match_node(tegra_xusb_padctl_of_match, pdev->dev.of_node);
+	soc = match->data;
+
+	padctl = soc->ops->probe(&pdev->dev, soc);
+	if (IS_ERR(padctl))
+		return PTR_ERR(padctl);
+
+	platform_set_drvdata(pdev, padctl);
+	INIT_LIST_HEAD(&padctl->ports);
+	INIT_LIST_HEAD(&padctl->lanes);
+	INIT_LIST_HEAD(&padctl->pads);
+	mutex_init(&padctl->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	padctl->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(padctl->regs)) {
+		err = PTR_ERR(padctl->regs);
+		goto remove;
+	}
+
+	padctl->rst = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(padctl->rst)) {
+		err = PTR_ERR(padctl->rst);
+		goto remove;
+	}
+
+	err = reset_control_deassert(padctl->rst);
+	if (err < 0)
+		goto remove;
+
+	err = tegra_xusb_setup_pads(padctl);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to setup pads: %d\n", err);
+		goto reset;
+	}
+
+	err = tegra_xusb_setup_ports(padctl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to setup XUSB ports: %d\n", err);
+		goto remove_pads;
+	}
+
+	return 0;
+
+remove_pads:
+	tegra_xusb_remove_pads(padctl);
+reset:
+	reset_control_assert(padctl->rst);
+remove:
+	soc->ops->remove(padctl);
+	return err;
+}
+
+static int tegra_xusb_padctl_remove(struct platform_device *pdev)
+{
+	struct tegra_xusb_padctl *padctl = platform_get_drvdata(pdev);
+	int err;
+
+	tegra_xusb_remove_ports(padctl);
+	tegra_xusb_remove_pads(padctl);
+
+	err = reset_control_assert(padctl->rst);
+	if (err < 0)
+		dev_err(&pdev->dev, "failed to assert reset: %d\n", err);
+
+	padctl->soc->ops->remove(padctl);
+
+	return err;
+}
+
+static struct platform_driver tegra_xusb_padctl_driver = {
+	.driver = {
+		.name = "tegra-xusb-padctl",
+		.of_match_table = tegra_xusb_padctl_of_match,
+	},
+	.probe = tegra_xusb_padctl_probe,
+	.remove = tegra_xusb_padctl_remove,
+};
+module_platform_driver(tegra_xusb_padctl_driver);
+
+struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev)
+{
+	struct tegra_xusb_padctl *padctl;
+	struct platform_device *pdev;
+	struct device_node *np;
+
+	np = of_parse_phandle(dev->of_node, "nvidia,xusb-padctl", 0);
+	if (!np)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * This is slightly ugly. A better implementation would be to keep a
+	 * registry of pad controllers, but since there will almost certainly
+	 * only ever be one per SoC that would be a little overkill.
+	 */
+	pdev = of_find_device_by_node(np);
+	if (!pdev) {
+		of_node_put(np);
+		return ERR_PTR(-ENODEV);
+	}
+
+	of_node_put(np);
+
+	padctl = platform_get_drvdata(pdev);
+	if (!padctl) {
+		put_device(&pdev->dev);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	return padctl;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get);
+
+void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl)
+{
+	if (padctl)
+		put_device(padctl->dev);
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_put);
+
+int tegra_xusb_padctl_usb3_save_context(struct tegra_xusb_padctl *padctl,
+					unsigned int port)
+{
+	if (padctl->soc->ops->usb3_save_context)
+		return padctl->soc->ops->usb3_save_context(padctl, port);
+
+	return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_usb3_save_context);
+
+int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
+				    unsigned int port, bool idle)
+{
+	if (padctl->soc->ops->hsic_set_idle)
+		return padctl->soc->ops->hsic_set_idle(padctl, port, idle);
+
+	return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_hsic_set_idle);
+
+int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
+					   unsigned int port, bool enable)
+{
+	if (padctl->soc->ops->usb3_set_lfps_detect)
+		return padctl->soc->ops->usb3_set_lfps_detect(padctl, port,
+							      enable);
+
+	return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_usb3_set_lfps_detect);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
new file mode 100644
index 0000000..b49dbc3
--- /dev/null
+++ b/drivers/phy/tegra/xusb.h
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2015, Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PHY_TEGRA_XUSB_H
+#define __PHY_TEGRA_XUSB_H
+
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+/* legacy entry points for backwards-compatibility */
+int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev);
+int tegra_xusb_padctl_legacy_remove(struct platform_device *pdev);
+
+struct phy;
+struct phy_provider;
+struct platform_device;
+struct regulator;
+
+/*
+ * lanes
+ */
+struct tegra_xusb_lane_soc {
+	const char *name;
+
+	unsigned int offset;
+	unsigned int shift;
+	unsigned int mask;
+
+	const char * const *funcs;
+	unsigned int num_funcs;
+};
+
+struct tegra_xusb_lane {
+	const struct tegra_xusb_lane_soc *soc;
+	struct tegra_xusb_pad *pad;
+	struct device_node *np;
+	struct list_head list;
+	unsigned int function;
+	unsigned int index;
+};
+
+int tegra_xusb_lane_parse_dt(struct tegra_xusb_lane *lane,
+			     struct device_node *np);
+
+struct tegra_xusb_usb2_lane {
+	struct tegra_xusb_lane base;
+
+	u32 hs_curr_level_offset;
+};
+
+static inline struct tegra_xusb_usb2_lane *
+to_usb2_lane(struct tegra_xusb_lane *lane)
+{
+	return container_of(lane, struct tegra_xusb_usb2_lane, base);
+}
+
+struct tegra_xusb_ulpi_lane {
+	struct tegra_xusb_lane base;
+};
+
+static inline struct tegra_xusb_ulpi_lane *
+to_ulpi_lane(struct tegra_xusb_lane *lane)
+{
+	return container_of(lane, struct tegra_xusb_ulpi_lane, base);
+}
+
+struct tegra_xusb_hsic_lane {
+	struct tegra_xusb_lane base;
+
+	u32 strobe_trim;
+	u32 rx_strobe_trim;
+	u32 rx_data_trim;
+	u32 tx_rtune_n;
+	u32 tx_rtune_p;
+	u32 tx_rslew_n;
+	u32 tx_rslew_p;
+	bool auto_term;
+};
+
+static inline struct tegra_xusb_hsic_lane *
+to_hsic_lane(struct tegra_xusb_lane *lane)
+{
+	return container_of(lane, struct tegra_xusb_hsic_lane, base);
+}
+
+struct tegra_xusb_pcie_lane {
+	struct tegra_xusb_lane base;
+};
+
+static inline struct tegra_xusb_pcie_lane *
+to_pcie_lane(struct tegra_xusb_lane *lane)
+{
+	return container_of(lane, struct tegra_xusb_pcie_lane, base);
+}
+
+struct tegra_xusb_sata_lane {
+	struct tegra_xusb_lane base;
+};
+
+static inline struct tegra_xusb_sata_lane *
+to_sata_lane(struct tegra_xusb_lane *lane)
+{
+	return container_of(lane, struct tegra_xusb_sata_lane, base);
+}
+
+struct tegra_xusb_lane_ops {
+	struct tegra_xusb_lane *(*probe)(struct tegra_xusb_pad *pad,
+					 struct device_node *np,
+					 unsigned int index);
+	void (*remove)(struct tegra_xusb_lane *lane);
+};
+
+/*
+ * pads
+ */
+struct tegra_xusb_pad_soc;
+struct tegra_xusb_padctl;
+
+struct tegra_xusb_pad_ops {
+	struct tegra_xusb_pad *(*probe)(struct tegra_xusb_padctl *padctl,
+					const struct tegra_xusb_pad_soc *soc,
+					struct device_node *np);
+	void (*remove)(struct tegra_xusb_pad *pad);
+};
+
+struct tegra_xusb_pad_soc {
+	const char *name;
+
+	const struct tegra_xusb_lane_soc *lanes;
+	unsigned int num_lanes;
+
+	const struct tegra_xusb_pad_ops *ops;
+};
+
+struct tegra_xusb_pad {
+	const struct tegra_xusb_pad_soc *soc;
+	struct tegra_xusb_padctl *padctl;
+	struct phy_provider *provider;
+	struct phy **lanes;
+	struct device dev;
+
+	const struct tegra_xusb_lane_ops *ops;
+
+	struct list_head list;
+};
+
+static inline struct tegra_xusb_pad *to_tegra_xusb_pad(struct device *dev)
+{
+	return container_of(dev, struct tegra_xusb_pad, dev);
+}
+
+int tegra_xusb_pad_init(struct tegra_xusb_pad *pad,
+			struct tegra_xusb_padctl *padctl,
+			struct device_node *np);
+int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
+			    const struct phy_ops *ops);
+void tegra_xusb_pad_unregister(struct tegra_xusb_pad *pad);
+
+struct tegra_xusb_usb2_pad {
+	struct tegra_xusb_pad base;
+
+	struct clk *clk;
+	unsigned int enable;
+	struct mutex lock;
+};
+
+static inline struct tegra_xusb_usb2_pad *
+to_usb2_pad(struct tegra_xusb_pad *pad)
+{
+	return container_of(pad, struct tegra_xusb_usb2_pad, base);
+}
+
+struct tegra_xusb_ulpi_pad {
+	struct tegra_xusb_pad base;
+};
+
+static inline struct tegra_xusb_ulpi_pad *
+to_ulpi_pad(struct tegra_xusb_pad *pad)
+{
+	return container_of(pad, struct tegra_xusb_ulpi_pad, base);
+}
+
+struct tegra_xusb_hsic_pad {
+	struct tegra_xusb_pad base;
+
+	struct regulator *supply;
+	struct clk *clk;
+};
+
+static inline struct tegra_xusb_hsic_pad *
+to_hsic_pad(struct tegra_xusb_pad *pad)
+{
+	return container_of(pad, struct tegra_xusb_hsic_pad, base);
+}
+
+struct tegra_xusb_pcie_pad {
+	struct tegra_xusb_pad base;
+
+	struct reset_control *rst;
+	struct clk *pll;
+
+	unsigned int enable;
+};
+
+static inline struct tegra_xusb_pcie_pad *
+to_pcie_pad(struct tegra_xusb_pad *pad)
+{
+	return container_of(pad, struct tegra_xusb_pcie_pad, base);
+}
+
+struct tegra_xusb_sata_pad {
+	struct tegra_xusb_pad base;
+
+	struct reset_control *rst;
+	struct clk *pll;
+
+	unsigned int enable;
+};
+
+static inline struct tegra_xusb_sata_pad *
+to_sata_pad(struct tegra_xusb_pad *pad)
+{
+	return container_of(pad, struct tegra_xusb_sata_pad, base);
+}
+
+/*
+ * ports
+ */
+struct tegra_xusb_port_ops;
+
+struct tegra_xusb_port {
+	struct tegra_xusb_padctl *padctl;
+	struct tegra_xusb_lane *lane;
+	unsigned int index;
+
+	struct list_head list;
+	struct device dev;
+
+	const struct tegra_xusb_port_ops *ops;
+};
+
+struct tegra_xusb_lane_map {
+	unsigned int port;
+	const char *type;
+	unsigned int index;
+	const char *func;
+};
+
+struct tegra_xusb_lane *
+tegra_xusb_port_find_lane(struct tegra_xusb_port *port,
+			  const struct tegra_xusb_lane_map *map,
+			  const char *function);
+
+struct tegra_xusb_port *
+tegra_xusb_find_port(struct tegra_xusb_padctl *padctl, const char *type,
+		     unsigned int index);
+
+struct tegra_xusb_usb2_port {
+	struct tegra_xusb_port base;
+
+	struct regulator *supply;
+	bool internal;
+};
+
+static inline struct tegra_xusb_usb2_port *
+to_usb2_port(struct tegra_xusb_port *port)
+{
+	return container_of(port, struct tegra_xusb_usb2_port, base);
+}
+
+struct tegra_xusb_usb2_port *
+tegra_xusb_find_usb2_port(struct tegra_xusb_padctl *padctl,
+			  unsigned int index);
+
+struct tegra_xusb_ulpi_port {
+	struct tegra_xusb_port base;
+
+	struct regulator *supply;
+	bool internal;
+};
+
+static inline struct tegra_xusb_ulpi_port *
+to_ulpi_port(struct tegra_xusb_port *port)
+{
+	return container_of(port, struct tegra_xusb_ulpi_port, base);
+}
+
+struct tegra_xusb_hsic_port {
+	struct tegra_xusb_port base;
+};
+
+static inline struct tegra_xusb_hsic_port *
+to_hsic_port(struct tegra_xusb_port *port)
+{
+	return container_of(port, struct tegra_xusb_hsic_port, base);
+}
+
+struct tegra_xusb_usb3_port {
+	struct tegra_xusb_port base;
+	struct regulator *supply;
+	bool context_saved;
+	unsigned int port;
+	bool internal;
+
+	u32 tap1;
+	u32 amp;
+	u32 ctle_z;
+	u32 ctle_g;
+};
+
+static inline struct tegra_xusb_usb3_port *
+to_usb3_port(struct tegra_xusb_port *port)
+{
+	return container_of(port, struct tegra_xusb_usb3_port, base);
+}
+
+struct tegra_xusb_usb3_port *
+tegra_xusb_find_usb3_port(struct tegra_xusb_padctl *padctl,
+			  unsigned int index);
+
+struct tegra_xusb_port_ops {
+	int (*enable)(struct tegra_xusb_port *port);
+	void (*disable)(struct tegra_xusb_port *port);
+	struct tegra_xusb_lane *(*map)(struct tegra_xusb_port *port);
+};
+
+/*
+ * pad controller
+ */
+struct tegra_xusb_padctl_soc;
+
+struct tegra_xusb_padctl_ops {
+	struct tegra_xusb_padctl *
+		(*probe)(struct device *dev,
+			 const struct tegra_xusb_padctl_soc *soc);
+	void (*remove)(struct tegra_xusb_padctl *padctl);
+
+	int (*usb3_save_context)(struct tegra_xusb_padctl *padctl,
+				 unsigned int index);
+	int (*hsic_set_idle)(struct tegra_xusb_padctl *padctl,
+			     unsigned int index, bool idle);
+	int (*usb3_set_lfps_detect)(struct tegra_xusb_padctl *padctl,
+				    unsigned int index, bool enable);
+};
+
+struct tegra_xusb_padctl_soc {
+	const struct tegra_xusb_pad_soc * const *pads;
+	unsigned int num_pads;
+
+	struct {
+		struct {
+			const struct tegra_xusb_port_ops *ops;
+			unsigned int count;
+		} usb2, ulpi, hsic, usb3;
+	} ports;
+
+	const struct tegra_xusb_padctl_ops *ops;
+};
+
+struct tegra_xusb_padctl {
+	struct device *dev;
+	void __iomem *regs;
+	struct mutex lock;
+	struct reset_control *rst;
+
+	const struct tegra_xusb_padctl_soc *soc;
+
+	struct tegra_xusb_pad *pcie;
+	struct tegra_xusb_pad *sata;
+	struct tegra_xusb_pad *ulpi;
+	struct tegra_xusb_pad *usb2;
+	struct tegra_xusb_pad *hsic;
+
+	struct list_head ports;
+	struct list_head lanes;
+	struct list_head pads;
+
+	unsigned int enable;
+
+	struct clk *clk;
+};
+
+static inline void padctl_writel(struct tegra_xusb_padctl *padctl, u32 value,
+				 unsigned long offset)
+{
+	dev_dbg(padctl->dev, "%08lx < %08x\n", offset, value);
+	writel(value, padctl->regs + offset);
+}
+
+static inline u32 padctl_readl(struct tegra_xusb_padctl *padctl,
+			       unsigned long offset)
+{
+	u32 value = readl(padctl->regs + offset);
+	dev_dbg(padctl->dev, "%08lx > %08x\n", offset, value);
+	return value;
+}
+
+struct tegra_xusb_lane *tegra_xusb_find_lane(struct tegra_xusb_padctl *padctl,
+					     const char *name,
+					     unsigned int index);
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+extern const struct tegra_xusb_padctl_soc tegra124_xusb_padctl_soc;
+#endif
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc;
+#endif
+
+#endif /* __PHY_TEGRA_XUSB_H */
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 2cc7438..c356223 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -86,3 +86,16 @@
 	  The ChipcommonA GPIO controller support basic PINCONF functions such
 	  as bias pull up, pull down, and drive strength configurations, when
 	  these pins are muxed to GPIO.
+
+config PINCTRL_NS2_MUX
+	bool "Broadcom Northstar2 pinmux driver"
+	depends on OF
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	select PINMUX
+	select GENERIC_PINCONF
+	default ARM64 && ARCH_BCM_IPROC
+	help
+	  Say yes here to enable the Broadcom NS2 MUX driver.
+
+	  The Broadcom Northstar2 IOMUX driver supports group based IOMUX
+	  configuration.
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile
index 6148367d..3861a1c 100644
--- a/drivers/pinctrl/bcm/Makefile
+++ b/drivers/pinctrl/bcm/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_PINCTRL_IPROC_GPIO)	+= pinctrl-iproc-gpio.o
 obj-$(CONFIG_PINCTRL_CYGNUS_MUX)	+= pinctrl-cygnus-mux.o
 obj-$(CONFIG_PINCTRL_NSP_GPIO)		+= pinctrl-nsp-gpio.o
+obj-$(CONFIG_PINCTRL_NS2_MUX)		+= pinctrl-ns2-mux.o
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index c3c692e..582f6df 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1024,7 +1024,7 @@
 	.get_group_pins = bcm281xx_pinctrl_get_group_pins,
 	.pin_dbg_show = bcm281xx_pinctrl_pin_dbg_show,
 	.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int bcm281xx_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev)
@@ -1422,9 +1422,7 @@
 	bcm281xx_pinctrl_desc.pins = bcm281xx_pinctrl.pins;
 	bcm281xx_pinctrl_desc.npins = bcm281xx_pinctrl.npins;
 
-	pctl = pinctrl_register(&bcm281xx_pinctrl_desc,
-				&pdev->dev,
-				pdata);
+	pctl = devm_pinctrl_register(&pdev->dev, &bcm281xx_pinctrl_desc, pdata);
 	if (IS_ERR(pctl)) {
 		dev_err(&pdev->dev, "Failed to register pinctrl\n");
 		return PTR_ERR(pctl);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 08b1d93..fa77165 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -342,6 +342,18 @@
 	return bcm2835_gpio_get_bit(pc, GPLEV0, offset);
 }
 
+static int bcm2835_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+	struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
+	enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, offset);
+
+	/* Alternative function doesn't clearly provide a direction */
+	if (fsel > BCM2835_FSEL_GPIO_OUT)
+		return -EINVAL;
+
+	return (fsel == BCM2835_FSEL_GPIO_IN);
+}
+
 static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
 	struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
@@ -370,6 +382,7 @@
 	.free = gpiochip_generic_free,
 	.direction_input = bcm2835_gpio_direction_input,
 	.direction_output = bcm2835_gpio_direction_output,
+	.get_direction = bcm2835_gpio_get_direction,
 	.get = bcm2835_gpio_get,
 	.set = bcm2835_gpio_set,
 	.to_irq = bcm2835_gpio_to_irq,
@@ -1027,7 +1040,7 @@
 		return err;
 	}
 
-	pc->pctl_dev = pinctrl_register(&bcm2835_pinctrl_desc, dev, pc);
+	pc->pctl_dev = devm_pinctrl_register(dev, &bcm2835_pinctrl_desc, pc);
 	if (IS_ERR(pc->pctl_dev)) {
 		gpiochip_remove(&pc->gpio_chip);
 		return PTR_ERR(pc->pctl_dev);
@@ -1045,7 +1058,6 @@
 {
 	struct bcm2835_pinctrl *pc = platform_get_drvdata(pdev);
 
-	pinctrl_unregister(pc->pctl_dev);
 	gpiochip_remove(&pc->gpio_chip);
 
 	return 0;
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
index 9728f3d..d31c957 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
@@ -737,7 +737,7 @@
 	.get_group_pins = cygnus_get_group_pins,
 	.pin_dbg_show = cygnus_pin_dbg_show,
 	.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int cygnus_get_functions_count(struct pinctrl_dev *pctrl_dev)
@@ -987,7 +987,7 @@
 	cygnus_pinctrl_desc.pins = pins;
 	cygnus_pinctrl_desc.npins = num_pins;
 
-	pinctrl->pctl = pinctrl_register(&cygnus_pinctrl_desc, &pdev->dev,
+	pinctrl->pctl = devm_pinctrl_register(&pdev->dev, &cygnus_pinctrl_desc,
 			pinctrl);
 	if (IS_ERR(pinctrl->pctl)) {
 		dev_err(&pdev->dev, "unable to register Cygnus IOMUX pinctrl\n");
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index d530ab4..3670f5e 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -379,7 +379,7 @@
 	.get_groups_count = iproc_get_groups_count,
 	.get_group_name = iproc_get_group_name,
 	.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int iproc_gpio_set_pull(struct iproc_gpio *chip, unsigned gpio,
@@ -623,7 +623,7 @@
 	pctldesc->npins = gc->ngpio;
 	pctldesc->confops = &iproc_pconf_ops;
 
-	chip->pctl = pinctrl_register(pctldesc, chip->dev, chip);
+	chip->pctl = devm_pinctrl_register(chip->dev, pctldesc, chip);
 	if (IS_ERR(chip->pctl)) {
 		dev_err(chip->dev, "unable to register pinctrl device\n");
 		return PTR_ERR(chip->pctl);
@@ -632,11 +632,6 @@
 	return 0;
 }
 
-static void iproc_gpio_unregister_pinconf(struct iproc_gpio *chip)
-{
-	pinctrl_unregister(chip->pctl);
-}
-
 static const struct of_device_id iproc_gpio_of_match[] = {
 	{ .compatible = "brcm,cygnus-ccm-gpio" },
 	{ .compatible = "brcm,cygnus-asiu-gpio" },
@@ -720,7 +715,7 @@
 					   handle_simple_irq, IRQ_TYPE_NONE);
 		if (ret) {
 			dev_err(dev, "no GPIO irqchip\n");
-			goto err_unregister_pinconf;
+			goto err_rm_gpiochip;
 		}
 
 		gpiochip_set_chained_irqchip(gc, &iproc_gpio_irq_chip, irq,
@@ -729,9 +724,6 @@
 
 	return 0;
 
-err_unregister_pinconf:
-	iproc_gpio_unregister_pinconf(chip);
-
 err_rm_gpiochip:
 	gpiochip_remove(gc);
 
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
new file mode 100644
index 0000000..3fefd14
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -0,0 +1,1117 @@
+/* Copyright (C) 2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file contains the Northstar2 IOMUX driver that supports group
+ * based PINMUX configuration. The PWM is functional only when the
+ * corresponding mfio pin group is selected as gpio.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define NS2_NUM_IOMUX			19
+#define NS2_NUM_PWM_MUX			4
+
+#define NS2_PIN_MUX_BASE0		0x00
+#define NS2_PIN_MUX_BASE1		0x01
+#define NS2_PIN_CONF_BASE		0x02
+#define NS2_MUX_PAD_FUNC1_OFFSET	0x04
+
+#define NS2_PIN_SRC_MASK		0x01
+#define NS2_PIN_PULL_MASK		0x03
+#define NS2_PIN_DRIVE_STRENGTH_MASK	0x07
+
+#define NS2_PIN_PULL_UP			0x01
+#define NS2_PIN_PULL_DOWN		0x02
+
+#define NS2_PIN_INPUT_EN_MASK		0x01
+
+/*
+ * Northstar2 IOMUX register description
+ *
+ * @base: base address number
+ * @offset: register offset for mux configuration of a group
+ * @shift: bit shift for mux configuration of a group
+ * @mask: mask bits
+ * @alt: alternate function to set to
+ */
+struct ns2_mux {
+	unsigned int base;
+	unsigned int offset;
+	unsigned int shift;
+	unsigned int mask;
+	unsigned int alt;
+};
+
+/*
+ * Keep track of Northstar2 IOMUX configuration and prevent double
+ * configuration
+ *
+ * @mux: Northstar2 IOMUX register description
+ * @is_configured: flag to indicate whether a mux setting has already
+ * been configured
+ */
+struct ns2_mux_log {
+	struct ns2_mux mux;
+	bool is_configured;
+};
+
+/*
+ * Group based IOMUX configuration
+ *
+ * @name: name of the group
+ * @pins: array of pins used by this group
+ * @num_pins: total number of pins used by this group
+ * @mux: Northstar2 group based IOMUX configuration
+ */
+struct ns2_pin_group {
+	const char *name;
+	const unsigned int *pins;
+	const unsigned int num_pins;
+	const struct ns2_mux mux;
+};
+
+/*
+ * Northstar2 mux function and supported pin groups
+ *
+ * @name: name of the function
+ * @groups: array of groups that can be supported by this function
+ * @num_groups: total number of groups that can be supported by function
+ */
+struct ns2_pin_function {
+	const char *name;
+	const char * const *groups;
+	const unsigned int num_groups;
+};
+
+/*
+ * Northstar2 IOMUX pinctrl core
+ *
+ * @pctl: pointer to pinctrl_dev
+ * @dev: pointer to device
+ * @base0: first IOMUX register base
+ * @base1: second IOMUX register base
+ * @pinconf_base: configuration register base
+ * @groups: pointer to array of groups
+ * @num_groups: total number of groups
+ * @functions: pointer to array of functions
+ * @num_functions: total number of functions
+ * @mux_log: pointer to the array of mux logs
+ * @lock: lock to protect register access
+ */
+struct ns2_pinctrl {
+	struct pinctrl_dev *pctl;
+	struct device *dev;
+	void __iomem *base0;
+	void __iomem *base1;
+	void __iomem *pinconf_base;
+
+	const struct ns2_pin_group *groups;
+	unsigned int num_groups;
+
+	const struct ns2_pin_function *functions;
+	unsigned int num_functions;
+
+	struct ns2_mux_log *mux_log;
+
+	spinlock_t lock;
+};
+
+/*
+ * Pin configuration info
+ *
+ * @base: base address number
+ * @offset: register offset from base
+ * @src_shift: slew rate control bit shift in the register
+ * @input_en: input enable control bit shift
+ * @pull_shift: pull-up/pull-down control bit shift in the register
+ * @drive_shift: drive strength control bit shift in the register
+ */
+struct ns2_pinconf {
+	unsigned int base;
+	unsigned int offset;
+	unsigned int src_shift;
+	unsigned int input_en;
+	unsigned int pull_shift;
+	unsigned int drive_shift;
+};
+
+/*
+ * Description of a pin in Northstar2
+ *
+ * @pin: pin number
+ * @name: pin name
+ * @pin_conf: pin configuration structure
+ */
+struct ns2_pin {
+	unsigned int pin;
+	char *name;
+	struct ns2_pinconf pin_conf;
+};
+
+#define NS2_PIN_DESC(p, n, b, o, s, i, pu, d)	\
+{						\
+	.pin = p,				\
+	.name = n,				\
+	.pin_conf = {				\
+		.base = b,			\
+		.offset = o,			\
+		.src_shift = s,			\
+		.input_en = i,			\
+		.pull_shift = pu,		\
+		.drive_shift = d,		\
+	}					\
+}
+
+/*
+ * List of pins in Northstar2
+ */
+static struct ns2_pin ns2_pins[] = {
+	NS2_PIN_DESC(0, "mfio_0", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(1, "mfio_1", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(2, "mfio_2", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(3, "mfio_3", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(4, "mfio_4", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(5, "mfio_5", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(6, "mfio_6", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(7, "mfio_7", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(8, "mfio_8", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(9, "mfio_9", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(10, "mfio_10", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(11, "mfio_11", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(12, "mfio_12", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(13, "mfio_13", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(14, "mfio_14", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(15, "mfio_15", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(16, "mfio_16", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(17, "mfio_17", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(18, "mfio_18", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(19, "mfio_19", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(20, "mfio_20", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(21, "mfio_21", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(22, "mfio_22", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(23, "mfio_23", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(24, "mfio_24", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(25, "mfio_25", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(26, "mfio_26", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(27, "mfio_27", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(28, "mfio_28", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(29, "mfio_29", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(30, "mfio_30", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(31, "mfio_31", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(32, "mfio_32", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(33, "mfio_33", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(34, "mfio_34", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(35, "mfio_35", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(36, "mfio_36", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(37, "mfio_37", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(38, "mfio_38", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(39, "mfio_39", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(40, "mfio_40", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(41, "mfio_41", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(42, "mfio_42", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(43, "mfio_43", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(44, "mfio_44", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(45, "mfio_45", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(46, "mfio_46", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(47, "mfio_47", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(48, "mfio_48", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(49, "mfio_49", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(50, "mfio_50", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(51, "mfio_51", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(52, "mfio_52", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(53, "mfio_53", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(54, "mfio_54", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(55, "mfio_55", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(56, "mfio_56", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(57, "mfio_57", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(58, "mfio_58", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(59, "mfio_59", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(60, "mfio_60", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(61, "mfio_61", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(62, "mfio_62", -1, 0, 0, 0, 0, 0),
+	NS2_PIN_DESC(63, "qspi_wp", 2, 0x0, 31, 30, 27, 24),
+	NS2_PIN_DESC(64, "qspi_hold", 2, 0x0, 23, 22, 19, 16),
+	NS2_PIN_DESC(65, "qspi_cs", 2, 0x0, 15, 14, 11, 8),
+	NS2_PIN_DESC(66, "qspi_sck", 2, 0x0, 7, 6, 3, 0),
+	NS2_PIN_DESC(67, "uart3_sin", 2, 0x04, 31, 30, 27, 24),
+	NS2_PIN_DESC(68, "uart3_sout", 2, 0x04, 23, 22, 19, 16),
+	NS2_PIN_DESC(69, "qspi_mosi", 2, 0x04, 15, 14, 11, 8),
+	NS2_PIN_DESC(70, "qspi_miso", 2, 0x04, 7, 6, 3, 0),
+	NS2_PIN_DESC(71, "spi0_fss", 2, 0x08, 31, 30, 27, 24),
+	NS2_PIN_DESC(72, "spi0_rxd", 2, 0x08, 23, 22, 19, 16),
+	NS2_PIN_DESC(73, "spi0_txd", 2, 0x08, 15, 14, 11, 8),
+	NS2_PIN_DESC(74, "spi0_sck", 2, 0x08, 7, 6, 3, 0),
+	NS2_PIN_DESC(75, "spi1_fss", 2, 0x0c, 31, 30, 27, 24),
+	NS2_PIN_DESC(76, "spi1_rxd", 2, 0x0c, 23, 22, 19, 16),
+	NS2_PIN_DESC(77, "spi1_txd", 2, 0x0c, 15, 14, 11, 8),
+	NS2_PIN_DESC(78, "spi1_sck", 2, 0x0c, 7, 6, 3, 0),
+	NS2_PIN_DESC(79, "sdio0_data7", 2, 0x10, 31, 30, 27, 24),
+	NS2_PIN_DESC(80, "sdio0_emmc_rst", 2, 0x10, 23, 22, 19, 16),
+	NS2_PIN_DESC(81, "sdio0_led_on", 2, 0x10, 15, 14, 11, 8),
+	NS2_PIN_DESC(82, "sdio0_wp", 2, 0x10, 7, 6, 3, 0),
+	NS2_PIN_DESC(83, "sdio0_data3", 2, 0x14, 31, 30, 27, 24),
+	NS2_PIN_DESC(84, "sdio0_data4", 2, 0x14, 23, 22, 19, 16),
+	NS2_PIN_DESC(85, "sdio0_data5", 2, 0x14, 15, 14, 11, 8),
+	NS2_PIN_DESC(86, "sdio0_data6", 2, 0x14, 7, 6, 3, 0),
+	NS2_PIN_DESC(87, "sdio0_cmd", 2, 0x18, 31, 30, 27, 24),
+	NS2_PIN_DESC(88, "sdio0_data0", 2, 0x18, 23, 22, 19, 16),
+	NS2_PIN_DESC(89, "sdio0_data1", 2, 0x18, 15, 14, 11, 8),
+	NS2_PIN_DESC(90, "sdio0_data2", 2, 0x18, 7, 6, 3, 0),
+	NS2_PIN_DESC(91, "sdio1_led_on", 2, 0x1c, 31, 30, 27, 24),
+	NS2_PIN_DESC(92, "sdio1_wp", 2, 0x1c, 23, 22, 19, 16),
+	NS2_PIN_DESC(93, "sdio0_cd_l", 2, 0x1c, 15, 14, 11, 8),
+	NS2_PIN_DESC(94, "sdio0_clk", 2, 0x1c, 7, 6, 3, 0),
+	NS2_PIN_DESC(95, "sdio1_data5", 2, 0x20, 31, 30, 27, 24),
+	NS2_PIN_DESC(96, "sdio1_data6", 2, 0x20, 23, 22, 19, 16),
+	NS2_PIN_DESC(97, "sdio1_data7", 2, 0x20, 15, 14, 11, 8),
+	NS2_PIN_DESC(98, "sdio1_emmc_rst", 2, 0x20, 7, 6, 3, 0),
+	NS2_PIN_DESC(99, "sdio1_data1", 2, 0x24, 31, 30, 27, 24),
+	NS2_PIN_DESC(100, "sdio1_data2", 2, 0x24, 23, 22, 19, 16),
+	NS2_PIN_DESC(101, "sdio1_data3", 2, 0x24, 15, 14, 11, 8),
+	NS2_PIN_DESC(102, "sdio1_data4", 2, 0x24, 7, 6, 3, 0),
+	NS2_PIN_DESC(103, "sdio1_cd_l", 2, 0x28, 31, 30, 27, 24),
+	NS2_PIN_DESC(104, "sdio1_clk", 2, 0x28, 23, 22, 19, 16),
+	NS2_PIN_DESC(105, "sdio1_cmd", 2, 0x28, 15, 14, 11, 8),
+	NS2_PIN_DESC(106, "sdio1_data0", 2, 0x28, 7, 6, 3, 0),
+	NS2_PIN_DESC(107, "ext_mdio_0", 2, 0x2c, 15, 14, 11, 8),
+	NS2_PIN_DESC(108, "ext_mdc_0", 2, 0x2c, 7, 6, 3, 0),
+	NS2_PIN_DESC(109, "usb3_p1_vbus_ppc", 2, 0x34, 31, 30, 27, 24),
+	NS2_PIN_DESC(110, "usb3_p1_overcurrent", 2, 0x34, 23, 22, 19, 16),
+	NS2_PIN_DESC(111, "usb3_p0_vbus_ppc", 2, 0x34, 15, 14, 11, 8),
+	NS2_PIN_DESC(112, "usb3_p0_overcurrent", 2, 0x34, 7, 6, 3, 0),
+	NS2_PIN_DESC(113, "usb2_presence_indication", 2, 0x38, 31, 30, 27, 24),
+	NS2_PIN_DESC(114, "usb2_vbus_present", 2, 0x38, 23, 22, 19, 16),
+	NS2_PIN_DESC(115, "usb2_vbus_ppc", 2, 0x38, 15, 14, 11, 8),
+	NS2_PIN_DESC(116, "usb2_overcurrent", 2, 0x38, 7, 6, 3, 0),
+	NS2_PIN_DESC(117, "sata_led1", 2, 0x3c, 15, 14, 11, 8),
+	NS2_PIN_DESC(118, "sata_led0", 2, 0x3c, 7, 6, 3, 0),
+};
+
+/*
+ * List of groups of pins
+ */
+
+static const unsigned int nand_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+	11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
+static const unsigned int nor_data_pins[] =  {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+	10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25};
+
+static const unsigned int gpio_0_1_pins[] = {24, 25};
+static const unsigned int pwm_0_pins[] = {24};
+static const unsigned int pwm_1_pins[] = {25};
+
+static const unsigned int uart1_ext_clk_pins[] = {26};
+static const unsigned int nor_adv_pins[] = {26};
+
+static const unsigned int gpio_2_5_pins[] = {27, 28, 29, 30};
+static const unsigned int pcie_ab1_clk_wak_pins[] = {27, 28, 29, 30};
+static const unsigned int nor_addr_0_3_pins[] = {27, 28, 29, 30};
+static const unsigned int pwm_2_pins[] = {27};
+static const unsigned int pwm_3_pins[] = {28};
+
+static const unsigned int gpio_6_7_pins[] = {31, 32};
+static const unsigned int pcie_a3_clk_wak_pins[] = {31, 32};
+static const unsigned int nor_addr_4_5_pins[] = {31, 32};
+
+static const unsigned int gpio_8_9_pins[] = {33, 34};
+static const unsigned int pcie_b3_clk_wak_pins[] = {33, 34};
+static const unsigned int nor_addr_6_7_pins[] = {33, 34};
+
+static const unsigned int gpio_10_11_pins[] = {35, 36};
+static const unsigned int pcie_b2_clk_wak_pins[] = {35, 36};
+static const unsigned int nor_addr_8_9_pins[] = {35, 36};
+
+static const unsigned int gpio_12_13_pins[] = {37, 38};
+static const unsigned int pcie_a2_clk_wak_pins[] = {37, 38};
+static const unsigned int nor_addr_10_11_pins[] = {37, 38};
+
+static const unsigned int gpio_14_17_pins[] = {39, 40, 41, 42};
+static const unsigned int uart0_modem_pins[] = {39, 40, 41, 42};
+static const unsigned int nor_addr_12_15_pins[] = {39, 40, 41, 42};
+
+static const unsigned int gpio_18_19_pins[] = {43, 44};
+static const unsigned int uart0_rts_cts_pins[] = {43, 44};
+
+static const unsigned int gpio_20_21_pins[] = {45, 46};
+static const unsigned int uart0_in_out_pins[] = {45, 46};
+
+static const unsigned int gpio_22_23_pins[] = {47, 48};
+static const unsigned int uart1_dcd_dsr_pins[] = {47, 48};
+
+static const unsigned int gpio_24_25_pins[] = {49, 50};
+static const unsigned int uart1_ri_dtr_pins[] = {49, 50};
+
+static const unsigned int gpio_26_27_pins[] = {51, 52};
+static const unsigned int uart1_rts_cts_pins[] = {51, 52};
+
+static const unsigned int gpio_28_29_pins[] = {53, 54};
+static const unsigned int uart1_in_out_pins[] = {53, 54};
+
+static const unsigned int gpio_30_31_pins[] = {55, 56};
+static const unsigned int uart2_rts_cts_pins[] = {55, 56};
+
+#define NS2_PIN_GROUP(group_name, ba, off, sh, ma, al)	\
+{							\
+	.name = __stringify(group_name) "_grp",		\
+	.pins = group_name ## _pins,			\
+	.num_pins = ARRAY_SIZE(group_name ## _pins),	\
+	.mux = {					\
+		.base = ba,				\
+		.offset = off,				\
+		.shift = sh,				\
+		.mask = ma,				\
+		.alt = al,				\
+	}						\
+}
+
+/*
+ * List of Northstar2 pin groups
+ */
+static const struct ns2_pin_group ns2_pin_groups[] = {
+	NS2_PIN_GROUP(nand, 0, 0, 31, 1, 0),
+	NS2_PIN_GROUP(nor_data, 0, 0, 31, 1, 1),
+	NS2_PIN_GROUP(gpio_0_1, 0, 0, 31, 1, 0),
+
+	NS2_PIN_GROUP(uart1_ext_clk, 0, 4, 30, 3, 1),
+	NS2_PIN_GROUP(nor_adv, 0, 4, 30, 3, 2),
+
+	NS2_PIN_GROUP(gpio_2_5,	0, 4, 28, 3, 0),
+	NS2_PIN_GROUP(pcie_ab1_clk_wak, 0, 4, 28, 3, 1),
+	NS2_PIN_GROUP(nor_addr_0_3, 0, 4, 28, 3, 2),
+
+	NS2_PIN_GROUP(gpio_6_7, 0, 4, 26, 3, 0),
+	NS2_PIN_GROUP(pcie_a3_clk_wak, 0, 4, 26, 3, 1),
+	NS2_PIN_GROUP(nor_addr_4_5, 0, 4, 26, 3, 2),
+
+	NS2_PIN_GROUP(gpio_8_9, 0, 4, 24, 3, 0),
+	NS2_PIN_GROUP(pcie_b3_clk_wak, 0, 4, 24, 3, 1),
+	NS2_PIN_GROUP(nor_addr_6_7, 0, 4, 24, 3, 2),
+
+	NS2_PIN_GROUP(gpio_10_11, 0, 4, 22, 3, 0),
+	NS2_PIN_GROUP(pcie_b2_clk_wak, 0, 4, 22, 3, 1),
+	NS2_PIN_GROUP(nor_addr_8_9, 0, 4, 22, 3, 2),
+
+	NS2_PIN_GROUP(gpio_12_13, 0, 4, 20, 3, 0),
+	NS2_PIN_GROUP(pcie_a2_clk_wak, 0, 4, 20, 3, 1),
+	NS2_PIN_GROUP(nor_addr_10_11, 0, 4, 20, 3, 2),
+
+	NS2_PIN_GROUP(gpio_14_17, 0, 4, 18, 3, 0),
+	NS2_PIN_GROUP(uart0_modem, 0, 4, 18, 3, 1),
+	NS2_PIN_GROUP(nor_addr_12_15, 0, 4, 18, 3, 2),
+
+	NS2_PIN_GROUP(gpio_18_19, 0, 4, 16, 3, 0),
+	NS2_PIN_GROUP(uart0_rts_cts, 0, 4, 16, 3, 1),
+
+	NS2_PIN_GROUP(gpio_20_21, 0, 4, 14, 3, 0),
+	NS2_PIN_GROUP(uart0_in_out, 0, 4, 14, 3, 1),
+
+	NS2_PIN_GROUP(gpio_22_23, 0, 4, 12, 3, 0),
+	NS2_PIN_GROUP(uart1_dcd_dsr, 0, 4, 12, 3, 1),
+
+	NS2_PIN_GROUP(gpio_24_25, 0, 4, 10, 3, 0),
+	NS2_PIN_GROUP(uart1_ri_dtr, 0, 4, 10, 3, 1),
+
+	NS2_PIN_GROUP(gpio_26_27, 0, 4, 8, 3, 0),
+	NS2_PIN_GROUP(uart1_rts_cts, 0, 4, 8, 3, 1),
+
+	NS2_PIN_GROUP(gpio_28_29, 0, 4, 6, 3, 0),
+	NS2_PIN_GROUP(uart1_in_out, 0, 4, 6, 3, 1),
+
+	NS2_PIN_GROUP(gpio_30_31, 0, 4, 4, 3, 0),
+	NS2_PIN_GROUP(uart2_rts_cts, 0, 4, 4, 3, 1),
+
+	NS2_PIN_GROUP(pwm_0, 1, 0, 0, 1, 1),
+	NS2_PIN_GROUP(pwm_1, 1, 0, 1, 1, 1),
+	NS2_PIN_GROUP(pwm_2, 1, 0, 2, 1, 1),
+	NS2_PIN_GROUP(pwm_3, 1, 0, 3, 1, 1),
+};
+
+/*
+ * List of groups supported by functions
+ */
+
+static const char * const nand_grps[] = {"nand_grp"};
+
+static const char * const nor_grps[] = {"nor_data_grp", "nor_adv_grp",
+	"nor_addr_0_3_grp", "nor_addr_4_5_grp",	"nor_addr_6_7_grp",
+	"nor_addr_8_9_grp", "nor_addr_10_11_grp", "nor_addr_12_15_grp"};
+
+static const char * const gpio_grps[] = {"gpio_0_1_grp", "gpio_2_5_grp",
+	"gpio_6_7_grp",	"gpio_8_9_grp",	"gpio_10_11_grp", "gpio_12_13_grp",
+	"gpio_14_17_grp", "gpio_18_19_grp", "gpio_20_21_grp", "gpio_22_23_grp",
+	"gpio_24_25_grp", "gpio_26_27_grp", "gpio_28_29_grp",
+	"gpio_30_31_grp"};
+
+static const char * const pcie_grps[] = {"pcie_ab1_clk_wak_grp",
+	"pcie_a3_clk_wak_grp", "pcie_b3_clk_wak_grp", "pcie_b2_clk_wak_grp",
+	"pcie_a2_clk_wak_grp"};
+
+static const char * const uart0_grps[] = {"uart0_modem_grp",
+	"uart0_rts_cts_grp", "uart0_in_out_grp"};
+
+static const char * const uart1_grps[] = {"uart1_ext_clk_grp",
+	"uart1_dcd_dsr_grp", "uart1_ri_dtr_grp", "uart1_rts_cts_grp",
+	"uart1_in_out_grp"};
+
+static const char * const uart2_grps[] = {"uart2_rts_cts_grp"};
+
+static const char * const pwm_grps[] = {"pwm_0_grp", "pwm_1_grp",
+	"pwm_2_grp", "pwm_3_grp"};
+
+#define NS2_PIN_FUNCTION(func)				\
+{							\
+	.name = #func,					\
+	.groups = func ## _grps,			\
+	.num_groups = ARRAY_SIZE(func ## _grps),	\
+}
+
+/*
+ * List of supported functions
+ */
+static const struct ns2_pin_function ns2_pin_functions[] = {
+	NS2_PIN_FUNCTION(nand),
+	NS2_PIN_FUNCTION(nor),
+	NS2_PIN_FUNCTION(gpio),
+	NS2_PIN_FUNCTION(pcie),
+	NS2_PIN_FUNCTION(uart0),
+	NS2_PIN_FUNCTION(uart1),
+	NS2_PIN_FUNCTION(uart2),
+	NS2_PIN_FUNCTION(pwm),
+};
+
+static int ns2_get_groups_count(struct pinctrl_dev *pctrl_dev)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+	return pinctrl->num_groups;
+}
+
+static const char *ns2_get_group_name(struct pinctrl_dev *pctrl_dev,
+				      unsigned int selector)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+	return pinctrl->groups[selector].name;
+}
+
+static int ns2_get_group_pins(struct pinctrl_dev *pctrl_dev,
+			      unsigned int selector, const unsigned int **pins,
+			      unsigned int *num_pins)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+	*pins = pinctrl->groups[selector].pins;
+	*num_pins = pinctrl->groups[selector].num_pins;
+
+	return 0;
+}
+
+static void ns2_pin_dbg_show(struct pinctrl_dev *pctrl_dev,
+			     struct seq_file *s, unsigned int offset)
+{
+	seq_printf(s, " %s", dev_name(pctrl_dev->dev));
+}
+
+static struct pinctrl_ops ns2_pinctrl_ops = {
+	.get_groups_count = ns2_get_groups_count,
+	.get_group_name = ns2_get_group_name,
+	.get_group_pins = ns2_get_group_pins,
+	.pin_dbg_show = ns2_pin_dbg_show,
+	.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+	.dt_free_map = pinctrl_utils_free_map,
+};
+
+static int ns2_get_functions_count(struct pinctrl_dev *pctrl_dev)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+	return pinctrl->num_functions;
+}
+
+static const char *ns2_get_function_name(struct pinctrl_dev *pctrl_dev,
+					 unsigned int selector)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+	return pinctrl->functions[selector].name;
+}
+
+static int ns2_get_function_groups(struct pinctrl_dev *pctrl_dev,
+				   unsigned int selector,
+				   const char * const **groups,
+				   unsigned int * const num_groups)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+	*groups = pinctrl->functions[selector].groups;
+	*num_groups = pinctrl->functions[selector].num_groups;
+
+	return 0;
+}
+
+static int ns2_pinmux_set(struct ns2_pinctrl *pinctrl,
+			  const struct ns2_pin_function *func,
+			  const struct ns2_pin_group *grp,
+			  struct ns2_mux_log *mux_log)
+{
+	const struct ns2_mux *mux = &grp->mux;
+	int i;
+	u32 val, mask;
+	unsigned long flags;
+	void __iomem *base_address;
+
+	for (i = 0; i < NS2_NUM_IOMUX; i++) {
+		if ((mux->shift != mux_log[i].mux.shift) ||
+			(mux->base != mux_log[i].mux.base) ||
+			(mux->offset != mux_log[i].mux.offset))
+			continue;
+
+		/* if this is a new configuration, just do it! */
+		if (!mux_log[i].is_configured)
+			break;
+
+		/*
+		 * IOMUX has been configured previously and one is trying to
+		 * configure it to a different function
+		 */
+		if (mux_log[i].mux.alt != mux->alt) {
+			dev_err(pinctrl->dev,
+				"double configuration error detected!\n");
+			dev_err(pinctrl->dev, "func:%s grp:%s\n",
+				func->name, grp->name);
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+	if (i == NS2_NUM_IOMUX)
+		return -EINVAL;
+
+	mask = mux->mask;
+	mux_log[i].mux.alt = mux->alt;
+	mux_log[i].is_configured = true;
+
+	switch (mux->base) {
+	case NS2_PIN_MUX_BASE0:
+		base_address = pinctrl->base0;
+		break;
+
+	case NS2_PIN_MUX_BASE1:
+		base_address = pinctrl->base1;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&pinctrl->lock, flags);
+	val = readl(base_address + grp->mux.offset);
+	val &= ~(mask << grp->mux.shift);
+	val |= grp->mux.alt << grp->mux.shift;
+	writel(val, (base_address + grp->mux.offset));
+	spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+	return 0;
+}
+
+static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev,
+			     unsigned int func_select, unsigned int grp_select)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+	const struct ns2_pin_function *func;
+	const struct ns2_pin_group *grp;
+
+	if (grp_select >= pinctrl->num_groups ||
+		func_select >= pinctrl->num_functions)
+		return -EINVAL;
+
+	func = &pinctrl->functions[func_select];
+	grp = &pinctrl->groups[grp_select];
+
+	dev_dbg(pctrl_dev->dev, "func:%u name:%s grp:%u name:%s\n",
+		func_select, func->name, grp_select, grp->name);
+
+	dev_dbg(pctrl_dev->dev, "offset:0x%08x shift:%u alt:%u\n",
+		grp->mux.offset, grp->mux.shift, grp->mux.alt);
+
+	return ns2_pinmux_set(pinctrl, func, grp, pinctrl->mux_log);
+}
+
+static int ns2_pin_set_enable(struct pinctrl_dev *pctrldev, unsigned int pin,
+			    u16 enable)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+	struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+	unsigned long flags;
+	u32 val;
+	void __iomem *base_address;
+
+	base_address = pinctrl->pinconf_base;
+	spin_lock_irqsave(&pinctrl->lock, flags);
+	val = readl(base_address + pin_data->pin_conf.offset);
+	val &= ~(NS2_PIN_INPUT_EN_MASK << pin_data->pin_conf.input_en);
+
+	if (!enable)
+		val |= NS2_PIN_INPUT_EN_MASK << pin_data->pin_conf.input_en;
+
+	writel(val, (base_address + pin_data->pin_conf.offset));
+	spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+	dev_dbg(pctrldev->dev, "pin:%u set enable:%d\n", pin, enable);
+	return 0;
+}
+
+static int ns2_pin_get_enable(struct pinctrl_dev *pctrldev, unsigned int pin)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+	struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+	unsigned long flags;
+	int enable;
+
+	spin_lock_irqsave(&pinctrl->lock, flags);
+	enable = readl(pinctrl->pinconf_base + pin_data->pin_conf.offset);
+	enable = (enable >> pin_data->pin_conf.input_en) &
+			NS2_PIN_INPUT_EN_MASK;
+	spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+	if (!enable)
+		enable = NS2_PIN_INPUT_EN_MASK;
+	else
+		enable = 0;
+
+	dev_dbg(pctrldev->dev, "pin:%u get disable:%d\n", pin, enable);
+	return enable;
+}
+
+static int ns2_pin_set_slew(struct pinctrl_dev *pctrldev, unsigned int pin,
+			    u16 slew)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+	struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+	unsigned long flags;
+	u32 val;
+	void __iomem *base_address;
+
+	base_address = pinctrl->pinconf_base;
+	spin_lock_irqsave(&pinctrl->lock, flags);
+	val = readl(base_address + pin_data->pin_conf.offset);
+	val &= ~(NS2_PIN_SRC_MASK << pin_data->pin_conf.src_shift);
+
+	if (slew)
+		val |= NS2_PIN_SRC_MASK << pin_data->pin_conf.src_shift;
+
+	writel(val, (base_address + pin_data->pin_conf.offset));
+	spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+	dev_dbg(pctrldev->dev, "pin:%u set slew:%d\n", pin, slew);
+	return 0;
+}
+
+static int ns2_pin_get_slew(struct pinctrl_dev *pctrldev, unsigned int pin,
+			    u16 *slew)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+	struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&pinctrl->lock, flags);
+	val = readl(pinctrl->pinconf_base + pin_data->pin_conf.offset);
+	*slew = (val >> pin_data->pin_conf.src_shift) & NS2_PIN_SRC_MASK;
+	spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+	dev_dbg(pctrldev->dev, "pin:%u get slew:%d\n", pin, *slew);
+	return 0;
+}
+
+static int ns2_pin_set_pull(struct pinctrl_dev *pctrldev, unsigned int pin,
+			    bool pull_up, bool pull_down)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+	struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+	unsigned long flags;
+	u32 val;
+	void __iomem *base_address;
+
+	base_address = pinctrl->pinconf_base;
+	spin_lock_irqsave(&pinctrl->lock, flags);
+	val = readl(base_address + pin_data->pin_conf.offset);
+	val &= ~(NS2_PIN_PULL_MASK << pin_data->pin_conf.pull_shift);
+
+	if (pull_up == true)
+		val |= NS2_PIN_PULL_UP << pin_data->pin_conf.pull_shift;
+	if (pull_down == true)
+		val |= NS2_PIN_PULL_DOWN << pin_data->pin_conf.pull_shift;
+	writel(val, (base_address + pin_data->pin_conf.offset));
+	spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+	dev_dbg(pctrldev->dev, "pin:%u set pullup:%d pulldown: %d\n",
+		pin, pull_up, pull_down);
+	return 0;
+}
+
+static void ns2_pin_get_pull(struct pinctrl_dev *pctrldev,
+			     unsigned int pin, bool *pull_up,
+			     bool *pull_down)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+	struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&pinctrl->lock, flags);
+	val = readl(pinctrl->pinconf_base + pin_data->pin_conf.offset);
+	val = (val >> pin_data->pin_conf.pull_shift) & NS2_PIN_PULL_MASK;
+	*pull_up = false;
+	*pull_down = false;
+
+	if (val == NS2_PIN_PULL_UP)
+		*pull_up = true;
+
+	if (val == NS2_PIN_PULL_DOWN)
+		*pull_down = true;
+	spin_unlock_irqrestore(&pinctrl->lock, flags);
+}
+
+static int ns2_pin_set_strength(struct pinctrl_dev *pctrldev, unsigned int pin,
+				u16 strength)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+	struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+	u32 val;
+	unsigned long flags;
+	void __iomem *base_address;
+
+	/* make sure drive strength is supported */
+	if (strength < 2 || strength > 16 || (strength % 2))
+		return -ENOTSUPP;
+
+	base_address = pinctrl->pinconf_base;
+	spin_lock_irqsave(&pinctrl->lock, flags);
+	val = readl(base_address + pin_data->pin_conf.offset);
+	val &= ~(NS2_PIN_DRIVE_STRENGTH_MASK << pin_data->pin_conf.drive_shift);
+	val |= ((strength / 2) - 1) << pin_data->pin_conf.drive_shift;
+	writel(val, (base_address + pin_data->pin_conf.offset));
+	spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+	dev_dbg(pctrldev->dev, "pin:%u set drive strength:%d mA\n",
+		pin, strength);
+	return 0;
+}
+
+static int ns2_pin_get_strength(struct pinctrl_dev *pctrldev, unsigned int pin,
+				 u16 *strength)
+{
+	struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+	struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pinctrl->lock, flags);
+	val = readl(pinctrl->pinconf_base + pin_data->pin_conf.offset);
+	*strength = (val >> pin_data->pin_conf.drive_shift) &
+					NS2_PIN_DRIVE_STRENGTH_MASK;
+	*strength = (*strength + 1) * 2;
+	spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+	dev_dbg(pctrldev->dev, "pin:%u get drive strength:%d mA\n",
+		pin, *strength);
+	return 0;
+}
+
+static int ns2_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+			      unsigned long *config)
+{
+	struct ns2_pin *pin_data = pctldev->desc->pins[pin].drv_data;
+	enum pin_config_param param = pinconf_to_config_param(*config);
+	bool pull_up, pull_down;
+	u16 arg = 0;
+	int ret;
+
+	if (pin_data->pin_conf.base == -1)
+		return -ENOTSUPP;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		ns2_pin_get_pull(pctldev, pin, &pull_up, &pull_down);
+		if ((pull_up == false) && (pull_down == false))
+			return 0;
+		else
+			return -EINVAL;
+
+	case PIN_CONFIG_BIAS_PULL_UP:
+		ns2_pin_get_pull(pctldev, pin, &pull_up, &pull_down);
+		if (pull_up)
+			return 0;
+		else
+			return -EINVAL;
+
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		ns2_pin_get_pull(pctldev, pin, &pull_up, &pull_down);
+		if (pull_down)
+			return 0;
+		else
+			return -EINVAL;
+
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		ret = ns2_pin_get_strength(pctldev, pin, &arg);
+		if (ret)
+			return ret;
+		*config = pinconf_to_config_packed(param, arg);
+		return 0;
+
+	case PIN_CONFIG_SLEW_RATE:
+		ret = ns2_pin_get_slew(pctldev, pin, &arg);
+		if (ret)
+			return ret;
+		*config = pinconf_to_config_packed(param, arg);
+		return 0;
+
+	case PIN_CONFIG_INPUT_ENABLE:
+		ret = ns2_pin_get_enable(pctldev, pin);
+		if (ret)
+			return 0;
+		else
+			return -EINVAL;
+
+	default:
+		return -ENOTSUPP;
+	}
+}
+
+static int ns2_pin_config_set(struct pinctrl_dev *pctrldev, unsigned int pin,
+			      unsigned long *configs, unsigned int num_configs)
+{
+	struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+	enum pin_config_param param;
+	unsigned int i;
+	u16 arg;
+	int ret = -ENOTSUPP;
+
+	if (pin_data->pin_conf.base == -1)
+		return -ENOTSUPP;
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+			ret = ns2_pin_set_pull(pctrldev, pin, false, false);
+			if (ret < 0)
+				goto out;
+			break;
+
+		case PIN_CONFIG_BIAS_PULL_UP:
+			ret = ns2_pin_set_pull(pctrldev, pin, true, false);
+			if (ret < 0)
+				goto out;
+			break;
+
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			ret = ns2_pin_set_pull(pctrldev, pin, false, true);
+			if (ret < 0)
+				goto out;
+			break;
+
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			ret = ns2_pin_set_strength(pctrldev, pin, arg);
+			if (ret < 0)
+				goto out;
+			break;
+
+		case PIN_CONFIG_SLEW_RATE:
+			ret = ns2_pin_set_slew(pctrldev, pin, arg);
+			if (ret < 0)
+				goto out;
+			break;
+
+		case PIN_CONFIG_INPUT_ENABLE:
+			ret = ns2_pin_set_enable(pctrldev, pin, arg);
+			if (ret < 0)
+				goto out;
+			break;
+
+		default:
+			dev_err(pctrldev->dev, "invalid configuration\n");
+			return -ENOTSUPP;
+		}
+	}
+out:
+	return ret;
+}
+static struct pinmux_ops ns2_pinmux_ops = {
+	.get_functions_count = ns2_get_functions_count,
+	.get_function_name = ns2_get_function_name,
+	.get_function_groups = ns2_get_function_groups,
+	.set_mux = ns2_pinmux_enable,
+};
+
+static const struct pinconf_ops ns2_pinconf_ops = {
+	.is_generic = true,
+	.pin_config_get = ns2_pin_config_get,
+	.pin_config_set = ns2_pin_config_set,
+};
+
+static struct pinctrl_desc ns2_pinctrl_desc = {
+	.name = "ns2-pinmux",
+	.pctlops = &ns2_pinctrl_ops,
+	.pmxops = &ns2_pinmux_ops,
+	.confops = &ns2_pinconf_ops,
+};
+
+static int ns2_mux_log_init(struct ns2_pinctrl *pinctrl)
+{
+	struct ns2_mux_log *log;
+	unsigned int i;
+
+	pinctrl->mux_log = devm_kcalloc(pinctrl->dev, NS2_NUM_IOMUX,
+					sizeof(struct ns2_mux_log),
+					GFP_KERNEL);
+	if (!pinctrl->mux_log)
+		return -ENOMEM;
+
+	for (i = 0; i < NS2_NUM_IOMUX; i++)
+		pinctrl->mux_log[i].is_configured = false;
+	/* Group 0 uses bit 31 in the IOMUX_PAD_FUNCTION_0 register */
+	log = &pinctrl->mux_log[0];
+	log->mux.base = NS2_PIN_MUX_BASE0;
+	log->mux.offset = 0;
+	log->mux.shift = 31;
+	log->mux.alt = 0;
+
+	/*
+	 * Groups 1 through 14 use two bits each in the
+	 * IOMUX_PAD_FUNCTION_1 register starting with
+	 * bit position 30.
+	 */
+	for (i = 1; i < (NS2_NUM_IOMUX - NS2_NUM_PWM_MUX); i++) {
+		log = &pinctrl->mux_log[i];
+		log->mux.base = NS2_PIN_MUX_BASE0;
+		log->mux.offset = NS2_MUX_PAD_FUNC1_OFFSET;
+		log->mux.shift = 32 - (i * 2);
+		log->mux.alt = 0;
+	}
+
+	/*
+	 * Groups 15 through 18 use one bit each in the
+	 * AUX_SEL register.
+	 */
+	for (i = 0; i < NS2_NUM_PWM_MUX; i++) {
+		log = &pinctrl->mux_log[(NS2_NUM_IOMUX - NS2_NUM_PWM_MUX) + i];
+		log->mux.base = NS2_PIN_MUX_BASE1;
+		log->mux.offset = 0;
+		log->mux.shift = i;
+		log->mux.alt =  0;
+	}
+	return 0;
+}
+
+static int ns2_pinmux_probe(struct platform_device *pdev)
+{
+	struct ns2_pinctrl *pinctrl;
+	struct resource *res;
+	int i, ret;
+	struct pinctrl_pin_desc *pins;
+	unsigned int num_pins = ARRAY_SIZE(ns2_pins);
+
+	pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL);
+	if (!pinctrl)
+		return -ENOMEM;
+
+	pinctrl->dev = &pdev->dev;
+	platform_set_drvdata(pdev, pinctrl);
+	spin_lock_init(&pinctrl->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pinctrl->base0)) {
+		dev_err(&pdev->dev, "unable to map I/O space\n");
+		return PTR_ERR(pinctrl->base0);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+					resource_size(res));
+	if (!pinctrl->base1) {
+		dev_err(&pdev->dev, "unable to map I/O space\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	pinctrl->pinconf_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pinctrl->pinconf_base)) {
+		dev_err(&pdev->dev, "unable to map I/O space\n");
+		return PTR_ERR(pinctrl->pinconf_base);
+	}
+
+	ret = ns2_mux_log_init(pinctrl);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to initialize IOMUX log\n");
+		return ret;
+	}
+
+	pins = devm_kcalloc(&pdev->dev, num_pins, sizeof(*pins), GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	for (i = 0; i < num_pins; i++) {
+		pins[i].number = ns2_pins[i].pin;
+		pins[i].name = ns2_pins[i].name;
+		pins[i].drv_data = &ns2_pins[i];
+	}
+
+	pinctrl->groups = ns2_pin_groups;
+	pinctrl->num_groups = ARRAY_SIZE(ns2_pin_groups);
+	pinctrl->functions = ns2_pin_functions;
+	pinctrl->num_functions = ARRAY_SIZE(ns2_pin_functions);
+	ns2_pinctrl_desc.pins = pins;
+	ns2_pinctrl_desc.npins = num_pins;
+
+	pinctrl->pctl = pinctrl_register(&ns2_pinctrl_desc, &pdev->dev,
+			pinctrl);
+	if (IS_ERR(pinctrl->pctl)) {
+		dev_err(&pdev->dev, "unable to register IOMUX pinctrl\n");
+		return PTR_ERR(pinctrl->pctl);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id ns2_pinmux_of_match[] = {
+	{.compatible = "brcm,ns2-pinmux"},
+	{ }
+};
+
+static struct platform_driver ns2_pinmux_driver = {
+	.driver = {
+		.name = "ns2-pinmux",
+		.of_match_table = ns2_pinmux_of_match,
+	},
+	.probe = ns2_pinmux_probe,
+};
+
+static int __init ns2_pinmux_init(void)
+{
+	return platform_driver_register(&ns2_pinmux_driver);
+}
+arch_initcall(ns2_pinmux_init);
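
The drive-strength helpers in this new driver pack the requested current into a 3-bit field: 2-16 mA in 2 mA steps map to register values 0-7 via (mA / 2) - 1, and ns2_pin_get_strength() reverses that with (field + 1) * 2. A stand-alone sketch of the same encoding (not part of the patch), checking the round trip:

	#include <assert.h>

	/* Same 2..16 mA, 2 mA step encoding used by ns2_pin_set_strength() */
	static unsigned int ns2_ma_to_field(unsigned int ma)
	{
		return (ma / 2) - 1;		/* 2 mA -> 0, ..., 16 mA -> 7 */
	}

	static unsigned int ns2_field_to_ma(unsigned int field)
	{
		return (field + 1) * 2;		/* inverse of the above */
	}

	int main(void)
	{
		unsigned int ma;

		for (ma = 2; ma <= 16; ma += 2)
			assert(ns2_ma_to_field(ma) <= 7 &&
			       ns2_field_to_ma(ns2_ma_to_field(ma)) == ma);
		return 0;
	}
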
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index ac90043..a8b37a9 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -363,7 +363,7 @@
 	.get_groups_count = nsp_get_groups_count,
 	.get_group_name = nsp_get_group_name,
 	.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int nsp_gpio_set_slew(struct nsp_gpio *chip, unsigned gpio, u16 slew)
@@ -609,7 +609,7 @@
 	pctldesc->npins = gc->ngpio;
 	pctldesc->confops = &nsp_pconf_ops;
 
-	chip->pctl = pinctrl_register(pctldesc, chip->dev, chip);
+	chip->pctl = devm_pinctrl_register(chip->dev, pctldesc, chip);
 	if (IS_ERR(chip->pctl)) {
 		dev_err(chip->dev, "unable to register pinctrl device\n");
 		return PTR_ERR(chip->pctl);
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index 46f2b48..8f0dc02 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -104,7 +104,7 @@
 	.get_groups_count	= &berlin_pinctrl_get_group_count,
 	.get_group_name		= &berlin_pinctrl_get_group_name,
 	.dt_node_to_map		= &berlin_pinctrl_dt_node_to_map,
-	.dt_free_map		= &pinctrl_utils_dt_free_map,
+	.dt_free_map		= &pinctrl_utils_free_map,
 };
 
 static int berlin_pinmux_get_functions_count(struct pinctrl_dev *pctrl_dev)
@@ -316,7 +316,8 @@
 		return ret;
 	}
 
-	pctrl->pctrl_dev = pinctrl_register(&berlin_pctrl_desc, dev, pctrl);
+	pctrl->pctrl_dev = devm_pinctrl_register(dev, &berlin_pctrl_desc,
+						 pctrl);
 	if (IS_ERR(pctrl->pctrl_dev)) {
 		dev_err(dev, "failed to register pinctrl driver\n");
 		return PTR_ERR(pctrl->pctrl_dev);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index f67a8b7..98d2a1b 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1872,6 +1872,69 @@
 }
 EXPORT_SYMBOL_GPL(pinctrl_unregister);
 
+static void devm_pinctrl_dev_release(struct device *dev, void *res)
+{
+	struct pinctrl_dev *pctldev = *(struct pinctrl_dev **)res;
+
+	pinctrl_unregister(pctldev);
+}
+
+static int devm_pinctrl_dev_match(struct device *dev, void *res, void *data)
+{
+	struct pctldev **r = res;
+
+	if (WARN_ON(!r || !*r))
+		return 0;
+
+	return *r == data;
+}
+
+/**
+ * devm_pinctrl_register() - Resource managed version of pinctrl_register().
+ * @dev: parent device for this pin controller
+ * @pctldesc: descriptor for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ *
+ * Returns an error pointer if pin controller registration failed. Otherwise
+ * it returns a valid pinctrl handle.
+ *
+ * The pinctrl device will be automatically released when the device is unbound.
+ */
+struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
+					  struct pinctrl_desc *pctldesc,
+					  void *driver_data)
+{
+	struct pinctrl_dev **ptr, *pctldev;
+
+	ptr = devres_alloc(devm_pinctrl_dev_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	pctldev = pinctrl_register(pctldesc, dev, driver_data);
+	if (IS_ERR(pctldev)) {
+		devres_free(ptr);
+		return pctldev;
+	}
+
+	*ptr = pctldev;
+	devres_add(dev, ptr);
+
+	return pctldev;
+}
+EXPORT_SYMBOL_GPL(devm_pinctrl_register);
+
+/**
+ * devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister().
+ * @dev: device for which the resource was allocated
+ * @pctldev: the pinctrl device to unregister.
+ */
+void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev)
+{
+	WARN_ON(devres_release(dev, devm_pinctrl_dev_release,
+			       devm_pinctrl_dev_match, pctldev));
+}
+EXPORT_SYMBOL_GPL(devm_pinctrl_unregister);
+
 static int __init pinctrl_init(void)
 {
 	pr_info("initialized pinctrl subsystem\n");
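
With the managed registration helpers above in place, a pin controller probe no longer needs to unwind registration by hand in its error paths or provide a .remove callback for it; devres drops the pinctrl device when the parent device is unbound. The conversions throughout the rest of this series follow roughly the pattern below (hypothetical foo driver, with foo_pinctrl_desc assumed to be defined elsewhere):

	static int foo_pinctrl_probe(struct platform_device *pdev)
	{
		struct foo_pinctrl *foo;

		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		/* Unregistered automatically when &pdev->dev is unbound */
		foo->pctl = devm_pinctrl_register(&pdev->dev, &foo_pinctrl_desc, foo);
		if (IS_ERR(foo->pctl))
			return PTR_ERR(foo->pctl);

		return 0;
	}
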
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index debe121..fc8cbf6 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -2,6 +2,7 @@
 	bool
 	select PINMUX
 	select PINCONF
+	select REGMAP
 
 config PINCTRL_IMX1_CORE
 	bool
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 4621051..47ccfcc 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -762,19 +762,18 @@
 
 	if (of_property_read_bool(dev_np, "fsl,input-sel")) {
 		np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
-		if (np) {
-			ipctl->input_sel_base = of_iomap(np, 0);
-			if (IS_ERR(ipctl->input_sel_base)) {
-				of_node_put(np);
-				dev_err(&pdev->dev,
-					"iomuxc input select base address not found\n");
-				return PTR_ERR(ipctl->input_sel_base);
-			}
-		} else {
+		if (!np) {
 			dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
 			return -EINVAL;
 		}
+
+		ipctl->input_sel_base = of_iomap(np, 0);
 		of_node_put(np);
+		if (!ipctl->input_sel_base) {
+			dev_err(&pdev->dev,
+				"iomuxc input select base address not found\n");
+			return -ENOMEM;
+		}
 	}
 
 	imx_pinctrl_desc.name = dev_name(&pdev->dev);
@@ -790,7 +789,7 @@
 	ipctl->info = info;
 	ipctl->dev = info->dev;
 	platform_set_drvdata(pdev, ipctl);
-	ipctl->pctl = pinctrl_register(&imx_pinctrl_desc, &pdev->dev, ipctl);
+	ipctl->pctl = devm_pinctrl_register(&pdev->dev, &imx_pinctrl_desc, ipctl);
 	if (IS_ERR(ipctl->pctl)) {
 		dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
 		return PTR_ERR(ipctl->pctl);
@@ -800,12 +799,3 @@
 
 	return 0;
 }
-
-int imx_pinctrl_remove(struct platform_device *pdev)
-{
-	struct imx_pinctrl *ipctl = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(ipctl->pctl);
-
-	return 0;
-}
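
The rework above also corrects the error handling for of_iomap(): it returns NULL on failure rather than an ERR_PTR() value, so the old IS_ERR() test could never trigger. Sketched out of context, the corrected shape is:

	void __iomem *base;

	base = of_iomap(np, 0);		/* NULL on failure, never ERR_PTR */
	of_node_put(np);
	if (!base)
		return -ENOMEM;
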
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 3b8bd81..8af8aa2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -99,5 +99,4 @@
 
 int imx_pinctrl_probe(struct platform_device *pdev,
 			struct imx_pinctrl_soc_info *info);
-int imx_pinctrl_remove(struct platform_device *pdev);
 #endif /* __DRIVERS_PINCTRL_IMX_H */
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index acaf84c..b4400cb 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -635,7 +635,7 @@
 	ipctl->info = info;
 	ipctl->dev = info->dev;
 	platform_set_drvdata(pdev, ipctl);
-	ipctl->pctl = pinctrl_register(pctl_desc, &pdev->dev, ipctl);
+	ipctl->pctl = devm_pinctrl_register(&pdev->dev, pctl_desc, ipctl);
 	if (IS_ERR(ipctl->pctl)) {
 		dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
 		return PTR_ERR(ipctl->pctl);
@@ -652,12 +652,3 @@
 
 	return 0;
 }
-
-int imx1_pinctrl_core_remove(struct platform_device *pdev)
-{
-	struct imx1_pinctrl *ipctl = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(ipctl->pctl);
-
-	return 0;
-}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c
index d3bacb7..0472345 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.c
@@ -269,7 +269,6 @@
 		.name		= "imx1-pinctrl",
 		.of_match_table	= imx1_pinctrl_of_match,
 	},
-	.remove	= imx1_pinctrl_core_remove,
 };
 module_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe);
 
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.h b/drivers/pinctrl/freescale/pinctrl-imx1.h
index 692a54c..1740743 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.h
@@ -69,5 +69,4 @@
 
 int imx1_pinctrl_core_probe(struct platform_device *pdev,
 			struct imx1_pinctrl_soc_info *info);
-int imx1_pinctrl_core_remove(struct platform_device *pdev);
 #endif /* __DRIVERS_PINCTRL_IMX1_H */
diff --git a/drivers/pinctrl/freescale/pinctrl-imx21.c b/drivers/pinctrl/freescale/pinctrl-imx21.c
index 9d9aca3..aa1221f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx21.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx21.c
@@ -332,7 +332,6 @@
 		.name		= "imx21-pinctrl",
 		.of_match_table	= imx21_pinctrl_of_match,
 	},
-	.remove	= imx1_pinctrl_core_remove,
 };
 module_platform_driver_probe(imx21_pinctrl_driver, imx21_pinctrl_probe);
 
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index 293ed43..81ad546 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -331,7 +331,6 @@
 		.of_match_table = of_match_ptr(imx25_pinctrl_of_match),
 	},
 	.probe = imx25_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx25_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c
index a461d588..f828fbb 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx27.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx27.c
@@ -405,7 +405,6 @@
 		.of_match_table = of_match_ptr(imx27_pinctrl_of_match),
 	},
 	.probe = imx27_pinctrl_probe,
-	.remove = imx1_pinctrl_core_remove,
 };
 
 static int __init imx27_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c
index 9109c10..13eb224 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx35.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx35.c
@@ -1021,7 +1021,6 @@
 		.of_match_table = imx35_pinctrl_of_match,
 	},
 	.probe = imx35_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx35_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index 8acc4d9..95a36c8 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -408,7 +408,6 @@
 		.of_match_table = of_match_ptr(imx50_pinctrl_of_match),
 	},
 	.probe = imx50_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx50_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c
index 8dec494..0863e527 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx51.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx51.c
@@ -784,7 +784,6 @@
 		.of_match_table = imx51_pinctrl_of_match,
 	},
 	.probe = imx51_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx51_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index d39dfd6..64c9cbe 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -471,7 +471,6 @@
 		.of_match_table = imx53_pinctrl_of_match,
 	},
 	.probe = imx53_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx53_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index 5a2cdb0..de17bac 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -477,7 +477,6 @@
 		.of_match_table = imx6dl_pinctrl_of_match,
 	},
 	.probe = imx6dl_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx6dl_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 7d50a36..55cd8a0 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -483,7 +483,6 @@
 		.of_match_table = imx6q_pinctrl_of_match,
 	},
 	.probe = imx6q_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx6q_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index e27d17f..bf455b8 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -384,7 +384,6 @@
 		.of_match_table = imx6sl_pinctrl_of_match,
 	},
 	.probe = imx6sl_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx6sl_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index 117180c..84118c3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -387,7 +387,6 @@
 		.of_match_table = of_match_ptr(imx6sx_pinctrl_of_match),
 	},
 	.probe = imx6sx_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx6sx_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
index 78627c7..c707fdd 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6ul.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -303,7 +303,6 @@
 		.of_match_table = of_match_ptr(imx6ul_pinctrl_of_match),
 	},
 	.probe = imx6ul_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx6ul_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index 1c89613..d30d91f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -395,7 +395,6 @@
 		.of_match_table = of_match_ptr(imx7d_pinctrl_of_match),
 	},
 	.probe = imx7d_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init imx7d_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index 587d1ff..6d81be0 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -318,7 +318,6 @@
 		.of_match_table = vf610_pinctrl_of_match,
 	},
 	.probe = vf610_pinctrl_probe,
-	.remove = imx_pinctrl_remove,
 };
 
 static int __init vf610_pinctrl_init(void)
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 4d2efad..1c74e03 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -6,6 +6,9 @@
 	bool "Intel Baytrail GPIO pin control"
 	depends on GPIOLIB && ACPI
 	select GPIOLIB_IRQCHIP
+	select PINMUX
+	select PINCONF
+	select GENERIC_PINCONF
 	help
 	  driver for memory mapped GPIO functionality on Intel Baytrail
 	  platforms. Supports 3 banks with 102, 28 and 44 gpios.
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 21b79a4..55182fc 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/gpio/driver.h>
 #include <linux/acpi.h>
 #include <linux/platform_device.h>
@@ -27,6 +28,9 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
 
 /* memory mapped register offsets */
 #define BYT_CONF0_REG		0x000
@@ -34,6 +38,7 @@
 #define BYT_VAL_REG		0x008
 #define BYT_DFT_REG		0x00c
 #define BYT_INT_STAT_REG	0x800
+#define BYT_DEBOUNCE_REG	0x9d0
 
 /* BYT_CONF0_REG register bits */
 #define BYT_IODEN		BIT(31)
@@ -41,6 +46,7 @@
 #define BYT_TRIG_NEG		BIT(26)
 #define BYT_TRIG_POS		BIT(25)
 #define BYT_TRIG_LVL		BIT(24)
+#define BYT_DEBOUNCE_EN		BIT(20)
 #define BYT_PULL_STR_SHIFT	9
 #define BYT_PULL_STR_MASK	(3 << BYT_PULL_STR_SHIFT)
 #define BYT_PULL_STR_2K		(0 << BYT_PULL_STR_SHIFT)
@@ -65,6 +71,16 @@
 				 BYT_PIN_MUX)
 #define BYT_VAL_RESTORE_MASK	(BYT_DIR_MASK | BYT_LEVEL)
 
+/* BYT_DEBOUNCE_REG bits */
+#define BYT_DEBOUNCE_PULSE_MASK		0x7
+#define BYT_DEBOUNCE_PULSE_375US	1
+#define BYT_DEBOUNCE_PULSE_750US	2
+#define BYT_DEBOUNCE_PULSE_1500US	3
+#define BYT_DEBOUNCE_PULSE_3MS		4
+#define BYT_DEBOUNCE_PULSE_6MS		5
+#define BYT_DEBOUNCE_PULSE_12MS		6
+#define BYT_DEBOUNCE_PULSE_24MS		7
+
 #define BYT_NGPIO_SCORE		102
 #define BYT_NGPIO_NCORE		28
 #define BYT_NGPIO_SUS		44
@@ -74,18 +90,227 @@
 #define BYT_SUS_ACPI_UID	"3"
 
 /*
- * Baytrail gpio controller consist of three separate sub-controllers called
- * SCORE, NCORE and SUS. The sub-controllers are identified by their acpi UID.
- *
- * GPIO numbering is _not_ ordered meaning that gpio # 0 in ACPI namespace does
- * _not_ correspond to the first gpio register at controller's gpio base.
- * There is no logic or pattern in mapping gpio numbers to registers (pads) so
- * each sub-controller needs to have its own mapping table
+ * This is the function value most pins have for GPIO muxing. If the value
+ * differs from the default one, it must be explicitly mentioned. Otherwise, the
+ * pin control implementation will set the muxing value to default GPIO if it
+ * does not find a match for the requested function.
  */
+#define BYT_DEFAULT_GPIO_MUX	0
 
-/* score_pins[gpio_nr] = pad_nr */
+struct byt_gpio_pin_context {
+	u32 conf0;
+	u32 val;
+};
 
-static unsigned const score_pins[BYT_NGPIO_SCORE] = {
+struct byt_simple_func_mux {
+	const char *name;
+	unsigned short func;
+};
+
+struct byt_mixed_func_mux {
+	const char *name;
+	const unsigned short *func_values;
+};
+
+struct byt_pingroup {
+	const char *name;
+	const unsigned int *pins;
+	size_t npins;
+	unsigned short has_simple_funcs;
+	union {
+		const struct byt_simple_func_mux *simple_funcs;
+		const struct byt_mixed_func_mux *mixed_funcs;
+	};
+	size_t nfuncs;
+};
+
+struct byt_function {
+	const char *name;
+	const char * const *groups;
+	size_t ngroups;
+};
+
+struct byt_community {
+	unsigned int pin_base;
+	size_t npins;
+	const unsigned int *pad_map;
+	void __iomem *reg_base;
+};
+
+#define SIMPLE_FUNC(n, f)	\
+	{			\
+		.name	= (n),	\
+		.func	= (f),	\
+	}
+#define MIXED_FUNC(n, f)		\
+	{				\
+		.name		= (n),	\
+		.func_values	= (f),	\
+	}
+
+#define PIN_GROUP_SIMPLE(n, p, f)				\
+	{							\
+		.name			= (n),			\
+		.pins			= (p),			\
+		.npins			= ARRAY_SIZE((p)),	\
+		.has_simple_funcs	= 1,		\
+		.simple_funcs		= (f),			\
+		.nfuncs			= ARRAY_SIZE((f)),	\
+	}
+#define PIN_GROUP_MIXED(n, p, f)				\
+	{							\
+		.name			= (n),			\
+		.pins			= (p),			\
+		.npins			= ARRAY_SIZE((p)),	\
+		.has_simple_funcs	= 0,			\
+		.mixed_funcs		= (f),			\
+		.nfuncs			= ARRAY_SIZE((f)),	\
+	}
+
+#define FUNCTION(n, g)					\
+	{						\
+		.name		= (n),			\
+		.groups		= (g),			\
+		.ngroups	= ARRAY_SIZE((g)),	\
+	}
+
+#define COMMUNITY(p, n, map)		\
+	{				\
+		.pin_base	= (p),	\
+		.npins		= (n),	\
+		.pad_map	= (map),\
+	}
+
+struct byt_pinctrl_soc_data {
+	const char *uid;
+	const struct pinctrl_pin_desc *pins;
+	size_t npins;
+	const struct byt_pingroup *groups;
+	size_t ngroups;
+	const struct byt_function *functions;
+	size_t nfunctions;
+	const struct byt_community *communities;
+	size_t ncommunities;
+};
+
+struct byt_gpio {
+	struct gpio_chip chip;
+	struct platform_device *pdev;
+	struct pinctrl_dev *pctl_dev;
+	struct pinctrl_desc pctl_desc;
+	raw_spinlock_t lock;
+	const struct byt_pinctrl_soc_data *soc_data;
+	struct byt_community *communities_copy;
+	struct byt_gpio_pin_context *saved_context;
+};
+
+/* SCORE pins, aka GPIOC_<pin_no> or GPIO_S0_SC[<pin_no>] */
+static const struct pinctrl_pin_desc byt_score_pins[] = {
+	PINCTRL_PIN(0, "SATA_GP0"),
+	PINCTRL_PIN(1, "SATA_GP1"),
+	PINCTRL_PIN(2, "SATA_LED#"),
+	PINCTRL_PIN(3, "PCIE_CLKREQ0"),
+	PINCTRL_PIN(4, "PCIE_CLKREQ1"),
+	PINCTRL_PIN(5, "PCIE_CLKREQ2"),
+	PINCTRL_PIN(6, "PCIE_CLKREQ3"),
+	PINCTRL_PIN(7, "SD3_WP"),
+	PINCTRL_PIN(8, "HDA_RST"),
+	PINCTRL_PIN(9, "HDA_SYNC"),
+	PINCTRL_PIN(10, "HDA_CLK"),
+	PINCTRL_PIN(11, "HDA_SDO"),
+	PINCTRL_PIN(12, "HDA_SDI0"),
+	PINCTRL_PIN(13, "HDA_SDI1"),
+	PINCTRL_PIN(14, "GPIO_S0_SC14"),
+	PINCTRL_PIN(15, "GPIO_S0_SC15"),
+	PINCTRL_PIN(16, "MMC1_CLK"),
+	PINCTRL_PIN(17, "MMC1_D0"),
+	PINCTRL_PIN(18, "MMC1_D1"),
+	PINCTRL_PIN(19, "MMC1_D2"),
+	PINCTRL_PIN(20, "MMC1_D3"),
+	PINCTRL_PIN(21, "MMC1_D4"),
+	PINCTRL_PIN(22, "MMC1_D5"),
+	PINCTRL_PIN(23, "MMC1_D6"),
+	PINCTRL_PIN(24, "MMC1_D7"),
+	PINCTRL_PIN(25, "MMC1_CMD"),
+	PINCTRL_PIN(26, "MMC1_RST"),
+	PINCTRL_PIN(27, "SD2_CLK"),
+	PINCTRL_PIN(28, "SD2_D0"),
+	PINCTRL_PIN(29, "SD2_D1"),
+	PINCTRL_PIN(30, "SD2_D2"),
+	PINCTRL_PIN(31, "SD2_D3_CD"),
+	PINCTRL_PIN(32, "SD2_CMD"),
+	PINCTRL_PIN(33, "SD3_CLK"),
+	PINCTRL_PIN(34, "SD3_D0"),
+	PINCTRL_PIN(35, "SD3_D1"),
+	PINCTRL_PIN(36, "SD3_D2"),
+	PINCTRL_PIN(37, "SD3_D3"),
+	PINCTRL_PIN(38, "SD3_CD"),
+	PINCTRL_PIN(39, "SD3_CMD"),
+	PINCTRL_PIN(40, "SD3_1P8EN"),
+	PINCTRL_PIN(41, "SD3_PWREN#"),
+	PINCTRL_PIN(42, "ILB_LPC_AD0"),
+	PINCTRL_PIN(43, "ILB_LPC_AD1"),
+	PINCTRL_PIN(44, "ILB_LPC_AD2"),
+	PINCTRL_PIN(45, "ILB_LPC_AD3"),
+	PINCTRL_PIN(46, "ILB_LPC_FRAME"),
+	PINCTRL_PIN(47, "ILB_LPC_CLK0"),
+	PINCTRL_PIN(48, "ILB_LPC_CLK1"),
+	PINCTRL_PIN(49, "ILB_LPC_CLKRUN"),
+	PINCTRL_PIN(50, "ILB_LPC_SERIRQ"),
+	PINCTRL_PIN(51, "PCU_SMB_DATA"),
+	PINCTRL_PIN(52, "PCU_SMB_CLK"),
+	PINCTRL_PIN(53, "PCU_SMB_ALERT"),
+	PINCTRL_PIN(54, "ILB_8254_SPKR"),
+	PINCTRL_PIN(55, "GPIO_S0_SC55"),
+	PINCTRL_PIN(56, "GPIO_S0_SC56"),
+	PINCTRL_PIN(57, "GPIO_S0_SC57"),
+	PINCTRL_PIN(58, "GPIO_S0_SC58"),
+	PINCTRL_PIN(59, "GPIO_S0_SC59"),
+	PINCTRL_PIN(60, "GPIO_S0_SC60"),
+	PINCTRL_PIN(61, "GPIO_S0_SC61"),
+	PINCTRL_PIN(62, "LPE_I2S2_CLK"),
+	PINCTRL_PIN(63, "LPE_I2S2_FRM"),
+	PINCTRL_PIN(64, "LPE_I2S2_DATAIN"),
+	PINCTRL_PIN(65, "LPE_I2S2_DATAOUT"),
+	PINCTRL_PIN(66, "SIO_SPI_CS"),
+	PINCTRL_PIN(67, "SIO_SPI_MISO"),
+	PINCTRL_PIN(68, "SIO_SPI_MOSI"),
+	PINCTRL_PIN(69, "SIO_SPI_CLK"),
+	PINCTRL_PIN(70, "SIO_UART1_RXD"),
+	PINCTRL_PIN(71, "SIO_UART1_TXD"),
+	PINCTRL_PIN(72, "SIO_UART1_RTS"),
+	PINCTRL_PIN(73, "SIO_UART1_CTS"),
+	PINCTRL_PIN(74, "SIO_UART2_RXD"),
+	PINCTRL_PIN(75, "SIO_UART2_TXD"),
+	PINCTRL_PIN(76, "SIO_UART2_RTS"),
+	PINCTRL_PIN(77, "SIO_UART2_CTS"),
+	PINCTRL_PIN(78, "SIO_I2C0_DATA"),
+	PINCTRL_PIN(79, "SIO_I2C0_CLK"),
+	PINCTRL_PIN(80, "SIO_I2C1_DATA"),
+	PINCTRL_PIN(81, "SIO_I2C1_CLK"),
+	PINCTRL_PIN(82, "SIO_I2C2_DATA"),
+	PINCTRL_PIN(83, "SIO_I2C2_CLK"),
+	PINCTRL_PIN(84, "SIO_I2C3_DATA"),
+	PINCTRL_PIN(85, "SIO_I2C3_CLK"),
+	PINCTRL_PIN(86, "SIO_I2C4_DATA"),
+	PINCTRL_PIN(87, "SIO_I2C4_CLK"),
+	PINCTRL_PIN(88, "SIO_I2C5_DATA"),
+	PINCTRL_PIN(89, "SIO_I2C5_CLK"),
+	PINCTRL_PIN(90, "SIO_I2C6_DATA"),
+	PINCTRL_PIN(91, "SIO_I2C6_CLK"),
+	PINCTRL_PIN(92, "GPIO_S0_SC92"),
+	PINCTRL_PIN(93, "GPIO_S0_SC93"),
+	PINCTRL_PIN(94, "SIO_PWM0"),
+	PINCTRL_PIN(95, "SIO_PWM1"),
+	PINCTRL_PIN(96, "PMC_PLT_CLK0"),
+	PINCTRL_PIN(97, "PMC_PLT_CLK1"),
+	PINCTRL_PIN(98, "PMC_PLT_CLK2"),
+	PINCTRL_PIN(99, "PMC_PLT_CLK3"),
+	PINCTRL_PIN(100, "PMC_PLT_CLK4"),
+	PINCTRL_PIN(101, "PMC_PLT_CLK5"),
+};
+
+static const unsigned int byt_score_pins_map[BYT_NGPIO_SCORE] = {
 	85, 89, 93, 96, 99, 102, 98, 101, 34, 37,
 	36, 38, 39, 35, 40, 84, 62, 61, 64, 59,
 	54, 56, 60, 55, 63, 57, 51, 50, 53, 47,
@@ -99,13 +324,263 @@
 	97, 100,
 };
 
-static unsigned const ncore_pins[BYT_NGPIO_NCORE] = {
-	19, 18, 17, 20, 21, 22, 24, 25, 23, 16,
-	14, 15, 12, 26, 27, 1, 4, 8, 11, 0,
-	3, 6, 10, 13, 2, 5, 9, 7,
+/* SCORE groups */
+static const unsigned int byt_score_uart1_pins[] = { 70, 71, 72, 73 };
+static const unsigned int byt_score_uart2_pins[] = { 74, 75, 76, 77 };
+static const struct byt_simple_func_mux byt_score_uart_mux[] = {
+	SIMPLE_FUNC("uart", 1),
 };
 
-static unsigned const sus_pins[BYT_NGPIO_SUS] = {
+static const unsigned int byt_score_pwm0_pins[] = { 94 };
+static const unsigned int byt_score_pwm1_pins[] = { 95 };
+static const struct byt_simple_func_mux byt_score_pwm_mux[] = {
+	SIMPLE_FUNC("pwm", 1),
+};
+
+static const unsigned int byt_score_sio_spi_pins[] = { 66, 67, 68, 69 };
+static const struct byt_simple_func_mux byt_score_spi_mux[] = {
+	SIMPLE_FUNC("spi", 1),
+};
+
+static const unsigned int byt_score_i2c5_pins[] = { 88, 89 };
+static const unsigned int byt_score_i2c6_pins[] = { 90, 91 };
+static const unsigned int byt_score_i2c4_pins[] = { 86, 87 };
+static const unsigned int byt_score_i2c3_pins[] = { 84, 85 };
+static const unsigned int byt_score_i2c2_pins[] = { 82, 83 };
+static const unsigned int byt_score_i2c1_pins[] = { 80, 81 };
+static const unsigned int byt_score_i2c0_pins[] = { 78, 79 };
+static const struct byt_simple_func_mux byt_score_i2c_mux[] = {
+	SIMPLE_FUNC("i2c", 1),
+};
+
+static const unsigned int byt_score_ssp0_pins[] = { 8, 9, 10, 11 };
+static const unsigned int byt_score_ssp1_pins[] = { 12, 13, 14, 15 };
+static const unsigned int byt_score_ssp2_pins[] = { 62, 63, 64, 65 };
+static const struct byt_simple_func_mux byt_score_ssp_mux[] = {
+	SIMPLE_FUNC("ssp", 1),
+};
+
+static const unsigned int byt_score_sdcard_pins[] = {
+	7, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+static const unsigned short byt_score_sdcard_mux_values[] = {
+	2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+};
+static const struct byt_mixed_func_mux byt_score_sdcard_mux[] = {
+	MIXED_FUNC("sdcard", byt_score_sdcard_mux_values),
+};
+
+static const unsigned int byt_score_sdio_pins[] = { 27, 28, 29, 30, 31, 32 };
+static const struct byt_simple_func_mux byt_score_sdio_mux[] = {
+	SIMPLE_FUNC("sdio", 1),
+};
+
+static const unsigned int byt_score_emmc_pins[] = {
+	16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+};
+static const struct byt_simple_func_mux byt_score_emmc_mux[] = {
+	SIMPLE_FUNC("emmc", 1),
+};
+
+static const unsigned int byt_score_ilb_lpc_pins[] = {
+	42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+static const struct byt_simple_func_mux byt_score_lpc_mux[] = {
+	SIMPLE_FUNC("lpc", 1),
+};
+
+static const unsigned int byt_score_sata_pins[] = { 0, 1, 2 };
+static const struct byt_simple_func_mux byt_score_sata_mux[] = {
+	SIMPLE_FUNC("sata", 1),
+};
+
+static const unsigned int byt_score_plt_clk0_pins[] = { 96 };
+static const unsigned int byt_score_plt_clk1_pins[] = { 97 };
+static const unsigned int byt_score_plt_clk2_pins[] = { 98 };
+static const unsigned int byt_score_plt_clk4_pins[] = { 99 };
+static const unsigned int byt_score_plt_clk5_pins[] = { 100 };
+static const unsigned int byt_score_plt_clk3_pins[] = { 101 };
+static const struct byt_simple_func_mux byt_score_plt_clk_mux[] = {
+	SIMPLE_FUNC("plt_clk", 1),
+};
+
+static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 };
+static const struct byt_simple_func_mux byt_score_smbus_mux[] = {
+	SIMPLE_FUNC("smbus", 1),
+};
+
+static const struct byt_pingroup byt_score_groups[] = {
+	PIN_GROUP_SIMPLE("uart1_grp",
+			 byt_score_uart1_pins, byt_score_uart_mux),
+	PIN_GROUP_SIMPLE("uart2_grp",
+			 byt_score_uart2_pins, byt_score_uart_mux),
+	PIN_GROUP_SIMPLE("pwm0_grp",
+			 byt_score_pwm0_pins, byt_score_pwm_mux),
+	PIN_GROUP_SIMPLE("pwm1_grp",
+			 byt_score_pwm1_pins, byt_score_pwm_mux),
+	PIN_GROUP_SIMPLE("ssp2_grp",
+			 byt_score_ssp2_pins, byt_score_pwm_mux),
+	PIN_GROUP_SIMPLE("sio_spi_grp",
+			 byt_score_sio_spi_pins, byt_score_spi_mux),
+	PIN_GROUP_SIMPLE("i2c5_grp",
+			 byt_score_i2c5_pins, byt_score_i2c_mux),
+	PIN_GROUP_SIMPLE("i2c6_grp",
+			 byt_score_i2c6_pins, byt_score_i2c_mux),
+	PIN_GROUP_SIMPLE("i2c4_grp",
+			 byt_score_i2c4_pins, byt_score_i2c_mux),
+	PIN_GROUP_SIMPLE("i2c3_grp",
+			 byt_score_i2c3_pins, byt_score_i2c_mux),
+	PIN_GROUP_SIMPLE("i2c2_grp",
+			 byt_score_i2c2_pins, byt_score_i2c_mux),
+	PIN_GROUP_SIMPLE("i2c1_grp",
+			 byt_score_i2c1_pins, byt_score_i2c_mux),
+	PIN_GROUP_SIMPLE("i2c0_grp",
+			 byt_score_i2c0_pins, byt_score_i2c_mux),
+	PIN_GROUP_SIMPLE("ssp0_grp",
+			 byt_score_ssp0_pins, byt_score_ssp_mux),
+	PIN_GROUP_SIMPLE("ssp1_grp",
+			 byt_score_ssp1_pins, byt_score_ssp_mux),
+	PIN_GROUP_MIXED("sdcard_grp",
+			byt_score_sdcard_pins, byt_score_sdcard_mux),
+	PIN_GROUP_SIMPLE("sdio_grp",
+			 byt_score_sdio_pins, byt_score_sdio_mux),
+	PIN_GROUP_SIMPLE("emmc_grp",
+			 byt_score_emmc_pins, byt_score_emmc_mux),
+	PIN_GROUP_SIMPLE("lpc_grp",
+			 byt_score_ilb_lpc_pins, byt_score_lpc_mux),
+	PIN_GROUP_SIMPLE("sata_grp",
+			 byt_score_sata_pins, byt_score_sata_mux),
+	PIN_GROUP_SIMPLE("plt_clk0_grp",
+			 byt_score_plt_clk0_pins, byt_score_plt_clk_mux),
+	PIN_GROUP_SIMPLE("plt_clk1_grp",
+			 byt_score_plt_clk1_pins, byt_score_plt_clk_mux),
+	PIN_GROUP_SIMPLE("plt_clk2_grp",
+			 byt_score_plt_clk2_pins, byt_score_plt_clk_mux),
+	PIN_GROUP_SIMPLE("plt_clk3_grp",
+			 byt_score_plt_clk3_pins, byt_score_plt_clk_mux),
+	PIN_GROUP_SIMPLE("plt_clk4_grp",
+			 byt_score_plt_clk4_pins, byt_score_plt_clk_mux),
+	PIN_GROUP_SIMPLE("plt_clk5_grp",
+			 byt_score_plt_clk5_pins, byt_score_plt_clk_mux),
+	PIN_GROUP_SIMPLE("smbus_grp",
+			 byt_score_smbus_pins, byt_score_smbus_mux),
+};
+
+static const char * const byt_score_uart_groups[] = {
+	"uart1_grp", "uart2_grp",
+};
+static const char * const byt_score_pwm_groups[] = {
+	"pwm0_grp", "pwm1_grp",
+};
+static const char * const byt_score_ssp_groups[] = {
+	"ssp0_grp", "ssp1_grp", "ssp2_grp",
+};
+static const char * const byt_score_spi_groups[] = { "sio_spi_grp" };
+static const char * const byt_score_i2c_groups[] = {
+	"i2c0_grp", "i2c1_grp", "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp",
+	"i2c6_grp",
+};
+static const char * const byt_score_sdcard_groups[] = { "sdcard_grp" };
+static const char * const byt_score_sdio_groups[] = { "sdio_grp" };
+static const char * const byt_score_emmc_groups[] = { "emmc_grp" };
+static const char * const byt_score_lpc_groups[] = { "lpc_grp" };
+static const char * const byt_score_sata_groups[] = { "sata_grp" };
+static const char * const byt_score_plt_clk_groups[] = {
+	"plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
+	"plt_clk4_grp", "plt_clk5_grp",
+};
+static const char * const byt_score_smbus_groups[] = { "smbus_grp" };
+static const char * const byt_score_gpio_groups[] = {
+	"uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp",
+	"ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp",
+	"i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp",
+	"sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp",
+	"plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
+	"plt_clk4_grp", "plt_clk5_grp", "smbus_grp",
+
+};
+
+static const struct byt_function byt_score_functions[] = {
+	FUNCTION("uart", byt_score_uart_groups),
+	FUNCTION("pwm", byt_score_pwm_groups),
+	FUNCTION("ssp", byt_score_ssp_groups),
+	FUNCTION("spi", byt_score_spi_groups),
+	FUNCTION("i2c", byt_score_i2c_groups),
+	FUNCTION("sdcard", byt_score_sdcard_groups),
+	FUNCTION("sdio", byt_score_sdio_groups),
+	FUNCTION("emmc", byt_score_emmc_groups),
+	FUNCTION("lpc", byt_score_lpc_groups),
+	FUNCTION("sata", byt_score_sata_groups),
+	FUNCTION("plt_clk", byt_score_plt_clk_groups),
+	FUNCTION("smbus", byt_score_smbus_groups),
+	FUNCTION("gpio", byt_score_gpio_groups),
+};
+
+static const struct byt_community byt_score_communities[] = {
+	COMMUNITY(0, BYT_NGPIO_SCORE, byt_score_pins_map),
+};
+
+static const struct byt_pinctrl_soc_data byt_score_soc_data = {
+	.uid		= BYT_SCORE_ACPI_UID,
+	.pins		= byt_score_pins,
+	.npins		= ARRAY_SIZE(byt_score_pins),
+	.groups		= byt_score_groups,
+	.ngroups	= ARRAY_SIZE(byt_score_groups),
+	.functions	= byt_score_functions,
+	.nfunctions	= ARRAY_SIZE(byt_score_functions),
+	.communities	= byt_score_communities,
+	.ncommunities	= ARRAY_SIZE(byt_score_communities),
+};
+
+/* SUS pins, aka GPIOS_<pin_no> or GPIO_S5[<pin_no>]  */
+static const struct pinctrl_pin_desc byt_sus_pins[] = {
+	PINCTRL_PIN(0, "GPIO_S50"),
+	PINCTRL_PIN(1, "GPIO_S51"),
+	PINCTRL_PIN(2, "GPIO_S52"),
+	PINCTRL_PIN(3, "GPIO_S53"),
+	PINCTRL_PIN(4, "GPIO_S54"),
+	PINCTRL_PIN(5, "GPIO_S55"),
+	PINCTRL_PIN(6, "GPIO_S56"),
+	PINCTRL_PIN(7, "GPIO_S57"),
+	PINCTRL_PIN(8, "GPIO_S58"),
+	PINCTRL_PIN(9, "GPIO_S59"),
+	PINCTRL_PIN(10, "GPIO_S510"),
+	PINCTRL_PIN(11, "PMC_SUSPWRDNACK"),
+	PINCTRL_PIN(12, "PMC_SUSCLK0"),
+	PINCTRL_PIN(13, "GPIO_S513"),
+	PINCTRL_PIN(14, "USB_ULPI_RST"),
+	PINCTRL_PIN(15, "PMC_WAKE_PCIE0#"),
+	PINCTRL_PIN(16, "PMC_PWRBTN"),
+	PINCTRL_PIN(17, "GPIO_S517"),
+	PINCTRL_PIN(18, "PMC_SUS_STAT"),
+	PINCTRL_PIN(19, "USB_OC0"),
+	PINCTRL_PIN(20, "USB_OC1"),
+	PINCTRL_PIN(21, "PCU_SPI_CS1"),
+	PINCTRL_PIN(22, "GPIO_S522"),
+	PINCTRL_PIN(23, "GPIO_S523"),
+	PINCTRL_PIN(24, "GPIO_S524"),
+	PINCTRL_PIN(25, "GPIO_S525"),
+	PINCTRL_PIN(26, "GPIO_S526"),
+	PINCTRL_PIN(27, "GPIO_S527"),
+	PINCTRL_PIN(28, "GPIO_S528"),
+	PINCTRL_PIN(29, "GPIO_S529"),
+	PINCTRL_PIN(30, "GPIO_S530"),
+	PINCTRL_PIN(31, "USB_ULPI_CLK"),
+	PINCTRL_PIN(32, "USB_ULPI_DATA0"),
+	PINCTRL_PIN(33, "USB_ULPI_DATA1"),
+	PINCTRL_PIN(34, "USB_ULPI_DATA2"),
+	PINCTRL_PIN(35, "USB_ULPI_DATA3"),
+	PINCTRL_PIN(36, "USB_ULPI_DATA4"),
+	PINCTRL_PIN(37, "USB_ULPI_DATA5"),
+	PINCTRL_PIN(38, "USB_ULPI_DATA6"),
+	PINCTRL_PIN(39, "USB_ULPI_DATA7"),
+	PINCTRL_PIN(40, "USB_ULPI_DIR"),
+	PINCTRL_PIN(41, "USB_ULPI_NXT"),
+	PINCTRL_PIN(42, "USB_ULPI_STP"),
+	PINCTRL_PIN(43, "USB_ULPI_REFCLK"),
+};
+
+static const unsigned int byt_sus_pins_map[BYT_NGPIO_SUS] = {
 	29, 33, 30, 31, 32, 34, 36, 35, 38, 37,
 	18, 7, 11, 20, 17, 1, 8, 10, 19, 12,
 	0, 2, 23, 39, 28, 27, 22, 21, 24, 25,
@@ -113,57 +588,357 @@
 	52, 53, 59, 40,
 };
 
-static struct pinctrl_gpio_range byt_ranges[] = {
-	{
-		.name = BYT_SCORE_ACPI_UID, /* match with acpi _UID in probe */
-		.npins = BYT_NGPIO_SCORE,
-		.pins = score_pins,
-	},
-	{
-		.name = BYT_NCORE_ACPI_UID,
-		.npins = BYT_NGPIO_NCORE,
-		.pins = ncore_pins,
-	},
-	{
-		.name = BYT_SUS_ACPI_UID,
-		.npins = BYT_NGPIO_SUS,
-		.pins = sus_pins,
-	},
-	{
-	},
+static const unsigned int byt_sus_usb_over_current_pins[] = { 19, 20 };
+static const struct byt_simple_func_mux byt_sus_usb_oc_mux[] = {
+	SIMPLE_FUNC("usb", 0),
+	SIMPLE_FUNC("gpio", 1),
 };
 
-struct byt_gpio_pin_context {
-	u32 conf0;
-	u32 val;
+static const unsigned int byt_sus_usb_ulpi_pins[] = {
+	14, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+static const unsigned short byt_sus_usb_ulpi_mode_values[] = {
+	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+};
+static const unsigned short byt_sus_usb_ulpi_gpio_mode_values[] = {
+	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+static const struct byt_mixed_func_mux byt_sus_usb_ulpi_mux[] = {
+	MIXED_FUNC("usb", byt_sus_usb_ulpi_mode_values),
+	MIXED_FUNC("gpio", byt_sus_usb_ulpi_gpio_mode_values),
 };
 
-struct byt_gpio {
-	struct gpio_chip		chip;
-	struct platform_device		*pdev;
-	raw_spinlock_t			lock;
-	void __iomem			*reg_base;
-	struct pinctrl_gpio_range	*range;
-	struct byt_gpio_pin_context	*saved_context;
+static const unsigned int byt_sus_pcu_spi_pins[] = { 21 };
+static const struct byt_simple_func_mux byt_sus_pcu_spi_mux[] = {
+	SIMPLE_FUNC("spi", 0),
+	SIMPLE_FUNC("gpio", 1),
 };
 
-static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
-				 int reg)
+static const struct byt_pingroup byt_sus_groups[] = {
+	PIN_GROUP_SIMPLE("usb_oc_grp",
+			byt_sus_usb_over_current_pins, byt_sus_usb_oc_mux),
+	PIN_GROUP_MIXED("usb_ulpi_grp",
+			byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_mux),
+	PIN_GROUP_SIMPLE("pcu_spi_grp",
+			byt_sus_pcu_spi_pins, byt_sus_pcu_spi_mux),
+};
+
+static const char * const byt_sus_usb_groups[] = {
+	"usb_oc_grp", "usb_ulpi_grp",
+};
+static const char * const byt_sus_spi_groups[] = { "pcu_spi_grp" };
+static const char * const byt_sus_gpio_groups[] = {
+	"usb_oc_grp", "usb_ulpi_grp", "pcu_spi_grp",
+};
+
+static const struct byt_function byt_sus_functions[] = {
+	FUNCTION("usb", byt_sus_usb_groups),
+	FUNCTION("spi", byt_sus_spi_groups),
+	FUNCTION("gpio", byt_sus_gpio_groups),
+};
+
+static const struct byt_community byt_sus_communities[] = {
+	COMMUNITY(0, BYT_NGPIO_SUS, byt_sus_pins_map),
+};
+
+static const struct byt_pinctrl_soc_data byt_sus_soc_data = {
+	.uid		= BYT_SUS_ACPI_UID,
+	.pins		= byt_sus_pins,
+	.npins		= ARRAY_SIZE(byt_sus_pins),
+	.groups		= byt_sus_groups,
+	.ngroups	= ARRAY_SIZE(byt_sus_groups),
+	.functions	= byt_sus_functions,
+	.nfunctions	= ARRAY_SIZE(byt_sus_functions),
+	.communities	= byt_sus_communities,
+	.ncommunities	= ARRAY_SIZE(byt_sus_communities),
+};
+
+static const struct pinctrl_pin_desc byt_ncore_pins[] = {
+	PINCTRL_PIN(0, "GPIO_NCORE0"),
+	PINCTRL_PIN(1, "GPIO_NCORE1"),
+	PINCTRL_PIN(2, "GPIO_NCORE2"),
+	PINCTRL_PIN(3, "GPIO_NCORE3"),
+	PINCTRL_PIN(4, "GPIO_NCORE4"),
+	PINCTRL_PIN(5, "GPIO_NCORE5"),
+	PINCTRL_PIN(6, "GPIO_NCORE6"),
+	PINCTRL_PIN(7, "GPIO_NCORE7"),
+	PINCTRL_PIN(8, "GPIO_NCORE8"),
+	PINCTRL_PIN(9, "GPIO_NCORE9"),
+	PINCTRL_PIN(10, "GPIO_NCORE10"),
+	PINCTRL_PIN(11, "GPIO_NCORE11"),
+	PINCTRL_PIN(12, "GPIO_NCORE12"),
+	PINCTRL_PIN(13, "GPIO_NCORE13"),
+	PINCTRL_PIN(14, "GPIO_NCORE14"),
+	PINCTRL_PIN(15, "GPIO_NCORE15"),
+	PINCTRL_PIN(16, "GPIO_NCORE16"),
+	PINCTRL_PIN(17, "GPIO_NCORE17"),
+	PINCTRL_PIN(18, "GPIO_NCORE18"),
+	PINCTRL_PIN(19, "GPIO_NCORE19"),
+	PINCTRL_PIN(20, "GPIO_NCORE20"),
+	PINCTRL_PIN(21, "GPIO_NCORE21"),
+	PINCTRL_PIN(22, "GPIO_NCORE22"),
+	PINCTRL_PIN(23, "GPIO_NCORE23"),
+	PINCTRL_PIN(24, "GPIO_NCORE24"),
+	PINCTRL_PIN(25, "GPIO_NCORE25"),
+	PINCTRL_PIN(26, "GPIO_NCORE26"),
+	PINCTRL_PIN(27, "GPIO_NCORE27"),
+};
+
+static unsigned const byt_ncore_pins_map[BYT_NGPIO_NCORE] = {
+	19, 18, 17, 20, 21, 22, 24, 25, 23, 16,
+	14, 15, 12, 26, 27, 1, 4, 8, 11, 0,
+	3, 6, 10, 13, 2, 5, 9, 7,
+};
+
+static const struct byt_community byt_ncore_communities[] = {
+	COMMUNITY(0, BYT_NGPIO_NCORE, byt_ncore_pins_map),
+};
+
+static const struct byt_pinctrl_soc_data byt_ncore_soc_data = {
+	.uid		= BYT_NCORE_ACPI_UID,
+	.pins		= byt_ncore_pins,
+	.npins		= ARRAY_SIZE(byt_ncore_pins),
+	.communities	= byt_ncore_communities,
+	.ncommunities	= ARRAY_SIZE(byt_ncore_communities),
+};
+
+static const struct byt_pinctrl_soc_data *byt_soc_data[] = {
+	&byt_score_soc_data,
+	&byt_sus_soc_data,
+	&byt_ncore_soc_data,
+	NULL,
+};
+
+static struct byt_community *byt_get_community(struct byt_gpio *vg,
+					       unsigned int pin)
 {
-	struct byt_gpio *vg = gpiochip_get_data(chip);
-	u32 reg_offset;
+	struct byt_community *comm;
+	int i;
 
+	for (i = 0; i < vg->soc_data->ncommunities; i++) {
+		comm = vg->communities_copy + i;
+		if (pin < comm->pin_base + comm->npins && pin >= comm->pin_base)
+			return comm;
+	}
+
+	return NULL;
+}
+
+static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
+				  int reg)
+{
+	struct byt_community *comm = byt_get_community(vg, offset);
+	u32 reg_offset = 0;
+
+	if (!comm)
+		return NULL;
+
+	offset -= comm->pin_base;
 	if (reg == BYT_INT_STAT_REG)
 		reg_offset = (offset / 32) * 4;
 	else
-		reg_offset = vg->range->pins[offset] * 16;
+		reg_offset = comm->pad_map[offset] * 16;
 
-	return vg->reg_base + reg_offset + reg;
+	return comm->reg_base + reg_offset + reg;
 }
 
-static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset)
+static int byt_get_groups_count(struct pinctrl_dev *pctldev)
 {
-	void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+	return vg->soc_data->ngroups;
+}
+
+static const char *byt_get_group_name(struct pinctrl_dev *pctldev,
+				      unsigned int selector)
+{
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+	return vg->soc_data->groups[selector].name;
+}
+
+static int byt_get_group_pins(struct pinctrl_dev *pctldev,
+			      unsigned int selector,
+			      const unsigned int **pins,
+			      unsigned int *num_pins)
+{
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins		= vg->soc_data->groups[selector].pins;
+	*num_pins	= vg->soc_data->groups[selector].npins;
+
+	return 0;
+}
+
+static const struct pinctrl_ops byt_pinctrl_ops = {
+	.get_groups_count	= byt_get_groups_count,
+	.get_group_name		= byt_get_group_name,
+	.get_group_pins		= byt_get_group_pins,
+};
+
+static int byt_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+	return vg->soc_data->nfunctions;
+}
+
+static const char *byt_get_function_name(struct pinctrl_dev *pctldev,
+					 unsigned int selector)
+{
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+	return vg->soc_data->functions[selector].name;
+}
+
+static int byt_get_function_groups(struct pinctrl_dev *pctldev,
+				   unsigned int selector,
+				   const char * const **groups,
+				   unsigned int *num_groups)
+{
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups		= vg->soc_data->functions[selector].groups;
+	*num_groups	= vg->soc_data->functions[selector].ngroups;
+
+	return 0;
+}
+
+static int byt_get_group_simple_mux(const struct byt_pingroup group,
+				    const char *func_name,
+				    unsigned short *func)
+{
+	int i;
+
+	for (i = 0; i < group.nfuncs; i++) {
+		if (!strcmp(group.simple_funcs[i].name, func_name)) {
+			*func = group.simple_funcs[i].func;
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+static int byt_get_group_mixed_mux(const struct byt_pingroup group,
+				   const char *func_name,
+				   const unsigned short **func)
+{
+	int i;
+
+	for (i = 0; i < group.nfuncs; i++) {
+		if (!strcmp(group.mixed_funcs[i].name, func_name)) {
+			*func = group.mixed_funcs[i].func_values;
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+static void byt_set_group_simple_mux(struct byt_gpio *vg,
+				     const struct byt_pingroup group,
+				     unsigned short func)
+{
+	unsigned long flags;
+	int i;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
+
+	for (i = 0; i < group.npins; i++) {
+		void __iomem *padcfg0;
+		u32 value;
+
+		padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+		if (!padcfg0) {
+			dev_warn(&vg->pdev->dev,
+				 "Group %s, pin %i not muxed (no padcfg0)\n",
+				 group.name, i);
+			continue;
+		}
+
+		value = readl(padcfg0);
+		value &= ~BYT_PIN_MUX;
+		value |= func;
+		writel(value, padcfg0);
+	}
+
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+}
+
+static void byt_set_group_mixed_mux(struct byt_gpio *vg,
+				    const struct byt_pingroup group,
+				    const unsigned short *func)
+{
+	unsigned long flags;
+	int i;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
+
+	for (i = 0; i < group.npins; i++) {
+		void __iomem *padcfg0;
+		u32 value;
+
+		padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+		if (!padcfg0) {
+			dev_warn(&vg->pdev->dev,
+				 "Group %s, pin %i not muxed (no padcfg0)\n",
+				 group.name, i);
+			continue;
+		}
+
+		value = readl(padcfg0);
+		value &= ~BYT_PIN_MUX;
+		value |= func[i];
+		writel(value, padcfg0);
+	}
+
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+}
+
+static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
+		       unsigned int group_selector)
+{
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+	const struct byt_function func = vg->soc_data->functions[func_selector];
+	const struct byt_pingroup group = vg->soc_data->groups[group_selector];
+	const unsigned short *mixed_func;
+	unsigned short simple_func;
+	int ret = 1;
+
+	if (group.has_simple_funcs)
+		ret = byt_get_group_simple_mux(group, func.name, &simple_func);
+	else
+		ret = byt_get_group_mixed_mux(group, func.name, &mixed_func);
+
+	if (ret)
+		byt_set_group_simple_mux(vg, group, BYT_DEFAULT_GPIO_MUX);
+	else if (group.has_simple_funcs)
+		byt_set_group_simple_mux(vg, group, simple_func);
+	else
+		byt_set_group_mixed_mux(vg, group, mixed_func);
+
+	return 0;
+}
+
+static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
+{
+	/* SCORE pin 92-93 */
+	if (!strcmp(vg->soc_data->uid, BYT_SCORE_ACPI_UID) &&
+	    offset >= 92 && offset <= 93)
+		return 1;
+
+	/* SUS pin 11-21 */
+	if (!strcmp(vg->soc_data->uid, BYT_SUS_ACPI_UID) &&
+	    offset >= 11 && offset <= 21)
+		return 1;
+
+	return 0;
+}
+
+static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
+{
+	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
 	unsigned long flags;
 	u32 value;
 
@@ -174,25 +949,12 @@
 	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
-static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
+static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
+				   struct pinctrl_gpio_range *range,
+				   unsigned int offset)
 {
-	/* SCORE pin 92-93 */
-	if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
-		offset >= 92 && offset <= 93)
-		return 1;
-
-	/* SUS pin 11-21 */
-	if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
-		offset >= 11 && offset <= 21)
-		return 1;
-
-	return 0;
-}
-
-static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
-	struct byt_gpio *vg = gpiochip_get_data(chip);
-	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
 	u32 value, gpio_mux;
 	unsigned long flags;
 
@@ -225,53 +987,318 @@
 	return 0;
 }
 
-static void byt_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
+				  struct pinctrl_gpio_range *range,
+				  unsigned int offset)
 {
-	struct byt_gpio *vg = gpiochip_get_data(chip);
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
 
 	byt_gpio_clear_triggering(vg, offset);
 	pm_runtime_put(&vg->pdev->dev);
 }
 
-static int byt_irq_type(struct irq_data *d, unsigned type)
+static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
+				  struct pinctrl_gpio_range *range,
+				  unsigned int offset,
+				  bool input)
 {
-	struct byt_gpio *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
-	u32 offset = irqd_to_hwirq(d);
-	u32 value;
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+	void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+	void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
 	unsigned long flags;
-	void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
-
-	if (offset >= vg->chip.ngpio)
-		return -EINVAL;
+	u32 value;
 
 	raw_spin_lock_irqsave(&vg->lock, flags);
-	value = readl(reg);
 
-	WARN(value & BYT_DIRECT_IRQ_EN,
-		"Bad pad config for io mode, force direct_irq_en bit clearing");
-
-	/* For level trigges the BYT_TRIG_POS and BYT_TRIG_NEG bits
-	 * are used to indicate high and low level triggering
-	 */
-	value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
-		   BYT_TRIG_LVL);
-
-	writel(value, reg);
-
-	if (type & IRQ_TYPE_EDGE_BOTH)
-		irq_set_handler_locked(d, handle_edge_irq);
-	else if (type & IRQ_TYPE_LEVEL_MASK)
-		irq_set_handler_locked(d, handle_level_irq);
+	value = readl(val_reg);
+	value &= ~BYT_DIR_MASK;
+	if (input)
+		value |= BYT_OUTPUT_EN;
+	else
+		/*
+		 * Before making any direction modifications, do a check if gpio
+		 * is set for direct IRQ.  On baytrail, setting GPIO to output
+		 * does not make sense, so let's at least warn the caller before
+		 * they shoot themselves in the foot.
+		 */
+		WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
+		     "Potential Error: Setting GPIO with direct_irq_en to output");
+	writel(value, val_reg);
 
 	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
 }
 
+static const struct pinmux_ops byt_pinmux_ops = {
+	.get_functions_count	= byt_get_functions_count,
+	.get_function_name	= byt_get_function_name,
+	.get_function_groups	= byt_get_function_groups,
+	.set_mux		= byt_set_mux,
+	.gpio_request_enable	= byt_gpio_request_enable,
+	.gpio_disable_free	= byt_gpio_disable_free,
+	.gpio_set_direction	= byt_gpio_set_direction,
+};
+
+static void byt_get_pull_strength(u32 reg, u16 *strength)
+{
+	switch (reg & BYT_PULL_STR_MASK) {
+	case BYT_PULL_STR_2K:
+		*strength = 2000;
+		break;
+	case BYT_PULL_STR_10K:
+		*strength = 10000;
+		break;
+	case BYT_PULL_STR_20K:
+		*strength = 20000;
+		break;
+	case BYT_PULL_STR_40K:
+		*strength = 40000;
+		break;
+	}
+}
+
+static int byt_set_pull_strength(u32 *reg, u16 strength)
+{
+	*reg &= ~BYT_PULL_STR_MASK;
+
+	switch (strength) {
+	case 2000:
+		*reg |= BYT_PULL_STR_2K;
+		break;
+	case 10000:
+		*reg |= BYT_PULL_STR_10K;
+		break;
+	case 20000:
+		*reg |= BYT_PULL_STR_20K;
+		break;
+	case 40000:
+		*reg |= BYT_PULL_STR_40K;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
+			      unsigned long *config)
+{
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+	enum pin_config_param param = pinconf_to_config_param(*config);
+	void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+	void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+	unsigned long flags;
+	u32 conf, pull, val, debounce;
+	u16 arg = 0;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
+	conf = readl(conf_reg);
+	pull = conf & BYT_PULL_ASSIGN_MASK;
+	val = readl(val_reg);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (pull)
+			return -EINVAL;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		/* Pull assignment is only applicable in input mode */
+		if ((val & BYT_INPUT_EN) || pull != BYT_PULL_ASSIGN_DOWN)
+			return -EINVAL;
+
+		byt_get_pull_strength(conf, &arg);
+
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		/* Pull assignment is only applicable in input mode */
+		if ((val & BYT_INPUT_EN) || pull != BYT_PULL_ASSIGN_UP)
+			return -EINVAL;
+
+		byt_get_pull_strength(conf, &arg);
+
+		break;
+	case PIN_CONFIG_INPUT_DEBOUNCE:
+		if (!(conf & BYT_DEBOUNCE_EN))
+			return -EINVAL;
+
+		raw_spin_lock_irqsave(&vg->lock, flags);
+		debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
+		raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+		switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
+		case BYT_DEBOUNCE_PULSE_375US:
+			arg = 375;
+			break;
+		case BYT_DEBOUNCE_PULSE_750US:
+			arg = 750;
+			break;
+		case BYT_DEBOUNCE_PULSE_1500US:
+			arg = 1500;
+			break;
+		case BYT_DEBOUNCE_PULSE_3MS:
+			arg = 3000;
+			break;
+		case BYT_DEBOUNCE_PULSE_6MS:
+			arg = 6000;
+			break;
+		case BYT_DEBOUNCE_PULSE_12MS:
+			arg = 12000;
+			break;
+		case BYT_DEBOUNCE_PULSE_24MS:
+			arg = 24000;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
+			      unsigned int offset,
+			      unsigned long *configs,
+			      unsigned int num_configs)
+{
+	struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+	unsigned int param, arg;
+	void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+	void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+	unsigned long flags;
+	u32 conf, val, debounce;
+	int i, ret = 0;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
+
+	conf = readl(conf_reg);
+	val = readl(val_reg);
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+			conf &= ~BYT_PULL_ASSIGN_MASK;
+			break;
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			/* Set default strength value in case none is given */
+			if (arg == 1)
+				arg = 2000;
+
+			/*
+			 * Pull assignment is only applicable in input mode. If
+			 * chip is not in input mode, set it and warn about it.
+			 */
+			if (val & BYT_INPUT_EN) {
+				val &= ~BYT_INPUT_EN;
+				writel(val, val_reg);
+				dev_warn(&vg->pdev->dev,
+					 "pin %u forcibly set to input mode\n",
+					 offset);
+			}
+
+			conf &= ~BYT_PULL_ASSIGN_MASK;
+			conf |= BYT_PULL_ASSIGN_DOWN;
+			ret = byt_set_pull_strength(&conf, arg);
+
+			break;
+		case PIN_CONFIG_BIAS_PULL_UP:
+			/* Set default strength value in case none is given */
+			if (arg == 1)
+				arg = 2000;
+
+			/*
+			 * Pull assignment is only applicable in input mode. If
+			 * chip is not in input mode, set it and warn about it.
+			 */
+			if (val & BYT_INPUT_EN) {
+				val &= ~BYT_INPUT_EN;
+				writel(val, val_reg);
+				dev_warn(&vg->pdev->dev,
+					 "pin %u forcibly set to input mode\n",
+					 offset);
+			}
+
+			conf &= ~BYT_PULL_ASSIGN_MASK;
+			conf |= BYT_PULL_ASSIGN_UP;
+			ret = byt_set_pull_strength(&conf, arg);
+
+			break;
+		case PIN_CONFIG_INPUT_DEBOUNCE:
+			debounce = readl(byt_gpio_reg(vg, offset,
+						      BYT_DEBOUNCE_REG));
+			conf &= ~BYT_DEBOUNCE_PULSE_MASK;
+
+			switch (arg) {
+			case 375:
+				conf |= BYT_DEBOUNCE_PULSE_375US;
+				break;
+			case 750:
+				conf |= BYT_DEBOUNCE_PULSE_750US;
+				break;
+			case 1500:
+				conf |= BYT_DEBOUNCE_PULSE_1500US;
+				break;
+			case 3000:
+				conf |= BYT_DEBOUNCE_PULSE_3MS;
+				break;
+			case 6000:
+				conf |= BYT_DEBOUNCE_PULSE_6MS;
+				break;
+			case 12000:
+				conf |= BYT_DEBOUNCE_PULSE_12MS;
+				break;
+			case 24000:
+				conf |= BYT_DEBOUNCE_PULSE_24MS;
+				break;
+			default:
+				ret = -EINVAL;
+			}
+
+			break;
+		default:
+			ret = -ENOTSUPP;
+		}
+
+		if (ret)
+			break;
+	}
+
+	if (!ret)
+		writel(conf, conf_reg);
+
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+	return ret;
+}
+
+static const struct pinconf_ops byt_pinconf_ops = {
+	.is_generic	= true,
+	.pin_config_get	= byt_pin_config_get,
+	.pin_config_set	= byt_pin_config_set,
+};
+
+static const struct pinctrl_desc byt_pinctrl_desc = {
+	.pctlops	= &byt_pinctrl_ops,
+	.pmxops		= &byt_pinmux_ops,
+	.confops	= &byt_pinconf_ops,
+	.owner		= THIS_MODULE,
+};
+
 static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
-	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
 	struct byt_gpio *vg = gpiochip_get_data(chip);
+	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
 	unsigned long flags;
 	u32 val;
 
@@ -285,69 +1312,58 @@
 static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
 	struct byt_gpio *vg = gpiochip_get_data(chip);
-	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
+	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
 	unsigned long flags;
 	u32 old_val;
 
+	if (!reg)
+		return;
+
 	raw_spin_lock_irqsave(&vg->lock, flags);
-
 	old_val = readl(reg);
-
 	if (value)
 		writel(old_val | BYT_LEVEL, reg);
 	else
 		writel(old_val & ~BYT_LEVEL, reg);
-
 	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
-static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 {
 	struct byt_gpio *vg = gpiochip_get_data(chip);
-	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
+	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
 	unsigned long flags;
 	u32 value;
 
+	if (!reg)
+		return -EINVAL;
+
 	raw_spin_lock_irqsave(&vg->lock, flags);
-
-	value = readl(reg) | BYT_DIR_MASK;
-	value &= ~BYT_INPUT_EN;		/* active low */
-	writel(value, reg);
-
+	value = readl(reg);
 	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
-	return 0;
+	if (!(value & BYT_OUTPUT_EN))
+		return GPIOF_DIR_OUT;
+	if (!(value & BYT_INPUT_EN))
+		return GPIOF_DIR_IN;
+
+	return -EINVAL;
+}
+
+static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+	return pinctrl_gpio_direction_input(chip->base + offset);
 }
 
 static int byt_gpio_direction_output(struct gpio_chip *chip,
-				     unsigned gpio, int value)
+				     unsigned int offset, int value)
 {
-	struct byt_gpio *vg = gpiochip_get_data(chip);
-	void __iomem *conf_reg = byt_gpio_reg(chip, gpio, BYT_CONF0_REG);
-	void __iomem *reg = byt_gpio_reg(chip, gpio, BYT_VAL_REG);
-	unsigned long flags;
-	u32 reg_val;
+	int ret = pinctrl_gpio_direction_output(chip->base + offset);
 
-	raw_spin_lock_irqsave(&vg->lock, flags);
+	if (ret)
+		return ret;
 
-	/*
-	 * Before making any direction modifications, do a check if gpio
-	 * is set for direct IRQ.  On baytrail, setting GPIO to output does
-	 * not make sense, so let's at least warn the caller before they shoot
-	 * themselves in the foot.
-	 */
-	WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
-		"Potential Error: Setting GPIO with direct_irq_en to output");
-
-	reg_val = readl(reg) | BYT_DIR_MASK;
-	reg_val &= ~(BYT_OUTPUT_EN | BYT_INPUT_EN);
-
-	if (value)
-		writel(reg_val | BYT_LEVEL, reg);
-	else
-		writel(reg_val & ~BYT_LEVEL, reg);
-
-	raw_spin_unlock_irqrestore(&vg->lock, flags);
+	byt_gpio_set(chip, offset, value);
 
 	return 0;
 }
@@ -356,20 +1372,45 @@
 {
 	struct byt_gpio *vg = gpiochip_get_data(chip);
 	int i;
-	u32 conf0, val, offs;
+	u32 conf0, val;
 
-	for (i = 0; i < vg->chip.ngpio; i++) {
+	for (i = 0; i < vg->soc_data->npins; i++) {
+		const struct byt_community *comm;
 		const char *pull_str = NULL;
 		const char *pull = NULL;
+		void __iomem *reg;
 		unsigned long flags;
 		const char *label;
-		offs = vg->range->pins[i] * 16;
+		unsigned int pin;
 
 		raw_spin_lock_irqsave(&vg->lock, flags);
-		conf0 = readl(vg->reg_base + offs + BYT_CONF0_REG);
-		val = readl(vg->reg_base + offs + BYT_VAL_REG);
+		pin = vg->soc_data->pins[i].number;
+		reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
+		if (!reg) {
+			seq_printf(s,
+				   "Could not retrieve pin %i conf0 reg\n",
+				   pin);
+			raw_spin_unlock_irqrestore(&vg->lock, flags);
+			continue;
+		}
+		conf0 = readl(reg);
+
+		reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
+		if (!reg) {
+			seq_printf(s,
+				   "Could not retrieve pin %i val reg\n", pin);
+			raw_spin_unlock_irqrestore(&vg->lock, flags);
+			continue;
+		}
+		val = readl(reg);
 		raw_spin_unlock_irqrestore(&vg->lock, flags);
 
+		comm = byt_get_community(vg, pin);
+		if (!comm) {
+			seq_printf(s,
+				   "Could not get community for pin %i\n", pin);
+			continue;
+		}
 		label = gpiochip_is_requested(chip, i);
 		if (!label)
 			label = "Unrequested";
@@ -400,12 +1441,12 @@
 
 		seq_printf(s,
 			   " gpio-%-3d (%-20.20s) %s %s %s pad-%-3d offset:0x%03x mux:%d %s%s%s",
-			   i,
+			   pin,
 			   label,
 			   val & BYT_INPUT_EN ? "  " : "in",
 			   val & BYT_OUTPUT_EN ? "   " : "out",
 			   val & BYT_LEVEL ? "hi" : "lo",
-			   vg->range->pins[i], offs,
+			   comm->pad_map[i], comm->pad_map[i] * 32,
 			   conf0 & 0x7,
 			   conf0 & BYT_TRIG_NEG ? " fall" : "     ",
 			   conf0 & BYT_TRIG_POS ? " rise" : "     ",
@@ -423,27 +1464,17 @@
 	}
 }
 
-static void byt_gpio_irq_handler(struct irq_desc *desc)
-{
-	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct byt_gpio *vg = gpiochip_get_data(irq_desc_get_handler_data(desc));
-	struct irq_chip *chip = irq_data_get_irq_chip(data);
-	u32 base, pin;
-	void __iomem *reg;
-	unsigned long pending;
-	unsigned virq;
-
-	/* check from GPIO controller which pin triggered the interrupt */
-	for (base = 0; base < vg->chip.ngpio; base += 32) {
-		reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);
-		pending = readl(reg);
-		for_each_set_bit(pin, &pending, 32) {
-			virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
-			generic_handle_irq(virq);
-		}
-	}
-	chip->irq_eoi(data);
-}
+static const struct gpio_chip byt_gpio_chip = {
+	.owner			= THIS_MODULE,
+	.request		= gpiochip_generic_request,
+	.free			= gpiochip_generic_free,
+	.get_direction		= byt_gpio_get_direction,
+	.direction_input	= byt_gpio_direction_input,
+	.direction_output	= byt_gpio_direction_output,
+	.get			= byt_gpio_get,
+	.set			= byt_gpio_set,
+	.dbg_show		= byt_gpio_dbg_show,
+};
 
 static void byt_irq_ack(struct irq_data *d)
 {
@@ -452,12 +1483,23 @@
 	unsigned offset = irqd_to_hwirq(d);
 	void __iomem *reg;
 
+	reg = byt_gpio_reg(vg, offset, BYT_INT_STAT_REG);
+	if (!reg)
+		return;
+
 	raw_spin_lock(&vg->lock);
-	reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
 	writel(BIT(offset % 32), reg);
 	raw_spin_unlock(&vg->lock);
 }
 
+static void byt_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct byt_gpio *vg = gpiochip_get_data(gc);
+
+	byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
+}
+
 static void byt_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -467,7 +1509,9 @@
 	void __iomem *reg;
 	u32 value;
 
-	reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+	reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+	if (!reg)
+		return;
 
 	raw_spin_lock_irqsave(&vg->lock, flags);
 	value = readl(reg);
@@ -493,23 +1537,81 @@
 	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
-static void byt_irq_mask(struct irq_data *d)
+static int byt_irq_type(struct irq_data *d, unsigned int type)
 {
-	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct byt_gpio *vg = gpiochip_get_data(gc);
+	struct byt_gpio *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+	u32 offset = irqd_to_hwirq(d);
+	u32 value;
+	unsigned long flags;
+	void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
 
-	byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
+	if (!reg || offset >= vg->chip.ngpio)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
+	value = readl(reg);
+
+	WARN(value & BYT_DIRECT_IRQ_EN,
+	     "Bad pad config for io mode, force direct_irq_en bit clearing");
+
+	/* For level triggers the BYT_TRIG_POS and BYT_TRIG_NEG bits
+	 * are used to indicate high and low level triggering
+	 */
+	value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
+		   BYT_TRIG_LVL);
+
+	writel(value, reg);
+
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(d, handle_edge_irq);
+	else if (type & IRQ_TYPE_LEVEL_MASK)
+		irq_set_handler_locked(d, handle_level_irq);
+
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+	return 0;
 }
 
 static struct irq_chip byt_irqchip = {
-	.name = "BYT-GPIO",
-	.irq_ack = byt_irq_ack,
-	.irq_mask = byt_irq_mask,
-	.irq_unmask = byt_irq_unmask,
-	.irq_set_type = byt_irq_type,
-	.flags = IRQCHIP_SKIP_SET_WAKE,
+	.name		= "BYT-GPIO",
+	.irq_ack	= byt_irq_ack,
+	.irq_mask	= byt_irq_mask,
+	.irq_unmask	= byt_irq_unmask,
+	.irq_set_type	= byt_irq_type,
+	.flags		= IRQCHIP_SKIP_SET_WAKE,
 };
 
+static void byt_gpio_irq_handler(struct irq_desc *desc)
+{
+	struct irq_data *data = irq_desc_get_irq_data(desc);
+	struct byt_gpio *vg = gpiochip_get_data(
+				irq_desc_get_handler_data(desc));
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	u32 base, pin;
+	void __iomem *reg;
+	unsigned long pending;
+	unsigned int virq;
+
+	/* check from GPIO controller which pin triggered the interrupt */
+	for (base = 0; base < vg->chip.ngpio; base += 32) {
+		reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
+
+		if (!reg) {
+			dev_warn(&vg->pdev->dev,
+				 "Pin %i: could not retrieve interrupt status register\n",
+				 base);
+			continue;
+		}
+
+		pending = readl(reg);
+		for_each_set_bit(pin, &pending, 32) {
+			virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
+			generic_handle_irq(virq);
+		}
+	}
+	chip->irq_eoi(data);
+}
+
 static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 {
 	void __iomem *reg;
@@ -521,8 +1623,18 @@
 	 * do not use direct IRQ mode. This will prevent spurious
 	 * interrupts from misconfigured pins.
 	 */
-	for (i = 0; i < vg->chip.ngpio; i++) {
-		value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG));
+	for (i = 0; i < vg->soc_data->npins; i++) {
+		unsigned int pin = vg->soc_data->pins[i].number;
+
+		reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
+		if (!reg) {
+			dev_warn(&vg->pdev->dev,
+				 "Pin %i: could not retrieve conf0 register\n",
+				 i);
+			continue;
+		}
+
+		value = readl(reg);
 		if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
 		    !(value & BYT_DIRECT_IRQ_EN)) {
 			byt_gpio_clear_triggering(vg, i);
@@ -531,8 +1643,16 @@
 	}
 
 	/* clear interrupt status trigger registers */
-	for (base = 0; base < vg->chip.ngpio; base += 32) {
-		reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);
+	for (base = 0; base < vg->soc_data->npins; base += 32) {
+		reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
+
+		if (!reg) {
+			dev_warn(&vg->pdev->dev,
+				 "Pin %i: could not retrieve irq status reg\n",
+				 base);
+			continue;
+		}
+
 		writel(0xffffffff, reg);
 		/* make sure trigger bits are cleared, if not then a pin
 		   might be misconfigured in bios */
@@ -543,82 +1663,47 @@
 	}
 }
 
-static int byt_gpio_probe(struct platform_device *pdev)
+static int byt_gpio_probe(struct byt_gpio *vg)
 {
-	struct byt_gpio *vg;
 	struct gpio_chip *gc;
-	struct resource *mem_rc, *irq_rc;
-	struct device *dev = &pdev->dev;
-	struct acpi_device *acpi_dev;
-	struct pinctrl_gpio_range *range;
-	acpi_handle handle = ACPI_HANDLE(dev);
+	struct resource *irq_rc;
 	int ret;
 
-	if (acpi_bus_get_device(handle, &acpi_dev))
-		return -ENODEV;
-
-	vg = devm_kzalloc(dev, sizeof(struct byt_gpio), GFP_KERNEL);
-	if (!vg) {
-		dev_err(&pdev->dev, "can't allocate byt_gpio chip data\n");
-		return -ENOMEM;
-	}
-
-	for (range = byt_ranges; range->name; range++) {
-		if (!strcmp(acpi_dev->pnp.unique_id, range->name)) {
-			vg->chip.ngpio = range->npins;
-			vg->range = range;
-			break;
-		}
-	}
-
-	if (!vg->chip.ngpio || !vg->range)
-		return -ENODEV;
-
-	vg->pdev = pdev;
-	platform_set_drvdata(pdev, vg);
-
-	mem_rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	vg->reg_base = devm_ioremap_resource(dev, mem_rc);
-	if (IS_ERR(vg->reg_base))
-		return PTR_ERR(vg->reg_base);
-
-	raw_spin_lock_init(&vg->lock);
-
-	gc = &vg->chip;
-	gc->label = dev_name(&pdev->dev);
-	gc->owner = THIS_MODULE;
-	gc->request = byt_gpio_request;
-	gc->free = byt_gpio_free;
-	gc->direction_input = byt_gpio_direction_input;
-	gc->direction_output = byt_gpio_direction_output;
-	gc->get = byt_gpio_get;
-	gc->set = byt_gpio_set;
-	gc->dbg_show = byt_gpio_dbg_show;
-	gc->base = -1;
-	gc->can_sleep = false;
-	gc->parent = dev;
+	/* Set up gpio chip */
+	vg->chip	= byt_gpio_chip;
+	gc		= &vg->chip;
+	gc->label	= dev_name(&vg->pdev->dev);
+	gc->base	= -1;
+	gc->can_sleep	= false;
+	gc->parent	= &vg->pdev->dev;
+	gc->ngpio	= vg->soc_data->npins;
 
 #ifdef CONFIG_PM_SLEEP
-	vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio,
+	vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
 				       sizeof(*vg->saved_context), GFP_KERNEL);
 #endif
-
 	ret = gpiochip_add_data(gc, vg);
 	if (ret) {
-		dev_err(&pdev->dev, "failed adding byt-gpio chip\n");
+		dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n");
 		return ret;
 	}
 
+	ret = gpiochip_add_pin_range(&vg->chip, dev_name(&vg->pdev->dev),
+				     0, 0, vg->soc_data->npins);
+	if (ret) {
+		dev_err(&vg->pdev->dev, "failed to add GPIO pin range\n");
+		goto fail;
+	}
+
 	/* set up interrupts  */
-	irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	irq_rc = platform_get_resource(vg->pdev, IORESOURCE_IRQ, 0);
 	if (irq_rc && irq_rc->start) {
 		byt_gpio_irq_init_hw(vg);
 		ret = gpiochip_irqchip_add(gc, &byt_irqchip, 0,
 					   handle_simple_irq, IRQ_TYPE_NONE);
 		if (ret) {
-			dev_err(dev, "failed to add irqchip\n");
-			gpiochip_remove(gc);
-			return ret;
+			dev_err(&vg->pdev->dev, "failed to add irqchip\n");
+			goto fail;
 		}
 
 		gpiochip_set_chained_irqchip(gc, &byt_irqchip,
@@ -626,7 +1711,120 @@
 					     byt_gpio_irq_handler);
 	}
 
-	pm_runtime_enable(dev);
+	return ret;
+
+fail:
+	gpiochip_remove(&vg->chip);
+
+	return ret;
+}
+
+static int byt_set_soc_data(struct byt_gpio *vg,
+			    const struct byt_pinctrl_soc_data *soc_data)
+{
+	int i;
+
+	vg->soc_data = soc_data;
+	vg->communities_copy = devm_kcalloc(&vg->pdev->dev,
+					    soc_data->ncommunities,
+					    sizeof(*vg->communities_copy),
+					    GFP_KERNEL);
+	if (!vg->communities_copy)
+		return -ENOMEM;
+
+	for (i = 0; i < soc_data->ncommunities; i++) {
+		struct byt_community *comm = vg->communities_copy + i;
+		struct resource *mem_rc;
+
+		*comm = vg->soc_data->communities[i];
+
+		mem_rc = platform_get_resource(vg->pdev, IORESOURCE_MEM, 0);
+		comm->reg_base = devm_ioremap_resource(&vg->pdev->dev, mem_rc);
+		if (IS_ERR(comm->reg_base))
+			return PTR_ERR(comm->reg_base);
+	}
+
+	return 0;
+}
+
+static const struct acpi_device_id byt_gpio_acpi_match[] = {
+	{ "INT33B2", (kernel_ulong_t)byt_soc_data },
+	{ "INT33FC", (kernel_ulong_t)byt_soc_data },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
+
+static int byt_pinctrl_probe(struct platform_device *pdev)
+{
+	const struct byt_pinctrl_soc_data *soc_data = NULL;
+	const struct byt_pinctrl_soc_data **soc_table;
+	const struct acpi_device_id *acpi_id;
+	struct acpi_device *acpi_dev;
+	struct byt_gpio *vg;
+	int i, ret;
+
+	acpi_dev = ACPI_COMPANION(&pdev->dev);
+	if (!acpi_dev)
+		return -ENODEV;
+
+	acpi_id = acpi_match_device(byt_gpio_acpi_match, &pdev->dev);
+	if (!acpi_id)
+		return -ENODEV;
+
+	soc_table = (const struct byt_pinctrl_soc_data **)acpi_id->driver_data;
+
+	for (i = 0; soc_table[i]; i++) {
+		if (!strcmp(acpi_dev->pnp.unique_id, soc_table[i]->uid)) {
+			soc_data = soc_table[i];
+			break;
+		}
+	}
+
+	if (!soc_data)
+		return -ENODEV;
+
+	vg = devm_kzalloc(&pdev->dev, sizeof(*vg), GFP_KERNEL);
+	if (!vg)
+		return -ENOMEM;
+
+	vg->pdev = pdev;
+	ret = byt_set_soc_data(vg, soc_data);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to set soc data\n");
+		return ret;
+	}
+
+	vg->pctl_desc		= byt_pinctrl_desc;
+	vg->pctl_desc.name	= dev_name(&pdev->dev);
+	vg->pctl_desc.pins	= vg->soc_data->pins;
+	vg->pctl_desc.npins	= vg->soc_data->npins;
+
+	vg->pctl_dev = pinctrl_register(&vg->pctl_desc, &pdev->dev, vg);
+	if (IS_ERR(vg->pctl_dev)) {
+		dev_err(&pdev->dev, "failed to register pinctrl driver\n");
+		return PTR_ERR(vg->pctl_dev);
+	}
+
+	ret = byt_gpio_probe(vg);
+	if (ret) {
+		pinctrl_unregister(vg->pctl_dev);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, vg);
+	raw_spin_lock_init(&vg->lock);
+	pm_runtime_enable(&pdev->dev);
+
+	return 0;
+}
+
+static int byt_pinctrl_remove(struct platform_device *pdev)
+{
+	struct byt_gpio *vg = platform_get_drvdata(pdev);
+
+	pm_runtime_disable(&pdev->dev);
+	gpiochip_remove(&vg->chip);
+	pinctrl_unregister(vg->pctl_dev);
 
 	return 0;
 }
@@ -638,15 +1836,22 @@
 	struct byt_gpio *vg = platform_get_drvdata(pdev);
 	int i;
 
-	for (i = 0; i < vg->chip.ngpio; i++) {
+	for (i = 0; i < vg->soc_data->npins; i++) {
 		void __iomem *reg;
 		u32 value;
+		unsigned int pin = vg->soc_data->pins[i].number;
 
-		reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+		reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
+		if (!reg) {
+			dev_warn(&vg->pdev->dev,
+				 "Pin %i: could not retrieve conf0 register\n",
+				 i);
+			continue;
+		}
 		value = readl(reg) & BYT_CONF0_RESTORE_MASK;
 		vg->saved_context[i].conf0 = value;
 
-		reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+		reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
 		value = readl(reg) & BYT_VAL_RESTORE_MASK;
 		vg->saved_context[i].val = value;
 	}
@@ -660,11 +1865,18 @@
 	struct byt_gpio *vg = platform_get_drvdata(pdev);
 	int i;
 
-	for (i = 0; i < vg->chip.ngpio; i++) {
+	for (i = 0; i < vg->soc_data->npins; i++) {
 		void __iomem *reg;
 		u32 value;
+		unsigned int pin = vg->soc_data->pins[i].number;
 
-		reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+		reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
+		if (!reg) {
+			dev_warn(&vg->pdev->dev,
+				 "Pin %i: could not retrieve conf0 register\n",
+				 i);
+			continue;
+		}
 		value = readl(reg);
 		if ((value & BYT_CONF0_RESTORE_MASK) !=
 		     vg->saved_context[i].conf0) {
@@ -674,7 +1886,7 @@
 			dev_info(dev, "restored pin %d conf0 %#08x", i, value);
 		}
 
-		reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+		reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
 		value = readl(reg);
 		if ((value & BYT_VAL_RESTORE_MASK) !=
 		     vg->saved_context[i].val) {
@@ -712,26 +1924,9 @@
 			   NULL)
 };
 
-static const struct acpi_device_id byt_gpio_acpi_match[] = {
-	{ "INT33B2", 0 },
-	{ "INT33FC", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
-
-static int byt_gpio_remove(struct platform_device *pdev)
-{
-	struct byt_gpio *vg = platform_get_drvdata(pdev);
-
-	pm_runtime_disable(&pdev->dev);
-	gpiochip_remove(&vg->chip);
-
-	return 0;
-}
-
 static struct platform_driver byt_gpio_driver = {
-	.probe          = byt_gpio_probe,
-	.remove         = byt_gpio_remove,
+	.probe          = byt_pinctrl_probe,
+	.remove         = byt_pinctrl_remove,
 	.driver         = {
 		.name   = "byt_gpio",
 		.pm	= &byt_gpio_pm_ops,
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 4251e07..ac4f564 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1526,17 +1526,16 @@
 	pctrl->pctldesc.pins = pctrl->community->pins;
 	pctrl->pctldesc.npins = pctrl->community->npins;
 
-	pctrl->pctldev = pinctrl_register(&pctrl->pctldesc, &pdev->dev, pctrl);
+	pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc,
+					       pctrl);
 	if (IS_ERR(pctrl->pctldev)) {
 		dev_err(&pdev->dev, "failed to register pinctrl driver\n");
 		return PTR_ERR(pctrl->pctldev);
 	}
 
 	ret = chv_gpio_probe(pctrl, irq);
-	if (ret) {
-		pinctrl_unregister(pctrl->pctldev);
+	if (ret)
 		return ret;
-	}
 
 	platform_set_drvdata(pdev, pctrl);
 
@@ -1548,7 +1547,6 @@
 	struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
 
 	gpiochip_remove(&pctrl->chip);
-	pinctrl_unregister(pctrl->pctldev);
 
 	return 0;
 }
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 85536b4..3584e50 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -665,6 +665,35 @@
 	spin_unlock(&pctrl->lock);
 }
 
+static void intel_gpio_irq_enable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+	const struct intel_community *community;
+	unsigned pin = irqd_to_hwirq(d);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pctrl->lock, flags);
+
+	community = intel_get_community(pctrl, pin);
+	if (community) {
+		unsigned padno = pin_to_padno(community, pin);
+		unsigned gpp_size = community->gpp_size;
+		unsigned gpp_offset = padno % gpp_size;
+		unsigned gpp = padno / gpp_size;
+		u32 value;
+
+		/* Clear interrupt status first to avoid unexpected interrupt */
+		writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+
+		value = readl(community->regs + community->ie_offset + gpp * 4);
+		value |= BIT(gpp_offset);
+		writel(value, community->regs + community->ie_offset + gpp * 4);
+	}
+
+	spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
 static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -741,8 +770,9 @@
 		value |= PADCFG0_RXINV;
 	} else if (type & IRQ_TYPE_EDGE_RISING) {
 		value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
-	} else if (type & IRQ_TYPE_LEVEL_LOW) {
-		value |= PADCFG0_RXINV;
+	} else if (type & IRQ_TYPE_LEVEL_MASK) {
+		if (type & IRQ_TYPE_LEVEL_LOW)
+			value |= PADCFG0_RXINV;
 	} else {
 		value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
 	}
@@ -852,6 +882,7 @@
 
 static struct irq_chip intel_gpio_irqchip = {
 	.name = "intel-gpio",
+	.irq_enable = intel_gpio_irq_enable,
 	.irq_ack = intel_gpio_irq_ack,
 	.irq_mask = intel_gpio_irq_mask,
 	.irq_unmask = intel_gpio_irq_unmask,
@@ -1014,17 +1045,16 @@
 	pctrl->pctldesc.pins = pctrl->soc->pins;
 	pctrl->pctldesc.npins = pctrl->soc->npins;
 
-	pctrl->pctldev = pinctrl_register(&pctrl->pctldesc, &pdev->dev, pctrl);
+	pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc,
+					       pctrl);
 	if (IS_ERR(pctrl->pctldev)) {
 		dev_err(&pdev->dev, "failed to register pinctrl driver\n");
 		return PTR_ERR(pctrl->pctldev);
 	}
 
 	ret = intel_gpio_probe(pctrl, irq);
-	if (ret) {
-		pinctrl_unregister(pctrl->pctldev);
+	if (ret)
 		return ret;
-	}
 
 	platform_set_drvdata(pdev, pctrl);
 
@@ -1037,7 +1067,6 @@
 	struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
 
 	gpiochip_remove(&pctrl->chip);
-	pinctrl_unregister(pctrl->pctldev);
 
 	return 0;
 }
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 2bbe6f7..207b13b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -605,7 +605,7 @@
 		ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map,
 				&reserved_maps, num_maps);
 		if (ret < 0) {
-			pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+			pinctrl_utils_free_map(pctldev, *map, *num_maps);
 			of_node_put(np);
 			return ret;
 		}
@@ -644,7 +644,7 @@
 
 static const struct pinctrl_ops mtk_pctrl_ops = {
 	.dt_node_to_map		= mtk_pctrl_dt_node_to_map,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 	.get_groups_count	= mtk_pctrl_get_groups_count,
 	.get_group_name		= mtk_pctrl_get_group_name,
 	.get_group_pins		= mtk_pctrl_get_group_pins,
@@ -1004,7 +1004,8 @@
 	struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
 	int eint_num, virq, eint_offset;
 	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
-	static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
+	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
+						128000, 256000};
 	const struct mtk_desc_pin *pin;
 	struct irq_data *d;
 
@@ -1022,9 +1023,9 @@
 	if (!mtk_eint_can_en_debounce(pctl, eint_num))
 		return -ENOSYS;
 
-	dbnc = ARRAY_SIZE(dbnc_arr);
-	for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
-		if (debounce <= dbnc_arr[i]) {
+	dbnc = ARRAY_SIZE(debounce_time);
+	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
+		if (debounce <= debounce_time[i]) {
 			dbnc = i;
 			break;
 		}
@@ -1395,17 +1396,16 @@
 	pctl->pctl_desc.pmxops = &mtk_pmx_ops;
 	pctl->dev = &pdev->dev;
 
-	pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
+	pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, &pctl->pctl_desc,
+					       pctl);
 	if (IS_ERR(pctl->pctl_dev)) {
 		dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
 		return PTR_ERR(pctl->pctl_dev);
 	}
 
 	pctl->chip = devm_kzalloc(&pdev->dev, sizeof(*pctl->chip), GFP_KERNEL);
-	if (!pctl->chip) {
-		ret = -ENOMEM;
-		goto pctrl_error;
-	}
+	if (!pctl->chip)
+		return -ENOMEM;
 
 	*pctl->chip = mtk_gpio_chip;
 	pctl->chip->ngpio = pctl->devdata->npins;
@@ -1414,10 +1414,8 @@
 	pctl->chip->base = -1;
 
 	ret = gpiochip_add_data(pctl->chip, pctl);
-	if (ret) {
-		ret = -EINVAL;
-		goto pctrl_error;
-	}
+	if (ret)
+		return -EINVAL;
 
 	/* Register the GPIO to pin mappings. */
 	ret = gpiochip_add_pin_range(pctl->chip, dev_name(&pdev->dev),
@@ -1495,8 +1493,6 @@
 
 chip_error:
 	gpiochip_remove(pctl->chip);
-pctrl_error:
-	pinctrl_unregister(pctl->pctl_dev);
 	return ret;
 }
 
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index c751d22..24434f1 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -1,2 +1,2 @@
-obj-y	+= pinctrl-meson8.o pinctrl-meson8b.o
+obj-y	+= pinctrl-meson8.o pinctrl-meson8b.o pinctrl-meson-gxbb.o
 obj-y	+= pinctrl-meson.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
new file mode 100644
index 0000000..eeabafb
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -0,0 +1,432 @@
+/*
+ * Pin controller and GPIO driver for Amlogic Meson GXBB.
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/gpio/meson-gxbb-gpio.h>
+#include "pinctrl-meson.h"
+
+#define EE_OFF	14
+
+static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = {
+	MESON_PIN(GPIOZ_0, EE_OFF),
+	MESON_PIN(GPIOZ_1, EE_OFF),
+	MESON_PIN(GPIOZ_2, EE_OFF),
+	MESON_PIN(GPIOZ_3, EE_OFF),
+	MESON_PIN(GPIOZ_4, EE_OFF),
+	MESON_PIN(GPIOZ_5, EE_OFF),
+	MESON_PIN(GPIOZ_6, EE_OFF),
+	MESON_PIN(GPIOZ_7, EE_OFF),
+	MESON_PIN(GPIOZ_8, EE_OFF),
+	MESON_PIN(GPIOZ_9, EE_OFF),
+	MESON_PIN(GPIOZ_10, EE_OFF),
+	MESON_PIN(GPIOZ_11, EE_OFF),
+	MESON_PIN(GPIOZ_12, EE_OFF),
+	MESON_PIN(GPIOZ_13, EE_OFF),
+	MESON_PIN(GPIOZ_14, EE_OFF),
+	MESON_PIN(GPIOZ_15, EE_OFF),
+
+	MESON_PIN(GPIOH_0, EE_OFF),
+	MESON_PIN(GPIOH_1, EE_OFF),
+	MESON_PIN(GPIOH_2, EE_OFF),
+	MESON_PIN(GPIOH_3, EE_OFF),
+
+	MESON_PIN(BOOT_0, EE_OFF),
+	MESON_PIN(BOOT_1, EE_OFF),
+	MESON_PIN(BOOT_2, EE_OFF),
+	MESON_PIN(BOOT_3, EE_OFF),
+	MESON_PIN(BOOT_4, EE_OFF),
+	MESON_PIN(BOOT_5, EE_OFF),
+	MESON_PIN(BOOT_6, EE_OFF),
+	MESON_PIN(BOOT_7, EE_OFF),
+	MESON_PIN(BOOT_8, EE_OFF),
+	MESON_PIN(BOOT_9, EE_OFF),
+	MESON_PIN(BOOT_10, EE_OFF),
+	MESON_PIN(BOOT_11, EE_OFF),
+	MESON_PIN(BOOT_12, EE_OFF),
+	MESON_PIN(BOOT_13, EE_OFF),
+	MESON_PIN(BOOT_14, EE_OFF),
+	MESON_PIN(BOOT_15, EE_OFF),
+	MESON_PIN(BOOT_16, EE_OFF),
+	MESON_PIN(BOOT_17, EE_OFF),
+
+	MESON_PIN(CARD_0, EE_OFF),
+	MESON_PIN(CARD_1, EE_OFF),
+	MESON_PIN(CARD_2, EE_OFF),
+	MESON_PIN(CARD_3, EE_OFF),
+	MESON_PIN(CARD_4, EE_OFF),
+	MESON_PIN(CARD_5, EE_OFF),
+	MESON_PIN(CARD_6, EE_OFF),
+
+	MESON_PIN(GPIODV_0, EE_OFF),
+	MESON_PIN(GPIODV_1, EE_OFF),
+	MESON_PIN(GPIODV_2, EE_OFF),
+	MESON_PIN(GPIODV_3, EE_OFF),
+	MESON_PIN(GPIODV_4, EE_OFF),
+	MESON_PIN(GPIODV_5, EE_OFF),
+	MESON_PIN(GPIODV_6, EE_OFF),
+	MESON_PIN(GPIODV_7, EE_OFF),
+	MESON_PIN(GPIODV_8, EE_OFF),
+	MESON_PIN(GPIODV_9, EE_OFF),
+	MESON_PIN(GPIODV_10, EE_OFF),
+	MESON_PIN(GPIODV_11, EE_OFF),
+	MESON_PIN(GPIODV_12, EE_OFF),
+	MESON_PIN(GPIODV_13, EE_OFF),
+	MESON_PIN(GPIODV_14, EE_OFF),
+	MESON_PIN(GPIODV_15, EE_OFF),
+	MESON_PIN(GPIODV_16, EE_OFF),
+	MESON_PIN(GPIODV_17, EE_OFF),
+	MESON_PIN(GPIODV_19, EE_OFF),
+	MESON_PIN(GPIODV_20, EE_OFF),
+	MESON_PIN(GPIODV_21, EE_OFF),
+	MESON_PIN(GPIODV_22, EE_OFF),
+	MESON_PIN(GPIODV_23, EE_OFF),
+	MESON_PIN(GPIODV_24, EE_OFF),
+	MESON_PIN(GPIODV_25, EE_OFF),
+	MESON_PIN(GPIODV_26, EE_OFF),
+	MESON_PIN(GPIODV_27, EE_OFF),
+	MESON_PIN(GPIODV_28, EE_OFF),
+	MESON_PIN(GPIODV_29, EE_OFF),
+
+	MESON_PIN(GPIOY_0, EE_OFF),
+	MESON_PIN(GPIOY_1, EE_OFF),
+	MESON_PIN(GPIOY_2, EE_OFF),
+	MESON_PIN(GPIOY_3, EE_OFF),
+	MESON_PIN(GPIOY_4, EE_OFF),
+	MESON_PIN(GPIOY_5, EE_OFF),
+	MESON_PIN(GPIOY_6, EE_OFF),
+	MESON_PIN(GPIOY_7, EE_OFF),
+	MESON_PIN(GPIOY_8, EE_OFF),
+	MESON_PIN(GPIOY_9, EE_OFF),
+	MESON_PIN(GPIOY_10, EE_OFF),
+	MESON_PIN(GPIOY_11, EE_OFF),
+	MESON_PIN(GPIOY_12, EE_OFF),
+	MESON_PIN(GPIOY_13, EE_OFF),
+	MESON_PIN(GPIOY_14, EE_OFF),
+	MESON_PIN(GPIOY_15, EE_OFF),
+	MESON_PIN(GPIOY_16, EE_OFF),
+
+	MESON_PIN(GPIOX_0, EE_OFF),
+	MESON_PIN(GPIOX_1, EE_OFF),
+	MESON_PIN(GPIOX_2, EE_OFF),
+	MESON_PIN(GPIOX_3, EE_OFF),
+	MESON_PIN(GPIOX_4, EE_OFF),
+	MESON_PIN(GPIOX_5, EE_OFF),
+	MESON_PIN(GPIOX_6, EE_OFF),
+	MESON_PIN(GPIOX_7, EE_OFF),
+	MESON_PIN(GPIOX_8, EE_OFF),
+	MESON_PIN(GPIOX_9, EE_OFF),
+	MESON_PIN(GPIOX_10, EE_OFF),
+	MESON_PIN(GPIOX_11, EE_OFF),
+	MESON_PIN(GPIOX_12, EE_OFF),
+	MESON_PIN(GPIOX_13, EE_OFF),
+	MESON_PIN(GPIOX_14, EE_OFF),
+	MESON_PIN(GPIOX_15, EE_OFF),
+	MESON_PIN(GPIOX_16, EE_OFF),
+	MESON_PIN(GPIOX_17, EE_OFF),
+	MESON_PIN(GPIOX_18, EE_OFF),
+	MESON_PIN(GPIOX_19, EE_OFF),
+	MESON_PIN(GPIOX_20, EE_OFF),
+	MESON_PIN(GPIOX_21, EE_OFF),
+	MESON_PIN(GPIOX_22, EE_OFF),
+
+	MESON_PIN(GPIOCLK_0, EE_OFF),
+	MESON_PIN(GPIOCLK_1, EE_OFF),
+	MESON_PIN(GPIOCLK_2, EE_OFF),
+	MESON_PIN(GPIOCLK_3, EE_OFF),
+
+	MESON_PIN(GPIO_TEST_N, EE_OFF),
+};
+
+static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
+	MESON_PIN(GPIOAO_0, 0),
+	MESON_PIN(GPIOAO_1, 0),
+	MESON_PIN(GPIOAO_2, 0),
+	MESON_PIN(GPIOAO_3, 0),
+	MESON_PIN(GPIOAO_4, 0),
+	MESON_PIN(GPIOAO_5, 0),
+	MESON_PIN(GPIOAO_6, 0),
+	MESON_PIN(GPIOAO_7, 0),
+	MESON_PIN(GPIOAO_8, 0),
+	MESON_PIN(GPIOAO_9, 0),
+	MESON_PIN(GPIOAO_10, 0),
+	MESON_PIN(GPIOAO_11, 0),
+	MESON_PIN(GPIOAO_12, 0),
+	MESON_PIN(GPIOAO_13, 0),
+};
+
+static const unsigned int uart_tx_ao_a_pins[]	= { PIN(GPIOAO_0, 0) };
+static const unsigned int uart_rx_ao_a_pins[]	= { PIN(GPIOAO_1, 0) };
+static const unsigned int uart_cts_ao_a_pins[]	= { PIN(GPIOAO_2, 0) };
+static const unsigned int uart_rts_ao_a_pins[]	= { PIN(GPIOAO_3, 0) };
+
+static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
+	GPIO_GROUP(GPIOZ_0, EE_OFF),
+	GPIO_GROUP(GPIOZ_1, EE_OFF),
+	GPIO_GROUP(GPIOZ_2, EE_OFF),
+	GPIO_GROUP(GPIOZ_3, EE_OFF),
+	GPIO_GROUP(GPIOZ_4, EE_OFF),
+	GPIO_GROUP(GPIOZ_5, EE_OFF),
+	GPIO_GROUP(GPIOZ_6, EE_OFF),
+	GPIO_GROUP(GPIOZ_7, EE_OFF),
+	GPIO_GROUP(GPIOZ_8, EE_OFF),
+	GPIO_GROUP(GPIOZ_9, EE_OFF),
+	GPIO_GROUP(GPIOZ_10, EE_OFF),
+	GPIO_GROUP(GPIOZ_11, EE_OFF),
+	GPIO_GROUP(GPIOZ_12, EE_OFF),
+	GPIO_GROUP(GPIOZ_13, EE_OFF),
+	GPIO_GROUP(GPIOZ_14, EE_OFF),
+	GPIO_GROUP(GPIOZ_15, EE_OFF),
+
+	GPIO_GROUP(GPIOH_0, EE_OFF),
+	GPIO_GROUP(GPIOH_1, EE_OFF),
+	GPIO_GROUP(GPIOH_2, EE_OFF),
+	GPIO_GROUP(GPIOH_3, EE_OFF),
+
+	GPIO_GROUP(BOOT_0, EE_OFF),
+	GPIO_GROUP(BOOT_1, EE_OFF),
+	GPIO_GROUP(BOOT_2, EE_OFF),
+	GPIO_GROUP(BOOT_3, EE_OFF),
+	GPIO_GROUP(BOOT_4, EE_OFF),
+	GPIO_GROUP(BOOT_5, EE_OFF),
+	GPIO_GROUP(BOOT_6, EE_OFF),
+	GPIO_GROUP(BOOT_7, EE_OFF),
+	GPIO_GROUP(BOOT_8, EE_OFF),
+	GPIO_GROUP(BOOT_9, EE_OFF),
+	GPIO_GROUP(BOOT_10, EE_OFF),
+	GPIO_GROUP(BOOT_11, EE_OFF),
+	GPIO_GROUP(BOOT_12, EE_OFF),
+	GPIO_GROUP(BOOT_13, EE_OFF),
+	GPIO_GROUP(BOOT_14, EE_OFF),
+	GPIO_GROUP(BOOT_15, EE_OFF),
+	GPIO_GROUP(BOOT_16, EE_OFF),
+	GPIO_GROUP(BOOT_17, EE_OFF),
+
+	GPIO_GROUP(CARD_0, EE_OFF),
+	GPIO_GROUP(CARD_1, EE_OFF),
+	GPIO_GROUP(CARD_2, EE_OFF),
+	GPIO_GROUP(CARD_3, EE_OFF),
+	GPIO_GROUP(CARD_4, EE_OFF),
+	GPIO_GROUP(CARD_5, EE_OFF),
+	GPIO_GROUP(CARD_6, EE_OFF),
+
+	GPIO_GROUP(GPIODV_0, EE_OFF),
+	GPIO_GROUP(GPIODV_1, EE_OFF),
+	GPIO_GROUP(GPIODV_2, EE_OFF),
+	GPIO_GROUP(GPIODV_3, EE_OFF),
+	GPIO_GROUP(GPIODV_4, EE_OFF),
+	GPIO_GROUP(GPIODV_5, EE_OFF),
+	GPIO_GROUP(GPIODV_6, EE_OFF),
+	GPIO_GROUP(GPIODV_7, EE_OFF),
+	GPIO_GROUP(GPIODV_8, EE_OFF),
+	GPIO_GROUP(GPIODV_9, EE_OFF),
+	GPIO_GROUP(GPIODV_10, EE_OFF),
+	GPIO_GROUP(GPIODV_11, EE_OFF),
+	GPIO_GROUP(GPIODV_12, EE_OFF),
+	GPIO_GROUP(GPIODV_13, EE_OFF),
+	GPIO_GROUP(GPIODV_14, EE_OFF),
+	GPIO_GROUP(GPIODV_15, EE_OFF),
+	GPIO_GROUP(GPIODV_16, EE_OFF),
+	GPIO_GROUP(GPIODV_17, EE_OFF),
+	GPIO_GROUP(GPIODV_19, EE_OFF),
+	GPIO_GROUP(GPIODV_20, EE_OFF),
+	GPIO_GROUP(GPIODV_21, EE_OFF),
+	GPIO_GROUP(GPIODV_22, EE_OFF),
+	GPIO_GROUP(GPIODV_23, EE_OFF),
+	GPIO_GROUP(GPIODV_24, EE_OFF),
+	GPIO_GROUP(GPIODV_25, EE_OFF),
+	GPIO_GROUP(GPIODV_26, EE_OFF),
+	GPIO_GROUP(GPIODV_27, EE_OFF),
+	GPIO_GROUP(GPIODV_28, EE_OFF),
+	GPIO_GROUP(GPIODV_29, EE_OFF),
+
+	GPIO_GROUP(GPIOY_0, EE_OFF),
+	GPIO_GROUP(GPIOY_1, EE_OFF),
+	GPIO_GROUP(GPIOY_2, EE_OFF),
+	GPIO_GROUP(GPIOY_3, EE_OFF),
+	GPIO_GROUP(GPIOY_4, EE_OFF),
+	GPIO_GROUP(GPIOY_5, EE_OFF),
+	GPIO_GROUP(GPIOY_6, EE_OFF),
+	GPIO_GROUP(GPIOY_7, EE_OFF),
+	GPIO_GROUP(GPIOY_8, EE_OFF),
+	GPIO_GROUP(GPIOY_9, EE_OFF),
+	GPIO_GROUP(GPIOY_10, EE_OFF),
+	GPIO_GROUP(GPIOY_11, EE_OFF),
+	GPIO_GROUP(GPIOY_12, EE_OFF),
+	GPIO_GROUP(GPIOY_13, EE_OFF),
+	GPIO_GROUP(GPIOY_14, EE_OFF),
+	GPIO_GROUP(GPIOY_15, EE_OFF),
+	GPIO_GROUP(GPIOY_16, EE_OFF),
+
+	GPIO_GROUP(GPIOX_0, EE_OFF),
+	GPIO_GROUP(GPIOX_1, EE_OFF),
+	GPIO_GROUP(GPIOX_2, EE_OFF),
+	GPIO_GROUP(GPIOX_3, EE_OFF),
+	GPIO_GROUP(GPIOX_4, EE_OFF),
+	GPIO_GROUP(GPIOX_5, EE_OFF),
+	GPIO_GROUP(GPIOX_6, EE_OFF),
+	GPIO_GROUP(GPIOX_7, EE_OFF),
+	GPIO_GROUP(GPIOX_8, EE_OFF),
+	GPIO_GROUP(GPIOX_9, EE_OFF),
+	GPIO_GROUP(GPIOX_10, EE_OFF),
+	GPIO_GROUP(GPIOX_11, EE_OFF),
+	GPIO_GROUP(GPIOX_12, EE_OFF),
+	GPIO_GROUP(GPIOX_13, EE_OFF),
+	GPIO_GROUP(GPIOX_14, EE_OFF),
+	GPIO_GROUP(GPIOX_15, EE_OFF),
+	GPIO_GROUP(GPIOX_16, EE_OFF),
+	GPIO_GROUP(GPIOX_17, EE_OFF),
+	GPIO_GROUP(GPIOX_18, EE_OFF),
+	GPIO_GROUP(GPIOX_19, EE_OFF),
+	GPIO_GROUP(GPIOX_20, EE_OFF),
+	GPIO_GROUP(GPIOX_21, EE_OFF),
+	GPIO_GROUP(GPIOX_22, EE_OFF),
+
+	GPIO_GROUP(GPIOCLK_0, EE_OFF),
+	GPIO_GROUP(GPIOCLK_1, EE_OFF),
+	GPIO_GROUP(GPIOCLK_2, EE_OFF),
+	GPIO_GROUP(GPIOCLK_3, EE_OFF),
+
+	GPIO_GROUP(GPIO_TEST_N, EE_OFF),
+};
+
+static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
+	GPIO_GROUP(GPIOAO_0, 0),
+	GPIO_GROUP(GPIOAO_1, 0),
+	GPIO_GROUP(GPIOAO_2, 0),
+	GPIO_GROUP(GPIOAO_3, 0),
+	GPIO_GROUP(GPIOAO_4, 0),
+	GPIO_GROUP(GPIOAO_5, 0),
+	GPIO_GROUP(GPIOAO_6, 0),
+	GPIO_GROUP(GPIOAO_7, 0),
+	GPIO_GROUP(GPIOAO_8, 0),
+	GPIO_GROUP(GPIOAO_9, 0),
+	GPIO_GROUP(GPIOAO_10, 0),
+	GPIO_GROUP(GPIOAO_11, 0),
+	GPIO_GROUP(GPIOAO_12, 0),
+	GPIO_GROUP(GPIOAO_13, 0),
+
+	/* bank AO */
+	GROUP(uart_tx_ao_a,	0,	12),
+	GROUP(uart_rx_ao_a,	0,	11),
+	GROUP(uart_cts_ao_a,	0,	10),
+	GROUP(uart_rts_ao_a,	0,	9),
+};
+
+static const char * const gpio_periphs_groups[] = {
+	"GPIOZ_0", "GPIOZ_1", "GPIOZ_2", "GPIOZ_3", "GPIOZ_4",
+	"GPIOZ_5", "GPIOZ_6", "GPIOZ_7", "GPIOZ_8", "GPIOZ_9",
+	"GPIOZ_10", "GPIOZ_11", "GPIOZ_12", "GPIOZ_13", "GPIOZ_14",
+	"GPIOZ_15",
+
+	"GPIOH_0", "GPIOH_1", "GPIOH_2", "GPIOH_3",
+
+	"BOOT_0", "BOOT_1", "BOOT_2", "BOOT_3", "BOOT_4",
+	"BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
+	"BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
+	"BOOT_15", "BOOT_16", "BOOT_17",
+
+	"CARD_0", "CARD_1", "CARD_2", "CARD_3", "CARD_4",
+	"CARD_5", "CARD_6",
+
+	"GPIODV_0", "GPIODV_1", "GPIODV_2", "GPIODV_3", "GPIODV_4",
+	"GPIODV_5", "GPIODV_6", "GPIODV_7", "GPIODV_8", "GPIODV_9",
+	"GPIODV_10", "GPIODV_11", "GPIODV_12", "GPIODV_13", "GPIODV_14",
+	"GPIODV_15", "GPIODV_16", "GPIODV_17", "GPIODV_18", "GPIODV_19",
+	"GPIODV_20", "GPIODV_21", "GPIODV_22", "GPIODV_23", "GPIODV_24",
+	"GPIODV_25", "GPIODV_26", "GPIODV_27", "GPIODV_28", "GPIODV_29",
+
+	"GPIOY_0", "GPIOY_1", "GPIOY_2", "GPIOY_3", "GPIOY_4",
+	"GPIOY_5", "GPIOY_6", "GPIOY_7", "GPIOY_8", "GPIOY_9",
+	"GPIOY_10", "GPIOY_11", "GPIOY_12", "GPIOY_13", "GPIOY_14",
+	"GPIOY_15", "GPIOY_16",
+
+	"GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
+	"GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
+	"GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
+	"GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18", "GPIOX_19",
+	"GPIOX_20", "GPIOX_21", "GPIOX_22",
+
+	"GPIO_TEST_N",
+};
+
+static const char * const gpio_aobus_groups[] = {
+	"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
+	"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
+	"GPIOAO_10", "GPIOAO_11", "GPIOAO_12", "GPIOAO_13",
+};
+
+static const char * const uart_ao_groups[] = {
+	"uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a"
+};
+
+static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
+	FUNCTION(gpio_periphs),
+};
+
+static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
+	FUNCTION(gpio_aobus),
+	FUNCTION(uart_ao),
+};
+
+static struct meson_bank meson_gxbb_periphs_banks[] = {
+	/*   name    first                      last                    pullen  pull    dir     out     in  */
+	BANK("X",    PIN(GPIOX_0, EE_OFF),	PIN(GPIOX_22, EE_OFF),  4,  0,  4,  0,  12, 0,  13, 0,  14, 0),
+	BANK("Y",    PIN(GPIOY_0, EE_OFF),	PIN(GPIOY_16, EE_OFF),  1,  0,  1,  0,  3,  0,  4,  0,  5,  0),
+	BANK("DV",   PIN(GPIODV_0, EE_OFF),	PIN(GPIODV_29, EE_OFF), 0,  0,  0,  0,  0,  0,  1,  0,  2,  0),
+	BANK("H",    PIN(GPIOH_0, EE_OFF),	PIN(GPIOH_3, EE_OFF),   1, 20,  1, 20,  3, 20,  4, 20,  5, 20),
+	BANK("Z",    PIN(GPIOZ_0, EE_OFF),	PIN(GPIOZ_15, EE_OFF),  3,  0,  3,  0,  9,  0,  10, 0, 11,  0),
+	BANK("CARD", PIN(CARD_0, EE_OFF),	PIN(CARD_6, EE_OFF),    2, 20,  2, 20,  6, 20,  7, 20,  8, 20),
+	BANK("BOOT", PIN(BOOT_0, EE_OFF),	PIN(BOOT_17, EE_OFF),   2,  0,  2,  0,  6,  0,  7,  0,  8,  0),
+	BANK("CLK",  PIN(GPIOCLK_0, EE_OFF),	PIN(GPIOCLK_3, EE_OFF), 3, 28,  3, 28,  9, 28, 10, 28, 11, 28),
+};
+
+static struct meson_bank meson_gxbb_aobus_banks[] = {
+	/*   name    first              last               pullen  pull    dir     out     in  */
+	BANK("AO",   PIN(GPIOAO_0, 0),  PIN(GPIOAO_13, 0), 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
+};
+
+static struct meson_domain_data meson_gxbb_periphs_domain_data = {
+	.name		= "periphs-banks",
+	.banks		= meson_gxbb_periphs_banks,
+	.num_banks	= ARRAY_SIZE(meson_gxbb_periphs_banks),
+	.pin_base	= 14,
+	.num_pins	= 120,
+};
+
+static struct meson_domain_data meson_gxbb_aobus_domain_data = {
+	.name		= "aobus-banks",
+	.banks		= meson_gxbb_aobus_banks,
+	.num_banks	= ARRAY_SIZE(meson_gxbb_aobus_banks),
+	.pin_base	= 0,
+	.num_pins	= 14,
+};
+
+struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
+	.pins		= meson_gxbb_periphs_pins,
+	.groups		= meson_gxbb_periphs_groups,
+	.funcs		= meson_gxbb_periphs_functions,
+	.domain_data	= &meson_gxbb_periphs_domain_data,
+	.num_pins	= ARRAY_SIZE(meson_gxbb_periphs_pins),
+	.num_groups	= ARRAY_SIZE(meson_gxbb_periphs_groups),
+	.num_funcs	= ARRAY_SIZE(meson_gxbb_periphs_functions),
+};
+
+struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
+	.pins		= meson_gxbb_aobus_pins,
+	.groups		= meson_gxbb_aobus_groups,
+	.funcs		= meson_gxbb_aobus_functions,
+	.domain_data	= &meson_gxbb_aobus_domain_data,
+	.num_pins	= ARRAY_SIZE(meson_gxbb_aobus_pins),
+	.num_groups	= ARRAY_SIZE(meson_gxbb_aobus_groups),
+	.num_funcs	= ARRAY_SIZE(meson_gxbb_aobus_functions),
+};
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 0bdb8fd..11623c6 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -171,7 +171,7 @@
 	.get_group_name		= meson_get_group_name,
 	.get_group_pins		= meson_get_group_pins,
 	.dt_node_to_map		= pinconf_generic_dt_node_to_map_all,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 	.pin_dbg_show		= meson_pin_dbg_show,
 };
 
@@ -549,6 +549,14 @@
 		.compatible = "amlogic,meson8b-aobus-pinctrl",
 		.data = &meson8b_aobus_pinctrl_data,
 	},
+	{
+		.compatible = "amlogic,meson-gxbb-periphs-pinctrl",
+		.data = &meson_gxbb_periphs_pinctrl_data,
+	},
+	{
+		.compatible = "amlogic,meson-gxbb-aobus-pinctrl",
+		.data = &meson_gxbb_aobus_pinctrl_data,
+	},
 	{ },
 };
 
@@ -713,7 +721,7 @@
 	pc->desc.pins		= pc->data->pins;
 	pc->desc.npins		= pc->data->num_pins;
 
-	pc->pcdev = pinctrl_register(&pc->desc, pc->dev, pc);
+	pc->pcdev = devm_pinctrl_register(pc->dev, &pc->desc, pc);
 	if (IS_ERR(pc->pcdev)) {
 		dev_err(pc->dev, "can't register pinctrl device");
 		return PTR_ERR(pc->pcdev);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 9c93e0d..d89442e 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -199,3 +199,5 @@
 extern struct meson_pinctrl_data meson8_aobus_pinctrl_data;
 extern struct meson_pinctrl_data meson8b_cbus_pinctrl_data;
 extern struct meson_pinctrl_data meson8b_aobus_pinctrl_data;
+extern struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data;
+extern struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index a100bcf..874f2ed 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -564,7 +564,7 @@
 	GROUP(eth_rx_clk,	6,	3),
 	GROUP(eth_txd0_1,	6,	4),
 	GROUP(eth_txd1_1,	6,	5),
-	GROUP(eth_tx_en,	6,	0),
+	GROUP(eth_tx_en,	6,	6),
 	GROUP(eth_ref_clk,	6,	8),
 	GROUP(eth_mdc,		6,	9),
 	GROUP(eth_mdio_en,	6,	10),
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index 73dc1bc..9cc1cc3 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -417,18 +417,12 @@
 	return mvebu_pinctrl_probe(pdev);
 }
 
-static int armada_370_pinctrl_remove(struct platform_device *pdev)
-{
-	return mvebu_pinctrl_remove(pdev);
-}
-
 static struct platform_driver armada_370_pinctrl_driver = {
 	.driver = {
 		.name = "armada-370-pinctrl",
 		.of_match_table = armada_370_pinctrl_of_match,
 	},
 	.probe = armada_370_pinctrl_probe,
-	.remove = armada_370_pinctrl_remove,
 };
 
 module_platform_driver(armada_370_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
index 54e9fbd..0706514 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-375.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
@@ -435,18 +435,12 @@
 	return mvebu_pinctrl_probe(pdev);
 }
 
-static int armada_375_pinctrl_remove(struct platform_device *pdev)
-{
-	return mvebu_pinctrl_remove(pdev);
-}
-
 static struct platform_driver armada_375_pinctrl_driver = {
 	.driver = {
 		.name = "armada-375-pinctrl",
 		.of_match_table = of_match_ptr(armada_375_pinctrl_of_match),
 	},
 	.probe = armada_375_pinctrl_probe,
-	.remove = armada_375_pinctrl_remove,
 };
 
 module_platform_driver(armada_375_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
index 6ec82c6..4e84c8e 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -446,18 +446,12 @@
 	return mvebu_pinctrl_probe(pdev);
 }
 
-static int armada_38x_pinctrl_remove(struct platform_device *pdev)
-{
-	return mvebu_pinctrl_remove(pdev);
-}
-
 static struct platform_driver armada_38x_pinctrl_driver = {
 	.driver = {
 		.name = "armada-38x-pinctrl",
 		.of_match_table = of_match_ptr(armada_38x_pinctrl_of_match),
 	},
 	.probe = armada_38x_pinctrl_probe,
-	.remove = armada_38x_pinctrl_remove,
 };
 
 module_platform_driver(armada_38x_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
index fcfe9b4..e288f8b 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
@@ -428,18 +428,12 @@
 	return mvebu_pinctrl_probe(pdev);
 }
 
-static int armada_39x_pinctrl_remove(struct platform_device *pdev)
-{
-	return mvebu_pinctrl_remove(pdev);
-}
-
 static struct platform_driver armada_39x_pinctrl_driver = {
 	.driver = {
 		.name = "armada-39x-pinctrl",
 		.of_match_table = of_match_ptr(armada_39x_pinctrl_of_match),
 	},
 	.probe = armada_39x_pinctrl_probe,
-	.remove = armada_39x_pinctrl_remove,
 };
 
 module_platform_driver(armada_39x_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index bf70e09..e4ea71a 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -502,18 +502,12 @@
 	return mvebu_pinctrl_probe(pdev);
 }
 
-static int armada_xp_pinctrl_remove(struct platform_device *pdev)
-{
-	return mvebu_pinctrl_remove(pdev);
-}
-
 static struct platform_driver armada_xp_pinctrl_driver = {
 	.driver = {
 		.name = "armada-xp-pinctrl",
 		.of_match_table = armada_xp_pinctrl_of_match,
 	},
 	.probe = armada_xp_pinctrl_probe,
-	.remove = armada_xp_pinctrl_remove,
 	.suspend = armada_xp_pinctrl_suspend,
 	.resume = armada_xp_pinctrl_resume,
 };
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 95bfd06..f93ae0d 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -840,12 +840,9 @@
 
 static int dove_pinctrl_remove(struct platform_device *pdev)
 {
-	int ret;
-
-	ret = mvebu_pinctrl_remove(pdev);
 	if (!IS_ERR(clk))
 		clk_disable_unprepare(clk);
-	return ret;
+	return 0;
 }
 
 static struct platform_driver dove_pinctrl_driver = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index 0f07dc55..a78e9a4 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -481,18 +481,12 @@
 	return mvebu_pinctrl_probe(pdev);
 }
 
-static int kirkwood_pinctrl_remove(struct platform_device *pdev)
-{
-	return mvebu_pinctrl_remove(pdev);
-}
-
 static struct platform_driver kirkwood_pinctrl_driver = {
 	.driver = {
 		.name = "kirkwood-pinctrl",
 		.of_match_table = kirkwood_pinctrl_of_match,
 	},
 	.probe = kirkwood_pinctrl_probe,
-	.remove = kirkwood_pinctrl_remove,
 };
 
 module_platform_driver(kirkwood_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 3ef798f..b6ec6db 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -711,7 +711,7 @@
 		return ret;
 	}
 
-	pctl->pctldev = pinctrl_register(&pctl->desc, &pdev->dev, pctl);
+	pctl->pctldev = devm_pinctrl_register(&pdev->dev, &pctl->desc, pctl);
 	if (IS_ERR(pctl->pctldev)) {
 		dev_err(&pdev->dev, "unable to register pinctrl driver\n");
 		return PTR_ERR(pctl->pctldev);
@@ -725,10 +725,3 @@
 
 	return 0;
 }
-
-int mvebu_pinctrl_remove(struct platform_device *pdev)
-{
-	struct mvebu_pinctrl *pctl = platform_get_drvdata(pdev);
-	pinctrl_unregister(pctl->pctldev);
-	return 0;
-}
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.h b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
index 65a98e6..b75a5f4 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.h
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
@@ -202,6 +202,5 @@
 }
 
 int mvebu_pinctrl_probe(struct platform_device *pdev);
-int mvebu_pinctrl_remove(struct platform_device *pdev);
 
 #endif
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 3b7122d..345c3df 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -239,18 +239,12 @@
 	return mvebu_pinctrl_probe(pdev);
 }
 
-static int orion_pinctrl_remove(struct platform_device *pdev)
-{
-	return mvebu_pinctrl_remove(pdev);
-}
-
 static struct platform_driver orion_pinctrl_driver = {
 	.driver = {
 		.name = "orion-pinctrl",
 		.of_match_table = of_match_ptr(orion_pinctrl_of_match),
 	},
 	.probe = orion_pinctrl_probe,
-	.remove = orion_pinctrl_remove,
 };
 
 module_platform_driver(orion_pinctrl_driver);
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 1f7469c..7d343c2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -937,7 +937,7 @@
 		ret = abx500_dt_subnode_to_map(pctldev, np, map,
 				&reserved_maps, num_maps);
 		if (ret < 0) {
-			pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+			pinctrl_utils_free_map(pctldev, *map, *num_maps);
 			return ret;
 		}
 	}
@@ -951,7 +951,7 @@
 	.get_group_pins = abx500_get_group_pins,
 	.pin_dbg_show = abx500_pin_dbg_show,
 	.dt_node_to_map = abx500_dt_node_to_map,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int abx500_pin_config_get(struct pinctrl_dev *pctldev,
@@ -1212,7 +1212,8 @@
 
 	abx500_pinctrl_desc.pins = pct->soc->pins;
 	abx500_pinctrl_desc.npins = pct->soc->npins;
-	pct->pctldev = pinctrl_register(&abx500_pinctrl_desc, &pdev->dev, pct);
+	pct->pctldev = devm_pinctrl_register(&pdev->dev, &abx500_pinctrl_desc,
+					     pct);
 	if (IS_ERR(pct->pctldev)) {
 		dev_err(&pdev->dev,
 			"could not register abx500 pinctrl driver\n");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 3524061..ccbfc32 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/of_device.h>
 #include <linux/of_address.h>
+#include <linux/bitops.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
@@ -292,15 +293,14 @@
 static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
 				unsigned offset, int gpio_mode)
 {
-	u32 bit = 1 << offset;
 	u32 afunc, bfunc;
 
-	afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~bit;
-	bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~bit;
+	afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~BIT(offset);
+	bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~BIT(offset);
 	if (gpio_mode & NMK_GPIO_ALT_A)
-		afunc |= bit;
+		afunc |= BIT(offset);
 	if (gpio_mode & NMK_GPIO_ALT_B)
-		bfunc |= bit;
+		bfunc |= BIT(offset);
 	writel(afunc, nmk_chip->addr + NMK_GPIO_AFSLA);
 	writel(bfunc, nmk_chip->addr + NMK_GPIO_AFSLB);
 }
@@ -308,55 +308,52 @@
 static void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip,
 				unsigned offset, enum nmk_gpio_slpm mode)
 {
-	u32 bit = 1 << offset;
 	u32 slpm;
 
 	slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC);
 	if (mode == NMK_GPIO_SLPM_NOCHANGE)
-		slpm |= bit;
+		slpm |= BIT(offset);
 	else
-		slpm &= ~bit;
+		slpm &= ~BIT(offset);
 	writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC);
 }
 
 static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
 				unsigned offset, enum nmk_gpio_pull pull)
 {
-	u32 bit = 1 << offset;
 	u32 pdis;
 
 	pdis = readl(nmk_chip->addr + NMK_GPIO_PDIS);
 	if (pull == NMK_GPIO_PULL_NONE) {
-		pdis |= bit;
-		nmk_chip->pull_up &= ~bit;
+		pdis |= BIT(offset);
+		nmk_chip->pull_up &= ~BIT(offset);
 	} else {
-		pdis &= ~bit;
+		pdis &= ~BIT(offset);
 	}
 
 	writel(pdis, nmk_chip->addr + NMK_GPIO_PDIS);
 
 	if (pull == NMK_GPIO_PULL_UP) {
-		nmk_chip->pull_up |= bit;
-		writel(bit, nmk_chip->addr + NMK_GPIO_DATS);
+		nmk_chip->pull_up |= BIT(offset);
+		writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATS);
 	} else if (pull == NMK_GPIO_PULL_DOWN) {
-		nmk_chip->pull_up &= ~bit;
-		writel(bit, nmk_chip->addr + NMK_GPIO_DATC);
+		nmk_chip->pull_up &= ~BIT(offset);
+		writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATC);
 	}
 }
 
 static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip,
 				  unsigned offset, bool lowemi)
 {
-	u32 bit = BIT(offset);
-	bool enabled = nmk_chip->lowemi & bit;
+	bool enabled = nmk_chip->lowemi & BIT(offset);
 
 	if (lowemi == enabled)
 		return;
 
 	if (lowemi)
-		nmk_chip->lowemi |= bit;
+		nmk_chip->lowemi |= BIT(offset);
 	else
-		nmk_chip->lowemi &= ~bit;
+		nmk_chip->lowemi &= ~BIT(offset);
 
 	writel_relaxed(nmk_chip->lowemi,
 		       nmk_chip->addr + NMK_GPIO_LOWEMI);
@@ -365,22 +362,22 @@
 static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
 				  unsigned offset)
 {
-	writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
+	writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRC);
 }
 
 static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
 				  unsigned offset, int val)
 {
 	if (val)
-		writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATS);
+		writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATS);
 	else
-		writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATC);
+		writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATC);
 }
 
 static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
 				  unsigned offset, int val)
 {
-	writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS);
+	writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRS);
 	__nmk_gpio_set_output(nmk_chip, offset, val);
 }
 
@@ -614,34 +611,7 @@
 	return NMK_GPIO_ALT_C;
 }
 
-int nmk_gpio_get_mode(int gpio)
-{
-	struct nmk_gpio_chip *nmk_chip;
-	u32 afunc, bfunc, bit;
-
-	nmk_chip = nmk_gpio_chips[gpio / NMK_GPIO_PER_CHIP];
-	if (!nmk_chip)
-		return -EINVAL;
-
-	bit = 1 << (gpio % NMK_GPIO_PER_CHIP);
-
-	clk_enable(nmk_chip->clk);
-
-	afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & bit;
-	bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & bit;
-
-	clk_disable(nmk_chip->clk);
-
-	return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
-}
-EXPORT_SYMBOL(nmk_gpio_get_mode);
-
-
 /* IRQ functions */
-static inline int nmk_gpio_get_bitmask(int gpio)
-{
-	return 1 << (gpio % NMK_GPIO_PER_CHIP);
-}
 
 static void nmk_gpio_irq_ack(struct irq_data *d)
 {
@@ -649,7 +619,7 @@
 	struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
 
 	clk_enable(nmk_chip->clk);
-	writel(nmk_gpio_get_bitmask(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
+	writel(BIT(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
 	clk_disable(nmk_chip->clk);
 }
 
@@ -659,10 +629,9 @@
 };
 
 static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
-				  int gpio, enum nmk_gpio_irq_type which,
+				  int offset, enum nmk_gpio_irq_type which,
 				  bool enable)
 {
-	u32 bitmask = nmk_gpio_get_bitmask(gpio);
 	u32 *rimscval;
 	u32 *fimscval;
 	u32 rimscreg;
@@ -681,24 +650,24 @@
 	}
 
 	/* we must individually set/clear the two edges */
-	if (nmk_chip->edge_rising & bitmask) {
+	if (nmk_chip->edge_rising & BIT(offset)) {
 		if (enable)
-			*rimscval |= bitmask;
+			*rimscval |= BIT(offset);
 		else
-			*rimscval &= ~bitmask;
+			*rimscval &= ~BIT(offset);
 		writel(*rimscval, nmk_chip->addr + rimscreg);
 	}
-	if (nmk_chip->edge_falling & bitmask) {
+	if (nmk_chip->edge_falling & BIT(offset)) {
 		if (enable)
-			*fimscval |= bitmask;
+			*fimscval |= BIT(offset);
 		else
-			*fimscval &= ~bitmask;
+			*fimscval &= ~BIT(offset);
 		writel(*fimscval, nmk_chip->addr + fimscreg);
 	}
 }
 
 static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
-				int gpio, bool on)
+				int offset, bool on)
 {
 	/*
 	 * Ensure WAKEUP_ENABLE is on.  No need to disable it if wakeup is
@@ -706,21 +675,19 @@
 	 * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
 	 */
 	if (nmk_chip->sleepmode && on) {
-		__nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP,
+		__nmk_gpio_set_slpm(nmk_chip, offset,
 				    NMK_GPIO_SLPM_WAKEUP_ENABLE);
 	}
 
-	__nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
+	__nmk_gpio_irq_modify(nmk_chip, offset, WAKE, on);
 }
 
 static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
 {
 	struct nmk_gpio_chip *nmk_chip;
 	unsigned long flags;
-	u32 bitmask;
 
 	nmk_chip = irq_data_get_irq_chip_data(d);
-	bitmask = nmk_gpio_get_bitmask(d->hwirq);
 	if (!nmk_chip)
 		return -EINVAL;
 
@@ -730,7 +697,7 @@
 
 	__nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, enable);
 
-	if (!(nmk_chip->real_wake & bitmask))
+	if (!(nmk_chip->real_wake & BIT(d->hwirq)))
 		__nmk_gpio_set_wake(nmk_chip, d->hwirq, enable);
 
 	spin_unlock(&nmk_chip->lock);
@@ -754,12 +721,10 @@
 {
 	struct nmk_gpio_chip *nmk_chip;
 	unsigned long flags;
-	u32 bitmask;
 
 	nmk_chip = irq_data_get_irq_chip_data(d);
 	if (!nmk_chip)
 		return -EINVAL;
-	bitmask = nmk_gpio_get_bitmask(d->hwirq);
 
 	clk_enable(nmk_chip->clk);
 	spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
@@ -769,9 +734,9 @@
 		__nmk_gpio_set_wake(nmk_chip, d->hwirq, on);
 
 	if (on)
-		nmk_chip->real_wake |= bitmask;
+		nmk_chip->real_wake |= BIT(d->hwirq);
 	else
-		nmk_chip->real_wake &= ~bitmask;
+		nmk_chip->real_wake &= ~BIT(d->hwirq);
 
 	spin_unlock(&nmk_chip->lock);
 	spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
@@ -786,10 +751,8 @@
 	bool wake = irqd_is_wakeup_set(d);
 	struct nmk_gpio_chip *nmk_chip;
 	unsigned long flags;
-	u32 bitmask;
 
 	nmk_chip = irq_data_get_irq_chip_data(d);
-	bitmask = nmk_gpio_get_bitmask(d->hwirq);
 	if (!nmk_chip)
 		return -EINVAL;
 	if (type & IRQ_TYPE_LEVEL_HIGH)
@@ -806,13 +769,13 @@
 	if (enabled || wake)
 		__nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, false);
 
-	nmk_chip->edge_rising &= ~bitmask;
+	nmk_chip->edge_rising &= ~BIT(d->hwirq);
 	if (type & IRQ_TYPE_EDGE_RISING)
-		nmk_chip->edge_rising |= bitmask;
+		nmk_chip->edge_rising |= BIT(d->hwirq);
 
-	nmk_chip->edge_falling &= ~bitmask;
+	nmk_chip->edge_falling &= ~BIT(d->hwirq);
 	if (type & IRQ_TYPE_EDGE_FALLING)
-		nmk_chip->edge_falling |= bitmask;
+		nmk_chip->edge_falling |= BIT(d->hwirq);
 
 	if (enabled)
 		__nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, true);
@@ -884,13 +847,27 @@
 
 /* I/O Functions */
 
+static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
+{
+	struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+	int dir;
+
+	clk_enable(nmk_chip->clk);
+
+	dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+
+	clk_disable(nmk_chip->clk);
+
+	return dir;
+}
+
 static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
 {
 	struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
 
 	clk_enable(nmk_chip->clk);
 
-	writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
+	writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRC);
 
 	clk_disable(nmk_chip->clk);
 
@@ -900,12 +877,11 @@
 static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset)
 {
 	struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
-	u32 bit = 1 << offset;
 	int value;
 
 	clk_enable(nmk_chip->clk);
 
-	value = (readl(nmk_chip->addr + NMK_GPIO_DAT) & bit) != 0;
+	value = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
 
 	clk_disable(nmk_chip->clk);
 
@@ -939,6 +915,19 @@
 }
 
 #ifdef CONFIG_DEBUG_FS
+static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
+{
+	u32 afunc, bfunc;
+
+	clk_enable(nmk_chip->clk);
+
+	afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & BIT(offset);
+	bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & BIT(offset);
+
+	clk_disable(nmk_chip->clk);
+
+	return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
+}
 
 #include <linux/seq_file.h>
 
@@ -952,7 +941,6 @@
 	bool is_out;
 	bool data_out;
 	bool pull;
-	u32 bit = 1 << offset;
 	const char *modes[] = {
 		[NMK_GPIO_ALT_GPIO]	= "gpio",
 		[NMK_GPIO_ALT_A]	= "altA",
@@ -970,10 +958,10 @@
 	};
 
 	clk_enable(nmk_chip->clk);
-	is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & bit);
-	pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit);
-	data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & bit);
-	mode = nmk_gpio_get_mode(gpio);
+	is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+	pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & BIT(offset));
+	data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
+	mode = nmk_gpio_get_mode(nmk_chip, offset);
 	if ((mode == NMK_GPIO_ALT_C) && pctldev)
 		mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
 
@@ -990,7 +978,7 @@
 		int val;
 
 		if (pull)
-			pullidx = data_out ? 1 : 2;
+			pullidx = data_out ? 2 : 1;
 
 		seq_printf(s, " gpio-%-3d (%-20.20s) in  %s %s",
 			   gpio,
@@ -1007,11 +995,10 @@
 		 */
 		if (irq > 0 && desc && desc->action) {
 			char *trigger;
-			u32 bitmask = nmk_gpio_get_bitmask(gpio);
 
-			if (nmk_chip->edge_rising & bitmask)
+			if (nmk_chip->edge_rising & BIT(offset))
 				trigger = "edge-rising";
-			else if (nmk_chip->edge_falling & bitmask)
+			else if (nmk_chip->edge_falling & BIT(offset))
 				trigger = "edge-falling";
 			else
 				trigger = "edge-undefined";
@@ -1246,6 +1233,7 @@
 	chip = &nmk_chip->chip;
 	chip->request = gpiochip_generic_request;
 	chip->free = gpiochip_generic_free;
+	chip->get_direction = nmk_gpio_get_dir;
 	chip->direction_input = nmk_gpio_make_input;
 	chip->get = nmk_gpio_get_input;
 	chip->direction_output = nmk_gpio_make_output;
@@ -1612,7 +1600,7 @@
 		ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map,
 				&reserved_maps, num_maps);
 		if (ret < 0) {
-			pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+			pinctrl_utils_free_map(pctldev, *map, *num_maps);
 			return ret;
 		}
 	}
@@ -1626,7 +1614,7 @@
 	.get_group_pins = nmk_get_group_pins,
 	.pin_dbg_show = nmk_pin_dbg_show,
 	.dt_node_to_map = nmk_pinctrl_dt_node_to_map,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
@@ -2044,7 +2032,7 @@
 	nmk_pinctrl_desc.npins = npct->soc->npins;
 	npct->dev = &pdev->dev;
 
-	npct->pctl = pinctrl_register(&nmk_pinctrl_desc, &pdev->dev, npct);
+	npct->pctl = devm_pinctrl_register(&pdev->dev, &nmk_pinctrl_desc, npct);
 	if (IS_ERR(npct->pctl)) {
 		dev_err(&pdev->dev, "could not register Nomadik pinctrl driver\n");
 		return PTR_ERR(npct->pctl);
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 79e6159..d5bf9fa 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -386,7 +386,7 @@
 	return 0;
 
 exit:
-	pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+	pinctrl_utils_free_map(pctldev, *map, *num_maps);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map);
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index ecb57635..54569a7 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -1058,7 +1058,8 @@
 	adi_pinmux_desc.npins = pinctrl->soc->npins;
 
 	/* Now register the pin controller and all pins it handles */
-	pinctrl->pctl = pinctrl_register(&adi_pinmux_desc, &pdev->dev, pinctrl);
+	pinctrl->pctl = devm_pinctrl_register(&pdev->dev, &adi_pinmux_desc,
+					      pinctrl);
 	if (IS_ERR(pinctrl->pctl)) {
 		dev_err(&pdev->dev, "could not register pinctrl ADI2 driver\n");
 		return PTR_ERR(pinctrl->pctl);
@@ -1069,18 +1070,8 @@
 	return 0;
 }
 
-static int adi_pinctrl_remove(struct platform_device *pdev)
-{
-	struct adi_pinctrl *pinctrl = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(pinctrl->pctl);
-
-	return 0;
-}
-
 static struct platform_driver adi_pinctrl_driver = {
 	.probe		= adi_pinctrl_probe,
-	.remove		= adi_pinctrl_remove,
 	.driver		= {
 		.name	= DRIVER_NAME,
 	},
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 5c025f5..634b4d3 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -580,7 +580,7 @@
 	.get_group_pins		= amd_get_group_pins,
 #ifdef CONFIG_OF
 	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 #endif
 };
 
@@ -783,8 +783,8 @@
 	gpio_dev->ngroups = ARRAY_SIZE(kerncz_groups);
 
 	amd_pinctrl_desc.name = dev_name(&pdev->dev);
-	gpio_dev->pctrl = pinctrl_register(&amd_pinctrl_desc,
-					&pdev->dev, gpio_dev);
+	gpio_dev->pctrl = devm_pinctrl_register(&pdev->dev, &amd_pinctrl_desc,
+						gpio_dev);
 	if (IS_ERR(gpio_dev->pctrl)) {
 		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
 		return PTR_ERR(gpio_dev->pctrl);
@@ -792,7 +792,7 @@
 
 	ret = gpiochip_add_data(&gpio_dev->gc, gpio_dev);
 	if (ret)
-		goto out1;
+		return ret;
 
 	ret = gpiochip_add_pin_range(&gpio_dev->gc, dev_name(&pdev->dev),
 				0, 0, TOTAL_NUMBER_OF_PINS);
@@ -825,8 +825,6 @@
 out2:
 	gpiochip_remove(&gpio_dev->gc);
 
-out1:
-	pinctrl_unregister(gpio_dev->pctrl);
 	return ret;
 }
 
@@ -837,13 +835,13 @@
 	gpio_dev = platform_get_drvdata(pdev);
 
 	gpiochip_remove(&gpio_dev->gc);
-	pinctrl_unregister(gpio_dev->pctrl);
 
 	return 0;
 }
 
 static const struct acpi_device_id amd_gpio_acpi_match[] = {
 	{ "AMD0030", 0 },
+	{ "AMDI0030", 0},
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, amd_gpio_acpi_match);
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index e844fdc..4e9fe78 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -201,7 +201,7 @@
 	.get_group_name = as3722_pinctrl_get_group_name,
 	.get_group_pins = as3722_pinctrl_get_group_pins,
 	.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int as3722_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
@@ -569,8 +569,8 @@
 	as3722_pinctrl_desc.name = dev_name(&pdev->dev);
 	as3722_pinctrl_desc.pins = as3722_pins_desc;
 	as3722_pinctrl_desc.npins = ARRAY_SIZE(as3722_pins_desc);
-	as_pci->pctl = pinctrl_register(&as3722_pinctrl_desc,
-					&pdev->dev, as_pci);
+	as_pci->pctl = devm_pinctrl_register(&pdev->dev, &as3722_pinctrl_desc,
+					     as_pci);
 	if (IS_ERR(as_pci->pctl)) {
 		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
 		return PTR_ERR(as_pci->pctl);
@@ -582,7 +582,7 @@
 	ret = gpiochip_add_data(&as_pci->gpio_chip, as_pci);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Couldn't register gpiochip, %d\n", ret);
-		goto fail_chip_add;
+		return ret;
 	}
 
 	ret = gpiochip_add_pin_range(&as_pci->gpio_chip, dev_name(&pdev->dev),
@@ -596,8 +596,6 @@
 
 fail_range_add:
 	gpiochip_remove(&as_pci->gpio_chip);
-fail_chip_add:
-	pinctrl_unregister(as_pci->pctl);
 	return ret;
 }
 
@@ -606,7 +604,6 @@
 	struct as3722_pctrl_info *as_pci = platform_get_drvdata(pdev);
 
 	gpiochip_remove(&as_pci->gpio_chip);
-	pinctrl_unregister(as_pci->pctl);
 	return 0;
 }
 
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 4429312..a025b40 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -579,7 +579,7 @@
 	}
 
 	if (ret < 0) {
-		pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+		pinctrl_utils_free_map(pctldev, *map, *num_maps);
 		dev_err(pctldev->dev, "can't create maps for node %s\n",
 			np_config->full_name);
 	}
@@ -592,7 +592,7 @@
 	.get_group_name		= atmel_pctl_get_group_name,
 	.get_group_pins		= atmel_pctl_get_group_pins,
 	.dt_node_to_map		= atmel_pctl_dt_node_to_map,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 };
 
 static int atmel_pmx_get_functions_count(struct pinctrl_dev *pctldev)
@@ -722,9 +722,11 @@
 			break;
 		case PIN_CONFIG_BIAS_PULL_UP:
 			conf |= ATMEL_PIO_PUEN_MASK;
+			conf &= (~ATMEL_PIO_PDEN_MASK);
 			break;
 		case PIN_CONFIG_BIAS_PULL_DOWN:
 			conf |= ATMEL_PIO_PDEN_MASK;
+			conf &= (~ATMEL_PIO_PUEN_MASK);
 			break;
 		case PIN_CONFIG_DRIVE_OPEN_DRAIN:
 			if (arg == 0)
@@ -1034,18 +1036,19 @@
 		goto clk_prepare_enable_error;
 	}
 
-	atmel_pioctrl->pinctrl_dev = pinctrl_register(&atmel_pinctrl_desc,
-						      &pdev->dev,
-						      atmel_pioctrl);
-	if (!atmel_pioctrl->pinctrl_dev) {
+	atmel_pioctrl->pinctrl_dev = devm_pinctrl_register(&pdev->dev,
+							   &atmel_pinctrl_desc,
+							   atmel_pioctrl);
+	if (IS_ERR(atmel_pioctrl->pinctrl_dev)) {
+		ret = PTR_ERR(atmel_pioctrl->pinctrl_dev);
 		dev_err(dev, "pinctrl registration failed\n");
-		goto pinctrl_register_error;
+		goto clk_unprep;
 	}
 
 	ret = gpiochip_add_data(atmel_pioctrl->gpio_chip, atmel_pioctrl);
 	if (ret) {
 		dev_err(dev, "failed to add gpiochip\n");
-		goto gpiochip_add_error;
+		goto clk_unprep;
 	}
 
 	ret = gpiochip_add_pin_range(atmel_pioctrl->gpio_chip, dev_name(dev),
@@ -1059,15 +1062,15 @@
 
 	return 0;
 
-clk_prepare_enable_error:
-	irq_domain_remove(atmel_pioctrl->irq_domain);
-pinctrl_register_error:
-	clk_disable_unprepare(atmel_pioctrl->clk);
-gpiochip_add_error:
-	pinctrl_unregister(atmel_pioctrl->pinctrl_dev);
 gpiochip_add_pin_range_error:
 	gpiochip_remove(atmel_pioctrl->gpio_chip);
 
+clk_unprep:
+	clk_disable_unprepare(atmel_pioctrl->clk);
+
+clk_prepare_enable_error:
+	irq_domain_remove(atmel_pioctrl->irq_domain);
+
 	return ret;
 }
 
@@ -1077,7 +1080,6 @@
 
 	irq_domain_remove(atmel_pioctrl->irq_domain);
 	clk_disable_unprepare(atmel_pioctrl->clk);
-	pinctrl_unregister(atmel_pioctrl->pinctrl_dev);
 	gpiochip_remove(atmel_pioctrl->gpio_chip);
 
 	return 0;
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 523b6b7..b7c0d6f 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1252,7 +1252,8 @@
 	}
 
 	platform_set_drvdata(pdev, info);
-	info->pctl = pinctrl_register(&at91_pinctrl_desc, &pdev->dev, info);
+	info->pctl = devm_pinctrl_register(&pdev->dev, &at91_pinctrl_desc,
+					   info);
 
 	if (IS_ERR(info->pctl)) {
 		dev_err(&pdev->dev, "could not register AT91 pinctrl driver\n");
@@ -1269,15 +1270,6 @@
 	return 0;
 }
 
-static int at91_pinctrl_remove(struct platform_device *pdev)
-{
-	struct at91_pinctrl *info = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(info->pctl);
-
-	return 0;
-}
-
 static int at91_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
 {
 	struct at91_gpio_chip *at91_gpio = gpiochip_get_data(chip);
@@ -1660,7 +1652,7 @@
 }
 
 /* This structure is replicated for each GPIO block allocated at probe time */
-static struct gpio_chip at91_gpio_template = {
+static const struct gpio_chip at91_gpio_template = {
 	.request		= gpiochip_generic_request,
 	.free			= gpiochip_generic_free,
 	.get_direction		= at91_gpio_get_direction,
@@ -1730,14 +1722,9 @@
 		goto err;
 	}
 
-	ret = clk_prepare(at91_chip->clock);
-	if (ret)
-		goto clk_prepare_err;
-
-	/* enable PIO controller's clock */
-	ret = clk_enable(at91_chip->clock);
+	ret = clk_prepare_enable(at91_chip->clock);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to enable clock, ignoring.\n");
+		dev_err(&pdev->dev, "failed to prepare and enable clock, ignoring.\n");
 		goto clk_enable_err;
 	}
 
@@ -1797,10 +1784,8 @@
 irq_setup_err:
 	gpiochip_remove(chip);
 gpiochip_add_err:
-	clk_disable(at91_chip->clock);
 clk_enable_err:
-	clk_unprepare(at91_chip->clock);
-clk_prepare_err:
+	clk_disable_unprepare(at91_chip->clock);
 err:
 	dev_err(&pdev->dev, "Failure %i for GPIO %i\n", ret, alias_idx);
 
@@ -1821,7 +1806,6 @@
 		.of_match_table = at91_pinctrl_of_match,
 	},
 	.probe = at91_pinctrl_probe,
-	.remove = at91_pinctrl_remove,
 };
 
 static struct platform_driver * const drivers[] = {
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index f1343d6..30ee564 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -84,7 +84,7 @@
 	.get_group_name		= dc_get_group_name,
 	.get_group_pins		= dc_get_group_pins,
 	.dt_node_to_map		= pinconf_generic_dt_node_to_map_pin,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 };
 
 static const char *const dc_functions[] = {
@@ -280,7 +280,7 @@
 	struct pinctrl_desc *pctl_desc;
 	char *pin_names;
 	int name_len = strlen("GP_xx") + 1;
-	int i, j, ret;
+	int i, j;
 
 	pmap = devm_kzalloc(&pdev->dev, sizeof(*pmap), GFP_KERNEL);
 	if (!pmap)
@@ -326,26 +326,19 @@
 
 	pmap->dev = &pdev->dev;
 
-	pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap);
+	pmap->pctl = devm_pinctrl_register(&pdev->dev, pctl_desc, pmap);
 	if (IS_ERR(pmap->pctl)) {
 		dev_err(&pdev->dev, "pinctrl driver registration failed\n");
 		return PTR_ERR(pmap->pctl);
 	}
 
-	ret = dc_gpiochip_add(pmap, pdev->dev.of_node);
-	if (ret < 0) {
-		pinctrl_unregister(pmap->pctl);
-		return ret;
-	}
-
-	return 0;
+	return dc_gpiochip_add(pmap, pdev->dev.of_node);
 }
 
 static int dc_pinctrl_remove(struct platform_device *pdev)
 {
 	struct dc_pinmap *pmap = platform_get_drvdata(pdev);
 
-	pinctrl_unregister(pmap->pctl);
 	gpiochip_remove(&pmap->chip);
 
 	return 0;
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index fc38a85..a4d6474 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -336,7 +336,7 @@
 	desc->pmxops = &ltq_pmx_ops;
 	info->dev = &pdev->dev;
 
-	info->pctrl = pinctrl_register(desc, &pdev->dev, info);
+	info->pctrl = devm_pinctrl_register(&pdev->dev, desc, info);
 	if (IS_ERR(info->pctrl)) {
 		dev_err(&pdev->dev, "failed to register LTQ pinmux driver\n");
 		return PTR_ERR(info->pctrl);
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index b1767f7..8a931c7 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -1252,7 +1252,7 @@
 	.get_group_name		= lpc18xx_pctl_get_group_name,
 	.get_group_pins		= lpc18xx_pctl_get_group_pins,
 	.dt_node_to_map		= pinconf_generic_dt_node_to_map_pin,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 };
 
 static struct pinctrl_desc lpc18xx_scu_desc = {
@@ -1355,7 +1355,7 @@
 
 	platform_set_drvdata(pdev, scu);
 
-	scu->pctl = pinctrl_register(&lpc18xx_scu_desc, &pdev->dev, scu);
+	scu->pctl = devm_pinctrl_register(&pdev->dev, &lpc18xx_scu_desc, scu);
 	if (IS_ERR(scu->pctl)) {
 		dev_err(&pdev->dev, "Could not register pinctrl driver\n");
 		clk_disable_unprepare(scu->clk);
@@ -1369,7 +1369,6 @@
 {
 	struct lpc18xx_scu_data *scu = platform_get_drvdata(pdev);
 
-	pinctrl_unregister(scu->pctl);
 	clk_disable_unprepare(scu->clk);
 
 	return 0;
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index f7e1680..8edb3f8c 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -656,7 +656,7 @@
 	.get_group_name = palmas_pinctrl_get_group_name,
 	.get_group_pins = palmas_pinctrl_get_group_pins,
 	.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int palmas_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
@@ -1043,7 +1043,8 @@
 	palmas_pinctrl_desc.name = dev_name(&pdev->dev);
 	palmas_pinctrl_desc.pins = palmas_pins_desc;
 	palmas_pinctrl_desc.npins = ARRAY_SIZE(palmas_pins_desc);
-	pci->pctl = pinctrl_register(&palmas_pinctrl_desc, &pdev->dev, pci);
+	pci->pctl = devm_pinctrl_register(&pdev->dev, &palmas_pinctrl_desc,
+					  pci);
 	if (IS_ERR(pci->pctl)) {
 		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
 		return PTR_ERR(pci->pctl);
@@ -1051,21 +1052,12 @@
 	return 0;
 }
 
-static int palmas_pinctrl_remove(struct platform_device *pdev)
-{
-	struct palmas_pctrl_chip_info *pci = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(pci->pctl);
-	return 0;
-}
-
 static struct platform_driver palmas_pinctrl_driver = {
 	.driver = {
 		.name = "palmas-pinctrl",
 		.of_match_table = palmas_pinctrl_of_match,
 	},
 	.probe = palmas_pinctrl_probe,
-	.remove = palmas_pinctrl_remove,
 };
 
 module_platform_driver(palmas_pinctrl_driver);
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index 0b07d4b..31ceb95 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -1743,7 +1743,7 @@
 	.get_group_name = pic32_pinctrl_get_group_name,
 	.get_group_pins = pic32_pinctrl_get_group_pins,
 	.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int pic32_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
@@ -2194,7 +2194,8 @@
 	pic32_pinctrl_desc.custom_params = pic32_mpp_bindings;
 	pic32_pinctrl_desc.num_custom_params = ARRAY_SIZE(pic32_mpp_bindings);
 
-	pctl->pctldev = pinctrl_register(&pic32_pinctrl_desc, &pdev->dev, pctl);
+	pctl->pctldev = devm_pinctrl_register(&pdev->dev, &pic32_pinctrl_desc,
+					      pctl);
 	if (IS_ERR(pctl->pctldev)) {
 		dev_err(&pdev->dev, "Failed to register pinctrl device\n");
 		return PTR_ERR(pctl->pctldev);
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 856f736..c6d410e 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -469,27 +469,27 @@
 	"mfio83",
 };
 
-static const char * const pistachio_sys_pll_lock_groups[] = {
+static const char * const pistachio_audio_pll_lock_groups[] = {
 	"mfio84",
 };
 
-static const char * const pistachio_wifi_pll_lock_groups[] = {
+static const char * const pistachio_rpu_v_pll_lock_groups[] = {
 	"mfio85",
 };
 
-static const char * const pistachio_bt_pll_lock_groups[] = {
+static const char * const pistachio_rpu_l_pll_lock_groups[] = {
 	"mfio86",
 };
 
-static const char * const pistachio_rpu_v_pll_lock_groups[] = {
+static const char * const pistachio_sys_pll_lock_groups[] = {
 	"mfio87",
 };
 
-static const char * const pistachio_rpu_l_pll_lock_groups[] = {
+static const char * const pistachio_wifi_pll_lock_groups[] = {
 	"mfio88",
 };
 
-static const char * const pistachio_audio_pll_lock_groups[] = {
+static const char * const pistachio_bt_pll_lock_groups[] = {
 	"mfio89",
 };
 
@@ -559,12 +559,12 @@
 	PISTACHIO_FUNCTION_DREQ4,
 	PISTACHIO_FUNCTION_DREQ5,
 	PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
+	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
+	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
+	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
 	PISTACHIO_FUNCTION_SYS_PLL_LOCK,
 	PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
 	PISTACHIO_FUNCTION_BT_PLL_LOCK,
-	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
-	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
-	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
 	PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
 	PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
 	PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@@ -620,12 +620,12 @@
 	FUNCTION(dreq4),
 	FUNCTION(dreq5),
 	FUNCTION(mips_pll_lock),
+	FUNCTION(audio_pll_lock),
+	FUNCTION(rpu_v_pll_lock),
+	FUNCTION(rpu_l_pll_lock),
 	FUNCTION(sys_pll_lock),
 	FUNCTION(wifi_pll_lock),
 	FUNCTION(bt_pll_lock),
-	FUNCTION(rpu_v_pll_lock),
-	FUNCTION(rpu_l_pll_lock),
-	FUNCTION(audio_pll_lock),
 	FUNCTION(debug_raw_cca_ind),
 	FUNCTION(debug_ed_sec20_cca_ind),
 	FUNCTION(debug_ed_sec40_cca_ind),
@@ -913,7 +913,7 @@
 	.get_group_name = pistachio_pinctrl_get_group_name,
 	.get_group_pins = pistachio_pinctrl_get_group_pins,
 	.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int pistachio_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
@@ -1457,8 +1457,8 @@
 	pistachio_pinctrl_desc.pins = pctl->pins;
 	pistachio_pinctrl_desc.npins = pctl->npins;
 
-	pctl->pctldev = pinctrl_register(&pistachio_pinctrl_desc, &pdev->dev,
-					 pctl);
+	pctl->pctldev = devm_pinctrl_register(&pdev->dev, &pistachio_pinctrl_desc,
+					      pctl);
 	if (IS_ERR(pctl->pctldev)) {
 		dev_err(&pdev->dev, "Failed to register pinctrl device\n");
 		return PTR_ERR(pctl->pctldev);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index bf032b9..a91026e 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -99,6 +99,15 @@
 };
 
 /**
+ * enum type index corresponding to rockchip_pull_list arrays index.
+ */
+enum rockchip_pin_pull_type {
+	PULL_TYPE_IO_DEFAULT = 0,
+	PULL_TYPE_IO_1V8_ONLY,
+	PULL_TYPE_MAX
+};
+
+/**
  * @drv_type: drive strength variant using rockchip_perpin_drv_type
  * @offset: if initialized to -1 it will be autocalculated, by specifying
  *	    an initial offset value the relevant source offset can be reset
@@ -123,6 +132,7 @@
  * @bank_num: number of the bank, to account for holes
  * @iomux: array describing the 4 iomux sources of the bank
  * @drv: array describing the 4 drive strength sources of the bank
+ * @pull_type: array describing the 4 pull type sources of the bank
  * @valid: are all necessary informations present
  * @of_node: dt node of this bank
  * @drvdata: common pinctrl basedata
@@ -143,6 +153,7 @@
 	u8				bank_num;
 	struct rockchip_iomux		iomux[4];
 	struct rockchip_drv		drv[4];
+	enum rockchip_pin_pull_type	pull_type[4];
 	bool				valid;
 	struct device_node		*of_node;
 	struct rockchip_pinctrl		*drvdata;
@@ -198,6 +209,31 @@
 		},							\
 	}
 
+#define PIN_BANK_DRV_FLAGS_PULL_FLAGS(id, pins, label, drv0, drv1,	\
+				      drv2, drv3, pull0, pull1,		\
+				      pull2, pull3)			\
+	{								\
+		.bank_num	= id,					\
+		.nr_pins	= pins,					\
+		.name		= label,				\
+		.iomux		= {					\
+			{ .offset = -1 },				\
+			{ .offset = -1 },				\
+			{ .offset = -1 },				\
+			{ .offset = -1 },				\
+		},							\
+		.drv		= {					\
+			{ .drv_type = drv0, .offset = -1 },		\
+			{ .drv_type = drv1, .offset = -1 },		\
+			{ .drv_type = drv2, .offset = -1 },		\
+			{ .drv_type = drv3, .offset = -1 },		\
+		},							\
+		.pull_type[0] = pull0,					\
+		.pull_type[1] = pull1,					\
+		.pull_type[2] = pull2,					\
+		.pull_type[3] = pull3,					\
+	}
+
 #define PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(id, pins, label, iom0, iom1,	\
 					iom2, iom3, drv0, drv1, drv2,	\
 					drv3, offset0, offset1,		\
@@ -220,6 +256,34 @@
 		},							\
 	}
 
+#define PIN_BANK_IOMUX_FLAGS_DRV_FLAGS_OFFSET_PULL_FLAGS(id, pins,	\
+					      label, iom0, iom1, iom2,  \
+					      iom3, drv0, drv1, drv2,   \
+					      drv3, offset0, offset1,   \
+					      offset2, offset3, pull0,  \
+					      pull1, pull2, pull3)	\
+	{								\
+		.bank_num	= id,					\
+		.nr_pins	= pins,					\
+		.name		= label,				\
+		.iomux		= {					\
+			{ .type = iom0, .offset = -1 },			\
+			{ .type = iom1, .offset = -1 },			\
+			{ .type = iom2, .offset = -1 },			\
+			{ .type = iom3, .offset = -1 },			\
+		},							\
+		.drv		= {					\
+			{ .drv_type = drv0, .offset = offset0 },	\
+			{ .drv_type = drv1, .offset = offset1 },	\
+			{ .drv_type = drv2, .offset = offset2 },	\
+			{ .drv_type = drv3, .offset = offset3 },	\
+		},							\
+		.pull_type[0] = pull0,					\
+		.pull_type[1] = pull1,					\
+		.pull_type[2] = pull2,					\
+		.pull_type[3] = pull3,					\
+	}
+
 /**
  */
 struct rockchip_pin_ctrl {
@@ -1020,12 +1084,27 @@
 	return ret;
 }
 
+static int rockchip_pull_list[PULL_TYPE_MAX][4] = {
+	{
+		PIN_CONFIG_BIAS_DISABLE,
+		PIN_CONFIG_BIAS_PULL_UP,
+		PIN_CONFIG_BIAS_PULL_DOWN,
+		PIN_CONFIG_BIAS_BUS_HOLD
+	},
+	{
+		PIN_CONFIG_BIAS_DISABLE,
+		PIN_CONFIG_BIAS_PULL_DOWN,
+		PIN_CONFIG_BIAS_DISABLE,
+		PIN_CONFIG_BIAS_PULL_UP
+	},
+};
+
 static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
 {
 	struct rockchip_pinctrl *info = bank->drvdata;
 	struct rockchip_pin_ctrl *ctrl = info->ctrl;
 	struct regmap *regmap;
-	int reg, ret;
+	int reg, ret, pull_type;
 	u8 bit;
 	u32 data;
 
@@ -1048,22 +1127,11 @@
 	case RK3288:
 	case RK3368:
 	case RK3399:
+		pull_type = bank->pull_type[pin_num / 8];
 		data >>= bit;
 		data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
 
-		switch (data) {
-		case 0:
-			return PIN_CONFIG_BIAS_DISABLE;
-		case 1:
-			return PIN_CONFIG_BIAS_PULL_UP;
-		case 2:
-			return PIN_CONFIG_BIAS_PULL_DOWN;
-		case 3:
-			return PIN_CONFIG_BIAS_BUS_HOLD;
-		}
-
-		dev_err(info->dev, "unknown pull setting\n");
-		return -EIO;
+		return rockchip_pull_list[pull_type][data];
 	default:
 		dev_err(info->dev, "unsupported pinctrl type\n");
 		return -EINVAL;
@@ -1076,7 +1144,7 @@
 	struct rockchip_pinctrl *info = bank->drvdata;
 	struct rockchip_pin_ctrl *ctrl = info->ctrl;
 	struct regmap *regmap;
-	int reg, ret;
+	int reg, ret, i, pull_type;
 	unsigned long flags;
 	u8 bit;
 	u32 data, rmask;
@@ -1105,30 +1173,28 @@
 	case RK3288:
 	case RK3368:
 	case RK3399:
+		pull_type = bank->pull_type[pin_num / 8];
+		ret = -EINVAL;
+		for (i = 0; i < ARRAY_SIZE(rockchip_pull_list[pull_type]);
+			i++) {
+			if (rockchip_pull_list[pull_type][i] == pull) {
+				ret = i;
+				break;
+			}
+		}
+
+		if (ret < 0) {
+			dev_err(info->dev, "unsupported pull setting %d\n",
+				pull);
+			return ret;
+		}
+
 		spin_lock_irqsave(&bank->slock, flags);
 
 		/* enable the write to the equivalent lower bits */
 		data = ((1 << RK3188_PULL_BITS_PER_PIN) - 1) << (bit + 16);
 		rmask = data | (data >> 16);
-
-		switch (pull) {
-		case PIN_CONFIG_BIAS_DISABLE:
-			break;
-		case PIN_CONFIG_BIAS_PULL_UP:
-			data |= (1 << bit);
-			break;
-		case PIN_CONFIG_BIAS_PULL_DOWN:
-			data |= (2 << bit);
-			break;
-		case PIN_CONFIG_BIAS_BUS_HOLD:
-			data |= (3 << bit);
-			break;
-		default:
-			spin_unlock_irqrestore(&bank->slock, flags);
-			dev_err(info->dev, "unsupported pull setting %d\n",
-				pull);
-			return -EINVAL;
-		}
+		data |= (ret << bit);
 
 		ret = regmap_update_bits(regmap, reg, rmask, data);
 
@@ -1208,6 +1274,16 @@
 	return 0;
 }
 
+static int rockchip_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+	struct rockchip_pin_bank *bank = gpiochip_get_data(chip);
+	u32 data;
+
+	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
+
+	return !(data & BIT(offset));
+}
+
 /*
  * The calls to gpio_direction_output() and gpio_direction_input()
  * leads to this function call (via the pinctrl_gpio_direction_{input|output}()
@@ -1636,7 +1712,7 @@
 	if (ret)
 		return ret;
 
-	info->pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, info);
+	info->pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, info);
 	if (IS_ERR(info->pctl_dev)) {
 		dev_err(&pdev->dev, "could not register pinctrl driver\n");
 		return PTR_ERR(info->pctl_dev);
@@ -1741,6 +1817,7 @@
 	.free = gpiochip_generic_free,
 	.set = rockchip_gpio_set,
 	.get = rockchip_gpio_get,
+	.get_direction	= rockchip_gpio_get_direction,
 	.direction_input = rockchip_gpio_direction_input,
 	.direction_output = rockchip_gpio_direction_output,
 	.to_irq = rockchip_gpio_to_irq,
@@ -2541,19 +2618,24 @@
 };
 
 static struct rockchip_pin_bank rk3399_pin_banks[] = {
-	PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(0, 32, "gpio0", IOMUX_SOURCE_PMU,
-					IOMUX_SOURCE_PMU,
-					IOMUX_SOURCE_PMU,
-					IOMUX_SOURCE_PMU,
-					DRV_TYPE_IO_1V8_ONLY,
-					DRV_TYPE_IO_1V8_ONLY,
-					DRV_TYPE_IO_DEFAULT,
-					DRV_TYPE_IO_DEFAULT,
-					0x0,
-					0x8,
-					-1,
-					-1
-					),
+	PIN_BANK_IOMUX_FLAGS_DRV_FLAGS_OFFSET_PULL_FLAGS(0, 32, "gpio0",
+							 IOMUX_SOURCE_PMU,
+							 IOMUX_SOURCE_PMU,
+							 IOMUX_SOURCE_PMU,
+							 IOMUX_SOURCE_PMU,
+							 DRV_TYPE_IO_1V8_ONLY,
+							 DRV_TYPE_IO_1V8_ONLY,
+							 DRV_TYPE_IO_DEFAULT,
+							 DRV_TYPE_IO_DEFAULT,
+							 0x0,
+							 0x8,
+							 -1,
+							 -1,
+							 PULL_TYPE_IO_1V8_ONLY,
+							 PULL_TYPE_IO_1V8_ONLY,
+							 PULL_TYPE_IO_DEFAULT,
+							 PULL_TYPE_IO_DEFAULT
+							),
 	PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(1, 32, "gpio1", IOMUX_SOURCE_PMU,
 					IOMUX_SOURCE_PMU,
 					IOMUX_SOURCE_PMU,
@@ -2567,11 +2649,15 @@
 					0x30,
 					0x38
 					),
-	PIN_BANK_DRV_FLAGS(2, 32, "gpio2", DRV_TYPE_IO_1V8_OR_3V0,
-			   DRV_TYPE_IO_1V8_OR_3V0,
-			   DRV_TYPE_IO_1V8_ONLY,
-			   DRV_TYPE_IO_1V8_ONLY
-			   ),
+	PIN_BANK_DRV_FLAGS_PULL_FLAGS(2, 32, "gpio2", DRV_TYPE_IO_1V8_OR_3V0,
+				      DRV_TYPE_IO_1V8_OR_3V0,
+				      DRV_TYPE_IO_1V8_ONLY,
+				      DRV_TYPE_IO_1V8_ONLY,
+				      PULL_TYPE_IO_DEFAULT,
+				      PULL_TYPE_IO_DEFAULT,
+				      PULL_TYPE_IO_1V8_ONLY,
+				      PULL_TYPE_IO_1V8_ONLY
+				      ),
 	PIN_BANK_DRV_FLAGS(3, 32, "gpio3", DRV_TYPE_IO_3V3_ONLY,
 			   DRV_TYPE_IO_3V3_ONLY,
 			   DRV_TYPE_IO_3V3_ONLY,
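For reference, a self-contained user-space sketch of the table lookup that replaces the old switch statements in rockchip_get_pull()/rockchip_set_pull() above; the BIAS_* constants only stand in for the real PIN_CONFIG_BIAS_* values:

#include <stdio.h>

enum { BIAS_DISABLE, BIAS_PULL_UP, BIAS_PULL_DOWN, BIAS_BUS_HOLD };
enum { PULL_TYPE_IO_DEFAULT, PULL_TYPE_IO_1V8_ONLY, PULL_TYPE_MAX };

/* Same shape as rockchip_pull_list[][]: 2-bit register field -> bias. */
static const int pull_list[PULL_TYPE_MAX][4] = {
	{ BIAS_DISABLE, BIAS_PULL_UP, BIAS_PULL_DOWN, BIAS_BUS_HOLD },
	{ BIAS_DISABLE, BIAS_PULL_DOWN, BIAS_DISABLE, BIAS_PULL_UP },
};

/* Read path: the field read from the pull register indexes the table. */
static int get_pull(int pull_type, unsigned int field)
{
	return pull_list[pull_type][field & 0x3];
}

/* Write path: reverse lookup of the requested bias, -1 if unsupported. */
static int find_field(int pull_type, int bias)
{
	int i;

	for (i = 0; i < 4; i++)
		if (pull_list[pull_type][i] == bias)
			return i;
	return -1;
}

int main(void)
{
	printf("%d\n", get_pull(PULL_TYPE_IO_1V8_ONLY, 3));	/* BIAS_PULL_UP */
	printf("%d\n", find_field(PULL_TYPE_IO_1V8_ONLY, BIAS_PULL_UP)); /* 3 */
	return 0;
}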
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index fb126d5..cf9bafa 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1280,9 +1280,9 @@
 
 		/* Parse pins in each row from LSB */
 		while (mask) {
-			bit_pos = ffs(mask);
+			bit_pos = __ffs(mask);
 			pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
-			mask_pos = ((pcs->fmask) << (bit_pos - 1));
+			mask_pos = ((pcs->fmask) << bit_pos);
 			val_pos = val & mask_pos;
 			submask = mask & mask_pos;
 
@@ -1852,7 +1852,7 @@
 	ret = of_property_read_u32(np, "pinctrl-single,function-mask",
 				   &pcs->fmask);
 	if (!ret) {
-		pcs->fshift = ffs(pcs->fmask) - 1;
+		pcs->fshift = __ffs(pcs->fmask);
 		pcs->fmax = pcs->fmask >> pcs->fshift;
 	} else {
 		/* If mask property doesn't exist, function mux is invalid. */
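The pinctrl-single fix above hinges on ffs() being 1-based while the kernel's __ffs() is 0-based; a small user-space check of that difference (my___ffs() merely stands in for the kernel helper):

#include <stdio.h>
#include <strings.h>	/* POSIX ffs(): 1-based, returns 0 when no bit is set */

/* Stand-in for the kernel's __ffs(): 0-based, undefined for 0. */
static unsigned long my___ffs(unsigned long x)
{
	return __builtin_ctzl(x);
}

int main(void)
{
	unsigned int mask = 0x30;	/* bits 4 and 5 set */

	printf("ffs(0x%x)   = %d  (1-based)\n", mask, ffs(mask));
	printf("__ffs(0x%x) = %lu (0-based)\n", mask, my___ffs(mask));

	/*
	 * The old code therefore had to shift by (ffs(mask) - 1); with
	 * __ffs() the bit position is used directly, and the same 0-based
	 * value is what pin_num_from_lsb = bit_pos / bits_per_pin expects.
	 */
	return 0;
}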
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index cab66c6..d0ba968 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1724,7 +1724,7 @@
 	pctl_desc->confops	= &st_confops;
 	pctl_desc->name		= dev_name(&pdev->dev);
 
-	info->pctl = pinctrl_register(pctl_desc, &pdev->dev, info);
+	info->pctl = devm_pinctrl_register(&pdev->dev, pctl_desc, info);
 	if (IS_ERR(info->pctl)) {
 		dev_err(&pdev->dev, "Failed pinctrl registration\n");
 		return PTR_ERR(info->pctl);
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 6546b9b..edfba50 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -582,7 +582,7 @@
 	.get_group_name   = tb10x_get_group_name,
 	.get_group_pins   = tb10x_get_group_pins,
 	.dt_node_to_map   = tb10x_dt_node_to_map,
-	.dt_free_map      = pinctrl_utils_dt_free_map,
+	.dt_free_map      = pinctrl_utils_free_map,
 };
 
 static int tb10x_get_functions_count(struct pinctrl_dev *pctl)
@@ -806,7 +806,7 @@
 		}
 	}
 
-	state->pctl = pinctrl_register(&tb10x_pindesc, dev, state);
+	state->pctl = devm_pinctrl_register(dev, &tb10x_pindesc, state);
 	if (IS_ERR(state->pctl)) {
 		dev_err(dev, "could not register TB10x pin driver\n");
 		ret = PTR_ERR(state->pctl);
@@ -824,7 +824,6 @@
 {
 	struct tb10x_pinctrl *state = platform_get_drvdata(pdev);
 
-	pinctrl_unregister(state->pctl);
 	mutex_destroy(&state->mutex);
 
 	return 0;
diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c
index b89ad3c..e70e362 100644
--- a/drivers/pinctrl/pinctrl-tz1090-pdc.c
+++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c
@@ -947,7 +947,8 @@
 	if (IS_ERR(pmx->regs))
 		return PTR_ERR(pmx->regs);
 
-	pmx->pctl = pinctrl_register(&tz1090_pdc_pinctrl_desc, &pdev->dev, pmx);
+	pmx->pctl = devm_pinctrl_register(&pdev->dev, &tz1090_pdc_pinctrl_desc,
+					  pmx);
 	if (IS_ERR(pmx->pctl)) {
 		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
 		return PTR_ERR(pmx->pctl);
@@ -960,15 +961,6 @@
 	return 0;
 }
 
-static int tz1090_pdc_pinctrl_remove(struct platform_device *pdev)
-{
-	struct tz1090_pdc_pmx *pmx = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(pmx->pctl);
-
-	return 0;
-}
-
 static const struct of_device_id tz1090_pdc_pinctrl_of_match[] = {
 	{ .compatible = "img,tz1090-pdc-pinctrl", },
 	{ },
@@ -980,7 +972,6 @@
 		.of_match_table	= tz1090_pdc_pinctrl_of_match,
 	},
 	.probe	= tz1090_pdc_pinctrl_probe,
-	.remove	= tz1090_pdc_pinctrl_remove,
 };
 
 static int __init tz1090_pdc_pinctrl_init(void)
diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c
index 5425299..04cbe53 100644
--- a/drivers/pinctrl/pinctrl-tz1090.c
+++ b/drivers/pinctrl/pinctrl-tz1090.c
@@ -1962,7 +1962,8 @@
 	if (IS_ERR(pmx->regs))
 		return PTR_ERR(pmx->regs);
 
-	pmx->pctl = pinctrl_register(&tz1090_pinctrl_desc, &pdev->dev, pmx);
+	pmx->pctl = devm_pinctrl_register(&pdev->dev, &tz1090_pinctrl_desc,
+					  pmx);
 	if (IS_ERR(pmx->pctl)) {
 		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
 		return PTR_ERR(pmx->pctl);
@@ -1975,15 +1976,6 @@
 	return 0;
 }
 
-static int tz1090_pinctrl_remove(struct platform_device *pdev)
-{
-	struct tz1090_pmx *pmx = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(pmx->pctl);
-
-	return 0;
-}
-
 static const struct of_device_id tz1090_pinctrl_of_match[] = {
 	{ .compatible = "img,tz1090-pinctrl", },
 	{ },
@@ -1995,7 +1987,6 @@
 		.of_match_table	= tz1090_pinctrl_of_match,
 	},
 	.probe	= tz1090_pinctrl_probe,
-	.remove	= tz1090_pinctrl_remove,
 };
 
 static int __init tz1090_pinctrl_init(void)
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index c076021..d1af908 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1067,7 +1067,7 @@
 	if (IS_ERR(upmx->virtbase))
 		return PTR_ERR(upmx->virtbase);
 
-	upmx->pctl = pinctrl_register(&u300_pmx_desc, &pdev->dev, upmx);
+	upmx->pctl = devm_pinctrl_register(&pdev->dev, &u300_pmx_desc, upmx);
 	if (IS_ERR(upmx->pctl)) {
 		dev_err(&pdev->dev, "could not register U300 pinmux driver\n");
 		return PTR_ERR(upmx->pctl);
@@ -1080,15 +1080,6 @@
 	return 0;
 }
 
-static int u300_pmx_remove(struct platform_device *pdev)
-{
-	struct u300_pmx *upmx = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(upmx->pctl);
-
-	return 0;
-}
-
 static const struct of_device_id u300_pinctrl_match[] = {
 	{ .compatible = "stericsson,pinctrl-u300" },
 	{},
@@ -1101,7 +1092,6 @@
 		.of_match_table = u300_pinctrl_match,
 	},
 	.probe = u300_pmx_probe,
-	.remove = u300_pmx_remove,
 };
 
 static int __init u300_pmx_init(void)
diff --git a/drivers/pinctrl/pinctrl-utils.c b/drivers/pinctrl/pinctrl-utils.c
index d77693f..9189fba 100644
--- a/drivers/pinctrl/pinctrl-utils.c
+++ b/drivers/pinctrl/pinctrl-utils.c
@@ -122,7 +122,7 @@
 }
 EXPORT_SYMBOL_GPL(pinctrl_utils_add_config);
 
-void pinctrl_utils_dt_free_map(struct pinctrl_dev *pctldev,
+void pinctrl_utils_free_map(struct pinctrl_dev *pctldev,
 	      struct pinctrl_map *map, unsigned num_maps)
 {
 	int i;
@@ -139,4 +139,4 @@
 	}
 	kfree(map);
 }
-EXPORT_SYMBOL_GPL(pinctrl_utils_dt_free_map);
+EXPORT_SYMBOL_GPL(pinctrl_utils_free_map);
diff --git a/drivers/pinctrl/pinctrl-utils.h b/drivers/pinctrl/pinctrl-utils.h
index d0ffe1c..8f9f2d2 100644
--- a/drivers/pinctrl/pinctrl-utils.h
+++ b/drivers/pinctrl/pinctrl-utils.h
@@ -37,7 +37,7 @@
 int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
 		unsigned long **configs, unsigned *num_configs,
 		unsigned long config);
-void pinctrl_utils_dt_free_map(struct pinctrl_dev *pctldev,
+void pinctrl_utils_free_map(struct pinctrl_dev *pctldev,
 		struct pinctrl_map *map, unsigned num_maps);
 
 #endif /* __PINCTRL_UTILS_H__ */
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 412c6b7..a13f2b6 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1573,6 +1573,22 @@
 	return 0;
 }
 
+/*
+ * gpiolib gpiod_to_irq callback function.
+ * Returns the mapped IRQ (external interrupt) number for a given GPIO pin.
+ */
+static int xway_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct ltq_pinmux_info *info = dev_get_drvdata(chip->parent);
+	int i;
+
+	for (i = 0; i < info->num_exin; i++)
+		if (info->exin[i] == offset)
+			return ltq_eiu_get_irq(i);
+
+	return -1;
+}
+
 static struct gpio_chip xway_chip = {
 	.label = "gpio-xway",
 	.direction_input = xway_gpio_dir_in,
@@ -1581,6 +1597,7 @@
 	.set = xway_gpio_set,
 	.request = gpiochip_generic_request,
 	.free = gpiochip_generic_free,
+	.to_irq = xway_gpio_to_irq,
 	.base = -1,
 };
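The new .to_irq callback is what a consumer reaches through gpiod_to_irq(); a hedged sketch of that consumer side, with hypothetical foo_* names (only gpiod_to_irq() and devm_request_irq() are taken as given):

#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

/*
 * Hypothetical consumer: turn an already-claimed GPIO descriptor into an
 * interrupt line.  gpiod_to_irq() dispatches to the gpio_chip's .to_irq(),
 * i.e. xway_gpio_to_irq() for pins routed to the EIU on this SoC.
 */
static int foo_request_gpio_irq(struct device *dev, struct gpio_desc *gpiod,
				irq_handler_t handler, void *data)
{
	int irq = gpiod_to_irq(gpiod);

	if (irq < 0)
		return irq;

	return devm_request_irq(dev, irq, handler, IRQF_TRIGGER_RISING,
				"foo-gpio", data);
}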
 
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 76f1abd..8fdc60c 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -862,7 +862,7 @@
 	.get_group_name = zynq_pctrl_get_group_name,
 	.get_group_pins = zynq_pctrl_get_group_pins,
 	.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 /* pinmux */
@@ -1195,7 +1195,7 @@
 	pctrl->funcs = zynq_pmux_functions;
 	pctrl->nfuncs = ARRAY_SIZE(zynq_pmux_functions);
 
-	pctrl->pctrl = pinctrl_register(&zynq_desc, &pdev->dev, pctrl);
+	pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &zynq_desc, pctrl);
 	if (IS_ERR(pctrl->pctrl))
 		return PTR_ERR(pctrl->pctrl);
 
@@ -1206,15 +1206,6 @@
 	return 0;
 }
 
-static int zynq_pinctrl_remove(struct platform_device *pdev)
-{
-	struct zynq_pinctrl *pctrl = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(pctrl->pctrl);
-
-	return 0;
-}
-
 static const struct of_device_id zynq_pinctrl_of_match[] = {
 	{ .compatible = "xlnx,pinctrl-zynq" },
 	{ }
@@ -1227,7 +1218,6 @@
 		.of_match_table = zynq_pinctrl_of_match,
 	},
 	.probe = zynq_pinctrl_probe,
-	.remove = zynq_pinctrl_remove,
 };
 
 static int __init zynq_pinctrl_init(void)
diff --git a/drivers/pinctrl/pxa/Kconfig b/drivers/pinctrl/pxa/Kconfig
index 990667f..c29bdcf 100644
--- a/drivers/pinctrl/pxa/Kconfig
+++ b/drivers/pinctrl/pxa/Kconfig
@@ -6,12 +6,20 @@
 	select PINCONF
 	select GENERIC_PINCONF
 
+config PINCTRL_PXA25X
+	tristate "Marvell PXA25x pin controller driver"
+	select PINCTRL_PXA
+	default y if PXA25x
+	help
+	  This is the pinctrl, pinmux, pinconf driver for the Marvell
+	  PXA2xx block found in the pxa25x platforms.
+
 config PINCTRL_PXA27X
 	tristate "Marvell PXA27x pin controller driver"
 	select PINCTRL_PXA
 	default y if PXA27x
 	help
 	  This is the pinctrl, pinmux, pinconf driver for the Marvell
-	  PXA2xx block found in the pxa25x and pxa27x platforms.
+	  PXA2xx block found in the pxa27x platforms.
 
 endif
diff --git a/drivers/pinctrl/pxa/Makefile b/drivers/pinctrl/pxa/Makefile
index f1d56af..ca2ade1 100644
--- a/drivers/pinctrl/pxa/Makefile
+++ b/drivers/pinctrl/pxa/Makefile
@@ -1,2 +1,3 @@
 # Marvell PXA pin control drivers
+obj-$(CONFIG_PINCTRL_PXA25X)	+= pinctrl-pxa2xx.o pinctrl-pxa25x.o
 obj-$(CONFIG_PINCTRL_PXA27X)	+= pinctrl-pxa2xx.o pinctrl-pxa27x.o
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa25x.c b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
new file mode 100644
index 0000000..b98ecb3
--- /dev/null
+++ b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
@@ -0,0 +1,274 @@
+/*
+ * Marvell PXA25x family pin control
+ *
+ * Copyright (C) 2016 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-pxa2xx.h"
+
+static const struct pxa_desc_pin pxa25x_pins[] = {
+	PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(0)),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(1),
+		     PXA_FUNCTION(0, 1, "GP_RST")),
+	PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(2)),
+	PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(3)),
+	PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(4)),
+	PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(5)),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(6),
+		     PXA_FUNCTION(1, 1, "MMCCLK")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(7),
+		     PXA_FUNCTION(1, 1, "48_MHz")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(8),
+		     PXA_FUNCTION(1, 1, "MMCCS0")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(9),
+		     PXA_FUNCTION(1, 1, "MMCCS1")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(10),
+		     PXA_FUNCTION(1, 1, "RTCCLK")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(11),
+		     PXA_FUNCTION(1, 1, "3_6_MHz")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(12),
+		     PXA_FUNCTION(1, 1, "32_kHz")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(13),
+		     PXA_FUNCTION(1, 2, "MBGNT")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(14),
+		     PXA_FUNCTION(0, 1, "MBREQ")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(15),
+		     PXA_FUNCTION(1, 2, "nCS_1")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(16),
+		     PXA_FUNCTION(1, 2, "PWM0")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(17),
+		     PXA_FUNCTION(1, 2, "PWM1")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(18),
+		     PXA_FUNCTION(0, 1, "RDY")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(19),
+		     PXA_FUNCTION(0, 1, "DREQ[1]")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(20),
+		     PXA_FUNCTION(0, 1, "DREQ[0]")),
+	PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(21)),
+	PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(22)),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(23),
+		     PXA_FUNCTION(1, 2, "SCLK")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(24),
+		     PXA_FUNCTION(1, 2, "SFRM")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(25),
+		     PXA_FUNCTION(1, 2, "TXD")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(26),
+		     PXA_FUNCTION(0, 1, "RXD")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(27),
+		     PXA_FUNCTION(0, 1, "EXTCLK")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(28),
+		     PXA_FUNCTION(0, 1, "BITCLK"),
+		     PXA_FUNCTION(0, 2, "BITCLK"),
+		     PXA_FUNCTION(1, 1, "BITCLK")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(29),
+		     PXA_FUNCTION(0, 1, "SDATA_IN0"),
+		     PXA_FUNCTION(0, 2, "SDATA_IN")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(30),
+		     PXA_FUNCTION(1, 1, "SDATA_OUT"),
+		     PXA_FUNCTION(1, 2, "SDATA_OUT")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(31),
+		     PXA_FUNCTION(1, 1, "SYNC"),
+		     PXA_FUNCTION(1, 2, "SYNC")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(32),
+		     PXA_FUNCTION(0, 1, "SDATA_IN1"),
+		     PXA_FUNCTION(1, 1, "SYSCLK")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(33),
+		     PXA_FUNCTION(1, 2, "nCS[5]")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(34),
+		     PXA_FUNCTION(0, 1, "FFRXD"),
+		     PXA_FUNCTION(1, 2, "MMCCS0")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(35),
+		     PXA_FUNCTION(0, 1, "CTS")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(36),
+		     PXA_FUNCTION(0, 1, "DCD")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(37),
+		     PXA_FUNCTION(0, 1, "DSR")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(38),
+		     PXA_FUNCTION(0, 1, "RI")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(39),
+		     PXA_FUNCTION(1, 1, "MMCC1"),
+		     PXA_FUNCTION(1, 2, "FFTXD")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(40),
+		     PXA_FUNCTION(1, 2, "DTR")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(41),
+		     PXA_FUNCTION(1, 2, "RTS")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(42),
+		     PXA_FUNCTION(0, 1, "BTRXD"),
+		     PXA_FUNCTION(0, 3, "HWRXD")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(43),
+		     PXA_FUNCTION(1, 2, "BTTXD"),
+		     PXA_FUNCTION(1, 3, "HWTXD")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(44),
+		     PXA_FUNCTION(0, 1, "BTCTS"),
+		     PXA_FUNCTION(0, 3, "HWCTS")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(45),
+		     PXA_FUNCTION(1, 2, "BTRTS"),
+		     PXA_FUNCTION(1, 3, "HWRTS")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(46),
+		     PXA_FUNCTION(0, 1, "ICP_RXD"),
+		     PXA_FUNCTION(0, 2, "RXD")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(47),
+		     PXA_FUNCTION(1, 1, "TXD"),
+		     PXA_FUNCTION(1, 2, "ICP_TXD")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(48),
+		     PXA_FUNCTION(1, 1, "HWTXD"),
+		     PXA_FUNCTION(1, 2, "nPOE")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(49),
+		     PXA_FUNCTION(0, 1, "HWRXD"),
+		     PXA_FUNCTION(1, 2, "nPWE")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(50),
+		     PXA_FUNCTION(0, 1, "HWCTS"),
+		     PXA_FUNCTION(1, 2, "nPIOR")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(51),
+		     PXA_FUNCTION(1, 1, "HWRTS"),
+		     PXA_FUNCTION(1, 2, "nPIOW")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(52),
+		     PXA_FUNCTION(1, 2, "nPCE[1]")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(53),
+		     PXA_FUNCTION(1, 1, "MMCCLK"),
+		     PXA_FUNCTION(1, 2, "nPCE[2]")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(54),
+		     PXA_FUNCTION(1, 1, "MMCCLK"),
+		     PXA_FUNCTION(1, 2, "nPSKTSEL")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(55),
+		     PXA_FUNCTION(1, 2, "nPREG")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(56),
+		     PXA_FUNCTION(0, 1, "nPWAIT")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(57),
+		     PXA_FUNCTION(0, 1, "nIOIS16")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(58),
+		     PXA_FUNCTION(1, 2, "LDD<0>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(59),
+		     PXA_FUNCTION(1, 2, "LDD<1>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(60),
+		     PXA_FUNCTION(1, 2, "LDD<2>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(61),
+		     PXA_FUNCTION(1, 2, "LDD<3>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(62),
+		     PXA_FUNCTION(1, 2, "LDD<4>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(63),
+		     PXA_FUNCTION(1, 2, "LDD<5>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(64),
+		     PXA_FUNCTION(1, 2, "LDD<6>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(65),
+		     PXA_FUNCTION(1, 2, "LDD<7>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(66),
+		     PXA_FUNCTION(0, 1, "MBREQ"),
+		     PXA_FUNCTION(1, 2, "LDD<8>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(67),
+		     PXA_FUNCTION(1, 1, "MMCCS0"),
+		     PXA_FUNCTION(1, 2, "LDD<9>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(68),
+		     PXA_FUNCTION(1, 1, "MMCCS1"),
+		     PXA_FUNCTION(1, 2, "LDD<10>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(69),
+		     PXA_FUNCTION(1, 1, "MMCCLK"),
+		     PXA_FUNCTION(1, 2, "LDD<11>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(70),
+		     PXA_FUNCTION(1, 1, "RTCCLK"),
+		     PXA_FUNCTION(1, 2, "LDD<12>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(71),
+		     PXA_FUNCTION(1, 1, "3_6_MHz"),
+		     PXA_FUNCTION(1, 2, "LDD<13>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(72),
+		     PXA_FUNCTION(1, 1, "32_kHz"),
+		     PXA_FUNCTION(1, 2, "LDD<14>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(73),
+		     PXA_FUNCTION(1, 1, "MBGNT"),
+		     PXA_FUNCTION(1, 2, "LDD<15>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(74),
+		     PXA_FUNCTION(1, 2, "LCD_FCLK")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(75),
+		     PXA_FUNCTION(1, 2, "LCD_LCLK")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(76),
+		     PXA_FUNCTION(1, 2, "LCD_PCLK")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(77),
+		     PXA_FUNCTION(1, 2, "LCD_ACBIAS")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(78),
+		     PXA_FUNCTION(1, 2, "nCS<2>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(79),
+		     PXA_FUNCTION(1, 2, "nCS<3>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(80),
+		     PXA_FUNCTION(1, 2, "nCS<4>")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(81),
+		     PXA_FUNCTION(0, 1, "NSSPSCLK"),
+		     PXA_FUNCTION(1, 1, "NSSPSCLK")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(82),
+		     PXA_FUNCTION(0, 1, "NSSPSFRM"),
+		     PXA_FUNCTION(1, 1, "NSSPSFRM")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(83),
+		     PXA_FUNCTION(0, 2, "NSSPRXD"),
+		     PXA_FUNCTION(1, 1, "NSSPTXD")),
+	PXA_GPIO_PIN(PXA_PINCTRL_PIN(84),
+		     PXA_FUNCTION(0, 2, "NSSPRXD"),
+		     PXA_FUNCTION(1, 1, "NSSPTXD")),
+};
+
+static int pxa25x_pinctrl_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	void __iomem *base_af[8];
+	void __iomem *base_dir[4];
+	void __iomem *base_sleep[4];
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base_af[0] = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base_af[0]))
+		return PTR_ERR(base_af[0]);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	base_dir[0] = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base_dir[0]))
+		return PTR_ERR(base_dir[0]);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	base_dir[3] = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base_dir[3]))
+		return PTR_ERR(base_dir[3]);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+	base_sleep[0] = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base_sleep[0]))
+		return PTR_ERR(base_sleep[0]);
+
+	for (i = 0; i < ARRAY_SIZE(base_af); i++)
+		base_af[i] = base_af[0] + sizeof(base_af[0]) * i;
+	for (i = 0; i < 3; i++)
+		base_dir[i] = base_dir[0] + sizeof(base_dir[0]) * i;
+	for (i = 0; i < ARRAY_SIZE(base_sleep); i++)
+		base_sleep[i] = base_sleep[0] + sizeof(base_af[0]) * i;
+
+	ret = pxa2xx_pinctrl_init(pdev, pxa25x_pins, ARRAY_SIZE(pxa25x_pins),
+				  base_af, base_dir, base_sleep);
+	return ret;
+}
+
+static const struct of_device_id pxa25x_pinctrl_match[] = {
+	{ .compatible = "marvell,pxa25x-pinctrl", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, pxa25x_pinctrl_match);
+
+static struct platform_driver pxa25x_pinctrl_driver = {
+	.probe	= pxa25x_pinctrl_probe,
+	.driver	= {
+		.name		= "pxa25x-pinctrl",
+		.of_match_table	= pxa25x_pinctrl_match,
+	},
+};
+module_platform_driver(pxa25x_pinctrl_driver);
+
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_DESCRIPTION("Marvell PXA25x pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index f553313..866aa3c 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -57,7 +57,7 @@
 static const struct pinctrl_ops pxa2xx_pctl_ops = {
 #ifdef CONFIG_OF
 	.dt_node_to_map		= pinconf_generic_dt_node_to_map_all,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 #endif
 	.get_groups_count	= pxa2xx_pctrl_get_groups_count,
 	.get_group_name		= pxa2xx_pctrl_get_group_name,
@@ -416,7 +416,7 @@
 	if (ret)
 		return ret;
 
-	pctl->pctl_dev = pinctrl_register(&pctl->desc, &pdev->dev, pctl);
+	pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, &pctl->desc, pctl);
 	if (IS_ERR(pctl->pctl_dev)) {
 		dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
 		return PTR_ERR(pctl->pctl_dev);
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b5d81ce..b68ae42 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -237,7 +237,7 @@
 		.pins = gpio##id##_pins,		\
 		.npins = (unsigned)ARRAY_SIZE(gpio##id##_pins),	\
 		.funcs = (int[]){			\
-			qca_mux_NA, /* gpio mode */	\
+			qca_mux_gpio, /* gpio mode */	\
 			qca_mux_##f1,			\
 			qca_mux_##f2,			\
 			qca_mux_##f3,			\
@@ -254,11 +254,11 @@
 			qca_mux_##f14			\
 		},				        \
 		.nfuncs = 15,				\
-		.ctl_reg = 0x1000 + 0x10 * id,		\
-		.io_reg = 0x1004 + 0x10 * id,		\
-		.intr_cfg_reg = 0x1008 + 0x10 * id,	\
-		.intr_status_reg = 0x100c + 0x10 * id,	\
-		.intr_target_reg = 0x400 + 0x4 * id,	\
+		.ctl_reg = 0x0 + 0x1000 * id,		\
+		.io_reg = 0x4 + 0x1000 * id,		\
+		.intr_cfg_reg = 0x8 + 0x1000 * id,	\
+		.intr_status_reg = 0xc + 0x1000 * id,	\
+		.intr_target_reg = 0x8 + 0x1000 * id,	\
 		.mux_bit = 2,			\
 		.pull_bit = 0,			\
 		.drv_bit = 6,			\
@@ -414,7 +414,7 @@
 	.nfunctions = ARRAY_SIZE(ipq4019_functions),
 	.groups = ipq4019_groups,
 	.ngroups = ARRAY_SIZE(ipq4019_groups),
-	.ngpios = 70,
+	.ngpios = 100,
 };
 
 static int ipq4019_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 8777cf0..1a44e1d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -101,7 +101,7 @@
 	.get_group_name		= msm_get_group_name,
 	.get_group_pins		= msm_get_group_pins,
 	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 };
 
 static int msm_get_functions_count(struct pinctrl_dev *pctldev)
@@ -898,17 +898,16 @@
 	msm_pinctrl_desc.name = dev_name(&pdev->dev);
 	msm_pinctrl_desc.pins = pctrl->soc->pins;
 	msm_pinctrl_desc.npins = pctrl->soc->npins;
-	pctrl->pctrl = pinctrl_register(&msm_pinctrl_desc, &pdev->dev, pctrl);
+	pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &msm_pinctrl_desc,
+					     pctrl);
 	if (IS_ERR(pctrl->pctrl)) {
 		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
 		return PTR_ERR(pctrl->pctrl);
 	}
 
 	ret = msm_gpio_init(pctrl);
-	if (ret) {
-		pinctrl_unregister(pctrl->pctrl);
+	if (ret)
 		return ret;
-	}
 
 	platform_set_drvdata(pdev, pctrl);
 
@@ -923,7 +922,6 @@
 	struct msm_pinctrl *pctrl = platform_get_drvdata(pdev);
 
 	gpiochip_remove(&pctrl->chip);
-	pinctrl_unregister(pctrl->pctrl);
 
 	unregister_restart_handler(&pctrl->restart_nb);
 
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 4e12ded..686accb 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -212,7 +212,7 @@
 	.get_group_name		= pmic_gpio_get_group_name,
 	.get_group_pins		= pmic_gpio_get_group_pins,
 	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 };
 
 static int pmic_gpio_get_functions_count(struct pinctrl_dev *pctldev)
@@ -764,14 +764,14 @@
 	state->chip.of_gpio_n_cells = 2;
 	state->chip.can_sleep = false;
 
-	state->ctrl = pinctrl_register(pctrldesc, dev, state);
+	state->ctrl = devm_pinctrl_register(dev, pctrldesc, state);
 	if (IS_ERR(state->ctrl))
 		return PTR_ERR(state->ctrl);
 
 	ret = gpiochip_add_data(&state->chip, state);
 	if (ret) {
 		dev_err(state->dev, "can't add gpio chip\n");
-		goto err_chip;
+		return ret;
 	}
 
 	ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
@@ -784,8 +784,6 @@
 
 err_range:
 	gpiochip_remove(&state->chip);
-err_chip:
-	pinctrl_unregister(state->ctrl);
 	return ret;
 }
 
@@ -794,7 +792,6 @@
 	struct pmic_gpio_state *state = platform_get_drvdata(pdev);
 
 	gpiochip_remove(&state->chip);
-	pinctrl_unregister(state->ctrl);
 	return 0;
 }
 
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 2a3e549..1735ffe 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -235,7 +235,7 @@
 	.get_group_name		= pmic_mpp_get_group_name,
 	.get_group_pins		= pmic_mpp_get_group_pins,
 	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 };
 
 static int pmic_mpp_get_functions_count(struct pinctrl_dev *pctldev)
@@ -877,14 +877,14 @@
 	state->chip.of_gpio_n_cells = 2;
 	state->chip.can_sleep = false;
 
-	state->ctrl = pinctrl_register(pctrldesc, dev, state);
+	state->ctrl = devm_pinctrl_register(dev, pctrldesc, state);
 	if (IS_ERR(state->ctrl))
 		return PTR_ERR(state->ctrl);
 
 	ret = gpiochip_add_data(&state->chip, state);
 	if (ret) {
 		dev_err(state->dev, "can't add gpio chip\n");
-		goto err_chip;
+		return ret;
 	}
 
 	ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
@@ -897,8 +897,6 @@
 
 err_range:
 	gpiochip_remove(&state->chip);
-err_chip:
-	pinctrl_unregister(state->ctrl);
 	return ret;
 }
 
@@ -907,7 +905,6 @@
 	struct pmic_mpp_state *state = platform_get_drvdata(pdev);
 
 	gpiochip_remove(&state->chip);
-	pinctrl_unregister(state->ctrl);
 	return 0;
 }
 
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index cd8580d..d3f5501d 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -200,7 +200,7 @@
 	.get_group_name		= pm8xxx_get_group_name,
 	.get_group_pins         = pm8xxx_get_group_pins,
 	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 };
 
 static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
@@ -729,7 +729,7 @@
 	pctrl->desc.custom_conf_items = pm8xxx_conf_items;
 #endif
 
-	pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+	pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &pctrl->desc, pctrl);
 	if (IS_ERR(pctrl->pctrl)) {
 		dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n");
 		return PTR_ERR(pctrl->pctrl);
@@ -745,7 +745,7 @@
 	ret = gpiochip_add_data(&pctrl->chip, pctrl);
 	if (ret) {
 		dev_err(&pdev->dev, "failed register gpiochip\n");
-		goto unregister_pinctrl;
+		return ret;
 	}
 
 	ret = gpiochip_add_pin_range(&pctrl->chip,
@@ -765,9 +765,6 @@
 unregister_gpiochip:
 	gpiochip_remove(&pctrl->chip);
 
-unregister_pinctrl:
-	pinctrl_unregister(pctrl->pctrl);
-
 	return ret;
 }
 
@@ -777,8 +774,6 @@
 
 	gpiochip_remove(&pctrl->chip);
 
-	pinctrl_unregister(pctrl->pctrl);
-
 	return 0;
 }
 
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 54a5402..9191727 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -277,7 +277,7 @@
 	.get_group_name		= pm8xxx_get_group_name,
 	.get_group_pins         = pm8xxx_get_group_pins,
 	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 };
 
 static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
@@ -820,7 +820,7 @@
 	pctrl->desc.custom_conf_items = pm8xxx_conf_items;
 #endif
 
-	pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+	pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &pctrl->desc, pctrl);
 	if (IS_ERR(pctrl->pctrl)) {
 		dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n");
 		return PTR_ERR(pctrl->pctrl);
@@ -836,7 +836,7 @@
 	ret = gpiochip_add_data(&pctrl->chip, pctrl);
 	if (ret) {
 		dev_err(&pdev->dev, "failed register gpiochip\n");
-		goto unregister_pinctrl;
+		return ret;
 	}
 
 	ret = gpiochip_add_pin_range(&pctrl->chip,
@@ -856,9 +856,6 @@
 unregister_gpiochip:
 	gpiochip_remove(&pctrl->chip);
 
-unregister_pinctrl:
-	pinctrl_unregister(pctrl->pctrl);
-
 	return ret;
 }
 
@@ -868,8 +865,6 @@
 
 	gpiochip_remove(&pctrl->chip);
 
-	pinctrl_unregister(pctrl->pctrl);
-
 	return 0;
 }
 
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index 00ab63a..fb71fc3 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -107,6 +107,7 @@
  * @nr_groups: number of pin groups available.
  * @pmx_functions: list of pin functions parsed from device tree.
  * @nr_functions: number of pin functions available.
+ * @range: gpio range to register with pinctrl
  */
 struct exynos5440_pinctrl_priv_data {
 	void __iomem			*reg_base;
@@ -117,6 +118,7 @@
 	unsigned int			nr_groups;
 	const struct exynos5440_pmx_func	*pmx_functions;
 	unsigned int			nr_functions;
+	struct pinctrl_gpio_range	range;
 };
 
 /**
@@ -742,7 +744,6 @@
 	struct pinctrl_desc *ctrldesc;
 	struct pinctrl_dev *pctl_dev;
 	struct pinctrl_pin_desc *pindesc, *pdesc;
-	struct pinctrl_gpio_range grange;
 	char *pin_names;
 	int pin, ret;
 
@@ -788,18 +789,18 @@
 	if (ret)
 		return ret;
 
-	pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, priv);
+	pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, priv);
 	if (IS_ERR(pctl_dev)) {
 		dev_err(&pdev->dev, "could not register pinctrl driver\n");
 		return PTR_ERR(pctl_dev);
 	}
 
-	grange.name = "exynos5440-pctrl-gpio-range";
-	grange.id = 0;
-	grange.base = 0;
-	grange.npins = EXYNOS5440_MAX_PINS;
-	grange.gc = priv->gc;
-	pinctrl_add_gpio_range(pctl_dev, &grange);
+	priv->range.name = "exynos5440-pctrl-gpio-range";
+	priv->range.id = 0;
+	priv->range.base = 0;
+	priv->range.npins = EXYNOS5440_MAX_PINS;
+	priv->range.gc = priv->gc;
+	pinctrl_add_gpio_range(pctl_dev, &priv->range);
 	return 0;
 }
 
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 5cc97f8..ed0b708 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -884,7 +884,8 @@
 	if (ret)
 		return ret;
 
-	drvdata->pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, drvdata);
+	drvdata->pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc,
+						  drvdata);
 	if (IS_ERR(drvdata->pctl_dev)) {
 		dev_err(&pdev->dev, "could not register pinctrl driver\n");
 		return PTR_ERR(drvdata->pctl_dev);
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index dc3609f..9b9cee0 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -175,6 +175,21 @@
 	BUG();
 }
 
+u32 sh_pfc_read_reg(struct sh_pfc *pfc, u32 reg, unsigned int width)
+{
+	return sh_pfc_read_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width);
+}
+
+void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width, u32 data)
+{
+	if (pfc->info->unlock_reg)
+		sh_pfc_write_raw_reg(
+			sh_pfc_phys_to_virt(pfc, pfc->info->unlock_reg), 32,
+			~data);
+
+	sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width, data);
+}
+
 static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
 				     const struct pinmux_cfg_reg *crp,
 				     unsigned int in_pos,
@@ -546,7 +561,9 @@
 			return ret;
 	}
 
-	pinctrl_provide_dummies();
+	/* Enable dummy states for those platforms without pinctrl support */
+	if (!of_have_populated_dt())
+		pinctrl_provide_dummies();
 
 	ret = sh_pfc_init_ranges(pfc);
 	if (ret < 0)
@@ -583,12 +600,9 @@
 
 static int sh_pfc_remove(struct platform_device *pdev)
 {
-	struct sh_pfc *pfc = platform_get_drvdata(pdev);
-
 #ifdef CONFIG_PINCTRL_SH_PFC_GPIO
-	sh_pfc_unregister_gpiochip(pfc);
+	sh_pfc_unregister_gpiochip(platform_get_drvdata(pdev));
 #endif
-	sh_pfc_unregister_pinctrl(pfc);
 
 	return 0;
 }
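sh_pfc_write_reg() added above follows the PFC write-protection sequence shown in the hunk: the complement of the value is written to the unlock register (PMMR) immediately before the protected write. A tiny user-space simulation of that two-step sequence, with a fake register window standing in for the memory-mapped block:

#include <stdint.h>
#include <stdio.h>

/* Fake register window standing in for the memory-mapped PFC block. */
static uint32_t regs[0x1000 / 4];

#define PMMR	0x000	/* unlock register, offset within the window */
#define IOCTRL6	0x08c	/* an arbitrary protected register */

static void pfc_write_protected(uint32_t reg, uint32_t data)
{
	regs[PMMR / 4] = ~data;	/* step 1: inverted value unlocks the write */
	regs[reg / 4]  = data;	/* step 2: the actual register write */
}

int main(void)
{
	pfc_write_protected(IOCTRL6, 0x00ff00ff);
	printf("PMMR=%08x IOCTRL6=%08x\n",
	       (unsigned int)regs[PMMR / 4], (unsigned int)regs[IOCTRL6 / 4]);
	return 0;
}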
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 62f53b2..dc1b2ad 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -50,18 +50,19 @@
 	struct sh_pfc_chip *func;
 #endif
 
-	struct sh_pfc_pinctrl *pinctrl;
 };
 
 int sh_pfc_register_gpiochip(struct sh_pfc *pfc);
 int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc);
 
 int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
-int sh_pfc_unregister_pinctrl(struct sh_pfc *pfc);
 
 u32 sh_pfc_read_raw_reg(void __iomem *mapped_reg, unsigned int reg_width);
 void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
 			  u32 data);
+u32 sh_pfc_read_reg(struct sh_pfc *pfc, u32 reg, unsigned int width);
+void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width,
+		      u32 data);
 
 int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
 int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index a6681b8..97dff6a 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -212,7 +212,7 @@
 		}
 	}
 
-	return -ENOSYS;
+	return 0;
 
 found:
 	return pfc->irqs[i];
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 0f4d48f..eed8daa 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -21,16 +21,21 @@
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
+#include <linux/io.h>
 #include <linux/kernel.h>
 
 #include "core.h"
 #include "sh_pfc.h"
 
+/*
+ * All pins assigned to GPIO bank 3 can be used for SD interfaces in
+ * which case they support both 3.3V and 1.8V signalling.
+ */
 #define CPU_ALL_PORT(fn, sfx)						\
 	PORT_GP_32(0, fn, sfx),						\
 	PORT_GP_30(1, fn, sfx),						\
 	PORT_GP_30(2, fn, sfx),						\
-	PORT_GP_32(3, fn, sfx),						\
+	PORT_GP_CFG_32(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),		\
 	PORT_GP_32(4, fn, sfx),						\
 	PORT_GP_32(5, fn, sfx)
 
@@ -4691,6 +4696,47 @@
 	"vin3_clk",
 };
 
+#define IOCTRL6 0x8c
+
+static int r8a7790_get_io_voltage(struct sh_pfc *pfc, unsigned int pin)
+{
+	u32 data, mask;
+
+	if (WARN(pin < RCAR_GP_PIN(3, 0) || pin > RCAR_GP_PIN(3, 31), "invalid pin %#x", pin))
+		return -EINVAL;
+
+	data = ioread32(pfc->windows->virt + IOCTRL6);
+	/* Bits in IOCTRL6 are numbered in opposite order to pins */
+	mask = 0x80000000 >> (pin & 0x1f);
+
+	return (data & mask) ? 3300 : 1800;
+}
+
+static int r8a7790_set_io_voltage(struct sh_pfc *pfc, unsigned int pin, u16 mV)
+{
+	u32 data, mask;
+
+	if (WARN(pin < RCAR_GP_PIN(3, 0) || pin > RCAR_GP_PIN(3, 31), "invalid pin %#x", pin))
+		return -EINVAL;
+
+	if (mV != 1800 && mV != 3300)
+		return -EINVAL;
+
+	data = ioread32(pfc->windows->virt + IOCTRL6);
+	/* Bits in IOCTRL6 are numbered in opposite order to pins */
+	mask = 0x80000000 >> (pin & 0x1f);
+
+	if (mV == 3300)
+		data |= mask;
+	else
+		data &= ~mask;
+
+	iowrite32(~data, pfc->windows->virt); /* unlock reg */
+	iowrite32(data, pfc->windows->virt + IOCTRL6);
+
+	return 0;
+}
+
 static const struct sh_pfc_function pinmux_functions[] = {
 	SH_PFC_FUNCTION(audio_clk),
 	SH_PFC_FUNCTION(avb),
@@ -5690,8 +5736,14 @@
 	{ },
 };
 
+static const struct sh_pfc_soc_operations pinmux_ops = {
+	.get_io_voltage = r8a7790_get_io_voltage,
+	.set_io_voltage = r8a7790_set_io_voltage,
+};
+
 const struct sh_pfc_soc_info r8a7790_pinmux_info = {
 	.name = "r8a77900_pfc",
+	.ops = &pinmux_ops,
 	.unlock_reg = 0xe6060000, /* PMMR */
 
 	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
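A quick check of the reversed bit numbering used by r8a7790_{get,set}_io_voltage() above: with mask = 0x80000000 >> (pin & 0x1f), GP3_00 lands on bit 31 of IOCTRL6 and GP3_31 on bit 0.

#include <stdio.h>

int main(void)
{
	unsigned int pin;

	for (pin = 0; pin < 32; pin++) {
		unsigned int mask = 0x80000000u >> (pin & 0x1f);
		unsigned int bit  = 31 - (pin & 0x1f);

		if (pin == 0 || pin == 31)
			printf("GP3_%02u -> mask %08x (bit %u)\n",
			       pin, mask, bit);
	}
	return 0;
}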
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index 38912cf..8bc2cf0c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -1682,6 +1682,179 @@
 static const unsigned int avb_avtp_match_b_mux[] = {
 	AVB_AVTP_MATCH_B_MARK,
 };
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du0_rgb666_pins[] = {
+	/* R[7:2], G[7:2], B[7:2] */
+	RCAR_GP_PIN(2, 7),  RCAR_GP_PIN(2, 6),  RCAR_GP_PIN(2, 5),
+	RCAR_GP_PIN(2, 4),  RCAR_GP_PIN(2, 3),  RCAR_GP_PIN(2, 2),
+	RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
+	RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 10),
+	RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 22), RCAR_GP_PIN(2, 21),
+	RCAR_GP_PIN(2, 20), RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 18),
+};
+static const unsigned int du0_rgb666_mux[] = {
+	DU0_DR7_MARK, DU0_DR6_MARK, DU0_DR5_MARK, DU0_DR4_MARK,
+	DU0_DR3_MARK, DU0_DR2_MARK,
+	DU0_DG7_MARK, DU0_DG6_MARK, DU0_DG5_MARK, DU0_DG4_MARK,
+	DU0_DG3_MARK, DU0_DG2_MARK,
+	DU0_DB7_MARK, DU0_DB6_MARK, DU0_DB5_MARK, DU0_DB4_MARK,
+	DU0_DB3_MARK, DU0_DB2_MARK,
+};
+static const unsigned int du0_rgb888_pins[] = {
+	/* R[7:0], G[7:0], B[7:0] */
+	RCAR_GP_PIN(2, 7),  RCAR_GP_PIN(2, 6),  RCAR_GP_PIN(2, 5),
+	RCAR_GP_PIN(2, 4),  RCAR_GP_PIN(2, 3),  RCAR_GP_PIN(2, 2),
+	RCAR_GP_PIN(2, 1),  RCAR_GP_PIN(2, 0),
+	RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
+	RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 10),
+	RCAR_GP_PIN(2, 9),  RCAR_GP_PIN(2, 8),
+	RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 22), RCAR_GP_PIN(2, 21),
+	RCAR_GP_PIN(2, 20), RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 18),
+	RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 16),
+};
+static const unsigned int du0_rgb888_mux[] = {
+	DU0_DR7_MARK, DU0_DR6_MARK, DU0_DR5_MARK, DU0_DR4_MARK,
+	DU0_DR3_MARK, DU0_DR2_MARK, DU0_DR1_MARK, DU0_DR0_MARK,
+	DU0_DG7_MARK, DU0_DG6_MARK, DU0_DG5_MARK, DU0_DG4_MARK,
+	DU0_DG3_MARK, DU0_DG2_MARK, DU0_DG1_MARK, DU0_DG0_MARK,
+	DU0_DB7_MARK, DU0_DB6_MARK, DU0_DB5_MARK, DU0_DB4_MARK,
+	DU0_DB3_MARK, DU0_DB2_MARK, DU0_DB1_MARK, DU0_DB0_MARK,
+};
+static const unsigned int du0_clk0_out_pins[] = {
+	/* DOTCLKOUT0 */
+	RCAR_GP_PIN(2, 25),
+};
+static const unsigned int du0_clk0_out_mux[] = {
+	DU0_DOTCLKOUT0_MARK
+};
+static const unsigned int du0_clk1_out_pins[] = {
+	/* DOTCLKOUT1 */
+	RCAR_GP_PIN(2, 26),
+};
+static const unsigned int du0_clk1_out_mux[] = {
+	DU0_DOTCLKOUT1_MARK
+};
+static const unsigned int du0_clk_in_pins[] = {
+	/* CLKIN */
+	RCAR_GP_PIN(2, 24),
+};
+static const unsigned int du0_clk_in_mux[] = {
+	DU0_DOTCLKIN_MARK
+};
+static const unsigned int du0_sync_pins[] = {
+	/* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+	RCAR_GP_PIN(2, 28), RCAR_GP_PIN(2, 27),
+};
+static const unsigned int du0_sync_mux[] = {
+	DU0_EXVSYNC_DU0_VSYNC_MARK, DU0_EXHSYNC_DU0_HSYNC_MARK
+};
+static const unsigned int du0_oddf_pins[] = {
+	/* EXODDF/ODDF/DISP/CDE */
+	RCAR_GP_PIN(2, 29),
+};
+static const unsigned int du0_oddf_mux[] = {
+	DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK,
+};
+static const unsigned int du0_cde_pins[] = {
+	/* CDE */
+	RCAR_GP_PIN(2, 31),
+};
+static const unsigned int du0_cde_mux[] = {
+	DU0_CDE_MARK,
+};
+static const unsigned int du0_disp_pins[] = {
+	/* DISP */
+	RCAR_GP_PIN(2, 30),
+};
+static const unsigned int du0_disp_mux[] = {
+	DU0_DISP_MARK
+};
+static const unsigned int du1_rgb666_pins[] = {
+	/* R[7:2], G[7:2], B[7:2] */
+	RCAR_GP_PIN(4, 7),  RCAR_GP_PIN(4, 6),  RCAR_GP_PIN(4, 5),
+	RCAR_GP_PIN(4, 4),  RCAR_GP_PIN(4, 3),  RCAR_GP_PIN(4, 2),
+	RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 13),
+	RCAR_GP_PIN(4, 12), RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 10),
+	RCAR_GP_PIN(4, 23), RCAR_GP_PIN(4, 22), RCAR_GP_PIN(4, 21),
+	RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 18),
+};
+static const unsigned int du1_rgb666_mux[] = {
+	DU1_DR7_MARK, DU1_DR6_MARK, DU1_DR5_MARK, DU1_DR4_MARK,
+	DU1_DR3_MARK, DU1_DR2_MARK,
+	DU1_DG7_MARK, DU1_DG6_MARK, DU1_DG5_MARK, DU1_DG4_MARK,
+	DU1_DG3_MARK, DU1_DG2_MARK,
+	DU1_DB7_MARK, DU1_DB6_MARK, DU1_DB5_MARK, DU1_DB4_MARK,
+	DU1_DB3_MARK, DU1_DB2_MARK,
+};
+static const unsigned int du1_rgb888_pins[] = {
+	/* R[7:0], G[7:0], B[7:0] */
+	RCAR_GP_PIN(4, 7),  RCAR_GP_PIN(4, 6),  RCAR_GP_PIN(4, 5),
+	RCAR_GP_PIN(4, 4),  RCAR_GP_PIN(4, 3),  RCAR_GP_PIN(4, 2),
+	RCAR_GP_PIN(4, 1),  RCAR_GP_PIN(4, 0),
+	RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 13),
+	RCAR_GP_PIN(4, 12), RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 10),
+	RCAR_GP_PIN(4, 9),  RCAR_GP_PIN(4, 8),
+	RCAR_GP_PIN(4, 23), RCAR_GP_PIN(4, 22), RCAR_GP_PIN(4, 21),
+	RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 18),
+	RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int du1_rgb888_mux[] = {
+	DU1_DR7_MARK, DU1_DR6_MARK, DU1_DR5_MARK, DU1_DR4_MARK,
+	DU1_DR3_MARK, DU1_DR2_MARK, DU1_DR1_MARK, DU1_DR0_MARK,
+	DU1_DG7_MARK, DU1_DG6_MARK, DU1_DG5_MARK, DU1_DG4_MARK,
+	DU1_DG3_MARK, DU1_DG2_MARK, DU1_DG1_MARK, DU1_DG0_MARK,
+	DU1_DB7_MARK, DU1_DB6_MARK, DU1_DB5_MARK, DU1_DB4_MARK,
+	DU1_DB3_MARK, DU1_DB2_MARK, DU1_DB1_MARK, DU1_DB0_MARK,
+};
+static const unsigned int du1_clk0_out_pins[] = {
+	/* DOTCLKOUT0 */
+	RCAR_GP_PIN(4, 25),
+};
+static const unsigned int du1_clk0_out_mux[] = {
+	DU1_DOTCLKOUT0_MARK
+};
+static const unsigned int du1_clk1_out_pins[] = {
+	/* DOTCLKOUT1 */
+	RCAR_GP_PIN(4, 26),
+};
+static const unsigned int du1_clk1_out_mux[] = {
+	DU1_DOTCLKOUT1_MARK
+};
+static const unsigned int du1_clk_in_pins[] = {
+	/* DOTCLKIN */
+	RCAR_GP_PIN(4, 24),
+};
+static const unsigned int du1_clk_in_mux[] = {
+	DU1_DOTCLKIN_MARK
+};
+static const unsigned int du1_sync_pins[] = {
+	/* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+	RCAR_GP_PIN(4, 28), RCAR_GP_PIN(4, 27),
+};
+static const unsigned int du1_sync_mux[] = {
+	DU1_EXVSYNC_DU1_VSYNC_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK
+};
+static const unsigned int du1_oddf_pins[] = {
+	/* EXODDF/ODDF/DISP/CDE */
+	RCAR_GP_PIN(4, 29),
+};
+static const unsigned int du1_oddf_mux[] = {
+	DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK,
+};
+static const unsigned int du1_cde_pins[] = {
+	/* CDE */
+	RCAR_GP_PIN(4, 31),
+};
+static const unsigned int du1_cde_mux[] = {
+	DU1_CDE_MARK
+};
+static const unsigned int du1_disp_pins[] = {
+	/* DISP */
+	RCAR_GP_PIN(4, 30),
+};
+static const unsigned int du1_disp_mux[] = {
+	DU1_DISP_MARK
+};
 /* - ETH -------------------------------------------------------------------- */
 static const unsigned int eth_link_pins[] = {
 	/* LINK */
@@ -3364,6 +3537,24 @@
 	SH_PFC_PIN_GROUP(avb_avtp_match),
 	SH_PFC_PIN_GROUP(avb_avtp_capture_b),
 	SH_PFC_PIN_GROUP(avb_avtp_match_b),
+	SH_PFC_PIN_GROUP(du0_rgb666),
+	SH_PFC_PIN_GROUP(du0_rgb888),
+	SH_PFC_PIN_GROUP(du0_clk0_out),
+	SH_PFC_PIN_GROUP(du0_clk1_out),
+	SH_PFC_PIN_GROUP(du0_clk_in),
+	SH_PFC_PIN_GROUP(du0_sync),
+	SH_PFC_PIN_GROUP(du0_oddf),
+	SH_PFC_PIN_GROUP(du0_cde),
+	SH_PFC_PIN_GROUP(du0_disp),
+	SH_PFC_PIN_GROUP(du1_rgb666),
+	SH_PFC_PIN_GROUP(du1_rgb888),
+	SH_PFC_PIN_GROUP(du1_clk0_out),
+	SH_PFC_PIN_GROUP(du1_clk1_out),
+	SH_PFC_PIN_GROUP(du1_clk_in),
+	SH_PFC_PIN_GROUP(du1_sync),
+	SH_PFC_PIN_GROUP(du1_oddf),
+	SH_PFC_PIN_GROUP(du1_cde),
+	SH_PFC_PIN_GROUP(du1_disp),
 	SH_PFC_PIN_GROUP(eth_link),
 	SH_PFC_PIN_GROUP(eth_magic),
 	SH_PFC_PIN_GROUP(eth_mdio),
@@ -3622,6 +3813,30 @@
 	"avb_avtp_match_b",
 };
 
+static const char * const du0_groups[] = {
+	"du0_rgb666",
+	"du0_rgb888",
+	"du0_clk0_out",
+	"du0_clk1_out",
+	"du0_clk_in",
+	"du0_sync",
+	"du0_oddf",
+	"du0_cde",
+	"du0_disp",
+};
+
+static const char * const du1_groups[] = {
+	"du1_rgb666",
+	"du1_rgb888",
+	"du1_clk0_out",
+	"du1_clk1_out",
+	"du1_clk_in",
+	"du1_sync",
+	"du1_oddf",
+	"du1_cde",
+	"du1_disp",
+};
+
 static const char * const eth_groups[] = {
 	"eth_link",
 	"eth_magic",
@@ -3969,6 +4184,8 @@
 static const struct sh_pfc_function pinmux_functions[] = {
 	SH_PFC_FUNCTION(audio_clk),
 	SH_PFC_FUNCTION(avb),
+	SH_PFC_FUNCTION(du0),
+	SH_PFC_FUNCTION(du1),
 	SH_PFC_FUNCTION(eth),
 	SH_PFC_FUNCTION(hscif0),
 	SH_PFC_FUNCTION(hscif1),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 5979dab..44632b1 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -14,14 +14,14 @@
 #include "sh_pfc.h"
 
 #define CPU_ALL_PORT(fn, sfx)						\
-	PORT_GP_16(0, fn, sfx),						\
-	PORT_GP_28(1, fn, sfx),						\
-	PORT_GP_15(2, fn, sfx),						\
-	PORT_GP_16(3, fn, sfx),						\
-	PORT_GP_18(4, fn, sfx),						\
-	PORT_GP_26(5, fn, sfx),						\
-	PORT_GP_32(6, fn, sfx),						\
-	PORT_GP_4(7, fn, sfx)
+	PORT_GP_CFG_16(0, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
+	PORT_GP_CFG_28(1, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
+	PORT_GP_CFG_15(2, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
+	PORT_GP_CFG_16(3, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
+	PORT_GP_CFG_18(4, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
+	PORT_GP_CFG_26(5, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
+	PORT_GP_CFG_32(6, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH),	\
+	PORT_GP_CFG_4(7, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH)
 /*
  * F_() : just information
  * FM() : macro for FN_xxx / xxx_MARK
@@ -4564,6 +4564,207 @@
 	{ },
 };
 
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+	{ PINMUX_DRIVE_REG("DRVCTRL3", 0xe606030c) {
+		{ RCAR_GP_PIN(2,  9),  8, 3 },	/* AVB_MDC */
+		{ RCAR_GP_PIN(2, 10),  4, 3 },	/* AVB_MAGIC */
+		{ RCAR_GP_PIN(2, 11),  0, 3 },	/* AVB_PHY_INT */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL4", 0xe6060310) {
+		{ RCAR_GP_PIN(2, 12), 28, 3 },	/* AVB_LINK */
+		{ RCAR_GP_PIN(2, 13), 24, 3 },	/* AVB_AVTP_MATCH */
+		{ RCAR_GP_PIN(2, 14), 20, 3 },	/* AVB_AVTP_CAPTURE */
+		{ RCAR_GP_PIN(2,  0), 16, 3 },	/* IRQ0 */
+		{ RCAR_GP_PIN(2,  1), 12, 3 },	/* IRQ1 */
+		{ RCAR_GP_PIN(2,  2),  8, 3 },	/* IRQ2 */
+		{ RCAR_GP_PIN(2,  3),  4, 3 },	/* IRQ3 */
+		{ RCAR_GP_PIN(2,  4),  0, 3 },	/* IRQ4 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL5", 0xe6060314) {
+		{ RCAR_GP_PIN(2,  5), 28, 3 },	/* IRQ5 */
+		{ RCAR_GP_PIN(2,  6), 24, 3 },	/* PWM0 */
+		{ RCAR_GP_PIN(2,  7), 20, 3 },	/* PWM1 */
+		{ RCAR_GP_PIN(2,  8), 16, 3 },	/* PWM2 */
+		{ RCAR_GP_PIN(1,  0), 12, 3 },	/* A0 */
+		{ RCAR_GP_PIN(1,  1),  8, 3 },	/* A1 */
+		{ RCAR_GP_PIN(1,  2),  4, 3 },	/* A2 */
+		{ RCAR_GP_PIN(1,  3),  0, 3 },	/* A3 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL6", 0xe6060318) {
+		{ RCAR_GP_PIN(1,  4), 28, 3 },	/* A4 */
+		{ RCAR_GP_PIN(1,  5), 24, 3 },	/* A5 */
+		{ RCAR_GP_PIN(1,  6), 20, 3 },	/* A6 */
+		{ RCAR_GP_PIN(1,  7), 16, 3 },	/* A7 */
+		{ RCAR_GP_PIN(1,  8), 12, 3 },	/* A8 */
+		{ RCAR_GP_PIN(1,  9),  8, 3 },	/* A9 */
+		{ RCAR_GP_PIN(1, 10),  4, 3 },	/* A10 */
+		{ RCAR_GP_PIN(1, 11),  0, 3 },	/* A11 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL7", 0xe606031c) {
+		{ RCAR_GP_PIN(1, 12), 28, 3 },	/* A12 */
+		{ RCAR_GP_PIN(1, 13), 24, 3 },	/* A13 */
+		{ RCAR_GP_PIN(1, 14), 20, 3 },	/* A14 */
+		{ RCAR_GP_PIN(1, 15), 16, 3 },	/* A15 */
+		{ RCAR_GP_PIN(1, 16), 12, 3 },	/* A16 */
+		{ RCAR_GP_PIN(1, 17),  8, 3 },	/* A17 */
+		{ RCAR_GP_PIN(1, 18),  4, 3 },	/* A18 */
+		{ RCAR_GP_PIN(1, 19),  0, 3 },	/* A19 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) {
+		{ RCAR_GP_PIN(1, 20), 24, 3 },	/* CS0 */
+		{ RCAR_GP_PIN(1, 21), 20, 3 },	/* CS1_A26 */
+		{ RCAR_GP_PIN(1, 22), 16, 3 },	/* BS */
+		{ RCAR_GP_PIN(1, 23), 12, 3 },	/* RD */
+		{ RCAR_GP_PIN(1, 24),  8, 3 },	/* RD_WR */
+		{ RCAR_GP_PIN(1, 25),  4, 3 },	/* WE0 */
+		{ RCAR_GP_PIN(1, 26),  0, 3 },	/* WE1 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) {
+		{ RCAR_GP_PIN(1, 27), 28, 3 },	/* EX_WAIT0 */
+		{ RCAR_GP_PIN(0,  0), 20, 3 },	/* D0 */
+		{ RCAR_GP_PIN(0,  1), 16, 3 },	/* D1 */
+		{ RCAR_GP_PIN(0,  2), 12, 3 },	/* D2 */
+		{ RCAR_GP_PIN(0,  3),  8, 3 },	/* D3 */
+		{ RCAR_GP_PIN(0,  4),  4, 3 },	/* D4 */
+		{ RCAR_GP_PIN(0,  5),  0, 3 },	/* D5 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL10", 0xe6060328) {
+		{ RCAR_GP_PIN(0,  6), 28, 3 },	/* D6 */
+		{ RCAR_GP_PIN(0,  7), 24, 3 },	/* D7 */
+		{ RCAR_GP_PIN(0,  8), 20, 3 },	/* D8 */
+		{ RCAR_GP_PIN(0,  9), 16, 3 },	/* D9 */
+		{ RCAR_GP_PIN(0, 10), 12, 3 },	/* D10 */
+		{ RCAR_GP_PIN(0, 11),  8, 3 },	/* D11 */
+		{ RCAR_GP_PIN(0, 12),  4, 3 },	/* D12 */
+		{ RCAR_GP_PIN(0, 13),  0, 3 },	/* D13 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL11", 0xe606032c) {
+		{ RCAR_GP_PIN(0, 14), 28, 3 },	/* D14 */
+		{ RCAR_GP_PIN(0, 15), 24, 3 },	/* D15 */
+		{ RCAR_GP_PIN(7,  0), 20, 3 },	/* AVS1 */
+		{ RCAR_GP_PIN(7,  1), 16, 3 },	/* AVS2 */
+		{ RCAR_GP_PIN(7,  2), 12, 3 },	/* HDMI0_CEC */
+		{ RCAR_GP_PIN(7,  3),  8, 3 },	/* HDMI1_CEC */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL13", 0xe6060334) {
+		{ RCAR_GP_PIN(3,  0), 20, 3 },	/* SD0_CLK */
+		{ RCAR_GP_PIN(3,  1), 16, 3 },	/* SD0_CMD */
+		{ RCAR_GP_PIN(3,  2), 12, 3 },	/* SD0_DAT0 */
+		{ RCAR_GP_PIN(3,  3),  8, 3 },	/* SD0_DAT1 */
+		{ RCAR_GP_PIN(3,  4),  4, 3 },	/* SD0_DAT2 */
+		{ RCAR_GP_PIN(3,  5),  0, 3 },	/* SD0_DAT3 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL14", 0xe6060338) {
+		{ RCAR_GP_PIN(3,  6), 28, 3 },	/* SD1_CLK */
+		{ RCAR_GP_PIN(3,  7), 24, 3 },	/* SD1_CMD */
+		{ RCAR_GP_PIN(3,  8), 20, 3 },	/* SD1_DAT0 */
+		{ RCAR_GP_PIN(3,  9), 16, 3 },	/* SD1_DAT1 */
+		{ RCAR_GP_PIN(3, 10), 12, 3 },	/* SD1_DAT2 */
+		{ RCAR_GP_PIN(3, 11),  8, 3 },	/* SD1_DAT3 */
+		{ RCAR_GP_PIN(4,  0),  4, 3 },	/* SD2_CLK */
+		{ RCAR_GP_PIN(4,  1),  0, 3 },	/* SD2_CMD */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL15", 0xe606033c) {
+		{ RCAR_GP_PIN(4,  2), 28, 3 },	/* SD2_DAT0 */
+		{ RCAR_GP_PIN(4,  3), 24, 3 },	/* SD2_DAT1 */
+		{ RCAR_GP_PIN(4,  4), 20, 3 },	/* SD2_DAT2 */
+		{ RCAR_GP_PIN(4,  5), 16, 3 },	/* SD2_DAT3 */
+		{ RCAR_GP_PIN(4,  6), 12, 3 },	/* SD2_DS */
+		{ RCAR_GP_PIN(4,  7),  8, 3 },	/* SD3_CLK */
+		{ RCAR_GP_PIN(4,  8),  4, 3 },	/* SD3_CMD */
+		{ RCAR_GP_PIN(4,  9),  0, 3 },	/* SD3_DAT0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL16", 0xe6060340) {
+		{ RCAR_GP_PIN(4, 10), 28, 3 },	/* SD3_DAT1 */
+		{ RCAR_GP_PIN(4, 11), 24, 3 },	/* SD3_DAT2 */
+		{ RCAR_GP_PIN(4, 12), 20, 3 },	/* SD3_DAT3 */
+		{ RCAR_GP_PIN(4, 13), 16, 3 },	/* SD3_DAT4 */
+		{ RCAR_GP_PIN(4, 14), 12, 3 },	/* SD3_DAT5 */
+		{ RCAR_GP_PIN(4, 15),  8, 3 },	/* SD3_DAT6 */
+		{ RCAR_GP_PIN(4, 16),  4, 3 },	/* SD3_DAT7 */
+		{ RCAR_GP_PIN(4, 17),  0, 3 },	/* SD3_DS */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL17", 0xe6060344) {
+		{ RCAR_GP_PIN(3, 12), 28, 3 },	/* SD0_CD */
+		{ RCAR_GP_PIN(3, 13), 24, 3 },	/* SD0_WP */
+		{ RCAR_GP_PIN(3, 14), 20, 3 },	/* SD1_CD */
+		{ RCAR_GP_PIN(3, 15), 16, 3 },	/* SD1_WP */
+		{ RCAR_GP_PIN(5,  0), 12, 3 },	/* SCK0 */
+		{ RCAR_GP_PIN(5,  1),  8, 3 },	/* RX0 */
+		{ RCAR_GP_PIN(5,  2),  4, 3 },	/* TX0 */
+		{ RCAR_GP_PIN(5,  3),  0, 3 },	/* CTS0 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL18", 0xe6060348) {
+		{ RCAR_GP_PIN(5,  4), 28, 3 },	/* RTS0_TANS */
+		{ RCAR_GP_PIN(5,  5), 24, 3 },	/* RX1 */
+		{ RCAR_GP_PIN(5,  6), 20, 3 },	/* TX1 */
+		{ RCAR_GP_PIN(5,  7), 16, 3 },	/* CTS1 */
+		{ RCAR_GP_PIN(5,  8), 12, 3 },	/* RTS1_TANS */
+		{ RCAR_GP_PIN(5,  9),  8, 3 },	/* SCK2 */
+		{ RCAR_GP_PIN(5, 10),  4, 3 },	/* TX2 */
+		{ RCAR_GP_PIN(5, 11),  0, 3 },	/* RX2 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL19", 0xe606034c) {
+		{ RCAR_GP_PIN(5, 12), 28, 3 },	/* HSCK0 */
+		{ RCAR_GP_PIN(5, 13), 24, 3 },	/* HRX0 */
+		{ RCAR_GP_PIN(5, 14), 20, 3 },	/* HTX0 */
+		{ RCAR_GP_PIN(5, 15), 16, 3 },	/* HCTS0 */
+		{ RCAR_GP_PIN(5, 16), 12, 3 },	/* HRTS0 */
+		{ RCAR_GP_PIN(5, 17),  8, 3 },	/* MSIOF0_SCK */
+		{ RCAR_GP_PIN(5, 18),  4, 3 },	/* MSIOF0_SYNC */
+		{ RCAR_GP_PIN(5, 19),  0, 3 },	/* MSIOF0_SS1 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL20", 0xe6060350) {
+		{ RCAR_GP_PIN(5, 20), 28, 3 },	/* MSIOF0_TXD */
+		{ RCAR_GP_PIN(5, 21), 24, 3 },	/* MSIOF0_SS2 */
+		{ RCAR_GP_PIN(5, 22), 20, 3 },	/* MSIOF0_RXD */
+		{ RCAR_GP_PIN(5, 23), 16, 3 },	/* MLB_CLK */
+		{ RCAR_GP_PIN(5, 24), 12, 3 },	/* MLB_SIG */
+		{ RCAR_GP_PIN(5, 25),  8, 3 },	/* MLB_DAT */
+		{ RCAR_GP_PIN(6,  0),  0, 3 },	/* SSI_SCK01239 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL21", 0xe6060354) {
+		{ RCAR_GP_PIN(6,  1), 28, 3 },	/* SSI_WS01239 */
+		{ RCAR_GP_PIN(6,  2), 24, 3 },	/* SSI_SDATA0 */
+		{ RCAR_GP_PIN(6,  3), 20, 3 },	/* SSI_SDATA1 */
+		{ RCAR_GP_PIN(6,  4), 16, 3 },	/* SSI_SDATA2 */
+		{ RCAR_GP_PIN(6,  5), 12, 3 },	/* SSI_SCK34 */
+		{ RCAR_GP_PIN(6,  6),  8, 3 },	/* SSI_WS34 */
+		{ RCAR_GP_PIN(6,  7),  4, 3 },	/* SSI_SDATA3 */
+		{ RCAR_GP_PIN(6,  8),  0, 3 },	/* SSI_SCK4 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL22", 0xe6060358) {
+		{ RCAR_GP_PIN(6,  9), 28, 3 },	/* SSI_WS4 */
+		{ RCAR_GP_PIN(6, 10), 24, 3 },	/* SSI_SDATA4 */
+		{ RCAR_GP_PIN(6, 11), 20, 3 },	/* SSI_SCK5 */
+		{ RCAR_GP_PIN(6, 12), 16, 3 },	/* SSI_WS5 */
+		{ RCAR_GP_PIN(6, 13), 12, 3 },	/* SSI_SDATA5 */
+		{ RCAR_GP_PIN(6, 14),  8, 3 },	/* SSI_SCK6 */
+		{ RCAR_GP_PIN(6, 15),  4, 3 },	/* SSI_WS6 */
+		{ RCAR_GP_PIN(6, 16),  0, 3 },	/* SSI_SDATA6 */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL23", 0xe606035c) {
+		{ RCAR_GP_PIN(6, 17), 28, 3 },	/* SSI_SCK78 */
+		{ RCAR_GP_PIN(6, 18), 24, 3 },	/* SSI_WS78 */
+		{ RCAR_GP_PIN(6, 19), 20, 3 },	/* SSI_SDATA7 */
+		{ RCAR_GP_PIN(6, 20), 16, 3 },	/* SSI_SDATA8 */
+		{ RCAR_GP_PIN(6, 21), 12, 3 },	/* SSI_SDATA9 */
+		{ RCAR_GP_PIN(6, 22),  8, 3 },	/* AUDIO_CLKA */
+		{ RCAR_GP_PIN(6, 23),  4, 3 },	/* AUDIO_CLKB */
+		{ RCAR_GP_PIN(6, 24),  0, 3 },	/* USB0_PWEN */
+	} },
+	{ PINMUX_DRIVE_REG("DRVCTRL24", 0xe6060360) {
+		{ RCAR_GP_PIN(6, 25), 28, 3 },	/* USB0_OVC */
+		{ RCAR_GP_PIN(6, 26), 24, 3 },	/* USB1_PWEN */
+		{ RCAR_GP_PIN(6, 27), 20, 3 },	/* USB1_OVC */
+		{ RCAR_GP_PIN(6, 28), 16, 3 },	/* USB30_PWEN */
+		{ RCAR_GP_PIN(6, 29), 12, 3 },	/* USB30_OVC */
+		{ RCAR_GP_PIN(6, 30),  8, 3 },	/* USB31_PWEN */
+		{ RCAR_GP_PIN(6, 31),  4, 3 },	/* USB31_OVC */
+	} },
+	{ },
+};
+
 const struct sh_pfc_soc_info r8a7795_pinmux_info = {
 	.name = "r8a77950_pfc",
 	.unlock_reg = 0xe6060000, /* PMMR */
@@ -4578,6 +4779,7 @@
 	.nr_functions = ARRAY_SIZE(pinmux_functions),
 
 	.cfg_regs = pinmux_config_regs,
+	.drive_regs = pinmux_drive_regs,
 
 	.pinmux_data = pinmux_data,
 	.pinmux_data_size = ARRAY_SIZE(pinmux_data),
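
The pinmux_drive_regs[] table added above packs the drive-strength controls: each 32-bit DRVCTRL register carries up to eight per-pin fields, and every entry records the pin, the bit offset of the field's least significant bit, and the field width (2 or 3 bits), with an all-zero entry terminating the table. The sketch below only mirrors that layout (struct and function names are illustrative, not the driver's) and shows how such a table is walked to find the register and field for a given pin:

        /* Illustrative only: mirrors the shape of the drive-strength table above. */
        struct drive_field { unsigned int pin, offset, size; };
        struct drive_reg   { unsigned int reg; struct drive_field fields[8]; };

        /* Return the register address holding @pin's field, or 0 if there is none. */
        static unsigned int find_drive_reg(const struct drive_reg *regs, unsigned int pin,
                                           unsigned int *offset, unsigned int *size)
        {
                const struct drive_reg *r;
                unsigned int i;

                for (r = regs; r->reg; r++) {           /* table ends with .reg == 0 */
                        for (i = 0; i < 8; i++) {
                                if (!r->fields[i].size || r->fields[i].pin != pin)
                                        continue;
                                *offset = r->fields[i].offset;
                                *size = r->fields[i].size;
                                return r->reg;
                        }
                }
                return 0;
        }
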
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 87b0a59..fdb445d 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -476,6 +476,91 @@
 	.gpio_set_direction	= sh_pfc_gpio_set_direction,
 };
 
+static u32 sh_pfc_pinconf_find_drive_strength_reg(struct sh_pfc *pfc,
+		unsigned int pin, unsigned int *offset, unsigned int *size)
+{
+	const struct pinmux_drive_reg_field *field;
+	const struct pinmux_drive_reg *reg;
+	unsigned int i;
+
+	for (reg = pfc->info->drive_regs; reg->reg; ++reg) {
+		for (i = 0; i < ARRAY_SIZE(reg->fields); ++i) {
+			field = &reg->fields[i];
+
+			if (field->size && field->pin == pin) {
+				*offset = field->offset;
+				*size = field->size;
+
+				return reg->reg;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int sh_pfc_pinconf_get_drive_strength(struct sh_pfc *pfc,
+					     unsigned int pin)
+{
+	unsigned long flags;
+	unsigned int offset;
+	unsigned int size;
+	u32 reg;
+	u32 val;
+
+	reg = sh_pfc_pinconf_find_drive_strength_reg(pfc, pin, &offset, &size);
+	if (!reg)
+		return -EINVAL;
+
+	spin_lock_irqsave(&pfc->lock, flags);
+	val = sh_pfc_read_reg(pfc, reg, 32);
+	spin_unlock_irqrestore(&pfc->lock, flags);
+
+	val = (val >> offset) & GENMASK(size - 1, 0);
+
+	/* Convert the value to mA based on a full drive strength value of 24mA.
+	 * We can make the full value configurable later if needed.
+	 */
+	return (val + 1) * (size == 2 ? 6 : 3);
+}
+
+static int sh_pfc_pinconf_set_drive_strength(struct sh_pfc *pfc,
+					     unsigned int pin, u16 strength)
+{
+	unsigned long flags;
+	unsigned int offset;
+	unsigned int size;
+	unsigned int step;
+	u32 reg;
+	u32 val;
+
+	reg = sh_pfc_pinconf_find_drive_strength_reg(pfc, pin, &offset, &size);
+	if (!reg)
+		return -EINVAL;
+
+	step = size == 2 ? 6 : 3;
+
+	if (strength < step || strength > 24)
+		return -EINVAL;
+
+	/* Convert the value from mA based on a full drive strength value of
+	 * 24mA. We can make the full value configurable later if needed.
+	 */
+	strength = strength / step - 1;
+
+	spin_lock_irqsave(&pfc->lock, flags);
+
+	val = sh_pfc_read_reg(pfc, reg, 32);
+	val &= ~GENMASK(offset + size - 1, offset);
+	val |= strength << offset;
+
+	sh_pfc_write_reg(pfc, reg, 32, val);
+
+	spin_unlock_irqrestore(&pfc->lock, flags);
+
+	return 0;
+}
+
 /* Check whether the requested parameter is supported for a pin. */
 static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
 				    enum pin_config_param param)
@@ -493,6 +578,9 @@
 	case PIN_CONFIG_BIAS_PULL_DOWN:
 		return pin->configs & SH_PFC_PIN_CFG_PULL_DOWN;
 
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		return pin->configs & SH_PFC_PIN_CFG_DRIVE_STRENGTH;
+
 	case PIN_CONFIG_POWER_SOURCE:
 		return pin->configs & SH_PFC_PIN_CFG_IO_VOLTAGE;
 
@@ -532,6 +620,17 @@
 		break;
 	}
 
+	case PIN_CONFIG_DRIVE_STRENGTH: {
+		int ret;
+
+		ret = sh_pfc_pinconf_get_drive_strength(pfc, _pin);
+		if (ret < 0)
+			return ret;
+
+		*config = ret;
+		break;
+	}
+
 	case PIN_CONFIG_POWER_SOURCE: {
 		int ret;
 
@@ -584,6 +683,18 @@
 
 			break;
 
+		case PIN_CONFIG_DRIVE_STRENGTH: {
+			unsigned int arg =
+				pinconf_to_config_argument(configs[i]);
+			int ret;
+
+			ret = sh_pfc_pinconf_set_drive_strength(pfc, _pin, arg);
+			if (ret < 0)
+				return ret;
+
+			break;
+		}
+
 		case PIN_CONFIG_POWER_SOURCE: {
 			unsigned int arg =
 				pinconf_to_config_argument(configs[i]);
@@ -678,7 +789,6 @@
 		return -ENOMEM;
 
 	pmx->pfc = pfc;
-	pfc->pinctrl = pmx;
 
 	ret = sh_pfc_map_pins(pfc, pmx);
 	if (ret < 0)
@@ -692,19 +802,9 @@
 	pmx->pctl_desc.pins = pmx->pins;
 	pmx->pctl_desc.npins = pfc->info->nr_pins;
 
-	pmx->pctl = pinctrl_register(&pmx->pctl_desc, pfc->dev, pmx);
+	pmx->pctl = devm_pinctrl_register(pfc->dev, &pmx->pctl_desc, pmx);
 	if (IS_ERR(pmx->pctl))
 		return PTR_ERR(pmx->pctl);
 
 	return 0;
 }
-
-int sh_pfc_unregister_pinctrl(struct sh_pfc *pfc)
-{
-	struct sh_pfc_pinctrl *pmx = pfc->pinctrl;
-
-	pinctrl_unregister(pmx->pctl);
-
-	pfc->pinctrl = NULL;
-	return 0;
-}
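
The mA conversion in sh_pfc_pinconf_get/set_drive_strength() above follows directly from a 24 mA full scale: a 3-bit field gives eight levels in 3 mA steps (3-24 mA), a 2-bit field four levels in 6 mA steps (6-24 mA), so decode is (val + 1) * step and encode is strength / step - 1 (out-of-range values are rejected; non-multiples are truncated to the step below, as the driver does). The same arithmetic as a standalone sketch:

        /* Drive-strength <-> register-value conversion, 24 mA full scale (sketch). */
        static unsigned int drive_step_ma(unsigned int size)
        {
                return size == 2 ? 6 : 3;               /* 4 x 6 mA or 8 x 3 mA levels */
        }

        static unsigned int drive_decode_ma(unsigned int val, unsigned int size)
        {
                return (val + 1) * drive_step_ma(size); /* e.g. val 7, 3-bit field -> 24 mA */
        }

        static int drive_encode(unsigned int ma, unsigned int size)
        {
                unsigned int step = drive_step_ma(size);

                if (ma < step || ma > 24)
                        return -1;
                return ma / step - 1;                   /* e.g. 12 mA, 3-bit field -> 3 */
        }
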
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index a490834..656ea32 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -28,6 +28,7 @@
 #define SH_PFC_PIN_CFG_PULL_UP		(1 << 2)
 #define SH_PFC_PIN_CFG_PULL_DOWN	(1 << 3)
 #define SH_PFC_PIN_CFG_IO_VOLTAGE	(1 << 4)
+#define SH_PFC_PIN_CFG_DRIVE_STRENGTH	(1 << 5)
 #define SH_PFC_PIN_CFG_NO_GPIO		(1 << 31)
 
 struct sh_pfc_pin {
@@ -131,6 +132,21 @@
 		{ var_fw0, var_fwn, 0 }, \
 	.enum_ids = (const u16 [])
 
+struct pinmux_drive_reg_field {
+	u16 pin;
+	u8 offset;
+	u8 size;
+};
+
+struct pinmux_drive_reg {
+	u32 reg;
+	const struct pinmux_drive_reg_field fields[8];
+};
+
+#define PINMUX_DRIVE_REG(name, r) \
+	.reg = r, \
+	.fields =
+
 struct pinmux_data_reg {
 	u32 reg;
 	u8 reg_width;
@@ -199,6 +215,7 @@
 #endif
 
 	const struct pinmux_cfg_reg *cfg_regs;
+	const struct pinmux_drive_reg *drive_regs;
 	const struct pinmux_data_reg *data_regs;
 
 	const u16 *pinmux_data;
@@ -276,7 +293,7 @@
  *   - msel: Module selector
  */
 #define PINMUX_IPSR_MSEL(ipsr, fn, msel)				\
-	PINMUX_DATA(fn##_MARK, FN_##msel, FN_##ipsr, FN_##fn)
+	PINMUX_DATA(fn##_MARK, FN_##msel, FN_##fn, FN_##ipsr)
 
 /*
  * Describe a pinmux configuration for a single-function pin with GPIO
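
The PINMUX_DRIVE_REG() macro above is only designated-initializer sugar: its name argument is documentation and is dropped by the expansion, and because the macro ends in ".fields =" the brace-enclosed list written after it in the SoC table becomes the field array. For example, the DRVCTRL6 entry earlier in this series expands to:

        {
                .reg = 0xe6060318,                      /* "DRVCTRL6" is not stored */
                .fields = {
                        { RCAR_GP_PIN(1,  4), 28, 3 },  /* A4 */
                        /* ... remaining fields ... */
                },
        },
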
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 3d233fc..168c0f5 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5798,7 +5798,7 @@
 
 	status = readl(ATLAS7_GPIO_INT_STATUS(bank));
 	if (!status) {
-		pr_warn("%s: gpio [%s] status %#x no interrupt is flaged\n",
+		pr_warn("%s: gpio [%s] status %#x no interrupt is flagged\n",
 			__func__, gc->label, status);
 		handle_bad_irq(desc);
 		return;
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 0afaf79..4db52ba 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -395,7 +395,7 @@
 	spear_pinctrl_desc.pins = machdata->pins;
 	spear_pinctrl_desc.npins = machdata->npins;
 
-	pmx->pctl = pinctrl_register(&spear_pinctrl_desc, &pdev->dev, pmx);
+	pmx->pctl = devm_pinctrl_register(&pdev->dev, &spear_pinctrl_desc, pmx);
 	if (IS_ERR(pmx->pctl)) {
 		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
 		return PTR_ERR(pmx->pctl);
@@ -403,12 +403,3 @@
 
 	return 0;
 }
-
-int spear_pinctrl_remove(struct platform_device *pdev)
-{
-	struct spear_pmx *pmx = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(pmx->pctl);
-
-	return 0;
-}
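
Replacing pinctrl_register()/pinctrl_unregister() with devm_pinctrl_register() is what lets the .remove callbacks be dropped throughout this series (SPEAr here, and Tegra, UniPhier, sunxi, wmt and others below): registration becomes device-managed, so devres unregisters the pin controller automatically when the device is unbound. A minimal probe skeleton using the managed call; the driver data and descriptor names are placeholders:

        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/pinctrl/pinctrl.h>
        #include <linux/platform_device.h>

        struct my_priv {
                struct pinctrl_dev *pctl;
        };

        static struct pinctrl_desc my_pinctrl_desc;     /* filled in elsewhere (sketch) */

        static int my_pinctrl_probe(struct platform_device *pdev)
        {
                struct my_priv *priv;

                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                priv->pctl = devm_pinctrl_register(&pdev->dev, &my_pinctrl_desc, priv);
                if (IS_ERR(priv->pctl))
                        return PTR_ERR(priv->pctl);

                platform_set_drvdata(pdev, priv);
                return 0;       /* no .remove needed just to call pinctrl_unregister() */
        }
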
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 27c2cc8..aa5cf70 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -197,7 +197,6 @@
 				 unsigned count, u16 reg);
 int spear_pinctrl_probe(struct platform_device *pdev,
 			struct spear_pinctrl_machdata *machdata);
-int spear_pinctrl_remove(struct platform_device *pdev);
 
 #define SPEAR_PIN_0_TO_101		\
 	PINCTRL_PIN(0, "PLGPIO0"),	\
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index 92611bb..1821068 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2704,18 +2704,12 @@
 	return spear_pinctrl_probe(pdev, &spear1310_machdata);
 }
 
-static int spear1310_pinctrl_remove(struct platform_device *pdev)
-{
-	return spear_pinctrl_remove(pdev);
-}
-
 static struct platform_driver spear1310_pinctrl_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = spear1310_pinctrl_of_match,
 	},
 	.probe = spear1310_pinctrl_probe,
-	.remove = spear1310_pinctrl_remove,
 };
 
 static int __init spear1310_pinctrl_init(void)
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index f842e9d..c01fb23 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2020,18 +2020,12 @@
 	return spear_pinctrl_probe(pdev, &spear1340_machdata);
 }
 
-static int spear1340_pinctrl_remove(struct platform_device *pdev)
-{
-	return spear_pinctrl_remove(pdev);
-}
-
 static struct platform_driver spear1340_pinctrl_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = spear1340_pinctrl_of_match,
 	},
 	.probe = spear1340_pinctrl_probe,
-	.remove = spear1340_pinctrl_remove,
 };
 
 static int __init spear1340_pinctrl_init(void)
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index d998a2c..111148d 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -677,18 +677,12 @@
 	return 0;
 }
 
-static int spear300_pinctrl_remove(struct platform_device *pdev)
-{
-	return spear_pinctrl_remove(pdev);
-}
-
 static struct platform_driver spear300_pinctrl_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = spear300_pinctrl_of_match,
 	},
 	.probe = spear300_pinctrl_probe,
-	.remove = spear300_pinctrl_remove,
 };
 
 static int __init spear300_pinctrl_init(void)
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 609b18a..a7b0000 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -400,18 +400,12 @@
 	return 0;
 }
 
-static int spear310_pinctrl_remove(struct platform_device *pdev)
-{
-	return spear_pinctrl_remove(pdev);
-}
-
 static struct platform_driver spear310_pinctrl_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = spear310_pinctrl_of_match,
 	},
 	.probe = spear310_pinctrl_probe,
-	.remove = spear310_pinctrl_remove,
 };
 
 static int __init spear310_pinctrl_init(void)
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index c071144..e2b3817 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -3441,18 +3441,12 @@
 	return 0;
 }
 
-static int spear320_pinctrl_remove(struct platform_device *pdev)
-{
-	return spear_pinctrl_remove(pdev);
-}
-
 static struct platform_driver spear320_pinctrl_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = spear320_pinctrl_of_match,
 	},
 	.probe = spear320_pinctrl_probe,
-	.remove = spear320_pinctrl_remove,
 };
 
 static int __init spear320_pinctrl_init(void)
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 8deb566..ae9fab8 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -358,7 +358,7 @@
 		ret = stm32_pctrl_dt_subnode_to_map(pctldev, np, map,
 				&reserved_maps, num_maps);
 		if (ret < 0) {
-			pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+			pinctrl_utils_free_map(pctldev, *map, *num_maps);
 			return ret;
 		}
 	}
@@ -396,7 +396,7 @@
 
 static const struct pinctrl_ops stm32_pctrl_ops = {
 	.dt_node_to_map		= stm32_pctrl_dt_node_to_map,
-	.dt_free_map		= pinctrl_utils_dt_free_map,
+	.dt_free_map		= pinctrl_utils_free_map,
 	.get_groups_count	= stm32_pctrl_get_groups_count,
 	.get_group_name		= stm32_pctrl_get_group_name,
 	.get_group_pins		= stm32_pctrl_get_group_pins,
@@ -454,6 +454,29 @@
 	clk_disable(bank->clk);
 }
 
+static void stm32_pmx_get_mode(struct stm32_gpio_bank *bank,
+		int pin, u32 *mode, u32 *alt)
+{
+	u32 val;
+	int alt_shift = (pin % 8) * 4;
+	int alt_offset = STM32_GPIO_AFRL + (pin / 8) * 4;
+	unsigned long flags;
+
+	clk_enable(bank->clk);
+	spin_lock_irqsave(&bank->lock, flags);
+
+	val = readl_relaxed(bank->base + alt_offset);
+	val &= GENMASK(alt_shift + 3, alt_shift);
+	*alt = val >> alt_shift;
+
+	val = readl_relaxed(bank->base + STM32_GPIO_MODER);
+	val &= GENMASK(pin * 2 + 1, pin * 2);
+	*mode = val >> (pin * 2);
+
+	spin_unlock_irqrestore(&bank->lock, flags);
+	clk_disable(bank->clk);
+}
+
 static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
 			    unsigned function,
 			    unsigned group)
@@ -525,6 +548,24 @@
 	clk_disable(bank->clk);
 }
 
+static u32 stm32_pconf_get_driving(struct stm32_gpio_bank *bank,
+	unsigned int offset)
+{
+	unsigned long flags;
+	u32 val;
+
+	clk_enable(bank->clk);
+	spin_lock_irqsave(&bank->lock, flags);
+
+	val = readl_relaxed(bank->base + STM32_GPIO_TYPER);
+	val &= BIT(offset);
+
+	spin_unlock_irqrestore(&bank->lock, flags);
+	clk_disable(bank->clk);
+
+	return (val >> offset);
+}
+
 static void stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
 	unsigned offset, u32 speed)
 {
@@ -543,6 +584,24 @@
 	clk_disable(bank->clk);
 }
 
+static u32 stm32_pconf_get_speed(struct stm32_gpio_bank *bank,
+	unsigned int offset)
+{
+	unsigned long flags;
+	u32 val;
+
+	clk_enable(bank->clk);
+	spin_lock_irqsave(&bank->lock, flags);
+
+	val = readl_relaxed(bank->base + STM32_GPIO_SPEEDR);
+	val &= GENMASK(offset * 2 + 1, offset * 2);
+
+	spin_unlock_irqrestore(&bank->lock, flags);
+	clk_disable(bank->clk);
+
+	return (val >> (offset * 2));
+}
+
 static void stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
 	unsigned offset, u32 bias)
 {
@@ -561,6 +620,57 @@
 	clk_disable(bank->clk);
 }
 
+static u32 stm32_pconf_get_bias(struct stm32_gpio_bank *bank,
+	unsigned int offset)
+{
+	unsigned long flags;
+	u32 val;
+
+	clk_enable(bank->clk);
+	spin_lock_irqsave(&bank->lock, flags);
+
+	val = readl_relaxed(bank->base + STM32_GPIO_PUPDR);
+	val &= GENMASK(offset * 2 + 1, offset * 2);
+
+	spin_unlock_irqrestore(&bank->lock, flags);
+	clk_disable(bank->clk);
+
+	return (val >> (offset * 2));
+}
+
+static bool stm32_pconf_input_get(struct stm32_gpio_bank *bank,
+	unsigned int offset)
+{
+	unsigned long flags;
+	u32 val;
+
+	clk_enable(bank->clk);
+	spin_lock_irqsave(&bank->lock, flags);
+
+	val = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+
+	spin_unlock_irqrestore(&bank->lock, flags);
+	clk_disable(bank->clk);
+
+	return val;
+}
+
+static bool stm32_pconf_output_get(struct stm32_gpio_bank *bank,
+	unsigned int offset)
+{
+	unsigned long flags;
+	u32 val;
+
+	clk_enable(bank->clk);
+	spin_lock_irqsave(&bank->lock, flags);
+	val = !!(readl_relaxed(bank->base + STM32_GPIO_ODR) & BIT(offset));
+
+	spin_unlock_irqrestore(&bank->lock, flags);
+	clk_disable(bank->clk);
+
+	return val;
+}
+
 static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
 		unsigned int pin, enum pin_config_param param,
 		enum pin_config_param arg)
@@ -634,9 +744,73 @@
 	return 0;
 }
 
+static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
+				 struct seq_file *s,
+				 unsigned int pin)
+{
+	struct pinctrl_gpio_range *range;
+	struct stm32_gpio_bank *bank;
+	int offset;
+	u32 mode, alt, drive, speed, bias;
+	static const char * const modes[] = {
+			"input", "output", "alternate", "analog" };
+	static const char * const speeds[] = {
+			"low", "medium", "high", "very high" };
+	static const char * const biasing[] = {
+			"floating", "pull up", "pull down", "" };
+	bool val;
+
+	range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
+	bank = gpio_range_to_bank(range);
+	offset = stm32_gpio_pin(pin);
+
+	stm32_pmx_get_mode(bank, offset, &mode, &alt);
+	bias = stm32_pconf_get_bias(bank, offset);
+
+	seq_printf(s, "%s ", modes[mode]);
+
+	switch (mode) {
+	/* input */
+	case 0:
+		val = stm32_pconf_input_get(bank, offset);
+		seq_printf(s, "- %s - %s",
+			   val ? "high" : "low",
+			   biasing[bias]);
+		break;
+
+	/* output */
+	case 1:
+		drive = stm32_pconf_get_driving(bank, offset);
+		speed = stm32_pconf_get_speed(bank, offset);
+		val = stm32_pconf_output_get(bank, offset);
+		seq_printf(s, "- %s - %s - %s - %s %s",
+			   val ? "high" : "low",
+			   drive ? "open drain" : "push pull",
+			   biasing[bias],
+			   speeds[speed], "speed");
+		break;
+
+	/* alternate */
+	case 2:
+		drive = stm32_pconf_get_driving(bank, offset);
+		speed = stm32_pconf_get_speed(bank, offset);
+		seq_printf(s, "%d - %s - %s - %s %s", alt,
+			   drive ? "open drain" : "push pull",
+			   biasing[bias],
+			   speeds[speed], "speed");
+		break;
+
+	/* analog */
+	case 3:
+		break;
+	}
+}
+
+
 static const struct pinconf_ops stm32_pconf_ops = {
 	.pin_config_group_get	= stm32_pconf_group_get,
 	.pin_config_group_set	= stm32_pconf_group_set,
+	.pin_config_dbg_show	= stm32_pconf_dbg_show,
 };
 
 static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
@@ -813,10 +987,11 @@
 	pctl->pctl_desc.pmxops = &stm32_pmx_ops;
 	pctl->dev = &pdev->dev;
 
-	pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
-	if (!pctl->pctl_dev) {
+	pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, &pctl->pctl_desc,
+					       pctl);
+	if (IS_ERR(pctl->pctl_dev)) {
 		dev_err(&pdev->dev, "Failed pinctrl registration\n");
-		return -EINVAL;
+		return PTR_ERR(pctl->pctl_dev);
 	}
 
 	for (i = 0; i < pctl->nbanks; i++)
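
stm32_pmx_get_mode() and the new stm32_pconf_get_*() helpers above all pull a small field out of a 32-bit GPIO register the same way: mask with GENMASK(), then shift down. For pin n, the 2-bit MODER field uses GENMASK(2n + 1, 2n); the 4-bit AFRL/AFRH field uses GENMASK(shift + 3, shift) with shift = (n % 8) * 4. The bit arithmetic in isolation:

        #include <linux/bitops.h>       /* GENMASK() */
        #include <linux/types.h>

        /* Extract the width-bit field whose least significant bit sits at shift. */
        static inline u32 field_get(u32 reg, unsigned int shift, unsigned int width)
        {
                return (reg & GENMASK(shift + width - 1, shift)) >> shift;
        }

        /*
         * Example for pin 5 of a bank:
         *   mode = field_get(moder, 5 * 2, 2);        0 input, 1 output, 2 alt, 3 analog
         *   alt  = field_get(afrl, (5 % 8) * 4, 4);   AF0..AF15
         */
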
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 00265f0..8b381d6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -485,6 +485,7 @@
 	.pins = sun8i_a33_pins,
 	.npins = ARRAY_SIZE(sun8i_a33_pins),
 	.irq_banks = 2,
+	.irq_bank_base = 1,
 };
 
 static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 12a1dfa..54455af 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -579,7 +579,7 @@
 static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 reg = sunxi_irq_cfg_reg(d->hwirq);
+	u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
 	u8 index = sunxi_irq_cfg_offset(d->hwirq);
 	unsigned long flags;
 	u32 regval;
@@ -626,7 +626,8 @@
 static void sunxi_pinctrl_irq_ack(struct irq_data *d)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 status_reg = sunxi_irq_status_reg(d->hwirq);
+	u32 status_reg = sunxi_irq_status_reg(d->hwirq,
+					      pctl->desc->irq_bank_base);
 	u8 status_idx = sunxi_irq_status_offset(d->hwirq);
 
 	/* Clear the IRQ */
@@ -636,7 +637,7 @@
 static void sunxi_pinctrl_irq_mask(struct irq_data *d)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
 	unsigned long flags;
 	u32 val;
@@ -653,7 +654,7 @@
 static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
 	unsigned long flags;
 	u32 val;
@@ -745,7 +746,7 @@
 	if (bank == pctl->desc->irq_banks)
 		return;
 
-	reg = sunxi_irq_status_reg_from_bank(bank);
+	reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
 	val = readl(pctl->membase + reg);
 
 	if (val) {
@@ -932,18 +933,15 @@
 	pctrl_desc->pctlops = &sunxi_pctrl_ops;
 	pctrl_desc->pmxops =  &sunxi_pmx_ops;
 
-	pctl->pctl_dev = pinctrl_register(pctrl_desc,
-					  &pdev->dev, pctl);
+	pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, pctrl_desc, pctl);
 	if (IS_ERR(pctl->pctl_dev)) {
 		dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
 		return PTR_ERR(pctl->pctl_dev);
 	}
 
 	pctl->chip = devm_kzalloc(&pdev->dev, sizeof(*pctl->chip), GFP_KERNEL);
-	if (!pctl->chip) {
-		ret = -ENOMEM;
-		goto pinctrl_error;
-	}
+	if (!pctl->chip)
+		return -ENOMEM;
 
 	last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number;
 	pctl->chip->owner = THIS_MODULE;
@@ -965,7 +963,7 @@
 
 	ret = gpiochip_add_data(pctl->chip, pctl);
 	if (ret)
-		goto pinctrl_error;
+		return ret;
 
 	for (i = 0; i < pctl->desc->npins; i++) {
 		const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
@@ -1024,9 +1022,11 @@
 
 	for (i = 0; i < pctl->desc->irq_banks; i++) {
 		/* Mask and clear all IRQs before registering a handler */
-		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
+		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
+						pctl->desc->irq_bank_base));
 		writel(0xffffffff,
-			pctl->membase + sunxi_irq_status_reg_from_bank(i));
+		       pctl->membase + sunxi_irq_status_reg_from_bank(i,
+						pctl->desc->irq_bank_base));
 
 		irq_set_chained_handler_and_data(pctl->irq[i],
 						 sunxi_pinctrl_irq_handler,
@@ -1041,7 +1041,5 @@
 	clk_disable_unprepare(clk);
 gpiochip_error:
 	gpiochip_remove(pctl->chip);
-pinctrl_error:
-	pinctrl_unregister(pctl->pctl_dev);
 	return ret;
 }
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e248e81..0afce1a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -97,6 +97,7 @@
 	int				npins;
 	unsigned			pin_base;
 	unsigned			irq_banks;
+	unsigned			irq_bank_base;
 	bool				irq_read_needs_mux;
 };
 
@@ -233,12 +234,12 @@
 	return pin_num * PULL_PINS_BITS;
 }
 
-static inline u32 sunxi_irq_cfg_reg(u16 irq)
+static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
 {
 	u8 bank = irq / IRQ_PER_BANK;
 	u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
 
-	return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
+	return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
 }
 
 static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -247,16 +248,16 @@
 	return irq_num * IRQ_CFG_IRQ_BITS;
 }
 
-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
 {
-	return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
+	return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
 }
 
-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
+static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
 {
 	u8 bank = irq / IRQ_PER_BANK;
 
-	return sunxi_irq_ctrl_reg_from_bank(bank);
+	return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
 }
 
 static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -265,16 +266,16 @@
 	return irq_num * IRQ_CTRL_IRQ_BITS;
 }
 
-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
 {
-	return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
+	return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
 }
 
-static inline u32 sunxi_irq_status_reg(u16 irq)
+static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
 {
 	u8 bank = irq / IRQ_PER_BANK;
 
-	return sunxi_irq_status_reg_from_bank(bank);
+	return sunxi_irq_status_reg_from_bank(bank, bank_base);
 }
 
 static inline u32 sunxi_irq_status_offset(u16 irq)
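
The new bank_base argument simply shifts every per-bank IRQ register window: on the A33 the first interrupt-capable port is the second bank in the register map, so its descriptor sets irq_bank_base = 1 and all CFG/CTRL/STATUS offsets are computed from (bank_base + bank) rather than bank alone. A standalone sketch of that addressing; the base and stride values below are placeholders, not the constants from pinctrl-sunxi.h:

        /* Placeholder register map constants, for illustration only. */
        #define EX_IRQ_CTRL_REG         0x210
        #define EX_IRQ_MEM_SIZE         0x20

        static inline unsigned int ex_irq_ctrl_reg(unsigned int bank, unsigned int bank_base)
        {
                return EX_IRQ_CTRL_REG + (bank_base + bank) * EX_IRQ_MEM_SIZE;
        }

        /*
         * With bank_base = 0 (most SoCs) software bank 0 maps to the first window;
         * with bank_base = 1 (sun8i A33) the same bank 0 lands one window later:
         *   ex_irq_ctrl_reg(0, 1) == EX_IRQ_CTRL_REG + EX_IRQ_MEM_SIZE
         */
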
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index 2f06029..6f68a9e 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -267,7 +267,7 @@
 	.get_group_name = tegra_xusb_padctl_get_group_name,
 	.get_group_pins = tegra_xusb_padctl_get_group_pins,
 	.dt_node_to_map = tegra_xusb_padctl_dt_node_to_map,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int tegra_xusb_padctl_get_functions_count(struct pinctrl_dev *pinctrl)
@@ -873,7 +873,7 @@
 };
 MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
 
-static int tegra_xusb_padctl_probe(struct platform_device *pdev)
+int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev)
 {
 	struct tegra_xusb_padctl *padctl;
 	const struct of_device_id *match;
@@ -914,7 +914,8 @@
 	padctl->desc.confops = &tegra_xusb_padctl_pinconf_ops;
 	padctl->desc.owner = THIS_MODULE;
 
-	padctl->pinctrl = pinctrl_register(&padctl->desc, &pdev->dev, padctl);
+	padctl->pinctrl = devm_pinctrl_register(&pdev->dev, &padctl->desc,
+						padctl);
 	if (IS_ERR(padctl->pinctrl)) {
 		dev_err(&pdev->dev, "failed to register pincontrol\n");
 		err = PTR_ERR(padctl->pinctrl);
@@ -924,7 +925,7 @@
 	phy = devm_phy_create(&pdev->dev, NULL, &pcie_phy_ops);
 	if (IS_ERR(phy)) {
 		err = PTR_ERR(phy);
-		goto unregister;
+		goto reset;
 	}
 
 	padctl->phys[TEGRA_XUSB_PADCTL_PCIE] = phy;
@@ -933,7 +934,7 @@
 	phy = devm_phy_create(&pdev->dev, NULL, &sata_phy_ops);
 	if (IS_ERR(phy)) {
 		err = PTR_ERR(phy);
-		goto unregister;
+		goto reset;
 	}
 
 	padctl->phys[TEGRA_XUSB_PADCTL_SATA] = phy;
@@ -944,42 +945,26 @@
 	if (IS_ERR(padctl->provider)) {
 		err = PTR_ERR(padctl->provider);
 		dev_err(&pdev->dev, "failed to register PHYs: %d\n", err);
-		goto unregister;
+		goto reset;
 	}
 
 	return 0;
 
-unregister:
-	pinctrl_unregister(padctl->pinctrl);
 reset:
 	reset_control_assert(padctl->rst);
 	return err;
 }
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_legacy_probe);
 
-static int tegra_xusb_padctl_remove(struct platform_device *pdev)
+int tegra_xusb_padctl_legacy_remove(struct platform_device *pdev)
 {
 	struct tegra_xusb_padctl *padctl = platform_get_drvdata(pdev);
 	int err;
 
-	pinctrl_unregister(padctl->pinctrl);
-
 	err = reset_control_assert(padctl->rst);
 	if (err < 0)
 		dev_err(&pdev->dev, "failed to assert reset: %d\n", err);
 
 	return err;
 }
-
-static struct platform_driver tegra_xusb_padctl_driver = {
-	.driver = {
-		.name = "tegra-xusb-padctl",
-		.of_match_table = tegra_xusb_padctl_of_match,
-	},
-	.probe = tegra_xusb_padctl_probe,
-	.remove = tegra_xusb_padctl_remove,
-};
-module_platform_driver(tegra_xusb_padctl_driver);
-
-MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
-MODULE_DESCRIPTION("Tegra 124 XUSB Pad Control driver");
-MODULE_LICENSE("GPL v2");
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_legacy_remove);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 4938882..6e82b29 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -215,7 +215,7 @@
 		ret = tegra_pinctrl_dt_subnode_to_map(pctldev, np, map,
 						      &reserved_maps, num_maps);
 		if (ret < 0) {
-			pinctrl_utils_dt_free_map(pctldev, *map,
+			pinctrl_utils_free_map(pctldev, *map,
 				*num_maps);
 			of_node_put(np);
 			return ret;
@@ -233,7 +233,7 @@
 	.pin_dbg_show = tegra_pinctrl_pin_dbg_show,
 #endif
 	.dt_node_to_map = tegra_pinctrl_dt_node_to_map,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int tegra_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
@@ -417,7 +417,7 @@
 		return -ENOTSUPP;
 	}
 
-	if (*reg < 0 || *bit > 31) {
+	if (*reg < 0 || *bit < 0)  {
 		if (report_err) {
 			const char *prop = "unknown";
 			int i;
@@ -625,6 +625,22 @@
 	.owner = THIS_MODULE,
 };
 
+static void tegra_pinctrl_clear_parked_bits(struct tegra_pmx *pmx)
+{
+	int i = 0;
+	const struct tegra_pingroup *g;
+	u32 val;
+
+	for (i = 0; i < pmx->soc->ngroups; ++i) {
+		if (pmx->soc->groups[i].parked_reg >= 0) {
+			g = &pmx->soc->groups[i];
+			val = pmx_readl(pmx, g->parked_bank, g->parked_reg);
+			val &= ~(1 << g->parked_bit);
+			pmx_writel(pmx, val, g->parked_bank, g->parked_reg);
+		}
+	}
+}
+
 static bool gpio_node_has_range(void)
 {
 	struct device_node *np;
@@ -719,12 +735,14 @@
 			return PTR_ERR(pmx->regs[i]);
 	}
 
-	pmx->pctl = pinctrl_register(&tegra_pinctrl_desc, &pdev->dev, pmx);
+	pmx->pctl = devm_pinctrl_register(&pdev->dev, &tegra_pinctrl_desc, pmx);
 	if (IS_ERR(pmx->pctl)) {
 		dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
 		return PTR_ERR(pmx->pctl);
 	}
 
+	tegra_pinctrl_clear_parked_bits(pmx);
+
 	if (!gpio_node_has_range())
 		pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
 
@@ -735,13 +753,3 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(tegra_pinctrl_probe);
-
-int tegra_pinctrl_remove(struct platform_device *pdev)
-{
-	struct tegra_pmx *pmx = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(pmx->pctl);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(tegra_pinctrl_remove);
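
tegra_pinctrl_clear_parked_bits() runs once after registration: for every pin group whose parked_reg is valid (>= 0) it performs a read-modify-write that clears the group's park bit, releasing pads from the parked state they may have been left in at boot. The per-SoC tables below then set parked_reg = -1 wherever the bit does not exist; only the Tegra210 table points it at a real register. The core read-modify-write, reduced to its shape:

        #include <linux/bitops.h>
        #include <linux/io.h>

        /* Clear a single bit in a memory-mapped register (sketch of the fixup). */
        static void clear_parked_bit(void __iomem *reg, unsigned int bit)
        {
                u32 val = readl(reg);

                val &= ~BIT(bit);
                writel(val, reg);
        }
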
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index 1615db7..d2ced17 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -93,6 +93,9 @@
  * @tri_reg:		Tri-state register offset.
  * @tri_bank:		Tri-state register bank.
  * @tri_bit:		Tri-state register bit.
+ * @parked_reg:		Parked register offset. -1 if unsupported.
+ * @parked_bank:	Parked register bank. 0 if unsupported.
+ * @parked_bit:		Parked register bit. 0 if unsupported.
  * @einput_bit:		Enable-input register bit.
  * @odrain_bit:		Open-drain register bit.
  * @lock_bit:		Lock register bit.
@@ -135,13 +138,16 @@
 	s16 pupd_reg;
 	s16 tri_reg;
 	s16 drv_reg;
+	s16 parked_reg;
 	u32 mux_bank:2;
 	u32 pupd_bank:2;
 	u32 tri_bank:2;
 	u32 drv_bank:2;
+	u32 parked_bank:2;
 	s32 mux_bit:6;
 	s32 pupd_bit:6;
 	s32 tri_bit:6;
+	s32 parked_bit:6;
 	s32 einput_bit:6;
 	s32 odrain_bit:6;
 	s32 lock_bit:6;
@@ -189,6 +195,4 @@
 
 int tegra_pinctrl_probe(struct platform_device *pdev,
 			const struct tegra_pinctrl_soc_data *soc_data);
-int tegra_pinctrl_remove(struct platform_device *pdev);
-
 #endif
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra114.c b/drivers/pinctrl/tegra/pinctrl-tegra114.c
index 05e49d5..4851d16 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra114.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra114.c
@@ -1578,6 +1578,7 @@
 		.lock_bit = 7,						\
 		.ioreset_bit = PINGROUP_BIT_##ior(8),			\
 		.rcv_sel_bit = PINGROUP_BIT_##rcv_sel(9),		\
+		.parked_reg = -1,					\
 		.drv_reg = -1,						\
 	}
 
@@ -1598,6 +1599,7 @@
 		.rcv_sel_bit = -1,					\
 		.drv_reg = DRV_PINGROUP_REG(r),				\
 		.drv_bank = 0,						\
+		.parked_reg = -1,					\
 		.hsm_bit = hsm_b,					\
 		.schmitt_bit = schmitt_b,				\
 		.lpmd_bit = lpmd_b,					\
@@ -1863,7 +1865,6 @@
 		.of_match_table = tegra114_pinctrl_of_match,
 	},
 	.probe = tegra114_pinctrl_probe,
-	.remove = tegra_pinctrl_remove,
 };
 module_platform_driver(tegra114_pinctrl_driver);
 
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra124.c b/drivers/pinctrl/tegra/pinctrl-tegra124.c
index 7cd44c7..a0ce723 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra124.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra124.c
@@ -1747,6 +1747,7 @@
 		.lock_bit = 7,						\
 		.ioreset_bit = PINGROUP_BIT_##ior(8),			\
 		.rcv_sel_bit = PINGROUP_BIT_##rcv_sel(9),		\
+		.parked_reg = -1,					\
 		.drv_reg = -1,						\
 	}
 
@@ -1767,6 +1768,7 @@
 		.rcv_sel_bit = -1,					\
 		.drv_reg = DRV_PINGROUP_REG(r),				\
 		.drv_bank = 0,						\
+		.parked_reg = -1,					\
 		.hsm_bit = hsm_b,					\
 		.schmitt_bit = schmitt_b,				\
 		.lpmd_bit = lpmd_b,					\
@@ -2075,7 +2077,6 @@
 		.of_match_table = tegra124_pinctrl_of_match,
 	},
 	.probe = tegra124_pinctrl_probe,
-	.remove = tegra_pinctrl_remove,
 };
 module_platform_driver(tegra124_pinctrl_driver);
 
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
index 4833db4..09bad69 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra20.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c
@@ -1994,6 +1994,7 @@
 		.tri_reg = ((tri_r) - TRISTATE_REG_A),		\
 		.tri_bank = 0,					\
 		.tri_bit = tri_b,				\
+		.parked_reg = -1,				\
 		.einput_bit = -1,				\
 		.odrain_bit = -1,				\
 		.lock_bit = -1,					\
@@ -2013,6 +2014,7 @@
 		.pupd_bank = 2,					\
 		.pupd_bit = pupd_b,				\
 		.drv_reg = -1,					\
+		.parked_reg = -1,				\
 	}
 
 /* Pin groups for drive strength registers (configurable version) */
@@ -2028,6 +2030,7 @@
 		.tri_reg = -1,					\
 		.drv_reg = ((r) - PINGROUP_REG_A),		\
 		.drv_bank = 3,					\
+		.parked_reg = -1,				\
 		.hsm_bit = hsm_b,				\
 		.schmitt_bit = schmitt_b,			\
 		.lpmd_bit = lpmd_b,				\
@@ -2242,7 +2245,6 @@
 		.of_match_table = tegra20_pinctrl_of_match,
 	},
 	.probe = tegra20_pinctrl_probe,
-	.remove = tegra_pinctrl_remove,
 };
 module_platform_driver(tegra20_pinctrl_driver);
 
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra210.c b/drivers/pinctrl/tegra/pinctrl-tegra210.c
index 252b464..2d856af 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra210.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra210.c
@@ -1310,6 +1310,9 @@
 		.lock_bit = 7,						\
 		.ioreset_bit = -1,					\
 		.rcv_sel_bit = PINGROUP_BIT_##e_io_hv(10),		\
+		.parked_reg = PINGROUP_REG(r),				\
+		.parked_bank = 1,					\
+		.parked_bit = 5,					\
 		.hsm_bit = PINGROUP_BIT_##hsm(9),			\
 		.schmitt_bit = 12,					\
 		.drvtype_bit = PINGROUP_BIT_##drvtype(13),		\
@@ -1342,6 +1345,7 @@
 		.rcv_sel_bit = -1,					\
 		.drv_reg = DRV_PINGROUP_REG(r),				\
 		.drv_bank = 0,						\
+		.parked_reg = -1,					\
 		.hsm_bit = -1,						\
 		.schmitt_bit = -1,					\
 		.lpmd_bit = -1,						\
@@ -1579,7 +1583,6 @@
 		.of_match_table = tegra210_pinctrl_of_match,
 	},
 	.probe = tegra210_pinctrl_probe,
-	.remove = tegra_pinctrl_remove,
 };
 module_platform_driver(tegra210_pinctrl_driver);
 
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra30.c b/drivers/pinctrl/tegra/pinctrl-tegra30.c
index 47b2fd8..fb7817f 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra30.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra30.c
@@ -2139,6 +2139,7 @@
 		.lock_bit = 7,						\
 		.ioreset_bit = PINGROUP_BIT_##ior(8),			\
 		.rcv_sel_bit = -1,					\
+		.parked_reg = -1,					\
 		.drv_reg = -1,						\
 	}
 
@@ -2159,6 +2160,7 @@
 		.rcv_sel_bit = -1,					\
 		.drv_reg = DRV_PINGROUP_REG(r),				\
 		.drv_bank = 0,						\
+		.parked_reg = -1,					\
 		.hsm_bit = hsm_b,					\
 		.schmitt_bit = schmitt_b,				\
 		.lpmd_bit = lpmd_b,					\
@@ -2498,7 +2500,6 @@
 		.of_match_table = tegra30_pinctrl_of_match,
 	},
 	.probe = tegra30_pinctrl_probe,
-	.remove = tegra_pinctrl_remove,
 };
 module_platform_driver(tegra30_pinctrl_driver);
 
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index 589872c..9674009 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -115,7 +115,7 @@
 	.pin_dbg_show = uniphier_pctl_pin_dbg_show,
 #endif
 	.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
-	.dt_free_map = pinctrl_utils_dt_free_map,
+	.dt_free_map = pinctrl_utils_free_map,
 };
 
 static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev,
@@ -665,7 +665,7 @@
 	desc->pmxops = &uniphier_pmxops;
 	desc->confops = &uniphier_confops;
 
-	priv->pctldev = pinctrl_register(desc, dev, priv);
+	priv->pctldev = devm_pinctrl_register(dev, desc, priv);
 	if (IS_ERR(priv->pctldev)) {
 		dev_err(dev, "failed to register UniPhier pinctrl driver\n");
 		return PTR_ERR(priv->pctldev);
@@ -676,13 +676,3 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(uniphier_pinctrl_probe);
-
-int uniphier_pinctrl_remove(struct platform_device *pdev)
-{
-	struct uniphier_pinctrl_priv *priv = platform_get_drvdata(pdev);
-
-	pinctrl_unregister(priv->pctldev);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(uniphier_pinctrl_remove);
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
index a7056dc..4a0439c 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
@@ -878,7 +878,6 @@
 
 static struct platform_driver ph1_ld4_pinctrl_driver = {
 	.probe = ph1_ld4_pinctrl_probe,
-	.remove = uniphier_pinctrl_remove,
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = ph1_ld4_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
index 1824831..150d339 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
@@ -1266,7 +1266,6 @@
 
 static struct platform_driver ph1_ld6b_pinctrl_driver = {
 	.probe = ph1_ld6b_pinctrl_probe,
-	.remove = uniphier_pinctrl_remove,
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = ph1_ld6b_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index ec8e92d..b1f09e6 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -1552,7 +1552,6 @@
 
 static struct platform_driver ph1_pro4_pinctrl_driver = {
 	.probe = ph1_pro4_pinctrl_probe,
-	.remove = uniphier_pinctrl_remove,
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = ph1_pro4_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
index e3d648e..3087f76 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
@@ -1343,7 +1343,6 @@
 
 static struct platform_driver ph1_pro5_pinctrl_driver = {
 	.probe = ph1_pro5_pinctrl_probe,
-	.remove = uniphier_pinctrl_remove,
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = ph1_pro5_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
index bc00d75..e868030 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
@@ -1261,7 +1261,6 @@
 
 static struct platform_driver proxstream2_pinctrl_driver = {
 	.probe = proxstream2_pinctrl_probe,
-	.remove = uniphier_pinctrl_remove,
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = proxstream2_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
index c3700a3..ceb7a98 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
@@ -786,7 +786,6 @@
 
 static struct platform_driver ph1_sld8_pinctrl_driver = {
 	.probe = ph1_sld8_pinctrl_probe,
-	.remove = uniphier_pinctrl_remove,
 	.driver = {
 		.name = DRIVER_NAME,
 		.of_match_table = ph1_sld8_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
index e1e98b8..a21154f 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier.h
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
@@ -212,6 +212,4 @@
 			   struct pinctrl_desc *desc,
 			   struct uniphier_pinctrl_socdata *socdata);
 
-int uniphier_pinctrl_remove(struct platform_device *pdev);
-
 #endif /* __PINCTRL_UNIPHIER_H__ */
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 5c261bf..cbc6386 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -583,7 +583,7 @@
 
 	data->dev = &pdev->dev;
 
-	data->pctl_dev = pinctrl_register(&wmt_desc, &pdev->dev, data);
+	data->pctl_dev = devm_pinctrl_register(&pdev->dev, &wmt_desc, data);
 	if (IS_ERR(data->pctl_dev)) {
 		dev_err(&pdev->dev, "Failed to register pinctrl\n");
 		return PTR_ERR(data->pctl_dev);
@@ -592,7 +592,7 @@
 	err = gpiochip_add_data(&data->gpio_chip, data);
 	if (err) {
 		dev_err(&pdev->dev, "could not add GPIO chip\n");
-		goto fail_gpio;
+		return err;
 	}
 
 	err = gpiochip_add_pin_range(&data->gpio_chip, dev_name(data->dev),
@@ -606,8 +606,6 @@
 
 fail_range:
 	gpiochip_remove(&data->gpio_chip);
-fail_gpio:
-	pinctrl_unregister(data->pctl_dev);
 	return err;
 }
 
@@ -616,7 +614,6 @@
 	struct wmt_pinctrl_data *data = platform_get_drvdata(pdev);
 
 	gpiochip_remove(&data->gpio_chip);
-	pinctrl_unregister(data->pctl_dev);
 
 	return 0;
 }
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 9973ceb..07462d7 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,8 +309,7 @@
 		 * much memory to the process.
 		 */
 		down_read(&current->mm->mmap_sem);
-		ret = get_user_pages(current, current->mm, address, 1,
-				     !is_write, 0, &page, NULL);
+		ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
 		up_read(&current->mm->mmap_sem);
 		if (ret < 0)
 			break;
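
The goldfish change follows a get_user_pages() API cleanup from this kernel cycle: the explicit task and mm arguments are gone and the call implicitly operates on current->mm, so only the address, page count, write/force flags and output arrays remain. A hedged sketch of pinning one user page with the argument order used in this hunk:

        #include <linux/mm.h>
        #include <linux/sched.h>

        /* Pin a single user page for reading; argument order as in the hunk above. */
        static int pin_one_page(unsigned long addr, struct page **page)
        {
                int ret;

                down_read(&current->mm->mmap_sem);
                ret = get_user_pages(addr, 1, 0 /* write */, 0 /* force */, page, NULL);
                up_read(&current->mm->mmap_sem);

                if (ret < 1)
                        return ret < 0 ? ret : -EFAULT; /* fewer pages pinned than asked */
                return 0;
        }
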
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index 125e569..b3ae30a 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -15,10 +15,6 @@
 
 if MIPS_PLATFORM_DEVICES
 
-config MIPS_ACPI
-	bool
-	default y if LOONGSON_MACH3X
-
 config CPU_HWMON
 	tristate "Loongson CPU HWMon Driver"
 	depends on LOONGSON_MACH3X
diff --git a/drivers/platform/mips/Makefile b/drivers/platform/mips/Makefile
index 4341284..8dfd039 100644
--- a/drivers/platform/mips/Makefile
+++ b/drivers/platform/mips/Makefile
@@ -1,2 +1 @@
-obj-$(CONFIG_MIPS_ACPI) += acpi_init.o
 obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 0f6c63e..4300a55 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -20,9 +20,9 @@
 	u32 reg;
 
 	reg = LOONGSON_CHIPTEMP(cpu);
-	if (loongson_sysconf.cputype == Loongson_3A)
+	if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1)
 		reg = (reg >> 8) & 0xff;
-	else if (loongson_sysconf.cputype == Loongson_3B)
+	else
 		reg = ((reg >> 8) & 0xff) - 100;
 
 	return (int)reg * 1000;
@@ -80,13 +80,13 @@
 static ssize_t cpu0_temp_label(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "CPU 0 Temprature\n");
+	return sprintf(buf, "CPU 0 Temperature\n");
 }
 
 static ssize_t cpu1_temp_label(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
-	return sprintf(buf, "CPU 1 Temprature\n");
+	return sprintf(buf, "CPU 1 Temperature\n");
 }
 
 static ssize_t get_cpu0_temp(struct device *dev,
@@ -169,7 +169,7 @@
 
 	ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
 	if (ret) {
-		pr_err("fail to create cpu temprature interface!\n");
+		pr_err("fail to create cpu temperature interface!\n");
 		goto fail_create_sysfs_cputemp_files;
 	}
 
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 1062fa4..79d64ea 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -793,15 +793,6 @@
 	return AE_OK;
 }
 
-static int AMW0_set_cap_acpi_check_device_found __initdata;
-
-static acpi_status __init AMW0_set_cap_acpi_check_device_cb(acpi_handle handle,
-	u32 level, void *context, void **retval)
-{
-	AMW0_set_cap_acpi_check_device_found = 1;
-	return AE_OK;
-}
-
 static const struct acpi_device_id norfkill_ids[] __initconst = {
 	{ "VPC2004", 0},
 	{ "IBM0068", 0},
@@ -816,9 +807,10 @@
 	const struct acpi_device_id *id;
 
 	for (id = norfkill_ids; id->id[0]; id++)
-		acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb,
-				NULL, NULL);
-	return AMW0_set_cap_acpi_check_device_found;
+		if (acpi_dev_found(id->id))
+			return true;
+
+	return false;
 }
 
 static acpi_status __init AMW0_set_capabilities(void)
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 14fd2ec..17b365f 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -204,30 +204,10 @@
 	}
 }
 
-static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
-						 void *context, void **retval)
-{
-	pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
-	*(bool *)context = true;
-	return AE_CTRL_TERMINATE;
-}
-
-static int eeepc_wmi_check_atkd(void)
-{
-	acpi_status status;
-	bool found = false;
-
-	status = acpi_get_devices(EEEPC_ACPI_HID, eeepc_wmi_parse_device,
-				  &found, NULL);
-
-	if (ACPI_FAILURE(status) || !found)
-		return 0;
-	return -1;
-}
-
 static int eeepc_wmi_probe(struct platform_device *pdev)
 {
-	if (eeepc_wmi_check_atkd()) {
+	if (acpi_dev_found(EEEPC_ACPI_HID)) {
+		pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
 		pr_warn("WMI device present, but legacy ATKD device is also "
 			"present and enabled\n");
 		pr_warn("You probably booted with acpi_osi=\"Linux\" or "
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 10ce6cb..0935668 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -127,8 +127,10 @@
 	arg0.integer.value = reg;
 
 	status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+	if (ACPI_FAILURE(status))
+		return -EINVAL;
 	*ret = lret;
-	return (status != AE_OK) ? -EINVAL : 0;
+	return 0;
 }
 
 /**
@@ -173,6 +175,7 @@
 DEFINE_CONV(normal, 1, 2, 3);
 DEFINE_CONV(y_inverted, 1, -2, 3);
 DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(x_inverted_usd, -1, 2, -3);
 DEFINE_CONV(z_inverted, 1, 2, -3);
 DEFINE_CONV(xy_swap, 2, 1, 3);
 DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@
 	AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
 	AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
 	AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+	AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
 	AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
 	AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
 	AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index f93abc8..a818db6 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -91,6 +91,8 @@
 }
 
 static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+	.freeze  = intel_hid_pl_suspend_handler,
+	.restore  = intel_hid_pl_resume_handler,
 	.suspend  = intel_hid_pl_suspend_handler,
 	.resume  = intel_hid_pl_resume_handler,
 };
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 3fb1d85..6f497e8 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -687,8 +687,8 @@
 	ipcdev.acpi_io_size = size;
 	dev_info(&pdev->dev, "io res: %pR\n", res);
 
-	/* This is index 0 to cover BIOS data register */
 	punit_res = punit_res_array;
+	/* This is index 0 to cover BIOS data register */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_BIOS_DATA_INDEX);
 	if (!res) {
@@ -698,55 +698,51 @@
 	*punit_res = *res;
 	dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
 
+	/* This is index 1 to cover BIOS interface register */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_BIOS_IFACE_INDEX);
 	if (!res) {
 		dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
 		return -ENXIO;
 	}
-	/* This is index 1 to cover BIOS interface register */
 	*++punit_res = *res;
 	dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
 
+	/* This is index 2 to cover ISP data register, optional */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_ISP_DATA_INDEX);
-	if (!res) {
-		dev_err(&pdev->dev, "Failed to get res of punit ISP data\n");
-		return -ENXIO;
+	++punit_res;
+	if (res) {
+		*punit_res = *res;
+		dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
 	}
-	/* This is index 2 to cover ISP data register */
-	*++punit_res = *res;
-	dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
 
+	/* This is index 3 to cover ISP interface register, optional */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_ISP_IFACE_INDEX);
-	if (!res) {
-		dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n");
-		return -ENXIO;
+	++punit_res;
+	if (res) {
+		*punit_res = *res;
+		dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
 	}
-	/* This is index 3 to cover ISP interface register */
-	*++punit_res = *res;
-	dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
 
+	/* This is index 4 to cover GTD data register, optional */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_GTD_DATA_INDEX);
-	if (!res) {
-		dev_err(&pdev->dev, "Failed to get res of punit GTD data\n");
-		return -ENXIO;
+	++punit_res;
+	if (res) {
+		*punit_res = *res;
+		dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
 	}
-	/* This is index 4 to cover GTD data register */
-	*++punit_res = *res;
-	dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
 
+	/* This is index 5 to cover GTD interface register, optional */
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_GTD_IFACE_INDEX);
-	if (!res) {
-		dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n");
-		return -ENXIO;
+	++punit_res;
+	if (res) {
+		*punit_res = *res;
+		dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
 	}
-	/* This is index 5 to cover GTD interface register */
-	*++punit_res = *res;
-	dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM,
 				    PLAT_RESOURCE_IPC_INDEX);
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 0e73fd1..63b371d 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -30,7 +30,7 @@
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <asm/intel_scu_ipc.h>
 #include <linux/device.h>
 #include <linux/intel_pmic_gpio.h>
@@ -174,7 +174,7 @@
 
 static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
 {
-	struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
+	struct pmic_gpio *pg = gpiochip_get_data(chip);
 
 	return pg->irq_base + offset;
 }
@@ -279,7 +279,7 @@
 	mutex_init(&pg->buslock);
 
 	pg->chip.parent = dev;
-	retval = gpiochip_add(&pg->chip);
+	retval = gpiochip_add_data(&pg->chip, pg);
 	if (retval) {
 		pr_err("Can not add pmic gpio chip\n");
 		goto err;
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index bd87540..a47a41f 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -227,6 +227,11 @@
 	struct resource *res;
 	void __iomem *addr;
 
+	/*
+	 * The following resources are required
+	 * - BIOS_IPC BASE_DATA
+	 * - BIOS_IPC BASE_IFACE
+	 */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	addr = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(addr))
@@ -239,29 +244,40 @@
 		return PTR_ERR(addr);
 	punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
 
+	/*
+	 * The following resources are optional
+	 * - ISPDRIVER_IPC BASE_DATA
+	 * - ISPDRIVER_IPC BASE_IFACE
+	 * - GTDRIVER_IPC BASE_DATA
+	 * - GTDRIVER_IPC BASE_IFACE
+	 */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-	addr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+	if (res) {
+		addr = devm_ioremap_resource(&pdev->dev, res);
+		if (!IS_ERR(addr))
+			punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-	addr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+	if (res) {
+		addr = devm_ioremap_resource(&pdev->dev, res);
+		if (!IS_ERR(addr))
+			punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
-	addr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+	if (res) {
+		addr = devm_ioremap_resource(&pdev->dev, res);
+		if (!IS_ERR(addr))
+			punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
-	addr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(addr))
-		return PTR_ERR(addr);
-	punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+	if (res) {
+		addr = devm_ioremap_resource(&pdev->dev, res);
+		if (!IS_ERR(addr))
+			punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+	}
 
 	return 0;
 }
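
After this change only the two BIOS_IPC windows are mandatory; the ISP and GT driver windows are mapped only when the platform actually describes them, so probe no longer fails on machines without those IP blocks. The "map it only if it is there" step, factored into a hypothetical helper using the same calls as the hunk:

        #include <linux/err.h>
        #include <linux/io.h>
        #include <linux/platform_device.h>

        /* Map an optional MEM resource; return NULL when the platform omits it. */
        static void __iomem *optional_ioremap(struct platform_device *pdev, int index)
        {
                struct resource *res;
                void __iomem *addr;

                res = platform_get_resource(pdev, IORESOURCE_MEM, index);
                if (!res)
                        return NULL;            /* not described: skip, do not fail probe */

                addr = devm_ioremap_resource(&pdev->dev, res);
                return IS_ERR(addr) ? NULL : addr;
        }
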
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 397119f..781bd10 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -659,7 +659,7 @@
 static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
 {
 	u32 telem_ctrl = 0;
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&(telm_conf->telem_lock));
 	if (ioss_period) {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e305ab5..9255ff3 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7972,10 +7972,12 @@
 		fan_update_desired_level(s);
 	mutex_unlock(&fan_mutex);
 
+	if (rc)
+		return rc;
 	if (status)
 		*status = s;
 
-	return rc;
+	return 0;
 }
 
 static int fan_get_speed(unsigned int *speed)
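
The fan-status fix above reorders the error handling so that a failed read returns before *status is written; previously the caller could receive an undefined *status alongside a nonzero return code. The general shape, with hw_read() standing in for the real accessor:

        /* Publish the result only after every failure path has been rejected. */
        static int read_status(int *status)
        {
                int s, rc;

                rc = hw_read(&s);       /* hw_read() is a placeholder, not the driver's API */
                if (rc)
                        return rc;      /* leave *status untouched on error */

                if (status)
                        *status = s;
                return 0;
        }
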
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index df1f1a7..01e12d2 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -135,7 +135,7 @@
 /* Field definitions */
 #define HCI_ACCEL_MASK			0x7fff
 #define HCI_HOTKEY_DISABLE		0x0b
-#define HCI_HOTKEY_ENABLE		0x01
+#define HCI_HOTKEY_ENABLE		0x09
 #define HCI_HOTKEY_SPECIAL_FUNCTIONS	0x10
 #define HCI_LCD_BRIGHTNESS_BITS		3
 #define HCI_LCD_BRIGHTNESS_SHIFT	(16-HCI_LCD_BRIGHTNESS_BITS)
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index facd43b..81603d9 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -521,10 +521,11 @@
 	int ret;
 
 	if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) ||
-	    paravirt_enabled()) {
+	    arch_pnpbios_disabled()) {
 		printk(KERN_INFO "PnPBIOS: Disabled\n");
 		return -ENODEV;
 	}
+
 #ifdef CONFIG_PNPACPI
 	if (!acpi_disabled && !pnpacpi_disabled) {
 		pnpbios_disabled = 1;
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 8986382..01b6d3f 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -336,6 +336,7 @@
 	struct device_node *np = pdev->dev.of_node;
 	const struct of_device_id *match;
 	struct rockchip_iodomain *iod;
+	struct device *parent;
 	int i, ret = 0;
 
 	if (!np)
@@ -351,7 +352,14 @@
 	match = of_match_node(rockchip_iodomain_match, np);
 	iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
 
-	iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+	parent = pdev->dev.parent;
+	if (parent && parent->of_node) {
+		iod->grf = syscon_node_to_regmap(parent->of_node);
+	} else {
+		dev_dbg(&pdev->dev, "falling back to old binding\n");
+		iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+	}
+
 	if (IS_ERR(iod->grf)) {
 		dev_err(&pdev->dev, "couldn't find grf regmap\n");
 		return PTR_ERR(iod->grf);
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index cdfd01f0..b2766b8 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -34,6 +34,9 @@
 #include <asm/processor.h>
 #include <asm/cpu_device_id.h>
 
+/* Local defines */
+#define MSR_PLATFORM_POWER_LIMIT	0x0000065C
+
 /* bitmasks for RAPL MSRs, used by primitive access functions */
 #define ENERGY_STATUS_MASK      0xffffffff
 
@@ -86,6 +89,7 @@
 	RAPL_DOMAIN_PP0, /* core power plane */
 	RAPL_DOMAIN_PP1, /* graphics uncore */
 	RAPL_DOMAIN_DRAM,/* DRAM control_type */
+	RAPL_DOMAIN_PLATFORM, /* PSys control_type */
 	RAPL_DOMAIN_MAX,
 };
 
@@ -251,9 +255,11 @@
 	"core",
 	"uncore",
 	"dram",
+	"psys",
 };
 
 static struct powercap_control_type *control_type; /* PowerCap Controller */
+static struct rapl_domain *platform_rapl_domain; /* Platform (PSys) domain */
 
 /* caller to ensure CPU hotplug lock is held */
 static struct rapl_package *find_package_by_id(int id)
@@ -409,6 +415,14 @@
 		.set_enable = set_domain_enable,
 		.get_enable = get_domain_enable,
 	},
+	/* RAPL_DOMAIN_PLATFORM */
+	{
+		.get_energy_uj = get_energy_counter,
+		.get_max_energy_range_uj = get_max_energy_counter,
+		.release = release_zone,
+		.set_enable = set_domain_enable,
+		.get_enable = get_domain_enable,
+	},
 };
 
 static int set_power_limit(struct powercap_zone *power_zone, int id,
@@ -1091,6 +1105,7 @@
 	RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
 	RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
 	RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
+	RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
 	RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
 	RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
 	RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
@@ -1100,6 +1115,8 @@
 	RAPL_CPU(0X5C, rapl_defaults_core),/* Broxton */
 	RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
 	RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
+	RAPL_CPU(0x8E, rapl_defaults_core),/* Kabylake */
+	RAPL_CPU(0x9E, rapl_defaults_core),/* Kabylake */
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
@@ -1159,6 +1176,13 @@
 			powercap_unregister_zone(control_type,
 						&rd_package->power_zone);
 	}
+
+	if (platform_rapl_domain) {
+		powercap_unregister_zone(control_type,
+					 &platform_rapl_domain->power_zone);
+		kfree(platform_rapl_domain);
+	}
+
 	powercap_unregister_control_type(control_type);
 
 	return 0;
@@ -1238,6 +1262,47 @@
 	return ret;
 }
 
+static int rapl_register_psys(void)
+{
+	struct rapl_domain *rd;
+	struct powercap_zone *power_zone;
+	u64 val;
+
+	if (rdmsrl_safe_on_cpu(0, MSR_PLATFORM_ENERGY_STATUS, &val) || !val)
+		return -ENODEV;
+
+	if (rdmsrl_safe_on_cpu(0, MSR_PLATFORM_POWER_LIMIT, &val) || !val)
+		return -ENODEV;
+
+	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+	if (!rd)
+		return -ENOMEM;
+
+	rd->name = rapl_domain_names[RAPL_DOMAIN_PLATFORM];
+	rd->id = RAPL_DOMAIN_PLATFORM;
+	rd->msrs[0] = MSR_PLATFORM_POWER_LIMIT;
+	rd->msrs[1] = MSR_PLATFORM_ENERGY_STATUS;
+	rd->rpl[0].prim_id = PL1_ENABLE;
+	rd->rpl[0].name = pl1_name;
+	rd->rpl[1].prim_id = PL2_ENABLE;
+	rd->rpl[1].name = pl2_name;
+	rd->rp = find_package_by_id(0);
+
+	power_zone = powercap_register_zone(&rd->power_zone, control_type,
+					    "psys", NULL,
+					    &zone_ops[RAPL_DOMAIN_PLATFORM],
+					    2, &constraint_ops);
+
+	if (IS_ERR(power_zone)) {
+		kfree(rd);
+		return PTR_ERR(power_zone);
+	}
+
+	platform_rapl_domain = rd;
+
+	return 0;
+}
+
 static int rapl_register_powercap(void)
 {
 	struct rapl_domain *rd;
@@ -1254,6 +1319,10 @@
 	list_for_each_entry(rp, &rapl_packages, plist)
 		if (rapl_package_register_powercap(rp))
 			goto err_cleanup_package;
+
+	/* Don't bail out if PSys is not supported */
+	rapl_register_psys();
+
 	return ret;
 
 err_cleanup_package:
@@ -1288,6 +1357,9 @@
 	case RAPL_DOMAIN_DRAM:
 		msr = MSR_DRAM_ENERGY_STATUS;
 		break;
+	case RAPL_DOMAIN_PLATFORM:
+		/* PSYS(PLATFORM) is not a CPU domain, so avoid printing an error */
+		return -EINVAL;
 	default:
 		pr_err("invalid domain id %d\n", domain);
 		return -EINVAL;
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 7831bc6..680fbc7 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -128,6 +128,13 @@
 	set_bit(PWMF_REQUESTED, &pwm->flags);
 	pwm->label = label;
 
+	/*
+	 * FIXME: This should be removed once all PWM users properly make use
+	 * of struct pwm_args to initialize the PWM device. As long as this is
+	 * here, the PWM state and hardware state can get out of sync.
+	 */
+	pwm_apply_args(pwm);
+
 	return 0;
 }
 
@@ -146,12 +153,12 @@
 	if (IS_ERR(pwm))
 		return pwm;
 
-	pwm_set_period(pwm, args->args[1]);
+	pwm->args.period = args->args[1];
 
 	if (args->args[2] & PWM_POLARITY_INVERTED)
-		pwm_set_polarity(pwm, PWM_POLARITY_INVERSED);
+		pwm->args.polarity = PWM_POLARITY_INVERSED;
 	else
-		pwm_set_polarity(pwm, PWM_POLARITY_NORMAL);
+		pwm->args.polarity = PWM_POLARITY_NORMAL;
 
 	return pwm;
 }
@@ -172,7 +179,7 @@
 	if (IS_ERR(pwm))
 		return pwm;
 
-	pwm_set_period(pwm, args->args[1]);
+	pwm->args.period = args->args[1];
 
 	return pwm;
 }
@@ -747,13 +754,13 @@
 	if (!chip)
 		goto out;
 
+	pwm->args.period = chosen->period;
+	pwm->args.polarity = chosen->polarity;
+
 	pwm = pwm_request_from_chip(chip, chosen->index, con_id ?: dev_id);
 	if (IS_ERR(pwm))
 		goto out;
 
-	pwm_set_period(pwm, chosen->period);
-	pwm_set_polarity(pwm, chosen->polarity);
-
 out:
 	mutex_unlock(&pwm_lookup_lock);
 	return pwm;
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index a80c108..7d33542 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -60,7 +60,7 @@
 		return -EINVAL;
 
 	/* Store constant period value */
-	pwm_set_period(pwm, DIV_ROUND_CLOSEST(NSEC_PER_SEC, freq));
+	pwm->args.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, freq);
 
 	return 0;
 }
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 7225ac6..fad968e 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -392,7 +392,7 @@
 
 	.max_register = FTM_PWMLOAD,
 	.volatile_reg = fsl_pwm_volatile_reg,
-	.cache_type = REGCACHE_RBTREE,
+	.cache_type = REGCACHE_FLAT,
 };
 
 static int fsl_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index cb2f702..58b709f 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -160,7 +160,7 @@
 	if (IS_ERR(pwm))
 		return pwm;
 
-	pwm_set_period(pwm, args->args[0]);
+	pwm->args.period = args->args[0];
 
 	return pwm;
 }
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 9607bc8..e165b7c 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -126,7 +126,7 @@
 	struct list_head node;
 	struct mport_dev *md;
 	enum rio_mport_map_dir dir;
-	u32 rioid;
+	u16 rioid;
 	u64 rio_addr;
 	dma_addr_t phys_addr; /* for mmap */
 	void *virt_addr; /* kernel address, for dma_free_coherent */
@@ -137,7 +137,7 @@
 
 struct rio_mport_dma_map {
 	int valid;
-	uint64_t length;
+	u64 length;
 	void *vaddr;
 	dma_addr_t paddr;
 };
@@ -208,7 +208,7 @@
 	struct kfifo            event_fifo;
 	wait_queue_head_t       event_rx_wait;
 	spinlock_t              fifo_lock;
-	unsigned int            event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
+	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 	struct dma_chan		*dmach;
 	struct list_head	async_list;
@@ -276,7 +276,8 @@
 		return -EFAULT;
 
 	if ((maint_io.offset % 4) ||
-	    (maint_io.length == 0) || (maint_io.length % 4))
+	    (maint_io.length == 0) || (maint_io.length % 4) ||
+	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
 		return -EINVAL;
 
 	buffer = vmalloc(maint_io.length);
@@ -298,7 +299,8 @@
 		offset += 4;
 	}
 
-	if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length)))
+	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
+				   buffer, maint_io.length)))
 		ret = -EFAULT;
 out:
 	vfree(buffer);
@@ -319,7 +321,8 @@
 		return -EFAULT;
 
 	if ((maint_io.offset % 4) ||
-	    (maint_io.length == 0) || (maint_io.length % 4))
+	    (maint_io.length == 0) || (maint_io.length % 4) ||
+	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
 		return -EINVAL;
 
 	buffer = vmalloc(maint_io.length);
@@ -327,7 +330,8 @@
 		return -ENOMEM;
 	length = maint_io.length;
 
-	if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) {
+	if (unlikely(copy_from_user(buffer,
+			(void __user *)(uintptr_t)maint_io.buffer, length))) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -360,7 +364,7 @@
  */
 static int
 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
-				  u32 rioid, u64 raddr, u32 size,
+				  u16 rioid, u64 raddr, u32 size,
 				  dma_addr_t *paddr)
 {
 	struct rio_mport *mport = md->mport;
@@ -369,7 +373,7 @@
 
 	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
 
-	map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (map == NULL)
 		return -ENOMEM;
 
@@ -394,7 +398,7 @@
 
 static int
 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
-			       u32 rioid, u64 raddr, u32 size,
+			       u16 rioid, u64 raddr, u32 size,
 			       dma_addr_t *paddr)
 {
 	struct rio_mport_mapping *map;
@@ -433,7 +437,7 @@
 	dma_addr_t paddr;
 	int ret;
 
-	if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
 		return -EFAULT;
 
 	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
@@ -448,7 +452,7 @@
 
 	map.handle = paddr;
 
-	if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap))))
+	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
 		return -EFAULT;
 	return 0;
 }
@@ -469,7 +473,7 @@
 	if (!md->mport->ops->unmap_outb)
 		return -EPROTONOSUPPORT;
 
-	if (copy_from_user(&handle, arg, sizeof(u64)))
+	if (copy_from_user(&handle, arg, sizeof(handle)))
 		return -EFAULT;
 
 	rmcd_debug(OBW, "h=0x%llx", handle);
@@ -498,9 +502,9 @@
 static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
 {
 	struct mport_dev *md = priv->md;
-	uint16_t hdid;
+	u16 hdid;
 
-	if (copy_from_user(&hdid, arg, sizeof(uint16_t)))
+	if (copy_from_user(&hdid, arg, sizeof(hdid)))
 		return -EFAULT;
 
 	md->mport->host_deviceid = hdid;
@@ -520,9 +524,9 @@
 static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
 {
 	struct mport_dev *md = priv->md;
-	uint32_t comptag;
+	u32 comptag;
 
-	if (copy_from_user(&comptag, arg, sizeof(uint32_t)))
+	if (copy_from_user(&comptag, arg, sizeof(comptag)))
 		return -EFAULT;
 
 	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
@@ -837,7 +841,7 @@
  * @xfer: data transfer descriptor structure
  */
 static int
-rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
+rio_dma_transfer(struct file *filp, u32 transfer_mode,
 		 enum rio_transfer_sync sync, enum dma_data_direction dir,
 		 struct rio_transfer_io *xfer)
 {
@@ -875,7 +879,7 @@
 		unsigned long offset;
 		long pinned;
 
-		offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK;
+		offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
 		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
 
 		page_list = kmalloc_array(nr_pages,
@@ -886,7 +890,7 @@
 		}
 
 		down_read(&current->mm->mmap_sem);
-		pinned = get_user_pages(current, current->mm,
+		pinned = get_user_pages(
 				(unsigned long)xfer->loc_addr & PAGE_MASK,
 				nr_pages, dir == DMA_FROM_DEVICE, 0,
 				page_list, NULL);
@@ -1015,19 +1019,20 @@
 	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
 		return -EFAULT;
 
-	if (transaction.count != 1)
+	if (transaction.count != 1) /* only single transfer for now */
 		return -EINVAL;
 
 	if ((transaction.transfer_mode &
 	     priv->md->properties.transfer_mode) == 0)
 		return -ENODEV;
 
-	transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io));
+	transfer = vmalloc(transaction.count * sizeof(*transfer));
 	if (!transfer)
 		return -ENOMEM;
 
-	if (unlikely(copy_from_user(transfer, transaction.block,
-	      transaction.count * sizeof(struct rio_transfer_io)))) {
+	if (unlikely(copy_from_user(transfer,
+				    (void __user *)(uintptr_t)transaction.block,
+				    transaction.count * sizeof(*transfer)))) {
 		ret = -EFAULT;
 		goto out_free;
 	}
@@ -1038,8 +1043,9 @@
 		ret = rio_dma_transfer(filp, transaction.transfer_mode,
 			transaction.sync, dir, &transfer[i]);
 
-	if (unlikely(copy_to_user(transaction.block, transfer,
-	      transaction.count * sizeof(struct rio_transfer_io))))
+	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
+				  transfer,
+				  transaction.count * sizeof(*transfer))))
 		ret = -EFAULT;
 
 out_free:
@@ -1129,11 +1135,11 @@
 }
 
 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
-			uint64_t size, struct rio_mport_mapping **mapping)
+			u64 size, struct rio_mport_mapping **mapping)
 {
 	struct rio_mport_mapping *map;
 
-	map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (map == NULL)
 		return -ENOMEM;
 
@@ -1165,7 +1171,7 @@
 	struct rio_mport_mapping *mapping = NULL;
 	int ret;
 
-	if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem))))
+	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
 		return -EFAULT;
 
 	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
@@ -1174,7 +1180,7 @@
 
 	map.dma_handle = mapping->phys_addr;
 
-	if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) {
+	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
 		mutex_lock(&md->buf_mutex);
 		kref_put(&mapping->ref, mport_release_mapping);
 		mutex_unlock(&md->buf_mutex);
@@ -1192,7 +1198,7 @@
 	int ret = -EFAULT;
 	struct rio_mport_mapping *map, *_map;
 
-	if (copy_from_user(&handle, arg, sizeof(u64)))
+	if (copy_from_user(&handle, arg, sizeof(handle)))
 		return -EFAULT;
 	rmcd_debug(EXIT, "filp=%p", filp);
 
@@ -1242,14 +1248,18 @@
 
 static int
 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
-				u64 raddr, u32 size,
+				u64 raddr, u64 size,
 				struct rio_mport_mapping **mapping)
 {
 	struct rio_mport *mport = md->mport;
 	struct rio_mport_mapping *map;
 	int ret;
 
-	map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+	/* rio_map_inb_region() accepts u32 size */
+	if (size > 0xffffffff)
+		return -EINVAL;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (map == NULL)
 		return -ENOMEM;
 
@@ -1262,7 +1272,7 @@
 
 	if (raddr == RIO_MAP_ANY_ADDR)
 		raddr = map->phys_addr;
-	ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0);
+	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
 	if (ret < 0)
 		goto err_map_inb;
 
@@ -1288,7 +1298,7 @@
 
 static int
 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
-			      u64 raddr, u32 size,
+			      u64 raddr, u64 size,
 			      struct rio_mport_mapping **mapping)
 {
 	struct rio_mport_mapping *map;
@@ -1331,7 +1341,7 @@
 
 	if (!md->mport->ops->map_inb)
 		return -EPROTONOSUPPORT;
-	if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
 		return -EFAULT;
 
 	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
@@ -1344,7 +1354,7 @@
 	map.handle = mapping->phys_addr;
 	map.rio_addr = mapping->rio_addr;
 
-	if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) {
+	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
 		/* Delete mapping if it was created by this request */
 		if (ret == 0 && mapping->filp == filp) {
 			mutex_lock(&md->buf_mutex);
@@ -1375,7 +1385,7 @@
 	if (!md->mport->ops->unmap_inb)
 		return -EPROTONOSUPPORT;
 
-	if (copy_from_user(&handle, arg, sizeof(u64)))
+	if (copy_from_user(&handle, arg, sizeof(handle)))
 		return -EFAULT;
 
 	mutex_lock(&md->buf_mutex);
@@ -1401,7 +1411,7 @@
 static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
 {
 	struct mport_dev *md = priv->md;
-	uint32_t port_idx = md->mport->index;
+	u32 port_idx = md->mport->index;
 
 	rmcd_debug(MPORT, "port_index=%d", port_idx);
 
@@ -1451,7 +1461,7 @@
 	handled = 0;
 	spin_lock(&data->db_lock);
 	list_for_each_entry(db_filter, &data->doorbells, data_node) {
-		if (((db_filter->filter.rioid == 0xffffffff ||
+		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
 		      db_filter->filter.rioid == src)) &&
 		      info >= db_filter->filter.low &&
 		      info <= db_filter->filter.high) {
@@ -1525,6 +1535,9 @@
 	if (copy_from_user(&filter, arg, sizeof(filter)))
 		return -EFAULT;
 
+	if (filter.low > filter.high)
+		return -EINVAL;
+
 	spin_lock_irqsave(&priv->md->db_lock, flags);
 	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
 		if (db_filter->filter.rioid == filter.rioid &&
@@ -1737,10 +1750,10 @@
 		return -EEXIST;
 	}
 
-	size = sizeof(struct rio_dev);
+	size = sizeof(*rdev);
 	mport = md->mport;
-	destid = (u16)dev_info.destid;
-	hopcount = (u8)dev_info.hopcount;
+	destid = dev_info.destid;
+	hopcount = dev_info.hopcount;
 
 	if (rio_mport_read_config_32(mport, destid, hopcount,
 				     RIO_PEF_CAR, &rval))
@@ -1872,8 +1885,8 @@
 		do {
 			rdev = rio_get_comptag(dev_info.comptag, rdev);
 			if (rdev && rdev->dev.parent == &mport->net->dev &&
-			    rdev->destid == (u16)dev_info.destid &&
-			    rdev->hopcount == (u8)dev_info.hopcount)
+			    rdev->destid == dev_info.destid &&
+			    rdev->hopcount == dev_info.hopcount)
 				break;
 		} while (rdev);
 	}
@@ -2146,8 +2159,8 @@
 		return maint_port_idx_get(data, (void __user *)arg);
 	case RIO_MPORT_GET_PROPERTIES:
 		md->properties.hdid = md->mport->host_deviceid;
-		if (copy_to_user((void __user *)arg, &(data->md->properties),
-				 sizeof(data->md->properties)))
+		if (copy_to_user((void __user *)arg, &(md->properties),
+				 sizeof(md->properties)))
 			return -EFAULT;
 		return 0;
 	case RIO_ENABLE_DOORBELL_RANGE:
@@ -2159,11 +2172,11 @@
 	case RIO_DISABLE_PORTWRITE_RANGE:
 		return rio_mport_remove_pw_filter(data, (void __user *)arg);
 	case RIO_SET_EVENT_MASK:
-		data->event_mask = arg;
+		data->event_mask = (u32)arg;
 		return 0;
 	case RIO_GET_EVENT_MASK:
 		if (copy_to_user((void __user *)arg, &data->event_mask,
-				    sizeof(data->event_mask)))
+				    sizeof(u32)))
 			return -EFAULT;
 		return 0;
 	case RIO_MAP_OUTBOUND:
@@ -2374,7 +2387,7 @@
 			return -EINVAL;
 
 		ret = rio_mport_send_doorbell(mport,
-					      (u16)event.u.doorbell.rioid,
+					      event.u.doorbell.rioid,
 					      event.u.doorbell.payload);
 		if (ret < 0)
 			return ret;
@@ -2421,7 +2434,7 @@
 	struct mport_dev *md;
 	struct rio_mport_attr attr;
 
-	md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL);
+	md = kzalloc(sizeof(*md), GFP_KERNEL);
 	if (!md) {
 		rmcd_error("Unable to allocate a device object");
 		return NULL;
@@ -2470,7 +2483,7 @@
 	/* The transfer_mode property will be returned through mport query
 	 * interface
 	 */
-#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */
+#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
 	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
 #else
 	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
@@ -2669,9 +2682,9 @@
 
 	/* Create device class needed by udev */
 	dev_class = class_create(THIS_MODULE, DRV_NAME);
-	if (!dev_class) {
+	if (IS_ERR(dev_class)) {
 		rmcd_error("Unable to create " DRV_NAME " class");
-		return -EINVAL;
+		return PTR_ERR(dev_class);
 	}
 
 	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c77dc08..144cbf5 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -321,6 +321,15 @@
 	help
 	  This driver supports LP8720/LP8725 PMIC
 
+config REGULATOR_LP873X
+	tristate "TI LP873X Power regulators"
+	depends on MFD_LP873X && OF
+	help
+	  This driver supports LP873X voltage regulator chips. The LP873X
+	  provides two step-down converters and two general-purpose LDO
+	  voltage regulators. It supports software-based voltage control
+	  for different voltage domains.
+
 config REGULATOR_LP8755
 	tristate "TI LP8755 High Performance PMU driver"
 	depends on I2C
@@ -409,6 +418,7 @@
 config REGULATOR_MAX8973
 	tristate "Maxim MAX8973 voltage regulator "
 	depends on I2C
+	depends on THERMAL && THERMAL_OF
 	select REGMAP_I2C
 	help
 	  The MAXIM MAX8973 high-efficiency, three-phase, DC-DC step-down
@@ -548,6 +558,13 @@
 	  Say y here to support the voltage regulators and converters
 	  PV88060
 
+config REGULATOR_PV88080
+	tristate "Powerventure Semiconductor PV88080 regulator"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	  Say y here to support the buck converters on PV88080
+
 config REGULATOR_PV88090
 	tristate "Powerventure Semiconductor PV88090 regulator"
 	depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 61bfbb9..85a1d44 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -42,11 +42,12 @@
 obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
 obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
 obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
+obj-$(CONFIG_REGULATOR_LP873X) += lp873x-regulator.o
 obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
 obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
 obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
 obj-$(CONFIG_REGULATOR_LTC3589) += ltc3589.o
-obj-$(CONFIG_REGULATOR_MAX14577) += max14577.o
+obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o
 obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
 obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
 obj-$(CONFIG_REGULATOR_MAX8649)	+= max8649.o
@@ -55,10 +56,10 @@
 obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
 obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
 obj-$(CONFIG_REGULATOR_MAX8973) += max8973-regulator.o
-obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
+obj-$(CONFIG_REGULATOR_MAX8997) += max8997-regulator.o
 obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
 obj-$(CONFIG_REGULATOR_MAX77686) += max77686-regulator.o
-obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
+obj-$(CONFIG_REGULATOR_MAX77693) += max77693-regulator.o
 obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
 obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
 obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
@@ -71,6 +72,7 @@
 obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
 obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
 obj-$(CONFIG_REGULATOR_PV88060) += pv88060-regulator.o
+obj-$(CONFIG_REGULATOR_PV88080) += pv88080-regulator.o
 obj-$(CONFIG_REGULATOR_PV88090) += pv88090-regulator.o
 obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
 obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 000d566..a1cd0d4 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -139,6 +139,74 @@
 	int off_mask;
 };
 
+static const struct regmap_range act8600_reg_ranges[] = {
+	regmap_reg_range(0x00, 0x01),
+	regmap_reg_range(0x10, 0x10),
+	regmap_reg_range(0x12, 0x12),
+	regmap_reg_range(0x20, 0x20),
+	regmap_reg_range(0x22, 0x22),
+	regmap_reg_range(0x30, 0x30),
+	regmap_reg_range(0x32, 0x32),
+	regmap_reg_range(0x40, 0x41),
+	regmap_reg_range(0x50, 0x51),
+	regmap_reg_range(0x60, 0x61),
+	regmap_reg_range(0x70, 0x71),
+	regmap_reg_range(0x80, 0x81),
+	regmap_reg_range(0x91, 0x91),
+	regmap_reg_range(0xA1, 0xA1),
+	regmap_reg_range(0xA8, 0xAA),
+	regmap_reg_range(0xB0, 0xB0),
+	regmap_reg_range(0xB2, 0xB2),
+	regmap_reg_range(0xC1, 0xC1),
+};
+
+static const struct regmap_range act8600_reg_ro_ranges[] = {
+	regmap_reg_range(0xAA, 0xAA),
+	regmap_reg_range(0xC1, 0xC1),
+};
+
+static const struct regmap_range act8600_reg_volatile_ranges[] = {
+	regmap_reg_range(0x00, 0x01),
+	regmap_reg_range(0x12, 0x12),
+	regmap_reg_range(0x22, 0x22),
+	regmap_reg_range(0x32, 0x32),
+	regmap_reg_range(0x41, 0x41),
+	regmap_reg_range(0x51, 0x51),
+	regmap_reg_range(0x61, 0x61),
+	regmap_reg_range(0x71, 0x71),
+	regmap_reg_range(0x81, 0x81),
+	regmap_reg_range(0xA8, 0xA8),
+	regmap_reg_range(0xAA, 0xAA),
+	regmap_reg_range(0xB0, 0xB0),
+	regmap_reg_range(0xC1, 0xC1),
+};
+
+static const struct regmap_access_table act8600_write_ranges_table = {
+	.yes_ranges	= act8600_reg_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(act8600_reg_ranges),
+	.no_ranges	= act8600_reg_ro_ranges,
+	.n_no_ranges	= ARRAY_SIZE(act8600_reg_ro_ranges),
+};
+
+static const struct regmap_access_table act8600_read_ranges_table = {
+	.yes_ranges	= act8600_reg_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(act8600_reg_ranges),
+};
+
+static const struct regmap_access_table act8600_volatile_ranges_table = {
+	.yes_ranges	= act8600_reg_volatile_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(act8600_reg_volatile_ranges),
+};
+
+static const struct regmap_config act8600_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = 0xFF,
+	.wr_table = &act8600_write_ranges_table,
+	.rd_table = &act8600_read_ranges_table,
+	.volatile_table = &act8600_volatile_ranges_table,
+};
+
 static const struct regmap_config act8865_regmap_config = {
 	.reg_bits = 8,
 	.val_bits = 8,
@@ -319,7 +387,6 @@
 };
 
 static int act8865_pdata_from_dt(struct device *dev,
-				 struct device_node **of_node,
 				 struct act8865_platform_data *pdata,
 				 unsigned long type)
 {
@@ -370,7 +437,7 @@
 		regulator->id = i;
 		regulator->name = matches[i].name;
 		regulator->init_data = matches[i].init_data;
-		of_node[i] = matches[i].of_node;
+		regulator->of_node = matches[i].of_node;
 		regulator++;
 	}
 
@@ -378,7 +445,6 @@
 }
 #else
 static inline int act8865_pdata_from_dt(struct device *dev,
-					struct device_node **of_node,
 					struct act8865_platform_data *pdata,
 					unsigned long type)
 {
@@ -386,8 +452,8 @@
 }
 #endif
 
-static struct regulator_init_data
-*act8865_get_init_data(int id, struct act8865_platform_data *pdata)
+static struct act8865_regulator_data *act8865_get_regulator_data(
+		int id, struct act8865_platform_data *pdata)
 {
 	int i;
 
@@ -396,7 +462,7 @@
 
 	for (i = 0; i < pdata->num_regulators; i++) {
 		if (pdata->regulators[i].id == id)
-			return pdata->regulators[i].init_data;
+			return &pdata->regulators[i];
 	}
 
 	return NULL;
@@ -418,9 +484,9 @@
 	const struct regulator_desc *regulators;
 	struct act8865_platform_data pdata_of, *pdata;
 	struct device *dev = &client->dev;
-	struct device_node **of_node;
 	int i, ret, num_regulators;
 	struct act8865 *act8865;
+	const struct regmap_config *regmap_config;
 	unsigned long type;
 	int off_reg, off_mask;
 	int voltage_select = 0;
@@ -447,12 +513,14 @@
 	case ACT8600:
 		regulators = act8600_regulators;
 		num_regulators = ARRAY_SIZE(act8600_regulators);
+		regmap_config = &act8600_regmap_config;
 		off_reg = -1;
 		off_mask = -1;
 		break;
 	case ACT8846:
 		regulators = act8846_regulators;
 		num_regulators = ARRAY_SIZE(act8846_regulators);
+		regmap_config = &act8865_regmap_config;
 		off_reg = ACT8846_GLB_OFF_CTRL;
 		off_mask = ACT8846_OFF_SYSMASK;
 		break;
@@ -464,6 +532,7 @@
 			regulators = act8865_regulators;
 			num_regulators = ARRAY_SIZE(act8865_regulators);
 		}
+		regmap_config = &act8865_regmap_config;
 		off_reg = ACT8865_SYS_CTRL;
 		off_mask = ACT8865_MSTROFF;
 		break;
@@ -472,34 +541,22 @@
 		return -EINVAL;
 	}
 
-	of_node = devm_kzalloc(dev, sizeof(struct device_node *) *
-			       num_regulators, GFP_KERNEL);
-	if (!of_node)
-		return -ENOMEM;
-
 	if (dev->of_node && !pdata) {
-		ret = act8865_pdata_from_dt(dev, of_node, &pdata_of, type);
+		ret = act8865_pdata_from_dt(dev, &pdata_of, type);
 		if (ret < 0)
 			return ret;
 
 		pdata = &pdata_of;
 	}
 
-	if (pdata->num_regulators > num_regulators) {
-		dev_err(dev, "too many regulators: %d\n",
-			pdata->num_regulators);
-		return -EINVAL;
-	}
-
 	act8865 = devm_kzalloc(dev, sizeof(struct act8865), GFP_KERNEL);
 	if (!act8865)
 		return -ENOMEM;
 
-	act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config);
+	act8865->regmap = devm_regmap_init_i2c(client, regmap_config);
 	if (IS_ERR(act8865->regmap)) {
 		ret = PTR_ERR(act8865->regmap);
-		dev_err(&client->dev, "Failed to allocate register map: %d\n",
-			ret);
+		dev_err(dev, "Failed to allocate register map: %d\n", ret);
 		return ret;
 	}
 
@@ -518,15 +575,20 @@
 	for (i = 0; i < num_regulators; i++) {
 		const struct regulator_desc *desc = &regulators[i];
 		struct regulator_config config = { };
+		struct act8865_regulator_data *rdata;
 		struct regulator_dev *rdev;
 
 		config.dev = dev;
-		config.init_data = act8865_get_init_data(desc->id, pdata);
-		config.of_node = of_node[i];
 		config.driver_data = act8865;
 		config.regmap = act8865->regmap;
 
-		rdev = devm_regulator_register(&client->dev, desc, &config);
+		rdata = act8865_get_regulator_data(desc->id, pdata);
+		if (rdata) {
+			config.init_data = rdata->init_data;
+			config.of_node = rdata->of_node;
+		}
+
+		rdev = devm_regulator_register(dev, desc, &config);
 		if (IS_ERR(rdev)) {
 			dev_err(dev, "failed to register %s\n", desc->name);
 			return PTR_ERR(rdev);
@@ -534,7 +596,6 @@
 	}
 
 	i2c_set_clientdata(client, act8865);
-	devm_kfree(dev, of_node);
 
 	return 0;
 }
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 8b046ee..66337e1 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -372,7 +372,7 @@
 			AS3722_LDO_ILIMIT_MASK, reg);
 }
 
-static struct regulator_ops as3722_ldo0_ops = {
+static const struct regulator_ops as3722_ldo0_ops = {
 	.is_enabled = regulator_is_enabled_regmap,
 	.enable = regulator_enable_regmap,
 	.disable = regulator_disable_regmap,
@@ -383,7 +383,7 @@
 	.set_current_limit = as3722_ldo_set_current_limit,
 };
 
-static struct regulator_ops as3722_ldo0_extcntrl_ops = {
+static const struct regulator_ops as3722_ldo0_extcntrl_ops = {
 	.list_voltage = regulator_list_voltage_linear,
 	.get_voltage_sel = regulator_get_voltage_sel_regmap,
 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -415,7 +415,7 @@
 	return 150000;
 }
 
-static struct regulator_ops as3722_ldo3_ops = {
+static const struct regulator_ops as3722_ldo3_ops = {
 	.is_enabled = regulator_is_enabled_regmap,
 	.enable = regulator_enable_regmap,
 	.disable = regulator_disable_regmap,
@@ -425,20 +425,45 @@
 	.get_current_limit = as3722_ldo3_get_current_limit,
 };
 
-static struct regulator_ops as3722_ldo3_extcntrl_ops = {
+static const struct regulator_ops as3722_ldo3_extcntrl_ops = {
 	.list_voltage = regulator_list_voltage_linear,
 	.get_voltage_sel = regulator_get_voltage_sel_regmap,
 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
 	.get_current_limit = as3722_ldo3_get_current_limit,
 };
 
+static const struct regulator_ops as3722_ldo6_ops = {
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.map_voltage = regulator_map_voltage_linear_range,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.list_voltage = regulator_list_voltage_linear_range,
+	.get_current_limit = as3722_ldo_get_current_limit,
+	.set_current_limit = as3722_ldo_set_current_limit,
+	.get_bypass = regulator_get_bypass_regmap,
+	.set_bypass = regulator_set_bypass_regmap,
+};
+
+static const struct regulator_ops as3722_ldo6_extcntrl_ops = {
+	.map_voltage = regulator_map_voltage_linear_range,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.list_voltage = regulator_list_voltage_linear_range,
+	.get_current_limit = as3722_ldo_get_current_limit,
+	.set_current_limit = as3722_ldo_set_current_limit,
+	.get_bypass = regulator_get_bypass_regmap,
+	.set_bypass = regulator_set_bypass_regmap,
+};
+
 static const struct regulator_linear_range as3722_ldo_ranges[] = {
 	REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
 	REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000),
 	REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000),
 };
 
-static struct regulator_ops as3722_ldo_ops = {
+static const struct regulator_ops as3722_ldo_ops = {
 	.is_enabled = regulator_is_enabled_regmap,
 	.enable = regulator_enable_regmap,
 	.disable = regulator_disable_regmap,
@@ -450,7 +475,7 @@
 	.set_current_limit = as3722_ldo_set_current_limit,
 };
 
-static struct regulator_ops as3722_ldo_extcntrl_ops = {
+static const struct regulator_ops as3722_ldo_extcntrl_ops = {
 	.map_voltage = regulator_map_voltage_linear_range,
 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
 	.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -616,7 +641,7 @@
 	REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000),
 };
 
-static struct regulator_ops as3722_sd016_ops = {
+static const struct regulator_ops as3722_sd016_ops = {
 	.is_enabled = regulator_is_enabled_regmap,
 	.enable = regulator_enable_regmap,
 	.disable = regulator_disable_regmap,
@@ -630,7 +655,7 @@
 	.set_mode = as3722_sd_set_mode,
 };
 
-static struct regulator_ops as3722_sd016_extcntrl_ops = {
+static const struct regulator_ops as3722_sd016_extcntrl_ops = {
 	.list_voltage = regulator_list_voltage_linear,
 	.map_voltage = regulator_map_voltage_linear,
 	.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -641,7 +666,7 @@
 	.set_mode = as3722_sd_set_mode,
 };
 
-static struct regulator_ops as3722_sd2345_ops = {
+static const struct regulator_ops as3722_sd2345_ops = {
 	.is_enabled = regulator_is_enabled_regmap,
 	.enable = regulator_enable_regmap,
 	.disable = regulator_disable_regmap,
@@ -653,7 +678,7 @@
 	.set_mode = as3722_sd_set_mode,
 };
 
-static struct regulator_ops as3722_sd2345_extcntrl_ops = {
+static const struct regulator_ops as3722_sd2345_extcntrl_ops = {
 	.list_voltage = regulator_list_voltage_linear_range,
 	.map_voltage = regulator_map_voltage_linear_range,
 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -760,7 +785,7 @@
 	struct as3722_regulator_config_data *reg_config;
 	struct regulator_dev *rdev;
 	struct regulator_config config = { };
-	struct regulator_ops *ops;
+	const struct regulator_ops *ops;
 	int id;
 	int ret;
 
@@ -829,6 +854,24 @@
 				}
 			}
 			break;
+		case AS3722_REGULATOR_ID_LDO6:
+			if (reg_config->ext_control)
+				ops = &as3722_ldo6_extcntrl_ops;
+			else
+				ops = &as3722_ldo6_ops;
+			as3722_regs->desc[id].enable_time = 500;
+			as3722_regs->desc[id].bypass_reg =
+						AS3722_LDO6_VOLTAGE_REG;
+			as3722_regs->desc[id].bypass_mask =
+						AS3722_LDO_VSEL_MASK;
+			as3722_regs->desc[id].bypass_val_on =
+						AS3722_LDO6_VSEL_BYPASS;
+			as3722_regs->desc[id].bypass_val_off =
+						AS3722_LDO6_VSEL_BYPASS;
+			as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
+			as3722_regs->desc[id].n_linear_ranges =
+						ARRAY_SIZE(as3722_ldo_ranges);
+			break;
 		case AS3722_REGULATOR_ID_SD0:
 		case AS3722_REGULATOR_ID_SD1:
 		case AS3722_REGULATOR_ID_SD6:
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 40cd894..514a5e8 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -157,7 +157,9 @@
 static const struct regulator_linear_range axp20x_ldo4_ranges[] = {
 	REGULATOR_LINEAR_RANGE(1250000, 0x0, 0x0, 0),
 	REGULATOR_LINEAR_RANGE(1300000, 0x1, 0x8, 100000),
-	REGULATOR_LINEAR_RANGE(2500000, 0x9, 0xf, 100000),
+	REGULATOR_LINEAR_RANGE(2500000, 0x9, 0x9, 0),
+	REGULATOR_LINEAR_RANGE(2700000, 0xa, 0xb, 100000),
+	REGULATOR_LINEAR_RANGE(3000000, 0xc, 0xf, 100000),
 };
 
 static const struct regulator_desc axp20x_regulators[] = {
@@ -215,10 +217,14 @@
 		 AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
 	AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
 		 AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
-	AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
+	/* Note: the datasheet only guarantees reliable operation up to
+	 * 3.3V; this needs to be enforced via dts-provided constraints. */
+	AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100,
 		    AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
 		    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
-	AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
+	/* Note: the datasheet only guarantees reliable operation up to
+	 * 3.3V; this needs to be enforced via dts-provided constraints. */
+	AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100,
 		    AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
 		    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
 	AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e0b7642..ec8184d5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -132,6 +132,19 @@
 	return has_full_constraints || of_have_populated_dt();
 }
 
+static bool regulator_ops_is_valid(struct regulator_dev *rdev, int ops)
+{
+	if (!rdev->constraints) {
+		rdev_err(rdev, "no constraints\n");
+		return false;
+	}
+
+	if (rdev->constraints->valid_ops_mask & ops)
+		return true;
+
+	return false;
+}
+
 static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev)
 {
 	if (rdev && rdev->supply)
@@ -198,28 +211,13 @@
 	return regnode;
 }
 
-static int _regulator_can_change_status(struct regulator_dev *rdev)
-{
-	if (!rdev->constraints)
-		return 0;
-
-	if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS)
-		return 1;
-	else
-		return 0;
-}
-
 /* Platform voltage constraint check */
 static int regulator_check_voltage(struct regulator_dev *rdev,
 				   int *min_uV, int *max_uV)
 {
 	BUG_ON(*min_uV > *max_uV);
 
-	if (!rdev->constraints) {
-		rdev_err(rdev, "no constraints\n");
-		return -ENODEV;
-	}
-	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+	if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
 		rdev_err(rdev, "voltage operation not allowed\n");
 		return -EPERM;
 	}
@@ -275,11 +273,7 @@
 {
 	BUG_ON(*min_uA > *max_uA);
 
-	if (!rdev->constraints) {
-		rdev_err(rdev, "no constraints\n");
-		return -ENODEV;
-	}
-	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
+	if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_CURRENT)) {
 		rdev_err(rdev, "current operation not allowed\n");
 		return -EPERM;
 	}
@@ -312,11 +306,7 @@
 		return -EINVAL;
 	}
 
-	if (!rdev->constraints) {
-		rdev_err(rdev, "no constraints\n");
-		return -ENODEV;
-	}
-	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
+	if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_MODE)) {
 		rdev_err(rdev, "mode operation not allowed\n");
 		return -EPERM;
 	}
@@ -333,20 +323,6 @@
 	return -EINVAL;
 }
 
-/* dynamic regulator mode switching constraint check */
-static int regulator_check_drms(struct regulator_dev *rdev)
-{
-	if (!rdev->constraints) {
-		rdev_err(rdev, "no constraints\n");
-		return -ENODEV;
-	}
-	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
-		rdev_dbg(rdev, "drms operation not allowed\n");
-		return -EPERM;
-	}
-	return 0;
-}
-
 static ssize_t regulator_uV_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -692,8 +668,7 @@
 	 * first check to see if we can set modes at all, otherwise just
 	 * tell the consumer everything is OK.
 	 */
-	err = regulator_check_drms(rdev);
-	if (err < 0)
+	if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
 		return 0;
 
 	if (!rdev->desc->ops->get_optimum_mode &&
@@ -808,8 +783,6 @@
 /* locks held by caller */
 static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
 {
-	lockdep_assert_held_once(&rdev->mutex);
-
 	if (!rdev->constraints)
 		return -EINVAL;
 
@@ -893,7 +866,7 @@
 	rdev_dbg(rdev, "%s\n", buf);
 
 	if ((constraints->min_uV != constraints->max_uV) &&
-	    !(constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE))
+	    !regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE))
 		rdev_warn(rdev,
 			  "Voltage range but no REGULATOR_CHANGE_VOLTAGE\n");
 }
@@ -906,7 +879,8 @@
 
 	/* do we need to apply the constraint voltage */
 	if (rdev->constraints->apply_uV &&
-	    rdev->constraints->min_uV == rdev->constraints->max_uV) {
+	    rdev->constraints->min_uV && rdev->constraints->max_uV) {
+		int target_min, target_max;
 		int current_uV = _regulator_get_voltage(rdev);
 		if (current_uV < 0) {
 			rdev_err(rdev,
@@ -914,15 +888,34 @@
 				 current_uV);
 			return current_uV;
 		}
-		if (current_uV < rdev->constraints->min_uV ||
-		    current_uV > rdev->constraints->max_uV) {
+
+		/*
+		 * If we're below the minimum voltage, move up to the
+		 * minimum voltage; if we're above the maximum voltage,
+		 * move down to the maximum.
+		 */
+		target_min = current_uV;
+		target_max = current_uV;
+
+		if (current_uV < rdev->constraints->min_uV) {
+			target_min = rdev->constraints->min_uV;
+			target_max = rdev->constraints->min_uV;
+		}
+
+		if (current_uV > rdev->constraints->max_uV) {
+			target_min = rdev->constraints->max_uV;
+			target_max = rdev->constraints->max_uV;
+		}
+
+		if (target_min != current_uV || target_max != current_uV) {
+			rdev_info(rdev, "Bringing %duV into %d-%duV\n",
+				  current_uV, target_min, target_max);
 			ret = _regulator_do_set_voltage(
-				rdev, rdev->constraints->min_uV,
-				rdev->constraints->max_uV);
+				rdev, target_min, target_max);
 			if (ret < 0) {
 				rdev_err(rdev,
-					"failed to apply %duV constraint(%d)\n",
-					rdev->constraints->min_uV, ret);
+					"failed to apply %d-%duV constraint(%d)\n",
+					target_min, target_max, ret);
 				return ret;
 			}
 		}
@@ -1150,17 +1143,6 @@
 		}
 	}
 
-	if (rdev->constraints->active_discharge && ops->set_active_discharge) {
-		bool ad_state = (rdev->constraints->active_discharge ==
-			      REGULATOR_ACTIVE_DISCHARGE_ENABLE) ? true : false;
-
-		ret = ops->set_active_discharge(rdev, ad_state);
-		if (ret < 0) {
-			rdev_err(rdev, "failed to set active discharge\n");
-			return ret;
-		}
-	}
-
 	print_constraints(rdev);
 	return 0;
 }
@@ -1272,6 +1254,55 @@
 	}
 }
 
+#ifdef CONFIG_DEBUG_FS
+static ssize_t constraint_flags_read_file(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	const struct regulator *regulator = file->private_data;
+	const struct regulation_constraints *c = regulator->rdev->constraints;
+	char *buf;
+	ssize_t ret;
+
+	if (!c)
+		return 0;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = snprintf(buf, PAGE_SIZE,
+			"always_on: %u\n"
+			"boot_on: %u\n"
+			"apply_uV: %u\n"
+			"ramp_disable: %u\n"
+			"soft_start: %u\n"
+			"pull_down: %u\n"
+			"over_current_protection: %u\n",
+			c->always_on,
+			c->boot_on,
+			c->apply_uV,
+			c->ramp_disable,
+			c->soft_start,
+			c->pull_down,
+			c->over_current_protection);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+	kfree(buf);
+
+	return ret;
+}
+
+#endif
+
+static const struct file_operations constraint_flags_fops = {
+#ifdef CONFIG_DEBUG_FS
+	.open = simple_open,
+	.read = constraint_flags_read_file,
+	.llseek = default_llseek,
+#endif
+};
+
 #define REG_STR_SIZE	64
 
 static struct regulator *create_regulator(struct regulator_dev *rdev,
@@ -1327,6 +1358,9 @@
 				   &regulator->min_uV);
 		debugfs_create_u32("max_uV", 0444, regulator->debugfs,
 				   &regulator->max_uV);
+		debugfs_create_file("constraint_flags", 0444,
+				    regulator->debugfs, regulator,
+				    &constraint_flags_fops);
 	}
 
 	/*
@@ -1334,7 +1368,7 @@
 	 * it is then we don't need to do nearly so much work for
 	 * enable/disable calls.
 	 */
-	if (!_regulator_can_change_status(rdev) &&
+	if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS) &&
 	    _regulator_is_enabled(rdev))
 		regulator->always_on = true;
 
@@ -1532,10 +1566,11 @@
 	}
 
 	/* Cascade always-on state to supply */
-	if (_regulator_is_enabled(rdev) && rdev->supply) {
+	if (_regulator_is_enabled(rdev)) {
 		ret = regulator_enable(rdev->supply);
 		if (ret < 0) {
 			_regulator_put(rdev->supply);
+			rdev->supply = NULL;
 			return ret;
 		}
 	}
@@ -2111,15 +2146,15 @@
 	lockdep_assert_held_once(&rdev->mutex);
 
 	/* check voltage and requested load before enabling */
-	if (rdev->constraints &&
-	    (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
+	if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
 		drms_uA_update(rdev);
 
 	if (rdev->use_count == 0) {
 		/* The regulator may be on if it's not switchable or left on */
 		ret = _regulator_is_enabled(rdev);
 		if (ret == -EINVAL || ret == 0) {
-			if (!_regulator_can_change_status(rdev))
+			if (!regulator_ops_is_valid(rdev,
+					REGULATOR_CHANGE_STATUS))
 				return -EPERM;
 
 			ret = _regulator_do_enable(rdev);
@@ -2221,7 +2256,7 @@
 	    (rdev->constraints && !rdev->constraints->always_on)) {
 
 		/* we are last user */
-		if (_regulator_can_change_status(rdev)) {
+		if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
 			ret = _notifier_call_chain(rdev,
 						   REGULATOR_EVENT_PRE_DISABLE,
 						   NULL);
@@ -2242,10 +2277,7 @@
 
 		rdev->use_count = 0;
 	} else if (rdev->use_count > 1) {
-
-		if (rdev->constraints &&
-			(rdev->constraints->valid_ops_mask &
-			REGULATOR_CHANGE_DRMS))
+		if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
 			drms_uA_update(rdev);
 
 		rdev->use_count--;
@@ -2489,8 +2521,7 @@
 {
 	struct regulator_dev	*rdev = regulator->rdev;
 
-	if (rdev->constraints &&
-	    (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+	if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
 		if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
 			return 1;
 
@@ -2644,7 +2675,7 @@
 	int i, voltages, ret;
 
 	/* If we can't change voltage check the current voltage */
-	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+	if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
 		ret = regulator_get_voltage(regulator);
 		if (ret >= 0)
 			return min_uV <= ret && ret <= max_uV;
@@ -2850,7 +2881,7 @@
 	 * return successfully even though the regulator does not support
 	 * changing the voltage.
 	 */
-	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+	if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
 		current_uV = _regulator_get_voltage(rdev);
 		if (min_uV <= current_uV && current_uV <= max_uV) {
 			regulator->min_uV = min_uV;
@@ -3109,6 +3140,23 @@
 static int _regulator_get_voltage(struct regulator_dev *rdev)
 {
 	int sel, ret;
+	bool bypassed;
+
+	if (rdev->desc->ops->get_bypass) {
+		ret = rdev->desc->ops->get_bypass(rdev, &bypassed);
+		if (ret < 0)
+			return ret;
+		if (bypassed) {
+			/* if bypassed, the regulator must have a supply */
+			if (!rdev->supply) {
+				rdev_err(rdev,
+					 "bypassed regulator has no supply!\n");
+				return -EPROBE_DEFER;
+			}
+
+			return _regulator_get_voltage(rdev->supply->rdev);
+		}
+	}
 
 	if (rdev->desc->ops->get_voltage_sel) {
 		sel = rdev->desc->ops->get_voltage_sel(rdev);
@@ -3365,8 +3413,7 @@
 	if (!rdev->desc->ops->set_bypass)
 		return 0;
 
-	if (rdev->constraints &&
-	    !(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_BYPASS))
+	if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_BYPASS))
 		return 0;
 
 	mutex_lock(&rdev->mutex);
@@ -3840,6 +3887,16 @@
 			   &rdev->bypass_count);
 }
 
+static int regulator_register_resolve_supply(struct device *dev, void *data)
+{
+	struct regulator_dev *rdev = dev_to_rdev(dev);
+
+	if (regulator_resolve_supply(rdev))
+		rdev_dbg(rdev, "unable to resolve supply\n");
+
+	return 0;
+}
+
 /**
  * regulator_register - register regulator
  * @regulator_desc: regulator to register
@@ -3911,8 +3968,6 @@
 		rdev->dev.of_node = of_node_get(config->of_node);
 	}
 
-	mutex_lock(&regulator_list_mutex);
-
 	mutex_init(&rdev->mutex);
 	rdev->reg_data = config->driver_data;
 	rdev->owner = regulator_desc->owner;
@@ -3937,7 +3992,9 @@
 
 	if ((config->ena_gpio || config->ena_gpio_initialized) &&
 	    gpio_is_valid(config->ena_gpio)) {
+		mutex_lock(&regulator_list_mutex);
 		ret = regulator_ena_gpio_request(rdev, config);
+		mutex_unlock(&regulator_list_mutex);
 		if (ret != 0) {
 			rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
 				 config->ena_gpio, ret);
@@ -3950,63 +4007,73 @@
 	rdev->dev.parent = dev;
 	dev_set_name(&rdev->dev, "regulator.%lu",
 		    (unsigned long) atomic_inc_return(&regulator_no));
-	ret = device_register(&rdev->dev);
-	if (ret != 0) {
-		put_device(&rdev->dev);
-		goto wash;
-	}
-
-	dev_set_drvdata(&rdev->dev, rdev);
 
 	/* set regulator constraints */
 	if (init_data)
 		constraints = &init_data->constraints;
 
-	ret = set_machine_constraints(rdev, constraints);
-	if (ret < 0)
-		goto scrub;
-
 	if (init_data && init_data->supply_regulator)
 		rdev->supply_name = init_data->supply_regulator;
 	else if (regulator_desc->supply_name)
 		rdev->supply_name = regulator_desc->supply_name;
 
+	/*
+	 * Attempt to resolve the regulator supply, if specified,
+	 * but don't return an error if we fail because we will try
+	 * to resolve it again later as more regulators are added.
+	 */
+	if (regulator_resolve_supply(rdev))
+		rdev_dbg(rdev, "unable to resolve supply\n");
+
+	ret = set_machine_constraints(rdev, constraints);
+	if (ret < 0)
+		goto wash;
+
 	/* add consumers devices */
 	if (init_data) {
+		mutex_lock(&regulator_list_mutex);
 		for (i = 0; i < init_data->num_consumer_supplies; i++) {
 			ret = set_consumer_device_supply(rdev,
 				init_data->consumer_supplies[i].dev_name,
 				init_data->consumer_supplies[i].supply);
 			if (ret < 0) {
+				mutex_unlock(&regulator_list_mutex);
 				dev_err(dev, "Failed to set supply %s\n",
 					init_data->consumer_supplies[i].supply);
 				goto unset_supplies;
 			}
 		}
+		mutex_unlock(&regulator_list_mutex);
 	}
 
+	ret = device_register(&rdev->dev);
+	if (ret != 0) {
+		put_device(&rdev->dev);
+		goto unset_supplies;
+	}
+
+	dev_set_drvdata(&rdev->dev, rdev);
 	rdev_init_debugfs(rdev);
-out:
-	mutex_unlock(&regulator_list_mutex);
+
+	/* try to resolve regulator supplies since a new one was registered */
+	class_for_each_device(&regulator_class, NULL, NULL,
+			      regulator_register_resolve_supply);
 	kfree(config);
 	return rdev;
 
 unset_supplies:
+	mutex_lock(&regulator_list_mutex);
 	unset_regulator_supplies(rdev);
-
-scrub:
-	regulator_ena_gpio_free(rdev);
-	device_unregister(&rdev->dev);
-	/* device core frees rdev */
-	rdev = ERR_PTR(ret);
-	goto out;
-
+	mutex_unlock(&regulator_list_mutex);
 wash:
+	kfree(rdev->constraints);
+	mutex_lock(&regulator_list_mutex);
 	regulator_ena_gpio_free(rdev);
+	mutex_unlock(&regulator_list_mutex);
 clean:
 	kfree(rdev);
-	rdev = ERR_PTR(ret);
-	goto out;
+	kfree(config);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(regulator_register);
 
@@ -4032,8 +4099,8 @@
 	WARN_ON(rdev->open_count);
 	unset_regulator_supplies(rdev);
 	list_del(&rdev->list);
-	mutex_unlock(&regulator_list_mutex);
 	regulator_ena_gpio_free(rdev);
+	mutex_unlock(&regulator_list_mutex);
 	device_unregister(&rdev->dev);
 }
 EXPORT_SYMBOL_GPL(regulator_unregister);
@@ -4386,7 +4453,7 @@
 	if (c && c->always_on)
 		return 0;
 
-	if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
+	if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS))
 		return 0;
 
 	mutex_lock(&rdev->mutex);
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index ed9e7e9..c6af343 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -900,4 +900,4 @@
 MODULE_AUTHOR("Krystian Garbaciak <krystian.garbaciak@diasemi.com>");
 MODULE_DESCRIPTION("DA9063 regulators driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("paltform:" DA9063_DRVNAME_REGULATORS);
+MODULE_ALIAS("platform:" DA9063_DRVNAME_REGULATORS);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 2cb5cc3..d7da81a 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -65,6 +65,13 @@
 	FAN53555_CHIP_ID_03,
 	FAN53555_CHIP_ID_04,
 	FAN53555_CHIP_ID_05,
+	FAN53555_CHIP_ID_08 = 8,
+};
+
+/* IC mask revision */
+enum {
+	FAN53555_CHIP_REV_00 = 0x3,
+	FAN53555_CHIP_REV_13 = 0xf,
 };
 
 enum {
@@ -217,9 +224,26 @@
 	/* Init voltage range and step */
 	switch (di->chip_id) {
 	case FAN53555_CHIP_ID_00:
+		switch (di->chip_rev) {
+		case FAN53555_CHIP_REV_00:
+			di->vsel_min = 600000;
+			di->vsel_step = 10000;
+			break;
+		case FAN53555_CHIP_REV_13:
+			di->vsel_min = 800000;
+			di->vsel_step = 10000;
+			break;
+		default:
+			dev_err(di->dev,
+				"Chip ID %d with rev %d not supported!\n",
+				di->chip_id, di->chip_rev);
+			return -EINVAL;
+		}
+		break;
 	case FAN53555_CHIP_ID_01:
 	case FAN53555_CHIP_ID_03:
 	case FAN53555_CHIP_ID_05:
+	case FAN53555_CHIP_ID_08:
 		di->vsel_min = 600000;
 		di->vsel_step = 10000;
 		break;
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index a8718e9..83e89e5 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -162,6 +162,8 @@
 	of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
 
 	config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
+	if (config->enable_gpio == -EPROBE_DEFER)
+		return ERR_PTR(-EPROBE_DEFER);
 
 	/* Fetch GPIOs. - optional property*/
 	ret = of_gpio_count(np);
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index b1e32e7..bcf38fd 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -460,7 +460,7 @@
 	if (ret != 0)
 		return ret;
 
-	*enable = val & rdev->desc->bypass_mask;
+	*enable = (val & rdev->desc->bypass_mask) == rdev->desc->bypass_val_on;
 
 	return 0;
 }
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 15c25c6..204b5c5 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -365,8 +365,8 @@
 	mutex_lock(&lp3971->io_lock);
 
 	ret = lp3971_i2c_read(lp3971->i2c, reg, 1, &tmp);
-	tmp = (tmp & ~mask) | val;
 	if (ret == 0) {
+		tmp = (tmp & ~mask) | val;
 		ret = lp3971_i2c_write(lp3971->i2c, reg, 1, &tmp);
 		dev_dbg(lp3971->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg,
 			(unsigned)val&0xff);
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 3a7e96e..ff0c275 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -211,8 +211,8 @@
 	mutex_lock(&lp3972->io_lock);
 
 	ret = lp3972_i2c_read(lp3972->i2c, reg, 1, &tmp);
-	tmp = (tmp & ~mask) | val;
 	if (ret == 0) {
+		tmp = (tmp & ~mask) | val;
 		ret = lp3972_i2c_write(lp3972->i2c, reg, 1, &tmp);
 		dev_dbg(lp3972->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg,
 			(unsigned)val & 0xff);
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
new file mode 100644
index 0000000..b4ffd11
--- /dev/null
+++ b/drivers/regulator/lp873x-regulator.c
@@ -0,0 +1,241 @@
+/*
+ * Regulator driver for LP873X PMIC
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lp873x.h>
+
+#define LP873X_REGULATOR(_name, _id, _of, _ops, _n, _vr, _vm, _er, _em, \
+			 _delay, _lr, _nlr, _cr)			\
+	[_id] = {							\
+		.desc = {						\
+			.name			= _name,		\
+			.id			= _id,			\
+			.of_match		= of_match_ptr(_of),	\
+			.regulators_node	= of_match_ptr("regulators"),\
+			.ops			= &_ops,		\
+			.n_voltages		= _n,			\
+			.type			= REGULATOR_VOLTAGE,	\
+			.owner			= THIS_MODULE,		\
+			.vsel_reg		= _vr,			\
+			.vsel_mask		= _vm,			\
+			.enable_reg		= _er,			\
+			.enable_mask		= _em,			\
+			.ramp_delay		= _delay,		\
+			.linear_ranges		= _lr,			\
+			.n_linear_ranges	= _nlr,			\
+		},							\
+		.ctrl2_reg = _cr,					\
+	}
+
+struct lp873x_regulator {
+	struct regulator_desc desc;
+	unsigned int ctrl2_reg;
+};
+
+static const struct lp873x_regulator regulators[];
+
+static const struct regulator_linear_range buck0_buck1_ranges[] = {
+	REGULATOR_LINEAR_RANGE(0, 0x0, 0x13, 0),
+	REGULATOR_LINEAR_RANGE(700000, 0x14, 0x17, 10000),
+	REGULATOR_LINEAR_RANGE(735000, 0x18, 0x9d, 5000),
+	REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000),
+};
+
+static const struct regulator_linear_range ldo0_ldo1_ranges[] = {
+	REGULATOR_LINEAR_RANGE(800000, 0x0, 0x19, 100000),
+};
+
+static unsigned int lp873x_buck_ramp_delay[] = {
+	30000, 15000, 10000, 7500, 3800, 1900, 940, 470
+};
+
+/* LP873X BUCK current limit */
+static const unsigned int lp873x_buck_uA[] = {
+	1500000, 2000000, 2500000, 3000000, 3500000, 4000000,
+};
+
+static int lp873x_buck_set_ramp_delay(struct regulator_dev *rdev,
+				      int ramp_delay)
+{
+	int id = rdev_get_id(rdev);
+	struct lp873x *lp873 = rdev_get_drvdata(rdev);
+	unsigned int reg;
+	int ret;
+
+	if (ramp_delay <= 470)
+		reg = 7;
+	else if (ramp_delay <= 940)
+		reg = 6;
+	else if (ramp_delay <= 1900)
+		reg = 5;
+	else if (ramp_delay <= 3800)
+		reg = 4;
+	else if (ramp_delay <= 7500)
+		reg = 3;
+	else if (ramp_delay <= 10000)
+		reg = 2;
+	else if (ramp_delay <= 15000)
+		reg = 1;
+	else
+		reg = 0;
+
+	ret = regmap_update_bits(lp873->regmap, regulators[id].ctrl2_reg,
+				 LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE,
+				 reg << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE));
+	if (ret) {
+		dev_err(lp873->dev, "SLEW RATE write failed: %d\n", ret);
+		return ret;
+	}
+
+	rdev->constraints->ramp_delay = lp873x_buck_ramp_delay[reg];
+
+	return 0;
+}
+
+static int lp873x_buck_set_current_limit(struct regulator_dev *rdev,
+					 int min_uA, int max_uA)
+{
+	int id = rdev_get_id(rdev);
+	struct lp873x *lp873 = rdev_get_drvdata(rdev);
+	int i;
+
+	for (i = ARRAY_SIZE(lp873x_buck_uA) - 1; i >= 0; i--) {
+		if (lp873x_buck_uA[i] >= min_uA &&
+		    lp873x_buck_uA[i] <= max_uA)
+			return regmap_update_bits(lp873->regmap,
+						  regulators[id].ctrl2_reg,
+						  LP873X_BUCK0_CTRL_2_BUCK0_ILIM,
+						  i << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM));
+	}
+
+	return -EINVAL;
+}
+
+static int lp873x_buck_get_current_limit(struct regulator_dev *rdev)
+{
+	int id = rdev_get_id(rdev);
+	struct lp873x *lp873 = rdev_get_drvdata(rdev);
+	int ret;
+	unsigned int val;
+
+	ret = regmap_read(lp873->regmap, regulators[id].ctrl2_reg, &val);
+	if (ret)
+		return ret;
+
+	val = (val & LP873X_BUCK0_CTRL_2_BUCK0_ILIM) >>
+	       __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM);
+
+	return (val < ARRAY_SIZE(lp873x_buck_uA)) ?
+			lp873x_buck_uA[val] : -EINVAL;
+}
+
+/* Operations permitted on BUCK0, BUCK1 */
+static struct regulator_ops lp873x_buck01_ops = {
+	.is_enabled		= regulator_is_enabled_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
+	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
+	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+	.list_voltage		= regulator_list_voltage_linear_range,
+	.map_voltage		= regulator_map_voltage_linear_range,
+	.set_voltage_time_sel	= regulator_set_voltage_time_sel,
+	.set_ramp_delay		= lp873x_buck_set_ramp_delay,
+	.set_current_limit	= lp873x_buck_set_current_limit,
+	.get_current_limit	= lp873x_buck_get_current_limit,
+};
+
+/* Operations permitted on LDO0 and LDO1 */
+static struct regulator_ops lp873x_ldo01_ops = {
+	.is_enabled		= regulator_is_enabled_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
+	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
+	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+	.list_voltage		= regulator_list_voltage_linear_range,
+	.map_voltage		= regulator_map_voltage_linear_range,
+};
+
+static const struct lp873x_regulator regulators[] = {
+	LP873X_REGULATOR("BUCK0", LP873X_BUCK_0, "buck0", lp873x_buck01_ops,
+			 256, LP873X_REG_BUCK0_VOUT,
+			 LP873X_BUCK0_VOUT_BUCK0_VSET, LP873X_REG_BUCK0_CTRL_1,
+			 LP873X_BUCK0_CTRL_1_BUCK0_EN, 10000,
+			 buck0_buck1_ranges, 4, LP873X_REG_BUCK0_CTRL_2),
+	LP873X_REGULATOR("BUCK1", LP873X_BUCK_1, "buck1", lp873x_buck01_ops,
+			 256, LP873X_REG_BUCK1_VOUT,
+			 LP873X_BUCK1_VOUT_BUCK1_VSET, LP873X_REG_BUCK1_CTRL_1,
+			 LP873X_BUCK1_CTRL_1_BUCK1_EN, 10000,
+			 buck0_buck1_ranges, 4, LP873X_REG_BUCK1_CTRL_2),
+	LP873X_REGULATOR("LDO0", LP873X_LDO_0, "ldo0", lp873x_ldo01_ops, 26,
+			 LP873X_REG_LDO0_VOUT, LP873X_LDO0_VOUT_LDO0_VSET,
+			 LP873X_REG_LDO0_CTRL,
+			 LP873X_LDO0_CTRL_LDO0_EN, 0, ldo0_ldo1_ranges, 1,
+			 0xFF),
+	LP873X_REGULATOR("LDO1", LP873X_LDO_1, "ldo1", lp873x_ldo01_ops, 26,
+			 LP873X_REG_LDO1_VOUT, LP873X_LDO1_VOUT_LDO1_VSET,
+			 LP873X_REG_LDO1_CTRL,
+			 LP873X_LDO1_CTRL_LDO1_EN, 0, ldo0_ldo1_ranges, 1,
+			 0xFF),
+};
+
+static int lp873x_regulator_probe(struct platform_device *pdev)
+{
+	struct lp873x *lp873 = dev_get_drvdata(pdev->dev.parent);
+	struct regulator_config config = { };
+	struct regulator_dev *rdev;
+	int i;
+
+	platform_set_drvdata(pdev, lp873);
+
+	config.dev = &pdev->dev;
+	config.dev->of_node = lp873->dev->of_node;
+	config.driver_data = lp873;
+	config.regmap = lp873->regmap;
+
+	for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+		rdev = devm_regulator_register(&pdev->dev, &regulators[i].desc,
+					       &config);
+		if (IS_ERR(rdev)) {
+			dev_err(lp873->dev, "failed to register %s regulator\n",
+				pdev->name);
+			return PTR_ERR(rdev);
+		}
+	}
+
+	return 0;
+}
+
+static const struct platform_device_id lp873x_regulator_id_table[] = {
+	{ "lp873x-regulator", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, lp873x_regulator_id_table);
+
+static struct platform_driver lp873x_regulator_driver = {
+	.driver = {
+		.name = "lp873x-pmic",
+	},
+	.probe = lp873x_regulator_probe,
+	.id_table = lp873x_regulator_id_table,
+};
+module_platform_driver(lp873x_regulator_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("LP873X voltage regulator driver");
+MODULE_ALIAS("platform:lp873x-pmic");
+MODULE_LICENSE("GPL v2");
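
The new lp873x driver describes the BUCK0/BUCK1 output with the four linear ranges above and lets the core's regulator_list_voltage_linear_range() do the selector-to-voltage math. A standalone sketch of that mapping, with the range values copied from the table (struct and function names here are illustrative):

	struct range { unsigned int min_sel, max_sel; int min_uV, step_uV; };

	static const struct range buck_ranges[] = {
		{ 0x00, 0x13,       0,     0 },
		{ 0x14, 0x17,  700000, 10000 },
		{ 0x18, 0x9d,  735000,  5000 },
		{ 0x9e, 0xff, 1420000, 20000 },
	};

	static int buck_sel_to_uV(unsigned int sel)
	{
		int i;

		for (i = 0; i < 4; i++)
			if (sel >= buck_ranges[i].min_sel && sel <= buck_ranges[i].max_sel)
				return buck_ranges[i].min_uV +
				       (sel - buck_ranges[i].min_sel) * buck_ranges[i].step_uV;
		return -1;	/* selector out of range */
	}

	/* e.g. buck_sel_to_uV(0x18) == 735000, buck_sel_to_uV(0x19) == 740000 */
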
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577-regulator.c
similarity index 100%
rename from drivers/regulator/max14577.c
rename to drivers/regulator/max14577-regulator.c
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 73a3356..321e804 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -81,6 +81,7 @@
 	int suspend_fps_pd_slot;
 	int suspend_fps_pu_slot;
 	int current_mode;
+	int ramp_rate_setting;
 };
 
 struct max77620_regulator {
@@ -307,6 +308,43 @@
 	return 0;
 }
 
+static int max77620_set_slew_rate(struct max77620_regulator *pmic, int id,
+				  int slew_rate)
+{
+	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+	unsigned int val;
+	int ret;
+	u8 mask;
+
+	if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) {
+		if (slew_rate <= 13750)
+			val = 0;
+		else if (slew_rate <= 27500)
+			val = 1;
+		else if (slew_rate <= 55000)
+			val = 2;
+		else
+			val = 3;
+		val <<= MAX77620_SD_SR_SHIFT;
+		mask = MAX77620_SD_SR_MASK;
+	} else {
+		if (slew_rate <= 5000)
+			val = 1;
+		else
+			val = 0;
+		mask = MAX77620_LDO_SLEW_RATE_MASK;
+	}
+
+	ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr, mask, val);
+	if (ret < 0) {
+		dev_err(pmic->dev, "Regulator %d slew rate set failed: %d\n",
+			id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static int max77620_init_pmic(struct max77620_regulator *pmic, int id)
 {
 	struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
@@ -351,6 +389,13 @@
 	if (ret < 0)
 		return ret;
 
+	if (rpdata->ramp_rate_setting) {
+		ret = max77620_set_slew_rate(pmic, id,
+					     rpdata->ramp_rate_setting);
+		if (ret < 0)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -502,35 +547,16 @@
 {
 	struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
 	int id = rdev_get_id(rdev);
-	struct max77620_regulator_info *rinfo = pmic->rinfo[id];
-	int ret, val;
-	u8 mask;
+	struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
 
-	if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) {
-		if (ramp_delay <= 13750)
-			val = 0;
-		else if (ramp_delay <= 27500)
-			val = 1;
-		else if (ramp_delay <= 55000)
-			val = 2;
-		else
-			val = 3;
-		val <<= MAX77620_SD_SR_SHIFT;
-		mask = MAX77620_SD_SR_MASK;
-	} else {
-		if (ramp_delay <= 5000)
-			val = 1;
-		else
-			val = 0;
-		mask = MAX77620_LDO_SLEW_RATE_MASK;
-	}
+	/* A device-specific ramp rate setting means the platform's actual
+	 * ramp rate differs from the advertised value. In that case, do
+	 * not configure anything and just return success.
+	 */
+	if (rpdata->ramp_rate_setting)
+		return 0;
 
-	ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr, mask, val);
-	if (ret < 0)
-		dev_err(pmic->dev, "Reg 0x%02x update failed: %d\n",
-			rinfo->cfg_addr, ret);
-
-	return ret;
+	return max77620_set_slew_rate(pmic, id, ramp_delay);
 }
 
 static int max77620_of_parse_cb(struct device_node *np,
@@ -563,6 +589,9 @@
 			np, "maxim,suspend-fps-power-down-slot", &pval);
 	rpdata->suspend_fps_pd_slot = (!ret) ? pval : -1;
 
+	ret = of_property_read_u32(np, "maxim,ramp-rate-setting", &pval);
+	rpdata->ramp_rate_setting = (!ret) ? pval : 0;
+
 	return max77620_init_pmic(pmic, desc->id);
 }
 
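
The max77620 change above splits slew-rate programming into max77620_set_slew_rate() and adds a DT override, "maxim,ramp-rate-setting": when the property is present, the register is programmed once at init and the generic set_ramp_delay callback becomes a no-op, since the board's real ramp rate differs from what the register value advertises. For SD rails the encoding used by the new helper is (values in uV/us, taken from the hunk):

	static unsigned int sd_slew_rate_field(int slew_rate)
	{
		if (slew_rate <= 13750)
			return 0;
		if (slew_rate <= 27500)
			return 1;
		if (slew_rate <= 55000)
			return 2;
		return 3;
	}

The returned value is then shifted by MAX77620_SD_SR_SHIFT and written under MAX77620_SD_SR_MASK, as in the driver code above.
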
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index 17ccf36..ac4fa58 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -41,6 +41,8 @@
 #define MAX77686_LDO_LOW_UVSTEP	25000
 #define MAX77686_BUCK_MINUV	750000
 #define MAX77686_BUCK_UVSTEP	50000
+#define MAX77686_BUCK_ENABLE_TIME	40		/* us */
+#define MAX77686_DVS_ENABLE_TIME	22		/* us */
 #define MAX77686_RAMP_DELAY	100000			/* uV/us */
 #define MAX77686_DVS_RAMP_DELAY	27500			/* uV/us */
 #define MAX77686_DVS_MINUV	600000
@@ -422,6 +424,7 @@
 	.min_uV		= MAX77686_BUCK_MINUV,				\
 	.uV_step	= MAX77686_BUCK_UVSTEP,				\
 	.ramp_delay	= MAX77686_RAMP_DELAY,				\
+	.enable_time	= MAX77686_BUCK_ENABLE_TIME,			\
 	.n_voltages	= MAX77686_VSEL_MASK + 1,			\
 	.vsel_reg	= MAX77686_REG_BUCK5OUT + (num - 5) * 2,	\
 	.vsel_mask	= MAX77686_VSEL_MASK,				\
@@ -439,6 +442,7 @@
 	.min_uV		= MAX77686_BUCK_MINUV,				\
 	.uV_step	= MAX77686_BUCK_UVSTEP,				\
 	.ramp_delay	= MAX77686_RAMP_DELAY,				\
+	.enable_time	= MAX77686_BUCK_ENABLE_TIME,			\
 	.n_voltages	= MAX77686_VSEL_MASK + 1,			\
 	.vsel_reg	= MAX77686_REG_BUCK1OUT,			\
 	.vsel_mask	= MAX77686_VSEL_MASK,				\
@@ -456,6 +460,7 @@
 	.min_uV		= MAX77686_DVS_MINUV,				\
 	.uV_step	= MAX77686_DVS_UVSTEP,				\
 	.ramp_delay	= MAX77686_DVS_RAMP_DELAY,			\
+	.enable_time	= MAX77686_DVS_ENABLE_TIME,			\
 	.n_voltages	= MAX77686_DVS_VSEL_MASK + 1,			\
 	.vsel_reg	= MAX77686_REG_BUCK2DVS1 + (num - 2) * 10,	\
 	.vsel_mask	= MAX77686_DVS_VSEL_MASK,			\
@@ -553,17 +558,7 @@
 	.id_table = max77686_pmic_id,
 };
 
-static int __init max77686_pmic_init(void)
-{
-	return platform_driver_register(&max77686_pmic_driver);
-}
-subsys_initcall(max77686_pmic_init);
-
-static void __exit max77686_pmic_cleanup(void)
-{
-	platform_driver_unregister(&max77686_pmic_driver);
-}
-module_exit(max77686_pmic_cleanup);
+module_platform_driver(max77686_pmic_driver);
 
 MODULE_DESCRIPTION("MAXIM 77686 Regulator Driver");
 MODULE_AUTHOR("Chiwoong Byun <woong.byun@samsung.com>");
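
The max77686 boilerplate removal relies on the module_platform_driver() helper, which generates the init/exit pair that was previously open-coded. Roughly, the macro expands to something like the following (a sketch only; the real definition lives in the platform_device headers). Note that the removed code used subsys_initcall, so this also moves registration to the normal module_init level:

	static int __init max77686_pmic_driver_init(void)
	{
		return platform_driver_register(&max77686_pmic_driver);
	}
	module_init(max77686_pmic_driver_init);

	static void __exit max77686_pmic_driver_exit(void)
	{
		platform_driver_unregister(&max77686_pmic_driver);
	}
	module_exit(max77686_pmic_driver_exit);
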
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693-regulator.c
similarity index 100%
rename from drivers/regulator/max77693.c
rename to drivers/regulator/max77693-regulator.c
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index c07ee13..1d35393 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -5,7 +5,7 @@
  * Simon Glass <sjg@chromium.org>
  *
  * Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@smasung.com>
+ * Chiwoong Byun <woong.byun@samsung.com>
  * Jonghwa Lee <jonghwa3.lee@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 5b75b7c..08d2f13 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -38,6 +38,9 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/regmap.h>
+#include <linux/thermal.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
 
 /* Register definitions */
 #define MAX8973_VOUT					0x0
@@ -74,6 +77,7 @@
 #define MAX8973_WDTMR_ENABLE				BIT(6)
 #define MAX8973_DISCH_ENBABLE				BIT(5)
 #define MAX8973_FT_ENABLE				BIT(4)
+#define MAX77621_T_JUNCTION_120				BIT(7)
 
 #define MAX8973_CKKADV_TRIP_MASK			0xC
 #define MAX8973_CKKADV_TRIP_DISABLE			0xC
@@ -93,6 +97,12 @@
 #define MAX8973_VOLATGE_STEP				6250
 #define MAX8973_BUCK_N_VOLTAGE				0x80
 
+#define MAX77621_CHIPID_TJINT_S				BIT(0)
+
+#define MAX77621_NORMAL_OPERATING_TEMP			100000
+#define MAX77621_TJINT_WARNING_TEMP_120			120000
+#define MAX77621_TJINT_WARNING_TEMP_140			140000
+
 enum device_id {
 	MAX8973,
 	MAX77621
@@ -112,6 +122,9 @@
 	int curr_gpio_val;
 	struct regulator_ops ops;
 	enum device_id id;
+	int junction_temp_warning;
+	int irq;
+	struct thermal_zone_device *tz_device;
 };
 
 /*
@@ -391,6 +404,10 @@
 	if (pdata->control_flags & MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE)
 		control1 |= MAX8973_FREQSHIFT_9PER;
 
+	if ((pdata->junction_temp_warning == MAX77621_TJINT_WARNING_TEMP_120) &&
+	    (max->id == MAX77621))
+		control2 |= MAX77621_T_JUNCTION_120;
+
 	if (!(pdata->control_flags & MAX8973_CONTROL_PULL_DOWN_ENABLE))
 		control2 |= MAX8973_DISCH_ENBABLE;
 
@@ -457,6 +474,79 @@
 	return ret;
 }
 
+static int max8973_thermal_read_temp(void *data, int *temp)
+{
+	struct max8973_chip *mchip = data;
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(mchip->regmap, MAX8973_CHIPID1, &val);
+	if (ret < 0) {
+		dev_err(mchip->dev, "Failed to read register CHIPID1, %d", ret);
+		return ret;
+	}
+
+	/* +1 degC to trigger cool device */
+	if (val & MAX77621_CHIPID_TJINT_S)
+		*temp = mchip->junction_temp_warning + 1000;
+	else
+		*temp = MAX77621_NORMAL_OPERATING_TEMP;
+
+	return 0;
+}
+
+static irqreturn_t max8973_thermal_irq(int irq, void *data)
+{
+	struct max8973_chip *mchip = data;
+
+	thermal_zone_device_update(mchip->tz_device);
+
+	return IRQ_HANDLED;
+}
+
+static const struct thermal_zone_of_device_ops max77621_tz_ops = {
+	.get_temp = max8973_thermal_read_temp,
+};
+
+static int max8973_thermal_init(struct max8973_chip *mchip)
+{
+	struct thermal_zone_device *tzd;
+	struct irq_data *irq_data;
+	unsigned long irq_flags = 0;
+	int ret;
+
+	if (mchip->id != MAX77621)
+		return 0;
+
+	tzd = devm_thermal_zone_of_sensor_register(mchip->dev, 0, mchip,
+						   &max77621_tz_ops);
+	if (IS_ERR(tzd)) {
+		ret = PTR_ERR(tzd);
+		dev_err(mchip->dev, "Failed to register thermal sensor: %d\n",
+			ret);
+		return ret;
+	}
+
+	if (mchip->irq <= 0)
+		return 0;
+
+	irq_data = irq_get_irq_data(mchip->irq);
+	if (irq_data)
+		irq_flags = irqd_get_trigger_type(irq_data);
+
+	ret = devm_request_threaded_irq(mchip->dev, mchip->irq, NULL,
+					max8973_thermal_irq,
+					IRQF_ONESHOT | IRQF_SHARED | irq_flags,
+					dev_name(mchip->dev), mchip);
+	if (ret < 0) {
+		dev_err(mchip->dev, "Failed to request irq %d, %d\n",
+			mchip->irq, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static const struct regmap_config max8973_regmap_config = {
 	.reg_bits		= 8,
 	.val_bits		= 8,
@@ -521,6 +611,11 @@
 		pdata->control_flags |= MAX8973_CONTROL_CLKADV_TRIP_DISABLED;
 	}
 
+	pdata->junction_temp_warning = MAX77621_TJINT_WARNING_TEMP_140;
+	ret = of_property_read_u32(np, "junction-warn-millicelsius", &pval);
+	if (!ret && (pval <= MAX77621_TJINT_WARNING_TEMP_120))
+		pdata->junction_temp_warning = MAX77621_TJINT_WARNING_TEMP_120;
+
 	return pdata;
 }
 
@@ -608,6 +703,7 @@
 	max->enable_external_control = pdata->enable_ext_control;
 	max->curr_gpio_val = pdata->dvs_def_state;
 	max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state;
+	max->junction_temp_warning = pdata->junction_temp_warning;
 
 	if (gpio_is_valid(max->enable_gpio))
 		max->enable_external_control = true;
@@ -718,6 +814,7 @@
 		return ret;
 	}
 
+	max8973_thermal_init(max);
 	return 0;
 }
 
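
The max8973/max77621 change adds a junction-temperature warning: a thermal zone sensor reads the TJINT status bit in CHIPID1 and reports either the configured warning threshold (plus 1 degC, so a trip fires) or a nominal 100 degC operating temperature. The warning level itself comes from the new "junction-warn-millicelsius" property; the part only supports 120 degC and 140 degC, so the parsing reduces to the following sketch (constants written out for clarity):

	int warn = 140000;	/* default: MAX77621_TJINT_WARNING_TEMP_140 */
	u32 pval;

	if (!of_property_read_u32(np, "junction-warn-millicelsius", &pval) &&
	    pval <= 120000)
		warn = 120000;	/* MAX77621_TJINT_WARNING_TEMP_120 */
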
diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c
new file mode 100644
index 0000000..efabc0e
--- /dev/null
+++ b/drivers/regulator/max8997-regulator.c
@@ -0,0 +1,1241 @@
+/*
+ * max8997.c - Regulator driver for the Maxim 8997/8966
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This driver is based on max8998.c
+ */
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+#include <linux/regulator/of_regulator.h>
+
+struct max8997_data {
+	struct device *dev;
+	struct max8997_dev *iodev;
+	int num_regulators;
+	int ramp_delay; /* in mV/us */
+
+	bool buck1_gpiodvs;
+	bool buck2_gpiodvs;
+	bool buck5_gpiodvs;
+	u8 buck1_vol[8];
+	u8 buck2_vol[8];
+	u8 buck5_vol[8];
+	int buck125_gpios[3];
+	int buck125_gpioindex;
+	bool ignore_gpiodvs_side_effect;
+
+	u8 saved_states[MAX8997_REG_MAX];
+};
+
+static const unsigned int safeoutvolt[] = {
+	4850000,
+	4900000,
+	4950000,
+	3300000,
+};
+
+static inline void max8997_set_gpio(struct max8997_data *max8997)
+{
+	int set3 = (max8997->buck125_gpioindex) & 0x1;
+	int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
+	int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
+
+	gpio_set_value(max8997->buck125_gpios[0], set1);
+	gpio_set_value(max8997->buck125_gpios[1], set2);
+	gpio_set_value(max8997->buck125_gpios[2], set3);
+}
+
+struct voltage_map_desc {
+	int min;
+	int max;
+	int step;
+};
+
+/* Voltage maps in uV */
+static const struct voltage_map_desc ldo_voltage_map_desc = {
+	.min = 800000,	.max = 3950000,	.step = 50000,
+}; /* LDO1 ~ 18, 21 all */
+
+static const struct voltage_map_desc buck1245_voltage_map_desc = {
+	.min = 650000,	.max = 2225000,	.step = 25000,
+}; /* Buck1, 2, 4, 5 */
+
+static const struct voltage_map_desc buck37_voltage_map_desc = {
+	.min = 750000,	.max = 3900000,	.step = 50000,
+}; /* Buck3, 7 */
+
+/* current map in uA */
+static const struct voltage_map_desc charger_current_map_desc = {
+	.min = 200000,	.max = 950000,	.step = 50000,
+};
+
+static const struct voltage_map_desc topoff_current_map_desc = {
+	.min = 50000,	.max = 200000,	.step = 10000,
+};
+
+static const struct voltage_map_desc *reg_voltage_map[] = {
+	[MAX8997_LDO1] = &ldo_voltage_map_desc,
+	[MAX8997_LDO2] = &ldo_voltage_map_desc,
+	[MAX8997_LDO3] = &ldo_voltage_map_desc,
+	[MAX8997_LDO4] = &ldo_voltage_map_desc,
+	[MAX8997_LDO5] = &ldo_voltage_map_desc,
+	[MAX8997_LDO6] = &ldo_voltage_map_desc,
+	[MAX8997_LDO7] = &ldo_voltage_map_desc,
+	[MAX8997_LDO8] = &ldo_voltage_map_desc,
+	[MAX8997_LDO9] = &ldo_voltage_map_desc,
+	[MAX8997_LDO10] = &ldo_voltage_map_desc,
+	[MAX8997_LDO11] = &ldo_voltage_map_desc,
+	[MAX8997_LDO12] = &ldo_voltage_map_desc,
+	[MAX8997_LDO13] = &ldo_voltage_map_desc,
+	[MAX8997_LDO14] = &ldo_voltage_map_desc,
+	[MAX8997_LDO15] = &ldo_voltage_map_desc,
+	[MAX8997_LDO16] = &ldo_voltage_map_desc,
+	[MAX8997_LDO17] = &ldo_voltage_map_desc,
+	[MAX8997_LDO18] = &ldo_voltage_map_desc,
+	[MAX8997_LDO21] = &ldo_voltage_map_desc,
+	[MAX8997_BUCK1] = &buck1245_voltage_map_desc,
+	[MAX8997_BUCK2] = &buck1245_voltage_map_desc,
+	[MAX8997_BUCK3] = &buck37_voltage_map_desc,
+	[MAX8997_BUCK4] = &buck1245_voltage_map_desc,
+	[MAX8997_BUCK5] = &buck1245_voltage_map_desc,
+	[MAX8997_BUCK6] = NULL,
+	[MAX8997_BUCK7] = &buck37_voltage_map_desc,
+	[MAX8997_EN32KHZ_AP] = NULL,
+	[MAX8997_EN32KHZ_CP] = NULL,
+	[MAX8997_ENVICHG] = NULL,
+	[MAX8997_ESAFEOUT1] = NULL,
+	[MAX8997_ESAFEOUT2] = NULL,
+	[MAX8997_CHARGER_CV] = NULL,
+	[MAX8997_CHARGER] = &charger_current_map_desc,
+	[MAX8997_CHARGER_TOPOFF] = &topoff_current_map_desc,
+};
+
+static int max8997_list_voltage_charger_cv(struct regulator_dev *rdev,
+		unsigned int selector)
+{
+	int rid = rdev_get_id(rdev);
+
+	if (rid != MAX8997_CHARGER_CV)
+		goto err;
+
+	switch (selector) {
+	case 0x00:
+		return 4200000;
+	case 0x01 ... 0x0E:
+		return 4000000 + 20000 * (selector - 0x01);
+	case 0x0F:
+		return 4350000;
+	default:
+		return -EINVAL;
+	}
+err:
+	return -EINVAL;
+}
+
+static int max8997_list_voltage(struct regulator_dev *rdev,
+		unsigned int selector)
+{
+	const struct voltage_map_desc *desc;
+	int rid = rdev_get_id(rdev);
+	int val;
+
+	if (rid >= ARRAY_SIZE(reg_voltage_map) ||
+			rid < 0)
+		return -EINVAL;
+
+	desc = reg_voltage_map[rid];
+	if (desc == NULL)
+		return -EINVAL;
+
+	val = desc->min + desc->step * selector;
+	if (val > desc->max)
+		return -EINVAL;
+
+	return val;
+}
+
+static int max8997_get_enable_register(struct regulator_dev *rdev,
+		int *reg, int *mask, int *pattern)
+{
+	int rid = rdev_get_id(rdev);
+
+	switch (rid) {
+	case MAX8997_LDO1 ... MAX8997_LDO21:
+		*reg = MAX8997_REG_LDO1CTRL + (rid - MAX8997_LDO1);
+		*mask = 0xC0;
+		*pattern = 0xC0;
+		break;
+	case MAX8997_BUCK1:
+		*reg = MAX8997_REG_BUCK1CTRL;
+		*mask = 0x01;
+		*pattern = 0x01;
+		break;
+	case MAX8997_BUCK2:
+		*reg = MAX8997_REG_BUCK2CTRL;
+		*mask = 0x01;
+		*pattern = 0x01;
+		break;
+	case MAX8997_BUCK3:
+		*reg = MAX8997_REG_BUCK3CTRL;
+		*mask = 0x01;
+		*pattern = 0x01;
+		break;
+	case MAX8997_BUCK4:
+		*reg = MAX8997_REG_BUCK4CTRL;
+		*mask = 0x01;
+		*pattern = 0x01;
+		break;
+	case MAX8997_BUCK5:
+		*reg = MAX8997_REG_BUCK5CTRL;
+		*mask = 0x01;
+		*pattern = 0x01;
+		break;
+	case MAX8997_BUCK6:
+		*reg = MAX8997_REG_BUCK6CTRL;
+		*mask = 0x01;
+		*pattern = 0x01;
+		break;
+	case MAX8997_BUCK7:
+		*reg = MAX8997_REG_BUCK7CTRL;
+		*mask = 0x01;
+		*pattern = 0x01;
+		break;
+	case MAX8997_EN32KHZ_AP ... MAX8997_EN32KHZ_CP:
+		*reg = MAX8997_REG_MAINCON1;
+		*mask = 0x01 << (rid - MAX8997_EN32KHZ_AP);
+		*pattern = 0x01 << (rid - MAX8997_EN32KHZ_AP);
+		break;
+	case MAX8997_ENVICHG:
+		*reg = MAX8997_REG_MBCCTRL1;
+		*mask = 0x80;
+		*pattern = 0x80;
+		break;
+	case MAX8997_ESAFEOUT1 ... MAX8997_ESAFEOUT2:
+		*reg = MAX8997_REG_SAFEOUTCTRL;
+		*mask = 0x40 << (rid - MAX8997_ESAFEOUT1);
+		*pattern = 0x40 << (rid - MAX8997_ESAFEOUT1);
+		break;
+	case MAX8997_CHARGER:
+		*reg = MAX8997_REG_MBCCTRL2;
+		*mask = 0x40;
+		*pattern = 0x40;
+		break;
+	default:
+		/* Not controllable or does not exist */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int max8997_reg_is_enabled(struct regulator_dev *rdev)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	struct i2c_client *i2c = max8997->iodev->i2c;
+	int ret, reg, mask, pattern;
+	u8 val;
+
+	ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+	if (ret)
+		return ret;
+
+	ret = max8997_read_reg(i2c, reg, &val);
+	if (ret)
+		return ret;
+
+	return (val & mask) == pattern;
+}
+
+static int max8997_reg_enable(struct regulator_dev *rdev)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	struct i2c_client *i2c = max8997->iodev->i2c;
+	int ret, reg, mask, pattern;
+
+	ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+	if (ret)
+		return ret;
+
+	return max8997_update_reg(i2c, reg, pattern, mask);
+}
+
+static int max8997_reg_disable(struct regulator_dev *rdev)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	struct i2c_client *i2c = max8997->iodev->i2c;
+	int ret, reg, mask, pattern;
+
+	ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+	if (ret)
+		return ret;
+
+	return max8997_update_reg(i2c, reg, ~pattern, mask);
+}
+
+static int max8997_get_voltage_register(struct regulator_dev *rdev,
+		int *_reg, int *_shift, int *_mask)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	int rid = rdev_get_id(rdev);
+	int reg, shift = 0, mask = 0x3f;
+
+	switch (rid) {
+	case MAX8997_LDO1 ... MAX8997_LDO21:
+		reg = MAX8997_REG_LDO1CTRL + (rid - MAX8997_LDO1);
+		break;
+	case MAX8997_BUCK1:
+		reg = MAX8997_REG_BUCK1DVS1;
+		if (max8997->buck1_gpiodvs)
+			reg += max8997->buck125_gpioindex;
+		break;
+	case MAX8997_BUCK2:
+		reg = MAX8997_REG_BUCK2DVS1;
+		if (max8997->buck2_gpiodvs)
+			reg += max8997->buck125_gpioindex;
+		break;
+	case MAX8997_BUCK3:
+		reg = MAX8997_REG_BUCK3DVS;
+		break;
+	case MAX8997_BUCK4:
+		reg = MAX8997_REG_BUCK4DVS;
+		break;
+	case MAX8997_BUCK5:
+		reg = MAX8997_REG_BUCK5DVS1;
+		if (max8997->buck5_gpiodvs)
+			reg += max8997->buck125_gpioindex;
+		break;
+	case MAX8997_BUCK7:
+		reg = MAX8997_REG_BUCK7DVS;
+		break;
+	case MAX8997_ESAFEOUT1 ...  MAX8997_ESAFEOUT2:
+		reg = MAX8997_REG_SAFEOUTCTRL;
+		shift = (rid == MAX8997_ESAFEOUT2) ? 2 : 0;
+		mask = 0x3;
+		break;
+	case MAX8997_CHARGER_CV:
+		reg = MAX8997_REG_MBCCTRL3;
+		shift = 0;
+		mask = 0xf;
+		break;
+	case MAX8997_CHARGER:
+		reg = MAX8997_REG_MBCCTRL4;
+		shift = 0;
+		mask = 0xf;
+		break;
+	case MAX8997_CHARGER_TOPOFF:
+		reg = MAX8997_REG_MBCCTRL5;
+		shift = 0;
+		mask = 0xf;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*_reg = reg;
+	*_shift = shift;
+	*_mask = mask;
+
+	return 0;
+}
+
+static int max8997_get_voltage_sel(struct regulator_dev *rdev)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	struct i2c_client *i2c = max8997->iodev->i2c;
+	int reg, shift, mask, ret;
+	u8 val;
+
+	ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+	if (ret)
+		return ret;
+
+	ret = max8997_read_reg(i2c, reg, &val);
+	if (ret)
+		return ret;
+
+	val >>= shift;
+	val &= mask;
+
+	return val;
+}
+
+static inline int max8997_get_voltage_proper_val(
+		const struct voltage_map_desc *desc,
+		int min_vol, int max_vol)
+{
+	int i;
+
+	if (desc == NULL)
+		return -EINVAL;
+
+	if (max_vol < desc->min || min_vol > desc->max)
+		return -EINVAL;
+
+	if (min_vol < desc->min)
+		min_vol = desc->min;
+
+	i = DIV_ROUND_UP(min_vol - desc->min, desc->step);
+
+	if (desc->min + desc->step * i > max_vol)
+		return -EINVAL;
+
+	return i;
+}
+
+static int max8997_set_voltage_charger_cv(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	struct i2c_client *i2c = max8997->iodev->i2c;
+	int rid = rdev_get_id(rdev);
+	int lb, ub;
+	int reg, shift = 0, mask, ret = 0;
+	u8 val = 0x0;
+
+	if (rid != MAX8997_CHARGER_CV)
+		return -EINVAL;
+
+	ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+	if (ret)
+		return ret;
+
+	if (max_uV < 4000000 || min_uV > 4350000)
+		return -EINVAL;
+
+	if (min_uV <= 4000000) {
+		if (max_uV >= 4000000)
+			return -EINVAL;
+		else
+			val = 0x1;
+	} else if (min_uV <= 4200000 && max_uV >= 4200000)
+		val = 0x0;
+	else {
+		lb = (min_uV - 4000001) / 20000 + 2;
+		ub = (max_uV - 4000000) / 20000 + 1;
+
+		if (lb > ub)
+			return -EINVAL;
+
+		if (lb < 0xf)
+			val = lb;
+		else {
+			if (ub >= 0xf)
+				val = 0xf;
+			else
+				return -EINVAL;
+		}
+	}
+
+	*selector = val;
+
+	ret = max8997_update_reg(i2c, reg, val << shift, mask);
+
+	return ret;
+}
+
+/*
+ * For LDO1 ~ LDO21, BUCK1~5, BUCK7, CHARGER, CHARGER_TOPOFF
+ * BUCK1, 2, and 5 are available if they are not controlled by gpio
+ */
+static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	struct i2c_client *i2c = max8997->iodev->i2c;
+	const struct voltage_map_desc *desc;
+	int rid = rdev_get_id(rdev);
+	int i, reg, shift, mask, ret;
+
+	switch (rid) {
+	case MAX8997_LDO1 ... MAX8997_LDO21:
+		break;
+	case MAX8997_BUCK1 ... MAX8997_BUCK5:
+		break;
+	case MAX8997_BUCK6:
+		return -EINVAL;
+	case MAX8997_BUCK7:
+		break;
+	case MAX8997_CHARGER:
+		break;
+	case MAX8997_CHARGER_TOPOFF:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	desc = reg_voltage_map[rid];
+
+	i = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
+	if (i < 0)
+		return i;
+
+	ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+	if (ret)
+		return ret;
+
+	ret = max8997_update_reg(i2c, reg, i << shift, mask << shift);
+	*selector = i;
+
+	return ret;
+}
+
+static int max8997_set_voltage_buck_time_sel(struct regulator_dev *rdev,
+						unsigned int old_selector,
+						unsigned int new_selector)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	int rid = rdev_get_id(rdev);
+	const struct voltage_map_desc *desc = reg_voltage_map[rid];
+
+	/* Delay is required only if the voltage is increasing */
+	if (old_selector >= new_selector)
+		return 0;
+
+	/* No need to delay if gpio_dvs_mode */
+	switch (rid) {
+	case MAX8997_BUCK1:
+		if (max8997->buck1_gpiodvs)
+			return 0;
+		break;
+	case MAX8997_BUCK2:
+		if (max8997->buck2_gpiodvs)
+			return 0;
+		break;
+	case MAX8997_BUCK5:
+		if (max8997->buck5_gpiodvs)
+			return 0;
+		break;
+	}
+
+	switch (rid) {
+	case MAX8997_BUCK1:
+	case MAX8997_BUCK2:
+	case MAX8997_BUCK4:
+	case MAX8997_BUCK5:
+		return DIV_ROUND_UP(desc->step * (new_selector - old_selector),
+				    max8997->ramp_delay * 1000);
+	}
+
+	return 0;
+}
+
+/*
+ * Assess the damage on the voltage setting of BUCK1,2,5 by the change.
+ *
+ * When GPIO-DVS mode is used for multiple bucks, changing the voltage value
+ * of one of the bucks may affect that of another buck, which is the side
+ * effect of the change (set_voltage). This function examines the GPIO-DVS
+ * configurations and checks whether such side-effect exists.
+ */
+static int max8997_assess_side_effect(struct regulator_dev *rdev,
+		u8 new_val, int *best)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	int rid = rdev_get_id(rdev);
+	u8 *buckx_val[3];
+	bool buckx_gpiodvs[3];
+	int side_effect[8];
+	int min_side_effect = INT_MAX;
+	int i;
+
+	*best = -1;
+
+	switch (rid) {
+	case MAX8997_BUCK1:
+		rid = 0;
+		break;
+	case MAX8997_BUCK2:
+		rid = 1;
+		break;
+	case MAX8997_BUCK5:
+		rid = 2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	buckx_val[0] = max8997->buck1_vol;
+	buckx_val[1] = max8997->buck2_vol;
+	buckx_val[2] = max8997->buck5_vol;
+	buckx_gpiodvs[0] = max8997->buck1_gpiodvs;
+	buckx_gpiodvs[1] = max8997->buck2_gpiodvs;
+	buckx_gpiodvs[2] = max8997->buck5_gpiodvs;
+
+	for (i = 0; i < 8; i++) {
+		int others;
+
+		if (new_val != (buckx_val[rid])[i]) {
+			side_effect[i] = -1;
+			continue;
+		}
+
+		side_effect[i] = 0;
+		for (others = 0; others < 3; others++) {
+			int diff;
+
+			if (others == rid)
+				continue;
+			if (buckx_gpiodvs[others] == false)
+				continue; /* Not affected */
+			diff = (buckx_val[others])[i] -
+				(buckx_val[others])[max8997->buck125_gpioindex];
+			if (diff > 0)
+				side_effect[i] += diff;
+			else if (diff < 0)
+				side_effect[i] -= diff;
+		}
+		if (side_effect[i] == 0) {
+			*best = i;
+			return 0; /* NO SIDE EFFECT! Use This! */
+		}
+		if (side_effect[i] < min_side_effect) {
+			min_side_effect = side_effect[i];
+			*best = i;
+		}
+	}
+
+	if (*best == -1)
+		return -EINVAL;
+
+	return side_effect[*best];
+}
+
+/*
+ * For Buck 1 ~ 5 and 7. If it is not controlled by GPIO, this calls
+ * max8997_set_voltage_ldobuck to do the job.
+ */
+static int max8997_set_voltage_buck(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	int rid = rdev_get_id(rdev);
+	const struct voltage_map_desc *desc;
+	int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
+	bool gpio_dvs_mode = false;
+
+	if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
+		return -EINVAL;
+
+	switch (rid) {
+	case MAX8997_BUCK1:
+		if (max8997->buck1_gpiodvs)
+			gpio_dvs_mode = true;
+		break;
+	case MAX8997_BUCK2:
+		if (max8997->buck2_gpiodvs)
+			gpio_dvs_mode = true;
+		break;
+	case MAX8997_BUCK5:
+		if (max8997->buck5_gpiodvs)
+			gpio_dvs_mode = true;
+		break;
+	}
+
+	if (!gpio_dvs_mode)
+		return max8997_set_voltage_ldobuck(rdev, min_uV, max_uV,
+						selector);
+
+	desc = reg_voltage_map[rid];
+	new_val = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
+	if (new_val < 0)
+		return new_val;
+
+	tmp_dmg = INT_MAX;
+	tmp_idx = -1;
+	tmp_val = -1;
+	do {
+		damage = max8997_assess_side_effect(rdev, new_val, &new_idx);
+		if (damage == 0)
+			goto out;
+
+		if (tmp_dmg > damage) {
+			tmp_idx = new_idx;
+			tmp_val = new_val;
+			tmp_dmg = damage;
+		}
+
+		new_val++;
+	} while (desc->min + desc->step * new_val <= desc->max);
+
+	new_idx = tmp_idx;
+	new_val = tmp_val;
+
+	if (max8997->ignore_gpiodvs_side_effect == false)
+		return -EINVAL;
+
+	dev_warn(&rdev->dev,
+		"MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:  %d -> %d\n",
+		max8997->buck125_gpioindex, tmp_idx);
+
+out:
+	if (new_idx < 0 || new_val < 0)
+		return -EINVAL;
+
+	max8997->buck125_gpioindex = new_idx;
+	max8997_set_gpio(max8997);
+	*selector = new_val;
+
+	return 0;
+}
+
+/* For SAFEOUT1 and SAFEOUT2 */
+static int max8997_set_voltage_safeout_sel(struct regulator_dev *rdev,
+					   unsigned selector)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	struct i2c_client *i2c = max8997->iodev->i2c;
+	int rid = rdev_get_id(rdev);
+	int reg, shift = 0, mask, ret;
+
+	if (rid != MAX8997_ESAFEOUT1 && rid != MAX8997_ESAFEOUT2)
+		return -EINVAL;
+
+	ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
+	if (ret)
+		return ret;
+
+	return max8997_update_reg(i2c, reg, selector << shift, mask << shift);
+}
+
+static int max8997_reg_disable_suspend(struct regulator_dev *rdev)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	struct i2c_client *i2c = max8997->iodev->i2c;
+	int ret, reg, mask, pattern;
+	int rid = rdev_get_id(rdev);
+
+	ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
+	if (ret)
+		return ret;
+
+	max8997_read_reg(i2c, reg, &max8997->saved_states[rid]);
+
+	if (rid == MAX8997_LDO1 ||
+			rid == MAX8997_LDO10 ||
+			rid == MAX8997_LDO21) {
+		dev_dbg(&rdev->dev, "Conditional Power-Off for %s\n",
+				rdev->desc->name);
+		return max8997_update_reg(i2c, reg, 0x40, mask);
+	}
+
+	dev_dbg(&rdev->dev, "Full Power-Off for %s (%xh -> %xh)\n",
+			rdev->desc->name, max8997->saved_states[rid] & mask,
+			(~pattern) & mask);
+	return max8997_update_reg(i2c, reg, ~pattern, mask);
+}
+
+static struct regulator_ops max8997_ldo_ops = {
+	.list_voltage		= max8997_list_voltage,
+	.is_enabled		= max8997_reg_is_enabled,
+	.enable			= max8997_reg_enable,
+	.disable		= max8997_reg_disable,
+	.get_voltage_sel	= max8997_get_voltage_sel,
+	.set_voltage		= max8997_set_voltage_ldobuck,
+	.set_suspend_disable	= max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_buck_ops = {
+	.list_voltage		= max8997_list_voltage,
+	.is_enabled		= max8997_reg_is_enabled,
+	.enable			= max8997_reg_enable,
+	.disable		= max8997_reg_disable,
+	.get_voltage_sel	= max8997_get_voltage_sel,
+	.set_voltage		= max8997_set_voltage_buck,
+	.set_voltage_time_sel	= max8997_set_voltage_buck_time_sel,
+	.set_suspend_disable	= max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_fixedvolt_ops = {
+	.list_voltage		= max8997_list_voltage,
+	.is_enabled		= max8997_reg_is_enabled,
+	.enable			= max8997_reg_enable,
+	.disable		= max8997_reg_disable,
+	.set_suspend_disable	= max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_safeout_ops = {
+	.list_voltage		= regulator_list_voltage_table,
+	.is_enabled		= max8997_reg_is_enabled,
+	.enable			= max8997_reg_enable,
+	.disable		= max8997_reg_disable,
+	.get_voltage_sel	= max8997_get_voltage_sel,
+	.set_voltage_sel	= max8997_set_voltage_safeout_sel,
+	.set_suspend_disable	= max8997_reg_disable_suspend,
+};
+
+static struct regulator_ops max8997_fixedstate_ops = {
+	.list_voltage		= max8997_list_voltage_charger_cv,
+	.get_voltage_sel	= max8997_get_voltage_sel,
+	.set_voltage		= max8997_set_voltage_charger_cv,
+};
+
+static int max8997_set_current_limit(struct regulator_dev *rdev,
+				     int min_uA, int max_uA)
+{
+	unsigned dummy;
+	int rid = rdev_get_id(rdev);
+
+	if (rid != MAX8997_CHARGER && rid != MAX8997_CHARGER_TOPOFF)
+		return -EINVAL;
+
+	/* Reuse max8997_set_voltage_ldobuck to set current_limit. */
+	return max8997_set_voltage_ldobuck(rdev, min_uA, max_uA, &dummy);
+}
+
+static int max8997_get_current_limit(struct regulator_dev *rdev)
+{
+	int sel, rid = rdev_get_id(rdev);
+
+	if (rid != MAX8997_CHARGER && rid != MAX8997_CHARGER_TOPOFF)
+		return -EINVAL;
+
+	sel = max8997_get_voltage_sel(rdev);
+	if (sel < 0)
+		return sel;
+
+	/* Reuse max8997_list_voltage to get current_limit. */
+	return max8997_list_voltage(rdev, sel);
+}
+
+static struct regulator_ops max8997_charger_ops = {
+	.is_enabled		= max8997_reg_is_enabled,
+	.enable			= max8997_reg_enable,
+	.disable		= max8997_reg_disable,
+	.get_current_limit	= max8997_get_current_limit,
+	.set_current_limit	= max8997_set_current_limit,
+};
+
+static struct regulator_ops max8997_charger_fixedstate_ops = {
+	.get_current_limit	= max8997_get_current_limit,
+	.set_current_limit	= max8997_set_current_limit,
+};
+
+#define MAX8997_VOLTAGE_REGULATOR(_name, _ops) {\
+	.name		= #_name,		\
+	.id		= MAX8997_##_name,	\
+	.ops		= &_ops,		\
+	.type		= REGULATOR_VOLTAGE,	\
+	.owner		= THIS_MODULE,		\
+}
+
+#define MAX8997_CURRENT_REGULATOR(_name, _ops) {\
+	.name		= #_name,		\
+	.id		= MAX8997_##_name,	\
+	.ops		= &_ops,		\
+	.type		= REGULATOR_CURRENT,	\
+	.owner		= THIS_MODULE,		\
+}
+
+static struct regulator_desc regulators[] = {
+	MAX8997_VOLTAGE_REGULATOR(LDO1, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO2, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO3, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO4, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO5, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO6, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO7, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO8, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO9, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO10, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO11, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO12, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO13, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO14, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO15, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO16, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO17, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO18, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO21, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK1, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK2, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK3, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK4, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK5, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK6, max8997_fixedvolt_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK7, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(EN32KHZ_AP, max8997_fixedvolt_ops),
+	MAX8997_VOLTAGE_REGULATOR(EN32KHZ_CP, max8997_fixedvolt_ops),
+	MAX8997_VOLTAGE_REGULATOR(ENVICHG, max8997_fixedvolt_ops),
+	MAX8997_VOLTAGE_REGULATOR(ESAFEOUT1, max8997_safeout_ops),
+	MAX8997_VOLTAGE_REGULATOR(ESAFEOUT2, max8997_safeout_ops),
+	MAX8997_VOLTAGE_REGULATOR(CHARGER_CV, max8997_fixedstate_ops),
+	MAX8997_CURRENT_REGULATOR(CHARGER, max8997_charger_ops),
+	MAX8997_CURRENT_REGULATOR(CHARGER_TOPOFF,
+				  max8997_charger_fixedstate_ops),
+};
+
+#ifdef CONFIG_OF
+static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,
+			struct max8997_platform_data *pdata,
+			struct device_node *pmic_np)
+{
+	int i, gpio;
+
+	for (i = 0; i < 3; i++) {
+		gpio = of_get_named_gpio(pmic_np,
+					"max8997,pmic-buck125-dvs-gpios", i);
+		if (!gpio_is_valid(gpio)) {
+			dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);
+			return -EINVAL;
+		}
+		pdata->buck125_gpios[i] = gpio;
+	}
+	return 0;
+}
+
+static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
+					struct max8997_platform_data *pdata)
+{
+	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+	struct device_node *pmic_np, *regulators_np, *reg_np;
+	struct max8997_regulator_data *rdata;
+	unsigned int i, dvs_voltage_nr = 1, ret;
+
+	pmic_np = iodev->dev->of_node;
+	if (!pmic_np) {
+		dev_err(&pdev->dev, "could not find pmic sub-node\n");
+		return -ENODEV;
+	}
+
+	regulators_np = of_get_child_by_name(pmic_np, "regulators");
+	if (!regulators_np) {
+		dev_err(&pdev->dev, "could not find regulators sub-node\n");
+		return -EINVAL;
+	}
+
+	/* count the number of regulators to be supported in pmic */
+	pdata->num_regulators = of_get_child_count(regulators_np);
+
+	rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
+				pdata->num_regulators, GFP_KERNEL);
+	if (!rdata) {
+		of_node_put(regulators_np);
+		return -ENOMEM;
+	}
+
+	pdata->regulators = rdata;
+	for_each_child_of_node(regulators_np, reg_np) {
+		for (i = 0; i < ARRAY_SIZE(regulators); i++)
+			if (!of_node_cmp(reg_np->name, regulators[i].name))
+				break;
+
+		if (i == ARRAY_SIZE(regulators)) {
+			dev_warn(&pdev->dev, "don't know how to configure regulator %s\n",
+				 reg_np->name);
+			continue;
+		}
+
+		rdata->id = i;
+		rdata->initdata = of_get_regulator_init_data(&pdev->dev,
+							     reg_np,
+							     &regulators[i]);
+		rdata->reg_node = reg_np;
+		rdata++;
+	}
+	of_node_put(regulators_np);
+
+	if (of_get_property(pmic_np, "max8997,pmic-buck1-uses-gpio-dvs", NULL))
+		pdata->buck1_gpiodvs = true;
+
+	if (of_get_property(pmic_np, "max8997,pmic-buck2-uses-gpio-dvs", NULL))
+		pdata->buck2_gpiodvs = true;
+
+	if (of_get_property(pmic_np, "max8997,pmic-buck5-uses-gpio-dvs", NULL))
+		pdata->buck5_gpiodvs = true;
+
+	if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
+						pdata->buck5_gpiodvs) {
+		ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);
+		if (ret)
+			return -EINVAL;
+
+		if (of_property_read_u32(pmic_np,
+				"max8997,pmic-buck125-default-dvs-idx",
+				&pdata->buck125_default_idx)) {
+			pdata->buck125_default_idx = 0;
+		} else {
+			if (pdata->buck125_default_idx >= 8) {
+				pdata->buck125_default_idx = 0;
+				dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n");
+			}
+		}
+
+		if (of_get_property(pmic_np,
+			"max8997,pmic-ignore-gpiodvs-side-effect", NULL))
+			pdata->ignore_gpiodvs_side_effect = true;
+
+		dvs_voltage_nr = 8;
+	}
+
+	if (of_property_read_u32_array(pmic_np,
+				"max8997,pmic-buck1-dvs-voltage",
+				pdata->buck1_voltage, dvs_voltage_nr)) {
+		dev_err(&pdev->dev, "buck1 voltages not specified\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32_array(pmic_np,
+				"max8997,pmic-buck2-dvs-voltage",
+				pdata->buck2_voltage, dvs_voltage_nr)) {
+		dev_err(&pdev->dev, "buck2 voltages not specified\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32_array(pmic_np,
+				"max8997,pmic-buck5-dvs-voltage",
+				pdata->buck5_voltage, dvs_voltage_nr)) {
+		dev_err(&pdev->dev, "buck5 voltages not specified\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#else
+static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
+					struct max8997_platform_data *pdata)
+{
+	return 0;
+}
+#endif /* CONFIG_OF */
+
+static int max8997_pmic_probe(struct platform_device *pdev)
+{
+	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+	struct max8997_platform_data *pdata = iodev->pdata;
+	struct regulator_config config = { };
+	struct regulator_dev *rdev;
+	struct max8997_data *max8997;
+	struct i2c_client *i2c;
+	int i, ret, nr_dvs;
+	u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform init data supplied.\n");
+		return -ENODEV;
+	}
+
+	if (iodev->dev->of_node) {
+		ret = max8997_pmic_dt_parse_pdata(pdev, pdata);
+		if (ret)
+			return ret;
+	}
+
+	max8997 = devm_kzalloc(&pdev->dev, sizeof(struct max8997_data),
+			       GFP_KERNEL);
+	if (!max8997)
+		return -ENOMEM;
+
+	max8997->dev = &pdev->dev;
+	max8997->iodev = iodev;
+	max8997->num_regulators = pdata->num_regulators;
+	platform_set_drvdata(pdev, max8997);
+	i2c = max8997->iodev->i2c;
+
+	max8997->buck125_gpioindex = pdata->buck125_default_idx;
+	max8997->buck1_gpiodvs = pdata->buck1_gpiodvs;
+	max8997->buck2_gpiodvs = pdata->buck2_gpiodvs;
+	max8997->buck5_gpiodvs = pdata->buck5_gpiodvs;
+	memcpy(max8997->buck125_gpios, pdata->buck125_gpios, sizeof(int) * 3);
+	max8997->ignore_gpiodvs_side_effect = pdata->ignore_gpiodvs_side_effect;
+
+	nr_dvs = (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
+			pdata->buck5_gpiodvs) ? 8 : 1;
+
+	for (i = 0; i < nr_dvs; i++) {
+		max8997->buck1_vol[i] = ret =
+			max8997_get_voltage_proper_val(
+					&buck1245_voltage_map_desc,
+					pdata->buck1_voltage[i],
+					pdata->buck1_voltage[i] +
+					buck1245_voltage_map_desc.step);
+		if (ret < 0)
+			return ret;
+
+		max8997->buck2_vol[i] = ret =
+			max8997_get_voltage_proper_val(
+					&buck1245_voltage_map_desc,
+					pdata->buck2_voltage[i],
+					pdata->buck2_voltage[i] +
+					buck1245_voltage_map_desc.step);
+		if (ret < 0)
+			return ret;
+
+		max8997->buck5_vol[i] = ret =
+			max8997_get_voltage_proper_val(
+					&buck1245_voltage_map_desc,
+					pdata->buck5_voltage[i],
+					pdata->buck5_voltage[i] +
+					buck1245_voltage_map_desc.step);
+		if (ret < 0)
+			return ret;
+
+		if (max_buck1 < max8997->buck1_vol[i])
+			max_buck1 = max8997->buck1_vol[i];
+		if (max_buck2 < max8997->buck2_vol[i])
+			max_buck2 = max8997->buck2_vol[i];
+		if (max_buck5 < max8997->buck5_vol[i])
+			max_buck5 = max8997->buck5_vol[i];
+	}
+
+	/* For safety, set the max voltage before setting up */
+	for (i = 0; i < 8; i++) {
+		max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
+				max_buck1, 0x3f);
+		max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
+				max_buck2, 0x3f);
+		max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
+				max_buck5, 0x3f);
+	}
+
+	/* Initialize all the DVS related BUCK registers */
+	for (i = 0; i < nr_dvs; i++) {
+		max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
+				max8997->buck1_vol[i],
+				0x3f);
+		max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
+				max8997->buck2_vol[i],
+				0x3f);
+		max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
+				max8997->buck5_vol[i],
+				0x3f);
+	}
+
+	/*
+	 * If buck 1, 2, and 5 do not care about DVS GPIO settings, ignore them.
+	 * If at least one of them cares, set gpios.
+	 */
+	if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
+			pdata->buck5_gpiodvs) {
+
+		if (!gpio_is_valid(pdata->buck125_gpios[0]) ||
+				!gpio_is_valid(pdata->buck125_gpios[1]) ||
+				!gpio_is_valid(pdata->buck125_gpios[2])) {
+			dev_err(&pdev->dev, "GPIO NOT VALID\n");
+			return -EINVAL;
+		}
+
+		ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[0],
+					"MAX8997 SET1");
+		if (ret)
+			return ret;
+
+		ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[1],
+					"MAX8997 SET2");
+		if (ret)
+			return ret;
+
+		ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[2],
+				"MAX8997 SET3");
+		if (ret)
+			return ret;
+
+		gpio_direction_output(pdata->buck125_gpios[0],
+				(max8997->buck125_gpioindex >> 2)
+				& 0x1); /* SET1 */
+		gpio_direction_output(pdata->buck125_gpios[1],
+				(max8997->buck125_gpioindex >> 1)
+				& 0x1); /* SET2 */
+		gpio_direction_output(pdata->buck125_gpios[2],
+				(max8997->buck125_gpioindex >> 0)
+				& 0x1); /* SET3 */
+	}
+
+	/* DVS-GPIO disabled */
+	max8997_update_reg(i2c, MAX8997_REG_BUCK1CTRL, (pdata->buck1_gpiodvs) ?
+			(1 << 1) : (0 << 1), 1 << 1);
+	max8997_update_reg(i2c, MAX8997_REG_BUCK2CTRL, (pdata->buck2_gpiodvs) ?
+			(1 << 1) : (0 << 1), 1 << 1);
+	max8997_update_reg(i2c, MAX8997_REG_BUCK5CTRL, (pdata->buck5_gpiodvs) ?
+			(1 << 1) : (0 << 1), 1 << 1);
+
+	/* Misc Settings */
+	max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
+	max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
+
+	for (i = 0; i < pdata->num_regulators; i++) {
+		const struct voltage_map_desc *desc;
+		int id = pdata->regulators[i].id;
+
+		desc = reg_voltage_map[id];
+		if (desc) {
+			regulators[id].n_voltages =
+				(desc->max - desc->min) / desc->step + 1;
+		} else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2) {
+			regulators[id].volt_table = safeoutvolt;
+			regulators[id].n_voltages = ARRAY_SIZE(safeoutvolt);
+		} else if (id == MAX8997_CHARGER_CV) {
+			regulators[id].n_voltages = 16;
+		}
+
+		config.dev = max8997->dev;
+		config.init_data = pdata->regulators[i].initdata;
+		config.driver_data = max8997;
+		config.of_node = pdata->regulators[i].reg_node;
+
+		rdev = devm_regulator_register(&pdev->dev, &regulators[id],
+					       &config);
+		if (IS_ERR(rdev)) {
+			dev_err(max8997->dev, "regulator init failed for %d\n",
+					id);
+			return PTR_ERR(rdev);
+		}
+	}
+
+	return 0;
+}
+
+static const struct platform_device_id max8997_pmic_id[] = {
+	{ "max8997-pmic", 0},
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, max8997_pmic_id);
+
+static struct platform_driver max8997_pmic_driver = {
+	.driver = {
+		.name = "max8997-pmic",
+	},
+	.probe = max8997_pmic_probe,
+	.id_table = max8997_pmic_id,
+};
+
+static int __init max8997_pmic_init(void)
+{
+	return platform_driver_register(&max8997_pmic_driver);
+}
+subsys_initcall(max8997_pmic_init);
+
+static void __exit max8997_pmic_cleanup(void)
+{
+	platform_driver_unregister(&max8997_pmic_driver);
+}
+module_exit(max8997_pmic_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8997/8966 Regulator Driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
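
The probe path above converts each requested BUCK1/2/5 DVS voltage into a register selector by calling max8997_get_voltage_proper_val() over [V, V + step]. A worked example with the buck1245 map (650000 to 2225000 uV in 25000 uV steps), assuming the platform data requests 1.2 V:

	int min_vol = 1200000, max_vol = 1200000 + 25000;
	int i = (min_vol - 650000 + 25000 - 1) / 25000;	/* DIV_ROUND_UP -> 22 */

	/* 650000 + 25000 * 22 == 1200000 <= max_vol, so selector 22 is used
	 * and the buck is programmed to exactly 1.2 V. */
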
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
deleted file mode 100644
index ea0196d..0000000
--- a/drivers/regulator/max8997.c
+++ /dev/null
@@ -1,1241 +0,0 @@
-/*
- * max8997.c - Regulator driver for the Maxim 8997/8966
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@smasung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- * This driver is based on max8998.c
- */
-
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/mfd/max8997.h>
-#include <linux/mfd/max8997-private.h>
-#include <linux/regulator/of_regulator.h>
-
-struct max8997_data {
-	struct device *dev;
-	struct max8997_dev *iodev;
-	int num_regulators;
-	int ramp_delay; /* in mV/us */
-
-	bool buck1_gpiodvs;
-	bool buck2_gpiodvs;
-	bool buck5_gpiodvs;
-	u8 buck1_vol[8];
-	u8 buck2_vol[8];
-	u8 buck5_vol[8];
-	int buck125_gpios[3];
-	int buck125_gpioindex;
-	bool ignore_gpiodvs_side_effect;
-
-	u8 saved_states[MAX8997_REG_MAX];
-};
-
-static const unsigned int safeoutvolt[] = {
-	4850000,
-	4900000,
-	4950000,
-	3300000,
-};
-
-static inline void max8997_set_gpio(struct max8997_data *max8997)
-{
-	int set3 = (max8997->buck125_gpioindex) & 0x1;
-	int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
-	int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
-
-	gpio_set_value(max8997->buck125_gpios[0], set1);
-	gpio_set_value(max8997->buck125_gpios[1], set2);
-	gpio_set_value(max8997->buck125_gpios[2], set3);
-}
-
-struct voltage_map_desc {
-	int min;
-	int max;
-	int step;
-};
-
-/* Voltage maps in uV */
-static const struct voltage_map_desc ldo_voltage_map_desc = {
-	.min = 800000,	.max = 3950000,	.step = 50000,
-}; /* LDO1 ~ 18, 21 all */
-
-static const struct voltage_map_desc buck1245_voltage_map_desc = {
-	.min = 650000,	.max = 2225000,	.step = 25000,
-}; /* Buck1, 2, 4, 5 */
-
-static const struct voltage_map_desc buck37_voltage_map_desc = {
-	.min = 750000,	.max = 3900000,	.step = 50000,
-}; /* Buck3, 7 */
-
-/* current map in uA */
-static const struct voltage_map_desc charger_current_map_desc = {
-	.min = 200000,	.max = 950000,	.step = 50000,
-};
-
-static const struct voltage_map_desc topoff_current_map_desc = {
-	.min = 50000,	.max = 200000,	.step = 10000,
-};
-
-static const struct voltage_map_desc *reg_voltage_map[] = {
-	[MAX8997_LDO1] = &ldo_voltage_map_desc,
-	[MAX8997_LDO2] = &ldo_voltage_map_desc,
-	[MAX8997_LDO3] = &ldo_voltage_map_desc,
-	[MAX8997_LDO4] = &ldo_voltage_map_desc,
-	[MAX8997_LDO5] = &ldo_voltage_map_desc,
-	[MAX8997_LDO6] = &ldo_voltage_map_desc,
-	[MAX8997_LDO7] = &ldo_voltage_map_desc,
-	[MAX8997_LDO8] = &ldo_voltage_map_desc,
-	[MAX8997_LDO9] = &ldo_voltage_map_desc,
-	[MAX8997_LDO10] = &ldo_voltage_map_desc,
-	[MAX8997_LDO11] = &ldo_voltage_map_desc,
-	[MAX8997_LDO12] = &ldo_voltage_map_desc,
-	[MAX8997_LDO13] = &ldo_voltage_map_desc,
-	[MAX8997_LDO14] = &ldo_voltage_map_desc,
-	[MAX8997_LDO15] = &ldo_voltage_map_desc,
-	[MAX8997_LDO16] = &ldo_voltage_map_desc,
-	[MAX8997_LDO17] = &ldo_voltage_map_desc,
-	[MAX8997_LDO18] = &ldo_voltage_map_desc,
-	[MAX8997_LDO21] = &ldo_voltage_map_desc,
-	[MAX8997_BUCK1] = &buck1245_voltage_map_desc,
-	[MAX8997_BUCK2] = &buck1245_voltage_map_desc,
-	[MAX8997_BUCK3] = &buck37_voltage_map_desc,
-	[MAX8997_BUCK4] = &buck1245_voltage_map_desc,
-	[MAX8997_BUCK5] = &buck1245_voltage_map_desc,
-	[MAX8997_BUCK6] = NULL,
-	[MAX8997_BUCK7] = &buck37_voltage_map_desc,
-	[MAX8997_EN32KHZ_AP] = NULL,
-	[MAX8997_EN32KHZ_CP] = NULL,
-	[MAX8997_ENVICHG] = NULL,
-	[MAX8997_ESAFEOUT1] = NULL,
-	[MAX8997_ESAFEOUT2] = NULL,
-	[MAX8997_CHARGER_CV] = NULL,
-	[MAX8997_CHARGER] = &charger_current_map_desc,
-	[MAX8997_CHARGER_TOPOFF] = &topoff_current_map_desc,
-};
-
-static int max8997_list_voltage_charger_cv(struct regulator_dev *rdev,
-		unsigned int selector)
-{
-	int rid = rdev_get_id(rdev);
-
-	if (rid != MAX8997_CHARGER_CV)
-		goto err;
-
-	switch (selector) {
-	case 0x00:
-		return 4200000;
-	case 0x01 ... 0x0E:
-		return 4000000 + 20000 * (selector - 0x01);
-	case 0x0F:
-		return 4350000;
-	default:
-		return -EINVAL;
-	}
-err:
-	return -EINVAL;
-}
-
-static int max8997_list_voltage(struct regulator_dev *rdev,
-		unsigned int selector)
-{
-	const struct voltage_map_desc *desc;
-	int rid = rdev_get_id(rdev);
-	int val;
-
-	if (rid >= ARRAY_SIZE(reg_voltage_map) ||
-			rid < 0)
-		return -EINVAL;
-
-	desc = reg_voltage_map[rid];
-	if (desc == NULL)
-		return -EINVAL;
-
-	val = desc->min + desc->step * selector;
-	if (val > desc->max)
-		return -EINVAL;
-
-	return val;
-}
-
-static int max8997_get_enable_register(struct regulator_dev *rdev,
-		int *reg, int *mask, int *pattern)
-{
-	int rid = rdev_get_id(rdev);
-
-	switch (rid) {
-	case MAX8997_LDO1 ... MAX8997_LDO21:
-		*reg = MAX8997_REG_LDO1CTRL + (rid - MAX8997_LDO1);
-		*mask = 0xC0;
-		*pattern = 0xC0;
-		break;
-	case MAX8997_BUCK1:
-		*reg = MAX8997_REG_BUCK1CTRL;
-		*mask = 0x01;
-		*pattern = 0x01;
-		break;
-	case MAX8997_BUCK2:
-		*reg = MAX8997_REG_BUCK2CTRL;
-		*mask = 0x01;
-		*pattern = 0x01;
-		break;
-	case MAX8997_BUCK3:
-		*reg = MAX8997_REG_BUCK3CTRL;
-		*mask = 0x01;
-		*pattern = 0x01;
-		break;
-	case MAX8997_BUCK4:
-		*reg = MAX8997_REG_BUCK4CTRL;
-		*mask = 0x01;
-		*pattern = 0x01;
-		break;
-	case MAX8997_BUCK5:
-		*reg = MAX8997_REG_BUCK5CTRL;
-		*mask = 0x01;
-		*pattern = 0x01;
-		break;
-	case MAX8997_BUCK6:
-		*reg = MAX8997_REG_BUCK6CTRL;
-		*mask = 0x01;
-		*pattern = 0x01;
-		break;
-	case MAX8997_BUCK7:
-		*reg = MAX8997_REG_BUCK7CTRL;
-		*mask = 0x01;
-		*pattern = 0x01;
-		break;
-	case MAX8997_EN32KHZ_AP ... MAX8997_EN32KHZ_CP:
-		*reg = MAX8997_REG_MAINCON1;
-		*mask = 0x01 << (rid - MAX8997_EN32KHZ_AP);
-		*pattern = 0x01 << (rid - MAX8997_EN32KHZ_AP);
-		break;
-	case MAX8997_ENVICHG:
-		*reg = MAX8997_REG_MBCCTRL1;
-		*mask = 0x80;
-		*pattern = 0x80;
-		break;
-	case MAX8997_ESAFEOUT1 ... MAX8997_ESAFEOUT2:
-		*reg = MAX8997_REG_SAFEOUTCTRL;
-		*mask = 0x40 << (rid - MAX8997_ESAFEOUT1);
-		*pattern = 0x40 << (rid - MAX8997_ESAFEOUT1);
-		break;
-	case MAX8997_CHARGER:
-		*reg = MAX8997_REG_MBCCTRL2;
-		*mask = 0x40;
-		*pattern = 0x40;
-		break;
-	default:
-		/* Not controllable or not exists */
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int max8997_reg_is_enabled(struct regulator_dev *rdev)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	struct i2c_client *i2c = max8997->iodev->i2c;
-	int ret, reg, mask, pattern;
-	u8 val;
-
-	ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
-	if (ret)
-		return ret;
-
-	ret = max8997_read_reg(i2c, reg, &val);
-	if (ret)
-		return ret;
-
-	return (val & mask) == pattern;
-}
-
-static int max8997_reg_enable(struct regulator_dev *rdev)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	struct i2c_client *i2c = max8997->iodev->i2c;
-	int ret, reg, mask, pattern;
-
-	ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
-	if (ret)
-		return ret;
-
-	return max8997_update_reg(i2c, reg, pattern, mask);
-}
-
-static int max8997_reg_disable(struct regulator_dev *rdev)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	struct i2c_client *i2c = max8997->iodev->i2c;
-	int ret, reg, mask, pattern;
-
-	ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
-	if (ret)
-		return ret;
-
-	return max8997_update_reg(i2c, reg, ~pattern, mask);
-}
-
-static int max8997_get_voltage_register(struct regulator_dev *rdev,
-		int *_reg, int *_shift, int *_mask)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	int rid = rdev_get_id(rdev);
-	int reg, shift = 0, mask = 0x3f;
-
-	switch (rid) {
-	case MAX8997_LDO1 ... MAX8997_LDO21:
-		reg = MAX8997_REG_LDO1CTRL + (rid - MAX8997_LDO1);
-		break;
-	case MAX8997_BUCK1:
-		reg = MAX8997_REG_BUCK1DVS1;
-		if (max8997->buck1_gpiodvs)
-			reg += max8997->buck125_gpioindex;
-		break;
-	case MAX8997_BUCK2:
-		reg = MAX8997_REG_BUCK2DVS1;
-		if (max8997->buck2_gpiodvs)
-			reg += max8997->buck125_gpioindex;
-		break;
-	case MAX8997_BUCK3:
-		reg = MAX8997_REG_BUCK3DVS;
-		break;
-	case MAX8997_BUCK4:
-		reg = MAX8997_REG_BUCK4DVS;
-		break;
-	case MAX8997_BUCK5:
-		reg = MAX8997_REG_BUCK5DVS1;
-		if (max8997->buck5_gpiodvs)
-			reg += max8997->buck125_gpioindex;
-		break;
-	case MAX8997_BUCK7:
-		reg = MAX8997_REG_BUCK7DVS;
-		break;
-	case MAX8997_ESAFEOUT1 ...  MAX8997_ESAFEOUT2:
-		reg = MAX8997_REG_SAFEOUTCTRL;
-		shift = (rid == MAX8997_ESAFEOUT2) ? 2 : 0;
-		mask = 0x3;
-		break;
-	case MAX8997_CHARGER_CV:
-		reg = MAX8997_REG_MBCCTRL3;
-		shift = 0;
-		mask = 0xf;
-		break;
-	case MAX8997_CHARGER:
-		reg = MAX8997_REG_MBCCTRL4;
-		shift = 0;
-		mask = 0xf;
-		break;
-	case MAX8997_CHARGER_TOPOFF:
-		reg = MAX8997_REG_MBCCTRL5;
-		shift = 0;
-		mask = 0xf;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	*_reg = reg;
-	*_shift = shift;
-	*_mask = mask;
-
-	return 0;
-}
-
-static int max8997_get_voltage_sel(struct regulator_dev *rdev)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	struct i2c_client *i2c = max8997->iodev->i2c;
-	int reg, shift, mask, ret;
-	u8 val;
-
-	ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
-	if (ret)
-		return ret;
-
-	ret = max8997_read_reg(i2c, reg, &val);
-	if (ret)
-		return ret;
-
-	val >>= shift;
-	val &= mask;
-
-	return val;
-}
-
-static inline int max8997_get_voltage_proper_val(
-		const struct voltage_map_desc *desc,
-		int min_vol, int max_vol)
-{
-	int i;
-
-	if (desc == NULL)
-		return -EINVAL;
-
-	if (max_vol < desc->min || min_vol > desc->max)
-		return -EINVAL;
-
-	if (min_vol < desc->min)
-		min_vol = desc->min;
-
-	i = DIV_ROUND_UP(min_vol - desc->min, desc->step);
-
-	if (desc->min + desc->step * i > max_vol)
-		return -EINVAL;
-
-	return i;
-}
-
-static int max8997_set_voltage_charger_cv(struct regulator_dev *rdev,
-		int min_uV, int max_uV, unsigned *selector)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	struct i2c_client *i2c = max8997->iodev->i2c;
-	int rid = rdev_get_id(rdev);
-	int lb, ub;
-	int reg, shift = 0, mask, ret = 0;
-	u8 val = 0x0;
-
-	if (rid != MAX8997_CHARGER_CV)
-		return -EINVAL;
-
-	ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
-	if (ret)
-		return ret;
-
-	if (max_uV < 4000000 || min_uV > 4350000)
-		return -EINVAL;
-
-	if (min_uV <= 4000000) {
-		if (max_uV >= 4000000)
-			return -EINVAL;
-		else
-			val = 0x1;
-	} else if (min_uV <= 4200000 && max_uV >= 4200000)
-		val = 0x0;
-	else {
-		lb = (min_uV - 4000001) / 20000 + 2;
-		ub = (max_uV - 4000000) / 20000 + 1;
-
-		if (lb > ub)
-			return -EINVAL;
-
-		if (lb < 0xf)
-			val = lb;
-		else {
-			if (ub >= 0xf)
-				val = 0xf;
-			else
-				return -EINVAL;
-		}
-	}
-
-	*selector = val;
-
-	ret = max8997_update_reg(i2c, reg, val << shift, mask);
-
-	return ret;
-}
-
-/*
- * For LDO1 ~ LDO21, BUCK1~5, BUCK7, CHARGER, CHARGER_TOPOFF
- * BUCK1, 2, and 5 are available if they are not controlled by gpio
- */
-static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
-		int min_uV, int max_uV, unsigned *selector)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	struct i2c_client *i2c = max8997->iodev->i2c;
-	const struct voltage_map_desc *desc;
-	int rid = rdev_get_id(rdev);
-	int i, reg, shift, mask, ret;
-
-	switch (rid) {
-	case MAX8997_LDO1 ... MAX8997_LDO21:
-		break;
-	case MAX8997_BUCK1 ... MAX8997_BUCK5:
-		break;
-	case MAX8997_BUCK6:
-		return -EINVAL;
-	case MAX8997_BUCK7:
-		break;
-	case MAX8997_CHARGER:
-		break;
-	case MAX8997_CHARGER_TOPOFF:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	desc = reg_voltage_map[rid];
-
-	i = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
-	if (i < 0)
-		return i;
-
-	ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
-	if (ret)
-		return ret;
-
-	ret = max8997_update_reg(i2c, reg, i << shift, mask << shift);
-	*selector = i;
-
-	return ret;
-}
-
-static int max8997_set_voltage_buck_time_sel(struct regulator_dev *rdev,
-						unsigned int old_selector,
-						unsigned int new_selector)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	int rid = rdev_get_id(rdev);
-	const struct voltage_map_desc *desc = reg_voltage_map[rid];
-
-	/* Delay is required only if the voltage is increasing */
-	if (old_selector >= new_selector)
-		return 0;
-
-	/* No need to delay if gpio_dvs_mode */
-	switch (rid) {
-	case MAX8997_BUCK1:
-		if (max8997->buck1_gpiodvs)
-			return 0;
-		break;
-	case MAX8997_BUCK2:
-		if (max8997->buck2_gpiodvs)
-			return 0;
-		break;
-	case MAX8997_BUCK5:
-		if (max8997->buck5_gpiodvs)
-			return 0;
-		break;
-	}
-
-	switch (rid) {
-	case MAX8997_BUCK1:
-	case MAX8997_BUCK2:
-	case MAX8997_BUCK4:
-	case MAX8997_BUCK5:
-		return DIV_ROUND_UP(desc->step * (new_selector - old_selector),
-				    max8997->ramp_delay * 1000);
-	}
-
-	return 0;
-}
-
-/*
- * Assess the damage on the voltage setting of BUCK1,2,5 by the change.
- *
- * When GPIO-DVS mode is used for multiple bucks, changing the voltage value
- * of one of the bucks may affect that of another buck, which is the side
- * effect of the change (set_voltage). This function examines the GPIO-DVS
- * configurations and checks whether such side-effect exists.
- */
-static int max8997_assess_side_effect(struct regulator_dev *rdev,
-		u8 new_val, int *best)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	int rid = rdev_get_id(rdev);
-	u8 *buckx_val[3];
-	bool buckx_gpiodvs[3];
-	int side_effect[8];
-	int min_side_effect = INT_MAX;
-	int i;
-
-	*best = -1;
-
-	switch (rid) {
-	case MAX8997_BUCK1:
-		rid = 0;
-		break;
-	case MAX8997_BUCK2:
-		rid = 1;
-		break;
-	case MAX8997_BUCK5:
-		rid = 2;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	buckx_val[0] = max8997->buck1_vol;
-	buckx_val[1] = max8997->buck2_vol;
-	buckx_val[2] = max8997->buck5_vol;
-	buckx_gpiodvs[0] = max8997->buck1_gpiodvs;
-	buckx_gpiodvs[1] = max8997->buck2_gpiodvs;
-	buckx_gpiodvs[2] = max8997->buck5_gpiodvs;
-
-	for (i = 0; i < 8; i++) {
-		int others;
-
-		if (new_val != (buckx_val[rid])[i]) {
-			side_effect[i] = -1;
-			continue;
-		}
-
-		side_effect[i] = 0;
-		for (others = 0; others < 3; others++) {
-			int diff;
-
-			if (others == rid)
-				continue;
-			if (buckx_gpiodvs[others] == false)
-				continue; /* Not affected */
-			diff = (buckx_val[others])[i] -
-				(buckx_val[others])[max8997->buck125_gpioindex];
-			if (diff > 0)
-				side_effect[i] += diff;
-			else if (diff < 0)
-				side_effect[i] -= diff;
-		}
-		if (side_effect[i] == 0) {
-			*best = i;
-			return 0; /* NO SIDE EFFECT! Use This! */
-		}
-		if (side_effect[i] < min_side_effect) {
-			min_side_effect = side_effect[i];
-			*best = i;
-		}
-	}
-
-	if (*best == -1)
-		return -EINVAL;
-
-	return side_effect[*best];
-}
-
-/*
- * For Buck 1 ~ 5 and 7. If it is not controlled by GPIO, this calls
- * max8997_set_voltage_ldobuck to do the job.
- */
-static int max8997_set_voltage_buck(struct regulator_dev *rdev,
-		int min_uV, int max_uV, unsigned *selector)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	int rid = rdev_get_id(rdev);
-	const struct voltage_map_desc *desc;
-	int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
-	bool gpio_dvs_mode = false;
-
-	if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
-		return -EINVAL;
-
-	switch (rid) {
-	case MAX8997_BUCK1:
-		if (max8997->buck1_gpiodvs)
-			gpio_dvs_mode = true;
-		break;
-	case MAX8997_BUCK2:
-		if (max8997->buck2_gpiodvs)
-			gpio_dvs_mode = true;
-		break;
-	case MAX8997_BUCK5:
-		if (max8997->buck5_gpiodvs)
-			gpio_dvs_mode = true;
-		break;
-	}
-
-	if (!gpio_dvs_mode)
-		return max8997_set_voltage_ldobuck(rdev, min_uV, max_uV,
-						selector);
-
-	desc = reg_voltage_map[rid];
-	new_val = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
-	if (new_val < 0)
-		return new_val;
-
-	tmp_dmg = INT_MAX;
-	tmp_idx = -1;
-	tmp_val = -1;
-	do {
-		damage = max8997_assess_side_effect(rdev, new_val, &new_idx);
-		if (damage == 0)
-			goto out;
-
-		if (tmp_dmg > damage) {
-			tmp_idx = new_idx;
-			tmp_val = new_val;
-			tmp_dmg = damage;
-		}
-
-		new_val++;
-	} while (desc->min + desc->step * new_val <= desc->max);
-
-	new_idx = tmp_idx;
-	new_val = tmp_val;
-
-	if (max8997->ignore_gpiodvs_side_effect == false)
-		return -EINVAL;
-
-	dev_warn(&rdev->dev,
-		"MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:  %d -> %d\n",
-		max8997->buck125_gpioindex, tmp_idx);
-
-out:
-	if (new_idx < 0 || new_val < 0)
-		return -EINVAL;
-
-	max8997->buck125_gpioindex = new_idx;
-	max8997_set_gpio(max8997);
-	*selector = new_val;
-
-	return 0;
-}
-
-/* For SAFEOUT1 and SAFEOUT2 */
-static int max8997_set_voltage_safeout_sel(struct regulator_dev *rdev,
-					   unsigned selector)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	struct i2c_client *i2c = max8997->iodev->i2c;
-	int rid = rdev_get_id(rdev);
-	int reg, shift = 0, mask, ret;
-
-	if (rid != MAX8997_ESAFEOUT1 && rid != MAX8997_ESAFEOUT2)
-		return -EINVAL;
-
-	ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
-	if (ret)
-		return ret;
-
-	return max8997_update_reg(i2c, reg, selector << shift, mask << shift);
-}
-
-static int max8997_reg_disable_suspend(struct regulator_dev *rdev)
-{
-	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
-	struct i2c_client *i2c = max8997->iodev->i2c;
-	int ret, reg, mask, pattern;
-	int rid = rdev_get_id(rdev);
-
-	ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
-	if (ret)
-		return ret;
-
-	max8997_read_reg(i2c, reg, &max8997->saved_states[rid]);
-
-	if (rid == MAX8997_LDO1 ||
-			rid == MAX8997_LDO10 ||
-			rid == MAX8997_LDO21) {
-		dev_dbg(&rdev->dev, "Conditional Power-Off for %s\n",
-				rdev->desc->name);
-		return max8997_update_reg(i2c, reg, 0x40, mask);
-	}
-
-	dev_dbg(&rdev->dev, "Full Power-Off for %s (%xh -> %xh)\n",
-			rdev->desc->name, max8997->saved_states[rid] & mask,
-			(~pattern) & mask);
-	return max8997_update_reg(i2c, reg, ~pattern, mask);
-}
-
-static struct regulator_ops max8997_ldo_ops = {
-	.list_voltage		= max8997_list_voltage,
-	.is_enabled		= max8997_reg_is_enabled,
-	.enable			= max8997_reg_enable,
-	.disable		= max8997_reg_disable,
-	.get_voltage_sel	= max8997_get_voltage_sel,
-	.set_voltage		= max8997_set_voltage_ldobuck,
-	.set_suspend_disable	= max8997_reg_disable_suspend,
-};
-
-static struct regulator_ops max8997_buck_ops = {
-	.list_voltage		= max8997_list_voltage,
-	.is_enabled		= max8997_reg_is_enabled,
-	.enable			= max8997_reg_enable,
-	.disable		= max8997_reg_disable,
-	.get_voltage_sel	= max8997_get_voltage_sel,
-	.set_voltage		= max8997_set_voltage_buck,
-	.set_voltage_time_sel	= max8997_set_voltage_buck_time_sel,
-	.set_suspend_disable	= max8997_reg_disable_suspend,
-};
-
-static struct regulator_ops max8997_fixedvolt_ops = {
-	.list_voltage		= max8997_list_voltage,
-	.is_enabled		= max8997_reg_is_enabled,
-	.enable			= max8997_reg_enable,
-	.disable		= max8997_reg_disable,
-	.set_suspend_disable	= max8997_reg_disable_suspend,
-};
-
-static struct regulator_ops max8997_safeout_ops = {
-	.list_voltage		= regulator_list_voltage_table,
-	.is_enabled		= max8997_reg_is_enabled,
-	.enable			= max8997_reg_enable,
-	.disable		= max8997_reg_disable,
-	.get_voltage_sel	= max8997_get_voltage_sel,
-	.set_voltage_sel	= max8997_set_voltage_safeout_sel,
-	.set_suspend_disable	= max8997_reg_disable_suspend,
-};
-
-static struct regulator_ops max8997_fixedstate_ops = {
-	.list_voltage		= max8997_list_voltage_charger_cv,
-	.get_voltage_sel	= max8997_get_voltage_sel,
-	.set_voltage		= max8997_set_voltage_charger_cv,
-};
-
-static int max8997_set_current_limit(struct regulator_dev *rdev,
-				     int min_uA, int max_uA)
-{
-	unsigned dummy;
-	int rid = rdev_get_id(rdev);
-
-	if (rid != MAX8997_CHARGER && rid != MAX8997_CHARGER_TOPOFF)
-		return -EINVAL;
-
-	/* Reuse max8997_set_voltage_ldobuck to set current_limit. */
-	return max8997_set_voltage_ldobuck(rdev, min_uA, max_uA, &dummy);
-}
-
-static int max8997_get_current_limit(struct regulator_dev *rdev)
-{
-	int sel, rid = rdev_get_id(rdev);
-
-	if (rid != MAX8997_CHARGER && rid != MAX8997_CHARGER_TOPOFF)
-		return -EINVAL;
-
-	sel = max8997_get_voltage_sel(rdev);
-	if (sel < 0)
-		return sel;
-
-	/* Reuse max8997_list_voltage to get current_limit. */
-	return max8997_list_voltage(rdev, sel);
-}
-
-static struct regulator_ops max8997_charger_ops = {
-	.is_enabled		= max8997_reg_is_enabled,
-	.enable			= max8997_reg_enable,
-	.disable		= max8997_reg_disable,
-	.get_current_limit	= max8997_get_current_limit,
-	.set_current_limit	= max8997_set_current_limit,
-};
-
-static struct regulator_ops max8997_charger_fixedstate_ops = {
-	.get_current_limit	= max8997_get_current_limit,
-	.set_current_limit	= max8997_set_current_limit,
-};
-
-#define MAX8997_VOLTAGE_REGULATOR(_name, _ops) {\
-	.name		= #_name,		\
-	.id		= MAX8997_##_name,	\
-	.ops		= &_ops,		\
-	.type		= REGULATOR_VOLTAGE,	\
-	.owner		= THIS_MODULE,		\
-}
-
-#define MAX8997_CURRENT_REGULATOR(_name, _ops) {\
-	.name		= #_name,		\
-	.id		= MAX8997_##_name,	\
-	.ops		= &_ops,		\
-	.type		= REGULATOR_CURRENT,	\
-	.owner		= THIS_MODULE,		\
-}
-
-static struct regulator_desc regulators[] = {
-	MAX8997_VOLTAGE_REGULATOR(LDO1, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO2, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO3, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO4, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO5, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO6, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO7, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO8, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO9, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO10, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO11, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO12, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO13, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO14, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO15, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO16, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO17, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO18, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(LDO21, max8997_ldo_ops),
-	MAX8997_VOLTAGE_REGULATOR(BUCK1, max8997_buck_ops),
-	MAX8997_VOLTAGE_REGULATOR(BUCK2, max8997_buck_ops),
-	MAX8997_VOLTAGE_REGULATOR(BUCK3, max8997_buck_ops),
-	MAX8997_VOLTAGE_REGULATOR(BUCK4, max8997_buck_ops),
-	MAX8997_VOLTAGE_REGULATOR(BUCK5, max8997_buck_ops),
-	MAX8997_VOLTAGE_REGULATOR(BUCK6, max8997_fixedvolt_ops),
-	MAX8997_VOLTAGE_REGULATOR(BUCK7, max8997_buck_ops),
-	MAX8997_VOLTAGE_REGULATOR(EN32KHZ_AP, max8997_fixedvolt_ops),
-	MAX8997_VOLTAGE_REGULATOR(EN32KHZ_CP, max8997_fixedvolt_ops),
-	MAX8997_VOLTAGE_REGULATOR(ENVICHG, max8997_fixedvolt_ops),
-	MAX8997_VOLTAGE_REGULATOR(ESAFEOUT1, max8997_safeout_ops),
-	MAX8997_VOLTAGE_REGULATOR(ESAFEOUT2, max8997_safeout_ops),
-	MAX8997_VOLTAGE_REGULATOR(CHARGER_CV, max8997_fixedstate_ops),
-	MAX8997_CURRENT_REGULATOR(CHARGER, max8997_charger_ops),
-	MAX8997_CURRENT_REGULATOR(CHARGER_TOPOFF,
-				  max8997_charger_fixedstate_ops),
-};
-
-#ifdef CONFIG_OF
-static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,
-			struct max8997_platform_data *pdata,
-			struct device_node *pmic_np)
-{
-	int i, gpio;
-
-	for (i = 0; i < 3; i++) {
-		gpio = of_get_named_gpio(pmic_np,
-					"max8997,pmic-buck125-dvs-gpios", i);
-		if (!gpio_is_valid(gpio)) {
-			dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);
-			return -EINVAL;
-		}
-		pdata->buck125_gpios[i] = gpio;
-	}
-	return 0;
-}
-
-static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
-					struct max8997_platform_data *pdata)
-{
-	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
-	struct device_node *pmic_np, *regulators_np, *reg_np;
-	struct max8997_regulator_data *rdata;
-	unsigned int i, dvs_voltage_nr = 1, ret;
-
-	pmic_np = iodev->dev->of_node;
-	if (!pmic_np) {
-		dev_err(&pdev->dev, "could not find pmic sub-node\n");
-		return -ENODEV;
-	}
-
-	regulators_np = of_get_child_by_name(pmic_np, "regulators");
-	if (!regulators_np) {
-		dev_err(&pdev->dev, "could not find regulators sub-node\n");
-		return -EINVAL;
-	}
-
-	/* count the number of regulators to be supported in pmic */
-	pdata->num_regulators = of_get_child_count(regulators_np);
-
-	rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
-				pdata->num_regulators, GFP_KERNEL);
-	if (!rdata) {
-		of_node_put(regulators_np);
-		return -ENOMEM;
-	}
-
-	pdata->regulators = rdata;
-	for_each_child_of_node(regulators_np, reg_np) {
-		for (i = 0; i < ARRAY_SIZE(regulators); i++)
-			if (!of_node_cmp(reg_np->name, regulators[i].name))
-				break;
-
-		if (i == ARRAY_SIZE(regulators)) {
-			dev_warn(&pdev->dev, "don't know how to configure regulator %s\n",
-				 reg_np->name);
-			continue;
-		}
-
-		rdata->id = i;
-		rdata->initdata = of_get_regulator_init_data(&pdev->dev,
-							     reg_np,
-							     &regulators[i]);
-		rdata->reg_node = reg_np;
-		rdata++;
-	}
-	of_node_put(regulators_np);
-
-	if (of_get_property(pmic_np, "max8997,pmic-buck1-uses-gpio-dvs", NULL))
-		pdata->buck1_gpiodvs = true;
-
-	if (of_get_property(pmic_np, "max8997,pmic-buck2-uses-gpio-dvs", NULL))
-		pdata->buck2_gpiodvs = true;
-
-	if (of_get_property(pmic_np, "max8997,pmic-buck5-uses-gpio-dvs", NULL))
-		pdata->buck5_gpiodvs = true;
-
-	if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
-						pdata->buck5_gpiodvs) {
-		ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);
-		if (ret)
-			return -EINVAL;
-
-		if (of_property_read_u32(pmic_np,
-				"max8997,pmic-buck125-default-dvs-idx",
-				&pdata->buck125_default_idx)) {
-			pdata->buck125_default_idx = 0;
-		} else {
-			if (pdata->buck125_default_idx >= 8) {
-				pdata->buck125_default_idx = 0;
-				dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n");
-			}
-		}
-
-		if (of_get_property(pmic_np,
-			"max8997,pmic-ignore-gpiodvs-side-effect", NULL))
-			pdata->ignore_gpiodvs_side_effect = true;
-
-		dvs_voltage_nr = 8;
-	}
-
-	if (of_property_read_u32_array(pmic_np,
-				"max8997,pmic-buck1-dvs-voltage",
-				pdata->buck1_voltage, dvs_voltage_nr)) {
-		dev_err(&pdev->dev, "buck1 voltages not specified\n");
-		return -EINVAL;
-	}
-
-	if (of_property_read_u32_array(pmic_np,
-				"max8997,pmic-buck2-dvs-voltage",
-				pdata->buck2_voltage, dvs_voltage_nr)) {
-		dev_err(&pdev->dev, "buck2 voltages not specified\n");
-		return -EINVAL;
-	}
-
-	if (of_property_read_u32_array(pmic_np,
-				"max8997,pmic-buck5-dvs-voltage",
-				pdata->buck5_voltage, dvs_voltage_nr)) {
-		dev_err(&pdev->dev, "buck5 voltages not specified\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-#else
-static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
-					struct max8997_platform_data *pdata)
-{
-	return 0;
-}
-#endif /* CONFIG_OF */
-
-static int max8997_pmic_probe(struct platform_device *pdev)
-{
-	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
-	struct max8997_platform_data *pdata = iodev->pdata;
-	struct regulator_config config = { };
-	struct regulator_dev *rdev;
-	struct max8997_data *max8997;
-	struct i2c_client *i2c;
-	int i, ret, nr_dvs;
-	u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0;
-
-	if (!pdata) {
-		dev_err(&pdev->dev, "No platform init data supplied.\n");
-		return -ENODEV;
-	}
-
-	if (iodev->dev->of_node) {
-		ret = max8997_pmic_dt_parse_pdata(pdev, pdata);
-		if (ret)
-			return ret;
-	}
-
-	max8997 = devm_kzalloc(&pdev->dev, sizeof(struct max8997_data),
-			       GFP_KERNEL);
-	if (!max8997)
-		return -ENOMEM;
-
-	max8997->dev = &pdev->dev;
-	max8997->iodev = iodev;
-	max8997->num_regulators = pdata->num_regulators;
-	platform_set_drvdata(pdev, max8997);
-	i2c = max8997->iodev->i2c;
-
-	max8997->buck125_gpioindex = pdata->buck125_default_idx;
-	max8997->buck1_gpiodvs = pdata->buck1_gpiodvs;
-	max8997->buck2_gpiodvs = pdata->buck2_gpiodvs;
-	max8997->buck5_gpiodvs = pdata->buck5_gpiodvs;
-	memcpy(max8997->buck125_gpios, pdata->buck125_gpios, sizeof(int) * 3);
-	max8997->ignore_gpiodvs_side_effect = pdata->ignore_gpiodvs_side_effect;
-
-	nr_dvs = (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
-			pdata->buck5_gpiodvs) ? 8 : 1;
-
-	for (i = 0; i < nr_dvs; i++) {
-		max8997->buck1_vol[i] = ret =
-			max8997_get_voltage_proper_val(
-					&buck1245_voltage_map_desc,
-					pdata->buck1_voltage[i],
-					pdata->buck1_voltage[i] +
-					buck1245_voltage_map_desc.step);
-		if (ret < 0)
-			return ret;
-
-		max8997->buck2_vol[i] = ret =
-			max8997_get_voltage_proper_val(
-					&buck1245_voltage_map_desc,
-					pdata->buck2_voltage[i],
-					pdata->buck2_voltage[i] +
-					buck1245_voltage_map_desc.step);
-		if (ret < 0)
-			return ret;
-
-		max8997->buck5_vol[i] = ret =
-			max8997_get_voltage_proper_val(
-					&buck1245_voltage_map_desc,
-					pdata->buck5_voltage[i],
-					pdata->buck5_voltage[i] +
-					buck1245_voltage_map_desc.step);
-		if (ret < 0)
-			return ret;
-
-		if (max_buck1 < max8997->buck1_vol[i])
-			max_buck1 = max8997->buck1_vol[i];
-		if (max_buck2 < max8997->buck2_vol[i])
-			max_buck2 = max8997->buck2_vol[i];
-		if (max_buck5 < max8997->buck5_vol[i])
-			max_buck5 = max8997->buck5_vol[i];
-	}
-
-	/* For the safety, set max voltage before setting up */
-	for (i = 0; i < 8; i++) {
-		max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
-				max_buck1, 0x3f);
-		max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
-				max_buck2, 0x3f);
-		max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
-				max_buck5, 0x3f);
-	}
-
-	/* Initialize all the DVS related BUCK registers */
-	for (i = 0; i < nr_dvs; i++) {
-		max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
-				max8997->buck1_vol[i],
-				0x3f);
-		max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
-				max8997->buck2_vol[i],
-				0x3f);
-		max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
-				max8997->buck5_vol[i],
-				0x3f);
-	}
-
-	/*
-	 * If buck 1, 2, and 5 do not care DVS GPIO settings, ignore them.
-	 * If at least one of them cares, set gpios.
-	 */
-	if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
-			pdata->buck5_gpiodvs) {
-
-		if (!gpio_is_valid(pdata->buck125_gpios[0]) ||
-				!gpio_is_valid(pdata->buck125_gpios[1]) ||
-				!gpio_is_valid(pdata->buck125_gpios[2])) {
-			dev_err(&pdev->dev, "GPIO NOT VALID\n");
-			return -EINVAL;
-		}
-
-		ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[0],
-					"MAX8997 SET1");
-		if (ret)
-			return ret;
-
-		ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[1],
-					"MAX8997 SET2");
-		if (ret)
-			return ret;
-
-		ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[2],
-				"MAX8997 SET3");
-		if (ret)
-			return ret;
-
-		gpio_direction_output(pdata->buck125_gpios[0],
-				(max8997->buck125_gpioindex >> 2)
-				& 0x1); /* SET1 */
-		gpio_direction_output(pdata->buck125_gpios[1],
-				(max8997->buck125_gpioindex >> 1)
-				& 0x1); /* SET2 */
-		gpio_direction_output(pdata->buck125_gpios[2],
-				(max8997->buck125_gpioindex >> 0)
-				& 0x1); /* SET3 */
-	}
-
-	/* DVS-GPIO disabled */
-	max8997_update_reg(i2c, MAX8997_REG_BUCK1CTRL, (pdata->buck1_gpiodvs) ?
-			(1 << 1) : (0 << 1), 1 << 1);
-	max8997_update_reg(i2c, MAX8997_REG_BUCK2CTRL, (pdata->buck2_gpiodvs) ?
-			(1 << 1) : (0 << 1), 1 << 1);
-	max8997_update_reg(i2c, MAX8997_REG_BUCK5CTRL, (pdata->buck5_gpiodvs) ?
-			(1 << 1) : (0 << 1), 1 << 1);
-
-	/* Misc Settings */
-	max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
-	max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
-
-	for (i = 0; i < pdata->num_regulators; i++) {
-		const struct voltage_map_desc *desc;
-		int id = pdata->regulators[i].id;
-
-		desc = reg_voltage_map[id];
-		if (desc) {
-			regulators[id].n_voltages =
-				(desc->max - desc->min) / desc->step + 1;
-		} else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2) {
-			regulators[id].volt_table = safeoutvolt;
-			regulators[id].n_voltages = ARRAY_SIZE(safeoutvolt);
-		} else if (id == MAX8997_CHARGER_CV) {
-			regulators[id].n_voltages = 16;
-		}
-
-		config.dev = max8997->dev;
-		config.init_data = pdata->regulators[i].initdata;
-		config.driver_data = max8997;
-		config.of_node = pdata->regulators[i].reg_node;
-
-		rdev = devm_regulator_register(&pdev->dev, &regulators[id],
-					       &config);
-		if (IS_ERR(rdev)) {
-			dev_err(max8997->dev, "regulator init failed for %d\n",
-					id);
-			return PTR_ERR(rdev);
-		}
-	}
-
-	return 0;
-}
-
-static const struct platform_device_id max8997_pmic_id[] = {
-	{ "max8997-pmic", 0},
-	{ },
-};
-MODULE_DEVICE_TABLE(platform, max8997_pmic_id);
-
-static struct platform_driver max8997_pmic_driver = {
-	.driver = {
-		.name = "max8997-pmic",
-	},
-	.probe = max8997_pmic_probe,
-	.id_table = max8997_pmic_id,
-};
-
-static int __init max8997_pmic_init(void)
-{
-	return platform_driver_register(&max8997_pmic_driver);
-}
-subsys_initcall(max8997_pmic_init);
-
-static void __exit max8997_pmic_cleanup(void)
-{
-	platform_driver_unregister(&max8997_pmic_driver);
-}
-module_exit(max8997_pmic_cleanup);
-
-MODULE_DESCRIPTION("MAXIM 8997/8966 Regulator Driver");
-MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 6b0aa80..cd828db 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -45,9 +45,9 @@
 	/* Voltage change possible? */
 	if (constraints->min_uV != constraints->max_uV)
 		constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
-	/* Only one voltage?  Then make sure it's set. */
-	if (constraints->min_uV && constraints->max_uV &&
-	    constraints->min_uV == constraints->max_uV)
+
+	/* Do we have a voltage range? If so, try to apply it. */
+	if (constraints->min_uV && constraints->max_uV)
 		constraints->apply_uV = true;
 
 	if (!of_property_read_u32(np, "regulator-microvolt-offset", &pval))
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 6efc7ee..f11d41d 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -944,6 +944,8 @@
 			if (id == PALMAS_REG_LDO9) {
 				desc->ops = &palmas_ops_ldo9;
 				desc->bypass_reg = desc->enable_reg;
+				desc->bypass_val_on =
+						PALMAS_LDO9_CTRL_LDO_BYPASS_EN;
 				desc->bypass_mask =
 						PALMAS_LDO9_CTRL_LDO_BYPASS_EN;
 			}
@@ -1055,6 +1057,8 @@
 			    id == TPS65917_REG_LDO2) {
 				desc->ops = &tps65917_ops_ldo_1_2;
 				desc->bypass_reg = desc->enable_reg;
+				desc->bypass_val_on =
+						TPS65917_LDO1_CTRL_BYPASS_EN;
 				desc->bypass_mask =
 						TPS65917_LDO1_CTRL_BYPASS_EN;
 			}
@@ -1206,6 +1210,7 @@
 				desc->enable_mask = SMPS10_BOOST_EN;
 			desc->bypass_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
 							    PALMAS_SMPS10_CTRL);
+			desc->bypass_val_on = SMPS10_BYPASS_EN;
 			desc->bypass_mask = SMPS10_BYPASS_EN;
 			desc->min_uV = 3750000;
 			desc->uV_step = 1250000;
@@ -1462,10 +1467,10 @@
 	.ldo_register = tps65917_ldo_registration,
 };
 
-static void palmas_dt_to_pdata(struct device *dev,
-			       struct device_node *node,
-			       struct palmas_pmic_platform_data *pdata,
-			       struct palmas_pmic_driver_data *ddata)
+static int palmas_dt_to_pdata(struct device *dev,
+			      struct device_node *node,
+			      struct palmas_pmic_platform_data *pdata,
+			      struct palmas_pmic_driver_data *ddata)
 {
 	struct device_node *regulators;
 	u32 prop;
@@ -1474,7 +1479,7 @@
 	regulators = of_get_child_by_name(node, "regulators");
 	if (!regulators) {
 		dev_info(dev, "regulator node not found\n");
-		return;
+		return 0;
 	}
 
 	ret = of_regulator_match(dev, regulators, ddata->palmas_matches,
@@ -1482,25 +1487,29 @@
 	of_node_put(regulators);
 	if (ret < 0) {
 		dev_err(dev, "Error parsing regulator init data: %d\n", ret);
-		return;
+		return 0;
 	}
 
 	for (idx = 0; idx < ddata->max_reg; idx++) {
-		if (!ddata->palmas_matches[idx].init_data ||
-		    !ddata->palmas_matches[idx].of_node)
+		static struct of_regulator_match *match;
+		struct palmas_reg_init *rinit;
+		struct device_node *np;
+
+		match = &ddata->palmas_matches[idx];
+		np = match->of_node;
+
+		if (!match->init_data || !np)
 			continue;
 
-		pdata->reg_data[idx] = ddata->palmas_matches[idx].init_data;
+		rinit = devm_kzalloc(dev, sizeof(*rinit), GFP_KERNEL);
+		if (!rinit)
+			return -ENOMEM;
 
-		pdata->reg_init[idx] = devm_kzalloc(dev,
-				sizeof(struct palmas_reg_init), GFP_KERNEL);
+		pdata->reg_data[idx] = match->init_data;
+		pdata->reg_init[idx] = rinit;
 
-		pdata->reg_init[idx]->warm_reset =
-			of_property_read_bool(ddata->palmas_matches[idx].of_node,
-					      "ti,warm-reset");
-
-		ret = of_property_read_u32(ddata->palmas_matches[idx].of_node,
-					   "ti,roof-floor", &prop);
+		rinit->warm_reset = of_property_read_bool(np, "ti,warm-reset");
+		ret = of_property_read_u32(np, "ti,roof-floor", &prop);
 		/* EINVAL: Property not found */
 		if (ret != -EINVAL) {
 			int econtrol;
@@ -1522,31 +1531,29 @@
 					WARN_ON(1);
 					dev_warn(dev,
 						 "%s: Invalid roof-floor option: %u\n",
-					     palmas_matches[idx].name, prop);
+						 match->name, prop);
 					break;
 				}
 			}
-			pdata->reg_init[idx]->roof_floor = econtrol;
+			rinit->roof_floor = econtrol;
 		}
 
-		ret = of_property_read_u32(ddata->palmas_matches[idx].of_node,
-					   "ti,mode-sleep", &prop);
+		ret = of_property_read_u32(np, "ti,mode-sleep", &prop);
 		if (!ret)
-			pdata->reg_init[idx]->mode_sleep = prop;
+			rinit->mode_sleep = prop;
 
-		ret = of_property_read_bool(ddata->palmas_matches[idx].of_node,
-					    "ti,smps-range");
+		ret = of_property_read_bool(np, "ti,smps-range");
 		if (ret)
-			pdata->reg_init[idx]->vsel =
-				PALMAS_SMPS12_VOLTAGE_RANGE;
+			rinit->vsel = PALMAS_SMPS12_VOLTAGE_RANGE;
 
 		if (idx == PALMAS_REG_LDO8)
 			pdata->enable_ldo8_tracking = of_property_read_bool(
-						ddata->palmas_matches[idx].of_node,
-						"ti,enable-ldo8-tracking");
+						np, "ti,enable-ldo8-tracking");
 	}
 
 	pdata->ldo6_vibrator = of_property_read_bool(node, "ti,ldo6-vibrator");
+
+	return 0;
 }
 
 static const struct of_device_id of_palmas_match_tbl[] = {
@@ -1628,7 +1635,9 @@
 	platform_set_drvdata(pdev, pmic);
 	pmic->palmas->pmic_ddata = driver_data;
 
-	palmas_dt_to_pdata(&pdev->dev, node, pdata, driver_data);
+	ret = palmas_dt_to_pdata(&pdev->dev, node, pdata, driver_data);
+	if (ret)
+		return ret;
 
 	ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, &reg);
 	if (ret)
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
new file mode 100644
index 0000000..d710756
--- /dev/null
+++ b/drivers/regulator/pv88080-regulator.c
@@ -0,0 +1,419 @@
+/*
+ * pv88080-regulator.c - Regulator device driver for PV88080
+ * Copyright (C) 2016  Powerventure Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include "pv88080-regulator.h"
+
+#define PV88080_MAX_REGULATORS	3
+
+/* PV88080 REGULATOR IDs */
+enum {
+	/* BUCKs */
+	PV88080_ID_BUCK1,
+	PV88080_ID_BUCK2,
+	PV88080_ID_BUCK3,
+};
+
+struct pv88080_regulator {
+	struct regulator_desc desc;
+	/* Current limiting */
+	unsigned int n_current_limits;
+	const int *current_limits;
+	unsigned int limit_mask;
+	unsigned int conf;
+	unsigned int conf2;
+	unsigned int conf5;
+};
+
+struct pv88080 {
+	struct device *dev;
+	struct regmap *regmap;
+	struct regulator_dev *rdev[PV88080_MAX_REGULATORS];
+};
+
+struct pv88080_buck_voltage {
+	int min_uV;
+	int max_uV;
+	int uV_step;
+};
+
+static const struct regmap_config pv88080_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+/* Current limits array (in uA) for BUCK1, BUCK2, BUCK3.
+ * Entry indexes correspond to register values.
+ */
+
+static const int pv88080_buck1_limits[] = {
+	3230000, 5130000, 6960000, 8790000
+};
+
+static const int pv88080_buck23_limits[] = {
+	1496000, 2393000, 3291000, 4189000
+};
+
+static const struct pv88080_buck_voltage pv88080_buck_vol[2] = {
+	{
+		.min_uV = 600000,
+		.max_uV = 1393750,
+		.uV_step = 6250,
+	},
+	{
+		.min_uV = 1400000,
+		.max_uV = 2193750,
+		.uV_step = 6250,
+	},
+};
+
+static unsigned int pv88080_buck_get_mode(struct regulator_dev *rdev)
+{
+	struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+	unsigned int data;
+	int ret, mode = 0;
+
+	ret = regmap_read(rdev->regmap, info->conf, &data);
+	if (ret < 0)
+		return ret;
+
+	switch (data & PV88080_BUCK1_MODE_MASK) {
+	case PV88080_BUCK_MODE_SYNC:
+		mode = REGULATOR_MODE_FAST;
+		break;
+	case PV88080_BUCK_MODE_AUTO:
+		mode = REGULATOR_MODE_NORMAL;
+		break;
+	case PV88080_BUCK_MODE_SLEEP:
+		mode = REGULATOR_MODE_STANDBY;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return mode;
+}
+
+static int pv88080_buck_set_mode(struct regulator_dev *rdev,
+					unsigned int mode)
+{
+	struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+	int val = 0;
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		val = PV88080_BUCK_MODE_SYNC;
+		break;
+	case REGULATOR_MODE_NORMAL:
+		val = PV88080_BUCK_MODE_AUTO;
+		break;
+	case REGULATOR_MODE_STANDBY:
+		val = PV88080_BUCK_MODE_SLEEP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return regmap_update_bits(rdev->regmap, info->conf,
+					PV88080_BUCK1_MODE_MASK, val);
+}
+
+static int pv88080_set_current_limit(struct regulator_dev *rdev, int min,
+				    int max)
+{
+	struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+	int i;
+
+	/* search for closest to maximum */
+	for (i = info->n_current_limits - 1; i >= 0; i--) {
+		if (min <= info->current_limits[i]
+			&& max >= info->current_limits[i]) {
+				return regmap_update_bits(rdev->regmap,
+					info->conf,
+					info->limit_mask,
+					i << PV88080_BUCK1_ILIM_SHIFT);
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int pv88080_get_current_limit(struct regulator_dev *rdev)
+{
+	struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+	unsigned int data;
+	int ret;
+
+	ret = regmap_read(rdev->regmap, info->conf, &data);
+	if (ret < 0)
+		return ret;
+
+	data = (data & info->limit_mask) >> PV88080_BUCK1_ILIM_SHIFT;
+	return info->current_limits[data];
+}
+
+static struct regulator_ops pv88080_buck_ops = {
+	.get_mode = pv88080_buck_get_mode,
+	.set_mode = pv88080_buck_set_mode,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.list_voltage = regulator_list_voltage_linear,
+	.set_current_limit = pv88080_set_current_limit,
+	.get_current_limit = pv88080_get_current_limit,
+};
+
+#define PV88080_BUCK(chip, regl_name, min, step, max, limits_array) \
+{\
+	.desc	=	{\
+		.id = chip##_ID_##regl_name,\
+		.name = __stringify(chip##_##regl_name),\
+		.of_match = of_match_ptr(#regl_name),\
+		.regulators_node = of_match_ptr("regulators"),\
+		.type = REGULATOR_VOLTAGE,\
+		.owner = THIS_MODULE,\
+		.ops = &pv88080_buck_ops,\
+		.min_uV = min, \
+		.uV_step = step, \
+		.n_voltages = ((max) - (min))/(step) + 1, \
+		.enable_reg = PV88080_REG_##regl_name##_CONF0, \
+		.enable_mask = PV88080_##regl_name##_EN, \
+		.vsel_reg = PV88080_REG_##regl_name##_CONF0, \
+		.vsel_mask = PV88080_V##regl_name##_MASK, \
+	},\
+	.current_limits = limits_array, \
+	.n_current_limits = ARRAY_SIZE(limits_array), \
+	.limit_mask = PV88080_##regl_name##_ILIM_MASK, \
+	.conf = PV88080_REG_##regl_name##_CONF1, \
+	.conf2 = PV88080_REG_##regl_name##_CONF2, \
+	.conf5 = PV88080_REG_##regl_name##_CONF5, \
+}
+
+static struct pv88080_regulator pv88080_regulator_info[] = {
+	PV88080_BUCK(PV88080, BUCK1, 600000, 6250, 1393750,
+		pv88080_buck1_limits),
+	PV88080_BUCK(PV88080, BUCK2, 600000, 6250, 1393750,
+		pv88080_buck23_limits),
+	PV88080_BUCK(PV88080, BUCK3, 600000, 6250, 1393750,
+		pv88080_buck23_limits),
+};
+
+static irqreturn_t pv88080_irq_handler(int irq, void *data)
+{
+	struct pv88080 *chip = data;
+	int i, reg_val, err, ret = IRQ_NONE;
+
+	err = regmap_read(chip->regmap, PV88080_REG_EVENT_A, &reg_val);
+	if (err < 0)
+		goto error_i2c;
+
+	if (reg_val & PV88080_E_VDD_FLT) {
+		for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+			if (chip->rdev[i] != NULL) {
+				regulator_notifier_call_chain(chip->rdev[i],
+					REGULATOR_EVENT_UNDER_VOLTAGE,
+					NULL);
+			}
+		}
+
+		err = regmap_write(chip->regmap, PV88080_REG_EVENT_A,
+			PV88080_E_VDD_FLT);
+		if (err < 0)
+			goto error_i2c;
+
+		ret = IRQ_HANDLED;
+	}
+
+	if (reg_val & PV88080_E_OVER_TEMP) {
+		for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+			if (chip->rdev[i] != NULL) {
+				regulator_notifier_call_chain(chip->rdev[i],
+					REGULATOR_EVENT_OVER_TEMP,
+					NULL);
+			}
+		}
+
+		err = regmap_write(chip->regmap, PV88080_REG_EVENT_A,
+			PV88080_E_OVER_TEMP);
+		if (err < 0)
+			goto error_i2c;
+
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+
+error_i2c:
+	dev_err(chip->dev, "I2C error : %d\n", err);
+	return IRQ_NONE;
+}
+
+/*
+ * I2C driver interface functions
+ */
+static int pv88080_i2c_probe(struct i2c_client *i2c,
+		const struct i2c_device_id *id)
+{
+	struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
+	struct pv88080 *chip;
+	struct regulator_config config = { };
+	int i, error, ret;
+	unsigned int conf2, conf5;
+
+	chip = devm_kzalloc(&i2c->dev, sizeof(struct pv88080), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &i2c->dev;
+	chip->regmap = devm_regmap_init_i2c(i2c, &pv88080_regmap_config);
+	if (IS_ERR(chip->regmap)) {
+		error = PTR_ERR(chip->regmap);
+		dev_err(chip->dev, "Failed to allocate register map: %d\n",
+			error);
+		return error;
+	}
+
+	i2c_set_clientdata(i2c, chip);
+
+	if (i2c->irq != 0) {
+		ret = regmap_write(chip->regmap, PV88080_REG_MASK_A, 0xFF);
+		if (ret < 0) {
+			dev_err(chip->dev,
+				"Failed to mask A reg: %d\n", ret);
+			return ret;
+		}
+		ret = regmap_write(chip->regmap, PV88080_REG_MASK_B, 0xFF);
+		if (ret < 0) {
+			dev_err(chip->dev,
+				"Failed to mask B reg: %d\n", ret);
+			return ret;
+		}
+		ret = regmap_write(chip->regmap, PV88080_REG_MASK_C, 0xFF);
+		if (ret < 0) {
+			dev_err(chip->dev,
+				"Failed to mask C reg: %d\n", ret);
+			return ret;
+		}
+
+		ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+					pv88080_irq_handler,
+					IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+					"pv88080", chip);
+		if (ret != 0) {
+			dev_err(chip->dev, "Failed to request IRQ: %d\n",
+				i2c->irq);
+			return ret;
+		}
+
+		ret = regmap_update_bits(chip->regmap, PV88080_REG_MASK_A,
+			PV88080_M_VDD_FLT | PV88080_M_OVER_TEMP, 0);
+		if (ret < 0) {
+			dev_err(chip->dev,
+				"Failed to update mask reg: %d\n", ret);
+			return ret;
+		}
+
+	} else {
+		dev_warn(chip->dev, "No IRQ configured\n");
+	}
+
+	config.dev = chip->dev;
+	config.regmap = chip->regmap;
+
+	for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+		if (init_data)
+			config.init_data = &init_data[i];
+
+		ret = regmap_read(chip->regmap,
+			pv88080_regulator_info[i].conf2, &conf2);
+		if (ret < 0)
+			return ret;
+
+		conf2 = ((conf2 >> PV88080_BUCK_VDAC_RANGE_SHIFT) &
+			PV88080_BUCK_VDAC_RANGE_MASK);
+
+		ret = regmap_read(chip->regmap,
+			pv88080_regulator_info[i].conf5, &conf5);
+		if (ret < 0)
+			return ret;
+
+		conf5 = ((conf5 >> PV88080_BUCK_VRANGE_GAIN_SHIFT) &
+			PV88080_BUCK_VRANGE_GAIN_MASK);
+
+		pv88080_regulator_info[i].desc.min_uV =
+			pv88080_buck_vol[conf2].min_uV * (conf5+1);
+		pv88080_regulator_info[i].desc.uV_step =
+			pv88080_buck_vol[conf2].uV_step * (conf5+1);
+		pv88080_regulator_info[i].desc.n_voltages =
+			((pv88080_buck_vol[conf2].max_uV * (conf5+1))
+			- (pv88080_regulator_info[i].desc.min_uV))
+			/(pv88080_regulator_info[i].desc.uV_step) + 1;
+
+		config.driver_data = (void *)&pv88080_regulator_info[i];
+		chip->rdev[i] = devm_regulator_register(chip->dev,
+			&pv88080_regulator_info[i].desc, &config);
+		if (IS_ERR(chip->rdev[i])) {
+			dev_err(chip->dev,
+				"Failed to register PV88080 regulator\n");
+			return PTR_ERR(chip->rdev[i]);
+		}
+	}
+
+	return 0;
+}
+
+static const struct i2c_device_id pv88080_i2c_id[] = {
+	{"pv88080", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, pv88080_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pv88080_dt_ids[] = {
+	{ .compatible = "pvs,pv88080", .data = &pv88080_i2c_id[0] },
+	{},
+};
+MODULE_DEVICE_TABLE(of, pv88080_dt_ids);
+#endif
+
+static struct i2c_driver pv88080_regulator_driver = {
+	.driver = {
+		.name = "pv88080",
+		.of_match_table = of_match_ptr(pv88080_dt_ids),
+	},
+	.probe = pv88080_i2c_probe,
+	.id_table = pv88080_i2c_id,
+};
+
+module_i2c_driver(pv88080_regulator_driver);
+
+MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
+MODULE_DESCRIPTION("Regulator device driver for Powerventure PV88080");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/pv88080-regulator.h b/drivers/regulator/pv88080-regulator.h
new file mode 100644
index 0000000..5e9afde
--- /dev/null
+++ b/drivers/regulator/pv88080-regulator.h
@@ -0,0 +1,92 @@
+/*
+ * pv88080-regulator.h - Regulator definitions for PV88080
+ * Copyright (C) 2016 Powerventure Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PV88080_REGISTERS_H__
+#define __PV88080_REGISTERS_H__
+
+/* System Control and Event Registers */
+#define	PV88080_REG_EVENT_A			0x04
+#define	PV88080_REG_MASK_A			0x09
+#define	PV88080_REG_MASK_B			0x0a
+#define	PV88080_REG_MASK_C			0x0b
+
+/* Regulator Registers */
+#define	PV88080_REG_BUCK1_CONF0			0x27
+#define	PV88080_REG_BUCK1_CONF1			0x28
+#define	PV88080_REG_BUCK1_CONF2			0x59
+#define	PV88080_REG_BUCK1_CONF5			0x5c
+#define	PV88080_REG_BUCK2_CONF0			0x29
+#define	PV88080_REG_BUCK2_CONF1			0x2a
+#define	PV88080_REG_BUCK2_CONF2			0x61
+#define	PV88080_REG_BUCK2_CONF5			0x64
+#define	PV88080_REG_BUCK3_CONF0			0x2b
+#define	PV88080_REG_BUCK3_CONF1			0x2c
+#define	PV88080_REG_BUCK3_CONF2			0x69
+#define	PV88080_REG_BUCK3_CONF5			0x6c
+
+/* PV88080_REG_EVENT_A (addr=0x04) */
+#define	PV88080_E_VDD_FLT				0x01
+#define	PV88080_E_OVER_TEMP			0x02
+
+/* PV88080_REG_MASK_A (addr=0x09) */
+#define	PV88080_M_VDD_FLT				0x01
+#define	PV88080_M_OVER_TEMP			0x02
+
+/* PV88080_REG_BUCK1_CONF0 (addr=0x27) */
+#define	PV88080_BUCK1_EN				0x80
+#define PV88080_VBUCK1_MASK			0x7F
+/* PV88080_REG_BUCK2_CONF0 (addr=0x29) */
+#define	PV88080_BUCK2_EN				0x80
+#define PV88080_VBUCK2_MASK			0x7F
+/* PV88080_REG_BUCK3_CONF0 (addr=0x2b) */
+#define	PV88080_BUCK3_EN				0x80
+#define PV88080_VBUCK3_MASK			0x7F
+
+/* PV88080_REG_BUCK1_CONF1 (addr=0x28) */
+#define PV88080_BUCK1_ILIM_SHIFT			2
+#define PV88080_BUCK1_ILIM_MASK			0x0C
+#define PV88080_BUCK1_MODE_MASK			0x03
+
+/* PV88080_REG_BUCK2_CONF1 (addr=0x2a) */
+#define PV88080_BUCK2_ILIM_SHIFT			2
+#define PV88080_BUCK2_ILIM_MASK			0x0C
+#define PV88080_BUCK2_MODE_MASK			0x03
+
+/* PV88080_REG_BUCK3_CONF1 (addr=0x2c) */
+#define PV88080_BUCK3_ILIM_SHIFT			2
+#define PV88080_BUCK3_ILIM_MASK			0x0C
+#define PV88080_BUCK3_MODE_MASK			0x03
+
+#define	PV88080_BUCK_MODE_SLEEP			0x00
+#define	PV88080_BUCK_MODE_AUTO			0x01
+#define	PV88080_BUCK_MODE_SYNC			0x02
+
+/* PV88080_REG_BUCK2_CONF2 (addr=0x61) */
+/* PV88080_REG_BUCK3_CONF2 (addr=0x69) */
+#define PV88080_BUCK_VDAC_RANGE_SHIFT			7
+#define PV88080_BUCK_VDAC_RANGE_MASK			0x01
+
+#define PV88080_BUCK_VDAC_RANGE_1			0x00
+#define PV88080_BUCK_VDAC_RANGE_2			0x01
+
+/* PV88080_REG_BUCK2_CONF5 (addr=0x64) */
+/* PV88080_REG_BUCK3_CONF5 (addr=0x6c) */
+#define PV88080_BUCK_VRANGE_GAIN_SHIFT			0
+#define PV88080_BUCK_VRANGE_GAIN_MASK			0x01
+
+#define PV88080_BUCK_VRANGE_GAIN_1			0x00
+#define PV88080_BUCK_VRANGE_GAIN_2			0x01
+
+#endif	/* __PV88080_REGISTERS_H__ */
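
A standalone sketch (not part of the patch) of how the pv88080 probe path
scales the per-range voltage table by the VDAC range bit (CONF2) and the
range-gain bit (CONF5) it reads back from the chip, mirroring the arithmetic
in pv88080_i2c_probe() above; the table values are copied from the driver and
the example itself is illustrative only.

#include <stdio.h>

struct buck_voltage {
	int min_uV;
	int max_uV;
	int uV_step;
};

static const struct buck_voltage buck_vol[2] = {
	{ .min_uV =  600000, .max_uV = 1393750, .uV_step = 6250 },
	{ .min_uV = 1400000, .max_uV = 2193750, .uV_step = 6250 },
};

int main(void)
{
	unsigned int range = 1;	/* PV88080_BUCK_VDAC_RANGE_2 read from CONF2 */
	unsigned int gain = 1;	/* PV88080_BUCK_VRANGE_GAIN_2 read from CONF5 */
	int min_uV = buck_vol[range].min_uV * (gain + 1);
	int uV_step = buck_vol[range].uV_step * (gain + 1);
	int n_voltages = (buck_vol[range].max_uV * (gain + 1) - min_uV)
			 / uV_step + 1;

	/* prints "min 2800000 uV, step 12500 uV, 128 set points" */
	printf("min %d uV, step %d uV, %d set points\n",
	       min_uV, uV_step, n_voltages);
	return 0;
}
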
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 4689d62..fafa348 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -59,18 +59,18 @@
 					 unsigned selector)
 {
 	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
-	unsigned int pwm_reg_period;
+	struct pwm_args pargs;
 	int dutycycle;
 	int ret;
 
-	pwm_reg_period = pwm_get_period(drvdata->pwm);
+	pwm_get_args(drvdata->pwm, &pargs);
 
-	dutycycle = (pwm_reg_period *
+	dutycycle = (pargs.period *
 		    drvdata->duty_cycle_table[selector].dutycycle) / 100;
 
-	ret = pwm_config(drvdata->pwm, dutycycle, pwm_reg_period);
+	ret = pwm_config(drvdata->pwm, dutycycle, pargs.period);
 	if (ret) {
-		dev_err(&rdev->dev, "Failed to configure PWM\n");
+		dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
 		return ret;
 	}
 
@@ -113,18 +113,6 @@
 	return pwm_is_enabled(drvdata->pwm);
 }
 
-/**
- * Continuous voltage call-backs
- */
-static int pwm_voltage_to_duty_cycle_percentage(struct regulator_dev *rdev, int req_uV)
-{
-	int min_uV = rdev->constraints->min_uV;
-	int max_uV = rdev->constraints->max_uV;
-	int diff = max_uV - min_uV;
-
-	return ((req_uV * 100) - (min_uV * 100)) / diff;
-}
-
 static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
 {
 	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
@@ -138,21 +126,42 @@
 {
 	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
 	unsigned int ramp_delay = rdev->constraints->ramp_delay;
-	unsigned int period = pwm_get_period(drvdata->pwm);
-	int duty_cycle;
+	struct pwm_args pargs;
+	unsigned int req_diff = min_uV - rdev->constraints->min_uV;
+	unsigned int diff;
+	unsigned int duty_pulse;
+	u64 req_period;
+	u32 rem;
 	int ret;
 
-	duty_cycle = pwm_voltage_to_duty_cycle_percentage(rdev, min_uV);
+	pwm_get_args(drvdata->pwm, &pargs);
+	diff = rdev->constraints->max_uV - rdev->constraints->min_uV;
 
-	ret = pwm_config(drvdata->pwm, (period / 100) * duty_cycle, period);
+	/* First try to find a duty-cycle time that is an exact factor of
+	 * the PWM period. If (request_diff_to_min * pwm_period) divides
+	 * evenly by voltage_range_diff, that exact quotient is used as the
+	 * duty-cycle time. This keeps the output voltage closer to the
+	 * requested value, since the integer division then introduces no
+	 * rounding loss.
+	 */
+	req_period = req_diff * pargs.period;
+	div_u64_rem(req_period, diff, &rem);
+	if (!rem) {
+		do_div(req_period, diff);
+		duty_pulse = (unsigned int)req_period;
+	} else {
+		duty_pulse = (pargs.period / 100) * ((req_diff * 100) / diff);
+	}
+
+	ret = pwm_config(drvdata->pwm, duty_pulse, pargs.period);
 	if (ret) {
-		dev_err(&rdev->dev, "Failed to configure PWM\n");
+		dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
 		return ret;
 	}
 
 	ret = pwm_enable(drvdata->pwm);
 	if (ret) {
-		dev_err(&rdev->dev, "Failed to enable PWM\n");
+		dev_err(&rdev->dev, "Failed to enable PWM: %d\n", ret);
 		return ret;
 	}
 	drvdata->volt_uV = min_uV;
@@ -200,8 +209,7 @@
 
 	if ((length < sizeof(*duty_cycle_table)) ||
 	    (length % sizeof(*duty_cycle_table))) {
-		dev_err(&pdev->dev,
-			"voltage-table length(%d) is invalid\n",
+		dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n",
 			length);
 		return -EINVAL;
 	}
@@ -214,7 +222,7 @@
 					 (u32 *)duty_cycle_table,
 					 length / sizeof(u32));
 	if (ret) {
-		dev_err(&pdev->dev, "Failed to read voltage-table\n");
+		dev_err(&pdev->dev, "Failed to read voltage-table: %d\n", ret);
 		return ret;
 	}
 
@@ -277,16 +285,24 @@
 
 	drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
 	if (IS_ERR(drvdata->pwm)) {
-		dev_err(&pdev->dev, "Failed to get PWM\n");
-		return PTR_ERR(drvdata->pwm);
+		ret = PTR_ERR(drvdata->pwm);
+		dev_err(&pdev->dev, "Failed to get PWM: %d\n", ret);
+		return ret;
 	}
 
+	/*
+	 * FIXME: pwm_apply_args() should be removed when switching to the
+	 * atomic PWM API.
+	 */
+	pwm_apply_args(drvdata->pwm);
+
 	regulator = devm_regulator_register(&pdev->dev,
 					    &drvdata->desc, &config);
 	if (IS_ERR(regulator)) {
-		dev_err(&pdev->dev, "Failed to register regulator %s\n",
-			drvdata->desc.name);
-		return PTR_ERR(regulator);
+		ret = PTR_ERR(regulator);
+		dev_err(&pdev->dev, "Failed to register regulator %s: %d\n",
+			drvdata->desc.name, ret);
+		return ret;
 	}
 
 	return 0;
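
A standalone sketch (not part of the patch) of the duty-pulse calculation the
reworked pwm_regulator_set_voltage() above uses: when (req_diff * period)
divides evenly by the constraint span the exact quotient is taken, otherwise
it falls back to the percentage estimate. The kernel code uses div_u64_rem()
and do_div(); plain C division is used here for clarity, and duty_pulse_ns()
is an illustrative name.

#include <stdio.h>
#include <stdint.h>

static unsigned int duty_pulse_ns(unsigned int period_ns,
				  unsigned int min_uV, unsigned int max_uV,
				  unsigned int req_uV)
{
	unsigned int req_diff = req_uV - min_uV;
	unsigned int diff = max_uV - min_uV;
	uint64_t req_period = (uint64_t)req_diff * period_ns;

	if (req_period % diff == 0)		/* no rounding loss */
		return (unsigned int)(req_period / diff);

	return (period_ns / 100) * ((req_diff * 100) / diff);
}

int main(void)
{
	/* e.g. 1.0 V requested from a 0.9 V..1.4 V range with a 5000 ns period */
	/* prints "1000 ns" */
	printf("%u ns\n", duty_pulse_ns(5000, 900000, 1400000, 1000000));
	return 0;
}
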
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 88a5dc8..84cce21 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -246,6 +246,7 @@
 
 /* Minimum voltage stepper delay for each step. */
 #define SPMI_FTSMPS_STEP_DELAY		8
+#define SPMI_DEFAULT_STEP_DELAY		20
 
 /*
  * The ratio SPMI_FTSMPS_STEP_MARGIN_NUM/SPMI_FTSMPS_STEP_MARGIN_DEN is used to
@@ -254,13 +255,6 @@
 #define SPMI_FTSMPS_STEP_MARGIN_NUM	4
 #define SPMI_FTSMPS_STEP_MARGIN_DEN	5
 
-/*
- * This voltage in uV is returned by get_voltage functions when there is no way
- * to determine the current voltage level.  It is needed because the regulator
- * framework treats a 0 uV voltage as an error.
- */
-#define VOLTAGE_UNKNOWN 1
-
 /* VSET value to decide the range of ULT SMPS */
 #define ULT_SMPS_RANGE_SPLIT 0x60
 
@@ -539,12 +533,12 @@
 }
 
 static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
-		int min_uV, int max_uV, u8 *range_sel, u8 *voltage_sel,
-		unsigned *selector)
+					 int min_uV, int max_uV)
 {
 	const struct spmi_voltage_range *range;
 	int uV = min_uV;
 	int lim_min_uV, lim_max_uV, i, range_id, range_max_uV;
+	int selector, voltage_sel;
 
 	/* Check if request voltage is outside of physically settable range. */
 	lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
@@ -570,14 +564,13 @@
 
 	range_id = i;
 	range = &vreg->set_points->range[range_id];
-	*range_sel = range->range_sel;
 
 	/*
 	 * Force uV to be an allowed set point by applying a ceiling function to
 	 * the uV value.
 	 */
-	*voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
-	uV = *voltage_sel * range->step_uV + range->min_uV;
+	voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+	uV = voltage_sel * range->step_uV + range->min_uV;
 
 	if (uV > max_uV) {
 		dev_err(vreg->dev,
@@ -587,12 +580,48 @@
 		return -EINVAL;
 	}
 
-	*selector = 0;
+	selector = 0;
 	for (i = 0; i < range_id; i++)
-		*selector += vreg->set_points->range[i].n_voltages;
-	*selector += (uV - range->set_point_min_uV) / range->step_uV;
+		selector += vreg->set_points->range[i].n_voltages;
+	selector += (uV - range->set_point_min_uV) / range->step_uV;
 
-	return 0;
+	return selector;
+}
+
+static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
+				  unsigned selector, u8 *range_sel,
+				  u8 *voltage_sel)
+{
+	const struct spmi_voltage_range *range, *end;
+
+	range = vreg->set_points->range;
+	end = range + vreg->set_points->count;
+
+	for (; range < end; range++) {
+		if (selector < range->n_voltages) {
+			*voltage_sel = selector;
+			*range_sel = range->range_sel;
+			return 0;
+		}
+
+		selector -= range->n_voltages;
+	}
+
+	return -EINVAL;
+}
+
+static int spmi_hw_selector_to_sw(struct spmi_regulator *vreg, u8 hw_sel,
+				  const struct spmi_voltage_range *range)
+{
+	int sw_sel = hw_sel;
+	const struct spmi_voltage_range *r = vreg->set_points->range;
+
+	while (r != range) {
+		sw_sel += r->n_voltages;
+		r++;
+	}
+
+	return sw_sel;
 }
 
 static const struct spmi_voltage_range *
@@ -614,12 +643,11 @@
 }
 
 static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
-		int min_uV, int max_uV, u8 *range_sel, u8 *voltage_sel,
-		unsigned *selector)
+		int min_uV, int max_uV)
 {
 	const struct spmi_voltage_range *range;
 	int uV = min_uV;
-	int i;
+	int i, selector;
 
 	range = spmi_regulator_find_range(vreg);
 	if (!range)
@@ -637,8 +665,8 @@
 	 * Force uV to be an allowed set point by applying a ceiling function to
 	 * the uV value.
 	 */
-	*voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
-	uV = *voltage_sel * range->step_uV + range->min_uV;
+	uV = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+	uV = uV * range->step_uV + range->min_uV;
 
 	if (uV > max_uV) {
 		/*
@@ -648,43 +676,49 @@
 		goto different_range;
 	}
 
-	*selector = 0;
+	selector = 0;
 	for (i = 0; i < vreg->set_points->count; i++) {
 		if (uV >= vreg->set_points->range[i].set_point_min_uV
 		    && uV <= vreg->set_points->range[i].set_point_max_uV) {
-			*selector +=
+			selector +=
 			    (uV - vreg->set_points->range[i].set_point_min_uV)
 				/ vreg->set_points->range[i].step_uV;
 			break;
 		}
 
-		*selector += vreg->set_points->range[i].n_voltages;
+		selector += vreg->set_points->range[i].n_voltages;
 	}
 
-	if (*selector >= vreg->set_points->n_voltages)
+	if (selector >= vreg->set_points->n_voltages)
 		goto different_range;
 
-	return 0;
+	return selector;
 
 different_range:
-	return spmi_regulator_select_voltage(vreg, min_uV, max_uV,
-			range_sel, voltage_sel, selector);
+	return spmi_regulator_select_voltage(vreg, min_uV, max_uV);
 }
 
-static int spmi_regulator_common_set_voltage(struct regulator_dev *rdev,
-		int min_uV, int max_uV, unsigned *selector)
+static int spmi_regulator_common_map_voltage(struct regulator_dev *rdev,
+					     int min_uV, int max_uV)
+{
+	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+
+	/*
+	 * Favor staying in the current voltage range if possible.  This avoids
+	 * voltage spikes that occur when changing the voltage range.
+	 */
+	return spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV);
+}
+
+static int
+spmi_regulator_common_set_voltage(struct regulator_dev *rdev, unsigned selector)
 {
 	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
 	int ret;
 	u8 buf[2];
 	u8 range_sel, voltage_sel;
 
-	/*
-	 * Favor staying in the current voltage range if possible.  This avoids
-	 * voltage spikes that occur when changing the voltage range.
-	 */
-	ret = spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
-		&range_sel, &voltage_sel, selector);
+	ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
 	if (ret)
 		return ret;
 
@@ -719,24 +753,24 @@
 
 	range = spmi_regulator_find_range(vreg);
 	if (!range)
-		return VOLTAGE_UNKNOWN;
+		return -EINVAL;
 
-	return range->step_uV * voltage_sel + range->min_uV;
+	return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
+}
+
+static int spmi_regulator_single_map_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV)
+{
+	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+
+	return spmi_regulator_select_voltage(vreg, min_uV, max_uV);
 }
 
 static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
-		int min_uV, int max_uV, unsigned *selector)
+						   unsigned selector)
 {
 	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-	int ret;
-	u8 range_sel, sel;
-
-	ret = spmi_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
-		&sel, selector);
-	if (ret) {
-		dev_err(vreg->dev, "could not set voltage, ret=%d\n", ret);
-		return ret;
-	}
+	u8 sel = selector;
 
 	/*
 	 * Certain types of regulators do not have a range select register so
@@ -748,27 +782,24 @@
 static int spmi_regulator_single_range_get_voltage(struct regulator_dev *rdev)
 {
 	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-	const struct spmi_voltage_range *range = vreg->set_points->range;
-	u8 voltage_sel;
+	u8 selector;
+	int ret;
 
-	spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET, &voltage_sel, 1);
+	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET, &selector, 1);
+	if (ret)
+		return ret;
 
-	return range->step_uV * voltage_sel + range->min_uV;
+	return selector;
 }
 
 static int spmi_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
-		int min_uV, int max_uV, unsigned *selector)
+						  unsigned selector)
 {
 	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
 	int ret;
 	u8 range_sel, voltage_sel;
 
-	/*
-	 * Favor staying in the current voltage range if possible. This avoids
-	 * voltage spikes that occur when changing the voltage range.
-	 */
-	ret = spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
-		&range_sel, &voltage_sel, selector);
+	ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
 	if (ret)
 		return ret;
 
@@ -783,7 +814,7 @@
 		voltage_sel |= ULT_SMPS_RANGE_SPLIT;
 
 	return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_VOLTAGE_SET,
-	       voltage_sel, 0xff);
+				     voltage_sel, 0xff);
 }
 
 static int spmi_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
@@ -796,12 +827,12 @@
 
 	range = spmi_regulator_find_range(vreg);
 	if (!range)
-		return VOLTAGE_UNKNOWN;
+		return -EINVAL;
 
 	if (range->range_sel == 1)
 		voltage_sel &= ~ULT_SMPS_RANGE_SPLIT;
 
-	return range->step_uV * voltage_sel + range->min_uV;
+	return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
 }
 
 static int spmi_regulator_common_list_voltage(struct regulator_dev *rdev,
@@ -1007,8 +1038,10 @@
 	.enable			= spmi_regulator_common_enable,
 	.disable		= spmi_regulator_common_disable,
 	.is_enabled		= spmi_regulator_common_is_enabled,
-	.set_voltage		= spmi_regulator_common_set_voltage,
-	.get_voltage		= spmi_regulator_common_get_voltage,
+	.set_voltage_sel	= spmi_regulator_common_set_voltage,
+	.set_voltage_time_sel	= spmi_regulator_set_voltage_time_sel,
+	.get_voltage_sel	= spmi_regulator_common_get_voltage,
+	.map_voltage		= spmi_regulator_common_map_voltage,
 	.list_voltage		= spmi_regulator_common_list_voltage,
 	.set_mode		= spmi_regulator_common_set_mode,
 	.get_mode		= spmi_regulator_common_get_mode,
@@ -1020,8 +1053,9 @@
 	.enable			= spmi_regulator_common_enable,
 	.disable		= spmi_regulator_common_disable,
 	.is_enabled		= spmi_regulator_common_is_enabled,
-	.set_voltage		= spmi_regulator_common_set_voltage,
-	.get_voltage		= spmi_regulator_common_get_voltage,
+	.set_voltage_sel	= spmi_regulator_common_set_voltage,
+	.get_voltage_sel	= spmi_regulator_common_get_voltage,
+	.map_voltage		= spmi_regulator_common_map_voltage,
 	.list_voltage		= spmi_regulator_common_list_voltage,
 	.set_mode		= spmi_regulator_common_set_mode,
 	.get_mode		= spmi_regulator_common_get_mode,
@@ -1036,8 +1070,9 @@
 	.enable			= spmi_regulator_common_enable,
 	.disable		= spmi_regulator_common_disable,
 	.is_enabled		= spmi_regulator_common_is_enabled,
-	.set_voltage		= spmi_regulator_common_set_voltage,
-	.get_voltage		= spmi_regulator_common_get_voltage,
+	.set_voltage_sel	= spmi_regulator_common_set_voltage,
+	.get_voltage_sel	= spmi_regulator_common_get_voltage,
+	.map_voltage		= spmi_regulator_common_map_voltage,
 	.list_voltage		= spmi_regulator_common_list_voltage,
 	.set_bypass		= spmi_regulator_common_set_bypass,
 	.get_bypass		= spmi_regulator_common_get_bypass,
@@ -1056,8 +1091,9 @@
 	.enable			= spmi_regulator_common_enable,
 	.disable		= spmi_regulator_common_disable,
 	.is_enabled		= spmi_regulator_common_is_enabled,
-	.set_voltage		= spmi_regulator_single_range_set_voltage,
-	.get_voltage		= spmi_regulator_single_range_get_voltage,
+	.set_voltage_sel	= spmi_regulator_single_range_set_voltage,
+	.get_voltage_sel	= spmi_regulator_single_range_get_voltage,
+	.map_voltage		= spmi_regulator_single_map_voltage,
 	.list_voltage		= spmi_regulator_common_list_voltage,
 	.set_input_current_limit = spmi_regulator_set_ilim,
 };
@@ -1066,9 +1102,10 @@
 	.enable			= spmi_regulator_common_enable,
 	.disable		= spmi_regulator_common_disable,
 	.is_enabled		= spmi_regulator_common_is_enabled,
-	.set_voltage		= spmi_regulator_common_set_voltage,
+	.set_voltage_sel	= spmi_regulator_common_set_voltage,
 	.set_voltage_time_sel	= spmi_regulator_set_voltage_time_sel,
-	.get_voltage		= spmi_regulator_common_get_voltage,
+	.get_voltage_sel	= spmi_regulator_common_get_voltage,
+	.map_voltage		= spmi_regulator_common_map_voltage,
 	.list_voltage		= spmi_regulator_common_list_voltage,
 	.set_mode		= spmi_regulator_common_set_mode,
 	.get_mode		= spmi_regulator_common_get_mode,
@@ -1080,8 +1117,9 @@
 	.enable			= spmi_regulator_common_enable,
 	.disable		= spmi_regulator_common_disable,
 	.is_enabled		= spmi_regulator_common_is_enabled,
-	.set_voltage		= spmi_regulator_ult_lo_smps_set_voltage,
-	.get_voltage		= spmi_regulator_ult_lo_smps_get_voltage,
+	.set_voltage_sel	= spmi_regulator_ult_lo_smps_set_voltage,
+	.set_voltage_time_sel	= spmi_regulator_set_voltage_time_sel,
+	.get_voltage_sel	= spmi_regulator_ult_lo_smps_get_voltage,
 	.list_voltage		= spmi_regulator_common_list_voltage,
 	.set_mode		= spmi_regulator_common_set_mode,
 	.get_mode		= spmi_regulator_common_get_mode,
@@ -1093,8 +1131,10 @@
 	.enable			= spmi_regulator_common_enable,
 	.disable		= spmi_regulator_common_disable,
 	.is_enabled		= spmi_regulator_common_is_enabled,
-	.set_voltage		= spmi_regulator_single_range_set_voltage,
-	.get_voltage		= spmi_regulator_single_range_get_voltage,
+	.set_voltage_sel	= spmi_regulator_single_range_set_voltage,
+	.set_voltage_time_sel	= spmi_regulator_set_voltage_time_sel,
+	.get_voltage_sel	= spmi_regulator_single_range_get_voltage,
+	.map_voltage		= spmi_regulator_single_map_voltage,
 	.list_voltage		= spmi_regulator_common_list_voltage,
 	.set_mode		= spmi_regulator_common_set_mode,
 	.get_mode		= spmi_regulator_common_get_mode,
@@ -1106,8 +1146,9 @@
 	.enable			= spmi_regulator_common_enable,
 	.disable		= spmi_regulator_common_disable,
 	.is_enabled		= spmi_regulator_common_is_enabled,
-	.set_voltage		= spmi_regulator_single_range_set_voltage,
-	.get_voltage		= spmi_regulator_single_range_get_voltage,
+	.set_voltage_sel	= spmi_regulator_single_range_set_voltage,
+	.get_voltage_sel	= spmi_regulator_single_range_get_voltage,
+	.map_voltage		= spmi_regulator_single_map_voltage,
 	.list_voltage		= spmi_regulator_common_list_voltage,
 	.set_mode		= spmi_regulator_common_set_mode,
 	.get_mode		= spmi_regulator_common_get_mode,
@@ -1201,7 +1242,7 @@
 	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_DIG_MAJOR_REV, version,
 		ARRAY_SIZE(version));
 	if (ret) {
-		dev_err(vreg->dev, "could not read version registers\n");
+		dev_dbg(vreg->dev, "could not read version registers\n");
 		return ret;
 	}
 	dig_major_rev	= version[SPMI_COMMON_REG_DIG_MAJOR_REV
@@ -1245,11 +1286,11 @@
 	return 0;
 }
 
-static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
+static int spmi_regulator_init_slew_rate(struct spmi_regulator *vreg)
 {
 	int ret;
 	u8 reg = 0;
-	int step, delay, slew_rate;
+	int step, delay, slew_rate, step_delay;
 	const struct spmi_voltage_range *range;
 
 	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_STEP_CTRL, &reg, 1);
@@ -1262,6 +1303,15 @@
 	if (!range)
 		return -EINVAL;
 
+	switch (vreg->logical_type) {
+	case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS:
+		step_delay = SPMI_FTSMPS_STEP_DELAY;
+		break;
+	default:
+		step_delay = SPMI_DEFAULT_STEP_DELAY;
+		break;
+	}
+
 	step = reg & SPMI_FTSMPS_STEP_CTRL_STEP_MASK;
 	step >>= SPMI_FTSMPS_STEP_CTRL_STEP_SHIFT;
 
@@ -1270,7 +1320,7 @@
 
 	/* slew_rate has units of uV/us */
 	slew_rate = SPMI_FTSMPS_CLOCK_RATE * range->step_uV * (1 << step);
-	slew_rate /= 1000 * (SPMI_FTSMPS_STEP_DELAY << delay);
+	slew_rate /= 1000 * (step_delay << delay);
 	slew_rate *= SPMI_FTSMPS_STEP_MARGIN_NUM;
 	slew_rate /= SPMI_FTSMPS_STEP_MARGIN_DEN;
 
@@ -1411,10 +1461,16 @@
 		return ret;
 	}
 
-	if (vreg->logical_type == SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS) {
-		ret = spmi_regulator_ftsmps_init_slew_rate(vreg);
+	switch (vreg->logical_type) {
+	case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS:
+	case SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS:
+	case SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS:
+	case SPMI_REGULATOR_LOGICAL_TYPE_SMPS:
+		ret = spmi_regulator_init_slew_rate(vreg);
 		if (ret)
 			return ret;
+	default:
+		break;
 	}
 
 	if (vreg->logical_type != SPMI_REGULATOR_LOGICAL_TYPE_VS)
@@ -1510,10 +1566,61 @@
 	{ }
 };
 
+static const struct spmi_regulator_data pm8994_regulators[] = {
+	{ "s1", 0x1400, "vdd_s1", },
+	{ "s2", 0x1700, "vdd_s2", },
+	{ "s3", 0x1a00, "vdd_s3", },
+	{ "s4", 0x1d00, "vdd_s4", },
+	{ "s5", 0x2000, "vdd_s5", },
+	{ "s6", 0x2300, "vdd_s6", },
+	{ "s7", 0x2600, "vdd_s7", },
+	{ "s8", 0x2900, "vdd_s8", },
+	{ "s9", 0x2c00, "vdd_s9", },
+	{ "s10", 0x2f00, "vdd_s10", },
+	{ "s11", 0x3200, "vdd_s11", },
+	{ "s12", 0x3500, "vdd_s12", },
+	{ "l1", 0x4000, "vdd_l1", },
+	{ "l2", 0x4100, "vdd_l2_l26_l28", },
+	{ "l3", 0x4200, "vdd_l3_l11", },
+	{ "l4", 0x4300, "vdd_l4_l27_l31", },
+	{ "l5", 0x4400, "vdd_l5_l7", },
+	{ "l6", 0x4500, "vdd_l6_l12_l32", },
+	{ "l7", 0x4600, "vdd_l5_l7", },
+	{ "l8", 0x4700, "vdd_l8_l16_l30", },
+	{ "l9", 0x4800, "vdd_l9_l10_l18_l22", },
+	{ "l10", 0x4900, "vdd_l9_l10_l18_l22", },
+	{ "l11", 0x4a00, "vdd_l3_l11", },
+	{ "l12", 0x4b00, "vdd_l6_l12_l32", },
+	{ "l13", 0x4c00, "vdd_l13_l19_l23_l24", },
+	{ "l14", 0x4d00, "vdd_l14_l15", },
+	{ "l15", 0x4e00, "vdd_l14_l15", },
+	{ "l16", 0x4f00, "vdd_l8_l16_l30", },
+	{ "l17", 0x5000, "vdd_l17_l29", },
+	{ "l18", 0x5100, "vdd_l9_l10_l18_l22", },
+	{ "l19", 0x5200, "vdd_l13_l19_l23_l24", },
+	{ "l20", 0x5300, "vdd_l20_l21", },
+	{ "l21", 0x5400, "vdd_l20_l21", },
+	{ "l22", 0x5500, "vdd_l9_l10_l18_l22", },
+	{ "l23", 0x5600, "vdd_l13_l19_l23_l24", },
+	{ "l24", 0x5700, "vdd_l13_l19_l23_l24", },
+	{ "l25", 0x5800, "vdd_l25", },
+	{ "l26", 0x5900, "vdd_l2_l26_l28", },
+	{ "l27", 0x5a00, "vdd_l4_l27_l31", },
+	{ "l28", 0x5b00, "vdd_l2_l26_l28", },
+	{ "l29", 0x5c00, "vdd_l17_l29", },
+	{ "l30", 0x5d00, "vdd_l8_l16_l30", },
+	{ "l31", 0x5e00, "vdd_l4_l27_l31", },
+	{ "l32", 0x5f00, "vdd_l6_l12_l32", },
+	{ "lvs1", 0x8000, "vdd_lvs_1_2", },
+	{ "lvs2", 0x8100, "vdd_lvs_1_2", },
+	{ }
+};
+
 static const struct of_device_id qcom_spmi_regulator_match[] = {
 	{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
 	{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
 	{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
+	{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
@@ -1573,7 +1680,7 @@
 
 		ret = spmi_regulator_match(vreg, reg->force_type);
 		if (ret)
-			goto err;
+			continue;
 
 		config.dev = dev;
 		config.driver_data = vreg;
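A rough standalone sketch of the selector mapping that spmi_sw_selector_to_hw() performs above: a flat software selector is walked across the voltage ranges until it fits, and the remainder becomes the hardware voltage selector. The two ranges and their sizes below are made-up values for illustration only, not real PMIC data, and this is not the driver code itself.

#include <stdio.h>

struct range { int range_sel; unsigned int n_voltages; };

/* Illustrative ranges only; the real driver takes these from its set points. */
static const struct range ranges[] = {
	{ .range_sel = 2, .n_voltages = 96 },
	{ .range_sel = 3, .n_voltages = 160 },
};

/* Mirrors the walk in spmi_sw_selector_to_hw(): subtract each range's size
 * until the selector falls inside one of them. */
static int sw_to_hw(unsigned int sel, int *range_sel, unsigned int *voltage_sel)
{
	unsigned int i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		if (sel < ranges[i].n_voltages) {
			*range_sel = ranges[i].range_sel;
			*voltage_sel = sel;
			return 0;
		}
		sel -= ranges[i].n_voltages;
	}
	return -1;
}

int main(void)
{
	int range_sel;
	unsigned int voltage_sel;

	/* Selector 100 lands 4 steps into the second range: 100 - 96 = 4. */
	if (!sw_to_hw(100, &range_sel, &voltage_sel))
		printf("range_sel=%d voltage_sel=%u\n", range_sel, voltage_sel);
	return 0;
}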
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index d86a3dc..40d07ba 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -55,6 +55,42 @@
 /* max steps for increase voltage of Buck1/2, equal 100mv*/
 #define MAX_STEPS_ONE_TIME 8
 
+#define RK8XX_DESC(_id, _match, _supply, _min, _max, _step, _vreg,	\
+	_vmask, _ereg, _emask, _etime)					\
+	[_id] = {							\
+		.name		= (_match),				\
+		.supply_name	= (_supply),				\
+		.of_match	= of_match_ptr(_match),			\
+		.regulators_node = of_match_ptr("regulators"),		\
+		.type		= REGULATOR_VOLTAGE,			\
+		.id		= (_id),				\
+		.n_voltages	= (((_max) - (_min)) / (_step) + 1),	\
+		.owner		= THIS_MODULE,				\
+		.min_uV		= (_min) * 1000,			\
+		.uV_step	= (_step) * 1000,			\
+		.vsel_reg	= (_vreg),				\
+		.vsel_mask	= (_vmask),				\
+		.enable_reg	= (_ereg),				\
+		.enable_mask	= (_emask),				\
+		.enable_time	= (_etime),				\
+		.ops		= &rk808_reg_ops,			\
+	}
+
+#define RK8XX_DESC_SWITCH(_id, _match, _supply, _ereg, _emask)		\
+	[_id] = {							\
+		.name		= (_match),				\
+		.supply_name	= (_supply),				\
+		.of_match	= of_match_ptr(_match),			\
+		.regulators_node = of_match_ptr("regulators"),		\
+		.type		= REGULATOR_VOLTAGE,			\
+		.id		= (_id),				\
+		.enable_reg	= (_ereg),				\
+		.enable_mask	= (_emask),				\
+		.owner		= THIS_MODULE,				\
+		.ops		= &rk808_switch_ops			\
+	}
+
+
 struct rk808_regulator_data {
 	struct gpio_desc *dvs_gpio[2];
 };
@@ -66,27 +102,11 @@
 	RK808_BUCK4_CONFIG_REG,
 };
 
-static const struct regulator_linear_range rk808_buck_voltage_ranges[] = {
-	REGULATOR_LINEAR_RANGE(712500, 0, 63, 12500),
-};
-
-static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = {
-	REGULATOR_LINEAR_RANGE(1800000, 0, 15, 100000),
-};
-
-static const struct regulator_linear_range rk808_ldo_voltage_ranges[] = {
-	REGULATOR_LINEAR_RANGE(1800000, 0, 16, 100000),
-};
-
 static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = {
 	REGULATOR_LINEAR_RANGE(800000, 0, 13, 100000),
 	REGULATOR_LINEAR_RANGE(2500000, 15, 15, 0),
 };
 
-static const struct regulator_linear_range rk808_ldo6_voltage_ranges[] = {
-	REGULATOR_LINEAR_RANGE(800000, 0, 17, 100000),
-};
-
 static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
 {
 	struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
@@ -242,6 +262,21 @@
 static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
 {
 	unsigned int reg;
+	int sel = regulator_map_voltage_linear(rdev, uv, uv);
+
+	if (sel < 0)
+		return -EINVAL;
+
+	reg = rdev->desc->vsel_reg + RK808_SLP_REG_OFFSET;
+
+	return regmap_update_bits(rdev->regmap, reg,
+				  rdev->desc->vsel_mask,
+				  sel);
+}
+
+static int rk808_set_suspend_voltage_range(struct regulator_dev *rdev, int uv)
+{
+	unsigned int reg;
 	int sel = regulator_map_voltage_linear_range(rdev, uv, uv);
 
 	if (sel < 0)
@@ -277,8 +312,8 @@
 }
 
 static struct regulator_ops rk808_buck1_2_ops = {
-	.list_voltage		= regulator_list_voltage_linear_range,
-	.map_voltage		= regulator_map_voltage_linear_range,
+	.list_voltage		= regulator_list_voltage_linear,
+	.map_voltage		= regulator_map_voltage_linear,
 	.get_voltage_sel	= rk808_buck1_2_get_voltage_sel_regmap,
 	.set_voltage_sel	= rk808_buck1_2_set_voltage_sel,
 	.set_voltage_time_sel	= rk808_buck1_2_set_voltage_time_sel,
@@ -292,6 +327,19 @@
 };
 
 static struct regulator_ops rk808_reg_ops = {
+	.list_voltage		= regulator_list_voltage_linear,
+	.map_voltage		= regulator_map_voltage_linear,
+	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
+	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
+	.is_enabled		= regulator_is_enabled_regmap,
+	.set_suspend_voltage	= rk808_set_suspend_voltage,
+	.set_suspend_enable	= rk808_set_suspend_enable,
+	.set_suspend_disable	= rk808_set_suspend_disable,
+};
+
+static struct regulator_ops rk808_reg_ops_ranges = {
 	.list_voltage		= regulator_list_voltage_linear_range,
 	.map_voltage		= regulator_map_voltage_linear_range,
 	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
@@ -299,7 +347,7 @@
 	.enable			= regulator_enable_regmap,
 	.disable		= regulator_disable_regmap,
 	.is_enabled		= regulator_is_enabled_regmap,
-	.set_suspend_voltage	= rk808_set_suspend_voltage,
+	.set_suspend_voltage	= rk808_set_suspend_voltage_range,
 	.set_suspend_enable	= rk808_set_suspend_enable,
 	.set_suspend_disable	= rk808_set_suspend_disable,
 };
@@ -316,12 +364,14 @@
 	{
 		.name = "DCDC_REG1",
 		.supply_name = "vcc1",
+		.of_match = of_match_ptr("DCDC_REG1"),
+		.regulators_node = of_match_ptr("regulators"),
 		.id = RK808_ID_DCDC1,
 		.ops = &rk808_buck1_2_ops,
 		.type = REGULATOR_VOLTAGE,
+		.min_uV = 712500,
+		.uV_step = 12500,
 		.n_voltages = 64,
-		.linear_ranges = rk808_buck_voltage_ranges,
-		.n_linear_ranges = ARRAY_SIZE(rk808_buck_voltage_ranges),
 		.vsel_reg = RK808_BUCK1_ON_VSEL_REG,
 		.vsel_mask = RK808_BUCK_VSEL_MASK,
 		.enable_reg = RK808_DCDC_EN_REG,
@@ -330,12 +380,14 @@
 	}, {
 		.name = "DCDC_REG2",
 		.supply_name = "vcc2",
+		.of_match = of_match_ptr("DCDC_REG2"),
+		.regulators_node = of_match_ptr("regulators"),
 		.id = RK808_ID_DCDC2,
 		.ops = &rk808_buck1_2_ops,
 		.type = REGULATOR_VOLTAGE,
+		.min_uV = 712500,
+		.uV_step = 12500,
 		.n_voltages = 64,
-		.linear_ranges = rk808_buck_voltage_ranges,
-		.n_linear_ranges = ARRAY_SIZE(rk808_buck_voltage_ranges),
 		.vsel_reg = RK808_BUCK2_ON_VSEL_REG,
 		.vsel_mask = RK808_BUCK_VSEL_MASK,
 		.enable_reg = RK808_DCDC_EN_REG,
@@ -344,6 +396,8 @@
 	}, {
 		.name = "DCDC_REG3",
 		.supply_name = "vcc3",
+		.of_match = of_match_ptr("DCDC_REG3"),
+		.regulators_node = of_match_ptr("regulators"),
 		.id = RK808_ID_DCDC3,
 		.ops = &rk808_switch_ops,
 		.type = REGULATOR_VOLTAGE,
@@ -351,55 +405,23 @@
 		.enable_reg = RK808_DCDC_EN_REG,
 		.enable_mask = BIT(2),
 		.owner = THIS_MODULE,
-	}, {
-		.name = "DCDC_REG4",
-		.supply_name = "vcc4",
-		.id = RK808_ID_DCDC4,
-		.ops = &rk808_reg_ops,
-		.type = REGULATOR_VOLTAGE,
-		.n_voltages = 16,
-		.linear_ranges = rk808_buck4_voltage_ranges,
-		.n_linear_ranges = ARRAY_SIZE(rk808_buck4_voltage_ranges),
-		.vsel_reg = RK808_BUCK4_ON_VSEL_REG,
-		.vsel_mask = RK808_BUCK4_VSEL_MASK,
-		.enable_reg = RK808_DCDC_EN_REG,
-		.enable_mask = BIT(3),
-		.owner = THIS_MODULE,
-	}, {
-		.name = "LDO_REG1",
-		.supply_name = "vcc6",
-		.id = RK808_ID_LDO1,
-		.ops = &rk808_reg_ops,
-		.type = REGULATOR_VOLTAGE,
-		.n_voltages = 17,
-		.linear_ranges = rk808_ldo_voltage_ranges,
-		.n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
-		.vsel_reg = RK808_LDO1_ON_VSEL_REG,
-		.vsel_mask = RK808_LDO_VSEL_MASK,
-		.enable_reg = RK808_LDO_EN_REG,
-		.enable_mask = BIT(0),
-		.enable_time = 400,
-		.owner = THIS_MODULE,
-	}, {
-		.name = "LDO_REG2",
-		.supply_name = "vcc6",
-		.id = RK808_ID_LDO2,
-		.ops = &rk808_reg_ops,
-		.type = REGULATOR_VOLTAGE,
-		.n_voltages = 17,
-		.linear_ranges = rk808_ldo_voltage_ranges,
-		.n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
-		.vsel_reg = RK808_LDO2_ON_VSEL_REG,
-		.vsel_mask = RK808_LDO_VSEL_MASK,
-		.enable_reg = RK808_LDO_EN_REG,
-		.enable_mask = BIT(1),
-		.enable_time = 400,
-		.owner = THIS_MODULE,
-	}, {
+	},
+	RK8XX_DESC(RK808_ID_DCDC4, "DCDC_REG4", "vcc4", 1800, 3300, 100,
+		RK808_BUCK4_ON_VSEL_REG, RK808_BUCK4_VSEL_MASK,
+		RK808_DCDC_EN_REG, BIT(3), 0),
+	RK8XX_DESC(RK808_ID_LDO1, "LDO_REG1", "vcc6", 1800, 3400, 100,
+		RK808_LDO1_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+		BIT(0), 400),
+	RK8XX_DESC(RK808_ID_LDO2, "LDO_REG2", "vcc6", 1800, 3400, 100,
+		RK808_LDO2_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+		BIT(1), 400),
+	{
 		.name = "LDO_REG3",
 		.supply_name = "vcc7",
+		.of_match = of_match_ptr("LDO_REG3"),
+		.regulators_node = of_match_ptr("regulators"),
 		.id = RK808_ID_LDO3,
-		.ops = &rk808_reg_ops,
+		.ops = &rk808_reg_ops_ranges,
 		.type = REGULATOR_VOLTAGE,
 		.n_voltages = 16,
 		.linear_ranges = rk808_ldo3_voltage_ranges,
@@ -410,117 +432,26 @@
 		.enable_mask = BIT(2),
 		.enable_time = 400,
 		.owner = THIS_MODULE,
-	}, {
-		.name = "LDO_REG4",
-		.supply_name = "vcc9",
-		.id = RK808_ID_LDO4,
-		.ops = &rk808_reg_ops,
-		.type = REGULATOR_VOLTAGE,
-		.n_voltages = 17,
-		.linear_ranges = rk808_ldo_voltage_ranges,
-		.n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
-		.vsel_reg = RK808_LDO4_ON_VSEL_REG,
-		.vsel_mask = RK808_LDO_VSEL_MASK,
-		.enable_reg = RK808_LDO_EN_REG,
-		.enable_mask = BIT(3),
-		.enable_time = 400,
-		.owner = THIS_MODULE,
-	}, {
-		.name = "LDO_REG5",
-		.supply_name = "vcc9",
-		.id = RK808_ID_LDO5,
-		.ops = &rk808_reg_ops,
-		.type = REGULATOR_VOLTAGE,
-		.n_voltages = 17,
-		.linear_ranges = rk808_ldo_voltage_ranges,
-		.n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
-		.vsel_reg = RK808_LDO5_ON_VSEL_REG,
-		.vsel_mask = RK808_LDO_VSEL_MASK,
-		.enable_reg = RK808_LDO_EN_REG,
-		.enable_mask = BIT(4),
-		.enable_time = 400,
-		.owner = THIS_MODULE,
-	}, {
-		.name = "LDO_REG6",
-		.supply_name = "vcc10",
-		.id = RK808_ID_LDO6,
-		.ops = &rk808_reg_ops,
-		.type = REGULATOR_VOLTAGE,
-		.n_voltages = 18,
-		.linear_ranges = rk808_ldo6_voltage_ranges,
-		.n_linear_ranges = ARRAY_SIZE(rk808_ldo6_voltage_ranges),
-		.vsel_reg = RK808_LDO6_ON_VSEL_REG,
-		.vsel_mask = RK808_LDO_VSEL_MASK,
-		.enable_reg = RK808_LDO_EN_REG,
-		.enable_mask = BIT(5),
-		.enable_time = 400,
-		.owner = THIS_MODULE,
-	}, {
-		.name = "LDO_REG7",
-		.supply_name = "vcc7",
-		.id = RK808_ID_LDO7,
-		.ops = &rk808_reg_ops,
-		.type = REGULATOR_VOLTAGE,
-		.n_voltages = 18,
-		.linear_ranges = rk808_ldo6_voltage_ranges,
-		.n_linear_ranges = ARRAY_SIZE(rk808_ldo6_voltage_ranges),
-		.vsel_reg = RK808_LDO7_ON_VSEL_REG,
-		.vsel_mask = RK808_LDO_VSEL_MASK,
-		.enable_reg = RK808_LDO_EN_REG,
-		.enable_mask = BIT(6),
-		.enable_time = 400,
-		.owner = THIS_MODULE,
-	}, {
-		.name = "LDO_REG8",
-		.supply_name = "vcc11",
-		.id = RK808_ID_LDO8,
-		.ops = &rk808_reg_ops,
-		.type = REGULATOR_VOLTAGE,
-		.n_voltages = 17,
-		.linear_ranges = rk808_ldo_voltage_ranges,
-		.n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
-		.vsel_reg = RK808_LDO8_ON_VSEL_REG,
-		.vsel_mask = RK808_LDO_VSEL_MASK,
-		.enable_reg = RK808_LDO_EN_REG,
-		.enable_mask = BIT(7),
-		.enable_time = 400,
-		.owner = THIS_MODULE,
-	}, {
-		.name = "SWITCH_REG1",
-		.supply_name = "vcc8",
-		.id = RK808_ID_SWITCH1,
-		.ops = &rk808_switch_ops,
-		.type = REGULATOR_VOLTAGE,
-		.enable_reg = RK808_DCDC_EN_REG,
-		.enable_mask = BIT(5),
-		.owner = THIS_MODULE,
-	}, {
-		.name = "SWITCH_REG2",
-		.supply_name = "vcc12",
-		.id = RK808_ID_SWITCH2,
-		.ops = &rk808_switch_ops,
-		.type = REGULATOR_VOLTAGE,
-		.enable_reg = RK808_DCDC_EN_REG,
-		.enable_mask = BIT(6),
-		.owner = THIS_MODULE,
 	},
-};
-
-static struct of_regulator_match rk808_reg_matches[] = {
-	[RK808_ID_DCDC1]	= { .name = "DCDC_REG1" },
-	[RK808_ID_DCDC2]	= { .name = "DCDC_REG2" },
-	[RK808_ID_DCDC3]	= { .name = "DCDC_REG3" },
-	[RK808_ID_DCDC4]	= { .name = "DCDC_REG4" },
-	[RK808_ID_LDO1]		= { .name = "LDO_REG1" },
-	[RK808_ID_LDO2]		= { .name = "LDO_REG2" },
-	[RK808_ID_LDO3]		= { .name = "LDO_REG3" },
-	[RK808_ID_LDO4]		= { .name = "LDO_REG4" },
-	[RK808_ID_LDO5]		= { .name = "LDO_REG5" },
-	[RK808_ID_LDO6]		= { .name = "LDO_REG6" },
-	[RK808_ID_LDO7]		= { .name = "LDO_REG7" },
-	[RK808_ID_LDO8]		= { .name = "LDO_REG8" },
-	[RK808_ID_SWITCH1]	= { .name = "SWITCH_REG1" },
-	[RK808_ID_SWITCH2]	= { .name = "SWITCH_REG2" },
+	RK8XX_DESC(RK808_ID_LDO4, "LDO_REG4", "vcc9", 1800, 3400, 100,
+		RK808_LDO4_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+		BIT(3), 400),
+	RK8XX_DESC(RK808_ID_LDO5, "LDO_REG5", "vcc9", 1800, 3400, 100,
+		RK808_LDO5_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+		BIT(4), 400),
+	RK8XX_DESC(RK808_ID_LDO6, "LDO_REG6", "vcc10", 800, 2500, 100,
+		RK808_LDO6_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+		BIT(5), 400),
+	RK8XX_DESC(RK808_ID_LDO7, "LDO_REG7", "vcc7", 800, 2500, 100,
+		RK808_LDO7_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+		BIT(6), 400),
+	RK8XX_DESC(RK808_ID_LDO8, "LDO_REG8", "vcc11", 1800, 3400, 100,
+		RK808_LDO8_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+		BIT(7), 400),
+	RK8XX_DESC_SWITCH(RK808_ID_SWITCH1, "SWITCH_REG1", "vcc8",
+		RK808_DCDC_EN_REG, BIT(5)),
+	RK8XX_DESC_SWITCH(RK808_ID_SWITCH2, "SWITCH_REG2", "vcc12",
+		RK808_DCDC_EN_REG, BIT(6)),
 };
 
 static int rk808_regulator_dt_parse_pdata(struct device *dev,
@@ -529,17 +460,12 @@
 				   struct rk808_regulator_data *pdata)
 {
 	struct device_node *np;
-	int tmp, ret, i;
+	int tmp, ret = 0, i;
 
 	np = of_get_child_by_name(client_dev->of_node, "regulators");
 	if (!np)
 		return -ENXIO;
 
-	ret = of_regulator_match(dev, np, rk808_reg_matches,
-				 RK808_NUM_REGULATORS);
-	if (ret < 0)
-		goto dt_parse_end;
-
 	for (i = 0; i < ARRAY_SIZE(pdata->dvs_gpio); i++) {
 		pdata->dvs_gpio[i] =
 			devm_gpiod_get_index_optional(client_dev, "dvs", i,
@@ -586,18 +512,12 @@
 
 	platform_set_drvdata(pdev, pdata);
 
+	config.dev = &client->dev;
+	config.driver_data = pdata;
+	config.regmap = rk808->regmap;
+
 	/* Instantiate the regulators */
 	for (i = 0; i < RK808_NUM_REGULATORS; i++) {
-		if (!rk808_reg_matches[i].init_data ||
-		    !rk808_reg_matches[i].of_node)
-			continue;
-
-		config.dev = &client->dev;
-		config.driver_data = pdata;
-		config.regmap = rk808->regmap;
-		config.of_node = rk808_reg_matches[i].of_node;
-		config.init_data = rk808_reg_matches[i].init_data;
-
 		rk808_rdev = devm_regulator_register(&pdev->dev,
 						     &rk808_reg[i], &config);
 		if (IS_ERR(rk808_rdev)) {
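For reference, a minimal check of how one RK8XX_DESC() entry above expands numerically; the n_voltages formula and the mV-to-uV conversion come straight from the macro, and the 1800/3400/100 arguments are LDO_REG4's values from the table.

#include <stdio.h>

int main(void)
{
	/* LDO_REG4 arguments from the table above: 1800..3400 mV, 100 mV step */
	int min_mV = 1800, max_mV = 3400, step_mV = 100;

	int n_voltages = (max_mV - min_mV) / step_mV + 1;	/* 17 */
	int min_uV = min_mV * 1000;				/* 1800000 */
	int uV_step = step_mV * 1000;				/* 100000 */

	printf("n_voltages=%d min_uV=%d uV_step=%d\n",
	       n_voltages, min_uV, uV_step);
	return 0;
}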
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index d24e2c7..02fb6b4 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -267,6 +267,7 @@
 	.ops		= &s2mps11_ldo_ops,		\
 	.type		= REGULATOR_VOLTAGE,		\
 	.owner		= THIS_MODULE,			\
+	.ramp_delay	= RAMP_DELAY_12_MVUS,		\
 	.min_uV		= MIN_800_MV,			\
 	.uV_step	= step,				\
 	.n_voltages	= S2MPS11_LDO_N_VOLTAGES,	\
@@ -308,7 +309,7 @@
 	.enable_mask	= S2MPS11_ENABLE_MASK			\
 }
 
-#define regulator_desc_s2mps11_buck6_10(num, min, step) {	\
+#define regulator_desc_s2mps11_buck67810(num, min, step) {	\
 	.name		= "BUCK"#num,				\
 	.id		= S2MPS11_BUCK##num,			\
 	.ops		= &s2mps11_buck_ops,			\
@@ -324,6 +325,22 @@
 	.enable_mask	= S2MPS11_ENABLE_MASK			\
 }
 
+#define regulator_desc_s2mps11_buck9 {				\
+	.name		= "BUCK9",				\
+	.id		= S2MPS11_BUCK9,			\
+	.ops		= &s2mps11_buck_ops,			\
+	.type		= REGULATOR_VOLTAGE,			\
+	.owner		= THIS_MODULE,				\
+	.min_uV		= MIN_3000_MV,				\
+	.uV_step	= STEP_25_MV,				\
+	.n_voltages	= S2MPS11_BUCK9_N_VOLTAGES,		\
+	.ramp_delay	= S2MPS11_RAMP_DELAY,			\
+	.vsel_reg	= S2MPS11_REG_B9CTRL2,			\
+	.vsel_mask	= S2MPS11_BUCK9_VSEL_MASK,		\
+	.enable_reg	= S2MPS11_REG_B9CTRL1,			\
+	.enable_mask	= S2MPS11_ENABLE_MASK			\
+}
+
 static const struct regulator_desc s2mps11_regulators[] = {
 	regulator_desc_s2mps11_ldo(1, STEP_25_MV),
 	regulator_desc_s2mps11_ldo(2, STEP_50_MV),
@@ -368,11 +385,11 @@
 	regulator_desc_s2mps11_buck1_4(3),
 	regulator_desc_s2mps11_buck1_4(4),
 	regulator_desc_s2mps11_buck5,
-	regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
-	regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
-	regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
-	regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
-	regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
+	regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
+	regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
+	regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
+	regulator_desc_s2mps11_buck9,
+	regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
 };
 
 static struct regulator_ops s2mps14_reg_ops;
@@ -1221,17 +1238,7 @@
 	.id_table = s2mps11_pmic_id,
 };
 
-static int __init s2mps11_pmic_init(void)
-{
-	return platform_driver_register(&s2mps11_pmic_driver);
-}
-subsys_initcall(s2mps11_pmic_init);
-
-static void __exit s2mps11_pmic_exit(void)
-{
-	platform_driver_unregister(&s2mps11_pmic_driver);
-}
-module_exit(s2mps11_pmic_exit);
+module_platform_driver(s2mps11_pmic_driver);
 
 /* Module information */
 MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 9d6ea3a..67cac26 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -600,7 +600,7 @@
 
 	memset(hw, 0, sizeof(struct tps6524x));
 	hw->dev = dev;
-	hw->spi = spi_dev_get(spi);
+	hw->spi = spi;
 	mutex_init(&hw->lock);
 
 	for (i = 0; i < N_REGULATORS; i++, info++, init_data++) {
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 955a6fb..faeb5ee 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -21,7 +21,7 @@
 #include <linux/regulator/machine.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/i2c/twl.h>
-
+#include <linux/delay.h>
 
 /*
 * The TWL4030/TWL5030/TPS659x0/TWL6030 family chips include power management, a
@@ -188,6 +188,74 @@
 	return grp && (val == TWL6030_CFG_STATE_ON);
 }
 
+#define PB_I2C_BUSY	BIT(0)
+#define PB_I2C_BWEN	BIT(1)
+
+/* Wait until buffer empty/ready to send a word on power bus. */
+static int twl4030_wait_pb_ready(void)
+{
+
+	int	ret;
+	int	timeout = 10;
+	u8	val;
+
+	do {
+		ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val,
+				      TWL4030_PM_MASTER_PB_CFG);
+		if (ret < 0)
+			return ret;
+
+		if (!(val & PB_I2C_BUSY))
+			return 0;
+
+		mdelay(1);
+		timeout--;
+	} while (timeout);
+
+	return -ETIMEDOUT;
+}
+
+/* Send a word over the powerbus */
+static int twl4030_send_pb_msg(unsigned msg)
+{
+	u8	val;
+	int	ret;
+
+	/* save powerbus configuration */
+	ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val,
+			      TWL4030_PM_MASTER_PB_CFG);
+	if (ret < 0)
+		return ret;
+
+	/* Enable i2c access to powerbus */
+	ret = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val | PB_I2C_BWEN,
+			       TWL4030_PM_MASTER_PB_CFG);
+	if (ret < 0)
+		return ret;
+
+	ret = twl4030_wait_pb_ready();
+	if (ret < 0)
+		return ret;
+
+	ret = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, msg >> 8,
+			       TWL4030_PM_MASTER_PB_WORD_MSB);
+	if (ret < 0)
+		return ret;
+
+	ret = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, msg & 0xff,
+			       TWL4030_PM_MASTER_PB_WORD_LSB);
+	if (ret < 0)
+		return ret;
+
+	ret = twl4030_wait_pb_ready();
+	if (ret < 0)
+		return ret;
+
+	/* Restore powerbus configuration */
+	return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val,
+				TWL4030_PM_MASTER_PB_CFG);
+}
+
 static int twl4030reg_enable(struct regulator_dev *rdev)
 {
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
@@ -303,7 +371,6 @@
 {
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
 	unsigned		message;
-	int			status;
 
 	/* We can only set the mode through state machine commands... */
 	switch (mode) {
@@ -317,20 +384,19 @@
 		return -EINVAL;
 	}
 
-	/* Ensure the resource is associated with some group */
-	status = twlreg_grp(rdev);
-	if (status < 0)
-		return status;
-	if (!(status & (P3_GRP_4030 | P2_GRP_4030 | P1_GRP_4030)))
-		return -EACCES;
+	return twl4030_send_pb_msg(message);
+}
 
-	status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
-			message >> 8, TWL4030_PM_MASTER_PB_WORD_MSB);
-	if (status < 0)
-		return status;
-
-	return twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
-			message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
+static inline unsigned int twl4030reg_map_mode(unsigned int mode)
+{
+	switch (mode) {
+	case RES_STATE_ACTIVE:
+		return REGULATOR_MODE_NORMAL;
+	case RES_STATE_SLEEP:
+		return REGULATOR_MODE_STANDBY;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
@@ -835,10 +901,11 @@
 #define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
 			remap_conf) \
 		TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
-			remap_conf, TWL4030, twl4030fixed_ops)
+			remap_conf, TWL4030, twl4030fixed_ops, \
+			twl4030reg_map_mode)
 #define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
 		TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
-			0x0, TWL6030, twl6030fixed_ops)
+			0x0, TWL6030, twl6030fixed_ops, 0x0)
 
 #define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
 static const struct twlreg_info TWL4030_INFO_##label = { \
@@ -855,6 +922,7 @@
 		.type = REGULATOR_VOLTAGE, \
 		.owner = THIS_MODULE, \
 		.enable_time = turnon_delay, \
+		.of_map_mode = twl4030reg_map_mode, \
 		}, \
 	}
 
@@ -870,6 +938,7 @@
 		.type = REGULATOR_VOLTAGE, \
 		.owner = THIS_MODULE, \
 		.enable_time = turnon_delay, \
+		.of_map_mode = twl4030reg_map_mode, \
 		}, \
 	}
 
@@ -915,7 +984,7 @@
 	}
 
 #define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
-		family, operations) \
+		family, operations, map_mode) \
 static const struct twlreg_info TWLFIXED_INFO_##label = { \
 	.base = offset, \
 	.id = num, \
@@ -930,6 +999,7 @@
 		.owner = THIS_MODULE, \
 		.min_uV = mVolts * 1000, \
 		.enable_time = turnon_delay, \
+		.of_map_mode = map_mode, \
 		}, \
 	}
 
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 3d7d58a..db3958b 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -57,6 +57,8 @@
 
 static const char * const rproc_crash_names[] = {
 	[RPROC_MMUFAULT]	= "mmufault",
+	[RPROC_WATCHDOG]	= "watchdog",
+	[RPROC_FATAL_ERROR]	= "fatal error",
 };
 
 /* translate rproc_crash_type to string */
@@ -856,12 +858,8 @@
 	 * copy this information to device memory.
 	 */
 	loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
-	if (!loaded_table) {
-		ret = -EINVAL;
-		goto clean_up;
-	}
-
-	memcpy(loaded_table, rproc->cached_table, tablesz);
+	if (loaded_table)
+		memcpy(loaded_table, rproc->cached_table, tablesz);
 
 	/* power up the remote processor */
 	ret = rproc->ops->start(rproc);
@@ -1030,8 +1028,9 @@
 }
 
 /**
- * rproc_boot() - boot a remote processor
+ * __rproc_boot() - boot a remote processor
  * @rproc: handle of a remote processor
+ * @wait: wait for rproc registration completion
  *
  * Boot a remote processor (i.e. load its firmware, power it on, ...).
  *
@@ -1040,7 +1039,7 @@
  *
  * Returns 0 on success, and an appropriate error value otherwise.
  */
-int rproc_boot(struct rproc *rproc)
+static int __rproc_boot(struct rproc *rproc, bool wait)
 {
 	const struct firmware *firmware_p;
 	struct device *dev;
@@ -1088,6 +1087,10 @@
 		goto downref_rproc;
 	}
 
+	/* if rproc virtio is not yet configured, wait */
+	if (wait)
+		wait_for_completion(&rproc->firmware_loading_complete);
+
 	ret = rproc_fw_boot(rproc, firmware_p);
 
 	release_firmware(firmware_p);
@@ -1101,9 +1104,29 @@
 	mutex_unlock(&rproc->lock);
 	return ret;
 }
+
+/**
+ * rproc_boot() - boot a remote processor
+ * @rproc: handle of a remote processor
+ */
+int rproc_boot(struct rproc *rproc)
+{
+	return __rproc_boot(rproc, true);
+}
 EXPORT_SYMBOL(rproc_boot);
 
 /**
+ * rproc_boot_nowait() - boot a remote processor
+ * @rproc: handle of a remote processor
+ *
+ * Same as rproc_boot(), but does not wait for rproc registration completion
+ */
+int rproc_boot_nowait(struct rproc *rproc)
+{
+	return __rproc_boot(rproc, false);
+}
+
+/**
  * rproc_shutdown() - power off the remote processor
  * @rproc: the remote processor
  *
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 8041b95..57e1de5 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -48,6 +48,7 @@
 /* from remoteproc_core.c */
 void rproc_release(struct kref *kref);
 irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
+int rproc_boot_nowait(struct rproc *rproc);
 
 /* from remoteproc_virtio.c */
 int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index e44872f..cc91556 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -161,7 +161,7 @@
 	}
 
 	/* now that the vqs are all set, boot the remote processor */
-	ret = rproc_boot(rproc);
+	ret = rproc_boot_nowait(rproc);
 	if (ret) {
 		dev_err(&rproc->dev, "rproc_boot() failed %d\n", ret);
 		goto error;
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index 6bb04d4..6f056ca 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -189,9 +189,9 @@
 	}
 
 	ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
-	if (!ddata->boot_base) {
+	if (IS_ERR(ddata->boot_base)) {
 		dev_err(dev, "Boot base not found\n");
-		return -EINVAL;
+		return PTR_ERR(ddata->boot_base);
 	}
 
 	err = of_property_read_u32_index(np, "st,syscfg", 1,
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index df37212..0b2733d 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -12,5 +12,8 @@
 
 	  If unsure, say no.
 
+config RESET_OXNAS
+	bool
+
 source "drivers/reset/sti/Kconfig"
 source "drivers/reset/hisilicon/Kconfig"
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index a1fc8ed..f173fc3 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_ARCH_HISI) += hisilicon/
 obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
 obj-$(CONFIG_ATH79) += reset-ath79.o
+obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index f15f150..72b32bd 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -8,6 +8,7 @@
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
+#include <linux/atomic.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/export.h>
@@ -18,19 +19,27 @@
 #include <linux/reset-controller.h>
 #include <linux/slab.h>
 
-static DEFINE_MUTEX(reset_controller_list_mutex);
+static DEFINE_MUTEX(reset_list_mutex);
 static LIST_HEAD(reset_controller_list);
 
 /**
  * struct reset_control - a reset control
  * @rcdev: a pointer to the reset controller device
  *         this reset control belongs to
+ * @list: list entry for the rcdev's reset controller list
  * @id: ID of the reset controller in the reset
  *      controller device
+ * @refcnt: Number of gets of this reset_control
+ * @shared: Is this a shared (1), or an exclusive (0) reset_control?
+ * @deassert_count: Number of times this reset line has been deasserted
  */
 struct reset_control {
 	struct reset_controller_dev *rcdev;
+	struct list_head list;
 	unsigned int id;
+	unsigned int refcnt;
+	int shared;
+	atomic_t deassert_count;
 };
 
 /**
@@ -62,9 +71,11 @@
 		rcdev->of_xlate = of_reset_simple_xlate;
 	}
 
-	mutex_lock(&reset_controller_list_mutex);
+	INIT_LIST_HEAD(&rcdev->reset_control_head);
+
+	mutex_lock(&reset_list_mutex);
 	list_add(&rcdev->list, &reset_controller_list);
-	mutex_unlock(&reset_controller_list_mutex);
+	mutex_unlock(&reset_list_mutex);
 
 	return 0;
 }
@@ -76,18 +87,23 @@
  */
 void reset_controller_unregister(struct reset_controller_dev *rcdev)
 {
-	mutex_lock(&reset_controller_list_mutex);
+	mutex_lock(&reset_list_mutex);
 	list_del(&rcdev->list);
-	mutex_unlock(&reset_controller_list_mutex);
+	mutex_unlock(&reset_list_mutex);
 }
 EXPORT_SYMBOL_GPL(reset_controller_unregister);
 
 /**
  * reset_control_reset - reset the controlled device
  * @rstc: reset controller
+ *
+ * Calling this on a shared reset controller is an error.
  */
 int reset_control_reset(struct reset_control *rstc)
 {
+	if (WARN_ON(rstc->shared))
+		return -EINVAL;
+
 	if (rstc->rcdev->ops->reset)
 		return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
 
@@ -98,26 +114,48 @@
 /**
  * reset_control_assert - asserts the reset line
  * @rstc: reset controller
+ *
+ * Calling this on an exclusive reset controller guarantees that the reset
+ * will be asserted. When called on a shared reset controller the line may
+ * still be deasserted, as long as other users keep it so.
+ *
+ * For shared reset controls a driver cannot expect the hw's registers and
+ * internal state to be reset, but must be prepared for this to happen.
  */
 int reset_control_assert(struct reset_control *rstc)
 {
-	if (rstc->rcdev->ops->assert)
-		return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
+	if (!rstc->rcdev->ops->assert)
+		return -ENOTSUPP;
 
-	return -ENOTSUPP;
+	if (rstc->shared) {
+		if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
+			return -EINVAL;
+
+		if (atomic_dec_return(&rstc->deassert_count) != 0)
+			return 0;
+	}
+
+	return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
 }
 EXPORT_SYMBOL_GPL(reset_control_assert);
 
 /**
  * reset_control_deassert - deasserts the reset line
  * @rstc: reset controller
+ *
+ * After calling this function, the reset is guaranteed to be deasserted.
  */
 int reset_control_deassert(struct reset_control *rstc)
 {
-	if (rstc->rcdev->ops->deassert)
-		return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
+	if (!rstc->rcdev->ops->deassert)
+		return -ENOTSUPP;
 
-	return -ENOTSUPP;
+	if (rstc->shared) {
+		if (atomic_inc_return(&rstc->deassert_count) != 1)
+			return 0;
+	}
+
+	return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
 }
 EXPORT_SYMBOL_GPL(reset_control_deassert);
 
@@ -136,18 +174,54 @@
 }
 EXPORT_SYMBOL_GPL(reset_control_status);
 
-/**
- * of_reset_control_get_by_index - Lookup and obtain a reference to a reset
- * controller by index.
- * @node: device to be reset by the controller
- * @index: index of the reset controller
- *
- * This is to be used to perform a list of resets for a device or power domain
- * in whatever order. Returns a struct reset_control or IS_ERR() condition
- * containing errno.
- */
-struct reset_control *of_reset_control_get_by_index(struct device_node *node,
-					   int index)
+static struct reset_control *__reset_control_get(
+				struct reset_controller_dev *rcdev,
+				unsigned int index, int shared)
+{
+	struct reset_control *rstc;
+
+	lockdep_assert_held(&reset_list_mutex);
+
+	list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
+		if (rstc->id == index) {
+			if (WARN_ON(!rstc->shared || !shared))
+				return ERR_PTR(-EBUSY);
+
+			rstc->refcnt++;
+			return rstc;
+		}
+	}
+
+	rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
+	if (!rstc)
+		return ERR_PTR(-ENOMEM);
+
+	try_module_get(rcdev->owner);
+
+	rstc->rcdev = rcdev;
+	list_add(&rstc->list, &rcdev->reset_control_head);
+	rstc->id = index;
+	rstc->refcnt = 1;
+	rstc->shared = shared;
+
+	return rstc;
+}
+
+static void __reset_control_put(struct reset_control *rstc)
+{
+	lockdep_assert_held(&reset_list_mutex);
+
+	if (--rstc->refcnt)
+		return;
+
+	module_put(rstc->rcdev->owner);
+
+	list_del(&rstc->list);
+	kfree(rstc);
+}
+
+struct reset_control *__of_reset_control_get(struct device_node *node,
+				     const char *id, int index, int shared)
 {
 	struct reset_control *rstc;
 	struct reset_controller_dev *r, *rcdev;
@@ -155,12 +229,22 @@
 	int rstc_id;
 	int ret;
 
+	if (!node)
+		return ERR_PTR(-EINVAL);
+
+	if (id) {
+		index = of_property_match_string(node,
+						 "reset-names", id);
+		if (index < 0)
+			return ERR_PTR(-ENOENT);
+	}
+
 	ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
 					 index, &args);
 	if (ret)
 		return ERR_PTR(ret);
 
-	mutex_lock(&reset_controller_list_mutex);
+	mutex_lock(&reset_list_mutex);
 	rcdev = NULL;
 	list_for_each_entry(r, &reset_controller_list, list) {
 		if (args.np == r->of_node) {
@@ -171,78 +255,29 @@
 	of_node_put(args.np);
 
 	if (!rcdev) {
-		mutex_unlock(&reset_controller_list_mutex);
+		mutex_unlock(&reset_list_mutex);
 		return ERR_PTR(-EPROBE_DEFER);
 	}
 
 	if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
-		mutex_unlock(&reset_controller_list_mutex);
+		mutex_unlock(&reset_list_mutex);
 		return ERR_PTR(-EINVAL);
 	}
 
 	rstc_id = rcdev->of_xlate(rcdev, &args);
 	if (rstc_id < 0) {
-		mutex_unlock(&reset_controller_list_mutex);
+		mutex_unlock(&reset_list_mutex);
 		return ERR_PTR(rstc_id);
 	}
 
-	try_module_get(rcdev->owner);
-	mutex_unlock(&reset_controller_list_mutex);
+	/* reset_list_mutex also protects the rcdev's reset_control list */
+	rstc = __reset_control_get(rcdev, rstc_id, shared);
 
-	rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
-	if (!rstc) {
-		module_put(rcdev->owner);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	rstc->rcdev = rcdev;
-	rstc->id = rstc_id;
+	mutex_unlock(&reset_list_mutex);
 
 	return rstc;
 }
-EXPORT_SYMBOL_GPL(of_reset_control_get_by_index);
-
-/**
- * of_reset_control_get - Lookup and obtain a reference to a reset controller.
- * @node: device to be reset by the controller
- * @id: reset line name
- *
- * Returns a struct reset_control or IS_ERR() condition containing errno.
- *
- * Use of id names is optional.
- */
-struct reset_control *of_reset_control_get(struct device_node *node,
-					   const char *id)
-{
-	int index = 0;
-
-	if (id) {
-		index = of_property_match_string(node,
-						 "reset-names", id);
-		if (index < 0)
-			return ERR_PTR(-ENOENT);
-	}
-	return of_reset_control_get_by_index(node, index);
-}
-EXPORT_SYMBOL_GPL(of_reset_control_get);
-
-/**
- * reset_control_get - Lookup and obtain a reference to a reset controller.
- * @dev: device to be reset by the controller
- * @id: reset line name
- *
- * Returns a struct reset_control or IS_ERR() condition containing errno.
- *
- * Use of id names is optional.
- */
-struct reset_control *reset_control_get(struct device *dev, const char *id)
-{
-	if (!dev)
-		return ERR_PTR(-EINVAL);
-
-	return of_reset_control_get(dev->of_node, id);
-}
-EXPORT_SYMBOL_GPL(reset_control_get);
+EXPORT_SYMBOL_GPL(__of_reset_control_get);
 
 /**
  * reset_control_put - free the reset controller
@@ -254,8 +289,9 @@
 	if (IS_ERR(rstc))
 		return;
 
-	module_put(rstc->rcdev->owner);
-	kfree(rstc);
+	mutex_lock(&reset_list_mutex);
+	__reset_control_put(rstc);
+	mutex_unlock(&reset_list_mutex);
 }
 EXPORT_SYMBOL_GPL(reset_control_put);
 
@@ -264,16 +300,8 @@
 	reset_control_put(*(struct reset_control **)res);
 }
 
-/**
- * devm_reset_control_get - resource managed reset_control_get()
- * @dev: device to be reset by the controller
- * @id: reset line name
- *
- * Managed reset_control_get(). For reset controllers returned from this
- * function, reset_control_put() is called automatically on driver detach.
- * See reset_control_get() for more information.
- */
-struct reset_control *devm_reset_control_get(struct device *dev, const char *id)
+struct reset_control *__devm_reset_control_get(struct device *dev,
+				     const char *id, int index, int shared)
 {
 	struct reset_control **ptr, *rstc;
 
@@ -282,7 +310,8 @@
 	if (!ptr)
 		return ERR_PTR(-ENOMEM);
 
-	rstc = reset_control_get(dev, id);
+	rstc = __of_reset_control_get(dev ? dev->of_node : NULL,
+				      id, index, shared);
 	if (!IS_ERR(rstc)) {
 		*ptr = rstc;
 		devres_add(dev, ptr);
@@ -292,7 +321,7 @@
 
 	return rstc;
 }
-EXPORT_SYMBOL_GPL(devm_reset_control_get);
+EXPORT_SYMBOL_GPL(__devm_reset_control_get);
 
 /**
  * device_reset - find reset controller associated with the device
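A standalone sketch of the shared-reset refcounting added to the core above: deasserts are counted, only the first deassert and the balancing last assert touch the hardware, and intermediate calls are no-ops. The helper names here are hypothetical; this is not the kernel API.

#include <stdio.h>

static int deassert_count;

static void hw_assert(void)   { printf("hw: assert reset line\n"); }
static void hw_deassert(void) { printf("hw: deassert reset line\n"); }

/* First user to deassert brings the line out of reset. */
static void shared_deassert(void)
{
	if (++deassert_count == 1)
		hw_deassert();
}

/* Last user to assert puts the line back into reset. */
static void shared_assert(void)
{
	if (--deassert_count == 0)
		hw_assert();
}

int main(void)
{
	shared_deassert();	/* user A: reaches the hardware */
	shared_deassert();	/* user B: counted only */
	shared_assert();	/* user B done: line stays deasserted for A */
	shared_assert();	/* user A done: reaches the hardware */
	return 0;
}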
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
index 3b8a4f5..54cca00 100644
--- a/drivers/reset/reset-lpc18xx.c
+++ b/drivers/reset/reset-lpc18xx.c
@@ -35,6 +35,7 @@
 
 struct lpc18xx_rgu_data {
 	struct reset_controller_dev rcdev;
+	struct notifier_block restart_nb;
 	struct clk *clk_delay;
 	struct clk *clk_reg;
 	void __iomem *base;
@@ -44,12 +45,13 @@
 
 #define to_rgu_data(p) container_of(p, struct lpc18xx_rgu_data, rcdev)
 
-static void __iomem *rgu_base;
-
-static int lpc18xx_rgu_restart(struct notifier_block *this, unsigned long mode,
+static int lpc18xx_rgu_restart(struct notifier_block *nb, unsigned long mode,
 			       void *cmd)
 {
-	writel(BIT(LPC18XX_RGU_CORE_RST), rgu_base + LPC18XX_RGU_CTRL0);
+	struct lpc18xx_rgu_data *rc = container_of(nb, struct lpc18xx_rgu_data,
+						   restart_nb);
+
+	writel(BIT(LPC18XX_RGU_CORE_RST), rc->base + LPC18XX_RGU_CTRL0);
 	mdelay(2000);
 
 	pr_emerg("%s: unable to restart system\n", __func__);
@@ -57,11 +59,6 @@
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block lpc18xx_rgu_restart_nb = {
-	.notifier_call = lpc18xx_rgu_restart,
-	.priority = 192,
-};
-
 /*
  * The LPC18xx RGU has mostly self-deasserting resets except for the
  * two reset lines going to the internal Cortex-M0 cores.
@@ -205,8 +202,9 @@
 		goto dis_clks;
 	}
 
-	rgu_base = rc->base;
-	ret = register_restart_handler(&lpc18xx_rgu_restart_nb);
+	rc->restart_nb.priority = 192;
+	rc->restart_nb.notifier_call = lpc18xx_rgu_restart;
+	ret = register_restart_handler(&rc->restart_nb);
 	if (ret)
 		dev_warn(&pdev->dev, "failed to register restart handler\n");
 
@@ -225,7 +223,7 @@
 	struct lpc18xx_rgu_data *rc = platform_get_drvdata(pdev);
 	int ret;
 
-	ret = unregister_restart_handler(&lpc18xx_rgu_restart_nb);
+	ret = unregister_restart_handler(&rc->restart_nb);
 	if (ret)
 		dev_warn(&pdev->dev, "failed to unregister restart handler\n");
 
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
new file mode 100644
index 0000000..c60fb2d
--- /dev/null
+++ b/drivers/reset/reset-oxnas.c
@@ -0,0 +1,136 @@
+/*
+ * drivers/reset/reset-oxnas.c
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2009 Oxford Semiconductor Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+/* Regmap offsets */
+#define RST_SET_REGOFFSET	0x34
+#define RST_CLR_REGOFFSET	0x38
+
+struct oxnas_reset {
+	struct regmap *regmap;
+	struct reset_controller_dev rcdev;
+};
+
+static int oxnas_reset_reset(struct reset_controller_dev *rcdev,
+			      unsigned long id)
+{
+	struct oxnas_reset *data =
+		container_of(rcdev, struct oxnas_reset, rcdev);
+
+	regmap_write(data->regmap, RST_SET_REGOFFSET, BIT(id));
+	msleep(50);
+	regmap_write(data->regmap, RST_CLR_REGOFFSET, BIT(id));
+
+	return 0;
+}
+
+static int oxnas_reset_assert(struct reset_controller_dev *rcdev,
+			      unsigned long id)
+{
+	struct oxnas_reset *data =
+		container_of(rcdev, struct oxnas_reset, rcdev);
+
+	regmap_write(data->regmap, RST_SET_REGOFFSET, BIT(id));
+
+	return 0;
+}
+
+static int oxnas_reset_deassert(struct reset_controller_dev *rcdev,
+				unsigned long id)
+{
+	struct oxnas_reset *data =
+		container_of(rcdev, struct oxnas_reset, rcdev);
+
+	regmap_write(data->regmap, RST_CLR_REGOFFSET, BIT(id));
+
+	return 0;
+}
+
+static const struct reset_control_ops oxnas_reset_ops = {
+	.reset		= oxnas_reset_reset,
+	.assert		= oxnas_reset_assert,
+	.deassert	= oxnas_reset_deassert,
+};
+
+static const struct of_device_id oxnas_reset_dt_ids[] = {
+	 { .compatible = "oxsemi,ox810se-reset", },
+	 { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, oxnas_reset_dt_ids);
+
+static int oxnas_reset_probe(struct platform_device *pdev)
+{
+	struct oxnas_reset *data;
+	struct device *parent;
+
+	parent = pdev->dev.parent;
+	if (!parent) {
+		dev_err(&pdev->dev, "no parent\n");
+		return -ENODEV;
+	}
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->regmap = syscon_node_to_regmap(parent->of_node);
+	if (IS_ERR(data->regmap)) {
+		dev_err(&pdev->dev, "failed to get parent regmap\n");
+		return PTR_ERR(data->regmap);
+	}
+
+	platform_set_drvdata(pdev, data);
+
+	data->rcdev.owner = THIS_MODULE;
+	data->rcdev.nr_resets = 32;
+	data->rcdev.ops = &oxnas_reset_ops;
+	data->rcdev.of_node = pdev->dev.of_node;
+
+	return reset_controller_register(&data->rcdev);
+}
+
+static int oxnas_reset_remove(struct platform_device *pdev)
+{
+	struct oxnas_reset *data = platform_get_drvdata(pdev);
+
+	reset_controller_unregister(&data->rcdev);
+
+	return 0;
+}
+
+static struct platform_driver oxnas_reset_driver = {
+	.probe	= oxnas_reset_probe,
+	.remove	= oxnas_reset_remove,
+	.driver = {
+		.name		= "oxnas-reset",
+		.of_match_table	= oxnas_reset_dt_ids,
+	},
+};
+
+module_platform_driver(oxnas_reset_driver);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 1fcd27c..fe03b2a 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -436,17 +436,19 @@
 };
 
 /**
- * register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
+ * __register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
  * @rpdrv: pointer to a struct rpmsg_driver
+ * @owner: owning module/driver
  *
  * Returns 0 on success, and an appropriate error value on failure.
  */
-int register_rpmsg_driver(struct rpmsg_driver *rpdrv)
+int __register_rpmsg_driver(struct rpmsg_driver *rpdrv, struct module *owner)
 {
 	rpdrv->drv.bus = &rpmsg_bus;
+	rpdrv->drv.owner = owner;
 	return driver_register(&rpdrv->drv);
 }
-EXPORT_SYMBOL(register_rpmsg_driver);
+EXPORT_SYMBOL(__register_rpmsg_driver);
 
 /**
  * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index b2156ee..ecb7dba 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -863,7 +863,7 @@
  * A user-initiated temperature conversion is not started by this function,
  * so the temperature is updated once every 64 seconds.
  */
-static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC)
+static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
 {
 	struct ds1307 *ds1307 = dev_get_drvdata(dev);
 	u8 temp_buf[2];
@@ -892,7 +892,7 @@
 				struct device_attribute *attr, char *buf)
 {
 	int ret;
-	s16 temp;
+	s32 temp;
 
 	ret = ds3231_hwmon_read_temp(dev, &temp);
 	if (ret)
@@ -1531,7 +1531,7 @@
 		return PTR_ERR(ds1307->rtc);
 	}
 
-	if (ds1307_can_wakeup_device) {
+	if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
 		/* Disable request for an IRQ */
 		want_irq = false;
 		dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
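
Editor's note on the s16 to s32 change above: hwmon temperatures are reported in millidegrees Celsius, and the DS3231's specified range reaches roughly +85 °C, i.e. 85000 m°C, which does not fit in a signed 16-bit value (maximum 32767). A trivial, purely illustrative check:

/* Illustrative only: why a 16-bit type is too small for millidegrees C. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	int temp_mc = 85 * 1000;	/* ~upper end of the DS3231 range */

	printf("%d mC fits in s16? %s\n", temp_mc,
	       temp_mc <= SHRT_MAX ? "yes" : "no, would overflow");
	return 0;
}
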
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index c78db05..8973d34 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -75,6 +75,8 @@
 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
 static void dasd_profile_init(struct dasd_profile *, struct dentry *);
 static void dasd_profile_exit(struct dasd_profile *);
+static void dasd_hosts_init(struct dentry *, struct dasd_device *);
+static void dasd_hosts_exit(struct dasd_device *);
 
 /*
  * SECTION: Operations on the device structure.
@@ -267,6 +269,7 @@
 		dasd_debugfs_setup(dev_name(&device->cdev->dev),
 				   dasd_debugfs_root_entry);
 	dasd_profile_init(&device->profile, device->debugfs_dentry);
+	dasd_hosts_init(device->debugfs_dentry, device);
 
 	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
 	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
@@ -304,6 +307,7 @@
 		return rc;
 	dasd_device_clear_timer(device);
 	dasd_profile_exit(&device->profile);
+	dasd_hosts_exit(device);
 	debugfs_remove(device->debugfs_dentry);
 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
 	if (device->debug_area != NULL) {
@@ -1150,6 +1154,58 @@
 
 #endif				/* CONFIG_DASD_PROFILE */
 
+static int dasd_hosts_show(struct seq_file *m, void *v)
+{
+	struct dasd_device *device;
+	int rc = -EOPNOTSUPP;
+
+	device = m->private;
+	dasd_get_device(device);
+
+	if (device->discipline->hosts_print)
+		rc = device->discipline->hosts_print(device, m);
+
+	dasd_put_device(device);
+	return rc;
+}
+
+static int dasd_hosts_open(struct inode *inode, struct file *file)
+{
+	struct dasd_device *device = inode->i_private;
+
+	return single_open(file, dasd_hosts_show, device);
+}
+
+static const struct file_operations dasd_hosts_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dasd_hosts_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void dasd_hosts_exit(struct dasd_device *device)
+{
+	debugfs_remove(device->hosts_dentry);
+	device->hosts_dentry = NULL;
+}
+
+static void dasd_hosts_init(struct dentry *base_dentry,
+			    struct dasd_device *device)
+{
+	struct dentry *pde;
+	umode_t mode;
+
+	if (!base_dentry)
+		return;
+
+	mode = S_IRUSR | S_IFREG;
+	pde = debugfs_create_file("host_access_list", mode, base_dentry,
+				  device, &dasd_hosts_fops);
+	if (pde && !IS_ERR(pde))
+		device->hosts_dentry = pde;
+}
+
 /*
  * Allocate memory for a channel program with 'cplength' channel
  * command words and 'datasize' additional space. There are two
@@ -1582,6 +1638,9 @@
 	struct dasd_ccw_req *cqr, *next;
 	struct dasd_device *device;
 	unsigned long long now;
+	int nrf_suppressed = 0;
+	int fp_suppressed = 0;
+	u8 *sense = NULL;
 	int expires;
 
 	if (IS_ERR(irb)) {
@@ -1617,7 +1676,23 @@
 			dasd_put_device(device);
 			return;
 		}
-		device->discipline->dump_sense_dbf(device, irb, "int");
+
+		/*
+		 * In some cases 'File Protected' or 'No Record Found' errors
+		 * might be expected and debug log messages for the
+		 * corresponding interrupts shouldn't be written then.
+		 * Check if either of the respective suppress bits is set.
+		 */
+		sense = dasd_get_sense(irb);
+		if (sense) {
+			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
+				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
+				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+		}
+		if (!(fp_suppressed || nrf_suppressed))
+			device->discipline->dump_sense_dbf(device, irb, "int");
+
 		if (device->features & DASD_FEATURE_ERPLOG)
 			device->discipline->dump_sense(device, cqr, irb);
 		device->discipline->check_for_device_change(device, cqr, irb);
@@ -2256,6 +2331,7 @@
 {
 	struct dasd_device *device;
 	struct dasd_ccw_req *cqr, *n;
+	u8 *sense = NULL;
 	int rc;
 
 retry:
@@ -2302,6 +2378,20 @@
 	rc = 0;
 	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
 		/*
+		 * In some cases a 'File Protected' or 'Incorrect Length'
+		 * error might be expected and error recovery would then be
+		 * unnecessary. Check if the corresponding suppress bit is
+		 * set.
+		 */
+		sense = dasd_get_sense(&cqr->irb);
+		if (sense && sense[1] & SNS1_FILE_PROTECTED &&
+		    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
+			continue;
+		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
+		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
+			continue;
+
+		/*
 		 * for alias devices simplify error recovery and
 		 * return to upper layer
 		 * do not skip ERP requests
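
Editor's note: the new host_access_list debugfs file above is a textbook use of the single_open()/seq_file pairing, with the device pointer travelling through inode->i_private. A generic, hedged sketch of that pattern (all names here are illustrative):

/* Editor's sketch of the debugfs + seq_file single-show pattern; names are
 * illustrative and not taken from the patch. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from debugfs\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* inode->i_private is the pointer passed to debugfs_create_file(). */
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* In probe/init code:
 *	debugfs_create_file("demo", S_IRUSR, parent_dentry, priv, &demo_fops);
 */
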
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index d261347..8305ab6 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1367,8 +1367,14 @@
 
 	struct dasd_device *device = default_erp->startdev;
 
-	dev_err(&device->cdev->dev,
-		    "The specified record was not found\n");
+	/*
+	 * In some cases the 'No Record Found' error might be expected and
+	 * log messages shouldn't be written then.
+	 * Check if the corresponding suppress bit is set.
+	 */
+	if (!test_bit(DASD_CQR_SUPPRESS_NRF, &default_erp->flags))
+		dev_err(&device->cdev->dev,
+			"The specified record was not found\n");
 
 	return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
 
@@ -1393,8 +1399,14 @@
 
 	struct dasd_device *device = erp->startdev;
 
-	dev_err(&device->cdev->dev, "Accessing the DASD failed because of "
-		"a hardware error\n");
+	/*
+	 * In some cases the 'File Protected' error might be expected and
+	 * log messages shouldn't be written then.
+	 * Check if the corresponding suppress bit is set.
+	 */
+	if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
+		dev_err(&device->cdev->dev,
+			"Accessing the DASD failed because of a hardware error\n");
 
 	return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
 
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 17ad574..1e56018 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -317,17 +317,17 @@
 	struct alias_pav_group *group;
 	struct dasd_uid uid;
 
+	spin_lock(get_ccwdev_lock(device->cdev));
 	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
 	private->uid.base_unit_addr =
 		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
 	uid = private->uid;
-
+	spin_unlock(get_ccwdev_lock(device->cdev));
 	/* if we have no PAV anyway, we don't need to bother with PAV groups */
 	if (lcu->pav == NO_PAV) {
 		list_move(&device->alias_list, &lcu->active_devices);
 		return 0;
 	}
-
 	group = _find_group(lcu, &uid);
 	if (!group) {
 		group = kzalloc(sizeof(*group), GFP_ATOMIC);
@@ -397,130 +397,6 @@
 	return 0;
 }
 
-/*
- * This function tries to lock all devices on an lcu via trylock
- * return NULL on success otherwise return first failed device
- */
-static struct dasd_device *_trylock_all_devices_on_lcu(struct alias_lcu *lcu,
-						      struct dasd_device *pos)
-
-{
-	struct alias_pav_group *pavgroup;
-	struct dasd_device *device;
-
-	list_for_each_entry(device, &lcu->active_devices, alias_list) {
-		if (device == pos)
-			continue;
-		if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-			return device;
-	}
-	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
-		if (device == pos)
-			continue;
-		if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-			return device;
-	}
-	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
-			if (device == pos)
-				continue;
-			if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-				return device;
-		}
-		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
-			if (device == pos)
-				continue;
-			if (!spin_trylock(get_ccwdev_lock(device->cdev)))
-				return device;
-		}
-	}
-	return NULL;
-}
-
-/*
- * unlock all devices except the one that is specified as pos
- * stop if enddev is specified and reached
- */
-static void _unlock_all_devices_on_lcu(struct alias_lcu *lcu,
-				       struct dasd_device *pos,
-				       struct dasd_device *enddev)
-
-{
-	struct alias_pav_group *pavgroup;
-	struct dasd_device *device;
-
-	list_for_each_entry(device, &lcu->active_devices, alias_list) {
-		if (device == pos)
-			continue;
-		if (device == enddev)
-			return;
-		spin_unlock(get_ccwdev_lock(device->cdev));
-	}
-	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
-		if (device == pos)
-			continue;
-		if (device == enddev)
-			return;
-		spin_unlock(get_ccwdev_lock(device->cdev));
-	}
-	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
-			if (device == pos)
-				continue;
-			if (device == enddev)
-				return;
-			spin_unlock(get_ccwdev_lock(device->cdev));
-		}
-		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
-			if (device == pos)
-				continue;
-			if (device == enddev)
-				return;
-			spin_unlock(get_ccwdev_lock(device->cdev));
-		}
-	}
-}
-
-/*
- *  this function is needed because the locking order
- *  device lock -> lcu lock
- *  needs to be assured when iterating over devices in an LCU
- *
- *  if a device is specified in pos then the device lock is already hold
- */
-static void _trylock_and_lock_lcu_irqsave(struct alias_lcu *lcu,
-					  struct dasd_device *pos,
-					  unsigned long *flags)
-{
-	struct dasd_device *failed;
-
-	do {
-		spin_lock_irqsave(&lcu->lock, *flags);
-		failed = _trylock_all_devices_on_lcu(lcu, pos);
-		if (failed) {
-			_unlock_all_devices_on_lcu(lcu, pos, failed);
-			spin_unlock_irqrestore(&lcu->lock, *flags);
-			cpu_relax();
-		}
-	} while (failed);
-}
-
-static void _trylock_and_lock_lcu(struct alias_lcu *lcu,
-				  struct dasd_device *pos)
-{
-	struct dasd_device *failed;
-
-	do {
-		spin_lock(&lcu->lock);
-		failed = _trylock_all_devices_on_lcu(lcu, pos);
-		if (failed) {
-			_unlock_all_devices_on_lcu(lcu, pos, failed);
-			spin_unlock(&lcu->lock);
-			cpu_relax();
-		}
-	} while (failed);
-}
-
 static int read_unit_address_configuration(struct dasd_device *device,
 					   struct alias_lcu *lcu)
 {
@@ -615,7 +491,7 @@
 	if (rc)
 		return rc;
 
-	_trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
+	spin_lock_irqsave(&lcu->lock, flags);
 	lcu->pav = NO_PAV;
 	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
 		switch (lcu->uac->unit[i].ua_type) {
@@ -634,7 +510,6 @@
 				 alias_list) {
 		_add_device_to_lcu(lcu, device, refdev);
 	}
-	_unlock_all_devices_on_lcu(lcu, NULL, NULL);
 	spin_unlock_irqrestore(&lcu->lock, flags);
 	return 0;
 }
@@ -722,8 +597,7 @@
 
 	lcu = private->lcu;
 	rc = 0;
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	spin_lock(&lcu->lock);
+	spin_lock_irqsave(&lcu->lock, flags);
 	if (!(lcu->flags & UPDATE_PENDING)) {
 		rc = _add_device_to_lcu(lcu, device, device);
 		if (rc)
@@ -733,8 +607,7 @@
 		list_move(&device->alias_list, &lcu->active_devices);
 		_schedule_lcu_update(lcu, device);
 	}
-	spin_unlock(&lcu->lock);
-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	spin_unlock_irqrestore(&lcu->lock, flags);
 	return rc;
 }
 
@@ -933,15 +806,27 @@
 	struct alias_pav_group *pavgroup;
 	struct dasd_device *device;
 
-	list_for_each_entry(device, &lcu->active_devices, alias_list)
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
 		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
-	list_for_each_entry(device, &lcu->inactive_devices, alias_list)
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
 		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(device, &pavgroup->baselist, alias_list)
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
 			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
-		list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
 			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
 	}
 }
 
@@ -950,15 +835,27 @@
 	struct alias_pav_group *pavgroup;
 	struct dasd_device *device;
 
-	list_for_each_entry(device, &lcu->active_devices, alias_list)
+	list_for_each_entry(device, &lcu->active_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
-	list_for_each_entry(device, &lcu->inactive_devices, alias_list)
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
+	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+		spin_lock(get_ccwdev_lock(device->cdev));
 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+		spin_unlock(get_ccwdev_lock(device->cdev));
+	}
 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
-		list_for_each_entry(device, &pavgroup->baselist, alias_list)
+		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
-		list_for_each_entry(device, &pavgroup->aliaslist, alias_list)
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
+		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+			spin_lock(get_ccwdev_lock(device->cdev));
 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
+			spin_unlock(get_ccwdev_lock(device->cdev));
+		}
 	}
 }
 
@@ -984,48 +881,32 @@
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	reset_summary_unit_check(lcu, device, suc_data->reason);
 
-	_trylock_and_lock_lcu_irqsave(lcu, NULL, &flags);
+	spin_lock_irqsave(&lcu->lock, flags);
 	_unstop_all_devices_on_lcu(lcu);
 	_restart_all_base_devices_on_lcu(lcu);
 	/* 3. read new alias configuration */
 	_schedule_lcu_update(lcu, device);
 	lcu->suc_data.device = NULL;
 	dasd_put_device(device);
-	_unlock_all_devices_on_lcu(lcu, NULL, NULL);
 	spin_unlock_irqrestore(&lcu->lock, flags);
 }
 
-/*
- * note: this will be called from int handler context (cdev locked)
- */
-void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
-					  struct irb *irb)
+void dasd_alias_handle_summary_unit_check(struct work_struct *work)
 {
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  suc_work);
 	struct dasd_eckd_private *private = device->private;
 	struct alias_lcu *lcu;
-	char reason;
-	char *sense;
-
-	sense = dasd_get_sense(irb);
-	if (sense) {
-		reason = sense[8];
-		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
-			    "eckd handle summary unit check: reason", reason);
-	} else {
-		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
-			    "eckd handle summary unit check:"
-			    " no reason code available");
-		return;
-	}
+	unsigned long flags;
 
 	lcu = private->lcu;
 	if (!lcu) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			    "device not ready to handle summary"
 			    " unit check (no lcu structure)");
-		return;
+		goto out;
 	}
-	_trylock_and_lock_lcu(lcu, device);
+	spin_lock_irqsave(&lcu->lock, flags);
 	/* If this device is about to be removed just return and wait for
 	 * the next interrupt on a different device
 	 */
@@ -1033,27 +914,26 @@
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			    "device is in offline processing,"
 			    " don't do summary unit check handling");
-		_unlock_all_devices_on_lcu(lcu, device, NULL);
-		spin_unlock(&lcu->lock);
-		return;
+		goto out_unlock;
 	}
 	if (lcu->suc_data.device) {
 		/* already scheduled or running */
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			    "previous instance of summary unit check worker"
 			    " still pending");
-		_unlock_all_devices_on_lcu(lcu, device, NULL);
-		spin_unlock(&lcu->lock);
-		return ;
+		goto out_unlock;
 	}
 	_stop_all_devices_on_lcu(lcu);
 	/* prepare for lcu_update */
-	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
-	lcu->suc_data.reason = reason;
+	lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
+	lcu->suc_data.reason = private->suc_reason;
 	lcu->suc_data.device = device;
 	dasd_get_device(device);
-	_unlock_all_devices_on_lcu(lcu, device, NULL);
-	spin_unlock(&lcu->lock);
 	if (!schedule_work(&lcu->suc_data.worker))
 		dasd_put_device(device);
+out_unlock:
+	spin_unlock_irqrestore(&lcu->lock, flags);
+out:
+	clear_bit(DASD_FLAG_SUC, &device->flags);
+	dasd_put_device(device);
 };
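
Editor's note: with this change, summary-unit-check handling moves out of interrupt context into a work item; the handler recovers its dasd_device via container_of(work, ...) and the interrupt path (see the dasd_eckd.c hunk further down) only schedules device->suc_work. The deferral pattern, sketched generically with made-up names:

/* Editor's sketch of the irq-to-workqueue deferral pattern used above;
 * the structure and function names are made up for illustration. */
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct demo_device {
	int id;
	struct work_struct event_work;
};

static void demo_handle_event(struct work_struct *work)
{
	struct demo_device *dev =
		container_of(work, struct demo_device, event_work);

	/* Process context: sleeping and ordinary locking are allowed here. */
	pr_info("handling deferred event for device %d\n", dev->id);
}

static void demo_setup(struct demo_device *dev)
{
	INIT_WORK(&dev->event_work, demo_handle_event);
}

static void demo_interrupt_path(struct demo_device *dev)
{
	/* Interrupt context: just queue the heavy lifting. */
	schedule_work(&dev->event_work);
}
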
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 2f18f61..3cdbce4 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -982,6 +982,32 @@
 static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
 
 static ssize_t
+dasd_access_show(struct device *dev, struct device_attribute *attr,
+		 char *buf)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct dasd_device *device;
+	int count;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	if (device->discipline->host_access_count)
+		count = device->discipline->host_access_count(device);
+	else
+		count = -EOPNOTSUPP;
+
+	dasd_put_device(device);
+	if (count < 0)
+		return count;
+
+	return sprintf(buf, "%d\n", count);
+}
+
+static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL);
+
+static ssize_t
 dasd_discipline_show(struct device *dev, struct device_attribute *attr,
 		     char *buf)
 {
@@ -1471,6 +1497,7 @@
 	&dev_attr_reservation_policy.attr,
 	&dev_attr_last_known_reservation_state.attr,
 	&dev_attr_safe_offline.attr,
+	&dev_attr_host_access_count.attr,
 	&dev_attr_path_masks.attr,
 	NULL,
 };
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 75c032d..42b34cd 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/compat.h>
 #include <linux/init.h>
+#include <linux/seq_file.h>
 
 #include <asm/css_chars.h>
 #include <asm/debug.h>
@@ -120,6 +121,11 @@
 	__u8 lpum;
 };
 
+static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
+			struct dasd_device *, struct dasd_device *,
+			unsigned int, int, unsigned int, unsigned int,
+			unsigned int, unsigned int);
+
 /* initial attempt at a probe function. this can be simplified once
  * the other detection code is gone */
 static int
@@ -256,10 +262,13 @@
 	case DASD_ECKD_CCW_READ_CKD_MT:
 	case DASD_ECKD_CCW_READ_KD:
 	case DASD_ECKD_CCW_READ_KD_MT:
-	case DASD_ECKD_CCW_READ_COUNT:
 		data->mask.perm = 0x1;
 		data->attributes.operation = private->attrib.operation;
 		break;
+	case DASD_ECKD_CCW_READ_COUNT:
+		data->mask.perm = 0x1;
+		data->attributes.operation = DASD_BYPASS_CACHE;
+		break;
 	case DASD_ECKD_CCW_WRITE:
 	case DASD_ECKD_CCW_WRITE_MT:
 	case DASD_ECKD_CCW_WRITE_KD:
@@ -528,10 +537,13 @@
 	case DASD_ECKD_CCW_READ_CKD_MT:
 	case DASD_ECKD_CCW_READ_KD:
 	case DASD_ECKD_CCW_READ_KD_MT:
-	case DASD_ECKD_CCW_READ_COUNT:
 		dedata->mask.perm = 0x1;
 		dedata->attributes.operation = basepriv->attrib.operation;
 		break;
+	case DASD_ECKD_CCW_READ_COUNT:
+		dedata->mask.perm = 0x1;
+		dedata->attributes.operation = DASD_BYPASS_CACHE;
+		break;
 	case DASD_ECKD_CCW_READ_TRACK:
 	case DASD_ECKD_CCW_READ_TRACK_DATA:
 		dedata->mask.perm = 0x1;
@@ -1682,6 +1694,8 @@
 
 	/* setup work queue for validate server*/
 	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
+	/* setup work queue for summary unit check */
+	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
 
 	if (!ccw_device_is_pathgroup(device->cdev)) {
 		dev_warn(&device->cdev->dev,
@@ -2093,6 +2107,180 @@
 	return 0;
 }
 
+/*
+ * Build the TCW request for the format check
+ */
+static struct dasd_ccw_req *
+dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
+			  int enable_pav, struct eckd_count *fmt_buffer,
+			  int rpt)
+{
+	struct dasd_eckd_private *start_priv;
+	struct dasd_device *startdev = NULL;
+	struct tidaw *last_tidaw = NULL;
+	struct dasd_ccw_req *cqr;
+	struct itcw *itcw;
+	int itcw_size;
+	int count;
+	int rc;
+	int i;
+
+	if (enable_pav)
+		startdev = dasd_alias_get_start_dev(base);
+
+	if (!startdev)
+		startdev = base;
+
+	start_priv = startdev->private;
+
+	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
+
+	/*
+	 * we're adding 'count' tidaws to the itcw.
+	 * calculate the corresponding itcw_size
+	 */
+	itcw_size = itcw_calc_size(0, count, 0);
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+	if (IS_ERR(cqr))
+		return cqr;
+
+	start_priv->count++;
+
+	itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
+	if (IS_ERR(itcw)) {
+		rc = -EINVAL;
+		goto out_err;
+	}
+
+	cqr->cpaddr = itcw_get_tcw(itcw);
+	rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
+			  DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
+			  sizeof(struct eckd_count),
+			  count * sizeof(struct eckd_count), 0, rpt);
+	if (rc)
+		goto out_err;
+
+	for (i = 0; i < count; i++) {
+		last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
+					    sizeof(struct eckd_count));
+		if (IS_ERR(last_tidaw)) {
+			rc = -EINVAL;
+			goto out_err;
+		}
+	}
+
+	last_tidaw->flags |= TIDAW_FLAGS_LAST;
+	itcw_finalize(itcw);
+
+	cqr->cpmode = 1;
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->basedev = base;
+	cqr->retries = startdev->default_retries;
+	cqr->expires = startdev->default_expires * HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	/* Set flags to suppress output for expected errors */
+	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+
+	return cqr;
+
+out_err:
+	dasd_sfree_request(cqr, startdev);
+
+	return ERR_PTR(rc);
+}
+
+/*
+ * Build the CCW request for the format check
+ */
+static struct dasd_ccw_req *
+dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
+		      int enable_pav, struct eckd_count *fmt_buffer, int rpt)
+{
+	struct dasd_eckd_private *start_priv;
+	struct dasd_eckd_private *base_priv;
+	struct dasd_device *startdev = NULL;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	void *data;
+	int cplength, datasize;
+	int use_prefix;
+	int count;
+	int i;
+
+	if (enable_pav)
+		startdev = dasd_alias_get_start_dev(base);
+
+	if (!startdev)
+		startdev = base;
+
+	start_priv = startdev->private;
+	base_priv = base->private;
+
+	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
+
+	use_prefix = base_priv->features.feature[8] & 0x01;
+
+	if (use_prefix) {
+		cplength = 1;
+		datasize = sizeof(struct PFX_eckd_data);
+	} else {
+		cplength = 2;
+		datasize = sizeof(struct DE_eckd_data) +
+			sizeof(struct LO_eckd_data);
+	}
+	cplength += count;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+				  startdev);
+	if (IS_ERR(cqr))
+		return cqr;
+
+	start_priv->count++;
+	data = cqr->data;
+	ccw = cqr->cpaddr;
+
+	if (use_prefix) {
+		prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
+			   DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
+			   count, 0, 0);
+	} else {
+		define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
+			      DASD_ECKD_CCW_READ_COUNT, startdev);
+
+		data += sizeof(struct DE_eckd_data);
+		ccw[-1].flags |= CCW_FLAG_CC;
+
+		locate_record(ccw++, data, fdata->start_unit, 0, count,
+			      DASD_ECKD_CCW_READ_COUNT, base, 0);
+	}
+
+	for (i = 0; i < count; i++) {
+		ccw[-1].flags |= CCW_FLAG_CC;
+		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+		ccw->flags = CCW_FLAG_SLI;
+		ccw->count = 8;
+		ccw->cda = (__u32)(addr_t) fmt_buffer;
+		ccw++;
+		fmt_buffer++;
+	}
+
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->basedev = base;
+	cqr->retries = DASD_RETRIES;
+	cqr->expires = startdev->default_expires * HZ;
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	/* Set flags to suppress output for expected errors */
+	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
+	return cqr;
+}
+
 static struct dasd_ccw_req *
 dasd_eckd_build_format(struct dasd_device *base,
 		       struct format_data_t *fdata,
@@ -2360,9 +2548,24 @@
  */
 static struct dasd_ccw_req *
 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
-			       struct format_data_t *fdata, int enable_pav)
+			       struct format_data_t *fdata, int enable_pav,
+			       int tpm, struct eckd_count *fmt_buffer, int rpt)
 {
-	return dasd_eckd_build_format(base, fdata, enable_pav);
+	struct dasd_ccw_req *ccw_req;
+
+	if (!fmt_buffer) {
+		ccw_req = dasd_eckd_build_format(base, fdata, enable_pav);
+	} else {
+		if (tpm)
+			ccw_req = dasd_eckd_build_check_tcw(base, fdata,
+							    enable_pav,
+							    fmt_buffer, rpt);
+		else
+			ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
+							fmt_buffer, rpt);
+	}
+
+	return ccw_req;
 }
 
 /*
@@ -2407,12 +2610,15 @@
  */
 static int dasd_eckd_format_process_data(struct dasd_device *base,
 					 struct format_data_t *fdata,
-					 int enable_pav)
+					 int enable_pav, int tpm,
+					 struct eckd_count *fmt_buffer, int rpt,
+					 struct irb *irb)
 {
 	struct dasd_eckd_private *private = base->private;
 	struct dasd_ccw_req *cqr, *n;
 	struct list_head format_queue;
 	struct dasd_device *device;
+	char *sense = NULL;
 	int old_start, old_stop, format_step;
 	int step, retry;
 	int rc;
@@ -2426,8 +2632,18 @@
 	old_start = fdata->start_unit;
 	old_stop = fdata->stop_unit;
 
-	format_step = DASD_CQR_MAX_CCW / recs_per_track(&private->rdc_data, 0,
-							fdata->blksize);
+	if (!tpm && fmt_buffer != NULL) {
+		/* Command Mode / Format Check */
+		format_step = 1;
+	} else if (tpm && fmt_buffer != NULL) {
+		/* Transport Mode / Format Check */
+		format_step = DASD_CQR_MAX_CCW / rpt;
+	} else {
+		/* Normal Formatting */
+		format_step = DASD_CQR_MAX_CCW /
+			recs_per_track(&private->rdc_data, 0, fdata->blksize);
+	}
+
 	do {
 		retry = 0;
 		while (fdata->start_unit <= old_stop) {
@@ -2438,7 +2654,8 @@
 			}
 
 			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
-							     enable_pav);
+							     enable_pav, tpm,
+							     fmt_buffer, rpt);
 			if (IS_ERR(cqr)) {
 				rc = PTR_ERR(cqr);
 				if (rc == -ENOMEM) {
@@ -2456,6 +2673,10 @@
 			}
 			list_add_tail(&cqr->blocklist, &format_queue);
 
+			if (fmt_buffer) {
+				step = fdata->stop_unit - fdata->start_unit + 1;
+				fmt_buffer += rpt * step;
+			}
 			fdata->start_unit = fdata->stop_unit + 1;
 			fdata->stop_unit = old_stop;
 		}
@@ -2466,15 +2687,41 @@
 		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
 			device = cqr->startdev;
 			private = device->private;
-			if (cqr->status == DASD_CQR_FAILED)
+
+			if (cqr->status == DASD_CQR_FAILED) {
+				/*
+				 * Only get sense data if called by format
+				 * check
+				 */
+				if (fmt_buffer && irb) {
+					sense = dasd_get_sense(&cqr->irb);
+					memcpy(irb, &cqr->irb, sizeof(*irb));
+				}
 				rc = -EIO;
+			}
 			list_del_init(&cqr->blocklist);
 			dasd_sfree_request(cqr, device);
 			private->count--;
 		}
 
-		if (rc)
+		if (rc && rc != -EIO)
 			goto out;
+		if (rc == -EIO) {
+			/*
+			 * In case fewer than the expected records are on the
+			 * track, we will most likely get a 'No Record Found'
+			 * error (in command mode) or a 'File Protected' error
+			 * (in transport mode). Those particular cases shouldn't
+			 * propagate -EIO to the ioctl; reset rc and continue
+			 * instead.
+			 */
+			if (sense &&
+			    (sense[1] & SNS1_NO_REC_FOUND ||
+			     sense[1] & SNS1_FILE_PROTECTED))
+				retry = 1;
+			else
+				goto out;
+		}
 
 	} while (retry);
 
@@ -2488,7 +2735,225 @@
 static int dasd_eckd_format_device(struct dasd_device *base,
 				   struct format_data_t *fdata, int enable_pav)
 {
-	return dasd_eckd_format_process_data(base, fdata, enable_pav);
+	return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
+					     0, NULL);
+}
+
+/*
+ * Helper function to count consecutive records of a single track.
+ */
+static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
+				   int max)
+{
+	int head;
+	int i;
+
+	head = fmt_buffer[start].head;
+
+	/*
+	 * There are 3 conditions where we stop counting:
+	 * - if data reoccurs (same head and record may reoccur), which may
+	 *   happen due to the way DASD_ECKD_CCW_READ_COUNT works
+	 * - when the head changes, because we're iterating over several tracks
+	 *   then (DASD_ECKD_CCW_READ_COUNT_MT)
+	 * - when we've reached the end of sensible data in the buffer (the
+	 *   record will be 0 then)
+	 */
+	for (i = start; i < max; i++) {
+		if (i > start) {
+			if ((fmt_buffer[i].head == head &&
+			    fmt_buffer[i].record == 1) ||
+			    fmt_buffer[i].head != head ||
+			    fmt_buffer[i].record == 0)
+				break;
+		}
+	}
+
+	return i - start;
+}
+
+/*
+ * Evaluate a given range of tracks. Data like number of records, blocksize,
+ * record ids, and key length are compared with expected data.
+ *
+ * If a mismatch occurs, the corresponding error bit is set, as well as
+ * additional information, depending on the error.
+ */
+static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
+					     struct format_check_t *cdata,
+					     int rpt_max, int rpt_exp,
+					     int trk_per_cyl, int tpm)
+{
+	struct ch_t geo;
+	int max_entries;
+	int count = 0;
+	int trkcount;
+	int blksize;
+	int pos = 0;
+	int i, j;
+	int kl;
+
+	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
+	max_entries = trkcount * rpt_max;
+
+	for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
+		/* Calculate the correct next starting position in the buffer */
+		if (tpm) {
+			while (fmt_buffer[pos].record == 0 &&
+			       fmt_buffer[pos].dl == 0) {
+				if (pos++ > max_entries)
+					break;
+			}
+		} else {
+			if (i != cdata->expect.start_unit)
+				pos += rpt_max - count;
+		}
+
+		/* Calculate the expected geo values for the current track */
+		set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
+
+		/* Count and check number of records */
+		count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
+
+		if (count < rpt_exp) {
+			cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
+			break;
+		}
+		if (count > rpt_exp) {
+			cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
+			break;
+		}
+
+		for (j = 0; j < count; j++, pos++) {
+			blksize = cdata->expect.blksize;
+			kl = 0;
+
+			/*
+			 * Set special values when checking CDL formatted
+			 * devices.
+			 */
+			if ((cdata->expect.intensity & 0x08) &&
+			    geo.cyl == 0 && geo.head == 0) {
+				if (j < 3) {
+					blksize = sizes_trk0[j] - 4;
+					kl = 4;
+				}
+			}
+			if ((cdata->expect.intensity & 0x08) &&
+			    geo.cyl == 0 && geo.head == 1) {
+				blksize = LABEL_SIZE - 44;
+				kl = 44;
+			}
+
+			/* Check blocksize */
+			if (fmt_buffer[pos].dl != blksize) {
+				cdata->result = DASD_FMT_ERR_BLKSIZE;
+				goto out;
+			}
+			/* Check if key length is 0 */
+			if (fmt_buffer[pos].kl != kl) {
+				cdata->result = DASD_FMT_ERR_KEY_LENGTH;
+				goto out;
+			}
+			/* Check if record_id is correct */
+			if (fmt_buffer[pos].cyl != geo.cyl ||
+			    fmt_buffer[pos].head != geo.head ||
+			    fmt_buffer[pos].record != (j + 1)) {
+				cdata->result = DASD_FMT_ERR_RECORD_ID;
+				goto out;
+			}
+		}
+	}
+
+out:
+	/*
+	 * In case of no errors, we need to decrease by one
+	 * to get the correct positions.
+	 */
+	if (!cdata->result) {
+		i--;
+		pos--;
+	}
+
+	cdata->unit = i;
+	cdata->num_records = count;
+	cdata->rec = fmt_buffer[pos].record;
+	cdata->blksize = fmt_buffer[pos].dl;
+	cdata->key_length = fmt_buffer[pos].kl;
+}
+
+/*
+ * Check the format of a range of tracks of a DASD.
+ */
+static int dasd_eckd_check_device_format(struct dasd_device *base,
+					 struct format_check_t *cdata,
+					 int enable_pav)
+{
+	struct dasd_eckd_private *private = base->private;
+	struct eckd_count *fmt_buffer;
+	struct irb irb;
+	int rpt_max, rpt_exp;
+	int fmt_buffer_size;
+	int trk_per_cyl;
+	int trkcount;
+	int tpm = 0;
+	int rc;
+
+	trk_per_cyl = private->rdc_data.trk_per_cyl;
+
+	/* Get maximum and expected amount of records per track */
+	rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
+	rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
+
+	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
+	fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
+
+	fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
+	if (!fmt_buffer)
+		return -ENOMEM;
+
+	/*
+	 * A certain FICON feature subset is needed to operate in transport
+	 * mode. Additionally, the support for transport mode is implicitly
+	 * checked by comparing the buffer size with fcx_max_data. As long as
+	 * the buffer size is smaller, we can operate in transport mode and
+	 * process multiple tracks. Otherwise, only one track at a time is
+	 * processed, using command mode.
+	 */
+	if ((private->features.feature[40] & 0x04) &&
+	    fmt_buffer_size <= private->fcx_max_data)
+		tpm = 1;
+
+	rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
+					   tpm, fmt_buffer, rpt_max, &irb);
+	if (rc && rc != -EIO)
+		goto out;
+	if (rc == -EIO) {
+		/*
+		 * If our first attempt with transport mode enabled comes back
+		 * with an incorrect length error, we're going to retry the
+		 * check with command mode.
+		 */
+		if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
+			tpm = 0;
+			rc = dasd_eckd_format_process_data(base, &cdata->expect,
+							   enable_pav, tpm,
+							   fmt_buffer, rpt_max,
+							   &irb);
+			if (rc)
+				goto out;
+		} else {
+			goto out;
+		}
+	}
+
+	dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
+					 trk_per_cyl, tpm);
+
+out:
+	kfree(fmt_buffer);
+
+	return rc;
 }
 
 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
@@ -2549,14 +3014,6 @@
 		    device->state == DASD_STATE_ONLINE &&
 		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
 		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
-			/*
-			 * the state change could be caused by an alias
-			 * reassignment remove device from alias handling
-			 * to prevent new requests from being scheduled on
-			 * the wrong alias device
-			 */
-			dasd_alias_remove_device(device);
-
 			/* schedule worker to reload device */
 			dasd_reload_device(device);
 		}
@@ -2571,7 +3028,27 @@
 	/* summary unit check */
 	if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
 	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
-		dasd_alias_handle_summary_unit_check(device, irb);
+		if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "eckd suc: device already notified");
+			return;
+		}
+		sense = dasd_get_sense(irb);
+		if (!sense) {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "eckd suc: no reason code available");
+			clear_bit(DASD_FLAG_SUC, &device->flags);
+			return;
+
+		}
+		private->suc_reason = sense[8];
+		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
+			      "eckd handle summary unit check: reason",
+			      private->suc_reason);
+		dasd_get_device(device);
+		if (!schedule_work(&device->suc_work))
+			dasd_put_device(device);
+
 		return;
 	}
 
@@ -3023,6 +3500,16 @@
 		lredata->auxiliary.check_bytes = 0x2;
 		pfx_cmd = DASD_ECKD_CCW_PFX;
 		break;
+	case DASD_ECKD_CCW_READ_COUNT_MT:
+		dedata->mask.perm = 0x1;
+		dedata->attributes.operation = DASD_BYPASS_CACHE;
+		dedata->ga_extended |= 0x42;
+		dedata->blk_size = blksize;
+		lredata->operation.orientation = 0x2;
+		lredata->operation.operation = 0x16;
+		lredata->auxiliary.check_bytes = 0x01;
+		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
+		break;
 	default:
 		DBF_DEV_EVENT(DBF_ERR, basedev,
 			      "prepare itcw, unknown opcode 0x%x", cmd);
@@ -3070,13 +3557,19 @@
 		}
 	}
 
-	lredata->auxiliary.length_valid = 1;
-	lredata->auxiliary.length_scope = 1;
+	if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
+		lredata->auxiliary.length_valid = 0;
+		lredata->auxiliary.length_scope = 0;
+		lredata->sector = 0xff;
+	} else {
+		lredata->auxiliary.length_valid = 1;
+		lredata->auxiliary.length_scope = 1;
+		lredata->sector = sector;
+	}
 	lredata->auxiliary.imbedded_ccw_valid = 1;
 	lredata->length = tlf;
 	lredata->imbedded_ccw = cmd;
 	lredata->count = count;
-	lredata->sector = sector;
 	set_ch_t(&lredata->seek_addr, begcyl, beghead);
 	lredata->search_arg.cyl = lredata->seek_addr.cyl;
 	lredata->search_arg.head = lredata->seek_addr.head;
@@ -4398,10 +4891,34 @@
 static void dasd_eckd_dump_sense(struct dasd_device *device,
 				 struct dasd_ccw_req *req, struct irb *irb)
 {
-	if (scsw_is_tm(&irb->scsw))
+	u8 *sense = dasd_get_sense(irb);
+
+	if (scsw_is_tm(&irb->scsw)) {
+		/*
+		 * In some cases the 'File Protected' or 'Incorrect Length'
+		 * error might be expected and log messages shouldn't be written
+		 * then. Check if the corresponding suppress bit is set.
+		 */
+		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
+		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
+			return;
+		if (scsw_cstat(&irb->scsw) == 0x40 &&
+		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
+			return;
+
 		dasd_eckd_dump_sense_tcw(device, req, irb);
-	else
+	} else {
+		/*
+		 * In some cases the 'No Record Found' error might be expected
+		 * and log messages shouldn't be written then. Check if the
+		 * corresponding suppress bit is set.
+		 */
+		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
+		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
+			return;
+
 		dasd_eckd_dump_sense_ccw(device, req, irb);
+	}
 }
 
 static int dasd_eckd_pm_freeze(struct dasd_device *device)
@@ -4495,6 +5012,12 @@
 	struct dasd_uid uid;
 	unsigned long flags;
 
+	/*
+	 * remove device from alias handling to prevent new requests
+	 * from being scheduled on the wrong alias device
+	 */
+	dasd_alias_remove_device(device);
+
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	old_base = private->uid.base_unit_addr;
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
@@ -4607,6 +5130,167 @@
 	return rc;
 }
 
+static int dasd_eckd_query_host_access(struct dasd_device *device,
+				       struct dasd_psf_query_host_access *data)
+{
+	struct dasd_eckd_private *private = device->private;
+	struct dasd_psf_query_host_access *host_access;
+	struct dasd_psf_prssd_data *prssdp;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	int rc;
+
+	/* not available for HYPER PAV alias devices */
+	if (!device->block && private->lcu->pav == HYPER_PAV)
+		return -EOPNOTSUPP;
+
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
+				   sizeof(struct dasd_psf_prssd_data) + 1,
+				   device);
+	if (IS_ERR(cqr)) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+				"Could not allocate read message buffer request");
+		return PTR_ERR(cqr);
+	}
+	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
+	if (!host_access) {
+		dasd_sfree_request(cqr, device);
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+				"Could not allocate host_access buffer");
+		return -ENOMEM;
+	}
+	cqr->startdev = device;
+	cqr->memdev = device;
+	cqr->block = NULL;
+	cqr->retries = 256;
+	cqr->expires = 10 * HZ;
+
+	/* Prepare for Read Subsystem Data */
+	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+	prssdp->order = PSF_ORDER_PRSSD;
+	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
+	/* LSS and Volume that will be queried */
+	prssdp->lss = private->ned->ID;
+	prssdp->volume = private->ned->unit_addr;
+	/* all other bytes of prssdp must be zero */
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+	ccw->count = sizeof(struct dasd_psf_prssd_data);
+	ccw->flags |= CCW_FLAG_CC;
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->cda = (__u32)(addr_t) prssdp;
+
+	/* Read Subsystem Data - query host access */
+	ccw++;
+	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+	ccw->count = sizeof(struct dasd_psf_query_host_access);
+	ccw->flags |= CCW_FLAG_SLI;
+	ccw->cda = (__u32)(addr_t) host_access;
+
+	cqr->buildclk = get_tod_clock();
+	cqr->status = DASD_CQR_FILLED;
+	rc = dasd_sleep_on(cqr);
+	if (rc == 0) {
+		*data = *host_access;
+	} else {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+				"Reading host access data failed with rc=%d\n",
+				rc);
+		rc = -EOPNOTSUPP;
+	}
+
+	dasd_sfree_request(cqr, cqr->memdev);
+	kfree(host_access);
+	return rc;
+}
+/*
+ * return number of grouped devices
+ */
+static int dasd_eckd_host_access_count(struct dasd_device *device)
+{
+	struct dasd_psf_query_host_access *access;
+	struct dasd_ckd_path_group_entry *entry;
+	struct dasd_ckd_host_information *info;
+	int count = 0;
+	int rc, i;
+
+	access = kzalloc(sizeof(*access), GFP_NOIO);
+	if (!access) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+				"Could not allocate access buffer");
+		return -ENOMEM;
+	}
+	rc = dasd_eckd_query_host_access(device, access);
+	if (rc) {
+		kfree(access);
+		return rc;
+	}
+
+	info = (struct dasd_ckd_host_information *)
+		access->host_access_information;
+	for (i = 0; i < info->entry_count; i++) {
+		entry = (struct dasd_ckd_path_group_entry *)
+			(info->entry + i * info->entry_size);
+		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
+			count++;
+	}
+
+	kfree(access);
+	return count;
+}
+
+/*
+ * write host access information to a sequential file
+ */
+static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
+{
+	struct dasd_psf_query_host_access *access;
+	struct dasd_ckd_path_group_entry *entry;
+	struct dasd_ckd_host_information *info;
+	char sysplex[9] = "";
+	int rc, i, j;
+
+	access = kzalloc(sizeof(*access), GFP_NOIO);
+	if (!access) {
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+				"Could not allocate access buffer");
+		return -ENOMEM;
+	}
+	rc = dasd_eckd_query_host_access(device, access);
+	if (rc) {
+		kfree(access);
+		return rc;
+	}
+
+	info = (struct dasd_ckd_host_information *)
+		access->host_access_information;
+	for (i = 0; i < info->entry_count; i++) {
+		entry = (struct dasd_ckd_path_group_entry *)
+			(info->entry + i * info->entry_size);
+		/* PGID */
+		seq_puts(m, "pgid ");
+		for (j = 0; j < 11; j++)
+			seq_printf(m, "%02x", entry->pgid[j]);
+		seq_putc(m, '\n');
+		/* FLAGS */
+		seq_printf(m, "status_flags %02x\n", entry->status_flags);
+		/* SYSPLEX NAME */
+		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
+		EBCASC(sysplex, sizeof(sysplex));
+		seq_printf(m, "sysplex_name %8s\n", sysplex);
+		/* SUPPORTED CYLINDER */
+		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
+		/* TIMESTAMP */
+		seq_printf(m, "timestamp %lu\n", (unsigned long)
+			   entry->timestamp);
+	}
+	kfree(access);
+
+	return 0;
+}
+
 /*
  * Perform Subsystem Function - CUIR response
  */
@@ -5064,6 +5748,7 @@
 	.term_IO = dasd_term_IO,
 	.handle_terminated_request = dasd_eckd_handle_terminated_request,
 	.format_device = dasd_eckd_format_device,
+	.check_device_format = dasd_eckd_check_device_format,
 	.erp_action = dasd_eckd_erp_action,
 	.erp_postaction = dasd_eckd_erp_postaction,
 	.check_for_device_change = dasd_eckd_check_for_device_change,
@@ -5079,6 +5764,8 @@
 	.get_uid = dasd_eckd_get_uid,
 	.kick_validate = dasd_eckd_kick_validate_server,
 	.check_attention = dasd_eckd_check_attention,
+	.host_access_count = dasd_eckd_host_access_count,
+	.hosts_print = dasd_hosts_print,
 };
 
 static int __init
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index f8f91ee..5980362 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -35,6 +35,7 @@
 #define DASD_ECKD_CCW_READ_MT		 0x86
 #define DASD_ECKD_CCW_WRITE_KD_MT	 0x8d
 #define DASD_ECKD_CCW_READ_KD_MT	 0x8e
+#define DASD_ECKD_CCW_READ_COUNT_MT	 0x92
 #define DASD_ECKD_CCW_RELEASE		 0x94
 #define DASD_ECKD_CCW_WRITE_FULL_TRACK	 0x95
 #define DASD_ECKD_CCW_READ_CKD_MT	 0x9e
@@ -53,6 +54,7 @@
  */
 #define PSF_ORDER_PRSSD			 0x18
 #define PSF_ORDER_CUIR_RESPONSE		 0x1A
+#define PSF_SUBORDER_QHA		 0x1C
 #define PSF_ORDER_SSC			 0x1D
 
 /*
@@ -81,6 +83,8 @@
 #define ATTENTION_LENGTH_CUIR		 0x0e
 #define ATTENTION_FORMAT_CUIR		 0x01
 
+#define DASD_ECKD_PG_GROUPED		 0x10
+
 /*
  * Size that is reported for large volumes in the old 16-bit no_cyl field
  */
@@ -403,13 +407,41 @@
 	__u8 ssid;
 } __packed;
 
+struct dasd_ckd_path_group_entry {
+	__u8 status_flags;
+	__u8 pgid[11];
+	__u8 sysplex_name[8];
+	__u32 timestamp;
+	__u32 cylinder;
+	__u8 reserved[4];
+} __packed;
+
+struct dasd_ckd_host_information {
+	__u8 access_flags;
+	__u8 entry_size;
+	__u16 entry_count;
+	__u8 entry[16390];
+} __packed;
+
+struct dasd_psf_query_host_access {
+	__u8 access_flag;
+	__u8 version;
+	__u16 CKD_length;
+	__u16 SCSI_length;
+	__u8 unused[10];
+	__u8 host_access_information[16394];
+} __packed;
+
 /*
  * Perform Subsystem Function - Prepare for Read Subsystem Data
  */
 struct dasd_psf_prssd_data {
 	unsigned char order;
 	unsigned char flags;
-	unsigned char reserved[4];
+	unsigned char reserved1;
+	unsigned char reserved2;
+	unsigned char lss;
+	unsigned char volume;
 	unsigned char suborder;
 	unsigned char varies[5];
 } __attribute__ ((packed));
@@ -525,6 +557,7 @@
 	int count;
 
 	u32 fcx_max_data;
+	char suc_reason;
 };
 
 
@@ -534,7 +567,7 @@
 int dasd_alias_add_device(struct dasd_device *);
 int dasd_alias_remove_device(struct dasd_device *);
 struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
-void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
+void dasd_alias_handle_summary_unit_check(struct work_struct *);
 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
 void dasd_alias_lcu_setup_complete(struct dasd_device *);
 void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8de29be..ac7027e 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -236,6 +236,13 @@
 					 * stolen. Should not be combined with
 					 * DASD_CQR_FLAGS_USE_ERP
 					 */
+/*
+ * The following flags are used to suppress output of certain errors.
+ * These flags should only be used for format checks!
+ */
+#define DASD_CQR_SUPPRESS_NRF	4	/* Suppress 'No Record Found' error */
+#define DASD_CQR_SUPPRESS_FP	5	/* Suppress 'File Protected' error*/
+#define DASD_CQR_SUPPRESS_IL	6	/* Suppress 'Incorrect Length' error */
 
 /* Signature for error recovery functions. */
 typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -318,7 +325,8 @@
 	 * Device operation functions. build_cp creates a ccw chain for
 	 * a block device request, start_io starts the request and
 	 * term_IO cancels it (e.g. in case of a timeout). format_device
-	 * returns a ccw chain to be used to format the device.
+	 * formats the device and check_device_format compares the format of
+	 * a device with the expected format_data.
 	 * handle_terminated_request allows to examine a cqr and prepare
 	 * it for retry.
 	 */
@@ -329,7 +337,9 @@
 	int (*term_IO) (struct dasd_ccw_req *);
 	void (*handle_terminated_request) (struct dasd_ccw_req *);
 	int (*format_device) (struct dasd_device *,
-			      struct format_data_t *, int enable_pav);
+			      struct format_data_t *, int);
+	int (*check_device_format)(struct dasd_device *,
+				   struct format_check_t *, int);
 	int (*free_cp) (struct dasd_ccw_req *, struct request *);
 
 	/*
@@ -365,6 +375,8 @@
 	int (*get_uid) (struct dasd_device *, struct dasd_uid *);
 	void (*kick_validate) (struct dasd_device *);
 	int (*check_attention)(struct dasd_device *, __u8);
+	int (*host_access_count)(struct dasd_device *);
+	int (*hosts_print)(struct dasd_device *, struct seq_file *);
 };
 
 extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -470,6 +482,7 @@
 	struct work_struct restore_device;
 	struct work_struct reload_device;
 	struct work_struct kick_validate;
+	struct work_struct suc_work;
 	struct timer_list timer;
 
 	debug_info_t *debug_area;
@@ -486,6 +499,7 @@
 	unsigned long blk_timeout;
 
 	struct dentry *debugfs_dentry;
+	struct dentry *hosts_dentry;
 	struct dasd_profile profile;
 };
 
@@ -542,6 +556,7 @@
 #define DASD_FLAG_SAFE_OFFLINE_RUNNING	11	/* safe offline running */
 #define DASD_FLAG_ABORTALL	12	/* Abort all noretry requests */
 #define DASD_FLAG_PATH_VERIFY	13	/* Path verification worker running */
+#define DASD_FLAG_SUC		14	/* unhandled summary unit check */
 
 #define DASD_SLEEPON_START_TAG	((void *) 1)
 #define DASD_SLEEPON_END_TAG	((void *) 2)
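
Editor's note: the new DASD_CQR_SUPPRESS_* values above are bit numbers within cqr->flags, set with set_bit() when a format-check request is built and consulted with test_bit() in the logging and ERP paths shown earlier. The idiom, reduced to a hedged minimal form:

/* Editor's sketch of the set_bit()/test_bit() suppress-flag idiom; the flag
 * name and messages are illustrative only. */
#include <linux/bitops.h>
#include <linux/printk.h>

#define DEMO_SUPPRESS_ERR	4	/* bit number, same style as above */

static void demo_build_request(unsigned long *flags)
{
	/* Request builder: this error is expected, keep the logs quiet. */
	set_bit(DEMO_SUPPRESS_ERR, flags);
}

static void demo_error_path(unsigned long *flags)
{
	if (test_bit(DEMO_SUPPRESS_ERR, flags))
		return;		/* anticipated error, nothing to report */
	pr_err("unexpected error\n");
}
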
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 90f30cc..9dfbd97 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -238,6 +238,23 @@
 	return rc;
 }
 
+static int dasd_check_format(struct dasd_block *block,
+			     struct format_check_t *cdata)
+{
+	struct dasd_device *base;
+	int rc;
+
+	base = block->base;
+	if (!base->discipline->check_device_format)
+		return -ENOTTY;
+
+	rc = base->discipline->check_device_format(base, cdata, 1);
+	if (rc == -EAGAIN)
+		rc = base->discipline->check_device_format(base, cdata, 0);
+
+	return rc;
+}
+
 /*
  * Format device.
  */
@@ -272,6 +289,47 @@
 	}
 	rc = dasd_format(base->block, &fdata);
 	dasd_put_device(base);
+
+	return rc;
+}
+
+/*
+ * Check device format
+ */
+static int dasd_ioctl_check_format(struct block_device *bdev, void __user *argp)
+{
+	struct format_check_t cdata;
+	struct dasd_device *base;
+	int rc = 0;
+
+	if (!argp)
+		return -EINVAL;
+
+	base = dasd_device_from_gendisk(bdev->bd_disk);
+	if (!base)
+		return -ENODEV;
+	if (bdev != bdev->bd_contains) {
+		pr_warn("%s: The specified DASD is a partition and cannot be checked\n",
+			dev_name(&base->cdev->dev));
+		rc = -EINVAL;
+		goto out_err;
+	}
+
+	if (copy_from_user(&cdata, argp, sizeof(cdata))) {
+		rc = -EFAULT;
+		goto out_err;
+	}
+
+	rc = dasd_check_format(base->block, &cdata);
+	if (rc)
+		goto out_err;
+
+	if (copy_to_user(argp, &cdata, sizeof(cdata)))
+		rc = -EFAULT;
+
+out_err:
+	dasd_put_device(base);
+
 	return rc;
 }
 
@@ -519,6 +577,9 @@
 	case BIODASDFMT:
 		rc = dasd_ioctl_format(bdev, argp);
 		break;
+	case BIODASDCHECKFMT:
+		rc = dasd_ioctl_check_format(bdev, argp);
+		break;
 	case BIODASDINFO:
 		rc = dasd_ioctl_information(block, cmd, argp);
 		break;
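
Editor's note: from userspace the new BIODASDCHECKFMT path mirrors the existing format ioctl: fill in the expected geometry, issue the ioctl against the whole-disk node (partitions are rejected above), and read the verdict back. A hedged sketch follows; the header location and exact uapi layout are assumptions inferred from the fields this patch touches.

/* Editor's sketch of a userspace caller; field and header names are assumed
 * from the kernel-side usage in this patch, not verified against the uapi. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>	/* assumed home of format_check_t / BIODASDCHECKFMT */

int main(void)
{
	struct format_check_t cdata;
	int fd = open("/dev/dasda", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&cdata, 0, sizeof(cdata));
	cdata.expect.start_unit = 0;	/* first track to verify */
	cdata.expect.stop_unit = 99;	/* last track to verify */
	cdata.expect.blksize = 4096;	/* expected block size */

	if (ioctl(fd, BIODASDCHECKFMT, &cdata) == 0)
		printf("result=%d unit=%d records=%d blksize=%d\n",
		       cdata.result, cdata.unit, cdata.num_records,
		       cdata.blksize);

	close(fd);
	return 0;
}
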
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1bce9cf..b839086 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -756,15 +756,16 @@
 	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
-	device_unregister(&dev_info->dev);
 
 	/* unload all related segments */
 	list_for_each_entry(entry, &dev_info->seg_list, lh)
 		segment_unload(entry->segment_name);
 
-	put_device(&dev_info->dev);
 	up_write(&dcssblk_devices_sem);
 
+	device_unregister(&dev_info->dev);
+	put_device(&dev_info->dev);
+
 	rc = count;
 out_buf:
 	kfree(local_buf);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 75d9896..e6f54d3 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -303,7 +303,7 @@
 		if (req->cmd_type != REQ_TYPE_FS) {
 			blk_start_request(req);
 			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
-			blk_end_request_all(req, -EIO);
+			__blk_end_request_all(req, -EIO);
 			continue;
 		}
 
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index dd2f7c8..41e28b2 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -18,6 +18,8 @@
 obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
 obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
 
+obj-$(CONFIG_PCI) += sclp_pci.o
+
 obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
 obj-$(CONFIG_VMCP) += vmcp.o
 
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 4d7a9bad..6b1577c 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -400,7 +400,7 @@
 	del_timer(&cp->timer);
 }
 
-static int
+static void
 con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
 {
 	/* Handle ATTN. Schedule tasklet to read aid. */
@@ -418,7 +418,6 @@
 		cp->update_flags = CON_UPDATE_ALL;
 		con3270_set_timer(cp, 1);
 	}
-	return RAW3270_IO_DONE;
 }
 
 /* Console view to a 3270 device. */
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 71e9747..85eca1c 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -217,7 +217,7 @@
 		fp->init->callback(fp->init, NULL);
 }
 
-static int
+static void
 fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
 {
 	/* Handle ATTN. Set indication and wake waiters for attention. */
@@ -233,7 +233,6 @@
 			/* Normal end. Copy residual count. */
 			rq->rescnt = irb->scsw.cmd.count;
 	}
-	return RAW3270_IO_DONE;
 }
 
 /*
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 220acb4..a2da898 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -90,6 +90,8 @@
  */
 DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
 
+static void __raw3270_disconnect(struct raw3270 *rp);
+
 /*
  * Encode array for 12 bit 3270 addresses.
  */
@@ -229,29 +231,6 @@
 }
 
 /*
- * Stop running ccw.
- */
-static int
-__raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
-{
-	int retries;
-	int rc;
-
-	if (raw3270_request_final(rq))
-		return 0;
-	/* Check if interrupt has already been processed */
-	for (retries = 0; retries < 5; retries++) {
-		if (retries < 2)
-			rc = ccw_device_halt(rp->cdev, (long) rq);
-		else
-			rc = ccw_device_clear(rp->cdev, (long) rq);
-		if (rc == 0)
-			break;		/* termination successful */
-	}
-	return rc;
-}
-
-/*
  * Add the request to the request queue, try to start it if the
  * 3270 device is idle. Return without waiting for end of i/o.
  */
@@ -342,7 +321,6 @@
 	struct raw3270 *rp;
 	struct raw3270_view *view;
 	struct raw3270_request *rq;
-	int rc;
 
 	rp = dev_get_drvdata(&cdev->dev);
 	if (!rp)
@@ -350,57 +328,31 @@
 	rq = (struct raw3270_request *) intparm;
 	view = rq ? rq->view : rp->view;
 
-	if (IS_ERR(irb))
-		rc = RAW3270_IO_RETRY;
-	else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
-		rq->rc = -EIO;
-		rc = RAW3270_IO_DONE;
-	} else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
-					   DEV_STAT_UNIT_EXCEP)) {
+	if (!IS_ERR(irb)) {
 		/* Handle CE-DE-UE and subsequent UDE */
-		set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
-		rc = RAW3270_IO_BUSY;
-	} else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
-		/* Wait for UDE if busy flag is set. */
-		if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END)
 			clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
-			/* Got it, now retry. */
-			rc = RAW3270_IO_RETRY;
-		} else
-			rc = RAW3270_IO_BUSY;
-	} else if (view)
-		rc = view->fn->intv(view, rq, irb);
-	else
-		rc = RAW3270_IO_DONE;
-
-	switch (rc) {
-	case RAW3270_IO_DONE:
-		break;
-	case RAW3270_IO_BUSY:
-		/* 
-		 * Intervention required by the operator. We have to wait
-		 * for unsolicited device end.
-		 */
-		return;
-	case RAW3270_IO_RETRY:
-		if (!rq)
-			break;
-		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
-					  (unsigned long) rq, 0, 0);
-		if (rq->rc == 0)
-			return;	/* Successfully restarted. */
-		break;
-	case RAW3270_IO_STOP:
-		if (!rq)
-			break;
-		__raw3270_halt_io(rp, rq);
-		rq->rc = -EIO;
-		break;
-	default:
-		BUG();
+		if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END |
+					    DEV_STAT_DEV_END |
+					    DEV_STAT_UNIT_EXCEP))
+			set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+		/* Handle disconnected devices */
+		if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+		    (irb->ecw[0] & SNS0_INTERVENTION_REQ)) {
+			set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+			if (rp->state > RAW3270_STATE_RESET)
+				__raw3270_disconnect(rp);
+		}
+		/* Call interrupt handler of the view */
+		if (view)
+			view->fn->intv(view, rq, irb);
 	}
-	if (rq) {
-		BUG_ON(list_empty(&rq->list));
+
+	if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags))
+		/* Device busy, do not start I/O */
+		return;
+
+	if (rq && !list_empty(&rq->list)) {
 		/* The request completed, remove from queue and do callback. */
 		list_del_init(&rq->list);
 		if (rq->callback)
@@ -408,6 +360,7 @@
 		/* Do put_device for get_device in raw3270_start. */
 		raw3270_put_view(view);
 	}
+
 	/*
 	 * Try to start each request on request queue until one is
 	 * started successful.
@@ -685,23 +638,34 @@
 	return rc;
 }
 
-static int
+static void
+__raw3270_disconnect(struct raw3270 *rp)
+{
+	struct raw3270_request *rq;
+	struct raw3270_view *view;
+
+	rp->state = RAW3270_STATE_INIT;
+	rp->view = &rp->init_view;
+	/* Cancel all queued requests */
+	while (!list_empty(&rp->req_queue)) {
+		rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
+		view = rq->view;
+		rq->rc = -EACCES;
+		list_del_init(&rq->list);
+		if (rq->callback)
+			rq->callback(rq, rq->callback_data);
+		raw3270_put_view(view);
+	}
+	/* Start from scratch */
+	__raw3270_reset_device(rp);
+}
+
+static void
 raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
 		 struct irb *irb)
 {
 	struct raw3270 *rp;
 
-	/*
-	 * Unit-Check Processing:
-	 * Expect Command Reject or Intervention Required.
-	 */
-	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
-		/* Request finished abnormally. */
-		if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
-			set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
-			return RAW3270_IO_BUSY;
-		}
-	}
 	if (rq) {
 		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
 			if (irb->ecw[0] & SNS0_CMD_REJECT)
@@ -715,7 +679,6 @@
 		rp = view->dev;
 		raw3270_read_modified(rp);
 	}
-	return RAW3270_IO_DONE;
 }
 
 static struct raw3270_fn raw3270_init_fn = {
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index e1e41c2..56519cb 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -125,19 +125,13 @@
 
 void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
 
-/* Return value of *intv (see raw3270_fn below) can be one of the following: */
-#define RAW3270_IO_DONE		0	/* request finished */
-#define RAW3270_IO_BUSY		1	/* request still active */
-#define RAW3270_IO_RETRY	2	/* retry current request */
-#define RAW3270_IO_STOP		3	/* kill current request */
-
 /*
  * Functions of a 3270 view.
  */
 struct raw3270_fn {
 	int  (*activate)(struct raw3270_view *);
 	void (*deactivate)(struct raw3270_view *);
-	int  (*intv)(struct raw3270_view *,
+	void (*intv)(struct raw3270_view *,
 		     struct raw3270_request *, struct irb *);
 	void (*release)(struct raw3270_view *);
 	void (*free)(struct raw3270_view *);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 026e389..7a10c56 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -17,33 +17,35 @@
 #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
 #define SCLP_CONSOLE_PAGES	6
 
+#define SCLP_EVTYP_MASK(T)	(1U << (32 - (T)))
+
 #define EVTYP_OPCMD		0x01
 #define EVTYP_MSG		0x02
+#define EVTYP_CONFMGMDATA	0x04
 #define EVTYP_DIAG_TEST		0x07
 #define EVTYP_STATECHANGE	0x08
 #define EVTYP_PMSGCMD		0x09
-#define EVTYP_CNTLPROGOPCMD	0x20
-#define EVTYP_CNTLPROGIDENT	0x0B
-#define EVTYP_SIGQUIESCE	0x1D
-#define EVTYP_VT220MSG		0x1A
-#define EVTYP_CONFMGMDATA	0x04
-#define EVTYP_SDIAS		0x1C
 #define EVTYP_ASYNC		0x0A
+#define EVTYP_CTLPROGIDENT	0x0B
+#define EVTYP_ERRNOTIFY		0x18
+#define EVTYP_VT220MSG		0x1A
+#define EVTYP_SDIAS		0x1C
+#define EVTYP_SIGQUIESCE	0x1D
 #define EVTYP_OCF		0x1E
 
-#define EVTYP_OPCMD_MASK	0x80000000
-#define EVTYP_MSG_MASK		0x40000000
-#define EVTYP_DIAG_TEST_MASK	0x02000000
-#define EVTYP_STATECHANGE_MASK	0x01000000
-#define EVTYP_PMSGCMD_MASK	0x00800000
-#define EVTYP_CTLPROGOPCMD_MASK	0x00000001
-#define EVTYP_CTLPROGIDENT_MASK	0x00200000
-#define EVTYP_SIGQUIESCE_MASK	0x00000008
-#define EVTYP_VT220MSG_MASK	0x00000040
-#define EVTYP_CONFMGMDATA_MASK	0x10000000
-#define EVTYP_SDIAS_MASK	0x00000010
-#define EVTYP_ASYNC_MASK	0x00400000
-#define EVTYP_OCF_MASK		0x00000004
+#define EVTYP_OPCMD_MASK	SCLP_EVTYP_MASK(EVTYP_OPCMD)
+#define EVTYP_MSG_MASK		SCLP_EVTYP_MASK(EVTYP_MSG)
+#define EVTYP_CONFMGMDATA_MASK	SCLP_EVTYP_MASK(EVTYP_CONFMGMDATA)
+#define EVTYP_DIAG_TEST_MASK	SCLP_EVTYP_MASK(EVTYP_DIAG_TEST)
+#define EVTYP_STATECHANGE_MASK	SCLP_EVTYP_MASK(EVTYP_STATECHANGE)
+#define EVTYP_PMSGCMD_MASK	SCLP_EVTYP_MASK(EVTYP_PMSGCMD)
+#define EVTYP_ASYNC_MASK	SCLP_EVTYP_MASK(EVTYP_ASYNC)
+#define EVTYP_CTLPROGIDENT_MASK	SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT)
+#define EVTYP_ERRNOTIFY_MASK	SCLP_EVTYP_MASK(EVTYP_ERRNOTIFY)
+#define EVTYP_VT220MSG_MASK	SCLP_EVTYP_MASK(EVTYP_VT220MSG)
+#define EVTYP_SDIAS_MASK	SCLP_EVTYP_MASK(EVTYP_SDIAS)
+#define EVTYP_SIGQUIESCE_MASK	SCLP_EVTYP_MASK(EVTYP_SIGQUIESCE)
+#define EVTYP_OCF_MASK		SCLP_EVTYP_MASK(EVTYP_OCF)
 
 #define GNRLMSGFLGS_DOM		0x8000
 #define GNRLMSGFLGS_SNDALRM	0x4000
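
A standalone illustration (not part of the patch) that the new SCLP_EVTYP_MASK() macro in the sclp.h hunk above reproduces the hard-coded mask literals it replaces: event types are numbered from the most significant bit of a 32-bit send/receive mask, so type T maps to bit (32 - T).

#include <assert.h>

#define SCLP_EVTYP_MASK(T)	(1U << (32 - (T)))

#define EVTYP_OPCMD		0x01
#define EVTYP_CTLPROGIDENT	0x0B
#define EVTYP_SIGQUIESCE	0x1D

/* Each new macro expansion equals the literal it replaces. */
static_assert(SCLP_EVTYP_MASK(EVTYP_OPCMD) == 0x80000000U, "old EVTYP_OPCMD_MASK");
static_assert(SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT) == 0x00200000U, "old EVTYP_CTLPROGIDENT_MASK");
static_assert(SCLP_EVTYP_MASK(EVTYP_SIGQUIESCE) == 0x00000008U, "old EVTYP_SIGQUIESCE_MASK");
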
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index d3947ea..e3fc753 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -576,67 +576,6 @@
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
 /*
- * PCI I/O adapter configuration related functions.
- */
-#define SCLP_CMDW_CONFIGURE_PCI			0x001a0001
-#define SCLP_CMDW_DECONFIGURE_PCI		0x001b0001
-
-#define SCLP_RECONFIG_PCI_ATPYE			2
-
-struct pci_cfg_sccb {
-	struct sccb_header header;
-	u8 atype;		/* adapter type */
-	u8 reserved1;
-	u16 reserved2;
-	u32 aid;		/* adapter identifier */
-} __packed;
-
-static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
-{
-	struct pci_cfg_sccb *sccb;
-	int rc;
-
-	if (!SCLP_HAS_PCI_RECONFIG)
-		return -EOPNOTSUPP;
-
-	sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!sccb)
-		return -ENOMEM;
-
-	sccb->header.length = PAGE_SIZE;
-	sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
-	sccb->aid = fid;
-	rc = sclp_sync_request(cmd, sccb);
-	if (rc)
-		goto out;
-	switch (sccb->header.response_code) {
-	case 0x0020:
-	case 0x0120:
-		break;
-	default:
-		pr_warn("configure PCI I/O adapter failed: cmd=0x%08x  response=0x%04x\n",
-			cmd, sccb->header.response_code);
-		rc = -EIO;
-		break;
-	}
-out:
-	free_page((unsigned long) sccb);
-	return rc;
-}
-
-int sclp_pci_configure(u32 fid)
-{
-	return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
-}
-EXPORT_SYMBOL(sclp_pci_configure);
-
-int sclp_pci_deconfigure(u32 fid)
-{
-	return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
-}
-EXPORT_SYMBOL(sclp_pci_deconfigure);
-
-/*
  * Channel path configuration related functions.
  */
 
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index f344e5b..90d92fb 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -93,7 +93,7 @@
 	/* setup SCCB for Control-Program Identification */
 	sccb->header.length = sizeof(struct cpi_sccb);
 	sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
-	sccb->cpi_evbuf.header.type = 0x0b;
+	sccb->cpi_evbuf.header.type = EVTYP_CTLPROGIDENT;
 	evb = &sccb->cpi_evbuf;
 
 	/* set system type */
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86..ea607a4 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@
 {
 	struct sclp_ctl_sccb ctl_sccb;
 	struct sccb_header *sccb;
+	unsigned long copied;
 	int rc;
 
 	if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@
 	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!sccb)
 		return -ENOMEM;
-	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+	copied = PAGE_SIZE -
+		copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+	if (offsetof(struct sccb_header, length) +
+	    sizeof(sccb->length) > copied || sccb->length > copied) {
 		rc = -EFAULT;
 		goto out_free;
 	}
-	if (sccb->length > PAGE_SIZE || sccb->length < 8)
-		return -EINVAL;
-	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
-		rc = -EFAULT;
+	if (sccb->length < 8) {
+		rc = -EINVAL;
 		goto out_free;
 	}
 	rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
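
The sclp_ctl.c hunk above replaces a header fetch, a length check, and a second fetch with a single copy of up to one page, then validates against the bytes actually copied. A minimal sketch of that validation pattern, with illustrative names and plain C types (an assumption-laden stand-in, not kernel code):

#include <stddef.h>
#include <stdint.h>

struct sccb_hdr { uint16_t length; /* further header fields omitted */ };

static int sccb_len_ok(const struct sccb_hdr *sccb, size_t copied)
{
	/* The length field itself must lie within the copied bytes... */
	if (offsetof(struct sccb_hdr, length) + sizeof(sccb->length) > copied)
		return 0;
	/* ...and the SCCB it describes must fit in what was copied. */
	if (sccb->length > copied)
		return 0;
	/* Anything shorter than a bare header (8 bytes) is invalid. */
	return sccb->length >= 8;
}
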
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 6804354..0ac520d 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -49,7 +49,9 @@
 	u8	_pad_117[119 - 117];	/* 117-118 */
 	u8	fac119;			/* 119 */
 	u16	hcpua;			/* 120-121 */
-	u8	_pad_122[4096 - 122];	/* 122-4095 */
+	u8	_pad_122[124 - 122];	/* 122-123 */
+	u32	hmfai;			/* 124-127 */
+	u8	_pad_128[4096 - 128];	/* 128-4095 */
 } __packed __aligned(PAGE_SIZE);
 
 static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
@@ -155,6 +157,8 @@
 	sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
 	sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
 	sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
+
+	sclp.hmfai = sccb->hmfai;
 }
 
 /*
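
The padding split in the sclp_early.c hunk above keeps the new hmfai word at its documented byte offset. A self-contained sketch of that offset arithmetic (struct and field names are assumptions based on the hunk, not the full kernel definition):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct read_info_layout {
	uint8_t  _pad_0[120];		/* bytes 0-119, details omitted */
	uint16_t hcpua;			/* 120-121 */
	uint8_t  _pad_122[124 - 122];	/* 122-123 */
	uint32_t hmfai;			/* 124-127, newly added */
	uint8_t  _pad_128[4096 - 128];	/* 128-4095 */
} __attribute__((packed));

static_assert(offsetof(struct read_info_layout, hmfai) == 124, "hmfai at byte 124");
static_assert(sizeof(struct read_info_layout) == 4096, "exactly one 4 KiB page");
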
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
new file mode 100644
index 0000000..4dbb3df
--- /dev/null
+++ b/drivers/s390/char/sclp_pci.c
@@ -0,0 +1,193 @@
+/*
+ * PCI I/O adapter configuration related functions.
+ *
+ * Copyright IBM Corp. 2016
+ */
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+#define SCLP_CMDW_CONFIGURE_PCI			0x001a0001
+#define SCLP_CMDW_DECONFIGURE_PCI		0x001b0001
+
+#define SCLP_ATYPE_PCI				2
+
+#define SCLP_ERRNOTIFY_AQ_REPAIR		1
+#define SCLP_ERRNOTIFY_AQ_INFO_LOG		2
+
+static DEFINE_MUTEX(sclp_pci_mutex);
+static struct sclp_register sclp_pci_event = {
+	.send_mask = EVTYP_ERRNOTIFY_MASK,
+};
+
+struct err_notify_evbuf {
+	struct evbuf_header header;
+	u8 action;
+	u8 atype;
+	u32 fh;
+	u32 fid;
+	u8 data[0];
+} __packed;
+
+struct err_notify_sccb {
+	struct sccb_header header;
+	struct err_notify_evbuf evbuf;
+} __packed;
+
+struct pci_cfg_sccb {
+	struct sccb_header header;
+	u8 atype;		/* adapter type */
+	u8 reserved1;
+	u16 reserved2;
+	u32 aid;		/* adapter identifier */
+} __packed;
+
+static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
+{
+	struct pci_cfg_sccb *sccb;
+	int rc;
+
+	if (!SCLP_HAS_PCI_RECONFIG)
+		return -EOPNOTSUPP;
+
+	sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+
+	sccb->header.length = PAGE_SIZE;
+	sccb->atype = SCLP_ATYPE_PCI;
+	sccb->aid = fid;
+	rc = sclp_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		pr_warn("configure PCI I/O adapter failed: cmd=0x%08x  response=0x%04x\n",
+			cmd, sccb->header.response_code);
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+int sclp_pci_configure(u32 fid)
+{
+	return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_configure);
+
+int sclp_pci_deconfigure(u32 fid)
+{
+	return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_deconfigure);
+
+static void sclp_pci_callback(struct sclp_req *req, void *data)
+{
+	struct completion *completion = data;
+
+	complete(completion);
+}
+
+static int sclp_pci_check_report(struct zpci_report_error_header *report)
+{
+	if (report->version != 1)
+		return -EINVAL;
+
+	if (report->action != SCLP_ERRNOTIFY_AQ_REPAIR &&
+	    report->action != SCLP_ERRNOTIFY_AQ_INFO_LOG)
+		return -EINVAL;
+
+	if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
+		return -EINVAL;
+
+	return 0;
+}
+
+int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid)
+{
+	DECLARE_COMPLETION_ONSTACK(completion);
+	struct err_notify_sccb *sccb;
+	struct sclp_req req;
+	int ret;
+
+	ret = sclp_pci_check_report(report);
+	if (ret)
+		return ret;
+
+	mutex_lock(&sclp_pci_mutex);
+	ret = sclp_register(&sclp_pci_event);
+	if (ret)
+		goto out_unlock;
+
+	if (!(sclp_pci_event.sclp_receive_mask & EVTYP_ERRNOTIFY_MASK)) {
+		ret = -EOPNOTSUPP;
+		goto out_unregister;
+	}
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb) {
+		ret = -ENOMEM;
+		goto out_unregister;
+	}
+
+	memset(&req, 0, sizeof(req));
+	req.callback_data = &completion;
+	req.callback = sclp_pci_callback;
+	req.command = SCLP_CMDW_WRITE_EVENT_DATA;
+	req.status = SCLP_REQ_FILLED;
+	req.sccb = sccb;
+
+	sccb->evbuf.header.length = sizeof(sccb->evbuf) + report->length;
+	sccb->evbuf.header.type = EVTYP_ERRNOTIFY;
+	sccb->header.length = sizeof(sccb->header) + sccb->evbuf.header.length;
+
+	sccb->evbuf.action = report->action;
+	sccb->evbuf.atype = SCLP_ATYPE_PCI;
+	sccb->evbuf.fh = fh;
+	sccb->evbuf.fid = fid;
+
+	memcpy(sccb->evbuf.data, report->data, report->length);
+
+	ret = sclp_add_request(&req);
+	if (ret)
+		goto out_free_req;
+
+	wait_for_completion(&completion);
+	if (req.status != SCLP_REQ_DONE) {
+		pr_warn("request failed (status=0x%02x)\n",
+			req.status);
+		ret = -EIO;
+		goto out_free_req;
+	}
+
+	if (sccb->header.response_code != 0x0020) {
+		pr_warn("request failed with response code 0x%x\n",
+			sccb->header.response_code);
+		ret = -EIO;
+	}
+
+out_free_req:
+	free_page((unsigned long) sccb);
+out_unregister:
+	sclp_unregister(&sclp_pci_event);
+out_unlock:
+	mutex_unlock(&sclp_pci_mutex);
+	return ret;
+}
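
A hypothetical caller of the new sclp_pci_report() interface (a sketch, not taken from this merge): it assumes struct zpci_report_error_header carries the version/action/length fields checked by sclp_pci_check_report() above plus a trailing data[] payload, and that the declarations are visible via the headers named below.

#include <linux/slab.h>
#include <linux/string.h>
#include <asm/sclp.h>
#include <asm/pci.h>

static int example_pci_repair_report(u32 fh, u32 fid,
				     const u8 *log, u16 log_len)
{
	struct zpci_report_error_header *report;
	int rc;

	report = kzalloc(sizeof(*report) + log_len, GFP_KERNEL);
	if (!report)
		return -ENOMEM;

	report->version = 1;		/* only version 1 is accepted */
	report->action  = 1;		/* SCLP_ERRNOTIFY_AQ_REPAIR above */
	report->length  = log_len;	/* must fit in one SCCB page */
	memcpy(report->data, log, log_len);

	rc = sclp_pci_report(report, fh, fid);	/* blocks until SCLP answers */

	kfree(report);
	return rc;
}
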
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index e96fc7f..0a9f219 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -92,6 +92,7 @@
 	unsigned char inattr;		/* Visible/invisible input. */
 	int throttle, attn;		/* tty throttle/unthrottle. */
 	struct tasklet_struct readlet;	/* Tasklet to issue read request. */
+	struct tasklet_struct hanglet;	/* Tasklet to hang up the tty. */
 	struct kbd_data *kbd;		/* key_maps stuff. */
 
 	/* Escape sequence parsing. */
@@ -319,6 +320,27 @@
 }
 
 /*
+ * Create a blank screen and remove all lines from the history.
+ */
+static void
+tty3270_blank_screen(struct tty3270 *tp)
+{
+	struct string *s, *n;
+	int i;
+
+	for (i = 0; i < tp->view.rows - 2; i++)
+		tp->screen[i].len = 0;
+	tp->nr_up = 0;
+	list_for_each_entry_safe(s, n, &tp->lines, list) {
+		list_del(&s->list);
+		if (!list_empty(&s->update))
+			list_del(&s->update);
+		tp->nr_lines--;
+		free_string(&tp->freemem, s);
+	}
+}
+
+/*
  * Write request completion callback.
  */
 static void
@@ -405,7 +427,10 @@
 			if (raw3270_request_add_data(wrq, str, len) != 0)
 				break;
 			list_del_init(&s->update);
-			sba = s->string + s->len - 3;
+			if (s->string[s->len - 4] == TO_RA)
+				sba = s->string + s->len - 3;
+			else
+				sba = invalid_sba;
 		}
 		if (list_empty(&tp->update))
 			updated |= TTY_UPDATE_LIST;
@@ -622,6 +647,16 @@
 }
 
 /*
+ * Hang up the tty
+ */
+static void
+tty3270_hangup_tasklet(struct tty3270 *tp)
+{
+	tty_port_tty_hangup(&tp->port, true);
+	raw3270_put_view(&tp->view);
+}
+
+/*
  * Switch to the tty view.
  */
 static int
@@ -642,7 +677,7 @@
 	del_timer(&tp->timer);
 }
 
-static int
+static void
 tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
 {
 	/* Handle ATTN. Schedule tasklet to read aid. */
@@ -654,17 +689,19 @@
 	}
 
 	if (rq) {
-		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
 			rq->rc = -EIO;
-		else
+			raw3270_get_view(&tp->view);
+			tasklet_schedule(&tp->hanglet);
+		} else {
 			/* Normal end. Copy residual count. */
 			rq->rescnt = irb->scsw.cmd.count;
+		}
 	} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
 		/* Interrupt without an outstanding request -> update all */
 		tp->update_flags = TTY_UPDATE_ALL;
 		tty3270_set_timer(tp, 1);
 	}
-	return RAW3270_IO_DONE;
 }
 
 /*
@@ -716,6 +753,9 @@
 	tasklet_init(&tp->readlet,
 		     (void (*)(unsigned long)) tty3270_read_tasklet,
 		     (unsigned long) tp->read);
+	tasklet_init(&tp->hanglet,
+		     (void (*)(unsigned long)) tty3270_hangup_tasklet,
+		     (unsigned long) tp);
 	INIT_WORK(&tp->resize_work, tty3270_resize_work);
 
 	return tp;
@@ -814,6 +854,7 @@
 		return;
 	/* Switch to new output size */
 	spin_lock_bh(&tp->view.lock);
+	tty3270_blank_screen(tp);
 	oscreen = tp->screen;
 	orows = tp->view.rows;
 	tp->view.model = tp->n_model;
@@ -824,7 +865,6 @@
 	free_string(&tp->freemem, tp->status);
 	tty3270_create_prompt(tp);
 	tty3270_create_status(tp);
-	tp->nr_up = 0;
 	while (tp->nr_lines < tp->view.rows - 2)
 		tty3270_blank_line(tp);
 	tp->update_flags = TTY_UPDATE_ALL;
@@ -838,6 +878,7 @@
 	ws.ws_row = tp->view.rows - 2;
 	ws.ws_col = tp->view.cols;
 	tty_do_resize(tty, &ws);
+	tty_kref_put(tty);
 }
 
 static void
@@ -845,6 +886,8 @@
 {
 	struct tty3270 *tp = container_of(view, struct tty3270, view);
 
+	if (tp->n_model == model && tp->n_rows == rows && tp->n_cols == cols)
+		return;
 	tp->n_model = model;
 	tp->n_rows = rows;
 	tp->n_cols = cols;
@@ -923,10 +966,8 @@
 		tty->winsize.ws_row = tp->view.rows - 2;
 		tty->winsize.ws_col = tp->view.cols;
 		tp->port.low_latency = 0;
-		/* why to reassign? */
-		tty_port_tty_set(&tp->port, tty);
 		tp->inattr = TF_INPUT;
-		return tty_port_install(&tp->port, driver, tty);
+		goto port_install;
 	}
 	if (tty3270_max_index < tty->index + 1)
 		tty3270_max_index = tty->index + 1;
@@ -952,7 +993,6 @@
 		return rc;
 	}
 
-	tty_port_tty_set(&tp->port, tty);
 	tp->port.low_latency = 0;
 	tty->winsize.ws_row = tp->view.rows - 2;
 	tty->winsize.ws_col = tp->view.cols;
@@ -974,6 +1014,7 @@
 
 	raw3270_activate_view(&tp->view);
 
+port_install:
 	rc = tty_port_install(&tp->port, driver, tty);
 	if (rc) {
 		raw3270_put_view(&tp->view);
@@ -1010,18 +1051,18 @@
 
 	if (tty->count > 1)
 		return;
-	if (tp) {
-		tty->driver_data = NULL;
+	if (tp)
 		tty_port_tty_set(&tp->port, NULL);
-	}
 }
 
 static void tty3270_cleanup(struct tty_struct *tty)
 {
 	struct tty3270 *tp = tty->driver_data;
 
-	if (tp)
+	if (tp) {
+		tty->driver_data = NULL;
 		raw3270_put_view(&tp->view);
+	}
 }
 
 /*
@@ -1788,7 +1829,22 @@
 static void
 tty3270_hangup(struct tty_struct *tty)
 {
-	// FIXME: implement
+	struct tty3270 *tp;
+
+	tp = tty->driver_data;
+	if (!tp)
+		return;
+	spin_lock_bh(&tp->view.lock);
+	tp->cx = tp->saved_cx = 0;
+	tp->cy = tp->saved_cy = 0;
+	tp->highlight = tp->saved_highlight = TAX_RESET;
+	tp->f_color = tp->saved_f_color = TAC_RESET;
+	tty3270_blank_screen(tp);
+	while (tp->nr_lines < tp->view.rows - 2)
+		tty3270_blank_line(tp);
+	tp->update_flags = TTY_UPDATE_ALL;
+	spin_unlock_bh(&tp->view.lock);
+	tty3270_set_timer(tp, 1);
 }
 
 static void
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 24ec282..327255d 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -787,7 +787,7 @@
 /*
  * AP state machine jump table
  */
-ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
+static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
 	[AP_STATE_RESET_START] = {
 		[AP_EVENT_POLL] = ap_sm_reset,
 		[AP_EVENT_TIMEOUT] = ap_sm_nop,
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index c3e2252..ad17fc5 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -642,7 +642,7 @@
 
 	kfree(header);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	skb_queue_tail(&ch->sweep_queue, sweep_skb);
 
 	fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
@@ -911,7 +911,7 @@
 	if (ctcm_test_and_set_busy(dev))
 		return NETDEV_TX_BUSY;
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
 		return NETDEV_TX_BUSY;
 	return NETDEV_TX_OK;
@@ -994,7 +994,7 @@
 					goto done;
 	}
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
 		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
 			"%s(%s): device error - dropped",
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index edf16bf..c103fc7 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -671,7 +671,7 @@
 
 	kfree(header);
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	skb_queue_tail(&ch->sweep_queue, sweep_skb);
 
 	fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 0ba3a2f..b0e8ffd 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1407,7 +1407,7 @@
 		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
 		return NETDEV_TX_BUSY;
 	}
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	rc = netiucv_transmit_skb(privptr->conn, skb);
 	netiucv_clear_busy(dev);
 	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7871537..b7b7477 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3481,7 +3481,7 @@
 		}
 	}
 
-	queue->card->dev->trans_start = jiffies;
+	netif_trans_update(queue->card->dev);
 	if (queue->card->options.performance_stats) {
 		queue->card->perf_stats.outbound_do_qdio_cnt++;
 		queue->card->perf_stats.outbound_do_qdio_start_time =
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 157d3d2..9310a54 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -26,7 +26,8 @@
 	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
 
 	if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
-		scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1);
+		scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun,
+				 SCSI_SCAN_MANUAL);
 }
 
 static void zfcp_unit_scsi_scan_work(struct work_struct *work)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e80768f..98e5d51 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -17,6 +17,7 @@
 	tristate "SCSI device support"
 	depends on BLOCK
 	select SCSI_DMA if HAS_DMA
+	select SG_POOL
 	---help---
 	  If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
 	  any other SCSI device under Linux, say Y and make sure that you know
@@ -202,12 +203,12 @@
 	  certain enclosure conditions to be reported and is not required.
 
 config SCSI_CONSTANTS
-	bool "Verbose SCSI error reporting (kernel size +=75K)"
+	bool "Verbose SCSI error reporting (kernel size += 36K)"
 	depends on SCSI
 	help
 	  The error messages regarding your SCSI hardware will be easier to
 	  understand if you say Y here; it will enlarge your kernel by about
-	  75 KB. If in doubt, say Y.
+	  36 KB. If in doubt, say Y.
 
 config SCSI_LOGGING
 	bool "SCSI logging facility"
@@ -813,17 +814,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called g_NCR5380_mmio.
 
-config SCSI_GENERIC_NCR53C400
-	bool "Enable NCR53c400 extensions"
-	depends on SCSI_GENERIC_NCR5380
-	help
-	  This enables certain optimizations for the NCR53c400 SCSI cards.
-	  You might as well try it out.  Note that this driver will only probe
-	  for the Trantor T130B in its default configuration; you might have
-	  to pass a command line option to the kernel at boot time if it does
-	  not detect your card.  See the file
-	  <file:Documentation/scsi/g_NCR5380.txt> for details.
-
 config SCSI_IPS
 	tristate "IBM ServeRAID support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 3eff2a6..43908bb 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -29,29 +29,9 @@
  * Ronald van Cuijlenborg, Alan Cox and others.
  */
 
-/*
- * Further development / testing that should be done :
- * 1.  Cleanup the NCR5380_transfer_dma function and DMA operation complete
- * code so that everything does the same thing that's done at the
- * end of a pseudo-DMA read operation.
- *
- * 2.  Fix REAL_DMA (interrupt driven, polled works fine) -
- * basically, transfer size needs to be reduced by one
- * and the last byte read as is done with PSEUDO_DMA.
- *
- * 4.  Test SCSI-II tagged queueing (I have no devices which support
- * tagged queueing)
- */
+/* Ported to Atari by Roman Hodek and others. */
 
-#ifndef notyet
-#undef REAL_DMA
-#endif
-
-#ifdef BOARD_REQUIRES_NO_DELAY
-#define io_recovery_delay(x)
-#else
-#define io_recovery_delay(x)	udelay(x)
-#endif
+/* Adapted for the Sun 3 by Sam Creasey. */
 
 /*
  * Design
@@ -126,17 +106,10 @@
  * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
  * transceivers.
  *
- * DONT_USE_INTR - if defined, never use interrupts, even if we probe or
- * override-configure an IRQ.
- *
  * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases.
  *
  * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
  *
- * REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't
- * rely on phase mismatch and EOP interrupts to determine end
- * of phase.
- *
  * These macros MUST be defined :
  *
  * NCR5380_read(register)  - read from the specified register
@@ -147,29 +120,29 @@
  * specific implementation of the NCR5380
  *
  * Either real DMA *or* pseudo DMA may be implemented
- * REAL functions :
- * NCR5380_REAL_DMA should be defined if real DMA is to be used.
- * Note that the DMA setup functions should return the number of bytes
- * that they were able to program the controller for.
- *
- * Also note that generic i386/PC versions of these macros are
- * available as NCR5380_i386_dma_write_setup,
- * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
  *
  * NCR5380_dma_write_setup(instance, src, count) - initialize
  * NCR5380_dma_read_setup(instance, dst, count) - initialize
  * NCR5380_dma_residual(instance); - residual count
  *
- * PSEUDO functions :
- * NCR5380_pwrite(instance, src, count)
- * NCR5380_pread(instance, dst, count);
- *
  * The generic driver is initialized by calling NCR5380_init(instance),
  * after setting the appropriate host specific fields and ID.  If the
  * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
  * possible) function may be used.
  */
 
+#ifndef NCR5380_io_delay
+#define NCR5380_io_delay(x)
+#endif
+
+#ifndef NCR5380_acquire_dma_irq
+#define NCR5380_acquire_dma_irq(x)	(1)
+#endif
+
+#ifndef NCR5380_release_dma_irq
+#define NCR5380_release_dma_irq(x)
+#endif
+
 static int do_abort(struct Scsi_Host *);
 static void do_reset(struct Scsi_Host *);
 
@@ -280,12 +253,20 @@
 	{0, NULL}
 },
 basrs[] = {
+	{BASR_END_DMA_TRANSFER, "END OF DMA"},
+	{BASR_DRQ, "DRQ"},
+	{BASR_PARITY_ERROR, "PARITY ERROR"},
+	{BASR_IRQ, "IRQ"},
+	{BASR_PHASE_MATCH, "PHASE MATCH"},
+	{BASR_BUSY_ERROR, "BUSY ERROR"},
 	{BASR_ATN, "ATN"},
 	{BASR_ACK, "ACK"},
 	{0, NULL}
 },
 icrs[] = {
 	{ICR_ASSERT_RST, "ASSERT RST"},
+	{ICR_ARBITRATION_PROGRESS, "ARB. IN PROGRESS"},
+	{ICR_ARBITRATION_LOST, "LOST ARB."},
 	{ICR_ASSERT_ACK, "ASSERT ACK"},
 	{ICR_ASSERT_BSY, "ASSERT BSY"},
 	{ICR_ASSERT_SEL, "ASSERT SEL"},
@@ -294,14 +275,14 @@
 	{0, NULL}
 },
 mrs[] = {
-	{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"},
-	{MR_TARGET, "MODE TARGET"},
-	{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"},
-	{MR_ENABLE_PAR_INTR, "MODE PARITY INTR"},
-	{MR_ENABLE_EOP_INTR, "MODE EOP INTR"},
-	{MR_MONITOR_BSY, "MODE MONITOR BSY"},
-	{MR_DMA_MODE, "MODE DMA"},
-	{MR_ARBITRATE, "MODE ARBITRATION"},
+	{MR_BLOCK_DMA_MODE, "BLOCK DMA MODE"},
+	{MR_TARGET, "TARGET"},
+	{MR_ENABLE_PAR_CHECK, "PARITY CHECK"},
+	{MR_ENABLE_PAR_INTR, "PARITY INTR"},
+	{MR_ENABLE_EOP_INTR, "EOP INTR"},
+	{MR_MONITOR_BSY, "MONITOR BSY"},
+	{MR_DMA_MODE, "DMA MODE"},
+	{MR_ARBITRATE, "ARBITRATE"},
 	{0, NULL}
 };
 
@@ -322,23 +303,23 @@
 	icr = NCR5380_read(INITIATOR_COMMAND_REG);
 	basr = NCR5380_read(BUS_AND_STATUS_REG);
 
-	printk("STATUS_REG: %02x ", status);
+	printk(KERN_DEBUG "SR =   0x%02x : ", status);
 	for (i = 0; signals[i].mask; ++i)
 		if (status & signals[i].mask)
-			printk(",%s", signals[i].name);
-	printk("\nBASR: %02x ", basr);
+			printk(KERN_CONT "%s, ", signals[i].name);
+	printk(KERN_CONT "\nBASR = 0x%02x : ", basr);
 	for (i = 0; basrs[i].mask; ++i)
 		if (basr & basrs[i].mask)
-			printk(",%s", basrs[i].name);
-	printk("\nICR: %02x ", icr);
+			printk(KERN_CONT "%s, ", basrs[i].name);
+	printk(KERN_CONT "\nICR =  0x%02x : ", icr);
 	for (i = 0; icrs[i].mask; ++i)
 		if (icr & icrs[i].mask)
-			printk(",%s", icrs[i].name);
-	printk("\nMODE: %02x ", mr);
+			printk(KERN_CONT "%s, ", icrs[i].name);
+	printk(KERN_CONT "\nMR =   0x%02x : ", mr);
 	for (i = 0; mrs[i].mask; ++i)
 		if (mr & mrs[i].mask)
-			printk(",%s", mrs[i].name);
-	printk("\n");
+			printk(KERN_CONT "%s, ", mrs[i].name);
+	printk(KERN_CONT "\n");
 }
 
 static struct {
@@ -477,52 +458,18 @@
 	         instance->base, instance->irq,
 	         instance->can_queue, instance->cmd_per_lun,
 	         instance->sg_tablesize, instance->this_id,
-	         hostdata->flags & FLAG_NO_DMA_FIXUP  ? "NO_DMA_FIXUP "  : "",
+	         hostdata->flags & FLAG_DMA_FIXUP     ? "DMA_FIXUP "     : "",
 	         hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
 	         hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY "  : "",
-#ifdef AUTOPROBE_IRQ
-	         "AUTOPROBE_IRQ "
-#endif
 #ifdef DIFFERENTIAL
 	         "DIFFERENTIAL "
 #endif
-#ifdef REAL_DMA
-	         "REAL_DMA "
-#endif
-#ifdef REAL_DMA_POLL
-	         "REAL_DMA_POLL "
-#endif
 #ifdef PARITY
 	         "PARITY "
 #endif
-#ifdef PSEUDO_DMA
-	         "PSEUDO_DMA "
-#endif
 	         "");
 }
 
-#ifdef PSEUDO_DMA
-static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
-	char *buffer, int length)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	hostdata->spin_max_r = 0;
-	hostdata->spin_max_w = 0;
-	return 0;
-}
-
-static int __maybe_unused NCR5380_show_info(struct seq_file *m,
-                                            struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n",
-	        hostdata->spin_max_w, hostdata->spin_max_r);
-	return 0;
-}
-#endif
-
 /**
  * NCR5380_init - initialise an NCR5380
  * @instance: adapter to configure
@@ -543,6 +490,8 @@
 	int i;
 	unsigned long deadline;
 
+	instance->max_lun = 7;
+
 	hostdata->host = instance;
 	hostdata->id_mask = 1 << instance->this_id;
 	hostdata->id_higher_mask = 0;
@@ -551,9 +500,8 @@
 			hostdata->id_higher_mask |= i;
 	for (i = 0; i < 8; ++i)
 		hostdata->busy[i] = 0;
-#ifdef REAL_DMA
-	hostdata->dmalen = 0;
-#endif
+	hostdata->dma_len = 0;
+
 	spin_lock_init(&hostdata->lock);
 	hostdata->connected = NULL;
 	hostdata->sensing = NULL;
@@ -719,6 +667,9 @@
 
 	cmd->result = 0;
 
+	if (!NCR5380_acquire_dma_irq(instance))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	spin_lock_irqsave(&hostdata->lock, flags);
 
 	/*
@@ -743,6 +694,19 @@
 	return 0;
 }
 
+static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
+{
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+	/* Caller does the locking needed to set & test these data atomically */
+	if (list_empty(&hostdata->disconnected) &&
+	    list_empty(&hostdata->unissued) &&
+	    list_empty(&hostdata->autosense) &&
+	    !hostdata->connected &&
+	    !hostdata->selecting)
+		NCR5380_release_dma_irq(instance);
+}
+
 /**
  * dequeue_next_cmd - dequeue a command for processing
  * @instance: the scsi host instance
@@ -844,17 +808,14 @@
 
 			if (!NCR5380_select(instance, cmd)) {
 				dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
+				maybe_release_dma_irq(instance);
 			} else {
 				dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
 				         "main: select failed, returning %p to queue\n", cmd);
 				requeue_cmd(instance, cmd);
 			}
 		}
-		if (hostdata->connected
-#ifdef REAL_DMA
-		    && !hostdata->dmalen
-#endif
-		    ) {
+		if (hostdata->connected && !hostdata->dma_len) {
 			dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n");
 			NCR5380_information_transfer(instance);
 			done = 0;
@@ -865,7 +826,88 @@
 	} while (!done);
 }
 
-#ifndef DONT_USE_INTR
+/*
+ * NCR5380_dma_complete - finish DMA transfer
+ * @instance: the scsi host instance
+ *
+ * Called by the interrupt handler when DMA finishes or a phase
+ * mismatch occurs (which would end the DMA transfer).
+ */
+
+static void NCR5380_dma_complete(struct Scsi_Host *instance)
+{
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+	int transferred;
+	unsigned char **data;
+	int *count;
+	int saved_data = 0, overrun = 0;
+	unsigned char p;
+
+	if (hostdata->read_overruns) {
+		p = hostdata->connected->SCp.phase;
+		if (p & SR_IO) {
+			udelay(10);
+			if ((NCR5380_read(BUS_AND_STATUS_REG) &
+			     (BASR_PHASE_MATCH | BASR_ACK)) ==
+			    (BASR_PHASE_MATCH | BASR_ACK)) {
+				saved_data = NCR5380_read(INPUT_DATA_REG);
+				overrun = 1;
+				dsprintk(NDEBUG_DMA, instance, "read overrun handled\n");
+			}
+		}
+	}
+
+#ifdef CONFIG_SUN3
+	if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
+		pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
+		       instance->host_no);
+		BUG();
+	}
+
+	if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
+	    (BASR_PHASE_MATCH | BASR_ACK)) {
+		pr_err("scsi%d: BASR %02x\n", instance->host_no,
+		       NCR5380_read(BUS_AND_STATUS_REG));
+		pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n",
+		       instance->host_no);
+		BUG();
+	}
+#endif
+
+	NCR5380_write(MODE_REG, MR_BASE);
+	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+	NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+	transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
+	hostdata->dma_len = 0;
+
+	data = (unsigned char **)&hostdata->connected->SCp.ptr;
+	count = &hostdata->connected->SCp.this_residual;
+	*data += transferred;
+	*count -= transferred;
+
+	if (hostdata->read_overruns) {
+		int cnt, toPIO;
+
+		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
+			cnt = toPIO = hostdata->read_overruns;
+			if (overrun) {
+				dsprintk(NDEBUG_DMA, instance,
+				         "Got an input overrun, using saved byte\n");
+				*(*data)++ = saved_data;
+				(*count)--;
+				cnt--;
+				toPIO--;
+			}
+			if (toPIO > 0) {
+				dsprintk(NDEBUG_DMA, instance,
+				         "Doing %d byte PIO to 0x%p\n", cnt, *data);
+				NCR5380_transfer_pio(instance, &p, &cnt, data);
+				*count -= toPIO - cnt;
+			}
+		}
+	}
+}
 
 /**
  * NCR5380_intr - generic NCR5380 irq handler
@@ -901,7 +943,7 @@
  * the Busy Monitor interrupt is enabled together with DMA Mode.
  */
 
-static irqreturn_t NCR5380_intr(int irq, void *dev_id)
+static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
 {
 	struct Scsi_Host *instance = dev_id;
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -919,7 +961,6 @@
 		dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n",
 		         irq, basr, sr, mr);
 
-#if defined(REAL_DMA)
 		if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) {
 			/* Probably End of DMA, Phase Mismatch or Loss of BSY.
 			 * We ack IRQ after clearing Mode Register. Workarounds
@@ -928,26 +969,14 @@
 
 			dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n");
 
-			int transferred;
-
-			if (!hostdata->connected)
-				panic("scsi%d : DMA interrupt with no connected cmd\n",
-				      instance->hostno);
-
-			transferred = hostdata->dmalen - NCR5380_dma_residual(instance);
-			hostdata->connected->SCp.this_residual -= transferred;
-			hostdata->connected->SCp.ptr += transferred;
-			hostdata->dmalen = 0;
-
-			/* FIXME: we need to poll briefly then defer a workqueue task ! */
-			NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_ACK, 0, 2 * HZ);
-
-			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-			NCR5380_write(MODE_REG, MR_BASE);
-			NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-		} else
-#endif /* REAL_DMA */
-		if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) &&
+			if (hostdata->connected) {
+				NCR5380_dma_complete(instance);
+				queue_work(hostdata->work_q, &hostdata->main_task);
+			} else {
+				NCR5380_write(MODE_REG, MR_BASE);
+				NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+			}
+		} else if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) &&
 		    (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) {
 			/* Probably reselected */
 			NCR5380_write(SELECT_ENABLE_REG, 0);
@@ -966,10 +995,16 @@
 			NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 
 			dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+#ifdef SUN3_SCSI_VME
+			dregs->csr |= CSR_DMA_ENABLE;
+#endif
 		}
 		handled = 1;
 	} else {
 		shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
+#ifdef SUN3_SCSI_VME
+		dregs->csr |= CSR_DMA_ENABLE;
+#endif
 	}
 
 	spin_unlock_irqrestore(&hostdata->lock, flags);
@@ -977,8 +1012,6 @@
 	return IRQ_RETVAL(handled);
 }
 
-#endif
-
 /*
  * Function : int NCR5380_select(struct Scsi_Host *instance,
  * struct scsi_cmnd *cmd)
@@ -1217,14 +1250,6 @@
 	 * was true but before BSY was false during selection, the information
 	 * transfer phase should be a MESSAGE OUT phase so that we can send the
 	 * IDENTIFY message.
-	 *
-	 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
-	 * message (2 bytes) with a tag ID that we increment with every command
-	 * until it wraps back to 0.
-	 *
-	 * XXX - it turns out that there are some broken SCSI-II devices,
-	 * which claim to support tagged queuing but fail when more than
-	 * some number of commands are issued at once.
 	 */
 
 	/* Wait for start of REQ/ACK handshake */
@@ -1247,9 +1272,6 @@
 	tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
 
 	len = 1;
-	cmd->tag = 0;
-
-	/* Send message(s) */
 	data = tmp;
 	phase = PHASE_MSGOUT;
 	NCR5380_transfer_pio(instance, &phase, &len, &data);
@@ -1259,6 +1281,10 @@
 	hostdata->connected = cmd;
 	hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun;
 
+#ifdef SUN3_SCSI_VME
+	dregs->csr |= CSR_INTR;
+#endif
+
 	initialize_SCp(cmd);
 
 	cmd = NULL;
@@ -1495,7 +1521,6 @@
 	return -1;
 }
 
-#if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL)
 /*
  * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
  * unsigned char *phase, int *count, unsigned char **data)
@@ -1520,53 +1545,47 @@
 				unsigned char **data)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	register int c = *count;
-	register unsigned char p = *phase;
-	register unsigned char *d = *data;
+	int c = *count;
+	unsigned char p = *phase;
+	unsigned char *d = *data;
 	unsigned char tmp;
-	int foo;
-#if defined(REAL_DMA_POLL)
-	int cnt, toPIO;
-	unsigned char saved_data = 0, overrun = 0, residue;
-#endif
+	int result = 0;
 
 	if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
 		*phase = tmp;
 		return -1;
 	}
-#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
+
+	hostdata->connected->SCp.phase = p;
+
 	if (p & SR_IO) {
-		if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS))
-			c -= 2;
+		if (hostdata->read_overruns)
+			c -= hostdata->read_overruns;
+		else if (hostdata->flags & FLAG_DMA_FIXUP)
+			--c;
 	}
-	hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c);
 
 	dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n",
-	         (p & SR_IO) ? "receive" : "send", c, *data);
+	         (p & SR_IO) ? "receive" : "send", c, d);
+
+#ifdef CONFIG_SUN3
+	/* send start chain */
+	sun3scsi_dma_start(c, *data);
 #endif
 
 	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
-
-#ifdef REAL_DMA
 	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
 	                        MR_ENABLE_EOP_INTR);
-#elif defined(REAL_DMA_POLL)
-	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY);
-#else
-	/*
-	 * Note : on my sample board, watch-dog timeouts occurred when interrupts
-	 * were not disabled for the duration of a single DMA transfer, from
-	 * before the setting of DMA mode to after transfer of the last byte.
-	 */
 
-	if (hostdata->flags & FLAG_NO_DMA_FIXUP)
-		NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
-		                        MR_ENABLE_EOP_INTR);
-	else
-		NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY);
-#endif				/* def REAL_DMA */
-
-	dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
+	if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) {
+		/* On the Medusa, it is a must to initialize the DMA before
+		 * starting the NCR. This is also the cleaner way for the TT.
+		 */
+		if (p & SR_IO)
+			result = NCR5380_dma_recv_setup(instance, d, c);
+		else
+			result = NCR5380_dma_send_setup(instance, d, c);
+	}
 
 	/*
 	 * On the PAS16 at least I/O recovery delays are not needed here.
@@ -1574,24 +1593,49 @@
 	 */
 
 	if (p & SR_IO) {
-		io_recovery_delay(1);
+		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+		NCR5380_io_delay(1);
 		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
 	} else {
-		io_recovery_delay(1);
+		NCR5380_io_delay(1);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
-		io_recovery_delay(1);
+		NCR5380_io_delay(1);
 		NCR5380_write(START_DMA_SEND_REG, 0);
-		io_recovery_delay(1);
+		NCR5380_io_delay(1);
 	}
 
-#if defined(REAL_DMA_POLL)
-	do {
-		tmp = NCR5380_read(BUS_AND_STATUS_REG);
-	} while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR | BASR_END_DMA_TRANSFER)));
+#ifdef CONFIG_SUN3
+#ifdef SUN3_SCSI_VME
+	dregs->csr |= CSR_DMA_ENABLE;
+#endif
+	sun3_dma_active = 1;
+#endif
+
+	if (hostdata->flags & FLAG_LATE_DMA_SETUP) {
+		/* On the Falcon, the DMA setup must be done after the last
+		 * NCR access, else the DMA setup gets trashed!
+		 */
+		if (p & SR_IO)
+			result = NCR5380_dma_recv_setup(instance, d, c);
+		else
+			result = NCR5380_dma_send_setup(instance, d, c);
+	}
+
+	/* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */
+	if (result < 0)
+		return result;
+
+	/* For real DMA, result is the byte count. DMA interrupt is expected. */
+	if (result > 0) {
+		hostdata->dma_len = result;
+		return 0;
+	}
+
+	/* The result is zero iff pseudo DMA send/receive was completed. */
+	hostdata->dma_len = c;
 
 /*
- * At this point, either we've completed DMA, or we have a phase mismatch,
- * or we've unexpectedly lost BUSY (which is a real error).
+ * A note regarding the DMA errata workarounds for early NMOS silicon.
  *
  * For DMA sends, we want to wait until the last byte has been
  * transferred out over the bus before we turn off DMA mode.  Alas, there
@@ -1618,79 +1662,16 @@
  * properly, or the target switches to MESSAGE IN phase to signal a
  * disconnection (either operation bringing the DMA to a clean halt).
  * However, in order to handle scatter-receive, we must work around the
- * problem.  The chosen fix is to DMA N-2 bytes, then check for the
+ * problem.  The chosen fix is to DMA fewer bytes, then check for the
  * condition before taking the NCR5380 out of DMA mode.  One or two extra
  * bytes are transferred via PIO as necessary to fill out the original
  * request.
  */
 
-	if (p & SR_IO) {
-		if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS)) {
-			udelay(10);
-			if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
-			    (BASR_PHASE_MATCH | BASR_ACK)) {
-				saved_data = NCR5380_read(INPUT_DATA_REGISTER);
-				overrun = 1;
-			}
-		}
-	} else {
-		int limit = 100;
-		while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) || (NCR5380_read(STATUS_REG) & SR_REQ)) {
-			if (!(tmp & BASR_PHASE_MATCH))
-				break;
-			if (--limit < 0)
-				break;
-		}
-	}
-
-	dsprintk(NDEBUG_DMA, "polled DMA transfer complete, basr 0x%02x, sr 0x%02x\n",
-	         tmp, NCR5380_read(STATUS_REG));
-
-	NCR5380_write(MODE_REG, MR_BASE);
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
-	residue = NCR5380_dma_residual(instance);
-	c -= residue;
-	*count -= c;
-	*data += c;
-	*phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
-
-	if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS) &&
-	    *phase == p && (p & SR_IO) && residue == 0) {
-		if (overrun) {
-			dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
-			**data = saved_data;
-			*data += 1;
-			*count -= 1;
-			cnt = toPIO = 1;
-		} else {
-			printk("No overrun??\n");
-			cnt = toPIO = 2;
-		}
-		dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data);
-		NCR5380_transfer_pio(instance, phase, &cnt, data);
-		*count -= toPIO - cnt;
-	}
-
-	dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count));
-	return 0;
-
-#elif defined(REAL_DMA)
-	return 0;
-#else				/* defined(REAL_DMA_POLL) */
-	if (p & SR_IO) {
-		foo = NCR5380_pread(instance, d,
-			hostdata->flags & FLAG_NO_DMA_FIXUP ? c : c - 1);
-		if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) {
+	if (hostdata->flags & FLAG_DMA_FIXUP) {
+		if (p & SR_IO) {
 			/*
-			 * We can't disable DMA mode after successfully transferring
-			 * what we plan to be the last byte, since that would open up
-			 * a race condition where if the target asserted REQ before
-			 * we got the DMA mode reset, the NCR5380 would have latched
-			 * an additional byte into the INPUT DATA register and we'd
-			 * have dropped it.
-			 *
-			 * The workaround was to transfer one fewer bytes than we
+			 * The workaround was to transfer fewer bytes than we
 			 * intended to with the pseudo-DMA read function, wait for
 			 * the chip to latch the last byte, read it, and then disable
 			 * pseudo-DMA mode.
@@ -1706,19 +1687,16 @@
 
 			if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
 			                          BASR_DRQ, BASR_DRQ, HZ) < 0) {
-				foo = -1;
+				result = -1;
 				shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
 			}
 			if (NCR5380_poll_politely(instance, STATUS_REG,
 			                          SR_REQ, 0, HZ) < 0) {
-				foo = -1;
+				result = -1;
 				shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
 			}
-			d[c - 1] = NCR5380_read(INPUT_DATA_REG);
-		}
-	} else {
-		foo = NCR5380_pwrite(instance, d, c);
-		if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) {
+			d[*count - 1] = NCR5380_read(INPUT_DATA_REG);
+		} else {
 			/*
 			 * Wait for the last byte to be sent.  If REQ is being asserted for
 			 * the byte we're interested, we'll ACK it and it will go false.
@@ -1726,21 +1704,15 @@
 			if (NCR5380_poll_politely2(instance,
 			     BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
 			     BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) {
-				foo = -1;
+				result = -1;
 				shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n");
 			}
 		}
 	}
-	NCR5380_write(MODE_REG, MR_BASE);
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-	NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-	*data = d + c;
-	*count = 0;
-	*phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
-	return foo;
-#endif				/* def REAL_DMA */
+
+	NCR5380_dma_complete(instance);
+	return result;
 }
-#endif				/* defined(REAL_DMA) | defined(PSEUDO_DMA) */
 
 /*
  * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
@@ -1770,6 +1742,10 @@
 	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
 	struct scsi_cmnd *cmd;
 
+#ifdef SUN3_SCSI_VME
+	dregs->csr |= CSR_INTR;
+#endif
+
 	while ((cmd = hostdata->connected)) {
 		struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
 
@@ -1781,6 +1757,31 @@
 				old_phase = phase;
 				NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
 			}
+#ifdef CONFIG_SUN3
+			if (phase == PHASE_CMDOUT) {
+				void *d;
+				unsigned long count;
+
+				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+					count = cmd->SCp.buffer->length;
+					d = sg_virt(cmd->SCp.buffer);
+				} else {
+					count = cmd->SCp.this_residual;
+					d = cmd->SCp.ptr;
+				}
+
+				if (sun3_dma_setup_done != cmd &&
+				    sun3scsi_dma_xfer_len(count, cmd) > 0) {
+					sun3scsi_dma_setup(instance, d, count,
+					                   rq_data_dir(cmd->request));
+					sun3_dma_setup_done = cmd;
+				}
+#ifdef SUN3_SCSI_VME
+				dregs->csr |= CSR_INTR;
+#endif
+			}
+#endif /* CONFIG_SUN3 */
+
 			if (sink && (phase != PHASE_MSGOUT)) {
 				NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
 
@@ -1831,13 +1832,11 @@
 				 * in an unconditional loop.
 				 */
 
-#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
 				transfersize = 0;
-				if (!cmd->device->borken &&
-				    !(hostdata->flags & FLAG_NO_PSEUDO_DMA))
+				if (!cmd->device->borken)
 					transfersize = NCR5380_dma_xfer_len(instance, cmd, phase);
 
-				if (transfersize) {
+				if (transfersize > 0) {
 					len = transfersize;
 					if (NCR5380_transfer_dma(instance, &phase,
 					    &len, (unsigned char **)&cmd->SCp.ptr)) {
@@ -1853,11 +1852,8 @@
 						do_abort(instance);
 						cmd->result = DID_ERROR << 16;
 						/* XXX - need to source or sink data here, as appropriate */
-					} else
-						cmd->SCp.this_residual -= transfersize - len;
-				} else
-#endif				/* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
-				{
+					}
+				} else {
 					/* Break up transfer into 3 ms chunks,
 					 * presuming 6 accesses per handshake.
 					 */
@@ -1868,6 +1864,10 @@
 					                     (unsigned char **)&cmd->SCp.ptr);
 					cmd->SCp.this_residual -= transfersize - len;
 				}
+#ifdef CONFIG_SUN3
+				if (sun3_dma_setup_done == cmd)
+					sun3_dma_setup_done = NULL;
+#endif
 				return;
 			case PHASE_MSGIN:
 				len = 1;
@@ -1912,6 +1912,8 @@
 
 					/* Enable reselect interrupts */
 					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
+					maybe_release_dma_irq(instance);
 					return;
 				case MESSAGE_REJECT:
 					/* Accept message by clearing ACK */
@@ -1944,6 +1946,9 @@
 
 					/* Enable reselect interrupts */
 					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+#ifdef SUN3_SCSI_VME
+					dregs->csr |= CSR_DMA_ENABLE;
+#endif
 					return;
 					/*
 					 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
@@ -2047,6 +2052,7 @@
 					hostdata->connected = NULL;
 					cmd->result = DID_ERROR << 16;
 					complete_cmd(instance, cmd);
+					maybe_release_dma_irq(instance);
 					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 					return;
 				}
@@ -2094,10 +2100,8 @@
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char target_mask;
-	unsigned char lun, phase;
-	int len;
+	unsigned char lun;
 	unsigned char msg[3];
-	unsigned char *data;
 	struct NCR5380_cmd *ncmd;
 	struct scsi_cmnd *tmp;
 
@@ -2139,15 +2143,26 @@
 		return;
 	}
 
-	len = 1;
-	data = msg;
-	phase = PHASE_MSGIN;
-	NCR5380_transfer_pio(instance, &phase, &len, &data);
+#ifdef CONFIG_SUN3
+	/* acknowledge toggle to MSGIN */
+	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));
 
-	if (len) {
-		do_abort(instance);
-		return;
+	/* peek at the byte without really hitting the bus */
+	msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
+#else
+	{
+		int len = 1;
+		unsigned char *data = msg;
+		unsigned char phase = PHASE_MSGIN;
+
+		NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+		if (len) {
+			do_abort(instance);
+			return;
+		}
 	}
+#endif /* CONFIG_SUN3 */
 
 	if (!(msg[0] & 0x80)) {
 		shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got ");
@@ -2195,60 +2210,38 @@
 		return;
 	}
 
+#ifdef CONFIG_SUN3
+	{
+		void *d;
+		unsigned long count;
+
+		if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
+			count = tmp->SCp.buffer->length;
+			d = sg_virt(tmp->SCp.buffer);
+		} else {
+			count = tmp->SCp.this_residual;
+			d = tmp->SCp.ptr;
+		}
+
+		if (sun3_dma_setup_done != tmp &&
+		    sun3scsi_dma_xfer_len(count, tmp) > 0) {
+			sun3scsi_dma_setup(instance, d, count,
+			                   rq_data_dir(tmp->request));
+			sun3_dma_setup_done = tmp;
+		}
+	}
+
+	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
+#endif /* CONFIG_SUN3 */
+
 	/* Accept message by clearing ACK */
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 
 	hostdata->connected = tmp;
-	dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n",
-	         scmd_id(tmp), tmp->device->lun, tmp->tag);
+	dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu\n",
+	         scmd_id(tmp), tmp->device->lun);
 }
 
-/*
- * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
- *
- * Purpose : called by interrupt handler when DMA finishes or a phase
- * mismatch occurs (which would finish the DMA transfer).
- *
- * Inputs : instance - this instance of the NCR5380.
- *
- * Returns : pointer to the scsi_cmnd structure for which the I_T_L
- * nexus has been reestablished, on failure NULL is returned.
- */
-
-#ifdef REAL_DMA
-static void NCR5380_dma_complete(NCR5380_instance * instance) {
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	int transferred;
-
-	/*
-	 * XXX this might not be right.
-	 *
-	 * Wait for final byte to transfer, ie wait for ACK to go false.
-	 *
-	 * We should use the Last Byte Sent bit, unfortunately this is
-	 * not available on the 5380/5381 (only the various CMOS chips)
-	 *
-	 * FIXME: timeout, and need to handle long timeout/irq case
-	 */
-
-	NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, BASR_ACK, 0, 5*HZ);
-
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
-	/*
-	 * The only places we should see a phase mismatch and have to send
-	 * data from the same set of pointers will be the data transfer
-	 * phases.  So, residual, requested length are only important here.
-	 */
-
-	if (!(hostdata->connected->SCp.phase & SR_CD)) {
-		transferred = instance->dmalen - NCR5380_dma_residual();
-		hostdata->connected->SCp.this_residual -= transferred;
-		hostdata->connected->SCp.ptr += transferred;
-	}
-}
-#endif				/* def REAL_DMA */
-
 /**
  * list_find_cmd - test for presence of a command in a linked list
  * @haystack: list of commands
@@ -2360,9 +2353,7 @@
 	if (hostdata->connected == cmd) {
 		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
 		hostdata->connected = NULL;
-#ifdef REAL_DMA
 		hostdata->dma_len = 0;
-#endif
 		if (do_abort(instance)) {
 			set_host_byte(cmd, DID_ERROR);
 			complete_cmd(instance, cmd);
@@ -2388,6 +2379,7 @@
 		dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
 
 	queue_work(hostdata->work_q, &hostdata->main_task);
+	maybe_release_dma_irq(instance);
 	spin_unlock_irqrestore(&hostdata->lock, flags);
 
 	return result;
@@ -2445,7 +2437,7 @@
 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
 
 		set_host_byte(cmd, DID_RESET);
-		cmd->scsi_done(cmd);
+		complete_cmd(instance, cmd);
 	}
 	INIT_LIST_HEAD(&hostdata->disconnected);
 
@@ -2465,11 +2457,10 @@
 
 	for (i = 0; i < 8; ++i)
 		hostdata->busy[i] = 0;
-#ifdef REAL_DMA
 	hostdata->dma_len = 0;
-#endif
 
 	queue_work(hostdata->work_q, &hostdata->main_task);
+	maybe_release_dma_irq(instance);
 	spin_unlock_irqrestore(&hostdata->lock, flags);
 
 	return SUCCESS;
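
With the REAL_DMA/PSEUDO_DMA ifdefs removed above, the core now drives per-board NCR5380_dma_recv_setup()/NCR5380_dma_send_setup() hooks and interprets their return value as described in the hunk's comments: negative on failure, a positive byte count when real DMA was started (a DMA-complete interrupt is then expected), or 0 when pseudo DMA already finished the transfer. A sketch of such a hook under that convention; the board_* helpers are hypothetical, only the return-value protocol comes from the patch:

#include <scsi/scsi_host.h>

int board_start_dma_read(struct Scsi_Host *instance, unsigned char *dst, int len);
int board_pread(struct Scsi_Host *instance, unsigned char *dst, int len);
bool board_has_real_dma(struct Scsi_Host *instance);

static int example_dma_recv_setup(struct Scsi_Host *instance,
				  unsigned char *dst, int len)
{
	if (board_has_real_dma(instance))
		/* real DMA: return bytes programmed, interrupt will follow */
		return board_start_dma_read(instance, dst, len);

	/* pseudo DMA: 0 on success, negative errno on error */
	return board_pread(instance, dst, len);
}
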
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index a792886..c607287 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -199,13 +199,6 @@
 
 #define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
 
-/* 
- * "Special" value for the (unsigned char) command tag, to indicate
- * I_T_L nexus instead of I_T_L_Q.
- */
-
-#define TAG_NONE	0xff
-
 /*
  * These are "special" values for the irq and dma_channel fields of the 
  * Scsi_Host structure
@@ -220,28 +213,17 @@
 #define NO_IRQ		0
 #endif
 
-#define FLAG_NO_DMA_FIXUP		1	/* No DMA errata workarounds */
+#define FLAG_DMA_FIXUP			1	/* Use DMA errata workarounds */
 #define FLAG_NO_PSEUDO_DMA		8	/* Inhibit DMA */
 #define FLAG_LATE_DMA_SETUP		32	/* Setup NCR before DMA H/W */
-#define FLAG_TAGGED_QUEUING		64	/* as X3T9.2 spelled it */
 #define FLAG_TOSHIBA_DELAY		128	/* Allow for borken CD-ROMs */
 
-#ifdef SUPPORT_TAGS
-struct tag_alloc {
-	DECLARE_BITMAP(allocated, MAX_TAGS);
-	int nr_allocated;
-	int queue_size;
-};
-#endif
-
 struct NCR5380_hostdata {
 	NCR5380_implementation_fields;		/* implementation specific */
 	struct Scsi_Host *host;			/* Host backpointer */
 	unsigned char id_mask, id_higher_mask;	/* 1 << id, all bits greater */
 	unsigned char busy[8];			/* index = target, bit = lun */
-#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
 	int dma_len;				/* requested length of DMA */
-#endif
 	unsigned char last_message;		/* last message OUT */
 	struct scsi_cmnd *connected;		/* currently connected cmnd */
 	struct scsi_cmnd *selecting;		/* cmnd to be connected */
@@ -256,13 +238,6 @@
 	int read_overruns;                /* number of bytes to cut from a
 	                                   * transfer to handle chip overruns */
 	struct work_struct main_task;
-#ifdef SUPPORT_TAGS
-	struct tag_alloc TagAlloc[8][8];	/* 8 targets and 8 LUNs */
-#endif
-#ifdef PSEUDO_DMA
-	unsigned spin_max_r;
-	unsigned spin_max_w;
-#endif
 	struct workqueue_struct *work_q;
 	unsigned long accesses_per_ms;	/* chip register accesses per ms */
 };
@@ -305,132 +280,20 @@
 #define NCR5380_dprint_phase(flg, arg) do {} while (0)
 #endif
 
-#if defined(AUTOPROBE_IRQ)
 static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
-#endif
 static int NCR5380_init(struct Scsi_Host *instance, int flags);
 static int NCR5380_maybe_reset_bus(struct Scsi_Host *);
 static void NCR5380_exit(struct Scsi_Host *instance);
 static void NCR5380_information_transfer(struct Scsi_Host *instance);
-#ifndef DONT_USE_INTR
 static irqreturn_t NCR5380_intr(int irq, void *dev_id);
-#endif
 static void NCR5380_main(struct work_struct *work);
 static const char *NCR5380_info(struct Scsi_Host *instance);
 static void NCR5380_reselect(struct Scsi_Host *instance);
 static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
-#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL)
 static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-#endif
 static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
+static int NCR5380_poll_politely(struct Scsi_Host *, int, int, int, int);
+static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
 
-#if (defined(REAL_DMA) || defined(REAL_DMA_POLL))
-
-#if defined(i386) || defined(__alpha__)
-
-/**
- *	NCR5380_pc_dma_setup		-	setup ISA DMA
- *	@instance: adapter to set up
- *	@ptr: block to transfer (virtual address)
- *	@count: number of bytes to transfer
- *	@mode: DMA controller mode to use
- *
- *	Program the DMA controller ready to perform an ISA DMA transfer
- *	on this chip.
- *
- *	Locks: takes and releases the ISA DMA lock.
- */
- 
-static __inline__ int NCR5380_pc_dma_setup(struct Scsi_Host *instance, unsigned char *ptr, unsigned int count, unsigned char mode)
-{
-	unsigned limit;
-	unsigned long bus_addr = virt_to_bus(ptr);
-	unsigned long flags;
-
-	if (instance->dma_channel <= 3) {
-		if (count > 65536)
-			count = 65536;
-		limit = 65536 - (bus_addr & 0xFFFF);
-	} else {
-		if (count > 65536 * 2)
-			count = 65536 * 2;
-		limit = 65536 * 2 - (bus_addr & 0x1FFFF);
-	}
-
-	if (count > limit)
-		count = limit;
-
-	if ((count & 1) || (bus_addr & 1))
-		panic("scsi%d : attempted unaligned DMA transfer\n", instance->host_no);
-	
-	flags=claim_dma_lock();
-	disable_dma(instance->dma_channel);
-	clear_dma_ff(instance->dma_channel);
-	set_dma_addr(instance->dma_channel, bus_addr);
-	set_dma_count(instance->dma_channel, count);
-	set_dma_mode(instance->dma_channel, mode);
-	enable_dma(instance->dma_channel);
-	release_dma_lock(flags);
-	
-	return count;
-}
-
-/**
- *	NCR5380_pc_dma_write_setup		-	setup ISA DMA write
- *	@instance: adapter to set up
- *	@ptr: block to transfer (virtual address)
- *	@count: number of bytes to transfer
- *
- *	Program the DMA controller ready to perform an ISA DMA write to the
- *	SCSI controller.
- *
- *	Locks: called routines take and release the ISA DMA lock.
- */
-
-static __inline__ int NCR5380_pc_dma_write_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count)
-{
-	return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_WRITE);
-}
-
-/**
- *	NCR5380_pc_dma_read_setup		-	setup ISA DMA read
- *	@instance: adapter to set up
- *	@ptr: block to transfer (virtual address)
- *	@count: number of bytes to transfer
- *
- *	Program the DMA controller ready to perform an ISA DMA read from the
- *	SCSI controller.
- *
- *	Locks: called routines take and release the ISA DMA lock.
- */
-
-static __inline__ int NCR5380_pc_dma_read_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count)
-{
-	return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_READ);
-}
-
-/**
- *	NCR5380_pc_dma_residual		-	return bytes left 
- *	@instance: adapter
- *
- *	Reports the number of bytes left over after the DMA was terminated.
- *
- *	Locks: takes and releases the ISA DMA lock.
- */
-
-static __inline__ int NCR5380_pc_dma_residual(struct Scsi_Host *instance)
-{
-	unsigned long flags;
-	int tmp;
-
-	flags = claim_dma_lock();
-	clear_dma_ff(instance->dma_channel);
-	tmp = get_dma_residue(instance->dma_channel);
-	release_dma_lock(flags);
-	
-	return tmp;
-}
-#endif				/* defined(i386) || defined(__alpha__) */
-#endif				/* defined(REAL_DMA)  */
 #endif				/* __KERNEL__ */
 #endif				/* NCR5380_H */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7dfd0fa..6678d1f 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -555,8 +555,6 @@
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-	if (!cmd_fibcontext)
-		return -ENOMEM;
 
 	aac_fib_init(cmd_fibcontext);
 	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
@@ -1037,8 +1035,6 @@
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-	if (!cmd_fibcontext)
-		return -ENOMEM;
 
 	aac_fib_init(cmd_fibcontext);
 	dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
@@ -1950,10 +1946,6 @@
 	 *	Allocate and initialize a Fib
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-	if (!cmd_fibcontext) {
-		printk(KERN_WARNING "aac_read: fib allocation failed\n");
-		return -1;
-	}
 
 	status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
 
@@ -2048,16 +2040,6 @@
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-	if (!cmd_fibcontext) {
-		/* FIB temporarily unavailable,not catastrophic failure */
-
-		/* scsicmd->result = DID_ERROR << 16;
-		 * scsicmd->scsi_done(scsicmd);
-		 * return 0;
-		 */
-		printk(KERN_WARNING "aac_write: fib allocation failed\n");
-		return -1;
-	}
 
 	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
 
@@ -2283,8 +2265,6 @@
 	 *	Allocate and initialize a Fib
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
-	if (!cmd_fibcontext)
-		return SCSI_MLQUEUE_HOST_BUSY;
 
 	aac_fib_init(cmd_fibcontext);
 
@@ -3184,8 +3164,6 @@
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-	if (!cmd_fibcontext)
-		return -1;
 
 	status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
 
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index efa493c..8f90d9e 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -29,6 +29,7 @@
 #define AAC_INT_MODE_MSI		(1<<1)
 #define AAC_INT_MODE_AIF		(1<<2)
 #define AAC_INT_MODE_SYNC		(1<<3)
+#define AAC_INT_MODE_MSIX		(1<<16)
 
 #define AAC_INT_ENABLE_TYPE1_INTX	0xfffffffb
 #define AAC_INT_ENABLE_TYPE1_MSIX	0xfffffffa
@@ -62,7 +63,7 @@
 #define	PMC_GLOBAL_INT_BIT0		0x00000001
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 41052
+# define AAC_DRIVER_BUILD 41066
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -720,7 +721,7 @@
 };
 
 
-#define Sa_MINIPORT_REVISION			1
+#define SA_INIT_NUM_MSIXVECTORS		1
 
 #define sa_readw(AEP, CSR)		readl(&((AEP)->regs.sa->CSR))
 #define sa_readl(AEP, CSR)		readl(&((AEP)->regs.sa->CSR))
@@ -2065,6 +2066,10 @@
 #define			AifEnAddJBOD		30	/* JBOD created */
 #define			AifEnDeleteJBOD		31	/* JBOD deleted */
 
+#define			AifBuManagerEvent		42 /* Bu management */
+#define			AifBuCacheDataLoss		10
+#define			AifBuCacheDataRecover	11
+
 #define		AifCmdJobProgress	2	/* Progress report */
 #define			AifJobCtrZero	101	/* Array Zero progress */
 #define			AifJobStsSuccess 1	/* Job completes */
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 2b4e753..341ea32 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -37,6 +37,7 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
+#include <linux/delay.h>
 #include <linux/completion.h>
 #include <linux/mm.h>
 #include <scsi/scsi_host.h>
@@ -47,6 +48,20 @@
 	.irq_mod = 1
 };
 
+static inline int aac_is_msix_mode(struct aac_dev *dev)
+{
+	u32 status;
+
+	status = src_readl(dev, MUnit.OMR);
+	return (status & AAC_INT_MODE_MSIX);
+}
+
+static inline void aac_change_to_intx(struct aac_dev *dev)
+{
+	aac_src_access_devreg(dev, AAC_DISABLE_MSIX);
+	aac_src_access_devreg(dev, AAC_ENABLE_INTX);
+}
+
 static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
 {
 	unsigned char *base;
@@ -91,7 +106,7 @@
 	init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
 	if (dev->max_fib_size != sizeof(struct hw_fib))
 		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
-	init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION);
+	init->Sa_MSIXVectors = cpu_to_le32(SA_INIT_NUM_MSIXVECTORS);
 	init->fsrev = cpu_to_le32(dev->fsrev);
 
 	/*
@@ -378,21 +393,8 @@
 			msi_count = i;
 		} else {
 			dev->msi_enabled = 0;
-			printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
-					dev->name, dev->id, i);
-		}
-	}
-
-	if (!dev->msi_enabled) {
-		msi_count = 1;
-		i = pci_enable_msi(dev->pdev);
-
-		if (!i) {
-			dev->msi_enabled = 1;
-			dev->msi = 1;
-		} else {
-			printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n",
-					dev->name, dev->id, i);
+			dev_err(&dev->pdev->dev,
+			"MSIX not supported!! Will try INTX 0x%x.\n", i);
 		}
 	}
 
@@ -427,6 +429,15 @@
 	dev->comm_interface = AAC_COMM_PRODUCER;
 	dev->raw_io_interface = dev->raw_io_64 = 0;
 
+
+	/*
+	 * Enable INTX mode, if not already enabled
+	 */
+	if (aac_is_msix_mode(dev)) {
+		aac_change_to_intx(dev);
+		dev_info(&dev->pdev->dev, "Changed firmware to INTX mode");
+	}
+
 	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
 		0, 0, 0, 0, 0, 0,
 		status+0, status+1, status+2, status+3, NULL)) &&
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 511bbc5..0aeecec 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -637,10 +637,10 @@
 					}
 					return -EFAULT;
 				}
-				/* We used to udelay() here but that absorbed
-				 * a CPU when a timeout occured. Not very
-				 * useful. */
-				cpu_relax();
+				/*
+				 * Allow other processes / CPUS to use core
+				 */
+				schedule();
 			}
 		} else if (down_interruptible(&fibptr->event_wait)) {
 			/* Do nothing ... satisfy
@@ -901,6 +901,31 @@
 	memset(cp, 0, 256);
 }
 
+static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
+{
+	return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
+}
+
+
+static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
+{
+	switch (aac_aif_data(aifcmd, 1)) {
+	case AifBuCacheDataLoss:
+		if (aac_aif_data(aifcmd, 2))
+			dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
+			aac_aif_data(aifcmd, 2));
+		else
+			dev_info(&dev->pdev->dev, "Backup unit had cache data loss\n");
+		break;
+	case AifBuCacheDataRecover:
+		if (aac_aif_data(aifcmd, 2))
+			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
+			aac_aif_data(aifcmd, 2));
+		else
+			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
+		break;
+	}
+}
 
 /**
  *	aac_handle_aif		-	Handle a message from the firmware
@@ -1154,6 +1179,8 @@
 				  ADD : DELETE;
 				break;
 			}
+			case AifBuManagerEvent:
+				aac_handle_aif_bu(dev, aifcmd);
 			break;
 		}
 
@@ -1996,6 +2023,10 @@
 		if (difference <= 0)
 			difference = 1;
 		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (kthread_should_stop())
+			break;
+
 		schedule_timeout(difference);
 
 		if (kthread_should_stop())
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index d677b52..7e83620 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -392,9 +392,10 @@
 			if (likely(fib->callback && fib->callback_data)) {
 				fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
 				fib->callback(fib->callback_data, fib);
-			} else {
-				aac_fib_complete(fib);
-			}
+			} else
+				dev_info(&dev->pdev->dev,
+				"Invalid callback_fib[%d] (*%p)(%p)\n",
+				index, fib->callback, fib->callback_data);
 		} else {
 			unsigned long flagv;
 	  		dprintk((KERN_INFO "event_wait up\n"));
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 21a67ed..a943bd2 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -452,10 +452,11 @@
 		else if (depth < 2)
 			depth = 2;
 		scsi_change_queue_depth(sdev, depth);
-	} else
+	} else {
 		scsi_change_queue_depth(sdev, 1);
 
 		sdev->tagged_supported = 1;
+	}
 
 	return 0;
 }
@@ -1298,6 +1299,8 @@
 	else
 		shost->this_id = shost->max_id;
 
+	aac_intr_normal(aac, 0, 2, 0, NULL);
+
 	/*
 	 * dmb - we may need to move the setting of these parms somewhere else once
 	 * we get a fib that can report the actual numbers
@@ -1430,8 +1433,8 @@
 		/* After EEH recovery or suspend resume, max_msix count
 		 * may change, therefore updating in init as well.
 		 */
-		aac_adapter_start(dev);
 		dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+		aac_adapter_start(dev);
 	}
 	return 0;
 
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index bc0203f..28f8b8a 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -135,7 +135,8 @@
 
 	if (mode & AAC_INT_MODE_AIF) {
 		/* handle AIF */
-		aac_intr_normal(dev, 0, 2, 0, NULL);
+		if (dev->aif_thread && dev->fsa_dev)
+			aac_intr_normal(dev, 0, 2, 0, NULL);
 		if (dev->msi_enabled)
 			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
 		mode = 0;
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 9f636a3..0fdc98b 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -477,7 +477,7 @@
 
 	err = asd_start_seqs(asd_ha);
 	if (err) {
-		asd_printk("coudln't start seqs for %s\n",
+		asd_printk("couldn't start seqs for %s\n",
 			   pci_name(asd_ha->pcidev));
 		goto out;
 	}
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index 5fdca93..da1e056 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -1352,7 +1352,7 @@
 	for_each_sequencer(lseq_mask, lseq_mask, lseq) {
 		err = asd_seq_start_lseq(asd_ha, lseq);
 		if (err) {
-			asd_printk("coudln't start LSEQ %d for %s\n", lseq,
+			asd_printk("couldn't start LSEQ %d for %s\n", lseq,
 				   pci_name(asd_ha->pcidev));
 			return err;
 		}
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 221f18c..8e9cfe8 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -13,13 +13,14 @@
 
 #include <scsi/scsi_host.h>
 
-#define PSEUDO_DMA
-
 #define priv(host)			((struct NCR5380_hostdata *)(host)->hostdata)
 #define NCR5380_read(reg)		cumanascsi_read(instance, reg)
 #define NCR5380_write(reg, value)	cumanascsi_write(instance, reg, value)
 
 #define NCR5380_dma_xfer_len(instance, cmd, phase)	(cmd->transfersize)
+#define NCR5380_dma_recv_setup		cumanascsi_pread
+#define NCR5380_dma_send_setup		cumanascsi_pwrite
+#define NCR5380_dma_residual(instance)	(0)
 
 #define NCR5380_intr			cumanascsi_intr
 #define NCR5380_queue_command		cumanascsi_queue_command
@@ -41,8 +42,8 @@
 #define L(v)	(((v)<<16)|((v) & 0x0000ffff))
 #define H(v)	(((v)>>16)|((v) & 0xffff0000))
 
-static inline int
-NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len)
+static inline int cumanascsi_pwrite(struct Scsi_Host *host,
+                                    unsigned char *addr, int len)
 {
   unsigned long *laddr;
   void __iomem *dma = priv(host)->dma + 0x2000;
@@ -101,11 +102,14 @@
   }
 end:
   writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
-  return len;
+
+	if (len)
+		return -1;
+	return 0;
 }
 
-static inline int
-NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len)
+static inline int cumanascsi_pread(struct Scsi_Host *host,
+                                   unsigned char *addr, int len)
 {
   unsigned long *laddr;
   void __iomem *dma = priv(host)->dma + 0x2000;
@@ -163,7 +167,10 @@
   }
 end:
   writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
-  return len;
+
+	if (len)
+		return -1;
+	return 0;
 }
 
 static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
@@ -239,7 +246,7 @@
 
 	host->irq = ec->irq;
 
-	ret = NCR5380_init(host, 0);
+	ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP);
 	if (ret)
 		goto out_unmap;
 
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index faa1bee..edce5f3 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -365,7 +365,7 @@
 	.eh_abort_handler		= fas216_eh_abort,
 	.can_queue			= 1,
 	.this_id			= 7,
-	.sg_tablesize			= SCSI_MAX_SG_CHAIN_SEGMENTS,
+	.sg_tablesize			= SG_MAX_SEGMENTS,
 	.dma_boundary			= IOMD_DMA_BOUNDARY,
 	.use_clustering			= DISABLE_CLUSTERING,
 	.proc_name			= "cumanascsi2",
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index a8ad688..e93e047 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -484,7 +484,7 @@
 	.eh_abort_handler		= fas216_eh_abort,
 	.can_queue			= 1,
 	.this_id			= 7,
-	.sg_tablesize			= SCSI_MAX_SG_CHAIN_SEGMENTS,
+	.sg_tablesize			= SG_MAX_SEGMENTS,
 	.dma_boundary			= IOMD_DMA_BOUNDARY,
 	.use_clustering			= DISABLE_CLUSTERING,
 	.proc_name			= "eesox",
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 1fab1d18..a396024 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -14,9 +14,6 @@
 
 #include <scsi/scsi_host.h>
 
-/*#define PSEUDO_DMA*/
-#define DONT_USE_INTR
-
 #define priv(host)			((struct NCR5380_hostdata *)(host)->hostdata)
 
 #define NCR5380_read(reg) \
@@ -24,7 +21,10 @@
 #define NCR5380_write(reg, value) \
 	writeb(value, priv(instance)->base + ((reg) << 2))
 
-#define NCR5380_dma_xfer_len(instance, cmd, phase)	(cmd->transfersize)
+#define NCR5380_dma_xfer_len(instance, cmd, phase)	(0)
+#define NCR5380_dma_recv_setup		oakscsi_pread
+#define NCR5380_dma_send_setup		oakscsi_pwrite
+#define NCR5380_dma_residual(instance)	(0)
 
 #define NCR5380_queue_command		oakscsi_queue_command
 #define NCR5380_info			oakscsi_info
@@ -40,23 +40,23 @@
 #define STAT	((128 + 16) << 2)
 #define DATA	((128 + 8) << 2)
 
-static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *addr,
-              int len)
+static inline int oakscsi_pwrite(struct Scsi_Host *instance,
+                                 unsigned char *addr, int len)
 {
   void __iomem *base = priv(instance)->base;
 
 printk("writing %p len %d\n",addr, len);
-  if(!len) return -1;
 
   while(1)
   {
     int status;
     while (((status = readw(base + STAT)) & 0x100)==0);
   }
+  return 0;
 }
 
-static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *addr,
-              int len)
+static inline int oakscsi_pread(struct Scsi_Host *instance,
+                                unsigned char *addr, int len)
 {
   void __iomem *base = priv(instance)->base;
 printk("reading %p len %d\n", addr, len);
@@ -73,7 +73,7 @@
       if(status & 0x200 || !timeout)
       {
         printk("status = %08X\n", status);
-        return 1;
+        return -1;
       }
     }
 
@@ -143,7 +143,7 @@
 	host->irq = NO_IRQ;
 	host->n_io_port = 255;
 
-	ret = NCR5380_init(host, 0);
+	ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP);
 	if (ret)
 		goto out_unmap;
 
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 5e1b73e..79aa889 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -291,7 +291,7 @@
 
 	.can_queue			= 8,
 	.this_id			= 7,
-	.sg_tablesize			= SCSI_MAX_SG_CHAIN_SEGMENTS,
+	.sg_tablesize			= SG_MAX_SEGMENTS,
 	.dma_boundary			= IOMD_DMA_BOUNDARY,
 	.cmd_per_lun			= 2,
 	.use_clustering			= ENABLE_CLUSTERING,
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
deleted file mode 100644
index 389825b..0000000
--- a/drivers/scsi/atari_NCR5380.c
+++ /dev/null
@@ -1,2676 +0,0 @@
-/*
- * NCR 5380 generic driver routines.  These should make it *trivial*
- * to implement 5380 SCSI drivers under Linux with a non-trantor
- * architecture.
- *
- * Note that these routines also work with NR53c400 family chips.
- *
- * Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 666-5836
- *
- * For more information, please consult
- *
- * NCR 5380 Family
- * SCSI Protocol Controller
- * Databook
- *
- * NCR Microelectronics
- * 1635 Aeroplaza Drive
- * Colorado Springs, CO 80916
- * 1+ (719) 578-3400
- * 1+ (800) 334-5454
- */
-
-/* Ported to Atari by Roman Hodek and others. */
-
-/* Adapted for the sun3 by Sam Creasey. */
-
-/*
- * Design
- *
- * This is a generic 5380 driver.  To use it on a different platform,
- * one simply writes appropriate system specific macros (ie, data
- * transfer - some PC's will use the I/O bus, 68K's must use
- * memory mapped) and drops this file in their 'C' wrapper.
- *
- * As far as command queueing, two queues are maintained for
- * each 5380 in the system - commands that haven't been issued yet,
- * and commands that are currently executing.  This means that an
- * unlimited number of commands may be queued, letting
- * more commands propagate from the higher driver levels giving higher
- * throughput.  Note that both I_T_L and I_T_L_Q nexuses are supported,
- * allowing multiple commands to propagate all the way to a SCSI-II device
- * while a command is already executing.
- *
- *
- * Issues specific to the NCR5380 :
- *
- * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
- * piece of hardware that requires you to sit in a loop polling for
- * the REQ signal as long as you are connected.  Some devices are
- * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
- * while doing long seek operations. [...] These
- * broken devices are the exception rather than the rule and I'd rather
- * spend my time optimizing for the normal case.
- *
- * Architecture :
- *
- * At the heart of the design is a coroutine, NCR5380_main,
- * which is started from a workqueue for each NCR5380 host in the
- * system.  It attempts to establish I_T_L or I_T_L_Q nexuses by
- * removing the commands from the issue queue and calling
- * NCR5380_select() if a nexus is not established.
- *
- * Once a nexus is established, the NCR5380_information_transfer()
- * phase goes through the various phases as instructed by the target.
- * if the target goes into MSG IN and sends a DISCONNECT message,
- * the command structure is placed into the per instance disconnected
- * queue, and NCR5380_main tries to find more work.  If the target is
- * idle for too long, the system will try to sleep.
- *
- * If a command has disconnected, eventually an interrupt will trigger,
- * calling NCR5380_intr()  which will in turn call NCR5380_reselect
- * to reestablish a nexus.  This will run main if necessary.
- *
- * On command termination, the done function will be called as
- * appropriate.
- *
- * SCSI pointers are maintained in the SCp field of SCSI command
- * structures, being initialized after the command is connected
- * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
- * Note that in violation of the standard, an implicit SAVE POINTERS operation
- * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
- */
-
-/*
- * Using this file :
- * This file a skeleton Linux SCSI driver for the NCR 5380 series
- * of chips.  To use it, you write an architecture specific functions
- * and macros and include this file in your driver.
- *
- * These macros control options :
- * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
- * for commands that return with a CHECK CONDITION status.
- *
- * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
- * transceivers.
- *
- * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
- *
- * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible
- *
- * These macros MUST be defined :
- *
- * NCR5380_read(register)  - read from the specified register
- *
- * NCR5380_write(register, value) - write to the specific register
- *
- * NCR5380_implementation_fields  - additional fields needed for this
- * specific implementation of the NCR5380
- *
- * Either real DMA *or* pseudo DMA may be implemented
- * REAL functions :
- * NCR5380_REAL_DMA should be defined if real DMA is to be used.
- * Note that the DMA setup functions should return the number of bytes
- * that they were able to program the controller for.
- *
- * Also note that generic i386/PC versions of these macros are
- * available as NCR5380_i386_dma_write_setup,
- * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
- *
- * NCR5380_dma_write_setup(instance, src, count) - initialize
- * NCR5380_dma_read_setup(instance, dst, count) - initialize
- * NCR5380_dma_residual(instance); - residual count
- *
- * PSEUDO functions :
- * NCR5380_pwrite(instance, src, count)
- * NCR5380_pread(instance, dst, count);
- *
- * The generic driver is initialized by calling NCR5380_init(instance),
- * after setting the appropriate host specific fields and ID.  If the
- * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
- * possible) function may be used.
- */
-
-static int do_abort(struct Scsi_Host *);
-static void do_reset(struct Scsi_Host *);
-
-#ifdef SUPPORT_TAGS
-
-/*
- * Functions for handling tagged queuing
- * =====================================
- *
- * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes:
- *
- * Using consecutive numbers for the tags is no good idea in my eyes. There
- * could be wrong re-usings if the counter (8 bit!) wraps and some early
- * command has been preempted for a long time. My solution: a bitfield for
- * remembering used tags.
- *
- * There's also the problem that each target has a certain queue size, but we
- * cannot know it in advance :-( We just see a QUEUE_FULL status being
- * returned. So, in this case, the driver internal queue size assumption is
- * reduced to the number of active tags if QUEUE_FULL is returned by the
- * target.
- *
- * We're also not allowed running tagged commands as long as an untagged
- * command is active. And REQUEST SENSE commands after a contingent allegiance
- * condition _must_ be untagged. To keep track whether an untagged command has
- * been issued, the host->busy array is still employed, as it is without
- * support for tagged queuing.
- *
- * One could suspect that there are possible race conditions between
- * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the
- * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(),
- * which already guaranteed to be running at most once. It is also the only
- * place where tags/LUNs are allocated. So no other allocation can slip
- * between that pair, there could only happen a reselection, which can free a
- * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes
- * important: the tag bit must be cleared before 'nr_allocated' is decreased.
- */
-
-static void __init init_tags(struct NCR5380_hostdata *hostdata)
-{
-	int target, lun;
-	struct tag_alloc *ta;
-
-	if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
-		return;
-
-	for (target = 0; target < 8; ++target) {
-		for (lun = 0; lun < 8; ++lun) {
-			ta = &hostdata->TagAlloc[target][lun];
-			bitmap_zero(ta->allocated, MAX_TAGS);
-			ta->nr_allocated = 0;
-			/* At the beginning, assume the maximum queue size we could
-			 * support (MAX_TAGS). This value will be decreased if the target
-			 * returns QUEUE_FULL status.
-			 */
-			ta->queue_size = MAX_TAGS;
-		}
-	}
-}
-
-
-/* Check if we can issue a command to this LUN: First see if the LUN is marked
- * busy by an untagged command. If the command should use tagged queuing, also
- * check that there is a free tag and the target's queue won't overflow. This
- * function should be called with interrupts disabled to avoid race
- * conditions.
- */
-
-static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
-{
-	u8 lun = cmd->device->lun;
-	struct Scsi_Host *instance = cmd->device->host;
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	if (hostdata->busy[cmd->device->id] & (1 << lun))
-		return 1;
-	if (!should_be_tagged ||
-	    !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
-	    !cmd->device->tagged_supported)
-		return 0;
-	if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >=
-	    hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) {
-		dsprintk(NDEBUG_TAGS, instance, "target %d lun %d: no free tags\n",
-		         scmd_id(cmd), lun);
-		return 1;
-	}
-	return 0;
-}
-
-
-/* Allocate a tag for a command (there are no checks anymore, check_lun_busy()
- * must be called before!), or reserve the LUN in 'busy' if the command is
- * untagged.
- */
-
-static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
-{
-	u8 lun = cmd->device->lun;
-	struct Scsi_Host *instance = cmd->device->host;
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	/* If we or the target don't support tagged queuing, allocate the LUN for
-	 * an untagged command.
-	 */
-	if (!should_be_tagged ||
-	    !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
-	    !cmd->device->tagged_supported) {
-		cmd->tag = TAG_NONE;
-		hostdata->busy[cmd->device->id] |= (1 << lun);
-		dsprintk(NDEBUG_TAGS, instance, "target %d lun %d now allocated by untagged command\n",
-		         scmd_id(cmd), lun);
-	} else {
-		struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
-
-		cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
-		set_bit(cmd->tag, ta->allocated);
-		ta->nr_allocated++;
-		dsprintk(NDEBUG_TAGS, instance, "using tag %d for target %d lun %d (%d tags allocated)\n",
-		         cmd->tag, scmd_id(cmd), lun, ta->nr_allocated);
-	}
-}
-
-
-/* Mark the tag of command 'cmd' as free, or in case of an untagged command,
- * unlock the LUN.
- */
-
-static void cmd_free_tag(struct scsi_cmnd *cmd)
-{
-	u8 lun = cmd->device->lun;
-	struct Scsi_Host *instance = cmd->device->host;
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	if (cmd->tag == TAG_NONE) {
-		hostdata->busy[cmd->device->id] &= ~(1 << lun);
-		dsprintk(NDEBUG_TAGS, instance, "target %d lun %d untagged cmd freed\n",
-		         scmd_id(cmd), lun);
-	} else if (cmd->tag >= MAX_TAGS) {
-		shost_printk(KERN_NOTICE, instance,
-		             "trying to free bad tag %d!\n", cmd->tag);
-	} else {
-		struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
-		clear_bit(cmd->tag, ta->allocated);
-		ta->nr_allocated--;
-		dsprintk(NDEBUG_TAGS, instance, "freed tag %d for target %d lun %d\n",
-		         cmd->tag, scmd_id(cmd), lun);
-	}
-}
-
-
-static void free_all_tags(struct NCR5380_hostdata *hostdata)
-{
-	int target, lun;
-	struct tag_alloc *ta;
-
-	if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
-		return;
-
-	for (target = 0; target < 8; ++target) {
-		for (lun = 0; lun < 8; ++lun) {
-			ta = &hostdata->TagAlloc[target][lun];
-			bitmap_zero(ta->allocated, MAX_TAGS);
-			ta->nr_allocated = 0;
-		}
-	}
-}
-
-#endif /* SUPPORT_TAGS */
-
-/**
- * merge_contiguous_buffers - coalesce scatter-gather list entries
- * @cmd: command requesting IO
- *
- * Try to merge several scatter-gather buffers into one DMA transfer.
- * This is possible if the scatter buffers lie on physically
- * contiguous addresses. The first scatter-gather buffer's data are
- * assumed to be already transferred into cmd->SCp.this_residual.
- * Every buffer merged avoids an interrupt and a DMA setup operation.
- */
-
-static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
-{
-#if !defined(CONFIG_SUN3)
-	unsigned long endaddr;
-#if (NDEBUG & NDEBUG_MERGING)
-	unsigned long oldlen = cmd->SCp.this_residual;
-	int cnt = 1;
-#endif
-
-	for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
-	     cmd->SCp.buffers_residual &&
-	     virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) {
-		dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n",
-			   page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);
-#if (NDEBUG & NDEBUG_MERGING)
-		++cnt;
-#endif
-		++cmd->SCp.buffer;
-		--cmd->SCp.buffers_residual;
-		cmd->SCp.this_residual += cmd->SCp.buffer->length;
-		endaddr += cmd->SCp.buffer->length;
-	}
-#if (NDEBUG & NDEBUG_MERGING)
-	if (oldlen != cmd->SCp.this_residual)
-		dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
-			   cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
-#endif
-#endif /* !defined(CONFIG_SUN3) */
-}
-
-/**
- * initialize_SCp - init the scsi pointer field
- * @cmd: command block to set up
- *
- * Set up the internal fields in the SCSI command.
- */
-
-static inline void initialize_SCp(struct scsi_cmnd *cmd)
-{
-	/*
-	 * Initialize the Scsi Pointer field so that all of the commands in the
-	 * various queues are valid.
-	 */
-
-	if (scsi_bufflen(cmd)) {
-		cmd->SCp.buffer = scsi_sglist(cmd);
-		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
-		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-		cmd->SCp.this_residual = cmd->SCp.buffer->length;
-
-		merge_contiguous_buffers(cmd);
-	} else {
-		cmd->SCp.buffer = NULL;
-		cmd->SCp.buffers_residual = 0;
-		cmd->SCp.ptr = NULL;
-		cmd->SCp.this_residual = 0;
-	}
-
-	cmd->SCp.Status = 0;
-	cmd->SCp.Message = 0;
-}
-
-/**
- * NCR5380_poll_politely2 - wait for two chip register values
- * @instance: controller to poll
- * @reg1: 5380 register to poll
- * @bit1: Bitmask to check
- * @val1: Expected value
- * @reg2: Second 5380 register to poll
- * @bit2: Second bitmask to check
- * @val2: Second expected value
- * @wait: Time-out in jiffies
- *
- * Polls the chip in a reasonably efficient manner waiting for an
- * event to occur. After a short quick poll we begin to yield the CPU
- * (if possible). In irq contexts the time-out is arbitrarily limited.
- * Callers may hold locks as long as they are held in irq mode.
- *
- * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT.
- */
-
-static int NCR5380_poll_politely2(struct Scsi_Host *instance,
-                                  int reg1, int bit1, int val1,
-                                  int reg2, int bit2, int val2, int wait)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	unsigned long deadline = jiffies + wait;
-	unsigned long n;
-
-	/* Busy-wait for up to 10 ms */
-	n = min(10000U, jiffies_to_usecs(wait));
-	n *= hostdata->accesses_per_ms;
-	n /= 2000;
-	do {
-		if ((NCR5380_read(reg1) & bit1) == val1)
-			return 0;
-		if ((NCR5380_read(reg2) & bit2) == val2)
-			return 0;
-		cpu_relax();
-	} while (n--);
-
-	if (irqs_disabled() || in_interrupt())
-		return -ETIMEDOUT;
-
-	/* Repeatedly sleep for 1 ms until deadline */
-	while (time_is_after_jiffies(deadline)) {
-		schedule_timeout_uninterruptible(1);
-		if ((NCR5380_read(reg1) & bit1) == val1)
-			return 0;
-		if ((NCR5380_read(reg2) & bit2) == val2)
-			return 0;
-	}
-
-	return -ETIMEDOUT;
-}
-
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
-                                        int reg, int bit, int val, int wait)
-{
-	return NCR5380_poll_politely2(instance, reg, bit, val,
-	                                        reg, bit, val, wait);
-}
-
-#if NDEBUG
-static struct {
-	unsigned char mask;
-	const char *name;
-} signals[] = {
-	{SR_DBP, "PARITY"},
-	{SR_RST, "RST"},
-	{SR_BSY, "BSY"},
-	{SR_REQ, "REQ"},
-	{SR_MSG, "MSG"},
-	{SR_CD, "CD"},
-	{SR_IO, "IO"},
-	{SR_SEL, "SEL"},
-	{0, NULL}
-},
-basrs[] = {
-	{BASR_ATN, "ATN"},
-	{BASR_ACK, "ACK"},
-	{0, NULL}
-},
-icrs[] = {
-	{ICR_ASSERT_RST, "ASSERT RST"},
-	{ICR_ASSERT_ACK, "ASSERT ACK"},
-	{ICR_ASSERT_BSY, "ASSERT BSY"},
-	{ICR_ASSERT_SEL, "ASSERT SEL"},
-	{ICR_ASSERT_ATN, "ASSERT ATN"},
-	{ICR_ASSERT_DATA, "ASSERT DATA"},
-	{0, NULL}
-},
-mrs[] = {
-	{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"},
-	{MR_TARGET, "MODE TARGET"},
-	{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"},
-	{MR_ENABLE_PAR_INTR, "MODE PARITY INTR"},
-	{MR_ENABLE_EOP_INTR, "MODE EOP INTR"},
-	{MR_MONITOR_BSY, "MODE MONITOR BSY"},
-	{MR_DMA_MODE, "MODE DMA"},
-	{MR_ARBITRATE, "MODE ARBITRATION"},
-	{0, NULL}
-};
-
-/**
- * NCR5380_print - print scsi bus signals
- * @instance: adapter state to dump
- *
- * Print the SCSI bus signals for debugging purposes
- */
-
-static void NCR5380_print(struct Scsi_Host *instance)
-{
-	unsigned char status, data, basr, mr, icr, i;
-
-	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
-	status = NCR5380_read(STATUS_REG);
-	mr = NCR5380_read(MODE_REG);
-	icr = NCR5380_read(INITIATOR_COMMAND_REG);
-	basr = NCR5380_read(BUS_AND_STATUS_REG);
-
-	printk("STATUS_REG: %02x ", status);
-	for (i = 0; signals[i].mask; ++i)
-		if (status & signals[i].mask)
-			printk(",%s", signals[i].name);
-	printk("\nBASR: %02x ", basr);
-	for (i = 0; basrs[i].mask; ++i)
-		if (basr & basrs[i].mask)
-			printk(",%s", basrs[i].name);
-	printk("\nICR: %02x ", icr);
-	for (i = 0; icrs[i].mask; ++i)
-		if (icr & icrs[i].mask)
-			printk(",%s", icrs[i].name);
-	printk("\nMODE: %02x ", mr);
-	for (i = 0; mrs[i].mask; ++i)
-		if (mr & mrs[i].mask)
-			printk(",%s", mrs[i].name);
-	printk("\n");
-}
-
-static struct {
-	unsigned char value;
-	const char *name;
-} phases[] = {
-	{PHASE_DATAOUT, "DATAOUT"},
-	{PHASE_DATAIN, "DATAIN"},
-	{PHASE_CMDOUT, "CMDOUT"},
-	{PHASE_STATIN, "STATIN"},
-	{PHASE_MSGOUT, "MSGOUT"},
-	{PHASE_MSGIN, "MSGIN"},
-	{PHASE_UNKNOWN, "UNKNOWN"}
-};
-
-/**
- * NCR5380_print_phase - show SCSI phase
- * @instance: adapter to dump
- *
- * Print the current SCSI phase for debugging purposes
- */
-
-static void NCR5380_print_phase(struct Scsi_Host *instance)
-{
-	unsigned char status;
-	int i;
-
-	status = NCR5380_read(STATUS_REG);
-	if (!(status & SR_REQ))
-		shost_printk(KERN_DEBUG, instance, "REQ not asserted, phase unknown.\n");
-	else {
-		for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
-		     (phases[i].value != (status & PHASE_MASK)); ++i)
-			;
-		shost_printk(KERN_DEBUG, instance, "phase %s\n", phases[i].name);
-	}
-}
-#endif
-
-/**
- * NCR58380_info - report driver and host information
- * @instance: relevant scsi host instance
- *
- * For use as the host template info() handler.
- */
-
-static const char *NCR5380_info(struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	return hostdata->info;
-}
-
-static void prepare_info(struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	snprintf(hostdata->info, sizeof(hostdata->info),
-	         "%s, io_port 0x%lx, n_io_port %d, "
-	         "base 0x%lx, irq %d, "
-	         "can_queue %d, cmd_per_lun %d, "
-	         "sg_tablesize %d, this_id %d, "
-	         "flags { %s%s}, "
-	         "options { %s} ",
-	         instance->hostt->name, instance->io_port, instance->n_io_port,
-	         instance->base, instance->irq,
-	         instance->can_queue, instance->cmd_per_lun,
-	         instance->sg_tablesize, instance->this_id,
-	         hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "",
-	         hostdata->flags & FLAG_TOSHIBA_DELAY  ? "TOSHIBA_DELAY "  : "",
-#ifdef DIFFERENTIAL
-	         "DIFFERENTIAL "
-#endif
-#ifdef REAL_DMA
-	         "REAL_DMA "
-#endif
-#ifdef PARITY
-	         "PARITY "
-#endif
-#ifdef SUPPORT_TAGS
-	         "SUPPORT_TAGS "
-#endif
-	         "");
-}
-
-/**
- * NCR5380_init - initialise an NCR5380
- * @instance: adapter to configure
- * @flags: control flags
- *
- * Initializes *instance and corresponding 5380 chip,
- * with flags OR'd into the initial flags value.
- *
- * Notes : I assume that the host, hostno, and id bits have been
- * set correctly. I don't care about the irq and other fields.
- *
- * Returns 0 for success
- */
-
-static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	int i;
-	unsigned long deadline;
-
-	hostdata->host = instance;
-	hostdata->id_mask = 1 << instance->this_id;
-	hostdata->id_higher_mask = 0;
-	for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
-		if (i > hostdata->id_mask)
-			hostdata->id_higher_mask |= i;
-	for (i = 0; i < 8; ++i)
-		hostdata->busy[i] = 0;
-#ifdef SUPPORT_TAGS
-	init_tags(hostdata);
-#endif
-#if defined (REAL_DMA)
-	hostdata->dma_len = 0;
-#endif
-	spin_lock_init(&hostdata->lock);
-	hostdata->connected = NULL;
-	hostdata->sensing = NULL;
-	INIT_LIST_HEAD(&hostdata->autosense);
-	INIT_LIST_HEAD(&hostdata->unissued);
-	INIT_LIST_HEAD(&hostdata->disconnected);
-
-	hostdata->flags = flags;
-
-	INIT_WORK(&hostdata->main_task, NCR5380_main);
-	hostdata->work_q = alloc_workqueue("ncr5380_%d",
-	                        WQ_UNBOUND | WQ_MEM_RECLAIM,
-	                        1, instance->host_no);
-	if (!hostdata->work_q)
-		return -ENOMEM;
-
-	prepare_info(instance);
-
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-	NCR5380_write(MODE_REG, MR_BASE);
-	NCR5380_write(TARGET_COMMAND_REG, 0);
-	NCR5380_write(SELECT_ENABLE_REG, 0);
-
-	/* Calibrate register polling loop */
-	i = 0;
-	deadline = jiffies + 1;
-	do {
-		cpu_relax();
-	} while (time_is_after_jiffies(deadline));
-	deadline += msecs_to_jiffies(256);
-	do {
-		NCR5380_read(STATUS_REG);
-		++i;
-		cpu_relax();
-	} while (time_is_after_jiffies(deadline));
-	hostdata->accesses_per_ms = i / 256;
-
-	return 0;
-}
-
-/**
- * NCR5380_maybe_reset_bus - Detect and correct bus wedge problems.
- * @instance: adapter to check
- *
- * If the system crashed, it may have crashed with a connected target and
- * the SCSI bus busy. Check for BUS FREE phase. If not, try to abort the
- * currently established nexus, which we know nothing about. Failing that
- * do a bus reset.
- *
- * Note that a bus reset will cause the chip to assert IRQ.
- *
- * Returns 0 if successful, otherwise -ENXIO.
- */
-
-static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	int pass;
-
-	for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) {
-		switch (pass) {
-		case 1:
-		case 3:
-		case 5:
-			shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n");
-			NCR5380_poll_politely(instance,
-			                      STATUS_REG, SR_BSY, 0, 5 * HZ);
-			break;
-		case 2:
-			shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n");
-			do_abort(instance);
-			break;
-		case 4:
-			shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n");
-			do_reset(instance);
-			/* Wait after a reset; the SCSI standard calls for
-			 * 250ms, we wait 500ms to be on the safe side.
-			 * But some Toshiba CD-ROMs need ten times that.
-			 */
-			if (hostdata->flags & FLAG_TOSHIBA_DELAY)
-				msleep(2500);
-			else
-				msleep(500);
-			break;
-		case 6:
-			shost_printk(KERN_ERR, instance, "bus locked solid\n");
-			return -ENXIO;
-		}
-	}
-	return 0;
-}
-
-/**
- * NCR5380_exit - remove an NCR5380
- * @instance: adapter to remove
- *
- * Assumes that no more work can be queued (e.g. by NCR5380_intr).
- */
-
-static void NCR5380_exit(struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	cancel_work_sync(&hostdata->main_task);
-	destroy_workqueue(hostdata->work_q);
-}
-
-/**
- * complete_cmd - finish processing a command and return it to the SCSI ML
- * @instance: the host instance
- * @cmd: command to complete
- */
-
-static void complete_cmd(struct Scsi_Host *instance,
-                         struct scsi_cmnd *cmd)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	dsprintk(NDEBUG_QUEUES, instance, "complete_cmd: cmd %p\n", cmd);
-
-	if (hostdata->sensing == cmd) {
-		/* Autosense processing ends here */
-		if ((cmd->result & 0xff) != SAM_STAT_GOOD) {
-			scsi_eh_restore_cmnd(cmd, &hostdata->ses);
-			set_host_byte(cmd, DID_ERROR);
-		} else
-			scsi_eh_restore_cmnd(cmd, &hostdata->ses);
-		hostdata->sensing = NULL;
-	}
-
-#ifdef SUPPORT_TAGS
-	cmd_free_tag(cmd);
-#else
-	hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
-#endif
-	cmd->scsi_done(cmd);
-}
-
-/**
- * NCR5380_queue_command - queue a command
- * @instance: the relevant SCSI adapter
- * @cmd: SCSI command
- *
- * cmd is added to the per-instance issue queue, with minor
- * twiddling done to the host specific fields of cmd.  If the
- * main coroutine is not running, it is restarted.
- */
-
-static int NCR5380_queue_command(struct Scsi_Host *instance,
-                                 struct scsi_cmnd *cmd)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-	unsigned long flags;
-
-#if (NDEBUG & NDEBUG_NO_WRITE)
-	switch (cmd->cmnd[0]) {
-	case WRITE_6:
-	case WRITE_10:
-		shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n");
-		cmd->result = (DID_ERROR << 16);
-		cmd->scsi_done(cmd);
-		return 0;
-	}
-#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
-
-	cmd->result = 0;
-
-	/*
-	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
-	 * because also a timer int can trigger an abort or reset, which would
-	 * alter queues and touch the lock.
-	 */
-	if (!NCR5380_acquire_dma_irq(instance))
-		return SCSI_MLQUEUE_HOST_BUSY;
-
-	spin_lock_irqsave(&hostdata->lock, flags);
-
-	/*
-	 * Insert the cmd into the issue queue. Note that REQUEST SENSE
-	 * commands are added to the head of the queue since any command will
-	 * clear the contingent allegiance condition that exists and the
-	 * sense data is only guaranteed to be valid while the condition exists.
-	 */
-
-	if (cmd->cmnd[0] == REQUEST_SENSE)
-		list_add(&ncmd->list, &hostdata->unissued);
-	else
-		list_add_tail(&ncmd->list, &hostdata->unissued);
-
-	spin_unlock_irqrestore(&hostdata->lock, flags);
-
-	dsprintk(NDEBUG_QUEUES, instance, "command %p added to %s of queue\n",
-	         cmd, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
-
-	/* Kick off command processing */
-	queue_work(hostdata->work_q, &hostdata->main_task);
-	return 0;
-}
-
-static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	/* Caller does the locking needed to set & test these data atomically */
-	if (list_empty(&hostdata->disconnected) &&
-	    list_empty(&hostdata->unissued) &&
-	    list_empty(&hostdata->autosense) &&
-	    !hostdata->connected &&
-	    !hostdata->selecting)
-		NCR5380_release_dma_irq(instance);
-}
-
-/**
- * dequeue_next_cmd - dequeue a command for processing
- * @instance: the scsi host instance
- *
- * Priority is given to commands on the autosense queue. These commands
- * need autosense because of a CHECK CONDITION result.
- *
- * Returns a command pointer if a command is found for a target that is
- * not already busy. Otherwise returns NULL.
- */
-
-static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	struct NCR5380_cmd *ncmd;
-	struct scsi_cmnd *cmd;
-
-	if (hostdata->sensing || list_empty(&hostdata->autosense)) {
-		list_for_each_entry(ncmd, &hostdata->unissued, list) {
-			cmd = NCR5380_to_scmd(ncmd);
-			dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
-			         cmd, scmd_id(cmd), hostdata->busy[scmd_id(cmd)], cmd->device->lun);
-
-			if (
-#ifdef SUPPORT_TAGS
-			    !is_lun_busy(cmd, 1)
-#else
-			    !(hostdata->busy[scmd_id(cmd)] & (1 << cmd->device->lun))
-#endif
-			) {
-				list_del(&ncmd->list);
-				dsprintk(NDEBUG_QUEUES, instance,
-				         "dequeue: removed %p from issue queue\n", cmd);
-				return cmd;
-			}
-		}
-	} else {
-		/* Autosense processing begins here */
-		ncmd = list_first_entry(&hostdata->autosense,
-		                        struct NCR5380_cmd, list);
-		list_del(&ncmd->list);
-		cmd = NCR5380_to_scmd(ncmd);
-		dsprintk(NDEBUG_QUEUES, instance,
-		         "dequeue: removed %p from autosense queue\n", cmd);
-		scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
-		hostdata->sensing = cmd;
-		return cmd;
-	}
-	return NULL;
-}
-
-static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-
-	if (hostdata->sensing == cmd) {
-		scsi_eh_restore_cmnd(cmd, &hostdata->ses);
-		list_add(&ncmd->list, &hostdata->autosense);
-		hostdata->sensing = NULL;
-	} else
-		list_add(&ncmd->list, &hostdata->unissued);
-}
-
-/**
- * NCR5380_main - NCR state machines
- *
- * NCR5380_main is a coroutine that runs as long as more work can
- * be done on the NCR5380 host adapters in a system.  Both
- * NCR5380_queue_command() and NCR5380_intr() will try to start it
- * in case it is not running.
- */
-
-static void NCR5380_main(struct work_struct *work)
-{
-	struct NCR5380_hostdata *hostdata =
-		container_of(work, struct NCR5380_hostdata, main_task);
-	struct Scsi_Host *instance = hostdata->host;
-	int done;
-
-	/*
-	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
-	 * because also a timer int can trigger an abort or reset, which can
-	 * alter queues and touch the Falcon lock.
-	 */
-
-	do {
-		done = 1;
-
-		spin_lock_irq(&hostdata->lock);
-		while (!hostdata->connected && !hostdata->selecting) {
-			struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
-
-			if (!cmd)
-				break;
-
-			dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
-
-			/*
-			 * Attempt to establish an I_T_L nexus here.
-			 * On success, instance->hostdata->connected is set.
-			 * On failure, we must add the command back to the
-			 * issue queue so we can keep trying.
-			 */
-			/*
-			 * REQUEST SENSE commands are issued without tagged
-			 * queueing, even on SCSI-II devices because the
-			 * contingent allegiance condition exists for the
-			 * entire unit.
-			 */
-			/* ++roman: ...and the standard also requires that
-			 * REQUEST SENSE command are untagged.
-			 */
-
-#ifdef SUPPORT_TAGS
-			cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
-#endif
-			if (!NCR5380_select(instance, cmd)) {
-				dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
-				maybe_release_dma_irq(instance);
-			} else {
-				dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
-				         "main: select failed, returning %p to queue\n", cmd);
-				requeue_cmd(instance, cmd);
-#ifdef SUPPORT_TAGS
-				cmd_free_tag(cmd);
-#endif
-			}
-		}
-		if (hostdata->connected
-#ifdef REAL_DMA
-		    && !hostdata->dma_len
-#endif
-		    ) {
-			dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n");
-			NCR5380_information_transfer(instance);
-			done = 0;
-		}
-		spin_unlock_irq(&hostdata->lock);
-		if (!done)
-			cond_resched();
-	} while (!done);
-}
-
-
-#ifdef REAL_DMA
-/*
- * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
- *
- * Purpose : Called by interrupt handler when DMA finishes or a phase
- * mismatch occurs (which would finish the DMA transfer).
- *
- * Inputs : instance - this instance of the NCR5380.
- */
-
-static void NCR5380_dma_complete(struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	int transferred;
-	unsigned char **data;
-	int *count;
-	int saved_data = 0, overrun = 0;
-	unsigned char p;
-
-	if (hostdata->read_overruns) {
-		p = hostdata->connected->SCp.phase;
-		if (p & SR_IO) {
-			udelay(10);
-			if ((NCR5380_read(BUS_AND_STATUS_REG) &
-			     (BASR_PHASE_MATCH|BASR_ACK)) ==
-			    (BASR_PHASE_MATCH|BASR_ACK)) {
-				saved_data = NCR5380_read(INPUT_DATA_REG);
-				overrun = 1;
-				dsprintk(NDEBUG_DMA, instance, "read overrun handled\n");
-			}
-		}
-	}
-
-#if defined(CONFIG_SUN3)
-	if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
-		pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
-		       instance->host_no);
-		BUG();
-	}
-
-	/* make sure we're not stuck in a data phase */
-	if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
-	    (BASR_PHASE_MATCH | BASR_ACK)) {
-		pr_err("scsi%d: BASR %02x\n", instance->host_no,
-		       NCR5380_read(BUS_AND_STATUS_REG));
-		pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n",
-		       instance->host_no);
-		BUG();
-	}
-#endif
-
-	NCR5380_write(MODE_REG, MR_BASE);
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-	NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-
-	transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
-	hostdata->dma_len = 0;
-
-	data = (unsigned char **)&hostdata->connected->SCp.ptr;
-	count = &hostdata->connected->SCp.this_residual;
-	*data += transferred;
-	*count -= transferred;
-
-	if (hostdata->read_overruns) {
-		int cnt, toPIO;
-
-		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
-			cnt = toPIO = hostdata->read_overruns;
-			if (overrun) {
-				dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
-				*(*data)++ = saved_data;
-				(*count)--;
-				cnt--;
-				toPIO--;
-			}
-			dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
-			NCR5380_transfer_pio(instance, &p, &cnt, data);
-			*count -= toPIO - cnt;
-		}
-	}
-}
-#endif /* REAL_DMA */
-
-
-/**
- * NCR5380_intr - generic NCR5380 irq handler
- * @irq: interrupt number
- * @dev_id: device info
- *
- * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
- * from the disconnected queue, and restarting NCR5380_main()
- * as required.
- *
- * The chip can assert IRQ in any of six different conditions. The IRQ flag
- * is then cleared by reading the Reset Parity/Interrupt Register (RPIR).
- * Three of these six conditions are latched in the Bus and Status Register:
- * - End of DMA (cleared by ending DMA Mode)
- * - Parity error (cleared by reading RPIR)
- * - Loss of BSY (cleared by reading RPIR)
- * Two conditions have flag bits that are not latched:
- * - Bus phase mismatch (non-maskable in DMA Mode, cleared by ending DMA Mode)
- * - Bus reset (non-maskable)
- * The remaining condition has no flag bit at all:
- * - Selection/reselection
- *
- * Hence, establishing the cause(s) of any interrupt is partly guesswork.
- * In "The DP8490 and DP5380 Comparison Guide", National Semiconductor
- * claimed that "the design of the [DP8490] interrupt logic ensures
- * interrupts will not be lost (they can be on the DP5380)."
- * The L5380/53C80 datasheet from LOGIC Devices has more details.
- *
- * Checking for bus reset by reading RST is futile because of interrupt
- * latency, but a bus reset will reset chip logic. Checking for parity error
- * is unnecessary because that interrupt is never enabled. A Loss of BSY
- * condition will clear DMA Mode. We can tell when this occurs because the
- * the Busy Monitor interrupt is enabled together with DMA Mode.
- */
-
-static irqreturn_t NCR5380_intr(int irq, void *dev_id)
-{
-	struct Scsi_Host *instance = dev_id;
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	int handled = 0;
-	unsigned char basr;
-	unsigned long flags;
-
-	spin_lock_irqsave(&hostdata->lock, flags);
-
-	basr = NCR5380_read(BUS_AND_STATUS_REG);
-	if (basr & BASR_IRQ) {
-		unsigned char mr = NCR5380_read(MODE_REG);
-		unsigned char sr = NCR5380_read(STATUS_REG);
-
-		dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n",
-		         irq, basr, sr, mr);
-
-#if defined(REAL_DMA)
-		if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) {
-			/* Probably End of DMA, Phase Mismatch or Loss of BSY.
-			 * We ack IRQ after clearing Mode Register. Workarounds
-			 * for End of DMA errata need to happen in DMA Mode.
-			 */
-
-			dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n");
-
-			if (hostdata->connected) {
-				NCR5380_dma_complete(instance);
-				queue_work(hostdata->work_q, &hostdata->main_task);
-			} else {
-				NCR5380_write(MODE_REG, MR_BASE);
-				NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-			}
-		} else
-#endif /* REAL_DMA */
-		if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) &&
-		    (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) {
-			/* Probably reselected */
-			NCR5380_write(SELECT_ENABLE_REG, 0);
-			NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-
-			dsprintk(NDEBUG_INTR, instance, "interrupt with SEL and IO\n");
-
-			if (!hostdata->connected) {
-				NCR5380_reselect(instance);
-				queue_work(hostdata->work_q, &hostdata->main_task);
-			}
-			if (!hostdata->connected)
-				NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-		} else {
-			/* Probably Bus Reset */
-			NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-
-			dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
-#ifdef SUN3_SCSI_VME
-			dregs->csr |= CSR_DMA_ENABLE;
-#endif
-		}
-		handled = 1;
-	} else {
-		shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
-#ifdef SUN3_SCSI_VME
-		dregs->csr |= CSR_DMA_ENABLE;
-#endif
-	}
-
-	spin_unlock_irqrestore(&hostdata->lock, flags);
-
-	return IRQ_RETVAL(handled);
-}
-
-/*
- * Function : int NCR5380_select(struct Scsi_Host *instance,
- * struct scsi_cmnd *cmd)
- *
- * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
- * including ARBITRATION, SELECTION, and initial message out for
- * IDENTIFY and queue messages.
- *
- * Inputs : instance - instantiation of the 5380 driver on which this
- * target lives, cmd - SCSI command to execute.
- *
- * Returns cmd if selection failed but should be retried,
- * NULL if selection failed and should not be retried, or
- * NULL if selection succeeded (hostdata->connected == cmd).
- *
- * Side effects :
- * If bus busy, arbitration failed, etc, NCR5380_select() will exit
- * with registers as they should have been on entry - ie
- * SELECT_ENABLE will be set appropriately, the NCR5380
- * will cease to drive any SCSI bus signals.
- *
- * If successful : I_T_L or I_T_L_Q nexus will be established,
- * instance->connected will be set to cmd.
- * SELECT interrupt will be disabled.
- *
- * If failed (no target) : cmd->scsi_done() will be called, and the
- * cmd->result host byte set to DID_BAD_TARGET.
- */
-
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
-                                        struct scsi_cmnd *cmd)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	unsigned char tmp[3], phase;
-	unsigned char *data;
-	int len;
-	int err;
-
-	NCR5380_dprint(NDEBUG_ARBITRATION, instance);
-	dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
-	         instance->this_id);
-
-	/*
-	 * Arbitration and selection phases are slow and involve dropping the
-	 * lock, so we have to watch out for EH. An exception handler may
-	 * change 'selecting' to NULL. This function will then return NULL
-	 * so that the caller will forget about 'cmd'. (During information
-	 * transfer phases, EH may change 'connected' to NULL.)
-	 */
-	hostdata->selecting = cmd;
-
-	/*
-	 * Set the phase bits to 0, otherwise the NCR5380 won't drive the
-	 * data bus during SELECTION.
-	 */
-
-	NCR5380_write(TARGET_COMMAND_REG, 0);
-
-	/*
-	 * Start arbitration.
-	 */
-
-	NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
-	NCR5380_write(MODE_REG, MR_ARBITRATE);
-
-	/* The chip now waits for BUS FREE phase. Then after the 800 ns
-	 * Bus Free Delay, arbitration will begin.
-	 */
-
-	spin_unlock_irq(&hostdata->lock);
-	err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0,
-	                INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS,
-	                                       ICR_ARBITRATION_PROGRESS, HZ);
-	spin_lock_irq(&hostdata->lock);
-	if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) {
-		/* Reselection interrupt */
-		goto out;
-	}
-	if (!hostdata->selecting) {
-		/* Command was aborted */
-		NCR5380_write(MODE_REG, MR_BASE);
-		goto out;
-	}
-	if (err < 0) {
-		NCR5380_write(MODE_REG, MR_BASE);
-		shost_printk(KERN_ERR, instance,
-		             "select: arbitration timeout\n");
-		goto out;
-	}
-	spin_unlock_irq(&hostdata->lock);
-
-	/* The SCSI-2 arbitration delay is 2.4 us */
-	udelay(3);
-
-	/* Check for lost arbitration */
-	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
-	    (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
-	    (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
-		NCR5380_write(MODE_REG, MR_BASE);
-		dsprintk(NDEBUG_ARBITRATION, instance, "lost arbitration, deasserting MR_ARBITRATE\n");
-		spin_lock_irq(&hostdata->lock);
-		goto out;
-	}
-
-	/* After/during arbitration, BSY should be asserted.
-	 * IBM DPES-31080 Version S31Q works now
-	 * Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman)
-	 */
-	NCR5380_write(INITIATOR_COMMAND_REG,
-		      ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY);
-
-	/*
-	 * Again, bus clear + bus settle time is 1.2us, however, this is
-	 * a minimum so we'll udelay ceil(1.2)
-	 */
-
-	if (hostdata->flags & FLAG_TOSHIBA_DELAY)
-		udelay(15);
-	else
-		udelay(2);
-
-	spin_lock_irq(&hostdata->lock);
-
-	/* NCR5380_reselect() clears MODE_REG after a reselection interrupt */
-	if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE))
-		goto out;
-
-	if (!hostdata->selecting) {
-		NCR5380_write(MODE_REG, MR_BASE);
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		goto out;
-	}
-
-	dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
-
-	/*
-	 * Now that we have won arbitration, start Selection process, asserting
-	 * the host and target ID's on the SCSI bus.
-	 */
-
-	NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask | (1 << scmd_id(cmd)));
-
-	/*
-	 * Raise ATN while SEL is true before BSY goes false from arbitration,
-	 * since this is the only way to guarantee that we'll get a MESSAGE OUT
-	 * phase immediately after selection.
-	 */
-
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY |
-	              ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL);
-	NCR5380_write(MODE_REG, MR_BASE);
-
-	/*
-	 * Reselect interrupts must be turned off prior to the dropping of BSY,
-	 * otherwise we will trigger an interrupt.
-	 */
-	NCR5380_write(SELECT_ENABLE_REG, 0);
-
-	spin_unlock_irq(&hostdata->lock);
-
-	/*
-	 * The initiator shall then wait at least two deskew delays and release
-	 * the BSY signal.
-	 */
-	udelay(1);        /* wingel -- wait two bus deskew delay >2*45ns */
-
-	/* Reset BSY */
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA |
-	              ICR_ASSERT_ATN | ICR_ASSERT_SEL);
-
-	/*
-	 * Something weird happens when we cease to drive BSY - looks
-	 * like the board/chip is letting us do another read before the
-	 * appropriate propagation delay has expired, and we're confusing
-	 * a BSY signal from ourselves as the target's response to SELECTION.
-	 *
-	 * A small delay (the 'C++' frontend breaks the pipeline with an
-	 * unnecessary jump, making it work on my 386-33/Trantor T128, the
-	 * tighter 'C' code breaks and requires this) solves the problem -
-	 * the 1 us delay is arbitrary, and only used because this delay will
-	 * be the same on other platforms and since it works here, it should
-	 * work there.
-	 *
-	 * wingel suggests that this could be due to failing to wait
-	 * one deskew delay.
-	 */
-
-	udelay(1);
-
-	dsprintk(NDEBUG_SELECTION, instance, "selecting target %d\n", scmd_id(cmd));
-
-	/*
-	 * The SCSI specification calls for a 250 ms timeout for the actual
-	 * selection.
-	 */
-
-	err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY,
-	                            msecs_to_jiffies(250));
-
-	if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
-		spin_lock_irq(&hostdata->lock);
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		NCR5380_reselect(instance);
-		if (!hostdata->connected)
-			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-		shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
-		goto out;
-	}
-
-	if (err < 0) {
-		spin_lock_irq(&hostdata->lock);
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-		/* Can't touch cmd if it has been reclaimed by the scsi ML */
-		if (hostdata->selecting) {
-			cmd->result = DID_BAD_TARGET << 16;
-			complete_cmd(instance, cmd);
-			dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
-			cmd = NULL;
-		}
-		goto out;
-	}
-
-	/*
-	 * No less than two deskew delays after the initiator detects the
-	 * BSY signal is true, it shall release the SEL signal and may
-	 * change the DATA BUS.                                     -wingel
-	 */
-
-	udelay(1);
-
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
-
-	/*
-	 * Since we followed the SCSI spec, and raised ATN while SEL
-	 * was true but before BSY was false during selection, the information
-	 * transfer phase should be a MESSAGE OUT phase so that we can send the
-	 * IDENTIFY message.
-	 *
-	 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
-	 * message (2 bytes) with a tag ID that we increment with every command
-	 * until it wraps back to 0.
-	 *
-	 * XXX - it turns out that there are some broken SCSI-II devices,
-	 * which claim to support tagged queuing but fail when more than
-	 * some number of commands are issued at once.
-	 */
-
-	/* Wait for start of REQ/ACK handshake */
-
-	err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
-	spin_lock_irq(&hostdata->lock);
-	if (err < 0) {
-		shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-		goto out;
-	}
-	if (!hostdata->selecting) {
-		do_abort(instance);
-		goto out;
-	}
-
-	dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
-	         scmd_id(cmd));
-	tmp[0] = IDENTIFY(1, cmd->device->lun);
-
-#ifdef SUPPORT_TAGS
-	if (cmd->tag != TAG_NONE) {
-		tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG;
-		tmp[2] = cmd->tag;
-		len = 3;
-	} else
-		len = 1;
-#else
-	len = 1;
-	cmd->tag = 0;
-#endif /* SUPPORT_TAGS */
-
-	/* Send message(s) */
-	data = tmp;
-	phase = PHASE_MSGOUT;
-	NCR5380_transfer_pio(instance, &phase, &len, &data);
-	dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n");
-	/* XXX need to handle errors here */
-
-	hostdata->connected = cmd;
-#ifndef SUPPORT_TAGS
-	hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun;
-#endif
-#ifdef SUN3_SCSI_VME
-	dregs->csr |= CSR_INTR;
-#endif
-
-	initialize_SCp(cmd);
-
-	cmd = NULL;
-
-out:
-	if (!hostdata->selecting)
-		return NULL;
-	hostdata->selecting = NULL;
-	return cmd;
-}
-
-/*
- * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
- * unsigned char *phase, int *count, unsigned char **data)
- *
- * Purpose : transfers data in given phase using polled I/O
- *
- * Inputs : instance - instance of driver, *phase - pointer to
- * what phase is expected, *count - pointer to number of
- * bytes to transfer, **data - pointer to data pointer.
- *
- * Returns : -1 when different phase is entered without transferring
- * maximum number of bytes, 0 if all bytes are transferred or exit
- * is in same phase.
- *
- * Also, *phase, *count, *data are modified in place.
- *
- * XXX Note : handling for bus free may be useful.
- */
-
-/*
- * Note : this code is not as quick as it could be, however it
- * IS 100% reliable, and for the actual data transfer where speed
- * counts, we will always do a pseudo DMA or DMA transfer.
- */
-
-static int NCR5380_transfer_pio(struct Scsi_Host *instance,
-				unsigned char *phase, int *count,
-				unsigned char **data)
-{
-	unsigned char p = *phase, tmp;
-	int c = *count;
-	unsigned char *d = *data;
-
-	/*
-	 * The NCR5380 chip will only drive the SCSI bus when the
-	 * phase specified in the appropriate bits of the TARGET COMMAND
-	 * REGISTER match the STATUS REGISTER
-	 */
-
-	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
-
-	do {
-		/*
-		 * Wait for assertion of REQ, after which the phase bits will be
-		 * valid
-		 */
-
-		if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
-			break;
-
-		dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
-
-		/* Check for phase mismatch */
-		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) != p) {
-			dsprintk(NDEBUG_PIO, instance, "phase mismatch\n");
-			NCR5380_dprint_phase(NDEBUG_PIO, instance);
-			break;
-		}
-
-		/* Do actual transfer from SCSI bus to / from memory */
-		if (!(p & SR_IO))
-			NCR5380_write(OUTPUT_DATA_REG, *d);
-		else
-			*d = NCR5380_read(CURRENT_SCSI_DATA_REG);
-
-		++d;
-
-		/*
-		 * The SCSI standard suggests that in MSGOUT phase, the initiator
-		 * should drop ATN on the last byte of the message phase
-		 * after REQ has been asserted for the handshake but before
-		 * the initiator raises ACK.
-		 */
-
-		if (!(p & SR_IO)) {
-			if (!((p & SR_MSG) && c > 1)) {
-				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
-				NCR5380_dprint(NDEBUG_PIO, instance);
-				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
-				              ICR_ASSERT_DATA | ICR_ASSERT_ACK);
-			} else {
-				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
-				              ICR_ASSERT_DATA | ICR_ASSERT_ATN);
-				NCR5380_dprint(NDEBUG_PIO, instance);
-				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
-				              ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
-			}
-		} else {
-			NCR5380_dprint(NDEBUG_PIO, instance);
-			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
-		}
-
-		if (NCR5380_poll_politely(instance,
-		                          STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
-			break;
-
-		dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n");
-
-/*
- * We have several special cases to consider during REQ/ACK handshaking :
- * 1.  We were in MSGOUT phase, and we are on the last byte of the
- * message.  ATN must be dropped as ACK is dropped.
- *
- * 2.  We are in a MSGIN phase, and we are on the last byte of the
- * message.  We must exit with ACK asserted, so that the calling
- * code may raise ATN before dropping ACK to reject the message.
- *
- * 3.  ACK and ATN are clear and the target may proceed as normal.
- */
-		if (!(p == PHASE_MSGIN && c == 1)) {
-			if (p == PHASE_MSGOUT && c > 1)
-				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
-			else
-				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		}
-	} while (--c);
-
-	dsprintk(NDEBUG_PIO, instance, "residual %d\n", c);
-
-	*count = c;
-	*data = d;
-	tmp = NCR5380_read(STATUS_REG);
-	/* The phase read from the bus is valid if either REQ is (already)
-	 * asserted or if ACK hasn't been released yet. The latter applies if
-	 * we're in MSG IN, DATA IN or STATUS and all bytes have been received.
-	 */
-	if ((tmp & SR_REQ) || ((tmp & SR_IO) && c == 0))
-		*phase = tmp & PHASE_MASK;
-	else
-		*phase = PHASE_UNKNOWN;
-
-	if (!c || (*phase == p))
-		return 0;
-	else
-		return -1;
-}
-
-/**
- * do_reset - issue a reset command
- * @instance: adapter to reset
- *
- * Issue a reset sequence to the NCR5380 and try and get the bus
- * back into sane shape.
- *
- * This clears the reset interrupt flag because there may be no handler for
- * it. When the driver is initialized, the NCR5380_intr() handler has not yet
- * been installed. And when in EH we may have released the ST DMA interrupt.
- */
-
-static void do_reset(struct Scsi_Host *instance)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	NCR5380_write(TARGET_COMMAND_REG,
-	              PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
-	udelay(50);
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-	(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-	local_irq_restore(flags);
-}
-
-/**
- * do_abort - abort the currently established nexus by going to
- * MESSAGE OUT phase and sending an ABORT message.
- * @instance: relevant scsi host instance
- *
- * Returns 0 on success, -1 on failure.
- */
-
-static int do_abort(struct Scsi_Host *instance)
-{
-	unsigned char *msgptr, phase, tmp;
-	int len;
-	int rc;
-
-	/* Request message out phase */
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
-
-	/*
-	 * Wait for the target to indicate a valid phase by asserting
-	 * REQ.  Once this happens, we'll have either a MSGOUT phase
-	 * and can immediately send the ABORT message, or we'll have some
-	 * other phase and will have to source/sink data.
-	 *
-	 * We really don't care what value was on the bus or what value
-	 * the target sees, so we just handshake.
-	 */
-
-	rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
-	if (rc < 0)
-		goto timeout;
-
-	tmp = NCR5380_read(STATUS_REG) & PHASE_MASK;
-
-	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
-
-	if (tmp != PHASE_MSGOUT) {
-		NCR5380_write(INITIATOR_COMMAND_REG,
-		              ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
-		rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ);
-		if (rc < 0)
-			goto timeout;
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
-	}
-
-	tmp = ABORT;
-	msgptr = &tmp;
-	len = 1;
-	phase = PHASE_MSGOUT;
-	NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
-
-	/*
-	 * If we got here, and the command completed successfully,
-	 * we're about to go into bus free state.
-	 */
-
-	return len ? -1 : 0;
-
-timeout:
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-	return -1;
-}
-
-#if defined(REAL_DMA)
-/*
- * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
- * unsigned char *phase, int *count, unsigned char **data)
- *
- * Purpose : transfers data in given phase using either real
- * or pseudo DMA.
- *
- * Inputs : instance - instance of driver, *phase - pointer to
- * what phase is expected, *count - pointer to number of
- * bytes to transfer, **data - pointer to data pointer.
- *
- * Returns : -1 when different phase is entered without transferring
- * maximum number of bytes, 0 if all bytes are transferred or exit
- * is in same phase.
- *
- * Also, *phase, *count, *data are modified in place.
- */
-
-
-static int NCR5380_transfer_dma(struct Scsi_Host *instance,
-				unsigned char *phase, int *count,
-				unsigned char **data)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	register int c = *count;
-	register unsigned char p = *phase;
-
-#if defined(CONFIG_SUN3)
-	/* sanity check */
-	if (!sun3_dma_setup_done) {
-		pr_err("scsi%d: transfer_dma without setup!\n",
-		       instance->host_no);
-		BUG();
-	}
-	hostdata->dma_len = c;
-
-	dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n",
-	         (p & SR_IO) ? "receive" : "send", c, *data);
-
-	/* netbsd turns off ints here, why not be safe and do it too */
-
-	/* send start chain */
-	sun3scsi_dma_start(c, *data);
-
-	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
-	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
-	                        MR_ENABLE_EOP_INTR);
-	if (p & SR_IO) {
-		NCR5380_write(INITIATOR_COMMAND_REG, 0);
-		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
-	} else {
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
-		NCR5380_write(START_DMA_SEND_REG, 0);
-	}
-
-#ifdef SUN3_SCSI_VME
-	dregs->csr |= CSR_DMA_ENABLE;
-#endif
-
-	sun3_dma_active = 1;
-
-#else /* !defined(CONFIG_SUN3) */
-	register unsigned char *d = *data;
-	unsigned char tmp;
-
-	if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
-		*phase = tmp;
-		return -1;
-	}
-
-	if (hostdata->read_overruns && (p & SR_IO))
-		c -= hostdata->read_overruns;
-
-	dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n",
-	         (p & SR_IO) ? "receive" : "send", c, d);
-
-	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
-	NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
-	                        MR_ENABLE_EOP_INTR);
-
-	if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) {
-		/* On the Medusa, it is a must to initialize the DMA before
-		 * starting the NCR. This is also the cleaner way for the TT.
-		 */
-		hostdata->dma_len = (p & SR_IO) ?
-			NCR5380_dma_read_setup(instance, d, c) :
-			NCR5380_dma_write_setup(instance, d, c);
-	}
-
-	if (p & SR_IO)
-		NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
-	else {
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
-		NCR5380_write(START_DMA_SEND_REG, 0);
-	}
-
-	if (hostdata->flags & FLAG_LATE_DMA_SETUP) {
-		/* On the Falcon, the DMA setup must be done after the last
-		 * NCR access, else the DMA setup gets trashed!
-		 */
-		hostdata->dma_len = (p & SR_IO) ?
-			NCR5380_dma_read_setup(instance, d, c) :
-			NCR5380_dma_write_setup(instance, d, c);
-	}
-#endif /* !defined(CONFIG_SUN3) */
-
-	return 0;
-}
-#endif /* defined(REAL_DMA) */
-
-/*
- * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
- *
- * Purpose : run through the various SCSI phases and do as the target
- * directs us to.  Operates on the currently connected command,
- * instance->connected.
- *
- * Inputs : instance, instance for which we are doing commands
- *
- * Side effects : SCSI things happen, the disconnected queue will be
- * modified if a command disconnects, *instance->connected will
- * change.
- *
- * XXX Note : we need to watch for bus free or a reset condition here
- * to recover from an unexpected bus free condition.
- */
-
-static void NCR5380_information_transfer(struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	unsigned char msgout = NOP;
-	int sink = 0;
-	int len;
-	int transfersize;
-	unsigned char *data;
-	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
-	struct scsi_cmnd *cmd;
-
-#ifdef SUN3_SCSI_VME
-	dregs->csr |= CSR_INTR;
-#endif
-
-	while ((cmd = hostdata->connected)) {
-		struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-
-		tmp = NCR5380_read(STATUS_REG);
-		/* We only have a valid SCSI phase when REQ is asserted */
-		if (tmp & SR_REQ) {
-			phase = (tmp & PHASE_MASK);
-			if (phase != old_phase) {
-				old_phase = phase;
-				NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
-			}
-#if defined(CONFIG_SUN3)
-			if (phase == PHASE_CMDOUT) {
-#if defined(REAL_DMA)
-				void *d;
-				unsigned long count;
-
-				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-					count = cmd->SCp.buffer->length;
-					d = sg_virt(cmd->SCp.buffer);
-				} else {
-					count = cmd->SCp.this_residual;
-					d = cmd->SCp.ptr;
-				}
-				/* this command setup for dma yet? */
-				if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) {
-					if (cmd->request->cmd_type == REQ_TYPE_FS) {
-						sun3scsi_dma_setup(instance, d, count,
-						                   rq_data_dir(cmd->request));
-						sun3_dma_setup_done = cmd;
-					}
-				}
-#endif
-#ifdef SUN3_SCSI_VME
-				dregs->csr |= CSR_INTR;
-#endif
-			}
-#endif /* CONFIG_SUN3 */
-
-			if (sink && (phase != PHASE_MSGOUT)) {
-				NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
-
-				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
-				              ICR_ASSERT_ACK);
-				while (NCR5380_read(STATUS_REG) & SR_REQ)
-					;
-				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
-				              ICR_ASSERT_ATN);
-				sink = 0;
-				continue;
-			}
-
-			switch (phase) {
-			case PHASE_DATAOUT:
-#if (NDEBUG & NDEBUG_NO_DATAOUT)
-				shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n");
-				sink = 1;
-				do_abort(instance);
-				cmd->result = DID_ERROR << 16;
-				complete_cmd(instance, cmd);
-				hostdata->connected = NULL;
-				return;
-#endif
-			case PHASE_DATAIN:
-				/*
-				 * If there is no room left in the current buffer in the
-				 * scatter-gather list, move onto the next one.
-				 */
-
-				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-					++cmd->SCp.buffer;
-					--cmd->SCp.buffers_residual;
-					cmd->SCp.this_residual = cmd->SCp.buffer->length;
-					cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-					merge_contiguous_buffers(cmd);
-					dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n",
-					         cmd->SCp.this_residual,
-					         cmd->SCp.buffers_residual);
-				}
-
-				/*
-				 * The preferred transfer method is going to be
-				 * PSEUDO-DMA for systems that are strictly PIO,
-				 * since we can let the hardware do the handshaking.
-				 *
-				 * For this to work, we need to know the transfersize
-				 * ahead of time, since the pseudo-DMA code will sit
-				 * in an unconditional loop.
-				 */
-
-				/* ++roman: I suggest, this should be
-				 * #if def(REAL_DMA)
-				 * instead of leaving REAL_DMA out.
-				 */
-
-#if defined(REAL_DMA)
-#if !defined(CONFIG_SUN3)
-				transfersize = 0;
-				if (!cmd->device->borken)
-#endif
-					transfersize = NCR5380_dma_xfer_len(instance, cmd, phase);
-
-				if (transfersize >= DMA_MIN_SIZE) {
-					len = transfersize;
-					cmd->SCp.phase = phase;
-					if (NCR5380_transfer_dma(instance, &phase,
-					    &len, (unsigned char **)&cmd->SCp.ptr)) {
-						/*
-						 * If the watchdog timer fires, all future
-						 * accesses to this device will use the
-						 * polled-IO.
-						 */
-						scmd_printk(KERN_INFO, cmd,
-							"switching to slow handshake\n");
-						cmd->device->borken = 1;
-						sink = 1;
-						do_abort(instance);
-						cmd->result = DID_ERROR << 16;
-						/* XXX - need to source or sink data here, as appropriate */
-					} else {
-#ifdef REAL_DMA
-						/* ++roman: When using real DMA,
-						 * information_transfer() should return after
-						 * starting DMA since it has nothing more to
-						 * do.
-						 */
-						return;
-#else
-						cmd->SCp.this_residual -= transfersize - len;
-#endif
-					}
-				} else
-#endif /* defined(REAL_DMA) */
-				{
-					/* Break up transfer into 3 ms chunks,
-					 * presuming 6 accesses per handshake.
-					 */
-					transfersize = min((unsigned long)cmd->SCp.this_residual,
-					                   hostdata->accesses_per_ms / 2);
-					len = transfersize;
-					NCR5380_transfer_pio(instance, &phase, &len,
-					                     (unsigned char **)&cmd->SCp.ptr);
-					cmd->SCp.this_residual -= transfersize - len;
-				}
-#if defined(CONFIG_SUN3) && defined(REAL_DMA)
-				/* if we had intended to dma that command clear it */
-				if (sun3_dma_setup_done == cmd)
-					sun3_dma_setup_done = NULL;
-#endif
-				return;
-			case PHASE_MSGIN:
-				len = 1;
-				data = &tmp;
-				NCR5380_transfer_pio(instance, &phase, &len, &data);
-				cmd->SCp.Message = tmp;
-
-				switch (tmp) {
-				case ABORT:
-				case COMMAND_COMPLETE:
-					/* Accept message by clearing ACK */
-					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-					dsprintk(NDEBUG_QUEUES, instance,
-					         "COMMAND COMPLETE %p target %d lun %llu\n",
-					         cmd, scmd_id(cmd), cmd->device->lun);
-
-					hostdata->connected = NULL;
-#ifdef SUPPORT_TAGS
-					cmd_free_tag(cmd);
-					if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
-						u8 lun = cmd->device->lun;
-						struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
-
-						dsprintk(NDEBUG_TAGS, instance,
-						         "QUEUE_FULL %p target %d lun %d nr_allocated %d\n",
-						         cmd, scmd_id(cmd), lun, ta->nr_allocated);
-						if (ta->queue_size > ta->nr_allocated)
-							ta->queue_size = ta->nr_allocated;
-					}
-#endif
-
-					cmd->result &= ~0xffff;
-					cmd->result |= cmd->SCp.Status;
-					cmd->result |= cmd->SCp.Message << 8;
-
-					if (cmd->cmnd[0] == REQUEST_SENSE)
-						complete_cmd(instance, cmd);
-					else {
-						if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION ||
-						    cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) {
-							dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n",
-							         cmd);
-							list_add_tail(&ncmd->list,
-							              &hostdata->autosense);
-						} else
-							complete_cmd(instance, cmd);
-					}
-
-					/*
-					 * Restore phase bits to 0 so an interrupted selection,
-					 * arbitration can resume.
-					 */
-					NCR5380_write(TARGET_COMMAND_REG, 0);
-
-					/* Enable reselect interrupts */
-					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-
-					maybe_release_dma_irq(instance);
-					return;
-				case MESSAGE_REJECT:
-					/* Accept message by clearing ACK */
-					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-					switch (hostdata->last_message) {
-					case HEAD_OF_QUEUE_TAG:
-					case ORDERED_QUEUE_TAG:
-					case SIMPLE_QUEUE_TAG:
-						/* The target obviously doesn't support tagged
-						 * queuing, even though it announced this ability in
-						 * its INQUIRY data ?!? (maybe only this LUN?) Ok,
-						 * clear 'tagged_supported' and lock the LUN, since
-						 * the command is treated as untagged further on.
-						 */
-						cmd->device->tagged_supported = 0;
-						hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
-						cmd->tag = TAG_NONE;
-						dsprintk(NDEBUG_TAGS, instance, "target %d lun %llu rejected QUEUE_TAG message; tagged queuing disabled\n",
-						         scmd_id(cmd), cmd->device->lun);
-						break;
-					}
-					break;
-				case DISCONNECT:
-					/* Accept message by clearing ACK */
-					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-					hostdata->connected = NULL;
-					list_add(&ncmd->list, &hostdata->disconnected);
-					dsprintk(NDEBUG_INFORMATION | NDEBUG_QUEUES,
-					         instance, "connected command %p for target %d lun %llu moved to disconnected queue\n",
-					         cmd, scmd_id(cmd), cmd->device->lun);
-
-					/*
-					 * Restore phase bits to 0 so an interrupted selection,
-					 * arbitration can resume.
-					 */
-					NCR5380_write(TARGET_COMMAND_REG, 0);
-
-					/* Enable reselect interrupts */
-					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-#ifdef SUN3_SCSI_VME
-					dregs->csr |= CSR_DMA_ENABLE;
-#endif
-					return;
-					/*
-					 * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
-					 * operation, in violation of the SCSI spec so we can safely
-					 * ignore SAVE/RESTORE pointers calls.
-					 *
-					 * Unfortunately, some disks violate the SCSI spec and
-					 * don't issue the required SAVE_POINTERS message before
-					 * disconnecting, and we have to break spec to remain
-					 * compatible.
-					 */
-				case SAVE_POINTERS:
-				case RESTORE_POINTERS:
-					/* Accept message by clearing ACK */
-					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-					break;
-				case EXTENDED_MESSAGE:
-					/*
-					 * Start the message buffer with the EXTENDED_MESSAGE
-					 * byte, since spi_print_msg() wants the whole thing.
-					 */
-					extended_msg[0] = EXTENDED_MESSAGE;
-					/* Accept first byte by clearing ACK */
-					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
-					spin_unlock_irq(&hostdata->lock);
-
-					dsprintk(NDEBUG_EXTENDED, instance, "receiving extended message\n");
-
-					len = 2;
-					data = extended_msg + 1;
-					phase = PHASE_MSGIN;
-					NCR5380_transfer_pio(instance, &phase, &len, &data);
-					dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n",
-					         (int)extended_msg[1],
-					         (int)extended_msg[2]);
-
-					if (!len && extended_msg[1] > 0 &&
-					    extended_msg[1] <= sizeof(extended_msg) - 2) {
-						/* Accept third byte by clearing ACK */
-						NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-						len = extended_msg[1] - 1;
-						data = extended_msg + 3;
-						phase = PHASE_MSGIN;
-
-						NCR5380_transfer_pio(instance, &phase, &len, &data);
-						dsprintk(NDEBUG_EXTENDED, instance, "message received, residual %d\n",
-						         len);
-
-						switch (extended_msg[2]) {
-						case EXTENDED_SDTR:
-						case EXTENDED_WDTR:
-						case EXTENDED_MODIFY_DATA_POINTER:
-						case EXTENDED_EXTENDED_IDENTIFY:
-							tmp = 0;
-						}
-					} else if (len) {
-						shost_printk(KERN_ERR, instance, "error receiving extended message\n");
-						tmp = 0;
-					} else {
-						shost_printk(KERN_NOTICE, instance, "extended message code %02x length %d is too long\n",
-						             extended_msg[2], extended_msg[1]);
-						tmp = 0;
-					}
-
-					spin_lock_irq(&hostdata->lock);
-					if (!hostdata->connected)
-						return;
-
-					/* Fall through to reject message */
-
-					/*
-					 * If we get something weird that we aren't expecting,
-					 * reject it.
-					 */
-				default:
-					if (!tmp) {
-						shost_printk(KERN_ERR, instance, "rejecting message ");
-						spi_print_msg(extended_msg);
-						printk("\n");
-					} else if (tmp != EXTENDED_MESSAGE)
-						scmd_printk(KERN_INFO, cmd,
-						            "rejecting unknown message %02x\n",
-						            tmp);
-					else
-						scmd_printk(KERN_INFO, cmd,
-						            "rejecting unknown extended message code %02x, length %d\n",
-						            extended_msg[1], extended_msg[0]);
-
-					msgout = MESSAGE_REJECT;
-					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
-					break;
-				} /* switch (tmp) */
-				break;
-			case PHASE_MSGOUT:
-				len = 1;
-				data = &msgout;
-				hostdata->last_message = msgout;
-				NCR5380_transfer_pio(instance, &phase, &len, &data);
-				if (msgout == ABORT) {
-					hostdata->connected = NULL;
-					cmd->result = DID_ERROR << 16;
-					complete_cmd(instance, cmd);
-					maybe_release_dma_irq(instance);
-					NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-					return;
-				}
-				msgout = NOP;
-				break;
-			case PHASE_CMDOUT:
-				len = cmd->cmd_len;
-				data = cmd->cmnd;
-				/*
-				 * XXX for performance reasons, on machines with a
-				 * PSEUDO-DMA architecture we should probably
-				 * use the dma transfer function.
-				 */
-				NCR5380_transfer_pio(instance, &phase, &len, &data);
-				break;
-			case PHASE_STATIN:
-				len = 1;
-				data = &tmp;
-				NCR5380_transfer_pio(instance, &phase, &len, &data);
-				cmd->SCp.Status = tmp;
-				break;
-			default:
-				shost_printk(KERN_ERR, instance, "unknown phase\n");
-				NCR5380_dprint(NDEBUG_ANY, instance);
-			} /* switch(phase) */
-		} else {
-			spin_unlock_irq(&hostdata->lock);
-			NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
-			spin_lock_irq(&hostdata->lock);
-		}
-	}
-}
-
-/*
- * Function : void NCR5380_reselect (struct Scsi_Host *instance)
- *
- * Purpose : does reselection, initializing the instance->connected
- * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q
- * nexus has been reestablished,
- *
- * Inputs : instance - this instance of the NCR5380.
- */
-
-
-/* it might eventually prove necessary to do a dma setup on
-   reselection, but it doesn't seem to be needed now -- sam */
-
-static void NCR5380_reselect(struct Scsi_Host *instance)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	unsigned char target_mask;
-	unsigned char lun;
-#ifdef SUPPORT_TAGS
-	unsigned char tag;
-#endif
-	unsigned char msg[3];
-	int __maybe_unused len;
-	unsigned char __maybe_unused *data, __maybe_unused phase;
-	struct NCR5380_cmd *ncmd;
-	struct scsi_cmnd *tmp;
-
-	/*
-	 * Disable arbitration, etc. since the host adapter obviously
-	 * lost, and tell an interrupted NCR5380_select() to restart.
-	 */
-
-	NCR5380_write(MODE_REG, MR_BASE);
-
-	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-
-	dsprintk(NDEBUG_RESELECTION, instance, "reselect\n");
-
-	/*
-	 * At this point, we have detected that our SCSI ID is on the bus,
-	 * SEL is true and BSY was false for at least one bus settle delay
-	 * (400 ns).
-	 *
-	 * We must assert BSY ourselves, until the target drops the SEL
-	 * signal.
-	 */
-
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
-	if (NCR5380_poll_politely(instance,
-	                          STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		return;
-	}
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
-	/*
-	 * Wait for target to go into MSGIN.
-	 */
-
-	if (NCR5380_poll_politely(instance,
-	                          STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
-		do_abort(instance);
-		return;
-	}
-
-#if defined(CONFIG_SUN3) && defined(REAL_DMA)
-	/* acknowledge toggle to MSGIN */
-	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));
-
-	/* peek at the byte without really hitting the bus */
-	msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
-#else
-	len = 1;
-	data = msg;
-	phase = PHASE_MSGIN;
-	NCR5380_transfer_pio(instance, &phase, &len, &data);
-
-	if (len) {
-		do_abort(instance);
-		return;
-	}
-#endif
-
-	if (!(msg[0] & 0x80)) {
-		shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got ");
-		spi_print_msg(msg);
-		printk("\n");
-		do_abort(instance);
-		return;
-	}
-	lun = msg[0] & 0x07;
-
-#if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3)
-	/* If the phase is still MSGIN, the target wants to send some more
-	 * messages. In case it supports tagged queuing, this is probably a
-	 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
-	 */
-	tag = TAG_NONE;
-	if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) {
-		/* Accept previous IDENTIFY message by clearing ACK */
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		len = 2;
-		data = msg + 1;
-		if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
-		    msg[1] == SIMPLE_QUEUE_TAG)
-			tag = msg[2];
-		dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n",
-		         target_mask, lun, tag);
-	}
-#endif
-
-	/*
-	 * Find the command corresponding to the I_T_L or I_T_L_Q  nexus we
-	 * just reestablished, and remove it from the disconnected queue.
-	 */
-
-	tmp = NULL;
-	list_for_each_entry(ncmd, &hostdata->disconnected, list) {
-		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
-
-		if (target_mask == (1 << scmd_id(cmd)) &&
-		    lun == (u8)cmd->device->lun
-#ifdef SUPPORT_TAGS
-		    && (tag == cmd->tag)
-#endif
-		    ) {
-			list_del(&ncmd->list);
-			tmp = cmd;
-			break;
-		}
-	}
-
-	if (tmp) {
-		dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
-		         "reselect: removed %p from disconnected queue\n", tmp);
-	} else {
-
-#ifdef SUPPORT_TAGS
-		shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d tag %d not in disconnected queue.\n",
-		             target_mask, lun, tag);
-#else
-		shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
-		             target_mask, lun);
-#endif
-		/*
-		 * Since we have an established nexus that we can't do anything
-		 * with, we must abort it.
-		 */
-		do_abort(instance);
-		return;
-	}
-
-#if defined(CONFIG_SUN3) && defined(REAL_DMA)
-	/* engage dma setup for the command we just saw */
-	{
-		void *d;
-		unsigned long count;
-
-		if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
-			count = tmp->SCp.buffer->length;
-			d = sg_virt(tmp->SCp.buffer);
-		} else {
-			count = tmp->SCp.this_residual;
-			d = tmp->SCp.ptr;
-		}
-		/* setup this command for dma if not already */
-		if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) {
-			sun3scsi_dma_setup(instance, d, count,
-			                   rq_data_dir(tmp->request));
-			sun3_dma_setup_done = tmp;
-		}
-	}
-
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
-#endif
-
-	/* Accept message by clearing ACK */
-	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
-#if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3)
-	/* If the phase is still MSGIN, the target wants to send some more
-	 * messages. In case it supports tagged queuing, this is probably a
-	 * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
-	 */
-	tag = TAG_NONE;
-	if (phase == PHASE_MSGIN && setup_use_tagged_queuing) {
-		/* Accept previous IDENTIFY message by clearing ACK */
-		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		len = 2;
-		data = msg + 1;
-		if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
-		    msg[1] == SIMPLE_QUEUE_TAG)
-			tag = msg[2];
-		dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n",
-		         target_mask, lun, tag);
-	}
-#endif
-
-	hostdata->connected = tmp;
-	dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n",
-	         scmd_id(tmp), tmp->device->lun, tmp->tag);
-}
-
-
-/**
- * list_find_cmd - test for presence of a command in a linked list
- * @haystack: list of commands
- * @needle: command to search for
- */
-
-static bool list_find_cmd(struct list_head *haystack,
-                          struct scsi_cmnd *needle)
-{
-	struct NCR5380_cmd *ncmd;
-
-	list_for_each_entry(ncmd, haystack, list)
-		if (NCR5380_to_scmd(ncmd) == needle)
-			return true;
-	return false;
-}
-
-/**
- * list_remove_cmd - remove a command from linked list
- * @haystack: list of commands
- * @needle: command to remove
- */
-
-static bool list_del_cmd(struct list_head *haystack,
-                         struct scsi_cmnd *needle)
-{
-	if (list_find_cmd(haystack, needle)) {
-		struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle);
-
-		list_del(&ncmd->list);
-		return true;
-	}
-	return false;
-}
-
-/**
- * NCR5380_abort - scsi host eh_abort_handler() method
- * @cmd: the command to be aborted
- *
- * Try to abort a given command by removing it from queues and/or sending
- * the target an abort message. This may not succeed in causing a target
- * to abort the command. Nonetheless, the low-level driver must forget about
- * the command because the mid-layer reclaims it and it may be re-issued.
- *
- * The normal path taken by a command is as follows. For EH we trace this
- * same path to locate and abort the command.
- *
- * unissued -> selecting -> [unissued -> selecting ->]... connected ->
- * [disconnected -> connected ->]...
- * [autosense -> connected ->] done
- *
- * If cmd was not found at all then presumably it has already been completed,
- * in which case return SUCCESS to try to avoid further EH measures.
- *
- * If the command has not completed yet, we must not fail to find it.
- * We have no option but to forget the aborted command (even if it still
- * lacks sense data). The mid-layer may re-issue a command that is in error
- * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
- * this driver are such that a command can appear on one queue only.
- *
- * The lock protects driver data structures, but EH handlers also use it
- * to serialize their own execution and prevent their own re-entry.
- */
-
-static int NCR5380_abort(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *instance = cmd->device->host;
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	unsigned long flags;
-	int result = SUCCESS;
-
-	spin_lock_irqsave(&hostdata->lock, flags);
-
-#if (NDEBUG & NDEBUG_ANY)
-	scmd_printk(KERN_INFO, cmd, __func__);
-#endif
-	NCR5380_dprint(NDEBUG_ANY, instance);
-	NCR5380_dprint_phase(NDEBUG_ANY, instance);
-
-	if (list_del_cmd(&hostdata->unissued, cmd)) {
-		dsprintk(NDEBUG_ABORT, instance,
-		         "abort: removed %p from issue queue\n", cmd);
-		cmd->result = DID_ABORT << 16;
-		cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
-		goto out;
-	}
-
-	if (hostdata->selecting == cmd) {
-		dsprintk(NDEBUG_ABORT, instance,
-		         "abort: cmd %p == selecting\n", cmd);
-		hostdata->selecting = NULL;
-		cmd->result = DID_ABORT << 16;
-		complete_cmd(instance, cmd);
-		goto out;
-	}
-
-	if (list_del_cmd(&hostdata->disconnected, cmd)) {
-		dsprintk(NDEBUG_ABORT, instance,
-		         "abort: removed %p from disconnected list\n", cmd);
-		/* Can't call NCR5380_select() and send ABORT because that
-		 * means releasing the lock. Need a bus reset.
-		 */
-		set_host_byte(cmd, DID_ERROR);
-		complete_cmd(instance, cmd);
-		result = FAILED;
-		goto out;
-	}
-
-	if (hostdata->connected == cmd) {
-		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
-		hostdata->connected = NULL;
-#ifdef REAL_DMA
-		hostdata->dma_len = 0;
-#endif
-		if (do_abort(instance)) {
-			set_host_byte(cmd, DID_ERROR);
-			complete_cmd(instance, cmd);
-			result = FAILED;
-			goto out;
-		}
-		set_host_byte(cmd, DID_ABORT);
-		complete_cmd(instance, cmd);
-		goto out;
-	}
-
-	if (list_del_cmd(&hostdata->autosense, cmd)) {
-		dsprintk(NDEBUG_ABORT, instance,
-		         "abort: removed %p from sense queue\n", cmd);
-		set_host_byte(cmd, DID_ERROR);
-		complete_cmd(instance, cmd);
-	}
-
-out:
-	if (result == FAILED)
-		dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
-	else
-		dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
-
-	queue_work(hostdata->work_q, &hostdata->main_task);
-	maybe_release_dma_irq(instance);
-	spin_unlock_irqrestore(&hostdata->lock, flags);
-
-	return result;
-}
-
-
-/**
- * NCR5380_bus_reset - reset the SCSI bus
- * @cmd: SCSI command undergoing EH
- *
- * Returns SUCCESS
- */
-
-static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *instance = cmd->device->host;
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	int i;
-	unsigned long flags;
-	struct NCR5380_cmd *ncmd;
-
-	spin_lock_irqsave(&hostdata->lock, flags);
-
-#if (NDEBUG & NDEBUG_ANY)
-	scmd_printk(KERN_INFO, cmd, __func__);
-#endif
-	NCR5380_dprint(NDEBUG_ANY, instance);
-	NCR5380_dprint_phase(NDEBUG_ANY, instance);
-
-	do_reset(instance);
-
-	/* reset NCR registers */
-	NCR5380_write(MODE_REG, MR_BASE);
-	NCR5380_write(TARGET_COMMAND_REG, 0);
-	NCR5380_write(SELECT_ENABLE_REG, 0);
-
-	/* After the reset, there are no more connected or disconnected commands
-	 * and no busy units; so clear the low-level status here to avoid
-	 * conflicts when the mid-level code tries to wake up the affected
-	 * commands!
-	 */
-
-	if (list_del_cmd(&hostdata->unissued, cmd)) {
-		cmd->result = DID_RESET << 16;
-		cmd->scsi_done(cmd);
-	}
-
-	if (hostdata->selecting) {
-		hostdata->selecting->result = DID_RESET << 16;
-		complete_cmd(instance, hostdata->selecting);
-		hostdata->selecting = NULL;
-	}
-
-	list_for_each_entry(ncmd, &hostdata->disconnected, list) {
-		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
-
-		set_host_byte(cmd, DID_RESET);
-		cmd->scsi_done(cmd);
-	}
-	INIT_LIST_HEAD(&hostdata->disconnected);
-
-	list_for_each_entry(ncmd, &hostdata->autosense, list) {
-		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
-
-		set_host_byte(cmd, DID_RESET);
-		cmd->scsi_done(cmd);
-	}
-	INIT_LIST_HEAD(&hostdata->autosense);
-
-	if (hostdata->connected) {
-		set_host_byte(hostdata->connected, DID_RESET);
-		complete_cmd(instance, hostdata->connected);
-		hostdata->connected = NULL;
-	}
-
-#ifdef SUPPORT_TAGS
-	free_all_tags(hostdata);
-#endif
-	for (i = 0; i < 8; ++i)
-		hostdata->busy[i] = 0;
-#ifdef REAL_DMA
-	hostdata->dma_len = 0;
-#endif
-
-	queue_work(hostdata->work_q, &hostdata->main_task);
-	maybe_release_dma_irq(instance);
-	spin_unlock_irqrestore(&hostdata->lock, flags);
-
-	return SUCCESS;
-}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 78d1b2963..a59ad94 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -14,55 +14,23 @@
  *
  */
 
-
-/**************************************************************************/
-/*                                                                        */
-/* Notes for Falcon SCSI:                                                 */
-/* ----------------------                                                 */
-/*                                                                        */
-/* Since the Falcon SCSI uses the ST-DMA chip, that is shared among       */
-/* several device drivers, locking and unlocking the access to this       */
-/* chip is required. But locking is not possible from an interrupt,       */
-/* since it puts the process to sleep if the lock is not available.       */
-/* This prevents "late" locking of the DMA chip, i.e. locking it just     */
-/* before using it, since in case of disconnection-reconnection           */
-/* commands, the DMA is started from the reselection interrupt.           */
-/*                                                                        */
-/* Two possible schemes for ST-DMA-locking would be:                      */
-/*  1) The lock is taken for each command separately and disconnecting    */
-/*     is forbidden (i.e. can_queue = 1).                                 */
-/*  2) The DMA chip is locked when the first command comes in and         */
-/*     released when the last command is finished and all queues are      */
-/*     empty.                                                             */
-/* The first alternative would result in bad performance, since the       */
-/* interleaving of commands would not be used. The second is unfair to    */
-/* other drivers using the ST-DMA, because the queues will seldom be      */
-/* totally empty if there is a lot of disk traffic.                       */
-/*                                                                        */
-/* For these reasons I decided to employ a more elaborate scheme:        */
-/*  - First, we give up the lock every time we can (for fairness), this    */
-/*    means every time a command finishes and there are no other commands */
-/*    on the disconnected queue.                                          */
-/*  - If there are others waiting to lock the DMA chip, we stop           */
-/*    issuing commands, i.e. moving them onto the issue queue.           */
-/*    Because of that, the disconnected queue will run empty in a         */
-/*    while. Instead we go to sleep on a 'fairness_queue'.                */
-/*  - If the lock is released, all processes waiting on the fairness      */
-/*    queue will be woken. The first of them tries to re-lock the DMA,     */
-/*    the others wait for the first to finish this task. After that,      */
-/*    they can all run on and do their commands...                        */
-/* This sounds complicated (and it is :-(), but it seems to be a          */
-/* good compromise between fairness and performance: As long as no one     */
-/* else wants to work with the ST-DMA chip, SCSI can go along as          */
-/* usual. If now someone else comes, this behaviour is changed to a       */
-/* "fairness mode": just already initiated commands are finished and      */
-/* then the lock is released. The other one waiting will probably win     */
-/* the race for locking the DMA, since it was waiting for longer. And     */
-/* after it has finished, SCSI can go ahead again. Finally: I hope I      */
-/* have not produced any deadlock possibilities!                          */
-/*                                                                        */
-/**************************************************************************/
-
+/*
+ * Notes for Falcon SCSI DMA
+ *
+ * The 5380 device is one of several that all share the DMA chip. Hence
+ * "locking" and "unlocking" access to this chip is required.
+ *
+ * Two possible schemes for ST DMA acquisition by atari_scsi are:
+ * 1) The lock is taken for each command separately (i.e. can_queue == 1).
+ * 2) The lock is taken when the first command arrives and released
+ * when the last command is finished (i.e. can_queue > 1).
+ *
+ * The first alternative limits SCSI bus utilization, since interleaving
+ * commands is not possible. The second gives better performance but is
+ * unfair to other drivers needing to use the ST DMA chip. In order to
+ * allow the IDE and floppy drivers equal access to the ST DMA chip
+ * the default is can_queue == 1.
+ */
 
 #include <linux/module.h>
 #include <linux/types.h>
@@ -83,13 +51,10 @@
 
 #include <scsi/scsi_host.h>
 
-/* Definitions for the core NCR5380 driver. */
-
-#define REAL_DMA
-#define SUPPORT_TAGS
-#define MAX_TAGS                        32
 #define DMA_MIN_SIZE                    32
 
+/* Definitions for the core NCR5380 driver. */
+
 #define NCR5380_implementation_fields   /* none */
 
 #define NCR5380_read(reg)               atari_scsi_reg_read(reg)
@@ -99,9 +64,9 @@
 #define NCR5380_abort                   atari_scsi_abort
 #define NCR5380_info                    atari_scsi_info
 
-#define NCR5380_dma_read_setup(instance, data, count) \
+#define NCR5380_dma_recv_setup(instance, data, count) \
         atari_scsi_dma_setup(instance, data, count, 0)
-#define NCR5380_dma_write_setup(instance, data, count) \
+#define NCR5380_dma_send_setup(instance, data, count) \
         atari_scsi_dma_setup(instance, data, count, 1)
 #define NCR5380_dma_residual(instance) \
         atari_scsi_dma_residual(instance)
@@ -159,14 +124,11 @@
 	return adr;
 }
 
-#ifdef REAL_DMA
 static void atari_scsi_fetch_restbytes(void);
-#endif
 
 static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
 static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value);
 
-#ifdef REAL_DMA
 static unsigned long	atari_dma_residual, atari_dma_startaddr;
 static short		atari_dma_active;
 /* pointer to the dribble buffer */
@@ -185,7 +147,6 @@
 /* mask for address bits that can't be used with the ST-DMA */
 static unsigned long	atari_dma_stram_mask;
 #define STRAM_ADDR(a)	(((a) & atari_dma_stram_mask) == 0)
-#endif
 
 static int setup_can_queue = -1;
 module_param(setup_can_queue, int, 0);
@@ -193,16 +154,12 @@
 module_param(setup_cmd_per_lun, int, 0);
 static int setup_sg_tablesize = -1;
 module_param(setup_sg_tablesize, int, 0);
-static int setup_use_tagged_queuing = -1;
-module_param(setup_use_tagged_queuing, int, 0);
 static int setup_hostid = -1;
 module_param(setup_hostid, int, 0);
 static int setup_toshiba_delay = -1;
 module_param(setup_toshiba_delay, int, 0);
 
 
-#if defined(REAL_DMA)
-
 static int scsi_dma_is_ignored_buserr(unsigned char dma_stat)
 {
 	int i;
@@ -255,12 +212,9 @@
 }
 #endif
 
-#endif
-
 
 static irqreturn_t scsi_tt_intr(int irq, void *dev)
 {
-#ifdef REAL_DMA
 	struct Scsi_Host *instance = dev;
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int dma_stat;
@@ -342,8 +296,6 @@
 		tt_scsi_dma.dma_ctrl = 0;
 	}
 
-#endif /* REAL_DMA */
-
 	NCR5380_intr(irq, dev);
 
 	return IRQ_HANDLED;
@@ -352,7 +304,6 @@
 
 static irqreturn_t scsi_falcon_intr(int irq, void *dev)
 {
-#ifdef REAL_DMA
 	struct Scsi_Host *instance = dev;
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int dma_stat;
@@ -405,15 +356,12 @@
 		atari_dma_orig_addr = NULL;
 	}
 
-#endif /* REAL_DMA */
-
 	NCR5380_intr(irq, dev);
 
 	return IRQ_HANDLED;
 }
 
 
-#ifdef REAL_DMA
 static void atari_scsi_fetch_restbytes(void)
 {
 	int nr;
@@ -436,7 +384,6 @@
 			*dst++ = *src++;
 	}
 }
-#endif /* REAL_DMA */
 
 
 /* This function releases the lock on the DMA chip if there is no
@@ -464,6 +411,10 @@
 	if (IS_A_TT())
 		return 1;
 
+	if (stdma_is_locked_by(scsi_falcon_intr) &&
+	    instance->hostt->can_queue > 1)
+		return 1;
+
 	if (in_interrupt())
 		return stdma_try_lock(scsi_falcon_intr, instance);
 
@@ -495,8 +446,7 @@
 		setup_sg_tablesize = ints[3];
 	if (ints[0] >= 4)
 		setup_hostid = ints[4];
-	if (ints[0] >= 5)
-		setup_use_tagged_queuing = ints[5];
+	/* ints[5] (use_tagged_queuing) is ignored */
 	/* ints[6] (use_pdma) is ignored */
 	if (ints[0] >= 7)
 		setup_toshiba_delay = ints[7];
@@ -508,8 +458,6 @@
 #endif /* !MODULE */
 
 
-#if defined(REAL_DMA)
-
 static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
 					  void *data, unsigned long count,
 					  int dir)
@@ -545,9 +493,6 @@
 	 */
 	dma_cache_maintenance(addr, count, dir);
 
-	if (count == 0)
-		printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n");
-
 	if (IS_A_TT()) {
 		tt_scsi_dma.dma_ctrl = dir;
 		SCSI_DMA_WRITE_P(dma_addr, addr);
@@ -624,6 +569,9 @@
 {
 	unsigned long	possible_len, limit;
 
+	if (wanted_len < DMA_MIN_SIZE)
+		return 0;
+
 	if (IS_A_TT())
 		/* TT SCSI DMA can transfer arbitrary #bytes */
 		return wanted_len;
@@ -703,9 +651,6 @@
 }
 
 
-#endif	/* REAL_DMA */
-
-
 /* NCR5380 register access functions
  *
  * There are separate functions for TT and Falcon, because the access
@@ -736,7 +681,7 @@
 }
 
 
-#include "atari_NCR5380.c"
+#include "NCR5380.c"
 
 static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
 {
@@ -745,7 +690,6 @@
 
 	local_irq_save(flags);
 
-#ifdef REAL_DMA
 	/* Abort a maybe active DMA transfer */
 	if (IS_A_TT()) {
 		tt_scsi_dma.dma_ctrl = 0;
@@ -754,7 +698,6 @@
 		atari_dma_active = 0;
 		atari_dma_orig_addr = NULL;
 	}
-#endif
 
 	rv = NCR5380_bus_reset(cmd);
 
@@ -781,6 +724,7 @@
 	.eh_abort_handler	= atari_scsi_abort,
 	.eh_bus_reset_handler	= atari_scsi_bus_reset,
 	.this_id		= 7,
+	.cmd_per_lun		= 2,
 	.use_clustering		= DISABLE_CLUSTERING,
 	.cmd_size		= NCR5380_CMD_SIZE,
 };
@@ -804,24 +748,11 @@
 		atari_scsi_reg_write = atari_scsi_falcon_reg_write;
 	}
 
-	/* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary.
-	 * Higher values should work, too; try it!
-	 * (But cmd_per_lun costs memory!)
-	 *
-	 * But there seems to be a bug somewhere that requires CAN_QUEUE to be
-	 * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since
-	 * changed CMD_PER_LUN...
-	 *
-	 * Note: The Falcon currently uses 8/1 setting due to unsolved problems
-	 * with cmd_per_lun != 1
-	 */
 	if (ATARIHW_PRESENT(TT_SCSI)) {
 		atari_scsi_template.can_queue    = 16;
-		atari_scsi_template.cmd_per_lun  = 8;
 		atari_scsi_template.sg_tablesize = SG_ALL;
 	} else {
-		atari_scsi_template.can_queue    = 8;
-		atari_scsi_template.cmd_per_lun  = 1;
+		atari_scsi_template.can_queue    = 1;
 		atari_scsi_template.sg_tablesize = SG_NONE;
 	}
 
@@ -850,8 +781,6 @@
 		}
 	}
 
-
-#ifdef REAL_DMA
 	/* If running on a Falcon and if there's TT-Ram (i.e., more than one
 	 * memory block, since there's always ST-Ram in a Falcon), then
 	 * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers
@@ -867,7 +796,6 @@
 		atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
 		atari_dma_orig_addr = 0;
 	}
-#endif
 
 	instance = scsi_host_alloc(&atari_scsi_template,
 	                           sizeof(struct NCR5380_hostdata));
@@ -879,9 +807,6 @@
 	instance->irq = irq->start;
 
 	host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP;
-#ifdef SUPPORT_TAGS
-	host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
-#endif
 	host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
 
 	error = NCR5380_init(instance, host_flags);
@@ -897,7 +822,7 @@
 			goto fail_irq;
 		}
 		tt_mfp.active_edge |= 0x80;	/* SCSI int on L->H */
-#ifdef REAL_DMA
+
 		tt_scsi_dma.dma_ctrl = 0;
 		atari_dma_residual = 0;
 
@@ -919,17 +844,14 @@
 
 			hostdata->read_overruns = 4;
 		}
-#endif
 	} else {
 		/* Nothing to do for the interrupt: the ST-DMA is initialized
 		 * already.
 		 */
-#ifdef REAL_DMA
 		atari_dma_residual = 0;
 		atari_dma_active = 0;
 		atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000
 					: 0xff000000);
-#endif
 	}
 
 	NCR5380_maybe_reset_bus(instance);
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 06dc215..0f797a5 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -874,8 +874,8 @@
 /*
  * itnim callbacks
  */
-void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
-			 struct bfad_itnim_s **itnim_drv);
+int bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
+			struct bfad_itnim_s **itnim_drv);
 void bfa_fcb_itnim_free(struct bfad_s *bfad,
 			struct bfad_itnim_s *itnim_drv);
 void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 4f089d7..2e3b19e 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -588,12 +588,13 @@
 	struct bfa_fcs_lport_s *port = rport->port;
 	struct bfa_fcs_itnim_s *itnim;
 	struct bfad_itnim_s   *itnim_drv;
+	int ret;
 
 	/*
 	 * call bfad to allocate the itnim
 	 */
-	bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv);
-	if (itnim == NULL) {
+	ret = bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv);
+	if (ret) {
 		bfa_trc(port->fcs, rport->pwwn);
 		return NULL;
 	}
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 6c805e1..02d8060 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -440,13 +440,13 @@
  * BFA FCS itnim alloc callback, after successful PRLI
  * Context: Interrupt
  */
-void
+int
 bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
 		    struct bfad_itnim_s **itnim_drv)
 {
 	*itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
 	if (*itnim_drv == NULL)
-		return;
+		return -ENOMEM;
 
 	(*itnim_drv)->im = bfad->im;
 	*itnim = &(*itnim_drv)->fcs_itnim;
@@ -457,6 +457,7 @@
 	 */
 	INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
 	bfad->bfad_flags |= BFAD_RPORT_ONLINE;
+	return 0;
 }
 
 /*
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 499e369..fdd4eb4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -65,7 +65,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"2.9.6"
+#define BNX2FC_VERSION		"2.10.3"
 
 #define PFX			"bnx2fc: "
 
@@ -261,6 +261,7 @@
 	u8 vlan_enabled;
 	int vlan_id;
 	bool enabled;
+	u8 tm_timeout;
 };
 
 #define bnx2fc_from_ctlr(x)			\
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index d7029ea..a188199 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -107,6 +107,26 @@
 		"\t\t0x10 - fcoe L2 fame related logs.\n"
 		"\t\t0xff - LOG all messages.");
 
+uint bnx2fc_devloss_tmo;
+module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO);
+MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports "
+	"attached via bnx2fc.");
+
+uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
+module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO);
+MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default "
+	"0xffff.");
+
+uint bnx2fc_queue_depth;
+module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO);
+MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices "
+	"attached via bnx2fc.");
+
+uint bnx2fc_log_fka;
+module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is "
+	"initiating a FIP keep alive when debug logging is enabled.");
+
 static int bnx2fc_cpu_callback(struct notifier_block *nfb,
 			     unsigned long action, void *hcpu);
 /* notification function for CPU hotplug events */
@@ -692,7 +712,7 @@
 	int rc = 0;
 
 	shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
-	shost->max_lun = BNX2FC_MAX_LUN;
+	shost->max_lun = bnx2fc_max_luns;
 	shost->max_id = BNX2FC_MAX_FCP_TGT;
 	shost->max_channel = 0;
 	if (lport->vport)
@@ -1061,6 +1081,20 @@
  */
 static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
+	struct fip_header *fiph;
+	struct ethhdr *eth_hdr;
+	u16 op;
+	u8 sub;
+
+	fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
+	eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+	op = ntohs(fiph->fip_op);
+	sub = fiph->fip_subcode;
+
+	if (op == FIP_OP_CTRL && sub == FIP_SC_SOL && bnx2fc_log_fka)
+		BNX2FC_MISC_DBG("Sending FKA from %pM to %pM.\n",
+		    eth_hdr->h_source, eth_hdr->h_dest);
+
 	skb->dev = bnx2fc_from_ctlr(fip)->netdev;
 	dev_queue_xmit(skb);
 }
@@ -1102,6 +1136,9 @@
 		return -EIO;
 	}
 
+	if (bnx2fc_devloss_tmo)
+		fc_host_dev_loss_tmo(vn_port->host) = bnx2fc_devloss_tmo;
+
 	if (disabled) {
 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
 	} else {
@@ -1495,6 +1532,9 @@
 	}
 	fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
 
+	if (bnx2fc_devloss_tmo)
+		fc_host_dev_loss_tmo(shost) = bnx2fc_devloss_tmo;
+
 	/* Allocate exchange manager */
 	if (!npiv)
 		rc = bnx2fc_em_config(lport, hba);
@@ -1999,6 +2039,8 @@
 		return;
 	}
 
+	pr_info(PFX "FCoE initialized for %s.\n", dev->netdev->name);
+
 	/* Add HBA to the adapter list */
 	mutex_lock(&bnx2fc_dev_lock);
 	list_add_tail(&hba->list, &adapter_list);
@@ -2293,6 +2335,7 @@
 	ctlr = bnx2fc_to_ctlr(interface);
 	cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
 	interface->vlan_id = vlan_id;
+	interface->tm_timeout = BNX2FC_TM_TIMEOUT;
 
 	interface->timer_work_queue =
 			create_singlethread_workqueue("bnx2fc_timer_wq");
@@ -2612,6 +2655,15 @@
 	return NOTIFY_OK;
 }
 
+static int bnx2fc_slave_configure(struct scsi_device *sdev)
+{
+	if (!bnx2fc_queue_depth)
+		return 0;
+
+	scsi_change_queue_depth(sdev, bnx2fc_queue_depth);
+	return 0;
+}
+
 /**
  * bnx2fc_mod_init - module init entry point
  *
@@ -2858,6 +2910,50 @@
 	.bsg_request = fc_lport_bsg_request,
 };
 
+/*
+ * Additional scsi_host attributes.
+ */
+static ssize_t
+bnx2fc_tm_timeout_show(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct fc_lport *lport = shost_priv(shost);
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_interface *interface = port->priv;
+
+	sprintf(buf, "%u\n", interface->tm_timeout);
+	return strlen(buf);
+}
+
+static ssize_t
+bnx2fc_tm_timeout_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct fc_lport *lport = shost_priv(shost);
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_interface *interface = port->priv;
+	int rval, val;
+
+	rval = kstrtouint(buf, 10, &val);
+	if (rval)
+		return rval;
+	if (val > 255)
+		return -ERANGE;
+
+	interface->tm_timeout = (u8)val;
+	return strlen(buf);
+}
+
+static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show,
+	bnx2fc_tm_timeout_store);
+
+static struct device_attribute *bnx2fc_host_attrs[] = {
+	&dev_attr_tm_timeout,
+	NULL,
+};
+
 /**
  * scsi_host_template structure used while registering with SCSI-ml
  */
@@ -2877,6 +2973,8 @@
 	.sg_tablesize		= BNX2FC_MAX_BDS_PER_CMD,
 	.max_sectors		= 1024,
 	.track_queue_depth	= 1,
+	.slave_configure	= bnx2fc_slave_configure,
+	.shost_attrs		= bnx2fc_host_attrs,
 };
 
 static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 2230dab..026f394 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -179,12 +179,24 @@
 
 	bnx2fc_unmap_sg_list(io_req);
 	io_req->sc_cmd = NULL;
+
+	/* Sanity checks before returning command to mid-layer */
 	if (!sc_cmd) {
 		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
 				    "IO(0x%x) already cleaned up\n",
 		       io_req->xid);
 		return;
 	}
+	if (!sc_cmd->device) {
+		pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid);
+		return;
+	}
+	if (!sc_cmd->device->host) {
+		pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n",
+		    io_req->xid);
+		return;
+	}
+
 	sc_cmd->result = err_code << 16;
 
 	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
@@ -770,7 +782,7 @@
 	spin_unlock_bh(&tgt->tgt_lock);
 
 	rc = wait_for_completion_timeout(&io_req->tm_done,
-					 BNX2FC_TM_TIMEOUT * HZ);
+					 interface->tm_timeout * HZ);
 	spin_lock_bh(&tgt->tgt_lock);
 
 	io_req->wait_for_comp = 0;
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 7289437..133901f 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -675,7 +675,7 @@
 {
 	struct list_head *list;
 	struct list_head *tmp;
-	struct bnx2i_endpoint *ep;
+	struct bnx2i_endpoint *ep = NULL;
 
 	read_lock_bh(&hba->ep_rdwr_lock);
 	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
@@ -703,7 +703,7 @@
 {
 	struct list_head *list;
 	struct list_head *tmp;
-	struct bnx2i_endpoint *ep;
+	struct bnx2i_endpoint *ep = NULL;
 
 	read_lock_bh(&hba->ep_rdwr_lock);
 	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index fa09d4b..83458f7 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -292,850 +292,30 @@
 
 struct error_info {
 	unsigned short code12;	/* 0x0302 looks better than 0x03,0x02 */
-	const char * text;
+	unsigned short size;
 };
 
 /*
- * The canonical list of T10 Additional Sense Codes is available at:
- * http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
+ * There are 700+ entries in this table. To save space, we don't store
+ * (code, pointer) pairs, which would make sizeof(struct
+ * error_info)==16 on 64 bits. Rather, the second element just stores
+ * the size (including \0) of the corresponding string, and we use the
+ * sum of these to get the appropriate offset into additional_text
+ * defined below. This approach saves 12 bytes per entry.
  */
-
 static const struct error_info additional[] =
 {
-	{0x0000, "No additional sense information"},
-	{0x0001, "Filemark detected"},
-	{0x0002, "End-of-partition/medium detected"},
-	{0x0003, "Setmark detected"},
-	{0x0004, "Beginning-of-partition/medium detected"},
-	{0x0005, "End-of-data detected"},
-	{0x0006, "I/O process terminated"},
-	{0x0007, "Programmable early warning detected"},
-	{0x0011, "Audio play operation in progress"},
-	{0x0012, "Audio play operation paused"},
-	{0x0013, "Audio play operation successfully completed"},
-	{0x0014, "Audio play operation stopped due to error"},
-	{0x0015, "No current audio status to return"},
-	{0x0016, "Operation in progress"},
-	{0x0017, "Cleaning requested"},
-	{0x0018, "Erase operation in progress"},
-	{0x0019, "Locate operation in progress"},
-	{0x001A, "Rewind operation in progress"},
-	{0x001B, "Set capacity operation in progress"},
-	{0x001C, "Verify operation in progress"},
-	{0x001D, "ATA pass through information available"},
-	{0x001E, "Conflicting SA creation request"},
-	{0x001F, "Logical unit transitioning to another power condition"},
-	{0x0020, "Extended copy information available"},
-	{0x0021, "Atomic command aborted due to ACA"},
-
-	{0x0100, "No index/sector signal"},
-
-	{0x0200, "No seek complete"},
-
-	{0x0300, "Peripheral device write fault"},
-	{0x0301, "No write current"},
-	{0x0302, "Excessive write errors"},
-
-	{0x0400, "Logical unit not ready, cause not reportable"},
-	{0x0401, "Logical unit is in process of becoming ready"},
-	{0x0402, "Logical unit not ready, initializing command required"},
-	{0x0403, "Logical unit not ready, manual intervention required"},
-	{0x0404, "Logical unit not ready, format in progress"},
-	{0x0405, "Logical unit not ready, rebuild in progress"},
-	{0x0406, "Logical unit not ready, recalculation in progress"},
-	{0x0407, "Logical unit not ready, operation in progress"},
-	{0x0408, "Logical unit not ready, long write in progress"},
-	{0x0409, "Logical unit not ready, self-test in progress"},
-	{0x040A, "Logical unit not accessible, asymmetric access state "
-	 "transition"},
-	{0x040B, "Logical unit not accessible, target port in standby state"},
-	{0x040C, "Logical unit not accessible, target port in unavailable "
-	 "state"},
-	{0x040D, "Logical unit not ready, structure check required"},
-	{0x040E, "Logical unit not ready, security session in progress"},
-	{0x0410, "Logical unit not ready, auxiliary memory not accessible"},
-	{0x0411, "Logical unit not ready, notify (enable spinup) required"},
-	{0x0412, "Logical unit not ready, offline"},
-	{0x0413, "Logical unit not ready, SA creation in progress"},
-	{0x0414, "Logical unit not ready, space allocation in progress"},
-	{0x0415, "Logical unit not ready, robotics disabled"},
-	{0x0416, "Logical unit not ready, configuration required"},
-	{0x0417, "Logical unit not ready, calibration required"},
-	{0x0418, "Logical unit not ready, a door is open"},
-	{0x0419, "Logical unit not ready, operating in sequential mode"},
-	{0x041A, "Logical unit not ready, start stop unit command in "
-	 "progress"},
-	{0x041B, "Logical unit not ready, sanitize in progress"},
-	{0x041C, "Logical unit not ready, additional power use not yet "
-	 "granted"},
-	{0x041D, "Logical unit not ready, configuration in progress"},
-	{0x041E, "Logical unit not ready, microcode activation required"},
-	{0x041F, "Logical unit not ready, microcode download required"},
-	{0x0420, "Logical unit not ready, logical unit reset required"},
-	{0x0421, "Logical unit not ready, hard reset required"},
-	{0x0422, "Logical unit not ready, power cycle required"},
-
-	{0x0500, "Logical unit does not respond to selection"},
-
-	{0x0600, "No reference position found"},
-
-	{0x0700, "Multiple peripheral devices selected"},
-
-	{0x0800, "Logical unit communication failure"},
-	{0x0801, "Logical unit communication time-out"},
-	{0x0802, "Logical unit communication parity error"},
-	{0x0803, "Logical unit communication CRC error (Ultra-DMA/32)"},
-	{0x0804, "Unreachable copy target"},
-
-	{0x0900, "Track following error"},
-	{0x0901, "Tracking servo failure"},
-	{0x0902, "Focus servo failure"},
-	{0x0903, "Spindle servo failure"},
-	{0x0904, "Head select fault"},
-	{0x0905, "Vibration induced tracking error"},
-
-	{0x0A00, "Error log overflow"},
-
-	{0x0B00, "Warning"},
-	{0x0B01, "Warning - specified temperature exceeded"},
-	{0x0B02, "Warning - enclosure degraded"},
-	{0x0B03, "Warning - background self-test failed"},
-	{0x0B04, "Warning - background pre-scan detected medium error"},
-	{0x0B05, "Warning - background medium scan detected medium error"},
-	{0x0B06, "Warning - non-volatile cache now volatile"},
-	{0x0B07, "Warning - degraded power to non-volatile cache"},
-	{0x0B08, "Warning - power loss expected"},
-	{0x0B09, "Warning - device statistics notification active"},
-
-	{0x0C00, "Write error"},
-	{0x0C01, "Write error - recovered with auto reallocation"},
-	{0x0C02, "Write error - auto reallocation failed"},
-	{0x0C03, "Write error - recommend reassignment"},
-	{0x0C04, "Compression check miscompare error"},
-	{0x0C05, "Data expansion occurred during compression"},
-	{0x0C06, "Block not compressible"},
-	{0x0C07, "Write error - recovery needed"},
-	{0x0C08, "Write error - recovery failed"},
-	{0x0C09, "Write error - loss of streaming"},
-	{0x0C0A, "Write error - padding blocks added"},
-	{0x0C0B, "Auxiliary memory write error"},
-	{0x0C0C, "Write error - unexpected unsolicited data"},
-	{0x0C0D, "Write error - not enough unsolicited data"},
-	{0x0C0E, "Multiple write errors"},
-	{0x0C0F, "Defects in error window"},
-	{0x0C10, "Incomplete multiple atomic write operations"},
-
-	{0x0D00, "Error detected by third party temporary initiator"},
-	{0x0D01, "Third party device failure"},
-	{0x0D02, "Copy target device not reachable"},
-	{0x0D03, "Incorrect copy target device type"},
-	{0x0D04, "Copy target device data underrun"},
-	{0x0D05, "Copy target device data overrun"},
-
-	{0x0E00, "Invalid information unit"},
-	{0x0E01, "Information unit too short"},
-	{0x0E02, "Information unit too long"},
-	{0x0E03, "Invalid field in command information unit"},
-
-	{0x1000, "Id CRC or ECC error"},
-	{0x1001, "Logical block guard check failed"},
-	{0x1002, "Logical block application tag check failed"},
-	{0x1003, "Logical block reference tag check failed"},
-	{0x1004, "Logical block protection error on recover buffered data"},
-	{0x1005, "Logical block protection method error"},
-
-	{0x1100, "Unrecovered read error"},
-	{0x1101, "Read retries exhausted"},
-	{0x1102, "Error too long to correct"},
-	{0x1103, "Multiple read errors"},
-	{0x1104, "Unrecovered read error - auto reallocate failed"},
-	{0x1105, "L-EC uncorrectable error"},
-	{0x1106, "CIRC unrecovered error"},
-	{0x1107, "Data re-synchronization error"},
-	{0x1108, "Incomplete block read"},
-	{0x1109, "No gap found"},
-	{0x110A, "Miscorrected error"},
-	{0x110B, "Unrecovered read error - recommend reassignment"},
-	{0x110C, "Unrecovered read error - recommend rewrite the data"},
-	{0x110D, "De-compression CRC error"},
-	{0x110E, "Cannot decompress using declared algorithm"},
-	{0x110F, "Error reading UPC/EAN number"},
-	{0x1110, "Error reading ISRC number"},
-	{0x1111, "Read error - loss of streaming"},
-	{0x1112, "Auxiliary memory read error"},
-	{0x1113, "Read error - failed retransmission request"},
-	{0x1114, "Read error - lba marked bad by application client"},
-	{0x1115, "Write after sanitize required"},
-
-	{0x1200, "Address mark not found for id field"},
-
-	{0x1300, "Address mark not found for data field"},
-
-	{0x1400, "Recorded entity not found"},
-	{0x1401, "Record not found"},
-	{0x1402, "Filemark or setmark not found"},
-	{0x1403, "End-of-data not found"},
-	{0x1404, "Block sequence error"},
-	{0x1405, "Record not found - recommend reassignment"},
-	{0x1406, "Record not found - data auto-reallocated"},
-	{0x1407, "Locate operation failure"},
-
-	{0x1500, "Random positioning error"},
-	{0x1501, "Mechanical positioning error"},
-	{0x1502, "Positioning error detected by read of medium"},
-
-	{0x1600, "Data synchronization mark error"},
-	{0x1601, "Data sync error - data rewritten"},
-	{0x1602, "Data sync error - recommend rewrite"},
-	{0x1603, "Data sync error - data auto-reallocated"},
-	{0x1604, "Data sync error - recommend reassignment"},
-
-	{0x1700, "Recovered data with no error correction applied"},
-	{0x1701, "Recovered data with retries"},
-	{0x1702, "Recovered data with positive head offset"},
-	{0x1703, "Recovered data with negative head offset"},
-	{0x1704, "Recovered data with retries and/or circ applied"},
-	{0x1705, "Recovered data using previous sector id"},
-	{0x1706, "Recovered data without ECC - data auto-reallocated"},
-	{0x1707, "Recovered data without ECC - recommend reassignment"},
-	{0x1708, "Recovered data without ECC - recommend rewrite"},
-	{0x1709, "Recovered data without ECC - data rewritten"},
-
-	{0x1800, "Recovered data with error correction applied"},
-	{0x1801, "Recovered data with error corr. & retries applied"},
-	{0x1802, "Recovered data - data auto-reallocated"},
-	{0x1803, "Recovered data with CIRC"},
-	{0x1804, "Recovered data with L-EC"},
-	{0x1805, "Recovered data - recommend reassignment"},
-	{0x1806, "Recovered data - recommend rewrite"},
-	{0x1807, "Recovered data with ECC - data rewritten"},
-	{0x1808, "Recovered data with linking"},
-
-	{0x1900, "Defect list error"},
-	{0x1901, "Defect list not available"},
-	{0x1902, "Defect list error in primary list"},
-	{0x1903, "Defect list error in grown list"},
-
-	{0x1A00, "Parameter list length error"},
-
-	{0x1B00, "Synchronous data transfer error"},
-
-	{0x1C00, "Defect list not found"},
-	{0x1C01, "Primary defect list not found"},
-	{0x1C02, "Grown defect list not found"},
-
-	{0x1D00, "Miscompare during verify operation"},
-	{0x1D01, "Miscompare verify of unmapped LBA"},
-
-	{0x1E00, "Recovered id with ECC correction"},
-
-	{0x1F00, "Partial defect list transfer"},
-
-	{0x2000, "Invalid command operation code"},
-	{0x2001, "Access denied - initiator pending-enrolled"},
-	{0x2002, "Access denied - no access rights"},
-	{0x2003, "Access denied - invalid mgmt id key"},
-	{0x2004, "Illegal command while in write capable state"},
-	{0x2005, "Obsolete"},
-	{0x2006, "Illegal command while in explicit address mode"},
-	{0x2007, "Illegal command while in implicit address mode"},
-	{0x2008, "Access denied - enrollment conflict"},
-	{0x2009, "Access denied - invalid LU identifier"},
-	{0x200A, "Access denied - invalid proxy token"},
-	{0x200B, "Access denied - ACL LUN conflict"},
-	{0x200C, "Illegal command when not in append-only mode"},
-
-	{0x2100, "Logical block address out of range"},
-	{0x2101, "Invalid element address"},
-	{0x2102, "Invalid address for write"},
-	{0x2103, "Invalid write crossing layer jump"},
-	{0x2104, "Unaligned write command"},
-	{0x2105, "Write boundary violation"},
-	{0x2106, "Attempt to read invalid data"},
-	{0x2107, "Read boundary violation"},
-
-	{0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"},
-
-	{0x2300, "Invalid token operation, cause not reportable"},
-	{0x2301, "Invalid token operation, unsupported token type"},
-	{0x2302, "Invalid token operation, remote token usage not supported"},
-	{0x2303, "Invalid token operation, remote rod token creation not "
-	 "supported"},
-	{0x2304, "Invalid token operation, token unknown"},
-	{0x2305, "Invalid token operation, token corrupt"},
-	{0x2306, "Invalid token operation, token revoked"},
-	{0x2307, "Invalid token operation, token expired"},
-	{0x2308, "Invalid token operation, token cancelled"},
-	{0x2309, "Invalid token operation, token deleted"},
-	{0x230A, "Invalid token operation, invalid token length"},
-
-	{0x2400, "Invalid field in cdb"},
-	{0x2401, "CDB decryption error"},
-	{0x2402, "Obsolete"},
-	{0x2403, "Obsolete"},
-	{0x2404, "Security audit value frozen"},
-	{0x2405, "Security working key frozen"},
-	{0x2406, "Nonce not unique"},
-	{0x2407, "Nonce timestamp out of range"},
-	{0x2408, "Invalid XCDB"},
-
-	{0x2500, "Logical unit not supported"},
-
-	{0x2600, "Invalid field in parameter list"},
-	{0x2601, "Parameter not supported"},
-	{0x2602, "Parameter value invalid"},
-	{0x2603, "Threshold parameters not supported"},
-	{0x2604, "Invalid release of persistent reservation"},
-	{0x2605, "Data decryption error"},
-	{0x2606, "Too many target descriptors"},
-	{0x2607, "Unsupported target descriptor type code"},
-	{0x2608, "Too many segment descriptors"},
-	{0x2609, "Unsupported segment descriptor type code"},
-	{0x260A, "Unexpected inexact segment"},
-	{0x260B, "Inline data length exceeded"},
-	{0x260C, "Invalid operation for copy source or destination"},
-	{0x260D, "Copy segment granularity violation"},
-	{0x260E, "Invalid parameter while port is enabled"},
-	{0x260F, "Invalid data-out buffer integrity check value"},
-	{0x2610, "Data decryption key fail limit reached"},
-	{0x2611, "Incomplete key-associated data set"},
-	{0x2612, "Vendor specific key reference not found"},
-
-	{0x2700, "Write protected"},
-	{0x2701, "Hardware write protected"},
-	{0x2702, "Logical unit software write protected"},
-	{0x2703, "Associated write protect"},
-	{0x2704, "Persistent write protect"},
-	{0x2705, "Permanent write protect"},
-	{0x2706, "Conditional write protect"},
-	{0x2707, "Space allocation failed write protect"},
-	{0x2708, "Zone is read only"},
-
-	{0x2800, "Not ready to ready change, medium may have changed"},
-	{0x2801, "Import or export element accessed"},
-	{0x2802, "Format-layer may have changed"},
-	{0x2803, "Import/export element accessed, medium changed"},
-
-	{0x2900, "Power on, reset, or bus device reset occurred"},
-	{0x2901, "Power on occurred"},
-	{0x2902, "Scsi bus reset occurred"},
-	{0x2903, "Bus device reset function occurred"},
-	{0x2904, "Device internal reset"},
-	{0x2905, "Transceiver mode changed to single-ended"},
-	{0x2906, "Transceiver mode changed to lvd"},
-	{0x2907, "I_T nexus loss occurred"},
-
-	{0x2A00, "Parameters changed"},
-	{0x2A01, "Mode parameters changed"},
-	{0x2A02, "Log parameters changed"},
-	{0x2A03, "Reservations preempted"},
-	{0x2A04, "Reservations released"},
-	{0x2A05, "Registrations preempted"},
-	{0x2A06, "Asymmetric access state changed"},
-	{0x2A07, "Implicit asymmetric access state transition failed"},
-	{0x2A08, "Priority changed"},
-	{0x2A09, "Capacity data has changed"},
-	{0x2A0A, "Error history I_T nexus cleared"},
-	{0x2A0B, "Error history snapshot released"},
-	{0x2A0C, "Error recovery attributes have changed"},
-	{0x2A0D, "Data encryption capabilities changed"},
-	{0x2A10, "Timestamp changed"},
-	{0x2A11, "Data encryption parameters changed by another i_t nexus"},
-	{0x2A12, "Data encryption parameters changed by vendor specific "
-		 "event"},
-	{0x2A13, "Data encryption key instance counter has changed"},
-	{0x2A14, "SA creation capabilities data has changed"},
-	{0x2A15, "Medium removal prevention preempted"},
-
-	{0x2B00, "Copy cannot execute since host cannot disconnect"},
-
-	{0x2C00, "Command sequence error"},
-	{0x2C01, "Too many windows specified"},
-	{0x2C02, "Invalid combination of windows specified"},
-	{0x2C03, "Current program area is not empty"},
-	{0x2C04, "Current program area is empty"},
-	{0x2C05, "Illegal power condition request"},
-	{0x2C06, "Persistent prevent conflict"},
-	{0x2C07, "Previous busy status"},
-	{0x2C08, "Previous task set full status"},
-	{0x2C09, "Previous reservation conflict status"},
-	{0x2C0A, "Partition or collection contains user objects"},
-	{0x2C0B, "Not reserved"},
-	{0x2C0C, "Orwrite generation does not match"},
-	{0x2C0D, "Reset write pointer not allowed"},
-	{0x2C0E, "Zone is offline"},
-
-	{0x2D00, "Overwrite error on update in place"},
-
-	{0x2E00, "Insufficient time for operation"},
-	{0x2E01, "Command timeout before processing"},
-	{0x2E02, "Command timeout during processing"},
-	{0x2E03, "Command timeout during processing due to error recovery"},
-
-	{0x2F00, "Commands cleared by another initiator"},
-	{0x2F01, "Commands cleared by power loss notification"},
-	{0x2F02, "Commands cleared by device server"},
-	{0x2F03, "Some commands cleared by queuing layer event"},
-
-	{0x3000, "Incompatible medium installed"},
-	{0x3001, "Cannot read medium - unknown format"},
-	{0x3002, "Cannot read medium - incompatible format"},
-	{0x3003, "Cleaning cartridge installed"},
-	{0x3004, "Cannot write medium - unknown format"},
-	{0x3005, "Cannot write medium - incompatible format"},
-	{0x3006, "Cannot format medium - incompatible medium"},
-	{0x3007, "Cleaning failure"},
-	{0x3008, "Cannot write - application code mismatch"},
-	{0x3009, "Current session not fixated for append"},
-	{0x300A, "Cleaning request rejected"},
-	{0x300C, "WORM medium - overwrite attempted"},
-	{0x300D, "WORM medium - integrity check"},
-	{0x3010, "Medium not formatted"},
-	{0x3011, "Incompatible volume type"},
-	{0x3012, "Incompatible volume qualifier"},
-	{0x3013, "Cleaning volume expired"},
-
-	{0x3100, "Medium format corrupted"},
-	{0x3101, "Format command failed"},
-	{0x3102, "Zoned formatting failed due to spare linking"},
-	{0x3103, "Sanitize command failed"},
-
-	{0x3200, "No defect spare location available"},
-	{0x3201, "Defect list update failure"},
-
-	{0x3300, "Tape length error"},
-
-	{0x3400, "Enclosure failure"},
-
-	{0x3500, "Enclosure services failure"},
-	{0x3501, "Unsupported enclosure function"},
-	{0x3502, "Enclosure services unavailable"},
-	{0x3503, "Enclosure services transfer failure"},
-	{0x3504, "Enclosure services transfer refused"},
-	{0x3505, "Enclosure services checksum error"},
-
-	{0x3600, "Ribbon, ink, or toner failure"},
-
-	{0x3700, "Rounded parameter"},
-
-	{0x3800, "Event status notification"},
-	{0x3802, "Esn - power management class event"},
-	{0x3804, "Esn - media class event"},
-	{0x3806, "Esn - device busy class event"},
-	{0x3807, "Thin Provisioning soft threshold reached"},
-
-	{0x3900, "Saving parameters not supported"},
-
-	{0x3A00, "Medium not present"},
-	{0x3A01, "Medium not present - tray closed"},
-	{0x3A02, "Medium not present - tray open"},
-	{0x3A03, "Medium not present - loadable"},
-	{0x3A04, "Medium not present - medium auxiliary memory accessible"},
-
-	{0x3B00, "Sequential positioning error"},
-	{0x3B01, "Tape position error at beginning-of-medium"},
-	{0x3B02, "Tape position error at end-of-medium"},
-	{0x3B03, "Tape or electronic vertical forms unit not ready"},
-	{0x3B04, "Slew failure"},
-	{0x3B05, "Paper jam"},
-	{0x3B06, "Failed to sense top-of-form"},
-	{0x3B07, "Failed to sense bottom-of-form"},
-	{0x3B08, "Reposition error"},
-	{0x3B09, "Read past end of medium"},
-	{0x3B0A, "Read past beginning of medium"},
-	{0x3B0B, "Position past end of medium"},
-	{0x3B0C, "Position past beginning of medium"},
-	{0x3B0D, "Medium destination element full"},
-	{0x3B0E, "Medium source element empty"},
-	{0x3B0F, "End of medium reached"},
-	{0x3B11, "Medium magazine not accessible"},
-	{0x3B12, "Medium magazine removed"},
-	{0x3B13, "Medium magazine inserted"},
-	{0x3B14, "Medium magazine locked"},
-	{0x3B15, "Medium magazine unlocked"},
-	{0x3B16, "Mechanical positioning or changer error"},
-	{0x3B17, "Read past end of user object"},
-	{0x3B18, "Element disabled"},
-	{0x3B19, "Element enabled"},
-	{0x3B1A, "Data transfer device removed"},
-	{0x3B1B, "Data transfer device inserted"},
-	{0x3B1C, "Too many logical objects on partition to support "
-	 "operation"},
-
-	{0x3D00, "Invalid bits in identify message"},
-
-	{0x3E00, "Logical unit has not self-configured yet"},
-	{0x3E01, "Logical unit failure"},
-	{0x3E02, "Timeout on logical unit"},
-	{0x3E03, "Logical unit failed self-test"},
-	{0x3E04, "Logical unit unable to update self-test log"},
-
-	{0x3F00, "Target operating conditions have changed"},
-	{0x3F01, "Microcode has been changed"},
-	{0x3F02, "Changed operating definition"},
-	{0x3F03, "Inquiry data has changed"},
-	{0x3F04, "Component device attached"},
-	{0x3F05, "Device identifier changed"},
-	{0x3F06, "Redundancy group created or modified"},
-	{0x3F07, "Redundancy group deleted"},
-	{0x3F08, "Spare created or modified"},
-	{0x3F09, "Spare deleted"},
-	{0x3F0A, "Volume set created or modified"},
-	{0x3F0B, "Volume set deleted"},
-	{0x3F0C, "Volume set deassigned"},
-	{0x3F0D, "Volume set reassigned"},
-	{0x3F0E, "Reported luns data has changed"},
-	{0x3F0F, "Echo buffer overwritten"},
-	{0x3F10, "Medium loadable"},
-	{0x3F11, "Medium auxiliary memory accessible"},
-	{0x3F12, "iSCSI IP address added"},
-	{0x3F13, "iSCSI IP address removed"},
-	{0x3F14, "iSCSI IP address changed"},
-	{0x3F15, "Inspect referrals sense descriptors"},
-	{0x3F16, "Microcode has been changed without reset"},
-/*
- *	{0x40NN, "Ram failure"},
- *	{0x40NN, "Diagnostic failure on component nn"},
- *	{0x41NN, "Data path failure"},
- *	{0x42NN, "Power-on or self-test failure"},
- */
-	{0x4300, "Message error"},
-
-	{0x4400, "Internal target failure"},
-	{0x4401, "Persistent reservation information lost"},
-	{0x4471, "ATA device failed set features"},
-
-	{0x4500, "Select or reselect failure"},
-
-	{0x4600, "Unsuccessful soft reset"},
-
-	{0x4700, "Scsi parity error"},
-	{0x4701, "Data phase CRC error detected"},
-	{0x4702, "Scsi parity error detected during st data phase"},
-	{0x4703, "Information unit iuCRC error detected"},
-	{0x4704, "Asynchronous information protection error detected"},
-	{0x4705, "Protocol service CRC error"},
-	{0x4706, "Phy test function in progress"},
-	{0x477f, "Some commands cleared by iSCSI Protocol event"},
-
-	{0x4800, "Initiator detected error message received"},
-
-	{0x4900, "Invalid message error"},
-
-	{0x4A00, "Command phase error"},
-
-	{0x4B00, "Data phase error"},
-	{0x4B01, "Invalid target port transfer tag received"},
-	{0x4B02, "Too much write data"},
-	{0x4B03, "Ack/nak timeout"},
-	{0x4B04, "Nak received"},
-	{0x4B05, "Data offset error"},
-	{0x4B06, "Initiator response timeout"},
-	{0x4B07, "Connection lost"},
-	{0x4B08, "Data-in buffer overflow - data buffer size"},
-	{0x4B09, "Data-in buffer overflow - data buffer descriptor area"},
-	{0x4B0A, "Data-in buffer error"},
-	{0x4B0B, "Data-out buffer overflow - data buffer size"},
-	{0x4B0C, "Data-out buffer overflow - data buffer descriptor area"},
-	{0x4B0D, "Data-out buffer error"},
-	{0x4B0E, "PCIe fabric error"},
-	{0x4B0F, "PCIe completion timeout"},
-	{0x4B10, "PCIe completer abort"},
-	{0x4B11, "PCIe poisoned tlp received"},
-	{0x4B12, "PCIe eCRC check failed"},
-	{0x4B13, "PCIe unsupported request"},
-	{0x4B14, "PCIe acs violation"},
-	{0x4B15, "PCIe tlp prefix blocked"},
-
-	{0x4C00, "Logical unit failed self-configuration"},
-/*
- *	{0x4DNN, "Tagged overlapped commands (nn = queue tag)"},
- */
-	{0x4E00, "Overlapped commands attempted"},
-
-	{0x5000, "Write append error"},
-	{0x5001, "Write append position error"},
-	{0x5002, "Position error related to timing"},
-
-	{0x5100, "Erase failure"},
-	{0x5101, "Erase failure - incomplete erase operation detected"},
-
-	{0x5200, "Cartridge fault"},
-
-	{0x5300, "Media load or eject failed"},
-	{0x5301, "Unload tape failure"},
-	{0x5302, "Medium removal prevented"},
-	{0x5303, "Medium removal prevented by data transfer element"},
-	{0x5304, "Medium thread or unthread failure"},
-	{0x5305, "Volume identifier invalid"},
-	{0x5306, "Volume identifier missing"},
-	{0x5307, "Duplicate volume identifier"},
-	{0x5308, "Element status unknown"},
-	{0x5309, "Data transfer device error - load failed"},
-	{0x530a, "Data transfer device error - unload failed"},
-	{0x530b, "Data transfer device error - unload missing"},
-	{0x530c, "Data transfer device error - eject failed"},
-	{0x530d, "Data transfer device error - library communication failed"},
-
-	{0x5400, "Scsi to host system interface failure"},
-
-	{0x5500, "System resource failure"},
-	{0x5501, "System buffer full"},
-	{0x5502, "Insufficient reservation resources"},
-	{0x5503, "Insufficient resources"},
-	{0x5504, "Insufficient registration resources"},
-	{0x5505, "Insufficient access control resources"},
-	{0x5506, "Auxiliary memory out of space"},
-	{0x5507, "Quota error"},
-	{0x5508, "Maximum number of supplemental decryption keys exceeded"},
-	{0x5509, "Medium auxiliary memory not accessible"},
-	{0x550A, "Data currently unavailable"},
-	{0x550B, "Insufficient power for operation"},
-	{0x550C, "Insufficient resources to create rod"},
-	{0x550D, "Insufficient resources to create rod token"},
-	{0x550E, "Insufficient zone resources"},
-
-	{0x5700, "Unable to recover table-of-contents"},
-
-	{0x5800, "Generation does not exist"},
-
-	{0x5900, "Updated block read"},
-
-	{0x5A00, "Operator request or state change input"},
-	{0x5A01, "Operator medium removal request"},
-	{0x5A02, "Operator selected write protect"},
-	{0x5A03, "Operator selected write permit"},
-
-	{0x5B00, "Log exception"},
-	{0x5B01, "Threshold condition met"},
-	{0x5B02, "Log counter at maximum"},
-	{0x5B03, "Log list codes exhausted"},
-
-	{0x5C00, "Rpl status change"},
-	{0x5C01, "Spindles synchronized"},
-	{0x5C02, "Spindles not synchronized"},
-
-	{0x5D00, "Failure prediction threshold exceeded"},
-	{0x5D01, "Media failure prediction threshold exceeded"},
-	{0x5D02, "Logical unit failure prediction threshold exceeded"},
-	{0x5D03, "Spare area exhaustion prediction threshold exceeded"},
-	{0x5D10, "Hardware impending failure general hard drive failure"},
-	{0x5D11, "Hardware impending failure drive error rate too high"},
-	{0x5D12, "Hardware impending failure data error rate too high"},
-	{0x5D13, "Hardware impending failure seek error rate too high"},
-	{0x5D14, "Hardware impending failure too many block reassigns"},
-	{0x5D15, "Hardware impending failure access times too high"},
-	{0x5D16, "Hardware impending failure start unit times too high"},
-	{0x5D17, "Hardware impending failure channel parametrics"},
-	{0x5D18, "Hardware impending failure controller detected"},
-	{0x5D19, "Hardware impending failure throughput performance"},
-	{0x5D1A, "Hardware impending failure seek time performance"},
-	{0x5D1B, "Hardware impending failure spin-up retry count"},
-	{0x5D1C, "Hardware impending failure drive calibration retry count"},
-	{0x5D20, "Controller impending failure general hard drive failure"},
-	{0x5D21, "Controller impending failure drive error rate too high"},
-	{0x5D22, "Controller impending failure data error rate too high"},
-	{0x5D23, "Controller impending failure seek error rate too high"},
-	{0x5D24, "Controller impending failure too many block reassigns"},
-	{0x5D25, "Controller impending failure access times too high"},
-	{0x5D26, "Controller impending failure start unit times too high"},
-	{0x5D27, "Controller impending failure channel parametrics"},
-	{0x5D28, "Controller impending failure controller detected"},
-	{0x5D29, "Controller impending failure throughput performance"},
-	{0x5D2A, "Controller impending failure seek time performance"},
-	{0x5D2B, "Controller impending failure spin-up retry count"},
-	{0x5D2C, "Controller impending failure drive calibration retry count"},
-	{0x5D30, "Data channel impending failure general hard drive failure"},
-	{0x5D31, "Data channel impending failure drive error rate too high"},
-	{0x5D32, "Data channel impending failure data error rate too high"},
-	{0x5D33, "Data channel impending failure seek error rate too high"},
-	{0x5D34, "Data channel impending failure too many block reassigns"},
-	{0x5D35, "Data channel impending failure access times too high"},
-	{0x5D36, "Data channel impending failure start unit times too high"},
-	{0x5D37, "Data channel impending failure channel parametrics"},
-	{0x5D38, "Data channel impending failure controller detected"},
-	{0x5D39, "Data channel impending failure throughput performance"},
-	{0x5D3A, "Data channel impending failure seek time performance"},
-	{0x5D3B, "Data channel impending failure spin-up retry count"},
-	{0x5D3C, "Data channel impending failure drive calibration retry "
-	 "count"},
-	{0x5D40, "Servo impending failure general hard drive failure"},
-	{0x5D41, "Servo impending failure drive error rate too high"},
-	{0x5D42, "Servo impending failure data error rate too high"},
-	{0x5D43, "Servo impending failure seek error rate too high"},
-	{0x5D44, "Servo impending failure too many block reassigns"},
-	{0x5D45, "Servo impending failure access times too high"},
-	{0x5D46, "Servo impending failure start unit times too high"},
-	{0x5D47, "Servo impending failure channel parametrics"},
-	{0x5D48, "Servo impending failure controller detected"},
-	{0x5D49, "Servo impending failure throughput performance"},
-	{0x5D4A, "Servo impending failure seek time performance"},
-	{0x5D4B, "Servo impending failure spin-up retry count"},
-	{0x5D4C, "Servo impending failure drive calibration retry count"},
-	{0x5D50, "Spindle impending failure general hard drive failure"},
-	{0x5D51, "Spindle impending failure drive error rate too high"},
-	{0x5D52, "Spindle impending failure data error rate too high"},
-	{0x5D53, "Spindle impending failure seek error rate too high"},
-	{0x5D54, "Spindle impending failure too many block reassigns"},
-	{0x5D55, "Spindle impending failure access times too high"},
-	{0x5D56, "Spindle impending failure start unit times too high"},
-	{0x5D57, "Spindle impending failure channel parametrics"},
-	{0x5D58, "Spindle impending failure controller detected"},
-	{0x5D59, "Spindle impending failure throughput performance"},
-	{0x5D5A, "Spindle impending failure seek time performance"},
-	{0x5D5B, "Spindle impending failure spin-up retry count"},
-	{0x5D5C, "Spindle impending failure drive calibration retry count"},
-	{0x5D60, "Firmware impending failure general hard drive failure"},
-	{0x5D61, "Firmware impending failure drive error rate too high"},
-	{0x5D62, "Firmware impending failure data error rate too high"},
-	{0x5D63, "Firmware impending failure seek error rate too high"},
-	{0x5D64, "Firmware impending failure too many block reassigns"},
-	{0x5D65, "Firmware impending failure access times too high"},
-	{0x5D66, "Firmware impending failure start unit times too high"},
-	{0x5D67, "Firmware impending failure channel parametrics"},
-	{0x5D68, "Firmware impending failure controller detected"},
-	{0x5D69, "Firmware impending failure throughput performance"},
-	{0x5D6A, "Firmware impending failure seek time performance"},
-	{0x5D6B, "Firmware impending failure spin-up retry count"},
-	{0x5D6C, "Firmware impending failure drive calibration retry count"},
-	{0x5DFF, "Failure prediction threshold exceeded (false)"},
-
-	{0x5E00, "Low power condition on"},
-	{0x5E01, "Idle condition activated by timer"},
-	{0x5E02, "Standby condition activated by timer"},
-	{0x5E03, "Idle condition activated by command"},
-	{0x5E04, "Standby condition activated by command"},
-	{0x5E05, "Idle_b condition activated by timer"},
-	{0x5E06, "Idle_b condition activated by command"},
-	{0x5E07, "Idle_c condition activated by timer"},
-	{0x5E08, "Idle_c condition activated by command"},
-	{0x5E09, "Standby_y condition activated by timer"},
-	{0x5E0A, "Standby_y condition activated by command"},
-	{0x5E41, "Power state change to active"},
-	{0x5E42, "Power state change to idle"},
-	{0x5E43, "Power state change to standby"},
-	{0x5E45, "Power state change to sleep"},
-	{0x5E47, "Power state change to device control"},
-
-	{0x6000, "Lamp failure"},
-
-	{0x6100, "Video acquisition error"},
-	{0x6101, "Unable to acquire video"},
-	{0x6102, "Out of focus"},
-
-	{0x6200, "Scan head positioning error"},
-
-	{0x6300, "End of user area encountered on this track"},
-	{0x6301, "Packet does not fit in available space"},
-
-	{0x6400, "Illegal mode for this track"},
-	{0x6401, "Invalid packet size"},
-
-	{0x6500, "Voltage fault"},
-
-	{0x6600, "Automatic document feeder cover up"},
-	{0x6601, "Automatic document feeder lift up"},
-	{0x6602, "Document jam in automatic document feeder"},
-	{0x6603, "Document miss feed automatic in document feeder"},
-
-	{0x6700, "Configuration failure"},
-	{0x6701, "Configuration of incapable logical units failed"},
-	{0x6702, "Add logical unit failed"},
-	{0x6703, "Modification of logical unit failed"},
-	{0x6704, "Exchange of logical unit failed"},
-	{0x6705, "Remove of logical unit failed"},
-	{0x6706, "Attachment of logical unit failed"},
-	{0x6707, "Creation of logical unit failed"},
-	{0x6708, "Assign failure occurred"},
-	{0x6709, "Multiply assigned logical unit"},
-	{0x670A, "Set target port groups command failed"},
-	{0x670B, "ATA device feature not enabled"},
-
-	{0x6800, "Logical unit not configured"},
-	{0x6801, "Subsidiary logical unit not configured"},
-
-	{0x6900, "Data loss on logical unit"},
-	{0x6901, "Multiple logical unit failures"},
-	{0x6902, "Parity/data mismatch"},
-
-	{0x6A00, "Informational, refer to log"},
-
-	{0x6B00, "State change has occurred"},
-	{0x6B01, "Redundancy level got better"},
-	{0x6B02, "Redundancy level got worse"},
-
-	{0x6C00, "Rebuild failure occurred"},
-
-	{0x6D00, "Recalculate failure occurred"},
-
-	{0x6E00, "Command to logical unit failed"},
-
-	{0x6F00, "Copy protection key exchange failure - authentication "
-	 "failure"},
-	{0x6F01, "Copy protection key exchange failure - key not present"},
-	{0x6F02, "Copy protection key exchange failure - key not established"},
-	{0x6F03, "Read of scrambled sector without authentication"},
-	{0x6F04, "Media region code is mismatched to logical unit region"},
-	{0x6F05, "Drive region must be permanent/region reset count error"},
-	{0x6F06, "Insufficient block count for binding nonce recording"},
-	{0x6F07, "Conflict in binding nonce recording"},
-/*
- *	{0x70NN, "Decompression exception short algorithm id of nn"},
- */
-	{0x7100, "Decompression exception long algorithm id"},
-
-	{0x7200, "Session fixation error"},
-	{0x7201, "Session fixation error writing lead-in"},
-	{0x7202, "Session fixation error writing lead-out"},
-	{0x7203, "Session fixation error - incomplete track in session"},
-	{0x7204, "Empty or partially written reserved track"},
-	{0x7205, "No more track reservations allowed"},
-	{0x7206, "RMZ extension is not allowed"},
-	{0x7207, "No more test zone extensions are allowed"},
-
-	{0x7300, "Cd control error"},
-	{0x7301, "Power calibration area almost full"},
-	{0x7302, "Power calibration area is full"},
-	{0x7303, "Power calibration area error"},
-	{0x7304, "Program memory area update failure"},
-	{0x7305, "Program memory area is full"},
-	{0x7306, "RMA/PMA is almost full"},
-	{0x7310, "Current power calibration area almost full"},
-	{0x7311, "Current power calibration area is full"},
-	{0x7317, "RDZ is full"},
-
-	{0x7400, "Security error"},
-	{0x7401, "Unable to decrypt data"},
-	{0x7402, "Unencrypted data encountered while decrypting"},
-	{0x7403, "Incorrect data encryption key"},
-	{0x7404, "Cryptographic integrity validation failed"},
-	{0x7405, "Error decrypting data"},
-	{0x7406, "Unknown signature verification key"},
-	{0x7407, "Encryption parameters not useable"},
-	{0x7408, "Digital signature validation failure"},
-	{0x7409, "Encryption mode mismatch on read"},
-	{0x740A, "Encrypted block not raw read enabled"},
-	{0x740B, "Incorrect Encryption parameters"},
-	{0x740C, "Unable to decrypt parameter list"},
-	{0x740D, "Encryption algorithm disabled"},
-	{0x7410, "SA creation parameter value invalid"},
-	{0x7411, "SA creation parameter value rejected"},
-	{0x7412, "Invalid SA usage"},
-	{0x7421, "Data Encryption configuration prevented"},
-	{0x7430, "SA creation parameter not supported"},
-	{0x7440, "Authentication failed"},
-	{0x7461, "External data encryption key manager access error"},
-	{0x7462, "External data encryption key manager error"},
-	{0x7463, "External data encryption key not found"},
-	{0x7464, "External data encryption request not authorized"},
-	{0x746E, "External data encryption control timeout"},
-	{0x746F, "External data encryption control error"},
-	{0x7471, "Logical unit access not authorized"},
-	{0x7479, "Security conflict in translated device"},
-
-	{0, NULL}
+#define SENSE_CODE(c, s) {c, sizeof(s)},
+#include "sense_codes.h"
+#undef SENSE_CODE
 };
 
+static const char *additional_text =
+#define SENSE_CODE(c, s) s "\0"
+#include "sense_codes.h"
+#undef SENSE_CODE
+	;
+
 struct error_info2 {
 	unsigned char code1, code2_min, code2_max;
 	const char * str;
@@ -1197,11 +377,14 @@
 {
 	int i;
 	unsigned short code = ((asc << 8) | ascq);
+	unsigned offset = 0;
 
 	*fmt = NULL;
-	for (i = 0; additional[i].text; i++)
+	for (i = 0; i < ARRAY_SIZE(additional); i++) {
 		if (additional[i].code12 == code)
-			return additional[i].text;
+			return additional_text + offset;
+		offset += additional[i].size;
+	}
 	for (i = 0; additional2[i].fmt; i++) {
 		if (additional2[i].code1 == asc &&
 		    ascq >= additional2[i].code2_min &&
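
The scheme introduced above in drivers/scsi/constants.c can be illustrated with a minimal, self-contained user-space sketch (names hypothetical, table abbreviated to three entries): each entry records only the length of its message, including the terminating NUL, all messages live in one concatenated string, and a lookup walks the table summing lengths to find the offset of the matching message.

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct entry {
		unsigned short code;
		unsigned short size;	/* strlen(text) + 1, i.e. sizeof(literal) */
	};

	/* In the kernel both arrays are generated in lockstep from an
	 * X-macro include; written out by hand here for clarity. */
	static const struct entry table[] = {
		{ 0x0000, sizeof("No additional sense information") },
		{ 0x0001, sizeof("Filemark detected") },
		{ 0x0002, sizeof("End-of-partition/medium detected") },
	};

	static const char table_text[] =
		"No additional sense information\0"
		"Filemark detected\0"
		"End-of-partition/medium detected\0";

	static const char *lookup(unsigned short code)
	{
		unsigned int i, offset = 0;

		for (i = 0; i < ARRAY_SIZE(table); i++) {
			if (table[i].code == code)
				return table_text + offset;
			offset += table[i].size;	/* skip string and its NUL */
		}
		return NULL;
	}

	int main(void)
	{
		printf("%s\n", lookup(0x0001));	/* prints "Filemark detected" */
		return 0;
	}

In the patch itself the entry array and the concatenated text are both produced from sense_codes.h via the SENSE_CODE() macro, so the codes, sizes and strings cannot drift apart.
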
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index f3bb7af..ead83a2 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -688,6 +688,7 @@
 {
 	struct flowi6 fl;
 
+	memset(&fl, 0, sizeof(fl));
 	if (saddr)
 		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
 	if (daddr)
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 35968bd..8fb9643 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -289,7 +289,7 @@
 		atomic64_set(&afu->room, room);
 		if (room)
 			goto write_rrin;
-		udelay(nretry);
+		udelay(1 << nretry);
 	} while (nretry++ < MC_ROOM_RETRY_CNT);
 
 	pr_err("%s: no cmd_room to send reset\n", __func__);
@@ -303,7 +303,7 @@
 		if (rrin != 0x1)
 			break;
 		/* Double delay each time */
-		udelay(2 << nretry);
+		udelay(1 << nretry);
 	} while (nretry++ < MC_ROOM_RETRY_CNT);
 }
 
@@ -338,7 +338,7 @@
 			atomic64_set(&afu->room, room);
 			if (room)
 				goto write_ioarrin;
-			udelay(nretry);
+			udelay(1 << nretry);
 		} while (nretry++ < MC_ROOM_RETRY_CNT);
 
 		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
@@ -352,7 +352,7 @@
 		 * afu->room.
 		 */
 		if (nretry++ < MC_ROOM_RETRY_CNT) {
-			udelay(nretry);
+			udelay(1 << nretry);
 			goto retry;
 		}
 
@@ -683,28 +683,23 @@
 }
 
 /**
- * term_mc() - terminates the master context
+ * term_intr() - disables all AFU interrupts
  * @cfg:	Internal structure associated with the host.
  * @level:	Depth of allocation, where to begin waterfall tear down.
  *
  * Safe to call with AFU/MC in partially allocated/initialized state.
  */
-static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
+static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
 {
-	int rc = 0;
 	struct afu *afu = cfg->afu;
 	struct device *dev = &cfg->dev->dev;
 
 	if (!afu || !cfg->mcctx) {
-		dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
-		       __func__);
+		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
 		return;
 	}
 
 	switch (level) {
-	case UNDO_START:
-		rc = cxl_stop_context(cfg->mcctx);
-		BUG_ON(rc);
 	case UNMAP_THREE:
 		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
 	case UNMAP_TWO:
@@ -713,12 +708,37 @@
 		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
 	case FREE_IRQ:
 		cxl_free_afu_irqs(cfg->mcctx);
-	case RELEASE_CONTEXT:
-		cfg->mcctx = NULL;
+		/* fall through */
+	case UNDO_NOOP:
+		/* No action required */
+		break;
 	}
 }
 
 /**
+ * term_mc() - terminates the master context
+ * @cfg:	Internal structure associated with the host.
+ * @level:	Depth of allocation, where to begin waterfall tear down.
+ *
+ * Safe to call with AFU/MC in partially allocated/initialized state.
+ */
+static void term_mc(struct cxlflash_cfg *cfg)
+{
+	int rc = 0;
+	struct afu *afu = cfg->afu;
+	struct device *dev = &cfg->dev->dev;
+
+	if (!afu || !cfg->mcctx) {
+		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
+		return;
+	}
+
+	rc = cxl_stop_context(cfg->mcctx);
+	WARN_ON(rc);
+	cfg->mcctx = NULL;
+}
+
+/**
  * term_afu() - terminates the AFU
  * @cfg:	Internal structure associated with the host.
  *
@@ -726,10 +746,20 @@
  */
 static void term_afu(struct cxlflash_cfg *cfg)
 {
+	/*
+	 * Tear down is carefully orchestrated to ensure
+	 * no interrupts can come in when the problem state
+	 * area is unmapped.
+	 *
+	 * 1) Disable all AFU interrupts
+	 * 2) Unmap the problem state area
+	 * 3) Stop the master context
+	 */
+	term_intr(cfg, UNMAP_THREE);
 	if (cfg->afu)
 		stop_afu(cfg);
 
-	term_mc(cfg, UNDO_START);
+	term_mc(cfg);
 
 	pr_debug("%s: returning\n", __func__);
 }
@@ -1597,41 +1627,24 @@
 }
 
 /**
- * init_mc() - create and register as the master context
+ * init_intr() - setup interrupt handlers for the master context
  * @cfg:	Internal structure associated with the host.
  *
  * Return: 0 on success, -errno on failure
  */
-static int init_mc(struct cxlflash_cfg *cfg)
+static enum undo_level init_intr(struct cxlflash_cfg *cfg,
+				 struct cxl_context *ctx)
 {
-	struct cxl_context *ctx;
-	struct device *dev = &cfg->dev->dev;
 	struct afu *afu = cfg->afu;
+	struct device *dev = &cfg->dev->dev;
 	int rc = 0;
-	enum undo_level level;
-
-	ctx = cxl_get_context(cfg->dev);
-	if (unlikely(!ctx))
-		return -ENOMEM;
-	cfg->mcctx = ctx;
-
-	/* Set it up as a master with the CXL */
-	cxl_set_master(ctx);
-
-	/* During initialization reset the AFU to start from a clean slate */
-	rc = cxl_afu_reset(cfg->mcctx);
-	if (unlikely(rc)) {
-		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
-			__func__, rc);
-		level = RELEASE_CONTEXT;
-		goto out;
-	}
+	enum undo_level level = UNDO_NOOP;
 
 	rc = cxl_allocate_afu_irqs(ctx, 3);
 	if (unlikely(rc)) {
 		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
 			__func__, rc);
-		level = RELEASE_CONTEXT;
+		level = UNDO_NOOP;
 		goto out;
 	}
 
@@ -1661,8 +1674,47 @@
 		level = UNMAP_TWO;
 		goto out;
 	}
+out:
+	return level;
+}
 
-	rc = 0;
+/**
+ * init_mc() - create and register as the master context
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_mc(struct cxlflash_cfg *cfg)
+{
+	struct cxl_context *ctx;
+	struct device *dev = &cfg->dev->dev;
+	int rc = 0;
+	enum undo_level level;
+
+	ctx = cxl_get_context(cfg->dev);
+	if (unlikely(!ctx)) {
+		rc = -ENOMEM;
+		goto ret;
+	}
+	cfg->mcctx = ctx;
+
+	/* Set it up as a master with the CXL */
+	cxl_set_master(ctx);
+
+	/* During initialization reset the AFU to start from a clean slate */
+	rc = cxl_afu_reset(cfg->mcctx);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
+			__func__, rc);
+		goto ret;
+	}
+
+	level = init_intr(cfg, ctx);
+	if (unlikely(level)) {
+		dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
 
 	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
 	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
@@ -1678,7 +1730,7 @@
 	pr_debug("%s: returning rc=%d\n", __func__, rc);
 	return rc;
 out:
-	term_mc(cfg, level);
+	term_intr(cfg, level);
 	goto ret;
 }
 
@@ -1751,7 +1803,8 @@
 err2:
 	kref_put(&afu->mapcount, afu_unmap);
 err1:
-	term_mc(cfg, UNDO_START);
+	term_intr(cfg, UNMAP_THREE);
+	term_mc(cfg);
 	goto out;
 }
 
@@ -2488,8 +2541,7 @@
 		if (unlikely(rc))
 			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
 				__func__, rc);
-		stop_afu(cfg);
-		term_mc(cfg, UNDO_START);
+		term_afu(cfg);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
 		cfg->state = STATE_FAILTERM;
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 0faed42..eb9d8f7 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -79,12 +79,11 @@
 #define WWPN_BUF_LEN	(WWPN_LEN + 1)
 
 enum undo_level {
-	RELEASE_CONTEXT = 0,
+	UNDO_NOOP = 0,
 	FREE_IRQ,
 	UNMAP_ONE,
 	UNMAP_TWO,
-	UNMAP_THREE,
-	UNDO_START
+	UNMAP_THREE
 };
 
 struct dev_dependent_vals {
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index d8a5cb3..ce15070 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1615,6 +1615,13 @@
  * place at the same time and the failure was due to CXL services being
  * unable to keep up.
  *
+ * As this routine is called on ioctl context, it holds the ioctl r/w
+ * semaphore that is used to drain ioctls in recovery scenarios. The
+ * implementation to achieve the pacing described above (a local mutex)
+ * requires that the ioctl r/w semaphore be dropped and reacquired to
+ * avoid a 3-way deadlock when multiple process recoveries operate in
+ * parallel.
+ *
  * Because a user can detect an error condition before the kernel, it is
  * quite possible for this routine to act as the kernel's EEH detection
  * source (MMIO read of mbox_r). Because of this, there is a window of
@@ -1642,9 +1649,17 @@
 	int rc = 0;
 
 	atomic_inc(&cfg->recovery_threads);
+	up_read(&cfg->ioctl_rwsem);
 	rc = mutex_lock_interruptible(mutex);
+	down_read(&cfg->ioctl_rwsem);
 	if (rc)
 		goto out;
+	rc = check_state(cfg);
+	if (rc) {
+		dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
+		rc = -ENODEV;
+		goto out;
+	}
 
 	dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
 		__func__, recover->reason, rctxid);
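
The locking note added above (drop the ioctl read semaphore before sleeping on the recovery mutex, retake it, then re-validate state) follows a general lock-ordering pattern. A minimal user-space sketch with POSIX locks, all names hypothetical, under the assumption that the caller already holds the read lock:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_rwlock_t ioctl_rwlock = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_mutex_t recovery_mutex = PTHREAD_MUTEX_INITIALIZER;
	static bool device_ok = true;	/* shared state, may change while unlocked */

	/* Called with ioctl_rwlock held for read. Drop it before blocking on
	 * the mutex so a writer (the drain path) can make progress, retake it,
	 * and re-check the state that may have changed in the window. */
	static int paced_recover(void)
	{
		int rc = 0;

		pthread_rwlock_unlock(&ioctl_rwlock);
		pthread_mutex_lock(&recovery_mutex);
		pthread_rwlock_rdlock(&ioctl_rwlock);

		if (!device_ok) {
			rc = -1;
			goto out;
		}

		/* ... recovery work paced by recovery_mutex goes here ... */
	out:
		pthread_mutex_unlock(&recovery_mutex);
		return rc;
	}

	int main(void)
	{
		pthread_rwlock_rdlock(&ioctl_rwlock);	/* as the ioctl path would */
		printf("recover rc=%d\n", paced_recover());
		pthread_rwlock_unlock(&ioctl_rwlock);
		return 0;
	}

The re-check after retaking the read lock matters because whatever the writer did while the lock was dropped may have invalidated the state the caller observed earlier, which is exactly what the added check_state() call guards against in the patch.
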
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index a404a41..752b5c9 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -190,15 +190,18 @@
 				      ALUA_FAILOVER_RETRIES, NULL, req_flags);
 }
 
-struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
-					 int group_id)
+static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
+						int group_id)
 {
 	struct alua_port_group *pg;
 
+	if (!id_str || !id_size || !strlen(id_str))
+		return NULL;
+
 	list_for_each_entry(pg, &port_group_list, node) {
 		if (pg->group_id != group_id)
 			continue;
-		if (pg->device_id_len != id_size)
+		if (!pg->device_id_len || pg->device_id_len != id_size)
 			continue;
 		if (strncmp(pg->device_id_str, id_str, id_size))
 			continue;
@@ -219,8 +222,8 @@
  * Allocate a new port_group structure for a given
  * device.
  */
-struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
-				      int group_id, int tpgs)
+static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
+					     int group_id, int tpgs)
 {
 	struct alua_port_group *pg, *tmp_pg;
 
@@ -232,14 +235,14 @@
 					    sizeof(pg->device_id_str));
 	if (pg->device_id_len <= 0) {
 		/*
-		 * Internal error: TPGS supported but no device
-		 * identifcation found. Disable ALUA support.
+		 * TPGS supported but no device identification found.
+		 * Generate private device identification.
 		 */
-		kfree(pg);
 		sdev_printk(KERN_INFO, sdev,
 			    "%s: No device descriptors found\n",
 			    ALUA_DH_NAME);
-		return ERR_PTR(-ENXIO);
+		pg->device_id_str[0] = '\0';
+		pg->device_id_len = 0;
 	}
 	pg->group_id = group_id;
 	pg->tpgs = tpgs;
@@ -354,9 +357,15 @@
 			return SCSI_DH_NOMEM;
 		return SCSI_DH_DEV_UNSUPP;
 	}
-	sdev_printk(KERN_INFO, sdev,
-		    "%s: device %s port group %x rel port %x\n",
-		    ALUA_DH_NAME, pg->device_id_str, group_id, rel_port);
+	if (pg->device_id_len)
+		sdev_printk(KERN_INFO, sdev,
+			    "%s: device %s port group %x rel port %x\n",
+			    ALUA_DH_NAME, pg->device_id_str,
+			    group_id, rel_port);
+	else
+		sdev_printk(KERN_INFO, sdev,
+			    "%s: port group %x rel port %x\n",
+			    ALUA_DH_NAME, group_id, rel_port);
 
 	/* Check for existing port group references */
 	spin_lock(&h->pg_lock);
@@ -532,6 +541,7 @@
 		return SCSI_DH_DEV_TEMP_BUSY;
 
  retry:
+	err = 0;
 	retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
 
 	if (retval) {
@@ -1112,9 +1122,9 @@
 	h->sdev = NULL;
 	spin_unlock(&h->pg_lock);
 	if (pg) {
-		spin_lock(&pg->lock);
+		spin_lock_irq(&pg->lock);
 		list_del_rcu(&h->node);
-		spin_unlock(&pg->lock);
+		spin_unlock_irq(&pg->lock);
 		kref_put(&pg->kref, release_port_group);
 	}
 	sdev->handler_data = NULL;
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 6c14e68..9b5a457 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -34,11 +34,14 @@
  * Definitions for the generic 5380 driver.
  */
 
-#define DONT_USE_INTR
-
 #define NCR5380_read(reg)		inb(instance->io_port + reg)
 #define NCR5380_write(reg, value)	outb(value, instance->io_port + reg)
 
+#define NCR5380_dma_xfer_len(instance, cmd, phase)	(0)
+#define NCR5380_dma_recv_setup(instance, dst, len)	(0)
+#define NCR5380_dma_send_setup(instance, src, len)	(0)
+#define NCR5380_dma_residual(instance)			(0)
+
 #define NCR5380_implementation_fields	/* none */
 
 #include "NCR5380.h"
@@ -62,7 +65,6 @@
 	.cmd_per_lun		= 2,
 	.use_clustering		= DISABLE_CLUSTERING,
 	.cmd_size		= NCR5380_CMD_SIZE,
-	.max_sectors		= 128,
 };
 
 static int dmx3191d_probe_one(struct pci_dev *pdev,
@@ -93,7 +95,7 @@
 	 */
 	shost->irq = NO_IRQ;
 
-	error = NCR5380_init(shost, FLAG_NO_PSEUDO_DMA);
+	error = NCR5380_init(shost, 0);
 	if (error)
 		goto out_host_put;
 
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index 6c736b0..459863f 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -1,6 +1,3 @@
-#define PSEUDO_DMA
-#define DONT_USE_INTR
-
 /*
  * DTC 3180/3280 driver, by
  *	Ray Van Tassle	rayvt@comm.mot.com
@@ -54,7 +51,6 @@
 #include <scsi/scsi_host.h>
 
 #include "dtc.h"
-#define AUTOPROBE_IRQ
 #include "NCR5380.h"
 
 /*
@@ -229,7 +225,7 @@
 		instance->base = addr;
 		((struct NCR5380_hostdata *)(instance)->hostdata)->base = base;
 
-		if (NCR5380_init(instance, FLAG_NO_DMA_FIXUP))
+		if (NCR5380_init(instance, FLAG_LATE_DMA_SETUP))
 			goto out_unregister;
 
 		NCR5380_maybe_reset_bus(instance);
@@ -244,9 +240,10 @@
 		if (instance->irq == 255)
 			instance->irq = NO_IRQ;
 
-#ifndef DONT_USE_INTR
 		/* With interrupts enabled, it will sometimes hang when doing heavy
 		 * reads. So better not enable them until I figure it out. */
+		instance->irq = NO_IRQ;
+
 		if (instance->irq != NO_IRQ)
 			if (request_irq(instance->irq, dtc_intr, 0,
 					"dtc", instance)) {
@@ -258,11 +255,7 @@
 			printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
 			printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
 		}
-#else
-		if (instance->irq != NO_IRQ)
-			printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no);
-		instance->irq = NO_IRQ;
-#endif
+
 		dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n",
 		        instance->host_no, instance->irq);
 
@@ -323,7 +316,8 @@
  * 	timeout.
 */
 
-static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
+static inline int dtc_pread(struct Scsi_Host *instance,
+                            unsigned char *dst, int len)
 {
 	unsigned char *d = dst;
 	int i;			/* For counting time spent in the poll-loop */
@@ -352,8 +346,6 @@
 	while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
 		++i;
 	rtrc(0);
-	if (i > hostdata->spin_max_r)
-		hostdata->spin_max_r = i;
 	return (0);
 }
 
@@ -370,7 +362,8 @@
  * 	timeout.
 */
 
-static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len)
+static inline int dtc_pwrite(struct Scsi_Host *instance,
+                             unsigned char *src, int len)
 {
 	int i;
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -400,8 +393,6 @@
 	rtrc(7);
 	/* Check for parity error here. fixme. */
 	rtrc(0);
-	if (i > hostdata->spin_max_w)
-		hostdata->spin_max_w = i;
 	return (0);
 }
 
@@ -440,8 +431,6 @@
 	.detect			= dtc_detect,
 	.release		= dtc_release,
 	.proc_name		= "dtc3x80",
-	.show_info		= dtc_show_info,
-	.write_info		= dtc_write_info,
 	.info			= dtc_info,
 	.queuecommand		= dtc_queue_command,
 	.eh_abort_handler	= dtc_abort,
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h
index 56732cb..fcb0a8e 100644
--- a/drivers/scsi/dtc.h
+++ b/drivers/scsi/dtc.h
@@ -21,14 +21,17 @@
 
 #define NCR5380_dma_xfer_len(instance, cmd, phase) \
         dtc_dma_xfer_len(cmd)
+#define NCR5380_dma_recv_setup		dtc_pread
+#define NCR5380_dma_send_setup		dtc_pwrite
+#define NCR5380_dma_residual(instance)	(0)
 
 #define NCR5380_intr			dtc_intr
 #define NCR5380_queue_command		dtc_queue_command
 #define NCR5380_abort			dtc_abort
 #define NCR5380_bus_reset		dtc_bus_reset
 #define NCR5380_info			dtc_info
-#define NCR5380_show_info		dtc_show_info 
-#define NCR5380_write_info		dtc_write_info 
+
+#define NCR5380_io_delay(x)		udelay(x)
 
 /* 15 12 11 10
    1001 1100 0000 0000 */
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index ca8003f..4299fa4 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -729,6 +729,7 @@
 		break;
 	case 0x24:
 		SD(sh)->EATA_revision = 'z';
+		break;
 	default:
 		SD(sh)->EATA_revision = '?';
 	}
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 33581ba..2aca4d1 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -246,7 +246,7 @@
 	.eh_target_reset_handler	= esas2r_target_reset,
 	.can_queue			= 128,
 	.this_id			= -1,
-	.sg_tablesize			= SCSI_MAX_SG_SEGMENTS,
+	.sg_tablesize			= SG_CHUNK_SIZE,
 	.cmd_per_lun			=
 		ESAS2R_DEFAULT_CMD_PER_LUN,
 	.present			= 0,
@@ -271,7 +271,7 @@
 MODULE_PARM_DESC(num_sg_lists,
 		 "Number of scatter/gather lists.  Default 1024.");
 
-int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
+int sg_tablesize = SG_CHUNK_SIZE;
 module_param(sg_tablesize, int, 0);
 MODULE_PARM_DESC(sg_tablesize,
 		 "Maximum number of entries in a scatter/gather table.");
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index ce129e5..9ddc920 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.6.0.17a"
+#define DRV_VERSION		"1.6.0.21"
 #define PFX			DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index f3032ca..d9fd2f8 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -439,7 +439,6 @@
 	int sg_count = 0;
 	unsigned long flags = 0;
 	unsigned long ptr;
-	struct fc_rport_priv *rdata;
 	spinlock_t *io_lock = NULL;
 	int io_lock_acquired = 0;
 
@@ -455,14 +454,17 @@
 		return 0;
 	}
 
-	rdata = lp->tt.rport_lookup(lp, rport->port_id);
-	if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) {
-		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
-			"returning IO as rport is removed\n");
-		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
-		sc->result = DID_NO_CONNECT;
-		done(sc);
-		return 0;
+	if (rport) {
+		struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+		if (!rp || rp->rp_state != RPORT_ST_READY) {
+			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+				"returning DID_NO_CONNECT for IO as rport is removed\n");
+			atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+			sc->result = DID_NO_CONNECT<<16;
+			done(sc);
+			return 0;
+		}
 	}
 
 	if (lp->state != LPORT_ST_READY || !(lp->link_up))
@@ -1091,6 +1093,11 @@
 				atomic64_inc(
 					&term_stats->terminate_fw_timeouts);
 			break;
+		case FCPIO_ITMF_REJECTED:
+			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+				"abort reject recd. id %d\n",
+				(int)(id & FNIC_TAG_MASK));
+			break;
 		case FCPIO_IO_NOT_FOUND:
 			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
 				atomic64_inc(&abts_stats->abort_io_not_found);
@@ -1111,9 +1118,15 @@
 			spin_unlock_irqrestore(io_lock, flags);
 			return;
 		}
-		CMD_ABTS_STATUS(sc) = hdr_status;
+
 		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
 
+		/* If the status is IO not found, consider it a success */
+		if (hdr_status == FCPIO_IO_NOT_FOUND)
+			CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
+		else
+			CMD_ABTS_STATUS(sc) = hdr_status;
+
 		atomic64_dec(&fnic_stats->io_stats.active_ios);
 		if (atomic64_read(&fnic->io_cmpl_skip))
 			atomic64_dec(&fnic->io_cmpl_skip);
@@ -1926,21 +1939,31 @@
 
 	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
 
+	start_time = io_req->start_time;
 	/*
 	 * firmware completed the abort, check the status,
-	 * free the io_req irrespective of failure or success
+	 * free the io_req if successful. If abort fails,
+	 * Device reset will clean the I/O.
 	 */
-	if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
+	if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
+		CMD_SP(sc) = NULL;
+	else {
 		ret = FAILED;
-
-	CMD_SP(sc) = NULL;
+		spin_unlock_irqrestore(io_lock, flags);
+		goto fnic_abort_cmd_end;
+	}
 
 	spin_unlock_irqrestore(io_lock, flags);
 
-	start_time = io_req->start_time;
 	fnic_release_ioreq_buf(fnic, io_req, sc);
 	mempool_free(io_req, fnic->io_req_pool);
 
+	if (sc->scsi_done) {
+		/* Call SCSI completion function to complete the IO */
+		sc->result = (DID_ABORT << 16);
+		sc->scsi_done(sc);
+	}
+
 fnic_abort_cmd_end:
 	FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
 		  sc->request->tag, sc,
@@ -2018,7 +2041,9 @@
  * successfully aborted, 1 otherwise
  */
 static int fnic_clean_pending_aborts(struct fnic *fnic,
-				     struct scsi_cmnd *lr_sc)
+				     struct scsi_cmnd *lr_sc,
+					 bool new_sc)
+
 {
 	int tag, abt_tag;
 	struct fnic_io_req *io_req;
@@ -2036,10 +2061,10 @@
 		spin_lock_irqsave(io_lock, flags);
 		sc = scsi_host_find_tag(fnic->lport->host, tag);
 		/*
-		 * ignore this lun reset cmd or cmds that do not belong to
-		 * this lun
+		 * ignore this lun reset cmd if issued using new SC
+		 * or cmds that do not belong to this lun
 		 */
-		if (!sc || sc == lr_sc || sc->device != lun_dev) {
+		if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
 			spin_unlock_irqrestore(io_lock, flags);
 			continue;
 		}
@@ -2145,11 +2170,27 @@
 			goto clean_pending_aborts_end;
 		}
 		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
-		CMD_SP(sc) = NULL;
+
+		/* original sc used for lr is handled by dev reset code */
+		if (sc != lr_sc)
+			CMD_SP(sc) = NULL;
 		spin_unlock_irqrestore(io_lock, flags);
 
-		fnic_release_ioreq_buf(fnic, io_req, sc);
-		mempool_free(io_req, fnic->io_req_pool);
+		/* original sc used for lr is handled by dev reset code */
+		if (sc != lr_sc) {
+			fnic_release_ioreq_buf(fnic, io_req, sc);
+			mempool_free(io_req, fnic->io_req_pool);
+		}
+
+		/*
+		 * Any IO returned during reset needs to complete via scsi_done
+		 * so that the scsi_cmnd is handed back to the upper layer.
+		 */
+		if (sc->scsi_done) {
+			/* Set result to let upper SCSI layer retry */
+			sc->result = DID_RESET << 16;
+			sc->scsi_done(sc);
+		}
 	}
 
 	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
@@ -2243,6 +2284,7 @@
 	int tag = 0;
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 	int tag_gen_flag = 0;   /*to track tags allocated by fnic driver*/
+	bool new_sc = 0;
 
 	/* Wait for rport to unblock */
 	fc_block_scsi_eh(sc);
@@ -2288,13 +2330,12 @@
 		 * fix the way the EH ioctls work for real, but until
 		 * that happens we fail these explicit requests here.
 		 */
-		if (shost_use_blk_mq(sc->device->host))
-			goto fnic_device_reset_end;
 
 		tag = fnic_scsi_host_start_tag(fnic, sc);
 		if (unlikely(tag == SCSI_NO_TAG))
 			goto fnic_device_reset_end;
 		tag_gen_flag = 1;
+		new_sc = 1;
 	}
 	io_lock = fnic_io_lock_hash(fnic, sc);
 	spin_lock_irqsave(io_lock, flags);
@@ -2429,7 +2470,7 @@
 	 * the lun reset cmd. If all cmds get cleaned, the lun reset
 	 * succeeds
 	 */
-	if (fnic_clean_pending_aborts(fnic, sc)) {
+	if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
 		spin_lock_irqsave(io_lock, flags);
 		io_req = (struct fnic_io_req *)CMD_SP(sc);
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 90091e6..516bd6c 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -18,50 +18,10 @@
  *
  * Added ISAPNP support for DTC436 adapters,
  * Thomas Sailer, sailer@ife.ee.ethz.ch
- */
-
-/* 
- * TODO : flesh out DMA support, find some one actually using this (I have
- * 	a memory mapped Trantor board that works fine)
- */
-
-/*
- * The card is detected and initialized in one of several ways : 
- * 1.  With command line overrides - NCR5380=port,irq may be 
- *     used on the LILO command line to override the defaults.
  *
- * 2.  With the GENERIC_NCR5380_OVERRIDE compile time define.  This is 
- *     specified as an array of address, irq, dma, board tuples.  Ie, for
- *     one board at 0x350, IRQ5, no dma, I could say  
- *     -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}}
- * 
- * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an 
- * 	IRQ line if overridden on the command line.
- *
- * 3.  When included as a module, with arguments passed on the command line:
- *         ncr_irq=xx	the interrupt
- *         ncr_addr=xx  the port or base address (for port or memory
- *              	mapped, resp.)
- *         ncr_dma=xx	the DMA
- *         ncr_5380=1	to set up for a NCR5380 board
- *         ncr_53c400=1	to set up for a NCR53C400 board
- *     e.g.
- *     modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
- *       for a port mapped NCR5380 board or
- *     modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
- *       for a memory mapped NCR53C400 board with interrupts disabled.
- * 
- * 255 should be specified for no or DMA interrupt, 254 to autoprobe for an 
- * 	IRQ line if overridden on the command line.
- *     
+ * See Documentation/scsi/g_NCR5380.txt for more info.
  */
 
-#define AUTOPROBE_IRQ
-
-#ifdef CONFIG_SCSI_GENERIC_NCR53C400
-#define PSEUDO_DMA
-#endif
-
 #include <asm/io.h>
 #include <linux/blkdev.h>
 #include <linux/module.h>
@@ -270,7 +230,7 @@
 #ifndef SCSI_G_NCR5380_MEM
 	int i;
 	int port_idx = -1;
-	unsigned long region_size = 16;
+	unsigned long region_size;
 #endif
 	static unsigned int __initdata ncr_53c400a_ports[] = {
 		0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
@@ -290,6 +250,7 @@
 #ifdef SCSI_G_NCR5380_MEM
 	unsigned long base;
 	void __iomem *iomem;
+	resource_size_t iomem_size;
 #endif
 
 	if (ncr_irq)
@@ -350,25 +311,17 @@
 		flags = 0;
 		switch (overrides[current_override].board) {
 		case BOARD_NCR5380:
-			flags = FLAG_NO_PSEUDO_DMA;
-			break;
-		case BOARD_NCR53C400:
-#ifdef PSEUDO_DMA
-			flags = FLAG_NO_DMA_FIXUP;
-#endif
+			flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
 			break;
 		case BOARD_NCR53C400A:
-			flags = FLAG_NO_DMA_FIXUP;
 			ports = ncr_53c400a_ports;
 			magic = ncr_53c400a_magic;
 			break;
 		case BOARD_HP_C2502:
-			flags = FLAG_NO_DMA_FIXUP;
 			ports = ncr_53c400a_ports;
 			magic = hp_c2502_magic;
 			break;
 		case BOARD_DTC3181E:
-			flags = FLAG_NO_DMA_FIXUP;
 			ports = dtc_3181e_ports;
 			magic = ncr_53c400a_magic;
 			break;
@@ -381,20 +334,22 @@
 			/* Disable the adapter and look for a free io port */
 			magic_configure(-1, 0, magic);
 
+			region_size = 16;
+
 			if (overrides[current_override].NCR5380_map_name != PORT_AUTO)
 				for (i = 0; ports[i]; i++) {
-					if (!request_region(ports[i],  16, "ncr53c80"))
+					if (!request_region(ports[i], region_size, "ncr53c80"))
 						continue;
 					if (overrides[current_override].NCR5380_map_name == ports[i])
 						break;
-					release_region(ports[i], 16);
+					release_region(ports[i], region_size);
 			} else
 				for (i = 0; ports[i]; i++) {
-					if (!request_region(ports[i],  16, "ncr53c80"))
+					if (!request_region(ports[i], region_size, "ncr53c80"))
 						continue;
 					if (inb(ports[i]) == 0xff)
 						break;
-					release_region(ports[i], 16);
+					release_region(ports[i], region_size);
 				}
 			if (ports[i]) {
 				/* At this point we have our region reserved */
@@ -410,17 +365,19 @@
 		else
 		{
 			/* Not a 53C400A style setup - just grab */
-			if(!(request_region(overrides[current_override].NCR5380_map_name, NCR5380_region_size, "ncr5380")))
+			region_size = 8;
+			if (!request_region(overrides[current_override].NCR5380_map_name,
+			                    region_size, "ncr5380"))
 				continue;
-			region_size = NCR5380_region_size;
 		}
 #else
 		base = overrides[current_override].NCR5380_map_name;
-		if (!request_mem_region(base, NCR5380_region_size, "ncr5380"))
+		iomem_size = NCR53C400_region_size;
+		if (!request_mem_region(base, iomem_size, "ncr5380"))
 			continue;
-		iomem = ioremap(base, NCR5380_region_size);
+		iomem = ioremap(base, iomem_size);
 		if (!iomem) {
-			release_mem_region(base, NCR5380_region_size);
+			release_mem_region(base, iomem_size);
 			continue;
 		}
 #endif
@@ -458,6 +415,7 @@
 #else
 		instance->base = overrides[current_override].NCR5380_map_name;
 		hostdata->iomem = iomem;
+		hostdata->iomem_size = iomem_size;
 		switch (overrides[current_override].board) {
 		case BOARD_NCR53C400:
 			hostdata->c400_ctl_status = 0x100;
@@ -472,7 +430,7 @@
 		}
 #endif
 
-		if (NCR5380_init(instance, flags))
+		if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP))
 			goto out_unregister;
 
 		switch (overrides[current_override].board) {
@@ -524,7 +482,7 @@
 	release_region(overrides[current_override].NCR5380_map_name, region_size);
 #else
 	iounmap(iomem);
-	release_mem_region(base, NCR5380_region_size);
+	release_mem_region(base, iomem_size);
 #endif
 	return count;
 }
@@ -546,45 +504,18 @@
 #ifndef SCSI_G_NCR5380_MEM
 	release_region(instance->io_port, instance->n_io_port);
 #else
-	iounmap(((struct NCR5380_hostdata *)instance->hostdata)->iomem);
-	release_mem_region(instance->base, NCR5380_region_size);
+	{
+		struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+		iounmap(hostdata->iomem);
+		release_mem_region(instance->base, hostdata->iomem_size);
+	}
 #endif
 	return 0;
 }
 
-#ifdef BIOSPARAM
 /**
- *	generic_NCR5380_biosparam
- *	@disk: disk to compute geometry for
- *	@dev: device identifier for this disk
- *	@ip: sizes to fill in
- *
- *	Generates a BIOS / DOS compatible H-C-S mapping for the specified 
- *	device / size.
- * 
- * 	XXX Most SCSI boards use this mapping, I could be incorrect.  Someone
- *	using hard disks on a trantor should verify that this mapping
- *	corresponds to that used by the BIOS / ASPI driver by running the linux
- *	fdisk program and matching the H_C_S coordinates to what DOS uses.
- *
- *	Locks: none
- */
-
-static int
-generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev,
-			  sector_t capacity, int *ip)
-{
-	ip[0] = 64;
-	ip[1] = 32;
-	ip[2] = capacity >> 11;
-	return 0;
-}
-#endif
-
-#ifdef PSEUDO_DMA
-
-/**
- *	NCR5380_pread		-	pseudo DMA read
+ *	generic_NCR5380_pread - pseudo DMA read
  *	@instance: adapter to read from
  *	@dst: buffer to read into
  *	@len: buffer length
@@ -593,7 +524,8 @@
  *	controller
  */
  
-static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
+static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
+                                        unsigned char *dst, int len)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int blocks = len / 128;
@@ -661,7 +593,7 @@
 }
 
 /**
- *	NCR5380_write		-	pseudo DMA write
+ *	generic_NCR5380_pwrite - pseudo DMA write
  *	@instance: adapter to read from
  *	@dst: buffer to read into
  *	@len: buffer length
@@ -670,7 +602,8 @@
  *	controller
  */
 
-static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len)
+static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
+                                         unsigned char *src, int len)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int blocks = len / 128;
@@ -738,10 +671,15 @@
 	return 0;
 }
 
-static int generic_NCR5380_dma_xfer_len(struct scsi_cmnd *cmd)
+static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance,
+                                        struct scsi_cmnd *cmd)
 {
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int transfersize = cmd->transfersize;
 
+	if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
+		return 0;
+
 	/* Limit transfers to 32K, for xx400 & xx406
 	 * pseudoDMA that transfers in 128 bytes blocks.
 	 */
@@ -756,8 +694,6 @@
 	return transfersize;
 }
 
-#endif /* PSEUDO_DMA */
-
 /*
  *	Include the NCR5380 core code that we build our driver around	
  */
@@ -773,7 +709,6 @@
 	.queuecommand		= generic_NCR5380_queue_command,
 	.eh_abort_handler	= generic_NCR5380_abort,
 	.eh_bus_reset_handler	= generic_NCR5380_bus_reset,
-	.bios_param		= NCR5380_BIOSPARAM,
 	.can_queue		= 16,
 	.this_id		= 7,
 	.sg_tablesize		= SG_ALL,
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 6f3d2ac..5951774 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,13 +14,6 @@
 #ifndef GENERIC_NCR5380_H
 #define GENERIC_NCR5380_H
 
-#ifdef CONFIG_SCSI_GENERIC_NCR53C400
-#define BIOSPARAM
-#define NCR5380_BIOSPARAM generic_NCR5380_biosparam
-#else
-#define NCR5380_BIOSPARAM NULL
-#endif
-
 #define __STRVAL(x) #x
 #define STRVAL(x) __STRVAL(x)
 
@@ -30,12 +23,6 @@
 #define NCR5380_map_type int
 #define NCR5380_map_name port
 
-#ifdef CONFIG_SCSI_GENERIC_NCR53C400
-#define NCR5380_region_size 16
-#else
-#define NCR5380_region_size 8
-#endif
-
 #define NCR5380_read(reg) \
 	inb(instance->io_port + (reg))
 #define NCR5380_write(reg, value) \
@@ -55,7 +42,7 @@
 #define NCR5380_map_name base
 #define NCR53C400_mem_base 0x3880
 #define NCR53C400_host_buffer 0x3900
-#define NCR5380_region_size 0x3a00
+#define NCR53C400_region_size 0x3a00
 
 #define NCR5380_read(reg) \
 	readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
@@ -66,6 +53,7 @@
 
 #define NCR5380_implementation_fields \
 	void __iomem *iomem; \
+	resource_size_t iomem_size; \
 	int c400_ctl_status; \
 	int c400_blk_cnt; \
 	int c400_host_buf;
@@ -73,16 +61,18 @@
 #endif
 
 #define NCR5380_dma_xfer_len(instance, cmd, phase) \
-        generic_NCR5380_dma_xfer_len(cmd)
+        generic_NCR5380_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_recv_setup		generic_NCR5380_pread
+#define NCR5380_dma_send_setup		generic_NCR5380_pwrite
+#define NCR5380_dma_residual(instance)	(0)
 
 #define NCR5380_intr generic_NCR5380_intr
 #define NCR5380_queue_command generic_NCR5380_queue_command
 #define NCR5380_abort generic_NCR5380_abort
 #define NCR5380_bus_reset generic_NCR5380_bus_reset
-#define NCR5380_pread generic_NCR5380_pread
-#define NCR5380_pwrite generic_NCR5380_pwrite
 #define NCR5380_info generic_NCR5380_info
-#define NCR5380_show_info generic_NCR5380_show_info
+
+#define NCR5380_io_delay(x)		udelay(x)
 
 #define BOARD_NCR5380	0
 #define BOARD_NCR53C400	1
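
The dtc.h and g_NCR5380.h hunks above show the convention the reworked NCR5380
core now relies on: rather than PSEUDO_DMA/DONT_USE_INTR compile-time switches,
every board header maps a fixed set of hook macros onto its own transfer
helpers, and a hook that returns 0 from NCR5380_dma_xfer_len() keeps the core on
plain PIO (as dmx3191d does). A sketch of the shape such a header takes; the
myboard_* names are hypothetical, not an in-tree driver.

#define NCR5380_dma_xfer_len(instance, cmd, phase)	myboard_dma_xfer_len(cmd)
#define NCR5380_dma_recv_setup		myboard_pread
#define NCR5380_dma_send_setup		myboard_pwrite
#define NCR5380_dma_residual(instance)	(0)
#define NCR5380_io_delay(x)		udelay(x)

/* Pseudo-DMA read: pull len bytes from the board's host buffer into dst.
 * Returns 0 on success, non-zero on a transfer error. */
static inline int myboard_pread(struct Scsi_Host *instance,
                                unsigned char *dst, int len)
{
	/* board-specific programmed-I/O loop would go here */
	return 0;
}

/* Pseudo-DMA write: push len bytes from src out to the board. */
static inline int myboard_pwrite(struct Scsi_Host *instance,
                                 unsigned char *src, int len)
{
	return 0;
}

/* How much of this command the core may hand to the pseudo-DMA path;
 * returning 0 forces the core back to byte-at-a-time PIO. */
static int myboard_dma_xfer_len(struct scsi_cmnd *cmd)
{
	return cmd->transfersize;
}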
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 29e89f3..d7cab72 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -23,7 +23,7 @@
 #include <scsi/sas_ata.h>
 #include <scsi/libsas.h>
 
-#define DRV_VERSION "v1.3"
+#define DRV_VERSION "v1.4"
 
 #define HISI_SAS_MAX_PHYS	9
 #define HISI_SAS_MAX_QUEUES	32
@@ -133,6 +133,9 @@
 	int (*hw_init)(struct hisi_hba *hisi_hba);
 	void (*setup_itct)(struct hisi_hba *hisi_hba,
 			   struct hisi_sas_device *device);
+	int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx,
+				struct domain_device *device);
+	struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
 	void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
 	int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s);
 	void (*start_delivery)(struct hisi_hba *hisi_hba);
@@ -298,7 +301,7 @@
 	u8	atapi_cdb[ATAPI_CDB_LEN];
 };
 
-#define HISI_SAS_SGE_PAGE_CNT SCSI_MAX_SG_SEGMENTS
+#define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE
 struct hisi_sas_sge_page {
 	struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT];
 };
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 097ab4f..18dd5ea 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -227,7 +227,11 @@
 	} else
 		n_elem = task->num_scatter;
 
-	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
+	if (hisi_hba->hw->slot_index_alloc)
+		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
+						    device);
+	else
+		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
 	if (rc)
 		goto err_out;
 	rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
@@ -417,7 +421,10 @@
 	struct hisi_sas_device *sas_dev;
 	struct device *dev = &hisi_hba->pdev->dev;
 
-	sas_dev = hisi_sas_alloc_dev(device);
+	if (hisi_hba->hw->alloc_dev)
+		sas_dev = hisi_hba->hw->alloc_dev(device);
+	else
+		sas_dev = hisi_sas_alloc_dev(device);
 	if (!sas_dev) {
 		dev_err(dev, "fail alloc dev: max support %d devices\n",
 			HISI_SAS_MAX_DEVICES);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b733747..bbe98ec 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -465,6 +465,62 @@
 	return readl(regs);
 }
 
+/* This function needs to be protected from pre-emption. */
+static int
+slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
+		       struct domain_device *device)
+{
+	unsigned int index = 0;
+	void *bitmap = hisi_hba->slot_index_tags;
+	int sata_dev = dev_is_sata(device);
+
+	while (1) {
+		index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
+					   index);
+		if (index >= hisi_hba->slot_index_count)
+			return -SAS_QUEUE_FULL;
+		/*
+		 * SAS IPTT bit0 should be 1
+		 */
+		if (sata_dev || (index & 1))
+			break;
+		index++;
+	}
+
+	set_bit(index, bitmap);
+	*slot_idx = index;
+	return 0;
+}
+
+static struct
+hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
+{
+	struct hisi_hba *hisi_hba = device->port->ha->lldd_ha;
+	struct hisi_sas_device *sas_dev = NULL;
+	int i, sata_dev = dev_is_sata(device);
+
+	spin_lock(&hisi_hba->lock);
+	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+		/*
+		 * SATA device id bit0 should be 0
+		 */
+		if (sata_dev && (i & 1))
+			continue;
+		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
+			hisi_hba->devices[i].device_id = i;
+			sas_dev = &hisi_hba->devices[i];
+			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+			sas_dev->dev_type = device->dev_type;
+			sas_dev->hisi_hba = hisi_hba;
+			sas_dev->sas_device = device;
+			break;
+		}
+	}
+	spin_unlock(&hisi_hba->lock);
+
+	return sas_dev;
+}
+
 static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
 	u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
@@ -544,7 +600,7 @@
 	}
 
 	qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
-		(device->max_linkrate << ITCT_HDR_MCR_OFF) |
+		(device->linkrate << ITCT_HDR_MCR_OFF) |
 		(1 << ITCT_HDR_VLN_OFF) |
 		(port->id << ITCT_HDR_PORT_ID_OFF));
 	itct->qw0 = cpu_to_le64(qw0);
@@ -554,10 +610,11 @@
 	itct->sas_addr = __swab64(itct->sas_addr);
 
 	/* qw2 */
-	itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
-				(0xff00ULL << ITCT_HDR_BITLT_OFF) |
-				(0xff00ULL << ITCT_HDR_MCTLT_OFF) |
-				(0xff00ULL << ITCT_HDR_RTOLT_OFF));
+	if (!dev_is_sata(device))
+		itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
+					(0x1ULL << ITCT_HDR_BITLT_OFF) |
+					(0x32ULL << ITCT_HDR_MCTLT_OFF) |
+					(0x1ULL << ITCT_HDR_RTOLT_OFF));
 }
 
 static void free_device_v2_hw(struct hisi_hba *hisi_hba,
@@ -715,7 +772,7 @@
 	hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF);
 	hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1);
 	hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4);
-	hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x4E20);
+	hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32);
 	hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1);
 	hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
 	hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1);
@@ -1993,22 +2050,23 @@
 	u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
 	irqreturn_t res = IRQ_HANDLED;
 	u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
-	int phy_no;
+	int phy_no, offset;
 
 	phy_no = sas_phy->id;
 	initial_fis = &hisi_hba->initial_fis[phy_no];
 	fis = &initial_fis->fis;
 
-	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1);
-	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk | 1 << phy_no);
+	offset = 4 * (phy_no / 4);
+	ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset,
+			 ent_msk | 1 << ((phy_no % 4) * 8));
 
-	ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1);
-	ent_tmp = ent_int;
+	ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset);
+	ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF *
+			     (phy_no % 4)));
 	ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4);
 	if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) {
 		dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no);
-		hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
-		hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
 		res = IRQ_NONE;
 		goto end;
 	}
@@ -2056,8 +2114,8 @@
 	queue_work(hisi_hba->wq, &phy->phyup_ws);
 
 end:
-	hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
-	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);
 
 	return res;
 }
@@ -2165,6 +2223,8 @@
 static const struct hisi_sas_hw hisi_sas_v2_hw = {
 	.hw_init = hisi_sas_v2_init,
 	.setup_itct = setup_itct_v2_hw,
+	.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
+	.alloc_dev = alloc_dev_quirk_v2_hw,
 	.sl_notify = sl_notify_v2_hw,
 	.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
 	.free_device = free_device_v2_hw,
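
The slot_index_alloc_quirk_v2_hw() and alloc_dev_quirk_v2_hw() hooks added above
encode a v2 hardware constraint: SAS commands must use IPTT values with bit 0
set, while SATA commands may take any free slot (and SATA device ids keep bit 0
clear). The parity-constrained tag search reduces to a short bitmap walk; the
sketch below uses hypothetical tags/count/is_sata arguments in place of the
driver's hisi_hba state.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Find and claim a free tag, forcing an odd index for SAS devices.
 * Returns the tag on success or -EBUSY when the bitmap is exhausted. */
static int alloc_iptt(unsigned long *tags, unsigned int count, bool is_sata)
{
	unsigned int idx = 0;

	for (;;) {
		idx = find_next_zero_bit(tags, count, idx);
		if (idx >= count)
			return -EBUSY;
		if (is_sata || (idx & 1))	/* SAS IPTT bit 0 must be 1 */
			break;
		idx++;
	}
	set_bit(idx, tags);
	return idx;
}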
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5be944c..ff8dcd5 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
  * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
  * with an optional trailing '-' followed by a byte value (0-255).
  */
-#define HPSA_DRIVER_VERSION "3.4.14-0"
+#define HPSA_DRIVER_VERSION "3.4.16-0"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 #define HPSA "hpsa"
 
@@ -294,6 +294,9 @@
 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
 	struct ReportExtendedLUNdata *buf, int bufsize);
 static int hpsa_luns_changed(struct ctlr_info *h);
+static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
+			       struct hpsa_scsi_dev_t *dev,
+			       unsigned char *scsi3addr);
 
 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 {
@@ -728,6 +731,29 @@
 			sn[12], sn[13], sn[14], sn[15]);
 }
 
+static ssize_t sas_address_show(struct device *dev,
+	      struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct scsi_device *sdev;
+	struct hpsa_scsi_dev_t *hdev;
+	unsigned long flags;
+	u64 sas_address;
+
+	sdev = to_scsi_device(dev);
+	h = sdev_to_hba(sdev);
+	spin_lock_irqsave(&h->lock, flags);
+	hdev = sdev->hostdata;
+	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		return -ENODEV;
+	}
+	sas_address = hdev->sas_address;
+	spin_unlock_irqrestore(&h->lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
+}
+
 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
 	     struct device_attribute *attr, char *buf)
 {
@@ -840,6 +866,7 @@
 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
 			host_show_hp_ssd_smart_path_enabled, NULL);
 static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
@@ -865,6 +892,7 @@
 	&dev_attr_unique_id,
 	&dev_attr_hp_ssd_smart_path_enabled,
 	&dev_attr_path_info,
+	&dev_attr_sas_address,
 	NULL,
 };
 
@@ -1637,9 +1665,8 @@
 		for (j = 0; j < ndevices; j++) {
 			if (dev[j] == NULL)
 				continue;
-			if (dev[j]->devtype != TYPE_DISK)
-				continue;
-			if (dev[j]->devtype != TYPE_ZBC)
+			if (dev[j]->devtype != TYPE_DISK &&
+			    dev[j]->devtype != TYPE_ZBC)
 				continue;
 			if (is_logical_device(dev[j]))
 				continue;
@@ -1684,9 +1711,8 @@
 	for (i = 0; i < ndevices; i++) {
 		if (dev[i] == NULL)
 			continue;
-		if (dev[i]->devtype != TYPE_DISK)
-			continue;
-		if (dev[i]->devtype != TYPE_ZBC)
+		if (dev[i]->devtype != TYPE_DISK &&
+		    dev[i]->devtype != TYPE_ZBC)
 			continue;
 		if (!is_logical_device(dev[i]))
 			continue;
@@ -1720,6 +1746,51 @@
 	return rc;
 }
 
+static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
+						struct hpsa_scsi_dev_t *dev)
+{
+	int i;
+	int count = 0;
+
+	for (i = 0; i < h->nr_cmds; i++) {
+		struct CommandList *c = h->cmd_pool + i;
+		int refcount = atomic_inc_return(&c->refcount);
+
+		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
+				dev->scsi3addr)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
+			if (!hpsa_is_cmd_idle(c))
+				++count;
+			spin_unlock_irqrestore(&h->lock, flags);
+		}
+
+		cmd_free(h, c);
+	}
+
+	return count;
+}
+
+static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
+						struct hpsa_scsi_dev_t *device)
+{
+	int cmds = 0;
+	int waits = 0;
+
+	while (1) {
+		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
+		if (cmds == 0)
+			break;
+		if (++waits > 20)
+			break;
+		dev_warn(&h->pdev->dev,
+			"%s: removing device with %d outstanding commands!\n",
+			__func__, cmds);
+		msleep(1000);
+	}
+}
+
 static void hpsa_remove_device(struct ctlr_info *h,
 			struct hpsa_scsi_dev_t *device)
 {
@@ -1743,8 +1814,13 @@
 			hpsa_show_dev_msg(KERN_WARNING, h, device,
 					"didn't find device for removal.");
 		}
-	} else /* HBA */
+	} else { /* HBA */
+
+		device->removed = 1;
+		hpsa_wait_for_outstanding_commands_for_dev(h, device);
+
 		hpsa_remove_sas_device(device);
+	}
 }
 
 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
@@ -2146,7 +2222,8 @@
 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
 					struct CommandList *c,
 					struct scsi_cmnd *cmd,
-					struct io_accel2_cmd *c2)
+					struct io_accel2_cmd *c2,
+					struct hpsa_scsi_dev_t *dev)
 {
 	int data_len;
 	int retry = 0;
@@ -2210,8 +2287,27 @@
 		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
 		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
 		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
-			/* We will get an event from ctlr to trigger rescan */
-			retry = 1;
+			/*
+			 * Did an HBA disk disappear? We will eventually
+			 * get a state change event from the controller but
+			 * in the meantime, we need to tell the OS that the
+			 * HBA disk is no longer there and stop I/O
+			 * from going down. This allows the potential re-insert
+			 * of the disk to get the same device node.
+			 */
+			if (dev->physical_device && dev->expose_device) {
+				cmd->result = DID_NO_CONNECT << 16;
+				dev->removed = 1;
+				h->drv_req_rescan = 1;
+				dev_warn(&h->pdev->dev,
+					"%s: device is gone!\n", __func__);
+			} else
+				/*
+				 * Retry by sending down the RAID path.
+				 * We will get an event from ctlr to
+				 * trigger rescan regardless.
+				 */
+				retry = 1;
 			break;
 		default:
 			retry = 1;
@@ -2335,13 +2431,15 @@
 		c2->error_data.serv_response ==
 			IOACCEL2_SERV_RESPONSE_FAILURE) {
 		if (c2->error_data.status ==
-			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
+			IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
 			dev->offload_enabled = 0;
+			dev->offload_to_be_enabled = 0;
+		}
 
 		return hpsa_retry_cmd(h, c);
 	}
 
-	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
+	if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
 		return hpsa_retry_cmd(h, c);
 
 	return hpsa_cmd_free_and_done(h, c, cmd);
@@ -2806,7 +2904,7 @@
 		goto out;
 	}
 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+					PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -2832,7 +2930,7 @@
 	/* fill_cmd can't fail here, no data buffer to map. */
 	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
 			scsi3addr, TYPE_MSG);
-	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
 	if (rc) {
 		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
 		goto out;
@@ -3080,7 +3178,7 @@
 		return -1;
 	}
 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+					PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -3123,7 +3221,7 @@
 	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
 
 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-				PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+				PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -3151,7 +3249,7 @@
 		goto out;
 
 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -3182,7 +3280,7 @@
 	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
 
 	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
-						NO_TIMEOUT);
+						DEFAULT_TIMEOUT);
 	ei = c->err_info;
 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
 		hpsa_scsi_interpret_error(h, c);
@@ -3250,7 +3348,7 @@
 		c->Request.CDB[5] = 0;
 
 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
-						NO_TIMEOUT);
+						DEFAULT_TIMEOUT);
 	if (rc)
 		goto out;
 
@@ -3462,7 +3560,7 @@
 	if (extended_response)
 		c->Request.CDB[1] = extended_response;
 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+					PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
 	if (rc)
 		goto out;
 	ei = c->err_info;
@@ -3569,7 +3667,8 @@
 	c = cmd_alloc(h);
 
 	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
-	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
+					DEFAULT_TIMEOUT);
 	if (rc) {
 		cmd_free(h, c);
 		return 0;
@@ -3644,7 +3743,8 @@
 	c = cmd_alloc(h);
 
 	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
-	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
+	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
+					DEFAULT_TIMEOUT);
 	/* no unmap needed here because no data xfer. */
 	ei = c->err_info;
 	switch (ei->CommandStatus) {
@@ -5234,6 +5334,12 @@
 
 	dev = cmd->device->hostdata;
 	if (!dev) {
+		cmd->result = NOT_READY << 16; /* host byte */
+		cmd->scsi_done(cmd);
+		return 0;
+	}
+
+	if (dev->removed) {
 		cmd->result = DID_NO_CONNECT << 16;
 		cmd->scsi_done(cmd);
 		return 0;
@@ -5414,7 +5520,7 @@
 	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
 	(void) fill_cmd(c, TEST_UNIT_READY, h,
 			NULL, 0, 0, lunaddr, TYPE_CMD);
-	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
 	if (rc)
 		return rc;
 	/* no unmap needed here because no data xfer. */
@@ -5638,7 +5744,7 @@
 		0, 0, scsi3addr, TYPE_MSG);
 	if (h->needs_abort_tags_swizzled)
 		swizzle_abort_tag(&c->Request.CDB[4]);
-	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
 	hpsa_get_tag(h, abort, &taglower, &tagupper);
 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
 		__func__, tagupper, taglower);
@@ -5803,7 +5909,7 @@
 	c = cmd_alloc(h);
 	setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
 	c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
-	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
 	hpsa_get_tag(h, abort, &taglower, &tagupper);
 	dev_dbg(&h->pdev->dev,
 		"%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
@@ -6348,7 +6454,8 @@
 		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
 		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
 	}
-	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
+	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
+					DEFAULT_TIMEOUT);
 	if (iocommand.buf_size > 0)
 		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
@@ -6480,7 +6587,8 @@
 		}
 		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
 	}
-	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
+	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
+						DEFAULT_TIMEOUT);
 	if (sg_used)
 		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
@@ -8254,8 +8362,10 @@
 			event_type = "configuration change";
 		/* Stop sending new RAID offload reqs via the IO accelerator */
 		scsi_block_requests(h->scsi_host);
-		for (i = 0; i < h->ndevices; i++)
+		for (i = 0; i < h->ndevices; i++) {
 			h->dev[i]->offload_enabled = 0;
+			h->dev[i]->offload_to_be_enabled = 0;
+		}
 		hpsa_drain_accel_commands(h);
 		/* Set 'accelerator path config change' bit */
 		dev_warn(&h->pdev->dev,
@@ -8541,11 +8651,6 @@
 	if (rc)
 		goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
 
-	/* hook into SCSI subsystem */
-	rc = hpsa_scsi_add_host(h);
-	if (rc)
-		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
-
 	/* create the resubmit workqueue */
 	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
 	if (!h->rescan_ctlr_wq) {
@@ -8642,6 +8747,11 @@
 		dev_info(&h->pdev->dev,
 			"Can't track change to report lun data\n");
 
+	/* hook into SCSI subsystem */
+	rc = hpsa_scsi_add_host(h);
+	if (rc)
+		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+
 	/* Monitor the controller for firmware lockups */
 	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
 	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
@@ -8703,7 +8813,7 @@
 		goto out;
 	}
 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-					PCI_DMA_TODEVICE, NO_TIMEOUT);
+					PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
 	if (rc)
 		goto out;
 	if (c->err_info->CommandStatus != 0)
@@ -8742,7 +8852,7 @@
 		goto errout;
 
 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
 	if ((rc != 0) || (c->err_info->CommandStatus != 0))
 		goto errout;
 
@@ -8754,7 +8864,7 @@
 		goto errout;
 
 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-		PCI_DMA_TODEVICE, NO_TIMEOUT);
+		PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
 	if ((rc != 0)  || (c->err_info->CommandStatus != 0))
 		goto errout;
 
@@ -8764,7 +8874,7 @@
 		goto errout;
 
 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
 	if ((rc != 0)  || (c->err_info->CommandStatus != 0))
 		goto errout;
 
@@ -9602,6 +9712,7 @@
 static int
 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
 {
+	*identifier = 0;
 	return 0;
 }
 
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index d06bb74..a1487e6 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -63,6 +63,7 @@
 	unsigned char scsi3addr[8];	/* as presented to the HW */
 	u8 physical_device : 1;
 	u8 expose_device;
+	u8 removed : 1;			/* device is marked for death */
 #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
 	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
 	u64 sas_address;
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 13098b0..a4dd5c9 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -794,7 +794,7 @@
 		 * case stay in the stopped state.
 		 */
 		dev_err(sciport_to_dev(iport),
-			"%s: SCIC Port 0x%p failed to stop before tiemout.\n",
+			"%s: SCIC Port 0x%p failed to stop before timeout.\n",
 			__func__,
 			iport);
 	} else if (current_state == SCI_PORT_STOPPING) {
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 8f0ea97..d453667 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -306,6 +306,42 @@
 	.is_visible = iscsi_boot_ini_attr_is_visible,
 };
 
+/* iBFT ACPI Table attributes */
+iscsi_boot_rd_attr(acpitbl_signature, signature, ISCSI_BOOT_ACPITBL_SIGNATURE);
+iscsi_boot_rd_attr(acpitbl_oem_id, oem_id, ISCSI_BOOT_ACPITBL_OEM_ID);
+iscsi_boot_rd_attr(acpitbl_oem_table_id, oem_table_id,
+		   ISCSI_BOOT_ACPITBL_OEM_TABLE_ID);
+
+static struct attribute *acpitbl_attrs[] = {
+	&iscsi_boot_attr_acpitbl_signature.attr,
+	&iscsi_boot_attr_acpitbl_oem_id.attr,
+	&iscsi_boot_attr_acpitbl_oem_table_id.attr,
+	NULL
+};
+
+static umode_t iscsi_boot_acpitbl_attr_is_visible(struct kobject *kobj,
+					     struct attribute *attr, int i)
+{
+	struct iscsi_boot_kobj *boot_kobj =
+			container_of(kobj, struct iscsi_boot_kobj, kobj);
+
+	if (attr ==  &iscsi_boot_attr_acpitbl_signature.attr)
+		return boot_kobj->is_visible(boot_kobj->data,
+					     ISCSI_BOOT_ACPITBL_SIGNATURE);
+	if (attr ==  &iscsi_boot_attr_acpitbl_oem_id.attr)
+		return boot_kobj->is_visible(boot_kobj->data,
+					     ISCSI_BOOT_ACPITBL_OEM_ID);
+	if (attr ==  &iscsi_boot_attr_acpitbl_oem_table_id.attr)
+		return boot_kobj->is_visible(boot_kobj->data,
+					     ISCSI_BOOT_ACPITBL_OEM_TABLE_ID);
+	return 0;
+}
+
+static struct attribute_group iscsi_boot_acpitbl_attr_group = {
+	.attrs = acpitbl_attrs,
+	.is_visible = iscsi_boot_acpitbl_attr_is_visible,
+};
+
 static struct iscsi_boot_kobj *
 iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
 		       struct attribute_group *attr_group,
@@ -436,6 +472,32 @@
 EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet);
 
 /**
+ * iscsi_boot_create_acpitbl() - create boot acpi table sysfs dir
+ * @boot_kset: boot kset
+ * @index: not used
+ * @data: driver specific data
+ * @show: attr show function
+ * @is_visible: attr visibility function
+ * @release: release function
+ *
+ * Note: The boot sysfs lib will free the data passed in for the caller
+ * when all refs to the acpitbl kobject have been released.
+ */
+struct iscsi_boot_kobj *
+iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index,
+			   void *data,
+			   ssize_t (*show)(void *data, int type, char *buf),
+			   umode_t (*is_visible)(void *data, int type),
+			   void (*release)(void *data))
+{
+	return iscsi_boot_create_kobj(boot_kset,
+				      &iscsi_boot_acpitbl_attr_group,
+				      "acpi_header", index, data, show,
+				      is_visible, release);
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_acpitbl);
+
+/**
  * iscsi_boot_create_kset() - creates root sysfs tree
  * @set_name: name of root dir
  */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 6bffd91..c051694 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2127,7 +2127,7 @@
 	struct iscsi_conn *conn;
 	struct iscsi_task *task;
 	struct iscsi_tm *hdr;
-	int rc, age;
+	int age;
 
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
@@ -2188,10 +2188,8 @@
 	hdr = &conn->tmhdr;
 	iscsi_prep_abort_task_pdu(task, hdr);
 
-	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
-		rc = FAILED;
+	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout))
 		goto failed;
-	}
 
 	switch (conn->tmf_state) {
 	case TMF_SUCCESS:
@@ -2423,7 +2421,7 @@
  *
  * This will attempt to send a warm target reset.
  */
-int iscsi_eh_target_reset(struct scsi_cmnd *sc)
+static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
@@ -2495,7 +2493,6 @@
 	mutex_unlock(&session->eh_mutex);
 	return rc;
 }
-EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
 
 /**
  * iscsi_eh_recover_target - reset target and possibly the session
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 90a3ca5..d5bd420 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -694,6 +694,7 @@
 	uint8_t  wwnn[8];
 	uint8_t  wwpn[8];
 	uint32_t RandomData[7];
+	uint32_t fcp_embed_io;
 
 	/* HBA Config Parameters */
 	uint32_t cfg_ack0;
@@ -757,7 +758,6 @@
 	uint32_t cfg_fdmi_on;
 #define LPFC_FDMI_NO_SUPPORT	0	/* FDMI not supported */
 #define LPFC_FDMI_SUPPORT	1	/* FDMI supported? */
-#define LPFC_FDMI_SMART_SAN	2	/* SmartSAN supported */
 	uint32_t cfg_enable_SmartSAN;
 	lpfc_vpd_t vpd;		/* vital product data */
 
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 343ae94..cfec2ec 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -4584,15 +4584,14 @@
 # lpfc_fdmi_on: Controls FDMI support.
 #       0       No FDMI support (default)
 #       1       Traditional FDMI support
-#       2       Smart SAN support
-# If lpfc_enable_SmartSAN is set 1, the driver sets lpfc_fdmi_on to value 2
-# overwriting the current value.  If lpfc_enable_SmartSAN is set 0, the
-# driver uses the current value of lpfc_fdmi_on provided it has value 0 or 1.
-# A value of 2 with lpfc_enable_SmartSAN set to 0 causes the driver to
-# set lpfc_fdmi_on back to 1.
-# Value range [0,2]. Default value is 0.
+# Traditional FDMI support means the driver will assume FDMI-2 support;
+# however, if that fails, it will fall back to FDMI-1.
+# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
+# If lpfc_enable_SmartSAN is set to 0, the driver uses the current value of
+# lpfc_fdmi_on.
+# Value range [0,1]. Default value is 0.
 */
-LPFC_ATTR_R(fdmi_on, 0, 0, 2, "Enable FDMI support");
+LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support");
 
 /*
 # Specifies the maximum number of ELS cmds we can have outstanding (for
@@ -5150,7 +5149,6 @@
 	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
 }
 
-
 /*
  * Dynamic FC Host Attributes Support
  */
@@ -5857,14 +5855,6 @@
 	else
 		phba->cfg_poll = lpfc_poll;
 
-	/* Ensure fdmi_on and enable_SmartSAN don't conflict */
-	if (phba->cfg_enable_SmartSAN) {
-		phba->cfg_fdmi_on = LPFC_FDMI_SMART_SAN;
-	} else {
-		if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN)
-			phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
-	}
-
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 79e261d..a38816e 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -2322,7 +2322,7 @@
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 256);
 
-	strncpy(ae->un.AttrString, "Smart SAN Version 1.0",
+	strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
 		sizeof(ae->un.AttrString));
 	len = strnlen(ae->un.AttrString,
 			  sizeof(ae->un.AttrString));
@@ -2397,7 +2397,7 @@
 	uint32_t size;
 
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
-	ae->un.AttrInt =  cpu_to_be32(0);
+	ae->un.AttrInt =  cpu_to_be32(1);
 	size = FOURBYTES + sizeof(uint32_t);
 	ad->AttrLen = cpu_to_be16(size);
 	ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 7f5abb8..0498f57 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -690,16 +690,17 @@
 	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
 	if (fabric_param_changed) {
 		/* Reset FDMI attribute masks based on config parameter */
-		if (phba->cfg_fdmi_on == LPFC_FDMI_NO_SUPPORT) {
-			vport->fdmi_hba_mask = 0;
-			vport->fdmi_port_mask = 0;
-		} else {
+		if (phba->cfg_enable_SmartSAN ||
+		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
 			/* Setup appropriate attribute masks */
 			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
-			if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN)
+			if (phba->cfg_enable_SmartSAN)
 				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
 			else
 				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
+		} else {
+			vport->fdmi_hba_mask = 0;
+			vport->fdmi_port_mask = 0;
 		}
 
 	}
@@ -1069,7 +1070,10 @@
 					lpfc_sli4_unreg_all_rpis(vport);
 				}
 			}
-			lpfc_issue_reg_vfi(vport);
+
+			/* Do not register VFI if the driver aborted FLOGI */
+			if (!lpfc_error_lost_link(irsp))
+				lpfc_issue_reg_vfi(vport);
 			lpfc_nlp_put(ndlp);
 			goto out;
 		}
@@ -4705,6 +4709,144 @@
 	desc->length = cpu_to_be32(sizeof(desc->info));
 }
 
+void
+lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
+		      struct lpfc_vport *vport)
+{
+	desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
+
+	desc->bbc_info.port_bbc = cpu_to_be32(
+				vport->fc_sparam.cmn.bbCreditMsb |
+				vport->fc_sparam.cmn.bbCreditlsb << 8);
+	if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP)
+		desc->bbc_info.attached_port_bbc = cpu_to_be32(
+				vport->phba->fc_fabparam.cmn.bbCreditMsb |
+				vport->phba->fc_fabparam.cmn.bbCreditlsb << 8);
+	else
+		desc->bbc_info.attached_port_bbc = 0;
+
+	desc->bbc_info.rtt = 0;
+	desc->length = cpu_to_be32(sizeof(desc->bbc_info));
+}
+
+void
+lpfc_rdp_res_oed_temp_desc(struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
+{
+	uint32_t flags;
+
+	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+	desc->oed_info.hi_alarm =
+			cpu_to_be16(page_a2[SSF_TEMP_HIGH_ALARM]);
+	desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TEMP_LOW_ALARM]);
+	desc->oed_info.hi_warning =
+			cpu_to_be16(page_a2[SSF_TEMP_HIGH_WARNING]);
+	desc->oed_info.lo_warning =
+			cpu_to_be16(page_a2[SSF_TEMP_LOW_WARNING]);
+	flags = 0xf; /* All four are valid */
+	flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
+	desc->oed_info.function_flags = cpu_to_be32(flags);
+	desc->length = cpu_to_be32(sizeof(desc->oed_info));
+}
+
+void
+lpfc_rdp_res_oed_voltage_desc(struct fc_rdp_oed_sfp_desc *desc,
+			      uint8_t *page_a2)
+{
+	uint32_t flags;
+
+	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+	desc->oed_info.hi_alarm =
+			cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_ALARM]);
+	desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_ALARM]);
+	desc->oed_info.hi_warning =
+			cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_WARNING]);
+	desc->oed_info.lo_warning =
+			cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_WARNING]);
+	flags = 0xf; /* All four are valid */
+	flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
+	desc->oed_info.function_flags = cpu_to_be32(flags);
+	desc->length = cpu_to_be32(sizeof(desc->oed_info));
+}
+
+void
+lpfc_rdp_res_oed_txbias_desc(struct fc_rdp_oed_sfp_desc *desc,
+			     uint8_t *page_a2)
+{
+	uint32_t flags;
+
+	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+	desc->oed_info.hi_alarm =
+			cpu_to_be16(page_a2[SSF_BIAS_HIGH_ALARM]);
+	desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_BIAS_LOW_ALARM]);
+	desc->oed_info.hi_warning =
+			cpu_to_be16(page_a2[SSF_BIAS_HIGH_WARNING]);
+	desc->oed_info.lo_warning =
+			cpu_to_be16(page_a2[SSF_BIAS_LOW_WARNING]);
+	flags = 0xf; /* All four are valid */
+	flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
+	desc->oed_info.function_flags = cpu_to_be32(flags);
+	desc->length = cpu_to_be32(sizeof(desc->oed_info));
+}
+
+void
+lpfc_rdp_res_oed_txpower_desc(struct fc_rdp_oed_sfp_desc *desc,
+			      uint8_t *page_a2)
+{
+	uint32_t flags;
+
+	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+	desc->oed_info.hi_alarm =
+			cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_ALARM]);
+	desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TXPOWER_LOW_ALARM]);
+	desc->oed_info.hi_warning =
+			cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_WARNING]);
+	desc->oed_info.lo_warning =
+			cpu_to_be16(page_a2[SSF_TXPOWER_LOW_WARNING]);
+	flags = 0xf; /* All four are valid */
+	flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
+	desc->oed_info.function_flags = cpu_to_be32(flags);
+	desc->length = cpu_to_be32(sizeof(desc->oed_info));
+}
+
+
+void
+lpfc_rdp_res_oed_rxpower_desc(struct fc_rdp_oed_sfp_desc *desc,
+			      uint8_t *page_a2)
+{
+	uint32_t flags;
+
+	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+	desc->oed_info.hi_alarm =
+			cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_ALARM]);
+	desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_RXPOWER_LOW_ALARM]);
+	desc->oed_info.hi_warning =
+			cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_WARNING]);
+	desc->oed_info.lo_warning =
+			cpu_to_be16(page_a2[SSF_RXPOWER_LOW_WARNING]);
+	flags = 0xf; /* All four are valid */
+	flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
+	desc->oed_info.function_flags = cpu_to_be32(flags);
+	desc->length = cpu_to_be32(sizeof(desc->oed_info));
+}
+
+void
+lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
+		      uint8_t *page_a0, struct lpfc_vport *vport)
+{
+	desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
+	memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
+	memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
+	memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
+	memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2);
+	memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
+	desc->length = cpu_to_be32(sizeof(desc->opd_info));
+}
+
 int
 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
 {
@@ -4776,6 +4918,8 @@
 
 	if (rdp_cap == 0)
 		rdp_cap = RDP_CAP_UNKNOWN;
+	if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
+		rdp_cap |= RDP_CAP_USER_CONFIGURED;
 
 	desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
 	desc->length = cpu_to_be32(sizeof(desc->info));
@@ -4875,6 +5019,19 @@
 	lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba);
 	lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc,
 			vport, ndlp);
+	lpfc_rdp_res_bbc_desc(&rdp_res->bbc_desc, &rdp_context->link_stat,
+			      vport);
+	lpfc_rdp_res_oed_temp_desc(&rdp_res->oed_temp_desc,
+				   rdp_context->page_a2);
+	lpfc_rdp_res_oed_voltage_desc(&rdp_res->oed_voltage_desc,
+				      rdp_context->page_a2);
+	lpfc_rdp_res_oed_txbias_desc(&rdp_res->oed_txbias_desc,
+				     rdp_context->page_a2);
+	lpfc_rdp_res_oed_txpower_desc(&rdp_res->oed_txpower_desc,
+				      rdp_context->page_a2);
+	lpfc_rdp_res_oed_rxpower_desc(&rdp_res->oed_rxpower_desc,
+				      rdp_context->page_a2);
+	lpfc_rdp_res_opd_desc(&rdp_res->opd_desc, rdp_context->page_a0, vport);
 	fec_size = lpfc_rdp_res_fec_desc(&rdp_res->fec_desc,
 			&rdp_context->link_stat);
 	rdp_res->length = cpu_to_be32(fec_size + RDP_DESC_PAYLOAD_SIZE);
@@ -7849,8 +8006,9 @@
 		return;
 	}
 
-	if ((phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) &&
-	    (vport->load_flag & FC_ALLOW_FDMI))
+	if ((phba->cfg_enable_SmartSAN ||
+	     (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
+	     (vport->load_flag & FC_ALLOW_FDMI))
 		lpfc_start_fdmi(vport);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 25b5dcd..ed22393 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -4545,7 +4545,8 @@
 				    (!(vport->load_flag & FC_UNLOADING)) &&
 				    (bf_get(lpfc_sli_intf_if_type,
 				     &phba->sli4_hba.sli_intf) ==
-				      LPFC_SLI_INTF_IF_TYPE_2)) {
+				      LPFC_SLI_INTF_IF_TYPE_2) &&
+				    (atomic_read(&ndlp->kref.refcount) > 0)) {
 					mbox->context1 = lpfc_nlp_get(ndlp);
 					mbox->mbox_cmpl =
 						lpfc_sli4_unreg_rpi_cmpl_clr;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index dd20412..39f0fd0 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -1134,9 +1134,10 @@
 #define RDP_PS_16GB            0x0400
 #define RDP_PS_32GB            0x0200
 
-#define RDP_CAP_UNKNOWN        0x0001
-#define RDP_PS_UNKNOWN         0x0002
-#define RDP_PS_NOT_ESTABLISHED 0x0001
+#define RDP_CAP_USER_CONFIGURED 0x0002
+#define RDP_CAP_UNKNOWN         0x0001
+#define RDP_PS_UNKNOWN          0x0002
+#define RDP_PS_NOT_ESTABLISHED  0x0001
 
 struct fc_rdp_port_speed {
 	uint16_t   capabilities;
@@ -1192,6 +1193,58 @@
 	struct fc_rdp_sfp_info sfp_info;
 };
 
+/* Buffer Credit Descriptor */
+struct fc_rdp_bbc_info {
+	uint32_t              port_bbc; /* FC_Port buffer-to-buffer credit */
+	uint32_t              attached_port_bbc;
+	uint32_t              rtt;      /* Round trip time */
+};
+#define RDP_BBC_DESC_TAG  0x00010006
+struct fc_rdp_bbc_desc {
+	uint32_t              tag;
+	uint32_t              length;
+	struct fc_rdp_bbc_info  bbc_info;
+};
+
+#define RDP_OED_TEMPERATURE  0x1
+#define RDP_OED_VOLTAGE      0x2
+#define RDP_OED_TXBIAS       0x3
+#define RDP_OED_TXPOWER      0x4
+#define RDP_OED_RXPOWER      0x5
+
+#define RDP_OED_TYPE_SHIFT   28
+/* Optical Element Data descriptor */
+struct fc_rdp_oed_info {
+	uint16_t            hi_alarm;
+	uint16_t            lo_alarm;
+	uint16_t            hi_warning;
+	uint16_t            lo_warning;
+	uint32_t            function_flags;
+};
+#define RDP_OED_DESC_TAG  0x00010007
+struct fc_rdp_oed_sfp_desc {
+	uint32_t             tag;
+	uint32_t             length;
+	struct fc_rdp_oed_info oed_info;
+};
+
+/* Optical Product Data descriptor */
+struct fc_rdp_opd_sfp_info {
+	uint8_t            vendor_name[16];
+	uint8_t            model_number[16];
+	uint8_t            serial_number[16];
+	uint8_t            reserved[2];
+	uint8_t            revision[2];
+	uint8_t            date[8];
+};
+
+#define RDP_OPD_DESC_TAG  0x00010008
+struct fc_rdp_opd_sfp_desc {
+	uint32_t             tag;
+	uint32_t             length;
+	struct fc_rdp_opd_sfp_info opd_info;
+};
+
 struct fc_rdp_req_frame {
 	uint32_t         rdp_command;           /* ELS command opcode (0x18)*/
 	uint32_t         rdp_des_length;        /* RDP Payload Word 1 */
@@ -1208,7 +1261,14 @@
 	struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */
 	struct fc_rdp_port_name_desc diag_port_names_desc;    /* Word 22-27 */
 	struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */
-	struct fc_fec_rdp_desc fec_desc;	      /* FC Word 34 - 37 */
+	struct fc_rdp_bbc_desc bbc_desc;                      /* FC Word 34-38*/
+	struct fc_rdp_oed_sfp_desc oed_temp_desc;             /* FC Word 39-43*/
+	struct fc_rdp_oed_sfp_desc oed_voltage_desc;          /* FC word 44-48*/
+	struct fc_rdp_oed_sfp_desc oed_txbias_desc;           /* FC word 49-53*/
+	struct fc_rdp_oed_sfp_desc oed_txpower_desc;          /* FC word 54-58*/
+	struct fc_rdp_oed_sfp_desc oed_rxpower_desc;          /* FC word 59-63*/
+	struct fc_rdp_opd_sfp_desc opd_desc;                  /* FC word 64-80*/
+	struct fc_fec_rdp_desc fec_desc;                      /* FC word 81-84*/
 };
 
 
@@ -1216,7 +1276,10 @@
 				+ sizeof(struct fc_rdp_sfp_desc) \
 				+ sizeof(struct fc_rdp_port_speed_desc) \
 				+ sizeof(struct fc_rdp_link_error_status_desc) \
-				+ (sizeof(struct fc_rdp_port_name_desc) * 2))
+				+ (sizeof(struct fc_rdp_port_name_desc) * 2) \
+				+ sizeof(struct fc_rdp_bbc_desc) \
+				+ (sizeof(struct fc_rdp_oed_sfp_desc) * 5) \
+				+ sizeof(struct fc_rdp_opd_sfp_desc))
 
 
 /******** FDMI ********/
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 608f941..0c7070b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009-2015 Emulex.  All rights reserved.                *
+ * Copyright (C) 2009-2016 Emulex.  All rights reserved.                *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -2557,7 +2557,26 @@
 
 /* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */
 
-#define SSF_AW_THRESHOLDS		0
+#define SSF_TEMP_HIGH_ALARM		0
+#define SSF_TEMP_LOW_ALARM		2
+#define SSF_TEMP_HIGH_WARNING		4
+#define SSF_TEMP_LOW_WARNING		6
+#define SSF_VOLTAGE_HIGH_ALARM		8
+#define SSF_VOLTAGE_LOW_ALARM		10
+#define SSF_VOLTAGE_HIGH_WARNING	12
+#define SSF_VOLTAGE_LOW_WARNING		14
+#define SSF_BIAS_HIGH_ALARM		16
+#define SSF_BIAS_LOW_ALARM		18
+#define SSF_BIAS_HIGH_WARNING		20
+#define SSF_BIAS_LOW_WARNING		22
+#define SSF_TXPOWER_HIGH_ALARM		24
+#define SSF_TXPOWER_LOW_ALARM		26
+#define SSF_TXPOWER_HIGH_WARNING	28
+#define SSF_TXPOWER_LOW_WARNING		30
+#define SSF_RXPOWER_HIGH_ALARM		32
+#define SSF_RXPOWER_LOW_ALARM		34
+#define SSF_RXPOWER_HIGH_WARNING	36
+#define SSF_RXPOWER_LOW_WARNING		38
 #define SSF_EXT_CAL_CONSTANTS		56
 #define SSF_CC_DMI			95
 #define SFF_TEMPERATURE_B1		96
@@ -2865,6 +2884,9 @@
 	uint32_t word17;
 	uint32_t word18;
 	uint32_t word19;
+#define cfg_ext_embed_cb_SHIFT			0
+#define cfg_ext_embed_cb_MASK			0x00000001
+#define cfg_ext_embed_cb_WORD			word19
 };
 
 struct lpfc_mbx_get_sli4_parameters {
@@ -3919,6 +3941,9 @@
 union lpfc_wqe128 {
 	uint32_t words[32];
 	struct lpfc_wqe_generic generic;
+	struct fcp_icmnd64_wqe fcp_icmd;
+	struct fcp_iread64_wqe fcp_iread;
+	struct fcp_iwrite64_wqe fcp_iwrite;
 	struct xmit_seq64_wqe xmit_sequence;
 	struct gen_req64_wqe gen_req;
 };
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f57d02c..b43f7ac 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -6158,11 +6158,12 @@
 	 * any initial discovery should be completed.
 	 */
 	vport->load_flag |= FC_ALLOW_FDMI;
-	if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) {
+	if (phba->cfg_enable_SmartSAN ||
+	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
 
 		/* Setup appropriate attribute masks */
 		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
-		if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN)
+		if (phba->cfg_enable_SmartSAN)
 			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
 		else
 			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
@@ -7264,8 +7265,15 @@
 		phba->sli4_hba.fcp_cq[idx] = qdesc;
 
 		/* Create Fast Path FCP WQs */
-		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
-					      phba->sli4_hba.wq_ecount);
+		if (phba->fcp_embed_io) {
+			qdesc = lpfc_sli4_queue_alloc(phba,
+						      LPFC_WQE128_SIZE,
+						      LPFC_WQE128_DEF_COUNT);
+		} else {
+			qdesc = lpfc_sli4_queue_alloc(phba,
+						      phba->sli4_hba.wq_esize,
+						      phba->sli4_hba.wq_ecount);
+		}
 		if (!qdesc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0503 Failed allocate fast-path FCP "
@@ -9510,6 +9518,15 @@
 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
 
+	/*
+	 * Issue IOs with CDB embedded in WQE to minimize the number
+	 * of DMAs the firmware has to do. Setting this to 1 also forces
+	 * the driver to use 128-byte WQEs for FCP IOs.
+	 */
+	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
+		phba->fcp_embed_io = 1;
+	else
+		phba->fcp_embed_io = 0;
 	return 0;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index f87f90e..12dbe99 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -2145,10 +2145,12 @@
 	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
 	reg_vfi->e_d_tov = phba->fc_edtov;
 	reg_vfi->r_a_tov = phba->fc_ratov;
-	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
-	reg_vfi->bde.addrLow = putPaddrLow(phys);
-	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
-	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+	if (phys) {
+		reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+		reg_vfi->bde.addrLow = putPaddrLow(phys);
+		reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+		reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+	}
 	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
 
 	/* Only FC supports upd bit */
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 4fb3581..3fa6533 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -231,13 +231,15 @@
 	if (phba->lpfc_hbq_pool)
 		pci_pool_destroy(phba->lpfc_hbq_pool);
 	phba->lpfc_hbq_pool = NULL;
-	mempool_destroy(phba->rrq_pool);
+
+	if (phba->rrq_pool)
+		mempool_destroy(phba->rrq_pool);
 	phba->rrq_pool = NULL;
 
 	/* Free NLP memory pool */
 	mempool_destroy(phba->nlp_mem_pool);
 	phba->nlp_mem_pool = NULL;
-	if (phba->sli_rev == LPFC_SLI_REV4) {
+	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
 		mempool_destroy(phba->active_rrq_pool);
 		phba->active_rrq_pool = NULL;
 	}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 193733e..56a3df4 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
  /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -1512,6 +1512,7 @@
 	if ((mb = phba->sli.mbox_active)) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
 			lpfc_nlp_put(ndlp);
 			mb->context2 = NULL;
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -1527,6 +1528,7 @@
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
 			}
+			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
 			lpfc_nlp_put(ndlp);
 			list_del(&mb->list);
 			phba->sli.mboxq_cnt--;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2207726..70edf21 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -2000,10 +2000,9 @@
  * @phba: Pointer to HBA context object.
  * @tag: Tag of the hbq buffer.
  *
- * This function is called with hbalock held. This function searches
- * for the hbq buffer associated with the given tag in the hbq buffer
- * list. If it finds the hbq buffer, it returns the hbq_buffer other wise
- * it returns NULL.
+ * This function searches for the hbq buffer associated with the given tag in
+ * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer;
+ * otherwise it returns NULL.
  **/
 static struct hbq_dmabuf *
 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
@@ -2012,8 +2011,6 @@
 	struct hbq_dmabuf *hbq_buf;
 	uint32_t hbqno;
 
-	lockdep_assert_held(&phba->hbalock);
-
 	hbqno = tag >> 16;
 	if (hbqno >= LPFC_MAX_HBQS)
 		return NULL;
@@ -2211,6 +2208,7 @@
 		rpi = pmb->u.mb.un.varWords[0];
 		vpi = pmb->u.mb.un.varRegLogin.vpi;
 		lpfc_unreg_login(phba, vpi, rpi, pmb);
+		pmb->vport = vport;
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 		if (rc != MBX_NOT_FINISHED)
@@ -4688,6 +4686,7 @@
 
 		break;
 	}
+	phba->fcp_embed_io = 0;	/* SLI4 FC support only */
 
 	rc = lpfc_sli_config_port(phba, mode);
 
@@ -6320,10 +6319,12 @@
 
 	mqe = &mboxq->u.mqe;
 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
-	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
+	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
 		phba->hba_flag |= HBA_FCOE_MODE;
-	else
+		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
+	} else {
 		phba->hba_flag &= ~HBA_FCOE_MODE;
+	}
 
 	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
 		LPFC_DCBX_CEE_MODE)
@@ -8218,12 +8219,15 @@
 	else
 		command_type = ELS_COMMAND_NON_FIP;
 
+	if (phba->fcp_embed_io)
+		memset(wqe, 0, sizeof(union lpfc_wqe128));
 	/* Some of the fields are in the right position already */
 	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
-	abort_tag = (uint32_t) iocbq->iotag;
-	xritag = iocbq->sli4_xritag;
 	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
 	wqe->generic.wqe_com.word10 = 0;
+
+	abort_tag = (uint32_t) iocbq->iotag;
+	xritag = iocbq->sli4_xritag;
 	/* words0-2 bpl convert bde */
 	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
 		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -8372,11 +8376,9 @@
 		       iocbq->iocb.ulpFCP2Rcvy);
 		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
 		/* Always open the exchange */
-		bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
 		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
 		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
 		       LPFC_WQE_LENLOC_WORD4);
-		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
 		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
 		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
 		if (iocbq->iocb_flag & LPFC_IO_OAS) {
@@ -8387,6 +8389,35 @@
 				       (phba->cfg_XLanePriority << 1));
 			}
 		}
+		/* Note, word 10 is already initialized to 0 */
+
+		if (phba->fcp_embed_io) {
+			struct lpfc_scsi_buf *lpfc_cmd;
+			struct sli4_sge *sgl;
+			union lpfc_wqe128 *wqe128;
+			struct fcp_cmnd *fcp_cmnd;
+			uint32_t *ptr;
+
+			/* 128 byte wqe support here */
+			wqe128 = (union lpfc_wqe128 *)wqe;
+
+			lpfc_cmd = iocbq->context1;
+			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+			fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+			/* Word 0-2 - FCP_CMND */
+			wqe128->generic.bde.tus.f.bdeFlags =
+				BUFF_TYPE_BDE_IMMED;
+			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
+			wqe128->generic.bde.addrHigh = 0;
+			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
+
+			bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
+
+			/* Word 22-29  FCP CMND Payload */
+			ptr = &wqe128->words[22];
+			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+		}
 		break;
 	case CMD_FCP_IREAD64_CR:
 		/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8401,11 +8432,9 @@
 		       iocbq->iocb.ulpFCP2Rcvy);
 		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
 		/* Always open the exchange */
-		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
 		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
 		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
 		       LPFC_WQE_LENLOC_WORD4);
-		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
 		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
 		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
 		if (iocbq->iocb_flag & LPFC_IO_OAS) {
@@ -8416,6 +8445,35 @@
 				       (phba->cfg_XLanePriority << 1));
 			}
 		}
+		/* Note, word 10 is already initialized to 0 */
+
+		if (phba->fcp_embed_io) {
+			struct lpfc_scsi_buf *lpfc_cmd;
+			struct sli4_sge *sgl;
+			union lpfc_wqe128 *wqe128;
+			struct fcp_cmnd *fcp_cmnd;
+			uint32_t *ptr;
+
+			/* 128 byte wqe support here */
+			wqe128 = (union lpfc_wqe128 *)wqe;
+
+			lpfc_cmd = iocbq->context1;
+			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+			fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+			/* Word 0-2 - FCP_CMND */
+			wqe128->generic.bde.tus.f.bdeFlags =
+				BUFF_TYPE_BDE_IMMED;
+			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
+			wqe128->generic.bde.addrHigh = 0;
+			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
+
+			bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
+
+			/* Word 22-29  FCP CMND Payload */
+			ptr = &wqe128->words[22];
+			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+		}
 		break;
 	case CMD_FCP_ICMND64_CR:
 		/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8427,13 +8485,11 @@
 		/* word3 iocb=IO_TAG wqe=reserved */
 		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
 		/* Always open the exchange */
-		bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
 		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
 		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
 		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
 		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
 		       LPFC_WQE_LENLOC_NONE);
-		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
 		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
 		       iocbq->iocb.ulpFCP2Rcvy);
 		if (iocbq->iocb_flag & LPFC_IO_OAS) {
@@ -8444,6 +8500,35 @@
 				       (phba->cfg_XLanePriority << 1));
 			}
 		}
+		/* Note, word 10 is already initialized to 0 */
+
+		if (phba->fcp_embed_io) {
+			struct lpfc_scsi_buf *lpfc_cmd;
+			struct sli4_sge *sgl;
+			union lpfc_wqe128 *wqe128;
+			struct fcp_cmnd *fcp_cmnd;
+			uint32_t *ptr;
+
+			/* 128 byte wqe support here */
+			wqe128 = (union lpfc_wqe128 *)wqe;
+
+			lpfc_cmd = iocbq->context1;
+			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+			fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+			/* Word 0-2 - FCP_CMND */
+			wqe128->generic.bde.tus.f.bdeFlags =
+				BUFF_TYPE_BDE_IMMED;
+			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
+			wqe128->generic.bde.addrHigh = 0;
+			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
+
+			bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
+
+			/* Word 22-29  FCP CMND Payload */
+			ptr = &wqe128->words[22];
+			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+		}
 		break;
 	case CMD_GEN_REQUEST64_CR:
 		/* For this command calculate the xmit length of the
@@ -8675,12 +8760,19 @@
 			 struct lpfc_iocbq *piocb, uint32_t flag)
 {
 	struct lpfc_sglq *sglq;
-	union lpfc_wqe wqe;
+	union lpfc_wqe *wqe;
+	union lpfc_wqe128 wqe128;
 	struct lpfc_queue *wq;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
 
 	lockdep_assert_held(&phba->hbalock);
 
+	/*
+	 * The WQE can be either 64 or 128 bytes,
+	 * so allocate space on the stack assuming the largest.
+	 */
+	wqe = (union lpfc_wqe *)&wqe128;
+
 	if (piocb->sli4_xritag == NO_XRI) {
 		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
 		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
@@ -8727,7 +8819,7 @@
 			return IOCB_ERROR;
 	}
 
-	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+	if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
 		return IOCB_ERROR;
 
 	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
@@ -8737,12 +8829,12 @@
 		} else {
 			wq = phba->sli4_hba.oas_wq;
 		}
-		if (lpfc_sli4_wq_put(wq, &wqe))
+		if (lpfc_sli4_wq_put(wq, wqe))
 			return IOCB_ERROR;
 	} else {
 		if (unlikely(!phba->sli4_hba.els_wq))
 			return IOCB_ERROR;
-		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
 			return IOCB_ERROR;
 	}
 	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
@@ -8757,9 +8849,9 @@
  * pointer from the lpfc_hba struct.
  *
  * Return codes:
- * 	IOCB_ERROR - Error
- * 	IOCB_SUCCESS - Success
- * 	IOCB_BUSY - Busy
+ * IOCB_ERROR - Error
+ * IOCB_SUCCESS - Success
+ * IOCB_BUSY - Busy
  **/
 int
 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4dc2256..fa0d531 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.0.0.10."
+#define LPFC_DRIVER_VERSION "11.1.0.0."
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
@@ -30,4 +30,4 @@
 
 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
 		LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright(c) 2004-2015 Emulex.  All rights reserved."
+#define LPFC_COPYRIGHT "Copyright(c) 2004-2016 Emulex.  All rights reserved."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index b3f85de..c27f4b7 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -395,7 +395,8 @@
 
 	/* At this point we are fully registered with SCSI Layer.  */
 	vport->load_flag |= FC_ALLOW_FDMI;
-	if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) {
+	if (phba->cfg_enable_SmartSAN ||
+	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
 		/* Setup appropriate attribute masks */
 		vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask;
 		vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index bb23813..a590089 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -28,24 +28,23 @@
 
 /* Definitions for the core NCR5380 driver. */
 
-#define PSEUDO_DMA
-
-#define NCR5380_implementation_fields   unsigned char *pdma_base
+#define NCR5380_implementation_fields   unsigned char *pdma_base; \
+                                        int pdma_residual
 
 #define NCR5380_read(reg)               macscsi_read(instance, reg)
 #define NCR5380_write(reg, value)       macscsi_write(instance, reg, value)
 
-#define NCR5380_pread                   macscsi_pread
-#define NCR5380_pwrite                  macscsi_pwrite
-#define NCR5380_dma_xfer_len(instance, cmd, phase)	(cmd->transfersize)
+#define NCR5380_dma_xfer_len(instance, cmd, phase) \
+        macscsi_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_recv_setup          macscsi_pread
+#define NCR5380_dma_send_setup          macscsi_pwrite
+#define NCR5380_dma_residual(instance)  (hostdata->pdma_residual)
 
 #define NCR5380_intr                    macscsi_intr
 #define NCR5380_queue_command           macscsi_queue_command
 #define NCR5380_abort                   macscsi_abort
 #define NCR5380_bus_reset               macscsi_bus_reset
 #define NCR5380_info                    macscsi_info
-#define NCR5380_show_info               macscsi_show_info
-#define NCR5380_write_info              macscsi_write_info
 
 #include "NCR5380.h"
 
@@ -57,8 +56,6 @@
 module_param(setup_sg_tablesize, int, 0);
 static int setup_use_pdma = -1;
 module_param(setup_use_pdma, int, 0);
-static int setup_use_tagged_queuing = -1;
-module_param(setup_use_tagged_queuing, int, 0);
 static int setup_hostid = -1;
 module_param(setup_hostid, int, 0);
 static int setup_toshiba_delay = -1;
@@ -97,8 +94,7 @@
 		setup_sg_tablesize = ints[3];
 	if (ints[0] >= 4)
 		setup_hostid = ints[4];
-	if (ints[0] >= 5)
-		setup_use_tagged_queuing = ints[5];
+	/* ints[5] (use_tagged_queuing) is ignored */
 	if (ints[0] >= 6)
 		setup_use_pdma = ints[6];
 	if (ints[0] >= 7)
@@ -109,19 +105,9 @@
 __setup("mac5380=", mac_scsi_setup);
 #endif /* !MODULE */
 
-#ifdef PSEUDO_DMA
-/* 
-   Pseudo-DMA: (Ove Edlund)
-   The code attempts to catch bus errors that occur if one for example
-   "trips over the cable".
-   XXX: Since bus errors in the PDMA routines never happen on my 
-   computer, the bus error code is untested. 
-   If the code works as intended, a bus error results in Pseudo-DMA 
-   being disabled, meaning that the driver switches to slow handshake.
-   If bus errors are NOT extremely rare, this has to be changed. 
-*/
+/* Pseudo DMA asm originally by Ove Edlund */
 
-#define CP_IO_TO_MEM(s,d,len)				\
+#define CP_IO_TO_MEM(s,d,n)				\
 __asm__ __volatile__					\
     ("    cmp.w  #4,%2\n"				\
      "    bls    8f\n"					\
@@ -158,61 +144,73 @@
      " 9: \n"						\
      ".section .fixup,\"ax\"\n"				\
      "    .even\n"					\
-     "90: moveq.l #1, %2\n"				\
+     "91: moveq.l #1, %2\n"				\
+     "    jra 9b\n"					\
+     "94: moveq.l #4, %2\n"				\
      "    jra 9b\n"					\
      ".previous\n"					\
      ".section __ex_table,\"a\"\n"			\
      "   .align 4\n"					\
-     "   .long  1b,90b\n"				\
-     "   .long  3b,90b\n"				\
-     "   .long 31b,90b\n"				\
-     "   .long 32b,90b\n"				\
-     "   .long 33b,90b\n"				\
-     "   .long 34b,90b\n"				\
-     "   .long 35b,90b\n"				\
-     "   .long 36b,90b\n"				\
-     "   .long 37b,90b\n"				\
-     "   .long  5b,90b\n"				\
-     "   .long  7b,90b\n"				\
+     "   .long  1b,91b\n"				\
+     "   .long  3b,94b\n"				\
+     "   .long 31b,94b\n"				\
+     "   .long 32b,94b\n"				\
+     "   .long 33b,94b\n"				\
+     "   .long 34b,94b\n"				\
+     "   .long 35b,94b\n"				\
+     "   .long 36b,94b\n"				\
+     "   .long 37b,94b\n"				\
+     "   .long  5b,94b\n"				\
+     "   .long  7b,91b\n"				\
      ".previous"					\
-     : "=a"(s), "=a"(d), "=d"(len)			\
-     : "0"(s), "1"(d), "2"(len)				\
+     : "=a"(s), "=a"(d), "=d"(n)			\
+     : "0"(s), "1"(d), "2"(n)				\
      : "d0")
 
 static int macscsi_pread(struct Scsi_Host *instance,
                          unsigned char *dst, int len)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	unsigned char *d;
-	unsigned char *s;
+	unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
+	unsigned char *d = dst;
+	int n = len;
+	int transferred;
 
-	s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
-	d = dst;
+	while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+	                              BASR_DRQ | BASR_PHASE_MATCH,
+	                              BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+		CP_IO_TO_MEM(s, d, n);
 
-	/* These conditions are derived from MacOS */
+		transferred = d - dst - n;
+		hostdata->pdma_residual = len - transferred;
 
-	while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
-	       !(NCR5380_read(STATUS_REG) & SR_REQ))
-		;
+		/* No bus error. */
+		if (n == 0)
+			return 0;
 
-	if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
-	    (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
-		pr_err("Error in macscsi_pread\n");
-		return -1;
+		/* Target changed phase early? */
+		if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+		                           BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
+			scmd_printk(KERN_ERR, hostdata->connected,
+			            "%s: !REQ and !ACK\n", __func__);
+		if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+			return 0;
+
+		dsprintk(NDEBUG_PSEUDO_DMA, instance,
+		         "%s: bus error (%d/%d)\n", __func__, transferred, len);
+		NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+		d = dst + transferred;
+		n = len - transferred;
 	}
 
-	CP_IO_TO_MEM(s, d, len);
-
-	if (len != 0) {
-		pr_notice("Bus error in macscsi_pread\n");
-		return -1;
-	}
-
-	return 0;
+	scmd_printk(KERN_ERR, hostdata->connected,
+	            "%s: phase mismatch or !DRQ\n", __func__);
+	NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+	return -1;
 }
 
 
-#define CP_MEM_TO_IO(s,d,len)				\
+#define CP_MEM_TO_IO(s,d,n)				\
 __asm__ __volatile__					\
     ("    cmp.w  #4,%2\n"				\
      "    bls    8f\n"					\
@@ -249,59 +247,89 @@
      " 9: \n"						\
      ".section .fixup,\"ax\"\n"				\
      "    .even\n"					\
-     "90: moveq.l #1, %2\n"				\
+     "91: moveq.l #1, %2\n"				\
+     "    jra 9b\n"					\
+     "94: moveq.l #4, %2\n"				\
      "    jra 9b\n"					\
      ".previous\n"					\
      ".section __ex_table,\"a\"\n"			\
      "   .align 4\n"					\
-     "   .long  1b,90b\n"				\
-     "   .long  3b,90b\n"				\
-     "   .long 31b,90b\n"				\
-     "   .long 32b,90b\n"				\
-     "   .long 33b,90b\n"				\
-     "   .long 34b,90b\n"				\
-     "   .long 35b,90b\n"				\
-     "   .long 36b,90b\n"				\
-     "   .long 37b,90b\n"				\
-     "   .long  5b,90b\n"				\
-     "   .long  7b,90b\n"				\
+     "   .long  1b,91b\n"				\
+     "   .long  3b,94b\n"				\
+     "   .long 31b,94b\n"				\
+     "   .long 32b,94b\n"				\
+     "   .long 33b,94b\n"				\
+     "   .long 34b,94b\n"				\
+     "   .long 35b,94b\n"				\
+     "   .long 36b,94b\n"				\
+     "   .long 37b,94b\n"				\
+     "   .long  5b,94b\n"				\
+     "   .long  7b,91b\n"				\
      ".previous"					\
-     : "=a"(s), "=a"(d), "=d"(len)			\
-     : "0"(s), "1"(d), "2"(len)				\
+     : "=a"(s), "=a"(d), "=d"(n)			\
+     : "0"(s), "1"(d), "2"(n)				\
      : "d0")
 
 static int macscsi_pwrite(struct Scsi_Host *instance,
                           unsigned char *src, int len)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	unsigned char *s;
-	unsigned char *d;
+	unsigned char *s = src;
+	unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
+	int n = len;
+	int transferred;
 
-	s = src;
-	d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
+	while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+	                              BASR_DRQ | BASR_PHASE_MATCH,
+	                              BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+		CP_MEM_TO_IO(s, d, n);
 
-	/* These conditions are derived from MacOS */
+		transferred = s - src - n;
+		hostdata->pdma_residual = len - transferred;
 
-	while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
-	       (!(NCR5380_read(STATUS_REG) & SR_REQ) ||
-	        (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)))
-		;
+		/* Target changed phase early? */
+		if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+		                           BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
+			scmd_printk(KERN_ERR, hostdata->connected,
+			            "%s: !REQ and !ACK\n", __func__);
+		if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+			return 0;
 
-	if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) {
-		pr_err("Error in macscsi_pwrite\n");
-		return -1;
+		/* No bus error. */
+		if (n == 0) {
+			if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG,
+			                          TCR_LAST_BYTE_SENT,
+			                          TCR_LAST_BYTE_SENT, HZ / 64) < 0)
+				scmd_printk(KERN_ERR, hostdata->connected,
+				            "%s: Last Byte Sent timeout\n", __func__);
+			return 0;
+		}
+
+		dsprintk(NDEBUG_PSEUDO_DMA, instance,
+		         "%s: bus error (%d/%d)\n", __func__, transferred, len);
+		NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+		s = src + transferred;
+		n = len - transferred;
 	}
 
-	CP_MEM_TO_IO(s, d, len);
+	scmd_printk(KERN_ERR, hostdata->connected,
+	            "%s: phase mismatch or !DRQ\n", __func__);
+	NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
 
-	if (len != 0) {
-		pr_notice("Bus error in macscsi_pwrite\n");
-		return -1;
-	}
-
-	return 0;
+	return -1;
 }
-#endif
+
+static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
+                                struct scsi_cmnd *cmd)
+{
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+	if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
+	    cmd->SCp.this_residual < 16)
+		return 0;
+
+	return cmd->SCp.this_residual;
+}
 
 #include "NCR5380.c"
 
@@ -311,8 +339,6 @@
 static struct scsi_host_template mac_scsi_template = {
 	.module			= THIS_MODULE,
 	.proc_name		= DRV_MODULE_NAME,
-	.show_info		= macscsi_show_info,
-	.write_info		= macscsi_write_info,
 	.name			= "Macintosh NCR5380 SCSI",
 	.info			= macscsi_info,
 	.queuecommand		= macscsi_queue_command,
@@ -320,7 +346,7 @@
 	.eh_bus_reset_handler	= macscsi_bus_reset,
 	.can_queue		= 16,
 	.this_id		= 7,
-	.sg_tablesize		= SG_ALL,
+	.sg_tablesize		= 1,
 	.cmd_per_lun		= 2,
 	.use_clustering		= DISABLE_CLUSTERING,
 	.cmd_size		= NCR5380_CMD_SIZE,
@@ -338,9 +364,7 @@
 	if (!pio_mem)
 		return -ENODEV;
 
-#ifdef PSEUDO_DMA
 	pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-#endif
 
 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 
@@ -358,8 +382,6 @@
 		mac_scsi_template.sg_tablesize = setup_sg_tablesize;
 	if (setup_hostid >= 0)
 		mac_scsi_template.this_id = setup_hostid & 7;
-	if (setup_use_pdma < 0)
-		setup_use_pdma = 0;
 
 	instance = scsi_host_alloc(&mac_scsi_template,
 	                           sizeof(struct NCR5380_hostdata));
@@ -379,12 +401,9 @@
 	} else
 		host_flags |= FLAG_NO_PSEUDO_DMA;
 
-#ifdef SUPPORT_TAGS
-	host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
-#endif
 	host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
 
-	error = NCR5380_init(instance, host_flags);
+	error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP);
 	if (error)
 		goto fail_init;
 
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index fce414a..ca86c88 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"06.810.09.00-rc1"
-#define MEGASAS_RELDATE				"Jan. 28, 2016"
+#define MEGASAS_VERSION				"06.811.02.00-rc1"
+#define MEGASAS_RELDATE				"April 12, 2016"
 
 /*
  * Device IDs
@@ -1344,6 +1344,8 @@
 #define SCAN_PD_CHANNEL	0x1
 #define SCAN_VD_CHANNEL	0x2
 
+#define MEGASAS_KDUMP_QUEUE_DEPTH               100
+
 enum MR_SCSI_CMD_TYPE {
 	READ_WRITE_LDIO = 0,
 	NON_READ_WRITE_LDIO = 1,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index e6ebc7a..f4b0690 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2670,17 +2670,6 @@
 }
 
 /**
- * megasas_reset_device -	Device reset handler entry point
- */
-static int megasas_reset_device(struct scsi_cmnd *scmd)
-{
-	/*
-	 * First wait for all commands to complete
-	 */
-	return megasas_generic_reset(scmd);
-}
-
-/**
  * megasas_reset_bus_host -	Bus & host reset handler entry point
  */
 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
@@ -2702,6 +2691,50 @@
 }
 
 /**
+ * megasas_task_abort - Issues task abort request to firmware
+ *			(supported only for fusion adapters)
+ * @scmd:		SCSI command pointer
+ */
+static int megasas_task_abort(struct scsi_cmnd *scmd)
+{
+	int ret;
+	struct megasas_instance *instance;
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+	if (instance->ctrl_context)
+		ret = megasas_task_abort_fusion(scmd);
+	else {
+		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
+		ret = FAILED;
+	}
+
+	return ret;
+}
+
+/**
+ * megasas_reset_target:  Issues target reset request to firmware
+ *                        (supported only for fusion adapters)
+ * @scmd:                 SCSI command pointer
+ */
+static int megasas_reset_target(struct scsi_cmnd *scmd)
+{
+	int ret;
+	struct megasas_instance *instance;
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+	if (instance->ctrl_context)
+		ret = megasas_reset_target_fusion(scmd);
+	else {
+		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
+		ret = FAILED;
+	}
+
+	return ret;
+}
+
+/**
  * megasas_bios_param - Returns disk geometry for a disk
  * @sdev:		device handle
  * @bdev:		block device
@@ -2969,8 +3002,8 @@
 	.slave_alloc = megasas_slave_alloc,
 	.slave_destroy = megasas_slave_destroy,
 	.queuecommand = megasas_queue_command,
-	.eh_device_reset_handler = megasas_reset_device,
-	.eh_bus_reset_handler = megasas_reset_bus_host,
+	.eh_target_reset_handler = megasas_reset_target,
+	.eh_abort_handler = megasas_task_abort,
 	.eh_host_reset_handler = megasas_reset_bus_host,
 	.eh_timed_out = megasas_reset_timer,
 	.shost_attrs = megaraid_host_attrs,
@@ -5152,7 +5185,7 @@
 
 	instance->instancet->enable_intr(instance);
 
-	dev_err(&instance->pdev->dev, "INIT adapter done\n");
+	dev_info(&instance->pdev->dev, "INIT adapter done\n");
 
 	megasas_setup_jbod_map(instance);
 
@@ -5598,14 +5631,6 @@
 	host->max_lun = MEGASAS_MAX_LUN;
 	host->max_cmd_len = 16;
 
-	/* Fusion only supports host reset */
-	if (instance->ctrl_context) {
-		host->hostt->eh_device_reset_handler = NULL;
-		host->hostt->eh_bus_reset_handler = NULL;
-		host->hostt->eh_target_reset_handler = megasas_reset_target_fusion;
-		host->hostt->eh_abort_handler = megasas_task_abort_fusion;
-	}
-
 	/*
 	 * Notify the mid-layer about the new controller
 	 */
@@ -5761,13 +5786,6 @@
 		break;
 	}
 
-	instance->system_info_buf = pci_zalloc_consistent(pdev,
-					sizeof(struct MR_DRV_SYSTEM_INFO),
-					&instance->system_info_h);
-
-	if (!instance->system_info_buf)
-		dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
-
 	/* Crash dump feature related initialisation*/
 	instance->drv_buf_index = 0;
 	instance->drv_buf_alloc = 0;
@@ -5777,14 +5795,6 @@
 	spin_lock_init(&instance->crashdump_lock);
 	instance->crash_dump_buf = NULL;
 
-	if (!reset_devices)
-		instance->crash_dump_buf = pci_alloc_consistent(pdev,
-						CRASH_DMA_BUF_SIZE,
-						&instance->crash_dump_h);
-	if (!instance->crash_dump_buf)
-		dev_err(&pdev->dev, "Can't allocate Firmware "
-			"crash dump DMA buffer\n");
-
 	megasas_poll_wait_aen = 0;
 	instance->flag_ieee = 0;
 	instance->ev = NULL;
@@ -5803,11 +5813,26 @@
 		goto fail_alloc_dma_buf;
 	}
 
-	instance->pd_info = pci_alloc_consistent(pdev,
-		sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+	if (!reset_devices) {
+		instance->system_info_buf = pci_zalloc_consistent(pdev,
+					sizeof(struct MR_DRV_SYSTEM_INFO),
+					&instance->system_info_h);
+		if (!instance->system_info_buf)
+			dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
 
-	if (!instance->pd_info)
-		dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
+		instance->pd_info = pci_alloc_consistent(pdev,
+			sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+
+		if (!instance->pd_info)
+			dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
+
+		instance->crash_dump_buf = pci_alloc_consistent(pdev,
+						CRASH_DMA_BUF_SIZE,
+						&instance->crash_dump_h);
+		if (!instance->crash_dump_buf)
+			dev_err(&pdev->dev, "Can't allocate Firmware "
+				"crash dump DMA buffer\n");
+	}
 
 	/*
 	 * Initialize locks and queues
@@ -7174,6 +7199,16 @@
 	int rval;
 
 	/*
+	 * Booted in kdump kernel, minimize the memory footprint by
+	 * disabling a few features
+	 */
+	if (reset_devices) {
+		msix_vectors = 1;
+		rdpq_enable = 0;
+		dual_qdepth_disable = 1;
+	}
+
+	/*
 	 * Announce driver version and other information
 	 */
 	pr_info("megasas: %s\n", MEGASAS_VERSION);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 98a848b..ec83754 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -257,6 +257,9 @@
 		if (!instance->is_rdpq)
 			instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);
 
+		if (reset_devices)
+			instance->max_fw_cmds = min(instance->max_fw_cmds,
+						(u16)MEGASAS_KDUMP_QUEUE_DEPTH);
 		/*
 		* Reduce the max supported cmds by 1. This is to ensure that the
 		* reply_q_sz (1 more than the max cmd that driver may send)
@@ -851,7 +854,7 @@
 		ret = 1;
 		goto fail_fw_init;
 	}
-	dev_err(&instance->pdev->dev, "Init cmd success\n");
+	dev_info(&instance->pdev->dev, "Init cmd success\n");
 
 	ret = 0;
 
@@ -2759,6 +2762,7 @@
 			dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
 			       " will reset adapter scsi%d.\n",
 				instance->host->host_no);
+			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
 			retval = 1;
 			goto out;
 		}
@@ -2766,6 +2770,7 @@
 		if (reason == MFI_IO_TIMEOUT_OCR) {
 			dev_info(&instance->pdev->dev,
 				"MFI IO is timed out, initiating OCR\n");
+			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
 			retval = 1;
 			goto out;
 		}
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index dfad5b8..a9a659f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
  *                 scatter/gather formats.
  * Creation Date:  June 21, 2006
  *
- * mpi2.h Version:  02.00.39
+ * mpi2.h Version:  02.00.42
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -100,6 +100,9 @@
  *                     Added MPI2_DIAG_SBR_RELOAD.
  * 03-19-15  02.00.38  Bumped MPI2_HEADER_VERSION_UNIT.
  * 05-25-15  02.00.39  Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-25-15  02.00.40  Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-15-15  02.00.41  Bumped MPI2_HEADER_VERSION_UNIT.
+ * 01-01-16  02.00.42  Bumped MPI2_HEADER_VERSION_UNIT.
  * --------------------------------------------------------------------------
  */
 
@@ -139,7 +142,7 @@
 #define MPI2_VERSION_02_06		    (0x0206)
 
 /*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x27)
+#define MPI2_HEADER_VERSION_UNIT            (0x2A)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 9cf09bf..95356a8 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
  *         Title:  MPI Configuration messages and pages
  * Creation Date:  November 10, 2006
  *
- *   mpi2_cnfg.h Version:  02.00.33
+ *   mpi2_cnfg.h Version:  02.00.35
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -183,9 +183,12 @@
  *                     Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG.
  *                     Added AdapterOrderAux fields to BIOS Page 3.
  * 03-16-15  02.00.31  Updated for MPI v2.6.
+ *                     Added Flags field to IO Unit Page 7.
  *                     Added new SAS Phy Event codes
  * 05-25-15  02.00.33  Added more defines for the BiosOptions field of
  *                     MPI2_CONFIG_PAGE_BIOS_1.
+ * 08-25-15  02.00.34  Bumped Header Version.
+ * 12-18-15  02.00.35  Added SATADeviceWaitTime to SAS IO Unit Page 4.
  * --------------------------------------------------------------------------
  */
 
@@ -958,13 +961,16 @@
 	U8                      Reserved3;              /*0x17 */
 	U32			BoardPowerRequirement;	/*0x18 */
 	U32			PCISlotPowerAllocation;	/*0x1C */
-	U32			Reserved6;		/* 0x20 */
-	U32			Reserved7;		/* 0x24 */
+/* reserved prior to MPI v2.6 */
+	U8		Flags;			/* 0x20 */
+	U8		Reserved6;			/* 0x21 */
+	U16		Reserved7;			/* 0x22 */
+	U32		Reserved8;			/* 0x24 */
 } MPI2_CONFIG_PAGE_IO_UNIT_7,
 	*PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
 	Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t;
 
-#define MPI2_IOUNITPAGE7_PAGEVERSION			(0x04)
+#define MPI2_IOUNITPAGE7_PAGEVERSION			(0x05)
 
 /*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */
 #define MPI25_IOUNITPAGE7_PM_INIT_MASK              (0xC0)
@@ -1045,6 +1051,8 @@
 #define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT      (0x01)
 #define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS         (0x02)
 
+/* defines for IO Unit Page 7 Flags field */
+#define MPI2_IOUNITPAGE7_FLAG_CABLE_POWER_EXC       (0x01)
 
 /*IO Unit Page 8 */
 
@@ -2271,7 +2279,7 @@
 	U8
 		BootDeviceWaitTime;             /*0x24 */
 	U8
-		Reserved4;                      /*0x25 */
+		SATADeviceWaitTime;		/*0x25 */
 	U16
 		Reserved5;                      /*0x26 */
 	U8
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index c38f624..bba56b6 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
  *         Title:  MPI SCSI initiator mode messages and structures
  * Creation Date:  June 23, 2006
  *
- * mpi2_init.h Version:  02.00.17
+ * mpi2_init.h Version:  02.00.20
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -51,6 +51,9 @@
  *                     Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH.
  *                     Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and
  *                     MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF.
+ * 08-26-15  02.00.18  Added SCSITASKMGMT_MSGFLAGS for Target Reset.
+ * 12-18-15  02.00.19  Added EEDPObservedValue to SCSI IO Reply message.
+ * 01-04-16  02.00.20  Modified EEDP reported values in SCSI IO Reply message.
  * --------------------------------------------------------------------------
  */
 
@@ -359,8 +362,14 @@
 	U16 TaskTag;		/*0x20 */
 	U16 SCSIStatusQualifier; /* 0x22 */
 	U32 BidirectionalTransferCount;	/*0x24 */
-	U32 EEDPErrorOffset;	/*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/
-	U32 Reserved6;		/*0x2C */
+ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+	U32 EEDPErrorOffset;	/* 0x28 */
+ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+	U16 EEDPObservedAppTag;	/* 0x2C */
+ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+	U16 EEDPObservedGuard;	/* 0x2E */
+ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+	U32 EEDPObservedRefTag;	/* 0x30 */
 } MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY,
 	Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t;
 
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index cf510ed..8bae305 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
  *         Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  * Creation Date:  October 11, 2006
  *
- * mpi2_ioc.h Version:  02.00.26
+ * mpi2_ioc.h Version:  02.00.27
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -134,9 +134,13 @@
  *			Added Encrypted Hash Extended Image.
  * 12-05-13  02.00.24  Added MPI25_HASH_IMAGE_TYPE_BIOS.
  * 11-18-14  02.00.25  Updated copyright information.
- * 03-16-15  02.00.26  Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
+ * 03-16-15  02.00.26  Updated for MPI v2.6.
+ *		       Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and
+ *		       MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT.
+ *                     Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
  *                     MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
  *                     Added MPI26_CTRL_OP_SHUTDOWN.
+ * 08-25-15  02.00.27  Added IC ARCH Class based signature defines
  * --------------------------------------------------------------------------
  */
 
@@ -168,7 +172,7 @@
 	U16 MsgVersion;		/*0x0C */
 	U16 HeaderVersion;	/*0x0E */
 	U32 Reserved5;		/*0x10 */
-	U16 Reserved6;		/*0x14 */
+	U16 ConfigurationFlags;	/* 0x14 */
 	U8 HostPageSize;	/*0x16 */
 	U8 HostMSIxVectors;	/*0x17 */
 	U16 Reserved8;		/*0x18 */
@@ -516,6 +520,7 @@
 #define MPI2_EVENT_TEMP_THRESHOLD                   (0x0027)
 #define MPI2_EVENT_HOST_MESSAGE                     (0x0028)
 #define MPI2_EVENT_POWER_PERFORMANCE_CHANGE         (0x0029)
+#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION           (0x0034)
 #define MPI2_EVENT_MIN_PRODUCT_SPECIFIC             (0x006E)
 #define MPI2_EVENT_MAX_PRODUCT_SPECIFIC             (0x007F)
 
@@ -580,7 +585,7 @@
 } MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
 	Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t;
 
-/*Power Performance Change Event */
+/*Power Performance Change Event data */
 
 typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE {
 	U8 CurrentPowerMode;	/*0x00 */
@@ -605,6 +610,21 @@
 #define MPI2_EVENT_PM_MODE_REDUCED_POWER     (0x05)
 #define MPI2_EVENT_PM_MODE_STANDBY           (0x06)
 
+/* Active Cable Exception Event data */
+
+typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT {
+	U32         ActiveCablePowerRequirement;        /* 0x00 */
+	U8          ReasonCode;                         /* 0x04 */
+	U8          ReceptacleID;                       /* 0x05 */
+	U16         Reserved1;                          /* 0x06 */
+} MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+	*PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+	Mpi26EventDataActiveCableExcept_t,
+	*pMpi26EventDataActiveCableExcept_t;
+
+/* defines for ReasonCode field */
+#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER     (0x00)
+
 /*Hard Reset Received Event data */
 
 typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED {
@@ -1366,7 +1386,16 @@
 /*Signature0 field */
 #define MPI2_FW_HEADER_SIGNATURE0_OFFSET        (0x04)
 #define MPI2_FW_HEADER_SIGNATURE0               (0x5AFAA55A)
-#define MPI26_FW_HEADER_SIGNATURE0              (0x5AEAA55A)
+/* Last byte is defined by architecture */
+#define MPI26_FW_HEADER_SIGNATURE0_BASE         (0x5AEAA500)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_0        (0x5A)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_1        (0x00)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_2        (0x01)
+/* legacy (0x5AEAA55A) */
+#define MPI26_FW_HEADER_SIGNATURE0 \
+	(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
+#define MPI26_FW_HEADER_SIGNATURE0_3516 \
+	(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
 
 /*Signature1 field */
 #define MPI2_FW_HEADER_SIGNATURE1_OFFSET        (0x08)
@@ -1778,6 +1807,7 @@
 #define MPI26_CTRL_OP_SAS_PHY_LINK_RESET                (0x06)
 #define MPI26_CTRL_OP_SAS_PHY_HARD_RESET                (0x07)
 #define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG               (0x08)
+#define MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG              (0x09)
 #define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE                (0x0A)
 #define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY              (0x0B)
 #define MPI26_CTRL_OP_REMOVE_DEVICE                     (0x0D)
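
The MPI v2.6 firmware header Signature0 is now composed from a common base plus a
per-architecture-class byte, with the ARC_0 variant reproducing the legacy
0x5AEAA55A value. A minimal sketch of a check built on those defines (the helper
name is illustrative, not part of the driver):

#include <linux/types.h>

#define MPI26_FW_HEADER_SIGNATURE0_BASE   (0x5AEAA500)
#define MPI26_FW_HEADER_SIGNATURE0_ARC_0  (0x5A)

/* 0x5AEAA500 + 0x5A == 0x5AEAA55A, i.e. the legacy signature noted above */
static inline int mpi26_sig0_is_legacy(u32 sig0)
{
	return sig0 == (MPI26_FW_HEADER_SIGNATURE0_BASE +
			MPI26_FW_HEADER_SIGNATURE0_ARC_0);
}
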
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e4db5fb..751f13e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -57,6 +57,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/time.h>
+#include <linux/ktime.h>
 #include <linux/kthread.h>
 #include <linux/aer.h>
 
@@ -654,6 +655,9 @@
 	case MPI2_EVENT_TEMP_THRESHOLD:
 		desc = "Temperature Threshold";
 		break;
+	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
+		desc = "Active cable exception";
+		break;
 	}
 
 	if (!desc)
@@ -1100,18 +1104,16 @@
 }
 
 /**
- * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
  * @ioc: per adapter object
- * Context: ISR conext
+ * Context: non-ISR context
  *
- * Called when a Task Management request has completed. We want
- * to flush the other reply queues so all the outstanding IO has been
- * completed back to OS before we process the TM completetion.
+ * Called when a Task Management request has completed.
  *
  * Return nothing.
  */
 void
-mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
 {
 	struct adapter_reply_queue *reply_q;
 
@@ -1122,12 +1124,13 @@
 		return;
 
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
-		if (ioc->shost_recovery)
+		if (ioc->shost_recovery || ioc->remove_host ||
+				ioc->pci_error_recovery)
 			return;
 		/* TMs are on msix_index == 0 */
 		if (reply_q->msix_index == 0)
 			continue;
-		_base_interrupt(reply_q->vector, (void *)reply_q);
+		synchronize_irq(reply_q->vector);
 	}
 }
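
The flush helper no longer re-runs the interrupt handler by hand; it now waits for
any handler already executing on the other vectors to finish. A minimal sketch of
that pattern, with simplified stand-ins for the driver's reply-queue list:

#include <linux/interrupt.h>

struct reply_vec { unsigned int vector; int msix_index; };

static void sync_reply_vectors(struct reply_vec *v, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (v[i].msix_index == 0)	/* TM completions arrive here */
			continue;
		synchronize_irq(v[i].vector);	/* wait for in-flight handler */
	}
}
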
 
@@ -3207,10 +3210,10 @@
 		sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
 	else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
 		sg_tablesize = min_t(unsigned short, sg_tablesize,
-				      SCSI_MAX_SG_CHAIN_SEGMENTS);
+				      SG_MAX_SEGMENTS);
 		pr_warn(MPT3SAS_FMT
 		 "sg_tablesize(%u) is bigger than kernel"
-		 " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
+		 " defined SG_CHUNK_SIZE(%u)\n", ioc->name,
 		 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
 	}
 	ioc->shost->sg_tablesize = sg_tablesize;
@@ -4387,7 +4390,7 @@
 	Mpi2IOCInitRequest_t mpi_request;
 	Mpi2IOCInitReply_t mpi_reply;
 	int i, r = 0;
-	struct timeval current_time;
+	ktime_t current_time;
 	u16 ioc_status;
 	u32 reply_post_free_array_sz = 0;
 	Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
@@ -4449,9 +4452,8 @@
 	/* This time stamp specifies number of milliseconds
 	 * since epoch ~ midnight January 1, 1970.
 	 */
-	do_gettimeofday(&current_time);
-	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
-	    (current_time.tv_usec / 1000));
+	current_time = ktime_get_real();
+	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
 
 	if (ioc->logging_level & MPT_DEBUG_INIT) {
 		__le32 *mfp;
@@ -5030,7 +5032,7 @@
 static int
 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 {
-	int r, i;
+	int r, i, index;
 	unsigned long	flags;
 	u32 reply_address;
 	u16 smid;
@@ -5039,8 +5041,7 @@
 	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
 	u8 hide_flag;
 	struct adapter_reply_queue *reply_q;
-	long reply_post_free;
-	u32 reply_post_free_sz, index = 0;
+	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
 
 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
@@ -5124,27 +5125,27 @@
 		_base_assign_reply_queues(ioc);
 
 	/* initialize Reply Post Free Queue */
-	reply_post_free_sz = ioc->reply_post_queue_depth *
-	    sizeof(Mpi2DefaultReplyDescriptor_t);
-	reply_post_free = (long)ioc->reply_post[index].reply_post_free;
+	index = 0;
+	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+		/*
+		 * If RDPQ is enabled, switch to the next allocation.
+		 * Otherwise advance within the contiguous region.
+		 */
+		if (ioc->rdpq_array_enable) {
+			reply_q->reply_post_free =
+				ioc->reply_post[index++].reply_post_free;
+		} else {
+			reply_q->reply_post_free = reply_post_free_contig;
+			reply_post_free_contig += ioc->reply_post_queue_depth;
+		}
+
 		reply_q->reply_post_host_index = 0;
-		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
-		    reply_post_free;
 		for (i = 0; i < ioc->reply_post_queue_depth; i++)
 			reply_q->reply_post_free[i].Words =
 			    cpu_to_le64(ULLONG_MAX);
 		if (!_base_is_controller_msix_enabled(ioc))
 			goto skip_init_reply_post_free_queue;
-		/*
-		 * If RDPQ is enabled, switch to the next allocation.
-		 * Otherwise advance within the contiguous region.
-		 */
-		if (ioc->rdpq_array_enable)
-			reply_post_free = (long)
-			    ioc->reply_post[++index].reply_post_free;
-		else
-			reply_post_free += reply_post_free_sz;
 	}
  skip_init_reply_post_free_queue:
 
@@ -5425,6 +5426,8 @@
 	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
 	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
 	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
+	if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
+		_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
 
 	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
 	if (r)
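
The IOCInit timestamp above moves from do_gettimeofday() to the ktime API, which
avoids the timeval structure and yields milliseconds directly. A minimal sketch of
the equivalent computation (the helper name is illustrative):

#include <linux/ktime.h>
#include <linux/types.h>

/* Milliseconds since the epoch, matching the old tv_sec*1000 + tv_usec/1000 */
static u64 wallclock_ms(void)
{
	return ktime_to_ms(ktime_get_real());
}
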
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 32580b5..892c9be 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,8 +73,8 @@
 #define MPT3SAS_DRIVER_NAME		"mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION		"12.100.00.00"
-#define MPT3SAS_MAJOR_VERSION		12
+#define MPT3SAS_DRIVER_VERSION		"13.100.00.00"
+#define MPT3SAS_MAJOR_VERSION		13
 #define MPT3SAS_MINOR_VERSION		100
 #define MPT3SAS_BUILD_VERSION		0
 #define MPT3SAS_RELEASE_VERSION	00
@@ -90,7 +90,7 @@
 /*
  * Set MPT3SAS_SG_DEPTH value based on user input.
  */
-#define MPT_MAX_PHYS_SEGMENTS	SCSI_MAX_SG_SEGMENTS
+#define MPT_MAX_PHYS_SEGMENTS	SG_CHUNK_SIZE
 #define MPT_MIN_PHYS_SEGMENTS	16
 
 #ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
@@ -112,6 +112,8 @@
 #define MPT3SAS_SAS_QUEUE_DEPTH		254
 #define MPT3SAS_RAID_QUEUE_DEPTH	128
 
+#define MPT3SAS_RAID_MAX_SECTORS	8192
+
 #define MPT_NAME_LENGTH			32	/* generic length of strings */
 #define MPT_STRING_LENGTH		64
 
@@ -1234,7 +1236,8 @@
 void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid);
 __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
 	u16 smid);
-void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc);
+
+void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
 
 /* hi-priority queue */
 u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
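
MPT_MAX_PHYS_SEGMENTS now tracks SG_CHUNK_SIZE, the renamed block-layer limit
(SCSI_MAX_SG_SEGMENTS), while the upper bound SCSI_MAX_SG_CHAIN_SEGMENTS became
SG_MAX_SEGMENTS. A minimal sketch of the clamp performed in mpt3sas_base.c above,
under those renamed limits (the helper name is illustrative):

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static unsigned short clamp_sg_tablesize(unsigned short want)
{
	if (want < 16)				/* MPT_MIN_PHYS_SEGMENTS */
		return 16;
	if (want > SG_CHUNK_SIZE)		/* warn and cap in the driver */
		return min_t(unsigned short, want, SG_MAX_SEGMENTS);
	return want;
}
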
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index e0e4920..6a4df5a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -174,13 +174,13 @@
  * struct fw_event_work - firmware event struct
  * @list: link list framework
  * @work: work object (ioc->fault_reset_work_q)
- * @cancel_pending_work: flag set during reset handling
  * @ioc: per adapter object
  * @device_handle: device handle
  * @VF_ID: virtual function id
  * @VP_ID: virtual port id
  * @ignore: flag meaning this event has been marked to ignore
- * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
+ * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
+ * @refcount: kref for this event
  * @event_data: reply event data payload follows
  *
  * This object stored on ioc->fw_event_list.
@@ -188,8 +188,6 @@
 struct fw_event_work {
 	struct list_head	list;
 	struct work_struct	work;
-	u8			cancel_pending_work;
-	struct delayed_work	delayed_work;
 
 	struct MPT3SAS_ADAPTER *ioc;
 	u16			device_handle;
@@ -1911,6 +1909,14 @@
 			    (unsigned long long)raid_device->wwid,
 			    raid_device->num_pds, ds);
 
+		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
+			blk_queue_max_hw_sectors(sdev->request_queue,
+						MPT3SAS_RAID_MAX_SECTORS);
+			sdev_printk(KERN_INFO, sdev,
+					"Set queue's max_sector to: %u\n",
+						MPT3SAS_RAID_MAX_SECTORS);
+		}
+
 		scsih_change_queue_depth(sdev, qdepth);
 
 		/* raid transport support */
@@ -2118,7 +2124,6 @@
 		return 1;
 	if (ioc->tm_cmds.smid != smid)
 		return 1;
-	mpt3sas_base_flush_reply_queues(ioc);
 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
 	if (mpi_reply) {
@@ -2303,6 +2308,9 @@
 		}
 	}
 
+	/* sync IRQs in case those were busy during flush. */
+	mpt3sas_base_sync_reply_irqs(ioc);
+
 	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
 		mpi_reply = ioc->tm_cmds.reply;
@@ -2804,12 +2812,12 @@
 		/*
 		 * Wait on the fw_event to complete. If this returns 1, then
 		 * the event was never executed, and we need a put for the
-		 * reference the delayed_work had on the fw_event.
+		 * reference the work had on the fw_event.
 		 *
 		 * If it did execute, we wait for it to finish, and the put will
 		 * happen from _firmware_event_work()
 		 */
-		if (cancel_delayed_work_sync(&fw_event->delayed_work))
+		if (cancel_work_sync(&fw_event->work))
 			fw_event_work_put(fw_event);
 
 		fw_event_work_put(fw_event);
@@ -3961,7 +3969,7 @@
 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
-		    cpu_to_be32(scsi_get_lba(scmd));
+		    cpu_to_be32(scsi_prot_ref_tag(scmd));
 		break;
 
 	case SCSI_PROT_DIF_TYPE3:
@@ -7850,6 +7858,7 @@
 	Mpi2EventNotificationReply_t *mpi_reply;
 	u16 event;
 	u16 sz;
+	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
 
 	/* events turned off due to host reset or driver unloading */
 	if (ioc->remove_host || ioc->pci_error_recovery)
@@ -7962,6 +7971,18 @@
 			(Mpi2EventDataTemperature_t *)
 			mpi_reply->EventData);
 		break;
+	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
+		ActiveCableEventData =
+		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
+		if (ActiveCableEventData->ReasonCode ==
+				MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER)
+			pr_info(MPT3SAS_FMT
+			    "Currently an active cable with ReceptacleID %d cannot be powered and devices connected to this active cable will not be seen. This active cable requires %d mW of power\n",
+			    ioc->name, ActiveCableEventData->ReceptacleID,
+			    ActiveCableEventData->ActiveCablePowerRequirement);
+		break;
 
 	default: /* ignore the rest */
 		return 1;
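
The RAID volume slave_configure path above caps the request queue at
MPT3SAS_RAID_MAX_SECTORS (8192). A minimal sketch of that clamp, assuming only a
scsi_device and the generic block helper (the wrapper name is illustrative):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void cap_raid_sectors(struct scsi_device *sdev, unsigned int max)
{
	/* Only lower the limit; never raise it above the host's value. */
	if (sdev->host->max_sectors > max)
		blk_queue_max_hw_sectors(sdev->request_queue, max);
}
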
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index c7c2505..8280046 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -704,24 +704,7 @@
 		.class_mask	= 0,
 		.driver_data	= chip_9445,
 	},
-	{
-		.vendor		= PCI_VENDOR_ID_MARVELL_EXT,
-		.device		= 0x9485,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= 0x9480,
-		.class		= 0,
-		.class_mask	= 0,
-		.driver_data	= chip_9485,
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_MARVELL_EXT,
-		.device		= 0x9485,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= 0x9485,
-		.class		= 0,
-		.class_mask	= 0,
-		.driver_data	= chip_9485,
-	},
+	{ PCI_VDEVICE(MARVELL_EXT, 0x9485), chip_9485 }, /* Marvell 9480/9485 (any vendor/model) */
 	{ PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
 	{ PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
 	{ PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
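
The two hand-written 0x9485 entries collapse into one PCI_VDEVICE() line because
that macro wildcards the subsystem IDs. Roughly what the new entry expands to
(class/class_mask default to 0; driver_data is the chip_9485 value from the table
above, shown here as a placeholder):

#include <linux/pci.h>

static const struct pci_device_id example_9485_id = {
	.vendor      = PCI_VENDOR_ID_MARVELL_EXT,
	.device      = 0x9485,
	.subvendor   = PCI_ANY_ID,	/* both 0x9480 and 0x9485 subdevices match */
	.subdevice   = PCI_ANY_ID,
	.driver_data = 0,		/* chip_9485 in the real table */
};
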
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 512037e..2f689ae 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -1,5 +1,3 @@
-#define PSEUDO_DMA
-
 /*
  * This driver adapted from Drew Eckhardt's Trantor T128 driver
  *
@@ -77,7 +75,6 @@
 
 #include <scsi/scsi_host.h>
 #include "pas16.h"
-#define AUTOPROBE_IRQ
 #include "NCR5380.h"
 
 
@@ -377,7 +374,7 @@
 		
 	instance->io_port = io_port;
 
-	if (NCR5380_init(instance, 0))
+	if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP))
 		goto out_unregister;
 
 	NCR5380_maybe_reset_bus(instance);
@@ -460,7 +457,7 @@
 }
 
 /*
- * Function : int NCR5380_pread (struct Scsi_Host *instance, 
+ * Function : int pas16_pread (struct Scsi_Host *instance,
  *	unsigned char *dst, int len)
  *
  * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to 
@@ -472,14 +469,14 @@
  * 	timeout.
  */
 
-static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
-    int len) {
+static inline int pas16_pread(struct Scsi_Host *instance,
+                              unsigned char *dst, int len)
+{
     register unsigned char  *d = dst;
     register unsigned short reg = (unsigned short) (instance->io_port + 
 	P_DATA_REG_OFFSET);
     register int i = len;
     int ii = 0;
-    struct NCR5380_hostdata *hostdata = shost_priv(instance);
 
     while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) )
 	 ++ii;
@@ -492,13 +489,11 @@
 	    instance->host_no);
 	return -1;
     }
-    if (ii > hostdata->spin_max_r)
-        hostdata->spin_max_r = ii;
     return 0;
 }
 
 /*
- * Function : int NCR5380_pwrite (struct Scsi_Host *instance, 
+ * Function : int pas16_pwrite (struct Scsi_Host *instance,
  *	unsigned char *src, int len)
  *
  * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
@@ -510,13 +505,13 @@
  * 	timeout.
  */
 
-static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
-    int len) {
+static inline int pas16_pwrite(struct Scsi_Host *instance,
+                               unsigned char *src, int len)
+{
     register unsigned char *s = src;
     register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
     register int i = len;
     int ii = 0;
-    struct NCR5380_hostdata *hostdata = shost_priv(instance);
 
     while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) )
 	 ++ii;
@@ -529,8 +524,6 @@
 	    instance->host_no);
 	return -1;
     }
-    if (ii > hostdata->spin_max_w)
-        hostdata->spin_max_w = ii;
     return 0;
 }
 
@@ -550,8 +543,6 @@
 	.detect			= pas16_detect,
 	.release		= pas16_release,
 	.proc_name		= "pas16",
-	.show_info		= pas16_show_info,
-	.write_info		= pas16_write_info,
 	.info			= pas16_info,
 	.queuecommand		= pas16_queue_command,
 	.eh_abort_handler	= pas16_abort,
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
index d375277..9fe7f33 100644
--- a/drivers/scsi/pas16.h
+++ b/drivers/scsi/pas16.h
@@ -103,14 +103,15 @@
 #define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) )
 
 #define NCR5380_dma_xfer_len(instance, cmd, phase)	(cmd->transfersize)
+#define NCR5380_dma_recv_setup		pas16_pread
+#define NCR5380_dma_send_setup		pas16_pwrite
+#define NCR5380_dma_residual(instance)	(0)
 
 #define NCR5380_intr pas16_intr
 #define NCR5380_queue_command pas16_queue_command
 #define NCR5380_abort pas16_abort
 #define NCR5380_bus_reset pas16_bus_reset
 #define NCR5380_info pas16_info
-#define NCR5380_show_info pas16_show_info
-#define NCR5380_write_info pas16_write_info
 
 /* 15 14 12 10 7 5 3 
    1101 0100 1010 1000 */
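
With pseudo-DMA folded into the NCR5380 core, the board header only maps the
core's hook names onto its own PIO routines, and the core reaches them through
these defines at compile time. An illustrative wrapper, assuming the defines above
are in scope (not code from the core driver):

#include <scsi/scsi_host.h>

static int do_recv(struct Scsi_Host *instance, unsigned char *dst, int len)
{
	/* After the #defines above this resolves to pas16_pread() */
	return NCR5380_dma_recv_setup(instance, dst, len);
}
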
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 062ab34..6bd7bf4 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -418,8 +418,6 @@
 		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
 			pm8001_ha->io_mem[logicalBar].membase =
 				pci_resource_start(pdev, bar);
-			pm8001_ha->io_mem[logicalBar].membase &=
-				(u32)PCI_BASE_ADDRESS_MEM_MASK;
 			pm8001_ha->io_mem[logicalBar].memsize =
 				pci_resource_len(pdev, bar);
 			pm8001_ha->io_mem[logicalBar].memvirtaddr =
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 5d0ec42..634254a 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4214,7 +4214,7 @@
 	.eh_bus_reset_handler	= qla1280_eh_bus_reset,
 	.eh_host_reset_handler	= qla1280_eh_adapter_reset,
 	.bios_param		= qla1280_biosparam,
-	.can_queue		= 0xfffff,
+	.can_queue		= MAX_OUTSTANDING_COMMANDS,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.use_clustering		= ENABLE_CLUSTERING,
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index b5029e5..15dff70 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -6,6 +6,7 @@
  */
 #include "qla_def.h"
 #include <linux/delay.h>
+#include <linux/ktime.h>
 #include <linux/pci.h>
 #include <linux/ratelimit.h>
 #include <linux/vmalloc.h>
@@ -1812,7 +1813,6 @@
 	struct host_system_info *phost_info;
 	struct register_host_info *preg_hsi;
 	struct new_utsname *p_sysid = NULL;
-	struct timeval tv;
 
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
@@ -1886,8 +1886,7 @@
 			    p_sysid->domainname, DOMNAME_LENGTH);
 			strncpy(phost_info->hostdriver,
 			    QLA2XXX_VERSION, VERSION_LENGTH);
-			do_gettimeofday(&tv);
-			preg_hsi->utc = (uint64_t)tv.tv_sec;
+			preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
 			ql_dbg(ql_dbg_init, vha, 0x0149,
 			    "ISP%04X: Host registration with firmware\n",
 			    ha->pdev->device);
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index b6b4cfd..54380b4 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1229,7 +1229,7 @@
 	if (buf == NULL) {
 		ql_log(ql_log_fatal, vha, 0x010c,
 		    "Unable to allocate memory.\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < n; i++) {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 5e93923..9f6012b 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -3222,7 +3222,7 @@
 		ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
 		if (!ha->fcp_prio_cfg) {
 			ql_log(ql_log_warn, vha, 0x00d5,
-			    "Unable to allocate memory for fcp priorty data (%x).\n",
+			    "Unable to allocate memory for fcp priority data (%x).\n",
 			    FCP_PRIO_CFG_SIZE);
 			return QLA_FUNCTION_FAILED;
 		}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b1bf42b..1deb6ad 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -784,8 +784,9 @@
 	int pg83_supported = 0;
 	unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
 
-	if (sdev->skip_vpd_pages)
+	if (!scsi_device_supports_vpd(sdev))
 		return;
+
 retry_pg0:
 	vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
 	if (!vpd_buf)
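
The VPD probe now gates on scsi_device_supports_vpd() instead of only honouring
skip_vpd_pages, so devices whitelisted via try_vpd_pages are also probed. Roughly,
the helper in include/scsi/scsi_device.h amounts to the following (paraphrased
here under an illustrative name):

#include <scsi/scsi_device.h>

static inline int supports_vpd(struct scsi_device *sdev)
{
	if (sdev->try_vpd_pages)		/* blacklist forces VPD on */
		return 1;
	/* the VPD pages asked for are mandatory from SPC-2 onwards */
	return sdev->scsi_level >= SCSI_SPC_2 && !sdev->skip_vpd_pages;
}
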
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f3d69a98..0f9ba41 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6,23 +6,15 @@
  *  anything out of the ordinary is seen.
  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  *
- *  This version is more generic, simulating a variable number of disk
- *  (or disk like devices) sharing a common amount of RAM. To be more
- *  realistic, the simulated devices have the transport attributes of
- *  SAS disks.
+ * Copyright (C) 2001 - 2016 Douglas Gilbert
  *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
  *
  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
  *
- *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
- *   dpg: work for devfs large number of disks [20010809]
- *        forked for lk 2.5 series [20011216, 20020101]
- *        use vmalloc() more inquiry+mode_sense [20020302]
- *        add timers for delayed responses [20020721]
- *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
- *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
- *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
- *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
  */
 
 
@@ -32,7 +24,7 @@
 
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/timer.h>
+#include <linux/jiffies.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -49,6 +41,7 @@
 #include <linux/interrupt.h>
 #include <linux/atomic.h>
 #include <linux/hrtimer.h>
+#include <linux/uuid.h>
 
 #include <net/checksum.h>
 
@@ -66,8 +59,9 @@
 #include "sd.h"
 #include "scsi_logging.h"
 
-#define SCSI_DEBUG_VERSION "1.85"
-static const char *scsi_debug_version_date = "20141022";
+/* make sure inq_product_rev string corresponds to this version */
+#define SDEBUG_VERSION "1.86"
+static const char *sdebug_version_date = "20160430";
 
 #define MY_NAME "scsi_debug"
 
@@ -102,7 +96,6 @@
 /* Additional Sense Code Qualifier (ASCQ) */
 #define ACK_NAK_TO 0x3
 
-
 /* Default values for driver parameters */
 #define DEF_NUM_HOST   1
 #define DEF_NUM_TGTS   1
@@ -111,7 +104,7 @@
  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
  */
 #define DEF_ATO 1
-#define DEF_DELAY   1		/* if > 0 unit is a jiffy */
+#define DEF_JDELAY   1		/* if > 0 unit is a jiffy */
 #define DEF_DEV_SIZE_MB   8
 #define DEF_DIF 0
 #define DEF_DIX 0
@@ -131,9 +124,9 @@
 #define DEF_OPTS   0
 #define DEF_OPT_BLKS 1024
 #define DEF_PHYSBLK_EXP 0
-#define DEF_PTYPE   0
+#define DEF_PTYPE   TYPE_DISK
 #define DEF_REMOVABLE false
-#define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
+#define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
 #define DEF_SECTOR_SIZE 512
 #define DEF_UNMAP_ALIGNMENT 0
 #define DEF_UNMAP_GRANULARITY 1
@@ -143,43 +136,54 @@
 #define DEF_VPD_USE_HOSTNO 1
 #define DEF_WRITESAME_LENGTH 0xFFFF
 #define DEF_STRICT 0
-#define DELAY_OVERRIDDEN -9999
+#define DEF_STATISTICS false
+#define DEF_SUBMIT_QUEUES 1
+#define DEF_UUID_CTL 0
+#define JDELAY_OVERRIDDEN -9999
 
-/* bit mask values for scsi_debug_opts */
-#define SCSI_DEBUG_OPT_NOISE   1
-#define SCSI_DEBUG_OPT_MEDIUM_ERR   2
-#define SCSI_DEBUG_OPT_TIMEOUT   4
-#define SCSI_DEBUG_OPT_RECOVERED_ERR   8
-#define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
-#define SCSI_DEBUG_OPT_DIF_ERR   32
-#define SCSI_DEBUG_OPT_DIX_ERR   64
-#define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
-#define SCSI_DEBUG_OPT_SHORT_TRANSFER	0x100
-#define SCSI_DEBUG_OPT_Q_NOISE	0x200
-#define SCSI_DEBUG_OPT_ALL_TSF	0x400
-#define SCSI_DEBUG_OPT_RARE_TSF	0x800
-#define SCSI_DEBUG_OPT_N_WCE	0x1000
-#define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
-#define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
-#define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
+#define SDEBUG_LUN_0_VAL 0
+
+/* bit mask values for sdebug_opts */
+#define SDEBUG_OPT_NOISE		1
+#define SDEBUG_OPT_MEDIUM_ERR		2
+#define SDEBUG_OPT_TIMEOUT		4
+#define SDEBUG_OPT_RECOVERED_ERR	8
+#define SDEBUG_OPT_TRANSPORT_ERR	16
+#define SDEBUG_OPT_DIF_ERR		32
+#define SDEBUG_OPT_DIX_ERR		64
+#define SDEBUG_OPT_MAC_TIMEOUT		128
+#define SDEBUG_OPT_SHORT_TRANSFER	0x100
+#define SDEBUG_OPT_Q_NOISE		0x200
+#define SDEBUG_OPT_ALL_TSF		0x400
+#define SDEBUG_OPT_RARE_TSF		0x800
+#define SDEBUG_OPT_N_WCE		0x1000
+#define SDEBUG_OPT_RESET_NOISE		0x2000
+#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
+#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
+			      SDEBUG_OPT_RESET_NOISE)
+#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
+				  SDEBUG_OPT_TRANSPORT_ERR | \
+				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
+				  SDEBUG_OPT_SHORT_TRANSFER)
 /* When "every_nth" > 0 then modulo "every_nth" commands:
- *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
+ *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
  *   - a RECOVERED_ERROR is simulated on successful read and write
- *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
+ *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
  *   - a TRANSPORT_ERROR is simulated on successful read and write
- *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
+ *     commands if SDEBUG_OPT_TRANSPORT_ERR is set.
  *
  * When "every_nth" < 0 then after "- every_nth" commands:
- *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
+ *   - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
  *   - a RECOVERED_ERROR is simulated on successful read and write
- *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
+ *     commands if SDEBUG_OPT_RECOVERED_ERR is set.
  *   - a TRANSPORT_ERROR is simulated on successful read and write
- *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
- * This will continue until some other action occurs (e.g. the user
- * writing a new value (other than -1 or 1) to every_nth via sysfs).
+ *     commands if _DEBUG_OPT_TRANSPORT_ERR is set.
+ * This will continue on every subsequent command until some other action
+ * occurs (e.g. the user writing a new value (other than -1 or 1) to
+ * every_nth via sysfs).
  */
 
-/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in
+/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
  * priority order. In the subset implemented here lower numbers have higher
  * priority. The UA numbers should be a sequence starting from 0 with
  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
@@ -192,11 +196,7 @@
 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
 #define SDEBUG_NUM_UAS 7
 
-/* for check_readiness() */
-#define UAS_ONLY 1	/* check for UAs only */
-#define UAS_TUR 0	/* if no UAs then check if media access possible */
-
-/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
+/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
  * sector on read commands: */
 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
@@ -205,21 +205,108 @@
  * or "peripheral device" addressing (value 0) */
 #define SAM2_LUN_ADDRESS_METHOD 0
 
-/* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
- * (for response) at one time. Can be reduced by max_queue option. Command
- * responses are not queued when delay=0 and ndelay=0. The per-device
- * DEF_CMD_PER_LUN can be changed via sysfs:
- * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
- * SCSI_DEBUG_CANQUEUE. */
-#define SCSI_DEBUG_CANQUEUE_WORDS  9	/* a WORD is bits in a long */
-#define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
+/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
+ * (for response) per submit queue at one time. Can be reduced by max_queue
+ * option. Command responses are not queued when jdelay=0 and ndelay=0. The
+ * per-device DEF_CMD_PER_LUN can be changed via sysfs:
+ * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
+ * but cannot exceed SDEBUG_CANQUEUE.
+ */
+#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
+#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
 #define DEF_CMD_PER_LUN  255
 
-#if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
-#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
-#endif
+#define F_D_IN			1
+#define F_D_OUT			2
+#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
+#define F_D_UNKN		8
+#define F_RL_WLUN_OK		0x10
+#define F_SKIP_UA		0x20
+#define F_DELAY_OVERR		0x40
+#define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
+#define F_SA_HIGH		0x100	/* as used by variable length cdbs */
+#define F_INV_OP		0x200
+#define F_FAKE_RW		0x400
+#define F_M_ACCESS		0x800	/* media access */
 
-/* SCSI opcodes (first byte of cdb) mapped onto these indexes */
+#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
+#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
+#define FF_SA (F_SA_HIGH | F_SA_LOW)
+
+#define SDEBUG_MAX_PARTS 4
+
+#define SDEBUG_MAX_CMD_LEN 32
+
+
+struct sdebug_dev_info {
+	struct list_head dev_list;
+	unsigned int channel;
+	unsigned int target;
+	u64 lun;
+	uuid_be lu_name;
+	struct sdebug_host_info *sdbg_host;
+	unsigned long uas_bm[1];
+	atomic_t num_in_q;
+	atomic_t stopped;
+	bool used;
+};
+
+struct sdebug_host_info {
+	struct list_head host_list;
+	struct Scsi_Host *shost;
+	struct device dev;
+	struct list_head dev_info_list;
+};
+
+#define to_sdebug_host(d)	\
+	container_of(d, struct sdebug_host_info, dev)
+
+struct sdebug_defer {
+	struct hrtimer hrt;
+	struct execute_work ew;
+	int sqa_idx;	/* index of sdebug_queue array */
+	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
+	int issuing_cpu;
+};
+
+struct sdebug_queued_cmd {
+	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
+	 * instance indicates this slot is in use.
+	 */
+	struct sdebug_defer *sd_dp;
+	struct scsi_cmnd *a_cmnd;
+	unsigned int inj_recovered:1;
+	unsigned int inj_transport:1;
+	unsigned int inj_dif:1;
+	unsigned int inj_dix:1;
+	unsigned int inj_short:1;
+};
+
+struct sdebug_queue {
+	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
+	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
+	spinlock_t qc_lock;
+	atomic_t blocked;	/* to temporarily stop more being queued */
+};
+
+static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
+static atomic_t sdebug_completions;  /* count of deferred completions */
+static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
+static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
+
+struct opcode_info_t {
+	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
+				/* for terminating element */
+	u8 opcode;		/* if num_attached > 0, preferred */
+	u16 sa;			/* service action */
+	u32 flags;		/* OR-ed set of SDEB_F_* */
+	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
+	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
+	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
+				/* ignore cdb bytes after position 15 */
+};
+
+/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
 enum sdeb_opcode_index {
 	SDEB_I_INVALID_OPCODE =	0,
 	SDEB_I_INQUIRY = 1,
@@ -254,6 +341,7 @@
 	SDEB_I_LAST_ELEMENT = 30,	/* keep this last */
 };
 
+
 static const unsigned char opcode_ind_arr[256] = {
 /* 0x0; 0x0->0x1f: 6 byte cdbs */
 	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
@@ -274,7 +362,7 @@
 	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
 	    SDEB_I_RELEASE,
 	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
-/* 0x60; 0x60->0x7d are reserved */
+/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	0, SDEB_I_VARIABLE_LEN,
@@ -297,24 +385,6 @@
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 };
 
-#define F_D_IN			1
-#define F_D_OUT			2
-#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
-#define F_D_UNKN		8
-#define F_RL_WLUN_OK		0x10
-#define F_SKIP_UA		0x20
-#define F_DELAY_OVERR		0x40
-#define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
-#define F_SA_HIGH		0x100	/* as used by variable length cdbs */
-#define F_INV_OP		0x200
-#define F_FAKE_RW		0x400
-#define F_M_ACCESS		0x800	/* media access */
-
-#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
-#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
-#define FF_SA (F_SA_HIGH | F_SA_LOW)
-
-struct sdebug_dev_info;
 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -337,18 +407,6 @@
 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
 
-struct opcode_info_t {
-	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff
-				 * for terminating element */
-	u8 opcode;		/* if num_attached > 0, preferred */
-	u16 sa;			/* service action */
-	u32 flags;		/* OR-ed set of SDEB_F_* */
-	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
-	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
-	u8 len_mask[16];	/* len=len_mask[0], then mask for cdb[1]... */
-				/* ignore cdb bytes after position 15 */
-};
-
 static const struct opcode_info_t msense_iarr[1] = {
 	{0, 0x1a, 0, F_D_IN, NULL, NULL,
 	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -509,61 +567,52 @@
 	    {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 };
 
-struct sdebug_scmd_extra_t {
-	bool inj_recovered;
-	bool inj_transport;
-	bool inj_dif;
-	bool inj_dix;
-	bool inj_short;
-};
-
-static int scsi_debug_add_host = DEF_NUM_HOST;
-static int scsi_debug_ato = DEF_ATO;
-static int scsi_debug_delay = DEF_DELAY;
-static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
-static int scsi_debug_dif = DEF_DIF;
-static int scsi_debug_dix = DEF_DIX;
-static int scsi_debug_dsense = DEF_D_SENSE;
-static int scsi_debug_every_nth = DEF_EVERY_NTH;
-static int scsi_debug_fake_rw = DEF_FAKE_RW;
-static unsigned int scsi_debug_guard = DEF_GUARD;
-static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
-static int scsi_debug_max_luns = DEF_MAX_LUNS;
-static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
+static int sdebug_add_host = DEF_NUM_HOST;
+static int sdebug_ato = DEF_ATO;
+static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
+static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
+static int sdebug_dif = DEF_DIF;
+static int sdebug_dix = DEF_DIX;
+static int sdebug_dsense = DEF_D_SENSE;
+static int sdebug_every_nth = DEF_EVERY_NTH;
+static int sdebug_fake_rw = DEF_FAKE_RW;
+static unsigned int sdebug_guard = DEF_GUARD;
+static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
+static int sdebug_max_luns = DEF_MAX_LUNS;
+static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
 static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
-static int scsi_debug_ndelay = DEF_NDELAY;
-static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
-static int scsi_debug_no_uld = 0;
-static int scsi_debug_num_parts = DEF_NUM_PARTS;
-static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
-static int scsi_debug_opt_blks = DEF_OPT_BLKS;
-static int scsi_debug_opts = DEF_OPTS;
-static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
-static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
-static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
-static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
-static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
-static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
-static unsigned int scsi_debug_lbpu = DEF_LBPU;
-static unsigned int scsi_debug_lbpws = DEF_LBPWS;
-static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
-static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
-static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
-static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
-static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
-static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
-static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
-static bool scsi_debug_removable = DEF_REMOVABLE;
-static bool scsi_debug_clustering;
-static bool scsi_debug_host_lock = DEF_HOST_LOCK;
-static bool scsi_debug_strict = DEF_STRICT;
+static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
+static int sdebug_no_lun_0 = DEF_NO_LUN_0;
+static int sdebug_no_uld;
+static int sdebug_num_parts = DEF_NUM_PARTS;
+static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
+static int sdebug_opt_blks = DEF_OPT_BLKS;
+static int sdebug_opts = DEF_OPTS;
+static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
+static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
+static int sdebug_scsi_level = DEF_SCSI_LEVEL;
+static int sdebug_sector_size = DEF_SECTOR_SIZE;
+static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
+static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
+static unsigned int sdebug_lbpu = DEF_LBPU;
+static unsigned int sdebug_lbpws = DEF_LBPWS;
+static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
+static unsigned int sdebug_lbprz = DEF_LBPRZ;
+static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
+static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
+static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
+static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
+static int sdebug_uuid_ctl = DEF_UUID_CTL;
+static bool sdebug_removable = DEF_REMOVABLE;
+static bool sdebug_clustering;
+static bool sdebug_host_lock = DEF_HOST_LOCK;
+static bool sdebug_strict = DEF_STRICT;
 static bool sdebug_any_injecting_opt;
-
-static atomic_t sdebug_cmnd_count;
-static atomic_t sdebug_completions;
-static atomic_t sdebug_a_tsf;		/* counter of 'almost' TSFs */
-
-#define DEV_READONLY(TGT)      (0)
+static bool sdebug_verbose;
+static bool have_dif_prot;
+static bool sdebug_statistics = DEF_STATISTICS;
+static bool sdebug_mq_active;
 
 static unsigned int sdebug_store_sectors;
 static sector_t sdebug_capacity;	/* in sectors */
@@ -574,59 +623,10 @@
 static int sdebug_cylinders_per;	/* cylinders per surface */
 static int sdebug_sectors_per;		/* sectors per cylinder */
 
-#define SDEBUG_MAX_PARTS 4
-
-#define SCSI_DEBUG_MAX_CMD_LEN 32
-
-static unsigned int scsi_debug_lbp(void)
-{
-	return ((0 == scsi_debug_fake_rw) &&
-		(scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
-}
-
-struct sdebug_dev_info {
-	struct list_head dev_list;
-	unsigned int channel;
-	unsigned int target;
-	u64 lun;
-	struct sdebug_host_info *sdbg_host;
-	unsigned long uas_bm[1];
-	atomic_t num_in_q;
-	char stopped;		/* TODO: should be atomic */
-	bool used;
-};
-
-struct sdebug_host_info {
-	struct list_head host_list;
-	struct Scsi_Host *shost;
-	struct device dev;
-	struct list_head dev_info_list;
-};
-
-#define to_sdebug_host(d)	\
-	container_of(d, struct sdebug_host_info, dev)
-
 static LIST_HEAD(sdebug_host_list);
 static DEFINE_SPINLOCK(sdebug_host_list_lock);
 
-
-struct sdebug_hrtimer {		/* ... is derived from hrtimer */
-	struct hrtimer hrt;	/* must be first element */
-	int qa_indx;
-};
-
-struct sdebug_queued_cmd {
-	/* in_use flagged by a bit in queued_in_use_bm[] */
-	struct timer_list *cmnd_timerp;
-	struct tasklet_struct *tletp;
-	struct sdebug_hrtimer *sd_hrtp;
-	struct scsi_cmnd * a_cmnd;
-};
-static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
-static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
-
-
-static unsigned char * fake_storep;	/* ramdisk storage */
+static unsigned char *fake_storep;	/* ramdisk storage */
 static struct sd_dif_tuple *dif_storep;	/* protection info */
 static void *map_storep;		/* provisioning map */
 
@@ -640,7 +640,9 @@
 static int dix_reads;
 static int dif_errors;
 
-static DEFINE_SPINLOCK(queued_arr_lock);
+static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
+static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */
+
 static DEFINE_RWLOCK(atomic_rw);
 
 static char sdebug_proc_name[] = MY_NAME;
@@ -662,19 +664,22 @@
 static const int device_qfull_result =
 	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
 
-static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
-				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
-				     0, 0, 0, 0};
-static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
-				    0, 0, 0x2, 0x4b};
-static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
-			           0, 0, 0x0, 0x0};
+
+/* Only do the extra work involved in logical block provisioning if one or
+ * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
+ * real reads and writes (i.e. not skipping them for speed).
+ */
+static inline bool scsi_debug_lbp(void)
+{
+	return 0 == sdebug_fake_rw &&
+		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
+}
 
 static void *fake_store(unsigned long long lba)
 {
 	lba = do_div(lba, sdebug_store_sectors);
 
-	return fake_storep + lba * scsi_debug_sector_size;
+	return fake_storep + lba * sdebug_sector_size;
 }
 
 static struct sd_dif_tuple *dif_store(sector_t sector)
@@ -684,9 +689,6 @@
 	return dif_storep + sector;
 }
 
-static int sdebug_add_adapter(void);
-static void sdebug_remove_adapter(void);
-
 static void sdebug_max_tgts_luns(void)
 {
 	struct sdebug_host_info *sdbg_host;
@@ -696,11 +698,11 @@
 	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
 		hpnt = sdbg_host->shost;
 		if ((hpnt->this_id >= 0) &&
-		    (scsi_debug_num_tgts > hpnt->this_id))
-			hpnt->max_id = scsi_debug_num_tgts + 1;
+		    (sdebug_num_tgts > hpnt->this_id))
+			hpnt->max_id = sdebug_num_tgts + 1;
 		else
-			hpnt->max_id = scsi_debug_num_tgts;
-		/* scsi_debug_max_luns; */
+			hpnt->max_id = sdebug_num_tgts;
+		/* sdebug_max_luns; */
 		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
 	}
 	spin_unlock(&sdebug_host_list_lock);
@@ -709,9 +711,9 @@
 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
 
 /* Set in_bit to -1 to indicate no bit position of invalid field */
-static void
-mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
-		     int in_byte, int in_bit)
+static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
+				 enum sdeb_cmd_data c_d,
+				 int in_byte, int in_bit)
 {
 	unsigned char *sbuff;
 	u8 sks[4];
@@ -725,8 +727,7 @@
 	}
 	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
-	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
-				asc, 0);
+	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
 	memset(sks, 0, sizeof(sks));
 	sks[0] = 0x80;
 	if (c_d)
@@ -736,7 +737,7 @@
 		sks[0] |= 0x7 & in_bit;
 	}
 	put_unaligned_be16(in_byte, sks + 1);
-	if (scsi_debug_dsense) {
+	if (sdebug_dsense) {
 		sl = sbuff[7] + 8;
 		sbuff[7] = sl;
 		sbuff[sl] = 0x2;
@@ -744,7 +745,7 @@
 		memcpy(sbuff + sl + 4, sks, 3);
 	} else
 		memcpy(sbuff + 15, sks, 3);
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+	if (sdebug_verbose)
 		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
 			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
 			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
@@ -762,23 +763,22 @@
 	}
 	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
 
-	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
+	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
 
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+	if (sdebug_verbose)
 		sdev_printk(KERN_INFO, scp->device,
 			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
 			    my_name, key, asc, asq);
 }
 
-static void
-mk_sense_invalid_opcode(struct scsi_cmnd *scp)
+static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
 {
 	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
 }
 
 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
 {
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
+	if (sdebug_verbose) {
 		if (0x1261 == cmd)
 			sdev_printk(KERN_INFO, dev,
 				    "%s: BLKFLSBUF [0x1261]\n", __func__);
@@ -810,11 +810,9 @@
 	spin_unlock(&sdebug_host_list_lock);
 }
 
-static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
-			   struct sdebug_dev_info * devip)
+static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 {
 	int k;
-	bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
 
 	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
 	if (k != SDEBUG_NUM_UAS) {
@@ -822,40 +820,41 @@
 
 		switch (k) {
 		case SDEBUG_UA_POR:
-			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
-					UA_RESET_ASC, POWER_ON_RESET_ASCQ);
-			if (debug)
+			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
+					POWER_ON_RESET_ASCQ);
+			if (sdebug_verbose)
 				cp = "power on reset";
 			break;
 		case SDEBUG_UA_BUS_RESET:
-			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
-					UA_RESET_ASC, BUS_RESET_ASCQ);
-			if (debug)
+			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
+					BUS_RESET_ASCQ);
+			if (sdebug_verbose)
 				cp = "bus reset";
 			break;
 		case SDEBUG_UA_MODE_CHANGED:
-			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
-					UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
-			if (debug)
+			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
+					MODE_CHANGED_ASCQ);
+			if (sdebug_verbose)
 				cp = "mode parameters changed";
 			break;
 		case SDEBUG_UA_CAPACITY_CHANGED:
-			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
-					UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
-			if (debug)
+			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
+					CAPACITY_CHANGED_ASCQ);
+			if (sdebug_verbose)
 				cp = "capacity data changed";
 			break;
 		case SDEBUG_UA_MICROCODE_CHANGED:
-			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
-				 TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
-			if (debug)
+			mk_sense_buffer(scp, UNIT_ATTENTION,
+					TARGET_CHANGED_ASC,
+					MICROCODE_CHANGED_ASCQ);
+			if (sdebug_verbose)
 				cp = "microcode has been changed";
 			break;
 		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
-			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+			mk_sense_buffer(scp, UNIT_ATTENTION,
 					TARGET_CHANGED_ASC,
 					MICROCODE_CHANGED_WO_RESET_ASCQ);
-			if (debug)
+			if (sdebug_verbose)
 				cp = "microcode has been changed without reset";
 			break;
 		case SDEBUG_UA_LUNS_CHANGED:
@@ -864,40 +863,30 @@
 			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
 			 * on the target, until a REPORT LUNS command is
 			 * received.  SPC-4 behavior is to report it only once.
-			 * NOTE:  scsi_debug_scsi_level does not use the same
+			 * NOTE:  sdebug_scsi_level does not use the same
 			 * values as struct scsi_device->scsi_level.
 			 */
-			if (scsi_debug_scsi_level >= 6)	/* SPC-4 and above */
+			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
 				clear_luns_changed_on_target(devip);
-			mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+			mk_sense_buffer(scp, UNIT_ATTENTION,
 					TARGET_CHANGED_ASC,
 					LUNS_CHANGED_ASCQ);
-			if (debug)
+			if (sdebug_verbose)
 				cp = "reported luns data has changed";
 			break;
 		default:
-			pr_warn("%s: unexpected unit attention code=%d\n",
-				__func__, k);
-			if (debug)
+			pr_warn("unexpected unit attention code=%d\n", k);
+			if (sdebug_verbose)
 				cp = "unknown";
 			break;
 		}
 		clear_bit(k, devip->uas_bm);
-		if (debug)
-			sdev_printk(KERN_INFO, SCpnt->device,
+		if (sdebug_verbose)
+			sdev_printk(KERN_INFO, scp->device,
 				   "%s reports: Unit attention: %s\n",
 				   my_name, cp);
 		return check_condition_result;
 	}
-	if ((UAS_TUR == uas_only) && devip->stopped) {
-		mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
-				0x2);
-		if (debug)
-			sdev_printk(KERN_INFO, SCpnt->device,
-				    "%s reports: Not ready: %s\n", my_name,
-				    "initializing command required");
-		return check_condition_result;
-	}
 	return 0;
 }
 
@@ -911,7 +900,7 @@
 	if (!sdb->length)
 		return 0;
 	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
-		return (DID_ERROR << 16);
+		return DID_ERROR << 16;
 
 	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
 				      arr, arr_len);
@@ -935,13 +924,17 @@
 
 static const char * inq_vendor_id = "Linux   ";
 static const char * inq_product_id = "scsi_debug      ";
-static const char *inq_product_rev = "0184";	/* version less '.' */
+static const char *inq_product_rev = "0186";	/* version less '.' */
+/* Use some locally assigned NAAs for SAS addresses. */
+static const u64 naa3_comp_a = 0x3222222000000000ULL;
+static const u64 naa3_comp_b = 0x3333333000000000ULL;
+static const u64 naa3_comp_c = 0x3111111000000000ULL;
 
 /* Device identification VPD page. Returns number of bytes placed in arr */
-static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
-			   int target_dev_id, int dev_id_num,
-			   const char * dev_id_str,
-			   int dev_id_str_len)
+static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
+			  int target_dev_id, int dev_id_num,
+			  const char *dev_id_str, int dev_id_str_len,
+			  const uuid_be *lu_name)
 {
 	int num, port_a;
 	char b[32];
@@ -958,19 +951,25 @@
 	arr[3] = num;
 	num += 4;
 	if (dev_id_num >= 0) {
-		/* NAA-5, Logical unit identifier (binary) */
-		arr[num++] = 0x1;	/* binary (not necessarily sas) */
-		arr[num++] = 0x3;	/* PIV=0, lu, naa */
-		arr[num++] = 0x0;
-		arr[num++] = 0x8;
-		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
-		arr[num++] = 0x33;
-		arr[num++] = 0x33;
-		arr[num++] = 0x30;
-		arr[num++] = (dev_id_num >> 24);
-		arr[num++] = (dev_id_num >> 16) & 0xff;
-		arr[num++] = (dev_id_num >> 8) & 0xff;
-		arr[num++] = dev_id_num & 0xff;
+		if (sdebug_uuid_ctl) {
+			/* Locally assigned UUID */
+			arr[num++] = 0x1;  /* binary (not necessarily sas) */
+			arr[num++] = 0xa;  /* PIV=0, lu, uuid */
+			arr[num++] = 0x0;
+			arr[num++] = 0x12;
+			arr[num++] = 0x10; /* uuid type=1, locally assigned */
+			arr[num++] = 0x0;
+			memcpy(arr + num, lu_name, 16);
+			num += 16;
+		} else {
+			/* NAA-3, Logical unit identifier (binary) */
+			arr[num++] = 0x1;  /* binary (not necessarily sas) */
+			arr[num++] = 0x3;  /* PIV=0, lu, naa */
+			arr[num++] = 0x0;
+			arr[num++] = 0x8;
+			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
+			num += 8;
+		}
 		/* Target relative port number */
 		arr[num++] = 0x61;	/* proto=sas, binary */
 		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
@@ -981,47 +980,35 @@
 		arr[num++] = 0x0;
 		arr[num++] = 0x1;	/* relative port A */
 	}
-	/* NAA-5, Target port identifier */
+	/* NAA-3, Target port identifier */
 	arr[num++] = 0x61;	/* proto=sas, binary */
 	arr[num++] = 0x93;	/* piv=1, target port, naa */
 	arr[num++] = 0x0;
 	arr[num++] = 0x8;
-	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
-	arr[num++] = 0x22;
-	arr[num++] = 0x22;
-	arr[num++] = 0x20;
-	arr[num++] = (port_a >> 24);
-	arr[num++] = (port_a >> 16) & 0xff;
-	arr[num++] = (port_a >> 8) & 0xff;
-	arr[num++] = port_a & 0xff;
-	/* NAA-5, Target port group identifier */
+	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
+	num += 8;
+	/* NAA-3, Target port group identifier */
 	arr[num++] = 0x61;	/* proto=sas, binary */
 	arr[num++] = 0x95;	/* piv=1, target port group id */
 	arr[num++] = 0x0;
 	arr[num++] = 0x4;
 	arr[num++] = 0;
 	arr[num++] = 0;
-	arr[num++] = (port_group_id >> 8) & 0xff;
-	arr[num++] = port_group_id & 0xff;
-	/* NAA-5, Target device identifier */
+	put_unaligned_be16(port_group_id, arr + num);
+	num += 2;
+	/* NAA-3, Target device identifier */
 	arr[num++] = 0x61;	/* proto=sas, binary */
 	arr[num++] = 0xa3;	/* piv=1, target device, naa */
 	arr[num++] = 0x0;
 	arr[num++] = 0x8;
-	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
-	arr[num++] = 0x22;
-	arr[num++] = 0x22;
-	arr[num++] = 0x20;
-	arr[num++] = (target_dev_id >> 24);
-	arr[num++] = (target_dev_id >> 16) & 0xff;
-	arr[num++] = (target_dev_id >> 8) & 0xff;
-	arr[num++] = target_dev_id & 0xff;
+	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
+	num += 8;
 	/* SCSI name string: Target device identifier */
 	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
 	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
 	arr[num++] = 0x0;
 	arr[num++] = 24;
-	memcpy(arr + num, "naa.52222220", 12);
+	memcpy(arr + num, "naa.32222220", 12);
 	num += 12;
 	snprintf(b, sizeof(b), "%08X", target_dev_id);
 	memcpy(arr + num, b, 8);
@@ -1031,7 +1018,6 @@
 	return num;
 }
 
-
 static unsigned char vpd84_data[] = {
 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
     0x22,0x22,0x22,0x0,0xbb,0x1,
@@ -1039,14 +1025,14 @@
 };
 
 /*  Software interface identification VPD page */
-static int inquiry_evpd_84(unsigned char * arr)
+static int inquiry_vpd_84(unsigned char *arr)
 {
 	memcpy(arr, vpd84_data, sizeof(vpd84_data));
 	return sizeof(vpd84_data);
 }
 
 /* Management network addresses VPD page */
-static int inquiry_evpd_85(unsigned char * arr)
+static int inquiry_vpd_85(unsigned char *arr)
 {
 	int num = 0;
 	const char * na1 = "https://www.kernel.org/config";
@@ -1081,7 +1067,7 @@
 }
 
 /* SCSI ports VPD page */
-static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
+static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
 {
 	int num = 0;
 	int port_a, port_b;
@@ -1101,15 +1087,8 @@
 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
 	arr[num++] = 0x0;	/* reserved */
 	arr[num++] = 0x8;	/* length */
-	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
-	arr[num++] = 0x22;
-	arr[num++] = 0x22;
-	arr[num++] = 0x20;
-	arr[num++] = (port_a >> 24);
-	arr[num++] = (port_a >> 16) & 0xff;
-	arr[num++] = (port_a >> 8) & 0xff;
-	arr[num++] = port_a & 0xff;
-
+	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
+	num += 8;
 	arr[num++] = 0x0;	/* reserved */
 	arr[num++] = 0x0;	/* reserved */
 	arr[num++] = 0x0;
@@ -1123,14 +1102,8 @@
 	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
 	arr[num++] = 0x0;	/* reserved */
 	arr[num++] = 0x8;	/* length */
-	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
-	arr[num++] = 0x22;
-	arr[num++] = 0x22;
-	arr[num++] = 0x20;
-	arr[num++] = (port_b >> 24);
-	arr[num++] = (port_b >> 16) & 0xff;
-	arr[num++] = (port_b >> 8) & 0xff;
-	arr[num++] = port_b & 0xff;
+	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
+	num += 8;
 
 	return num;
 }
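
Throughout these VPD builders the hand-rolled shift-and-mask byte stores are
replaced by the unaligned big-endian accessors, which write the whole field at
once and keep the offset arithmetic in one place. A minimal sketch of the pattern
(the helper name is illustrative):

#include <asm/unaligned.h>
#include <linux/types.h>

/* Emit an 8-byte NAA identifier at arr[num] and return the advanced offset */
static int emit_naa_be64(unsigned char *arr, int num, u64 naa_id)
{
	put_unaligned_be64(naa_id, arr + num);
	return num + 8;
}
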
@@ -1181,7 +1154,7 @@
 };
 
 /* ATA Information VPD page */
-static int inquiry_evpd_89(unsigned char * arr)
+static int inquiry_vpd_89(unsigned char *arr)
 {
 	memcpy(arr, vpd89_data, sizeof(vpd89_data));
 	return sizeof(vpd89_data);
@@ -1196,47 +1169,42 @@
 };
 
 /* Block limits VPD page (SBC-3) */
-static int inquiry_evpd_b0(unsigned char * arr)
+static int inquiry_vpd_b0(unsigned char *arr)
 {
 	unsigned int gran;
 
 	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
 
 	/* Optimal transfer length granularity */
-	gran = 1 << scsi_debug_physblk_exp;
-	arr[2] = (gran >> 8) & 0xff;
-	arr[3] = gran & 0xff;
+	gran = 1 << sdebug_physblk_exp;
+	put_unaligned_be16(gran, arr + 2);
 
 	/* Maximum Transfer Length */
-	if (sdebug_store_sectors > 0x400) {
-		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
-		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
-		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
-		arr[7] = sdebug_store_sectors & 0xff;
-	}
+	if (sdebug_store_sectors > 0x400)
+		put_unaligned_be32(sdebug_store_sectors, arr + 4);
 
 	/* Optimal Transfer Length */
-	put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
+	put_unaligned_be32(sdebug_opt_blks, &arr[8]);
 
-	if (scsi_debug_lbpu) {
+	if (sdebug_lbpu) {
 		/* Maximum Unmap LBA Count */
-		put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
+		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
 
 		/* Maximum Unmap Block Descriptor Count */
-		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
+		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
 	}
 
 	/* Unmap Granularity Alignment */
-	if (scsi_debug_unmap_alignment) {
-		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
+	if (sdebug_unmap_alignment) {
+		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
 		arr[28] |= 0x80; /* UGAVALID */
 	}
 
 	/* Optimal Unmap Granularity */
-	put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
+	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
 
 	/* Maximum WRITE SAME Length */
-	put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
+	put_unaligned_be64(sdebug_write_same_length, &arr[32]);
 
 	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
 
@@ -1244,7 +1212,7 @@
 }
 
 /* Block device characteristics VPD page (SBC-3) */
-static int inquiry_evpd_b1(unsigned char *arr)
+static int inquiry_vpd_b1(unsigned char *arr)
 {
 	memset(arr, 0, 0x3c);
 	arr[0] = 0;
@@ -1255,24 +1223,22 @@
 	return 0x3c;
 }
 
-/* Logical block provisioning VPD page (SBC-3) */
-static int inquiry_evpd_b2(unsigned char *arr)
+/* Logical block provisioning VPD page (SBC-4) */
+static int inquiry_vpd_b2(unsigned char *arr)
 {
 	memset(arr, 0, 0x4);
 	arr[0] = 0;			/* threshold exponent */
-
-	if (scsi_debug_lbpu)
+	if (sdebug_lbpu)
 		arr[1] = 1 << 7;
-
-	if (scsi_debug_lbpws)
+	if (sdebug_lbpws)
 		arr[1] |= 1 << 6;
-
-	if (scsi_debug_lbpws10)
+	if (sdebug_lbpws10)
 		arr[1] |= 1 << 5;
-
-	if (scsi_debug_lbprz)
-		arr[1] |= 1 << 2;
-
+	if (sdebug_lbprz && scsi_debug_lbp())
+		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
+	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
+	/* minimum_percentage=0; provisioning_type=0 (unknown) */
+	/* threshold_percentage=0 */
 	return 0x4;
 }
 
@@ -1285,19 +1251,20 @@
 	unsigned char * arr;
 	unsigned char *cmd = scp->cmnd;
 	int alloc_len, n, ret;
-	bool have_wlun;
+	bool have_wlun, is_disk;
 
-	alloc_len = (cmd[3] << 8) + cmd[4];
+	alloc_len = get_unaligned_be16(cmd + 3);
 	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
 	if (! arr)
 		return DID_REQUEUE << 16;
-	have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
+	is_disk = (sdebug_ptype == TYPE_DISK);
+	have_wlun = scsi_is_wlun(scp->device->lun);
 	if (have_wlun)
-		pq_pdt = 0x1e;	/* present, wlun */
-	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
-		pq_pdt = 0x7f;	/* not present, no device type */
+		pq_pdt = TYPE_WLUN;	/* present, wlun */
+	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
+		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
 	else
-		pq_pdt = (scsi_debug_ptype & 0x1f);
+		pq_pdt = (sdebug_ptype & 0x1f);
 	arr[0] = pq_pdt;
 	if (0x2 & cmd[1]) {  /* CMDDT bit set */
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
@@ -1310,7 +1277,7 @@
 		
 		port_group_id = (((host_no + 1) & 0x7f) << 8) +
 		    (devip->channel & 0x7f);
-		if (0 == scsi_debug_vpd_use_hostno)
+		if (sdebug_vpd_use_hostno == 0)
 			host_no = 0;
 		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
 			    (devip->target * 1000) + devip->lun);
@@ -1328,11 +1295,12 @@
 			arr[n++] = 0x86;  /* extended inquiry */
 			arr[n++] = 0x87;  /* mode page policy */
 			arr[n++] = 0x88;  /* SCSI ports */
-			arr[n++] = 0x89;  /* ATA information */
-			arr[n++] = 0xb0;  /* Block limits (SBC) */
-			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
-			if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
-				arr[n++] = 0xb2;
+			if (is_disk) {	  /* SBC only */
+				arr[n++] = 0x89;  /* ATA information */
+				arr[n++] = 0xb0;  /* Block limits */
+				arr[n++] = 0xb1;  /* Block characteristics */
+				arr[n++] = 0xb2;  /* Logical Block Prov */
+			}
 			arr[3] = n - 4;	  /* number of supported VPD pages */
 		} else if (0x80 == cmd[2]) { /* unit serial number */
 			arr[1] = cmd[2];	/*sanity */
@@ -1340,21 +1308,22 @@
 			memcpy(&arr[4], lu_id_str, len);
 		} else if (0x83 == cmd[2]) { /* device identification */
 			arr[1] = cmd[2];	/*sanity */
-			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
-						 target_dev_id, lu_id_num,
-						 lu_id_str, len);
+			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
+						target_dev_id, lu_id_num,
+						lu_id_str, len,
+						&devip->lu_name);
 		} else if (0x84 == cmd[2]) { /* Software interface ident. */
 			arr[1] = cmd[2];	/*sanity */
-			arr[3] = inquiry_evpd_84(&arr[4]);
+			arr[3] = inquiry_vpd_84(&arr[4]);
 		} else if (0x85 == cmd[2]) { /* Management network addresses */
 			arr[1] = cmd[2];	/*sanity */
-			arr[3] = inquiry_evpd_85(&arr[4]);
+			arr[3] = inquiry_vpd_85(&arr[4]);
 		} else if (0x86 == cmd[2]) { /* extended inquiry */
 			arr[1] = cmd[2];	/*sanity */
 			arr[3] = 0x3c;	/* number of following entries */
-			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
+			if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
-			else if (scsi_debug_dif)
+			else if (have_dif_prot)
 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
 			else
 				arr[4] = 0x0;   /* no protection stuff */
@@ -1368,39 +1337,38 @@
 			arr[10] = 0x82;	 /* mlus, per initiator port */
 		} else if (0x88 == cmd[2]) { /* SCSI Ports */
 			arr[1] = cmd[2];	/*sanity */
-			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
-		} else if (0x89 == cmd[2]) { /* ATA information */
+			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
+		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
 			arr[1] = cmd[2];        /*sanity */
-			n = inquiry_evpd_89(&arr[4]);
-			arr[2] = (n >> 8);
-			arr[3] = (n & 0xff);
-		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
+			n = inquiry_vpd_89(&arr[4]);
+			put_unaligned_be16(n, arr + 2);
+		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
 			arr[1] = cmd[2];        /*sanity */
-			arr[3] = inquiry_evpd_b0(&arr[4]);
-		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
+			arr[3] = inquiry_vpd_b0(&arr[4]);
+		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
 			arr[1] = cmd[2];        /*sanity */
-			arr[3] = inquiry_evpd_b1(&arr[4]);
-		} else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
+			arr[3] = inquiry_vpd_b1(&arr[4]);
+		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
 			arr[1] = cmd[2];        /*sanity */
-			arr[3] = inquiry_evpd_b2(&arr[4]);
+			arr[3] = inquiry_vpd_b2(&arr[4]);
 		} else {
 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
 			kfree(arr);
 			return check_condition_result;
 		}
-		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
+		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
 		ret = fill_from_dev_buffer(scp, arr,
 			    min(len, SDEBUG_MAX_INQ_ARR_SZ));
 		kfree(arr);
 		return ret;
 	}
 	/* drops through here for a standard inquiry */
-	arr[1] = scsi_debug_removable ? 0x80 : 0;	/* Removable disk */
-	arr[2] = scsi_debug_scsi_level;
+	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
+	arr[2] = sdebug_scsi_level;
 	arr[3] = 2;    /* response_data_format==2 */
 	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
-	arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
-	if (0 == scsi_debug_vpd_use_hostno)
+	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
+	if (sdebug_vpd_use_hostno == 0)
 		arr[5] = 0x10; /* claim: implicit TGPS */
 	arr[6] = 0x10; /* claim: MultiP */
 	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
@@ -1409,21 +1377,26 @@
 	memcpy(&arr[16], inq_product_id, 16);
 	memcpy(&arr[32], inq_product_rev, 4);
 	/* version descriptors (2 bytes each) follow */
-	arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
-	arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
+	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
+	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
 	n = 62;
-	if (scsi_debug_ptype == 0) {
-		arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
-	} else if (scsi_debug_ptype == 1) {
-		arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
+	if (is_disk) {		/* SBC-4 no version claimed */
+		put_unaligned_be16(0x600, arr + n);
+		n += 2;
+	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
+		put_unaligned_be16(0x525, arr + n);
+		n += 2;
 	}
-	arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
+	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
 	ret = fill_from_dev_buffer(scp, arr,
 			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
 	kfree(arr);
 	return ret;
 }
 
+static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
+				   0, 0, 0x0, 0x0};
+
 static int resp_requests(struct scsi_cmnd * scp,
 			 struct sdebug_dev_info * devip)
 {
@@ -1452,7 +1425,7 @@
 		}
 	} else {
 		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
-		if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
+		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
 			;	/* have sense and formats match */
 		else if (arr[0] <= 0x70) {
 			if (dsense) {
@@ -1489,24 +1462,25 @@
 			   struct sdebug_dev_info * devip)
 {
 	unsigned char *cmd = scp->cmnd;
-	int power_cond, start;
+	int power_cond, stop;
 
 	power_cond = (cmd[4] & 0xf0) >> 4;
 	if (power_cond) {
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
 		return check_condition_result;
 	}
-	start = cmd[4] & 1;
-	if (start == devip->stopped)
-		devip->stopped = !start;
+	stop = !(cmd[4] & 1);
+	atomic_xchg(&devip->stopped, stop);
 	return 0;
 }
 
 static sector_t get_sdebug_capacity(void)
 {
-	if (scsi_debug_virtual_gb > 0)
-		return (sector_t)scsi_debug_virtual_gb *
-			(1073741824 / scsi_debug_sector_size);
+	static const unsigned int gibibyte = 1073741824;
+
+	if (sdebug_virtual_gb > 0)
+		return (sector_t)sdebug_virtual_gb *
+			(gibibyte / sdebug_sector_size);
 	else
 		return sdebug_store_sectors;
 }
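The capacity helper above is plain scaling: virtual_gb gigabytes expressed in sectors of sdebug_sector_size bytes. A quick userspace sanity check of that arithmetic (assuming the driver's default 512-byte sector size; the virtual_gb value here is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const unsigned int gibibyte = 1073741824;   /* 2^30, as in the patch */
        unsigned int sector_size = 512;             /* assumed default sector size */
        unsigned int virtual_gb = 4;                /* hypothetical parameter value */
        uint64_t sectors = (uint64_t)virtual_gb * (gibibyte / sector_size);

        /* 4 * (2^30 / 512) = 8388608 sectors */
        printf("%llu sectors\n", (unsigned long long)sectors);
        return 0;
    }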
@@ -1523,18 +1497,10 @@
 	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
 	if (sdebug_capacity < 0xffffffff) {
 		capac = (unsigned int)sdebug_capacity - 1;
-		arr[0] = (capac >> 24);
-		arr[1] = (capac >> 16) & 0xff;
-		arr[2] = (capac >> 8) & 0xff;
-		arr[3] = capac & 0xff;
-	} else {
-		arr[0] = 0xff;
-		arr[1] = 0xff;
-		arr[2] = 0xff;
-		arr[3] = 0xff;
-	}
-	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
-	arr[7] = scsi_debug_sector_size & 0xff;
+		put_unaligned_be32(capac, arr + 0);
+	} else
+		put_unaligned_be32(0xffffffff, arr + 0);
+	put_unaligned_be16(sdebug_sector_size, arr + 6);
 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
 }
 
@@ -1544,34 +1510,31 @@
 {
 	unsigned char *cmd = scp->cmnd;
 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
-	unsigned long long capac;
-	int k, alloc_len;
+	int alloc_len;
 
-	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
-		     + cmd[13]);
+	alloc_len = get_unaligned_be32(cmd + 10);
 	/* following just in case virtual_gb changed */
 	sdebug_capacity = get_sdebug_capacity();
 	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
-	capac = sdebug_capacity - 1;
-	for (k = 0; k < 8; ++k, capac >>= 8)
-		arr[7 - k] = capac & 0xff;
-	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
-	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
-	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
-	arr[11] = scsi_debug_sector_size & 0xff;
-	arr[13] = scsi_debug_physblk_exp & 0xf;
-	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
+	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
+	put_unaligned_be32(sdebug_sector_size, arr + 8);
+	arr[13] = sdebug_physblk_exp & 0xf;
+	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
 
 	if (scsi_debug_lbp()) {
 		arr[14] |= 0x80; /* LBPME */
-		if (scsi_debug_lbprz)
-			arr[14] |= 0x40; /* LBPRZ */
+		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
+		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
+		 * in the wider field maps to 0 in this field.
+		 */
+		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
+			arr[14] |= 0x40;
 	}
 
-	arr[15] = scsi_debug_lowest_aligned & 0xff;
+	arr[15] = sdebug_lowest_aligned & 0xff;
 
-	if (scsi_debug_dif) {
-		arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
+	if (have_dif_prot) {
+		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
 		arr[12] |= 1; /* PROT_EN */
 	}
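The LBPRZ handling above and the 3-bit encoding added to the Logical Block Provisioning VPD page earlier in this patch carry the same information at two widths: the VPD page byte built by inquiry_vpd_b2() holds (lbprz & 0x7) << 2, while READ CAPACITY(16) only has a single bit, set when lbprz & 1. A tiny userspace illustration of the two encodings (values 1 and 2 are the ones the driver distinguishes: unmapped blocks reading back as zeros versus 0xff):

    #include <stdio.h>

    int main(void)
    {
        int lbprz;

        for (lbprz = 1; lbprz <= 2; lbprz++) {
            unsigned char vpd_b2_flags = (lbprz & 0x7) << 2;        /* 3-bit field */
            unsigned char rc16_byte14 = (lbprz & 1) ? 0x40 : 0x00;  /* 1-bit field */

            printf("lbprz=%d: VPD B2 flags=0x%02x, READ CAPACITY(16) byte 14 bit=0x%02x\n",
                   lbprz, vpd_b2_flags, rc16_byte14);
        }
        return 0;
    }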
 
@@ -1590,9 +1553,7 @@
 	int n, ret, alen, rlen;
 	int port_group_a, port_group_b, port_a, port_b;
 
-	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
-		+ cmd[9]);
-
+	alen = get_unaligned_be32(cmd + 6);
 	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
 	if (! arr)
 		return DID_REQUEUE << 16;
@@ -1605,49 +1566,46 @@
 	port_a = 0x1; /* relative port A */
 	port_b = 0x2; /* relative port B */
 	port_group_a = (((host_no + 1) & 0x7f) << 8) +
-	    (devip->channel & 0x7f);
+			(devip->channel & 0x7f);
 	port_group_b = (((host_no + 1) & 0x7f) << 8) +
-	    (devip->channel & 0x7f) + 0x80;
+			(devip->channel & 0x7f) + 0x80;
 
 	/*
 	 * The asymmetric access state is cycled according to the host_id.
 	 */
 	n = 4;
-	if (0 == scsi_debug_vpd_use_hostno) {
-	    arr[n++] = host_no % 3; /* Asymm access state */
-	    arr[n++] = 0x0F; /* claim: all states are supported */
+	if (sdebug_vpd_use_hostno == 0) {
+		arr[n++] = host_no % 3; /* Asymm access state */
+		arr[n++] = 0x0F; /* claim: all states are supported */
 	} else {
-	    arr[n++] = 0x0; /* Active/Optimized path */
-	    arr[n++] = 0x01; /* claim: only support active/optimized paths */
+		arr[n++] = 0x0; /* Active/Optimized path */
+		arr[n++] = 0x01; /* only support active/optimized paths */
 	}
-	arr[n++] = (port_group_a >> 8) & 0xff;
-	arr[n++] = port_group_a & 0xff;
+	put_unaligned_be16(port_group_a, arr + n);
+	n += 2;
 	arr[n++] = 0;    /* Reserved */
 	arr[n++] = 0;    /* Status code */
 	arr[n++] = 0;    /* Vendor unique */
 	arr[n++] = 0x1;  /* One port per group */
 	arr[n++] = 0;    /* Reserved */
 	arr[n++] = 0;    /* Reserved */
-	arr[n++] = (port_a >> 8) & 0xff;
-	arr[n++] = port_a & 0xff;
+	put_unaligned_be16(port_a, arr + n);
+	n += 2;
 	arr[n++] = 3;    /* Port unavailable */
 	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
-	arr[n++] = (port_group_b >> 8) & 0xff;
-	arr[n++] = port_group_b & 0xff;
+	put_unaligned_be16(port_group_b, arr + n);
+	n += 2;
 	arr[n++] = 0;    /* Reserved */
 	arr[n++] = 0;    /* Status code */
 	arr[n++] = 0;    /* Vendor unique */
 	arr[n++] = 0x1;  /* One port per group */
 	arr[n++] = 0;    /* Reserved */
 	arr[n++] = 0;    /* Reserved */
-	arr[n++] = (port_b >> 8) & 0xff;
-	arr[n++] = port_b & 0xff;
+	put_unaligned_be16(port_b, arr + n);
+	n += 2;
 
 	rlen = n - 4;
-	arr[0] = (rlen >> 24) & 0xff;
-	arr[1] = (rlen >> 16) & 0xff;
-	arr[2] = (rlen >> 8) & 0xff;
-	arr[3] = rlen & 0xff;
+	put_unaligned_be32(rlen, arr + 0);
 
 	/*
 	 * Return the smallest value of either
@@ -1662,8 +1620,8 @@
 	return ret;
 }
 
-static int
-resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_rsup_opcodes(struct scsi_cmnd *scp,
+			     struct sdebug_dev_info *devip)
 {
 	bool rctd;
 	u8 reporting_opts, req_opcode, sdeb_i, supp;
@@ -1813,8 +1771,8 @@
 	return errsts;
 }
 
-static int
-resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_rsup_tmfs(struct scsi_cmnd *scp,
+			  struct sdebug_dev_info *devip)
 {
 	bool repd;
 	u32 alloc_len, len;
@@ -1871,17 +1829,19 @@
 				     0, 0, 0, 0, 0x40, 0, 0, 0};
 
 	memcpy(p, format_pg, sizeof(format_pg));
-	p[10] = (sdebug_sectors_per >> 8) & 0xff;
-	p[11] = sdebug_sectors_per & 0xff;
-	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
-	p[13] = scsi_debug_sector_size & 0xff;
-	if (scsi_debug_removable)
+	put_unaligned_be16(sdebug_sectors_per, p + 10);
+	put_unaligned_be16(sdebug_sector_size, p + 12);
+	if (sdebug_removable)
 		p[20] |= 0x20; /* should agree with INQUIRY */
 	if (1 == pcontrol)
 		memset(p + 2, 0, sizeof(format_pg) - 2);
 	return sizeof(format_pg);
 }
 
+static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
+				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
+				     0, 0, 0, 0};
+
 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
 { 	/* Caching page for mode_sense */
 	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
@@ -1889,7 +1849,7 @@
 	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
 		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
 
-	if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
+	if (SDEBUG_OPT_N_WCE & sdebug_opts)
 		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
 	memcpy(p, caching_pg, sizeof(caching_pg));
 	if (1 == pcontrol)
@@ -1899,6 +1859,9 @@
 	return sizeof(caching_pg);
 }
 
+static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
+				    0, 0, 0x2, 0x4b};
+
 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
 { 	/* Control mode page for mode_sense */
 	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
@@ -1906,12 +1869,12 @@
 	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
 				     0, 0, 0x2, 0x4b};
 
-	if (scsi_debug_dsense)
+	if (sdebug_dsense)
 		ctrl_m_pg[2] |= 0x4;
 	else
 		ctrl_m_pg[2] &= ~0x4;
 
-	if (scsi_debug_ato)
+	if (sdebug_ato)
 		ctrl_m_pg[5] |= 0x80; /* ATO=1 */
 
 	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
@@ -1955,31 +1918,29 @@
 {	/* SAS phy control and discover mode page for mode_sense */
 	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
 		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
-		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
-		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
+		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
+		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
 		    0x2, 0, 0, 0, 0, 0, 0, 0,
 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
 		    0, 0, 0, 0, 0, 0, 0, 0,
 		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
-		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
-		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
+		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
+		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
 		    0x3, 0, 0, 0, 0, 0, 0, 0,
 		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
 		    0, 0, 0, 0, 0, 0, 0, 0,
 		};
 	int port_a, port_b;
 
+	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
+	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
+	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
+	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
 	port_a = target_dev_id + 1;
 	port_b = port_a + 1;
 	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
-	p[20] = (port_a >> 24);
-	p[21] = (port_a >> 16) & 0xff;
-	p[22] = (port_a >> 8) & 0xff;
-	p[23] = port_a & 0xff;
-	p[48 + 20] = (port_b >> 24);
-	p[48 + 21] = (port_b >> 16) & 0xff;
-	p[48 + 22] = (port_b >> 8) & 0xff;
-	p[48 + 23] = port_b & 0xff;
+	put_unaligned_be32(port_a, p + 20);
+	put_unaligned_be32(port_b, p + 48 + 20);
 	if (1 == pcontrol)
 		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
 	return sizeof(sas_pcd_m_pg);
@@ -1999,29 +1960,30 @@
 
 #define SDEBUG_MAX_MSENSE_SZ 256
 
-static int
-resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_mode_sense(struct scsi_cmnd *scp,
+			   struct sdebug_dev_info *devip)
 {
-	unsigned char dbd, llbaa;
 	int pcontrol, pcode, subpcode, bd_len;
 	unsigned char dev_spec;
-	int k, alloc_len, msense_6, offset, len, target_dev_id;
+	int alloc_len, offset, len, target_dev_id;
 	int target = scp->device->id;
 	unsigned char * ap;
 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
 	unsigned char *cmd = scp->cmnd;
+	bool dbd, llbaa, msense_6, is_disk, bad_pcode;
 
-	dbd = !!(cmd[1] & 0x8);
+	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
 	pcontrol = (cmd[2] & 0xc0) >> 6;
 	pcode = cmd[2] & 0x3f;
 	subpcode = cmd[3];
 	msense_6 = (MODE_SENSE == cmd[0]);
-	llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
-	if ((0 == scsi_debug_ptype) && (0 == dbd))
+	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
+	is_disk = (sdebug_ptype == TYPE_DISK);
+	if (is_disk && !dbd)
 		bd_len = llbaa ? 16 : 8;
 	else
 		bd_len = 0;
-	alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
+	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
 	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
 	if (0x3 == pcontrol) {  /* Saving values not supported */
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
@@ -2029,9 +1991,9 @@
 	}
 	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
 			(devip->target * 1000) - 3;
-	/* set DPOFUA bit for disks */
-	if (0 == scsi_debug_ptype)
-		dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
+	/* for disks set DPOFUA bit and clear write protect (WP) bit */
+	if (is_disk)
+		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
 	else
 		dev_spec = 0x0;
 	if (msense_6) {
@@ -2050,30 +2012,16 @@
 		sdebug_capacity = get_sdebug_capacity();
 
 	if (8 == bd_len) {
-		if (sdebug_capacity > 0xfffffffe) {
-			ap[0] = 0xff;
-			ap[1] = 0xff;
-			ap[2] = 0xff;
-			ap[3] = 0xff;
-		} else {
-			ap[0] = (sdebug_capacity >> 24) & 0xff;
-			ap[1] = (sdebug_capacity >> 16) & 0xff;
-			ap[2] = (sdebug_capacity >> 8) & 0xff;
-			ap[3] = sdebug_capacity & 0xff;
-		}
-		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
-		ap[7] = scsi_debug_sector_size & 0xff;
+		if (sdebug_capacity > 0xfffffffe)
+			put_unaligned_be32(0xffffffff, ap + 0);
+		else
+			put_unaligned_be32(sdebug_capacity, ap + 0);
+		put_unaligned_be16(sdebug_sector_size, ap + 6);
 		offset += bd_len;
 		ap = arr + offset;
 	} else if (16 == bd_len) {
-		unsigned long long capac = sdebug_capacity;
-
-        	for (k = 0; k < 8; ++k, capac >>= 8)
-                	ap[7 - k] = capac & 0xff;
-		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
-		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
-		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
-		ap[15] = scsi_debug_sector_size & 0xff;
+		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
+		put_unaligned_be32(sdebug_sector_size, ap + 12);
 		offset += bd_len;
 		ap = arr + offset;
 	}
@@ -2083,6 +2031,8 @@
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
 		return check_condition_result;
 	}
+	bad_pcode = false;
+
 	switch (pcode) {
 	case 0x1:	/* Read-Write error recovery page, direct access */
 		len = resp_err_recov_pg(ap, pcontrol, target);
@@ -2093,12 +2043,18 @@
 		offset += len;
 		break;
         case 0x3:       /* Format device page, direct access */
-                len = resp_format_pg(ap, pcontrol, target);
-                offset += len;
+		if (is_disk) {
+			len = resp_format_pg(ap, pcontrol, target);
+			offset += len;
+		} else
+			bad_pcode = true;
                 break;
 	case 0x8:	/* Caching page, direct access */
-		len = resp_caching_pg(ap, pcontrol, target);
-		offset += len;
+		if (is_disk) {
+			len = resp_caching_pg(ap, pcontrol, target);
+			offset += len;
+		} else
+			bad_pcode = true;
 		break;
 	case 0xa:	/* Control Mode page, all devices */
 		len = resp_ctrl_m_pg(ap, pcontrol, target);
@@ -2127,8 +2083,12 @@
 		if ((0 == subpcode) || (0xff == subpcode)) {
 			len = resp_err_recov_pg(ap, pcontrol, target);
 			len += resp_disconnect_pg(ap + len, pcontrol, target);
-			len += resp_format_pg(ap + len, pcontrol, target);
-			len += resp_caching_pg(ap + len, pcontrol, target);
+			if (is_disk) {
+				len += resp_format_pg(ap + len, pcontrol,
+						      target);
+				len += resp_caching_pg(ap + len, pcontrol,
+						       target);
+			}
 			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
 			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
 			if (0xff == subpcode) {
@@ -2137,29 +2097,31 @@
 				len += resp_sas_sha_m_spg(ap + len, pcontrol);
 			}
 			len += resp_iec_m_pg(ap + len, pcontrol, target);
+			offset += len;
 		} else {
 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
 			return check_condition_result;
                 }
-		offset += len;
 		break;
 	default:
+		bad_pcode = true;
+		break;
+	}
+	if (bad_pcode) {
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
 		return check_condition_result;
 	}
 	if (msense_6)
 		arr[0] = offset - 1;
-	else {
-		arr[0] = ((offset - 2) >> 8) & 0xff;
-		arr[1] = (offset - 2) & 0xff;
-	}
+	else
+		put_unaligned_be16((offset - 2), arr + 0);
 	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
 }
 
 #define SDEBUG_MAX_MSELECT_SZ 512
 
-static int
-resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_mode_select(struct scsi_cmnd *scp,
+			    struct sdebug_dev_info *devip)
 {
 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
 	int param_len, res, mpage;
@@ -2170,21 +2132,20 @@
 	memset(arr, 0, sizeof(arr));
 	pf = cmd[1] & 0x10;
 	sp = cmd[1] & 0x1;
-	param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
+	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
 	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
 		return check_condition_result;
 	}
         res = fetch_to_dev_buffer(scp, arr, param_len);
         if (-1 == res)
-                return (DID_ERROR << 16);
-        else if ((res < param_len) &&
-                 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+		return DID_ERROR << 16;
+	else if (sdebug_verbose && (res < param_len))
 		sdev_printk(KERN_INFO, scp->device,
 			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
 			    __func__, param_len, res);
-	md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
-	bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
+	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
+	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
 	if (md_len > 2) {
 		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
 		return check_condition_result;
@@ -2197,7 +2158,7 @@
 		return check_condition_result;
 	}
 	spf = !!(arr[off] & 0x40);
-	pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
+	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
 		       (arr[off + 1] + 2);
 	if ((pg_len + off) > param_len) {
 		mk_sense_buffer(scp, ILLEGAL_REQUEST,
@@ -2216,7 +2177,7 @@
 		if (ctrl_m_pg[1] == arr[off + 1]) {
 			memcpy(ctrl_m_pg + 2, arr + off + 2,
 			       sizeof(ctrl_m_pg) - 2);
-			scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
+			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
 			goto set_mode_changed_ua;
 		}
 		break;
@@ -2279,7 +2240,7 @@
 	pcontrol = (cmd[2] & 0xc0) >> 6;
 	pcode = cmd[2] & 0x3f;
 	subpcode = cmd[3] & 0xff;
-	alloc_len = (cmd[7] << 8) + cmd[8];
+	alloc_len = get_unaligned_be16(cmd + 7);
 	arr[0] = pcode;
 	if (0 == subpcode) {
 		switch (pcode) {
@@ -2336,7 +2297,7 @@
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
 		return check_condition_result;
 	}
-	len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
+	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
 	return fill_from_dev_buffer(scp, arr,
 		    min(len, SDEBUG_MAX_INQ_ARR_SZ));
 }
@@ -2358,8 +2319,8 @@
 }
 
 /* Returns number of bytes copied or -1 if error. */
-static int
-do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
+static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
+			    bool do_write)
 {
 	int ret;
 	u64 block, rest = 0;
@@ -2384,15 +2345,15 @@
 		rest = block + num - sdebug_store_sectors;
 
 	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
-		   fake_storep + (block * scsi_debug_sector_size),
-		   (num - rest) * scsi_debug_sector_size, 0, do_write);
-	if (ret != (num - rest) * scsi_debug_sector_size)
+		   fake_storep + (block * sdebug_sector_size),
+		   (num - rest) * sdebug_sector_size, 0, do_write);
+	if (ret != (num - rest) * sdebug_sector_size)
 		return ret;
 
 	if (rest) {
 		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
-			    fake_storep, rest * scsi_debug_sector_size,
-			    (num - rest) * scsi_debug_sector_size, do_write);
+			    fake_storep, rest * sdebug_sector_size,
+			    (num - rest) * sdebug_sector_size, do_write);
 	}
 
 	return ret;
@@ -2401,13 +2362,12 @@
 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
  * arr into fake_store(lba,num) and return true. If comparison fails then
  * return false. */
-static bool
-comp_write_worker(u64 lba, u32 num, const u8 *arr)
+static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
 {
 	bool res;
 	u64 block, rest = 0;
 	u32 store_blks = sdebug_store_sectors;
-	u32 lb_size = scsi_debug_sector_size;
+	u32 lb_size = sdebug_sector_size;
 
 	block = do_div(lba, store_blks);
 	if (block + num > store_blks)
@@ -2434,7 +2394,7 @@
 {
 	__be16 csum;
 
-	if (scsi_debug_guard)
+	if (sdebug_guard)
 		csum = (__force __be16)ip_compute_csum(buf, len);
 	else
 		csum = cpu_to_be16(crc_t10dif(buf, len));
@@ -2445,7 +2405,7 @@
 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
 		      sector_t sector, u32 ei_lba)
 {
-	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
+	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
 
 	if (sdt->guard_tag != csum) {
 		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
@@ -2454,13 +2414,13 @@
 			be16_to_cpu(csum));
 		return 0x01;
 	}
-	if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
+	if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
 		pr_err("REF check failed on sector %lu\n",
 			(unsigned long)sector);
 		return 0x03;
 	}
-	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
 		pr_err("REF check failed on sector %lu\n",
 			(unsigned long)sector);
@@ -2541,10 +2501,10 @@
 	return 0;
 }
 
-static int
-resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 {
 	u8 *cmd = scp->cmnd;
+	struct sdebug_queued_cmd *sqcp;
 	u64 lba;
 	u32 num;
 	u32 ei_lba;
@@ -2591,40 +2551,43 @@
 		check_prot = false;
 		break;
 	}
-	if (check_prot) {
-		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+	if (unlikely(have_dif_prot && check_prot)) {
+		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
 		    (cmd[1] & 0xe0)) {
 			mk_sense_invalid_opcode(scp);
 			return check_condition_result;
 		}
-		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
-		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
+		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
+		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
 		    (cmd[1] & 0xe0) == 0)
 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
 				    "to DIF device\n");
 	}
-	if (sdebug_any_injecting_opt) {
-		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
+	if (unlikely(sdebug_any_injecting_opt)) {
+		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
 
-		if (ep->inj_short)
-			num /= 2;
-	}
+		if (sqcp) {
+			if (sqcp->inj_short)
+				num /= 2;
+		}
+	} else
+		sqcp = NULL;
 
 	/* inline check_device_access_params() */
-	if (lba + num > sdebug_capacity) {
+	if (unlikely(lba + num > sdebug_capacity)) {
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
 		return check_condition_result;
 	}
 	/* transfer length excessive (tie in to block limits VPD page) */
-	if (num > sdebug_store_sectors) {
+	if (unlikely(num > sdebug_store_sectors)) {
 		/* needs work to find which cdb byte 'num' comes from */
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
 		return check_condition_result;
 	}
 
-	if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
-	    (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
-	    ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
+	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
+		     (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
+		     ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
 		/* claim unrecoverable read error */
 		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
 		/* set info field and valid bit for fixed descriptor */
@@ -2641,7 +2604,7 @@
 	read_lock_irqsave(&atomic_rw, iflags);
 
 	/* DIX + T10 DIF */
-	if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
+	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
 		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
 
 		if (prot_ret) {
@@ -2653,27 +2616,25 @@
 
 	ret = do_device_access(scp, lba, num, false);
 	read_unlock_irqrestore(&atomic_rw, iflags);
-	if (ret == -1)
+	if (unlikely(ret == -1))
 		return DID_ERROR << 16;
 
 	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
 
-	if (sdebug_any_injecting_opt) {
-		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
-
-		if (ep->inj_recovered) {
+	if (unlikely(sqcp)) {
+		if (sqcp->inj_recovered) {
 			mk_sense_buffer(scp, RECOVERED_ERROR,
 					THRESHOLD_EXCEEDED, 0);
 			return check_condition_result;
-		} else if (ep->inj_transport) {
+		} else if (sqcp->inj_transport) {
 			mk_sense_buffer(scp, ABORTED_COMMAND,
 					TRANSPORT_PROBLEM, ACK_NAK_TO);
 			return check_condition_result;
-		} else if (ep->inj_dif) {
+		} else if (sqcp->inj_dif) {
 			/* Logical block guard check failed */
 			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
 			return illegal_condition_result;
-		} else if (ep->inj_dix) {
+		} else if (sqcp->inj_dix) {
 			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
 			return illegal_condition_result;
 		}
@@ -2750,13 +2711,13 @@
 
 			ret = dif_verify(sdt, daddr, sector, ei_lba);
 			if (ret) {
-				dump_sector(daddr, scsi_debug_sector_size);
+				dump_sector(daddr, sdebug_sector_size);
 				goto out;
 			}
 
 			sector++;
 			ei_lba++;
-			dpage_offset += scsi_debug_sector_size;
+			dpage_offset += sdebug_sector_size;
 		}
 		diter.consumed = dpage_offset;
 		sg_miter_stop(&diter);
@@ -2777,24 +2738,18 @@
 
 static unsigned long lba_to_map_index(sector_t lba)
 {
-	if (scsi_debug_unmap_alignment) {
-		lba += scsi_debug_unmap_granularity -
-			scsi_debug_unmap_alignment;
-	}
-	sector_div(lba, scsi_debug_unmap_granularity);
-
+	if (sdebug_unmap_alignment)
+		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
+	sector_div(lba, sdebug_unmap_granularity);
 	return lba;
 }
 
 static sector_t map_index_to_lba(unsigned long index)
 {
-	sector_t lba = index * scsi_debug_unmap_granularity;
+	sector_t lba = index * sdebug_unmap_granularity;
 
-	if (scsi_debug_unmap_alignment) {
-		lba -= scsi_debug_unmap_granularity -
-			scsi_debug_unmap_alignment;
-	}
-
+	if (sdebug_unmap_alignment)
+		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
 	return lba;
 }
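lba_to_map_index() and map_index_to_lba() above convert between LBAs and bits in the provisioning map, shifted by the unmap alignment. A userspace sketch of the same arithmetic (plain division in place of sector_div(); the granularity and alignment values below are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative values; in the driver they come from the unmap_granularity
     * and unmap_alignment module parameters. */
    static const uint64_t granularity = 8;
    static const uint64_t alignment = 4;

    static uint64_t lba_to_map_index(uint64_t lba)
    {
        if (alignment)
            lba += granularity - alignment;
        return lba / granularity;       /* sector_div() in the kernel */
    }

    static uint64_t map_index_to_lba(uint64_t index)
    {
        uint64_t lba = index * granularity;

        if (alignment)
            lba -= granularity - alignment;
        return lba;
    }

    int main(void)
    {
        uint64_t lba;

        /* LBAs 4..11 all land in map bit 1, whose first LBA is 4 */
        for (lba = 4; lba < 12; lba++)
            printf("lba %llu -> index %llu\n",
                   (unsigned long long)lba,
                   (unsigned long long)lba_to_map_index(lba));
        printf("index 1 starts at lba %llu\n",
               (unsigned long long)map_index_to_lba(1));
        return 0;
    }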
 
@@ -2815,7 +2770,6 @@
 
 	end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
 	*num = end - lba;
-
 	return mapped;
 }
 
@@ -2841,27 +2795,27 @@
 		unsigned long index = lba_to_map_index(lba);
 
 		if (lba == map_index_to_lba(index) &&
-		    lba + scsi_debug_unmap_granularity <= end &&
+		    lba + sdebug_unmap_granularity <= end &&
 		    index < map_size) {
 			clear_bit(index, map_storep);
-			if (scsi_debug_lbprz) {
+			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
 				memset(fake_storep +
-				       lba * scsi_debug_sector_size, 0,
-				       scsi_debug_sector_size *
-				       scsi_debug_unmap_granularity);
+				       lba * sdebug_sector_size,
+				       (sdebug_lbprz & 1) ? 0 : 0xff,
+				       sdebug_sector_size *
+				       sdebug_unmap_granularity);
 			}
 			if (dif_storep) {
 				memset(dif_storep + lba, 0xff,
 				       sizeof(*dif_storep) *
-				       scsi_debug_unmap_granularity);
+				       sdebug_unmap_granularity);
 			}
 		}
 		lba = map_index_to_lba(index + 1);
 	}
 }
 
-static int
-resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 {
 	u8 *cmd = scp->cmnd;
 	u64 lba;
@@ -2910,26 +2864,26 @@
 		check_prot = false;
 		break;
 	}
-	if (check_prot) {
-		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+	if (unlikely(have_dif_prot && check_prot)) {
+		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
 		    (cmd[1] & 0xe0)) {
 			mk_sense_invalid_opcode(scp);
 			return check_condition_result;
 		}
-		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
-		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
+		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
+		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
 		    (cmd[1] & 0xe0) == 0)
 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
 				    "to DIF device\n");
 	}
 
 	/* inline check_device_access_params() */
-	if (lba + num > sdebug_capacity) {
+	if (unlikely(lba + num > sdebug_capacity)) {
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
 		return check_condition_result;
 	}
 	/* transfer length excessive (tie in to block limits VPD page) */
-	if (num > sdebug_store_sectors) {
+	if (unlikely(num > sdebug_store_sectors)) {
 		/* needs work to find which cdb byte 'num' comes from */
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
 		return check_condition_result;
@@ -2938,7 +2892,7 @@
 	write_lock_irqsave(&atomic_rw, iflags);
 
 	/* DIX + T10 DIF */
-	if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
+	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
 		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
 
 		if (prot_ret) {
@@ -2949,43 +2903,46 @@
 	}
 
 	ret = do_device_access(scp, lba, num, true);
-	if (scsi_debug_lbp())
+	if (unlikely(scsi_debug_lbp()))
 		map_region(lba, num);
 	write_unlock_irqrestore(&atomic_rw, iflags);
-	if (-1 == ret)
-		return (DID_ERROR << 16);
-	else if ((ret < (num * scsi_debug_sector_size)) &&
-		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+	if (unlikely(-1 == ret))
+		return DID_ERROR << 16;
+	else if (unlikely(sdebug_verbose &&
+			  (ret < (num * sdebug_sector_size))))
 		sdev_printk(KERN_INFO, scp->device,
 			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
-			    my_name, num * scsi_debug_sector_size, ret);
+			    my_name, num * sdebug_sector_size, ret);
 
-	if (sdebug_any_injecting_opt) {
-		struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
+	if (unlikely(sdebug_any_injecting_opt)) {
+		struct sdebug_queued_cmd *sqcp =
+				(struct sdebug_queued_cmd *)scp->host_scribble;
 
-		if (ep->inj_recovered) {
-			mk_sense_buffer(scp, RECOVERED_ERROR,
-					THRESHOLD_EXCEEDED, 0);
-			return check_condition_result;
-		} else if (ep->inj_dif) {
-			/* Logical block guard check failed */
-			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
-			return illegal_condition_result;
-		} else if (ep->inj_dix) {
-			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
-			return illegal_condition_result;
+		if (sqcp) {
+			if (sqcp->inj_recovered) {
+				mk_sense_buffer(scp, RECOVERED_ERROR,
+						THRESHOLD_EXCEEDED, 0);
+				return check_condition_result;
+			} else if (sqcp->inj_dif) {
+				/* Logical block guard check failed */
+				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+				return illegal_condition_result;
+			} else if (sqcp->inj_dix) {
+				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+				return illegal_condition_result;
+			}
 		}
 	}
 	return 0;
 }
 
-static int
-resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
-		bool unmap, bool ndob)
+static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
+			   u32 ei_lba, bool unmap, bool ndob)
 {
 	unsigned long iflags;
 	unsigned long long i;
 	int ret;
+	u64 lba_off;
 
 	ret = check_device_access_params(scp, lba, num);
 	if (ret)
@@ -2998,31 +2955,29 @@
 		goto out;
 	}
 
+	lba_off = lba * sdebug_sector_size;
 	/* if ndob then zero 1 logical block, else fetch 1 logical block */
 	if (ndob) {
-		memset(fake_storep + (lba * scsi_debug_sector_size), 0,
-		       scsi_debug_sector_size);
+		memset(fake_storep + lba_off, 0, sdebug_sector_size);
 		ret = 0;
 	} else
-		ret = fetch_to_dev_buffer(scp, fake_storep +
-					       (lba * scsi_debug_sector_size),
-					  scsi_debug_sector_size);
+		ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
+					  sdebug_sector_size);
 
 	if (-1 == ret) {
 		write_unlock_irqrestore(&atomic_rw, iflags);
-		return (DID_ERROR << 16);
-	} else if ((ret < (num * scsi_debug_sector_size)) &&
-		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+		return DID_ERROR << 16;
+	} else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
 		sdev_printk(KERN_INFO, scp->device,
 			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
 			    my_name, "write same",
-			    num * scsi_debug_sector_size, ret);
+			    num * sdebug_sector_size, ret);
 
 	/* Copy first sector to remaining blocks */
 	for (i = 1 ; i < num ; i++)
-		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
-		       fake_storep + (lba * scsi_debug_sector_size),
-		       scsi_debug_sector_size);
+		memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
+		       fake_storep + lba_off,
+		       sdebug_sector_size);
 
 	if (scsi_debug_lbp())
 		map_region(lba, num);
@@ -3032,8 +2987,8 @@
 	return 0;
 }
 
-static int
-resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_write_same_10(struct scsi_cmnd *scp,
+			      struct sdebug_dev_info *devip)
 {
 	u8 *cmd = scp->cmnd;
 	u32 lba;
@@ -3042,7 +2997,7 @@
 	bool unmap = false;
 
 	if (cmd[1] & 0x8) {
-		if (scsi_debug_lbpws10 == 0) {
+		if (sdebug_lbpws10 == 0) {
 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
 			return check_condition_result;
 		} else
@@ -3050,15 +3005,15 @@
 	}
 	lba = get_unaligned_be32(cmd + 2);
 	num = get_unaligned_be16(cmd + 7);
-	if (num > scsi_debug_write_same_length) {
+	if (num > sdebug_write_same_length) {
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
 		return check_condition_result;
 	}
 	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
 }
 
-static int
-resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_write_same_16(struct scsi_cmnd *scp,
+			      struct sdebug_dev_info *devip)
 {
 	u8 *cmd = scp->cmnd;
 	u64 lba;
@@ -3068,7 +3023,7 @@
 	bool ndob = false;
 
 	if (cmd[1] & 0x8) {	/* UNMAP */
-		if (scsi_debug_lbpws == 0) {
+		if (sdebug_lbpws == 0) {
 			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
 			return check_condition_result;
 		} else
@@ -3078,7 +3033,7 @@
 		ndob = true;
 	lba = get_unaligned_be64(cmd + 2);
 	num = get_unaligned_be32(cmd + 10);
-	if (num > scsi_debug_write_same_length) {
+	if (num > sdebug_write_same_length) {
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
 		return check_condition_result;
 	}
@@ -3088,8 +3043,8 @@
 /* Note the mode field is in the same position as the (lower) service action
  * field. For the Report supported operation codes command, SPC-4 suggests
  * each mode of this command should be reported separately; for future. */
-static int
-resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_write_buffer(struct scsi_cmnd *scp,
+			     struct sdebug_dev_info *devip)
 {
 	u8 *cmd = scp->cmnd;
 	struct scsi_device *sdp = scp->device;
@@ -3134,15 +3089,15 @@
 	return 0;
 }
 
-static int
-resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_comp_write(struct scsi_cmnd *scp,
+			   struct sdebug_dev_info *devip)
 {
 	u8 *cmd = scp->cmnd;
 	u8 *arr;
 	u8 *fake_storep_hold;
 	u64 lba;
 	u32 dnum;
-	u32 lb_size = scsi_debug_sector_size;
+	u32 lb_size = sdebug_sector_size;
 	u8 num;
 	unsigned long iflags;
 	int ret;
@@ -3152,13 +3107,13 @@
 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
 	if (0 == num)
 		return 0;	/* degenerate case, not an error */
-	if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
 	    (cmd[1] & 0xe0)) {
 		mk_sense_invalid_opcode(scp);
 		return check_condition_result;
 	}
-	if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
-	     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
+	if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
+	     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
 	    (cmd[1] & 0xe0) == 0)
 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
 			    "to DIF device\n");
@@ -3193,8 +3148,7 @@
 	if (ret == -1) {
 		retval = DID_ERROR << 16;
 		goto cleanup;
-	} else if ((ret < (dnum * lb_size)) &&
-		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
 		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
 			    "indicated=%u, IO sent=%d bytes\n", my_name,
 			    dnum * lb_size, ret);
@@ -3217,8 +3171,7 @@
 	__be32	__reserved;
 };
 
-static int
-resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 {
 	unsigned char *buf;
 	struct unmap_block_desc *desc;
@@ -3233,12 +3186,12 @@
 	BUG_ON(scsi_bufflen(scp) != payload_len);
 
 	descriptors = (payload_len - 8) / 16;
-	if (descriptors > scsi_debug_unmap_max_desc) {
+	if (descriptors > sdebug_unmap_max_desc) {
 		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
 		return check_condition_result;
 	}
 
-	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
+	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
 	if (!buf) {
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
 				INSUFF_RES_ASCQ);
@@ -3276,8 +3229,8 @@
 
 #define SDEBUG_GET_LBA_STATUS_LEN 32
 
-static int
-resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_get_lba_status(struct scsi_cmnd *scp,
+			       struct sdebug_dev_info *devip)
 {
 	u8 *cmd = scp->cmnd;
 	u64 lba;
@@ -3316,63 +3269,94 @@
 	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
 }
 
-#define SDEBUG_RLUN_ARR_SZ 256
-
-static int resp_report_luns(struct scsi_cmnd * scp,
-			    struct sdebug_dev_info * devip)
+/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
+ * (W-LUN), the normal Linux scanning logic does not associate it with a
+ * device (e.g. /dev/sg7). The following magic will make that association:
+ *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
+ * where <n> is a host number. If there are multiple targets in a host then
+ * the above will associate a W-LUN with each target. To get a W-LUN only
+ * for target 2, use "echo '- 2 49409' > scan".
+ */
+static int resp_report_luns(struct scsi_cmnd *scp,
+			    struct sdebug_dev_info *devip)
 {
-	unsigned int alloc_len;
-	int lun_cnt, i, upper, num, n, want_wlun, shortish;
-	u64 lun;
 	unsigned char *cmd = scp->cmnd;
-	int select_report = (int)cmd[2];
-	struct scsi_lun *one_lun;
-	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
-	unsigned char * max_addr;
+	unsigned int alloc_len;
+	unsigned char select_report;
+	u64 lun;
+	struct scsi_lun *lun_p;
+	u8 *arr;
+	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
+	unsigned int wlun_cnt;	/* report luns W-LUN count */
+	unsigned int tlun_cnt;	/* total LUN count */
+	unsigned int rlen;	/* response length (in bytes) */
+	int i, res;
 
 	clear_luns_changed_on_target(devip);
-	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
-	shortish = (alloc_len < 4);
-	if (shortish || (select_report > 2)) {
-		mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
+
+	select_report = cmd[2];
+	alloc_len = get_unaligned_be32(cmd + 6);
+
+	if (alloc_len < 4) {
+		pr_err("alloc len too small %d\n", alloc_len);
+		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
 		return check_condition_result;
 	}
-	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
-	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
-	lun_cnt = scsi_debug_max_luns;
-	if (1 == select_report)
+
+	switch (select_report) {
+	case 0:		/* all LUNs apart from W-LUNs */
+		lun_cnt = sdebug_max_luns;
+		wlun_cnt = 0;
+		break;
+	case 1:		/* only W-LUNs */
 		lun_cnt = 0;
-	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
+		wlun_cnt = 1;
+		break;
+	case 2:		/* all LUNs */
+		lun_cnt = sdebug_max_luns;
+		wlun_cnt = 1;
+		break;
+	case 0x10:	/* only administrative LUs */
+	case 0x11:	/* see SPC-5 */
+	case 0x12:	/* only subsidiary LUs owned by referenced LU */
+	default:
+		pr_debug("select report invalid %d\n", select_report);
+		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+		return check_condition_result;
+	}
+
+	if (sdebug_no_lun_0 && (lun_cnt > 0))
 		--lun_cnt;
-	want_wlun = (select_report > 0) ? 1 : 0;
-	num = lun_cnt + want_wlun;
-	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
-	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
-	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
-			    sizeof(struct scsi_lun)), num);
-	if (n < num) {
-		want_wlun = 0;
-		lun_cnt = n;
+
+	tlun_cnt = lun_cnt + wlun_cnt;
+
+	rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8;
+	arr = vmalloc(rlen);
+	if (!arr) {
+		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+				INSUFF_RES_ASCQ);
+		return check_condition_result;
 	}
-	one_lun = (struct scsi_lun *) &arr[8];
-	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
-	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
-             ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
-	     i++, lun++) {
-		upper = (lun >> 8) & 0x3f;
-		if (upper)
-			one_lun[i].scsi_lun[0] =
-			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
-		one_lun[i].scsi_lun[1] = lun & 0xff;
-	}
-	if (want_wlun) {
-		one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
-		one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
-		i++;
-	}
-	alloc_len = (unsigned char *)(one_lun + i) - arr;
-	return fill_from_dev_buffer(scp, arr,
-				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
+	memset(arr, 0, rlen);
+	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
+		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
+
+	/* luns start at byte 8 in response following the header */
+	lun_p = (struct scsi_lun *)&arr[8];
+
+	/* LUNs use single level peripheral device addressing method */
+	lun = sdebug_no_lun_0 ? 1 : 0;
+	for (i = 0; i < lun_cnt; i++)
+		int_to_scsilun(lun++, lun_p++);
+
+	if (wlun_cnt)
+		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++);
+
+	put_unaligned_be32(rlen - 8, &arr[0]);
+
+	res = fill_from_dev_buffer(scp, arr, rlen);
+	vfree(arr);
+	return res;
 }
 
 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
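The rewritten resp_report_luns() above now builds each entry with the kernel's int_to_scsilun() instead of poking LUN bytes by hand. The userspace sketch below mirrors that packing (pack_lun() is a reimplementation for illustration, not the patch's code); note that 49409 from the scan example above is just the REPORT LUNS W-LUN 0xc101:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define SCSI_W_LUN_REPORT_LUNS 0xc101   /* 49409 decimal */

    /* Userspace mirror of int_to_scsilun(): pack a 64-bit LUN into the
     * 8-byte REPORT LUNS format, two bytes per addressing level. */
    static void pack_lun(uint64_t lun, unsigned char b[8])
    {
        int i;

        memset(b, 0, 8);
        for (i = 0; i < 8; i += 2) {
            b[i] = (lun >> 8) & 0xff;
            b[i + 1] = lun & 0xff;
            lun >>= 16;
        }
    }

    static void show(const char *what, uint64_t lun)
    {
        unsigned char b[8];
        int i;

        pack_lun(lun, b);
        printf("%-18s", what);
        for (i = 0; i < 8; i++)
            printf(" %02x", b[i]);
        printf("\n");
    }

    int main(void)
    {
        show("lun 0", 0);           /* 00 00 00 00 00 00 00 00 */
        show("lun 5", 5);           /* 00 05 ... single level peripheral */
        show("report-luns wlun", SCSI_W_LUN_REPORT_LUNS);  /* c1 01 ... */
        return 0;
    }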
@@ -3385,7 +3369,7 @@
 	struct sg_mapping_iter miter;
 
 	/* better not to use temporary buffer. */
-	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
+	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
 	if (!buf) {
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
 				INSUFF_RES_ASCQ);
@@ -3411,8 +3395,8 @@
 	return 0;
 }
 
-static int
-resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_xdwriteread_10(struct scsi_cmnd *scp,
+			       struct sdebug_dev_info *devip)
 {
 	u8 *cmd = scp->cmnd;
 	u64 lba;
@@ -3437,41 +3421,66 @@
 	return resp_xdwriteread(scp, lba, num, devip);
 }
 
-/* When timer or tasklet goes off this function is called. */
-static void sdebug_q_cmd_complete(unsigned long indx)
+static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
 {
-	int qa_indx;
+	struct sdebug_queue *sqp = sdebug_q_arr;
+
+	if (sdebug_mq_active) {
+		u32 tag = blk_mq_unique_tag(cmnd->request);
+		u16 hwq = blk_mq_unique_tag_to_hwq(tag);
+
+		if (unlikely(hwq >= submit_queues)) {
+			pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
+			hwq %= submit_queues;
+		}
+		pr_debug("tag=%u, hwq=%d\n", tag, hwq);
+		return sqp + hwq;
+	} else
+		return sqp;
+}
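get_queue() above picks one of the new per-hardware-queue sdebug_queue structures by decoding the hardware queue number out of the block layer's unique tag. The userspace sketch below mimics that decode; the 16-bit split between hwq and per-queue tag is my reading of blk_mq_unique_tag()/blk_mq_unique_tag_to_hwq(), not something stated in the patch:

    #include <stdio.h>
    #include <stdint.h>

    #define UNIQUE_TAG_BITS 16
    #define UNIQUE_TAG_MASK ((1U << UNIQUE_TAG_BITS) - 1)

    /* hwq in the upper 16 bits, per-queue tag in the lower 16 bits */
    static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
    {
        return ((uint32_t)hwq << UNIQUE_TAG_BITS) | (tag & UNIQUE_TAG_MASK);
    }

    static uint16_t unique_tag_to_hwq(uint32_t unique)
    {
        return unique >> UNIQUE_TAG_BITS;
    }

    int main(void)
    {
        int submit_queues = 4;                 /* like the new module parameter */
        uint32_t unique = make_unique_tag(2, 37);
        uint16_t hwq = unique_tag_to_hwq(unique);

        if (hwq >= submit_queues)              /* same guard as get_queue() */
            hwq %= submit_queues;
        printf("unique=0x%x -> hwq=%u\n", (unsigned)unique, (unsigned)hwq);  /* hwq=2 */
        return 0;
    }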
+
+/* Queued (deferred) command completions converge here. */
+static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
+{
+	int qc_idx;
 	int retiring = 0;
 	unsigned long iflags;
+	struct sdebug_queue *sqp;
 	struct sdebug_queued_cmd *sqcp;
 	struct scsi_cmnd *scp;
 	struct sdebug_dev_info *devip;
 
-	atomic_inc(&sdebug_completions);
-	qa_indx = indx;
-	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
-		pr_err("wild qa_indx=%d\n", qa_indx);
+	qc_idx = sd_dp->qc_idx;
+	sqp = sdebug_q_arr + sd_dp->sqa_idx;
+	if (sdebug_statistics) {
+		atomic_inc(&sdebug_completions);
+		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
+			atomic_inc(&sdebug_miss_cpus);
+	}
+	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
+		pr_err("wild qc_idx=%d\n", qc_idx);
 		return;
 	}
-	spin_lock_irqsave(&queued_arr_lock, iflags);
-	sqcp = &queued_arr[qa_indx];
+	spin_lock_irqsave(&sqp->qc_lock, iflags);
+	sqcp = &sqp->qc_arr[qc_idx];
 	scp = sqcp->a_cmnd;
-	if (NULL == scp) {
-		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("scp is NULL\n");
+	if (unlikely(scp == NULL)) {
+		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
+		       sd_dp->sqa_idx, qc_idx);
 		return;
 	}
 	devip = (struct sdebug_dev_info *)scp->device->hostdata;
-	if (devip)
+	if (likely(devip))
 		atomic_dec(&devip->num_in_q);
 	else
 		pr_err("devip=NULL\n");
-	if (atomic_read(&retired_max_queue) > 0)
+	if (unlikely(atomic_read(&retired_max_queue) > 0))
 		retiring = 1;
 
 	sqcp->a_cmnd = NULL;
-	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
-		spin_unlock_irqrestore(&queued_arr_lock, iflags);
+	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
+		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 		pr_err("Unexpected completion\n");
 		return;
 	}
@@ -3480,105 +3489,71 @@
 		int k, retval;
 
 		retval = atomic_read(&retired_max_queue);
-		if (qa_indx >= retval) {
-			spin_unlock_irqrestore(&queued_arr_lock, iflags);
+		if (qc_idx >= retval) {
+			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 			pr_err("index %d too large\n", retval);
 			return;
 		}
-		k = find_last_bit(queued_in_use_bm, retval);
-		if ((k < scsi_debug_max_queue) || (k == retval))
+		k = find_last_bit(sqp->in_use_bm, retval);
+		if ((k < sdebug_max_queue) || (k == retval))
 			atomic_set(&retired_max_queue, 0);
 		else
 			atomic_set(&retired_max_queue, k + 1);
 	}
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
+	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 	scp->scsi_done(scp); /* callback to mid level */
 }
 
 /* When high resolution timer goes off this function is called. */
-static enum hrtimer_restart
-sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
+static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
 {
-	int qa_indx;
-	int retiring = 0;
-	unsigned long iflags;
-	struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
-	struct sdebug_queued_cmd *sqcp;
-	struct scsi_cmnd *scp;
-	struct sdebug_dev_info *devip;
-
-	atomic_inc(&sdebug_completions);
-	qa_indx = sd_hrtp->qa_indx;
-	if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
-		pr_err("wild qa_indx=%d\n", qa_indx);
-		goto the_end;
-	}
-	spin_lock_irqsave(&queued_arr_lock, iflags);
-	sqcp = &queued_arr[qa_indx];
-	scp = sqcp->a_cmnd;
-	if (NULL == scp) {
-		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("scp is NULL\n");
-		goto the_end;
-	}
-	devip = (struct sdebug_dev_info *)scp->device->hostdata;
-	if (devip)
-		atomic_dec(&devip->num_in_q);
-	else
-		pr_err("devip=NULL\n");
-	if (atomic_read(&retired_max_queue) > 0)
-		retiring = 1;
-
-	sqcp->a_cmnd = NULL;
-	if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
-		spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		pr_err("Unexpected completion\n");
-		goto the_end;
-	}
-
-	if (unlikely(retiring)) {	/* user has reduced max_queue */
-		int k, retval;
-
-		retval = atomic_read(&retired_max_queue);
-		if (qa_indx >= retval) {
-			spin_unlock_irqrestore(&queued_arr_lock, iflags);
-			pr_err("index %d too large\n", retval);
-			goto the_end;
-		}
-		k = find_last_bit(queued_in_use_bm, retval);
-		if ((k < scsi_debug_max_queue) || (k == retval))
-			atomic_set(&retired_max_queue, 0);
-		else
-			atomic_set(&retired_max_queue, k + 1);
-	}
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
-	scp->scsi_done(scp); /* callback to mid level */
-the_end:
+	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
+						  hrt);
+	sdebug_q_cmd_complete(sd_dp);
 	return HRTIMER_NORESTART;
 }
 
-static struct sdebug_dev_info *
-sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
+/* When work queue schedules work, it calls this function. */
+static void sdebug_q_cmd_wq_complete(struct work_struct *work)
+{
+	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
+						  ew.work);
+	sdebug_q_cmd_complete(sd_dp);
+}
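Both deferred-completion paths introduced above (the hrtimer callback and the workqueue callback) recover the enclosing struct sdebug_defer with container_of() and then converge on sdebug_q_cmd_complete(). A minimal userspace rendering of that pattern, with stub structures invented for the example:

    #include <stdio.h>
    #include <stddef.h>

    /* Userspace rendering of the kernel's container_of(): step back from a
     * pointer to a member to the enclosing structure. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer_stub { int dummy; };       /* stands in for struct hrtimer */

    struct defer_stub {                     /* stands in for struct sdebug_defer */
        int qc_idx;
        struct timer_stub hrt;
    };

    static void timer_fired(struct timer_stub *t)
    {
        struct defer_stub *dp = container_of(t, struct defer_stub, hrt);

        printf("completing queued command at index %d\n", dp->qc_idx);
    }

    int main(void)
    {
        struct defer_stub d = { .qc_idx = 7 };

        timer_fired(&d.hrt);    /* only the member pointer crosses the callback */
        return 0;
    }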
+
+static bool got_shared_uuid;
+static uuid_be shared_uuid;
+
+static struct sdebug_dev_info *sdebug_device_create(
+			struct sdebug_host_info *sdbg_host, gfp_t flags)
 {
 	struct sdebug_dev_info *devip;
 
 	devip = kzalloc(sizeof(*devip), flags);
 	if (devip) {
+		if (sdebug_uuid_ctl == 1)
+			uuid_be_gen(&devip->lu_name);
+		else if (sdebug_uuid_ctl == 2) {
+			if (got_shared_uuid)
+				devip->lu_name = shared_uuid;
+			else {
+				uuid_be_gen(&shared_uuid);
+				got_shared_uuid = true;
+				devip->lu_name = shared_uuid;
+			}
+		}
 		devip->sdbg_host = sdbg_host;
 		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
 	}
 	return devip;
 }
 
-static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
+static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
 {
-	struct sdebug_host_info * sdbg_host;
-	struct sdebug_dev_info * open_devip = NULL;
-	struct sdebug_dev_info * devip =
-			(struct sdebug_dev_info *)sdev->hostdata;
+	struct sdebug_host_info *sdbg_host;
+	struct sdebug_dev_info *open_devip = NULL;
+	struct sdebug_dev_info *devip;
 
-	if (devip)
-		return devip;
 	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
 	if (!sdbg_host) {
 		pr_err("Host info NULL\n");
@@ -3614,7 +3589,7 @@
 
 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
 {
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+	if (sdebug_verbose)
 		pr_info("slave_alloc <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
@@ -3623,19 +3598,22 @@
 
 static int scsi_debug_slave_configure(struct scsi_device *sdp)
 {
-	struct sdebug_dev_info *devip;
+	struct sdebug_dev_info *devip =
+			(struct sdebug_dev_info *)sdp->hostdata;
 
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+	if (sdebug_verbose)
 		pr_info("slave_configure <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
-	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
-		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
-	devip = devInfoReg(sdp);
-	if (NULL == devip)
-		return 1;	/* no resources, will be marked offline */
+	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
+		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
+	if (devip == NULL) {
+		devip = find_build_dev_info(sdp);
+		if (devip == NULL)
+			return 1;  /* no resources, will be marked offline */
+	}
 	sdp->hostdata = devip;
 	blk_queue_max_segment_size(sdp->request_queue, -1U);
-	if (scsi_debug_no_uld)
+	if (sdebug_no_uld)
 		sdp->no_uld_attach = 1;
 	return 0;
 }
@@ -3645,7 +3623,7 @@
 	struct sdebug_dev_info *devip =
 		(struct sdebug_dev_info *)sdp->hostdata;
 
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+	if (sdebug_verbose)
 		pr_info("slave_destroy <%u %u %u %llu>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
 	if (devip) {
@@ -3655,135 +3633,130 @@
 	}
 }
 
-/* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
-static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
+static void stop_qc_helper(struct sdebug_defer *sd_dp)
+{
+	if (!sd_dp)
+		return;
+	if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
+		hrtimer_cancel(&sd_dp->hrt);
+	else if (sdebug_jdelay < 0)
+		cancel_work_sync(&sd_dp->ew.work);
+}
+
+/* If @cmnd is found, deletes its timer or work queue and returns true; else
+   returns false */
+static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
 {
 	unsigned long iflags;
-	int k, qmax, r_qmax;
+	int j, k, qmax, r_qmax;
+	struct sdebug_queue *sqp;
 	struct sdebug_queued_cmd *sqcp;
 	struct sdebug_dev_info *devip;
+	struct sdebug_defer *sd_dp;
 
-	spin_lock_irqsave(&queued_arr_lock, iflags);
-	qmax = scsi_debug_max_queue;
-	r_qmax = atomic_read(&retired_max_queue);
-	if (r_qmax > qmax)
-		qmax = r_qmax;
-	for (k = 0; k < qmax; ++k) {
-		if (test_bit(k, queued_in_use_bm)) {
-			sqcp = &queued_arr[k];
-			if (cmnd == sqcp->a_cmnd) {
+	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+		spin_lock_irqsave(&sqp->qc_lock, iflags);
+		qmax = sdebug_max_queue;
+		r_qmax = atomic_read(&retired_max_queue);
+		if (r_qmax > qmax)
+			qmax = r_qmax;
+		for (k = 0; k < qmax; ++k) {
+			if (test_bit(k, sqp->in_use_bm)) {
+				sqcp = &sqp->qc_arr[k];
+				if (cmnd != sqcp->a_cmnd)
+					continue;
+				/* found */
 				devip = (struct sdebug_dev_info *)
-					cmnd->device->hostdata;
+						cmnd->device->hostdata;
 				if (devip)
 					atomic_dec(&devip->num_in_q);
 				sqcp->a_cmnd = NULL;
-				spin_unlock_irqrestore(&queued_arr_lock,
-						       iflags);
-				if (scsi_debug_ndelay > 0) {
-					if (sqcp->sd_hrtp)
-						hrtimer_cancel(
-							&sqcp->sd_hrtp->hrt);
-				} else if (scsi_debug_delay > 0) {
-					if (sqcp->cmnd_timerp)
-						del_timer_sync(
-							sqcp->cmnd_timerp);
-				} else if (scsi_debug_delay < 0) {
-					if (sqcp->tletp)
-						tasklet_kill(sqcp->tletp);
-				}
-				clear_bit(k, queued_in_use_bm);
-				return 1;
+				sd_dp = sqcp->sd_dp;
+				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+				stop_qc_helper(sd_dp);
+				clear_bit(k, sqp->in_use_bm);
+				return true;
 			}
 		}
+		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 	}
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
-	return 0;
+	return false;
 }
 
-/* Deletes (stops) timers or tasklets of all queued commands */
+/* Deletes (stops) timers or work queues of all queued commands */
 static void stop_all_queued(void)
 {
 	unsigned long iflags;
-	int k;
+	int j, k;
+	struct sdebug_queue *sqp;
 	struct sdebug_queued_cmd *sqcp;
 	struct sdebug_dev_info *devip;
+	struct sdebug_defer *sd_dp;
 
-	spin_lock_irqsave(&queued_arr_lock, iflags);
-	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
-		if (test_bit(k, queued_in_use_bm)) {
-			sqcp = &queued_arr[k];
-			if (sqcp->a_cmnd) {
+	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+		spin_lock_irqsave(&sqp->qc_lock, iflags);
+		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
+			if (test_bit(k, sqp->in_use_bm)) {
+				sqcp = &sqp->qc_arr[k];
+				if (sqcp->a_cmnd == NULL)
+					continue;
 				devip = (struct sdebug_dev_info *)
 					sqcp->a_cmnd->device->hostdata;
 				if (devip)
 					atomic_dec(&devip->num_in_q);
 				sqcp->a_cmnd = NULL;
-				spin_unlock_irqrestore(&queued_arr_lock,
-						       iflags);
-				if (scsi_debug_ndelay > 0) {
-					if (sqcp->sd_hrtp)
-						hrtimer_cancel(
-							&sqcp->sd_hrtp->hrt);
-				} else if (scsi_debug_delay > 0) {
-					if (sqcp->cmnd_timerp)
-						del_timer_sync(
-							sqcp->cmnd_timerp);
-				} else if (scsi_debug_delay < 0) {
-					if (sqcp->tletp)
-						tasklet_kill(sqcp->tletp);
-				}
-				clear_bit(k, queued_in_use_bm);
-				spin_lock_irqsave(&queued_arr_lock, iflags);
+				sd_dp = sqcp->sd_dp;
+				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+				stop_qc_helper(sd_dp);
+				clear_bit(k, sqp->in_use_bm);
+				spin_lock_irqsave(&sqp->qc_lock, iflags);
 			}
 		}
+		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 	}
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
 }
 
 /* Free queued command memory on heap */
 static void free_all_queued(void)
 {
-	unsigned long iflags;
-	int k;
+	int j, k;
+	struct sdebug_queue *sqp;
 	struct sdebug_queued_cmd *sqcp;
 
-	spin_lock_irqsave(&queued_arr_lock, iflags);
-	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
-		sqcp = &queued_arr[k];
-		kfree(sqcp->cmnd_timerp);
-		sqcp->cmnd_timerp = NULL;
-		kfree(sqcp->tletp);
-		sqcp->tletp = NULL;
-		kfree(sqcp->sd_hrtp);
-		sqcp->sd_hrtp = NULL;
+	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
+			sqcp = &sqp->qc_arr[k];
+			kfree(sqcp->sd_dp);
+			sqcp->sd_dp = NULL;
+		}
 	}
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
 }
 
 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
 {
+	bool ok;
+
 	++num_aborts;
 	if (SCpnt) {
-		if (SCpnt->device &&
-		    (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
-			sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
-				    __func__);
-		stop_queued_cmnd(SCpnt);
+		ok = stop_queued_cmnd(SCpnt);
+		if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
+			sdev_printk(KERN_INFO, SCpnt->device,
+				    "%s: command%s found\n", __func__,
+				    ok ? "" : " not");
 	}
 	return SUCCESS;
 }
 
 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
 {
-	struct sdebug_dev_info * devip;
-
 	++num_dev_resets;
 	if (SCpnt && SCpnt->device) {
 		struct scsi_device *sdp = SCpnt->device;
+		struct sdebug_dev_info *devip =
+				(struct sdebug_dev_info *)sdp->hostdata;
 
-		if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
-		devip = devInfoReg(sdp);
 		if (devip)
 			set_bit(SDEBUG_UA_POR, devip->uas_bm);
 	}
@@ -3804,7 +3777,7 @@
 	sdp = SCpnt->device;
 	if (!sdp)
 		goto lie;
-	if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
 	hp = sdp->host;
 	if (!hp)
@@ -3819,7 +3792,7 @@
 				++k;
 			}
 	}
-	if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
 		sdev_printk(KERN_INFO, sdp,
 			    "%s: %d device(s) found in target\n", __func__, k);
 lie:
@@ -3838,7 +3811,7 @@
 	if (!(SCpnt && SCpnt->device))
 		goto lie;
 	sdp = SCpnt->device;
-	if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
 	hp = sdp->host;
 	if (hp) {
@@ -3852,7 +3825,7 @@
 			}
 		}
 	}
-	if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
 		sdev_printk(KERN_INFO, sdp,
 			    "%s: %d device(s) found in host\n", __func__, k);
 lie:
@@ -3866,7 +3839,7 @@
 	int k = 0;
 
 	++num_host_resets;
-	if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
+	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
 		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
         spin_lock(&sdebug_host_list_lock);
         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
@@ -3878,7 +3851,7 @@
         }
         spin_unlock(&sdebug_host_list_lock);
 	stop_all_queued();
-	if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
 		sdev_printk(KERN_INFO, SCpnt->device,
 			    "%s: %d device(s) found\n", __func__, k);
 	return SUCCESS;
@@ -3893,22 +3866,22 @@
 	int heads_by_sects, start_sec, end_sec;
 
 	/* assume partition table already zeroed */
-	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
+	if ((sdebug_num_parts < 1) || (store_size < 1048576))
 		return;
-	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
-		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
+	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
+		sdebug_num_parts = SDEBUG_MAX_PARTS;
 		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
 	}
 	num_sectors = (int)sdebug_store_sectors;
 	sectors_per_part = (num_sectors - sdebug_sectors_per)
-			   / scsi_debug_num_parts;
+			   / sdebug_num_parts;
 	heads_by_sects = sdebug_heads * sdebug_sectors_per;
         starts[0] = sdebug_sectors_per;
-	for (k = 1; k < scsi_debug_num_parts; ++k)
+	for (k = 1; k < sdebug_num_parts; ++k)
 		starts[k] = ((k * sectors_per_part) / heads_by_sects)
 			    * heads_by_sects;
-	starts[scsi_debug_num_parts] = num_sectors;
-	starts[scsi_debug_num_parts + 1] = 0;
+	starts[sdebug_num_parts] = num_sectors;
+	starts[sdebug_num_parts + 1] = 0;
 
 	ramp[510] = 0x55;	/* magic partition markings */
 	ramp[511] = 0xAA;
@@ -3934,67 +3907,118 @@
 	}
 }
 
-static int
-schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
-	      int scsi_result, int delta_jiff)
+static void block_unblock_all_queues(bool block)
+{
+	int j;
+	struct sdebug_queue *sqp;
+
+	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
+		atomic_set(&sqp->blocked, (int)block);
+}
+
+/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
+ * commands will be processed normally before triggers occur.
+ */
+static void tweak_cmnd_count(void)
+{
+	int count, modulo;
+
+	modulo = abs(sdebug_every_nth);
+	if (modulo < 2)
+		return;
+	block_unblock_all_queues(true);
+	count = atomic_read(&sdebug_cmnd_count);
+	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
+	block_unblock_all_queues(false);
+}
+
+static void clear_queue_stats(void)
+{
+	atomic_set(&sdebug_cmnd_count, 0);
+	atomic_set(&sdebug_completions, 0);
+	atomic_set(&sdebug_miss_cpus, 0);
+	atomic_set(&sdebug_a_tsf, 0);
+}
+
+static void setup_inject(struct sdebug_queue *sqp,
+			 struct sdebug_queued_cmd *sqcp)
+{
+	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
+		return;
+	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
+	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
+	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
+	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
+	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
+}
+
+/* Complete the processing of the thread that queued a SCSI command to this
+ * driver. It either completes the command by calling cmnd_done() or
+ * schedules an hrtimer or work queue item and then returns 0. Returns
+ * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
+ */
+static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
+			 int scsi_result, int delta_jiff)
 {
 	unsigned long iflags;
 	int k, num_in_q, qdepth, inject;
-	struct sdebug_queued_cmd *sqcp = NULL;
+	struct sdebug_queue *sqp;
+	struct sdebug_queued_cmd *sqcp;
 	struct scsi_device *sdp;
+	struct sdebug_defer *sd_dp;
 
-	/* this should never happen */
-	if (WARN_ON(!cmnd))
-		return SCSI_MLQUEUE_HOST_BUSY;
-
-	if (NULL == devip) {
-		pr_warn("called devip == NULL\n");
-		/* no particularly good error to report back */
-		return SCSI_MLQUEUE_HOST_BUSY;
+	if (unlikely(devip == NULL)) {
+		if (scsi_result == 0)
+			scsi_result = DID_NO_CONNECT << 16;
+		goto respond_in_thread;
 	}
-
 	sdp = cmnd->device;
 
-	if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+	if (unlikely(sdebug_verbose && scsi_result))
 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
 			    __func__, scsi_result);
 	if (delta_jiff == 0)
 		goto respond_in_thread;
 
 	/* schedule the response at a later time if resources permit */
-	spin_lock_irqsave(&queued_arr_lock, iflags);
+	sqp = get_queue(cmnd);
+	spin_lock_irqsave(&sqp->qc_lock, iflags);
+	if (unlikely(atomic_read(&sqp->blocked))) {
+		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
 	num_in_q = atomic_read(&devip->num_in_q);
 	qdepth = cmnd->device->queue_depth;
 	inject = 0;
-	if ((qdepth > 0) && (num_in_q >= qdepth)) {
+	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
 		if (scsi_result) {
-			spin_unlock_irqrestore(&queued_arr_lock, iflags);
+			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 			goto respond_in_thread;
 		} else
 			scsi_result = device_qfull_result;
-	} else if ((scsi_debug_every_nth != 0) &&
-		   (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
-		   (scsi_result == 0)) {
+	} else if (unlikely(sdebug_every_nth &&
+			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
+			    (scsi_result == 0))) {
 		if ((num_in_q == (qdepth - 1)) &&
 		    (atomic_inc_return(&sdebug_a_tsf) >=
-		     abs(scsi_debug_every_nth))) {
+		     abs(sdebug_every_nth))) {
 			atomic_set(&sdebug_a_tsf, 0);
 			inject = 1;
 			scsi_result = device_qfull_result;
 		}
 	}
 
-	k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
-	if (k >= scsi_debug_max_queue) {
-		spin_unlock_irqrestore(&queued_arr_lock, iflags);
+	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
+	if (unlikely(k >= sdebug_max_queue)) {
+		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 		if (scsi_result)
 			goto respond_in_thread;
-		else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
+		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
 			scsi_result = device_qfull_result;
-		if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
+		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
 			sdev_printk(KERN_INFO, sdp,
 				    "%s: max_queue=%d exceeded, %s\n",
-				    __func__, scsi_debug_max_queue,
+				    __func__, sdebug_max_queue,
 				    (scsi_result ?  "status: TASK SET FULL" :
 						    "report: host busy"));
 		if (scsi_result)
@@ -4002,55 +4026,56 @@
 		else
 			return SCSI_MLQUEUE_HOST_BUSY;
 	}
-	__set_bit(k, queued_in_use_bm);
+	__set_bit(k, sqp->in_use_bm);
 	atomic_inc(&devip->num_in_q);
-	sqcp = &queued_arr[k];
+	sqcp = &sqp->qc_arr[k];
 	sqcp->a_cmnd = cmnd;
+	cmnd->host_scribble = (unsigned char *)sqcp;
 	cmnd->result = scsi_result;
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
-	if (delta_jiff > 0) {
-		if (NULL == sqcp->cmnd_timerp) {
-			sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
-						    GFP_ATOMIC);
-			if (NULL == sqcp->cmnd_timerp)
-				return SCSI_MLQUEUE_HOST_BUSY;
-			init_timer(sqcp->cmnd_timerp);
-		}
-		sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
-		sqcp->cmnd_timerp->data = k;
-		sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
-		add_timer(sqcp->cmnd_timerp);
-	} else if (scsi_debug_ndelay > 0) {
-		ktime_t kt = ktime_set(0, scsi_debug_ndelay);
-		struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
+	sd_dp = sqcp->sd_dp;
+	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
+		setup_inject(sqp, sqcp);
+	if (delta_jiff > 0 || sdebug_ndelay > 0) {
+		ktime_t kt;
 
-		if (NULL == sd_hp) {
-			sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
-			if (NULL == sd_hp)
+		if (delta_jiff > 0) {
+			struct timespec ts;
+
+			jiffies_to_timespec(delta_jiff, &ts);
+			kt = ktime_set(ts.tv_sec, ts.tv_nsec);
+		} else
+			kt = ktime_set(0, sdebug_ndelay);
+		if (NULL == sd_dp) {
+			sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
+			if (NULL == sd_dp)
 				return SCSI_MLQUEUE_HOST_BUSY;
-			sqcp->sd_hrtp = sd_hp;
-			hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
-				     HRTIMER_MODE_REL);
-			sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
-			sd_hp->qa_indx = k;
+			sqcp->sd_dp = sd_dp;
+			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
+				     HRTIMER_MODE_REL_PINNED);
+			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+			sd_dp->sqa_idx = sqp - sdebug_q_arr;
+			sd_dp->qc_idx = k;
 		}
-		hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
-	} else {	/* delay < 0 */
-		if (NULL == sqcp->tletp) {
-			sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
-					      GFP_ATOMIC);
-			if (NULL == sqcp->tletp)
+		if (sdebug_statistics)
+			sd_dp->issuing_cpu = raw_smp_processor_id();
+		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
+	} else {	/* jdelay < 0, use work queue */
+		if (NULL == sd_dp) {
+			sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
+			if (NULL == sd_dp)
 				return SCSI_MLQUEUE_HOST_BUSY;
-			tasklet_init(sqcp->tletp,
-				     sdebug_q_cmd_complete, k);
+			sqcp->sd_dp = sd_dp;
+			sd_dp->sqa_idx = sqp - sdebug_q_arr;
+			sd_dp->qc_idx = k;
+			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
 		}
-		if (-1 == delta_jiff)
-			tasklet_hi_schedule(sqcp->tletp);
-		else
-			tasklet_schedule(sqcp->tletp);
+		if (sdebug_statistics)
+			sd_dp->issuing_cpu = raw_smp_processor_id();
+		schedule_work(&sd_dp->ew.work);
 	}
-	if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
-	    (scsi_result == device_qfull_result))
+	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
+		     (scsi_result == device_qfull_result)))
 		sdev_printk(KERN_INFO, sdp,
 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
 			    num_in_q, (inject ? "<inject> " : ""),
@@ -4069,52 +4094,55 @@
    as it can when the corresponding attribute in the
    /sys/bus/pseudo/drivers/scsi_debug directory is changed.
  */
-module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
-module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
-module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
-module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
-module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
-module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
-module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
-module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
-module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
-module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
-module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
-module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
-module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
-module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
-module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
-module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
-module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
-module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
-module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
-module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
-module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
-module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
-module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
-module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
-module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
-module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
-module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
-module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
-module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
-module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
-module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
-module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
-module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
-module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
-module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
-module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
-module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
-module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
+module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
+module_param_named(ato, sdebug_ato, int, S_IRUGO);
+module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
+module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
+module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
+module_param_named(dif, sdebug_dif, int, S_IRUGO);
+module_param_named(dix, sdebug_dix, int, S_IRUGO);
+module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
+module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
+module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
+module_param_named(guard, sdebug_guard, uint, S_IRUGO);
+module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
+module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
+module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
+module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
+module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
+module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
+module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
+module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
+module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
+module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
+module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
+module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
+module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
+module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
+module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
+module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
+module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
+module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
+module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
+module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
+module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
+module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
+module_param_named(submit_queues, submit_queues, int, S_IRUGO);
+module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
+module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
+module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
+module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
+module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
+module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
+module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
 		   S_IRUGO | S_IWUSR);
-module_param_named(write_same_length, scsi_debug_write_same_length, int,
+module_param_named(write_same_length, sdebug_write_same_length, int,
 		   S_IRUGO | S_IWUSR);
 
 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
 MODULE_DESCRIPTION("SCSI debug adapter driver");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(SCSI_DEBUG_VERSION);
+MODULE_VERSION(SDEBUG_VERSION);
 
 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
@@ -4127,11 +4155,12 @@
 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
-MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
+MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
-MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
+MODULE_PARM_DESC(lbprz,
+	"on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
@@ -4145,30 +4174,42 @@
 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
-MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
+MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
+MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
+MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
+MODULE_PARM_DESC(uuid_ctl,
+		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
 
-static char sdebug_info[256];
+#define SDEBUG_INFO_LEN 256
+static char sdebug_info[SDEBUG_INFO_LEN];
 
 static const char * scsi_debug_info(struct Scsi_Host * shp)
 {
-	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
-		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
-		scsi_debug_version_date, scsi_debug_dev_size_mb,
-		scsi_debug_opts);
+	int k;
+
+	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
+		      my_name, SDEBUG_VERSION, sdebug_version_date);
+	if (k >= (SDEBUG_INFO_LEN - 1))
+		return sdebug_info;
+	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
+		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
+		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
+		  "statistics", (int)sdebug_statistics);
 	return sdebug_info;
 }
 
 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
-static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
+static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
+				 int length)
 {
 	char arr[16];
 	int opts;
@@ -4180,9 +4221,11 @@
 	arr[minLen] = '\0';
 	if (1 != sscanf(arr, "%d", &opts))
 		return -EINVAL;
-	scsi_debug_opts = opts;
-	if (scsi_debug_every_nth != 0)
-		atomic_set(&sdebug_cmnd_count, 0);
+	sdebug_opts = opts;
+	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
+	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
+	if (sdebug_every_nth != 0)
+		tweak_cmnd_count();
 	return length;
 }
 
@@ -4191,69 +4234,83 @@
  * output are not atomics so might be inaccurate in a busy system. */
 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
 {
-	int f, l;
-	char b[32];
+	int f, j, l;
+	struct sdebug_queue *sqp;
 
-	if (scsi_debug_every_nth > 0)
-		snprintf(b, sizeof(b), " (curr:%d)",
-			 ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
-				atomic_read(&sdebug_a_tsf) :
-				atomic_read(&sdebug_cmnd_count)));
-	else
-		b[0] = '\0';
+	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
+		   SDEBUG_VERSION, sdebug_version_date);
+	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
+		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
+		   sdebug_opts, sdebug_every_nth);
+	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
+		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
+		   sdebug_sector_size, "bytes");
+	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
+		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
+		   num_aborts);
+	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
+		   num_dev_resets, num_target_resets, num_bus_resets,
+		   num_host_resets);
+	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
+		   dix_reads, dix_writes, dif_errors);
+	seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
+		   TICK_NSEC / 1000, "statistics", sdebug_statistics,
+		   sdebug_mq_active);
+	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
+		   atomic_read(&sdebug_cmnd_count),
+		   atomic_read(&sdebug_completions),
+		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
+		   atomic_read(&sdebug_a_tsf));
 
-	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
-		"num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
-		"every_nth=%d%s\n"
-		"delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
-		"sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
-		"command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
-		"host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
-		"usec_in_jiffy=%lu\n",
-		SCSI_DEBUG_VERSION, scsi_debug_version_date,
-		scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
-		scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
-		scsi_debug_max_luns, atomic_read(&sdebug_completions),
-		scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
-		sdebug_sectors_per, num_aborts, num_dev_resets,
-		num_target_resets, num_bus_resets, num_host_resets,
-		dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
-
-	f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
-	if (f != scsi_debug_max_queue) {
-		l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
-		seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
-			   "queued_in_use_bm", f, l);
+	seq_printf(m, "submit_queues=%d\n", submit_queues);
+	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+		seq_printf(m, "  queue %d:\n", j);
+		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+		if (f != sdebug_max_queue) {
+			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
+			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
+				   "first,last bits", f, l);
+		}
 	}
 	return 0;
 }
 
 static ssize_t delay_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
 }
-/* Returns -EBUSY if delay is being changed and commands are queued */
+/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
+ * of delay is jiffies.
+ */
 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
 			   size_t count)
 {
-	int delay, res;
+	int jdelay, res;
 
-	if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
+	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
 		res = count;
-		if (scsi_debug_delay != delay) {
-			unsigned long iflags;
-			int k;
+		if (sdebug_jdelay != jdelay) {
+			int j, k;
+			struct sdebug_queue *sqp;
 
-			spin_lock_irqsave(&queued_arr_lock, iflags);
-			k = find_first_bit(queued_in_use_bm,
-					   scsi_debug_max_queue);
-			if (k != scsi_debug_max_queue)
-				res = -EBUSY;	/* have queued commands */
-			else {
-				scsi_debug_delay = delay;
-				scsi_debug_ndelay = 0;
+			block_unblock_all_queues(true);
+			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
+			     ++j, ++sqp) {
+				k = find_first_bit(sqp->in_use_bm,
+						   sdebug_max_queue);
+				if (k != sdebug_max_queue) {
+					res = -EBUSY;   /* queued commands */
+					break;
+				}
 			}
-			spin_unlock_irqrestore(&queued_arr_lock, iflags);
+			if (res > 0) {
+				/* make sure sdebug_defer instances get
+				 * re-allocated for new delay variant */
+				free_all_queued();
+				sdebug_jdelay = jdelay;
+				sdebug_ndelay = 0;
+			}
+			block_unblock_all_queues(false);
 		}
 		return res;
 	}
@@ -4263,31 +4320,41 @@
 
 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
 }
 /* Returns -EBUSY if ndelay is being changed and commands are queued */
-/* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
+/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
-			   size_t count)
+			    size_t count)
 {
-	unsigned long iflags;
-	int ndelay, res, k;
+	int ndelay, res;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
-	    (ndelay >= 0) && (ndelay < 1000000000)) {
+	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
 		res = count;
-		if (scsi_debug_ndelay != ndelay) {
-			spin_lock_irqsave(&queued_arr_lock, iflags);
-			k = find_first_bit(queued_in_use_bm,
-					   scsi_debug_max_queue);
-			if (k != scsi_debug_max_queue)
-				res = -EBUSY;	/* have queued commands */
-			else {
-				scsi_debug_ndelay = ndelay;
-				scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
-							  : DEF_DELAY;
+		if (sdebug_ndelay != ndelay) {
+			int j, k;
+			struct sdebug_queue *sqp;
+
+			block_unblock_all_queues(true);
+			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
+			     ++j, ++sqp) {
+				k = find_first_bit(sqp->in_use_bm,
+						   sdebug_max_queue);
+				if (k != sdebug_max_queue) {
+					res = -EBUSY;   /* queued commands */
+					break;
+				}
 			}
-			spin_unlock_irqrestore(&queued_arr_lock, iflags);
+			if (res > 0) {
+				/* make sure sdebug_defer instances get
+				 * re-allocated for new delay variant */
+				free_all_queued();
+				sdebug_ndelay = ndelay;
+				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
+							: DEF_JDELAY;
+			}
+			block_unblock_all_queues(false);
 		}
 		return res;
 	}
@@ -4297,7 +4364,7 @@
 
 static ssize_t opts_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
 }
 
 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
@@ -4317,26 +4384,17 @@
 	}
 	return -EINVAL;
 opts_done:
-	scsi_debug_opts = opts;
-	if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
-		sdebug_any_injecting_opt = true;
-	else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
-		sdebug_any_injecting_opt = true;
-	else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
-		sdebug_any_injecting_opt = true;
-	else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
-		sdebug_any_injecting_opt = true;
-	else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
-		sdebug_any_injecting_opt = true;
-	atomic_set(&sdebug_cmnd_count, 0);
-	atomic_set(&sdebug_a_tsf, 0);
+	sdebug_opts = opts;
+	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
+	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
+	tweak_cmnd_count();
 	return count;
 }
 static DRIVER_ATTR_RW(opts);
 
 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
 }
 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
 			   size_t count)
@@ -4344,7 +4402,7 @@
         int n;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
-		scsi_debug_ptype = n;
+		sdebug_ptype = n;
 		return count;
 	}
 	return -EINVAL;
@@ -4353,7 +4411,7 @@
 
 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
 }
 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
 			    size_t count)
@@ -4361,7 +4419,7 @@
         int n;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
-		scsi_debug_dsense = n;
+		sdebug_dsense = n;
 		return count;
 	}
 	return -EINVAL;
@@ -4370,7 +4428,7 @@
 
 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
 }
 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
 			     size_t count)
@@ -4379,11 +4437,11 @@
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
 		n = (n > 0);
-		scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
-		if (scsi_debug_fake_rw != n) {
+		sdebug_fake_rw = (sdebug_fake_rw > 0);
+		if (sdebug_fake_rw != n) {
 			if ((0 == n) && (NULL == fake_storep)) {
 				unsigned long sz =
-					(unsigned long)scsi_debug_dev_size_mb *
+					(unsigned long)sdebug_dev_size_mb *
 					1048576;
 
 				fake_storep = vmalloc(sz);
@@ -4393,7 +4451,7 @@
 				}
 				memset(fake_storep, 0, sz);
 			}
-			scsi_debug_fake_rw = n;
+			sdebug_fake_rw = n;
 		}
 		return count;
 	}
@@ -4403,7 +4461,7 @@
 
 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
 }
 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
 			      size_t count)
@@ -4411,7 +4469,7 @@
         int n;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
-		scsi_debug_no_lun_0 = n;
+		sdebug_no_lun_0 = n;
 		return count;
 	}
 	return -EINVAL;
@@ -4420,7 +4478,7 @@
 
 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
 }
 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
 			      size_t count)
@@ -4428,7 +4486,7 @@
         int n;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
-		scsi_debug_num_tgts = n;
+		sdebug_num_tgts = n;
 		sdebug_max_tgts_luns();
 		return count;
 	}
@@ -4438,19 +4496,19 @@
 
 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
 }
 static DRIVER_ATTR_RO(dev_size_mb);
 
 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
 }
 static DRIVER_ATTR_RO(num_parts);
 
 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
 }
 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
 			       size_t count)
@@ -4458,8 +4516,12 @@
         int nth;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
-		scsi_debug_every_nth = nth;
-		atomic_set(&sdebug_cmnd_count, 0);
+		sdebug_every_nth = nth;
+		if (nth && !sdebug_statistics) {
+			pr_info("every_nth needs statistics=1, set it\n");
+			sdebug_statistics = true;
+		}
+		tweak_cmnd_count();
 		return count;
 	}
 	return -EINVAL;
@@ -4468,7 +4530,7 @@
 
 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
 }
 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
 			      size_t count)
@@ -4477,10 +4539,14 @@
 	bool changed;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
-		changed = (scsi_debug_max_luns != n);
-		scsi_debug_max_luns = n;
+		if (n > 256) {
+			pr_warn("max_luns can be no more than 256\n");
+			return -EINVAL;
+		}
+		changed = (sdebug_max_luns != n);
+		sdebug_max_luns = n;
 		sdebug_max_tgts_luns();
-		if (changed && (scsi_debug_scsi_level >= 5)) {	/* >= SPC-3 */
+		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
 			struct sdebug_host_info *sdhp;
 			struct sdebug_dev_info *dp;
 
@@ -4503,28 +4569,34 @@
 
 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
 }
 /* N.B. max_queue can be changed while there are queued commands. In flight
  * commands beyond the new max_queue will be completed. */
 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
 			       size_t count)
 {
-	unsigned long iflags;
-	int n, k;
+	int j, n, k, a;
+	struct sdebug_queue *sqp;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
-	    (n <= SCSI_DEBUG_CANQUEUE)) {
-		spin_lock_irqsave(&queued_arr_lock, iflags);
-		k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
-		scsi_debug_max_queue = n;
-		if (SCSI_DEBUG_CANQUEUE == k)
+	    (n <= SDEBUG_CANQUEUE)) {
+		block_unblock_all_queues(true);
+		k = 0;
+		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
+		     ++j, ++sqp) {
+			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
+			if (a > k)
+				k = a;
+		}
+		sdebug_max_queue = n;
+		if (k == SDEBUG_CANQUEUE)
 			atomic_set(&retired_max_queue, 0);
 		else if (k >= n)
 			atomic_set(&retired_max_queue, k + 1);
 		else
 			atomic_set(&retired_max_queue, 0);
-		spin_unlock_irqrestore(&queued_arr_lock, iflags);
+		block_unblock_all_queues(false);
 		return count;
 	}
 	return -EINVAL;
@@ -4533,19 +4605,19 @@
 
 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
 }
 static DRIVER_ATTR_RO(no_uld);
 
 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
 }
 static DRIVER_ATTR_RO(scsi_level);
 
 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
 }
 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
 				size_t count)
@@ -4554,8 +4626,8 @@
 	bool changed;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
-		changed = (scsi_debug_virtual_gb != n);
-		scsi_debug_virtual_gb = n;
+		changed = (sdebug_virtual_gb != n);
+		sdebug_virtual_gb = n;
 		sdebug_capacity = get_sdebug_capacity();
 		if (changed) {
 			struct sdebug_host_info *sdhp;
@@ -4580,9 +4652,12 @@
 
 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
 {
-        return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
 }
 
+static int sdebug_add_adapter(void);
+static void sdebug_remove_adapter(void);
+
 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
 			      size_t count)
 {
@@ -4605,7 +4680,7 @@
 
 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
 }
 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
 				    size_t count)
@@ -4613,40 +4688,68 @@
 	int n;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
-		scsi_debug_vpd_use_hostno = n;
+		sdebug_vpd_use_hostno = n;
 		return count;
 	}
 	return -EINVAL;
 }
 static DRIVER_ATTR_RW(vpd_use_hostno);
 
+static ssize_t statistics_show(struct device_driver *ddp, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
+}
+static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
+				size_t count)
+{
+	int n;
+
+	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
+		if (n > 0)
+			sdebug_statistics = true;
+		else {
+			clear_queue_stats();
+			sdebug_statistics = false;
+		}
+		return count;
+	}
+	return -EINVAL;
+}
+static DRIVER_ATTR_RW(statistics);
+
 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
+	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
 }
 static DRIVER_ATTR_RO(sector_size);
 
+static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
+}
+static DRIVER_ATTR_RO(submit_queues);
+
 static ssize_t dix_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
 }
 static DRIVER_ATTR_RO(dix);
 
 static ssize_t dif_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
 }
 static DRIVER_ATTR_RO(dif);
 
 static ssize_t guard_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
+	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
 }
 static DRIVER_ATTR_RO(guard);
 
 static ssize_t ato_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
 }
 static DRIVER_ATTR_RO(ato);
 
@@ -4669,7 +4772,7 @@
 
 static ssize_t removable_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
 }
 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
 			       size_t count)
@@ -4677,7 +4780,7 @@
 	int n;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
-		scsi_debug_removable = (n > 0);
+		sdebug_removable = (n > 0);
 		return count;
 	}
 	return -EINVAL;
@@ -4686,32 +4789,17 @@
 
 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
 }
-/* Returns -EBUSY if host_lock is being changed and commands are queued */
+/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
 			       size_t count)
 {
-	int n, res;
+	int n;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
-		bool new_host_lock = (n > 0);
-
-		res = count;
-		if (new_host_lock != scsi_debug_host_lock) {
-			unsigned long iflags;
-			int k;
-
-			spin_lock_irqsave(&queued_arr_lock, iflags);
-			k = find_first_bit(queued_in_use_bm,
-					   scsi_debug_max_queue);
-			if (k != scsi_debug_max_queue)
-				res = -EBUSY;	/* have queued commands */
-			else
-				scsi_debug_host_lock = new_host_lock;
-			spin_unlock_irqrestore(&queued_arr_lock, iflags);
-		}
-		return res;
+		sdebug_host_lock = (n > 0);
+		return count;
 	}
 	return -EINVAL;
 }
@@ -4719,7 +4807,7 @@
 
 static ssize_t strict_show(struct device_driver *ddp, char *buf)
 {
-	return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
 }
 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
 			    size_t count)
@@ -4727,13 +4815,19 @@
 	int n;
 
 	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
-		scsi_debug_strict = (n > 0);
+		sdebug_strict = (n > 0);
 		return count;
 	}
 	return -EINVAL;
 }
 static DRIVER_ATTR_RW(strict);
 
+static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
+}
+static DRIVER_ATTR_RO(uuid_ctl);
+
 
 /* Note: The following array creates attribute files in the
    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -4761,6 +4855,8 @@
 	&driver_attr_add_host.attr,
 	&driver_attr_vpd_use_hostno.attr,
 	&driver_attr_sector_size.attr,
+	&driver_attr_statistics.attr,
+	&driver_attr_submit_queues.attr,
 	&driver_attr_dix.attr,
 	&driver_attr_dif.attr,
 	&driver_attr_guard.attr,
@@ -4770,6 +4866,7 @@
 	&driver_attr_host_lock.attr,
 	&driver_attr_ndelay.attr,
 	&driver_attr_strict.attr,
+	&driver_attr_uuid_ctl.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(sdebug_drv);
@@ -4783,33 +4880,33 @@
 	int k;
 	int ret;
 
-	atomic_set(&sdebug_cmnd_count, 0);
-	atomic_set(&sdebug_completions, 0);
 	atomic_set(&retired_max_queue, 0);
 
-	if (scsi_debug_ndelay >= 1000000000) {
+	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
 		pr_warn("ndelay must be less than 1 second, ignored\n");
-		scsi_debug_ndelay = 0;
-	} else if (scsi_debug_ndelay > 0)
-		scsi_debug_delay = DELAY_OVERRIDDEN;
+		sdebug_ndelay = 0;
+	} else if (sdebug_ndelay > 0)
+		sdebug_jdelay = JDELAY_OVERRIDDEN;
 
-	switch (scsi_debug_sector_size) {
+	switch (sdebug_sector_size) {
 	case  512:
 	case 1024:
 	case 2048:
 	case 4096:
 		break;
 	default:
-		pr_err("invalid sector_size %d\n", scsi_debug_sector_size);
+		pr_err("invalid sector_size %d\n", sdebug_sector_size);
 		return -EINVAL;
 	}
 
-	switch (scsi_debug_dif) {
+	switch (sdebug_dif) {
 
 	case SD_DIF_TYPE0_PROTECTION:
+		break;
 	case SD_DIF_TYPE1_PROTECTION:
 	case SD_DIF_TYPE2_PROTECTION:
 	case SD_DIF_TYPE3_PROTECTION:
+		have_dif_prot = true;
 		break;
 
 	default:
@@ -4817,39 +4914,53 @@
 		return -EINVAL;
 	}
 
-	if (scsi_debug_guard > 1) {
+	if (sdebug_guard > 1) {
 		pr_err("guard must be 0 or 1\n");
 		return -EINVAL;
 	}
 
-	if (scsi_debug_ato > 1) {
+	if (sdebug_ato > 1) {
 		pr_err("ato must be 0 or 1\n");
 		return -EINVAL;
 	}
 
-	if (scsi_debug_physblk_exp > 15) {
-		pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp);
+	if (sdebug_physblk_exp > 15) {
+		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
+		return -EINVAL;
+	}
+	if (sdebug_max_luns > 256) {
+		pr_warn("max_luns can be no more than 256, use default\n");
+		sdebug_max_luns = DEF_MAX_LUNS;
+	}
+
+	if (sdebug_lowest_aligned > 0x3fff) {
+		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
 		return -EINVAL;
 	}
 
-	if (scsi_debug_lowest_aligned > 0x3fff) {
-		pr_err("lowest_aligned too big: %u\n",
-			scsi_debug_lowest_aligned);
+	if (submit_queues < 1) {
+		pr_err("submit_queues must be 1 or more\n");
 		return -EINVAL;
 	}
+	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
+			       GFP_KERNEL);
+	if (sdebug_q_arr == NULL)
+		return -ENOMEM;
+	for (k = 0; k < submit_queues; ++k)
+		spin_lock_init(&sdebug_q_arr[k].qc_lock);
 
-	if (scsi_debug_dev_size_mb < 1)
-		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
-	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
-	sdebug_store_sectors = sz / scsi_debug_sector_size;
+	if (sdebug_dev_size_mb < 1)
+		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
+	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
+	sdebug_store_sectors = sz / sdebug_sector_size;
 	sdebug_capacity = get_sdebug_capacity();
 
 	/* play around with geometry, don't waste too much on track 0 */
 	sdebug_heads = 8;
 	sdebug_sectors_per = 32;
-	if (scsi_debug_dev_size_mb >= 256)
+	if (sdebug_dev_size_mb >= 256)
 		sdebug_heads = 64;
-	else if (scsi_debug_dev_size_mb >= 16)
+	else if (sdebug_dev_size_mb >= 16)
 		sdebug_heads = 32;
 	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
 			       (sdebug_sectors_per * sdebug_heads);
@@ -4861,18 +4972,19 @@
 			       (sdebug_sectors_per * sdebug_heads);
 	}
 
-	if (0 == scsi_debug_fake_rw) {
+	if (sdebug_fake_rw == 0) {
 		fake_storep = vmalloc(sz);
 		if (NULL == fake_storep) {
 			pr_err("out of memory, 1\n");
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto free_q_arr;
 		}
 		memset(fake_storep, 0, sz);
-		if (scsi_debug_num_parts > 0)
+		if (sdebug_num_parts > 0)
 			sdebug_build_parts(fake_storep, sz);
 	}
 
-	if (scsi_debug_dix) {
+	if (sdebug_dix) {
 		int dif_size;
 
 		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
@@ -4891,20 +5003,21 @@
 
 	/* Logical Block Provisioning */
 	if (scsi_debug_lbp()) {
-		scsi_debug_unmap_max_blocks =
-			clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
+		sdebug_unmap_max_blocks =
+			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
 
-		scsi_debug_unmap_max_desc =
-			clamp(scsi_debug_unmap_max_desc, 0U, 256U);
+		sdebug_unmap_max_desc =
+			clamp(sdebug_unmap_max_desc, 0U, 256U);
 
-		scsi_debug_unmap_granularity =
-			clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
+		sdebug_unmap_granularity =
+			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
 
-		if (scsi_debug_unmap_alignment &&
-		    scsi_debug_unmap_granularity <=
-		    scsi_debug_unmap_alignment) {
+		if (sdebug_unmap_alignment &&
+		    sdebug_unmap_granularity <=
+		    sdebug_unmap_alignment) {
 			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
-			return -EINVAL;
+			ret = -EINVAL;
+			goto free_vm;
 		}
 
 		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
@@ -4921,7 +5034,7 @@
 		bitmap_zero(map_storep, map_size);
 
 		/* Map first 1KB for partition table */
-		if (scsi_debug_num_parts)
+		if (sdebug_num_parts)
 			map_region(0, 2);
 	}
 
@@ -4942,8 +5055,8 @@
 		goto bus_unreg;
 	}
 
-	host_to_add = scsi_debug_add_host;
-        scsi_debug_add_host = 0;
+	host_to_add = sdebug_add_host;
+	sdebug_add_host = 0;
 
         for (k = 0; k < host_to_add; k++) {
                 if (sdebug_add_adapter()) {
@@ -4952,8 +5065,8 @@
                 }
         }
 
-	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
-		pr_info("built %d host(s)\n", scsi_debug_add_host);
+	if (sdebug_verbose)
+		pr_info("built %d host(s)\n", sdebug_add_host);
 
 	return 0;
 
@@ -4965,13 +5078,14 @@
 	vfree(map_storep);
 	vfree(dif_storep);
 	vfree(fake_storep);
-
+free_q_arr:
+	kfree(sdebug_q_arr);
 	return ret;
 }
 
 static void __exit scsi_debug_exit(void)
 {
-	int k = scsi_debug_add_host;
+	int k = sdebug_add_host;
 
 	stop_all_queued();
 	free_all_queued();
@@ -4983,6 +5097,7 @@
 
 	vfree(dif_storep);
 	vfree(fake_storep);
+	kfree(sdebug_q_arr);
 }
 
 device_initcall(scsi_debug_init);
@@ -5011,7 +5126,7 @@
 
         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
 
-	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
+	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
         for (k = 0; k < devs_per_host; k++) {
 		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
 		if (!sdbg_devinfo) {
@@ -5028,14 +5143,14 @@
         sdbg_host->dev.bus = &pseudo_lld_bus;
         sdbg_host->dev.parent = pseudo_primary;
         sdbg_host->dev.release = &sdebug_release_adapter;
-        dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
+	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
 
         error = device_register(&sdbg_host->dev);
 
         if (error)
 		goto clean;
 
-	++scsi_debug_add_host;
+	++sdebug_add_host;
         return error;
 
 clean:
@@ -5064,78 +5179,54 @@
 	if (!sdbg_host)
 		return;
 
-        device_unregister(&sdbg_host->dev);
-        --scsi_debug_add_host;
+	device_unregister(&sdbg_host->dev);
+	--sdebug_add_host;
 }
 
-static int
-sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
+static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
 {
 	int num_in_q = 0;
-	unsigned long iflags;
 	struct sdebug_dev_info *devip;
 
-	spin_lock_irqsave(&queued_arr_lock, iflags);
+	block_unblock_all_queues(true);
 	devip = (struct sdebug_dev_info *)sdev->hostdata;
 	if (NULL == devip) {
-		spin_unlock_irqrestore(&queued_arr_lock, iflags);
+		block_unblock_all_queues(false);
 		return	-ENODEV;
 	}
 	num_in_q = atomic_read(&devip->num_in_q);
-	spin_unlock_irqrestore(&queued_arr_lock, iflags);
 
 	if (qdepth < 1)
 		qdepth = 1;
-	/* allow to exceed max host queued_arr elements for testing */
-	if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
-		qdepth = SCSI_DEBUG_CANQUEUE + 10;
+	/* allow exceeding max host qc_arr elements for testing */
+	if (qdepth > SDEBUG_CANQUEUE + 10)
+		qdepth = SDEBUG_CANQUEUE + 10;
 	scsi_change_queue_depth(sdev, qdepth);
 
-	if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
-		sdev_printk(KERN_INFO, sdev,
-			    "%s: qdepth=%d, num_in_q=%d\n",
+	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
+		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
 			    __func__, qdepth, num_in_q);
 	}
+	block_unblock_all_queues(false);
 	return sdev->queue_depth;
 }
 
-static int
-check_inject(struct scsi_cmnd *scp)
+static bool fake_timeout(struct scsi_cmnd *scp)
 {
-	struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
-
-	memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
-
-	if (atomic_inc_return(&sdebug_cmnd_count) >=
-	    abs(scsi_debug_every_nth)) {
-		atomic_set(&sdebug_cmnd_count, 0);
-		if (scsi_debug_every_nth < -1)
-			scsi_debug_every_nth = -1;
-		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
-			return 1; /* ignore command causing timeout */
-		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
+	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
+		if (sdebug_every_nth < -1)
+			sdebug_every_nth = -1;
+		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
+			return true; /* ignore command causing timeout */
+		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
 			 scsi_medium_access_command(scp))
-			return 1; /* time out reads and writes */
-		if (sdebug_any_injecting_opt) {
-			int opts = scsi_debug_opts;
-
-			if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
-				ep->inj_recovered = true;
-			else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
-				ep->inj_transport = true;
-			else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
-				ep->inj_dif = true;
-			else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
-				ep->inj_dix = true;
-			else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
-				ep->inj_short = true;
-		}
+			return true; /* time out reads and writes */
 	}
-	return 0;
+	return false;
 }
 
-static int
-scsi_debug_queuecommand(struct scsi_cmnd *scp)
+static int scsi_debug_queuecommand(struct Scsi_Host *shost,
+				   struct scsi_cmnd *scp)
 {
 	u8 sdeb_i;
 	struct scsi_device *sdp = scp->device;
@@ -5146,15 +5237,16 @@
 	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
 	int k, na;
 	int errsts = 0;
-	int errsts_no_connect = DID_NO_CONNECT << 16;
 	u32 flags;
 	u16 sa;
 	u8 opcode = cmd[0];
 	bool has_wlun_rl;
-	bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
 
 	scsi_set_resid(scp, 0);
-	if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
+	if (sdebug_statistics)
+		atomic_inc(&sdebug_cmnd_count);
+	if (unlikely(sdebug_verbose &&
+		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
 		char b[120];
 		int n, len, sb;
 
@@ -5167,19 +5259,25 @@
 				n += scnprintf(b + n, sb - n, "%02x ",
 					       (u32)cmd[k]);
 		}
-		sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
+		if (sdebug_mq_active)
+			sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
+				    my_name, blk_mq_unique_tag(scp->request),
+				    b);
+		else
+			sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
+				    b);
 	}
 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
-	if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
-		return schedule_resp(scp, NULL, errsts_no_connect, 0);
+	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
+		goto err_out;
 
 	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
 	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
 	devip = (struct sdebug_dev_info *)sdp->hostdata;
-	if (!devip) {
-		devip = devInfoReg(sdp);
+	if (unlikely(!devip)) {
+		devip = find_build_dev_info(sdp);
 		if (NULL == devip)
-			return schedule_resp(scp, NULL, errsts_no_connect, 0);
+			goto err_out;
 	}
 	na = oip->num_attached;
 	r_pfp = oip->pfp;
@@ -5211,18 +5309,18 @@
 		}
 	}	/* else (when na==0) we assume the oip is a match */
 	flags = oip->flags;
-	if (F_INV_OP & flags) {
+	if (unlikely(F_INV_OP & flags)) {
 		mk_sense_invalid_opcode(scp);
 		goto check_cond;
 	}
-	if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
-		if (debug)
-			sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
-				    "0x%x not supported for wlun\n", opcode);
+	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
+		if (sdebug_verbose)
+			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
+				    my_name, opcode, " supported for wlun");
 		mk_sense_invalid_opcode(scp);
 		goto check_cond;
 	}
-	if (scsi_debug_strict) {	/* check cdb against mask */
+	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
 		u8 rem;
 		int j;
 
@@ -5238,52 +5336,40 @@
 			}
 		}
 	}
-	if (!(F_SKIP_UA & flags) &&
-	    SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
-		errsts = check_readiness(scp, UAS_ONLY, devip);
+	if (unlikely(!(F_SKIP_UA & flags) &&
+		     find_first_bit(devip->uas_bm,
+				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
+		errsts = make_ua(scp, devip);
 		if (errsts)
 			goto check_cond;
 	}
-	if ((F_M_ACCESS & flags) && devip->stopped) {
+	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
 		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
-		if (debug)
+		if (sdebug_verbose)
 			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
 				    "%s\n", my_name, "initializing command "
 				    "required");
 		errsts = check_condition_result;
 		goto fini;
 	}
-	if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
+	if (sdebug_fake_rw && (F_FAKE_RW & flags))
 		goto fini;
-	if (scsi_debug_every_nth) {
-		if (check_inject(scp))
+	if (unlikely(sdebug_every_nth)) {
+		if (fake_timeout(scp))
 			return 0;	/* ignore command: make trouble */
 	}
-	if (oip->pfp)	/* if this command has a resp_* function, call it */
-		errsts = oip->pfp(scp, devip);
+	if (likely(oip->pfp))
+		errsts = oip->pfp(scp, devip);	/* calls a resp_* function */
 	else if (r_pfp)	/* if leaf function ptr NULL, try the root's */
 		errsts = r_pfp(scp, devip);
 
 fini:
 	return schedule_resp(scp, devip, errsts,
-			     ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
+			     ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
 check_cond:
 	return schedule_resp(scp, devip, check_condition_result, 0);
-}
-
-static int
-sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
-{
-	if (scsi_debug_host_lock) {
-		unsigned long iflags;
-		int rc;
-
-		spin_lock_irqsave(shost->host_lock, iflags);
-		rc = scsi_debug_queuecommand(cmd);
-		spin_unlock_irqrestore(shost->host_lock, iflags);
-		return rc;
-	} else
-		return scsi_debug_queuecommand(cmd);
+err_out:
+	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
 }
 
 static struct scsi_host_template sdebug_driver_template = {
@@ -5296,36 +5382,34 @@
 	.slave_configure =	scsi_debug_slave_configure,
 	.slave_destroy =	scsi_debug_slave_destroy,
 	.ioctl =		scsi_debug_ioctl,
-	.queuecommand =		sdebug_queuecommand_lock_or_not,
+	.queuecommand =		scsi_debug_queuecommand,
 	.change_queue_depth =	sdebug_change_qdepth,
 	.eh_abort_handler =	scsi_debug_abort,
 	.eh_device_reset_handler = scsi_debug_device_reset,
 	.eh_target_reset_handler = scsi_debug_target_reset,
 	.eh_bus_reset_handler = scsi_debug_bus_reset,
 	.eh_host_reset_handler = scsi_debug_host_reset,
-	.can_queue =		SCSI_DEBUG_CANQUEUE,
+	.can_queue =		SDEBUG_CANQUEUE,
 	.this_id =		7,
-	.sg_tablesize =		SCSI_MAX_SG_CHAIN_SEGMENTS,
+	.sg_tablesize =		SG_MAX_SEGMENTS,
 	.cmd_per_lun =		DEF_CMD_PER_LUN,
 	.max_sectors =		-1U,
 	.use_clustering = 	DISABLE_CLUSTERING,
 	.module =		THIS_MODULE,
 	.track_queue_depth =	1,
-	.cmd_size =		sizeof(struct sdebug_scmd_extra_t),
 };
 
 static int sdebug_driver_probe(struct device * dev)
 {
 	int error = 0;
-	int opts;
 	struct sdebug_host_info *sdbg_host;
 	struct Scsi_Host *hpnt;
-	int host_prot;
+	int hprot;
 
 	sdbg_host = to_sdebug_host(dev);
 
-	sdebug_driver_template.can_queue = scsi_debug_max_queue;
-	if (scsi_debug_clustering)
+	sdebug_driver_template.can_queue = sdebug_max_queue;
+	if (sdebug_clustering)
 		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
 	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
 	if (NULL == hpnt) {
@@ -5333,72 +5417,75 @@
 		error = -ENODEV;
 		return error;
 	}
+	if (submit_queues > nr_cpu_ids) {
+		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
+			my_name, submit_queues, nr_cpu_ids);
+		submit_queues = nr_cpu_ids;
+	}
+	/* Decide whether to tell scsi subsystem that we want mq */
+	/* Following should give the same answer for each host */
+	sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
+	if (sdebug_mq_active)
+		hpnt->nr_hw_queues = submit_queues;
 
         sdbg_host->shost = hpnt;
 	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
-	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
-		hpnt->max_id = scsi_debug_num_tgts + 1;
+	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
+		hpnt->max_id = sdebug_num_tgts + 1;
 	else
-		hpnt->max_id = scsi_debug_num_tgts;
-	/* = scsi_debug_max_luns; */
+		hpnt->max_id = sdebug_num_tgts;
+	/* = sdebug_max_luns; */
 	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
 
-	host_prot = 0;
+	hprot = 0;
 
-	switch (scsi_debug_dif) {
+	switch (sdebug_dif) {
 
 	case SD_DIF_TYPE1_PROTECTION:
-		host_prot = SHOST_DIF_TYPE1_PROTECTION;
-		if (scsi_debug_dix)
-			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
+		hprot = SHOST_DIF_TYPE1_PROTECTION;
+		if (sdebug_dix)
+			hprot |= SHOST_DIX_TYPE1_PROTECTION;
 		break;
 
 	case SD_DIF_TYPE2_PROTECTION:
-		host_prot = SHOST_DIF_TYPE2_PROTECTION;
-		if (scsi_debug_dix)
-			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
+		hprot = SHOST_DIF_TYPE2_PROTECTION;
+		if (sdebug_dix)
+			hprot |= SHOST_DIX_TYPE2_PROTECTION;
 		break;
 
 	case SD_DIF_TYPE3_PROTECTION:
-		host_prot = SHOST_DIF_TYPE3_PROTECTION;
-		if (scsi_debug_dix)
-			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
+		hprot = SHOST_DIF_TYPE3_PROTECTION;
+		if (sdebug_dix)
+			hprot |= SHOST_DIX_TYPE3_PROTECTION;
 		break;
 
 	default:
-		if (scsi_debug_dix)
-			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
+		if (sdebug_dix)
+			hprot |= SHOST_DIX_TYPE0_PROTECTION;
 		break;
 	}
 
-	scsi_host_set_prot(hpnt, host_prot);
+	scsi_host_set_prot(hpnt, hprot);
 
-	pr_info("host protection%s%s%s%s%s%s%s\n",
-	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
-	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
-	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
-	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
-	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
-	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
-	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
+	if (have_dif_prot || sdebug_dix)
+		pr_info("host protection%s%s%s%s%s%s%s\n",
+			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
+			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
+			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
+			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
+			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
+			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
+			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
 
-	if (scsi_debug_guard == 1)
+	if (sdebug_guard == 1)
 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
 	else
 		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
 
-	opts = scsi_debug_opts;
-	if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
-		sdebug_any_injecting_opt = true;
-	else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
-		sdebug_any_injecting_opt = true;
-	else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
-		sdebug_any_injecting_opt = true;
-	else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
-		sdebug_any_injecting_opt = true;
-	else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
-		sdebug_any_injecting_opt = true;
-
+	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
+	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
+	if (sdebug_every_nth)	/* need stats counters for every_nth */
+		sdebug_statistics = true;
         error = scsi_add_host(hpnt, &sdbg_host->dev);
         if (error) {
 		pr_err("scsi_add_host failed\n");
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8106515..b2e332a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -14,8 +14,6 @@
 #include <linux/completion.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
-#include <linux/mempool.h>
-#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
@@ -40,39 +38,6 @@
 #include "scsi_logging.h"
 
 
-#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
-#define SG_MEMPOOL_SIZE		2
-
-struct scsi_host_sg_pool {
-	size_t		size;
-	char		*name;
-	struct kmem_cache	*slab;
-	mempool_t	*pool;
-};
-
-#define SP(x) { .size = x, "sgpool-" __stringify(x) }
-#if (SCSI_MAX_SG_SEGMENTS < 32)
-#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
-#endif
-static struct scsi_host_sg_pool scsi_sg_pools[] = {
-	SP(8),
-	SP(16),
-#if (SCSI_MAX_SG_SEGMENTS > 32)
-	SP(32),
-#if (SCSI_MAX_SG_SEGMENTS > 64)
-	SP(64),
-#if (SCSI_MAX_SG_SEGMENTS > 128)
-	SP(128),
-#if (SCSI_MAX_SG_SEGMENTS > 256)
-#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
-#endif
-#endif
-#endif
-#endif
-	SP(SCSI_MAX_SG_SEGMENTS)
-};
-#undef SP
-
 struct kmem_cache *scsi_sdb_cache;
 
 /*
@@ -553,66 +518,6 @@
 		scsi_run_queue(sdev->request_queue);
 }
 
-static inline unsigned int scsi_sgtable_index(unsigned short nents)
-{
-	unsigned int index;
-
-	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
-
-	if (nents <= 8)
-		index = 0;
-	else
-		index = get_count_order(nents) - 3;
-
-	return index;
-}
-
-static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
-{
-	struct scsi_host_sg_pool *sgp;
-
-	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
-	mempool_free(sgl, sgp->pool);
-}
-
-static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
-{
-	struct scsi_host_sg_pool *sgp;
-
-	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
-	return mempool_alloc(sgp->pool, gfp_mask);
-}
-
-static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
-{
-	if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
-		return;
-	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
-}
-
-static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
-{
-	struct scatterlist *first_chunk = NULL;
-	int ret;
-
-	BUG_ON(!nents);
-
-	if (mq) {
-		if (nents <= SCSI_MAX_SG_SEGMENTS) {
-			sdb->table.nents = sdb->table.orig_nents = nents;
-			sg_init_table(sdb->table.sgl, nents);
-			return 0;
-		}
-		first_chunk = sdb->table.sgl;
-	}
-
-	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
-			       first_chunk, GFP_ATOMIC, scsi_sg_alloc);
-	if (unlikely(ret))
-		scsi_free_sgtable(sdb, mq);
-	return ret;
-}
-
 static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
 {
 	if (cmd->request->cmd_type == REQ_TYPE_FS) {
@@ -625,12 +530,17 @@
 
 static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
 {
+	struct scsi_data_buffer *sdb;
+
 	if (cmd->sdb.table.nents)
-		scsi_free_sgtable(&cmd->sdb, true);
-	if (cmd->request->next_rq && cmd->request->next_rq->special)
-		scsi_free_sgtable(cmd->request->next_rq->special, true);
+		sg_free_table_chained(&cmd->sdb.table, true);
+	if (cmd->request->next_rq) {
+		sdb = cmd->request->next_rq->special;
+		if (sdb)
+			sg_free_table_chained(&sdb->table, true);
+	}
 	if (scsi_prot_sg_count(cmd))
-		scsi_free_sgtable(cmd->prot_sdb, true);
+		sg_free_table_chained(&cmd->prot_sdb->table, true);
 }
 
 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
@@ -669,19 +579,19 @@
 static void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
 	if (cmd->sdb.table.nents)
-		scsi_free_sgtable(&cmd->sdb, false);
+		sg_free_table_chained(&cmd->sdb.table, false);
 
 	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
 
 	if (scsi_prot_sg_count(cmd))
-		scsi_free_sgtable(cmd->prot_sdb, false);
+		sg_free_table_chained(&cmd->prot_sdb->table, false);
 }
 
 static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
 {
 	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
 
-	scsi_free_sgtable(bidi_sdb, false);
+	sg_free_table_chained(&bidi_sdb->table, false);
 	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
 	cmd->request->next_rq->special = NULL;
 }
@@ -1085,8 +995,8 @@
 	/*
 	 * If sg table allocation fails, requeue request later.
 	 */
-	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
-					req->mq_ctx != NULL)))
+	if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments,
+					sdb->table.sgl)))
 		return BLKPREP_DEFER;
 
 	/* 
@@ -1158,7 +1068,8 @@
 
 		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
-		if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
+		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
+				prot_sdb->table.sgl)) {
 			error = BLKPREP_DEFER;
 			goto err_exit;
 		}
@@ -1932,7 +1843,7 @@
 	if (scsi_host_get_prot(shost)) {
 		cmd->prot_sdb = (void *)sg +
 			min_t(unsigned int,
-			      shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
+			      shost->sg_tablesize, SG_CHUNK_SIZE) *
 			sizeof(struct scatterlist);
 		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
 
@@ -2105,7 +2016,7 @@
 	 * this limit is imposed by hardware restrictions
 	 */
 	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
-					SCSI_MAX_SG_CHAIN_SEGMENTS));
+					SG_MAX_SEGMENTS));
 
 	if (scsi_host_prot_dma(shost)) {
 		shost->sg_prot_tablesize =
@@ -2187,8 +2098,8 @@
 	unsigned int cmd_size, sgl_size, tbl_size;
 
 	tbl_size = shost->sg_tablesize;
-	if (tbl_size > SCSI_MAX_SG_SEGMENTS)
-		tbl_size = SCSI_MAX_SG_SEGMENTS;
+	if (tbl_size > SG_CHUNK_SIZE)
+		tbl_size = SG_CHUNK_SIZE;
 	sgl_size = tbl_size * sizeof(struct scatterlist);
 	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
 	if (scsi_host_get_prot(shost))
@@ -2264,8 +2175,6 @@
 
 int __init scsi_init_queue(void)
 {
-	int i;
-
 	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
 					   sizeof(struct scsi_data_buffer),
 					   0, 0, NULL);
@@ -2274,53 +2183,12 @@
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < SG_MEMPOOL_NR; i++) {
-		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
-		int size = sgp->size * sizeof(struct scatterlist);
-
-		sgp->slab = kmem_cache_create(sgp->name, size, 0,
-				SLAB_HWCACHE_ALIGN, NULL);
-		if (!sgp->slab) {
-			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
-					sgp->name);
-			goto cleanup_sdb;
-		}
-
-		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
-						     sgp->slab);
-		if (!sgp->pool) {
-			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
-					sgp->name);
-			goto cleanup_sdb;
-		}
-	}
-
 	return 0;
-
-cleanup_sdb:
-	for (i = 0; i < SG_MEMPOOL_NR; i++) {
-		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
-		if (sgp->pool)
-			mempool_destroy(sgp->pool);
-		if (sgp->slab)
-			kmem_cache_destroy(sgp->slab);
-	}
-	kmem_cache_destroy(scsi_sdb_cache);
-
-	return -ENOMEM;
 }
 
 void scsi_exit_queue(void)
 {
-	int i;
-
 	kmem_cache_destroy(scsi_sdb_cache);
-
-	for (i = 0; i < SG_MEMPOOL_NR; i++) {
-		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
-		mempool_destroy(sgp->pool);
-		kmem_cache_destroy(sgp->slab);
-	}
 }
 
 /**
@@ -3196,6 +3064,7 @@
 	 * - EUI-64 based 12-byte
 	 * - NAA IEEE Registered
 	 * - NAA IEEE Extended
+	 * - T10 Vendor ID
 	 * as longer descriptors reduce the likelihood
 	 * of identification clashes.
 	 */
@@ -3214,6 +3083,21 @@
 			goto next_desig;
 
 		switch (d[1] & 0xf) {
+		case 0x1:
+			/* T10 Vendor ID */
+			if (cur_id_size > d[3])
+				break;
+			/* Prefer anything */
+			if (cur_id_type > 0x01 && cur_id_type != 0xff)
+				break;
+			cur_id_size = d[3];
+			if (cur_id_size + 4 > id_len)
+				cur_id_size = id_len - 4;
+			cur_id_str = d + 4;
+			cur_id_type = d[1] & 0xf;
+			id_size = snprintf(id, id_len, "t10.%*pE",
+					   cur_id_size, cur_id_str);
+			break;
 		case 0x2:
 			/* EUI-64 */
 			if (cur_id_size > d[3])
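The hunks above drop the driver-private scsi_sg_pools/mempool machinery in favour of the generic chained scatterlist helpers. A minimal sketch of the allocate/free pairing as scsi_lib.c now uses it (illustration only, not part of the patch; assumes sdb and req are set up as in scsi_init_sgtable()):

	#include <linux/blkdev.h>
	#include <linux/scatterlist.h>
	#include <scsi/scsi_cmnd.h>

	static int example_alloc_sgtable(struct scsi_data_buffer *sdb,
					 struct request *req)
	{
		/* Passing sdb->table.sgl as first_chunk lets small tables use
		 * the scatterlist already carved out of the command allocation;
		 * larger tables chain additional pages behind it. */
		if (sg_alloc_table_chained(&sdb->table, req->nr_phys_segments,
					   sdb->table.sgl))
			return BLKPREP_DEFER;	/* requeue the request later */
		return BLKPREP_OK;
	}

	static void example_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
	{
		/* mq == true when the first chunk was preallocated inline */
		sg_free_table_chained(&sdb->table, mq);
	}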
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 27b4d0a..57a4b99 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -116,7 +116,7 @@
 extern char scsi_scan_type[];
 extern int scsi_complete_async_scans(void);
 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
-				   unsigned int, u64, int);
+				   unsigned int, u64, enum scsi_scan_mode);
 extern void scsi_forget_host(struct Scsi_Host *);
 extern void scsi_rescan_device(struct device *);
 
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 251598e..7a74b82 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -251,7 +251,8 @@
 	if (shost->transportt->user_scan)
 		error = shost->transportt->user_scan(shost, channel, id, lun);
 	else
-		error = scsi_scan_host_selected(shost, channel, id, lun, 1);
+		error = scsi_scan_host_selected(shost, channel, id, lun,
+						SCSI_SCAN_MANUAL);
 	scsi_host_put(shost);
 	return error;
 }
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 97074c9..e0a78f5 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -96,10 +96,13 @@
 #define SCSI_SCAN_TYPE_DEFAULT "sync"
 #endif
 
-char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
+char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
 
-module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
-MODULE_PARM_DESC(scan, "sync, async or none");
+module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
+		    S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
+		 "Setting to 'manual' disables automatic scanning, but allows "
+		 "for manual device scan via the 'scan' sysfs attribute.");
 
 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
 
@@ -316,6 +319,7 @@
 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
 	unsigned long flags;
 
+	BUG_ON(starget->state == STARGET_DEL);
 	starget->state = STARGET_DEL;
 	transport_destroy_device(dev);
 	spin_lock_irqsave(shost->host_lock, flags);
@@ -1040,7 +1044,8 @@
  * @lun:	LUN of target device
  * @bflagsp:	store bflags here if not NULL
  * @sdevp:	probe the LUN corresponding to this scsi_device
- * @rescan:     if nonzero skip some code only needed on first scan
+ * @rescan:     if not equal to SCSI_SCAN_INITIAL skip some code only
+ *              needed on first scan
  * @hostdata:	passed to scsi_alloc_sdev()
  *
  * Description:
@@ -1055,7 +1060,8 @@
  **/
 static int scsi_probe_and_add_lun(struct scsi_target *starget,
 				  u64 lun, int *bflagsp,
-				  struct scsi_device **sdevp, int rescan,
+				  struct scsi_device **sdevp,
+				  enum scsi_scan_mode rescan,
 				  void *hostdata)
 {
 	struct scsi_device *sdev;
@@ -1069,7 +1075,7 @@
 	 */
 	sdev = scsi_device_lookup_by_target(starget, lun);
 	if (sdev) {
-		if (rescan || !scsi_device_created(sdev)) {
+		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
 			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
 				"scsi scan: device exists on %s\n",
 				dev_name(&sdev->sdev_gendev)));
@@ -1205,7 +1211,8 @@
  *     Modifies sdevscan->lun.
  **/
 static void scsi_sequential_lun_scan(struct scsi_target *starget,
-				     int bflags, int scsi_level, int rescan)
+				     int bflags, int scsi_level,
+				     enum scsi_scan_mode rescan)
 {
 	uint max_dev_lun;
 	u64 sparse_lun, lun;
@@ -1300,7 +1307,7 @@
  *     1: could not scan with REPORT LUN
  **/
 static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
-				int rescan)
+				enum scsi_scan_mode rescan)
 {
 	char devname[64];
 	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1546,7 +1553,7 @@
 EXPORT_SYMBOL(scsi_rescan_device);
 
 static void __scsi_scan_target(struct device *parent, unsigned int channel,
-		unsigned int id, u64 lun, int rescan)
+		unsigned int id, u64 lun, enum scsi_scan_mode rescan)
 {
 	struct Scsi_Host *shost = dev_to_shost(parent);
 	int bflags = 0;
@@ -1604,7 +1611,10 @@
  * @channel:	channel to scan
  * @id:		target id to scan
  * @lun:	Specific LUN to scan or SCAN_WILD_CARD
- * @rescan:	passed to LUN scanning routines
+ * @rescan:	passed to LUN scanning routines; SCSI_SCAN_INITIAL for
+ *              no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
+ *              and SCSI_SCAN_MANUAL to force scanning even if
+ *              'scan=manual' is set.
  *
  * Description:
  *     Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
@@ -1614,13 +1624,17 @@
  *     sequential scan of LUNs on the target id.
  **/
 void scsi_scan_target(struct device *parent, unsigned int channel,
-		      unsigned int id, u64 lun, int rescan)
+		      unsigned int id, u64 lun, enum scsi_scan_mode rescan)
 {
 	struct Scsi_Host *shost = dev_to_shost(parent);
 
 	if (strncmp(scsi_scan_type, "none", 4) == 0)
 		return;
 
+	if (rescan != SCSI_SCAN_MANUAL &&
+	    strncmp(scsi_scan_type, "manual", 6) == 0)
+		return;
+
 	mutex_lock(&shost->scan_mutex);
 	if (!shost->async_scan)
 		scsi_complete_async_scans();
@@ -1634,7 +1648,8 @@
 EXPORT_SYMBOL(scsi_scan_target);
 
 static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
-			      unsigned int id, u64 lun, int rescan)
+			      unsigned int id, u64 lun,
+			      enum scsi_scan_mode rescan)
 {
 	uint order_id;
 
@@ -1665,7 +1680,8 @@
 }
 
 int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
-			    unsigned int id, u64 lun, int rescan)
+			    unsigned int id, u64 lun,
+			    enum scsi_scan_mode rescan)
 {
 	SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
 		"%s: <%u:%u:%llu>\n",
@@ -1844,7 +1860,8 @@
 {
 	struct async_scan_data *data;
 
-	if (strncmp(scsi_scan_type, "none", 4) == 0)
+	if (strncmp(scsi_scan_type, "none", 4) == 0 ||
+	    strncmp(scsi_scan_type, "manual", 6) == 0)
 		return;
 	if (scsi_autopm_get_host(shost) < 0)
 		return;
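Taken together with the callers converted below (sysfs/proc, FC, iSCSI, SAS), the old int rescan flag becomes a three-way enum scsi_scan_mode. A short sketch of which mode each kind of caller is expected to pass, and why user-requested scans keep working with scan=manual (illustration only, not part of the patch; scsi_scan_host_selected() is declared in drivers/scsi/scsi_priv.h):

	#include <scsi/scsi_device.h>
	#include <scsi/scsi_host.h>

	static void example_scan_modes(struct Scsi_Host *shost, struct device *parent)
	{
		/* boot-time / initial discovery: skipped when scan=manual */
		scsi_scan_target(parent, 0, 0, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);

		/* transport-driven rescan of a known target (e.g. an FC rport
		 * coming online): also skipped when scan=manual */
		scsi_scan_target(parent, 0, 0, SCAN_WILD_CARD, SCSI_SCAN_RESCAN);

		/* explicit user request via the 'scan' sysfs/proc interface:
		 * SCSI_SCAN_MANUAL bypasses the scan=manual guard above */
		scsi_scan_host_selected(shost, 0, 0, SCAN_WILD_CARD,
					SCSI_SCAN_MANUAL);
	}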
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 92ffd24..0734927 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -81,6 +81,7 @@
 	return name;
 }
 
+#ifdef CONFIG_SCSI_DH
 static const struct {
 	unsigned char	value;
 	char		*name;
@@ -94,7 +95,7 @@
 	{ SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" },
 };
 
-const char *scsi_access_state_name(unsigned char state)
+static const char *scsi_access_state_name(unsigned char state)
 {
 	int i;
 	char *name = NULL;
@@ -107,6 +108,7 @@
 	}
 	return name;
 }
+#endif
 
 static int check_set(unsigned long long *val, char *src)
 {
@@ -143,7 +145,8 @@
 	if (shost->transportt->user_scan)
 		res = shost->transportt->user_scan(shost, channel, id, lun);
 	else
-		res = scsi_scan_host_selected(shost, channel, id, lun, 1);
+		res = scsi_scan_host_selected(shost, channel, id, lun,
+					      SCSI_SCAN_MANUAL);
 	return res;
 }
 
@@ -226,7 +229,7 @@
 }
 
 /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
-struct device_attribute dev_attr_hstate =
+static struct device_attribute dev_attr_hstate =
 	__ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
 
 static ssize_t
@@ -401,7 +404,7 @@
 	NULL
 };
 
-struct attribute_group scsi_shost_attr_group = {
+static struct attribute_group scsi_shost_attr_group = {
 	.attrs =	scsi_sysfs_shost_attrs,
 };
 
@@ -1364,18 +1367,18 @@
 void scsi_remove_target(struct device *dev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
-	struct scsi_target *starget, *last_target = NULL;
+	struct scsi_target *starget;
 	unsigned long flags;
 
 restart:
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_for_each_entry(starget, &shost->__targets, siblings) {
 		if (starget->state == STARGET_DEL ||
-		    starget == last_target)
+		    starget->state == STARGET_REMOVE)
 			continue;
 		if (starget->dev.parent == dev || &starget->dev == dev) {
 			kref_get(&starget->reap_ref);
-			last_target = starget;
+			starget->state = STARGET_REMOVE;
 			spin_unlock_irqrestore(shost->host_lock, flags);
 			__scsi_remove_target(starget);
 			scsi_target_reap(starget);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 08bb47b..0ff083b 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -17,6 +17,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/trace_seq.h>
+#include <asm/unaligned.h>
 #include <trace/events/scsi.h>
 
 #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
@@ -231,6 +232,158 @@
 }
 
 static const char *
+scsi_trace_maintenance_in(struct trace_seq *p, unsigned char *cdb, int len)
+{
+	const char *ret = trace_seq_buffer_ptr(p), *cmd;
+	u32 alloc_len;
+
+	switch (SERVICE_ACTION16(cdb)) {
+	case MI_REPORT_IDENTIFYING_INFORMATION:
+		cmd = "REPORT_IDENTIFYING_INFORMATION";
+		break;
+	case MI_REPORT_TARGET_PGS:
+		cmd = "REPORT_TARGET_PORT_GROUPS";
+		break;
+	case MI_REPORT_ALIASES:
+		cmd = "REPORT_ALIASES";
+		break;
+	case MI_REPORT_SUPPORTED_OPERATION_CODES:
+		cmd = "REPORT_SUPPORTED_OPERATION_CODES";
+		break;
+	case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
+		cmd = "REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS";
+		break;
+	case MI_REPORT_PRIORITY:
+		cmd = "REPORT_PRIORITY";
+		break;
+	case MI_REPORT_TIMESTAMP:
+		cmd = "REPORT_TIMESTAMP";
+		break;
+	case MI_MANAGEMENT_PROTOCOL_IN:
+		cmd = "MANAGEMENT_PROTOCOL_IN";
+		break;
+	default:
+		trace_seq_puts(p, "UNKNOWN");
+		goto out;
+	}
+
+	alloc_len = get_unaligned_be32(&cdb[6]);
+
+	trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len);
+
+out:
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *
+scsi_trace_maintenance_out(struct trace_seq *p, unsigned char *cdb, int len)
+{
+	const char *ret = trace_seq_buffer_ptr(p), *cmd;
+	u32 alloc_len;
+
+	switch (SERVICE_ACTION16(cdb)) {
+	case MO_SET_IDENTIFYING_INFORMATION:
+		cmd = "SET_IDENTIFYING_INFORMATION";
+		break;
+	case MO_SET_TARGET_PGS:
+		cmd = "SET_TARGET_PORT_GROUPS";
+		break;
+	case MO_CHANGE_ALIASES:
+		cmd = "CHANGE_ALIASES";
+		break;
+	case MO_SET_PRIORITY:
+		cmd = "SET_PRIORITY";
+		break;
+	case MO_SET_TIMESTAMP:
+		cmd = "SET_TIMESTAMP";
+		break;
+	case MO_MANAGEMENT_PROTOCOL_OUT:
+		cmd = "MANAGEMENT_PROTOCOL_OUT";
+		break;
+	default:
+		trace_seq_puts(p, "UNKNOWN");
+		goto out;
+	}
+
+	alloc_len = get_unaligned_be32(&cdb[6]);
+
+	trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len);
+
+out:
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *
+scsi_trace_zbc_in(struct trace_seq *p, unsigned char *cdb, int len)
+{
+	const char *ret = trace_seq_buffer_ptr(p), *cmd;
+	u64 zone_id;
+	u32 alloc_len;
+	u8 options;
+
+	switch (SERVICE_ACTION16(cdb)) {
+	case ZI_REPORT_ZONES:
+		cmd = "REPORT_ZONES";
+		break;
+	default:
+		trace_seq_puts(p, "UNKNOWN");
+		goto out;
+	}
+
+	zone_id = get_unaligned_be64(&cdb[2]);
+	alloc_len = get_unaligned_be32(&cdb[10]);
+	options = cdb[14] & 0x3f;
+
+	trace_seq_printf(p, "%s zone=%llu alloc_len=%u options=%u partial=%u",
+			 cmd, (unsigned long long)zone_id, alloc_len,
+			 options, (cdb[14] >> 7) & 1);
+
+out:
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *
+scsi_trace_zbc_out(struct trace_seq *p, unsigned char *cdb, int len)
+{
+	const char *ret = trace_seq_buffer_ptr(p), *cmd;
+	u64 zone_id;
+
+	switch (SERVICE_ACTION16(cdb)) {
+	case ZO_CLOSE_ZONE:
+		cmd = "CLOSE_ZONE";
+		break;
+	case ZO_FINISH_ZONE:
+		cmd = "FINISH_ZONE";
+		break;
+	case ZO_OPEN_ZONE:
+		cmd = "OPEN_ZONE";
+		break;
+	case ZO_RESET_WRITE_POINTER:
+		cmd = "RESET_WRITE_POINTER";
+		break;
+	default:
+		trace_seq_puts(p, "UNKNOWN");
+		goto out;
+	}
+
+	zone_id = get_unaligned_be64(&cdb[2]);
+
+	trace_seq_printf(p, "%s zone=%llu all=%u", cmd,
+			 (unsigned long long)zone_id, cdb[14] & 1);
+
+out:
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+
+static const char *
 scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
 {
 	switch (SERVICE_ACTION32(cdb)) {
@@ -282,6 +435,14 @@
 		return scsi_trace_service_action_in(p, cdb, len);
 	case VARIABLE_LENGTH_CMD:
 		return scsi_trace_varlen(p, cdb, len);
+	case MAINTENANCE_IN:
+		return scsi_trace_maintenance_in(p, cdb, len);
+	case MAINTENANCE_OUT:
+		return scsi_trace_maintenance_out(p, cdb, len);
+	case ZBC_IN:
+		return scsi_trace_zbc_in(p, cdb, len);
+	case ZBC_OUT:
+		return scsi_trace_zbc_out(p, cdb, len);
 	default:
 		return scsi_trace_misc(p, cdb, len);
 	}
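The new decoders pull big-endian fields straight out of the CDB with the unaligned helpers. A standalone sketch of the REPORT ZONES layout that scsi_trace_zbc_in() assumes (illustration only, not part of the patch):

	#include <linux/types.h>
	#include <linux/printk.h>
	#include <asm/unaligned.h>

	static void example_decode_report_zones(const unsigned char *cdb)
	{
		u64 zone_start_lba = get_unaligned_be64(&cdb[2]);	/* bytes 2..9   */
		u32 alloc_len      = get_unaligned_be32(&cdb[10]);	/* bytes 10..13 */
		u8  options        = cdb[14] & 0x3f;			/* reporting options */
		u8  partial        = (cdb[14] >> 7) & 1;

		pr_debug("REPORT_ZONES zone=%llu alloc_len=%u options=%u partial=%u\n",
			 (unsigned long long)zone_start_lba, alloc_len,
			 options, partial);
	}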
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 8a88226..0f3a386 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2027,11 +2027,10 @@
 	kfree(vport);
 }
 
-int scsi_is_fc_vport(const struct device *dev)
+static int scsi_is_fc_vport(const struct device *dev)
 {
 	return dev->release == fc_vport_dev_release;
 }
-EXPORT_SYMBOL(scsi_is_fc_vport);
 
 static int fc_vport_match(struct attribute_container *cont,
 			    struct device *dev)
@@ -2110,7 +2109,8 @@
 		if ((channel == rport->channel) &&
 		    (id == rport->scsi_target_id)) {
 			spin_unlock_irqrestore(shost->host_lock, flags);
-			scsi_scan_target(&rport->dev, channel, id, lun, 1);
+			scsi_scan_target(&rport->dev, channel, id, lun,
+					 SCSI_SCAN_MANUAL);
 			return;
 		}
 	}
@@ -3277,7 +3277,8 @@
 	    (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
 	    !(i->f->disable_target_scan)) {
 		scsi_scan_target(&rport->dev, rport->channel,
-			rport->scsi_target_id, SCAN_WILD_CARD, 1);
+				 rport->scsi_target_id, SCAN_WILD_CARD,
+				 SCSI_SCAN_RESCAN);
 	}
 
 	spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 4414816..42bca61 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1009,7 +1009,7 @@
 	kfree(fnode_sess);
 }
 
-struct device_type iscsi_flashnode_sess_dev_type = {
+static struct device_type iscsi_flashnode_sess_dev_type = {
 	.name = "iscsi_flashnode_sess_dev_type",
 	.groups = iscsi_flashnode_sess_attr_groups,
 	.release = iscsi_flashnode_sess_release,
@@ -1195,13 +1195,13 @@
 	kfree(fnode_conn);
 }
 
-struct device_type iscsi_flashnode_conn_dev_type = {
+static struct device_type iscsi_flashnode_conn_dev_type = {
 	.name = "iscsi_flashnode_conn_dev_type",
 	.groups = iscsi_flashnode_conn_attr_groups,
 	.release = iscsi_flashnode_conn_release,
 };
 
-struct bus_type iscsi_flashnode_bus;
+static struct bus_type iscsi_flashnode_bus;
 
 int iscsi_flashnode_bus_match(struct device *dev,
 				     struct device_driver *drv)
@@ -1212,7 +1212,7 @@
 }
 EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
 
-struct bus_type iscsi_flashnode_bus = {
+static struct bus_type iscsi_flashnode_bus = {
 	.name = "iscsi_flashnode",
 	.match = &iscsi_flashnode_bus_match,
 };
@@ -1324,11 +1324,10 @@
  *  1 on success
  *  0 on failure
  */
-int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
+static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
 {
 	return dev->bus == &iscsi_flashnode_bus;
 }
-EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev);
 
 static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
 {
@@ -1783,6 +1782,7 @@
 	unsigned int channel;
 	unsigned int id;
 	u64 lun;
+	enum scsi_scan_mode rescan;
 };
 
 static int iscsi_user_scan_session(struct device *dev, void *data)
@@ -1819,7 +1819,7 @@
 		    (scan_data->id == SCAN_WILD_CARD ||
 		     scan_data->id == id))
 			scsi_scan_target(&session->dev, 0, id,
-					 scan_data->lun, 1);
+					 scan_data->lun, scan_data->rescan);
 	}
 
 user_scan_exit:
@@ -1836,6 +1836,7 @@
 	scan_data.channel = channel;
 	scan_data.id = id;
 	scan_data.lun = lun;
+	scan_data.rescan = SCSI_SCAN_MANUAL;
 
 	return device_for_each_child(&shost->shost_gendev, &scan_data,
 				     iscsi_user_scan_session);
@@ -1852,6 +1853,7 @@
 	scan_data.channel = 0;
 	scan_data.id = SCAN_WILD_CARD;
 	scan_data.lun = SCAN_WILD_CARD;
+	scan_data.rescan = SCSI_SCAN_RESCAN;
 
 	iscsi_user_scan_session(&session->dev, &scan_data);
 	atomic_dec(&ihost->nr_scans);
@@ -2067,13 +2069,10 @@
 
 int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 {
-	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_cls_host *ihost;
 	unsigned long flags;
 	int id = 0;
 	int err;
 
-	ihost = shost->shost_data;
 	session->sid = atomic_add_return(1, &iscsi_session_nr);
 
 	if (target_id == ISCSI_MAX_TARGET) {
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index b6f958193..3f0ff07 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1614,7 +1614,8 @@
 		else
 			lun = 0;
 
-		scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 0);
+		scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun,
+				 SCSI_SCAN_INITIAL);
 	}
 
 	return 0;
@@ -1739,8 +1740,8 @@
 
 		if ((channel == SCAN_WILD_CARD || channel == 0) &&
 		    (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {
-			scsi_scan_target(&rphy->dev, 0,
-					 rphy->scsi_target_id, lun, 1);
+			scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id,
+					 lun, SCSI_SCAN_MANUAL);
 		}
 	}
 	mutex_unlock(&sas_host->lock);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5a5457a..428c03e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -137,15 +137,15 @@
 
 static void sd_set_flush_flag(struct scsi_disk *sdkp)
 {
-	unsigned flush = 0;
+	bool wc = false, fua = false;
 
 	if (sdkp->WCE) {
-		flush |= REQ_FLUSH;
+		wc = true;
 		if (sdkp->DPOFUA)
-			flush |= REQ_FUA;
+			fua = true;
 	}
 
-	blk_queue_flush(sdkp->disk->queue, flush);
+	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
 }
 
 static ssize_t
@@ -779,7 +779,7 @@
 	 * discarded on disk. This allows us to report completion on the full
 	 * amount of blocks described by the request.
 	 */
-	blk_add_request_payload(rq, page, len);
+	blk_add_request_payload(rq, page, 0, len);
 	ret = scsi_init_io(cmd);
 	rq->__data_len = nr_bytes;
 
@@ -1275,18 +1275,19 @@
 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
 	struct scsi_device *sdp = sdkp->device;
 	struct Scsi_Host *host = sdp->host;
+	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
 	int diskinfo[4];
 
 	/* default to most commonly used values */
-        diskinfo[0] = 0x40;	/* 1 << 6 */
-       	diskinfo[1] = 0x20;	/* 1 << 5 */
-       	diskinfo[2] = sdkp->capacity >> 11;
-	
+	diskinfo[0] = 0x40;	/* 1 << 6 */
+	diskinfo[1] = 0x20;	/* 1 << 5 */
+	diskinfo[2] = capacity >> 11;
+
 	/* override with calculated, extended default, or driver values */
 	if (host->hostt->bios_param)
-		host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
+		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
 	else
-		scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
+		scsicam_bios_param(bdev, capacity, diskinfo);
 
 	geo->heads = diskinfo[0];
 	geo->sectors = diskinfo[1];
@@ -2337,14 +2338,6 @@
 	if (sdkp->capacity > 0xffffffff)
 		sdp->use_16_for_rw = 1;
 
-	/* Rescale capacity to 512-byte units */
-	if (sector_size == 4096)
-		sdkp->capacity <<= 3;
-	else if (sector_size == 2048)
-		sdkp->capacity <<= 2;
-	else if (sector_size == 1024)
-		sdkp->capacity <<= 1;
-
 	blk_queue_physical_block_size(sdp->request_queue,
 				      sdkp->physical_block_size);
 	sdkp->device->sector_size = sector_size;
@@ -2795,28 +2788,6 @@
 		sdkp->ws10 = 1;
 }
 
-static int sd_try_extended_inquiry(struct scsi_device *sdp)
-{
-	/* Attempt VPD inquiry if the device blacklist explicitly calls
-	 * for it.
-	 */
-	if (sdp->try_vpd_pages)
-		return 1;
-	/*
-	 * Although VPD inquiries can go to SCSI-2 type devices,
-	 * some USB ones crash on receiving them, and the pages
-	 * we currently ask for are for SPC-3 and beyond
-	 */
-	if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
-		return 1;
-	return 0;
-}
-
-static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
-{
-	return blocks << (ilog2(sdev->sector_size) - 9);
-}
-
 /**
  *	sd_revalidate_disk - called the first time a new disk is seen,
  *	performs disk spin up, read_capacity, etc.
@@ -2856,7 +2827,7 @@
 	if (sdkp->media_present) {
 		sd_read_capacity(sdkp, buffer);
 
-		if (sd_try_extended_inquiry(sdp)) {
+		if (scsi_device_supports_vpd(sdp)) {
 			sd_read_block_provisioning(sdkp);
 			sd_read_block_limits(sdkp);
 			sd_read_block_characteristics(sdkp);
@@ -2891,7 +2862,7 @@
 	if (sdkp->opt_xfer_blocks &&
 	    sdkp->opt_xfer_blocks <= dev_max &&
 	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
 		rw_max = q->limits.io_opt =
 			sdkp->opt_xfer_blocks * sdp->sector_size;
 	else
@@ -2900,7 +2871,7 @@
 	/* Combine with controller limits */
 	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
 
-	set_capacity(disk, sdkp->capacity);
+	set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
 	sd_config_write_same(sdkp);
 	kfree(buffer);
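sd_set_flush_flag() above now describes the drive cache with two booleans instead of REQ_FLUSH/REQ_FUA bits. The mapping it implements, spelled out as an equivalent single call (illustration only, not part of the patch):

	#include <linux/blkdev.h>
	#include "sd.h"

	/*
	 *   WCE  DPOFUA  ->  wc     fua
	 *    0     x         false  false   write-through cache, no flushes needed
	 *    1     0         true   false   volatile cache, flush only
	 *    1     1         true   true    volatile cache, FUA writes supported
	 */
	static void example_set_flush(struct scsi_disk *sdkp)
	{
		blk_queue_write_cache(sdkp->disk->queue, sdkp->WCE,
				      sdkp->WCE && sdkp->DPOFUA);
	}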
 
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5f2a84a..654630b 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -65,7 +65,7 @@
 	struct device	dev;
 	struct gendisk	*disk;
 	atomic_t	openers;
-	sector_t	capacity;	/* size in 512-byte sectors */
+	sector_t	capacity;	/* size in logical blocks */
 	u32		max_xfer_blocks;
 	u32		opt_xfer_blocks;
 	u32		max_ws_blocks;
@@ -146,6 +146,11 @@
 	return 0;
 }
 
+static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
+{
+	return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
 /*
  * A DIF-capable target device can be formatted with different
  * protection schemes.  Currently 0 through 3 are defined:
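With sdkp->capacity now stored in logical blocks, callers go through logical_to_sectors() (added above) whenever a 512-byte unit is needed. A worked example assuming a 4096-byte logical block size (illustration only, not part of the patch):

	/*
	 * sector_size = 4096:  ilog2(4096) - 9 = 12 - 9 = 3
	 *
	 * A 1 TiB device therefore reports
	 *   capacity           = 2^40 / 4096 = 268435456 logical blocks
	 *   logical_to_sectors = 268435456 << 3 = 2147483648 512-byte sectors
	 * and that converted value is what set_capacity() and the bios_param
	 * geometry code in sd.c now receive.
	 */
	sector_t sectors = logical_to_sectors(sdp, sdkp->capacity);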
diff --git a/drivers/scsi/sense_codes.h b/drivers/scsi/sense_codes.h
new file mode 100644
index 0000000..e4e1dcc
--- /dev/null
+++ b/drivers/scsi/sense_codes.h
@@ -0,0 +1,826 @@
+/*
+ * The canonical list of T10 Additional Sense Codes is available at:
+ * http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
+ */
+
+SENSE_CODE(0x0000, "No additional sense information")
+SENSE_CODE(0x0001, "Filemark detected")
+SENSE_CODE(0x0002, "End-of-partition/medium detected")
+SENSE_CODE(0x0003, "Setmark detected")
+SENSE_CODE(0x0004, "Beginning-of-partition/medium detected")
+SENSE_CODE(0x0005, "End-of-data detected")
+SENSE_CODE(0x0006, "I/O process terminated")
+SENSE_CODE(0x0007, "Programmable early warning detected")
+SENSE_CODE(0x0011, "Audio play operation in progress")
+SENSE_CODE(0x0012, "Audio play operation paused")
+SENSE_CODE(0x0013, "Audio play operation successfully completed")
+SENSE_CODE(0x0014, "Audio play operation stopped due to error")
+SENSE_CODE(0x0015, "No current audio status to return")
+SENSE_CODE(0x0016, "Operation in progress")
+SENSE_CODE(0x0017, "Cleaning requested")
+SENSE_CODE(0x0018, "Erase operation in progress")
+SENSE_CODE(0x0019, "Locate operation in progress")
+SENSE_CODE(0x001A, "Rewind operation in progress")
+SENSE_CODE(0x001B, "Set capacity operation in progress")
+SENSE_CODE(0x001C, "Verify operation in progress")
+SENSE_CODE(0x001D, "ATA pass through information available")
+SENSE_CODE(0x001E, "Conflicting SA creation request")
+SENSE_CODE(0x001F, "Logical unit transitioning to another power condition")
+SENSE_CODE(0x0020, "Extended copy information available")
+SENSE_CODE(0x0021, "Atomic command aborted due to ACA")
+
+SENSE_CODE(0x0100, "No index/sector signal")
+
+SENSE_CODE(0x0200, "No seek complete")
+
+SENSE_CODE(0x0300, "Peripheral device write fault")
+SENSE_CODE(0x0301, "No write current")
+SENSE_CODE(0x0302, "Excessive write errors")
+
+SENSE_CODE(0x0400, "Logical unit not ready, cause not reportable")
+SENSE_CODE(0x0401, "Logical unit is in process of becoming ready")
+SENSE_CODE(0x0402, "Logical unit not ready, initializing command required")
+SENSE_CODE(0x0403, "Logical unit not ready, manual intervention required")
+SENSE_CODE(0x0404, "Logical unit not ready, format in progress")
+SENSE_CODE(0x0405, "Logical unit not ready, rebuild in progress")
+SENSE_CODE(0x0406, "Logical unit not ready, recalculation in progress")
+SENSE_CODE(0x0407, "Logical unit not ready, operation in progress")
+SENSE_CODE(0x0408, "Logical unit not ready, long write in progress")
+SENSE_CODE(0x0409, "Logical unit not ready, self-test in progress")
+SENSE_CODE(0x040A, "Logical unit not accessible, asymmetric access state transition")
+SENSE_CODE(0x040B, "Logical unit not accessible, target port in standby state")
+SENSE_CODE(0x040C, "Logical unit not accessible, target port in unavailable state")
+SENSE_CODE(0x040D, "Logical unit not ready, structure check required")
+SENSE_CODE(0x040E, "Logical unit not ready, security session in progress")
+SENSE_CODE(0x0410, "Logical unit not ready, auxiliary memory not accessible")
+SENSE_CODE(0x0411, "Logical unit not ready, notify (enable spinup) required")
+SENSE_CODE(0x0412, "Logical unit not ready, offline")
+SENSE_CODE(0x0413, "Logical unit not ready, SA creation in progress")
+SENSE_CODE(0x0414, "Logical unit not ready, space allocation in progress")
+SENSE_CODE(0x0415, "Logical unit not ready, robotics disabled")
+SENSE_CODE(0x0416, "Logical unit not ready, configuration required")
+SENSE_CODE(0x0417, "Logical unit not ready, calibration required")
+SENSE_CODE(0x0418, "Logical unit not ready, a door is open")
+SENSE_CODE(0x0419, "Logical unit not ready, operating in sequential mode")
+SENSE_CODE(0x041A, "Logical unit not ready, start stop unit command in progress")
+SENSE_CODE(0x041B, "Logical unit not ready, sanitize in progress")
+SENSE_CODE(0x041C, "Logical unit not ready, additional power use not yet granted")
+SENSE_CODE(0x041D, "Logical unit not ready, configuration in progress")
+SENSE_CODE(0x041E, "Logical unit not ready, microcode activation required")
+SENSE_CODE(0x041F, "Logical unit not ready, microcode download required")
+SENSE_CODE(0x0420, "Logical unit not ready, logical unit reset required")
+SENSE_CODE(0x0421, "Logical unit not ready, hard reset required")
+SENSE_CODE(0x0422, "Logical unit not ready, power cycle required")
+
+SENSE_CODE(0x0500, "Logical unit does not respond to selection")
+
+SENSE_CODE(0x0600, "No reference position found")
+
+SENSE_CODE(0x0700, "Multiple peripheral devices selected")
+
+SENSE_CODE(0x0800, "Logical unit communication failure")
+SENSE_CODE(0x0801, "Logical unit communication time-out")
+SENSE_CODE(0x0802, "Logical unit communication parity error")
+SENSE_CODE(0x0803, "Logical unit communication CRC error (Ultra-DMA/32)")
+SENSE_CODE(0x0804, "Unreachable copy target")
+
+SENSE_CODE(0x0900, "Track following error")
+SENSE_CODE(0x0901, "Tracking servo failure")
+SENSE_CODE(0x0902, "Focus servo failure")
+SENSE_CODE(0x0903, "Spindle servo failure")
+SENSE_CODE(0x0904, "Head select fault")
+SENSE_CODE(0x0905, "Vibration induced tracking error")
+
+SENSE_CODE(0x0A00, "Error log overflow")
+
+SENSE_CODE(0x0B00, "Warning")
+SENSE_CODE(0x0B01, "Warning - specified temperature exceeded")
+SENSE_CODE(0x0B02, "Warning - enclosure degraded")
+SENSE_CODE(0x0B03, "Warning - background self-test failed")
+SENSE_CODE(0x0B04, "Warning - background pre-scan detected medium error")
+SENSE_CODE(0x0B05, "Warning - background medium scan detected medium error")
+SENSE_CODE(0x0B06, "Warning - non-volatile cache now volatile")
+SENSE_CODE(0x0B07, "Warning - degraded power to non-volatile cache")
+SENSE_CODE(0x0B08, "Warning - power loss expected")
+SENSE_CODE(0x0B09, "Warning - device statistics notification active")
+
+SENSE_CODE(0x0C00, "Write error")
+SENSE_CODE(0x0C01, "Write error - recovered with auto reallocation")
+SENSE_CODE(0x0C02, "Write error - auto reallocation failed")
+SENSE_CODE(0x0C03, "Write error - recommend reassignment")
+SENSE_CODE(0x0C04, "Compression check miscompare error")
+SENSE_CODE(0x0C05, "Data expansion occurred during compression")
+SENSE_CODE(0x0C06, "Block not compressible")
+SENSE_CODE(0x0C07, "Write error - recovery needed")
+SENSE_CODE(0x0C08, "Write error - recovery failed")
+SENSE_CODE(0x0C09, "Write error - loss of streaming")
+SENSE_CODE(0x0C0A, "Write error - padding blocks added")
+SENSE_CODE(0x0C0B, "Auxiliary memory write error")
+SENSE_CODE(0x0C0C, "Write error - unexpected unsolicited data")
+SENSE_CODE(0x0C0D, "Write error - not enough unsolicited data")
+SENSE_CODE(0x0C0E, "Multiple write errors")
+SENSE_CODE(0x0C0F, "Defects in error window")
+SENSE_CODE(0x0C10, "Incomplete multiple atomic write operations")
+
+SENSE_CODE(0x0D00, "Error detected by third party temporary initiator")
+SENSE_CODE(0x0D01, "Third party device failure")
+SENSE_CODE(0x0D02, "Copy target device not reachable")
+SENSE_CODE(0x0D03, "Incorrect copy target device type")
+SENSE_CODE(0x0D04, "Copy target device data underrun")
+SENSE_CODE(0x0D05, "Copy target device data overrun")
+
+SENSE_CODE(0x0E00, "Invalid information unit")
+SENSE_CODE(0x0E01, "Information unit too short")
+SENSE_CODE(0x0E02, "Information unit too long")
+SENSE_CODE(0x0E03, "Invalid field in command information unit")
+
+SENSE_CODE(0x1000, "Id CRC or ECC error")
+SENSE_CODE(0x1001, "Logical block guard check failed")
+SENSE_CODE(0x1002, "Logical block application tag check failed")
+SENSE_CODE(0x1003, "Logical block reference tag check failed")
+SENSE_CODE(0x1004, "Logical block protection error on recover buffered data")
+SENSE_CODE(0x1005, "Logical block protection method error")
+
+SENSE_CODE(0x1100, "Unrecovered read error")
+SENSE_CODE(0x1101, "Read retries exhausted")
+SENSE_CODE(0x1102, "Error too long to correct")
+SENSE_CODE(0x1103, "Multiple read errors")
+SENSE_CODE(0x1104, "Unrecovered read error - auto reallocate failed")
+SENSE_CODE(0x1105, "L-EC uncorrectable error")
+SENSE_CODE(0x1106, "CIRC unrecovered error")
+SENSE_CODE(0x1107, "Data re-synchronization error")
+SENSE_CODE(0x1108, "Incomplete block read")
+SENSE_CODE(0x1109, "No gap found")
+SENSE_CODE(0x110A, "Miscorrected error")
+SENSE_CODE(0x110B, "Unrecovered read error - recommend reassignment")
+SENSE_CODE(0x110C, "Unrecovered read error - recommend rewrite the data")
+SENSE_CODE(0x110D, "De-compression CRC error")
+SENSE_CODE(0x110E, "Cannot decompress using declared algorithm")
+SENSE_CODE(0x110F, "Error reading UPC/EAN number")
+SENSE_CODE(0x1110, "Error reading ISRC number")
+SENSE_CODE(0x1111, "Read error - loss of streaming")
+SENSE_CODE(0x1112, "Auxiliary memory read error")
+SENSE_CODE(0x1113, "Read error - failed retransmission request")
+SENSE_CODE(0x1114, "Read error - lba marked bad by application client")
+SENSE_CODE(0x1115, "Write after sanitize required")
+
+SENSE_CODE(0x1200, "Address mark not found for id field")
+
+SENSE_CODE(0x1300, "Address mark not found for data field")
+
+SENSE_CODE(0x1400, "Recorded entity not found")
+SENSE_CODE(0x1401, "Record not found")
+SENSE_CODE(0x1402, "Filemark or setmark not found")
+SENSE_CODE(0x1403, "End-of-data not found")
+SENSE_CODE(0x1404, "Block sequence error")
+SENSE_CODE(0x1405, "Record not found - recommend reassignment")
+SENSE_CODE(0x1406, "Record not found - data auto-reallocated")
+SENSE_CODE(0x1407, "Locate operation failure")
+
+SENSE_CODE(0x1500, "Random positioning error")
+SENSE_CODE(0x1501, "Mechanical positioning error")
+SENSE_CODE(0x1502, "Positioning error detected by read of medium")
+
+SENSE_CODE(0x1600, "Data synchronization mark error")
+SENSE_CODE(0x1601, "Data sync error - data rewritten")
+SENSE_CODE(0x1602, "Data sync error - recommend rewrite")
+SENSE_CODE(0x1603, "Data sync error - data auto-reallocated")
+SENSE_CODE(0x1604, "Data sync error - recommend reassignment")
+
+SENSE_CODE(0x1700, "Recovered data with no error correction applied")
+SENSE_CODE(0x1701, "Recovered data with retries")
+SENSE_CODE(0x1702, "Recovered data with positive head offset")
+SENSE_CODE(0x1703, "Recovered data with negative head offset")
+SENSE_CODE(0x1704, "Recovered data with retries and/or circ applied")
+SENSE_CODE(0x1705, "Recovered data using previous sector id")
+SENSE_CODE(0x1706, "Recovered data without ECC - data auto-reallocated")
+SENSE_CODE(0x1707, "Recovered data without ECC - recommend reassignment")
+SENSE_CODE(0x1708, "Recovered data without ECC - recommend rewrite")
+SENSE_CODE(0x1709, "Recovered data without ECC - data rewritten")
+
+SENSE_CODE(0x1800, "Recovered data with error correction applied")
+SENSE_CODE(0x1801, "Recovered data with error corr. & retries applied")
+SENSE_CODE(0x1802, "Recovered data - data auto-reallocated")
+SENSE_CODE(0x1803, "Recovered data with CIRC")
+SENSE_CODE(0x1804, "Recovered data with L-EC")
+SENSE_CODE(0x1805, "Recovered data - recommend reassignment")
+SENSE_CODE(0x1806, "Recovered data - recommend rewrite")
+SENSE_CODE(0x1807, "Recovered data with ECC - data rewritten")
+SENSE_CODE(0x1808, "Recovered data with linking")
+
+SENSE_CODE(0x1900, "Defect list error")
+SENSE_CODE(0x1901, "Defect list not available")
+SENSE_CODE(0x1902, "Defect list error in primary list")
+SENSE_CODE(0x1903, "Defect list error in grown list")
+
+SENSE_CODE(0x1A00, "Parameter list length error")
+
+SENSE_CODE(0x1B00, "Synchronous data transfer error")
+
+SENSE_CODE(0x1C00, "Defect list not found")
+SENSE_CODE(0x1C01, "Primary defect list not found")
+SENSE_CODE(0x1C02, "Grown defect list not found")
+
+SENSE_CODE(0x1D00, "Miscompare during verify operation")
+SENSE_CODE(0x1D01, "Miscompare verify of unmapped LBA")
+
+SENSE_CODE(0x1E00, "Recovered id with ECC correction")
+
+SENSE_CODE(0x1F00, "Partial defect list transfer")
+
+SENSE_CODE(0x2000, "Invalid command operation code")
+SENSE_CODE(0x2001, "Access denied - initiator pending-enrolled")
+SENSE_CODE(0x2002, "Access denied - no access rights")
+SENSE_CODE(0x2003, "Access denied - invalid mgmt id key")
+SENSE_CODE(0x2004, "Illegal command while in write capable state")
+SENSE_CODE(0x2005, "Obsolete")
+SENSE_CODE(0x2006, "Illegal command while in explicit address mode")
+SENSE_CODE(0x2007, "Illegal command while in implicit address mode")
+SENSE_CODE(0x2008, "Access denied - enrollment conflict")
+SENSE_CODE(0x2009, "Access denied - invalid LU identifier")
+SENSE_CODE(0x200A, "Access denied - invalid proxy token")
+SENSE_CODE(0x200B, "Access denied - ACL LUN conflict")
+SENSE_CODE(0x200C, "Illegal command when not in append-only mode")
+
+SENSE_CODE(0x2100, "Logical block address out of range")
+SENSE_CODE(0x2101, "Invalid element address")
+SENSE_CODE(0x2102, "Invalid address for write")
+SENSE_CODE(0x2103, "Invalid write crossing layer jump")
+SENSE_CODE(0x2104, "Unaligned write command")
+SENSE_CODE(0x2105, "Write boundary violation")
+SENSE_CODE(0x2106, "Attempt to read invalid data")
+SENSE_CODE(0x2107, "Read boundary violation")
+
+SENSE_CODE(0x2200, "Illegal function (use 20 00, 24 00, or 26 00)")
+
+SENSE_CODE(0x2300, "Invalid token operation, cause not reportable")
+SENSE_CODE(0x2301, "Invalid token operation, unsupported token type")
+SENSE_CODE(0x2302, "Invalid token operation, remote token usage not supported")
+SENSE_CODE(0x2303, "Invalid token operation, remote rod token creation not supported")
+SENSE_CODE(0x2304, "Invalid token operation, token unknown")
+SENSE_CODE(0x2305, "Invalid token operation, token corrupt")
+SENSE_CODE(0x2306, "Invalid token operation, token revoked")
+SENSE_CODE(0x2307, "Invalid token operation, token expired")
+SENSE_CODE(0x2308, "Invalid token operation, token cancelled")
+SENSE_CODE(0x2309, "Invalid token operation, token deleted")
+SENSE_CODE(0x230A, "Invalid token operation, invalid token length")
+
+SENSE_CODE(0x2400, "Invalid field in cdb")
+SENSE_CODE(0x2401, "CDB decryption error")
+SENSE_CODE(0x2402, "Obsolete")
+SENSE_CODE(0x2403, "Obsolete")
+SENSE_CODE(0x2404, "Security audit value frozen")
+SENSE_CODE(0x2405, "Security working key frozen")
+SENSE_CODE(0x2406, "Nonce not unique")
+SENSE_CODE(0x2407, "Nonce timestamp out of range")
+SENSE_CODE(0x2408, "Invalid XCDB")
+
+SENSE_CODE(0x2500, "Logical unit not supported")
+
+SENSE_CODE(0x2600, "Invalid field in parameter list")
+SENSE_CODE(0x2601, "Parameter not supported")
+SENSE_CODE(0x2602, "Parameter value invalid")
+SENSE_CODE(0x2603, "Threshold parameters not supported")
+SENSE_CODE(0x2604, "Invalid release of persistent reservation")
+SENSE_CODE(0x2605, "Data decryption error")
+SENSE_CODE(0x2606, "Too many target descriptors")
+SENSE_CODE(0x2607, "Unsupported target descriptor type code")
+SENSE_CODE(0x2608, "Too many segment descriptors")
+SENSE_CODE(0x2609, "Unsupported segment descriptor type code")
+SENSE_CODE(0x260A, "Unexpected inexact segment")
+SENSE_CODE(0x260B, "Inline data length exceeded")
+SENSE_CODE(0x260C, "Invalid operation for copy source or destination")
+SENSE_CODE(0x260D, "Copy segment granularity violation")
+SENSE_CODE(0x260E, "Invalid parameter while port is enabled")
+SENSE_CODE(0x260F, "Invalid data-out buffer integrity check value")
+SENSE_CODE(0x2610, "Data decryption key fail limit reached")
+SENSE_CODE(0x2611, "Incomplete key-associated data set")
+SENSE_CODE(0x2612, "Vendor specific key reference not found")
+
+SENSE_CODE(0x2700, "Write protected")
+SENSE_CODE(0x2701, "Hardware write protected")
+SENSE_CODE(0x2702, "Logical unit software write protected")
+SENSE_CODE(0x2703, "Associated write protect")
+SENSE_CODE(0x2704, "Persistent write protect")
+SENSE_CODE(0x2705, "Permanent write protect")
+SENSE_CODE(0x2706, "Conditional write protect")
+SENSE_CODE(0x2707, "Space allocation failed write protect")
+SENSE_CODE(0x2708, "Zone is read only")
+
+SENSE_CODE(0x2800, "Not ready to ready change, medium may have changed")
+SENSE_CODE(0x2801, "Import or export element accessed")
+SENSE_CODE(0x2802, "Format-layer may have changed")
+SENSE_CODE(0x2803, "Import/export element accessed, medium changed")
+
+SENSE_CODE(0x2900, "Power on, reset, or bus device reset occurred")
+SENSE_CODE(0x2901, "Power on occurred")
+SENSE_CODE(0x2902, "Scsi bus reset occurred")
+SENSE_CODE(0x2903, "Bus device reset function occurred")
+SENSE_CODE(0x2904, "Device internal reset")
+SENSE_CODE(0x2905, "Transceiver mode changed to single-ended")
+SENSE_CODE(0x2906, "Transceiver mode changed to lvd")
+SENSE_CODE(0x2907, "I_T nexus loss occurred")
+
+SENSE_CODE(0x2A00, "Parameters changed")
+SENSE_CODE(0x2A01, "Mode parameters changed")
+SENSE_CODE(0x2A02, "Log parameters changed")
+SENSE_CODE(0x2A03, "Reservations preempted")
+SENSE_CODE(0x2A04, "Reservations released")
+SENSE_CODE(0x2A05, "Registrations preempted")
+SENSE_CODE(0x2A06, "Asymmetric access state changed")
+SENSE_CODE(0x2A07, "Implicit asymmetric access state transition failed")
+SENSE_CODE(0x2A08, "Priority changed")
+SENSE_CODE(0x2A09, "Capacity data has changed")
+SENSE_CODE(0x2A0A, "Error history I_T nexus cleared")
+SENSE_CODE(0x2A0B, "Error history snapshot released")
+SENSE_CODE(0x2A0C, "Error recovery attributes have changed")
+SENSE_CODE(0x2A0D, "Data encryption capabilities changed")
+SENSE_CODE(0x2A10, "Timestamp changed")
+SENSE_CODE(0x2A11, "Data encryption parameters changed by another i_t nexus")
+SENSE_CODE(0x2A12, "Data encryption parameters changed by vendor specific event")
+SENSE_CODE(0x2A13, "Data encryption key instance counter has changed")
+SENSE_CODE(0x2A14, "SA creation capabilities data has changed")
+SENSE_CODE(0x2A15, "Medium removal prevention preempted")
+
+SENSE_CODE(0x2B00, "Copy cannot execute since host cannot disconnect")
+
+SENSE_CODE(0x2C00, "Command sequence error")
+SENSE_CODE(0x2C01, "Too many windows specified")
+SENSE_CODE(0x2C02, "Invalid combination of windows specified")
+SENSE_CODE(0x2C03, "Current program area is not empty")
+SENSE_CODE(0x2C04, "Current program area is empty")
+SENSE_CODE(0x2C05, "Illegal power condition request")
+SENSE_CODE(0x2C06, "Persistent prevent conflict")
+SENSE_CODE(0x2C07, "Previous busy status")
+SENSE_CODE(0x2C08, "Previous task set full status")
+SENSE_CODE(0x2C09, "Previous reservation conflict status")
+SENSE_CODE(0x2C0A, "Partition or collection contains user objects")
+SENSE_CODE(0x2C0B, "Not reserved")
+SENSE_CODE(0x2C0C, "Orwrite generation does not match")
+SENSE_CODE(0x2C0D, "Reset write pointer not allowed")
+SENSE_CODE(0x2C0E, "Zone is offline")
+
+SENSE_CODE(0x2D00, "Overwrite error on update in place")
+
+SENSE_CODE(0x2E00, "Insufficient time for operation")
+SENSE_CODE(0x2E01, "Command timeout before processing")
+SENSE_CODE(0x2E02, "Command timeout during processing")
+SENSE_CODE(0x2E03, "Command timeout during processing due to error recovery")
+
+SENSE_CODE(0x2F00, "Commands cleared by another initiator")
+SENSE_CODE(0x2F01, "Commands cleared by power loss notification")
+SENSE_CODE(0x2F02, "Commands cleared by device server")
+SENSE_CODE(0x2F03, "Some commands cleared by queuing layer event")
+
+SENSE_CODE(0x3000, "Incompatible medium installed")
+SENSE_CODE(0x3001, "Cannot read medium - unknown format")
+SENSE_CODE(0x3002, "Cannot read medium - incompatible format")
+SENSE_CODE(0x3003, "Cleaning cartridge installed")
+SENSE_CODE(0x3004, "Cannot write medium - unknown format")
+SENSE_CODE(0x3005, "Cannot write medium - incompatible format")
+SENSE_CODE(0x3006, "Cannot format medium - incompatible medium")
+SENSE_CODE(0x3007, "Cleaning failure")
+SENSE_CODE(0x3008, "Cannot write - application code mismatch")
+SENSE_CODE(0x3009, "Current session not fixated for append")
+SENSE_CODE(0x300A, "Cleaning request rejected")
+SENSE_CODE(0x300C, "WORM medium - overwrite attempted")
+SENSE_CODE(0x300D, "WORM medium - integrity check")
+SENSE_CODE(0x3010, "Medium not formatted")
+SENSE_CODE(0x3011, "Incompatible volume type")
+SENSE_CODE(0x3012, "Incompatible volume qualifier")
+SENSE_CODE(0x3013, "Cleaning volume expired")
+
+SENSE_CODE(0x3100, "Medium format corrupted")
+SENSE_CODE(0x3101, "Format command failed")
+SENSE_CODE(0x3102, "Zoned formatting failed due to spare linking")
+SENSE_CODE(0x3103, "Sanitize command failed")
+
+SENSE_CODE(0x3200, "No defect spare location available")
+SENSE_CODE(0x3201, "Defect list update failure")
+
+SENSE_CODE(0x3300, "Tape length error")
+
+SENSE_CODE(0x3400, "Enclosure failure")
+
+SENSE_CODE(0x3500, "Enclosure services failure")
+SENSE_CODE(0x3501, "Unsupported enclosure function")
+SENSE_CODE(0x3502, "Enclosure services unavailable")
+SENSE_CODE(0x3503, "Enclosure services transfer failure")
+SENSE_CODE(0x3504, "Enclosure services transfer refused")
+SENSE_CODE(0x3505, "Enclosure services checksum error")
+
+SENSE_CODE(0x3600, "Ribbon, ink, or toner failure")
+
+SENSE_CODE(0x3700, "Rounded parameter")
+
+SENSE_CODE(0x3800, "Event status notification")
+SENSE_CODE(0x3802, "Esn - power management class event")
+SENSE_CODE(0x3804, "Esn - media class event")
+SENSE_CODE(0x3806, "Esn - device busy class event")
+SENSE_CODE(0x3807, "Thin Provisioning soft threshold reached")
+
+SENSE_CODE(0x3900, "Saving parameters not supported")
+
+SENSE_CODE(0x3A00, "Medium not present")
+SENSE_CODE(0x3A01, "Medium not present - tray closed")
+SENSE_CODE(0x3A02, "Medium not present - tray open")
+SENSE_CODE(0x3A03, "Medium not present - loadable")
+SENSE_CODE(0x3A04, "Medium not present - medium auxiliary memory accessible")
+
+SENSE_CODE(0x3B00, "Sequential positioning error")
+SENSE_CODE(0x3B01, "Tape position error at beginning-of-medium")
+SENSE_CODE(0x3B02, "Tape position error at end-of-medium")
+SENSE_CODE(0x3B03, "Tape or electronic vertical forms unit not ready")
+SENSE_CODE(0x3B04, "Slew failure")
+SENSE_CODE(0x3B05, "Paper jam")
+SENSE_CODE(0x3B06, "Failed to sense top-of-form")
+SENSE_CODE(0x3B07, "Failed to sense bottom-of-form")
+SENSE_CODE(0x3B08, "Reposition error")
+SENSE_CODE(0x3B09, "Read past end of medium")
+SENSE_CODE(0x3B0A, "Read past beginning of medium")
+SENSE_CODE(0x3B0B, "Position past end of medium")
+SENSE_CODE(0x3B0C, "Position past beginning of medium")
+SENSE_CODE(0x3B0D, "Medium destination element full")
+SENSE_CODE(0x3B0E, "Medium source element empty")
+SENSE_CODE(0x3B0F, "End of medium reached")
+SENSE_CODE(0x3B11, "Medium magazine not accessible")
+SENSE_CODE(0x3B12, "Medium magazine removed")
+SENSE_CODE(0x3B13, "Medium magazine inserted")
+SENSE_CODE(0x3B14, "Medium magazine locked")
+SENSE_CODE(0x3B15, "Medium magazine unlocked")
+SENSE_CODE(0x3B16, "Mechanical positioning or changer error")
+SENSE_CODE(0x3B17, "Read past end of user object")
+SENSE_CODE(0x3B18, "Element disabled")
+SENSE_CODE(0x3B19, "Element enabled")
+SENSE_CODE(0x3B1A, "Data transfer device removed")
+SENSE_CODE(0x3B1B, "Data transfer device inserted")
+SENSE_CODE(0x3B1C, "Too many logical objects on partition to support operation")
+
+SENSE_CODE(0x3D00, "Invalid bits in identify message")
+
+SENSE_CODE(0x3E00, "Logical unit has not self-configured yet")
+SENSE_CODE(0x3E01, "Logical unit failure")
+SENSE_CODE(0x3E02, "Timeout on logical unit")
+SENSE_CODE(0x3E03, "Logical unit failed self-test")
+SENSE_CODE(0x3E04, "Logical unit unable to update self-test log")
+
+SENSE_CODE(0x3F00, "Target operating conditions have changed")
+SENSE_CODE(0x3F01, "Microcode has been changed")
+SENSE_CODE(0x3F02, "Changed operating definition")
+SENSE_CODE(0x3F03, "Inquiry data has changed")
+SENSE_CODE(0x3F04, "Component device attached")
+SENSE_CODE(0x3F05, "Device identifier changed")
+SENSE_CODE(0x3F06, "Redundancy group created or modified")
+SENSE_CODE(0x3F07, "Redundancy group deleted")
+SENSE_CODE(0x3F08, "Spare created or modified")
+SENSE_CODE(0x3F09, "Spare deleted")
+SENSE_CODE(0x3F0A, "Volume set created or modified")
+SENSE_CODE(0x3F0B, "Volume set deleted")
+SENSE_CODE(0x3F0C, "Volume set deassigned")
+SENSE_CODE(0x3F0D, "Volume set reassigned")
+SENSE_CODE(0x3F0E, "Reported luns data has changed")
+SENSE_CODE(0x3F0F, "Echo buffer overwritten")
+SENSE_CODE(0x3F10, "Medium loadable")
+SENSE_CODE(0x3F11, "Medium auxiliary memory accessible")
+SENSE_CODE(0x3F12, "iSCSI IP address added")
+SENSE_CODE(0x3F13, "iSCSI IP address removed")
+SENSE_CODE(0x3F14, "iSCSI IP address changed")
+SENSE_CODE(0x3F15, "Inspect referrals sense descriptors")
+SENSE_CODE(0x3F16, "Microcode has been changed without reset")
+/*
+ *	SENSE_CODE(0x40NN, "Ram failure")
+ *	SENSE_CODE(0x40NN, "Diagnostic failure on component nn")
+ *	SENSE_CODE(0x41NN, "Data path failure")
+ *	SENSE_CODE(0x42NN, "Power-on or self-test failure")
+ */
+SENSE_CODE(0x4300, "Message error")
+
+SENSE_CODE(0x4400, "Internal target failure")
+SENSE_CODE(0x4401, "Persistent reservation information lost")
+SENSE_CODE(0x4471, "ATA device failed set features")
+
+SENSE_CODE(0x4500, "Select or reselect failure")
+
+SENSE_CODE(0x4600, "Unsuccessful soft reset")
+
+SENSE_CODE(0x4700, "Scsi parity error")
+SENSE_CODE(0x4701, "Data phase CRC error detected")
+SENSE_CODE(0x4702, "Scsi parity error detected during st data phase")
+SENSE_CODE(0x4703, "Information unit iuCRC error detected")
+SENSE_CODE(0x4704, "Asynchronous information protection error detected")
+SENSE_CODE(0x4705, "Protocol service CRC error")
+SENSE_CODE(0x4706, "Phy test function in progress")
+SENSE_CODE(0x477f, "Some commands cleared by iSCSI Protocol event")
+
+SENSE_CODE(0x4800, "Initiator detected error message received")
+
+SENSE_CODE(0x4900, "Invalid message error")
+
+SENSE_CODE(0x4A00, "Command phase error")
+
+SENSE_CODE(0x4B00, "Data phase error")
+SENSE_CODE(0x4B01, "Invalid target port transfer tag received")
+SENSE_CODE(0x4B02, "Too much write data")
+SENSE_CODE(0x4B03, "Ack/nak timeout")
+SENSE_CODE(0x4B04, "Nak received")
+SENSE_CODE(0x4B05, "Data offset error")
+SENSE_CODE(0x4B06, "Initiator response timeout")
+SENSE_CODE(0x4B07, "Connection lost")
+SENSE_CODE(0x4B08, "Data-in buffer overflow - data buffer size")
+SENSE_CODE(0x4B09, "Data-in buffer overflow - data buffer descriptor area")
+SENSE_CODE(0x4B0A, "Data-in buffer error")
+SENSE_CODE(0x4B0B, "Data-out buffer overflow - data buffer size")
+SENSE_CODE(0x4B0C, "Data-out buffer overflow - data buffer descriptor area")
+SENSE_CODE(0x4B0D, "Data-out buffer error")
+SENSE_CODE(0x4B0E, "PCIe fabric error")
+SENSE_CODE(0x4B0F, "PCIe completion timeout")
+SENSE_CODE(0x4B10, "PCIe completer abort")
+SENSE_CODE(0x4B11, "PCIe poisoned tlp received")
+SENSE_CODE(0x4B12, "PCIe eCRC check failed")
+SENSE_CODE(0x4B13, "PCIe unsupported request")
+SENSE_CODE(0x4B14, "PCIe acs violation")
+SENSE_CODE(0x4B15, "PCIe tlp prefix blocked")
+
+SENSE_CODE(0x4C00, "Logical unit failed self-configuration")
+/*
+ *	SENSE_CODE(0x4DNN, "Tagged overlapped commands (nn = queue tag)")
+ */
+SENSE_CODE(0x4E00, "Overlapped commands attempted")
+
+SENSE_CODE(0x5000, "Write append error")
+SENSE_CODE(0x5001, "Write append position error")
+SENSE_CODE(0x5002, "Position error related to timing")
+
+SENSE_CODE(0x5100, "Erase failure")
+SENSE_CODE(0x5101, "Erase failure - incomplete erase operation detected")
+
+SENSE_CODE(0x5200, "Cartridge fault")
+
+SENSE_CODE(0x5300, "Media load or eject failed")
+SENSE_CODE(0x5301, "Unload tape failure")
+SENSE_CODE(0x5302, "Medium removal prevented")
+SENSE_CODE(0x5303, "Medium removal prevented by data transfer element")
+SENSE_CODE(0x5304, "Medium thread or unthread failure")
+SENSE_CODE(0x5305, "Volume identifier invalid")
+SENSE_CODE(0x5306, "Volume identifier missing")
+SENSE_CODE(0x5307, "Duplicate volume identifier")
+SENSE_CODE(0x5308, "Element status unknown")
+SENSE_CODE(0x5309, "Data transfer device error - load failed")
+SENSE_CODE(0x530a, "Data transfer device error - unload failed")
+SENSE_CODE(0x530b, "Data transfer device error - unload missing")
+SENSE_CODE(0x530c, "Data transfer device error - eject failed")
+SENSE_CODE(0x530d, "Data transfer device error - library communication failed")
+
+SENSE_CODE(0x5400, "Scsi to host system interface failure")
+
+SENSE_CODE(0x5500, "System resource failure")
+SENSE_CODE(0x5501, "System buffer full")
+SENSE_CODE(0x5502, "Insufficient reservation resources")
+SENSE_CODE(0x5503, "Insufficient resources")
+SENSE_CODE(0x5504, "Insufficient registration resources")
+SENSE_CODE(0x5505, "Insufficient access control resources")
+SENSE_CODE(0x5506, "Auxiliary memory out of space")
+SENSE_CODE(0x5507, "Quota error")
+SENSE_CODE(0x5508, "Maximum number of supplemental decryption keys exceeded")
+SENSE_CODE(0x5509, "Medium auxiliary memory not accessible")
+SENSE_CODE(0x550A, "Data currently unavailable")
+SENSE_CODE(0x550B, "Insufficient power for operation")
+SENSE_CODE(0x550C, "Insufficient resources to create rod")
+SENSE_CODE(0x550D, "Insufficient resources to create rod token")
+SENSE_CODE(0x550E, "Insufficient zone resources")
+
+SENSE_CODE(0x5700, "Unable to recover table-of-contents")
+
+SENSE_CODE(0x5800, "Generation does not exist")
+
+SENSE_CODE(0x5900, "Updated block read")
+
+SENSE_CODE(0x5A00, "Operator request or state change input")
+SENSE_CODE(0x5A01, "Operator medium removal request")
+SENSE_CODE(0x5A02, "Operator selected write protect")
+SENSE_CODE(0x5A03, "Operator selected write permit")
+
+SENSE_CODE(0x5B00, "Log exception")
+SENSE_CODE(0x5B01, "Threshold condition met")
+SENSE_CODE(0x5B02, "Log counter at maximum")
+SENSE_CODE(0x5B03, "Log list codes exhausted")
+
+SENSE_CODE(0x5C00, "Rpl status change")
+SENSE_CODE(0x5C01, "Spindles synchronized")
+SENSE_CODE(0x5C02, "Spindles not synchronized")
+
+SENSE_CODE(0x5D00, "Failure prediction threshold exceeded")
+SENSE_CODE(0x5D01, "Media failure prediction threshold exceeded")
+SENSE_CODE(0x5D02, "Logical unit failure prediction threshold exceeded")
+SENSE_CODE(0x5D03, "Spare area exhaustion prediction threshold exceeded")
+SENSE_CODE(0x5D10, "Hardware impending failure general hard drive failure")
+SENSE_CODE(0x5D11, "Hardware impending failure drive error rate too high")
+SENSE_CODE(0x5D12, "Hardware impending failure data error rate too high")
+SENSE_CODE(0x5D13, "Hardware impending failure seek error rate too high")
+SENSE_CODE(0x5D14, "Hardware impending failure too many block reassigns")
+SENSE_CODE(0x5D15, "Hardware impending failure access times too high")
+SENSE_CODE(0x5D16, "Hardware impending failure start unit times too high")
+SENSE_CODE(0x5D17, "Hardware impending failure channel parametrics")
+SENSE_CODE(0x5D18, "Hardware impending failure controller detected")
+SENSE_CODE(0x5D19, "Hardware impending failure throughput performance")
+SENSE_CODE(0x5D1A, "Hardware impending failure seek time performance")
+SENSE_CODE(0x5D1B, "Hardware impending failure spin-up retry count")
+SENSE_CODE(0x5D1C, "Hardware impending failure drive calibration retry count")
+SENSE_CODE(0x5D20, "Controller impending failure general hard drive failure")
+SENSE_CODE(0x5D21, "Controller impending failure drive error rate too high")
+SENSE_CODE(0x5D22, "Controller impending failure data error rate too high")
+SENSE_CODE(0x5D23, "Controller impending failure seek error rate too high")
+SENSE_CODE(0x5D24, "Controller impending failure too many block reassigns")
+SENSE_CODE(0x5D25, "Controller impending failure access times too high")
+SENSE_CODE(0x5D26, "Controller impending failure start unit times too high")
+SENSE_CODE(0x5D27, "Controller impending failure channel parametrics")
+SENSE_CODE(0x5D28, "Controller impending failure controller detected")
+SENSE_CODE(0x5D29, "Controller impending failure throughput performance")
+SENSE_CODE(0x5D2A, "Controller impending failure seek time performance")
+SENSE_CODE(0x5D2B, "Controller impending failure spin-up retry count")
+SENSE_CODE(0x5D2C, "Controller impending failure drive calibration retry count")
+SENSE_CODE(0x5D30, "Data channel impending failure general hard drive failure")
+SENSE_CODE(0x5D31, "Data channel impending failure drive error rate too high")
+SENSE_CODE(0x5D32, "Data channel impending failure data error rate too high")
+SENSE_CODE(0x5D33, "Data channel impending failure seek error rate too high")
+SENSE_CODE(0x5D34, "Data channel impending failure too many block reassigns")
+SENSE_CODE(0x5D35, "Data channel impending failure access times too high")
+SENSE_CODE(0x5D36, "Data channel impending failure start unit times too high")
+SENSE_CODE(0x5D37, "Data channel impending failure channel parametrics")
+SENSE_CODE(0x5D38, "Data channel impending failure controller detected")
+SENSE_CODE(0x5D39, "Data channel impending failure throughput performance")
+SENSE_CODE(0x5D3A, "Data channel impending failure seek time performance")
+SENSE_CODE(0x5D3B, "Data channel impending failure spin-up retry count")
+SENSE_CODE(0x5D3C, "Data channel impending failure drive calibration retry count")
+SENSE_CODE(0x5D40, "Servo impending failure general hard drive failure")
+SENSE_CODE(0x5D41, "Servo impending failure drive error rate too high")
+SENSE_CODE(0x5D42, "Servo impending failure data error rate too high")
+SENSE_CODE(0x5D43, "Servo impending failure seek error rate too high")
+SENSE_CODE(0x5D44, "Servo impending failure too many block reassigns")
+SENSE_CODE(0x5D45, "Servo impending failure access times too high")
+SENSE_CODE(0x5D46, "Servo impending failure start unit times too high")
+SENSE_CODE(0x5D47, "Servo impending failure channel parametrics")
+SENSE_CODE(0x5D48, "Servo impending failure controller detected")
+SENSE_CODE(0x5D49, "Servo impending failure throughput performance")
+SENSE_CODE(0x5D4A, "Servo impending failure seek time performance")
+SENSE_CODE(0x5D4B, "Servo impending failure spin-up retry count")
+SENSE_CODE(0x5D4C, "Servo impending failure drive calibration retry count")
+SENSE_CODE(0x5D50, "Spindle impending failure general hard drive failure")
+SENSE_CODE(0x5D51, "Spindle impending failure drive error rate too high")
+SENSE_CODE(0x5D52, "Spindle impending failure data error rate too high")
+SENSE_CODE(0x5D53, "Spindle impending failure seek error rate too high")
+SENSE_CODE(0x5D54, "Spindle impending failure too many block reassigns")
+SENSE_CODE(0x5D55, "Spindle impending failure access times too high")
+SENSE_CODE(0x5D56, "Spindle impending failure start unit times too high")
+SENSE_CODE(0x5D57, "Spindle impending failure channel parametrics")
+SENSE_CODE(0x5D58, "Spindle impending failure controller detected")
+SENSE_CODE(0x5D59, "Spindle impending failure throughput performance")
+SENSE_CODE(0x5D5A, "Spindle impending failure seek time performance")
+SENSE_CODE(0x5D5B, "Spindle impending failure spin-up retry count")
+SENSE_CODE(0x5D5C, "Spindle impending failure drive calibration retry count")
+SENSE_CODE(0x5D60, "Firmware impending failure general hard drive failure")
+SENSE_CODE(0x5D61, "Firmware impending failure drive error rate too high")
+SENSE_CODE(0x5D62, "Firmware impending failure data error rate too high")
+SENSE_CODE(0x5D63, "Firmware impending failure seek error rate too high")
+SENSE_CODE(0x5D64, "Firmware impending failure too many block reassigns")
+SENSE_CODE(0x5D65, "Firmware impending failure access times too high")
+SENSE_CODE(0x5D66, "Firmware impending failure start unit times too high")
+SENSE_CODE(0x5D67, "Firmware impending failure channel parametrics")
+SENSE_CODE(0x5D68, "Firmware impending failure controller detected")
+SENSE_CODE(0x5D69, "Firmware impending failure throughput performance")
+SENSE_CODE(0x5D6A, "Firmware impending failure seek time performance")
+SENSE_CODE(0x5D6B, "Firmware impending failure spin-up retry count")
+SENSE_CODE(0x5D6C, "Firmware impending failure drive calibration retry count")
+SENSE_CODE(0x5DFF, "Failure prediction threshold exceeded (false)")
+
+SENSE_CODE(0x5E00, "Low power condition on")
+SENSE_CODE(0x5E01, "Idle condition activated by timer")
+SENSE_CODE(0x5E02, "Standby condition activated by timer")
+SENSE_CODE(0x5E03, "Idle condition activated by command")
+SENSE_CODE(0x5E04, "Standby condition activated by command")
+SENSE_CODE(0x5E05, "Idle_b condition activated by timer")
+SENSE_CODE(0x5E06, "Idle_b condition activated by command")
+SENSE_CODE(0x5E07, "Idle_c condition activated by timer")
+SENSE_CODE(0x5E08, "Idle_c condition activated by command")
+SENSE_CODE(0x5E09, "Standby_y condition activated by timer")
+SENSE_CODE(0x5E0A, "Standby_y condition activated by command")
+SENSE_CODE(0x5E41, "Power state change to active")
+SENSE_CODE(0x5E42, "Power state change to idle")
+SENSE_CODE(0x5E43, "Power state change to standby")
+SENSE_CODE(0x5E45, "Power state change to sleep")
+SENSE_CODE(0x5E47, "Power state change to device control")
+
+SENSE_CODE(0x6000, "Lamp failure")
+
+SENSE_CODE(0x6100, "Video acquisition error")
+SENSE_CODE(0x6101, "Unable to acquire video")
+SENSE_CODE(0x6102, "Out of focus")
+
+SENSE_CODE(0x6200, "Scan head positioning error")
+
+SENSE_CODE(0x6300, "End of user area encountered on this track")
+SENSE_CODE(0x6301, "Packet does not fit in available space")
+
+SENSE_CODE(0x6400, "Illegal mode for this track")
+SENSE_CODE(0x6401, "Invalid packet size")
+
+SENSE_CODE(0x6500, "Voltage fault")
+
+SENSE_CODE(0x6600, "Automatic document feeder cover up")
+SENSE_CODE(0x6601, "Automatic document feeder lift up")
+SENSE_CODE(0x6602, "Document jam in automatic document feeder")
+SENSE_CODE(0x6603, "Document miss feed automatic in document feeder")
+
+SENSE_CODE(0x6700, "Configuration failure")
+SENSE_CODE(0x6701, "Configuration of incapable logical units failed")
+SENSE_CODE(0x6702, "Add logical unit failed")
+SENSE_CODE(0x6703, "Modification of logical unit failed")
+SENSE_CODE(0x6704, "Exchange of logical unit failed")
+SENSE_CODE(0x6705, "Remove of logical unit failed")
+SENSE_CODE(0x6706, "Attachment of logical unit failed")
+SENSE_CODE(0x6707, "Creation of logical unit failed")
+SENSE_CODE(0x6708, "Assign failure occurred")
+SENSE_CODE(0x6709, "Multiply assigned logical unit")
+SENSE_CODE(0x670A, "Set target port groups command failed")
+SENSE_CODE(0x670B, "ATA device feature not enabled")
+
+SENSE_CODE(0x6800, "Logical unit not configured")
+SENSE_CODE(0x6801, "Subsidiary logical unit not configured")
+
+SENSE_CODE(0x6900, "Data loss on logical unit")
+SENSE_CODE(0x6901, "Multiple logical unit failures")
+SENSE_CODE(0x6902, "Parity/data mismatch")
+
+SENSE_CODE(0x6A00, "Informational, refer to log")
+
+SENSE_CODE(0x6B00, "State change has occurred")
+SENSE_CODE(0x6B01, "Redundancy level got better")
+SENSE_CODE(0x6B02, "Redundancy level got worse")
+
+SENSE_CODE(0x6C00, "Rebuild failure occurred")
+
+SENSE_CODE(0x6D00, "Recalculate failure occurred")
+
+SENSE_CODE(0x6E00, "Command to logical unit failed")
+
+SENSE_CODE(0x6F00, "Copy protection key exchange failure - authentication failure")
+SENSE_CODE(0x6F01, "Copy protection key exchange failure - key not present")
+SENSE_CODE(0x6F02, "Copy protection key exchange failure - key not established")
+SENSE_CODE(0x6F03, "Read of scrambled sector without authentication")
+SENSE_CODE(0x6F04, "Media region code is mismatched to logical unit region")
+SENSE_CODE(0x6F05, "Drive region must be permanent/region reset count error")
+SENSE_CODE(0x6F06, "Insufficient block count for binding nonce recording")
+SENSE_CODE(0x6F07, "Conflict in binding nonce recording")
+/*
+ *	SENSE_CODE(0x70NN, "Decompression exception short algorithm id of nn")
+ */
+SENSE_CODE(0x7100, "Decompression exception long algorithm id")
+
+SENSE_CODE(0x7200, "Session fixation error")
+SENSE_CODE(0x7201, "Session fixation error writing lead-in")
+SENSE_CODE(0x7202, "Session fixation error writing lead-out")
+SENSE_CODE(0x7203, "Session fixation error - incomplete track in session")
+SENSE_CODE(0x7204, "Empty or partially written reserved track")
+SENSE_CODE(0x7205, "No more track reservations allowed")
+SENSE_CODE(0x7206, "RMZ extension is not allowed")
+SENSE_CODE(0x7207, "No more test zone extensions are allowed")
+
+SENSE_CODE(0x7300, "Cd control error")
+SENSE_CODE(0x7301, "Power calibration area almost full")
+SENSE_CODE(0x7302, "Power calibration area is full")
+SENSE_CODE(0x7303, "Power calibration area error")
+SENSE_CODE(0x7304, "Program memory area update failure")
+SENSE_CODE(0x7305, "Program memory area is full")
+SENSE_CODE(0x7306, "RMA/PMA is almost full")
+SENSE_CODE(0x7310, "Current power calibration area almost full")
+SENSE_CODE(0x7311, "Current power calibration area is full")
+SENSE_CODE(0x7317, "RDZ is full")
+
+SENSE_CODE(0x7400, "Security error")
+SENSE_CODE(0x7401, "Unable to decrypt data")
+SENSE_CODE(0x7402, "Unencrypted data encountered while decrypting")
+SENSE_CODE(0x7403, "Incorrect data encryption key")
+SENSE_CODE(0x7404, "Cryptographic integrity validation failed")
+SENSE_CODE(0x7405, "Error decrypting data")
+SENSE_CODE(0x7406, "Unknown signature verification key")
+SENSE_CODE(0x7407, "Encryption parameters not useable")
+SENSE_CODE(0x7408, "Digital signature validation failure")
+SENSE_CODE(0x7409, "Encryption mode mismatch on read")
+SENSE_CODE(0x740A, "Encrypted block not raw read enabled")
+SENSE_CODE(0x740B, "Incorrect Encryption parameters")
+SENSE_CODE(0x740C, "Unable to decrypt parameter list")
+SENSE_CODE(0x740D, "Encryption algorithm disabled")
+SENSE_CODE(0x7410, "SA creation parameter value invalid")
+SENSE_CODE(0x7411, "SA creation parameter value rejected")
+SENSE_CODE(0x7412, "Invalid SA usage")
+SENSE_CODE(0x7421, "Data Encryption configuration prevented")
+SENSE_CODE(0x7430, "SA creation parameter not supported")
+SENSE_CODE(0x7440, "Authentication failed")
+SENSE_CODE(0x7461, "External data encryption key manager access error")
+SENSE_CODE(0x7462, "External data encryption key manager error")
+SENSE_CODE(0x7463, "External data encryption key not found")
+SENSE_CODE(0x7464, "External data encryption request not authorized")
+SENSE_CODE(0x746E, "External data encryption control timeout")
+SENSE_CODE(0x746F, "External data encryption control error")
+SENSE_CODE(0x7471, "Logical unit access not authorized")
+SENSE_CODE(0x7479, "Security conflict in translated device")
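
The SENSE_CODE() lines above are pure data: the header defines no macro of its own, and whoever includes it is expected to define SENSE_CODE() first so each entry expands into whatever structure that consumer needs (an X-macro table). A minimal, self-contained sketch of that pattern follows; the struct, table, and lookup names are illustrative assumptions, not the kernel's actual consumer.

#include <stdio.h>

/* Hypothetical stand-in for the generated header: data only, no macro definition. */
#define SENSE_TABLE \
	SENSE_CODE(0x1800, "Recovered data with error correction applied") \
	SENSE_CODE(0x2000, "Invalid command operation code") \
	SENSE_CODE(0x2500, "Logical unit not supported")

struct sense_entry {
	unsigned short code;	/* ASC in the high byte, ASCQ in the low byte */
	const char *text;
};

/* Expand every entry into an array initializer, then drop the macro again. */
#define SENSE_CODE(c, s) { (c), (s) },
static const struct sense_entry sense_table[] = {
	SENSE_TABLE
};
#undef SENSE_CODE

static const char *sense_text(unsigned short code)
{
	size_t i;

	for (i = 0; i < sizeof(sense_table) / sizeof(sense_table[0]); i++)
		if (sense_table[i].code == code)
			return sense_table[i].text;
	return "Unknown additional sense code";
}

int main(void)
{
	printf("0x2000: %s\n", sense_text(0x2000));
	return 0;
}
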
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index d7f5ba6..8ed778d 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -95,6 +95,8 @@
 #define SNIC_DEV_RST_NOTSUP		BIT(25)
 #define SNIC_SCSI_CLEANUP		BIT(26)
 #define SNIC_HOST_RESET_ISSUED		BIT(27)
+#define SNIC_HOST_RESET_CMD_TERM	\
+	(SNIC_DEV_RST_NOTSUP | SNIC_SCSI_CLEANUP | SNIC_HOST_RESET_ISSUED)
 
 #define SNIC_ABTS_TIMEOUT		30000		/* msec */
 #define SNIC_LUN_RESET_TIMEOUT		30000		/* msec */
@@ -216,9 +218,10 @@
 	SNIC_MSIX_INTR_MAX,
 };
 
+#define SNIC_INTRHDLR_NAMSZ	(2 * IFNAMSIZ)
 struct snic_msix_entry {
 	int requested;
-	char devname[IFNAMSIZ];
+	char devname[SNIC_INTRHDLR_NAMSZ];
 	irqreturn_t (*isr)(int, void *);
 	void *devid;
 };
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index ab0e06b..449b03f 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -39,17 +39,15 @@
 {
 	struct snic *snic = container_of(work, struct snic, link_work);
 
-	if (snic->config.xpt_type != SNIC_DAS) {
-		SNIC_HOST_INFO(snic->shost, "Link Event Received.\n");
-		SNIC_ASSERT_NOT_IMPL(1);
-
+	if (snic->config.xpt_type == SNIC_DAS)
 		return;
-	}
 
 	snic->link_status = svnic_dev_link_status(snic->vdev);
 	snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev);
 	SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n",
 		       ((snic->link_status) ? "Up" : "Down"));
+
+	SNIC_ASSERT_NOT_IMPL(1);
 }
 
 
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 1686f01..d302803 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -264,12 +264,14 @@
 		   "Aborts Fail                 : %lld\n"
 		   "Aborts Driver Timeout       : %lld\n"
 		   "Abort FW Timeout            : %lld\n"
-		   "Abort IO NOT Found          : %lld\n",
+		   "Abort IO NOT Found          : %lld\n"
+		   "Abort Queuing Failed        : %lld\n",
 		   (u64) atomic64_read(&stats->abts.num),
 		   (u64) atomic64_read(&stats->abts.fail),
 		   (u64) atomic64_read(&stats->abts.drv_tmo),
 		   (u64) atomic64_read(&stats->abts.fw_tmo),
-		   (u64) atomic64_read(&stats->abts.io_not_found));
+		   (u64) atomic64_read(&stats->abts.io_not_found),
+		   (u64) atomic64_read(&stats->abts.q_fail));
 
 	/* Dump Reset Stats */
 	seq_printf(sfp,
@@ -316,7 +318,9 @@
 	seq_printf(sfp,
 		   "Last ISR Time               : %llu (%8lu.%8lu)\n"
 		   "Last Ack Time               : %llu (%8lu.%8lu)\n"
-		   "ISRs                        : %llu\n"
+		   "Ack ISRs                    : %llu\n"
+		   "IO Cmpl ISRs                : %llu\n"
+		   "Err Notify ISRs             : %llu\n"
 		   "Max CQ Entries              : %lld\n"
 		   "Data Count Mismatch         : %lld\n"
 		   "IOs w/ Timeout Status       : %lld\n"
@@ -324,12 +328,17 @@
 		   "IOs w/ SGL Invalid Stat     : %lld\n"
 		   "WQ Desc Alloc Fail          : %lld\n"
 		   "Queue Full                  : %lld\n"
+		   "Queue Ramp Up               : %lld\n"
+		   "Queue Ramp Down             : %lld\n"
+		   "Queue Last Queue Depth      : %lld\n"
 		   "Target Not Ready            : %lld\n",
 		   (u64) stats->misc.last_isr_time,
 		   last_isr_tms.tv_sec, last_isr_tms.tv_nsec,
 		   (u64)stats->misc.last_ack_time,
 		   last_ack_tms.tv_sec, last_ack_tms.tv_nsec,
-		   (u64) atomic64_read(&stats->misc.isr_cnt),
+		   (u64) atomic64_read(&stats->misc.ack_isr_cnt),
+		   (u64) atomic64_read(&stats->misc.cmpl_isr_cnt),
+		   (u64) atomic64_read(&stats->misc.errnotify_isr_cnt),
 		   (u64) atomic64_read(&stats->misc.max_cq_ents),
 		   (u64) atomic64_read(&stats->misc.data_cnt_mismat),
 		   (u64) atomic64_read(&stats->misc.io_tmo),
@@ -337,6 +346,9 @@
 		   (u64) atomic64_read(&stats->misc.sgl_inval),
 		   (u64) atomic64_read(&stats->misc.wq_alloc_fail),
 		   (u64) atomic64_read(&stats->misc.qfull),
+		   (u64) atomic64_read(&stats->misc.qsz_rampup),
+		   (u64) atomic64_read(&stats->misc.qsz_rampdown),
+		   (u64) atomic64_read(&stats->misc.last_qsz),
 		   (u64) atomic64_read(&stats->misc.tgt_not_rdy));
 
 	return 0;
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index 5f63217..b0fefd6 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -171,7 +171,7 @@
 			 tgt->channel,
 			 tgt->scsi_tgt_id,
 			 SCAN_WILD_CARD,
-			 1);
+			 SCSI_SCAN_RESCAN);
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
@@ -480,10 +480,21 @@
 snic_disc_start(struct snic *snic)
 {
 	struct snic_disc *disc = &snic->disc;
+	unsigned long flags;
 	int ret = 0;
 
 	SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");
 
+	spin_lock_irqsave(&snic->snic_lock, flags);
+	if (snic->in_remove) {
+		spin_unlock_irqrestore(&snic->snic_lock, flags);
+		SNIC_ERR("snic driver removal in progress ...\n");
+		ret = 0;
+
+		return ret;
+	}
+	spin_unlock_irqrestore(&snic->snic_lock, flags);
+
 	mutex_lock(&disc->mutex);
 	if (disc->state == SNIC_DISC_PENDING) {
 		disc->req_cnt++;
@@ -533,6 +544,8 @@
 	struct list_head *cur, *nxt;
 	unsigned long flags;
 
+	scsi_flush_work(snic->shost);
+
 	mutex_lock(&snic->disc.mutex);
 	spin_lock_irqsave(snic->shost->host_lock, flags);
 
@@ -545,7 +558,7 @@
 		tgt = NULL;
 	}
 	spin_unlock_irqrestore(snic->shost->host_lock, flags);
-
-	scsi_flush_work(snic->shost);
 	mutex_unlock(&snic->disc.mutex);
+
+	flush_workqueue(snic_glob->event_q);
 } /* end of snic_tgt_del_all */
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
index 2cfaf2d..c5f9e19 100644
--- a/drivers/scsi/snic/snic_fwint.h
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -414,7 +414,7 @@
 /* Payload 88 bytes = 128 - 24 - 16 */
 #define SNIC_HOST_REQ_PAYLOAD	((int)(SNIC_HOST_REQ_LEN -		\
 					sizeof(struct snic_io_hdr) -	\
-					(2 * sizeof(u64))))
+					(2 * sizeof(u64)) - sizeof(ulong)))
 
 /*
  * snic_host_req: host -> firmware request
@@ -448,6 +448,8 @@
 		/* hba reset */
 		struct snic_hba_reset		reset;
 	} u;
+
+	ulong req_pa;
 }; /* end of snic_host_req structure */
 
 
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 993db7d..8e69548 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -48,7 +48,7 @@
 	SNIC_TRC(snic->shost->host_no, 0, 0,
 		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
 		 0);
-	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+
 	buf->os_buf = NULL;
 }
 
@@ -137,13 +137,36 @@
 	return 0;
 }
 
+static int
+snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
+{
+	int nr_wqdesc = snic->config.wq_enet_desc_count;
+
+	if (q_num > 0) {
+		/*
+		 * Multi Queue case, additional care is required.
+		 * Per WQ active requests need to be maintained.
+		 */
+		SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
+		SNIC_BUG_ON(q_num > 0);
+
+		return -1;
+	}
+
+	nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);
+
+	return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1);
+}
+
 int
 snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
 {
 	dma_addr_t pa = 0;
 	unsigned long flags;
 	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
+	struct snic_host_req *req = (struct snic_host_req *) os_buf;
 	long act_reqs;
+	long desc_avail = 0;
 	int q_num = 0;
 
 	snic_print_desc(__func__, os_buf, len);
@@ -156,11 +179,15 @@
 		return -ENOMEM;
 	}
 
+	req->req_pa = (ulong)pa;
+
 	q_num = snic_select_wq(snic);
 
 	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
-	if (!svnic_wq_desc_avail(snic->wq)) {
+	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
+	if (desc_avail <= 0) {
 		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+		req->req_pa = 0;
 		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
 		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
 		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);
@@ -169,10 +196,13 @@
 	}
 
 	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
+	/*
+	 * Update stats
+	 * note: when multi queue enabled, fw actv_reqs should be per queue.
+	 */
+	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
 	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
 
-	/* Update stats */
-	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
 	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
 		atomic64_set(&fwstats->max_actv_reqs, act_reqs);
 
@@ -318,11 +348,31 @@
 		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
 		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);
 
-	if (rqi->abort_req)
-		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
+	if (rqi->abort_req) {
+		if (rqi->abort_req->req_pa)
+			pci_unmap_single(snic->pdev,
+					 rqi->abort_req->req_pa,
+					 sizeof(struct snic_host_req),
+					 PCI_DMA_TODEVICE);
 
-	if (rqi->dr_req)
+		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
+	}
+
+	if (rqi->dr_req) {
+		if (rqi->dr_req->req_pa)
+			pci_unmap_single(snic->pdev,
+					 rqi->dr_req->req_pa,
+					 sizeof(struct snic_host_req),
+					 PCI_DMA_TODEVICE);
+
 		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
+	}
+
+	if (rqi->req->req_pa)
+		pci_unmap_single(snic->pdev,
+				 rqi->req->req_pa,
+				 rqi->req_len,
+				 PCI_DMA_TODEVICE);
 
 	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
 }
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
index a85fae2..f552003 100644
--- a/drivers/scsi/snic/snic_isr.c
+++ b/drivers/scsi/snic/snic_isr.c
@@ -38,7 +38,7 @@
 	unsigned long wq_work_done = 0;
 
 	snic->s_stats.misc.last_isr_time = jiffies;
-	atomic64_inc(&snic->s_stats.misc.isr_cnt);
+	atomic64_inc(&snic->s_stats.misc.ack_isr_cnt);
 
 	wq_work_done = snic_wq_cmpl_handler(snic, -1);
 	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ],
@@ -56,7 +56,7 @@
 	unsigned long iocmpl_work_done = 0;
 
 	snic->s_stats.misc.last_isr_time = jiffies;
-	atomic64_inc(&snic->s_stats.misc.isr_cnt);
+	atomic64_inc(&snic->s_stats.misc.cmpl_isr_cnt);
 
 	iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1);
 	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL],
@@ -73,7 +73,7 @@
 	struct snic *snic = data;
 
 	snic->s_stats.misc.last_isr_time = jiffies;
-	atomic64_inc(&snic->s_stats.misc.isr_cnt);
+	atomic64_inc(&snic->s_stats.misc.errnotify_isr_cnt);
 
 	svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]);
 	snic_log_q_error(snic);
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 2b3c253..396b32d 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -98,11 +98,18 @@
 static int
 snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
+	struct snic *snic = shost_priv(sdev->host);
 	int qsz = 0;
 
 	qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
+	if (qsz < sdev->queue_depth)
+		atomic64_inc(&snic->s_stats.misc.qsz_rampdown);
+	else if (qsz > sdev->queue_depth)
+		atomic64_inc(&snic->s_stats.misc.qsz_rampup);
+
+	atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);
+
 	scsi_change_queue_depth(sdev, qsz);
-	SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth);
 
 	return sdev->queue_depth;
 }
@@ -624,19 +631,6 @@
 		goto err_free_tmreq_pool;
 	}
 
-	/*
-	 * Initialization done with PCI system, hardware, firmware.
-	 * Add shost to SCSI
-	 */
-	ret = snic_add_host(shost, pdev);
-	if (ret) {
-		SNIC_HOST_ERR(shost,
-			      "Adding scsi host Failed ... exiting. %d\n",
-			      ret);
-
-		goto err_notify_unset;
-	}
-
 	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
 	list_add_tail(&snic->list, &snic_glob->snic_list);
 	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
@@ -669,8 +663,6 @@
 	for (i = 0; i < snic->intr_count; i++)
 		svnic_intr_unmask(&snic->intr[i]);
 
-	snic_set_state(snic, SNIC_ONLINE);
-
 	/* Get snic params */
 	ret = snic_get_conf(snic);
 	if (ret) {
@@ -681,6 +673,21 @@
 		goto err_get_conf;
 	}
 
+	/*
+	 * Initialization done with PCI system, hardware, firmware.
+	 * Add shost to SCSI
+	 */
+	ret = snic_add_host(shost, pdev);
+	if (ret) {
+		SNIC_HOST_ERR(shost,
+			      "Adding scsi host Failed ... exiting. %d\n",
+			      ret);
+
+		goto err_get_conf;
+	}
+
+	snic_set_state(snic, SNIC_ONLINE);
+
 	ret = snic_disc_start(snic);
 	if (ret) {
 		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
@@ -705,6 +712,8 @@
 	svnic_dev_disable(snic->vdev);
 
 err_vdev_enable:
+	svnic_dev_notify_unset(snic->vdev);
+
 	for (i = 0; i < snic->wq_count; i++) {
 		int rc = 0;
 
@@ -718,9 +727,6 @@
 	}
 	snic_del_host(snic->shost);
 
-err_notify_unset:
-	svnic_dev_notify_unset(snic->vdev);
-
 err_free_tmreq_pool:
 	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);
 
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index 2c7b4c3..abada16 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -221,11 +221,15 @@
 			pa, /* sense buffer pa */
 			SCSI_SENSE_BUFFERSIZE);
 
+	atomic64_inc(&snic->s_stats.io.active);
 	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
-	if (ret)
+	if (ret) {
+		atomic64_dec(&snic->s_stats.io.active);
 		SNIC_HOST_ERR(snic->shost,
 			      "QIcmnd: Queuing Icmnd Failed. ret = %d\n",
 			      ret);
+	} else
+		snic_stats_update_active_ios(&snic->s_stats);
 
 	return ret;
 } /* end of snic_queue_icmnd_req */
@@ -361,8 +365,7 @@
 	if (ret) {
 		SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
 		ret = SCSI_MLQUEUE_HOST_BUSY;
-	} else
-		snic_stats_update_active_ios(&snic->s_stats);
+	}
 
 	atomic_dec(&snic->ios_inflight);
 
@@ -598,6 +601,12 @@
 		      sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
 		      CMD_FLAGS(sc), rqi);
 
+	if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
+		spin_unlock_irqrestore(io_lock, flags);
+
+		return;
+	}
+
 	SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);
 	WARN_ON_ONCE(req);
 	if (!rqi) {
@@ -779,6 +788,11 @@
 
 	io_lock = snic_io_lock_hash(snic, sc);
 	spin_lock_irqsave(io_lock, flags);
+	if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
+		spin_unlock_irqrestore(io_lock, flags);
+
+		return ret;
+	}
 	rqi = (struct snic_req_info *) CMD_SP(sc);
 	WARN_ON_ONCE(!rqi);
 
@@ -1001,10 +1015,11 @@
 	unsigned long flags, gflags;
 	int ret = 0;
 
-	SNIC_HOST_INFO(snic->shost,
-		       "reset_cmpl:HBA Reset Completion received.\n");
-
 	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
+	SNIC_HOST_INFO(snic->shost,
+		       "reset_cmpl:Tag %d ctx %lx cmpl status %s HBA Reset Completion received.\n",
+		       cmnd_id, ctx, snic_io_status_to_str(hdr_stat));
+
 	SNIC_SCSI_DBG(snic->shost,
 		      "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
 		      typ, hdr_stat, cmnd_id, hid, ctx);
@@ -1012,6 +1027,9 @@
 	/* spl case, host reset issued through ioctl */
 	if (cmnd_id == SCSI_NO_TAG) {
 		rqi = (struct snic_req_info *) ctx;
+		SNIC_HOST_INFO(snic->shost,
+			       "reset_cmpl:Tag %d ctx %lx cmpl stat %s\n",
+			       cmnd_id, ctx, snic_io_status_to_str(hdr_stat));
 		sc = rqi->sc;
 
 		goto ioctl_hba_rst;
@@ -1038,6 +1056,10 @@
 		return ret;
 	}
 
+	SNIC_HOST_INFO(snic->shost,
+		       "reset_cmpl: sc %p rqi %p Tag %d flags 0x%llx\n",
+		       sc, rqi, cmnd_id, CMD_FLAGS(sc));
+
 	io_lock = snic_io_lock_hash(snic, sc);
 	spin_lock_irqsave(io_lock, flags);
 
@@ -1454,11 +1476,19 @@
 	case SNIC_STAT_IO_SUCCESS:
 	case SNIC_STAT_IO_NOT_FOUND:
 		ret = SUCCESS;
+		/*
+		 * If the abort path doesn't call scsi_done(), the IO
+		 * timeout count reaches 2 and the LUN is taken offline.
+		 * Call scsi_done() to complete the IO.
+		 */
+		sc->result = (DID_ERROR << 16);
+		sc->scsi_done(sc);
 		break;
 
 	default:
 		/* Firmware completed abort with error */
 		ret = FAILED;
+		rqi = NULL;
 		break;
 	}
 
@@ -1554,6 +1584,7 @@
 	/* Now Queue the abort command to firmware */
 	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
 	if (ret) {
+		atomic64_inc(&snic->s_stats.abts.q_fail);
 		SNIC_HOST_ERR(snic->shost,
 			      "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
 			      tag, ret, CMD_FLAGS(sc));
@@ -1830,6 +1861,9 @@
 
 	snic_release_req_buf(snic, rqi, sc);
 
+	sc->result = (DID_ERROR << 16);
+	sc->scsi_done(sc);
+
 	ret = 0;
 
 	return ret;
@@ -2384,6 +2418,13 @@
 		      "Completing Pending TM Req sc %p, state %s flags 0x%llx\n",
 		      sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc));
 
+	/*
+	 * CASE: FW didn't post the itmf completion due to PCIe errors.
+	 * Mark the abort status as success so that scsi completion is
+	 * called in snic_abort_finish().
+	 */
+	CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS;
+
 	rqi = (struct snic_req_info *) CMD_SP(sc);
 	if (!rqi)
 		return;
@@ -2459,8 +2500,9 @@
 cleanup:
 		sc->result = DID_TRANSPORT_DISRUPTED << 16;
 		SNIC_HOST_INFO(snic->shost,
-			       "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p. rqi %p duration %llu msecs\n",
-			       sc, rqi, (jiffies - st_time));
+			       "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n",
+			       sc, sc->request->tag, CMD_FLAGS(sc), rqi,
+			       jiffies_to_msecs(jiffies - st_time));
 
 		/* Update IO stats */
 		snic_stats_update_io_cmpl(&snic->s_stats);
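
The snic_scsi.c hunk near the top of this file's diff now increments io.active in snic_queue_icmnd_req() itself and decrements it again when queueing fails, leaving snic_stats_update_active_ios() (reworked in the snic_stats.h hunk below) to only sample the counter and record a high-water mark. A rough sketch of that high-water-mark idiom in portable C11 atomics follows; the names are illustrative, and unlike the driver, which tolerates the small read/update race, this version closes it with a compare-and-swap loop.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long active_ios;
static atomic_long max_active_ios;

/* Count a newly queued IO and record a new high-water mark if needed. */
static void io_queued(void)
{
	long now = atomic_fetch_add(&active_ios, 1) + 1;
	long max = atomic_load(&max_active_ios);

	/* Retry if another thread raised the maximum concurrently. */
	while (now > max &&
	       !atomic_compare_exchange_weak(&max_active_ios, &max, now))
		;
}

static void io_completed(void)
{
	atomic_fetch_sub(&active_ios, 1);
}

int main(void)
{
	io_queued();
	io_queued();
	io_completed();
	printf("active=%ld max=%ld\n",
	       (long)atomic_load(&active_ios), (long)atomic_load(&max_active_ios));
	return 0;
}
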
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h
index 11e6148..fd1066b 100644
--- a/drivers/scsi/snic/snic_stats.h
+++ b/drivers/scsi/snic/snic_stats.h
@@ -42,6 +42,7 @@
 	atomic64_t drv_tmo;	/* Abort Driver Timeouts */
 	atomic64_t fw_tmo;	/* Abort Firmware Timeouts */
 	atomic64_t io_not_found;/* Abort IO Not Found */
+	atomic64_t q_fail;	/* Abort Queuing Failed */
 };
 
 struct snic_reset_stats {
@@ -69,7 +70,9 @@
 struct snic_misc_stats {
 	u64	last_isr_time;
 	u64	last_ack_time;
-	atomic64_t isr_cnt;
+	atomic64_t ack_isr_cnt;
+	atomic64_t cmpl_isr_cnt;
+	atomic64_t errnotify_isr_cnt;
 	atomic64_t max_cq_ents;		/* Max CQ Entries */
 	atomic64_t data_cnt_mismat;	/* Data Count Mismatch */
 	atomic64_t io_tmo;
@@ -81,6 +84,9 @@
 	atomic64_t no_icmnd_itmf_cmpls;
 	atomic64_t io_under_run;
 	atomic64_t qfull;
+	atomic64_t qsz_rampup;
+	atomic64_t qsz_rampdown;
+	atomic64_t last_qsz;
 	atomic64_t tgt_not_rdy;
 };
 
@@ -101,9 +107,9 @@
 snic_stats_update_active_ios(struct snic_stats *s_stats)
 {
 	struct snic_io_stats *io = &s_stats->io;
-	u32 nr_active_ios;
+	int nr_active_ios;
 
-	nr_active_ios = atomic64_inc_return(&io->active);
+	nr_active_ios = atomic64_read(&io->active);
 	if (atomic64_read(&io->max_active) < nr_active_ios)
 		atomic64_set(&io->max_active, nr_active_ios);
 
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index e0b5549..dad5fc6 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -263,12 +263,20 @@
 	int wait)
 {
 	struct devcmd2_controller *dc2c = vdev->devcmd2;
-	struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+	struct devcmd2_result *result = NULL;
 	unsigned int i;
 	int delay;
 	int err;
 	u32 posted;
+	u32 fetch_idx;
 	u32 new_posted;
+	u8 color;
+
+	fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index);
+	if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone  */
+		/* Hardware surprise removal: return error */
+		return -ENODEV;
+	}
 
 	posted = ioread32(&dc2c->wq_ctrl->posted_index);
 
@@ -278,6 +286,13 @@
 	}
 
 	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+	if (new_posted == fetch_idx) {
+		pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n",
+			pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted);
+
+		return -EBUSY;
+	}
+
 	dc2c->cmd_ring[posted].cmd = cmd;
 	dc2c->cmd_ring[posted].flags = 0;
 
@@ -299,14 +314,22 @@
 	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
 		return 0;
 
+	result = dc2c->result + dc2c->next_result;
+	color = dc2c->color;
+
+	/*
+	 * Increment next_result after posting the devcmd, irrespective of
+	 * the devcmd result; it must be done exactly once per command.
+	 */
+	dc2c->next_result++;
+	if (dc2c->next_result == dc2c->result_size) {
+		dc2c->next_result = 0;
+		dc2c->color = dc2c->color ? 0 : 1;
+	}
+
 	for (delay = 0; delay < wait; delay++) {
 		udelay(100);
-		if (result->color == dc2c->color) {
-			dc2c->next_result++;
-			if (dc2c->next_result == dc2c->result_size) {
-				dc2c->next_result = 0;
-				dc2c->color = dc2c->color ? 0 : 1;
-			}
+		if (result->color == color) {
 			if (result->error) {
 				err = (int) result->error;
 				if (err != ERR_ECMDUNKNOWN ||
@@ -317,13 +340,6 @@
 				return err;
 			}
 			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
-				/*
-				 * Adding the rmb() prevents the compiler
-				 * and/or CPU from reordering the reads which
-				 * would potentially result in reading stale
-				 * values.
-				 */
-				rmb();
 				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
 					vdev->args[i] = result->results[i];
 			}
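
The devcmd2 hunks above read fetch_index up front to catch both surprise hardware removal (all ones) and a full ring, and they advance next_result and the expected color exactly once per command, before polling for the result slot's color bit to match. A minimal sketch of that color-bit completion idiom follows; the names are invented for illustration (not the snic/vnic API), and a single producer with a polling consumer is assumed.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4

struct result_slot {
	int value;
	unsigned char color;	/* written by the "device" together with the result */
};

struct result_ring {
	struct result_slot slot[RING_SIZE];
	unsigned int next;	/* slot where the next completion will land */
	unsigned char color;	/* color value that means "new" for that slot */
};

/* Capture the slot and expected color, then advance the cursor exactly once. */
static void ring_post(struct result_ring *r, unsigned int *slot, unsigned char *color)
{
	*slot = r->next;
	*color = r->color;

	if (++r->next == RING_SIZE) {
		r->next = 0;
		r->color ^= 1;	/* expected color flips on every wrap */
	}
}

/* The command is complete once the slot's color matches the captured color. */
static bool ring_done(const struct result_ring *r, unsigned int slot, unsigned char color)
{
	return r->slot[slot].color == color;
}

int main(void)
{
	struct result_ring r = { .next = 0, .color = 1 };
	unsigned int slot;
	unsigned char color;

	ring_post(&r, &slot, &color);

	/* Simulate the device posting a result for that slot. */
	r.slot[slot].value = 42;
	r.slot[slot].color = color;

	if (ring_done(&r, slot, color))
		printf("devcmd result: %d\n", r.slot[slot].value);
	return 0;
}
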
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 71c5138..7af5226 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1974,9 +1974,12 @@
 					transfer = (int)cmdstatp->uremainder64;
 				else
 					transfer = 0;
-				if (STp->block_size == 0 &&
-				    cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR)
-					transfer = bytes;
+				if (cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR) {
+					if (STp->block_size == 0)
+						transfer = bytes;
+					/* Some drives set ILI with MEDIUM ERROR */
+					cmdstatp->flags &= ~SENSE_ILI;
+				}
 
 				if (cmdstatp->flags & SENSE_ILI) {	/* ILI */
 					if (STp->block_size == 0 &&
@@ -4941,7 +4944,7 @@
  out_unmap:
 	if (res > 0) {
 		for (j=0; j < res; j++)
-			page_cache_release(pages[j]);
+			put_page(pages[j]);
 		res = 0;
 	}
 	kfree(pages);
@@ -4963,7 +4966,7 @@
 		/* FIXME: cache flush missing for rw==READ
 		 * FIXME: call the correct reference counting function
 		 */
-		page_cache_release(page);
+		put_page(page);
 	}
 	kfree(STbp->mapped_pages);
 	STbp->mapped_pages = NULL;
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index b9de487..3c4c070 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -36,14 +36,10 @@
 #include <scsi/scsi_host.h>
 #include "sun3_scsi.h"
 
-/* Definitions for the core NCR5380 driver. */
-
-#define REAL_DMA
-/* #define SUPPORT_TAGS */
 /* minimum number of bytes to do dma on */
 #define DMA_MIN_SIZE                    129
 
-/* #define MAX_TAGS                     32 */
+/* Definitions for the core NCR5380 driver. */
 
 #define NCR5380_implementation_fields   /* none */
 
@@ -55,14 +51,12 @@
 #define NCR5380_abort                   sun3scsi_abort
 #define NCR5380_info                    sun3scsi_info
 
-#define NCR5380_dma_read_setup(instance, data, count) \
-        sun3scsi_dma_setup(instance, data, count, 0)
-#define NCR5380_dma_write_setup(instance, data, count) \
-        sun3scsi_dma_setup(instance, data, count, 1)
+#define NCR5380_dma_recv_setup(instance, data, count) (count)
+#define NCR5380_dma_send_setup(instance, data, count) (count)
 #define NCR5380_dma_residual(instance) \
         sun3scsi_dma_residual(instance)
 #define NCR5380_dma_xfer_len(instance, cmd, phase) \
-        sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
+        sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd)
 
 #define NCR5380_acquire_dma_irq(instance)    (1)
 #define NCR5380_release_dma_irq(instance)
@@ -78,10 +72,6 @@
 module_param(setup_cmd_per_lun, int, 0);
 static int setup_sg_tablesize = -1;
 module_param(setup_sg_tablesize, int, 0);
-#ifdef SUPPORT_TAGS
-static int setup_use_tagged_queuing = -1;
-module_param(setup_use_tagged_queuing, int, 0);
-#endif
 static int setup_hostid = -1;
 module_param(setup_hostid, int, 0);
 
@@ -263,14 +253,13 @@
 	return last_residual;
 }
 
-static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
-						  struct scsi_cmnd *cmd,
-						  int write_flag)
+static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len,
+                                                  struct scsi_cmnd *cmd)
 {
-	if (cmd->request->cmd_type == REQ_TYPE_FS)
- 		return wanted;
-	else
+	if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
 		return 0;
+
+	return wanted_len;
 }
 
 static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
@@ -408,7 +397,7 @@
 
 }
 	
-#include "atari_NCR5380.c"
+#include "NCR5380.c"
 
 #ifdef SUN3_SCSI_VME
 #define SUN3_SCSI_NAME          "Sun3 NCR5380 VME SCSI"
@@ -516,10 +505,6 @@
 	instance->io_port = (unsigned long)ioaddr;
 	instance->irq = irq->start;
 
-#ifdef SUPPORT_TAGS
-	host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
-#endif
-
 	error = NCR5380_init(instance, host_flags);
 	if (error)
 		goto fail_init;
@@ -527,15 +512,9 @@
 	error = request_irq(instance->irq, scsi_sun3_intr, 0,
 	                    "NCR5380", instance);
 	if (error) {
-#ifdef REAL_DMA
 		pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n",
 		       instance->host_no, instance->irq);
 		goto fail_irq;
-#else
-		pr_warn(PFX "scsi%d: IRQ %d not free, interrupts disabled\n",
-		        instance->host_no, instance->irq);
-		instance->irq = NO_IRQ;
-#endif
 	}
 
 	dregs->csr = 0;
@@ -565,8 +544,7 @@
 	return 0;
 
 fail_host:
-	if (instance->irq != NO_IRQ)
-		free_irq(instance->irq, instance);
+	free_irq(instance->irq, instance);
 fail_irq:
 	NCR5380_exit(instance);
 fail_init:
@@ -583,8 +561,7 @@
 	struct Scsi_Host *instance = platform_get_drvdata(pdev);
 
 	scsi_remove_host(instance);
-	if (instance->irq != NO_IRQ)
-		free_irq(instance->irq, instance);
+	free_irq(instance->irq, instance);
 	NCR5380_exit(instance);
 	scsi_host_put(instance);
 	if (udc_regs)
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index 4615fda..8a8608a 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -1,5 +1,3 @@
-#define PSEUDO_DMA
-
 /*
  * Trantor T128/T128F/T228 driver
  *	Note : architecturally, the T100 and T130 are different and won't 
@@ -76,7 +74,6 @@
 
 #include <scsi/scsi_host.h>
 #include "t128.h"
-#define AUTOPROBE_IRQ
 #include "NCR5380.h"
 
 static struct override {
@@ -210,7 +207,7 @@
 	instance->base = base;
 	((struct NCR5380_hostdata *)instance->hostdata)->base = p;
 
-	if (NCR5380_init(instance, 0))
+	if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP))
 		goto out_unregister;
 
 	NCR5380_maybe_reset_bus(instance);
@@ -294,7 +291,7 @@
 }
 
 /*
- * Function : int NCR5380_pread (struct Scsi_Host *instance, 
+ * Function : int t128_pread (struct Scsi_Host *instance,
  *	unsigned char *dst, int len)
  *
  * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to 
@@ -306,8 +303,8 @@
  * 	timeout.
  */
 
-static inline int
-NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
+static inline int t128_pread(struct Scsi_Host *instance,
+                             unsigned char *dst, int len)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	void __iomem *reg, *base = hostdata->base;
@@ -340,7 +337,7 @@
 }
 
 /*
- * Function : int NCR5380_pwrite (struct Scsi_Host *instance, 
+ * Function : int t128_pwrite (struct Scsi_Host *instance,
  *	unsigned char *src, int len)
  *
  * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
@@ -352,8 +349,8 @@
  * 	timeout.
  */
 
-static inline int
-NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len)
+static inline int t128_pwrite(struct Scsi_Host *instance,
+                              unsigned char *src, int len)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	void __iomem *reg, *base = hostdata->base;
@@ -394,8 +391,6 @@
 	.detect			= t128_detect,
 	.release		= t128_release,
 	.proc_name		= "t128",
-	.show_info		= t128_show_info,
-	.write_info		= t128_write_info,
 	.info			= t128_info,
 	.queuecommand		= t128_queue_command,
 	.eh_abort_handler	= t128_abort,
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index dd16d85..c95bcd8 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -77,14 +77,17 @@
 #define NCR5380_write(reg, value) writeb((value),(T128_address(reg)))
 
 #define NCR5380_dma_xfer_len(instance, cmd, phase)	(cmd->transfersize)
+#define NCR5380_dma_recv_setup		t128_pread
+#define NCR5380_dma_send_setup		t128_pwrite
+#define NCR5380_dma_residual(instance)	(0)
 
 #define NCR5380_intr t128_intr
 #define NCR5380_queue_command t128_queue_command
 #define NCR5380_abort t128_abort
 #define NCR5380_bus_reset t128_bus_reset
 #define NCR5380_info t128_info
-#define NCR5380_show_info t128_show_info
-#define NCR5380_write_info t128_write_info
+
+#define NCR5380_io_delay(x)		udelay(x)
 
 /* 15 14 12 10 7 5 3
    1101 0100 1010 1000 */
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 5ade713..380230f 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -9,7 +9,8 @@
 obj-y				+= fsl/
 obj-$(CONFIG_ARCH_MEDIATEK)	+= mediatek/
 obj-$(CONFIG_ARCH_QCOM)		+= qcom/
-obj-$(CONFIG_ARCH_ROCKCHIP)		+= rockchip/
+obj-$(CONFIG_ARCH_RENESAS)	+= renesas/
+obj-$(CONFIG_ARCH_ROCKCHIP)	+= rockchip/
 obj-$(CONFIG_SOC_SAMSUNG)	+= samsung/
 obj-$(CONFIG_ARCH_SUNXI)	+= sunxi/
 obj-$(CONFIG_ARCH_TEGRA)	+= tegra/
diff --git a/drivers/soc/brcmstb/Kconfig b/drivers/soc/brcmstb/Kconfig
index 39cab3b..7fec3b4 100644
--- a/drivers/soc/brcmstb/Kconfig
+++ b/drivers/soc/brcmstb/Kconfig
@@ -1,6 +1,7 @@
 menuconfig SOC_BRCMSTB
 	bool "Broadcom STB SoC drivers"
 	depends on ARM
+	select SOC_BUS
 	help
 	  Enables drivers for the Broadcom Set-Top Box (STB) series of chips.
 	  This option alone enables only some support code, while the drivers
diff --git a/drivers/soc/brcmstb/common.c b/drivers/soc/brcmstb/common.c
index c262c02..94e7335 100644
--- a/drivers/soc/brcmstb/common.c
+++ b/drivers/soc/brcmstb/common.c
@@ -12,10 +12,18 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/io.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+#include <linux/sys_soc.h>
 
 #include <soc/brcmstb/common.h>
 
+static u32 family_id;
+static u32 product_id;
+
 static const struct of_device_id brcmstb_machine_match[] = {
 	{ .compatible = "brcm,brcmstb", },
 	{ }
@@ -31,3 +39,61 @@
 
 	return of_match_node(brcmstb_machine_match, root) != NULL;
 }
+
+static const struct of_device_id sun_top_ctrl_match[] = {
+	{ .compatible = "brcm,brcmstb-sun-top-ctrl", },
+	{ }
+};
+
+static int __init brcmstb_soc_device_init(void)
+{
+	struct soc_device_attribute *soc_dev_attr;
+	struct soc_device *soc_dev;
+	struct device_node *sun_top_ctrl;
+	void __iomem *sun_top_ctrl_base;
+	int ret = 0;
+
+	sun_top_ctrl = of_find_matching_node(NULL, sun_top_ctrl_match);
+	if (!sun_top_ctrl)
+		return -ENODEV;
+
+	sun_top_ctrl_base = of_iomap(sun_top_ctrl, 0);
+	if (!sun_top_ctrl_base)
+		return -ENODEV;
+
+	family_id = readl(sun_top_ctrl_base);
+	product_id = readl(sun_top_ctrl_base + 0x4);
+
+	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+	if (!soc_dev_attr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	soc_dev_attr->family = kasprintf(GFP_KERNEL, "%x",
+					 family_id >> 28 ?
+					 family_id >> 16 : family_id >> 8);
+	soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%x",
+					 product_id >> 28 ?
+					 product_id >> 16 : product_id >> 8);
+	soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c%d",
+					 ((product_id & 0xf0) >> 4) + 'A',
+					   product_id & 0xf);
+
+	soc_dev = soc_device_register(soc_dev_attr);
+	if (IS_ERR(soc_dev)) {
+		kfree(soc_dev_attr->family);
+		kfree(soc_dev_attr->soc_id);
+		kfree(soc_dev_attr->revision);
+		kfree(soc_dev_attr);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	return 0;
+
+out:
+	iounmap(sun_top_ctrl_base);
+	return ret;
+}
+arch_initcall(brcmstb_soc_device_init);
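To make the ID decoding above a little more concrete, here is a small worked example of how the two SUN_TOP_CTRL words become the exported family/soc_id/revision strings. The register values are invented for illustration; only the shift and mask logic mirrors the code above.

	#include <stdio.h>

	int main(void)
	{
		unsigned int family_id  = 0x72500010;	/* hypothetical value */
		unsigned int product_id = 0x72520021;	/* hypothetical value */

		/* "wide" IDs have a non-zero top nibble and keep 16 bits;
		 * older parts with a zero top nibble use the >> 8 form */
		printf("family %x\n",
		       family_id >> 28 ? family_id >> 16 : family_id >> 8);
		printf("soc_id %x\n",
		       product_id >> 28 ? product_id >> 16 : product_id >> 8);

		/* the low byte of product_id encodes the revision:
		 * (0x21 & 0xf0) >> 4 = 2 -> 'A' + 2 = 'C', 0x21 & 0xf = 1 -> "C1" */
		printf("rev %c%d\n",
		       ((product_id & 0xf0) >> 4) + 'A', product_id & 0xf);
		return 0;
	}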
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 6584571..333eb22 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -18,6 +18,8 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+/* FIXME: needed for gpio_to_chip(); get rid of this */
 #include <linux/gpio.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -37,15 +39,9 @@
 	struct qe_pio_regs saved_regs;
 };
 
-static inline struct qe_gpio_chip *
-to_qe_gpio_chip(struct of_mm_gpio_chip *mm_gc)
-{
-	return container_of(mm_gc, struct qe_gpio_chip, mm_gc);
-}
-
 static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+	struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc);
 	struct qe_pio_regs __iomem *regs = mm_gc->regs;
 
 	qe_gc->cpdata = in_be32(&regs->cpdata);
@@ -69,7 +65,7 @@
 static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+	struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
 	struct qe_pio_regs __iomem *regs = mm_gc->regs;
 	unsigned long flags;
 	u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
@@ -89,7 +85,7 @@
 static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+	struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
 	unsigned long flags;
 
 	spin_lock_irqsave(&qe_gc->lock, flags);
@@ -104,7 +100,7 @@
 static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-	struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+	struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
 	unsigned long flags;
 
 	qe_gpio_set(gc, gpio, val);
@@ -165,7 +161,7 @@
 	}
 
 	mm_gc = to_of_mm_gpio_chip(gc);
-	qe_gc = to_qe_gpio_chip(mm_gc);
+	qe_gc = gpiochip_get_data(gc);
 
 	spin_lock_irqsave(&qe_gc->lock, flags);
 
@@ -302,7 +298,7 @@
 		gc->get = qe_gpio_get;
 		gc->set = qe_gpio_set;
 
-		ret = of_mm_gpiochip_add(np, mm_gc);
+		ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
 		if (ret)
 			goto err;
 		continue;
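The pattern used throughout this conversion is worth spelling out: instead of recovering the driver structure with container_of() on the mmio chip, the private pointer is handed to the gpiolib core at registration time and fetched back with gpiochip_get_data() in every callback. A stripped-down sketch of the same pattern (the struct and function names are illustrative, not from this driver):

	#include <linux/gpio/driver.h>
	#include <linux/of_gpio.h>		/* struct of_mm_gpio_chip */

	struct my_gpio_chip {			/* hypothetical driver state */
		struct of_mm_gpio_chip mm_gc;	/* embeds struct gpio_chip gc */
		u32 shadow;
	};

	static int my_gpio_get(struct gpio_chip *gc, unsigned int offset)
	{
		/* the pointer registered below comes straight back here,
		 * so no container_of() dance on the mmio chip is needed */
		struct my_gpio_chip *mgc = gpiochip_get_data(gc);

		return !!(mgc->shadow & BIT(offset));
	}

	static int my_gpio_register(struct device_node *np,
				    struct my_gpio_chip *mgc)
	{
		mgc->mm_gc.gc.get = my_gpio_get;
		/* the third argument is what gpiochip_get_data() later returns */
		return of_mm_gpiochip_add_data(np, &mgc->mm_gc, mgc);
	}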
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 0d9b19a..3c3e56d 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -52,6 +52,7 @@
 #define PWRAP_DEW_WRITE_TEST_VAL	0xa55a
 
 /* macro for manual command */
+#define PWRAP_MAN_CMD_SPI_WRITE_NEW	(1 << 14)
 #define PWRAP_MAN_CMD_SPI_WRITE		(1 << 13)
 #define PWRAP_MAN_CMD_OP_CSH		(0x0 << 8)
 #define PWRAP_MAN_CMD_OP_CSL		(0x1 << 8)
@@ -69,33 +70,75 @@
 					  PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE | \
 					  PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE)
 
-/* macro for slave device wrapper registers */
-#define PWRAP_DEW_BASE			0xbc00
-#define PWRAP_DEW_EVENT_OUT_EN		(PWRAP_DEW_BASE + 0x0)
-#define PWRAP_DEW_DIO_EN		(PWRAP_DEW_BASE + 0x2)
-#define PWRAP_DEW_EVENT_SRC_EN		(PWRAP_DEW_BASE + 0x4)
-#define PWRAP_DEW_EVENT_SRC		(PWRAP_DEW_BASE + 0x6)
-#define PWRAP_DEW_EVENT_FLAG		(PWRAP_DEW_BASE + 0x8)
-#define PWRAP_DEW_READ_TEST		(PWRAP_DEW_BASE + 0xa)
-#define PWRAP_DEW_WRITE_TEST		(PWRAP_DEW_BASE + 0xc)
-#define PWRAP_DEW_CRC_EN		(PWRAP_DEW_BASE + 0xe)
-#define PWRAP_DEW_CRC_VAL		(PWRAP_DEW_BASE + 0x10)
-#define PWRAP_DEW_MON_GRP_SEL		(PWRAP_DEW_BASE + 0x12)
-#define PWRAP_DEW_MON_FLAG_SEL		(PWRAP_DEW_BASE + 0x14)
-#define PWRAP_DEW_EVENT_TEST		(PWRAP_DEW_BASE + 0x16)
-#define PWRAP_DEW_CIPHER_KEY_SEL	(PWRAP_DEW_BASE + 0x18)
-#define PWRAP_DEW_CIPHER_IV_SEL		(PWRAP_DEW_BASE + 0x1a)
-#define PWRAP_DEW_CIPHER_LOAD		(PWRAP_DEW_BASE + 0x1c)
-#define PWRAP_DEW_CIPHER_START		(PWRAP_DEW_BASE + 0x1e)
-#define PWRAP_DEW_CIPHER_RDY		(PWRAP_DEW_BASE + 0x20)
-#define PWRAP_DEW_CIPHER_MODE		(PWRAP_DEW_BASE + 0x22)
-#define PWRAP_DEW_CIPHER_SWRST		(PWRAP_DEW_BASE + 0x24)
-#define PWRAP_MT8173_DEW_CIPHER_IV0	(PWRAP_DEW_BASE + 0x26)
-#define PWRAP_MT8173_DEW_CIPHER_IV1	(PWRAP_DEW_BASE + 0x28)
-#define PWRAP_MT8173_DEW_CIPHER_IV2	(PWRAP_DEW_BASE + 0x2a)
-#define PWRAP_MT8173_DEW_CIPHER_IV3	(PWRAP_DEW_BASE + 0x2c)
-#define PWRAP_MT8173_DEW_CIPHER_IV4	(PWRAP_DEW_BASE + 0x2e)
-#define PWRAP_MT8173_DEW_CIPHER_IV5	(PWRAP_DEW_BASE + 0x30)
+/* defines for slave device wrapper registers */
+enum dew_regs {
+	PWRAP_DEW_BASE,
+	PWRAP_DEW_DIO_EN,
+	PWRAP_DEW_READ_TEST,
+	PWRAP_DEW_WRITE_TEST,
+	PWRAP_DEW_CRC_EN,
+	PWRAP_DEW_CRC_VAL,
+	PWRAP_DEW_MON_GRP_SEL,
+	PWRAP_DEW_CIPHER_KEY_SEL,
+	PWRAP_DEW_CIPHER_IV_SEL,
+	PWRAP_DEW_CIPHER_RDY,
+	PWRAP_DEW_CIPHER_MODE,
+	PWRAP_DEW_CIPHER_SWRST,
+
+	/* MT6397 only regs */
+	PWRAP_DEW_EVENT_OUT_EN,
+	PWRAP_DEW_EVENT_SRC_EN,
+	PWRAP_DEW_EVENT_SRC,
+	PWRAP_DEW_EVENT_FLAG,
+	PWRAP_DEW_MON_FLAG_SEL,
+	PWRAP_DEW_EVENT_TEST,
+	PWRAP_DEW_CIPHER_LOAD,
+	PWRAP_DEW_CIPHER_START,
+
+	/* MT6323 only regs */
+	PWRAP_DEW_CIPHER_EN,
+	PWRAP_DEW_RDDMY_NO,
+};
+
+static const u32 mt6323_regs[] = {
+	[PWRAP_DEW_BASE] =		0x0000,
+	[PWRAP_DEW_DIO_EN] =		0x018a,
+	[PWRAP_DEW_READ_TEST] =		0x018c,
+	[PWRAP_DEW_WRITE_TEST] =	0x018e,
+	[PWRAP_DEW_CRC_EN] =		0x0192,
+	[PWRAP_DEW_CRC_VAL] =		0x0194,
+	[PWRAP_DEW_MON_GRP_SEL] =	0x0196,
+	[PWRAP_DEW_CIPHER_KEY_SEL] =	0x0198,
+	[PWRAP_DEW_CIPHER_IV_SEL] =	0x019a,
+	[PWRAP_DEW_CIPHER_EN] =		0x019c,
+	[PWRAP_DEW_CIPHER_RDY] =	0x019e,
+	[PWRAP_DEW_CIPHER_MODE] =	0x01a0,
+	[PWRAP_DEW_CIPHER_SWRST] =	0x01a2,
+	[PWRAP_DEW_RDDMY_NO] =		0x01a4,
+};
+
+static const u32 mt6397_regs[] = {
+	[PWRAP_DEW_BASE] =		0xbc00,
+	[PWRAP_DEW_EVENT_OUT_EN] =	0xbc00,
+	[PWRAP_DEW_DIO_EN] =		0xbc02,
+	[PWRAP_DEW_EVENT_SRC_EN] =	0xbc04,
+	[PWRAP_DEW_EVENT_SRC] =		0xbc06,
+	[PWRAP_DEW_EVENT_FLAG] =	0xbc08,
+	[PWRAP_DEW_READ_TEST] =		0xbc0a,
+	[PWRAP_DEW_WRITE_TEST] =	0xbc0c,
+	[PWRAP_DEW_CRC_EN] =		0xbc0e,
+	[PWRAP_DEW_CRC_VAL] =		0xbc10,
+	[PWRAP_DEW_MON_GRP_SEL] =	0xbc12,
+	[PWRAP_DEW_MON_FLAG_SEL] =	0xbc14,
+	[PWRAP_DEW_EVENT_TEST] =	0xbc16,
+	[PWRAP_DEW_CIPHER_KEY_SEL] =	0xbc18,
+	[PWRAP_DEW_CIPHER_IV_SEL] =	0xbc1a,
+	[PWRAP_DEW_CIPHER_LOAD] =	0xbc1c,
+	[PWRAP_DEW_CIPHER_START] =	0xbc1e,
+	[PWRAP_DEW_CIPHER_RDY] =	0xbc20,
+	[PWRAP_DEW_CIPHER_MODE] =	0xbc22,
+	[PWRAP_DEW_CIPHER_SWRST] =	0xbc24,
+};
 
 enum pwrap_regs {
 	PWRAP_MUX_SEL,
@@ -158,6 +201,13 @@
 	PWRAP_DCM_EN,
 	PWRAP_DCM_DBC_PRD,
 
+	/* MT2701 only regs */
+	PWRAP_ADC_CMD_ADDR,
+	PWRAP_PWRAP_ADC_CMD,
+	PWRAP_ADC_RDY_ADDR,
+	PWRAP_ADC_RDATA_ADDR1,
+	PWRAP_ADC_RDATA_ADDR2,
+
 	/* MT8135 only regs */
 	PWRAP_CSHEXT,
 	PWRAP_EVENT_IN_EN,
@@ -194,6 +244,92 @@
 	PWRAP_CIPHER_EN,
 };
 
+static int mt2701_regs[] = {
+	[PWRAP_MUX_SEL] =		0x0,
+	[PWRAP_WRAP_EN] =		0x4,
+	[PWRAP_DIO_EN] =		0x8,
+	[PWRAP_SIDLY] =			0xc,
+	[PWRAP_RDDMY] =			0x18,
+	[PWRAP_SI_CK_CON] =		0x1c,
+	[PWRAP_CSHEXT_WRITE] =		0x20,
+	[PWRAP_CSHEXT_READ] =		0x24,
+	[PWRAP_CSLEXT_START] =		0x28,
+	[PWRAP_CSLEXT_END] =		0x2c,
+	[PWRAP_STAUPD_PRD] =		0x30,
+	[PWRAP_STAUPD_GRPEN] =		0x34,
+	[PWRAP_STAUPD_MAN_TRIG] =	0x38,
+	[PWRAP_STAUPD_STA] =		0x3c,
+	[PWRAP_WRAP_STA] =		0x44,
+	[PWRAP_HARB_INIT] =		0x48,
+	[PWRAP_HARB_HPRIO] =		0x4c,
+	[PWRAP_HIPRIO_ARB_EN] =		0x50,
+	[PWRAP_HARB_STA0] =		0x54,
+	[PWRAP_HARB_STA1] =		0x58,
+	[PWRAP_MAN_EN] =		0x5c,
+	[PWRAP_MAN_CMD] =		0x60,
+	[PWRAP_MAN_RDATA] =		0x64,
+	[PWRAP_MAN_VLDCLR] =		0x68,
+	[PWRAP_WACS0_EN] =		0x6c,
+	[PWRAP_INIT_DONE0] =		0x70,
+	[PWRAP_WACS0_CMD] =		0x74,
+	[PWRAP_WACS0_RDATA] =		0x78,
+	[PWRAP_WACS0_VLDCLR] =		0x7c,
+	[PWRAP_WACS1_EN] =		0x80,
+	[PWRAP_INIT_DONE1] =		0x84,
+	[PWRAP_WACS1_CMD] =		0x88,
+	[PWRAP_WACS1_RDATA] =		0x8c,
+	[PWRAP_WACS1_VLDCLR] =		0x90,
+	[PWRAP_WACS2_EN] =		0x94,
+	[PWRAP_INIT_DONE2] =		0x98,
+	[PWRAP_WACS2_CMD] =		0x9c,
+	[PWRAP_WACS2_RDATA] =		0xa0,
+	[PWRAP_WACS2_VLDCLR] =		0xa4,
+	[PWRAP_INT_EN] =		0xa8,
+	[PWRAP_INT_FLG_RAW] =		0xac,
+	[PWRAP_INT_FLG] =		0xb0,
+	[PWRAP_INT_CLR] =		0xb4,
+	[PWRAP_SIG_ADR] =		0xb8,
+	[PWRAP_SIG_MODE] =		0xbc,
+	[PWRAP_SIG_VALUE] =		0xc0,
+	[PWRAP_SIG_ERRVAL] =		0xc4,
+	[PWRAP_CRC_EN] =		0xc8,
+	[PWRAP_TIMER_EN] =		0xcc,
+	[PWRAP_TIMER_STA] =		0xd0,
+	[PWRAP_WDT_UNIT] =		0xd4,
+	[PWRAP_WDT_SRC_EN] =		0xd8,
+	[PWRAP_WDT_FLG] =		0xdc,
+	[PWRAP_DEBUG_INT_SEL] =		0xe0,
+	[PWRAP_DVFS_ADR0] =		0xe4,
+	[PWRAP_DVFS_WDATA0] =		0xe8,
+	[PWRAP_DVFS_ADR1] =		0xec,
+	[PWRAP_DVFS_WDATA1] =		0xf0,
+	[PWRAP_DVFS_ADR2] =		0xf4,
+	[PWRAP_DVFS_WDATA2] =		0xf8,
+	[PWRAP_DVFS_ADR3] =		0xfc,
+	[PWRAP_DVFS_WDATA3] =		0x100,
+	[PWRAP_DVFS_ADR4] =		0x104,
+	[PWRAP_DVFS_WDATA4] =		0x108,
+	[PWRAP_DVFS_ADR5] =		0x10c,
+	[PWRAP_DVFS_WDATA5] =		0x110,
+	[PWRAP_DVFS_ADR6] =		0x114,
+	[PWRAP_DVFS_WDATA6] =		0x118,
+	[PWRAP_DVFS_ADR7] =		0x11c,
+	[PWRAP_DVFS_WDATA7] =		0x120,
+	[PWRAP_CIPHER_KEY_SEL] =	0x124,
+	[PWRAP_CIPHER_IV_SEL] =		0x128,
+	[PWRAP_CIPHER_EN] =		0x12c,
+	[PWRAP_CIPHER_RDY] =		0x130,
+	[PWRAP_CIPHER_MODE] =		0x134,
+	[PWRAP_CIPHER_SWRST] =		0x138,
+	[PWRAP_DCM_EN] =		0x13c,
+	[PWRAP_DCM_DBC_PRD] =		0x140,
+	[PWRAP_ADC_CMD_ADDR] =		0x144,
+	[PWRAP_PWRAP_ADC_CMD] =		0x148,
+	[PWRAP_ADC_RDY_ADDR] =		0x14c,
+	[PWRAP_ADC_RDATA_ADDR1] =	0x150,
+	[PWRAP_ADC_RDATA_ADDR2] =	0x154,
+};
+
 static int mt8173_regs[] = {
 	[PWRAP_MUX_SEL] =		0x0,
 	[PWRAP_WRAP_EN] =		0x4,
@@ -349,36 +485,28 @@
 	[PWRAP_DCM_DBC_PRD] =		0x160,
 };
 
+enum pmic_type {
+	PMIC_MT6323,
+	PMIC_MT6397,
+};
+
 enum pwrap_type {
+	PWRAP_MT2701,
 	PWRAP_MT8135,
 	PWRAP_MT8173,
 };
 
-struct pmic_wrapper_type {
-	int *regs;
-	enum pwrap_type type;
-	u32 arb_en_all;
-};
-
-static struct pmic_wrapper_type pwrap_mt8135 = {
-	.regs = mt8135_regs,
-	.type = PWRAP_MT8135,
-	.arb_en_all = 0x1ff,
-};
-
-static struct pmic_wrapper_type pwrap_mt8173 = {
-	.regs = mt8173_regs,
-	.type = PWRAP_MT8173,
-	.arb_en_all = 0x3f,
+struct pwrap_slv_type {
+	const u32 *dew_regs;
+	enum pmic_type type;
 };
 
 struct pmic_wrapper {
 	struct device *dev;
 	void __iomem *base;
 	struct regmap *regmap;
-	int *regs;
-	enum pwrap_type type;
-	u32 arb_en_all;
+	const struct pmic_wrapper_type *master;
+	const struct pwrap_slv_type *slave;
 	struct clk *clk_spi;
 	struct clk *clk_wrap;
 	struct reset_control *rstc;
@@ -387,24 +515,26 @@
 	void __iomem *bridge_base;
 };
 
-static inline int pwrap_is_mt8135(struct pmic_wrapper *wrp)
-{
-	return wrp->type == PWRAP_MT8135;
-}
-
-static inline int pwrap_is_mt8173(struct pmic_wrapper *wrp)
-{
-	return wrp->type == PWRAP_MT8173;
-}
+struct pmic_wrapper_type {
+	int *regs;
+	enum pwrap_type type;
+	u32 arb_en_all;
+	u32 int_en_all;
+	u32 spi_w;
+	u32 wdt_src;
+	int has_bridge:1;
+	int (*init_reg_clock)(struct pmic_wrapper *wrp);
+	int (*init_soc_specific)(struct pmic_wrapper *wrp);
+};
 
 static u32 pwrap_readl(struct pmic_wrapper *wrp, enum pwrap_regs reg)
 {
-	return readl(wrp->base + wrp->regs[reg]);
+	return readl(wrp->base + wrp->master->regs[reg]);
 }
 
 static void pwrap_writel(struct pmic_wrapper *wrp, u32 val, enum pwrap_regs reg)
 {
-	writel(val, wrp->base + wrp->regs[reg]);
+	writel(val, wrp->base + wrp->master->regs[reg]);
 }
 
 static bool pwrap_is_fsm_idle(struct pmic_wrapper *wrp)
@@ -522,15 +652,15 @@
 	pwrap_writel(wrp, 1, PWRAP_MAN_EN);
 	pwrap_writel(wrp, 0, PWRAP_DIO_EN);
 
-	pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_CSL,
+	pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_CSL,
 			PWRAP_MAN_CMD);
-	pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_OUTS,
+	pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS,
 			PWRAP_MAN_CMD);
-	pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_CSH,
+	pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_CSH,
 			PWRAP_MAN_CMD);
 
 	for (i = 0; i < 4; i++)
-		pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_OUTS,
+		pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS,
 				PWRAP_MAN_CMD);
 
 	ret = pwrap_wait_for_state(wrp, pwrap_is_sync_idle);
@@ -562,7 +692,8 @@
 
 	for (i = 0; i < 4; i++) {
 		pwrap_writel(wrp, i, PWRAP_SIDLY);
-		pwrap_read(wrp, PWRAP_DEW_READ_TEST, &rdata);
+		pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_READ_TEST],
+			   &rdata);
 		if (rdata == PWRAP_DEW_READ_TEST_VAL) {
 			dev_dbg(wrp->dev, "[Read Test] pass, SIDLY=%x\n", i);
 			pass |= 1 << i;
@@ -580,19 +711,47 @@
 	return 0;
 }
 
-static int pwrap_init_reg_clock(struct pmic_wrapper *wrp)
+static int pwrap_mt8135_init_reg_clock(struct pmic_wrapper *wrp)
 {
-	if (pwrap_is_mt8135(wrp)) {
-		pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
-		pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
-		pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
-		pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
-		pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
-	} else {
-		pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
-		pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+	pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+	pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
+	pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+	pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
+	pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
+
+	return 0;
+}
+
+static int pwrap_mt8173_init_reg_clock(struct pmic_wrapper *wrp)
+{
+	pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
+	pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+	pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
+	pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+
+	return 0;
+}
+
+static int pwrap_mt2701_init_reg_clock(struct pmic_wrapper *wrp)
+{
+	switch (wrp->slave->type) {
+	case PMIC_MT6397:
+		pwrap_writel(wrp, 0xc, PWRAP_RDDMY);
+		pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
+		pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_READ);
 		pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
 		pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+		break;
+
+	case PMIC_MT6323:
+		pwrap_writel(wrp, 0x8, PWRAP_RDDMY);
+		pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_RDDMY_NO],
+			    0x8);
+		pwrap_writel(wrp, 0x5, PWRAP_CSHEXT_WRITE);
+		pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_READ);
+		pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
+		pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+		break;
 	}
 
 	return 0;
@@ -608,7 +767,8 @@
 	u32 rdata;
 	int ret;
 
-	ret = pwrap_read(wrp, PWRAP_DEW_CIPHER_RDY, &rdata);
+	ret = pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_RDY],
+			 &rdata);
 	if (ret)
 		return 0;
 
@@ -625,20 +785,37 @@
 	pwrap_writel(wrp, 0x1, PWRAP_CIPHER_KEY_SEL);
 	pwrap_writel(wrp, 0x2, PWRAP_CIPHER_IV_SEL);
 
-	if (pwrap_is_mt8135(wrp)) {
+	switch (wrp->master->type) {
+	case PWRAP_MT8135:
 		pwrap_writel(wrp, 1, PWRAP_CIPHER_LOAD);
 		pwrap_writel(wrp, 1, PWRAP_CIPHER_START);
-	} else {
+		break;
+	case PWRAP_MT2701:
+	case PWRAP_MT8173:
 		pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
+		break;
 	}
 
 	/* Config cipher mode @PMIC */
-	pwrap_write(wrp, PWRAP_DEW_CIPHER_SWRST, 0x1);
-	pwrap_write(wrp, PWRAP_DEW_CIPHER_SWRST, 0x0);
-	pwrap_write(wrp, PWRAP_DEW_CIPHER_KEY_SEL, 0x1);
-	pwrap_write(wrp, PWRAP_DEW_CIPHER_IV_SEL, 0x2);
-	pwrap_write(wrp, PWRAP_DEW_CIPHER_LOAD, 0x1);
-	pwrap_write(wrp, PWRAP_DEW_CIPHER_START, 0x1);
+	pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_SWRST], 0x1);
+	pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_SWRST], 0x0);
+	pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_KEY_SEL], 0x1);
+	pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_IV_SEL], 0x2);
+	pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_LOAD], 0x1);
+	pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_START], 0x1);
+
+	switch (wrp->slave->type) {
+	case PMIC_MT6397:
+		pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_LOAD],
+			    0x1);
+		pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_START],
+			    0x1);
+		break;
+	case PMIC_MT6323:
+		pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_EN],
+			    0x1);
+		break;
+	}
 
 	/* wait for cipher data ready@AP */
 	ret = pwrap_wait_for_state(wrp, pwrap_is_cipher_ready);
@@ -655,7 +832,7 @@
 	}
 
 	/* wait for cipher mode idle */
-	pwrap_write(wrp, PWRAP_DEW_CIPHER_MODE, 0x1);
+	pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_MODE], 0x1);
 	ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
 	if (ret) {
 		dev_err(wrp->dev, "cipher mode idle fail, ret=%d\n", ret);
@@ -665,9 +842,11 @@
 	pwrap_writel(wrp, 1, PWRAP_CIPHER_MODE);
 
 	/* Write Test */
-	if (pwrap_write(wrp, PWRAP_DEW_WRITE_TEST, PWRAP_DEW_WRITE_TEST_VAL) ||
-	    pwrap_read(wrp, PWRAP_DEW_WRITE_TEST, &rdata) ||
-			(rdata != PWRAP_DEW_WRITE_TEST_VAL)) {
+	if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_WRITE_TEST],
+			PWRAP_DEW_WRITE_TEST_VAL) ||
+	    pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_WRITE_TEST],
+		       &rdata) ||
+	    (rdata != PWRAP_DEW_WRITE_TEST_VAL)) {
 		dev_err(wrp->dev, "rdata=0x%04X\n", rdata);
 		return -EFAULT;
 	}
@@ -675,6 +854,63 @@
 	return 0;
 }
 
+static int pwrap_mt8135_init_soc_specific(struct pmic_wrapper *wrp)
+{
+	/* enable pwrap events and pwrap bridge in AP side */
+	pwrap_writel(wrp, 0x1, PWRAP_EVENT_IN_EN);
+	pwrap_writel(wrp, 0xffff, PWRAP_EVENT_DST_EN);
+	writel(0x7f, wrp->bridge_base + PWRAP_MT8135_BRIDGE_IORD_ARB_EN);
+	writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS3_EN);
+	writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS4_EN);
+	writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_UNIT);
+	writel(0xffff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_SRC_EN);
+	writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_TIMER_EN);
+	writel(0x7ff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INT_EN);
+
+	/* enable PMIC event out and sources */
+	if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_OUT_EN],
+			0x1) ||
+	    pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_SRC_EN],
+			0xffff)) {
+		dev_err(wrp->dev, "enable dewrap fail\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int pwrap_mt8173_init_soc_specific(struct pmic_wrapper *wrp)
+{
+	/* PMIC_DEWRAP enables */
+	if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_OUT_EN],
+			0x1) ||
+	    pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_SRC_EN],
+			0xffff)) {
+		dev_err(wrp->dev, "enable dewrap fail\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int pwrap_mt2701_init_soc_specific(struct pmic_wrapper *wrp)
+{
+	/* GPS_INTF initialization */
+	switch (wrp->slave->type) {
+	case PMIC_MT6323:
+		pwrap_writel(wrp, 0x076c, PWRAP_ADC_CMD_ADDR);
+		pwrap_writel(wrp, 0x8000, PWRAP_PWRAP_ADC_CMD);
+		pwrap_writel(wrp, 0x072c, PWRAP_ADC_RDY_ADDR);
+		pwrap_writel(wrp, 0x072e, PWRAP_ADC_RDATA_ADDR1);
+		pwrap_writel(wrp, 0x0730, PWRAP_ADC_RDATA_ADDR2);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 static int pwrap_init(struct pmic_wrapper *wrp)
 {
 	int ret;
@@ -684,7 +920,7 @@
 	if (wrp->rstc_bridge)
 		reset_control_reset(wrp->rstc_bridge);
 
-	if (pwrap_is_mt8173(wrp)) {
+	if (wrp->master->type == PWRAP_MT8173) {
 		/* Enable DCM */
 		pwrap_writel(wrp, 3, PWRAP_DCM_EN);
 		pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
@@ -697,11 +933,11 @@
 
 	pwrap_writel(wrp, 1, PWRAP_WRAP_EN);
 
-	pwrap_writel(wrp, wrp->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+	pwrap_writel(wrp, wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
 
 	pwrap_writel(wrp, 1, PWRAP_WACS2_EN);
 
-	ret = pwrap_init_reg_clock(wrp);
+	ret = wrp->master->init_reg_clock(wrp);
 	if (ret)
 		return ret;
 
@@ -711,7 +947,7 @@
 		return ret;
 
 	/* Enable dual IO mode */
-	pwrap_write(wrp, PWRAP_DEW_DIO_EN, 1);
+	pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1);
 
 	/* Check IDLE & INIT_DONE in advance */
 	ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
@@ -723,7 +959,7 @@
 	pwrap_writel(wrp, 1, PWRAP_DIO_EN);
 
 	/* Read Test */
-	pwrap_read(wrp, PWRAP_DEW_READ_TEST, &rdata);
+	pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_READ_TEST], &rdata);
 	if (rdata != PWRAP_DEW_READ_TEST_VAL) {
 		dev_err(wrp->dev, "Read test failed after switch to DIO mode: 0x%04x != 0x%04x\n",
 				PWRAP_DEW_READ_TEST_VAL, rdata);
@@ -736,15 +972,16 @@
 		return ret;
 
 	/* Signature checking - using CRC */
-	if (pwrap_write(wrp, PWRAP_DEW_CRC_EN, 0x1))
+	if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1))
 		return -EFAULT;
 
 	pwrap_writel(wrp, 0x1, PWRAP_CRC_EN);
 	pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE);
-	pwrap_writel(wrp, PWRAP_DEW_CRC_VAL, PWRAP_SIG_ADR);
-	pwrap_writel(wrp, wrp->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+	pwrap_writel(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_VAL],
+		     PWRAP_SIG_ADR);
+	pwrap_writel(wrp, wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
 
-	if (pwrap_is_mt8135(wrp))
+	if (wrp->master->type == PWRAP_MT8135)
 		pwrap_writel(wrp, 0x7, PWRAP_RRARB_EN);
 
 	pwrap_writel(wrp, 0x1, PWRAP_WACS0_EN);
@@ -753,31 +990,10 @@
 	pwrap_writel(wrp, 0x5, PWRAP_STAUPD_PRD);
 	pwrap_writel(wrp, 0xff, PWRAP_STAUPD_GRPEN);
 
-	if (pwrap_is_mt8135(wrp)) {
-		/* enable pwrap events and pwrap bridge in AP side */
-		pwrap_writel(wrp, 0x1, PWRAP_EVENT_IN_EN);
-		pwrap_writel(wrp, 0xffff, PWRAP_EVENT_DST_EN);
-		writel(0x7f, wrp->bridge_base + PWRAP_MT8135_BRIDGE_IORD_ARB_EN);
-		writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS3_EN);
-		writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS4_EN);
-		writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_UNIT);
-		writel(0xffff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_SRC_EN);
-		writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_TIMER_EN);
-		writel(0x7ff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INT_EN);
-
-		/* enable PMIC event out and sources */
-		if (pwrap_write(wrp, PWRAP_DEW_EVENT_OUT_EN, 0x1) ||
-				pwrap_write(wrp, PWRAP_DEW_EVENT_SRC_EN, 0xffff)) {
-			dev_err(wrp->dev, "enable dewrap fail\n");
-			return -EFAULT;
-		}
-	} else {
-		/* PMIC_DEWRAP enables */
-		if (pwrap_write(wrp, PWRAP_DEW_EVENT_OUT_EN, 0x1) ||
-				pwrap_write(wrp, PWRAP_DEW_EVENT_SRC_EN, 0xffff)) {
-			dev_err(wrp->dev, "enable dewrap fail\n");
-			return -EFAULT;
-		}
+	if (wrp->master->init_soc_specific) {
+		ret = wrp->master->init_soc_specific(wrp);
+		if (ret)
+			return ret;
 	}
 
 	/* Setup the init done registers */
@@ -785,7 +1001,7 @@
 	pwrap_writel(wrp, 1, PWRAP_INIT_DONE0);
 	pwrap_writel(wrp, 1, PWRAP_INIT_DONE1);
 
-	if (pwrap_is_mt8135(wrp)) {
+	if (wrp->master->has_bridge) {
 		writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE3);
 		writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE4);
 	}
@@ -816,8 +1032,70 @@
 	.max_register = 0xffff,
 };
 
+static const struct pwrap_slv_type pmic_mt6323 = {
+	.dew_regs = mt6323_regs,
+	.type = PMIC_MT6323,
+};
+
+static const struct pwrap_slv_type pmic_mt6397 = {
+	.dew_regs = mt6397_regs,
+	.type = PMIC_MT6397,
+};
+
+static const struct of_device_id of_slave_match_tbl[] = {
+	{
+		.compatible = "mediatek,mt6323",
+		.data = &pmic_mt6323,
+	}, {
+		.compatible = "mediatek,mt6397",
+		.data = &pmic_mt6397,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(of, of_slave_match_tbl);
+
+static const struct pmic_wrapper_type pwrap_mt2701 = {
+	.regs = mt2701_regs,
+	.type = PWRAP_MT2701,
+	.arb_en_all = 0x3f,
+	.int_en_all = ~(BIT(31) | BIT(2)),
+	.spi_w = PWRAP_MAN_CMD_SPI_WRITE_NEW,
+	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+	.has_bridge = 0,
+	.init_reg_clock = pwrap_mt2701_init_reg_clock,
+	.init_soc_specific = pwrap_mt2701_init_soc_specific,
+};
+
+static struct pmic_wrapper_type pwrap_mt8135 = {
+	.regs = mt8135_regs,
+	.type = PWRAP_MT8135,
+	.arb_en_all = 0x1ff,
+	.int_en_all = ~(BIT(31) | BIT(1)),
+	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+	.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+	.has_bridge = 1,
+	.init_reg_clock = pwrap_mt8135_init_reg_clock,
+	.init_soc_specific = pwrap_mt8135_init_soc_specific,
+};
+
+static struct pmic_wrapper_type pwrap_mt8173 = {
+	.regs = mt8173_regs,
+	.type = PWRAP_MT8173,
+	.arb_en_all = 0x3f,
+	.int_en_all = ~(BIT(31) | BIT(1)),
+	.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+	.wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD,
+	.has_bridge = 0,
+	.init_reg_clock = pwrap_mt8173_init_reg_clock,
+	.init_soc_specific = pwrap_mt8173_init_soc_specific,
+};
+
 static struct of_device_id of_pwrap_match_tbl[] = {
 	{
+		.compatible = "mediatek,mt2701-pwrap",
+		.data = &pwrap_mt2701,
+	}, {
 		.compatible = "mediatek,mt8135-pwrap",
 		.data = &pwrap_mt8135,
 	}, {
@@ -831,24 +1109,30 @@
 
 static int pwrap_probe(struct platform_device *pdev)
 {
-	int ret, irq, wdt_src;
+	int ret, irq;
 	struct pmic_wrapper *wrp;
 	struct device_node *np = pdev->dev.of_node;
 	const struct of_device_id *of_id =
 		of_match_device(of_pwrap_match_tbl, &pdev->dev);
-	const struct pmic_wrapper_type *type;
+	const struct of_device_id *of_slave_id = NULL;
 	struct resource *res;
 
+	if (pdev->dev.of_node->child)
+		of_slave_id = of_match_node(of_slave_match_tbl,
+					    pdev->dev.of_node->child);
+	if (!of_slave_id) {
+		dev_dbg(&pdev->dev, "slave pmic should be defined in dts\n");
+		return -EINVAL;
+	}
+
 	wrp = devm_kzalloc(&pdev->dev, sizeof(*wrp), GFP_KERNEL);
 	if (!wrp)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, wrp);
 
-	type = of_id->data;
-	wrp->regs = type->regs;
-	wrp->type = type->type;
-	wrp->arb_en_all = type->arb_en_all;
+	wrp->master = of_id->data;
+	wrp->slave = of_slave_id->data;
 	wrp->dev = &pdev->dev;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrap");
@@ -863,7 +1147,7 @@
 		return ret;
 	}
 
-	if (pwrap_is_mt8135(wrp)) {
+	if (wrp->master->has_bridge) {
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 				"pwrap-bridge");
 		wrp->bridge_base = devm_ioremap_resource(wrp->dev, res);
@@ -925,11 +1209,9 @@
 	 * Since STAUPD is not used on the mt8173 platform,
 	 * the STAUPD bit of WDT_SRC should be turned off
 	 */
-	wdt_src = pwrap_is_mt8173(wrp) ?
-			PWRAP_WDT_SRC_MASK_NO_STAUPD : PWRAP_WDT_SRC_MASK_ALL;
-	pwrap_writel(wrp, wdt_src, PWRAP_WDT_SRC_EN);
+	pwrap_writel(wrp, wrp->master->wdt_src, PWRAP_WDT_SRC_EN);
 	pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
-	pwrap_writel(wrp, ~((1 << 31) | (1 << 1)), PWRAP_INT_EN);
+	pwrap_writel(wrp, wrp->master->int_en_all, PWRAP_INT_EN);
 
 	irq = platform_get_irq(pdev, 0);
 	ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH,
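Summing up the refactor in this file: everything that was previously decided with pwrap_is_mt8135()/pwrap_is_mt8173() checks is now described by two lookup structures, a per-SoC pmic_wrapper_type ("master") and a per-PMIC pwrap_slv_type ("slave"), both selected from the device tree. A condensed sketch of how support for a further SoC/PMIC pair would slot in; every name containing 9999 or 6999 is invented for illustration:

	/* hypothetical register layout tables for a new SoC and PMIC */
	static int mt9999_regs[] = {
		[PWRAP_MUX_SEL]		= 0x0,
		/* ... remaining wrapper register offsets ... */
	};

	static const u32 mt6999_dew_regs[] = {
		[PWRAP_DEW_READ_TEST]	= 0x018c,
		/* ... remaining dewrapper register addresses ... */
	};

	static const struct pwrap_slv_type pmic_mt6999 = {
		.dew_regs = mt6999_dew_regs,
		.type = PMIC_MT6323,		/* or a new enum pmic_type value */
	};

	static const struct pmic_wrapper_type pwrap_mt9999 = {
		.regs = mt9999_regs,
		.arb_en_all = 0x3f,
		.int_en_all = ~(BIT(31) | BIT(1)),
		.spi_w = PWRAP_MAN_CMD_SPI_WRITE_NEW,
		.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
		.init_reg_clock = pwrap_mt2701_init_reg_clock,	/* reuse or add one */
		.init_soc_specific = NULL,	/* optional hook, may stay NULL */
	};

	/* plus one entry each in of_pwrap_match_tbl and of_slave_match_tbl;
	 * the core then touches registers only through
	 * wrp->master->regs[...] and wrp->slave->dew_regs[...]. */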
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 57e781c..837effe 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -491,13 +491,14 @@
 		genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
 
 		/*
-		 * With CONFIG_PM disabled turn on all domains to make the
-		 * hardware usable.
+		 * Initially turn on all domains to make the domains usable
+		 * with !CONFIG_PM and to get the hardware in sync with the
+		 * software.  The unused domains will be switched off during
+		 * late_init time.
 		 */
-		if (!IS_ENABLED(CONFIG_PM))
-			genpd->power_on(genpd);
+		genpd->power_on(genpd);
 
-		pm_genpd_init(genpd, NULL, true);
+		pm_genpd_init(genpd, NULL, false);
 	}
 
 	/*
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 731fa06..6609d7e 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -33,6 +33,7 @@
  */
 struct qcom_smd_rpm {
 	struct qcom_smd_channel *rpm_channel;
+	struct device *dev;
 
 	struct completion ack;
 	struct mutex lock;
@@ -149,14 +150,14 @@
 }
 EXPORT_SYMBOL(qcom_rpm_smd_write);
 
-static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
+static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel,
 				 const void *data,
 				 size_t count)
 {
 	const struct qcom_rpm_header *hdr = data;
 	size_t hdr_length = le32_to_cpu(hdr->length);
 	const struct qcom_rpm_message *msg;
-	struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev);
+	struct qcom_smd_rpm *rpm = qcom_smd_get_drvdata(channel);
 	const u8 *buf = data + sizeof(struct qcom_rpm_header);
 	const u8 *end = buf + hdr_length;
 	char msgbuf[32];
@@ -165,7 +166,7 @@
 
 	if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
 	    hdr_length < sizeof(struct qcom_rpm_message)) {
-		dev_err(&qsdev->dev, "invalid request\n");
+		dev_err(rpm->dev, "invalid request\n");
 		return 0;
 	}
 
@@ -206,7 +207,9 @@
 	mutex_init(&rpm->lock);
 	init_completion(&rpm->ack);
 
+	rpm->dev = &sdev->dev;
 	rpm->rpm_channel = sdev->channel;
+	qcom_smd_set_drvdata(sdev->channel, rpm);
 
 	dev_set_drvdata(&sdev->dev, rpm);
 
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index 498fd05..ac1957d 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -106,9 +106,9 @@
  * @channels:		list of all channels detected on this edge
  * @channels_lock:	guard for modifications of @channels
  * @allocated:		array of bitmaps representing already allocated channels
- * @need_rescan:	flag that the @work needs to scan smem for new channels
  * @smem_available:	last available amount of smem triggering a channel scan
- * @work:		work item for edge house keeping
+ * @scan_work:		work item for discovering new channels
+ * @state_work:		work item for edge state changes
  */
 struct qcom_smd_edge {
 	struct qcom_smd *smd;
@@ -127,10 +127,12 @@
 
 	DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
 
-	bool need_rescan;
 	unsigned smem_available;
 
-	struct work_struct work;
+	wait_queue_head_t new_channel_event;
+
+	struct work_struct scan_work;
+	struct work_struct state_work;
 };
 
 /*
@@ -186,13 +188,16 @@
 	int fifo_size;
 
 	void *bounce_buffer;
-	int (*cb)(struct qcom_smd_device *, const void *, size_t);
+	qcom_smd_cb_t cb;
 
 	spinlock_t recv_lock;
 
 	int pkt_size;
 
+	void *drvdata;
+
 	struct list_head list;
+	struct list_head dev_list;
 };
 
 /**
@@ -378,6 +383,19 @@
 }
 
 /*
+ * Set the callback for a channel, with appropriate locking
+ */
+static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
+					  qcom_smd_cb_t cb)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&channel->recv_lock, flags);
+	channel->cb = cb;
+	spin_unlock_irqrestore(&channel->recv_lock, flags);
+};
+
+/*
  * Calculate the amount of data available in the rx fifo
  */
 static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
@@ -497,7 +515,6 @@
  */
 static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
 {
-	struct qcom_smd_device *qsdev = channel->qsdev;
 	unsigned tail;
 	size_t len;
 	void *ptr;
@@ -517,7 +534,7 @@
 		len = channel->pkt_size;
 	}
 
-	ret = channel->cb(qsdev, ptr, len);
+	ret = channel->cb(channel, ptr, len);
 	if (ret < 0)
 		return ret;
 
@@ -601,7 +618,8 @@
 	struct qcom_smd_edge *edge = data;
 	struct qcom_smd_channel *channel;
 	unsigned available;
-	bool kick_worker = false;
+	bool kick_scanner = false;
+	bool kick_state = false;
 
 	/*
 	 * Handle state changes or data on each of the channels on this edge
@@ -609,7 +627,7 @@
 	spin_lock(&edge->channels_lock);
 	list_for_each_entry(channel, &edge->channels, list) {
 		spin_lock(&channel->recv_lock);
-		kick_worker |= qcom_smd_channel_intr(channel);
+		kick_state |= qcom_smd_channel_intr(channel);
 		spin_unlock(&channel->recv_lock);
 	}
 	spin_unlock(&edge->channels_lock);
@@ -622,12 +640,13 @@
 	available = qcom_smem_get_free_space(edge->remote_pid);
 	if (available != edge->smem_available) {
 		edge->smem_available = available;
-		edge->need_rescan = true;
-		kick_worker = true;
+		kick_scanner = true;
 	}
 
-	if (kick_worker)
-		schedule_work(&edge->work);
+	if (kick_scanner)
+		schedule_work(&edge->scan_work);
+	if (kick_state)
+		schedule_work(&edge->state_work);
 
 	return IRQ_HANDLED;
 }
@@ -793,6 +812,43 @@
 }
 
 /*
+ * Helper for opening a channel
+ */
+static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
+				 qcom_smd_cb_t cb)
+{
+	size_t bb_size;
+
+	/*
+	 * Packets are maximum 4k, but reduce if the fifo is smaller
+	 */
+	bb_size = min(channel->fifo_size, SZ_4K);
+	channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
+	if (!channel->bounce_buffer)
+		return -ENOMEM;
+
+	qcom_smd_channel_set_callback(channel, cb);
+	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
+	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
+
+	return 0;
+}
+
+/*
+ * Helper for closing and resetting a channel
+ */
+static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
+{
+	qcom_smd_channel_set_callback(channel, NULL);
+
+	kfree(channel->bounce_buffer);
+	channel->bounce_buffer = NULL;
+
+	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+	qcom_smd_channel_reset(channel);
+}
+
+/*
  * Probe the smd client.
  *
  * The remote side has indicated that it wants the channel to be opened, so
@@ -803,22 +859,11 @@
 	struct qcom_smd_device *qsdev = to_smd_device(dev);
 	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
 	struct qcom_smd_channel *channel = qsdev->channel;
-	size_t bb_size;
 	int ret;
 
-	/*
-	 * Packets are maximum 4k, but reduce if the fifo is smaller
-	 */
-	bb_size = min(channel->fifo_size, SZ_4K);
-	channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
-	if (!channel->bounce_buffer)
-		return -ENOMEM;
-
-	channel->cb = qsdrv->callback;
-
-	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
-
-	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
+	ret = qcom_smd_channel_open(channel, qsdrv->callback);
+	if (ret)
+		return ret;
 
 	ret = qsdrv->probe(qsdev);
 	if (ret)
@@ -831,11 +876,7 @@
 err:
 	dev_err(&qsdev->dev, "probe failed\n");
 
-	channel->cb = NULL;
-	kfree(channel->bounce_buffer);
-	channel->bounce_buffer = NULL;
-
-	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+	qcom_smd_channel_close(channel);
 	return ret;
 }
 
@@ -850,16 +891,15 @@
 	struct qcom_smd_device *qsdev = to_smd_device(dev);
 	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
 	struct qcom_smd_channel *channel = qsdev->channel;
-	unsigned long flags;
+	struct qcom_smd_channel *tmp;
+	struct qcom_smd_channel *ch;
 
 	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
 
 	/*
 	 * Make sure we don't race with the code receiving data.
 	 */
-	spin_lock_irqsave(&channel->recv_lock, flags);
-	channel->cb = NULL;
-	spin_unlock_irqrestore(&channel->recv_lock, flags);
+	qcom_smd_channel_set_callback(channel, NULL);
 
 	/* Wake up any sleepers in qcom_smd_send() */
 	wake_up_interruptible(&channel->fblockread_event);
@@ -872,15 +912,14 @@
 		qsdrv->remove(qsdev);
 
 	/*
-	 * The client is now gone, cleanup and reset the channel state.
+	 * The client is now gone, close and release all channels associated
+	 * with this sdev
 	 */
-	channel->qsdev = NULL;
-	kfree(channel->bounce_buffer);
-	channel->bounce_buffer = NULL;
-
-	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
-
-	qcom_smd_channel_reset(channel);
+	list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) {
+		qcom_smd_channel_close(ch);
+		list_del(&ch->dev_list);
+		ch->qsdev = NULL;
+	}
 
 	return 0;
 }
@@ -996,6 +1035,18 @@
 }
 EXPORT_SYMBOL(qcom_smd_driver_register);
 
+void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
+{
+	return channel->drvdata;
+}
+EXPORT_SYMBOL(qcom_smd_get_drvdata);
+
+void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
+{
+	channel->drvdata = data;
+}
+EXPORT_SYMBOL(qcom_smd_set_drvdata);
+
 /**
  * qcom_smd_driver_unregister - unregister a smd driver
  * @qsdrv:	qcom_smd_driver struct
@@ -1006,6 +1057,78 @@
 }
 EXPORT_SYMBOL(qcom_smd_driver_unregister);
 
+static struct qcom_smd_channel *
+qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
+{
+	struct qcom_smd_channel *channel;
+	struct qcom_smd_channel *ret = NULL;
+	unsigned long flags;
+	unsigned state;
+
+	spin_lock_irqsave(&edge->channels_lock, flags);
+	list_for_each_entry(channel, &edge->channels, list) {
+		if (strcmp(channel->name, name))
+			continue;
+
+		state = GET_RX_CHANNEL_INFO(channel, state);
+		if (state != SMD_CHANNEL_OPENING &&
+		    state != SMD_CHANNEL_OPENED)
+			continue;
+
+		ret = channel;
+		break;
+	}
+	spin_unlock_irqrestore(&edge->channels_lock, flags);
+
+	return ret;
+}
+
+/**
+ * qcom_smd_open_channel() - claim additional channels on the same edge
+ * @sdev:	smd_device handle
+ * @name:	channel name
+ * @cb:		callback method to use for incoming data
+ *
+ * Returns a channel handle on success, or an ERR_PTR() on failure, e.g.
+ * -ETIMEDOUT if the channel doesn't appear or -EBUSY if it is already open.
+ */
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent,
+					       const char *name,
+					       qcom_smd_cb_t cb)
+{
+	struct qcom_smd_channel *channel;
+	struct qcom_smd_device *sdev = parent->qsdev;
+	struct qcom_smd_edge *edge = parent->edge;
+	int ret;
+
+	/* Wait up to HZ for the channel to appear */
+	ret = wait_event_interruptible_timeout(edge->new_channel_event,
+			(channel = qcom_smd_find_channel(edge, name)) != NULL,
+			HZ);
+	if (!ret)
+		return ERR_PTR(-ETIMEDOUT);
+
+	if (channel->state != SMD_CHANNEL_CLOSED) {
+		dev_err(&sdev->dev, "channel %s is busy\n", channel->name);
+		return ERR_PTR(-EBUSY);
+	}
+
+	channel->qsdev = sdev;
+	ret = qcom_smd_channel_open(channel, cb);
+	if (ret) {
+		channel->qsdev = NULL;
+		return ERR_PTR(ret);
+	}
+
+	/*
+	 * Append this channel to the list of channels associated with the sdev
+	 */
+	list_add_tail(&channel->dev_list, &sdev->channel->dev_list);
+
+	return channel;
+}
+EXPORT_SYMBOL(qcom_smd_open_channel);
+
 /*
  * Allocate the qcom_smd_channel object for a newly found smd channel,
  * retrieving and validating the smem items involved.
@@ -1027,6 +1150,7 @@
 	if (!channel)
 		return ERR_PTR(-ENOMEM);
 
+	INIT_LIST_HEAD(&channel->dev_list);
 	channel->edge = edge;
 	channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
 	if (!channel->name)
@@ -1089,8 +1213,9 @@
  * qcom_smd_create_channel() to create representations of these and add
  * them to the edge's list of channels.
  */
-static void qcom_discover_channels(struct qcom_smd_edge *edge)
+static void qcom_channel_scan_worker(struct work_struct *work)
 {
+	struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
 	struct qcom_smd_alloc_entry *alloc_tbl;
 	struct qcom_smd_alloc_entry *entry;
 	struct qcom_smd_channel *channel;
@@ -1140,10 +1265,12 @@
 
 			dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
 			set_bit(i, edge->allocated[tbl]);
+
+			wake_up_interruptible(&edge->new_channel_event);
 		}
 	}
 
-	schedule_work(&edge->work);
+	schedule_work(&edge->state_work);
 }
 
 /*
@@ -1151,29 +1278,23 @@
  * then scans all registered channels for state changes that should be handled
  * by creating or destroying smd client devices for the registered channels.
  *
- * LOCKING: edge->channels_lock is not needed to be held during the traversal
- * of the channels list as it's done synchronously with the only writer.
+ * LOCKING: edge->channels_lock only needs to cover the list operations, as the
+ * worker is killed before any channels are deallocated
  */
 static void qcom_channel_state_worker(struct work_struct *work)
 {
 	struct qcom_smd_channel *channel;
 	struct qcom_smd_edge *edge = container_of(work,
 						  struct qcom_smd_edge,
-						  work);
+						  state_work);
 	unsigned remote_state;
-
-	/*
-	 * Rescan smem if we have reason to belive that there are new channels.
-	 */
-	if (edge->need_rescan) {
-		edge->need_rescan = false;
-		qcom_discover_channels(edge);
-	}
+	unsigned long flags;
 
 	/*
 	 * Register a device for any closed channel where the remote processor
 	 * is showing interest in opening the channel.
 	 */
+	spin_lock_irqsave(&edge->channels_lock, flags);
 	list_for_each_entry(channel, &edge->channels, list) {
 		if (channel->state != SMD_CHANNEL_CLOSED)
 			continue;
@@ -1183,7 +1304,9 @@
 		    remote_state != SMD_CHANNEL_OPENED)
 			continue;
 
+		spin_unlock_irqrestore(&edge->channels_lock, flags);
 		qcom_smd_create_device(channel);
+		spin_lock_irqsave(&edge->channels_lock, flags);
 	}
 
 	/*
@@ -1200,8 +1323,11 @@
 		    remote_state == SMD_CHANNEL_OPENED)
 			continue;
 
+		spin_unlock_irqrestore(&edge->channels_lock, flags);
 		qcom_smd_destroy_device(channel);
+		spin_lock_irqsave(&edge->channels_lock, flags);
 	}
+	spin_unlock_irqrestore(&edge->channels_lock, flags);
 }
 
 /*
@@ -1219,7 +1345,8 @@
 	INIT_LIST_HEAD(&edge->channels);
 	spin_lock_init(&edge->channels_lock);
 
-	INIT_WORK(&edge->work, qcom_channel_state_worker);
+	INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
+	INIT_WORK(&edge->state_work, qcom_channel_state_worker);
 
 	edge->of_node = of_node_get(node);
 
@@ -1303,13 +1430,13 @@
 	for_each_available_child_of_node(pdev->dev.of_node, node) {
 		edge = &smd->edges[i++];
 		edge->smd = smd;
+		init_waitqueue_head(&edge->new_channel_event);
 
 		ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
 		if (ret)
 			continue;
 
-		edge->need_rescan = true;
-		schedule_work(&edge->work);
+		schedule_work(&edge->scan_work);
 	}
 
 	platform_set_drvdata(pdev, smd);
@@ -1332,8 +1459,10 @@
 		edge = &smd->edges[i];
 
 		disable_irq(edge->irq);
-		cancel_work_sync(&edge->work);
+		cancel_work_sync(&edge->scan_work);
+		cancel_work_sync(&edge->state_work);
 
+		/* No need to lock here, because the writer is gone */
 		list_for_each_entry(channel, &edge->channels, list) {
 			if (!channel->qsdev)
 				continue;
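For drivers built on top of this, the new qcom_smd_open_channel() entry point is what makes the scan/state split visible: a client that already owns one channel on an edge can claim a sibling channel by name once the scan worker has announced it. A rough usage sketch from a hypothetical client probe; apart from the exported smd API, all names (including the channel name and header path) are illustrative:

	#include <linux/soc/qcom/smd.h>		/* assumed client-facing header */

	struct my_client {			/* hypothetical client state */
		struct qcom_smd_channel *ctrl;
	};

	static int my_client_cb(struct qcom_smd_channel *channel,
				const void *data, size_t count)
	{
		struct my_client *cl = qcom_smd_get_drvdata(channel);

		/* handle payload arriving on the secondary channel */
		return 0;
	}

	static int my_client_probe(struct qcom_smd_device *sdev)
	{
		struct my_client *cl;

		cl = devm_kzalloc(&sdev->dev, sizeof(*cl), GFP_KERNEL);
		if (!cl)
			return -ENOMEM;

		/* claim an additional channel on the same edge; this waits up
		 * to HZ for the scan worker to discover it */
		cl->ctrl = qcom_smd_open_channel(sdev->channel,
						 "my_ctrl_channel", my_client_cb);
		if (IS_ERR(cl->ctrl))
			return PTR_ERR(cl->ctrl);

		qcom_smd_set_drvdata(cl->ctrl, cl);
		return 0;
	}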
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 19019aa..2e1aa9f 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -684,8 +684,7 @@
 
 	smem->regions[i].aux_base = (u32)r.start;
 	smem->regions[i].size = resource_size(&r);
-	smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start,
-							  resource_size(&r));
+	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r));
 	if (!smem->regions[i].virt_base)
 		return -ENOMEM;
 
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index 5548a31..f9d7a85 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -2,6 +2,8 @@
  * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  * Copyright (c) 2014,2015, Linaro Ltd.
  *
+ * SAW power controller driver
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
@@ -12,7 +14,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -274,7 +275,7 @@
 	return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
 }
 
-static struct cpuidle_ops qcom_cpuidle_ops __initdata = {
+static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
 	.suspend = qcom_idle_enter,
 	.init = qcom_cpuidle_init,
 };
@@ -378,8 +379,5 @@
 		.of_match_table = spm_match_table,
 	},
 };
-module_platform_driver(spm_driver);
 
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("SAW power controller driver");
-MODULE_ALIAS("platform:saw");
+builtin_platform_driver(spm_driver);
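The module boilerplate removed here is not lost functionality: the SPM/SAW driver can now only be built in, so builtin_platform_driver() is the appropriate registration helper. Roughly, it behaves like the following (a simplified sketch, not a verbatim copy of the kernel macro):

	/* builtin_platform_driver(spm_driver) expands approximately to: */
	static int __init spm_driver_init(void)
	{
		return platform_driver_register(&spm_driver);
	}
	device_initcall(spm_driver_init);
	/* no module_exit()/MODULE_* counterparts: the driver is never unloaded */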
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index 7a986f8..c544f3d 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -100,17 +100,17 @@
 
 /**
  * wcnss_ctrl_smd_callback() - handler from SMD responses
- * @qsdev:	smd device handle
+ * @channel:	smd channel handle
  * @data:	pointer to the incoming data packet
  * @count:	size of the incoming data packet
  *
  * Handles any incoming packets from the remote WCNSS_CTRL service.
  */
-static int wcnss_ctrl_smd_callback(struct qcom_smd_device *qsdev,
+static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
 				   const void *data,
 				   size_t count)
 {
-	struct wcnss_ctrl *wcnss = dev_get_drvdata(&qsdev->dev);
+	struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(channel);
 	const struct wcnss_download_nv_resp *nvresp;
 	const struct wcnss_version_resp *version;
 	const struct wcnss_msg_hdr *hdr = data;
@@ -246,7 +246,7 @@
 	init_completion(&wcnss->ack);
 	INIT_WORK(&wcnss->download_nv_work, wcnss_download_nv);
 
-	dev_set_drvdata(&sdev->dev, wcnss);
+	qcom_smd_set_drvdata(sdev->channel, wcnss);
 
 	return wcnss_request_version(wcnss);
 }
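Both SMD clients touched above (smd-rpm and wcnss_ctrl) follow the same conversion recipe: the callback now receives the channel rather than the smd device, and per-client state moves to qcom_smd_set_drvdata() on the channel. Condensed, the before/after for a client looks roughly like this (struct my_state and the callback names are illustrative):

	struct my_state { int dummy; };		/* stand-in for the client's state */

	/* before: callback keyed on the smd device */
	static int old_cb(struct qcom_smd_device *qsdev,
			  const void *data, size_t count)
	{
		struct my_state *st = dev_get_drvdata(&qsdev->dev);

		/* ... process data ... */
		return 0;
	}

	/* after: callback keyed on the channel that produced the data */
	static int new_cb(struct qcom_smd_channel *channel,
			  const void *data, size_t count)
	{
		struct my_state *st = qcom_smd_get_drvdata(channel);

		/* ... process data ... */
		return 0;
	}

	/* and in probe():
	 *	qcom_smd_set_drvdata(sdev->channel, st);
	 * complements (or replaces) the old
	 *	dev_set_drvdata(&sdev->dev, st);
	 */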
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
new file mode 100644
index 0000000..151fcd3
--- /dev/null
+++ b/drivers/soc/renesas/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_ARCH_R8A7779)	+= rcar-sysc.o r8a7779-sysc.o
+obj-$(CONFIG_ARCH_R8A7790)	+= rcar-sysc.o r8a7790-sysc.o
+obj-$(CONFIG_ARCH_R8A7791)	+= rcar-sysc.o r8a7791-sysc.o
+# R-Car M2-N is identical to R-Car M2-W w.r.t. power domains.
+obj-$(CONFIG_ARCH_R8A7793)	+= rcar-sysc.o r8a7791-sysc.o
+obj-$(CONFIG_ARCH_R8A7794)	+= rcar-sysc.o r8a7794-sysc.o
+obj-$(CONFIG_ARCH_R8A7795)	+= rcar-sysc.o r8a7795-sysc.o
diff --git a/drivers/soc/renesas/r8a7779-sysc.c b/drivers/soc/renesas/r8a7779-sysc.c
new file mode 100644
index 0000000..9e8e6b7
--- /dev/null
+++ b/drivers/soc/renesas/r8a7779-sysc.c
@@ -0,0 +1,34 @@
+/*
+ * Renesas R-Car H1 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7779-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7779_areas[] __initconst = {
+	{ "always-on",	    0, 0, R8A7779_PD_ALWAYS_ON,	-1, PD_ALWAYS_ON },
+	{ "arm1",	 0x40, 1, R8A7779_PD_ARM1,	R8A7779_PD_ALWAYS_ON,
+	  PD_CPU_CR },
+	{ "arm2",	 0x40, 2, R8A7779_PD_ARM2,	R8A7779_PD_ALWAYS_ON,
+	  PD_CPU_CR },
+	{ "arm3",	 0x40, 3, R8A7779_PD_ARM3,	R8A7779_PD_ALWAYS_ON,
+	  PD_CPU_CR },
+	{ "sgx",	 0xc0, 0, R8A7779_PD_SGX,	R8A7779_PD_ALWAYS_ON },
+	{ "vdp",	0x100, 0, R8A7779_PD_VDP,	R8A7779_PD_ALWAYS_ON },
+	{ "imp",	0x140, 0, R8A7779_PD_IMP,	R8A7779_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7779_sysc_info __initconst = {
+	.areas = r8a7779_areas,
+	.num_areas = ARRAY_SIZE(r8a7779_areas),
+};
diff --git a/drivers/soc/renesas/r8a7790-sysc.c b/drivers/soc/renesas/r8a7790-sysc.c
new file mode 100644
index 0000000..7a567ad
--- /dev/null
+++ b/drivers/soc/renesas/r8a7790-sysc.c
@@ -0,0 +1,48 @@
+/*
+ * Renesas R-Car H2 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7790-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7790_areas[] __initconst = {
+	{ "always-on",	    0, 0, R8A7790_PD_ALWAYS_ON,	-1, PD_ALWAYS_ON },
+	{ "ca15-scu",	0x180, 0, R8A7790_PD_CA15_SCU,	R8A7790_PD_ALWAYS_ON,
+	  PD_SCU },
+	{ "ca15-cpu0",	 0x40, 0, R8A7790_PD_CA15_CPU0,	R8A7790_PD_CA15_SCU,
+	  PD_CPU_NOCR },
+	{ "ca15-cpu1",	 0x40, 1, R8A7790_PD_CA15_CPU1,	R8A7790_PD_CA15_SCU,
+	  PD_CPU_NOCR },
+	{ "ca15-cpu2",	 0x40, 2, R8A7790_PD_CA15_CPU2,	R8A7790_PD_CA15_SCU,
+	  PD_CPU_NOCR },
+	{ "ca15-cpu3",	 0x40, 3, R8A7790_PD_CA15_CPU3,	R8A7790_PD_CA15_SCU,
+	  PD_CPU_NOCR },
+	{ "ca7-scu",	0x100, 0, R8A7790_PD_CA7_SCU,	R8A7790_PD_ALWAYS_ON,
+	  PD_SCU },
+	{ "ca7-cpu0",	0x1c0, 0, R8A7790_PD_CA7_CPU0,	R8A7790_PD_CA7_SCU,
+	  PD_CPU_NOCR },
+	{ "ca7-cpu1",	0x1c0, 1, R8A7790_PD_CA7_CPU1,	R8A7790_PD_CA7_SCU,
+	  PD_CPU_NOCR },
+	{ "ca7-cpu2",	0x1c0, 2, R8A7790_PD_CA7_CPU2,	R8A7790_PD_CA7_SCU,
+	  PD_CPU_NOCR },
+	{ "ca7-cpu3",	0x1c0, 3, R8A7790_PD_CA7_CPU3,	R8A7790_PD_CA7_SCU,
+	  PD_CPU_NOCR },
+	{ "sh-4a",	 0x80, 0, R8A7790_PD_SH_4A,	R8A7790_PD_ALWAYS_ON },
+	{ "rgx",	 0xc0, 0, R8A7790_PD_RGX,	R8A7790_PD_ALWAYS_ON },
+	{ "imp",	0x140, 0, R8A7790_PD_IMP,	R8A7790_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7790_sysc_info __initconst = {
+	.areas = r8a7790_areas,
+	.num_areas = ARRAY_SIZE(r8a7790_areas),
+};
diff --git a/drivers/soc/renesas/r8a7791-sysc.c b/drivers/soc/renesas/r8a7791-sysc.c
new file mode 100644
index 0000000..03b9f41
--- /dev/null
+++ b/drivers/soc/renesas/r8a7791-sysc.c
@@ -0,0 +1,33 @@
+/*
+ * Renesas R-Car M2-W/N System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7791-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7791_areas[] __initconst = {
+	{ "always-on",	    0, 0, R8A7791_PD_ALWAYS_ON,	-1, PD_ALWAYS_ON },
+	{ "ca15-scu",	0x180, 0, R8A7791_PD_CA15_SCU,	R8A7791_PD_ALWAYS_ON,
+	  PD_SCU },
+	{ "ca15-cpu0",	 0x40, 0, R8A7791_PD_CA15_CPU0,	R8A7791_PD_CA15_SCU,
+	  PD_CPU_NOCR },
+	{ "ca15-cpu1",	 0x40, 1, R8A7791_PD_CA15_CPU1,	R8A7791_PD_CA15_SCU,
+	  PD_CPU_NOCR },
+	{ "sh-4a",	 0x80, 0, R8A7791_PD_SH_4A,	R8A7791_PD_ALWAYS_ON },
+	{ "sgx",	 0xc0, 0, R8A7791_PD_SGX,	R8A7791_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7791_sysc_info __initconst = {
+	.areas = r8a7791_areas,
+	.num_areas = ARRAY_SIZE(r8a7791_areas),
+};
diff --git a/drivers/soc/renesas/r8a7794-sysc.c b/drivers/soc/renesas/r8a7794-sysc.c
new file mode 100644
index 0000000..c4da294
--- /dev/null
+++ b/drivers/soc/renesas/r8a7794-sysc.c
@@ -0,0 +1,33 @@
+/*
+ * Renesas R-Car E2 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7794-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7794_areas[] __initconst = {
+	{ "always-on",	    0, 0, R8A7794_PD_ALWAYS_ON,	-1, PD_ALWAYS_ON },
+	{ "ca7-scu",	0x100, 0, R8A7794_PD_CA7_SCU,	R8A7794_PD_ALWAYS_ON,
+	  PD_SCU },
+	{ "ca7-cpu0",	0x1c0, 0, R8A7794_PD_CA7_CPU0,	R8A7794_PD_CA7_SCU,
+	  PD_CPU_NOCR },
+	{ "ca7-cpu1",	0x1c0, 1, R8A7794_PD_CA7_CPU1,	R8A7794_PD_CA7_SCU,
+	  PD_CPU_NOCR },
+	{ "sh-4a",	 0x80, 0, R8A7794_PD_SH_4A,	R8A7794_PD_ALWAYS_ON },
+	{ "sgx",	 0xc0, 0, R8A7794_PD_SGX,	R8A7794_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7794_sysc_info __initconst = {
+	.areas = r8a7794_areas,
+	.num_areas = ARRAY_SIZE(r8a7794_areas),
+};
diff --git a/drivers/soc/renesas/r8a7795-sysc.c b/drivers/soc/renesas/r8a7795-sysc.c
new file mode 100644
index 0000000..5e7537c
--- /dev/null
+++ b/drivers/soc/renesas/r8a7795-sysc.c
@@ -0,0 +1,56 @@
+/*
+ * Renesas R-Car H3 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7795-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7795_areas[] __initconst = {
+	{ "always-on",	    0, 0, R8A7795_PD_ALWAYS_ON,	-1, PD_ALWAYS_ON },
+	{ "ca57-scu",	0x1c0, 0, R8A7795_PD_CA57_SCU,	R8A7795_PD_ALWAYS_ON,
+	  PD_SCU },
+	{ "ca57-cpu0",	 0x80, 0, R8A7795_PD_CA57_CPU0,	R8A7795_PD_CA57_SCU,
+	  PD_CPU_NOCR },
+	{ "ca57-cpu1",	 0x80, 1, R8A7795_PD_CA57_CPU1,	R8A7795_PD_CA57_SCU,
+	  PD_CPU_NOCR },
+	{ "ca57-cpu2",	 0x80, 2, R8A7795_PD_CA57_CPU2,	R8A7795_PD_CA57_SCU,
+	  PD_CPU_NOCR },
+	{ "ca57-cpu3",	 0x80, 3, R8A7795_PD_CA57_CPU3,	R8A7795_PD_CA57_SCU,
+	  PD_CPU_NOCR },
+	{ "ca53-scu",	0x140, 0, R8A7795_PD_CA53_SCU,	R8A7795_PD_ALWAYS_ON,
+	  PD_SCU },
+	{ "ca53-cpu0",	0x200, 0, R8A7795_PD_CA53_CPU0,	R8A7795_PD_CA53_SCU,
+	  PD_CPU_NOCR },
+	{ "ca53-cpu1",	0x200, 1, R8A7795_PD_CA53_CPU1,	R8A7795_PD_CA53_SCU,
+	  PD_CPU_NOCR },
+	{ "ca53-cpu2",	0x200, 2, R8A7795_PD_CA53_CPU2,	R8A7795_PD_CA53_SCU,
+	  PD_CPU_NOCR },
+	{ "ca53-cpu3",	0x200, 3, R8A7795_PD_CA53_CPU3,	R8A7795_PD_CA53_SCU,
+	  PD_CPU_NOCR },
+	{ "a3vp",	0x340, 0, R8A7795_PD_A3VP,	R8A7795_PD_ALWAYS_ON },
+	{ "cr7",	0x240, 0, R8A7795_PD_CR7,	R8A7795_PD_ALWAYS_ON },
+	{ "a3vc",	0x380, 0, R8A7795_PD_A3VC,	R8A7795_PD_ALWAYS_ON },
+	{ "a2vc0",	0x3c0, 0, R8A7795_PD_A2VC0,	R8A7795_PD_A3VC },
+	{ "a2vc1",	0x3c0, 1, R8A7795_PD_A2VC1,	R8A7795_PD_A3VC },
+	{ "3dg-a",	0x100, 0, R8A7795_PD_3DG_A,	R8A7795_PD_ALWAYS_ON },
+	{ "3dg-b",	0x100, 1, R8A7795_PD_3DG_B,	R8A7795_PD_3DG_A },
+	{ "3dg-c",	0x100, 2, R8A7795_PD_3DG_C,	R8A7795_PD_3DG_B },
+	{ "3dg-d",	0x100, 3, R8A7795_PD_3DG_D,	R8A7795_PD_3DG_C },
+	{ "3dg-e",	0x100, 4, R8A7795_PD_3DG_E,	R8A7795_PD_3DG_D },
+	{ "a3ir",	0x180, 0, R8A7795_PD_A3IR,	R8A7795_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7795_sysc_info __initconst = {
+	.areas = r8a7795_areas,
+	.num_areas = ARRAY_SIZE(r8a7795_areas),
+};
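A note on reading the rcar_sysc_area tables above: each entry describes one power area as (name, power-control register block offset, channel bit, interrupt-status bit, parent area, flags), with -1 marking a root area. The field names below are assumptions based on how rcar-sysc.c consumes the channel data; the actual struct lives in rcar-sysc.h, which is not shown in this series.

	/*
	 * Field order per entry (assumed from usage):
	 *   { name, chan_offs, chan_bit, isr_bit, parent_isr_bit, flags }
	 *
	 * e.g. for r8a7795 above:
	 *   { "ca57-cpu0", 0x80, 0, R8A7795_PD_CA57_CPU0, R8A7795_PD_CA57_SCU,
	 *     PD_CPU_NOCR }
	 * means: PWR* register block at offset 0x80, channel bit 0, interrupt
	 * status bit R8A7795_PD_CA57_CPU0, parented to the CA57 SCU area, and
	 * flagged as a CPU core without its own power control registers
	 * (per the comment in rcar-sysc.c, such cores are resumed via
	 * CPG/APMU on Gen2 or PSCI on Gen3 rather than through SYSC).
	 */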
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
new file mode 100644
index 0000000..79dbc77
--- /dev/null
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -0,0 +1,401 @@
+/*
+ * R-Car SYSC Power management support
+ *
+ * Copyright (C) 2014  Magnus Damm
+ * Copyright (C) 2015-2016 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/soc/renesas/rcar-sysc.h>
+
+#include "rcar-sysc.h"
+
+/* SYSC Common */
+#define SYSCSR			0x00	/* SYSC Status Register */
+#define SYSCISR			0x04	/* Interrupt Status Register */
+#define SYSCISCR		0x08	/* Interrupt Status Clear Register */
+#define SYSCIER			0x0c	/* Interrupt Enable Register */
+#define SYSCIMR			0x10	/* Interrupt Mask Register */
+
+/* SYSC Status Register */
+#define SYSCSR_PONENB		1	/* Ready for power resume requests */
+#define SYSCSR_POFFENB		0	/* Ready for power shutoff requests */
+
+/*
+ * Power Control Register Offsets inside the register block for each domain
+ * Note: The "CR" registers for ARM cores exist on H1 only
+ *	 Use WFI to power off, CPG/APMU to resume ARM cores on R-Car Gen2
+ *	 Use PSCI on R-Car Gen3
+ */
+#define PWRSR_OFFS		0x00	/* Power Status Register */
+#define PWROFFCR_OFFS		0x04	/* Power Shutoff Control Register */
+#define PWROFFSR_OFFS		0x08	/* Power Shutoff Status Register */
+#define PWRONCR_OFFS		0x0c	/* Power Resume Control Register */
+#define PWRONSR_OFFS		0x10	/* Power Resume Status Register */
+#define PWRER_OFFS		0x14	/* Power Shutoff/Resume Error */
+
+
+#define SYSCSR_RETRIES		100
+#define SYSCSR_DELAY_US		1
+
+#define PWRER_RETRIES		100
+#define PWRER_DELAY_US		1
+
+#define SYSCISR_RETRIES		1000
+#define SYSCISR_DELAY_US	1
+
+#define RCAR_PD_ALWAYS_ON	32	/* Always-on power area */
+
+static void __iomem *rcar_sysc_base;
+static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */
+
+static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
+{
+	unsigned int sr_bit, reg_offs;
+	int k;
+
+	if (on) {
+		sr_bit = SYSCSR_PONENB;
+		reg_offs = PWRONCR_OFFS;
+	} else {
+		sr_bit = SYSCSR_POFFENB;
+		reg_offs = PWROFFCR_OFFS;
+	}
+
+	/* Wait until SYSC is ready to accept a power request */
+	for (k = 0; k < SYSCSR_RETRIES; k++) {
+		if (ioread32(rcar_sysc_base + SYSCSR) & BIT(sr_bit))
+			break;
+		udelay(SYSCSR_DELAY_US);
+	}
+
+	if (k == SYSCSR_RETRIES)
+		return -EAGAIN;
+
+	/* Submit power shutoff or power resume request */
+	iowrite32(BIT(sysc_ch->chan_bit),
+		  rcar_sysc_base + sysc_ch->chan_offs + reg_offs);
+
+	return 0;
+}
+
+static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
+{
+	unsigned int isr_mask = BIT(sysc_ch->isr_bit);
+	unsigned int chan_mask = BIT(sysc_ch->chan_bit);
+	unsigned int status;
+	unsigned long flags;
+	int ret = 0;
+	int k;
+
+	spin_lock_irqsave(&rcar_sysc_lock, flags);
+
+	iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
+
+	/* Submit power shutoff or resume request until it has been accepted */
+	for (k = 0; k < PWRER_RETRIES; k++) {
+		ret = rcar_sysc_pwr_on_off(sysc_ch, on);
+		if (ret)
+			goto out;
+
+		status = ioread32(rcar_sysc_base +
+				  sysc_ch->chan_offs + PWRER_OFFS);
+		if (!(status & chan_mask))
+			break;
+
+		udelay(PWRER_DELAY_US);
+	}
+
+	if (k == PWRER_RETRIES) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Wait until the power shutoff or resume request has completed */
+	for (k = 0; k < SYSCISR_RETRIES; k++) {
+		if (ioread32(rcar_sysc_base + SYSCISR) & isr_mask)
+			break;
+		udelay(SYSCISR_DELAY_US);
+	}
+
+	if (k == SYSCISR_RETRIES)
+		ret = -EIO;
+
+	iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
+
+ out:
+	spin_unlock_irqrestore(&rcar_sysc_lock, flags);
+
+	pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
+		 sysc_ch->isr_bit, ioread32(rcar_sysc_base + SYSCISR), ret);
+	return ret;
+}
+
+int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch)
+{
+	return rcar_sysc_power(sysc_ch, false);
+}
+
+int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch)
+{
+	return rcar_sysc_power(sysc_ch, true);
+}
+
+static bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch)
+{
+	unsigned int st;
+
+	st = ioread32(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS);
+	if (st & BIT(sysc_ch->chan_bit))
+		return true;
+
+	return false;
+}
+
+void __iomem *rcar_sysc_init(phys_addr_t base)
+{
+	rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE);
+	if (!rcar_sysc_base)
+		panic("unable to ioremap R-Car SYSC hardware block\n");
+
+	return rcar_sysc_base;
+}
+
+struct rcar_sysc_pd {
+	struct generic_pm_domain genpd;
+	struct rcar_sysc_ch ch;
+	unsigned int flags;
+	char name[0];
+};
+
+static inline struct rcar_sysc_pd *to_rcar_pd(struct generic_pm_domain *d)
+{
+	return container_of(d, struct rcar_sysc_pd, genpd);
+}
+
+static int rcar_sysc_pd_power_off(struct generic_pm_domain *genpd)
+{
+	struct rcar_sysc_pd *pd = to_rcar_pd(genpd);
+
+	pr_debug("%s: %s\n", __func__, genpd->name);
+
+	if (pd->flags & PD_NO_CR) {
+		pr_debug("%s: Cannot control %s\n", __func__, genpd->name);
+		return -EBUSY;
+	}
+
+	if (pd->flags & PD_BUSY) {
+		pr_debug("%s: %s busy\n", __func__, genpd->name);
+		return -EBUSY;
+	}
+
+	return rcar_sysc_power_down(&pd->ch);
+}
+
+static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
+{
+	struct rcar_sysc_pd *pd = to_rcar_pd(genpd);
+
+	pr_debug("%s: %s\n", __func__, genpd->name);
+
+	if (pd->flags & PD_NO_CR) {
+		pr_debug("%s: Cannot control %s\n", __func__, genpd->name);
+		return 0;
+	}
+
+	return rcar_sysc_power_up(&pd->ch);
+}
+
+static bool has_cpg_mstp;
+
+static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
+{
+	struct generic_pm_domain *genpd = &pd->genpd;
+	const char *name = pd->genpd.name;
+	struct dev_power_governor *gov = &simple_qos_governor;
+
+	if (pd->flags & PD_CPU) {
+		/*
+		 * This domain contains a CPU core and therefore it should
+		 * only be turned off if the CPU is not in use.
+		 */
+		pr_debug("PM domain %s contains %s\n", name, "CPU");
+		pd->flags |= PD_BUSY;
+		gov = &pm_domain_always_on_gov;
+	} else if (pd->flags & PD_SCU) {
+		/*
+		 * This domain contains an SCU and cache-controller, and
+		 * therefore it should only be turned off if the CPU cores are
+		 * not in use.
+		 */
+		pr_debug("PM domain %s contains %s\n", name, "SCU");
+		pd->flags |= PD_BUSY;
+		gov = &pm_domain_always_on_gov;
+	} else if (pd->flags & PD_NO_CR) {
+		/*
+		 * This domain cannot be turned off.
+		 */
+		pd->flags |= PD_BUSY;
+		gov = &pm_domain_always_on_gov;
+	}
+
+	if (!(pd->flags & (PD_CPU | PD_SCU))) {
+		/* Enable Clock Domain for I/O devices */
+		genpd->flags = GENPD_FLAG_PM_CLK;
+		if (has_cpg_mstp) {
+			genpd->attach_dev = cpg_mstp_attach_dev;
+			genpd->detach_dev = cpg_mstp_detach_dev;
+		} else {
+			genpd->attach_dev = cpg_mssr_attach_dev;
+			genpd->detach_dev = cpg_mssr_detach_dev;
+		}
+	}
+
+	genpd->power_off = rcar_sysc_pd_power_off;
+	genpd->power_on = rcar_sysc_pd_power_on;
+
+	if (pd->flags & (PD_CPU | PD_NO_CR)) {
+		/* Skip CPUs (handled by SMP code) and areas without control */
+		pr_debug("%s: Not touching %s\n", __func__, genpd->name);
+		goto finalize;
+	}
+
+	if (!rcar_sysc_power_is_off(&pd->ch)) {
+		pr_debug("%s: %s is already powered\n", __func__, genpd->name);
+		goto finalize;
+	}
+
+	rcar_sysc_power_up(&pd->ch);
+
+finalize:
+	pm_genpd_init(genpd, gov, false);
+}
+
+static const struct of_device_id rcar_sysc_matches[] = {
+#ifdef CONFIG_ARCH_R8A7779
+	{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7790
+	{ .compatible = "renesas,r8a7790-sysc", .data = &r8a7790_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7791
+	{ .compatible = "renesas,r8a7791-sysc", .data = &r8a7791_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7793
+	/* R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. */
+	{ .compatible = "renesas,r8a7793-sysc", .data = &r8a7791_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7794
+	{ .compatible = "renesas,r8a7794-sysc", .data = &r8a7794_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7795
+	{ .compatible = "renesas,r8a7795-sysc", .data = &r8a7795_sysc_info },
+#endif
+	{ /* sentinel */ }
+};
+
+struct rcar_pm_domains {
+	struct genpd_onecell_data onecell_data;
+	struct generic_pm_domain *domains[RCAR_PD_ALWAYS_ON + 1];
+};
+
+static int __init rcar_sysc_pd_init(void)
+{
+	const struct rcar_sysc_info *info;
+	const struct of_device_id *match;
+	struct rcar_pm_domains *domains;
+	struct device_node *np;
+	u32 syscier, syscimr;
+	void __iomem *base;
+	unsigned int i;
+	int error;
+
+	np = of_find_matching_node_and_match(NULL, rcar_sysc_matches, &match);
+	if (!np)
+		return -ENODEV;
+
+	info = match->data;
+
+	has_cpg_mstp = of_find_compatible_node(NULL, NULL,
+					       "renesas,cpg-mstp-clocks");
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		pr_warn("%s: Cannot map regs\n", np->full_name);
+		error = -ENOMEM;
+		goto out_put;
+	}
+
+	rcar_sysc_base = base;
+
+	domains = kzalloc(sizeof(*domains), GFP_KERNEL);
+	if (!domains) {
+		error = -ENOMEM;
+		goto out_put;
+	}
+
+	domains->onecell_data.domains = domains->domains;
+	domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
+
+	for (i = 0, syscier = 0; i < info->num_areas; i++)
+		syscier |= BIT(info->areas[i].isr_bit);
+
+	/*
+	 * Mask all interrupt sources to prevent the CPU from receiving them.
+	 * Make sure not to clear reserved bits that were set before.
+	 */
+	syscimr = ioread32(base + SYSCIMR);
+	syscimr |= syscier;
+	pr_debug("%s: syscimr = 0x%08x\n", np->full_name, syscimr);
+	iowrite32(syscimr, base + SYSCIMR);
+
+	/*
+	 * SYSC needs all interrupt sources enabled to control power.
+	 */
+	pr_debug("%s: syscier = 0x%08x\n", np->full_name, syscier);
+	iowrite32(syscier, base + SYSCIER);
+
+	for (i = 0; i < info->num_areas; i++) {
+		const struct rcar_sysc_area *area = &info->areas[i];
+		struct rcar_sysc_pd *pd;
+
+		pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL);
+		if (!pd) {
+			error = -ENOMEM;
+			goto out_put;
+		}
+
+		strcpy(pd->name, area->name);
+		pd->genpd.name = pd->name;
+		pd->ch.chan_offs = area->chan_offs;
+		pd->ch.chan_bit = area->chan_bit;
+		pd->ch.isr_bit = area->isr_bit;
+		pd->flags = area->flags;
+
+		rcar_sysc_pd_setup(pd);
+		if (area->parent >= 0)
+			pm_genpd_add_subdomain(domains->domains[area->parent],
+					       &pd->genpd);
+
+		domains->domains[area->isr_bit] = &pd->genpd;
+	}
+
+	error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
+
+out_put:
+	of_node_put(np);
+	return error;
+}
+early_initcall(rcar_sysc_pd_init);
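The SYSC code above leans on a bounded-poll idiom: read a status register, give up after a fixed number of retries, and delay briefly between attempts. Below is a minimal, standalone sketch of that idiom (not part of the patch); read_status() is a hypothetical stand-in for the ioread32() of SYSCSR/SYSCISR, and the retry count mirrors SYSCSR_RETRIES.

/*
 * Illustrative sketch only, not part of the patch above: the bounded-poll
 * idiom from rcar_sysc_pwr_on_off(), reduced to standalone C.
 * read_status() is a hypothetical stand-in for ioread32(rcar_sysc_base + SYSCSR).
 */
#include <stdio.h>

#define RETRIES	100

static unsigned int fake_syscsr;

static unsigned int read_status(void)
{
	return fake_syscsr;		/* a hardware register read in the driver */
}

static int wait_for_bit(unsigned int bit)
{
	int k;

	for (k = 0; k < RETRIES; k++) {
		if (read_status() & (1U << bit))
			return 0;	/* ready: the request may be submitted */
		/* udelay(SYSCSR_DELAY_US) in the driver */
	}

	return -1;			/* -EAGAIN in the driver */
}

int main(void)
{
	fake_syscsr = 1U << 1;		/* pretend SYSCSR_PONENB is set */
	printf("wait_for_bit(1) = %d\n", wait_for_bit(1));
	return 0;
}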
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
new file mode 100644
index 0000000..5e76617
--- /dev/null
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -0,0 +1,58 @@
+/*
+ * Renesas R-Car System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __SOC_RENESAS_RCAR_SYSC_H__
+#define __SOC_RENESAS_RCAR_SYSC_H__
+
+#include <linux/types.h>
+
+
+/*
+ * Power Domain flags
+ */
+#define PD_CPU		BIT(0)	/* Area contains main CPU core */
+#define PD_SCU		BIT(1)	/* Area contains SCU and L2 cache */
+#define PD_NO_CR	BIT(2)	/* Area lacks PWR{ON,OFF}CR registers */
+
+#define PD_BUSY		BIT(3)	/* Busy, for internal use only */
+
+#define PD_CPU_CR	PD_CPU		  /* CPU area has CR (R-Car H1) */
+#define PD_CPU_NOCR	PD_CPU | PD_NO_CR /* CPU area lacks CR (R-Car Gen2/3) */
+#define PD_ALWAYS_ON	PD_NO_CR	  /* Always-on area */
+
+
+/*
+ * Description of a Power Area
+ */
+
+struct rcar_sysc_area {
+	const char *name;
+	u16 chan_offs;		/* Offset of PWRSR register for this area */
+	u8 chan_bit;		/* Bit in PWR* (except for PWRUP in PWRSR) */
+	u8 isr_bit;		/* Bit in SYSCI*R */
+	int parent;		/* -1 if none */
+	unsigned int flags;	/* See PD_* */
+};
+
+
+/*
+ * SoC-specific Power Area Description
+ */
+
+struct rcar_sysc_info {
+	const struct rcar_sysc_area *areas;
+	unsigned int num_areas;
+};
+
+extern const struct rcar_sysc_info r8a7779_sysc_info;
+extern const struct rcar_sysc_info r8a7790_sysc_info;
+extern const struct rcar_sysc_info r8a7791_sysc_info;
+extern const struct rcar_sysc_info r8a7794_sysc_info;
+extern const struct rcar_sysc_info r8a7795_sysc_info;
+#endif /* __SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 43155e1..44842a2 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -19,6 +19,7 @@
 #include <linux/mfd/syscon.h>
 #include <dt-bindings/power/rk3288-power.h>
 #include <dt-bindings/power/rk3368-power.h>
+#include <dt-bindings/power/rk3399-power.h>
 
 struct rockchip_domain_info {
 	int pwr_mask;
@@ -45,10 +46,20 @@
 	const struct rockchip_domain_info *domain_info;
 };
 
+#define MAX_QOS_REGS_NUM	5
+#define QOS_PRIORITY		0x08
+#define QOS_MODE		0x0c
+#define QOS_BANDWIDTH		0x10
+#define QOS_SATURATION		0x14
+#define QOS_EXTCONTROL		0x18
+
 struct rockchip_pm_domain {
 	struct generic_pm_domain genpd;
 	const struct rockchip_domain_info *info;
 	struct rockchip_pmu *pmu;
+	int num_qos;
+	struct regmap **qos_regmap;
+	u32 *qos_save_regs[MAX_QOS_REGS_NUM];
 	int num_clks;
 	struct clk *clks[];
 };
@@ -66,11 +77,11 @@
 
 #define DOMAIN(pwr, status, req, idle, ack)	\
 {						\
-	.pwr_mask = BIT(pwr),			\
-	.status_mask = BIT(status),		\
-	.req_mask = BIT(req),			\
-	.idle_mask = BIT(idle),			\
-	.ack_mask = BIT(ack),			\
+	.pwr_mask = (pwr >= 0) ? BIT(pwr) : 0,		\
+	.status_mask = (status >= 0) ? BIT(status) : 0,	\
+	.req_mask = (req >= 0) ? BIT(req) : 0,		\
+	.idle_mask = (idle >= 0) ? BIT(idle) : 0,	\
+	.ack_mask = (ack >= 0) ? BIT(ack) : 0,		\
 }
 
 #define DOMAIN_RK3288(pwr, status, req)		\
@@ -79,6 +90,9 @@
 #define DOMAIN_RK3368(pwr, status, req)		\
 	DOMAIN(pwr, status, req, (req) + 16, req)
 
+#define DOMAIN_RK3399(pwr, status, req)                \
+	DOMAIN(pwr, status, req, req, req)
+
 static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
 {
 	struct rockchip_pmu *pmu = pd->pmu;
@@ -96,6 +110,9 @@
 	struct rockchip_pmu *pmu = pd->pmu;
 	unsigned int val;
 
+	if (pd_info->req_mask == 0)
+		return 0;
+
 	regmap_update_bits(pmu->regmap, pmu->info->req_offset,
 			   pd_info->req_mask, idle ? -1U : 0);
 
@@ -111,11 +128,64 @@
 	return 0;
 }
 
+static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
+{
+	int i;
+
+	for (i = 0; i < pd->num_qos; i++) {
+		regmap_read(pd->qos_regmap[i],
+			    QOS_PRIORITY,
+			    &pd->qos_save_regs[0][i]);
+		regmap_read(pd->qos_regmap[i],
+			    QOS_MODE,
+			    &pd->qos_save_regs[1][i]);
+		regmap_read(pd->qos_regmap[i],
+			    QOS_BANDWIDTH,
+			    &pd->qos_save_regs[2][i]);
+		regmap_read(pd->qos_regmap[i],
+			    QOS_SATURATION,
+			    &pd->qos_save_regs[3][i]);
+		regmap_read(pd->qos_regmap[i],
+			    QOS_EXTCONTROL,
+			    &pd->qos_save_regs[4][i]);
+	}
+	return 0;
+}
+
+static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
+{
+	int i;
+
+	for (i = 0; i < pd->num_qos; i++) {
+		regmap_write(pd->qos_regmap[i],
+			     QOS_PRIORITY,
+			     pd->qos_save_regs[0][i]);
+		regmap_write(pd->qos_regmap[i],
+			     QOS_MODE,
+			     pd->qos_save_regs[1][i]);
+		regmap_write(pd->qos_regmap[i],
+			     QOS_BANDWIDTH,
+			     pd->qos_save_regs[2][i]);
+		regmap_write(pd->qos_regmap[i],
+			     QOS_SATURATION,
+			     pd->qos_save_regs[3][i]);
+		regmap_write(pd->qos_regmap[i],
+			     QOS_EXTCONTROL,
+			     pd->qos_save_regs[4][i]);
+	}
+
+	return 0;
+}
+
 static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
 {
 	struct rockchip_pmu *pmu = pd->pmu;
 	unsigned int val;
 
+	/* check idle status for idle-only domains */
+	if (pd->info->status_mask == 0)
+		return !rockchip_pmu_domain_is_idle(pd);
+
 	regmap_read(pmu->regmap, pmu->info->status_offset, &val);
 
 	/* 1'b0: power on, 1'b1: power off */
@@ -127,6 +197,9 @@
 {
 	struct rockchip_pmu *pmu = pd->pmu;
 
+	if (pd->info->pwr_mask == 0)
+		return;
+
 	regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
 			   pd->info->pwr_mask, on ? 0 : -1U);
 
@@ -147,7 +220,7 @@
 			clk_enable(pd->clks[i]);
 
 		if (!power_on) {
-			/* FIXME: add code to save AXI_QOS */
+			rockchip_pmu_save_qos(pd);
 
 			/* if powering down, idle request to NIU first */
 			rockchip_pmu_set_idle_request(pd, true);
@@ -159,7 +232,7 @@
 			/* if powering up, leave idle mode */
 			rockchip_pmu_set_idle_request(pd, false);
 
-			/* FIXME: add code to restore AXI_QOS */
+			rockchip_pmu_restore_qos(pd);
 		}
 
 		for (i = pd->num_clks - 1; i >= 0; i--)
@@ -227,9 +300,10 @@
 {
 	const struct rockchip_domain_info *pd_info;
 	struct rockchip_pm_domain *pd;
+	struct device_node *qos_node;
 	struct clk *clk;
 	int clk_cnt;
-	int i;
+	int i, j;
 	u32 id;
 	int error;
 
@@ -289,6 +363,45 @@
 			clk, node->name);
 	}
 
+	pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
+						 NULL);
+
+	if (pd->num_qos > 0) {
+		pd->qos_regmap = devm_kcalloc(pmu->dev, pd->num_qos,
+					      sizeof(*pd->qos_regmap),
+					      GFP_KERNEL);
+		if (!pd->qos_regmap) {
+			error = -ENOMEM;
+			goto err_out;
+		}
+
+		for (j = 0; j < MAX_QOS_REGS_NUM; j++) {
+			pd->qos_save_regs[j] = devm_kcalloc(pmu->dev,
+							    pd->num_qos,
+							    sizeof(u32),
+							    GFP_KERNEL);
+			if (!pd->qos_save_regs[j]) {
+				error = -ENOMEM;
+				goto err_out;
+			}
+		}
+
+		for (j = 0; j < pd->num_qos; j++) {
+			qos_node = of_parse_phandle(node, "pm_qos", j);
+			if (!qos_node) {
+				error = -ENODEV;
+				goto err_out;
+			}
+			pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
+			if (IS_ERR(pd->qos_regmap[j])) {
+				error = -ENODEV;
+				of_node_put(qos_node);
+				goto err_out;
+			}
+			of_node_put(qos_node);
+		}
+	}
+
 	error = rockchip_pd_power(pd, true);
 	if (error) {
 		dev_err(pmu->dev,
@@ -360,6 +473,61 @@
 	regmap_write(pmu->regmap, domain_reg_offset + 4, count);
 }
 
+static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
+				     struct device_node *parent)
+{
+	struct device_node *np;
+	struct generic_pm_domain *child_domain, *parent_domain;
+	int error;
+
+	for_each_child_of_node(parent, np) {
+		u32 idx;
+
+		error = of_property_read_u32(parent, "reg", &idx);
+		if (error) {
+			dev_err(pmu->dev,
+				"%s: failed to retrieve domain id (reg): %d\n",
+				parent->name, error);
+			goto err_out;
+		}
+		parent_domain = pmu->genpd_data.domains[idx];
+
+		error = rockchip_pm_add_one_domain(pmu, np);
+		if (error) {
+			dev_err(pmu->dev, "failed to handle node %s: %d\n",
+				np->name, error);
+			goto err_out;
+		}
+
+		error = of_property_read_u32(np, "reg", &idx);
+		if (error) {
+			dev_err(pmu->dev,
+				"%s: failed to retrieve domain id (reg): %d\n",
+				np->name, error);
+			goto err_out;
+		}
+		child_domain = pmu->genpd_data.domains[idx];
+
+		error = pm_genpd_add_subdomain(parent_domain, child_domain);
+		if (error) {
+			dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n",
+				parent_domain->name, child_domain->name, error);
+			goto err_out;
+		} else {
+			dev_dbg(pmu->dev, "%s add subdomain: %s\n",
+				parent_domain->name, child_domain->name);
+		}
+
+		rockchip_pm_add_subdomain(pmu, np);
+	}
+
+	return 0;
+
+err_out:
+	of_node_put(np);
+	return error;
+}
+
 static int rockchip_pm_domain_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -406,6 +574,10 @@
 	}
 
 	pmu->regmap = syscon_node_to_regmap(parent->of_node);
+	if (IS_ERR(pmu->regmap)) {
+		dev_err(dev, "no regmap available\n");
+		return PTR_ERR(pmu->regmap);
+	}
 
 	/*
 	 * Configure power up and down transition delays for CORE
@@ -426,6 +598,14 @@
 			of_node_put(node);
 			goto err_out;
 		}
+
+		error = rockchip_pm_add_subdomain(pmu, node);
+		if (error < 0) {
+			dev_err(dev, "failed to handle subdomain node %s: %d\n",
+				node->name, error);
+			of_node_put(node);
+			goto err_out;
+		}
 	}
 
 	if (error) {
@@ -457,6 +637,36 @@
 	[RK3368_PD_GPU_1]	= DOMAIN_RK3368(17, 16, 2),
 };
 
+static const struct rockchip_domain_info rk3399_pm_domains[] = {
+	[RK3399_PD_TCPD0]	= DOMAIN_RK3399(8, 8, -1),
+	[RK3399_PD_TCPD1]	= DOMAIN_RK3399(9, 9, -1),
+	[RK3399_PD_CCI]		= DOMAIN_RK3399(10, 10, -1),
+	[RK3399_PD_CCI0]	= DOMAIN_RK3399(-1, -1, 15),
+	[RK3399_PD_CCI1]	= DOMAIN_RK3399(-1, -1, 16),
+	[RK3399_PD_PERILP]	= DOMAIN_RK3399(11, 11, 1),
+	[RK3399_PD_PERIHP]	= DOMAIN_RK3399(12, 12, 2),
+	[RK3399_PD_CENTER]	= DOMAIN_RK3399(13, 13, 14),
+	[RK3399_PD_VIO]		= DOMAIN_RK3399(14, 14, 17),
+	[RK3399_PD_GPU]		= DOMAIN_RK3399(15, 15, 0),
+	[RK3399_PD_VCODEC]	= DOMAIN_RK3399(16, 16, 3),
+	[RK3399_PD_VDU]		= DOMAIN_RK3399(17, 17, 4),
+	[RK3399_PD_RGA]		= DOMAIN_RK3399(18, 18, 5),
+	[RK3399_PD_IEP]		= DOMAIN_RK3399(19, 19, 6),
+	[RK3399_PD_VO]		= DOMAIN_RK3399(20, 20, -1),
+	[RK3399_PD_VOPB]	= DOMAIN_RK3399(-1, -1, 7),
+	[RK3399_PD_VOPL]	= DOMAIN_RK3399(-1, -1, 8),
+	[RK3399_PD_ISP0]	= DOMAIN_RK3399(22, 22, 9),
+	[RK3399_PD_ISP1]	= DOMAIN_RK3399(23, 23, 10),
+	[RK3399_PD_HDCP]	= DOMAIN_RK3399(24, 24, 11),
+	[RK3399_PD_GMAC]	= DOMAIN_RK3399(25, 25, 23),
+	[RK3399_PD_EMMC]	= DOMAIN_RK3399(26, 26, 24),
+	[RK3399_PD_USB3]	= DOMAIN_RK3399(27, 27, 12),
+	[RK3399_PD_EDP]		= DOMAIN_RK3399(28, 28, 22),
+	[RK3399_PD_GIC]		= DOMAIN_RK3399(29, 29, 27),
+	[RK3399_PD_SD]		= DOMAIN_RK3399(30, 30, 28),
+	[RK3399_PD_SDIOAUDIO]	= DOMAIN_RK3399(31, 31, 29),
+};
+
 static const struct rockchip_pmu_info rk3288_pmu = {
 	.pwr_offset = 0x08,
 	.status_offset = 0x0c,
@@ -491,6 +701,23 @@
 	.domain_info = rk3368_pm_domains,
 };
 
+static const struct rockchip_pmu_info rk3399_pmu = {
+	.pwr_offset = 0x14,
+	.status_offset = 0x18,
+	.req_offset = 0x60,
+	.idle_offset = 0x64,
+	.ack_offset = 0x68,
+
+	.core_pwrcnt_offset = 0x9c,
+	.gpu_pwrcnt_offset = 0xa4,
+
+	.core_power_transition_time = 24,
+	.gpu_power_transition_time = 24,
+
+	.num_domains = ARRAY_SIZE(rk3399_pm_domains),
+	.domain_info = rk3399_pm_domains,
+};
+
 static const struct of_device_id rockchip_pm_domain_dt_match[] = {
 	{
 		.compatible = "rockchip,rk3288-power-controller",
@@ -500,6 +727,10 @@
 		.compatible = "rockchip,rk3368-power-controller",
 		.data = (void *)&rk3368_pmu,
 	},
+	{
+		.compatible = "rockchip,rk3399-power-controller",
+		.data = (void *)&rk3399_pmu,
+	},
 	{ /* sentinel */ },
 };
 
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index d0c3c3e..03089ad 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -31,7 +31,6 @@
 config ARCH_TEGRA_114_SOC
 	bool "Enable support for Tegra114 family"
 	select ARM_ERRATA_798181 if SMP
-	select ARM_L1_CACHE_SHIFT_6
 	select HAVE_ARM_ARCH_TIMER
 	select PINCTRL_TEGRA114
 	select TEGRA_TIMER
@@ -41,7 +40,6 @@
 
 config ARCH_TEGRA_124_SOC
 	bool "Enable support for Tegra124 family"
-	select ARM_L1_CACHE_SHIFT_6
 	select HAVE_ARM_ARCH_TIMER
 	select PINCTRL_TEGRA124
 	select TEGRA_TIMER
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index bc34cf7..bb17345 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -28,12 +28,16 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
 #include <linux/reboot.h>
 #include <linux/reset.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 
 #include <soc/tegra/common.h>
@@ -101,6 +105,16 @@
 
 #define GPU_RG_CNTRL			0x2d4
 
+struct tegra_powergate {
+	struct generic_pm_domain genpd;
+	struct tegra_pmc *pmc;
+	unsigned int id;
+	struct clk **clks;
+	unsigned int num_clks;
+	struct reset_control **resets;
+	unsigned int num_resets;
+};
+
 struct tegra_pmc_soc {
 	unsigned int num_powergates;
 	const char *const *powergates;
@@ -113,8 +127,11 @@
 
 /**
  * struct tegra_pmc - NVIDIA Tegra PMC
+ * @dev: pointer to PMC device structure
  * @base: pointer to I/O remapped register region
  * @clk: pointer to pclk clock
+ * @soc: pointer to SoC data structure
+ * @debugfs: pointer to debugfs entry
  * @rate: currently configured rate of pclk
  * @suspend_mode: lowest suspend mode available
  * @cpu_good_time: CPU power good time (in microseconds)
@@ -128,12 +145,14 @@
  * @cpu_pwr_good_en: CPU power good signal is enabled
  * @lp0_vec_phys: physical base address of the LP0 warm boot code
  * @lp0_vec_size: size of the LP0 warm boot code
+ * @powergates_available: Bitmap of available power gates
  * @powergates_lock: mutex for power gate register access
  */
 struct tegra_pmc {
 	struct device *dev;
 	void __iomem *base;
 	struct clk *clk;
+	struct dentry *debugfs;
 
 	const struct tegra_pmc_soc *soc;
 
@@ -151,6 +170,7 @@
 	bool cpu_pwr_good_en;
 	u32 lp0_vec_phys;
 	u32 lp0_vec_size;
+	DECLARE_BITMAP(powergates_available, TEGRA_POWERGATE_MAX);
 
 	struct mutex powergates_lock;
 };
@@ -160,6 +180,12 @@
 	.suspend_mode = TEGRA_SUSPEND_NONE,
 };
 
+static inline struct tegra_powergate *
+to_powergate(struct generic_pm_domain *domain)
+{
+	return container_of(domain, struct tegra_powergate, genpd);
+}
+
 static u32 tegra_pmc_readl(unsigned long offset)
 {
 	return readl(pmc->base + offset);
@@ -170,81 +196,79 @@
 	writel(value, pmc->base + offset);
 }
 
+static inline bool tegra_powergate_state(int id)
+{
+	if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
+		return (tegra_pmc_readl(GPU_RG_CNTRL) & 0x1) == 0;
+	else
+		return (tegra_pmc_readl(PWRGATE_STATUS) & BIT(id)) != 0;
+}
+
+static inline bool tegra_powergate_is_valid(int id)
+{
+	return (pmc->soc && pmc->soc->powergates[id]);
+}
+
+static inline bool tegra_powergate_is_available(int id)
+{
+	return test_bit(id, pmc->powergates_available);
+}
+
+static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
+{
+	unsigned int i;
+
+	if (!pmc || !pmc->soc || !name)
+		return -EINVAL;
+
+	for (i = 0; i < pmc->soc->num_powergates; i++) {
+		if (!tegra_powergate_is_valid(i))
+			continue;
+
+		if (!strcmp(name, pmc->soc->powergates[i]))
+			return i;
+	}
+
+	dev_err(pmc->dev, "powergate %s not found\n", name);
+
+	return -ENODEV;
+}
+
 /**
  * tegra_powergate_set() - set the state of a partition
  * @id: partition ID
  * @new_state: new state of the partition
  */
-static int tegra_powergate_set(int id, bool new_state)
+static int tegra_powergate_set(unsigned int id, bool new_state)
 {
 	bool status;
+	int err;
+
+	if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
+		return -EINVAL;
 
 	mutex_lock(&pmc->powergates_lock);
 
-	status = tegra_pmc_readl(PWRGATE_STATUS) & (1 << id);
-
-	if (status == new_state) {
+	if (tegra_powergate_state(id) == new_state) {
 		mutex_unlock(&pmc->powergates_lock);
 		return 0;
 	}
 
 	tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
 
+	err = readx_poll_timeout(tegra_powergate_state, id, status,
+				 status == new_state, 10, 100000);
+
 	mutex_unlock(&pmc->powergates_lock);
 
-	return 0;
+	return err;
 }
 
-/**
- * tegra_powergate_power_on() - power on partition
- * @id: partition ID
- */
-int tegra_powergate_power_on(int id)
-{
-	if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
-		return -EINVAL;
-
-	return tegra_powergate_set(id, true);
-}
-
-/**
- * tegra_powergate_power_off() - power off partition
- * @id: partition ID
- */
-int tegra_powergate_power_off(int id)
-{
-	if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
-		return -EINVAL;
-
-	return tegra_powergate_set(id, false);
-}
-EXPORT_SYMBOL(tegra_powergate_power_off);
-
-/**
- * tegra_powergate_is_powered() - check if partition is powered
- * @id: partition ID
- */
-int tegra_powergate_is_powered(int id)
-{
-	u32 status;
-
-	if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
-		return -EINVAL;
-
-	status = tegra_pmc_readl(PWRGATE_STATUS) & (1 << id);
-	return !!status;
-}
-
-/**
- * tegra_powergate_remove_clamping() - remove power clamps for partition
- * @id: partition ID
- */
-int tegra_powergate_remove_clamping(int id)
+static int __tegra_powergate_remove_clamping(unsigned int id)
 {
 	u32 mask;
 
-	if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
-		return -EINVAL;
+	mutex_lock(&pmc->powergates_lock);
 
 	/*
 	 * On Tegra124 and later, the clamps for the GPU are controlled by a
@@ -253,7 +277,7 @@
 	if (id == TEGRA_POWERGATE_3D) {
 		if (pmc->soc->has_gpu_clamps) {
 			tegra_pmc_writel(0, GPU_RG_CNTRL);
-			return 0;
+			goto out;
 		}
 	}
 
@@ -270,8 +294,236 @@
 
 	tegra_pmc_writel(mask, REMOVE_CLAMPING);
 
+out:
+	mutex_unlock(&pmc->powergates_lock);
+
 	return 0;
 }
+
+static void tegra_powergate_disable_clocks(struct tegra_powergate *pg)
+{
+	unsigned int i;
+
+	for (i = 0; i < pg->num_clks; i++)
+		clk_disable_unprepare(pg->clks[i]);
+}
+
+static int tegra_powergate_enable_clocks(struct tegra_powergate *pg)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < pg->num_clks; i++) {
+		err = clk_prepare_enable(pg->clks[i]);
+		if (err)
+			goto out;
+	}
+
+	return 0;
+
+out:
+	while (i--)
+		clk_disable_unprepare(pg->clks[i]);
+
+	return err;
+}
+
+static int tegra_powergate_reset_assert(struct tegra_powergate *pg)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < pg->num_resets; i++) {
+		err = reset_control_assert(pg->resets[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int tegra_powergate_reset_deassert(struct tegra_powergate *pg)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < pg->num_resets; i++) {
+		err = reset_control_deassert(pg->resets[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int tegra_powergate_power_up(struct tegra_powergate *pg,
+				    bool disable_clocks)
+{
+	int err;
+
+	err = tegra_powergate_reset_assert(pg);
+	if (err)
+		return err;
+
+	usleep_range(10, 20);
+
+	err = tegra_powergate_set(pg->id, true);
+	if (err < 0)
+		return err;
+
+	usleep_range(10, 20);
+
+	err = tegra_powergate_enable_clocks(pg);
+	if (err)
+		goto disable_clks;
+
+	usleep_range(10, 20);
+
+	err = __tegra_powergate_remove_clamping(pg->id);
+	if (err)
+		goto disable_clks;
+
+	usleep_range(10, 20);
+
+	err = tegra_powergate_reset_deassert(pg);
+	if (err)
+		goto powergate_off;
+
+	usleep_range(10, 20);
+
+	if (disable_clocks)
+		tegra_powergate_disable_clocks(pg);
+
+	return 0;
+
+disable_clks:
+	tegra_powergate_disable_clocks(pg);
+	usleep_range(10, 20);
+powergate_off:
+	tegra_powergate_set(pg->id, false);
+
+	return err;
+}
+
+static int tegra_powergate_power_down(struct tegra_powergate *pg)
+{
+	int err;
+
+	err = tegra_powergate_enable_clocks(pg);
+	if (err)
+		return err;
+
+	usleep_range(10, 20);
+
+	err = tegra_powergate_reset_assert(pg);
+	if (err)
+		goto disable_clks;
+
+	usleep_range(10, 20);
+
+	tegra_powergate_disable_clocks(pg);
+
+	usleep_range(10, 20);
+
+	err = tegra_powergate_set(pg->id, false);
+	if (err)
+		goto assert_resets;
+
+	return 0;
+
+assert_resets:
+	tegra_powergate_enable_clocks(pg);
+	usleep_range(10, 20);
+	tegra_powergate_reset_deassert(pg);
+	usleep_range(10, 20);
+disable_clks:
+	tegra_powergate_disable_clocks(pg);
+
+	return err;
+}
+
+static int tegra_genpd_power_on(struct generic_pm_domain *domain)
+{
+	struct tegra_powergate *pg = to_powergate(domain);
+	struct tegra_pmc *pmc = pg->pmc;
+	int err;
+
+	err = tegra_powergate_power_up(pg, true);
+	if (err)
+		dev_err(pmc->dev, "failed to turn on PM domain %s: %d\n",
+			pg->genpd.name, err);
+
+	return err;
+}
+
+static int tegra_genpd_power_off(struct generic_pm_domain *domain)
+{
+	struct tegra_powergate *pg = to_powergate(domain);
+	struct tegra_pmc *pmc = pg->pmc;
+	int err;
+
+	err = tegra_powergate_power_down(pg);
+	if (err)
+		dev_err(pmc->dev, "failed to turn off PM domain %s: %d\n",
+			pg->genpd.name, err);
+
+	return err;
+}
+
+/**
+ * tegra_powergate_power_on() - power on partition
+ * @id: partition ID
+ */
+int tegra_powergate_power_on(unsigned int id)
+{
+	if (!tegra_powergate_is_available(id))
+		return -EINVAL;
+
+	return tegra_powergate_set(id, true);
+}
+
+/**
+ * tegra_powergate_power_off() - power off partition
+ * @id: partition ID
+ */
+int tegra_powergate_power_off(unsigned int id)
+{
+	if (!tegra_powergate_is_available(id))
+		return -EINVAL;
+
+	return tegra_powergate_set(id, false);
+}
+EXPORT_SYMBOL(tegra_powergate_power_off);
+
+/**
+ * tegra_powergate_is_powered() - check if partition is powered
+ * @id: partition ID
+ */
+int tegra_powergate_is_powered(unsigned int id)
+{
+	int status;
+
+	if (!tegra_powergate_is_valid(id))
+		return -EINVAL;
+
+	mutex_lock(&pmc->powergates_lock);
+	status = tegra_powergate_state(id);
+	mutex_unlock(&pmc->powergates_lock);
+
+	return status;
+}
+
+/**
+ * tegra_powergate_remove_clamping() - remove power clamps for partition
+ * @id: partition ID
+ */
+int tegra_powergate_remove_clamping(unsigned int id)
+{
+	if (!tegra_powergate_is_available(id))
+		return -EINVAL;
+
+	return __tegra_powergate_remove_clamping(id);
+}
 EXPORT_SYMBOL(tegra_powergate_remove_clamping);
 
 /**
@@ -282,38 +534,23 @@
  *
  * Must be called with clk disabled, and returns with clk enabled.
  */
-int tegra_powergate_sequence_power_up(int id, struct clk *clk,
+int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
 				      struct reset_control *rst)
 {
-	int ret;
+	struct tegra_powergate pg;
+	int err;
 
-	reset_control_assert(rst);
+	pg.id = id;
+	pg.clks = &clk;
+	pg.num_clks = 1;
+	pg.resets = &rst;
+	pg.num_resets = 1;
 
-	ret = tegra_powergate_power_on(id);
-	if (ret)
-		goto err_power;
+	err = tegra_powergate_power_up(&pg, false);
+	if (err)
+		pr_err("failed to turn on partition %d: %d\n", id, err);
 
-	ret = clk_prepare_enable(clk);
-	if (ret)
-		goto err_clk;
-
-	usleep_range(10, 20);
-
-	ret = tegra_powergate_remove_clamping(id);
-	if (ret)
-		goto err_clamp;
-
-	usleep_range(10, 20);
-	reset_control_deassert(rst);
-
-	return 0;
-
-err_clamp:
-	clk_disable_unprepare(clk);
-err_clk:
-	tegra_powergate_power_off(id);
-err_power:
-	return ret;
+	return err;
 }
 EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
 
@@ -325,9 +562,9 @@
  * Returns the partition ID corresponding to the CPU partition ID or a
  * negative error code on failure.
  */
-static int tegra_get_cpu_powergate_id(int cpuid)
+static int tegra_get_cpu_powergate_id(unsigned int cpuid)
 {
-	if (pmc->soc && cpuid > 0 && cpuid < pmc->soc->num_cpu_powergates)
+	if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates)
 		return pmc->soc->cpu_powergates[cpuid];
 
 	return -EINVAL;
@@ -337,7 +574,7 @@
  * tegra_pmc_cpu_is_powered() - check if CPU partition is powered
  * @cpuid: CPU partition ID
  */
-bool tegra_pmc_cpu_is_powered(int cpuid)
+bool tegra_pmc_cpu_is_powered(unsigned int cpuid)
 {
 	int id;
 
@@ -352,7 +589,7 @@
  * tegra_pmc_cpu_power_on() - power on CPU partition
  * @cpuid: CPU partition ID
  */
-int tegra_pmc_cpu_power_on(int cpuid)
+int tegra_pmc_cpu_power_on(unsigned int cpuid)
 {
 	int id;
 
@@ -367,7 +604,7 @@
  * tegra_pmc_cpu_remove_clamping() - remove power clamps for CPU partition
  * @cpuid: CPU partition ID
  */
-int tegra_pmc_cpu_remove_clamping(int cpuid)
+int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
 {
 	int id;
 
@@ -416,16 +653,18 @@
 static int powergate_show(struct seq_file *s, void *data)
 {
 	unsigned int i;
+	int status;
 
 	seq_printf(s, " powergate powered\n");
 	seq_printf(s, "------------------\n");
 
 	for (i = 0; i < pmc->soc->num_powergates; i++) {
-		if (!pmc->soc->powergates[i])
+		status = tegra_powergate_is_powered(i);
+		if (status < 0)
 			continue;
 
 		seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
-			   tegra_powergate_is_powered(i) ? "yes" : "no");
+			   status ? "yes" : "no");
 	}
 
 	return 0;
@@ -445,17 +684,164 @@
 
 static int tegra_powergate_debugfs_init(void)
 {
-	struct dentry *d;
-
-	d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
-				&powergate_fops);
-	if (!d)
+	pmc->debugfs = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
+					   &powergate_fops);
+	if (!pmc->debugfs)
 		return -ENOMEM;
 
 	return 0;
 }
 
-static int tegra_io_rail_prepare(int id, unsigned long *request,
+static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
+				       struct device_node *np)
+{
+	struct clk *clk;
+	unsigned int i, count;
+	int err;
+
+	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+	if (count == 0)
+		return -ENODEV;
+
+	pg->clks = kcalloc(count, sizeof(clk), GFP_KERNEL);
+	if (!pg->clks)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		pg->clks[i] = of_clk_get(np, i);
+		if (IS_ERR(pg->clks[i])) {
+			err = PTR_ERR(pg->clks[i]);
+			goto err;
+		}
+	}
+
+	pg->num_clks = count;
+
+	return 0;
+
+err:
+	while (i--)
+		clk_put(pg->clks[i]);
+	kfree(pg->clks);
+
+	return err;
+}
+
+static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
+					 struct device_node *np)
+{
+	struct reset_control *rst;
+	unsigned int i, count;
+	int err;
+
+	count = of_count_phandle_with_args(np, "resets", "#reset-cells");
+	if (count == 0)
+		return -ENODEV;
+
+	pg->resets = kcalloc(count, sizeof(rst), GFP_KERNEL);
+	if (!pg->resets)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		pg->resets[i] = of_reset_control_get_by_index(np, i);
+		if (IS_ERR(pg->resets[i])) {
+			err = PTR_ERR(pg->resets[i]);
+			goto error;
+		}
+	}
+
+	pg->num_resets = count;
+
+	return 0;
+
+error:
+	while (i--)
+		reset_control_put(pg->resets[i]);
+	kfree(pg->resets);
+
+	return err;
+}
+
+static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
+{
+	struct tegra_powergate *pg;
+	bool off;
+	int id;
+
+	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
+	if (!pg)
+		goto error;
+
+	id = tegra_powergate_lookup(pmc, np->name);
+	if (id < 0)
+		goto free_mem;
+
+	/*
+	 * Clear the bit for this powergate so it cannot be managed
+	 * directly via the legacy APIs for controlling powergates.
+	 */
+	clear_bit(id, pmc->powergates_available);
+
+	pg->id = id;
+	pg->genpd.name = np->name;
+	pg->genpd.power_off = tegra_genpd_power_off;
+	pg->genpd.power_on = tegra_genpd_power_on;
+	pg->pmc = pmc;
+
+	if (tegra_powergate_of_get_clks(pg, np))
+		goto set_available;
+
+	if (tegra_powergate_of_get_resets(pg, np))
+		goto remove_clks;
+
+	off = !tegra_powergate_is_powered(pg->id);
+
+	pm_genpd_init(&pg->genpd, NULL, off);
+
+	if (of_genpd_add_provider_simple(np, &pg->genpd))
+		goto remove_resets;
+
+	dev_dbg(pmc->dev, "added power domain %s\n", pg->genpd.name);
+
+	return;
+
+remove_resets:
+	while (pg->num_resets--)
+		reset_control_put(pg->resets[pg->num_resets]);
+	kfree(pg->resets);
+
+remove_clks:
+	while (pg->num_clks--)
+		clk_put(pg->clks[pg->num_clks]);
+	kfree(pg->clks);
+
+set_available:
+	set_bit(id, pmc->powergates_available);
+
+free_mem:
+	kfree(pg);
+
+error:
+	dev_err(pmc->dev, "failed to create power domain for %s\n", np->name);
+}
+
+static void tegra_powergate_init(struct tegra_pmc *pmc)
+{
+	struct device_node *np, *child;
+
+	np = of_get_child_by_name(pmc->dev->of_node, "powergates");
+	if (!np)
+		return;
+
+	for_each_child_of_node(np, child) {
+		tegra_powergate_add(pmc, child);
+		of_node_put(child);
+	}
+
+	of_node_put(np);
+}
+
+static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
 				 unsigned long *status, unsigned int *bit)
 {
 	unsigned long rate, value;
@@ -512,15 +898,17 @@
 	tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
 }
 
-int tegra_io_rail_power_on(int id)
+int tegra_io_rail_power_on(unsigned int id)
 {
 	unsigned long request, status, value;
 	unsigned int bit, mask;
 	int err;
 
+	mutex_lock(&pmc->powergates_lock);
+
 	err = tegra_io_rail_prepare(id, &request, &status, &bit);
-	if (err < 0)
-		return err;
+	if (err)
+		goto error;
 
 	mask = 1 << bit;
 
@@ -531,27 +919,32 @@
 	tegra_pmc_writel(value, request);
 
 	err = tegra_io_rail_poll(status, mask, 0, 250);
-	if (err < 0) {
+	if (err) {
 		pr_info("tegra_io_rail_poll() failed: %d\n", err);
-		return err;
+		goto error;
 	}
 
 	tegra_io_rail_unprepare();
 
-	return 0;
+error:
+	mutex_unlock(&pmc->powergates_lock);
+
+	return err;
 }
 EXPORT_SYMBOL(tegra_io_rail_power_on);
 
-int tegra_io_rail_power_off(int id)
+int tegra_io_rail_power_off(unsigned int id)
 {
 	unsigned long request, status, value;
 	unsigned int bit, mask;
 	int err;
 
+	mutex_lock(&pmc->powergates_lock);
+
 	err = tegra_io_rail_prepare(id, &request, &status, &bit);
-	if (err < 0) {
+	if (err) {
 		pr_info("tegra_io_rail_prepare() failed: %d\n", err);
-		return err;
+		goto error;
 	}
 
 	mask = 1 << bit;
@@ -563,12 +956,15 @@
 	tegra_pmc_writel(value, request);
 
 	err = tegra_io_rail_poll(status, mask, mask, 250);
-	if (err < 0)
-		return err;
+	if (err)
+		goto error;
 
 	tegra_io_rail_unprepare();
 
-	return 0;
+error:
+	mutex_unlock(&pmc->powergates_lock);
+
+	return err;
 }
 EXPORT_SYMBOL(tegra_io_rail_power_off);
 
@@ -727,7 +1123,7 @@
 	tegra_pmc_writel(value, PMC_CNTRL);
 }
 
-void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
+static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
 {
 	static const char disabled[] = "emergency thermal reset disabled";
 	u32 pmu_addr, ctrl_id, reg_addr, reg_data, pinmux;
@@ -805,7 +1201,7 @@
 
 static int tegra_pmc_probe(struct platform_device *pdev)
 {
-	void __iomem *base = pmc->base;
+	void __iomem *base;
 	struct resource *res;
 	int err;
 
@@ -815,11 +1211,9 @@
 
 	/* take over the memory region from the early initialization */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pmc->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(pmc->base))
-		return PTR_ERR(pmc->base);
-
-	iounmap(base);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
 
 	pmc->clk = devm_clk_get(&pdev->dev, "pclk");
 	if (IS_ERR(pmc->clk)) {
@@ -842,11 +1236,19 @@
 
 	err = register_restart_handler(&tegra_pmc_restart_handler);
 	if (err) {
+		debugfs_remove(pmc->debugfs);
 		dev_err(&pdev->dev, "unable to register restart handler, %d\n",
 			err);
 		return err;
 	}
 
+	tegra_powergate_init(pmc);
+
+	mutex_lock(&pmc->powergates_lock);
+	iounmap(pmc->base);
+	pmc->base = base;
+	mutex_unlock(&pmc->powergates_lock);
+
 	return 0;
 }
 
@@ -964,7 +1366,6 @@
 	[TEGRA_POWERGATE_VENC] = "venc",
 	[TEGRA_POWERGATE_PCIE] = "pcie",
 	[TEGRA_POWERGATE_VDEC] = "vdec",
-	[TEGRA_POWERGATE_L2] = "l2",
 	[TEGRA_POWERGATE_MPE] = "mpe",
 	[TEGRA_POWERGATE_HEG] = "heg",
 	[TEGRA_POWERGATE_SATA] = "sata",
@@ -1006,17 +1407,13 @@
 	[TEGRA_POWERGATE_3D] = "3d",
 	[TEGRA_POWERGATE_VENC] = "venc",
 	[TEGRA_POWERGATE_PCIE] = "pcie",
-	[TEGRA_POWERGATE_L2] = "l2",
 	[TEGRA_POWERGATE_MPE] = "mpe",
-	[TEGRA_POWERGATE_HEG] = "heg",
 	[TEGRA_POWERGATE_SATA] = "sata",
 	[TEGRA_POWERGATE_CPU1] = "cpu1",
 	[TEGRA_POWERGATE_CPU2] = "cpu2",
 	[TEGRA_POWERGATE_CPU3] = "cpu3",
-	[TEGRA_POWERGATE_CELP] = "celp",
 	[TEGRA_POWERGATE_CPU0] = "cpu0",
 	[TEGRA_POWERGATE_C0NC] = "c0nc",
-	[TEGRA_POWERGATE_C1NC] = "c1nc",
 	[TEGRA_POWERGATE_SOR] = "sor",
 	[TEGRA_POWERGATE_DIS] = "dis",
 	[TEGRA_POWERGATE_DISB] = "disb",
@@ -1080,6 +1477,7 @@
 	const struct of_device_id *match;
 	struct device_node *np;
 	struct resource regs;
+	unsigned int i;
 	bool invert;
 	u32 value;
 
@@ -1129,6 +1527,11 @@
 		return -ENXIO;
 	}
 
+	/* Create a bit-map of the available and valid partitions */
+	for (i = 0; i < pmc->soc->num_powergates; i++)
+		if (pmc->soc->powergates[i])
+			set_bit(i, pmc->powergates_available);
+
 	mutex_init(&pmc->powergates_lock);
 
 	/*
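The reworked tegra_powergate_set() above replaces a fire-and-forget register toggle with readx_poll_timeout(): the macro repeatedly calls tegra_powergate_state(id) into status, sleeping on the order of 10 microseconds between reads, and returns 0 once status == new_state, or -ETIMEDOUT after the 100 ms budget. A rough standalone sketch of that behaviour follows; fake_powergate_state() is a hypothetical stand-in for the real register read and usleep() approximates usleep_range().

/*
 * Illustrative sketch only, not part of the pmc.c patch: roughly what
 *   readx_poll_timeout(tegra_powergate_state, id, status,
 *                      status == new_state, 10, 100000)
 * does, written as plain C.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool fake_powergate_state(int id)
{
	(void)id;
	return true;			/* pretend the partition is already on */
}

static int poll_powergate(int id, bool new_state)
{
	long remaining_us = 100000;	/* 100 ms timeout, as in the patch */
	bool status;

	for (;;) {
		status = fake_powergate_state(id);
		if (status == new_state)
			return 0;
		if (remaining_us <= 0)
			return -1;	/* -ETIMEDOUT in the kernel macro */
		usleep(10);		/* ~10 us between polls */
		remaining_us -= 10;
	}
}

int main(void)
{
	printf("poll_powergate(0, true) = %d\n", poll_powergate(0, true));
	return 0;
}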
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
index c337764..282e371 100644
--- a/drivers/soc/versatile/soc-realview.c
+++ b/drivers/soc/versatile/soc-realview.c
@@ -31,18 +31,6 @@
 
 static u32 realview_coreid;
 
-static const char *realview_board_str(u32 id)
-{
-	switch ((id >> 16) & 0xfff) {
-	case 0x0147:
-		return "HBI-0147";
-	case 0x0159:
-		return "HBI-0159";
-	default:
-		return "Unknown";
-	}
-}
-
 static const char *realview_arch_str(u32 id)
 {
 	switch ((id >> 8) & 0xf) {
@@ -69,7 +57,7 @@
 			      struct device_attribute *attr,
 			      char *buf)
 {
-	return sprintf(buf, "%s\n", realview_board_str(realview_coreid));
+	return sprintf(buf, "HBI-%03x\n", ((realview_coreid >> 16) & 0xfff));
 }
 
 static struct device_attribute realview_board_attr =
@@ -133,8 +121,9 @@
 	device_create_file(soc_device_to_device(soc_dev), &realview_arch_attr);
 	device_create_file(soc_device_to_device(soc_dev), &realview_build_attr);
 
-	dev_info(&pdev->dev, "RealView Syscon Core ID: 0x%08x\n",
-		 realview_coreid);
+	dev_info(&pdev->dev, "RealView Syscon Core ID: 0x%08x, HBI-%03x\n",
+		 realview_coreid,
+		 ((realview_coreid >> 16) & 0xfff));
 	/* FIXME: add attributes for SoC to sysfs */
 	return 0;
 }
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 39412c9..c1a2d74 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -385,8 +385,8 @@
 		dspi->cur_chip = spi_get_ctldata(spi);
 		dspi->cs = spi->chip_select;
 		dspi->cs_change = 0;
-		if (dspi->cur_transfer->transfer_list.next
-				== &dspi->cur_msg->transfers)
+		if (list_is_last(&dspi->cur_transfer->transfer_list,
+				 &dspi->cur_msg->transfers) || transfer->cs_change)
 			dspi->cs_change = 1;
 		dspi->void_write_data = dspi->cur_chip->void_write_data;
 
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index e7a19be..50769078 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -211,11 +211,15 @@
 			 struct spi_transfer *transfer)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
-	unsigned int bpw = transfer->bits_per_word;
+	unsigned int bpw;
 
 	if (!master->dma_rx)
 		return false;
 
+	if (!transfer)
+		return false;
+
+	bpw = transfer->bits_per_word;
 	if (!bpw)
 		bpw = spi->bits_per_word;
 
@@ -333,8 +337,9 @@
 static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 		struct spi_imx_config *config)
 {
-	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
+	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
 	u32 clk = config->speed_hz, delay, reg;
+	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 
 	/*
 	 * The hardware seems to have a race condition when changing modes. The
@@ -358,13 +363,20 @@
 
 	if (config->mode & SPI_CPHA)
 		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
+	else
+		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
 
 	if (config->mode & SPI_CPOL) {
 		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
 		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+	} else {
+		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
+		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
 	}
 	if (config->mode & SPI_CS_HIGH)
 		cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+	else
+		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
 
 	if (spi_imx->usedma)
 		ctrl |= MX51_ECSPI_CTRL_SMC;
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 520ed1d..4fd7f98 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -144,16 +144,16 @@
 		struct dw_dma_slave *slave = c->tx_param;
 
 		slave->dma_dev = &dma_dev->dev;
-		slave->src_master = 1;
-		slave->dst_master = 0;
+		slave->m_master = 0;
+		slave->p_master = 1;
 	}
 
 	if (c->rx_param) {
 		struct dw_dma_slave *slave = c->rx_param;
 
 		slave->dma_dev = &dma_dev->dev;
-		slave->src_master = 1;
-		slave->dst_master = 0;
+		slave->m_master = 0;
+		slave->p_master = 1;
 	}
 
 	spi_pdata.dma_filter = lpss_dma_filter;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 85e59a4..86138e4 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -126,7 +126,7 @@
 		.reg_general = -1,
 		.reg_ssp = 0x20,
 		.reg_cs_ctrl = 0x24,
-		.reg_capabilities = 0xfc,
+		.reg_capabilities = -1,
 		.rx_threshold = 1,
 		.tx_threshold_lo = 32,
 		.tx_threshold_hi = 56,
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 8f50a40..6c6c001 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -534,7 +534,7 @@
 	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
 		rs->speed = MAX_SCLK_OUT;
 
-	/* the minimum divsor is 2 */
+	/* the minimum divisor is 2 */
 	if (rs->max_freq < 2 * rs->speed) {
 		clk_set_rate(rs->spiclk, 2 * rs->speed);
 		rs->max_freq = clk_get_rate(rs->spiclk);
@@ -730,23 +730,27 @@
 	master->transfer_one = rockchip_spi_transfer_one;
 	master->handle_err = rockchip_spi_handle_err;
 
-	rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
-	if (IS_ERR_OR_NULL(rs->dma_tx.ch)) {
+	rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
+	if (IS_ERR(rs->dma_tx.ch)) {
 		/* Check tx to see if we need to defer probing the driver */
 		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
 			ret = -EPROBE_DEFER;
 			goto err_get_fifo_len;
 		}
 		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
+		rs->dma_tx.ch = NULL;
 	}
 
-	rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
-	if (!rs->dma_rx.ch) {
-		if (rs->dma_tx.ch) {
+	rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
+	if (IS_ERR(rs->dma_rx.ch)) {
+		if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
 			dma_release_channel(rs->dma_tx.ch);
 			rs->dma_tx.ch = NULL;
+			ret = -EPROBE_DEFER;
+			goto err_get_fifo_len;
 		}
 		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
+		rs->dma_rx.ch = NULL;
 	}
 
 	if (rs->dma_tx.ch && rs->dma_rx.ch) {
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index eac3c96..443f664 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -94,6 +94,7 @@
 #define QSPI_FLEN(n)			((n - 1) << 0)
 #define QSPI_WLEN_MAX_BITS		128
 #define QSPI_WLEN_MAX_BYTES		16
+#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)
 
 /* STATUS REGISTER */
 #define BUSY				0x01
@@ -235,16 +236,16 @@
 	return  -ETIMEDOUT;
 }
 
-static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+			  int count)
 {
-	int wlen, count, xfer_len;
+	int wlen, xfer_len;
 	unsigned int cmd;
 	const u8 *txbuf;
 	u32 data;
 
 	txbuf = t->tx_buf;
 	cmd = qspi->cmd | QSPI_WR_SNGL;
-	count = t->len;
 	wlen = t->bits_per_word >> 3;	/* in bytes */
 	xfer_len = wlen;
 
@@ -304,9 +305,10 @@
 	return 0;
 }
 
-static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+			 int count)
 {
-	int wlen, count;
+	int wlen;
 	unsigned int cmd;
 	u8 *rxbuf;
 
@@ -323,7 +325,6 @@
 		cmd |= QSPI_RD_SNGL;
 		break;
 	}
-	count = t->len;
 	wlen = t->bits_per_word >> 3;	/* in bytes */
 
 	while (count) {
@@ -354,12 +355,13 @@
 	return 0;
 }
 
-static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+			     int count)
 {
 	int ret;
 
 	if (t->tx_buf) {
-		ret = qspi_write_msg(qspi, t);
+		ret = qspi_write_msg(qspi, t, count);
 		if (ret) {
 			dev_dbg(qspi->dev, "Error while writing\n");
 			return ret;
@@ -367,7 +369,7 @@
 	}
 
 	if (t->rx_buf) {
-		ret = qspi_read_msg(qspi, t);
+		ret = qspi_read_msg(qspi, t, count);
 		if (ret) {
 			dev_dbg(qspi->dev, "Error while reading\n");
 			return ret;
@@ -450,7 +452,8 @@
 	struct spi_device *spi = m->spi;
 	struct spi_transfer *t;
 	int status = 0, ret;
-	int frame_length;
+	unsigned int frame_len_words, transfer_len_words;
+	int wlen;
 
 	/* setup device control reg */
 	qspi->dc = 0;
@@ -462,14 +465,15 @@
 	if (spi->mode & SPI_CS_HIGH)
 		qspi->dc |= QSPI_CSPOL(spi->chip_select);
 
-	frame_length = (m->frame_length << 3) / spi->bits_per_word;
-
-	frame_length = clamp(frame_length, 0, QSPI_FRAME);
+	frame_len_words = 0;
+	list_for_each_entry(t, &m->transfers, transfer_list)
+		frame_len_words += t->len / (t->bits_per_word >> 3);
+	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
 
 	/* setup command reg */
 	qspi->cmd = 0;
 	qspi->cmd |= QSPI_EN_CS(spi->chip_select);
-	qspi->cmd |= QSPI_FLEN(frame_length);
+	qspi->cmd |= QSPI_FLEN(frame_len_words);
 
 	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
 
@@ -479,16 +483,23 @@
 		ti_qspi_disable_memory_map(spi);
 
 	list_for_each_entry(t, &m->transfers, transfer_list) {
-		qspi->cmd |= QSPI_WLEN(t->bits_per_word);
+		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
+			     QSPI_WLEN(t->bits_per_word));
 
-		ret = qspi_transfer_msg(qspi, t);
+		wlen = t->bits_per_word >> 3;
+		transfer_len_words = min(t->len / wlen, frame_len_words);
+
+		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
 		if (ret) {
 			dev_dbg(qspi->dev, "transfer message failed\n");
 			mutex_unlock(&qspi->list_lock);
 			return -EINVAL;
 		}
 
-		m->actual_length += t->len;
+		m->actual_length += transfer_len_words * wlen;
+		frame_len_words -= transfer_len_words;
+		if (frame_len_words == 0)
+			break;
 	}
 
 	mutex_unlock(&qspi->list_lock);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index de2f2f9..0239b45 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1209,7 +1209,7 @@
 	struct spi_master *master =
 		container_of(work, struct spi_master, pump_messages);
 
-	__spi_pump_messages(master, true, false);
+	__spi_pump_messages(master, true, master->bus_lock_flag);
 }
 
 static int spi_init_queue(struct spi_master *master)
@@ -2853,7 +2853,7 @@
  */
 int spi_sync(struct spi_device *spi, struct spi_message *message)
 {
-	return __spi_sync(spi, message, 0);
+	return __spi_sync(spi, message, spi->master->bus_lock_flag);
 }
 EXPORT_SYMBOL_GPL(spi_sync);
 
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index f92e266..180e027 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -8,7 +8,7 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
@@ -22,15 +22,10 @@
  * Shared
  **************************************************/
 
-static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip)
-{
-	return container_of(chip, struct ssb_bus, gpio);
-}
-
 #if IS_ENABLED(CONFIG_SSB_EMBEDDED)
 static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	if (bus->bustype == SSB_BUSTYPE_SSB)
 		return irq_find_mapping(bus->irq_domain, gpio);
@@ -45,7 +40,7 @@
 
 static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio);
 }
@@ -53,7 +48,7 @@
 static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio,
 				      int value)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
 }
@@ -61,7 +56,7 @@
 static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
 					   unsigned gpio)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 0);
 	return 0;
@@ -70,7 +65,7 @@
 static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip,
 					    unsigned gpio, int value)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 1 << gpio);
 	ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
@@ -79,7 +74,7 @@
 
 static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	ssb_chipco_gpio_control(&bus->chipco, 1 << gpio, 0);
 	/* clear pulldown */
@@ -92,7 +87,7 @@
 
 static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	/* clear pullup */
 	ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0);
@@ -246,7 +241,7 @@
 	if (err)
 		return err;
 
-	err = gpiochip_add(chip);
+	err = gpiochip_add_data(chip, bus);
 	if (err) {
 		ssb_gpio_irq_chipco_domain_exit(bus);
 		return err;
@@ -263,7 +258,7 @@
 
 static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio);
 }
@@ -271,7 +266,7 @@
 static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio,
 				     int value)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
 }
@@ -279,7 +274,7 @@
 static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
 					  unsigned gpio)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 0);
 	return 0;
@@ -288,7 +283,7 @@
 static int ssb_gpio_extif_direction_output(struct gpio_chip *chip,
 					   unsigned gpio, int value)
 {
-	struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+	struct ssb_bus *bus = gpiochip_get_data(chip);
 
 	ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 1 << gpio);
 	ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
@@ -439,7 +434,7 @@
 	if (err)
 		return err;
 
-	err = gpiochip_add(chip);
+	err = gpiochip_add_data(chip, bus);
 	if (err) {
 		ssb_gpio_irq_extif_domain_exit(bus);
 		return err;
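The ssb conversion above follows the gpiochip_add_data()/gpiochip_get_data() pattern: the driver hands its private structure to the GPIO core at registration time and fetches it back in each callback, instead of deriving it with container_of(). A minimal kernel-style sketch of the pattern under a hypothetical "foo" driver (not the ssb code itself):

/*
 * Illustrative sketch only: the gpiochip_add_data()/gpiochip_get_data()
 * pairing used by the ssb conversion.  "foo" is a hypothetical driver.
 */
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

struct foo {
	struct gpio_chip chip;
	void __iomem *regs;
};

static int foo_get(struct gpio_chip *chip, unsigned int offset)
{
	struct foo *foo = gpiochip_get_data(chip);	/* set at registration */

	return !!(readl(foo->regs) & BIT(offset));
}

static int foo_register(struct foo *foo)
{
	foo->chip.get = foo_get;
	/* ... label, ngpio, direction callbacks, parent device ... */
	return gpiochip_add_data(&foo->chip, foo);	/* stash private data */
}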
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index cf84581..5bac28a 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -30,6 +30,8 @@
 
 source "drivers/staging/comedi/Kconfig"
 
+source "drivers/staging/olpc_dcon/Kconfig"
+
 source "drivers/staging/rtl8192u/Kconfig"
 
 source "drivers/staging/rtl8192e/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 7d6448d..a954242 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -4,6 +4,7 @@
 obj-$(CONFIG_SLICOSS)		+= slicoss/
 obj-$(CONFIG_PRISM2_USB)	+= wlan-ng/
 obj-$(CONFIG_COMEDI)		+= comedi/
+obj-$(CONFIG_FB_OLPC_DCON)	+= olpc_dcon/
 obj-$(CONFIG_RTL8192U)		+= rtl8192u/
 obj-$(CONFIG_RTL8192E)		+= rtl8192e/
 obj-$(CONFIG_R8712U)		+= rtl8712/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index dab4862..1333543 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -88,7 +88,7 @@
 } while (0)
 
 #ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE	(2 << PAGE_CACHE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE	(2 << PAGE_SHIFT) /* 2 pages */
 #endif
 
 #define LIBCFS_ALLOC_PRE(size, mask)					    \
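
The lustre churn that dominates the rest of this merge is a mechanical rename: PAGE_CACHE_SHIFT/PAGE_CACHE_SIZE and the page_cache_get()/page_cache_release() wrappers had long been plain aliases in include/linux/pagemap.h and were removed during the 4.6 cycle, so substituting the underlying names changes no generated code. The old aliases looked roughly like this (reconstructed for reference, not part of this patch):

/* include/linux/pagemap.h, prior to the removal (approximate) */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
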
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 0f2fd79..837eb22 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -57,7 +57,7 @@
 #include "../libcfs_cpu.h"
 #endif
 
-#define CFS_PAGE_MASK		   (~((__u64)PAGE_CACHE_SIZE-1))
+#define CFS_PAGE_MASK		   (~((__u64)PAGE_SIZE-1))
 #define page_index(p)       ((p)->index)
 
 #define memory_pressure_get() (current->flags & PF_MEMALLOC)
@@ -67,7 +67,7 @@
 #if BITS_PER_LONG == 32
 /* limit to lowmem on 32-bit systems */
 #define NUM_CACHEPAGES \
-	min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+	min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
 #else
 #define NUM_CACHEPAGES totalram_pages
 #endif
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 08f193c..1c679cb 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -514,7 +514,7 @@
 	/**
 	 * Starting offset of the fragment within the page. Note that the
 	 * end of the fragment must not pass the end of the page; i.e.,
-	 * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+	 * kiov_len + kiov_offset <= PAGE_SIZE.
 	 */
 	unsigned int	 kiov_offset;
 } lnet_kiov_t;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 3e1f24e..d4ce06d 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -291,7 +291,7 @@
 
 	for (nob = i = 0; i < niov; i++) {
 		if ((kiov[i].kiov_offset && i > 0) ||
-		    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
+		    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
 			return NULL;
 
 		pages[i] = kiov[i].kiov_page;
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c90e510..c3d628b 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -517,7 +517,7 @@
 		max = TCD_MAX_PAGES;
 	} else {
 		max = max / num_possible_cpus();
-		max <<= (20 - PAGE_CACHE_SHIFT);
+		max <<= (20 - PAGE_SHIFT);
 	}
 	rc = cfs_tracefile_init(max);
 
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index ec3bc04..244eb89 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -182,7 +182,7 @@
 	if (tcd->tcd_cur_pages > 0) {
 		__LASSERT(!list_empty(&tcd->tcd_pages));
 		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
-		if (tage->used + len <= PAGE_CACHE_SIZE)
+		if (tage->used + len <= PAGE_SIZE)
 			return tage;
 	}
 
@@ -260,7 +260,7 @@
 	 * from here: this will lead to infinite recursion.
 	 */
 
-	if (len > PAGE_CACHE_SIZE) {
+	if (len > PAGE_SIZE) {
 		pr_err("cowardly refusing to write %lu bytes in a page\n", len);
 		return NULL;
 	}
@@ -349,7 +349,7 @@
 	for (i = 0; i < 2; i++) {
 		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
 		if (!tage) {
-			if (needed + known_size > PAGE_CACHE_SIZE)
+			if (needed + known_size > PAGE_SIZE)
 				mask |= D_ERROR;
 
 			cfs_trace_put_tcd(tcd);
@@ -360,7 +360,7 @@
 		string_buf = (char *)page_address(tage->page) +
 					tage->used + known_size;
 
-		max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
+		max_nob = PAGE_SIZE - tage->used - known_size;
 		if (max_nob <= 0) {
 			printk(KERN_EMERG "negative max_nob: %d\n",
 			       max_nob);
@@ -424,7 +424,7 @@
 	__LASSERT(debug_buf == string_buf);
 
 	tage->used += needed;
-	__LASSERT(tage->used <= PAGE_CACHE_SIZE);
+	__LASSERT(tage->used <= PAGE_SIZE);
 
 console:
 	if ((mask & libcfs_printk) == 0) {
@@ -835,7 +835,7 @@
 
 int cfs_trace_allocate_string_buffer(char **str, int nob)
 {
-	if (nob > 2 * PAGE_CACHE_SIZE)	    /* string must be "sensible" */
+	if (nob > 2 * PAGE_SIZE)	    /* string must be "sensible" */
 		return -EINVAL;
 
 	*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
@@ -951,7 +951,7 @@
 	}
 
 	mb /= num_possible_cpus();
-	pages = mb << (20 - PAGE_CACHE_SHIFT);
+	pages = mb << (20 - PAGE_SHIFT);
 
 	cfs_tracefile_write_lock();
 
@@ -977,7 +977,7 @@
 
 	cfs_tracefile_read_unlock();
 
-	return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
+	return (total_pages >> (20 - PAGE_SHIFT)) + 1;
 }
 
 static int tracefiled(void *arg)
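
The tracefile changes keep the recurring "<< (20 - PAGE_SHIFT)" and ">> (20 - PAGE_SHIFT)" idiom intact and only swap in the canonical macro name: a megabyte is 2^20 bytes and a page is 2^PAGE_SHIFT bytes, so shifting by the difference converts between megabyte counts and page counts. A standalone illustration with hypothetical helper names (userspace, assuming 4 KiB pages):

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12			/* 4 KiB pages, the common case */

static unsigned long mb_to_pages(unsigned long mb)
{
	return mb << (20 - DEMO_PAGE_SHIFT);	/* 1 MB == 256 pages of 4 KiB */
}

static unsigned long pages_to_mb(unsigned long pages)
{
	return pages >> (20 - DEMO_PAGE_SHIFT);
}

int main(void)
{
	printf("5 MB       -> %lu pages\n", mb_to_pages(5));	/* 1280 */
	printf("1280 pages -> %lu MB\n", pages_to_mb(1280));	/* 5 */
	return 0;
}
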
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index 4c77f90..ac84e7f 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -87,7 +87,7 @@
 extern int  libcfs_panic_in_progress;
 int cfs_trace_max_debug_mb(void);
 
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 #define CFS_TRACEFILE_SIZE (500 << 20)
 
@@ -96,7 +96,7 @@
 /*
  * Private declare for tracefile
  */
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 
 #define CFS_TRACEFILE_SIZE (500 << 20)
@@ -257,7 +257,7 @@
 do {								    \
 	__LASSERT(tage);					\
 	__LASSERT(tage->page);				  \
-	__LASSERT(tage->used <= PAGE_CACHE_SIZE);			 \
+	__LASSERT(tage->used <= PAGE_SIZE);			 \
 	__LASSERT(page_count(tage->page) > 0);		      \
 } while (0)
 
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index c74514f..75d3121 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -139,7 +139,7 @@
 		for (i = 0; i < (int)niov; i++) {
 			/* We take the page pointer on trust */
 			if (lmd->md_iov.kiov[i].kiov_offset +
-			    lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
+			    lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
 				return -EINVAL; /* invalid length */
 
 			total_length += lmd->md_iov.kiov[i].kiov_len;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 0009a8d..f19aa93 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -549,12 +549,12 @@
 		if (len <= frag_len) {
 			dst->kiov_len = len;
 			LASSERT(dst->kiov_offset + dst->kiov_len
-					<= PAGE_CACHE_SIZE);
+					<= PAGE_SIZE);
 			return niov;
 		}
 
 		dst->kiov_len = frag_len;
-		LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+		LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
 
 		len -= frag_len;
 		dst++;
@@ -887,7 +887,7 @@
 	rbp = &the_lnet.ln_rtrpools[cpt][0];
 
 	LASSERT(msg->msg_len <= LNET_MTU);
-	while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
+	while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
 		rbp++;
 		LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
 	}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index cc0c275..891fd59 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -166,9 +166,9 @@
 	nalloc = 16;	/* first guess at max interfaces */
 	toobig = 0;
 	for (;;) {
-		if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+		if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
 			toobig = 1;
-			nalloc = PAGE_CACHE_SIZE / sizeof(*ifr);
+			nalloc = PAGE_SIZE / sizeof(*ifr);
 			CWARN("Too many interfaces: only enumerating first %d\n",
 			      nalloc);
 		}
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 61459cf..b01dc42 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -27,8 +27,8 @@
 #define LNET_NRB_SMALL_PAGES	1
 #define LNET_NRB_LARGE_MIN	256	/* min value for each CPT */
 #define LNET_NRB_LARGE		(LNET_NRB_LARGE_MIN * 4)
-#define LNET_NRB_LARGE_PAGES   ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \
-				 PAGE_CACHE_SHIFT)
+#define LNET_NRB_LARGE_PAGES   ((LNET_MTU + PAGE_SIZE - 1) >> \
+				 PAGE_SHIFT)
 
 static char *forwarding = "";
 module_param(forwarding, charp, 0444);
@@ -1338,7 +1338,7 @@
 			return NULL;
 		}
 
-		rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
+		rb->rb_kiov[i].kiov_len = PAGE_SIZE;
 		rb->rb_kiov[i].kiov_offset = 0;
 		rb->rb_kiov[i].kiov_page = page;
 	}
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index eebc924..dcb6e50 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -90,7 +90,7 @@
 		 * NB: this is not going to work for variable page size,
 		 * but we have to keep it for compatibility
 		 */
-		len = npg * PAGE_CACHE_SIZE;
+		len = npg * PAGE_SIZE;
 
 	} else {
 		test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -104,7 +104,7 @@
 		opc = breq->blk_opc;
 		flags = breq->blk_flags;
 		len = breq->blk_len;
-		npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	}
 
 	if (npg > LNET_MAX_IOV || npg <= 0)
@@ -167,13 +167,13 @@
 
 	if (pattern == LST_BRW_CHECK_SIMPLE) {
 		memcpy(addr, &magic, BRW_MSIZE);
-		addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+		addr += PAGE_SIZE - BRW_MSIZE;
 		memcpy(addr, &magic, BRW_MSIZE);
 		return;
 	}
 
 	if (pattern == LST_BRW_CHECK_FULL) {
-		for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
+		for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
 			memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
 		return;
 	}
@@ -198,7 +198,7 @@
 		if (data != magic)
 			goto bad_data;
 
-		addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+		addr += PAGE_SIZE - BRW_MSIZE;
 		data = *((__u64 *)addr);
 		if (data != magic)
 			goto bad_data;
@@ -207,7 +207,7 @@
 	}
 
 	if (pattern == LST_BRW_CHECK_FULL) {
-		for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
+		for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
 			data = *(((__u64 *)addr) + i);
 			if (data != magic)
 				goto bad_data;
@@ -278,7 +278,7 @@
 		opc = breq->blk_opc;
 		flags = breq->blk_flags;
 		npg = breq->blk_npg;
-		len = npg * PAGE_CACHE_SIZE;
+		len = npg * PAGE_SIZE;
 
 	} else {
 		test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@@ -292,7 +292,7 @@
 		opc = breq->blk_opc;
 		flags = breq->blk_flags;
 		len = breq->blk_len;
-		npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	}
 
 	rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
@@ -463,10 +463,10 @@
 			reply->brw_status = EINVAL;
 			return 0;
 		}
-		npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
+		npg = reqst->brw_len >> PAGE_SHIFT;
 
 	} else {
-		npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	}
 
 	replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 5c7cb72..79ee6c0 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -743,7 +743,7 @@
 	if (args->lstio_tes_param &&
 	    (args->lstio_tes_param_len <= 0 ||
 	     args->lstio_tes_param_len >
-	     PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+	     PAGE_SIZE - sizeof(lstcon_test_t)))
 		return -EINVAL;
 
 	LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
@@ -819,7 +819,7 @@
 
 	opc = data->ioc_u32[0];
 
-	if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+	if (data->ioc_plen1 > PAGE_SIZE)
 		return -EINVAL;
 
 	LIBCFS_ALLOC(buf, data->ioc_plen1);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index bcd7888..35a227d 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -786,8 +786,8 @@
 	test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
 
 	brq->blk_opc = param->blk_opc;
-	brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
-			PAGE_CACHE_SIZE;
+	brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
+			PAGE_SIZE;
 	brq->blk_flags = param->blk_flags;
 
 	return 0;
@@ -822,7 +822,7 @@
 	if (transop == LST_TRANS_TSBCLIADD) {
 		npg = sfw_id_pages(test->tes_span);
 		nob = !(feats & LST_FEAT_BULK_LEN) ?
-		      npg * PAGE_CACHE_SIZE :
+		      npg * PAGE_SIZE :
 		      sizeof(lnet_process_id_packed_t) * test->tes_span;
 	}
 
@@ -851,8 +851,8 @@
 			LASSERT(nob > 0);
 
 			len = !(feats & LST_FEAT_BULK_LEN) ?
-			      PAGE_CACHE_SIZE :
-			      min_t(int, nob, PAGE_CACHE_SIZE);
+			      PAGE_SIZE :
+			      min_t(int, nob, PAGE_SIZE);
 			nob -= len;
 
 			bulk->bk_iovs[i].kiov_offset = 0;
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 926c397..e2c5323 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -1161,7 +1161,7 @@
 		int len;
 
 		if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
-			len = npg * PAGE_CACHE_SIZE;
+			len = npg * PAGE_SIZE;
 
 		} else {
 			len = sizeof(lnet_process_id_packed_t) *
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 69be7d6..7d7748d 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -90,7 +90,7 @@
 static int
 srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
 {
-	nob = min_t(int, nob, PAGE_CACHE_SIZE);
+	nob = min_t(int, nob, PAGE_SIZE);
 
 	LASSERT(nob > 0);
 	LASSERT(i >= 0 && i < bk->bk_niov);
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 288522d..e689ca1 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -390,10 +390,10 @@
 	} tsi_u;
 } sfw_test_instance_t;
 
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used */
 #define SFW_MAX_CONCUR	   LST_MAX_CONCUR
-#define SFW_ID_PER_PAGE    (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
+#define SFW_ID_PER_PAGE    (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
 #define SFW_MAX_NDESTS	   (LNET_MAX_IOV * SFW_ID_PER_PAGE)
 #define sfw_id_pages(n)    (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
 
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index 33e0b99..c6c7f54 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -52,7 +52,7 @@
 		return;
 
 	if (PagePrivate(page))
-		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
 
 	cancel_dirty_page(page);
 	ClearPageMappedToDisk(page);
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index b5088b1..242bb1e 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1118,7 +1118,7 @@
 	{							 \
 		type *value;				      \
 								  \
-		CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value));       \
+		CLASSERT(PAGE_SIZE >= sizeof (*value));       \
 								  \
 		value = kzalloc(sizeof(*value), GFP_NOFS);	\
 		if (!value)				\
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index da8bc6e..5aae1d0 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -1022,16 +1022,16 @@
  * MDS_READPAGE page size
  *
  * This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than PAGE_CACHE_SIZE because the client needs to
+ * It's different than PAGE_SIZE because the client needs to
  * access the struct lu_dirpage header packed at the beginning of
  * the "page" and without this there isn't any way to know find the
- * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_SIZE differ.
  */
 #define LU_PAGE_SHIFT 12
 #define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
 #define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))
 
-#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
+#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
 
 /** @} lu_dir */
 
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index df94f9f..af77eb3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -155,12 +155,12 @@
 		if (cli->cl_max_mds_easize < body->max_mdsize) {
 			cli->cl_max_mds_easize = body->max_mdsize;
 			cli->cl_default_mds_easize =
-			    min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE);
+			    min_t(__u32, body->max_mdsize, PAGE_SIZE);
 		}
 		if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
 			cli->cl_max_mds_cookiesize = body->max_cookiesize;
 			cli->cl_default_mds_cookiesize =
-			    min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE);
+			    min_t(__u32, body->max_cookiesize, PAGE_SIZE);
 		}
 	}
 }
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 4fa1a18..69586a5 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -99,21 +99,21 @@
  */
 #define PTLRPC_MAX_BRW_BITS	(LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
 #define PTLRPC_MAX_BRW_SIZE	(1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
 
 #define ONE_MB_BRW_SIZE		(1 << LNET_MTU_BITS)
 #define MD_MAX_BRW_SIZE		(1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_SHIFT)
 #define DT_MAX_BRW_SIZE		PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_SHIFT)
 #define OFD_MAX_BRW_SIZE	(1 << LNET_MTU_BITS)
 
 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
 #  error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
 # endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
 # endif
 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
 #  error "PTLRPC_MAX_BRW_SIZE too big"
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 4a0f2e8..4264d97 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -272,7 +272,7 @@
 	int		 cl_grant_shrink_interval; /* seconds */
 
 	/* A chunk is an optimal size used by osc_extent to determine
-	 * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size)
+	 * the extent size. A chunk is max(PAGE_SIZE, OST block size)
 	 */
 	int		  cl_chunkbits;
 	int		  cl_chunk;
@@ -1318,7 +1318,7 @@
 
 static inline int cli_brw_size(struct obd_device *obd)
 {
-	return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+	return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
 }
 
 #endif /* __OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 225262fa..f8ee3a3 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -500,7 +500,7 @@
 
 #ifdef POISON_BULK
 #define POISON_PAGE(page, val) do {		  \
-	memset(kmap(page), val, PAGE_CACHE_SIZE); \
+	memset(kmap(page), val, PAGE_SIZE); \
 	kunmap(page);				  \
 } while (0)
 #else
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index aced41a..96141d1 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -758,9 +758,9 @@
 				 * --bug 17336
 				 */
 				loff_t size = cl_isize_read(inode);
-				loff_t cur_index = start >> PAGE_CACHE_SHIFT;
+				loff_t cur_index = start >> PAGE_SHIFT;
 				loff_t size_index = (size - 1) >>
-						    PAGE_CACHE_SHIFT;
+						    PAGE_SHIFT;
 
 				if ((size == 0 && cur_index != 0) ||
 				    size_index < cur_index)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index b586d5a..7dd7df5 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -307,8 +307,8 @@
 	cli->cl_avail_grant = 0;
 	/* FIXME: Should limit this for the sum of all cl_dirty_max. */
 	cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
-	if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
-		cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
+	if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
+		cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
 	INIT_LIST_HEAD(&cli->cl_cache_waiters);
 	INIT_LIST_HEAD(&cli->cl_loi_ready_list);
 	INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -353,15 +353,15 @@
 	 * In the future this should likely be increased. LU-1431
 	 */
 	cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
-					  LNET_MTU >> PAGE_CACHE_SHIFT);
+					  LNET_MTU >> PAGE_SHIFT);
 
 	if (!strcmp(name, LUSTRE_MDC_NAME)) {
 		cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
-	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 2;
-	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 3;
-	} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+	} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
 		cli->cl_max_rpcs_in_flight = 4;
 	} else {
 		cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 3e937b0..b913ba9 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -107,7 +107,7 @@
 /*
  * 50 ldlm locks for 1MB of RAM.
  */
-#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
+#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
 
 /*
  * Maximal possible grant step plan in %.
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index c7904a9..74e193e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -546,7 +546,7 @@
 {
 	int avail;
 
-	avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
+	avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
 	if (likely(avail >= 0))
 		avail /= (int)sizeof(struct lustre_handle);
 	else
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 4e0a3e5..7a0a67f 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -134,9 +134,8 @@
  * a header lu_dirpage which describes the start/end hash, and whether this
  * page is empty (contains no dir entry) or hash collide with next page.
  * After client receives reply, several pages will be integrated into dir page
- * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
- * lu_dirpage for this integrated page will be adjusted. See
- * lmv_adjust_dirpages().
+ * in PAGE_SIZE (if PAGE_SIZE is greater than LU_PAGE_SIZE), and the lu_dirpage
+ * for this integrated page will be adjusted. See lmv_adjust_dirpages().
  *
  */
 
@@ -153,7 +152,7 @@
 	struct page **page_pool;
 	struct page *page;
 	struct lu_dirpage *dp;
-	int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
+	int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
 	int nrdpgs = 0; /* number of pages read actually */
 	int npages;
 	int i;
@@ -193,8 +192,8 @@
 		if (body->valid & OBD_MD_FLSIZE)
 			cl_isize_write(inode, body->size);
 
-		nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
-			 >> PAGE_CACHE_SHIFT;
+		nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
+			 >> PAGE_SHIFT;
 		SetPageUptodate(page0);
 	}
 	unlock_page(page0);
@@ -209,7 +208,7 @@
 		page = page_pool[i];
 
 		if (rc < 0 || i >= nrdpgs) {
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -230,7 +229,7 @@
 			CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
 			       offset, ret);
 		}
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	if (page_pool != &page0)
@@ -247,7 +246,7 @@
 			truncate_complete_page(page->mapping, page);
 		unlock_page(page);
 	}
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -273,7 +272,7 @@
 	if (found > 0 && !radix_tree_exceptional_entry(page)) {
 		struct lu_dirpage *dp;
 
-		page_cache_get(page);
+		get_page(page);
 		spin_unlock_irq(&mapping->tree_lock);
 		/*
 		 * In contrast to find_lock_page() we are sure that directory
@@ -313,7 +312,7 @@
 				page = NULL;
 			}
 		} else {
-			page_cache_release(page);
+			put_page(page);
 			page = ERR_PTR(-EIO);
 		}
 
@@ -1507,7 +1506,7 @@
 			st.st_gid     = body->gid;
 			st.st_rdev    = body->rdev;
 			st.st_size    = body->size;
-			st.st_blksize = PAGE_CACHE_SIZE;
+			st.st_blksize = PAGE_SIZE;
 			st.st_blocks  = body->blocks;
 			st.st_atime   = body->atime;
 			st.st_mtime   = body->mtime;
@@ -1866,7 +1865,6 @@
 	int api32 = ll_need_32bit_api(sbi);
 	loff_t ret = -EINVAL;
 
-	inode_lock(inode);
 	switch (origin) {
 	case SEEK_SET:
 		break;
@@ -1904,7 +1902,6 @@
 	goto out;
 
 out:
-	inode_unlock(inode);
 	return ret;
 }
 
@@ -1923,7 +1920,7 @@
 	.open     = ll_dir_open,
 	.release  = ll_dir_release,
 	.read     = generic_read_dir,
-	.iterate  = ll_readdir,
+	.iterate_shared  = ll_readdir,
 	.unlocked_ioctl   = ll_dir_ioctl,
 	.fsync    = ll_fsync,
 };
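
Besides the size macros, this file converts page_cache_get()/page_cache_release() to get_page()/put_page(); the reference counting is unchanged, only the alias is gone. A minimal sketch of the lookup-and-drop pattern these hunks preserve (hypothetical helper, kernel context assumed):

#include <linux/pagemap.h>

/* Hypothetical helper: peek at index 0 of a mapping, then drop the reference. */
static bool demo_index_zero_uptodate(struct address_space *mapping)
{
	struct page *page = find_get_page(mapping, 0);	/* takes a reference on success */
	bool ret = false;

	if (page) {
		ret = PageUptodate(page);
		put_page(page);		/* spelled page_cache_release(page) before the rename */
	}
	return ret;
}
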
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 3e1572c..65a6ace 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -310,10 +310,10 @@
 /* default to about 40meg of readahead on a given system.  That much tied
  * up in 512k readahead requests serviced at 40ms each is about 1GB/s.
  */
-#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT))
 
 /* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
 
 enum ra_stat {
 	RA_STAT_HIT = 0,
@@ -975,13 +975,13 @@
 static inline void ll_invalidate_page(struct page *vmpage)
 {
 	struct address_space *mapping = vmpage->mapping;
-	loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
+	loff_t offset = vmpage->index << PAGE_SHIFT;
 
 	LASSERT(PageLocked(vmpage));
 	if (!mapping)
 		return;
 
-	ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
+	ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
 	truncate_complete_page(mapping, vmpage);
 }
 
@@ -1042,8 +1042,8 @@
 /* llite/xattr.c */
 int ll_setxattr(struct dentry *dentry, const char *name,
 		const void *value, size_t size, int flags);
-ssize_t ll_getxattr(struct dentry *dentry, const char *name,
-		    void *buffer, size_t size);
+ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
+		    const char *name, void *buffer, size_t size);
 ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
 int ll_removexattr(struct dentry *dentry, const char *name);
 
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6d6bb33..b57a992 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -85,7 +85,7 @@
 
 	si_meminfo(&si);
 	pages = si.totalram - si.totalhigh;
-	if (pages >> (20 - PAGE_CACHE_SHIFT) < 512)
+	if (pages >> (20 - PAGE_SHIFT) < 512)
 		lru_page_max = pages / 2;
 	else
 		lru_page_max = (pages / 4) * 3;
@@ -272,12 +272,12 @@
 	    valid != CLIENT_CONNECT_MDT_REQD) {
 		char *buf;
 
-		buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!buf) {
 			err = -ENOMEM;
 			goto out_md_fid;
 		}
-		obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
+		obd_connect_flags2str(buf, PAGE_SIZE,
 				      valid ^ CLIENT_CONNECT_MDT_REQD, ",");
 		LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
 				   sbi->ll_md_exp->exp_obd->obd_name, buf);
@@ -335,7 +335,7 @@
 	if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
 		sbi->ll_md_brw_size = data->ocd_brw_size;
 	else
-		sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
+		sbi->ll_md_brw_size = PAGE_SIZE;
 
 	if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
 		LCONSOLE_INFO("Layout lock feature supported.\n");
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 69445a9..5b484e6 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -58,7 +58,7 @@
 		     size_t count)
 {
 	policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
-				 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+				 (vma->vm_pgoff << PAGE_SHIFT);
 	policy->l_extent.end = (policy->l_extent.start + count - 1) |
 			       ~CFS_PAGE_MASK;
 }
@@ -321,7 +321,7 @@
 
 		vmpage = vio->u.fault.ft_vmpage;
 		if (result != 0 && vmpage) {
-			page_cache_release(vmpage);
+			put_page(vmpage);
 			vmf->page = NULL;
 		}
 	}
@@ -360,7 +360,7 @@
 		lock_page(vmpage);
 		if (unlikely(!vmpage->mapping)) { /* unlucky */
 			unlock_page(vmpage);
-			page_cache_release(vmpage);
+			put_page(vmpage);
 			vmf->page = NULL;
 
 			if (!printed && ++count > 16) {
@@ -457,7 +457,7 @@
 	LASSERTF(last > first, "last %llu first %llu\n", last, first);
 	if (mapping_mapped(mapping)) {
 		rc = 0;
-		unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
+		unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
 				    last - first + 1, 0);
 	}
 
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index b725fc1..f169c0d 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -218,7 +218,7 @@
 		offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 		bio_for_each_segment(bvec, bio, iter) {
 			BUG_ON(bvec.bv_offset != 0);
-			BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
+			BUG_ON(bvec.bv_len != PAGE_SIZE);
 
 			pages[page_count] = bvec.bv_page;
 			offsets[page_count] = offset;
@@ -232,7 +232,7 @@
 			(rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
 			page_count);
 
-	pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
+	pvec->ldp_size = page_count << PAGE_SHIFT;
 	pvec->ldp_nr = page_count;
 
 	/* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
@@ -507,7 +507,7 @@
 
 	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
-	lo->lo_blocksize = PAGE_CACHE_SIZE;
+	lo->lo_blocksize = PAGE_SIZE;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;
@@ -525,11 +525,11 @@
 	lo->lo_queue->queuedata = lo;
 
 	/* queue parameters */
-	CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+	CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
 	blk_queue_logical_block_size(lo->lo_queue,
-				     (unsigned short)PAGE_CACHE_SIZE);
+				     (unsigned short)PAGE_SIZE);
 	blk_queue_max_hw_sectors(lo->lo_queue,
-				 LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+				 LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
 	blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
 
 	set_capacity(disks[lo->lo_number], size);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 45941a6..27ab126 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -233,7 +233,7 @@
 	pages_number = sbi->ll_ra_info.ra_max_pages;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -251,12 +251,12 @@
 	if (rc)
 		return rc;
 
-	pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+	pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
 	if (pages_number > totalram_pages / 2) {
 
 		CERROR("can't set file readahead more than %lu MB\n",
-		       totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
+		       totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
 		return -ERANGE;
 	}
 
@@ -281,7 +281,7 @@
 	pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -326,7 +326,7 @@
 	pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
 	spin_unlock(&sbi->ll_lock);
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
 }
 
@@ -349,7 +349,7 @@
 	 */
 	if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
 		CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
-		       sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
+		       sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
 		return -ERANGE;
 	}
 
@@ -366,7 +366,7 @@
 	struct super_block     *sb    = m->private;
 	struct ll_sb_info      *sbi   = ll_s2sbi(sb);
 	struct cl_client_cache *cache = &sbi->ll_cache;
-	int shift = 20 - PAGE_CACHE_SHIFT;
+	int shift = 20 - PAGE_SHIFT;
 	int max_cached_mb;
 	int unused_mb;
 
@@ -405,7 +405,7 @@
 		return -EFAULT;
 	kernbuf[count] = 0;
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
 		  kernbuf;
 	rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -415,7 +415,7 @@
 	if (pages_number < 0 || pages_number > totalram_pages) {
 		CERROR("%s: can't set max cache more than %lu MB\n",
 		       ll_get_fsname(sb, NULL, 0),
-		       totalram_pages >> (20 - PAGE_CACHE_SHIFT));
+		       totalram_pages >> (20 - PAGE_SHIFT));
 		return -ERANGE;
 	}
 
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 34614ac..edab6c5 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -146,10 +146,10 @@
 		 */
 		io->ci_lockreq = CILR_NEVER;
 
-		pos = vmpage->index << PAGE_CACHE_SHIFT;
+		pos = vmpage->index << PAGE_SHIFT;
 
 		/* Create a temp IO to serve write. */
-		result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
+		result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
 		if (result == 0) {
 			cio->cui_fd = LUSTRE_FPRIVATE(file);
 			cio->cui_iter = NULL;
@@ -498,7 +498,7 @@
 		}
 		if (rc != 1)
 			unlock_page(vmpage);
-		page_cache_release(vmpage);
+		put_page(vmpage);
 	} else {
 		which = RA_STAT_FAILED_GRAB_PAGE;
 		msg   = "g_c_p_n failed";
@@ -521,13 +521,13 @@
  * striped over, rather than having a constant value for all files here.
  */
 
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
  * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
  * by default, this should be adjusted corresponding with max_read_ahead_mb
  * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
  * up quickly which will affect read performance significantly. See LU-2816
  */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
 
 static inline int stride_io_mode(struct ll_readahead_state *ras)
 {
@@ -739,7 +739,7 @@
 			end = rpc_boundary;
 
 		/* Truncate RA window to end of file */
-		end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
+		end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
 
 		ras->ras_next_readahead = max(end, end + 1);
 		RAS_CDEBUG(ras);
@@ -776,7 +776,7 @@
 	if (reserved != 0)
 		ll_ra_count_put(ll_i2sbi(inode), reserved);
 
-	if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+	if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
 		ll_ra_stats_inc(mapping, RA_STAT_EOF);
 
 	/* if we didn't get to the end of the region we reserved from
@@ -985,8 +985,8 @@
 	if (ras->ras_requests == 2 && !ras->ras_request_index) {
 		__u64 kms_pages;
 
-		kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-			    PAGE_CACHE_SHIFT;
+		kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
+			    PAGE_SHIFT;
 
 		CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
 		       ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
@@ -1173,7 +1173,7 @@
 		 * PageWriteback or clean the page.
 		 */
 		result = cl_sync_file_range(inode, offset,
-					    offset + PAGE_CACHE_SIZE - 1,
+					    offset + PAGE_SIZE - 1,
 					    CL_FSYNC_LOCAL, 1);
 		if (result > 0) {
 			/* actually we may have written more than one page.
@@ -1211,7 +1211,7 @@
 	int ignore_layout = 0;
 
 	if (wbc->range_cyclic) {
-		start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+		start = mapping->writeback_index << PAGE_SHIFT;
 		end = OBD_OBJECT_EOF;
 	} else {
 		start = wbc->range_start;
@@ -1241,7 +1241,7 @@
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
 		if (end == OBD_OBJECT_EOF)
 			end = i_size_read(inode);
-		mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
+		mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
 	}
 	return result;
 }
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 7a5db67..0c3459c 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -87,7 +87,7 @@
 	 * below because they are run with page locked and all our io is
 	 * happening with locked page too
 	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE) {
+	if (offset == 0 && length == PAGE_SIZE) {
 		env = cl_env_get(&refcheck);
 		if (!IS_ERR(env)) {
 			inode = vmpage->mapping->host;
@@ -193,8 +193,8 @@
 		return -EFBIG;
 	}
 
-	*max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	*max_pages -= user_addr >> PAGE_CACHE_SHIFT;
+	*max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	*max_pages -= user_addr >> PAGE_SHIFT;
 
 	*pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
 	if (*pages) {
@@ -217,7 +217,7 @@
 	for (i = 0; i < npages; i++) {
 		if (do_dirty)
 			set_page_dirty_lock(pages[i]);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	kvfree(pages);
 }
@@ -357,15 +357,15 @@
  * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
  */
 #define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) *	  \
-		       PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
-static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
-			       loff_t file_offset)
+		       PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
+static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct lu_env *env;
 	struct cl_io *io;
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	struct ccc_object *obj = cl_inode2ccc(inode);
+	loff_t file_offset = iocb->ki_pos;
 	ssize_t count = iov_iter_count(iter);
 	ssize_t tot_bytes = 0, result = 0;
 	struct ll_inode_info *lli = ll_i2info(inode);
@@ -382,8 +382,8 @@
 	CDEBUG(D_VFSTRACE,
 	       "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
 	       inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
-	       file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
-	       MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
+	       file_offset, file_offset, count >> PAGE_SHIFT,
+	       MAX_DIO_SIZE >> PAGE_SHIFT);
 
 	/* Check that all user buffers are aligned as well */
 	if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
@@ -432,8 +432,8 @@
 			 * page worth of page pointers = 4MB on i386.
 			 */
 			if (result == -ENOMEM &&
-			    size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
-				   PAGE_CACHE_SIZE) {
+			    size > (PAGE_SIZE / sizeof(*pages)) *
+			    PAGE_SIZE) {
 				size = ((((size / 2) - 1) |
 					 ~CFS_PAGE_MASK) + 1) &
 					CFS_PAGE_MASK;
@@ -474,10 +474,10 @@
 			  loff_t pos, unsigned len, unsigned flags,
 			  struct page **pagep, void **fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	int rc;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -488,7 +488,7 @@
 	rc = ll_prepare_write(file, page, from, from + len);
 	if (rc) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return rc;
 }
@@ -497,12 +497,12 @@
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
 {
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	int rc;
 
 	rc = ll_commit_write(file, page, from, from + copied);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return rc ?: copied;
 }
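
The ll_direct_IO_26() hunk reflects a VFS interface change from the same timeframe rather than anything lustre-specific: address_space_operations->direct_IO no longer takes the file offset as a separate argument, and implementations read it from iocb->ki_pos. A sketch of the post-change shape (stub body only; a real method would build and submit the I/O):

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t demo_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	loff_t pos = iocb->ki_pos;		/* previously passed as a third loff_t parameter */
	size_t count = iov_iter_count(iter);

	if (!count)
		return 0;
	(void)pos;
	return -EINVAL;				/* placeholder result for this sketch */
}
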
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index fb0c26e..85a8359 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -512,9 +512,9 @@
 		vio->cui_ra_window_set = 1;
 		bead->lrr_start = cl_index(obj, pos);
 		/*
-		 * XXX: explicit PAGE_CACHE_SIZE
+		 * XXX: explicit PAGE_SIZE
 		 */
-		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
+		bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
 		ll_ra_read_in(file, bead);
 	}
 
@@ -959,7 +959,7 @@
 		 * We're completely overwriting an existing page, so _don't_
 		 * set it up to date until commit_write
 		 */
-		if (from == 0 && to == PAGE_CACHE_SIZE) {
+		if (from == 0 && to == PAGE_SIZE) {
 			CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
 			POISON_PAGE(page, 0x11);
 		} else
@@ -1022,7 +1022,7 @@
 			set_page_dirty(vmpage);
 			vvp_write_pending(cl2ccc(obj), cp);
 		} else if (result == -EDQUOT) {
-			pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+			pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
 			bool need_clip = true;
 
 			/*
@@ -1040,7 +1040,7 @@
 			 * being.
 			 */
 			if (last_index > pg->cp_index) {
-				to = PAGE_CACHE_SIZE;
+				to = PAGE_SIZE;
 				need_clip = false;
 			} else if (last_index == pg->cp_index) {
 				int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 850bae7..33ca3eb 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -57,7 +57,7 @@
 	struct page *vmpage = cp->cpg_page;
 
 	LASSERT(vmpage);
-	page_cache_release(vmpage);
+	put_page(vmpage);
 }
 
 static void vvp_page_fini(const struct lu_env *env,
@@ -164,12 +164,12 @@
 	LASSERT(vmpage);
 	LASSERT(PageLocked(vmpage));
 
-	offset = vmpage->index << PAGE_CACHE_SHIFT;
+	offset = vmpage->index << PAGE_SHIFT;
 
 	/*
 	 * XXX is it safe to call this with the page lock held?
 	 */
-	ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
+	ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
 	return 0;
 }
 
@@ -537,7 +537,7 @@
 	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
 	cpg->cpg_page = vmpage;
-	page_cache_get(vmpage);
+	get_page(vmpage);
 
 	INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
 	if (page->cp_type == CPT_CACHEABLE) {
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index b68dcc9..c671f22 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -451,11 +451,9 @@
 	return rc;
 }
 
-ssize_t ll_getxattr(struct dentry *dentry, const char *name,
-		    void *buffer, size_t size)
+ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
+		    const char *name, void *buffer, size_t size)
 {
-	struct inode *inode = d_inode(dentry);
-
 	LASSERT(inode);
 	LASSERT(name);
 
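
The ll_getxattr() prototype change matches the updated xattr handler convention: the VFS now hands the inode in explicitly, so the handler no longer derives it with d_inode(dentry) as the deleted lines did. A stub with the new shape (hypothetical name, kernel context assumed):

#include <linux/fs.h>
#include <linux/xattr.h>

static ssize_t demo_getxattr(struct dentry *dentry, struct inode *inode,
			     const char *name, void *buffer, size_t size)
{
	/* A real handler would look up @name on @inode and copy the value into
	 * @buffer, or report the required size when @buffer is NULL. */
	return -EOPNOTSUPP;
}
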
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 0f776cf..9abb7c2 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -2017,7 +2017,7 @@
  * |s|e|f|p|ent| 0 | ... | 0 |
  * '-----------------   -----'
  *
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
  * larger than LU_PAGE_SIZE, a single host page may contain multiple
  * lu_dirpages. After reading the lu_dirpages from the MDS, the
  * ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2048,7 @@
  * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
  *   to the first entry of the next lu_dirpage.
  */
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
 static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 {
 	int i;
@@ -2101,7 +2101,7 @@
 }
 #else
 #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif	/* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif	/* PAGE_SIZE > LU_PAGE_SIZE */
 
 static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 			struct page **pages, struct ptlrpc_request **request)
@@ -2110,7 +2110,7 @@
 	struct lmv_obd		*lmv = &obd->u.lmv;
 	__u64			offset = op_data->op_offset;
 	int			rc;
-	int			ncfspgs; /* pages read in PAGE_CACHE_SIZE */
+	int			ncfspgs; /* pages read in PAGE_SIZE */
 	int			nlupgs; /* pages read in LU_PAGE_SIZE */
 	struct lmv_tgt_desc	*tgt;
 
@@ -2129,8 +2129,8 @@
 	if (rc != 0)
 		return rc;
 
-	ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
-		 >> PAGE_CACHE_SHIFT;
+	ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
+		 >> PAGE_SHIFT;
 	nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
 	LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
 	LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
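
The comment block in lmv_obd.c is the clearest statement of why this conversion is not purely textual for lustre: the directory wire format stays fixed at LU_PAGE_SIZE (4 KiB), so on kernels built with larger pages one host page carries LU_PAGE_COUNT = 1 << (PAGE_SHIFT - LU_PAGE_SHIFT) lu_dirpages, which lmv_adjust_dirpages() stitches together. Worked numbers for one such configuration (illustrative values, not from the patch):

/* Illustrative only: a host built with 64 KiB pages (e.g. some arm64/ppc64 configs) */
#define DEMO_PAGE_SHIFT		16
#define DEMO_LU_PAGE_SHIFT	12
#define DEMO_LU_PAGE_SIZE	(1UL << DEMO_LU_PAGE_SHIFT)			/* 4096 bytes on the wire */
#define DEMO_LU_PAGE_COUNT	(1 << (DEMO_PAGE_SHIFT - DEMO_LU_PAGE_SHIFT))	/* 16 lu_dirpages per host page */
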
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 55dd8ef..b91d3ff 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1002,10 +1002,10 @@
 
 	/* NB req now owns desc and will free it when it gets freed */
 	for (i = 0; i < op_data->op_npages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
 	mdc_readdir_pack(req, op_data->op_offset,
-			 PAGE_CACHE_SIZE * op_data->op_npages,
+			 PAGE_SIZE * op_data->op_npages,
 			 &op_data->op_fid1);
 
 	ptlrpc_request_set_replen(req);
@@ -1037,7 +1037,7 @@
 	if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
 		CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
 		       req->rq_bulk->bd_nob_transferred,
-		       PAGE_CACHE_SIZE * op_data->op_npages);
+		       PAGE_SIZE * op_data->op_npages);
 		ptlrpc_req_finished(req);
 		return -EPROTO;
 	}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index b7dc872..3924b09 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -1113,7 +1113,7 @@
 }
 
 enum {
-	CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+	CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
 	CONFIG_READ_NRPAGES      = 4
 };
 
@@ -1137,19 +1137,19 @@
 	LASSERT(cfg->cfg_instance);
 	LASSERT(cfg->cfg_sb == cfg->cfg_instance);
 
-	inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+	inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!inst)
 		return -ENOMEM;
 
-	pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
-	if (pos >= PAGE_CACHE_SIZE) {
+	pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
+	if (pos >= PAGE_SIZE) {
 		kfree(inst);
 		return -E2BIG;
 	}
 
 	++pos;
 	buf   = inst + pos;
-	bufsz = PAGE_CACHE_SIZE - pos;
+	bufsz = PAGE_SIZE - pos;
 
 	while (datalen > 0) {
 		int   entry_len = sizeof(*entry);
@@ -1181,7 +1181,7 @@
 		/* Keep this swab for normal mixed endian handling. LU-1644 */
 		if (mne_swab)
 			lustre_swab_mgs_nidtbl_entry(entry);
-		if (entry->mne_length > PAGE_CACHE_SIZE) {
+		if (entry->mne_length > PAGE_SIZE) {
 			CERROR("MNE too large (%u)\n", entry->mne_length);
 			break;
 		}
@@ -1371,7 +1371,7 @@
 	}
 	body->mcb_offset = cfg->cfg_last_idx + 1;
 	body->mcb_type   = cld->cld_type;
-	body->mcb_bits   = PAGE_CACHE_SHIFT;
+	body->mcb_bits   = PAGE_SHIFT;
 	body->mcb_units  = nrpages;
 
 	/* allocate bulk transfer descriptor */
@@ -1383,7 +1383,7 @@
 	}
 
 	for (i = 0; i < nrpages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
 
 	ptlrpc_request_set_replen(req);
 	rc = ptlrpc_queue_wait(req);
@@ -1411,7 +1411,7 @@
 		goto out;
 	}
 
-	if (ealen > nrpages << PAGE_CACHE_SHIFT) {
+	if (ealen > nrpages << PAGE_SHIFT) {
 		rc = -EINVAL;
 		goto out;
 	}
@@ -1439,7 +1439,7 @@
 
 		ptr = kmap(pages[i]);
 		rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
-					     min_t(int, ealen, PAGE_CACHE_SIZE),
+					     min_t(int, ealen, PAGE_SIZE),
 					     mne_swab);
 		kunmap(pages[i]);
 		if (rc2 < 0) {
@@ -1448,7 +1448,7 @@
 			break;
 		}
 
-		ealen -= PAGE_CACHE_SIZE;
+		ealen -= PAGE_SIZE;
 	}
 
 out:
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 231a2f2..3945800 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -1477,7 +1477,7 @@
 	/*
 	 * XXX for now.
 	 */
-	return (loff_t)idx << PAGE_CACHE_SHIFT;
+	return (loff_t)idx << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_offset);
 
@@ -1489,13 +1489,13 @@
 	/*
 	 * XXX for now.
 	 */
-	return offset >> PAGE_CACHE_SHIFT;
+	return offset >> PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_index);
 
 int cl_page_size(const struct cl_object *obj)
 {
-	return 1 << PAGE_CACHE_SHIFT;
+	return 1 << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(cl_page_size);
 
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 1a938e1..c2cf015 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -461,9 +461,9 @@
 		CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
 		ret = -EINVAL;
 	}
-	if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
+	if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
 		CWARN("mask failed: u64val %llu >= %llu\n", u64val,
-		      (__u64)PAGE_CACHE_SIZE);
+		      (__u64)PAGE_SIZE);
 		ret = -EINVAL;
 	}
 
@@ -509,7 +509,7 @@
 	 * For clients with less memory, a larger fraction is needed
 	 * for other purposes (mostly for BGL).
 	 */
-	if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+	if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
 		obd_max_dirty_pages = totalram_pages / 4;
 	else
 		obd_max_dirty_pages = totalram_pages / 2;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index 9496c09..b41b65e2 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -47,7 +47,6 @@
 #include "../../include/lustre/lustre_idl.h"
 
 #include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
 
 void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
 {
@@ -71,8 +70,8 @@
 	if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
 		dst->i_blkbits = ffs(src->o_blksize) - 1;
 
-	if (dst->i_blkbits < PAGE_CACHE_SHIFT)
-		dst->i_blkbits = PAGE_CACHE_SHIFT;
+	if (dst->i_blkbits < PAGE_SHIFT)
+		dst->i_blkbits = PAGE_SHIFT;
 
 	/* allocation of space */
 	if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index fd333b9..e6bf414 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -100,7 +100,7 @@
 				 char *buf)
 {
 	return sprintf(buf, "%ul\n",
-			obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
+			obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
 }
 
 static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
@@ -113,14 +113,14 @@
 	if (rc)
 		return rc;
 
-	val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
+	val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
 
 	if (val > ((totalram_pages / 10) * 9)) {
 		/* Somebody wants to assign too much memory to dirty pages */
 		return -EINVAL;
 	}
 
-	if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
+	if (val < 4 << (20 - PAGE_SHIFT)) {
 		/* Less than 4 Mb for dirty cache is also bad */
 		return -EINVAL;
 	}
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 65a4746..978568a 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -840,8 +840,8 @@
 
 #if BITS_PER_LONG == 32
 	/* limit hashtable size for lowmem systems to low RAM */
-	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
-		cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
+	if (cache_size > 1 << (30 - PAGE_SHIFT))
+		cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
 #endif
 
 	/* clear off unreasonable cache setting. */
@@ -853,7 +853,7 @@
 		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
 	}
 	cache_size = cache_size / 100 * lu_cache_percent *
-		(PAGE_CACHE_SIZE / 1024);
+		(PAGE_SIZE / 1024);
 
 	for (bits = 1; (1 << bits) < cache_size; ++bits) {
 		;
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 64ffe24..1e83669 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -278,7 +278,7 @@
 	struct page *vmpage      = ep->ep_vmpage;
 
 	atomic_dec(&eco->eo_npages);
-	page_cache_release(vmpage);
+	put_page(vmpage);
 }
 
 static int echo_page_prep(const struct lu_env *env,
@@ -373,7 +373,7 @@
 	struct echo_object *eco = cl2echo_obj(obj);
 
 	ep->ep_vmpage = vmpage;
-	page_cache_get(vmpage);
+	get_page(vmpage);
 	mutex_init(&ep->ep_lock);
 	cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
 	atomic_inc(&eco->eo_npages);
@@ -1138,7 +1138,7 @@
 	LASSERT(rc == 0);
 
 	rc = cl_echo_enqueue0(env, eco, offset,
-			      offset + npages * PAGE_CACHE_SIZE - 1,
+			      offset + npages * PAGE_SIZE - 1,
 			      rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
 			      CEF_NEVER);
 	if (rc < 0)
@@ -1311,11 +1311,11 @@
 	int      delta;
 
 	/* no partial pages on the client */
-	LASSERT(count == PAGE_CACHE_SIZE);
+	LASSERT(count == PAGE_SIZE);
 
 	addr = kmap(page);
 
-	for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+	for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
 		if (rw == OBD_BRW_WRITE) {
 			stripe_off = offset + delta;
 			stripe_id = id;
@@ -1341,11 +1341,11 @@
 	int     rc2;
 
 	/* no partial pages on the client */
-	LASSERT(count == PAGE_CACHE_SIZE);
+	LASSERT(count == PAGE_SIZE);
 
 	addr = kmap(page);
 
-	for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+	for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
 		stripe_off = offset + delta;
 		stripe_id = id;
 
@@ -1391,7 +1391,7 @@
 		return -EINVAL;
 
 	/* XXX think again with misaligned I/O */
-	npages = count >> PAGE_CACHE_SHIFT;
+	npages = count >> PAGE_SHIFT;
 
 	if (rw == OBD_BRW_WRITE)
 		brw_flags = OBD_BRW_ASYNC;
@@ -1408,7 +1408,7 @@
 
 	for (i = 0, pgp = pga, off = offset;
 	     i < npages;
-	     i++, pgp++, off += PAGE_CACHE_SIZE) {
+	     i++, pgp++, off += PAGE_SIZE) {
 
 		LASSERT(!pgp->pg);      /* for cleanup */
 
@@ -1418,7 +1418,7 @@
 			goto out;
 
 		pages[i] = pgp->pg;
-		pgp->count = PAGE_CACHE_SIZE;
+		pgp->count = PAGE_SIZE;
 		pgp->off = off;
 		pgp->flag = brw_flags;
 
@@ -1473,8 +1473,8 @@
 	if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
 		return -EINVAL;
 
-	npages = batch >> PAGE_CACHE_SHIFT;
-	tot_pages = count >> PAGE_CACHE_SHIFT;
+	npages = batch >> PAGE_SHIFT;
+	tot_pages = count >> PAGE_SHIFT;
 
 	lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
 	rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
@@ -1497,9 +1497,9 @@
 		if (tot_pages < npages)
 			npages = tot_pages;
 
-		for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
+		for (i = 0; i < npages; i++, off += PAGE_SIZE) {
 			rnb[i].offset = off;
-			rnb[i].len = PAGE_CACHE_SIZE;
+			rnb[i].len = PAGE_SIZE;
 			rnb[i].flags = brw_flags;
 		}
 
@@ -1878,7 +1878,7 @@
 {
 	LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
 
-	LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+	LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
 
 	return echo_client_init();
 }
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 57c43c5..a3358c3 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,15 +162,15 @@
 	if (rc)
 		return rc;
 
-	pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+	pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
 	if (pages_number <= 0 ||
-	    pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
+	    pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
 	    pages_number > totalram_pages / 4) /* 1/4 of RAM */
 		return -ERANGE;
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
-	cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT);
+	cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
 	osc_wake_cache_waiters(cli);
 	client_obd_list_unlock(&cli->cl_loi_list_lock);
 
@@ -182,7 +182,7 @@
 {
 	struct obd_device *dev = m->private;
 	struct client_obd *cli = &dev->u.cli;
-	int shift = 20 - PAGE_CACHE_SHIFT;
+	int shift = 20 - PAGE_SHIFT;
 
 	seq_printf(m,
 		   "used_mb: %d\n"
@@ -211,7 +211,7 @@
 		return -EFAULT;
 	kernbuf[count] = 0;
 
-	mult = 1 << (20 - PAGE_CACHE_SHIFT);
+	mult = 1 << (20 - PAGE_SHIFT);
 	buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
 		  kernbuf;
 	rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -569,12 +569,12 @@
 
 	/* if the max_pages is specified in bytes, convert to pages */
 	if (val >= ONE_MB_BRW_SIZE)
-		val >>= PAGE_CACHE_SHIFT;
+		val >>= PAGE_SHIFT;
 
-	chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
+	chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
 	/* max_pages_per_rpc must be chunk aligned */
 	val = (val + ~chunk_mask) & chunk_mask;
-	if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
+	if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
 		return -ERANGE;
 	}
 	client_obd_list_lock(&cli->cl_loi_list_lock);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 6336311..5f25bf8 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -544,7 +544,7 @@
 		return -ERANGE;
 
 	LASSERT(cur->oe_osclock == victim->oe_osclock);
-	ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
+	ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
 	chunk_start = cur->oe_start >> ppc_bits;
 	chunk_end = cur->oe_end >> ppc_bits;
 	if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
@@ -647,8 +647,8 @@
 	lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
 	LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
 
-	LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
-	ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+	LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
+	ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
 	chunk_mask = ~((1 << ppc_bits) - 1);
 	chunksize = 1 << cli->cl_chunkbits;
 	chunk = index >> ppc_bits;
@@ -871,8 +871,8 @@
 
 	if (!sent) {
 		lost_grant = ext->oe_grants;
-	} else if (blocksize < PAGE_CACHE_SIZE &&
-		   last_count != PAGE_CACHE_SIZE) {
+	} else if (blocksize < PAGE_SIZE &&
+		   last_count != PAGE_SIZE) {
 		/* For short writes we shouldn't count parts of pages that
 		 * span a whole chunk on the OST side, or our accounting goes
 		 * wrong.  Should match the code in filter_grant_check.
@@ -884,7 +884,7 @@
 		if (end)
 			count += blocksize - end;
 
-		lost_grant = PAGE_CACHE_SIZE - count;
+		lost_grant = PAGE_SIZE - count;
 	}
 	if (ext->oe_grants > 0)
 		osc_free_grant(cli, nr_pages, lost_grant);
@@ -967,7 +967,7 @@
 	struct osc_async_page *oap;
 	struct osc_async_page *tmp;
 	int pages_in_chunk = 0;
-	int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+	int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
 	__u64 trunc_chunk = trunc_index >> ppc_bits;
 	int grants = 0;
 	int nr_pages = 0;
@@ -1125,7 +1125,7 @@
 	if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
 		last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
 		LASSERT(last->oap_count > 0);
-		LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
+		LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
 		last->oap_async_flags |= ASYNC_COUNT_STABLE;
 	}
 
@@ -1134,7 +1134,7 @@
 	 */
 	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
 		if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
-			oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
+			oap->oap_count = PAGE_SIZE - oap->oap_page_off;
 			oap->oap_async_flags |= ASYNC_COUNT_STABLE;
 		}
 	}
@@ -1158,7 +1158,7 @@
 	struct osc_object *obj = ext->oe_obj;
 	struct client_obd *cli = osc_cli(obj);
 	struct osc_extent *next;
-	int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+	int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
 	pgoff_t chunk = index >> ppc_bits;
 	pgoff_t end_chunk;
 	pgoff_t end_index;
@@ -1293,9 +1293,9 @@
 		return 0;
 	else if (cl_offset(obj, page->cp_index + 1) > kms)
 		/* catch sub-page write at end of file */
-		return kms % PAGE_CACHE_SIZE;
+		return kms % PAGE_SIZE;
 	else
-		return PAGE_CACHE_SIZE;
+		return PAGE_SIZE;
 }
 
 static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
@@ -1376,10 +1376,10 @@
 	assert_spin_locked(&cli->cl_loi_list_lock.lock);
 	LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
 	atomic_inc(&obd_dirty_pages);
-	cli->cl_dirty += PAGE_CACHE_SIZE;
+	cli->cl_dirty += PAGE_SIZE;
 	pga->flag |= OBD_BRW_FROM_GRANT;
 	CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
-	       PAGE_CACHE_SIZE, pga, pga->pg);
+	       PAGE_SIZE, pga, pga->pg);
 	osc_update_next_shrink(cli);
 }
 
@@ -1396,11 +1396,11 @@
 
 	pga->flag &= ~OBD_BRW_FROM_GRANT;
 	atomic_dec(&obd_dirty_pages);
-	cli->cl_dirty -= PAGE_CACHE_SIZE;
+	cli->cl_dirty -= PAGE_SIZE;
 	if (pga->flag & OBD_BRW_NOCACHE) {
 		pga->flag &= ~OBD_BRW_NOCACHE;
 		atomic_dec(&obd_dirty_transit_pages);
-		cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
+		cli->cl_dirty_transit -= PAGE_SIZE;
 	}
 }
 
@@ -1456,7 +1456,7 @@
  * used, we should return these grants to OST. There're two cases where grants
  * can be lost:
  * 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
  *    written. In this case OST may use less chunks to serve this partial
  *    write. OSTs don't actually know the page size on the client side. so
  *    clients have to calculate lost grant by the blocksize on the OST.
@@ -1469,7 +1469,7 @@
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
 	atomic_sub(nr_pages, &obd_dirty_pages);
-	cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
+	cli->cl_dirty -= nr_pages << PAGE_SHIFT;
 	cli->cl_lost_grant += lost_grant;
 	if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
 		/* borrow some grant from truncate to avoid the case that
@@ -1512,11 +1512,11 @@
 	if (rc < 0)
 		return 0;
 
-	if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
+	if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
 	    atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
 		osc_consume_write_grant(cli, &oap->oap_brw_page);
 		if (transient) {
-			cli->cl_dirty_transit += PAGE_CACHE_SIZE;
+			cli->cl_dirty_transit += PAGE_SIZE;
 			atomic_inc(&obd_dirty_transit_pages);
 			oap->oap_brw_flags |= OBD_BRW_NOCACHE;
 		}
@@ -1562,7 +1562,7 @@
 	 * of queued writes and create a discontiguous rpc stream
 	 */
 	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
-	    cli->cl_dirty_max < PAGE_CACHE_SIZE     ||
+	    cli->cl_dirty_max < PAGE_SIZE     ||
 	    cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
 		rc = -EDQUOT;
 		goto out;
@@ -1632,7 +1632,7 @@
 
 		ocw->ocw_rc = -EDQUOT;
 		/* we can't dirty more */
-		if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
+		if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
 		    (atomic_read(&obd_dirty_pages) + 1 >
 		     obd_max_dirty_pages)) {
 			CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index d720b1a..ce9ddd5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -410,7 +410,7 @@
 	int result;
 
 	opg->ops_from = 0;
-	opg->ops_to = PAGE_CACHE_SIZE;
+	opg->ops_to = PAGE_SIZE;
 
 	result = osc_prep_async_page(osc, opg, vmpage,
 				     cl_offset(obj, page->cp_index));
@@ -487,9 +487,9 @@
 /* LRU pages are freed in batch mode. OSC should at least free this
  * number of pages to avoid running out of LRU budget, and..
  */
-static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT);  /* 2M */
+static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT);  /* 2M */
 /* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
+static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
 
 /* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
  * we should free slots aggressively. In this way, slots are freed in a steady
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 74805f1..30526eb 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -826,7 +826,7 @@
 		oa->o_undirty = 0;
 	} else {
 		long max_in_flight = (cli->cl_max_pages_per_rpc <<
-				      PAGE_CACHE_SHIFT)*
+				      PAGE_SHIFT)*
 				     (cli->cl_max_rpcs_in_flight + 1);
 		oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
 	}
@@ -909,11 +909,11 @@
 static int osc_shrink_grant(struct client_obd *cli)
 {
 	__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
-			     (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
+			     (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
 	if (cli->cl_avail_grant <= target_bytes)
-		target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
 	client_obd_list_unlock(&cli->cl_loi_list_lock);
 
 	return osc_shrink_grant_to_target(cli, target_bytes);
@@ -929,8 +929,8 @@
 	 * We don't want to shrink below a single RPC, as that will negatively
 	 * impact block allocation and long-term performance.
 	 */
-	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
-		target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
+		target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
 
 	if (target_bytes >= cli->cl_avail_grant) {
 		client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -978,7 +978,7 @@
 		 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
 		 * Keep comment here so that it can be found by searching.
 		 */
-		int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+		int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
 
 		if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
 		    client->cl_avail_grant > brw_size)
@@ -1052,7 +1052,7 @@
 	}
 
 	/* determine the appropriate chunk size used by osc_extent. */
-	cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
+	cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
 	client_obd_list_unlock(&cli->cl_loi_list_lock);
 
 	CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
@@ -1317,9 +1317,9 @@
 		LASSERT(pg->count > 0);
 		/* make sure there is no gap in the middle of page array */
 		LASSERTF(page_count == 1 ||
-			 (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
+			 (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
 			  ergo(i > 0 && i < page_count - 1,
-			       poff == 0 && pg->count == PAGE_CACHE_SIZE)   &&
+			       poff == 0 && pg->count == PAGE_SIZE)   &&
 			  ergo(i == page_count - 1, poff == 0)),
 			 "i: %d/%d pg: %p off: %llu, count: %u\n",
 			 i, page_count, pg, pg->off, pg->count);
@@ -1877,7 +1877,7 @@
 						oap->oap_count;
 			else
 				LASSERT(oap->oap_page_off + oap->oap_count ==
-					PAGE_CACHE_SIZE);
+					PAGE_SIZE);
 		}
 	}
 
@@ -1993,7 +1993,7 @@
 		tmp->oap_request = ptlrpc_request_addref(req);
 
 	client_obd_list_lock(&cli->cl_loi_list_lock);
-	starting_offset >>= PAGE_CACHE_SHIFT;
+	starting_offset >>= PAGE_SHIFT;
 	if (cmd == OBD_BRW_READ) {
 		cli->cl_r_in_flight++;
 		lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@@ -2790,12 +2790,12 @@
 						CFS_PAGE_MASK;
 
 		if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
-		    fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
+		    fm_key->fiemap.fm_start + PAGE_SIZE - 1)
 			policy.l_extent.end = OBD_OBJECT_EOF;
 		else
 			policy.l_extent.end = (fm_key->fiemap.fm_start +
 				fm_key->fiemap.fm_length +
-				PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
+				PAGE_SIZE - 1) & CFS_PAGE_MASK;
 
 		ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
 		mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 1b7673e..cf3ac8e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -174,12 +174,12 @@
 	LASSERT(page);
 	LASSERT(pageoffset >= 0);
 	LASSERT(len > 0);
-	LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+	LASSERT(pageoffset + len <= PAGE_SIZE);
 
 	desc->bd_nob += len;
 
 	if (pin)
-		page_cache_get(page);
+		get_page(page);
 
 	ptlrpc_add_bulk_page(desc, page, pageoffset, len);
 }
@@ -206,7 +206,7 @@
 
 	if (unpin) {
 		for (i = 0; i < desc->bd_iov_count; i++)
-			page_cache_release(desc->bd_iov[i].kiov_page);
+			put_page(desc->bd_iov[i].kiov_page);
 	}
 
 	kfree(desc);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index b4eddf2..cd94fed 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1092,7 +1092,7 @@
 
 		if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
 			cli->cl_max_pages_per_rpc =
-				min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
+				min(ocd->ocd_brw_size >> PAGE_SHIFT,
 				    cli->cl_max_pages_per_rpc);
 		else if (imp->imp_connect_op == MDS_CONNECT ||
 			 imp->imp_connect_op == MGS_CONNECT)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index cee04ef..c95a91c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -308,7 +308,7 @@
 	 * hose a kernel by allowing the request history to grow too
 	 * far.
 	 */
-	bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (val > totalram_pages / (2 * bufpages))
 		return -ERANGE;
 
@@ -1226,7 +1226,7 @@
 	const char prefix[] = "connection=";
 	const int prefix_len = sizeof(prefix) - 1;
 
-	if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
+	if (count > PAGE_SIZE - 1 || count <= prefix_len)
 		return -EINVAL;
 
 	kbuf = kzalloc(count + 1, GFP_NOFS);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 5f27d9c..30d9a16 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -195,7 +195,7 @@
 	}
 
 	list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
-		LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
+		LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
 			 "req %p bad\n", req);
 		LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
 		if (!ptlrpc_no_resend(req))
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 72d5b9b..d3872b8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -58,7 +58,7 @@
  * bulk encryption page pools	   *
  ****************************************/
 
-#define POINTERS_PER_PAGE	(PAGE_CACHE_SIZE / sizeof(void *))
+#define POINTERS_PER_PAGE	(PAGE_SIZE / sizeof(void *))
 #define PAGES_PER_POOL		(POINTERS_PER_PAGE)
 
 #define IDLE_IDX_MAX	 (100)
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 0078b6a..de7e9f5 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -37,6 +37,8 @@
 
 source "drivers/staging/media/timb/Kconfig"
 
+source "drivers/staging/media/tw686x-kh/Kconfig"
+
 # Keep LIRC at the end, as it has sub-menus
 source "drivers/staging/media/lirc/Kconfig"
 
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 9149588..60a35b3 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_VIDEO_OMAP4)	+= omap4iss/
 obj-$(CONFIG_DVB_MN88472)       += mn88472/
 obj-$(CONFIG_VIDEO_TIMBERDALE)  += timb/
+obj-$(CONFIG_VIDEO_TW686X_KH)	+= tw686x-kh/
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index abf330f..8dade19 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -308,7 +308,7 @@
 MODULE_PARM_DESC(radio_nr,
 		 "Minor number for radio device (-1 ==> auto assign)");
 
-static struct region_info region_configs[] = {
+static const struct region_info region_configs[] = {
 	/* USA */
 	{
 		.channel_spacing	= 20,
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index b793c04..ea3ddec 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -154,7 +154,7 @@
 	while ((entity = media_entity_graph_walk_next(&graph))) {
 		if (entity == &video->video_dev.entity)
 			continue;
-		if (!is_media_entity_v4l2_io(entity))
+		if (!is_media_entity_v4l2_video_device(entity))
 			continue;
 		far_end = to_vpfe_video(media_entity_to_video_device(entity));
 		if (far_end->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -172,9 +172,11 @@
 static int vpfe_update_pipe_state(struct vpfe_video_device *video)
 {
 	struct vpfe_pipeline *pipe = &video->pipe;
+	int ret;
 
-	if (vpfe_prepare_pipeline(video))
-		return vpfe_prepare_pipeline(video);
+	ret = vpfe_prepare_pipeline(video);
+	if (ret)
+		return ret;
 
 	/*
 	 * Find out if there is any input video
@@ -182,9 +184,10 @@
 	 */
 	if (pipe->input_num == 0) {
 		pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
-		if (vpfe_update_current_ext_subdev(video)) {
+		ret = vpfe_update_current_ext_subdev(video);
+		if (ret) {
 			pr_err("Invalid external subdev\n");
-			return vpfe_update_current_ext_subdev(video);
+			return ret;
 		}
 	} else {
 		pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -667,6 +670,7 @@
 	struct v4l2_subdev *subdev;
 	struct v4l2_format format;
 	struct media_pad *remote;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
 
@@ -695,10 +699,11 @@
 	sd_fmt.pad = remote->index;
 	sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
 	/* get output format of remote subdev */
-	if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) {
+	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+	if (ret) {
 		v4l2_err(&vpfe_dev->v4l2_dev,
 			 "invalid remote subdev for video node\n");
-		return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+		return ret;
 	}
 	/* convert to pix format */
 	mbus.code = sd_fmt.format.code;
@@ -725,6 +730,7 @@
 	struct vpfe_video_device *video = video_drvdata(file);
 	struct vpfe_device *vpfe_dev = video->vpfe_dev;
 	struct v4l2_format format;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
 	/* If streaming is started, return error */
@@ -733,8 +739,9 @@
 		return -EBUSY;
 	}
 	/* get adjacent subdev's output pad format */
-	if (__vpfe_video_get_format(video, &format))
-		return __vpfe_video_get_format(video, &format);
+	ret = __vpfe_video_get_format(video, &format);
+	if (ret)
+		return ret;
 	*fmt = format;
 	video->fmt = *fmt;
 	return 0;
@@ -757,11 +764,13 @@
 	struct vpfe_video_device *video = video_drvdata(file);
 	struct vpfe_device *vpfe_dev = video->vpfe_dev;
 	struct v4l2_format format;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
 	/* get adjacent subdev's output pad format */
-	if (__vpfe_video_get_format(video, &format))
-		return __vpfe_video_get_format(video, &format);
+	ret = __vpfe_video_get_format(video, &format);
+	if (ret)
+		return ret;
 
 	*fmt = format;
 	return 0;
@@ -838,8 +847,9 @@
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
 
-	if (mutex_lock_interruptible(&video->lock))
-		return mutex_lock_interruptible(&video->lock);
+	ret = mutex_lock_interruptible(&video->lock);
+	if (ret)
+		return ret;
 	/*
 	 * If streaming is started return device busy
 	 * error
@@ -940,8 +950,9 @@
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
 
 	/* Call decoder driver function to set the standard */
-	if (mutex_lock_interruptible(&video->lock))
-		return mutex_lock_interruptible(&video->lock);
+	ret = mutex_lock_interruptible(&video->lock);
+	if (ret)
+		return ret;
 	sdinfo = video->current_ext_subdev;
 	/* If streaming is started, return device busy error */
 	if (video->started) {
@@ -1327,8 +1338,9 @@
 		return -EINVAL;
 	}
 
-	if (mutex_lock_interruptible(&video->lock))
-		return mutex_lock_interruptible(&video->lock);
+	ret = mutex_lock_interruptible(&video->lock);
+	if (ret)
+		return ret;
 
 	if (video->io_usrs != 0) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1354,10 +1366,11 @@
 	q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 
-	if (vb2_queue_init(q)) {
+	ret = vb2_queue_init(q);
+	if (ret) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
 		vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
-		return vb2_queue_init(q);
+		return ret;
 	}
 
 	fh->io_allowed = 1;
@@ -1533,8 +1546,9 @@
 		return -EINVAL;
 	}
 
-	if (mutex_lock_interruptible(&video->lock))
-		return mutex_lock_interruptible(&video->lock);
+	ret = mutex_lock_interruptible(&video->lock);
+	if (ret)
+		return ret;
 
 	vpfe_stop_capture(video);
 	ret = vb2_streamoff(&video->buffer_queue, buf_type);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index f54349b..cf8da23 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -223,7 +223,7 @@
 		if (entity == &video->video.entity)
 			continue;
 
-		if (!is_media_entity_v4l2_io(entity))
+		if (!is_media_entity_v4l2_video_device(entity))
 			continue;
 
 		far_end = to_iss_video(media_entity_to_video_device(entity));
diff --git a/drivers/staging/media/tw686x-kh/Kconfig b/drivers/staging/media/tw686x-kh/Kconfig
new file mode 100644
index 0000000..6264d30
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/Kconfig
@@ -0,0 +1,17 @@
+config VIDEO_TW686X_KH
+	tristate "Intersil/Techwell TW686x Video For Linux"
+	depends on VIDEO_DEV && PCI && VIDEO_V4L2
+	depends on !(VIDEO_TW686X=y || VIDEO_TW686X=m) || COMPILE_TEST
+	select VIDEOBUF2_DMA_SG
+	help
+	  Support for Intersil/Techwell TW686x-based frame grabber cards.
+
+	  Currently supported chips:
+	  - TW6864 (4 video channels),
+	  - TW6865 (4 video channels, not tested, second generation chip),
+	  - TW6868 (8 video channels but only 4 first channels using
+	    built-in video decoder are supported, not tested),
+	  - TW6869 (8 video channels, second generation chip).
+
+	  To compile this driver as a module, choose M here: the module
+	  will be named tw686x-kh.
diff --git a/drivers/staging/media/tw686x-kh/Makefile b/drivers/staging/media/tw686x-kh/Makefile
new file mode 100644
index 0000000..2a36a38
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/Makefile
@@ -0,0 +1,3 @@
+tw686x-kh-objs := tw686x-kh-core.o tw686x-kh-video.o
+
+obj-$(CONFIG_VIDEO_TW686X_KH) += tw686x-kh.o
diff --git a/drivers/staging/media/tw686x-kh/TODO b/drivers/staging/media/tw686x-kh/TODO
new file mode 100644
index 0000000..480a495
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/TODO
@@ -0,0 +1,6 @@
+TODO:
+
+- implement V4L2_FIELD_INTERLACED* mode(s).
+- add audio support
+
+Please Cc: patches to Krzysztof Halasa <khalasa@piap.pl>.
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-core.c b/drivers/staging/media/tw686x-kh/tw686x-kh-core.c
new file mode 100644
index 0000000..03b3b62
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh-core.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * Written by Krzysztof Hałasa.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "tw686x-kh.h"
+#include "tw686x-kh-regs.h"
+
+static irqreturn_t tw686x_irq(int irq, void *dev_id)
+{
+	struct tw686x_dev *dev = (struct tw686x_dev *)dev_id;
+	u32 int_status = reg_read(dev, INT_STATUS); /* cleared on read */
+	unsigned long flags;
+	unsigned int handled = 0;
+
+	if (int_status) {
+		spin_lock_irqsave(&dev->irq_lock, flags);
+		dev->dma_requests |= int_status;
+		spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+		if (int_status & 0xFF0000FF)
+			handled = tw686x_kh_video_irq(dev);
+	}
+
+	return IRQ_RETVAL(handled);
+}
+
+static int tw686x_probe(struct pci_dev *pci_dev,
+			const struct pci_device_id *pci_id)
+{
+	struct tw686x_dev *dev;
+	int err;
+
+	dev = devm_kzalloc(&pci_dev->dev, sizeof(*dev) +
+			   (pci_id->driver_data & TYPE_MAX_CHANNELS) *
+			   sizeof(dev->video_channels[0]), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	sprintf(dev->name, "TW%04X", pci_dev->device);
+	dev->type = pci_id->driver_data;
+
+	pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx\n", dev->name,
+		pci_name(pci_dev), pci_dev->irq,
+		(unsigned long)pci_resource_start(pci_dev, 0));
+
+	dev->pci_dev = pci_dev;
+	if (pcim_enable_device(pci_dev))
+		return -EIO;
+
+	pci_set_master(pci_dev);
+
+	if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+		pr_err("%s: 32-bit PCI DMA not supported\n", dev->name);
+		return -EIO;
+	}
+
+	err = pci_request_regions(pci_dev, dev->name);
+	if (err < 0) {
+		pr_err("%s: Unable to get MMIO region\n", dev->name);
+		return err;
+	}
+
+	dev->mmio = pci_ioremap_bar(pci_dev, 0);
+	if (!dev->mmio) {
+		pr_err("%s: Unable to remap MMIO region\n", dev->name);
+		return -EIO;
+	}
+
+	reg_write(dev, SYS_SOFT_RST, 0x0F); /* Reset all subsystems */
+	mdelay(1);
+
+	reg_write(dev, SRST[0], 0x3F);
+	if (max_channels(dev) > 4)
+		reg_write(dev, SRST[1], 0x3F);
+	reg_write(dev, DMA_CMD, 0);
+	reg_write(dev, DMA_CHANNEL_ENABLE, 0);
+	reg_write(dev, DMA_CHANNEL_TIMEOUT, 0x3EFF0FF0);
+	reg_write(dev, DMA_TIMER_INTERVAL, 0x38000);
+	reg_write(dev, DMA_CONFIG, 0xFFFFFF04);
+
+	spin_lock_init(&dev->irq_lock);
+
+	err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw686x_irq,
+			       IRQF_SHARED, dev->name, dev);
+	if (err < 0) {
+		pr_err("%s: Unable to get IRQ\n", dev->name);
+		return err;
+	}
+
+	err = tw686x_kh_video_init(dev);
+	if (err)
+		return err;
+
+	pci_set_drvdata(pci_dev, dev);
+	return 0;
+}
+
+static void tw686x_remove(struct pci_dev *pci_dev)
+{
+	struct tw686x_dev *dev = pci_get_drvdata(pci_dev);
+
+	tw686x_kh_video_free(dev);
+}
+
+/* driver_data is number of A/V channels */
+static const struct pci_device_id tw686x_pci_tbl[] = {
+	{PCI_DEVICE(0x1797, 0x6864), .driver_data = 4},
+	/* not tested */
+	{PCI_DEVICE(0x1797, 0x6865), .driver_data = 4 | TYPE_SECOND_GEN},
+	/* TW6868 supports 8 A/V channels with an external TW2865 chip -
+	   not supported by the driver */
+	{PCI_DEVICE(0x1797, 0x6868), .driver_data = 4}, /* not tested */
+	{PCI_DEVICE(0x1797, 0x6869), .driver_data = 8 | TYPE_SECOND_GEN},
+	{}
+};
+
+static struct pci_driver tw686x_pci_driver = {
+	.name = "tw686x-kh",
+	.id_table = tw686x_pci_tbl,
+	.probe = tw686x_probe,
+	.remove = tw686x_remove,
+};
+
+MODULE_DESCRIPTION("Driver for video frame grabber cards based on Intersil/Techwell TW686[4589]");
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, tw686x_pci_tbl);
+module_pci_driver(tw686x_pci_driver);
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h b/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h
new file mode 100644
index 0000000..53e1889
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h
@@ -0,0 +1,103 @@
+/* DMA controller registers */
+#define REG8_1(a0) ((const u16[8]) {a0, a0 + 1, a0 + 2, a0 + 3,	\
+				   a0 + 4, a0 + 5, a0 + 6, a0 + 7})
+#define REG8_2(a0) ((const u16[8]) {a0, a0 + 2, a0 + 4, a0 + 6,	\
+				   a0 + 8, a0 + 0xA, a0 + 0xC, a0 + 0xE})
+#define REG8_8(a0) ((const u16[8]) {a0, a0 + 8, a0 + 0x10, a0 + 0x18,	\
+				   a0 + 0x20, a0 + 0x28, a0 + 0x30, a0 + 0x38})
+#define INT_STATUS		0x00
+#define PB_STATUS		0x01
+#define DMA_CMD			0x02
+#define VIDEO_FIFO_STATUS	0x03
+#define VIDEO_CHANNEL_ID	0x04
+#define VIDEO_PARSER_STATUS	0x05
+#define SYS_SOFT_RST		0x06
+#define DMA_PAGE_TABLE0_ADDR	((const u16[8]) {0x08, 0xD0, 0xD2, 0xD4, \
+						0xD6, 0xD8, 0xDA, 0xDC})
+#define DMA_PAGE_TABLE1_ADDR	((const u16[8]) {0x09, 0xD1, 0xD3, 0xD5, \
+						0xD7, 0xD9, 0xDB, 0xDD})
+#define DMA_CHANNEL_ENABLE	0x0A
+#define DMA_CONFIG		0x0B
+#define DMA_TIMER_INTERVAL	0x0C
+#define DMA_CHANNEL_TIMEOUT	0x0D
+#define VDMA_CHANNEL_CONFIG	REG8_1(0x10)
+#define ADMA_P_ADDR		REG8_2(0x18)
+#define ADMA_B_ADDR		REG8_2(0x19)
+#define DMA10_P_ADDR		0x28 /* ??? */
+#define DMA10_B_ADDR		0x29
+#define VIDEO_CONTROL1		0x2A
+#define VIDEO_CONTROL2		0x2B
+#define AUDIO_CONTROL1		0x2C
+#define AUDIO_CONTROL2		0x2D
+#define PHASE_REF		0x2E
+#define GPIO_REG		0x2F
+#define INTL_HBAR_CTRL		REG8_1(0x30)
+#define AUDIO_CONTROL3		0x38
+#define VIDEO_FIELD_CTRL	REG8_1(0x39)
+#define HSCALER_CTRL		REG8_1(0x42)
+#define VIDEO_SIZE		REG8_1(0x4A)
+#define VIDEO_SIZE_F2		REG8_1(0x52)
+#define MD_CONF			REG8_1(0x60)
+#define MD_INIT			REG8_1(0x68)
+#define MD_MAP0			REG8_1(0x70)
+#define VDMA_P_ADDR		REG8_8(0x80) /* not used in DMA SG mode */
+#define VDMA_WHP		REG8_8(0x81)
+#define VDMA_B_ADDR		REG8_8(0x82)
+#define VDMA_F2_P_ADDR		REG8_8(0x84)
+#define VDMA_F2_WHP		REG8_8(0x85)
+#define VDMA_F2_B_ADDR		REG8_8(0x86)
+#define EP_REG_ADDR		0xFE
+#define EP_REG_DATA		0xFF
+
+/* Video decoder registers */
+#define VDREG8(a0) ((const u16[8]) {			\
+	a0 + 0x000, a0 + 0x010, a0 + 0x020, a0 + 0x030,	\
+	a0 + 0x100, a0 + 0x110, a0 + 0x120, a0 + 0x130})
+#define VIDSTAT			VDREG8(0x100)
+#define BRIGHT			VDREG8(0x101)
+#define CONTRAST		VDREG8(0x102)
+#define SHARPNESS		VDREG8(0x103)
+#define SAT_U			VDREG8(0x104)
+#define SAT_V			VDREG8(0x105)
+#define HUE			VDREG8(0x106)
+#define CROP_HI			VDREG8(0x107)
+#define VDELAY_LO		VDREG8(0x108)
+#define VACTIVE_LO		VDREG8(0x109)
+#define HDELAY_LO		VDREG8(0x10A)
+#define HACTIVE_LO		VDREG8(0x10B)
+#define MVSN			VDREG8(0x10C)
+#define STATUS2			VDREG8(0x10C)
+#define SDT			VDREG8(0x10E)
+#define SDT_EN			VDREG8(0x10F)
+
+#define VSCALE_LO		VDREG8(0x144)
+#define SCALE_HI		VDREG8(0x145)
+#define HSCALE_LO		VDREG8(0x146)
+#define F2CROP_HI		VDREG8(0x147)
+#define F2VDELAY_LO		VDREG8(0x148)
+#define F2VACTIVE_LO		VDREG8(0x149)
+#define F2HDELAY_LO		VDREG8(0x14A)
+#define F2HACTIVE_LO		VDREG8(0x14B)
+#define F2VSCALE_LO		VDREG8(0x14C)
+#define F2SCALE_HI		VDREG8(0x14D)
+#define F2HSCALE_LO		VDREG8(0x14E)
+#define F2CNT			VDREG8(0x14F)
+
+#define VDREG2(a0) ((const u16[2]) {a0, a0 + 0x100})
+#define SRST			VDREG2(0x180)
+#define ACNTL			VDREG2(0x181)
+#define ACNTL2			VDREG2(0x182)
+#define CNTRL1			VDREG2(0x183)
+#define CKHY			VDREG2(0x184)
+#define SHCOR			VDREG2(0x185)
+#define CORING			VDREG2(0x186)
+#define CLMPG			VDREG2(0x187)
+#define IAGC			VDREG2(0x188)
+#define VCTRL1			VDREG2(0x18F)
+#define MISC1			VDREG2(0x194)
+#define LOOP			VDREG2(0x195)
+#define MISC2			VDREG2(0x196)
+
+#define CLMD			VDREG2(0x197)
+#define AIGAIN			((const u16[8]) {0x1D0, 0x1D1, 0x1D2, 0x1D3, \
+						 0x2D0, 0x2D1, 0x2D2, 0x2D3})
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-video.c b/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
new file mode 100644
index 0000000..6ecb504
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
@@ -0,0 +1,821 @@
+/*
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * Written by Krzysztof Hałasa.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include "tw686x-kh.h"
+#include "tw686x-kh-regs.h"
+
+#define MAX_SG_ENTRY_SIZE (/* 8192 - 128 */ 4096)
+#define MAX_SG_DESC_COUNT 256 /* PAL 704x576 needs up to 198 4-KB pages */
+
+static const struct tw686x_format formats[] = {
+	{
+		.name = "4:2:2 packed, UYVY", /* aka Y422 */
+		.fourcc = V4L2_PIX_FMT_UYVY,
+		.mode = 0,
+		.depth = 16,
+	}, {
+#if 0
+		.name = "4:2:0 packed, YUV",
+		.mode = 1,	/* non-standard */
+		.depth = 12,
+	}, {
+		.name = "4:1:1 packed, YUV",
+		.mode = 2,	/* non-standard */
+		.depth = 12,
+	}, {
+#endif
+		.name = "4:1:1 packed, YUV",
+		.fourcc = V4L2_PIX_FMT_Y41P,
+		.mode = 3,
+		.depth = 12,
+	}, {
+		.name = "15 bpp RGB",
+		.fourcc = V4L2_PIX_FMT_RGB555,
+		.mode = 4,
+		.depth = 16,
+	}, {
+		.name = "16 bpp RGB",
+		.fourcc = V4L2_PIX_FMT_RGB565,
+		.mode = 5,
+		.depth = 16,
+	}, {
+		.name = "4:2:2 packed, YUYV",
+		.fourcc = V4L2_PIX_FMT_YUYV,
+		.mode = 6,
+		.depth = 16,
+	}
+	/* mode 7 is "reserved" */
+};
+
+static const v4l2_std_id video_standards[7] = {
+	V4L2_STD_NTSC,
+	V4L2_STD_PAL,
+	V4L2_STD_SECAM,
+	V4L2_STD_NTSC_443,
+	V4L2_STD_PAL_M,
+	V4L2_STD_PAL_N,
+	V4L2_STD_PAL_60,
+};
+
+static const struct tw686x_format *format_by_fourcc(unsigned int fourcc)
+{
+	unsigned int cnt;
+
+	for (cnt = 0; cnt < ARRAY_SIZE(formats); cnt++)
+		if (formats[cnt].fourcc == fourcc)
+			return &formats[cnt];
+	return NULL;
+}
+
+static void tw686x_get_format(struct tw686x_video_channel *vc,
+			      struct v4l2_format *f)
+{
+	const struct tw686x_format *format;
+	unsigned int width, height, height_div = 1;
+
+	format = format_by_fourcc(f->fmt.pix.pixelformat);
+	if (!format) {
+		format = &formats[0];
+		f->fmt.pix.pixelformat = format->fourcc;
+	}
+
+	width = 704;
+	if (f->fmt.pix.width < width * 3 / 4 /* halfway */)
+		width /= 2;
+
+	height = (vc->video_standard & V4L2_STD_625_50) ? 576 : 480;
+	if (f->fmt.pix.height < height * 3 / 4 /* halfway */)
+		height_div = 2;
+
+	switch (f->fmt.pix.field) {
+	case V4L2_FIELD_TOP:
+	case V4L2_FIELD_BOTTOM:
+		height_div = 2;
+		break;
+	case V4L2_FIELD_SEQ_BT:
+		if (height_div > 1)
+			f->fmt.pix.field = V4L2_FIELD_BOTTOM;
+		break;
+	default:
+		if (height_div > 1)
+			f->fmt.pix.field = V4L2_FIELD_TOP;
+		else
+			f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+	}
+	height /= height_div;
+
+	f->fmt.pix.width = width;
+	f->fmt.pix.height = height;
+	f->fmt.pix.bytesperline = f->fmt.pix.width * format->depth / 8;
+	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+}
+
+/* video queue operations */
+
+static int tw686x_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+			      unsigned int *nplanes, unsigned int sizes[],
+			      void *alloc_ctxs[])
+{
+	struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+	unsigned int size = vc->width * vc->height * vc->format->depth / 8;
+
+	alloc_ctxs[0] = vc->alloc_ctx;
+	if (*nbuffers < 2)
+		*nbuffers = 2;
+
+	if (*nplanes)
+		return sizes[0] < size ? -EINVAL : 0;
+
+	sizes[0] = size;
+	*nplanes = 1;		/* packed formats only */
+	return 0;
+}
+
+static void tw686x_buf_queue(struct vb2_buffer *vb)
+{
+	struct tw686x_video_channel *vc = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct tw686x_vb2_buf *buf;
+
+	buf = container_of(vbuf, struct tw686x_vb2_buf, vb);
+
+	spin_lock(&vc->qlock);
+	list_add_tail(&buf->list, &vc->vidq_queued);
+	spin_unlock(&vc->qlock);
+}
+
+static void setup_descs(struct tw686x_video_channel *vc, unsigned int n)
+{
+loop:
+	while (!list_empty(&vc->vidq_queued)) {
+		struct vdma_desc *descs = vc->sg_descs[n];
+		struct tw686x_vb2_buf *buf;
+		struct sg_table *vbuf;
+		struct scatterlist *sg;
+		unsigned int buf_len, count = 0;
+		int i;
+
+		buf = list_first_entry(&vc->vidq_queued, struct tw686x_vb2_buf,
+				       list);
+		list_del(&buf->list);
+
+		buf_len = vc->width * vc->height * vc->format->depth / 8;
+		if (vb2_plane_size(&buf->vb.vb2_buf, 0) < buf_len) {
+			pr_err("Video buffer size too small\n");
+			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+			goto loop; /* try another */
+		}
+
+		vbuf = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
+		for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
+			dma_addr_t phys = sg_dma_address(sg);
+			unsigned int len = sg_dma_len(sg);
+
+			while (len && buf_len) {
+				unsigned int entry_len = min_t(unsigned int, len,
+							   MAX_SG_ENTRY_SIZE);
+				entry_len = min(entry_len, buf_len);
+				if (count == MAX_SG_DESC_COUNT) {
+					pr_err("Video buffer size too fragmented\n");
+					vb2_buffer_done(&buf->vb.vb2_buf,
+							VB2_BUF_STATE_ERROR);
+					goto loop;
+				}
+				descs[count].phys = cpu_to_le32(phys);
+				descs[count++].flags_length =
+					cpu_to_le32(0x40000000 /* available */ |
+						    entry_len);
+				phys += entry_len;
+				len -= entry_len;
+				buf_len -= entry_len;
+			}
+			if (!buf_len)
+				break;
+		}
+
+		/* clear the remaining entries */
+		while (count < MAX_SG_DESC_COUNT) {
+			descs[count].phys = 0;
+			descs[count++].flags_length = 0; /* unavailable */
+		}
+
+		buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+		vc->curr_bufs[n] = buf;
+		return;
+	}
+	vc->curr_bufs[n] = NULL;
+}
+
+/* On TW6864 and TW6868, all channels share the pair of video DMA SG tables,
+   with 10-bit start_idx and end_idx determining start and end of frame buffer
+   for particular channel.
+   TW6868 with all its 8 channels would be problematic (only 127 SG entries per
+   channel) but we support only 4 channels on this chip anyway (the first
+   4 channels are driven with internal video decoder, the other 4 would require
+   an external TW286x part).
+
+   On TW6865 and TW6869, each channel has its own DMA SG table, with indexes
+   starting with 0. Both chips have complete sets of internal video decoders
+   (respectively 4 or 8-channel).
+
+   All chips have separate SG tables for two video frames. */
+
+static void setup_dma_cfg(struct tw686x_video_channel *vc)
+{
+	unsigned int field_width = 704;
+	unsigned int field_height = (vc->video_standard & V4L2_STD_625_50) ?
+		288 : 240;
+	unsigned int start_idx = is_second_gen(vc->dev) ? 0 :
+		vc->ch * MAX_SG_DESC_COUNT;
+	unsigned int end_idx = start_idx + MAX_SG_DESC_COUNT - 1;
+	u32 dma_cfg = (0 << 30) /* input selection */ |
+		(1 << 29) /* field2 dropped (if any) */ |
+		((vc->height < 300) << 28) /* field dropping */ |
+		(1 << 27) /* master */ |
+		(0 << 25) /* master channel (for slave only) */ |
+		(0 << 24) /* (no) vertical (line) decimation */ |
+		((vc->width < 400) << 23) /* horizontal decimation */ |
+		(vc->format->mode << 20) /* output video format */ |
+		(end_idx << 10) /* DMA end index */ |
+		start_idx /* DMA start index */;
+	u32 reg;
+
+	reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], dma_cfg);
+	reg_write(vc->dev, VIDEO_SIZE[vc->ch], (1 << 31) | (field_height << 16)
+		  | field_width);
+	reg = reg_read(vc->dev, VIDEO_CONTROL1);
+	if (vc->video_standard & V4L2_STD_625_50)
+		reg |= 1 << (vc->ch + 13);
+	else
+		reg &= ~(1 << (vc->ch + 13));
+	reg_write(vc->dev, VIDEO_CONTROL1, reg);
+}
+
+static int tw686x_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+	struct tw686x_dev *dev = vc->dev;
+	u32 dma_ch_mask;
+	unsigned int n;
+
+	setup_dma_cfg(vc);
+
+	/* queue video buffers if available */
+	spin_lock(&vc->qlock);
+	for (n = 0; n < 2; n++)
+		setup_descs(vc, n);
+	spin_unlock(&vc->qlock);
+
+	dev->video_active |= 1 << vc->ch;
+	vc->seq = 0;
+	dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE) | (1 << vc->ch);
+	reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask);
+	reg_write(dev, DMA_CMD, (1 << 31) | dma_ch_mask);
+	return 0;
+}
+
+static void tw686x_stop_streaming(struct vb2_queue *vq)
+{
+	struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+	struct tw686x_dev *dev = vc->dev;
+	u32 dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE);
+	u32 dma_cmd = reg_read(dev, DMA_CMD);
+	unsigned int n;
+
+	dma_ch_mask &= ~(1 << vc->ch);
+	reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask);
+
+	dev->video_active &= ~(1 << vc->ch);
+
+	dma_cmd &= ~(1 << vc->ch);
+	reg_write(dev, DMA_CMD, dma_cmd);
+
+	if (!dev->video_active) {
+		reg_write(dev, DMA_CMD, 0);
+		reg_write(dev, DMA_CHANNEL_ENABLE, 0);
+	}
+
+	spin_lock(&vc->qlock);
+	while (!list_empty(&vc->vidq_queued)) {
+		struct tw686x_vb2_buf *buf;
+
+		buf = list_entry(vc->vidq_queued.next, struct tw686x_vb2_buf,
+				 list);
+		list_del(&buf->list);
+		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+	}
+
+	for (n = 0; n < 2; n++)
+		if (vc->curr_bufs[n])
+			vb2_buffer_done(&vc->curr_bufs[n]->vb.vb2_buf,
+					VB2_BUF_STATE_ERROR);
+
+	spin_unlock(&vc->qlock);
+}
+
+static struct vb2_ops tw686x_video_qops = {
+	.queue_setup		= tw686x_queue_setup,
+	.buf_queue		= tw686x_buf_queue,
+	.start_streaming	= tw686x_start_streaming,
+	.stop_streaming		= tw686x_stop_streaming,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
+};
+
+static int tw686x_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct tw686x_video_channel *vc;
+	struct tw686x_dev *dev;
+	unsigned int ch;
+
+	vc = container_of(ctrl->handler, struct tw686x_video_channel,
+			  ctrl_handler);
+	dev = vc->dev;
+	ch = vc->ch;
+
+	switch (ctrl->id) {
+	case V4L2_CID_BRIGHTNESS:
+		reg_write(dev, BRIGHT[ch], ctrl->val & 0xFF);
+		return 0;
+
+	case V4L2_CID_CONTRAST:
+		reg_write(dev, CONTRAST[ch], ctrl->val);
+		return 0;
+
+	case V4L2_CID_SATURATION:
+		reg_write(dev, SAT_U[ch], ctrl->val);
+		reg_write(dev, SAT_V[ch], ctrl->val);
+		return 0;
+
+	case V4L2_CID_HUE:
+		reg_write(dev, HUE[ch], ctrl->val & 0xFF);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+	.s_ctrl = tw686x_s_ctrl,
+};
+
+static int tw686x_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+
+	f->fmt.pix.width = vc->width;
+	f->fmt.pix.height = vc->height;
+	f->fmt.pix.field = vc->field;
+	f->fmt.pix.pixelformat = vc->format->fourcc;
+	f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+	f->fmt.pix.bytesperline = f->fmt.pix.width * vc->format->depth / 8;
+	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+	return 0;
+}
+
+static int tw686x_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	tw686x_get_format(video_drvdata(file), f);
+	return 0;
+}
+
+static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+
+	tw686x_get_format(vc, f);
+	vc->format = format_by_fourcc(f->fmt.pix.pixelformat);
+	vc->field = f->fmt.pix.field;
+	vc->width = f->fmt.pix.width;
+	vc->height = f->fmt.pix.height;
+	return 0;
+}
+
+static int tw686x_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	struct tw686x_dev *dev = vc->dev;
+
+	strcpy(cap->driver, "tw686x-kh");
+	strcpy(cap->card, dev->name);
+	sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci_dev));
+	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+	return 0;
+}
+
+static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+	unsigned int cnt;
+	u32 sdt = 0; /* default */
+
+	for (cnt = 0; cnt < ARRAY_SIZE(video_standards); cnt++)
+		if (id & video_standards[cnt]) {
+			sdt = cnt;
+			break;
+		}
+
+	reg_write(vc->dev, SDT[vc->ch], sdt);
+	vc->video_standard = video_standards[sdt];
+	return 0;
+}
+
+static int tw686x_g_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+
+	*id = vc->video_standard;
+	return 0;
+}
+
+static int tw686x_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	if (f->index >= ARRAY_SIZE(formats))
+		return -EINVAL;
+
+	strlcpy(f->description, formats[f->index].name, sizeof(f->description));
+	f->pixelformat = formats[f->index].fourcc;
+	return 0;
+}
+
+static int tw686x_g_parm(struct file *file, void *priv,
+			 struct v4l2_streamparm *sp)
+{
+	struct tw686x_video_channel *vc = video_drvdata(file);
+
+	if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+	memset(&sp->parm.capture, 0, sizeof(sp->parm.capture));
+	sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+	v4l2_video_std_frame_period(vc->video_standard,
+				    &sp->parm.capture.timeperframe);
+
+	return 0;
+}
+
+static int tw686x_enum_input(struct file *file, void *priv,
+			     struct v4l2_input *inp)
+{
+	/* the chip has internal multiplexer, support can be added
+	   if the actual hw uses it */
+	if (inp->index)
+		return -EINVAL;
+
+	snprintf(inp->name, sizeof(inp->name), "Composite");
+	inp->type = V4L2_INPUT_TYPE_CAMERA;
+	inp->std = V4L2_STD_ALL;
+	inp->capabilities = V4L2_IN_CAP_STD;
+	return 0;
+}
+
+static int tw686x_g_input(struct file *file, void *priv, unsigned int *v)
+{
+	*v = 0;
+	return 0;
+}
+
+static int tw686x_s_input(struct file *file, void *priv, unsigned int v)
+{
+	if (v)
+		return -EINVAL;
+	return 0;
+}
+
+static const struct v4l2_file_operations tw686x_video_fops = {
+	.owner		= THIS_MODULE,
+	.open		= v4l2_fh_open,
+	.unlocked_ioctl	= video_ioctl2,
+	.release	= vb2_fop_release,
+	.poll		= vb2_fop_poll,
+	.read		= vb2_fop_read,
+	.mmap		= vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops tw686x_video_ioctl_ops = {
+	.vidioc_querycap		= tw686x_querycap,
+	.vidioc_enum_fmt_vid_cap	= tw686x_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap		= tw686x_g_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap		= tw686x_s_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap		= tw686x_try_fmt_vid_cap,
+	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
+	.vidioc_querybuf		= vb2_ioctl_querybuf,
+	.vidioc_qbuf			= vb2_ioctl_qbuf,
+	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
+	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
+	.vidioc_streamon		= vb2_ioctl_streamon,
+	.vidioc_streamoff		= vb2_ioctl_streamoff,
+	.vidioc_g_std			= tw686x_g_std,
+	.vidioc_s_std			= tw686x_s_std,
+	.vidioc_g_parm			= tw686x_g_parm,
+	.vidioc_enum_input		= tw686x_enum_input,
+	.vidioc_g_input			= tw686x_g_input,
+	.vidioc_s_input			= tw686x_s_input,
+	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
+};
+
+static int video_thread(void *arg)
+{
+	struct tw686x_dev *dev = arg;
+	DECLARE_WAITQUEUE(wait, current);
+
+	set_freezable();
+	add_wait_queue(&dev->video_thread_wait, &wait);
+
+	while (1) {
+		long timeout = schedule_timeout_interruptible(HZ);
+		unsigned int ch;
+
+		if (timeout == -ERESTARTSYS || kthread_should_stop())
+			break;
+
+		for (ch = 0; ch < max_channels(dev); ch++) {
+			struct tw686x_video_channel *vc;
+			unsigned long flags;
+			u32 request, n, stat = VB2_BUF_STATE_DONE;
+
+			vc = &dev->video_channels[ch];
+			if (!(dev->video_active & (1 << ch)))
+				continue;
+
+			spin_lock_irq(&dev->irq_lock);
+			request = dev->dma_requests & (0x01000001 << ch);
+			if (request)
+				dev->dma_requests &= ~request;
+			spin_unlock_irq(&dev->irq_lock);
+
+			if (!request)
+				continue;
+
+			request >>= ch;
+
+			/* handle channel events */
+			if ((request & 0x01000000) |
+			    (reg_read(dev, VIDEO_FIFO_STATUS) & (0x01010001 << ch)) |
+			    (reg_read(dev, VIDEO_PARSER_STATUS) & (0x00000101 << ch))) {
+				/* DMA Errors - reset channel */
+				u32 reg;
+
+				spin_lock_irqsave(&dev->irq_lock, flags);
+				reg = reg_read(dev, DMA_CMD);
+				/* Reset DMA channel */
+				reg_write(dev, DMA_CMD, reg & ~(1 << ch));
+				reg_write(dev, DMA_CMD, reg);
+				spin_unlock_irqrestore(&dev->irq_lock, flags);
+				stat = VB2_BUF_STATE_ERROR;
+			}
+
+			/* handle video stream */
+			mutex_lock(&vc->vb_mutex);
+			spin_lock(&vc->qlock);
+			n = !!(reg_read(dev, PB_STATUS) & (1 << ch));
+			if (vc->curr_bufs[n]) {
+				struct vb2_v4l2_buffer *vb;
+
+				vb = &vc->curr_bufs[n]->vb;
+				vb->vb2_buf.timestamp = ktime_get_ns();
+				vb->field = vc->field;
+				if (V4L2_FIELD_HAS_BOTH(vc->field))
+					vb->sequence = vc->seq++;
+				else
+					vb->sequence = (vc->seq++) / 2;
+				vb2_set_plane_payload(&vb->vb2_buf, 0,
+				      vc->width * vc->height * vc->format->depth / 8);
+				vb2_buffer_done(&vb->vb2_buf, stat);
+			}
+			setup_descs(vc, n);
+			spin_unlock(&vc->qlock);
+			mutex_unlock(&vc->vb_mutex);
+		}
+		try_to_freeze();
+	}
+
+	remove_wait_queue(&dev->video_thread_wait, &wait);
+	return 0;
+}
+
+int tw686x_kh_video_irq(struct tw686x_dev *dev)
+{
+	unsigned long flags, handled = 0;
+	u32 requests;
+
+	spin_lock_irqsave(&dev->irq_lock, flags);
+	requests = dev->dma_requests;
+	spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+	if (requests & dev->video_active) {
+		wake_up_interruptible_all(&dev->video_thread_wait);
+		handled = 1;
+	}
+	return handled;
+}
+
+void tw686x_kh_video_free(struct tw686x_dev *dev)
+{
+	unsigned int ch, n;
+
+	if (dev->video_thread)
+		kthread_stop(dev->video_thread);
+
+	for (ch = 0; ch < max_channels(dev); ch++) {
+		struct tw686x_video_channel *vc = &dev->video_channels[ch];
+
+		v4l2_ctrl_handler_free(&vc->ctrl_handler);
+		if (vc->device)
+			video_unregister_device(vc->device);
+		vb2_dma_sg_cleanup_ctx(vc->alloc_ctx);
+		for (n = 0; n < 2; n++) {
+			struct dma_desc *descs = &vc->sg_tables[n];
+
+			if (descs->virt)
+				pci_free_consistent(dev->pci_dev, descs->size,
+						    descs->virt, descs->phys);
+		}
+	}
+
+	v4l2_device_unregister(&dev->v4l2_dev);
+}
+
+#define SG_TABLE_SIZE (MAX_SG_DESC_COUNT * sizeof(struct vdma_desc))
+
+int tw686x_kh_video_init(struct tw686x_dev *dev)
+{
+	unsigned int ch, n;
+	int err;
+
+	init_waitqueue_head(&dev->video_thread_wait);
+
+	err = v4l2_device_register(&dev->pci_dev->dev, &dev->v4l2_dev);
+	if (err)
+		return err;
+
+	reg_write(dev, VIDEO_CONTROL1, 0); /* NTSC, disable scaler */
+	reg_write(dev, PHASE_REF, 0x00001518); /* Scatter-gather DMA mode */
+
+	/* setup required SG table sizes */
+	for (n = 0; n < 2; n++)
+		if (is_second_gen(dev)) {
+			/* TW 6865, TW6869 - each channel needs a pair of
+			   descriptor tables */
+			for (ch = 0; ch < max_channels(dev); ch++)
+				dev->video_channels[ch].sg_tables[n].size =
+					SG_TABLE_SIZE;
+
+		} else
+			/* TW 6864, TW6868 - we need to allocate a pair of
+			   descriptor tables, common for all channels.
+			   Each table will be bigger than 4 KB. */
+			dev->video_channels[0].sg_tables[n].size =
+				max_channels(dev) * SG_TABLE_SIZE;
+
+	/* allocate SG tables and initialize video channels */
+	for (ch = 0; ch < max_channels(dev); ch++) {
+		struct tw686x_video_channel *vc = &dev->video_channels[ch];
+		struct video_device *vdev;
+
+		mutex_init(&vc->vb_mutex);
+		spin_lock_init(&vc->qlock);
+		INIT_LIST_HEAD(&vc->vidq_queued);
+
+		vc->dev = dev;
+		vc->ch = ch;
+
+		/* default settings: NTSC */
+		vc->format = &formats[0];
+		vc->video_standard = V4L2_STD_NTSC;
+		reg_write(vc->dev, SDT[vc->ch], 0);
+		vc->field = V4L2_FIELD_SEQ_BT;
+		vc->width = 704;
+		vc->height = 480;
+
+		for (n = 0; n < 2; n++) {
+			void *cpu;
+
+			if (vc->sg_tables[n].size) {
+				unsigned int reg = n ? DMA_PAGE_TABLE1_ADDR[ch] :
+					DMA_PAGE_TABLE0_ADDR[ch];
+
+				cpu = pci_alloc_consistent(dev->pci_dev,
+							   vc->sg_tables[n].size,
+							   &vc->sg_tables[n].phys);
+				if (!cpu) {
+					pr_err("Error allocating video DMA scatter-gather tables\n");
+					err = -ENOMEM;
+					goto error;
+				}
+				vc->sg_tables[n].virt = cpu;
+				reg_write(dev, reg, vc->sg_tables[n].phys);
+			} else
+				cpu = dev->video_channels[0].sg_tables[n].virt +
+					ch * SG_TABLE_SIZE;
+
+			vc->sg_descs[n] = cpu;
+		}
+
+		reg_write(dev, VCTRL1[0], 0x24);
+		reg_write(dev, LOOP[0], 0xA5);
+		if (max_channels(dev) > 4) {
+			reg_write(dev, VCTRL1[1], 0x24);
+			reg_write(dev, LOOP[1], 0xA5);
+		}
+		reg_write(dev, VIDEO_FIELD_CTRL[ch], 0);
+		reg_write(dev, VDELAY_LO[ch], 0x14);
+
+		vdev = video_device_alloc();
+		if (!vdev) {
+			pr_warn("Unable to allocate video device\n");
+			err = -ENOMEM;
+			goto error;
+		}
+
+		vc->alloc_ctx = vb2_dma_sg_init_ctx(&dev->pci_dev->dev);
+		if (IS_ERR(vc->alloc_ctx)) {
+			pr_warn("Unable to initialize DMA scatter-gather context\n");
+			err = PTR_ERR(vc->alloc_ctx);
+			goto error;
+		}
+
+		vc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+		vc->vidq.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+		vc->vidq.drv_priv = vc;
+		vc->vidq.buf_struct_size = sizeof(struct tw686x_vb2_buf);
+		vc->vidq.ops = &tw686x_video_qops;
+		vc->vidq.mem_ops = &vb2_dma_sg_memops;
+		vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+		vc->vidq.min_buffers_needed = 2;
+		vc->vidq.lock = &vc->vb_mutex;
+		vc->vidq.gfp_flags = GFP_DMA32;
+
+		err = vb2_queue_init(&vc->vidq);
+		if (err)
+			goto error;
+
+		snprintf(vdev->name, sizeof(vdev->name), "%s video", dev->name);
+		vdev->fops = &tw686x_video_fops;
+		vdev->ioctl_ops = &tw686x_video_ioctl_ops;
+		vdev->release = video_device_release;
+		vdev->v4l2_dev = &dev->v4l2_dev;
+		vdev->queue = &vc->vidq;
+		vdev->tvnorms = V4L2_STD_ALL;
+		vdev->minor = -1;
+		vdev->lock = &vc->vb_mutex;
+
+		dev->video_channels[ch].device = vdev;
+		video_set_drvdata(vdev, vc);
+		err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+		if (err < 0)
+			goto error;
+
+		v4l2_ctrl_handler_init(&vc->ctrl_handler,
+				       4 /* number of controls */);
+		vdev->ctrl_handler = &vc->ctrl_handler;
+		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+				  V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+				  V4L2_CID_CONTRAST, 0, 255, 1, 64);
+		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+				  V4L2_CID_SATURATION, 0, 255, 1, 128);
+		v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops, V4L2_CID_HUE,
+				  -124, 127, 1, 0);
+		err = vc->ctrl_handler.error;
+		if (err)
+			goto error;
+
+		v4l2_ctrl_handler_setup(&vc->ctrl_handler);
+	}
+
+	dev->video_thread = kthread_run(video_thread, dev, "tw686x_video");
+	if (IS_ERR(dev->video_thread)) {
+		err = PTR_ERR(dev->video_thread);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	tw686x_kh_video_free(dev);
+	return err;
+}
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh.h b/drivers/staging/media/tw686x-kh/tw686x-kh.h
new file mode 100644
index 0000000..dc25796
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * Written by Krzysztof Hałasa.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <media/videobuf2-dma-sg.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#define TYPE_MAX_CHANNELS 0x0F
+#define TYPE_SECOND_GEN   0x10
+
+struct tw686x_format {
+	char *name;
+	unsigned int fourcc;
+	unsigned int depth;
+	unsigned int mode;
+};
+
+struct dma_desc {
+	dma_addr_t phys;
+	void *virt;
+	unsigned int size;
+};
+
+struct vdma_desc {
+	__le32 flags_length;	/* 3 MSBits for flags, 13 LSBits for length */
+	__le32 phys;
+};
+
+struct tw686x_vb2_buf {
+	struct vb2_v4l2_buffer vb;
+	struct list_head list;
+};
+
+struct tw686x_video_channel {
+	struct tw686x_dev *dev;
+
+	struct vb2_queue vidq;
+	struct list_head vidq_queued;
+	struct video_device *device;
+	struct dma_desc sg_tables[2];
+	struct tw686x_vb2_buf *curr_bufs[2];
+	void *alloc_ctx;
+	struct vdma_desc *sg_descs[2];
+
+	struct v4l2_ctrl_handler ctrl_handler;
+	const struct tw686x_format *format;
+	struct mutex vb_mutex;
+	spinlock_t qlock;
+	v4l2_std_id video_standard;
+	unsigned int width, height;
+	enum v4l2_field field; /* supported TOP, BOTTOM, SEQ_TB and SEQ_BT */
+	unsigned int seq;	       /* video field or frame counter */
+	unsigned int ch;
+};
+
+/* global device status */
+struct tw686x_dev {
+	spinlock_t irq_lock;
+
+	struct v4l2_device v4l2_dev;
+	struct snd_card *card;	/* sound card */
+
+	unsigned int video_active;	/* active video channel mask */
+
+	char name[32];
+	unsigned int type;
+	struct pci_dev *pci_dev;
+	__u32 __iomem *mmio;
+
+	struct task_struct *video_thread;
+	wait_queue_head_t video_thread_wait;
+	u32 dma_requests;
+
+	struct tw686x_video_channel video_channels[0];
+};
+
+static inline uint32_t reg_read(struct tw686x_dev *dev, unsigned int reg)
+{
+	return readl(dev->mmio + reg);
+}
+
+static inline void reg_write(struct tw686x_dev *dev, unsigned int reg,
+			     uint32_t value)
+{
+	writel(value, dev->mmio + reg);
+}
+
+static inline unsigned int max_channels(struct tw686x_dev *dev)
+{
+	return dev->type & TYPE_MAX_CHANNELS; /* 4 or 8 channels */
+}
+
+static inline unsigned int is_second_gen(struct tw686x_dev *dev)
+{
+	/* each channel has its own DMA SG table */
+	return dev->type & TYPE_SECOND_GEN;
+}
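+
+/*
+ * Worked example (illustrative values, not taken from a datasheet): a
+ * dev->type of 0x14 would decode as a second-generation, 4-channel
+ * device, while 0x08 would decode as a first-generation, 8-channel one.
+ */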
+
+int tw686x_kh_video_irq(struct tw686x_dev *dev);
+int tw686x_kh_video_init(struct tw686x_dev *dev);
+void tw686x_kh_video_free(struct tw686x_dev *dev);
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
new file mode 100644
index 0000000..d277f04
--- /dev/null
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -0,0 +1,35 @@
+config FB_OLPC_DCON
+	tristate "One Laptop Per Child Display CONtroller support"
+	depends on OLPC && FB
+	depends on I2C
+	depends on (GPIO_CS5535 || GPIO_CS5535=n)
+	select BACKLIGHT_CLASS_DEVICE
+	---help---
+	  In order to support very low power operation, the XO laptop uses a
+	  secondary Display CONtroller, or DCON.  This secondary controller
+	  is present in the video pipeline between the primary display
+	  controller (integrated into the processor or chipset) and the LCD
+	  panel.  It allows the main processor/display controller to be
+	  completely powered off while still retaining an image on the display.
+	  This controller is only available on OLPC platforms.  Unless you have
+	  one of these platforms, you will want to say 'N'.
+
+config FB_OLPC_DCON_1
+	bool "OLPC XO-1 DCON support"
+	depends on FB_OLPC_DCON && GPIO_CS5535
+	default y
+	---help---
+	  Enable support for the DCON in XO-1 model laptops.  The kernel
+	  communicates with the DCON using model-specific code.  If you
+	  have an XO-1 (or if you're unsure what model you have), you should
+	  say 'Y'.
+
+config FB_OLPC_DCON_1_5
+	bool "OLPC XO-1.5 DCON support"
+	depends on FB_OLPC_DCON && ACPI
+	default y
+	---help---
+	  Enable support for the DCON in XO-1.5 model laptops.  The kernel
+	  communicates with the DCON using model-specific code.  If you
+	  have an XO-1.5 (or if you're unsure what model you have), you
+	  should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
new file mode 100644
index 0000000..36c7e67
--- /dev/null
+++ b/drivers/staging/olpc_dcon/Makefile
@@ -0,0 +1,6 @@
+olpc-dcon-objs += olpc_dcon.o
+olpc-dcon-$(CONFIG_FB_OLPC_DCON_1)	+= olpc_dcon_xo_1.o
+olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5)	+= olpc_dcon_xo_1_5.o
+obj-$(CONFIG_FB_OLPC_DCON)	+= olpc-dcon.o
+
+
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
new file mode 100644
index 0000000..61c2e65
--- /dev/null
+++ b/drivers/staging/olpc_dcon/TODO
@@ -0,0 +1,9 @@
+TODO:
+	- see if vx855 gpio API can be made similar enough to cs5535 so we can
+	  share more code
+	- allow simultaneous XO-1 and XO-1.5 support
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+copy:
+	Daniel Drake <dsd@laptop.org>
+	Jens Frederich <jfrederich@gmail.com>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
new file mode 100644
index 0000000..f45b2ef
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -0,0 +1,813 @@
+/*
+ * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
+ *
+ * Copyright © 2006-2007  Red Hat, Inc.
+ * Copyright © 2006-2007  Advanced Micro Devices, Inc.
+ * Copyright © 2009       VIA Technology, Inc.
+ * Copyright (c) 2010-2011  Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software.  You can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/fb.h>
+#include <linux/console.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/backlight.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/reboot.h>
+#include <linux/olpc-ec.h>
+#include <asm/tsc.h>
+#include <asm/olpc.h>
+
+#include "olpc_dcon.h"
+
+/* Module definitions */
+
+static ushort resumeline = 898;
+module_param(resumeline, ushort, 0444);
+
+static struct dcon_platform_data *pdata;
+
+/* I2C structures */
+
+/* Platform devices */
+static struct platform_device *dcon_device;
+
+static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END };
+
+static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val)
+{
+	return i2c_smbus_write_word_data(dcon->client, reg, val);
+}
+
+static s32 dcon_read(struct dcon_priv *dcon, u8 reg)
+{
+	return i2c_smbus_read_word_data(dcon->client, reg);
+}
+
+/* ===== API functions - these are called by a variety of users ==== */
+
+static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
+{
+	u16 ver;
+	int rc = 0;
+
+	ver = dcon_read(dcon, DCON_REG_ID);
+	if ((ver >> 8) != 0xDC) {
+		pr_err("DCON ID not 0xDCxx: 0x%04x instead.\n", ver);
+		rc = -ENXIO;
+		goto err;
+	}
+
+	if (is_init) {
+		pr_info("Discovered DCON version %x\n", ver & 0xFF);
+		rc = pdata->init(dcon);
+		if (rc != 0) {
+			pr_err("Unable to init.\n");
+			goto err;
+		}
+	}
+
+	if (ver < 0xdc02) {
+		dev_err(&dcon->client->dev,
+				"DCON v1 is unsupported, giving up..\n");
+		rc = -ENODEV;
+		goto err;
+	}
+
+	/* SDRAM setup/hold time */
+	dcon_write(dcon, 0x3a, 0xc040);
+	dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000);  /* clear option bits */
+	dcon_write(dcon, DCON_REG_MEM_OPT_A,
+				MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN);
+	dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET);
+
+	/* Colour swizzle, AA, no passthrough, backlight */
+	if (is_init) {
+		dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE |
+				MODE_CSWIZZLE | MODE_COL_AA;
+	}
+	dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+
+	/* Set the scanline to interrupt on during resume */
+	dcon_write(dcon, DCON_REG_SCAN_INT, resumeline);
+
+err:
+	return rc;
+}
+
+/*
+ * The smbus doesn't always come back due to what is believed to be
+ * hardware (power rail) bugs.  For older models where this is known to
+ * occur, our solution is to attempt to wait for the bus to stabilize;
+ * if it doesn't happen, cut power to the dcon, repower it, and wait
+ * for the bus to stabilize.  Rinse, repeat until we have a working
+ * smbus.  For newer models, we simply BUG(); we want to know if this
+ * still happens despite the power fixes that have been made!
+ */
+static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
+{
+	unsigned long timeout;
+	u8 pm;
+	int x;
+
+power_up:
+	if (is_powered_down) {
+		pm = 1;
+		x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
+		if (x) {
+			pr_warn("unable to force dcon to power up: %d!\n", x);
+			return x;
+		}
+		usleep_range(10000, 11000);  /* we'll be conservative */
+	}
+
+	pdata->bus_stabilize_wiggle();
+
+	for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
+		usleep_range(1000, 1100);
+		x = dcon_read(dcon, DCON_REG_ID);
+	}
+	if (x < 0) {
+		pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n");
+		BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
+		pm = 0;
+		olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
+		msleep(100);
+		is_powered_down = 1;
+		goto power_up;	/* argh, stupid hardware.. */
+	}
+
+	if (is_powered_down)
+		return dcon_hw_init(dcon, 0);
+	return 0;
+}
+
+static void dcon_set_backlight(struct dcon_priv *dcon, u8 level)
+{
+	dcon->bl_val = level;
+	dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val);
+
+	/* Purposely turn off the backlight when we go to level 0 */
+	if (dcon->bl_val == 0) {
+		dcon->disp_mode &= ~MODE_BL_ENABLE;
+		dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+	} else if (!(dcon->disp_mode & MODE_BL_ENABLE)) {
+		dcon->disp_mode |= MODE_BL_ENABLE;
+		dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+	}
+}
+
+/* Set the output type to either color or mono */
+static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono)
+{
+	if (dcon->mono == enable_mono)
+		return 0;
+
+	dcon->mono = enable_mono;
+
+	if (enable_mono) {
+		dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA);
+		dcon->disp_mode |= MODE_MONO_LUMA;
+	} else {
+		dcon->disp_mode &= ~(MODE_MONO_LUMA);
+		dcon->disp_mode |= MODE_CSWIZZLE | MODE_COL_AA;
+	}
+
+	dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode);
+	return 0;
+}
+
+/* For now, this will be really stupid - we need to address how
+ * DCONLOAD works in a sleep and account for it accordingly
+ */
+
+static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
+{
+	int x;
+
+	/* Turn off the backlight and put the DCON to sleep */
+
+	if (dcon->asleep == sleep)
+		return;
+
+	if (!olpc_board_at_least(olpc_board(0xc2)))
+		return;
+
+	if (sleep) {
+		u8 pm = 0;
+
+		x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
+		if (x)
+			pr_warn("unable to force dcon to power down: %d!\n", x);
+		else
+			dcon->asleep = sleep;
+	} else {
+		/* Only re-enable the backlight if the backlight value is set */
+		if (dcon->bl_val != 0)
+			dcon->disp_mode |= MODE_BL_ENABLE;
+		x = dcon_bus_stabilize(dcon, 1);
+		if (x)
+			pr_warn("unable to reinit dcon hardware: %d!\n", x);
+		else
+			dcon->asleep = sleep;
+
+		/* Restore backlight */
+		dcon_set_backlight(dcon, dcon->bl_val);
+	}
+
+	/* We should turn off some stuff in the framebuffer - but what? */
+}
+
+/* the DCON seems to get confused if we change DCONLOAD too
+ * frequently -- i.e., approximately faster than frame time.
+ * normally we don't change it this fast, so in general we won't
+ * delay here.
+ */
+static void dcon_load_holdoff(struct dcon_priv *dcon)
+{
+	ktime_t delta_t, now;
+
+	while (1) {
+		now = ktime_get();
+		delta_t = ktime_sub(now, dcon->load_time);
+		if (ktime_to_ns(delta_t) > NSEC_PER_MSEC * 20)
+			break;
+		mdelay(4);
+	}
+}
+
+static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank)
+{
+	int err;
+
+	console_lock();
+	if (!lock_fb_info(dcon->fbinfo)) {
+		console_unlock();
+		dev_err(&dcon->client->dev, "unable to lock framebuffer\n");
+		return false;
+	}
+
+	dcon->ignore_fb_events = true;
+	err = fb_blank(dcon->fbinfo,
+			blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK);
+	dcon->ignore_fb_events = false;
+	unlock_fb_info(dcon->fbinfo);
+	console_unlock();
+
+	if (err) {
+		dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n",
+				blank ? "" : "un");
+		return false;
+	}
+	return true;
+}
+
+/* Set the source of the display (CPU or DCON) */
+static void dcon_source_switch(struct work_struct *work)
+{
+	struct dcon_priv *dcon = container_of(work, struct dcon_priv,
+			switch_source);
+	int source = dcon->pending_src;
+
+	if (dcon->curr_src == source)
+		return;
+
+	dcon_load_holdoff(dcon);
+
+	dcon->switched = false;
+
+	switch (source) {
+	case DCON_SOURCE_CPU:
+		pr_info("dcon_source_switch to CPU\n");
+		/* Enable the scanline interrupt bit */
+		if (dcon_write(dcon, DCON_REG_MODE,
+				dcon->disp_mode | MODE_SCAN_INT))
+			pr_err("couldn't enable scanline interrupt!\n");
+		else
+			/* Wait up to one second for the scanline interrupt */
+			wait_event_timeout(dcon->waitq, dcon->switched, HZ);
+
+		if (!dcon->switched)
+			pr_err("Timeout entering CPU mode; expect a screen glitch.\n");
+
+		/* Turn off the scanline interrupt */
+		if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode))
+			pr_err("couldn't disable scanline interrupt!\n");
+
+		/*
+		 * Ideally we'd like to disable interrupts here so that the
+		 * fb unblanking and DCON turn on happen at a known time value;
+		 * however, we can't do that right now with fb_blank
+		 * messing with semaphores.
+		 *
+		 * For now, we just hope..
+		 */
+		if (!dcon_blank_fb(dcon, false)) {
+			pr_err("Failed to enter CPU mode\n");
+			dcon->pending_src = DCON_SOURCE_DCON;
+			return;
+		}
+
+		/* And turn off the DCON */
+		pdata->set_dconload(1);
+		dcon->load_time = ktime_get();
+
+		pr_info("The CPU has control\n");
+		break;
+	case DCON_SOURCE_DCON:
+	{
+		ktime_t delta_t;
+
+		pr_info("dcon_source_switch to DCON\n");
+
+		/* Clear DCONLOAD - this implies that the DCON is in control */
+		pdata->set_dconload(0);
+		dcon->load_time = ktime_get();
+
+		wait_event_timeout(dcon->waitq, dcon->switched, HZ/2);
+
+		if (!dcon->switched) {
+			pr_err("Timeout entering DCON mode; expect a screen glitch.\n");
+		} else {
+			/* sometimes the DCON doesn't follow its own rules,
+			 * and doesn't wait for two vsync pulses before
+			 * ack'ing the frame load with an IRQ.  the result
+			 * is that the display shows the *previously*
+			 * loaded frame.  we can detect this by looking at
+			 * the time between asserting DCONLOAD and the IRQ --
+			 * if it's less than 20msec, then the DCON couldn't
+			 * have seen two VSYNC pulses.  in that case we
+			 * deassert and reassert, and hope for the best.
+			 * see http://dev.laptop.org/ticket/9664
+			 */
+			delta_t = ktime_sub(dcon->irq_time, dcon->load_time);
+			if (dcon->switched && ktime_to_ns(delta_t)
+			    < NSEC_PER_MSEC * 20) {
+				pr_err("missed loading, retrying\n");
+				pdata->set_dconload(1);
+				mdelay(41);
+				pdata->set_dconload(0);
+				dcon->load_time = ktime_get();
+				mdelay(41);
+			}
+		}
+
+		dcon_blank_fb(dcon, true);
+		pr_info("The DCON has control\n");
+		break;
+	}
+	default:
+		BUG();
+	}
+
+	dcon->curr_src = source;
+}
+
+static void dcon_set_source(struct dcon_priv *dcon, int arg)
+{
+	if (dcon->pending_src == arg)
+		return;
+
+	dcon->pending_src = arg;
+
+	if (dcon->curr_src != arg)
+		schedule_work(&dcon->switch_source);
+}
+
+static void dcon_set_source_sync(struct dcon_priv *dcon, int arg)
+{
+	dcon_set_source(dcon, arg);
+	flush_scheduled_work();
+}
+
+static ssize_t dcon_mode_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%4.4X\n", dcon->disp_mode);
+}
+
+static ssize_t dcon_sleep_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", dcon->asleep);
+}
+
+static ssize_t dcon_freeze_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0);
+}
+
+static ssize_t dcon_mono_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct dcon_priv *dcon = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d\n", dcon->mono);
+}
+
+static ssize_t dcon_resumeline_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", resumeline);
+}
+
+static ssize_t dcon_mono_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long enable_mono;
+	int rc;
+
+	rc = kstrtoul(buf, 10, &enable_mono);
+	if (rc)
+		return rc;
+
+	dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false);
+
+	return count;
+}
+
+static ssize_t dcon_freeze_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct dcon_priv *dcon = dev_get_drvdata(dev);
+	unsigned long output;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &output);
+	if (ret)
+		return ret;
+
+	pr_info("dcon_freeze_store: %lu\n", output);
+
+	switch (output) {
+	case 0:
+		dcon_set_source(dcon, DCON_SOURCE_CPU);
+		break;
+	case 1:
+		dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
+		break;
+	case 2:  /* normally unused */
+		dcon_set_source(dcon, DCON_SOURCE_DCON);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t dcon_resumeline_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned short rl;
+	int rc;
+
+	rc = kstrtou16(buf, 10, &rl);
+	if (rc)
+		return rc;
+
+	resumeline = rl;
+	dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline);
+
+	return count;
+}
+
+static ssize_t dcon_sleep_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long output;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &output);
+	if (ret)
+		return ret;
+
+	dcon_sleep(dev_get_drvdata(dev), output ? true : false);
+	return count;
+}
+
+static struct device_attribute dcon_device_files[] = {
+	__ATTR(mode, 0444, dcon_mode_show, NULL),
+	__ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store),
+	__ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store),
+	__ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store),
+	__ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
+};
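+
+/*
+ * Usage sketch (assumption: with the "dcon" platform device registered in
+ * dcon_probe(), these attributes appear under /sys/devices/platform/dcon/):
+ * writing 1 to "freeze" hands the frame over to the DCON, writing 0 returns
+ * control to the CPU; "sleep" and "monochrome" take 0/1 the same way.
+ */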
+
+static int dcon_bl_update(struct backlight_device *dev)
+{
+	struct dcon_priv *dcon = bl_get_data(dev);
+	u8 level = dev->props.brightness & 0x0F;
+
+	if (dev->props.power != FB_BLANK_UNBLANK)
+		level = 0;
+
+	if (level != dcon->bl_val)
+		dcon_set_backlight(dcon, level);
+
+	/* power down the DCON when the screen is blanked */
+	if (!dcon->ignore_fb_events)
+		dcon_sleep(dcon, !!(dev->props.state & BL_CORE_FBBLANK));
+
+	return 0;
+}
+
+static int dcon_bl_get(struct backlight_device *dev)
+{
+	struct dcon_priv *dcon = bl_get_data(dev);
+
+	return dcon->bl_val;
+}
+
+static const struct backlight_ops dcon_bl_ops = {
+	.update_status = dcon_bl_update,
+	.get_brightness = dcon_bl_get,
+};
+
+static struct backlight_properties dcon_bl_props = {
+	.max_brightness = 15,
+	.type = BACKLIGHT_RAW,
+	.power = FB_BLANK_UNBLANK,
+};
+
+static int dcon_reboot_notify(struct notifier_block *nb,
+			      unsigned long foo, void *bar)
+{
+	struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb);
+
+	if (!dcon || !dcon->client)
+		return NOTIFY_DONE;
+
+	/* Turn off the DCON. Entirely. */
+	dcon_write(dcon, DCON_REG_MODE, 0x39);
+	dcon_write(dcon, DCON_REG_MODE, 0x32);
+	return NOTIFY_DONE;
+}
+
+static int unfreeze_on_panic(struct notifier_block *nb,
+			     unsigned long e, void *p)
+{
+	pdata->set_dconload(1);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block dcon_panic_nb = {
+	.notifier_call = unfreeze_on_panic,
+};
+
+static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+	strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE);
+
+	return 0;
+}
+
+static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	struct dcon_priv *dcon;
+	int rc, i, j;
+
+	if (!pdata)
+		return -ENXIO;
+
+	dcon = kzalloc(sizeof(*dcon), GFP_KERNEL);
+	if (!dcon)
+		return -ENOMEM;
+
+	dcon->client = client;
+	init_waitqueue_head(&dcon->waitq);
+	INIT_WORK(&dcon->switch_source, dcon_source_switch);
+	dcon->reboot_nb.notifier_call = dcon_reboot_notify;
+	dcon->reboot_nb.priority = -1;
+
+	i2c_set_clientdata(client, dcon);
+
+	if (num_registered_fb < 1) {
+		dev_err(&client->dev, "DCON driver requires a registered fb\n");
+		rc = -EIO;
+		goto einit;
+	}
+	dcon->fbinfo = registered_fb[0];
+
+	rc = dcon_hw_init(dcon, 1);
+	if (rc)
+		goto einit;
+
+	/* Add the DCON device */
+
+	dcon_device = platform_device_alloc("dcon", -1);
+
+	if (!dcon_device) {
+		pr_err("Unable to create the DCON device\n");
+		rc = -ENOMEM;
+		goto eirq;
+	}
+	rc = platform_device_add(dcon_device);
+	platform_set_drvdata(dcon_device, dcon);
+
+	if (rc) {
+		pr_err("Unable to add the DCON device\n");
+		goto edev;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) {
+		rc = device_create_file(&dcon_device->dev,
+					&dcon_device_files[i]);
+		if (rc) {
+			dev_err(&dcon_device->dev, "Cannot create sysfs file\n");
+			goto ecreate;
+		}
+	}
+
+	dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F;
+
+	/* Add the backlight device for the DCON */
+	dcon_bl_props.brightness = dcon->bl_val;
+	dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev,
+		dcon, &dcon_bl_ops, &dcon_bl_props);
+	if (IS_ERR(dcon->bl_dev)) {
+		dev_err(&client->dev, "cannot register backlight dev (%ld)\n",
+				PTR_ERR(dcon->bl_dev));
+		dcon->bl_dev = NULL;
+	}
+
+	register_reboot_notifier(&dcon->reboot_nb);
+	atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb);
+
+	return 0;
+
+ ecreate:
+	for (j = 0; j < i; j++)
+		device_remove_file(&dcon_device->dev, &dcon_device_files[j]);
+ edev:
+	platform_device_unregister(dcon_device);
+	dcon_device = NULL;
+ eirq:
+	free_irq(DCON_IRQ, dcon);
+ einit:
+	kfree(dcon);
+	return rc;
+}
+
+static int dcon_remove(struct i2c_client *client)
+{
+	struct dcon_priv *dcon = i2c_get_clientdata(client);
+
+	unregister_reboot_notifier(&dcon->reboot_nb);
+	atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb);
+
+	free_irq(DCON_IRQ, dcon);
+
+	backlight_device_unregister(dcon->bl_dev);
+
+	if (dcon_device)
+		platform_device_unregister(dcon_device);
+	cancel_work_sync(&dcon->switch_source);
+
+	kfree(dcon);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int dcon_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct dcon_priv *dcon = i2c_get_clientdata(client);
+
+	if (!dcon->asleep) {
+		/* Set up the DCON to have the source */
+		dcon_set_source_sync(dcon, DCON_SOURCE_DCON);
+	}
+
+	return 0;
+}
+
+static int dcon_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct dcon_priv *dcon = i2c_get_clientdata(client);
+
+	if (!dcon->asleep) {
+		dcon_bus_stabilize(dcon, 0);
+		dcon_set_source(dcon, DCON_SOURCE_CPU);
+	}
+
+	return 0;
+}
+
+#else
+
+#define dcon_suspend NULL
+#define dcon_resume NULL
+
+#endif /* CONFIG_PM */
+
+irqreturn_t dcon_interrupt(int irq, void *id)
+{
+	struct dcon_priv *dcon = id;
+	u8 status;
+
+	if (pdata->read_status(&status))
+		return IRQ_NONE;
+
+	switch (status & 3) {
+	case 3:
+		pr_debug("DCONLOAD_MISSED interrupt\n");
+		break;
+
+	case 2:	/* switch to DCON mode */
+	case 1: /* switch to CPU mode */
+		dcon->switched = true;
+		dcon->irq_time = ktime_get();
+		wake_up(&dcon->waitq);
+		break;
+
+	case 0:
+		/* workaround resume case:  the DCON (on 1.5) doesn't
+		 * ever assert status 0x01 when switching to CPU mode
+		 * during resume.  this is because DCONLOAD is de-asserted
+		 * _immediately_ upon exiting S3, so the actual release
+		 * of the DCON happened long before this point.
+		 * see http://dev.laptop.org/ticket/9869
+		 */
+		if (dcon->curr_src != dcon->pending_src && !dcon->switched) {
+			dcon->switched = true;
+			dcon->irq_time = ktime_get();
+			wake_up(&dcon->waitq);
+			pr_debug("switching w/ status 0/0\n");
+		} else {
+			pr_debug("scanline interrupt w/CPU\n");
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static const struct dev_pm_ops dcon_pm_ops = {
+	.suspend = dcon_suspend,
+	.resume = dcon_resume,
+};
+
+static const struct i2c_device_id dcon_idtable[] = {
+	{ "olpc_dcon",  0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, dcon_idtable);
+
+static struct i2c_driver dcon_driver = {
+	.driver = {
+		.name	= "olpc_dcon",
+		.pm = &dcon_pm_ops,
+	},
+	.class = I2C_CLASS_DDC | I2C_CLASS_HWMON,
+	.id_table = dcon_idtable,
+	.probe = dcon_probe,
+	.remove = dcon_remove,
+	.detect = dcon_detect,
+	.address_list = normal_i2c,
+};
+
+static int __init olpc_dcon_init(void)
+{
+#ifdef CONFIG_FB_OLPC_DCON_1_5
+	/* XO-1.5 */
+	if (olpc_board_at_least(olpc_board(0xd0)))
+		pdata = &dcon_pdata_xo_1_5;
+#endif
+#ifdef CONFIG_FB_OLPC_DCON_1
+	if (!pdata)
+		pdata = &dcon_pdata_xo_1;
+#endif
+
+	return i2c_add_driver(&dcon_driver);
+}
+
+static void __exit olpc_dcon_exit(void)
+{
+	i2c_del_driver(&dcon_driver);
+}
+
+module_init(olpc_dcon_init);
+module_exit(olpc_dcon_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
new file mode 100644
index 0000000..215e7ec
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -0,0 +1,111 @@
+#ifndef OLPC_DCON_H_
+#define OLPC_DCON_H_
+
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+
+/* DCON registers */
+
+#define DCON_REG_ID		 0
+#define DCON_REG_MODE		 1
+
+#define MODE_PASSTHRU	(1<<0)
+#define MODE_SLEEP	(1<<1)
+#define MODE_SLEEP_AUTO	(1<<2)
+#define MODE_BL_ENABLE	(1<<3)
+#define MODE_BLANK	(1<<4)
+#define MODE_CSWIZZLE	(1<<5)
+#define MODE_COL_AA	(1<<6)
+#define MODE_MONO_LUMA	(1<<7)
+#define MODE_SCAN_INT	(1<<8)
+#define MODE_CLOCKDIV	(1<<9)
+#define MODE_DEBUG	(1<<14)
+#define MODE_SELFTEST	(1<<15)
+
+#define DCON_REG_HRES		0x2
+#define DCON_REG_HTOTAL		0x3
+#define DCON_REG_HSYNC_WIDTH	0x4
+#define DCON_REG_VRES		0x5
+#define DCON_REG_VTOTAL		0x6
+#define DCON_REG_VSYNC_WIDTH	0x7
+#define DCON_REG_TIMEOUT	0x8
+#define DCON_REG_SCAN_INT	0x9
+#define DCON_REG_BRIGHT		0xa
+#define DCON_REG_MEM_OPT_A	0x41
+#define DCON_REG_MEM_OPT_B	0x42
+
+/* Load Delay Locked Loop (DLL) settings for clock delay */
+#define MEM_DLL_CLOCK_DELAY	(1<<0)
+/* Memory controller power down function */
+#define MEM_POWER_DOWN		(1<<8)
+/* Memory controller software reset */
+#define MEM_SOFT_RESET		(1<<0)
+
+/* Status values */
+
+#define DCONSTAT_SCANINT	0
+#define DCONSTAT_SCANINT_DCON	1
+#define DCONSTAT_DISPLAYLOAD	2
+#define DCONSTAT_MISSED		3
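+
+/*
+ * Informal mapping (inferred from the switch in dcon_interrupt(), not from
+ * separate documentation): a STAT1:STAT0 reading of 2 (DCONSTAT_DISPLAYLOAD)
+ * signals that the DCON has taken over the display, 1 that control has
+ * switched back to the CPU, and 3 that a DCONLOAD event was missed.
+ */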
+
+/* Source values */
+
+#define DCON_SOURCE_DCON        0
+#define DCON_SOURCE_CPU         1
+
+/* Interrupt */
+#define DCON_IRQ                6
+
+struct dcon_priv {
+	struct i2c_client *client;
+	struct fb_info *fbinfo;
+	struct backlight_device *bl_dev;
+
+	wait_queue_head_t waitq;
+	struct work_struct switch_source;
+	struct notifier_block reboot_nb;
+
+	/* Shadow register for the DCON_REG_MODE register */
+	u8 disp_mode;
+
+	/* The current backlight value - this saves us some smbus traffic */
+	u8 bl_val;
+
+	/* Current source, initialized at probe time */
+	int curr_src;
+
+	/* Desired source */
+	int pending_src;
+
+	/* Variables used during switches */
+	bool switched;
+	ktime_t irq_time;
+	ktime_t load_time;
+
+	/* Current output type; true == mono, false == color */
+	bool mono;
+	bool asleep;
+	/* This gets set while controlling fb blank state from the driver */
+	bool ignore_fb_events;
+};
+
+struct dcon_platform_data {
+	int (*init)(struct dcon_priv *);
+	void (*bus_stabilize_wiggle)(void);
+	void (*set_dconload)(int);
+	int (*read_status)(u8 *);
+};
+
+#include <linux/interrupt.h>
+
+irqreturn_t dcon_interrupt(int irq, void *id);
+
+#ifdef CONFIG_FB_OLPC_DCON_1
+extern struct dcon_platform_data dcon_pdata_xo_1;
+#endif
+
+#ifdef CONFIG_FB_OLPC_DCON_1_5
+extern struct dcon_platform_data dcon_pdata_xo_1_5;
+#endif
+
+#endif
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
new file mode 100644
index 0000000..0c5a10c
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -0,0 +1,205 @@
+/*
+ * Mainly by David Woodhouse, somewhat modified by Jordan Crouse
+ *
+ * Copyright © 2006-2007  Red Hat, Inc.
+ * Copyright © 2006-2007  Advanced Micro Devices, Inc.
+ * Copyright © 2009       VIA Technology, Inc.
+ * Copyright (c) 2010  Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software.  You can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cs5535.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <asm/olpc.h>
+
+#include "olpc_dcon.h"
+
+static int dcon_init_xo_1(struct dcon_priv *dcon)
+{
+	unsigned char lob;
+
+	if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
+		pr_err("failed to request STAT0 GPIO\n");
+		return -EIO;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
+		pr_err("failed to request STAT1 GPIO\n");
+		goto err_gp_stat1;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
+		pr_err("failed to request IRQ GPIO\n");
+		goto err_gp_irq;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
+		pr_err("failed to request LOAD GPIO\n");
+		goto err_gp_load;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
+		pr_err("failed to request BLANK GPIO\n");
+		goto err_gp_blank;
+	}
+
+	/* Turn off the event enable for GPIO7 just to be safe */
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+
+	/*
+	 * Determine the current state by reading the GPIO bit; earlier
+	 * stages of the boot process have established the state.
+	 *
+	 * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
+	 * this is because OFW will disable input for the pin and set a value.
+	 * READ_BACK will only contain a valid value if input is enabled and
+	 * then a value is set.  So, future readings of the pin can use
+	 * READ_BACK, but the first one cannot.  Awesome, huh?
+	 */
+	dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
+		? DCON_SOURCE_CPU
+		: DCON_SOURCE_DCON;
+	dcon->pending_src = dcon->curr_src;
+
+	/* Set the directions for the GPIO pins */
+	gpio_direction_input(OLPC_GPIO_DCON_STAT0);
+	gpio_direction_input(OLPC_GPIO_DCON_STAT1);
+	gpio_direction_input(OLPC_GPIO_DCON_IRQ);
+	gpio_direction_input(OLPC_GPIO_DCON_BLANK);
+	gpio_direction_output(OLPC_GPIO_DCON_LOAD,
+			dcon->curr_src == DCON_SOURCE_CPU);
+
+	/* Set up the interrupt mappings */
+
+	/* Set the IRQ to pair 2 */
+	cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);
+
+	/* Enable group 2 to trigger the DCON interrupt */
+	cs5535_gpio_set_irq(2, DCON_IRQ);
+
+	/* Select edge level for interrupt (in PIC) */
+	lob = inb(0x4d0);
+	lob &= ~(1 << DCON_IRQ);
+	outb(lob, 0x4d0);
+
+	/* Register the interrupt handler */
+	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) {
+		pr_err("failed to request DCON's irq\n");
+		goto err_req_irq;
+	}
+
+	/* Clear INV_EN for GPIO7 (DCONIRQ) */
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);
+
+	/* Enable filter for GPIO12 (DCONBLANK) */
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);
+
+	/* Disable filter for GPIO7 */
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);
+
+	/* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);
+
+	/* Add GPIO12 to the Filter Event Pair #7 */
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);
+
+	/* Turn off negative Edge Enable for GPIO12 */
+	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);
+
+	/* Enable negative Edge Enable for GPIO7 */
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);
+
+	/* Zero the filter amount for Filter Event Pair #7 */
+	cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);
+
+	/* Clear the negative edge status for GPIO7 and GPIO12 */
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);
+
+	/* FIXME:  Clear the positive status as well, just to be sure */
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);
+
+	/* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);
+
+	return 0;
+
+err_req_irq:
+	gpio_free(OLPC_GPIO_DCON_BLANK);
+err_gp_blank:
+	gpio_free(OLPC_GPIO_DCON_LOAD);
+err_gp_load:
+	gpio_free(OLPC_GPIO_DCON_IRQ);
+err_gp_irq:
+	gpio_free(OLPC_GPIO_DCON_STAT1);
+err_gp_stat1:
+	gpio_free(OLPC_GPIO_DCON_STAT0);
+	return -EIO;
+}
+
+static void dcon_wiggle_xo_1(void)
+{
+	int x;
+
+	/*
+	 * According to HiMax, when powering the DCON up we should hold
+	 * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
+	 * state machine to reset to a (sane) initial state.  Mitch Bradley
+	 * did some testing and discovered that holding for 16 SMB_CLK cycles
+	 * worked a lot more reliably, so that's what we do here.
+	 *
+	 * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must
+	 * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
+	 * GPIO15.
+	 */
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+
+	for (x = 0; x < 16; x++) {
+		udelay(5);
+		cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+		udelay(5);
+		cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+	}
+	udelay(5);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+}
+
+static void dcon_set_dconload_1(int val)
+{
+	gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
+}
+
+static int dcon_read_status_xo_1(u8 *status)
+{
+	*status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+	*status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+
+	/* Clear the negative edge status for GPIO7 */
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+
+	return 0;
+}
+
+struct dcon_platform_data dcon_pdata_xo_1 = {
+	.init = dcon_init_xo_1,
+	.bus_stabilize_wiggle = dcon_wiggle_xo_1,
+	.set_dconload = dcon_set_dconload_1,
+	.read_status = dcon_read_status_xo_1,
+};
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
new file mode 100644
index 0000000..6a4d379
--- /dev/null
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2009,2010       One Laptop per Child
+ *
+ * This program is free software.  You can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/olpc.h>
+
+/* TODO: this eventually belongs in linux/vx855.h */
+#define NR_VX855_GPI    14
+#define NR_VX855_GPO    13
+#define NR_VX855_GPIO   15
+
+#define VX855_GPI(n)    (n)
+#define VX855_GPO(n)    (NR_VX855_GPI + (n))
+#define VX855_GPIO(n)   (NR_VX855_GPI + NR_VX855_GPO + (n))
+
+#include "olpc_dcon.h"
+
+/* Hardware setup on the XO 1.5:
+ *	DCONLOAD connects to VX855_GPIO1 (not SMBCK2)
+ *	DCONBLANK connects to VX855_GPIO8 (not SSPICLK)  unused in driver
+ *	DCONSTAT0 connects to VX855_GPI10 (not SSPISDI)
+ *	DCONSTAT1 connects to VX855_GPI11 (not nSSPISS)
+ *	DCONIRQ connects to VX855_GPIO12
+ *	DCONSMBDATA connects to VX855 graphics CRTSPD
+ *	DCONSMBCLK connects to VX855 graphics CRTSPCLK
+ */
+
+#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */
+#define VX855_GPI_STATUS_CHG 0x450  /* PMIO_Rx50 */
+#define VX855_GPI_SCI_SMI 0x452  /* PMIO_Rx52 */
+#define BIT_GPIO12 0x40
+
+#define PREFIX "OLPC DCON:"
+
+static void dcon_clear_irq(void)
+{
+	/* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */
+	outb(BIT_GPIO12, VX855_GPI_STATUS_CHG);
+}
+
+static int dcon_was_irq(void)
+{
+	u_int8_t tmp;
+
+	/* irq status will appear in PMIO_Rx50[6] on gpio12 */
+	tmp = inb(VX855_GPI_STATUS_CHG);
+	return !!(tmp & BIT_GPIO12);
+}
+
+static int dcon_init_xo_1_5(struct dcon_priv *dcon)
+{
+	unsigned int irq;
+
+	dcon_clear_irq();
+
+	/* set   PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
+	outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
+
+	/* Determine the current state of DCONLOAD, likely set by firmware */
+	/* GPIO1 */
+	dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ?
+			DCON_SOURCE_CPU : DCON_SOURCE_DCON;
+	dcon->pending_src = dcon->curr_src;
+
+	/* we're sharing the IRQ with ACPI */
+	irq = acpi_gbl_FADT.sci_interrupt;
+	if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) {
+		pr_err("DCON (IRQ%d) allocation failed\n", irq);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void set_i2c_line(int sda, int scl)
+{
+	unsigned char tmp;
+	unsigned int port = 0x26;
+
+	/* FIXME: This directly accesses the CRT GPIO controller !!! */
+	outb(port, 0x3c4);
+	tmp = inb(0x3c5);
+
+	if (scl)
+		tmp |= 0x20;
+	else
+		tmp &= ~0x20;
+
+	if (sda)
+		tmp |= 0x10;
+	else
+		tmp &= ~0x10;
+
+	tmp |= 0x01;
+
+	outb(port, 0x3c4);
+	outb(tmp, 0x3c5);
+}
+
+
+static void dcon_wiggle_xo_1_5(void)
+{
+	int x;
+
+	/*
+	 * According to HiMax, when powering the DCON up we should hold
+	 * SMB_DATA high for 8 SMB_CLK cycles.  This will force the DCON
+	 * state machine to reset to a (sane) initial state.  Mitch Bradley
+	 * did some testing and discovered that holding for 16 SMB_CLK cycles
+	 * worked a lot more reliably, so that's what we do here.
+	 */
+	set_i2c_line(1, 1);
+
+	for (x = 0; x < 16; x++) {
+		udelay(5);
+		set_i2c_line(1, 0);
+		udelay(5);
+		set_i2c_line(1, 1);
+	}
+	udelay(5);
+
+	/* set   PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
+	outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
+}
+
+static void dcon_set_dconload_xo_1_5(int val)
+{
+	gpio_set_value(VX855_GPIO(1), val);
+}
+
+static int dcon_read_status_xo_1_5(u8 *status)
+{
+	if (!dcon_was_irq())
+		return -1;
+
+	/* I believe this is the same as "inb(0x44b) & 3" */
+	*status = gpio_get_value(VX855_GPI(10));
+	*status |= gpio_get_value(VX855_GPI(11)) << 1;
+
+	dcon_clear_irq();
+
+	return 0;
+}
+
+struct dcon_platform_data dcon_pdata_xo_1_5 = {
+	.init = dcon_init_xo_1_5,
+	.bus_stabilize_wiggle = dcon_wiggle_xo_1_5,
+	.set_dconload = dcon_set_dconload_xo_1_5,
+	.read_status = dcon_read_status_xo_1_5,
+};
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig
index 3e668d8..a925fb0 100644
--- a/drivers/staging/rdma/hfi1/Kconfig
+++ b/drivers/staging/rdma/hfi1/Kconfig
@@ -2,6 +2,7 @@
 	tristate "Intel OPA Gen1 support"
 	depends on X86_64 && INFINIBAND_RDMAVT
 	select MMU_NOTIFIER
+	select CRC32
 	default m
 	---help---
 	This is a low-level driver for Intel OPA Gen1 adapter.
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
index 05de0da..4c6f1d7 100644
--- a/drivers/staging/rdma/hfi1/TODO
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -3,4 +3,4 @@
 - Remove unneeded file entries in sysfs
 - Remove software processing of IB protocol and place in library for use
   by qib, ipath (if still present), hfi1, and eventually soft-roce
-
+- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 8396dc5..c1c5bf8 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -49,6 +49,8 @@
 #include <linux/vmalloc.h>
 #include <linux/io.h>
 
+#include <rdma/ib.h>
+
 #include "hfi.h"
 #include "pio.h"
 #include "device.h"
@@ -190,6 +192,10 @@
 	int uctxt_required = 1;
 	int must_be_root = 0;
 
+	/* FIXME: This interface cannot continue out of staging */
+	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+		return -EACCES;
+
 	if (count < sizeof(cmd)) {
 		ret = -EINVAL;
 		goto bail;
@@ -791,15 +797,16 @@
 	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
 
 	dd->rcd[uctxt->ctxt] = NULL;
+
+	hfi1_user_exp_rcv_free(fdata);
+	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
+
 	uctxt->rcvwait_to = 0;
 	uctxt->piowait_to = 0;
 	uctxt->rcvnowait = 0;
 	uctxt->pionowait = 0;
 	uctxt->event_flags = 0;
 
-	hfi1_user_exp_rcv_free(fdata);
-	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
-
 	hfi1_stats.sps_ctxts--;
 	if (++dd->freectxts == dd->num_user_contexts)
 		aspm_enable_all(dd);
@@ -1127,27 +1134,13 @@
 
 static int user_init(struct file *fp)
 {
-	int ret;
 	unsigned int rcvctrl_ops = 0;
 	struct hfi1_filedata *fd = fp->private_data;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 
 	/* make sure that the context has already been setup */
-	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
-		ret = -EFAULT;
-		goto done;
-	}
-
-	/*
-	 * Subctxts don't need to initialize anything since master
-	 * has done it.
-	 */
-	if (fd->subctxt) {
-		ret = wait_event_interruptible(uctxt->wait, !test_bit(
-					       HFI1_CTXT_MASTER_UNINIT,
-					       &uctxt->event_flags));
-		goto expected;
-	}
+	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+		return -EFAULT;
 
 	/* initialize poll variables... */
 	uctxt->urgent = 0;
@@ -1202,19 +1195,7 @@
 		wake_up(&uctxt->wait);
 	}
 
-expected:
-	/*
-	 * Expected receive has to be setup for all processes (including
-	 * shared contexts). However, it has to be done after the master
-	 * context has been fully configured as it depends on the
-	 * eager/expected split of the RcvArray entries.
-	 * Setting it up here ensures that the subcontexts will be waiting
-	 * (due to the above wait_event_interruptible() until the master
-	 * is setup.
-	 */
-	ret = hfi1_user_exp_rcv_init(fp);
-done:
-	return ret;
+	return 0;
 }
 
 static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
@@ -1261,7 +1242,7 @@
 	int ret = 0;
 
 	/*
-	 * Context should be set up only once (including allocation and
+	 * Context should be set up only once, including allocation and
 	 * programming of eager buffers. This is done if context sharing
 	 * is not requested or by the master process.
 	 */
@@ -1282,10 +1263,29 @@
 			if (ret)
 				goto done;
 		}
+	} else {
+		ret = wait_event_interruptible(uctxt->wait, !test_bit(
+					       HFI1_CTXT_MASTER_UNINIT,
+					       &uctxt->event_flags));
+		if (ret)
+			goto done;
 	}
+
 	ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
 	if (ret)
 		goto done;
+	/*
+	 * Expected receive has to be set up for all processes (including
+	 * shared contexts). However, it has to be done after the master
+	 * context has been fully configured as it depends on the
+	 * eager/expected split of the RcvArray entries.
+	 * Setting it up here ensures that the subcontexts will be waiting
+	 * (due to the above wait_event_interruptible()) until the master
+	 * is set up.
+	 */
+	ret = hfi1_user_exp_rcv_init(fp);
+	if (ret)
+		goto done;
 
 	set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
 done:
@@ -1565,29 +1565,8 @@
 {
 	struct hfi1_devdata *dd = filp->private_data;
 
-	switch (whence) {
-	case SEEK_SET:
-		break;
-	case SEEK_CUR:
-		offset += filp->f_pos;
-		break;
-	case SEEK_END:
-		offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
-			offset;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (offset < 0)
-		return -EINVAL;
-
-	if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
-		return -EINVAL;
-
-	filp->f_pos = offset;
-
-	return filp->f_pos;
+	return fixed_size_llseek(filp, offset, whence,
+		(dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
 }
 
 /* NOTE: assumes unsigned long is 8 bytes */
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c
index c7ad016..b3f0682 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/staging/rdma/hfi1/mmu_rb.c
@@ -71,6 +71,7 @@
 					    struct mm_struct *,
 					    unsigned long, unsigned long);
 static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
+					struct mm_struct *,
 					unsigned long, unsigned long);
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
 					   unsigned long, unsigned long);
@@ -137,7 +138,7 @@
 			rbnode = rb_entry(node, struct mmu_rb_node, node);
 			rb_erase(node, root);
 			if (handler->ops->remove)
-				handler->ops->remove(root, rbnode, false);
+				handler->ops->remove(root, rbnode, NULL);
 		}
 	}
 
@@ -176,7 +177,7 @@
 	return ret;
 }
 
-/* Caller must host handler lock */
+/* Caller must hold handler lock */
 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
 					   unsigned long addr,
 					   unsigned long len)
@@ -200,15 +201,21 @@
 	return node;
 }
 
+/* Caller must *not* hold handler lock. */
 static void __mmu_rb_remove(struct mmu_rb_handler *handler,
-			    struct mmu_rb_node *node, bool arg)
+			    struct mmu_rb_node *node, struct mm_struct *mm)
 {
+	unsigned long flags;
+
 	/* Validity of handler and node pointers has been checked by caller. */
 	hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
 		  node->len);
+	spin_lock_irqsave(&handler->lock, flags);
 	__mmu_int_rb_remove(node, handler->root);
+	spin_unlock_irqrestore(&handler->lock, flags);
+
 	if (handler->ops->remove)
-		handler->ops->remove(handler->root, node, arg);
+		handler->ops->remove(handler->root, node, mm);
 }
 
 struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
@@ -231,14 +238,11 @@
 void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
 {
 	struct mmu_rb_handler *handler = find_mmu_handler(root);
-	unsigned long flags;
 
 	if (!handler || !node)
 		return;
 
-	spin_lock_irqsave(&handler->lock, flags);
-	__mmu_rb_remove(handler, node, false);
-	spin_unlock_irqrestore(&handler->lock, flags);
+	__mmu_rb_remove(handler, node, NULL);
 }
 
 static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
@@ -260,7 +264,7 @@
 static inline void mmu_notifier_page(struct mmu_notifier *mn,
 				     struct mm_struct *mm, unsigned long addr)
 {
-	mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
+	mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
 }
 
 static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
@@ -268,25 +272,31 @@
 					    unsigned long start,
 					    unsigned long end)
 {
-	mmu_notifier_mem_invalidate(mn, start, end);
+	mmu_notifier_mem_invalidate(mn, mm, start, end);
 }
 
 static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
+					struct mm_struct *mm,
 					unsigned long start, unsigned long end)
 {
 	struct mmu_rb_handler *handler =
 		container_of(mn, struct mmu_rb_handler, mn);
 	struct rb_root *root = handler->root;
-	struct mmu_rb_node *node;
+	struct mmu_rb_node *node, *ptr = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&handler->lock, flags);
-	for (node = __mmu_int_rb_iter_first(root, start, end - 1); node;
-	     node = __mmu_int_rb_iter_next(node, start, end - 1)) {
+	for (node = __mmu_int_rb_iter_first(root, start, end - 1);
+	     node; node = ptr) {
+		/* Guard against node removal. */
+		ptr = __mmu_int_rb_iter_next(node, start, end - 1);
 		hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
 			  node->addr, node->len);
-		if (handler->ops->invalidate(root, node))
-			__mmu_rb_remove(handler, node, true);
+		if (handler->ops->invalidate(root, node)) {
+			spin_unlock_irqrestore(&handler->lock, flags);
+			__mmu_rb_remove(handler, node, mm);
+			spin_lock_irqsave(&handler->lock, flags);
+		}
 	}
 	spin_unlock_irqrestore(&handler->lock, flags);
 }
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h
index f8523fd..19a306e 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/staging/rdma/hfi1/mmu_rb.h
@@ -59,7 +59,8 @@
 struct mmu_rb_ops {
 	bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
 	int (*insert)(struct rb_root *, struct mmu_rb_node *);
-	void (*remove)(struct rb_root *, struct mmu_rb_node *, bool);
+	void (*remove)(struct rb_root *, struct mmu_rb_node *,
+		       struct mm_struct *);
 	int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
 };
 
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index 29a5ad2..dc9119e 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -519,10 +519,12 @@
 	 * do the flush work until that QP's
 	 * sdma work has finished.
 	 */
+	spin_lock(&qp->s_lock);
 	if (qp->s_flags & RVT_S_WAIT_DMA) {
 		qp->s_flags &= ~RVT_S_WAIT_DMA;
 		hfi1_schedule_send(qp);
 	}
+	spin_unlock(&qp->s_lock);
 }
 
 /**
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
index 0861e09..8bd56d5 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c
@@ -87,7 +87,8 @@
 static int set_rcvarray_entry(struct file *, unsigned long, u32,
 			      struct tid_group *, struct page **, unsigned);
 static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
+			  struct mm_struct *);
 static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
 static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
 			    struct tid_pageset *, unsigned, u16, struct page **,
@@ -254,6 +255,8 @@
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 	struct tid_group *grp, *gptr;
 
+	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+		return 0;
 	/*
 	 * The notifier would have been removed when the process'es mm
 	 * was freed.
@@ -899,7 +902,7 @@
 	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
 		return -EBADF;
 	if (HFI1_CAP_IS_USET(TID_UNMAP))
-		mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false);
+		mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
 	else
 		hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
 
@@ -965,7 +968,7 @@
 					continue;
 				if (HFI1_CAP_IS_USET(TID_UNMAP))
 					mmu_rb_remove(&fd->tid_rb_root,
-						      &node->mmu, false);
+						      &node->mmu, NULL);
 				else
 					hfi1_mmu_rb_remove(&fd->tid_rb_root,
 							   &node->mmu);
@@ -1032,7 +1035,7 @@
 }
 
 static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
-			  bool notifier)
+			  struct mm_struct *mm)
 {
 	struct hfi1_filedata *fdata =
 		container_of(root, struct hfi1_filedata, tid_rb_root);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index ab6b6a4..d53a659 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -278,7 +278,8 @@
 static void user_sdma_free_request(struct user_sdma_request *, bool);
 static int pin_vector_pages(struct user_sdma_request *,
 			    struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
+			       unsigned);
 static int check_header_template(struct user_sdma_request *,
 				 struct hfi1_pkt_header *, u32, u32);
 static int set_txreq_header(struct user_sdma_request *,
@@ -299,7 +300,8 @@
 static void activate_packet_queue(struct iowait *, int);
 static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
 static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
+			   struct mm_struct *);
 static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
 
 static struct mmu_rb_ops sdma_rb_ops = {
@@ -1063,8 +1065,10 @@
 	rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
 				     (unsigned long)iovec->iov.iov_base,
 				     iovec->iov.iov_len);
-	if (rb_node)
+	if (rb_node && !IS_ERR(rb_node))
 		node = container_of(rb_node, struct sdma_mmu_node, rb);
+	else
+		rb_node = NULL;
 
 	if (!node) {
 		node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -1107,7 +1111,8 @@
 			goto bail;
 		}
 		if (pinned != npages) {
-			unpin_vector_pages(current->mm, pages, pinned);
+			unpin_vector_pages(current->mm, pages, node->npages,
+					   pinned);
 			ret = -EFAULT;
 			goto bail;
 		}
@@ -1147,9 +1152,9 @@
 }
 
 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
-			       unsigned npages)
+			       unsigned start, unsigned npages)
 {
-	hfi1_release_user_pages(mm, pages, npages, 0);
+	hfi1_release_user_pages(mm, pages + start, npages, 0);
 	kfree(pages);
 }
 
@@ -1502,7 +1507,7 @@
 				&req->pq->sdma_rb_root,
 				(unsigned long)req->iovs[i].iov.iov_base,
 				req->iovs[i].iov.iov_len);
-			if (!mnode)
+			if (!mnode || IS_ERR(mnode))
 				continue;
 
 			node = container_of(mnode, struct sdma_mmu_node, rb);
@@ -1547,7 +1552,7 @@
 }
 
 static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
-			   bool notifier)
+			   struct mm_struct *mm)
 {
 	struct sdma_mmu_node *node =
 		container_of(mnode, struct sdma_mmu_node, rb);
@@ -1557,14 +1562,20 @@
 	node->pq->n_locked -= node->npages;
 	spin_unlock(&node->pq->evict_lock);
 
-	unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+	/*
+	 * If mm is set, we are being called by the MMU notifier and we
+	 * should not pass a mm_struct to unpin_vector_pages(). This is to
+	 * prevent a deadlock when hfi1_release_user_pages() attempts to
+	 * take the mmap_sem, which the MMU notifier has already taken.
+	 */
+	unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
 			   node->npages);
 	/*
 	 * If called by the MMU notifier, we have to adjust the pinned
 	 * page count ourselves.
 	 */
-	if (notifier)
-		current->mm->pinned_vm -= node->npages;
+	if (mm)
+		mm->pinned_vm -= node->npages;
 	kfree(node);
 }
 
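The hfi1 SDMA hunks above change two related things: the rb_remove callback now receives the notifier's mm_struct instead of a bool, so the callback can adjust pinned_vm on that mm without re-taking mmap_sem, and unpin_vector_pages() gains a start index so a partially successful pin can release just the freshly pinned tail via pages + start. Below is a minimal, standalone sketch of that pointer arithmetic only; the names merely mimic the driver and none of this is the real hfi1 code.

    /* Standalone sketch of the offset-based release; release_range() stands
     * in for hfi1_release_user_pages() and works on pages + start. */
    #include <stdlib.h>

    static void release_range(void **pages, unsigned int npages)
    {
            unsigned int i;

            for (i = 0; i < npages; i++)
                    free(pages[i]);
    }

    int main(void)
    {
            void *pages[8];
            unsigned int i, start = 5, pinned = 3;

            for (i = 0; i < 8; i++)
                    pages[i] = malloc(16);
            release_range(pages + start, pinned);   /* only the new tail */
            release_range(pages, start);            /* the rest, separately */
            return 0;
    }
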
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index 63bb875..d976e5e 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -155,7 +155,7 @@
 	dev->netdev_ops = &mon_netdev_ops;
 	dev->destructor = free_netdev;
 	ether_setup(dev);
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->type = ARPHRD_IEEE80211;
 	/*
 	 * Use a locally administered address (IEEE 802)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 9b7cc7d..13a5ddc 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -1792,7 +1792,7 @@
 	__skb_queue_tail(&ring->queue, skb);
 	pdesc->OWN = 1;
 	spin_unlock_irqrestore(&priv->irq_th_lock, flags);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	rtl92e_writew(dev, TPPoll, 0x01 << tcb_desc->queue_index);
 	return 0;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index cfab715..62154e3 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1991,7 +1991,7 @@
 		return 2;
 
 	if (!time_after(jiffies,
-			ieee->dev->trans_start + msecs_to_jiffies(timeout)))
+			dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
 		return 0;
 	if (!time_after(jiffies,
 			ieee->last_rx_ps_time + msecs_to_jiffies(timeout)))
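The rtl8192e changes above follow the tree-wide conversion away from touching dev->trans_start directly: transmit paths call netif_trans_update() and watchdog-style checks read dev_trans_start(), because the timestamp now lives in the per-queue netdev_queue. The sketch below is only an approximation of what those helpers do in this era's netdevice.h; the header has the authoritative definitions.

    /* Approximate shape of the helpers (kernel context, not standalone). */
    static inline void netif_trans_update(struct net_device *dev)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

            if (txq->trans_start != jiffies)
                    txq->trans_start = jiffies;     /* stamp queue 0 */
    }

    /* dev_trans_start(dev) returns the most recent trans_start across the
     * device's TX queues, which is what the timeout checks above want. */
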
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index ae1274c..d705595 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -249,7 +249,7 @@
 				ieee->seq_ctrl[0]++;
 
 			/* avoid watchdog triggers */
-			ieee->dev->trans_start = jiffies;
+			netif_trans_update(ieee->dev);
 			ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
 			//dev_kfree_skb_any(skb);//edit by thomas
 		}
@@ -302,7 +302,7 @@
 			ieee->seq_ctrl[0]++;
 
 		/* avoid watchdog triggers */
-		ieee->dev->trans_start = jiffies;
+		netif_trans_update(ieee->dev);
 		ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
 
 	}else{
@@ -1737,7 +1737,7 @@
 		return 2;
 
 	if(!time_after(jiffies,
-		       ieee->dev->trans_start + msecs_to_jiffies(timeout)))
+		       dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
 		return 0;
 
 	if(!time_after(jiffies,
@@ -2205,7 +2205,7 @@
 				ieee->dev, ieee->rate);
 				//(i+1)<ieee->tx_pending.txb->nr_frags);
 			ieee->stats.tx_packets++;
-			ieee->dev->trans_start = jiffies;
+			netif_trans_update(ieee->dev);
 		}
 	}
 
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 849a95e..4af0140 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1108,7 +1108,7 @@
 
 	if (tcb_desc->queue_index != TXCMD_QUEUE) {
 		if (tx_urb->status == 0) {
-			dev->trans_start = jiffies;
+			netif_trans_update(dev);
 			priv->stats.txoktotal++;
 			priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
 			priv->stats.txbytesunicast +=
@@ -1715,7 +1715,7 @@
 				return -1;
 			}
 		}
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 		atomic_inc(&priv->tx_pending[tcb_desc->queue_index]);
 		return 0;
 	}
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
index f4fff38..7dd1540 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
@@ -2113,10 +2113,10 @@
 
 	if (channel <= RTW_CH_MAX_2G_CHANNEL)
 		freq = ieee80211_channel_to_frequency(channel,
-						      IEEE80211_BAND_2GHZ);
+						      NL80211_BAND_2GHZ);
 	else
 		freq = ieee80211_channel_to_frequency(channel,
-						      IEEE80211_BAND_5GHZ);
+						      NL80211_BAND_5GHZ);
 
 	if (cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pframe,
 			     skb->len, 0))
diff --git a/drivers/staging/rtl8723au/include/ieee80211.h b/drivers/staging/rtl8723au/include/ieee80211.h
index 3aa40a3..634102e 100644
--- a/drivers/staging/rtl8723au/include/ieee80211.h
+++ b/drivers/staging/rtl8723au/include/ieee80211.h
@@ -266,7 +266,7 @@
 
 /* Represent channel details, subset of ieee80211_channel */
 struct rtw_ieee80211_channel {
-	/* enum ieee80211_band band; */
+	/* enum nl80211_band band; */
 	/* u16 center_freq; */
 	u16 hw_value;
 	u32 flags;
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 12d1844..0da559d 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -39,7 +39,7 @@
 }
 
 #define CHAN2G(_channel, _freq, _flags) {			\
-	.band			= IEEE80211_BAND_2GHZ,		\
+	.band			= NL80211_BAND_2GHZ,		\
 	.center_freq		= (_freq),			\
 	.hw_value		= (_channel),			\
 	.flags			= (_flags),			\
@@ -48,7 +48,7 @@
 }
 
 #define CHAN5G(_channel, _flags) {				\
-	.band			= IEEE80211_BAND_5GHZ,		\
+	.band			= NL80211_BAND_5GHZ,		\
 	.center_freq		= 5000 + (5 * (_channel)),	\
 	.hw_value		= (_channel),			\
 	.flags			= (_flags),			\
@@ -143,15 +143,15 @@
 }
 
 static struct ieee80211_supported_band *
-rtw_spt_band_alloc(enum ieee80211_band band)
+rtw_spt_band_alloc(enum nl80211_band band)
 {
 	struct ieee80211_supported_band *spt_band = NULL;
 	int n_channels, n_bitrates;
 
-	if (band == IEEE80211_BAND_2GHZ) {
+	if (band == NL80211_BAND_2GHZ) {
 		n_channels = RTW_2G_CHANNELS_NUM;
 		n_bitrates = RTW_G_RATES_NUM;
-	} else if (band == IEEE80211_BAND_5GHZ) {
+	} else if (band == NL80211_BAND_5GHZ) {
 		n_channels = RTW_5G_CHANNELS_NUM;
 		n_bitrates = RTW_A_RATES_NUM;
 	} else {
@@ -176,10 +176,10 @@
 	spt_band->n_channels = n_channels;
 	spt_band->n_bitrates = n_bitrates;
 
-	if (band == IEEE80211_BAND_2GHZ) {
+	if (band == NL80211_BAND_2GHZ) {
 		rtw_2g_channels_init(spt_band->channels);
 		rtw_2g_rates_init(spt_band->bitrates);
-	} else if (band == IEEE80211_BAND_5GHZ) {
+	} else if (band == NL80211_BAND_5GHZ) {
 		rtw_5g_channels_init(spt_band->channels);
 		rtw_5g_rates_init(spt_band->bitrates);
 	}
@@ -257,10 +257,10 @@
 	channel = pnetwork->network.DSConfig;
 	if (channel <= RTW_CH_MAX_2G_CHANNEL)
 		freq = ieee80211_channel_to_frequency(channel,
-						      IEEE80211_BAND_2GHZ);
+						      NL80211_BAND_2GHZ);
 	else
 		freq = ieee80211_channel_to_frequency(channel,
-						      IEEE80211_BAND_5GHZ);
+						      NL80211_BAND_5GHZ);
 
 	notify_channel = ieee80211_get_channel(wiphy, freq);
 
@@ -322,11 +322,11 @@
 		if (channel <= RTW_CH_MAX_2G_CHANNEL)
 			freq =
 			    ieee80211_channel_to_frequency(channel,
-							   IEEE80211_BAND_2GHZ);
+							   NL80211_BAND_2GHZ);
 		else
 			freq =
 			    ieee80211_channel_to_frequency(channel,
-							   IEEE80211_BAND_5GHZ);
+							   NL80211_BAND_5GHZ);
 
 		notify_channel = ieee80211_get_channel(wiphy, freq);
 
@@ -2360,10 +2360,10 @@
 	channel = pmlmeext->cur_channel;
 	if (channel <= RTW_CH_MAX_2G_CHANNEL)
 		freq = ieee80211_channel_to_frequency(channel,
-						      IEEE80211_BAND_2GHZ);
+						      NL80211_BAND_2GHZ);
 	else
 		freq = ieee80211_channel_to_frequency(channel,
-						      IEEE80211_BAND_5GHZ);
+						      NL80211_BAND_5GHZ);
 
 	cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pmgmt_frame, frame_len,
 			 0);
@@ -2392,10 +2392,10 @@
 	channel = pmlmeext->cur_channel;
 	if (channel <= RTW_CH_MAX_2G_CHANNEL)
 		freq = ieee80211_channel_to_frequency(channel,
-						      IEEE80211_BAND_2GHZ);
+						      NL80211_BAND_2GHZ);
 	else
 		freq = ieee80211_channel_to_frequency(channel,
-						      IEEE80211_BAND_5GHZ);
+						      NL80211_BAND_5GHZ);
 
 	mgmt.frame_control =
 		cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
@@ -3109,7 +3109,7 @@
 };
 
 static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap,
-				       enum ieee80211_band band, u8 rf_type)
+				       enum nl80211_band band, u8 rf_type)
 {
 
 #define MAX_BIT_RATE_40MHZ_MCS15	300	/* Mbps */
@@ -3133,7 +3133,7 @@
 	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 
 	/*
-	 *hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+	 *hw->wiphy->bands[NL80211_BAND_2GHZ]
 	 *base on ant_num
 	 *rx_mask: RX mask
 	 *if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7
@@ -3173,19 +3173,19 @@
 
 	/* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
 	{
-		bands = wiphy->bands[IEEE80211_BAND_2GHZ];
+		bands = wiphy->bands[NL80211_BAND_2GHZ];
 		if (bands)
 			rtw_cfg80211_init_ht_capab(&bands->ht_cap,
-						   IEEE80211_BAND_2GHZ,
+						   NL80211_BAND_2GHZ,
 						   rf_type);
 	}
 
 	/* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
 	{
-		bands = wiphy->bands[IEEE80211_BAND_5GHZ];
+		bands = wiphy->bands[NL80211_BAND_5GHZ];
 		if (bands)
 			rtw_cfg80211_init_ht_capab(&bands->ht_cap,
-						   IEEE80211_BAND_5GHZ,
+						   NL80211_BAND_5GHZ,
 						   rf_type);
 	}
 }
@@ -3224,11 +3224,11 @@
 	wiphy->n_cipher_suites = ARRAY_SIZE(rtw_cipher_suites);
 
 	/* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
-	wiphy->bands[IEEE80211_BAND_2GHZ] =
-	    rtw_spt_band_alloc(IEEE80211_BAND_2GHZ);
+	wiphy->bands[NL80211_BAND_2GHZ] =
+	    rtw_spt_band_alloc(NL80211_BAND_2GHZ);
 	/* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
-	wiphy->bands[IEEE80211_BAND_5GHZ] =
-	    rtw_spt_band_alloc(IEEE80211_BAND_5GHZ);
+	wiphy->bands[NL80211_BAND_5GHZ] =
+	    rtw_spt_band_alloc(NL80211_BAND_5GHZ);
 
 	wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 	wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAVE_AP_SME;
@@ -3313,8 +3313,8 @@
 	if (!wdev)
 		return;
 
-	kfree(wdev->wiphy->bands[IEEE80211_BAND_2GHZ]);
-	kfree(wdev->wiphy->bands[IEEE80211_BAND_5GHZ]);
+	kfree(wdev->wiphy->bands[NL80211_BAND_2GHZ]);
+	kfree(wdev->wiphy->bands[NL80211_BAND_5GHZ]);
 
 	wiphy_free(wdev->wiphy);
 
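Every change in this file is the same mechanical rename: enum ieee80211_band is gone and drivers use enum nl80211_band from the nl80211 uapi header, both for indexing wiphy->bands[] and for ieee80211_channel_to_frequency(). A minimal sketch of the post-rename indexing, assuming only the renamed enum and NUM_NL80211_BANDS:

    /* Sketch (kernel context): iterate the supported bands by the new
     * enum; NUM_NL80211_BANDS bounds the wiphy->bands[] array. */
    static void dump_bands(struct wiphy *wiphy)
    {
            enum nl80211_band band;

            for (band = 0; band < NUM_NL80211_BANDS; band++) {
                    struct ieee80211_supported_band *sband = wiphy->bands[band];

                    if (!sband)
                            continue;
                    pr_info("band %d: %d channels\n", band, sband->n_channels);
            }
    }
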
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 5fbda7b..9cf4f84 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -2425,7 +2425,7 @@
 {
 	unsigned int eax, ebx, ecx, edx;
 
-	if (cpu_has_hypervisor) {
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		/* check the ID */
 		cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
 		return  (ebx == UNISYS_SPAR_ID_EBX) &&
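cpu_has_hypervisor was one of the cpu_has_* macros retired in favor of boot_cpu_has(); the underlying feature bit, X86_FEATURE_HYPERVISOR, mirrors CPUID leaf 1 ECX bit 31, which a hypervisor sets for its guests. A standalone userspace analogue of the same check (not the kernel API), for x86 with GCC/Clang:

    /* Userspace analogue: CPUID.1:ECX bit 31 is the "hypervisor present"
     * bit that X86_FEATURE_HYPERVISOR reflects. */
    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;
            printf("hypervisor present: %s\n",
                   (ecx & (1u << 31)) ? "yes" : "no");
            return 0;
    }
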
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index df992c3..6d361201 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -17,7 +17,7 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/ctype.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/slab.h>
 #include <linux/vme.h>
 
@@ -25,16 +25,11 @@
 
 static const char driver_name[] = "pio2_gpio";
 
-static struct pio2_card *gpio_to_pio2_card(struct gpio_chip *chip)
-{
-	return container_of(chip, struct pio2_card, gc);
-}
-
 static int pio2_gpio_get(struct gpio_chip *chip, unsigned int offset)
 {
 	u8 reg;
 	int retval;
-	struct pio2_card *card = gpio_to_pio2_card(chip);
+	struct pio2_card *card = gpiochip_get_data(chip);
 
 	if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
 	    (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -71,7 +66,7 @@
 {
 	u8 reg;
 	int retval;
-	struct pio2_card *card = gpio_to_pio2_card(chip);
+	struct pio2_card *card = gpiochip_get_data(chip);
 
 	if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
 	    (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -100,7 +95,7 @@
 static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
 {
 	int data;
-	struct pio2_card *card = gpio_to_pio2_card(chip);
+	struct pio2_card *card = gpiochip_get_data(chip);
 
 	if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
 	    (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -119,7 +114,7 @@
 static int pio2_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
 {
 	int data;
-	struct pio2_card *card = gpio_to_pio2_card(chip);
+	struct pio2_card *card = gpiochip_get_data(chip);
 
 	if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
 	    (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -205,7 +200,7 @@
 	card->gc.set = pio2_gpio_set;
 
 	/* This function adds a memory mapped GPIO chip */
-	retval = gpiochip_add(&card->gc);
+	retval = gpiochip_add_data(&card->gc, card);
 	if (retval) {
 		dev_err(&card->vdev->dev, "Unable to register GPIO\n");
 		kfree(card->gc.label);
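This driver no longer needs its gpio_to_pio2_card() helper: the private pointer is registered once with gpiochip_add_data() and fetched in every callback with gpiochip_get_data(), so the gpio_chip does not have to be embedded in the driver structure for container_of() to work. A small standalone model of that data-pointer pattern follows; the struct and function names are illustrative, not the gpiolib internals.

    /* Minimal model of the add_data/get_data pattern. */
    #include <stdio.h>

    struct gpio_chip_model { void *data; };

    static int chip_add_data(struct gpio_chip_model *chip, void *data)
    {
            chip->data = data;      /* what gpiochip_add_data() records */
            return 0;
    }

    static void *chip_get_data(struct gpio_chip_model *chip)
    {
            return chip->data;      /* what gpiochip_get_data() returns */
    }

    struct card { int id; struct gpio_chip_model gc; };

    int main(void)
    {
            struct card c = { .id = 42 };

            chip_add_data(&c.gc, &c);
            printf("card id via get_data: %d\n",
                   ((struct card *)chip_get_data(&c.gc))->id);
            return 0;
    }
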
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 9ac1ef9..b7d43a5 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -144,7 +144,7 @@
 			ch[i].flags = IEEE80211_CHAN_NO_HT40;
 		}
 
-		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+		priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
 						&vnt_supported_5ghz_band;
 	/* fallthrough */
 	case RF_RFMD2959:
@@ -159,7 +159,7 @@
 			ch[i].flags = IEEE80211_CHAN_NO_HT40;
 		}
 
-		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+		priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
 						&vnt_supported_2ghz_band;
 		break;
 	}
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index c3eea07..4941640 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -812,7 +812,7 @@
 		else if (fb_option & FIFOCTL_AUTO_FB_1)
 			tx_rate = fallback_rate1[tx_rate][retry];
 
-		if (info->band == IEEE80211_BAND_5GHZ)
+		if (info->band == NL80211_BAND_5GHZ)
 			idx = tx_rate - RATE_6M;
 		else
 			idx = tx_rate;
@@ -1290,7 +1290,7 @@
 	    (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
 		set_channel(priv, conf->chandef.chan);
 
-		if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+		if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
 			bb_type = BB_TYPE_11A;
 		else
 			bb_type = BB_TYPE_11G;
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 1a2dda0..e4c3165 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1307,7 +1307,7 @@
 	}
 
 	if (current_rate > RATE_11M) {
-		if (info->band == IEEE80211_BAND_5GHZ) {
+		if (info->band == NL80211_BAND_5GHZ) {
 			pkt_type = PK_TYPE_11A;
 		} else {
 			if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index a0fe288..a4299f4 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -153,7 +153,7 @@
 			ch[i].flags = IEEE80211_CHAN_NO_HT40;
 		}
 
-		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+		priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
 						&vnt_supported_5ghz_band;
 	/* fallthrough */
 	case RF_AL2230:
@@ -167,7 +167,7 @@
 			ch[i].flags = IEEE80211_CHAN_NO_HT40;
 		}
 
-		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+		priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
 						&vnt_supported_2ghz_band;
 		break;
 	}
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index 8d05acb..73538fb 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -97,7 +97,7 @@
 		else if (context->fb_option == AUTO_FB_1)
 			tx_rate = fallback_rate1[tx_rate][retry];
 
-		if (info->band == IEEE80211_BAND_5GHZ)
+		if (info->band == NL80211_BAND_5GHZ)
 			idx = tx_rate - RATE_6M;
 		else
 			idx = tx_rate;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index f9afab7..fc5fe4e 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -662,7 +662,7 @@
 			(conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
 		vnt_set_channel(priv, conf->chandef.chan->hw_value);
 
-		if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+		if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
 			bb_type = BB_TYPE_11A;
 		else
 			bb_type = BB_TYPE_11G;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index b74e3200..aa59e7f 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -813,7 +813,7 @@
 	}
 
 	if (current_rate > RATE_11M) {
-		if (info->band == IEEE80211_BAND_5GHZ) {
+		if (info->band == NL80211_BAND_5GHZ) {
 			pkt_type = PK_TYPE_11A;
 		} else {
 			if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 448a5c8..544917d 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -102,7 +102,7 @@
 u8 wilc_initialized = 1;
 
 #define CHAN2G(_channel, _freq, _flags) {	 \
-		.band             = IEEE80211_BAND_2GHZ, \
+		.band             = NL80211_BAND_2GHZ, \
 		.center_freq      = (_freq),		 \
 		.hw_value         = (_channel),		 \
 		.flags            = (_flags),		 \
@@ -241,7 +241,7 @@
 			struct ieee80211_channel *channel;
 
 			if (network_info) {
-				freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
+				freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ);
 				channel = ieee80211_get_channel(wiphy, freq);
 
 				rssi = get_rssi_avg(network_info);
@@ -409,7 +409,7 @@
 				return;
 
 			if (network_info) {
-				s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
+				s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ);
 				channel = ieee80211_get_channel(wiphy, s32Freq);
 
 				if (!channel)
@@ -1451,7 +1451,7 @@
 			return;
 		}
 	} else {
-		s32Freq = ieee80211_channel_to_frequency(curr_channel, IEEE80211_BAND_2GHZ);
+		s32Freq = ieee80211_channel_to_frequency(curr_channel, NL80211_BAND_2GHZ);
 
 		if (ieee80211_is_action(buff[FRAME_TYPE_ID])) {
 			if (priv->bCfgScanning && time_after_eq(jiffies, (unsigned long)pstrWFIDrv->p2p_timeout)) {
@@ -2246,7 +2246,7 @@
 	WILC_WFI_band_2ghz.ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
 	WILC_WFI_band_2ghz.ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
 
-	wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &WILC_WFI_band_2ghz;
+	wdev->wiphy->bands[NL80211_BAND_2GHZ] = &WILC_WFI_band_2ghz;
 
 	return wdev;
 
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 8bad018..2438cf7 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -415,7 +415,7 @@
 		ie_len = ie_buf[1] + 2;
 		memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
 		freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
-						      IEEE80211_BAND_2GHZ);
+						      NL80211_BAND_2GHZ);
 		bss = cfg80211_inform_bss(wiphy,
 			ieee80211_get_channel(wiphy, freq),
 			CFG80211_BSS_FTYPE_UNKNOWN,
@@ -758,9 +758,9 @@
 	priv->band.n_channels = ARRAY_SIZE(prism2_channels);
 	priv->band.bitrates = priv->rates;
 	priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
-	priv->band.band = IEEE80211_BAND_2GHZ;
+	priv->band.band = NL80211_BAND_2GHZ;
 	priv->band.ht_cap.ht_supported = false;
-	wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+	wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
 
 	set_wiphy_dev(wiphy, dev);
 	wiphy->privid = prism2_wiphy_privid;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 88255ce..1f9dfba 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -393,7 +393,7 @@
 		goto failed;
 	}
 
-	netdev->trans_start = jiffies;
+	netif_trans_update(netdev);
 
 	netdev->stats.tx_packets++;
 	/* count only the packet payload */
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 667406f..e116f0e 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -293,7 +293,7 @@
 		pr_debug("[server] MD5 Digests do not match!\n\n");
 		goto out;
 	} else
-		pr_debug("[server] MD5 Digests match, CHAP connetication"
+		pr_debug("[server] MD5 Digests match, CHAP connection"
 				" successful.\n\n");
 	/*
 	 * One way authentication has succeeded, return now if mutual
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index a24443b..97e5b69 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -779,14 +779,6 @@
 	return 0;
 }
 
-static void lio_target_cleanup_nodeacl( struct se_node_acl *se_nacl)
-{
-	struct iscsi_node_acl *acl = container_of(se_nacl,
-			struct iscsi_node_acl, se_node_acl);
-
-	configfs_remove_default_groups(&acl->se_node_acl.acl_fabric_stat_group);
-}
-
 /* End items for lio_target_acl_cit */
 
 /* Start items for lio_target_tpg_attrib_cit */
@@ -1247,6 +1239,16 @@
 	if (IS_ERR(tiqn))
 		return ERR_CAST(tiqn);
 
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+			" %s\n", name);
+	return &tiqn->tiqn_wwn;
+}
+
+static void lio_target_add_wwn_groups(struct se_wwn *wwn)
+{
+	struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+
 	config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
 			"iscsi_instance", &iscsi_stat_instance_cit);
 	configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group,
@@ -1271,12 +1273,6 @@
 			"iscsi_logout_stats", &iscsi_stat_logout_cit);
 	configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
 			&tiqn->tiqn_wwn.fabric_stat_group);
-
-
-	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
-	pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
-			" %s\n", name);
-	return &tiqn->tiqn_wwn;
 }
 
 static void lio_target_call_coredeltiqn(
@@ -1284,8 +1280,6 @@
 {
 	struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
 
-	configfs_remove_default_groups(&tiqn->tiqn_wwn.fabric_stat_group);
-
 	pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
 			tiqn->tiqn);
 	iscsit_del_tiqn(tiqn);
@@ -1660,12 +1654,12 @@
 	.aborted_task			= lio_aborted_task,
 	.fabric_make_wwn		= lio_target_call_coreaddtiqn,
 	.fabric_drop_wwn		= lio_target_call_coredeltiqn,
+	.add_wwn_groups			= lio_target_add_wwn_groups,
 	.fabric_make_tpg		= lio_target_tiqn_addtpg,
 	.fabric_drop_tpg		= lio_target_tiqn_deltpg,
 	.fabric_make_np			= lio_target_call_addnptotpg,
 	.fabric_drop_np			= lio_target_call_delnpfromtpg,
 	.fabric_init_nodeacl		= lio_target_init_nodeacl,
-	.fabric_cleanup_nodeacl		= lio_target_cleanup_nodeacl,
 
 	.tfc_discovery_attrs		= lio_target_discovery_auth_attrs,
 	.tfc_wwn_attrs			= lio_target_wwn_attrs,
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 428b0d9..5772038 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1283,9 +1283,8 @@
 	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
 		      count->iov, count->iov_count, data);
 
-	while (total_rx < data) {
-		rx_loop = sock_recvmsg(conn->sock, &msg,
-				      (data - total_rx), MSG_WAITALL);
+	while (msg_data_left(&msg)) {
+		rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
 		if (rx_loop <= 0) {
 			pr_debug("rx_loop: %d total_rx: %d\n",
 				rx_loop, total_rx);
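The receive loop now leans on the iov_iter set up by iov_iter_kvec(): msg_data_left() reports how many bytes remain and sock_recvmsg() dropped its explicit size argument, so the hand-maintained (data - total_rx) bookkeeping disappears. Roughly, the resulting loop has this shape (kernel context, not a standalone unit):

    while (msg_data_left(&msg)) {
            int n = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);

            if (n <= 0)
                    break;          /* error or peer closed the connection */
            total_rx += n;
    }
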
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 1bd5c72..31a096a 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -338,10 +338,8 @@
 {
 	struct se_node_acl *se_nacl = container_of(to_config_group(item),
 			struct se_node_acl, acl_group);
-	struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf;
 
-	if (tf->tf_ops->fabric_cleanup_nodeacl)
-		tf->tf_ops->fabric_cleanup_nodeacl(se_nacl);
+	configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
 	core_tpg_del_initiator_node_acl(se_nacl);
 }
 
@@ -383,14 +381,6 @@
 	if (IS_ERR(se_nacl))
 		return ERR_CAST(se_nacl);
 
-	if (tf->tf_ops->fabric_init_nodeacl) {
-		int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
-		if (ret) {
-			core_tpg_del_initiator_node_acl(se_nacl);
-			return ERR_PTR(ret);
-		}
-	}
-
 	config_group_init_type_name(&se_nacl->acl_group, name,
 			&tf->tf_tpg_nacl_base_cit);
 
@@ -414,6 +404,15 @@
 	configfs_add_default_group(&se_nacl->acl_fabric_stat_group,
 			&se_nacl->acl_group);
 
+	if (tf->tf_ops->fabric_init_nodeacl) {
+		int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
+		if (ret) {
+			configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
+			core_tpg_del_initiator_node_acl(se_nacl);
+			return ERR_PTR(ret);
+		}
+	}
+
 	return &se_nacl->acl_group;
 }
 
@@ -892,6 +891,7 @@
 				struct se_wwn, wwn_group);
 	struct target_fabric_configfs *tf = wwn->wwn_tf;
 
+	configfs_remove_default_groups(&wwn->fabric_stat_group);
 	tf->tf_ops->fabric_drop_wwn(wwn);
 }
 
@@ -945,6 +945,8 @@
 			&tf->tf_wwn_fabric_stats_cit);
 	configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
 
+	if (tf->tf_ops->add_wwn_groups)
+		tf->tf_ops->add_wwn_groups(wwn);
 	return &wwn->wwn_group;
 }
 
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 026a758..7c4efb4 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -687,10 +687,10 @@
 		 * Force writethrough using WRITE_FUA if a volatile write cache
 		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
-		if (q->flush_flags & REQ_FUA) {
+		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
 			if (cmd->se_cmd_flags & SCF_FUA)
 				rw = WRITE_FUA;
-			else if (!(q->flush_flags & REQ_FLUSH))
+			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 				rw = WRITE_FUA;
 			else
 				rw = WRITE;
@@ -836,7 +836,7 @@
 	struct block_device *bd = ib_dev->ibd_bd;
 	struct request_queue *q = bdev_get_queue(bd);
 
-	return q->flush_flags & REQ_FLUSH;
+	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
 }
 
 static const struct target_backend_ops iblock_ops = {
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c37eedc..d89d60c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -338,31 +338,9 @@
 	  hot & critical. The critical trip point default value is set by
 	  underlying BIOS/Firmware.
 
-config INT340X_THERMAL
-	tristate "ACPI INT340X thermal drivers"
-	depends on X86 && ACPI
-	select THERMAL_GOV_USER_SPACE
-	select ACPI_THERMAL_REL
-	select ACPI_FAN
-	select INTEL_SOC_DTS_IOSF_CORE
-	select THERMAL_WRITABLE_TRIPS
-	help
-	  Newer laptops and tablets that use ACPI may have thermal sensors and
-	  other devices with thermal control capabilities outside the core
-	  CPU/SOC, for thermal safety reasons.
-	  They are exposed for the OS to use via the INT3400 ACPI device object
-	  as the master, and INT3401~INT340B ACPI device objects as the slaves.
-	  Enable this to expose the temperature information and cooling ability
-	  from these objects to userspace via the normal thermal framework.
-	  This means that a wide range of applications and GUI widgets can show
-	  the information to the user or use this information for making
-	  decisions. For example, the Intel Thermal Daemon can use this
-	  information to allow the user to select his laptop to run without
-	  turning on the fans.
-
-config ACPI_THERMAL_REL
-	tristate
-	depends on ACPI
+menu "ACPI INT340X thermal drivers"
+source drivers/thermal/int340x_thermal/Kconfig
+endmenu
 
 config INTEL_PCH_THERMAL
 	tristate "Intel PCH Thermal Reporting Driver"
@@ -376,6 +354,8 @@
 	tristate "Temperature sensor driver for mediatek SoCs"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
 	depends on HAS_IOMEM
+	depends on NVMEM || NVMEM=n
+	depends on RESET_CONTROLLER
 	default y
 	help
 	  Enable this option if you want to have support for thermal management
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 36d0729..5e820b5 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -68,12 +68,12 @@
 	 * Every step equals (1 * 200) / 255 celsius, and finally
 	 * need convert to millicelsius.
 	 */
-	return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
+	return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
 }
 
 static inline long _temp_to_step(long temp)
 {
-	return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200);
+	return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
 }
 
 static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
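The old conversion did the 200/255 scaling in whole degrees Celsius and only then multiplied by 1000, so anything below a full degree was truncated away (each ADC step is roughly 0.784 degrees). Doing the arithmetic in millicelsius end to end keeps that resolution, and the reverse conversion is adjusted to match. A standalone comparison, with BASE standing in for HISI_TEMP_BASE (see the driver for the real constant):

    #include <stdio.h>

    #define BASE 0  /* hypothetical base value, degrees C */

    static long step_to_temp_old(long step)
    {
            return (BASE + (step * 200 / 255)) * 1000;      /* truncates to 1 C */
    }

    static long step_to_temp_new(long step)
    {
            return BASE * 1000 + (step * 200000 / 255);     /* keeps mC steps */
    }

    int main(void)
    {
            long step;

            for (step = 0; step < 4; step++)
                    printf("step %ld: old %ld mC, new %ld mC\n",
                           step, step_to_temp_old(step), step_to_temp_new(step));
            return 0;
    }
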
diff --git a/drivers/thermal/int340x_thermal/Kconfig b/drivers/thermal/int340x_thermal/Kconfig
new file mode 100644
index 0000000..0582bd1
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/Kconfig
@@ -0,0 +1,42 @@
+#
+# ACPI INT340x thermal drivers configuration
+#
+
+config INT340X_THERMAL
+	tristate "ACPI INT340X thermal drivers"
+	depends on X86 && ACPI
+	select THERMAL_GOV_USER_SPACE
+	select ACPI_THERMAL_REL
+	select ACPI_FAN
+	select INTEL_SOC_DTS_IOSF_CORE
+	help
+	  Newer laptops and tablets that use ACPI may have thermal sensors and
+	  other devices with thermal control capabilities outside the core
+	  CPU/SOC, for thermal safety reasons.
+	  They are exposed for the OS to use via the INT3400 ACPI device object
+	  as the master, and INT3401~INT340B ACPI device objects as the slaves.
+	  Enable this to expose the temperature information and cooling ability
+	  from these objects to userspace via the normal thermal framework.
+	  This means that a wide range of applications and GUI widgets can show
+	  the information to the user or use this information for making
+	  decisions. For example, the Intel Thermal Daemon can use this
+	  information to allow the user to select his laptop to run without
+	  turning on the fans.
+
+config ACPI_THERMAL_REL
+	tristate
+	depends on ACPI
+
+if INT340X_THERMAL
+
+config INT3406_THERMAL
+	tristate "ACPI INT3406 display thermal driver"
+	depends on ACPI_VIDEO
+	help
+	  The display thermal device represents the LED/LCD display panel
+	  that may or may not include touch support. The main function of
+	  the display thermal device is to allow control of the display
+	  brightness in order to address a thermal condition or to reduce
+	  power consumed by display device.
+
+endif
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index ba77a34..df0df05 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -3,4 +3,5 @@
 obj-$(CONFIG_INT340X_THERMAL)	+= int3402_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)	+= int3403_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)	+= processor_thermal_device.o
+obj-$(CONFIG_INT3406_THERMAL)	+= int3406_thermal.o
 obj-$(CONFIG_ACPI_THERMAL_REL)	+= acpi_thermal_rel.o
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c
new file mode 100644
index 0000000..13d431c
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int3406_thermal.c
@@ -0,0 +1,236 @@
+/*
+ * INT3406 thermal driver for display participant device
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Authors: Aaron Lu <aaron.lu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/thermal.h>
+#include <acpi/video.h>
+
+#define INT3406_BRIGHTNESS_LIMITS_CHANGED	0x80
+
+struct int3406_thermal_data {
+	int upper_limit;
+	int upper_limit_index;
+	int lower_limit;
+	int lower_limit_index;
+	acpi_handle handle;
+	struct acpi_video_device_brightness *br;
+	struct backlight_device *raw_bd;
+	struct thermal_cooling_device *cooling_dev;
+};
+
+static int int3406_thermal_to_raw(int level, struct int3406_thermal_data *d)
+{
+	int max_level = d->br->levels[d->br->count - 1];
+	int raw_max = d->raw_bd->props.max_brightness;
+
+	return level * raw_max / max_level;
+}
+
+static int int3406_thermal_to_acpi(int level, struct int3406_thermal_data *d)
+{
+	int raw_max = d->raw_bd->props.max_brightness;
+	int max_level = d->br->levels[d->br->count - 1];
+
+	return level * max_level / raw_max;
+}
+
+static int
+int3406_thermal_get_max_state(struct thermal_cooling_device *cooling_dev,
+			      unsigned long *state)
+{
+	struct int3406_thermal_data *d = cooling_dev->devdata;
+	int index = d->lower_limit_index ? d->lower_limit_index : 2;
+
+	*state = d->br->count - 1 - index;
+	return 0;
+}
+
+static int
+int3406_thermal_set_cur_state(struct thermal_cooling_device *cooling_dev,
+			      unsigned long state)
+{
+	struct int3406_thermal_data *d = cooling_dev->devdata;
+	int level, raw_level;
+
+	if (state > d->br->count - 3)
+		return -EINVAL;
+
+	state = d->br->count - 1 - state;
+	level = d->br->levels[state];
+
+	if ((d->upper_limit && level > d->upper_limit) ||
+	    (d->lower_limit && level < d->lower_limit))
+		return -EINVAL;
+
+	raw_level = int3406_thermal_to_raw(level, d);
+	return backlight_device_set_brightness(d->raw_bd, raw_level);
+}
+
+static int
+int3406_thermal_get_cur_state(struct thermal_cooling_device *cooling_dev,
+			      unsigned long *state)
+{
+	struct int3406_thermal_data *d = cooling_dev->devdata;
+	int raw_level, level, i;
+	int *levels = d->br->levels;
+
+	raw_level = d->raw_bd->props.brightness;
+	level = int3406_thermal_to_acpi(raw_level, d);
+
+	/*
+	 * There is no 1:1 mapping between the firmware interface levels and the
+	 * raw interface levels, so we have to find one that is close enough.
+	 */
+	for (i = 2; i < d->br->count; i++) {
+		if (level < levels[i]) {
+			if (i == 2)
+				break;
+			if ((level - levels[i - 1]) < (levels[i] - level))
+				i--;
+			break;
+		}
+	}
+
+	*state = d->br->count - 1 - i;
+	return 0;
+}
+
+static const struct thermal_cooling_device_ops video_cooling_ops = {
+	.get_max_state = int3406_thermal_get_max_state,
+	.get_cur_state = int3406_thermal_get_cur_state,
+	.set_cur_state = int3406_thermal_set_cur_state,
+};
+
+static int int3406_thermal_get_index(int *array, int nr, int value)
+{
+	int i;
+
+	for (i = 0; i < nr; i++) {
+		if (array[i] == value)
+			break;
+	}
+	return i == nr ? -ENOENT : i;
+}
+
+static void int3406_thermal_get_limit(struct int3406_thermal_data *d)
+{
+	acpi_status status;
+	unsigned long long lower_limit, upper_limit;
+	int index;
+
+	status = acpi_evaluate_integer(d->handle, "DDDL", NULL, &lower_limit);
+	if (ACPI_SUCCESS(status)) {
+		index = int3406_thermal_get_index(d->br->levels, d->br->count,
+						  lower_limit);
+		if (index > 0) {
+			d->lower_limit = (int)lower_limit;
+			d->lower_limit_index = index;
+		}
+	}
+
+	status = acpi_evaluate_integer(d->handle, "DDPC", NULL, &upper_limit);
+	if (ACPI_SUCCESS(status)) {
+		index = int3406_thermal_get_index(d->br->levels, d->br->count,
+						  upper_limit);
+		if (index > 0) {
+			d->upper_limit = (int)upper_limit;
+			d->upper_limit_index = index;
+		}
+	}
+}
+
+static void int3406_notify(acpi_handle handle, u32 event, void *data)
+{
+	if (event == INT3406_BRIGHTNESS_LIMITS_CHANGED)
+		int3406_thermal_get_limit(data);
+}
+
+static int int3406_thermal_probe(struct platform_device *pdev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+	struct int3406_thermal_data *d;
+	struct backlight_device *bd;
+	int ret;
+
+	if (!ACPI_HANDLE(&pdev->dev))
+		return -ENODEV;
+
+	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	d->handle = ACPI_HANDLE(&pdev->dev);
+
+	bd = backlight_device_get_by_type(BACKLIGHT_RAW);
+	if (!bd)
+		return -ENODEV;
+	d->raw_bd = bd;
+
+	ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br);
+	if (ret)
+		return ret;
+
+	int3406_thermal_get_limit(d);
+
+	d->cooling_dev = thermal_cooling_device_register(acpi_device_bid(adev),
+							 d, &video_cooling_ops);
+	if (IS_ERR(d->cooling_dev))
+		goto err;
+
+	ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+					  int3406_notify, d);
+	if (ret)
+		goto err_cdev;
+
+	platform_set_drvdata(pdev, d);
+
+	return 0;
+
+err_cdev:
+	thermal_cooling_device_unregister(d->cooling_dev);
+err:
+	kfree(d->br);
+	return -ENODEV;
+}
+
+static int int3406_thermal_remove(struct platform_device *pdev)
+{
+	struct int3406_thermal_data *d = platform_get_drvdata(pdev);
+
+	thermal_cooling_device_unregister(d->cooling_dev);
+	kfree(d->br);
+	return 0;
+}
+
+static const struct acpi_device_id int3406_thermal_match[] = {
+	{"INT3406", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(acpi, int3406_thermal_match);
+
+static struct platform_driver int3406_thermal_driver = {
+	.probe = int3406_thermal_probe,
+	.remove = int3406_thermal_remove,
+	.driver = {
+		   .name = "int3406 thermal",
+		   .owner = THIS_MODULE,
+		   .acpi_match_table = int3406_thermal_match,
+		   },
+};
+
+module_platform_driver(int3406_thermal_driver);
+
+MODULE_DESCRIPTION("INT3406 Thermal driver");
+MODULE_LICENSE("GPL v2");
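The new INT3406 driver maps thermal cooling states onto ACPI backlight levels: state 0 is the brightest usable level and each higher state walks the level index down, which is why get_max_state() and set_cur_state() both work with d->br->count - 1 - index (the first two entries of the level table are special-cased, hence the count - 3 bound). A standalone sketch of that inversion, with count standing in for br->count:

    #include <stdio.h>

    int main(void)
    {
            int count = 8;          /* illustrative number of levels */
            int state;

            for (state = 0; state <= count - 3; state++) {
                    int index = count - 1 - state;  /* same inversion as the driver */

                    printf("cooling state %d -> brightness level index %d\n",
                           state, index);
            }
            return 0;
    }
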
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 3d93b1c..507632b 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -27,7 +27,6 @@
 #include <linux/thermal.h>
 #include <linux/reset.h>
 #include <linux/types.h>
-#include <linux/nvmem-consumer.h>
 
 /* AUXADC Registers */
 #define AUXADC_CON0_V		0x000
@@ -619,7 +618,7 @@
 
 module_platform_driver(mtk_thermal_driver);
 
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
 MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
 MODULE_DESCRIPTION("Mediatek thermal driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 49ac23d..d8ec44b 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -803,8 +803,8 @@
  * otherwise, it returns a corresponding ERR_PTR(). Caller must
  * check the return value with help of IS_ERR() helper.
  */
-static struct __thermal_zone *
-thermal_of_build_thermal_zone(struct device_node *np)
+static struct __thermal_zone
+__init *thermal_of_build_thermal_zone(struct device_node *np)
 {
 	struct device_node *child = NULL, *gchild;
 	struct __thermal_zone *tz;
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 1246aa6..2f1a863 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -301,7 +301,7 @@
 	capped_extra_power = 0;
 	extra_power = 0;
 	for (i = 0; i < num_actors; i++) {
-		u64 req_range = req_power[i] * power_range;
+		u64 req_range = (u64)req_power[i] * power_range;
 
 		granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
 							 total_req_power);
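req_power[] and power_range are both u32 here, so the old multiplication was performed in 32 bits and could wrap before being widened into the u64 req_range; casting one operand first forces a 64-bit multiply. A standalone illustration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t req_power = 100000, power_range = 100000;

            uint64_t wrapped = req_power * power_range;             /* 32-bit mul */
            uint64_t correct = (uint64_t)req_power * power_range;   /* 64-bit mul */

            printf("wrapped: %llu\ncorrect: %llu\n",
                   (unsigned long long)wrapped, (unsigned long long)correct);
            return 0;
    }
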
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d4b5465..5133cd1 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -688,7 +688,7 @@
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
 	int trip, ret;
-	unsigned long temperature;
+	int temperature;
 
 	if (!tz->ops->set_trip_temp)
 		return -EPERM;
@@ -696,7 +696,7 @@
 	if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
 		return -EINVAL;
 
-	if (kstrtoul(buf, 10, &temperature))
+	if (kstrtoint(buf, 10, &temperature))
 		return -EINVAL;
 
 	ret = tz->ops->set_trip_temp(tz, trip, temperature);
@@ -899,9 +899,9 @@
 {
 	struct thermal_zone_device *tz = to_thermal_zone(dev);
 	int ret = 0;
-	unsigned long temperature;
+	int temperature;
 
-	if (kstrtoul(buf, 10, &temperature))
+	if (kstrtoint(buf, 10, &temperature))
 		return -EINVAL;
 
 	if (!tz->ops->set_emul_temp) {
@@ -959,7 +959,7 @@
 	struct thermal_zone_device *tz = to_thermal_zone(dev);		\
 									\
 	if (tz->tzp)							\
-		return sprintf(buf, "%u\n", tz->tzp->name);		\
+		return sprintf(buf, "%d\n", tz->tzp->name);		\
 	else								\
 		return -EIO;						\
 	}								\
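Trip temperatures and the emulated temperature are signed millicelsius values, so parsing them with kstrtoul meant a string such as "-40000" was refused outright; kstrtoint accepts the leading minus and matches the int the zone ops take, and the %u in the tzp attribute printout becomes %d for the same reason. The store path now effectively reads (sketch, kernel context):

    int temperature;

    if (kstrtoint(buf, 10, &temperature))
            return -EINVAL;         /* kstrtoul would have refused "-40000" */

    ret = tz->ops->set_trip_temp(tz, trip, temperature);
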
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 799634b..1146ff4 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -249,7 +249,7 @@
 		 * cfg_read/cfg_write.
 		 */
 		tb_ctl_WARN(ctl,
-			"CFG_ERROR(%llx:%x): Invalid config space of offset\n",
+			"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
 			res->response_route, res->response_port);
 		return;
 	case TB_CFG_ERROR_NO_SUCH_PORT:
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 0dde34e..2b9602c 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -221,7 +221,7 @@
 	u8 micro1:4;
 	u8 micro3;
 
-	/* BYTES 5-6, TODO: verify (find hardware that has these set) */
+	/* BYTES 6-7, TODO: verify (find hardware that has these set) */
 	u8 peer_port_rid:4;
 	u8 unknown3:3;
 	bool has_peer_port:1;
@@ -388,6 +388,11 @@
 		sw->ports[4].link_nr = 1;
 		sw->ports[3].dual_link_port = &sw->ports[4];
 		sw->ports[4].dual_link_port = &sw->ports[3];
+
+		/* Port 5 is inaccessible on this gen 1 controller */
+		if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
+			sw->ports[5].disabled = true;
+
 		return 0;
 	}
 
@@ -444,6 +449,7 @@
 	return tb_drom_parse_entries(sw);
 err:
 	kfree(sw->drom);
+	sw->drom = NULL;
 	return -EIO;
 
 }
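On a DROM parse error the buffer is freed but sw->drom previously kept pointing at it, so a later cleanup or re-read could act on a stale pointer. Resetting it to NULL makes the error path safe to hit more than once, since kfree(NULL) is a no-op. A standalone sketch of the idiom, with free() standing in for kfree():

    #include <stdlib.h>

    struct sw { unsigned char *drom; };

    static void drom_cleanup(struct sw *sw)
    {
            free(sw->drom);
            sw->drom = NULL;        /* safe to call drom_cleanup() again */
    }

    int main(void)
    {
            struct sw sw = { .drom = malloc(256) };

            drom_cleanup(&sw);
            drom_cleanup(&sw);      /* no double free thanks to the NULL reset */
            return 0;
    }
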
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 20a41f7..9c15344 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -37,7 +37,8 @@
  */
 static void ring_interrupt_active(struct tb_ring *ring, bool active)
 {
-	int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32;
+	int reg = REG_RING_INTERRUPT_BASE +
+		  ring_interrupt_index(ring) / 32 * 4;
 	int bit = ring_interrupt_index(ring) & 31;
 	int mask = 1 << bit;
 	u32 old, new;
@@ -564,7 +565,7 @@
 	/* cannot fail - table is allocated bin pcim_iomap_regions */
 	nhi->iobase = pcim_iomap_table(pdev)[0];
 	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
-	if (nhi->hop_count != 12)
+	if (nhi->hop_count != 12 && nhi->hop_count != 32)
 		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
 			 nhi->hop_count);
 	INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
@@ -633,16 +634,24 @@
 static struct pci_device_id nhi_ids[] = {
 	/*
 	 * We have to specify class, the TB bridges use the same device and
-	 * vendor (sub)id.
+	 * vendor (sub)id on gen 1 and gen 2 controllers.
 	 */
 	{
 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
-		.vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547,
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
 		.subvendor = 0x2222, .subdevice = 0x1111,
 	},
 	{
 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
-		.vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+		.subvendor = 0x2222, .subdevice = 0x1111,
+	},
+	{
+		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+		.vendor = PCI_VENDOR_ID_INTEL,
+		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
 		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 	},
 	{ 0,}
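The ring_interrupt_active() fix is about units: REG_RING_INTERRUPT_BASE is followed by one 32-bit register per 32 rings, so the interrupt index has to become a byte offset. index / 32 picks the register and the new * 4 converts that register number into bytes, while index & 31 still selects the bit; without the multiply, every ring past the first 32 poked the wrong register. A standalone check of the arithmetic (the base address below is illustrative only):

    #include <stdio.h>

    #define REG_RING_INTERRUPT_BASE 0x1000  /* illustrative value only */

    int main(void)
    {
            int index;

            for (index = 0; index < 70; index += 33) {
                    unsigned int reg = REG_RING_INTERRUPT_BASE + index / 32 * 4;
                    int bit = index & 31;

                    printf("ring index %2d -> reg %#x, bit %d\n", index, reg, bit);
            }
            return 0;
    }
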
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index aeb9829..1e116f5 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -293,9 +293,9 @@
 	if (active) {
 		data = data & 0xFFFFFF83;
 		switch (sw->config.device_id) {
-		case 0x1513:
-		case 0x151a:
-		case 0x1549:
+		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
+		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
 			break;
 		default:
 			data |= 4;
@@ -350,7 +350,7 @@
 		return NULL;
 
 	sw->tb = tb;
-	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, 2, 0, 5))
+	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
 		goto err;
 	tb_info(tb,
 		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
@@ -370,7 +370,9 @@
 		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
 			   sw->config.vendor_id);
 
-	if (sw->config.device_id != 0x1547 && sw->config.device_id != 0x1549)
+	if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
+	    sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+	    sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE)
 		tb_sw_warn(sw, "unsupported switch device id %#x\n",
 			   sw->config.device_id);
 
@@ -425,9 +427,9 @@
 }
 
 /**
- * tb_sw_set_unpplugged() - set is_unplugged on switch and downstream switches
+ * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
  */
-void tb_sw_set_unpplugged(struct tb_switch *sw)
+void tb_sw_set_unplugged(struct tb_switch *sw)
 {
 	int i;
 	if (sw == sw->tb->root_switch) {
@@ -441,7 +443,7 @@
 	sw->is_unplugged = true;
 	for (i = 0; i <= sw->config.max_port_number; i++) {
 		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
-			tb_sw_set_unpplugged(sw->ports[i].remote->sw);
+			tb_sw_set_unplugged(sw->ports[i].remote->sw);
 	}
 }
 
@@ -483,7 +485,7 @@
 			|| tb_switch_resume(port->remote->sw)) {
 			tb_port_warn(port,
 				     "lost during suspend, disconnecting\n");
-			tb_sw_set_unpplugged(port->remote->sw);
+			tb_sw_set_unplugged(port->remote->sw);
 		}
 	}
 	return 0;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index d2c3fe3..24b6d30 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -246,7 +246,7 @@
 	if (ev->unplug) {
 		if (port->remote) {
 			tb_port_info(port, "unplugged\n");
-			tb_sw_set_unpplugged(port->remote->sw);
+			tb_sw_set_unplugged(port->remote->sw);
 			tb_free_invalid_tunnels(tb);
 			tb_switch_free(port->remote->sw);
 			port->remote = NULL;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 8b0d7cf..61d57ba 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -226,7 +226,7 @@
 void tb_switch_suspend(struct tb_switch *sw);
 int tb_switch_resume(struct tb_switch *sw);
 int tb_switch_reset(struct tb *tb, u64 route);
-void tb_sw_set_unpplugged(struct tb_switch *sw);
+void tb_sw_set_unplugged(struct tb_switch *sw);
 struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
 
 int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6577af7..1e2a4a8 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -30,7 +30,7 @@
 	TB_CAP_I2C		= 0x0005,
 	TB_CAP_PLUG_EVENTS	= 0x0105, /* also EEPROM */
 	TB_CAP_TIME2		= 0x0305,
-	TB_CAL_IECS		= 0x0405,
+	TB_CAP_IECS		= 0x0405,
 	TB_CAP_LINK_CONTROLLER	= 0x0605, /* also IECS */
 };
 
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index c016207..0c27a00 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2662,7 +2662,7 @@
 	STATS(net).tx_bytes += skb->len;
 	gsm_dlci_data_kick(dlci);
 	/* And tell the kernel when the last transmit started. */
-	net->trans_start = jiffies;
+	netif_trans_update(net);
 	muxnet_put(mux_net);
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e16a49b..cf0dc51 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -626,7 +626,7 @@
  */
 
 static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
-		struct inode *ptm_inode, int idx)
+		struct file *file, int idx)
 {
 	/* Master must be open via /dev/ptmx */
 	return ERR_PTR(-EIO);
@@ -642,12 +642,12 @@
  */
 
 static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
-		struct inode *pts_inode, int idx)
+		struct file *file, int idx)
 {
 	struct tty_struct *tty;
 
 	mutex_lock(&devpts_mutex);
-	tty = devpts_get_priv(pts_inode);
+	tty = devpts_get_priv(file->f_path.dentry);
 	mutex_unlock(&devpts_mutex);
 	/* Master must be open before slave */
 	if (!tty)
@@ -663,14 +663,14 @@
 /* this is called once with whichever end is closed last */
 static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
 {
-	struct inode *ptmx_inode;
+	struct pts_fs_info *fsi;
 
 	if (tty->driver->subtype == PTY_TYPE_MASTER)
-		ptmx_inode = tty->driver_data;
+		fsi = tty->driver_data;
 	else
-		ptmx_inode = tty->link->driver_data;
-	devpts_kill_index(ptmx_inode, tty->index);
-	devpts_del_ref(ptmx_inode);
+		fsi = tty->link->driver_data;
+	devpts_kill_index(fsi, tty->index);
+	devpts_put_ref(fsi);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -720,8 +720,9 @@
 
 static int ptmx_open(struct inode *inode, struct file *filp)
 {
+	struct pts_fs_info *fsi;
 	struct tty_struct *tty;
-	struct inode *slave_inode;
+	struct dentry *dentry;
 	int retval;
 	int index;
 
@@ -734,54 +735,46 @@
 	if (retval)
 		return retval;
 
+	fsi = devpts_get_ref(inode, filp);
+	retval = -ENODEV;
+	if (!fsi)
+		goto out_free_file;
+
 	/* find a device that is not in use. */
 	mutex_lock(&devpts_mutex);
-	index = devpts_new_index(inode);
-	if (index < 0) {
-		retval = index;
-		mutex_unlock(&devpts_mutex);
-		goto err_file;
-	}
-
+	index = devpts_new_index(fsi);
 	mutex_unlock(&devpts_mutex);
 
+	retval = index;
+	if (index < 0)
+		goto out_put_ref;
+
+
 	mutex_lock(&tty_mutex);
 	tty = tty_init_dev(ptm_driver, index);
-
-	if (IS_ERR(tty)) {
-		retval = PTR_ERR(tty);
-		goto out;
-	}
-
 	/* The tty returned here is locked so we can safely
 	   drop the mutex */
 	mutex_unlock(&tty_mutex);
 
-	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-	tty->driver_data = inode;
+	retval = PTR_ERR(tty);
+	if (IS_ERR(tty))
+		goto out;
 
 	/*
-	 * In the case where all references to ptmx inode are dropped and we
-	 * still have /dev/tty opened pointing to the master/slave pair (ptmx
-	 * is closed/released before /dev/tty), we must make sure that the inode
-	 * is still valid when we call the final pty_unix98_shutdown, thus we
-	 * hold an additional reference to the ptmx inode. For the same /dev/tty
-	 * last close case, we also need to make sure the super_block isn't
-	 * destroyed (devpts instance unmounted), before /dev/tty is closed and
-	 * on its release devpts_kill_index is called.
+	 * From here on out, the tty is "live", and the index and
+	 * fsi will be killed/put by the tty_release()
 	 */
-	devpts_add_ref(inode);
+	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+	tty->driver_data = fsi;
 
 	tty_add_file(tty, filp);
 
-	slave_inode = devpts_pty_new(inode,
-			MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
-			tty->link);
-	if (IS_ERR(slave_inode)) {
-		retval = PTR_ERR(slave_inode);
+	dentry = devpts_pty_new(fsi, index, tty->link);
+	if (IS_ERR(dentry)) {
+		retval = PTR_ERR(dentry);
 		goto err_release;
 	}
-	tty->link->driver_data = slave_inode;
+	tty->link->driver_data = dentry;
 
 	retval = ptm_driver->ops->open(tty, filp);
 	if (retval)
@@ -793,12 +786,14 @@
 	return 0;
 err_release:
 	tty_unlock(tty);
+	// This will also put-ref the fsi
 	tty_release(inode, filp);
 	return retval;
 out:
-	mutex_unlock(&tty_mutex);
-	devpts_kill_index(inode, index);
-err_file:
+	devpts_kill_index(fsi, index);
+out_put_ref:
+	devpts_put_ref(fsi);
+out_free_file:
 	tty_free_file(filp);
 	return retval;
 }
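The reworked ptmx_open() acquires its resources in a fixed order, first the pts_fs_info reference, then a pty index, then the tty itself, and the new error labels (out, out_put_ref, out_free_file) unwind exactly what has been acquired so far, in reverse; once devpts_pty_new() succeeds the tty is "live" and tty_release() becomes responsible for the index and the reference. A generic, standalone sketch of that unwind ordering; all names below are illustrative, not the devpts API:

    #include <stdio.h>

    static int fail_at;     /* simulate a failure at step N (0 = success) */

    static int step(int n, const char *what)
    {
            printf("acquire %s\n", what);
            return (fail_at == n) ? -1 : 0;
    }

    static int do_open(void)
    {
            int retval = -1;

            if (step(1, "fsi ref"))    goto out_free_file;
            if (step(2, "pty index"))  goto out_put_ref;
            if (step(3, "tty"))        goto out_kill_index;
            return 0;

    out_kill_index:
            printf("release pty index\n");
    out_put_ref:
            printf("release fsi ref\n");
    out_free_file:
            printf("release file\n");
            return retval;
    }

    int main(void)
    {
            fail_at = 3;            /* make the third acquisition fail */
            do_open();
            return 0;
    }
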
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 98862aa..5eea74d 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1454,13 +1454,13 @@
 		return -EINVAL;
 	}
 
-	rx_param->src_master = 1;
-	rx_param->dst_master = 0;
+	rx_param->m_master = 0;
+	rx_param->p_master = 1;
 
 	dma->rxconf.src_maxburst = 16;
 
-	tx_param->src_master = 1;
-	tx_param->dst_master = 0;
+	tx_param->m_master = 0;
+	tx_param->p_master = 1;
 
 	dma->txconf.dst_maxburst = 16;
 
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index e213da0..00ad263 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1403,9 +1403,18 @@
 	/*
 	 * Empty the RX FIFO, we are not interested in anything
 	 * received during the half-duplex transmission.
+	 * Enable previously disabled RX interrupts.
 	 */
-	if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX))
+	if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
 		serial8250_clear_fifos(p);
+
+		serial8250_rpm_get(p);
+
+		p->ier |= UART_IER_RLSI | UART_IER_RDI;
+		serial_port_out(&p->port, UART_IER, p->ier);
+
+		serial8250_rpm_put(p);
+	}
 }
 
 static void serial8250_em485_handle_stop_tx(unsigned long arg)
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 64742a0..4d7cb9c 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -324,7 +324,6 @@
 config SERIAL_8250_RT288X
 	bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
 	depends on SERIAL_8250
-	depends on MIPS || COMPILE_TEST
 	default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620
 	help
 	  Selecting this option will add support for the alternate register
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 13d4ed6..7711b26 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -900,6 +900,27 @@
 		controller serial port as your console (you want this!),
 		say Y.  Otherwise, say N.
 
+config SERIAL_PIC32
+	tristate "Microchip PIC32 serial support"
+	depends on MACH_PIC32
+	select SERIAL_CORE
+	help
+	  If you have a PIC32, this driver supports the serial ports.
+
+	  Say Y or M to use PIC32 serial ports, otherwise say N. Note that
+	  to use a serial port as a console, this must be included in kernel and
+	  not as a module.
+
+config SERIAL_PIC32_CONSOLE
+	bool "PIC32 serial console support"
+	depends on SERIAL_PIC32
+	select SERIAL_CORE_CONSOLE
+	help
+	  If you have a PIC32, this driver supports putting a console on one
+	  of the serial ports.
+
+	  Say Y to use the PIC32 console, otherwise say N.
+
 config SERIAL_MPC52xx
 	tristate "Freescale MPC52xx/MPC512x family PSC serial support"
 	depends on PPC_MPC52xx || PPC_MPC512x
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 8c261ad..74914aa 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -91,6 +91,7 @@
 obj-$(CONFIG_SERIAL_SPRD) += sprd_serial.o
 obj-$(CONFIG_SERIAL_STM32)	+= stm32-usart.o
 obj-$(CONFIG_SERIAL_MVEBU_UART)	+= mvebu-uart.o
+obj-$(CONFIG_SERIAL_PIC32)	+= pic32_uart.o
 
 # GPIOLIB helpers for modem control lines
 obj-$(CONFIG_SERIAL_MCTRL_GPIO)	+= serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 3f98165..3f6e0ab 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -17,7 +17,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -1036,7 +1036,7 @@
 static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	unsigned int val;
-	struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+	struct max310x_port *s = gpiochip_get_data(chip);
 	struct uart_port *port = &s->p[offset / 4].port;
 
 	val = max310x_port_read(port, MAX310X_GPIODATA_REG);
@@ -1046,7 +1046,7 @@
 
 static void max310x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
-	struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+	struct max310x_port *s = gpiochip_get_data(chip);
 	struct uart_port *port = &s->p[offset / 4].port;
 
 	max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4),
@@ -1055,7 +1055,7 @@
 
 static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
-	struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+	struct max310x_port *s = gpiochip_get_data(chip);
 	struct uart_port *port = &s->p[offset / 4].port;
 
 	max310x_port_update(port, MAX310X_GPIOCFG_REG, 1 << (offset % 4), 0);
@@ -1066,7 +1066,7 @@
 static int max310x_gpio_direction_output(struct gpio_chip *chip,
 					 unsigned offset, int value)
 {
-	struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+	struct max310x_port *s = gpiochip_get_data(chip);
 	struct uart_port *port = &s->p[offset / 4].port;
 
 	max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4),
@@ -1183,7 +1183,7 @@
 	s->gpio.base		= -1;
 	s->gpio.ngpio		= devtype->nr * 4;
 	s->gpio.can_sleep	= 1;
-	ret = gpiochip_add(&s->gpio);
+	ret = gpiochip_add_data(&s->gpio, s);
 	if (ret)
 		goto out_uart;
 #endif
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
new file mode 100644
index 0000000..62a43bf
--- /dev/null
+++ b/drivers/tty/serial/pic32_uart.c
@@ -0,0 +1,960 @@
+/*
+ * PIC32 Integrated Serial Driver.
+ *
+ * Copyright (C) 2015 Microchip Technology, Inc.
+ *
+ * Authors:
+ *   Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/console.h>
+#include <linux/clk.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/delay.h>
+
+#include <asm/mach-pic32/pic32.h>
+#include "pic32_uart.h"
+
+/* UART name and device definitions */
+#define PIC32_DEV_NAME		"pic32-uart"
+#define PIC32_MAX_UARTS		6
+#define PIC32_SDEV_NAME		"ttyPIC"
+
+/* pic32_sport pointer for console use */
+static struct pic32_sport *pic32_sports[PIC32_MAX_UARTS];
+
+static inline void pic32_wait_deplete_txbuf(struct pic32_sport *sport)
+{
+	/* wait for tx empty, otherwise chars will be lost or corrupted */
+	while (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_TRMT))
+		udelay(1);
+}
+
+static inline int pic32_enable_clock(struct pic32_sport *sport)
+{
+	int ret = clk_prepare_enable(sport->clk);
+
+	if (ret)
+		return ret;
+
+	sport->ref_clk++;
+	return 0;
+}
+
+static inline void pic32_disable_clock(struct pic32_sport *sport)
+{
+	sport->ref_clk--;
+	clk_disable_unprepare(sport->clk);
+}
+
+/* serial core request to check if uart tx buffer is empty */
+static unsigned int pic32_uart_tx_empty(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+	u32 val = pic32_uart_readl(sport, PIC32_UART_STA);
+
+	return (val & PIC32_UART_STA_TRMT) ? 1 : 0;
+}
+
+/* serial core request to set UART outputs */
+static void pic32_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+
+	/* set loopback mode */
+	if (mctrl & TIOCM_LOOP)
+		pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+					PIC32_UART_MODE_LPBK);
+	else
+		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+					PIC32_UART_MODE_LPBK);
+}
+
+/* get the state of CTS input pin for this port */
+static unsigned int get_cts_state(struct pic32_sport *sport)
+{
+	/* read and invert UxCTS */
+	if (gpio_is_valid(sport->cts_gpio))
+		return !gpio_get_value(sport->cts_gpio);
+
+	return 1;
+}
+
+/* serial core request to return the state of misc UART input pins */
+static unsigned int pic32_uart_get_mctrl(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+	unsigned int mctrl = 0;
+
+	if (!sport->hw_flow_ctrl)
+		mctrl |= TIOCM_CTS;
+	else if (get_cts_state(sport))
+		mctrl |= TIOCM_CTS;
+
+	/* DSR and CD are not supported in PIC32, so report them as asserted.
+	 * RI is not supported in PIC32, so leave it deasserted.
+	 */
+	mctrl |= TIOCM_CD;
+	mctrl |= TIOCM_DSR;
+
+	return mctrl;
+}
+
+/* stop_tx and start_tx are not called in pairs, therefore a flag tracks
+ * whether the tx irq is currently enabled so the irq enable/disable depth
+ * stays balanced.
+ */
+static inline void pic32_uart_irqtxen(struct pic32_sport *sport, u8 en)
+{
+	if (en && !tx_irq_enabled(sport)) {
+		enable_irq(sport->irq_tx);
+		tx_irq_enabled(sport) = 1;
+	} else if (!en && tx_irq_enabled(sport)) {
+		/* use disable_irq_nosync() and not disable_irq() to avoid self
+		 * imposed deadlock by not waiting for irq handler to end,
+		 * since this callback is called from interrupt context.
+		 */
+		disable_irq_nosync(sport->irq_tx);
+		tx_irq_enabled(sport) = 0;
+	}
+}
+
+/* serial core request to disable tx ASAP (used for flow control) */
+static void pic32_uart_stop_tx(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+
+	if (!(pic32_uart_readl(sport, PIC32_UART_MODE) & PIC32_UART_MODE_ON))
+		return;
+
+	if (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_UTXEN))
+		return;
+
+	/* wait for tx empty */
+	pic32_wait_deplete_txbuf(sport);
+
+	pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+				PIC32_UART_STA_UTXEN);
+	pic32_uart_irqtxen(sport, 0);
+}
+
+/* serial core request to (re)enable tx */
+static void pic32_uart_start_tx(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+
+	pic32_uart_irqtxen(sport, 1);
+	pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
+				PIC32_UART_STA_UTXEN);
+}
+
+/* serial core request to stop rx, called before port shutdown */
+static void pic32_uart_stop_rx(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+
+	/* disable rx interrupts */
+	disable_irq(sport->irq_rx);
+
+	/* receiver Enable bit OFF */
+	pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+				PIC32_UART_STA_URXEN);
+}
+
+/* serial core request to start/stop emitting break char */
+static void pic32_uart_break_ctl(struct uart_port *port, int ctl)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	if (ctl)
+		pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
+					PIC32_UART_STA_UTXBRK);
+	else
+		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+					PIC32_UART_STA_UTXBRK);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* get port type in string format */
+static const char *pic32_uart_type(struct uart_port *port)
+{
+	return (port->type == PORT_PIC32) ? PIC32_DEV_NAME : NULL;
+}
+
+/* read all chars in rx fifo and send them to core */
+static void pic32_uart_do_rx(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+	struct tty_port *tty;
+	unsigned int max_count;
+
+	/* limit the number of chars read per interrupt; it should not need to
+	 * exceed the fifo size anyway, since we're much faster than the
+	 * serial port
+	 */
+	max_count = PIC32_UART_RX_FIFO_DEPTH;
+
+	spin_lock(&port->lock);
+
+	tty = &port->state->port;
+
+	do {
+		u32 sta_reg, c;
+		char flag;
+
+		/* get overrun/fifo empty information from status register */
+		sta_reg = pic32_uart_readl(sport, PIC32_UART_STA);
+		if (unlikely(sta_reg & PIC32_UART_STA_OERR)) {
+
+			/* fifo reset is required to clear interrupt */
+			pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+						PIC32_UART_STA_OERR);
+
+			port->icount.overrun++;
+			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+		}
+
+		/* Can at least one more character be read? */
+		if (!(sta_reg & PIC32_UART_STA_URXDA))
+			break;
+
+		/* read the character and increment the rx counter */
+		c = pic32_uart_readl(sport, PIC32_UART_RX);
+
+		port->icount.rx++;
+		flag = TTY_NORMAL;
+		c &= 0xff;
+
+		if (unlikely((sta_reg & PIC32_UART_STA_PERR) ||
+			     (sta_reg & PIC32_UART_STA_FERR))) {
+
+			/* do stats first */
+			if (sta_reg & PIC32_UART_STA_PERR)
+				port->icount.parity++;
+			if (sta_reg & PIC32_UART_STA_FERR)
+				port->icount.frame++;
+
+			/* update flag wrt read_status_mask */
+			sta_reg &= port->read_status_mask;
+
+			if (sta_reg & PIC32_UART_STA_FERR)
+				flag = TTY_FRAME;
+			if (sta_reg & PIC32_UART_STA_PERR)
+				flag = TTY_PARITY;
+		}
+
+		if (uart_handle_sysrq_char(port, c))
+			continue;
+
+		if ((sta_reg & port->ignore_status_mask) == 0)
+			tty_insert_flip_char(tty, c, flag);
+
+	} while (--max_count);
+
+	spin_unlock(&port->lock);
+
+	tty_flip_buffer_push(tty);
+}
+
+/* fill tx fifo with chars to send, stop when fifo is about to be full
+ * or when all chars have been sent.
+ */
+static void pic32_uart_do_tx(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+	struct circ_buf *xmit = &port->state->xmit;
+	unsigned int max_count = PIC32_UART_TX_FIFO_DEPTH;
+
+	if (port->x_char) {
+		pic32_uart_writel(sport, PIC32_UART_TX, port->x_char);
+		port->icount.tx++;
+		port->x_char = 0;
+		return;
+	}
+
+	if (uart_tx_stopped(port)) {
+		pic32_uart_stop_tx(port);
+		return;
+	}
+
+	if (uart_circ_empty(xmit))
+		goto txq_empty;
+
+	/* keep stuffing chars into uart tx buffer
+	 * 1) until uart fifo is full
+	 * or
+	 * 2) until the circ buffer is empty
+	 * (all chars have been sent)
+	 * or
+	 * 3) until the max count is reached
+	 * (prevents lingering here for too long in certain cases)
+	 */
+	while (!(PIC32_UART_STA_UTXBF &
+		pic32_uart_readl(sport, PIC32_UART_STA))) {
+		unsigned int c = xmit->buf[xmit->tail];
+
+		pic32_uart_writel(sport, PIC32_UART_TX, c);
+
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		port->icount.tx++;
+		if (uart_circ_empty(xmit))
+			break;
+		if (--max_count == 0)
+			break;
+	}
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+
+	if (uart_circ_empty(xmit))
+		goto txq_empty;
+
+	return;
+
+txq_empty:
+	pic32_uart_irqtxen(sport, 0);
+}
+
+/* RX interrupt handler */
+static irqreturn_t pic32_uart_rx_interrupt(int irq, void *dev_id)
+{
+	struct uart_port *port = dev_id;
+
+	pic32_uart_do_rx(port);
+
+	return IRQ_HANDLED;
+}
+
+/* TX interrupt handler */
+static irqreturn_t pic32_uart_tx_interrupt(int irq, void *dev_id)
+{
+	struct uart_port *port = dev_id;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+	pic32_uart_do_tx(port);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* FAULT interrupt handler */
+static irqreturn_t pic32_uart_fault_interrupt(int irq, void *dev_id)
+{
+	/* do nothing: pic32_uart_do_rx() handles faults. */
+	return IRQ_HANDLED;
+}
+
+/* enable rx & tx operation on uart */
+static void pic32_uart_en_and_unmask(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+
+	pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
+				PIC32_UART_STA_UTXEN | PIC32_UART_STA_URXEN);
+	pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+				PIC32_UART_MODE_ON);
+}
+
+/* disable rx & tx operation on uart */
+static void pic32_uart_dsbl_and_mask(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+
+	/* wait for tx empty, otherwise chars will be lost or corrupted */
+	pic32_wait_deplete_txbuf(sport);
+
+	pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+				PIC32_UART_STA_UTXEN | PIC32_UART_STA_URXEN);
+	pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+				PIC32_UART_MODE_ON);
+}
+
+/* serial core request to initialize uart and start rx operation */
+static int pic32_uart_startup(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+	u32 dflt_baud = (port->uartclk / PIC32_UART_DFLT_BRATE / 16) - 1;
+	unsigned long flags;
+	int ret;
+
+	local_irq_save(flags);
+
+	ret = pic32_enable_clock(sport);
+	if (ret) {
+		local_irq_restore(flags);
+		goto out_done;
+	}
+
+	/* clear status and mode registers */
+	pic32_uart_writel(sport, PIC32_UART_MODE, 0);
+	pic32_uart_writel(sport, PIC32_UART_STA, 0);
+
+	/* disable uart and mask all interrupts */
+	pic32_uart_dsbl_and_mask(port);
+
+	/* set default baud */
+	pic32_uart_writel(sport, PIC32_UART_BRG, dflt_baud);
+
+	local_irq_restore(flags);
+
+	/* Each UART of a PIC32 has three interrupts, therefore we set up
+	 * the driver to register all three irqs for the device.
+	 *
+	 * For each irq, request_irq() is called with the interrupt disabled;
+	 * the irq is then enabled as soon as we are ready to handle it.
+	 */
+	tx_irq_enabled(sport) = 0;
+
+	sport->irq_fault_name = kasprintf(GFP_KERNEL, "%s%d-fault",
+					  pic32_uart_type(port),
+					  sport->idx);
+	if (!sport->irq_fault_name) {
+		dev_err(port->dev, "%s: kasprintf err!", __func__);
+		ret = -ENOMEM;
+		goto out_done;
+	}
+	irq_set_status_flags(sport->irq_fault, IRQ_NOAUTOEN);
+	ret = request_irq(sport->irq_fault, pic32_uart_fault_interrupt,
+			  sport->irqflags_fault, sport->irq_fault_name, port);
+	if (ret) {
+		dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
+			__func__, sport->irq_fault, ret,
+			pic32_uart_type(port));
+		goto out_f;
+	}
+
+	sport->irq_rx_name = kasprintf(GFP_KERNEL, "%s%d-rx",
+				       pic32_uart_type(port),
+				       sport->idx);
+	if (!sport->irq_rx_name) {
+		dev_err(port->dev, "%s: kasprintf err!", __func__);
+		kfree(sport->irq_fault_name);
+		ret = -ENOMEM;
+		goto out_f;
+	}
+	irq_set_status_flags(sport->irq_rx, IRQ_NOAUTOEN);
+	ret = request_irq(sport->irq_rx, pic32_uart_rx_interrupt,
+			  sport->irqflags_rx, sport->irq_rx_name, port);
+	if (ret) {
+		dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
+			__func__, sport->irq_rx, ret,
+			pic32_uart_type(port));
+		goto out_r;
+	}
+
+	sport->irq_tx_name = kasprintf(GFP_KERNEL, "%s%d-tx",
+				       pic32_uart_type(port),
+				       sport->idx);
+	if (!sport->irq_tx_name) {
+		dev_err(port->dev, "%s: kasprintf err!", __func__);
+		ret = -ENOMEM;
+		goto out_r;
+	}
+	irq_set_status_flags(sport->irq_tx, IRQ_NOAUTOEN);
+	ret = request_irq(sport->irq_tx, pic32_uart_tx_interrupt,
+			  sport->irqflags_tx, sport->irq_tx_name, port);
+	if (ret) {
+		dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
+			__func__, sport->irq_tx, ret,
+			pic32_uart_type(port));
+		goto out_t;
+	}
+
+	local_irq_save(flags);
+
+	/* set rx interrupt on first receive */
+	pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+			PIC32_UART_STA_URXISEL1 | PIC32_UART_STA_URXISEL0);
+
+	/* set interrupt on empty */
+	pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+			PIC32_UART_STA_UTXISEL1);
+
+	/* enable all interrupts and the uart */
+	pic32_uart_en_and_unmask(port);
+
+	enable_irq(sport->irq_rx);
+
+	return 0;
+
+out_t:
+	kfree(sport->irq_tx_name);
+	free_irq(sport->irq_tx, port);
+out_r:
+	kfree(sport->irq_rx_name);
+	free_irq(sport->irq_rx, port);
+out_f:
+	kfree(sport->irq_fault_name);
+	free_irq(sport->irq_fault, port);
+out_done:
+	return ret;
+}
+
+/* serial core request to flush & disable uart */
+static void pic32_uart_shutdown(struct uart_port *port)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+	unsigned long flags;
+
+	/* disable uart */
+	spin_lock_irqsave(&port->lock, flags);
+	pic32_uart_dsbl_and_mask(port);
+	spin_unlock_irqrestore(&port->lock, flags);
+	pic32_disable_clock(sport);
+
+	/* free all 3 interrupts for this UART */
+	free_irq(sport->irq_fault, port);
+	free_irq(sport->irq_tx, port);
+	free_irq(sport->irq_rx, port);
+}
+
+/* serial core request to change current uart setting */
+static void pic32_uart_set_termios(struct uart_port *port,
+				   struct ktermios *new,
+				   struct ktermios *old)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+	unsigned int baud;
+	unsigned int quot;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* disable uart and mask all interrupts while changing speed */
+	pic32_uart_dsbl_and_mask(port);
+
+	/* stop bit options */
+	if (new->c_cflag & CSTOPB)
+		pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+					PIC32_UART_MODE_STSEL);
+	else
+		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+					PIC32_UART_MODE_STSEL);
+
+	/* parity options */
+	if (new->c_cflag & PARENB) {
+		if (new->c_cflag & PARODD) {
+			pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+					PIC32_UART_MODE_PDSEL1);
+			pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+					PIC32_UART_MODE_PDSEL0);
+		} else {
+			pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+					PIC32_UART_MODE_PDSEL0);
+			pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+					PIC32_UART_MODE_PDSEL1);
+		}
+	} else {
+		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+					PIC32_UART_MODE_PDSEL1 |
+					PIC32_UART_MODE_PDSEL0);
+	}
+	/* if hw flow ctrl, then the pins must be specified in device tree */
+	if ((new->c_cflag & CRTSCTS) && sport->hw_flow_ctrl) {
+		/* enable hardware flow control */
+		pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+					PIC32_UART_MODE_UEN1);
+		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+					PIC32_UART_MODE_UEN0);
+		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+					PIC32_UART_MODE_RTSMD);
+	} else {
+		/* disable hardware flow control */
+		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+					PIC32_UART_MODE_UEN1);
+		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+					PIC32_UART_MODE_UEN0);
+		pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+					PIC32_UART_MODE_RTSMD);
+	}
+
+	/* Always 8-bit */
+	new->c_cflag |= CS8;
+
+	/* Mark/Space parity is not supported */
+	new->c_cflag &= ~CMSPAR;
+
+	/* update baud */
+	baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
+	quot = uart_get_divisor(port, baud) - 1;
+	pic32_uart_writel(sport, PIC32_UART_BRG, quot);
+	uart_update_timeout(port, new->c_cflag, baud);
+
+	if (tty_termios_baud_rate(new))
+		tty_termios_encode_baud_rate(new, baud, baud);
+
+	/* enable uart */
+	pic32_uart_en_and_unmask(port);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* serial core request to claim uart iomem */
+static int pic32_uart_request_port(struct uart_port *port)
+{
+	struct platform_device *pdev = to_platform_device(port->dev);
+	struct resource *res_mem;
+
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(!res_mem))
+		return -EINVAL;
+
+	if (!request_mem_region(port->mapbase, resource_size(res_mem),
+				"pic32_uart_mem"))
+		return -EBUSY;
+
+	port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+						resource_size(res_mem));
+	if (!port->membase) {
+		dev_err(port->dev, "Unable to map registers\n");
+		release_mem_region(port->mapbase, resource_size(res_mem));
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* serial core request to release uart iomem */
+static void pic32_uart_release_port(struct uart_port *port)
+{
+	struct platform_device *pdev = to_platform_device(port->dev);
+	struct resource *res_mem;
+	unsigned int res_size;
+
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(!res_mem))
+		return;
+	res_size = resource_size(res_mem);
+
+	release_mem_region(port->mapbase, res_size);
+}
+
+/* serial core request to do any port required auto-configuration */
+static void pic32_uart_config_port(struct uart_port *port, int flags)
+{
+	if (flags & UART_CONFIG_TYPE) {
+		if (pic32_uart_request_port(port))
+			return;
+		port->type = PORT_PIC32;
+	}
+}
+
+/* serial core request to check that the port information in serinfo is suitable */
+static int pic32_uart_verify_port(struct uart_port *port,
+				  struct serial_struct *serinfo)
+{
+	if (port->type != PORT_PIC32)
+		return -EINVAL;
+	if (port->irq != serinfo->irq)
+		return -EINVAL;
+	if (port->iotype != serinfo->io_type)
+		return -EINVAL;
+	if (port->mapbase != (unsigned long)serinfo->iomem_base)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* serial core callbacks */
+static const struct uart_ops pic32_uart_ops = {
+	.tx_empty	= pic32_uart_tx_empty,
+	.get_mctrl	= pic32_uart_get_mctrl,
+	.set_mctrl	= pic32_uart_set_mctrl,
+	.start_tx	= pic32_uart_start_tx,
+	.stop_tx	= pic32_uart_stop_tx,
+	.stop_rx	= pic32_uart_stop_rx,
+	.break_ctl	= pic32_uart_break_ctl,
+	.startup	= pic32_uart_startup,
+	.shutdown	= pic32_uart_shutdown,
+	.set_termios	= pic32_uart_set_termios,
+	.type		= pic32_uart_type,
+	.release_port	= pic32_uart_release_port,
+	.request_port	= pic32_uart_request_port,
+	.config_port	= pic32_uart_config_port,
+	.verify_port	= pic32_uart_verify_port,
+};
+
+#ifdef CONFIG_SERIAL_PIC32_CONSOLE
+/* output given char */
+static void pic32_console_putchar(struct uart_port *port, int ch)
+{
+	struct pic32_sport *sport = to_pic32_sport(port);
+
+	if (!(pic32_uart_readl(sport, PIC32_UART_MODE) & PIC32_UART_MODE_ON))
+		return;
+
+	if (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_UTXEN))
+		return;
+
+	/* wait for tx empty */
+	pic32_wait_deplete_txbuf(sport);
+
+	pic32_uart_writel(sport, PIC32_UART_TX, ch & 0xff);
+}
+
+/* console core request to output given string */
+static void pic32_console_write(struct console *co, const char *s,
+				unsigned int count)
+{
+	struct pic32_sport *sport = pic32_sports[co->index];
+	struct uart_port *port = pic32_get_port(sport);
+
+	/* call uart helper to deal with \r\n */
+	uart_console_write(port, s, count, pic32_console_putchar);
+}
+
+/* console core request to set up the given console; find the matching uart
+ * port and set it up.
+ */
+static int pic32_console_setup(struct console *co, char *options)
+{
+	struct pic32_sport *sport;
+	struct uart_port *port = NULL;
+	int baud = 115200;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+	int ret = 0;
+
+	if (unlikely(co->index < 0 || co->index >= PIC32_MAX_UARTS))
+		return -ENODEV;
+
+	sport = pic32_sports[co->index];
+	if (!sport)
+		return -ENODEV;
+	port = pic32_get_port(sport);
+
+	ret = pic32_enable_clock(sport);
+	if (ret)
+		return ret;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver pic32_uart_driver;
+static struct console pic32_console = {
+	.name		= PIC32_SDEV_NAME,
+	.write		= pic32_console_write,
+	.device		= uart_console_device,
+	.setup		= pic32_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+	.data		= &pic32_uart_driver,
+};
+#define PIC32_SCONSOLE (&pic32_console)
+
+static int __init pic32_console_init(void)
+{
+	register_console(&pic32_console);
+	return 0;
+}
+console_initcall(pic32_console_init);
+
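+/* true if this uart port is the one the registered console refers to */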
+static inline bool is_pic32_console_port(struct uart_port *port)
+{
+	return port->cons && port->cons->index == port->line;
+}
+
+/*
+ * Late console initialization.
+ */
+static int __init pic32_late_console_init(void)
+{
+	if (!(pic32_console.flags & CON_ENABLED))
+		register_console(&pic32_console);
+
+	return 0;
+}
+
+core_initcall(pic32_late_console_init);
+
+#else
+#define PIC32_SCONSOLE NULL
+#endif
+
+static struct uart_driver pic32_uart_driver = {
+	.owner			= THIS_MODULE,
+	.driver_name		= PIC32_DEV_NAME,
+	.dev_name		= PIC32_SDEV_NAME,
+	.nr			= PIC32_MAX_UARTS,
+	.cons			= PIC32_SCONSOLE,
+};
+
+static int pic32_uart_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct pic32_sport *sport;
+	int uart_idx = 0;
+	struct resource *res_mem;
+	struct uart_port *port;
+	int ret;
+
+	uart_idx = of_alias_get_id(np, "serial");
+	if (uart_idx < 0 || uart_idx >= PIC32_MAX_UARTS)
+		return -EINVAL;
+
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res_mem)
+		return -EINVAL;
+
+	sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
+	if (!sport)
+		return -ENOMEM;
+
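+	/* collect the irqs and the peripheral clock from the device tree */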
+	sport->idx		= uart_idx;
+	sport->irq_fault	= irq_of_parse_and_map(np, 0);
+	sport->irqflags_fault	= IRQF_NO_THREAD;
+	sport->irq_rx		= irq_of_parse_and_map(np, 1);
+	sport->irqflags_rx	= IRQF_NO_THREAD;
+	sport->irq_tx		= irq_of_parse_and_map(np, 2);
+	sport->irqflags_tx	= IRQF_NO_THREAD;
+	sport->clk		= devm_clk_get(&pdev->dev, NULL);
+	sport->cts_gpio		= -EINVAL;
+	sport->dev		= &pdev->dev;
+
+	/* Hardware flow control: gpios
+	 * Note: the CTS gpio is needed to read the CTS line status.
+	 */
+	sport->hw_flow_ctrl = false;
+	sport->cts_gpio = of_get_named_gpio(np, "cts-gpios", 0);
+	if (gpio_is_valid(sport->cts_gpio)) {
+		sport->hw_flow_ctrl = true;
+
+		ret = devm_gpio_request(sport->dev,
+					sport->cts_gpio, "CTS");
+		if (ret) {
+			dev_err(&pdev->dev,
+				"error requesting CTS GPIO\n");
+			goto err;
+		}
+
+		ret = gpio_direction_input(sport->cts_gpio);
+		if (ret) {
+			dev_err(&pdev->dev, "error setting CTS GPIO\n");
+			goto err;
+		}
+	}
+
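+	/* initialise the uart_port and register it with the serial core */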
+	pic32_sports[uart_idx] = sport;
+	port = &sport->port;
+	memset(port, 0, sizeof(*port));
+	port->iotype	= UPIO_MEM;
+	port->mapbase	= res_mem->start;
+	port->ops	= &pic32_uart_ops;
+	port->flags	= UPF_BOOT_AUTOCONF;
+	port->dev	= &pdev->dev;
+	port->fifosize	= PIC32_UART_TX_FIFO_DEPTH;
+	port->uartclk	= clk_get_rate(sport->clk);
+	port->line	= uart_idx;
+
+	ret = uart_add_one_port(&pic32_uart_driver, port);
+	if (ret) {
+		port->membase = NULL;
+		dev_err(port->dev, "%s: uart add port error!\n", __func__);
+		goto err;
+	}
+
+#ifdef CONFIG_SERIAL_PIC32_CONSOLE
+	if (is_pic32_console_port(port) &&
+	    (pic32_console.flags & CON_ENABLED)) {
+		/* The peripheral clock has been enabled by console_setup,
+		 * so disable it till the port is used.
+		 */
+		pic32_disable_clock(sport);
+	}
+#endif
+
+	platform_set_drvdata(pdev, port);
+
+	dev_info(&pdev->dev, "%s: uart(%d) driver initialized.\n",
+		 __func__, uart_idx);
+
+	return 0;
+err:
+	/* sport and gpios are devm-managed, so they are released automatically */
+	return ret;
+}
+
+static int pic32_uart_remove(struct platform_device *pdev)
+{
+	struct uart_port *port = platform_get_drvdata(pdev);
+	struct pic32_sport *sport = to_pic32_sport(port);
+
+	uart_remove_one_port(&pic32_uart_driver, port);
+	pic32_disable_clock(sport);
+	platform_set_drvdata(pdev, NULL);
+	pic32_sports[sport->idx] = NULL;
+
+	/* sport and gpios are devm-managed, so they are released automatically */
+	return 0;
+}
+
+static const struct of_device_id pic32_serial_dt_ids[] = {
+	{ .compatible = "microchip,pic32mzda-uart" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_serial_dt_ids);
+
+static struct platform_driver pic32_uart_platform_driver = {
+	.probe		= pic32_uart_probe,
+	.remove		= pic32_uart_remove,
+	.driver		= {
+		.name	= PIC32_DEV_NAME,
+		.of_match_table	= of_match_ptr(pic32_serial_dt_ids),
+	},
+};
+
+static int __init pic32_uart_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&pic32_uart_driver);
+	if (ret) {
+		pr_err("failed to register %s:%d\n",
+		       pic32_uart_driver.driver_name, ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&pic32_uart_platform_driver);
+	if (ret) {
+		pr_err("fail to register pic32 uart\n");
+		uart_unregister_driver(&pic32_uart_driver);
+	}
+
+	return ret;
+}
+arch_initcall(pic32_uart_init);
+
+static void __exit pic32_uart_exit(void)
+{
+#ifdef CONFIG_SERIAL_PIC32_CONSOLE
+	unregister_console(&pic32_console);
+#endif
+	platform_driver_unregister(&pic32_uart_platform_driver);
+	uart_unregister_driver(&pic32_uart_driver);
+}
+module_exit(pic32_uart_exit);
+
+MODULE_AUTHOR("Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>");
+MODULE_DESCRIPTION("Microchip PIC32 integrated serial port driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/pic32_uart.h b/drivers/tty/serial/pic32_uart.h
new file mode 100644
index 0000000..ec379da
--- /dev/null
+++ b/drivers/tty/serial/pic32_uart.h
@@ -0,0 +1,126 @@
+/*
+ * PIC32 Integrated Serial Driver.
+ *
+ * Copyright (C) 2015 Microchip Technology, Inc.
+ *
+ * Authors:
+ *   Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+#ifndef __DT_PIC32_UART_H__
+#define __DT_PIC32_UART_H__
+
+#define PIC32_UART_DFLT_BRATE		(9600)
+#define PIC32_UART_TX_FIFO_DEPTH	(8)
+#define PIC32_UART_RX_FIFO_DEPTH	(8)
+
+#define PIC32_UART_MODE		0x00
+#define PIC32_UART_STA		0x10
+#define PIC32_UART_TX		0x20
+#define PIC32_UART_RX		0x30
+#define PIC32_UART_BRG		0x40
+
+struct pic32_console_opt {
+	int baud;
+	int parity;
+	int bits;
+	int flow;
+};
+
+/* struct pic32_sport - pic32 serial port descriptor
+ * @port: uart port descriptor
+ * @idx: port index
+ * @irq_fault: virtual fault interrupt number
+ * @irqflags_fault: flags related to fault irq
+ * @irq_fault_name: irq fault name
+ * @irq_rx: virtual rx interrupt number
+ * @irqflags_rx: flags related to rx irq
+ * @irq_rx_name: irq rx name
+ * @irq_tx: virtual tx interrupt number
+ * @irqflags_tx: flags related to tx irq
+ * @irq_tx_name: irq tx name
+ * @enable_tx_irq: flag tracking whether the tx irq is currently enabled
+ * @hw_flow_ctrl: true if hardware (CTS) flow control is used
+ * @cts_gpio: clear to send gpio
+ * @ref_clk: reference count of clock enables
+ * @clk: uart peripheral clock
+ * @dev: device descriptor
+ **/
+struct pic32_sport {
+	struct uart_port port;
+	struct pic32_console_opt opt;
+	int idx;
+
+	int irq_fault;
+	int irqflags_fault;
+	const char *irq_fault_name;
+	int irq_rx;
+	int irqflags_rx;
+	const char *irq_rx_name;
+	int irq_tx;
+	int irqflags_tx;
+	const char *irq_tx_name;
+	u8 enable_tx_irq;
+
+	bool hw_flow_ctrl;
+	int cts_gpio;
+
+	int ref_clk;
+	struct clk *clk;
+
+	struct device *dev;
+};
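+
+/* helpers to get the embedded uart_port, console options and tx-irq flag */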
+#define to_pic32_sport(c) container_of(c, struct pic32_sport, port)
+#define pic32_get_port(sport) (&sport->port)
+#define pic32_get_opt(sport) (&sport->opt)
+#define tx_irq_enabled(sport) (sport->enable_tx_irq)
+
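+/* raw register accessors; 'reg' is an offset from the port's mapped base */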
+static inline void pic32_uart_writel(struct pic32_sport *sport,
+					u32 reg, u32 val)
+{
+	struct uart_port *port = pic32_get_port(sport);
+
+	__raw_writel(val, port->membase + reg);
+}
+
+static inline u32 pic32_uart_readl(struct pic32_sport *sport, u32 reg)
+{
+	struct uart_port *port = pic32_get_port(sport);
+
+	return	__raw_readl(port->membase + reg);
+}
+
+/* pic32 uart mode register bits */
+#define PIC32_UART_MODE_ON        BIT(15)
+#define PIC32_UART_MODE_FRZ       BIT(14)
+#define PIC32_UART_MODE_SIDL      BIT(13)
+#define PIC32_UART_MODE_IREN      BIT(12)
+#define PIC32_UART_MODE_RTSMD     BIT(11)
+#define PIC32_UART_MODE_RESV1     BIT(10)
+#define PIC32_UART_MODE_UEN1      BIT(9)
+#define PIC32_UART_MODE_UEN0      BIT(8)
+#define PIC32_UART_MODE_WAKE      BIT(7)
+#define PIC32_UART_MODE_LPBK      BIT(6)
+#define PIC32_UART_MODE_ABAUD     BIT(5)
+#define PIC32_UART_MODE_RXINV     BIT(4)
+#define PIC32_UART_MODE_BRGH      BIT(3)
+#define PIC32_UART_MODE_PDSEL1    BIT(2)
+#define PIC32_UART_MODE_PDSEL0    BIT(1)
+#define PIC32_UART_MODE_STSEL     BIT(0)
+
+/* pic32 uart status register bits */
+#define PIC32_UART_STA_UTXISEL1   BIT(15)
+#define PIC32_UART_STA_UTXISEL0   BIT(14)
+#define PIC32_UART_STA_UTXINV     BIT(13)
+#define PIC32_UART_STA_URXEN      BIT(12)
+#define PIC32_UART_STA_UTXBRK     BIT(11)
+#define PIC32_UART_STA_UTXEN      BIT(10)
+#define PIC32_UART_STA_UTXBF      BIT(9)
+#define PIC32_UART_STA_TRMT       BIT(8)
+#define PIC32_UART_STA_URXISEL1   BIT(7)
+#define PIC32_UART_STA_URXISEL0   BIT(6)
+#define PIC32_UART_STA_ADDEN      BIT(5)
+#define PIC32_UART_STA_RIDLE      BIT(4)
+#define PIC32_UART_STA_PERR       BIT(3)
+#define PIC32_UART_STA_FERR       BIT(2)
+#define PIC32_UART_STA_OERR       BIT(1)
+#define PIC32_UART_STA_URXDA      BIT(0)
+
+#endif /* __DT_PIC32_UART_H__ */
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 025a426..e639361 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -17,7 +17,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -1104,8 +1104,7 @@
 static int sc16is7xx_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	unsigned int val;
-	struct sc16is7xx_port *s = container_of(chip, struct sc16is7xx_port,
-						gpio);
+	struct sc16is7xx_port *s = gpiochip_get_data(chip);
 	struct uart_port *port = &s->p[0].port;
 
 	val = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG);
@@ -1115,8 +1114,7 @@
 
 static void sc16is7xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
 {
-	struct sc16is7xx_port *s = container_of(chip, struct sc16is7xx_port,
-						gpio);
+	struct sc16is7xx_port *s = gpiochip_get_data(chip);
 	struct uart_port *port = &s->p[0].port;
 
 	sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
@@ -1126,8 +1124,7 @@
 static int sc16is7xx_gpio_direction_input(struct gpio_chip *chip,
 					  unsigned offset)
 {
-	struct sc16is7xx_port *s = container_of(chip, struct sc16is7xx_port,
-						gpio);
+	struct sc16is7xx_port *s = gpiochip_get_data(chip);
 	struct uart_port *port = &s->p[0].port;
 
 	sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset), 0);
@@ -1138,8 +1135,7 @@
 static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
 					   unsigned offset, int val)
 {
-	struct sc16is7xx_port *s = container_of(chip, struct sc16is7xx_port,
-						gpio);
+	struct sc16is7xx_port *s = gpiochip_get_data(chip);
 	struct uart_port *port = &s->p[0].port;
 
 	sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
@@ -1210,7 +1206,7 @@
 		s->gpio.base		 = -1;
 		s->gpio.ngpio		 = devtype->nr_gpio;
 		s->gpio.can_sleep	 = 1;
-		ret = gpiochip_add(&s->gpio);
+		ret = gpiochip_add_data(&s->gpio, s);
 		if (ret)
 			goto out_thread;
 	}
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index 9716db2..bcfad5d 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -62,7 +62,7 @@
 				      enum mctrl_gpio_idx gidx);
 
 /*
- * Request and set direction of modem control lines GPIOs and sets up irq
+ * Request and set direction of modem control line GPIOs and set up irq
  * handling.
  * devm_* functions are used, so there's no need to call mctrl_gpio_free().
  * Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on
@@ -71,7 +71,7 @@
 struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx);
 
 /*
- * Request and set direction of modem control lines GPIOs.
+ * Request and set direction of modem control line GPIOs.
  * devm_* functions are used, so there's no need to call mctrl_gpio_free().
  * Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on
  * allocation error.
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index c9fdfc8..d08baa6 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -72,7 +72,7 @@
 	iowrite32be(val, addr);
 }
 
-static const struct uartlite_reg_ops uartlite_be = {
+static struct uartlite_reg_ops uartlite_be = {
 	.in = uartlite_inbe32,
 	.out = uartlite_outbe32,
 };
@@ -87,21 +87,21 @@
 	iowrite32(val, addr);
 }
 
-static const struct uartlite_reg_ops uartlite_le = {
+static struct uartlite_reg_ops uartlite_le = {
 	.in = uartlite_inle32,
 	.out = uartlite_outle32,
 };
 
 static inline u32 uart_in32(u32 offset, struct uart_port *port)
 {
-	const struct uartlite_reg_ops *reg_ops = port->private_data;
+	struct uartlite_reg_ops *reg_ops = port->private_data;
 
 	return reg_ops->in(port->membase + offset);
 }
 
 static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
 {
-	const struct uartlite_reg_ops *reg_ops = port->private_data;
+	struct uartlite_reg_ops *reg_ops = port->private_data;
 
 	reg_ops->out(val, port->membase + offset);
 }
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index f5476e2..c8c7601 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -7708,7 +7708,7 @@
 	dev_kfree_skb(skb);
 
 	/* save start time for transmit timeout detection */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	/* start hardware transmitter if necessary */
 	spin_lock_irqsave(&info->irq_spinlock,flags);
@@ -7764,7 +7764,7 @@
 	mgsl_program_hw(info);
 
 	/* enable network layer transmit */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	netif_start_queue(dev);
 
 	/* inform generic HDLC layer of current DCD status */
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index c0a2f5a..d5b6471 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1493,7 +1493,7 @@
 	dev->stats.tx_bytes += skb->len;
 
 	/* save start time for transmit timeout detection */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	spin_lock_irqsave(&info->lock, flags);
 	tx_load(info, skb->data, skb->len);
@@ -1552,7 +1552,7 @@
 	program_hw(info);
 
 	/* enable network layer transmit */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	netif_start_queue(dev);
 
 	/* inform generic HDLC layer of current DCD status */
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 90da0c7..3f89685 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1612,7 +1612,7 @@
 	dev_kfree_skb(skb);
 
 	/* save start time for transmit timeout detection */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	/* start hardware transmitter if necessary */
 	spin_lock_irqsave(&info->lock,flags);
@@ -1668,7 +1668,7 @@
 	program_hw(info);
 
 	/* enable network layer transmit */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	netif_start_queue(dev);
 
 	/* inform generic HDLC layer of current DCD status */
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8d26ed7..24d5491 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1367,12 +1367,12 @@
  *	Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
  */
 static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
-		struct inode *inode, int idx)
+		struct file *file, int idx)
 {
 	struct tty_struct *tty;
 
 	if (driver->ops->lookup)
-		tty = driver->ops->lookup(driver, inode, idx);
+		tty = driver->ops->lookup(driver, file, idx);
 	else
 		tty = driver->ttys[idx];
 
@@ -2040,7 +2040,7 @@
 	}
 
 	/* check whether we're reopening an existing tty */
-	tty = tty_driver_lookup_tty(driver, inode, index);
+	tty = tty_driver_lookup_tty(driver, filp, index);
 	if (IS_ERR(tty)) {
 		mutex_unlock(&tty_mutex);
 		goto out;
@@ -2049,14 +2049,13 @@
 	if (tty) {
 		mutex_unlock(&tty_mutex);
 		retval = tty_lock_interruptible(tty);
+		tty_kref_put(tty);  /* drop kref from tty_driver_lookup_tty() */
 		if (retval) {
 			if (retval == -EINTR)
 				retval = -ERESTARTSYS;
 			tty = ERR_PTR(retval);
 			goto out;
 		}
-		/* safe to drop the kref from tty_driver_lookup_tty() */
-		tty_kref_put(tty);
 		retval = tty_reopen(tty);
 		if (retval < 0) {
 			tty_unlock(tty);
@@ -2158,7 +2157,7 @@
 	read_lock(&tasklist_lock);
 	spin_lock_irq(&current->sighand->siglock);
 	noctty = (filp->f_flags & O_NOCTTY) ||
-			device == MKDEV(TTY_MAJOR, 0) ||
+			(IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
 			device == MKDEV(TTYAUX_MAJOR, 1) ||
 			(tty->driver->type == TTY_DRIVER_TYPE_PTY &&
 			 tty->driver->subtype == PTY_TYPE_MASTER);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 83fd30b..a6c4a1b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -744,11 +744,15 @@
 	int err;
 	unsigned long flags;
 
+	if (!cur) /* nothing to do */
+		return;
+
 	acm->putbuffer = NULL;
 	err = usb_autopm_get_interface_async(acm->control);
 	spin_lock_irqsave(&acm->write_lock, flags);
 	if (err < 0) {
 		cur->use = 0;
+		acm->putbuffer = cur;
 		goto out;
 	}
 
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 5eb1a87..31ccdcc 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -75,8 +75,6 @@
 	 * be the first thing immediately following the endpoint descriptor.
 	 */
 	desc = (struct usb_ss_ep_comp_descriptor *) buffer;
-	buffer += desc->bLength;
-	size -= desc->bLength;
 
 	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
 			size < USB_DT_SS_EP_COMP_SIZE) {
@@ -100,7 +98,8 @@
 					ep->desc.wMaxPacketSize;
 		return;
 	}
-
+	buffer += desc->bLength;
+	size -= desc->bLength;
 	memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE);
 
 	/* Check the various values */
@@ -146,12 +145,6 @@
 		ep->ss_ep_comp.bmAttributes = 2;
 	}
 
-	/* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
-	if (usb_endpoint_xfer_isoc(&ep->desc) &&
-	    USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
-		usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
-							ep, buffer, size);
-
 	if (usb_endpoint_xfer_isoc(&ep->desc))
 		max_tx = (desc->bMaxBurst + 1) *
 			(USB_SS_MULT(desc->bmAttributes)) *
@@ -171,6 +164,11 @@
 				max_tx);
 		ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
 	}
+	/* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
+	if (usb_endpoint_xfer_isoc(&ep->desc) &&
+	    USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
+		usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
+							ep, buffer, size);
 }
 
 static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index f9d42cf..7859d73 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -73,6 +73,15 @@
 		if (companion->bus != pdev->bus ||
 				PCI_SLOT(companion->devfn) != slot)
 			continue;
+
+		/*
+		 * Companion device should be either a UHCI, OHCI or EHCI host
+		 * controller, otherwise skip.
+		 */
+		if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
+				companion->class != CL_EHCI)
+			continue;
+
 		companion_hcd = pci_get_drvdata(companion);
 		if (!companion_hcd || !companion_hcd->self.root_hub)
 			continue;
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 14718a9..460c855 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -249,18 +249,12 @@
 
 	return retval;
 }
-
-static int usb_port_prepare(struct device *dev)
-{
-	return 1;
-}
 #endif
 
 static const struct dev_pm_ops usb_port_pm_ops = {
 #ifdef CONFIG_PM
 	.runtime_suspend =	usb_port_runtime_suspend,
 	.runtime_resume =	usb_port_runtime_resume,
-	.prepare =		usb_port_prepare,
 #endif
 };
 
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index dcb85e3..479187c3 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -312,13 +312,7 @@
 
 static int usb_dev_prepare(struct device *dev)
 {
-	struct usb_device *udev = to_usb_device(dev);
-
-	/* Return 0 if the current wakeup setting is wrong, otherwise 1 */
-	if (udev->do_remote_wakeup != device_may_wakeup(dev))
-		return 0;
-
-	return 1;
+	return 0;		/* Implement eventually? */
 }
 
 static void usb_dev_complete(struct device *dev)
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e9940dd..818f158 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2254,6 +2254,7 @@
 {
 	u32 intmsk;
 	u32 val;
+	u32 usbcfg;
 
 	/* Kill any ep0 requests as controller will be reinitialized */
 	kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2267,10 +2268,16 @@
 	 * set configuration.
 	 */
 
+	/* keep other bits untouched (so e.g. forced modes are not lost) */
+	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+	usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
+		GUSBCFG_HNPCAP);
+
 	/* set the PLL on, remove the HNP/SRP and set the PHY */
 	val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
-	dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
-	       (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG);
+	usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+		(val << GUSBCFG_USBTRDTIM_SHIFT);
+	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
 
 	dwc2_hsotg_init_fifo(hsotg);
 
@@ -3031,6 +3038,7 @@
 static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
 {
 	u32 trdtim;
+	u32 usbcfg;
 	/* unmask subset of endpoint interrupts */
 
 	dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
@@ -3054,11 +3062,16 @@
 
 	dwc2_hsotg_init_fifo(hsotg);
 
+	/* keep other bits untouched (so e.g. forced modes are not lost) */
+	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+	usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
+		GUSBCFG_HNPCAP);
+
 	/* set the PLL on, remove the HNP/SRP and set the PHY */
 	trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
-	dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
-		(trdtim << GUSBCFG_USBTRDTIM_SHIFT),
-		hsotg->regs + GUSBCFG);
+	usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+		(trdtim << GUSBCFG_USBTRDTIM_SHIFT);
+	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
 
 	if (using_dma(hsotg))
 		__orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 17fd814..34277ce 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -67,23 +67,9 @@
 static int dwc3_core_soft_reset(struct dwc3 *dwc)
 {
 	u32		reg;
+	int		retries = 1000;
 	int		ret;
 
-	/* Before Resetting PHY, put Core in Reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-	reg |= DWC3_GCTL_CORESOFTRESET;
-	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-
-	/* Assert USB3 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
-	reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
-
-	/* Assert USB2 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-	reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-
 	usb_phy_init(dwc->usb2_phy);
 	usb_phy_init(dwc->usb3_phy);
 	ret = phy_init(dwc->usb2_generic_phy);
@@ -95,26 +81,28 @@
 		phy_exit(dwc->usb2_generic_phy);
 		return ret;
 	}
-	mdelay(100);
 
-	/* Clear USB3 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
-	reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+	/*
+	 * We're resetting only the device side because, if we're in host mode,
+	 * the xHCI driver will reset the host block. If dwc3 was configured for
+	 * host-only mode, then we can return early.
+	 */
+	if (dwc->dr_mode == USB_DR_MODE_HOST)
+		return 0;
 
-	/* Clear USB2 PHY reset */
-	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-	reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
-	dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	reg |= DWC3_DCTL_CSFTRST;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 
-	mdelay(100);
+	do {
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+		if (!(reg & DWC3_DCTL_CSFTRST))
+			return 0;
 
-	/* After PHYs are stable we can take Core out of reset state */
-	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-	reg &= ~DWC3_GCTL_CORESOFTRESET;
-	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+		udelay(1);
+	} while (--retries);
 
-	return 0;
+	return -ETIMEDOUT;
 }
 
 /**
@@ -1162,6 +1150,11 @@
 	phy_exit(dwc->usb2_generic_phy);
 	phy_exit(dwc->usb3_generic_phy);
 
+	usb_phy_set_suspend(dwc->usb2_phy, 1);
+	usb_phy_set_suspend(dwc->usb3_phy, 1);
+	WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
+	WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
+
 	pinctrl_pm_select_sleep_state(dev);
 
 	return 0;
@@ -1175,11 +1168,21 @@
 
 	pinctrl_pm_select_default_state(dev);
 
+	usb_phy_set_suspend(dwc->usb2_phy, 0);
+	usb_phy_set_suspend(dwc->usb3_phy, 0);
+	ret = phy_power_on(dwc->usb2_generic_phy);
+	if (ret < 0)
+		return ret;
+
+	ret = phy_power_on(dwc->usb3_generic_phy);
+	if (ret < 0)
+		goto err_usb2phy_power;
+
 	usb_phy_init(dwc->usb3_phy);
 	usb_phy_init(dwc->usb2_phy);
 	ret = phy_init(dwc->usb2_generic_phy);
 	if (ret < 0)
-		return ret;
+		goto err_usb3phy_power;
 
 	ret = phy_init(dwc->usb3_generic_phy);
 	if (ret < 0)
@@ -1212,6 +1215,12 @@
 err_usb2phy_init:
 	phy_exit(dwc->usb2_generic_phy);
 
+err_usb3phy_power:
+	phy_power_off(dwc->usb3_generic_phy);
+
+err_usb2phy_power:
+	phy_power_off(dwc->usb2_generic_phy);
+
 	return ret;
 }
 
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9ac37fe..cebf9e3 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -645,7 +645,7 @@
 	file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
 	if (!file) {
 		ret = -ENOMEM;
-		goto err1;
+		goto err2;
 	}
 
 	if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
@@ -653,7 +653,7 @@
 				dwc, &dwc3_mode_fops);
 		if (!file) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err2;
 		}
 	}
 
@@ -663,19 +663,22 @@
 				dwc, &dwc3_testmode_fops);
 		if (!file) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err2;
 		}
 
 		file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root,
 				dwc, &dwc3_link_state_fops);
 		if (!file) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err2;
 		}
 	}
 
 	return 0;
 
+err2:
+	kfree(dwc->regset);
+
 err1:
 	debugfs_remove_recursive(root);
 
@@ -686,5 +689,5 @@
 void dwc3_debugfs_exit(struct dwc3 *dwc)
 {
 	debugfs_remove_recursive(dwc->root);
-	dwc->root = NULL;
+	kfree(dwc->regset);
 }
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 2be268d..7266470 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -39,8 +39,6 @@
 #define USBSS_IRQ_COREIRQ_EN	BIT(0)
 #define USBSS_IRQ_COREIRQ_CLR	BIT(0)
 
-static u64 kdwc3_dma_mask;
-
 struct dwc3_keystone {
 	struct device			*dev;
 	struct clk			*clk;
@@ -108,9 +106,6 @@
 	if (IS_ERR(kdwc->usbss))
 		return PTR_ERR(kdwc->usbss);
 
-	kdwc3_dma_mask = dma_get_mask(dev);
-	dev->dma_mask = &kdwc3_dma_mask;
-
 	kdwc->clk = devm_clk_get(kdwc->dev, "usb");
 
 	error = clk_prepare_enable(kdwc->clk);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 22e9606..55da2c7 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -496,7 +496,7 @@
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0) {
 		dev_err(dev, "get_sync failed with err %d\n", ret);
-		goto err0;
+		goto err1;
 	}
 
 	dwc3_omap_map_offset(omap);
@@ -516,28 +516,24 @@
 
 	ret = dwc3_omap_extcon_register(omap);
 	if (ret < 0)
-		goto err2;
+		goto err1;
 
 	ret = of_platform_populate(node, NULL, NULL, dev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to create dwc3 core\n");
-		goto err3;
+		goto err2;
 	}
 
 	dwc3_omap_enable_irqs(omap);
 
 	return 0;
 
-err3:
+err2:
 	extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
 	extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
-err2:
-	dwc3_omap_disable_irqs(omap);
 
 err1:
 	pm_runtime_put_sync(dev);
-
-err0:
 	pm_runtime_disable(dev);
 
 	return ret;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 009d830..adc1e8a 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -35,6 +35,7 @@
 #define PCI_DEVICE_ID_INTEL_SPTLP		0x9d30
 #define PCI_DEVICE_ID_INTEL_SPTH		0xa130
 #define PCI_DEVICE_ID_INTEL_BXT			0x0aaa
+#define PCI_DEVICE_ID_INTEL_BXT_M		0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL			0x5aaa
 
 static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
@@ -213,6 +214,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
 	{  }	/* Terminating Entry */
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3ac170f..8e4a1b1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -568,7 +568,7 @@
 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
 		if (!usb_endpoint_xfer_isoc(desc))
-			return 0;
+			goto out;
 
 		/* Link TRB for ISOC. The HWO bit is never reset */
 		trb_st_hw = &dep->trb_pool[0];
@@ -582,9 +582,10 @@
 		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
 	}
 
+out:
 	switch (usb_endpoint_type(desc)) {
 	case USB_ENDPOINT_XFER_CONTROL:
-		strlcat(dep->name, "-control", sizeof(dep->name));
+		/* don't change name */
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
 		strlcat(dep->name, "-isoc", sizeof(dep->name));
@@ -2487,7 +2488,11 @@
 	 * implemented.
 	 */
 
-	dwc->gadget_driver->resume(&dwc->gadget);
+	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+		spin_unlock(&dwc->lock);
+		dwc->gadget_driver->resume(&dwc->gadget);
+		spin_lock(&dwc->lock);
+	}
 }
 
 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
@@ -2931,6 +2936,9 @@
 
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
+	if (!dwc->gadget_driver)
+		return 0;
+
 	if (dwc->pullups_connected) {
 		dwc3_gadget_disable_irq(dwc);
 		dwc3_gadget_run_stop(dwc, true, true);
@@ -2949,6 +2957,9 @@
 	struct dwc3_ep		*dep;
 	int			ret;
 
+	if (!dwc->gadget_driver)
+		return 0;
+
 	/* Start with SuperSpeed Default */
 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index a5c6209..524e233 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -651,12 +651,15 @@
 		ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
 		ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
 		ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
+		ssp_cap->bReserved = 0;
+		ssp_cap->wReserved = 0;
 
 		/* SSAC = 1 (2 attributes) */
 		ssp_cap->bmAttributes = cpu_to_le32(1);
 
 		/* Min RX/TX Lane Count = 1 */
-		ssp_cap->wFunctionalitySupport = (1 << 8) | (1 << 12);
+		ssp_cap->wFunctionalitySupport =
+			cpu_to_le16((1 << 8) | (1 << 12));
 
 		/*
 		 * bmSublinkSpeedAttr[0]:
@@ -666,7 +669,7 @@
 		 *   LSM = 10 (10 Gbps)
 		 */
 		ssp_cap->bmSublinkSpeedAttr[0] =
-			(3 << 4) | (1 << 14) | (0xa << 16);
+			cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
 		/*
 		 * bmSublinkSpeedAttr[1] =
 		 *   ST  = Symmetric, TX
@@ -675,7 +678,8 @@
 		 *   LSM = 10 (10 Gbps)
 		 */
 		ssp_cap->bmSublinkSpeedAttr[1] =
-			(3 << 4) | (1 << 14) | (0xa << 16) | (1 << 7);
+			cpu_to_le32((3 << 4) | (1 << 14) |
+				    (0xa << 16) | (1 << 7));
 	}
 
 	return le16_to_cpu(bos->wTotalLength);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8cfce10..15b648c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -646,6 +646,7 @@
 						   work);
 	int ret = io_data->req->status ? io_data->req->status :
 					 io_data->req->actual;
+	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
 
 	if (io_data->read && ret > 0) {
 		use_mm(io_data->mm);
@@ -657,13 +658,11 @@
 
 	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
 
-	if (io_data->ffs->ffs_eventfd &&
-	    !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
+	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
 		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
 
 	usb_ep_free_request(io_data->ep, io_data->req);
 
-	io_data->kiocb->private = NULL;
 	if (io_data->read)
 		kfree(io_data->to_free);
 	kfree(io_data->buf);
@@ -1147,8 +1146,8 @@
 	ffs->sb              = sb;
 	data->ffs_data       = NULL;
 	sb->s_fs_info        = ffs;
-	sb->s_blocksize      = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize      = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic          = FUNCTIONFS_MAGIC;
 	sb->s_op             = &ffs_sb_operations;
 	sb->s_time_gran      = 1;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 84c0ee5..58fc199 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/kfifo.h>
+#include <linux/spinlock.h>
 
 #include <sound/core.h>
 #include <sound/initval.h>
@@ -89,6 +90,7 @@
 	unsigned int buflen, qlen;
 	/* This fifo is used as a buffer ring for pre-allocated IN usb_requests */
 	DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *);
+	spinlock_t transmit_lock;
 	unsigned int in_last_port;
 
 	struct gmidi_in_port	in_ports_array[/* in_ports */];
@@ -358,7 +360,9 @@
 	/* allocate a bunch of read buffers and queue them all at once. */
 	for (i = 0; i < midi->qlen && err == 0; i++) {
 		struct usb_request *req =
-			midi_alloc_ep_req(midi->out_ep, midi->buflen);
+			midi_alloc_ep_req(midi->out_ep,
+				max_t(unsigned, midi->buflen,
+					bulk_out_desc.wMaxPacketSize));
 		if (req == NULL)
 			return -ENOMEM;
 
@@ -597,17 +601,24 @@
 {
 	struct usb_ep *ep = midi->in_ep;
 	int ret;
+	unsigned long flags;
 
 	/* We only care about USB requests if IN endpoint is enabled */
 	if (!ep || !ep->enabled)
 		goto drop_out;
 
+	spin_lock_irqsave(&midi->transmit_lock, flags);
+
 	do {
 		ret = f_midi_do_transmit(midi, ep);
-		if (ret < 0)
+		if (ret < 0) {
+			spin_unlock_irqrestore(&midi->transmit_lock, flags);
 			goto drop_out;
+		}
 	} while (ret);
 
+	spin_unlock_irqrestore(&midi->transmit_lock, flags);
+
 	return;
 
 drop_out:
@@ -1201,6 +1212,8 @@
 	if (status)
 		goto setup_fail;
 
+	spin_lock_init(&midi->transmit_lock);
+
 	++opts->refcnt;
 	mutex_unlock(&opts->lock);
 
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 637809e..a3f7e7c 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -597,7 +597,7 @@
 		DBG(dev, "tx queue err %d\n", retval);
 		break;
 	case 0:
-		net->trans_start = jiffies;
+		netif_trans_update(net);
 		atomic_inc(&dev->tx_qlen);
 	}
 
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 5cdaf01..e64479f 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1954,8 +1954,8 @@
 		return -ENODEV;
 
 	/* superblock */
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = GADGETFS_MAGIC;
 	sb->s_op = &gadget_fs_operations;
 	sb->s_time_gran = 1;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 81d42cc..18569de 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1045,20 +1045,6 @@
 		list_del_init(&req->queue);
 		request_complete(ep, req, -ECONNRESET);
 	}
-
-	/* NOTE:  normally, the next call to the gadget driver is in
-	 * charge of disabling endpoints... usually disconnect().
-	 * The exception would be entering a high speed test mode.
-	 *
-	 * FIXME remove this code ... and retest thoroughly.
-	 */
-	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
-		if (ep->ep.desc) {
-			spin_unlock(&udc->lock);
-			usba_ep_disable(&ep->ep);
-			spin_lock(&udc->lock);
-		}
-	}
 }
 
 static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 4151597..e4e70e1 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -371,12 +371,6 @@
 	INIT_WORK(&gadget->work, usb_gadget_state_work);
 	gadget->dev.parent = parent;
 
-#ifdef	CONFIG_HAS_DMA
-	dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
-	gadget->dev.dma_parms = parent->dma_parms;
-	gadget->dev.dma_mask = parent->dma_mask;
-#endif
-
 	if (release)
 		gadget->dev.release = release;
 	else
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 3050b18b..6acac62 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -69,6 +69,15 @@
 	  Say 'Y' to enable the support for the xHCI host controller
 	  found in Renesas R-Car ARM SoCs.
 
+config USB_XHCI_TEGRA
+	tristate "xHCI support for NVIDIA Tegra SoCs"
+	depends on PHY_TEGRA_XUSB
+	depends on RESET_CONTROLLER
+	select FW_LOADER
+	---help---
+	  Say 'Y' to enable the support for the xHCI host controller
+	  found in NVIDIA Tegra124 and later SoCs.
+
 endif # USB_XHCI_HCD
 
 config USB_EHCI_HCD
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index a9ddd3c..6ef785b 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -68,6 +68,7 @@
 obj-$(CONFIG_USB_XHCI_PCI)	+= xhci-pci.o
 obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
 obj-$(CONFIG_USB_XHCI_MTK)	+= xhci-mtk.o
+obj-$(CONFIG_USB_XHCI_TEGRA)	+= xhci-tegra.o
 obj-$(CONFIG_USB_SL811_HCD)	+= sl811-hcd.o
 obj-$(CONFIG_USB_SL811_CS)	+= sl811_cs.o
 obj-$(CONFIG_USB_U132_HCD)	+= u132-hcd.o
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 04dcedf..0449235 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1245,11 +1245,6 @@
 #define TMIO_OHCI_DRIVER	ohci_hcd_tmio_driver
 #endif
 
-#ifdef CONFIG_MACH_JZ4740
-#include "ohci-jz4740.c"
-#define PLATFORM_DRIVER	ohci_hcd_jz4740_driver
-#endif
-
 #ifdef CONFIG_TILE_USB
 #include "ohci-tilegx.c"
 #define PLATFORM_DRIVER		ohci_hcd_tilegx_driver
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
deleted file mode 100644
index 4db78f1..0000000
--- a/drivers/usb/host/ohci-jz4740.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the License, or (at your
- *  option) any later version.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-
-struct jz4740_ohci_hcd {
-	struct ohci_hcd ohci_hcd;
-
-	struct regulator *vbus;
-	bool vbus_enabled;
-	struct clk *clk;
-};
-
-static inline struct jz4740_ohci_hcd *hcd_to_jz4740_hcd(struct usb_hcd *hcd)
-{
-	return (struct jz4740_ohci_hcd *)(hcd->hcd_priv);
-}
-
-static inline struct usb_hcd *jz4740_hcd_to_hcd(struct jz4740_ohci_hcd *jz4740_ohci)
-{
-	return container_of((void *)jz4740_ohci, struct usb_hcd, hcd_priv);
-}
-
-static int ohci_jz4740_start(struct usb_hcd *hcd)
-{
-	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-	int	ret;
-
-	ret = ohci_init(ohci);
-	if (ret < 0)
-		return ret;
-
-	ohci->num_ports = 1;
-
-	ret = ohci_run(ohci);
-	if (ret < 0) {
-		dev_err(hcd->self.controller, "Can not start %s",
-			hcd->self.bus_name);
-		ohci_stop(hcd);
-		return ret;
-	}
-	return 0;
-}
-
-static int ohci_jz4740_set_vbus_power(struct jz4740_ohci_hcd *jz4740_ohci,
-	bool enabled)
-{
-	int ret = 0;
-
-	if (!jz4740_ohci->vbus)
-		return 0;
-
-	if (enabled && !jz4740_ohci->vbus_enabled) {
-		ret = regulator_enable(jz4740_ohci->vbus);
-		if (ret)
-			dev_err(jz4740_hcd_to_hcd(jz4740_ohci)->self.controller,
-				"Could not power vbus\n");
-	} else if (!enabled && jz4740_ohci->vbus_enabled) {
-		ret = regulator_disable(jz4740_ohci->vbus);
-	}
-
-	if (ret == 0)
-		jz4740_ohci->vbus_enabled = enabled;
-
-	return ret;
-}
-
-static int ohci_jz4740_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
-	u16 wIndex, char *buf, u16 wLength)
-{
-	struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
-	int ret = 0;
-
-	switch (typeReq) {
-	case SetPortFeature:
-		if (wValue == USB_PORT_FEAT_POWER)
-			ret = ohci_jz4740_set_vbus_power(jz4740_ohci, true);
-		break;
-	case ClearPortFeature:
-		if (wValue == USB_PORT_FEAT_POWER)
-			ret = ohci_jz4740_set_vbus_power(jz4740_ohci, false);
-		break;
-	}
-
-	if (ret)
-		return ret;
-
-	return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
-}
-
-
-static const struct hc_driver ohci_jz4740_hc_driver = {
-	.description =		hcd_name,
-	.product_desc =		"JZ4740 OHCI",
-	.hcd_priv_size =	sizeof(struct jz4740_ohci_hcd),
-
-	/*
-	 * generic hardware linkage
-	 */
-	.irq =			ohci_irq,
-	.flags =		HCD_USB11 | HCD_MEMORY,
-
-	/*
-	 * basic lifecycle operations
-	 */
-	.start =		ohci_jz4740_start,
-	.stop =			ohci_stop,
-	.shutdown =		ohci_shutdown,
-
-	/*
-	 * managing i/o requests and associated device resources
-	 */
-	.urb_enqueue =		ohci_urb_enqueue,
-	.urb_dequeue =		ohci_urb_dequeue,
-	.endpoint_disable =	ohci_endpoint_disable,
-
-	/*
-	 * scheduling support
-	 */
-	.get_frame_number =	ohci_get_frame,
-
-	/*
-	 * root hub support
-	 */
-	.hub_status_data =	ohci_hub_status_data,
-	.hub_control =		ohci_jz4740_hub_control,
-#ifdef	CONFIG_PM
-	.bus_suspend =		ohci_bus_suspend,
-	.bus_resume =		ohci_bus_resume,
-#endif
-	.start_port_reset =	ohci_start_port_reset,
-};
-
-
-static int jz4740_ohci_probe(struct platform_device *pdev)
-{
-	int ret;
-	struct usb_hcd *hcd;
-	struct jz4740_ohci_hcd *jz4740_ohci;
-	struct resource *res;
-	int irq;
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "Failed to get platform irq\n");
-		return irq;
-	}
-
-	hcd = usb_create_hcd(&ohci_jz4740_hc_driver, &pdev->dev, "jz4740");
-	if (!hcd) {
-		dev_err(&pdev->dev, "Failed to create hcd.\n");
-		return -ENOMEM;
-	}
-
-	jz4740_ohci = hcd_to_jz4740_hcd(hcd);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(hcd->regs)) {
-		ret = PTR_ERR(hcd->regs);
-		goto err_free;
-	}
-	hcd->rsrc_start = res->start;
-	hcd->rsrc_len = resource_size(res);
-
-	jz4740_ohci->clk = devm_clk_get(&pdev->dev, "uhc");
-	if (IS_ERR(jz4740_ohci->clk)) {
-		ret = PTR_ERR(jz4740_ohci->clk);
-		dev_err(&pdev->dev, "Failed to get clock: %d\n", ret);
-		goto err_free;
-	}
-
-	jz4740_ohci->vbus = devm_regulator_get(&pdev->dev, "vbus");
-	if (IS_ERR(jz4740_ohci->vbus))
-		jz4740_ohci->vbus = NULL;
-
-
-	clk_set_rate(jz4740_ohci->clk, 48000000);
-	clk_enable(jz4740_ohci->clk);
-	if (jz4740_ohci->vbus)
-		ohci_jz4740_set_vbus_power(jz4740_ohci, true);
-
-	platform_set_drvdata(pdev, hcd);
-
-	ohci_hcd_init(hcd_to_ohci(hcd));
-
-	ret = usb_add_hcd(hcd, irq, 0);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to add hcd: %d\n", ret);
-		goto err_disable;
-	}
-	device_wakeup_enable(hcd->self.controller);
-
-	return 0;
-
-err_disable:
-	if (jz4740_ohci->vbus)
-		regulator_disable(jz4740_ohci->vbus);
-	clk_disable(jz4740_ohci->clk);
-
-err_free:
-	usb_put_hcd(hcd);
-
-	return ret;
-}
-
-static int jz4740_ohci_remove(struct platform_device *pdev)
-{
-	struct usb_hcd *hcd = platform_get_drvdata(pdev);
-	struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
-
-	usb_remove_hcd(hcd);
-
-	if (jz4740_ohci->vbus)
-		regulator_disable(jz4740_ohci->vbus);
-
-	clk_disable(jz4740_ohci->clk);
-
-	usb_put_hcd(hcd);
-
-	return 0;
-}
-
-static struct platform_driver ohci_hcd_jz4740_driver = {
-	.probe = jz4740_ohci_probe,
-	.remove = jz4740_ohci_remove,
-	.driver = {
-		.name = "jz4740-ohci",
-	},
-};
-
-MODULE_ALIAS("platform:jz4740-ohci");
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 80c1de2..bad0d1f 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1861,6 +1861,12 @@
 	kfree(xhci->rh_bw);
 	kfree(xhci->ext_caps);
 
+	xhci->usb2_ports = NULL;
+	xhci->usb3_ports = NULL;
+	xhci->port_array = NULL;
+	xhci->rh_bw = NULL;
+	xhci->ext_caps = NULL;
+
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
 	xhci->bus_state[0].bus_suspended = 0;
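
The xhci-mem.c hunk above clears the just-freed pointers so that a later cleanup or re-initialization sees NULL instead of stale addresses. A hedged sketch of the idea, using a hypothetical container struct:

    #include <linux/slab.h>

    struct port_arrays {
    	void *usb2_ports;
    	void *usb3_ports;
    };

    static void port_arrays_free(struct port_arrays *p)
    {
    	kfree(p->usb2_ports);
    	p->usb2_ports = NULL;	/* kfree(NULL) and !p->usb2_ports stay safe */

    	kfree(p->usb3_ports);
    	p->usb3_ports = NULL;
    }
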
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f0640b7..48672fa 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -155,7 +156,8 @@
 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
-		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -302,6 +304,7 @@
 	struct xhci_hcd *xhci;
 
 	xhci = hcd_to_xhci(pci_get_drvdata(dev));
+	xhci->xhc_state |= XHCI_STATE_REMOVING;
 	if (xhci->shared_hcd) {
 		usb_remove_hcd(xhci->shared_hcd);
 		usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 5c15e9b..474b5fa 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -39,12 +39,25 @@
 
 static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
 {
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
 	/*
 	 * As of now platform drivers don't provide MSI support so we ensure
 	 * here that the generic code does not try to make a pci_dev from our
 	 * dev struct in order to setup MSI
 	 */
 	xhci->quirks |= XHCI_PLAT;
+
+	/*
+	 * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+	 * to 1. However, these SoCs don't support 64-bit address memory
+	 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+	 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+	 * xhci_gen_setup().
+	 */
+	if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
+	    xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
+		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
 }
 
 /* called during probe() after chip reset completes */
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 5a2e2e3..529c3c4 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -14,7 +14,7 @@
 #include "xhci.h"	/* for hcd_to_xhci() */
 
 enum xhci_plat_type {
-	XHCI_PLAT_TYPE_MARVELL_ARMADA,
+	XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
 	XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
 	XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
 };
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7cf6621..99b4ff4 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -4004,7 +4004,8 @@
 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
 	int ret;
 
-	if (xhci->xhc_state) {
+	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+		(xhci->xhc_state & XHCI_STATE_HALTED)) {
 		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
 		return -ESHUTDOWN;
 	}
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
new file mode 100644
index 0000000..0f53ae0
--- /dev/null
+++ b/drivers/usb/host/xhci-tegra.c
@@ -0,0 +1,1331 @@
+/*
+ * NVIDIA Tegra xHCI host controller driver
+ *
+ * Copyright (C) 2014 NVIDIA Corporation
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/tegra/xusb.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "xhci.h"
+
+#define TEGRA_XHCI_SS_HIGH_SPEED 120000000
+#define TEGRA_XHCI_SS_LOW_SPEED   12000000
+
+/* FPCI CFG registers */
+#define XUSB_CFG_1				0x004
+#define  XUSB_IO_SPACE_EN			BIT(0)
+#define  XUSB_MEM_SPACE_EN			BIT(1)
+#define  XUSB_BUS_MASTER_EN			BIT(2)
+#define XUSB_CFG_4				0x010
+#define  XUSB_BASE_ADDR_SHIFT			15
+#define  XUSB_BASE_ADDR_MASK			0x1ffff
+#define XUSB_CFG_ARU_C11_CSBRANGE		0x41c
+#define XUSB_CFG_CSB_BASE_ADDR			0x800
+
+/* FPCI mailbox registers */
+#define XUSB_CFG_ARU_MBOX_CMD			0x0e4
+#define  MBOX_DEST_FALC				BIT(27)
+#define  MBOX_DEST_PME				BIT(28)
+#define  MBOX_DEST_SMI				BIT(29)
+#define  MBOX_DEST_XHCI				BIT(30)
+#define  MBOX_INT_EN				BIT(31)
+#define XUSB_CFG_ARU_MBOX_DATA_IN		0x0e8
+#define  CMD_DATA_SHIFT				0
+#define  CMD_DATA_MASK				0xffffff
+#define  CMD_TYPE_SHIFT				24
+#define  CMD_TYPE_MASK				0xff
+#define XUSB_CFG_ARU_MBOX_DATA_OUT		0x0ec
+#define XUSB_CFG_ARU_MBOX_OWNER			0x0f0
+#define  MBOX_OWNER_NONE			0
+#define  MBOX_OWNER_FW				1
+#define  MBOX_OWNER_SW				2
+#define XUSB_CFG_ARU_SMI_INTR			0x428
+#define  MBOX_SMI_INTR_FW_HANG			BIT(1)
+#define  MBOX_SMI_INTR_EN			BIT(3)
+
+/* IPFS registers */
+#define IPFS_XUSB_HOST_CONFIGURATION_0		0x180
+#define  IPFS_EN_FPCI				BIT(0)
+#define IPFS_XUSB_HOST_INTR_MASK_0		0x188
+#define  IPFS_IP_INT_MASK			BIT(16)
+#define IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0	0x1bc
+
+#define CSB_PAGE_SELECT_MASK			0x7fffff
+#define CSB_PAGE_SELECT_SHIFT			9
+#define CSB_PAGE_OFFSET_MASK			0x1ff
+#define CSB_PAGE_SELECT(addr)	((addr) >> (CSB_PAGE_SELECT_SHIFT) &	\
+				 CSB_PAGE_SELECT_MASK)
+#define CSB_PAGE_OFFSET(addr)	((addr) & CSB_PAGE_OFFSET_MASK)
+
+/* Falcon CSB registers */
+#define XUSB_FALC_CPUCTL			0x100
+#define  CPUCTL_STARTCPU			BIT(1)
+#define  CPUCTL_STATE_HALTED			BIT(4)
+#define  CPUCTL_STATE_STOPPED			BIT(5)
+#define XUSB_FALC_BOOTVEC			0x104
+#define XUSB_FALC_DMACTL			0x10c
+#define XUSB_FALC_IMFILLRNG1			0x154
+#define  IMFILLRNG1_TAG_MASK			0xffff
+#define  IMFILLRNG1_TAG_LO_SHIFT		0
+#define  IMFILLRNG1_TAG_HI_SHIFT		16
+#define XUSB_FALC_IMFILLCTL			0x158
+
+/* MP CSB registers */
+#define XUSB_CSB_MP_ILOAD_ATTR			0x101a00
+#define XUSB_CSB_MP_ILOAD_BASE_LO		0x101a04
+#define XUSB_CSB_MP_ILOAD_BASE_HI		0x101a08
+#define XUSB_CSB_MP_L2IMEMOP_SIZE		0x101a10
+#define  L2IMEMOP_SIZE_SRC_OFFSET_SHIFT		8
+#define  L2IMEMOP_SIZE_SRC_OFFSET_MASK		0x3ff
+#define  L2IMEMOP_SIZE_SRC_COUNT_SHIFT		24
+#define  L2IMEMOP_SIZE_SRC_COUNT_MASK		0xff
+#define XUSB_CSB_MP_L2IMEMOP_TRIG		0x101a14
+#define  L2IMEMOP_ACTION_SHIFT			24
+#define  L2IMEMOP_INVALIDATE_ALL		(0x40 << L2IMEMOP_ACTION_SHIFT)
+#define  L2IMEMOP_LOAD_LOCKED_RESULT		(0x11 << L2IMEMOP_ACTION_SHIFT)
+#define XUSB_CSB_MP_APMAP			0x10181c
+#define  APMAP_BOOTPATH				BIT(31)
+
+#define IMEM_BLOCK_SIZE				256
+
+struct tegra_xusb_fw_header {
+	u32 boot_loadaddr_in_imem;
+	u32 boot_codedfi_offset;
+	u32 boot_codetag;
+	u32 boot_codesize;
+	u32 phys_memaddr;
+	u16 reqphys_memsize;
+	u16 alloc_phys_memsize;
+	u32 rodata_img_offset;
+	u32 rodata_section_start;
+	u32 rodata_section_end;
+	u32 main_fnaddr;
+	u32 fwimg_cksum;
+	u32 fwimg_created_time;
+	u32 imem_resident_start;
+	u32 imem_resident_end;
+	u32 idirect_start;
+	u32 idirect_end;
+	u32 l2_imem_start;
+	u32 l2_imem_end;
+	u32 version_id;
+	u8 init_ddirect;
+	u8 reserved[3];
+	u32 phys_addr_log_buffer;
+	u32 total_log_entries;
+	u32 dequeue_ptr;
+	u32 dummy_var[2];
+	u32 fwimg_len;
+	u8 magic[8];
+	u32 ss_low_power_entry_timeout;
+	u8 num_hsic_port;
+	u8 padding[139]; /* Pad to 256 bytes */
+};
+
+struct tegra_xusb_phy_type {
+	const char *name;
+	unsigned int num;
+};
+
+struct tegra_xusb_soc {
+	const char *firmware;
+	const char * const *supply_names;
+	unsigned int num_supplies;
+	const struct tegra_xusb_phy_type *phy_types;
+	unsigned int num_types;
+
+	struct {
+		struct {
+			unsigned int offset;
+			unsigned int count;
+		} usb2, ulpi, hsic, usb3;
+	} ports;
+
+	bool scale_ss_clock;
+};
+
+struct tegra_xusb {
+	struct device *dev;
+	void __iomem *regs;
+	struct usb_hcd *hcd;
+
+	struct mutex lock;
+
+	int xhci_irq;
+	int mbox_irq;
+
+	void __iomem *ipfs_base;
+	void __iomem *fpci_base;
+
+	const struct tegra_xusb_soc *soc;
+
+	struct regulator_bulk_data *supplies;
+
+	struct tegra_xusb_padctl *padctl;
+
+	struct clk *host_clk;
+	struct clk *falcon_clk;
+	struct clk *ss_clk;
+	struct clk *ss_src_clk;
+	struct clk *hs_src_clk;
+	struct clk *fs_src_clk;
+	struct clk *pll_u_480m;
+	struct clk *clk_m;
+	struct clk *pll_e;
+
+	struct reset_control *host_rst;
+	struct reset_control *ss_rst;
+
+	struct phy **phys;
+	unsigned int num_phys;
+
+	/* Firmware loading related */
+	struct {
+		size_t size;
+		void *virt;
+		dma_addr_t phys;
+	} fw;
+};
+
+static struct hc_driver __read_mostly tegra_xhci_hc_driver;
+
+static inline u32 fpci_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+	return readl(tegra->fpci_base + offset);
+}
+
+static inline void fpci_writel(struct tegra_xusb *tegra, u32 value,
+			       unsigned int offset)
+{
+	writel(value, tegra->fpci_base + offset);
+}
+
+static inline u32 ipfs_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+	return readl(tegra->ipfs_base + offset);
+}
+
+static inline void ipfs_writel(struct tegra_xusb *tegra, u32 value,
+			       unsigned int offset)
+{
+	writel(value, tegra->ipfs_base + offset);
+}
+
+static u32 csb_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+	u32 page = CSB_PAGE_SELECT(offset);
+	u32 ofs = CSB_PAGE_OFFSET(offset);
+
+	fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
+
+	return fpci_readl(tegra, XUSB_CFG_CSB_BASE_ADDR + ofs);
+}
+
+static void csb_writel(struct tegra_xusb *tegra, u32 value,
+		       unsigned int offset)
+{
+	u32 page = CSB_PAGE_SELECT(offset);
+	u32 ofs = CSB_PAGE_OFFSET(offset);
+
+	fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
+	fpci_writel(tegra, value, XUSB_CFG_CSB_BASE_ADDR + ofs);
+}
+
+static int tegra_xusb_set_ss_clk(struct tegra_xusb *tegra,
+				 unsigned long rate)
+{
+	unsigned long new_parent_rate, old_parent_rate;
+	struct clk *clk = tegra->ss_src_clk;
+	unsigned int div;
+	int err;
+
+	if (clk_get_rate(clk) == rate)
+		return 0;
+
+	switch (rate) {
+	case TEGRA_XHCI_SS_HIGH_SPEED:
+		/*
+		 * Reparent to PLLU_480M. Set divider first to avoid
+		 * overclocking.
+		 */
+		old_parent_rate = clk_get_rate(clk_get_parent(clk));
+		new_parent_rate = clk_get_rate(tegra->pll_u_480m);
+		div = new_parent_rate / rate;
+
+		err = clk_set_rate(clk, old_parent_rate / div);
+		if (err)
+			return err;
+
+		err = clk_set_parent(clk, tegra->pll_u_480m);
+		if (err)
+			return err;
+
+		/*
+		 * The rate should already be correct, but set it again just
+		 * to be sure.
+		 */
+		err = clk_set_rate(clk, rate);
+		if (err)
+			return err;
+
+		break;
+
+	case TEGRA_XHCI_SS_LOW_SPEED:
+		/* Reparent to CLK_M */
+		err = clk_set_parent(clk, tegra->clk_m);
+		if (err)
+			return err;
+
+		err = clk_set_rate(clk, rate);
+		if (err)
+			return err;
+
+		break;
+
+	default:
+		dev_err(tegra->dev, "Invalid SS rate: %lu Hz\n", rate);
+		return -EINVAL;
+	}
+
+	if (clk_get_rate(clk) != rate) {
+		dev_err(tegra->dev, "SS clock doesn't match requested rate\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned long extract_field(u32 value, unsigned int start,
+				   unsigned int count)
+{
+	return (value >> start) & ((1 << count) - 1);
+}
+
+/* Command requests from the firmware */
+enum tegra_xusb_mbox_cmd {
+	MBOX_CMD_MSG_ENABLED = 1,
+	MBOX_CMD_INC_FALC_CLOCK,
+	MBOX_CMD_DEC_FALC_CLOCK,
+	MBOX_CMD_INC_SSPI_CLOCK,
+	MBOX_CMD_DEC_SSPI_CLOCK,
+	MBOX_CMD_SET_BW, /* no ACK/NAK required */
+	MBOX_CMD_SET_SS_PWR_GATING,
+	MBOX_CMD_SET_SS_PWR_UNGATING,
+	MBOX_CMD_SAVE_DFE_CTLE_CTX,
+	MBOX_CMD_AIRPLANE_MODE_ENABLED, /* unused */
+	MBOX_CMD_AIRPLANE_MODE_DISABLED, /* unused */
+	MBOX_CMD_START_HSIC_IDLE,
+	MBOX_CMD_STOP_HSIC_IDLE,
+	MBOX_CMD_DBC_WAKE_STACK, /* unused */
+	MBOX_CMD_HSIC_PRETEND_CONNECT,
+	MBOX_CMD_RESET_SSPI,
+	MBOX_CMD_DISABLE_SS_LFPS_DETECTION,
+	MBOX_CMD_ENABLE_SS_LFPS_DETECTION,
+
+	MBOX_CMD_MAX,
+
+	/* Response message to above commands */
+	MBOX_CMD_ACK = 128,
+	MBOX_CMD_NAK
+};
+
+static const char * const mbox_cmd_name[] = {
+	[  1] = "MSG_ENABLE",
+	[  2] = "INC_FALCON_CLOCK",
+	[  3] = "DEC_FALCON_CLOCK",
+	[  4] = "INC_SSPI_CLOCK",
+	[  5] = "DEC_SSPI_CLOCK",
+	[  6] = "SET_BW",
+	[  7] = "SET_SS_PWR_GATING",
+	[  8] = "SET_SS_PWR_UNGATING",
+	[  9] = "SAVE_DFE_CTLE_CTX",
+	[ 10] = "AIRPLANE_MODE_ENABLED",
+	[ 11] = "AIRPLANE_MODE_DISABLED",
+	[ 12] = "START_HSIC_IDLE",
+	[ 13] = "STOP_HSIC_IDLE",
+	[ 14] = "DBC_WAKE_STACK",
+	[ 15] = "HSIC_PRETEND_CONNECT",
+	[ 16] = "RESET_SSPI",
+	[ 17] = "DISABLE_SS_LFPS_DETECTION",
+	[ 18] = "ENABLE_SS_LFPS_DETECTION",
+	[128] = "ACK",
+	[129] = "NAK",
+};
+
+struct tegra_xusb_mbox_msg {
+	u32 cmd;
+	u32 data;
+};
+
+static inline u32 tegra_xusb_mbox_pack(const struct tegra_xusb_mbox_msg *msg)
+{
+	return (msg->cmd & CMD_TYPE_MASK) << CMD_TYPE_SHIFT |
+	       (msg->data & CMD_DATA_MASK) << CMD_DATA_SHIFT;
+}
+static inline void tegra_xusb_mbox_unpack(struct tegra_xusb_mbox_msg *msg,
+					  u32 value)
+{
+	msg->cmd = (value >> CMD_TYPE_SHIFT) & CMD_TYPE_MASK;
+	msg->data = (value >> CMD_DATA_SHIFT) & CMD_DATA_MASK;
+}
+
+static bool tegra_xusb_mbox_cmd_requires_ack(enum tegra_xusb_mbox_cmd cmd)
+{
+	switch (cmd) {
+	case MBOX_CMD_SET_BW:
+	case MBOX_CMD_ACK:
+	case MBOX_CMD_NAK:
+		return false;
+
+	default:
+		return true;
+	}
+}
+
+static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
+				const struct tegra_xusb_mbox_msg *msg)
+{
+	bool wait_for_idle = false;
+	u32 value;
+
+	/*
+	 * Acquire the mailbox. The firmware still owns the mailbox for
+	 * ACK/NAK messages.
+	 */
+	if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
+		value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+		if (value != MBOX_OWNER_NONE) {
+			dev_err(tegra->dev, "mailbox is busy\n");
+			return -EBUSY;
+		}
+
+		fpci_writel(tegra, MBOX_OWNER_SW, XUSB_CFG_ARU_MBOX_OWNER);
+
+		value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+		if (value != MBOX_OWNER_SW) {
+			dev_err(tegra->dev, "failed to acquire mailbox\n");
+			return -EBUSY;
+		}
+
+		wait_for_idle = true;
+	}
+
+	value = tegra_xusb_mbox_pack(msg);
+	fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_DATA_IN);
+
+	value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+	value |= MBOX_INT_EN | MBOX_DEST_FALC;
+	fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+
+	if (wait_for_idle) {
+		unsigned long timeout = jiffies + msecs_to_jiffies(250);
+
+		while (time_before(jiffies, timeout)) {
+			value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+			if (value == MBOX_OWNER_NONE)
+				break;
+
+			usleep_range(10, 20);
+		}
+
+		if (time_after(jiffies, timeout))
+			value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+
+		if (value != MBOX_OWNER_NONE)
+			return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static irqreturn_t tegra_xusb_mbox_irq(int irq, void *data)
+{
+	struct tegra_xusb *tegra = data;
+	u32 value;
+
+	/* clear mailbox interrupts */
+	value = fpci_readl(tegra, XUSB_CFG_ARU_SMI_INTR);
+	fpci_writel(tegra, value, XUSB_CFG_ARU_SMI_INTR);
+
+	if (value & MBOX_SMI_INTR_FW_HANG)
+		dev_err(tegra->dev, "controller firmware hang\n");
+
+	return IRQ_WAKE_THREAD;
+}
+
+static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
+				   const struct tegra_xusb_mbox_msg *msg)
+{
+	struct tegra_xusb_padctl *padctl = tegra->padctl;
+	const struct tegra_xusb_soc *soc = tegra->soc;
+	struct device *dev = tegra->dev;
+	struct tegra_xusb_mbox_msg rsp;
+	unsigned long mask;
+	unsigned int port;
+	bool idle, enable;
+	int err;
+
+	memset(&rsp, 0, sizeof(rsp));
+
+	switch (msg->cmd) {
+	case MBOX_CMD_INC_FALC_CLOCK:
+	case MBOX_CMD_DEC_FALC_CLOCK:
+		rsp.data = clk_get_rate(tegra->falcon_clk) / 1000;
+		if (rsp.data != msg->data)
+			rsp.cmd = MBOX_CMD_NAK;
+		else
+			rsp.cmd = MBOX_CMD_ACK;
+
+		break;
+
+	case MBOX_CMD_INC_SSPI_CLOCK:
+	case MBOX_CMD_DEC_SSPI_CLOCK:
+		if (tegra->soc->scale_ss_clock) {
+			err = tegra_xusb_set_ss_clk(tegra, msg->data * 1000);
+			if (err < 0)
+				rsp.cmd = MBOX_CMD_NAK;
+			else
+				rsp.cmd = MBOX_CMD_ACK;
+
+			rsp.data = clk_get_rate(tegra->ss_src_clk) / 1000;
+		} else {
+			rsp.cmd = MBOX_CMD_ACK;
+			rsp.data = msg->data;
+		}
+
+		break;
+
+	case MBOX_CMD_SET_BW:
+		/*
+		 * TODO: Request bandwidth once EMC scaling is supported.
+		 * Ignore for now since ACK/NAK is not required for SET_BW
+		 * messages.
+		 */
+		break;
+
+	case MBOX_CMD_SAVE_DFE_CTLE_CTX:
+		err = tegra_xusb_padctl_usb3_save_context(padctl, msg->data);
+		if (err < 0) {
+			dev_err(dev, "failed to save context for USB3#%u: %d\n",
+				msg->data, err);
+			rsp.cmd = MBOX_CMD_NAK;
+		} else {
+			rsp.cmd = MBOX_CMD_ACK;
+		}
+
+		rsp.data = msg->data;
+		break;
+
+	case MBOX_CMD_START_HSIC_IDLE:
+	case MBOX_CMD_STOP_HSIC_IDLE:
+		if (msg->cmd == MBOX_CMD_STOP_HSIC_IDLE)
+			idle = false;
+		else
+			idle = true;
+
+		mask = extract_field(msg->data, 1 + soc->ports.hsic.offset,
+				     soc->ports.hsic.count);
+
+		for_each_set_bit(port, &mask, 32) {
+			err = tegra_xusb_padctl_hsic_set_idle(padctl, port,
+							      idle);
+			if (err < 0)
+				break;
+		}
+
+		if (err < 0) {
+			dev_err(dev, "failed to set HSIC#%u %s: %d\n", port,
+				idle ? "idle" : "busy", err);
+			rsp.cmd = MBOX_CMD_NAK;
+		} else {
+			rsp.cmd = MBOX_CMD_ACK;
+		}
+
+		rsp.data = msg->data;
+		break;
+
+	case MBOX_CMD_DISABLE_SS_LFPS_DETECTION:
+	case MBOX_CMD_ENABLE_SS_LFPS_DETECTION:
+		if (msg->cmd == MBOX_CMD_DISABLE_SS_LFPS_DETECTION)
+			enable = false;
+		else
+			enable = true;
+
+		mask = extract_field(msg->data, 1 + soc->ports.usb3.offset,
+				     soc->ports.usb3.count);
+
+		for_each_set_bit(port, &mask, soc->ports.usb3.count) {
+			err = tegra_xusb_padctl_usb3_set_lfps_detect(padctl,
+								     port,
+								     enable);
+			if (err < 0)
+				break;
+		}
+
+		if (err < 0) {
+			dev_err(dev,
+				"failed to %s LFPS detection on USB3#%u: %d\n",
+				enable ? "enable" : "disable", port, err);
+			rsp.cmd = MBOX_CMD_NAK;
+		} else {
+			rsp.cmd = MBOX_CMD_ACK;
+		}
+
+		rsp.data = msg->data;
+		break;
+
+	default:
+		dev_warn(dev, "unknown message: %#x\n", msg->cmd);
+		break;
+	}
+
+	if (rsp.cmd) {
+		const char *cmd = (rsp.cmd == MBOX_CMD_ACK) ? "ACK" : "NAK";
+
+		err = tegra_xusb_mbox_send(tegra, &rsp);
+		if (err < 0)
+			dev_err(dev, "failed to send %s: %d\n", cmd, err);
+	}
+}
+
+static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
+{
+	struct tegra_xusb *tegra = data;
+	struct tegra_xusb_mbox_msg msg;
+	u32 value;
+
+	mutex_lock(&tegra->lock);
+
+	value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_DATA_OUT);
+	tegra_xusb_mbox_unpack(&msg, value);
+
+	value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+	value &= ~MBOX_DEST_SMI;
+	fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+
+	/* clear mailbox owner if no ACK/NAK is required */
+	if (!tegra_xusb_mbox_cmd_requires_ack(msg.cmd))
+		fpci_writel(tegra, MBOX_OWNER_NONE, XUSB_CFG_ARU_MBOX_OWNER);
+
+	tegra_xusb_mbox_handle(tegra, &msg);
+
+	mutex_unlock(&tegra->lock);
+	return IRQ_HANDLED;
+}
+
+static void tegra_xusb_ipfs_config(struct tegra_xusb *tegra,
+				   struct resource *regs)
+{
+	u32 value;
+
+	value = ipfs_readl(tegra, IPFS_XUSB_HOST_CONFIGURATION_0);
+	value |= IPFS_EN_FPCI;
+	ipfs_writel(tegra, value, IPFS_XUSB_HOST_CONFIGURATION_0);
+
+	usleep_range(10, 20);
+
+	/* Program BAR0 space */
+	value = fpci_readl(tegra, XUSB_CFG_4);
+	value &= ~(XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
+	value |= regs->start & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
+	fpci_writel(tegra, value, XUSB_CFG_4);
+
+	usleep_range(100, 200);
+
+	/* Enable bus master */
+	value = fpci_readl(tegra, XUSB_CFG_1);
+	value |= XUSB_IO_SPACE_EN | XUSB_MEM_SPACE_EN | XUSB_BUS_MASTER_EN;
+	fpci_writel(tegra, value, XUSB_CFG_1);
+
+	/* Enable interrupt assertion */
+	value = ipfs_readl(tegra, IPFS_XUSB_HOST_INTR_MASK_0);
+	value |= IPFS_IP_INT_MASK;
+	ipfs_writel(tegra, value, IPFS_XUSB_HOST_INTR_MASK_0);
+
+	/* Set hysteresis */
+	ipfs_writel(tegra, 0x80, IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0);
+}
+
+static int tegra_xusb_clk_enable(struct tegra_xusb *tegra)
+{
+	int err;
+
+	err = clk_prepare_enable(tegra->pll_e);
+	if (err < 0)
+		return err;
+
+	err = clk_prepare_enable(tegra->host_clk);
+	if (err < 0)
+		goto disable_plle;
+
+	err = clk_prepare_enable(tegra->ss_clk);
+	if (err < 0)
+		goto disable_host;
+
+	err = clk_prepare_enable(tegra->falcon_clk);
+	if (err < 0)
+		goto disable_ss;
+
+	err = clk_prepare_enable(tegra->fs_src_clk);
+	if (err < 0)
+		goto disable_falc;
+
+	err = clk_prepare_enable(tegra->hs_src_clk);
+	if (err < 0)
+		goto disable_fs_src;
+
+	if (tegra->soc->scale_ss_clock) {
+		err = tegra_xusb_set_ss_clk(tegra, TEGRA_XHCI_SS_HIGH_SPEED);
+		if (err < 0)
+			goto disable_hs_src;
+	}
+
+	return 0;
+
+disable_hs_src:
+	clk_disable_unprepare(tegra->hs_src_clk);
+disable_fs_src:
+	clk_disable_unprepare(tegra->fs_src_clk);
+disable_falc:
+	clk_disable_unprepare(tegra->falcon_clk);
+disable_ss:
+	clk_disable_unprepare(tegra->ss_clk);
+disable_host:
+	clk_disable_unprepare(tegra->host_clk);
+disable_plle:
+	clk_disable_unprepare(tegra->pll_e);
+	return err;
+}
+
+static void tegra_xusb_clk_disable(struct tegra_xusb *tegra)
+{
+	clk_disable_unprepare(tegra->pll_e);
+	clk_disable_unprepare(tegra->host_clk);
+	clk_disable_unprepare(tegra->ss_clk);
+	clk_disable_unprepare(tegra->falcon_clk);
+	clk_disable_unprepare(tegra->fs_src_clk);
+	clk_disable_unprepare(tegra->hs_src_clk);
+}
+
+static int tegra_xusb_phy_enable(struct tegra_xusb *tegra)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < tegra->num_phys; i++) {
+		err = phy_init(tegra->phys[i]);
+		if (err)
+			goto disable_phy;
+
+		err = phy_power_on(tegra->phys[i]);
+		if (err) {
+			phy_exit(tegra->phys[i]);
+			goto disable_phy;
+		}
+	}
+
+	return 0;
+
+disable_phy:
+	while (i--) {
+		phy_power_off(tegra->phys[i]);
+		phy_exit(tegra->phys[i]);
+	}
+
+	return err;
+}
+
+static void tegra_xusb_phy_disable(struct tegra_xusb *tegra)
+{
+	unsigned int i;
+
+	for (i = 0; i < tegra->num_phys; i++) {
+		phy_power_off(tegra->phys[i]);
+		phy_exit(tegra->phys[i]);
+	}
+}
+
+static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+{
+	unsigned int code_tag_blocks, code_size_blocks, code_blocks;
+	struct tegra_xusb_fw_header *header;
+	struct device *dev = tegra->dev;
+	const struct firmware *fw;
+	unsigned long timeout;
+	time_t timestamp;
+	struct tm time;
+	u64 address;
+	u32 value;
+	int err;
+
+	err = request_firmware(&fw, tegra->soc->firmware, tegra->dev);
+	if (err < 0) {
+		dev_err(tegra->dev, "failed to request firmware: %d\n", err);
+		return err;
+	}
+
+	/* Load Falcon controller with its firmware. */
+	header = (struct tegra_xusb_fw_header *)fw->data;
+	tegra->fw.size = le32_to_cpu(header->fwimg_len);
+
+	tegra->fw.virt = dma_alloc_coherent(tegra->dev, tegra->fw.size,
+					    &tegra->fw.phys, GFP_KERNEL);
+	if (!tegra->fw.virt) {
+		dev_err(tegra->dev, "failed to allocate memory for firmware\n");
+		release_firmware(fw);
+		return -ENOMEM;
+	}
+
+	header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
+	memcpy(tegra->fw.virt, fw->data, tegra->fw.size);
+	release_firmware(fw);
+
+	if (csb_readl(tegra, XUSB_CSB_MP_ILOAD_BASE_LO) != 0) {
+		dev_info(dev, "Firmware already loaded, Falcon state %#x\n",
+			 csb_readl(tegra, XUSB_FALC_CPUCTL));
+		return 0;
+	}
+
+	/* Program the size of DFI into ILOAD_ATTR. */
+	csb_writel(tegra, tegra->fw.size, XUSB_CSB_MP_ILOAD_ATTR);
+
+	/*
+	 * Boot code of the firmware reads the ILOAD_BASE registers
+	 * to get to the start of the DFI in system memory.
+	 */
+	address = tegra->fw.phys + sizeof(*header);
+	csb_writel(tegra, address >> 32, XUSB_CSB_MP_ILOAD_BASE_HI);
+	csb_writel(tegra, address, XUSB_CSB_MP_ILOAD_BASE_LO);
+
+	/* Set BOOTPATH to 1 in APMAP. */
+	csb_writel(tegra, APMAP_BOOTPATH, XUSB_CSB_MP_APMAP);
+
+	/* Invalidate L2IMEM. */
+	csb_writel(tegra, L2IMEMOP_INVALIDATE_ALL, XUSB_CSB_MP_L2IMEMOP_TRIG);
+
+	/*
+	 * Initiate fetch of bootcode from system memory into L2IMEM.
+	 * Program bootcode location and size in system memory.
+	 */
+	code_tag_blocks = DIV_ROUND_UP(le32_to_cpu(header->boot_codetag),
+				       IMEM_BLOCK_SIZE);
+	code_size_blocks = DIV_ROUND_UP(le32_to_cpu(header->boot_codesize),
+					IMEM_BLOCK_SIZE);
+	code_blocks = code_tag_blocks + code_size_blocks;
+
+	value = ((code_tag_blocks & L2IMEMOP_SIZE_SRC_OFFSET_MASK) <<
+			L2IMEMOP_SIZE_SRC_OFFSET_SHIFT) |
+		((code_size_blocks & L2IMEMOP_SIZE_SRC_COUNT_MASK) <<
+			L2IMEMOP_SIZE_SRC_COUNT_SHIFT);
+	csb_writel(tegra, value, XUSB_CSB_MP_L2IMEMOP_SIZE);
+
+	/* Trigger L2IMEM load operation. */
+	csb_writel(tegra, L2IMEMOP_LOAD_LOCKED_RESULT,
+		   XUSB_CSB_MP_L2IMEMOP_TRIG);
+
+	/* Setup Falcon auto-fill. */
+	csb_writel(tegra, code_size_blocks, XUSB_FALC_IMFILLCTL);
+
+	value = ((code_tag_blocks & IMFILLRNG1_TAG_MASK) <<
+			IMFILLRNG1_TAG_LO_SHIFT) |
+		((code_blocks & IMFILLRNG1_TAG_MASK) <<
+			IMFILLRNG1_TAG_HI_SHIFT);
+	csb_writel(tegra, value, XUSB_FALC_IMFILLRNG1);
+
+	csb_writel(tegra, 0, XUSB_FALC_DMACTL);
+
+	msleep(50);
+
+	csb_writel(tegra, le32_to_cpu(header->boot_codetag),
+		   XUSB_FALC_BOOTVEC);
+
+	/* Boot Falcon CPU and wait for it to enter the STOPPED (idle) state. */
+	timeout = jiffies + msecs_to_jiffies(5);
+
+	csb_writel(tegra, CPUCTL_STARTCPU, XUSB_FALC_CPUCTL);
+
+	while (time_before(jiffies, timeout)) {
+		if (csb_readl(tegra, XUSB_FALC_CPUCTL) == CPUCTL_STATE_STOPPED)
+			break;
+
+		usleep_range(100, 200);
+	}
+
+	if (csb_readl(tegra, XUSB_FALC_CPUCTL) != CPUCTL_STATE_STOPPED) {
+		dev_err(dev, "Falcon failed to start, state: %#x\n",
+			csb_readl(tegra, XUSB_FALC_CPUCTL));
+		return -EIO;
+	}
+
+	timestamp = le32_to_cpu(header->fwimg_created_time);
+	time_to_tm(timestamp, 0, &time);
+
+	dev_info(dev, "Firmware timestamp: %ld-%02d-%02d %02d:%02d:%02d UTC\n",
+		 time.tm_year + 1900, time.tm_mon + 1, time.tm_mday,
+		 time.tm_hour, time.tm_min, time.tm_sec);
+
+	return 0;
+}
+
+static int tegra_xusb_probe(struct platform_device *pdev)
+{
+	struct tegra_xusb_mbox_msg msg;
+	struct resource *res, *regs;
+	struct tegra_xusb *tegra;
+	struct xhci_hcd *xhci;
+	unsigned int i, j, k;
+	struct phy *phy;
+	int err;
+
+	BUILD_BUG_ON(sizeof(struct tegra_xusb_fw_header) != 256);
+
+	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+	if (!tegra)
+		return -ENOMEM;
+
+	tegra->soc = of_device_get_match_data(&pdev->dev);
+	mutex_init(&tegra->lock);
+	tegra->dev = &pdev->dev;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	tegra->regs = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(tegra->regs))
+		return PTR_ERR(tegra->regs);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	tegra->fpci_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tegra->fpci_base))
+		return PTR_ERR(tegra->fpci_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	tegra->ipfs_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(tegra->ipfs_base))
+		return PTR_ERR(tegra->ipfs_base);
+
+	tegra->xhci_irq = platform_get_irq(pdev, 0);
+	if (tegra->xhci_irq < 0)
+		return tegra->xhci_irq;
+
+	tegra->mbox_irq = platform_get_irq(pdev, 1);
+	if (tegra->mbox_irq < 0)
+		return tegra->mbox_irq;
+
+	tegra->padctl = tegra_xusb_padctl_get(&pdev->dev);
+	if (IS_ERR(tegra->padctl))
+		return PTR_ERR(tegra->padctl);
+
+	tegra->host_rst = devm_reset_control_get(&pdev->dev, "xusb_host");
+	if (IS_ERR(tegra->host_rst)) {
+		err = PTR_ERR(tegra->host_rst);
+		dev_err(&pdev->dev, "failed to get xusb_host reset: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->ss_rst = devm_reset_control_get(&pdev->dev, "xusb_ss");
+	if (IS_ERR(tegra->ss_rst)) {
+		err = PTR_ERR(tegra->ss_rst);
+		dev_err(&pdev->dev, "failed to get xusb_ss reset: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host");
+	if (IS_ERR(tegra->host_clk)) {
+		err = PTR_ERR(tegra->host_clk);
+		dev_err(&pdev->dev, "failed to get xusb_host: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->falcon_clk = devm_clk_get(&pdev->dev, "xusb_falcon_src");
+	if (IS_ERR(tegra->falcon_clk)) {
+		err = PTR_ERR(tegra->falcon_clk);
+		dev_err(&pdev->dev, "failed to get xusb_falcon_src: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->ss_clk = devm_clk_get(&pdev->dev, "xusb_ss");
+	if (IS_ERR(tegra->ss_clk)) {
+		err = PTR_ERR(tegra->ss_clk);
+		dev_err(&pdev->dev, "failed to get xusb_ss: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->ss_src_clk = devm_clk_get(&pdev->dev, "xusb_ss_src");
+	if (IS_ERR(tegra->ss_src_clk)) {
+		err = PTR_ERR(tegra->ss_src_clk);
+		dev_err(&pdev->dev, "failed to get xusb_ss_src: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->hs_src_clk = devm_clk_get(&pdev->dev, "xusb_hs_src");
+	if (IS_ERR(tegra->hs_src_clk)) {
+		err = PTR_ERR(tegra->hs_src_clk);
+		dev_err(&pdev->dev, "failed to get xusb_hs_src: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->fs_src_clk = devm_clk_get(&pdev->dev, "xusb_fs_src");
+	if (IS_ERR(tegra->fs_src_clk)) {
+		err = PTR_ERR(tegra->fs_src_clk);
+		dev_err(&pdev->dev, "failed to get xusb_fs_src: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->pll_u_480m = devm_clk_get(&pdev->dev, "pll_u_480m");
+	if (IS_ERR(tegra->pll_u_480m)) {
+		err = PTR_ERR(tegra->pll_u_480m);
+		dev_err(&pdev->dev, "failed to get pll_u_480m: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->clk_m = devm_clk_get(&pdev->dev, "clk_m");
+	if (IS_ERR(tegra->clk_m)) {
+		err = PTR_ERR(tegra->clk_m);
+		dev_err(&pdev->dev, "failed to get clk_m: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->pll_e = devm_clk_get(&pdev->dev, "pll_e");
+	if (IS_ERR(tegra->pll_e)) {
+		err = PTR_ERR(tegra->pll_e);
+		dev_err(&pdev->dev, "failed to get pll_e: %d\n", err);
+		goto put_padctl;
+	}
+
+	tegra->supplies = devm_kcalloc(&pdev->dev, tegra->soc->num_supplies,
+				       sizeof(*tegra->supplies), GFP_KERNEL);
+	if (!tegra->supplies) {
+		err = -ENOMEM;
+		goto put_padctl;
+	}
+
+	for (i = 0; i < tegra->soc->num_supplies; i++)
+		tegra->supplies[i].supply = tegra->soc->supply_names[i];
+
+	err = devm_regulator_bulk_get(&pdev->dev, tegra->soc->num_supplies,
+				      tegra->supplies);
+	if (err) {
+		dev_err(&pdev->dev, "failed to get regulators: %d\n", err);
+		goto put_padctl;
+	}
+
+	for (i = 0; i < tegra->soc->num_types; i++)
+		tegra->num_phys += tegra->soc->phy_types[i].num;
+
+	tegra->phys = devm_kcalloc(&pdev->dev, tegra->num_phys,
+				   sizeof(*tegra->phys), GFP_KERNEL);
+	if (!tegra->phys) {
+		dev_err(&pdev->dev, "failed to allocate PHY array\n");
+		err = -ENOMEM;
+		goto put_padctl;
+	}
+
+	for (i = 0, k = 0; i < tegra->soc->num_types; i++) {
+		char prop[8];
+
+		for (j = 0; j < tegra->soc->phy_types[i].num; j++) {
+			snprintf(prop, sizeof(prop), "%s-%d",
+				 tegra->soc->phy_types[i].name, j);
+
+			phy = devm_phy_optional_get(&pdev->dev, prop);
+			if (IS_ERR(phy)) {
+				dev_err(&pdev->dev,
+					"failed to get PHY %s: %ld\n", prop,
+					PTR_ERR(phy));
+				err = PTR_ERR(phy);
+				goto put_padctl;
+			}
+
+			tegra->phys[k++] = phy;
+		}
+	}
+
+	err = tegra_xusb_clk_enable(tegra);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable clocks: %d\n", err);
+		goto put_padctl;
+	}
+
+	err = regulator_bulk_enable(tegra->soc->num_supplies, tegra->supplies);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable regulators: %d\n", err);
+		goto disable_clk;
+	}
+
+	err = tegra_xusb_phy_enable(tegra);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable PHYs: %d\n", err);
+		goto disable_regulator;
+	}
+
+	tegra_xusb_ipfs_config(tegra, regs);
+
+	err = tegra_xusb_load_firmware(tegra);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
+		goto disable_phy;
+	}
+
+	tegra->hcd = usb_create_hcd(&tegra_xhci_hc_driver, &pdev->dev,
+				    dev_name(&pdev->dev));
+	if (!tegra->hcd) {
+		err = -ENOMEM;
+		goto disable_phy;
+	}
+
+	/*
+	 * This must happen after usb_create_hcd(), because usb_create_hcd()
+	 * will overwrite the drvdata of the device with the hcd it creates.
+	 */
+	platform_set_drvdata(pdev, tegra);
+
+	tegra->hcd->regs = tegra->regs;
+	tegra->hcd->rsrc_start = regs->start;
+	tegra->hcd->rsrc_len = resource_size(regs);
+
+	err = usb_add_hcd(tegra->hcd, tegra->xhci_irq, IRQF_SHARED);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to add USB HCD: %d\n", err);
+		goto put_usb2;
+	}
+
+	device_wakeup_enable(tegra->hcd->self.controller);
+
+	xhci = hcd_to_xhci(tegra->hcd);
+
+	xhci->shared_hcd = usb_create_shared_hcd(&tegra_xhci_hc_driver,
+						 &pdev->dev,
+						 dev_name(&pdev->dev),
+						 tegra->hcd);
+	if (!xhci->shared_hcd) {
+		dev_err(&pdev->dev, "failed to create shared HCD\n");
+		goto remove_usb2;
+	}
+
+	err = usb_add_hcd(xhci->shared_hcd, tegra->xhci_irq, IRQF_SHARED);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to add shared HCD: %d\n", err);
+		goto put_usb3;
+	}
+
+	mutex_lock(&tegra->lock);
+
+	/* Enable firmware messages from controller. */
+	msg.cmd = MBOX_CMD_MSG_ENABLED;
+	msg.data = 0;
+
+	err = tegra_xusb_mbox_send(tegra, &msg);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable messages: %d\n", err);
+		mutex_unlock(&tegra->lock);
+		goto remove_usb3;
+	}
+
+	mutex_unlock(&tegra->lock);
+
+	err = devm_request_threaded_irq(&pdev->dev, tegra->mbox_irq,
+					tegra_xusb_mbox_irq,
+					tegra_xusb_mbox_thread, 0,
+					dev_name(&pdev->dev), tegra);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+		goto remove_usb3;
+	}
+
+	return 0;
+
+remove_usb3:
+	usb_remove_hcd(xhci->shared_hcd);
+put_usb3:
+	usb_put_hcd(xhci->shared_hcd);
+remove_usb2:
+	usb_remove_hcd(tegra->hcd);
+put_usb2:
+	usb_put_hcd(tegra->hcd);
+disable_phy:
+	tegra_xusb_phy_disable(tegra);
+disable_regulator:
+	regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
+disable_clk:
+	tegra_xusb_clk_disable(tegra);
+put_padctl:
+	tegra_xusb_padctl_put(tegra->padctl);
+	return err;
+}
+
+static int tegra_xusb_remove(struct platform_device *pdev)
+{
+	struct tegra_xusb *tegra = platform_get_drvdata(pdev);
+	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+
+	usb_remove_hcd(xhci->shared_hcd);
+	usb_put_hcd(xhci->shared_hcd);
+	usb_remove_hcd(tegra->hcd);
+	usb_put_hcd(tegra->hcd);
+
+	dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
+			  tegra->fw.phys);
+
+	tegra_xusb_phy_disable(tegra);
+	regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
+	tegra_xusb_clk_disable(tegra);
+
+	tegra_xusb_padctl_put(tegra->padctl);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_xusb_suspend(struct device *dev)
+{
+	struct tegra_xusb *tegra = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+	bool wakeup = device_may_wakeup(dev);
+
+	/* TODO: Powergate controller across suspend/resume. */
+	return xhci_suspend(xhci, wakeup);
+}
+
+static int tegra_xusb_resume(struct device *dev)
+{
+	struct tegra_xusb *tegra = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+
+	return xhci_resume(xhci, 0);
+}
+#endif
+
+static const struct dev_pm_ops tegra_xusb_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(tegra_xusb_suspend, tegra_xusb_resume)
+};
+
+static const char * const tegra124_supply_names[] = {
+	"avddio-pex",
+	"dvddio-pex",
+	"avdd-usb",
+	"avdd-pll-utmip",
+	"avdd-pll-erefe",
+	"avdd-usb-ss-pll",
+	"hvdd-usb-ss",
+	"hvdd-usb-ss-pll-e",
+};
+
+static const struct tegra_xusb_phy_type tegra124_phy_types[] = {
+	{ .name = "usb3", .num = 2, },
+	{ .name = "usb2", .num = 3, },
+	{ .name = "hsic", .num = 2, },
+};
+
+static const struct tegra_xusb_soc tegra124_soc = {
+	.firmware = "nvidia/tegra124/xusb.bin",
+	.supply_names = tegra124_supply_names,
+	.num_supplies = ARRAY_SIZE(tegra124_supply_names),
+	.phy_types = tegra124_phy_types,
+	.num_types = ARRAY_SIZE(tegra124_phy_types),
+	.ports = {
+		.usb2 = { .offset = 4, .count = 4, },
+		.hsic = { .offset = 6, .count = 2, },
+		.usb3 = { .offset = 0, .count = 2, },
+	},
+	.scale_ss_clock = true,
+};
+MODULE_FIRMWARE("nvidia/tegra124/xusb.bin");
+
+static const char * const tegra210_supply_names[] = {
+	"dvddio-pex",
+	"hvddio-pex",
+	"avdd-usb",
+	"avdd-pll-utmip",
+	"avdd-pll-uerefe",
+	"dvdd-pex-pll",
+	"hvdd-pex-pll-e",
+};
+
+static const struct tegra_xusb_phy_type tegra210_phy_types[] = {
+	{ .name = "usb3", .num = 4, },
+	{ .name = "usb2", .num = 4, },
+	{ .name = "hsic", .num = 1, },
+};
+
+static const struct tegra_xusb_soc tegra210_soc = {
+	.firmware = "nvidia/tegra210/xusb.bin",
+	.supply_names = tegra210_supply_names,
+	.num_supplies = ARRAY_SIZE(tegra210_supply_names),
+	.phy_types = tegra210_phy_types,
+	.num_types = ARRAY_SIZE(tegra210_phy_types),
+	.ports = {
+		.usb2 = { .offset = 4, .count = 4, },
+		.hsic = { .offset = 8, .count = 1, },
+		.usb3 = { .offset = 0, .count = 4, },
+	},
+	.scale_ss_clock = false,
+};
+MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
+
+static const struct of_device_id tegra_xusb_of_match[] = {
+	{ .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
+	{ .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
+
+static struct platform_driver tegra_xusb_driver = {
+	.probe = tegra_xusb_probe,
+	.remove = tegra_xusb_remove,
+	.driver = {
+		.name = "tegra-xusb",
+		.pm = &tegra_xusb_pm_ops,
+		.of_match_table = tegra_xusb_of_match,
+	},
+};
+
+static void tegra_xhci_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+	xhci->quirks |= XHCI_PLAT;
+}
+
+static int tegra_xhci_setup(struct usb_hcd *hcd)
+{
+	return xhci_gen_setup(hcd, tegra_xhci_quirks);
+}
+
+static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = {
+	.extra_priv_size = sizeof(struct xhci_hcd),
+	.reset = tegra_xhci_setup,
+};
+
+static int __init tegra_xusb_init(void)
+{
+	xhci_init_driver(&tegra_xhci_hc_driver, &tegra_xhci_overrides);
+
+	return platform_driver_register(&tegra_xusb_driver);
+}
+module_init(tegra_xusb_init);
+
+static void __exit tegra_xusb_exit(void)
+{
+	platform_driver_unregister(&tegra_xusb_driver);
+}
+module_exit(tegra_xusb_exit);
+
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_DESCRIPTION("NVIDIA Tegra XUSB xHCI host-controller driver");
+MODULE_LICENSE("GPL v2");
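
For reference, the CSB accessors in the new xhci-tegra.c split a CSB address into a page (written to XUSB_CFG_ARU_C11_CSBRANGE) and an offset into the 512-byte window at XUSB_CFG_CSB_BASE_ADDR. A standalone, user-space illustration of that arithmetic (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define CSB_PAGE_SELECT_SHIFT	9
    #define CSB_PAGE_SELECT_MASK	0x7fffff
    #define CSB_PAGE_OFFSET_MASK	0x1ff

    int main(void)
    {
    	uint32_t addr = 0x101a04;	/* XUSB_CSB_MP_ILOAD_BASE_LO */
    	uint32_t page = (addr >> CSB_PAGE_SELECT_SHIFT) & CSB_PAGE_SELECT_MASK;
    	uint32_t off  = addr & CSB_PAGE_OFFSET_MASK;

    	printf("page %#x offset %#x\n", page, off);	/* page 0x80d offset 0x4 */
    	return 0;
    }
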
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d51ee0c..9e71c96 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -147,7 +147,8 @@
 				"waited %u microseconds.\n",
 				XHCI_MAX_HALT_USEC);
 	if (!ret)
-		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+		/* clear state flags, including dying, halted and removing */
+		xhci->xhc_state = 0;
 
 	return ret;
 }
@@ -1108,8 +1109,8 @@
 		/* Resume root hubs only when there are pending events. */
 		status = readl(&xhci->op_regs->status);
 		if (status & STS_EINT) {
-			usb_hcd_resume_root_hub(hcd);
 			usb_hcd_resume_root_hub(xhci->shared_hcd);
+			usb_hcd_resume_root_hub(hcd);
 		}
 	}
 
@@ -1124,10 +1125,10 @@
 
 	/* Re-enable port polling. */
 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
-	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-	usb_hcd_poll_rh_status(hcd);
 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
 	usb_hcd_poll_rh_status(xhci->shared_hcd);
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+	usb_hcd_poll_rh_status(hcd);
 
 	return retval;
 }
@@ -2773,7 +2774,8 @@
 	if (ret <= 0)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
-	if (xhci->xhc_state & XHCI_STATE_DYING)
+	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+		(xhci->xhc_state & XHCI_STATE_REMOVING))
 		return -ENODEV;
 
 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -3820,7 +3822,7 @@
 
 	mutex_lock(&xhci->mutex);
 
-	if (xhci->xhc_state)	/* dying or halted */
+	if (xhci->xhc_state)	/* dying, removing or halted */
 		goto out;
 
 	if (!udev->slot_id) {
@@ -4948,6 +4950,16 @@
 		return retval;
 	xhci_dbg(xhci, "Reset complete\n");
 
+	/*
+	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
+	 * of HCCPARAMS1 is set to 1. However, these xHCs don't actually
+	 * support 64-bit address memory pointers. So, this driver clears
+	 * the AC64 bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
+	 * DMA_BIT_MASK(32)) in xhci_gen_setup().
+	 */
+	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
+		xhci->hcc_params &= ~BIT(0);
+
 	/* Set dma_mask and coherent_dma_mask to 64-bits,
 	 * if xHC supports 64-bit addressing */
 	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e293e09..6c629c9 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1605,6 +1605,7 @@
  */
 #define XHCI_STATE_DYING	(1 << 0)
 #define XHCI_STATE_HALTED	(1 << 1)
+#define XHCI_STATE_REMOVING	(1 << 2)
 	/* Statistics */
 	int			error_bitmask;
 	unsigned int		quirks;
@@ -1641,6 +1642,7 @@
 #define XHCI_PME_STUCK_QUIRK	(1 << 20)
 #define XHCI_MTK_HOST		(1 << 21)
 #define XHCI_SSIC_PORT_UNUSED	(1 << 22)
+#define XHCI_NO_64BIT_SUPPORT	(1 << 23)
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
 	/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 5e5a8fa..bc88899 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -83,9 +83,9 @@
 {
 	usb_phy_generic_register();
 	musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
-	if (!musb->xceiv) {
+	if (IS_ERR(musb->xceiv)) {
 		pr_err("HS UDC: no transceiver configured\n");
-		return -ENODEV;
+		return PTR_ERR(musb->xceiv);
 	}
 
 	/* Silicon does not implement ConfigData register.
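
The jz4740.c change above matters because usb_get_phy() reports failure with ERR_PTR() rather than NULL, so the removed NULL check could never trigger. A hedged sketch of handling such a return value in a hypothetical caller:

    #include <linux/err.h>
    #include <linux/usb/phy.h>

    static int example_bind_phy(struct usb_phy **out)
    {
    	struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB2);

    	/* ERR_PTR-returning APIs never return NULL on failure */
    	if (IS_ERR(phy))
    		return PTR_ERR(phy);	/* propagate the encoded errno */

    	*out = phy;
    	return 0;
    }
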
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 87bd578..152865b 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1164,12 +1164,12 @@
 		musb_writew(epio, MUSB_RXMAXP, 0);
 	}
 
-	musb_ep->desc = NULL;
-	musb_ep->end_point.desc = NULL;
-
 	/* abort all pending DMA and requests */
 	nuke(musb_ep, -ESHUTDOWN);
 
+	musb_ep->desc = NULL;
+	musb_ep->end_point.desc = NULL;
+
 	schedule_work(&musb->irq_work);
 
 	spin_unlock_irqrestore(&(musb->lock), flags);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 58487a4..2f8ad7f 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2735,7 +2735,7 @@
 	.description		= "musb-hcd",
 	.product_desc		= "MUSB HDRC host driver",
 	.hcd_priv_size		= sizeof(struct musb *),
-	.flags			= HCD_USB2 | HCD_MEMORY | HCD_BH,
+	.flags			= HCD_USB2 | HCD_MEMORY,
 
 	/* not using irq handler or reset hooks from usbcore, since
 	 * those must be shared with peripheral code for OTG configs
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
index 579587d..3d7af85 100644
--- a/drivers/usb/phy/phy-qcom-8x16-usb.c
+++ b/drivers/usb/phy/phy-qcom-8x16-usb.c
@@ -65,9 +65,7 @@
 	void __iomem			*regs;
 	struct clk			*core_clk;
 	struct clk			*iface_clk;
-	struct regulator		*v3p3;
-	struct regulator		*v1p8;
-	struct regulator		*vdd;
+	struct regulator_bulk_data	regulator[3];
 
 	struct reset_control		*phy_reset;
 
@@ -78,51 +76,6 @@
 	struct notifier_block		reboot_notify;
 };
 
-static int phy_8x16_regulators_enable(struct phy_8x16 *qphy)
-{
-	int ret;
-
-	ret = regulator_set_voltage(qphy->vdd, HSPHY_VDD_MIN, HSPHY_VDD_MAX);
-	if (ret)
-		return ret;
-
-	ret = regulator_enable(qphy->vdd);
-	if (ret)
-		return ret;
-
-	ret = regulator_set_voltage(qphy->v3p3, HSPHY_3P3_MIN, HSPHY_3P3_MAX);
-	if (ret)
-		goto off_vdd;
-
-	ret = regulator_enable(qphy->v3p3);
-	if (ret)
-		goto off_vdd;
-
-	ret = regulator_set_voltage(qphy->v1p8, HSPHY_1P8_MIN, HSPHY_1P8_MAX);
-	if (ret)
-		goto off_3p3;
-
-	ret = regulator_enable(qphy->v1p8);
-	if (ret)
-		goto off_3p3;
-
-	return 0;
-
-off_3p3:
-	regulator_disable(qphy->v3p3);
-off_vdd:
-	regulator_disable(qphy->vdd);
-
-	return ret;
-}
-
-static void phy_8x16_regulators_disable(struct phy_8x16 *qphy)
-{
-	regulator_disable(qphy->v1p8);
-	regulator_disable(qphy->v3p3);
-	regulator_disable(qphy->vdd);
-}
-
 static int phy_8x16_notify_connect(struct usb_phy *phy,
 				   enum usb_device_speed speed)
 {
@@ -261,7 +214,6 @@
 
 static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
 {
-	struct regulator_bulk_data regs[3];
 	struct device *dev = qphy->phy.dev;
 	int ret;
 
@@ -273,18 +225,15 @@
 	if (IS_ERR(qphy->iface_clk))
 		return PTR_ERR(qphy->iface_clk);
 
-	regs[0].supply = "v3p3";
-	regs[1].supply = "v1p8";
-	regs[2].supply = "vddcx";
+	qphy->regulator[0].supply = "v3p3";
+	qphy->regulator[1].supply = "v1p8";
+	qphy->regulator[2].supply = "vddcx";
 
-	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs);
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(qphy->regulator),
+				      qphy->regulator);
 	if (ret)
 		return ret;
 
-	qphy->v3p3 = regs[0].consumer;
-	qphy->v1p8 = regs[1].consumer;
-	qphy->vdd  = regs[2].consumer;
-
 	qphy->phy_reset = devm_reset_control_get(dev, "phy");
 	if (IS_ERR(qphy->phy_reset))
 		return PTR_ERR(qphy->phy_reset);
@@ -364,8 +313,9 @@
 	if (ret < 0)
 		goto off_core;
 
-	ret = phy_8x16_regulators_enable(qphy);
-	if (0 && ret)
+	ret = regulator_bulk_enable(ARRAY_SIZE(qphy->regulator),
+				    qphy->regulator);
+	if (WARN_ON(ret))
 		goto off_clks;
 
 	qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify;
@@ -387,7 +337,7 @@
 	extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB,
 				   &qphy->vbus_notify);
 off_power:
-	phy_8x16_regulators_disable(qphy);
+	regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
 off_clks:
 	clk_disable_unprepare(qphy->iface_clk);
 off_core:
@@ -413,7 +363,7 @@
 
 	clk_disable_unprepare(qphy->iface_clk);
 	clk_disable_unprepare(qphy->core_clk);
-	phy_8x16_regulators_disable(qphy);
+	regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
 	return 0;
 }
 
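
The phy-qcom-8x16-usb.c conversion above replaces three individually managed supplies with the regulator bulk helpers. A minimal sketch of that API (supply names taken from the diff, the rest hypothetical):

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/regulator/consumer.h>

    static struct regulator_bulk_data example_supplies[] = {
    	{ .supply = "v3p3" },
    	{ .supply = "v1p8" },
    	{ .supply = "vddcx" },
    };

    static int example_power_on(struct device *dev)
    {
    	int ret;

    	/* fetch all consumers in one call; devm_ releases them on unbind */
    	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
    				      example_supplies);
    	if (ret)
    		return ret;

    	/* enables in array order and rolls back on failure */
    	return regulator_bulk_enable(ARRAY_SIZE(example_supplies),
    				     example_supplies);
    }
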
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index b4de70e..000f975 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -190,7 +190,8 @@
 		goto __usbhs_pkt_handler_end;
 	}
 
-	ret = func(pkt, &is_done);
+	if (likely(func))
+		ret = func(pkt, &is_done);
 
 	if (is_done)
 		__usbhsf_pkt_del(pkt);
@@ -889,6 +890,7 @@
 
 	pkt->trans = len;
 
+	usbhsf_tx_irq_ctrl(pipe, 0);
 	INIT_WORK(&pkt->work, xfer_work);
 	schedule_work(&pkt->work);
 
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 664b263..53d104b 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -158,10 +158,14 @@
 	struct usbhs_pipe *pipe = pkt->pipe;
 	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
 	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
+	unsigned long flags;
 
 	ureq->req.actual = pkt->actual;
 
-	usbhsg_queue_pop(uep, ureq, 0);
+	usbhs_lock(priv, flags);
+	if (uep)
+		__usbhsg_queue_pop(uep, ureq, 0);
+	usbhs_unlock(priv, flags);
 }
 
 static void usbhsg_queue_push(struct usbhsg_uep *uep,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fbfe761..7c9f25e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -109,6 +109,7 @@
 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+	{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
 	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -118,6 +119,7 @@
 	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -141,6 +143,8 @@
 	{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
 	{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
 	{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+	{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
@@ -165,6 +169,7 @@
 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
 	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index b283eb8..bbeeb2b 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -447,6 +447,11 @@
 	struct usb_serial *serial = port->serial;
 	struct cypress_private *priv;
 
+	if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
+		dev_err(&port->dev, "required endpoint is missing\n");
+		return -ENODEV;
+	}
+
 	priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
@@ -606,12 +611,6 @@
 		cypress_set_termios(tty, port, &priv->tmp_termios);
 
 	/* setup the port and start reading from the device */
-	if (!port->interrupt_in_urb) {
-		dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
-			__func__);
-		return -1;
-	}
-
 	usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
 		usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
 		port->interrupt_in_urb->transfer_buffer,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 010a42a..16e8e37 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1251,8 +1251,27 @@
 
 static int digi_startup(struct usb_serial *serial)
 {
+	struct device *dev = &serial->interface->dev;
 	struct digi_serial *serial_priv;
 	int ret;
+	int i;
+
+	/* check whether the device has the expected number of endpoints */
+	if (serial->num_port_pointers < serial->type->num_ports + 1) {
+		dev_err(dev, "OOB endpoints missing\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < serial->type->num_ports + 1 ; i++) {
+		if (!serial->port[i]->read_urb) {
+			dev_err(dev, "bulk-in endpoint missing\n");
+			return -ENODEV;
+		}
+		if (!serial->port[i]->write_urb) {
+			dev_err(dev, "bulk-out endpoint missing\n");
+			return -ENODEV;
+		}
+	}
 
 	serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
 	if (!serial_priv)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 427ae43..3a814e8 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1004,6 +1004,10 @@
 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+	/* ICP DAS I-756xU devices */
+	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
+	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
 	{ }					/* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a84df25..c5d6c1e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -872,6 +872,14 @@
 #define NOVITUS_BONO_E_PID		0x6010
 
 /*
+ * ICPDAS I-756*U devices
+ */
+#define ICPDAS_VID			0x1b5c
+#define ICPDAS_I7560U_PID		0x0103
+#define ICPDAS_I7561U_PID		0x0104
+#define ICPDAS_I7563U_PID		0x0105
+
+/*
  * RT Systems programming cables for various ham radios
  */
 #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 4446b8d..8856553 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -376,14 +376,21 @@
 
 static int mct_u232_port_probe(struct usb_serial_port *port)
 {
+	struct usb_serial *serial = port->serial;
 	struct mct_u232_private *priv;
 
+	/* check first to simplify error handling */
+	if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
+		dev_err(&port->dev, "expected endpoint missing\n");
+		return -ENODEV;
+	}
+
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
 	/* Use second interrupt-in endpoint for reading. */
-	priv->read_urb = port->serial->port[1]->interrupt_in_urb;
+	priv->read_urb = serial->port[1]->interrupt_in_urb;
 	priv->read_urb->context = port;
 
 	spin_lock_init(&priv->lock);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 348e198..c6f497f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1818,6 +1818,8 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index dba5136..ae85861 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -123,7 +123,7 @@
 		unsigned int max_sectors = 64;
 
 		if (us->fflags & US_FL_MAX_SECTORS_MIN)
-			max_sectors = PAGE_CACHE_SIZE >> 9;
+			max_sectors = PAGE_SIZE >> 9;
 		if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
 			blk_queue_max_hw_sectors(sdev->request_queue,
 					      max_sectors);
@@ -563,7 +563,7 @@
 	.target_alloc =			target_alloc,
 
 	/* lots of sg segments can be handled */
-	.sg_tablesize =			SCSI_MAX_SG_CHAIN_SEGMENTS,
+	.sg_tablesize =			SG_MAX_SEGMENTS,
 
 	/* limit the total size of a transfer to 120 KB */
 	.max_sectors =                  240,
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 13e4cc3..16bc679 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,7 +2,7 @@
  * USB Attached SCSI
  * Note that this is not the same as the USB Mass Storage driver
  *
- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
  * Copyright Matthew Wilcox for Intel Corp, 2010
  * Copyright Sarah Sharp for Intel Corp, 2010
  *
@@ -781,6 +781,17 @@
 	return SUCCESS;
 }
 
+static int uas_target_alloc(struct scsi_target *starget)
+{
+	struct uas_dev_info *devinfo = (struct uas_dev_info *)
+			dev_to_shost(starget->dev.parent)->hostdata;
+
+	if (devinfo->flags & US_FL_NO_REPORT_LUNS)
+		starget->no_report_luns = 1;
+
+	return 0;
+}
+
 static int uas_slave_alloc(struct scsi_device *sdev)
 {
 	struct uas_dev_info *devinfo =
@@ -824,7 +835,6 @@
 	if (devinfo->flags & US_FL_BROKEN_FUA)
 		sdev->broken_fua = 1;
 
-	scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
 	return 0;
 }
 
@@ -832,6 +842,7 @@
 	.module = THIS_MODULE,
 	.name = "uas",
 	.queuecommand = uas_queuecommand,
+	.target_alloc = uas_target_alloc,
 	.slave_alloc = uas_slave_alloc,
 	.slave_configure = uas_slave_configure,
 	.eh_abort_handler = uas_eh_abort_handler,
@@ -956,6 +967,12 @@
 	if (result)
 		goto set_alt0;
 
+	/*
+	 * 1 tag is reserved for untagged commands +
+	 * 1 tag to avoid off-by-one errors in some bridge firmwares
+	 */
+	shost->can_queue = devinfo->qdepth - 2;
+
 	usb_set_intfdata(intf, shost);
 	result = scsi_add_host(shost, &intf->dev);
 	if (result)
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index ccc113e..53341a7 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -64,6 +64,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_NO_ATA_1X),
 
+/* Reported-by: David Webb <djw@noc.ac.uk> */
+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+		"Seagate",
+		"Expansion Desk",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_REPORT_LUNS),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
 		"Seagate",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 43576ed..9de988a 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -482,7 +482,7 @@
 			US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
 			US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
 			US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
-			US_FL_MAX_SECTORS_240);
+			US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
 
 	p = quirks;
 	while (*p) {
@@ -532,6 +532,9 @@
 		case 'i':
 			f |= US_FL_IGNORE_DEVICE;
 			break;
+		case 'j':
+			f |= US_FL_NO_REPORT_LUNS;
+			break;
 		case 'l':
 			f |= US_FL_NOT_LOCKABLE;
 			break;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index facaaf0..e40da77 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -741,6 +741,17 @@
 	if (!(size > 0))
 		return 0;
 
+	if (size > urb->transfer_buffer_length) {
+		/* should not happen, probably malicious packet */
+		if (ud->side == USBIP_STUB) {
+			usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+			return 0;
+		} else {
+			usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+			return -EPIPE;
+		}
+	}
+
 	ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
 	if (ret != size) {
 		dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 75b24e9..15a6582 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -407,7 +407,7 @@
 
 	mutex_lock(&iommu->lock);
 	list_for_each_entry(domain, &iommu->domain_list, next)
-		bitmap &= domain->domain->ops->pgsize_bitmap;
+		bitmap &= domain->domain->pgsize_bitmap;
 	mutex_unlock(&iommu->lock);
 
 	/*
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index bddc8b1..288318a 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -164,6 +164,30 @@
 	return sprintf(buf, "%d\n", bd->props.brightness);
 }
 
+int backlight_device_set_brightness(struct backlight_device *bd,
+				    unsigned long brightness)
+{
+	int rc = -ENXIO;
+
+	mutex_lock(&bd->ops_lock);
+	if (bd->ops) {
+		if (brightness > bd->props.max_brightness)
+			rc = -EINVAL;
+		else {
+			pr_debug("set brightness to %lu\n", brightness);
+			bd->props.brightness = brightness;
+			backlight_update_status(bd);
+			rc = 0;
+		}
+	}
+	mutex_unlock(&bd->ops_lock);
+
+	backlight_generate_event(bd, BACKLIGHT_UPDATE_SYSFS);
+
+	return rc;
+}
+EXPORT_SYMBOL(backlight_device_set_brightness);
+
 static ssize_t brightness_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -175,24 +199,9 @@
 	if (rc)
 		return rc;
 
-	rc = -ENXIO;
+	rc = backlight_device_set_brightness(bd, brightness);
 
-	mutex_lock(&bd->ops_lock);
-	if (bd->ops) {
-		if (brightness > bd->props.max_brightness)
-			rc = -EINVAL;
-		else {
-			pr_debug("set brightness to %lu\n", brightness);
-			bd->props.brightness = brightness;
-			backlight_update_status(bd);
-			rc = count;
-		}
-	}
-	mutex_unlock(&bd->ops_lock);
-
-	backlight_generate_event(bd, BACKLIGHT_UPDATE_SYSFS);
-
-	return rc;
+	return rc ? rc : count;
 }
 static DEVICE_ATTR_RW(brightness);
 
@@ -380,7 +389,7 @@
 }
 EXPORT_SYMBOL(backlight_device_register);
 
-bool backlight_device_registered(enum backlight_type type)
+struct backlight_device *backlight_device_get_by_type(enum backlight_type type)
 {
 	bool found = false;
 	struct backlight_device *bd;
@@ -394,9 +403,9 @@
 	}
 	mutex_unlock(&backlight_dev_list_mutex);
 
-	return found;
+	return found ? bd : NULL;
 }
-EXPORT_SYMBOL(backlight_device_registered);
+EXPORT_SYMBOL(backlight_device_get_by_type);
 
 /**
  * backlight_device_unregister - unregisters a backlight device object.
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 983280e..e5a391a 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -761,7 +761,7 @@
 
 config FB_EFI
 	bool "EFI-based Framebuffer Support"
-	depends on (FB = y) && X86 && EFI
+	depends on (FB = y) && !IA64 && EFI
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index fe274b5..93e66a9 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -440,13 +440,14 @@
 		fb->off_ienb = CLCD_PL111_IENB;
 		fb->off_cntl = CLCD_PL111_CNTL;
 	} else {
-#ifdef CONFIG_ARCH_VERSATILE
-		fb->off_ienb = CLCD_PL111_IENB;
-		fb->off_cntl = CLCD_PL111_CNTL;
-#else
-		fb->off_ienb = CLCD_PL110_IENB;
-		fb->off_cntl = CLCD_PL110_CNTL;
-#endif
+		if (of_machine_is_compatible("arm,versatile-ab") ||
+		    of_machine_is_compatible("arm,versatile-pb")) {
+			fb->off_ienb = CLCD_PL111_IENB;
+			fb->off_cntl = CLCD_PL111_CNTL;
+		} else {
+			fb->off_ienb = CLCD_PL110_IENB;
+			fb->off_cntl = CLCD_PL110_CNTL;
+		}
 	}
 
 	fb->clk = clk_get(&fb->dev->dev, NULL);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 95d293b..f4c045c 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -6,16 +6,14 @@
  *
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/efi.h>
 #include <linux/errno.h>
 #include <linux/fb.h>
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
-#include <linux/dmi.h>
-#include <linux/pci.h>
 #include <video/vga.h>
-#include <asm/sysfb.h>
+#include <asm/efi.h>
 
 static bool request_mem_succeeded = false;
 
@@ -85,21 +83,13 @@
 static int efifb_setup(char *options)
 {
 	char *this_opt;
-	int i;
 
 	if (options && *options) {
 		while ((this_opt = strsep(&options, ",")) != NULL) {
 			if (!*this_opt) continue;
 
-			for (i = 0; i < M_UNKNOWN; i++) {
-				if (efifb_dmi_list[i].base != 0 &&
-				    !strcmp(this_opt, efifb_dmi_list[i].optname)) {
-					screen_info.lfb_base = efifb_dmi_list[i].base;
-					screen_info.lfb_linelength = efifb_dmi_list[i].stride;
-					screen_info.lfb_width = efifb_dmi_list[i].width;
-					screen_info.lfb_height = efifb_dmi_list[i].height;
-				}
-			}
+			efifb_setup_from_dmi(&screen_info, this_opt);
+
 			if (!strncmp(this_opt, "base:", 5))
 				screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
 			else if (!strncmp(this_opt, "stride:", 7))
@@ -338,5 +328,4 @@
 	.remove = efifb_remove,
 };
 
-module_platform_driver(efifb_driver);
-MODULE_LICENSE("GPL");
+builtin_platform_driver(efifb_driver);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index abfd1f6..1954ec9 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -200,20 +200,16 @@
 static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
 		  char *desc, struct gpio_desc **gpiod)
 {
-	struct gpio_desc *gd;
 	int r;
 
-	*gpiod = NULL;
-
 	r = devm_gpio_request_one(dev, gpio, flags, desc);
-	if (r)
+	if (r) {
+		*gpiod = NULL;
 		return r == -ENOENT ? 0 : r;
+	}
 
-	gd = gpio_to_desc(gpio);
-	if (IS_ERR(gd))
-		return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
+	*gpiod = gpio_to_desc(gpio);
 
-	*gpiod = gd;
 	return 0;
 }
 
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 71a923e..3b1ca44 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -735,7 +735,7 @@
 
 out_unmap:
 	for (i = 0; i < nr_pages; i++)
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 
 	kfree(pages);
 
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index f6f28cc..e76bd91 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -17,6 +17,7 @@
  *
  */
 
+#include <linux/delay.h>
 #define VIRTIO_PCI_NO_LEGACY
 #include "virtio_pci_common.h"
 
@@ -271,9 +272,13 @@
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	/* 0 status means a reset. */
 	vp_iowrite8(0, &vp_dev->common->device_status);
-	/* Flush out the status write, and flush in device writes,
-	 * including MSI-X interrupts, if any. */
-	vp_ioread8(&vp_dev->common->device_status);
+	/* After writing 0 to device_status, the driver MUST wait for a read of
+	 * device_status to return 0 before reinitializing the device.
+	 * This will flush out the status write, and flush in device writes,
+	 * including MSI-X interrupts, if any.
+	 */
+	while (vp_ioread8(&vp_dev->common->device_status))
+		msleep(1);
 	/* Flush pending VQ/configuration callbacks. */
 	vp_synchronize_vectors(vdev);
 }
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5c802d4..ca6bfdd 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1006,7 +1006,7 @@
 	const char *name)
 {
 	struct virtqueue *vq;
-	void *queue;
+	void *queue = NULL;
 	dma_addr_t dma_addr;
 	size_t queue_size_in_bytes;
 	struct vring vring;
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 2820924..f4bc8c1 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -352,7 +352,7 @@
 		w1_delay(70);
 
 		result = dev->bus_master->read_bit(dev->bus_master->data) & 0x1;
-		/* minmum 70 (above) + 430 = 500 us
+		/* minimum 70 (above) + 430 = 500 us
 		 * There aren't any timing requirements between a reset and
 		 * the following transactions.  Sleeping is safe here.
 		 */
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index fb94765..9c41431 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1475,6 +1475,32 @@
 	help
 	  Hardware driver for the Mediatek/Ralink MT7621/8 SoC Watchdog Timer.
 
+config PIC32_WDT
+	tristate "Microchip PIC32 hardware watchdog"
+	select WATCHDOG_CORE
+	depends on MACH_PIC32
+	help
+	  Watchdog driver for the built-in watchdog hardware in a PIC32.
+
+	  Configuration bits must be set appropriately for the watchdog to be
+	  controlled by this driver.
+
+	  To compile this driver as a loadable module, choose M here.
+	  The module will be called pic32-wdt.
+
+config PIC32_DMT
+	tristate "Microchip PIC32 Deadman Timer"
+	select WATCHDOG_CORE
+	depends on MACH_PIC32
+	help
+	  Watchdog driver for PIC32 instruction fetch counting timer. This specific
+	  timer is typically used in mission-critical and safety-critical
+	  applications, where any single failure of the software functionality
+	  and sequencing must be detected.
+
+	  To compile this driver as a loadable module, choose M here.
+	  The module will be called pic32-dmt.
+
 # PARISC Architecture
 
 # POWERPC Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index feb6270..9bde095 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -157,6 +157,8 @@
 obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o
 obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o
 obj-$(CONFIG_MT7621_WDT) += mt7621_wdt.o
+obj-$(CONFIG_PIC32_WDT) += pic32-wdt.o
+obj-$(CONFIG_PIC32_DMT) += pic32-dmt.o
 
 # PARISC Architecture
 
diff --git a/drivers/watchdog/pic32-dmt.c b/drivers/watchdog/pic32-dmt.c
new file mode 100644
index 0000000..962f58c
--- /dev/null
+++ b/drivers/watchdog/pic32-dmt.c
@@ -0,0 +1,257 @@
+/*
+ * PIC32 deadman timer driver
+ *
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/watchdog.h>
+
+#include <asm/mach-pic32/pic32.h>
+
+/* Deadman Timer Regs */
+#define DMTCON_REG	0x00
+#define DMTPRECLR_REG	0x10
+#define DMTCLR_REG	0x20
+#define DMTSTAT_REG	0x30
+#define DMTCNT_REG	0x40
+#define DMTPSCNT_REG	0x60
+#define DMTPSINTV_REG	0x70
+
+/* Deadman Timer Regs fields */
+#define DMT_ON			BIT(15)
+#define DMT_STEP1_KEY		BIT(6)
+#define DMT_STEP2_KEY		BIT(3)
+#define DMTSTAT_WINOPN		BIT(0)
+#define DMTSTAT_EVENT		BIT(5)
+#define DMTSTAT_BAD2		BIT(6)
+#define DMTSTAT_BAD1		BIT(7)
+
+/* Reset Control Register fields for watchdog */
+#define RESETCON_DMT_TIMEOUT	BIT(5)
+
+struct pic32_dmt {
+	void __iomem	*regs;
+	struct clk	*clk;
+};
+
+static inline void dmt_enable(struct pic32_dmt *dmt)
+{
+	writel(DMT_ON, PIC32_SET(dmt->regs + DMTCON_REG));
+}
+
+static inline void dmt_disable(struct pic32_dmt *dmt)
+{
+	writel(DMT_ON, PIC32_CLR(dmt->regs + DMTCON_REG));
+	/*
+	 * Cannot touch registers in the CPU cycle following clearing the
+	 * ON bit.
+	 */
+	nop();
+}
+
+static inline int dmt_bad_status(struct pic32_dmt *dmt)
+{
+	u32 val;
+
+	val = readl(dmt->regs + DMTSTAT_REG);
+	val &= (DMTSTAT_BAD1 | DMTSTAT_BAD2 | DMTSTAT_EVENT);
+	if (val)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static inline int dmt_keepalive(struct pic32_dmt *dmt)
+{
+	u32 v;
+	u32 timeout = 500;
+
+	/* set pre-clear key */
+	writel(DMT_STEP1_KEY << 8, dmt->regs + DMTPRECLR_REG);
+
+	/* wait for DMT window to open */
+	while (--timeout) {
+		v = readl(dmt->regs + DMTSTAT_REG) & DMTSTAT_WINOPN;
+		if (v == DMTSTAT_WINOPN)
+			break;
+	}
+
+	/* apply key2 */
+	writel(DMT_STEP2_KEY, dmt->regs + DMTCLR_REG);
+
+	/* check whether keys are latched correctly */
+	return dmt_bad_status(dmt);
+}
+
+static inline u32 pic32_dmt_get_timeout_secs(struct pic32_dmt *dmt)
+{
+	unsigned long rate;
+
+	rate = clk_get_rate(dmt->clk);
+	if (rate)
+		return readl(dmt->regs + DMTPSCNT_REG) / rate;
+
+	return 0;
+}
+
+static inline u32 pic32_dmt_bootstatus(struct pic32_dmt *dmt)
+{
+	u32 v;
+	void __iomem *rst_base;
+
+	rst_base = ioremap(PIC32_BASE_RESET, 0x10);
+	if (!rst_base)
+		return 0;
+
+	v = readl(rst_base);
+
+	writel(RESETCON_DMT_TIMEOUT, PIC32_CLR(rst_base));
+
+	iounmap(rst_base);
+	return v & RESETCON_DMT_TIMEOUT;
+}
+
+static int pic32_dmt_start(struct watchdog_device *wdd)
+{
+	struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
+
+	dmt_enable(dmt);
+	return dmt_keepalive(dmt);
+}
+
+static int pic32_dmt_stop(struct watchdog_device *wdd)
+{
+	struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
+
+	dmt_disable(dmt);
+
+	return 0;
+}
+
+static int pic32_dmt_ping(struct watchdog_device *wdd)
+{
+	struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
+
+	return dmt_keepalive(dmt);
+}
+
+static const struct watchdog_ops pic32_dmt_fops = {
+	.owner		= THIS_MODULE,
+	.start		= pic32_dmt_start,
+	.stop		= pic32_dmt_stop,
+	.ping		= pic32_dmt_ping,
+};
+
+static const struct watchdog_info pic32_dmt_ident = {
+	.options	= WDIOF_KEEPALIVEPING |
+			  WDIOF_MAGICCLOSE,
+	.identity	= "PIC32 Deadman Timer",
+};
+
+static struct watchdog_device pic32_dmt_wdd = {
+	.info		= &pic32_dmt_ident,
+	.ops		= &pic32_dmt_fops,
+};
+
+static int pic32_dmt_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct pic32_dmt *dmt;
+	struct resource *mem;
+	struct watchdog_device *wdd = &pic32_dmt_wdd;
+
+	dmt = devm_kzalloc(&pdev->dev, sizeof(*dmt), GFP_KERNEL);
+	if (!dmt)
+		return -ENOMEM;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dmt->regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(dmt->regs))
+		return PTR_ERR(dmt->regs);
+
+	dmt->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dmt->clk)) {
+		dev_err(&pdev->dev, "clk not found\n");
+		return PTR_ERR(dmt->clk);
+	}
+
+	ret = clk_prepare_enable(dmt->clk);
+	if (ret)
+		return ret;
+
+	wdd->timeout = pic32_dmt_get_timeout_secs(dmt);
+	if (!wdd->timeout) {
+		dev_err(&pdev->dev,
+			"failed to read watchdog register timeout\n");
+		ret = -EINVAL;
+		goto out_disable_clk;
+	}
+
+	dev_info(&pdev->dev, "timeout %d\n", wdd->timeout);
+
+	wdd->bootstatus = pic32_dmt_bootstatus(dmt) ? WDIOF_CARDRESET : 0;
+
+	watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
+	watchdog_set_drvdata(wdd, dmt);
+
+	ret = watchdog_register_device(wdd);
+	if (ret) {
+		dev_err(&pdev->dev, "watchdog register failed, err %d\n", ret);
+		goto out_disable_clk;
+	}
+
+	platform_set_drvdata(pdev, wdd);
+	return 0;
+
+out_disable_clk:
+	clk_disable_unprepare(dmt->clk);
+	return ret;
+}
+
+static int pic32_dmt_remove(struct platform_device *pdev)
+{
+	struct watchdog_device *wdd = platform_get_drvdata(pdev);
+	struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
+
+	watchdog_unregister_device(wdd);
+	clk_disable_unprepare(dmt->clk);
+
+	return 0;
+}
+
+static const struct of_device_id pic32_dmt_of_ids[] = {
+	{ .compatible = "microchip,pic32mzda-dmt",},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_dmt_of_ids);
+
+static struct platform_driver pic32_dmt_driver = {
+	.probe		= pic32_dmt_probe,
+	.remove		= pic32_dmt_remove,
+	.driver		= {
+		.name		= "pic32-dmt",
+		.owner		= THIS_MODULE,
+		.of_match_table = of_match_ptr(pic32_dmt_of_ids),
+	}
+};
+
+module_platform_driver(pic32_dmt_driver);
+
+MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
+MODULE_DESCRIPTION("Microchip PIC32 DMT Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/pic32-wdt.c b/drivers/watchdog/pic32-wdt.c
new file mode 100644
index 0000000..6047aa8
--- /dev/null
+++ b/drivers/watchdog/pic32-wdt.c
@@ -0,0 +1,263 @@
+/*
+ * PIC32 watchdog driver
+ *
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/watchdog.h>
+
+#include <asm/mach-pic32/pic32.h>
+
+/* Watchdog Timer Registers */
+#define WDTCON_REG		0x00
+
+/* Watchdog Timer Control Register fields */
+#define WDTCON_WIN_EN		BIT(0)
+#define WDTCON_RMCS_MASK	0x0003
+#define WDTCON_RMCS_SHIFT	0x0006
+#define WDTCON_RMPS_MASK	0x001F
+#define WDTCON_RMPS_SHIFT	0x0008
+#define WDTCON_ON		BIT(15)
+#define WDTCON_CLR_KEY		0x5743
+
+/* Reset Control Register fields for watchdog */
+#define RESETCON_TIMEOUT_IDLE	BIT(2)
+#define RESETCON_TIMEOUT_SLEEP	BIT(3)
+#define RESETCON_WDT_TIMEOUT	BIT(4)
+
+struct pic32_wdt {
+	void __iomem	*regs;
+	void __iomem	*rst_base;
+	struct clk	*clk;
+};
+
+static inline bool pic32_wdt_is_win_enabled(struct pic32_wdt *wdt)
+{
+	return !!(readl(wdt->regs + WDTCON_REG) & WDTCON_WIN_EN);
+}
+
+static inline u32 pic32_wdt_get_post_scaler(struct pic32_wdt *wdt)
+{
+	u32 v = readl(wdt->regs + WDTCON_REG);
+
+	return (v >> WDTCON_RMPS_SHIFT) & WDTCON_RMPS_MASK;
+}
+
+static inline u32 pic32_wdt_get_clk_id(struct pic32_wdt *wdt)
+{
+	u32 v = readl(wdt->regs + WDTCON_REG);
+
+	return (v >> WDTCON_RMCS_SHIFT) & WDTCON_RMCS_MASK;
+}
+
+static int pic32_wdt_bootstatus(struct pic32_wdt *wdt)
+{
+	u32 v = readl(wdt->rst_base);
+
+	writel(RESETCON_WDT_TIMEOUT, PIC32_CLR(wdt->rst_base));
+
+	return v & RESETCON_WDT_TIMEOUT;
+}
+
+static u32 pic32_wdt_get_timeout_secs(struct pic32_wdt *wdt, struct device *dev)
+{
+	unsigned long rate;
+	u32 period, ps, terminal;
+
+	rate = clk_get_rate(wdt->clk);
+
+	dev_dbg(dev, "wdt: clk_id %d, clk_rate %lu (prescale)\n",
+		pic32_wdt_get_clk_id(wdt), rate);
+
+	/* default, prescaler of 32 (i.e. div-by-32) is implicit. */
+	rate >>= 5;
+	if (!rate)
+		return 0;
+
+	/* calculate terminal count from postscaler. */
+	ps = pic32_wdt_get_post_scaler(wdt);
+	terminal = BIT(ps);
+
+	/* find time taken (in secs) to reach terminal count */
+	period = terminal / rate;
+	dev_dbg(dev,
+		"wdt: clk_rate %lu (postscale) / terminal %d, timeout %dsec\n",
+		rate, terminal, period);
+
+	return period;
+}
+
+static void pic32_wdt_keepalive(struct pic32_wdt *wdt)
+{
+	/* write key through single half-word */
+	writew(WDTCON_CLR_KEY, wdt->regs + WDTCON_REG + 2);
+}
+
+static int pic32_wdt_start(struct watchdog_device *wdd)
+{
+	struct pic32_wdt *wdt = watchdog_get_drvdata(wdd);
+
+	writel(WDTCON_ON, PIC32_SET(wdt->regs + WDTCON_REG));
+	pic32_wdt_keepalive(wdt);
+
+	return 0;
+}
+
+static int pic32_wdt_stop(struct watchdog_device *wdd)
+{
+	struct pic32_wdt *wdt = watchdog_get_drvdata(wdd);
+
+	writel(WDTCON_ON, PIC32_CLR(wdt->regs + WDTCON_REG));
+
+	/*
+	 * Cannot touch registers in the CPU cycle following clearing the
+	 * ON bit.
+	 */
+	nop();
+
+	return 0;
+}
+
+static int pic32_wdt_ping(struct watchdog_device *wdd)
+{
+	struct pic32_wdt *wdt = watchdog_get_drvdata(wdd);
+
+	pic32_wdt_keepalive(wdt);
+
+	return 0;
+}
+
+static const struct watchdog_ops pic32_wdt_fops = {
+	.owner		= THIS_MODULE,
+	.start		= pic32_wdt_start,
+	.stop		= pic32_wdt_stop,
+	.ping		= pic32_wdt_ping,
+};
+
+static const struct watchdog_info pic32_wdt_ident = {
+	.options = WDIOF_KEEPALIVEPING |
+			WDIOF_MAGICCLOSE | WDIOF_CARDRESET,
+	.identity = "PIC32 Watchdog",
+};
+
+static struct watchdog_device pic32_wdd = {
+	.info		= &pic32_wdt_ident,
+	.ops		= &pic32_wdt_fops,
+};
+
+static const struct of_device_id pic32_wdt_dt_ids[] = {
+	{ .compatible = "microchip,pic32mzda-wdt", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_wdt_dt_ids);
+
+static int pic32_wdt_drv_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct watchdog_device *wdd = &pic32_wdd;
+	struct pic32_wdt *wdt;
+	struct resource *mem;
+
+	wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+	if (!wdt)
+		return -ENOMEM;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	wdt->regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(wdt->regs))
+		return PTR_ERR(wdt->regs);
+
+	wdt->rst_base = devm_ioremap(&pdev->dev, PIC32_BASE_RESET, 0x10);
+	if (!wdt->rst_base)
+		return -ENOMEM;
+
+	wdt->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(wdt->clk)) {
+		dev_err(&pdev->dev, "clk not found\n");
+		return PTR_ERR(wdt->clk);
+	}
+
+	ret = clk_prepare_enable(wdt->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "clk enable failed\n");
+		return ret;
+	}
+
+	if (pic32_wdt_is_win_enabled(wdt)) {
+		dev_err(&pdev->dev, "windowed-clear mode is not supported.\n");
+		ret = -ENODEV;
+		goto out_disable_clk;
+	}
+
+	wdd->timeout = pic32_wdt_get_timeout_secs(wdt, &pdev->dev);
+	if (!wdd->timeout) {
+		dev_err(&pdev->dev,
+			"failed to read watchdog register timeout\n");
+		ret = -EINVAL;
+		goto out_disable_clk;
+	}
+
+	dev_info(&pdev->dev, "timeout %d\n", wdd->timeout);
+
+	wdd->bootstatus = pic32_wdt_bootstatus(wdt) ? WDIOF_CARDRESET : 0;
+
+	watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
+	watchdog_set_drvdata(wdd, wdt);
+
+	ret = watchdog_register_device(wdd);
+	if (ret) {
+		dev_err(&pdev->dev, "watchdog register failed, err %d\n", ret);
+		goto out_disable_clk;
+	}
+
+	platform_set_drvdata(pdev, wdd);
+
+	return 0;
+
+out_disable_clk:
+	clk_disable_unprepare(wdt->clk);
+
+	return ret;
+}
+
+static int pic32_wdt_drv_remove(struct platform_device *pdev)
+{
+	struct watchdog_device *wdd = platform_get_drvdata(pdev);
+	struct pic32_wdt *wdt = watchdog_get_drvdata(wdd);
+
+	watchdog_unregister_device(wdd);
+	clk_disable_unprepare(wdt->clk);
+
+	return 0;
+}
+
+static struct platform_driver pic32_wdt_driver = {
+	.probe		= pic32_wdt_drv_probe,
+	.remove		= pic32_wdt_drv_remove,
+	.driver		= {
+		.name		= "pic32-wdt",
+		.owner		= THIS_MODULE,
+		.of_match_table = of_match_ptr(pic32_wdt_dt_ids),
+	}
+};
+
+module_platform_driver(pic32_wdt_driver);
+
+MODULE_AUTHOR("Joshua Henderson <joshua.henderson@microchip.com>");
+MODULE_DESCRIPTION("Microchip PIC32 Watchdog Timer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index c1658fe..981a668 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -262,7 +262,7 @@
 
 		ret = register_restart_handler(&wdd->restart_nb);
 		if (ret)
-			pr_warn("watchog%d: Cannot register restart handler (%d)\n",
+			pr_warn("watchdog%d: Cannot register restart handler (%d)\n",
 				wdd->id, ret);
 	}
 
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 9781e0d..d46839f 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -151,6 +151,8 @@
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 
+static void release_memory_resource(struct resource *resource);
+
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -267,6 +269,20 @@
 		return NULL;
 	}
 
+#ifdef CONFIG_SPARSEMEM
+	{
+		unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
+		unsigned long pfn = res->start >> PAGE_SHIFT;
+
+		if (pfn > limit) {
+			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
+			       pfn, limit);
+			release_memory_resource(res);
+			return NULL;
+		}
+	}
+#endif
+
 	return res;
 }
 
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index be7e56a..e9d2135 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -316,7 +316,6 @@
 	.get_next_high_mono_count = xen_efi_get_next_high_mono_count,
 	.reset_system             = NULL, /* Functionality provided by Xen. */
 	.set_virtual_address_map  = NULL, /* Not used under Xen. */
-	.memmap                   = NULL, /* Not used under Xen. */
 	.flags			  = 0     /* Initialized later. */
 };
 
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 488017a..cb7138c 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -484,9 +484,19 @@
 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
 	int rc = 0;
 
-	irq_move_irq(data);
+	if (!VALID_EVTCHN(evtchn))
+		return;
 
-	if (VALID_EVTCHN(evtchn))
+	if (unlikely(irqd_is_setaffinity_pending(data))) {
+		int masked = test_and_set_mask(evtchn);
+
+		clear_evtchn(evtchn);
+
+		irq_move_masked_irq(data);
+
+		if (!masked)
+			unmask_evtchn(evtchn);
+	} else
 		clear_evtchn(evtchn);
 
 	if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@
 {
 	int evtchn = evtchn_from_irq(data->irq);
 
-	irq_move_irq(data);
+	if (!VALID_EVTCHN(evtchn))
+		return;
 
-	if (VALID_EVTCHN(evtchn))
+	if (unlikely(irqd_is_setaffinity_pending(data))) {
+		int masked = test_and_set_mask(evtchn);
+
+		clear_evtchn(evtchn);
+
+		irq_move_masked_irq(data);
+
+		if (!masked)
+			unmask_evtchn(evtchn);
+	} else
 		clear_evtchn(evtchn);
 }
 
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 38272ad..f4edd6d 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -316,7 +316,6 @@
 {
 	unsigned int new_size;
 	evtchn_port_t *new_ring, *old_ring;
-	unsigned int p, c;
 
 	/*
 	 * Ensure the ring is large enough to capture all possible
@@ -346,20 +345,17 @@
 	/*
 	 * Copy the old ring contents to the new ring.
 	 *
-	 * If the ring contents crosses the end of the current ring,
-	 * it needs to be copied in two chunks.
+	 * To take care of wrapping, a full ring, and the new index
+	 * pointing into the second half, simply copy the old contents
+	 * twice.
 	 *
 	 * +---------+    +------------------+
-	 * |34567  12| -> |       1234567    |
-	 * +-----p-c-+    +------------------+
+	 * |34567  12| -> |34567  1234567  12|
+	 * +-----p-c-+    +-------c------p---+
 	 */
-	p = evtchn_ring_offset(u, u->ring_prod);
-	c = evtchn_ring_offset(u, u->ring_cons);
-	if (p < c) {
-		memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
-		memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
-	} else
-		memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
+	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
+	memcpy(new_ring + u->ring_size, old_ring,
+	       u->ring_size * sizeof(*u->ring));
 
 	u->ring = new_ring;
 	u->ring_size = new_size;
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 9da967f..eb3589e 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -93,7 +93,7 @@
 	 * instantiating the inode (v9fs_inode_from_fid)
 	 */
 	acl = get_cached_acl(inode, type);
-	BUG_ON(acl == ACL_NOT_CACHED);
+	BUG_ON(is_uncached_acl(acl));
 	return acl;
 }
 
@@ -213,8 +213,8 @@
 }
 
 static int v9fs_xattr_get_acl(const struct xattr_handler *handler,
-			      struct dentry *dentry, const char *name,
-			      void *buffer, size_t size)
+			      struct dentry *dentry, struct inode *inode,
+			      const char *name, void *buffer, size_t size)
 {
 	struct v9fs_session_info *v9ses;
 	struct posix_acl *acl;
@@ -227,7 +227,7 @@
 	if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT)
 		return v9fs_xattr_get(dentry, handler->name, buffer, size);
 
-	acl = v9fs_get_cached_acl(d_inode(dentry), handler->flags);
+	acl = v9fs_get_cached_acl(inode, handler->flags);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
 	if (acl == NULL)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index e9e0437..c37fb9c 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -153,7 +153,7 @@
 	 * If called with zero offset, we should release
 	 * the private state assocated with the page
 	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE)
+	if (offset == 0 && length == PAGE_SIZE)
 		v9fs_fscache_invalidate_page(page);
 }
 
@@ -166,10 +166,10 @@
 	struct bio_vec bvec;
 	int err, len;
 
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
+	if (page->index == size >> PAGE_SHIFT)
+		len = size & ~PAGE_MASK;
 	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 
 	bvec.bv_page = page;
 	bvec.bv_offset = 0;
@@ -245,9 +245,10 @@
  *
  */
 static ssize_t
-v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
+	loff_t pos = iocb->ki_pos;
 	ssize_t n;
 	int err = 0;
 	if (iov_iter_rw(iter) == WRITE) {
@@ -271,7 +272,7 @@
 	int retval = 0;
 	struct page *page;
 	struct v9fs_inode *v9inode;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct inode *inode = mapping->host;
 
 
@@ -288,11 +289,11 @@
 	if (PageUptodate(page))
 		goto out;
 
-	if (len == PAGE_CACHE_SIZE)
+	if (len == PAGE_SIZE)
 		goto out;
 
 	retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
-	page_cache_release(page);
+	put_page(page);
 	if (!retval)
 		goto start;
 out:
@@ -313,7 +314,7 @@
 		/*
 		 * zero out the rest of the area
 		 */
-		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+		unsigned from = pos & (PAGE_SIZE - 1);
 
 		zero_user(page, from + copied, len - copied);
 		flush_dcache_page(page);
@@ -331,7 +332,7 @@
 	}
 	set_page_dirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return copied;
 }
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 5cc00e5..b0405d6 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -246,7 +246,7 @@
 const struct file_operations v9fs_dir_operations = {
 	.read = generic_read_dir,
 	.llseek = generic_file_llseek,
-	.iterate = v9fs_dir_readdir,
+	.iterate_shared = v9fs_dir_readdir,
 	.open = v9fs_file_open,
 	.release = v9fs_dir_release,
 };
@@ -254,7 +254,7 @@
 const struct file_operations v9fs_dir_operations_dotl = {
 	.read = generic_read_dir,
 	.llseek = generic_file_llseek,
-	.iterate = v9fs_dir_readdir_dotl,
+	.iterate_shared = v9fs_dir_readdir_dotl,
 	.open = v9fs_file_open,
 	.release = v9fs_dir_release,
         .fsync = v9fs_file_fsync_dotl,
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index eadc894..b84c291 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -421,8 +421,8 @@
 		struct inode *inode = file_inode(file);
 		loff_t i_size;
 		unsigned long pg_start, pg_end;
-		pg_start = origin >> PAGE_CACHE_SHIFT;
-		pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT;
+		pg_start = origin >> PAGE_SHIFT;
+		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
 		if (inode->i_mapping && inode->i_mapping->nrpages)
 			invalidate_inode_pages2_range(inode->i_mapping,
 						      pg_start, pg_end);
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 3a08b3e..f4645c5 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1071,7 +1071,7 @@
 	if (IS_ERR(st))
 		return PTR_ERR(st);
 
-	v9fs_stat2inode(st, d_inode(dentry), d_inode(dentry)->i_sb);
+	v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb);
 	generic_fillattr(d_inode(dentry), stat);
 
 	p9stat_free(st);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index bf495ce..de3ed86 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -87,7 +87,7 @@
 		sb->s_op = &v9fs_super_ops;
 	sb->s_bdi = &v9ses->bdi;
 	if (v9ses->cache)
-		sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE;
+		sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
 
 	sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
 	if (!v9ses->cache)
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 9dd9b47..18c62ba 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -138,8 +138,8 @@
 }
 
 static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
-				  struct dentry *dentry, const char *name,
-				  void *buffer, size_t size)
+				  struct dentry *dentry, struct inode *inode,
+				  const char *name, void *buffer, size_t size)
 {
 	const char *full_name = xattr_full_name(handler, name);
 
diff --git a/fs/affs/dir.c b/fs/affs/dir.c
index ac4f318..f1e7294 100644
--- a/fs/affs/dir.c
+++ b/fs/affs/dir.c
@@ -20,7 +20,7 @@
 const struct file_operations affs_dir_operations = {
 	.read		= generic_read_dir,
 	.llseek		= generic_file_llseek,
-	.iterate	= affs_readdir,
+	.iterate_shared	= affs_readdir,
 	.fsync		= affs_file_fsync,
 };
 
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 22fc7c8..0deec9c 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -389,12 +389,13 @@
 }
 
 static ssize_t
-affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
 	size_t count = iov_iter_count(iter);
+	loff_t offset = iocb->ki_pos;
 	ssize_t ret;
 
 	if (iov_iter_rw(iter) == WRITE) {
@@ -404,7 +405,7 @@
 			return 0;
 	}
 
-	ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block);
+	ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
 	if (ret < 0 && iov_iter_rw(iter) == WRITE)
 		affs_write_failed(mapping, offset + count);
 	return ret;
@@ -510,9 +511,9 @@
 
 	pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
 		 page->index, to);
-	BUG_ON(to > PAGE_CACHE_SIZE);
+	BUG_ON(to > PAGE_SIZE);
 	bsize = AFFS_SB(sb)->s_data_blksize;
-	tmp = page->index << PAGE_CACHE_SHIFT;
+	tmp = page->index << PAGE_SHIFT;
 	bidx = tmp / bsize;
 	boff = tmp % bsize;
 
@@ -613,10 +614,10 @@
 	int err;
 
 	pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
-	to = PAGE_CACHE_SIZE;
-	if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
-		to = inode->i_size & ~PAGE_CACHE_MASK;
-		memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
+	to = PAGE_SIZE;
+	if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
+		to = inode->i_size & ~PAGE_MASK;
+		memset(page_address(page) + to, 0, PAGE_SIZE - to);
 	}
 
 	err = affs_do_readpage_ofs(page, to);
@@ -646,7 +647,7 @@
 			return err;
 	}
 
-	index = pos >> PAGE_CACHE_SHIFT;
+	index = pos >> PAGE_SHIFT;
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
@@ -656,10 +657,10 @@
 		return 0;
 
 	/* XXX: inefficient but safe in the face of short writes */
-	err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE);
+	err = affs_do_readpage_ofs(page, PAGE_SIZE);
 	if (err) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return err;
 }
@@ -677,7 +678,7 @@
 	u32 tmp;
 	int written;
 
-	from = pos & (PAGE_CACHE_SIZE - 1);
+	from = pos & (PAGE_SIZE - 1);
 	to = pos + len;
 	/*
 	 * XXX: not sure if this can handle short copies (len < copied), but
@@ -692,7 +693,7 @@
 
 	bh = NULL;
 	written = 0;
-	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+	tmp = (page->index << PAGE_SHIFT) + from;
 	bidx = tmp / bsize;
 	boff = tmp % bsize;
 	if (boff) {
@@ -788,13 +789,13 @@
 
 done:
 	affs_brelse(bh);
-	tmp = (page->index << PAGE_CACHE_SHIFT) + from;
+	tmp = (page->index << PAGE_SHIFT) + from;
 	if (tmp > inode->i_size)
 		inode->i_size = AFFS_I(inode)->mmu_private = tmp;
 
 err_first_bh:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return written;
 
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e10e1778..eba5410 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -43,7 +43,7 @@
 const struct file_operations afs_dir_file_operations = {
 	.open		= afs_dir_open,
 	.release	= afs_release,
-	.iterate	= afs_readdir,
+	.iterate_shared	= afs_readdir,
 	.lock		= afs_lock,
 	.llseek		= generic_file_llseek,
 };
@@ -128,7 +128,7 @@
 /*
  * check that a directory page is valid
  */
-static inline void afs_dir_check_page(struct inode *dir, struct page *page)
+static inline bool afs_dir_check_page(struct inode *dir, struct page *page)
 {
 	struct afs_dir_page *dbuf;
 	loff_t latter;
@@ -168,11 +168,11 @@
 	}
 
 	SetPageChecked(page);
-	return;
+	return true;
 
 error:
-	SetPageChecked(page);
 	SetPageError(page);
+	return false;
 }
 
 /*
@@ -181,7 +181,7 @@
 static inline void afs_dir_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -196,10 +196,10 @@
 	page = read_cache_page(dir->i_mapping, index, afs_page_filler, key);
 	if (!IS_ERR(page)) {
 		kmap(page);
-		if (!PageChecked(page))
-			afs_dir_check_page(dir, page);
-		if (PageError(page))
-			goto fail;
+		if (unlikely(!PageChecked(page))) {
+			if (PageError(page) || !afs_dir_check_page(dir, page))
+				goto fail;
+		}
 	}
 	return page;
 
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 999bc3c..6344aee 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -164,7 +164,7 @@
 		_debug("cache said ENOBUFS");
 	default:
 	go_on:
-		offset = page->index << PAGE_CACHE_SHIFT;
+		offset = page->index << PAGE_SHIFT;
 		len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
 
 		/* read the contents of the file from the server into the
@@ -319,7 +319,7 @@
 	BUG_ON(!PageLocked(page));
 
 	/* we clean up only if the entire page is being invalidated */
-	if (offset == 0 && length == PAGE_CACHE_SIZE) {
+	if (offset == 0 && length == PAGE_SIZE) {
 #ifdef CONFIG_AFS_FSCACHE
 		if (PageFsCache(page)) {
 			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index ccd0b21..81dd075 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -93,7 +93,7 @@
 
 	kunmap(page);
 out_free:
-	page_cache_release(page);
+	put_page(page);
 out:
 	_leave(" = %d", ret);
 	return ret;
@@ -189,7 +189,7 @@
 		buf = kmap_atomic(page);
 		memcpy(devname, buf, size);
 		kunmap_atomic(buf);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 	}
 
@@ -211,7 +211,7 @@
 	return mnt;
 
 error:
-	page_cache_release(page);
+	put_page(page);
 error_no_page:
 	free_page((unsigned long) options);
 error_no_options:
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index b506428..63cd9f9 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -65,6 +65,12 @@
 	call->async_workfn(call);
 }
 
+static int afs_wait_atomic_t(atomic_t *p)
+{
+	schedule();
+	return 0;
+}
+
 /*
  * open an RxRPC socket and bind it to be a server for callback notifications
  * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -126,13 +132,16 @@
 {
 	_enter("");
 
+	wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t,
+			 TASK_UNINTERRUPTIBLE);
+	_debug("no outstanding calls");
+
 	sock_release(afs_socket);
 
 	_debug("dework");
 	destroy_workqueue(afs_async_calls);
 
 	ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0);
-	ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0);
 	_leave("");
 }
 
@@ -178,8 +187,6 @@
 {
 	_debug("DONE %p{%s} [%d]",
 	       call, call->type->name, atomic_read(&afs_outstanding_calls));
-	if (atomic_dec_return(&afs_outstanding_calls) == -1)
-		BUG();
 
 	ASSERTCMP(call->rxcall, ==, NULL);
 	ASSERT(!work_pending(&call->async_work));
@@ -188,6 +195,9 @@
 
 	kfree(call->request);
 	kfree(call);
+
+	if (atomic_dec_and_test(&afs_outstanding_calls))
+		wake_up_atomic_t(&afs_outstanding_calls);
 }
 
 /*
@@ -420,9 +430,11 @@
 }
 
 /*
- * handles intercepted messages that were arriving in the socket's Rx queue
- * - called with the socket receive queue lock held to ensure message ordering
- * - called with softirqs disabled
+ * Handles intercepted messages that were arriving in the socket's Rx queue.
+ *
+ * Called from the AF_RXRPC call processor in waitqueue process context.  For
+ * each call, this is guaranteed to be called in the order the packets are
+ * delivered.
  */
 static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID,
 			       struct sk_buff *skb)
@@ -513,6 +525,12 @@
 			call->state = AFS_CALL_ABORTED;
 			_debug("Rcv ABORT %u -> %d", abort_code, call->error);
 			break;
+		case RXRPC_SKB_MARK_LOCAL_ABORT:
+			abort_code = rxrpc_kernel_get_abort_code(skb);
+			call->error = call->type->abort_to_error(abort_code);
+			call->state = AFS_CALL_ABORTED;
+			_debug("Loc ABORT %u -> %d", abort_code, call->error);
+			break;
 		case RXRPC_SKB_MARK_NET_ERROR:
 			call->error = -rxrpc_kernel_get_error_number(skb);
 			call->state = AFS_CALL_ERROR;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 81afefe..fbdb022 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -315,8 +315,8 @@
 	_enter("");
 
 	/* fill in the superblock */
-	sb->s_blocksize		= PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits	= PAGE_CACHE_SHIFT;
+	sb->s_blocksize		= PAGE_SIZE;
+	sb->s_blocksize_bits	= PAGE_SHIFT;
 	sb->s_magic		= AFS_FS_MAGIC;
 	sb->s_op		= &afs_super_ops;
 	sb->s_bdi		= &as->volume->bdi;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index dfef94f..65de439 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -93,10 +93,10 @@
 	_enter(",,%llu", (unsigned long long)pos);
 
 	i_size = i_size_read(&vnode->vfs_inode);
-	if (pos + PAGE_CACHE_SIZE > i_size)
+	if (pos + PAGE_SIZE > i_size)
 		len = i_size - pos;
 	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 
 	ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
 	if (ret < 0) {
@@ -123,9 +123,9 @@
 	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
 	struct page *page;
 	struct key *key = file->private_data;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + len;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	int ret;
 
 	_enter("{%x:%u},{%lx},%u,%u",
@@ -151,8 +151,8 @@
 	*pagep = page;
 	/* page won't leak in error case: it eventually gets cleaned off LRU */
 
-	if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
-		ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
+	if (!PageUptodate(page) && len != PAGE_SIZE) {
+		ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
 		if (ret < 0) {
 			kfree(candidate);
 			_leave(" = %d [prep]", ret);
@@ -266,7 +266,7 @@
 	if (PageDirty(page))
 		_debug("dirtied");
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return copied;
 }
@@ -480,7 +480,7 @@
 
 		if (page->index > end) {
 			*_next = index;
-			page_cache_release(page);
+			put_page(page);
 			_leave(" = 0 [%lx]", *_next);
 			return 0;
 		}
@@ -494,7 +494,7 @@
 
 		if (page->mapping != mapping) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -515,7 +515,7 @@
 
 		ret = afs_write_back_from_locked_page(wb, page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		if (ret < 0) {
 			_leave(" = %d", ret);
 			return ret;
@@ -551,13 +551,13 @@
 						    &next);
 		mapping->writeback_index = next;
 	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
-		end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT);
+		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
 		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
 		if (wbc->nr_to_write > 0)
 			mapping->writeback_index = next;
 	} else {
-		start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		start = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		ret = afs_writepages_region(mapping, wbc, start, end, &next);
 	}
 
diff --git a/fs/aio.c b/fs/aio.c
index 155f842..a6deaa7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1447,8 +1447,6 @@
 			return ret;
 		}
 
-		len = ret;
-
 		if (rw == WRITE)
 			file_start_write(file);
 
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 7ab9239..78bd802 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -39,7 +39,7 @@
 	.open		= dcache_dir_open,
 	.release	= dcache_dir_close,
 	.read		= generic_read_dir,
-	.iterate	= dcache_readdir,
+	.iterate_shared	= dcache_readdir,
 	.llseek		= dcache_dir_lseek,
 	.unlocked_ioctl	= autofs4_root_ioctl,
 #ifdef CONFIG_COMPAT
@@ -51,7 +51,7 @@
 	.open		= autofs4_dir_open,
 	.release	= dcache_dir_close,
 	.read		= generic_read_dir,
-	.iterate	= dcache_readdir,
+	.iterate_shared	= dcache_readdir,
 	.llseek		= dcache_dir_lseek,
 };
 
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 103f5d7..72e35b7 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -106,8 +106,8 @@
 	return -EIO;
 }
 
-static ssize_t bad_inode_getxattr(struct dentry *dentry, const char *name,
-			void *buffer, size_t size)
+static ssize_t bad_inode_getxattr(struct dentry *dentry, struct inode *inode,
+			const char *name, void *buffer, size_t size)
 {
 	return -EIO;
 }
diff --git a/fs/befs/befs.h b/fs/befs/befs.h
index 35d19e8..e0f59263a 100644
--- a/fs/befs/befs.h
+++ b/fs/befs/befs.h
@@ -116,7 +116,7 @@
 }
 
 static inline befs_blocknr_t
-iaddr2blockno(struct super_block *sb, befs_inode_addr * iaddr)
+iaddr2blockno(struct super_block *sb, const befs_inode_addr *iaddr)
 {
 	return ((iaddr->allocation_group << BEFS_SB(sb)->ag_shift) +
 		iaddr->start);
@@ -141,7 +141,7 @@
 }
 
 static inline int
-befs_iaddr_is_empty(befs_inode_addr * iaddr)
+befs_iaddr_is_empty(const befs_inode_addr *iaddr)
 {
 	return (!iaddr->allocation_group) && (!iaddr->start) && (!iaddr->len);
 }
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 22c16628..307645f9 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -88,15 +88,15 @@
 static const befs_off_t befs_bt_inval = 0xffffffffffffffffULL;
 
 /* local functions */
-static int befs_btree_seekleaf(struct super_block *sb, befs_data_stream * ds,
+static int befs_btree_seekleaf(struct super_block *sb, const befs_data_stream *ds,
 			       befs_btree_super * bt_super,
 			       struct befs_btree_node *this_node,
 			       befs_off_t * node_off);
 
-static int befs_bt_read_super(struct super_block *sb, befs_data_stream * ds,
+static int befs_bt_read_super(struct super_block *sb, const befs_data_stream *ds,
 			      befs_btree_super * sup);
 
-static int befs_bt_read_node(struct super_block *sb, befs_data_stream * ds,
+static int befs_bt_read_node(struct super_block *sb, const befs_data_stream *ds,
 			     struct befs_btree_node *node,
 			     befs_off_t node_off);
 
@@ -134,7 +134,7 @@
  * On failure, BEFS_ERR is returned.
  */
 static int
-befs_bt_read_super(struct super_block *sb, befs_data_stream * ds,
+befs_bt_read_super(struct super_block *sb, const befs_data_stream *ds,
 		   befs_btree_super * sup)
 {
 	struct buffer_head *bh;
@@ -193,7 +193,7 @@
  */
 
 static int
-befs_bt_read_node(struct super_block *sb, befs_data_stream * ds,
+befs_bt_read_node(struct super_block *sb, const befs_data_stream *ds,
 		  struct befs_btree_node *node, befs_off_t node_off)
 {
 	uint off = 0;
@@ -247,7 +247,7 @@
  *   actuall value stored with the key.
  */
 int
-befs_btree_find(struct super_block *sb, befs_data_stream * ds,
+befs_btree_find(struct super_block *sb, const befs_data_stream *ds,
 		const char *key, befs_off_t * value)
 {
 	struct befs_btree_node *this_node;
@@ -416,7 +416,7 @@
  *   until the (key_no)th key is found or the tree is out of keys.
  */
 int
-befs_btree_read(struct super_block *sb, befs_data_stream * ds,
+befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
 		loff_t key_no, size_t bufsize, char *keybuf, size_t * keysize,
 		befs_off_t * value)
 {
@@ -548,7 +548,7 @@
  * Also checks for an empty tree. If there are no keys, returns BEFS_BT_EMPTY.
  */
 static int
-befs_btree_seekleaf(struct super_block *sb, befs_data_stream * ds,
+befs_btree_seekleaf(struct super_block *sb, const befs_data_stream *ds,
 		    befs_btree_super *bt_super,
 		    struct befs_btree_node *this_node,
 		    befs_off_t * node_off)
diff --git a/fs/befs/btree.h b/fs/befs/btree.h
index 92e781e..f2a8f63 100644
--- a/fs/befs/btree.h
+++ b/fs/befs/btree.h
@@ -4,10 +4,10 @@
  */
 
 
-int befs_btree_find(struct super_block *sb, befs_data_stream * ds,
+int befs_btree_find(struct super_block *sb, const befs_data_stream *ds,
 		    const char *key, befs_off_t * value);
 
-int befs_btree_read(struct super_block *sb, befs_data_stream * ds,
+int befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
 		    loff_t key_no, size_t bufsize, char *keybuf,
 		    size_t * keysize, befs_off_t * value);
 
diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c
index ebd5071..dde0b79 100644
--- a/fs/befs/datastream.c
+++ b/fs/befs/datastream.c
@@ -21,16 +21,16 @@
 const befs_inode_addr BAD_IADDR = { 0, 0, 0 };
 
 static int befs_find_brun_direct(struct super_block *sb,
-				 befs_data_stream * data,
+				 const befs_data_stream *data,
 				 befs_blocknr_t blockno, befs_block_run * run);
 
 static int befs_find_brun_indirect(struct super_block *sb,
-				   befs_data_stream * data,
+				   const befs_data_stream *data,
 				   befs_blocknr_t blockno,
 				   befs_block_run * run);
 
 static int befs_find_brun_dblindirect(struct super_block *sb,
-				      befs_data_stream * data,
+				      const befs_data_stream *data,
 				      befs_blocknr_t blockno,
 				      befs_block_run * run);
 
@@ -45,7 +45,7 @@
  * if you don't need to know offset just set @off = NULL.
  */
 struct buffer_head *
-befs_read_datastream(struct super_block *sb, befs_data_stream * ds,
+befs_read_datastream(struct super_block *sb, const befs_data_stream *ds,
 		     befs_off_t pos, uint * off)
 {
 	struct buffer_head *bh = NULL;
@@ -87,7 +87,7 @@
  * 2001-11-15 Will Dyson
  */
 int
-befs_fblock2brun(struct super_block *sb, befs_data_stream * data,
+befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
 		 befs_blocknr_t fblock, befs_block_run * run)
 {
 	int err;
@@ -122,8 +122,8 @@
  * Returns the number of bytes read
  */
 size_t
-befs_read_lsymlink(struct super_block * sb, befs_data_stream * ds, void *buff,
-		   befs_off_t len)
+befs_read_lsymlink(struct super_block *sb, const befs_data_stream *ds,
+		   void *buff, befs_off_t len)
 {
 	befs_off_t bytes_read = 0;	/* bytes readed */
 	u16 plen;
@@ -163,7 +163,7 @@
 */
 
 befs_blocknr_t
-befs_count_blocks(struct super_block * sb, befs_data_stream * ds)
+befs_count_blocks(struct super_block *sb, const befs_data_stream *ds)
 {
 	befs_blocknr_t blocks;
 	befs_blocknr_t datablocks;	/* File data blocks */
@@ -243,11 +243,11 @@
 	2001-11-15 Will Dyson
 */
 static int
-befs_find_brun_direct(struct super_block *sb, befs_data_stream * data,
+befs_find_brun_direct(struct super_block *sb, const befs_data_stream *data,
 		      befs_blocknr_t blockno, befs_block_run * run)
 {
 	int i;
-	befs_block_run *array = data->direct;
+	const befs_block_run *array = data->direct;
 	befs_blocknr_t sum;
 	befs_blocknr_t max_block =
 	    data->max_direct_range >> BEFS_SB(sb)->block_shift;
@@ -304,7 +304,8 @@
 */
 static int
 befs_find_brun_indirect(struct super_block *sb,
-			befs_data_stream * data, befs_blocknr_t blockno,
+			const befs_data_stream *data,
+			befs_blocknr_t blockno,
 			befs_block_run * run)
 {
 	int i, j;
@@ -412,7 +413,8 @@
 */
 static int
 befs_find_brun_dblindirect(struct super_block *sb,
-			   befs_data_stream * data, befs_blocknr_t blockno,
+			   const befs_data_stream *data,
+			   befs_blocknr_t blockno,
 			   befs_block_run * run)
 {
 	int dblindir_indx;
diff --git a/fs/befs/datastream.h b/fs/befs/datastream.h
index 45e8a3c..91ba820 100644
--- a/fs/befs/datastream.h
+++ b/fs/befs/datastream.h
@@ -4,16 +4,17 @@
  */
 
 struct buffer_head *befs_read_datastream(struct super_block *sb,
-					 befs_data_stream * ds, befs_off_t pos,
-					 uint * off);
+					 const befs_data_stream *ds,
+					 befs_off_t pos, uint * off);
 
-int befs_fblock2brun(struct super_block *sb, befs_data_stream * data,
+int befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
 		     befs_blocknr_t fblock, befs_block_run * run);
 
-size_t befs_read_lsymlink(struct super_block *sb, befs_data_stream * data,
+size_t befs_read_lsymlink(struct super_block *sb, const befs_data_stream *data,
 			  void *buff, befs_off_t len);
 
-befs_blocknr_t befs_count_blocks(struct super_block *sb, befs_data_stream * ds);
+befs_blocknr_t befs_count_blocks(struct super_block *sb,
+			  const befs_data_stream *ds);
 
 extern const befs_inode_addr BAD_IADDR;
 
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index cc0e082..71112aa 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -66,7 +66,7 @@
 
 static const struct file_operations befs_dir_operations = {
 	.read		= generic_read_dir,
-	.iterate	= befs_readdir,
+	.iterate_shared	= befs_readdir,
 	.llseek		= generic_file_llseek,
 };
 
@@ -157,7 +157,7 @@
 {
 	struct inode *inode = NULL;
 	struct super_block *sb = dir->i_sb;
-	befs_data_stream *ds = &BEFS_I(dir)->i_data.ds;
+	const befs_data_stream *ds = &BEFS_I(dir)->i_data.ds;
 	befs_off_t offset;
 	int ret;
 	int utfnamelen;
@@ -207,7 +207,7 @@
 {
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
+	const befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
 	befs_off_t value;
 	int result;
 	size_t keysize;
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 3ec6113..34a5bc2 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -70,7 +70,7 @@
 
 const struct file_operations bfs_dir_operations = {
 	.read		= generic_read_dir,
-	.iterate	= bfs_readdir,
+	.iterate_shared	= bfs_readdir,
 	.fsync		= generic_file_fsync,
 	.llseek		= generic_file_llseek,
 };
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7d914c6..56224ff 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2273,7 +2273,7 @@
 		goto end_coredump;
 
 	/* Align to page */
-	if (!dump_skip(cprm, dataoff - cprm->written))
+	if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
 		goto end_coredump;
 
 	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
@@ -2292,7 +2292,7 @@
 				void *kaddr = kmap(page);
 				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
 				kunmap(page);
-				page_cache_release(page);
+				put_page(page);
 			} else
 				stop = !dump_skip(cprm, PAGE_SIZE);
 			if (stop)
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index b1adb92..71ade0e 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1533,7 +1533,7 @@
 				void *kaddr = kmap(page);
 				res = dump_emit(cprm, kaddr, PAGE_SIZE);
 				kunmap(page);
-				page_cache_release(page);
+				put_page(page);
 			} else {
 				res = dump_skip(cprm, PAGE_SIZE);
 			}
@@ -1787,7 +1787,7 @@
 				goto end_coredump;
 	}
 
-	if (!dump_skip(cprm, dataoff - cprm->written))
+	if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
 		goto end_coredump;
 
 	if (!elf_fdpic_dump_segments(cprm))
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3172c4e..a063d4d 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -162,15 +162,15 @@
 }
 
 static ssize_t
-blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = bdev_file_inode(file);
 
 	if (IS_DAX(inode))
-		return dax_do_io(iocb, inode, iter, offset, blkdev_get_block,
+		return dax_do_io(iocb, inode, iter, blkdev_get_block,
 				NULL, DIO_SKIP_DIO_COUNT);
-	return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset,
+	return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter,
 				    blkdev_get_block, NULL, NULL,
 				    DIO_SKIP_DIO_COUNT);
 }
@@ -331,7 +331,7 @@
 	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return ret;
 }
@@ -1149,7 +1149,7 @@
 	inode_lock(bdev->bd_inode);
 	i_size_write(bdev->bd_inode, size);
 	inode_unlock(bdev->bd_inode);
-	while (bsize < PAGE_CACHE_SIZE) {
+	while (bsize < PAGE_SIZE) {
 		if (size & bsize)
 			break;
 		bsize <<= 1;
@@ -1660,12 +1660,8 @@
 
 	blk_start_plug(&plug);
 	ret = __generic_file_write_iter(iocb, from);
-	if (ret > 0) {
-		ssize_t err;
-		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
-		if (err < 0)
-			ret = err;
-	}
+	if (ret > 0)
+		ret = generic_write_sync(iocb, ret);
 	blk_finish_plug(&plug);
 	return ret;
 }
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 6d263bb..67a6077 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -63,9 +63,6 @@
 	}
 	kfree(value);
 
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
-
 	return acl;
 }
 
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index e34a71b..516e19d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -757,7 +757,7 @@
 			BUG_ON(NULL == l);
 
 			ret = btrfsic_read_block(state, &tmp_next_block_ctx);
-			if (ret < (int)PAGE_CACHE_SIZE) {
+			if (ret < (int)PAGE_SIZE) {
 				printk(KERN_INFO
 				       "btrfsic: read @logical %llu failed!\n",
 				       tmp_next_block_ctx.start);
@@ -1231,15 +1231,15 @@
 	size_t offset_in_page;
 	char *kaddr;
 	char *dst = (char *)dstv;
-	size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = block_ctx->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + offset) >> PAGE_SHIFT;
 
 	WARN_ON(offset + len > block_ctx->len);
-	offset_in_page = (start_offset + offset) & (PAGE_CACHE_SIZE - 1);
+	offset_in_page = (start_offset + offset) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
-		cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
-		BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE));
+		cur = min(len, ((size_t)PAGE_SIZE - offset_in_page));
+		BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
 		kaddr = block_ctx->datav[i];
 		memcpy(dst, kaddr + offset_in_page, cur);
 
@@ -1605,8 +1605,8 @@
 
 		BUG_ON(!block_ctx->datav);
 		BUG_ON(!block_ctx->pagev);
-		num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
-			    PAGE_CACHE_SHIFT;
+		num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
+			    PAGE_SHIFT;
 		while (num_pages > 0) {
 			num_pages--;
 			if (block_ctx->datav[num_pages]) {
@@ -1637,15 +1637,15 @@
 	BUG_ON(block_ctx->datav);
 	BUG_ON(block_ctx->pagev);
 	BUG_ON(block_ctx->mem_to_free);
-	if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
+	if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
 		printk(KERN_INFO
 		       "btrfsic: read_block() with unaligned bytenr %llu\n",
 		       block_ctx->dev_bytenr);
 		return -1;
 	}
 
-	num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
-		    PAGE_CACHE_SHIFT;
+	num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
+		    PAGE_SHIFT;
 	block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
 					  sizeof(*block_ctx->pagev)) *
 					 num_pages, GFP_NOFS);
@@ -1676,8 +1676,8 @@
 
 		for (j = i; j < num_pages; j++) {
 			ret = bio_add_page(bio, block_ctx->pagev[j],
-					   PAGE_CACHE_SIZE, 0);
-			if (PAGE_CACHE_SIZE != ret)
+					   PAGE_SIZE, 0);
+			if (PAGE_SIZE != ret)
 				break;
 		}
 		if (j == i) {
@@ -1693,7 +1693,7 @@
 			return -1;
 		}
 		bio_put(bio);
-		dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
+		dev_bytenr += (j - i) * PAGE_SIZE;
 		i = j;
 	}
 	for (i = 0; i < num_pages; i++) {
@@ -1769,9 +1769,9 @@
 	u32 crc = ~(u32)0;
 	unsigned int i;
 
-	if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
+	if (num_pages * PAGE_SIZE < state->metablock_size)
 		return 1; /* not metadata */
-	num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
+	num_pages = state->metablock_size >> PAGE_SHIFT;
 	h = (struct btrfs_header *)datav[0];
 
 	if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
@@ -1779,8 +1779,8 @@
 
 	for (i = 0; i < num_pages; i++) {
 		u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
-		size_t sublen = i ? PAGE_CACHE_SIZE :
-				    (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
+		size_t sublen = i ? PAGE_SIZE :
+				    (PAGE_SIZE - BTRFS_CSUM_SIZE);
 
 		crc = btrfs_crc32c(crc, data, sublen);
 	}
@@ -1826,14 +1826,14 @@
 		if (block->is_superblock) {
 			bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
 						    mapped_datav[0]);
-			if (num_pages * PAGE_CACHE_SIZE <
+			if (num_pages * PAGE_SIZE <
 			    BTRFS_SUPER_INFO_SIZE) {
 				printk(KERN_INFO
 				       "btrfsic: cannot work with too short bios!\n");
 				return;
 			}
 			is_metadata = 1;
-			BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
+			BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_SIZE - 1));
 			processed_len = BTRFS_SUPER_INFO_SIZE;
 			if (state->print_mask &
 			    BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
@@ -1844,7 +1844,7 @@
 		}
 		if (is_metadata) {
 			if (!block->is_superblock) {
-				if (num_pages * PAGE_CACHE_SIZE <
+				if (num_pages * PAGE_SIZE <
 				    state->metablock_size) {
 					printk(KERN_INFO
 					       "btrfsic: cannot work with too short bios!\n");
@@ -1880,7 +1880,7 @@
 			}
 			block->logical_bytenr = bytenr;
 		} else {
-			if (num_pages * PAGE_CACHE_SIZE <
+			if (num_pages * PAGE_SIZE <
 			    state->datablock_size) {
 				printk(KERN_INFO
 				       "btrfsic: cannot work with too short bios!\n");
@@ -2013,7 +2013,7 @@
 			block->logical_bytenr = bytenr;
 			block->is_metadata = 1;
 			if (block->is_superblock) {
-				BUG_ON(PAGE_CACHE_SIZE !=
+				BUG_ON(PAGE_SIZE !=
 				       BTRFS_SUPER_INFO_SIZE);
 				ret = btrfsic_process_written_superblock(
 						state,
@@ -2172,8 +2172,8 @@
 continue_loop:
 	BUG_ON(!processed_len);
 	dev_bytenr += processed_len;
-	mapped_datav += processed_len >> PAGE_CACHE_SHIFT;
-	num_pages -= processed_len >> PAGE_CACHE_SHIFT;
+	mapped_datav += processed_len >> PAGE_SHIFT;
+	num_pages -= processed_len >> PAGE_SHIFT;
 	goto again;
 }
 
@@ -2954,7 +2954,7 @@
 			goto leave;
 		cur_bytenr = dev_bytenr;
 		for (i = 0; i < bio->bi_vcnt; i++) {
-			BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
+			BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE);
 			mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
 			if (!mapped_datav[i]) {
 				while (i > 0) {
@@ -3037,16 +3037,16 @@
 	struct list_head *dev_head = &fs_devices->devices;
 	struct btrfs_device *device;
 
-	if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
+	if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
 		printk(KERN_INFO
-		       "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
-		       root->nodesize, PAGE_CACHE_SIZE);
+		       "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
+		       root->nodesize, PAGE_SIZE);
 		return -1;
 	}
-	if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
+	if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
 		printk(KERN_INFO
-		       "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
-		       root->sectorsize, PAGE_CACHE_SIZE);
+		       "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
+		       root->sectorsize, PAGE_SIZE);
 		return -1;
 	}
 	state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 3346cd8..ff61a41 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -119,7 +119,7 @@
 		csum = ~(u32)0;
 
 		kaddr = kmap_atomic(page);
-		csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
+		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
 		btrfs_csum_final(csum, (char *)&csum);
 		kunmap_atomic(kaddr);
 
@@ -190,7 +190,7 @@
 	for (index = 0; index < cb->nr_pages; index++) {
 		page = cb->compressed_pages[index];
 		page->mapping = NULL;
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	/* do io completion on the original bio */
@@ -224,8 +224,8 @@
 static noinline void end_compressed_writeback(struct inode *inode,
 					      const struct compressed_bio *cb)
 {
-	unsigned long index = cb->start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT;
+	unsigned long index = cb->start >> PAGE_SHIFT;
+	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 	struct page *pages[16];
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
@@ -247,7 +247,7 @@
 			if (cb->errors)
 				SetPageError(pages[i]);
 			end_page_writeback(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		nr_pages -= ret;
 		index += ret;
@@ -304,7 +304,7 @@
 	for (index = 0; index < cb->nr_pages; index++) {
 		page = cb->compressed_pages[index];
 		page->mapping = NULL;
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	/* finally free the cb struct */
@@ -341,7 +341,7 @@
 	int ret;
 	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
-	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
+	WARN_ON(start & ((u64)PAGE_SIZE - 1));
 	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
 	if (!cb)
 		return -ENOMEM;
@@ -374,14 +374,14 @@
 		page->mapping = inode->i_mapping;
 		if (bio->bi_iter.bi_size)
 			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
-							   PAGE_CACHE_SIZE,
+							   PAGE_SIZE,
 							   bio, 0);
 		else
 			ret = 0;
 
 		page->mapping = NULL;
-		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
-		    PAGE_CACHE_SIZE) {
+		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
+		    PAGE_SIZE) {
 			bio_get(bio);
 
 			/*
@@ -410,15 +410,15 @@
 			BUG_ON(!bio);
 			bio->bi_private = cb;
 			bio->bi_end_io = end_compressed_bio_write;
-			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+			bio_add_page(bio, page, PAGE_SIZE, 0);
 		}
-		if (bytes_left < PAGE_CACHE_SIZE) {
+		if (bytes_left < PAGE_SIZE) {
 			btrfs_info(BTRFS_I(inode)->root->fs_info,
 					"bytes left %lu compress len %lu nr %lu",
 			       bytes_left, cb->compressed_len, cb->nr_pages);
 		}
-		bytes_left -= PAGE_CACHE_SIZE;
-		first_byte += PAGE_CACHE_SIZE;
+		bytes_left -= PAGE_SIZE;
+		first_byte += PAGE_SIZE;
 		cond_resched();
 	}
 	bio_get(bio);
@@ -457,17 +457,17 @@
 	int misses = 0;
 
 	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
-	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
+	last_offset = (page_offset(page) + PAGE_SIZE);
 	em_tree = &BTRFS_I(inode)->extent_tree;
 	tree = &BTRFS_I(inode)->io_tree;
 
 	if (isize == 0)
 		return 0;
 
-	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
 	while (last_offset < compressed_end) {
-		pg_index = last_offset >> PAGE_CACHE_SHIFT;
+		pg_index = last_offset >> PAGE_SHIFT;
 
 		if (pg_index > end_index)
 			break;
@@ -488,11 +488,11 @@
 			break;
 
 		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
-			page_cache_release(page);
+			put_page(page);
 			goto next;
 		}
 
-		end = last_offset + PAGE_CACHE_SIZE - 1;
+		end = last_offset + PAGE_SIZE - 1;
 		/*
 		 * at this point, we have a locked page in the page cache
 		 * for these bytes in the file.  But, we have to make
@@ -502,27 +502,27 @@
 		lock_extent(tree, last_offset, end);
 		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, last_offset,
-					   PAGE_CACHE_SIZE);
+					   PAGE_SIZE);
 		read_unlock(&em_tree->lock);
 
 		if (!em || last_offset < em->start ||
-		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
+		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
 		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 			free_extent_map(em);
 			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			break;
 		}
 		free_extent_map(em);
 
 		if (page->index == end_index) {
 			char *userpage;
-			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);
+			size_t zero_offset = isize & (PAGE_SIZE - 1);
 
 			if (zero_offset) {
 				int zeros;
-				zeros = PAGE_CACHE_SIZE - zero_offset;
+				zeros = PAGE_SIZE - zero_offset;
 				userpage = kmap_atomic(page);
 				memset(userpage + zero_offset, 0, zeros);
 				flush_dcache_page(page);
@@ -531,19 +531,19 @@
 		}
 
 		ret = bio_add_page(cb->orig_bio, page,
-				   PAGE_CACHE_SIZE, 0);
+				   PAGE_SIZE, 0);
 
-		if (ret == PAGE_CACHE_SIZE) {
+		if (ret == PAGE_SIZE) {
 			nr_pages++;
-			page_cache_release(page);
+			put_page(page);
 		} else {
 			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			break;
 		}
 next:
-		last_offset += PAGE_CACHE_SIZE;
+		last_offset += PAGE_SIZE;
 	}
 	return 0;
 }
@@ -567,7 +567,7 @@
 	struct extent_map_tree *em_tree;
 	struct compressed_bio *cb;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
 	unsigned long compressed_len;
 	unsigned long nr_pages;
 	unsigned long pg_index;
@@ -589,7 +589,7 @@
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree,
 				   page_offset(bio->bi_io_vec->bv_page),
-				   PAGE_CACHE_SIZE);
+				   PAGE_SIZE);
 	read_unlock(&em_tree->lock);
 	if (!em)
 		return -EIO;
@@ -617,7 +617,7 @@
 	cb->compress_type = extent_compress_type(bio_flags);
 	cb->orig_bio = bio;
 
-	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
+	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
 				       GFP_NOFS);
 	if (!cb->compressed_pages)
@@ -640,7 +640,7 @@
 	add_ra_bio_pages(inode, em_start + em_len, cb);
 
 	/* include any pages we added in add_ra-bio_pages */
-	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+	uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
 	cb->len = uncompressed_len;
 
 	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
@@ -653,18 +653,18 @@
 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 		page = cb->compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
-		page->index = em_start >> PAGE_CACHE_SHIFT;
+		page->index = em_start >> PAGE_SHIFT;
 
 		if (comp_bio->bi_iter.bi_size)
 			ret = tree->ops->merge_bio_hook(READ, page, 0,
-							PAGE_CACHE_SIZE,
+							PAGE_SIZE,
 							comp_bio, 0);
 		else
 			ret = 0;
 
 		page->mapping = NULL;
-		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
-		    PAGE_CACHE_SIZE) {
+		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
+		    PAGE_SIZE) {
 			bio_get(comp_bio);
 
 			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
@@ -702,9 +702,9 @@
 			comp_bio->bi_private = cb;
 			comp_bio->bi_end_io = end_compressed_bio_read;
 
-			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
+			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
 		}
-		cur_disk_byte += PAGE_CACHE_SIZE;
+		cur_disk_byte += PAGE_SIZE;
 	}
 	bio_get(comp_bio);
 
@@ -1013,8 +1013,8 @@
 
 	/* copy bytes from the working buffer into the pages */
 	while (working_bytes > 0) {
-		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
-			    PAGE_CACHE_SIZE - buf_offset);
+		bytes = min(PAGE_SIZE - *pg_offset,
+			    PAGE_SIZE - buf_offset);
 		bytes = min(bytes, working_bytes);
 		kaddr = kmap_atomic(page_out);
 		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
@@ -1027,7 +1027,7 @@
 		current_buf_start += bytes;
 
 		/* check if we need to pick another page */
-		if (*pg_offset == PAGE_CACHE_SIZE) {
+		if (*pg_offset == PAGE_SIZE) {
 			(*pg_index)++;
 			if (*pg_index >= vcnt)
 				return 0;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 7759293..ec7928a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
+#include <linux/vmalloc.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -5361,10 +5362,13 @@
 		goto out;
 	}
 
-	tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL);
+	tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
 	if (!tmp_buf) {
-		ret = -ENOMEM;
-		goto out;
+		tmp_buf = vmalloc(left_root->nodesize);
+		if (!tmp_buf) {
+			ret = -ENOMEM;
+			goto out;
+		}
 	}
 
 	left_path->search_commit_root = 1;
@@ -5565,7 +5569,7 @@
 out:
 	btrfs_free_path(left_path);
 	btrfs_free_path(right_path);
-	kfree(tmp_buf);
+	kvfree(tmp_buf);
 	return ret;
 }
 
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index a1d6652..26bcb48 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -394,6 +394,8 @@
 	dev_replace->cursor_right = 0;
 	dev_replace->is_valid = 1;
 	dev_replace->item_needs_writeback = 1;
+	atomic64_set(&dev_replace->num_write_errors, 0);
+	atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
 	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
 	btrfs_dev_replace_unlock(dev_replace, 1);
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4b02591..4e47849 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -25,7 +25,6 @@
 #include <linux/buffer_head.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-#include <linux/freezer.h>
 #include <linux/slab.h>
 #include <linux/migrate.h>
 #include <linux/ratelimit.h>
@@ -303,7 +302,7 @@
 		err = map_private_extent_buffer(buf, offset, 32,
 					&kaddr, &map_start, &map_len);
 		if (err)
-			return 1;
+			return err;
 		cur_len = min(len, map_len - (offset - map_start));
 		crc = btrfs_csum_data(kaddr + offset - map_start,
 				      crc, cur_len);
@@ -313,7 +312,7 @@
 	if (csum_size > sizeof(inline_result)) {
 		result = kzalloc(csum_size, GFP_NOFS);
 		if (!result)
-			return 1;
+			return -ENOMEM;
 	} else {
 		result = (char *)&inline_result;
 	}
@@ -334,7 +333,7 @@
 				val, found, btrfs_header_level(buf));
 			if (result != (char *)&inline_result)
 				kfree(result);
-			return 1;
+			return -EUCLEAN;
 		}
 	} else {
 		write_extent_buffer(buf, result, 0, csum_size);
@@ -513,11 +512,21 @@
 	eb = (struct extent_buffer *)page->private;
 	if (page != eb->pages[0])
 		return 0;
+
 	found_start = btrfs_header_bytenr(eb);
-	if (WARN_ON(found_start != start || !PageUptodate(page)))
-		return 0;
-	csum_tree_block(fs_info, eb, 0);
-	return 0;
+	/*
+	 * Please do not consolidate these warnings into a single if.
+	 * It is useful to know what went wrong.
+	 */
+	if (WARN_ON(found_start != start))
+		return -EUCLEAN;
+	if (WARN_ON(!PageUptodate(page)))
+		return -EUCLEAN;
+
+	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
+			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
+
+	return csum_tree_block(fs_info, eb, 0);
 }
 
 static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
@@ -661,10 +670,8 @@
 				       eb, found_level);
 
 	ret = csum_tree_block(fs_info, eb, 1);
-	if (ret) {
-		ret = -EIO;
+	if (ret)
 		goto err;
-	}
 
 	/*
 	 * If this is a leaf block and it is corrupt, set the corrupt bit so
@@ -1055,7 +1062,7 @@
 			   (unsigned long long)page_offset(page));
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
@@ -1757,7 +1764,7 @@
 	if (err)
 		return err;
 
-	bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
+	bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
 	bdi->congested_fn	= btrfs_congested_fn;
 	bdi->congested_data	= info;
 	bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
@@ -1831,7 +1838,7 @@
 		 */
 		btrfs_delete_unused_bgs(root->fs_info);
 sleep:
-		if (!try_to_freeze() && !again) {
+		if (!again) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop())
 				schedule();
@@ -1921,14 +1928,12 @@
 		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
 				      &root->fs_info->fs_state)))
 			btrfs_cleanup_transaction(root);
-		if (!try_to_freeze()) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			if (!kthread_should_stop() &&
-			    (!btrfs_transaction_blocked(root->fs_info) ||
-			     cannot_commit))
-				schedule_timeout(delay);
-			__set_current_state(TASK_RUNNING);
-		}
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (!kthread_should_stop() &&
+				(!btrfs_transaction_blocked(root->fs_info) ||
+				 cannot_commit))
+			schedule_timeout(delay);
+		__set_current_state(TASK_RUNNING);
 	} while (!kthread_should_stop());
 	return 0;
 }
@@ -2537,7 +2542,7 @@
 		err = ret;
 		goto fail_bdi;
 	}
-	fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
+	fs_info->dirty_metadata_batch = PAGE_SIZE *
 					(1 + ilog2(nr_cpu_ids));
 
 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
@@ -2782,7 +2787,7 @@
 	 * flag our filesystem as having big metadata blocks if
 	 * they are bigger than the page size
 	 */
-	if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) {
+	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
 		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
 			printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
 		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
@@ -2832,7 +2837,7 @@
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
-				    SZ_4M / PAGE_CACHE_SIZE);
+				    SZ_4M / PAGE_SIZE);
 
 	tree_root->nodesize = nodesize;
 	tree_root->sectorsize = sectorsize;
@@ -4071,9 +4076,9 @@
 		ret = -EINVAL;
 	}
 	/* Only PAGE SIZE is supported yet */
-	if (sectorsize != PAGE_CACHE_SIZE) {
+	if (sectorsize != PAGE_SIZE) {
 		printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n",
-				sectorsize, PAGE_CACHE_SIZE);
+				sectorsize, PAGE_SIZE);
 		ret = -EINVAL;
 	}
 	if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 53e1297..84e060e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3452,7 +3452,7 @@
 		num_pages = 1;
 
 	num_pages *= 16;
-	num_pages *= PAGE_CACHE_SIZE;
+	num_pages *= PAGE_SIZE;
 
 	ret = btrfs_check_data_free_space(inode, 0, num_pages);
 	if (ret)
@@ -4639,7 +4639,7 @@
 	loops = 0;
 	while (delalloc_bytes && loops < 3) {
 		max_reclaim = min(delalloc_bytes, to_reclaim);
-		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
+		nr_pages = max_reclaim >> PAGE_SHIFT;
 		btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
 		/*
 		 * We need to wait for the async pages to actually start before
@@ -9386,15 +9386,23 @@
 	u64 dev_min = 1;
 	u64 dev_nr = 0;
 	u64 target;
+	int debug;
 	int index;
 	int full = 0;
 	int ret = 0;
 
+	debug = btrfs_test_opt(root, ENOSPC_DEBUG);
+
 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
 
 	/* odd, couldn't find the block group, leave it alone */
-	if (!block_group)
+	if (!block_group) {
+		if (debug)
+			btrfs_warn(root->fs_info,
+				   "can't find block group for bytenr %llu",
+				   bytenr);
 		return -1;
+	}
 
 	min_free = btrfs_block_group_used(&block_group->item);
 
@@ -9448,8 +9456,13 @@
 		 * this is just a balance, so if we were marked as full
 		 * we know there is no space for a new chunk
 		 */
-		if (full)
+		if (full) {
+			if (debug)
+				btrfs_warn(root->fs_info,
+					"no space to alloc new chunk for block group %llu",
+					block_group->key.objectid);
 			goto out;
+		}
 
 		index = get_block_group_index(block_group);
 	}
@@ -9496,6 +9509,10 @@
 			ret = -1;
 		}
 	}
+	if (debug && ret == -1)
+		btrfs_warn(root->fs_info,
+			"no space to allocate a new chunk for block group %llu",
+			block_group->key.objectid);
 	mutex_unlock(&root->fs_info->chunk_mutex);
 	btrfs_end_transaction(trans, root);
 out:
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 76a0c85..d247fc0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1363,23 +1363,23 @@
 
 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	struct page *page;
 
 	while (index <= end_index) {
 		page = find_get_page(inode->i_mapping, index);
 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 		clear_page_dirty_for_io(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 	}
 }
 
 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	struct page *page;
 
 	while (index <= end_index) {
@@ -1387,7 +1387,7 @@
 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 		__set_page_dirty_nobuffers(page);
 		account_page_redirty(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 	}
 }
@@ -1397,15 +1397,15 @@
  */
 static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	struct page *page;
 
 	while (index <= end_index) {
 		page = find_get_page(tree->mapping, index);
 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 		set_page_writeback(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 	}
 }
@@ -1556,8 +1556,8 @@
 {
 	int ret;
 	struct page *pages[16];
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
 
@@ -1571,7 +1571,7 @@
 		for (i = 0; i < ret; i++) {
 			if (pages[i] != locked_page)
 				unlock_page(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		nr_pages -= ret;
 		index += ret;
@@ -1584,9 +1584,9 @@
 					u64 delalloc_start,
 					u64 delalloc_end)
 {
-	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
+	unsigned long index = delalloc_start >> PAGE_SHIFT;
 	unsigned long start_index = index;
-	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
 	unsigned long pages_locked = 0;
 	struct page *pages[16];
 	unsigned long nrpages;
@@ -1619,11 +1619,11 @@
 				    pages[i]->mapping != inode->i_mapping) {
 					ret = -EAGAIN;
 					unlock_page(pages[i]);
-					page_cache_release(pages[i]);
+					put_page(pages[i]);
 					goto done;
 				}
 			}
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 			pages_locked++;
 		}
 		nrpages -= ret;
@@ -1636,7 +1636,7 @@
 		__unlock_for_delalloc(inode, locked_page,
 			      delalloc_start,
 			      ((u64)(start_index + pages_locked - 1)) <<
-			      PAGE_CACHE_SHIFT);
+			      PAGE_SHIFT);
 	}
 	return ret;
 }
@@ -1696,7 +1696,7 @@
 		free_extent_state(cached_state);
 		cached_state = NULL;
 		if (!loops) {
-			max_bytes = PAGE_CACHE_SIZE;
+			max_bytes = PAGE_SIZE;
 			loops = 1;
 			goto again;
 		} else {
@@ -1735,8 +1735,8 @@
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 	int ret;
 	struct page *pages[16];
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
 
@@ -1757,7 +1757,7 @@
 				SetPagePrivate2(pages[i]);
 
 			if (pages[i] == locked_page) {
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 				continue;
 			}
 			if (page_ops & PAGE_CLEAR_DIRTY)
@@ -1770,7 +1770,7 @@
 				end_page_writeback(pages[i]);
 			if (page_ops & PAGE_UNLOCK)
 				unlock_page(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		nr_pages -= ret;
 		index += ret;
@@ -1961,7 +1961,7 @@
 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 {
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
 		SetPageUptodate(page);
 }
@@ -2071,11 +2071,11 @@
 		struct page *p = eb->pages[i];
 
 		ret = repair_io_failure(root->fs_info->btree_inode, start,
-					PAGE_CACHE_SIZE, start, p,
+					PAGE_SIZE, start, p,
 					start - page_offset(p), mirror_num);
 		if (ret)
 			break;
-		start += PAGE_CACHE_SIZE;
+		start += PAGE_SIZE;
 	}
 
 	return ret;
@@ -2466,8 +2466,8 @@
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page.  */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
-			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
 				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
 				   "partial page write in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
@@ -2541,8 +2541,8 @@
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page.  */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
-			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
 				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
 				   "partial page read in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
@@ -2598,13 +2598,13 @@
 readpage_ok:
 		if (likely(uptodate)) {
 			loff_t i_size = i_size_read(inode);
-			pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+			pgoff_t end_index = i_size >> PAGE_SHIFT;
 			unsigned off;
 
 			/* Zero out the end if this page straddles i_size */
-			off = i_size & (PAGE_CACHE_SIZE-1);
+			off = i_size & (PAGE_SIZE-1);
 			if (page->index == end_index && off)
-				zero_user_segment(page, off, PAGE_CACHE_SIZE);
+				zero_user_segment(page, off, PAGE_SIZE);
 			SetPageUptodate(page);
 		} else {
 			ClearPageUptodate(page);
@@ -2768,7 +2768,7 @@
 	struct bio *bio;
 	int contig = 0;
 	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
-	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
+	size_t page_size = min_t(size_t, size, PAGE_SIZE);
 
 	if (bio_ret && *bio_ret) {
 		bio = *bio_ret;
@@ -2821,7 +2821,7 @@
 {
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		page_cache_get(page);
+		get_page(page);
 		set_page_private(page, (unsigned long)eb);
 	} else {
 		WARN_ON(page->private != (unsigned long)eb);
@@ -2832,7 +2832,7 @@
 {
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		page_cache_get(page);
+		get_page(page);
 		set_page_private(page, EXTENT_PAGE_PRIVATE);
 	}
 }
@@ -2880,7 +2880,7 @@
 {
 	struct inode *inode = page->mapping->host;
 	u64 start = page_offset(page);
-	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = start + PAGE_SIZE - 1;
 	u64 end;
 	u64 cur = start;
 	u64 extent_offset;
@@ -2909,12 +2909,12 @@
 		}
 	}
 
-	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
+	if (page->index == last_byte >> PAGE_SHIFT) {
 		char *userpage;
-		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
+		size_t zero_offset = last_byte & (PAGE_SIZE - 1);
 
 		if (zero_offset) {
-			iosize = PAGE_CACHE_SIZE - zero_offset;
+			iosize = PAGE_SIZE - zero_offset;
 			userpage = kmap_atomic(page);
 			memset(userpage + zero_offset, 0, iosize);
 			flush_dcache_page(page);
@@ -2922,14 +2922,14 @@
 		}
 	}
 	while (cur <= end) {
-		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+		unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
 		bool force_bio_submit = false;
 
 		if (cur >= last_byte) {
 			char *userpage;
 			struct extent_state *cached = NULL;
 
-			iosize = PAGE_CACHE_SIZE - pg_offset;
+			iosize = PAGE_SIZE - pg_offset;
 			userpage = kmap_atomic(page);
 			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
@@ -3112,7 +3112,7 @@
 	for (index = 0; index < nr_pages; index++) {
 		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
 			      mirror_num, bio_flags, rw, prev_em_start);
-		page_cache_release(pages[index]);
+		put_page(pages[index]);
 	}
 }
 
@@ -3134,10 +3134,10 @@
 		page_start = page_offset(pages[index]);
 		if (!end) {
 			start = page_start;
-			end = start + PAGE_CACHE_SIZE - 1;
+			end = start + PAGE_SIZE - 1;
 			first_index = index;
 		} else if (end + 1 == page_start) {
-			end += PAGE_CACHE_SIZE;
+			end += PAGE_SIZE;
 		} else {
 			__do_contiguous_readpages(tree, &pages[first_index],
 						  index - first_index, start,
@@ -3145,7 +3145,7 @@
 						  bio, mirror_num, bio_flags,
 						  rw, prev_em_start);
 			start = page_start;
-			end = start + PAGE_CACHE_SIZE - 1;
+			end = start + PAGE_SIZE - 1;
 			first_index = index;
 		}
 	}
@@ -3167,13 +3167,13 @@
 	struct inode *inode = page->mapping->host;
 	struct btrfs_ordered_extent *ordered;
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	int ret;
 
 	while (1) {
 		lock_extent(tree, start, end);
 		ordered = btrfs_lookup_ordered_range(inode, start,
-						PAGE_CACHE_SIZE);
+						PAGE_SIZE);
 		if (!ordered)
 			break;
 		unlock_extent(tree, start, end);
@@ -3227,7 +3227,7 @@
 			      unsigned long *nr_written)
 {
 	struct extent_io_tree *tree = epd->tree;
-	u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = delalloc_start + PAGE_SIZE - 1;
 	u64 nr_delalloc;
 	u64 delalloc_to_write = 0;
 	u64 delalloc_end = 0;
@@ -3264,13 +3264,11 @@
 			goto done;
 		}
 		/*
-		 * delalloc_end is already one less than the total
-		 * length, so we don't subtract one from
-		 * PAGE_CACHE_SIZE
+		 * delalloc_end is already one less than the total length, so
+		 * we don't subtract one from PAGE_SIZE
 		 */
 		delalloc_to_write += (delalloc_end - delalloc_start +
-				      PAGE_CACHE_SIZE) >>
-				      PAGE_CACHE_SHIFT;
+				      PAGE_SIZE) >> PAGE_SHIFT;
 		delalloc_start = delalloc_end + 1;
 	}
 	if (wbc->nr_to_write < delalloc_to_write) {
@@ -3319,7 +3317,7 @@
 {
 	struct extent_io_tree *tree = epd->tree;
 	u64 start = page_offset(page);
-	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = start + PAGE_SIZE - 1;
 	u64 end;
 	u64 cur = start;
 	u64 extent_offset;
@@ -3434,7 +3432,7 @@
 		if (ret) {
 			SetPageError(page);
 		} else {
-			unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1;
+			unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1;
 
 			set_range_writeback(tree, cur, cur + iosize - 1);
 			if (!PageWriteback(page)) {
@@ -3477,12 +3475,12 @@
 	struct inode *inode = page->mapping->host;
 	struct extent_page_data *epd = data;
 	u64 start = page_offset(page);
-	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = start + PAGE_SIZE - 1;
 	int ret;
 	int nr = 0;
 	size_t pg_offset = 0;
 	loff_t i_size = i_size_read(inode);
-	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = i_size >> PAGE_SHIFT;
 	int write_flags;
 	unsigned long nr_written = 0;
 
@@ -3497,10 +3495,10 @@
 
 	ClearPageError(page);
 
-	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
+	pg_offset = i_size & (PAGE_SIZE - 1);
 	if (page->index > end_index ||
 	   (page->index == end_index && !pg_offset)) {
-		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		return 0;
 	}
@@ -3510,7 +3508,7 @@
 
 		userpage = kmap_atomic(page);
 		memset(userpage + pg_offset, 0,
-		       PAGE_CACHE_SIZE - pg_offset);
+		       PAGE_SIZE - pg_offset);
 		kunmap_atomic(userpage);
 		flush_dcache_page(page);
 	}
@@ -3748,7 +3746,7 @@
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
 		ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
-					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+					 PAGE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
 					 0, epd->bio_flags, bio_flags, false);
 		epd->bio_flags = bio_flags;
@@ -3760,7 +3758,7 @@
 			ret = -EIO;
 			break;
 		}
-		offset += PAGE_CACHE_SIZE;
+		offset += PAGE_SIZE;
 		update_nr_written(p, wbc, 1);
 		unlock_page(p);
 	}
@@ -3804,8 +3802,8 @@
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		scanned = 1;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -3948,8 +3946,8 @@
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		scanned = 1;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -4083,8 +4081,8 @@
 	int ret = 0;
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
-	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
-		PAGE_CACHE_SHIFT;
+	unsigned long nr_pages = (end - start + PAGE_SIZE) >>
+		PAGE_SHIFT;
 
 	struct extent_page_data epd = {
 		.bio = NULL,
@@ -4102,18 +4100,18 @@
 	};
 
 	while (start <= end) {
-		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+		page = find_get_page(mapping, start >> PAGE_SHIFT);
 		if (clear_page_dirty_for_io(page))
 			ret = __extent_writepage(page, &wbc_writepages, &epd);
 		else {
 			if (tree->ops && tree->ops->writepage_end_io_hook)
 				tree->ops->writepage_end_io_hook(page, start,
-						 start + PAGE_CACHE_SIZE - 1,
+						 start + PAGE_SIZE - 1,
 						 NULL, 1);
 			unlock_page(page);
 		}
-		page_cache_release(page);
-		start += PAGE_CACHE_SIZE;
+		put_page(page);
+		start += PAGE_SIZE;
 	}
 
 	flush_epd_write_bio(&epd);
@@ -4163,7 +4161,7 @@
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping,
 					page->index, GFP_NOFS)) {
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -4197,7 +4195,7 @@
 {
 	struct extent_state *cached_state = NULL;
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
 
 	start += ALIGN(offset, blocksize);
@@ -4223,7 +4221,7 @@
 				    struct page *page, gfp_t mask)
 {
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	int ret = 1;
 
 	if (test_range_bit(tree, start, end,
@@ -4262,7 +4260,7 @@
 {
 	struct extent_map *em;
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 
 	if (gfpflags_allow_blocking(mask) &&
 	    page->mapping->host->i_size > SZ_16M) {
@@ -4587,14 +4585,14 @@
 			ClearPagePrivate(page);
 			set_page_private(page, 0);
 			/* One for the page private */
-			page_cache_release(page);
+			put_page(page);
 		}
 
 		if (mapped)
 			spin_unlock(&page->mapping->private_lock);
 
 		/* One for when we alloced the page */
-		page_cache_release(page);
+		put_page(page);
 	} while (index != 0);
 }
 
@@ -4779,7 +4777,7 @@
 
 	rcu_read_lock();
 	eb = radix_tree_lookup(&fs_info->buffer_radix,
-			       start >> PAGE_CACHE_SHIFT);
+			       start >> PAGE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
 		/*
@@ -4829,7 +4827,7 @@
 		goto free_eb;
 	spin_lock(&fs_info->buffer_lock);
 	ret = radix_tree_insert(&fs_info->buffer_radix,
-				start >> PAGE_CACHE_SHIFT, eb);
+				start >> PAGE_SHIFT, eb);
 	spin_unlock(&fs_info->buffer_lock);
 	radix_tree_preload_end();
 	if (ret == -EEXIST) {
@@ -4862,7 +4860,7 @@
 	unsigned long len = fs_info->tree_root->nodesize;
 	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long i;
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
 	struct extent_buffer *eb;
 	struct extent_buffer *exists = NULL;
 	struct page *p;
@@ -4896,7 +4894,7 @@
 			if (atomic_inc_not_zero(&exists->refs)) {
 				spin_unlock(&mapping->private_lock);
 				unlock_page(p);
-				page_cache_release(p);
+				put_page(p);
 				mark_extent_buffer_accessed(exists, p);
 				goto free_eb;
 			}
@@ -4908,7 +4906,7 @@
 			 */
 			ClearPagePrivate(p);
 			WARN_ON(PageDirty(p));
-			page_cache_release(p);
+			put_page(p);
 		}
 		attach_extent_buffer_page(eb, p);
 		spin_unlock(&mapping->private_lock);
@@ -4931,7 +4929,7 @@
 
 	spin_lock(&fs_info->buffer_lock);
 	ret = radix_tree_insert(&fs_info->buffer_radix,
-				start >> PAGE_CACHE_SHIFT, eb);
+				start >> PAGE_SHIFT, eb);
 	spin_unlock(&fs_info->buffer_lock);
 	radix_tree_preload_end();
 	if (ret == -EEXIST) {
@@ -4994,7 +4992,7 @@
 
 			spin_lock(&fs_info->buffer_lock);
 			radix_tree_delete(&fs_info->buffer_radix,
-					  eb->start >> PAGE_CACHE_SHIFT);
+					  eb->start >> PAGE_SHIFT);
 			spin_unlock(&fs_info->buffer_lock);
 		} else {
 			spin_unlock(&eb->refs_lock);
@@ -5168,8 +5166,8 @@
 
 	if (start) {
 		WARN_ON(start < eb->start);
-		start_i = (start >> PAGE_CACHE_SHIFT) -
-			(eb->start >> PAGE_CACHE_SHIFT);
+		start_i = (start >> PAGE_SHIFT) -
+			(eb->start >> PAGE_SHIFT);
 	} else {
 		start_i = 0;
 	}
@@ -5252,18 +5250,18 @@
 	struct page *page;
 	char *kaddr;
 	char *dst = (char *)dstv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 
-		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		cur = min(len, (PAGE_SIZE - offset));
 		kaddr = page_address(page);
 		memcpy(dst, kaddr + offset, cur);
 
@@ -5283,19 +5281,19 @@
 	struct page *page;
 	char *kaddr;
 	char __user *dst = (char __user *)dstv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 	int ret = 0;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 
-		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		cur = min(len, (PAGE_SIZE - offset));
 		kaddr = page_address(page);
 		if (copy_to_user(dst, kaddr + offset, cur)) {
 			ret = -EFAULT;
@@ -5316,13 +5314,13 @@
 			       unsigned long *map_start,
 			       unsigned long *map_len)
 {
-	size_t offset = start & (PAGE_CACHE_SIZE - 1);
+	size_t offset = start & (PAGE_SIZE - 1);
 	char *kaddr;
 	struct page *p;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 	unsigned long end_i = (start_offset + start + min_len - 1) >>
-		PAGE_CACHE_SHIFT;
+		PAGE_SHIFT;
 
 	if (i != end_i)
 		return -EINVAL;
@@ -5332,7 +5330,7 @@
 		*map_start = 0;
 	} else {
 		offset = 0;
-		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
+		*map_start = ((u64)i << PAGE_SHIFT) - start_offset;
 	}
 
 	if (start + min_len > eb->len) {
@@ -5345,7 +5343,7 @@
 	p = eb->pages[i];
 	kaddr = page_address(p);
 	*map = kaddr + offset;
-	*map_len = PAGE_CACHE_SIZE - offset;
+	*map_len = PAGE_SIZE - offset;
 	return 0;
 }
 
@@ -5358,19 +5356,19 @@
 	struct page *page;
 	char *kaddr;
 	char *ptr = (char *)ptrv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 	int ret = 0;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 
-		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		cur = min(len, (PAGE_SIZE - offset));
 
 		kaddr = page_address(page);
 		ret = memcmp(ptr, kaddr + offset, cur);
@@ -5393,19 +5391,19 @@
 	struct page *page;
 	char *kaddr;
 	char *src = (char *)srcv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 		WARN_ON(!PageUptodate(page));
 
-		cur = min(len, PAGE_CACHE_SIZE - offset);
+		cur = min(len, PAGE_SIZE - offset);
 		kaddr = page_address(page);
 		memcpy(kaddr + offset, src, cur);
 
@@ -5423,19 +5421,19 @@
 	size_t offset;
 	struct page *page;
 	char *kaddr;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 		WARN_ON(!PageUptodate(page));
 
-		cur = min(len, PAGE_CACHE_SIZE - offset);
+		cur = min(len, PAGE_SIZE - offset);
 		kaddr = page_address(page);
 		memset(kaddr + offset, c, cur);
 
@@ -5454,19 +5452,19 @@
 	size_t offset;
 	struct page *page;
 	char *kaddr;
-	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
 
 	WARN_ON(src->len != dst_len);
 
 	offset = (start_offset + dst_offset) &
-		(PAGE_CACHE_SIZE - 1);
+		(PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = dst->pages[i];
 		WARN_ON(!PageUptodate(page));
 
-		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
+		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
 
 		kaddr = page_address(page);
 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
@@ -5508,7 +5506,7 @@
 				    unsigned long *page_index,
 				    size_t *page_offset)
 {
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
 	size_t byte_offset = BIT_BYTE(nr);
 	size_t offset;
 
@@ -5519,8 +5517,8 @@
 	 */
 	offset = start_offset + start + byte_offset;
 
-	*page_index = offset >> PAGE_CACHE_SHIFT;
-	*page_offset = offset & (PAGE_CACHE_SIZE - 1);
+	*page_index = offset >> PAGE_SHIFT;
+	*page_offset = offset & (PAGE_SIZE - 1);
 }
 
 /**
@@ -5572,7 +5570,7 @@
 		len -= bits_to_set;
 		bits_to_set = BITS_PER_BYTE;
 		mask_to_set = ~0U;
-		if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+		if (++offset >= PAGE_SIZE && len > 0) {
 			offset = 0;
 			page = eb->pages[++i];
 			WARN_ON(!PageUptodate(page));
@@ -5614,7 +5612,7 @@
 		len -= bits_to_clear;
 		bits_to_clear = BITS_PER_BYTE;
 		mask_to_clear = ~0U;
-		if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+		if (++offset >= PAGE_SIZE && len > 0) {
 			offset = 0;
 			page = eb->pages[++i];
 			WARN_ON(!PageUptodate(page));
@@ -5661,7 +5659,7 @@
 	size_t cur;
 	size_t dst_off_in_page;
 	size_t src_off_in_page;
-	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
 	unsigned long dst_i;
 	unsigned long src_i;
 
@@ -5680,17 +5678,17 @@
 
 	while (len > 0) {
 		dst_off_in_page = (start_offset + dst_offset) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 		src_off_in_page = (start_offset + src_offset) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 
-		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
-		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
+		dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
+		src_i = (start_offset + src_offset) >> PAGE_SHIFT;
 
-		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
+		cur = min(len, (unsigned long)(PAGE_SIZE -
 					       src_off_in_page));
 		cur = min_t(unsigned long, cur,
-			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
+			(unsigned long)(PAGE_SIZE - dst_off_in_page));
 
 		copy_pages(dst->pages[dst_i], dst->pages[src_i],
 			   dst_off_in_page, src_off_in_page, cur);
@@ -5709,7 +5707,7 @@
 	size_t src_off_in_page;
 	unsigned long dst_end = dst_offset + len - 1;
 	unsigned long src_end = src_offset + len - 1;
-	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
 	unsigned long dst_i;
 	unsigned long src_i;
 
@@ -5728,13 +5726,13 @@
 		return;
 	}
 	while (len > 0) {
-		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
-		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
+		dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
+		src_i = (start_offset + src_end) >> PAGE_SHIFT;
 
 		dst_off_in_page = (start_offset + dst_end) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 		src_off_in_page = (start_offset + src_end) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 
 		cur = min_t(unsigned long, len, src_off_in_page + 1);
 		cur = min(cur, dst_off_in_page + 1);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 5dbf92e..b5e0ade 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -120,7 +120,7 @@
 };
 
 #define INLINE_EXTENT_BUFFER_PAGES 16
-#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)
+#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
 struct extent_buffer {
 	u64 start;
 	unsigned long len;
@@ -365,8 +365,8 @@
 
 static inline unsigned long num_extent_pages(u64 start, u64 len)
 {
-	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
-		(start >> PAGE_CACHE_SHIFT);
+	return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
+		(start >> PAGE_SHIFT);
 }
 
 static inline void extent_buffer_get(struct extent_buffer *eb)
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b5baf5b..7a7d6e2 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -32,7 +32,7 @@
 				  size) - 1))
 
 #define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
-				       PAGE_CACHE_SIZE))
+				       PAGE_SIZE))
 
 #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
 				   sizeof(struct btrfs_ordered_sum)) / \
@@ -203,7 +203,7 @@
 		csum = (u8 *)dst;
 	}
 
-	if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
+	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
 		path->reada = READA_FORWARD;
 
 	WARN_ON(bio->bi_vcnt <= 0);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 15a09cb..ea9f10b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -414,11 +414,11 @@
 	size_t copied = 0;
 	size_t total_copied = 0;
 	int pg = 0;
-	int offset = pos & (PAGE_CACHE_SIZE - 1);
+	int offset = pos & (PAGE_SIZE - 1);
 
 	while (write_bytes > 0) {
 		size_t count = min_t(size_t,
-				     PAGE_CACHE_SIZE - offset, write_bytes);
+				     PAGE_SIZE - offset, write_bytes);
 		struct page *page = prepared_pages[pg];
 		/*
 		 * Copy data from userspace to the current page
@@ -448,7 +448,7 @@
 		if (unlikely(copied == 0))
 			break;
 
-		if (copied < PAGE_CACHE_SIZE - offset) {
+		if (copied < PAGE_SIZE - offset) {
 			offset += copied;
 		} else {
 			pg++;
@@ -473,7 +473,7 @@
 		 */
 		ClearPageChecked(pages[i]);
 		unlock_page(pages[i]);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 }
 
@@ -1297,7 +1297,7 @@
 {
 	int ret = 0;
 
-	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
+	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
 	    !PageUptodate(page)) {
 		ret = btrfs_readpage(NULL, page);
 		if (ret)
@@ -1323,7 +1323,7 @@
 				  size_t write_bytes, bool force_uptodate)
 {
 	int i;
-	unsigned long index = pos >> PAGE_CACHE_SHIFT;
+	unsigned long index = pos >> PAGE_SHIFT;
 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 	int err = 0;
 	int faili;
@@ -1345,7 +1345,7 @@
 			err = prepare_uptodate_page(inode, pages[i],
 						    pos + write_bytes, false);
 		if (err) {
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 			if (err == -EAGAIN) {
 				err = 0;
 				goto again;
@@ -1360,7 +1360,7 @@
 fail:
 	while (faili >= 0) {
 		unlock_page(pages[faili]);
-		page_cache_release(pages[faili]);
+		put_page(pages[faili]);
 		faili--;
 	}
 	return err;
@@ -1408,7 +1408,7 @@
 					     cached_state, GFP_NOFS);
 			for (i = 0; i < num_pages; i++) {
 				unlock_page(pages[i]);
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 			}
 			btrfs_start_ordered_extent(inode, ordered, 1);
 			btrfs_put_ordered_extent(ordered);
@@ -1497,8 +1497,8 @@
 	bool force_page_uptodate = false;
 	bool need_unlock;
 
-	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
-			PAGE_CACHE_SIZE / (sizeof(struct page *)));
+	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
+			PAGE_SIZE / (sizeof(struct page *)));
 	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
 	nrptrs = max(nrptrs, 8);
 	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
@@ -1506,13 +1506,13 @@
 		return -ENOMEM;
 
 	while (iov_iter_count(i) > 0) {
-		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+		size_t offset = pos & (PAGE_SIZE - 1);
 		size_t sector_offset;
 		size_t write_bytes = min(iov_iter_count(i),
-					 nrptrs * (size_t)PAGE_CACHE_SIZE -
+					 nrptrs * (size_t)PAGE_SIZE -
 					 offset);
 		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
-						PAGE_CACHE_SIZE);
+						PAGE_SIZE);
 		size_t reserve_bytes;
 		size_t dirty_pages;
 		size_t copied;
@@ -1547,7 +1547,7 @@
 			 * write_bytes, so scale down.
 			 */
 			num_pages = DIV_ROUND_UP(write_bytes + offset,
-						 PAGE_CACHE_SIZE);
+						 PAGE_SIZE);
 			reserve_bytes = round_up(write_bytes + sector_offset,
 					root->sectorsize);
 			goto reserve_metadata;
@@ -1609,7 +1609,7 @@
 		} else {
 			force_page_uptodate = false;
 			dirty_pages = DIV_ROUND_UP(copied + offset,
-						   PAGE_CACHE_SIZE);
+						   PAGE_SIZE);
 		}
 
 		/*
@@ -1641,7 +1641,7 @@
 				u64 __pos;
 
 				__pos = round_down(pos, root->sectorsize) +
-					(dirty_pages << PAGE_CACHE_SHIFT);
+					(dirty_pages << PAGE_SHIFT);
 				btrfs_delalloc_release_space(inode, __pos,
 							     release_bytes);
 			}
@@ -1682,7 +1682,7 @@
 		cond_resched();
 
 		balance_dirty_pages_ratelimited(inode->i_mapping);
-		if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1)
+		if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
 			btrfs_btree_balance_dirty(root);
 
 		pos += copied;
@@ -1703,18 +1703,17 @@
 	return num_written ? num_written : ret;
 }
 
-static ssize_t __btrfs_direct_write(struct kiocb *iocb,
-				    struct iov_iter *from,
-				    loff_t pos)
+static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
+	loff_t pos = iocb->ki_pos;
 	ssize_t written;
 	ssize_t written_buffered;
 	loff_t endbyte;
 	int err;
 
-	written = generic_file_direct_write(iocb, from, pos);
+	written = generic_file_direct_write(iocb, from);
 
 	if (written < 0 || !iov_iter_count(from))
 		return written;
@@ -1738,8 +1737,8 @@
 		goto out;
 	written += written_buffered;
 	iocb->ki_pos = pos + written_buffered;
-	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
-				 endbyte >> PAGE_CACHE_SHIFT);
+	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
+				 endbyte >> PAGE_SHIFT);
 out:
 	return written ? written : err;
 }
@@ -1832,7 +1831,7 @@
 		atomic_inc(&BTRFS_I(inode)->sync_writers);
 
 	if (iocb->ki_flags & IOCB_DIRECT) {
-		num_written = __btrfs_direct_write(iocb, from, pos);
+		num_written = __btrfs_direct_write(iocb, from);
 	} else {
 		num_written = __btrfs_buffered_write(file, from, pos);
 		if (num_written > 0)
@@ -1852,11 +1851,8 @@
 	spin_lock(&BTRFS_I(inode)->lock);
 	BTRFS_I(inode)->last_sub_trans = root->log_transid;
 	spin_unlock(&BTRFS_I(inode)->lock);
-	if (num_written > 0) {
-		err = generic_write_sync(file, pos, num_written);
-		if (err < 0)
-			num_written = err;
-	}
+	if (num_written > 0)
+		num_written = generic_write_sync(iocb, num_written);
 
 	if (sync)
 		atomic_dec(&BTRFS_I(inode)->sync_writers);
@@ -1905,7 +1901,7 @@
  */
 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
-	struct dentry *dentry = file->f_path.dentry;
+	struct dentry *dentry = file_dentry(file);
 	struct inode *inode = d_inode(dentry);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans;
@@ -2682,9 +2678,12 @@
 		return ret;
 
 	inode_lock(inode);
-	ret = inode_newsize_ok(inode, alloc_end);
-	if (ret)
-		goto out;
+
+	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
+		ret = inode_newsize_ok(inode, offset + len);
+		if (ret)
+			goto out;
+	}
 
 	/*
 	 * TODO: Move these two operations after we have checked
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 8f835bf..5e6062c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -29,7 +29,7 @@
 #include "inode-map.h"
 #include "volumes.h"
 
-#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
+#define BITS_PER_BITMAP		(PAGE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG	SZ_32K
 
 struct btrfs_trim_range {
@@ -295,7 +295,7 @@
 		return -ENOMEM;
 
 	file_ra_state_init(ra, inode->i_mapping);
-	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
 	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
 
@@ -310,14 +310,14 @@
 	int num_pages;
 	int check_crcs = 0;
 
-	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
 	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
 		check_crcs = 1;
 
 	/* Make sure we can fit our crcs into the first page */
 	if (write && check_crcs &&
-	    (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
+	    (num_pages * sizeof(u32)) >= PAGE_SIZE)
 		return -ENOSPC;
 
 	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));
@@ -354,9 +354,9 @@
 	io_ctl->page = io_ctl->pages[io_ctl->index++];
 	io_ctl->cur = page_address(io_ctl->page);
 	io_ctl->orig = io_ctl->cur;
-	io_ctl->size = PAGE_CACHE_SIZE;
+	io_ctl->size = PAGE_SIZE;
 	if (clear)
-		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
+		memset(io_ctl->cur, 0, PAGE_SIZE);
 }
 
 static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
@@ -369,7 +369,7 @@
 		if (io_ctl->pages[i]) {
 			ClearPageChecked(io_ctl->pages[i]);
 			unlock_page(io_ctl->pages[i]);
-			page_cache_release(io_ctl->pages[i]);
+			put_page(io_ctl->pages[i]);
 		}
 	}
 }
@@ -475,7 +475,7 @@
 		offset = sizeof(u32) * io_ctl->num_pages;
 
 	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
-			      PAGE_CACHE_SIZE - offset);
+			      PAGE_SIZE - offset);
 	btrfs_csum_final(crc, (char *)&crc);
 	io_ctl_unmap_page(io_ctl);
 	tmp = page_address(io_ctl->pages[0]);
@@ -503,7 +503,7 @@
 
 	io_ctl_map_page(io_ctl, 0);
 	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
-			      PAGE_CACHE_SIZE - offset);
+			      PAGE_SIZE - offset);
 	btrfs_csum_final(crc, (char *)&crc);
 	if (val != crc) {
 		btrfs_err_rl(io_ctl->root->fs_info,
@@ -561,7 +561,7 @@
 		io_ctl_map_page(io_ctl, 0);
 	}
 
-	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
+	memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
 	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
 	if (io_ctl->index < io_ctl->num_pages)
 		io_ctl_map_page(io_ctl, 0);
@@ -621,7 +621,7 @@
 	if (ret)
 		return ret;
 
-	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
+	memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
 	io_ctl_unmap_page(io_ctl);
 
 	return 0;
@@ -775,7 +775,7 @@
 		} else {
 			ASSERT(num_bitmaps);
 			num_bitmaps--;
-			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+			e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
 			if (!e->bitmap) {
 				kmem_cache_free(
 					btrfs_free_space_cachep, e);
@@ -1660,7 +1660,7 @@
 	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
 	 * we add more bitmaps.
 	 */
-	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
+	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
 
 	if (bitmap_bytes >= max_bytes) {
 		ctl->extents_thresh = 0;
@@ -2111,7 +2111,7 @@
 		}
 
 		/* allocate the bitmap */
-		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+		info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
 		spin_lock(&ctl->tree_lock);
 		if (!info->bitmap) {
 			ret = -ENOMEM;
@@ -3580,7 +3580,7 @@
 	}
 
 	if (!map) {
-		map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+		map = kzalloc(PAGE_SIZE, GFP_NOFS);
 		if (!map) {
 			kmem_cache_free(btrfs_free_space_cachep, info);
 			return -ENOMEM;
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 1f0ec19..70107f7 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -283,7 +283,7 @@
 }
 
 #define INIT_THRESHOLD	((SZ_32K / 2) / sizeof(struct btrfs_free_space))
-#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+#define INODES_PER_BITMAP (PAGE_SIZE * 8)
 
 /*
  * The goal is to keep the memory used by the free_ino tree won't
@@ -317,7 +317,7 @@
 	}
 
 	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
-				PAGE_CACHE_SIZE / sizeof(*info);
+				PAGE_SIZE / sizeof(*info);
 }
 
 /*
@@ -481,12 +481,12 @@
 
 	spin_lock(&ctl->tree_lock);
 	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
-	prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
-	prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+	prealloc = ALIGN(prealloc, PAGE_SIZE);
+	prealloc += ctl->total_bitmaps * PAGE_SIZE;
 	spin_unlock(&ctl->tree_lock);
 
 	/* Just to make sure we have enough space */
-	prealloc += 8 * PAGE_CACHE_SIZE;
+	prealloc += 8 * PAGE_SIZE;
 
 	ret = btrfs_delalloc_reserve_space(inode, 0, prealloc);
 	if (ret)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 41a5688..6b7fe29 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -194,7 +194,7 @@
 		while (compressed_size > 0) {
 			cpage = compressed_pages[i];
 			cur_size = min_t(unsigned long, compressed_size,
-				       PAGE_CACHE_SIZE);
+				       PAGE_SIZE);
 
 			kaddr = kmap_atomic(cpage);
 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
@@ -208,13 +208,13 @@
 						  compress_type);
 	} else {
 		page = find_get_page(inode->i_mapping,
-				     start >> PAGE_CACHE_SHIFT);
+				     start >> PAGE_SHIFT);
 		btrfs_set_file_extent_compression(leaf, ei, 0);
 		kaddr = kmap_atomic(page);
-		offset = start & (PAGE_CACHE_SIZE - 1);
+		offset = start & (PAGE_SIZE - 1);
 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
 		kunmap_atomic(kaddr);
-		page_cache_release(page);
+		put_page(page);
 	}
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_release_path(path);
@@ -322,7 +322,7 @@
 	 * And at reserve time, it's always aligned to page size, so
 	 * just free one page here.
 	 */
-	btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE);
+	btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
 	btrfs_free_path(path);
 	btrfs_end_transaction(trans, root);
 	return ret;
@@ -435,8 +435,8 @@
 	actual_end = min_t(u64, isize, end + 1);
 again:
 	will_compress = 0;
-	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
-	nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE);
+	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
+	nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE);
 
 	/*
 	 * we don't want to send crud past the end of i_size through
@@ -514,7 +514,7 @@
 
 		if (!ret) {
 			unsigned long offset = total_compressed &
-				(PAGE_CACHE_SIZE - 1);
+				(PAGE_SIZE - 1);
 			struct page *page = pages[nr_pages_ret - 1];
 			char *kaddr;
 
@@ -524,7 +524,7 @@
 			if (offset) {
 				kaddr = kmap_atomic(page);
 				memset(kaddr + offset, 0,
-				       PAGE_CACHE_SIZE - offset);
+				       PAGE_SIZE - offset);
 				kunmap_atomic(kaddr);
 			}
 			will_compress = 1;
@@ -580,7 +580,7 @@
 		 * one last check to make sure the compression is really a
 		 * win, compare the page count read with the blocks on disk
 		 */
-		total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
+		total_in = ALIGN(total_in, PAGE_SIZE);
 		if (total_compressed >= total_in) {
 			will_compress = 0;
 		} else {
@@ -594,7 +594,7 @@
 		 */
 		for (i = 0; i < nr_pages_ret; i++) {
 			WARN_ON(pages[i]->mapping);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		kfree(pages);
 		pages = NULL;
@@ -650,7 +650,7 @@
 free_pages_out:
 	for (i = 0; i < nr_pages_ret; i++) {
 		WARN_ON(pages[i]->mapping);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	kfree(pages);
 }
@@ -664,7 +664,7 @@
 
 	for (i = 0; i < async_extent->nr_pages; i++) {
 		WARN_ON(async_extent->pages[i]->mapping);
-		page_cache_release(async_extent->pages[i]);
+		put_page(async_extent->pages[i]);
 	}
 	kfree(async_extent->pages);
 	async_extent->nr_pages = 0;
@@ -966,7 +966,7 @@
 				     PAGE_END_WRITEBACK);
 
 			*nr_written = *nr_written +
-			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
+			     (end - start + PAGE_SIZE) / PAGE_SIZE;
 			*page_started = 1;
 			goto out;
 		} else if (ret < 0) {
@@ -1106,8 +1106,8 @@
 	async_cow = container_of(work, struct async_cow, work);
 
 	root = async_cow->root;
-	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
-		PAGE_CACHE_SHIFT;
+	nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
+		PAGE_SHIFT;
 
 	/*
 	 * atomic_sub_return implies a barrier for waitqueue_active
@@ -1164,8 +1164,8 @@
 				async_cow_start, async_cow_submit,
 				async_cow_free);
 
-		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
-			PAGE_CACHE_SHIFT;
+		nr_pages = (cur_end - start + PAGE_SIZE) >>
+			PAGE_SHIFT;
 		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
 
 		btrfs_queue_work(root->fs_info->delalloc_workers,
@@ -1960,7 +1960,7 @@
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 			      struct extent_state **cached_state)
 {
-	WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
+	WARN_ON((end & (PAGE_SIZE - 1)) == 0);
 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
 				   cached_state, GFP_NOFS);
 }
@@ -1993,7 +1993,7 @@
 
 	inode = page->mapping->host;
 	page_start = page_offset(page);
-	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
+	page_end = page_offset(page) + PAGE_SIZE - 1;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
 			 &cached_state);
@@ -2003,7 +2003,7 @@
 		goto out;
 
 	ordered = btrfs_lookup_ordered_range(inode, page_start,
-					PAGE_CACHE_SIZE);
+					PAGE_SIZE);
 	if (ordered) {
 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
 				     page_end, &cached_state, GFP_NOFS);
@@ -2014,7 +2014,7 @@
 	}
 
 	ret = btrfs_delalloc_reserve_space(inode, page_start,
-					   PAGE_CACHE_SIZE);
+					   PAGE_SIZE);
 	if (ret) {
 		mapping_set_error(page->mapping, ret);
 		end_extent_writepage(page, ret, page_start, page_end);
@@ -2030,7 +2030,7 @@
 			     &cached_state, GFP_NOFS);
 out_page:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	kfree(fixup);
 }
 
@@ -2063,7 +2063,7 @@
 		return -EAGAIN;
 
 	SetPageChecked(page);
-	page_cache_get(page);
+	get_page(page);
 	btrfs_init_work(&fixup->work, btrfs_fixup_helper,
 			btrfs_writepage_fixup_worker, NULL, NULL);
 	fixup->page = page;
@@ -4247,7 +4247,7 @@
 
 	if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
 		loff_t offset = new_size;
-		loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
+		loff_t page_end = ALIGN(offset, PAGE_SIZE);
 
 		/*
 		 * Zero out the remaining of the last page of our inline extent,
@@ -4633,7 +4633,7 @@
 	struct extent_state *cached_state = NULL;
 	char *kaddr;
 	u32 blocksize = root->sectorsize;
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
+	pgoff_t index = from >> PAGE_SHIFT;
 	unsigned offset = from & (blocksize - 1);
 	struct page *page;
 	gfp_t mask = btrfs_alloc_write_mask(mapping);
@@ -4668,7 +4668,7 @@
 		lock_page(page);
 		if (page->mapping != mapping) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto again;
 		}
 		if (!PageUptodate(page)) {
@@ -4686,7 +4686,7 @@
 		unlock_extent_cached(io_tree, block_start, block_end,
 				     &cached_state, GFP_NOFS);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
 		goto again;
@@ -4728,7 +4728,7 @@
 		btrfs_delalloc_release_space(inode, block_start,
 					     blocksize);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out:
 	return ret;
 }
@@ -6717,7 +6717,7 @@
 
 	read_extent_buffer(leaf, tmp, ptr, inline_size);
 
-	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
+	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
 	ret = btrfs_decompress(compress_type, tmp, page,
 			       extent_offset, inline_size, max_size);
 	kfree(tmp);
@@ -6879,8 +6879,8 @@
 
 		size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
 		extent_offset = page_offset(page) + pg_offset - extent_start;
-		copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
-				size - extent_offset);
+		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
+				  size - extent_offset);
 		em->start = extent_start + extent_offset;
 		em->len = ALIGN(copy_size, root->sectorsize);
 		em->orig_block_len = em->len;
@@ -6899,9 +6899,9 @@
 				map = kmap(page);
 				read_extent_buffer(leaf, map + pg_offset, ptr,
 						   copy_size);
-				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
+				if (pg_offset + copy_size < PAGE_SIZE) {
 					memset(map + pg_offset + copy_size, 0,
-					       PAGE_CACHE_SIZE - pg_offset -
+					       PAGE_SIZE - pg_offset -
 					       copy_size);
 				}
 				kunmap(page);
@@ -7336,12 +7336,12 @@
 	int start_idx;
 	int end_idx;
 
-	start_idx = start >> PAGE_CACHE_SHIFT;
+	start_idx = start >> PAGE_SHIFT;
 
 	/*
 	 * end is the last byte in the last page.  end == start is legal
 	 */
-	end_idx = end >> PAGE_CACHE_SHIFT;
+	end_idx = end >> PAGE_SHIFT;
 
 	rcu_read_lock();
 
@@ -7382,7 +7382,7 @@
 		 * include/linux/pagemap.h for details.
 		 */
 		if (unlikely(page != *pagep)) {
-			page_cache_release(page);
+			put_page(page);
 			page = NULL;
 		}
 	}
@@ -7390,7 +7390,7 @@
 	if (page) {
 		if (page->index <= end_idx)
 			found = true;
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	rcu_read_unlock();
@@ -8541,13 +8541,13 @@
 	return retval;
 }
 
-static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			       loff_t offset)
+static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_dio_data dio_data = { 0 };
+	loff_t offset = iocb->ki_pos;
 	size_t count = 0;
 	int flags = 0;
 	bool wakeup = true;
@@ -8607,7 +8607,7 @@
 
 	ret = __blockdev_direct_IO(iocb, inode,
 				   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
-				   iter, offset, btrfs_get_blocks_direct, NULL,
+				   iter, btrfs_get_blocks_direct, NULL,
 				   btrfs_submit_direct, flags);
 	if (iov_iter_rw(iter) == WRITE) {
 		current->journal_info = NULL;
@@ -8719,7 +8719,7 @@
 	if (ret == 1) {
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return ret;
 }
@@ -8739,7 +8739,7 @@
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	u64 page_start = page_offset(page);
-	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = page_start + PAGE_SIZE - 1;
 	u64 start;
 	u64 end;
 	int inode_evicting = inode->i_state & I_FREEING;
@@ -8822,7 +8822,7 @@
 	 * 2) Not written to disk
 	 *    This means the reserved space should be freed here.
 	 */
-	btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE);
+	btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
 	if (!inode_evicting) {
 		clear_extent_bit(tree, page_start, page_end,
 				 EXTENT_LOCKED | EXTENT_DIRTY |
@@ -8837,7 +8837,7 @@
 	if (PagePrivate(page)) {
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
@@ -8874,11 +8874,11 @@
 	u64 page_end;
 	u64 end;
 
-	reserved_space = PAGE_CACHE_SIZE;
+	reserved_space = PAGE_SIZE;
 
 	sb_start_pagefault(inode->i_sb);
 	page_start = page_offset(page);
-	page_end = page_start + PAGE_CACHE_SIZE - 1;
+	page_end = page_start + PAGE_SIZE - 1;
 	end = page_end;
 
 	/*
@@ -8934,15 +8934,15 @@
 		goto again;
 	}
 
-	if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) {
+	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
 		reserved_space = round_up(size - page_start, root->sectorsize);
-		if (reserved_space < PAGE_CACHE_SIZE) {
+		if (reserved_space < PAGE_SIZE) {
 			end = page_start + reserved_space - 1;
 			spin_lock(&BTRFS_I(inode)->lock);
 			BTRFS_I(inode)->outstanding_extents++;
 			spin_unlock(&BTRFS_I(inode)->lock);
 			btrfs_delalloc_release_space(inode, page_start,
-						PAGE_CACHE_SIZE - reserved_space);
+						PAGE_SIZE - reserved_space);
 		}
 	}
 
@@ -8969,14 +8969,14 @@
 	ret = 0;
 
 	/* page is wholly or partially inside EOF */
-	if (page_start + PAGE_CACHE_SIZE > size)
-		zero_start = size & ~PAGE_CACHE_MASK;
+	if (page_start + PAGE_SIZE > size)
+		zero_start = size & ~PAGE_MASK;
 	else
-		zero_start = PAGE_CACHE_SIZE;
+		zero_start = PAGE_SIZE;
 
-	if (zero_start != PAGE_CACHE_SIZE) {
+	if (zero_start != PAGE_SIZE) {
 		kaddr = kmap(page);
-		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
+		memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
 		flush_dcache_page(page);
 		kunmap(page);
 	}
@@ -10160,10 +10160,10 @@
 	.symlink	= btrfs_symlink,
 	.setattr	= btrfs_setattr,
 	.mknod		= btrfs_mknod,
-	.setxattr	= btrfs_setxattr,
+	.setxattr	= generic_setxattr,
 	.getxattr	= generic_getxattr,
 	.listxattr	= btrfs_listxattr,
-	.removexattr	= btrfs_removexattr,
+	.removexattr	= generic_removexattr,
 	.permission	= btrfs_permission,
 	.get_acl	= btrfs_get_acl,
 	.set_acl	= btrfs_set_acl,
@@ -10237,10 +10237,10 @@
 static const struct inode_operations btrfs_file_inode_operations = {
 	.getattr	= btrfs_getattr,
 	.setattr	= btrfs_setattr,
-	.setxattr	= btrfs_setxattr,
+	.setxattr	= generic_setxattr,
 	.getxattr	= generic_getxattr,
 	.listxattr      = btrfs_listxattr,
-	.removexattr	= btrfs_removexattr,
+	.removexattr	= generic_removexattr,
 	.permission	= btrfs_permission,
 	.fiemap		= btrfs_fiemap,
 	.get_acl	= btrfs_get_acl,
@@ -10251,10 +10251,10 @@
 	.getattr	= btrfs_getattr,
 	.setattr	= btrfs_setattr,
 	.permission	= btrfs_permission,
-	.setxattr	= btrfs_setxattr,
+	.setxattr	= generic_setxattr,
 	.getxattr	= generic_getxattr,
 	.listxattr	= btrfs_listxattr,
-	.removexattr	= btrfs_removexattr,
+	.removexattr	= generic_removexattr,
 	.get_acl	= btrfs_get_acl,
 	.set_acl	= btrfs_set_acl,
 	.update_time	= btrfs_update_time,
@@ -10265,10 +10265,10 @@
 	.getattr	= btrfs_getattr,
 	.setattr	= btrfs_setattr,
 	.permission	= btrfs_permission,
-	.setxattr	= btrfs_setxattr,
+	.setxattr	= generic_setxattr,
 	.getxattr	= generic_getxattr,
 	.listxattr	= btrfs_listxattr,
-	.removexattr	= btrfs_removexattr,
+	.removexattr	= generic_removexattr,
 	.update_time	= btrfs_update_time,
 };
 
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 053e677..0b8ba71 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -837,9 +837,11 @@
 	struct dentry *dentry;
 	int error;
 
-	error = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
-	if (error == -EINTR)
-		return error;
+	inode_lock_nested(dir, I_MUTEX_PARENT);
+	// XXX: should've been
+	// mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
+	// if (error == -EINTR)
+	//	return error;
 
 	dentry = lookup_one_len(name, parent->dentry, namelen);
 	error = PTR_ERR(dentry);
@@ -898,7 +900,7 @@
 	u64 end;
 
 	read_lock(&em_tree->lock);
-	em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
+	em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
 	read_unlock(&em_tree->lock);
 
 	if (em) {
@@ -988,7 +990,7 @@
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_map *em;
-	u64 len = PAGE_CACHE_SIZE;
+	u64 len = PAGE_SIZE;
 
 	/*
 	 * hopefully we have this extent in the tree already, try without
@@ -1124,15 +1126,15 @@
 	struct extent_io_tree *tree;
 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 
-	file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+	file_end = (isize - 1) >> PAGE_SHIFT;
 	if (!isize || start_index > file_end)
 		return 0;
 
 	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
 
 	ret = btrfs_delalloc_reserve_space(inode,
-			start_index << PAGE_CACHE_SHIFT,
-			page_cnt << PAGE_CACHE_SHIFT);
+			start_index << PAGE_SHIFT,
+			page_cnt << PAGE_SHIFT);
 	if (ret)
 		return ret;
 	i_done = 0;
@@ -1148,7 +1150,7 @@
 			break;
 
 		page_start = page_offset(page);
-		page_end = page_start + PAGE_CACHE_SIZE - 1;
+		page_end = page_start + PAGE_SIZE - 1;
 		while (1) {
 			lock_extent_bits(tree, page_start, page_end,
 					 &cached_state);
@@ -1169,7 +1171,7 @@
 			 */
 			if (page->mapping != inode->i_mapping) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				goto again;
 			}
 		}
@@ -1179,7 +1181,7 @@
 			lock_page(page);
 			if (!PageUptodate(page)) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				ret = -EIO;
 				break;
 			}
@@ -1187,7 +1189,7 @@
 
 		if (page->mapping != inode->i_mapping) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto again;
 		}
 
@@ -1208,7 +1210,7 @@
 		wait_on_page_writeback(pages[i]);
 
 	page_start = page_offset(pages[0]);
-	page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
+	page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree,
 			 page_start, page_end - 1, &cached_state);
@@ -1222,8 +1224,8 @@
 		BTRFS_I(inode)->outstanding_extents++;
 		spin_unlock(&BTRFS_I(inode)->lock);
 		btrfs_delalloc_release_space(inode,
-				start_index << PAGE_CACHE_SHIFT,
-				(page_cnt - i_done) << PAGE_CACHE_SHIFT);
+				start_index << PAGE_SHIFT,
+				(page_cnt - i_done) << PAGE_SHIFT);
 	}
 
 
@@ -1240,17 +1242,17 @@
 		set_page_extent_mapped(pages[i]);
 		set_page_dirty(pages[i]);
 		unlock_page(pages[i]);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	return i_done;
 out:
 	for (i = 0; i < i_done; i++) {
 		unlock_page(pages[i]);
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 	}
 	btrfs_delalloc_release_space(inode,
-			start_index << PAGE_CACHE_SHIFT,
-			page_cnt << PAGE_CACHE_SHIFT);
+			start_index << PAGE_SHIFT,
+			page_cnt << PAGE_SHIFT);
 	return ret;
 
 }
@@ -1273,7 +1275,7 @@
 	int defrag_count = 0;
 	int compress_type = BTRFS_COMPRESS_ZLIB;
 	u32 extent_thresh = range->extent_thresh;
-	unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT;
+	unsigned long max_cluster = SZ_256K >> PAGE_SHIFT;
 	unsigned long cluster = max_cluster;
 	u64 new_align = ~((u64)SZ_128K - 1);
 	struct page **pages = NULL;
@@ -1317,9 +1319,9 @@
 	/* find the last page to defrag */
 	if (range->start + range->len > range->start) {
 		last_index = min_t(u64, isize - 1,
-			 range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
+			 range->start + range->len - 1) >> PAGE_SHIFT;
 	} else {
-		last_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		last_index = (isize - 1) >> PAGE_SHIFT;
 	}
 
 	if (newer_than) {
@@ -1331,11 +1333,11 @@
 			 * we always align our defrag to help keep
 			 * the extents in the file evenly spaced
 			 */
-			i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+			i = (newer_off & new_align) >> PAGE_SHIFT;
 		} else
 			goto out_ra;
 	} else {
-		i = range->start >> PAGE_CACHE_SHIFT;
+		i = range->start >> PAGE_SHIFT;
 	}
 	if (!max_to_defrag)
 		max_to_defrag = last_index - i + 1;
@@ -1348,7 +1350,7 @@
 		inode->i_mapping->writeback_index = i;
 
 	while (i <= last_index && defrag_count < max_to_defrag &&
-	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) {
+	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
 		/*
 		 * make sure we stop running if someone unmounts
 		 * the FS
@@ -1362,7 +1364,7 @@
 			break;
 		}
 
-		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
+		if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
 					 extent_thresh, &last_len, &skip,
 					 &defrag_end, range->flags &
 					 BTRFS_DEFRAG_RANGE_COMPRESS)) {
@@ -1371,14 +1373,14 @@
 			 * the should_defrag function tells us how much to skip
 			 * bump our counter by the suggested amount
 			 */
-			next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE);
+			next = DIV_ROUND_UP(skip, PAGE_SIZE);
 			i = max(i + 1, next);
 			continue;
 		}
 
 		if (!newer_than) {
-			cluster = (PAGE_CACHE_ALIGN(defrag_end) >>
-				   PAGE_CACHE_SHIFT) - i;
+			cluster = (PAGE_ALIGN(defrag_end) >>
+				   PAGE_SHIFT) - i;
 			cluster = min(cluster, max_cluster);
 		} else {
 			cluster = max_cluster;
@@ -1412,20 +1414,20 @@
 				i += ret;
 
 			newer_off = max(newer_off + 1,
-					(u64)i << PAGE_CACHE_SHIFT);
+					(u64)i << PAGE_SHIFT);
 
 			ret = find_new_extents(root, inode, newer_than,
 					       &newer_off, SZ_64K);
 			if (!ret) {
 				range->start = newer_off;
-				i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+				i = (newer_off & new_align) >> PAGE_SHIFT;
 			} else {
 				break;
 			}
 		} else {
 			if (ret > 0) {
 				i += ret;
-				last_len += ret << PAGE_CACHE_SHIFT;
+				last_len += ret << PAGE_SHIFT;
 			} else {
 				i++;
 				last_len = 0;
@@ -1654,7 +1656,7 @@
 
 		src_inode = file_inode(src.file);
 		if (src_inode->i_sb != file_inode(file)->i_sb) {
-			btrfs_info(BTRFS_I(src_inode)->root->fs_info,
+			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
 				   "Snapshot src from another FS");
 			ret = -EXDEV;
 		} else if (!inode_owner_or_capable(src_inode)) {
@@ -1722,7 +1724,7 @@
 	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
 		readonly = true;
 	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
-		if (vol_args->size > PAGE_CACHE_SIZE) {
+		if (vol_args->size > PAGE_SIZE) {
 			ret = -EINVAL;
 			goto free_args;
 		}
@@ -2366,9 +2368,11 @@
 		goto out;
 
 
-	err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
-	if (err == -EINTR)
-		goto out_drop_write;
+	inode_lock_nested(dir, I_MUTEX_PARENT);
+	// XXX: should've been
+	// err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
+	// if (err == -EINTR)
+	//	goto out_drop_write;
 	dentry = lookup_one_len(vol_args->name, parent, namelen);
 	if (IS_ERR(dentry)) {
 		err = PTR_ERR(dentry);
@@ -2558,7 +2562,7 @@
 	dput(dentry);
 out_unlock_dir:
 	inode_unlock(dir);
-out_drop_write:
+//out_drop_write:
 	mnt_drop_write_file(file);
 out:
 	kfree(vol_args);
@@ -2806,12 +2810,12 @@
 		lock_page(page);
 		if (!PageUptodate(page)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			return ERR_PTR(-EIO);
 		}
 		if (page->mapping != inode->i_mapping) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			return ERR_PTR(-EAGAIN);
 		}
 	}
@@ -2823,7 +2827,7 @@
 			       int num_pages, u64 off)
 {
 	int i;
-	pgoff_t index = off >> PAGE_CACHE_SHIFT;
+	pgoff_t index = off >> PAGE_SHIFT;
 
 	for (i = 0; i < num_pages; i++) {
 again:
@@ -2932,12 +2936,12 @@
 		pg = cmp->src_pages[i];
 		if (pg) {
 			unlock_page(pg);
-			page_cache_release(pg);
+			put_page(pg);
 		}
 		pg = cmp->dst_pages[i];
 		if (pg) {
 			unlock_page(pg);
-			page_cache_release(pg);
+			put_page(pg);
 		}
 	}
 	kfree(cmp->src_pages);
@@ -2949,7 +2953,7 @@
 				  u64 len, struct cmp_pages *cmp)
 {
 	int ret;
-	int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
+	int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
 	struct page **src_pgarr, **dst_pgarr;
 
 	/*
@@ -2987,12 +2991,12 @@
 	int ret = 0;
 	int i;
 	struct page *src_page, *dst_page;
-	unsigned int cmp_len = PAGE_CACHE_SIZE;
+	unsigned int cmp_len = PAGE_SIZE;
 	void *addr, *dst_addr;
 
 	i = 0;
 	while (len) {
-		if (len < PAGE_CACHE_SIZE)
+		if (len < PAGE_SIZE)
 			cmp_len = len;
 
 		BUG_ON(i >= cmp->num_pages);
@@ -3191,7 +3195,7 @@
 	if (olen > BTRFS_MAX_DEDUPE_LEN)
 		olen = BTRFS_MAX_DEDUPE_LEN;
 
-	if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) {
+	if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
 		/*
 		 * Btrfs does not support blocksize < page_size. As a
 		 * result, btrfs_cmp_data() won't correctly handle
@@ -3891,8 +3895,8 @@
 	 * data immediately and not the previous data.
 	 */
 	truncate_inode_pages_range(&inode->i_data,
-				round_down(destoff, PAGE_CACHE_SIZE),
-				round_up(destoff + len, PAGE_CACHE_SIZE) - 1);
+				round_down(destoff, PAGE_SIZE),
+				round_up(destoff + len, PAGE_SIZE) - 1);
 out_unlock:
 	if (!same_inode)
 		btrfs_double_inode_unlock(src, inode);
@@ -4124,7 +4128,7 @@
 	/* we generally have at most 6 or so space infos, one for each raid
 	 * level.  So, a whole page should be more than enough for everyone
 	 */
-	if (alloc_size > PAGE_CACHE_SIZE)
+	if (alloc_size > PAGE_SIZE)
 		return -ENOMEM;
 
 	space_args.total_spaces = 0;
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index a2f0513..1adfbe7 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -55,8 +55,8 @@
 		return ERR_PTR(-ENOMEM);
 
 	workspace->mem = vmalloc(LZO1X_MEM_COMPRESS);
-	workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
-	workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
+	workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
+	workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
 	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
 		goto fail;
 
@@ -116,7 +116,7 @@
 	*total_out = 0;
 	*total_in = 0;
 
-	in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
 	data_in = kmap(in_page);
 
 	/*
@@ -133,10 +133,10 @@
 	tot_out = LZO_LEN;
 	pages[0] = out_page;
 	nr_pages = 1;
-	pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;
+	pg_bytes_left = PAGE_SIZE - LZO_LEN;
 
 	/* compress at most one page of data each time */
-	in_len = min(len, PAGE_CACHE_SIZE);
+	in_len = min(len, PAGE_SIZE);
 	while (tot_in < len) {
 		ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
 				       &out_len, workspace->mem);
@@ -201,7 +201,7 @@
 				cpage_out = kmap(out_page);
 				pages[nr_pages++] = out_page;
 
-				pg_bytes_left = PAGE_CACHE_SIZE;
+				pg_bytes_left = PAGE_SIZE;
 				out_offset = 0;
 			}
 		}
@@ -221,12 +221,12 @@
 
 		bytes_left = len - tot_in;
 		kunmap(in_page);
-		page_cache_release(in_page);
+		put_page(in_page);
 
-		start += PAGE_CACHE_SIZE;
-		in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+		start += PAGE_SIZE;
+		in_page = find_get_page(mapping, start >> PAGE_SHIFT);
 		data_in = kmap(in_page);
-		in_len = min(bytes_left, PAGE_CACHE_SIZE);
+		in_len = min(bytes_left, PAGE_SIZE);
 	}
 
 	if (tot_out > tot_in)
@@ -248,7 +248,7 @@
 
 	if (in_page) {
 		kunmap(in_page);
-		page_cache_release(in_page);
+		put_page(in_page);
 	}
 
 	return ret;
@@ -266,7 +266,7 @@
 	char *data_in;
 	unsigned long page_in_index = 0;
 	unsigned long page_out_index = 0;
-	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
+	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
 	unsigned long buf_start;
 	unsigned long buf_offset = 0;
 	unsigned long bytes;
@@ -289,7 +289,7 @@
 	tot_in = LZO_LEN;
 	in_offset = LZO_LEN;
 	tot_len = min_t(size_t, srclen, tot_len);
-	in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;
+	in_page_bytes_left = PAGE_SIZE - LZO_LEN;
 
 	tot_out = 0;
 	pg_offset = 0;
@@ -345,12 +345,12 @@
 
 				data_in = kmap(pages_in[++page_in_index]);
 
-				in_page_bytes_left = PAGE_CACHE_SIZE;
+				in_page_bytes_left = PAGE_SIZE;
 				in_offset = 0;
 			}
 		}
 
-		out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
+		out_len = lzo1x_worst_compress(PAGE_SIZE);
 		ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
 					    &out_len);
 		if (need_unmap)
@@ -399,7 +399,7 @@
 	in_len = read_compress_length(data_in);
 	data_in += LZO_LEN;
 
-	out_len = PAGE_CACHE_SIZE;
+	out_len = PAGE_SIZE;
 	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
 	if (ret != LZO_E_OK) {
 		printk(KERN_WARNING "BTRFS: decompress failed!\n");
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5279fda..9e11955 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1463,6 +1463,7 @@
 	u64 bytenr = record->bytenr;
 
 	assert_spin_locked(&delayed_refs->lock);
+	trace_btrfs_qgroup_insert_dirty_extent(record);
 
 	while (*p) {
 		parent_node = *p;
@@ -1594,6 +1595,9 @@
 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
 
+		trace_qgroup_update_counters(qg->qgroupid, cur_old_count,
+					     cur_new_count);
+
 		/* Rfer update part */
 		if (cur_old_count == 0 && cur_new_count > 0) {
 			qg->rfer += num_bytes;
@@ -1683,6 +1687,9 @@
 		goto out_free;
 	BUG_ON(!fs_info->quota_root);
 
+	trace_btrfs_qgroup_account_extent(bytenr, num_bytes, nr_old_roots,
+					  nr_new_roots);
+
 	qgroups = ulist_alloc(GFP_NOFS);
 	if (!qgroups) {
 		ret = -ENOMEM;
@@ -1752,6 +1759,8 @@
 		record = rb_entry(node, struct btrfs_qgroup_extent_record,
 				  node);
 
+		trace_btrfs_qgroup_account_extents(record);
+
 		if (!ret) {
 			/*
 			 * Use (u64)-1 as time_seq to do special search, which
@@ -1842,8 +1851,10 @@
 }
 
 /*
- * copy the acounting information between qgroups. This is necessary when a
- * snapshot or a subvolume is created
+ * Copy the accounting information between qgroups. This is necessary
+ * when a snapshot or a subvolume is created. Throwing an error will
+ * cause a transaction abort so we take extra care here to only error
+ * when a readonly fs is a reasonable outcome.
  */
 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
@@ -1873,15 +1884,15 @@
 		       2 * inherit->num_excl_copies;
 		for (i = 0; i < nums; ++i) {
 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
-			if (!srcgroup) {
-				ret = -EINVAL;
-				goto out;
-			}
 
-			if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
-				ret = -EINVAL;
-				goto out;
-			}
+			/*
+			 * Zero out invalid groups so we can ignore
+			 * them later.
+			 */
+			if (!srcgroup ||
+			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
+				*i_qgroups = 0ULL;
+
 			++i_qgroups;
 		}
 	}
@@ -1916,17 +1927,19 @@
 	 */
 	if (inherit) {
 		i_qgroups = (u64 *)(inherit + 1);
-		for (i = 0; i < inherit->num_qgroups; ++i) {
+		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
+			if (*i_qgroups == 0)
+				continue;
 			ret = add_qgroup_relation_item(trans, quota_root,
 						       objectid, *i_qgroups);
-			if (ret)
+			if (ret && ret != -EEXIST)
 				goto out;
 			ret = add_qgroup_relation_item(trans, quota_root,
 						       *i_qgroups, objectid);
-			if (ret)
+			if (ret && ret != -EEXIST)
 				goto out;
-			++i_qgroups;
 		}
+		ret = 0;
 	}
 
 
@@ -1987,17 +2000,22 @@
 
 	i_qgroups = (u64 *)(inherit + 1);
 	for (i = 0; i < inherit->num_qgroups; ++i) {
-		ret = add_relation_rb(quota_root->fs_info, objectid,
-				      *i_qgroups);
-		if (ret)
-			goto unlock;
+		if (*i_qgroups) {
+			ret = add_relation_rb(quota_root->fs_info, objectid,
+					      *i_qgroups);
+			if (ret)
+				goto unlock;
+		}
 		++i_qgroups;
 	}
 
-	for (i = 0; i <  inherit->num_ref_copies; ++i) {
+	for (i = 0; i <  inherit->num_ref_copies; ++i, i_qgroups += 2) {
 		struct btrfs_qgroup *src;
 		struct btrfs_qgroup *dst;
 
+		if (!i_qgroups[0] || !i_qgroups[1])
+			continue;
+
 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
 
@@ -2008,12 +2026,14 @@
 
 		dst->rfer = src->rfer - level_size;
 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
-		i_qgroups += 2;
 	}
-	for (i = 0; i <  inherit->num_excl_copies; ++i) {
+	for (i = 0; i <  inherit->num_excl_copies; ++i, i_qgroups += 2) {
 		struct btrfs_qgroup *src;
 		struct btrfs_qgroup *dst;
 
+		if (!i_qgroups[0] || !i_qgroups[1])
+			continue;
+
 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
 
@@ -2024,7 +2044,6 @@
 
 		dst->excl = src->excl + level_size;
 		dst->excl_cmpr = src->excl_cmpr + level_size;
-		i_qgroups += 2;
 	}
 
 unlock:
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 5516136..0b7792e 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -270,7 +270,7 @@
 		s = kmap(rbio->bio_pages[i]);
 		d = kmap(rbio->stripe_pages[i]);
 
-		memcpy(d, s, PAGE_CACHE_SIZE);
+		memcpy(d, s, PAGE_SIZE);
 
 		kunmap(rbio->bio_pages[i]);
 		kunmap(rbio->stripe_pages[i]);
@@ -962,7 +962,7 @@
  */
 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 {
-	return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes;
+	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
 }
 
 /*
@@ -1078,7 +1078,7 @@
 	u64 disk_start;
 
 	stripe = &rbio->bbio->stripes[stripe_nr];
-	disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
+	disk_start = stripe->physical + (page_index << PAGE_SHIFT);
 
 	/* if the device is missing, just fail this stripe */
 	if (!stripe->dev->bdev)
@@ -1096,8 +1096,8 @@
 		if (last_end == disk_start && stripe->dev->bdev &&
 		    !last->bi_error &&
 		    last->bi_bdev == stripe->dev->bdev) {
-			ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
-			if (ret == PAGE_CACHE_SIZE)
+			ret = bio_add_page(last, page, PAGE_SIZE, 0);
+			if (ret == PAGE_SIZE)
 				return 0;
 		}
 	}
@@ -1111,7 +1111,7 @@
 	bio->bi_bdev = stripe->dev->bdev;
 	bio->bi_iter.bi_sector = disk_start >> 9;
 
-	bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+	bio_add_page(bio, page, PAGE_SIZE, 0);
 	bio_list_add(bio_list, bio);
 	return 0;
 }
@@ -1154,7 +1154,7 @@
 	bio_list_for_each(bio, &rbio->bio_list) {
 		start = (u64)bio->bi_iter.bi_sector << 9;
 		stripe_offset = start - rbio->bbio->raid_map[0];
-		page_index = stripe_offset >> PAGE_CACHE_SHIFT;
+		page_index = stripe_offset >> PAGE_SHIFT;
 
 		for (i = 0; i < bio->bi_vcnt; i++) {
 			p = bio->bi_io_vec[i].bv_page;
@@ -1253,7 +1253,7 @@
 		} else {
 			/* raid5 */
 			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
-			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 		}
 
 
@@ -1914,7 +1914,7 @@
 			/* Copy parity block into failed block to start with */
 			memcpy(pointers[faila],
 			       pointers[rbio->nr_data],
-			       PAGE_CACHE_SIZE);
+			       PAGE_SIZE);
 
 			/* rearrange the pointer array */
 			p = pointers[faila];
@@ -1923,7 +1923,7 @@
 			pointers[rbio->nr_data - 1] = p;
 
 			/* xor in the rest */
-			run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
+			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
 		}
 		/* if we're doing this rebuild as part of an rmw, go through
 		 * and set all of our private rbio pages in the
@@ -2250,7 +2250,7 @@
 	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
 				rbio->stripe_len * rbio->nr_data);
 	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
-	index = stripe_offset >> PAGE_CACHE_SHIFT;
+	index = stripe_offset >> PAGE_SHIFT;
 	rbio->bio_pages[index] = page;
 }
 
@@ -2365,14 +2365,14 @@
 		} else {
 			/* raid5 */
 			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
-			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 		}
 
 		/* Check scrubbing pairty and repair it */
 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
 		parity = kmap(p);
-		if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
-			memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
+		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
+			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
 		else
 			/* Parity is right, needn't writeback */
 			bitmap_clear(rbio->dbitmap, pagenr, 1);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index b8929149..298631ea 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -226,7 +226,7 @@
 	/* find extent */
 	spin_lock(&fs_info->reada_lock);
 	re = radix_tree_lookup(&fs_info->reada_tree,
-			       start >> PAGE_CACHE_SHIFT);
+			       start >> PAGE_SHIFT);
 	if (re)
 		re->refcnt++;
 	spin_unlock(&fs_info->reada_lock);
@@ -257,7 +257,7 @@
 	zone = NULL;
 	spin_lock(&fs_info->reada_lock);
 	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
-				     logical >> PAGE_CACHE_SHIFT, 1);
+				     logical >> PAGE_SHIFT, 1);
 	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
 		kref_get(&zone->refcnt);
 		spin_unlock(&fs_info->reada_lock);
@@ -294,13 +294,13 @@
 
 	spin_lock(&fs_info->reada_lock);
 	ret = radix_tree_insert(&dev->reada_zones,
-				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
+				(unsigned long)(zone->end >> PAGE_SHIFT),
 				zone);
 
 	if (ret == -EEXIST) {
 		kfree(zone);
 		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
-					     logical >> PAGE_CACHE_SHIFT, 1);
+					     logical >> PAGE_SHIFT, 1);
 		if (ret == 1 && logical >= zone->start && logical <= zone->end)
 			kref_get(&zone->refcnt);
 		else
@@ -326,7 +326,7 @@
 	u64 length;
 	int real_stripes;
 	int nzones = 0;
-	unsigned long index = logical >> PAGE_CACHE_SHIFT;
+	unsigned long index = logical >> PAGE_SHIFT;
 	int dev_replace_is_ongoing;
 	int have_zone = 0;
 
@@ -495,7 +495,7 @@
 			     struct reada_extent *re)
 {
 	int i;
-	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;
+	unsigned long index = re->logical >> PAGE_SHIFT;
 
 	spin_lock(&fs_info->reada_lock);
 	if (--re->refcnt) {
@@ -538,7 +538,7 @@
 	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
 
 	radix_tree_delete(&zone->device->reada_zones,
-			  zone->end >> PAGE_CACHE_SHIFT);
+			  zone->end >> PAGE_SHIFT);
 
 	kfree(zone);
 }
@@ -587,7 +587,7 @@
 static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
 {
 	int i;
-	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;
+	unsigned long index = zone->end >> PAGE_SHIFT;
 
 	for (i = 0; i < zone->ndevs; ++i) {
 		struct reada_zone *peer;
@@ -622,7 +622,7 @@
 					     (void **)&zone, index, 1);
 		if (ret == 0)
 			break;
-		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+		index = (zone->end >> PAGE_SHIFT) + 1;
 		if (zone->locked) {
 			if (zone->elems > top_locked_elems) {
 				top_locked_elems = zone->elems;
@@ -673,7 +673,7 @@
 	 * plugging to speed things up
 	 */
 	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
-				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+				     dev->reada_next >> PAGE_SHIFT, 1);
 	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
 		ret = reada_pick_zone(dev);
 		if (!ret) {
@@ -682,7 +682,7 @@
 		}
 		re = NULL;
 		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
-					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+					dev->reada_next >> PAGE_SHIFT, 1);
 	}
 	if (ret == 0) {
 		spin_unlock(&fs_info->reada_lock);
@@ -838,7 +838,7 @@
 				printk(KERN_CONT " curr off %llu",
 					device->reada_next - zone->start);
 			printk(KERN_CONT "\n");
-			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+			index = (zone->end >> PAGE_SHIFT) + 1;
 		}
 		cnt = 0;
 		index = 0;
@@ -864,7 +864,7 @@
 				}
 			}
 			printk(KERN_CONT "\n");
-			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+			index = (re->logical >> PAGE_SHIFT) + 1;
 			if (++cnt > 15)
 				break;
 		}
@@ -880,7 +880,7 @@
 		if (ret == 0)
 			break;
 		if (!re->scheduled) {
-			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+			index = (re->logical >> PAGE_SHIFT) + 1;
 			continue;
 		}
 		printk(KERN_DEBUG
@@ -897,7 +897,7 @@
 			}
 		}
 		printk(KERN_CONT "\n");
-		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+		index = (re->logical >> PAGE_SHIFT) + 1;
 	}
 	spin_unlock(&fs_info->reada_lock);
 }
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 2bd0011..08ef890 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1850,6 +1850,7 @@
 			eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
 			if (IS_ERR(eb)) {
 				ret = PTR_ERR(eb);
+				break;
 			} else if (!extent_buffer_uptodate(eb)) {
 				ret = -EIO;
 				free_extent_buffer(eb);
@@ -3129,10 +3130,10 @@
 	if (ret)
 		goto out;
 
-	index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
-	last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
+	index = (cluster->start - offset) >> PAGE_SHIFT;
+	last_index = (cluster->end - offset) >> PAGE_SHIFT;
 	while (index <= last_index) {
-		ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+		ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE);
 		if (ret)
 			goto out;
 
@@ -3145,7 +3146,7 @@
 						   mask);
 			if (!page) {
 				btrfs_delalloc_release_metadata(inode,
-							PAGE_CACHE_SIZE);
+							PAGE_SIZE);
 				ret = -ENOMEM;
 				goto out;
 			}
@@ -3162,16 +3163,16 @@
 			lock_page(page);
 			if (!PageUptodate(page)) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				btrfs_delalloc_release_metadata(inode,
-							PAGE_CACHE_SIZE);
+							PAGE_SIZE);
 				ret = -EIO;
 				goto out;
 			}
 		}
 
 		page_start = page_offset(page);
-		page_end = page_start + PAGE_CACHE_SIZE - 1;
+		page_end = page_start + PAGE_SIZE - 1;
 
 		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
 
@@ -3191,7 +3192,7 @@
 		unlock_extent(&BTRFS_I(inode)->io_tree,
 			      page_start, page_end);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		index++;
 		balance_dirty_pages_ratelimited(inode->i_mapping);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 39dbdcb..4678f03 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -703,7 +703,7 @@
 	if (IS_ERR(inode))
 		return PTR_ERR(inode);
 
-	index = offset >> PAGE_CACHE_SHIFT;
+	index = offset >> PAGE_SHIFT;
 
 	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
 	if (!page) {
@@ -1636,7 +1636,7 @@
 	if (spage->io_error) {
 		void *mapped_buffer = kmap_atomic(spage->page);
 
-		memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
+		memset(mapped_buffer, 0, PAGE_SIZE);
 		flush_dcache_page(spage->page);
 		kunmap_atomic(mapped_buffer);
 	}
@@ -4294,8 +4294,8 @@
 		goto out;
 	}
 
-	while (len >= PAGE_CACHE_SIZE) {
-		index = offset >> PAGE_CACHE_SHIFT;
+	while (len >= PAGE_SIZE) {
+		index = offset >> PAGE_SHIFT;
 again:
 		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
 		if (!page) {
@@ -4326,7 +4326,7 @@
 			 */
 			if (page->mapping != inode->i_mapping) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				goto again;
 			}
 			if (!PageUptodate(page)) {
@@ -4348,15 +4348,15 @@
 			ret = err;
 next_page:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		if (ret)
 			break;
 
-		offset += PAGE_CACHE_SIZE;
-		physical_for_dev_replace += PAGE_CACHE_SIZE;
-		nocow_ctx_logical += PAGE_CACHE_SIZE;
-		len -= PAGE_CACHE_SIZE;
+		offset += PAGE_SIZE;
+		physical_for_dev_replace += PAGE_SIZE;
+		nocow_ctx_logical += PAGE_SIZE;
+		len -= PAGE_SIZE;
 	}
 	ret = COPY_COMPLETE;
 out:
@@ -4390,8 +4390,8 @@
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 	bio->bi_bdev = dev->bdev;
-	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
-	if (ret != PAGE_CACHE_SIZE) {
+	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
+	if (ret != PAGE_SIZE) {
 leave_with_eio:
 		bio_put(bio);
 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 19b7bf4..8d358c5 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4449,9 +4449,9 @@
 	struct page *page;
 	char *addr;
 	struct btrfs_key key;
-	pgoff_t index = offset >> PAGE_CACHE_SHIFT;
+	pgoff_t index = offset >> PAGE_SHIFT;
 	pgoff_t last_index;
-	unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
+	unsigned pg_offset = offset & ~PAGE_MASK;
 	ssize_t ret = 0;
 
 	key.objectid = sctx->cur_ino;
@@ -4471,7 +4471,7 @@
 	if (len == 0)
 		goto out;
 
-	last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+	last_index = (offset + len - 1) >> PAGE_SHIFT;
 
 	/* initial readahead */
 	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
@@ -4481,7 +4481,7 @@
 
 	while (index <= last_index) {
 		unsigned cur_len = min_t(unsigned, len,
-					 PAGE_CACHE_SIZE - pg_offset);
+					 PAGE_SIZE - pg_offset);
 		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
 		if (!page) {
 			ret = -ENOMEM;
@@ -4493,7 +4493,7 @@
 			lock_page(page);
 			if (!PageUptodate(page)) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				ret = -EIO;
 				break;
 			}
@@ -4503,7 +4503,7 @@
 		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
 		kunmap(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 		pg_offset = 0;
 		len -= cur_len;
@@ -4804,7 +4804,7 @@
 		type = btrfs_file_extent_type(leaf, ei);
 		if (type == BTRFS_FILE_EXTENT_INLINE) {
 			ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
-			ext_len = PAGE_CACHE_ALIGN(ext_len);
+			ext_len = PAGE_ALIGN(ext_len);
 		} else {
 			ext_len = btrfs_file_extent_num_bytes(leaf, ei);
 		}
@@ -4886,7 +4886,7 @@
 		 * but there may be items after this page.  Make
 		 * sure to send the whole thing
 		 */
-		len = PAGE_CACHE_ALIGN(len);
+		len = PAGE_ALIGN(len);
 	} else {
 		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
 	}
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b976597..e05619f 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -66,7 +66,7 @@
 									\
 	if (token && token->kaddr && token->offset <= offset &&		\
 	    token->eb == eb &&						\
-	   (token->offset + PAGE_CACHE_SIZE >= offset + size)) {	\
+	   (token->offset + PAGE_SIZE >= offset + size)) {	\
 		kaddr = token->kaddr;					\
 		p = kaddr + part_offset - token->offset;		\
 		res = get_unaligned_le##bits(p + off);			\
@@ -104,7 +104,7 @@
 									\
 	if (token && token->kaddr && token->offset <= offset &&		\
 	    token->eb == eb &&						\
-	   (token->offset + PAGE_CACHE_SIZE >= offset + size)) {	\
+	   (token->offset + PAGE_SIZE >= offset + size)) {	\
 		kaddr = token->kaddr;					\
 		p = kaddr + part_offset - token->offset;		\
 		put_unaligned_le##bits(val, p + off);			\
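
In struct-funcs.c only the constant name changes; the guarded fast path still asks whether the cached kmap window, which spans one page of the extent buffer starting at token->offset, covers the field being read or written. A hedged restatement of that test as a standalone helper; struct map_token and token_covers() are illustrative names, not kernel API:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL   /* assumed 4 KiB pages */

/* Illustrative stand-in for the cached-mapping state the macro consults. */
struct map_token {
	const void *eb;        /* extent buffer the cached page belongs to */
	void *kaddr;           /* mapped address of that page              */
	unsigned long offset;  /* byte offset of the page within the eb    */
};

/* Mirror of the fast-path condition: the one-page window starting at
 * token->offset covers the field [offset, offset + size) of the same eb. */
static bool token_covers(const struct map_token *token, const void *eb,
			 unsigned long offset, unsigned long size)
{
	return token && token->kaddr && token->eb == eb &&
	       token->offset <= offset &&
	       token->offset + PAGE_SIZE >= offset + size;
}

int main(void)
{
	char page[PAGE_SIZE];
	int eb;   /* any distinct address serves as the "extent buffer" */
	struct map_token t = { .eb = &eb, .kaddr = page, .offset = 8192 };

	/* Window is [8192, 12288): an 8-byte field ending at 12288 fits ... */
	printf("%d\n", token_covers(&t, &eb, 12280, 8));  /* 1 */
	/* ... while one ending at 12298 crosses into the next page. */
	printf("%d\n", token_covers(&t, &eb, 12290, 8));  /* 0 */
	return 0;
}
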
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 669b582..70948b1 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -32,8 +32,8 @@
 {
 	int ret;
 	struct page *pages[16];
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
 	int count = 0;
@@ -49,9 +49,9 @@
 				count++;
 			if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
 				unlock_page(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 			if (flags & PROCESS_RELEASE)
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 		}
 		nr_pages -= ret;
 		index += ret;
@@ -93,7 +93,7 @@
 	 * everything to make sure our pages don't get evicted and screw up our
 	 * test.
 	 */
-	for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) {
+	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
 		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
 		if (!page) {
 			test_msg("Failed to allocate test page\n");
@@ -104,7 +104,7 @@
 		if (index) {
 			unlock_page(page);
 		} else {
-			page_cache_get(page);
+			get_page(page);
 			locked_page = page;
 		}
 	}
@@ -129,7 +129,7 @@
 	}
 	unlock_extent(&tmp, start, end);
 	unlock_page(locked_page);
-	page_cache_release(locked_page);
+	put_page(locked_page);
 
 	/*
 	 * Test this scenario
@@ -139,7 +139,7 @@
 	 */
 	test_start = SZ_64M;
 	locked_page = find_lock_page(inode->i_mapping,
-				     test_start >> PAGE_CACHE_SHIFT);
+				     test_start >> PAGE_SHIFT);
 	if (!locked_page) {
 		test_msg("Couldn't find the locked page\n");
 		goto out_bits;
@@ -165,7 +165,7 @@
 	}
 	unlock_extent(&tmp, start, end);
 	/* locked_page was unlocked above */
-	page_cache_release(locked_page);
+	put_page(locked_page);
 
 	/*
 	 * Test this scenario
@@ -174,7 +174,7 @@
 	 */
 	test_start = max_bytes + 4096;
 	locked_page = find_lock_page(inode->i_mapping, test_start >>
-				     PAGE_CACHE_SHIFT);
+				     PAGE_SHIFT);
 	if (!locked_page) {
 		test_msg("Could'nt find the locked page\n");
 		goto out_bits;
@@ -225,13 +225,13 @@
 	 * range we want to find.
 	 */
 	page = find_get_page(inode->i_mapping,
-			     (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT);
+			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
 	if (!page) {
 		test_msg("Couldn't find our page\n");
 		goto out_bits;
 	}
 	ClearPageDirty(page);
-	page_cache_release(page);
+	put_page(page);
 
 	/* We unlocked it in the previous test */
 	lock_page(locked_page);
@@ -239,7 +239,7 @@
 	end = 0;
 	/*
 	 * Currently if we fail to find dirty pages in the delalloc range we
-	 * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search.  If
+	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
 	 * this changes at any point in the future we will need to fix this
 	 * test's expected behavior.
 	 */
@@ -249,9 +249,9 @@
 		test_msg("Didn't find our range\n");
 		goto out_bits;
 	}
-	if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) {
+	if (start != test_start && end != test_start + PAGE_SIZE - 1) {
 		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
-			 test_start, test_start + PAGE_CACHE_SIZE - 1, start,
+			 test_start, test_start + PAGE_SIZE - 1, start,
 			 end);
 		goto out_bits;
 	}
@@ -265,7 +265,7 @@
 	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
 out:
 	if (locked_page)
-		page_cache_release(locked_page);
+		put_page(locked_page);
 	process_page_range(inode, 0, total_dirty - 1,
 			   PROCESS_UNLOCK | PROCESS_RELEASE);
 	iput(inode);
@@ -298,9 +298,9 @@
 		return -EINVAL;
 	}
 
-	bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+	bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
 		   sizeof(long) * BITS_PER_BYTE);
-	extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
+	extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
 				 sizeof(long) * BITS_PER_BYTE);
 	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
 		test_msg("Setting straddling pages failed\n");
@@ -309,10 +309,10 @@
 
 	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
 	bitmap_clear(bitmap,
-		     (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+		     (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
 		     sizeof(long) * BITS_PER_BYTE);
 	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
-	extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
+	extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
 				   sizeof(long) * BITS_PER_BYTE);
 	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
 		test_msg("Clearing straddling pages failed\n");
@@ -353,7 +353,7 @@
 
 static int test_eb_bitmaps(void)
 {
-	unsigned long len = PAGE_CACHE_SIZE * 4;
+	unsigned long len = PAGE_SIZE * 4;
 	unsigned long *bitmap;
 	struct extent_buffer *eb;
 	int ret;
@@ -379,7 +379,7 @@
 
 	/* Do it over again with an extent buffer which isn't page-aligned. */
 	free_extent_buffer(eb);
-	eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len);
+	eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
 	if (!eb) {
 		test_msg("Couldn't allocate test extent buffer\n");
 		kfree(bitmap);
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index c9ad97b..5142475 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -22,7 +22,7 @@
 #include "../disk-io.h"
 #include "../free-space-cache.h"
 
-#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
+#define BITS_PER_BITMAP		(PAGE_SIZE * 8)
 
 /*
  * This test just does basic sanity checking, making sure we can add an extent
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 24d03c7..e692eea 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4415,6 +4415,127 @@
 	return ret;
 }
 
+/*
+ * When we are logging a new inode X, check if it doesn't have a reference that
+ * matches the reference from some other inode Y created in a past transaction
+ * and that was renamed in the current transaction. If we don't do this, then at
+ * log replay time we can lose inode Y (and all its files if it's a directory):
+ *
+ * mkdir /mnt/x
+ * echo "hello world" > /mnt/x/foobar
+ * sync
+ * mv /mnt/x /mnt/y
+ * mkdir /mnt/x                 # or touch /mnt/x
+ * xfs_io -c fsync /mnt/x
+ * <power fail>
+ * mount fs, trigger log replay
+ *
+ * After the log replay procedure, we would lose the first directory and all its
+ * files (file foobar).
+ * For the case where inode Y is not a directory we simply end up losing it:
+ *
+ * echo "123" > /mnt/foo
+ * sync
+ * mv /mnt/foo /mnt/bar
+ * echo "abc" > /mnt/foo
+ * xfs_io -c fsync /mnt/foo
+ * <power fail>
+ *
+ * We also need this for cases where a snapshot entry is replaced by some other
+ * entry (file or directory) otherwise we end up with an unreplayable log due to
+ * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
+ * if it were a regular entry:
+ *
+ * mkdir /mnt/x
+ * btrfs subvolume snapshot /mnt /mnt/x/snap
+ * btrfs subvolume delete /mnt/x/snap
+ * rmdir /mnt/x
+ * mkdir /mnt/x
+ * fsync /mnt/x or fsync some new file inside it
+ * <power fail>
+ *
+ * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
+ * the same transaction.
+ */
+static int btrfs_check_ref_name_override(struct extent_buffer *eb,
+					 const int slot,
+					 const struct btrfs_key *key,
+					 struct inode *inode)
+{
+	int ret;
+	struct btrfs_path *search_path;
+	char *name = NULL;
+	u32 name_len = 0;
+	u32 item_size = btrfs_item_size_nr(eb, slot);
+	u32 cur_offset = 0;
+	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
+
+	search_path = btrfs_alloc_path();
+	if (!search_path)
+		return -ENOMEM;
+	search_path->search_commit_root = 1;
+	search_path->skip_locking = 1;
+
+	while (cur_offset < item_size) {
+		u64 parent;
+		u32 this_name_len;
+		u32 this_len;
+		unsigned long name_ptr;
+		struct btrfs_dir_item *di;
+
+		if (key->type == BTRFS_INODE_REF_KEY) {
+			struct btrfs_inode_ref *iref;
+
+			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
+			parent = key->offset;
+			this_name_len = btrfs_inode_ref_name_len(eb, iref);
+			name_ptr = (unsigned long)(iref + 1);
+			this_len = sizeof(*iref) + this_name_len;
+		} else {
+			struct btrfs_inode_extref *extref;
+
+			extref = (struct btrfs_inode_extref *)(ptr +
+							       cur_offset);
+			parent = btrfs_inode_extref_parent(eb, extref);
+			this_name_len = btrfs_inode_extref_name_len(eb, extref);
+			name_ptr = (unsigned long)&extref->name;
+			this_len = sizeof(*extref) + this_name_len;
+		}
+
+		if (this_name_len > name_len) {
+			char *new_name;
+
+			new_name = krealloc(name, this_name_len, GFP_NOFS);
+			if (!new_name) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			name_len = this_name_len;
+			name = new_name;
+		}
+
+		read_extent_buffer(eb, name, name_ptr, this_name_len);
+		di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
+					   search_path, parent,
+					   name, this_name_len, 0);
+		if (di && !IS_ERR(di)) {
+			ret = 1;
+			goto out;
+		} else if (IS_ERR(di)) {
+			ret = PTR_ERR(di);
+			goto out;
+		}
+		btrfs_release_path(search_path);
+
+		cur_offset += this_len;
+	}
+	ret = 0;
+out:
+	btrfs_free_path(search_path);
+	kfree(name);
+	return ret;
+}
+
 /* log a single inode in the tree log.
  * At least one parent directory for this inode must exist in the tree
  * or be logged already.
@@ -4602,6 +4723,22 @@
 		if (min_key.type == BTRFS_INODE_ITEM_KEY)
 			need_log_inode_item = false;
 
+		if ((min_key.type == BTRFS_INODE_REF_KEY ||
+		     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
+		    BTRFS_I(inode)->generation == trans->transid) {
+			ret = btrfs_check_ref_name_override(path->nodes[0],
+							    path->slots[0],
+							    &min_key, inode);
+			if (ret < 0) {
+				err = ret;
+				goto out_unlock;
+			} else if (ret > 0) {
+				err = 1;
+				btrfs_set_log_full_commit(root->fs_info, trans);
+				goto out_unlock;
+			}
+		}
+
 		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
 		if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
 			if (ins_nr == 0)
@@ -4851,7 +4988,7 @@
 			goto out;
 
 	if (!S_ISDIR(inode->i_mode)) {
-		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
+		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
 			goto out;
 		inode = d_inode(parent);
 	}
@@ -4872,7 +5009,7 @@
 			break;
 		}
 
-		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
+		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
 			break;
 
 		if (IS_ROOT(parent))
@@ -5285,7 +5422,7 @@
 	}
 
 	while (1) {
-		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
+		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
 			break;
 
 		inode = d_inode(parent);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e2b54d5..bd0f45f 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1025,16 +1025,16 @@
 	}
 
 	/* make sure our super fits in the device */
-	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
+	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
 		goto error_bdev_put;
 
 	/* make sure our super fits in the page */
-	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
+	if (sizeof(*disk_super) > PAGE_SIZE)
 		goto error_bdev_put;
 
 	/* make sure our super doesn't straddle pages on disk */
-	index = bytenr >> PAGE_CACHE_SHIFT;
-	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
+	index = bytenr >> PAGE_SHIFT;
+	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
 		goto error_bdev_put;
 
 	/* pull in the page with our super */
@@ -1047,7 +1047,7 @@
 	p = kmap(page);
 
 	/* align our pointer to the offset of the super block */
-	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
+	disk_super = p + (bytenr & ~PAGE_MASK);
 
 	if (btrfs_super_bytenr(disk_super) != bytenr ||
 	    btrfs_super_magic(disk_super) != BTRFS_MAGIC)
@@ -1075,7 +1075,7 @@
 
 error_unmap:
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 
 error_bdev_put:
 	blkdev_put(bdev, flags);
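
btrfs_read_disk_super() in volumes.c refuses a superblock that would cross a page boundary, because it kmaps a single page and then indexes into it with `bytenr & ~PAGE_MASK`. A small sketch of the straddle test; 4 KiB pages are assumed and the 4 KiB superblock length below is purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12            /* assumed 4 KiB pages */

/* True when [bytenr, bytenr + len) lies entirely within one page. */
static bool fits_in_one_page(uint64_t bytenr, uint64_t len)
{
	return (bytenr >> PAGE_SHIFT) == ((bytenr + len - 1) >> PAGE_SHIFT);
}

int main(void)
{
	/* 64 KiB-aligned start: bytes 65536..69631 all live in page 16. */
	printf("%d\n", fits_in_one_page(65536, 4096));  /* 1 */
	/* Start 1 KiB into that page: the same length now reaches page 17. */
	printf("%d\n", fits_in_one_page(66560, 4096));  /* 0 */
	return 0;
}
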
@@ -6527,7 +6527,7 @@
 	 * but sb spans only this function. Add an explicit SetPageUptodate call
 	 * to silence the warning eg. on PowerPC 64.
 	 */
-	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
+	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
 		SetPageUptodate(sb->pages[0]);
 
 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 145d2b8..3bfb252 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -237,6 +237,9 @@
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret;
 
+	if (btrfs_root_readonly(root))
+		return -EROFS;
+
 	if (trans)
 		return do_setxattr(trans, inode, name, value, size, flags);
 
@@ -369,11 +372,9 @@
 }
 
 static int btrfs_xattr_handler_get(const struct xattr_handler *handler,
-				   struct dentry *dentry, const char *name,
-				   void *buffer, size_t size)
+				   struct dentry *unused, struct inode *inode,
+				   const char *name, void *buffer, size_t size)
 {
-	struct inode *inode = d_inode(dentry);
-
 	name = xattr_full_name(handler, name);
 	return __btrfs_getxattr(inode, name, buffer, size);
 }
@@ -434,25 +435,6 @@
 	NULL,
 };
 
-int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
-		   size_t size, int flags)
-{
-	struct btrfs_root *root = BTRFS_I(d_inode(dentry))->root;
-
-	if (btrfs_root_readonly(root))
-		return -EROFS;
-	return generic_setxattr(dentry, name, value, size, flags);
-}
-
-int btrfs_removexattr(struct dentry *dentry, const char *name)
-{
-	struct btrfs_root *root = BTRFS_I(d_inode(dentry))->root;
-
-	if (btrfs_root_readonly(root))
-		return -EROFS;
-	return generic_removexattr(dentry, name);
-}
-
 static int btrfs_initxattrs(struct inode *inode,
 			    const struct xattr *xattr_array, void *fs_info)
 {
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index 96807b3..15fc474 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -28,9 +28,6 @@
 extern int __btrfs_setxattr(struct btrfs_trans_handle *trans,
 			    struct inode *inode, const char *name,
 			    const void *value, size_t size, int flags);
-extern int btrfs_setxattr(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags);
-extern int btrfs_removexattr(struct dentry *dentry, const char *name);
 
 extern int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
 				     struct inode *inode, struct inode *dir,
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 82990b8..88d274e 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -59,7 +59,7 @@
 	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
 			zlib_inflate_workspacesize());
 	workspace->strm.workspace = vmalloc(workspacesize);
-	workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+	workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
 	if (!workspace->strm.workspace || !workspace->buf)
 		goto fail;
 
@@ -103,7 +103,7 @@
 	workspace->strm.total_in = 0;
 	workspace->strm.total_out = 0;
 
-	in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
 	data_in = kmap(in_page);
 
 	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
@@ -117,8 +117,8 @@
 
 	workspace->strm.next_in = data_in;
 	workspace->strm.next_out = cpage_out;
-	workspace->strm.avail_out = PAGE_CACHE_SIZE;
-	workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE);
+	workspace->strm.avail_out = PAGE_SIZE;
+	workspace->strm.avail_in = min(len, PAGE_SIZE);
 
 	while (workspace->strm.total_in < len) {
 		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
@@ -156,7 +156,7 @@
 			cpage_out = kmap(out_page);
 			pages[nr_pages] = out_page;
 			nr_pages++;
-			workspace->strm.avail_out = PAGE_CACHE_SIZE;
+			workspace->strm.avail_out = PAGE_SIZE;
 			workspace->strm.next_out = cpage_out;
 		}
 		/* we're all done */
@@ -170,14 +170,14 @@
 
 			bytes_left = len - workspace->strm.total_in;
 			kunmap(in_page);
-			page_cache_release(in_page);
+			put_page(in_page);
 
-			start += PAGE_CACHE_SIZE;
+			start += PAGE_SIZE;
 			in_page = find_get_page(mapping,
-						start >> PAGE_CACHE_SHIFT);
+						start >> PAGE_SHIFT);
 			data_in = kmap(in_page);
 			workspace->strm.avail_in = min(bytes_left,
-							   PAGE_CACHE_SIZE);
+							   PAGE_SIZE);
 			workspace->strm.next_in = data_in;
 		}
 	}
@@ -205,7 +205,7 @@
 
 	if (in_page) {
 		kunmap(in_page);
-		page_cache_release(in_page);
+		put_page(in_page);
 	}
 	return ret;
 }
@@ -223,18 +223,18 @@
 	size_t total_out = 0;
 	unsigned long page_in_index = 0;
 	unsigned long page_out_index = 0;
-	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
+	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
 	unsigned long buf_start;
 	unsigned long pg_offset;
 
 	data_in = kmap(pages_in[page_in_index]);
 	workspace->strm.next_in = data_in;
-	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
+	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
 	workspace->strm.total_in = 0;
 
 	workspace->strm.total_out = 0;
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_CACHE_SIZE;
+	workspace->strm.avail_out = PAGE_SIZE;
 	pg_offset = 0;
 
 	/* If it's deflate, and it's got no preset dictionary, then
@@ -274,7 +274,7 @@
 		}
 
 		workspace->strm.next_out = workspace->buf;
-		workspace->strm.avail_out = PAGE_CACHE_SIZE;
+		workspace->strm.avail_out = PAGE_SIZE;
 
 		if (workspace->strm.avail_in == 0) {
 			unsigned long tmp;
@@ -288,7 +288,7 @@
 			workspace->strm.next_in = data_in;
 			tmp = srclen - workspace->strm.total_in;
 			workspace->strm.avail_in = min(tmp,
-							   PAGE_CACHE_SIZE);
+							   PAGE_SIZE);
 		}
 	}
 	if (ret != Z_STREAM_END)
@@ -325,7 +325,7 @@
 	workspace->strm.total_in = 0;
 
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_CACHE_SIZE;
+	workspace->strm.avail_out = PAGE_SIZE;
 	workspace->strm.total_out = 0;
 	/* If it's deflate, and it's got no preset dictionary, then
 	   we can tell zlib to skip the adler32 check. */
@@ -368,8 +368,8 @@
 		else
 			buf_offset = 0;
 
-		bytes = min(PAGE_CACHE_SIZE - pg_offset,
-			    PAGE_CACHE_SIZE - buf_offset);
+		bytes = min(PAGE_SIZE - pg_offset,
+			    PAGE_SIZE - buf_offset);
 		bytes = min(bytes, bytes_left);
 
 		kaddr = kmap_atomic(dest_page);
@@ -380,7 +380,7 @@
 		bytes_left -= bytes;
 next:
 		workspace->strm.next_out = workspace->buf;
-		workspace->strm.avail_out = PAGE_CACHE_SIZE;
+		workspace->strm.avail_out = PAGE_SIZE;
 	}
 
 	if (ret != Z_STREAM_END && bytes_left != 0)
diff --git a/fs/buffer.c b/fs/buffer.c
index 33be296..af0d9a8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -129,7 +129,7 @@
 {
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static void buffer_io_error(struct buffer_head *bh, char *msg)
@@ -207,7 +207,7 @@
 	struct page *page;
 	int all_mapped = 1;
 
-	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
+	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
 	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
 	if (!page)
 		goto out;
@@ -245,7 +245,7 @@
 	}
 out_unlock:
 	spin_unlock(&bd_mapping->private_lock);
-	page_cache_release(page);
+	put_page(page);
 out:
 	return ret;
 }
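
The buffer.c lookup above converts a block number to the page-cache index that holds it with `block >> (PAGE_SHIFT - blkbits)`, and later hunks invert it with `page->index << (PAGE_SHIFT - bbits)`. A worked instance, assuming 4 KiB pages and 1 KiB filesystem blocks:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12            /* assumed 4 KiB pages */

int main(void)
{
	unsigned blkbits = 10;   /* assumed 1 KiB filesystem blocks */
	uint64_t block = 13;

	/* Four 1 KiB blocks per 4 KiB page, so block 13 sits in page 3 ... */
	uint64_t index = block >> (PAGE_SHIFT - blkbits);
	/* ... and that page's first block is block 12. */
	uint64_t first_block_in_page = index << (PAGE_SHIFT - blkbits);

	printf("page index %llu, first block %llu\n",
	       (unsigned long long)index,
	       (unsigned long long)first_block_in_page);  /* 3, 12 */
	return 0;
}
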
@@ -1040,7 +1040,7 @@
 	ret = (block < end_block) ? 1 : -ENXIO;
 failed:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return ret;
 }
 
@@ -1533,7 +1533,7 @@
 	/*
 	 * Check for overflow
 	 */
-	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+	BUG_ON(stop > PAGE_SIZE || stop < length);
 
 	head = page_buffers(page);
 	bh = head;
@@ -1716,7 +1716,7 @@
 	blocksize = bh->b_size;
 	bbits = block_size_bits(blocksize);
 
-	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
 	last_block = (i_size_read(inode) - 1) >> bbits;
 
 	/*
@@ -1894,7 +1894,7 @@
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
 		get_block_t *get_block)
 {
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + len;
 	struct inode *inode = page->mapping->host;
 	unsigned block_start, block_end;
@@ -1904,15 +1904,15 @@
 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
 
 	BUG_ON(!PageLocked(page));
-	BUG_ON(from > PAGE_CACHE_SIZE);
-	BUG_ON(to > PAGE_CACHE_SIZE);
+	BUG_ON(from > PAGE_SIZE);
+	BUG_ON(to > PAGE_SIZE);
 	BUG_ON(from > to);
 
 	head = create_page_buffers(page, inode, 0);
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);
 
-	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
 
 	for(bh = head, block_start = 0; bh != head || !block_start;
 	    block++, block_start=block_end, bh = bh->b_this_page) {
@@ -2020,7 +2020,7 @@
 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 		unsigned flags, struct page **pagep, get_block_t *get_block)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	int status;
 
@@ -2031,7 +2031,7 @@
 	status = __block_write_begin(page, pos, len, get_block);
 	if (unlikely(status)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 	}
 
@@ -2047,7 +2047,7 @@
 	struct inode *inode = mapping->host;
 	unsigned start;
 
-	start = pos & (PAGE_CACHE_SIZE - 1);
+	start = pos & (PAGE_SIZE - 1);
 
 	if (unlikely(copied < len)) {
 		/*
@@ -2099,7 +2099,7 @@
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
@@ -2136,9 +2136,9 @@
 
 	head = page_buffers(page);
 	blocksize = head->b_size;
-	to = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
+	to = min_t(unsigned, PAGE_SIZE - from, count);
 	to = from + to;
-	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
+	if (from < blocksize && to > PAGE_SIZE - blocksize)
 		return 0;
 
 	bh = head;
@@ -2181,7 +2181,7 @@
 	blocksize = head->b_size;
 	bbits = block_size_bits(blocksize);
 
-	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
 	lblock = (i_size_read(inode)+blocksize-1) >> bbits;
 	bh = head;
 	nr = 0;
@@ -2295,16 +2295,16 @@
 	unsigned zerofrom, offset, len;
 	int err = 0;
 
-	index = pos >> PAGE_CACHE_SHIFT;
-	offset = pos & ~PAGE_CACHE_MASK;
+	index = pos >> PAGE_SHIFT;
+	offset = pos & ~PAGE_MASK;
 
-	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
-		zerofrom = curpos & ~PAGE_CACHE_MASK;
+	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
+		zerofrom = curpos & ~PAGE_MASK;
 		if (zerofrom & (blocksize-1)) {
 			*bytes |= (blocksize-1);
 			(*bytes)++;
 		}
-		len = PAGE_CACHE_SIZE - zerofrom;
+		len = PAGE_SIZE - zerofrom;
 
 		err = pagecache_write_begin(file, mapping, curpos, len,
 						AOP_FLAG_UNINTERRUPTIBLE,
@@ -2329,7 +2329,7 @@
 
 	/* page covers the boundary, find the boundary offset */
 	if (index == curidx) {
-		zerofrom = curpos & ~PAGE_CACHE_MASK;
+		zerofrom = curpos & ~PAGE_MASK;
 		/* if we will expand the thing last block will be filled */
 		if (offset <= zerofrom) {
 			goto out;
@@ -2375,7 +2375,7 @@
 	if (err)
 		return err;
 
-	zerofrom = *bytes & ~PAGE_CACHE_MASK;
+	zerofrom = *bytes & ~PAGE_MASK;
 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
 		*bytes |= (blocksize-1);
 		(*bytes)++;
@@ -2430,10 +2430,10 @@
 	}
 
 	/* page is wholly or partially inside EOF */
-	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
-		end = size & ~PAGE_CACHE_MASK;
+	if (((page->index + 1) << PAGE_SHIFT) > size)
+		end = size & ~PAGE_MASK;
 	else
-		end = PAGE_CACHE_SIZE;
+		end = PAGE_SIZE;
 
 	ret = __block_write_begin(page, 0, end, get_block);
 	if (!ret)
@@ -2508,8 +2508,8 @@
 	int ret = 0;
 	int is_mapped_to_disk = 1;
 
-	index = pos >> PAGE_CACHE_SHIFT;
-	from = pos & (PAGE_CACHE_SIZE - 1);
+	index = pos >> PAGE_SHIFT;
+	from = pos & (PAGE_SIZE - 1);
 	to = from + len;
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
@@ -2543,7 +2543,7 @@
 		goto out_release;
 	}
 
-	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 
 	/*
 	 * We loop across all blocks in the page, whether or not they are
@@ -2551,7 +2551,7 @@
 	 * page is fully mapped-to-disk.
 	 */
 	for (block_start = 0, block_in_page = 0, bh = head;
-		  block_start < PAGE_CACHE_SIZE;
+		  block_start < PAGE_SIZE;
 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
 		int create;
 
@@ -2623,7 +2623,7 @@
 
 out_release:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	*pagep = NULL;
 
 	return ret;
@@ -2653,7 +2653,7 @@
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	while (head) {
 		bh = head;
@@ -2675,7 +2675,7 @@
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
-	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	const pgoff_t end_index = i_size >> PAGE_SHIFT;
 	unsigned offset;
 	int ret;
 
@@ -2684,7 +2684,7 @@
 		goto out;
 
 	/* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_CACHE_SIZE-1);
+	offset = i_size & (PAGE_SIZE-1);
 	if (page->index >= end_index+1 || !offset) {
 		/*
 		 * The page may have dirty, unmapped buffers.  For example,
@@ -2707,7 +2707,7 @@
 	 * the  page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 out:
 	ret = mpage_writepage(page, get_block, wbc);
 	if (ret == -EAGAIN)
@@ -2720,8 +2720,8 @@
 int nobh_truncate_page(struct address_space *mapping,
 			loff_t from, get_block_t *get_block)
 {
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize;
 	sector_t iblock;
 	unsigned length, pos;
@@ -2738,7 +2738,7 @@
 		return 0;
 
 	length = blocksize - length;
-	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
 
 	page = grab_cache_page(mapping, index);
 	err = -ENOMEM;
@@ -2748,7 +2748,7 @@
 	if (page_has_buffers(page)) {
 has_buffers:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return block_truncate_page(mapping, from, get_block);
 	}
 
@@ -2772,7 +2772,7 @@
 	if (!PageUptodate(page)) {
 		err = mapping->a_ops->readpage(NULL, page);
 		if (err) {
-			page_cache_release(page);
+			put_page(page);
 			goto out;
 		}
 		lock_page(page);
@@ -2789,7 +2789,7 @@
 
 unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out:
 	return err;
 }
@@ -2798,8 +2798,8 @@
 int block_truncate_page(struct address_space *mapping,
 			loff_t from, get_block_t *get_block)
 {
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize;
 	sector_t iblock;
 	unsigned length, pos;
@@ -2816,7 +2816,7 @@
 		return 0;
 
 	length = blocksize - length;
-	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
 	
 	page = grab_cache_page(mapping, index);
 	err = -ENOMEM;
@@ -2865,7 +2865,7 @@
 
 unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out:
 	return err;
 }
@@ -2879,7 +2879,7 @@
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
-	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	const pgoff_t end_index = i_size >> PAGE_SHIFT;
 	unsigned offset;
 
 	/* Is the page fully inside i_size? */
@@ -2888,14 +2888,14 @@
 					       end_buffer_async_write);
 
 	/* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_CACHE_SIZE-1);
+	offset = i_size & (PAGE_SIZE-1);
 	if (page->index >= end_index+1 || !offset) {
 		/*
 		 * The page may have dirty, unmapped buffers.  For example,
 		 * they may have been added in ext3_writepage().  Make them
 		 * freeable here, so the page does not leak.
 		 */
-		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		do_invalidatepage(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		return 0; /* don't care */
 	}
@@ -2907,7 +2907,7 @@
 	 * the  page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 	return __block_write_full_page(inode, page, get_block, wbc,
 							end_buffer_async_write);
 }
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c0f3da3..afbdc41 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -194,10 +194,10 @@
 			error = -EIO;
 		}
 
-		page_cache_release(monitor->back_page);
+		put_page(monitor->back_page);
 
 		fscache_end_io(op, monitor->netfs_page, error);
-		page_cache_release(monitor->netfs_page);
+		put_page(monitor->netfs_page);
 		fscache_retrieval_complete(op, 1);
 		fscache_put_retrieval(op);
 		kfree(monitor);
@@ -288,8 +288,8 @@
 	_debug("- monitor add");
 
 	/* install the monitor */
-	page_cache_get(monitor->netfs_page);
-	page_cache_get(backpage);
+	get_page(monitor->netfs_page);
+	get_page(backpage);
 	monitor->back_page = backpage;
 	monitor->monitor.private = backpage;
 	add_page_wait_queue(backpage, &monitor->monitor);
@@ -310,7 +310,7 @@
 	_debug("- present");
 
 	if (newpage) {
-		page_cache_release(newpage);
+		put_page(newpage);
 		newpage = NULL;
 	}
 
@@ -342,7 +342,7 @@
 
 out:
 	if (backpage)
-		page_cache_release(backpage);
+		put_page(backpage);
 	if (monitor) {
 		fscache_put_retrieval(monitor->op);
 		kfree(monitor);
@@ -363,7 +363,7 @@
 	goto out;
 
 nomem_page:
-	page_cache_release(newpage);
+	put_page(newpage);
 nomem_monitor:
 	fscache_put_retrieval(monitor->op);
 	kfree(monitor);
@@ -530,7 +530,7 @@
 					    netpage->index, cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
-				page_cache_release(netpage);
+				put_page(netpage);
 				fscache_retrieval_complete(op, 1);
 				continue;
 			}
@@ -538,10 +538,10 @@
 		}
 
 		/* install a monitor */
-		page_cache_get(netpage);
+		get_page(netpage);
 		monitor->netfs_page = netpage;
 
-		page_cache_get(backpage);
+		get_page(backpage);
 		monitor->back_page = backpage;
 		monitor->monitor.private = backpage;
 		add_page_wait_queue(backpage, &monitor->monitor);
@@ -555,10 +555,10 @@
 			unlock_page(backpage);
 		}
 
-		page_cache_release(backpage);
+		put_page(backpage);
 		backpage = NULL;
 
-		page_cache_release(netpage);
+		put_page(netpage);
 		netpage = NULL;
 		continue;
 
@@ -603,7 +603,7 @@
 					    netpage->index, cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
-				page_cache_release(netpage);
+				put_page(netpage);
 				fscache_retrieval_complete(op, 1);
 				continue;
 			}
@@ -612,14 +612,14 @@
 
 		copy_highpage(netpage, backpage);
 
-		page_cache_release(backpage);
+		put_page(backpage);
 		backpage = NULL;
 
 		fscache_mark_page_cached(op, netpage);
 
 		/* the netpage is unlocked and marked up to date here */
 		fscache_end_io(op, netpage, 0);
-		page_cache_release(netpage);
+		put_page(netpage);
 		netpage = NULL;
 		fscache_retrieval_complete(op, 1);
 		continue;
@@ -632,11 +632,11 @@
 out:
 	/* tidy up */
 	if (newpage)
-		page_cache_release(newpage);
+		put_page(newpage);
 	if (netpage)
-		page_cache_release(netpage);
+		put_page(netpage);
 	if (backpage)
-		page_cache_release(backpage);
+		put_page(backpage);
 	if (monitor) {
 		fscache_put_retrieval(op);
 		kfree(monitor);
@@ -644,7 +644,7 @@
 
 	list_for_each_entry_safe(netpage, _n, list, lru) {
 		list_del(&netpage->lru);
-		page_cache_release(netpage);
+		put_page(netpage);
 		fscache_retrieval_complete(op, 1);
 	}
 
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index f197084..4f67227 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -37,6 +37,8 @@
 	spin_lock(&ci->i_ceph_lock);
 	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0))
 		set_cached_acl(inode, type, acl);
+	else
+		forget_cached_acl(inode, type);
 	spin_unlock(&ci->i_ceph_lock);
 }
 
@@ -88,7 +90,6 @@
 	char *value = NULL;
 	struct iattr newattrs;
 	umode_t new_mode = inode->i_mode, old_mode = inode->i_mode;
-	struct dentry *dentry;
 
 	switch (type) {
 	case ACL_TYPE_ACCESS:
@@ -126,29 +127,26 @@
 			goto out_free;
 	}
 
-	dentry = d_find_alias(inode);
 	if (new_mode != old_mode) {
 		newattrs.ia_mode = new_mode;
 		newattrs.ia_valid = ATTR_MODE;
-		ret = ceph_setattr(dentry, &newattrs);
+		ret = __ceph_setattr(inode, &newattrs);
 		if (ret)
-			goto out_dput;
+			goto out_free;
 	}
 
-	ret = __ceph_setxattr(dentry, name, value, size, 0);
+	ret = __ceph_setxattr(inode, name, value, size, 0);
 	if (ret) {
 		if (new_mode != old_mode) {
 			newattrs.ia_mode = old_mode;
 			newattrs.ia_valid = ATTR_MODE;
-			ceph_setattr(dentry, &newattrs);
+			__ceph_setattr(inode, &newattrs);
 		}
-		goto out_dput;
+		goto out_free;
 	}
 
 	ceph_set_cached_acl(inode, type, acl);
 
-out_dput:
-	dput(dentry);
 out_free:
 	kfree(value);
 out:
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index fc5cae2..43098cd 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -143,7 +143,7 @@
 	inode = page->mapping->host;
 	ci = ceph_inode(inode);
 
-	if (offset != 0 || length != PAGE_CACHE_SIZE) {
+	if (offset != 0 || length != PAGE_SIZE) {
 		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
 		     inode, page, page->index, offset, length);
 		return;
@@ -197,10 +197,10 @@
 		&ceph_inode_to_client(inode)->client->osdc;
 	int err = 0;
 	u64 off = page_offset(page);
-	u64 len = PAGE_CACHE_SIZE;
+	u64 len = PAGE_SIZE;
 
 	if (off >= i_size_read(inode)) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
@@ -212,7 +212,7 @@
 		 */
 		if (off == 0)
 			return -EINVAL;
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
@@ -234,9 +234,9 @@
 		ceph_fscache_readpage_cancel(inode, page);
 		goto out;
 	}
-	if (err < PAGE_CACHE_SIZE)
+	if (err < PAGE_SIZE)
 		/* zero fill remainder of page */
-		zero_user_segment(page, err, PAGE_CACHE_SIZE);
+		zero_user_segment(page, err, PAGE_SIZE);
 	else
 		flush_dcache_page(page);
 
@@ -278,10 +278,10 @@
 
 		if (rc < 0 && rc != -ENOENT)
 			goto unlock;
-		if (bytes < (int)PAGE_CACHE_SIZE) {
+		if (bytes < (int)PAGE_SIZE) {
 			/* zero (remainder of) page */
 			int s = bytes < 0 ? 0 : bytes;
-			zero_user_segment(page, s, PAGE_CACHE_SIZE);
+			zero_user_segment(page, s, PAGE_SIZE);
 		}
  		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
 		     page->index);
@@ -290,8 +290,8 @@
 		ceph_readpage_to_fscache(inode, page);
 unlock:
 		unlock_page(page);
-		page_cache_release(page);
-		bytes -= PAGE_CACHE_SIZE;
+		put_page(page);
+		bytes -= PAGE_SIZE;
 	}
 	kfree(osd_data->pages);
 }
@@ -336,7 +336,7 @@
 		if (max && nr_pages == max)
 			break;
 	}
-	len = nr_pages << PAGE_CACHE_SHIFT;
+	len = nr_pages << PAGE_SHIFT;
 	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
 	     off, len);
 	vino = ceph_vino(inode);
@@ -364,7 +364,7 @@
 		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
 					  GFP_KERNEL)) {
 			ceph_fscache_uncache_page(inode, page);
-			page_cache_release(page);
+			put_page(page);
 			dout("start_read %p add_to_page_cache failed %p\n",
 			     inode, page);
 			nr_pages = i;
@@ -415,8 +415,8 @@
 	if (rc == 0)
 		goto out;
 
-	if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
-		max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
+	if (fsc->mount_options->rsize >= PAGE_SIZE)
+		max = (fsc->mount_options->rsize + PAGE_SIZE - 1)
 			>> PAGE_SHIFT;
 
 	dout("readpages %p file %p nr_pages %d max %d\n", inode,
@@ -484,7 +484,7 @@
 	long writeback_stat;
 	u64 truncate_size;
 	u32 truncate_seq;
-	int err = 0, len = PAGE_CACHE_SIZE;
+	int err = 0, len = PAGE_SIZE;
 
 	dout("writepage %p idx %lu\n", page, page->index);
 
@@ -725,9 +725,9 @@
 	}
 	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
 		wsize = fsc->mount_options->wsize;
-	if (wsize < PAGE_CACHE_SIZE)
-		wsize = PAGE_CACHE_SIZE;
-	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
+	if (wsize < PAGE_SIZE)
+		wsize = PAGE_SIZE;
+	max_pages_ever = wsize >> PAGE_SHIFT;
 
 	pagevec_init(&pvec, 0);
 
@@ -737,8 +737,8 @@
 		end = -1;
 		dout(" cyclic, start at %lu\n", start);
 	} else {
-		start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		start = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		should_loop = 0;
@@ -887,7 +887,7 @@
 
 				num_ops = 1 + do_sync;
 				strip_unit_end = page->index +
-					((len - 1) >> PAGE_CACHE_SHIFT);
+					((len - 1) >> PAGE_SHIFT);
 
 				BUG_ON(pages);
 				max_pages = calc_pages_for(0, (u64)len);
@@ -901,7 +901,7 @@
 
 				len = 0;
 			} else if (page->index !=
-				   (offset + len) >> PAGE_CACHE_SHIFT) {
+				   (offset + len) >> PAGE_SHIFT) {
 				if (num_ops >= (pool ?  CEPH_OSD_SLAB_OPS :
 							CEPH_OSD_MAX_OPS)) {
 					redirty_page_for_writepage(wbc, page);
@@ -929,7 +929,7 @@
 
 			pages[locked_pages] = page;
 			locked_pages++;
-			len += PAGE_CACHE_SIZE;
+			len += PAGE_SIZE;
 		}
 
 		/* did we get anything? */
@@ -981,7 +981,7 @@
 			BUG_ON(IS_ERR(req));
 		}
 		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
-			     PAGE_CACHE_SIZE - offset);
+			     PAGE_SIZE - offset);
 
 		req->r_callback = writepages_finish;
 		req->r_inode = inode;
@@ -1011,7 +1011,7 @@
 			}
 
 			set_page_writeback(pages[i]);
-			len += PAGE_CACHE_SIZE;
+			len += PAGE_SIZE;
 		}
 
 		if (snap_size != -1) {
@@ -1020,7 +1020,7 @@
 			/* writepages_finish() clears writeback pages
 			 * according to the data length, so make sure
 			 * data length covers all locked pages */
-			u64 min_len = len + 1 - PAGE_CACHE_SIZE;
+			u64 min_len = len + 1 - PAGE_SIZE;
 			len = min(len, (u64)i_size_read(inode) - offset);
 			len = max(len, min_len);
 		}
@@ -1135,8 +1135,8 @@
 {
 	struct inode *inode = file_inode(file);
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	loff_t page_off = pos & PAGE_CACHE_MASK;
-	int pos_in_page = pos & ~PAGE_CACHE_MASK;
+	loff_t page_off = pos & PAGE_MASK;
+	int pos_in_page = pos & ~PAGE_MASK;
 	int end_in_page = pos_in_page + len;
 	loff_t i_size;
 	int r;
@@ -1191,7 +1191,7 @@
 	}
 
 	/* full page? */
-	if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
+	if (pos_in_page == 0 && len == PAGE_SIZE)
 		return 0;
 
 	/* past end of file? */
@@ -1199,12 +1199,12 @@
 
 	if (page_off >= i_size ||
 	    (pos_in_page == 0 && (pos+len) >= i_size &&
-	     end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
+	     end_in_page - pos_in_page != PAGE_SIZE)) {
 		dout(" zeroing %p 0 - %d and %d - %d\n",
-		     page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
+		     page, pos_in_page, end_in_page, (int)PAGE_SIZE);
 		zero_user_segments(page,
 				   0, pos_in_page,
-				   end_in_page, PAGE_CACHE_SIZE);
+				   end_in_page, PAGE_SIZE);
 		return 0;
 	}
 
@@ -1228,7 +1228,7 @@
 {
 	struct inode *inode = file_inode(file);
 	struct page *page;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	int r;
 
 	do {
@@ -1242,7 +1242,7 @@
 
 		r = ceph_update_writeable_page(file, pos, len, page);
 		if (r < 0)
-			page_cache_release(page);
+			put_page(page);
 		else
 			*pagep = page;
 	} while (r == -EAGAIN);
@@ -1259,7 +1259,7 @@
 			  struct page *page, void *fsdata)
 {
 	struct inode *inode = file_inode(file);
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	int check_cap = 0;
 
 	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
@@ -1279,7 +1279,7 @@
 	set_page_dirty(page);
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (check_cap)
 		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
@@ -1292,8 +1292,7 @@
  * intercept O_DIRECT reads and writes early, this function should
  * never get called.
  */
-static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter,
-			      loff_t pos)
+static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
 {
 	WARN_ON(1);
 	return -EINVAL;
@@ -1322,11 +1321,11 @@
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_file_info *fi = vma->vm_file->private_data;
 	struct page *pinned_page = NULL;
-	loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT;
+	loff_t off = vmf->pgoff << PAGE_SHIFT;
 	int want, got, ret;
 
 	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
-	     inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE);
+	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
 		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
 	else
@@ -1343,7 +1342,7 @@
 		}
 	}
 	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
-	     inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got));
+	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
 
 	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
 	    ci->i_inline_version == CEPH_INLINE_NONE)
@@ -1352,16 +1351,16 @@
 		ret = -EAGAIN;
 
 	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
-	     inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret);
+	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
 	if (pinned_page)
-		page_cache_release(pinned_page);
+		put_page(pinned_page);
 	ceph_put_cap_refs(ci, got);
 
 	if (ret != -EAGAIN)
 		return ret;
 
 	/* read inline data */
-	if (off >= PAGE_CACHE_SIZE) {
+	if (off >= PAGE_SIZE) {
 		/* does not support inline data > PAGE_SIZE */
 		ret = VM_FAULT_SIGBUS;
 	} else {
@@ -1378,12 +1377,12 @@
 					 CEPH_STAT_CAP_INLINE_DATA, true);
 		if (ret1 < 0 || off >= i_size_read(inode)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			ret = VM_FAULT_SIGBUS;
 			goto out;
 		}
-		if (ret1 < PAGE_CACHE_SIZE)
-			zero_user_segment(page, ret1, PAGE_CACHE_SIZE);
+		if (ret1 < PAGE_SIZE)
+			zero_user_segment(page, ret1, PAGE_SIZE);
 		else
 			flush_dcache_page(page);
 		SetPageUptodate(page);
@@ -1392,7 +1391,7 @@
 	}
 out:
 	dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
-	     inode, off, (size_t)PAGE_CACHE_SIZE, ret);
+	     inode, off, (size_t)PAGE_SIZE, ret);
 	return ret;
 }
 
@@ -1430,10 +1429,10 @@
 		}
 	}
 
-	if (off + PAGE_CACHE_SIZE <= size)
-		len = PAGE_CACHE_SIZE;
+	if (off + PAGE_SIZE <= size)
+		len = PAGE_SIZE;
 	else
-		len = size & ~PAGE_CACHE_MASK;
+		len = size & ~PAGE_MASK;
 
 	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
 	     inode, ceph_vinop(inode), off, len, size);
@@ -1519,7 +1518,7 @@
 			return;
 		if (PageUptodate(page)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			return;
 		}
 	}
@@ -1534,14 +1533,14 @@
 	}
 
 	if (page != locked_page) {
-		if (len < PAGE_CACHE_SIZE)
-			zero_user_segment(page, len, PAGE_CACHE_SIZE);
+		if (len < PAGE_SIZE)
+			zero_user_segment(page, len, PAGE_SIZE);
 		else
 			flush_dcache_page(page);
 
 		SetPageUptodate(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
@@ -1578,7 +1577,7 @@
 				from_pagecache = true;
 				lock_page(page);
 			} else {
-				page_cache_release(page);
+				put_page(page);
 				page = NULL;
 			}
 		}
@@ -1586,8 +1585,8 @@
 
 	if (page) {
 		len = i_size_read(inode);
-		if (len > PAGE_CACHE_SIZE)
-			len = PAGE_CACHE_SIZE;
+		if (len > PAGE_SIZE)
+			len = PAGE_SIZE;
 	} else {
 		page = __page_cache_alloc(GFP_NOFS);
 		if (!page) {
@@ -1670,7 +1669,7 @@
 	if (page && page != locked_page) {
 		if (from_pagecache) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		} else
 			__free_pages(page, 0);
 	}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index de17bb2..cfaeef1 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2510,7 +2510,7 @@
 					*pinned_page = page;
 					break;
 				}
-				page_cache_release(page);
+				put_page(page);
 			}
 			/*
 			 * drop cap refs first because getattr while
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index fadc243..3ab1192 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -5,6 +5,7 @@
 #include <linux/namei.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/xattr.h>
 
 #include "super.h"
 #include "mds_client.h"
@@ -129,7 +130,7 @@
 	struct inode *dir = d_inode(parent);
 	struct dentry *dentry, *last = NULL;
 	struct ceph_dentry_info *di;
-	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *);
+	unsigned nsize = PAGE_SIZE / sizeof(struct dentry *);
 	int err = 0;
 	loff_t ptr_pos = 0;
 	struct ceph_readdir_cache_control cache_ctl = {};
@@ -154,7 +155,7 @@
 		}
 
 		err = -EAGAIN;
-		pgoff = ptr_pos >> PAGE_CACHE_SHIFT;
+		pgoff = ptr_pos >> PAGE_SHIFT;
 		if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
 			ceph_readdir_cache_release(&cache_ctl);
 			cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
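
The readdir dcache walk treats the directory's page cache as a flat array of dentry pointers, `PAGE_SIZE / sizeof(struct dentry *)` of them per page, so cached entry i lives in page i / nsize at slot i % nsize; fill_readdir_cache() in inode.c further down uses the same layout. A worked instance assuming 4 KiB pages and 8-byte pointers:

#include <stdio.h>

#define PAGE_SIZE 4096UL            /* assumed 4 KiB pages */

int main(void)
{
	unsigned long ptr_size = 8;                   /* assumed 64-bit pointers */
	unsigned long nsize = PAGE_SIZE / ptr_size;   /* 512 pointers per page   */
	unsigned long entry = 1000;                   /* 1000th cached dentry    */

	printf("page %lu, slot %lu of %lu\n",
	       entry / nsize, entry % nsize, nsize);  /* page 1, slot 488 */
	return 0;
}
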
@@ -1342,10 +1343,10 @@
 	.permission = ceph_permission,
 	.getattr = ceph_getattr,
 	.setattr = ceph_setattr,
-	.setxattr = ceph_setxattr,
-	.getxattr = ceph_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = ceph_listxattr,
-	.removexattr = ceph_removexattr,
+	.removexattr = generic_removexattr,
 	.get_acl = ceph_get_acl,
 	.set_acl = ceph_set_acl,
 	.mknod = ceph_mknod,
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ef38f01..4f1dc71 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -466,7 +466,7 @@
 			ret += zlen;
 		}
 
-		didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
+		didpages = (page_align + ret) >> PAGE_SHIFT;
 		pos += ret;
 		read = pos - off;
 		left -= ret;
@@ -806,8 +806,8 @@
 
 	if (write) {
 		ret = invalidate_inode_pages2_range(inode->i_mapping,
-					pos >> PAGE_CACHE_SHIFT,
-					(pos + count) >> PAGE_CACHE_SHIFT);
+					pos >> PAGE_SHIFT,
+					(pos + count) >> PAGE_SHIFT);
 		if (ret < 0)
 			dout("invalidate_inode_pages2_range returned %d\n", ret);
 
@@ -872,7 +872,7 @@
 			 * may block.
 			 */
 			truncate_inode_pages_range(inode->i_mapping, pos,
-					(pos+len) | (PAGE_CACHE_SIZE - 1));
+					(pos+len) | (PAGE_SIZE - 1));
 
 			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
 		}
@@ -1006,8 +1006,8 @@
 		return ret;
 
 	ret = invalidate_inode_pages2_range(inode->i_mapping,
-					    pos >> PAGE_CACHE_SHIFT,
-					    (pos + count) >> PAGE_CACHE_SHIFT);
+					    pos >> PAGE_SHIFT,
+					    (pos + count) >> PAGE_SHIFT);
 	if (ret < 0)
 		dout("invalidate_inode_pages2_range returned %d\n", ret);
 
@@ -1036,7 +1036,7 @@
 		 * write from beginning of first page,
 		 * regardless of io alignment
 		 */
-		num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
 		if (IS_ERR(pages)) {
@@ -1159,7 +1159,7 @@
 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
 	if (pinned_page) {
-		page_cache_release(pinned_page);
+		put_page(pinned_page);
 		pinned_page = NULL;
 	}
 	ceph_put_cap_refs(ci, got);
@@ -1188,10 +1188,10 @@
 		if (retry_op == READ_INLINE) {
 			BUG_ON(ret > 0 || read > 0);
 			if (iocb->ki_pos < i_size &&
-			    iocb->ki_pos < PAGE_CACHE_SIZE) {
+			    iocb->ki_pos < PAGE_SIZE) {
 				loff_t end = min_t(loff_t, i_size,
 						   iocb->ki_pos + len);
-				end = min_t(loff_t, end, PAGE_CACHE_SIZE);
+				end = min_t(loff_t, end, PAGE_SIZE);
 				if (statret < end)
 					zero_user_segment(page, statret, end);
 				ret = copy_page_to_iter(page,
@@ -1382,12 +1382,11 @@
 	     ceph_cap_string(got));
 	ceph_put_cap_refs(ci, got);
 
-	if (written >= 0 &&
-	    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
-	     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
-		err = vfs_fsync_range(file, pos, pos + written - 1, 1);
-		if (err < 0)
-			written = err;
+	if (written >= 0) {
+		if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))
+			iocb->ki_flags |= IOCB_DSYNC;
+
+		written = generic_write_sync(iocb, written);
 	}
 
 	goto out_unlocked;
@@ -1463,21 +1462,21 @@
 	struct inode *inode, loff_t offset, unsigned size)
 {
 	struct page *page;
-	pgoff_t index = offset >> PAGE_CACHE_SHIFT;
+	pgoff_t index = offset >> PAGE_SHIFT;
 
 	page = find_lock_page(inode->i_mapping, index);
 	if (page) {
 		wait_on_page_writeback(page);
-		zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
+		zero_user(page, offset & (PAGE_SIZE - 1), size);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
 				      loff_t length)
 {
-	loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
+	loff_t nearly = round_up(offset, PAGE_SIZE);
 	if (offset < nearly) {
 		loff_t size = nearly - offset;
 		if (length < size)
@@ -1486,8 +1485,8 @@
 		offset += size;
 		length -= size;
 	}
-	if (length >= PAGE_CACHE_SIZE) {
-		loff_t size = round_down(length, PAGE_CACHE_SIZE);
+	if (length >= PAGE_SIZE) {
+		loff_t size = round_down(length, PAGE_SIZE);
 		truncate_pagecache_range(inode, offset, offset + size - 1);
 		offset += size;
 		length -= size;
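
ceph_zero_pagecache_range() splits a punched range into three pieces: a partial head page zeroed in place, whole middle pages handed to truncate_pagecache_range(), and a partial tail page zeroed in place. A minimal userspace sketch of the same split with one worked range; 4 KiB pages are assumed and the helper names are illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096ULL                 /* assumed 4 KiB pages */

static unsigned long long round_up_page(unsigned long long x)
{
	return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

static unsigned long long round_down_page(unsigned long long x)
{
	return x & ~(PAGE_SIZE - 1);
}

int main(void)
{
	unsigned long long offset = 5000, length = 20000;  /* punch [5000, 25000) */

	unsigned long long nearly = round_up_page(offset);
	if (offset < nearly) {                    /* partial head page  */
		unsigned long long size = nearly - offset;
		if (length < size)
			size = length;
		printf("zero head [%llu, %llu)\n", offset, offset + size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {                /* whole middle pages */
		unsigned long long size = round_down_page(length);
		printf("truncate  [%llu, %llu)\n", offset, offset + size);
		offset += size;
		length -= size;
	}
	if (length)                               /* partial tail page  */
		printf("zero tail [%llu, %llu)\n", offset, offset + length);
	return 0;
}

Run on the range above it prints head [5000, 8192), truncate [8192, 24576), tail [24576, 25000), matching the three branches of the kernel function.
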
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ed58b16..e669cfa 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/writeback.h>
 #include <linux/vmalloc.h>
+#include <linux/xattr.h>
 #include <linux/posix_acl.h>
 #include <linux/random.h>
 
@@ -92,10 +93,10 @@
 	.permission = ceph_permission,
 	.setattr = ceph_setattr,
 	.getattr = ceph_getattr,
-	.setxattr = ceph_setxattr,
-	.getxattr = ceph_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = ceph_listxattr,
-	.removexattr = ceph_removexattr,
+	.removexattr = generic_removexattr,
 	.get_acl = ceph_get_acl,
 	.set_acl = ceph_set_acl,
 };
@@ -1338,7 +1339,7 @@
 {
 	if (ctl->page) {
 		kunmap(ctl->page);
-		page_cache_release(ctl->page);
+		put_page(ctl->page);
 		ctl->page = NULL;
 	}
 }
@@ -1348,7 +1349,7 @@
 			      struct ceph_mds_request *req)
 {
 	struct ceph_inode_info *ci = ceph_inode(dir);
-	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*);
+	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
 	unsigned idx = ctl->index % nsize;
 	pgoff_t pgoff = ctl->index / nsize;
 
@@ -1367,7 +1368,7 @@
 		unlock_page(ctl->page);
 		ctl->dentries = kmap(ctl->page);
 		if (idx == 0)
-			memset(ctl->dentries, 0, PAGE_CACHE_SIZE);
+			memset(ctl->dentries, 0, PAGE_SIZE);
 	}
 
 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
@@ -1770,22 +1771,18 @@
 	.get_link = simple_get_link,
 	.setattr = ceph_setattr,
 	.getattr = ceph_getattr,
-	.setxattr = ceph_setxattr,
-	.getxattr = ceph_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = ceph_listxattr,
-	.removexattr = ceph_removexattr,
+	.removexattr = generic_removexattr,
 };
 
-/*
- * setattr
- */
-int ceph_setattr(struct dentry *dentry, struct iattr *attr)
+int __ceph_setattr(struct inode *inode, struct iattr *attr)
 {
-	struct inode *inode = d_inode(dentry);
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	const unsigned int ia_valid = attr->ia_valid;
 	struct ceph_mds_request *req;
-	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
+	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 	struct ceph_cap_flush *prealloc_cf;
 	int issued;
 	int release = 0, dirtied = 0;
@@ -2010,6 +2007,14 @@
 }
 
 /*
+ * setattr
+ */
+int ceph_setattr(struct dentry *dentry, struct iattr *attr)
+{
+	return __ceph_setattr(d_inode(dentry), attr);
+}
+
+/*
  * Verify that we have a lease on the given mask.  If not,
  * do a getattr against an mds.
  */
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 44852c3..85b8517 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -386,9 +386,7 @@
 	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
 	if (atomic_dec_and_test(&s->s_ref)) {
 		if (s->s_auth.authorizer)
-			ceph_auth_destroy_authorizer(
-				s->s_mdsc->fsc->client->monc.auth,
-				s->s_auth.authorizer);
+			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
 		kfree(s);
 	}
 }
@@ -1610,7 +1608,7 @@
 	while (!list_empty(&tmp_list)) {
 		if (!msg) {
 			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
-					PAGE_CACHE_SIZE, GFP_NOFS, false);
+					PAGE_SIZE, GFP_NOFS, false);
 			if (!msg)
 				goto out_err;
 			head = msg->front.iov_base;
@@ -3900,7 +3898,7 @@
 	struct ceph_auth_handshake *auth = &s->s_auth;
 
 	if (force_new && auth->authorizer) {
-		ceph_auth_destroy_authorizer(ac, auth->authorizer);
+		ceph_auth_destroy_authorizer(auth->authorizer);
 		auth->authorizer = NULL;
 	}
 	if (!auth->authorizer) {
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 37712cc..ee69a53 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -97,7 +97,7 @@
 /*
  * cap releases are batched and sent to the MDS en masse.
  */
-#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE -			\
+#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE -			\
 				sizeof(struct ceph_mds_cap_release)) /	\
 			       sizeof(struct ceph_mds_cap_item))
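
CEPH_CAPS_PER_RELEASE packs as many fixed-size cap items after the message header as fit in one page; only the page constant's name changes here. A worked instance of the arithmetic; the 4-byte header and 24-byte item sizes below are assumptions for illustration, not the authoritative sizeof() values from ceph_fs.h:

#include <stdio.h>

#define PAGE_SIZE 4096UL    /* assumed 4 KiB pages */

int main(void)
{
	unsigned long head = 4;   /* assumed sizeof(struct ceph_mds_cap_release) */
	unsigned long item = 24;  /* assumed sizeof(struct ceph_mds_cap_item)    */

	/* (PAGE_SIZE - head) / item, mirroring CEPH_CAPS_PER_RELEASE */
	printf("%lu cap items per message\n", (PAGE_SIZE - head) / item); /* 170 */
	return 0;
}
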
 
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index c973043..f12d5e2 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -560,7 +560,7 @@
 
 	/* set up mempools */
 	err = -ENOMEM;
-	page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT;
+	page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
 	size = sizeof (struct page *) * (page_count ? page_count : 1);
 	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
 	if (!fsc->wb_pagevec_pool)
@@ -912,13 +912,13 @@
 	int err;
 
 	/* set ra_pages based on rasize mount option? */
-	if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE)
+	if (fsc->mount_options->rasize >= PAGE_SIZE)
 		fsc->backing_dev_info.ra_pages =
-			(fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1)
+			(fsc->mount_options->rasize + PAGE_SIZE - 1)
 			>> PAGE_SHIFT;
 	else
 		fsc->backing_dev_info.ra_pages =
-			VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
+			VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
 
 	err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
 			   atomic_long_inc_return(&bdi_seq));
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index e705c4d..7b99eb7 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -785,19 +785,15 @@
 	return __ceph_do_getattr(inode, NULL, mask, force);
 }
 extern int ceph_permission(struct inode *inode, int mask);
+extern int __ceph_setattr(struct inode *inode, struct iattr *attr);
 extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
 extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
 			struct kstat *stat);
 
 /* xattr.c */
-extern int ceph_setxattr(struct dentry *, const char *, const void *,
-			 size_t, int);
-int __ceph_setxattr(struct dentry *, const char *, const void *, size_t, int);
+int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
 ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
-int __ceph_removexattr(struct dentry *, const char *);
-extern ssize_t ceph_getxattr(struct dentry *, const char *, void *, size_t);
 extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
-extern int ceph_removexattr(struct dentry *, const char *);
 extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
 extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
 extern void __init ceph_xattr_init(void);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 9410abd..0d66722 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -16,6 +16,8 @@
 static int __remove_xattr(struct ceph_inode_info *ci,
 			  struct ceph_inode_xattr *xattr);
 
+const struct xattr_handler ceph_other_xattr_handler;
+
 /*
  * List of handlers for synthetic system.* attributes. Other
  * attributes are handled directly.
@@ -25,6 +27,7 @@
 	&posix_acl_access_xattr_handler,
 	&posix_acl_default_xattr_handler,
 #endif
+	&ceph_other_xattr_handler,
 	NULL,
 };
 
@@ -33,7 +36,6 @@
 	return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_SECURITY_PREFIX,
 			XATTR_SECURITY_PREFIX_LEN) ||
-	       !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
 }
@@ -496,19 +498,6 @@
 	return 0;
 }
 
-static int __remove_xattr_by_name(struct ceph_inode_info *ci,
-			   const char *name)
-{
-	struct rb_node **p;
-	struct ceph_inode_xattr *xattr;
-	int err;
-
-	p = &ci->i_xattrs.index.rb_node;
-	xattr = __get_xattr(ci, name);
-	err = __remove_xattr(ci, xattr);
-	return err;
-}
-
 static char *__copy_xattr_names(struct ceph_inode_info *ci,
 				char *dest)
 {
@@ -740,9 +729,6 @@
 	int req_mask;
 	int err;
 
-	if (!ceph_is_valid_xattr(name))
-		return -ENODATA;
-
 	/* let's see if a virtual xattr was requested */
 	vxattr = ceph_match_vxattr(inode, name);
 	if (vxattr) {
@@ -804,15 +790,6 @@
 	return err;
 }
 
-ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
-		      size_t size)
-{
-	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-		return generic_getxattr(dentry, name, value, size);
-
-	return __ceph_getxattr(d_inode(dentry), name, value, size);
-}
-
 ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
 {
 	struct inode *inode = d_inode(dentry);
@@ -877,11 +854,10 @@
 	return err;
 }
 
-static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
+static int ceph_sync_setxattr(struct inode *inode, const char *name,
 			      const char *value, size_t size, int flags)
 {
-	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
-	struct inode *inode = d_inode(dentry);
+	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_mds_request *req;
 	struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -939,13 +915,12 @@
 	return err;
 }
 
-int __ceph_setxattr(struct dentry *dentry, const char *name,
+int __ceph_setxattr(struct inode *inode, const char *name,
 			const void *value, size_t size, int flags)
 {
-	struct inode *inode = d_inode(dentry);
 	struct ceph_vxattr *vxattr;
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
+	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 	struct ceph_cap_flush *prealloc_cf = NULL;
 	int issued;
 	int err;
@@ -958,8 +933,8 @@
 	int required_blob_size;
 	bool lock_snap_rwsem = false;
 
-	if (!ceph_is_valid_xattr(name))
-		return -EOPNOTSUPP;
+	if (ceph_snap(inode) != CEPH_NOSNAP)
+		return -EROFS;
 
 	vxattr = ceph_match_vxattr(inode, name);
 	if (vxattr && vxattr->readonly)
@@ -1056,7 +1031,7 @@
 				    "during filling trace\n", inode);
 		err = -EBUSY;
 	} else {
-		err = ceph_sync_setxattr(dentry, name, value, size, flags);
+		err = ceph_sync_setxattr(inode, name, value, size, flags);
 	}
 out:
 	ceph_free_cap_flush(prealloc_cf);
@@ -1066,147 +1041,30 @@
 	return err;
 }
 
-int ceph_setxattr(struct dentry *dentry, const char *name,
-		  const void *value, size_t size, int flags)
+static int ceph_get_xattr_handler(const struct xattr_handler *handler,
+				  struct dentry *dentry, struct inode *inode,
+				  const char *name, void *value, size_t size)
 {
-	if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
-		return -EROFS;
-
-	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-		return generic_setxattr(dentry, name, value, size, flags);
-
-	if (size == 0)
-		value = "";  /* empty EA, do not remove */
-
-	return __ceph_setxattr(dentry, name, value, size, flags);
-}
-
-static int ceph_send_removexattr(struct dentry *dentry, const char *name)
-{
-	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
-	struct ceph_mds_client *mdsc = fsc->mdsc;
-	struct inode *inode = d_inode(dentry);
-	struct ceph_mds_request *req;
-	int err;
-
-	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
-				       USE_AUTH_MDS);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
-	req->r_path2 = kstrdup(name, GFP_NOFS);
-	if (!req->r_path2)
-		return -ENOMEM;
-
-	req->r_inode = inode;
-	ihold(inode);
-	req->r_num_caps = 1;
-	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
-	err = ceph_mdsc_do_request(mdsc, NULL, req);
-	ceph_mdsc_put_request(req);
-	return err;
-}
-
-int __ceph_removexattr(struct dentry *dentry, const char *name)
-{
-	struct inode *inode = d_inode(dentry);
-	struct ceph_vxattr *vxattr;
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
-	struct ceph_cap_flush *prealloc_cf = NULL;
-	int issued;
-	int err;
-	int required_blob_size;
-	int dirty;
-	bool lock_snap_rwsem = false;
-
 	if (!ceph_is_valid_xattr(name))
 		return -EOPNOTSUPP;
-
-	vxattr = ceph_match_vxattr(inode, name);
-	if (vxattr && vxattr->readonly)
-		return -EOPNOTSUPP;
-
-	/* pass any unhandled ceph.* xattrs through to the MDS */
-	if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
-		goto do_sync_unlocked;
-
-	prealloc_cf = ceph_alloc_cap_flush();
-	if (!prealloc_cf)
-		return -ENOMEM;
-
-	err = -ENOMEM;
-	spin_lock(&ci->i_ceph_lock);
-retry:
-	issued = __ceph_caps_issued(ci, NULL);
-	if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
-		goto do_sync;
-
-	if (!lock_snap_rwsem && !ci->i_head_snapc) {
-		lock_snap_rwsem = true;
-		if (!down_read_trylock(&mdsc->snap_rwsem)) {
-			spin_unlock(&ci->i_ceph_lock);
-			down_read(&mdsc->snap_rwsem);
-			spin_lock(&ci->i_ceph_lock);
-			goto retry;
-		}
-	}
-
-	dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
-
-	__build_xattrs(inode);
-
-	required_blob_size = __get_required_blob_size(ci, 0, 0);
-
-	if (!ci->i_xattrs.prealloc_blob ||
-	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
-		struct ceph_buffer *blob;
-
-		spin_unlock(&ci->i_ceph_lock);
-		dout(" preaallocating new blob size=%d\n", required_blob_size);
-		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
-		if (!blob)
-			goto do_sync_unlocked;
-		spin_lock(&ci->i_ceph_lock);
-		if (ci->i_xattrs.prealloc_blob)
-			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
-		ci->i_xattrs.prealloc_blob = blob;
-		goto retry;
-	}
-
-	err = __remove_xattr_by_name(ceph_inode(inode), name);
-
-	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
-				       &prealloc_cf);
-	ci->i_xattrs.dirty = true;
-	inode->i_ctime = current_fs_time(inode->i_sb);
-	spin_unlock(&ci->i_ceph_lock);
-	if (lock_snap_rwsem)
-		up_read(&mdsc->snap_rwsem);
-	if (dirty)
-		__mark_inode_dirty(inode, dirty);
-	ceph_free_cap_flush(prealloc_cf);
-	return err;
-do_sync:
-	spin_unlock(&ci->i_ceph_lock);
-do_sync_unlocked:
-	if (lock_snap_rwsem)
-		up_read(&mdsc->snap_rwsem);
-	ceph_free_cap_flush(prealloc_cf);
-	err = ceph_send_removexattr(dentry, name);
-	return err;
+	return __ceph_getxattr(inode, name, value, size);
 }
 
-int ceph_removexattr(struct dentry *dentry, const char *name)
+static int ceph_set_xattr_handler(const struct xattr_handler *handler,
+				  struct dentry *dentry, const char *name,
+				  const void *value, size_t size, int flags)
 {
-	if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
-		return -EROFS;
-
-	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-		return generic_removexattr(dentry, name);
-
-	return __ceph_removexattr(dentry, name);
+	if (!ceph_is_valid_xattr(name))
+		return -EOPNOTSUPP;
+	return __ceph_setxattr(d_inode(dentry), name, value, size, flags);
 }
 
+const struct xattr_handler ceph_other_xattr_handler = {
+	.prefix = "",  /* match any name => handlers called with full name */
+	.get = ceph_get_xattr_handler,
+	.set = ceph_set_xattr_handler,
+};
+
 #ifdef CONFIG_SECURITY
 bool ceph_security_xattr_wanted(struct inode *in)
 {
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 1964d21..eed7eb0 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -5,9 +5,10 @@
 
 cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
 	  link.o misc.o netmisc.o smbencrypt.o transport.o asn1.o \
-	  cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
+	  cifs_unicode.o nterr.o cifsencrypt.o \
 	  readdir.o ioctl.o sess.o export.o smb1ops.o winucase.o
 
+cifs-$(CONFIG_CIFS_XATTR) += xattr.o
 cifs-$(CONFIG_CIFS_ACL) += cifsacl.o
 
 cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index e956cba..ec9dbbc 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -151,8 +151,12 @@
 	if (sb_mountdata == NULL)
 		return ERR_PTR(-EINVAL);
 
-	if (strlen(fullpath) - ref->path_consumed)
+	if (strlen(fullpath) - ref->path_consumed) {
 		prepath = fullpath + ref->path_consumed;
+		/* skip initial delimiter */
+		if (*prepath == '/' || *prepath == '\\')
+			prepath++;
+	}
 
 	*devname = cifs_build_devname(ref->node_name, prepath);
 	if (IS_ERR(*devname)) {
@@ -302,7 +306,7 @@
 	if (full_path == NULL)
 		goto cdda_exit;
 
-	cifs_sb = CIFS_SB(d_inode(mntpt)->i_sb);
+	cifs_sb = CIFS_SB(mntpt->d_sb);
 	tlink = cifs_sb_tlink(cifs_sb);
 	if (IS_ERR(tlink)) {
 		mnt = ERR_CAST(tlink);
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 3f9312591..71e8a56 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -360,7 +360,7 @@
 				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
 				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
 				KEY_USR_VIEW | KEY_USR_READ,
-				KEY_ALLOC_NOT_IN_QUOTA, NULL);
+				KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
 	if (IS_ERR(keyring)) {
 		ret = PTR_ERR(keyring);
 		goto failed_put_cred;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 4897dac..6aeb8d4 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -66,6 +66,60 @@
 	return 0;
 }
 
+int __cifs_calc_signature(struct smb_rqst *rqst,
+			struct TCP_Server_Info *server, char *signature,
+			struct shash_desc *shash)
+{
+	int i;
+	int rc;
+	struct kvec *iov = rqst->rq_iov;
+	int n_vec = rqst->rq_nvec;
+
+	for (i = 0; i < n_vec; i++) {
+		if (iov[i].iov_len == 0)
+			continue;
+		if (iov[i].iov_base == NULL) {
+			cifs_dbg(VFS, "null iovec entry\n");
+			return -EIO;
+		}
+		/* The first entry includes a length field (which does not get
+		   signed) that occupies the first 4 bytes before the header */
+		if (i == 0) {
+			if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
+				break; /* nothing to sign or corrupt header */
+			rc = crypto_shash_update(shash,
+				iov[i].iov_base + 4, iov[i].iov_len - 4);
+		} else {
+			rc = crypto_shash_update(shash,
+				iov[i].iov_base, iov[i].iov_len);
+		}
+		if (rc) {
+			cifs_dbg(VFS, "%s: Could not update with payload\n",
+				 __func__);
+			return rc;
+		}
+	}
+
+	/* now hash over the rq_pages array */
+	for (i = 0; i < rqst->rq_npages; i++) {
+		void *kaddr = kmap(rqst->rq_pages[i]);
+		size_t len = rqst->rq_pagesz;
+
+		if (i == rqst->rq_npages - 1)
+			len = rqst->rq_tailsz;
+
+		crypto_shash_update(shash, kaddr, len);
+
+		kunmap(rqst->rq_pages[i]);
+	}
+
+	rc = crypto_shash_final(shash, signature);
+	if (rc)
+		cifs_dbg(VFS, "%s: Could not generate hash\n", __func__);
+
+	return rc;
+}
+
 /*
  * Calculate and return the CIFS signature based on the mac key and SMB PDU.
  * The 16 byte signature must be allocated by the caller. Note we only use the
@@ -76,12 +130,9 @@
 static int cifs_calc_signature(struct smb_rqst *rqst,
 			struct TCP_Server_Info *server, char *signature)
 {
-	int i;
 	int rc;
-	struct kvec *iov = rqst->rq_iov;
-	int n_vec = rqst->rq_nvec;
 
-	if (iov == NULL || signature == NULL || server == NULL)
+	if (!rqst->rq_iov || !signature || !server)
 		return -EINVAL;
 
 	if (!server->secmech.sdescmd5) {
@@ -105,48 +156,8 @@
 		return rc;
 	}
 
-	for (i = 0; i < n_vec; i++) {
-		if (iov[i].iov_len == 0)
-			continue;
-		if (iov[i].iov_base == NULL) {
-			cifs_dbg(VFS, "null iovec entry\n");
-			return -EIO;
-		}
-		/* The first entry includes a length field (which does not get
-		   signed that occupies the first 4 bytes before the header */
-		if (i == 0) {
-			if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
-				break; /* nothing to sign or corrupt header */
-			rc =
-			crypto_shash_update(&server->secmech.sdescmd5->shash,
-				iov[i].iov_base + 4, iov[i].iov_len - 4);
-		} else {
-			rc =
-			crypto_shash_update(&server->secmech.sdescmd5->shash,
-				iov[i].iov_base, iov[i].iov_len);
-		}
-		if (rc) {
-			cifs_dbg(VFS, "%s: Could not update with payload\n",
-				 __func__);
-			return rc;
-		}
-	}
-
-	/* now hash over the rq_pages array */
-	for (i = 0; i < rqst->rq_npages; i++) {
-		struct kvec p_iov;
-
-		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
-		crypto_shash_update(&server->secmech.sdescmd5->shash,
-					p_iov.iov_base, p_iov.iov_len);
-		kunmap(rqst->rq_pages[i]);
-	}
-
-	rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
-	if (rc)
-		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
-
-	return rc;
+	return __cifs_calc_signature(rqst, server, signature,
+				     &server->secmech.sdescmd5->shash);
 }
 
 /* must be called with server->srv_mutex held */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 1d86fc6..08fa36e 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -37,6 +37,7 @@
 #include <linux/freezer.h>
 #include <linux/namei.h>
 #include <linux/random.h>
+#include <linux/xattr.h>
 #include <net/ipv6.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -135,6 +136,7 @@
 
 	sb->s_magic = CIFS_MAGIC_NUMBER;
 	sb->s_op = &cifs_super_ops;
+	sb->s_xattr = cifs_xattr_handlers;
 	sb->s_bdi = &cifs_sb->bdi;
 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
@@ -892,12 +894,10 @@
 	.setattr = cifs_setattr,
 	.symlink = cifs_symlink,
 	.mknod   = cifs_mknod,
-#ifdef CONFIG_CIFS_XATTR
-	.setxattr = cifs_setxattr,
-	.getxattr = cifs_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = cifs_listxattr,
-	.removexattr = cifs_removexattr,
-#endif
+	.removexattr = generic_removexattr,
 };
 
 const struct inode_operations cifs_file_inode_ops = {
@@ -905,12 +905,10 @@
 	.setattr = cifs_setattr,
 	.getattr = cifs_getattr, /* do we need this anymore? */
 	.permission = cifs_permission,
-#ifdef CONFIG_CIFS_XATTR
-	.setxattr = cifs_setxattr,
-	.getxattr = cifs_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = cifs_listxattr,
-	.removexattr = cifs_removexattr,
-#endif
+	.removexattr = generic_removexattr,
 };
 
 const struct inode_operations cifs_symlink_inode_ops = {
@@ -920,12 +918,10 @@
 	/* BB add the following two eventually */
 	/* revalidate: cifs_revalidate,
 	   setattr:    cifs_notify_change, *//* BB do we need notify change */
-#ifdef CONFIG_CIFS_XATTR
-	.setxattr = cifs_setxattr,
-	.getxattr = cifs_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = cifs_listxattr,
-	.removexattr = cifs_removexattr,
-#endif
+	.removexattr = generic_removexattr,
 };
 
 static int cifs_clone_file_range(struct file *src_file, loff_t off,
@@ -962,7 +958,7 @@
 	cifs_dbg(FYI, "about to flush pages\n");
 	/* should we flush first and last page first */
 	truncate_inode_pages_range(&target_inode->i_data, destoff,
-				   PAGE_CACHE_ALIGN(destoff + len)-1);
+				   PAGE_ALIGN(destoff + len)-1);
 
 	if (target_tcon->ses->server->ops->duplicate_extents)
 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
@@ -1083,7 +1079,7 @@
 };
 
 const struct file_operations cifs_dir_ops = {
-	.iterate = cifs_readdir,
+	.iterate_shared = cifs_readdir,
 	.release = cifs_closedir,
 	.read    = generic_read_dir,
 	.unlocked_ioctl  = cifs_ioctl,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 83aac8b..9dcf974 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -120,15 +120,19 @@
 			struct delayed_call *);
 extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
 			const char *symname);
-extern int	cifs_removexattr(struct dentry *, const char *);
-extern int	cifs_setxattr(struct dentry *, const char *, const void *,
-			size_t, int);
-extern ssize_t	cifs_getxattr(struct dentry *, const char *, void *, size_t);
+
+#ifdef CONFIG_CIFS_XATTR
+extern const struct xattr_handler *cifs_xattr_handlers[];
 extern ssize_t	cifs_listxattr(struct dentry *, char *, size_t);
+#else
+# define cifs_xattr_handlers NULL
+# define cifs_listxattr NULL
+#endif
+
 extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_CIFS_NFSD_EXPORT
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.08"
+#define CIFS_VERSION   "2.09"
 #endif				/* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d21da9f..bba106c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -615,8 +615,6 @@
 	bool	sec_mskerberos;		/* supports legacy MS Kerberos */
 	bool	large_buf;		/* is current buffer large? */
 	struct delayed_work	echo; /* echo ping workqueue job */
-	struct kvec *iov;	/* reusable kvec array for receives */
-	unsigned int nr_iov;	/* number of kvecs in array */
 	char	*smallbuf;	/* pointer to current "small" buffer */
 	char	*bigbuf;	/* pointer to current "big" buffer */
 	unsigned int total_read; /* total amount of data read in this pass */
@@ -714,7 +712,7 @@
  *
  * Note that this might make for "interesting" allocation problems during
  * writeback however as we have to allocate an array of pointers for the
- * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
  *
  * For reads, there is a similar problem as we need to allocate an array
  * of kvecs to handle the receive, though that should only need to be done
@@ -733,7 +731,7 @@
 
 /*
  * The default wsize is 1M. find_get_pages seems to return a maximum of 256
- * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
+ * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
  * a single wsize request with a single call.
  */
 #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index eed7ff5..0f9a6bc 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -37,8 +37,6 @@
 extern struct smb_hdr *cifs_small_buf_get(void);
 extern void cifs_small_buf_release(void *);
 extern void free_rsp_buf(int, void *);
-extern void cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
-					struct kvec *iov);
 extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
 			unsigned int /* length */);
 extern unsigned int _get_xid(void);
@@ -181,10 +179,9 @@
 
 extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
 extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
-		     unsigned int to_read);
-extern int cifs_readv_from_socket(struct TCP_Server_Info *server,
-		struct kvec *iov_orig, unsigned int nr_segs,
-		unsigned int to_read);
+			         unsigned int to_read);
+extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
+				      struct page *page, unsigned int to_read);
 extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 			       struct cifs_sb_info *cifs_sb);
 extern int cifs_match_super(struct super_block *, void *);
@@ -512,4 +509,7 @@
 			   struct cifs_sb_info *cifs_sb,
 			   const unsigned char *path, char *pbuf,
 			   unsigned int *pbytes_written);
+int __cifs_calc_signature(struct smb_rqst *rqst,
+			struct TCP_Server_Info *server, char *signature,
+			struct shash_desc *shash);
 #endif			/* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 76fcb50..d47197e 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1447,10 +1447,8 @@
 	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
 							HEADER_SIZE(server) + 1;
 
-	rdata->iov.iov_base = buf + HEADER_SIZE(server) - 1;
-	rdata->iov.iov_len = len;
-
-	length = cifs_readv_from_socket(server, &rdata->iov, 1, len);
+	length = cifs_read_from_socket(server,
+				       buf + HEADER_SIZE(server) - 1, len);
 	if (length < 0)
 		return length;
 	server->total_read += length;
@@ -1502,9 +1500,8 @@
 	len = data_offset - server->total_read;
 	if (len > 0) {
 		/* read any junk before data into the rest of smallbuf */
-		rdata->iov.iov_base = buf + server->total_read;
-		rdata->iov.iov_len = len;
-		length = cifs_readv_from_socket(server, &rdata->iov, 1, len);
+		length = cifs_read_from_socket(server,
+					       buf + server->total_read, len);
 		if (length < 0)
 			return length;
 		server->total_read += length;
@@ -1929,17 +1926,17 @@
 
 		wsize = server->ops->wp_retry_size(inode);
 		if (wsize < rest_len) {
-			nr_pages = wsize / PAGE_CACHE_SIZE;
+			nr_pages = wsize / PAGE_SIZE;
 			if (!nr_pages) {
 				rc = -ENOTSUPP;
 				break;
 			}
-			cur_len = nr_pages * PAGE_CACHE_SIZE;
-			tailsz = PAGE_CACHE_SIZE;
+			cur_len = nr_pages * PAGE_SIZE;
+			tailsz = PAGE_SIZE;
 		} else {
-			nr_pages = DIV_ROUND_UP(rest_len, PAGE_CACHE_SIZE);
+			nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
 			cur_len = rest_len;
-			tailsz = rest_len - (nr_pages - 1) * PAGE_CACHE_SIZE;
+			tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
 		}
 
 		wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
@@ -1957,7 +1954,7 @@
 		wdata2->sync_mode = wdata->sync_mode;
 		wdata2->nr_pages = nr_pages;
 		wdata2->offset = page_offset(wdata2->pages[0]);
-		wdata2->pagesz = PAGE_CACHE_SIZE;
+		wdata2->pagesz = PAGE_SIZE;
 		wdata2->tailsz = tailsz;
 		wdata2->bytes = cur_len;
 
@@ -1975,7 +1972,7 @@
 			if (rc != 0 && rc != -EAGAIN) {
 				SetPageError(wdata2->pages[j]);
 				end_page_writeback(wdata2->pages[j]);
-				page_cache_release(wdata2->pages[j]);
+				put_page(wdata2->pages[j]);
 			}
 		}
 
@@ -2018,7 +2015,7 @@
 		else if (wdata->result < 0)
 			SetPageError(page);
 		end_page_writeback(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	if (wdata->result != -EAGAIN)
 		mapping_set_error(inode->i_mapping, wdata->result);
@@ -3366,7 +3363,7 @@
 	if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
 		return -EOPNOTSUPP;
 
-	if (acl_type & ACL_TYPE_ACCESS) {
+	if (acl_type == ACL_TYPE_ACCESS) {
 		count = le16_to_cpu(cifs_acl->access_entry_count);
 		pACE = &cifs_acl->ace_array[0];
 		size = sizeof(struct cifs_posix_acl);
@@ -3377,7 +3374,7 @@
 				 size_of_data_area, size);
 			return -EINVAL;
 		}
-	} else if (acl_type & ACL_TYPE_DEFAULT) {
+	} else if (acl_type == ACL_TYPE_DEFAULT) {
 		count = le16_to_cpu(cifs_acl->access_entry_count);
 		size = sizeof(struct cifs_posix_acl);
 		size += sizeof(struct cifs_posix_ace) * count;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a763cd3..66736f5 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -501,99 +501,34 @@
 	return false;
 }
 
-/*
- * kvec_array_init - clone a kvec array, and advance into it
- * @new:	pointer to memory for cloned array
- * @iov:	pointer to original array
- * @nr_segs:	number of members in original array
- * @bytes:	number of bytes to advance into the cloned array
- *
- * This function will copy the array provided in iov to a section of memory
- * and advance the specified number of bytes into the new array. It returns
- * the number of segments in the new array. "new" must be at least as big as
- * the original iov array.
- */
-static unsigned int
-kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs,
-		size_t bytes)
-{
-	size_t base = 0;
-
-	while (bytes || !iov->iov_len) {
-		int copy = min(bytes, iov->iov_len);
-
-		bytes -= copy;
-		base += copy;
-		if (iov->iov_len == base) {
-			iov++;
-			nr_segs--;
-			base = 0;
-		}
-	}
-	memcpy(new, iov, sizeof(*iov) * nr_segs);
-	new->iov_base += base;
-	new->iov_len -= base;
-	return nr_segs;
-}
-
-static struct kvec *
-get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs)
-{
-	struct kvec *new_iov;
-
-	if (server->iov && nr_segs <= server->nr_iov)
-		return server->iov;
-
-	/* not big enough -- allocate a new one and release the old */
-	new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS);
-	if (new_iov) {
-		kfree(server->iov);
-		server->iov = new_iov;
-		server->nr_iov = nr_segs;
-	}
-	return new_iov;
-}
-
-int
-cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
-		       unsigned int nr_segs, unsigned int to_read)
+static int
+cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
 {
 	int length = 0;
 	int total_read;
-	unsigned int segs;
-	struct msghdr smb_msg;
-	struct kvec *iov;
 
-	iov = get_server_iovec(server, nr_segs);
-	if (!iov)
-		return -ENOMEM;
+	smb_msg->msg_control = NULL;
+	smb_msg->msg_controllen = 0;
 
-	smb_msg.msg_control = NULL;
-	smb_msg.msg_controllen = 0;
-
-	for (total_read = 0; to_read; total_read += length, to_read -= length) {
+	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
 		try_to_freeze();
 
-		if (server_unresponsive(server)) {
-			total_read = -ECONNABORTED;
-			break;
+		if (server_unresponsive(server))
+			return -ECONNABORTED;
+
+		length = sock_recvmsg(server->ssocket, smb_msg, 0);
+
+		if (server->tcpStatus == CifsExiting)
+			return -ESHUTDOWN;
+
+		if (server->tcpStatus == CifsNeedReconnect) {
+			cifs_reconnect(server);
+			return -ECONNABORTED;
 		}
 
-		segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
-
-		length = kernel_recvmsg(server->ssocket, &smb_msg,
-					iov, segs, to_read, 0);
-
-		if (server->tcpStatus == CifsExiting) {
-			total_read = -ESHUTDOWN;
-			break;
-		} else if (server->tcpStatus == CifsNeedReconnect) {
-			cifs_reconnect(server);
-			total_read = -ECONNABORTED;
-			break;
-		} else if (length == -ERESTARTSYS ||
-			   length == -EAGAIN ||
-			   length == -EINTR) {
+		if (length == -ERESTARTSYS ||
+		    length == -EAGAIN ||
+		    length == -EINTR) {
 			/*
 			 * Minimum sleep to prevent looping, allowing socket
 			 * to clear and app threads to set tcpStatus
@@ -602,12 +537,12 @@
 			usleep_range(1000, 2000);
 			length = 0;
 			continue;
-		} else if (length <= 0) {
-			cifs_dbg(FYI, "Received no data or error: expecting %d\n"
-				 "got %d", to_read, length);
+		}
+
+		if (length <= 0) {
+			cifs_dbg(FYI, "Received no data or error: %d\n", length);
 			cifs_reconnect(server);
-			total_read = -ECONNABORTED;
-			break;
+			return -ECONNABORTED;
 		}
 	}
 	return total_read;
@@ -617,12 +552,21 @@
 cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
 		      unsigned int to_read)
 {
-	struct kvec iov;
+	struct msghdr smb_msg;
+	struct kvec iov = {.iov_base = buf, .iov_len = to_read};
+	iov_iter_kvec(&smb_msg.msg_iter, READ | ITER_KVEC, &iov, 1, to_read);
 
-	iov.iov_base = buf;
-	iov.iov_len = to_read;
+	return cifs_readv_from_socket(server, &smb_msg);
+}
 
-	return cifs_readv_from_socket(server, &iov, 1, to_read);
+int
+cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
+		      unsigned int to_read)
+{
+	struct msghdr smb_msg;
+	struct bio_vec bv = {.bv_page = page, .bv_len = to_read};
+	iov_iter_bvec(&smb_msg.msg_iter, READ | ITER_BVEC, &bv, 1, to_read);
+	return cifs_readv_from_socket(server, &smb_msg);
 }
 
 static bool
@@ -783,7 +727,6 @@
 	}
 
 	kfree(server->hostname);
-	kfree(server->iov);
 	kfree(server);
 
 	length = atomic_dec_return(&tcpSesAllocCount);
@@ -1196,8 +1139,12 @@
 
 	convert_delimiter(vol->UNC, '\\');
 
-	/* If pos is NULL, or is a bogus trailing delimiter then no prepath */
-	if (!*pos++ || !*pos)
+	/* skip any delimiter */
+	if (*pos == '/' || *pos == '\\')
+		pos++;
+
+	/* If pos is empty then no prepath */
+	if (!*pos)
 		return 0;
 
 	vol->prepath = kstrdup(pos, GFP_KERNEL);
@@ -2918,7 +2865,7 @@
 cifs_reclassify_socket4(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	BUG_ON(sock_owned_by_user(sk));
+	BUG_ON(!sock_allow_reclassification(sk));
 	sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
 		&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
 }
@@ -2927,7 +2874,7 @@
 cifs_reclassify_socket6(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	BUG_ON(sock_owned_by_user(sk));
+	BUG_ON(!sock_allow_reclassification(sk));
 	sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
 		&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
 }
@@ -3630,7 +3577,7 @@
 	cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info);
 
 	/* tune readahead according to rsize */
-	cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
+	cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_SIZE;
 
 remote_path_check:
 #ifdef CONFIG_CIFS_DFS_UPCALL
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ff882ae..9793ae0 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -271,7 +271,7 @@
 cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
 		  struct tcon_link *tlink, __u32 oplock)
 {
-	struct dentry *dentry = file->f_path.dentry;
+	struct dentry *dentry = file_dentry(file);
 	struct inode *inode = d_inode(dentry);
 	struct cifsInodeInfo *cinode = CIFS_I(inode);
 	struct cifsFileInfo *cfile;
@@ -461,7 +461,7 @@
 	tcon = tlink_tcon(tlink);
 	server = tcon->ses->server;
 
-	full_path = build_path_from_dentry(file->f_path.dentry);
+	full_path = build_path_from_dentry(file_dentry(file));
 	if (full_path == NULL) {
 		rc = -ENOMEM;
 		goto out;
@@ -1833,7 +1833,7 @@
 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
 {
 	struct address_space *mapping = page->mapping;
-	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
 	char *write_data;
 	int rc = -EFAULT;
 	int bytes_written = 0;
@@ -1849,7 +1849,7 @@
 	write_data = kmap(page);
 	write_data += from;
 
-	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
+	if ((to > PAGE_SIZE) || (from > to)) {
 		kunmap(page);
 		return -EIO;
 	}
@@ -1902,7 +1902,7 @@
 	 * find_get_pages_tag seems to return a max of 256 on each
 	 * iteration, so we must call it several times in order to
 	 * fill the array or the wsize is effectively limited to
-	 * 256 * PAGE_CACHE_SIZE.
+	 * 256 * PAGE_SIZE.
 	 */
 	*found_pages = 0;
 	pages = wdata->pages;
@@ -1991,7 +1991,7 @@
 
 	/* put any pages we aren't going to use */
 	for (i = nr_pages; i < found_pages; i++) {
-		page_cache_release(wdata->pages[i]);
+		put_page(wdata->pages[i]);
 		wdata->pages[i] = NULL;
 	}
 
@@ -2009,11 +2009,11 @@
 	wdata->sync_mode = wbc->sync_mode;
 	wdata->nr_pages = nr_pages;
 	wdata->offset = page_offset(wdata->pages[0]);
-	wdata->pagesz = PAGE_CACHE_SIZE;
+	wdata->pagesz = PAGE_SIZE;
 	wdata->tailsz = min(i_size_read(mapping->host) -
 			page_offset(wdata->pages[nr_pages - 1]),
-			(loff_t)PAGE_CACHE_SIZE);
-	wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz;
+			(loff_t)PAGE_SIZE);
+	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
 
 	if (wdata->cfile != NULL)
 		cifsFileInfo_put(wdata->cfile);
@@ -2047,15 +2047,15 @@
 	 * If wsize is smaller than the page cache size, default to writing
 	 * one page at a time via cifs_writepage
 	 */
-	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
+	if (cifs_sb->wsize < PAGE_SIZE)
 		return generic_writepages(mapping, wbc);
 
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = true;
 		scanned = true;
@@ -2071,7 +2071,7 @@
 		if (rc)
 			break;
 
-		tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1;
+		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
 
 		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
 						  &found_pages);
@@ -2111,7 +2111,7 @@
 				else
 					SetPageError(wdata->pages[i]);
 				end_page_writeback(wdata->pages[i]);
-				page_cache_release(wdata->pages[i]);
+				put_page(wdata->pages[i]);
 			}
 			if (rc != -EAGAIN)
 				mapping_set_error(mapping, rc);
@@ -2154,7 +2154,7 @@
 
 	xid = get_xid();
 /* BB add check for wbc flags */
-	page_cache_get(page);
+	get_page(page);
 	if (!PageUptodate(page))
 		cifs_dbg(FYI, "ppw - page not up to date\n");
 
@@ -2170,7 +2170,7 @@
 	 */
 	set_page_writeback(page);
 retry_write:
-	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
+	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
 	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
 		goto retry_write;
 	else if (rc == -EAGAIN)
@@ -2180,7 +2180,7 @@
 	else
 		SetPageUptodate(page);
 	end_page_writeback(page);
-	page_cache_release(page);
+	put_page(page);
 	free_xid(xid);
 	return rc;
 }
@@ -2214,12 +2214,12 @@
 		if (copied == len)
 			SetPageUptodate(page);
 		ClearPageChecked(page);
-	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
+	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
 		SetPageUptodate(page);
 
 	if (!PageUptodate(page)) {
 		char *page_data;
-		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+		unsigned offset = pos & (PAGE_SIZE - 1);
 		unsigned int xid;
 
 		xid = get_xid();
@@ -2248,7 +2248,7 @@
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return rc;
 }
@@ -2687,11 +2687,8 @@
 out:
 	inode_unlock(inode);
 
-	if (rc > 0) {
-		ssize_t err = generic_write_sync(file, iocb->ki_pos - rc, rc);
-		if (err < 0)
-			rc = err;
-	}
+	if (rc > 0)
+		rc = generic_write_sync(iocb, rc);
 	up_read(&cinode->lock_sem);
 	return rc;
 }
@@ -2855,39 +2852,31 @@
 	int result = 0;
 	unsigned int i;
 	unsigned int nr_pages = rdata->nr_pages;
-	struct kvec iov;
 
 	rdata->got_bytes = 0;
 	rdata->tailsz = PAGE_SIZE;
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page = rdata->pages[i];
+		size_t n;
 
-		if (len >= PAGE_SIZE) {
-			/* enough data to fill the page */
-			iov.iov_base = kmap(page);
-			iov.iov_len = PAGE_SIZE;
-			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
-				 i, iov.iov_base, iov.iov_len);
-			len -= PAGE_SIZE;
-		} else if (len > 0) {
-			/* enough for partial page, fill and zero the rest */
-			iov.iov_base = kmap(page);
-			iov.iov_len = len;
-			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
-				 i, iov.iov_base, iov.iov_len);
-			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
-			rdata->tailsz = len;
-			len = 0;
-		} else {
+		if (len <= 0) {
 			/* no need to hold page hostage */
 			rdata->pages[i] = NULL;
 			rdata->nr_pages--;
 			put_page(page);
 			continue;
 		}
-
-		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
-		kunmap(page);
+		n = len;
+		if (len >= PAGE_SIZE) {
+			/* enough data to fill the page */
+			n = PAGE_SIZE;
+			len -= n;
+		} else {
+			zero_user(page, len, PAGE_SIZE - len);
+			rdata->tailsz = len;
+			len = 0;
+		}
+		result = cifs_read_page_from_socket(server, page, n);
 		if (result < 0)
 			break;
 
@@ -3286,9 +3275,9 @@
 		    (rdata->result == -EAGAIN && got_bytes))
 			cifs_readpage_to_fscache(rdata->mapping->host, page);
 
-		got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes);
+		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
 
-		page_cache_release(page);
+		put_page(page);
 		rdata->pages[i] = NULL;
 	}
 	kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3303,34 +3292,24 @@
 	u64 eof;
 	pgoff_t eof_index;
 	unsigned int nr_pages = rdata->nr_pages;
-	struct kvec iov;
 
 	/* determine the eof that the server (probably) has */
 	eof = CIFS_I(rdata->mapping->host)->server_eof;
-	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
 	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
 
 	rdata->got_bytes = 0;
-	rdata->tailsz = PAGE_CACHE_SIZE;
+	rdata->tailsz = PAGE_SIZE;
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page = rdata->pages[i];
+		size_t n = PAGE_SIZE;
 
-		if (len >= PAGE_CACHE_SIZE) {
-			/* enough data to fill the page */
-			iov.iov_base = kmap(page);
-			iov.iov_len = PAGE_CACHE_SIZE;
-			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
-				 i, page->index, iov.iov_base, iov.iov_len);
-			len -= PAGE_CACHE_SIZE;
+		if (len >= PAGE_SIZE) {
+			len -= PAGE_SIZE;
 		} else if (len > 0) {
 			/* enough for partial page, fill and zero the rest */
-			iov.iov_base = kmap(page);
-			iov.iov_len = len;
-			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
-				 i, page->index, iov.iov_base, iov.iov_len);
-			memset(iov.iov_base + len,
-				'\0', PAGE_CACHE_SIZE - len);
-			rdata->tailsz = len;
+			zero_user(page, len, PAGE_SIZE - len);
+			n = rdata->tailsz = len;
 			len = 0;
 		} else if (page->index > eof_index) {
 			/*
@@ -3341,12 +3320,12 @@
 			 * to prevent the VFS from repeatedly attempting to
 			 * fill them until the writes are flushed.
 			 */
-			zero_user(page, 0, PAGE_CACHE_SIZE);
+			zero_user(page, 0, PAGE_SIZE);
 			lru_cache_add_file(page);
 			flush_dcache_page(page);
 			SetPageUptodate(page);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			rdata->pages[i] = NULL;
 			rdata->nr_pages--;
 			continue;
@@ -3354,14 +3333,13 @@
 			/* no need to hold page hostage */
 			lru_cache_add_file(page);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			rdata->pages[i] = NULL;
 			rdata->nr_pages--;
 			continue;
 		}
 
-		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
-		kunmap(page);
+		result = cifs_read_page_from_socket(server, page, n);
 		if (result < 0)
 			break;
 
@@ -3402,8 +3380,8 @@
 	}
 
 	/* move first page to the tmplist */
-	*offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
-	*bytes = PAGE_CACHE_SIZE;
+	*offset = (loff_t)page->index << PAGE_SHIFT;
+	*bytes = PAGE_SIZE;
 	*nr_pages = 1;
 	list_move_tail(&page->lru, tmplist);
 
@@ -3415,7 +3393,7 @@
 			break;
 
 		/* would this page push the read over the rsize? */
-		if (*bytes + PAGE_CACHE_SIZE > rsize)
+		if (*bytes + PAGE_SIZE > rsize)
 			break;
 
 		__SetPageLocked(page);
@@ -3424,7 +3402,7 @@
 			break;
 		}
 		list_move_tail(&page->lru, tmplist);
-		(*bytes) += PAGE_CACHE_SIZE;
+		(*bytes) += PAGE_SIZE;
 		expected_index++;
 		(*nr_pages)++;
 	}
@@ -3493,7 +3471,7 @@
 		 * reach this point however since we set ra_pages to 0 when the
 		 * rsize is smaller than a cache page.
 		 */
-		if (unlikely(rsize < PAGE_CACHE_SIZE)) {
+		if (unlikely(rsize < PAGE_SIZE)) {
 			add_credits_and_wake_if(server, credits, 0);
 			return 0;
 		}
@@ -3512,7 +3490,7 @@
 				list_del(&page->lru);
 				lru_cache_add_file(page);
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 			}
 			rc = -ENOMEM;
 			add_credits_and_wake_if(server, credits, 0);
@@ -3524,7 +3502,7 @@
 		rdata->offset = offset;
 		rdata->bytes = bytes;
 		rdata->pid = pid;
-		rdata->pagesz = PAGE_CACHE_SIZE;
+		rdata->pagesz = PAGE_SIZE;
 		rdata->read_into_pages = cifs_readpages_read_into_pages;
 		rdata->credits = credits;
 
@@ -3542,7 +3520,7 @@
 				page = rdata->pages[i];
 				lru_cache_add_file(page);
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 			}
 			/* Fallback to the readpage in error/reconnect cases */
 			kref_put(&rdata->refcount, cifs_readdata_release);
@@ -3577,7 +3555,7 @@
 	read_data = kmap(page);
 	/* for reads over a certain size could initiate async read ahead */
 
-	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
+	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
 
 	if (rc < 0)
 		goto io_error;
@@ -3587,8 +3565,8 @@
 	file_inode(file)->i_atime =
 		current_fs_time(file_inode(file)->i_sb);
 
-	if (PAGE_CACHE_SIZE > rc)
-		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
+	if (PAGE_SIZE > rc)
+		memset(read_data + rc, 0, PAGE_SIZE - rc);
 
 	flush_dcache_page(page);
 	SetPageUptodate(page);
@@ -3608,7 +3586,7 @@
 
 static int cifs_readpage(struct file *file, struct page *page)
 {
-	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
 	int rc = -EACCES;
 	unsigned int xid;
 
@@ -3679,8 +3657,8 @@
 			struct page **pagep, void **fsdata)
 {
 	int oncethru = 0;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
+	pgoff_t index = pos >> PAGE_SHIFT;
+	loff_t offset = pos & (PAGE_SIZE - 1);
 	loff_t page_start = pos & PAGE_MASK;
 	loff_t i_size;
 	struct page *page;
@@ -3703,7 +3681,7 @@
 	 * the server. If the write is short, we'll end up doing a sync write
 	 * instead.
 	 */
-	if (len == PAGE_CACHE_SIZE)
+	if (len == PAGE_SIZE)
 		goto out;
 
 	/*
@@ -3718,7 +3696,7 @@
 		    (offset == 0 && (pos + len) >= i_size)) {
 			zero_user_segments(page, 0, offset,
 					   offset + len,
-					   PAGE_CACHE_SIZE);
+					   PAGE_SIZE);
 			/*
 			 * PageChecked means that the parts of the page
 			 * to which we're not writing are considered up
@@ -3737,7 +3715,7 @@
 		 * do a sync write instead since PG_uptodate isn't set.
 		 */
 		cifs_readpage_worker(file, page, &page_start);
-		page_cache_release(page);
+		put_page(page);
 		oncethru = 1;
 		goto start;
 	} else {
@@ -3764,7 +3742,7 @@
 {
 	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
 
-	if (offset == 0 && length == PAGE_CACHE_SIZE)
+	if (offset == 0 && length == PAGE_SIZE)
 		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
 }
 
@@ -3772,7 +3750,7 @@
 {
 	int rc = 0;
 	loff_t range_start = page_offset(page);
-	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = 0,
@@ -3854,7 +3832,7 @@
  * Direct IO is not yet supported in the cached mode. 
  */
 static ssize_t
-cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
 {
         /*
          * FIXME
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index aeb26db..514dadb 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -59,7 +59,7 @@
 
 		/* check if server can support readpages */
 		if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
-				PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
+				PAGE_SIZE + MAX_CIFS_HDR_SIZE)
 			inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 		else
 			inode->i_data.a_ops = &cifs_addr_ops;
@@ -2019,8 +2019,8 @@
 
 static int cifs_truncate_page(struct address_space *mapping, loff_t from)
 {
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE - 1);
 	struct page *page;
 	int rc = 0;
 
@@ -2028,9 +2028,9 @@
 	if (!page)
 		return -ENOMEM;
 
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return rc;
 }
 
@@ -2418,8 +2418,7 @@
 int
 cifs_setattr(struct dentry *direntry, struct iattr *attrs)
 {
-	struct inode *inode = d_inode(direntry);
-	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
 	struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
 
 	if (pTcon->unix_ext)
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index b30a4a6..65cf85d 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -78,20 +78,34 @@
 {
 	struct dentry *dentry, *alias;
 	struct inode *inode;
-	struct super_block *sb = d_inode(parent)->i_sb;
+	struct super_block *sb = parent->d_sb;
 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 
 	cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
 
 	dentry = d_hash_and_lookup(parent, name);
+	if (!dentry) {
+		/*
+		 * If we know that the inode will need to be revalidated
+		 * immediately, then don't create a new dentry for it.
+		 * We'll end up doing an on the wire call either way and
+		 * this spares us an invalidation.
+		 */
+		if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+			return;
+retry:
+		dentry = d_alloc_parallel(parent, name, &wq);
+	}
 	if (IS_ERR(dentry))
 		return;
-
-	if (dentry) {
+	if (!d_in_lookup(dentry)) {
 		inode = d_inode(dentry);
 		if (inode) {
-			if (d_mountpoint(dentry))
-				goto out;
+			if (d_mountpoint(dentry)) {
+				dput(dentry);
+				return;
+			}
 			/*
 			 * If we're generating inode numbers, then we don't
 			 * want to clobber the existing one with the one that
@@ -106,33 +120,22 @@
 			    (inode->i_mode & S_IFMT) ==
 			    (fattr->cf_mode & S_IFMT)) {
 				cifs_fattr_to_inode(inode, fattr);
-				goto out;
+				dput(dentry);
+				return;
 			}
 		}
 		d_invalidate(dentry);
 		dput(dentry);
+		goto retry;
+	} else {
+		inode = cifs_iget(sb, fattr);
+		if (!inode)
+			inode = ERR_PTR(-ENOMEM);
+		alias = d_splice_alias(inode, dentry);
+		d_lookup_done(dentry);
+		if (alias && !IS_ERR(alias))
+			dput(alias);
 	}
-
-	/*
-	 * If we know that the inode will need to be revalidated immediately,
-	 * then don't create a new dentry for it. We'll end up doing an on
-	 * the wire call either way and this spares us an invalidation.
-	 */
-	if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
-		return;
-
-	dentry = d_alloc(parent, name);
-	if (!dentry)
-		return;
-
-	inode = cifs_iget(sb, fattr);
-	if (!inode)
-		goto out;
-
-	alias = d_splice_alias(inode, dentry);
-	if (alias && !IS_ERR(alias))
-		dput(alias);
-out:
 	dput(dentry);
 }
 
@@ -300,7 +303,7 @@
 	cifsFile->invalidHandle = true;
 	cifsFile->srch_inf.endOfSearch = false;
 
-	full_path = build_path_from_dentry(file->f_path.dentry);
+	full_path = build_path_from_dentry(file_dentry(file));
 	if (full_path == NULL) {
 		rc = -ENOMEM;
 		goto error_exit;
@@ -759,7 +762,7 @@
 		 */
 		fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
 
-	cifs_prime_dcache(file->f_path.dentry, &name, &fattr);
+	cifs_prime_dcache(file_dentry(file), &name, &fattr);
 
 	ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
 	return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 59727e3..af0ec2d 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -400,19 +400,27 @@
 	sec_blob->LmChallengeResponse.MaximumLength = 0;
 
 	sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
-	rc = setup_ntlmv2_rsp(ses, nls_cp);
-	if (rc) {
-		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
-		goto setup_ntlmv2_ret;
-	}
-	memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-	tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+	if (ses->user_name != NULL) {
+		rc = setup_ntlmv2_rsp(ses, nls_cp);
+		if (rc) {
+			cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+			goto setup_ntlmv2_ret;
+		}
+		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
 
-	sec_blob->NtChallengeResponse.Length =
-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-	sec_blob->NtChallengeResponse.MaximumLength =
-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+		sec_blob->NtChallengeResponse.Length =
+				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+		sec_blob->NtChallengeResponse.MaximumLength =
+				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+	} else {
+		/*
+		 * don't send an NT Response for anonymous access
+		 */
+		sec_blob->NtChallengeResponse.Length = 0;
+		sec_blob->NtChallengeResponse.MaximumLength = 0;
+	}
 
 	if (ses->domainName == NULL) {
 		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -670,20 +678,24 @@
 
 	pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
 
-	/* no capabilities flags in old lanman negotiation */
-	pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+	if (ses->user_name != NULL) {
+		/* no capabilities flags in old lanman negotiation */
+		pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
 
-	/* Calculate hash with password and copy into bcc_ptr.
-	 * Encryption Key (stored as in cryptkey) gets used if the
-	 * security mode bit in Negottiate Protocol response states
-	 * to use challenge/response method (i.e. Password bit is 1).
-	 */
-	rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
-			      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
-			      true : false, lnm_session_key);
+		/* Calculate hash with password and copy into bcc_ptr.
+		 * Encryption Key (stored as in cryptkey) gets used if the
+		 * security mode bit in Negotiate Protocol response states
+		 * to use challenge/response method (i.e. Password bit is 1).
+		 */
+		rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
+				      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
+				      true : false, lnm_session_key);
 
-	memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
+		memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
+		bcc_ptr += CIFS_AUTH_RESP_SIZE;
+	} else {
+		pSMB->old_req.PasswordLength = 0;
+	}
 
 	/*
 	 * can not sign if LANMAN negotiated so no need
@@ -769,27 +781,32 @@
 	capabilities = cifs_ssetup_hdr(ses, pSMB);
 
 	pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
-	pSMB->req_no_secext.CaseInsensitivePasswordLength =
-			cpu_to_le16(CIFS_AUTH_RESP_SIZE);
-	pSMB->req_no_secext.CaseSensitivePasswordLength =
-			cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+	if (ses->user_name != NULL) {
+		pSMB->req_no_secext.CaseInsensitivePasswordLength =
+				cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+		pSMB->req_no_secext.CaseSensitivePasswordLength =
+				cpu_to_le16(CIFS_AUTH_RESP_SIZE);
 
-	/* calculate ntlm response and session key */
-	rc = setup_ntlm_response(ses, sess_data->nls_cp);
-	if (rc) {
-		cifs_dbg(VFS, "Error %d during NTLM authentication\n",
-				 rc);
-		goto out;
+		/* calculate ntlm response and session key */
+		rc = setup_ntlm_response(ses, sess_data->nls_cp);
+		if (rc) {
+			cifs_dbg(VFS, "Error %d during NTLM authentication\n",
+					 rc);
+			goto out;
+		}
+
+		/* copy ntlm response */
+		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+				CIFS_AUTH_RESP_SIZE);
+		bcc_ptr += CIFS_AUTH_RESP_SIZE;
+		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+				CIFS_AUTH_RESP_SIZE);
+		bcc_ptr += CIFS_AUTH_RESP_SIZE;
+	} else {
+		pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
+		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
 	}
 
-	/* copy ntlm response */
-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-			CIFS_AUTH_RESP_SIZE);
-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-			CIFS_AUTH_RESP_SIZE);
-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
-
 	if (ses->capabilities & CAP_UNICODE) {
 		/* unicode strings must be word aligned */
 		if (sess_data->iov[0].iov_len % 2) {
@@ -878,23 +895,27 @@
 	/* LM2 password would be here if we supported it */
 	pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
 
-	/* calculate nlmv2 response and session key */
-	rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
-	if (rc) {
-		cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
-		goto out;
+	if (ses->user_name != NULL) {
+		/* calculate nlmv2 response and session key */
+		rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
+		if (rc) {
+			cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
+			goto out;
+		}
+
+		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+		bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+
+		/* set case sensitive password length after tilen may get
+		 * assigned, tilen is 0 otherwise.
+		 */
+		pSMB->req_no_secext.CaseSensitivePasswordLength =
+			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+	} else {
+		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
 	}
 
-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-	bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
-
-	/* set case sensitive password length after tilen may get
-	 * assigned, tilen is 0 otherwise.
-	 */
-	pSMB->req_no_secext.CaseSensitivePasswordLength =
-		cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-
 	if (ses->capabilities & CAP_UNICODE) {
 		if (sess_data->iov[0].iov_len % 2) {
 			*bcc_ptr = 0;
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index bc0bb9c..0ffa180 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -44,6 +44,7 @@
 #define SMB2_OP_DELETE 7
 #define SMB2_OP_HARDLINK 8
 #define SMB2_OP_SET_EOF 9
+#define SMB2_OP_RMDIR 10
 
 /* Used when constructing chained read requests. */
 #define CHAINED_REQUEST 1
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 899bbc8..4f0231e 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -80,6 +80,10 @@
 		 * SMB2_open() call.
 		 */
 		break;
+	case SMB2_OP_RMDIR:
+		tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
+				   fid.volatile_fid);
+		break;
 	case SMB2_OP_RENAME:
 		tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
 				    fid.volatile_fid, (__le16 *)data);
@@ -191,8 +195,8 @@
 	   struct cifs_sb_info *cifs_sb)
 {
 	return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
-				  CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
-				  NULL, SMB2_OP_DELETE);
+				  CREATE_NOT_FILE,
+				  NULL, SMB2_OP_RMDIR);
 }
 
 int
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 42e1f44..8f38e33 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2575,6 +2575,22 @@
 }
 
 int
+SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
+		  u64 persistent_fid, u64 volatile_fid)
+{
+	__u8 delete_pending = 1;
+	void *data;
+	unsigned int size;
+
+	data = &delete_pending;
+	size = 1; /* sizeof __u8 */
+
+	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+			current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
+			&size);
+}
+
+int
 SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
 		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
 {
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 4f07dc9..eb2cde2 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -141,6 +141,8 @@
 extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
 		       u64 persistent_fid, u64 volatile_fid,
 		       __le16 *target_file);
+extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
+		      u64 persistent_fid, u64 volatile_fid);
 extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
 			     u64 persistent_fid, u64 volatile_fid,
 			     __le16 *target_file);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 8732a43..bc9a7b6 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -135,11 +135,10 @@
 int
 smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
-	int i, rc;
+	int rc;
 	unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
 	unsigned char *sigptr = smb2_signature;
 	struct kvec *iov = rqst->rq_iov;
-	int n_vec = rqst->rq_nvec;
 	struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
 	struct cifs_ses *ses;
 
@@ -171,53 +170,11 @@
 		return rc;
 	}
 
-	for (i = 0; i < n_vec; i++) {
-		if (iov[i].iov_len == 0)
-			continue;
-		if (iov[i].iov_base == NULL) {
-			cifs_dbg(VFS, "null iovec entry\n");
-			return -EIO;
-		}
-		/*
-		 * The first entry includes a length field (which does not get
-		 * signed that occupies the first 4 bytes before the header).
-		 */
-		if (i == 0) {
-			if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
-				break; /* nothing to sign or corrupt header */
-			rc =
-			crypto_shash_update(
-				&server->secmech.sdeschmacsha256->shash,
-				iov[i].iov_base + 4, iov[i].iov_len - 4);
-		} else {
-			rc =
-			crypto_shash_update(
-				&server->secmech.sdeschmacsha256->shash,
-				iov[i].iov_base, iov[i].iov_len);
-		}
-		if (rc) {
-			cifs_dbg(VFS, "%s: Could not update with payload\n",
-				 __func__);
-			return rc;
-		}
-	}
+	rc = __cifs_calc_signature(rqst, server, sigptr,
+		&server->secmech.sdeschmacsha256->shash);
 
-	/* now hash over the rq_pages array */
-	for (i = 0; i < rqst->rq_npages; i++) {
-		struct kvec p_iov;
-
-		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
-		crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
-					p_iov.iov_base, p_iov.iov_len);
-		kunmap(rqst->rq_pages[i]);
-	}
-
-	rc = crypto_shash_final(&server->secmech.sdeschmacsha256->shash,
-				sigptr);
-	if (rc)
-		cifs_dbg(VFS, "%s: Could not generate sha256 hash\n", __func__);
-
-	memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE);
+	if (!rc)
+		memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE);
 
 	return rc;
 }
@@ -395,12 +352,10 @@
 int
 smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
-	int i;
 	int rc = 0;
 	unsigned char smb3_signature[SMB2_CMACAES_SIZE];
 	unsigned char *sigptr = smb3_signature;
 	struct kvec *iov = rqst->rq_iov;
-	int n_vec = rqst->rq_nvec;
 	struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
 	struct cifs_ses *ses;
 
@@ -431,54 +386,12 @@
 		cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
 		return rc;
 	}
+
+	rc = __cifs_calc_signature(rqst, server, sigptr,
+				   &server->secmech.sdesccmacaes->shash);
 
-	for (i = 0; i < n_vec; i++) {
-		if (iov[i].iov_len == 0)
-			continue;
-		if (iov[i].iov_base == NULL) {
-			cifs_dbg(VFS, "null iovec entry");
-			return -EIO;
-		}
-		/*
-		 * The first entry includes a length field (which does not get
-		 * signed that occupies the first 4 bytes before the header).
-		 */
-		if (i == 0) {
-			if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
-				break; /* nothing to sign or corrupt header */
-			rc =
-			crypto_shash_update(
-				&server->secmech.sdesccmacaes->shash,
-				iov[i].iov_base + 4, iov[i].iov_len - 4);
-		} else {
-			rc =
-			crypto_shash_update(
-				&server->secmech.sdesccmacaes->shash,
-				iov[i].iov_base, iov[i].iov_len);
-		}
-		if (rc) {
-			cifs_dbg(VFS, "%s: Couldn't update cmac aes with payload\n",
-							__func__);
-			return rc;
-		}
-	}
-
-	/* now hash over the rq_pages array */
-	for (i = 0; i < rqst->rq_npages; i++) {
-		struct kvec p_iov;
-
-		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
-		crypto_shash_update(&server->secmech.sdesccmacaes->shash,
-					p_iov.iov_base, p_iov.iov_len);
-		kunmap(rqst->rq_pages[i]);
-	}
-
-	rc = crypto_shash_final(&server->secmech.sdesccmacaes->shash,
-						sigptr);
-	if (rc)
-		cifs_dbg(VFS, "%s: Could not generate cmac aes\n", __func__);
-
-	memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE);
+	if (!rc)
+		memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE);
 
 	return rc;
 }
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 87abe8e..206a597 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -124,41 +124,32 @@
 /*
  * smb_send_kvec - send an array of kvecs to the server
  * @server:	Server to send the data to
- * @iov:	Pointer to array of kvecs
- * @n_vec:	length of kvec array
+ * @smb_msg:	Message to send
  * @sent:	amount of data sent on socket is stored here
  *
  * Our basic "send data to server" function. Should be called with srv_mutex
  * held. The caller is responsible for handling the results.
  */
 static int
-smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
-		size_t *sent)
+smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
+	      size_t *sent)
 {
 	int rc = 0;
-	int i = 0;
-	struct msghdr smb_msg;
-	unsigned int remaining;
-	size_t first_vec = 0;
+	int retries = 0;
 	struct socket *ssocket = server->ssocket;
 
 	*sent = 0;
 
-	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
-	smb_msg.msg_namelen = sizeof(struct sockaddr);
-	smb_msg.msg_control = NULL;
-	smb_msg.msg_controllen = 0;
+	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
+	smb_msg->msg_namelen = sizeof(struct sockaddr);
+	smb_msg->msg_control = NULL;
+	smb_msg->msg_controllen = 0;
 	if (server->noblocksnd)
-		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
+		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
 	else
-		smb_msg.msg_flags = MSG_NOSIGNAL;
+		smb_msg->msg_flags = MSG_NOSIGNAL;
 
-	remaining = 0;
-	for (i = 0; i < n_vec; i++)
-		remaining += iov[i].iov_len;
-
-	i = 0;
-	while (remaining) {
+	while (msg_data_left(smb_msg)) {
 		/*
 		 * If blocking send, we try 3 times, since each can block
 		 * for 5 seconds. For nonblocking  we have to try more
@@ -177,35 +168,21 @@
 		 * after the retries we will kill the socket and
 		 * reconnect which may clear the network problem.
 		 */
-		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
-				    n_vec - first_vec, remaining);
+		rc = sock_sendmsg(ssocket, smb_msg);
 		if (rc == -EAGAIN) {
-			i++;
-			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
+			retries++;
+			if (retries >= 14 ||
+			    (!server->noblocksnd && (retries > 2))) {
 				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
 					 ssocket);
-				rc = -EAGAIN;
-				break;
+				return -EAGAIN;
 			}
-			msleep(1 << i);
+			msleep(1 << retries);
 			continue;
 		}
 
 		if (rc < 0)
-			break;
-
-		/* send was at least partially successful */
-		*sent += rc;
-
-		if (rc == remaining) {
-			remaining = 0;
-			break;
-		}
-
-		if (rc > remaining) {
-			cifs_dbg(VFS, "sent %d requested %d\n", rc, remaining);
-			break;
-		}
+			return rc;
 
 		if (rc == 0) {
 			/* should never happen, letting socket clear before
@@ -215,59 +192,11 @@
 			continue;
 		}
 
-		remaining -= rc;
-
-		/* the line below resets i */
-		for (i = first_vec; i < n_vec; i++) {
-			if (iov[i].iov_len) {
-				if (rc > iov[i].iov_len) {
-					rc -= iov[i].iov_len;
-					iov[i].iov_len = 0;
-				} else {
-					iov[i].iov_base += rc;
-					iov[i].iov_len -= rc;
-					first_vec = i;
-					break;
-				}
-			}
-		}
-
-		i = 0; /* in case we get ENOSPC on the next send */
-		rc = 0;
+		/* send was at least partially successful */
+		*sent += rc;
+		retries = 0; /* in case we get ENOSPC on the next send */
 	}
-	return rc;
-}
-
-/**
- * rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
- * @rqst: pointer to smb_rqst
- * @idx: index into the array of the page
- * @iov: pointer to struct kvec that will hold the result
- *
- * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
- * The page will be kmapped and the address placed into iov_base. The length
- * will then be adjusted according to the ptailoff.
- */
-void
-cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
-			struct kvec *iov)
-{
-	/*
-	 * FIXME: We could avoid this kmap altogether if we used
-	 * kernel_sendpage instead of kernel_sendmsg. That will only
-	 * work if signing is disabled though as sendpage inlines the
-	 * page directly into the fraglist. If userspace modifies the
-	 * page after we calculate the signature, then the server will
-	 * reject it and may break the connection. kernel_sendmsg does
-	 * an extra copy of the data and avoids that issue.
-	 */
-	iov->iov_base = kmap(rqst->rq_pages[idx]);
-
-	/* if last page, don't send beyond this offset into page */
-	if (idx == (rqst->rq_npages - 1))
-		iov->iov_len = rqst->rq_tailsz;
-	else
-		iov->iov_len = rqst->rq_pagesz;
+	return 0;
 }
 
 static unsigned long
@@ -299,8 +228,9 @@
 	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
 	unsigned long send_length;
 	unsigned int i;
-	size_t total_len = 0, sent;
+	size_t total_len = 0, sent, size;
 	struct socket *ssocket = server->ssocket;
+	struct msghdr smb_msg;
 	int val = 1;
 
 	if (ssocket == NULL)
@@ -321,7 +251,13 @@
 	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
 				(char *)&val, sizeof(val));
 
-	rc = smb_send_kvec(server, iov, n_vec, &sent);
+	size = 0;
+	for (i = 0; i < n_vec; i++)
+		size += iov[i].iov_len;
+
+	iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
+
+	rc = smb_send_kvec(server, &smb_msg, &sent);
 	if (rc < 0)
 		goto uncork;
 
@@ -329,11 +265,16 @@
 
 	/* now walk the page array and send each page in it */
 	for (i = 0; i < rqst->rq_npages; i++) {
-		struct kvec p_iov;
-
-		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
-		rc = smb_send_kvec(server, &p_iov, 1, &sent);
-		kunmap(rqst->rq_pages[i]);
+		size_t len = i == rqst->rq_npages - 1
+				? rqst->rq_tailsz
+				: rqst->rq_pagesz;
+		struct bio_vec bvec = {
+			.bv_page = rqst->rq_pages[i],
+			.bv_len = len
+		};
+		iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
+			      &bvec, 1, len);
+		rc = smb_send_kvec(server, &smb_msg, &sent);
 		if (rc < 0)
 			break;
 
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index f5dc2f0..c8b77aa 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -32,92 +32,24 @@
 #include "cifs_unicode.h"
 
 #define MAX_EA_VALUE_SIZE 65535
-#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib"
 #define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
 
 /* BB need to add server (Samba e.g) support for security and trusted prefix */
 
-int cifs_removexattr(struct dentry *direntry, const char *ea_name)
+enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT };
+
+static int cifs_xattr_set(const struct xattr_handler *handler,
+			  struct dentry *dentry, const char *name,
+			  const void *value, size_t size, int flags)
 {
 	int rc = -EOPNOTSUPP;
-#ifdef CONFIG_CIFS_XATTR
 	unsigned int xid;
-	struct cifs_sb_info *cifs_sb;
+	struct super_block *sb = dentry->d_sb;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 	struct tcon_link *tlink;
 	struct cifs_tcon *pTcon;
-	struct super_block *sb;
-	char *full_path = NULL;
-
-	if (direntry == NULL)
-		return -EIO;
-	if (d_really_is_negative(direntry))
-		return -EIO;
-	sb = d_inode(direntry)->i_sb;
-	if (sb == NULL)
-		return -EIO;
-
-	cifs_sb = CIFS_SB(sb);
-	tlink = cifs_sb_tlink(cifs_sb);
-	if (IS_ERR(tlink))
-		return PTR_ERR(tlink);
-	pTcon = tlink_tcon(tlink);
-
-	xid = get_xid();
-
-	full_path = build_path_from_dentry(direntry);
-	if (full_path == NULL) {
-		rc = -ENOMEM;
-		goto remove_ea_exit;
-	}
-	if (ea_name == NULL) {
-		cifs_dbg(FYI, "Null xattr names not supported\n");
-	} else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
-		&& (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))) {
-		cifs_dbg(FYI,
-			 "illegal xattr request %s (only user namespace supported)\n",
-			 ea_name);
-		/* BB what if no namespace prefix? */
-		/* Should we just pass them to server, except for
-		system and perhaps security prefixes? */
-	} else {
-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
-			goto remove_ea_exit;
-
-		ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
-		if (pTcon->ses->server->ops->set_EA)
-			rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
-				full_path, ea_name, NULL, (__u16)0,
-				cifs_sb->local_nls, cifs_remap(cifs_sb));
-	}
-remove_ea_exit:
-	kfree(full_path);
-	free_xid(xid);
-	cifs_put_tlink(tlink);
-#endif
-	return rc;
-}
-
-int cifs_setxattr(struct dentry *direntry, const char *ea_name,
-		  const void *ea_value, size_t value_size, int flags)
-{
-	int rc = -EOPNOTSUPP;
-#ifdef CONFIG_CIFS_XATTR
-	unsigned int xid;
-	struct cifs_sb_info *cifs_sb;
-	struct tcon_link *tlink;
-	struct cifs_tcon *pTcon;
-	struct super_block *sb;
 	char *full_path;
 
-	if (direntry == NULL)
-		return -EIO;
-	if (d_really_is_negative(direntry))
-		return -EIO;
-	sb = d_inode(direntry)->i_sb;
-	if (sb == NULL)
-		return -EIO;
-
-	cifs_sb = CIFS_SB(sb);
 	tlink = cifs_sb_tlink(cifs_sb);
 	if (IS_ERR(tlink))
 		return PTR_ERR(tlink);
@@ -125,10 +57,10 @@
 
 	xid = get_xid();
 
-	full_path = build_path_from_dentry(direntry);
+	full_path = build_path_from_dentry(dentry);
 	if (full_path == NULL) {
 		rc = -ENOMEM;
-		goto set_ea_exit;
+		goto out;
 	}
 	/* return dos attributes as pseudo xattr */
 	/* return alt name if available as pseudo attr */
@@ -136,123 +68,93 @@
 	/* if proc/fs/cifs/streamstoxattr is set then
 		search server for EAs or streams to
 		returns as xattrs */
-	if (value_size > MAX_EA_VALUE_SIZE) {
+	if (size > MAX_EA_VALUE_SIZE) {
 		cifs_dbg(FYI, "size of EA value too large\n");
 		rc = -EOPNOTSUPP;
-		goto set_ea_exit;
+		goto out;
 	}
 
-	if (ea_name == NULL) {
-		cifs_dbg(FYI, "Null xattr names not supported\n");
-	} else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
-		   == 0) {
+	switch (handler->flags) {
+	case XATTR_USER:
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
-			goto set_ea_exit;
-		if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
-			cifs_dbg(FYI, "attempt to set cifs inode metadata\n");
+			goto out;
 
-		ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
 		if (pTcon->ses->server->ops->set_EA)
 			rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
-				full_path, ea_name, ea_value, (__u16)value_size,
+				full_path, name, value, (__u16)size,
 				cifs_sb->local_nls, cifs_remap(cifs_sb));
-	} else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)
-		   == 0) {
-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
-			goto set_ea_exit;
+		break;
 
-		ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
-		if (pTcon->ses->server->ops->set_EA)
-			rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
-				full_path, ea_name, ea_value, (__u16)value_size,
-				cifs_sb->local_nls, cifs_remap(cifs_sb));
-	} else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
-			strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
+	case XATTR_CIFS_ACL: {
 #ifdef CONFIG_CIFS_ACL
 		struct cifs_ntsd *pacl;
-		pacl = kmalloc(value_size, GFP_KERNEL);
+
+		if (!value)
+			goto out;
+		pacl = kmalloc(size, GFP_KERNEL);
 		if (!pacl) {
 			rc = -ENOMEM;
 		} else {
-			memcpy(pacl, ea_value, value_size);
-			if (pTcon->ses->server->ops->set_acl)
+			memcpy(pacl, value, size);
+			if (value &&
+			    pTcon->ses->server->ops->set_acl)
 				rc = pTcon->ses->server->ops->set_acl(pacl,
-						value_size, d_inode(direntry),
+						size, d_inode(dentry),
 						full_path, CIFS_ACL_DACL);
 			else
 				rc = -EOPNOTSUPP;
 			if (rc == 0) /* force revalidate of the inode */
-				CIFS_I(d_inode(direntry))->time = 0;
+				CIFS_I(d_inode(dentry))->time = 0;
 			kfree(pacl);
 		}
-#else
-		cifs_dbg(FYI, "Set CIFS ACL not supported yet\n");
 #endif /* CONFIG_CIFS_ACL */
-	} else {
-		int temp;
-		temp = strncmp(ea_name, XATTR_NAME_POSIX_ACL_ACCESS,
-			strlen(XATTR_NAME_POSIX_ACL_ACCESS));
-		if (temp == 0) {
-#ifdef CONFIG_CIFS_POSIX
-			if (sb->s_flags & MS_POSIXACL)
-				rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
-					ea_value, (const int)value_size,
-					ACL_TYPE_ACCESS, cifs_sb->local_nls,
-					cifs_remap(cifs_sb));
-			cifs_dbg(FYI, "set POSIX ACL rc %d\n", rc);
-#else
-			cifs_dbg(FYI, "set POSIX ACL not supported\n");
-#endif
-		} else if (strncmp(ea_name, XATTR_NAME_POSIX_ACL_DEFAULT,
-				   strlen(XATTR_NAME_POSIX_ACL_DEFAULT)) == 0) {
-#ifdef CONFIG_CIFS_POSIX
-			if (sb->s_flags & MS_POSIXACL)
-				rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
-					ea_value, (const int)value_size,
-					ACL_TYPE_DEFAULT, cifs_sb->local_nls,
-					cifs_remap(cifs_sb));
-			cifs_dbg(FYI, "set POSIX default ACL rc %d\n", rc);
-#else
-			cifs_dbg(FYI, "set default POSIX ACL not supported\n");
-#endif
-		} else {
-			cifs_dbg(FYI, "illegal xattr request %s (only user namespace supported)\n",
-				 ea_name);
-		  /* BB what if no namespace prefix? */
-		  /* Should we just pass them to server, except for
-		  system and perhaps security prefixes? */
-		}
+		break;
 	}
 
-set_ea_exit:
+	case XATTR_ACL_ACCESS:
+#ifdef CONFIG_CIFS_POSIX
+		if (!value)
+			goto out;
+		if (sb->s_flags & MS_POSIXACL)
+			rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
+				value, (const int)size,
+				ACL_TYPE_ACCESS, cifs_sb->local_nls,
+				cifs_remap(cifs_sb));
+#endif  /* CONFIG_CIFS_POSIX */
+		break;
+
+	case XATTR_ACL_DEFAULT:
+#ifdef CONFIG_CIFS_POSIX
+		if (!value)
+			goto out;
+		if (sb->s_flags & MS_POSIXACL)
+			rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
+				value, (const int)size,
+				ACL_TYPE_DEFAULT, cifs_sb->local_nls,
+				cifs_remap(cifs_sb));
+#endif  /* CONFIG_CIFS_POSIX */
+		break;
+	}
+
+out:
 	kfree(full_path);
 	free_xid(xid);
 	cifs_put_tlink(tlink);
-#endif
 	return rc;
 }
 
-ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
-	void *ea_value, size_t buf_size)
+static int cifs_xattr_get(const struct xattr_handler *handler,
+			  struct dentry *dentry, struct inode *inode,
+			  const char *name, void *value, size_t size)
 {
 	ssize_t rc = -EOPNOTSUPP;
-#ifdef CONFIG_CIFS_XATTR
 	unsigned int xid;
-	struct cifs_sb_info *cifs_sb;
+	struct super_block *sb = dentry->d_sb;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 	struct tcon_link *tlink;
 	struct cifs_tcon *pTcon;
-	struct super_block *sb;
 	char *full_path;
 
-	if (direntry == NULL)
-		return -EIO;
-	if (d_really_is_negative(direntry))
-		return -EIO;
-	sb = d_inode(direntry)->i_sb;
-	if (sb == NULL)
-		return -EIO;
-
-	cifs_sb = CIFS_SB(sb);
 	tlink = cifs_sb_tlink(cifs_sb);
 	if (IS_ERR(tlink))
 		return PTR_ERR(tlink);
@@ -260,98 +162,72 @@
 
 	xid = get_xid();
 
-	full_path = build_path_from_dentry(direntry);
+	full_path = build_path_from_dentry(dentry);
 	if (full_path == NULL) {
 		rc = -ENOMEM;
-		goto get_ea_exit;
+		goto out;
 	}
 	/* return dos attributes as pseudo xattr */
 	/* return alt name if available as pseudo attr */
-	if (ea_name == NULL) {
-		cifs_dbg(FYI, "Null xattr names not supported\n");
-	} else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)
-		   == 0) {
+	switch (handler->flags) {
+	case XATTR_USER:
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
-			goto get_ea_exit;
+			goto out;
 
-		if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
-			cifs_dbg(FYI, "attempt to query cifs inode metadata\n");
-			/* revalidate/getattr then populate from inode */
-		} /* BB add else when above is implemented */
-		ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */
 		if (pTcon->ses->server->ops->query_all_EAs)
 			rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
-				full_path, ea_name, ea_value, buf_size,
+				full_path, name, value, size,
 				cifs_sb->local_nls, cifs_remap(cifs_sb));
-	} else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
-		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
-			goto get_ea_exit;
+		break;
 
-		ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. prefix */
-		if (pTcon->ses->server->ops->query_all_EAs)
-			rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon,
-				full_path, ea_name, ea_value, buf_size,
-				cifs_sb->local_nls, cifs_remap(cifs_sb));
-	} else if (strncmp(ea_name, XATTR_NAME_POSIX_ACL_ACCESS,
-			  strlen(XATTR_NAME_POSIX_ACL_ACCESS)) == 0) {
-#ifdef CONFIG_CIFS_POSIX
-		if (sb->s_flags & MS_POSIXACL)
-			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
-				ea_value, buf_size, ACL_TYPE_ACCESS,
-				cifs_sb->local_nls,
-				cifs_remap(cifs_sb));
-#else
-		cifs_dbg(FYI, "Query POSIX ACL not supported yet\n");
-#endif /* CONFIG_CIFS_POSIX */
-	} else if (strncmp(ea_name, XATTR_NAME_POSIX_ACL_DEFAULT,
-			  strlen(XATTR_NAME_POSIX_ACL_DEFAULT)) == 0) {
-#ifdef CONFIG_CIFS_POSIX
-		if (sb->s_flags & MS_POSIXACL)
-			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
-				ea_value, buf_size, ACL_TYPE_DEFAULT,
-				cifs_sb->local_nls,
-				cifs_remap(cifs_sb));
-#else
-		cifs_dbg(FYI, "Query POSIX default ACL not supported yet\n");
-#endif /* CONFIG_CIFS_POSIX */
-	} else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
-				strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
+	case XATTR_CIFS_ACL: {
 #ifdef CONFIG_CIFS_ACL
-			u32 acllen;
-			struct cifs_ntsd *pacl;
+		u32 acllen;
+		struct cifs_ntsd *pacl;
 
-			if (pTcon->ses->server->ops->get_acl == NULL)
-				goto get_ea_exit; /* rc already EOPNOTSUPP */
+		if (pTcon->ses->server->ops->get_acl == NULL)
+			goto out; /* rc already EOPNOTSUPP */
 
-			pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
-					d_inode(direntry), full_path, &acllen);
-			if (IS_ERR(pacl)) {
-				rc = PTR_ERR(pacl);
-				cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
-					 __func__, rc);
-			} else {
-				if (ea_value) {
-					if (acllen > buf_size)
-						acllen = -ERANGE;
-					else
-						memcpy(ea_value, pacl, acllen);
-				}
-				rc = acllen;
-				kfree(pacl);
+		pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
+				inode, full_path, &acllen);
+		if (IS_ERR(pacl)) {
+			rc = PTR_ERR(pacl);
+			cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
+				 __func__, rc);
+		} else {
+			if (value) {
+				if (acllen > size)
+					acllen = -ERANGE;
+				else
+					memcpy(value, pacl, acllen);
 			}
-#else
-			cifs_dbg(FYI, "Query CIFS ACL not supported yet\n");
-#endif /* CONFIG_CIFS_ACL */
-	} else if (strncmp(ea_name,
-		  XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
-		cifs_dbg(FYI, "Trusted xattr namespace not supported yet\n");
-	} else if (strncmp(ea_name,
-		  XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
-		cifs_dbg(FYI, "Security xattr namespace not supported yet\n");
-	} else
-		cifs_dbg(FYI,
-			 "illegal xattr request %s (only user namespace supported)\n",
-			 ea_name);
+			rc = acllen;
+			kfree(pacl);
+		}
+#endif  /* CONFIG_CIFS_ACL */
+		break;
+	}
+
+	case XATTR_ACL_ACCESS:
+#ifdef CONFIG_CIFS_POSIX
+		if (sb->s_flags & MS_POSIXACL)
+			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
+				value, size, ACL_TYPE_ACCESS,
+				cifs_sb->local_nls,
+				cifs_remap(cifs_sb));
+#endif  /* CONFIG_CIFS_POSIX */
+		break;
+
+	case XATTR_ACL_DEFAULT:
+#ifdef CONFIG_CIFS_POSIX
+		if (sb->s_flags & MS_POSIXACL)
+			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
+				value, size, ACL_TYPE_DEFAULT,
+				cifs_sb->local_nls,
+				cifs_remap(cifs_sb));
+#endif  /* CONFIG_CIFS_POSIX */
+		break;
+	}
 
 	/* We could add an additional check for streams ie
 	    if proc/fs/cifs/streamstoxattr is set then
@@ -361,34 +237,22 @@
 	if (rc == -EINVAL)
 		rc = -EOPNOTSUPP;
 
-get_ea_exit:
+out:
 	kfree(full_path);
 	free_xid(xid);
 	cifs_put_tlink(tlink);
-#endif
 	return rc;
 }
 
 ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
 {
 	ssize_t rc = -EOPNOTSUPP;
-#ifdef CONFIG_CIFS_XATTR
 	unsigned int xid;
-	struct cifs_sb_info *cifs_sb;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
 	struct tcon_link *tlink;
 	struct cifs_tcon *pTcon;
-	struct super_block *sb;
 	char *full_path;
 
-	if (direntry == NULL)
-		return -EIO;
-	if (d_really_is_negative(direntry))
-		return -EIO;
-	sb = d_inode(direntry)->i_sb;
-	if (sb == NULL)
-		return -EIO;
-
-	cifs_sb = CIFS_SB(sb);
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
 		return -EOPNOTSUPP;
 
@@ -419,6 +283,50 @@
 	kfree(full_path);
 	free_xid(xid);
 	cifs_put_tlink(tlink);
-#endif
 	return rc;
 }
+
+static const struct xattr_handler cifs_user_xattr_handler = {
+	.prefix = XATTR_USER_PREFIX,
+	.flags = XATTR_USER,
+	.get = cifs_xattr_get,
+	.set = cifs_xattr_set,
+};
+
+/* os2.* attributes are treated like user.* attributes */
+static const struct xattr_handler cifs_os2_xattr_handler = {
+	.prefix = XATTR_OS2_PREFIX,
+	.flags = XATTR_USER,
+	.get = cifs_xattr_get,
+	.set = cifs_xattr_set,
+};
+
+static const struct xattr_handler cifs_cifs_acl_xattr_handler = {
+	.name = CIFS_XATTR_CIFS_ACL,
+	.flags = XATTR_CIFS_ACL,
+	.get = cifs_xattr_get,
+	.set = cifs_xattr_set,
+};
+
+static const struct xattr_handler cifs_posix_acl_access_xattr_handler = {
+	.name = XATTR_NAME_POSIX_ACL_ACCESS,
+	.flags = XATTR_ACL_ACCESS,
+	.get = cifs_xattr_get,
+	.set = cifs_xattr_set,
+};
+
+static const struct xattr_handler cifs_posix_acl_default_xattr_handler = {
+	.name = XATTR_NAME_POSIX_ACL_DEFAULT,
+	.flags = XATTR_ACL_DEFAULT,
+	.get = cifs_xattr_get,
+	.set = cifs_xattr_set,
+};
+
+const struct xattr_handler *cifs_xattr_handlers[] = {
+	&cifs_user_xattr_handler,
+	&cifs_os2_xattr_handler,
+	&cifs_cifs_acl_xattr_handler,
+	&cifs_posix_acl_access_xattr_handler,
+	&cifs_posix_acl_default_xattr_handler,
+	NULL
+};
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 42e731b..6fb8672 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -424,16 +424,22 @@
 	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
 	host_file = cfi->cfi_container;
 
-	if (host_file->f_op->iterate) {
+	if (host_file->f_op->iterate || host_file->f_op->iterate_shared) {
 		struct inode *host_inode = file_inode(host_file);
-
-		inode_lock(host_inode);
 		ret = -ENOENT;
 		if (!IS_DEADDIR(host_inode)) {
-			ret = host_file->f_op->iterate(host_file, ctx);
-			file_accessed(host_file);
+			if (host_file->f_op->iterate_shared) {
+				inode_lock_shared(host_inode);
+				ret = host_file->f_op->iterate_shared(host_file, ctx);
+				file_accessed(host_file);
+				inode_unlock_shared(host_inode);
+			} else {
+				inode_lock(host_inode);
+				ret = host_file->f_op->iterate(host_file, ctx);
+				file_accessed(host_file);
+				inode_unlock(host_inode);
+			}
 		}
-		inode_unlock(host_inode);
 		return ret;
 	}
 	/* Venus: we must read Venus dirents from a file */
diff --git a/fs/compat.c b/fs/compat.c
index a71936a..8754e9a 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -884,7 +884,7 @@
 		struct compat_old_linux_dirent __user *, dirent, unsigned int, count)
 {
 	int error;
-	struct fd f = fdget(fd);
+	struct fd f = fdget_pos(fd);
 	struct compat_readdir_callback buf = {
 		.ctx.actor = compat_fillonedir,
 		.dirent = dirent
@@ -897,7 +897,7 @@
 	if (buf.result)
 		error = buf.result;
 
-	fdput(f);
+	fdput_pos(f);
 	return error;
 }
 
@@ -975,7 +975,7 @@
 	if (!access_ok(VERIFY_WRITE, dirent, count))
 		return -EFAULT;
 
-	f = fdget(fd);
+	f = fdget_pos(fd);
 	if (!f.file)
 		return -EBADF;
 
@@ -989,7 +989,7 @@
 		else
 			error = count - buf.count;
 	}
-	fdput(f);
+	fdput_pos(f);
 	return error;
 }
 
@@ -1062,7 +1062,7 @@
 	if (!access_ok(VERIFY_WRITE, dirent, count))
 		return -EFAULT;
 
-	f = fdget(fd);
+	f = fdget_pos(fd);
 	if (!f.file)
 		return -EBADF;
 
@@ -1077,7 +1077,7 @@
 		else
 			error = count - buf.count;
 	}
-	fdput(f);
+	fdput_pos(f);
 	return error;
 }
 #endif /* __ARCH_WANT_COMPAT_SYS_GETDENTS64 */
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index ea59c89..56fb261 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -494,7 +494,7 @@
  * If there is an error, the caller will reset the flags via
  * configfs_detach_rollback().
  */
-static int configfs_detach_prep(struct dentry *dentry, struct mutex **wait_mutex)
+static int configfs_detach_prep(struct dentry *dentry, struct dentry **wait)
 {
 	struct configfs_dirent *parent_sd = dentry->d_fsdata;
 	struct configfs_dirent *sd;
@@ -515,8 +515,8 @@
 		if (sd->s_type & CONFIGFS_USET_DEFAULT) {
 			/* Abort if racing with mkdir() */
 			if (sd->s_type & CONFIGFS_USET_IN_MKDIR) {
-				if (wait_mutex)
-					*wait_mutex = &d_inode(sd->s_dentry)->i_mutex;
+				if (wait)
+					*wait = dget(sd->s_dentry);
 				return -EAGAIN;
 			}
 
@@ -524,7 +524,7 @@
 			 * Yup, recursive.  If there's a problem, blame
 			 * deep nesting of default_groups
 			 */
-			ret = configfs_detach_prep(sd->s_dentry, wait_mutex);
+			ret = configfs_detach_prep(sd->s_dentry, wait);
 			if (!ret)
 				continue;
 		} else
@@ -1458,7 +1458,7 @@
 	 * the new link is temporarily attached
 	 */
 	do {
-		struct mutex *wait_mutex;
+		struct dentry *wait;
 
 		mutex_lock(&configfs_symlink_mutex);
 		spin_lock(&configfs_dirent_lock);
@@ -1469,7 +1469,7 @@
 		 */
 		ret = sd->s_dependent_count ? -EBUSY : 0;
 		if (!ret) {
-			ret = configfs_detach_prep(dentry, &wait_mutex);
+			ret = configfs_detach_prep(dentry, &wait);
 			if (ret)
 				configfs_detach_rollback(dentry);
 		}
@@ -1483,8 +1483,9 @@
 			}
 
 			/* Wait until the racing operation terminates */
-			mutex_lock(wait_mutex);
-			mutex_unlock(wait_mutex);
+			inode_lock(d_inode(wait));
+			inode_unlock(d_inode(wait));
+			dput(wait);
 		}
 	} while (ret == -EAGAIN);
 
@@ -1632,11 +1633,9 @@
 
 	if (!dir_emit_dots(file, ctx))
 		return 0;
-	if (ctx->pos == 2) {
-		spin_lock(&configfs_dirent_lock);
+	spin_lock(&configfs_dirent_lock);
+	if (ctx->pos == 2)
 		list_move(q, &parent_sd->s_children);
-		spin_unlock(&configfs_dirent_lock);
-	}
 	for (p = q->next; p != &parent_sd->s_children; p = p->next) {
 		struct configfs_dirent *next;
 		const char *name;
@@ -1647,9 +1646,6 @@
 		if (!next->s_element)
 			continue;
 
-		name = configfs_get_name(next);
-		len = strlen(name);
-
 		/*
 		 * We'll have a dentry and an inode for
 		 * PINNED items and for open attribute
@@ -1663,7 +1659,6 @@
 		 * they close it.  Beyond that, we don't
 		 * care.
 		 */
-		spin_lock(&configfs_dirent_lock);
 		dentry = next->s_dentry;
 		if (dentry)
 			inode = d_inode(dentry);
@@ -1673,15 +1668,18 @@
 		if (!inode)
 			ino = iunique(sb, 2);
 
+		name = configfs_get_name(next);
+		len = strlen(name);
+
 		if (!dir_emit(ctx, name, len, ino, dt_type(next)))
 			return 0;
 
 		spin_lock(&configfs_dirent_lock);
 		list_move(q, p);
-		spin_unlock(&configfs_dirent_lock);
 		p = q;
 		ctx->pos++;
 	}
+	spin_unlock(&configfs_dirent_lock);
 	return 0;
 }
 
@@ -1689,7 +1687,6 @@
 {
 	struct dentry * dentry = file->f_path.dentry;
 
-	inode_lock(d_inode(dentry));
 	switch (whence) {
 		case 1:
 			offset += file->f_pos;
@@ -1697,7 +1694,6 @@
 			if (offset >= 0)
 				break;
 		default:
-			inode_unlock(d_inode(dentry));
 			return -EINVAL;
 	}
 	if (offset != file->f_pos) {
@@ -1723,7 +1719,6 @@
 			spin_unlock(&configfs_dirent_lock);
 		}
 	}
-	inode_unlock(d_inode(dentry));
 	return offset;
 }
 
@@ -1732,7 +1727,7 @@
 	.release	= configfs_dir_close,
 	.llseek		= configfs_dir_lseek,
 	.read		= generic_read_dir,
-	.iterate	= configfs_readdir,
+	.iterate_shared	= configfs_readdir,
 };
 
 /**
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 03d124a..0387968 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -156,7 +156,7 @@
 
 	if (depth > 0) {
 		if (depth <= ARRAY_SIZE(default_group_class)) {
-			lockdep_set_class(&inode->i_mutex,
+			lockdep_set_class(&inode->i_rwsem,
 					  &default_group_class[depth - 1]);
 		} else {
 			/*
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index a8f3b58..cfd9132 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -71,8 +71,8 @@
 	struct inode *inode;
 	struct dentry *root;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = CONFIGFS_MAGIC;
 	sb->s_op = &configfs_ops;
 	sb->s_time_gran = 1;
diff --git a/fs/coredump.c b/fs/coredump.c
index 47c32c3..492c2db 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -803,12 +803,9 @@
 	static char zeroes[PAGE_SIZE];
 	struct file *file = cprm->file;
 	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
-		if (cprm->written + nr > cprm->limit)
-			return 0;
 		if (dump_interrupted() ||
 		    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
 			return 0;
-		cprm->written += nr;
 		return 1;
 	} else {
 		while (nr > PAGE_SIZE) {
@@ -823,7 +820,7 @@
 
 int dump_align(struct coredump_params *cprm, int align)
 {
-	unsigned mod = cprm->written & (align - 1);
+	unsigned mod = cprm->file->f_pos & (align - 1);
 	if (align & (align - 1))
 		return 0;
 	return mod ? dump_skip(cprm, align - mod) : 1;
diff --git a/fs/cramfs/README b/fs/cramfs/README
index 445d1c2..9d4e7ea 100644
--- a/fs/cramfs/README
+++ b/fs/cramfs/README
@@ -86,26 +86,26 @@
 
 (Block size in cramfs refers to the size of input data that is
 compressed at a time.  It's intended to be somewhere around
-PAGE_CACHE_SIZE for cramfs_readpage's convenience.)
+PAGE_SIZE for cramfs_readpage's convenience.)
 
 The superblock ought to indicate the block size that the fs was
 written for, since comments in <linux/pagemap.h> indicate that
-PAGE_CACHE_SIZE may grow in future (if I interpret the comment
+PAGE_SIZE may grow in future (if I interpret the comment
 correctly).
 
-Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that
-for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in
+Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that
+for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in
 turn is defined as PAGE_SIZE (which can be as large as 32KB on arm).
 This discrepancy is a bug, though it's not clear which should be
 changed.
 
-One option is to change mkcramfs to take its PAGE_CACHE_SIZE from
+One option is to change mkcramfs to take its PAGE_SIZE from
 <asm/page.h>.  Personally I don't like this option, but it does
 require the least amount of change: just change `#define
-PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'.  The disadvantage
+PAGE_SIZE (4096)' to `#include <asm/page.h>'.  The disadvantage
 is that the generated cramfs cannot always be shared between different
 kernels, not even necessarily kernels of the same architecture if
-PAGE_CACHE_SIZE is subject to change between kernel versions
+PAGE_SIZE is subject to change between kernel versions
 (currently possible with arm and ia64).
 
 The remaining options try to make cramfs more sharable.
@@ -126,22 +126,22 @@
   1. Always 4096 bytes.
 
   2. Writer chooses blocksize; kernel adapts but rejects blocksize >
-     PAGE_CACHE_SIZE.
+     PAGE_SIZE.
 
   3. Writer chooses blocksize; kernel adapts even to blocksize >
-     PAGE_CACHE_SIZE.
+     PAGE_SIZE.
 
 It's easy enough to change the kernel to use a smaller value than
-PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks.
+PAGE_SIZE: just make cramfs_readpage read multiple blocks.
 
-The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE
+The cost of option 1 is that kernels with a larger PAGE_SIZE
 value don't get as good compression as they can.
 
 The cost of option 2 relative to option 1 is that the code uses
 variables instead of #define'd constants.  The gain is that people
-with kernels having larger PAGE_CACHE_SIZE can make use of that if
+with kernels having larger PAGE_SIZE can make use of that if
 they don't mind their cramfs being inaccessible to kernels with
-smaller PAGE_CACHE_SIZE values.
+smaller PAGE_SIZE values.
 
 Option 3 is easy to implement if we don't mind being CPU-inefficient:
 e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b862bc219..7919967 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -137,7 +137,7 @@
  * page cache and dentry tree anyway..
  *
  * This also acts as a way to guarantee contiguous areas of up to
- * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
+ * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
  * worry about end-of-buffer issues even when decompressing a full
  * page cache.
  */
@@ -152,7 +152,7 @@
  */
 #define BLKS_PER_BUF_SHIFT	(2)
 #define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
-#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_CACHE_SIZE)
+#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_SIZE)
 
 static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
 static unsigned buffer_blocknr[READ_BUFFERS];
@@ -173,8 +173,8 @@
 
 	if (!len)
 		return NULL;
-	blocknr = offset >> PAGE_CACHE_SHIFT;
-	offset &= PAGE_CACHE_SIZE - 1;
+	blocknr = offset >> PAGE_SHIFT;
+	offset &= PAGE_SIZE - 1;
 
 	/* Check if an existing buffer already has the data.. */
 	for (i = 0; i < READ_BUFFERS; i++) {
@@ -184,14 +184,14 @@
 			continue;
 		if (blocknr < buffer_blocknr[i])
 			continue;
-		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
+		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
 		blk_offset += offset;
 		if (blk_offset + len > BUFFER_SIZE)
 			continue;
 		return read_buffers[i] + blk_offset;
 	}
 
-	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;
+	devsize = mapping->host->i_size >> PAGE_SHIFT;
 
 	/* Ok, read in BLKS_PER_BUF pages completely first. */
 	for (i = 0; i < BLKS_PER_BUF; i++) {
@@ -213,7 +213,7 @@
 			wait_on_page_locked(page);
 			if (!PageUptodate(page)) {
 				/* asynchronous error */
-				page_cache_release(page);
+				put_page(page);
 				pages[i] = NULL;
 			}
 		}
@@ -229,12 +229,12 @@
 		struct page *page = pages[i];
 
 		if (page) {
-			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
+			memcpy(data, kmap(page), PAGE_SIZE);
 			kunmap(page);
-			page_cache_release(page);
+			put_page(page);
 		} else
-			memset(data, 0, PAGE_CACHE_SIZE);
-		data += PAGE_CACHE_SIZE;
+			memset(data, 0, PAGE_SIZE);
+		data += PAGE_SIZE;
 	}
 	return read_buffers[buffer] + offset;
 }
@@ -353,7 +353,7 @@
 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
 
 	buf->f_type = CRAMFS_MAGIC;
-	buf->f_bsize = PAGE_CACHE_SIZE;
+	buf->f_bsize = PAGE_SIZE;
 	buf->f_blocks = CRAMFS_SB(sb)->blocks;
 	buf->f_bfree = 0;
 	buf->f_bavail = 0;
@@ -496,7 +496,7 @@
 	int bytes_filled;
 	void *pgdata;
 
-	maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	bytes_filled = 0;
 	pgdata = kmap(page);
 
@@ -516,14 +516,14 @@
 
 		if (compr_len == 0)
 			; /* hole */
-		else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) {
+		else if (unlikely(compr_len > (PAGE_SIZE << 1))) {
 			pr_err("bad compressed blocksize %u\n",
 				compr_len);
 			goto err;
 		} else {
 			mutex_lock(&read_mutex);
 			bytes_filled = cramfs_uncompress_block(pgdata,
-				 PAGE_CACHE_SIZE,
+				 PAGE_SIZE,
 				 cramfs_read(sb, start_offset, compr_len),
 				 compr_len);
 			mutex_unlock(&read_mutex);
@@ -532,7 +532,7 @@
 		}
 	}
 
-	memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
+	memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
 	flush_dcache_page(page);
 	kunmap(page);
 	SetPageUptodate(page);
@@ -561,7 +561,7 @@
 static const struct file_operations cramfs_directory_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= cramfs_readdir,
+	.iterate_shared	= cramfs_readdir,
 };
 
 static const struct inode_operations cramfs_dir_inode_operations = {
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 06cd1a2..2fc8c43 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -26,6 +26,7 @@
 #include <linux/ratelimit.h>
 #include <linux/bio.h>
 #include <linux/dcache.h>
+#include <linux/namei.h>
 #include <linux/fscrypto.h>
 #include <linux/ecryptfs.h>
 
@@ -81,13 +82,14 @@
 /**
  * fscrypt_get_ctx() - Gets an encryption context
  * @inode:       The inode for which we are doing the crypto
+ * @gfp_flags:   The gfp flag for memory allocation
  *
  * Allocates and initializes an encryption context.
  *
  * Return: An allocated and initialized encryption context on success; error
  * value or NULL otherwise.
  */
-struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
+struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
 {
 	struct fscrypt_ctx *ctx = NULL;
 	struct fscrypt_info *ci = inode->i_crypt_info;
@@ -113,7 +115,7 @@
 		list_del(&ctx->free_list);
 	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
 	if (!ctx) {
-		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
+		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
 		if (!ctx)
 			return ERR_PTR(-ENOMEM);
 		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
@@ -147,7 +149,8 @@
 
 static int do_page_crypto(struct inode *inode,
 			fscrypt_direction_t rw, pgoff_t index,
-			struct page *src_page, struct page *dest_page)
+			struct page *src_page, struct page *dest_page,
+			gfp_t gfp_flags)
 {
 	u8 xts_tweak[FS_XTS_TWEAK_SIZE];
 	struct skcipher_request *req = NULL;
@@ -157,7 +160,7 @@
 	struct crypto_skcipher *tfm = ci->ci_ctfm;
 	int res = 0;
 
-	req = skcipher_request_alloc(tfm, GFP_NOFS);
+	req = skcipher_request_alloc(tfm, gfp_flags);
 	if (!req) {
 		printk_ratelimited(KERN_ERR
 				"%s: crypto_request_alloc() failed\n",
@@ -175,10 +178,10 @@
 			FS_XTS_TWEAK_SIZE - sizeof(index));
 
 	sg_init_table(&dst, 1);
-	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
 	sg_init_table(&src, 1);
-	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-	skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+	sg_set_page(&src, src_page, PAGE_SIZE, 0);
+	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
 					xts_tweak);
 	if (rw == FS_DECRYPT)
 		res = crypto_skcipher_decrypt(req);
@@ -199,10 +202,9 @@
 	return 0;
 }
 
-static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
+static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
 {
-	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool,
-							GFP_NOWAIT);
+	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
 	if (ctx->w.bounce_page == NULL)
 		return ERR_PTR(-ENOMEM);
 	ctx->flags |= FS_WRITE_PATH_FL;
@@ -213,6 +215,7 @@
  * fscypt_encrypt_page() - Encrypts a page
  * @inode:          The inode for which the encryption should take place
  * @plaintext_page: The page to encrypt. Must be locked.
+ * @gfp_flags:      The gfp flag for memory allocation
  *
  * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
  * encryption context.
@@ -225,7 +228,7 @@
  * error value or NULL.
  */
 struct page *fscrypt_encrypt_page(struct inode *inode,
-				struct page *plaintext_page)
+				struct page *plaintext_page, gfp_t gfp_flags)
 {
 	struct fscrypt_ctx *ctx;
 	struct page *ciphertext_page = NULL;
@@ -233,18 +236,19 @@
 
 	BUG_ON(!PageLocked(plaintext_page));
 
-	ctx = fscrypt_get_ctx(inode);
+	ctx = fscrypt_get_ctx(inode, gfp_flags);
 	if (IS_ERR(ctx))
 		return (struct page *)ctx;
 
 	/* The encryption operation will require a bounce page. */
-	ciphertext_page = alloc_bounce_page(ctx);
+	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
 	if (IS_ERR(ciphertext_page))
 		goto errout;
 
 	ctx->w.control_page = plaintext_page;
 	err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
-					plaintext_page, ciphertext_page);
+					plaintext_page, ciphertext_page,
+					gfp_flags);
 	if (err) {
 		ciphertext_page = ERR_PTR(err);
 		goto errout;
@@ -275,7 +279,7 @@
 	BUG_ON(!PageLocked(page));
 
 	return do_page_crypto(page->mapping->host,
-			FS_DECRYPT, page->index, page, page);
+			FS_DECRYPT, page->index, page, page, GFP_NOFS);
 }
 EXPORT_SYMBOL(fscrypt_decrypt_page);
 
@@ -287,13 +291,13 @@
 	struct bio *bio;
 	int ret, err = 0;
 
-	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
 
-	ctx = fscrypt_get_ctx(inode);
+	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ciphertext_page = alloc_bounce_page(ctx);
+	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
 	if (IS_ERR(ciphertext_page)) {
 		err = PTR_ERR(ciphertext_page);
 		goto errout;
@@ -301,11 +305,12 @@
 
 	while (len--) {
 		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
-						ZERO_PAGE(0), ciphertext_page);
+					ZERO_PAGE(0), ciphertext_page,
+					GFP_NOFS);
 		if (err)
 			goto errout;
 
-		bio = bio_alloc(GFP_KERNEL, 1);
+		bio = bio_alloc(GFP_NOWAIT, 1);
 		if (!bio) {
 			err = -ENOMEM;
 			goto errout;
@@ -345,13 +350,20 @@
  */
 static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
-	struct inode *dir = d_inode(dentry->d_parent);
-	struct fscrypt_info *ci = dir->i_crypt_info;
+	struct dentry *dir;
+	struct fscrypt_info *ci;
 	int dir_has_key, cached_with_key;
 
-	if (!dir->i_sb->s_cop->is_encrypted(dir))
-		return 0;
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
 
+	dir = dget_parent(dentry);
+	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
+		dput(dir);
+		return 0;
+	}
+
+	ci = d_inode(dir)->i_crypt_info;
 	if (ci && ci->ci_keyring_key &&
 	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
 					  (1 << KEY_FLAG_REVOKED) |
@@ -363,6 +375,7 @@
 	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
 	spin_unlock(&dentry->d_lock);
 	dir_has_key = (ci != NULL);
+	dput(dir);
 
 	/*
 	 * If the dentry was cached without the key, and it is a
diff --git a/fs/dax.c b/fs/dax.c
index 90322eb..0dbe4e0 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -244,7 +244,6 @@
  * @iocb: The control block for this I/O
  * @inode: The file which the I/O is directed at
  * @iter: The addresses to do I/O from or to
- * @pos: The file offset where the I/O starts
  * @get_block: The filesystem method used to translate file offsets to blocks
  * @end_io: A filesystem callback for I/O completion
  * @flags: See below
@@ -257,11 +256,12 @@
  * is in progress.
  */
 ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
-		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
+		  struct iov_iter *iter, get_block_t get_block,
 		  dio_iodone_t end_io, int flags)
 {
 	struct buffer_head bh;
 	ssize_t retval = -EINVAL;
+	loff_t pos = iocb->ki_pos;
 	loff_t end = pos + iov_iter_count(iter);
 
 	memset(&bh, 0, sizeof(bh));
@@ -323,7 +323,7 @@
 	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (vmf->pgoff >= size) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return VM_FAULT_SIGBUS;
 	}
 
@@ -351,7 +351,7 @@
 }
 
 #define NO_SECTOR -1
-#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT))
+#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
 
 static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
 		sector_t sector, bool pmd_entry, bool dirty)
@@ -506,8 +506,8 @@
 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
 		return 0;
 
-	start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
-	end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
+	start_index = wbc->range_start >> PAGE_SHIFT;
+	end_index = wbc->range_end >> PAGE_SHIFT;
 	pmd_index = DAX_PMD_INDEX(start_index);
 
 	rcu_read_lock();
@@ -642,12 +642,12 @@
 	page = find_get_page(mapping, vmf->pgoff);
 	if (page) {
 		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
-			page_cache_release(page);
+			put_page(page);
 			return VM_FAULT_RETRY;
 		}
 		if (unlikely(page->mapping != mapping)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -711,10 +711,10 @@
 
 	if (page) {
 		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-							PAGE_CACHE_SIZE, 0);
+							PAGE_SIZE, 0);
 		delete_from_page_cache(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 	}
 
@@ -747,7 +747,7 @@
  unlock_page:
 	if (page) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	goto out;
 }
@@ -1094,7 +1094,7 @@
  * you are truncating a file, the helper function dax_truncate_page() may be
  * more convenient.
  *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
  * took care of disposing of the unnecessary blocks.  Even if the filesystem
  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1104,18 +1104,18 @@
 							get_block_t get_block)
 {
 	struct buffer_head bh;
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	pgoff_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	int err;
 
 	/* Block boundary? Nothing to do */
 	if (!length)
 		return 0;
-	BUG_ON((offset + length) > PAGE_CACHE_SIZE);
+	BUG_ON((offset + length) > PAGE_SIZE);
 
 	memset(&bh, 0, sizeof(bh));
 	bh.b_bdev = inode->i_sb->s_bdev;
-	bh.b_size = PAGE_CACHE_SIZE;
+	bh.b_size = PAGE_SIZE;
 	err = get_block(inode, index, &bh, 0);
 	if (err < 0)
 		return err;
@@ -1123,7 +1123,7 @@
 		struct block_device *bdev = bh.b_bdev;
 		struct blk_dax_ctl dax = {
 			.sector = to_sector(&bh, inode),
-			.size = PAGE_CACHE_SIZE,
+			.size = PAGE_SIZE,
 		};
 
 		if (dax_map_atomic(bdev, &dax) < 0)
@@ -1146,7 +1146,7 @@
  * Similar to block_truncate_page(), this function can be called by a
  * filesystem when it is truncating a DAX file to handle the partial page.
  *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
  * took care of disposing of the unnecessary blocks.  Even if the filesystem
  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1154,7 +1154,7 @@
  */
 int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
 {
-	unsigned length = PAGE_CACHE_ALIGN(from) - from;
+	unsigned length = PAGE_ALIGN(from) - from;
 	return dax_zero_page_range(inode, from, length, get_block);
 }
 EXPORT_SYMBOL_GPL(dax_truncate_page);
diff --git a/fs/dcache.c b/fs/dcache.c
index 32ceae3..c622872 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -111,6 +111,17 @@
 	return dentry_hashtable + hash_32(hash, d_hash_shift);
 }
 
+#define IN_LOOKUP_SHIFT 10
+static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
+
+static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
+					unsigned int hash)
+{
+	hash += (unsigned long) parent / L1_CACHE_BYTES;
+	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
+}
+
+
 /* Statistics gathering. */
 struct dentry_stat_t dentry_stat = {
 	.age_limit = 45,
@@ -761,6 +772,8 @@
 	/* Slow case: now with the dentry lock held */
 	rcu_read_unlock();
 
+	WARN_ON(d_in_lookup(dentry));
+
 	/* Unreachable? Get rid of it */
 	if (unlikely(d_unhashed(dentry)))
 		goto kill_it;
@@ -1558,7 +1571,11 @@
 	 * be overwriting an internal NUL character
 	 */
 	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
-	if (name->len > DNAME_INLINE_LEN-1) {
+	if (unlikely(!name)) {
+		static const struct qstr anon = QSTR_INIT("/", 1);
+		name = &anon;
+		dname = dentry->d_iname;
+	} else if (name->len > DNAME_INLINE_LEN-1) {
 		size_t size = offsetof(struct external_name, name[1]);
 		struct external_name *p = kmalloc(size + name->len,
 						  GFP_KERNEL_ACCOUNT);
@@ -1667,7 +1684,8 @@
 				DCACHE_OP_REVALIDATE	|
 				DCACHE_OP_WEAK_REVALIDATE	|
 				DCACHE_OP_DELETE	|
-				DCACHE_OP_SELECT_INODE));
+				DCACHE_OP_SELECT_INODE	|
+				DCACHE_OP_REAL));
 	dentry->d_op = op;
 	if (!op)
 		return;
@@ -1685,6 +1703,8 @@
 		dentry->d_flags |= DCACHE_OP_PRUNE;
 	if (op->d_select_inode)
 		dentry->d_flags |= DCACHE_OP_SELECT_INODE;
+	if (op->d_real)
+		dentry->d_flags |= DCACHE_OP_REAL;
 
 }
 EXPORT_SYMBOL(d_set_d_op);
@@ -1743,6 +1763,7 @@
 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 {
 	unsigned add_flags = d_flags_for_inode(inode);
+	WARN_ON(d_in_lookup(dentry));
 
 	spin_lock(&dentry->d_lock);
 	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
@@ -1772,11 +1793,11 @@
 {
 	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
 	if (inode) {
+		security_d_instantiate(entry, inode);
 		spin_lock(&inode->i_lock);
 		__d_instantiate(entry, inode);
 		spin_unlock(&inode->i_lock);
 	}
-	security_d_instantiate(entry, inode);
 }
 EXPORT_SYMBOL(d_instantiate);
 
@@ -1793,6 +1814,7 @@
 {
 	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
 
+	security_d_instantiate(entry, inode);
 	spin_lock(&inode->i_lock);
 	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
 		spin_unlock(&inode->i_lock);
@@ -1801,7 +1823,6 @@
 	}
 	__d_instantiate(entry, inode);
 	spin_unlock(&inode->i_lock);
-	security_d_instantiate(entry, inode);
 
 	return 0;
 }
@@ -1812,9 +1833,7 @@
 	struct dentry *res = NULL;
 
 	if (root_inode) {
-		static const struct qstr name = QSTR_INIT("/", 1);
-
-		res = __d_alloc(root_inode->i_sb, &name);
+		res = __d_alloc(root_inode->i_sb, NULL);
 		if (res)
 			d_instantiate(res, root_inode);
 		else
@@ -1855,7 +1874,6 @@
 
 static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
 {
-	static const struct qstr anonstring = QSTR_INIT("/", 1);
 	struct dentry *tmp;
 	struct dentry *res;
 	unsigned add_flags;
@@ -1869,12 +1887,13 @@
 	if (res)
 		goto out_iput;
 
-	tmp = __d_alloc(inode->i_sb, &anonstring);
+	tmp = __d_alloc(inode->i_sb, NULL);
 	if (!tmp) {
 		res = ERR_PTR(-ENOMEM);
 		goto out_iput;
 	}
 
+	security_d_instantiate(tmp, inode);
 	spin_lock(&inode->i_lock);
 	res = __d_find_any_alias(inode);
 	if (res) {
@@ -1897,13 +1916,10 @@
 	hlist_bl_unlock(&tmp->d_sb->s_anon);
 	spin_unlock(&tmp->d_lock);
 	spin_unlock(&inode->i_lock);
-	security_d_instantiate(tmp, inode);
 
 	return tmp;
 
  out_iput:
-	if (res && !IS_ERR(res))
-		security_d_instantiate(res, inode);
 	iput(inode);
 	return res;
 }
@@ -1972,28 +1988,36 @@
 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
 			struct qstr *name)
 {
-	struct dentry *found;
-	struct dentry *new;
+	struct dentry *found, *res;
 
 	/*
 	 * First check if a dentry matching the name already exists,
 	 * if not go ahead and create it now.
 	 */
 	found = d_hash_and_lookup(dentry->d_parent, name);
-	if (!found) {
-		new = d_alloc(dentry->d_parent, name);
-		if (!new) {
-			found = ERR_PTR(-ENOMEM);
-		} else {
-			found = d_splice_alias(inode, new);
-			if (found) {
-				dput(new);
-				return found;
-			}
-			return new;
-		}
+	if (found) {
+		iput(inode);
+		return found;
 	}
-	iput(inode);
+	if (d_in_lookup(dentry)) {
+		found = d_alloc_parallel(dentry->d_parent, name,
+					dentry->d_wait);
+		if (IS_ERR(found) || !d_in_lookup(found)) {
+			iput(inode);
+			return found;
+		}
+	} else {
+		found = d_alloc(dentry->d_parent, name);
+		if (!found) {
+			iput(inode);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	res = d_splice_alias(inode, found);
+	if (res) {
+		dput(found);
+		return res;
+	}
 	return found;
 }
 EXPORT_SYMBOL(d_add_ci);
@@ -2360,17 +2384,194 @@
 }
 EXPORT_SYMBOL(d_rehash);
 
+static inline unsigned start_dir_add(struct inode *dir)
+{
+
+	for (;;) {
+		unsigned n = dir->i_dir_seq;
+		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
+			return n;
+		cpu_relax();
+	}
+}
+
+static inline void end_dir_add(struct inode *dir, unsigned n)
+{
+	smp_store_release(&dir->i_dir_seq, n + 2);
+}
+
+static void d_wait_lookup(struct dentry *dentry)
+{
+	if (d_in_lookup(dentry)) {
+		DECLARE_WAITQUEUE(wait, current);
+		add_wait_queue(dentry->d_wait, &wait);
+		do {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			spin_unlock(&dentry->d_lock);
+			schedule();
+			spin_lock(&dentry->d_lock);
+		} while (d_in_lookup(dentry));
+	}
+}
+
+struct dentry *d_alloc_parallel(struct dentry *parent,
+				const struct qstr *name,
+				wait_queue_head_t *wq)
+{
+	unsigned int len = name->len;
+	unsigned int hash = name->hash;
+	const unsigned char *str = name->name;
+	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
+	struct hlist_bl_node *node;
+	struct dentry *new = d_alloc(parent, name);
+	struct dentry *dentry;
+	unsigned seq, r_seq, d_seq;
+
+	if (unlikely(!new))
+		return ERR_PTR(-ENOMEM);
+
+retry:
+	rcu_read_lock();
+	seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
+	r_seq = read_seqbegin(&rename_lock);
+	dentry = __d_lookup_rcu(parent, name, &d_seq);
+	if (unlikely(dentry)) {
+		if (!lockref_get_not_dead(&dentry->d_lockref)) {
+			rcu_read_unlock();
+			goto retry;
+		}
+		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
+			rcu_read_unlock();
+			dput(dentry);
+			goto retry;
+		}
+		rcu_read_unlock();
+		dput(new);
+		return dentry;
+	}
+	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
+		rcu_read_unlock();
+		goto retry;
+	}
+	hlist_bl_lock(b);
+	if (unlikely(parent->d_inode->i_dir_seq != seq)) {
+		hlist_bl_unlock(b);
+		rcu_read_unlock();
+		goto retry;
+	}
+	rcu_read_unlock();
+	/*
+	 * No changes for the parent since the beginning of d_lookup().
+	 * Since all removals from the chain happen with hlist_bl_lock(),
+	 * any potential in-lookup matches are going to stay here until
+	 * we unlock the chain.  All fields are stable in everything
+	 * we encounter.
+	 */
+	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
+		if (dentry->d_name.hash != hash)
+			continue;
+		if (dentry->d_parent != parent)
+			continue;
+		if (d_unhashed(dentry))
+			continue;
+		if (parent->d_flags & DCACHE_OP_COMPARE) {
+			int tlen = dentry->d_name.len;
+			const char *tname = dentry->d_name.name;
+			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
+				continue;
+		} else {
+			if (dentry->d_name.len != len)
+				continue;
+			if (dentry_cmp(dentry, str, len))
+				continue;
+		}
+		dget(dentry);
+		hlist_bl_unlock(b);
+		/* somebody is doing lookup for it right now; wait for it */
+		spin_lock(&dentry->d_lock);
+		d_wait_lookup(dentry);
+		/*
+		 * it's not in-lookup anymore; in principle we should repeat
+		 * everything from dcache lookup, but it's likely to be what
+		 * d_lookup() would've found anyway.  If it is, just return it;
+		 * otherwise we really have to repeat the whole thing.
+		 */
+		if (unlikely(dentry->d_name.hash != hash))
+			goto mismatch;
+		if (unlikely(dentry->d_parent != parent))
+			goto mismatch;
+		if (unlikely(d_unhashed(dentry)))
+			goto mismatch;
+		if (parent->d_flags & DCACHE_OP_COMPARE) {
+			int tlen = dentry->d_name.len;
+			const char *tname = dentry->d_name.name;
+			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
+				goto mismatch;
+		} else {
+			if (unlikely(dentry->d_name.len != len))
+				goto mismatch;
+			if (unlikely(dentry_cmp(dentry, str, len)))
+				goto mismatch;
+		}
+		/* OK, it *is* a hashed match; return it */
+		spin_unlock(&dentry->d_lock);
+		dput(new);
+		return dentry;
+	}
+	/* we can't take ->d_lock here; it's OK, though. */
+	new->d_flags |= DCACHE_PAR_LOOKUP;
+	new->d_wait = wq;
+	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
+	hlist_bl_unlock(b);
+	return new;
+mismatch:
+	spin_unlock(&dentry->d_lock);
+	dput(dentry);
+	goto retry;
+}
+EXPORT_SYMBOL(d_alloc_parallel);
+
+void __d_lookup_done(struct dentry *dentry)
+{
+	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
+						 dentry->d_name.hash);
+	hlist_bl_lock(b);
+	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
+	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
+	wake_up_all(dentry->d_wait);
+	dentry->d_wait = NULL;
+	hlist_bl_unlock(b);
+	INIT_HLIST_NODE(&dentry->d_u.d_alias);
+	INIT_LIST_HEAD(&dentry->d_lru);
+}
+EXPORT_SYMBOL(__d_lookup_done);
 
 /* inode->i_lock held if inode is non-NULL */
 
 static inline void __d_add(struct dentry *dentry, struct inode *inode)
 {
-	if (inode) {
-		__d_instantiate(dentry, inode);
-		spin_unlock(&inode->i_lock);
+	struct inode *dir = NULL;
+	unsigned n;
+	spin_lock(&dentry->d_lock);
+	if (unlikely(d_in_lookup(dentry))) {
+		dir = dentry->d_parent->d_inode;
+		n = start_dir_add(dir);
+		__d_lookup_done(dentry);
 	}
-	security_d_instantiate(dentry, inode);
-	d_rehash(dentry);
+	if (inode) {
+		unsigned add_flags = d_flags_for_inode(inode);
+		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
+		raw_write_seqcount_begin(&dentry->d_seq);
+		__d_set_inode_and_type(dentry, inode, add_flags);
+		raw_write_seqcount_end(&dentry->d_seq);
+		__fsnotify_d_instantiate(dentry);
+	}
+	_d_rehash(dentry);
+	if (dir)
+		end_dir_add(dir, n);
+	spin_unlock(&dentry->d_lock);
+	if (inode)
+		spin_unlock(&inode->i_lock);
 }
 
 /**
@@ -2384,8 +2585,10 @@
 
 void d_add(struct dentry *entry, struct inode *inode)
 {
-	if (inode)
+	if (inode) {
+		security_d_instantiate(entry, inode);
 		spin_lock(&inode->i_lock);
+	}
 	__d_add(entry, inode);
 }
 EXPORT_SYMBOL(d_add);
@@ -2595,6 +2798,8 @@
 static void __d_move(struct dentry *dentry, struct dentry *target,
 		     bool exchange)
 {
+	struct inode *dir = NULL;
+	unsigned n;
 	if (!dentry->d_inode)
 		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
 
@@ -2602,6 +2807,11 @@
 	BUG_ON(d_ancestor(target, dentry));
 
 	dentry_lock_for_move(dentry, target);
+	if (unlikely(d_in_lookup(target))) {
+		dir = target->d_parent->d_inode;
+		n = start_dir_add(dir);
+		__d_lookup_done(target);
+	}
 
 	write_seqcount_begin(&dentry->d_seq);
 	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
@@ -2651,6 +2861,8 @@
 	write_seqcount_end(&target->d_seq);
 	write_seqcount_end(&dentry->d_seq);
 
+	if (dir)
+		end_dir_add(dir, n);
 	dentry_unlock_for_move(dentry, target);
 }
 
@@ -2721,7 +2933,8 @@
 static int __d_unalias(struct inode *inode,
 		struct dentry *dentry, struct dentry *alias)
 {
-	struct mutex *m1 = NULL, *m2 = NULL;
+	struct mutex *m1 = NULL;
+	struct rw_semaphore *m2 = NULL;
 	int ret = -ESTALE;
 
 	/* If alias and dentry share a parent, then no extra locks required */
@@ -2732,15 +2945,15 @@
 	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
 		goto out_err;
 	m1 = &dentry->d_sb->s_vfs_rename_mutex;
-	if (!inode_trylock(alias->d_parent->d_inode))
+	if (!inode_trylock_shared(alias->d_parent->d_inode))
 		goto out_err;
-	m2 = &alias->d_parent->d_inode->i_mutex;
+	m2 = &alias->d_parent->d_inode->i_rwsem;
 out_unalias:
 	__d_move(alias, dentry, false);
 	ret = 0;
 out_err:
 	if (m2)
-		mutex_unlock(m2);
+		up_read(m2);
 	if (m1)
 		mutex_unlock(m1);
 	return ret;
@@ -2779,6 +2992,7 @@
 	if (!inode)
 		goto out;
 
+	security_d_instantiate(dentry, inode);
 	spin_lock(&inode->i_lock);
 	if (S_ISDIR(inode->i_mode)) {
 		struct dentry *new = __d_find_any_alias(inode);
@@ -2806,7 +3020,6 @@
 			} else {
 				__d_move(new, dentry, false);
 				write_sequnlock(&rename_lock);
-				security_d_instantiate(new, inode);
 			}
 			iput(inode);
 			return new;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index bece948..8580831 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -457,7 +457,7 @@
 	if (unlikely(!inode))
 		return failed_creating(dentry);
 
-	inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+	make_empty_dir_inode(inode);
 	inode->i_flags |= S_AUTOMOUNT;
 	inode->i_private = data;
 	dentry->d_fsdata = (void *)f;
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 655f21f..0b2954d 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -128,6 +128,7 @@
 struct pts_fs_info {
 	struct ida allocated_ptys;
 	struct pts_mount_opts mount_opts;
+	struct super_block *sb;
 	struct dentry *ptmx_dentry;
 };
 
@@ -358,7 +359,7 @@
 	.show_options	= devpts_show_options,
 };
 
-static void *new_pts_fs_info(void)
+static void *new_pts_fs_info(struct super_block *sb)
 {
 	struct pts_fs_info *fsi;
 
@@ -369,6 +370,7 @@
 	ida_init(&fsi->allocated_ptys);
 	fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
 	fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+	fsi->sb = sb;
 
 	return fsi;
 }
@@ -384,7 +386,7 @@
 	s->s_op = &devpts_sops;
 	s->s_time_gran = 1;
 
-	s->s_fs_info = new_pts_fs_info();
+	s->s_fs_info = new_pts_fs_info(s);
 	if (!s->s_fs_info)
 		goto fail;
 
@@ -524,17 +526,14 @@
  * to the System V naming convention
  */
 
-int devpts_new_index(struct inode *ptmx_inode)
+int devpts_new_index(struct pts_fs_info *fsi)
 {
-	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-	struct pts_fs_info *fsi;
 	int index;
 	int ida_ret;
 
-	if (!sb)
+	if (!fsi)
 		return -ENODEV;
 
-	fsi = DEVPTS_SB(sb);
 retry:
 	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
 		return -ENOMEM;
@@ -564,11 +563,8 @@
 	return index;
 }
 
-void devpts_kill_index(struct inode *ptmx_inode, int idx)
+void devpts_kill_index(struct pts_fs_info *fsi, int idx)
 {
-	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-	struct pts_fs_info *fsi = DEVPTS_SB(sb);
-
 	mutex_lock(&allocated_ptys_lock);
 	ida_remove(&fsi->allocated_ptys, idx);
 	pty_count--;
@@ -578,21 +574,25 @@
 /*
  * pty code needs to hold extra references in case of last /dev/tty close
  */
-
-void devpts_add_ref(struct inode *ptmx_inode)
+struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
 {
-	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+	struct super_block *sb;
+	struct pts_fs_info *fsi;
+
+	sb = pts_sb_from_inode(ptmx_inode);
+	if (!sb)
+		return NULL;
+	fsi = DEVPTS_SB(sb);
+	if (!fsi)
+		return NULL;
 
 	atomic_inc(&sb->s_active);
-	ihold(ptmx_inode);
+	return fsi;
 }
 
-void devpts_del_ref(struct inode *ptmx_inode)
+void devpts_put_ref(struct pts_fs_info *fsi)
 {
-	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-
-	iput(ptmx_inode);
-	deactivate_super(sb);
+	deactivate_super(fsi->sb);
 }
 
 /**
@@ -604,22 +604,20 @@
  *
  * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
  */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
-		void *priv)
+struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
 {
 	struct dentry *dentry;
-	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+	struct super_block *sb;
 	struct inode *inode;
 	struct dentry *root;
-	struct pts_fs_info *fsi;
 	struct pts_mount_opts *opts;
 	char s[12];
 
-	if (!sb)
+	if (!fsi)
 		return ERR_PTR(-ENODEV);
 
+	sb = fsi->sb;
 	root = sb->s_root;
-	fsi = DEVPTS_SB(sb);
 	opts = &fsi->mount_opts;
 
 	inode = new_inode(sb);
@@ -630,25 +628,21 @@
 	inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
 	inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
-	init_special_inode(inode, S_IFCHR|opts->mode, device);
-	inode->i_private = priv;
+	init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
 
 	sprintf(s, "%d", index);
 
-	inode_lock(d_inode(root));
-
 	dentry = d_alloc_name(root, s);
 	if (dentry) {
+		dentry->d_fsdata = priv;
 		d_add(dentry, inode);
 		fsnotify_create(d_inode(root), dentry);
 	} else {
 		iput(inode);
-		inode = ERR_PTR(-ENOMEM);
+		dentry = ERR_PTR(-ENOMEM);
 	}
 
-	inode_unlock(d_inode(root));
-
-	return inode;
+	return dentry;
 }
 
 /**
@@ -657,24 +651,10 @@
  *
  * Returns whatever was passed as priv in devpts_pty_new for a given inode.
  */
-void *devpts_get_priv(struct inode *pts_inode)
+void *devpts_get_priv(struct dentry *dentry)
 {
-	struct dentry *dentry;
-	void *priv = NULL;
-
-	BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
-
-	/* Ensure dentry has not been deleted by devpts_pty_kill() */
-	dentry = d_find_alias(pts_inode);
-	if (!dentry)
-		return NULL;
-
-	if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
-		priv = pts_inode->i_private;
-
-	dput(dentry);
-
-	return priv;
+	WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
+	return dentry->d_fsdata;
 }
 
 /**
@@ -683,24 +663,14 @@
  *
  * This is an inverse operation of devpts_pty_new.
  */
-void devpts_pty_kill(struct inode *inode)
+void devpts_pty_kill(struct dentry *dentry)
 {
-	struct super_block *sb = pts_sb_from_inode(inode);
-	struct dentry *root = sb->s_root;
-	struct dentry *dentry;
+	WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
 
-	BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
-
-	inode_lock(d_inode(root));
-
-	dentry = d_find_alias(inode);
-
-	drop_nlink(inode);
+	dentry->d_fsdata = NULL;
+	drop_nlink(dentry->d_inode);
 	d_delete(dentry);
 	dput(dentry);	/* d_alloc_name() in devpts_pty_new() */
-	dput(dentry);		/* d_find_alias above */
-
-	inode_unlock(d_inode(root));
 }
 
 static int __init init_devpts_fs(void)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 476f1ec..3bf3f20 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -172,7 +172,7 @@
 		 */
 		if (dio->page_errors == 0)
 			dio->page_errors = ret;
-		page_cache_get(page);
+		get_page(page);
 		dio->pages[0] = page;
 		sdio->head = 0;
 		sdio->tail = 1;
@@ -224,9 +224,9 @@
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
-		bool is_async)
+static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
 {
+	loff_t offset = dio->iocb->ki_pos;
 	ssize_t transferred = 0;
 
 	/*
@@ -256,6 +256,7 @@
 	if (dio->end_io) {
 		int err;
 
+		// XXX: ki_pos??
 		err = dio->end_io(dio->iocb, offset, ret, dio->private);
 		if (err)
 			ret = err;
@@ -265,15 +266,15 @@
 		inode_dio_end(dio->inode);
 
 	if (is_async) {
-		if (dio->rw & WRITE) {
-			int err;
+		/*
+		 * generic_write_sync expects ki_pos to have been updated
+		 * already, but the submission path only does this for
+		 * synchronous I/O.
+		 */
+		dio->iocb->ki_pos += transferred;
 
-			err = generic_write_sync(dio->iocb->ki_filp, offset,
-						 transferred);
-			if (err < 0 && ret > 0)
-				ret = err;
-		}
-
+		if (dio->rw & WRITE)
+			ret = generic_write_sync(dio->iocb,  transferred);
 		dio->iocb->ki_complete(dio->iocb, ret, 0);
 	}
 
@@ -285,7 +286,7 @@
 {
 	struct dio *dio = container_of(work, struct dio, complete_work);
 
-	dio_complete(dio, dio->iocb->ki_pos, 0, true);
+	dio_complete(dio, 0, true);
 }
 
 static int dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -314,7 +315,7 @@
 			queue_work(dio->inode->i_sb->s_dio_done_wq,
 				   &dio->complete_work);
 		} else {
-			dio_complete(dio, dio->iocb->ki_pos, 0, true);
+			dio_complete(dio, 0, true);
 		}
 	}
 }
@@ -424,7 +425,7 @@
 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
 {
 	while (sdio->head < sdio->tail)
-		page_cache_release(dio->pages[sdio->head++]);
+		put_page(dio->pages[sdio->head++]);
 }
 
 /*
@@ -487,7 +488,7 @@
 			if (dio->rw == READ && !PageCompound(page) &&
 					dio->should_dirty)
 				set_page_dirty_lock(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 		err = bio->bi_error;
 		bio_put(bio);
@@ -696,7 +697,7 @@
 		 */
 		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
 			sdio->pages_in_io--;
-		page_cache_get(sdio->cur_page);
+		get_page(sdio->cur_page);
 		sdio->final_block_in_bio = sdio->cur_page_block +
 			(sdio->cur_page_len >> sdio->blkbits);
 		ret = 0;
@@ -810,13 +811,13 @@
 	 */
 	if (sdio->cur_page) {
 		ret = dio_send_cur_page(dio, sdio, map_bh);
-		page_cache_release(sdio->cur_page);
+		put_page(sdio->cur_page);
 		sdio->cur_page = NULL;
 		if (ret)
 			return ret;
 	}
 
-	page_cache_get(page);		/* It is in dio */
+	get_page(page);		/* It is in dio */
 	sdio->cur_page = page;
 	sdio->cur_page_offset = offset;
 	sdio->cur_page_len = len;
@@ -830,7 +831,7 @@
 	if (sdio->boundary) {
 		ret = dio_send_cur_page(dio, sdio, map_bh);
 		dio_bio_submit(dio, sdio);
-		page_cache_release(sdio->cur_page);
+		put_page(sdio->cur_page);
 		sdio->cur_page = NULL;
 	}
 	return ret;
@@ -947,7 +948,7 @@
 
 				ret = get_more_blocks(dio, sdio, map_bh);
 				if (ret) {
-					page_cache_release(page);
+					put_page(page);
 					goto out;
 				}
 				if (!buffer_mapped(map_bh))
@@ -988,7 +989,7 @@
 
 				/* AKPM: eargh, -ENOTBLK is a hack */
 				if (dio->rw & WRITE) {
-					page_cache_release(page);
+					put_page(page);
 					return -ENOTBLK;
 				}
 
@@ -1001,7 +1002,7 @@
 				if (sdio->block_in_file >=
 						i_size_aligned >> blkbits) {
 					/* We hit eof */
-					page_cache_release(page);
+					put_page(page);
 					goto out;
 				}
 				zero_user(page, from, 1 << blkbits);
@@ -1041,7 +1042,7 @@
 						  sdio->next_block_for_io,
 						  map_bh);
 			if (ret) {
-				page_cache_release(page);
+				put_page(page);
 				goto out;
 			}
 			sdio->next_block_for_io += this_chunk_blocks;
@@ -1057,7 +1058,7 @@
 		}
 
 		/* Drop the ref which was taken in get_user_pages() */
-		page_cache_release(page);
+		put_page(page);
 	}
 out:
 	return ret;
@@ -1113,7 +1114,7 @@
 static inline ssize_t
 do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 		      struct block_device *bdev, struct iov_iter *iter,
-		      loff_t offset, get_block_t get_block, dio_iodone_t end_io,
+		      get_block_t get_block, dio_iodone_t end_io,
 		      dio_submit_t submit_io, int flags)
 {
 	unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
@@ -1121,6 +1122,7 @@
 	unsigned blocksize_mask = (1 << blkbits) - 1;
 	ssize_t retval = -EINVAL;
 	size_t count = iov_iter_count(iter);
+	loff_t offset = iocb->ki_pos;
 	loff_t end = offset + count;
 	struct dio *dio;
 	struct dio_submit sdio = { 0, };
@@ -1281,7 +1283,7 @@
 		ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
 		if (retval == 0)
 			retval = ret2;
-		page_cache_release(sdio.cur_page);
+		put_page(sdio.cur_page);
 		sdio.cur_page = NULL;
 	}
 	if (sdio.bio)
@@ -1318,7 +1320,7 @@
 		dio_await_completion(dio);
 
 	if (drop_refcount(dio) == 0) {
-		retval = dio_complete(dio, offset, retval, false);
+		retval = dio_complete(dio, retval, false);
 	} else
 		BUG_ON(retval != -EIOCBQUEUED);
 
@@ -1328,7 +1330,7 @@
 
 ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 			     struct block_device *bdev, struct iov_iter *iter,
-			     loff_t offset, get_block_t get_block,
+			     get_block_t get_block,
 			     dio_iodone_t end_io, dio_submit_t submit_io,
 			     int flags)
 {
@@ -1344,7 +1346,7 @@
 	prefetch(bdev->bd_queue);
 	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
 
-	return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
+	return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
 				     end_io, submit_io, flags);
 }
 
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 5191121..1669f62 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -343,13 +343,12 @@
 	struct dlm_cluster *cl = NULL;
 	struct dlm_spaces *sps = NULL;
 	struct dlm_comms *cms = NULL;
-	void *gps = NULL;
 
 	cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
 	sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
 	cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
 
-	if (!cl || !gps || !sps || !cms)
+	if (!cl || !sps || !cms)
 		goto fail;
 
 	config_group_init_type_name(&cl->group, name, &cluster_type);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 00640e7..1ab012a 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -640,7 +640,7 @@
 		con->rx_page = alloc_page(GFP_ATOMIC);
 		if (con->rx_page == NULL)
 			goto out_resched;
-		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
+		cbuf_init(&con->cb, PAGE_SIZE);
 	}
 
 	/*
@@ -657,7 +657,7 @@
 	 * buffer and the start of the currently used section (cb.base)
 	 */
 	if (cbuf_data(&con->cb) >= con->cb.base) {
-		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
+		iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
 		iov[1].iov_len = con->cb.base;
 		iov[1].iov_base = page_address(con->rx_page);
 		nvec = 2;
@@ -675,7 +675,7 @@
 	ret = dlm_process_incoming_buffer(con->nodeid,
 					  page_address(con->rx_page),
 					  con->cb.base, con->cb.len,
-					  PAGE_CACHE_SIZE);
+					  PAGE_SIZE);
 	if (ret == -EBADMSG) {
 		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
 			  page_address(con->rx_page), con->cb.base,
@@ -1416,7 +1416,7 @@
 	spin_lock(&con->writequeue_lock);
 	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
 	if ((&e->list == &con->writequeue) ||
-	    (PAGE_CACHE_SIZE - e->end < len)) {
+	    (PAGE_SIZE - e->end < len)) {
 		e = NULL;
 	} else {
 		offset = e->end;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 64026e5..ebd40f4 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -105,19 +105,7 @@
 	struct crypto_shash *tfm;
 	int rc = 0;
 
-	mutex_lock(&crypt_stat->cs_hash_tfm_mutex);
 	tfm = crypt_stat->hash_tfm;
-	if (!tfm) {
-		tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0);
-		if (IS_ERR(tfm)) {
-			rc = PTR_ERR(tfm);
-			ecryptfs_printk(KERN_ERR, "Error attempting to "
-					"allocate crypto context; rc = [%d]\n",
-					rc);
-			goto out;
-		}
-		crypt_stat->hash_tfm = tfm;
-	}
 	rc = ecryptfs_hash_digest(tfm, src, len, dst);
 	if (rc) {
 		printk(KERN_ERR
@@ -126,7 +114,6 @@
 		goto out;
 	}
 out:
-	mutex_unlock(&crypt_stat->cs_hash_tfm_mutex);
 	return rc;
 }
 
@@ -207,16 +194,29 @@
  *
  * Initialize the crypt_stat structure.
  */
-void
-ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
+int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
 {
+	struct crypto_shash *tfm;
+	int rc;
+
+	tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0);
+	if (IS_ERR(tfm)) {
+		rc = PTR_ERR(tfm);
+		ecryptfs_printk(KERN_ERR, "Error attempting to "
+				"allocate crypto context; rc = [%d]\n",
+				rc);
+		return rc;
+	}
+
 	memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
 	INIT_LIST_HEAD(&crypt_stat->keysig_list);
 	mutex_init(&crypt_stat->keysig_list_mutex);
 	mutex_init(&crypt_stat->cs_mutex);
 	mutex_init(&crypt_stat->cs_tfm_mutex);
-	mutex_init(&crypt_stat->cs_hash_tfm_mutex);
+	crypt_stat->hash_tfm = tfm;
 	crypt_stat->flags |= ECRYPTFS_STRUCT_INITIALIZED;
+
+	return 0;
 }
 
 /**
@@ -286,7 +286,7 @@
 		pg = virt_to_page(addr);
 		offset = offset_in_page(addr);
 		sg_set_page(&sg[i], pg, 0, offset);
-		remainder_of_page = PAGE_CACHE_SIZE - offset;
+		remainder_of_page = PAGE_SIZE - offset;
 		if (size >= remainder_of_page) {
 			sg[i].length = remainder_of_page;
 			addr += remainder_of_page;
@@ -400,7 +400,7 @@
 				    struct page *page)
 {
 	return ecryptfs_lower_header_size(crypt_stat) +
-	       ((loff_t)page->index << PAGE_CACHE_SHIFT);
+	       ((loff_t)page->index << PAGE_SHIFT);
 }
 
 /**
@@ -428,7 +428,7 @@
 	size_t extent_size = crypt_stat->extent_size;
 	int rc;
 
-	extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size));
+	extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size));
 	rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
 				(extent_base + extent_offset));
 	if (rc) {
@@ -498,7 +498,7 @@
 	}
 
 	for (extent_offset = 0;
-	     extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+	     extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
 	     extent_offset++) {
 		rc = crypt_extent(crypt_stat, enc_extent_page, page,
 				  extent_offset, ENCRYPT);
@@ -512,7 +512,7 @@
 	lower_offset = lower_offset_for_page(crypt_stat, page);
 	enc_extent_virt = kmap(enc_extent_page);
 	rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset,
-				  PAGE_CACHE_SIZE);
+				  PAGE_SIZE);
 	kunmap(enc_extent_page);
 	if (rc < 0) {
 		ecryptfs_printk(KERN_ERR,
@@ -560,7 +560,7 @@
 
 	lower_offset = lower_offset_for_page(crypt_stat, page);
 	page_virt = kmap(page);
-	rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE,
+	rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
 				 ecryptfs_inode);
 	kunmap(page);
 	if (rc < 0) {
@@ -571,7 +571,7 @@
 	}
 
 	for (extent_offset = 0;
-	     extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+	     extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
 	     extent_offset++) {
 		rc = crypt_extent(crypt_stat, page, page,
 				  extent_offset, DECRYPT);
@@ -659,11 +659,11 @@
 	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
 		crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
 	else {
-		if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
+		if (PAGE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
 			crypt_stat->metadata_size =
 				ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
 		else
-			crypt_stat->metadata_size = PAGE_CACHE_SIZE;
+			crypt_stat->metadata_size = PAGE_SIZE;
 	}
 }
 
@@ -1369,7 +1369,9 @@
 	ssize_t size;
 	int rc = 0;
 
-	size = ecryptfs_getxattr_lower(lower_dentry, ECRYPTFS_XATTR_NAME,
+	size = ecryptfs_getxattr_lower(lower_dentry,
+				       ecryptfs_inode_to_lower(ecryptfs_inode),
+				       ECRYPTFS_XATTR_NAME,
 				       page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE);
 	if (size < 0) {
 		if (unlikely(ecryptfs_verbosity > 0))
@@ -1391,6 +1393,7 @@
 	int rc;
 
 	rc = ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry),
+				     ecryptfs_inode_to_lower(inode),
 				     ECRYPTFS_XATTR_NAME, file_size,
 				     ECRYPTFS_SIZE_AND_MARKER_BYTES);
 	if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
@@ -1442,7 +1445,7 @@
 						ECRYPTFS_VALIDATE_HEADER_SIZE);
 	if (rc) {
 		/* metadata is not in the file header, so try xattrs */
-		memset(page_virt, 0, PAGE_CACHE_SIZE);
+		memset(page_virt, 0, PAGE_SIZE);
 		rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
 		if (rc) {
 			printk(KERN_DEBUG "Valid eCryptfs headers not found in "
@@ -1475,7 +1478,7 @@
 	}
 out:
 	if (page_virt) {
-		memset(page_virt, 0, PAGE_CACHE_SIZE);
+		memset(page_virt, 0, PAGE_SIZE);
 		kmem_cache_free(ecryptfs_header_cache, page_virt);
 	}
 	return rc;
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index d123fba..3ec495d 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -242,7 +242,6 @@
 	struct list_head keysig_list;
 	struct mutex keysig_list_mutex;
 	struct mutex cs_tfm_mutex;
-	struct mutex cs_hash_tfm_mutex;
 	struct mutex cs_mutex;
 };
 
@@ -577,7 +576,7 @@
 			int sg_size);
 int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat);
 void ecryptfs_rotate_iv(unsigned char *iv);
-void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
+int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
 void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
 void ecryptfs_destroy_mount_crypt_stat(
 	struct ecryptfs_mount_crypt_stat *mount_crypt_stat);
@@ -607,8 +606,8 @@
 			  unsigned char *src, struct dentry *ecryptfs_dentry);
 int ecryptfs_truncate(struct dentry *dentry, loff_t new_length);
 ssize_t
-ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
-			void *value, size_t size);
+ecryptfs_getxattr_lower(struct dentry *lower_dentry, struct inode *lower_inode,
+			const char *name, void *value, size_t size);
 int
 ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
 		  size_t size, int flags);
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index feef8a9..7000b96 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -112,7 +112,6 @@
 		.sb = inode->i_sb,
 	};
 	lower_file = ecryptfs_file_to_lower(file);
-	lower_file->f_pos = ctx->pos;
 	rc = iterate_dir(lower_file, &buf.ctx);
 	ctx->pos = buf.ctx.pos;
 	if (rc < 0)
@@ -223,14 +222,6 @@
 	}
 	ecryptfs_set_file_lower(
 		file, ecryptfs_inode_to_private(inode)->lower_file);
-	if (d_is_dir(ecryptfs_dentry)) {
-		ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
-		mutex_lock(&crypt_stat->cs_mutex);
-		crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
-		mutex_unlock(&crypt_stat->cs_mutex);
-		rc = 0;
-		goto out;
-	}
 	rc = read_or_initialize_metadata(ecryptfs_dentry);
 	if (rc)
 		goto out_put;
@@ -247,6 +238,45 @@
 	return rc;
 }
 
+/**
+ * ecryptfs_dir_open
+ * @inode: inode specifying file to open
+ * @file: Structure to return filled in
+ *
+ * Opens the file specified by inode.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_dir_open(struct inode *inode, struct file *file)
+{
+	struct dentry *ecryptfs_dentry = file->f_path.dentry;
+	/* Private value of ecryptfs_dentry allocated in
+	 * ecryptfs_lookup() */
+	struct ecryptfs_file_info *file_info;
+	struct file *lower_file;
+
+	/* Released in ecryptfs_release or end of function if failure */
+	file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
+	ecryptfs_set_file_private(file, file_info);
+	if (unlikely(!file_info)) {
+		ecryptfs_printk(KERN_ERR,
+				"Error attempting to allocate memory\n");
+		return -ENOMEM;
+	}
+	lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
+				 file->f_flags, current_cred());
+	if (IS_ERR(lower_file)) {
+		printk(KERN_ERR "%s: Error attempting to initialize "
+			"the lower file for the dentry with name "
+			"[%pd]; rc = [%ld]\n", __func__,
+			ecryptfs_dentry, PTR_ERR(lower_file));
+		kmem_cache_free(ecryptfs_file_info_cache, file_info);
+		return PTR_ERR(lower_file);
+	}
+	ecryptfs_set_file_lower(file, lower_file);
+	return 0;
+}
+
 static int ecryptfs_flush(struct file *file, fl_owner_t td)
 {
 	struct file *lower_file = ecryptfs_file_to_lower(file);
@@ -267,6 +297,19 @@
 	return 0;
 }
 
+static int ecryptfs_dir_release(struct inode *inode, struct file *file)
+{
+	fput(ecryptfs_file_to_lower(file));
+	kmem_cache_free(ecryptfs_file_info_cache,
+			ecryptfs_file_to_private(file));
+	return 0;
+}
+
+static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+	return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence);
+}
+
 static int
 ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
@@ -340,26 +383,22 @@
 #endif
 
 const struct file_operations ecryptfs_dir_fops = {
-	.iterate = ecryptfs_readdir,
+	.iterate_shared = ecryptfs_readdir,
 	.read = generic_read_dir,
 	.unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-	.open = ecryptfs_open,
-	.flush = ecryptfs_flush,
-	.release = ecryptfs_release,
+	.open = ecryptfs_dir_open,
+	.release = ecryptfs_dir_release,
 	.fsync = ecryptfs_fsync,
-	.fasync = ecryptfs_fasync,
-	.splice_read = generic_file_splice_read,
-	.llseek = default_llseek,
+	.llseek = ecryptfs_dir_llseek,
 };
 
 const struct file_operations ecryptfs_main_fops = {
 	.llseek = generic_file_llseek,
 	.read_iter = ecryptfs_read_update_atime,
 	.write_iter = generic_file_write_iter,
-	.iterate = ecryptfs_readdir,
 	.unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = ecryptfs_compat_ioctl,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 121114e..318b046 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -324,9 +324,8 @@
 /**
  * ecryptfs_lookup_interpose - Dentry interposition for a lookup
  */
-static int ecryptfs_lookup_interpose(struct dentry *dentry,
-				     struct dentry *lower_dentry,
-				     struct inode *dir_inode)
+static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
+				     struct dentry *lower_dentry)
 {
 	struct inode *inode, *lower_inode = d_inode(lower_dentry);
 	struct ecryptfs_dentry_info *dentry_info;
@@ -339,11 +338,12 @@
 		       "to allocate ecryptfs_dentry_info struct\n",
 			__func__);
 		dput(lower_dentry);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
-	fsstack_copy_attr_atime(dir_inode, d_inode(lower_dentry->d_parent));
+	fsstack_copy_attr_atime(d_inode(dentry->d_parent),
+				d_inode(lower_dentry->d_parent));
 	BUG_ON(!d_count(lower_dentry));
 
 	ecryptfs_set_dentry_private(dentry, dentry_info);
@@ -353,27 +353,25 @@
 	if (d_really_is_negative(lower_dentry)) {
 		/* We want to add because we couldn't find in lower */
 		d_add(dentry, NULL);
-		return 0;
+		return NULL;
 	}
-	inode = __ecryptfs_get_inode(lower_inode, dir_inode->i_sb);
+	inode = __ecryptfs_get_inode(lower_inode, dentry->d_sb);
 	if (IS_ERR(inode)) {
 		printk(KERN_ERR "%s: Error interposing; rc = [%ld]\n",
 		       __func__, PTR_ERR(inode));
-		return PTR_ERR(inode);
+		return ERR_CAST(inode);
 	}
 	if (S_ISREG(inode->i_mode)) {
 		rc = ecryptfs_i_size_read(dentry, inode);
 		if (rc) {
 			make_bad_inode(inode);
-			return rc;
+			return ERR_PTR(rc);
 		}
 	}
 
 	if (inode->i_state & I_NEW)
 		unlock_new_inode(inode);
-	d_add(dentry, inode);
-
-	return rc;
+	return d_splice_alias(inode, dentry);
 }
 
 /**
@@ -390,55 +388,42 @@
 				      unsigned int flags)
 {
 	char *encrypted_and_encoded_name = NULL;
-	size_t encrypted_and_encoded_name_size;
-	struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
+	struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
 	struct dentry *lower_dir_dentry, *lower_dentry;
+	const char *name = ecryptfs_dentry->d_name.name;
+	size_t len = ecryptfs_dentry->d_name.len;
+	struct dentry *res;
 	int rc = 0;
 
 	lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
-	lower_dentry = lookup_one_len_unlocked(ecryptfs_dentry->d_name.name,
-				      lower_dir_dentry,
-				      ecryptfs_dentry->d_name.len);
-	if (IS_ERR(lower_dentry)) {
-		rc = PTR_ERR(lower_dentry);
-		ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
-				"[%d] on lower_dentry = [%pd]\n", __func__, rc,
-				ecryptfs_dentry);
-		goto out;
-	}
-	if (d_really_is_positive(lower_dentry))
-		goto interpose;
+
 	mount_crypt_stat = &ecryptfs_superblock_to_private(
 				ecryptfs_dentry->d_sb)->mount_crypt_stat;
-	if (!(mount_crypt_stat
-	    && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)))
-		goto interpose;
-	dput(lower_dentry);
-	rc = ecryptfs_encrypt_and_encode_filename(
-		&encrypted_and_encoded_name, &encrypted_and_encoded_name_size,
-		mount_crypt_stat, ecryptfs_dentry->d_name.name,
-		ecryptfs_dentry->d_name.len);
-	if (rc) {
-		printk(KERN_ERR "%s: Error attempting to encrypt and encode "
-		       "filename; rc = [%d]\n", __func__, rc);
-		goto out;
+	if (mount_crypt_stat
+	    && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) {
+		rc = ecryptfs_encrypt_and_encode_filename(
+			&encrypted_and_encoded_name, &len,
+			mount_crypt_stat, name, len);
+		if (rc) {
+			printk(KERN_ERR "%s: Error attempting to encrypt and encode "
+			       "filename; rc = [%d]\n", __func__, rc);
+			return ERR_PTR(rc);
+		}
+		name = encrypted_and_encoded_name;
 	}
-	lower_dentry = lookup_one_len_unlocked(encrypted_and_encoded_name,
-				      lower_dir_dentry,
-				      encrypted_and_encoded_name_size);
+
+	lower_dentry = lookup_one_len_unlocked(name, lower_dir_dentry, len);
 	if (IS_ERR(lower_dentry)) {
-		rc = PTR_ERR(lower_dentry);
 		ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
-				"[%d] on lower_dentry = [%s]\n", __func__, rc,
-				encrypted_and_encoded_name);
-		goto out;
+				"[%ld] on lower_dentry = [%s]\n", __func__,
+				PTR_ERR(lower_dentry),
+				name);
+		res = ERR_CAST(lower_dentry);
+	} else {
+		res = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry);
 	}
-interpose:
-	rc = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry,
-				       ecryptfs_dir_inode);
-out:
 	kfree(encrypted_and_encoded_name);
-	return ERR_PTR(rc);
+	return res;
 }
 
 static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
@@ -763,10 +748,10 @@
 	} else { /* ia->ia_size < i_size_read(inode) */
 		/* We're chopping off all the pages down to the page
 		 * in which ia->ia_size is located. Fill in the end of
-		 * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
-		 * PAGE_CACHE_SIZE with zeros. */
-		size_t num_zeros = (PAGE_CACHE_SIZE
-				    - (ia->ia_size & ~PAGE_CACHE_MASK));
+		 * that page from (ia->ia_size & ~PAGE_MASK) to
+		 * PAGE_SIZE with zeros. */
+		size_t num_zeros = (PAGE_SIZE
+				    - (ia->ia_size & ~PAGE_MASK));
 
 		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
 			truncate_setsize(inode, ia->ia_size);
@@ -898,8 +883,11 @@
 	struct ecryptfs_crypt_stat *crypt_stat;
 
 	crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat;
-	if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
-		ecryptfs_init_crypt_stat(crypt_stat);
+	if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) {
+		rc = ecryptfs_init_crypt_stat(crypt_stat);
+		if (rc)
+			return rc;
+	}
 	inode = d_inode(dentry);
 	lower_inode = ecryptfs_inode_to_lower(inode);
 	lower_dentry = ecryptfs_dentry_to_lower(dentry);
@@ -1033,29 +1021,30 @@
 }
 
 ssize_t
-ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
-			void *value, size_t size)
+ecryptfs_getxattr_lower(struct dentry *lower_dentry, struct inode *lower_inode,
+			const char *name, void *value, size_t size)
 {
 	int rc = 0;
 
-	if (!d_inode(lower_dentry)->i_op->getxattr) {
+	if (!lower_inode->i_op->getxattr) {
 		rc = -EOPNOTSUPP;
 		goto out;
 	}
-	inode_lock(d_inode(lower_dentry));
-	rc = d_inode(lower_dentry)->i_op->getxattr(lower_dentry, name, value,
-						   size);
-	inode_unlock(d_inode(lower_dentry));
+	inode_lock(lower_inode);
+	rc = lower_inode->i_op->getxattr(lower_dentry, lower_inode,
+					 name, value, size);
+	inode_unlock(lower_inode);
 out:
 	return rc;
 }
 
 static ssize_t
-ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value,
-		  size_t size)
+ecryptfs_getxattr(struct dentry *dentry, struct inode *inode,
+		  const char *name, void *value, size_t size)
 {
-	return ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry), name,
-				       value, size);
+	return ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry),
+				       ecryptfs_inode_to_lower(inode),
+				       name, value, size);
 }
 
 static ssize_t
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 9893d15..3cf1546 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1798,7 +1798,7 @@
 	 * added the our &auth_tok_list */
 	next_packet_is_auth_tok_packet = 1;
 	while (next_packet_is_auth_tok_packet) {
-		size_t max_packet_size = ((PAGE_CACHE_SIZE - 8) - i);
+		size_t max_packet_size = ((PAGE_SIZE - 8) - i);
 
 		switch (src[i]) {
 		case ECRYPTFS_TAG_3_PACKET_TYPE:
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 8b0b4a7..1698132 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -695,12 +695,12 @@
 	{
 		.cache = &ecryptfs_header_cache,
 		.name = "ecryptfs_headers",
-		.size = PAGE_CACHE_SIZE,
+		.size = PAGE_SIZE,
 	},
 	{
 		.cache = &ecryptfs_xattr_cache,
 		.name = "ecryptfs_xattr_cache",
-		.size = PAGE_CACHE_SIZE,
+		.size = PAGE_SIZE,
 	},
 	{
 		.cache = &ecryptfs_key_record_cache,
@@ -818,7 +818,7 @@
 {
 	int rc;
 
-	if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) {
+	if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_SIZE) {
 		rc = -EINVAL;
 		ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
 				"larger than the host's page size, and so "
@@ -826,7 +826,7 @@
 				"default eCryptfs extent size is [%u] bytes; "
 				"the page size is [%lu] bytes.\n",
 				ECRYPTFS_DEFAULT_EXTENT_SIZE,
-				(unsigned long)PAGE_CACHE_SIZE);
+				(unsigned long)PAGE_SIZE);
 		goto out;
 	}
 	rc = ecryptfs_init_kmem_caches();
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 1f58652..148d11b 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -122,7 +122,7 @@
 				       struct ecryptfs_crypt_stat *crypt_stat)
 {
 	loff_t extent_num_in_page = 0;
-	loff_t num_extents_per_page = (PAGE_CACHE_SIZE
+	loff_t num_extents_per_page = (PAGE_SIZE
 				       / crypt_stat->extent_size);
 	int rc = 0;
 
@@ -138,7 +138,7 @@
 			char *page_virt;
 
 			page_virt = kmap_atomic(page);
-			memset(page_virt, 0, PAGE_CACHE_SIZE);
+			memset(page_virt, 0, PAGE_SIZE);
 			/* TODO: Support more than one header extent */
 			if (view_extent_num == 0) {
 				size_t written;
@@ -164,8 +164,8 @@
 				 - crypt_stat->metadata_size);
 
 			rc = ecryptfs_read_lower_page_segment(
-				page, (lower_offset >> PAGE_CACHE_SHIFT),
-				(lower_offset & ~PAGE_CACHE_MASK),
+				page, (lower_offset >> PAGE_SHIFT),
+				(lower_offset & ~PAGE_MASK),
 				crypt_stat->extent_size, page->mapping->host);
 			if (rc) {
 				printk(KERN_ERR "%s: Error attempting to read "
@@ -198,7 +198,7 @@
 
 	if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
 		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
-						      PAGE_CACHE_SIZE,
+						      PAGE_SIZE,
 						      page->mapping->host);
 	} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
 		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
@@ -215,7 +215,7 @@
 
 		} else {
 			rc = ecryptfs_read_lower_page_segment(
-				page, page->index, 0, PAGE_CACHE_SIZE,
+				page, page->index, 0, PAGE_SIZE,
 				page->mapping->host);
 			if (rc) {
 				printk(KERN_ERR "Error reading page; rc = "
@@ -250,12 +250,12 @@
 	struct inode *inode = page->mapping->host;
 	int end_byte_in_page;
 
-	if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
+	if ((i_size_read(inode) / PAGE_SIZE) != page->index)
 		goto out;
-	end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
+	end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
 	if (to > end_byte_in_page)
 		end_byte_in_page = to;
-	zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
+	zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
 out:
 	return 0;
 }
@@ -279,7 +279,7 @@
 			loff_t pos, unsigned len, unsigned flags,
 			struct page **pagep, void **fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	loff_t prev_page_end_size;
 	int rc = 0;
@@ -289,14 +289,14 @@
 		return -ENOMEM;
 	*pagep = page;
 
-	prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT);
+	prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
 	if (!PageUptodate(page)) {
 		struct ecryptfs_crypt_stat *crypt_stat =
 			&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
 
 		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
 			rc = ecryptfs_read_lower_page_segment(
-				page, index, 0, PAGE_CACHE_SIZE, mapping->host);
+				page, index, 0, PAGE_SIZE, mapping->host);
 			if (rc) {
 				printk(KERN_ERR "%s: Error attempting to read "
 				       "lower page segment; rc = [%d]\n",
@@ -322,7 +322,7 @@
 				SetPageUptodate(page);
 			} else {
 				rc = ecryptfs_read_lower_page_segment(
-					page, index, 0, PAGE_CACHE_SIZE,
+					page, index, 0, PAGE_SIZE,
 					mapping->host);
 				if (rc) {
 					printk(KERN_ERR "%s: Error reading "
@@ -336,9 +336,9 @@
 		} else {
 			if (prev_page_end_size
 			    >= i_size_read(page->mapping->host)) {
-				zero_user(page, 0, PAGE_CACHE_SIZE);
+				zero_user(page, 0, PAGE_SIZE);
 				SetPageUptodate(page);
-			} else if (len < PAGE_CACHE_SIZE) {
+			} else if (len < PAGE_SIZE) {
 				rc = ecryptfs_decrypt_page(page);
 				if (rc) {
 					printk(KERN_ERR "%s: Error decrypting "
@@ -371,11 +371,11 @@
 	 * of page?  Zero it out. */
 	if ((i_size_read(mapping->host) == prev_page_end_size)
 	    && (pos != 0))
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+		zero_user(page, 0, PAGE_SIZE);
 out:
 	if (unlikely(rc)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		*pagep = NULL;
 	}
 	return rc;
@@ -436,8 +436,9 @@
 		goto out;
 	}
 	inode_lock(lower_inode);
-	size = lower_inode->i_op->getxattr(lower_dentry, ECRYPTFS_XATTR_NAME,
-					   xattr_virt, PAGE_CACHE_SIZE);
+	size = lower_inode->i_op->getxattr(lower_dentry, lower_inode,
+					   ECRYPTFS_XATTR_NAME,
+					   xattr_virt, PAGE_SIZE);
 	if (size < 0)
 		size = 8;
 	put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
@@ -479,8 +480,8 @@
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	pgoff_t index = pos >> PAGE_SHIFT;
+	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + copied;
 	struct inode *ecryptfs_inode = mapping->host;
 	struct ecryptfs_crypt_stat *crypt_stat =
@@ -500,7 +501,7 @@
 		goto out;
 	}
 	if (!PageUptodate(page)) {
-		if (copied < PAGE_CACHE_SIZE) {
+		if (copied < PAGE_SIZE) {
 			rc = 0;
 			goto out;
 		}
@@ -533,7 +534,7 @@
 		rc = copied;
 out:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return rc;
 }
 
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 09fe622..158a3a3 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -74,7 +74,7 @@
 	loff_t offset;
 	int rc;
 
-	offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
+	offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT)
 		  + offset_in_page);
 	virt = kmap(page_for_lower);
 	rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
@@ -123,9 +123,9 @@
 	else
 		pos = offset;
 	while (pos < (offset + size)) {
-		pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
-		size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
-		size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
+		pgoff_t ecryptfs_page_idx = (pos >> PAGE_SHIFT);
+		size_t start_offset_in_page = (pos & ~PAGE_MASK);
+		size_t num_bytes = (PAGE_SIZE - start_offset_in_page);
 		loff_t total_remaining_bytes = ((offset + size) - pos);
 
 		if (fatal_signal_pending(current)) {
@@ -165,7 +165,7 @@
 			 * Fill in zero values to the end of the page */
 			memset(((char *)ecryptfs_page_virt
 				+ start_offset_in_page), 0,
-				PAGE_CACHE_SIZE - start_offset_in_page);
+				PAGE_SIZE - start_offset_in_page);
 		}
 
 		/* pos >= offset, we are now writing the data request */
@@ -186,7 +186,7 @@
 						ecryptfs_page,
 						start_offset_in_page,
 						data_offset);
-		page_cache_release(ecryptfs_page);
+		put_page(ecryptfs_page);
 		if (rc) {
 			printk(KERN_ERR "%s: Error encrypting "
 			       "page; rc = [%d]\n", __func__, rc);
@@ -262,7 +262,7 @@
 	loff_t offset;
 	int rc;
 
-	offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page);
+	offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page);
 	virt = kmap(page_for_ecryptfs);
 	rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
 	if (rc > 0)
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 77a486d..85411ce 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -55,7 +55,10 @@
 	inode_info = kmem_cache_alloc(ecryptfs_inode_info_cache, GFP_KERNEL);
 	if (unlikely(!inode_info))
 		goto out;
-	ecryptfs_init_crypt_stat(&inode_info->crypt_stat);
+	if (ecryptfs_init_crypt_stat(&inode_info->crypt_stat)) {
+		kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
+		goto out;
+	}
 	mutex_init(&inode_info->lower_file_mutex);
 	atomic_set(&inode_info->lower_file_count, 0);
 	inode_info->lower_file = NULL;
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index d48e0d2..5f22e74 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -157,7 +157,7 @@
 	return 0;
 }
 
-long
+static long
 efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
 {
 	void __user *arg = (void __user *)p;
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index dd029d1..9cb54a3 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -197,8 +197,8 @@
 	efivarfs_sb = sb;
 
 	sb->s_maxbytes          = MAX_LFS_FILESIZE;
-	sb->s_blocksize         = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits    = PAGE_CACHE_SHIFT;
+	sb->s_blocksize         = PAGE_SIZE;
+	sb->s_blocksize_bits    = PAGE_SHIFT;
 	sb->s_magic             = EFIVARFS_MAGIC;
 	sb->s_op                = &efivarfs_ops;
 	sb->s_d_op		= &efivarfs_d_ops;
@@ -216,8 +216,7 @@
 
 	INIT_LIST_HEAD(&efivarfs_list);
 
-	err = efivar_init(efivarfs_callback, (void *)sb, false,
-			  true, &efivarfs_list);
+	err = efivar_init(efivarfs_callback, (void *)sb, true, &efivarfs_list);
 	if (err)
 		__efivar_entry_iter(efivarfs_destroy, &efivarfs_list, NULL, NULL);
 
diff --git a/fs/efs/dir.c b/fs/efs/dir.c
index ce63b24..a7be96e 100644
--- a/fs/efs/dir.c
+++ b/fs/efs/dir.c
@@ -12,7 +12,7 @@
 const struct file_operations efs_dir_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= efs_readdir,
+	.iterate_shared	= efs_readdir,
 };
 
 const struct inode_operations efs_dir_inode_operations = {
@@ -100,4 +100,3 @@
 	ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot;
 	return 0;
 }
-
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index 40ba9cc..d34a40e 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -113,7 +113,7 @@
 
 	ino = efs_find_entry(d_inode(child), "..", 2);
 	if (ino)
-		parent = d_obtain_alias(efs_iget(d_inode(child)->i_sb, ino));
+		parent = d_obtain_alias(efs_iget(child->d_sb, ino));
 
 	return parent;
 }
diff --git a/fs/exec.c b/fs/exec.c
index c4010b8..e92419f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -850,15 +850,25 @@
 	if (ret)
 		return ret;
 
+	ret = deny_write_access(file);
+	if (ret)
+		return ret;
+
 	i_size = i_size_read(file_inode(file));
-	if (max_size > 0 && i_size > max_size)
-		return -EFBIG;
-	if (i_size <= 0)
-		return -EINVAL;
+	if (max_size > 0 && i_size > max_size) {
+		ret = -EFBIG;
+		goto out;
+	}
+	if (i_size <= 0) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	*buf = vmalloc(i_size);
-	if (!*buf)
-		return -ENOMEM;
+	if (!*buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	pos = 0;
 	while (pos < i_size) {
@@ -876,18 +886,21 @@
 
 	if (pos != i_size) {
 		ret = -EIO;
-		goto out;
+		goto out_free;
 	}
 
 	ret = security_kernel_post_read_file(file, *buf, i_size, id);
 	if (!ret)
 		*size = pos;
 
-out:
+out_free:
 	if (ret < 0) {
 		vfree(*buf);
 		*buf = NULL;
 	}
+
+out:
+	allow_write_access(file);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kernel_read_file);
@@ -1387,7 +1400,12 @@
 	kuid_t uid;
 	kgid_t gid;
 
-	/* clear any previous set[ug]id data from a previous binary */
+	/*
+	 * Since this can be called multiple times (via prepare_binprm),
+	 * we must clear any previous work done when setting set[ug]id
+	 * bits from any earlier bprm->file uses (for example when run
+	 * first for a setuid script then again for its interpreter).
+	 */
 	bprm->cred->euid = current_euid();
 	bprm->cred->egid = current_egid();
 
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index e5bb2ab..f69a1b5 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -41,16 +41,16 @@
 static inline void exofs_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr)
 {
 	loff_t last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -79,19 +79,19 @@
 	return err;
 }
 
-static void exofs_check_page(struct page *page)
+static bool exofs_check_page(struct page *page)
 {
 	struct inode *dir = page->mapping->host;
 	unsigned chunk_size = exofs_chunk_size(dir);
 	char *kaddr = page_address(page);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	struct exofs_dir_entry *p;
 	char *error;
 
 	/* if the page is the last one in the directory */
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & (chunk_size - 1))
 			goto Ebadsize;
 		if (!limit)
@@ -114,7 +114,7 @@
 		goto Eend;
 out:
 	SetPageChecked(page);
-	return;
+	return true;
 
 Ebadsize:
 	EXOFS_ERR("ERROR [exofs_check_page]: "
@@ -138,7 +138,7 @@
 	EXOFS_ERR(
 		"ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
 		"offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
-		dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 		_LLU(le64_to_cpu(p->inode_no)),
 		rec_len, p->name_len);
 	goto fail;
@@ -147,11 +147,11 @@
 	EXOFS_ERR("ERROR [exofs_check_page]: "
 		"entry in directory(0x%lx) spans the page boundary"
 		"offset=%lu, inode=0x%llx\n",
-		dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
 		_LLU(le64_to_cpu(p->inode_no)));
 fail:
-	SetPageChecked(page);
 	SetPageError(page);
+	return false;
 }
 
 static struct page *exofs_get_page(struct inode *dir, unsigned long n)
@@ -161,10 +161,10 @@
 
 	if (!IS_ERR(page)) {
 		kmap(page);
-		if (!PageChecked(page))
-			exofs_check_page(page);
-		if (PageError(page))
-			goto fail;
+		if (unlikely(!PageChecked(page))) {
+			if (PageError(page) || !exofs_check_page(page))
+				goto fail;
+		}
 	}
 	return page;
 
@@ -237,8 +237,8 @@
 {
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 	unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
 	int need_revalidate = (file->f_version != inode->i_version);
@@ -254,7 +254,7 @@
 		if (IS_ERR(page)) {
 			EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n",
 				  inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return PTR_ERR(page);
 		}
 		kaddr = page_address(page);
@@ -262,7 +262,7 @@
 			if (offset) {
 				offset = exofs_validate_entry(kaddr, offset,
 								chunk_mask);
-				ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+				ctx->pos = (n<<PAGE_SHIFT) + offset;
 			}
 			file->f_version = inode->i_version;
 			need_revalidate = 0;
@@ -449,7 +449,7 @@
 		kaddr = page_address(page);
 		dir_end = kaddr + exofs_last_byte(dir, n);
 		de = (struct exofs_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				name_len = 0;
@@ -602,7 +602,7 @@
 	kunmap_atomic(kaddr);
 	err = exofs_commit_chunk(page, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -657,5 +657,5 @@
 const struct file_operations exofs_dir_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= exofs_readdir,
+	.iterate_shared	= exofs_readdir,
 };
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 9eaf595..9dc4c6d 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -317,7 +317,7 @@
 
 	if (!pcol->ios) {
 		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
-					     pcol->pg_first << PAGE_CACHE_SHIFT,
+					     pcol->pg_first << PAGE_SHIFT,
 					     pcol->length, &pcol->ios);
 
 		if (ret)
@@ -383,7 +383,7 @@
 	struct inode *inode = pcol->inode;
 	struct exofs_i_info *oi = exofs_i(inode);
 	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
 	size_t len;
 	int ret;
 
@@ -397,9 +397,9 @@
 	pcol->that_locked_page = page;
 
 	if (page->index < end_index)
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	else if (page->index == end_index)
-		len = i_size & ~PAGE_CACHE_MASK;
+		len = i_size & ~PAGE_MASK;
 	else
 		len = 0;
 
@@ -442,8 +442,8 @@
 			goto fail;
 	}
 
-	if (len != PAGE_CACHE_SIZE)
-		zero_user(page, len, PAGE_CACHE_SIZE - len);
+	if (len != PAGE_SIZE)
+		zero_user(page, len, PAGE_SIZE - len);
 
 	EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
 		     inode->i_ino, page->index, len);
@@ -609,7 +609,7 @@
 
 	if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
 		EXOFS_DBGMSG2("index=0x%lx\n", page->index);
-		page_cache_release(page);
+		put_page(page);
 		return;
 	}
 	EXOFS_DBGMSG2("that_locked_page index=0x%lx\n",
@@ -633,7 +633,7 @@
 
 	BUG_ON(pcol->ios);
 	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
-				 pcol->pg_first << PAGE_CACHE_SHIFT,
+				 pcol->pg_first << PAGE_SHIFT,
 				 pcol->length, &pcol->ios);
 	if (unlikely(ret))
 		goto err;
@@ -696,7 +696,7 @@
 	struct inode *inode = pcol->inode;
 	struct exofs_i_info *oi = exofs_i(inode);
 	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
 	size_t len;
 	int ret;
 
@@ -708,9 +708,9 @@
 
 	if (page->index < end_index)
 		/* in this case, the page is within the limits of the file */
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	else {
-		len = i_size & ~PAGE_CACHE_MASK;
+		len = i_size & ~PAGE_MASK;
 
 		if (page->index > end_index || !len) {
 			/* in this case, the page is outside the limits
@@ -790,10 +790,10 @@
 	long start, end, expected_pages;
 	int ret;
 
-	start = wbc->range_start >> PAGE_CACHE_SHIFT;
+	start = wbc->range_start >> PAGE_SHIFT;
 	end = (wbc->range_end == LLONG_MAX) ?
 			start + mapping->nrpages :
-			wbc->range_end >> PAGE_CACHE_SHIFT;
+			wbc->range_end >> PAGE_SHIFT;
 
 	if (start || end)
 		expected_pages = end - start + 1;
@@ -881,15 +881,15 @@
 	}
 
 	 /* read modify write */
-	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
+	if (!PageUptodate(page) && (len != PAGE_SIZE)) {
 		loff_t i_size = i_size_read(mapping->host);
-		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+		pgoff_t end_index = i_size >> PAGE_SHIFT;
 		size_t rlen;
 
 		if (page->index < end_index)
-			rlen = PAGE_CACHE_SIZE;
+			rlen = PAGE_SIZE;
 		else if (page->index == end_index)
-			rlen = i_size & ~PAGE_CACHE_MASK;
+			rlen = i_size & ~PAGE_MASK;
 		else
 			rlen = 0;
 
@@ -960,8 +960,7 @@
 
 
  /* TODO: Should be easy enough to do proprly */
-static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			       loff_t offset)
+static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	return 0;
 }
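
The conversions above drop the PAGE_CACHE_* aliases in favour of PAGE_SHIFT, PAGE_SIZE and PAGE_MASK without changing the arithmetic. As a quick illustration of that arithmetic, here is a minimal userspace sketch (assuming 4 KiB pages; the macro names are reused only for readability, this is not kernel code):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t i_size = 10000;			/* example file size in bytes */
	uint64_t end_index = i_size >> PAGE_SHIFT;	/* index of the last page */
	uint64_t tail_len = i_size & ~PAGE_MASK;	/* valid bytes in that page */

	/* pages below end_index hold PAGE_SIZE bytes; the page at end_index
	 * holds tail_len bytes and the remainder must be zeroed, which is
	 * what readpage_strip() above does with zero_user() */
	printf("end_index=%llu tail_len=%llu\n",
	       (unsigned long long)end_index, (unsigned long long)tail_len);
	return 0;
}
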
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index c20d77d..622a686 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -292,11 +292,11 @@
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 6658a50..1076a42 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -122,7 +122,7 @@
 			if (match_int(&args[0], &option))
 				return -EINVAL;
 			if (option <= 0) {
-				EXOFS_ERR("Timout must be > 0");
+				EXOFS_ERR("Timeout must be > 0");
 				return -EINVAL;
 			}
 			opts->timeout = option * HZ;
@@ -958,7 +958,7 @@
 	if (!ino)
 		return ERR_PTR(-ESTALE);
 
-	return d_obtain_alias(exofs_iget(d_inode(child)->i_sb, ino));
+	return d_obtain_alias(exofs_iget(child->d_sb, ino));
 }
 
 static struct inode *exofs_nfs_get_inode(struct super_block *sb,
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index c46f1a1..207ba8d 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -143,14 +143,18 @@
 	if (err)
 		goto out_err;
 	dprintk("%s: found name: %s\n", __func__, nbuf);
-	inode_lock(parent->d_inode);
-	tmp = lookup_one_len(nbuf, parent, strlen(nbuf));
-	inode_unlock(parent->d_inode);
+	tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
 	if (IS_ERR(tmp)) {
 		dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
 		goto out_err;
 	}
 	if (tmp != dentry) {
+		/*
+		 * Somebody has renamed it since exportfs_get_name();
+		 * great, since it could've only been renamed if it
+		 * got looked up and thus connected, and it would
+		 * remain connected afterwards.  We are done.
+		 */
 		dput(tmp);
 		goto out_reconnected;
 	}
@@ -308,7 +312,7 @@
 		goto out;
 
 	error = -EINVAL;
-	if (!file->f_op->iterate)
+	if (!file->f_op->iterate && !file->f_op->iterate_shared)
 		goto out_close;
 
 	buffer.sequence = 0;
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 27695e6..42f1d18 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -172,9 +172,6 @@
 		acl = ERR_PTR(retval);
 	kfree(value);
 
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
-
 	return acl;
 }
 
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 0c6638b..19efd11 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -37,7 +37,7 @@
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == EXT2_MAX_REC_LEN)
 		return 1 << 16;
 #endif
@@ -46,7 +46,7 @@
 
 static inline __le16 ext2_rec_len_to_disk(unsigned len)
 {
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(EXT2_MAX_REC_LEN);
 	else
@@ -67,7 +67,7 @@
 static inline void ext2_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -79,9 +79,9 @@
 {
 	unsigned last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -110,7 +110,7 @@
 	return err;
 }
 
-static void ext2_check_page(struct page *page, int quiet)
+static bool ext2_check_page(struct page *page, int quiet)
 {
 	struct inode *dir = page->mapping->host;
 	struct super_block *sb = dir->i_sb;
@@ -118,12 +118,12 @@
 	char *kaddr = page_address(page);
 	u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	ext2_dirent *p;
 	char *error;
 
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & (chunk_size - 1))
 			goto Ebadsize;
 		if (!limit)
@@ -148,7 +148,7 @@
 		goto Eend;
 out:
 	SetPageChecked(page);
-	return;
+	return true;
 
 	/* Too bad, we had an error */
 
@@ -176,7 +176,7 @@
 	if (!quiet)
 		ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
 			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-			dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+			dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 			(unsigned long) le32_to_cpu(p->inode),
 			rec_len, p->name_len);
 	goto fail;
@@ -186,12 +186,12 @@
 		ext2_error(sb, "ext2_check_page",
 			"entry in directory #%lu spans the page boundary"
 			"offset=%lu, inode=%lu",
-			dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+			dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
 			(unsigned long) le32_to_cpu(p->inode));
 	}
 fail:
-	SetPageChecked(page);
 	SetPageError(page);
+	return false;
 }
 
 static struct page * ext2_get_page(struct inode *dir, unsigned long n,
@@ -201,10 +201,10 @@
 	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
 		kmap(page);
-		if (!PageChecked(page))
-			ext2_check_page(page, quiet);
-		if (PageError(page))
-			goto fail;
+		if (unlikely(!PageChecked(page))) {
+			if (PageError(page) || !ext2_check_page(page, quiet))
+				goto fail;
+		}
 	}
 	return page;
 
@@ -287,8 +287,8 @@
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
 	unsigned char *types = NULL;
@@ -309,14 +309,14 @@
 			ext2_error(sb, __func__,
 				   "bad page in #%lu",
 				   inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return PTR_ERR(page);
 		}
 		kaddr = page_address(page);
 		if (unlikely(need_revalidate)) {
 			if (offset) {
 				offset = ext2_validate_entry(kaddr, offset, chunk_mask);
-				ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+				ctx->pos = (n<<PAGE_SHIFT) + offset;
 			}
 			file->f_version = inode->i_version;
 			need_revalidate = 0;
@@ -406,7 +406,7 @@
 		if (++n >= npages)
 			n = 0;
 		/* next page is past the blocks we've got */
-		if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
+		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
 			ext2_error(dir->i_sb, __func__,
 				"dir %lu size %lld exceeds block count %llu",
 				dir->i_ino, dir->i_size,
@@ -511,7 +511,7 @@
 		kaddr = page_address(page);
 		dir_end = kaddr + ext2_last_byte(dir, n);
 		de = (ext2_dirent *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				/* We hit i_size */
@@ -655,7 +655,7 @@
 	kunmap_atomic(kaddr);
 	err = ext2_commit_chunk(page, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -716,7 +716,7 @@
 const struct file_operations ext2_dir_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= ext2_readdir,
+	.iterate_shared	= ext2_readdir,
 	.unlocked_ioctl = ext2_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= ext2_compat_ioctl,
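
The ext2_rec_len_{from,to}_disk() hunks above only rename the page-size macro; the encoding they implement is unchanged: on configurations where PAGE_SIZE >= 64 KiB, a directory record length of exactly 65536 bytes cannot be stored in the 16-bit on-disk field, so EXT2_MAX_REC_LEN (65535) is written as a sentinel. A simplified userspace round-trip of that idea (byte-order conversion and the #if guard are omitted; this is a sketch, not the kernel helpers themselves):

#include <assert.h>
#include <stdint.h>

#define EXT2_MAX_REC_LEN 65535		/* on-disk sentinel meaning 1 << 16 */

static unsigned rec_len_from_disk(uint16_t dlen)
{
	unsigned len = dlen;

	if (len == EXT2_MAX_REC_LEN)	/* only used when PAGE_SIZE >= 64 KiB */
		return 1 << 16;
	return len;
}

static uint16_t rec_len_to_disk(unsigned len)
{
	if (len == (1 << 16))
		return EXT2_MAX_REC_LEN;
	assert(len < (1 << 16));
	return (uint16_t)len;
}

int main(void)
{
	assert(rec_len_from_disk(rec_len_to_disk(1 << 16)) == 1 << 16);
	assert(rec_len_from_disk(rec_len_to_disk(4096)) == 4096);
	return 0;
}
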
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 6bd58e6..b675610 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -854,20 +854,20 @@
 }
 
 static ssize_t
-ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
 	size_t count = iov_iter_count(iter);
+	loff_t offset = iocb->ki_pos;
 	ssize_t ret;
 
 	if (IS_DAX(inode))
-		ret = dax_do_io(iocb, inode, iter, offset, ext2_get_block, NULL,
+		ret = dax_do_io(iocb, inode, iter, ext2_get_block, NULL,
 				DIO_LOCKING);
 	else
-		ret = blockdev_direct_IO(iocb, inode, iter, offset,
-					 ext2_get_block);
+		ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
 	if (ret < 0 && iov_iter_rw(iter) == WRITE)
 		ext2_write_failed(mapping, offset + count);
 	return ret;
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 7a2be8f..d446203 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -82,7 +82,7 @@
 	unsigned long ino = ext2_inode_by_name(d_inode(child), &dotdot);
 	if (!ino)
 		return ERR_PTR(-ENOENT);
-	return d_obtain_alias(ext2_iget(d_inode(child)->i_sb, ino));
+	return d_obtain_alias(ext2_iget(child->d_sb, ino));
 } 
 
 /*
@@ -398,7 +398,7 @@
 			ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
 		else {
 			kunmap(dir_page);
-			page_cache_release(dir_page);
+			put_page(dir_page);
 		}
 		inode_dec_link_count(old_dir);
 	}
@@ -408,11 +408,11 @@
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index ba97f24..7fd3b86 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -9,10 +9,10 @@
 
 static int
 ext2_xattr_security_get(const struct xattr_handler *handler,
-			struct dentry *dentry, const char *name,
-			void *buffer, size_t size)
+			struct dentry *unused, struct inode *inode,
+			const char *name, void *buffer, size_t size)
 {
-	return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_SECURITY, name,
+	return ext2_xattr_get(inode, EXT2_XATTR_INDEX_SECURITY, name,
 			      buffer, size);
 }
 
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 2c94d19..0f85705 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -16,10 +16,10 @@
 
 static int
 ext2_xattr_trusted_get(const struct xattr_handler *handler,
-		       struct dentry *dentry, const char *name,
-		       void *buffer, size_t size)
+		       struct dentry *unused, struct inode *inode,
+		       const char *name, void *buffer, size_t size)
 {
-	return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_TRUSTED, name,
+	return ext2_xattr_get(inode, EXT2_XATTR_INDEX_TRUSTED, name,
 			      buffer, size);
 }
 
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index 72a2a96..1fafd27 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -18,12 +18,12 @@
 
 static int
 ext2_xattr_user_get(const struct xattr_handler *handler,
-		    struct dentry *dentry, const char *name,
-		    void *buffer, size_t size)
+		    struct dentry *unused, struct inode *inode,
+		    const char *name, void *buffer, size_t size)
 {
-	if (!test_opt(dentry->d_sb, XATTR_USER))
+	if (!test_opt(inode->i_sb, XATTR_USER))
 		return -EOPNOTSUPP;
-	return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_USER,
+	return ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 			      name, buffer, size);
 }
 
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 69b1e73..c6601a4 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -172,9 +172,6 @@
 		acl = ERR_PTR(retval);
 	kfree(value);
 
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
-
 	return acl;
 }
 
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index edc053a..6a6c273 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -32,6 +32,7 @@
 #include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <linux/spinlock_types.h>
+#include <linux/namei.h>
 
 #include "ext4_extents.h"
 #include "xattr.h"
@@ -91,7 +92,8 @@
  * Return: An allocated and initialized encryption context on success; error
  * value or NULL otherwise.
  */
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+					    gfp_t gfp_flags)
 {
 	struct ext4_crypto_ctx *ctx = NULL;
 	int res = 0;
@@ -118,7 +120,7 @@
 		list_del(&ctx->free_list);
 	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
 	if (!ctx) {
-		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
 		if (!ctx) {
 			res = -ENOMEM;
 			goto out;
@@ -255,7 +257,8 @@
 			    ext4_direction_t rw,
 			    pgoff_t index,
 			    struct page *src_page,
-			    struct page *dest_page)
+			    struct page *dest_page,
+			    gfp_t gfp_flags)
 
 {
 	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
@@ -266,7 +269,7 @@
 	struct crypto_skcipher *tfm = ci->ci_ctfm;
 	int res = 0;
 
-	req = skcipher_request_alloc(tfm, GFP_NOFS);
+	req = skcipher_request_alloc(tfm, gfp_flags);
 	if (!req) {
 		printk_ratelimited(KERN_ERR
 				   "%s: crypto_request_alloc() failed\n",
@@ -283,10 +286,10 @@
 	       EXT4_XTS_TWEAK_SIZE - sizeof(index));
 
 	sg_init_table(&dst, 1);
-	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
 	sg_init_table(&src, 1);
-	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-	skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+	sg_set_page(&src, src_page, PAGE_SIZE, 0);
+	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
 				   xts_tweak);
 	if (rw == EXT4_DECRYPT)
 		res = crypto_skcipher_decrypt(req);
@@ -307,9 +310,10 @@
 	return 0;
 }
 
-static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
+				      gfp_t gfp_flags)
 {
-	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
+	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
 	if (ctx->w.bounce_page == NULL)
 		return ERR_PTR(-ENOMEM);
 	ctx->flags |= EXT4_WRITE_PATH_FL;
@@ -332,7 +336,8 @@
  * error value or NULL.
  */
 struct page *ext4_encrypt(struct inode *inode,
-			  struct page *plaintext_page)
+			  struct page *plaintext_page,
+			  gfp_t gfp_flags)
 {
 	struct ext4_crypto_ctx *ctx;
 	struct page *ciphertext_page = NULL;
@@ -340,17 +345,17 @@
 
 	BUG_ON(!PageLocked(plaintext_page));
 
-	ctx = ext4_get_crypto_ctx(inode);
+	ctx = ext4_get_crypto_ctx(inode, gfp_flags);
 	if (IS_ERR(ctx))
 		return (struct page *) ctx;
 
 	/* The encryption operation will require a bounce page. */
-	ciphertext_page = alloc_bounce_page(ctx);
+	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
 	if (IS_ERR(ciphertext_page))
 		goto errout;
 	ctx->w.control_page = plaintext_page;
 	err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
-			       plaintext_page, ciphertext_page);
+			       plaintext_page, ciphertext_page, gfp_flags);
 	if (err) {
 		ciphertext_page = ERR_PTR(err);
 	errout:
@@ -378,8 +383,8 @@
 {
 	BUG_ON(!PageLocked(page));
 
-	return ext4_page_crypto(page->mapping->host,
-				EXT4_DECRYPT, page->index, page, page);
+	return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
+				page->index, page, page, GFP_NOFS);
 }
 
 int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
@@ -396,13 +401,13 @@
 		 (unsigned long) inode->i_ino, lblk, len);
 #endif
 
-	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
 
-	ctx = ext4_get_crypto_ctx(inode);
+	ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ciphertext_page = alloc_bounce_page(ctx);
+	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
 	if (IS_ERR(ciphertext_page)) {
 		err = PTR_ERR(ciphertext_page);
 		goto errout;
@@ -410,11 +415,12 @@
 
 	while (len--) {
 		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
-				       ZERO_PAGE(0), ciphertext_page);
+				       ZERO_PAGE(0), ciphertext_page,
+				       GFP_NOFS);
 		if (err)
 			goto errout;
 
-		bio = bio_alloc(GFP_KERNEL, 1);
+		bio = bio_alloc(GFP_NOWAIT, 1);
 		if (!bio) {
 			err = -ENOMEM;
 			goto errout;
@@ -473,13 +479,19 @@
  */
 static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
-	struct inode *dir = d_inode(dentry->d_parent);
-	struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
+	struct dentry *dir;
+	struct ext4_crypt_info *ci;
 	int dir_has_key, cached_with_key;
 
-	if (!ext4_encrypted_inode(dir))
-		return 0;
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
 
+	dir = dget_parent(dentry);
+	if (!ext4_encrypted_inode(d_inode(dir))) {
+		dput(dir);
+		return 0;
+	}
+	ci = EXT4_I(d_inode(dir))->i_crypt_info;
 	if (ci && ci->ci_keyring_key &&
 	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
 					  (1 << KEY_FLAG_REVOKED) |
@@ -489,6 +501,7 @@
 	/* this should eventually be a flag in d_flags */
 	cached_with_key = dentry->d_fsdata != NULL;
 	dir_has_key = (ci != NULL);
+	dput(dir);
 
 	/*
 	 * If the dentry was cached without the key, and it is a
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 50ba27c..5d00bf0 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -155,13 +155,13 @@
 		err = ext4_map_blocks(NULL, inode, &map, 0);
 		if (err > 0) {
 			pgoff_t index = map.m_pblk >>
-					(PAGE_CACHE_SHIFT - inode->i_blkbits);
+					(PAGE_SHIFT - inode->i_blkbits);
 			if (!ra_has_index(&file->f_ra, index))
 				page_cache_sync_readahead(
 					sb->s_bdev->bd_inode->i_mapping,
 					&file->f_ra, file,
 					index, 1);
-			file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+			file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
 			bh = ext4_bread(NULL, inode, map.m_lblk, 0);
 			if (IS_ERR(bh)) {
 				err = PTR_ERR(bh);
@@ -266,7 +266,7 @@
 			ctx->pos += ext4_rec_len_from_disk(de->rec_len,
 						sb->s_blocksize);
 		}
-		if ((ctx->pos < inode->i_size) && !dir_relax(inode))
+		if ((ctx->pos < inode->i_size) && !dir_relax_shared(inode))
 			goto done;
 		brelse(bh);
 		bh = NULL;
@@ -644,7 +644,7 @@
 const struct file_operations ext4_dir_operations = {
 	.llseek		= ext4_dir_llseek,
 	.read		= generic_read_dir,
-	.iterate	= ext4_readdir,
+	.iterate_shared	= ext4_readdir,
 	.unlocked_ioctl = ext4_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= ext4_compat_ioctl,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c047435..72f4c9e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -912,6 +912,29 @@
 #include "extents_status.h"
 
 /*
+ * Lock subclasses for i_data_sem in the ext4_inode_info structure.
+ *
+ * These are needed to avoid lockdep false positives when we need to
+ * allocate blocks to the quota inode during ext4_map_blocks(), while
+ * holding i_data_sem for a normal (non-quota) inode.  Since we don't
+ * do quota tracking for the quota inode, this avoids deadlock (as
+ * well as infinite recursion, since it isn't turtles all the way
+ * down...)
+ *
+ *  I_DATA_SEM_NORMAL - Used for most inodes
+ *  I_DATA_SEM_OTHER  - Used by move_extent.c for the second normal inode
+ *			  where the second inode has larger inode number
+ *			  than the first
+ *  I_DATA_SEM_QUOTA  - Used for quota inodes only
+ */
+enum {
+	I_DATA_SEM_NORMAL = 0,
+	I_DATA_SEM_OTHER,
+	I_DATA_SEM_QUOTA,
+};
+
+
+/*
  * fourth extended file system inode data in memory
  */
 struct ext4_inode_info {
@@ -1961,7 +1984,7 @@
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == EXT4_MAX_REC_LEN || len == 0)
 		return blocksize;
 	return (len & 65532) | ((len & 3) << 16);
@@ -1974,7 +1997,7 @@
 {
 	if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
 		BUG();
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len < 65536)
 		return cpu_to_le16(len);
 	if (len == blocksize) {
@@ -2282,11 +2305,13 @@
 bool ext4_valid_contents_enc_mode(uint32_t mode);
 uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
 extern struct workqueue_struct *ext4_read_workqueue;
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+					    gfp_t gfp_flags);
 void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
 void ext4_restore_control_page(struct page *data_page);
 struct page *ext4_encrypt(struct inode *inode,
-			  struct page *plaintext_page);
+			  struct page *plaintext_page,
+			  gfp_t gfp_flags);
 int ext4_decrypt(struct page *page);
 int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
 			   ext4_fsblk_t pblk, ext4_lblk_t len);
@@ -2556,8 +2581,7 @@
 /* indirect.c */
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 				struct ext4_map_blocks *map, int flags);
-extern ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-				  loff_t offset);
+extern ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6659e21..00ff691 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -169,13 +169,8 @@
 	ret = __generic_file_write_iter(iocb, from);
 	inode_unlock(inode);
 
-	if (ret > 0) {
-		ssize_t err;
-
-		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
-		if (err < 0)
-			ret = err;
-	}
+	if (ret > 0)
+		ret = generic_write_sync(iocb, ret);
 	if (o_direct)
 		blk_finish_plug(&plug);
 
@@ -329,7 +324,7 @@
 	struct super_block *sb = inode->i_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	struct vfsmount *mnt = filp->f_path.mnt;
-	struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
+	struct dentry *dir;
 	struct path path;
 	char buf[64], *cp;
 	int ret;
@@ -373,14 +368,18 @@
 		if (ext4_encryption_info(inode) == NULL)
 			return -ENOKEY;
 	}
-	if (ext4_encrypted_inode(dir) &&
-	    !ext4_is_child_context_consistent_with_parent(dir, inode)) {
+
+	dir = dget_parent(file_dentry(filp));
+	if (ext4_encrypted_inode(d_inode(dir)) &&
+	    !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
 		ext4_warning(inode->i_sb,
 			     "Inconsistent encryption contexts: %lu/%lu\n",
-			     (unsigned long) dir->i_ino,
+			     (unsigned long) d_inode(dir)->i_ino,
 			     (unsigned long) inode->i_ino);
+		dput(dir);
 		return -EPERM;
 	}
+	dput(dir);
 	/*
 	 * Set up the jbd2_inode if we are opening the inode for
 	 * writing and the journal is present
@@ -428,8 +427,8 @@
 	lastoff = startoff;
 	endoff = (loff_t)end_blk << blkbits;
 
-	index = startoff >> PAGE_CACHE_SHIFT;
-	end = endoff >> PAGE_CACHE_SHIFT;
+	index = startoff >> PAGE_SHIFT;
+	end = endoff >> PAGE_SHIFT;
 
 	pagevec_init(&pvec, 0);
 	do {
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 3027fa6..627b7e8 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -659,12 +659,12 @@
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			   loff_t offset)
+ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	struct ext4_inode_info *ei = EXT4_I(inode);
+	loff_t offset = iocb->ki_pos;
 	handle_t *handle;
 	ssize_t ret;
 	int orphan = 0;
@@ -707,21 +707,21 @@
 			goto locked;
 		}
 		if (IS_DAX(inode))
-			ret = dax_do_io(iocb, inode, iter, offset,
+			ret = dax_do_io(iocb, inode, iter,
 					ext4_dio_get_block, NULL, 0);
 		else
 			ret = __blockdev_direct_IO(iocb, inode,
 						   inode->i_sb->s_bdev, iter,
-						   offset, ext4_dio_get_block,
+						   ext4_dio_get_block,
 						   NULL, NULL, 0);
 		inode_dio_end(inode);
 	} else {
 locked:
 		if (IS_DAX(inode))
-			ret = dax_do_io(iocb, inode, iter, offset,
+			ret = dax_do_io(iocb, inode, iter,
 					ext4_dio_get_block, NULL, DIO_LOCKING);
 		else
-			ret = blockdev_direct_IO(iocb, inode, iter, offset,
+			ret = blockdev_direct_IO(iocb, inode, iter,
 						 ext4_dio_get_block);
 
 		if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 7cbdd375..7bc6c85 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -482,7 +482,7 @@
 	ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr);
-	zero_user_segment(page, len, PAGE_CACHE_SIZE);
+	zero_user_segment(page, len, PAGE_SIZE);
 	SetPageUptodate(page);
 	brelse(iloc.bh);
 
@@ -507,7 +507,7 @@
 	if (!page->index)
 		ret = ext4_read_inline_page(inode, page);
 	else if (!PageUptodate(page)) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 	}
 
@@ -595,7 +595,7 @@
 
 	if (ret) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 		ext4_orphan_add(handle, inode);
 		up_write(&EXT4_I(inode)->xattr_sem);
@@ -621,7 +621,7 @@
 out:
 	if (page) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	if (sem_held)
 		up_write(&EXT4_I(inode)->xattr_sem);
@@ -690,7 +690,7 @@
 	if (!ext4_has_inline_data(inode)) {
 		ret = 0;
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		goto out_up_read;
 	}
 
@@ -815,7 +815,7 @@
 	if (ret) {
 		up_read(&EXT4_I(inode)->xattr_sem);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		ext4_truncate_failed_write(inode);
 		return ret;
 	}
@@ -829,7 +829,7 @@
 	up_read(&EXT4_I(inode)->xattr_sem);
 	if (page) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return ret;
 }
@@ -919,7 +919,7 @@
 out_release_page:
 	up_read(&EXT4_I(inode)->xattr_sem);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out_journal:
 	ext4_journal_stop(handle);
 out:
@@ -947,7 +947,7 @@
 		i_size_changed = 1;
 	}
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	/*
 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index dab84a2..79b298d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -763,39 +763,47 @@
 /* Maximum number of blocks we map for direct IO at once. */
 #define DIO_MAX_BLOCKS 4096
 
-static handle_t *start_dio_trans(struct inode *inode,
-				 struct buffer_head *bh_result)
+/*
+ * Get blocks function for the cases that need to start a transaction -
+ * generally different cases of direct IO and DAX IO. It also handles retries
+ * in case of ENOSPC.
+ */
+static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
+				struct buffer_head *bh_result, int flags)
 {
 	int dio_credits;
+	handle_t *handle;
+	int retries = 0;
+	int ret;
 
 	/* Trim mapping request to maximum we can map at once for DIO */
 	if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
 		bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
 	dio_credits = ext4_chunk_trans_blocks(inode,
 				      bh_result->b_size >> inode->i_blkbits);
-	return ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
+retry:
+	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	ret = _ext4_get_block(inode, iblock, bh_result, flags);
+	ext4_journal_stop(handle);
+
+	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+		goto retry;
+	return ret;
 }
 
 /* Get block function for DIO reads and writes to inodes without extents */
 int ext4_dio_get_block(struct inode *inode, sector_t iblock,
 		       struct buffer_head *bh, int create)
 {
-	handle_t *handle;
-	int ret;
-
 	/* We don't expect handle for direct IO */
 	WARN_ON_ONCE(ext4_journal_current_handle());
 
-	if (create) {
-		handle = start_dio_trans(inode, bh);
-		if (IS_ERR(handle))
-			return PTR_ERR(handle);
-	}
-	ret = _ext4_get_block(inode, iblock, bh,
-			      create ? EXT4_GET_BLOCKS_CREATE : 0);
-	if (create)
-		ext4_journal_stop(handle);
-	return ret;
+	if (!create)
+		return _ext4_get_block(inode, iblock, bh, 0);
+	return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
 }
 
 /*
@@ -806,18 +814,13 @@
 static int ext4_dio_get_block_unwritten_async(struct inode *inode,
 		sector_t iblock, struct buffer_head *bh_result,	int create)
 {
-	handle_t *handle;
 	int ret;
 
 	/* We don't expect handle for direct IO */
 	WARN_ON_ONCE(ext4_journal_current_handle());
 
-	handle = start_dio_trans(inode, bh_result);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = _ext4_get_block(inode, iblock, bh_result,
-			      EXT4_GET_BLOCKS_IO_CREATE_EXT);
-	ext4_journal_stop(handle);
+	ret = ext4_get_block_trans(inode, iblock, bh_result,
+				   EXT4_GET_BLOCKS_IO_CREATE_EXT);
 
 	/*
 	 * When doing DIO using unwritten extents, we need io_end to convert
@@ -850,18 +853,13 @@
 static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
 		sector_t iblock, struct buffer_head *bh_result,	int create)
 {
-	handle_t *handle;
 	int ret;
 
 	/* We don't expect handle for direct IO */
 	WARN_ON_ONCE(ext4_journal_current_handle());
 
-	handle = start_dio_trans(inode, bh_result);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = _ext4_get_block(inode, iblock, bh_result,
-			      EXT4_GET_BLOCKS_IO_CREATE_EXT);
-	ext4_journal_stop(handle);
+	ret = ext4_get_block_trans(inode, iblock, bh_result,
+				   EXT4_GET_BLOCKS_IO_CREATE_EXT);
 
 	/*
 	 * Mark inode as having pending DIO writes to unwritten extents.
@@ -1057,7 +1055,7 @@
 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 				  get_block_t *get_block)
 {
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	unsigned to = from + len;
 	struct inode *inode = page->mapping->host;
 	unsigned block_start, block_end;
@@ -1069,15 +1067,15 @@
 	bool decrypt = false;
 
 	BUG_ON(!PageLocked(page));
-	BUG_ON(from > PAGE_CACHE_SIZE);
-	BUG_ON(to > PAGE_CACHE_SIZE);
+	BUG_ON(from > PAGE_SIZE);
+	BUG_ON(to > PAGE_SIZE);
 	BUG_ON(from > to);
 
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
 	head = page_buffers(page);
 	bbits = ilog2(blocksize);
-	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
 
 	for (bh = head, block_start = 0; bh != head || !block_start;
 	    block++, block_start = block_end, bh = bh->b_this_page) {
@@ -1159,8 +1157,8 @@
 	 * we allocate blocks but write fails for some reason
 	 */
 	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
-	index = pos >> PAGE_CACHE_SHIFT;
-	from = pos & (PAGE_CACHE_SIZE - 1);
+	index = pos >> PAGE_SHIFT;
+	from = pos & (PAGE_SIZE - 1);
 	to = from + len;
 
 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -1188,7 +1186,7 @@
 retry_journal:
 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
 	if (IS_ERR(handle)) {
-		page_cache_release(page);
+		put_page(page);
 		return PTR_ERR(handle);
 	}
 
@@ -1196,7 +1194,7 @@
 	if (page->mapping != mapping) {
 		/* The page got truncated from under us */
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		ext4_journal_stop(handle);
 		goto retry_grab;
 	}
@@ -1252,7 +1250,7 @@
 		if (ret == -ENOSPC &&
 		    ext4_should_retry_alloc(inode->i_sb, &retries))
 			goto retry_journal;
-		page_cache_release(page);
+		put_page(page);
 		return ret;
 	}
 	*pagep = page;
@@ -1295,7 +1293,7 @@
 		ret = ext4_jbd2_file_inode(handle, inode);
 		if (ret) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto errout;
 		}
 	}
@@ -1315,7 +1313,7 @@
 	 */
 	i_size_changed = ext4_update_inode_size(inode, pos + copied);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
@@ -1399,7 +1397,7 @@
 	int size_changed = 0;
 
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
-	from = pos & (PAGE_CACHE_SIZE - 1);
+	from = pos & (PAGE_SIZE - 1);
 	to = from + len;
 
 	BUG_ON(!ext4_handle_valid(handle));
@@ -1423,7 +1421,7 @@
 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
@@ -1537,7 +1535,7 @@
 	int num_clusters;
 	ext4_fsblk_t lblk;
 
-	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+	BUG_ON(stop > PAGE_SIZE || stop < length);
 
 	head = page_buffers(page);
 	bh = head;
@@ -1553,7 +1551,7 @@
 			clear_buffer_delay(bh);
 		} else if (contiguous_blks) {
 			lblk = page->index <<
-			       (PAGE_CACHE_SHIFT - inode->i_blkbits);
+			       (PAGE_SHIFT - inode->i_blkbits);
 			lblk += (curr_off >> inode->i_blkbits) -
 				contiguous_blks;
 			ext4_es_remove_extent(inode, lblk, contiguous_blks);
@@ -1563,7 +1561,7 @@
 	} while ((bh = bh->b_this_page) != head);
 
 	if (contiguous_blks) {
-		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
 		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
 		ext4_es_remove_extent(inode, lblk, contiguous_blks);
 	}
@@ -1572,7 +1570,7 @@
 	 * need to release the reserved space for that cluster. */
 	num_clusters = EXT4_NUM_B2C(sbi, to_release);
 	while (num_clusters > 0) {
-		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
+		lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
 			((num_clusters - 1) << sbi->s_cluster_bits);
 		if (sbi->s_cluster_ratio == 1 ||
 		    !ext4_find_delalloc_cluster(inode, lblk))
@@ -1619,8 +1617,8 @@
 	end   = mpd->next_page - 1;
 	if (invalidate) {
 		ext4_lblk_t start, last;
-		start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-		last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		start = index << (PAGE_SHIFT - inode->i_blkbits);
+		last = end << (PAGE_SHIFT - inode->i_blkbits);
 		ext4_es_remove_extent(inode, start, last - start + 1);
 	}
 
@@ -1636,7 +1634,7 @@
 			BUG_ON(!PageLocked(page));
 			BUG_ON(PageWriteback(page));
 			if (invalidate) {
-				block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+				block_invalidatepage(page, 0, PAGE_SIZE);
 				ClearPageUptodate(page);
 			}
 			unlock_page(page);
@@ -2007,10 +2005,10 @@
 
 	trace_ext4_writepage(page);
 	size = i_size_read(inode);
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
+	if (page->index == size >> PAGE_SHIFT)
+		len = size & ~PAGE_MASK;
 	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 
 	page_bufs = page_buffers(page);
 	/*
@@ -2034,7 +2032,7 @@
 				   ext4_bh_delay_or_unwritten)) {
 		redirty_page_for_writepage(wbc, page);
 		if ((current->flags & PF_MEMALLOC) ||
-		    (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) {
+		    (inode->i_sb->s_blocksize == PAGE_SIZE)) {
 			/*
 			 * For memory cleaning there's no point in writing only
 			 * some buffers. So just bail out. Warn if we came here
@@ -2076,10 +2074,10 @@
 	int err;
 
 	BUG_ON(page->index != mpd->first_page);
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
+	if (page->index == size >> PAGE_SHIFT)
+		len = size & ~PAGE_MASK;
 	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	clear_page_dirty_for_io(page);
 	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
 	if (!err)
@@ -2213,7 +2211,7 @@
 	int nr_pages, i;
 	struct inode *inode = mpd->inode;
 	struct buffer_head *head, *bh;
-	int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
+	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
 	pgoff_t start, end;
 	ext4_lblk_t lblk;
 	sector_t pblock;
@@ -2274,7 +2272,7 @@
 			 * supports blocksize < pagesize as we will try to
 			 * convert potentially unmapped parts of inode.
 			 */
-			mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
+			mpd->io_submit.io_end->size += PAGE_SIZE;
 			/* Page fully mapped - let IO run! */
 			err = mpage_submit_page(mpd, page);
 			if (err < 0) {
@@ -2426,7 +2424,7 @@
 	 * Update on-disk size after IO is submitted.  Races with
 	 * truncate are avoided by checking i_size under i_data_sem.
 	 */
-	disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
+	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
 	if (disksize > EXT4_I(inode)->i_disksize) {
 		int err2;
 		loff_t i_size;
@@ -2562,7 +2560,7 @@
 			mpd->next_page = page->index + 1;
 			/* Add all dirty buffers to mpd */
 			lblk = ((ext4_lblk_t)page->index) <<
-				(PAGE_CACHE_SHIFT - blkbits);
+				(PAGE_SHIFT - blkbits);
 			head = page_buffers(page);
 			err = mpage_process_page_bufs(mpd, head, head, lblk);
 			if (err <= 0)
@@ -2647,7 +2645,7 @@
 		 * We may need to convert up to one extent per block in
 		 * the page and we may dirty the inode.
 		 */
-		rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
+		rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
 	}
 
 	/*
@@ -2678,8 +2676,8 @@
 		mpd.first_page = writeback_index;
 		mpd.last_page = -1;
 	} else {
-		mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
-		mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
+		mpd.first_page = wbc->range_start >> PAGE_SHIFT;
+		mpd.last_page = wbc->range_end >> PAGE_SHIFT;
 	}
 
 	mpd.inode = inode;
@@ -2838,7 +2836,7 @@
 	struct inode *inode = mapping->host;
 	handle_t *handle;
 
-	index = pos >> PAGE_CACHE_SHIFT;
+	index = pos >> PAGE_SHIFT;
 
 	if (ext4_nonda_switch(inode->i_sb)) {
 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
@@ -2881,7 +2879,7 @@
 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
 				ext4_da_write_credits(inode, pos, len));
 	if (IS_ERR(handle)) {
-		page_cache_release(page);
+		put_page(page);
 		return PTR_ERR(handle);
 	}
 
@@ -2889,7 +2887,7 @@
 	if (page->mapping != mapping) {
 		/* The page got truncated from under us */
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		ext4_journal_stop(handle);
 		goto retry_grab;
 	}
@@ -2917,7 +2915,7 @@
 		    ext4_should_retry_alloc(inode->i_sb, &retries))
 			goto retry_journal;
 
-		page_cache_release(page);
+		put_page(page);
 		return ret;
 	}
 
@@ -2965,7 +2963,7 @@
 				      len, copied, page, fsdata);
 
 	trace_ext4_da_write_end(inode, pos, len, copied);
-	start = pos & (PAGE_CACHE_SIZE - 1);
+	start = pos & (PAGE_SIZE - 1);
 	end = start + copied - 1;
 
 	/*
@@ -3187,7 +3185,7 @@
 	/*
 	 * If it's a full truncate we just forget about the pending dirtying
 	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE)
+	if (offset == 0 && length == PAGE_SIZE)
 		ClearPageChecked(page);
 
 	return jbd2_journal_invalidatepage(journal, page, offset, length);
@@ -3336,12 +3334,12 @@
  * if the machine crashes during the write.
  *
  */
-static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-				  loff_t offset)
+static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	ssize_t ret;
+	loff_t offset = iocb->ki_pos;
 	size_t count = iov_iter_count(iter);
 	int overwrite = 0;
 	get_block_t *get_block_func = NULL;
@@ -3350,7 +3348,7 @@
 
 	/* Use the old path for reads and writes beyond i_size. */
 	if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size)
-		return ext4_ind_direct_IO(iocb, iter, offset);
+		return ext4_ind_direct_IO(iocb, iter);
 
 	BUG_ON(iocb->private == NULL);
 
@@ -3402,11 +3400,11 @@
 	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
 #endif
 	if (IS_DAX(inode))
-		ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
+		ret = dax_do_io(iocb, inode, iter, get_block_func,
 				ext4_end_io_dio, dio_flags);
 	else
 		ret = __blockdev_direct_IO(iocb, inode,
-					   inode->i_sb->s_bdev, iter, offset,
+					   inode->i_sb->s_bdev, iter,
 					   get_block_func,
 					   ext4_end_io_dio, NULL, dio_flags);
 
@@ -3433,12 +3431,12 @@
 	return ret;
 }
 
-static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			      loff_t offset)
+static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	size_t count = iov_iter_count(iter);
+	loff_t offset = iocb->ki_pos;
 	ssize_t ret;
 
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
@@ -3458,9 +3456,9 @@
 
 	trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		ret = ext4_ext_direct_IO(iocb, iter, offset);
+		ret = ext4_ext_direct_IO(iocb, iter);
 	else
-		ret = ext4_ind_direct_IO(iocb, iter, offset);
+		ret = ext4_ind_direct_IO(iocb, iter);
 	trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
 	return ret;
 }
@@ -3556,8 +3554,8 @@
 static int __ext4_block_zero_page_range(handle_t *handle,
 		struct address_space *mapping, loff_t from, loff_t length)
 {
-	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	ext4_fsblk_t index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize, pos;
 	ext4_lblk_t iblock;
 	struct inode *inode = mapping->host;
@@ -3565,14 +3563,14 @@
 	struct page *page;
 	int err = 0;
 
-	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
+	page = find_or_create_page(mapping, from >> PAGE_SHIFT,
 				   mapping_gfp_constraint(mapping, ~__GFP_FS));
 	if (!page)
 		return -ENOMEM;
 
 	blocksize = inode->i_sb->s_blocksize;
 
-	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
@@ -3614,7 +3612,7 @@
 		    ext4_encrypted_inode(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!ext4_has_encryption_key(inode));
-			BUG_ON(blocksize != PAGE_CACHE_SIZE);
+			BUG_ON(blocksize != PAGE_SIZE);
 			WARN_ON_ONCE(ext4_decrypt(page));
 		}
 	}
@@ -3638,7 +3636,7 @@
 
 unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -3653,7 +3651,7 @@
 		struct address_space *mapping, loff_t from, loff_t length)
 {
 	struct inode *inode = mapping->host;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize = inode->i_sb->s_blocksize;
 	unsigned max = blocksize - (offset & (blocksize - 1));
 
@@ -3678,7 +3676,7 @@
 static int ext4_block_truncate_page(handle_t *handle,
 		struct address_space *mapping, loff_t from)
 {
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned length;
 	unsigned blocksize;
 	struct inode *inode = mapping->host;
@@ -3816,7 +3814,7 @@
 	 */
 	if (offset + length > inode->i_size) {
 		length = inode->i_size +
-		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
+		   PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
 		   offset;
 	}
 
@@ -4891,23 +4889,23 @@
 	tid_t commit_tid = 0;
 	int ret;
 
-	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+	offset = inode->i_size & (PAGE_SIZE - 1);
 	/*
 	 * All buffers in the last page remain valid? Then there's nothing to
-	 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
+	 * do. We do the check mainly to optimize the common PAGE_SIZE ==
 	 * blocksize case
 	 */
-	if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
+	if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
 		return;
 	while (1) {
 		page = find_lock_page(inode->i_mapping,
-				      inode->i_size >> PAGE_CACHE_SHIFT);
+				      inode->i_size >> PAGE_SHIFT);
 		if (!page)
 			return;
 		ret = __ext4_journalled_invalidatepage(page, offset,
-						PAGE_CACHE_SIZE - offset);
+						PAGE_SIZE - offset);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		if (ret != -EBUSY)
 			return;
 		commit_tid = 0;
@@ -5546,10 +5544,10 @@
 		goto out;
 	}
 
-	if (page->index == size >> PAGE_CACHE_SHIFT)
-		len = size & ~PAGE_CACHE_MASK;
+	if (page->index == size >> PAGE_SHIFT)
+		len = size & ~PAGE_MASK;
 	else
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	/*
 	 * Return if we have all the buffers mapped. This avoids the need to do
 	 * journal_start/journal_stop which can block and take a long time
@@ -5580,7 +5578,7 @@
 	ret = block_page_mkwrite(vma, vmf, get_block);
 	if (!ret && ext4_should_journal_data(inode)) {
 		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
-			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
+			  PAGE_SIZE, NULL, do_journal_get_write_access)) {
 			unlock_page(page);
 			ret = VM_FAULT_SIGBUS;
 			ext4_journal_stop(handle);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 50e05df..eeeade7 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -119,7 +119,7 @@
  *
  *
  * one block each for bitmap and buddy information.  So for each group we
- * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
+ * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
  * blocksize) blocks.  So it can have information regarding groups_per_page
  * which is blocks_per_page/2
  *
@@ -807,7 +807,7 @@
  *
  * one block each for bitmap and buddy information.
  * So for each group we take up 2 blocks. A page can
- * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize)  blocks.
+ * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
  * So it can have information regarding groups_per_page which
  * is blocks_per_page/2
  *
@@ -839,7 +839,7 @@
 	sb = inode->i_sb;
 	ngroups = ext4_get_groups_count(sb);
 	blocksize = 1 << inode->i_blkbits;
-	blocks_per_page = PAGE_CACHE_SIZE / blocksize;
+	blocks_per_page = PAGE_SIZE / blocksize;
 
 	groups_per_page = blocks_per_page >> 1;
 	if (groups_per_page == 0)
@@ -993,7 +993,7 @@
 	e4b->bd_buddy_page = NULL;
 	e4b->bd_bitmap_page = NULL;
 
-	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
 	/*
 	 * the buddy cache inode stores the block bitmap
 	 * and buddy information in consecutive blocks.
@@ -1028,11 +1028,11 @@
 {
 	if (e4b->bd_bitmap_page) {
 		unlock_page(e4b->bd_bitmap_page);
-		page_cache_release(e4b->bd_bitmap_page);
+		put_page(e4b->bd_bitmap_page);
 	}
 	if (e4b->bd_buddy_page) {
 		unlock_page(e4b->bd_buddy_page);
-		page_cache_release(e4b->bd_buddy_page);
+		put_page(e4b->bd_buddy_page);
 	}
 }
 
@@ -1125,7 +1125,7 @@
 	might_sleep();
 	mb_debug(1, "load group %u\n", group);
 
-	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
 	grp = ext4_get_group_info(sb, group);
 
 	e4b->bd_blkbits = sb->s_blocksize_bits;
@@ -1167,7 +1167,7 @@
 			 * is yet to initialize the same. So
 			 * wait for it to initialize.
 			 */
-			page_cache_release(page);
+			put_page(page);
 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
 		if (page) {
 			BUG_ON(page->mapping != inode->i_mapping);
@@ -1203,7 +1203,7 @@
 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
 	if (page == NULL || !PageUptodate(page)) {
 		if (page)
-			page_cache_release(page);
+			put_page(page);
 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
 		if (page) {
 			BUG_ON(page->mapping != inode->i_mapping);
@@ -1238,11 +1238,11 @@
 
 err:
 	if (page)
-		page_cache_release(page);
+		put_page(page);
 	if (e4b->bd_bitmap_page)
-		page_cache_release(e4b->bd_bitmap_page);
+		put_page(e4b->bd_bitmap_page);
 	if (e4b->bd_buddy_page)
-		page_cache_release(e4b->bd_buddy_page);
+		put_page(e4b->bd_buddy_page);
 	e4b->bd_buddy = NULL;
 	e4b->bd_bitmap = NULL;
 	return ret;
@@ -1257,9 +1257,9 @@
 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
 {
 	if (e4b->bd_bitmap_page)
-		page_cache_release(e4b->bd_bitmap_page);
+		put_page(e4b->bd_bitmap_page);
 	if (e4b->bd_buddy_page)
-		page_cache_release(e4b->bd_buddy_page);
+		put_page(e4b->bd_buddy_page);
 }
 
 
@@ -2833,8 +2833,8 @@
 		/* No more items in the per group rb tree
 		 * balance refcounts from ext4_mb_free_metadata()
 		 */
-		page_cache_release(e4b.bd_buddy_page);
-		page_cache_release(e4b.bd_bitmap_page);
+		put_page(e4b.bd_buddy_page);
+		put_page(e4b.bd_bitmap_page);
 	}
 	ext4_unlock_group(sb, entry->efd_group);
 	kmem_cache_free(ext4_free_data_cachep, entry);
@@ -4385,9 +4385,9 @@
 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
 	}
 	if (ac->ac_bitmap_page)
-		page_cache_release(ac->ac_bitmap_page);
+		put_page(ac->ac_bitmap_page);
 	if (ac->ac_buddy_page)
-		page_cache_release(ac->ac_buddy_page);
+		put_page(ac->ac_buddy_page);
 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
 		mutex_unlock(&ac->ac_lg->lg_mutex);
 	ext4_mb_collect_stats(ac);
@@ -4599,8 +4599,8 @@
 		 * otherwise we'll refresh it from
 		 * on-disk bitmap and lose not-yet-available
 		 * blocks */
-		page_cache_get(e4b->bd_buddy_page);
-		page_cache_get(e4b->bd_bitmap_page);
+		get_page(e4b->bd_buddy_page);
+		get_page(e4b->bd_bitmap_page);
 	}
 	while (*n) {
 		parent = *n;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 4098acc..325cef4 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -60,10 +60,10 @@
 {
 	if (first < second) {
 		down_write(&EXT4_I(first)->i_data_sem);
-		down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+		down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
 	} else {
 		down_write(&EXT4_I(second)->i_data_sem);
-		down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
+		down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
 
 	}
 }
@@ -156,7 +156,7 @@
 	page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
 	if (!page[1]) {
 		unlock_page(page[0]);
-		page_cache_release(page[0]);
+		put_page(page[0]);
 		return -ENOMEM;
 	}
 	/*
@@ -192,7 +192,7 @@
 		create_empty_buffers(page, blocksize, 0);
 
 	head = page_buffers(page);
-	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
 	for (bh = head, block_start = 0; bh != head || !block_start;
 	     block++, block_start = block_end, bh = bh->b_this_page) {
 		block_end = block_start + blocksize;
@@ -268,7 +268,7 @@
 	int i, err2, jblocks, retries = 0;
 	int replaced_count = 0;
 	int from = data_offset_in_page << orig_inode->i_blkbits;
-	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+	int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
 	struct super_block *sb = orig_inode->i_sb;
 	struct buffer_head *bh = NULL;
 
@@ -404,9 +404,9 @@
 
 unlock_pages:
 	unlock_page(pagep[0]);
-	page_cache_release(pagep[0]);
+	put_page(pagep[0]);
 	unlock_page(pagep[1]);
-	page_cache_release(pagep[1]);
+	put_page(pagep[1]);
 stop_journal:
 	ext4_journal_stop(handle);
 	if (*err == -ENOSPC &&
@@ -484,6 +484,13 @@
 		return -EBUSY;
 	}
 
+	if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
+		ext4_debug("ext4 move extent: The argument files should "
+			"not be quota files [ino:orig %lu, donor %lu]\n",
+			orig_inode->i_ino, donor_inode->i_ino);
+		return -EBUSY;
+	}
+
 	/* Ext4 move extent supports only extent based file */
 	if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
 		ext4_debug("ext4 move extent: orig file is not extents "
@@ -554,7 +561,7 @@
 	struct inode *orig_inode = file_inode(o_filp);
 	struct inode *donor_inode = file_inode(d_filp);
 	struct ext4_ext_path *path = NULL;
-	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+	int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
 	ext4_lblk_t o_end, o_start = orig_blk;
 	ext4_lblk_t d_start = donor_blk;
 	int ret;
@@ -648,9 +655,9 @@
 		if (o_end - o_start < cur_len)
 			cur_len = o_end - o_start;
 
-		orig_page_index = o_start >> (PAGE_CACHE_SHIFT -
+		orig_page_index = o_start >> (PAGE_SHIFT -
 					       orig_inode->i_blkbits);
-		donor_page_index = d_start >> (PAGE_CACHE_SHIFT -
+		donor_page_index = d_start >> (PAGE_SHIFT -
 					       donor_inode->i_blkbits);
 		offset_in_page = o_start % blocks_per_page;
 		if (cur_len > blocks_per_page- offset_in_page)
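
The first move_extent.c hunk switches the nesting annotation from SINGLE_DEPTH_NESTING to the new I_DATA_SEM_OTHER subclass, but the deadlock-avoidance rule itself is unchanged: when both i_data_sem locks are needed, whichever inode compares lower is always locked first, so two concurrent callers cannot take the locks in opposite (ABBA) order. A toy pthread sketch of that ordering rule (illustration only; the names are made up and the lockdep subclass annotation has no userspace equivalent):

#include <pthread.h>
#include <stdio.h>

struct obj {
	unsigned long id;
	pthread_mutex_t lock;
};

/* Always take the lock of the object with the smaller id first, the
 * same ordering idea as ext4_double_down_write_data_sem() above. */
static void double_lock(struct obj *a, struct obj *b)
{
	struct obj *first = (a->id < b->id) ? a : b;
	struct obj *second = (a->id < b->id) ? b : a;

	pthread_mutex_lock(&first->lock);
	pthread_mutex_lock(&second->lock);
}

static void double_unlock(struct obj *a, struct obj *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct obj x = { 1, PTHREAD_MUTEX_INITIALIZER };
	struct obj y = { 2, PTHREAD_MUTEX_INITIALIZER };

	double_lock(&x, &y);
	puts("both locked in id order");
	double_unlock(&x, &y);
	return 0;
}
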
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 48e4b89..5611ec9 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1638,13 +1638,13 @@
 	ino = le32_to_cpu(de->inode);
 	brelse(bh);
 
-	if (!ext4_valid_inum(d_inode(child)->i_sb, ino)) {
+	if (!ext4_valid_inum(child->d_sb, ino)) {
 		EXT4_ERROR_INODE(d_inode(child),
 				 "bad parent inode number: %u", ino);
 		return ERR_PTR(-EFSCORRUPTED);
 	}
 
-	return d_obtain_alias(ext4_iget_normal(d_inode(child)->i_sb, ino));
+	return d_obtain_alias(ext4_iget_normal(child->d_sb, ino));
 }
 
 /*
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index d77d15f..e4fc8ea 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/backing-dev.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -432,8 +433,8 @@
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	if (len < PAGE_CACHE_SIZE)
-		zero_user_segment(page, len, PAGE_CACHE_SIZE);
+	if (len < PAGE_SIZE)
+		zero_user_segment(page, len, PAGE_SIZE);
 	/*
 	 * In the first loop we prepare and mark buffers to submit. We have to
 	 * mark all buffers in the page before submitting so that
@@ -470,9 +471,20 @@
 
 	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
 	    nr_to_submit) {
-		data_page = ext4_encrypt(inode, page);
+		gfp_t gfp_flags = GFP_NOFS;
+
+	retry_encrypt:
+		data_page = ext4_encrypt(inode, page, gfp_flags);
 		if (IS_ERR(data_page)) {
 			ret = PTR_ERR(data_page);
+			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
+				if (io->io_bio) {
+					ext4_io_submit(io);
+					congestion_wait(BLK_RW_ASYNC, HZ/50);
+				}
+				gfp_flags |= __GFP_NOFAIL;
+				goto retry_encrypt;
+			}
 			data_page = NULL;
 			goto out;
 		}
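
The page-io.c change plumbs gfp_flags into the write-path encryption and adds a fallback: the bounce page is first requested with GFP_NOFS, and if that fails during data-integrity writeback the already-built bio is submitted, the task waits for congestion, and the allocation is retried with __GFP_NOFAIL. A loose userspace analogue of that control flow (the kernel calls involved have no userspace equivalents, so only the try / make-progress / retry shape is shown; alloc_bounce and flush_pending_io are invented names):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Try an ordinary allocation; on failure flush work that is already
 * queued, back off briefly, and retry until the allocation succeeds
 * (the kernel path escalates from GFP_NOFS to __GFP_NOFAIL). */
static void *alloc_bounce(size_t size, void (*flush_pending)(void))
{
	for (;;) {
		void *p = malloc(size);

		if (p)
			return p;
		flush_pending();	/* submit what we already have */
		usleep(20 * 1000);	/* roughly the HZ/50 wait above */
	}
}

static void flush_pending_io(void)
{
	fflush(NULL);			/* stand-in for ext4_io_submit() */
}

int main(void)
{
	void *page = alloc_bounce(4096, flush_pending_io);

	free(page);
	return 0;
}
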
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 5dc5e95..dc54a4b 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -23,7 +23,7 @@
  *
  * then this code just gives up and calls the buffer_head-based read function.
  * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
  *
  */
 
@@ -140,7 +140,7 @@
 
 	struct inode *inode = mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
 	const unsigned blocksize = 1 << blkbits;
 	sector_t block_in_file;
 	sector_t last_block;
@@ -173,7 +173,7 @@
 		if (page_has_buffers(page))
 			goto confused;
 
-		block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 		last_block = block_in_file + nr_pages * blocks_per_page;
 		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
 		if (last_block > last_block_in_file)
@@ -217,7 +217,7 @@
 				set_error_page:
 					SetPageError(page);
 					zero_user_segment(page, 0,
-							  PAGE_CACHE_SIZE);
+							  PAGE_SIZE);
 					unlock_page(page);
 					goto next_page;
 				}
@@ -250,7 +250,7 @@
 		}
 		if (first_hole != blocks_per_page) {
 			zero_user_segment(page, first_hole << blkbits,
-					  PAGE_CACHE_SIZE);
+					  PAGE_SIZE);
 			if (first_hole == 0) {
 				SetPageUptodate(page);
 				unlock_page(page);
@@ -279,7 +279,7 @@
 
 			if (ext4_encrypted_inode(inode) &&
 			    S_ISREG(inode->i_mode)) {
-				ctx = ext4_get_crypto_ctx(inode);
+				ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
 				if (IS_ERR(ctx))
 					goto set_error_page;
 			}
@@ -319,7 +319,7 @@
 			unlock_page(page);
 	next_page:
 		if (pages)
-			page_cache_release(page);
+			put_page(page);
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 5392975..304c712 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1113,6 +1113,7 @@
 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
 			     unsigned int flags);
 static int ext4_enable_quotas(struct super_block *sb);
+static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
 
 static struct dquot **ext4_get_dquots(struct inode *inode)
 {
@@ -1129,7 +1130,7 @@
 	.alloc_dquot	= dquot_alloc,
 	.destroy_dquot	= dquot_destroy,
 	.get_projid	= ext4_get_projid,
-	.get_next_id	= dquot_get_next_id,
+	.get_next_id	= ext4_get_next_id,
 };
 
 static const struct quotactl_ops ext4_qctl_operations = {
@@ -1323,9 +1324,9 @@
 		return -1;
 	}
 	if (ext4_has_feature_quota(sb)) {
-		ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
-			 "when QUOTA feature is enabled");
-		return -1;
+		ext4_msg(sb, KERN_INFO, "Journaled quota options "
+			 "ignored when QUOTA feature is enabled");
+		return 1;
 	}
 	qname = match_strdup(args);
 	if (!qname) {
@@ -1688,10 +1689,10 @@
 			return -1;
 		}
 		if (ext4_has_feature_quota(sb)) {
-			ext4_msg(sb, KERN_ERR,
-				 "Cannot set journaled quota options "
+			ext4_msg(sb, KERN_INFO,
+				 "Quota format mount options ignored "
 				 "when QUOTA feature is enabled");
-			return -1;
+			return 1;
 		}
 		sbi->s_jquota_fmt = m->mount_opt;
 #endif
@@ -1756,11 +1757,11 @@
 #ifdef CONFIG_QUOTA
 	if (ext4_has_feature_quota(sb) &&
 	    (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
-		ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
-			 "feature is enabled");
-		return 0;
-	}
-	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
+		ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
+			 "mount options ignored.");
+		clear_opt(sb, USRQUOTA);
+		clear_opt(sb, GRPQUOTA);
+	} else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
 		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
 			clear_opt(sb, USRQUOTA);
 
@@ -1784,7 +1785,7 @@
 		int blocksize =
 			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
 
-		if (blocksize < PAGE_CACHE_SIZE) {
+		if (blocksize < PAGE_SIZE) {
 			ext4_msg(sb, KERN_ERR, "can't mount with "
 				 "dioread_nolock if block size != PAGE_SIZE");
 			return 0;
@@ -3808,7 +3809,7 @@
 	}
 
 	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
-	    (blocksize != PAGE_CACHE_SIZE)) {
+	    (blocksize != PAGE_SIZE)) {
 		ext4_msg(sb, KERN_ERR,
 			 "Unsupported blocksize for fs encryption");
 		goto failed_mount_wq;
@@ -5028,6 +5029,20 @@
 					EXT4_SB(sb)->s_jquota_fmt, type);
 }
 
+static void lockdep_set_quota_inode(struct inode *inode, int subclass)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+
+	/* The first argument of lockdep_set_subclass has to be
+	 * *exactly* the same as the argument to init_rwsem() --- in
+	 * this case, in init_once() --- or lockdep gets unhappy
+	 * because the name of the lock is set using the
+	 * stringification of the argument to init_rwsem().
+	 */
+	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
+	lockdep_set_subclass(&ei->i_data_sem, subclass);
+}
+
 /*
  * Standard function to be called on quota_on
  */
@@ -5067,8 +5082,12 @@
 		if (err)
 			return err;
 	}
-
-	return dquot_quota_on(sb, type, format_id, path);
+	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
+	err = dquot_quota_on(sb, type, format_id, path);
+	if (err)
+		lockdep_set_quota_inode(path->dentry->d_inode,
+					     I_DATA_SEM_NORMAL);
+	return err;
 }
 
 static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
@@ -5095,8 +5114,11 @@
 
 	/* Don't account quota for quota files to avoid recursion */
 	qf_inode->i_flags |= S_NOQUOTA;
+	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
 	err = dquot_enable(qf_inode, type, format_id, flags);
 	iput(qf_inode);
+	if (err)
+		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
 
 	return err;
 }
@@ -5253,6 +5275,17 @@
 	return len;
 }
 
+static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+	const struct quota_format_ops	*ops;
+
+	if (!sb_has_quota_loaded(sb, qid->type))
+		return -ESRCH;
+	ops = sb_dqopt(sb)->ops[qid->type];
+	if (!ops || !ops->get_next_id)
+		return -ENOSYS;
+	return dquot_get_next_id(sb, qid);
+}
 #endif
 
 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 6f7ee30..75ed5c2 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -80,12 +80,12 @@
 	if (res <= plen)
 		paddr[res] = '\0';
 	if (cpage)
-		page_cache_release(cpage);
+		put_page(cpage);
 	set_delayed_call(done, kfree_link, paddr);
 	return paddr;
 errout:
 	if (cpage)
-		page_cache_release(cpage);
+		put_page(cpage);
 	kfree(paddr);
 	return ERR_PTR(res);
 }
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 0441e05..e79bd32 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -230,6 +230,27 @@
 	return error;
 }
 
+static int
+__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
+			 void *end, const char *function, unsigned int line)
+{
+	struct ext4_xattr_entry *entry = IFIRST(header);
+	int error = -EFSCORRUPTED;
+
+	if (((void *) header >= end) ||
+	    (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
+		goto errout;
+	error = ext4_xattr_check_names(entry, end, entry);
+errout:
+	if (error)
+		__ext4_error_inode(inode, function, line, 0,
+				   "corrupted in-inode xattr");
+	return error;
+}
+
+#define xattr_check_inode(inode, header, end) \
+	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
+
 static inline int
 ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
 {
@@ -341,7 +362,7 @@
 	header = IHDR(inode, raw_inode);
 	entry = IFIRST(header);
 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	error = ext4_xattr_check_names(entry, end, entry);
+	error = xattr_check_inode(inode, header, end);
 	if (error)
 		goto cleanup;
 	error = ext4_xattr_find_entry(&entry, name_index, name,
@@ -477,7 +498,7 @@
 	raw_inode = ext4_raw_inode(&iloc);
 	header = IHDR(inode, raw_inode);
 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
+	error = xattr_check_inode(inode, header, end);
 	if (error)
 		goto cleanup;
 	error = ext4_xattr_list_entries(dentry, IFIRST(header),
@@ -1040,8 +1061,7 @@
 	is->s.here = is->s.first;
 	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
-		error = ext4_xattr_check_names(IFIRST(header), is->s.end,
-					       IFIRST(header));
+		error = xattr_check_inode(inode, header, is->s.end);
 		if (error)
 			return error;
 		/* Find the named attribute. */
@@ -1356,6 +1376,10 @@
 	last = entry;
 	total_ino = sizeof(struct ext4_xattr_ibody_header);
 
+	error = xattr_check_inode(inode, header, end);
+	if (error)
+		goto cleanup;
+
 	free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
 	if (free >= new_extra_isize) {
 		entry = IFIRST(header);
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 3e81bdc..123a7d0 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -13,10 +13,10 @@
 
 static int
 ext4_xattr_security_get(const struct xattr_handler *handler,
-			struct dentry *dentry, const char *name,
-			void *buffer, size_t size)
+			struct dentry *unused, struct inode *inode,
+			const char *name, void *buffer, size_t size)
 {
-	return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_SECURITY,
+	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_SECURITY,
 			      name, buffer, size);
 }
 
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
index 2a3c6f9..60652fa 100644
--- a/fs/ext4/xattr_trusted.c
+++ b/fs/ext4/xattr_trusted.c
@@ -20,10 +20,10 @@
 
 static int
 ext4_xattr_trusted_get(const struct xattr_handler *handler,
-		       struct dentry *dentry, const char *name, void *buffer,
-		       size_t size)
+		       struct dentry *unused, struct inode *inode,
+		       const char *name, void *buffer, size_t size)
 {
-	return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_TRUSTED,
+	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_TRUSTED,
 			      name, buffer, size);
 }
 
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
index d152f43..17a446f 100644
--- a/fs/ext4/xattr_user.c
+++ b/fs/ext4/xattr_user.c
@@ -19,12 +19,12 @@
 
 static int
 ext4_xattr_user_get(const struct xattr_handler *handler,
-		    struct dentry *dentry, const char *name,
-		    void *buffer, size_t size)
+		    struct dentry *unused, struct inode *inode,
+		    const char *name, void *buffer, size_t size)
 {
-	if (!test_opt(dentry->d_sb, XATTR_USER))
+	if (!test_opt(inode->i_sb, XATTR_USER))
 		return -EOPNOTSUPP;
-	return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_USER,
+	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER,
 			      name, buffer, size);
 }
 
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index c8f25f7..6f1fdda 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -190,9 +190,6 @@
 		acl = ERR_PTR(retval);
 	kfree(value);
 
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
-
 	return acl;
 }
 
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e5c762b..bb376c3 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -223,7 +223,7 @@
 	/* Allocate a new bio */
 	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
 
-	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
 		return -EFAULT;
 	}
@@ -265,8 +265,8 @@
 
 	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
 
-	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
-							PAGE_CACHE_SIZE) {
+	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
+							PAGE_SIZE) {
 		__submit_merged_bio(io);
 		goto alloc_new;
 	}
@@ -406,7 +406,7 @@
 	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
 	 */
 	if (dn.data_blkaddr == NEW_ADDR) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		unlock_page(page);
 		return page;
@@ -517,7 +517,7 @@
 		goto got_it;
 
 	if (dn.data_blkaddr == NEW_ADDR) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 	} else {
 		f2fs_put_page(page, 1);
@@ -530,8 +530,8 @@
 	}
 got_it:
 	if (new_i_size && i_size_read(inode) <
-				((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
-		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
+				((loff_t)(index + 1) << PAGE_SHIFT)) {
+		i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
 		/* Only the directory inode sets new_i_size */
 		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
 	}
@@ -570,9 +570,9 @@
 	/* update i_size */
 	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
 							dn->ofs_in_node;
-	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
+	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
 		i_size_write(dn->inode,
-				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
+				((loff_t)(fofs + 1) << PAGE_SHIFT));
 	return 0;
 }
 
@@ -971,7 +971,7 @@
 				goto confused;
 			}
 		} else {
-			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+			zero_user_segment(page, 0, PAGE_SIZE);
 			SetPageUptodate(page);
 			unlock_page(page);
 			goto next_page;
@@ -992,7 +992,7 @@
 			if (f2fs_encrypted_inode(inode) &&
 					S_ISREG(inode->i_mode)) {
 
-				ctx = fscrypt_get_ctx(inode);
+				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 				if (IS_ERR(ctx))
 					goto set_error_page;
 
@@ -1021,7 +1021,7 @@
 		goto next_page;
 set_error_page:
 		SetPageError(page);
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		goto next_page;
 confused:
@@ -1032,7 +1032,7 @@
 		unlock_page(page);
 next_page:
 		if (pages)
-			page_cache_release(page);
+			put_page(page);
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
@@ -1092,14 +1092,24 @@
 	}
 
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+		gfp_t gfp_flags = GFP_NOFS;
 
 		/* wait for GCed encrypted page writeback */
 		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
 							fio->old_blkaddr);
-
-		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page);
+retry_encrypt:
+		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
+								gfp_flags);
 		if (IS_ERR(fio->encrypted_page)) {
 			err = PTR_ERR(fio->encrypted_page);
+			if (err == -ENOMEM) {
+				/* flush pending ios and wait for a while */
+				f2fs_flush_merged_bios(F2FS_I_SB(inode));
+				congestion_wait(BLK_RW_ASYNC, HZ/50);
+				gfp_flags |= __GFP_NOFAIL;
+				err = 0;
+				goto retry_encrypt;
+			}
 			goto out_writepage;
 		}
 	}
@@ -1136,7 +1146,7 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	loff_t i_size = i_size_read(inode);
 	const pgoff_t end_index = ((unsigned long long) i_size)
-							>> PAGE_CACHE_SHIFT;
+							>> PAGE_SHIFT;
 	unsigned offset = 0;
 	bool need_balance_fs = false;
 	int err = 0;
@@ -1157,11 +1167,11 @@
 	 * If the offset is out-of-range of file size,
 	 * this page does not have to be written to disk.
 	 */
-	offset = i_size & (PAGE_CACHE_SIZE - 1);
+	offset = i_size & (PAGE_SIZE - 1);
 	if ((page->index >= end_index + 1) || !offset)
 		goto out;
 
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 write:
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
@@ -1267,8 +1277,8 @@
 			cycled = 0;
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		cycled = 1; /* ignore range_cyclic tests */
@@ -1448,11 +1458,11 @@
 	 * the block addresses when there is no need to fill the page.
 	 */
 	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
-					len == PAGE_CACHE_SIZE)
+					len == PAGE_SIZE)
 		return 0;
 
 	if (f2fs_has_inline_data(inode) ||
-			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
+			(pos & PAGE_MASK) >= i_size_read(inode)) {
 		f2fs_lock_op(sbi);
 		locked = true;
 	}
@@ -1513,7 +1523,7 @@
 	struct inode *inode = mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct page *page = NULL;
-	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
+	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
 	bool need_balance = false;
 	block_t blkaddr = NULL_ADDR;
 	int err = 0;
@@ -1561,22 +1571,22 @@
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
 
-	if (len == PAGE_CACHE_SIZE)
+	if (len == PAGE_SIZE)
 		goto out_update;
 	if (PageUptodate(page))
 		goto out_clear;
 
-	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
-		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+		unsigned start = pos & (PAGE_SIZE - 1);
 		unsigned end = start + len;
 
 		/* Reading beyond i_size is simple: memset to zero */
-		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+		zero_user_segments(page, 0, start, end, PAGE_SIZE);
 		goto out_update;
 	}
 
 	if (blkaddr == NEW_ADDR) {
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 	} else {
 		struct f2fs_io_info fio = {
 			.sbi = sbi,
@@ -1655,12 +1665,12 @@
 	return 0;
 }
 
-static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			      loff_t offset)
+static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
 	struct inode *inode = mapping->host;
 	size_t count = iov_iter_count(iter);
+	loff_t offset = iocb->ki_pos;
 	int err;
 
 	err = check_direct_IO(inode, iter, offset);
@@ -1672,7 +1682,7 @@
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
-	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
+	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
 	if (err < 0 && iov_iter_rw(iter) == WRITE)
 		f2fs_write_failed(mapping, offset + count);
 
@@ -1688,7 +1698,7 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
 	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
-		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
+		(offset % PAGE_SIZE || length != PAGE_SIZE))
 		return;
 
 	if (PageDirty(page)) {
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 4fb6ef8..f4a61a5 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -164,7 +164,7 @@
 
 	/* build curseg */
 	si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
-	si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE;
+	si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
 
 	/* build dirty segmap */
 	si->base_mem += sizeof(struct dirty_seglist_info);
@@ -201,9 +201,9 @@
 
 	si->page_mem = 0;
 	npages = NODE_MAPPING(sbi)->nrpages;
-	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
 	npages = META_MAPPING(sbi)->nrpages;
-	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
 }
 
 static int stat_show(struct seq_file *s, void *v)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 80641ad..9e46151 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -17,8 +17,8 @@
 
 static unsigned long dir_blocks(struct inode *inode)
 {
-	return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
-							>> PAGE_CACHE_SHIFT;
+	return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
+							>> PAGE_SHIFT;
 }
 
 static unsigned int dir_buckets(unsigned int level, int dir_level)
@@ -902,7 +902,7 @@
 const struct file_operations f2fs_dir_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= f2fs_readdir,
+	.iterate_shared	= f2fs_readdir,
 	.fsync		= f2fs_sync_file,
 	.open		= f2fs_dir_open,
 	.unlocked_ioctl	= f2fs_ioctl,
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index bbe2cd1..7a4558d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1294,7 +1294,7 @@
 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
 		unlock_page(page);
 	}
-	page_cache_release(page);
+	put_page(page);
 }
 
 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index b41c357..eb9d027 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -74,11 +74,11 @@
 		goto mapped;
 
 	/* page is wholly or partially inside EOF */
-	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
 						i_size_read(inode)) {
 		unsigned offset;
-		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
-		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+		offset = i_size_read(inode) & ~PAGE_MASK;
+		zero_user_segment(page, offset, PAGE_SIZE);
 	}
 	set_page_dirty(page);
 	SetPageUptodate(page);
@@ -346,11 +346,11 @@
 		goto found;
 	}
 
-	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);
+	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
 
 	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
 
-	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
 		if (err && err != -ENOENT) {
@@ -370,7 +370,7 @@
 		/* find data/hole in dnode block */
 		for (; dn.ofs_in_node < end_offset;
 				dn.ofs_in_node++, pgofs++,
-				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
 			block_t blkaddr;
 			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
 
@@ -441,7 +441,7 @@
 static int f2fs_file_open(struct inode *inode, struct file *filp)
 {
 	int ret = generic_file_open(inode, filp);
-	struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
+	struct dentry *dir;
 
 	if (!ret && f2fs_encrypted_inode(inode)) {
 		ret = fscrypt_get_encryption_info(inode);
@@ -450,9 +450,13 @@
 		if (!fscrypt_has_encryption_key(inode))
 			return -ENOKEY;
 	}
-	if (f2fs_encrypted_inode(dir) &&
-			!fscrypt_has_permitted_context(dir, inode))
+	dir = dget_parent(file_dentry(filp));
+	if (f2fs_encrypted_inode(d_inode(dir)) &&
+			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
+		dput(dir);
 		return -EPERM;
+	}
+	dput(dir);
 	return ret;
 }
 
@@ -508,8 +512,8 @@
 static int truncate_partial_data_page(struct inode *inode, u64 from,
 								bool cache_only)
 {
-	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
-	pgoff_t index = from >> PAGE_CACHE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE - 1);
+	pgoff_t index = from >> PAGE_SHIFT;
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
 
@@ -529,7 +533,7 @@
 		return 0;
 truncate_out:
 	f2fs_wait_on_page_writeback(page, DATA, true);
-	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
+	zero_user(page, offset, PAGE_SIZE - offset);
 	if (!cache_only || !f2fs_encrypted_inode(inode) ||
 					!S_ISREG(inode->i_mode))
 		set_page_dirty(page);
@@ -799,11 +803,11 @@
 	if (ret)
 		return ret;
 
-	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-	off_start = offset & (PAGE_CACHE_SIZE - 1);
-	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+	off_start = offset & (PAGE_SIZE - 1);
+	off_end = (offset + len) & (PAGE_SIZE - 1);
 
 	if (pg_start == pg_end) {
 		ret = fill_zero(inode, pg_start, off_start,
@@ -813,7 +817,7 @@
 	} else {
 		if (off_start) {
 			ret = fill_zero(inode, pg_start++, off_start,
-						PAGE_CACHE_SIZE - off_start);
+						PAGE_SIZE - off_start);
 			if (ret)
 				return ret;
 		}
@@ -830,8 +834,8 @@
 
 			f2fs_balance_fs(sbi, true);
 
-			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
-			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
+			blk_start = (loff_t)pg_start << PAGE_SHIFT;
+			blk_end = (loff_t)pg_end << PAGE_SHIFT;
 			truncate_inode_pages_range(mapping, blk_start,
 					blk_end - 1);
 
@@ -954,8 +958,8 @@
 	if (ret)
 		return ret;
 
-	pg_start = offset >> PAGE_CACHE_SHIFT;
-	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = offset >> PAGE_SHIFT;
+	pg_end = (offset + len) >> PAGE_SHIFT;
 
 	/* write out all dirty pages from offset */
 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
@@ -1006,11 +1010,11 @@
 
 	truncate_pagecache_range(inode, offset, offset + len - 1);
 
-	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-	off_start = offset & (PAGE_CACHE_SIZE - 1);
-	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+	off_start = offset & (PAGE_SIZE - 1);
+	off_end = (offset + len) & (PAGE_SIZE - 1);
 
 	if (pg_start == pg_end) {
 		ret = fill_zero(inode, pg_start, off_start,
@@ -1024,12 +1028,12 @@
 	} else {
 		if (off_start) {
 			ret = fill_zero(inode, pg_start++, off_start,
-						PAGE_CACHE_SIZE - off_start);
+						PAGE_SIZE - off_start);
 			if (ret)
 				return ret;
 
 			new_size = max_t(loff_t, new_size,
-					(loff_t)pg_start << PAGE_CACHE_SHIFT);
+					(loff_t)pg_start << PAGE_SHIFT);
 		}
 
 		for (index = pg_start; index < pg_end; index++) {
@@ -1060,7 +1064,7 @@
 			f2fs_unlock_op(sbi);
 
 			new_size = max_t(loff_t, new_size,
-				(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
+				(loff_t)(index + 1) << PAGE_SHIFT);
 		}
 
 		if (off_end) {
@@ -1117,8 +1121,8 @@
 
 	truncate_pagecache(inode, offset);
 
-	pg_start = offset >> PAGE_CACHE_SHIFT;
-	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = offset >> PAGE_SHIFT;
+	pg_end = (offset + len) >> PAGE_SHIFT;
 	delta = pg_end - pg_start;
 	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 
@@ -1158,11 +1162,11 @@
 
 	f2fs_balance_fs(sbi, true);
 
-	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
 
-	off_start = offset & (PAGE_CACHE_SIZE - 1);
-	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+	off_start = offset & (PAGE_SIZE - 1);
+	off_end = (offset + len) & (PAGE_SIZE - 1);
 
 	f2fs_lock_op(sbi);
 
@@ -1180,12 +1184,12 @@
 		if (pg_start == pg_end)
 			new_size = offset + len;
 		else if (index == pg_start && off_start)
-			new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
+			new_size = (loff_t)(index + 1) << PAGE_SHIFT;
 		else if (index == pg_end)
-			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
+			new_size = ((loff_t)index << PAGE_SHIFT) +
 								off_end;
 		else
-			new_size += PAGE_CACHE_SIZE;
+			new_size += PAGE_SIZE;
 	}
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -1652,8 +1656,8 @@
 	if (need_inplace_update(inode))
 		return -EINVAL;
 
-	pg_start = range->start >> PAGE_CACHE_SHIFT;
-	pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;
+	pg_start = range->start >> PAGE_SHIFT;
+	pg_end = (range->start + range->len) >> PAGE_SHIFT;
 
 	f2fs_balance_fs(sbi, true);
 
@@ -1770,7 +1774,7 @@
 out:
 	inode_unlock(inode);
 	if (!err)
-		range->len = (u64)total << PAGE_CACHE_SHIFT;
+		range->len = (u64)total << PAGE_SHIFT;
 	return err;
 }
 
@@ -1882,13 +1886,8 @@
 	}
 	inode_unlock(inode);
 
-	if (ret > 0) {
-		ssize_t err;
-
-		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
-		if (err < 0)
-			ret = err;
-	}
+	if (ret > 0)
+		ret = generic_write_sync(iocb, ret);
 	return ret;
 }
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 358214e..a2fbe6f 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -51,7 +51,7 @@
 
 	f2fs_bug_on(F2FS_P_SB(page), page->index);
 
-	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
 
 	/* Copy the whole inline data block */
 	src_addr = inline_data_addr(ipage);
@@ -93,7 +93,7 @@
 	}
 
 	if (page->index)
-		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		zero_user_segment(page, 0, PAGE_SIZE);
 	else
 		read_inline_data(page, ipage);
 
@@ -375,7 +375,7 @@
 		goto out;
 
 	f2fs_wait_on_page_writeback(page, DATA, true);
-	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
 
 	dentry_blk = kmap_atomic(page);
 
@@ -405,8 +405,8 @@
 	stat_dec_inline_dir(dir);
 	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
 
-	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
-		i_size_write(dir, PAGE_CACHE_SIZE);
+	if (i_size_read(dir) < PAGE_SIZE) {
+		i_size_write(dir, PAGE_SIZE);
 		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
 	}
 
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 7876f10..324ed38 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -202,7 +202,7 @@
 	unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot);
 	if (!ino)
 		return ERR_PTR(-ENOENT);
-	return d_obtain_alias(f2fs_iget(d_inode(child)->i_sb, ino));
+	return d_obtain_alias(f2fs_iget(child->d_sb, ino));
 }
 
 static int __recover_dot_dentries(struct inode *dir, nid_t pino)
@@ -1027,12 +1027,6 @@
 		goto errout;
 	}
 
-	/* this is broken symlink case */
-	if (unlikely(cstr.name[0] == 0)) {
-		res = -ENOENT;
-		goto errout;
-	}
-
 	if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
 		/* Symlink data on the disk is corrupted */
 		res = -EIO;
@@ -1046,17 +1040,23 @@
 	if (res < 0)
 		goto errout;
 
+	/* this is broken symlink case */
+	if (unlikely(pstr.name[0] == 0)) {
+		res = -ENOENT;
+		goto errout;
+	}
+
 	paddr = pstr.name;
 
 	/* Null-terminate the name */
 	paddr[res] = '\0';
 
-	page_cache_release(cpage);
+	put_page(cpage);
 	set_delayed_call(done, kfree_link, paddr);
 	return paddr;
 errout:
 	fscrypt_fname_free_buffer(&pstr);
-	page_cache_release(cpage);
+	put_page(cpage);
 	return ERR_PTR(res);
 }
 
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 118321b..1a33de9 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -46,11 +46,11 @@
 	 */
 	if (type == FREE_NIDS) {
 		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
-							PAGE_CACHE_SHIFT;
+							PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 	} else if (type == NAT_ENTRIES) {
 		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
-							PAGE_CACHE_SHIFT;
+							PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 	} else if (type == DIRTY_DENTS) {
 		if (sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -62,13 +62,13 @@
 
 		for (i = 0; i <= UPDATE_INO; i++)
 			mem_size += (sbi->im[i].ino_num *
-				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
+				sizeof(struct ino_entry)) >> PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
 	} else if (type == EXTENT_CACHE) {
 		mem_size = (atomic_read(&sbi->total_ext_tree) *
 				sizeof(struct extent_tree) +
 				atomic_read(&sbi->total_ext_node) *
-				sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
+				sizeof(struct extent_node)) >> PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
 	} else {
 		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
@@ -121,7 +121,7 @@
 
 	src_addr = page_address(src_page);
 	dst_addr = page_address(dst_page);
-	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+	memcpy(dst_addr, src_addr, PAGE_SIZE);
 	set_page_dirty(dst_page);
 	f2fs_put_page(src_page, 1);
 
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 0b30cd2..011942f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -591,7 +591,7 @@
 
 	/* truncate meta pages to be used by the recovery */
 	truncate_inode_pages_range(META_MAPPING(sbi),
-			(loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
 
 	if (err) {
 		truncate_inode_pages_final(NODE_MAPPING(sbi));
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 6f16b39..540669d 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -885,12 +885,12 @@
 		}
 	}
 
-	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
+	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
 			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
 	if (valid_sum_count <= sum_in_page)
 		return 1;
 	else if ((valid_sum_count - sum_in_page) <=
-		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
+		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
 		return 2;
 	return 3;
 }
@@ -909,9 +909,9 @@
 	void *dst = page_address(page);
 
 	if (src)
-		memcpy(dst, src, PAGE_CACHE_SIZE);
+		memcpy(dst, src, PAGE_SIZE);
 	else
-		memset(dst, 0, PAGE_CACHE_SIZE);
+		memset(dst, 0, PAGE_SIZE);
 	set_page_dirty(page);
 	f2fs_put_page(page, 1);
 }
@@ -1596,7 +1596,7 @@
 			s = (struct f2fs_summary *)(kaddr + offset);
 			seg_i->sum_blk->entries[j] = *s;
 			offset += SUMMARY_SIZE;
-			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
 						SUM_FOOTER_SIZE)
 				continue;
 
@@ -1757,7 +1757,7 @@
 			*summary = seg_i->sum_blk->entries[j];
 			written_size += SUMMARY_SIZE;
 
-			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
 							SUM_FOOTER_SIZE)
 				continue;
 
@@ -1844,7 +1844,7 @@
 
 	src_addr = page_address(src_page);
 	dst_addr = page_address(dst_page);
-	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+	memcpy(dst_addr, src_addr, PAGE_SIZE);
 
 	set_page_dirty(dst_page);
 	f2fs_put_page(src_page, 1);
@@ -2171,7 +2171,7 @@
 
 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
 		mutex_init(&array[i].curseg_mutex);
-		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+		array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!array[i].sum_blk)
 			return -ENOMEM;
 		init_rwsem(&array[i].journal_rwsem);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 15bb81f..006f87d 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -984,9 +984,25 @@
 	return result;
 }
 
-static inline bool sanity_check_area_boundary(struct super_block *sb,
-					struct f2fs_super_block *raw_super)
+static int __f2fs_commit_super(struct buffer_head *bh,
+			struct f2fs_super_block *super)
 {
+	lock_buffer(bh);
+	if (super)
+		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+	set_buffer_uptodate(bh);
+	set_buffer_dirty(bh);
+	unlock_buffer(bh);
+
+	/* it's rare case, we can do fua all the time */
+	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+}
+
+static inline bool sanity_check_area_boundary(struct super_block *sb,
+					struct buffer_head *bh)
+{
+	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+					(bh->b_data + F2FS_SUPER_OFFSET);
 	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
 	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
 	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
@@ -1000,6 +1016,10 @@
 	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
 	u32 segment_count = le32_to_cpu(raw_super->segment_count);
 	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+	u64 main_end_blkaddr = main_blkaddr +
+				(segment_count_main << log_blocks_per_seg);
+	u64 seg_end_blkaddr = segment0_blkaddr +
+				(segment_count << log_blocks_per_seg);
 
 	if (segment0_blkaddr != cp_blkaddr) {
 		f2fs_msg(sb, KERN_INFO,
@@ -1044,22 +1064,45 @@
 		return true;
 	}
 
-	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
-		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
+	if (main_end_blkaddr > seg_end_blkaddr) {
 		f2fs_msg(sb, KERN_INFO,
-			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
+			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
 			main_blkaddr,
-			segment0_blkaddr + (segment_count << log_blocks_per_seg),
+			segment0_blkaddr +
+				(segment_count << log_blocks_per_seg),
 			segment_count_main << log_blocks_per_seg);
 		return true;
-	}
+	} else if (main_end_blkaddr < seg_end_blkaddr) {
+		int err = 0;
+		char *res;
 
+		/* fix in-memory information all the time */
+		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
+				segment0_blkaddr) >> log_blocks_per_seg);
+
+		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
+			res = "internally";
+		} else {
+			err = __f2fs_commit_super(bh, NULL);
+			res = err ? "failed" : "done";
+		}
+		f2fs_msg(sb, KERN_INFO,
+			"Fix alignment : %s, start(%u) end(%u) block(%u)",
+			res, main_blkaddr,
+			segment0_blkaddr +
+				(segment_count << log_blocks_per_seg),
+			segment_count_main << log_blocks_per_seg);
+		if (err)
+			return true;
+	}
 	return false;
 }
 
 static int sanity_check_raw_super(struct super_block *sb,
-			struct f2fs_super_block *raw_super)
+				struct buffer_head *bh)
 {
+	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+					(bh->b_data + F2FS_SUPER_OFFSET);
 	unsigned int blocksize;
 
 	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
@@ -1070,10 +1113,10 @@
 	}
 
 	/* Currently, support only 4KB page cache size */
-	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+	if (F2FS_BLKSIZE != PAGE_SIZE) {
 		f2fs_msg(sb, KERN_INFO,
 			"Invalid page_cache_size (%lu), supports only 4KB\n",
-			PAGE_CACHE_SIZE);
+			PAGE_SIZE);
 		return 1;
 	}
 
@@ -1126,7 +1169,7 @@
 	}
 
 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
-	if (sanity_check_area_boundary(sb, raw_super))
+	if (sanity_check_area_boundary(sb, bh))
 		return 1;
 
 	return 0;
@@ -1202,7 +1245,7 @@
 {
 	int block;
 	struct buffer_head *bh;
-	struct f2fs_super_block *super, *buf;
+	struct f2fs_super_block *super;
 	int err = 0;
 
 	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
@@ -1218,11 +1261,8 @@
 			continue;
 		}
 
-		buf = (struct f2fs_super_block *)
-				(bh->b_data + F2FS_SUPER_OFFSET);
-
 		/* sanity checking of raw super */
-		if (sanity_check_raw_super(sb, buf)) {
+		if (sanity_check_raw_super(sb, bh)) {
 			f2fs_msg(sb, KERN_ERR,
 				"Can't find valid F2FS filesystem in %dth superblock",
 				block + 1);
@@ -1232,7 +1272,8 @@
 		}
 
 		if (!*raw_super) {
-			memcpy(super, buf, sizeof(*super));
+			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+							sizeof(*super));
 			*valid_super_block = block;
 			*raw_super = super;
 		}
@@ -1252,42 +1293,29 @@
 	return err;
 }
 
-static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
+int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
 {
-	struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
 	struct buffer_head *bh;
 	int err;
 
-	bh = sb_getblk(sbi->sb, block);
+	/* write back-up superblock first */
+	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
 	if (!bh)
 		return -EIO;
-
-	lock_buffer(bh);
-	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
-	set_buffer_uptodate(bh);
-	set_buffer_dirty(bh);
-	unlock_buffer(bh);
-
-	/* it's rare case, we can do fua all the time */
-	err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
 	brelse(bh);
 
-	return err;
-}
-
-int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
-{
-	int err;
-
-	/* write back-up superblock first */
-	err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
-
 	/* if we are in recovery path, skip writing valid superblock */
 	if (recover || err)
 		return err;
 
 	/* write current valid superblock */
-	return __f2fs_commit_super(sbi, sbi->valid_super_block);
+	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+	if (!bh)
+		return -EIO;
+	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+	brelse(bh);
+	return err;
 }
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -1442,7 +1470,7 @@
 	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
 	if (__exist_node_summaries(sbi))
 		sbi->kbytes_written =
-			le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written);
+			le64_to_cpu(seg_i->journal->info.kbytes_written);
 
 	build_gc_manager(sbi);
 
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 06a72dc..17fd2b1 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -26,10 +26,10 @@
 #include "xattr.h"
 
 static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
-		struct dentry *dentry, const char *name, void *buffer,
-		size_t size)
+		struct dentry *unused, struct inode *inode,
+		const char *name, void *buffer, size_t size)
 {
-	struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
 	switch (handler->flags) {
 	case F2FS_XATTR_INDEX_USER:
@@ -45,7 +45,7 @@
 	default:
 		return -EINVAL;
 	}
-	return f2fs_getxattr(d_inode(dentry), handler->flags, name,
+	return f2fs_getxattr(inode, handler->flags, name,
 			     buffer, size, NULL);
 }
 
@@ -86,11 +86,9 @@
 }
 
 static int f2fs_xattr_advise_get(const struct xattr_handler *handler,
-		struct dentry *dentry, const char *name, void *buffer,
-		size_t size)
+		struct dentry *unused, struct inode *inode,
+		const char *name, void *buffer, size_t size)
 {
-	struct inode *inode = d_inode(dentry);
-
 	if (buffer)
 		*((char *)buffer) = F2FS_I(inode)->i_advise;
 	return sizeof(char);
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index d0b95c9..663e428 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -769,7 +769,7 @@
 
 	buf.dirent = dirent;
 	buf.result = 0;
-	inode_lock(inode);
+	inode_lock_shared(inode);
 	buf.ctx.pos = file->f_pos;
 	ret = -ENOENT;
 	if (!IS_DEADDIR(inode)) {
@@ -777,7 +777,7 @@
 				    short_only, both ? &buf : NULL);
 		file->f_pos = buf.ctx.pos;
 	}
-	inode_unlock(inode);
+	inode_unlock_shared(inode);
 	if (ret >= 0)
 		ret = buf.result;
 	return ret;
@@ -861,7 +861,7 @@
 const struct file_operations fat_dir_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= fat_readdir,
+	.iterate_shared	= fat_readdir,
 	.unlocked_ioctl	= fat_dir_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= fat_compat_dir_ioctl,
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 2262810..3bcf579 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -244,13 +244,13 @@
 	return err;
 }
 
-static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			     loff_t offset)
+static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
 	size_t count = iov_iter_count(iter);
+	loff_t offset = iocb->ki_pos;
 	ssize_t ret;
 
 	if (iov_iter_rw(iter) == WRITE) {
@@ -272,7 +272,7 @@
 	 * FAT need to use the DIO_LOCKING for avoiding the race
 	 * condition of fat_get_block() and ->truncate().
 	 */
-	ret = blockdev_direct_IO(iocb, inode, iter, offset, fat_get_block);
+	ret = blockdev_direct_IO(iocb, inode, iter, fat_get_block);
 	if (ret < 0 && iov_iter_rw(iter) == WRITE)
 		fat_write_failed(mapping, offset + count);
 
diff --git a/fs/file.c b/fs/file.c
index 1fbc5c0..6b1acdf 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -784,6 +784,11 @@
 	return v;
 }
 
+void __f_unlock_pos(struct file *f)
+{
+	mutex_unlock(&f->f_pos_lock);
+}
+
 /*
  * We only lock f_pos if we have threads or if the file might be
  * shared with another process. In both cases we'll have an elevated
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index cb84f0f..bfc780c 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -66,11 +66,11 @@
 vxfs_immed_readpage(struct file *fp, struct page *pp)
 {
 	struct vxfs_inode_info	*vip = VXFS_INO(pp->mapping->host);
-	u_int64_t	offset = (u_int64_t)pp->index << PAGE_CACHE_SHIFT;
+	u_int64_t	offset = (u_int64_t)pp->index << PAGE_SHIFT;
 	caddr_t		kaddr;
 
 	kaddr = kmap(pp);
-	memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE);
+	memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_SIZE);
 	kunmap(pp);
 	
 	flush_dcache_page(pp);
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 1cff72d..6d576b9 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -45,7 +45,7 @@
 /*
  * Number of VxFS blocks per page.
  */
-#define VXFS_BLOCK_PER_PAGE(sbp)  ((PAGE_CACHE_SIZE / (sbp)->s_blocksize))
+#define VXFS_BLOCK_PER_PAGE(sbp)  ((PAGE_SIZE / (sbp)->s_blocksize))
 
 
 static struct dentry *	vxfs_lookup(struct inode *, struct dentry *, unsigned int);
@@ -58,7 +58,7 @@
 const struct file_operations vxfs_dir_operations = {
 	.llseek =		generic_file_llseek,
 	.read =			generic_read_dir,
-	.iterate =		vxfs_readdir,
+	.iterate_shared =	vxfs_readdir,
 };
 
 static inline u_long
@@ -175,7 +175,7 @@
 	if (de) {
 		ino = de->d_ino;
 		kunmap(pp);
-		page_cache_release(pp);
+		put_page(pp);
 	}
 	
 	return (ino);
@@ -255,8 +255,8 @@
 	nblocks = dir_blocks(ip);
 	pblocks = VXFS_BLOCK_PER_PAGE(sbp);
 
-	page = pos >> PAGE_CACHE_SHIFT;
-	offset = pos & ~PAGE_CACHE_MASK;
+	page = pos >> PAGE_SHIFT;
+	offset = pos & ~PAGE_MASK;
 	block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;
 
 	for (; page < npages; page++, block = 0) {
@@ -289,7 +289,7 @@
 					continue;
 
 				offset = (char *)de - kaddr;
-				ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+				ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
 				if (!dir_emit(ctx, de->d_name, de->d_namelen,
 					de->d_ino, DT_UNKNOWN)) {
 					vxfs_put_page(pp);
@@ -301,6 +301,6 @@
 		vxfs_put_page(pp);
 		offset = 0;
 	}
-	ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
+	ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
 	return 0;
 }
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 5d318c4..e806694 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -50,7 +50,7 @@
 vxfs_put_page(struct page *pp)
 {
 	kunmap(pp);
-	page_cache_release(pp);
+	put_page(pp);
 }
 
 /**
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index fee81e8..592cea5 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -33,7 +33,7 @@
 /*
  * 4MB minimal write chunk size
  */
-#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
+#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))
 
 struct wb_completion {
 	atomic_t		cnt;
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 6b35fc4..3078b67 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -113,7 +113,7 @@
 
 	wake_up_bit(&cookie->flags, 0);
 	if (xpage)
-		page_cache_release(xpage);
+		put_page(xpage);
 	__fscache_uncache_page(cookie, page);
 	return true;
 
@@ -164,7 +164,7 @@
 	}
 	spin_unlock(&object->lock);
 	if (xpage)
-		page_cache_release(xpage);
+		put_page(xpage);
 }
 
 /*
@@ -884,7 +884,7 @@
 		spin_unlock(&cookie->stores_lock);
 
 		for (i = n - 1; i >= 0; i--)
-			page_cache_release(results[i]);
+			put_page(results[i]);
 	}
 
 	_leave("");
@@ -982,7 +982,7 @@
 
 	radix_tree_tag_set(&cookie->stores, page->index,
 			   FSCACHE_COOKIE_PENDING_TAG);
-	page_cache_get(page);
+	get_page(page);
 
 	/* we only want one writer at a time, but we do need to queue new
 	 * writers after exclusive ops */
@@ -1026,7 +1026,7 @@
 	radix_tree_delete(&cookie->stores, page->index);
 	spin_unlock(&cookie->stores_lock);
 	wake_cookie = __fscache_unuse_cookie(cookie);
-	page_cache_release(page);
+	put_page(page);
 	ret = -ENOBUFS;
 	goto nobufs;
 
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ebb5e37..cbece12 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -897,7 +897,7 @@
 		return err;
 	}
 
-	page_cache_get(newpage);
+	get_page(newpage);
 
 	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
 		lru_cache_add_file(newpage);
@@ -912,12 +912,12 @@
 
 	if (err) {
 		unlock_page(newpage);
-		page_cache_release(newpage);
+		put_page(newpage);
 		return err;
 	}
 
 	unlock_page(oldpage);
-	page_cache_release(oldpage);
+	put_page(oldpage);
 	cs->len = 0;
 
 	return 0;
@@ -951,7 +951,7 @@
 	fuse_copy_finish(cs);
 
 	buf = cs->pipebufs;
-	page_cache_get(page);
+	get_page(page);
 	buf->page = page;
 	buf->offset = offset;
 	buf->len = count;
@@ -1435,7 +1435,7 @@
 
 out:
 	for (; page_nr < cs.nr_segs; page_nr++)
-		page_cache_release(bufs[page_nr].page);
+		put_page(bufs[page_nr].page);
 
 	kfree(bufs);
 	return ret;
@@ -1632,8 +1632,8 @@
 		goto out_up_killsb;
 
 	mapping = inode->i_mapping;
-	index = outarg.offset >> PAGE_CACHE_SHIFT;
-	offset = outarg.offset & ~PAGE_CACHE_MASK;
+	index = outarg.offset >> PAGE_SHIFT;
+	offset = outarg.offset & ~PAGE_MASK;
 	file_size = i_size_read(inode);
 	end = outarg.offset + outarg.size;
 	if (end > file_size) {
@@ -1652,13 +1652,13 @@
 		if (!page)
 			goto out_iput;
 
-		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
+		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
 		err = fuse_copy_page(cs, &page, offset, this_num, 0);
 		if (!err && offset == 0 &&
-		    (this_num == PAGE_CACHE_SIZE || file_size == end))
+		    (this_num == PAGE_SIZE || file_size == end))
 			SetPageUptodate(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		if (err)
 			goto out_iput;
@@ -1697,7 +1697,7 @@
 	size_t total_len = 0;
 	int num_pages;
 
-	offset = outarg->offset & ~PAGE_CACHE_MASK;
+	offset = outarg->offset & ~PAGE_MASK;
 	file_size = i_size_read(inode);
 
 	num = outarg->size;
@@ -1720,7 +1720,7 @@
 	req->page_descs[0].offset = offset;
 	req->end = fuse_retrieve_end;
 
-	index = outarg->offset >> PAGE_CACHE_SHIFT;
+	index = outarg->offset >> PAGE_SHIFT;
 
 	while (num && req->num_pages < num_pages) {
 		struct page *page;
@@ -1730,7 +1730,7 @@
 		if (!page)
 			break;
 
-		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
+		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
 		req->pages[req->num_pages] = page;
 		req->page_descs[req->num_pages].length = this_num;
 		req->num_pages++;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 4b855b6..b941905 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1162,7 +1162,6 @@
 				struct fuse_direntplus *direntplus,
 				u64 attr_version)
 {
-	int err;
 	struct fuse_entry_out *o = &direntplus->entry_out;
 	struct fuse_dirent *dirent = &direntplus->dirent;
 	struct dentry *parent = file->f_path.dentry;
@@ -1172,6 +1171,7 @@
 	struct inode *dir = d_inode(parent);
 	struct fuse_conn *fc;
 	struct inode *inode;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 
 	if (!o->nodeid) {
 		/*
@@ -1204,65 +1204,61 @@
 
 	name.hash = full_name_hash(name.name, name.len);
 	dentry = d_lookup(parent, &name);
-	if (dentry) {
+	if (!dentry) {
+retry:
+		dentry = d_alloc_parallel(parent, &name, &wq);
+		if (IS_ERR(dentry))
+			return PTR_ERR(dentry);
+	}
+	if (!d_in_lookup(dentry)) {
+		struct fuse_inode *fi;
 		inode = d_inode(dentry);
-		if (!inode) {
-			d_drop(dentry);
-		} else if (get_node_id(inode) != o->nodeid ||
-			   ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
+		if (!inode ||
+		    get_node_id(inode) != o->nodeid ||
+		    ((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
 			d_invalidate(dentry);
-		} else if (is_bad_inode(inode)) {
-			err = -EIO;
-			goto out;
-		} else {
-			struct fuse_inode *fi;
-			fi = get_fuse_inode(inode);
-			spin_lock(&fc->lock);
-			fi->nlookup++;
-			spin_unlock(&fc->lock);
-
-			fuse_change_attributes(inode, &o->attr,
-					       entry_attr_timeout(o),
-					       attr_version);
-
-			/*
-			 * The other branch to 'found' comes via fuse_iget()
-			 * which bumps nlookup inside
-			 */
-			goto found;
+			dput(dentry);
+			goto retry;
 		}
-		dput(dentry);
+		if (is_bad_inode(inode)) {
+			dput(dentry);
+			return -EIO;
+		}
+
+		fi = get_fuse_inode(inode);
+		spin_lock(&fc->lock);
+		fi->nlookup++;
+		spin_unlock(&fc->lock);
+
+		fuse_change_attributes(inode, &o->attr,
+				       entry_attr_timeout(o),
+				       attr_version);
+		/*
+		 * The other branch comes via fuse_iget()
+		 * which bumps nlookup inside
+		 */
+	} else {
+		inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
+				  &o->attr, entry_attr_timeout(o),
+				  attr_version);
+		if (!inode)
+			inode = ERR_PTR(-ENOMEM);
+
+		alias = d_splice_alias(inode, dentry);
+		d_lookup_done(dentry);
+		if (alias) {
+			dput(dentry);
+			dentry = alias;
+		}
+		if (IS_ERR(dentry))
+			return PTR_ERR(dentry);
 	}
-
-	dentry = d_alloc(parent, &name);
-	err = -ENOMEM;
-	if (!dentry)
-		goto out;
-
-	inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
-			  &o->attr, entry_attr_timeout(o), attr_version);
-	if (!inode)
-		goto out;
-
-	alias = d_splice_alias(inode, dentry);
-	err = PTR_ERR(alias);
-	if (IS_ERR(alias))
-		goto out;
-
-	if (alias) {
-		dput(dentry);
-		dentry = alias;
-	}
-
-found:
 	if (fc->readdirplus_auto)
 		set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
 	fuse_change_entry_timeout(dentry, o);
 
-	err = 0;
-out:
 	dput(dentry);
-	return err;
+	return 0;
 }
 
 static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
@@ -1759,10 +1755,9 @@
 	return err;
 }
 
-static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
-			     void *value, size_t size)
+static ssize_t fuse_getxattr(struct dentry *entry, struct inode *inode,
+			     const char *name, void *value, size_t size)
 {
-	struct inode *inode = d_inode(entry);
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	FUSE_ARGS(args);
 	struct fuse_getxattr_in inarg;
@@ -1893,7 +1888,7 @@
 static const struct file_operations fuse_dir_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= fuse_readdir,
+	.iterate_shared	= fuse_readdir,
 	.open		= fuse_dir_open,
 	.release	= fuse_dir_release,
 	.fsync		= fuse_dir_fsync,
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9dde38f..9154f86 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -348,7 +348,7 @@
 		pgoff_t curr_index;
 
 		BUG_ON(req->inode != inode);
-		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+		curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
 		if (idx_from < curr_index + req->num_pages &&
 		    curr_index <= idx_to) {
 			found = true;
@@ -683,11 +683,11 @@
 		 * present there.
 		 */
 		int i;
-		int start_idx = num_read >> PAGE_CACHE_SHIFT;
-		size_t off = num_read & (PAGE_CACHE_SIZE - 1);
+		int start_idx = num_read >> PAGE_SHIFT;
+		size_t off = num_read & (PAGE_SIZE - 1);
 
 		for (i = start_idx; i < req->num_pages; i++) {
-			zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE);
+			zero_user_segment(req->pages[i], off, PAGE_SIZE);
 			off = 0;
 		}
 	} else {
@@ -704,7 +704,7 @@
 	struct fuse_req *req;
 	size_t num_read;
 	loff_t pos = page_offset(page);
-	size_t count = PAGE_CACHE_SIZE;
+	size_t count = PAGE_SIZE;
 	u64 attr_ver;
 	int err;
 
@@ -789,7 +789,7 @@
 		else
 			SetPageError(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	if (req->ff)
 		fuse_file_put(req->ff, false);
@@ -800,7 +800,7 @@
 	struct fuse_file *ff = file->private_data;
 	struct fuse_conn *fc = ff->fc;
 	loff_t pos = page_offset(req->pages[0]);
-	size_t count = req->num_pages << PAGE_CACHE_SHIFT;
+	size_t count = req->num_pages << PAGE_SHIFT;
 
 	req->out.argpages = 1;
 	req->out.page_zeroing = 1;
@@ -836,7 +836,7 @@
 
 	if (req->num_pages &&
 	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
-	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
+	     (req->num_pages + 1) * PAGE_SIZE > fc->max_read ||
 	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
 		int nr_alloc = min_t(unsigned, data->nr_pages,
 				     FUSE_MAX_PAGES_PER_REQ);
@@ -858,7 +858,7 @@
 		return -EIO;
 	}
 
-	page_cache_get(page);
+	get_page(page);
 	req->pages[req->num_pages] = page;
 	req->page_descs[req->num_pages].length = PAGE_SIZE;
 	req->num_pages++;
@@ -1003,17 +1003,17 @@
 	for (i = 0; i < req->num_pages; i++) {
 		struct page *page = req->pages[i];
 
-		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
+		if (!req->out.h.error && !offset && count >= PAGE_SIZE)
 			SetPageUptodate(page);
 
-		if (count > PAGE_CACHE_SIZE - offset)
-			count -= PAGE_CACHE_SIZE - offset;
+		if (count > PAGE_SIZE - offset)
+			count -= PAGE_SIZE - offset;
 		else
 			count = 0;
 		offset = 0;
 
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	return res;
@@ -1024,7 +1024,7 @@
 			       struct iov_iter *ii, loff_t pos)
 {
 	struct fuse_conn *fc = get_fuse_conn(mapping->host);
-	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned offset = pos & (PAGE_SIZE - 1);
 	size_t count = 0;
 	int err;
 
@@ -1034,8 +1034,8 @@
 	do {
 		size_t tmp;
 		struct page *page;
-		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
+		pgoff_t index = pos >> PAGE_SHIFT;
+		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
 				     iov_iter_count(ii));
 
 		bytes = min_t(size_t, bytes, fc->max_write - count);
@@ -1059,7 +1059,7 @@
 		iov_iter_advance(ii, tmp);
 		if (!tmp) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			bytes = min(bytes, iov_iter_single_seg_count(ii));
 			goto again;
 		}
@@ -1072,7 +1072,7 @@
 		count += tmp;
 		pos += tmp;
 		offset += tmp;
-		if (offset == PAGE_CACHE_SIZE)
+		if (offset == PAGE_SIZE)
 			offset = 0;
 
 		if (!fc->big_writes)
@@ -1086,8 +1086,8 @@
 static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
 {
 	return min_t(unsigned,
-		     ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
-		     (pos >> PAGE_CACHE_SHIFT) + 1,
+		     ((pos + len - 1) >> PAGE_SHIFT) -
+		     (pos >> PAGE_SHIFT) + 1,
 		     FUSE_MAX_PAGES_PER_REQ);
 }
 
@@ -1186,7 +1186,7 @@
 
 	if (iocb->ki_flags & IOCB_DIRECT) {
 		loff_t pos = iocb->ki_pos;
-		written = generic_file_direct_write(iocb, from, pos);
+		written = generic_file_direct_write(iocb, from);
 		if (written < 0 || !iov_iter_count(from))
 			goto out;
 
@@ -1205,8 +1205,8 @@
 			goto out;
 
 		invalidate_mapping_pages(file->f_mapping,
-					 pos >> PAGE_CACHE_SHIFT,
-					 endbyte >> PAGE_CACHE_SHIFT);
+					 pos >> PAGE_SHIFT,
+					 endbyte >> PAGE_SHIFT);
 
 		written += written_buffered;
 		iocb->ki_pos = pos + written_buffered;
@@ -1295,7 +1295,7 @@
 
 	*nbytesp = nbytes;
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 static inline int fuse_iter_npages(const struct iov_iter *ii_p)
@@ -1315,8 +1315,8 @@
 	size_t nmax = write ? fc->max_write : fc->max_read;
 	loff_t pos = *ppos;
 	size_t count = iov_iter_count(iter);
-	pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT;
-	pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+	pgoff_t idx_from = pos >> PAGE_SHIFT;
+	pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
 	ssize_t res = 0;
 	struct fuse_req *req;
 	int err = 0;
@@ -1466,7 +1466,7 @@
 {
 	struct fuse_inode *fi = get_fuse_inode(req->inode);
 	struct fuse_write_in *inarg = &req->misc.write.in;
-	__u64 data_size = req->num_pages * PAGE_CACHE_SIZE;
+	__u64 data_size = req->num_pages * PAGE_SIZE;
 
 	if (!fc->connected)
 		goto out_free;
@@ -1727,7 +1727,7 @@
 	list_del(&new_req->writepages_entry);
 	list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
 		BUG_ON(old_req->inode != new_req->inode);
-		curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+		curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
 		if (curr_index <= page->index &&
 		    page->index < curr_index + old_req->num_pages) {
 			found = true;
@@ -1742,7 +1742,7 @@
 	new_req->num_pages = 1;
 	for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
 		BUG_ON(tmp->inode != new_req->inode);
-		curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+		curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
 		if (tmp->num_pages == 1 &&
 		    curr_index == page->index) {
 			old_req = tmp;
@@ -1799,7 +1799,7 @@
 
 	if (req && req->num_pages &&
 	    (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
-	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
+	     (req->num_pages + 1) * PAGE_SIZE > fc->max_write ||
 	     data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
 		fuse_writepages_send(data);
 		data->req = NULL;
@@ -1924,7 +1924,7 @@
 		loff_t pos, unsigned len, unsigned flags,
 		struct page **pagep, void **fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
 	struct page *page;
 	loff_t fsize;
@@ -1938,15 +1938,15 @@
 
 	fuse_wait_on_page_writeback(mapping->host, page->index);
 
-	if (PageUptodate(page) || len == PAGE_CACHE_SIZE)
+	if (PageUptodate(page) || len == PAGE_SIZE)
 		goto success;
 	/*
 	 * Check if the start of this page comes after the end of file, in which
 	 * case the readpage can be optimized away.
 	 */
 	fsize = i_size_read(mapping->host);
-	if (fsize <= (pos & PAGE_CACHE_MASK)) {
-		size_t off = pos & ~PAGE_CACHE_MASK;
+	if (fsize <= (pos & PAGE_MASK)) {
+		size_t off = pos & ~PAGE_MASK;
 		if (off)
 			zero_user_segment(page, 0, off);
 		goto success;
@@ -1960,7 +1960,7 @@
 
 cleanup:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 error:
 	return err;
 }
@@ -1973,16 +1973,16 @@
 
 	if (!PageUptodate(page)) {
 		/* Zero any unwritten bytes at the end of the page */
-		size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK;
+		size_t endoff = (pos + copied) & ~PAGE_MASK;
 		if (endoff)
-			zero_user_segment(page, endoff, PAGE_CACHE_SIZE);
+			zero_user_segment(page, endoff, PAGE_SIZE);
 		SetPageUptodate(page);
 	}
 
 	fuse_write_update_size(inode, pos + copied);
 	set_page_dirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return copied;
 }
@@ -2837,7 +2837,7 @@
 }
 
 static ssize_t
-fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
 	ssize_t ret = 0;
@@ -2848,6 +2848,7 @@
 	struct inode *inode;
 	loff_t i_size;
 	size_t count = iov_iter_count(iter);
+	loff_t offset = iocb->ki_pos;
 	struct fuse_io_priv *io;
 	bool is_sync = is_sync_kiocb(iocb);
 
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4d69d5c..1ce6766 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -339,11 +339,11 @@
 
 	fuse_invalidate_attr(inode);
 	if (offset >= 0) {
-		pg_start = offset >> PAGE_CACHE_SHIFT;
+		pg_start = offset >> PAGE_SHIFT;
 		if (len <= 0)
 			pg_end = -1;
 		else
-			pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
+			pg_end = (offset + len - 1) >> PAGE_SHIFT;
 		invalidate_inode_pages2_range(inode->i_mapping,
 					      pg_start, pg_end);
 	}
@@ -864,7 +864,7 @@
 		process_init_limits(fc, arg);
 
 		if (arg->minor >= 6) {
-			ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
+			ra_pages = arg->max_readahead / PAGE_SIZE;
 			if (arg->flags & FUSE_ASYNC_READ)
 				fc->async_read = 1;
 			if (!(arg->flags & FUSE_POSIX_LOCKS))
@@ -901,7 +901,7 @@
 			if (arg->time_gran && arg->time_gran <= 1000000000)
 				fc->sb->s_time_gran = arg->time_gran;
 		} else {
-			ra_pages = fc->max_read / PAGE_CACHE_SIZE;
+			ra_pages = fc->max_read / PAGE_SIZE;
 			fc->no_lock = 1;
 			fc->no_flock = 1;
 		}
@@ -922,7 +922,7 @@
 
 	arg->major = FUSE_KERNEL_VERSION;
 	arg->minor = FUSE_KERNEL_MINOR_VERSION;
-	arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
+	arg->max_readahead = fc->bdi.ra_pages * PAGE_SIZE;
 	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
 		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
 		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
@@ -955,7 +955,7 @@
 	int err;
 
 	fc->bdi.name = "fuse";
-	fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
 	/* fuse does its own writeback accounting */
 	fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
 
@@ -1053,8 +1053,8 @@
 			goto err;
 #endif
 	} else {
-		sb->s_blocksize = PAGE_CACHE_SIZE;
-		sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+		sb->s_blocksize = PAGE_SIZE;
+		sb->s_blocksize_bits = PAGE_SHIFT;
 	}
 	sb->s_magic = FUSE_SUPER_MAGIC;
 	sb->s_op = &fuse_super_operations;
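
The PAGE_CACHE_{SIZE,SHIFT,MASK} macros carried the same values as PAGE_{SIZE,SHIFT,MASK}, so the substitutions in the fuse hunks above are mechanical renames. A minimal sketch of the byte/page arithmetic that stays the same, with illustrative helper names that are not part of the patch:

#include <linux/mm.h>

/* Illustrative only: how a byte position maps to a page index and an
 * in-page offset after the rename; identical math to the old macros. */
static inline pgoff_t example_pos_to_index(loff_t pos)
{
	return pos >> PAGE_SHIFT;		/* was pos >> PAGE_CACHE_SHIFT */
}

static inline unsigned int example_pos_to_offset(loff_t pos)
{
	return pos & (PAGE_SIZE - 1);		/* was pos & (PAGE_CACHE_SIZE - 1) */
}
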
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 7919326..363ba9e 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -24,6 +24,7 @@
 #include "glock.h"
 #include "inode.h"
 #include "meta_io.h"
+#include "rgrp.h"
 #include "trans.h"
 #include "util.h"
 
@@ -38,7 +39,7 @@
 	return NULL;
 }
 
-struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
+static struct posix_acl *__gfs2_get_acl(struct inode *inode, int type)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct posix_acl *acl;
@@ -50,29 +51,41 @@
 		return NULL;
 
 	name = gfs2_acl_name(type);
-	if (name == NULL)
-		return ERR_PTR(-EINVAL);
-
 	len = gfs2_xattr_acl_get(ip, name, &data);
-	if (len < 0)
+	if (len <= 0)
 		return ERR_PTR(len);
-	if (len == 0)
-		return NULL;
-
 	acl = posix_acl_from_xattr(&init_user_ns, data, len);
 	kfree(data);
 	return acl;
 }
 
-int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_holder gh;
+	bool need_unlock = false;
+	struct posix_acl *acl;
+
+	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+		int ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
+					     LM_FLAG_ANY, &gh);
+		if (ret)
+			return ERR_PTR(ret);
+		need_unlock = true;
+	}
+	acl = __gfs2_get_acl(inode, type);
+	if (need_unlock)
+		gfs2_glock_dq_uninit(&gh);
+	return acl;
+}
+
+int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
 	int error;
 	int len;
 	char *data;
 	const char *name = gfs2_acl_name(type);
 
-	BUG_ON(name == NULL);
-
 	if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
 		return -E2BIG;
 
@@ -115,3 +128,26 @@
 	kfree(data);
 	return error;
 }
+
+int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_holder gh;
+	bool need_unlock = false;
+	int ret;
+
+	ret = gfs2_rsqa_alloc(ip);
+	if (ret)
+		return ret;
+
+	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+		if (ret)
+			return ret;
+		need_unlock = true;
+	}
+	ret = __gfs2_set_acl(inode, acl, type);
+	if (need_unlock)
+		gfs2_glock_dq_uninit(&gh);
+	return ret;
+}
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 3af4f40..f674fdd 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -15,6 +15,7 @@
 #define GFS2_ACL_MAX_ENTRIES(sdp) ((300 << (sdp)->sd_sb.sb_bsize_shift) >> 12)
 
 extern struct posix_acl *gfs2_get_acl(struct inode *inode, int type);
+extern int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
 extern int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
 
 #endif /* __ACL_DOT_H__ */
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index aa016e4..8524c0e 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -101,7 +101,7 @@
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
 	unsigned offset;
 
 	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
@@ -109,9 +109,9 @@
 	if (current->journal_info)
 		goto redirty;
 	/* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_CACHE_SIZE-1);
+	offset = i_size & (PAGE_SIZE-1);
 	if (page->index > end_index || (page->index == end_index && !offset)) {
-		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
 		goto out;
 	}
 	return 1;
@@ -238,7 +238,7 @@
 {
 	struct inode *inode = mapping->host;
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
+	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
 	int i;
 	int ret;
 
@@ -366,8 +366,8 @@
 			cycled = 0;
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		cycled = 1; /* ignore range_cyclic tests */
@@ -458,7 +458,7 @@
 	 * so we need to supply one here. It doesn't happen often.
 	 */
 	if (unlikely(page->index)) {
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+		zero_user(page, 0, PAGE_SIZE);
 		SetPageUptodate(page);
 		return 0;
 	}
@@ -471,7 +471,7 @@
 	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
 		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
 	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
-	memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
+	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
 	kunmap_atomic(kaddr);
 	flush_dcache_page(page);
 	brelse(dibh);
@@ -560,8 +560,8 @@
                        unsigned size)
 {
 	struct address_space *mapping = ip->i_inode.i_mapping;
-	unsigned long index = *pos / PAGE_CACHE_SIZE;
-	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
+	unsigned long index = *pos / PAGE_SIZE;
+	unsigned offset = *pos & (PAGE_SIZE - 1);
 	unsigned copied = 0;
 	unsigned amt;
 	struct page *page;
@@ -569,15 +569,15 @@
 
 	do {
 		amt = size - copied;
-		if (offset + size > PAGE_CACHE_SIZE)
-			amt = PAGE_CACHE_SIZE - offset;
+		if (offset + size > PAGE_SIZE)
+			amt = PAGE_SIZE - offset;
 		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 		p = kmap_atomic(page);
 		memcpy(buf + copied, p + offset, amt);
 		kunmap_atomic(p);
-		page_cache_release(page);
+		put_page(page);
 		copied += amt;
 		index++;
 		offset = 0;
@@ -651,8 +651,8 @@
 	unsigned requested = 0;
 	int alloc_required;
 	int error = 0;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	pgoff_t index = pos >> PAGE_SHIFT;
+	unsigned from = pos & (PAGE_SIZE - 1);
 	struct page *page;
 
 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
@@ -697,7 +697,7 @@
 		rblocks += gfs2_rg_blocks(ip, requested);
 
 	error = gfs2_trans_begin(sdp, rblocks,
-				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
+				 PAGE_SIZE/sdp->sd_sb.sb_bsize);
 	if (error)
 		goto out_trans_fail;
 
@@ -727,7 +727,7 @@
 		return 0;
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	gfs2_trans_end(sdp);
 	if (pos + len > ip->i_inode.i_size)
@@ -827,7 +827,7 @@
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (copied) {
 		if (inode->i_size < to)
@@ -877,7 +877,7 @@
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 	struct buffer_head *dibh;
-	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned int from = pos & (PAGE_SIZE - 1);
 	unsigned int to = from + len;
 	int ret;
 	struct gfs2_trans *tr = current->journal_info;
@@ -888,7 +888,7 @@
 	ret = gfs2_meta_inode_buffer(ip, &dibh);
 	if (unlikely(ret)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		goto failed;
 	}
 
@@ -992,7 +992,7 @@
 {
 	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
 	unsigned int stop = offset + length;
-	int partial_page = (offset || length < PAGE_CACHE_SIZE);
+	int partial_page = (offset || length < PAGE_SIZE);
 	struct buffer_head *bh, *head;
 	unsigned long pos = 0;
 
@@ -1042,13 +1042,13 @@
 
 
 
-static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			      loff_t offset)
+static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	struct address_space *mapping = inode->i_mapping;
 	struct gfs2_inode *ip = GFS2_I(inode);
+	loff_t offset = iocb->ki_pos;
 	struct gfs2_holder gh;
 	int rv;
 
@@ -1082,7 +1082,7 @@
 	 * the first place, mapping->nr_pages will always be zero.
 	 */
 	if (mapping->nrpages) {
-		loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1);
+		loff_t lstart = offset & ~(PAGE_SIZE - 1);
 		loff_t len = iov_iter_count(iter);
 		loff_t end = PAGE_ALIGN(offset + len) - 1;
 
@@ -1099,7 +1099,7 @@
 	}
 
 	rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
-				  offset, gfs2_get_block_direct, NULL, NULL, 0);
+				  gfs2_get_block_direct, NULL, NULL, 0);
 out:
 	gfs2_glock_dq(&gh);
 	gfs2_holder_uninit(&gh);
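
Throughout this series the ->direct_IO() address_space operation loses its loff_t offset argument; implementations read the position from iocb->ki_pos instead, as gfs2_direct_IO does above. A minimal sketch of the new shape, assuming a hypothetical example_get_block callback that is not from the patch:

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>

static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	return -EIO;	/* placeholder block-mapping callback */
}

static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* the starting offset is iocb->ki_pos; no separate loff_t argument */
	return blockdev_direct_IO(iocb, inode, iter, example_get_block);
}
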
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 0860f0b..24ce1cd 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -75,7 +75,7 @@
 			dsize = dibh->b_size - sizeof(struct gfs2_dinode);
 
 		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
-		memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
+		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
 		kunmap(page);
 
 		SetPageUptodate(page);
@@ -98,7 +98,7 @@
 
 	if (release) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	return 0;
@@ -932,8 +932,8 @@
 {
 	struct inode *inode = mapping->host;
 	struct gfs2_inode *ip = GFS2_I(inode);
-	unsigned long index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE-1);
+	unsigned long index = from >> PAGE_SHIFT;
+	unsigned offset = from & (PAGE_SIZE-1);
 	unsigned blocksize, iblock, length, pos;
 	struct buffer_head *bh;
 	struct page *page;
@@ -945,7 +945,7 @@
 
 	blocksize = inode->i_sb->s_blocksize;
 	length = blocksize - (offset & (blocksize - 1));
-	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
@@ -989,7 +989,7 @@
 	mark_buffer_dirty(bh);
 unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index c9384f9..e53b723 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -354,8 +354,8 @@
 {
 	struct inode *inode = page->mapping->host;
 	struct buffer_head bh;
-	unsigned long size = PAGE_CACHE_SIZE;
-	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	unsigned long size = PAGE_SIZE;
+	u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);
 
 	do {
 		bh.b_state = 0;
@@ -386,7 +386,7 @@
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_alloc_parms ap = { .aflags = 0, };
 	unsigned long last_index;
-	u64 pos = page->index << PAGE_CACHE_SHIFT;
+	u64 pos = page->index << PAGE_SHIFT;
 	unsigned int data_blocks, ind_blocks, rblocks;
 	struct gfs2_holder gh;
 	loff_t size;
@@ -401,7 +401,7 @@
 	if (ret)
 		goto out;
 
-	gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
+	gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);
 
 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 	ret = gfs2_glock_nq(&gh);
@@ -411,7 +411,7 @@
 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
 	set_bit(GIF_SW_PAGED, &ip->i_flags);
 
-	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
+	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
 		lock_page(page);
 		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
 			ret = -EAGAIN;
@@ -424,7 +424,7 @@
 	if (ret)
 		goto out_unlock;
 
-	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
+	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
 	ap.target = data_blocks + ind_blocks;
 	ret = gfs2_quota_lock_check(ip, &ap);
 	if (ret)
@@ -447,7 +447,7 @@
 	lock_page(page);
 	ret = -EINVAL;
 	size = i_size_read(inode);
-	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+	last_index = (size - 1) >> PAGE_SHIFT;
 	/* Check page index against inode size */
 	if (size == 0 || (page->index > last_index))
 		goto out_trans_end;
@@ -873,7 +873,7 @@
 			rblocks += data_blocks ? data_blocks : 1;
 
 		error = gfs2_trans_begin(sdp, rblocks,
-					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
+					 PAGE_SIZE/sdp->sd_sb.sb_bsize);
 		if (error)
 			goto out_trans_fail;
 
@@ -895,7 +895,10 @@
 		mark_inode_dirty(inode);
 	}
 
-	return generic_write_sync(file, pos, count);
+	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
+		return vfs_fsync_range(file, pos, pos + count - 1,
+			       (file->f_flags & __O_SYNC) ? 0 : 1);
+	return 0;
 
 out_trans_fail:
 	gfs2_inplace_release(ip);
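
The gfs2 hunk above open-codes what the old three-argument generic_write_sync() helper amounted to, since that helper changes shape elsewhere in this series. A hedged sketch of the equivalent logic (sync only for O_SYNC/O_DSYNC files or sync mounts, and request only a data sync unless full O_SYNC was asked for); this is a sketch, not a kernel API:

#include <linux/fs.h>

static int example_write_sync(struct file *file, loff_t pos, loff_t count)
{
	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
		return 0;
	return vfs_fsync_range(file, pos, pos + count - 1,
			       (file->f_flags & __O_SYNC) ? 0 : 1);
}
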
@@ -1119,7 +1122,7 @@
 };
 
 const struct file_operations gfs2_dir_fops = {
-	.iterate	= gfs2_readdir,
+	.iterate_shared	= gfs2_readdir,
 	.unlocked_ioctl	= gfs2_ioctl,
 	.open		= gfs2_open,
 	.release	= gfs2_release,
@@ -1147,7 +1150,7 @@
 };
 
 const struct file_operations gfs2_dir_fops_nolock = {
-	.iterate	= gfs2_readdir,
+	.iterate_shared	= gfs2_readdir,
 	.unlocked_ioctl	= gfs2_ioctl,
 	.open		= gfs2_open,
 	.release	= gfs2_release,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 6539131..4b73bd1 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1913,7 +1913,7 @@
 		if (seq->buf)
 			seq->size = GFS2_SEQ_GOODSIZE;
 		gi->gl = NULL;
-		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
+		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
 	}
 	return ret;
 }
@@ -1941,7 +1941,7 @@
 		if (seq->buf)
 			seq->size = GFS2_SEQ_GOODSIZE;
 		gi->gl = NULL;
-		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti);
+		ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
 	}
 	return ret;
 }
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index bb30f9a..72e9c64 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -692,12 +692,12 @@
 			       considered free. Any failures need to undo
 			       the gfs2 structures. */
 	if (default_acl) {
-		error = gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+		error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
 		posix_acl_release(default_acl);
 	}
 	if (acl) {
 		if (!error)
-			error = gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+			error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
 		posix_acl_release(acl);
 	}
 
@@ -1948,67 +1948,6 @@
 	return 0;
 }
 
-static int gfs2_setxattr(struct dentry *dentry, const char *name,
-			 const void *data, size_t size, int flags)
-{
-	struct inode *inode = d_inode(dentry);
-	struct gfs2_inode *ip = GFS2_I(inode);
-	struct gfs2_holder gh;
-	int ret;
-
-	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
-	ret = gfs2_glock_nq(&gh);
-	if (ret == 0) {
-		ret = gfs2_rsqa_alloc(ip);
-		if (ret == 0)
-			ret = generic_setxattr(dentry, name, data, size, flags);
-		gfs2_glock_dq(&gh);
-	}
-	gfs2_holder_uninit(&gh);
-	return ret;
-}
-
-static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
-			     void *data, size_t size)
-{
-	struct inode *inode = d_inode(dentry);
-	struct gfs2_inode *ip = GFS2_I(inode);
-	struct gfs2_holder gh;
-	int ret;
-
-	/* For selinux during lookup */
-	if (gfs2_glock_is_locked_by_me(ip->i_gl))
-		return generic_getxattr(dentry, name, data, size);
-
-	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
-	ret = gfs2_glock_nq(&gh);
-	if (ret == 0) {
-		ret = generic_getxattr(dentry, name, data, size);
-		gfs2_glock_dq(&gh);
-	}
-	gfs2_holder_uninit(&gh);
-	return ret;
-}
-
-static int gfs2_removexattr(struct dentry *dentry, const char *name)
-{
-	struct inode *inode = d_inode(dentry);
-	struct gfs2_inode *ip = GFS2_I(inode);
-	struct gfs2_holder gh;
-	int ret;
-
-	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
-	ret = gfs2_glock_nq(&gh);
-	if (ret == 0) {
-		ret = gfs2_rsqa_alloc(ip);
-		if (ret == 0)
-			ret = generic_removexattr(dentry, name);
-		gfs2_glock_dq(&gh);
-	}
-	gfs2_holder_uninit(&gh);
-	return ret;
-}
-
 static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		       u64 start, u64 len)
 {
@@ -2055,10 +1994,10 @@
 	.permission = gfs2_permission,
 	.setattr = gfs2_setattr,
 	.getattr = gfs2_getattr,
-	.setxattr = gfs2_setxattr,
-	.getxattr = gfs2_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = gfs2_listxattr,
-	.removexattr = gfs2_removexattr,
+	.removexattr = generic_removexattr,
 	.fiemap = gfs2_fiemap,
 	.get_acl = gfs2_get_acl,
 	.set_acl = gfs2_set_acl,
@@ -2077,10 +2016,10 @@
 	.permission = gfs2_permission,
 	.setattr = gfs2_setattr,
 	.getattr = gfs2_getattr,
-	.setxattr = gfs2_setxattr,
-	.getxattr = gfs2_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = gfs2_listxattr,
-	.removexattr = gfs2_removexattr,
+	.removexattr = generic_removexattr,
 	.fiemap = gfs2_fiemap,
 	.get_acl = gfs2_get_acl,
 	.set_acl = gfs2_set_acl,
@@ -2093,10 +2032,10 @@
 	.permission = gfs2_permission,
 	.setattr = gfs2_setattr,
 	.getattr = gfs2_getattr,
-	.setxattr = gfs2_setxattr,
-	.getxattr = gfs2_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = gfs2_listxattr,
-	.removexattr = gfs2_removexattr,
+	.removexattr = generic_removexattr,
 	.fiemap = gfs2_fiemap,
 };
 
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index e137d96..0448524 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -124,7 +124,7 @@
 	if (mapping == NULL)
 		mapping = &sdp->sd_aspace;
 
-	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
+	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
 	index = blkno >> shift;             /* convert block to page */
 	bufnum = blkno - (index << shift);  /* block buf index within page */
 
@@ -154,7 +154,7 @@
 		map_bh(bh, sdp->sd_vfs, blkno);
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return bh;
 }
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 49b0bff..4546360 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -824,7 +824,7 @@
 	 * i_mutex on quota files is special. Since this inode is a hidden system
 	 * file, we are safe to define the locking ourselves.
 	 */
-	lockdep_set_class(&sdp->sd_quota_inode->i_mutex,
+	lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
 			  &gfs2_quota_imutex_key);
 
 	error = gfs2_rindex_update(sdp);
@@ -1360,7 +1360,7 @@
 		return ERR_PTR(error);
 	}
 	s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
-		 d_inode(path.dentry)->i_sb->s_bdev);
+		 path.dentry->d_sb->s_bdev);
 	path_put(&path);
 	if (IS_ERR(s)) {
 		pr_warn("gfs2 mount does not exist\n");
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a398913..ce7d69a 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -701,7 +701,7 @@
 	unsigned to_write = bytes, pg_off = off;
 	int done = 0;
 
-	blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift);
+	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
 	boff = off % bsize;
 
 	page = find_or_create_page(mapping, index, GFP_NOFS);
@@ -753,13 +753,13 @@
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return 0;
 
 unlock_out:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return -EIO;
 }
 
@@ -773,13 +773,13 @@
 
 	nbytes = sizeof(struct gfs2_quota);
 
-	pg_beg = loc >> PAGE_CACHE_SHIFT;
-	pg_off = loc % PAGE_CACHE_SIZE;
+	pg_beg = loc >> PAGE_SHIFT;
+	pg_off = loc % PAGE_SIZE;
 
 	/* If the quota straddles a page boundary, split the write in two */
-	if ((pg_off + nbytes) > PAGE_CACHE_SIZE) {
+	if ((pg_off + nbytes) > PAGE_SIZE) {
 		pg_oflow = 1;
-		overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE;
+		overflow = (pg_off + nbytes) - PAGE_SIZE;
 	}
 
 	ptr = qp;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 07c0265..99a0bda 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -918,9 +918,8 @@
 		goto fail;
 
 	rgd->rd_gl->gl_object = rgd;
-	rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK;
-	rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr +
-						  rgd->rd_length) * bsize) - 1;
+	rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
+	rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
 	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
 	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
 	if (rgd->rd_data > sdp->sd_max_rg_data)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index f8a0cd8..9b2ff353 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1176,7 +1176,7 @@
 
 static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
-	struct super_block *sb = d_inode(dentry)->i_sb;
+	struct super_block *sb = dentry->d_sb;
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_statfs_change_host sc;
 	int error;
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index e8dfb47..f42ab53b 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -583,13 +583,11 @@
  *
  * Returns: actual size of data on success, -errno on error
  */
-static int gfs2_xattr_get(const struct xattr_handler *handler,
-			  struct dentry *dentry, const char *name,
-			  void *buffer, size_t size)
+static int __gfs2_xattr_get(struct inode *inode, const char *name,
+			    void *buffer, size_t size, int type)
 {
-	struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
+	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_ea_location el;
-	int type = handler->flags;
 	int error;
 
 	if (!ip->i_eattr)
@@ -611,6 +609,29 @@
 	return error;
 }
 
+static int gfs2_xattr_get(const struct xattr_handler *handler,
+			  struct dentry *unused, struct inode *inode,
+			  const char *name, void *buffer, size_t size)
+{
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_holder gh;
+	bool need_unlock = false;
+	int ret;
+
+	/* During lookup, SELinux calls this function with the glock locked. */
+
+	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
+		if (ret)
+			return ret;
+		need_unlock = true;
+	}
+	ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags);
+	if (need_unlock)
+		gfs2_glock_dq_uninit(&gh);
+	return ret;
+}
+
 /**
  * ea_alloc_blk - allocates a new block for extended attributes.
  * @ip: A pointer to the inode that's getting extended attributes
@@ -1233,8 +1254,21 @@
 			  struct dentry *dentry, const char *name,
 			  const void *value, size_t size, int flags)
 {
-	return __gfs2_xattr_set(d_inode(dentry), name, value,
-				size, flags, handler->flags);
+	struct inode *inode = d_inode(dentry);
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_holder gh;
+	int ret;
+
+	ret = gfs2_rsqa_alloc(ip);
+	if (ret)
+		return ret;
+
+	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+	if (ret)
+		return ret;
+	ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
+	gfs2_glock_dq_uninit(&gh);
+	return ret;
 }
 
 static int ea_dealloc_indirect(struct gfs2_inode *ip)
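
The handler conversion above also shows the new xattr_handler ->get() prototype used throughout this series: the VFS hands the handler the inode explicitly instead of requiring d_inode(dentry). A minimal sketch of a handler table using it; the example_* names and the -EOPNOTSUPP body are placeholders, not from the patch:

#include <linux/xattr.h>

static int example_getxattr(const struct xattr_handler *handler,
			    struct dentry *unused, struct inode *inode,
			    const char *name, void *buffer, size_t size)
{
	/* look the attribute up via the inode; the dentry is unused here */
	return -EOPNOTSUPP;
}

static const struct xattr_handler example_xattr_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.get	= example_getxattr,
};
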
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index 8d931b1..064f92f 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -56,10 +56,9 @@
 	return res;
 }
 
-ssize_t hfs_getxattr(struct dentry *dentry, const char *name,
-			 void *value, size_t size)
+ssize_t hfs_getxattr(struct dentry *unused, struct inode *inode,
+		     const char *name, void *value, size_t size)
 {
-	struct inode *inode = d_inode(dentry);
 	struct hfs_find_data fd;
 	hfs_cat_rec rec;
 	struct hfs_cat_file *file;
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 221719e..d77d844 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -278,14 +278,14 @@
 
 	mapping = tree->inode->i_mapping;
 	off = (loff_t)cnid * tree->node_size;
-	block = off >> PAGE_CACHE_SHIFT;
-	node->page_offset = off & ~PAGE_CACHE_MASK;
+	block = off >> PAGE_SHIFT;
+	node->page_offset = off & ~PAGE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; i++) {
 		page = read_mapping_page(mapping, block++, NULL);
 		if (IS_ERR(page))
 			goto fail;
 		if (PageError(page)) {
-			page_cache_release(page);
+			put_page(page);
 			goto fail;
 		}
 		node->page[i] = page;
@@ -401,7 +401,7 @@
 
 	for (i = 0; i < node->tree->pages_per_bnode; i++)
 		if (node->page[i])
-			page_cache_release(node->page[i]);
+			put_page(node->page[i]);
 	kfree(node);
 }
 
@@ -429,11 +429,11 @@
 
 	pagep = node->page;
 	memset(kmap(*pagep) + node->page_offset, 0,
-	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
+	       min((int)PAGE_SIZE, (int)tree->node_size));
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 	for (i = 1; i < tree->pages_per_bnode; i++) {
-		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+		memset(kmap(*++pagep), 0, PAGE_SIZE);
 		set_page_dirty(*pagep);
 		kunmap(*pagep);
 	}
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 1ab19e6..37cdd95 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -116,14 +116,14 @@
 	}
 
 	tree->node_size_shift = ffs(size) - 1;
-	tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 	return tree;
 
 fail_page:
-	page_cache_release(page);
+	put_page(page);
 free_inode:
 	tree->inode->i_mapping->a_ops = &hfs_aops;
 	iput(tree->inode);
@@ -257,9 +257,9 @@
 	off = off16;
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+	pagep = node->page + (off >> PAGE_SHIFT);
 	data = kmap(*pagep);
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 	idx = 0;
 
 	for (;;) {
@@ -279,7 +279,7 @@
 					}
 				}
 			}
-			if (++off >= PAGE_CACHE_SIZE) {
+			if (++off >= PAGE_SIZE) {
 				kunmap(*pagep);
 				data = kmap(*++pagep);
 				off = 0;
@@ -302,9 +302,9 @@
 		len = hfs_brec_lenoff(node, 0, &off16);
 		off = off16;
 		off += node->page_offset;
-		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+		pagep = node->page + (off >> PAGE_SHIFT);
 		data = kmap(*pagep);
-		off &= ~PAGE_CACHE_MASK;
+		off &= ~PAGE_MASK;
 	}
 }
 
@@ -348,9 +348,9 @@
 		len = hfs_brec_lenoff(node, 0, &off);
 	}
 	off += node->page_offset + nidx / 8;
-	page = node->page[off >> PAGE_CACHE_SHIFT];
+	page = node->page[off >> PAGE_SHIFT];
 	data = kmap(page);
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 	m = 1 << (~nidx & 7);
 	byte = data[off];
 	if (!(byte & m)) {
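
page_cache_release() had long been an alias for put_page(), so these hunks spell the real function without changing reference counting. A small sketch of the lookup-and-drop pattern the hfs btree code above relies on; the helper name is illustrative:

#include <linux/pagemap.h>
#include <linux/err.h>

static struct page *example_read_node_page(struct address_space *mapping,
					   pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return page;
	if (PageError(page)) {
		put_page(page);		/* was page_cache_release(page) */
		return ERR_PTR(-EIO);
	}
	return page;	/* caller drops the reference with put_page() when done */
}
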
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 1eb5d41..98cde8b 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -240,10 +240,13 @@
 		}
 	}
 
+	/* we only need to take spinlock for exclusion with ->release() */
+	spin_lock(&HFS_I(dir)->open_dir_lock);
 	list_for_each_entry(rd, &HFS_I(dir)->open_dir_list, list) {
 		if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0)
 			rd->file->f_pos--;
 	}
+	spin_unlock(&HFS_I(dir)->open_dir_lock);
 
 	res = hfs_brec_remove(&fd);
 	if (res)
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index e9f2b85..163190e 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -161,8 +161,14 @@
 		}
 		file->private_data = rd;
 		rd->file = file;
+		spin_lock(&HFS_I(inode)->open_dir_lock);
 		list_add(&rd->list, &HFS_I(inode)->open_dir_list);
+		spin_unlock(&HFS_I(inode)->open_dir_lock);
 	}
+	/*
+	 * Can be done after the list insertion; exclusion with
+	 * hfs_delete_cat() is provided by directory lock.
+	 */
 	memcpy(&rd->key, &fd.key, sizeof(struct hfs_cat_key));
 out:
 	hfs_find_exit(&fd);
@@ -173,9 +179,9 @@
 {
 	struct hfs_readdir_data *rd = file->private_data;
 	if (rd) {
-		inode_lock(inode);
+		spin_lock(&HFS_I(inode)->open_dir_lock);
 		list_del(&rd->list);
-		inode_unlock(inode);
+		spin_unlock(&HFS_I(inode)->open_dir_lock);
 		kfree(rd);
 	}
 	return 0;
@@ -303,7 +309,7 @@
 
 const struct file_operations hfs_dir_operations = {
 	.read		= generic_read_dir,
-	.iterate	= hfs_readdir,
+	.iterate_shared	= hfs_readdir,
 	.llseek		= generic_file_llseek,
 	.release	= hfs_dir_release,
 };
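
Switching a directory's file_operations from .iterate to .iterate_shared means readdir may now run with the directory's i_rwsem held only for reading, so the inode lock no longer serializes ->release() against hfs_delete_cat(); that is why open_dir_list gains its own spinlock above. A minimal sketch of the resulting shape, with example_* placeholders that are not from the patch:

#include <linux/fs.h>

static int example_readdir(struct file *file, struct dir_context *ctx)
{
	return 0;	/* placeholder: emit entries via dir_emit() */
}

static int example_dir_release(struct inode *inode, struct file *file)
{
	return 0;	/* placeholder: drop per-open state under the new spinlock */
}

static const struct file_operations example_dir_fops = {
	.read		= generic_read_dir,
	.iterate_shared	= example_readdir,	/* may run under shared i_rwsem */
	.llseek		= generic_file_llseek,
	.release	= example_dir_release,
};
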
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 1f1c7dc..fa3eed8 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -69,6 +69,7 @@
 	struct hfs_cat_key cat_key;
 
 	struct list_head open_dir_list;
+	spinlock_t open_dir_lock;
 	struct inode *rsrc_inode;
 
 	struct mutex extents_lock;
@@ -213,8 +214,8 @@
 /* attr.c */
 extern int hfs_setxattr(struct dentry *dentry, const char *name,
 			const void *value, size_t size, int flags);
-extern ssize_t hfs_getxattr(struct dentry *dentry, const char *name,
-			    void *value, size_t size);
+extern ssize_t hfs_getxattr(struct dentry *dentry, struct inode *inode,
+			    const char *name, void *value, size_t size);
 extern ssize_t hfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
 
 /* mdb.c */
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 6686bf3..8eed66a 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -91,8 +91,8 @@
 	if (!tree)
 		return 0;
 
-	if (tree->node_size >= PAGE_CACHE_SIZE) {
-		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
+	if (tree->node_size >= PAGE_SIZE) {
+		nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
 		spin_lock(&tree->hash_lock);
 		node = hfs_bnode_findhash(tree, nidx);
 		if (!node)
@@ -105,8 +105,8 @@
 		}
 		spin_unlock(&tree->hash_lock);
 	} else {
-		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
-		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+		nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
+		i = 1 << (PAGE_SHIFT - tree->node_size_shift);
 		spin_lock(&tree->hash_lock);
 		do {
 			node = hfs_bnode_findhash(tree, nidx++);
@@ -124,8 +124,7 @@
 	return res ? try_to_free_buffers(page) : 0;
 }
 
-static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			     loff_t offset)
+static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
@@ -133,7 +132,7 @@
 	size_t count = iov_iter_count(iter);
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(iocb, inode, iter, offset, hfs_get_block);
+	ret = blockdev_direct_IO(iocb, inode, iter, hfs_get_block);
 
 	/*
 	 * In case of error extending write may have instantiated a few
@@ -141,7 +140,7 @@
 	 */
 	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
 		loff_t isize = i_size_read(inode);
-		loff_t end = offset + count;
+		loff_t end = iocb->ki_pos + count;
 
 		if (end > isize)
 			hfs_write_failed(mapping, end);
@@ -187,6 +186,7 @@
 
 	mutex_init(&HFS_I(inode)->extents_lock);
 	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
+	spin_lock_init(&HFS_I(inode)->open_dir_lock);
 	hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
 	inode->i_ino = HFS_SB(sb)->next_id++;
 	inode->i_mode = mode;
@@ -318,6 +318,7 @@
 	HFS_I(inode)->rsrc_inode = NULL;
 	mutex_init(&HFS_I(inode)->extents_lock);
 	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
+	spin_lock_init(&HFS_I(inode)->open_dir_lock);
 
 	/* Initialize the inode */
 	inode->i_uid = hsb->s_uid;
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index d2954451..c0ae274 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -13,7 +13,7 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 
-#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)
+#define PAGE_CACHE_BITS	(PAGE_SIZE * 8)
 
 int hfsplus_block_allocate(struct super_block *sb, u32 size,
 		u32 offset, u32 *max)
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 6392466..ce014ce 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -24,16 +24,16 @@
 	int l;
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-	off &= ~PAGE_CACHE_MASK;
+	pagep = node->page + (off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
 
-	l = min_t(int, len, PAGE_CACHE_SIZE - off);
+	l = min_t(int, len, PAGE_SIZE - off);
 	memcpy(buf, kmap(*pagep) + off, l);
 	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
 		buf += l;
-		l = min_t(int, len, PAGE_CACHE_SIZE);
+		l = min_t(int, len, PAGE_SIZE);
 		memcpy(buf, kmap(*++pagep), l);
 		kunmap(*pagep);
 	}
@@ -77,17 +77,17 @@
 	int l;
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-	off &= ~PAGE_CACHE_MASK;
+	pagep = node->page + (off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
 
-	l = min_t(int, len, PAGE_CACHE_SIZE - off);
+	l = min_t(int, len, PAGE_SIZE - off);
 	memcpy(kmap(*pagep) + off, buf, l);
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
 		buf += l;
-		l = min_t(int, len, PAGE_CACHE_SIZE);
+		l = min_t(int, len, PAGE_SIZE);
 		memcpy(kmap(*++pagep), buf, l);
 		set_page_dirty(*pagep);
 		kunmap(*pagep);
@@ -107,16 +107,16 @@
 	int l;
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
-	off &= ~PAGE_CACHE_MASK;
+	pagep = node->page + (off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
 
-	l = min_t(int, len, PAGE_CACHE_SIZE - off);
+	l = min_t(int, len, PAGE_SIZE - off);
 	memset(kmap(*pagep) + off, 0, l);
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
-		l = min_t(int, len, PAGE_CACHE_SIZE);
+		l = min_t(int, len, PAGE_SIZE);
 		memset(kmap(*++pagep), 0, l);
 		set_page_dirty(*pagep);
 		kunmap(*pagep);
@@ -136,20 +136,20 @@
 	tree = src_node->tree;
 	src += src_node->page_offset;
 	dst += dst_node->page_offset;
-	src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
-	src &= ~PAGE_CACHE_MASK;
-	dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
-	dst &= ~PAGE_CACHE_MASK;
+	src_page = src_node->page + (src >> PAGE_SHIFT);
+	src &= ~PAGE_MASK;
+	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
+	dst &= ~PAGE_MASK;
 
 	if (src == dst) {
-		l = min_t(int, len, PAGE_CACHE_SIZE - src);
+		l = min_t(int, len, PAGE_SIZE - src);
 		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
 		kunmap(*src_page);
 		set_page_dirty(*dst_page);
 		kunmap(*dst_page);
 
 		while ((len -= l) != 0) {
-			l = min_t(int, len, PAGE_CACHE_SIZE);
+			l = min_t(int, len, PAGE_SIZE);
 			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
@@ -161,12 +161,12 @@
 		do {
 			src_ptr = kmap(*src_page) + src;
 			dst_ptr = kmap(*dst_page) + dst;
-			if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
-				l = PAGE_CACHE_SIZE - src;
+			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
+				l = PAGE_SIZE - src;
 				src = 0;
 				dst += l;
 			} else {
-				l = PAGE_CACHE_SIZE - dst;
+				l = PAGE_SIZE - dst;
 				src += l;
 				dst = 0;
 			}
@@ -195,11 +195,11 @@
 	dst += node->page_offset;
 	if (dst > src) {
 		src += len - 1;
-		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
-		src = (src & ~PAGE_CACHE_MASK) + 1;
+		src_page = node->page + (src >> PAGE_SHIFT);
+		src = (src & ~PAGE_MASK) + 1;
 		dst += len - 1;
-		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
-		dst = (dst & ~PAGE_CACHE_MASK) + 1;
+		dst_page = node->page + (dst >> PAGE_SHIFT);
+		dst = (dst & ~PAGE_MASK) + 1;
 
 		if (src == dst) {
 			while (src < len) {
@@ -208,7 +208,7 @@
 				set_page_dirty(*dst_page);
 				kunmap(*dst_page);
 				len -= src;
-				src = PAGE_CACHE_SIZE;
+				src = PAGE_SIZE;
 				src_page--;
 				dst_page--;
 			}
@@ -226,32 +226,32 @@
 				dst_ptr = kmap(*dst_page) + dst;
 				if (src < dst) {
 					l = src;
-					src = PAGE_CACHE_SIZE;
+					src = PAGE_SIZE;
 					dst -= l;
 				} else {
 					l = dst;
 					src -= l;
-					dst = PAGE_CACHE_SIZE;
+					dst = PAGE_SIZE;
 				}
 				l = min(len, l);
 				memmove(dst_ptr - l, src_ptr - l, l);
 				kunmap(*src_page);
 				set_page_dirty(*dst_page);
 				kunmap(*dst_page);
-				if (dst == PAGE_CACHE_SIZE)
+				if (dst == PAGE_SIZE)
 					dst_page--;
 				else
 					src_page--;
 			} while ((len -= l));
 		}
 	} else {
-		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
-		src &= ~PAGE_CACHE_MASK;
-		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
-		dst &= ~PAGE_CACHE_MASK;
+		src_page = node->page + (src >> PAGE_SHIFT);
+		src &= ~PAGE_MASK;
+		dst_page = node->page + (dst >> PAGE_SHIFT);
+		dst &= ~PAGE_MASK;
 
 		if (src == dst) {
-			l = min_t(int, len, PAGE_CACHE_SIZE - src);
+			l = min_t(int, len, PAGE_SIZE - src);
 			memmove(kmap(*dst_page) + src,
 				kmap(*src_page) + src, l);
 			kunmap(*src_page);
@@ -259,7 +259,7 @@
 			kunmap(*dst_page);
 
 			while ((len -= l) != 0) {
-				l = min_t(int, len, PAGE_CACHE_SIZE);
+				l = min_t(int, len, PAGE_SIZE);
 				memmove(kmap(*++dst_page),
 					kmap(*++src_page), l);
 				kunmap(*src_page);
@@ -272,13 +272,13 @@
 			do {
 				src_ptr = kmap(*src_page) + src;
 				dst_ptr = kmap(*dst_page) + dst;
-				if (PAGE_CACHE_SIZE - src <
-						PAGE_CACHE_SIZE - dst) {
-					l = PAGE_CACHE_SIZE - src;
+				if (PAGE_SIZE - src <
+						PAGE_SIZE - dst) {
+					l = PAGE_SIZE - src;
 					src = 0;
 					dst += l;
 				} else {
-					l = PAGE_CACHE_SIZE - dst;
+					l = PAGE_SIZE - dst;
 					src += l;
 					dst = 0;
 				}
@@ -444,14 +444,14 @@
 
 	mapping = tree->inode->i_mapping;
 	off = (loff_t)cnid << tree->node_size_shift;
-	block = off >> PAGE_CACHE_SHIFT;
-	node->page_offset = off & ~PAGE_CACHE_MASK;
+	block = off >> PAGE_SHIFT;
+	node->page_offset = off & ~PAGE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
 		page = read_mapping_page(mapping, block, NULL);
 		if (IS_ERR(page))
 			goto fail;
 		if (PageError(page)) {
-			page_cache_release(page);
+			put_page(page);
 			goto fail;
 		}
 		node->page[i] = page;
@@ -569,7 +569,7 @@
 
 	for (i = 0; i < node->tree->pages_per_bnode; i++)
 		if (node->page[i])
-			page_cache_release(node->page[i]);
+			put_page(node->page[i]);
 	kfree(node);
 }
 
@@ -597,11 +597,11 @@
 
 	pagep = node->page;
 	memset(kmap(*pagep) + node->page_offset, 0,
-	       min_t(int, PAGE_CACHE_SIZE, tree->node_size));
+	       min_t(int, PAGE_SIZE, tree->node_size));
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 	for (i = 1; i < tree->pages_per_bnode; i++) {
-		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+		memset(kmap(*++pagep), 0, PAGE_SIZE);
 		set_page_dirty(*pagep);
 		kunmap(*pagep);
 	}
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 3345c75..d9d1a36 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -236,15 +236,15 @@
 	tree->node_size_shift = ffs(size) - 1;
 
 	tree->pages_per_bnode =
-		(tree->node_size + PAGE_CACHE_SIZE - 1) >>
-		PAGE_CACHE_SHIFT;
+		(tree->node_size + PAGE_SIZE - 1) >>
+		PAGE_SHIFT;
 
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 	return tree;
 
  fail_page:
-	page_cache_release(page);
+	put_page(page);
  free_inode:
 	tree->inode->i_mapping->a_ops = &hfsplus_aops;
 	iput(tree->inode);
@@ -380,9 +380,9 @@
 	off = off16;
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+	pagep = node->page + (off >> PAGE_SHIFT);
 	data = kmap(*pagep);
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 	idx = 0;
 
 	for (;;) {
@@ -403,7 +403,7 @@
 					}
 				}
 			}
-			if (++off >= PAGE_CACHE_SIZE) {
+			if (++off >= PAGE_SIZE) {
 				kunmap(*pagep);
 				data = kmap(*++pagep);
 				off = 0;
@@ -426,9 +426,9 @@
 		len = hfs_brec_lenoff(node, 0, &off16);
 		off = off16;
 		off += node->page_offset;
-		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+		pagep = node->page + (off >> PAGE_SHIFT);
 		data = kmap(*pagep);
-		off &= ~PAGE_CACHE_MASK;
+		off &= ~PAGE_MASK;
 	}
 }
 
@@ -475,9 +475,9 @@
 		len = hfs_brec_lenoff(node, 0, &off);
 	}
 	off += node->page_offset + nidx / 8;
-	page = node->page[off >> PAGE_CACHE_SHIFT];
+	page = node->page[off >> PAGE_SHIFT];
 	data = kmap(page);
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 	m = 1 << (~nidx & 7);
 	byte = data[off];
 	if (!(byte & m)) {
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 022974a..fb707e8 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -374,12 +374,15 @@
 		hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC);
 	}
 
+	/* we only need to take spinlock for exclusion with ->release() */
+	spin_lock(&HFSPLUS_I(dir)->open_dir_lock);
 	list_for_each(pos, &HFSPLUS_I(dir)->open_dir_list) {
 		struct hfsplus_readdir_data *rd =
 			list_entry(pos, struct hfsplus_readdir_data, list);
 		if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0)
 			rd->file->f_pos--;
 	}
+	spin_unlock(&HFSPLUS_I(dir)->open_dir_lock);
 
 	err = hfs_brec_remove(&fd);
 	if (err)
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index a4e867e..42e1286 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -271,8 +271,14 @@
 		}
 		file->private_data = rd;
 		rd->file = file;
+		spin_lock(&HFSPLUS_I(inode)->open_dir_lock);
 		list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list);
+		spin_unlock(&HFSPLUS_I(inode)->open_dir_lock);
 	}
+	/*
+	 * Can be done after the list insertion; exclusion with
+	 * hfsplus_delete_cat() is provided by directory lock.
+	 */
 	memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key));
 out:
 	kfree(strbuf);
@@ -284,9 +290,9 @@
 {
 	struct hfsplus_readdir_data *rd = file->private_data;
 	if (rd) {
-		inode_lock(inode);
+		spin_lock(&HFSPLUS_I(inode)->open_dir_lock);
 		list_del(&rd->list);
-		inode_unlock(inode);
+		spin_unlock(&HFSPLUS_I(inode)->open_dir_lock);
 		kfree(rd);
 	}
 	return 0;
@@ -569,7 +575,7 @@
 const struct file_operations hfsplus_dir_operations = {
 	.fsync		= hfsplus_file_fsync,
 	.read		= generic_read_dir,
-	.iterate	= hfsplus_readdir,
+	.iterate_shared	= hfsplus_readdir,
 	.unlocked_ioctl = hfsplus_ioctl,
 	.llseek		= generic_file_llseek,
 	.release	= hfsplus_dir_release,
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index f91a1fa..fdc3446 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -244,6 +244,7 @@
 	u8 userflags;		/* BSD user file flags */
 	u32 subfolders;		/* Subfolder count (HFSX only) */
 	struct list_head open_dir_list;
+	spinlock_t open_dir_lock;
 	loff_t phys_size;
 
 	struct inode vfs_inode;
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 1a6394c..ef9fefe 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -87,9 +87,9 @@
 	}
 	if (!tree)
 		return 0;
-	if (tree->node_size >= PAGE_CACHE_SIZE) {
+	if (tree->node_size >= PAGE_SIZE) {
 		nidx = page->index >>
-			(tree->node_size_shift - PAGE_CACHE_SHIFT);
+			(tree->node_size_shift - PAGE_SHIFT);
 		spin_lock(&tree->hash_lock);
 		node = hfs_bnode_findhash(tree, nidx);
 		if (!node)
@@ -103,8 +103,8 @@
 		spin_unlock(&tree->hash_lock);
 	} else {
 		nidx = page->index <<
-			(PAGE_CACHE_SHIFT - tree->node_size_shift);
-		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+			(PAGE_SHIFT - tree->node_size_shift);
+		i = 1 << (PAGE_SHIFT - tree->node_size_shift);
 		spin_lock(&tree->hash_lock);
 		do {
 			node = hfs_bnode_findhash(tree, nidx++);
@@ -122,8 +122,7 @@
 	return res ? try_to_free_buffers(page) : 0;
 }
 
-static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-				 loff_t offset)
+static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
@@ -131,7 +130,7 @@
 	size_t count = iov_iter_count(iter);
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block);
+	ret = blockdev_direct_IO(iocb, inode, iter, hfsplus_get_block);
 
 	/*
 	 * In case of error extending write may have instantiated a few
@@ -139,7 +138,7 @@
 	 */
 	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
 		loff_t isize = i_size_read(inode);
-		loff_t end = offset + count;
+		loff_t end = iocb->ki_pos + count;
 
 		if (end > isize)
 			hfsplus_write_failed(mapping, end);
@@ -374,6 +373,7 @@
 
 	hip = HFSPLUS_I(inode);
 	INIT_LIST_HEAD(&hip->open_dir_list);
+	spin_lock_init(&hip->open_dir_lock);
 	mutex_init(&hip->extents_lock);
 	atomic_set(&hip->opencnt, 0);
 	hip->extent_state = 0;
diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
index afb33ed..ab7ea25 100644
--- a/fs/hfsplus/posix_acl.c
+++ b/fs/hfsplus/posix_acl.c
@@ -48,9 +48,6 @@
 
 	hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value);
 
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
-
 	return acl;
 }
 
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 5d54490..755bf30 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -67,6 +67,7 @@
 		return inode;
 
 	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
+	spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
 	mutex_init(&HFSPLUS_I(inode)->extents_lock);
 	HFSPLUS_I(inode)->flags = 0;
 	HFSPLUS_I(inode)->extent_state = 0;
@@ -438,7 +439,7 @@
 	err = -EFBIG;
 	last_fs_block = sbi->total_blocks - 1;
 	last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
-			PAGE_CACHE_SHIFT;
+			PAGE_SHIFT;
 
 	if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
 	    (last_fs_page > (pgoff_t)(~0ULL))) {
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index ab01530..4f118d2 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -220,7 +220,7 @@
 
 	index = 0;
 	written = 0;
-	for (; written < node_size; index++, written += PAGE_CACHE_SIZE) {
+	for (; written < node_size; index++, written += PAGE_SIZE) {
 		void *kaddr;
 
 		page = read_mapping_page(mapping, index, NULL);
@@ -231,11 +231,11 @@
 
 		kaddr = kmap_atomic(page);
 		memcpy(kaddr, buf + written,
-			min_t(size_t, PAGE_CACHE_SIZE, node_size - written));
+			min_t(size_t, PAGE_SIZE, node_size - written));
 		kunmap_atomic(kaddr);
 
 		set_page_dirty(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);
@@ -579,7 +579,7 @@
 	return res;
 }
 
-ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ssize_t hfsplus_getxattr(struct inode *inode, const char *name,
 			 void *value, size_t size,
 			 const char *prefix, size_t prefixlen)
 {
@@ -594,7 +594,7 @@
 	strcpy(xattr_name, prefix);
 	strcpy(xattr_name + prefixlen, name);
 
-	res = __hfsplus_getxattr(d_inode(dentry), xattr_name, value, size);
+	res = __hfsplus_getxattr(inode, xattr_name, value, size);
 	kfree(xattr_name);
 	return res;
 
@@ -844,8 +844,8 @@
 }
 
 static int hfsplus_osx_getxattr(const struct xattr_handler *handler,
-				struct dentry *dentry, const char *name,
-				void *buffer, size_t size)
+				struct dentry *unused, struct inode *inode,
+				const char *name, void *buffer, size_t size)
 {
 	/*
 	 * Don't allow retrieving properly prefixed attributes
@@ -860,7 +860,7 @@
 	 * creates), so we pass the name through unmodified (after
 	 * ensuring it doesn't conflict with another namespace).
 	 */
-	return __hfsplus_getxattr(d_inode(dentry), name, buffer, size);
+	return __hfsplus_getxattr(inode, name, buffer, size);
 }
 
 static int hfsplus_osx_setxattr(const struct xattr_handler *handler,
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index f9b0955..d04ba6f 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -28,7 +28,7 @@
 ssize_t __hfsplus_getxattr(struct inode *inode, const char *name,
 			   void *value, size_t size);
 
-ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
+ssize_t hfsplus_getxattr(struct inode *inode, const char *name,
 			 void *value, size_t size,
 			 const char *prefix, size_t prefixlen);
 
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index 72a68a3..ae2ca8c 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -14,10 +14,10 @@
 #include "acl.h"
 
 static int hfsplus_security_getxattr(const struct xattr_handler *handler,
-				     struct dentry *dentry, const char *name,
-				     void *buffer, size_t size)
+				     struct dentry *unused, struct inode *inode,
+				     const char *name, void *buffer, size_t size)
 {
-	return hfsplus_getxattr(dentry, name, buffer, size,
+	return hfsplus_getxattr(inode, name, buffer, size,
 				XATTR_SECURITY_PREFIX,
 				XATTR_SECURITY_PREFIX_LEN);
 }
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
index 95a7704..eae2947 100644
--- a/fs/hfsplus/xattr_trusted.c
+++ b/fs/hfsplus/xattr_trusted.c
@@ -12,10 +12,10 @@
 #include "xattr.h"
 
 static int hfsplus_trusted_getxattr(const struct xattr_handler *handler,
-				    struct dentry *dentry, const char *name,
-				    void *buffer, size_t size)
+				    struct dentry *unused, struct inode *inode,
+				    const char *name, void *buffer, size_t size)
 {
-	return hfsplus_getxattr(dentry, name, buffer, size,
+	return hfsplus_getxattr(inode, name, buffer, size,
 				XATTR_TRUSTED_PREFIX,
 				XATTR_TRUSTED_PREFIX_LEN);
 }
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
index 6fc269b..3c9eec3 100644
--- a/fs/hfsplus/xattr_user.c
+++ b/fs/hfsplus/xattr_user.c
@@ -12,11 +12,11 @@
 #include "xattr.h"
 
 static int hfsplus_user_getxattr(const struct xattr_handler *handler,
-				 struct dentry *dentry, const char *name,
-				 void *buffer, size_t size)
+				 struct dentry *unused, struct inode *inode,
+				 const char *name, void *buffer, size_t size)
 {
 
-	return hfsplus_getxattr(dentry, name, buffer, size,
+	return hfsplus_getxattr(inode, name, buffer, size,
 				XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
 }
 
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index d1abbee..5c57654 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -398,7 +398,7 @@
 
 static const struct file_operations hostfs_dir_fops = {
 	.llseek		= generic_file_llseek,
-	.iterate	= hostfs_readdir,
+	.iterate_shared	= hostfs_readdir,
 	.read		= generic_read_dir,
 	.open		= hostfs_open,
 	.fsync		= hostfs_fsync,
@@ -410,12 +410,12 @@
 	struct inode *inode = mapping->host;
 	char *buffer;
 	loff_t base = page_offset(page);
-	int count = PAGE_CACHE_SIZE;
-	int end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+	int count = PAGE_SIZE;
+	int end_index = inode->i_size >> PAGE_SHIFT;
 	int err;
 
 	if (page->index >= end_index)
-		count = inode->i_size & (PAGE_CACHE_SIZE-1);
+		count = inode->i_size & (PAGE_SIZE-1);
 
 	buffer = kmap(page);
 
@@ -447,7 +447,7 @@
 
 	buffer = kmap(page);
 	bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
-			PAGE_CACHE_SIZE);
+			PAGE_SIZE);
 	if (bytes_read < 0) {
 		ClearPageUptodate(page);
 		SetPageError(page);
@@ -455,7 +455,7 @@
 		goto out;
 	}
 
-	memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read);
+	memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read);
 
 	ClearPageError(page);
 	SetPageUptodate(page);
@@ -471,7 +471,7 @@
 			      loff_t pos, unsigned len, unsigned flags,
 			      struct page **pagep, void **fsdata)
 {
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 
 	*pagep = grab_cache_page_write_begin(mapping, index, flags);
 	if (!*pagep)
@@ -485,14 +485,14 @@
 {
 	struct inode *inode = mapping->host;
 	void *buffer;
-	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from = pos & (PAGE_SIZE - 1);
 	int err;
 
 	buffer = kmap(page);
 	err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied);
 	kunmap(page);
 
-	if (!PageUptodate(page) && err == PAGE_CACHE_SIZE)
+	if (!PageUptodate(page) && err == PAGE_SIZE)
 		SetPageUptodate(page);
 
 	/*
@@ -502,7 +502,7 @@
 	if (err > 0 && (pos > inode->i_size))
 		inode->i_size = pos;
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return err;
 }
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index e57a53c..7b9150c 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -44,7 +44,11 @@
 		else goto fail;
 		if (pos == 12) goto fail;
 	}
-	hpfs_add_pos(i, &filp->f_pos);
+	if (unlikely(hpfs_add_pos(i, &filp->f_pos) < 0)) {
+		hpfs_unlock(s);
+		inode_unlock(i);
+		return -ENOMEM;
+	}
 ok:
 	filp->f_pos = new_off;
 	hpfs_unlock(s);
@@ -141,8 +145,10 @@
 			ctx->pos = 1;
 		}
 		if (ctx->pos == 1) {
+			ret = hpfs_add_pos(inode, &file->f_pos);
+			if (unlikely(ret < 0))
+				goto out;
 			ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1;
-			hpfs_add_pos(inode, &file->f_pos);
 			file->f_version = inode->i_version;
 		}
 		next_pos = ctx->pos;
@@ -324,7 +330,7 @@
 {
 	.llseek		= hpfs_dir_lseek,
 	.read		= generic_read_dir,
-	.iterate	= hpfs_readdir,
+	.iterate_shared	= hpfs_readdir,
 	.release	= hpfs_dir_release,
 	.fsync		= hpfs_file_fsync,
 	.unlocked_ioctl	= hpfs_ioctl,
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 2923a7b..86ab7e7 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -21,7 +21,7 @@
 	return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1;
 }
 
-void hpfs_add_pos(struct inode *inode, loff_t *pos)
+int hpfs_add_pos(struct inode *inode, loff_t *pos)
 {
 	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
 	int i = 0;
@@ -29,11 +29,12 @@
 
 	if (hpfs_inode->i_rddir_off)
 		for (; hpfs_inode->i_rddir_off[i]; i++)
-			if (hpfs_inode->i_rddir_off[i] == pos) return;
+			if (hpfs_inode->i_rddir_off[i] == pos)
+				return 0;
 	if (!(i&0x0f)) {
 		if (!(ppos = kmalloc((i+0x11) * sizeof(loff_t*), GFP_NOFS))) {
 			pr_err("out of memory for position list\n");
-			return;
+			return -ENOMEM;
 		}
 		if (hpfs_inode->i_rddir_off) {
 			memcpy(ppos, hpfs_inode->i_rddir_off, i * sizeof(loff_t));
@@ -43,6 +44,7 @@
 	}
 	hpfs_inode->i_rddir_off[i] = pos;
 	hpfs_inode->i_rddir_off[i + 1] = NULL;
+	return 0;
 }
 
 void hpfs_del_pos(struct inode *inode, loff_t *pos)
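
hpfs_add_pos() now reports allocation failure instead of silently dropping the position hook, so callers (as in the dir.c hunks above) can unwind. A tiny sketch of the new call pattern, with an illustrative wrapper name that is not part of the patch:

static int example_track_dir_pos(struct inode *inode, struct file *file)
{
	int ret = hpfs_add_pos(inode, &file->f_pos);	/* 0 or -ENOMEM now */

	if (unlikely(ret < 0))
		return ret;	/* abort readdir rather than lose the hook */
	return 0;
}
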
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 975654a..aebb78f 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -242,7 +242,7 @@
 
 /* dnode.c */
 
-void hpfs_add_pos(struct inode *, loff_t *);
+int hpfs_add_pos(struct inode *, loff_t *);
 void hpfs_del_pos(struct inode *, loff_t *);
 struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *,
 				const unsigned char *, unsigned, secno);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e1f465a..4ea71eb 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -213,12 +213,12 @@
 	int i, chunksize;
 
 	/* Find which 4k chunk and offset with in that chunk */
-	i = offset >> PAGE_CACHE_SHIFT;
-	offset = offset & ~PAGE_CACHE_MASK;
+	i = offset >> PAGE_SHIFT;
+	offset = offset & ~PAGE_MASK;
 
 	while (size) {
 		size_t n;
-		chunksize = PAGE_CACHE_SIZE;
+		chunksize = PAGE_SIZE;
 		if (offset)
 			chunksize -= offset;
 		if (chunksize > size)
@@ -237,7 +237,7 @@
 /*
  * Support for read() - Find the page attached to f_mapping and copy out the
  * data. Its *very* similar to do_generic_mapping_read(), we can't use that
- * since it has PAGE_CACHE_SIZE assumptions.
+ * since it has PAGE_SIZE assumptions.
  */
 static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
@@ -285,7 +285,7 @@
 			 * We have the page, copy it to user space buffer.
 			 */
 			copied = hugetlbfs_read_actor(page, offset, to, nr);
-			page_cache_release(page);
+			put_page(page);
 		}
 		offset += copied;
 		retval += copied;
diff --git a/fs/inode.c b/fs/inode.c
index 69b8b52..4ccbc21 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -151,6 +151,7 @@
 	inode->i_bdev = NULL;
 	inode->i_cdev = NULL;
 	inode->i_link = NULL;
+	inode->i_dir_seq = 0;
 	inode->i_rdev = 0;
 	inode->dirtied_when = 0;
 
@@ -165,8 +166,8 @@
 	spin_lock_init(&inode->i_lock);
 	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
 
-	mutex_init(&inode->i_mutex);
-	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
+	init_rwsem(&inode->i_rwsem);
+	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);
 
 	atomic_set(&inode->i_dio_count, 0);
 
@@ -238,9 +239,9 @@
 	}
 
 #ifdef CONFIG_FS_POSIX_ACL
-	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
+	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
 		posix_acl_release(inode->i_acl);
-	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
+	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
 		posix_acl_release(inode->i_default_acl);
 #endif
 	this_cpu_dec(nr_inodes);
@@ -924,13 +925,13 @@
 		struct file_system_type *type = inode->i_sb->s_type;
 
 		/* Set new key only if filesystem hasn't already changed it */
-		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
+		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
 			/*
 			 * ensure nobody is actually holding i_mutex
 			 */
-			mutex_destroy(&inode->i_mutex);
-			mutex_init(&inode->i_mutex);
-			lockdep_set_class(&inode->i_mutex,
+			// mutex_destroy(&inode->i_mutex);
+			init_rwsem(&inode->i_rwsem);
+			lockdep_set_class(&inode->i_rwsem,
 					  &type->i_mutex_dir_key);
 		}
 	}
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index f311bf0..2e4e834 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -26,7 +26,7 @@
 #include "zisofs.h"
 
 /* This should probably be global. */
-static char zisofs_sink_page[PAGE_CACHE_SIZE];
+static char zisofs_sink_page[PAGE_SIZE];
 
 /*
  * This contains the zlib memory allocation and the mutex for the
@@ -70,11 +70,11 @@
 		for ( i = 0 ; i < pcount ; i++ ) {
 			if (!pages[i])
 				continue;
-			memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE);
+			memset(page_address(pages[i]), 0, PAGE_SIZE);
 			flush_dcache_page(pages[i]);
 			SetPageUptodate(pages[i]);
 		}
-		return ((loff_t)pcount) << PAGE_CACHE_SHIFT;
+		return ((loff_t)pcount) << PAGE_SHIFT;
 	}
 
 	/* Because zlib is not thread-safe, do all the I/O at the top. */
@@ -121,11 +121,11 @@
 			if (pages[curpage]) {
 				stream.next_out = page_address(pages[curpage])
 						+ poffset;
-				stream.avail_out = PAGE_CACHE_SIZE - poffset;
+				stream.avail_out = PAGE_SIZE - poffset;
 				poffset = 0;
 			} else {
 				stream.next_out = (void *)&zisofs_sink_page;
-				stream.avail_out = PAGE_CACHE_SIZE;
+				stream.avail_out = PAGE_SIZE;
 			}
 		}
 		if (!stream.avail_in) {
@@ -220,14 +220,14 @@
 	 * pages with the data we have anyway...
 	 */
 	start_off = page_offset(pages[full_page]);
-	end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size);
+	end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size);
 
 	cstart_block = start_off >> zisofs_block_shift;
 	cend_block = (end_off + (1 << zisofs_block_shift) - 1)
 			>> zisofs_block_shift;
 
-	WARN_ON(start_off - (full_page << PAGE_CACHE_SHIFT) !=
-		((cstart_block << zisofs_block_shift) & PAGE_CACHE_MASK));
+	WARN_ON(start_off - (full_page << PAGE_SHIFT) !=
+		((cstart_block << zisofs_block_shift) & PAGE_MASK));
 
 	/* Find the pointer to this specific chunk */
 	/* Note: we're not using isonum_731() here because the data is known aligned */
@@ -260,10 +260,10 @@
 		ret = zisofs_uncompress_block(inode, block_start, block_end,
 					      pcount, pages, poffset, &err);
 		poffset += ret;
-		pages += poffset >> PAGE_CACHE_SHIFT;
-		pcount -= poffset >> PAGE_CACHE_SHIFT;
-		full_page -= poffset >> PAGE_CACHE_SHIFT;
-		poffset &= ~PAGE_CACHE_MASK;
+		pages += poffset >> PAGE_SHIFT;
+		pcount -= poffset >> PAGE_SHIFT;
+		full_page -= poffset >> PAGE_SHIFT;
+		poffset &= ~PAGE_MASK;
 
 		if (err) {
 			brelse(bh);
@@ -282,7 +282,7 @@
 
 	if (poffset && *pages) {
 		memset(page_address(*pages) + poffset, 0,
-		       PAGE_CACHE_SIZE - poffset);
+		       PAGE_SIZE - poffset);
 		flush_dcache_page(*pages);
 		SetPageUptodate(*pages);
 	}
@@ -302,12 +302,12 @@
 	int i, pcount, full_page;
 	unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
 	unsigned int zisofs_pages_per_cblock =
-		PAGE_CACHE_SHIFT <= zisofs_block_shift ?
-		(1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0;
+		PAGE_SHIFT <= zisofs_block_shift ?
+		(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
 	struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
 	pgoff_t index = page->index, end_index;
 
-	end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	/*
 	 * If this page is wholly outside i_size we just return zero;
 	 * do_generic_file_read() will handle this for us
@@ -318,7 +318,7 @@
 		return 0;
 	}
 
-	if (PAGE_CACHE_SHIFT <= zisofs_block_shift) {
+	if (PAGE_SHIFT <= zisofs_block_shift) {
 		/* We have already been given one page, this is the one
 		   we must do. */
 		full_page = index & (zisofs_pages_per_cblock - 1);
@@ -351,7 +351,7 @@
 			kunmap(pages[i]);
 			unlock_page(pages[i]);
 			if (i != full_page)
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 		}
 	}			
 
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index b943cbd..e759961 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -58,7 +58,7 @@
 	std = sizeof(struct iso_directory_record) + de->name_len[0];
 	if (std & 1)
 		std++;
-	if ((*((unsigned char *) de) - std) != 32)
+	if (de->length[0] - std != 32)
 		return retnamlen;
 	chr = ((unsigned char *) de) + std;
 	if (strncmp(chr, "ARCHIMEDES", 10))
@@ -269,7 +269,7 @@
 {
 	.llseek = generic_file_llseek,
 	.read = generic_read_dir,
-	.iterate = isofs_readdir,
+	.iterate_shared = isofs_readdir,
 };
 
 /*
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index bcd2d41..131dedc 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1021,7 +1021,7 @@
 		 * the page with useless information without generating any
 		 * I/O errors.
 		 */
-		if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
+		if (b_off > ((inode->i_size + PAGE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
 			printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n",
 				__func__, b_off,
 				(unsigned long long)inode->i_size);
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 5384ceb..98b3eb7 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -203,6 +203,8 @@
 	int retnamlen = 0;
 	int truncate = 0;
 	int ret = 0;
+	char *p;
+	int len;
 
 	if (!ISOFS_SB(inode->i_sb)->s_rock)
 		return 0;
@@ -267,12 +269,17 @@
 					rr->u.NM.flags);
 				break;
 			}
-			if ((strlen(retname) + rr->len - 5) >= 254) {
+			len = rr->len - 5;
+			if (retnamlen + len >= 254) {
 				truncate = 1;
 				break;
 			}
-			strncat(retname, rr->u.NM.name, rr->len - 5);
-			retnamlen += rr->len - 5;
+			p = memchr(rr->u.NM.name, '\0', len);
+			if (unlikely(p))
+				len = p - rr->u.NM.name;
+			memcpy(retname + retnamlen, rr->u.NM.name, len);
+			retnamlen += len;
+			retname[retnamlen] = '\0';
 			break;
 		case SIG('R', 'E'):
 			kfree(rs.buffer);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 517f2de..2ad98d6 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -81,11 +81,11 @@
 	if (!trylock_page(page))
 		goto nope;
 
-	page_cache_get(page);
+	get_page(page);
 	__brelse(bh);
 	try_to_free_buffers(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return;
 
 nope:
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index de73a95..435f0b2 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2221,7 +2221,7 @@
 
 int jbd2_journal_blocks_per_page(struct inode *inode)
 {
-	return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+	return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 }
 
 /*
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 08a456b..805bc6b 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -303,7 +303,7 @@
  * Locate any valid recovery information from the journal and set up the
  * journal structures in memory to ignore it (presumably because the
  * caller has evidence that it is out of date).
- * This function does'nt appear to be exorted..
+ * This function doesn't appear to be exported..
  *
  * We perform one pass over the journal to allow us to tell the user how
  * much recovery information is being erased, and to let us initialise
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 01e4652d..2c56c3e 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -543,7 +543,7 @@
  *
  * Some transactions, such as large extends and truncates, can be done
  * atomically all at once or in several stages.  The operation requests
- * a credit for a number of buffer modications in advance, but can
+ * a credit for a number of buffer modifications in advance, but can
  * extend its credit if it needs more.
  *
  * jbd2_journal_extend tries to give the running handle more buffer credits.
@@ -627,7 +627,7 @@
  * If the jbd2_journal_extend() call above fails to grant new buffer credits
  * to a running handle, a call to jbd2_journal_restart will commit the
  * handle's transaction so far and reattach the handle to a new
- * transaction capabable of guaranteeing the requested number of
+ * transaction capable of guaranteeing the requested number of
  * credits. We preserve reserved handle if there's any attached to the
  * passed in handle.
  */
@@ -1586,7 +1586,7 @@
 
 /**
  * int jbd2_journal_stop() - complete a transaction
- * @handle: tranaction to complete.
+ * @handle: transaction to complete.
  *
  * All done for a particular handle.
  *
@@ -2263,7 +2263,7 @@
 	struct buffer_head *head, *bh, *next;
 	unsigned int stop = offset + length;
 	unsigned int curr_off = 0;
-	int partial_page = (offset || length < PAGE_CACHE_SIZE);
+	int partial_page = (offset || length < PAGE_SIZE);
 	int may_free = 1;
 	int ret = 0;
 
@@ -2272,7 +2272,7 @@
 	if (!page_has_buffers(page))
 		return 0;
 
-	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
+	BUG_ON(stop > PAGE_SIZE || stop < length);
 
 	/* We will potentially be playing with lists other than just the
 	 * data lists (especially for journaled data mode), so be
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 2f7a3c0..bc2693d 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -203,8 +203,6 @@
 		acl = ERR_PTR(rc);
 	}
 	kfree(value);
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
 	return acl;
 }
 
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index 1090eb6..9d26b1b9 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -95,15 +95,15 @@
 			   rather than mucking around with actually reading the node
 			   and checking the compression type, which is the real way
 			   to tell a hole node. */
-			if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag)
-					&& frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
+			if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag)
+					&& frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) {
 				JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n",
 					ref_offset(fn->raw));
 				bitched = 1;
 			}
 
-			if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag)
-					&& frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
+			if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag)
+					&& frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) {
 				JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n",
 				       ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
 				bitched = 1;
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 30c4c9e..84c4bf3 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -40,7 +40,7 @@
 const struct file_operations jffs2_dir_operations =
 {
 	.read =		generic_read_dir,
-	.iterate =	jffs2_readdir,
+	.iterate_shared=jffs2_readdir,
 	.unlocked_ioctl=jffs2_ioctl,
 	.fsync =	jffs2_fsync,
 	.llseek =	generic_file_llseek,
@@ -241,7 +241,7 @@
 
 static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry)
 {
-	struct jffs2_sb_info *c = JFFS2_SB_INFO(d_inode(old_dentry)->i_sb);
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_sb);
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(old_dentry));
 	struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
 	int ret;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index cad86ba..0e62dec 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -87,14 +87,15 @@
 	int ret;
 
 	jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
-		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT);
+		  __func__, inode->i_ino, pg->index << PAGE_SHIFT);
 
 	BUG_ON(!PageLocked(pg));
 
 	pg_buf = kmap(pg);
 	/* FIXME: Can kmap fail? */
 
-	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
+				     PAGE_SIZE);
 
 	if (ret) {
 		ClearPageUptodate(pg);
@@ -137,8 +138,8 @@
 	struct page *pg;
 	struct inode *inode = mapping->host;
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
+	uint32_t pageofs = index << PAGE_SHIFT;
 	int ret = 0;
 
 	pg = grab_cache_page_write_begin(mapping, index, flags);
@@ -230,7 +231,7 @@
 
 out_page:
 	unlock_page(pg);
-	page_cache_release(pg);
+	put_page(pg);
 	return ret;
 }
 
@@ -245,14 +246,14 @@
 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
 	struct jffs2_raw_inode *ri;
-	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned start = pos & (PAGE_SIZE - 1);
 	unsigned end = start + copied;
 	unsigned aligned_start = start & ~3;
 	int ret = 0;
 	uint32_t writtenlen = 0;
 
 	jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
-		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT,
+		  __func__, inode->i_ino, pg->index << PAGE_SHIFT,
 		  start, end, pg->flags);
 
 	/* We need to avoid deadlock with page_cache_read() in
@@ -261,7 +262,7 @@
 	   to re-lock it. */
 	BUG_ON(!PageUptodate(pg));
 
-	if (end == PAGE_CACHE_SIZE) {
+	if (end == PAGE_SIZE) {
 		/* When writing out the end of a page, write out the
 		   _whole_ page. This helps to reduce the number of
 		   nodes in files which have many short writes, like
@@ -275,7 +276,7 @@
 		jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
 			  __func__);
 		unlock_page(pg);
-		page_cache_release(pg);
+		put_page(pg);
 		return -ENOMEM;
 	}
 
@@ -292,7 +293,7 @@
 	kmap(pg);
 
 	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
-				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
+				      (pg->index << PAGE_SHIFT) + aligned_start,
 				      end - aligned_start, &writtenlen);
 
 	kunmap(pg);
@@ -329,6 +330,6 @@
 	jffs2_dbg(1, "%s() returning %d\n",
 		  __func__, writtenlen > 0 ? writtenlen : ret);
 	unlock_page(pg);
-	page_cache_release(pg);
+	put_page(pg);
 	return writtenlen > 0 ? writtenlen : ret;
 }
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index bead25a..ae2ebb2 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -586,8 +586,8 @@
 		goto out_root;
 
 	sb->s_maxbytes = 0xFFFFFFFF;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = JFFS2_SUPER_MAGIC;
 	if (!(sb->s_flags & MS_RDONLY))
 		jffs2_start_garbage_collect_thread(c);
@@ -685,7 +685,7 @@
 	struct inode *inode = OFNI_EDONI_2SFFJ(f);
 	struct page *pg;
 
-	pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+	pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
 			     (void *)jffs2_do_readpage_unlock, inode);
 	if (IS_ERR(pg))
 		return (void *)pg;
@@ -701,7 +701,7 @@
 	struct page *pg = (void *)*priv;
 
 	kunmap(pg);
-	page_cache_release(pg);
+	put_page(pg);
 }
 
 static int jffs2_flash_setup(struct jffs2_sb_info *c) {
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 7e553f2..9ed0f26 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -552,7 +552,7 @@
 				goto upnout;
 		}
 		/* We found a datanode. Do the GC */
-		if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
+		if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) {
 			/* It crosses a page boundary. Therefore, it must be a hole. */
 			ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
 		} else {
@@ -1192,8 +1192,8 @@
 		struct jffs2_node_frag *frag;
 		uint32_t min, max;
 
-		min = start & ~(PAGE_CACHE_SIZE-1);
-		max = min + PAGE_CACHE_SIZE;
+		min = start & ~(PAGE_SIZE-1);
+		max = min + PAGE_SIZE;
 
 		frag = jffs2_lookup_node_frag(&f->fragtree, start);
 
@@ -1351,7 +1351,7 @@
 		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
 		datalen = end - offset;
 
-		writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
+		writebuf = pg_ptr + (offset & (PAGE_SIZE -1));
 
 		comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
 
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 9a5449b..b86c78d 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -90,7 +90,7 @@
 
 	/* If the last fragment starts at the RAM page boundary, it is
 	 * REF_PRISTINE irrespective of its size. */
-	if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
+	if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
 		dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
 			frag->ofs, frag->ofs + frag->size);
 		frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
@@ -237,7 +237,7 @@
 		   If so, both 'this' and the new node get marked REF_NORMAL so
 		   the GC can take a look.
 		*/
-		if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
+		if (lastend && (lastend-1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
 			if (this->node)
 				mark_ref_normal(this->node->raw);
 			mark_ref_normal(newfrag->node->raw);
@@ -382,7 +382,7 @@
 
 	/* If we now share a page with other nodes, mark either previous
 	   or next node REF_NORMAL, as appropriate.  */
-	if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
+	if (newfrag->ofs & (PAGE_SIZE-1)) {
 		struct jffs2_node_frag *prev = frag_prev(newfrag);
 
 		mark_ref_normal(fn->raw);
@@ -391,7 +391,7 @@
 			mark_ref_normal(prev->node->raw);
 	}
 
-	if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
+	if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) {
 		struct jffs2_node_frag *next = frag_next(newfrag);
 
 		if (next) {
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index 7a28fac..3ed9a4b4 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -49,10 +49,10 @@
 
 /* ---- XATTR Handler for "security.*" ----------------- */
 static int jffs2_security_getxattr(const struct xattr_handler *handler,
-				   struct dentry *dentry, const char *name,
-				   void *buffer, size_t size)
+				   struct dentry *unused, struct inode *inode,
+				   const char *name, void *buffer, size_t size)
 {
-	return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_SECURITY,
+	return do_jffs2_getxattr(inode, JFFS2_XPREFIX_SECURITY,
 				 name, buffer, size);
 }
 
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 0a9a114..5ef21f4 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -147,7 +147,7 @@
 	JFFS2_DEBUG("Parent of directory ino #%u is #%u\n",
 		    f->inocache->ino, pino);
 
-	return d_obtain_alias(jffs2_iget(d_inode(child)->i_sb, pino));
+	return d_obtain_alias(jffs2_iget(child->d_sb, pino));
 }
 
 static const struct export_operations jffs2_export_ops = {
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index b634de4c..7fb187a 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -172,8 +172,8 @@
 	   beginning of a page and runs to the end of the file, or if
 	   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
 	*/
-	if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
-	    ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
+	if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) ||
+	    ( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) &&
 	      (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) ==  je32_to_cpu(ri->isize)))) {
 		flash_ofs |= REF_PRISTINE;
 	} else {
@@ -366,7 +366,8 @@
 			break;
 		}
 		mutex_lock(&f->sem);
-		datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
+		datalen = min_t(uint32_t, writelen,
+				PAGE_SIZE - (offset & (PAGE_SIZE-1)));
 		cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
 
 		comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
index b2555ef..4ebecff 100644
--- a/fs/jffs2/xattr_trusted.c
+++ b/fs/jffs2/xattr_trusted.c
@@ -17,10 +17,10 @@
 #include "nodelist.h"
 
 static int jffs2_trusted_getxattr(const struct xattr_handler *handler,
-				  struct dentry *dentry, const char *name,
-				  void *buffer, size_t size)
+				  struct dentry *unused, struct inode *inode,
+				  const char *name, void *buffer, size_t size)
 {
-	return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_TRUSTED,
+	return do_jffs2_getxattr(inode, JFFS2_XPREFIX_TRUSTED,
 				 name, buffer, size);
 }
 
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
index 539bd63..bce249e 100644
--- a/fs/jffs2/xattr_user.c
+++ b/fs/jffs2/xattr_user.c
@@ -17,10 +17,10 @@
 #include "nodelist.h"
 
 static int jffs2_user_getxattr(const struct xattr_handler *handler,
-			       struct dentry *dentry, const char *name,
-			       void *buffer, size_t size)
+			       struct dentry *unused, struct inode *inode,
+			       const char *name, void *buffer, size_t size)
 {
-	return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_USER,
+	return do_jffs2_getxattr(inode, JFFS2_XPREFIX_USER,
 				 name, buffer, size);
 }
 
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 4945685..21fa92b 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -34,10 +34,6 @@
 	int size;
 	char *value = NULL;
 
-	acl = get_cached_acl(inode, type);
-	if (acl != ACL_NOT_CACHED)
-		return acl;
-
 	switch(type) {
 		case ACL_TYPE_ACCESS:
 			ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
@@ -67,8 +63,6 @@
 		acl = posix_acl_from_xattr(&init_user_ns, value, size);
 	}
 	kfree(value);
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
 	return acl;
 }
 
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 4ce7735..7f1a585 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -140,10 +140,10 @@
 }
 
 const struct inode_operations jfs_file_inode_operations = {
-	.setxattr	= jfs_setxattr,
-	.getxattr	= jfs_getxattr,
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
 	.listxattr	= jfs_listxattr,
-	.removexattr	= jfs_removexattr,
+	.removexattr	= generic_removexattr,
 	.setattr	= jfs_setattr,
 #ifdef CONFIG_JFS_POSIX_ACL
 	.get_acl	= jfs_get_acl,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 9d9bae6..ad3e7b1 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -102,8 +102,8 @@
 		 * partitions and may think inode is dirty
 		 */
 		if (!special_file(inode->i_mode) && noisy) {
-			jfs_err("jfs_commit_inode(0x%p) called on "
-				   "read-only volume", inode);
+			jfs_err("jfs_commit_inode(0x%p) called on read-only volume",
+				inode);
 			jfs_err("Is remount racy?");
 			noisy--;
 		}
@@ -332,8 +332,7 @@
 	return generic_block_bmap(mapping, block, jfs_get_block);
 }
 
-static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			     loff_t offset)
+static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
@@ -341,7 +340,7 @@
 	size_t count = iov_iter_count(iter);
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block);
+	ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);
 
 	/*
 	 * In case of error extending write may have instantiated a few
@@ -349,7 +348,7 @@
 	 */
 	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
 		loff_t isize = i_size_read(inode);
-		loff_t end = offset + count;
+		loff_t end = iocb->ki_pos + count;
 
 		if (end > isize)
 			jfs_write_failed(mapping, end);
diff --git a/fs/jfs/jfs_discard.c b/fs/jfs/jfs_discard.c
index dfcd503..f76ff0a 100644
--- a/fs/jfs/jfs_discard.c
+++ b/fs/jfs/jfs_discard.c
@@ -49,14 +49,12 @@
 
 	r = sb_issue_discard(sb, blkno, nblocks, GFP_NOFS, 0);
 	if (unlikely(r != 0)) {
-		jfs_err("JFS: sb_issue_discard" \
-			"(%p, %llu, %llu, GFP_NOFS, 0) = %d => failed!\n",
+		jfs_err("JFS: sb_issue_discard(%p, %llu, %llu, GFP_NOFS, 0) = %d => failed!",
 			sb, (unsigned long long)blkno,
 			(unsigned long long)nblocks, r);
 	}
 
-	jfs_info("JFS: sb_issue_discard" \
-		"(%p, %llu, %llu, GFP_NOFS, 0) = %d\n",
+	jfs_info("JFS: sb_issue_discard(%p, %llu, %llu, GFP_NOFS, 0) = %d",
 		sb, (unsigned long long)blkno,
 		(unsigned long long)nblocks, r);
 
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index d88576e..de2bcb3 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -3072,8 +3072,7 @@
 			}
 			if (dirtab_slot.flag == DIR_INDEX_FREE) {
 				if (loop_count++ > JFS_IP(ip)->next_index) {
-					jfs_err("jfs_readdir detected "
-						   "infinite loop!");
+					jfs_err("jfs_readdir detected infinite loop!");
 					ctx->pos = DIREND;
 					return 0;
 				}
@@ -3151,8 +3150,7 @@
 				if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
 					return 0;
 			} else {
-				jfs_err("jfs_readdir called with "
-					"invalid offset!");
+				jfs_err("jfs_readdir called with invalid offset!");
 			}
 			dtoffset->pn = 1;
 			dtoffset->index = 0;
@@ -3165,8 +3163,8 @@
 		}
 
 		if ((rc = dtReadNext(ip, &ctx->pos, &btstack))) {
-			jfs_err("jfs_readdir: unexpected rc = %d "
-				"from dtReadNext", rc);
+			jfs_err("jfs_readdir: unexpected rc = %d from dtReadNext",
+				rc);
 			ctx->pos = DIREND;
 			return 0;
 		}
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index f321986..6aca224 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -534,8 +534,7 @@
 	/* read the page of fixed disk inode (AIT) in raw mode */
 	mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1);
 	if (mp == NULL) {
-		jfs_err("diWriteSpecial: failed to read aggregate inode "
-			"extent!");
+		jfs_err("diWriteSpecial: failed to read aggregate inode extent!");
 		return;
 	}
 
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index cf7936f..5e33cb9 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -151,7 +151,7 @@
 	jfs_inode->xtlid = 0;
 	jfs_set_inode_flags(inode);
 
-	jfs_info("ialloc returns inode = 0x%p\n", inode);
+	jfs_info("ialloc returns inode = 0x%p", inode);
 
 	return inode;
 
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index a270cb7..63759d7 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1094,7 +1094,7 @@
 		if (log->bdev->bd_dev == sbi->logdev) {
 			if (memcmp(log->uuid, sbi->loguuid,
 				   sizeof(log->uuid))) {
-				jfs_warn("wrong uuid on JFS journal\n");
+				jfs_warn("wrong uuid on JFS journal");
 				mutex_unlock(&jfs_log_mutex);
 				return -EINVAL;
 			}
@@ -1333,9 +1333,8 @@
 				rc = -EINVAL;
 				goto errout20;
 			}
-			jfs_info("lmLogInit: inline log:0x%p base:0x%Lx "
-				 "size:0x%x", log,
-				 (unsigned long long) log->base, log->size);
+			jfs_info("lmLogInit: inline log:0x%p base:0x%Lx size:0x%x",
+				 log, (unsigned long long)log->base, log->size);
 		} else {
 			if (memcmp(logsuper->uuid, log->uuid, 16)) {
 				jfs_warn("wrong uuid on JFS log device");
@@ -1343,9 +1342,8 @@
 			}
 			log->size = le32_to_cpu(logsuper->size);
 			log->l2bsize = le32_to_cpu(logsuper->l2bsize);
-			jfs_info("lmLogInit: external log:0x%p base:0x%Lx "
-				 "size:0x%x", log,
-				 (unsigned long long) log->base, log->size);
+			jfs_info("lmLogInit: external log:0x%p base:0x%Lx size:0x%x",
+				 log, (unsigned long long)log->base, log->size);
 		}
 
 		log->page = le32_to_cpu(logsuper->end) / LOGPSIZE;
@@ -2136,7 +2134,7 @@
 	struct bio *bio;
 	struct jfs_log *log = bp->l_log;
 
-	jfs_info("lbmStartIO\n");
+	jfs_info("lbmStartIO");
 
 	bio = bio_alloc(GFP_NOFS, 1);
 	bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index a3eb316..b60e015 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -80,7 +80,7 @@
 static struct kmem_cache *metapage_cache;
 static mempool_t *metapage_mempool;
 
-#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
+#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
 
 #if MPS_PER_PAGE > 1
 
@@ -316,7 +316,7 @@
 	struct metapage *mp;
 	unsigned int offset;
 
-	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 		mp = page_to_mp(page, offset);
 		if (mp && test_bit(META_io, &mp->flag)) {
 			if (mp->lsn)
@@ -366,12 +366,12 @@
 	int bad_blocks = 0;
 
 	page_start = (sector_t)page->index <<
-		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		     (PAGE_SHIFT - inode->i_blkbits);
 	BUG_ON(!PageLocked(page));
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
 
-	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 		mp = page_to_mp(page, offset);
 
 		if (!mp || !test_bit(META_dirty, &mp->flag))
@@ -416,7 +416,7 @@
 			bio = NULL;
 		} else
 			inc_io(page);
-		xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
+		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
 		pblock = metapage_get_blocks(inode, lblock, &xlen);
 		if (!pblock) {
 			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
@@ -485,7 +485,7 @@
 	struct inode *inode = page->mapping->host;
 	struct bio *bio = NULL;
 	int block_offset;
-	int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+	int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
 	sector_t page_start;	/* address of page in fs blocks */
 	sector_t pblock;
 	int xlen;
@@ -494,7 +494,7 @@
 
 	BUG_ON(!PageLocked(page));
 	page_start = (sector_t)page->index <<
-		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		     (PAGE_SHIFT - inode->i_blkbits);
 
 	block_offset = 0;
 	while (block_offset < blocks_per_page) {
@@ -542,7 +542,7 @@
 	int ret = 1;
 	int offset;
 
-	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 		mp = page_to_mp(page, offset);
 
 		if (!mp)
@@ -568,7 +568,7 @@
 static void metapage_invalidatepage(struct page *page, unsigned int offset,
 				    unsigned int length)
 {
-	BUG_ON(offset || length < PAGE_CACHE_SIZE);
+	BUG_ON(offset || length < PAGE_SIZE);
 
 	BUG_ON(PageWriteback(page));
 
@@ -599,10 +599,10 @@
 		 inode->i_ino, lblock, absolute);
 
 	l2bsize = inode->i_blkbits;
-	l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
+	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
 	page_index = lblock >> l2BlocksPerPage;
 	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
-	if ((page_offset + size) > PAGE_CACHE_SIZE) {
+	if ((page_offset + size) > PAGE_SIZE) {
 		jfs_err("MetaData crosses page boundary!!");
 		jfs_err("lblock = %lx, size  = %d", lblock, size);
 		dump_stack();
@@ -621,7 +621,7 @@
 		mapping = inode->i_mapping;
 	}
 
-	if (new && (PSIZE == PAGE_CACHE_SIZE)) {
+	if (new && (PSIZE == PAGE_SIZE)) {
 		page = grab_cache_page(mapping, page_index);
 		if (!page) {
 			jfs_err("grab_cache_page failed!");
@@ -693,7 +693,7 @@
 void grab_metapage(struct metapage * mp)
 {
 	jfs_info("grab_metapage: mp = 0x%p", mp);
-	page_cache_get(mp->page);
+	get_page(mp->page);
 	lock_page(mp->page);
 	mp->count++;
 	lock_metapage(mp);
@@ -706,12 +706,12 @@
 	jfs_info("force_metapage: mp = 0x%p", mp);
 	set_bit(META_forcewrite, &mp->flag);
 	clear_bit(META_sync, &mp->flag);
-	page_cache_get(page);
+	get_page(page);
 	lock_page(page);
 	set_page_dirty(page);
 	write_one_page(page, 1);
 	clear_bit(META_forcewrite, &mp->flag);
-	page_cache_release(page);
+	put_page(page);
 }
 
 void hold_metapage(struct metapage *mp)
@@ -726,7 +726,7 @@
 		unlock_page(mp->page);
 		return;
 	}
-	page_cache_get(mp->page);
+	get_page(mp->page);
 	mp->count++;
 	lock_metapage(mp);
 	unlock_page(mp->page);
@@ -746,7 +746,7 @@
 	assert(mp->count);
 	if (--mp->count || mp->nohomeok) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return;
 	}
 
@@ -764,13 +764,13 @@
 	drop_metapage(page, mp);
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 void __invalidate_metapages(struct inode *ip, s64 addr, int len)
 {
 	sector_t lblock;
-	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
+	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
 	int BlocksPerPage = 1 << l2BlocksPerPage;
 	/* All callers are interested in block device's mapping */
 	struct address_space *mapping =
@@ -788,7 +788,7 @@
 		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
 		if (!page)
 			continue;
-		for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
+		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
 			mp = page_to_mp(page, offset);
 			if (!mp)
 				continue;
@@ -803,7 +803,7 @@
 				remove_from_logsync(mp);
 		}
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index 337e9e5..a869fb4 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -106,7 +106,7 @@
 	lock_page(page);
 	if (!mp->nohomeok++) {
 		mark_metapage_dirty(mp);
-		page_cache_get(page);
+		get_page(page);
 		wait_on_page_writeback(page);
 	}
 	unlock_page(page);
@@ -128,7 +128,7 @@
 static inline void _metapage_homeok(struct metapage *mp)
 {
 	if (!--mp->nohomeok)
-		page_cache_release(mp->page);
+		put_page(mp->page);
 }
 
 static inline void metapage_homeok(struct metapage *mp)
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index d595856..eddf2b6 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -1764,7 +1764,7 @@
 		if (lwm == next)
 			goto out;
 		if (lwm > next) {
-			jfs_err("xtLog: lwm > next\n");
+			jfs_err("xtLog: lwm > next");
 			goto out;
 		}
 		tlck->flag |= tlckUPDATEMAP;
@@ -1798,8 +1798,8 @@
 			xadlock->xdlist = &p->xad[lwm];
 			tblk->xflag &= ~COMMIT_LAZY;
 		}
-		jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d "
-			 "count:%d", tlck->ip, mp, tlck, lwm, xadlock->count);
+		jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d count:%d",
+			 tlck->ip, mp, tlck, lwm, xadlock->count);
 
 		maplock->index = 1;
 
@@ -2025,8 +2025,7 @@
 			xadlock->count = next - lwm;
 			xadlock->xdlist = &p->xad[lwm];
 
-			jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d "
-				 "lwm:%d next:%d",
+			jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d lwm:%d next:%d",
 				 tlck->ip, mp, xadlock->count, lwm, next);
 			maplock->index++;
 			xadlock++;
@@ -2047,8 +2046,8 @@
 			pxdlock->count = 1;
 			pxdlock->pxd = pxd;
 
-			jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d "
-				 "hwm:%d", ip, mp, pxdlock->count, hwm);
+			jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d hwm:%d",
+				 ip, mp, pxdlock->count, hwm);
 			maplock->index++;
 			xadlock++;
 		}
@@ -2066,8 +2065,7 @@
 			xadlock->count = hwm - next + 1;
 			xadlock->xdlist = &p->xad[next];
 
-			jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d "
-				 "next:%d hwm:%d",
+			jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d next:%d hwm:%d",
 				 tlck->ip, mp, xadlock->count, next, hwm);
 			maplock->index++;
 		}
@@ -2523,8 +2521,7 @@
 					xlen = lengthXAD(xad);
 					dbUpdatePMap(ipbmap, true, xaddr,
 						     (s64) xlen, tblk);
-					jfs_info("freePMap: xaddr:0x%lx "
-						 "xlen:%d",
+					jfs_info("freePMap: xaddr:0x%lx xlen:%d",
 						 (ulong) xaddr, xlen);
 				}
 			}
@@ -2814,7 +2811,7 @@
 	if (!list_empty(&TxAnchor.unlock_queue))
 		jfs_err("jfs_lazycommit being killed w/pending transactions!");
 	else
-		jfs_info("jfs_lazycommit being killed\n");
+		jfs_info("jfs_lazycommit being killed");
 	return 0;
 }
 
diff --git a/fs/jfs/jfs_xattr.h b/fs/jfs/jfs_xattr.h
index e8d717d..561f6af 100644
--- a/fs/jfs/jfs_xattr.h
+++ b/fs/jfs/jfs_xattr.h
@@ -19,6 +19,8 @@
 #ifndef H_JFS_XATTR
 #define H_JFS_XATTR
 
+#include <linux/xattr.h>
+
 /*
  * jfs_ea_list describe the on-disk format of the extended attributes.
  * I know the null-terminator is redundant since namelen is stored, but
@@ -54,12 +56,8 @@
 
 extern int __jfs_setxattr(tid_t, struct inode *, const char *, const void *,
 			  size_t, int);
-extern int jfs_setxattr(struct dentry *, const char *, const void *, size_t,
-			int);
 extern ssize_t __jfs_getxattr(struct inode *, const char *, void *, size_t);
-extern ssize_t jfs_getxattr(struct dentry *, const char *, void *, size_t);
 extern ssize_t jfs_listxattr(struct dentry *, char *, size_t);
-extern int jfs_removexattr(struct dentry *, const char *);
 
 extern const struct xattr_handler *jfs_xattr_handlers[];
 
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 701f893..539dedd 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1225,8 +1225,8 @@
 		rc = dtSearch(new_dir, &new_dname, &ino, &btstack,
 			      JFS_CREATE);
 		if (rc) {
-			jfs_err("jfs_rename didn't expect dtSearch to fail "
-				"w/rc = %d", rc);
+			jfs_err("jfs_rename didn't expect dtSearch to fail w/rc = %d",
+				rc);
 			goto out_tx;
 		}
 
@@ -1524,7 +1524,7 @@
 	parent_ino =
 		le32_to_cpu(JFS_IP(d_inode(dentry))->i_dtroot.header.idotdot);
 
-	return d_obtain_alias(jfs_iget(d_inode(dentry)->i_sb, parent_ino));
+	return d_obtain_alias(jfs_iget(dentry->d_sb, parent_ino));
 }
 
 const struct inode_operations jfs_dir_inode_operations = {
@@ -1537,10 +1537,10 @@
 	.rmdir		= jfs_rmdir,
 	.mknod		= jfs_mknod,
 	.rename		= jfs_rename,
-	.setxattr	= jfs_setxattr,
-	.getxattr	= jfs_getxattr,
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
 	.listxattr	= jfs_listxattr,
-	.removexattr	= jfs_removexattr,
+	.removexattr	= generic_removexattr,
 	.setattr	= jfs_setattr,
 #ifdef CONFIG_JFS_POSIX_ACL
 	.get_acl	= jfs_get_acl,
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4f5d85b..cec8814 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -84,7 +84,7 @@
 		panic("JFS (device %s): panic forced after error\n",
 			sb->s_id);
 	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
-		jfs_err("ERROR: (device %s): remounting filesystem as read-only\n",
+		jfs_err("ERROR: (device %s): remounting filesystem as read-only",
 			sb->s_id);
 		sb->s_flags |= MS_RDONLY;
 	}
@@ -596,7 +596,7 @@
 	 * Page cache is indexed by long.
 	 * I would use MAX_LFS_FILESIZE, but it's only half as big
 	 */
-	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1,
+	sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1,
 			     (u64)sb->s_maxbytes);
 #endif
 	sb->s_time_gran = 1;
@@ -641,7 +641,7 @@
 		}
 		rc = updateSuper(sb, FM_CLEAN);
 		if (rc) {
-			jfs_err("jfs_freeze: updateSuper failed\n");
+			jfs_err("jfs_freeze: updateSuper failed");
 			/*
 			 * Don't fail here. Everything succeeded except
 			 * marking the superblock clean, so there's really
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c
index f8db4fd..c94c7e4 100644
--- a/fs/jfs/symlink.c
+++ b/fs/jfs/symlink.c
@@ -25,19 +25,19 @@
 	.readlink	= generic_readlink,
 	.get_link	= simple_get_link,
 	.setattr	= jfs_setattr,
-	.setxattr	= jfs_setxattr,
-	.getxattr	= jfs_getxattr,
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
 	.listxattr	= jfs_listxattr,
-	.removexattr	= jfs_removexattr,
+	.removexattr	= generic_removexattr,
 };
 
 const struct inode_operations jfs_symlink_inode_operations = {
 	.readlink	= generic_readlink,
 	.get_link	= page_get_link,
 	.setattr	= jfs_setattr,
-	.setxattr	= jfs_setxattr,
-	.getxattr	= jfs_getxattr,
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
 	.listxattr	= jfs_listxattr,
-	.removexattr	= jfs_removexattr,
+	.removexattr	= generic_removexattr,
 };
 
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 48b15a6..beb182b 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -86,6 +86,14 @@
 #define EA_MALLOC	0x0008
 
 
+/*
+ * Mapping of on-disk attribute names: for on-disk attribute names with an
+ * unknown prefix (not "system.", "user.", "security.", or "trusted."), the
+ * prefix "os2." is prepended.  On the way back to disk, "os2." prefixes are
+ * stripped and we make sure that the remaining name does not start with one
+ * of the known prefixes.
+ */
+
 static int is_known_namespace(const char *name)
 {
 	if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
@@ -97,29 +105,19 @@
 	return true;
 }
 
-/*
- * These three routines are used to recognize on-disk extended attributes
- * that are in a recognized namespace.  If the attribute is not recognized,
- * "os2." is prepended to the name
- */
-static int is_os2_xattr(struct jfs_ea *ea)
-{
-	return !is_known_namespace(ea->name);
-}
-
 static inline int name_size(struct jfs_ea *ea)
 {
-	if (is_os2_xattr(ea))
-		return ea->namelen + XATTR_OS2_PREFIX_LEN;
-	else
+	if (is_known_namespace(ea->name))
 		return ea->namelen;
+	else
+		return ea->namelen + XATTR_OS2_PREFIX_LEN;
 }
 
 static inline int copy_name(char *buffer, struct jfs_ea *ea)
 {
 	int len = ea->namelen;
 
-	if (is_os2_xattr(ea)) {
+	if (!is_known_namespace(ea->name)) {
 		memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
 		buffer += XATTR_OS2_PREFIX_LEN;
 		len += XATTR_OS2_PREFIX_LEN;
@@ -665,35 +663,6 @@
 	return 0;
 }
 
-/*
- * Most of the permission checking is done by xattr_permission in the vfs.
- * We also need to verify that this is a namespace that we recognize.
- */
-static int can_set_xattr(struct inode *inode, const char *name,
-			 const void *value, size_t value_len)
-{
-	if (!strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) {
-		/*
-		 * This makes sure that we aren't trying to set an
-		 * attribute in a different namespace by prefixing it
-		 * with "os2."
-		 */
-		if (is_known_namespace(name + XATTR_OS2_PREFIX_LEN))
-			return -EOPNOTSUPP;
-		return 0;
-	}
-
-	/*
-	 * Don't allow setting an attribute in an unknown namespace.
-	 */
-	if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
-	    strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
-	    strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
-		return -EOPNOTSUPP;
-
-	return 0;
-}
-
 int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name,
 		   const void *value, size_t value_len, int flags)
 {
@@ -704,21 +673,10 @@
 	int xattr_size;
 	int new_size;
 	int namelen = strlen(name);
-	char *os2name = NULL;
 	int found = 0;
 	int rc;
 	int length;
 
-	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
-		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
-				  GFP_KERNEL);
-		if (!os2name)
-			return -ENOMEM;
-		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
-		name = os2name;
-		namelen -= XATTR_OS2_PREFIX_LEN;
-	}
-
 	down_write(&JFS_IP(inode)->xattr_sem);
 
 	xattr_size = ea_get(inode, &ea_buf, 0);
@@ -841,44 +799,6 @@
       out:
 	up_write(&JFS_IP(inode)->xattr_sem);
 
-	kfree(os2name);
-
-	return rc;
-}
-
-int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
-		 size_t value_len, int flags)
-{
-	struct inode *inode = d_inode(dentry);
-	struct jfs_inode_info *ji = JFS_IP(inode);
-	int rc;
-	tid_t tid;
-
-	/*
-	 * If this is a request for a synthetic attribute in the system.*
-	 * namespace use the generic infrastructure to resolve a handler
-	 * for it via sb->s_xattr.
-	 */
-	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-		return generic_setxattr(dentry, name, value, value_len, flags);
-
-	if ((rc = can_set_xattr(inode, name, value, value_len)))
-		return rc;
-
-	if (value == NULL) {	/* empty EA, do not remove */
-		value = "";
-		value_len = 0;
-	}
-
-	tid = txBegin(inode->i_sb, 0);
-	mutex_lock(&ji->commit_mutex);
-	rc = __jfs_setxattr(tid, d_inode(dentry), name, value, value_len,
-			    flags);
-	if (!rc)
-		rc = txCommit(tid, 1, &inode, 0);
-	txEnd(tid);
-	mutex_unlock(&ji->commit_mutex);
-
 	return rc;
 }
 
@@ -933,37 +853,6 @@
 	return size;
 }
 
-ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
-		     size_t buf_size)
-{
-	int err;
-
-	/*
-	 * If this is a request for a synthetic attribute in the system.*
-	 * namespace use the generic infrastructure to resolve a handler
-	 * for it via sb->s_xattr.
-	 */
-	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-		return generic_getxattr(dentry, name, data, buf_size);
-
-	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
-		/*
-		 * skip past "os2." prefix
-		 */
-		name += XATTR_OS2_PREFIX_LEN;
-		/*
-		 * Don't allow retrieving properly prefixed attributes
-		 * by prepending them with "os2."
-		 */
-		if (is_known_namespace(name))
-			return -EOPNOTSUPP;
-	}
-
-	err = __jfs_getxattr(d_inode(dentry), name, data, buf_size);
-
-	return err;
-}
-
 /*
  * No special permissions are needed to list attributes except for trusted.*
  */
@@ -1027,27 +916,16 @@
 	return size;
 }
 
-int jfs_removexattr(struct dentry *dentry, const char *name)
+static int __jfs_xattr_set(struct inode *inode, const char *name,
+			   const void *value, size_t size, int flags)
 {
-	struct inode *inode = d_inode(dentry);
 	struct jfs_inode_info *ji = JFS_IP(inode);
-	int rc;
 	tid_t tid;
-
-	/*
-	 * If this is a request for a synthetic attribute in the system.*
-	 * namespace use the generic infrastructure to resolve a handler
-	 * for it via sb->s_xattr.
-	 */
-	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
-		return generic_removexattr(dentry, name);
-
-	if ((rc = can_set_xattr(inode, name, NULL, 0)))
-		return rc;
+	int rc;
 
 	tid = txBegin(inode->i_sb, 0);
 	mutex_lock(&ji->commit_mutex);
-	rc = __jfs_setxattr(tid, d_inode(dentry), name, NULL, 0, XATTR_REPLACE);
+	rc = __jfs_setxattr(tid, inode, name, value, size, flags);
 	if (!rc)
 		rc = txCommit(tid, 1, &inode, 0);
 	txEnd(tid);
@@ -1056,15 +934,77 @@
 	return rc;
 }
 
-/*
- * List of handlers for synthetic system.* attributes.  All real ondisk
- * attributes are handled directly.
- */
+static int jfs_xattr_get(const struct xattr_handler *handler,
+			 struct dentry *unused, struct inode *inode,
+			 const char *name, void *value, size_t size)
+{
+	name = xattr_full_name(handler, name);
+	return __jfs_getxattr(inode, name, value, size);
+}
+
+static int jfs_xattr_set(const struct xattr_handler *handler,
+			 struct dentry *dentry, const char *name,
+			 const void *value, size_t size, int flags)
+{
+	struct inode *inode = d_inode(dentry);
+
+	name = xattr_full_name(handler, name);
+	return __jfs_xattr_set(inode, name, value, size, flags);
+}
+
+static int jfs_xattr_get_os2(const struct xattr_handler *handler,
+			     struct dentry *unused, struct inode *inode,
+			     const char *name, void *value, size_t size)
+{
+	if (is_known_namespace(name))
+		return -EOPNOTSUPP;
+	return __jfs_getxattr(inode, name, value, size);
+}
+
+static int jfs_xattr_set_os2(const struct xattr_handler *handler,
+			     struct dentry *dentry, const char *name,
+			     const void *value, size_t size, int flags)
+{
+	struct inode *inode = d_inode(dentry);
+
+	if (is_known_namespace(name))
+		return -EOPNOTSUPP;
+	return __jfs_xattr_set(inode, name, value, size, flags);
+}
+
+static const struct xattr_handler jfs_user_xattr_handler = {
+	.prefix = XATTR_USER_PREFIX,
+	.get = jfs_xattr_get,
+	.set = jfs_xattr_set,
+};
+
+static const struct xattr_handler jfs_os2_xattr_handler = {
+	.prefix = XATTR_OS2_PREFIX,
+	.get = jfs_xattr_get_os2,
+	.set = jfs_xattr_set_os2,
+};
+
+static const struct xattr_handler jfs_security_xattr_handler = {
+	.prefix = XATTR_SECURITY_PREFIX,
+	.get = jfs_xattr_get,
+	.set = jfs_xattr_set,
+};
+
+static const struct xattr_handler jfs_trusted_xattr_handler = {
+	.prefix = XATTR_TRUSTED_PREFIX,
+	.get = jfs_xattr_get,
+	.set = jfs_xattr_set,
+};
+
 const struct xattr_handler *jfs_xattr_handlers[] = {
 #ifdef CONFIG_JFS_POSIX_ACL
 	&posix_acl_access_xattr_handler,
 	&posix_acl_default_xattr_handler,
 #endif
+	&jfs_os2_xattr_handler,
+	&jfs_user_xattr_handler,
+	&jfs_security_xattr_handler,
+	&jfs_trusted_xattr_handler,
 	NULL,
 };
 
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 03b688d..68a4431 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -153,9 +153,9 @@
 	p = buf + len + nlen;
 	*p = '\0';
 	for (kn = kn_to; kn != common; kn = kn->parent) {
-		nlen = strlen(kn->name);
-		p -= nlen;
-		memcpy(p, kn->name, nlen);
+		size_t tmp = strlen(kn->name);
+		p -= tmp;
+		memcpy(p, kn->name, tmp);
 		*(--p) = '/';
 	}
 
@@ -1643,22 +1643,9 @@
 	return 0;
 }
 
-static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset,
-				    int whence)
-{
-	struct inode *inode = file_inode(file);
-	loff_t ret;
-
-	inode_lock(inode);
-	ret = generic_file_llseek(file, offset, whence);
-	inode_unlock(inode);
-
-	return ret;
-}
-
 const struct file_operations kernfs_dir_fops = {
 	.read		= generic_read_dir,
-	.iterate	= kernfs_fop_readdir,
+	.iterate_shared	= kernfs_fop_readdir,
 	.release	= kernfs_dir_fop_release,
-	.llseek		= kernfs_dir_fop_llseek,
+	.llseek		= generic_file_llseek,
 };
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 16405ae..b524722 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -208,10 +208,10 @@
 	return simple_xattr_set(&attrs->xattrs, name, NULL, 0, XATTR_REPLACE);
 }
 
-ssize_t kernfs_iop_getxattr(struct dentry *dentry, const char *name, void *buf,
-			    size_t size)
+ssize_t kernfs_iop_getxattr(struct dentry *unused, struct inode *inode,
+			    const char *name, void *buf, size_t size)
 {
-	struct kernfs_node *kn = dentry->d_fsdata;
+	struct kernfs_node *kn = inode->i_private;
 	struct kernfs_iattrs *attrs;
 
 	attrs = kernfs_iattrs(kn);
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 6762bfb..45c9192 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -84,8 +84,8 @@
 int kernfs_iop_setxattr(struct dentry *dentry, const char *name, const void *value,
 			size_t size, int flags);
 int kernfs_iop_removexattr(struct dentry *dentry, const char *name);
-ssize_t kernfs_iop_getxattr(struct dentry *dentry, const char *name, void *buf,
-			    size_t size);
+ssize_t kernfs_iop_getxattr(struct dentry *dentry, struct inode *inode,
+			    const char *name, void *buf, size_t size);
 ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size);
 
 /*
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index b67dbcc..63534f5 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/namei.h>
+#include <linux/seq_file.h>
 
 #include "kernfs-internal.h"
 
@@ -40,6 +41,19 @@
 	return 0;
 }
 
+static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry)
+{
+	struct kernfs_node *node = dentry->d_fsdata;
+	struct kernfs_root *root = kernfs_root(node);
+	struct kernfs_syscall_ops *scops = root->syscall_ops;
+
+	if (scops && scops->show_path)
+		return scops->show_path(sf, node, root);
+
+	seq_dentry(sf, dentry, " \t\n\\");
+	return 0;
+}
+
 const struct super_operations kernfs_sops = {
 	.statfs		= simple_statfs,
 	.drop_inode	= generic_delete_inode,
@@ -47,6 +61,7 @@
 
 	.remount_fs	= kernfs_sop_remount_fs,
 	.show_options	= kernfs_sop_show_options,
+	.show_path	= kernfs_sop_show_path,
 };
 
 /**
@@ -120,9 +135,8 @@
 		kntmp = find_next_ancestor(kn, knparent);
 		if (WARN_ON(!kntmp))
 			return ERR_PTR(-EINVAL);
-		mutex_lock(&d_inode(dentry)->i_mutex);
-		dtmp = lookup_one_len(kntmp->name, dentry, strlen(kntmp->name));
-		mutex_unlock(&d_inode(dentry)->i_mutex);
+		dtmp = lookup_one_len_unlocked(kntmp->name, dentry,
+					       strlen(kntmp->name));
 		dput(dentry);
 		if (IS_ERR(dtmp))
 			return dtmp;
@@ -138,8 +152,8 @@
 	struct dentry *root;
 
 	info->sb = sb;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = magic;
 	sb->s_op = &kernfs_sops;
 	sb->s_time_gran = 1;
diff --git a/fs/libfs.c b/fs/libfs.c
index 0ca80b2..8765ff1 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -25,7 +25,7 @@
 {
 	struct inode *inode = d_inode(dentry);
 	generic_fillattr(inode, stat);
-	stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
+	stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
 	return 0;
 }
 EXPORT_SYMBOL(simple_getattr);
@@ -33,7 +33,7 @@
 int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	buf->f_type = dentry->d_sb->s_magic;
-	buf->f_bsize = PAGE_CACHE_SIZE;
+	buf->f_bsize = PAGE_SIZE;
 	buf->f_namelen = NAME_MAX;
 	return 0;
 }
@@ -89,7 +89,6 @@
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
 {
 	struct dentry *dentry = file->f_path.dentry;
-	inode_lock(d_inode(dentry));
 	switch (whence) {
 		case 1:
 			offset += file->f_pos;
@@ -97,7 +96,6 @@
 			if (offset >= 0)
 				break;
 		default:
-			inode_unlock(d_inode(dentry));
 			return -EINVAL;
 	}
 	if (offset != file->f_pos) {
@@ -124,7 +122,6 @@
 			spin_unlock(&dentry->d_lock);
 		}
 	}
-	inode_unlock(d_inode(dentry));
 	return offset;
 }
 EXPORT_SYMBOL(dcache_dir_lseek);
@@ -190,7 +187,7 @@
 	.release	= dcache_dir_close,
 	.llseek		= dcache_dir_lseek,
 	.read		= generic_read_dir,
-	.iterate	= dcache_readdir,
+	.iterate_shared	= dcache_readdir,
 	.fsync		= noop_fsync,
 };
 EXPORT_SYMBOL(simple_dir_operations);
@@ -395,7 +392,7 @@
 	struct page *page;
 	pgoff_t index;
 
-	index = pos >> PAGE_CACHE_SHIFT;
+	index = pos >> PAGE_SHIFT;
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -403,10 +400,10 @@
 
 	*pagep = page;
 
-	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
-		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	if (!PageUptodate(page) && (len != PAGE_SIZE)) {
+		unsigned from = pos & (PAGE_SIZE - 1);
 
-		zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
+		zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
 	}
 	return 0;
 }
@@ -442,7 +439,7 @@
 
 	/* zero the stale part of the page if we did a short copy */
 	if (copied < len) {
-		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+		unsigned from = pos & (PAGE_SIZE - 1);
 
 		zero_user(page, from + copied, len - copied);
 	}
@@ -458,7 +455,7 @@
 
 	set_page_dirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return copied;
 }
@@ -477,8 +474,8 @@
 	struct dentry *dentry;
 	int i;
 
-	s->s_blocksize = PAGE_CACHE_SIZE;
-	s->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	s->s_blocksize = PAGE_SIZE;
+	s->s_blocksize_bits = PAGE_SHIFT;
 	s->s_magic = magic;
 	s->s_op = &simple_super_operations;
 	s->s_time_gran = 1;
@@ -994,12 +991,12 @@
 {
 	u64 last_fs_block = num_blocks - 1;
 	u64 last_fs_page =
-		last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits);
+		last_fs_block >> (PAGE_SHIFT - blocksize_bits);
 
 	if (unlikely(num_blocks == 0))
 		return 0;
 
-	if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT))
+	if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT))
 		return -EINVAL;
 
 	if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
@@ -1127,8 +1124,8 @@
 	return -EOPNOTSUPP;
 }
 
-static ssize_t empty_dir_getxattr(struct dentry *dentry, const char *name,
-				  void *value, size_t size)
+static ssize_t empty_dir_getxattr(struct dentry *dentry, struct inode *inode,
+				  const char *name, void *value, size_t size)
 {
 	return -EOPNOTSUPP;
 }
@@ -1169,7 +1166,7 @@
 static const struct file_operations empty_dir_operations = {
 	.llseek		= empty_dir_llseek,
 	.read		= generic_read_dir,
-	.iterate	= empty_dir_readdir,
+	.iterate_shared	= empty_dir_readdir,
 	.fsync		= noop_fsync,
 };
 
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index a709d80..cc26f8f 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -64,7 +64,7 @@
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		end_page_writeback(bvec->bv_page);
-		page_cache_release(bvec->bv_page);
+		put_page(bvec->bv_page);
 	}
 	bio_put(bio);
 	if (atomic_dec_and_test(&super->s_pending_writes))
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
index 9c50144..b76a62b 100644
--- a/fs/logfs/dev_mtd.c
+++ b/fs/logfs/dev_mtd.c
@@ -46,9 +46,9 @@
 
 	BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
 	BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
-	BUG_ON(len > PAGE_CACHE_SIZE);
-	page_start = ofs & PAGE_CACHE_MASK;
-	page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
+	BUG_ON(len > PAGE_SIZE);
+	page_start = ofs & PAGE_MASK;
+	page_end = PAGE_ALIGN(ofs + len) - 1;
 	ret = mtd_write(mtd, ofs, len, &retlen, buf);
 	if (ret || (retlen != len))
 		return -EIO;
@@ -82,7 +82,7 @@
 		if (!page)
 			continue;
 		memset(page_address(page), 0xFF, PAGE_SIZE);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return 0;
 }
@@ -195,7 +195,7 @@
 		err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
 					page_address(page));
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		if (err)
 			return err;
 	}
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 542468e..2d5336b 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -183,7 +183,7 @@
 		if (name->len != be16_to_cpu(dd->namelen) ||
 				memcmp(name->name, dd->name, name->len)) {
 			kunmap_atomic(dd);
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -238,7 +238,7 @@
 		return PTR_ERR(page);
 	}
 	index = page->index;
-	page_cache_release(page);
+	put_page(page);
 
 	mutex_lock(&super->s_dirop_mutex);
 	logfs_add_transaction(dir, ta);
@@ -316,7 +316,7 @@
 				be16_to_cpu(dd->namelen),
 				be64_to_cpu(dd->ino), dd->type);
 		kunmap(page);
-		page_cache_release(page);
+		put_page(page);
 		if (full)
 			break;
 	}
@@ -349,7 +349,7 @@
 	dd = kmap_atomic(page);
 	ino = be64_to_cpu(dd->ino);
 	kunmap_atomic(dd);
-	page_cache_release(page);
+	put_page(page);
 
 	inode = logfs_iget(dir->i_sb, ino);
 	if (IS_ERR(inode))
@@ -392,7 +392,7 @@
 
 		err = logfs_write_buf(dir, page, WF_LOCK);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		if (!err)
 			grow_dir(dir, index);
 		return err;
@@ -561,7 +561,7 @@
 	map = kmap_atomic(page);
 	memcpy(dd, map, sizeof(*dd));
 	kunmap_atomic(map);
-	page_cache_release(page);
+	put_page(page);
 	return 0;
 }
 
@@ -791,7 +791,7 @@
 const struct file_operations logfs_dir_fops = {
 	.fsync		= logfs_fsync,
 	.unlocked_ioctl	= logfs_ioctl,
-	.iterate	= logfs_readdir,
+	.iterate_shared	= logfs_readdir,
 	.read		= generic_read_dir,
-	.llseek		= default_llseek,
+	.llseek		= generic_file_llseek,
 };
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 61eaeb1..f01ddfb 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -15,21 +15,21 @@
 {
 	struct inode *inode = mapping->host;
 	struct page *page;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
 	*pagep = page;
 
-	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
+	if ((len == PAGE_SIZE) || PageUptodate(page))
 		return 0;
-	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
-		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
+		unsigned start = pos & (PAGE_SIZE - 1);
 		unsigned end = start + len;
 
 		/* Reading beyond i_size is simple: memset to zero */
-		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+		zero_user_segments(page, 0, start, end, PAGE_SIZE);
 		return 0;
 	}
 	return logfs_readpage_nolock(page);
@@ -41,11 +41,11 @@
 {
 	struct inode *inode = mapping->host;
 	pgoff_t index = page->index;
-	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned start = pos & (PAGE_SIZE - 1);
 	unsigned end = start + copied;
 	int ret = 0;
 
-	BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize);
+	BUG_ON(PAGE_SIZE != inode->i_sb->s_blocksize);
 	BUG_ON(page->index > I3_BLOCKS);
 
 	if (copied < len) {
@@ -61,8 +61,8 @@
 	if (copied == 0)
 		goto out; /* FIXME: do we need to update inode? */
 
-	if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) {
-		i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end);
+	if (i_size_read(inode) < (index << PAGE_SHIFT) + end) {
+		i_size_write(inode, (index << PAGE_SHIFT) + end);
 		mark_inode_dirty_sync(inode);
 	}
 
@@ -75,7 +75,7 @@
 	}
 out:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return ret ? ret : copied;
 }
 
@@ -118,7 +118,7 @@
 {
 	struct inode *inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
 	unsigned offset;
 	u64 bix;
 	level_t level;
@@ -142,7 +142,7 @@
 		return __logfs_writepage(page);
 
 	 /* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_CACHE_SIZE-1);
+	offset = i_size & (PAGE_SIZE-1);
 	if (bix > end_index || offset == 0) {
 		unlock_page(page);
 		return 0; /* don't care */
@@ -155,7 +155,7 @@
 	 * the  page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+	zero_user_segment(page, offset, PAGE_SIZE);
 	return __logfs_writepage(page);
 }
 
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 20973c9..3fb8c6d 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -281,7 +281,7 @@
 static void logfs_put_read_page(struct page *page)
 {
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static void logfs_lock_write_page(struct page *page)
@@ -323,7 +323,7 @@
 			return NULL;
 		err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS);
 		if (unlikely(err)) {
-			page_cache_release(page);
+			put_page(page);
 			if (err == -EEXIST)
 				goto repeat;
 			return NULL;
@@ -342,7 +342,7 @@
 static void logfs_put_write_page(struct page *page)
 {
 	logfs_unlock_write_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level,
@@ -562,7 +562,7 @@
 
 	if (PagePrivate(page)) {
 		ClearPagePrivate(page);
-		page_cache_release(page);
+		put_page(page);
 		set_page_private(page, 0);
 	}
 	__free_block(sb, block);
@@ -655,7 +655,7 @@
 	block->page = page;
 
 	SetPagePrivate(page);
-	page_cache_get(page);
+	get_page(page);
 	set_page_private(page, (unsigned long) block);
 
 	block->ops = &indirect_block_ops;
@@ -709,7 +709,7 @@
 
 static int logfs_read_empty(struct page *page)
 {
-	zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+	zero_user_segment(page, 0, PAGE_SIZE);
 	return 0;
 }
 
@@ -1660,7 +1660,7 @@
 	if (err)
 		return err;
 
-	zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE);
+	zero_user_segment(page, size - pageofs, PAGE_SIZE);
 	return logfs_segment_write(inode, page, shadow);
 }
 
@@ -1919,7 +1919,7 @@
 	block->page = NULL;
 	if (PagePrivate(page)) {
 		ClearPagePrivate(page);
-		page_cache_release(page);
+		put_page(page);
 		set_page_private(page, 0);
 	}
 }
@@ -1940,7 +1940,7 @@
 
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		page_cache_get(page);
+		get_page(page);
 		set_page_private(page, (unsigned long) block);
 	}
 
@@ -1971,7 +1971,7 @@
 	logfs_disk_to_inode(di, inode);
 	kunmap_atomic(di);
 	move_page_to_inode(inode, page);
-	page_cache_release(page);
+	put_page(page);
 	return 0;
 }
 
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index d270e4b..1efd605 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -90,9 +90,9 @@
 
 		if (!PagePrivate(page)) {
 			SetPagePrivate(page);
-			page_cache_get(page);
+			get_page(page);
 		}
-		page_cache_release(page);
+		put_page(page);
 
 		buf += copylen;
 		len -= copylen;
@@ -117,9 +117,9 @@
 		memset(page_address(page) + offset, 0xff, len);
 		if (!PagePrivate(page)) {
 			SetPagePrivate(page);
-			page_cache_get(page);
+			get_page(page);
 		}
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
@@ -129,20 +129,20 @@
 	struct logfs_super *super = logfs_super(sb);
 	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
 	u32 len = super->s_segsize - area->a_used_bytes;
-	pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
-	pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+	pgoff_t index = PAGE_ALIGN(ofs) >> PAGE_SHIFT;
+	pgoff_t no_indizes = len >> PAGE_SHIFT;
 	struct page *page;
 
 	while (no_indizes) {
 		page = get_mapping_page(sb, index, 0);
 		BUG_ON(!page); /* FIXME: reserve a pool */
 		SetPageUptodate(page);
-		memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
+		memset(page_address(page), 0xff, PAGE_SIZE);
 		if (!PagePrivate(page)) {
 			SetPagePrivate(page);
-			page_cache_get(page);
+			get_page(page);
 		}
-		page_cache_release(page);
+		put_page(page);
 		index++;
 		no_indizes--;
 	}
@@ -411,7 +411,7 @@
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 		memcpy(buf, page_address(page) + offset, copylen);
-		page_cache_release(page);
+		put_page(page);
 
 		buf += copylen;
 		len -= copylen;
@@ -499,7 +499,7 @@
 
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		page_cache_get(page);
+		get_page(page);
 		set_page_private(page, (unsigned long) block);
 	}
 	block->ops = &indirect_block_ops;
@@ -554,7 +554,7 @@
 
 	if (PagePrivate(page)) {
 		ClearPagePrivate(page);
-		page_cache_release(page);
+		put_page(page);
 		set_page_private(page, 0);
 	}
 	block->ops = &btree_block_ops;
@@ -723,9 +723,9 @@
 			continue;
 		if (PagePrivate(page)) {
 			ClearPagePrivate(page);
-			page_cache_release(page);
+			put_page(page);
 		}
-		page_cache_release(page);
+		put_page(page);
 	}
 }
 
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 5436029..5751082 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -48,7 +48,7 @@
 	if (page == emergency_page)
 		mutex_unlock(&emergency_mutex);
 	else
-		page_cache_release(page);
+		put_page(page);
 }
 
 static void dump_segfile(struct super_block *sb)
@@ -206,7 +206,7 @@
 	logfs_set_segment_erased(sb, segno, ec, 0);
 	logfs_write_ds(sb, ds, segno, ec);
 	err = super->s_devops->write_sb(sb, page);
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -366,24 +366,24 @@
 		return NULL;
 	last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
 	if (!last || IS_ERR(last)) {
-		page_cache_release(first);
+		put_page(first);
 		return NULL;
 	}
 
 	if (!logfs_check_ds(page_address(first))) {
-		page_cache_release(last);
+		put_page(last);
 		return first;
 	}
 
 	/* First one didn't work, try the second superblock */
 	if (!logfs_check_ds(page_address(last))) {
-		page_cache_release(first);
+		put_page(first);
 		return last;
 	}
 
 	/* Neither worked, sorry folks */
-	page_cache_release(first);
-	page_cache_release(last);
+	put_page(first);
+	put_page(last);
 	return NULL;
 }
 
@@ -425,7 +425,7 @@
 	super->s_data_levels = ds->ds_data_levels;
 	super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels
 		+ super->s_data_levels;
-	page_cache_release(page);
+	put_page(page);
 	return 0;
 }
 
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index d19ac25..31dcd51 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -21,14 +21,14 @@
 const struct file_operations minix_dir_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= minix_readdir,
+	.iterate_shared	= minix_readdir,
 	.fsync		= generic_file_fsync,
 };
 
 static inline void dir_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -38,10 +38,10 @@
 static unsigned
 minix_last_byte(struct inode *inode, unsigned long page_nr)
 {
-	unsigned last_byte = PAGE_CACHE_SIZE;
+	unsigned last_byte = PAGE_SIZE;
 
-	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
-		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
+	if (page_nr == (inode->i_size >> PAGE_SHIFT))
+		last_byte = inode->i_size & (PAGE_SIZE - 1);
 	return last_byte;
 }
 
@@ -92,8 +92,8 @@
 	if (pos >= inode->i_size)
 		return 0;
 
-	offset = pos & ~PAGE_CACHE_MASK;
-	n = pos >> PAGE_CACHE_SHIFT;
+	offset = pos & ~PAGE_MASK;
+	n = pos >> PAGE_SHIFT;
 
 	for ( ; n < npages; n++, offset = 0) {
 		char *p, *kaddr, *limit;
@@ -229,7 +229,7 @@
 		lock_page(page);
 		kaddr = (char*)page_address(page);
 		dir_end = kaddr + minix_last_byte(dir, n);
-		limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
+		limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
 		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
 			de = (minix_dirent *)p;
 			de3 = (minix3_dirent *)p;
@@ -327,7 +327,7 @@
 	}
 
 	kaddr = kmap_atomic(page);
-	memset(kaddr, 0, PAGE_CACHE_SIZE);
+	memset(kaddr, 0, PAGE_SIZE);
 
 	if (sbi->s_version == MINIX_V3) {
 		minix3_dirent *de3 = (minix3_dirent *)kaddr;
@@ -350,7 +350,7 @@
 
 	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index a795a11..2887d1d 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -243,11 +243,11 @@
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/mpage.c b/fs/mpage.c
index 6bd9fd9..eedc644 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -107,7 +107,7 @@
 		 * don't make any buffers if there is only one buffer on
 		 * the page and the page just needs to be set up to date
 		 */
-		if (inode->i_blkbits == PAGE_CACHE_SHIFT && 
+		if (inode->i_blkbits == PAGE_SHIFT &&
 		    buffer_uptodate(bh)) {
 			SetPageUptodate(page);    
 			return;
@@ -145,7 +145,7 @@
 {
 	struct inode *inode = page->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
 	const unsigned blocksize = 1 << blkbits;
 	sector_t block_in_file;
 	sector_t last_block;
@@ -162,7 +162,7 @@
 	if (page_has_buffers(page))
 		goto confused;
 
-	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 	last_block = block_in_file + nr_pages * blocks_per_page;
 	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
 	if (last_block > last_block_in_file)
@@ -249,7 +249,7 @@
 	}
 
 	if (first_hole != blocks_per_page) {
-		zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
+		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
 		if (first_hole == 0) {
 			SetPageUptodate(page);
 			unlock_page(page);
@@ -331,7 +331,7 @@
  *
  * then this code just gives up and calls the buffer_head-based read function.
  * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
  *
  * BH_Boundary explanation:
  *
@@ -380,7 +380,7 @@
 					&first_logical_block,
 					get_block, gfp);
 		}
-		page_cache_release(page);
+		put_page(page);
 	}
 	BUG_ON(!list_empty(pages));
 	if (bio)
@@ -472,7 +472,7 @@
 	struct inode *inode = page->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
 	unsigned long end_index;
-	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
 	sector_t last_block;
 	sector_t block_in_file;
 	sector_t blocks[MAX_BUF_PER_PAGE];
@@ -542,7 +542,7 @@
 	 * The page has no buffers: map it to disk
 	 */
 	BUG_ON(!PageUptodate(page));
-	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
 	last_block = (i_size - 1) >> blkbits;
 	map_bh.b_page = page;
 	for (page_block = 0; page_block < blocks_per_page; ) {
@@ -574,7 +574,7 @@
 	first_unmapped = page_block;
 
 page_is_mapped:
-	end_index = i_size >> PAGE_CACHE_SHIFT;
+	end_index = i_size >> PAGE_SHIFT;
 	if (page->index >= end_index) {
 		/*
 		 * The page straddles i_size.  It must be zeroed out on each
@@ -584,11 +584,11 @@
 		 * is zeroed when mapped, and writes to that region are not
 		 * written out to the file."
 		 */
-		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
+		unsigned offset = i_size & (PAGE_SIZE - 1);
 
 		if (page->index > end_index || !offset)
 			goto confused;
-		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+		zero_user_segment(page, offset, PAGE_SIZE);
 	}
 
 	/*
diff --git a/fs/namei.c b/fs/namei.c
index 794f81d..5375571 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -265,7 +265,7 @@
 	        if (!acl)
 	                return -EAGAIN;
 		/* no ->get_acl() calls in RCU mode... */
-		if (acl == ACL_NOT_CACHED)
+		if (is_uncached_acl(acl))
 			return -ECHILD;
 	        return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
 	}
@@ -1603,32 +1603,42 @@
 				  struct dentry *dir,
 				  unsigned int flags)
 {
-	struct dentry *dentry;
-	inode_lock(dir->d_inode);
-	dentry = d_lookup(dir, name);
-	if (unlikely(dentry)) {
+	struct dentry *dentry = ERR_PTR(-ENOENT), *old;
+	struct inode *inode = dir->d_inode;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+
+	inode_lock_shared(inode);
+	/* Don't go there if it's already dead */
+	if (unlikely(IS_DEADDIR(inode)))
+		goto out;
+again:
+	dentry = d_alloc_parallel(dir, name, &wq);
+	if (IS_ERR(dentry))
+		goto out;
+	if (unlikely(!d_in_lookup(dentry))) {
 		if ((dentry->d_flags & DCACHE_OP_REVALIDATE) &&
 		    !(flags & LOOKUP_NO_REVAL)) {
 			int error = d_revalidate(dentry, flags);
 			if (unlikely(error <= 0)) {
-				if (!error)
+				if (!error) {
 					d_invalidate(dentry);
+					dput(dentry);
+					goto again;
+				}
 				dput(dentry);
 				dentry = ERR_PTR(error);
 			}
 		}
-		if (dentry) {
-			inode_unlock(dir->d_inode);
-			return dentry;
+	} else {
+		old = inode->i_op->lookup(inode, dentry, flags);
+		d_lookup_done(dentry);
+		if (unlikely(old)) {
+			dput(dentry);
+			dentry = old;
 		}
 	}
-	dentry = d_alloc(dir, name);
-	if (unlikely(!dentry)) {
-		inode_unlock(dir->d_inode);
-		return ERR_PTR(-ENOMEM);
-	}
-	dentry = lookup_real(dir->d_inode, dentry, flags);
-	inode_unlock(dir->d_inode);
+out:
+	inode_unlock_shared(inode);
 	return dentry;
 }
 
@@ -1740,15 +1750,17 @@
 					  nd->flags);
 		if (IS_ERR(path.dentry))
 			return PTR_ERR(path.dentry);
-		if (unlikely(d_is_negative(path.dentry))) {
-			dput(path.dentry);
-			return -ENOENT;
-		}
+
 		path.mnt = nd->path.mnt;
 		err = follow_managed(&path, nd);
 		if (unlikely(err < 0))
 			return err;
 
+		if (unlikely(d_is_negative(path.dentry))) {
+			path_to_nameidata(&path, nd);
+			return -ENOENT;
+		}
+
 		seq = 0;	/* we are already out of RCU mode */
 		inode = d_backing_inode(path.dentry);
 	}
@@ -1792,30 +1804,49 @@
 	return hash_64(hash, 32);
 }
 
+/*
+ * This is George Marsaglia's XORSHIFT generator.
+ * It implements a maximum-period LFSR in only a few
+ * instructions.  It also has the property (required
+ * by hash_name()) that mix_hash(0) = 0.
+ */
+static inline unsigned long mix_hash(unsigned long hash)
+{
+	hash ^= hash << 13;
+	hash ^= hash >> 7;
+	hash ^= hash << 17;
+	return hash;
+}
+
 #else	/* 32-bit case */
 
 #define fold_hash(x) (x)
 
+static inline unsigned long mix_hash(unsigned long hash)
+{
+	hash ^= hash << 13;
+	hash ^= hash >> 17;
+	hash ^= hash << 5;
+	return hash;
+}
+
 #endif
 
 unsigned int full_name_hash(const unsigned char *name, unsigned int len)
 {
-	unsigned long a, mask;
-	unsigned long hash = 0;
+	unsigned long a, hash = 0;
 
 	for (;;) {
 		a = load_unaligned_zeropad(name);
 		if (len < sizeof(unsigned long))
 			break;
-		hash += a;
-		hash *= 9;
+		hash = mix_hash(hash + a);
 		name += sizeof(unsigned long);
 		len -= sizeof(unsigned long);
 		if (!len)
 			goto done;
 	}
-	mask = bytemask_from_count(len);
-	hash += mask & a;
+	hash += a & bytemask_from_count(len);
 done:
 	return fold_hash(hash);
 }
@@ -1833,7 +1864,7 @@
 	hash = a = 0;
 	len = -sizeof(unsigned long);
 	do {
-		hash = (hash + a) * 9;
+		hash = mix_hash(hash + a);
 		len += sizeof(unsigned long);
 		a = load_unaligned_zeropad(name+len);
 		b = a ^ REPEAT_BYTE('/');
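
The full_name_hash()/hash_name() hunks above replace the old multiply-by-9 mixing with the xorshift step described in the comment. As a rough standalone sketch of the same word-at-a-time scheme (an illustration only: it assumes a 64-bit unsigned long, uses memcpy in place of load_unaligned_zeropad(), and leaves out fold_hash(), so the values differ from the kernel's):

#include <stdio.h>
#include <string.h>

/* 64-bit xorshift step; note mix_hash(0) == 0, which hash_name() relies on */
static unsigned long mix_hash(unsigned long hash)
{
	hash ^= hash << 13;
	hash ^= hash >> 7;
	hash ^= hash << 17;
	return hash;
}

/* word-at-a-time hash of a pathname component (simplified) */
static unsigned long name_hash(const char *name, size_t len)
{
	unsigned long hash = 0, a;

	while (len >= sizeof(a)) {
		memcpy(&a, name, sizeof(a));	/* stands in for load_unaligned_zeropad() */
		hash = mix_hash(hash + a);
		name += sizeof(a);
		len -= sizeof(a);
	}
	a = 0;
	memcpy(&a, name, len);			/* zero-padded tail, like bytemask_from_count() */
	return hash + a;			/* fold_hash() omitted in this sketch */
}

int main(void)
{
	printf("%lx\n", name_hash("vmlinux", 7));
	printf("%lx\n", mix_hash(0));		/* prints 0 */
	return 0;
}
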
@@ -2265,6 +2296,33 @@
 EXPORT_SYMBOL(vfs_path_lookup);
 
 /**
+ * lookup_hash - lookup single pathname component on already hashed name
+ * @name:	name and hash to lookup
+ * @base:	base directory to lookup from
+ *
+ * The name must have been verified and hashed (see lookup_one_len()).  Using
+ * this after just full_name_hash() is unsafe.
+ *
+ * This function also doesn't check for search permission on base directory.
+ *
+ * Use lookup_one_len_unlocked() instead, unless you really know what you are
+ * doing.
+ *
+ * Do not hold i_mutex; this helper takes i_mutex if necessary.
+ */
+struct dentry *lookup_hash(const struct qstr *name, struct dentry *base)
+{
+	struct dentry *ret;
+
+	ret = lookup_dcache(name, base, 0);
+	if (!ret)
+		ret = lookup_slow(name, base, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(lookup_hash);
+
+/**
  * lookup_one_len - filesystem helper to lookup single pathname component
  * @name:	pathname component to lookup
  * @base:	base directory to lookup from
@@ -2335,7 +2393,6 @@
 	struct qstr this;
 	unsigned int c;
 	int err;
-	struct dentry *ret;
 
 	this.name = name;
 	this.len = len;
@@ -2367,10 +2424,7 @@
 	if (err)
 		return ERR_PTR(err);
 
-	ret = lookup_dcache(&this, base, 0);
-	if (!ret)
-		ret = lookup_slow(&this, base, 0);
-	return ret;
+	return lookup_hash(&this, base);
 }
 EXPORT_SYMBOL(lookup_one_len_unlocked);
 
@@ -2653,7 +2707,7 @@
 		return NULL;
 	}
 
-	mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
+	mutex_lock(&p1->d_sb->s_vfs_rename_mutex);
 
 	p = d_ancestor(p2, p1);
 	if (p) {
@@ -2680,7 +2734,7 @@
 	inode_unlock(p1->d_inode);
 	if (p1 != p2) {
 		inode_unlock(p2->d_inode);
-		mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
+		mutex_unlock(&p1->d_sb->s_vfs_rename_mutex);
 	}
 }
 EXPORT_SYMBOL(unlock_rename);
@@ -2783,7 +2837,7 @@
 	return flag;
 }
 
-static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode)
+static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode)
 {
 	int error = security_path_mknod(dir, dentry, mode, 0);
 	if (error)
@@ -2812,155 +2866,56 @@
 static int atomic_open(struct nameidata *nd, struct dentry *dentry,
 			struct path *path, struct file *file,
 			const struct open_flags *op,
-			bool got_write, bool need_lookup,
+			int open_flag, umode_t mode,
 			int *opened)
 {
-	struct inode *dir =  nd->path.dentry->d_inode;
-	unsigned open_flag = open_to_namei_flags(op->open_flag);
-	umode_t mode;
-	int error;
-	int acc_mode;
-	int create_error = 0;
 	struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
-	bool excl;
+	struct inode *dir =  nd->path.dentry->d_inode;
+	int error;
 
-	BUG_ON(dentry->d_inode);
-
-	/* Don't create child dentry for a dead directory. */
-	if (unlikely(IS_DEADDIR(dir))) {
-		error = -ENOENT;
-		goto out;
-	}
-
-	mode = op->mode;
-	if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
-		mode &= ~current_umask();
-
-	excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT);
-	if (excl)
+	if (!(~open_flag & (O_EXCL | O_CREAT)))	/* both O_EXCL and O_CREAT */
 		open_flag &= ~O_TRUNC;
 
-	/*
-	 * Checking write permission is tricky, bacuse we don't know if we are
-	 * going to actually need it: O_CREAT opens should work as long as the
-	 * file exists.  But checking existence breaks atomicity.  The trick is
-	 * to check access and if not granted clear O_CREAT from the flags.
-	 *
-	 * Another problem is returing the "right" error value (e.g. for an
-	 * O_EXCL open we want to return EEXIST not EROFS).
-	 */
-	if (((open_flag & (O_CREAT | O_TRUNC)) ||
-	    (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) {
-		if (!(open_flag & O_CREAT)) {
-			/*
-			 * No O_CREATE -> atomicity not a requirement -> fall
-			 * back to lookup + open
-			 */
-			goto no_open;
-		} else if (open_flag & (O_EXCL | O_TRUNC)) {
-			/* Fall back and fail with the right error */
-			create_error = -EROFS;
-			goto no_open;
-		} else {
-			/* No side effects, safe to clear O_CREAT */
-			create_error = -EROFS;
-			open_flag &= ~O_CREAT;
-		}
-	}
-
-	if (open_flag & O_CREAT) {
-		error = may_o_create(&nd->path, dentry, mode);
-		if (error) {
-			create_error = error;
-			if (open_flag & O_EXCL)
-				goto no_open;
-			open_flag &= ~O_CREAT;
-		}
-	}
-
 	if (nd->flags & LOOKUP_DIRECTORY)
 		open_flag |= O_DIRECTORY;
 
 	file->f_path.dentry = DENTRY_NOT_SET;
 	file->f_path.mnt = nd->path.mnt;
-	error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode,
-				      opened);
-	if (error < 0) {
-		if (create_error && error == -ENOENT)
-			error = create_error;
-		goto out;
-	}
-
-	if (error) {	/* returned 1, that is */
+	error = dir->i_op->atomic_open(dir, dentry, file,
+				       open_to_namei_flags(open_flag),
+				       mode, opened);
+	d_lookup_done(dentry);
+	if (!error) {
+		/*
+		 * We didn't have the inode before the open, so check open
+		 * permission here.
+		 */
+		int acc_mode = op->acc_mode;
+		if (*opened & FILE_CREATED) {
+			WARN_ON(!(open_flag & O_CREAT));
+			fsnotify_create(dir, dentry);
+			acc_mode = 0;
+		}
+		error = may_open(&file->f_path, acc_mode, open_flag);
+		if (WARN_ON(error > 0))
+			error = -EINVAL;
+	} else if (error > 0) {
 		if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
 			error = -EIO;
-			goto out;
-		}
-		if (file->f_path.dentry) {
-			dput(dentry);
-			dentry = file->f_path.dentry;
-		}
-		if (*opened & FILE_CREATED)
-			fsnotify_create(dir, dentry);
-		if (!dentry->d_inode) {
-			WARN_ON(*opened & FILE_CREATED);
-			if (create_error) {
-				error = create_error;
-				goto out;
-			}
 		} else {
-			if (excl && !(*opened & FILE_CREATED)) {
-				error = -EEXIST;
-				goto out;
+			if (file->f_path.dentry) {
+				dput(dentry);
+				dentry = file->f_path.dentry;
 			}
+			if (*opened & FILE_CREATED)
+				fsnotify_create(dir, dentry);
+			path->dentry = dentry;
+			path->mnt = nd->path.mnt;
+			return 1;
 		}
-		goto looked_up;
 	}
-
-	/*
-	 * We didn't have the inode before the open, so check open permission
-	 * here.
-	 */
-	acc_mode = op->acc_mode;
-	if (*opened & FILE_CREATED) {
-		WARN_ON(!(open_flag & O_CREAT));
-		fsnotify_create(dir, dentry);
-		acc_mode = 0;
-	}
-	error = may_open(&file->f_path, acc_mode, open_flag);
-	if (error)
-		fput(file);
-
-out:
 	dput(dentry);
 	return error;
-
-no_open:
-	if (need_lookup) {
-		dentry = lookup_real(dir, dentry, nd->flags);
-		if (IS_ERR(dentry))
-			return PTR_ERR(dentry);
-
-		if (create_error) {
-			int open_flag = op->open_flag;
-
-			error = create_error;
-			if ((open_flag & O_EXCL)) {
-				if (!dentry->d_inode)
-					goto out;
-			} else if (!dentry->d_inode) {
-				goto out;
-			} else if ((open_flag & O_TRUNC) &&
-				   d_is_reg(dentry)) {
-				goto out;
-			}
-			/* will fail later, go on to get the right error */
-		}
-	}
-looked_up:
-	path->dentry = dentry;
-	path->mnt = nd->path.mnt;
-	return 1;
 }
 
 /*
@@ -2988,62 +2943,118 @@
 {
 	struct dentry *dir = nd->path.dentry;
 	struct inode *dir_inode = dir->d_inode;
+	int open_flag = op->open_flag;
 	struct dentry *dentry;
-	int error;
-	bool need_lookup = false;
+	int error, create_error = 0;
+	umode_t mode = op->mode;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+
+	if (unlikely(IS_DEADDIR(dir_inode)))
+		return -ENOENT;
 
 	*opened &= ~FILE_CREATED;
-	dentry = lookup_dcache(&nd->last, dir, nd->flags);
-	if (IS_ERR(dentry))
-		return PTR_ERR(dentry);
+	dentry = d_lookup(dir, &nd->last);
+	for (;;) {
+		if (!dentry) {
+			dentry = d_alloc_parallel(dir, &nd->last, &wq);
+			if (IS_ERR(dentry))
+				return PTR_ERR(dentry);
+		}
+		if (d_in_lookup(dentry))
+			break;
 
-	if (!dentry) {
-		dentry = d_alloc(dir, &nd->last);
-		if (unlikely(!dentry))
-			return -ENOMEM;
-		need_lookup = true;
-	} else if (dentry->d_inode) {
+		if (!(dentry->d_flags & DCACHE_OP_REVALIDATE))
+			break;
+
+		error = d_revalidate(dentry, nd->flags);
+		if (likely(error > 0))
+			break;
+		if (error)
+			goto out_dput;
+		d_invalidate(dentry);
+		dput(dentry);
+		dentry = NULL;
+	}
+	if (dentry->d_inode) {
 		/* Cached positive dentry: will open in f_op->open */
 		goto out_no_open;
 	}
 
-	if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) {
-		return atomic_open(nd, dentry, path, file, op, got_write,
-				   need_lookup, opened);
+	/*
+	 * Checking write permission is tricky, because we don't know if we are
+	 * going to actually need it: O_CREAT opens should work as long as the
+	 * file exists.  But checking existence breaks atomicity.  The trick is
+	 * to check access and if not granted clear O_CREAT from the flags.
+	 *
+	 * Another problem is returning the "right" error value (e.g. for an
+	 * O_EXCL open we want to return EEXIST not EROFS).
+	 */
+	if (open_flag & O_CREAT) {
+		if (!IS_POSIXACL(dir->d_inode))
+			mode &= ~current_umask();
+		if (unlikely(!got_write)) {
+			create_error = -EROFS;
+			open_flag &= ~O_CREAT;
+			if (open_flag & (O_EXCL | O_TRUNC))
+				goto no_open;
+			/* No side effects, safe to clear O_CREAT */
+		} else {
+			create_error = may_o_create(&nd->path, dentry, mode);
+			if (create_error) {
+				open_flag &= ~O_CREAT;
+				if (open_flag & O_EXCL)
+					goto no_open;
+			}
+		}
+	} else if ((open_flag & (O_TRUNC|O_WRONLY|O_RDWR)) &&
+		   unlikely(!got_write)) {
+		/*
+		 * No O_CREAT -> atomicity not a requirement -> fall
+		 * back to lookup + open
+		 */
+		goto no_open;
 	}
 
-	if (need_lookup) {
-		BUG_ON(dentry->d_inode);
+	if (dir_inode->i_op->atomic_open) {
+		error = atomic_open(nd, dentry, path, file, op, open_flag,
+				    mode, opened);
+		if (unlikely(error == -ENOENT) && create_error)
+			error = create_error;
+		return error;
+	}
 
-		dentry = lookup_real(dir_inode, dentry, nd->flags);
-		if (IS_ERR(dentry))
-			return PTR_ERR(dentry);
+no_open:
+	if (d_in_lookup(dentry)) {
+		struct dentry *res = dir_inode->i_op->lookup(dir_inode, dentry,
+							     nd->flags);
+		d_lookup_done(dentry);
+		if (unlikely(res)) {
+			if (IS_ERR(res)) {
+				error = PTR_ERR(res);
+				goto out_dput;
+			}
+			dput(dentry);
+			dentry = res;
+		}
 	}
 
 	/* Negative dentry, just create the file */
-	if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
-		umode_t mode = op->mode;
-		if (!IS_POSIXACL(dir->d_inode))
-			mode &= ~current_umask();
-		/*
-		 * This write is needed to ensure that a
-		 * rw->ro transition does not occur between
-		 * the time when the file is created and when
-		 * a permanent write count is taken through
-		 * the 'struct file' in finish_open().
-		 */
-		if (!got_write) {
-			error = -EROFS;
+	if (!dentry->d_inode && (open_flag & O_CREAT)) {
+		*opened |= FILE_CREATED;
+		audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+		if (!dir_inode->i_op->create) {
+			error = -EACCES;
 			goto out_dput;
 		}
-		*opened |= FILE_CREATED;
-		error = security_path_mknod(&nd->path, dentry, mode, 0);
+		error = dir_inode->i_op->create(dir_inode, dentry, mode,
+						open_flag & O_EXCL);
 		if (error)
 			goto out_dput;
-		error = vfs_create(dir->d_inode, dentry, mode,
-				   nd->flags & LOOKUP_EXCL);
-		if (error)
-			goto out_dput;
+		fsnotify_create(dir_inode, dentry);
+	}
+	if (unlikely(create_error) && !dentry->d_inode) {
+		error = create_error;
+		goto out_dput;
 	}
 out_no_open:
 	path->dentry = dentry;
@@ -3115,7 +3126,7 @@
 	}
 
 retry_lookup:
-	if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
+	if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
 		error = mnt_want_write(nd->path.mnt);
 		if (!error)
 			got_write = true;
@@ -3125,9 +3136,15 @@
 		 * dropping this one anyway.
 		 */
 	}
-	inode_lock(dir->d_inode);
+	if (open_flag & O_CREAT)
+		inode_lock(dir->d_inode);
+	else
+		inode_lock_shared(dir->d_inode);
 	error = lookup_open(nd, &path, file, op, got_write, opened);
-	inode_unlock(dir->d_inode);
+	if (open_flag & O_CREAT)
+		inode_unlock(dir->d_inode);
+	else
+		inode_unlock_shared(dir->d_inode);
 
 	if (error <= 0) {
 		if (error)
@@ -3207,10 +3224,6 @@
 		return error;
 	}
 	audit_inode(nd->name, nd->path.dentry, 0);
-	if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
-		error = -ELOOP;
-		goto out;
-	}
 	error = -EISDIR;
 	if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
 		goto out;
@@ -3227,11 +3240,9 @@
 		got_write = true;
 	}
 finish_open_created:
-	if (likely(!(open_flag & O_PATH))) {
-		error = may_open(&nd->path, acc_mode, open_flag);
-		if (error)
-			goto out;
-	}
+	error = may_open(&nd->path, acc_mode, open_flag);
+	if (error)
+		goto out;
 	BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
 	error = vfs_open(&nd->path, file, current_cred());
 	if (!error) {
@@ -3243,18 +3254,13 @@
 	}
 opened:
 	error = open_check_o_direct(file);
-	if (error)
-		goto exit_fput;
-	error = ima_file_check(file, op->acc_mode, *opened);
-	if (error)
-		goto exit_fput;
-
-	if (will_truncate) {
+	if (!error)
+		error = ima_file_check(file, op->acc_mode, *opened);
+	if (!error && will_truncate)
 		error = handle_truncate(file);
-		if (error)
-			goto exit_fput;
-	}
 out:
+	if (unlikely(error) && (*opened & FILE_OPENED))
+		fput(file);
 	if (unlikely(error > 0)) {
 		WARN_ON(1);
 		error = -EINVAL;
@@ -3264,10 +3270,6 @@
 	path_put(&save_parent);
 	return error;
 
-exit_fput:
-	fput(file);
-	goto out;
-
 stale_open:
 	/* If no saved parent or already retried then can't retry */
 	if (!save_parent.dentry || retried)
@@ -3345,6 +3347,18 @@
 	return error;
 }
 
+static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
+{
+	struct path path;
+	int error = path_lookupat(nd, flags, &path);
+	if (!error) {
+		audit_inode(nd->name, path.dentry, 0);
+		error = vfs_open(&path, file, current_cred());
+		path_put(&path);
+	}
+	return error;
+}
+
 static struct file *path_openat(struct nameidata *nd,
 			const struct open_flags *op, unsigned flags)
 {
@@ -3364,6 +3378,13 @@
 		goto out2;
 	}
 
+	if (unlikely(file->f_flags & O_PATH)) {
+		error = do_o_path(nd, flags, file);
+		if (!error)
+			opened |= FILE_OPENED;
+		goto out2;
+	}
+
 	s = path_init(nd, flags);
 	if (IS_ERR(s)) {
 		put_filp(file);
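
The new do_o_path() branch gives O_PATH opens their own short path through path_openat(): the looked-up path is handed straight to vfs_open(), and, as before, may_open(), IMA and truncation handling are skipped, so the descriptor only names a location. A minimal userspace sketch of what such a descriptor is used for (Linux-specific flags; error handling omitted for brevity):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int dfd = open("/etc", O_PATH | O_DIRECTORY | O_CLOEXEC);

	fstatat(dfd, "hostname", &st, 0);		/* resolve relative to the handle */
	printf("size: %lld\n", (long long)st.st_size);

	int fd = openat(dfd, "hostname", O_RDONLY);	/* a real open, when I/O is needed */
	/* read(dfd, ...) would fail with EBADF: an O_PATH fd grants no I/O */
	close(fd);
	close(dfd);
	return 0;
}
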
@@ -3606,6 +3627,8 @@
 	switch (mode & S_IFMT) {
 		case 0: case S_IFREG:
 			error = vfs_create(path.dentry->d_inode,dentry,mode,true);
+			if (!error)
+				ima_post_path_mknod(dentry);
 			break;
 		case S_IFCHR: case S_IFBLK:
 			error = vfs_mknod(path.dentry->d_inode,dentry,mode,
@@ -4211,7 +4234,11 @@
 	bool new_is_dir = false;
 	unsigned max_links = new_dir->i_sb->s_max_links;
 
-	if (source == target)
+	/*
+	 * Check source == target.
+	 * On overlayfs need to look at underlying inodes.
+	 */
+	if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
 		return 0;
 
 	error = may_delete(old_dir, old_dentry, is_dir);
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index b7f8eae..bfdad00 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -510,7 +510,7 @@
 			kunmap(ctl.page);
 			SetPageUptodate(ctl.page);
 			unlock_page(ctl.page);
-			page_cache_release(ctl.page);
+			put_page(ctl.page);
 			ctl.page = NULL;
 		}
 		ctl.idx  = 0;
@@ -520,7 +520,7 @@
 	if (ctl.page) {
 		kunmap(ctl.page);
 		unlock_page(ctl.page);
-		page_cache_release(ctl.page);
+		put_page(ctl.page);
 		ctl.page = NULL;
 	}
 	ctl.cache = cache;
@@ -554,14 +554,14 @@
 		kunmap(ctl.page);
 		SetPageUptodate(ctl.page);
 		unlock_page(ctl.page);
-		page_cache_release(ctl.page);
+		put_page(ctl.page);
 	}
 	if (page) {
 		cache->head = ctl.head;
 		kunmap(page);
 		SetPageUptodate(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 out:
 	return result;
@@ -649,7 +649,7 @@
 			kunmap(ctl.page);
 			SetPageUptodate(ctl.page);
 			unlock_page(ctl.page);
-			page_cache_release(ctl.page);
+			put_page(ctl.page);
 		}
 		ctl.cache = NULL;
 		ctl.idx  -= NCP_DIRCACHE_SIZE;
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 5233fbc..17cfb74 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -191,7 +191,7 @@
 	int		eof;
 };
 
-#define NCP_DIRCACHE_SIZE	((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *)))
+#define NCP_DIRCACHE_SIZE	((int)(PAGE_SIZE/sizeof(struct dentry *)))
 union ncp_dir_cache {
 	struct ncp_cache_head	head;
 	struct dentry		*dentry[NCP_DIRCACHE_SIZE];
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 02e4d87..17a42e4 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -231,7 +231,7 @@
 	size_t bytes_left = header->args.count;
 	unsigned int pg_offset = header->args.pgbase, pg_len;
 	struct page **pages = header->args.pages;
-	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+	int pg_index = header->args.pgbase >> PAGE_SHIFT;
 	const bool is_dio = (header->dreq != NULL);
 	struct blk_plug plug;
 	int i;
@@ -263,13 +263,13 @@
 		}
 
 		if (is_dio) {
-			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
-				pg_len = PAGE_CACHE_SIZE - pg_offset;
+			if (pg_offset + bytes_left > PAGE_SIZE)
+				pg_len = PAGE_SIZE - pg_offset;
 			else
 				pg_len = bytes_left;
 		} else {
 			BUG_ON(pg_offset != 0);
-			pg_len = PAGE_CACHE_SIZE;
+			pg_len = PAGE_SIZE;
 		}
 
 		if (is_hole(&be)) {
@@ -339,9 +339,9 @@
 
 	if (likely(!hdr->pnfs_error)) {
 		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
-		u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
+		u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
 		u64 end = (hdr->args.offset + hdr->args.count +
-			PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;
+			PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
 
 		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
 					(end - start) >> SECTOR_SHIFT);
@@ -373,7 +373,7 @@
 	loff_t offset = header->args.offset;
 	size_t count = header->args.count;
 	struct page **pages = header->args.pages;
-	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
+	int pg_index = header->args.pgbase >> PAGE_SHIFT;
 	unsigned int pg_len;
 	struct blk_plug plug;
 	int i;
@@ -392,7 +392,7 @@
 	blk_start_plug(&plug);
 
 	/* we always write out the whole page */
-	offset = offset & (loff_t)PAGE_CACHE_MASK;
+	offset = offset & (loff_t)PAGE_MASK;
 	isect = offset >> SECTOR_SHIFT;
 
 	for (i = pg_index; i < header->page_array.npages; i++) {
@@ -408,7 +408,7 @@
 			extent_length = be.be_length - (isect - be.be_f_offset);
 		}
 
-		pg_len = PAGE_CACHE_SIZE;
+		pg_len = PAGE_SIZE;
 		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
 					 WRITE, isect, pages[i], &map, &be,
 					 bl_end_io_write, par,
@@ -820,7 +820,7 @@
 	pgoff_t end;
 
 	/* Optimize common case that writes from 0 to end of file */
-	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+	end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 	if (end != inode->i_mapping->nrpages) {
 		rcu_read_lock();
 		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
@@ -828,9 +828,9 @@
 	}
 
 	if (!end)
-		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
+		return i_size_read(inode) - (idx << PAGE_SHIFT);
 	else
-		return (end - idx) << PAGE_CACHE_SHIFT;
+		return (end - idx) << PAGE_SHIFT;
 }
 
 static void
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index bc21205..18e6fd0 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -40,8 +40,8 @@
 #include "../pnfs.h"
 #include "../netns.h"
 
-#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
-#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
+#define PAGE_CACHE_SECTORS (PAGE_SIZE >> SECTOR_SHIFT)
+#define PAGE_CACHE_SECTOR_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
 #define SECTOR_SIZE (1 << SECTOR_SHIFT)
 
 struct pnfs_block_dev;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index d6d5d2a..0c96528 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -736,7 +736,7 @@
 		server->rsize = max_rpc_payload;
 	if (server->rsize > NFS_MAX_FILE_IO_SIZE)
 		server->rsize = NFS_MAX_FILE_IO_SIZE;
-	server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	server->rpages = (server->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	server->backing_dev_info.name = "nfs";
 	server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
@@ -745,13 +745,13 @@
 		server->wsize = max_rpc_payload;
 	if (server->wsize > NFS_MAX_FILE_IO_SIZE)
 		server->wsize = NFS_MAX_FILE_IO_SIZE;
-	server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
 
 	server->dtsize = nfs_block_size(fsinfo->dtpref, NULL);
-	if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES)
-		server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES;
+	if (server->dtsize > PAGE_SIZE * NFS_MAX_READDIR_PAGES)
+		server->dtsize = PAGE_SIZE * NFS_MAX_READDIR_PAGES;
 	if (server->dtsize > server->rsize)
 		server->dtsize = server->rsize;
 
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4bfa7d8..aaf7bd0 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -57,7 +57,7 @@
 const struct file_operations nfs_dir_operations = {
 	.llseek		= nfs_llseek_dir,
 	.read		= generic_read_dir,
-	.iterate	= nfs_readdir,
+	.iterate_shared	= nfs_readdir,
 	.open		= nfs_opendir,
 	.release	= nfs_closedir,
 	.fsync		= nfs_fsync_dir,
@@ -145,6 +145,7 @@
 };
 
 struct nfs_cache_array {
+	atomic_t refcount;
 	int size;
 	int eof_index;
 	u64 last_cookie;
@@ -200,11 +201,20 @@
 	int i;
 
 	array = kmap_atomic(page);
-	for (i = 0; i < array->size; i++)
-		kfree(array->array[i].string.name);
+	if (atomic_dec_and_test(&array->refcount))
+		for (i = 0; i < array->size; i++)
+			kfree(array->array[i].string.name);
 	kunmap_atomic(array);
 }
 
+static bool grab_page(struct page *page)
+{
+	struct nfs_cache_array *array = kmap_atomic(page);
+	bool res = atomic_inc_not_zero(&array->refcount);
+	kunmap_atomic(array);
+	return res;
+}
+
 /*
  * the caller is responsible for freeing qstr.name
  * when called by nfs_readdir_add_to_array, the strings will be freed in
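
With nfs_readdir now running under ->iterate_shared, more than one reader can hold the same readdir cache page while invalidation clears it, so the name array carries its own reference count: grab_page() succeeds only while that count is still non-zero, and the strings are freed by whoever drops the last reference. A rough userspace analogue of that take-a-reference-only-if-still-live pattern (C11 atomics; names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cache_obj {
	atomic_int refcount;	/* 0 means the object is already being torn down */
	const char *payload;
};

/* counterpart of atomic_inc_not_zero(): take a ref only if one still exists */
static bool obj_grab(struct cache_obj *obj)
{
	int old = atomic_load(&obj->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&obj->refcount, &old, old + 1))
			return true;	/* we now hold a reference */
	}
	return false;			/* too late, object is dying */
}

static void obj_put(struct cache_obj *obj)
{
	if (atomic_fetch_sub(&obj->refcount, 1) == 1)
		printf("last ref dropped: free %s\n", obj->payload);
}

int main(void)
{
	struct cache_obj obj = { .payload = "readdir page" };

	atomic_init(&obj.refcount, 1);	/* creator's reference */
	if (obj_grab(&obj))		/* a concurrent reader grabs it */
		obj_put(&obj);
	obj_put(&obj);			/* creator drops the last reference */
	return 0;
}
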
@@ -377,7 +387,7 @@
  again:
 	timestamp = jiffies;
 	gencount = nfs_inc_attr_generation_counter();
-	error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
+	error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
 					  NFS_SERVER(inode)->dtsize, desc->plus);
 	if (error < 0) {
 		/* We requested READDIRPLUS, but the server doesn't grok it */
@@ -470,6 +480,7 @@
 void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
 {
 	struct qstr filename = QSTR_INIT(entry->name, entry->len);
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 	struct dentry *dentry;
 	struct dentry *alias;
 	struct inode *dir = d_inode(parent);
@@ -489,7 +500,13 @@
 	filename.hash = full_name_hash(filename.name, filename.len);
 
 	dentry = d_lookup(parent, &filename);
-	if (dentry != NULL) {
+again:
+	if (!dentry) {
+		dentry = d_alloc_parallel(parent, &filename, &wq);
+		if (IS_ERR(dentry))
+			return;
+	}
+	if (!d_in_lookup(dentry)) {
 		/* Is there a mountpoint here? If so, just exit */
 		if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid,
 					&entry->fattr->fsid))
@@ -503,26 +520,21 @@
 		} else {
 			d_invalidate(dentry);
 			dput(dentry);
+			dentry = NULL;
+			goto again;
 		}
 	}
 
-	dentry = d_alloc(parent, &filename);
-	if (dentry == NULL)
-		return;
-
 	inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr, entry->label);
-	if (IS_ERR(inode))
-		goto out;
-
 	alias = d_splice_alias(inode, dentry);
-	if (IS_ERR(alias))
-		goto out;
-	else if (alias) {
-		nfs_set_verifier(alias, nfs_save_change_attribute(dir));
-		dput(alias);
-	} else
-		nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
-
+	d_lookup_done(dentry);
+	if (alias) {
+		if (IS_ERR(alias))
+			goto out;
+		dput(dentry);
+		dentry = alias;
+	}
+	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
 out:
 	dput(dentry);
 }
@@ -560,7 +572,7 @@
 		count++;
 
 		if (desc->plus != 0)
-			nfs_prime_dcache(desc->file->f_path.dentry, entry);
+			nfs_prime_dcache(file_dentry(desc->file), entry);
 
 		status = nfs_readdir_add_to_array(entry, page);
 		if (status != 0)
@@ -643,6 +655,7 @@
 		goto out_label_free;
 	}
 	memset(array, 0, sizeof(struct nfs_cache_array));
+	atomic_set(&array->refcount, 1);
 	array->eof_index = -1;
 
 	status = nfs_readdir_alloc_pages(pages, array_size);
@@ -705,17 +718,24 @@
 static
 void cache_page_release(nfs_readdir_descriptor_t *desc)
 {
-	if (!desc->page->mapping)
-		nfs_readdir_clear_array(desc->page);
-	page_cache_release(desc->page);
+	nfs_readdir_clear_array(desc->page);
+	put_page(desc->page);
 	desc->page = NULL;
 }
 
 static
 struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
 {
-	return read_cache_page(file_inode(desc->file)->i_mapping,
+	struct page *page;
+
+	for (;;) {
+		page = read_cache_page(file_inode(desc->file)->i_mapping,
 			desc->page_index, (filler_t *)nfs_readdir_filler, desc);
+		if (IS_ERR(page) || grab_page(page))
+			break;
+		put_page(page);
+	}
+	return page;
 }
 
 /*
@@ -864,7 +884,7 @@
  */
 static int nfs_readdir(struct file *file, struct dir_context *ctx)
 {
-	struct dentry	*dentry = file->f_path.dentry;
+	struct dentry	*dentry = file_dentry(file);
 	struct inode	*inode = d_inode(dentry);
 	nfs_readdir_descriptor_t my_desc,
 			*desc = &my_desc;
@@ -889,7 +909,6 @@
 	desc->decode = NFS_PROTO(inode)->decode_dirent;
 	desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;
 
-	nfs_block_sillyrename(dentry);
 	if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode))
 		res = nfs_revalidate_mapping(inode, file->f_mapping);
 	if (res < 0)
@@ -925,7 +944,6 @@
 			break;
 	} while (!desc->eof);
 out:
-	nfs_unblock_sillyrename(dentry);
 	if (res > 0)
 		res = 0;
 	dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
@@ -934,13 +952,11 @@
 
 static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
 {
-	struct inode *inode = file_inode(filp);
 	struct nfs_open_dir_context *dir_ctx = filp->private_data;
 
 	dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
 			filp, offset, whence);
 
-	inode_lock(inode);
 	switch (whence) {
 		case 1:
 			offset += filp->f_pos;
@@ -948,16 +964,13 @@
 			if (offset >= 0)
 				break;
 		default:
-			offset = -EINVAL;
-			goto out;
+			return -EINVAL;
 	}
 	if (offset != filp->f_pos) {
 		filp->f_pos = offset;
 		dir_ctx->dir_cookie = 0;
 		dir_ctx->duped = 0;
 	}
-out:
-	inode_unlock(inode);
 	return offset;
 }
 
@@ -1383,7 +1396,6 @@
 	parent = dentry->d_parent;
 	/* Protect against concurrent sillydeletes */
 	trace_nfs_lookup_enter(dir, dentry, flags);
-	nfs_block_sillyrename(parent);
 	error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
 	if (error == -ENOENT)
 		goto no_entry;
@@ -1408,7 +1420,6 @@
 	}
 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
 out_unblock_sillyrename:
-	nfs_unblock_sillyrename(parent);
 	trace_nfs_lookup_exit(dir, dentry, flags, error);
 	nfs4_label_free(label);
 out:
@@ -1520,9 +1531,7 @@
 		goto out;
 
 	trace_nfs_atomic_open_enter(dir, ctx, open_flags);
-	nfs_block_sillyrename(dentry->d_parent);
 	inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened);
-	nfs_unblock_sillyrename(dentry->d_parent);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
@@ -1766,7 +1775,7 @@
 
 	trace_nfs_rmdir_enter(dir, dentry);
 	if (d_really_is_positive(dentry)) {
-		nfs_wait_on_sillyrename(dentry);
+		down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
 		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
 		/* Ensure the VFS deletes this inode */
 		switch (error) {
@@ -1776,6 +1785,7 @@
 		case -ENOENT:
 			nfs_dentry_handle_enoent(dentry);
 		}
+		up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
 	} else
 		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
 	trace_nfs_rmdir_exit(dir, dentry, error);
@@ -1923,7 +1933,7 @@
 		 * add_to_page_cache_lru() grabs an extra page refcount.
 		 * Drop it here to avoid leaking this page later.
 		 */
-		page_cache_release(page);
+		put_page(page);
 	} else
 		__free_page(page);
 
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 7a0cfd3..741a92c 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -250,7 +250,7 @@
  * shunt off direct read and write requests before the VFS gets them,
  * so this method is only ever called for swap.
  */
-ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
+ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 
@@ -261,7 +261,7 @@
 	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
 
 	if (iov_iter_rw(iter) == READ)
-		return nfs_file_direct_read(iocb, iter, pos);
+		return nfs_file_direct_read(iocb, iter);
 	return nfs_file_direct_write(iocb, iter);
 }
 
@@ -269,7 +269,7 @@
 {
 	unsigned int i;
 	for (i = 0; i < npages; i++)
-		page_cache_release(pages[i]);
+		put_page(pages[i]);
 }
 
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
@@ -396,7 +396,7 @@
 static void nfs_direct_readpage_release(struct nfs_page *req)
 {
 	dprintk("NFS: direct read done (%s/%llu %d@%lld)\n",
-		d_inode(req->wb_context->dentry)->i_sb->s_id,
+		req->wb_context->dentry->d_sb->s_id,
 		(unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
 		req->wb_bytes,
 		(long long)req_offset(req));
@@ -545,7 +545,6 @@
  * nfs_file_direct_read - file direct read operation for NFS files
  * @iocb: target I/O control block
  * @iter: vector of user buffers into which to read data
- * @pos: byte offset in file where reading starts
  *
  * We use this function for direct reads instead of calling
  * generic_file_aio_read() in order to avoid gfar's check to see if
@@ -561,8 +560,7 @@
  * client must read the updated atime from the server back into its
  * cache.
  */
-ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
-				loff_t pos)
+ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
@@ -574,7 +572,7 @@
 	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 
 	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
-		file, count, (long long) pos);
+		file, count, (long long) iocb->ki_pos);
 
 	result = 0;
 	if (!count)
@@ -594,7 +592,7 @@
 
 	dreq->inode = inode;
 	dreq->bytes_left = count;
-	dreq->io_start = pos;
+	dreq->io_start = iocb->ki_pos;
 	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
 	l_ctx = nfs_get_lock_context(dreq->ctx);
 	if (IS_ERR(l_ctx)) {
@@ -606,14 +604,14 @@
 		dreq->iocb = iocb;
 
 	NFS_I(inode)->read_io += count;
-	result = nfs_direct_read_schedule_iovec(dreq, iter, pos);
+	result = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
 
 	inode_unlock(inode);
 
 	if (!result) {
 		result = nfs_direct_wait(dreq);
 		if (result > 0)
-			iocb->ki_pos = pos + result;
+			iocb->ki_pos += result;
 	}
 
 	nfs_direct_req_release(dreq);
@@ -969,7 +967,6 @@
  * nfs_file_direct_write - file direct write operation for NFS files
  * @iocb: target I/O control block
  * @iter: vector of user buffers from which to write data
- * @pos: byte offset in file where writing starts
  *
  * We use this function for direct writes instead of calling
  * generic_file_aio_write() in order to avoid taking the inode
@@ -1003,7 +1000,7 @@
 		      iov_iter_count(iter));
 
 	pos = iocb->ki_pos;
-	end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT;
+	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
 
 	inode_lock(inode);
 
@@ -1013,7 +1010,7 @@
 
 	if (mapping->nrpages) {
 		result = invalidate_inode_pages2_range(mapping,
-					pos >> PAGE_CACHE_SHIFT, end);
+					pos >> PAGE_SHIFT, end);
 		if (result)
 			goto out_unlock;
 	}
@@ -1042,7 +1039,7 @@
 
 	if (mapping->nrpages) {
 		invalidate_inode_pages2_range(mapping,
-					      pos >> PAGE_CACHE_SHIFT, end);
+					      pos >> PAGE_SHIFT, end);
 	}
 
 	inode_unlock(inode);
@@ -1057,7 +1054,9 @@
 			if (i_size_read(inode) < iocb->ki_pos)
 				i_size_write(inode, iocb->ki_pos);
 			spin_unlock(&inode->i_lock);
-			generic_write_sync(file, pos, result);
+
+			/* XXX: should check the generic_write_sync retval */
+			generic_write_sync(iocb, result);
 		}
 	}
 	nfs_direct_req_release(dreq);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 89bf093..717a8d6 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -164,7 +164,7 @@
 	ssize_t result;
 
 	if (iocb->ki_flags & IOCB_DIRECT)
-		return nfs_file_direct_read(iocb, to, iocb->ki_pos);
+		return nfs_file_direct_read(iocb, to);
 
 	dprintk("NFS: read(%pD2, %zu@%lu)\n",
 		iocb->ki_filp,
@@ -320,7 +320,7 @@
 			loff_t pos, unsigned len)
 {
 	unsigned int pglen = nfs_page_length(page);
-	unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned int offset = pos & (PAGE_SIZE - 1);
 	unsigned int end = offset + len;
 
 	if (pnfs_ld_read_whole_page(file->f_mapping->host)) {
@@ -351,7 +351,7 @@
 			struct page **pagep, void **fsdata)
 {
 	int ret;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *page;
 	int once_thru = 0;
 
@@ -380,12 +380,12 @@
 	ret = nfs_flush_incompatible(file, page);
 	if (ret) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	} else if (!once_thru &&
 		   nfs_want_read_modify_write(file, page, pos, len)) {
 		once_thru = 1;
 		ret = nfs_readpage(file, page);
-		page_cache_release(page);
+		put_page(page);
 		if (!ret)
 			goto start;
 	}
@@ -396,7 +396,7 @@
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
 {
-	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned offset = pos & (PAGE_SIZE - 1);
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	int status;
 
@@ -413,20 +413,20 @@
 
 		if (pglen == 0) {
 			zero_user_segments(page, 0, offset,
-					end, PAGE_CACHE_SIZE);
+					end, PAGE_SIZE);
 			SetPageUptodate(page);
 		} else if (end >= pglen) {
-			zero_user_segment(page, end, PAGE_CACHE_SIZE);
+			zero_user_segment(page, end, PAGE_SIZE);
 			if (offset == 0)
 				SetPageUptodate(page);
 		} else
-			zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
+			zero_user_segment(page, pglen, PAGE_SIZE);
 	}
 
 	status = nfs_updatepage(file, page, offset, copied);
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (status < 0)
 		return status;
@@ -454,7 +454,7 @@
 	dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
 		 page, offset, length);
 
-	if (offset != 0 || length < PAGE_CACHE_SIZE)
+	if (offset != 0 || length < PAGE_SIZE)
 		return;
 	/* Cancel any unstarted writes on this page */
 	nfs_wb_page_cancel(page_file_mapping(page)->host, page);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 33d18c4..52e7d68 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -940,7 +940,7 @@
 {
 	struct nfs_open_context *ctx;
 
-	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 	nfs_file_set_open_context(filp, ctx);
@@ -1958,9 +1958,7 @@
 	nfsi->nrequests = 0;
 	nfsi->commit_info.ncommit = 0;
 	atomic_set(&nfsi->commit_info.rpcs_out, 0);
-	atomic_set(&nfsi->silly_count, 1);
-	INIT_HLIST_HEAD(&nfsi->silly_list);
-	init_waitqueue_head(&nfsi->waitqueue);
+	init_rwsem(&nfsi->rmdir_sem);
 	nfs4_init_once(nfsi);
 }
 
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 565f813..f1d1d2c 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -638,11 +638,11 @@
 
 	if (i_size > 0) {
 		pgoff_t page_index = page_file_index(page);
-		pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+		pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
 		if (page_index < end_index)
-			return PAGE_CACHE_SIZE;
+			return PAGE_SIZE;
 		if (page_index == end_index)
-			return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
+			return ((i_size - 1) & ~PAGE_MASK) + 1;
 	}
 	return 0;
 }
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 17c0fa1..720d92f5 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -11,6 +11,38 @@
 
 #define NFSDBG_FACILITY	NFSDBG_PROC
 
+/*
+ * nfs3_prepare_get_acl, nfs3_complete_get_acl, nfs3_abort_get_acl: Helpers for
+ * caching get_acl results in a race-free way.  See fs/posix_acl.c:get_acl()
+ * for explanations.
+ */
+static void nfs3_prepare_get_acl(struct posix_acl **p)
+{
+	struct posix_acl *sentinel = uncached_acl_sentinel(current);
+
+	if (cmpxchg(p, ACL_NOT_CACHED, sentinel) != ACL_NOT_CACHED) {
+		/* Not the first reader or sentinel already in place. */
+	}
+}
+
+static void nfs3_complete_get_acl(struct posix_acl **p, struct posix_acl *acl)
+{
+	struct posix_acl *sentinel = uncached_acl_sentinel(current);
+
+	/* Only cache the ACL if our sentinel is still in place. */
+	posix_acl_dup(acl);
+	if (cmpxchg(p, sentinel, acl) != sentinel)
+		posix_acl_release(acl);
+}
+
+static void nfs3_abort_get_acl(struct posix_acl **p)
+{
+	struct posix_acl *sentinel = uncached_acl_sentinel(current);
+
+	/* Remove our sentinel upon failure. */
+	cmpxchg(p, sentinel, ACL_NOT_CACHED);
+}
+
 struct posix_acl *nfs3_get_acl(struct inode *inode, int type)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
@@ -55,6 +87,11 @@
 	if (res.fattr == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	if (args.mask & NFS_ACL)
+		nfs3_prepare_get_acl(&inode->i_acl);
+	if (args.mask & NFS_DFACL)
+		nfs3_prepare_get_acl(&inode->i_default_acl);
+
 	status = rpc_call_sync(server->client_acl, &msg, 0);
 	dprintk("NFS reply getacl: %d\n", status);
 
@@ -89,12 +126,12 @@
 	}
 
 	if (res.mask & NFS_ACL)
-		set_cached_acl(inode, ACL_TYPE_ACCESS, res.acl_access);
+		nfs3_complete_get_acl(&inode->i_acl, res.acl_access);
 	else
 		forget_cached_acl(inode, ACL_TYPE_ACCESS);
 
 	if (res.mask & NFS_DFACL)
-		set_cached_acl(inode, ACL_TYPE_DEFAULT, res.acl_default);
+		nfs3_complete_get_acl(&inode->i_default_acl, res.acl_default);
 	else
 		forget_cached_acl(inode, ACL_TYPE_DEFAULT);
 
@@ -108,6 +145,8 @@
 	}
 
 getout:
+	nfs3_abort_get_acl(&inode->i_acl);
+	nfs3_abort_get_acl(&inode->i_default_acl);
 	posix_acl_release(res.acl_access);
 	posix_acl_release(res.acl_default);
 	nfs_free_fattr(res.fattr);
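
The three helpers added above implement the sentinel protocol used by fs/posix_acl.c:get_acl(): before the GETACL call a per-task sentinel pointer is installed in inode->i_acl / i_default_acl with cmpxchg, and the reply is published only if that sentinel is still in place, so a concurrent invalidation or another caller's result is never clobbered. A rough userspace analogue of the same protocol using C11 atomics; a per-thread address stands in for uncached_acl_sentinel() and the reference counting of the kernel version is left out:

/* acl_sentinel.c - illustrative sketch of the prepare/complete/abort pattern
 * above, in userspace.  NOT_CACHED plays the role of ACL_NOT_CACHED and the
 * per-thread address of 'sentinel_storage' plays the role of the sentinel. */
#include <stdatomic.h>
#include <stdio.h>

#define NOT_CACHED ((void *)0)

static _Atomic(void *) cached_acl = NOT_CACHED;	/* stands in for inode->i_acl */
static _Thread_local char sentinel_storage;	/* unique address per thread */

static void *my_sentinel(void) { return &sentinel_storage; }

static void prepare_get_acl(void)
{
	void *expected = NOT_CACHED;

	/* Claim the empty slot; if someone beat us to it, that is fine. */
	atomic_compare_exchange_strong(&cached_acl, &expected, my_sentinel());
}

static void complete_get_acl(void *acl)
{
	void *expected = my_sentinel();

	/* Publish the result only if our sentinel is still in place. */
	if (!atomic_compare_exchange_strong(&cached_acl, &expected, acl)) {
		/* Lost the race: another caller or an invalidation won. */
	}
}

static void abort_get_acl(void)
{
	void *expected = my_sentinel();

	/* On failure, remove our sentinel so later readers retry. */
	atomic_compare_exchange_strong(&cached_acl, &expected, NOT_CACHED);
}

int main(void)
{
	static int fake_acl = 42;

	prepare_get_acl();
	complete_get_acl(&fake_acl);	/* abort_get_acl() on RPC failure */
	printf("cached: %p\n", atomic_load(&cached_acl));
	return 0;
}
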
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 22c35ab..d039051 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -26,7 +26,7 @@
 nfs4_file_open(struct inode *inode, struct file *filp)
 {
 	struct nfs_open_context *ctx;
-	struct dentry *dentry = filp->f_path.dentry;
+	struct dentry *dentry = file_dentry(filp);
 	struct dentry *parent = NULL;
 	struct inode *dir;
 	unsigned openflags = filp->f_flags;
@@ -57,7 +57,7 @@
 	parent = dget_parent(dentry);
 	dir = d_inode(parent);
 
-	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
 	err = PTR_ERR(ctx);
 	if (IS_ERR(ctx))
 		goto out;
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 5ba22c6..c444285 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -201,7 +201,7 @@
 				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
 				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
 				KEY_USR_VIEW | KEY_USR_READ,
-				KEY_ALLOC_NOT_IN_QUOTA, NULL);
+				KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
 	if (IS_ERR(keyring)) {
 		ret = PTR_ERR(keyring);
 		goto failed_put_cred;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 327b8c3..084e857 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3777,7 +3777,7 @@
 
 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
 {
-	nfs4_setup_sequence(NFS_SERVER(data->dir),
+	nfs4_setup_sequence(NFS_SB(data->dentry->d_sb),
 			&data->args.seq_args,
 			&data->res.seq_res,
 			task);
@@ -6263,10 +6263,10 @@
 }
 
 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
-				   struct dentry *dentry, const char *key,
-				   void *buf, size_t buflen)
+				   struct dentry *unused, struct inode *inode,
+				   const char *key, void *buf, size_t buflen)
 {
-	return nfs4_proc_get_acl(d_inode(dentry), buf, buflen);
+	return nfs4_proc_get_acl(inode, buf, buflen);
 }
 
 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
@@ -6288,11 +6288,11 @@
 }
 
 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
-				     struct dentry *dentry, const char *key,
-				     void *buf, size_t buflen)
+				     struct dentry *unused, struct inode *inode,
+				     const char *key, void *buf, size_t buflen)
 {
 	if (security_ismaclabel(key))
-		return nfs4_get_security_label(d_inode(dentry), buf, buflen);
+		return nfs4_get_security_label(inode, buf, buflen);
 	return -EOPNOTSUPP;
 }
 
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 4e44412..88474a4 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -5001,7 +5001,7 @@
 		blocksize = be32_to_cpup(p);
 		maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
 	}
-	maxsize >>= PAGE_CACHE_SHIFT;
+	maxsize >>= PAGE_SHIFT;
 	*pagemod_limit = min_t(u64, maxsize, ULONG_MAX);
 	return 0;
 out_overflow:
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 9f80a08..0b9e5cc 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -702,7 +702,7 @@
 		),
 
 		TP_fast_assign(
-			struct inode *dir = data->dir;
+			struct inode *dir = d_inode(data->dentry->d_parent);
 			size_t len = data->args.name.len;
 			__entry->dev = dir->i_sb->s_dev;
 			__entry->dir = NFS_FILEID(dir);
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9aebffb..049c1b1 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -486,7 +486,7 @@
 	dprintk("%s: index=0x%lx\n", __func__,
 		(page == ZERO_PAGE(0)) ? -1UL : page->index);
 	if (ZERO_PAGE(0) != page)
-		page_cache_release(page);
+		put_page(page);
 	return;
 }
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 8ce4f61..1f6db42 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -342,7 +342,7 @@
 	 * update_nfs_request below if the region is not locked. */
 	req->wb_page    = page;
 	req->wb_index	= page_file_index(page);
-	page_cache_get(page);
+	get_page(page);
 	req->wb_offset  = offset;
 	req->wb_pgbase	= offset;
 	req->wb_bytes   = count;
@@ -392,7 +392,7 @@
 	struct nfs_lock_context *l_ctx = req->wb_lock_context;
 
 	if (page != NULL) {
-		page_cache_release(page);
+		put_page(page);
 		req->wb_page = NULL;
 	}
 	if (l_ctx != NULL) {
@@ -904,7 +904,7 @@
 				return false;
 		} else {
 			if (req->wb_pgbase != 0 ||
-			    prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
 				return false;
 		}
 	}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 2fa483e..89a5ef4 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -841,7 +841,7 @@
 
 		i_size = i_size_read(ino);
 
-		lgp->args.minlength = PAGE_CACHE_SIZE;
+		lgp->args.minlength = PAGE_SIZE;
 		if (lgp->args.minlength > range->length)
 			lgp->args.minlength = range->length;
 		if (range->iomode == IOMODE_READ) {
@@ -1618,13 +1618,13 @@
 		spin_unlock(&clp->cl_lock);
 	}
 
-	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
+	pg_offset = arg.offset & ~PAGE_MASK;
 	if (pg_offset) {
 		arg.offset -= pg_offset;
 		arg.length += pg_offset;
 	}
 	if (arg.length != NFS4_MAX_UINT64)
-		arg.length = PAGE_CACHE_ALIGN(arg.length);
+		arg.length = PAGE_ALIGN(arg.length);
 
 	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
 	atomic_dec(&lo->plh_outstanding);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index eb31e23..6776d7a 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -46,7 +46,7 @@
 static
 int nfs_return_empty_page(struct page *page)
 {
-	zero_user(page, 0, PAGE_CACHE_SIZE);
+	zero_user(page, 0, PAGE_SIZE);
 	SetPageUptodate(page);
 	unlock_page(page);
 	return 0;
@@ -118,8 +118,8 @@
 		unlock_page(page);
 		return PTR_ERR(new);
 	}
-	if (len < PAGE_CACHE_SIZE)
-		zero_user_segment(page, len, PAGE_CACHE_SIZE);
+	if (len < PAGE_SIZE)
+		zero_user_segment(page, len, PAGE_SIZE);
 
 	nfs_pageio_init_read(&pgio, inode, false,
 			     &nfs_async_read_completion_ops);
@@ -295,7 +295,7 @@
 	int		error;
 
 	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
-		page, PAGE_CACHE_SIZE, page_file_index(page));
+		page, PAGE_SIZE, page_file_index(page));
 	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
 	nfs_add_stats(inode, NFSIOS_READPAGES, 1);
 
@@ -361,8 +361,8 @@
 	if (IS_ERR(new))
 		goto out_error;
 
-	if (len < PAGE_CACHE_SIZE)
-		zero_user_segment(page, len, PAGE_CACHE_SIZE);
+	if (len < PAGE_SIZE)
+		zero_user_segment(page, len, PAGE_SIZE);
 	if (!nfs_pageio_add_request(desc->pgio, new)) {
 		nfs_list_remove_request(new);
 		nfs_readpage_release(new);
@@ -424,8 +424,8 @@
 
 	pgm = &pgio.pg_mirrors[0];
 	NFS_I(inode)->read_io += pgm->pg_bytes_written;
-	npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >>
-		 PAGE_CACHE_SHIFT;
+	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >>
+		 PAGE_SHIFT;
 	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
 read_complete:
 	put_nfs_open_context(desc.ctx);
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index fa538b2..1868246 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -30,45 +30,11 @@
 static void
 nfs_free_unlinkdata(struct nfs_unlinkdata *data)
 {
-	iput(data->dir);
 	put_rpccred(data->cred);
 	kfree(data->args.name.name);
 	kfree(data);
 }
 
-#define NAME_ALLOC_LEN(len)	((len+16) & ~15)
-/**
- * nfs_copy_dname - copy dentry name to data structure
- * @dentry: pointer to dentry
- * @data: nfs_unlinkdata
- */
-static int nfs_copy_dname(struct dentry *dentry, struct nfs_unlinkdata *data)
-{
-	char		*str;
-	int		len = dentry->d_name.len;
-
-	str = kmemdup(dentry->d_name.name, NAME_ALLOC_LEN(len), GFP_KERNEL);
-	if (!str)
-		return -ENOMEM;
-	data->args.name.len = len;
-	data->args.name.name = str;
-	return 0;
-}
-
-static void nfs_free_dname(struct nfs_unlinkdata *data)
-{
-	kfree(data->args.name.name);
-	data->args.name.name = NULL;
-	data->args.name.len = 0;
-}
-
-static void nfs_dec_sillycount(struct inode *dir)
-{
-	struct nfs_inode *nfsi = NFS_I(dir);
-	if (atomic_dec_return(&nfsi->silly_count) == 1)
-		wake_up(&nfsi->waitqueue);
-}
-
 /**
  * nfs_async_unlink_done - Sillydelete post-processing
  * @task: rpc_task of the sillydelete
@@ -78,7 +44,7 @@
 static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs_unlinkdata *data = calldata;
-	struct inode *dir = data->dir;
+	struct inode *dir = d_inode(data->dentry->d_parent);
 
 	trace_nfs_sillyrename_unlink(data, task->tk_status);
 	if (!NFS_PROTO(dir)->unlink_done(task, dir))
@@ -95,17 +61,21 @@
 static void nfs_async_unlink_release(void *calldata)
 {
 	struct nfs_unlinkdata	*data = calldata;
-	struct super_block *sb = data->dir->i_sb;
+	struct dentry *dentry = data->dentry;
+	struct super_block *sb = dentry->d_sb;
 
-	nfs_dec_sillycount(data->dir);
+	up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
+	d_lookup_done(dentry);
 	nfs_free_unlinkdata(data);
+	dput(dentry);
 	nfs_sb_deactive(sb);
 }
 
 static void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs_unlinkdata *data = calldata;
-	NFS_PROTO(data->dir)->unlink_rpc_prepare(task, data);
+	struct inode *dir = d_inode(data->dentry->d_parent);
+	NFS_PROTO(dir)->unlink_rpc_prepare(task, data);
 }
 
 static const struct rpc_call_ops nfs_unlink_ops = {
@@ -114,7 +84,7 @@
 	.rpc_call_prepare = nfs_unlink_prepare,
 };
 
-static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data)
+static void nfs_do_call_unlink(struct nfs_unlinkdata *data)
 {
 	struct rpc_message msg = {
 		.rpc_argp = &data->args,
@@ -129,44 +99,7 @@
 		.flags = RPC_TASK_ASYNC,
 	};
 	struct rpc_task *task;
-	struct dentry *alias;
-
-	alias = d_lookup(parent, &data->args.name);
-	if (alias != NULL) {
-		int ret;
-		void *devname_garbage = NULL;
-
-		/*
-		 * Hey, we raced with lookup... See if we need to transfer
-		 * the sillyrename information to the aliased dentry.
-		 */
-		nfs_free_dname(data);
-		ret = nfs_copy_dname(alias, data);
-		spin_lock(&alias->d_lock);
-		if (ret == 0 && d_really_is_positive(alias) &&
-		    !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
-			devname_garbage = alias->d_fsdata;
-			alias->d_fsdata = data;
-			alias->d_flags |= DCACHE_NFSFS_RENAMED;
-			ret = 1;
-		} else
-			ret = 0;
-		spin_unlock(&alias->d_lock);
-		nfs_dec_sillycount(dir);
-		dput(alias);
-		/*
-		 * If we'd displaced old cached devname, free it.  At that
-		 * point dentry is definitely not a root, so we won't need
-		 * that anymore.
-		 */
-		kfree(devname_garbage);
-		return ret;
-	}
-	data->dir = igrab(dir);
-	if (!data->dir) {
-		nfs_dec_sillycount(dir);
-		return 0;
-	}
+	struct inode *dir = d_inode(data->dentry->d_parent);
 	nfs_sb_active(dir->i_sb);
 	data->args.fh = NFS_FH(dir);
 	nfs_fattr_init(data->res.dir_attr);
@@ -177,70 +110,50 @@
 	task = rpc_run_task(&task_setup_data);
 	if (!IS_ERR(task))
 		rpc_put_task_async(task);
-	return 1;
 }
 
 static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
 {
-	struct dentry *parent;
-	struct inode *dir;
-	int ret = 0;
+	struct inode *dir = d_inode(dentry->d_parent);
+	struct dentry *alias;
 
-
-	parent = dget_parent(dentry);
-	if (parent == NULL)
-		goto out_free;
-	dir = d_inode(parent);
-	/* Non-exclusive lock protects against concurrent lookup() calls */
-	spin_lock(&dir->i_lock);
-	if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) {
-		/* Deferred delete */
-		hlist_add_head(&data->list, &NFS_I(dir)->silly_list);
-		spin_unlock(&dir->i_lock);
-		ret = 1;
-		goto out_dput;
+	down_read_non_owner(&NFS_I(dir)->rmdir_sem);
+	alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq);
+	if (IS_ERR(alias)) {
+		up_read_non_owner(&NFS_I(dir)->rmdir_sem);
+		return 0;
 	}
-	spin_unlock(&dir->i_lock);
-	ret = nfs_do_call_unlink(parent, dir, data);
-out_dput:
-	dput(parent);
-out_free:
-	return ret;
-}
+	if (!d_in_lookup(alias)) {
+		int ret;
+		void *devname_garbage = NULL;
 
-void nfs_wait_on_sillyrename(struct dentry *dentry)
-{
-	struct nfs_inode *nfsi = NFS_I(d_inode(dentry));
-
-	wait_event(nfsi->waitqueue, atomic_read(&nfsi->silly_count) <= 1);
-}
-
-void nfs_block_sillyrename(struct dentry *dentry)
-{
-	struct nfs_inode *nfsi = NFS_I(d_inode(dentry));
-
-	wait_event(nfsi->waitqueue, atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1);
-}
-
-void nfs_unblock_sillyrename(struct dentry *dentry)
-{
-	struct inode *dir = d_inode(dentry);
-	struct nfs_inode *nfsi = NFS_I(dir);
-	struct nfs_unlinkdata *data;
-
-	atomic_inc(&nfsi->silly_count);
-	spin_lock(&dir->i_lock);
-	while (!hlist_empty(&nfsi->silly_list)) {
-		if (!atomic_inc_not_zero(&nfsi->silly_count))
-			break;
-		data = hlist_entry(nfsi->silly_list.first, struct nfs_unlinkdata, list);
-		hlist_del(&data->list);
-		spin_unlock(&dir->i_lock);
-		if (nfs_do_call_unlink(dentry, dir, data) == 0)
-			nfs_free_unlinkdata(data);
-		spin_lock(&dir->i_lock);
+		/*
+		 * Hey, we raced with lookup... See if we need to transfer
+		 * the sillyrename information to the aliased dentry.
+		 */
+		spin_lock(&alias->d_lock);
+		if (d_really_is_positive(alias) &&
+		    !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
+			devname_garbage = alias->d_fsdata;
+			alias->d_fsdata = data;
+			alias->d_flags |= DCACHE_NFSFS_RENAMED;
+			ret = 1;
+		} else
+			ret = 0;
+		spin_unlock(&alias->d_lock);
+		dput(alias);
+		up_read_non_owner(&NFS_I(dir)->rmdir_sem);
+		/*
+		 * If we'd displaced old cached devname, free it.  At that
+		 * point dentry is definitely not a root, so we won't need
+		 * that anymore.
+		 */
+		kfree(devname_garbage);
+		return ret;
 	}
-	spin_unlock(&dir->i_lock);
+	data->dentry = alias;
+	nfs_do_call_unlink(data);
+	return 1;
 }
 
 /**
@@ -249,7 +162,7 @@
  * @dentry: dentry to unlink
  */
 static int
-nfs_async_unlink(struct inode *dir, struct dentry *dentry)
+nfs_async_unlink(struct dentry *dentry, struct qstr *name)
 {
 	struct nfs_unlinkdata *data;
 	int status = -ENOMEM;
@@ -258,13 +171,18 @@
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (data == NULL)
 		goto out;
+	data->args.name.name = kstrdup(name->name, GFP_KERNEL);
+	if (!data->args.name.name)
+		goto out_free;
+	data->args.name.len = name->len;
 
 	data->cred = rpc_lookup_cred();
 	if (IS_ERR(data->cred)) {
 		status = PTR_ERR(data->cred);
-		goto out_free;
+		goto out_free_name;
 	}
 	data->res.dir_attr = &data->dir_attr;
+	init_waitqueue_head(&data->wq);
 
 	status = -EBUSY;
 	spin_lock(&dentry->d_lock);
@@ -284,6 +202,8 @@
 out_unlock:
 	spin_unlock(&dentry->d_lock);
 	put_rpccred(data->cred);
+out_free_name:
+	kfree(data->args.name.name);
 out_free:
 	kfree(data);
 out:
@@ -302,17 +222,15 @@
 void
 nfs_complete_unlink(struct dentry *dentry, struct inode *inode)
 {
-	struct nfs_unlinkdata	*data = NULL;
+	struct nfs_unlinkdata	*data;
 
 	spin_lock(&dentry->d_lock);
-	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
-		dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
-		data = dentry->d_fsdata;
-		dentry->d_fsdata = NULL;
-	}
+	dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
+	data = dentry->d_fsdata;
+	dentry->d_fsdata = NULL;
 	spin_unlock(&dentry->d_lock);
 
-	if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
+	if (NFS_STALE(inode) || !nfs_call_unlink(dentry, data))
 		nfs_free_unlinkdata(data);
 }
 
@@ -559,18 +477,10 @@
 	/* queue unlink first. Can't do this from rpc_release as it
 	 * has to allocate memory
 	 */
-	error = nfs_async_unlink(dir, dentry);
+	error = nfs_async_unlink(dentry, &sdentry->d_name);
 	if (error)
 		goto out_dput;
 
-	/* populate unlinkdata with the right dname */
-	error = nfs_copy_dname(sdentry,
-				(struct nfs_unlinkdata *)dentry->d_fsdata);
-	if (error) {
-		nfs_cancel_async_unlink(dentry);
-		goto out_dput;
-	}
-
 	/* run the rename task, undo unlink if it fails */
 	task = nfs_async_rename(dir, dir, dentry, sdentry,
 					nfs_complete_sillyrename);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5754835..5f4fd53 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -150,7 +150,7 @@
 
 	spin_lock(&inode->i_lock);
 	i_size = i_size_read(inode);
-	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (i_size - 1) >> PAGE_SHIFT;
 	if (i_size > 0 && page_file_index(page) < end_index)
 		goto out;
 	end = page_file_offset(page) + ((loff_t)offset+count);
@@ -1942,7 +1942,7 @@
 int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
 {
 	loff_t range_start = page_file_offset(page);
-	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = 0,
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 51c3b06..d818e4f 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -552,7 +552,7 @@
 	 * different read/write sizes for file systems known to have
 	 * problems with large blocks */
 	if (nfserr == 0) {
-		struct super_block *sb = d_inode(argp->fh.fh_dentry)->i_sb;
+		struct super_block *sb = argp->fh.fh_dentry->d_sb;
 
 		/* Note that we don't care for remote fs's here */
 		if (sb->s_magic == MSDOS_SUPER_MAGIC) {
@@ -588,7 +588,7 @@
 	nfserr = fh_verify(rqstp, &argp->fh, 0, NFSD_MAY_NOP);
 
 	if (nfserr == 0) {
-		struct super_block *sb = d_inode(argp->fh.fh_dentry)->i_sb;
+		struct super_block *sb = argp->fh.fh_dentry->d_sb;
 
 		/* Note that we don't care for remote fs's here */
 		switch (sb->s_magic) {
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 2246454..93d5853 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -146,7 +146,7 @@
 	default:
 	case FSIDSOURCE_DEV:
 		p = xdr_encode_hyper(p, (u64)huge_encode_dev
-				     (d_inode(fhp->fh_dentry)->i_sb->s_dev));
+				     (fhp->fh_dentry->d_sb->s_dev));
 		break;
 	case FSIDSOURCE_FSID:
 		p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid);
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index c1681ce..a891944 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -426,7 +426,7 @@
 
 static struct super_block *exp_sb(struct svc_export *exp)
 {
-	return d_inode(exp->ex_path.dentry)->i_sb;
+	return exp->ex_path.dentry->d_sb;
 }
 
 static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp)
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index d40010e..6fbd81e 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -935,8 +935,8 @@
 	int			stable = *stablep;
 	int			use_wgather;
 	loff_t			pos = offset;
-	loff_t			end = LLONG_MAX;
 	unsigned int		pflags = current->flags;
+	int			flags = 0;
 
 	if (test_bit(RQ_LOCAL, &rqstp->rq_flags))
 		/*
@@ -955,9 +955,12 @@
 	if (!EX_ISSYNC(exp))
 		stable = 0;
 
+	if (stable && !use_wgather)
+		flags |= RWF_SYNC;
+
 	/* Write the data. */
 	oldfs = get_fs(); set_fs(KERNEL_DS);
-	host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos, 0);
+	host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos, flags);
 	set_fs(oldfs);
 	if (host_err < 0)
 		goto out_nfserr;
@@ -965,15 +968,8 @@
 	nfsdstats.io_write += host_err;
 	fsnotify_modify(file);
 
-	if (stable) {
-		if (use_wgather) {
-			host_err = wait_for_concurrent_writes(file);
-		} else {
-			if (*cnt)
-				end = offset + *cnt - 1;
-			host_err = vfs_fsync_range(file, offset, end, 0);
-		}
-	}
+	if (stable && use_wgather)
+		host_err = wait_for_concurrent_writes(file);
 
 out_nfserr:
 	dprintk("nfsd: write complete host_err=%d\n", host_err);
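
With the flags argument, a stable NFS write that does not use write gathering now asks the filesystem for synchronous semantics on the write call itself (RWF_SYNC passed to vfs_writev()) instead of issuing a separate vfs_fsync_range() afterwards. The userspace counterpart of the same per-call flag is pwritev2(); a minimal sketch, assuming a kernel and libc recent enough to expose pwritev2() and RWF_SYNC:

/* rwf_sync_write.c - illustrative only: per-call synchronous write via
 * pwritev2(..., RWF_SYNC), the userspace analogue of the flag passed to
 * vfs_writev() above.  Assumes a recent kernel and a libc with pwritev2(). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[] = "stable data\n";
	struct iovec iov = { .iov_base = buf, .iov_len = strlen(buf) };
	int fd = open("out.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Data and metadata are durable when this returns successfully. */
	ssize_t n = pwritev2(fd, &iov, 1, 0, RWF_SYNC);
	if (n < 0)
		perror("pwritev2");

	close(fd);
	return n < 0;
}
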
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 27f75bc..a9fb363 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -458,7 +458,7 @@
 	struct buffer_head *pbh;
 	__u64 key;
 
-	key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT -
+	key = page_index(bh->b_page) << (PAGE_SHIFT -
 					 bmap->b_inode->i_blkbits);
 	for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
 		key++;
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index a35ae35..e0c9daf 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -62,7 +62,7 @@
 	set_buffer_uptodate(bh);
 
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	return bh;
 }
 
@@ -128,7 +128,7 @@
 
 out_locked:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -146,7 +146,7 @@
 	pgoff_t index = page_index(page);
 	int still_dirty;
 
-	page_cache_get(page);
+	get_page(page);
 	lock_page(page);
 	wait_on_page_writeback(page);
 
@@ -154,7 +154,7 @@
 	still_dirty = PageDirty(page);
 	mapping = page->mapping;
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (!still_dirty && mapping)
 		invalidate_inode_pages2_range(mapping, index, index);
@@ -181,7 +181,7 @@
 	obh = ctxt->bh;
 	ctxt->newbh = NULL;
 
-	if (inode->i_blkbits == PAGE_CACHE_SHIFT) {
+	if (inode->i_blkbits == PAGE_SHIFT) {
 		lock_page(obh->b_page);
 		/*
 		 * We cannot call radix_tree_preload for the kernels older
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 6b8b92b..6723d45 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -58,7 +58,7 @@
 static inline void nilfs_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -69,9 +69,9 @@
 {
 	unsigned last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -102,19 +102,19 @@
 	unlock_page(page);
 }
 
-static void nilfs_check_page(struct page *page)
+static bool nilfs_check_page(struct page *page)
 {
 	struct inode *dir = page->mapping->host;
 	struct super_block *sb = dir->i_sb;
 	unsigned chunk_size = nilfs_chunk_size(dir);
 	char *kaddr = page_address(page);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	struct nilfs_dir_entry *p;
 	char *error;
 
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & (chunk_size - 1))
 			goto Ebadsize;
 		if (!limit)
@@ -137,7 +137,7 @@
 		goto Eend;
 out:
 	SetPageChecked(page);
-	return;
+	return true;
 
 	/* Too bad, we had an error */
 
@@ -161,7 +161,7 @@
 bad_entry:
 	nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
 		    "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-		    dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		    dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 		    (unsigned long) le64_to_cpu(p->inode),
 		    rec_len, p->name_len);
 	goto fail;
@@ -170,11 +170,11 @@
 	nilfs_error(sb, "nilfs_check_page",
 		    "entry in directory #%lu spans the page boundary"
 		    "offset=%lu, inode=%lu",
-		    dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		    dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
 		    (unsigned long) le64_to_cpu(p->inode));
 fail:
-	SetPageChecked(page);
 	SetPageError(page);
+	return false;
 }
 
 static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
@@ -184,10 +184,10 @@
 
 	if (!IS_ERR(page)) {
 		kmap(page);
-		if (!PageChecked(page))
-			nilfs_check_page(page);
-		if (PageError(page))
-			goto fail;
+		if (unlikely(!PageChecked(page))) {
+			if (PageError(page) || !nilfs_check_page(page))
+				goto fail;
+		}
 	}
 	return page;
 
@@ -256,8 +256,8 @@
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 /*	unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
 
@@ -272,7 +272,7 @@
 		if (IS_ERR(page)) {
 			nilfs_error(sb, __func__, "bad page in #%lu",
 				    inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return -EIO;
 		}
 		kaddr = page_address(page);
@@ -361,7 +361,7 @@
 		if (++n >= npages)
 			n = 0;
 		/* next page is past the blocks we've got */
-		if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
+		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
 			nilfs_error(dir->i_sb, __func__,
 			       "dir %lu size %lld exceeds block count %llu",
 			       dir->i_ino, dir->i_size,
@@ -401,7 +401,7 @@
 	if (de) {
 		res = le64_to_cpu(de->inode);
 		kunmap(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return res;
 }
@@ -460,7 +460,7 @@
 		kaddr = page_address(page);
 		dir_end = kaddr + nilfs_last_byte(dir, n);
 		de = (struct nilfs_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				/* We hit i_size */
@@ -603,7 +603,7 @@
 	kunmap_atomic(kaddr);
 	nilfs_commit_chunk(page, mapping, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -661,7 +661,7 @@
 const struct file_operations nilfs_dir_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= nilfs_readdir,
+	.iterate_shared	= nilfs_readdir,
 	.unlocked_ioctl	= nilfs_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= nilfs_compat_ioctl,
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 748ca23..0224b78 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -115,7 +115,7 @@
 
  failed:
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	return err;
 }
 
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 21a1e2e..cfebcd2 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -249,7 +249,7 @@
 		if (nr_dirty)
 			nilfs_set_file_dirty(inode, nr_dirty);
 	} else if (ret) {
-		unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 
 		nilfs_set_file_dirty(inode, nr_dirty);
 	}
@@ -291,7 +291,7 @@
 			   struct page *page, void *fsdata)
 {
 	struct inode *inode = mapping->host;
-	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned start = pos & (PAGE_SIZE - 1);
 	unsigned nr_dirty;
 	int err;
 
@@ -305,7 +305,7 @@
 }
 
 static ssize_t
-nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
 
@@ -313,7 +313,7 @@
 		return 0;
 
 	/* Needs synchronization with the cleaner */
-	return blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);
+	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
 }
 
 const struct address_space_operations nilfs_aops = {
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 1125f40..f6982b9 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -110,7 +110,7 @@
 
  failed_bh:
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	brelse(bh);
 
  failed_unlock:
@@ -170,7 +170,7 @@
 
  failed_bh:
 	unlock_page(bh->b_page);
-	page_cache_release(bh->b_page);
+	put_page(bh->b_page);
 	brelse(bh);
  failed:
 	return ret;
@@ -363,7 +363,7 @@
 int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
 {
 	pgoff_t index = (pgoff_t)block >>
-		(PAGE_CACHE_SHIFT - inode->i_blkbits);
+		(PAGE_SHIFT - inode->i_blkbits);
 	struct page *page;
 	unsigned long first_block;
 	int ret = 0;
@@ -376,7 +376,7 @@
 	wait_on_page_writeback(page);
 
 	first_block = (unsigned long)index <<
-		(PAGE_CACHE_SHIFT - inode->i_blkbits);
+		(PAGE_SHIFT - inode->i_blkbits);
 	if (page_has_buffers(page)) {
 		struct buffer_head *bh;
 
@@ -385,7 +385,7 @@
 	}
 	still_dirty = PageDirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (still_dirty ||
 	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
@@ -578,7 +578,7 @@
 	}
 
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return 0;
 }
 
@@ -597,7 +597,7 @@
 			bh_frozen = nilfs_page_get_nth_block(page, n);
 		}
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return bh_frozen;
 }
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 7ccdb96..3b2af05 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -431,11 +431,11 @@
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	nilfs_transaction_abort(old_dir->i_sb);
 	return err;
@@ -457,7 +457,7 @@
 
 	root = NILFS_I(d_inode(child))->i_root;
 
-	inode = nilfs_iget(d_inode(child)->i_sb, root, ino);
+	inode = nilfs_iget(child->d_sb, root, ino);
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
 
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index c20df77..4893915 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -50,7 +50,7 @@
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, 1 << blkbits, b_state);
 
-	first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits);
+	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
 	bh = nilfs_page_get_nth_block(page, block - first_block);
 
 	touch_buffer(bh);
@@ -64,7 +64,7 @@
 				      unsigned long b_state)
 {
 	int blkbits = inode->i_blkbits;
-	pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
+	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
 	struct page *page;
 	struct buffer_head *bh;
 
@@ -75,7 +75,7 @@
 	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
 	if (unlikely(!bh)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return NULL;
 	}
 	return bh;
@@ -288,7 +288,7 @@
 		__set_page_dirty_nobuffers(dpage);
 
 		unlock_page(dpage);
-		page_cache_release(dpage);
+		put_page(dpage);
 		unlock_page(page);
 	}
 	pagevec_release(&pvec);
@@ -333,7 +333,7 @@
 			WARN_ON(PageDirty(dpage));
 			nilfs_copy_page(dpage, page, 0);
 			unlock_page(dpage);
-			page_cache_release(dpage);
+			put_page(dpage);
 		} else {
 			struct page *page2;
 
@@ -350,7 +350,7 @@
 			if (unlikely(err < 0)) {
 				WARN_ON(err == -EEXIST);
 				page->mapping = NULL;
-				page_cache_release(page); /* for cache */
+				put_page(page); /* for cache */
 			} else {
 				page->mapping = dmap;
 				dmap->nrpages++;
@@ -523,8 +523,8 @@
 	if (inode->i_mapping->nrpages == 0)
 		return 0;
 
-	index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
+	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);
 
 	pagevec_init(&pvec, 0);
 
@@ -537,7 +537,7 @@
 	if (length > 0 && pvec.pages[0]->index > index)
 		goto out;
 
-	b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
 	i = 0;
 	do {
 		page = pvec.pages[i];
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 9b4f205..5afa77f 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -544,14 +544,14 @@
 				blocksize, page, NULL);
 
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		(*nr_salvaged_blocks)++;
 		goto next;
 
  failed_page:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
  failed_inode:
 		printk(KERN_WARNING
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 3b65ada..4317f72 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2070,7 +2070,7 @@
 			goto failed_to_write;
 
 		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
-		    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
+		    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
 			/*
 			 * At this point, we avoid double buffering
 			 * for blocksize < pagesize because page dirty
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 7521e11..97768a1 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -74,7 +74,7 @@
 
 		set_buffer_uptodate(bh);
 
-		file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
+		file_ofs = ((s64)page->index << PAGE_SHIFT) +
 				bh_offset(bh);
 		read_lock_irqsave(&ni->size_lock, flags);
 		init_size = ni->initialized_size;
@@ -142,7 +142,7 @@
 		u32 rec_size;
 
 		rec_size = ni->itype.index.block_size;
-		recs = PAGE_CACHE_SIZE / rec_size;
+		recs = PAGE_SIZE / rec_size;
 		/* Should have been verified before we got here... */
 		BUG_ON(!recs);
 		local_irq_save(flags);
@@ -229,7 +229,7 @@
 	 * fully truncated, truncate will throw it away as soon as we unlock
 	 * it so no need to worry what we do with it.
 	 */
-	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+	iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
 	read_lock_irqsave(&ni->size_lock, flags);
 	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
 	init_size = ni->initialized_size;
@@ -412,9 +412,9 @@
 	vi = page->mapping->host;
 	i_size = i_size_read(vi);
 	/* Is the page fully outside i_size? (truncate in progress) */
-	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT)) {
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
+			PAGE_SHIFT)) {
+		zero_user(page, 0, PAGE_SIZE);
 		ntfs_debug("Read outside i_size - truncated?");
 		goto done;
 	}
@@ -463,7 +463,7 @@
 	 * ok to ignore the compressed flag here.
 	 */
 	if (unlikely(page->index > 0)) {
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+		zero_user(page, 0, PAGE_SIZE);
 		goto done;
 	}
 	if (!NInoAttr(ni))
@@ -509,7 +509,7 @@
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
 			attr_len);
 	/* Zero the remainder of the page. */
-	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
 	flush_dcache_page(page);
 	kunmap_atomic(addr);
 put_unm_err_out:
@@ -599,7 +599,7 @@
 	/* NOTE: Different naming scheme to ntfs_read_block()! */
 
 	/* The first block in the page. */
-	block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
+	block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
 
 	read_lock_irqsave(&ni->size_lock, flags);
 	i_size = i_size_read(vi);
@@ -674,7 +674,7 @@
 				// in the inode.
 				// Again, for each page do:
 				//	__set_page_dirty_buffers();
-				// page_cache_release()
+				// put_page()
 				// We don't need to wait on the writes.
 				// Update iblock.
 			}
@@ -925,7 +925,7 @@
 	ntfs_volume *vol = ni->vol;
 	u8 *kaddr;
 	unsigned int rec_size = ni->itype.index.block_size;
-	ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
+	ntfs_inode *locked_nis[PAGE_SIZE / rec_size];
 	struct buffer_head *bh, *head, *tbh, *rec_start_bh;
 	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
 	runlist_element *rl;
@@ -949,7 +949,7 @@
 			(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
 	bh_size = vol->sb->s_blocksize;
 	bh_size_bits = vol->sb->s_blocksize_bits;
-	max_bhs = PAGE_CACHE_SIZE / bh_size;
+	max_bhs = PAGE_SIZE / bh_size;
 	BUG_ON(!max_bhs);
 	BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
 
@@ -961,13 +961,13 @@
 	BUG_ON(!bh);
 
 	rec_size_bits = ni->itype.index.block_size_bits;
-	BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
+	BUG_ON(!(PAGE_SIZE >> rec_size_bits));
 	bhs_per_rec = rec_size >> bh_size_bits;
 	BUG_ON(!bhs_per_rec);
 
 	/* The first block in the page. */
 	rec_block = block = (sector_t)page->index <<
-			(PAGE_CACHE_SHIFT - bh_size_bits);
+			(PAGE_SHIFT - bh_size_bits);
 
 	/* The first out of bounds block for the data size. */
 	dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
@@ -1133,7 +1133,7 @@
 			unsigned long mft_no;
 
 			/* Get the mft record number. */
-			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
 					>> rec_size_bits;
 			/* Check whether to write this mft record. */
 			tni = NULL;
@@ -1249,7 +1249,7 @@
 				continue;
 			ofs = bh_offset(tbh);
 			/* Get the mft record number. */
-			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
+			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
 					>> rec_size_bits;
 			if (mft_no < vol->mftmirr_size)
 				ntfs_sync_mft_mirror(vol, mft_no,
@@ -1300,7 +1300,7 @@
 		 * Set page error if there is only one ntfs record in the page.
 		 * Otherwise we would loose per-record granularity.
 		 */
-		if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
+		if (ni->itype.index.block_size == PAGE_SIZE)
 			SetPageError(page);
 		NVolSetErrors(vol);
 	}
@@ -1308,7 +1308,7 @@
 		ntfs_debug("Page still contains one or more dirty ntfs "
 				"records.  Redirtying the page starting at "
 				"record 0x%lx.", page->index <<
-				(PAGE_CACHE_SHIFT - rec_size_bits));
+				(PAGE_SHIFT - rec_size_bits));
 		redirty_page_for_writepage(wbc, page);
 		unlock_page(page);
 	} else {
@@ -1365,13 +1365,13 @@
 	BUG_ON(!PageLocked(page));
 	i_size = i_size_read(vi);
 	/* Is the page fully outside i_size? (truncate in progress) */
-	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT)) {
+	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
+			PAGE_SHIFT)) {
 		/*
 		 * The page may have dirty, unmapped buffers.  Make them
 		 * freeable here, so the page does not leak.
 		 */
-		block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		block_invalidatepage(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		ntfs_debug("Write outside i_size - truncated?");
 		return 0;
@@ -1414,10 +1414,10 @@
 	/* NInoNonResident() == NInoIndexAllocPresent() */
 	if (NInoNonResident(ni)) {
 		/* We have to zero every time due to mmap-at-end-of-file. */
-		if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
+		if (page->index >= (i_size >> PAGE_SHIFT)) {
 			/* The page straddles i_size. */
-			unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
-			zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
+			unsigned int ofs = i_size & ~PAGE_MASK;
+			zero_user_segment(page, ofs, PAGE_SIZE);
 		}
 		/* Handle mst protected attributes. */
 		if (NInoMstProtected(ni))
@@ -1500,7 +1500,7 @@
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
 			addr, attr_len);
 	/* Zero out of bounds area in the page cache page. */
-	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
 	kunmap_atomic(addr);
 	flush_dcache_page(page);
 	flush_dcache_mft_record_page(ctx->ntfs_ino);
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index caecc58..820d6ea 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -40,7 +40,7 @@
 static inline void ntfs_unmap_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /**
@@ -49,7 +49,7 @@
  * @index:	index into the page cache for @mapping of the page to map
  *
  * Read a page from the page cache of the address space @mapping at position
- * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes.
+ * @index, where @index is in units of PAGE_SIZE, and not in bytes.
  *
  * If the page is not in memory it is loaded from disk first using the readpage
  * method defined in the address space operations of @mapping and the page is
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 250ed5b..44a39a0 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -152,7 +152,7 @@
 			if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
 					old_ctx.base_ntfs_ino) {
 				put_this_page = old_ctx.ntfs_ino->page;
-				page_cache_get(put_this_page);
+				get_page(put_this_page);
 			}
 			/*
 			 * Reinitialize the search context so we can lookup the
@@ -275,7 +275,7 @@
 		 * the pieces anyway.
 		 */
 		if (put_this_page)
-			page_cache_release(put_this_page);
+			put_page(put_this_page);
 	}
 	return err;
 }
@@ -1660,7 +1660,7 @@
 		memcpy(kaddr, (u8*)a +
 				le16_to_cpu(a->data.resident.value_offset),
 				attr_size);
-		memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
+		memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size);
 		kunmap_atomic(kaddr);
 		flush_dcache_page(page);
 		SetPageUptodate(page);
@@ -1748,7 +1748,7 @@
 	if (page) {
 		set_page_dirty(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	ntfs_debug("Done.");
 	return 0;
@@ -1835,7 +1835,7 @@
 		ntfs_free(rl);
 page_err_out:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	if (err == -EINVAL)
 		err = -EIO;
@@ -2513,17 +2513,17 @@
 	BUG_ON(NInoEncrypted(ni));
 	mapping = VFS_I(ni)->i_mapping;
 	/* Work out the starting index and page offset. */
-	idx = ofs >> PAGE_CACHE_SHIFT;
-	start_ofs = ofs & ~PAGE_CACHE_MASK;
+	idx = ofs >> PAGE_SHIFT;
+	start_ofs = ofs & ~PAGE_MASK;
 	/* Work out the ending index and page offset. */
 	end = ofs + cnt;
-	end_ofs = end & ~PAGE_CACHE_MASK;
+	end_ofs = end & ~PAGE_MASK;
 	/* If the end is outside the inode size return -ESPIPE. */
 	if (unlikely(end > i_size_read(VFS_I(ni)))) {
 		ntfs_error(vol->sb, "Request exceeds end of attribute.");
 		return -ESPIPE;
 	}
-	end >>= PAGE_CACHE_SHIFT;
+	end >>= PAGE_SHIFT;
 	/* If there is a first partial page, need to do it the slow way. */
 	if (start_ofs) {
 		page = read_mapping_page(mapping, idx, NULL);
@@ -2536,7 +2536,7 @@
 		 * If the last page is the same as the first page, need to
 		 * limit the write to the end offset.
 		 */
-		size = PAGE_CACHE_SIZE;
+		size = PAGE_SIZE;
 		if (idx == end)
 			size = end_ofs;
 		kaddr = kmap_atomic(page);
@@ -2544,7 +2544,7 @@
 		flush_dcache_page(page);
 		kunmap_atomic(kaddr);
 		set_page_dirty(page);
-		page_cache_release(page);
+		put_page(page);
 		balance_dirty_pages_ratelimited(mapping);
 		cond_resched();
 		if (idx == end)
@@ -2561,7 +2561,7 @@
 			return -ENOMEM;
 		}
 		kaddr = kmap_atomic(page);
-		memset(kaddr, val, PAGE_CACHE_SIZE);
+		memset(kaddr, val, PAGE_SIZE);
 		flush_dcache_page(page);
 		kunmap_atomic(kaddr);
 		/*
@@ -2585,7 +2585,7 @@
 		set_page_dirty(page);
 		/* Finally unlock and release the page. */
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		balance_dirty_pages_ratelimited(mapping);
 		cond_resched();
 	}
@@ -2602,7 +2602,7 @@
 		flush_dcache_page(page);
 		kunmap_atomic(kaddr);
 		set_page_dirty(page);
-		page_cache_release(page);
+		put_page(page);
 		balance_dirty_pages_ratelimited(mapping);
 		cond_resched();
 	}
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c
index 0809cf8..ec130c5 100644
--- a/fs/ntfs/bitmap.c
+++ b/fs/ntfs/bitmap.c
@@ -67,8 +67,8 @@
 	 * Calculate the indices for the pages containing the first and last
 	 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
 	 */
-	index = start_bit >> (3 + PAGE_CACHE_SHIFT);
-	end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT);
+	index = start_bit >> (3 + PAGE_SHIFT);
+	end_index = (start_bit + cnt - 1) >> (3 + PAGE_SHIFT);
 
 	/* Get the page containing the first bit (@start_bit). */
 	mapping = vi->i_mapping;
@@ -82,7 +82,7 @@
 	kaddr = page_address(page);
 
 	/* Set @pos to the position of the byte containing @start_bit. */
-	pos = (start_bit >> 3) & ~PAGE_CACHE_MASK;
+	pos = (start_bit >> 3) & ~PAGE_MASK;
 
 	/* Calculate the position of @start_bit in the first byte. */
 	bit = start_bit & 7;
@@ -108,7 +108,7 @@
 	 * Depending on @value, modify all remaining whole bytes in the page up
 	 * to @cnt.
 	 */
-	len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos);
+	len = min_t(s64, cnt >> 3, PAGE_SIZE - pos);
 	memset(kaddr + pos, value ? 0xff : 0, len);
 	cnt -= len << 3;
 
@@ -132,7 +132,7 @@
 		 * Depending on @value, modify all remaining whole bytes in the
 		 * page up to @cnt.
 		 */
-		len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE);
+		len = min_t(s64, cnt >> 3, PAGE_SIZE);
 		memset(kaddr, value ? 0xff : 0, len);
 		cnt -= len << 3;
 	}
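
The bitmap hunks keep the same index arithmetic under the new names: a bit number becomes a page index by shifting right by 3 (bits to bytes) plus PAGE_SHIFT (bytes to pages), and the byte holding the bit is found by masking off the page part. A tiny userspace check of that arithmetic, assuming 4096-byte pages for the example:

/* bitmap_math.c - illustrative only: locate the page, in-page byte and bit
 * position that hold a given bit, as in the ntfs bitmap code above. */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 12				/* 4096-byte pages assumed */
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))

int main(void)
{
	uint64_t start_bit = 1000000;

	uint64_t page_index   = start_bit >> (3 + EX_PAGE_SHIFT);	/* bits -> bytes -> pages */
	uint64_t byte_in_page = (start_bit >> 3) & ~EX_PAGE_MASK;	/* byte holding the bit */
	unsigned bit_in_byte  = start_bit & 7;

	printf("bit %llu -> page %llu, byte %llu, bit %u\n",
	       (unsigned long long)start_bit,
	       (unsigned long long)page_index,
	       (unsigned long long)byte_in_page,
	       bit_in_byte);
	return 0;
}
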
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index f82498c..f2b5e74 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -104,16 +104,12 @@
 	unsigned int kp_ofs;
 
 	ntfs_debug("Zeroing page region outside initialized size.");
-	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
-		/*
-		 * FIXME: Using clear_page() will become wrong when we get
-		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
-		 */
+	if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
 		clear_page(kp);
 		return;
 	}
-	kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
-	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
+	kp_ofs = initialized_size & ~PAGE_MASK;
+	memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
 	return;
 }
 
@@ -123,7 +119,7 @@
 static inline void handle_bounds_compressed_page(struct page *page,
 		const loff_t i_size, const s64 initialized_size)
 {
-	if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
+	if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
 			(initialized_size < i_size))
 		zero_partial_compressed_page(page, initialized_size);
 	return;
@@ -160,7 +156,7 @@
  * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
  * completed during the decompression of the compression block (@cb_start).
  *
- * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
+ * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
  * unpredicatbly! You have been warned!
  *
  * Note to hackers: This function may not sleep until it has finished accessing
@@ -241,7 +237,7 @@
 				if (di == xpage)
 					*xpage_done = 1;
 				else
-					page_cache_release(dp);
+					put_page(dp);
 				dest_pages[di] = NULL;
 			}
 		}
@@ -274,7 +270,7 @@
 		cb = cb_sb_end;
 
 		/* Advance destination position to next sub-block. */
-		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
+		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
 		if (!*dest_ofs && (++*dest_index > dest_max_index))
 			goto return_overflow;
 		goto do_next_sb;
@@ -301,7 +297,7 @@
 
 		/* Advance destination position to next sub-block. */
 		*dest_ofs += NTFS_SB_SIZE;
-		if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
+		if (!(*dest_ofs &= ~PAGE_MASK)) {
 finalize_page:
 			/*
 			 * First stage: add current page index to array of
@@ -335,7 +331,7 @@
 			*dest_ofs += nr_bytes;
 		}
 		/* We have finished the current sub-block. */
-		if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
+		if (!(*dest_ofs &= ~PAGE_MASK))
 			goto finalize_page;
 		goto do_next_sb;
 	}
@@ -462,7 +458,7 @@
  * have been written to so that we would lose data if we were to just overwrite
  * them with the out-of-date uncompressed data.
  *
- * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
+ * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
  * the end of the file I think. We need to detect this case and zero the out
  * of bounds remainder of the page in question and mark it as handled. At the
  * moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@
  * clusters so is probably not going to be seen by anyone. Still this should
  * be fixed. (AIA)
  *
- * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
+ * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
  * handling sparse and compressed cbs. (AIA)
  *
  * FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,14 +493,14 @@
 	u64 cb_size_mask = cb_size - 1UL;
 	VCN vcn;
 	LCN lcn;
-	/* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
-	VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
+	/* The first wanted vcn (minimum alignment is PAGE_SIZE). */
+	VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
 			vol->cluster_size_bits;
 	/*
 	 * The first vcn after the last wanted vcn (minimum alignment is again
-	 * PAGE_CACHE_SIZE.
+	 * PAGE_SIZE.
 	 */
-	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
+	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
 			& ~cb_size_mask) >> vol->cluster_size_bits;
 	/* Number of compression blocks (cbs) in the wanted vcn range. */
 	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
@@ -515,7 +511,7 @@
 	 * guarantees of start_vcn and end_vcn, no need to round up here.
 	 */
 	unsigned int nr_pages = (end_vcn - start_vcn) <<
-			vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+			vol->cluster_size_bits >> PAGE_SHIFT;
 	unsigned int xpage, max_page, cur_page, cur_ofs, i;
 	unsigned int cb_clusters, cb_max_ofs;
 	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
@@ -549,7 +545,7 @@
 	 * We have already been given one page, this is the one we must do.
 	 * Once again, the alignment guarantees keep it simple.
 	 */
-	offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
+	offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
 	xpage = index - offset;
 	pages[xpage] = page;
 	/*
@@ -560,13 +556,13 @@
 	i_size = i_size_read(VFS_I(ni));
 	initialized_size = ni->initialized_size;
 	read_unlock_irqrestore(&ni->size_lock, flags);
-	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+	max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
 			offset;
 	/* Is the page fully outside i_size? (truncate in progress) */
 	if (xpage >= max_page) {
 		kfree(bhs);
 		kfree(pages);
-		zero_user(page, 0, PAGE_CACHE_SIZE);
+		zero_user(page, 0, PAGE_SIZE);
 		ntfs_debug("Compressed read outside i_size - truncated?");
 		SetPageUptodate(page);
 		unlock_page(page);
@@ -591,7 +587,7 @@
 				continue;
 			}
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			pages[i] = NULL;
 		}
 	}
@@ -735,9 +731,9 @@
 	ntfs_debug("Successfully read the compression block.");
 
 	/* The last page and maximum offset within it for the current cb. */
-	cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
-	cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
-	cb_max_page >>= PAGE_CACHE_SHIFT;
+	cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
+	cb_max_ofs = cb_max_page & ~PAGE_MASK;
+	cb_max_page >>= PAGE_SHIFT;
 
 	/* Catch end of file inside a compression block. */
 	if (cb_max_page > max_page)
@@ -753,16 +749,11 @@
 		for (; cur_page < cb_max_page; cur_page++) {
 			page = pages[cur_page];
 			if (page) {
-				/*
-				 * FIXME: Using clear_page() will become wrong
-				 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
-				 * for now there is no problem.
-				 */
 				if (likely(!cur_ofs))
 					clear_page(page_address(page));
 				else
 					memset(page_address(page) + cur_ofs, 0,
-							PAGE_CACHE_SIZE -
+							PAGE_SIZE -
 							cur_ofs);
 				flush_dcache_page(page);
 				kunmap(page);
@@ -771,10 +762,10 @@
 				if (cur_page == xpage)
 					xpage_done = 1;
 				else
-					page_cache_release(page);
+					put_page(page);
 				pages[cur_page] = NULL;
 			}
-			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+			cb_pos += PAGE_SIZE - cur_ofs;
 			cur_ofs = 0;
 			if (cb_pos >= cb_end)
 				break;
@@ -807,7 +798,7 @@
 		 * synchronous io for the majority of pages.
 		 * Or if we choose not to do the read-ahead/-behind stuff, we
 		 * could just return block_read_full_page(pages[xpage]) as long
-		 * as PAGE_CACHE_SIZE <= cb_size.
+		 * as PAGE_SIZE <= cb_size.
 		 */
 		if (cb_max_ofs)
 			cb_max_page--;
@@ -816,8 +807,8 @@
 			page = pages[cur_page];
 			if (page)
 				memcpy(page_address(page) + cur_ofs, cb_pos,
-						PAGE_CACHE_SIZE - cur_ofs);
-			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
+						PAGE_SIZE - cur_ofs);
+			cb_pos += PAGE_SIZE - cur_ofs;
 			cur_ofs = 0;
 			if (cb_pos >= cb_end)
 				break;
@@ -850,10 +841,10 @@
 				if (cur2_page == xpage)
 					xpage_done = 1;
 				else
-					page_cache_release(page);
+					put_page(page);
 				pages[cur2_page] = NULL;
 			}
-			cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
+			cb_pos2 += PAGE_SIZE - cur_ofs2;
 			cur_ofs2 = 0;
 			if (cb_pos2 >= cb_end)
 				break;
@@ -884,7 +875,7 @@
 					kunmap(page);
 					unlock_page(page);
 					if (prev_cur_page != xpage)
-						page_cache_release(page);
+						put_page(page);
 					pages[prev_cur_page] = NULL;
 				}
 			}
@@ -914,7 +905,7 @@
 			kunmap(page);
 			unlock_page(page);
 			if (cur_page != xpage)
-				page_cache_release(page);
+				put_page(page);
 			pages[cur_page] = NULL;
 		}
 	}
@@ -961,7 +952,7 @@
 			kunmap(page);
 			unlock_page(page);
 			if (i != xpage)
-				page_cache_release(page);
+				put_page(page);
 		}
 	}
 	kfree(pages);
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index b2eff58..a186135 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -315,11 +315,11 @@
 descend_into_child_node:
 	/*
 	 * Convert vcn to index into the index allocation attribute in units
-	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+	 * of PAGE_SIZE and map the page cache page, reading it from
 	 * disk if necessary.
 	 */
 	page = ntfs_map_page(ia_mapping, vcn <<
-			dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+			dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		ntfs_error(sb, "Failed to map directory index page, error %ld.",
 				-PTR_ERR(page));
@@ -331,9 +331,9 @@
 fast_descend_into_child_node:
 	/* Get to the index allocation block. */
 	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
-			dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+			dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
 	/* Bounds checks. */
-	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
 		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
 				"inode 0x%lx or driver bug.", dir_ni->mft_no);
 		goto unm_err_out;
@@ -366,7 +366,7 @@
 		goto unm_err_out;
 	}
 	index_end = (u8*)ia + dir_ni->itype.index.block_size;
-	if (index_end > kaddr + PAGE_CACHE_SIZE) {
+	if (index_end > kaddr + PAGE_SIZE) {
 		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
 				"0x%lx crosses page boundary. Impossible! "
 				"Cannot access! This is probably a bug in the "
@@ -559,9 +559,9 @@
 			/* If vcn is in the same page cache page as old_vcn we
 			 * recycle the mapped page. */
 			if (old_vcn << vol->cluster_size_bits >>
-					PAGE_CACHE_SHIFT == vcn <<
+					PAGE_SHIFT == vcn <<
 					vol->cluster_size_bits >>
-					PAGE_CACHE_SHIFT)
+					PAGE_SHIFT)
 				goto fast_descend_into_child_node;
 			unlock_page(page);
 			ntfs_unmap_page(page);
@@ -793,11 +793,11 @@
 descend_into_child_node:
 	/*
 	 * Convert vcn to index into the index allocation attribute in units
-	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+	 * of PAGE_SIZE and map the page cache page, reading it from
 	 * disk if necessary.
 	 */
 	page = ntfs_map_page(ia_mapping, vcn <<
-			dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+			dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		ntfs_error(sb, "Failed to map directory index page, error %ld.",
 				-PTR_ERR(page));
@@ -809,9 +809,9 @@
 fast_descend_into_child_node:
 	/* Get to the index allocation block. */
 	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
-			dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+			dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
 	/* Bounds checks. */
-	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
 		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
 				"inode 0x%lx or driver bug.", dir_ni->mft_no);
 		goto unm_err_out;
@@ -844,7 +844,7 @@
 		goto unm_err_out;
 	}
 	index_end = (u8*)ia + dir_ni->itype.index.block_size;
-	if (index_end > kaddr + PAGE_CACHE_SIZE) {
+	if (index_end > kaddr + PAGE_SIZE) {
 		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
 				"0x%lx crosses page boundary. Impossible! "
 				"Cannot access! This is probably a bug in the "
@@ -968,9 +968,9 @@
 			/* If vcn is in the same page cache page as old_vcn we
 			 * recycle the mapped page. */
 			if (old_vcn << vol->cluster_size_bits >>
-					PAGE_CACHE_SHIFT == vcn <<
+					PAGE_SHIFT == vcn <<
 					vol->cluster_size_bits >>
-					PAGE_CACHE_SHIFT)
+					PAGE_SHIFT)
 				goto fast_descend_into_child_node;
 			unlock_page(page);
 			ntfs_unmap_page(page);
@@ -1246,15 +1246,15 @@
 		goto iput_err_out;
 	}
 	/* Get the starting bit position in the current bitmap page. */
-	cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1);
-	bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1);
+	cur_bmp_pos = bmp_pos & ((PAGE_SIZE * 8) - 1);
+	bmp_pos &= ~(u64)((PAGE_SIZE * 8) - 1);
 get_next_bmp_page:
 	ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx",
-			(unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT),
+			(unsigned long long)bmp_pos >> (3 + PAGE_SHIFT),
 			(unsigned long long)bmp_pos &
-			(unsigned long long)((PAGE_CACHE_SIZE * 8) - 1));
+			(unsigned long long)((PAGE_SIZE * 8) - 1));
 	bmp_page = ntfs_map_page(bmp_mapping,
-			bmp_pos >> (3 + PAGE_CACHE_SHIFT));
+			bmp_pos >> (3 + PAGE_SHIFT));
 	if (IS_ERR(bmp_page)) {
 		ntfs_error(sb, "Reading index bitmap failed.");
 		err = PTR_ERR(bmp_page);
@@ -1270,9 +1270,9 @@
 		 * If we have reached the end of the bitmap page, get the next
 		 * page, and put away the old one.
 		 */
-		if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) {
+		if (unlikely((cur_bmp_pos >> 3) >= PAGE_SIZE)) {
 			ntfs_unmap_page(bmp_page);
-			bmp_pos += PAGE_CACHE_SIZE * 8;
+			bmp_pos += PAGE_SIZE * 8;
 			cur_bmp_pos = 0;
 			goto get_next_bmp_page;
 		}
@@ -1285,8 +1285,8 @@
 	ntfs_debug("Handling index buffer 0x%llx.",
 			(unsigned long long)bmp_pos + cur_bmp_pos);
 	/* If the current index buffer is in the same page we reuse the page. */
-	if ((prev_ia_pos & (s64)PAGE_CACHE_MASK) !=
-			(ia_pos & (s64)PAGE_CACHE_MASK)) {
+	if ((prev_ia_pos & (s64)PAGE_MASK) !=
+			(ia_pos & (s64)PAGE_MASK)) {
 		prev_ia_pos = ia_pos;
 		if (likely(ia_page != NULL)) {
 			unlock_page(ia_page);
@@ -1296,7 +1296,7 @@
 		 * Map the page cache page containing the current ia_pos,
 		 * reading it from disk if necessary.
 		 */
-		ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT);
+		ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_SHIFT);
 		if (IS_ERR(ia_page)) {
 			ntfs_error(sb, "Reading index allocation data failed.");
 			err = PTR_ERR(ia_page);
@@ -1307,10 +1307,10 @@
 		kaddr = (u8*)page_address(ia_page);
 	}
 	/* Get the current index buffer. */
-	ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
-			~(s64)(ndir->itype.index.block_size - 1)));
+	ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_MASK &
+					  ~(s64)(ndir->itype.index.block_size - 1)));
 	/* Bounds checks. */
-	if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
+	if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) {
 		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
 				"inode 0x%lx or driver bug.", vdir->i_ino);
 		goto err_out;
@@ -1348,7 +1348,7 @@
 		goto err_out;
 	}
 	index_end = (u8*)ia + ndir->itype.index.block_size;
-	if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) {
+	if (unlikely(index_end > kaddr + PAGE_SIZE)) {
 		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
 				"0x%lx crosses page boundary. Impossible! "
 				"Cannot access! This is probably a bug in the "
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index bed4d42..5622ed5 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -220,8 +220,8 @@
 		m = NULL;
 	}
 	mapping = vi->i_mapping;
-	index = old_init_size >> PAGE_CACHE_SHIFT;
-	end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	index = old_init_size >> PAGE_SHIFT;
+	end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	do {
 		/*
 		 * Read the page.  If the page is not present, this will zero
@@ -233,7 +233,7 @@
 			goto init_err_out;
 		}
 		if (unlikely(PageError(page))) {
-			page_cache_release(page);
+			put_page(page);
 			err = -EIO;
 			goto init_err_out;
 		}
@@ -242,13 +242,13 @@
 		 * enough to make ntfs_writepage() work.
 		 */
 		write_lock_irqsave(&ni->size_lock, flags);
-		ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
+		ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT;
 		if (ni->initialized_size > new_init_size)
 			ni->initialized_size = new_init_size;
 		write_unlock_irqrestore(&ni->size_lock, flags);
 		/* Set the page dirty so it gets written out. */
 		set_page_dirty(page);
-		page_cache_release(page);
+		put_page(page);
 		/*
 		 * Play nice with the vm and the rest of the system.  This is
 		 * very much needed as we can potentially be modifying the
@@ -543,7 +543,7 @@
 err_out:
 	while (nr > 0) {
 		unlock_page(pages[--nr]);
-		page_cache_release(pages[nr]);
+		put_page(pages[nr]);
 	}
 	goto out;
 }
@@ -573,7 +573,7 @@
  * only partially being written to.
  *
  * If @nr_pages is greater than one, we are guaranteed that the cluster size is
- * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
+ * greater than PAGE_SIZE, that all pages in @pages are entirely inside
  * the same cluster and that they are the entirety of that cluster, and that
  * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
  *
@@ -653,7 +653,7 @@
 	u = 0;
 do_next_page:
 	page = pages[u];
-	bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
+	bh_pos = (s64)page->index << PAGE_SHIFT;
 	bh = head = page_buffers(page);
 	do {
 		VCN cdelta;
@@ -810,11 +810,11 @@
 					
 				kaddr = kmap_atomic(page);
 				if (bh_pos < pos) {
-					pofs = bh_pos & ~PAGE_CACHE_MASK;
+					pofs = bh_pos & ~PAGE_MASK;
 					memset(kaddr + pofs, 0, pos - bh_pos);
 				}
 				if (bh_end > end) {
-					pofs = end & ~PAGE_CACHE_MASK;
+					pofs = end & ~PAGE_MASK;
 					memset(kaddr + pofs, 0, bh_end - end);
 				}
 				kunmap_atomic(kaddr);
@@ -942,7 +942,7 @@
 		 * unmapped.  This can only happen when the cluster size is
 		 * less than the page cache size.
 		 */
-		if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
+		if (unlikely(vol->cluster_size < PAGE_SIZE)) {
 			bh_cend = (bh_end + vol->cluster_size - 1) >>
 					vol->cluster_size_bits;
 			if ((bh_cend <= cpos || bh_cpos >= cend)) {
@@ -1208,7 +1208,7 @@
 		wait_on_buffer(bh);
 		if (likely(buffer_uptodate(bh))) {
 			page = bh->b_page;
-			bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
+			bh_pos = ((s64)page->index << PAGE_SHIFT) +
 					bh_offset(bh);
 			/*
 			 * If the buffer overflows the initialized size, need
@@ -1350,7 +1350,7 @@
 		bh = head = page_buffers(page);
 		do {
 			if (u == nr_pages &&
-					((s64)page->index << PAGE_CACHE_SHIFT) +
+					((s64)page->index << PAGE_SHIFT) +
 					bh_offset(bh) >= end)
 				break;
 			if (!buffer_new(bh))
@@ -1422,7 +1422,7 @@
 		bool partial;
 
 		page = pages[u];
-		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
+		bh_pos = (s64)page->index << PAGE_SHIFT;
 		bh = head = page_buffers(page);
 		partial = false;
 		do {
@@ -1639,7 +1639,7 @@
 		if (end < attr_len)
 			memcpy(kaddr + end, kattr + end, attr_len - end);
 		/* Zero the region outside the end of the attribute value. */
-		memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
+		memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len);
 		flush_dcache_page(page);
 		SetPageUptodate(page);
 	}
@@ -1706,7 +1706,7 @@
 	unsigned len, copied;
 
 	do {
-		len = PAGE_CACHE_SIZE - ofs;
+		len = PAGE_SIZE - ofs;
 		if (len > bytes)
 			len = bytes;
 		copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
@@ -1724,14 +1724,14 @@
 	return total;
 err:
 	/* Zero the rest of the target like __copy_from_user(). */
-	len = PAGE_CACHE_SIZE - copied;
+	len = PAGE_SIZE - copied;
 	do {
 		if (len > bytes)
 			len = bytes;
 		zero_user(*pages, copied, len);
 		bytes -= len;
 		copied = 0;
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 	} while (++pages < last_page);
 	goto out;
 }
@@ -1787,8 +1787,8 @@
 	 * attributes.
 	 */
 	nr_pages = 1;
-	if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
-		nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
+	if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni))
+		nr_pages = vol->cluster_size >> PAGE_SHIFT;
 	last_vcn = -1;
 	do {
 		VCN vcn;
@@ -1796,9 +1796,9 @@
 		unsigned ofs, do_pages, u;
 		size_t copied;
 
-		start_idx = idx = pos >> PAGE_CACHE_SHIFT;
-		ofs = pos & ~PAGE_CACHE_MASK;
-		bytes = PAGE_CACHE_SIZE - ofs;
+		start_idx = idx = pos >> PAGE_SHIFT;
+		ofs = pos & ~PAGE_MASK;
+		bytes = PAGE_SIZE - ofs;
 		do_pages = 1;
 		if (nr_pages > 1) {
 			vcn = pos >> vol->cluster_size_bits;
@@ -1832,7 +1832,7 @@
 				if (lcn == LCN_HOLE) {
 					start_idx = (pos & ~(s64)
 							vol->cluster_size_mask)
-							>> PAGE_CACHE_SHIFT;
+							>> PAGE_SHIFT;
 					bytes = vol->cluster_size - (pos &
 							vol->cluster_size_mask);
 					do_pages = nr_pages;
@@ -1871,12 +1871,12 @@
 			if (unlikely(status)) {
 				do {
 					unlock_page(pages[--do_pages]);
-					page_cache_release(pages[do_pages]);
+					put_page(pages[do_pages]);
 				} while (do_pages);
 				break;
 			}
 		}
-		u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
+		u = (pos >> PAGE_SHIFT) - pages[0]->index;
 		copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
 					i, bytes);
 		ntfs_flush_dcache_pages(pages + u, do_pages - u);
@@ -1889,7 +1889,7 @@
 		}
 		do {
 			unlock_page(pages[--do_pages]);
-			page_cache_release(pages[do_pages]);
+			put_page(pages[do_pages]);
 		} while (do_pages);
 		if (unlikely(status < 0))
 			break;
@@ -1921,7 +1921,7 @@
 		}
 	} while (iov_iter_count(i));
 	if (cached_page)
-		page_cache_release(cached_page);
+		put_page(cached_page);
 	ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
 			written ? "written" : "status", (unsigned long)written,
 			(long)status);
@@ -1952,12 +1952,9 @@
 		written = ntfs_perform_write(file, from, iocb->ki_pos);
 	current->backing_dev_info = NULL;
 	inode_unlock(vi);
-	if (likely(written > 0)) {
-		err = generic_write_sync(file, iocb->ki_pos, written);
-		if (err < 0)
-			written = 0;
-	}
 	iocb->ki_pos += written;
+	if (likely(written > 0))
+		written = generic_write_sync(iocb, written);
 	return written ? written : err;
 }
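
The last hunk above adapts the write path to the new generic_write_sync() interface: the helper now takes the kiocb and a byte count, derives the range to sync from iocb->ki_pos (which is why ki_pos is advanced before the call), and returns the count or a negative error. A hedged sketch of the calling convention, with a made-up write helper:

#include <linux/fs.h>
#include <linux/uio.h>

ssize_t example_perform_write(struct file *file, struct iov_iter *from, loff_t pos);	/* assumed helper */

static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t written;

	written = example_perform_write(iocb->ki_filp, from, iocb->ki_pos);
	if (written > 0) {
		iocb->ki_pos += written;	/* advance first: the sync range ends at ki_pos */
		written = generic_write_sync(iocb, written);
	}
	return written;
}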
 
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index 096c135..0d645f3 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -272,11 +272,11 @@
 descend_into_child_node:
 	/*
 	 * Convert vcn to index into the index allocation attribute in units
-	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+	 * of PAGE_SIZE and map the page cache page, reading it from
 	 * disk if necessary.
 	 */
 	page = ntfs_map_page(ia_mapping, vcn <<
-			idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+			idx_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		ntfs_error(sb, "Failed to map index page, error %ld.",
 				-PTR_ERR(page));
@@ -288,9 +288,9 @@
 fast_descend_into_child_node:
 	/* Get to the index allocation block. */
 	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
-			idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+			idx_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
 	/* Bounds checks. */
-	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
 		ntfs_error(sb, "Out of bounds check failed.  Corrupt inode "
 				"0x%lx or driver bug.", idx_ni->mft_no);
 		goto unm_err_out;
@@ -323,7 +323,7 @@
 		goto unm_err_out;
 	}
 	index_end = (u8*)ia + idx_ni->itype.index.block_size;
-	if (index_end > kaddr + PAGE_CACHE_SIZE) {
+	if (index_end > kaddr + PAGE_SIZE) {
 		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
 				"crosses page boundary.  Impossible!  Cannot "
 				"access!  This is probably a bug in the "
@@ -427,9 +427,9 @@
 		 * the mapped page.
 		 */
 		if (old_vcn << vol->cluster_size_bits >>
-				PAGE_CACHE_SHIFT == vcn <<
+				PAGE_SHIFT == vcn <<
 				vol->cluster_size_bits >>
-				PAGE_CACHE_SHIFT)
+				PAGE_SHIFT)
 			goto fast_descend_into_child_node;
 		unlock_page(page);
 		ntfs_unmap_page(page);
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index d284f07..f40972d 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -868,12 +868,12 @@
 					ni->itype.index.block_size);
 			goto unm_err_out;
 		}
-		if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
+		if (ni->itype.index.block_size > PAGE_SIZE) {
 			ntfs_error(vi->i_sb, "Index block size (%u) > "
-					"PAGE_CACHE_SIZE (%ld) is not "
+					"PAGE_SIZE (%ld) is not "
 					"supported.  Sorry.",
 					ni->itype.index.block_size,
-					PAGE_CACHE_SIZE);
+					PAGE_SIZE);
 			err = -EOPNOTSUPP;
 			goto unm_err_out;
 		}
@@ -1585,10 +1585,10 @@
 				"two.", ni->itype.index.block_size);
 		goto unm_err_out;
 	}
-	if (ni->itype.index.block_size > PAGE_CACHE_SIZE) {
-		ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE "
+	if (ni->itype.index.block_size > PAGE_SIZE) {
+		ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE "
 				"(%ld) is not supported.  Sorry.",
-				ni->itype.index.block_size, PAGE_CACHE_SIZE);
+				ni->itype.index.block_size, PAGE_SIZE);
 		err = -EOPNOTSUPP;
 		goto unm_err_out;
 	}
diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c
index 1711b71..27a24a4 100644
--- a/fs/ntfs/lcnalloc.c
+++ b/fs/ntfs/lcnalloc.c
@@ -283,15 +283,15 @@
 			ntfs_unmap_page(page);
 		}
 		page = ntfs_map_page(mapping, last_read_pos >>
-				PAGE_CACHE_SHIFT);
+				PAGE_SHIFT);
 		if (IS_ERR(page)) {
 			err = PTR_ERR(page);
 			ntfs_error(vol->sb, "Failed to map page.");
 			goto out;
 		}
-		buf_size = last_read_pos & ~PAGE_CACHE_MASK;
+		buf_size = last_read_pos & ~PAGE_MASK;
 		buf = page_address(page) + buf_size;
-		buf_size = PAGE_CACHE_SIZE - buf_size;
+		buf_size = PAGE_SIZE - buf_size;
 		if (unlikely(last_read_pos + buf_size > i_size))
 			buf_size = i_size - last_read_pos;
 		buf_size <<= 3;
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index c71de29..9d71213 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -381,7 +381,7 @@
 	 * completely inside @rp, just copy it from there.  Otherwise map all
 	 * the required pages and copy the data from them.
 	 */
-	size = PAGE_CACHE_SIZE - (pos & ~PAGE_CACHE_MASK);
+	size = PAGE_SIZE - (pos & ~PAGE_MASK);
 	if (size >= le32_to_cpu(rp->system_page_size)) {
 		memcpy(trp, rp, le32_to_cpu(rp->system_page_size));
 	} else {
@@ -394,8 +394,8 @@
 		/* Copy the remaining data one page at a time. */
 		have_read = size;
 		to_read = le32_to_cpu(rp->system_page_size) - size;
-		idx = (pos + size) >> PAGE_CACHE_SHIFT;
-		BUG_ON((pos + size) & ~PAGE_CACHE_MASK);
+		idx = (pos + size) >> PAGE_SHIFT;
+		BUG_ON((pos + size) & ~PAGE_MASK);
 		do {
 			page = ntfs_map_page(vi->i_mapping, idx);
 			if (IS_ERR(page)) {
@@ -406,7 +406,7 @@
 					err = -EIO;
 				goto err_out;
 			}
-			size = min_t(int, to_read, PAGE_CACHE_SIZE);
+			size = min_t(int, to_read, PAGE_SIZE);
 			memcpy((u8*)trp + have_read, page_address(page), size);
 			ntfs_unmap_page(page);
 			have_read += size;
@@ -509,11 +509,11 @@
 	 * log page size if the page cache size is between the default log page
 	 * size and twice that.
 	 */
-	if (PAGE_CACHE_SIZE >= DefaultLogPageSize && PAGE_CACHE_SIZE <=
+	if (PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <=
 			DefaultLogPageSize * 2)
 		log_page_size = DefaultLogPageSize;
 	else
-		log_page_size = PAGE_CACHE_SIZE;
+		log_page_size = PAGE_SIZE;
 	log_page_mask = log_page_size - 1;
 	/*
 	 * Use ntfs_ffs() instead of ffs() to enable the compiler to
@@ -539,7 +539,7 @@
 	 * to be empty.
 	 */
 	for (pos = 0; pos < size; pos <<= 1) {
-		pgoff_t idx = pos >> PAGE_CACHE_SHIFT;
+		pgoff_t idx = pos >> PAGE_SHIFT;
 		if (!page || page->index != idx) {
 			if (page)
 				ntfs_unmap_page(page);
@@ -550,7 +550,7 @@
 				goto err_out;
 			}
 		}
-		kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK);
+		kaddr = (u8*)page_address(page) + (pos & ~PAGE_MASK);
 		/*
 		 * A non-empty block means the logfile is not empty while an
 		 * empty block after a non-empty block has been encountered
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 3014a36..37b2501 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -61,16 +61,16 @@
 	 * here if the volume was that big...
 	 */
 	index = (u64)ni->mft_no << vol->mft_record_size_bits >>
-			PAGE_CACHE_SHIFT;
-	ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+			PAGE_SHIFT;
+	ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
 
 	i_size = i_size_read(mft_vi);
 	/* The maximum valid index into the page cache for $MFT's data. */
-	end_index = i_size >> PAGE_CACHE_SHIFT;
+	end_index = i_size >> PAGE_SHIFT;
 
 	/* If the wanted index is out of bounds the mft record doesn't exist. */
 	if (unlikely(index >= end_index)) {
-		if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs +
+		if (index > end_index || (i_size & ~PAGE_MASK) < ofs +
 				vol->mft_record_size) {
 			page = ERR_PTR(-ENOENT);
 			ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
@@ -487,7 +487,7 @@
 	}
 	/* Get the page containing the mirror copy of the mft record @m. */
 	page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
-			(PAGE_CACHE_SHIFT - vol->mft_record_size_bits));
+			(PAGE_SHIFT - vol->mft_record_size_bits));
 	if (IS_ERR(page)) {
 		ntfs_error(vol->sb, "Failed to map mft mirror page.");
 		err = PTR_ERR(page);
@@ -497,7 +497,7 @@
 	BUG_ON(!PageUptodate(page));
 	ClearPageUptodate(page);
 	/* Offset of the mft mirror record inside the page. */
-	page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+	page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
 	/* The address in the page of the mirror copy of the mft record @m. */
 	kmirr = page_address(page) + page_ofs;
 	/* Copy the mst protected mft record to the mirror. */
@@ -1178,8 +1178,8 @@
 	for (; pass <= 2;) {
 		/* Cap size to pass_end. */
 		ofs = data_pos >> 3;
-		page_ofs = ofs & ~PAGE_CACHE_MASK;
-		size = PAGE_CACHE_SIZE - page_ofs;
+		page_ofs = ofs & ~PAGE_MASK;
+		size = PAGE_SIZE - page_ofs;
 		ll = ((pass_end + 7) >> 3) - ofs;
 		if (size > ll)
 			size = ll;
@@ -1190,7 +1190,7 @@
 		 */
 		if (size) {
 			page = ntfs_map_page(mftbmp_mapping,
-					ofs >> PAGE_CACHE_SHIFT);
+					ofs >> PAGE_SHIFT);
 			if (IS_ERR(page)) {
 				ntfs_error(vol->sb, "Failed to read mft "
 						"bitmap, aborting.");
@@ -1328,13 +1328,13 @@
 	 */
 	ll = lcn >> 3;
 	page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
-			ll >> PAGE_CACHE_SHIFT);
+			ll >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		up_write(&mftbmp_ni->runlist.lock);
 		ntfs_error(vol->sb, "Failed to read from lcn bitmap.");
 		return PTR_ERR(page);
 	}
-	b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK);
+	b = (u8*)page_address(page) + (ll & ~PAGE_MASK);
 	tb = 1 << (lcn & 7ull);
 	down_write(&vol->lcnbmp_lock);
 	if (*b != 0xff && !(*b & tb)) {
@@ -2103,14 +2103,14 @@
 	 * The index into the page cache and the offset within the page cache
 	 * page of the wanted mft record.
 	 */
-	index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
-	ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+	index = mft_no << vol->mft_record_size_bits >> PAGE_SHIFT;
+	ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
 	/* The maximum valid index into the page cache for $MFT's data. */
 	i_size = i_size_read(mft_vi);
-	end_index = i_size >> PAGE_CACHE_SHIFT;
+	end_index = i_size >> PAGE_SHIFT;
 	if (unlikely(index >= end_index)) {
 		if (unlikely(index > end_index || ofs + vol->mft_record_size >=
-				(i_size & ~PAGE_CACHE_MASK))) {
+				(i_size & ~PAGE_MASK))) {
 			ntfs_error(vol->sb, "Tried to format non-existing mft "
 					"record 0x%llx.", (long long)mft_no);
 			return -ENOENT;
@@ -2515,8 +2515,8 @@
 	 * We now have allocated and initialized the mft record.  Calculate the
 	 * index of and the offset within the page cache page the record is in.
 	 */
-	index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
-	ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
+	index = bit << vol->mft_record_size_bits >> PAGE_SHIFT;
+	ofs = (bit << vol->mft_record_size_bits) & ~PAGE_MASK;
 	/* Read, map, and pin the page containing the mft record. */
 	page = ntfs_map_page(vol->mft_ino->i_mapping, index);
 	if (IS_ERR(page)) {
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h
index c581e26..12de47b 100644
--- a/fs/ntfs/ntfs.h
+++ b/fs/ntfs/ntfs.h
@@ -43,7 +43,7 @@
 	NTFS_MAX_NAME_LEN	= 255,
 	NTFS_MAX_ATTR_NAME_LEN	= 255,
 	NTFS_MAX_CLUSTER_SIZE	= 64 * 1024,	/* 64kiB */
-	NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_CACHE_SIZE,
+	NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_SIZE,
 } NTFS_CONSTANTS;
 
 /* Global variables. */
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1b38abd..ecb4987 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -823,14 +823,14 @@
 	ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
 			vol->mft_record_size_bits, vol->mft_record_size_bits);
 	/*
-	 * We cannot support mft record sizes above the PAGE_CACHE_SIZE since
+	 * We cannot support mft record sizes above the PAGE_SIZE since
 	 * we store $MFT/$DATA, the table of mft records in the page cache.
 	 */
-	if (vol->mft_record_size > PAGE_CACHE_SIZE) {
+	if (vol->mft_record_size > PAGE_SIZE) {
 		ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
-				"PAGE_CACHE_SIZE on your system (%lu).  "
+				"PAGE_SIZE on your system (%lu).  "
 				"This is not supported.  Sorry.",
-				vol->mft_record_size, PAGE_CACHE_SIZE);
+				vol->mft_record_size, PAGE_SIZE);
 		return false;
 	}
 	/* We cannot support mft record sizes below the sector size. */
@@ -1096,7 +1096,7 @@
 
 	ntfs_debug("Entering.");
 	/* Compare contents of $MFT and $MFTMirr. */
-	mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size;
+	mrecs_per_page = PAGE_SIZE / vol->mft_record_size;
 	BUG_ON(!mrecs_per_page);
 	BUG_ON(!vol->mftmirr_size);
 	mft_page = mirr_page = NULL;
@@ -1615,20 +1615,20 @@
 	if (!vol->attrdef)
 		goto iput_failed;
 	index = 0;
-	max_index = i_size >> PAGE_CACHE_SHIFT;
-	size = PAGE_CACHE_SIZE;
+	max_index = i_size >> PAGE_SHIFT;
+	size = PAGE_SIZE;
 	while (index < max_index) {
 		/* Read the attrdef table and copy it into the linear buffer. */
 read_partial_attrdef_page:
 		page = ntfs_map_page(ino->i_mapping, index);
 		if (IS_ERR(page))
 			goto free_iput_failed;
-		memcpy((u8*)vol->attrdef + (index++ << PAGE_CACHE_SHIFT),
+		memcpy((u8*)vol->attrdef + (index++ << PAGE_SHIFT),
 				page_address(page), size);
 		ntfs_unmap_page(page);
 	};
-	if (size == PAGE_CACHE_SIZE) {
-		size = i_size & ~PAGE_CACHE_MASK;
+	if (size == PAGE_SIZE) {
+		size = i_size & ~PAGE_MASK;
 		if (size)
 			goto read_partial_attrdef_page;
 	}
@@ -1684,20 +1684,20 @@
 	if (!vol->upcase)
 		goto iput_upcase_failed;
 	index = 0;
-	max_index = i_size >> PAGE_CACHE_SHIFT;
-	size = PAGE_CACHE_SIZE;
+	max_index = i_size >> PAGE_SHIFT;
+	size = PAGE_SIZE;
 	while (index < max_index) {
 		/* Read the upcase table and copy it into the linear buffer. */
 read_partial_upcase_page:
 		page = ntfs_map_page(ino->i_mapping, index);
 		if (IS_ERR(page))
 			goto iput_upcase_failed;
-		memcpy((char*)vol->upcase + (index++ << PAGE_CACHE_SHIFT),
+		memcpy((char*)vol->upcase + (index++ << PAGE_SHIFT),
 				page_address(page), size);
 		ntfs_unmap_page(page);
 	};
-	if (size == PAGE_CACHE_SIZE) {
-		size = i_size & ~PAGE_CACHE_MASK;
+	if (size == PAGE_SIZE) {
+		size = i_size & ~PAGE_MASK;
 		if (size)
 			goto read_partial_upcase_page;
 	}
@@ -2471,14 +2471,14 @@
 	down_read(&vol->lcnbmp_lock);
 	/*
 	 * Convert the number of bits into bytes rounded up, then convert into
-	 * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+	 * multiples of PAGE_SIZE, rounding up so that if we have one
 	 * full and one partial page max_index = 2.
 	 */
-	max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT;
-	/* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+	max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >>
+			PAGE_SHIFT;
+	/* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
 	ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
-			max_index, PAGE_CACHE_SIZE / 4);
+			max_index, PAGE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
 		unsigned long *kaddr;
 
@@ -2491,7 +2491,7 @@
 		if (IS_ERR(page)) {
 			ntfs_debug("read_mapping_page() error. Skipping "
 					"page (index 0x%lx).", index);
-			nr_free -= PAGE_CACHE_SIZE * 8;
+			nr_free -= PAGE_SIZE * 8;
 			continue;
 		}
 		kaddr = kmap_atomic(page);
@@ -2503,9 +2503,9 @@
 		 * ntfs_readpage().
 		 */
 		nr_free -= bitmap_weight(kaddr,
-					PAGE_CACHE_SIZE * BITS_PER_BYTE);
+					PAGE_SIZE * BITS_PER_BYTE);
 		kunmap_atomic(kaddr);
-		page_cache_release(page);
+		put_page(page);
 	}
 	ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
 	/*
@@ -2547,9 +2547,9 @@
 	pgoff_t index;
 
 	ntfs_debug("Entering.");
-	/* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+	/* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
 	ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
-			"0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
+			"0x%lx.", max_index, PAGE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
 		unsigned long *kaddr;
 
@@ -2562,7 +2562,7 @@
 		if (IS_ERR(page)) {
 			ntfs_debug("read_mapping_page() error. Skipping "
 					"page (index 0x%lx).", index);
-			nr_free -= PAGE_CACHE_SIZE * 8;
+			nr_free -= PAGE_SIZE * 8;
 			continue;
 		}
 		kaddr = kmap_atomic(page);
@@ -2574,9 +2574,9 @@
 		 * ntfs_readpage().
 		 */
 		nr_free -= bitmap_weight(kaddr,
-					PAGE_CACHE_SIZE * BITS_PER_BYTE);
+					PAGE_SIZE * BITS_PER_BYTE);
 		kunmap_atomic(kaddr);
-		page_cache_release(page);
+		put_page(page);
 	}
 	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
 			index - 1);
@@ -2618,17 +2618,17 @@
 	/* Type of filesystem. */
 	sfs->f_type   = NTFS_SB_MAGIC;
 	/* Optimal transfer block size. */
-	sfs->f_bsize  = PAGE_CACHE_SIZE;
+	sfs->f_bsize  = PAGE_SIZE;
 	/*
 	 * Total data blocks in filesystem in units of f_bsize and since
 	 * inodes are also stored in data blocks ($MFT is a file) this is just
 	 * the total clusters.
 	 */
 	sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >>
-				PAGE_CACHE_SHIFT;
+				PAGE_SHIFT;
 	/* Free data blocks in filesystem in units of f_bsize. */
 	size	      = get_nr_free_clusters(vol) << vol->cluster_size_bits >>
-				PAGE_CACHE_SHIFT;
+				PAGE_SHIFT;
 	if (size < 0LL)
 		size = 0LL;
 	/* Free blocks avail to non-superuser, same as above on NTFS. */
@@ -2639,11 +2639,11 @@
 	size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
 	/*
 	 * Convert the maximum number of set bits into bytes rounded up, then
-	 * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we
+	 * convert into multiples of PAGE_SIZE, rounding up so that if we
 	 * have one full and one partial page max_index = 2.
 	 */
 	max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
-			+ 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+			+ 7) >> 3) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	read_unlock_irqrestore(&mft_ni->size_lock, flags);
 	/* Number of inodes in filesystem (at this point in time). */
 	sfs->f_files = size;
@@ -2765,15 +2765,15 @@
 	if (!parse_options(vol, (char*)opt))
 		goto err_out_now;
 
-	/* We support sector sizes up to the PAGE_CACHE_SIZE. */
-	if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) {
+	/* We support sector sizes up to the PAGE_SIZE. */
+	if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) {
 		if (!silent)
 			ntfs_error(sb, "Device has unsupported sector size "
 					"(%i).  The maximum supported sector "
 					"size on this architecture is %lu "
 					"bytes.",
 					bdev_logical_block_size(sb->s_bdev),
-					PAGE_CACHE_SIZE);
+					PAGE_SIZE);
 		goto err_out_now;
 	}
 	/*
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 0cdf497..2162434 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -322,3 +322,90 @@
 	brelse(di_bh);
 	return acl;
 }
+
+int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
+{
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct posix_acl *acl;
+	int ret;
+
+	if (S_ISLNK(inode->i_mode))
+		return -EOPNOTSUPP;
+
+	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
+		return 0;
+
+	acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
+	if (IS_ERR(acl) || !acl)
+		return PTR_ERR(acl);
+	ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
+	if (ret)
+		return ret;
+	ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
+			    acl, NULL, NULL);
+	posix_acl_release(acl);
+	return ret;
+}
+
+/*
+ * Initialize the ACLs of a new inode. If parent directory has default ACL,
+ * then clone to new inode. Called from ocfs2_mknod.
+ */
+int ocfs2_init_acl(handle_t *handle,
+		   struct inode *inode,
+		   struct inode *dir,
+		   struct buffer_head *di_bh,
+		   struct buffer_head *dir_bh,
+		   struct ocfs2_alloc_context *meta_ac,
+		   struct ocfs2_alloc_context *data_ac)
+{
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct posix_acl *acl = NULL;
+	int ret = 0, ret2;
+	umode_t mode;
+
+	if (!S_ISLNK(inode->i_mode)) {
+		if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+			acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
+						   dir_bh);
+			if (IS_ERR(acl))
+				return PTR_ERR(acl);
+		}
+		if (!acl) {
+			mode = inode->i_mode & ~current_umask();
+			ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+			if (ret) {
+				mlog_errno(ret);
+				goto cleanup;
+			}
+		}
+	}
+	if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
+		if (S_ISDIR(inode->i_mode)) {
+			ret = ocfs2_set_acl(handle, inode, di_bh,
+					    ACL_TYPE_DEFAULT, acl,
+					    meta_ac, data_ac);
+			if (ret)
+				goto cleanup;
+		}
+		mode = inode->i_mode;
+		ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
+		if (ret < 0)
+			return ret;
+
+		ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+		if (ret2) {
+			mlog_errno(ret2);
+			ret = ret2;
+			goto cleanup;
+		}
+		if (ret > 0) {
+			ret = ocfs2_set_acl(handle, inode,
+					    di_bh, ACL_TYPE_ACCESS,
+					    acl, meta_ac, data_ac);
+		}
+	}
+cleanup:
+	posix_acl_release(acl);
+	return ret;
+}
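
The two functions added above give ocfs2 local ACL helpers: ocfs2_acl_chmod() reapplies the access ACL after a mode change and ocfs2_init_acl() inherits the parent directory's default ACL at create time, both running with the cluster locks and buffer heads the caller already holds. A minimal, generic sketch of the chmod half using the same VFS primitives (the example_* get/set helpers are assumed, not ocfs2 functions):

#include <linux/fs.h>
#include <linux/posix_acl.h>

/* Assumed filesystem-specific helpers, standing in for the real get/set paths. */
struct posix_acl *example_get_acl(struct inode *inode, int type);
int example_set_acl(struct inode *inode, int type, struct posix_acl *acl);

static int example_acl_chmod(struct inode *inode)
{
	struct posix_acl *acl;
	int ret;

	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;

	acl = example_get_acl(inode, ACL_TYPE_ACCESS);
	if (IS_ERR(acl) || !acl)
		return PTR_ERR(acl);	/* no ACL at all: PTR_ERR(NULL) == 0 */

	/* Rewrite the ACL entries so they agree with the new inode->i_mode. */
	ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
	if (!ret)
		ret = example_set_acl(inode, ACL_TYPE_ACCESS, acl);
	posix_acl_release(acl);
	return ret;
}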
diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
index 3fce68d..2783a75 100644
--- a/fs/ocfs2/acl.h
+++ b/fs/ocfs2/acl.h
@@ -35,5 +35,10 @@
 			 struct posix_acl *acl,
 			 struct ocfs2_alloc_context *meta_ac,
 			 struct ocfs2_alloc_context *data_ac);
+extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
+extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
+			  struct buffer_head *, struct buffer_head *,
+			  struct ocfs2_alloc_context *,
+			  struct ocfs2_alloc_context *);
 
 #endif /* OCFS2_ACL_H */
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 70907d6..e361d1a 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6671,7 +6671,7 @@
 {
 	int i;
 	struct page *page;
-	unsigned int from, to = PAGE_CACHE_SIZE;
+	unsigned int from, to = PAGE_SIZE;
 	struct super_block *sb = inode->i_sb;
 
 	BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
@@ -6679,21 +6679,21 @@
 	if (numpages == 0)
 		goto out;
 
-	to = PAGE_CACHE_SIZE;
+	to = PAGE_SIZE;
 	for(i = 0; i < numpages; i++) {
 		page = pages[i];
 
-		from = start & (PAGE_CACHE_SIZE - 1);
-		if ((end >> PAGE_CACHE_SHIFT) == page->index)
-			to = end & (PAGE_CACHE_SIZE - 1);
+		from = start & (PAGE_SIZE - 1);
+		if ((end >> PAGE_SHIFT) == page->index)
+			to = end & (PAGE_SIZE - 1);
 
-		BUG_ON(from > PAGE_CACHE_SIZE);
-		BUG_ON(to > PAGE_CACHE_SIZE);
+		BUG_ON(from > PAGE_SIZE);
+		BUG_ON(to > PAGE_SIZE);
 
 		ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
 					 &phys);
 
-		start = (page->index + 1) << PAGE_CACHE_SHIFT;
+		start = (page->index + 1) << PAGE_SHIFT;
 	}
 out:
 	if (pages)
@@ -6712,7 +6712,7 @@
 
 	numpages = 0;
 	last_page_bytes = PAGE_ALIGN(end);
-	index = start >> PAGE_CACHE_SHIFT;
+	index = start >> PAGE_SHIFT;
 	do {
 		pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
 		if (!pages[numpages]) {
@@ -6723,7 +6723,7 @@
 
 		numpages++;
 		index++;
-	} while (index < (last_page_bytes >> PAGE_CACHE_SHIFT));
+	} while (index < (last_page_bytes >> PAGE_SHIFT));
 
 out:
 	if (ret != 0) {
@@ -6950,8 +6950,8 @@
 		 * to do that now.
 		 */
 		if (!ocfs2_sparse_alloc(osb) &&
-		    PAGE_CACHE_SIZE < osb->s_clustersize)
-			end = PAGE_CACHE_SIZE;
+		    PAGE_SIZE < osb->s_clustersize)
+			end = PAGE_SIZE;
 
 		ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
 		if (ret) {
@@ -6971,8 +6971,8 @@
 			goto out_unlock;
 		}
 
-		page_end = PAGE_CACHE_SIZE;
-		if (PAGE_CACHE_SIZE > osb->s_clustersize)
+		page_end = PAGE_SIZE;
+		if (PAGE_SIZE > osb->s_clustersize)
 			page_end = osb->s_clustersize;
 
 		for (i = 0; i < num_pages; i++)
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1581240..c034edf 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -234,7 +234,7 @@
 
 	size = i_size_read(inode);
 
-	if (size > PAGE_CACHE_SIZE ||
+	if (size > PAGE_SIZE ||
 	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
 		ocfs2_error(inode->i_sb,
 			    "Inode %llu has with inline data has bad size: %Lu\n",
@@ -247,7 +247,7 @@
 	if (size)
 		memcpy(kaddr, di->id2.i_data.id_data, size);
 	/* Clear the remaining part of the page */
-	memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
+	memset(kaddr + size, 0, PAGE_SIZE - size);
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr);
 
@@ -282,7 +282,7 @@
 {
 	struct inode *inode = page->mapping->host;
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
-	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	loff_t start = (loff_t)page->index << PAGE_SHIFT;
 	int ret, unlock = 1;
 
 	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
@@ -385,7 +385,7 @@
 	 * drop out in that case as it's not worth handling here.
 	 */
 	last = list_entry(pages->prev, struct page, lru);
-	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
+	start = (loff_t)last->index << PAGE_SHIFT;
 	if (start >= i_size_read(inode))
 		goto out_unlock;
 
@@ -511,12 +511,12 @@
 					    unsigned int *start,
 					    unsigned int *end)
 {
-	unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;
+	unsigned int cluster_start = 0, cluster_end = PAGE_SIZE;
 
-	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
+	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) {
 		unsigned int cpp;
 
-		cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);
+		cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits);
 
 		cluster_start = cpos % cpp;
 		cluster_start = cluster_start << osb->s_clustersize_bits;
@@ -684,13 +684,13 @@
 	return ret;
 }
 
-#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
+#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
 #define OCFS2_MAX_CTXT_PAGES	1
 #else
-#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
+#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
 #endif
 
-#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)
+#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE)
 
 struct ocfs2_unwritten_extent {
 	struct list_head	ue_node;
@@ -785,7 +785,7 @@
 		if (pages[i]) {
 			unlock_page(pages[i]);
 			mark_page_accessed(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 	}
 }
@@ -808,7 +808,7 @@
 			}
 		}
 		mark_page_accessed(wc->w_target_page);
-		page_cache_release(wc->w_target_page);
+		put_page(wc->w_target_page);
 	}
 	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
 }
@@ -857,7 +857,7 @@
 	wc->w_di_bh = di_bh;
 	wc->w_type = type;
 
-	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
+	if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits))
 		wc->w_large_pages = 1;
 	else
 		wc->w_large_pages = 0;
@@ -920,7 +920,7 @@
 				loff_t user_pos, unsigned user_len)
 {
 	int i;
-	unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
+	unsigned from = user_pos & (PAGE_SIZE - 1),
 		to = user_pos + user_len;
 	struct page *tmppage;
 
@@ -960,7 +960,7 @@
 			(page_offset(page) <= user_pos));
 
 	if (page == wc->w_target_page) {
-		map_from = user_pos & (PAGE_CACHE_SIZE - 1);
+		map_from = user_pos & (PAGE_SIZE - 1);
 		map_to = map_from + user_len;
 
 		if (new)
@@ -1034,7 +1034,7 @@
 	struct inode *inode = mapping->host;
 	loff_t last_byte;
 
-	target_index = user_pos >> PAGE_CACHE_SHIFT;
+	target_index = user_pos >> PAGE_SHIFT;
 
 	/*
 	 * Figure out how many pages we'll be manipulating here. For
@@ -1053,14 +1053,14 @@
 		 */
 		last_byte = max(user_pos + user_len, i_size_read(inode));
 		BUG_ON(last_byte < 1);
-		end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
+		end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
 		if ((start + wc->w_num_pages) > end_index)
 			wc->w_num_pages = end_index - start;
 	} else {
 		wc->w_num_pages = 1;
 		start = target_index;
 	}
-	end_index = (user_pos + user_len - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
 
 	for(i = 0; i < wc->w_num_pages; i++) {
 		index = start + i;
@@ -1082,7 +1082,7 @@
 				goto out;
 			}
 
-			page_cache_get(mmap_page);
+			get_page(mmap_page);
 			wc->w_pages[i] = mmap_page;
 			wc->w_target_locked = true;
 		} else if (index >= target_index && index <= end_index &&
@@ -1272,7 +1272,7 @@
 {
 	struct ocfs2_write_cluster_desc *desc;
 
-	wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
+	wc->w_target_from = pos & (PAGE_SIZE - 1);
 	wc->w_target_to = wc->w_target_from + len;
 
 	if (alloc == 0)
@@ -1309,7 +1309,7 @@
 							&wc->w_target_to);
 	} else {
 		wc->w_target_from = 0;
-		wc->w_target_to = PAGE_CACHE_SIZE;
+		wc->w_target_to = PAGE_SIZE;
 	}
 }
 
@@ -1981,7 +1981,7 @@
 			   struct page *page, void *fsdata)
 {
 	int i, ret;
-	unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned from, to, start = pos & (PAGE_SIZE - 1);
 	struct inode *inode = mapping->host;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_write_ctxt *wc = fsdata;
@@ -2027,8 +2027,8 @@
 			from = wc->w_target_from;
 			to = wc->w_target_to;
 
-			BUG_ON(from > PAGE_CACHE_SIZE ||
-			       to > PAGE_CACHE_SIZE ||
+			BUG_ON(from > PAGE_SIZE ||
+			       to > PAGE_SIZE ||
 			       to < from);
 		} else {
 			/*
@@ -2037,7 +2037,7 @@
 			 * to flush their entire range.
 			 */
 			from = 0;
-			to = PAGE_CACHE_SIZE;
+			to = PAGE_SIZE;
 		}
 
 		if (page_has_buffers(tmppage)) {
@@ -2311,7 +2311,7 @@
 	/* ocfs2_file_write_iter will get i_mutex, so we need not lock if we
 	 * are in that context. */
 	if (dwc->dw_writer_pid != task_pid_nr(current)) {
-		mutex_lock(&inode->i_mutex);
+		inode_lock(inode);
 		locked = 1;
 	}
 
@@ -2390,7 +2390,7 @@
 		ocfs2_free_alloc_context(meta_ac);
 	ocfs2_run_deallocs(osb, &dealloc);
 	if (locked)
-		mutex_unlock(&inode->i_mutex);
+		inode_unlock(inode);
 	ocfs2_dio_free_write_ctx(inode, dwc);
 }
 
@@ -2423,13 +2423,11 @@
 	return 0;
 }
 
-static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			       loff_t offset)
+static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file)->i_mapping->host;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-	loff_t end = offset + iter->count;
 	get_block_t *get_block;
 
 	/*
@@ -2440,7 +2438,8 @@
 		return 0;
 
 	/* Fallback to buffered I/O if we do not support append dio. */
-	if (end > i_size_read(inode) && !ocfs2_supports_append_dio(osb))
+	if (iocb->ki_pos + iter->count > i_size_read(inode) &&
+	    !ocfs2_supports_append_dio(osb))
 		return 0;
 
 	if (iov_iter_rw(iter) == READ)
@@ -2449,7 +2448,7 @@
 		get_block = ocfs2_dio_get_block;
 
 	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
-				    iter, offset, get_block,
+				    iter, get_block,
 				    ocfs2_dio_end_io, NULL, 0);
 }
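
The ocfs2_direct_IO() change above tracks the ->direct_IO interface: the separate loff_t offset argument is gone, the position is taken from iocb->ki_pos, and __blockdev_direct_IO() lost its offset parameter to match. A sketch of the prototype this hunk assumes, with a placeholder get_block:

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>

static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	return -EIO;	/* placeholder; a real implementation maps iblock */
}

static ssize_t example_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp)->i_mapping->host;

	/* The I/O position now comes from the kiocb itself. */
	if (iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode))
		return 0;	/* illustrative fallback to buffered I/O, as ocfs2 does for append */

	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				    example_get_block, NULL, NULL, 0);
}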
 
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index bd15929..1934abb 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -417,13 +417,13 @@
 	bio->bi_private = wc;
 	bio->bi_end_io = o2hb_bio_end_io;
 
-	vec_start = (cs << bits) % PAGE_CACHE_SIZE;
+	vec_start = (cs << bits) % PAGE_SIZE;
 	while(cs < max_slots) {
 		current_page = cs / spp;
 		page = reg->hr_slot_data[current_page];
 
-		vec_len = min(PAGE_CACHE_SIZE - vec_start,
-			      (max_slots-cs) * (PAGE_CACHE_SIZE/spp) );
+		vec_len = min(PAGE_SIZE - vec_start,
+			      (max_slots-cs) * (PAGE_SIZE/spp) );
 
 		mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
 		     current_page, vec_len, vec_start);
@@ -431,7 +431,7 @@
 		len = bio_add_page(bio, page, vec_len, vec_start);
 		if (len != vec_len) break;
 
-		cs += vec_len / (PAGE_CACHE_SIZE/spp);
+		cs += vec_len / (PAGE_SIZE/spp);
 		vec_start = 0;
 	}
 
@@ -1576,7 +1576,7 @@
 
 static void o2hb_init_region_params(struct o2hb_region *reg)
 {
-	reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits;
+	reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits;
 	reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;
 
 	mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9aed6e2..13719d3 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2455,6 +2455,8 @@
 
 	spin_unlock(&dlm->spinlock);
 
+	ret = 0;
+
 done:
 	dlm_put(dlm);
 	return ret;
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 03768bb..47b3b2d 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -571,8 +571,8 @@
 			    int silent)
 {
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = DLMFS_MAGIC;
 	sb->s_op = &dlmfs_ops;
 	sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 474e57f8..1eaa910 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -54,6 +54,7 @@
 #include "uptodate.h"
 #include "quota.h"
 #include "refcounttree.h"
+#include "acl.h"
 
 #include "buffer_head_io.h"
 
@@ -3623,6 +3624,8 @@
 		filemap_fdatawait(mapping);
 	}
 
+	forget_all_cached_acls(inode);
+
 out:
 	return UNBLOCK_CONTINUE;
 }
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index c18ab45..4e7b0dc 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -770,14 +770,14 @@
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
-	unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
+	unsigned long index = abs_from >> PAGE_SHIFT;
 	handle_t *handle;
 	int ret = 0;
 	unsigned zero_from, zero_to, block_start, block_end;
 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 
 	BUG_ON(abs_from >= abs_to);
-	BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
+	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
 	BUG_ON(abs_from & (inode->i_blkbits - 1));
 
 	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
@@ -794,10 +794,10 @@
 	}
 
 	/* Get the offsets within the page that we want to zero */
-	zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
-	zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
+	zero_from = abs_from & (PAGE_SIZE - 1);
+	zero_to = abs_to & (PAGE_SIZE - 1);
 	if (!zero_to)
-		zero_to = PAGE_CACHE_SIZE;
+		zero_to = PAGE_SIZE;
 
 	trace_ocfs2_write_zero_page(
 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -851,7 +851,7 @@
 
 out_unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 out_commit_trans:
 	if (handle)
 		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
@@ -959,7 +959,7 @@
 	BUG_ON(range_start >= range_end);
 
 	while (zero_pos < range_end) {
-		next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
+		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
 		if (next_pos > range_end)
 			next_pos = range_end;
 		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
@@ -1268,20 +1268,20 @@
 	if (size_change)
 		ocfs2_rw_unlock(inode, 1);
 bail:
-	brelse(bh);
 
 	/* Release quota pointers in case we acquired them */
 	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
 		dqput(transfer_to[qtype]);
 
 	if (!status && attr->ia_valid & ATTR_MODE) {
-		status = posix_acl_chmod(inode, inode->i_mode);
+		status = ocfs2_acl_chmod(inode, bh);
 		if (status < 0)
 			mlog_errno(status);
 	}
 	if (inode_locked)
 		ocfs2_inode_unlock(inode, 1);
 
+	brelse(bh);
 	return status;
 }
 
@@ -1290,7 +1290,7 @@
 		  struct kstat *stat)
 {
 	struct inode *inode = d_inode(dentry);
-	struct super_block *sb = d_inode(dentry)->i_sb;
+	struct super_block *sb = dentry->d_sb;
 	struct ocfs2_super *osb = sb->s_fs_info;
 	int err;
 
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 12f4a9e..0748777 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -262,7 +262,7 @@
 	inode->i_ino = args->fi_ino;
 	OCFS2_I(inode)->ip_blkno = args->fi_blkno;
 	if (args->fi_sysfile_type != 0)
-		lockdep_set_class(&inode->i_mutex,
+		lockdep_set_class(&inode->i_rwsem,
 			&ocfs2_sysfile_lock_key[args->fi_sysfile_type]);
 	if (args->fi_sysfile_type == USER_QUOTA_SYSTEM_INODE ||
 	    args->fi_sysfile_type == GROUP_QUOTA_SYSTEM_INODE ||
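
The lockdep hunk above, like the inode_lock()/inode_unlock() conversion in the fs/ocfs2/aops.c hunks earlier, follows the replacement of inode->i_mutex with the inode->i_rwsem read-write semaphore: callers go through the inode_lock*() wrappers and lockdep class annotations now attach to the rwsem. The usage pattern assumed is simply:

#include <linux/fs.h>

static void example_with_inode_locked(struct inode *inode)
{
	inode_lock(inode);		/* was mutex_lock(&inode->i_mutex) */
	/* ... modify inode state ... */
	inode_unlock(inode);		/* was mutex_unlock(&inode->i_mutex) */
}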
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 9ea081f..71545ad 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -65,13 +65,13 @@
 	struct inode *inode = file_inode(file);
 	struct address_space *mapping = inode->i_mapping;
 	loff_t pos = page_offset(page);
-	unsigned int len = PAGE_CACHE_SIZE;
+	unsigned int len = PAGE_SIZE;
 	pgoff_t last_index;
 	struct page *locked_page = NULL;
 	void *fsdata;
 	loff_t size = i_size_read(inode);
 
-	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+	last_index = (size - 1) >> PAGE_SHIFT;
 
 	/*
 	 * There are cases that lead to the page no longer belongs to the
@@ -102,7 +102,7 @@
 	 * because the "write" would invalidate their data.
 	 */
 	if (page->index == last_index)
-		len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
+		len = ((size - 1) & ~PAGE_MASK) + 1;
 
 	ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
 				       &locked_page, &fsdata, di_bh, page);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 6b3e871..a8f1225 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -259,7 +259,6 @@
 	struct ocfs2_dir_lookup_result lookup = { NULL, };
 	sigset_t oldset;
 	int did_block_signals = 0;
-	struct posix_acl *default_acl = NULL, *acl = NULL;
 	struct ocfs2_dentry_lock *dl = NULL;
 
 	trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
@@ -367,12 +366,6 @@
 		goto leave;
 	}
 
-	status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
-	if (status) {
-		mlog_errno(status);
-		goto leave;
-	}
-
 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
 							    S_ISDIR(mode),
 							    xattr_credits));
@@ -421,16 +414,8 @@
 		inc_nlink(dir);
 	}
 
-	if (default_acl) {
-		status = ocfs2_set_acl(handle, inode, new_fe_bh,
-				       ACL_TYPE_DEFAULT, default_acl,
-				       meta_ac, data_ac);
-	}
-	if (!status && acl) {
-		status = ocfs2_set_acl(handle, inode, new_fe_bh,
-				       ACL_TYPE_ACCESS, acl,
-				       meta_ac, data_ac);
-	}
+	status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
+			 meta_ac, data_ac);
 
 	if (status < 0) {
 		mlog_errno(status);
@@ -472,10 +457,6 @@
 	d_instantiate(dentry, inode);
 	status = 0;
 leave:
-	if (default_acl)
-		posix_acl_release(default_acl);
-	if (acl)
-		posix_acl_release(acl);
 	if (status < 0 && did_quota_inode)
 		dquot_free_inode(inode);
 	if (handle)
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 6cf6538..e63af7d 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -822,10 +822,10 @@
 	u32 clusters = pg_index;
 	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
 
-	if (unlikely(PAGE_CACHE_SHIFT > cbits))
-		clusters = pg_index << (PAGE_CACHE_SHIFT - cbits);
-	else if (PAGE_CACHE_SHIFT < cbits)
-		clusters = pg_index >> (cbits - PAGE_CACHE_SHIFT);
+	if (unlikely(PAGE_SHIFT > cbits))
+		clusters = pg_index << (PAGE_SHIFT - cbits);
+	else if (PAGE_SHIFT < cbits)
+		clusters = pg_index >> (cbits - PAGE_SHIFT);
 
 	return clusters;
 }
@@ -839,10 +839,10 @@
 	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
         pgoff_t index = clusters;
 
-	if (PAGE_CACHE_SHIFT > cbits) {
-		index = (pgoff_t)clusters >> (PAGE_CACHE_SHIFT - cbits);
-	} else if (PAGE_CACHE_SHIFT < cbits) {
-		index = (pgoff_t)clusters << (cbits - PAGE_CACHE_SHIFT);
+	if (PAGE_SHIFT > cbits) {
+		index = (pgoff_t)clusters >> (PAGE_SHIFT - cbits);
+	} else if (PAGE_SHIFT < cbits) {
+		index = (pgoff_t)clusters << (cbits - PAGE_SHIFT);
 	}
 
 	return index;
@@ -853,8 +853,8 @@
 	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
 	unsigned int pages_per_cluster = 1;
 
-	if (PAGE_CACHE_SHIFT < cbits)
-		pages_per_cluster = 1 << (cbits - PAGE_CACHE_SHIFT);
+	if (PAGE_SHIFT < cbits)
+		pages_per_cluster = 1 << (cbits - PAGE_SHIFT);
 
 	return pages_per_cluster;
 }
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 3892f3c..ab6a6cd 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -867,6 +867,10 @@
 	int status = 0;
 
 	trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
+	if (!sb_has_quota_loaded(sb, type)) {
+		status = -ESRCH;
+		goto out;
+	}
 	status = ocfs2_lock_global_qf(info, 0);
 	if (status < 0)
 		goto out;
@@ -878,8 +882,11 @@
 out_global:
 	ocfs2_unlock_global_qf(info, 0);
 out:
-	/* Avoid logging ENOENT since it just means there isn't next ID */
-	if (status && status != -ENOENT)
+	/*
+	 * Avoid logging ENOENT since it just means there isn't next ID and
+	 * ESRCH which means quota isn't enabled for the filesystem.
+	 */
+	if (status && status != -ENOENT && status != -ESRCH)
 		mlog_errno(status);
 	return status;
 }
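
The quota hunks above adopt the convention that ->get_next_id() reports -ESRCH when quota is simply not enabled for the filesystem, so that case is neither logged as an error nor confused with -ENOENT (no further IDs). A sketch of the guard, with the lookup itself elided:

#include <linux/quotaops.h>

static int example_get_next_id(struct super_block *sb, struct kqid *qid)
{
	if (!sb_has_quota_loaded(sb, qid->type))
		return -ESRCH;		/* quota not enabled: nothing to iterate */

	/* ... find the next allocated ID at or after *qid and update *qid ... */
	return -ENOENT;			/* placeholder: no further IDs found */
}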
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 3eff031..92bbe93 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2937,16 +2937,16 @@
 		end = i_size_read(inode);
 
 	while (offset < end) {
-		page_index = offset >> PAGE_CACHE_SHIFT;
-		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+		page_index = offset >> PAGE_SHIFT;
+		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
 		if (map_end > end)
 			map_end = end;
 
 		/* from, to is the offset within the page. */
-		from = offset & (PAGE_CACHE_SIZE - 1);
-		to = PAGE_CACHE_SIZE;
-		if (map_end & (PAGE_CACHE_SIZE - 1))
-			to = map_end & (PAGE_CACHE_SIZE - 1);
+		from = offset & (PAGE_SIZE - 1);
+		to = PAGE_SIZE;
+		if (map_end & (PAGE_SIZE - 1))
+			to = map_end & (PAGE_SIZE - 1);
 
 		page = find_or_create_page(mapping, page_index, GFP_NOFS);
 		if (!page) {
@@ -2956,10 +2956,10 @@
 		}
 
 		/*
-		 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+		 * In case PAGE_SIZE <= CLUSTER_SIZE, This page
 		 * can't be dirtied before we CoW it out.
 		 */
-		if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+		if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
 			BUG_ON(PageDirty(page));
 
 		if (!PageUptodate(page)) {
@@ -2987,7 +2987,7 @@
 		mark_page_accessed(page);
 unlock:
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 		offset = map_end;
 		if (ret)
@@ -3165,8 +3165,8 @@
 	}
 
 	while (offset < end) {
-		page_index = offset >> PAGE_CACHE_SHIFT;
-		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
+		page_index = offset >> PAGE_SHIFT;
+		map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
 		if (map_end > end)
 			map_end = end;
 
@@ -3182,7 +3182,7 @@
 			mark_page_accessed(page);
 
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 		offset = map_end;
 		if (ret)
@@ -4248,20 +4248,12 @@
 	struct inode *inode = d_inode(old_dentry);
 	struct buffer_head *old_bh = NULL;
 	struct inode *new_orphan_inode = NULL;
-	struct posix_acl *default_acl, *acl;
-	umode_t mode;
 
 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
 		return -EOPNOTSUPP;
 
-	mode = inode->i_mode;
-	error = posix_acl_create(dir, &mode, &default_acl, &acl);
-	if (error) {
-		mlog_errno(error);
-		return error;
-	}
 
-	error = ocfs2_create_inode_in_orphan(dir, mode,
+	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
 					     &new_orphan_inode);
 	if (error) {
 		mlog_errno(error);
@@ -4300,16 +4292,11 @@
 	/* If the security isn't preserved, we need to re-initialize them. */
 	if (!preserve) {
 		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
-						    &new_dentry->d_name,
-						    default_acl, acl);
+						    &new_dentry->d_name);
 		if (error)
 			mlog_errno(error);
 	}
 out:
-	if (default_acl)
-		posix_acl_release(default_acl);
-	if (acl)
-		posix_acl_release(acl);
 	if (!error) {
 		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
 						       new_dentry);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 7db631e..d7cae33 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -605,8 +605,8 @@
 	/*
 	 * We might be limited by page cache size.
 	 */
-	if (bytes > PAGE_CACHE_SIZE) {
-		bytes = PAGE_CACHE_SIZE;
+	if (bytes > PAGE_SIZE) {
+		bytes = PAGE_SIZE;
 		trim = 1;
 		/*
 		 * Shift by 31 here so that we don't get larger than
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 7d3d979..ad16995 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7216,12 +7216,10 @@
  */
 int ocfs2_init_security_and_acl(struct inode *dir,
 				struct inode *inode,
-				const struct qstr *qstr,
-				struct posix_acl *default_acl,
-				struct posix_acl *acl)
+				const struct qstr *qstr)
 {
-	struct buffer_head *dir_bh = NULL;
 	int ret = 0;
+	struct buffer_head *dir_bh = NULL;
 
 	ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
 	if (ret) {
@@ -7234,11 +7232,9 @@
 		mlog_errno(ret);
 		goto leave;
 	}
-
-	if (!ret && default_acl)
-		ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
-	if (!ret && acl)
-		ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
+	ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
+	if (ret)
+		mlog_errno(ret);
 
 	ocfs2_inode_unlock(dir, 0);
 	brelse(dir_bh);
@@ -7250,10 +7246,10 @@
  * 'security' attributes support
  */
 static int ocfs2_xattr_security_get(const struct xattr_handler *handler,
-				    struct dentry *dentry, const char *name,
-				    void *buffer, size_t size)
+				    struct dentry *unused, struct inode *inode,
+				    const char *name, void *buffer, size_t size)
 {
-	return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY,
+	return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_SECURITY,
 			       name, buffer, size);
 }
 
@@ -7321,10 +7317,10 @@
  * 'trusted' attributes support
  */
 static int ocfs2_xattr_trusted_get(const struct xattr_handler *handler,
-				   struct dentry *dentry, const char *name,
-				   void *buffer, size_t size)
+				   struct dentry *unused, struct inode *inode,
+				   const char *name, void *buffer, size_t size)
 {
-	return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED,
+	return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_TRUSTED,
 			       name, buffer, size);
 }
 
@@ -7346,14 +7342,14 @@
  * 'user' attributes support
  */
 static int ocfs2_xattr_user_get(const struct xattr_handler *handler,
-				struct dentry *dentry, const char *name,
-				void *buffer, size_t size)
+				struct dentry *unused, struct inode *inode,
+				const char *name, void *buffer, size_t size)
 {
-	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
 	if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
 		return -EOPNOTSUPP;
-	return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_USER, name,
+	return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_USER, name,
 			       buffer, size);
 }
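
The xattr hunks above follow the xattr_handler ->get() interface change: handlers are now passed the inode explicitly (the dentry argument may be unused), so they no longer need to go through d_inode(dentry). The prototype these hunks assume looks like the sketch below, with an assumed lookup helper:

#include <linux/xattr.h>

int example_lookup_xattr(struct inode *inode, int index, const char *name,
			 void *buffer, size_t size);	/* assumed helper */

static int example_xattr_get(const struct xattr_handler *handler,
			     struct dentry *unused, struct inode *inode,
			     const char *name, void *buffer, size_t size)
{
	/* Operate on @inode directly; @unused exists only to satisfy the prototype. */
	return example_lookup_xattr(inode, handler->flags, name, buffer, size);
}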
 
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index f10d5b9..1633cc1 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -94,7 +94,5 @@
 			 bool preserve_security);
 int ocfs2_init_security_and_acl(struct inode *dir,
 				struct inode *inode,
-				const struct qstr *qstr,
-				struct posix_acl *default_acl,
-				struct posix_acl *acl);
+				const struct qstr *qstr);
 #endif /* OCFS2_XATTR_H */
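
The ocfs2 handler changes above follow the tree-wide switch to xattr ->get methods that take the inode directly instead of deriving it from a dentry. Against the new prototype a handler ends up looking like this minimal sketch (the foofs_* names are invented for illustration):

	static int foofs_xattr_user_get(const struct xattr_handler *handler,
					struct dentry *unused, struct inode *inode,
					const char *name, void *buffer, size_t size)
	{
		/* The dentry is ignored: the lookup goes through the inode the
		 * VFS passes in, which also covers callers that only have an
		 * inode at hand. */
		return foofs_xattr_get(inode, FOOFS_XATTR_INDEX_USER, name,
				       buffer, size);
	}
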
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index f833bf8..c8cbf3b 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -452,6 +452,6 @@
 
 const struct file_operations omfs_dir_operations = {
 	.read = generic_read_dir,
-	.iterate = omfs_readdir,
+	.iterate_shared = omfs_readdir,
 	.llseek = generic_file_llseek,
 };
diff --git a/fs/open.c b/fs/open.c
index 17cb6b1..93ae3cd 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -65,7 +65,7 @@
 	return ret;
 }
 
-long vfs_truncate(struct path *path, loff_t length)
+long vfs_truncate(const struct path *path, loff_t length)
 {
 	struct inode *inode;
 	long error;
@@ -499,7 +499,7 @@
 	return error;
 }
 
-static int chmod_common(struct path *path, umode_t mode)
+static int chmod_common(const struct path *path, umode_t mode)
 {
 	struct inode *inode = path->dentry->d_inode;
 	struct inode *delegated_inode = NULL;
@@ -564,7 +564,7 @@
 	return sys_fchmodat(AT_FDCWD, filename, mode);
 }
 
-static int chown_common(struct path *path, uid_t user, gid_t group)
+static int chown_common(const struct path *path, uid_t user, gid_t group)
 {
 	struct inode *inode = path->dentry->d_inode;
 	struct inode *delegated_inode = NULL;
@@ -713,7 +713,7 @@
 	}
 
 	/* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */
-	if (S_ISREG(inode->i_mode))
+	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))
 		f->f_mode |= FMODE_ATOMIC_POS;
 
 	f->f_op = fops_get(inode->i_fop);
@@ -840,16 +840,12 @@
 int vfs_open(const struct path *path, struct file *file,
 	     const struct cred *cred)
 {
-	struct dentry *dentry = path->dentry;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
+
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
 
 	file->f_path = *path;
-	if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
-		inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
-		if (IS_ERR(inode))
-			return PTR_ERR(inode);
-	}
-
 	return do_dentry_open(file, inode, NULL, cred);
 }
 
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index b61b883..c7a8699 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -166,7 +166,7 @@
 
 static const struct file_operations openprom_operations = {
 	.read		= generic_read_dir,
-	.iterate	= openpromfs_readdir,
+	.iterate_shared	= openpromfs_readdir,
 	.llseek		= generic_file_llseek,
 };
 
diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c
index f30b6ec..324f0af 100644
--- a/fs/orangefs/dir.c
+++ b/fs/orangefs/dir.c
@@ -153,7 +153,6 @@
 	struct dentry *dentry = file->f_path.dentry;
 	struct orangefs_kernel_op_s *new_op = NULL;
 	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode);
-	int buffer_full = 0;
 	struct orangefs_readdir_response_s readdir_response;
 	void *dents_buf;
 	int i = 0;
@@ -235,7 +234,7 @@
 	if (ret == -EIO && op_state_purged(new_op)) {
 		gossip_err("%s: Client is down. Aborting readdir call.\n",
 			__func__);
-		goto out_slot;
+		goto out_free_op;
 	}
 
 	if (ret < 0 || new_op->downcall.status != 0) {
@@ -244,14 +243,14 @@
 			     new_op->downcall.status);
 		if (ret >= 0)
 			ret = new_op->downcall.status;
-		goto out_slot;
+		goto out_free_op;
 	}
 
 	dents_buf = new_op->downcall.trailer_buf;
 	if (dents_buf == NULL) {
 		gossip_err("Invalid NULL buffer in readdir response\n");
 		ret = -ENOMEM;
-		goto out_slot;
+		goto out_free_op;
 	}
 
 	bytes_decoded = decode_dirents(dents_buf, new_op->downcall.trailer_size,
@@ -350,8 +349,7 @@
 	/*
 	 * Did we hit the end of the directory?
 	 */
-	if (readdir_response.token == ORANGEFS_READDIR_END &&
-	    !buffer_full) {
+	if (readdir_response.token == ORANGEFS_READDIR_END) {
 		gossip_debug(GOSSIP_DIR_DEBUG,
 		"End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n");
 		ctx->pos = ORANGEFS_READDIR_END;
@@ -363,8 +361,6 @@
 out_vfree:
 	gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", dents_buf);
 	vfree(dents_buf);
-out_slot:
-	orangefs_readdir_index_put(buffer_index);
 out_free_op:
 	op_release(new_op);
 	gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret);
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index ae92795..491e82c 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -445,7 +445,7 @@
 
 	gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");
 
-	mutex_lock(&file->f_mapping->host->i_mutex);
+	inode_lock(file->f_mapping->host);
 
 	/* Make sure generic_write_checks sees an up to date inode size. */
 	if (file->f_flags & O_APPEND) {
@@ -492,7 +492,7 @@
 
 out:
 
-	mutex_unlock(&file->f_mapping->host->i_mutex);
+	inode_unlock(file->f_mapping->host);
 	return rc;
 }
 
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 2382e26..85640e9 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -18,8 +18,8 @@
 	int max_block;
 	ssize_t bytes_read = 0;
 	struct inode *inode = page->mapping->host;
-	const __u32 blocksize = PAGE_CACHE_SIZE;	/* inode->i_blksize */
-	const __u32 blockbits = PAGE_CACHE_SHIFT;	/* inode->i_blkbits */
+	const __u32 blocksize = PAGE_SIZE;	/* inode->i_blksize */
+	const __u32 blockbits = PAGE_SHIFT;	/* inode->i_blkbits */
 	struct iov_iter to;
 	struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
 
@@ -86,7 +86,7 @@
 				"failure adding page to cache, read_one_page returned: %d\n",
 				ret);
 	      } else {
-			page_cache_release(page);
+			put_page(page);
 	      }
 	}
 	BUG_ON(!list_empty(pages));
@@ -204,22 +204,8 @@
 	if (ret != 0)
 		return ret;
 
-	/*
-	 * Only change the c/mtime if we are changing the size or we are
-	 * explicitly asked to change it.  This handles the semantic difference
-	 * between truncate() and ftruncate() as implemented in the VFS.
-	 *
-	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
-	 * special case where we need to update the times despite not having
-	 * these flags set.  For all other operations the VFS set these flags
-	 * explicitly if it wants a timestamp update.
-	 */
-	if (orig_size != i_size_read(inode) &&
-	    !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
-		iattr->ia_ctime = iattr->ia_mtime =
-			current_fs_time(inode->i_sb);
+	if (orig_size != i_size_read(inode))
 		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
-	}
 
 	return ret;
 }
@@ -328,7 +314,7 @@
 	case S_IFREG:
 		inode->i_op = &orangefs_file_inode_operations;
 		inode->i_fop = &orangefs_file_operations;
-		inode->i_blkbits = PAGE_CACHE_SHIFT;
+		inode->i_blkbits = PAGE_SHIFT;
 		break;
 	case S_IFLNK:
 		inode->i_op = &orangefs_symlink_inode_operations;
@@ -456,7 +442,7 @@
 	inode->i_uid = current_fsuid();
 	inode->i_gid = current_fsgid();
 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	inode->i_size = PAGE_CACHE_SIZE;
+	inode->i_size = PAGE_SIZE;
 	inode->i_rdev = dev;
 
 	error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 1f8acc9f..75375e9 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -170,7 +170,7 @@
 	int i;
 
 	for (i = 0; i < bufmap->page_count; i++)
-		page_cache_release(bufmap->page_array[i]);
+		put_page(bufmap->page_array[i]);
 }
 
 static void
@@ -299,7 +299,7 @@
 
 		for (i = 0; i < ret; i++) {
 			SetPageError(bufmap->page_array[i]);
-			page_cache_release(bufmap->page_array[i]);
+			put_page(bufmap->page_array[i]);
 		}
 		return -ENOMEM;
 	}
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 19670b8..1714a73 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -126,8 +126,7 @@
 
 void orangefs_debugfs_cleanup(void)
 {
-	if (debug_dir)
-		debugfs_remove_recursive(debug_dir);
+	debugfs_remove_recursive(debug_dir);
 }
 
 /* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index a9925e2..2281882 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -612,11 +612,11 @@
 static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
 {
 #if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-	mutex_lock(&inode->i_mutex);
+	inode_lock(inode);
 #endif
 	i_size_write(inode, i_size);
 #if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-	mutex_unlock(&inode->i_mutex);
+	inode_unlock(inode);
 #endif
 }
 
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 40f5163..2d129b5 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -303,7 +303,7 @@
 		}
 		break;
 	case S_IFDIR:
-		inode->i_size = PAGE_CACHE_SIZE;
+		inode->i_size = PAGE_SIZE;
 		orangefs_inode->blksize = (1 << inode->i_blkbits);
 		spin_lock(&inode->i_lock);
 		inode_set_bytes(inode, inode->i_size);
@@ -315,9 +315,13 @@
 			inode->i_size = (loff_t)strlen(new_op->
 			    downcall.resp.getattr.link_target);
 			orangefs_inode->blksize = (1 << inode->i_blkbits);
-			strlcpy(orangefs_inode->link_target,
+			ret = strscpy(orangefs_inode->link_target,
 			    new_op->downcall.resp.getattr.link_target,
 			    ORANGEFS_NAME_MAX);
+			if (ret == -E2BIG) {
+				ret = -EIO;
+				goto out;
+			}
 			inode->i_link = orangefs_inode->link_target;
 		}
 		break;
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
index 45ce4ff..1efc6f8 100644
--- a/fs/orangefs/protocol.h
+++ b/fs/orangefs/protocol.h
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/spinlock_types.h>
 #include <linux/slab.h>
@@ -74,8 +75,8 @@
 				   void *p, int size)
 {
 
-	memset(p, 0, size);
 	memcpy(p, kh->u, 16);
+	memset(p + 16, 0, size - 16);
 
 }
 
@@ -407,7 +408,7 @@
  * space. Zero signifies the upstream version of the kernel module.
  */
 #define ORANGEFS_KERNEL_PROTO_VERSION 0
-#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20904
+#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20903
 
 /*
  * describes memory regions to map in the ORANGEFS_DEV_MAP ioctl.
@@ -427,26 +428,28 @@
 /* gossip.h *****************************************************************/
 
 #ifdef GOSSIP_DISABLE_DEBUG
-#define gossip_debug(mask, format, f...) do {} while (0)
+#define gossip_debug(mask, fmt, ...)					\
+do {									\
+	if (0)								\
+		printk(KERN_DEBUG fmt, ##__VA_ARGS__);			\
+} while (0)
 #else
 extern __u64 gossip_debug_mask;
 extern struct client_debug_mask client_debug_mask;
 
 /* try to avoid function call overhead by checking masks in macro */
-#define gossip_debug(mask, format, f...)			\
-do {								\
-	if (gossip_debug_mask & mask)				\
-		printk(format, ##f);				\
+#define gossip_debug(mask, fmt, ...)					\
+do {									\
+	if (gossip_debug_mask & (mask))					\
+		printk(KERN_DEBUG fmt, ##__VA_ARGS__);			\
 } while (0)
 #endif /* GOSSIP_DISABLE_DEBUG */
 
 /* do file and line number printouts w/ the GNU preprocessor */
-#define gossip_ldebug(mask, format, f...)				\
-		gossip_debug(mask, "%s: " format, __func__, ##f)
+#define gossip_ldebug(mask, fmt, ...)					\
+	gossip_debug(mask, "%s: " fmt, __func__, ##__VA_ARGS__)
 
-#define gossip_err printk
-#define gossip_lerr(format, f...)					\
-		gossip_err("%s line %d: " format,			\
-			   __FILE__,					\
-			   __LINE__,					\
-			   ##f)
+#define gossip_err pr_err
+#define gossip_lerr(fmt, ...)						\
+	gossip_err("%s line %d: " fmt,					\
+		   __FILE__, __LINE__, ##__VA_ARGS__)
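
The reworked gossip macros use the common "if (0) printk()" idiom: with GOSSIP_DISABLE_DEBUG the call is dead code that the compiler discards, but the format string is still checked against its arguments. The same idiom in isolation (my_debug is an invented name):

	#define my_debug(fmt, ...)					\
	do {								\
		/* dead code, but fmt/args are still type-checked */	\
		if (0)							\
			printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
	} while (0)
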
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index ef5da75..99c1954 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -73,10 +73,6 @@
 		     "%s: prefix %s name %s, buffer_size %zd\n",
 		     __func__, prefix, name, size);
 
-	if (name == NULL || (size > 0 && buffer == NULL)) {
-		gossip_err("orangefs_inode_getxattr: bogus NULL pointers\n");
-		return -EINVAL;
-	}
 	if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) {
 		gossip_err("Invalid key length (%d)\n",
 			   (int)(strlen(name) + strlen(prefix)));
@@ -146,8 +142,8 @@
 		goto out_release_op;
 	}
 
-	memset(buffer, 0, size);
 	memcpy(buffer, new_op->downcall.resp.getxattr.val, length);
+	memset(buffer + length, 0, size - length);
 	gossip_debug(GOSSIP_XATTR_DEBUG,
 	     "orangefs_inode_getxattr: inode %pU "
 	     "key %s key_sz %d, val_len %d\n",
@@ -239,8 +235,7 @@
 		     "%s: prefix %s, name %s, buffer_size %zd\n",
 		     __func__, prefix, name, size);
 
-	if (size < 0 ||
-	    size >= ORANGEFS_MAX_XATTR_VALUELEN ||
+	if (size >= ORANGEFS_MAX_XATTR_VALUELEN ||
 	    flags < 0) {
 		gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
 			   (int)size,
@@ -248,12 +243,6 @@
 		return -EINVAL;
 	}
 
-	if (name == NULL ||
-	    (size > 0 && value == NULL)) {
-		gossip_err("orangefs_inode_setxattr: bogus NULL pointers!\n");
-		return -EINVAL;
-	}
-
 	internal_flag = convert_to_internal_xattr_flags(flags);
 
 	if (prefix) {
@@ -353,10 +342,6 @@
 		gossip_err("%s: bogus NULL pointers\n", __func__);
 		return -EINVAL;
 	}
-	if (size < 0) {
-		gossip_err("Invalid size (%d)\n", (int)size);
-		return -EINVAL;
-	}
 
 	down_read(&orangefs_inode->xattr_sem);
 	new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR);
@@ -478,12 +463,13 @@
 }
 
 static int orangefs_xattr_get_default(const struct xattr_handler *handler,
-				      struct dentry *dentry,
+				      struct dentry *unused,
+				      struct inode *inode,
 				      const char *name,
 				      void *buffer,
 				      size_t size)
 {
-	return orangefs_inode_getxattr(dentry->d_inode,
+	return orangefs_inode_getxattr(inode,
 				    ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
 				    name,
 				    buffer,
@@ -507,12 +493,13 @@
 }
 
 static int orangefs_xattr_get_trusted(const struct xattr_handler *handler,
-				      struct dentry *dentry,
+				      struct dentry *unused,
+				      struct inode *inode,
 				      const char *name,
 				      void *buffer,
 				      size_t size)
 {
-	return orangefs_inode_getxattr(dentry->d_inode,
+	return orangefs_inode_getxattr(inode,
 				    ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
 				    name,
 				    buffer,
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index a4ff5d0..c7b31a0 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -246,8 +246,8 @@
 		return false;
 }
 
-ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
-		     void *value, size_t size)
+ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
+		     const char *name, void *value, size_t size)
 {
 	struct path realpath;
 	enum ovl_path_type type = ovl_path_real(dentry, &realpath);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 6a7090f..99ec4b0 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -173,8 +173,8 @@
 int ovl_permission(struct inode *inode, int mask);
 int ovl_setxattr(struct dentry *dentry, const char *name,
 		 const void *value, size_t size, int flags);
-ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
-		     void *value, size_t size);
+ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
+		     const char *name, void *value, size_t size);
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
 int ovl_removexattr(struct dentry *dentry, const char *name);
 struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 6ec1e43..da186ee 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -218,7 +218,9 @@
 	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
 	old_cred = override_creds(override_cred);
 
-	err = mutex_lock_killable(&dir->d_inode->i_mutex);
+	inode_lock(dir->d_inode);
+	err = 0;
+	// XXX: err = mutex_lock_killable(&dir->d_inode->i_mutex);
 	if (!err) {
 		while (rdd->first_maybe_whiteout) {
 			p = rdd->first_maybe_whiteout;
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index ef64984..ed53ae0 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -274,7 +274,7 @@
 	if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr)
 		return false;
 
-	res = inode->i_op->getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
+	res = inode->i_op->getxattr(dentry, inode, OVL_XATTR_OPAQUE, &val, 1);
 	if (res == 1 && val == 'y')
 		return true;
 
@@ -295,6 +295,37 @@
 	}
 }
 
+static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
+{
+	struct dentry *real;
+
+	if (d_is_dir(dentry)) {
+		if (!inode || inode == d_inode(dentry))
+			return dentry;
+		goto bug;
+	}
+
+	real = ovl_dentry_upper(dentry);
+	if (real && (!inode || inode == d_inode(real)))
+		return real;
+
+	real = ovl_dentry_lower(dentry);
+	if (!real)
+		goto bug;
+
+	if (!inode || inode == d_inode(real))
+		return real;
+
+	/* Handle recursion */
+	if (real->d_flags & DCACHE_OP_REAL)
+		return real->d_op->d_real(real, inode);
+
+bug:
+	WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
+	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+	return dentry;
+}
+
 static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
 {
 	struct ovl_entry *oe = dentry->d_fsdata;
@@ -339,11 +370,13 @@
 static const struct dentry_operations ovl_dentry_operations = {
 	.d_release = ovl_dentry_release,
 	.d_select_inode = ovl_d_select_inode,
+	.d_real = ovl_d_real,
 };
 
 static const struct dentry_operations ovl_reval_dentry_operations = {
 	.d_release = ovl_dentry_release,
 	.d_select_inode = ovl_d_select_inode,
+	.d_real = ovl_d_real,
 	.d_revalidate = ovl_dentry_revalidate,
 	.d_weak_revalidate = ovl_dentry_weak_revalidate,
 };
@@ -378,9 +411,7 @@
 {
 	struct dentry *dentry;
 
-	inode_lock(dir->d_inode);
-	dentry = lookup_one_len(name->name, dir, name->len);
-	inode_unlock(dir->d_inode);
+	dentry = lookup_hash(name, dir);
 
 	if (IS_ERR(dentry)) {
 		if (PTR_ERR(dentry) == -ENOENT)
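
The ovl_d_real() added above implements the ->d_real() dentry operation for stacked filesystems; generic code reaches it through a small wrapper in include/linux/dcache.h (file_dentry() calls it with file_inode(file)), which at this stage looks roughly like:

	static inline struct dentry *d_real(struct dentry *dentry,
					    struct inode *inode)
	{
		/* Stacked filesystems set DCACHE_OP_REAL and map the overlay
		 * dentry to the layer dentry that owns the given inode. */
		if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
			return dentry->d_op->d_real(dentry, inode);
		else
			return dentry;
	}
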
diff --git a/fs/pipe.c b/fs/pipe.c
index ab8dad3..0d3f516 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -134,7 +134,7 @@
 	if (page_count(page) == 1 && !pipe->tmp_page)
 		pipe->tmp_page = page;
 	else
-		page_cache_release(page);
+		put_page(page);
 }
 
 /**
@@ -180,7 +180,7 @@
  */
 void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
 {
-	page_cache_get(buf->page);
+	get_page(buf->page);
 }
 EXPORT_SYMBOL(generic_pipe_buf_get);
 
@@ -211,7 +211,7 @@
 void generic_pipe_buf_release(struct pipe_inode_info *pipe,
 			      struct pipe_buffer *buf)
 {
-	page_cache_release(buf->page);
+	put_page(buf->page);
 }
 EXPORT_SYMBOL(generic_pipe_buf_release);
 
diff --git a/fs/pnode.c b/fs/pnode.c
index c524fdd..9989970 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -198,7 +198,7 @@
 
 /* all accesses are serialized by namespace_sem */
 static struct user_namespace *user_ns;
-static struct mount *last_dest, *last_source, *dest_master;
+static struct mount *last_dest, *first_source, *last_source, *dest_master;
 static struct mountpoint *mp;
 static struct hlist_head *list;
 
@@ -221,20 +221,22 @@
 		type = CL_MAKE_SHARED;
 	} else {
 		struct mount *n, *p;
+		bool done;
 		for (n = m; ; n = p) {
 			p = n->mnt_master;
-			if (p == dest_master || IS_MNT_MARKED(p)) {
-				while (last_dest->mnt_master != p) {
-					last_source = last_source->mnt_master;
-					last_dest = last_source->mnt_parent;
-				}
-				if (!peers(n, last_dest)) {
-					last_source = last_source->mnt_master;
-					last_dest = last_source->mnt_parent;
-				}
+			if (p == dest_master || IS_MNT_MARKED(p))
 				break;
-			}
 		}
+		do {
+			struct mount *parent = last_source->mnt_parent;
+			if (last_source == first_source)
+				break;
+			done = parent->mnt_master == p;
+			if (done && peers(n, parent))
+				break;
+			last_source = last_source->mnt_master;
+		} while (!done);
+
 		type = CL_SLAVE;
 		/* beginning of peer group among the slaves? */
 		if (IS_MNT_SHARED(m))
@@ -286,6 +288,7 @@
 	 */
 	user_ns = current->nsproxy->mnt_ns->user_ns;
 	last_dest = dest_mnt;
+	first_source = source_mnt;
 	last_source = source_mnt;
 	mp = dest_mp;
 	list = tree_list;
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 711dd51..2c60f17 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -21,7 +21,7 @@
 #include <linux/export.h>
 #include <linux/user_namespace.h>
 
-struct posix_acl **acl_by_type(struct inode *inode, int type)
+static struct posix_acl **acl_by_type(struct inode *inode, int type)
 {
 	switch (type) {
 	case ACL_TYPE_ACCESS:
@@ -32,19 +32,22 @@
 		BUG();
 	}
 }
-EXPORT_SYMBOL(acl_by_type);
 
 struct posix_acl *get_cached_acl(struct inode *inode, int type)
 {
 	struct posix_acl **p = acl_by_type(inode, type);
-	struct posix_acl *acl = ACCESS_ONCE(*p);
-	if (acl) {
-		spin_lock(&inode->i_lock);
-		acl = *p;
-		if (acl != ACL_NOT_CACHED)
-			acl = posix_acl_dup(acl);
-		spin_unlock(&inode->i_lock);
+	struct posix_acl *acl;
+
+	for (;;) {
+		rcu_read_lock();
+		acl = rcu_dereference(*p);
+		if (!acl || is_uncached_acl(acl) ||
+		    atomic_inc_not_zero(&acl->a_refcount))
+			break;
+		rcu_read_unlock();
+		cpu_relax();
 	}
+	rcu_read_unlock();
 	return acl;
 }
 EXPORT_SYMBOL(get_cached_acl);
@@ -59,58 +62,72 @@
 {
 	struct posix_acl **p = acl_by_type(inode, type);
 	struct posix_acl *old;
-	spin_lock(&inode->i_lock);
-	old = *p;
-	rcu_assign_pointer(*p, posix_acl_dup(acl));
-	spin_unlock(&inode->i_lock);
-	if (old != ACL_NOT_CACHED)
+
+	old = xchg(p, posix_acl_dup(acl));
+	if (!is_uncached_acl(old))
 		posix_acl_release(old);
 }
 EXPORT_SYMBOL(set_cached_acl);
 
+static void __forget_cached_acl(struct posix_acl **p)
+{
+	struct posix_acl *old;
+
+	old = xchg(p, ACL_NOT_CACHED);
+	if (!is_uncached_acl(old))
+		posix_acl_release(old);
+}
+
 void forget_cached_acl(struct inode *inode, int type)
 {
-	struct posix_acl **p = acl_by_type(inode, type);
-	struct posix_acl *old;
-	spin_lock(&inode->i_lock);
-	old = *p;
-	*p = ACL_NOT_CACHED;
-	spin_unlock(&inode->i_lock);
-	if (old != ACL_NOT_CACHED)
-		posix_acl_release(old);
+	__forget_cached_acl(acl_by_type(inode, type));
 }
 EXPORT_SYMBOL(forget_cached_acl);
 
 void forget_all_cached_acls(struct inode *inode)
 {
-	struct posix_acl *old_access, *old_default;
-	spin_lock(&inode->i_lock);
-	old_access = inode->i_acl;
-	old_default = inode->i_default_acl;
-	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
-	spin_unlock(&inode->i_lock);
-	if (old_access != ACL_NOT_CACHED)
-		posix_acl_release(old_access);
-	if (old_default != ACL_NOT_CACHED)
-		posix_acl_release(old_default);
+	__forget_cached_acl(&inode->i_acl);
+	__forget_cached_acl(&inode->i_default_acl);
 }
 EXPORT_SYMBOL(forget_all_cached_acls);
 
 struct posix_acl *get_acl(struct inode *inode, int type)
 {
+	void *sentinel;
+	struct posix_acl **p;
 	struct posix_acl *acl;
 
+	/*
+	 * The sentinel is used to detect when another operation like
+	 * set_cached_acl() or forget_cached_acl() races with get_acl().
+	 * It is guaranteed that is_uncached_acl(sentinel) is true.
+	 */
+
 	acl = get_cached_acl(inode, type);
-	if (acl != ACL_NOT_CACHED)
+	if (!is_uncached_acl(acl))
 		return acl;
 
 	if (!IS_POSIXACL(inode))
 		return NULL;
 
+	sentinel = uncached_acl_sentinel(current);
+	p = acl_by_type(inode, type);
+
 	/*
-	 * A filesystem can force a ACL callback by just never filling the
-	 * ACL cache. But normally you'd fill the cache either at inode
-	 * instantiation time, or on the first ->get_acl call.
+	 * If the ACL isn't being read yet, set our sentinel.  Otherwise, the
+	 * current value of the ACL will not be ACL_NOT_CACHED and so our own
+	 * sentinel will not be set; another task will update the cache.  We
+	 * could wait for that other task to complete its job, but it's easier
+	 * to just call ->get_acl to fetch the ACL ourself.  (This is going to
+	 * be an unlikely race.)
+	 */
+	if (cmpxchg(p, ACL_NOT_CACHED, sentinel) != ACL_NOT_CACHED)
+		/* fall through */ ;
+
+	/*
+	 * Normally, the ACL returned by ->get_acl will be cached.
+	 * A filesystem can prevent that by calling
+	 * forget_cached_acl(inode, type) in ->get_acl.
 	 *
 	 * If the filesystem doesn't have a get_acl() function at all, we'll
 	 * just create the negative cache entry.
@@ -119,7 +136,24 @@
 		set_cached_acl(inode, type, NULL);
 		return NULL;
 	}
-	return inode->i_op->get_acl(inode, type);
+	acl = inode->i_op->get_acl(inode, type);
+
+	if (IS_ERR(acl)) {
+		/*
+		 * Remove our sentinel so that we don't block future attempts
+		 * to cache the ACL.
+		 */
+		cmpxchg(p, sentinel, ACL_NOT_CACHED);
+		return acl;
+	}
+
+	/*
+	 * Cache the result, but only if our sentinel is still in place.
+	 */
+	posix_acl_dup(acl);
+	if (unlikely(cmpxchg(p, sentinel, acl) != sentinel))
+		posix_acl_release(acl);
+	return acl;
 }
 EXPORT_SYMBOL(get_acl);
 
@@ -763,18 +797,18 @@
 
 static int
 posix_acl_xattr_get(const struct xattr_handler *handler,
-		    struct dentry *dentry, const char *name,
-		    void *value, size_t size)
+		    struct dentry *unused, struct inode *inode,
+		    const char *name, void *value, size_t size)
 {
 	struct posix_acl *acl;
 	int error;
 
-	if (!IS_POSIXACL(d_backing_inode(dentry)))
+	if (!IS_POSIXACL(inode))
 		return -EOPNOTSUPP;
-	if (d_is_symlink(dentry))
+	if (S_ISLNK(inode->i_mode))
 		return -EOPNOTSUPP;
 
-	acl = get_acl(d_backing_inode(dentry), handler->flags);
+	acl = get_acl(inode, handler->flags);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
 	if (acl == NULL)
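
As the rewritten comment in get_acl() notes, the VFS now caches whatever ->get_acl() returns unless the filesystem removes the sentinel itself. A filesystem that wants every access to hit its own code would do something along these lines (myfs_read_acl_from_disk() is a made-up helper):

	static struct posix_acl *myfs_get_acl(struct inode *inode, int type)
	{
		struct posix_acl *acl = myfs_read_acl_from_disk(inode, type);

		/* Drop the caller's sentinel so the result is not cached and
		 * ->get_acl() is invoked again on the next access. */
		forget_cached_acl(inode, type);
		return acl;
	}
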
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b1755b2..ff4527d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -434,7 +434,7 @@
 			&& !lookup_symbol_name(wchan, symname))
 		seq_printf(m, "%s", symname);
 	else
-		seq_puts(m, "0\n");
+		seq_putc(m, '0');
 
 	return 0;
 }
@@ -955,7 +955,8 @@
 	struct mm_struct *mm = file->private_data;
 	unsigned long env_start, env_end;
 
-	if (!mm)
+	/* Ensure the process spawned far enough to have an environment. */
+	if (!mm || !mm->env_end)
 		return 0;
 
 	page = (char *)__get_free_page(GFP_TEMPORARY);
@@ -1819,12 +1820,17 @@
 
 	child = d_hash_and_lookup(dir, &qname);
 	if (!child) {
-		child = d_alloc(dir, &qname);
-		if (!child)
+		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+		child = d_alloc_parallel(dir, &qname, &wq);
+		if (IS_ERR(child))
 			goto end_instantiate;
-		if (instantiate(d_inode(dir), child, task, ptr) < 0) {
-			dput(child);
-			goto end_instantiate;
+		if (d_in_lookup(child)) {
+			int err = instantiate(d_inode(dir), child, task, ptr);
+			d_lookup_done(child);
+			if (err < 0) {
+				dput(child);
+				goto end_instantiate;
+			}
 		}
 	}
 	inode = d_inode(child);
@@ -2154,8 +2160,8 @@
 
 static const struct file_operations proc_map_files_operations = {
 	.read		= generic_read_dir,
-	.iterate	= proc_map_files_readdir,
-	.llseek		= default_llseek,
+	.iterate_shared	= proc_map_files_readdir,
+	.llseek		= generic_file_llseek,
 };
 
 #ifdef CONFIG_CHECKPOINT_RESTORE
@@ -2502,8 +2508,8 @@
 
 static const struct file_operations proc_attr_dir_operations = {
 	.read		= generic_read_dir,
-	.iterate	= proc_attr_dir_readdir,
-	.llseek		= default_llseek,
+	.iterate_shared	= proc_attr_dir_readdir,
+	.llseek		= generic_file_llseek,
 };
 
 static struct dentry *proc_attr_dir_lookup(struct inode *dir,
@@ -2910,8 +2916,8 @@
 
 static const struct file_operations proc_tgid_base_operations = {
 	.read		= generic_read_dir,
-	.iterate	= proc_tgid_base_readdir,
-	.llseek		= default_llseek,
+	.iterate_shared	= proc_tgid_base_readdir,
+	.llseek		= generic_file_llseek,
 };
 
 static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
@@ -3258,8 +3264,8 @@
 
 static const struct file_operations proc_tid_base_operations = {
 	.read		= generic_read_dir,
-	.iterate	= proc_tid_base_readdir,
-	.llseek		= default_llseek,
+	.iterate_shared	= proc_tid_base_readdir,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct inode_operations proc_tid_base_inode_operations = {
@@ -3469,6 +3475,6 @@
 
 static const struct file_operations proc_task_operations = {
 	.read		= generic_read_dir,
-	.iterate	= proc_task_readdir,
-	.llseek		= default_llseek,
+	.iterate_shared	= proc_task_readdir,
+	.llseek		= generic_file_llseek,
 };
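
proc switches its readdir instantiation to the in-lookup dentry API because directories can now be iterated with only the shared lock held, so two readers may race to create the same child. Condensed, the pattern used in the hunks above is (error labels and the proc-specific callback arguments trimmed):

	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct dentry *child = d_alloc_parallel(dir, &qname, &wq);

	if (IS_ERR(child))
		return PTR_ERR(child);
	if (d_in_lookup(child)) {
		/* this task won the race and gets to fill in the dentry ... */
		int err = instantiate(d_inode(dir), child, task, ptr);
		/* ... then wakes anyone who found it in-lookup meanwhile */
		d_lookup_done(child);
		if (err < 0) {
			dput(child);
			return err;
		}
	}
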
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 56afa5e..01df23c 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -276,8 +276,8 @@
 
 const struct file_operations proc_fd_operations = {
 	.read		= generic_read_dir,
-	.iterate	= proc_readfd,
-	.llseek		= default_llseek,
+	.iterate_shared	= proc_readfd,
+	.llseek		= generic_file_llseek,
 };
 
 static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
@@ -361,6 +361,6 @@
 
 const struct file_operations proc_fdinfo_operations = {
 	.read		= generic_read_dir,
-	.iterate	= proc_readfdinfo,
-	.llseek		= default_llseek,
+	.iterate_shared	= proc_readfdinfo,
+	.llseek		= generic_file_llseek,
 };
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index ff3ffc7..c633476 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -318,7 +318,7 @@
 static const struct file_operations proc_dir_operations = {
 	.llseek			= generic_file_llseek,
 	.read			= generic_read_dir,
-	.iterate		= proc_readdir,
+	.iterate_shared		= proc_readdir,
 };
 
 /*
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 72cb26f..51b8b0a 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -139,7 +139,8 @@
 
 const struct file_operations proc_ns_dir_operations = {
 	.read		= generic_read_dir,
-	.iterate	= proc_ns_dir_readdir,
+	.iterate_shared	= proc_ns_dir_readdir,
+	.llseek		= generic_file_llseek,
 };
 
 static struct dentry *proc_ns_dir_lookup(struct inode *dir,
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 350984a..c8bbc68 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -179,7 +179,7 @@
 const struct file_operations proc_net_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= proc_tgid_net_readdir,
+	.iterate_shared	= proc_tgid_net_readdir,
 };
 
 static __net_init int proc_net_ns_init(struct net *net)
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index fe5b6e6..5e57c3e 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -627,18 +627,19 @@
 
 	child = d_lookup(dir, &qname);
 	if (!child) {
-		child = d_alloc(dir, &qname);
-		if (child) {
+		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+		child = d_alloc_parallel(dir, &qname, &wq);
+		if (IS_ERR(child))
+			return false;
+		if (d_in_lookup(child)) {
 			inode = proc_sys_make_inode(dir->d_sb, head, table);
 			if (!inode) {
+				d_lookup_done(child);
 				dput(child);
 				return false;
-			} else {
-				d_set_d_op(child, &proc_sys_dentry_operations);
-				d_add(child, inode);
 			}
-		} else {
-			return false;
+			d_set_d_op(child, &proc_sys_dentry_operations);
+			d_add(child, inode);
 		}
 	}
 	inode = d_inode(child);
@@ -789,7 +790,7 @@
 
 static const struct file_operations proc_sys_dir_file_operations = {
 	.read		= generic_read_dir,
-	.iterate	= proc_sys_readdir,
+	.iterate_shared	= proc_sys_readdir,
 	.llseek		= generic_file_llseek,
 };
 
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 361ab4e..55bc7d6 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -226,8 +226,8 @@
  */
 static const struct file_operations proc_root_operations = {
 	.read		 = generic_read_dir,
-	.iterate	 = proc_root_readdir,
-	.llseek		= default_llseek,
+	.iterate_shared	 = proc_root_readdir,
+	.llseek		= generic_file_llseek,
 };
 
 /*
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9df4316..5415835 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -553,7 +553,7 @@
 		if (radix_tree_exceptional_entry(page))
 			mss->swap += PAGE_SIZE;
 		else
-			page_cache_release(page);
+			put_page(page);
 
 		return;
 	}
@@ -1518,6 +1518,32 @@
 	return page;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
+					      struct vm_area_struct *vma,
+					      unsigned long addr)
+{
+	struct page *page;
+	int nid;
+
+	if (!pmd_present(pmd))
+		return NULL;
+
+	page = vm_normal_page_pmd(vma, addr, pmd);
+	if (!page)
+		return NULL;
+
+	if (PageReserved(page))
+		return NULL;
+
+	nid = page_to_nid(page);
+	if (!node_isset(nid, node_states[N_MEMORY]))
+		return NULL;
+
+	return page;
+}
+#endif
+
 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 		unsigned long end, struct mm_walk *walk)
 {
@@ -1527,14 +1553,14 @@
 	pte_t *orig_pte;
 	pte_t *pte;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
-		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
 
-		page = can_gather_numa_stats(huge_pte, vma, addr);
+		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
 		if (page)
-			gather_stats(page, md, pte_dirty(huge_pte),
+			gather_stats(page, md, pmd_dirty(*pmd),
 				     HPAGE_PMD_SIZE/PAGE_SIZE);
 		spin_unlock(ptl);
 		return 0;
@@ -1542,6 +1568,7 @@
 
 	if (pmd_trans_unstable(pmd))
 		return 0;
+#endif
 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	do {
 		struct page *page = can_gather_numa_stats(*pte, vma, addr);
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 55bb57e..8ab782d 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -279,12 +279,12 @@
 	if (!page)
 		return VM_FAULT_OOM;
 	if (!PageUptodate(page)) {
-		offset = (loff_t) index << PAGE_CACHE_SHIFT;
+		offset = (loff_t) index << PAGE_SHIFT;
 		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
 		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
 		if (rc < 0) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
 		}
 		SetPageUptodate(page);
@@ -1071,7 +1071,7 @@
 	/* Do some basic Verification. */
 	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
 		(ehdr.e_type != ET_CORE) ||
-		!elf_check_arch(&ehdr) ||
+		!vmcore_elf32_check_arch(&ehdr) ||
 		ehdr.e_ident[EI_CLASS] != ELFCLASS32||
 		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
 		ehdr.e_version != EV_CURRENT ||
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index dc645b6..45d6110 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -420,8 +420,8 @@
 	pstore_sb = sb;
 
 	sb->s_maxbytes		= MAX_LFS_FILESIZE;
-	sb->s_blocksize		= PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits	= PAGE_CACHE_SHIFT;
+	sb->s_blocksize		= PAGE_SIZE;
+	sb->s_blocksize_bits	= PAGE_SHIFT;
 	sb->s_magic		= PSTOREFS_MAGIC;
 	sb->s_op		= &pstore_ops;
 	sb->s_time_gran		= 1;
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index b218f96..781056a 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -71,7 +71,7 @@
 {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= qnx4_readdir,
+	.iterate_shared	= qnx4_readdir,
 	.fsync		= generic_file_fsync,
 };
 
diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c
index e1f3727..27637e0 100644
--- a/fs/qnx6/dir.c
+++ b/fs/qnx6/dir.c
@@ -35,9 +35,9 @@
 static unsigned last_entry(struct inode *inode, unsigned long page_nr)
 {
 	unsigned long last_byte = inode->i_size;
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte / QNX6_DIR_ENTRY_SIZE;
 }
 
@@ -47,9 +47,9 @@
 {
 	struct qnx6_sb_info *sbi = QNX6_SB(sb);
 	u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */
-	u32 n = s >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits); /* in pages */
+	u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */
 	/* within page */
-	u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_CACHE_MASK;
+	u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK;
 	struct address_space *mapping = sbi->longfile->i_mapping;
 	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (IS_ERR(page))
@@ -115,8 +115,8 @@
 	struct qnx6_sb_info *sbi = QNX6_SB(s);
 	loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1);
 	unsigned long npages = dir_pages(inode);
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
-	unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE;
+	unsigned long n = pos >> PAGE_SHIFT;
+	unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE;
 	bool done = false;
 
 	ctx->pos = pos;
@@ -131,7 +131,7 @@
 
 		if (IS_ERR(page)) {
 			pr_err("%s(): read failed\n", __func__);
-			ctx->pos = (n + 1) << PAGE_CACHE_SHIFT;
+			ctx->pos = (n + 1) << PAGE_SHIFT;
 			return PTR_ERR(page);
 		}
 		de = ((struct qnx6_dir_entry *)page_address(page)) + start;
@@ -272,7 +272,7 @@
 const struct file_operations qnx6_dir_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= qnx6_readdir,
+	.iterate_shared	= qnx6_readdir,
 	.fsync		= generic_file_fsync,
 };
 
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 47bb1de..1192422 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -542,8 +542,8 @@
 		iget_failed(inode);
 		return ERR_PTR(-EIO);
 	}
-	n = (ino - 1) >> (PAGE_CACHE_SHIFT - QNX6_INODE_SIZE_BITS);
-	offs = (ino - 1) & (~PAGE_CACHE_MASK >> QNX6_INODE_SIZE_BITS);
+	n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS);
+	offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS);
 	mapping = sbi->inodes->i_mapping;
 	page = read_mapping_page(mapping, n, NULL);
 	if (IS_ERR(page)) {
diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
index d3fb2b6..f23b5c4 100644
--- a/fs/qnx6/qnx6.h
+++ b/fs/qnx6/qnx6.h
@@ -128,7 +128,7 @@
 static inline void qnx6_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ba827da..ff21980 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2047,11 +2047,20 @@
 	struct quota_info *dqopt = sb_dqopt(sb);
 	int err;
 
-	if (!dqopt->ops[qid->type]->get_next_id)
-		return -ENOSYS;
+	mutex_lock(&dqopt->dqonoff_mutex);
+	if (!sb_has_quota_active(sb, qid->type)) {
+		err = -ESRCH;
+		goto out;
+	}
+	if (!dqopt->ops[qid->type]->get_next_id) {
+		err = -ENOSYS;
+		goto out;
+	}
 	mutex_lock(&dqopt->dqio_mutex);
 	err = dqopt->ops[qid->type]->get_next_id(sb, qid);
 	mutex_unlock(&dqopt->dqio_mutex);
+out:
+	mutex_unlock(&dqopt->dqonoff_mutex);
 
 	return err;
 }
diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
index d07a2f9..8b25267 100644
--- a/fs/quota/netlink.c
+++ b/fs/quota/netlink.c
@@ -47,7 +47,7 @@
 	void *msg_head;
 	int ret;
 	int msg_size = 4 * nla_total_size(sizeof(u32)) +
-		       2 * nla_total_size(sizeof(u64));
+		       2 * nla_total_size_64bit(sizeof(u64));
 
 	/* We have to allocate using GFP_NOFS as we are called from a
 	 * filesystem performing write and thus further recursion into
@@ -68,8 +68,9 @@
 	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
 	if (ret)
 		goto attr_err_out;
-	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID,
-			  from_kqid_munged(&init_user_ns, qid));
+	ret = nla_put_u64_64bit(skb, QUOTA_NL_A_EXCESS_ID,
+				from_kqid_munged(&init_user_ns, qid),
+				QUOTA_NL_A_PAD);
 	if (ret)
 		goto attr_err_out;
 	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
@@ -81,8 +82,9 @@
 	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
 	if (ret)
 		goto attr_err_out;
-	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID,
-			  from_kuid_munged(&init_user_ns, current_uid()));
+	ret = nla_put_u64_64bit(skb, QUOTA_NL_A_CAUSED_ID,
+				from_kuid_munged(&init_user_ns, current_uid()),
+				QUOTA_NL_A_PAD);
 	if (ret)
 		goto attr_err_out;
 	genlmsg_end(skb, msg_head);
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 38981b0..1ab6e6c 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -223,8 +223,8 @@
 		return err;
 
 	sb->s_maxbytes		= MAX_LFS_FILESIZE;
-	sb->s_blocksize		= PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits	= PAGE_CACHE_SHIFT;
+	sb->s_blocksize		= PAGE_SIZE;
+	sb->s_blocksize_bits	= PAGE_SHIFT;
 	sb->s_magic		= RAMFS_MAGIC;
 	sb->s_op		= &ramfs_ops;
 	sb->s_time_gran		= 1;
diff --git a/fs/read_write.c b/fs/read_write.c
index cf377cf..933b53a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -302,18 +302,6 @@
 }
 EXPORT_SYMBOL(vfs_llseek);
 
-static inline struct fd fdget_pos(int fd)
-{
-	return __to_fd(__fdget_pos(fd));
-}
-
-static inline void fdput_pos(struct fd f)
-{
-	if (f.flags & FDPUT_POS_UNLOCK)
-		mutex_unlock(&f.file->f_pos_lock);
-	fdput(f);
-}
-
 SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
 {
 	off_t retval;
@@ -410,11 +398,6 @@
 }
 EXPORT_SYMBOL(vfs_iter_write);
 
-/*
- * rw_verify_area doesn't like huge counts. We limit
- * them to something that fits in "int" so that others
- * won't have to do range checks all the time.
- */
 int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
 {
 	struct inode *inode;
@@ -441,11 +424,8 @@
 		if (retval < 0)
 			return retval;
 	}
-	retval = security_file_permission(file,
+	return security_file_permission(file,
 				read_write == READ ? MAY_READ : MAY_WRITE);
-	if (retval)
-		return retval;
-	return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
 }
 
 static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
@@ -489,8 +469,9 @@
 		return -EFAULT;
 
 	ret = rw_verify_area(READ, file, pos, count);
-	if (ret >= 0) {
-		count = ret;
+	if (!ret) {
+		if (count > MAX_RW_COUNT)
+			count =  MAX_RW_COUNT;
 		ret = __vfs_read(file, buf, count, pos);
 		if (ret > 0) {
 			fsnotify_access(file);
@@ -572,8 +553,9 @@
 		return -EFAULT;
 
 	ret = rw_verify_area(WRITE, file, pos, count);
-	if (ret >= 0) {
-		count = ret;
+	if (!ret) {
+		if (count > MAX_RW_COUNT)
+			count =  MAX_RW_COUNT;
 		file_start_write(file);
 		ret = __vfs_write(file, buf, count, pos);
 		if (ret > 0) {
@@ -698,12 +680,16 @@
 	struct kiocb kiocb;
 	ssize_t ret;
 
-	if (flags & ~RWF_HIPRI)
+	if (flags & ~(RWF_HIPRI | RWF_DSYNC | RWF_SYNC))
 		return -EOPNOTSUPP;
 
 	init_sync_kiocb(&kiocb, filp);
 	if (flags & RWF_HIPRI)
 		kiocb.ki_flags |= IOCB_HIPRI;
+	if (flags & RWF_DSYNC)
+		kiocb.ki_flags |= IOCB_DSYNC;
+	if (flags & RWF_SYNC)
+		kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
 	kiocb.ki_pos = *ppos;
 
 	ret = fn(&kiocb, iter);
@@ -1323,7 +1309,8 @@
 	retval = rw_verify_area(READ, in.file, &pos, count);
 	if (retval < 0)
 		goto fput_in;
-	count = retval;
+	if (count > MAX_RW_COUNT)
+		count =  MAX_RW_COUNT;
 
 	/*
 	 * Get output file, and verify that it is ok..
@@ -1341,7 +1328,6 @@
 	retval = rw_verify_area(WRITE, out.file, &out_pos, count);
 	if (retval < 0)
 		goto fput_out;
-	count = retval;
 
 	if (!max)
 		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
@@ -1485,11 +1471,12 @@
 	if (flags != 0)
 		return -EINVAL;
 
-	/* copy_file_range allows full ssize_t len, ignoring MAX_RW_COUNT  */
 	ret = rw_verify_area(READ, file_in, &pos_in, len);
-	if (ret >= 0)
-		ret = rw_verify_area(WRITE, file_out, &pos_out, len);
-	if (ret < 0)
+	if (unlikely(ret))
+		return ret;
+
+	ret = rw_verify_area(WRITE, file_out, &pos_out, len);
+	if (unlikely(ret))
 		return ret;
 
 	if (!(file_in->f_mode & FMODE_READ) ||
diff --git a/fs/readdir.c b/fs/readdir.c
index e69ef3b..a86c6c0 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -24,27 +24,40 @@
 int iterate_dir(struct file *file, struct dir_context *ctx)
 {
 	struct inode *inode = file_inode(file);
+	bool shared = false;
 	int res = -ENOTDIR;
-	if (!file->f_op->iterate)
+	if (file->f_op->iterate_shared)
+		shared = true;
+	else if (!file->f_op->iterate)
 		goto out;
 
 	res = security_file_permission(file, MAY_READ);
 	if (res)
 		goto out;
 
-	res = mutex_lock_killable(&inode->i_mutex);
-	if (res)
-		goto out;
+	if (shared)
+		inode_lock_shared(inode);
+	else
+		inode_lock(inode);
+	// res = mutex_lock_killable(&inode->i_mutex);
+	// if (res)
+	//	goto out;
 
 	res = -ENOENT;
 	if (!IS_DEADDIR(inode)) {
 		ctx->pos = file->f_pos;
-		res = file->f_op->iterate(file, ctx);
+		if (shared)
+			res = file->f_op->iterate_shared(file, ctx);
+		else
+			res = file->f_op->iterate(file, ctx);
 		file->f_pos = ctx->pos;
 		fsnotify_access(file);
 		file_accessed(file);
 	}
-	inode_unlock(inode);
+	if (shared)
+		inode_unlock_shared(inode);
+	else
+		inode_unlock(inode);
 out:
 	return res;
 }
@@ -111,7 +124,7 @@
 		struct old_linux_dirent __user *, dirent, unsigned int, count)
 {
 	int error;
-	struct fd f = fdget(fd);
+	struct fd f = fdget_pos(fd);
 	struct readdir_callback buf = {
 		.ctx.actor = fillonedir,
 		.dirent = dirent
@@ -124,7 +137,7 @@
 	if (buf.result)
 		error = buf.result;
 
-	fdput(f);
+	fdput_pos(f);
 	return error;
 }
 
@@ -208,7 +221,7 @@
 	if (!access_ok(VERIFY_WRITE, dirent, count))
 		return -EFAULT;
 
-	f = fdget(fd);
+	f = fdget_pos(fd);
 	if (!f.file)
 		return -EBADF;
 
@@ -222,7 +235,7 @@
 		else
 			error = count - buf.count;
 	}
-	fdput(f);
+	fdput_pos(f);
 	return error;
 }
 
@@ -289,7 +302,7 @@
 	if (!access_ok(VERIFY_WRITE, dirent, count))
 		return -EFAULT;
 
-	f = fdget(fd);
+	f = fdget_pos(fd);
 	if (!f.file)
 		return -EBADF;
 
@@ -304,6 +317,6 @@
 		else
 			error = count - buf.count;
 	}
-	fdput(f);
+	fdput_pos(f);
 	return error;
 }
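
Most of the remaining one-line conversions in this pull flip directory file_operations from .iterate to .iterate_shared, which is what lets iterate_dir() above take i_rwsem shared. Opting in only requires a readdir method that is safe without the exclusive lock, e.g. (the myfs names are illustrative):

	const struct file_operations myfs_dir_operations = {
		.llseek		= generic_file_llseek,
		.read		= generic_read_dir,
		/* called with the directory's i_rwsem held for read only */
		.iterate_shared	= myfs_readdir,
		.fsync		= generic_file_fsync,
	};
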
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 3abd400..45aa05e 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -20,7 +20,7 @@
 const struct file_operations reiserfs_dir_operations = {
 	.llseek = generic_file_llseek,
 	.read = generic_read_dir,
-	.iterate = reiserfs_readdir,
+	.iterate_shared = reiserfs_readdir,
 	.fsync = reiserfs_dir_fsync,
 	.unlocked_ioctl = reiserfs_ioctl,
 #ifdef CONFIG_COMPAT
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 9424a4b..90f815b 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -180,11 +180,11 @@
 	int partial = 0;
 	unsigned blocksize;
 	struct buffer_head *bh, *head;
-	unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
+	unsigned long i_size_index = inode->i_size >> PAGE_SHIFT;
 	int new;
 	int logit = reiserfs_file_data_log(inode);
 	struct super_block *s = inode->i_sb;
-	int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
+	int bh_per_page = PAGE_SIZE / s->s_blocksize;
 	struct reiserfs_transaction_handle th;
 	int ret = 0;
 
@@ -260,10 +260,10 @@
 
 const struct inode_operations reiserfs_file_inode_operations = {
 	.setattr = reiserfs_setattr,
-	.setxattr = reiserfs_setxattr,
-	.getxattr = reiserfs_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = reiserfs_listxattr,
-	.removexattr = reiserfs_removexattr,
+	.removexattr = generic_removexattr,
 	.permission = reiserfs_permission,
 	.get_acl = reiserfs_get_acl,
 	.set_acl = reiserfs_set_acl,
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ae9e5b3..825455d 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -386,7 +386,7 @@
 		goto finished;
 	}
 	/* read file tail into part of page */
-	offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
+	offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1);
 	copy_item_head(&tmp_ih, ih);
 
 	/*
@@ -587,10 +587,10 @@
 		return -EIO;
 
 	/* always try to read until the end of the block */
-	tail_start = tail_offset & (PAGE_CACHE_SIZE - 1);
+	tail_start = tail_offset & (PAGE_SIZE - 1);
 	tail_end = (tail_start | (bh_result->b_size - 1)) + 1;
 
-	index = tail_offset >> PAGE_CACHE_SHIFT;
+	index = tail_offset >> PAGE_SHIFT;
 	/*
 	 * hole_page can be zero in case of direct_io, we are sure
 	 * that we cannot get here if we write with O_DIRECT into tail page
@@ -629,7 +629,7 @@
 unlock:
 	if (tail_page != hole_page) {
 		unlock_page(tail_page);
-		page_cache_release(tail_page);
+		put_page(tail_page);
 	}
 out:
 	return retval;
@@ -2189,11 +2189,11 @@
 	 * we want the page with the last byte in the file,
 	 * not the page that will hold the next byte for appending
 	 */
-	unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
+	unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
 	unsigned long pos = 0;
 	unsigned long start = 0;
 	unsigned long blocksize = inode->i_sb->s_blocksize;
-	unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1);
+	unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
 	struct buffer_head *bh;
 	struct buffer_head *head;
 	struct page *page;
@@ -2251,7 +2251,7 @@
 
 unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return error;
 }
 
@@ -2265,7 +2265,7 @@
 {
 	struct reiserfs_transaction_handle th;
 	/* we want the offset for the first byte after the end of the file */
-	unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+	unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
 	unsigned blocksize = inode->i_sb->s_blocksize;
 	unsigned length;
 	struct page *page = NULL;
@@ -2345,7 +2345,7 @@
 			}
 		}
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	reiserfs_write_unlock(inode->i_sb);
@@ -2354,7 +2354,7 @@
 out:
 	if (page) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	reiserfs_write_unlock(inode->i_sb);
@@ -2426,7 +2426,7 @@
 	} else if (is_direct_le_ih(ih)) {
 		char *p;
 		p = page_address(bh_result->b_page);
-		p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1);
+		p += (byte_offset - 1) & (PAGE_SIZE - 1);
 		copy_size = ih_item_len(ih) - pos_in_item;
 
 		fs_gen = get_generation(inode->i_sb);
@@ -2525,7 +2525,7 @@
 				    struct writeback_control *wbc)
 {
 	struct inode *inode = page->mapping->host;
-	unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = inode->i_size >> PAGE_SHIFT;
 	int error = 0;
 	unsigned long block;
 	sector_t last_block;
@@ -2535,7 +2535,7 @@
 	int checked = PageChecked(page);
 	struct reiserfs_transaction_handle th;
 	struct super_block *s = inode->i_sb;
-	int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
+	int bh_per_page = PAGE_SIZE / s->s_blocksize;
 	th.t_trans_id = 0;
 
 	/* no logging allowed when nonblocking or from PF_MEMALLOC */
@@ -2564,16 +2564,16 @@
 	if (page->index >= end_index) {
 		unsigned last_offset;
 
-		last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+		last_offset = inode->i_size & (PAGE_SIZE - 1);
 		/* no file contents in this page */
 		if (page->index >= end_index + 1 || !last_offset) {
 			unlock_page(page);
 			return 0;
 		}
-		zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
+		zero_user_segment(page, last_offset, PAGE_SIZE);
 	}
 	bh = head;
-	block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
+	block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 	/* first map all the buffers, logging any direct items we find */
 	do {
@@ -2774,7 +2774,7 @@
 		*fsdata = (void *)(unsigned long)flags;
 	}
 
-	index = pos >> PAGE_CACHE_SHIFT;
+	index = pos >> PAGE_SHIFT;
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
@@ -2822,7 +2822,7 @@
 	}
 	if (ret) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		/* Truncate allocated blocks */
 		reiserfs_truncate_failed_write(inode);
 	}
@@ -2909,7 +2909,7 @@
 	else
 		th = NULL;
 
-	start = pos & (PAGE_CACHE_SIZE - 1);
+	start = pos & (PAGE_SIZE - 1);
 	if (unlikely(copied < len)) {
 		if (!PageUptodate(page))
 			copied = 0;
@@ -2974,7 +2974,7 @@
 	if (locked)
 		reiserfs_write_unlock(inode->i_sb);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	if (pos + len > inode->i_size)
 		reiserfs_truncate_failed_write(inode);
@@ -2996,7 +2996,7 @@
 			  unsigned from, unsigned to)
 {
 	struct inode *inode = page->mapping->host;
-	loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to;
+	loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to;
 	int ret = 0;
 	int update_sd = 0;
 	struct reiserfs_transaction_handle *th = NULL;
@@ -3181,7 +3181,7 @@
 	struct inode *inode = page->mapping->host;
 	unsigned int curr_off = 0;
 	unsigned int stop = offset + length;
-	int partial_page = (offset || length < PAGE_CACHE_SIZE);
+	int partial_page = (offset || length < PAGE_SIZE);
 	int ret = 1;
 
 	BUG_ON(!PageLocked(page));
@@ -3279,15 +3279,14 @@
  * We thank Mingming Cao for helping us understand in great detail what
  * to do in this section of the code.
  */
-static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-				  loff_t offset)
+static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	size_t count = iov_iter_count(iter);
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(iocb, inode, iter, offset,
+	ret = blockdev_direct_IO(iocb, inode, iter,
 				 reiserfs_get_blocks_direct_io);
 
 	/*
@@ -3296,7 +3295,7 @@
 	 */
 	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
 		loff_t isize = i_size_read(inode);
-		loff_t end = offset + count;
+		loff_t end = iocb->ki_pos + count;
 
 		if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
 			truncate_setsize(inode, isize);
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 036a1fc..2f1ddc9 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -187,7 +187,11 @@
 	}
 
 	/* we need to make sure nobody is changing the file size beneath us */
-	reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
+{
+	int depth = reiserfs_write_unlock_nested(inode->i_sb);
+	inode_lock(inode);
+	reiserfs_write_lock_nested(inode->i_sb, depth);
+}
 
 	reiserfs_write_lock(inode->i_sb);
 
@@ -203,7 +207,7 @@
 	 * __reiserfs_write_begin on that page.  This will force a
 	 * reiserfs_get_block to unpack the tail for us.
 	 */
-	index = inode->i_size >> PAGE_CACHE_SHIFT;
+	index = inode->i_size >> PAGE_SHIFT;
 	mapping = inode->i_mapping;
 	page = grab_cache_page(mapping, index);
 	retval = -ENOMEM;
@@ -221,7 +225,7 @@
 
 out_unlock:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 out:
 	inode_unlock(inode);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 44c2bdc..2ace90e 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -599,18 +599,18 @@
  * This does a check to see if the buffer belongs to one of these
  * lost pages before doing the final put_bh.  If page->mapping was
  * null, it tries to free buffers on the page, which should make the
- * final page_cache_release drop the page from the lru.
+ * final put_page drop the page from the lru.
  */
 static void release_buffer_page(struct buffer_head *bh)
 {
 	struct page *page = bh->b_page;
 	if (!page->mapping && trylock_page(page)) {
-		page_cache_get(page);
+		get_page(page);
 		put_bh(bh);
 		if (!page->mapping)
 			try_to_free_buffers(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	} else {
 		put_bh(bh);
 	}
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 2a12d46..8a36696 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -1650,10 +1650,10 @@
 	.mknod = reiserfs_mknod,
 	.rename = reiserfs_rename,
 	.setattr = reiserfs_setattr,
-	.setxattr = reiserfs_setxattr,
-	.getxattr = reiserfs_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = reiserfs_listxattr,
-	.removexattr = reiserfs_removexattr,
+	.removexattr = generic_removexattr,
 	.permission = reiserfs_permission,
 	.get_acl = reiserfs_get_acl,
 	.set_acl = reiserfs_set_acl,
@@ -1667,10 +1667,10 @@
 	.readlink = generic_readlink,
 	.get_link	= page_get_link,
 	.setattr = reiserfs_setattr,
-	.setxattr = reiserfs_setxattr,
-	.getxattr = reiserfs_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = reiserfs_listxattr,
-	.removexattr = reiserfs_removexattr,
+	.removexattr = generic_removexattr,
 	.permission = reiserfs_permission,
 };
 
@@ -1679,10 +1679,10 @@
  */
 const struct inode_operations reiserfs_special_inode_operations = {
 	.setattr = reiserfs_setattr,
-	.setxattr = reiserfs_setxattr,
-	.getxattr = reiserfs_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = reiserfs_listxattr,
-	.removexattr = reiserfs_removexattr,
+	.removexattr = generic_removexattr,
 	.permission = reiserfs_permission,
 	.get_acl = reiserfs_get_acl,
 	.set_acl = reiserfs_set_acl,
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 24cbe01..5feacd6 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1342,7 +1342,7 @@
 		 */
 
 		data = kmap_atomic(un_bh->b_page);
-		off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
+		off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_SIZE - 1));
 		memcpy(data + off,
 		       ih_item_body(PATH_PLAST_BUFFER(path), &s_ih),
 		       ret_value);
@@ -1511,7 +1511,7 @@
 
 	if (page) {
 		if (page_has_buffers(page)) {
-			tail_index = pos & (PAGE_CACHE_SIZE - 1);
+			tail_index = pos & (PAGE_SIZE - 1);
 			cur_index = 0;
 			head = page_buffers(page);
 			bh = head;
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index f41e19b..2d5489b 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -151,7 +151,7 @@
 	 */
 	if (up_to_date_bh) {
 		unsigned pgoff =
-		    (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
+		    (tail_offset + total_tail - 1) & (PAGE_SIZE - 1);
 		char *kaddr = kmap_atomic(up_to_date_bh->b_page);
 		memset(kaddr + pgoff, 0, blk_size - total_tail);
 		kunmap_atomic(kaddr);
@@ -271,7 +271,7 @@
 	 * the page was locked and this part of the page was up to date when
 	 * indirect2direct was called, so we know the bytes are still valid
 	 */
-	tail = tail + (pos & (PAGE_CACHE_SIZE - 1));
+	tail = tail + (pos & (PAGE_SIZE - 1));
 
 	PATH_LAST_POSITION(path)++;
 
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 57e0b23..a33812a 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -415,7 +415,7 @@
 static inline void reiserfs_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static struct page *reiserfs_get_page(struct inode *dir, size_t n)
@@ -427,7 +427,7 @@
 	 * and an unlink/rmdir has just occurred - GFP_NOFS avoids this
 	 */
 	mapping_set_gfp_mask(mapping, GFP_NOFS);
-	page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL);
+	page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL);
 	if (!IS_ERR(page)) {
 		kmap(page);
 		if (PageError(page))
@@ -526,10 +526,10 @@
 	while (buffer_pos < buffer_size || buffer_pos == 0) {
 		size_t chunk;
 		size_t skip = 0;
-		size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1));
+		size_t page_offset = (file_pos & (PAGE_SIZE - 1));
 
-		if (buffer_size - buffer_pos > PAGE_CACHE_SIZE)
-			chunk = PAGE_CACHE_SIZE;
+		if (buffer_size - buffer_pos > PAGE_SIZE)
+			chunk = PAGE_SIZE;
 		else
 			chunk = buffer_size - buffer_pos;
 
@@ -546,8 +546,8 @@
 			struct reiserfs_xattr_header *rxh;
 
 			skip = file_pos = sizeof(struct reiserfs_xattr_header);
-			if (chunk + skip > PAGE_CACHE_SIZE)
-				chunk = PAGE_CACHE_SIZE - skip;
+			if (chunk + skip > PAGE_SIZE)
+				chunk = PAGE_SIZE - skip;
 			rxh = (struct reiserfs_xattr_header *)data;
 			rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC);
 			rxh->h_hash = cpu_to_le32(xahash);
@@ -675,8 +675,8 @@
 		char *data;
 		size_t skip = 0;
 
-		if (isize - file_pos > PAGE_CACHE_SIZE)
-			chunk = PAGE_CACHE_SIZE;
+		if (isize - file_pos > PAGE_SIZE)
+			chunk = PAGE_SIZE;
 		else
 			chunk = isize - file_pos;
 
@@ -764,60 +764,6 @@
 	return xah;
 }
 
-
-/*
- * Inode operation getxattr()
- */
-ssize_t
-reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
-		  size_t size)
-{
-	const struct xattr_handler *handler;
-
-	handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
-
-	if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
-		return -EOPNOTSUPP;
-
-	return handler->get(handler, dentry, name, buffer, size);
-}
-
-/*
- * Inode operation setxattr()
- *
- * d_inode(dentry)->i_mutex down
- */
-int
-reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
-		  size_t size, int flags)
-{
-	const struct xattr_handler *handler;
-
-	handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
-
-	if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
-		return -EOPNOTSUPP;
-
-	return handler->set(handler, dentry, name, value, size, flags);
-}
-
-/*
- * Inode operation removexattr()
- *
- * d_inode(dentry)->i_mutex down
- */
-int reiserfs_removexattr(struct dentry *dentry, const char *name)
-{
-	const struct xattr_handler *handler;
-
-	handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
-
-	if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
-		return -EOPNOTSUPP;
-
-	return handler->set(handler, dentry, name, NULL, 0, XATTR_REPLACE);
-}
-
 struct listxattr_buf {
 	struct dir_context ctx;
 	size_t size;
diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
index 15dde62..613ff5a 100644
--- a/fs/reiserfs/xattr.h
+++ b/fs/reiserfs/xattr.h
@@ -2,6 +2,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/rwsem.h>
+#include <linux/xattr.h>
 
 struct inode;
 struct dentry;
@@ -18,12 +19,7 @@
 
 #ifdef CONFIG_REISERFS_FS_XATTR
 #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir)
-ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name,
-			  void *buffer, size_t size);
-int reiserfs_setxattr(struct dentry *dentry, const char *name,
-		      const void *value, size_t size, int flags);
 ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
-int reiserfs_removexattr(struct dentry *dentry, const char *name);
 
 int reiserfs_xattr_get(struct inode *, const char *, void *, size_t);
 int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int);
@@ -92,10 +88,7 @@
 
 #else
 
-#define reiserfs_getxattr NULL
-#define reiserfs_setxattr NULL
 #define reiserfs_listxattr NULL
-#define reiserfs_removexattr NULL
 
 static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
 {
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 558a16b..dbed42f 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -197,10 +197,8 @@
 
 	size = reiserfs_xattr_get(inode, name, NULL, 0);
 	if (size < 0) {
-		if (size == -ENODATA || size == -ENOSYS) {
-			set_cached_acl(inode, type, NULL);
+		if (size == -ENODATA || size == -ENOSYS)
 			return NULL;
-		}
 		return ERR_PTR(size);
 	}
 
@@ -220,8 +218,6 @@
 	} else {
 		acl = reiserfs_posix_acl_from_disk(value, retval);
 	}
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
 
 	kfree(value);
 	return acl;
@@ -370,7 +366,7 @@
 	if (IS_PRIVATE(inode))
 		return 0;
 
-	acl = reiserfs_get_acl(inode, ACL_TYPE_DEFAULT);
+	acl = get_acl(inode, ACL_TYPE_DEFAULT);
 
 	if (acl && !IS_ERR(acl)) {
 		int size = reiserfs_acl_size(acl->a_count);
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index ab0217d..86aeb9d 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -9,29 +9,26 @@
 #include <linux/uaccess.h>
 
 static int
-security_get(const struct xattr_handler *handler, struct dentry *dentry,
-	     const char *name, void *buffer, size_t size)
+security_get(const struct xattr_handler *handler, struct dentry *unused,
+	     struct inode *inode, const char *name, void *buffer, size_t size)
 {
-	if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
-		return -EINVAL;
-
-	if (IS_PRIVATE(d_inode(dentry)))
+	if (IS_PRIVATE(inode))
 		return -EPERM;
 
-	return reiserfs_xattr_get(d_inode(dentry), name, buffer, size);
+	return reiserfs_xattr_get(inode, xattr_full_name(handler, name),
+				  buffer, size);
 }
 
 static int
 security_set(const struct xattr_handler *handler, struct dentry *dentry,
 	     const char *name, const void *buffer, size_t size, int flags)
 {
-	if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
-		return -EINVAL;
-
 	if (IS_PRIVATE(d_inode(dentry)))
 		return -EPERM;
 
-	return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags);
+	return reiserfs_xattr_set(d_inode(dentry),
+				  xattr_full_name(handler, name),
+				  buffer, size, flags);
 }
 
 static bool security_list(struct dentry *dentry)
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
index 64b67aa..31837f0 100644
--- a/fs/reiserfs/xattr_trusted.c
+++ b/fs/reiserfs/xattr_trusted.c
@@ -8,29 +8,26 @@
 #include <linux/uaccess.h>
 
 static int
-trusted_get(const struct xattr_handler *handler, struct dentry *dentry,
-	    const char *name, void *buffer, size_t size)
+trusted_get(const struct xattr_handler *handler, struct dentry *unused,
+	    struct inode *inode, const char *name, void *buffer, size_t size)
 {
-	if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
-		return -EINVAL;
-
-	if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry)))
+	if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
 		return -EPERM;
 
-	return reiserfs_xattr_get(d_inode(dentry), name, buffer, size);
+	return reiserfs_xattr_get(inode, xattr_full_name(handler, name),
+				  buffer, size);
 }
 
 static int
 trusted_set(const struct xattr_handler *handler, struct dentry *dentry,
 	    const char *name, const void *buffer, size_t size, int flags)
 {
-	if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
-		return -EINVAL;
-
 	if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry)))
 		return -EPERM;
 
-	return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags);
+	return reiserfs_xattr_set(d_inode(dentry),
+				  xattr_full_name(handler, name),
+				  buffer, size, flags);
 }
 
 static bool trusted_list(struct dentry *dentry)
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
index 12e6306..f7c3973 100644
--- a/fs/reiserfs/xattr_user.c
+++ b/fs/reiserfs/xattr_user.c
@@ -7,27 +7,24 @@
 #include <linux/uaccess.h>
 
 static int
-user_get(const struct xattr_handler *handler, struct dentry *dentry,
-	 const char *name, void *buffer, size_t size)
+user_get(const struct xattr_handler *handler, struct dentry *unused,
+	 struct inode *inode, const char *name, void *buffer, size_t size)
 {
-
-	if (strlen(name) < sizeof(XATTR_USER_PREFIX))
-		return -EINVAL;
-	if (!reiserfs_xattrs_user(dentry->d_sb))
+	if (!reiserfs_xattrs_user(inode->i_sb))
 		return -EOPNOTSUPP;
-	return reiserfs_xattr_get(d_inode(dentry), name, buffer, size);
+	return reiserfs_xattr_get(inode, xattr_full_name(handler, name),
+				  buffer, size);
 }
 
 static int
 user_set(const struct xattr_handler *handler, struct dentry *dentry,
 	 const char *name, const void *buffer, size_t size, int flags)
 {
-	if (strlen(name) < sizeof(XATTR_USER_PREFIX))
-		return -EINVAL;
-
 	if (!reiserfs_xattrs_user(dentry->d_sb))
 		return -EOPNOTSUPP;
-	return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags);
+	return reiserfs_xattr_set(d_inode(dentry),
+				  xattr_full_name(handler, name),
+				  buffer, size, flags);
 }
 
 static bool user_list(struct dentry *dentry)
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 6b00ca3..d0f8a38 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -280,8 +280,8 @@
 
 static const struct file_operations romfs_dir_operations = {
 	.read		= generic_read_dir,
-	.iterate	= romfs_readdir,
-	.llseek		= default_llseek,
+	.iterate_shared	= romfs_readdir,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct inode_operations romfs_dir_inode_operations = {
diff --git a/fs/seq_file.c b/fs/seq_file.c
index e85664b..19f532e 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -72,9 +72,10 @@
 
 	mutex_init(&p->lock);
 	p->op = op;
-#ifdef CONFIG_USER_NS
-	p->user_ns = file->f_cred->user_ns;
-#endif
+
+	// No refcounting: the lifetime of 'p' is constrained
+	// to the lifetime of the file.
+	p->file = file;
 
 	/*
 	 * Wrappers around seq_open(e.g. swaps_open) need to be
diff --git a/fs/splice.c b/fs/splice.c
index 9947b5c..dd9bf7e 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -88,7 +88,7 @@
 static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
 					struct pipe_buffer *buf)
 {
-	page_cache_release(buf->page);
+	put_page(buf->page);
 	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
@@ -268,7 +268,7 @@
 
 void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
 {
-	page_cache_release(spd->pages[i]);
+	put_page(spd->pages[i]);
 }
 
 /*
@@ -328,9 +328,9 @@
 	if (splice_grow_spd(pipe, &spd))
 		return -ENOMEM;
 
-	index = *ppos >> PAGE_CACHE_SHIFT;
-	loff = *ppos & ~PAGE_CACHE_MASK;
-	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	index = *ppos >> PAGE_SHIFT;
+	loff = *ppos & ~PAGE_MASK;
+	req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	nr_pages = min(req_pages, spd.nr_pages_max);
 
 	/*
@@ -365,7 +365,7 @@
 			error = add_to_page_cache_lru(page, mapping, index,
 				   mapping_gfp_constraint(mapping, GFP_KERNEL));
 			if (unlikely(error)) {
-				page_cache_release(page);
+				put_page(page);
 				if (error == -EEXIST)
 					continue;
 				break;
@@ -385,7 +385,7 @@
 	 * Now loop over the map and see if we need to start IO on any
 	 * pages, fill in the partial map, etc.
 	 */
-	index = *ppos >> PAGE_CACHE_SHIFT;
+	index = *ppos >> PAGE_SHIFT;
 	nr_pages = spd.nr_pages;
 	spd.nr_pages = 0;
 	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
@@ -397,7 +397,7 @@
 		/*
 		 * this_len is the max we'll use from this page
 		 */
-		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
+		this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
 		page = spd.pages[page_nr];
 
 		if (PageReadahead(page))
@@ -426,7 +426,7 @@
 					error = -ENOMEM;
 					break;
 				}
-				page_cache_release(spd.pages[page_nr]);
+				put_page(spd.pages[page_nr]);
 				spd.pages[page_nr] = page;
 			}
 			/*
@@ -456,7 +456,7 @@
 		 * i_size must be checked after PageUptodate.
 		 */
 		isize = i_size_read(mapping->host);
-		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		end_index = (isize - 1) >> PAGE_SHIFT;
 		if (unlikely(!isize || index > end_index))
 			break;
 
@@ -470,7 +470,7 @@
 			/*
 			 * max good bytes in this page
 			 */
-			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+			plen = ((isize - 1) & ~PAGE_MASK) + 1;
 			if (plen <= loff)
 				break;
 
@@ -494,8 +494,8 @@
 	 * we got, 'nr_pages' is how many pages are in the map.
 	 */
 	while (page_nr < nr_pages)
-		page_cache_release(spd.pages[page_nr++]);
-	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
+		put_page(spd.pages[page_nr++]);
+	in->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;
 
 	if (spd.nr_pages)
 		error = splice_to_pipe(pipe, &spd);
@@ -636,8 +636,8 @@
 			goto shrink_ret;
 	}
 
-	offset = *ppos & ~PAGE_CACHE_MASK;
-	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	offset = *ppos & ~PAGE_MASK;
+	nr_pages = (len + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
 		struct page *page;
@@ -647,7 +647,7 @@
 		if (!page)
 			goto err;
 
-		this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
+		this_len = min_t(size_t, len, PAGE_SIZE - offset);
 		vec[i].iov_base = (void __user *) page_address(page);
 		vec[i].iov_len = this_len;
 		spd.pages[i] = page;
@@ -1143,6 +1143,9 @@
 	if (unlikely(ret < 0))
 		return ret;
 
+	if (unlikely(len > MAX_RW_COUNT))
+		len = MAX_RW_COUNT;
+
 	if (in->f_op->splice_read)
 		splice_read = in->f_op->splice_read;
 	else
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 0cea9b9..2c26184 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -181,11 +181,11 @@
 			in = min(bytes, msblk->devblksize - offset);
 			bytes -= in;
 			while (in) {
-				if (pg_offset == PAGE_CACHE_SIZE) {
+				if (pg_offset == PAGE_SIZE) {
 					data = squashfs_next_page(output);
 					pg_offset = 0;
 				}
-				avail = min_t(int, in, PAGE_CACHE_SIZE -
+				avail = min_t(int, in, PAGE_SIZE -
 						pg_offset);
 				memcpy(data + pg_offset, bh[k]->b_data + offset,
 						avail);
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 1cb70a0..23813c0 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -30,7 +30,7 @@
  * access the metadata and fragment caches.
  *
  * To avoid out of memory and fragmentation issues with vmalloc the cache
- * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
+ * uses sequences of kmalloced PAGE_SIZE buffers.
  *
  * It should be noted that the cache is not used for file datablocks, these
  * are decompressed and cached in the page-cache in the normal way.  The
@@ -231,7 +231,7 @@
 /*
  * Initialise cache allocating the specified number of entries, each of
  * size block_size.  To avoid vmalloc fragmentation issues each entry
- * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
+ * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
  */
 struct squashfs_cache *squashfs_cache_init(char *name, int entries,
 	int block_size)
@@ -255,7 +255,7 @@
 	cache->unused = entries;
 	cache->entries = entries;
 	cache->block_size = block_size;
-	cache->pages = block_size >> PAGE_CACHE_SHIFT;
+	cache->pages = block_size >> PAGE_SHIFT;
 	cache->pages = cache->pages ? cache->pages : 1;
 	cache->name = name;
 	cache->num_waiters = 0;
@@ -275,7 +275,7 @@
 		}
 
 		for (j = 0; j < cache->pages; j++) {
-			entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+			entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
 			if (entry->data[j] == NULL) {
 				ERROR("Failed to allocate %s buffer\n", name);
 				goto cleanup;
@@ -314,10 +314,10 @@
 		return min(length, entry->length - offset);
 
 	while (offset < entry->length) {
-		void *buff = entry->data[offset / PAGE_CACHE_SIZE]
-				+ (offset % PAGE_CACHE_SIZE);
+		void *buff = entry->data[offset / PAGE_SIZE]
+				+ (offset % PAGE_SIZE);
 		int bytes = min_t(int, entry->length - offset,
-				PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE));
+				PAGE_SIZE - (offset % PAGE_SIZE));
 
 		if (bytes >= remaining) {
 			memcpy(buffer, buff, remaining);
@@ -415,7 +415,7 @@
  */
 void *squashfs_read_table(struct super_block *sb, u64 block, int length)
 {
-	int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	int i, res;
 	void *table, *buffer, **data;
 	struct squashfs_page_actor *actor;
@@ -436,7 +436,7 @@
 		goto failed2;
 	}
 
-	for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
+	for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
 		data[i] = buffer;
 
 	res = squashfs_read_data(sb, block, length |
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index e9034bf..d2bc136 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -102,7 +102,7 @@
 	 * Read decompressor specific options from file system if present
 	 */
 	if (SQUASHFS_COMP_OPTS(flags)) {
-		buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+		buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
 		if (buffer == NULL) {
 			comp_opts = ERR_PTR(-ENOMEM);
 			goto out;
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index d8c2d74..a5845f9 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -231,6 +231,6 @@
 
 const struct file_operations squashfs_dir_ops = {
 	.read = generic_read_dir,
-	.iterate = squashfs_readdir,
-	.llseek = default_llseek,
+	.iterate_shared = squashfs_readdir,
+	.llseek = generic_file_llseek,
 };
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index e5c9689..13d8094 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -175,7 +175,7 @@
 {
 	int err, i;
 	long long block = 0;
-	__le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+	__le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
 
 	if (blist == NULL) {
 		ERROR("read_indexes: Failed to allocate block_list\n");
@@ -183,7 +183,7 @@
 	}
 
 	while (n) {
-		int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2);
+		int blocks = min_t(int, n, PAGE_SIZE >> 2);
 
 		err = squashfs_read_metadata(sb, blist, start_block,
 				offset, blocks << 2);
@@ -377,19 +377,19 @@
 	struct inode *inode = page->mapping->host;
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
 	void *pageaddr;
-	int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+	int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
 	int start_index = page->index & ~mask, end_index = start_index | mask;
 
 	/*
 	 * Loop copying datablock into pages.  As the datablock likely covers
-	 * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly
+	 * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
 	 * grab the pages from the page cache, except for the page that we've
 	 * been called to fill.
 	 */
 	for (i = start_index; i <= end_index && bytes > 0; i++,
-			bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
+			bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
 		struct page *push_page;
-		int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0;
+		int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
 
 		TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
 
@@ -404,14 +404,14 @@
 
 		pageaddr = kmap_atomic(push_page);
 		squashfs_copy_data(pageaddr, buffer, offset, avail);
-		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+		memset(pageaddr + avail, 0, PAGE_SIZE - avail);
 		kunmap_atomic(pageaddr);
 		flush_dcache_page(push_page);
 		SetPageUptodate(push_page);
 skip_page:
 		unlock_page(push_page);
 		if (i != page->index)
-			page_cache_release(push_page);
+			put_page(push_page);
 	}
 }
 
@@ -454,7 +454,7 @@
 {
 	struct inode *inode = page->mapping->host;
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-	int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
+	int index = page->index >> (msblk->block_log - PAGE_SHIFT);
 	int file_end = i_size_read(inode) >> msblk->block_log;
 	int res;
 	void *pageaddr;
@@ -462,8 +462,8 @@
 	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
 				page->index, squashfs_i(inode)->start);
 
-	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-					PAGE_CACHE_SHIFT))
+	if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
+					PAGE_SHIFT))
 		goto out;
 
 	if (index < file_end || squashfs_i(inode)->fragment_block ==
@@ -487,7 +487,7 @@
 	SetPageError(page);
 out:
 	pageaddr = kmap_atomic(page);
-	memset(pageaddr, 0, PAGE_CACHE_SIZE);
+	memset(pageaddr, 0, PAGE_SIZE);
 	kunmap_atomic(pageaddr);
 	flush_dcache_page(page);
 	if (!PageError(page))
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index 43e7a7e..cb485d8 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -30,8 +30,8 @@
 	struct inode *inode = target_page->mapping->host;
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
 
-	int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
-	int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
 	int start_index = target_page->index & ~mask;
 	int end_index = start_index | mask;
 	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
@@ -68,7 +68,7 @@
 
 		if (PageUptodate(page[i])) {
 			unlock_page(page[i]);
-			page_cache_release(page[i]);
+			put_page(page[i]);
 			page[i] = NULL;
 			missing_pages++;
 		}
@@ -96,10 +96,10 @@
 		goto mark_errored;
 
 	/* Last page may have trailing bytes not filled */
-	bytes = res % PAGE_CACHE_SIZE;
+	bytes = res % PAGE_SIZE;
 	if (bytes) {
 		pageaddr = kmap_atomic(page[pages - 1]);
-		memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
+		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
 		kunmap_atomic(pageaddr);
 	}
 
@@ -109,7 +109,7 @@
 		SetPageUptodate(page[i]);
 		unlock_page(page[i]);
 		if (page[i] != target_page)
-			page_cache_release(page[i]);
+			put_page(page[i]);
 	}
 
 	kfree(actor);
@@ -127,7 +127,7 @@
 		flush_dcache_page(page[i]);
 		SetPageError(page[i]);
 		unlock_page(page[i]);
-		page_cache_release(page[i]);
+		put_page(page[i]);
 	}
 
 out:
@@ -153,21 +153,21 @@
 	}
 
 	for (n = 0; n < pages && bytes > 0; n++,
-			bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
-		int avail = min_t(int, bytes, PAGE_CACHE_SIZE);
+			bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
+		int avail = min_t(int, bytes, PAGE_SIZE);
 
 		if (page[n] == NULL)
 			continue;
 
 		pageaddr = kmap_atomic(page[n]);
 		squashfs_copy_data(pageaddr, buffer, offset, avail);
-		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+		memset(pageaddr + avail, 0, PAGE_SIZE - avail);
 		kunmap_atomic(pageaddr);
 		flush_dcache_page(page[n]);
 		SetPageUptodate(page[n]);
 		unlock_page(page[n]);
 		if (page[n] != target_page)
-			page_cache_release(page[n]);
+			put_page(page[n]);
 	}
 
 out:
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
index c31e2bc..ff4468b 100644
--- a/fs/squashfs/lz4_wrapper.c
+++ b/fs/squashfs/lz4_wrapper.c
@@ -117,13 +117,13 @@
 	data = squashfs_first_page(output);
 	buff = stream->output;
 	while (data) {
-		if (bytes <= PAGE_CACHE_SIZE) {
+		if (bytes <= PAGE_SIZE) {
 			memcpy(data, buff, bytes);
 			break;
 		}
-		memcpy(data, buff, PAGE_CACHE_SIZE);
-		buff += PAGE_CACHE_SIZE;
-		bytes -= PAGE_CACHE_SIZE;
+		memcpy(data, buff, PAGE_SIZE);
+		buff += PAGE_SIZE;
+		bytes -= PAGE_SIZE;
 		data = squashfs_next_page(output);
 	}
 	squashfs_finish_page(output);
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
index 244b9fb..934c17e 100644
--- a/fs/squashfs/lzo_wrapper.c
+++ b/fs/squashfs/lzo_wrapper.c
@@ -102,13 +102,13 @@
 	data = squashfs_first_page(output);
 	buff = stream->output;
 	while (data) {
-		if (bytes <= PAGE_CACHE_SIZE) {
+		if (bytes <= PAGE_SIZE) {
 			memcpy(data, buff, bytes);
 			break;
 		} else {
-			memcpy(data, buff, PAGE_CACHE_SIZE);
-			buff += PAGE_CACHE_SIZE;
-			bytes -= PAGE_CACHE_SIZE;
+			memcpy(data, buff, PAGE_SIZE);
+			buff += PAGE_SIZE;
+			bytes -= PAGE_SIZE;
 			data = squashfs_next_page(output);
 		}
 	}
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
index 5a1c11f..9b7b1b6 100644
--- a/fs/squashfs/page_actor.c
+++ b/fs/squashfs/page_actor.c
@@ -48,7 +48,7 @@
 	if (actor == NULL)
 		return NULL;
 
-	actor->length = length ? : pages * PAGE_CACHE_SIZE;
+	actor->length = length ? : pages * PAGE_SIZE;
 	actor->buffer = buffer;
 	actor->pages = pages;
 	actor->next_page = 0;
@@ -88,7 +88,7 @@
 	if (actor == NULL)
 		return NULL;
 
-	actor->length = length ? : pages * PAGE_CACHE_SIZE;
+	actor->length = length ? : pages * PAGE_SIZE;
 	actor->page = page;
 	actor->pages = pages;
 	actor->next_page = 0;
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
index 26dd820..98537ea 100644
--- a/fs/squashfs/page_actor.h
+++ b/fs/squashfs/page_actor.h
@@ -24,7 +24,7 @@
 	if (actor == NULL)
 		return NULL;
 
-	actor->length = length ? : pages * PAGE_CACHE_SIZE;
+	actor->length = length ? : pages * PAGE_SIZE;
 	actor->page = page;
 	actor->pages = pages;
 	actor->next_page = 0;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 5e79bfa..cf01e15 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -152,7 +152,7 @@
 	 * Check the system page size is not larger than the filesystem
 	 * block size (by default 128K).  This is currently not supported.
 	 */
-	if (PAGE_CACHE_SIZE > msblk->block_size) {
+	if (PAGE_SIZE > msblk->block_size) {
 		ERROR("Page size > filesystem block size (%d).  This is "
 			"currently not supported!\n", msblk->block_size);
 		goto failed_mount;
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index dbcc2f5..d688ef4 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -48,10 +48,10 @@
 	struct inode *inode = page->mapping->host;
 	struct super_block *sb = inode->i_sb;
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	int index = page->index << PAGE_CACHE_SHIFT;
+	int index = page->index << PAGE_SHIFT;
 	u64 block = squashfs_i(inode)->start;
 	int offset = squashfs_i(inode)->offset;
-	int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
+	int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
 	int bytes, copied;
 	void *pageaddr;
 	struct squashfs_cache_entry *entry;
@@ -94,7 +94,7 @@
 		copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
 								length - bytes);
 		if (copied == length - bytes)
-			memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
+			memset(pageaddr + length, 0, PAGE_SIZE - length);
 		else
 			block = entry->next_index;
 		kunmap_atomic(pageaddr);
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
index 1e9de96..1548b37 100644
--- a/fs/squashfs/xattr.c
+++ b/fs/squashfs/xattr.c
@@ -214,10 +214,12 @@
 
 
 static int squashfs_xattr_handler_get(const struct xattr_handler *handler,
-				      struct dentry *d, const char *name,
+				      struct dentry *unused,
+				      struct inode *inode,
+				      const char *name,
 				      void *buffer, size_t size)
 {
-	return squashfs_xattr_get(d_inode(d), handler->flags, name,
+	return squashfs_xattr_get(inode, handler->flags, name,
 		buffer, size);
 }
 
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index c609624..6bfaef7 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -141,7 +141,7 @@
 	stream->buf.in_pos = 0;
 	stream->buf.in_size = 0;
 	stream->buf.out_pos = 0;
-	stream->buf.out_size = PAGE_CACHE_SIZE;
+	stream->buf.out_size = PAGE_SIZE;
 	stream->buf.out = squashfs_first_page(output);
 
 	do {
@@ -158,7 +158,7 @@
 			stream->buf.out = squashfs_next_page(output);
 			if (stream->buf.out != NULL) {
 				stream->buf.out_pos = 0;
-				total += PAGE_CACHE_SIZE;
+				total += PAGE_SIZE;
 			}
 		}
 
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 8727cab..2ec24d1 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -69,7 +69,7 @@
 	int zlib_err, zlib_init = 0, k = 0;
 	z_stream *stream = strm;
 
-	stream->avail_out = PAGE_CACHE_SIZE;
+	stream->avail_out = PAGE_SIZE;
 	stream->next_out = squashfs_first_page(output);
 	stream->avail_in = 0;
 
@@ -85,7 +85,7 @@
 		if (stream->avail_out == 0) {
 			stream->next_out = squashfs_next_page(output);
 			if (stream->next_out != NULL)
-				stream->avail_out = PAGE_CACHE_SIZE;
+				stream->avail_out = PAGE_SIZE;
 		}
 
 		if (!zlib_init) {
diff --git a/fs/super.c b/fs/super.c
index 74914b1..d78b984 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -285,7 +285,7 @@
  *	deactivate_locked_super	-	drop an active reference to superblock
  *	@s: superblock to deactivate
  *
- *	Drops an active reference to superblock, converting it into a temprory
+ *	Drops an active reference to superblock, converting it into a temporary
  *	one if there is no other active references left.  In that case we
  *	tell fs driver to shut it down and drop the temporary reference we
  *	had just acquired.
diff --git a/fs/sync.c b/fs/sync.c
index dd5d171..2a54c1f 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -302,7 +302,7 @@
 		goto out;
 
 	if (sizeof(pgoff_t) == 4) {
-		if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
+		if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
 			/*
 			 * The range starts outside a 32 bit machine's
 			 * pagecache addressing capabilities.  Let it "succeed"
@@ -310,7 +310,7 @@
 			ret = 0;
 			goto out;
 		}
-		if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
+		if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
 			/*
 			 * Out to EOF
 			 */
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 63c1bcb..2661b77 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -23,14 +23,14 @@
 const struct file_operations sysv_dir_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= sysv_readdir,
+	.iterate_shared	= sysv_readdir,
 	.fsync		= generic_file_fsync,
 };
 
 static inline void dir_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
@@ -73,8 +73,8 @@
 	if (pos >= inode->i_size)
 		return 0;
 
-	offset = pos & ~PAGE_CACHE_MASK;
-	n = pos >> PAGE_CACHE_SHIFT;
+	offset = pos & ~PAGE_MASK;
+	n = pos >> PAGE_SHIFT;
 
 	for ( ; n < npages; n++, offset = 0) {
 		char *kaddr, *limit;
@@ -85,7 +85,7 @@
 			continue;
 		kaddr = (char *)page_address(page);
 		de = (struct sysv_dir_entry *)(kaddr+offset);
-		limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+		limit = kaddr + PAGE_SIZE - SYSV_DIRSIZE;
 		for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) {
 			char *name = de->name;
 
@@ -146,7 +146,7 @@
 		if (!IS_ERR(page)) {
 			kaddr = (char*)page_address(page);
 			de = (struct sysv_dir_entry *) kaddr;
-			kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+			kaddr += PAGE_SIZE - SYSV_DIRSIZE;
 			for ( ; (char *) de <= kaddr ; de++) {
 				if (!de->inode)
 					continue;
@@ -190,7 +190,7 @@
 			goto out;
 		kaddr = (char*)page_address(page);
 		de = (struct sysv_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
+		kaddr += PAGE_SIZE - SYSV_DIRSIZE;
 		while ((char *)de <= kaddr) {
 			if (!de->inode)
 				goto got_it;
@@ -261,7 +261,7 @@
 	kmap(page);
 
 	base = (char*)page_address(page);
-	memset(base, 0, PAGE_CACHE_SIZE);
+	memset(base, 0, PAGE_SIZE);
 
 	de = (struct sysv_dir_entry *) base;
 	de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
@@ -273,7 +273,7 @@
 	kunmap(page);
 	err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -296,7 +296,7 @@
 
 		kaddr = (char *)page_address(page);
 		de = (struct sysv_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE;
+		kaddr += PAGE_SIZE-SYSV_DIRSIZE;
 
 		for ( ;(char *)de <= kaddr; de++) {
 			if (!de->inode)
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 11e83ed..90b60c0 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -264,11 +264,11 @@
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 795992a..4b86d3a 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -1182,10 +1182,10 @@
 	.rename      = ubifs_rename,
 	.setattr     = ubifs_setattr,
 	.getattr     = ubifs_getattr,
-	.setxattr    = ubifs_setxattr,
-	.getxattr    = ubifs_getxattr,
+	.setxattr    = generic_setxattr,
+	.getxattr    = generic_getxattr,
 	.listxattr   = ubifs_listxattr,
-	.removexattr = ubifs_removexattr,
+	.removexattr = generic_removexattr,
 #ifdef CONFIG_UBIFS_ATIME_SUPPORT
 	.update_time = ubifs_update_time,
 #endif
@@ -1195,7 +1195,7 @@
 	.llseek         = generic_file_llseek,
 	.release        = ubifs_dir_release,
 	.read           = generic_read_dir,
-	.iterate        = ubifs_readdir,
+	.iterate_shared = ubifs_readdir,
 	.fsync          = ubifs_fsync,
 	.unlocked_ioctl = ubifs_ioctl,
 #ifdef CONFIG_COMPAT
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 065c88f..0831697 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -121,7 +121,7 @@
 	if (block >= beyond) {
 		/* Reading beyond inode */
 		SetPageChecked(page);
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		memset(addr, 0, PAGE_SIZE);
 		goto out;
 	}
 
@@ -223,7 +223,7 @@
 {
 	struct inode *inode = mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	struct ubifs_budget_req req = { .new_page = 1 };
 	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
 	struct page *page;
@@ -254,13 +254,13 @@
 	}
 
 	if (!PageUptodate(page)) {
-		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
+		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
 			SetPageChecked(page);
 		else {
 			err = do_readpage(page);
 			if (err) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				ubifs_release_budget(c, &req);
 				return err;
 			}
@@ -428,7 +428,7 @@
 	struct inode *inode = mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
 	int skipped_read = 0;
 	struct page *page;
@@ -446,7 +446,7 @@
 
 	if (!PageUptodate(page)) {
 		/* The page is not loaded from the flash */
-		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
+		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
 			/*
 			 * We change whole page so no need to load it. But we
 			 * do not know whether this page exists on the media or
@@ -462,7 +462,7 @@
 			err = do_readpage(page);
 			if (err) {
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 				return err;
 			}
 		}
@@ -494,7 +494,7 @@
 			mutex_unlock(&ui->ui_mutex);
 		}
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		return write_begin_slow(mapping, pos, len, pagep, flags);
 	}
@@ -549,12 +549,12 @@
 	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
 		inode->i_ino, pos, page->index, len, copied, inode->i_size);
 
-	if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
+	if (unlikely(copied < len && len == PAGE_SIZE)) {
 		/*
 		 * VFS copied less data to the page that it intended and
 		 * declared in its '->write_begin()' call via the @len
 		 * argument. If the page was not up-to-date, and @len was
-		 * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did
+		 * @PAGE_SIZE, the 'ubifs_write_begin()' function did
 		 * not load it from the media (for optimization reasons). This
 		 * means that part of the page contains garbage. So read the
 		 * page now.
@@ -593,7 +593,7 @@
 
 out:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return copied;
 }
 
@@ -621,10 +621,10 @@
 
 	addr = zaddr = kmap(page);
 
-	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+	end_index = (i_size - 1) >> PAGE_SHIFT;
 	if (!i_size || page->index > end_index) {
 		hole = 1;
-		memset(addr, 0, PAGE_CACHE_SIZE);
+		memset(addr, 0, PAGE_SIZE);
 		goto out_hole;
 	}
 
@@ -673,7 +673,7 @@
 	}
 
 	if (end_index == page->index) {
-		int len = i_size & (PAGE_CACHE_SIZE - 1);
+		int len = i_size & (PAGE_SIZE - 1);
 
 		if (len && len < read)
 			memset(zaddr + len, 0, read - len);
@@ -773,7 +773,7 @@
 	isize = i_size_read(inode);
 	if (isize == 0)
 		goto out_free;
-	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+	end_index = ((isize - 1) >> PAGE_SHIFT);
 
 	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
 		pgoff_t page_offset = offset + page_idx;
@@ -788,7 +788,7 @@
 		if (!PageUptodate(page))
 			err = populate_page(c, page, bu, &n);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		if (err)
 			break;
 	}
@@ -905,7 +905,7 @@
 #ifdef UBIFS_DEBUG
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	spin_lock(&ui->ui_lock);
-	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);
+	ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT);
 	spin_unlock(&ui->ui_lock);
 #endif
 
@@ -1001,8 +1001,8 @@
 	struct inode *inode = page->mapping->host;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	loff_t i_size =  i_size_read(inode), synced_i_size;
-	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
-	int err, len = i_size & (PAGE_CACHE_SIZE - 1);
+	pgoff_t end_index = i_size >> PAGE_SHIFT;
+	int err, len = i_size & (PAGE_SIZE - 1);
 	void *kaddr;
 
 	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
@@ -1021,7 +1021,7 @@
 
 	/* Is the page fully inside @i_size? */
 	if (page->index < end_index) {
-		if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
+		if (page->index >= synced_i_size >> PAGE_SHIFT) {
 			err = inode->i_sb->s_op->write_inode(inode, NULL);
 			if (err)
 				goto out_unlock;
@@ -1034,7 +1034,7 @@
 			 * with this.
 			 */
 		}
-		return do_writepage(page, PAGE_CACHE_SIZE);
+		return do_writepage(page, PAGE_SIZE);
 	}
 
 	/*
@@ -1045,7 +1045,7 @@
 	 * writes to that region are not written out to the file."
 	 */
 	kaddr = kmap_atomic(page);
-	memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
+	memset(kaddr + len, 0, PAGE_SIZE - len);
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr);
 
@@ -1138,7 +1138,7 @@
 	truncate_setsize(inode, new_size);
 
 	if (offset) {
-		pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
+		pgoff_t index = new_size >> PAGE_SHIFT;
 		struct page *page;
 
 		page = find_lock_page(inode->i_mapping, index);
@@ -1157,9 +1157,9 @@
 				clear_page_dirty_for_io(page);
 				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
 					offset = new_size &
-						 (PAGE_CACHE_SIZE - 1);
+						 (PAGE_SIZE - 1);
 				err = do_writepage(page, offset);
-				page_cache_release(page);
+				put_page(page);
 				if (err)
 					goto out_budg;
 				/*
@@ -1173,7 +1173,7 @@
 				 * having to read it.
 				 */
 				unlock_page(page);
-				page_cache_release(page);
+				put_page(page);
 			}
 		}
 	}
@@ -1285,7 +1285,7 @@
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 
 	ubifs_assert(PagePrivate(page));
-	if (offset || length < PAGE_CACHE_SIZE)
+	if (offset || length < PAGE_SIZE)
 		/* Partial page remains dirty */
 		return;
 
@@ -1597,10 +1597,10 @@
 const struct inode_operations ubifs_file_inode_operations = {
 	.setattr     = ubifs_setattr,
 	.getattr     = ubifs_getattr,
-	.setxattr    = ubifs_setxattr,
-	.getxattr    = ubifs_getxattr,
+	.setxattr    = generic_setxattr,
+	.getxattr    = generic_getxattr,
 	.listxattr   = ubifs_listxattr,
-	.removexattr = ubifs_removexattr,
+	.removexattr = generic_removexattr,
 #ifdef CONFIG_UBIFS_ATIME_SUPPORT
 	.update_time = ubifs_update_time,
 #endif
@@ -1611,10 +1611,10 @@
 	.get_link    = simple_get_link,
 	.setattr     = ubifs_setattr,
 	.getattr     = ubifs_getattr,
-	.setxattr    = ubifs_setxattr,
-	.getxattr    = ubifs_getxattr,
+	.setxattr    = generic_setxattr,
+	.getxattr    = generic_getxattr,
 	.listxattr   = ubifs_listxattr,
-	.removexattr = ubifs_removexattr,
+	.removexattr = generic_removexattr,
 #ifdef CONFIG_UBIFS_ATIME_SUPPORT
 	.update_time = ubifs_update_time,
 #endif
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index a233ba9..70349954 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2040,6 +2040,7 @@
 	if (c->max_inode_sz > MAX_LFS_FILESIZE)
 		sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;
 	sb->s_op = &ubifs_super_operations;
+	sb->s_xattr = ubifs_xattr_handlers;
 
 	mutex_lock(&c->umount_mutex);
 	err = mount_ubifs(c);
@@ -2237,12 +2238,12 @@
 	BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
 
 	/*
-	 * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to
+	 * We require that PAGE_SIZE is greater-than-or-equal-to
 	 * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
 	 */
-	if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
+	if (PAGE_SIZE < UBIFS_BLOCK_SIZE) {
 		pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
-		       current->pid, (unsigned int)PAGE_CACHE_SIZE);
+		       current->pid, (unsigned int)PAGE_SIZE);
 		return -EINVAL;
 	}
 
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index c2a57e1..ddf9f6b9 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -37,6 +37,7 @@
 #include <linux/pagemap.h>
 #include <linux/backing-dev.h>
 #include <linux/security.h>
+#include <linux/xattr.h>
 #include "ubifs-media.h"
 
 /* Version of this UBIFS implementation */
@@ -46,8 +47,8 @@
 #define UBIFS_SUPER_MAGIC 0x24051905
 
 /* Number of UBIFS blocks per VFS page */
-#define UBIFS_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / UBIFS_BLOCK_SIZE)
-#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_CACHE_SHIFT - UBIFS_BLOCK_SHIFT)
+#define UBIFS_BLOCKS_PER_PAGE (PAGE_SIZE / UBIFS_BLOCK_SIZE)
+#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_SHIFT - UBIFS_BLOCK_SHIFT)
 
 /* "File system end of life" sequence number watermark */
 #define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL
@@ -1732,12 +1733,8 @@
 		  struct kstat *stat);
 
 /* xattr.c */
-int ubifs_setxattr(struct dentry *dentry, const char *name,
-		   const void *value, size_t size, int flags);
-ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
-		       size_t size);
+extern const struct xattr_handler *ubifs_xattr_handlers[];
 ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size);
-int ubifs_removexattr(struct dentry *dentry, const char *name);
 int ubifs_init_security(struct inode *dentry, struct inode *inode,
 			const struct qstr *qstr);
 
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index b043e04..6c277eb 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -249,42 +249,6 @@
 	return err;
 }
 
-/**
- * check_namespace - check extended attribute name-space.
- * @nm: extended attribute name
- *
- * This function makes sure the extended attribute name belongs to one of the
- * supported extended attribute name-spaces. Returns name-space index in case
- * of success and a negative error code in case of failure.
- */
-static int check_namespace(const struct qstr *nm)
-{
-	int type;
-
-	if (nm->len > UBIFS_MAX_NLEN)
-		return -ENAMETOOLONG;
-
-	if (!strncmp(nm->name, XATTR_TRUSTED_PREFIX,
-		     XATTR_TRUSTED_PREFIX_LEN)) {
-		if (nm->name[XATTR_TRUSTED_PREFIX_LEN] == '\0')
-			return -EINVAL;
-		type = TRUSTED_XATTR;
-	} else if (!strncmp(nm->name, XATTR_USER_PREFIX,
-				      XATTR_USER_PREFIX_LEN)) {
-		if (nm->name[XATTR_USER_PREFIX_LEN] == '\0')
-			return -EINVAL;
-		type = USER_XATTR;
-	} else if (!strncmp(nm->name, XATTR_SECURITY_PREFIX,
-				     XATTR_SECURITY_PREFIX_LEN)) {
-		if (nm->name[XATTR_SECURITY_PREFIX_LEN] == '\0')
-			return -EINVAL;
-		type = SECURITY_XATTR;
-	} else
-		return -EOPNOTSUPP;
-
-	return type;
-}
-
 static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum)
 {
 	struct inode *inode;
@@ -302,24 +266,23 @@
 	return ERR_PTR(-EINVAL);
 }
 
-static int setxattr(struct inode *host, const char *name, const void *value,
-		    size_t size, int flags)
+static int __ubifs_setxattr(struct inode *host, const char *name,
+			    const void *value, size_t size, int flags)
 {
 	struct inode *inode;
 	struct ubifs_info *c = host->i_sb->s_fs_info;
 	struct qstr nm = QSTR_INIT(name, strlen(name));
 	struct ubifs_dent_node *xent;
 	union ubifs_key key;
-	int err, type;
+	int err;
 
 	ubifs_assert(inode_is_locked(host));
 
 	if (size > UBIFS_MAX_INO_DATA)
 		return -ERANGE;
 
-	type = check_namespace(&nm);
-	if (type < 0)
-		return type;
+	if (nm.len > UBIFS_MAX_NLEN)
+		return -ENAMETOOLONG;
 
 	xent = kmalloc(UBIFS_MAX_XENT_NODE_SZ, GFP_NOFS);
 	if (!xent)
@@ -363,19 +326,10 @@
 	return err;
 }
 
-int ubifs_setxattr(struct dentry *dentry, const char *name,
-		   const void *value, size_t size, int flags)
+static ssize_t __ubifs_getxattr(struct inode *host, const char *name,
+				void *buf, size_t size)
 {
-	dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
-		name, d_inode(dentry)->i_ino, dentry, size);
-
-	return setxattr(d_inode(dentry), name, value, size, flags);
-}
-
-ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
-		       size_t size)
-{
-	struct inode *inode, *host = d_inode(dentry);
+	struct inode *inode;
 	struct ubifs_info *c = host->i_sb->s_fs_info;
 	struct qstr nm = QSTR_INIT(name, strlen(name));
 	struct ubifs_inode *ui;
@@ -383,12 +337,8 @@
 	union ubifs_key key;
 	int err;
 
-	dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
-		host->i_ino, dentry, size);
-
-	err = check_namespace(&nm);
-	if (err < 0)
-		return err;
+	if (nm.len > UBIFS_MAX_NLEN)
+		return -ENAMETOOLONG;
 
 	xent = kmalloc(UBIFS_MAX_XENT_NODE_SZ, GFP_NOFS);
 	if (!xent)
@@ -460,8 +410,6 @@
 
 	lowest_xent_key(c, &key, host->i_ino);
 	while (1) {
-		int type;
-
 		xent = ubifs_tnc_next_ent(c, &key, &nm);
 		if (IS_ERR(xent)) {
 			err = PTR_ERR(xent);
@@ -471,14 +419,10 @@
 		nm.name = xent->name;
 		nm.len = le16_to_cpu(xent->nlen);
 
-		type = check_namespace(&nm);
-		if (unlikely(type < 0)) {
-			err = type;
-			break;
-		}
-
 		/* Show trusted namespace only for "power" users */
-		if (type != TRUSTED_XATTR || capable(CAP_SYS_ADMIN)) {
+		if (strncmp(xent->name, XATTR_TRUSTED_PREFIX,
+			    XATTR_TRUSTED_PREFIX_LEN) ||
+		    capable(CAP_SYS_ADMIN)) {
 			memcpy(buffer + written, nm.name, nm.len + 1);
 			written += nm.len + 1;
 		}
@@ -538,22 +482,19 @@
 	return err;
 }
 
-int ubifs_removexattr(struct dentry *dentry, const char *name)
+static int __ubifs_removexattr(struct inode *host, const char *name)
 {
-	struct inode *inode, *host = d_inode(dentry);
+	struct inode *inode;
 	struct ubifs_info *c = host->i_sb->s_fs_info;
 	struct qstr nm = QSTR_INIT(name, strlen(name));
 	struct ubifs_dent_node *xent;
 	union ubifs_key key;
 	int err;
 
-	dbg_gen("xattr '%s', ino %lu ('%pd')", name,
-		host->i_ino, dentry);
 	ubifs_assert(inode_is_locked(host));
 
-	err = check_namespace(&nm);
-	if (err < 0)
-		return err;
+	if (nm.len > UBIFS_MAX_NLEN)
+		return -ENAMETOOLONG;
 
 	xent = kmalloc(UBIFS_MAX_XENT_NODE_SZ, GFP_NOFS);
 	if (!xent)
@@ -603,7 +544,7 @@
 		}
 		strcpy(name, XATTR_SECURITY_PREFIX);
 		strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
-		err = setxattr(inode, name, xattr->value, xattr->value_len, 0);
+		err = __ubifs_setxattr(inode, name, xattr->value, xattr->value_len, 0);
 		kfree(name);
 		if (err < 0)
 			break;
@@ -626,3 +567,53 @@
 	}
 	return err;
 }
+
+static int ubifs_xattr_get(const struct xattr_handler *handler,
+			   struct dentry *dentry, struct inode *inode,
+			   const char *name, void *buffer, size_t size)
+{
+	dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
+		inode->i_ino, dentry, size);
+
+	return  __ubifs_getxattr(inode, name, buffer, size);
+}
+
+static int ubifs_xattr_set(const struct xattr_handler *handler,
+			   struct dentry *dentry, const char *name,
+			   const void *value, size_t size, int flags)
+{
+	struct inode *inode = d_inode(dentry);
+
+	dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
+		name, inode->i_ino, dentry, size);
+
+	if (value)
+		return __ubifs_setxattr(inode, name, value, size, flags);
+	else
+		return __ubifs_removexattr(inode, name);
+}
+
+const struct xattr_handler ubifs_user_xattr_handler = {
+	.prefix = XATTR_USER_PREFIX,
+	.get = ubifs_xattr_get,
+	.set = ubifs_xattr_set,
+};
+
+const struct xattr_handler ubifs_trusted_xattr_handler = {
+	.prefix = XATTR_TRUSTED_PREFIX,
+	.get = ubifs_xattr_get,
+	.set = ubifs_xattr_set,
+};
+
+const struct xattr_handler ubifs_security_xattr_handler = {
+	.prefix = XATTR_SECURITY_PREFIX,
+	.get = ubifs_xattr_get,
+	.set = ubifs_xattr_set,
+};
+
+const struct xattr_handler *ubifs_xattr_handlers[] = {
+	&ubifs_user_xattr_handler,
+	&ubifs_trusted_xattr_handler,
+	&ubifs_security_xattr_handler,
+	NULL
+};
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index b51b371..4c5593a 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -202,7 +202,7 @@
 const struct file_operations udf_dir_operations = {
 	.llseek			= generic_file_llseek,
 	.read			= generic_read_dir,
-	.iterate		= udf_readdir,
+	.iterate_shared		= udf_readdir,
 	.unlocked_ioctl		= udf_ioctl,
 	.fsync			= generic_file_fsync,
 };
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 1af9896..6325706 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -46,7 +46,7 @@
 
 	kaddr = kmap(page);
 	memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
-	memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
+	memset(kaddr + inode->i_size, 0, PAGE_SIZE - inode->i_size);
 	flush_dcache_page(page);
 	SetPageUptodate(page);
 	kunmap(page);
@@ -87,20 +87,19 @@
 {
 	struct page *page;
 
-	if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
+	if (WARN_ON_ONCE(pos >= PAGE_SIZE))
 		return -EIO;
 	page = grab_cache_page_write_begin(mapping, 0, flags);
 	if (!page)
 		return -ENOMEM;
 	*pagep = page;
 
-	if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
+	if (!PageUptodate(page) && len != PAGE_SIZE)
 		__udf_adinicb_readpage(page);
 	return 0;
 }
 
-static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-				     loff_t offset)
+static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	/* Fallback to buffered I/O. */
 	return 0;
@@ -153,9 +152,7 @@
 
 	if (retval > 0) {
 		mark_inode_dirty(inode);
-		err = generic_write_sync(file, iocb->ki_pos - retval, retval);
-		if (err < 0)
-			retval = err;
+		retval = generic_write_sync(iocb, retval);
 	}
 
 	return retval;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 166d3ed..f323aff 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -214,8 +214,7 @@
 	return ret;
 }
 
-static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			     loff_t offset)
+static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
@@ -223,9 +222,9 @@
 	size_t count = iov_iter_count(iter);
 	ssize_t ret;
 
-	ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block);
+	ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
 	if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
-		udf_write_failed(mapping, offset + count);
+		udf_write_failed(mapping, iocb->ki_pos + count);
 	return ret;
 }
 
@@ -287,7 +286,7 @@
 	if (!PageUptodate(page)) {
 		kaddr = kmap(page);
 		memset(kaddr + iinfo->i_lenAlloc, 0x00,
-		       PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
+		       PAGE_SIZE - iinfo->i_lenAlloc);
 		memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
 			iinfo->i_lenAlloc);
 		flush_dcache_page(page);
@@ -319,7 +318,7 @@
 		inode->i_data.a_ops = &udf_adinicb_aops;
 		up_write(&iinfo->i_data_sem);
 	}
-	page_cache_release(page);
+	put_page(page);
 	mark_inode_dirty(inode);
 
 	return err;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index a2ba11e..c3e5c96 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -1250,7 +1250,7 @@
 	brelse(fibh.sbh);
 
 	tloc = lelb_to_cpu(cfi.icb.extLocation);
-	inode = udf_iget(d_inode(child)->i_sb, &tloc);
+	inode = udf_iget(child->d_sb, &tloc);
 	if (IS_ERR(inode))
 		return ERR_CAST(inode);
 
diff --git a/fs/udf/super.c b/fs/udf/super.c
index fa92fe8..5e2c8c8 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -78,6 +78,15 @@
 #define VSD_FIRST_SECTOR_OFFSET		32768
 #define VSD_MAX_SECTOR_OFFSET		0x800000
 
+/*
+ * Maximum number of Terminating Descriptor / Logical Volume Integrity
+ * Descriptor redirections. The chosen numbers are arbitrary - just that we
+ * hopefully don't limit any real use of rewritten inode on write-once media
+ * but avoid looping for too long on corrupted media.
+ */
+#define UDF_MAX_TD_NESTING 64
+#define UDF_MAX_LVID_NESTING 1000
+
 enum { UDF_MAX_LINKS = 0xffff };
 
 /* These are the "meat" - everything else is stuffing */
@@ -919,14 +928,14 @@
 #endif
 	}
 
-	ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
+	ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
 	if (ret < 0)
 		goto out_bh;
 
 	strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
 	udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
 
-	ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
+	ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
 	if (ret < 0)
 		goto out_bh;
 
@@ -1541,42 +1550,52 @@
 }
 
 /*
- * udf_load_logicalvolint
- *
+ * Find the prevailing Logical Volume Integrity Descriptor.
  */
 static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
 {
-	struct buffer_head *bh = NULL;
+	struct buffer_head *bh, *final_bh;
 	uint16_t ident;
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	struct logicalVolIntegrityDesc *lvid;
+	int indirections = 0;
 
-	while (loc.extLength > 0 &&
-	       (bh = udf_read_tagged(sb, loc.extLocation,
-				     loc.extLocation, &ident)) &&
-	       ident == TAG_IDENT_LVID) {
-		sbi->s_lvid_bh = bh;
-		lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+	while (++indirections <= UDF_MAX_LVID_NESTING) {
+		final_bh = NULL;
+		while (loc.extLength > 0 &&
+			(bh = udf_read_tagged(sb, loc.extLocation,
+					loc.extLocation, &ident))) {
+			if (ident != TAG_IDENT_LVID) {
+				brelse(bh);
+				break;
+			}
 
-		if (lvid->nextIntegrityExt.extLength)
-			udf_load_logicalvolint(sb,
-				leea_to_cpu(lvid->nextIntegrityExt));
+			brelse(final_bh);
+			final_bh = bh;
 
-		if (sbi->s_lvid_bh != bh)
-			brelse(bh);
-		loc.extLength -= sb->s_blocksize;
-		loc.extLocation++;
+			loc.extLength -= sb->s_blocksize;
+			loc.extLocation++;
+		}
+
+		if (!final_bh)
+			return;
+
+		brelse(sbi->s_lvid_bh);
+		sbi->s_lvid_bh = final_bh;
+
+		lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
+		if (lvid->nextIntegrityExt.extLength == 0)
+			return;
+
+		loc = leea_to_cpu(lvid->nextIntegrityExt);
 	}
-	if (sbi->s_lvid_bh != bh)
-		brelse(bh);
+
+	udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
+		UDF_MAX_LVID_NESTING);
+	brelse(sbi->s_lvid_bh);
+	sbi->s_lvid_bh = NULL;
 }
 
-/*
- * Maximum number of Terminating Descriptor redirections. The chosen number is
- * arbitrary - just that we hopefully don't limit any real use of rewritten
- * inode on write-once media but avoid looping for too long on corrupted media.
- */
-#define UDF_MAX_TD_NESTING 64
 
 /*
  * Process a main/reserve volume descriptor sequence.
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 1f32c7b..27b5335 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -3,9 +3,7 @@
 
 #include <linux/mutex.h>
 #include <linux/bitops.h>
-
-/* Since UDF 2.01 is ISO 13346 based... */
-#define UDF_SUPER_MAGIC			0x15013346
+#include <linux/magic.h>
 
 #define UDF_MAX_READ_VERSION		0x0250
 #define UDF_MAX_WRITE_VERSION		0x0201
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 972b706..263829e 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -212,7 +212,7 @@
 			    uint8_t *, int);
 extern int udf_put_filename(struct super_block *, const uint8_t *, int,
 			    uint8_t *, int);
-extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int);
+extern int udf_dstrCS0toUTF8(uint8_t *, int, const uint8_t *, int);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 3ff42f4..695389a 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -335,9 +335,21 @@
 	return u_len;
 }
 
-int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len)
+int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len,
+		      const uint8_t *ocu_i, int i_len)
 {
-	return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len,
+	int s_len = 0;
+
+	if (i_len > 0) {
+		s_len = ocu_i[i_len - 1];
+		if (s_len >= i_len) {
+			pr_err("incorrect dstring lengths (%d/%d)\n",
+			       s_len, i_len);
+			return -EINVAL;
+		}
+	}
+
+	return udf_name_from_CS0(utf_o, o_len, ocu_i, s_len,
 				 udf_uni2char_utf8, 0);
 }
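[Note on the unicode.c hunk above: udf_dstrCS0toUTF8() differs from the old udf_CS0toUTF8() in that it treats its input as a UDF dstring - the last byte of the fixed-size field holds the length of the data in front of it - and a length byte that claims to cover the whole field, itself included, is rejected as corrupt. A stand-alone sketch of that validation, assuming the same layout as the hunk:]

    /* Sketch: validating a UDF-style dstring, where the final byte of the
     * fixed-size field stores the length of the payload preceding it. */
    #include <errno.h>
    #include <stdint.h>

    static int dstring_payload_len(const uint8_t *field, int field_len)
    {
        int s_len;

        if (field_len <= 0)
            return 0;
        s_len = field[field_len - 1];
        if (s_len >= field_len)     /* length byte cannot cover itself */
            return -EINVAL;
        return s_len;
    }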
 
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index dc5fae6..0447b94 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -237,7 +237,7 @@
 			       sector_t newb, struct page *locked_page)
 {
 	const unsigned blks_per_page =
-		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		1 << (PAGE_SHIFT - inode->i_blkbits);
 	const unsigned mask = blks_per_page - 1;
 	struct address_space * const mapping = inode->i_mapping;
 	pgoff_t index, cur_index, last_index;
@@ -255,9 +255,9 @@
 
 	cur_index = locked_page->index;
 	end = count + beg;
-	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
 	for (i = beg; i < end; i = (i | mask) + 1) {
-		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		index = i >> (PAGE_SHIFT - inode->i_blkbits);
 
 		if (likely(cur_index != index)) {
 			page = ufs_get_locked_page(mapping, index);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 74f2e80..57dcced 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -62,7 +62,7 @@
 static inline void ufs_put_page(struct page *page)
 {
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
@@ -105,19 +105,19 @@
 }
 
 
-static void ufs_check_page(struct page *page)
+static bool ufs_check_page(struct page *page)
 {
 	struct inode *dir = page->mapping->host;
 	struct super_block *sb = dir->i_sb;
 	char *kaddr = page_address(page);
 	unsigned offs, rec_len;
-	unsigned limit = PAGE_CACHE_SIZE;
+	unsigned limit = PAGE_SIZE;
 	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
 	struct ufs_dir_entry *p;
 	char *error;
 
-	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
-		limit = dir->i_size & ~PAGE_CACHE_MASK;
+	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
+		limit = dir->i_size & ~PAGE_MASK;
 		if (limit & chunk_mask)
 			goto Ebadsize;
 		if (!limit)
@@ -143,7 +143,7 @@
 		goto Eend;
 out:
 	SetPageChecked(page);
-	return;
+	return true;
 
 	/* Too bad, we had an error */
 
@@ -170,7 +170,7 @@
 bad_entry:
 	ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
 		   "offset=%lu, rec_len=%d, name_len=%d",
-		   dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+		   dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
 		   rec_len, ufs_get_de_namlen(sb, p));
 	goto fail;
 Eend:
@@ -178,10 +178,10 @@
 	ufs_error(sb, __func__,
 		   "entry in directory #%lu spans the page boundary"
 		   "offset=%lu",
-		   dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
+		   dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
 fail:
-	SetPageChecked(page);
 	SetPageError(page);
+	return false;
 }
 
 static struct page *ufs_get_page(struct inode *dir, unsigned long n)
@@ -190,10 +190,10 @@
 	struct page *page = read_mapping_page(mapping, n, NULL);
 	if (!IS_ERR(page)) {
 		kmap(page);
-		if (!PageChecked(page))
-			ufs_check_page(page);
-		if (PageError(page))
-			goto fail;
+		if (unlikely(!PageChecked(page))) {
+			if (PageError(page) || !ufs_check_page(page))
+				goto fail;
+		}
 	}
 	return page;
 
@@ -211,9 +211,9 @@
 {
 	unsigned last_byte = inode->i_size;
 
-	last_byte -= page_nr << PAGE_CACHE_SHIFT;
-	if (last_byte > PAGE_CACHE_SIZE)
-		last_byte = PAGE_CACHE_SIZE;
+	last_byte -= page_nr << PAGE_SHIFT;
+	if (last_byte > PAGE_SIZE)
+		last_byte = PAGE_SIZE;
 	return last_byte;
 }
 
@@ -341,7 +341,7 @@
 		kaddr = page_address(page);
 		dir_end = kaddr + ufs_last_byte(dir, n);
 		de = (struct ufs_dir_entry *)kaddr;
-		kaddr += PAGE_CACHE_SIZE - reclen;
+		kaddr += PAGE_SIZE - reclen;
 		while ((char *)de <= kaddr) {
 			if ((char *)de == dir_end) {
 				/* We hit i_size */
@@ -432,8 +432,8 @@
 	loff_t pos = ctx->pos;
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
-	unsigned int offset = pos & ~PAGE_CACHE_MASK;
-	unsigned long n = pos >> PAGE_CACHE_SHIFT;
+	unsigned int offset = pos & ~PAGE_MASK;
+	unsigned long n = pos >> PAGE_SHIFT;
 	unsigned long npages = dir_pages(inode);
 	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
 	int need_revalidate = file->f_version != inode->i_version;
@@ -454,14 +454,14 @@
 			ufs_error(sb, __func__,
 				  "bad page in #%lu",
 				  inode->i_ino);
-			ctx->pos += PAGE_CACHE_SIZE - offset;
+			ctx->pos += PAGE_SIZE - offset;
 			return -EIO;
 		}
 		kaddr = page_address(page);
 		if (unlikely(need_revalidate)) {
 			if (offset) {
 				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
-				ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset;
+				ctx->pos = (n<<PAGE_SHIFT) + offset;
 			}
 			file->f_version = inode->i_version;
 			need_revalidate = 0;
@@ -574,7 +574,7 @@
 
 	kmap(page);
 	base = (char*)page_address(page);
-	memset(base, 0, PAGE_CACHE_SIZE);
+	memset(base, 0, PAGE_SIZE);
 
 	de = (struct ufs_dir_entry *) base;
 
@@ -594,7 +594,7 @@
 
 	err = ufs_commit_chunk(page, 0, chunk_size);
 fail:
-	page_cache_release(page);
+	put_page(page);
 	return err;
 }
 
@@ -653,7 +653,7 @@
 
 const struct file_operations ufs_dir_operations = {
 	.read		= generic_read_dir,
-	.iterate	= ufs_readdir,
+	.iterate_shared	= ufs_readdir,
 	.fsync		= generic_file_fsync,
 	.llseek		= generic_file_llseek,
 };
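[Note on the fs/ufs hunks: PAGE_CACHE_SIZE/SHIFT/MASK and page_cache_release() were aliases with exactly the same values as PAGE_SIZE/SHIFT/MASK and put_page(), so the renames here are mechanical; the page-index and in-page-offset arithmetic is unchanged. A small sketch of that arithmetic under the new names - the 12-bit shift is just the common 4 KiB-page value used for illustration, not something the patch assumes:]

    /* Sketch: splitting a file position into page index and in-page offset,
     * as ufs_readdir() and friends do above. PAGE_SHIFT value is illustrative. */
    #include <stdint.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PAGE_MASK   (~(PAGE_SIZE - 1))

    static void split_pos(uint64_t pos, uint64_t *index, unsigned int *offset)
    {
        *index  = pos >> PAGE_SHIFT;    /* which page in the mapping */
        *offset = pos & ~PAGE_MASK;     /* byte offset inside that page */
    }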
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index d897e16..9f49431 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -1051,13 +1051,13 @@
 	lastfrag--;
 
 	lastpage = ufs_get_locked_page(mapping, lastfrag >>
-				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
+				       (PAGE_SHIFT - inode->i_blkbits));
        if (IS_ERR(lastpage)) {
                err = -EIO;
                goto out;
        }
 
-       end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
+       end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
        bh = page_buffers(lastpage);
        for (i = 0; i < end; ++i)
                bh = bh->b_this_page;
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index acf4a3b..a1559f7 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -305,7 +305,7 @@
 			ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0);
 		else {
 			kunmap(dir_page);
-			page_cache_release(dir_page);
+			put_page(dir_page);
 		}
 		inode_dec_link_count(old_dir);
 	}
@@ -315,11 +315,11 @@
 out_dir:
 	if (dir_de) {
 		kunmap(dir_page);
-		page_cache_release(dir_page);
+		put_page(dir_page);
 	}
 out_old:
 	kunmap(old_page);
-	page_cache_release(old_page);
+	put_page(old_page);
 out:
 	return err;
 }
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 442fd52..f04ab23 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -132,7 +132,7 @@
 	ino = ufs_inode_by_name(d_inode(child), &dot_dot);
 	if (!ino)
 		return ERR_PTR(-ENOENT);
-	return d_obtain_alias(ufs_iget(d_inode(child)->i_sb, ino));
+	return d_obtain_alias(ufs_iget(child->d_sb, ino));
 }
 
 static const struct export_operations ufs_export_ops = {
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index b6c2f94..a409e3e 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -261,14 +261,14 @@
 		if (unlikely(page->mapping == NULL)) {
 			/* Truncate got there first */
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			page = NULL;
 			goto out;
 		}
 
 		if (!PageUptodate(page) || PageError(page)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 
 			printk(KERN_ERR "ufs_change_blocknr: "
 			       "can not read page: ino %lu, index: %lu\n",
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 9541759..b7fbf53 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -283,7 +283,7 @@
 static inline void ufs_put_locked_page(struct page *page)
 {
        unlock_page(page);
-       page_cache_release(page);
+       put_page(page);
 }
 
 
diff --git a/fs/xattr.c b/fs/xattr.c
index 4861322..b11945e 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -192,7 +192,7 @@
 	if (!inode->i_op->getxattr)
 		return -EOPNOTSUPP;
 
-	error = inode->i_op->getxattr(dentry, name, NULL, 0);
+	error = inode->i_op->getxattr(dentry, inode, name, NULL, 0);
 	if (error < 0)
 		return error;
 
@@ -203,7 +203,7 @@
 		memset(value, 0, error + 1);
 	}
 
-	error = inode->i_op->getxattr(dentry, name, value, error);
+	error = inode->i_op->getxattr(dentry, inode, name, value, error);
 	*xattr_value = value;
 	return error;
 }
@@ -236,7 +236,7 @@
 	}
 nolsm:
 	if (inode->i_op->getxattr)
-		error = inode->i_op->getxattr(dentry, name, value, size);
+		error = inode->i_op->getxattr(dentry, inode, name, value, size);
 	else
 		error = -EOPNOTSUPP;
 
@@ -691,14 +691,16 @@
  * Find the handler for the prefix and dispatch its get() operation.
  */
 ssize_t
-generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size)
+generic_getxattr(struct dentry *dentry, struct inode *inode,
+		 const char *name, void *buffer, size_t size)
 {
 	const struct xattr_handler *handler;
 
 	handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
 	if (IS_ERR(handler))
 		return PTR_ERR(handler);
-	return handler->get(handler, dentry, name, buffer, size);
+	return handler->get(handler, dentry, inode,
+			    name, buffer, size);
 }
 
 /*
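[Note on the fs/xattr.c hunks: ->getxattr and the xattr_handler ->get() method now receive the inode explicitly, so handlers no longer have to reach it through d_inode(dentry). A sketch of a handler written against the new prototype; "examplefs" and its lookup helper are hypothetical, the signature matches the xfs_xattr_get() hunk further down.]

    /* Sketch: an xattr_handler ->get() under the new prototype. */
    #include <linux/xattr.h>

    static int examplefs_xattr_get(const struct xattr_handler *handler,
                                   struct dentry *unused, struct inode *inode,
                                   const char *name, void *value, size_t size)
    {
        /* The inode is passed in directly; no d_inode(dentry) needed. */
        return examplefs_getxattr(inode, handler->flags, name, value, size);
    }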
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 041b694..ce41d7f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3742,11 +3742,11 @@
 		args.prod = align;
 		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
 			args.mod = (xfs_extlen_t)(args.prod - args.mod);
-	} else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
+	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
 		args.prod = 1;
 		args.mod = 0;
 	} else {
-		args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
+		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
 		if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
 			args.mod = (xfs_extlen_t)(args.prod - args.mod);
 	}
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 2d5df1f..b6e527b 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -158,22 +158,14 @@
 	if (error) {
 		/*
 		 * If the attribute doesn't exist make sure we have a negative
-		 * cache entry, for any other error assume it is transient and
-		 * leave the cache entry as ACL_NOT_CACHED.
+		 * cache entry, for any other error assume it is transient.
 		 */
-		if (error == -ENOATTR)
-			goto out_update_cache;
-		acl = ERR_PTR(error);
-		goto out;
+		if (error != -ENOATTR)
+			acl = ERR_PTR(error);
+	} else  {
+		acl = xfs_acl_from_disk(xfs_acl, len,
+					XFS_ACL_MAX_ENTRIES(ip->i_mount));
 	}
-
-	acl = xfs_acl_from_disk(xfs_acl, len, XFS_ACL_MAX_ENTRIES(ip->i_mount));
-	if (IS_ERR(acl))
-		goto out;
-
-out_update_cache:
-	set_cached_acl(inode, type, acl);
-out:
 	kmem_free(xfs_acl);
 	return acl;
 }
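[Note on the xfs_acl.c hunk: it drops the set_cached_acl()/negative-entry bookkeeping on the assumption that the VFS get_acl() path now caches whatever ->get_acl() returns. Under that assumption a filesystem's ->get_acl() reduces to a plain report of what is on disk, roughly as sketched here with a hypothetical filesystem and read helper:]

    /* Sketch: ->get_acl() when caching is handled by the VFS caller. */
    static struct posix_acl *examplefs_get_acl(struct inode *inode, int type)
    {
        struct posix_acl *acl = NULL;
        int error;

        error = examplefs_read_acl_from_disk(inode, type, &acl); /* hypothetical */
        if (error == -ENOATTR)
            return NULL;            /* no ACL stored */
        if (error)
            return ERR_PTR(error);  /* transient error, nothing cached */
        return acl;                 /* caller caches the result */
    }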
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index d445a64..c535887 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -704,7 +704,7 @@
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_invalidate:
-	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
 	return;
 }
 
@@ -925,9 +925,9 @@
 	 * ---------------------------------^------------------|
 	 */
 	offset = i_size_read(inode);
-	end_index = offset >> PAGE_CACHE_SHIFT;
+	end_index = offset >> PAGE_SHIFT;
 	if (page->index < end_index)
-		end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
+		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
 	else {
 		/*
 		 * Check whether the page to write out is beyond or straddles
@@ -940,7 +940,7 @@
 		 * |				    |      Straddles     |
 		 * ---------------------------------^-----------|--------|
 		 */
-		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
+		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
 
 		/*
 		 * Skip the page if it is fully outside i_size, e.g. due to a
@@ -971,7 +971,7 @@
 		 * memory is zeroed when mapped, and writes to that region are
 		 * not written out to the file."
 		 */
-		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
+		zero_user_segment(page, offset_into_page, PAGE_SIZE);
 
 		/* Adjust the end_offset to the end of file */
 		end_offset = offset;
@@ -1406,8 +1406,7 @@
 STATIC ssize_t
 xfs_vm_direct_IO(
 	struct kiocb		*iocb,
-	struct iov_iter		*iter,
-	loff_t			offset)
+	struct iov_iter		*iter)
 {
 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 	dio_iodone_t		*endio = NULL;
@@ -1420,12 +1419,12 @@
 	}
 
 	if (IS_DAX(inode)) {
-		return dax_do_io(iocb, inode, iter, offset,
+		return dax_do_io(iocb, inode, iter,
 				 xfs_get_blocks_direct, endio, 0);
 	}
 
 	bdev = xfs_find_bdev_for_inode(inode);
-	return  __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+	return  __blockdev_direct_IO(iocb, inode, bdev, iter,
 			xfs_get_blocks_direct, endio, NULL, flags);
 }
 
@@ -1475,7 +1474,7 @@
 	loff_t			block_offset;
 	loff_t			block_start;
 	loff_t			block_end;
-	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
+	loff_t			from = pos & (PAGE_SIZE - 1);
 	loff_t			to = from + len;
 	struct buffer_head	*bh, *head;
 	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
@@ -1491,7 +1490,7 @@
 	 * start of the page by using shifts rather than masks the mismatch
 	 * problem.
 	 */
-	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
+	block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT;
 
 	ASSERT(block_offset + from == pos);
 
@@ -1558,12 +1557,12 @@
 	struct page		**pagep,
 	void			**fsdata)
 {
-	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t			index = pos >> PAGE_SHIFT;
 	struct page		*page;
 	int			status;
 	struct xfs_mount	*mp = XFS_I(mapping->host)->i_mount;
 
-	ASSERT(len <= PAGE_CACHE_SIZE);
+	ASSERT(len <= PAGE_SIZE);
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -1592,7 +1591,7 @@
 			truncate_pagecache_range(inode, start, pos + len);
 		}
 
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 	}
 
@@ -1620,7 +1619,7 @@
 {
 	int			ret;
 
-	ASSERT(len <= PAGE_CACHE_SIZE);
+	ASSERT(len <= PAGE_SIZE);
 
 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 	if (unlikely(ret < len)) {
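[Note on the xfs_aops.c hunks: xfs_vm_direct_IO() loses its loff_t argument because a ->direct_IO() implementation can read the position from the iocb itself, as the xfs_file.c hunks below also do for the direct write path. A sketch of the new shape; the issue helper is hypothetical:]

    /* Sketch: ->direct_IO() without the separate offset argument. */
    static ssize_t examplefs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
    {
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        loff_t offset = iocb->ki_pos;           /* was the third parameter */
        size_t count = iov_iter_count(iter);

        /* ... set up and issue the I/O covering [offset, offset + count) ... */
        return examplefs_issue_dio(inode, iter, offset, count); /* hypothetical */
    }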
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index a32c1dc..3b63098 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1237,7 +1237,7 @@
 	/* wait for the completion of any pending DIOs */
 	inode_dio_wait(VFS_I(ip));
 
-	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
 	ioffset = round_down(offset, rounding);
 	iendoffset = round_up(offset + len, rounding) - 1;
 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
@@ -1466,7 +1466,7 @@
 	if (error)
 		return error;
 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
-					offset >> PAGE_CACHE_SHIFT, -1);
+					offset >> PAGE_SHIFT, -1);
 	if (error)
 		return error;
 
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 93b3ab0..f44f799 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -273,10 +273,11 @@
 	size_t			bufsize,
 	struct xfs_dir2_leaf_map_info *mip,
 	xfs_dir2_off_t		*curoff,
-	struct xfs_buf		**bpp)
+	struct xfs_buf		**bpp,
+	bool			trim_map)
 {
 	struct xfs_inode	*dp = args->dp;
-	struct xfs_buf		*bp = *bpp;
+	struct xfs_buf		*bp = NULL;
 	struct xfs_bmbt_irec	*map = mip->map;
 	struct blk_plug		plug;
 	int			error = 0;
@@ -286,13 +287,10 @@
 	struct xfs_da_geometry	*geo = args->geo;
 
 	/*
-	 * If we have a buffer, we need to release it and
-	 * take it out of the mapping.
+	 * If the caller just finished processing a buffer, it will tell us
+	 * we need to trim that block out of the mapping now it is done.
 	 */
-
-	if (bp) {
-		xfs_trans_brelse(NULL, bp);
-		bp = NULL;
+	if (trim_map) {
 		mip->map_blocks -= geo->fsbcount;
 		/*
 		 * Loop to get rid of the extents for the
@@ -533,10 +531,17 @@
 		 */
 		if (!bp || ptr >= (char *)bp->b_addr + geo->blksize) {
 			int	lock_mode;
+			bool	trim_map = false;
+
+			if (bp) {
+				xfs_trans_brelse(NULL, bp);
+				bp = NULL;
+				trim_map = true;
+			}
 
 			lock_mode = xfs_ilock_data_map_shared(dp);
 			error = xfs_dir2_leaf_readbuf(args, bufsize, map_info,
-						      &curoff, &bp);
+						      &curoff, &bp, trim_map);
 			xfs_iunlock(dp, lock_mode);
 			if (error || !map_info->map_valid)
 				break;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index ac0fd32..85ce303 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -106,8 +106,8 @@
 		unsigned offset, bytes;
 		void *fsdata;
 
-		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-		bytes = PAGE_CACHE_SIZE - offset;
+		offset = (pos & (PAGE_SIZE -1)); /* Within page */
+		bytes = PAGE_SIZE - offset;
 		if (bytes > count)
 			bytes = count;
 
@@ -718,18 +718,19 @@
 	int			unaligned_io = 0;
 	int			iolock;
 	size_t			count = iov_iter_count(from);
-	loff_t			pos = iocb->ki_pos;
 	loff_t			end;
 	struct iov_iter		data;
 	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
 					mp->m_rtdev_targp : mp->m_ddev_targp;
 
 	/* DIO must be aligned to device logical sector size */
-	if (!IS_DAX(inode) && ((pos | count) & target->bt_logical_sectormask))
+	if (!IS_DAX(inode) &&
+	    ((iocb->ki_pos | count) & target->bt_logical_sectormask))
 		return -EINVAL;
 
 	/* "unaligned" here means not aligned to a filesystem block */
-	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
+	if ((iocb->ki_pos & mp->m_blockmask) ||
+	    ((iocb->ki_pos + count) & mp->m_blockmask))
 		unaligned_io = 1;
 
 	/*
@@ -760,8 +761,7 @@
 	if (ret)
 		goto out;
 	count = iov_iter_count(from);
-	pos = iocb->ki_pos;
-	end = pos + count - 1;
+	end = iocb->ki_pos + count - 1;
 
 	/*
 	 * See xfs_file_read_iter() for why we do a full-file flush here.
@@ -794,19 +794,18 @@
 	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
 
 	data = *from;
-	ret = mapping->a_ops->direct_IO(iocb, &data, pos);
+	ret = mapping->a_ops->direct_IO(iocb, &data);
 
 	/* see generic_file_direct_write() for why this is necessary */
 	if (mapping->nrpages) {
 		invalidate_inode_pages2_range(mapping,
-					      pos >> PAGE_CACHE_SHIFT,
-					      end >> PAGE_CACHE_SHIFT);
+					      iocb->ki_pos >> PAGE_SHIFT,
+					      end >> PAGE_SHIFT);
 	}
 
 	if (ret > 0) {
-		pos += ret;
+		iocb->ki_pos += ret;
 		iov_iter_advance(from, ret);
-		iocb->ki_pos = pos;
 	}
 out:
 	xfs_rw_iunlock(ip, iolock);
@@ -904,14 +903,10 @@
 		ret = xfs_file_buffered_aio_write(iocb, from);
 
 	if (ret > 0) {
-		ssize_t err;
-
 		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 
 		/* Handle various SYNC-type writes */
-		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
-		if (err < 0)
-			ret = err;
+		ret = generic_write_sync(iocb, ret);
 	}
 	return ret;
 }
@@ -1207,9 +1202,9 @@
 
 	pagevec_init(&pvec, 0);
 
-	index = startoff >> PAGE_CACHE_SHIFT;
+	index = startoff >> PAGE_SHIFT;
 	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
-	end = endoff >> PAGE_CACHE_SHIFT;
+	end = endoff >> PAGE_SHIFT;
 	do {
 		int		want;
 		unsigned	nr_pages;
@@ -1714,7 +1709,7 @@
 const struct file_operations xfs_dir_file_operations = {
 	.open		= xfs_dir_open,
 	.read		= generic_read_dir,
-	.iterate	= xfs_file_readdir,
+	.iterate_shared	= xfs_file_readdir,
 	.llseek		= generic_file_llseek,
 	.unlocked_ioctl	= xfs_file_ioctl,
 #ifdef CONFIG_COMPAT
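[Note on the xfs_file.c write-completion hunk: it relies on the reworked generic_write_sync(), which now takes the iocb plus the number of bytes written and returns either that count or a negative error, so callers no longer juggle a separate pos/err pair. A sketch of the resulting ->write_iter tail; the write helper is hypothetical:]

    /* Sketch: post-write sync handling with the new generic_write_sync(). */
    static ssize_t examplefs_write_iter(struct kiocb *iocb, struct iov_iter *from)
    {
        ssize_t ret;

        ret = examplefs_do_write(iocb, from);       /* hypothetical helper */
        if (ret > 0)
            ret = generic_write_sync(iocb, ret);    /* handles O_SYNC/O_DSYNC */
        return ret;
    }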
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ec0e239..a8192dc 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -135,7 +135,7 @@
  * Size of block device i/o is parameterized here.
  * Currently the system supports page-sized i/o.
  */
-#define	BLKDEV_IOSHIFT		PAGE_CACHE_SHIFT
+#define	BLKDEV_IOSHIFT		PAGE_SHIFT
 #define	BLKDEV_IOSIZE		(1<<BLKDEV_IOSHIFT)
 /* number of BB's per block device block */
 #define	BLKDEV_BB		BTOBB(BLKDEV_IOSIZE)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 536a0ee..cfd4210 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -171,7 +171,7 @@
 	ASSERT(sbp->sb_blocklog >= BBSHIFT);
 
 	/* Limited by ULONG_MAX of page cache index */
-	if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
+	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
 		return -EFBIG;
 	return 0;
 }
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index bac6b34..eafe257 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -231,12 +231,12 @@
 xfs_preferred_iosize(xfs_mount_t *mp)
 {
 	if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)
-		return PAGE_CACHE_SIZE;
+		return PAGE_SIZE;
 	return (mp->m_swidth ?
 		(mp->m_swidth << mp->m_sb.sb_blocklog) :
 		((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ?
 			(1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) :
-			PAGE_CACHE_SIZE));
+			PAGE_SIZE));
 }
 
 #define XFS_LAST_UNMOUNT_WAS_CLEAN(mp)	\
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index ade236e..51ddaf2 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -293,8 +293,8 @@
 		 * Make sure reads through the pagecache see the new data.
 		 */
 		error = invalidate_inode_pages2_range(inode->i_mapping,
-					start >> PAGE_CACHE_SHIFT,
-					(end - 1) >> PAGE_CACHE_SHIFT);
+					start >> PAGE_SHIFT,
+					(end - 1) >> PAGE_SHIFT);
 		WARN_ON_ONCE(error);
 
 		error = xfs_iomap_write_unwritten(ip, start, length);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index d760934..187e14b 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -556,10 +556,10 @@
 	/* Figure out maximum filesize, on Linux this can depend on
 	 * the filesystem blocksize (on 32 bit platforms).
 	 * __block_write_begin does this in an [unsigned] long...
-	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
+	 *      page->index << (PAGE_SHIFT - bbits)
 	 * So, for page sized blocks (4K on 32 bit platforms),
 	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
-	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
 	 * but for smaller blocksizes it is less (bbits = log2 bsize).
 	 * Note1: get_block_t takes a long (implicit cast from above)
 	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
@@ -570,10 +570,10 @@
 #if BITS_PER_LONG == 32
 # if defined(CONFIG_LBDAF)
 	ASSERT(sizeof(sector_t) == 8);
-	pagefactor = PAGE_CACHE_SIZE;
+	pagefactor = PAGE_SIZE;
 	bitshift = BITS_PER_LONG;
 # else
-	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
+	pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
 # endif
 #endif
 
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 110f1d7..d111f69 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -32,11 +32,11 @@
 
 
 static int
-xfs_xattr_get(const struct xattr_handler *handler, struct dentry *dentry,
-		const char *name, void *value, size_t size)
+xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
+		struct inode *inode, const char *name, void *value, size_t size)
 {
 	int xflags = handler->flags;
-	struct xfs_inode *ip = XFS_I(d_inode(dentry));
+	struct xfs_inode *ip = XFS_I(inode);
 	int error, asize = size;
 
 	/* Convert Linux syscall to XFS internal ATTR flags */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 14362a8..4d40e9b 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -87,7 +87,7 @@
 	  .package.elements = (eles)			\
 	}
 
-bool acpi_dev_present(const char *hid);
+bool acpi_dev_found(const char *hid);
 
 #ifdef CONFIG_ACPI
 
@@ -394,13 +394,13 @@
 
 static inline bool is_acpi_node(struct fwnode_handle *fwnode)
 {
-	return fwnode && (fwnode->type == FWNODE_ACPI
+	return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI
 		|| fwnode->type == FWNODE_ACPI_DATA);
 }
 
 static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
 {
-	return fwnode && fwnode->type == FWNODE_ACPI;
+	return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI;
 }
 
 static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 29c6912..797ae2e 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -78,7 +78,6 @@
 
 /* ACPI PCI Interrupt Link (pci_link.c) */
 
-int acpi_irq_penalty_init(void);
 int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
 			       int *polarity, char **name);
 int acpi_pci_link_free_irq(acpi_handle handle);
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index d1e34d1..562603d 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -96,7 +96,7 @@
 #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
 acpi_status
 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
-			    char **new_val);
+			    acpi_string *new_val);
 #endif
 
 #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
@@ -108,7 +108,7 @@
 #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_physical_table_override
 acpi_status
 acpi_os_physical_table_override(struct acpi_table_header *existing_table,
-				acpi_physical_address * new_address,
+				acpi_physical_address *new_address,
 				u32 *new_table_length);
 #endif
 
@@ -203,7 +203,7 @@
 #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_physical_address
 acpi_status
 acpi_os_get_physical_address(void *logical_address,
-			     acpi_physical_address * physical_address);
+			     acpi_physical_address *physical_address);
 #endif
 
 /*
@@ -379,14 +379,14 @@
 acpi_os_get_table_by_name(char *signature,
 			  u32 instance,
 			  struct acpi_table_header **table,
-			  acpi_physical_address * address);
+			  acpi_physical_address *address);
 #endif
 
 #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index
 acpi_status
 acpi_os_get_table_by_index(u32 index,
 			   struct acpi_table_header **table,
-			   u32 *instance, acpi_physical_address * address);
+			   u32 *instance, acpi_physical_address *address);
 #endif
 
 #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 1755697..4e4c214 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20160108
+#define ACPI_CA_VERSION                 0x20160422
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -192,7 +192,7 @@
 /*
  * Optionally support group module level code.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
 
 /*
  * Optionally use 32-bit FADT addresses if and when there is a conflict
@@ -484,8 +484,8 @@
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
 
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
-			    acpi_find_root_pointer(acpi_physical_address *
-						   rsdp_address))
+			    acpi_find_root_pointer(acpi_physical_address
+						   *rsdp_address))
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
 			     acpi_get_table_header(acpi_string signature,
 						   u32 instance,
@@ -530,7 +530,7 @@
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
 			     acpi_get_handle(acpi_handle parent,
 					     acpi_string pathname,
-					     acpi_handle * ret_handle))
+					     acpi_handle *ret_handle))
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
 			     acpi_attach_data(acpi_handle object,
 					      acpi_object_handler handler,
@@ -575,15 +575,15 @@
 			    acpi_get_next_object(acpi_object_type type,
 						 acpi_handle parent,
 						 acpi_handle child,
-						 acpi_handle * out_handle))
+						 acpi_handle *out_handle))
 
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
 			    acpi_get_type(acpi_handle object,
-					  acpi_object_type * out_type))
+					  acpi_object_type *out_type))
 
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
 			    acpi_get_parent(acpi_handle object,
-					    acpi_handle * out_handle))
+					    acpi_handle *out_handle))
 
 /*
  * Handler interfaces
@@ -755,7 +755,7 @@
 
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				acpi_get_gpe_device(u32 gpe_index,
-						    acpi_handle * gpe_device))
+						    acpi_handle *gpe_device))
 
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				acpi_install_gpe_block(acpi_handle gpe_device,
@@ -771,8 +771,8 @@
  * Resource interfaces
  */
 typedef
-acpi_status(*acpi_walk_resource_callback) (struct acpi_resource * resource,
-					   void *context);
+acpi_status (*acpi_walk_resource_callback) (struct acpi_resource * resource,
+					    void *context);
 
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
 			    acpi_get_vendor_resource(acpi_handle device,
@@ -938,7 +938,8 @@
 ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
 				void ACPI_INTERNAL_VAR_XFACE
 				acpi_log_error(const char *format, ...))
- acpi_status acpi_initialize_debugger(void);
+
+acpi_status acpi_initialize_debugger(void);
 
 void acpi_terminate_debugger(void);
 
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index cf2acb8..16c1892 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -417,6 +417,7 @@
 	u8                                      type; \
 	u8                                      producer_consumer;   /* For values, see Producer/Consumer above */\
 	u8                                      slave_mode; \
+	u8                                      connection_sharing; \
 	u8                                      type_revision_id; \
 	u16                                     type_data_length; \
 	u16                                     vendor_length; \
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 0cb1a00..c19700e 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -223,7 +223,7 @@
 /*******************************************************************************
  *
  * FADT - Fixed ACPI Description Table (Signature "FACP")
- *        Version 4
+ *        Version 6
  *
  ******************************************************************************/
 
@@ -413,4 +413,6 @@
 #define ACPI_FADT_V5_SIZE       (u32) (ACPI_FADT_OFFSET (hypervisor_id))
 #define ACPI_FADT_V6_SIZE       (u32) (sizeof (struct acpi_table_fadt))
 
+#define ACPI_FADT_CONFORMANCE   "ACPI 6.1 (FADT version 6)"
+
 #endif				/* __ACTBL_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 16e0136..796d6ba 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -236,7 +236,8 @@
 	ACPI_EINJ_CHECK_BUSY_STATUS = 6,
 	ACPI_EINJ_GET_COMMAND_STATUS = 7,
 	ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8,
-	ACPI_EINJ_ACTION_RESERVED = 9,	/* 9 and greater are reserved */
+	ACPI_EINJ_GET_EXECUTE_TIMINGS = 9,
+	ACPI_EINJ_ACTION_RESERVED = 10,	/* 10 and greater are reserved */
 	ACPI_EINJ_TRIGGER_ERROR = 0xFF	/* Except for this value */
 };
 
@@ -348,7 +349,8 @@
 	ACPI_ERST_GET_ERROR_RANGE = 13,
 	ACPI_ERST_GET_ERROR_LENGTH = 14,
 	ACPI_ERST_GET_ERROR_ATTRIBUTES = 15,
-	ACPI_ERST_ACTION_RESERVED = 16	/* 16 and greater are reserved */
+	ACPI_ERST_EXECUTE_TIMINGS = 16,
+	ACPI_ERST_ACTION_RESERVED = 17	/* 17 and greater are reserved */
 };
 
 /* Values for Instruction field above */
@@ -427,7 +429,8 @@
 	ACPI_HEST_TYPE_AER_ENDPOINT = 7,
 	ACPI_HEST_TYPE_AER_BRIDGE = 8,
 	ACPI_HEST_TYPE_GENERIC_ERROR = 9,
-	ACPI_HEST_TYPE_RESERVED = 10	/* 10 and greater are reserved */
+	ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10,
+	ACPI_HEST_TYPE_RESERVED = 11	/* 11 and greater are reserved */
 };
 
 /*
@@ -506,7 +509,11 @@
 	ACPI_HEST_NOTIFY_NMI = 4,
 	ACPI_HEST_NOTIFY_CMCI = 5,	/* ACPI 5.0 */
 	ACPI_HEST_NOTIFY_MCE = 6,	/* ACPI 5.0 */
-	ACPI_HEST_NOTIFY_RESERVED = 7	/* 7 and greater are reserved */
+	ACPI_HEST_NOTIFY_GPIO = 7,	/* ACPI 6.0 */
+	ACPI_HEST_NOTIFY_SEA = 8,	/* ACPI 6.1 */
+	ACPI_HEST_NOTIFY_SEI = 9,	/* ACPI 6.1 */
+	ACPI_HEST_NOTIFY_GSIV = 10,	/* ACPI 6.1 */
+	ACPI_HEST_NOTIFY_RESERVED = 11	/* 11 and greater are reserved */
 };
 
 /* Values for config_write_enable bitfield above */
@@ -603,6 +610,24 @@
 	u32 error_block_length;
 };
 
+/* 10: Generic Hardware Error Source, version 2 */
+
+struct acpi_hest_generic_v2 {
+	struct acpi_hest_header header;
+	u16 related_source_id;
+	u8 reserved;
+	u8 enabled;
+	u32 records_to_preallocate;
+	u32 max_sections_per_record;
+	u32 max_raw_data_length;
+	struct acpi_generic_address error_status_address;
+	struct acpi_hest_notify notify;
+	u32 error_block_length;
+	struct acpi_generic_address read_ack_register;
+	u64 read_ack_preserve;
+	u64 read_ack_write;
+};
+
 /* Generic Error Status block */
 
 struct acpi_hest_generic_status {
@@ -634,6 +659,33 @@
 	u8 fru_text[20];
 };
 
+/* Extension for revision 0x0300 */
+
+struct acpi_hest_generic_data_v300 {
+	u8 section_type[16];
+	u32 error_severity;
+	u16 revision;
+	u8 validation_bits;
+	u8 flags;
+	u32 error_data_length;
+	u8 fru_id[16];
+	u8 fru_text[20];
+	u64 time_stamp;
+};
+
+/* Values for error_severity above */
+
+#define ACPI_HEST_GEN_ERROR_RECOVERABLE     0
+#define ACPI_HEST_GEN_ERROR_FATAL           1
+#define ACPI_HEST_GEN_ERROR_CORRECTED       2
+#define ACPI_HEST_GEN_ERROR_NONE            3
+
+/* Flags for validation_bits above */
+
+#define ACPI_HEST_GEN_VALID_FRU_ID          (1)
+#define ACPI_HEST_GEN_VALID_FRU_STRING      (1<<1)
+#define ACPI_HEST_GEN_VALID_TIMESTAMP       (1<<2)
+
 /*******************************************************************************
  *
  * MADT - Multiple APIC Description Table
@@ -934,7 +986,7 @@
 
 /*******************************************************************************
  *
- * NFIT - NVDIMM Interface Table (ACPI 6.0)
+ * NFIT - NVDIMM Interface Table (ACPI 6.0+)
  *        Version 1
  *
  ******************************************************************************/
@@ -1015,6 +1067,7 @@
 #define ACPI_NFIT_MEM_NOT_ARMED         (1<<3)	/* 03: Memory Device is not armed */
 #define ACPI_NFIT_MEM_HEALTH_OBSERVED   (1<<4)	/* 04: Memory Device observed SMART/health events */
 #define ACPI_NFIT_MEM_HEALTH_ENABLED    (1<<5)	/* 05: SMART/health events enabled */
+#define ACPI_NFIT_MEM_MAP_FAILED        (1<<6)	/* 06: Mapping to SPA failed */
 
 /* 2: Interleave Structure */
 
@@ -1046,7 +1099,10 @@
 	u16 subsystem_vendor_id;
 	u16 subsystem_device_id;
 	u16 subsystem_revision_id;
-	u8 reserved[6];		/* Reserved, must be zero */
+	u8 valid_fields;
+	u8 manufacturing_location;
+	u16 manufacturing_date;
+	u8 reserved[2];		/* Reserved, must be zero */
 	u32 serial_number;
 	u16 code;
 	u16 windows;
@@ -1061,7 +1117,11 @@
 
 /* Flags */
 
-#define ACPI_NFIT_CONTROL_BUFFERED      (1)	/* Block Data Windows implementation is buffered */
+#define ACPI_NFIT_CONTROL_BUFFERED          (1)	/* Block Data Windows implementation is buffered */
+
+/* valid_fields bits */
+
+#define ACPI_NFIT_CONTROL_MFG_INFO_VALID    (1)	/* Manufacturing fields are valid */
 
 /* 5: NVDIMM Block Data Window Region Structure */
 
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index a4ef625..c93dbad 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -321,7 +321,7 @@
  * DBG2 - Debug Port Table 2
  *        Version 0 (Both main table and subtables)
  *
- * Conforms to "Microsoft Debug Port Table 2 (DBG2)", May 22 2012.
+ * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015
  *
  ******************************************************************************/
 
@@ -371,6 +371,11 @@
 
 #define ACPI_DBG2_16550_COMPATIBLE  0x0000
 #define ACPI_DBG2_16550_SUBSET      0x0001
+#define ACPI_DBG2_ARM_PL011         0x0003
+#define ACPI_DBG2_ARM_SBSA_32BIT    0x000D
+#define ACPI_DBG2_ARM_SBSA_GENERIC  0x000E
+#define ACPI_DBG2_ARM_DCC           0x000F
+#define ACPI_DBG2_BCM2835           0x0010
 
 #define ACPI_DBG2_1394_STANDARD     0x0000
 
@@ -399,7 +404,7 @@
  *        Version 1
  *
  * Conforms to "Intel Virtualization Technology for Directed I/O",
- * Version 2.2, Sept. 2013
+ * Version 2.3, October 2014
  *
  ******************************************************************************/
 
@@ -413,6 +418,8 @@
 /* Masks for Flags field above */
 
 #define ACPI_DMAR_INTR_REMAP        (1)
+#define ACPI_DMAR_X2APIC_OPT_OUT    (1<<1)
+#define ACPI_DMAR_X2APIC_MODE       (1<<2)
 
 /* DMAR subtable header */
 
@@ -655,7 +662,7 @@
  * IORT - IO Remapping Table
  *
  * Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049A, 2015
+ * Document number: ARM DEN 0049B, October 2015
  *
  ******************************************************************************/
 
@@ -685,7 +692,8 @@
 	ACPI_IORT_NODE_ITS_GROUP = 0x00,
 	ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
 	ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
-	ACPI_IORT_NODE_SMMU = 0x03
+	ACPI_IORT_NODE_SMMU = 0x03,
+	ACPI_IORT_NODE_SMMU_V3 = 0x04
 };
 
 struct acpi_iort_id_mapping {
@@ -775,6 +783,23 @@
 #define ACPI_IORT_SMMU_DVM_SUPPORTED    (1)
 #define ACPI_IORT_SMMU_COHERENT_WALK    (1<<1)
 
+struct acpi_iort_smmu_v3 {
+	u64 base_address;	/* SMMUv3 base address */
+	u32 flags;
+	u32 reserved;
+	u64 vatos_address;
+	u32 model;		/* O: generic SMMUv3 */
+	u32 event_gsiv;
+	u32 pri_gsiv;
+	u32 gerr_gsiv;
+	u32 sync_gsiv;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE   (1)
+#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE     (1<<1)
+
 /*******************************************************************************
  *
  * IVRS - I/O Virtualization Reporting Structure
@@ -1102,10 +1127,10 @@
 /*******************************************************************************
  *
  * SPCR - Serial Port Console Redirection table
- *        Version 1
+ *        Version 2
  *
  * Conforms to "Serial Port Console Redirection Table",
- * Version 1.00, January 11, 2002
+ * Version 1.03, August 10, 2015
  *
  ******************************************************************************/
 
@@ -1137,6 +1162,8 @@
 
 #define ACPI_SPCR_DO_NOT_DISABLE    (1)
 
+/* Values for Interface Type: See the definition of the DBG2 table */
+
 /*******************************************************************************
  *
  * SPMI - Server Platform Management Interface table
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index ddf5e66..ebc1f4f 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -184,7 +184,7 @@
 	struct acpi_table_header header;	/* Common ACPI table header */
 };
 
-/* FPDT subtable header */
+/* FPDT subtable header (Performance Record Structure) */
 
 struct acpi_fpdt_header {
 	u16 type;
@@ -205,6 +205,57 @@
 
 /* 0: Firmware Basic Boot Performance Record */
 
+struct acpi_fpdt_boot_pointer {
+	struct acpi_fpdt_header header;
+	u8 reserved[4];
+	u64 address;
+};
+
+/* 1: S3 Performance Table Pointer Record */
+
+struct acpi_fpdt_s3pt_pointer {
+	struct acpi_fpdt_header header;
+	u8 reserved[4];
+	u64 address;
+};
+
+/*
+ * S3PT - S3 Performance Table. This table is pointed to by the
+ * S3 Pointer Record above.
+ */
+struct acpi_table_s3pt {
+	u8 signature[4];	/* "S3PT" */
+	u32 length;
+};
+
+/*
+ * S3PT Subtables (Not part of the actual FPDT)
+ */
+
+/* Values for Type field in S3PT header */
+
+enum acpi_s3pt_type {
+	ACPI_S3PT_TYPE_RESUME = 0,
+	ACPI_S3PT_TYPE_SUSPEND = 1,
+	ACPI_FPDT_BOOT_PERFORMANCE = 2
+};
+
+struct acpi_s3pt_resume {
+	struct acpi_fpdt_header header;
+	u32 resume_count;
+	u64 full_resume;
+	u64 average_resume;
+};
+
+struct acpi_s3pt_suspend {
+	struct acpi_fpdt_header header;
+	u64 suspend_start;
+	u64 suspend_end;
+};
+
+/*
+ * FPDT Boot Performance Record (Not part of the actual FPDT)
+ */
 struct acpi_fpdt_boot {
 	struct acpi_fpdt_header header;
 	u8 reserved[4];
@@ -215,52 +266,6 @@
 	u64 exit_services_exit;
 };
 
-/* 1: S3 Performance Table Pointer Record */
-
-struct acpi_fpdt_s3pt_ptr {
-	struct acpi_fpdt_header header;
-	u8 reserved[4];
-	u64 address;
-};
-
-/*
- * S3PT - S3 Performance Table. This table is pointed to by the
- * FPDT S3 Pointer Record above.
- */
-struct acpi_table_s3pt {
-	u8 signature[4];	/* "S3PT" */
-	u32 length;
-};
-
-/*
- * S3PT Subtables
- */
-struct acpi_s3pt_header {
-	u16 type;
-	u8 length;
-	u8 revision;
-};
-
-/* Values for Type field above */
-
-enum acpi_s3pt_type {
-	ACPI_S3PT_TYPE_RESUME = 0,
-	ACPI_S3PT_TYPE_SUSPEND = 1
-};
-
-struct acpi_s3pt_resume {
-	struct acpi_s3pt_header header;
-	u32 resume_count;
-	u64 full_resume;
-	u64 average_resume;
-};
-
-struct acpi_s3pt_suspend {
-	struct acpi_s3pt_header header;
-	u64 suspend_start;
-	u64 suspend_end;
-};
-
 /*******************************************************************************
  *
  * GTDT - Generic Timer Description Table (ACPI 5.1)
@@ -476,7 +481,8 @@
 enum acpi_pcct_type {
 	ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0,
 	ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1,
-	ACPI_PCCT_TYPE_RESERVED = 2	/* 2 and greater are reserved */
+	ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2,	/* ACPI 6.1 */
+	ACPI_PCCT_TYPE_RESERVED = 3	/* 3 and greater are reserved */
 };
 
 /*
@@ -515,6 +521,26 @@
 	u16 min_turnaround_time;
 };
 
+/* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */
+
+struct acpi_pcct_hw_reduced_type2 {
+	struct acpi_subtable_header header;
+	u32 doorbell_interrupt;
+	u8 flags;
+	u8 reserved;
+	u64 base_address;
+	u64 length;
+	struct acpi_generic_address doorbell_register;
+	u64 preserve_mask;
+	u64 write_mask;
+	u32 latency;
+	u32 max_access_rate;
+	u16 min_turnaround_time;
+	struct acpi_generic_address doorbell_ack_register;
+	u64 ack_preserve_mask;
+	u64 ack_write_mask;
+};
+
 /* Values for doorbell flags above */
 
 #define ACPI_PCCT_INTERRUPT_POLARITY    (1)
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index db46546..cb389ef 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -630,7 +630,8 @@
 #define ACPI_NOTIFY_SHUTDOWN_REQUEST    (u8) 0x0C
 #define ACPI_NOTIFY_AFFINITY_UPDATE     (u8) 0x0D
 
-#define ACPI_NOTIFY_MAX                 0x0D
+#define ACPI_GENERIC_NOTIFY_MAX         0x0D
+#define ACPI_SPECIFIC_NOTIFY_MAX        0x84
 
 /*
  * Types associated with ACPI names and objects. The first group of
@@ -892,7 +893,7 @@
 
 /* Sleep function dispatch */
 
-typedef acpi_status(*acpi_sleep_function) (u8 sleep_state);
+typedef acpi_status (*acpi_sleep_function) (u8 sleep_state);
 
 struct acpi_sleep_functions {
 	acpi_sleep_function legacy_function;
@@ -994,7 +995,7 @@
  * Predefined Namespace items
  */
 struct acpi_predefined_names {
-	char *name;
+	const char *name;
 	u8 type;
 	char *val;
 };
@@ -1071,20 +1072,21 @@
 typedef
 void (*acpi_object_handler) (acpi_handle object, void *data);
 
-typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function);
+typedef
+acpi_status (*acpi_init_handler) (acpi_handle object, u32 function);
 
 #define ACPI_INIT_DEVICE_INI        1
 
 typedef
-acpi_status(*acpi_exception_handler) (acpi_status aml_status,
-				      acpi_name name,
-				      u16 opcode,
-				      u32 aml_offset, void *context);
+acpi_status (*acpi_exception_handler) (acpi_status aml_status,
+				       acpi_name name,
+				       u16 opcode,
+				       u32 aml_offset, void *context);
 
 /* Table Event handler (Load, load_table, etc.) and types */
 
 typedef
-acpi_status(*acpi_table_handler) (u32 event, void *table, void *context);
+acpi_status (*acpi_table_handler) (u32 event, void *table, void *context);
 
 #define ACPI_TABLE_LOAD             0x0
 #define ACPI_TABLE_UNLOAD           0x1
@@ -1093,12 +1095,12 @@
 /* Address Spaces (For Operation Regions) */
 
 typedef
-acpi_status(*acpi_adr_space_handler) (u32 function,
-				      acpi_physical_address address,
-				      u32 bit_width,
-				      u64 *value,
-				      void *handler_context,
-				      void *region_context);
+acpi_status (*acpi_adr_space_handler) (u32 function,
+				       acpi_physical_address address,
+				       u32 bit_width,
+				       u64 *value,
+				       void *handler_context,
+				       void *region_context);
 
 #define ACPI_DEFAULT_HANDLER            NULL
 
@@ -1111,18 +1113,18 @@
 };
 
 typedef
-acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
-				    u32 function,
-				    void *handler_context,
-				    void **region_context);
+acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle,
+				     u32 function,
+				     void *handler_context,
+				     void **region_context);
 
 #define ACPI_REGION_ACTIVATE    0
 #define ACPI_REGION_DEACTIVATE  1
 
 typedef
-acpi_status(*acpi_walk_callback) (acpi_handle object,
-				  u32 nesting_level,
-				  void *context, void **return_value);
+acpi_status (*acpi_walk_callback) (acpi_handle object,
+				   u32 nesting_level,
+				   void *context, void **return_value);
 
 typedef
 u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported);
@@ -1227,7 +1229,7 @@
  * struct acpi_memory_list is used only if the ACPICA local cache is enabled
  */
 struct acpi_memory_list {
-	char *list_name;
+	const char *list_name;
 	void *list_head;
 	u16 object_size;
 	u16 max_depth;
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 7c0595b..86b5a84 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -66,17 +66,28 @@
  *
  *****************************************************************************/
 
+/* Common application configuration. All single threaded except for acpi_exec. */
+
+#if (defined ACPI_ASL_COMPILER) || \
+	(defined ACPI_BIN_APP)      || \
+	(defined ACPI_DUMP_APP)     || \
+	(defined ACPI_HELP_APP)     || \
+	(defined ACPI_NAMES_APP)    || \
+	(defined ACPI_SRC_APP)      || \
+	(defined ACPI_XTRACT_APP)   || \
+	(defined ACPI_EXAMPLE_APP)
+#define ACPI_APPLICATION
+#define ACPI_SINGLE_THREADED
+#endif
+
 /* iASL configuration */
 
 #ifdef ACPI_ASL_COMPILER
-#define ACPI_APPLICATION
 #define ACPI_DEBUG_OUTPUT
 #define ACPI_CONSTANT_EVAL_ONLY
 #define ACPI_LARGE_NAMESPACE_NODE
 #define ACPI_DATA_TABLE_DISASSEMBLY
-#define ACPI_SINGLE_THREADED
 #define ACPI_32BIT_PHYSICAL_ADDRESS
-
 #define ACPI_DISASSEMBLER 1
 #endif
 
@@ -89,21 +100,6 @@
 #define ACPI_DBG_TRACK_ALLOCATIONS
 #endif
 
-/*
- * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example
- * configuration. All single threaded.
- */
-#if (defined ACPI_BIN_APP)      || \
-	(defined ACPI_DUMP_APP)     || \
-	(defined ACPI_HELP_APP)     || \
-	(defined ACPI_NAMES_APP)    || \
-	(defined ACPI_SRC_APP)      || \
-	(defined ACPI_XTRACT_APP)   || \
-	(defined ACPI_EXAMPLE_APP)
-#define ACPI_APPLICATION
-#define ACPI_SINGLE_THREADED
-#endif
-
 /* acpi_help configuration. Error messages disabled. */
 
 #ifdef ACPI_HELP_APP
@@ -138,11 +134,16 @@
 #define ACPI_REDUCED_HARDWARE 1
 #endif
 
-/* Linkable ACPICA library */
+/* Linkable ACPICA library. Two versions, one with full debug. */
 
 #ifdef ACPI_LIBRARY
 #define ACPI_USE_LOCAL_CACHE
-#define ACPI_FULL_DEBUG
+#define ACPI_DEBUGGER 1
+#define ACPI_DISASSEMBLER 1
+
+#ifdef _DEBUG
+#define ACPI_DEBUG_OUTPUT
+#endif
 #endif
 
 /* Common for all ACPICA applications */
@@ -218,6 +219,9 @@
 #elif defined(__HAIKU__)
 #include "achaiku.h"
 
+#elif defined(__QNX__)
+#include "acqnx.h"
+
 #else
 
 /* Unknown environment */
diff --git a/include/acpi/platform/acmsvcex.h b/include/acpi/platform/acmsvcex.h
deleted file mode 100644
index 28084a1..0000000
--- a/include/acpi/platform/acmsvcex.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/******************************************************************************
- *
- * Name: acmsvcex.h - Extra VC specific defines, etc.
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2016, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACMSVCEX_H__
-#define __ACMSVCEX_H__
-
-/* Debug support. */
-
-#ifdef _DEBUG
-#define _CRTDBG_MAP_ALLOC	/* Enables specific file/lineno for leaks */
-#include <crtdbg.h>
-#endif
-
-#endif				/* __ACMSVCEX_H__ */
diff --git a/include/acpi/platform/acwinex.h b/include/acpi/platform/acwinex.h
deleted file mode 100644
index a00b3e4..0000000
--- a/include/acpi/platform/acwinex.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- *
- * Name: acwinex.h - Extra OS specific defines, etc.
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2016, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- *    substantially similar to the "NO WARRANTY" disclaimer below
- *    ("Disclaimer") and any redistribution must be conditioned upon
- *    including a substantially similar Disclaimer requirement for further
- *    binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- *    of any contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACWINEX_H__
-#define __ACWINEX_H__
-
-/* Windows uses VC */
-
-#endif				/* __ACWINEX_H__ */
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 5ca2f2c..70a41f7 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -4,6 +4,19 @@
 #include <linux/errno.h> /* for ENODEV */
 #include <linux/types.h> /* for bool */
 
+struct acpi_video_brightness_flags {
+	u8 _BCL_no_ac_battery_levels:1;	/* no AC/Battery levels in _BCL */
+	u8 _BCL_reversed:1;		/* _BCL package is in a reversed order */
+	u8 _BQC_use_index:1;		/* _BQC returns an index value */
+};
+
+struct acpi_video_device_brightness {
+	int curr;
+	int count;
+	int *levels;
+	struct acpi_video_brightness_flags flags;
+};
+
 struct acpi_device;
 
 #define ACPI_VIDEO_CLASS	"video"
@@ -37,6 +50,8 @@
  * may change over time and should not be cached.
  */
 extern bool acpi_video_handles_brightness_key_presses(void);
+extern int acpi_video_get_levels(struct acpi_device *device,
+				 struct acpi_video_device_brightness **dev_br);
 #else
 static inline int acpi_video_register(void) { return 0; }
 static inline void acpi_video_unregister(void) { return; }
@@ -56,6 +71,11 @@
 {
 	return false;
 }
+static inline int acpi_video_get_levels(struct acpi_device *device,
+			struct acpi_video_device_brightness **dev_br)
+{
+	return -ENODEV;
+}
 #endif
 
 #endif
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index e56272c..bf2d34c 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -108,11 +108,15 @@
 	u32 val;
 
 	preempt_disable();
-	if (unlikely(get_user(val, uaddr) != 0))
+	if (unlikely(get_user(val, uaddr) != 0)) {
+		preempt_enable();
 		return -EFAULT;
+	}
 
-	if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+		preempt_enable();
 		return -EFAULT;
+	}
 
 	*uval = val;
 	preempt_enable();
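[Note on the asm-generic/futex.h hunk: it adds the preempt_enable() calls that were missing on the two -EFAULT returns; without them a faulting access would leave the task with preemption disabled. One way to keep such sections balanced by construction is a single exit path, sketched here with hypothetical steps:]

    /* Sketch: one preempt_enable() that runs on every return path. */
    static int guarded_op(void)
    {
        int ret;

        preempt_disable();
        ret = step_one();           /* hypothetical */
        if (ret)
            goto out;
        ret = step_two();           /* hypothetical */
    out:
        preempt_enable();           /* matches the disable on all paths */
        return ret;
    }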
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index eed3bbe..002b81f 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -191,7 +191,7 @@
 #define readl_relaxed readl
 #endif
 
-#ifndef readq_relaxed
+#if defined(readq) && !defined(readq_relaxed)
 #define readq_relaxed readq
 #endif
 
@@ -207,7 +207,7 @@
 #define writel_relaxed writel
 #endif
 
-#ifndef writeq_relaxed
+#if defined(writeq) && !defined(writeq_relaxed)
 #define writeq_relaxed writeq
 #endif
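[Note on the asm-generic/io.h hunk: tying the readq_relaxed/writeq_relaxed aliases to the base accessor avoids defining them on 32-bit architectures that provide no readq/writeq at all. A driver that still wants 64-bit MMIO on such targets can opt into a split fallback; this sketch assumes the lo-hi helper header behaves as expected, performing two 32-bit accesses where no native readq exists:]

    /* Sketch: explicit 64-bit MMIO fallback for builds without native readq. */
    #include <linux/io-64-nonatomic-lo-hi.h>

    static u64 read_counter(void __iomem *base)
    {
        return readq(base);     /* native on 64-bit, lo-then-hi pair otherwise */
    }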
 
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index d6d5dc9..3fc94a0 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -53,7 +53,7 @@
 /*
  * lock for writing
  */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void __down_write(struct rw_semaphore *sem)
 {
 	long tmp;
 
@@ -63,9 +63,16 @@
 		rwsem_down_write_failed(sem);
 }
 
-static inline void __down_write(struct rw_semaphore *sem)
+static inline int __down_write_killable(struct rw_semaphore *sem)
 {
-	__down_write_nested(sem, 0);
+	long tmp;
+
+	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+				     (atomic_long_t *)&sem->count);
+	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+	return 0;
 }
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
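[Note on the asm-generic/rwsem.h hunk: __down_write_killable() mirrors __down_write() but backs out with -EINTR when the sleeping writer receives a fatal signal, via rwsem_down_write_failed_killable(). Callers are expected to use the higher-level down_write_killable() wrapper (assumed here) and treat a non-zero return as an interrupted lock attempt:]

    /* Sketch: consuming a killable write lock. */
    static int with_write_lock(struct rw_semaphore *sem)
    {
        if (down_write_killable(sem))
            return -EINTR;      /* interrupted by a fatal signal */
        /* ... critical section ... */
        up_write(sem);
        return 0;
    }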
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index c9ccafa..e74072d 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -29,4 +29,18 @@
 #define __NR_seccomp_sigreturn		__NR_rt_sigreturn
 #endif
 
+#ifdef CONFIG_COMPAT
+#ifndef get_compat_mode1_syscalls
+static inline const int *get_compat_mode1_syscalls(void)
+{
+	static const int mode1_syscalls_32[] = {
+		__NR_seccomp_read_32, __NR_seccomp_write_32,
+		__NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
+		0, /* null terminated */
+	};
+	return mode1_syscalls_32;
+}
+#endif
+#endif /* CONFIG_COMPAT */
+
 #endif /* _ASM_GENERIC_SECCOMP_H */
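[Note on the asm-generic/seccomp.h hunk: the #ifndef get_compat_mode1_syscalls guard lets an architecture whose compat syscall numbers do not fit the generic table supply its own zero-terminated list from asm/seccomp.h before this header is pulled in. Following the usual asm-generic override convention, that would look roughly like the sketch below; the syscall macros are placeholders for arch-specific values:]

    /* Sketch: arch override of the strict-mode compat syscall table. */
    #define get_compat_mode1_syscalls get_compat_mode1_syscalls
    static inline const int *get_compat_mode1_syscalls(void)
    {
        static const int mode1_syscalls_32[] = {
            __NR_compat_read, __NR_compat_write,    /* placeholders */
            __NR_compat_exit, __NR_compat_sigreturn,
            0, /* null terminated */
        };
        return mode1_syscalls_32;
    }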
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 3d1a3af..a2508a8 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -17,21 +17,6 @@
 struct siginfo;
 void do_schedule_next_timer(struct siginfo *info);
 
-#ifndef HAVE_ARCH_COPY_SIGINFO
-
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
-	if (from->si_code < 0)
-		memcpy(to, from, sizeof(*to));
-	else
-		/* _sigchld is currently the largest know union member */
-		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
-}
-
-#endif
-
 extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
 
 #endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 339125b..6a67ab9 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -245,7 +245,9 @@
 
 #define INIT_TASK_DATA(align)						\
 	. = ALIGN(align);						\
-	*(.data..init_task)
+	VMLINUX_SYMBOL(__start_init_task) = .;				\
+	*(.data..init_task)						\
+	VMLINUX_SYMBOL(__end_init_task) = .;
 
 /*
  * Read only Data
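
The INIT_TASK_DATA change brackets the init task data with linker-provided symbols. From C such symbols are conventionally declared as character arrays whose addresses mark the section bounds; a hedged sketch of a consumer (the size helper is hypothetical):

	extern char __start_init_task[], __end_init_task[];

	static inline unsigned long init_task_data_size(void)
	{
		return __end_init_task - __start_init_task;
	}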
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 25d0914..caedb74 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -49,11 +49,16 @@
 
 #define ARCH_TIMER_EVT_STREAM_FREQ	10000	/* 100us */
 
+struct arch_timer_kvm_info {
+	struct timecounter timecounter;
+	int virtual_irq;
+};
+
 #ifdef CONFIG_ARM_ARCH_TIMER
 
 extern u32 arch_timer_get_rate(void);
 extern u64 (*arch_timer_read_counter)(void);
-extern struct timecounter *arch_timer_get_timecounter(void);
+extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void);
 
 #else
 
@@ -67,11 +72,6 @@
 	return 0;
 }
 
-static inline struct timecounter *arch_timer_get_timecounter(void)
-{
-	return NULL;
-}
-
 #endif
 
 #endif
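
arch_timer_get_kvm_info() replaces arch_timer_get_timecounter() and bundles the two things a hypervisor needs — the timecounter and the virtual timer interrupt — into one structure. A consumer sketch (hypothetical, loosely modelled on how a virtual-timer probe would use it):

	static int example_probe_vtimer(void)
	{
		struct arch_timer_kvm_info *info = arch_timer_get_kvm_info();

		if (info->virtual_irq <= 0)	/* no virtual timer interrupt wired up */
			return -ENODEV;

		/* info->timecounter feeds the guest timekeeping code */
		return info->virtual_irq;
	}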
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 957bb87..75174f8 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -405,8 +405,7 @@
  * encrypt and decrypt API calls. During the allocation, the provided aead
  * handle is registered in the request data structure.
  *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- *	   of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
  */
 static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
 						      gfp_t gfp)
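
The kernel-doc corrections in aead.h, hash.h and skcipher.h all state the same contract: these request allocators return NULL on failure, never an ERR_PTR(). A call site therefore uses a plain NULL test (hypothetical caller):

	static int example_alloc_aead_req(struct crypto_aead *tfm)
	{
		struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);

		if (!req)	/* NULL on allocation failure, not an error pointer */
			return -ENOMEM;

		/* ... set up and submit the request ... */
		aead_request_free(req);
		return 0;
	}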
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 1969f14..2660588 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -547,8 +547,7 @@
  * the allocation, the provided ahash handle
  * is registered in the request data structure.
  *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- *	   of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
  */
 static inline struct ahash_request *ahash_request_alloc(
 	struct crypto_ahash *tfm, gfp_t gfp)
diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h
index 441aff9..583f199 100644
--- a/include/crypto/pkcs7.h
+++ b/include/crypto/pkcs7.h
@@ -12,6 +12,7 @@
 #ifndef _CRYPTO_PKCS7_H
 #define _CRYPTO_PKCS7_H
 
+#include <linux/verification.h>
 #include <crypto/public_key.h>
 
 struct key;
@@ -26,14 +27,13 @@
 
 extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7,
 				  const void **_data, size_t *_datalen,
-				  bool want_wrapper);
+				  size_t *_headerlen);
 
 /*
  * pkcs7_trust.c
  */
 extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
-				struct key *trust_keyring,
-				bool *_trusted);
+				struct key *trust_keyring);
 
 /*
  * pkcs7_verify.c
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index aa730ea..882ca0e1 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,20 +15,6 @@
 #define _LINUX_PUBLIC_KEY_H
 
 /*
- * The use to which an asymmetric key is being put.
- */
-enum key_being_used_for {
-	VERIFYING_MODULE_SIGNATURE,
-	VERIFYING_FIRMWARE_SIGNATURE,
-	VERIFYING_KEXEC_PE_SIGNATURE,
-	VERIFYING_KEY_SIGNATURE,
-	VERIFYING_KEY_SELF_SIGNATURE,
-	VERIFYING_UNSPECIFIED_SIGNATURE,
-	NR__KEY_BEING_USED_FOR
-};
-extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
-
-/*
  * Cryptographic data for the public-key subtype of the asymmetric key type.
  *
  * Note that this may include private part of the key as well as the public
@@ -41,12 +27,13 @@
 	const char *pkey_algo;
 };
 
-extern void public_key_destroy(void *payload);
+extern void public_key_free(struct public_key *key);
 
 /*
  * Public key cryptography signature data
  */
 struct public_key_signature {
+	struct asymmetric_key_id *auth_ids[2];
 	u8 *s;			/* Signature */
 	u32 s_size;		/* Number of bytes in signature */
 	u8 *digest;
@@ -55,17 +42,21 @@
 	const char *hash_algo;
 };
 
+extern void public_key_signature_free(struct public_key_signature *sig);
+
 extern struct asymmetric_key_subtype public_key_subtype;
+
 struct key;
+struct key_type;
+union key_payload;
+
+extern int restrict_link_by_signature(struct key *trust_keyring,
+				      const struct key_type *type,
+				      const union key_payload *payload);
+
 extern int verify_signature(const struct key *key,
 			    const struct public_key_signature *sig);
 
-struct asymmetric_key_id;
-extern struct key *x509_request_asymmetric_key(struct key *keyring,
-					       const struct asymmetric_key_id *id,
-					       const struct asymmetric_key_id *skid,
-					       bool partial);
-
 int public_key_verify_signature(const struct public_key *pkey,
 				const struct public_key_signature *sig);
 
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 905490c..0f987f5 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -425,8 +425,7 @@
  * encrypt and decrypt API calls. During the allocation, the provided skcipher
  * handle is registered in the request data structure.
  *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- *	   of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
  */
 static inline struct skcipher_request *skcipher_request_alloc(
 	struct crypto_skcipher *tfm, gfp_t gfp)
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 461a055..cebecff 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -39,6 +39,8 @@
 {
 #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	return false;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+	return false;
 #else
 	return true;
 #endif
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index afae231..055a08d 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -92,7 +92,7 @@
  */
 struct ttm_bus_placement {
 	void		*addr;
-	unsigned long	base;
+	phys_addr_t	base;
 	unsigned long	size;
 	unsigned long	offset;
 	bool		is_iomem;
diff --git a/include/dt-bindings/clock/ath79-clk.h b/include/dt-bindings/clock/ath79-clk.h
new file mode 100644
index 0000000..27359ad
--- /dev/null
+++ b/include/dt-bindings/clock/ath79-clk.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2014, 2016 Antony Pavlov <antonynpavlov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_ATH79_CLK_H
+#define __DT_BINDINGS_ATH79_CLK_H
+
+#define ATH79_CLK_CPU		0
+#define ATH79_CLK_DDR		1
+#define ATH79_CLK_AHB		2
+
+#define ATH79_CLK_END		3
+
+#endif /* __DT_BINDINGS_ATH79_CLK_H */
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index 63d01c1..c796ff0 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -79,6 +79,8 @@
 #define CLK_MOUT_CORE			58
 #define CLK_MOUT_APLL			59
 #define CLK_MOUT_ACLK_266_SUB		60
+#define CLK_MOUT_UART2			61
+#define CLK_MOUT_MMC2			62
 
 /* Dividers */
 #define CLK_DIV_GPL			64
@@ -127,6 +129,9 @@
 #define CLK_DIV_CORE			107
 #define CLK_DIV_HPM			108
 #define CLK_DIV_COPY			109
+#define CLK_DIV_UART2			110
+#define CLK_DIV_MMC2_PRE		111
+#define CLK_DIV_MMC2			112
 
 /* Gates */
 #define CLK_ASYNC_G3D			128
@@ -223,6 +228,8 @@
 #define CLK_BLOCK_MFC			219
 #define CLK_BLOCK_CAM			220
 #define CLK_SMIES			221
+#define CLK_UART2			222
+#define CLK_SDMMC2			223
 
 /* Special clocks */
 #define CLK_SCLK_JPEG			224
@@ -249,12 +256,14 @@
 #define CLK_SCLK_SPI0			245
 #define CLK_SCLK_UART1			246
 #define CLK_SCLK_UART0			247
+#define CLK_SCLK_UART2			248
+#define CLK_SCLK_MMC2			249
 
 /*
  * Total number of clocks of main CMU.
  * NOTE: Must be equal to last clock ID increased by one.
  */
-#define CLK_NR_CLKS			248
+#define CLK_NR_CLKS			250
 
 /*
  * CMU DMC
diff --git a/include/dt-bindings/clock/microchip,pic32-clock.h b/include/dt-bindings/clock/microchip,pic32-clock.h
new file mode 100644
index 0000000..184647a6
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,pic32-clock.h
@@ -0,0 +1,42 @@
+/*
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_
+#define _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_
+
+/* clock output indices */
+#define POSCCLK		0
+#define FRCCLK		1
+#define BFRCCLK		2
+#define LPRCCLK		3
+#define SOSCCLK		4
+#define FRCDIVCLK	5
+#define PLLCLK		6
+#define SCLK		7
+#define PB1CLK		8
+#define PB2CLK		9
+#define PB3CLK		10
+#define PB4CLK		11
+#define PB5CLK		12
+#define PB6CLK		13
+#define PB7CLK		14
+#define REF1CLK		15
+#define REF2CLK		16
+#define REF3CLK		17
+#define REF4CLK		18
+#define REF5CLK		19
+#define UPLLCLK		20
+#define MAXCLKS		21
+
+#endif	/* _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index 7b1ad89..fa5e8da 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -66,6 +66,7 @@
 #define R8A7790_CLK_IIC2		0
 #define R8A7790_CLK_TPU0		4
 #define R8A7790_CLK_MMCIF1		5
+#define R8A7790_CLK_SCIF2		10
 #define R8A7790_CLK_SDHI3		11
 #define R8A7790_CLK_SDHI2		12
 #define R8A7790_CLK_SDHI1		13
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index f843de6..4d3ecd6 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -21,6 +21,7 @@
 #define R8A7794_CLK_SDH			6
 #define R8A7794_CLK_SD0			7
 #define R8A7794_CLK_Z			8
+#define R8A7794_CLK_RCAN		9
 
 /* MSTP0 */
 #define R8A7794_CLK_MSIOF0		0
@@ -56,6 +57,8 @@
 #define R8A7794_CLK_SDHI1		12
 #define R8A7794_CLK_SDHI0		14
 #define R8A7794_CLK_MMCIF0		15
+#define R8A7794_CLK_IIC0		18
+#define R8A7794_CLK_IIC1		23
 #define R8A7794_CLK_CMT1		29
 #define R8A7794_CLK_USBDMAC0		30
 #define R8A7794_CLK_USBDMAC1		31
@@ -95,6 +98,8 @@
 #define R8A7794_CLK_GPIO2		10
 #define R8A7794_CLK_GPIO1		11
 #define R8A7794_CLK_GPIO0		12
+#define R8A7794_CLK_RCAN1		15
+#define R8A7794_CLK_RCAN0		16
 #define R8A7794_CLK_QSPI_MOD		17
 #define R8A7794_CLK_I2C5		25
 #define R8A7794_CLK_I2C4		27
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
new file mode 100644
index 0000000..50a44cf
--- /dev/null
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -0,0 +1,755 @@
+/*
+ * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
+ * Author: Xing Zheng <zhengxing@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3399_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3399_H
+
+/* core clocks */
+#define PLL_APLLL			1
+#define PLL_APLLB			2
+#define PLL_DPLL			3
+#define PLL_CPLL			4
+#define PLL_GPLL			5
+#define PLL_NPLL			6
+#define PLL_VPLL			7
+#define ARMCLKL				8
+#define ARMCLKB				9
+
+/* sclk gates (special clocks) */
+#define SCLK_I2C1			65
+#define SCLK_I2C2			66
+#define SCLK_I2C3			67
+#define SCLK_I2C5			68
+#define SCLK_I2C6			69
+#define SCLK_I2C7			70
+#define SCLK_SPI0			71
+#define SCLK_SPI1			72
+#define SCLK_SPI2			73
+#define SCLK_SPI4			74
+#define SCLK_SPI5			75
+#define SCLK_SDMMC			76
+#define SCLK_SDIO			77
+#define SCLK_EMMC			78
+#define SCLK_TSADC			79
+#define SCLK_SARADC			80
+#define SCLK_UART0			81
+#define SCLK_UART1			82
+#define SCLK_UART2			83
+#define SCLK_UART3			84
+#define SCLK_SPDIF_8CH			85
+#define SCLK_I2S0_8CH			86
+#define SCLK_I2S1_8CH			87
+#define SCLK_I2S2_8CH			88
+#define SCLK_I2S_8CH_OUT		89
+#define SCLK_TIMER00			90
+#define SCLK_TIMER01			91
+#define SCLK_TIMER02			92
+#define SCLK_TIMER03			93
+#define SCLK_TIMER04			94
+#define SCLK_TIMER05			95
+#define SCLK_TIMER06			96
+#define SCLK_TIMER07			97
+#define SCLK_TIMER08			98
+#define SCLK_TIMER09			99
+#define SCLK_TIMER10			100
+#define SCLK_TIMER11			101
+#define SCLK_MACREF			102
+#define SCLK_MAC_RX			103
+#define SCLK_MAC_TX			104
+#define SCLK_MAC			105
+#define SCLK_MACREF_OUT			106
+#define SCLK_VOP0_PWM			107
+#define SCLK_VOP1_PWM			108
+#define SCLK_RGA_CORE			109
+#define SCLK_ISP0			110
+#define SCLK_ISP1			111
+#define SCLK_HDMI_CEC			112
+#define SCLK_HDMI_SFR			113
+#define SCLK_DP_CORE			114
+#define SCLK_PVTM_CORE_L		115
+#define SCLK_PVTM_CORE_B		116
+#define SCLK_PVTM_GPU			117
+#define SCLK_PVTM_DDR			118
+#define SCLK_MIPIDPHY_REF		119
+#define SCLK_MIPIDPHY_CFG		120
+#define SCLK_HSICPHY			121
+#define SCLK_USBPHY480M			122
+#define SCLK_USB2PHY0_REF		123
+#define SCLK_USB2PHY1_REF		124
+#define SCLK_UPHY0_TCPDPHY_REF		125
+#define SCLK_UPHY0_TCPDCORE		126
+#define SCLK_UPHY1_TCPDPHY_REF		127
+#define SCLK_UPHY1_TCPDCORE		128
+#define SCLK_USB3OTG0_REF		129
+#define SCLK_USB3OTG1_REF		130
+#define SCLK_USB3OTG0_SUSPEND		131
+#define SCLK_USB3OTG1_SUSPEND		132
+#define SCLK_CRYPTO0			133
+#define SCLK_CRYPTO1			134
+#define SCLK_CCI_TRACE			135
+#define SCLK_CS				136
+#define SCLK_CIF_OUT			137
+#define SCLK_PCIEPHY_REF		138
+#define SCLK_PCIE_CORE			139
+#define SCLK_M0_PERILP			140
+#define SCLK_M0_PERILP_DEC		141
+#define SCLK_CM0S			142
+#define SCLK_DBG_NOC			143
+#define SCLK_DBG_PD_CORE_B		144
+#define SCLK_DBG_PD_CORE_L		145
+#define SCLK_DFIMON0_TIMER		146
+#define SCLK_DFIMON1_TIMER		147
+#define SCLK_INTMEM0			148
+#define SCLK_INTMEM1			149
+#define SCLK_INTMEM2			150
+#define SCLK_INTMEM3			151
+#define SCLK_INTMEM4			152
+#define SCLK_INTMEM5			153
+#define SCLK_SDMMC_DRV			154
+#define SCLK_SDMMC_SAMPLE		155
+#define SCLK_SDIO_DRV			156
+#define SCLK_SDIO_SAMPLE		157
+#define SCLK_VDU_CORE			158
+#define SCLK_VDU_CA			159
+#define SCLK_PCIE_PM			160
+#define SCLK_SPDIF_REC_DPTX		161
+#define SCLK_DPHY_PLL			162
+#define SCLK_DPHY_TX0_CFG		163
+#define SCLK_DPHY_TX1RX1_CFG		164
+#define SCLK_DPHY_RX0_CFG		165
+#define SCLK_RMII_SRC			166
+#define SCLK_PCIEPHY_REF100M		167
+
+#define DCLK_VOP0			180
+#define DCLK_VOP1			181
+#define DCLK_VOP0_DIV			182
+#define DCLK_VOP1_DIV			183
+#define DCLK_M0_PERILP			184
+
+#define FCLK_CM0S			190
+
+/* aclk gates */
+#define ACLK_PERIHP			192
+#define ACLK_PERIHP_NOC			193
+#define ACLK_PERILP0			194
+#define ACLK_PERILP0_NOC		195
+#define ACLK_PERF_PCIE			196
+#define ACLK_PCIE			197
+#define ACLK_INTMEM			198
+#define ACLK_TZMA			199
+#define ACLK_DCF			200
+#define ACLK_CCI			201
+#define ACLK_CCI_NOC0			202
+#define ACLK_CCI_NOC1			203
+#define ACLK_CCI_GRF			204
+#define ACLK_CENTER			205
+#define ACLK_CENTER_MAIN_NOC		206
+#define ACLK_CENTER_PERI_NOC		207
+#define ACLK_GPU			208
+#define ACLK_PERF_GPU			209
+#define ACLK_GPU_GRF			210
+#define ACLK_DMAC0_PERILP		211
+#define ACLK_DMAC1_PERILP		212
+#define ACLK_GMAC			213
+#define ACLK_GMAC_NOC			214
+#define ACLK_PERF_GMAC			215
+#define ACLK_VOP0_NOC			216
+#define ACLK_VOP0			217
+#define ACLK_VOP1_NOC			218
+#define ACLK_VOP1			219
+#define ACLK_RGA			220
+#define ACLK_RGA_NOC			221
+#define ACLK_HDCP			222
+#define ACLK_HDCP_NOC			223
+#define ACLK_HDCP22			224
+#define ACLK_IEP			225
+#define ACLK_IEP_NOC			226
+#define ACLK_VIO			227
+#define ACLK_VIO_NOC			228
+#define ACLK_ISP0			229
+#define ACLK_ISP1			230
+#define ACLK_ISP0_NOC			231
+#define ACLK_ISP1_NOC			232
+#define ACLK_ISP0_WRAPPER		233
+#define ACLK_ISP1_WRAPPER		234
+#define ACLK_VCODEC			235
+#define ACLK_VCODEC_NOC			236
+#define ACLK_VDU			237
+#define ACLK_VDU_NOC			238
+#define ACLK_PERI			239
+#define ACLK_EMMC			240
+#define ACLK_EMMC_CORE			241
+#define ACLK_EMMC_NOC			242
+#define ACLK_EMMC_GRF			243
+#define ACLK_USB3			244
+#define ACLK_USB3_NOC			245
+#define ACLK_USB3OTG0			246
+#define ACLK_USB3OTG1			247
+#define ACLK_USB3_RKSOC_AXI_PERF	248
+#define ACLK_USB3_GRF			249
+#define ACLK_GIC			250
+#define ACLK_GIC_NOC			251
+#define ACLK_GIC_ADB400_CORE_L_2_GIC	252
+#define ACLK_GIC_ADB400_CORE_B_2_GIC	253
+#define ACLK_GIC_ADB400_GIC_2_CORE_L	254
+#define ACLK_GIC_ADB400_GIC_2_CORE_B	255
+#define ACLK_CORE_ADB400_CORE_L_2_CCI500 256
+#define ACLK_CORE_ADB400_CORE_B_2_CCI500 257
+#define ACLK_ADB400M_PD_CORE_L		258
+#define ACLK_ADB400M_PD_CORE_B		259
+#define ACLK_PERF_CORE_L		260
+#define ACLK_PERF_CORE_B		261
+#define ACLK_GIC_PRE			262
+#define ACLK_VOP0_PRE			263
+#define ACLK_VOP1_PRE			264
+
+/* pclk gates */
+#define PCLK_PERIHP			320
+#define PCLK_PERIHP_NOC			321
+#define PCLK_PERILP0			322
+#define PCLK_PERILP1			323
+#define PCLK_PERILP1_NOC		324
+#define PCLK_PERILP_SGRF		325
+#define PCLK_PERIHP_GRF			326
+#define PCLK_PCIE			327
+#define PCLK_SGRF			328
+#define PCLK_INTR_ARB			329
+#define PCLK_CENTER_MAIN_NOC		330
+#define PCLK_CIC			331
+#define PCLK_COREDBG_B			332
+#define PCLK_COREDBG_L			333
+#define PCLK_DBG_CXCS_PD_CORE_B		334
+#define PCLK_DCF			335
+#define PCLK_GPIO2			336
+#define PCLK_GPIO3			337
+#define PCLK_GPIO4			338
+#define PCLK_GRF			339
+#define PCLK_HSICPHY			340
+#define PCLK_I2C1			341
+#define PCLK_I2C2			342
+#define PCLK_I2C3			343
+#define PCLK_I2C5			344
+#define PCLK_I2C6			345
+#define PCLK_I2C7			346
+#define PCLK_SPI0			347
+#define PCLK_SPI1			348
+#define PCLK_SPI2			349
+#define PCLK_SPI4			350
+#define PCLK_SPI5			351
+#define PCLK_UART0			352
+#define PCLK_UART1			353
+#define PCLK_UART2			354
+#define PCLK_UART3			355
+#define PCLK_TSADC			356
+#define PCLK_SARADC			357
+#define PCLK_GMAC			358
+#define PCLK_GMAC_NOC			359
+#define PCLK_TIMER0			360
+#define PCLK_TIMER1			361
+#define PCLK_EDP			362
+#define PCLK_EDP_NOC			363
+#define PCLK_EDP_CTRL			364
+#define PCLK_VIO			365
+#define PCLK_VIO_NOC			366
+#define PCLK_VIO_GRF			367
+#define PCLK_MIPI_DSI0			368
+#define PCLK_MIPI_DSI1			369
+#define PCLK_HDCP			370
+#define PCLK_HDCP_NOC			371
+#define PCLK_HDMI_CTRL			372
+#define PCLK_DP_CTRL			373
+#define PCLK_HDCP22			374
+#define PCLK_GASKET			375
+#define PCLK_DDR			376
+#define PCLK_DDR_MON			377
+#define PCLK_DDR_SGRF			378
+#define PCLK_ISP1_WRAPPER		379
+#define PCLK_WDT			380
+#define PCLK_EFUSE1024NS		381
+#define PCLK_EFUSE1024S			382
+#define PCLK_PMU_INTR_ARB		383
+#define PCLK_MAILBOX0			384
+#define PCLK_USBPHY_MUX_G		385
+#define PCLK_UPHY0_TCPHY_G		386
+#define PCLK_UPHY0_TCPD_G		387
+#define PCLK_UPHY1_TCPHY_G		388
+#define PCLK_UPHY1_TCPD_G		389
+#define PCLK_ALIVE			390
+
+/* hclk gates */
+#define HCLK_PERIHP			448
+#define HCLK_PERILP0			449
+#define HCLK_PERILP1			450
+#define HCLK_PERILP0_NOC		451
+#define HCLK_PERILP1_NOC		452
+#define HCLK_M0_PERILP			453
+#define HCLK_M0_PERILP_NOC		454
+#define HCLK_AHB1TOM			455
+#define HCLK_HOST0			456
+#define HCLK_HOST0_ARB			457
+#define HCLK_HOST1			458
+#define HCLK_HOST1_ARB			459
+#define HCLK_HSIC			460
+#define HCLK_SD				461
+#define HCLK_SDMMC			462
+#define HCLK_SDMMC_NOC			463
+#define HCLK_M_CRYPTO0			464
+#define HCLK_M_CRYPTO1			465
+#define HCLK_S_CRYPTO0			466
+#define HCLK_S_CRYPTO1			467
+#define HCLK_I2S0_8CH			468
+#define HCLK_I2S1_8CH			469
+#define HCLK_I2S2_8CH			470
+#define HCLK_SPDIF			471
+#define HCLK_VOP0_NOC			472
+#define HCLK_VOP0			473
+#define HCLK_VOP1_NOC			474
+#define HCLK_VOP1			475
+#define HCLK_ROM			476
+#define HCLK_IEP			477
+#define HCLK_IEP_NOC			478
+#define HCLK_ISP0			479
+#define HCLK_ISP1			480
+#define HCLK_ISP0_NOC			481
+#define HCLK_ISP1_NOC			482
+#define HCLK_ISP0_WRAPPER		483
+#define HCLK_ISP1_WRAPPER		484
+#define HCLK_RGA			485
+#define HCLK_RGA_NOC			486
+#define HCLK_HDCP			487
+#define HCLK_HDCP_NOC			488
+#define HCLK_HDCP22			489
+#define HCLK_VCODEC			490
+#define HCLK_VCODEC_NOC			491
+#define HCLK_VDU			492
+#define HCLK_VDU_NOC			493
+#define HCLK_SDIO			494
+#define HCLK_SDIO_NOC			495
+#define HCLK_SDIOAUDIO_NOC		496
+
+#define CLK_NR_CLKS			(HCLK_SDIOAUDIO_NOC + 1)
+
+/* pmu-clocks indices */
+
+#define PLL_PPLL			1
+
+#define SCLK_32K_SUSPEND_PMU		2
+#define SCLK_SPI3_PMU			3
+#define SCLK_TIMER12_PMU		4
+#define SCLK_TIMER13_PMU		5
+#define SCLK_UART4_PMU			6
+#define SCLK_PVTM_PMU			7
+#define SCLK_WIFI_PMU			8
+#define SCLK_I2C0_PMU			9
+#define SCLK_I2C4_PMU			10
+#define SCLK_I2C8_PMU			11
+
+#define PCLK_SRC_PMU			19
+#define PCLK_PMU			20
+#define PCLK_PMUGRF_PMU			21
+#define PCLK_INTMEM1_PMU		22
+#define PCLK_GPIO0_PMU			23
+#define PCLK_GPIO1_PMU			24
+#define PCLK_SGRF_PMU			25
+#define PCLK_NOC_PMU			26
+#define PCLK_I2C0_PMU			27
+#define PCLK_I2C4_PMU			28
+#define PCLK_I2C8_PMU			29
+#define PCLK_RKPWM_PMU			30
+#define PCLK_SPI3_PMU			31
+#define PCLK_TIMER_PMU			32
+#define PCLK_MAILBOX_PMU		33
+#define PCLK_UART4_PMU			34
+#define PCLK_WDT_M0_PMU			35
+
+#define FCLK_CM0S_SRC_PMU		44
+#define FCLK_CM0S_PMU			45
+#define SCLK_CM0S_PMU			46
+#define HCLK_CM0S_PMU			47
+#define DCLK_CM0S_PMU			48
+#define PCLK_INTR_ARB_PMU		49
+#define HCLK_NOC_PMU			50
+
+#define CLKPMU_NR_CLKS			(HCLK_NOC_PMU + 1)
+
+/* soft-reset indices */
+
+/* cru_softrst_con0 */
+#define SRST_CORE_L0			0
+#define SRST_CORE_B0			1
+#define SRST_CORE_PO_L0			2
+#define SRST_CORE_PO_B0			3
+#define SRST_L2_L			4
+#define SRST_L2_B			5
+#define SRST_ADB_L			6
+#define SRST_ADB_B			7
+#define SRST_A_CCI			8
+#define SRST_A_CCIM0_NOC		9
+#define SRST_A_CCIM1_NOC		10
+#define SRST_DBG_NOC			11
+
+/* cru_softrst_con1 */
+#define SRST_CORE_L0_T			16
+#define SRST_CORE_L1			17
+#define SRST_CORE_L2			18
+#define SRST_CORE_L3			19
+#define SRST_CORE_PO_L0_T		20
+#define SRST_CORE_PO_L1			21
+#define SRST_CORE_PO_L2			22
+#define SRST_CORE_PO_L3			23
+#define SRST_A_ADB400_GIC2COREL		24
+#define SRST_A_ADB400_COREL2GIC		25
+#define SRST_P_DBG_L			26
+#define SRST_L2_L_T			28
+#define SRST_ADB_L_T			29
+#define SRST_A_RKPERF_L			30
+#define SRST_PVTM_CORE_L		31
+
+/* cru_softrst_con2 */
+#define SRST_CORE_B0_T			32
+#define SRST_CORE_B1			33
+#define SRST_CORE_PO_B0_T		36
+#define SRST_CORE_PO_B1			37
+#define SRST_A_ADB400_GIC2COREB		40
+#define SRST_A_ADB400_COREB2GIC		41
+#define SRST_P_DBG_B			42
+#define SRST_L2_B_T			43
+#define SRST_ADB_B_T			45
+#define SRST_A_RKPERF_B			46
+#define SRST_PVTM_CORE_B		47
+
+/* cru_softrst_con3 */
+#define SRST_A_CCI_T			50
+#define SRST_A_CCIM0_NOC_T		51
+#define SRST_A_CCIM1_NOC_T		52
+#define SRST_A_ADB400M_PD_CORE_B_T	53
+#define SRST_A_ADB400M_PD_CORE_L_T	54
+#define SRST_DBG_NOC_T			55
+#define SRST_DBG_CXCS			56
+#define SRST_CCI_TRACE			57
+#define SRST_P_CCI_GRF			58
+
+/* cru_softrst_con4 */
+#define SRST_A_CENTER_MAIN_NOC		64
+#define SRST_A_CENTER_PERI_NOC		65
+#define SRST_P_CENTER_MAIN		66
+#define SRST_P_DDRMON			67
+#define SRST_P_CIC			68
+#define SRST_P_CENTER_SGRF		69
+#define SRST_DDR0_MSCH			70
+#define SRST_DDRCFG0_MSCH		71
+#define SRST_DDR0			72
+#define SRST_DDRPHY0			73
+#define SRST_DDR1_MSCH			74
+#define SRST_DDRCFG1_MSCH		75
+#define SRST_DDR1			76
+#define SRST_DDRPHY1			77
+#define SRST_DDR_CIC			78
+#define SRST_PVTM_DDR			79
+
+/* cru_softrst_con5 */
+#define SRST_A_VCODEC_NOC		80
+#define SRST_A_VCODEC			81
+#define SRST_H_VCODEC_NOC		82
+#define SRST_H_VCODEC			83
+#define SRST_A_VDU_NOC			88
+#define SRST_A_VDU			89
+#define SRST_H_VDU_NOC			90
+#define SRST_H_VDU			91
+#define SRST_VDU_CORE			92
+#define SRST_VDU_CA			93
+
+/* cru_softrst_con6 */
+#define SRST_A_IEP_NOC			96
+#define SRST_A_VOP_IEP			97
+#define SRST_A_IEP			98
+#define SRST_H_IEP_NOC			99
+#define SRST_H_IEP			100
+#define SRST_A_RGA_NOC			102
+#define SRST_A_RGA			103
+#define SRST_H_RGA_NOC			104
+#define SRST_H_RGA			105
+#define SRST_RGA_CORE			106
+#define SRST_EMMC_NOC			108
+#define SRST_EMMC			109
+#define SRST_EMMC_GRF			110
+
+/* cru_softrst_con7 */
+#define SRST_A_PERIHP_NOC		112
+#define SRST_P_PERIHP_GRF		113
+#define SRST_H_PERIHP_NOC		114
+#define SRST_USBHOST0			115
+#define SRST_HOSTC0_AUX			116
+#define SRST_HOST0_ARB			117
+#define SRST_USBHOST1			118
+#define SRST_HOSTC1_AUX			119
+#define SRST_HOST1_ARB			120
+#define SRST_SDIO0			121
+#define SRST_SDMMC			122
+#define SRST_HSIC			123
+#define SRST_HSIC_AUX			124
+#define SRST_AHB1TOM			125
+#define SRST_P_PERIHP_NOC		126
+#define SRST_HSICPHY			127
+
+/* cru_softrst_con8 */
+#define SRST_A_PCIE			128
+#define SRST_P_PCIE			129
+#define SRST_PCIE_CORE			130
+#define SRST_PCIE_MGMT			131
+#define SRST_PCIE_MGMT_STICKY		132
+#define SRST_PCIE_PIPE			133
+#define SRST_PCIE_PM			134
+#define SRST_PCIEPHY			135
+#define SRST_A_GMAC_NOC			136
+#define SRST_A_GMAC			137
+#define SRST_P_GMAC_NOC			138
+#define SRST_P_GMAC_GRF			140
+#define SRST_HSICPHY_POR		142
+#define SRST_HSICPHY_UTMI		143
+
+/* cru_softrst_con9 */
+#define SRST_USB2PHY0_POR		144
+#define SRST_USB2PHY0_UTMI_PORT0	145
+#define SRST_USB2PHY0_UTMI_PORT1	146
+#define SRST_USB2PHY0_EHCIPHY		147
+#define SRST_UPHY0_PIPE_L00		148
+#define SRST_UPHY0			149
+#define SRST_UPHY0_TCPDPWRUP		150
+#define SRST_USB2PHY1_POR		152
+#define SRST_USB2PHY1_UTMI_PORT0	153
+#define SRST_USB2PHY1_UTMI_PORT1	154
+#define SRST_USB2PHY1_EHCIPHY		155
+#define SRST_UPHY1_PIPE_L00		156
+#define SRST_UPHY1			157
+#define SRST_UPHY1_TCPDPWRUP		158
+
+/* cru_softrst_con10 */
+#define SRST_A_PERILP0_NOC		160
+#define SRST_A_DCF			161
+#define SRST_GIC500			162
+#define SRST_DMAC0_PERILP0		163
+#define SRST_DMAC1_PERILP0		164
+#define SRST_TZMA			165
+#define SRST_INTMEM			166
+#define SRST_ADB400_MST0		167
+#define SRST_ADB400_MST1		168
+#define SRST_ADB400_SLV0		169
+#define SRST_ADB400_SLV1		170
+#define SRST_H_PERILP0			171
+#define SRST_H_PERILP0_NOC		172
+#define SRST_ROM			173
+#define SRST_CRYPTO_S			174
+#define SRST_CRYPTO_M			175
+
+/* cru_softrst_con11 */
+#define SRST_P_DCF			176
+#define SRST_CM0S_NOC			177
+#define SRST_CM0S			178
+#define SRST_CM0S_DBG			179
+#define SRST_CM0S_PO			180
+#define SRST_CRYPTO			181
+#define SRST_P_PERILP1_SGRF		182
+#define SRST_P_PERILP1_GRF		183
+#define SRST_CRYPTO1_S			184
+#define SRST_CRYPTO1_M			185
+#define SRST_CRYPTO1			186
+#define SRST_GIC_NOC			188
+#define SRST_SD_NOC			189
+#define SRST_SDIOAUDIO_BRG		190
+
+/* cru_softrst_con12 */
+#define SRST_H_PERILP1			192
+#define SRST_H_PERILP1_NOC		193
+#define SRST_H_I2S0_8CH			194
+#define SRST_H_I2S1_8CH			195
+#define SRST_H_I2S2_8CH			196
+#define SRST_H_SPDIF_8CH		197
+#define SRST_P_PERILP1_NOC		198
+#define SRST_P_EFUSE_1024		199
+#define SRST_P_EFUSE_1024S		200
+#define SRST_P_I2C0			201
+#define SRST_P_I2C1			202
+#define SRST_P_I2C2			203
+#define SRST_P_I2C3			204
+#define SRST_P_I2C4			205
+#define SRST_P_I2C5			206
+#define SRST_P_MAILBOX0			207
+
+/* cru_softrst_con13 */
+#define SRST_P_UART0			208
+#define SRST_P_UART1			209
+#define SRST_P_UART2			210
+#define SRST_P_UART3			211
+#define SRST_P_SARADC			212
+#define SRST_P_TSADC			213
+#define SRST_P_SPI0			214
+#define SRST_P_SPI1			215
+#define SRST_P_SPI2			216
+#define SRST_P_SPI3			217
+#define SRST_P_SPI4			218
+#define SRST_SPI0			219
+#define SRST_SPI1			220
+#define SRST_SPI2			221
+#define SRST_SPI3			222
+#define SRST_SPI4			223
+
+/* cru_softrst_con14 */
+#define SRST_I2S0_8CH			224
+#define SRST_I2S1_8CH			225
+#define SRST_I2S2_8CH			226
+#define SRST_SPDIF_8CH			227
+#define SRST_UART0			228
+#define SRST_UART1			229
+#define SRST_UART2			230
+#define SRST_UART3			231
+#define SRST_TSADC			232
+#define SRST_I2C0			233
+#define SRST_I2C1			234
+#define SRST_I2C2			235
+#define SRST_I2C3			236
+#define SRST_I2C4			237
+#define SRST_I2C5			238
+#define SRST_SDIOAUDIO_NOC		239
+
+/* cru_softrst_con15 */
+#define SRST_A_VIO_NOC			240
+#define SRST_A_HDCP_NOC			241
+#define SRST_A_HDCP			242
+#define SRST_H_HDCP_NOC			243
+#define SRST_H_HDCP			244
+#define SRST_P_HDCP_NOC			245
+#define SRST_P_HDCP			246
+#define SRST_P_HDMI_CTRL		247
+#define SRST_P_DP_CTRL			248
+#define SRST_S_DP_CTRL			249
+#define SRST_C_DP_CTRL			250
+#define SRST_P_MIPI_DSI0		251
+#define SRST_P_MIPI_DSI1		252
+#define SRST_DP_CORE			253
+#define SRST_DP_I2S			254
+
+/* cru_softrst_con16 */
+#define SRST_GASKET			256
+#define SRST_VIO_GRF			258
+#define SRST_DPTX_SPDIF_REC		259
+#define SRST_HDMI_CTRL			260
+#define SRST_HDCP_CTRL			261
+#define SRST_A_ISP0_NOC			262
+#define SRST_A_ISP1_NOC			263
+#define SRST_H_ISP0_NOC			266
+#define SRST_H_ISP1_NOC			267
+#define SRST_H_ISP0			268
+#define SRST_H_ISP1			269
+#define SRST_ISP0			270
+#define SRST_ISP1			271
+
+/* cru_softrst_con17 */
+#define SRST_A_VOP0_NOC			272
+#define SRST_A_VOP1_NOC			273
+#define SRST_A_VOP0			274
+#define SRST_A_VOP1			275
+#define SRST_H_VOP0_NOC			276
+#define SRST_H_VOP1_NOC			277
+#define SRST_H_VOP0			278
+#define SRST_H_VOP1			279
+#define SRST_D_VOP0			280
+#define SRST_D_VOP1			281
+#define SRST_VOP0_PWM			282
+#define SRST_VOP1_PWM			283
+#define SRST_P_EDP_NOC			284
+#define SRST_P_EDP_CTRL			285
+
+/* cru_softrst_con18 */
+#define SRST_A_GPU			288
+#define SRST_A_GPU_NOC			289
+#define SRST_A_GPU_GRF			290
+#define SRST_PVTM_GPU			291
+#define SRST_A_USB3_NOC			292
+#define SRST_A_USB3_OTG0		293
+#define SRST_A_USB3_OTG1		294
+#define SRST_A_USB3_GRF			295
+#define SRST_PMU			296
+
+/* cru_softrst_con19 */
+#define SRST_P_TIMER0_5			304
+#define SRST_TIMER0			305
+#define SRST_TIMER1			306
+#define SRST_TIMER2			307
+#define SRST_TIMER3			308
+#define SRST_TIMER4			309
+#define SRST_TIMER5			310
+#define SRST_P_TIMER6_11		311
+#define SRST_TIMER6			312
+#define SRST_TIMER7			313
+#define SRST_TIMER8			314
+#define SRST_TIMER9			315
+#define SRST_TIMER10			316
+#define SRST_TIMER11			317
+#define SRST_P_INTR_ARB_PMU		318
+#define SRST_P_ALIVE_SGRF		319
+
+/* cru_softrst_con20 */
+#define SRST_P_GPIO2			320
+#define SRST_P_GPIO3			321
+#define SRST_P_GPIO4			322
+#define SRST_P_GRF			323
+#define SRST_P_ALIVE_NOC		324
+#define SRST_P_WDT0			325
+#define SRST_P_WDT1			326
+#define SRST_P_INTR_ARB			327
+#define SRST_P_UPHY0_DPTX		328
+#define SRST_P_UPHY0_APB		330
+#define SRST_P_UPHY0_TCPHY		332
+#define SRST_P_UPHY1_TCPHY		333
+#define SRST_P_UPHY0_TCPDCTRL		334
+#define SRST_P_UPHY1_TCPDCTRL		335
+
+/* pmu soft-reset indices */
+
+/* pmu_cru_softrst_con0 */
+#define SRST_P_NOC			0
+#define SRST_P_INTMEM			1
+#define SRST_H_CM0S			2
+#define SRST_H_CM0S_NOC			3
+#define SRST_DBG_CM0S			4
+#define SRST_PO_CM0S			5
+#define SRST_P_SPI6			6
+#define SRST_SPI6			7
+#define SRST_P_TIMER_0_1		8
+#define SRST_P_TIMER_0			9
+#define SRST_P_TIMER_1			10
+#define SRST_P_UART4			11
+#define SRST_UART4			12
+#define SRST_P_WDT			13
+
+/* pmu_cru_softrst_con1 */
+#define SRST_P_I2C6			16
+#define SRST_P_I2C7			17
+#define SRST_P_I2C8			18
+#define SRST_P_MAILBOX			19
+#define SRST_P_RKPWM			20
+#define SRST_P_PMUGRF			21
+#define SRST_P_SGRF			22
+#define SRST_P_GPIO0			23
+#define SRST_P_GPIO1			24
+#define SRST_P_CRU			25
+#define SRST_P_INTR			26
+#define SRST_PVTM			27
+#define SRST_I2C6			28
+#define SRST_I2C7			29
+#define SRST_I2C8			30
+
+#endif
diff --git a/include/dt-bindings/gpio/meson-gxbb-gpio.h b/include/dt-bindings/gpio/meson-gxbb-gpio.h
new file mode 100644
index 0000000..58654fd
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-gxbb-gpio.h
@@ -0,0 +1,154 @@
+/*
+ * GPIO definitions for Amlogic Meson GXBB SoCs
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_MESON_GXBB_GPIO_H
+#define _DT_BINDINGS_MESON_GXBB_GPIO_H
+
+#define	GPIOAO_0	0
+#define	GPIOAO_1	1
+#define	GPIOAO_2	2
+#define	GPIOAO_3	3
+#define	GPIOAO_4	4
+#define	GPIOAO_5	5
+#define	GPIOAO_6	6
+#define	GPIOAO_7	7
+#define	GPIOAO_8	8
+#define	GPIOAO_9	9
+#define	GPIOAO_10	10
+#define	GPIOAO_11	11
+#define	GPIOAO_12	12
+#define	GPIOAO_13	13
+
+#define	GPIOZ_0		0
+#define	GPIOZ_1		1
+#define	GPIOZ_2		2
+#define	GPIOZ_3		3
+#define	GPIOZ_4		4
+#define	GPIOZ_5		5
+#define	GPIOZ_6		6
+#define	GPIOZ_7		7
+#define	GPIOZ_8		8
+#define	GPIOZ_9		9
+#define	GPIOZ_10	10
+#define	GPIOZ_11	11
+#define	GPIOZ_12	12
+#define	GPIOZ_13	13
+#define	GPIOZ_14	14
+#define	GPIOZ_15	15
+#define	GPIOH_0		16
+#define	GPIOH_1		17
+#define	GPIOH_2		18
+#define	GPIOH_3		19
+#define	BOOT_0		20
+#define	BOOT_1		21
+#define	BOOT_2		22
+#define	BOOT_3		23
+#define	BOOT_4		24
+#define	BOOT_5		25
+#define	BOOT_6		26
+#define	BOOT_7		27
+#define	BOOT_8		28
+#define	BOOT_9		29
+#define	BOOT_10		30
+#define	BOOT_11		31
+#define	BOOT_12		32
+#define	BOOT_13		33
+#define	BOOT_14		34
+#define	BOOT_15		35
+#define	BOOT_16		36
+#define	BOOT_17		37
+#define	CARD_0		38
+#define	CARD_1		39
+#define	CARD_2		40
+#define	CARD_3		41
+#define	CARD_4		42
+#define	CARD_5		43
+#define	CARD_6		44
+#define	GPIODV_0	45
+#define	GPIODV_1	46
+#define	GPIODV_2	47
+#define	GPIODV_3	48
+#define	GPIODV_4	49
+#define	GPIODV_5	50
+#define	GPIODV_6	51
+#define	GPIODV_7	52
+#define	GPIODV_8	53
+#define	GPIODV_9	54
+#define	GPIODV_10	55
+#define	GPIODV_11	56
+#define	GPIODV_12	57
+#define	GPIODV_13	58
+#define	GPIODV_14	59
+#define	GPIODV_15	60
+#define	GPIODV_16	61
+#define	GPIODV_17	62
+#define	GPIODV_18	63
+#define	GPIODV_19	64
+#define	GPIODV_20	65
+#define	GPIODV_21	66
+#define	GPIODV_22	67
+#define	GPIODV_23	68
+#define	GPIODV_24	69
+#define	GPIODV_25	70
+#define	GPIODV_26	71
+#define	GPIODV_27	72
+#define	GPIODV_28	73
+#define	GPIODV_29	74
+#define	GPIOY_0		75
+#define	GPIOY_1		76
+#define	GPIOY_2		77
+#define	GPIOY_3		78
+#define	GPIOY_4		79
+#define	GPIOY_5		80
+#define	GPIOY_6		81
+#define	GPIOY_7		82
+#define	GPIOY_8		83
+#define	GPIOY_9		84
+#define	GPIOY_10	85
+#define	GPIOY_11	86
+#define	GPIOY_12	87
+#define	GPIOY_13	88
+#define	GPIOY_14	89
+#define	GPIOY_15	90
+#define	GPIOY_16	91
+#define	GPIOX_0		92
+#define	GPIOX_1		93
+#define	GPIOX_2		94
+#define	GPIOX_3		95
+#define	GPIOX_4		96
+#define	GPIOX_5		97
+#define	GPIOX_6		98
+#define	GPIOX_7		99
+#define	GPIOX_8		100
+#define	GPIOX_9		101
+#define	GPIOX_10	102
+#define	GPIOX_11	103
+#define	GPIOX_12	104
+#define	GPIOX_13	105
+#define	GPIOX_14	106
+#define	GPIOX_15	107
+#define	GPIOX_16	108
+#define	GPIOX_17	109
+#define	GPIOX_18	110
+#define	GPIOX_19	111
+#define	GPIOX_20	112
+#define	GPIOX_21	113
+#define	GPIOX_22	114
+#define	GPIOCLK_0	115
+#define	GPIOCLK_1	116
+#define	GPIOCLK_2	117
+#define	GPIOCLK_3	118
+#define	GPIO_TEST_N	119
+
+#endif
diff --git a/include/dt-bindings/gpio/tegra-gpio.h b/include/dt-bindings/gpio/tegra-gpio.h
index 197dc28..a1c09e8 100644
--- a/include/dt-bindings/gpio/tegra-gpio.h
+++ b/include/dt-bindings/gpio/tegra-gpio.h
@@ -12,40 +12,40 @@
 
 #include <dt-bindings/gpio/gpio.h>
 
-#define TEGRA_GPIO_BANK_ID_A 0
-#define TEGRA_GPIO_BANK_ID_B 1
-#define TEGRA_GPIO_BANK_ID_C 2
-#define TEGRA_GPIO_BANK_ID_D 3
-#define TEGRA_GPIO_BANK_ID_E 4
-#define TEGRA_GPIO_BANK_ID_F 5
-#define TEGRA_GPIO_BANK_ID_G 6
-#define TEGRA_GPIO_BANK_ID_H 7
-#define TEGRA_GPIO_BANK_ID_I 8
-#define TEGRA_GPIO_BANK_ID_J 9
-#define TEGRA_GPIO_BANK_ID_K 10
-#define TEGRA_GPIO_BANK_ID_L 11
-#define TEGRA_GPIO_BANK_ID_M 12
-#define TEGRA_GPIO_BANK_ID_N 13
-#define TEGRA_GPIO_BANK_ID_O 14
-#define TEGRA_GPIO_BANK_ID_P 15
-#define TEGRA_GPIO_BANK_ID_Q 16
-#define TEGRA_GPIO_BANK_ID_R 17
-#define TEGRA_GPIO_BANK_ID_S 18
-#define TEGRA_GPIO_BANK_ID_T 19
-#define TEGRA_GPIO_BANK_ID_U 20
-#define TEGRA_GPIO_BANK_ID_V 21
-#define TEGRA_GPIO_BANK_ID_W 22
-#define TEGRA_GPIO_BANK_ID_X 23
-#define TEGRA_GPIO_BANK_ID_Y 24
-#define TEGRA_GPIO_BANK_ID_Z 25
-#define TEGRA_GPIO_BANK_ID_AA 26
-#define TEGRA_GPIO_BANK_ID_BB 27
-#define TEGRA_GPIO_BANK_ID_CC 28
-#define TEGRA_GPIO_BANK_ID_DD 29
-#define TEGRA_GPIO_BANK_ID_EE 30
-#define TEGRA_GPIO_BANK_ID_FF 31
+#define TEGRA_GPIO_PORT_A 0
+#define TEGRA_GPIO_PORT_B 1
+#define TEGRA_GPIO_PORT_C 2
+#define TEGRA_GPIO_PORT_D 3
+#define TEGRA_GPIO_PORT_E 4
+#define TEGRA_GPIO_PORT_F 5
+#define TEGRA_GPIO_PORT_G 6
+#define TEGRA_GPIO_PORT_H 7
+#define TEGRA_GPIO_PORT_I 8
+#define TEGRA_GPIO_PORT_J 9
+#define TEGRA_GPIO_PORT_K 10
+#define TEGRA_GPIO_PORT_L 11
+#define TEGRA_GPIO_PORT_M 12
+#define TEGRA_GPIO_PORT_N 13
+#define TEGRA_GPIO_PORT_O 14
+#define TEGRA_GPIO_PORT_P 15
+#define TEGRA_GPIO_PORT_Q 16
+#define TEGRA_GPIO_PORT_R 17
+#define TEGRA_GPIO_PORT_S 18
+#define TEGRA_GPIO_PORT_T 19
+#define TEGRA_GPIO_PORT_U 20
+#define TEGRA_GPIO_PORT_V 21
+#define TEGRA_GPIO_PORT_W 22
+#define TEGRA_GPIO_PORT_X 23
+#define TEGRA_GPIO_PORT_Y 24
+#define TEGRA_GPIO_PORT_Z 25
+#define TEGRA_GPIO_PORT_AA 26
+#define TEGRA_GPIO_PORT_BB 27
+#define TEGRA_GPIO_PORT_CC 28
+#define TEGRA_GPIO_PORT_DD 29
+#define TEGRA_GPIO_PORT_EE 30
+#define TEGRA_GPIO_PORT_FF 31
 
-#define TEGRA_GPIO(bank, offset) \
-	((TEGRA_GPIO_BANK_ID_##bank * 8) + offset)
+#define TEGRA_GPIO(port, offset) \
+	((TEGRA_GPIO_PORT_##port * 8) + offset)
 
 #endif
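
The BANK-to-PORT rename in tegra-gpio.h is purely cosmetic; TEGRA_GPIO() still expands to port * 8 + offset, so existing device trees keep the same GPIO numbers. For instance (compile-time check only, the pin choice is arbitrary):

	#include <dt-bindings/gpio/tegra-gpio.h>

	/* port H, offset 2: 7 * 8 + 2 */
	_Static_assert(TEGRA_GPIO(H, 2) == 58, "GPIO numbering unchanged by the rename");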
diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h
new file mode 100644
index 0000000..38001c70
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra186-gpio.h
@@ -0,0 +1,56 @@
+/*
+ * This header provides constants for binding nvidia,tegra186-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA_MAIN_GPIO_PORT_A 0
+#define TEGRA_MAIN_GPIO_PORT_B 1
+#define TEGRA_MAIN_GPIO_PORT_C 2
+#define TEGRA_MAIN_GPIO_PORT_D 3
+#define TEGRA_MAIN_GPIO_PORT_E 4
+#define TEGRA_MAIN_GPIO_PORT_F 5
+#define TEGRA_MAIN_GPIO_PORT_G 6
+#define TEGRA_MAIN_GPIO_PORT_H 7
+#define TEGRA_MAIN_GPIO_PORT_I 8
+#define TEGRA_MAIN_GPIO_PORT_J 9
+#define TEGRA_MAIN_GPIO_PORT_K 10
+#define TEGRA_MAIN_GPIO_PORT_L 11
+#define TEGRA_MAIN_GPIO_PORT_M 12
+#define TEGRA_MAIN_GPIO_PORT_N 13
+#define TEGRA_MAIN_GPIO_PORT_O 14
+#define TEGRA_MAIN_GPIO_PORT_P 15
+#define TEGRA_MAIN_GPIO_PORT_Q 16
+#define TEGRA_MAIN_GPIO_PORT_R 17
+#define TEGRA_MAIN_GPIO_PORT_T 18
+#define TEGRA_MAIN_GPIO_PORT_X 19
+#define TEGRA_MAIN_GPIO_PORT_Y 20
+#define TEGRA_MAIN_GPIO_PORT_BB 21
+#define TEGRA_MAIN_GPIO_PORT_CC 22
+
+#define TEGRA_MAIN_GPIO(port, offset) \
+	((TEGRA_MAIN_GPIO_PORT_##port * 8) + offset)
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA_AON_GPIO_PORT_S 0
+#define TEGRA_AON_GPIO_PORT_U 1
+#define TEGRA_AON_GPIO_PORT_V 2
+#define TEGRA_AON_GPIO_PORT_W 3
+#define TEGRA_AON_GPIO_PORT_Z 4
+#define TEGRA_AON_GPIO_PORT_AA 5
+#define TEGRA_AON_GPIO_PORT_EE 6
+#define TEGRA_AON_GPIO_PORT_FF 7
+
+#define TEGRA_AON_GPIO(port, offset) \
+	((TEGRA_AON_GPIO_PORT_##port * 8) + offset)
+
+#endif
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
new file mode 100644
index 0000000..38f1ea8
--- /dev/null
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -0,0 +1,59 @@
+/*
+ * This header provides constants for hisilicon pinctrl bindings.
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_HISI_H
+#define _DT_BINDINGS_PINCTRL_HISI_H
+
+/* iomg bit definition */
+#define MUX_M0		0
+#define MUX_M1		1
+#define MUX_M2		2
+#define MUX_M3		3
+#define MUX_M4		4
+#define MUX_M5		5
+#define MUX_M6		6
+#define MUX_M7		7
+
+/* iocg bit definition */
+#define PULL_MASK	(3)
+#define PULL_DIS	(0)
+#define PULL_UP		(1 << 0)
+#define PULL_DOWN	(1 << 1)
+
+/* drive strength definition */
+#define DRIVE_MASK	(7 << 4)
+#define DRIVE1_02MA	(0 << 4)
+#define DRIVE1_04MA	(1 << 4)
+#define DRIVE1_08MA	(2 << 4)
+#define DRIVE1_10MA	(3 << 4)
+#define DRIVE2_02MA	(0 << 4)
+#define DRIVE2_04MA	(1 << 4)
+#define DRIVE2_08MA	(2 << 4)
+#define DRIVE2_10MA	(3 << 4)
+#define DRIVE3_04MA	(0 << 4)
+#define DRIVE3_08MA	(1 << 4)
+#define DRIVE3_12MA	(2 << 4)
+#define DRIVE3_16MA	(3 << 4)
+#define DRIVE3_20MA	(4 << 4)
+#define DRIVE3_24MA	(5 << 4)
+#define DRIVE3_32MA	(6 << 4)
+#define DRIVE3_40MA	(7 << 4)
+#define DRIVE4_02MA	(0 << 4)
+#define DRIVE4_04MA	(2 << 4)
+#define DRIVE4_08MA	(4 << 4)
+#define DRIVE4_10MA	(6 << 4)
+
+#endif
diff --git a/include/dt-bindings/power/r8a7779-sysc.h b/include/dt-bindings/power/r8a7779-sysc.h
new file mode 100644
index 0000000..183571d
--- /dev/null
+++ b/include/dt-bindings/power/r8a7779-sysc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7779_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7779_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7779_PD_ARM1			 1
+#define R8A7779_PD_ARM2			 2
+#define R8A7779_PD_ARM3			 3
+#define R8A7779_PD_SGX			20
+#define R8A7779_PD_VDP			21
+#define R8A7779_PD_IMP			24
+
+/* Always-on power area */
+#define R8A7779_PD_ALWAYS_ON		32
+
+#endif /* __DT_BINDINGS_POWER_R8A7779_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7790-sysc.h b/include/dt-bindings/power/r8a7790-sysc.h
new file mode 100644
index 0000000..6af4e99
--- /dev/null
+++ b/include/dt-bindings/power/r8a7790-sysc.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7790_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7790_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7790_PD_CA15_CPU0		 0
+#define R8A7790_PD_CA15_CPU1		 1
+#define R8A7790_PD_CA15_CPU2		 2
+#define R8A7790_PD_CA15_CPU3		 3
+#define R8A7790_PD_CA7_CPU0		 5
+#define R8A7790_PD_CA7_CPU1		 6
+#define R8A7790_PD_CA7_CPU2		 7
+#define R8A7790_PD_CA7_CPU3		 8
+#define R8A7790_PD_CA15_SCU		12
+#define R8A7790_PD_SH_4A		16
+#define R8A7790_PD_RGX			20
+#define R8A7790_PD_CA7_SCU		21
+#define R8A7790_PD_IMP			24
+
+/* Always-on power area */
+#define R8A7790_PD_ALWAYS_ON		32
+
+#endif /* __DT_BINDINGS_POWER_R8A7790_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7791-sysc.h b/include/dt-bindings/power/r8a7791-sysc.h
new file mode 100644
index 0000000..1403baa
--- /dev/null
+++ b/include/dt-bindings/power/r8a7791-sysc.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7791_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7791_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7791_PD_CA15_CPU0		 0
+#define R8A7791_PD_CA15_CPU1		 1
+#define R8A7791_PD_CA15_SCU		12
+#define R8A7791_PD_SH_4A		16
+#define R8A7791_PD_SGX			20
+
+/* Always-on power area */
+#define R8A7791_PD_ALWAYS_ON		32
+
+#endif /* __DT_BINDINGS_POWER_R8A7791_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7793-sysc.h b/include/dt-bindings/power/r8a7793-sysc.h
new file mode 100644
index 0000000..b5693df
--- /dev/null
+++ b/include/dt-bindings/power/r8a7793-sysc.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7793_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7793_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ *
+ * Note that R-Car M2-N is identical to R-Car M2-W w.r.t. power domains.
+ */
+
+#define R8A7793_PD_CA15_CPU0		 0
+#define R8A7793_PD_CA15_CPU1		 1
+#define R8A7793_PD_CA15_SCU		12
+#define R8A7793_PD_SH_4A		16
+#define R8A7793_PD_SGX			20
+
+/* Always-on power area */
+#define R8A7793_PD_ALWAYS_ON		32
+
+#endif /* __DT_BINDINGS_POWER_R8A7793_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7794-sysc.h b/include/dt-bindings/power/r8a7794-sysc.h
new file mode 100644
index 0000000..862241c
--- /dev/null
+++ b/include/dt-bindings/power/r8a7794-sysc.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7794_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7794_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7794_PD_CA7_CPU0		 5
+#define R8A7794_PD_CA7_CPU1		 6
+#define R8A7794_PD_SH_4A		16
+#define R8A7794_PD_SGX			20
+#define R8A7794_PD_CA7_SCU		21
+
+/* Always-on power area */
+#define R8A7794_PD_ALWAYS_ON		32
+
+#endif /* __DT_BINDINGS_POWER_R8A7794_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7795-sysc.h b/include/dt-bindings/power/r8a7795-sysc.h
new file mode 100644
index 0000000..ee2e26b
--- /dev/null
+++ b/include/dt-bindings/power/r8a7795-sysc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7795_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7795_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7795_PD_CA57_CPU0		 0
+#define R8A7795_PD_CA57_CPU1		 1
+#define R8A7795_PD_CA57_CPU2		 2
+#define R8A7795_PD_CA57_CPU3		 3
+#define R8A7795_PD_CA53_CPU0		 5
+#define R8A7795_PD_CA53_CPU1		 6
+#define R8A7795_PD_CA53_CPU2		 7
+#define R8A7795_PD_CA53_CPU3		 8
+#define R8A7795_PD_A3VP			 9
+#define R8A7795_PD_CA57_SCU		12
+#define R8A7795_PD_CR7			13
+#define R8A7795_PD_A3VC			14
+#define R8A7795_PD_3DG_A		17
+#define R8A7795_PD_3DG_B		18
+#define R8A7795_PD_3DG_C		19
+#define R8A7795_PD_3DG_D		20
+#define R8A7795_PD_CA53_SCU		21
+#define R8A7795_PD_3DG_E		22
+#define R8A7795_PD_A3IR			24
+#define R8A7795_PD_A2VC0		25
+#define R8A7795_PD_A2VC1		26
+
+/* Always-on power area */
+#define R8A7795_PD_ALWAYS_ON		32
+
+#endif /* __DT_BINDINGS_POWER_R8A7795_SYSC_H__ */
diff --git a/include/dt-bindings/power/rk3399-power.h b/include/dt-bindings/power/rk3399-power.h
new file mode 100644
index 0000000..168b3bf
--- /dev/null
+++ b/include/dt-bindings/power/rk3399-power.h
@@ -0,0 +1,53 @@
+#ifndef __DT_BINDINGS_POWER_RK3399_POWER_H__
+#define __DT_BINDINGS_POWER_RK3399_POWER_H__
+
+/* VD_CORE_L */
+#define RK3399_PD_A53_L0	0
+#define RK3399_PD_A53_L1	1
+#define RK3399_PD_A53_L2	2
+#define RK3399_PD_A53_L3	3
+#define RK3399_PD_SCU_L		4
+
+/* VD_CORE_B */
+#define RK3399_PD_A72_B0	5
+#define RK3399_PD_A72_B1	6
+#define RK3399_PD_SCU_B		7
+
+/* VD_LOGIC */
+#define RK3399_PD_TCPD0		8
+#define RK3399_PD_TCPD1		9
+#define RK3399_PD_CCI		10
+#define RK3399_PD_CCI0		11
+#define RK3399_PD_CCI1		12
+#define RK3399_PD_PERILP	13
+#define RK3399_PD_PERIHP	14
+#define RK3399_PD_VIO		15
+#define RK3399_PD_VO		16
+#define RK3399_PD_VOPB		17
+#define RK3399_PD_VOPL		18
+#define RK3399_PD_ISP0		19
+#define RK3399_PD_ISP1		20
+#define RK3399_PD_HDCP		21
+#define RK3399_PD_GMAC		22
+#define RK3399_PD_EMMC		23
+#define RK3399_PD_USB3		24
+#define RK3399_PD_EDP		25
+#define RK3399_PD_GIC		26
+#define RK3399_PD_SD		27
+#define RK3399_PD_SDIOAUDIO	28
+#define RK3399_PD_ALIVE		29
+
+/* VD_CENTER */
+#define RK3399_PD_CENTER	30
+#define RK3399_PD_VCODEC	31
+#define RK3399_PD_VDU		32
+#define RK3399_PD_RGA		33
+#define RK3399_PD_IEP		34
+
+/* VD_GPU */
+#define RK3399_PD_GPU		35
+
+/* VD_PMU */
+#define RK3399_PD_PMU		36
+
+#endif
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
index 4915d40..2480469 100644
--- a/include/keys/asymmetric-subtype.h
+++ b/include/keys/asymmetric-subtype.h
@@ -32,7 +32,7 @@
 	void (*describe)(const struct key *key, struct seq_file *m);
 
 	/* Destroy a key of this subtype */
-	void (*destroy)(void *payload);
+	void (*destroy)(void *payload_crypto, void *payload_auth);
 
 	/* Verify the signature on a key of this subtype (optional) */
 	int (*verify_signature)(const struct key *key,
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 59c1df9..b382407 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -15,6 +15,7 @@
 #define _KEYS_ASYMMETRIC_TYPE_H
 
 #include <linux/key-type.h>
+#include <linux/verification.h>
 
 extern struct key_type key_type_asymmetric;
 
@@ -23,9 +24,10 @@
  * follows:
  */
 enum asymmetric_payload_bits {
-	asym_crypto,
-	asym_subtype,
-	asym_key_ids,
+	asym_crypto,		/* The data representing the key */
+	asym_subtype,		/* Pointer to an asymmetric_key_subtype struct */
+	asym_key_ids,		/* Pointer to an asymmetric_key_ids struct */
+	asym_auth		/* The key's authorisation (signature, parent key ID) */
 };
 
 /*
@@ -74,6 +76,11 @@
 	return key->payload.data[asym_key_ids];
 }
 
+extern struct key *find_asymmetric_key(struct key *keyring,
+				       const struct asymmetric_key_id *id_0,
+				       const struct asymmetric_key_id *id_1,
+				       bool partial);
+
 /*
  * The payload is at the discretion of the subtype.
  */
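
find_asymmetric_key() is the keyring-level replacement for the x509_request_asymmetric_key() helper removed from public_key.h earlier in this diff: it looks a key up by one or two asymmetric key IDs and hands back an ERR_PTR() on failure. A hedged call-site sketch (the keyring and IDs are placeholders):

	struct key *key;

	key = find_asymmetric_key(trust_keyring, sig->auth_ids[0],
				  sig->auth_ids[1], false);
	if (IS_ERR(key))
		return PTR_ERR(key);

	/* ... check the signature against 'key', then drop the reference ... */
	key_put(key);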
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 39fd38c..fbd4647 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -12,51 +12,40 @@
 #ifndef _KEYS_SYSTEM_KEYRING_H
 #define _KEYS_SYSTEM_KEYRING_H
 
+#include <linux/key.h>
+
 #ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
 
-#include <linux/key.h>
-#include <crypto/public_key.h>
+extern int restrict_link_by_builtin_trusted(struct key *keyring,
+					    const struct key_type *type,
+					    const union key_payload *payload);
 
-extern struct key *system_trusted_keyring;
-static inline struct key *get_system_trusted_keyring(void)
-{
-	return system_trusted_keyring;
-}
 #else
-static inline struct key *get_system_trusted_keyring(void)
-{
-	return NULL;
-}
+#define restrict_link_by_builtin_trusted restrict_link_reject
 #endif
 
-#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
-extern int system_verify_data(const void *data, unsigned long len,
-			      const void *raw_pkcs7, size_t pkcs7_len,
-			      enum key_being_used_for usage);
+#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+extern int restrict_link_by_builtin_and_secondary_trusted(
+	struct key *keyring,
+	const struct key_type *type,
+	const union key_payload *payload);
+#else
+#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
 #endif
 
-#ifdef CONFIG_IMA_MOK_KEYRING
-extern struct key *ima_mok_keyring;
+#ifdef CONFIG_IMA_BLACKLIST_KEYRING
 extern struct key *ima_blacklist_keyring;
 
-static inline struct key *get_ima_mok_keyring(void)
-{
-	return ima_mok_keyring;
-}
 static inline struct key *get_ima_blacklist_keyring(void)
 {
 	return ima_blacklist_keyring;
 }
 #else
-static inline struct key *get_ima_mok_keyring(void)
-{
-	return NULL;
-}
 static inline struct key *get_ima_blacklist_keyring(void)
 {
 	return NULL;
 }
-#endif /* CONFIG_IMA_MOK_KEYRING */
+#endif /* CONFIG_IMA_BLACKLIST_KEYRING */
 
 
 #endif /* _KEYS_SYSTEM_KEYRING_H */
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 281caf8..be6037a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <kvm/iodev.h>
+#include <linux/irqchip/arm-gic-common.h>
 
 #define VGIC_NR_IRQS_LEGACY	256
 #define VGIC_NR_SGIS		16
@@ -353,15 +354,15 @@
 #define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
 #define vgic_ready(k)		((k)->arch.vgic.ready)
 
-int vgic_v2_probe(struct device_node *vgic_node,
+int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
 		  const struct vgic_ops **ops,
 		  const struct vgic_params **params);
 #ifdef CONFIG_KVM_ARM_VGIC_V3
-int vgic_v3_probe(struct device_node *vgic_node,
+int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
 		  const struct vgic_ops **ops,
 		  const struct vgic_params **params);
 #else
-static inline int vgic_v3_probe(struct device_node *vgic_node,
+static inline int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
 				const struct vgic_ops **ops,
 				const struct vgic_params **params)
 {
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 06ed7e5..288fac5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -190,14 +190,6 @@
 }
 #endif
 
-#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
-void acpi_initrd_override(void *data, size_t size);
-#else
-static inline void acpi_initrd_override(void *data, size_t size)
-{
-}
-#endif
-
 #define BAD_MADT_ENTRY(entry, end) (					    \
 		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
 		((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
@@ -216,6 +208,7 @@
 int acpi_mps_check (void);
 int acpi_numa_init (void);
 
+void early_acpi_table_init(void *data, size_t size);
 int acpi_table_init (void);
 int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
 int __init acpi_parse_entries(char *id, unsigned long table_size,
@@ -278,6 +271,7 @@
 extern u32 acpi_irq_handled;
 extern u32 acpi_irq_not_handled;
 extern unsigned int acpi_sci_irq;
+extern bool acpi_no_s5;
 #define INVALID_ACPI_IRQ	((unsigned)-1)
 static inline bool acpi_sci_irq_valid(void)
 {
@@ -311,7 +305,6 @@
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
 bool acpi_isa_irq_available(int irq);
-void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
 extern int ec_read(u8 addr, u8 *val);
@@ -359,7 +352,6 @@
 extern char acpi_video_backlight_string[];
 extern long acpi_is_video_device(acpi_handle handle);
 extern int acpi_blacklisted(void);
-extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
 extern void acpi_osi_setup(char *str);
 extern bool acpi_osi_is_win8(void);
 
@@ -596,6 +588,7 @@
 	return NULL;
 }
 
+static inline void early_acpi_table_init(void *data, size_t size) { }
 static inline void acpi_early_init(void) { }
 static inline void acpi_subsystem_init(void) { }
 
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 10fe2a2..27e9ec8 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -86,7 +86,7 @@
  * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
  */
 struct pl08x_platform_data {
-	const struct pl08x_channel_data *slave_channels;
+	struct pl08x_channel_data *slave_channels;
 	unsigned int num_slave_channels;
 	struct pl08x_channel_data memcpy_channel;
 	int (*get_xfer_signal)(const struct pl08x_channel_data *);
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
index b2d32e0..714186d 100644
--- a/include/linux/apple-gmux.h
+++ b/include/linux/apple-gmux.h
@@ -35,7 +35,7 @@
  */
 static inline bool apple_gmux_present(void)
 {
-	return acpi_dev_present(GMUX_ACPI_HID);
+	return acpi_dev_found(GMUX_ACPI_HID);
 }
 
 #else  /* !CONFIG_APPLE_GMUX */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c1a2f34..f310ec0 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -371,7 +371,8 @@
 	SETFEATURES_AAM_ON	= 0x42,
 	SETFEATURES_AAM_OFF	= 0xC2,
 
-	SETFEATURES_SPINUP	= 0x07, /* Spin-up drive */
+	SETFEATURES_SPINUP		= 0x07, /* Spin-up drive */
+	SETFEATURES_SPINUP_TIMEOUT	= 30000, /* 30s timeout for drive spin-up from PUIS */
 
 	SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */
 	SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index 33eb274..e66153d 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -31,6 +31,10 @@
 	u32 gpio_mask;
 	u32 gpio_val;
 
+	u32 bt_active_pin;
+	u32 bt_priority_pin;
+	u32 wlan_active_pin;
+
 	bool endian_check;
 	bool is_clk_25mhz;
 	bool tx_gain_buffalo;
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index df4f369..e451534 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -559,25 +559,25 @@
 #endif
 
 /**
- * fetch_or - perform *ptr |= mask and return old value of *ptr
- * @ptr: pointer to value
- * @mask: mask to OR on the value
- *
- * cmpxchg based fetch_or, macro so it works for different integer types
+ * atomic_fetch_or - perform *p |= mask and return old value of *p
+ * @mask: mask to OR on the atomic_t
+ * @p: pointer to atomic_t
  */
-#ifndef fetch_or
-#define fetch_or(ptr, mask)						\
-({	typeof(*(ptr)) __old, __val = *(ptr);				\
-	for (;;) {							\
-		__old = cmpxchg((ptr), __val, __val | (mask));		\
-		if (__old == __val)					\
-			break;						\
-		__val = __old;						\
-	}								\
-	__old;								\
-})
-#endif
+#ifndef atomic_fetch_or
+static inline int atomic_fetch_or(int mask, atomic_t *p)
+{
+	int old, val = atomic_read(p);
 
+	for (;;) {
+		old = atomic_cmpxchg(p, val, val | mask);
+		if (old == val)
+			break;
+		val = old;
+	}
+
+	return old;
+}
+#endif
 
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
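
A hypothetical usage sketch of the reworked helper (the flag value and function names below are invented for illustration): atomic_fetch_or() now takes the mask first, operates on an atomic_t, and returns the old value, so a caller can tell whether it was the one that set a bit.

static atomic_t example_flags = ATOMIC_INIT(0);

static bool example_claim_setup(void)
{
	/* returns the previous value, so we can detect who won the race */
	int old = atomic_fetch_or(0x1, &example_flags);

	return !(old & 0x1);	/* true only for the first caller */
}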
diff --git a/include/linux/audit.h b/include/linux/audit.h
index e38e3fc..961a417 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -26,6 +26,7 @@
 #include <linux/sched.h>
 #include <linux/ptrace.h>
 #include <uapi/linux/audit.h>
+#include <linux/tty.h>
 
 #define AUDIT_INO_UNSET ((unsigned long)-1)
 #define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -347,6 +348,23 @@
 	return tsk->sessionid;
 }
 
+static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
+{
+	struct tty_struct *tty = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tsk->sighand->siglock, flags);
+	if (tsk->signal)
+		tty = tty_kref_get(tsk->signal->tty);
+	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+	return tty;
+}
+
+static inline void audit_put_tty(struct tty_struct *tty)
+{
+	tty_kref_put(tty);
+}
+
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
 extern void __audit_bprm(struct linux_binprm *bprm);
@@ -504,6 +522,12 @@
 {
 	return -1;
 }
+static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
+{
+	return NULL;
+}
+static inline void audit_put_tty(struct tty_struct *tty)
+{ }
 static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
 { }
 static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f..3f10307 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -135,7 +135,7 @@
 
 struct backing_dev_info {
 	struct list_head bdi_list;
-	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
+	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
 	unsigned int capabilities; /* Device capabilities */
 	congested_fn *congested_fn; /* Function pointer if device is md/dm */
 	void *congested_data;	/* Pointer to aux data for congested func */
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 1e7a69a..5f2fd61 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -141,9 +141,10 @@
 					struct backlight_device *bd);
 extern void backlight_force_update(struct backlight_device *bd,
 				   enum backlight_update_reason reason);
-extern bool backlight_device_registered(enum backlight_type type);
 extern int backlight_register_notifier(struct notifier_block *nb);
 extern int backlight_unregister_notifier(struct notifier_block *nb);
+extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);
 
 #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
 
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
new file mode 100644
index 0000000..c06b47c
--- /dev/null
+++ b/include/linux/bcm47xx_sprom.h
@@ -0,0 +1,24 @@
+/*
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#ifndef __BCM47XX_SPROM_H
+#define __BCM47XX_SPROM_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_BCM47XX_SPROM
+int bcm47xx_sprom_register_fallbacks(void);
+#else
+static inline int bcm47xx_sprom_register_fallbacks(void)
+{
+	return -ENOTSUPP;
+};
+#endif
+
+#endif /* __BCM47XX_SPROM_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 88bc64f..9faebf7 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -41,7 +41,7 @@
 #endif
 
 #define BIO_MAX_PAGES		256
-#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
+#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_SHIFT)
 #define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)
 
 /*
@@ -703,6 +703,17 @@
 }
 
 /*
+ * Increment chain count for the bio. Make sure the CHAIN flag update
+ * is visible before the raised count.
+ */
+static inline void bio_inc_remaining(struct bio *bio)
+{
+	bio_set_flag(bio, BIO_CHAIN);
+	smp_mb__before_atomic();
+	atomic_inc(&bio->__bi_remaining);
+}
+
+/*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
  * These memory pools in turn all allocate from the bio_slab
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index defeaac..299e76b 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -227,6 +227,22 @@
 })
 #endif
 
+#ifndef bit_clear_unless
+#define bit_clear_unless(ptr, _clear, _test)	\
+({								\
+	const typeof(*ptr) clear = (_clear), test = (_test);	\
+	typeof(*ptr) old, new;					\
+								\
+	do {							\
+		old = ACCESS_ONCE(*ptr);			\
+		new = old & ~clear;				\
+	} while (!(old & test) &&				\
+		 cmpxchg(ptr, old, new) != old);		\
+								\
+	!(old & test);						\
+})
+#endif
+
 #ifndef find_last_bit
 /**
  * find_last_bit - find the last set bit in a memory region
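
A hedged sketch of how the new bit_clear_unless() macro might be used (the names below are invented): it atomically clears the given bits unless any of the test bits are set, and evaluates to true when the clear actually happened.

#define EX_PENDING	0x1UL
#define EX_DYING	0x2UL

static unsigned long example_state;

/* hypothetical: drop the pending bit unless the object is already dying */
static bool example_clear_pending(void)
{
	return bit_clear_unless(&example_state, EX_PENDING, EX_DYING);
}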
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9ac9799..2498fdf 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -238,8 +238,8 @@
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
-		void *priv);
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+		busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_mq_freeze_queue_start(struct request_queue *q);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 86a38ea..77e5d81 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -208,7 +208,7 @@
 #define REQ_COMMON_MASK \
 	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
 	 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
-	 REQ_SECURE | REQ_INTEGRITY)
+	 REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 #define BIO_NO_ADVANCE_ITER_MASK	(REQ_DISCARD|REQ_WRITE_SAME)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7e5d7e0..1fd8fdf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -433,8 +433,6 @@
 	/*
 	 * for flush operations
 	 */
-	unsigned int		flush_flags;
-	unsigned int		flush_not_queueable:1;
 	struct blk_flush_queue	*fq;
 
 	struct list_head	requeue_list;
@@ -491,6 +489,9 @@
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
 #define QUEUE_FLAG_POLL	       22	/* IO polling enabled if set */
+#define QUEUE_FLAG_WC	       23	/* Write back caching */
+#define QUEUE_FLAG_FUA	       24	/* device supports FUA writes */
+#define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueable */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -779,7 +780,7 @@
 extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
-		unsigned int len);
+		int offset, unsigned int len);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 			     struct bio_set *bs, gfp_t gfp_mask,
@@ -1007,8 +1008,8 @@
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -1128,6 +1129,8 @@
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
@@ -1363,7 +1366,7 @@
 
 static inline bool queue_flush_queueable(struct request_queue *q)
 {
-	return !q->flush_not_queueable;
+	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
 }
 
 typedef struct {struct page *v;} Sector;
@@ -1372,7 +1375,7 @@
 
 static inline void put_dev_sector(Sector p)
 {
-	page_cache_release(p.v);
+	put_page(p.v);
 }
 
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
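
A hypothetical probe fragment showing the new cache description helper that replaces blk_queue_flush() (the request_queue pointer q is assumed to be valid):

/* device has a volatile write-back cache and supports FUA writes */
blk_queue_write_cache(q, true, true);

/* write-through device, nothing to flush */
blk_queue_write_cache(q, false, false);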
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index afc1343..0f3172b 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -57,6 +57,14 @@
 	} while (0)
 #define BLK_TN_MAX_MSG		128
 
+static inline bool blk_trace_note_message_enabled(struct request_queue *q)
+{
+	struct blk_trace *bt = q->blk_trace;
+	if (likely(!bt))
+		return false;
+	return bt->act_mask & BLK_TC_NOTIFY;
+}
+
 extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
 				void *data, size_t len);
 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
@@ -79,6 +87,7 @@
 # define blk_trace_remove(q)				(-ENOTTY)
 # define blk_add_trace_msg(q, fmt, ...)			do { } while (0)
 # define blk_trace_remove_sysfs(dev)			do { } while (0)
+# define blk_trace_note_message_enabled(q)		(false)
 static inline int blk_trace_init_sysfs(struct device *dev)
 {
 	return 0;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 21ee41b..8ee27b8 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -66,6 +66,11 @@
 	 * functions that access data on eBPF program stack
 	 */
 	ARG_PTR_TO_STACK,	/* any pointer to eBPF program stack */
+	ARG_PTR_TO_RAW_STACK,	/* any pointer to eBPF program stack, area does not
+				 * need to be initialized, helper function must fill
+				 * all bytes or clear them in error case.
+				 */
+
 	ARG_CONST_STACK_SIZE,	/* number of bytes accessed from stack */
 	ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
 
@@ -131,6 +136,7 @@
 struct bpf_prog_aux {
 	atomic_t refcnt;
 	u32 used_map_cnt;
+	u32 max_ctx_offset;
 	const struct bpf_verifier_ops *ops;
 	struct bpf_map **used_maps;
 	struct bpf_prog *prog;
@@ -160,9 +166,12 @@
 #define MAX_TAIL_CALL_CNT 32
 
 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 void bpf_fd_array_map_clear(struct bpf_map *map);
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_event_output_proto(void);
 
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
@@ -171,12 +180,13 @@
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index f0ba9c2..e3354b7 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -24,6 +24,8 @@
 #define PHY_ID_BCM7250			0xae025280
 #define PHY_ID_BCM7364			0xae025260
 #define PHY_ID_BCM7366			0x600d8490
+#define PHY_ID_BCM7346			0x600d8650
+#define PHY_ID_BCM7362			0x600d84b0
 #define PHY_ID_BCM7425			0x600d86b0
 #define PHY_ID_BCM7429			0x600d8730
 #define PHY_ID_BCM7435			0x600d8750
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c67f052..d48daa3 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -43,7 +43,7 @@
 			 */
 };
 
-#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
 
 struct page;
 struct buffer_head;
@@ -263,7 +263,7 @@
 static inline void attach_page_buffers(struct page *page,
 		struct buffer_head *head)
 {
-	page_cache_get(page);
+	get_page(page);
 	SetPagePrivate(page);
 	set_page_private(page, (unsigned long)head);
 }
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 735f9f8..5261751 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -40,8 +40,11 @@
 	struct can_clock clock;
 
 	enum can_state state;
-	u32 ctrlmode;
-	u32 ctrlmode_supported;
+
+	/* CAN controller features - see include/uapi/linux/can/netlink.h */
+	u32 ctrlmode;		/* current options setting */
+	u32 ctrlmode_supported;	/* options that can be modified by netlink */
+	u32 ctrlmode_static;	/* static enabled options for driver/hardware */
 
 	int restart_ms;
 	struct timer_list restart_timer;
@@ -108,6 +111,21 @@
 	return skb->len == CANFD_MTU;
 }
 
+/* helper to define static CAN controller features at device creation time */
+static inline void can_set_static_ctrlmode(struct net_device *dev,
+					   u32 static_mode)
+{
+	struct can_priv *priv = netdev_priv(dev);
+
+	/* alloc_candev() succeeded => netdev_priv() is valid at this point */
+	priv->ctrlmode = static_mode;
+	priv->ctrlmode_static = static_mode;
+
+	/* override MTU which was set by default in can_setup()? */
+	if (static_mode & CAN_CTRLMODE_FD)
+		dev->mtu = CANFD_MTU;
+}
+
 /* get data length from can_dlc with sanitized can_dlc */
 u8 can_dlc2len(u8 can_dlc);
 
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 915af30..7c2bb27 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -1,9 +1,10 @@
 /*
  * AMD Cryptographic Coprocessor (CCP) driver
  *
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -381,6 +382,35 @@
 	u32 final;
 };
 
+/**
+ * struct ccp_passthru_nomap_engine - CCP pass-through operation
+ *   without performing DMA mapping
+ * @bit_mod: bitwise operation to perform
+ * @byte_swap: byteswap operation to perform
+ * @mask: mask to be applied to data
+ * @mask_len: length in bytes of mask
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ * @final: indicate final pass-through operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ *   - bit_mod, byte_swap, src, dst, src_len
+ *   - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP
+ */
+struct ccp_passthru_nomap_engine {
+	enum ccp_passthru_bitwise bit_mod;
+	enum ccp_passthru_byteswap byte_swap;
+
+	dma_addr_t mask;
+	u32 mask_len;		/* In bytes */
+
+	dma_addr_t src_dma, dst_dma;
+	u64 src_len;		/* In bytes */
+
+	u32 final;
+};
+
 /***** ECC engine *****/
 #define CCP_ECC_MODULUS_BYTES	48	/* 384-bits */
 #define CCP_ECC_MAX_OPERANDS	6
@@ -522,7 +552,8 @@
 };
 
 /* Flag values for flags member of ccp_cmd */
-#define CCP_CMD_MAY_BACKLOG	0x00000001
+#define CCP_CMD_MAY_BACKLOG		0x00000001
+#define CCP_CMD_PASSTHRU_NO_DMA_MAP	0x00000002
 
 /**
  * struct ccp_cmd - CPP operation request
@@ -562,6 +593,7 @@
 		struct ccp_sha_engine sha;
 		struct ccp_rsa_engine rsa;
 		struct ccp_passthru_engine passthru;
+		struct ccp_passthru_nomap_engine passthru_nomap;
 		struct ccp_ecc_engine ecc;
 	} u;
 
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 260d78b..1563265 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -12,9 +12,12 @@
  */
 
 struct ceph_auth_client;
-struct ceph_authorizer;
 struct ceph_msg;
 
+struct ceph_authorizer {
+	void (*destroy)(struct ceph_authorizer *);
+};
+
 struct ceph_auth_handshake {
 	struct ceph_authorizer *authorizer;
 	void *authorizer_buf;
@@ -62,8 +65,6 @@
 				 struct ceph_auth_handshake *auth);
 	int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
 				       struct ceph_authorizer *a, size_t len);
-	void (*destroy_authorizer)(struct ceph_auth_client *ac,
-				   struct ceph_authorizer *a);
 	void (*invalidate_authorizer)(struct ceph_auth_client *ac,
 				      int peer_type);
 
@@ -112,8 +113,7 @@
 extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
 				       int peer_type,
 				       struct ceph_auth_handshake *auth);
-extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
-					 struct ceph_authorizer *a);
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
 extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
 				       int peer_type,
 				       struct ceph_auth_handshake *a);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index e7975e4..db92a8d 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -176,8 +176,8 @@
  */
 static inline int calc_pages_for(u64 off, u64 len)
 {
-	return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
-		(off >> PAGE_CACHE_SHIFT);
+	return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) -
+		(off >> PAGE_SHIFT);
 }
 
 extern struct kmem_cache *ceph_inode_cachep;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 4343df8..cbf4609 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -16,7 +16,6 @@
 struct ceph_snap_context;
 struct ceph_osd_request;
 struct ceph_osd_client;
-struct ceph_authorizer;
 
 /*
  * completion callback for async writepages
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 3e39ae5..5b17de6 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -444,6 +444,7 @@
 	int (*can_attach)(struct cgroup_taskset *tset);
 	void (*cancel_attach)(struct cgroup_taskset *tset);
 	void (*attach)(struct cgroup_taskset *tset);
+	void (*post_attach)(void);
 	int (*can_fork)(struct task_struct *task);
 	void (*cancel_fork)(struct task_struct *task);
 	void (*fork)(struct task_struct *task);
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index 7adfd80..ba6fa41 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -24,12 +24,20 @@
 void r8a7779_clocks_init(u32 mode);
 void rcar_gen2_clocks_init(u32 mode);
 
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
 void cpg_mstp_add_clk_domain(struct device_node *np);
-int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
-void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
+#ifdef CONFIG_CLK_RENESAS_CPG_MSTP
+int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev);
+void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev);
 #else
-static inline void cpg_mstp_add_clk_domain(struct device_node *np) {}
+#define cpg_mstp_attach_dev	NULL
+#define cpg_mstp_detach_dev	NULL
 #endif
 
+#ifdef CONFIG_CLK_RENESAS_CPG_MSSR
+int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev);
+void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev);
+#else
+#define cpg_mssr_attach_dev	NULL
+#define cpg_mssr_detach_dev	NULL
+#endif
 #endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 57bf7aa..7007a5f 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -121,4 +121,9 @@
 }
 #endif
 
+extern void tegra210_xusb_pll_hw_control_enable(void);
+extern void tegra210_xusb_pll_hw_sequence_start(void);
+extern void tegra210_sata_pll_hw_control_enable(void);
+extern void tegra210_sata_pll_hw_sequence_start(void);
+
 #endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index a307bf6..44a1aff 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -15,6 +15,7 @@
 #include <linux/cache.h>
 #include <linux/timer.h>
 #include <linux/init.h>
+#include <linux/of.h>
 #include <asm/div64.h>
 #include <asm/io.h>
 
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 22ab246..3d5202e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -199,7 +199,7 @@
 #define unreachable() __builtin_unreachable()
 
 /* Mark a function definition as prohibited from being cloned. */
-#define __noclone	__attribute__((__noclone__))
+#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
 
 #endif /* GCC_VERSION >= 40500 */
 
@@ -246,7 +246,7 @@
 #define __HAVE_BUILTIN_BSWAP32__
 #define __HAVE_BUILTIN_BSWAP64__
 #endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#if GCC_VERSION >= 40800
 #define __HAVE_BUILTIN_BSWAP16__
 #endif
 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 485fe55..d9d6a9d 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -188,7 +188,7 @@
 }
 
 #define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz)	\
-static struct configfs_attribute _pfx##attr_##_name = {		\
+static struct configfs_bin_attribute _pfx##attr_##_name = {	\
 	.cb_attr = {						\
 		.ca_name	= __stringify(_name),		\
 		.ca_mode	= S_IRUGO,			\
@@ -200,7 +200,7 @@
 }
 
 #define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz)	\
-static struct configfs_attribute _pfx##attr_##_name = {		\
+static struct configfs_bin_attribute _pfx##attr_##_name = {	\
 	.cb_attr = {						\
 		.ca_name	= __stringify(_name),		\
 		.ca_mode	= S_IWUSR,			\
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index f9b1fab..21597dc 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -59,25 +59,7 @@
  * CPU notifier priorities.
  */
 enum {
-	/*
-	 * SCHED_ACTIVE marks a cpu which is coming up active during
-	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
-	 * notifier.  CPUSET_ACTIVE adjusts cpuset according to
-	 * cpu_active mask right after SCHED_ACTIVE.  During
-	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
-	 * ordered in the similar way.
-	 *
-	 * This ordering guarantees consistent cpu_active mask and
-	 * migration behavior to all cpu notifiers.
-	 */
-	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
-	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
-	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
-	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,
-
-	/* migration should happen before other stuff but after perf */
 	CPU_PRI_PERF		= 20,
-	CPU_PRI_MIGRATION	= 10,
 
 	/* bring up workqueues before normal notifiers and down after */
 	CPU_PRI_WORKQUEUE_UP	= 5,
diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h
deleted file mode 100644
index 0414009..0000000
--- a/include/linux/cpufreq-dt.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2014 Marvell
- * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __CPUFREQ_DT_H__
-#define __CPUFREQ_DT_H__
-
-struct cpufreq_dt_platform_data {
-	/*
-	 * True when each CPU has its own clock to control its
-	 * frequency, false when all CPUs are controlled by a single
-	 * clock.
-	 */
-	bool independent_clocks;
-};
-
-#endif /* __CPUFREQ_DT_H__ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 718e872..4e81e08 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -102,6 +102,17 @@
 	 */
 	struct rw_semaphore	rwsem;
 
+	/*
+	 * Fast switch flags:
+	 * - fast_switch_possible should be set by the driver if it can
+	 *   guarantee that the frequency can be changed on any CPU sharing the
+	 *   policy and that the change will then affect all of the policy CPUs.
+	 * - fast_switch_enabled is to be set by governors that support fast
+	 *   frequency switching with the help of cpufreq_enable_fast_switch().
+	 */
+	bool			fast_switch_possible;
+	bool			fast_switch_enabled;
+
 	/* Synchronization for frequency transitions */
 	bool			transition_ongoing; /* Tracks transition status */
 	spinlock_t		transition_lock;
@@ -156,6 +167,8 @@
 int cpufreq_update_policy(unsigned int cpu);
 bool have_governor_per_policy(void);
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
+void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
 #else
 static inline unsigned int cpufreq_get(unsigned int cpu)
 {
@@ -236,6 +249,8 @@
 				  unsigned int relation);	/* Deprecated */
 	int		(*target_index)(struct cpufreq_policy *policy,
 					unsigned int index);
+	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
+				       unsigned int target_freq);
 	/*
 	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
 	 * unset.
@@ -426,6 +441,20 @@
 #define CPUFREQ_POLICY_POWERSAVE	(1)
 #define CPUFREQ_POLICY_PERFORMANCE	(2)
 
+/*
+ * The polling frequency depends on the capability of the processor. Default
+ * polling frequency is 1000 times the transition latency of the processor. The
+ * ondemand governor will work on any processor with transition latency <= 10ms,
+ * using an appropriate sampling rate.
+ *
+ * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
+ * the ondemand governor will not work. All times here are in us (microseconds).
+ */
+#define MIN_SAMPLING_RATE_RATIO		(2)
+#define LATENCY_MULTIPLIER		(1000)
+#define MIN_LATENCY_MULTIPLIER		(20)
+#define TRANSITION_LATENCY_LIMIT	(10 * 1000 * 1000)
+
 /* Governor Events */
 #define CPUFREQ_GOV_START	1
 #define CPUFREQ_GOV_STOP	2
@@ -450,6 +479,8 @@
 };
 
 /* Pass a target to the cpufreq driver */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+					unsigned int target_freq);
 int cpufreq_driver_target(struct cpufreq_policy *policy,
 				 unsigned int target_freq,
 				 unsigned int relation);
@@ -462,6 +493,29 @@
 struct cpufreq_governor *cpufreq_default_governor(void);
 struct cpufreq_governor *cpufreq_fallback_governor(void);
 
+/* Governor attribute set */
+struct gov_attr_set {
+	struct kobject kobj;
+	struct list_head policy_list;
+	struct mutex update_lock;
+	int usage_count;
+};
+
+/* sysfs ops for cpufreq governors */
+extern const struct sysfs_ops governor_sysfs_ops;
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
+
+/* Governor sysfs attribute */
+struct governor_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
+	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
+			 size_t count);
+};
+
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
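
A hedged sketch of how a driver might wire up the new fast-switch path (the example_* names are invented; the callback must not sleep, since it can be invoked from scheduler context):

static unsigned int example_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	unsigned int freq = clamp_val(target_freq, policy->min, policy->max);

	example_write_pstate(freq);	/* hypothetical non-sleeping HW write */
	return freq;			/* frequency actually set */
}

static struct cpufreq_driver example_driver = {
	.fast_switch	= example_fast_switch,
	/* ... */
};

/* and in ->init(): */
policy->fast_switch_possible = true;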
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 5d68e15..386374d 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -8,6 +8,7 @@
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
 	CPUHP_AP_OFFLINE,
+	CPUHP_AP_SCHED_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_ONLINE,
 	CPUHP_TEARDOWN_CPU,
@@ -16,6 +17,7 @@
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
+	CPUHP_AP_ACTIVE,
 	CPUHP_ONLINE,
 };
 
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 40cee6b..e828cf6 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -743,12 +743,10 @@
 static inline void
 set_cpu_online(unsigned int cpu, bool online)
 {
-	if (online) {
+	if (online)
 		cpumask_set_cpu(cpu, &__cpu_online_mask);
-		cpumask_set_cpu(cpu, &__cpu_active_mask);
-	} else {
+	else
 		cpumask_clear_cpu(cpu, &__cpu_online_mask);
-	}
 }
 
 static inline void
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index fea160e..85a868c 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,8 +137,6 @@
 	task_unlock(current);
 }
 
-extern void cpuset_post_attach_flush(void);
-
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -245,10 +243,6 @@
 	return false;
 }
 
-static inline void cpuset_post_attach_flush(void)
-{
-}
-
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 3849fce..3873697 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -34,9 +34,13 @@
 
 /*
  * Architecture code can redefine this if there are any special checks
- * needed for 64-bit ELF vmcores. In case of 32-bit only architecture,
- * this can be set to zero.
+ * needed for 32-bit ELF or 64-bit ELF vmcores.  In case of 32-bit
+ * only architecture, vmcore_elf64_check_arch can be set to zero.
  */
+#ifndef vmcore_elf32_check_arch
+#define vmcore_elf32_check_arch(x) elf_check_arch(x)
+#endif
+
 #ifndef vmcore_elf64_check_arch
 #define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
 #endif
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 99c9489..6e28c89 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -948,8 +948,7 @@
  * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
  * handle is registered in the request data structure.
  *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- *	   of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
  */
 static inline struct ablkcipher_request *ablkcipher_request_alloc(
 	struct crypto_ablkcipher *tfm, gfp_t gfp)
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 636dd59..982a6c4 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -5,7 +5,7 @@
 #include <linux/mm.h>
 #include <asm/pgtable.h>
 
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
 		  get_block_t, dio_iodone_t, int flags);
 int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 7cb043d..f8506e8 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -123,7 +123,10 @@
 	unsigned long d_time;		/* used by d_revalidate */
 	void *d_fsdata;			/* fs-specific data */
 
-	struct list_head d_lru;		/* LRU list */
+	union {
+		struct list_head d_lru;		/* LRU list */
+		wait_queue_head_t *d_wait;	/* in-lookup ones only */
+	};
 	struct list_head d_child;	/* child of parent list */
 	struct list_head d_subdirs;	/* our children */
 	/*
@@ -131,6 +134,7 @@
 	 */
 	union {
 		struct hlist_node d_alias;	/* inode alias list */
+		struct hlist_bl_node d_in_lookup_hash;	/* only for in-lookup ones */
 	 	struct rcu_head d_rcu;
 	} d_u;
 };
@@ -161,6 +165,7 @@
 	struct vfsmount *(*d_automount)(struct path *);
 	int (*d_manage)(struct dentry *, bool);
 	struct inode *(*d_select_inode)(struct dentry *, unsigned);
+	struct dentry *(*d_real)(struct dentry *, struct inode *);
 } ____cacheline_aligned;
 
 /*
@@ -229,6 +234,9 @@
 #define DCACHE_OP_SELECT_INODE		0x02000000 /* Unioned entry: dcache op selects inode */
 
 #define DCACHE_ENCRYPTED_WITH_KEY	0x04000000 /* dir is encrypted with a valid key */
+#define DCACHE_OP_REAL			0x08000000
+
+#define DCACHE_PAR_LOOKUP		0x10000000 /* being looked up (with parent locked shared) */
 
 extern seqlock_t rename_lock;
 
@@ -246,6 +254,8 @@
 /* allocate/de-allocate */
 extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
 extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
+					wait_queue_head_t *);
 extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
 extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
 extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
@@ -365,6 +375,22 @@
 	spin_unlock(&dentry->d_lock);
 }
 
+extern void __d_lookup_done(struct dentry *);
+
+static inline int d_in_lookup(struct dentry *dentry)
+{
+	return dentry->d_flags & DCACHE_PAR_LOOKUP;
+}
+
+static inline void d_lookup_done(struct dentry *dentry)
+{
+	if (unlikely(d_in_lookup(dentry))) {
+		spin_lock(&dentry->d_lock);
+		__d_lookup_done(dentry);
+		spin_unlock(&dentry->d_lock);
+	}
+}
+
 extern void dput(struct dentry *);
 
 static inline bool d_managed(const struct dentry *dentry)
@@ -555,4 +581,24 @@
 	return upper;
 }
 
+static inline struct dentry *d_real(struct dentry *dentry)
+{
+	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
+		return dentry->d_op->d_real(dentry, NULL);
+	else
+		return dentry;
+}
+
+static inline struct inode *vfs_select_inode(struct dentry *dentry,
+					     unsigned open_flags)
+{
+	struct inode *inode = d_inode(dentry);
+
+	if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
+		inode = dentry->d_op->d_select_inode(dentry, open_flags);
+
+	return inode;
+}
+
+
 #endif	/* __LINUX_DCACHE_H */
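
A minimal sketch of the pairing, under the assumption of a ->lookup()-style path that put a dentry into the in-lookup state via d_alloc_parallel(): once the lookup is finished, d_lookup_done() drops that state and wakes anyone waiting on the same name.

/* hypothetical tail of a lookup path */
if (d_in_lookup(dentry))
	d_lookup_done(dentry);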
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 6fa02a2..2de4e2e 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -19,6 +19,13 @@
 
 #define DEVFREQ_NAME_LEN 16
 
+/* DEVFREQ notifier interface */
+#define DEVFREQ_TRANSITION_NOTIFIER	(0)
+
+/* Transition notifiers of DEVFREQ_TRANSITION_NOTIFIER */
+#define	DEVFREQ_PRECHANGE		(0)
+#define DEVFREQ_POSTCHANGE		(1)
+
 struct devfreq;
 
 /**
@@ -143,6 +150,7 @@
  * @trans_table:	Statistics of devfreq transitions
  * @time_in_state:	Statistics of devfreq states
  * @last_stat_updated:	The last time stat updated
+ * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
  *
 * This structure stores the devfreq information for a given device.
  *
@@ -177,6 +185,13 @@
 	unsigned int *trans_table;
 	unsigned long *time_in_state;
 	unsigned long last_stat_updated;
+
+	struct srcu_notifier_head transition_notifier_list;
+};
+
+struct devfreq_freqs {
+	unsigned long old;
+	unsigned long new;
 };
 
 #if defined(CONFIG_PM_DEVFREQ)
@@ -207,6 +222,22 @@
 					      struct devfreq *devfreq);
 extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
 						struct devfreq *devfreq);
+extern int devfreq_register_notifier(struct devfreq *devfreq,
+					struct notifier_block *nb,
+					unsigned int list);
+extern int devfreq_unregister_notifier(struct devfreq *devfreq,
+					struct notifier_block *nb,
+					unsigned int list);
+extern int devm_devfreq_register_notifier(struct device *dev,
+				struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list);
+extern void devm_devfreq_unregister_notifier(struct device *dev,
+				struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list);
+extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+						int index);
 
 /**
  * devfreq_update_stats() - update the last_status pointer in struct devfreq
@@ -241,6 +272,39 @@
 };
 #endif
 
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+/**
+ * struct devfreq_passive_data - void *data fed to struct devfreq
+ *	and devfreq_add_device
+ * @parent:	the devfreq instance of parent device.
+ * @get_target_freq:	Optional callback that returns the desired operating
+ *			frequency for the device using the passive governor.
+ *			It is called whenever the passive governor has to pick
+ *			the next frequency based on the new frequency of the
+ *			parent devfreq device (which uses any governor except
+ *			the passive governor). Set this callback if the devfreq
+ *			device has its own method of deciding the next frequency.
+ * @this:	the devfreq instance of own device.
+ * @nb:		the notifier block for DEVFREQ_TRANSITION_NOTIFIER list
+ *
+ * The devfreq_passive_data has to set the devfreq instance of the parent
+ * device, which uses any governor except the passive governor. The 'this'
+ * and 'nb' fields do not need to be initialized; the devfreq core handles
+ * them.
+ */
+struct devfreq_passive_data {
+	/* Should set the devfreq instance of parent device */
+	struct devfreq *parent;
+
+	/* Optional callback to decide the next frequency of the passive device */
+	int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+
+	/* For passive governor's internal use. Don't need to set them */
+	struct devfreq *this;
+	struct notifier_block nb;
+};
+#endif
+
 #else /* !CONFIG_PM_DEVFREQ */
 static inline struct devfreq *devfreq_add_device(struct device *dev,
 					  struct devfreq_dev_profile *profile,
@@ -307,6 +371,41 @@
 {
 }
 
+static inline int devfreq_register_notifier(struct devfreq *devfreq,
+					struct notifier_block *nb,
+					unsigned int list)
+{
+	return 0;
+}
+
+static inline int devfreq_unregister_notifier(struct devfreq *devfreq,
+					struct notifier_block *nb,
+					unsigned int list)
+{
+	return 0;
+}
+
+static inline int devm_devfreq_register_notifier(struct device *dev,
+				struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+	return 0;
+}
+
+static inline void devm_devfreq_unregister_notifier(struct device *dev,
+				struct devfreq *devfreq,
+				struct notifier_block *nb,
+				unsigned int list)
+{
+}
+
+static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+							int index)
+{
+	return ERR_PTR(-ENODEV);
+}
+
 static inline int devfreq_update_stats(struct devfreq *df)
 {
 	return -EINVAL;
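
A hedged sketch of the new transition-notifier interface (the example_* names are invented; df is assumed to be a valid struct devfreq * obtained elsewhere):

static int example_devfreq_event(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct devfreq_freqs *freqs = data;

	if (event == DEVFREQ_POSTCHANGE)
		pr_debug("devfreq: %lu -> %lu Hz\n", freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_devfreq_event,
};

/* in probe(): */
ret = devm_devfreq_register_notifier(dev, df, &example_nb,
				     DEVFREQ_TRANSITION_NOTIFIER);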
diff --git a/include/linux/device.h b/include/linux/device.h
index 002c597..b130304 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -956,11 +956,6 @@
 	return !!dev->power.async_suspend;
 }
 
-static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
-{
-	dev->power.ignore_children = enable;
-}
-
 static inline void dev_pm_syscore_device(struct device *dev, bool val)
 {
 #ifdef CONFIG_PM_SLEEP
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index e0ee0b3..5871f29 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,37 +15,23 @@
 
 #include <linux/errno.h>
 
+struct pts_fs_info;
+
 #ifdef CONFIG_UNIX98_PTYS
 
-int devpts_new_index(struct inode *ptmx_inode);
-void devpts_kill_index(struct inode *ptmx_inode, int idx);
-void devpts_add_ref(struct inode *ptmx_inode);
-void devpts_del_ref(struct inode *ptmx_inode);
+/* Look up a pts fs info and get a ref to it */
+struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
+void devpts_put_ref(struct pts_fs_info *);
+
+int devpts_new_index(struct pts_fs_info *);
+void devpts_kill_index(struct pts_fs_info *, int);
+
 /* mknod in devpts */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
-		void *priv);
+struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *);
 /* get private structure */
-void *devpts_get_priv(struct inode *pts_inode);
+void *devpts_get_priv(struct dentry *);
 /* unlink */
-void devpts_pty_kill(struct inode *inode);
-
-#else
-
-/* Dummy stubs in the no-pty case */
-static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
-static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
-static inline void devpts_add_ref(struct inode *ptmx_inode) { }
-static inline void devpts_del_ref(struct inode *ptmx_inode) { }
-static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
-		dev_t device, int index, void *priv)
-{
-	return ERR_PTR(-EINVAL);
-}
-static inline void *devpts_get_priv(struct inode *pts_inode)
-{
-	return NULL;
-}
-static inline void devpts_pty_kill(struct inode *inode) { }
+void devpts_pty_kill(struct dentry *);
 
 #endif
 
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index fc48103..8443bbb 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -38,8 +38,8 @@
  * These implement the bulk of the relevant DMA mapping callbacks, but require
  * the arch code to take care of attributes and cache maintenance
  */
-struct page **iommu_dma_alloc(struct device *dev, size_t size,
-		gfp_t gfp, int prot, dma_addr_t *handle,
+struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
+		struct dma_attrs *attrs, int prot, dma_addr_t *handle,
 		void (*flush_page)(struct device *, const void *, phys_addr_t));
 void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
 		dma_addr_t *handle);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 9ea9aba..71c1b215 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -514,7 +514,7 @@
 
 #ifndef arch_setup_dma_ops
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-				      u64 size, struct iommu_ops *iommu,
+				      u64 size, const struct iommu_ops *iommu,
 				      bool coherent) { }
 #endif
 
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index 7145644..f2e538a 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -27,6 +27,7 @@
  * @regs:		memory mapped I/O space
  * @clk:		hclk clock
  * @dw:			struct dw_dma that is filed by dw_dma_probe()
+ * @pdata:		pointer to platform data
  */
 struct dw_dma_chip {
 	struct device	*dev;
@@ -34,10 +35,12 @@
 	void __iomem	*regs;
 	struct clk	*clk;
 	struct dw_dma	*dw;
+
+	const struct dw_dma_platform_data	*pdata;
 };
 
 /* Export to the platform drivers */
-int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
+int dw_dma_probe(struct dw_dma_chip *chip);
 int dw_dma_remove(struct dw_dma_chip *chip);
 
 /* DMA API extensions */
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 34b98f2..3ae3000 100644
--- a/include/linux/dma/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
@@ -41,6 +41,20 @@
 	int ext_fsync;
 };
 
+/**
+ * enum xdma_ip_type: DMA IP type.
+ *
+ * XDMA_TYPE_AXIDMA: Axi dma ip.
+ * XDMA_TYPE_CDMA: Axi cdma ip.
+ * XDMA_TYPE_VDMA: Axi vdma ip.
+ *
+ */
+enum xdma_ip_type {
+	XDMA_TYPE_AXIDMA = 0,
+	XDMA_TYPE_CDMA,
+	XDMA_TYPE_VDMA,
+};
+
 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
 					struct xilinx_vdma_config *cfg);
 
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 0174337..30de019 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -804,6 +804,9 @@
 	sg_dma_address(&sg) = buf;
 	sg_dma_len(&sg) = len;
 
+	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+		return NULL;
+
 	return chan->device->device_prep_slave_sg(chan, &sg, 1,
 						  dir, flags, NULL);
 }
@@ -812,6 +815,9 @@
 	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
 	enum dma_transfer_direction dir, unsigned long flags)
 {
+	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+		return NULL;
+
 	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 						  dir, flags, NULL);
 }
@@ -823,6 +829,9 @@
 	enum dma_transfer_direction dir, unsigned long flags,
 	struct rio_dma_ext *rio_ext)
 {
+	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+		return NULL;
+
 	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 						  dir, flags, rio_ext);
 }
@@ -833,6 +842,9 @@
 		size_t period_len, enum dma_transfer_direction dir,
 		unsigned long flags)
 {
+	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
+		return NULL;
+
 	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
 						period_len, dir, flags);
 }
@@ -841,6 +853,9 @@
 		struct dma_chan *chan, struct dma_interleaved_template *xt,
 		unsigned long flags)
 {
+	if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
+		return NULL;
+
 	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }
 
@@ -848,7 +863,7 @@
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags)
 {
-	if (!chan || !chan->device)
+	if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
 		return NULL;
 
 	return chan->device->device_prep_dma_memset(chan, dest, value,
@@ -861,6 +876,9 @@
 		struct scatterlist *src_sg, unsigned int src_nents,
 		unsigned long flags)
 {
+	if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
+		return NULL;
+
 	return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
 			src_sg, src_nents, flags);
 }
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 1626474..df7acb5 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -21,6 +21,7 @@
 #include <linux/pfn.h>
 #include <linux/pstore.h>
 #include <linux/reboot.h>
+#include <linux/screen_info.h>
 
 #include <asm/page.h>
 
@@ -124,6 +125,13 @@
 } efi_capsule_header_t;
 
 /*
+ * EFI capsule flags
+ */
+#define EFI_CAPSULE_PERSIST_ACROSS_RESET	0x00010000
+#define EFI_CAPSULE_POPULATE_SYSTEM_TABLE	0x00020000
+#define EFI_CAPSULE_INITIATE_RESET		0x00040000
+
+/*
  * Allocation types for calls to boottime->allocate_pages.
  */
 #define EFI_ALLOCATE_ANY_PAGES		0
@@ -282,9 +290,10 @@
 	efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
 	void *__reserved;
 	void *register_protocol_notify;
-	void *locate_handle;
+	efi_status_t (*locate_handle)(int, efi_guid_t *, void *,
+				      unsigned long *, efi_handle_t *);
 	void *locate_device_path;
-	void *install_configuration_table;
+	efi_status_t (*install_configuration_table)(efi_guid_t *, void *);
 	void *load_image;
 	void *start_image;
 	void *exit;
@@ -623,6 +632,27 @@
 	EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
 		 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
 
+#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \
+	EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \
+		 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
+
+#define EFI_CONSOLE_OUT_DEVICE_GUID \
+	EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \
+		 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+
+/*
+ * This GUID is used to pass to the kernel proper the struct screen_info
+ * structure that was populated by the stub based on the GOP protocol instance
+ * associated with ConOut
+ */
+#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID \
+	EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, \
+		 0xb9, 0xe, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+
+#define LINUX_EFI_LOADER_ENTRY_GUID \
+	EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, \
+		 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
+
 typedef struct {
 	efi_guid_t guid;
 	u64 table;
@@ -847,6 +877,14 @@
 
 #define EFI_INVALID_TABLE_ADDR		(~0UL)
 
+typedef struct {
+	u32 version;
+	u32 num_entries;
+	u32 desc_size;
+	u32 reserved;
+	efi_memory_desc_t entry[0];
+} efi_memory_attributes_table_t;
+
 /*
  * All runtime access to EFI goes through this structure:
  */
@@ -868,6 +906,7 @@
 	unsigned long config_table;	/* config tables */
 	unsigned long esrt;		/* ESRT table */
 	unsigned long properties_table;	/* properties table */
+	unsigned long mem_attr_table;	/* memory attributes table */
 	efi_get_time_t *get_time;
 	efi_set_time_t *set_time;
 	efi_get_wakeup_time_t *get_wakeup_time;
@@ -883,7 +922,7 @@
 	efi_get_next_high_mono_count_t *get_next_high_mono_count;
 	efi_reset_system_t *reset_system;
 	efi_set_virtual_address_map_t *set_virtual_address_map;
-	struct efi_memory_map *memmap;
+	struct efi_memory_map memmap;
 	unsigned long flags;
 } efi;
 
@@ -945,7 +984,6 @@
 extern void efi_get_time(struct timespec *now);
 extern void efi_reserve_boot_services(void);
 extern int efi_get_fdt_params(struct efi_fdt_params *params);
-extern struct efi_memory_map memmap;
 extern struct kobject *efi_kobj;
 
 extern int efi_reboot_quirk_mode;
@@ -957,12 +995,34 @@
 static inline void efi_fake_memmap(void) { }
 #endif
 
+/*
+ * efi_memattr_perm_setter - arch specific callback function passed into
+ *                           efi_memattr_apply_permissions() that updates the
+ *                           mapping permissions described by the second
+ *                           argument in the page tables referred to by the
+ *                           first argument.
+ */
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+
+extern int efi_memattr_init(void);
+extern int efi_memattr_apply_permissions(struct mm_struct *mm,
+					 efi_memattr_perm_setter fn);
+
 /* Iterate through an efi_memory_map */
-#define for_each_efi_memory_desc(m, md)					   \
+#define for_each_efi_memory_desc_in_map(m, md)				   \
 	for ((md) = (m)->map;						   \
 	     (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
 	     (md) = (void *)(md) + (m)->desc_size)
 
+/**
+ * for_each_efi_memory_desc - iterate over descriptors in efi.memmap
+ * @md: the efi_memory_desc_t * iterator
+ *
+ * Once the loop finishes @md must not be accessed.
+ */
+#define for_each_efi_memory_desc(md) \
+	for_each_efi_memory_desc_in_map(&efi.memmap, md)
+
 /*
  * Format an EFI memory descriptor's type and attributes to a user-provided
  * character buffer, as per snprintf(), and return the buffer.
@@ -1000,7 +1060,6 @@
  * possible, remove EFI-related code altogether.
  */
 #define EFI_BOOT		0	/* Were we booted from EFI? */
-#define EFI_SYSTEM_TABLES	1	/* Can we use EFI system tables? */
 #define EFI_CONFIG_TABLES	2	/* Can we use EFI config tables? */
 #define EFI_RUNTIME_SERVICES	3	/* Can we use runtime services? */
 #define EFI_MEMMAP		4	/* Can we use EFI memory map? */
@@ -1026,8 +1085,16 @@
 }
 static inline void
 efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
+
+static inline bool
+efi_capsule_pending(int *reset_type)
+{
+	return false;
+}
 #endif
 
+extern int efi_status_to_err(efi_status_t status);
+
 /*
  * Variable Attributes
  */
@@ -1180,6 +1247,80 @@
 	void *test_string;
 };
 
+#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR		0
+#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR		1
+#define PIXEL_BIT_MASK					2
+#define PIXEL_BLT_ONLY					3
+#define PIXEL_FORMAT_MAX				4
+
+struct efi_pixel_bitmask {
+	u32 red_mask;
+	u32 green_mask;
+	u32 blue_mask;
+	u32 reserved_mask;
+};
+
+struct efi_graphics_output_mode_info {
+	u32 version;
+	u32 horizontal_resolution;
+	u32 vertical_resolution;
+	int pixel_format;
+	struct efi_pixel_bitmask pixel_information;
+	u32 pixels_per_scan_line;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_32 {
+	u32 max_mode;
+	u32 mode;
+	u32 info;
+	u32 size_of_info;
+	u64 frame_buffer_base;
+	u32 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_64 {
+	u32 max_mode;
+	u32 mode;
+	u64 info;
+	u64 size_of_info;
+	u64 frame_buffer_base;
+	u64 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode {
+	u32 max_mode;
+	u32 mode;
+	unsigned long info;
+	unsigned long size_of_info;
+	u64 frame_buffer_base;
+	unsigned long frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_32 {
+	u32 query_mode;
+	u32 set_mode;
+	u32 blt;
+	u32 mode;
+};
+
+struct efi_graphics_output_protocol_64 {
+	u64 query_mode;
+	u64 set_mode;
+	u64 blt;
+	u64 mode;
+};
+
+struct efi_graphics_output_protocol {
+	unsigned long query_mode;
+	unsigned long set_mode;
+	unsigned long blt;
+	struct efi_graphics_output_protocol_mode *mode;
+};
+
+typedef efi_status_t (*efi_graphics_output_protocol_query_mode)(
+	struct efi_graphics_output_protocol *, u32, unsigned long *,
+	struct efi_graphics_output_mode_info **);
+
 extern struct list_head efivar_sysfs_list;
 
 static inline void
@@ -1195,8 +1336,7 @@
 struct kobject *efivars_kobject(void);
 
 int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
-		void *data, bool atomic, bool duplicates,
-		struct list_head *head);
+		void *data, bool duplicates, struct list_head *head);
 
 void efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
 void efivar_entry_remove(struct efivar_entry *entry);
@@ -1242,6 +1382,13 @@
 #define EFIVARS_DATA_SIZE_MAX 1024
 
 #endif /* CONFIG_EFI_VARS */
+extern bool efi_capsule_pending(int *reset_type);
+
+extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
+				 size_t size, int *reset);
+
+extern int efi_capsule_update(efi_capsule_header_t *capsule,
+			      struct page **pages);
 
 #ifdef CONFIG_EFI_RUNTIME_MAP
 int efi_runtime_map_init(struct kobject *);
@@ -1319,5 +1466,9 @@
 
 efi_status_t efi_parse_options(char *cmdline);
 
+efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
+			   struct screen_info *si, efi_guid_t *proto,
+			   unsigned long size);
+
 bool efi_runtime_disabled(void);
 #endif /* _LINUX_EFI_H */
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e2b7bf2..9ded8c6 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -150,6 +150,13 @@
 __ethtool_get_link_ksettings(struct net_device *dev,
 			     struct ethtool_link_ksettings *link_ksettings);
 
+void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
+					     u32 legacy_u32);
+
+/* Return false if src had higher bits set; lower bits are always updated. */
+bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
+				     const unsigned long *src);
+
 /**
  * struct ethtool_ops - optional netdev operations
  * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
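
These two helpers bridge drivers that still track link modes as legacy u32 masks with the link-mode bitmaps used by struct ethtool_link_ksettings. A minimal sketch of a driver's get_link_ksettings callback using them; struct foo_priv and its u32 fields are hypothetical, not part of this patch:

	static int foo_get_link_ksettings(struct net_device *dev,
					  struct ethtool_link_ksettings *cmd)
	{
		struct foo_priv *priv = netdev_priv(dev);	/* hypothetical driver state */

		/* expand the legacy u32 masks into the new link-mode bitmaps */
		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
							priv->supported);
		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
							priv->advertising);
		return 0;
	}

Going the other way, ethtool_convert_link_mode_to_legacy_u32() returns false when the bitmap carries modes that cannot be represented in 32 bits, so callers can warn instead of silently truncating.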
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 9eb215a..b90e9bd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -262,7 +262,7 @@
 /*
  * For NAT entries
  */
-#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
 
 struct f2fs_nat_entry {
 	__u8 version;		/* latest version of cached nat entry */
@@ -282,7 +282,7 @@
  * Not allow to change this.
  */
 #define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
 
 /*
  * Note that f2fs_sit_entry->vblocks has the following bit-field information.
diff --git a/include/linux/file.h b/include/linux/file.h
index f87d308..7444f5f 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -44,6 +44,7 @@
 extern unsigned long __fdget(unsigned int fd);
 extern unsigned long __fdget_raw(unsigned int fd);
 extern unsigned long __fdget_pos(unsigned int fd);
+extern void __f_unlock_pos(struct file *);
 
 static inline struct fd __to_fd(unsigned long v)
 {
@@ -60,6 +61,18 @@
 	return __to_fd(__fdget_raw(fd));
 }
 
+static inline struct fd fdget_pos(int fd)
+{
+	return __to_fd(__fdget_pos(fd));
+}
+
+static inline void fdput_pos(struct fd f)
+{
+	if (f.flags & FDPUT_POS_UNLOCK)
+		__f_unlock_pos(f.file);
+	fdput(f);
+}
+
 extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
 extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
 extern void set_close_on_exec(unsigned int fd, int flag);
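
The new fdget_pos()/fdput_pos() pair wraps __fdget_pos(): the former may take the file's position lock when the descriptor is shared, and the latter drops it again via __f_unlock_pos(). A minimal sketch of how a position-dependent syscall might bracket its work with the pair; foo_getdents and do_foo_readdir() are hypothetical, not part of this patch:

	SYSCALL_DEFINE3(foo_getdents, unsigned int, fd,
			void __user *, dirent, unsigned int, count)
	{
		struct fd f = fdget_pos(fd);	/* takes f_pos_lock if the file is shared */
		int error;

		if (!f.file)
			return -EBADF;

		error = do_foo_readdir(f.file, dirent, count);	/* hypothetical helper */

		fdput_pos(f);	/* drops f_pos_lock only if FDPUT_POS_UNLOCK is set */
		return error;
	}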
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 43aa1f8..6fc31ef 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -13,6 +13,8 @@
 #include <linux/printk.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
+#include <linux/capability.h>
+
 #include <net/sch_generic.h>
 
 #include <asm/cacheflush.h>
@@ -42,6 +44,15 @@
 #define BPF_REG_X	BPF_REG_7
 #define BPF_REG_TMP	BPF_REG_8
 
+/* Kernel hidden auxiliary/helper register for hardening step.
+ * Only used by eBPF JITs. It's nothing more than a temporary
+ * register that JITs use internally, only that here it's part
+ * of eBPF instructions that have been rewritten for blinding
+ * constants. See JIT pre-step in bpf_jit_blind_constants().
+ */
+#define BPF_REG_AX		MAX_BPF_REG
+#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)
+
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK	512
 
@@ -352,6 +363,22 @@
 
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
 
+struct bpf_skb_data_end {
+	struct qdisc_skb_cb qdisc_cb;
+	void *data_end;
+};
+
+/* compute the linear packet data range [data, data_end) which
+ * will be accessed by cls_bpf and act_bpf programs
+ */
+static inline void bpf_compute_data_end(struct sk_buff *skb)
+{
+	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
+	cb->data_end = skb->data + skb_headlen(skb);
+}
+
 static inline u8 *bpf_skb_cb(struct sk_buff *skb)
 {
 	/* eBPF programs may read/write skb->cb[] area to transfer meta
@@ -442,7 +469,7 @@
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-int bpf_prog_select_runtime(struct bpf_prog *fp);
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
 
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
@@ -476,10 +503,17 @@
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_int_jit_compile(struct bpf_prog *fp);
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_skb_data(void *func);
 
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+				       const struct bpf_insn *patch, u32 len);
+
 #ifdef CONFIG_BPF_JIT
+extern int bpf_jit_enable;
+extern int bpf_jit_harden;
+
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
 struct bpf_binary_header *
@@ -491,6 +525,9 @@
 void bpf_jit_compile(struct bpf_prog *fp);
 void bpf_jit_free(struct bpf_prog *fp);
 
+struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
+void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
+
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 				u32 pass, void *image)
 {
@@ -501,6 +538,33 @@
 		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
 			       16, 1, image, proglen, false);
 }
+
+static inline bool bpf_jit_is_ebpf(void)
+{
+# ifdef CONFIG_HAVE_EBPF_JIT
+	return true;
+# else
+	return false;
+# endif
+}
+
+static inline bool bpf_jit_blinding_enabled(void)
+{
+	/* These are the prerequisites; should someone ever have the
+	 * idea to call blinding outside of them, we make sure to
+	 * bail out.
+	 */
+	if (!bpf_jit_is_ebpf())
+		return false;
+	if (!bpf_jit_enable)
+		return false;
+	if (!bpf_jit_harden)
+		return false;
+	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+		return false;
+
+	return true;
+}
 #else
 static inline void bpf_jit_compile(struct bpf_prog *fp)
 {
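
The constant-blinding entry points above are meant to be driven from an arch eBPF JIT. A sketch of the usual calling pattern, guarded by bpf_jit_blinding_enabled(); the surrounding variables (prog, orig_prog, tmp_blinded) are placeholders from a hypothetical JIT body, not part of this header:

	struct bpf_prog *orig_prog = prog, *tmp;
	bool tmp_blinded = false;

	if (bpf_jit_blinding_enabled()) {
		tmp = bpf_jit_blind_constants(prog);
		if (IS_ERR(tmp))
			return orig_prog;	/* blinding failed, fall back unchanged */
		if (tmp != prog) {
			tmp_blinded = true;
			prog = tmp;		/* generate native code from the blinded copy */
		}
	}

	/* ... emit native code for 'prog' ... */

	if (tmp_blinded)
		bpf_jit_prog_release_other(prog,
					   prog == orig_prog ? tmp : orig_prog);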
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 14a9719..10d3d8f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -323,6 +323,8 @@
 #define IOCB_APPEND		(1 << 1)
 #define IOCB_DIRECT		(1 << 2)
 #define IOCB_HIPRI		(1 << 3)
+#define IOCB_DSYNC		(1 << 4)
+#define IOCB_SYNC		(1 << 5)
 
 struct kiocb {
 	struct file		*ki_filp;
@@ -394,7 +396,7 @@
 	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
 	int (*releasepage) (struct page *, gfp_t);
 	void (*freepage)(struct page *);
-	ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
+	ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
 	/*
 	 * migrate the contents of a page to the specified target. If
 	 * migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -577,6 +579,18 @@
 struct posix_acl;
 #define ACL_NOT_CACHED ((void *)(-1))
 
+static inline struct posix_acl *
+uncached_acl_sentinel(struct task_struct *task)
+{
+	return (void *)task + 1;
+}
+
+static inline bool
+is_uncached_acl(struct posix_acl *acl)
+{
+	return (long)acl & 1;
+}
+
 #define IOP_FASTPERM	0x0001
 #define IOP_LOOKUP	0x0002
 #define IOP_NOFOLLOW	0x0004
@@ -635,7 +649,7 @@
 
 	/* Misc */
 	unsigned long		i_state;
-	struct mutex		i_mutex;
+	struct rw_semaphore	i_rwsem;
 
 	unsigned long		dirtied_when;	/* jiffies of first dirtying */
 	unsigned long		dirtied_time_when;
@@ -672,6 +686,7 @@
 		struct block_device	*i_bdev;
 		struct cdev		*i_cdev;
 		char			*i_link;
+		unsigned		i_dir_seq;
 	};
 
 	__u32			i_generation;
@@ -721,27 +736,42 @@
 
 static inline void inode_lock(struct inode *inode)
 {
-	mutex_lock(&inode->i_mutex);
+	down_write(&inode->i_rwsem);
 }
 
 static inline void inode_unlock(struct inode *inode)
 {
-	mutex_unlock(&inode->i_mutex);
+	up_write(&inode->i_rwsem);
+}
+
+static inline void inode_lock_shared(struct inode *inode)
+{
+	down_read(&inode->i_rwsem);
+}
+
+static inline void inode_unlock_shared(struct inode *inode)
+{
+	up_read(&inode->i_rwsem);
 }
 
 static inline int inode_trylock(struct inode *inode)
 {
-	return mutex_trylock(&inode->i_mutex);
+	return down_write_trylock(&inode->i_rwsem);
+}
+
+static inline int inode_trylock_shared(struct inode *inode)
+{
+	return down_read_trylock(&inode->i_rwsem);
 }
 
 static inline int inode_is_locked(struct inode *inode)
 {
-	return mutex_is_locked(&inode->i_mutex);
+	return rwsem_is_locked(&inode->i_rwsem);
 }
 
 static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
 {
-	mutex_lock_nested(&inode->i_mutex, subclass);
+	down_write_nested(&inode->i_rwsem, subclass);
 }
 
 void lock_two_nondirectories(struct inode *, struct inode*);
@@ -929,7 +959,7 @@
 /* Page cache limit. The filesystems should put that into their s_maxbytes 
    limits, otherwise bad things can happen in VM. */ 
 #if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE	(((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 
+#define MAX_LFS_FILESIZE	(((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
 #elif BITS_PER_LONG==64
 #define MAX_LFS_FILESIZE 	((loff_t)0x7fffffffffffffffLL)
 #endif
@@ -1241,6 +1271,16 @@
 	return f->f_inode;
 }
 
+static inline struct dentry *file_dentry(const struct file *file)
+{
+	struct dentry *dentry = file->f_path.dentry;
+
+	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
+		return dentry->d_op->d_real(dentry, file_inode(file));
+	else
+		return dentry;
+}
+
 static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
 {
 	return locks_lock_inode_wait(file_inode(filp), fl);
@@ -1636,6 +1676,7 @@
 	ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
 	ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
 	int (*iterate) (struct file *, struct dir_context *);
+	int (*iterate_shared) (struct file *, struct dir_context *);
 	unsigned int (*poll) (struct file *, struct poll_table_struct *);
 	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
@@ -1690,7 +1731,8 @@
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
-	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+	ssize_t (*getxattr) (struct dentry *, struct inode *,
+			     const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
@@ -2067,7 +2109,7 @@
 /* /sys/fs */
 extern struct kobject *fs_kobj;
 
-#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
+#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
 
 #ifdef CONFIG_MANDATORY_FILE_LOCKING
 extern int locks_mandatory_locked(struct file *);
@@ -2253,7 +2295,7 @@
 	const char		iname[];
 };
 
-extern long vfs_truncate(struct path *, loff_t);
+extern long vfs_truncate(const struct path *, loff_t);
 extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
 		       struct file *filp);
 extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
@@ -2475,13 +2517,25 @@
 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
 			   int datasync);
 extern int vfs_fsync(struct file *file, int datasync);
-static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
+
+/*
+ * Sync the bytes written if this was a synchronous write.  Expects ki_pos
+ * to already be updated for the write, and returns either the number of
+ * bytes passed in or an error if syncing the file failed.
+ */
+static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
 {
-	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
-		return 0;
-	return vfs_fsync_range(file, pos, pos + count - 1,
-			       (file->f_flags & __O_SYNC) ? 0 : 1);
+	if (iocb->ki_flags & IOCB_DSYNC) {
+		int ret = vfs_fsync_range(iocb->ki_filp,
+				iocb->ki_pos - count, iocb->ki_pos - 1,
+				(iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
+		if (ret)
+			return ret;
+	}
+
+	return count;
 }
+
 extern void emergency_sync(void);
 extern void emergency_remount(void);
 #ifdef CONFIG_BLOCK
@@ -2580,15 +2634,34 @@
 #endif
 extern int do_pipe_flags(int *, int);
 
+#define __kernel_read_file_id(id) \
+	id(UNKNOWN, unknown)		\
+	id(FIRMWARE, firmware)		\
+	id(MODULE, kernel-module)		\
+	id(KEXEC_IMAGE, kexec-image)		\
+	id(KEXEC_INITRAMFS, kexec-initramfs)	\
+	id(POLICY, security-policy)		\
+	id(MAX_ID, )
+
+#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
+#define __fid_stringify(dummy, str) #str,
+
 enum kernel_read_file_id {
-	READING_FIRMWARE = 1,
-	READING_MODULE,
-	READING_KEXEC_IMAGE,
-	READING_KEXEC_INITRAMFS,
-	READING_POLICY,
-	READING_MAX_ID
+	__kernel_read_file_id(__fid_enumify)
 };
 
+static const char * const kernel_read_file_str[] = {
+	__kernel_read_file_id(__fid_stringify)
+};
+
+static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
+{
+	if (id < 0 || id >= READING_MAX_ID)
+		return kernel_read_file_str[READING_UNKNOWN];
+
+	return kernel_read_file_str[id];
+}
+
 extern int kernel_read(struct file *, loff_t, char *, unsigned long);
 extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
 			    enum kernel_read_file_id);
@@ -2693,7 +2766,7 @@
 extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
 
 ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos);
@@ -2756,18 +2829,17 @@
 
 ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 			     struct block_device *bdev, struct iov_iter *iter,
-			     loff_t offset, get_block_t get_block,
+			     get_block_t get_block,
 			     dio_iodone_t end_io, dio_submit_t submit_io,
 			     int flags);
 
 static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
 					 struct inode *inode,
-					 struct iov_iter *iter, loff_t offset,
+					 struct iov_iter *iter,
 					 get_block_t get_block)
 {
 	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
-				    offset, get_block, NULL, NULL,
-				    DIO_LOCKING | DIO_SKIP_HOLES);
+			get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
 }
 #endif
 
@@ -2933,6 +3005,10 @@
 		res |= IOCB_APPEND;
 	if (io_is_direct(file))
 		res |= IOCB_DIRECT;
+	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
+		res |= IOCB_DSYNC;
+	if (file->f_flags & __O_SYNC)
+		res |= IOCB_SYNC;
 	return res;
 }
 
@@ -3094,6 +3170,13 @@
 	return !IS_DEADDIR(inode);
 }
 
+static inline bool dir_relax_shared(struct inode *inode)
+{
+	inode_unlock_shared(inode);
+	inode_lock_shared(inode);
+	return !IS_DEADDIR(inode);
+}
+
 extern bool path_noexec(const struct path *path);
 extern void inode_nohighmem(struct inode *inode);
 
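
With generic_write_sync() now keyed off IOCB_DSYNC/IOCB_SYNC and an already-advanced ki_pos, the expected call site sits right after ->write_iter() has done the write. A rough sketch of that pattern; foo_file_write_iter() is illustrative and omits the checks a real filesystem would do:

	static ssize_t foo_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		ssize_t ret;

		inode_lock(inode);		/* now down_write(&inode->i_rwsem) */
		ret = __generic_file_write_iter(iocb, from);
		inode_unlock(inode);

		if (ret > 0)
			/* flushes [ki_pos - ret, ki_pos) when IOCB_DSYNC is set */
			ret = generic_write_sync(iocb, ret);
		return ret;
	}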
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index cd91f75..6027f6b 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -263,9 +263,9 @@
 extern struct kmem_cache *fscrypt_info_cachep;
 int fscrypt_initialize(void);
 
-extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *);
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
 extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(struct inode *, struct page *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
 extern int fscrypt_decrypt_page(struct page *);
 extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
 extern void fscrypt_pullback_bio_page(struct page **, bool);
@@ -299,7 +299,8 @@
 #endif
 
 /* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i)
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+							gfp_t f)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
@@ -310,7 +311,7 @@
 }
 
 static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
-						struct page *p)
+						struct page *p, gfp_t f)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dea12a6..66a36a8 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -455,6 +455,7 @@
 int ftrace_test_record(struct dyn_ftrace *rec, int enable);
 void ftrace_run_stop_machine(int command);
 unsigned long ftrace_location(unsigned long ip);
+unsigned long ftrace_location_range(unsigned long start, unsigned long end);
 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
 
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index eecd19b..6270a56 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -62,6 +62,11 @@
 
 /* MAGIC helpers							{{{2 */
 
+static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
+{
+	return nla_put_64bit(skb, attrtype, sizeof(u64), &value, 0);
+}
+
 /* possible field types */
 #define __flg_field(attr_nr, attr_flag, name) \
 	__field(attr_nr, attr_flag, name, NLA_U8, char, \
@@ -80,7 +85,7 @@
 			nla_get_u32, nla_put_u32, true)
 #define __u64_field(attr_nr, attr_flag, name)	\
 	__field(attr_nr, attr_flag, name, NLA_U64, __u64, \
-			nla_get_u64, nla_put_u64, false)
+			nla_get_u64, nla_put_u64_0pad, false)
 #define __str_field(attr_nr, attr_flag, name, maxlen) \
 	__array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
 			nla_strlcpy, nla_put, false)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index bee976f..50882e0 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -20,6 +20,18 @@
 #ifdef CONFIG_GPIOLIB
 
 /**
+ * enum single_ended_mode - mode for single ended operation
+ * @LINE_MODE_PUSH_PULL: normal mode for a GPIO line, drive actively high/low
+ * @LINE_MODE_OPEN_DRAIN: set line to be open drain
+ * @LINE_MODE_OPEN_SOURCE: set line to be open source
+ */
+enum single_ended_mode {
+	LINE_MODE_PUSH_PULL,
+	LINE_MODE_OPEN_DRAIN,
+	LINE_MODE_OPEN_SOURCE,
+};
+
+/**
  * struct gpio_chip - abstract a GPIO controller
  * @label: a functional name for the GPIO device, such as a part
  *	number or the name of the SoC IP-block implementing it.
@@ -38,7 +50,15 @@
  * @set: assigns output value for signal "offset"
  * @set_multiple: assigns output values for multiple signals defined by "mask"
  * @set_debounce: optional hook for setting debounce time for specified gpio in
- *      interrupt triggered gpio chips
+ *	interrupt triggered gpio chips
+ * @set_single_ended: optional hook for setting a line as open drain, open
+ *	source, or non-single ended (restore from open drain/source to normal
+ *	push-pull mode). This should be implemented if the hardware supports
+ *	open drain or open source settings. GPIOlib will otherwise try to
+ *	emulate open drain/source by not actively driving lines high/low
+ *	if a consumer requests this. The driver may return -ENOTSUPP if,
+ *	e.g., it supports just open drain but not open source and is called
+ *	with LINE_MODE_OPEN_SOURCE as the mode argument.
  * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
  *	implementation may not sleep
  * @dbg_show: optional routine to show contents in debugfs; default code
@@ -130,6 +150,9 @@
 	int			(*set_debounce)(struct gpio_chip *chip,
 						unsigned offset,
 						unsigned debounce);
+	int			(*set_single_ended)(struct gpio_chip *chip,
+						unsigned offset,
+						enum single_ended_mode mode);
 
 	int			(*to_irq)(struct gpio_chip *chip,
 						unsigned offset);
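
A controller that supports open drain but not open source in hardware would implement the new hook roughly as below; struct foo_gpio and foo_chip_set_od() are hypothetical, and LINE_MODE_OPEN_SOURCE is rejected with -ENOTSUPP as the kernel-doc above permits:

	static int foo_gpio_set_single_ended(struct gpio_chip *chip,
					     unsigned offset,
					     enum single_ended_mode mode)
	{
		struct foo_gpio *foo = container_of(chip, struct foo_gpio, chip);

		switch (mode) {
		case LINE_MODE_OPEN_DRAIN:
			return foo_chip_set_od(foo, offset, true);	/* hypothetical */
		case LINE_MODE_PUSH_PULL:
			return foo_chip_set_od(foo, offset, false);
		default:
			return -ENOTSUPP;	/* no open source support in hardware */
		}
	}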
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 1afde47..79c52fa 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -32,12 +32,28 @@
 #error Wordsize not 32 or 64
 #endif
 
+/*
+ * The above primes are actively bad for hashing, since they are
+ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
+ * real problems. Besides, the "prime" part is pointless for the
+ * multiplicative hash.
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties.
+ *
+ * These are the negative: (1 - phi) = phi^2 = (3 - sqrt(5))/2.
+ * (See Knuth vol 3, section 6.4, exercise 9.)
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
 static __always_inline u64 hash_64(u64 val, unsigned int bits)
 {
 	u64 hash = val;
 
-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
-	hash = hash * GOLDEN_RATIO_PRIME_64;
+#if BITS_PER_LONG == 64
+	hash = hash * GOLDEN_RATIO_64;
 #else
 	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
 	u64 n = hash;
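
The point of the golden-ratio multiplier is that a single multiply followed by keeping the top bits spreads nearby inputs across buckets. A minimal user-space sketch of the 64-bit scheme, using the constant defined above (an illustration, not the kernel's full hash_64()):

	#include <stdint.h>

	#define GOLDEN_RATIO_64 0x61C8864680B583EBull

	/* Reduce a 64-bit value to a 'bits'-wide bucket index (0 < bits < 64). */
	static inline uint64_t bucket_of(uint64_t val, unsigned int bits)
	{
		return (val * GOLDEN_RATIO_64) >> (64 - bits);
	}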
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 79b0ef6..d7b9e53 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -127,7 +127,7 @@
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
 		return __pmd_trans_huge_lock(pmd, vma);
 	else
-		return false;
+		return NULL;
 }
 static inline int hpage_nr_pages(struct page *page)
 {
@@ -152,6 +152,7 @@
 }
 
 struct page *get_huge_zero_page(void);
+void put_huge_zero_page(void);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
@@ -208,6 +209,10 @@
 	return false;
 }
 
+static inline void put_huge_zero_page(void)
+{
+	BUILD_BUG();
+}
 
 static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
 		unsigned long addr, pmd_t *pmd, int flags)
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index b5f9a00..d4c1d12 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -27,22 +27,49 @@
 
 #ifdef __KERNEL__
 
-/*
- * Called to create a i2c bus on a multiplexed bus segment.
- * The mux_dev and chan_id parameters are passed to the select
- * and deselect callback functions to perform hardware-specific
- * mux control.
- */
-struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
-				struct device *mux_dev,
-				void *mux_priv, u32 force_nr, u32 chan_id,
-				unsigned int class,
-				int (*select) (struct i2c_adapter *,
-					       void *mux_dev, u32 chan_id),
-				int (*deselect) (struct i2c_adapter *,
-						 void *mux_dev, u32 chan_id));
+#include <linux/bitops.h>
 
-void i2c_del_mux_adapter(struct i2c_adapter *adap);
+struct i2c_mux_core {
+	struct i2c_adapter *parent;
+	struct device *dev;
+	bool mux_locked;
+
+	void *priv;
+
+	int (*select)(struct i2c_mux_core *, u32 chan_id);
+	int (*deselect)(struct i2c_mux_core *, u32 chan_id);
+
+	int num_adapters;
+	int max_adapters;
+	struct i2c_adapter *adapter[0];
+};
+
+struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
+				   struct device *dev, int max_adapters,
+				   int sizeof_priv, u32 flags,
+				   int (*select)(struct i2c_mux_core *, u32),
+				   int (*deselect)(struct i2c_mux_core *, u32));
+
+/* flags for i2c_mux_alloc */
+#define I2C_MUX_LOCKED BIT(0)
+
+static inline void *i2c_mux_priv(struct i2c_mux_core *muxc)
+{
+	return muxc->priv;
+}
+
+struct i2c_adapter *i2c_root_adapter(struct device *dev);
+
+/*
+ * Called to create an i2c bus on a multiplexed bus segment.
+ * The chan_id parameter is passed to the select and deselect
+ * callback functions to perform hardware-specific mux control.
+ */
+int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
+			u32 force_nr, u32 chan_id,
+			unsigned int class);
+
+void i2c_mux_del_adapters(struct i2c_mux_core *muxc);
 
 #endif /* __KERNEL__ */
 
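
Converted mux drivers now allocate a single i2c_mux_core and hang their child adapters off it instead of calling the removed i2c_add_mux_adapter() once per channel. A rough sketch of a probe path under the new API; struct foo_mux, foo_select() and foo_deselect() stand in for the driver's own pieces:

	static int foo_mux_setup(struct i2c_adapter *parent, struct device *dev,
				 int nchans)
	{
		struct i2c_mux_core *muxc;
		struct foo_mux *priv;
		int i, ret;

		muxc = i2c_mux_alloc(parent, dev, nchans, sizeof(*priv), 0,
				     foo_select, foo_deselect);
		if (!muxc)
			return -ENOMEM;
		priv = i2c_mux_priv(muxc);

		for (i = 0; i < nchans; i++) {
			ret = i2c_mux_add_adapter(muxc, 0, i, 0);	/* dynamic bus number */
			if (ret) {
				i2c_mux_del_adapters(muxc);
				return ret;
			}
		}
		return 0;
	}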
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 200cf13b..96a25ae 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -524,6 +524,7 @@
 
 	/* data fields that are valid for all devices	*/
 	struct rt_mutex bus_lock;
+	struct rt_mutex mux_lock;
 
 	int timeout;			/* in jiffies */
 	int retries;
@@ -538,6 +539,10 @@
 
 	struct i2c_bus_recovery_info *bus_recovery_info;
 	const struct i2c_adapter_quirks *quirks;
+
+	void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
+	int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
+	void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
 };
 #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
 
@@ -567,8 +572,44 @@
 int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
 
 /* Adapter locking functions, exported for shared pin cases */
-void i2c_lock_adapter(struct i2c_adapter *);
-void i2c_unlock_adapter(struct i2c_adapter *);
+#define I2C_LOCK_ROOT_ADAPTER BIT(0)
+#define I2C_LOCK_SEGMENT      BIT(1)
+
+/**
+ * i2c_lock_bus - Get exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT
+ *	locks only this branch in the adapter tree
+ */
+static inline void
+i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+	adapter->lock_bus(adapter, flags);
+}
+
+/**
+ * i2c_unlock_bus - Release exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT
+ *	unlocks only this branch in the adapter tree
+ */
+static inline void
+i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+	adapter->unlock_bus(adapter, flags);
+}
+
+static inline void
+i2c_lock_adapter(struct i2c_adapter *adapter)
+{
+	i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
+}
+
+static inline void
+i2c_unlock_adapter(struct i2c_adapter *adapter)
+{
+	i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
+}
 
 /*flags for the client struct: */
 #define I2C_CLIENT_PEC		0x04	/* Use Packet Error Checking */
@@ -654,6 +695,11 @@
 	return adap->nr;
 }
 
+static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
+{
+	return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
+}
+
 /**
  * module_i2c_driver() - Helper macro for registering a modular I2C driver
  * @__i2c_driver: i2c_driver struct
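
For adapter drivers, the new i2c_8bit_addr_from_msg() helper replaces open-coded "(msg->addr << 1) | rd" shifts when building the on-wire address byte. A sketch of its use inside a hypothetical controller's transfer path; struct foo_i2c and foo_write_byte() are stand-ins for the hardware access:

	static int foo_send_address(struct foo_i2c *bus, struct i2c_msg *msg)
	{
		u8 addr = i2c_8bit_addr_from_msg(msg);	/* 7-bit address << 1 | R/W bit */

		return foo_write_byte(bus, addr);	/* hypothetical register write */
	}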
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h
deleted file mode 100644
index 52baa79..0000000
--- a/include/linux/i2c/sx150x.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Driver for the Semtech SX150x I2C GPIO Expanders
- *
- * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#ifndef __LINUX_I2C_SX150X_H
-#define __LINUX_I2C_SX150X_H
-
-/**
- * struct sx150x_platform_data - config data for SX150x driver
- * @gpio_base: The index number of the first GPIO assigned to this
- *             GPIO expander.  The expander will create a block of
- *             consecutively numbered gpios beginning at the given base,
- *             with the size of the block depending on the model of the
- *             expander chip.
- * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO
- *                instead of as an oscillator, increasing the size of the
- *                GP(I)O pool created by this expander by one.  The
- *                output-only GPO pin will be added at the end of the block.
- * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor
- *                 for each IO line in the expander.  Setting the bit at
- *                 position n will enable the pull-up for the IO at
- *                 the corresponding offset.  For chips with fewer than
- *                 16 IO pins, high-end bits are ignored.
- * @io_pulldn_ena: A bit-mask which enables-or disables the pull-down
- *                 resistor for each IO line in the expander. Setting the
- *                 bit at position n will enable the pull-down for the IO at
- *                 the corresponding offset.  For chips with fewer than
- *                 16 IO pins, high-end bits are ignored.
- * @io_open_drain_ena: A bit-mask which enables-or disables open-drain
- *                     operation for each IO line in the expander. Setting the
- *                     bit at position n enables open-drain operation for
- *                     the IO at the corresponding offset.  Clearing the bit
- *                     enables regular push-pull operation for that IO.
- *                     For chips with fewer than 16 IO pins, high-end bits
- *                     are ignored.
- * @io_polarity: A bit-mask which enables polarity inversion for each IO line
- *               in the expander.  Setting the bit at position n inverts
- *               the polarity of that IO line, while clearing it results
- *               in normal polarity. For chips with fewer than 16 IO pins,
- *               high-end bits are ignored.
- * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line
- *               is connected, via which it reports interrupt events
- *               across all GPIO lines.  This must be a real,
- *               pre-existing IRQ line.
- *               Setting this value < 0 disables the irq_chip functionality
- *               of the driver.
- * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based
- *            IRQ lines will appear.  Similarly to gpio_base, the expander
- *            will create a block of irqs beginning at this number.
- *            This value is ignored if irq_summary is < 0.
- * @reset_during_probe: If set to true, the driver will trigger a full
- *                      reset of the chip at the beginning of the probe
- *                      in order to place it in a known state.
- */
-struct sx150x_platform_data {
-	unsigned gpio_base;
-	bool     oscio_is_gpo;
-	u16      io_pullup_ena;
-	u16      io_pulldn_ena;
-	u16      io_open_drain_ena;
-	u16      io_polarity;
-	int      irq_summary;
-	unsigned irq_base;
-	bool     reset_during_probe;
-};
-
-#endif /* __LINUX_I2C_SX150X_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 3b1f6ce..b118744 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -7,6 +7,7 @@
  * Copyright (c) 2005, Devicescape Software, Inc.
  * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
  * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -163,6 +164,9 @@
 /* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
 #define IEEE80211_MAX_FRAME_LEN		2352
 
+/* Maximal size of an A-MSDU that can be transported in a HT BA session */
+#define IEEE80211_MAX_MPDU_LEN_HT_BA		4095
+
 /* Maximal size of an A-MSDU */
 #define IEEE80211_MAX_MPDU_LEN_HT_3839		3839
 #define IEEE80211_MAX_MPDU_LEN_HT_7935		7935
@@ -637,6 +641,16 @@
 	return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
 }
 
+/**
+ * ieee80211_is_frag - check if a frame is a fragment
+ * @hdr: 802.11 header of the frame
+ */
+static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
+{
+	return ieee80211_has_morefrags(hdr->frame_control) ||
+	       hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
+}
+
 struct ieee80211s_hdr {
 	u8 flags;
 	u8 ttl;
@@ -1011,6 +1025,16 @@
 					u8 tpc_elem_length;
 					struct ieee80211_tpc_report_ie tpc;
 				} __packed tpc_report;
+				struct {
+					u8 action_code;
+					u8 dialog_token;
+					u8 follow_up;
+					u8 tod[6];
+					u8 toa[6];
+					__le16 tod_error;
+					__le16 toa_error;
+					u8 variable[0];
+				} __packed ftm;
 			} u;
 		} __packed action;
 	} u;
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index d3e4156..acedbb6 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -47,6 +47,7 @@
 #define IEEE802154_ADDR_SHORT_UNSPEC	0xfffe
 
 #define IEEE802154_EXTENDED_ADDR_LEN	8
+#define IEEE802154_SHORT_ADDR_LEN	2
 
 #define IEEE802154_LIFS_PERIOD		40
 #define IEEE802154_SIFS_PERIOD		12
@@ -218,6 +219,7 @@
 /* frame control handling */
 #define IEEE802154_FCTL_FTYPE		0x0003
 #define IEEE802154_FCTL_ACKREQ		0x0020
+#define IEEE802154_FCTL_SECEN		0x0004
 #define IEEE802154_FCTL_INTRA_PAN	0x0040
 
 #define IEEE802154_FTYPE_DATA		0x0001
@@ -233,6 +235,15 @@
 }
 
 /**
+ * ieee802154_is_secen - check if Security bit is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_secen(__le16 fc)
+{
+	return fc & cpu_to_le16(IEEE802154_FCTL_SECEN);
+}
+
+/**
  * ieee802154_is_ackreq - check if acknowledgment request bit is set
  * @fc: frame control bytes in little-endian byteorder
  */
@@ -260,17 +271,17 @@
  *
  * @len: psdu len with (MHR + payload + MFR)
  */
-static inline bool ieee802154_is_valid_psdu_len(const u8 len)
+static inline bool ieee802154_is_valid_psdu_len(u8 len)
 {
 	return (len == IEEE802154_ACK_PSDU_LEN ||
 		(len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
 }
 
 /**
- * ieee802154_is_valid_psdu_len - check if extended addr is valid
+ * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid
  * @addr: extended addr to check
  */
-static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
+static inline bool ieee802154_is_valid_extended_unicast_addr(__le64 addr)
 {
 	/* Bail out if the address is all zero, or if the group
 	 * address bit is set.
@@ -280,6 +291,34 @@
 }
 
 /**
+ * ieee802154_is_broadcast_short_addr - check if short addr is broadcast
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_broadcast_short_addr(__le16 addr)
+{
+	return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST));
+}
+
+/**
+ * ieee802154_is_unspec_short_addr - check if short addr is unspecified
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_unspec_short_addr(__le16 addr)
+{
+	return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC));
+}
+
+/**
+ * ieee802154_is_valid_src_short_addr - check if source short address is valid
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_valid_src_short_addr(__le16 addr)
+{
+	return !(ieee802154_is_broadcast_short_addr(addr) ||
+		 ieee802154_is_unspec_short_addr(addr));
+}
+
+/**
  * ieee802154_random_extended_addr - generates a random extended address
  * @addr: extended addr pointer to place the random address
  */
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index d556973..548fd53 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -28,6 +28,11 @@
 	return (struct ethhdr *)skb_mac_header(skb);
 }
 
+static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
+{
+	return (struct ethhdr *)skb_inner_mac_header(skb);
+}
+
 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
 
 extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
diff --git a/include/linux/ima.h b/include/linux/ima.h
index e6516cb..0eb7c2e 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -21,6 +21,7 @@
 extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
 extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
 			      enum kernel_read_file_id id);
+extern void ima_post_path_mknod(struct dentry *dentry);
 
 #else
 static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -54,6 +55,11 @@
 	return 0;
 }
 
+static inline void ima_post_path_mknod(struct dentry *dentry)
+{
+	return;
+}
+
 #endif /* CONFIG_IMA */
 
 #ifdef CONFIG_IMA_APPRAISE
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index 11d7e84..defcc46 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -21,6 +21,23 @@
 	writel(val, addr);
 }
 
+static inline __u64 hi_lo_readq_relaxed(const volatile void __iomem *addr)
+{
+	const volatile u32 __iomem *p = addr;
+	u32 low, high;
+
+	high = readl_relaxed(p + 1);
+	low = readl_relaxed(p);
+
+	return low + ((u64)high << 32);
+}
+
+static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
+{
+	writel_relaxed(val >> 32, addr + 4);
+	writel_relaxed(val, addr);
+}
+
 #ifndef readq
 #define readq hi_lo_readq
 #endif
@@ -29,4 +46,12 @@
 #define writeq hi_lo_writeq
 #endif
 
+#ifndef readq_relaxed
+#define readq_relaxed hi_lo_readq_relaxed
+#endif
+
+#ifndef writeq_relaxed
+#define writeq_relaxed hi_lo_writeq_relaxed
+#endif
+
 #endif	/* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index 1a4315f..084461a 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -21,6 +21,23 @@
 	writel(val >> 32, addr + 4);
 }
 
+static inline __u64 lo_hi_readq_relaxed(const volatile void __iomem *addr)
+{
+	const volatile u32 __iomem *p = addr;
+	u32 low, high;
+
+	low = readl_relaxed(p);
+	high = readl_relaxed(p + 1);
+
+	return low + ((u64)high << 32);
+}
+
+static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
+{
+	writel_relaxed(val, addr);
+	writel_relaxed(val >> 32, addr + 4);
+}
+
 #ifndef readq
 #define readq lo_hi_readq
 #endif
@@ -29,4 +46,12 @@
 #define writeq lo_hi_writeq
 #endif
 
+#ifndef readq_relaxed
+#define readq_relaxed lo_hi_readq_relaxed
+#endif
+
+#ifndef writeq_relaxed
+#define writeq_relaxed lo_hi_writeq_relaxed
+#endif
+
 #endif	/* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a5c539f..664683a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -30,6 +30,7 @@
 #define IOMMU_WRITE	(1 << 1)
 #define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
 #define IOMMU_NOEXEC	(1 << 3)
+#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
 
 struct iommu_ops;
 struct iommu_group;
@@ -78,6 +79,7 @@
 struct iommu_domain {
 	unsigned type;
 	const struct iommu_ops *ops;
+	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
 	iommu_fault_handler_t handler;
 	void *handler_token;
 	struct iommu_domain_geometry geometry;
@@ -155,8 +157,7 @@
  * @domain_set_windows: Set the number of windows for a domain
  * @domain_get_windows: Return the number of windows for a domain
  * @of_xlate: add OF master IDs to iommu grouping
- * @pgsize_bitmap: bitmap of supported page sizes
- * @priv: per-instance data private to the iommu driver
+ * @pgsize_bitmap: bitmap of all possible supported page sizes
  */
 struct iommu_ops {
 	bool (*capable)(enum iommu_cap);
@@ -195,12 +196,9 @@
 	/* Get the number of windows per domain */
 	u32 (*domain_get_windows)(struct iommu_domain *domain);
 
-#ifdef CONFIG_OF_IOMMU
 	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
-#endif
 
 	unsigned long pgsize_bitmap;
-	void *priv;
 };
 
 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 0b65543..6230064 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -26,6 +26,9 @@
 
 /*
  * IO resources have these defined flags.
+ *
+ * PCI devices expose these flags to userspace in the "resource" sysfs file,
+ * so don't move them.
  */
 #define IORESOURCE_BITS		0x000000ff	/* Bus-specific bits */
 
@@ -110,6 +113,7 @@
 
 /* PCI control bits.  Shares IORESOURCE_BITS with above PCI ROM.  */
 #define IORESOURCE_PCI_FIXED		(1<<4)	/* Do not move resource */
+#define IORESOURCE_PCI_EA_BEI		(1<<5)	/* BAR Equivalent Indicator */
 
 /*
  * I/O Resource Descriptors
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7edc14f..5c91b0b 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -63,7 +63,8 @@
 	} stable_secret;
 	__s32		use_oif_addrs_only;
 	__s32		keep_addr_on_down;
-	void		*sysctl;
+
+	struct ctl_table_header *sysctl_header;
 };
 
 struct ipv6_params {
@@ -117,14 +118,29 @@
 #define IP6SKB_ROUTERALERT	8
 #define IP6SKB_FRAGMENTED      16
 #define IP6SKB_HOPBYHOP        32
+#define IP6SKB_L3SLAVE         64
 };
 
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+static inline bool skb_l3mdev_slave(__u16 flags)
+{
+	return flags & IP6SKB_L3SLAVE;
+}
+#else
+static inline bool skb_l3mdev_slave(__u16 flags)
+{
+	return false;
+}
+#endif
+
 #define IP6CB(skb)	((struct inet6_skb_parm*)((skb)->cb))
 #define IP6CBMTU(skb)	((struct ip6_mtuinfo *)((skb)->cb))
 
 static inline int inet6_iif(const struct sk_buff *skb)
 {
-	return IP6CB(skb)->iif;
+	bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+
+	return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
 }
 
 struct tcp6_request_sock {
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c4de623..4d758a7 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -530,6 +530,10 @@
 }
 
 extern int irq_set_percpu_devid(unsigned int irq);
+extern int irq_set_percpu_devid_partition(unsigned int irq,
+					  const struct cpumask *affinity);
+extern int irq_get_percpu_devid_partition(unsigned int irq,
+					  struct cpumask *affinity);
 
 extern void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h
index 1551b5b..f0f5d26 100644
--- a/include/linux/irqbypass.h
+++ b/include/linux/irqbypass.h
@@ -34,7 +34,7 @@
 /**
  * struct irq_bypass_producer - IRQ bypass producer definition
  * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer
+ * @token: opaque token to match between producer and consumer (non-NULL)
  * @irq: Linux IRQ number for the producer device
  * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
  * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
@@ -60,7 +60,7 @@
 /**
  * struct irq_bypass_consumer - IRQ bypass consumer definition
  * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer
+ * @token: opaque token to match between producer and consumer (non-NULL)
  * @add_producer: Connect the IRQ consumer to an IRQ producer
  * @del_producer: Disconnect the IRQ consumer from an IRQ producer
  * @stop: Perform any quiesce operations necessary prior to add/del (optional)
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
new file mode 100644
index 0000000..c647b05
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -0,0 +1,34 @@
+/*
+ * include/linux/irqchip/arm-gic-common.h
+ *
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H
+#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+enum gic_type {
+	GIC_V2,
+	GIC_V3,
+};
+
+struct gic_kvm_info {
+	/* GIC type */
+	enum gic_type	type;
+	/* Virtual CPU interface */
+	struct resource vcpu;
+	/* Interrupt number */
+	unsigned int	maint_irq;
+	/* Virtual control interface */
+	struct resource vctrl;
+};
+
+const struct gic_kvm_info *gic_get_kvm_info(void);
+
+#endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index d5d798b..9e6fdd3 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -102,8 +102,6 @@
 #define GICR_SYNCR			0x00C0
 #define GICR_MOVLPIR			0x0100
 #define GICR_MOVALLR			0x0110
-#define GICR_ISACTIVER			GICD_ISACTIVER
-#define GICR_ICACTIVER			GICD_ICACTIVER
 #define GICR_IDREGS			GICD_IDREGS
 #define GICR_PIDR2			GICD_PIDR2
 
diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h
new file mode 100644
index 0000000..87433a5
--- /dev/null
+++ b/include/linux/irqchip/irq-partition-percpu.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fwnode.h>
+#include <linux/cpumask.h>
+#include <linux/irqdomain.h>
+
+struct partition_affinity {
+	cpumask_t			mask;
+	void				*partition_id;
+};
+
+struct partition_desc;
+
+#ifdef CONFIG_PARTITION_PERCPU
+int partition_translate_id(struct partition_desc *desc, void *partition_id);
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+					     struct partition_affinity *parts,
+					     int nr_parts,
+					     int chained_irq,
+					     const struct irq_domain_ops *ops);
+struct irq_domain *partition_get_domain(struct partition_desc *dsc);
+#else
+static inline int partition_translate_id(struct partition_desc *desc,
+					 void *partition_id)
+{
+	return -EINVAL;
+}
+
+static inline
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+					     struct partition_affinity *parts,
+					     int nr_parts,
+					     int chained_irq,
+					     const struct irq_domain_ops *ops)
+{
+	return NULL;
+}
+
+static inline
+struct irq_domain *partition_get_domain(struct partition_desc *dsc)
+{
+	return NULL;
+}
+#endif
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 80f89e4..81f930b 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -103,6 +103,7 @@
 #define GIC_VPE_SWINT0_MAP_OFS		0x0054
 #define GIC_VPE_SWINT1_MAP_OFS		0x0058
 #define GIC_VPE_OTHER_ADDR_OFS		0x0080
+#define GIC_VP_IDENT_OFS		0x0088
 #define GIC_VPE_WD_CONFIG0_OFS		0x0090
 #define GIC_VPE_WD_COUNT0_OFS		0x0094
 #define GIC_VPE_WD_INITIAL0_OFS		0x0098
@@ -211,6 +212,10 @@
 #define GIC_VPE_SMASK_FDC_SHF		6
 #define GIC_VPE_SMASK_FDC_MSK		(MSK(1) << GIC_VPE_SMASK_FDC_SHF)
 
+/* GIC_VP_IDENT fields */
+#define GIC_VP_IDENT_VCNUM_SHF		0
+#define GIC_VP_IDENT_VCNUM_MSK		(MSK(6) << GIC_VP_IDENT_VCNUM_SHF)
+
 /* GIC nomenclature for Core Interrupt Pins. */
 #define GIC_CPU_INT0		0 /* Core Interrupt 2 */
 #define GIC_CPU_INT1		1 /* .		      */
@@ -278,4 +283,16 @@
 
 #endif /* CONFIG_MIPS_GIC */
 
+/**
+ * gic_read_local_vp_id() - read the local VP's VCNUM
+ *
+ * Read the VCNUM of the local VP from the GIC_VP_IDENT register and
+ * return it to the caller. This ID should be used to refer to the VP
+ * via the GIC's VP-other region, or when calculating an offset to a
+ * bit representing the VP in interrupt masks.
+ *
+ * Return: The VCNUM value for the local VP.
+ */
+extern unsigned gic_read_local_vp_id(void);
+
 #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index dcca77c..b51beeb 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -66,6 +66,7 @@
 	int			threads_handled_last;
 	raw_spinlock_t		lock;
 	struct cpumask		*percpu_enabled;
+	const struct cpumask	*percpu_affinity;
 #ifdef CONFIG_SMP
 	const struct cpumask	*affinity_hint;
 	struct irq_affinity_notify *affinity_notify;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 2aed043..f1f36e0 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -96,6 +96,8 @@
 struct irq_domain_ops {
 	int (*match)(struct irq_domain *d, struct device_node *node,
 		     enum irq_domain_bus_token bus_token);
+	int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
+		      enum irq_domain_bus_token bus_token);
 	int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
 	void (*unmap)(struct irq_domain *d, unsigned int virq);
 	int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -211,7 +213,7 @@
 					 irq_hw_number_t first_hwirq,
 					 const struct irq_domain_ops *ops,
 					 void *host_data);
-extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 						   enum irq_domain_bus_token bus_token);
 extern void irq_set_default_host(struct irq_domain *host);
 extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
@@ -227,6 +229,17 @@
 	return fwnode && fwnode->type == FWNODE_IRQCHIP;
 }
 
+static inline
+struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+					    enum irq_domain_bus_token bus_token)
+{
+	struct irq_fwspec fwspec = {
+		.fwnode = fwnode,
+	};
+
+	return irq_find_matching_fwspec(&fwspec, bus_token);
+}
+
 static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
 							enum irq_domain_bus_token bus_token)
 {
@@ -346,9 +359,8 @@
 			irq_hw_number_t *out_hwirq, unsigned int *out_type);
 
 /* IPI functions */
-unsigned int irq_reserve_ipi(struct irq_domain *domain,
-			     const struct cpumask *dest);
-void irq_destroy_ipi(unsigned int irq);
+int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
+int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
 
 /* V2 interfaces to support hierarchy IRQ domains. */
 extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
index 548d553..10923d7 100644
--- a/include/linux/iscsi_boot_sysfs.h
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -64,6 +64,12 @@
 	ISCSI_BOOT_INI_END_MARKER,
 };
 
+enum iscsi_boot_acpitbl_properties_enum {
+	ISCSI_BOOT_ACPITBL_SIGNATURE,
+	ISCSI_BOOT_ACPITBL_OEM_ID,
+	ISCSI_BOOT_ACPITBL_OEM_TABLE_ID,
+};
+
 struct attribute_group;
 
 struct iscsi_boot_kobj {
@@ -127,6 +133,13 @@
 			 umode_t (*is_visible) (void *data, int type),
 			 void (*release) (void *data));
 
+struct iscsi_boot_kobj *
+iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index,
+			  void *data,
+			  ssize_t (*show)(void *data, int type, char *buf),
+			  umode_t (*is_visible)(void *data, int type),
+			  void (*release)(void *data));
+
 struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name);
 struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno);
 void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset);
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index c06c442..30f089e 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -152,6 +152,8 @@
 	int (*rmdir)(struct kernfs_node *kn);
 	int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
 		      const char *new_name);
+	int (*show_path)(struct seq_file *sf, struct kernfs_node *kn,
+			 struct kernfs_root *root);
 };
 
 struct kernfs_root {
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 7463355..eaee981 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,7 +45,6 @@
 	size_t		datalen;	/* Raw datalen */
 	size_t		quotalen;	/* Quota length for proposed payload */
 	time_t		expiry;		/* Expiry time of key */
-	bool		trusted;	/* True if key is trusted */
 };
 
 typedef int (*request_key_actor_t)(struct key_construction *key,
diff --git a/include/linux/key.h b/include/linux/key.h
index 5f5b112..7229147 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -173,11 +173,9 @@
 #define KEY_FLAG_NEGATIVE	5	/* set if key is negative */
 #define KEY_FLAG_ROOT_CAN_CLEAR	6	/* set if key can be cleared by root without permission */
 #define KEY_FLAG_INVALIDATED	7	/* set if key has been invalidated */
-#define KEY_FLAG_TRUSTED	8	/* set if key is trusted */
-#define KEY_FLAG_TRUSTED_ONLY	9	/* set if keyring only accepts links to trusted keys */
-#define KEY_FLAG_BUILTIN	10	/* set if key is builtin */
-#define KEY_FLAG_ROOT_CAN_INVAL	11	/* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP		12	/* set if key should not be removed */
+#define KEY_FLAG_BUILTIN	8	/* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL	9	/* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP		10	/* set if key should not be removed */
 
 	/* the key type and key description string
 	 * - the desc is used to match a key against search criteria
@@ -205,6 +203,20 @@
 		};
 		int reject_error;
 	};
+
+	/* This is set on a keyring to restrict the addition of a link to a
+	 * key to that keyring.  If this method isn't provided then it is
+	 * assumed that the keyring is open to any addition.  It is ignored
+	 * for non-keyring keys.
+	 *
+	 * This is intended for use with rings of trusted keys whereby addition
+	 * to the keyring needs to be controlled.  KEY_ALLOC_BYPASS_RESTRICTION
+	 * overrides this, allowing the kernel to add extra keys without
+	 * restriction.
+	 */
+	int (*restrict_link)(struct key *keyring,
+			     const struct key_type *type,
+			     const union key_payload *payload);
 };
 
 extern struct key *key_alloc(struct key_type *type,
@@ -212,14 +224,17 @@
 			     kuid_t uid, kgid_t gid,
 			     const struct cred *cred,
 			     key_perm_t perm,
-			     unsigned long flags);
+			     unsigned long flags,
+			     int (*restrict_link)(struct key *,
+						  const struct key_type *,
+						  const union key_payload *));
 
 
-#define KEY_ALLOC_IN_QUOTA	0x0000	/* add to quota, reject if would overrun */
-#define KEY_ALLOC_QUOTA_OVERRUN	0x0001	/* add to quota, permit even if overrun */
-#define KEY_ALLOC_NOT_IN_QUOTA	0x0002	/* not in quota */
-#define KEY_ALLOC_TRUSTED	0x0004	/* Key should be flagged as trusted */
-#define KEY_ALLOC_BUILT_IN	0x0008	/* Key is built into kernel */
+#define KEY_ALLOC_IN_QUOTA		0x0000	/* add to quota, reject if would overrun */
+#define KEY_ALLOC_QUOTA_OVERRUN		0x0001	/* add to quota, permit even if overrun */
+#define KEY_ALLOC_NOT_IN_QUOTA		0x0002	/* not in quota */
+#define KEY_ALLOC_BUILT_IN		0x0004	/* Key is built into kernel */
+#define KEY_ALLOC_BYPASS_RESTRICTION	0x0008	/* Override the check on restricted keyrings */
 
 extern void key_revoke(struct key *key);
 extern void key_invalidate(struct key *key);
@@ -288,8 +303,15 @@
 				 const struct cred *cred,
 				 key_perm_t perm,
 				 unsigned long flags,
+				 int (*restrict_link)(struct key *,
+						      const struct key_type *,
+						      const union key_payload *),
 				 struct key *dest);
 
+extern int restrict_link_reject(struct key *keyring,
+				const struct key_type *type,
+				const union key_payload *payload);
+
 extern int keyring_clear(struct key *keyring);
 
 extern key_ref_t keyring_search(key_ref_t keyring,
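
With the trusted-key flags gone, link policy now lives in a per-keyring restrict_link callback supplied at allocation time; restrict_link_reject() above refuses all additions. A sketch of a keyring that only accepts links to keys of one type; the policy and the ".foo_keys" name are made up for illustration:

	static int restrict_to_user_keys(struct key *keyring,
					 const struct key_type *type,
					 const union key_payload *payload)
	{
		/* example policy: only "user" type keys may be linked in */
		return strcmp(type->name, "user") ? -EOPNOTSUPP : 0;
	}

	keyring = keyring_alloc(".foo_keys", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				current_cred(),
				(KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
				KEY_ALLOC_NOT_IN_QUOTA,
				restrict_to_user_keys, NULL);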
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5276fe0..b1fa8f1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -35,6 +35,10 @@
 
 #include <asm/kvm_host.h>
 
+#ifndef KVM_MAX_VCPU_ID
+#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#endif
+
 /*
  * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
  * in kvm, other bits are visible for userspace which are defined in
@@ -225,6 +229,7 @@
 	sigset_t sigset;
 	struct kvm_vcpu_stat stat;
 	unsigned int halt_poll_ns;
+	bool valid_wakeup;
 
 #ifdef CONFIG_HAS_IOMEM
 	int mmio_needed;
@@ -447,12 +452,13 @@
 
 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
 {
-	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *vcpu = NULL;
 	int i;
 
-	if (id < 0 || id >= KVM_MAX_VCPUS)
+	if (id < 0)
 		return NULL;
-	vcpu = kvm_get_vcpu(kvm, id);
+	if (id < KVM_MAX_VCPUS)
+		vcpu = kvm_get_vcpu(kvm, id);
 	if (vcpu && vcpu->vcpu_id == id)
 		return vcpu;
 	kvm_for_each_vcpu(i, vcpu, kvm)
@@ -651,6 +657,7 @@
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
@@ -1091,6 +1098,11 @@
 
 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
 {
+	/*
+	 * Ensure the rest of the request is published to kvm_check_request's
+	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
+	 */
+	smp_wmb();
 	set_bit(req, &vcpu->requests);
 }
 
@@ -1098,6 +1110,12 @@
 {
 	if (test_bit(req, &vcpu->requests)) {
 		clear_bit(req, &vcpu->requests);
+
+		/*
+		 * Ensure the rest of the request is visible to kvm_check_request's
+		 * caller.  Paired with the smp_wmb in kvm_make_request.
+		 */
+		smp_mb__after_atomic();
 		return true;
 	} else {
 		return false;
@@ -1169,6 +1187,7 @@
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+bool kvm_arch_has_irq_bypass(void);
 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
 			   struct irq_bypass_producer *);
 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
@@ -1179,4 +1198,18 @@
 				  uint32_t guest_irq, bool set);
 #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
 
+#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
+/* If we wake up during the poll time, was it a successful poll? */
+static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
+{
+	return vcpu->valid_wakeup;
+}
+
+#else
+static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
+{
+	return true;
+}
+#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+
 #endif
diff --git a/include/linux/leds.h b/include/linux/leds.h
index f203a8f..d2b1306 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -50,6 +50,7 @@
 #define LED_SYSFS_DISABLE	(1 << 22)
 #define LED_DEV_CAP_FLASH	(1 << 23)
 #define LED_HW_PLUGGABLE	(1 << 24)
+#define LED_PANIC_INDICATOR	(1 << 25)
 
 	/* Set LED brightness level
 	 * Must not sleep. Use brightness_set_blocking for drivers
@@ -329,6 +330,12 @@
 static inline void ledtrig_ide_activity(void) {}
 #endif
 
+#ifdef CONFIG_LEDS_TRIGGER_MTD
+extern void ledtrig_mtd_activity(void);
+#else
+static inline void ledtrig_mtd_activity(void) {}
+#endif
+
 #if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE)
 extern void ledtrig_flash_ctrl(bool on);
 extern void ledtrig_torch_ctrl(bool on);
@@ -358,6 +365,7 @@
 	unsigned 	gpio;
 	unsigned	active_low : 1;
 	unsigned	retain_state_suspended : 1;
+	unsigned	panic_indicator : 1;
 	unsigned	default_state : 2;
 	/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
 	struct gpio_desc *gpiod;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index cdcb2cc..ef2c7d2 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -18,7 +18,7 @@
 #define NVM_SEC_BITS (8)
 #define NVM_PL_BITS  (8)
 #define NVM_LUN_BITS (8)
-#define NVM_CH_BITS  (8)
+#define NVM_CH_BITS  (7)
 
 struct ppa_addr {
 	/* Generic structure for all addresses */
@@ -30,8 +30,14 @@
 			u64 pl		: NVM_PL_BITS;
 			u64 lun		: NVM_LUN_BITS;
 			u64 ch		: NVM_CH_BITS;
+			u64 reserved	: 1;
 		} g;
 
+		struct {
+			u64 line	: 63;
+			u64 is_cached	: 1;
+		} c;
+
 		u64 ppa;
 	};
 };
@@ -41,13 +47,11 @@
 struct nvm_dev;
 
 typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
 typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
 typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
 				nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
-				nvm_bb_update_fn *, void *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
+typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
 typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
@@ -202,6 +206,7 @@
 
 struct nvm_target {
 	struct list_head list;
+	struct nvm_dev *dev;
 	struct nvm_tgt_type *type;
 	struct gendisk *disk;
 };
@@ -232,14 +237,14 @@
 
 	struct ppa_addr *ppa_list;
 
-	void *metadata;
-	dma_addr_t dma_metadata;
+	void *meta_list;
+	dma_addr_t dma_meta_list;
 
 	struct completion *wait;
 	nvm_end_io_fn *end_io;
 
 	uint8_t opcode;
-	uint16_t nr_pages;
+	uint16_t nr_ppas;
 	uint16_t flags;
 
 	u64 ppa_status; /* ppa media status */
@@ -307,7 +312,6 @@
 	struct nvm_dev_ops *ops;
 
 	struct list_head devices;
-	struct list_head online_targets;
 
 	/* Media manager */
 	struct nvmm_type *mt;
@@ -323,6 +327,8 @@
 	int sec_per_pg; /* only sectors for a single page */
 	int pgs_per_blk;
 	int blks_per_lun;
+	int fpg_size;
+	int pfpg_size; /* size of buffer if all pages are to be read */
 	int sec_size;
 	int oob_size;
 	int mccap;
@@ -345,10 +351,9 @@
 	unsigned long total_blocks;
 	unsigned long total_secs;
 	int nr_luns;
-	unsigned max_pages_per_blk;
 
 	unsigned long *lun_map;
-	void *ppalist_pool;
+	void *dma_pool;
 
 	struct nvm_id identity;
 
@@ -450,8 +455,8 @@
 	struct list_head list;
 };
 
-extern int nvm_register_target(struct nvm_tgt_type *);
-extern void nvm_unregister_target(struct nvm_tgt_type *);
+extern int nvm_register_tgt_type(struct nvm_tgt_type *);
+extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
 
 extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
 extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
@@ -467,6 +472,7 @@
 typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
 								unsigned long);
+typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
 typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
 typedef void (nvmm_release_lun)(struct nvm_dev *, int);
@@ -494,6 +500,9 @@
 	nvmm_submit_io_fn *submit_io;
 	nvmm_erase_blk_fn *erase_blk;
 
+	/* Bad block mgmt */
+	nvmm_mark_blk_fn *mark_blk;
+
 	/* Configuration management */
 	nvmm_get_lun_fn *get_lun;
 	nvmm_reserve_lun *reserve_lun;
@@ -527,13 +536,17 @@
 extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
 extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
 extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
-							struct ppa_addr *, int);
+						struct ppa_addr *, int, int);
 extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
 extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
 extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
 extern void nvm_end_io(struct nvm_rq *, int);
 extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
 								void *, int);
+extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
+							int, void *, int);
+extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
+extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);
 
 /* sysblk.c */
 #define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
@@ -554,6 +567,13 @@
 extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);
 
 extern int nvm_dev_factory(struct nvm_dev *, int flags);
+
+#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid)			\
+	for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls;	\
+					(chid)++, (ppa).g.ch = (chid))	\
+		for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl;	\
+					(lunid)++, (ppa).g.lun = (lunid))
+
 #else /* CONFIG_NVM */
 struct nvm_dev_ops;
 
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index bd830d5..a93a0b2 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -65,27 +65,8 @@
 };
 
 /**
- * struct klp_reloc - relocation structure for live patching
- * @loc:	address where the relocation will be written
- * @sympos:	position in kallsyms to disambiguate symbols (optional)
- * @type:	ELF relocation type
- * @name:	name of the referenced symbol (for lookup/verification)
- * @addend:	offset from the referenced symbol
- * @external:	symbol is either exported or within the live patch module itself
- */
-struct klp_reloc {
-	unsigned long loc;
-	unsigned long sympos;
-	unsigned long type;
-	const char *name;
-	int addend;
-	int external;
-};
-
-/**
  * struct klp_object - kernel object structure for live patching
  * @name:	module name (or NULL for vmlinux)
- * @relocs:	relocation entries to be applied at load time
  * @funcs:	function entries for functions to be patched in the object
  * @kobj:	kobject for sysfs resources
  * @mod:	kernel module associated with the patched object
@@ -95,7 +76,6 @@
 struct klp_object {
 	/* external */
 	const char *name;
-	struct klp_reloc *relocs;
 	struct klp_func *funcs;
 
 	/* internal */
@@ -124,10 +104,12 @@
 };
 
 #define klp_for_each_object(patch, obj) \
-	for (obj = patch->objs; obj->funcs; obj++)
+	for (obj = patch->objs; obj->funcs || obj->name; obj++)
 
 #define klp_for_each_func(obj, func) \
-	for (func = obj->funcs; func->old_name; func++)
+	for (func = obj->funcs; \
+	     func->old_name || func->new_func || func->old_sympos; \
+	     func++)
 
 int klp_register_patch(struct klp_patch *);
 int klp_unregister_patch(struct klp_patch *);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b19..eabe013 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -196,9 +196,11 @@
  * We record lock dependency chains, so that we can cache them:
  */
 struct lock_chain {
-	u8				irq_context;
-	u8				depth;
-	u16				base;
+	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
+	unsigned int			irq_context :  2,
+					depth       :  6,
+					base	    : 24;
+	/* 4 byte hole */
 	struct hlist_node		entry;
 	u64				chain_key;
 };
@@ -354,8 +356,13 @@
 extern void lockdep_clear_current_reclaim_state(void);
 extern void lockdep_trace_alloc(gfp_t mask);
 
-extern void lock_pin_lock(struct lockdep_map *lock);
-extern void lock_unpin_lock(struct lockdep_map *lock);
+struct pin_cookie { unsigned int val; };
+
+#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
+
+extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
+extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
+extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 
 # define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
@@ -371,8 +378,9 @@
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
 
-#define lockdep_pin_lock(l)		lock_pin_lock(&(l)->dep_map)
-#define lockdep_unpin_lock(l)	lock_unpin_lock(&(l)->dep_map)
+#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
+#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
+#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
 
 #else /* !CONFIG_LOCKDEP */
 
@@ -425,8 +433,13 @@
 
 #define lockdep_recursing(tsk)			(0)
 
-#define lockdep_pin_lock(l)				do { (void)(l); } while (0)
-#define lockdep_unpin_lock(l)			do { (void)(l); } while (0)
+struct pin_cookie { };
+
+#define NIL_COOKIE (struct pin_cookie){ }
+
+#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
+#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
+#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
 
 #endif /* !LOCKDEP */
 
@@ -444,6 +457,18 @@
 	lock_acquired(&(_lock)->dep_map, _RET_IP_);			\
 } while (0)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
+({								\
+	int ____err = 0;					\
+	if (!try(_lock)) {					\
+		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
+		____err = lock(_lock);				\
+	}							\
+	if (!____err)						\
+		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
+	____err;						\
+})
+
 #else /* CONFIG_LOCK_STAT */
 
 #define lock_contended(lockdep_map, ip) do {} while (0)
@@ -452,6 +477,9 @@
 #define LOCK_CONTENDED(_lock, try, lock) \
 	lock(_lock)
 
+#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
+	lock(_lock)
+
 #endif /* CONFIG_LOCK_STAT */
 
 #ifdef CONFIG_LOCKDEP
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index cdee11c..7ae3976 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1190,7 +1190,8 @@
  *	Return 0 if permission is granted.
  * @settime:
  *	Check permission to change the system time.
- *	struct timespec and timezone are defined in include/linux/time.h
+ *	struct timespec64 is defined in include/linux/time64.h and timezone
+ *	is defined in include/linux/time.h
  *	@ts contains new time
  *	@tz contains new timezone
  *	Return 0 if permission is granted.
@@ -1327,7 +1328,7 @@
 	int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
 	int (*quota_on)(struct dentry *dentry);
 	int (*syslog)(int type);
-	int (*settime)(const struct timespec *ts, const struct timezone *tz);
+	int (*settime)(const struct timespec64 *ts, const struct timezone *tz);
 	int (*vm_enough_memory)(struct mm_struct *mm, long pages);
 
 	int (*bprm_set_creds)(struct linux_binprm *bprm);
@@ -1343,10 +1344,10 @@
 	int (*sb_kern_mount)(struct super_block *sb, int flags, void *data);
 	int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
 	int (*sb_statfs)(struct dentry *dentry);
-	int (*sb_mount)(const char *dev_name, struct path *path,
+	int (*sb_mount)(const char *dev_name, const struct path *path,
 			const char *type, unsigned long flags, void *data);
 	int (*sb_umount)(struct vfsmount *mnt, int flags);
-	int (*sb_pivotroot)(struct path *old_path, struct path *new_path);
+	int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path);
 	int (*sb_set_mnt_opts)(struct super_block *sb,
 				struct security_mnt_opts *opts,
 				unsigned long kern_flags,
@@ -1360,23 +1361,23 @@
 
 
 #ifdef CONFIG_SECURITY_PATH
-	int (*path_unlink)(struct path *dir, struct dentry *dentry);
-	int (*path_mkdir)(struct path *dir, struct dentry *dentry,
+	int (*path_unlink)(const struct path *dir, struct dentry *dentry);
+	int (*path_mkdir)(const struct path *dir, struct dentry *dentry,
 				umode_t mode);
-	int (*path_rmdir)(struct path *dir, struct dentry *dentry);
-	int (*path_mknod)(struct path *dir, struct dentry *dentry,
+	int (*path_rmdir)(const struct path *dir, struct dentry *dentry);
+	int (*path_mknod)(const struct path *dir, struct dentry *dentry,
 				umode_t mode, unsigned int dev);
-	int (*path_truncate)(struct path *path);
-	int (*path_symlink)(struct path *dir, struct dentry *dentry,
+	int (*path_truncate)(const struct path *path);
+	int (*path_symlink)(const struct path *dir, struct dentry *dentry,
 				const char *old_name);
-	int (*path_link)(struct dentry *old_dentry, struct path *new_dir,
+	int (*path_link)(struct dentry *old_dentry, const struct path *new_dir,
 				struct dentry *new_dentry);
-	int (*path_rename)(struct path *old_dir, struct dentry *old_dentry,
-				struct path *new_dir,
+	int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry,
+				const struct path *new_dir,
 				struct dentry *new_dentry);
-	int (*path_chmod)(struct path *path, umode_t mode);
-	int (*path_chown)(struct path *path, kuid_t uid, kgid_t gid);
-	int (*path_chroot)(struct path *path);
+	int (*path_chmod)(const struct path *path, umode_t mode);
+	int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid);
+	int (*path_chroot)(const struct path *path);
 #endif
 
 	int (*inode_alloc_security)(struct inode *inode);
@@ -1804,7 +1805,6 @@
 	struct list_head tun_dev_attach_queue;
 	struct list_head tun_dev_attach;
 	struct list_head tun_dev_open;
-	struct list_head skb_owned_by;
 #endif	/* CONFIG_SECURITY_NETWORK */
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 	struct list_head xfrm_policy_alloc_security;
@@ -1893,5 +1893,10 @@
 #else
 static inline void __init yama_add_hooks(void) { }
 #endif
+#ifdef CONFIG_SECURITY_LOADPIN
+void __init loadpin_add_hooks(void);
+#else
+static inline void loadpin_add_hooks(void) { };
+#endif
 
 #endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 5bfd99d..bf9d1d7 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -13,6 +13,17 @@
 
 struct mii_bus;
 
+/* Multiple levels of nesting are possible. However typically this is
+ * limited to nested DSA like layer, a MUX layer, and the normal
+ * user. Instead of trying to handle the general case, just define
+ * these cases.
+ */
+enum mdio_mutex_lock_class {
+	MDIO_MUTEX_NORMAL,
+	MDIO_MUTEX_MUX,
+	MDIO_MUTEX_NESTED,
+};
+
 struct mdio_device {
 	struct device dev;
 
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
index 8d43e9f..51e6f94 100644
--- a/include/linux/mfd/as3722.h
+++ b/include/linux/mfd/as3722.h
@@ -196,6 +196,7 @@
 #define AS3722_LDO3_VSEL_MIN				0x01
 #define AS3722_LDO3_VSEL_MAX				0x2D
 #define AS3722_LDO3_NUM_VOLT				0x2D
+#define AS3722_LDO6_VSEL_BYPASS 			0x3F
 #define AS3722_LDO_VSEL_MASK				0x7F
 #define AS3722_LDO_VSEL_MIN				0x01
 #define AS3722_LDO_VSEL_MAX				0x7F
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index bc6f7e0..9837f1e 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 
 struct irq_domain;
-struct property_set;
+struct property_entry;
 
 /* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */
 struct mfd_cell_acpi_match {
@@ -47,7 +47,7 @@
 	size_t			pdata_size;
 
 	/* device properties passed to the sub devices drivers */
-	const struct property_set *pset;
+	struct property_entry *properties;
 
 	/*
 	 * Device Tree compatible string
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 6bc4bcd..5a23dd4 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -30,6 +30,9 @@
 #define MIN_600_MV		600000
 #define MIN_500_MV		500000
 
+/* Ramp delay in uV/us */
+#define RAMP_DELAY_12_MVUS	12000
+
 /* Macros to represent steps for LDO/BUCK */
 #define STEP_50_MV		50000
 #define STEP_25_MV		25000
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index b288965..2c14eeca 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -173,10 +173,12 @@
 
 #define S2MPS11_LDO_VSEL_MASK	0x3F
 #define S2MPS11_BUCK_VSEL_MASK	0xFF
+#define S2MPS11_BUCK9_VSEL_MASK	0x1F
 #define S2MPS11_ENABLE_MASK	(0x03 << S2MPS11_ENABLE_SHIFT)
 #define S2MPS11_ENABLE_SHIFT	0x06
 #define S2MPS11_LDO_N_VOLTAGES	(S2MPS11_LDO_VSEL_MASK + 1)
 #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
 #define S2MPS11_RAMP_DELAY	25000		/* uV/us */
 
 #define S2MPS11_CTRL1_PWRHOLD_MASK	BIT(4)
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index 238c8db..c8e0164 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -95,6 +95,7 @@
 #define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IOMUX	BIT(0)
 
 #define IMX6Q_GPR1_PCIE_REQ_MASK		(0x3 << 30)
+#define IMX6Q_GPR1_PCIE_SW_RST			BIT(29)
 #define IMX6Q_GPR1_PCIE_EXIT_L1			BIT(28)
 #define IMX6Q_GPR1_PCIE_RDY_L23			BIT(27)
 #define IMX6Q_GPR1_PCIE_ENTER_L1		BIT(26)
@@ -447,5 +448,11 @@
 #define IMX6UL_GPR1_ENET2_CLK_OUTPUT		(0x1 << 18)
 #define IMX6UL_GPR1_ENET_CLK_DIR		(0x3 << 17)
 #define IMX6UL_GPR1_ENET_CLK_OUTPUT		(0x3 << 17)
+#define IMX6UL_GPR1_SAI1_MCLK_DIR		(0x1 << 19)
+#define IMX6UL_GPR1_SAI2_MCLK_DIR		(0x1 << 20)
+#define IMX6UL_GPR1_SAI3_MCLK_DIR		(0x1 << 21)
+#define IMX6UL_GPR1_SAI_MCLK_MASK		(0x7 << 19)
+#define MCLK_DIR(x) (x == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? \
+		     IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR)
 
 #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 05d58ee..7a26286 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -66,8 +66,8 @@
  */
 #define TMIO_MMC_SDIO_IRQ		(1 << 2)
 
-/* Some controllers don't need to wait 10ms for clock changes */
-#define TMIO_MMC_FAST_CLK_CHG		(1 << 3)
+/* Some features are only available or tested on RCar Gen2 or later */
+#define TMIO_MMC_MIN_RCAR2		(1 << 3)
 
 /*
  * Some controllers require waiting for the SD bus to become
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8541a91..80dec87 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -828,6 +828,11 @@
 	u8			n_ports;
 };
 
+enum mlx4_pci_status {
+	MLX4_PCI_STATUS_DISABLED,
+	MLX4_PCI_STATUS_ENABLED,
+};
+
 struct mlx4_dev_persistent {
 	struct pci_dev	       *pdev;
 	struct mlx4_dev	       *dev;
@@ -841,6 +846,8 @@
 	u8		state;
 	struct mutex	interface_state_mutex; /* protect SW state */
 	u8	interface_state;
+	struct mutex		pci_status_mutex; /* sync pci state */
+	enum mlx4_pci_status	pci_status;
 };
 
 struct mlx4_dev {
@@ -1051,7 +1058,7 @@
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
 {
-	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+	if (buf->nbufs == 1)
 		return buf->direct.buf + offset;
 	else
 		return buf->page_list[offset >> PAGE_SHIFT].buf +
@@ -1091,7 +1098,7 @@
 void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
 
 int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
-		       int size, int max_direct);
+		       int size);
 void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
 		       int size);
 
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8156e3c..035abdf 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -59,6 +59,7 @@
 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
@@ -392,6 +393,17 @@
 	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
 };
 
+enum {
+	/*
+	 * Max wqe size for rdma read is 512 bytes, so this
+	 * limits our max_sge_rd as the wqe needs to fit:
+	 * - ctrl segment (16 bytes)
+	 * - rdma segment (16 bytes)
+	 * - scatter elements (16 bytes each)
+	 */
+	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
+};
+
 struct mlx5_inbox_hdr {
 	__be16		opcode;
 	u8		rsvd[4];
@@ -644,7 +656,9 @@
 };
 
 struct mlx5_cqe64 {
-	u8		rsvd0[4];
+	u8		outer_l3_tunneled;
+	u8		rsvd0;
+	__be16		wqe_id;
 	u8		lro_tcppsh_abort_dupack;
 	u8		lro_min_ttl;
 	__be16		lro_tcp_win;
@@ -657,7 +671,7 @@
 	__be16		slid;
 	__be32		flags_rqpn;
 	u8		hds_ip_ext;
-	u8		l4_hdr_type_etc;
+	u8		l4_l3_hdr_type;
 	__be16		vlan_info;
 	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
 	__be32		imm_inval_pkey;
@@ -671,6 +685,40 @@
 	u8		op_own;
 };
 
+struct mlx5_mini_cqe8 {
+	union {
+		__be32 rx_hash_result;
+		struct {
+			__be16 checksum;
+			__be16 rsvd;
+		};
+		struct {
+			__be16 wqe_counter;
+			u8  s_wqe_opcode;
+			u8  reserved;
+		} s_wqe_info;
+	};
+	__be32 byte_cnt;
+};
+
+enum {
+	MLX5_NO_INLINE_DATA,
+	MLX5_INLINE_DATA32_SEG,
+	MLX5_INLINE_DATA64_SEG,
+	MLX5_COMPRESSED,
+};
+
+enum {
+	MLX5_CQE_FORMAT_CSUM = 0x1,
+};
+
+#define MLX5_MINI_CQE_ARRAY_SIZE 8
+
+static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->op_own >> 2) & 0x3;
+}
+
 static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
 {
 	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
@@ -678,12 +726,22 @@
 
 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
 {
-	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
+}
+
+static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+}
+
+static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+{
+	return cqe->outer_l3_tunneled & 0x1;
 }
 
 static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
 {
-	return !!(cqe->l4_hdr_type_etc & 0x1);
+	return !!(cqe->l4_l3_hdr_type & 0x1);
 }
 
 static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@@ -696,6 +754,42 @@
 	return (u64)lo | ((u64)hi << 32);
 }
 
+struct mpwrq_cqe_bc {
+	__be16	filler_consumed_strides;
+	__be16	byte_cnt;
+};
+
+static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
+{
+	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+	return be16_to_cpu(bc->byte_cnt);
+}
+
+static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
+{
+	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
+{
+	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+	return mpwrq_get_cqe_bc_consumed_strides(bc);
+}
+
+static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
+{
+	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
+{
+	return be16_to_cpu(cqe->wqe_counter);
+}
+
 enum {
 	CQE_L4_HDR_TYPE_NONE			= 0x0,
 	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
@@ -1289,6 +1383,18 @@
 #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
 	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
 
+#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+
 #define MLX5_CAP_ESW(mdev, cap) \
 	MLX5_GET(e_switch_cap, \
 		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
@@ -1331,6 +1437,7 @@
 	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
 	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
 	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
+	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
 	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
 };
 
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index dcd5ac8..07b504f 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -41,11 +41,16 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/radix-tree.h>
+#include <linux/workqueue.h>
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
 
 enum {
+	MLX5_RQ_BITMASK_VSD = 1 << 1,
+};
+
+enum {
 	MLX5_BOARD_ID_LEN = 64,
 	MLX5_MAX_NAME_LEN = 16,
 };
@@ -112,9 +117,12 @@
 	MLX5_REG_PMPE		 = 0x5010,
 	MLX5_REG_PELC		 = 0x500e,
 	MLX5_REG_PVLC		 = 0x500f,
-	MLX5_REG_PMLP		 = 0, /* TBD */
+	MLX5_REG_PCMR		 = 0x5041,
+	MLX5_REG_PMLP		 = 0x5002,
 	MLX5_REG_NODE_DESC	 = 0x6001,
 	MLX5_REG_HOST_ENDIANNESS = 0x7004,
+	MLX5_REG_MCIA		 = 0x9014,
+	MLX5_REG_MLCR		 = 0x902b,
 };
 
 enum {
@@ -450,6 +458,17 @@
 	char name[MLX5_MAX_IRQ_NAME];
 };
 
+struct mlx5_fc_stats {
+	struct list_head list;
+	struct list_head addlist;
+	/* protect addlist add/splice operations */
+	spinlock_t addlist_lock;
+
+	struct workqueue_struct *wq;
+	struct delayed_work work;
+	unsigned long next_query;
+};
+
 struct mlx5_eswitch;
 
 struct mlx5_priv {
@@ -511,6 +530,10 @@
 	unsigned long		pci_dev_data;
 	struct mlx5_flow_root_namespace *root_ns;
 	struct mlx5_flow_root_namespace *fdb_root_ns;
+	struct mlx5_flow_root_namespace *esw_egress_root_ns;
+	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+
+	struct mlx5_fc_stats		fc_stats;
 };
 
 enum mlx5_device_state {
@@ -519,8 +542,9 @@
 };
 
 enum mlx5_interface_state {
-	MLX5_INTERFACE_STATE_DOWN,
-	MLX5_INTERFACE_STATE_UP,
+	MLX5_INTERFACE_STATE_DOWN = BIT(0),
+	MLX5_INTERFACE_STATE_UP = BIT(1),
+	MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
 };
 
 enum mlx5_pci_status {
@@ -544,7 +568,7 @@
 	enum mlx5_device_state	state;
 	/* sync interface state */
 	struct mutex		intf_state_mutex;
-	enum mlx5_interface_state interface_state;
+	unsigned long		intf_state;
 	void			(*event) (struct mlx5_core_dev *dev,
 					  enum mlx5_dev_event event,
 					  unsigned long param);
@@ -552,6 +576,9 @@
 	struct mlx5_profile	*profile;
 	atomic_t		num_qps;
 	u32			issi;
+#ifdef CONFIG_RFS_ACCEL
+	struct cpu_rmap         *rmap;
+#endif
 };
 
 struct mlx5_db {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 8dec550..4b7a107 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -58,6 +58,8 @@
 	MLX5_FLOW_NAMESPACE_LEFTOVERS,
 	MLX5_FLOW_NAMESPACE_ANCHOR,
 	MLX5_FLOW_NAMESPACE_FDB,
+	MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+	MLX5_FLOW_NAMESPACE_ESW_INGRESS,
 };
 
 struct mlx5_flow_table;
@@ -71,6 +73,7 @@
 		u32			tir_num;
 		struct mlx5_flow_table	*ft;
 		u32			vport_num;
+		struct mlx5_fc		*counter;
 	};
 };
 
@@ -82,12 +85,19 @@
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
 				    int prio,
 				    int num_flow_table_entries,
-				    int max_num_groups);
+				    int max_num_groups,
+				    u32 level);
 
 struct mlx5_flow_table *
 mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 		       int prio,
-		       int num_flow_table_entries);
+		       int num_flow_table_entries,
+		       u32 level);
+struct mlx5_flow_table *
+mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+			     int prio,
+			     int num_flow_table_entries,
+			     u32 level, u16 vport);
 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
 
 /* inbox should be set with the following values:
@@ -113,4 +123,13 @@
 		   struct mlx5_flow_destination *dest);
 void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
 
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+				 struct mlx5_flow_destination *dest);
+
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule);
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+			  u64 *bytes, u64 *packets, u64 *lastuse);
+
 #endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index c15b8a8..9a05cd7 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -202,6 +202,9 @@
 	MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY          = 0x936,
 	MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY        = 0x937,
 	MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY       = 0x938,
+	MLX5_CMD_OP_ALLOC_FLOW_COUNTER            = 0x939,
+	MLX5_CMD_OP_DEALLOC_FLOW_COUNTER          = 0x93a,
+	MLX5_CMD_OP_QUERY_FLOW_COUNTER            = 0x93b,
 	MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c
 };
 
@@ -265,7 +268,8 @@
 
 struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8         ft_support[0x1];
-	u8         reserved_at_1[0x2];
+	u8         reserved_at_1[0x1];
+	u8         flow_counter[0x1];
 	u8	   flow_modify_en[0x1];
 	u8         modify_root[0x1];
 	u8         identified_miss_table_mode[0x1];
@@ -513,7 +517,9 @@
 	u8         max_lso_cap[0x5];
 	u8         reserved_at_10[0x4];
 	u8         rss_ind_tbl_cap[0x4];
-	u8         reserved_at_18[0x3];
+	u8         reg_umr_sq[0x1];
+	u8         scatter_fcs[0x1];
+	u8         reserved_at_1a[0x1];
 	u8         tunnel_lso_const_out_ip_id[0x1];
 	u8         reserved_at_1c[0x2];
 	u8         tunnel_statless_gre[0x1];
@@ -648,7 +654,7 @@
 enum {
 	MLX5_WQ_TYPE_LINKED_LIST  = 0x0,
 	MLX5_WQ_TYPE_CYCLIC       = 0x1,
-	MLX5_WQ_TYPE_STRQ         = 0x2,
+	MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
 };
 
 enum {
@@ -750,21 +756,25 @@
 	u8         ets[0x1];
 	u8         nic_flow_table[0x1];
 	u8         eswitch_flow_table[0x1];
-	u8	   early_vf_enable;
-	u8         reserved_at_1a8[0x2];
+	u8	   early_vf_enable[0x1];
+	u8         reserved_at_1a9[0x2];
 	u8         local_ca_ack_delay[0x5];
-	u8         reserved_at_1af[0x6];
+	u8         reserved_at_1af[0x2];
+	u8         ports_check[0x1];
+	u8         reserved_at_1b2[0x1];
+	u8         disable_link_up[0x1];
+	u8         beacon_led[0x1];
 	u8         port_type[0x2];
 	u8         num_ports[0x8];
 
-	u8         reserved_at_1bf[0x3];
+	u8         reserved_at_1c0[0x3];
 	u8         log_max_msg[0x5];
-	u8         reserved_at_1c7[0x4];
+	u8         reserved_at_1c8[0x4];
 	u8         max_tc[0x4];
-	u8         reserved_at_1cf[0x6];
+	u8         reserved_at_1d0[0x6];
 	u8         rol_s[0x1];
 	u8         rol_g[0x1];
-	u8         reserved_at_1d7[0x1];
+	u8         reserved_at_1d8[0x1];
 	u8         wol_s[0x1];
 	u8         wol_g[0x1];
 	u8         wol_a[0x1];
@@ -774,47 +784,48 @@
 	u8         wol_p[0x1];
 
 	u8         stat_rate_support[0x10];
-	u8         reserved_at_1ef[0xc];
+	u8         reserved_at_1f0[0xc];
 	u8         cqe_version[0x4];
 
 	u8         compact_address_vector[0x1];
-	u8         reserved_at_200[0x3];
+	u8         striding_rq[0x1];
+	u8         reserved_at_201[0x2];
 	u8         ipoib_basic_offloads[0x1];
-	u8         reserved_at_204[0xa];
+	u8         reserved_at_205[0xa];
 	u8         drain_sigerr[0x1];
 	u8         cmdif_checksum[0x2];
 	u8         sigerr_cqe[0x1];
-	u8         reserved_at_212[0x1];
+	u8         reserved_at_213[0x1];
 	u8         wq_signature[0x1];
 	u8         sctr_data_cqe[0x1];
-	u8         reserved_at_215[0x1];
+	u8         reserved_at_216[0x1];
 	u8         sho[0x1];
 	u8         tph[0x1];
 	u8         rf[0x1];
 	u8         dct[0x1];
-	u8         reserved_at_21a[0x1];
+	u8         reserved_at_21b[0x1];
 	u8         eth_net_offloads[0x1];
 	u8         roce[0x1];
 	u8         atomic[0x1];
-	u8         reserved_at_21e[0x1];
+	u8         reserved_at_21f[0x1];
 
 	u8         cq_oi[0x1];
 	u8         cq_resize[0x1];
 	u8         cq_moderation[0x1];
-	u8         reserved_at_222[0x3];
+	u8         reserved_at_223[0x3];
 	u8         cq_eq_remap[0x1];
 	u8         pg[0x1];
 	u8         block_lb_mc[0x1];
-	u8         reserved_at_228[0x1];
+	u8         reserved_at_229[0x1];
 	u8         scqe_break_moderation[0x1];
-	u8         reserved_at_22a[0x1];
+	u8         cq_period_start_from_cqe[0x1];
 	u8         cd[0x1];
-	u8         reserved_at_22c[0x1];
+	u8         reserved_at_22d[0x1];
 	u8         apm[0x1];
 	u8         vector_calc[0x1];
-	u8         reserved_at_22f[0x1];
+	u8         umr_ptr_rlky[0x1];
 	u8	   imaicl[0x1];
-	u8         reserved_at_231[0x4];
+	u8         reserved_at_232[0x4];
 	u8         qkv[0x1];
 	u8         pkv[0x1];
 	u8         set_deth_sqpn[0x1];
@@ -824,104 +835,109 @@
 	u8         uc[0x1];
 	u8         rc[0x1];
 
-	u8         reserved_at_23f[0xa];
+	u8         reserved_at_240[0xa];
 	u8         uar_sz[0x6];
-	u8         reserved_at_24f[0x8];
+	u8         reserved_at_250[0x8];
 	u8         log_pg_sz[0x8];
 
 	u8         bf[0x1];
-	u8         reserved_at_260[0x1];
+	u8         reserved_at_261[0x1];
 	u8         pad_tx_eth_packet[0x1];
-	u8         reserved_at_262[0x8];
+	u8         reserved_at_263[0x8];
 	u8         log_bf_reg_size[0x5];
-	u8         reserved_at_26f[0x10];
+	u8         reserved_at_270[0x10];
 
-	u8         reserved_at_27f[0x10];
+	u8         reserved_at_280[0x10];
 	u8         max_wqe_sz_sq[0x10];
 
-	u8         reserved_at_29f[0x10];
+	u8         reserved_at_2a0[0x10];
 	u8         max_wqe_sz_rq[0x10];
 
-	u8         reserved_at_2bf[0x10];
+	u8         reserved_at_2c0[0x10];
 	u8         max_wqe_sz_sq_dc[0x10];
 
-	u8         reserved_at_2df[0x7];
+	u8         reserved_at_2e0[0x7];
 	u8         max_qp_mcg[0x19];
 
-	u8         reserved_at_2ff[0x18];
+	u8         reserved_at_300[0x18];
 	u8         log_max_mcg[0x8];
 
-	u8         reserved_at_31f[0x3];
+	u8         reserved_at_320[0x3];
 	u8         log_max_transport_domain[0x5];
-	u8         reserved_at_327[0x3];
+	u8         reserved_at_328[0x3];
 	u8         log_max_pd[0x5];
-	u8         reserved_at_32f[0xb];
+	u8         reserved_at_330[0xb];
 	u8         log_max_xrcd[0x5];
 
-	u8         reserved_at_33f[0x20];
+	u8         reserved_at_340[0x20];
 
-	u8         reserved_at_35f[0x3];
+	u8         reserved_at_360[0x3];
 	u8         log_max_rq[0x5];
-	u8         reserved_at_367[0x3];
+	u8         reserved_at_368[0x3];
 	u8         log_max_sq[0x5];
-	u8         reserved_at_36f[0x3];
+	u8         reserved_at_370[0x3];
 	u8         log_max_tir[0x5];
-	u8         reserved_at_377[0x3];
+	u8         reserved_at_378[0x3];
 	u8         log_max_tis[0x5];
 
 	u8         basic_cyclic_rcv_wqe[0x1];
-	u8         reserved_at_380[0x2];
+	u8         reserved_at_381[0x2];
 	u8         log_max_rmp[0x5];
-	u8         reserved_at_387[0x3];
+	u8         reserved_at_388[0x3];
 	u8         log_max_rqt[0x5];
-	u8         reserved_at_38f[0x3];
+	u8         reserved_at_390[0x3];
 	u8         log_max_rqt_size[0x5];
-	u8         reserved_at_397[0x3];
+	u8         reserved_at_398[0x3];
 	u8         log_max_tis_per_sq[0x5];
 
-	u8         reserved_at_39f[0x3];
+	u8         reserved_at_3a0[0x3];
 	u8         log_max_stride_sz_rq[0x5];
-	u8         reserved_at_3a7[0x3];
+	u8         reserved_at_3a8[0x3];
 	u8         log_min_stride_sz_rq[0x5];
-	u8         reserved_at_3af[0x3];
+	u8         reserved_at_3b0[0x3];
 	u8         log_max_stride_sz_sq[0x5];
-	u8         reserved_at_3b7[0x3];
+	u8         reserved_at_3b8[0x3];
 	u8         log_min_stride_sz_sq[0x5];
 
-	u8         reserved_at_3bf[0x1b];
+	u8         reserved_at_3c0[0x1b];
 	u8         log_max_wq_sz[0x5];
 
 	u8         nic_vport_change_event[0x1];
-	u8         reserved_at_3e0[0xa];
+	u8         reserved_at_3e1[0xa];
 	u8         log_max_vlan_list[0x5];
-	u8         reserved_at_3ef[0x3];
+	u8         reserved_at_3f0[0x3];
 	u8         log_max_current_mc_list[0x5];
-	u8         reserved_at_3f7[0x3];
+	u8         reserved_at_3f8[0x3];
 	u8         log_max_current_uc_list[0x5];
 
-	u8         reserved_at_3ff[0x80];
+	u8         reserved_at_400[0x80];
 
-	u8         reserved_at_47f[0x3];
+	u8         reserved_at_480[0x3];
 	u8         log_max_l2_table[0x5];
-	u8         reserved_at_487[0x8];
+	u8         reserved_at_488[0x8];
 	u8         log_uar_page_sz[0x10];
 
-	u8         reserved_at_49f[0x20];
+	u8         reserved_at_4a0[0x20];
 	u8         device_frequency_mhz[0x20];
 	u8         device_frequency_khz[0x20];
-	u8         reserved_at_4ff[0x5f];
-	u8         cqe_zip[0x1];
 
-	u8         cqe_zip_timeout[0x10];
-	u8         cqe_zip_max_num[0x10];
+	u8         reserved_at_500[0x80];
 
-	u8         reserved_at_57f[0x220];
+	u8         reserved_at_580[0x3f];
+	u8         cqe_compression[0x1];
+
+	u8         cqe_compression_timeout[0x10];
+	u8         cqe_compression_max_num[0x10];
+
+	u8         reserved_at_5e0[0x220];
 };
 
 enum mlx5_flow_destination_type {
 	MLX5_FLOW_DESTINATION_TYPE_VPORT        = 0x0,
 	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE   = 0x1,
 	MLX5_FLOW_DESTINATION_TYPE_TIR          = 0x2,
+
+	MLX5_FLOW_DESTINATION_TYPE_COUNTER      = 0x100,
 };
 
 struct mlx5_ifc_dest_format_struct_bits {
@@ -931,6 +947,19 @@
 	u8         reserved_at_20[0x20];
 };
 
+struct mlx5_ifc_flow_counter_list_bits {
+	u8         reserved_at_0[0x10];
+	u8         flow_counter_id[0x10];
+
+	u8         reserved_at_20[0x20];
+};
+
+union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+	struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+	struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
+	u8         reserved_at_0[0x40];
+};
+
 struct mlx5_ifc_fte_match_param_bits {
 	struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
 
@@ -997,7 +1026,13 @@
 	u8         reserved_at_118[0x3];
 	u8         log_wq_sz[0x5];
 
-	u8         reserved_at_120[0x4e0];
+	u8         reserved_at_120[0x15];
+	u8         log_wqe_num_of_strides[0x3];
+	u8         two_byte_shift_en[0x1];
+	u8         reserved_at_139[0x4];
+	u8         log_wqe_stride_size[0x3];
+
+	u8         reserved_at_140[0x4c0];
 
 	struct mlx5_ifc_cmd_pas_bits pas[0];
 };
@@ -1990,6 +2025,7 @@
 	MLX5_FLOW_CONTEXT_ACTION_ALLOW     = 0x1,
 	MLX5_FLOW_CONTEXT_ACTION_DROP      = 0x2,
 	MLX5_FLOW_CONTEXT_ACTION_FWD_DEST  = 0x4,
+	MLX5_FLOW_CONTEXT_ACTION_COUNT     = 0x8,
 };
 
 struct mlx5_ifc_flow_context_bits {
@@ -2006,13 +2042,16 @@
 	u8         reserved_at_80[0x8];
 	u8         destination_list_size[0x18];
 
-	u8         reserved_at_a0[0x160];
+	u8         reserved_at_a0[0x8];
+	u8         flow_counter_list_size[0x18];
+
+	u8         reserved_at_c0[0x140];
 
 	struct mlx5_ifc_fte_match_param_bits match_value;
 
 	u8         reserved_at_1200[0x600];
 
-	struct mlx5_ifc_dest_format_struct_bits destination[0];
+	union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
 };
 
 enum {
@@ -2196,7 +2235,8 @@
 	u8         flush_in_error_en[0x1];
 	u8         reserved_at_4[0x4];
 	u8         state[0x4];
-	u8         reserved_at_c[0x14];
+	u8         reg_umr[0x1];
+	u8         reserved_at_d[0x13];
 
 	u8         reserved_at_20[0x8];
 	u8         user_index[0x18];
@@ -2244,7 +2284,8 @@
 
 struct mlx5_ifc_rqc_bits {
 	u8         rlky[0x1];
-	u8         reserved_at_1[0x2];
+	u8         reserved_at_1[0x1];
+	u8         scatter_fcs[0x1];
 	u8         vsd[0x1];
 	u8         mem_rq_type[0x4];
 	u8         state[0x4];
@@ -2601,6 +2642,11 @@
 	MLX5_CQC_ST_FIRED                                 = 0xa,
 };
 
+enum {
+	MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+	MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+};
+
 struct mlx5_ifc_cqc_bits {
 	u8         status[0x4];
 	u8         reserved_at_4[0x4];
@@ -2609,8 +2655,8 @@
 	u8         reserved_at_c[0x1];
 	u8         scqe_break_moderation_en[0x1];
 	u8         oi[0x1];
-	u8         reserved_at_f[0x2];
-	u8         cqe_zip_en[0x1];
+	u8         cq_period_mode[0x2];
+	u8         cqe_comp_en[0x1];
 	u8         mini_cqe_res_format[0x2];
 	u8         st[0x4];
 	u8         reserved_at_18[0x8];
@@ -2984,7 +3030,11 @@
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
 
-	u8         reserved_at_40[0x40];
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
 
 	u8         table_type[0x8];
 	u8         reserved_at_88[0x18];
@@ -3910,6 +3960,34 @@
 	u8         reserved_at_e0[0x120];
 };
 
+struct mlx5_ifc_query_flow_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
+};
+
+struct mlx5_ifc_query_flow_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x80];
+
+	u8         clear[0x1];
+	u8         reserved_at_c1[0xf];
+	u8         num_of_counters[0x10];
+
+	u8         reserved_at_e0[0x10];
+	u8         flow_counter_id[0x10];
+};
+
 struct mlx5_ifc_query_esw_vport_context_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
@@ -5178,7 +5256,11 @@
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
 
-	u8         reserved_at_40[0x40];
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
 
 	u8         table_type[0x8];
 	u8         reserved_at_88[0x18];
@@ -5205,7 +5287,11 @@
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
 
-	u8         reserved_at_40[0x40];
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
 
 	u8         table_type[0x8];
 	u8         reserved_at_88[0x18];
@@ -5346,7 +5432,11 @@
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
 
-	u8         reserved_at_40[0x40];
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
 
 	u8         table_type[0x8];
 	u8         reserved_at_88[0x18];
@@ -5471,6 +5561,28 @@
 	u8         reserved_at_60[0x20];
 };
 
+struct mlx5_ifc_dealloc_flow_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         flow_counter_id[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
 struct mlx5_ifc_create_xrc_srq_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
@@ -5792,7 +5904,11 @@
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
 
-	u8         reserved_at_40[0x40];
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
 
 	u8         table_type[0x8];
 	u8         reserved_at_88[0x18];
@@ -5836,7 +5952,11 @@
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
 
-	u8         reserved_at_40[0x40];
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
 
 	u8         table_type[0x8];
 	u8         reserved_at_88[0x18];
@@ -6190,6 +6310,28 @@
 	u8         reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_alloc_flow_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         flow_counter_id[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_flow_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
 struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
@@ -6369,6 +6511,17 @@
 	u8         reserved_at_1a0[0x60];
 };
 
+struct mlx5_ifc_mlcr_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x20];
+
+	u8         beacon_duration[0x10];
+	u8         reserved_at_40[0x10];
+
+	u8         beacon_remain[0x10];
+};
+
 struct mlx5_ifc_ptas_reg_bits {
 	u8         reserved_at_0[0x20];
 
@@ -6778,6 +6931,16 @@
 	u8         index_data[18][0x10];
 };
 
+struct mlx5_ifc_pcmr_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x2e];
+	u8         fcs_cap[0x1];
+	u8         reserved_at_3f[0x1f];
+	u8         fcs_chk[0x1];
+	u8         reserved_at_5f[0x1];
+};
+
 struct mlx5_ifc_lane_2_module_mapping_bits {
 	u8         reserved_at_0[0x6];
 	u8         rx_lane[0x2];
@@ -7114,6 +7277,7 @@
 	struct mlx5_ifc_pspa_reg_bits pspa_reg;
 	struct mlx5_ifc_ptas_reg_bits ptas_reg;
 	struct mlx5_ifc_ptys_reg_bits ptys_reg;
+	struct mlx5_ifc_mlcr_reg_bits mlcr_reg;
 	struct mlx5_ifc_pude_reg_bits pude_reg;
 	struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
 	struct mlx5_ifc_slrg_reg_bits slrg_reg;
@@ -7147,7 +7311,11 @@
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
 
-	u8         reserved_at_40[0x40];
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
 
 	u8         table_type[0x8];
 	u8         reserved_at_88[0x18];
@@ -7178,7 +7346,9 @@
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
 
-	u8         reserved_at_40[0x20];
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
 
 	u8         reserved_at_60[0x10];
 	u8         modify_field_select[0x10];
@@ -7244,4 +7414,34 @@
 	u8         tclass[0x3];
 };
 
+struct mlx5_ifc_mcia_reg_bits {
+	u8         l[0x1];
+	u8         reserved_at_1[0x7];
+	u8         module[0x8];
+	u8         reserved_at_10[0x8];
+	u8         status[0x8];
+
+	u8         i2c_device_address[0x8];
+	u8         page_number[0x8];
+	u8         device_address[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         size[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         dword_0[0x20];
+	u8         dword_1[0x20];
+	u8         dword_2[0x20];
+	u8         dword_3[0x20];
+	u8         dword_4[0x20];
+	u8         dword_5[0x20];
+	u8         dword_6[0x20];
+	u8         dword_7[0x20];
+	u8         dword_8[0x20];
+	u8         dword_9[0x20];
+	u8         dword_10[0x20];
+	u8         dword_11[0x20];
+};
+
 #endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index a1d145a..9851862 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -35,6 +35,24 @@
 
 #include <linux/mlx5/driver.h>
 
+enum mlx5_beacon_duration {
+	MLX5_BEACON_DURATION_OFF = 0x0,
+	MLX5_BEACON_DURATION_INF = 0xffff,
+};
+
+enum mlx5_module_id {
+	MLX5_MODULE_ID_SFP              = 0x3,
+	MLX5_MODULE_ID_QSFP             = 0xC,
+	MLX5_MODULE_ID_QSFP_PLUS        = 0xD,
+	MLX5_MODULE_ID_QSFP28           = 0x11,
+};
+
+#define MLX5_EEPROM_MAX_BYTES			32
+#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK	0x000000ff
+#define MLX5_I2C_ADDR_LOW		0x50
+#define MLX5_I2C_ADDR_HIGH		0x51
+#define MLX5_EEPROM_PAGE_LENGTH		256
+
 int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
 int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
 			 int ptys_size, int proto_mask, u8 local_port);
@@ -53,10 +71,11 @@
 			       enum mlx5_port_status status);
 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 				 enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
 			      u8 port);
 
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
@@ -84,4 +103,10 @@
 int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
 int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
 
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+			 bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+			     u16 offset, u16 size, u8 *data);
+
 #endif /* __MLX5_PORT_H__ */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index cf031a3..6422102 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -668,6 +668,12 @@
 				struct mlx5_core_qp *sq);
 void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
 				  struct mlx5_core_qp *sq);
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+			      int reset, void *out, int out_size);
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+				  u32 *out_of_buffer);
 
 static inline const char *mlx5_qp_type_str(int type)
 {
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index bd93e63..301da4a 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@
 				     u16 vport, u8 *addr);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
 				      u16 vport, u8 *addr);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
 					   u64 *system_image_guid);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed6407d..727f7997 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -72,6 +72,10 @@
 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
 #endif
 
+#ifndef page_to_virt
+#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
+#endif
+
 /*
  * To prevent common memory management code establishing
  * a zero page mapping on a read fault.
@@ -500,11 +504,20 @@
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int total_mapcount(struct page *page);
+int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
 #else
 static inline int total_mapcount(struct page *page)
 {
 	return page_mapcount(page);
 }
+static inline int page_trans_huge_mapcount(struct page *page,
+					   int *total_mapcount)
+{
+	int mapcount = page_mapcount(page);
+	if (total_mapcount)
+		*total_mapcount = mapcount;
+	return mapcount;
+}
 #endif
 
 static inline struct page *virt_to_head_page(const void *x)
@@ -623,7 +636,7 @@
  *
  * A page may belong to an inode's memory mapping. In this case, page->mapping
  * is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_CACHE_SIZE.
+ * in units of PAGE_SIZE.
  *
  * If pagecache pages are not associated with an inode, they are said to be
  * anonymous pages. These may become associated with the swapcache, and in that
@@ -948,7 +961,7 @@
 
 static __always_inline void *lowmem_page_address(const struct page *page)
 {
-	return __va(PFN_PHYS(page_to_pfn(page)));
+	return page_to_virt(page);
 }
 
 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
@@ -1031,6 +1044,8 @@
 	page = compound_head(page);
 	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
 		return true;
+	if (PageHuge(page))
+		return false;
 	for (i = 0; i < hpage_nr_pages(page); i++) {
 		if (atomic_read(&page[i]._mapcount) >= 0)
 			return true;
@@ -1138,6 +1153,8 @@
 
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 		pte_t pte);
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+				pmd_t pmd);
 
 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
@@ -1250,78 +1267,20 @@
 			    unsigned long start, unsigned long nr_pages,
 			    int write, int force, struct page **pages,
 			    struct vm_area_struct **vmas);
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
 			    int write, int force, struct page **pages,
 			    struct vm_area_struct **vmas);
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages, int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
 			       int write, int force, struct page **pages,
 			       unsigned int gup_flags);
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 
-/* suppress warnings from use in EXPORT_SYMBOL() */
-#ifndef __DISABLE_GUP_DEPRECATED
-#define __gup_deprecated __deprecated
-#else
-#define __gup_deprecated
-#endif
-/*
- * These macros provide backward-compatibility with the old
- * get_user_pages() variants which took tsk/mm.  These
- * functions/macros provide both compile-time __deprecated so we
- * can catch old-style use and not break the build.  The actual
- * functions also have WARN_ON()s to let us know at runtime if
- * the get_user_pages() should have been the "remote" variant.
- *
- * These are hideous, but temporary.
- *
- * If you run into one of these __deprecated warnings, look
- * at how you are calling get_user_pages().  If you are calling
- * it with current/current->mm as the first two arguments,
- * simply remove those arguments.  The behavior will be the same
- * as it is now.  If you are calling it on another task, use
- * get_user_pages_remote() instead.
- *
- * Any questions?  Ask Dave Hansen <dave@sr71.net>
- */
-long
-__gup_deprecated
-get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages,
-		struct vm_area_struct **vmas);
-#define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...)	\
-	get_user_pages
-#define get_user_pages(...) GUP_MACRO(__VA_ARGS__,	\
-		get_user_pages8, x,			\
-		get_user_pages6, x, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages,
-		int *locked);
-#define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...)	\
-	get_user_pages_locked
-#define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__,	\
-		get_user_pages_locked8,	x,			\
-		get_user_pages_locked6, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages);
-#define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...)	\
-	get_user_pages_unlocked
-#define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__,	\
-		get_user_pages_unlocked7, x,			\
-		get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__)
-
 /* Container for pinned pfns / pages */
 struct frame_vector {
 	unsigned int nr_allocated;	/* Number of frames we have space for */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 944b2b3..c2d75b4 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@
 
 	/* Information about our backing store: */
 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
-					   units, *not* PAGE_CACHE_SIZE */
+					   units */
 	struct file * vm_file;		/* File we map to (can be NULL). */
 	void * vm_private_data;		/* was vm_pte (shared mem) */
 
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 7b41c6d..f7ed271 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -36,7 +36,6 @@
 	EVENT_XFER_COMPLETE,
 	EVENT_DATA_COMPLETE,
 	EVENT_DATA_ERROR,
-	EVENT_XFER_ERROR
 };
 
 struct mmc_data;
@@ -55,6 +54,7 @@
 /**
  * struct dw_mci - MMC controller state shared between all slots
  * @lock: Spinlock protecting the queue and associated data.
+ * @irq_lock: Spinlock protecting the INTMASK setting.
  * @regs: Pointer to MMIO registers.
  * @fifo_reg: Pointer to MMIO registers for data FIFO
  * @sg: Scatterlist entry currently being processed by PIO code, if any.
@@ -65,6 +65,9 @@
  * @cmd: The command currently being sent to the card, or NULL.
  * @data: The data currently being transferred, or NULL if no data
  *	transfer is in progress.
+ * @stop_abort: The command currently prepared for stopping the transfer.
+ * @prev_blksz: Record of the previous transfer block size (blksz).
+ * @timing: Record of current ios timing.
  * @use_dma: Whether DMA channel is initialized or not.
  * @using_dma: Whether DMA is in use for the current transfer.
  * @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
@@ -72,7 +75,10 @@
  * @sg_cpu: Virtual address of DMA buffer.
  * @dma_ops: Pointer to platform-specific DMA callbacks.
  * @cmd_status: Snapshot of SR taken upon completion of the current
+ * @ring_size: Buffer size for idma descriptors.
  *	command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @dms: structure of slave-dma private data.
+ * @phy_regs: physical address of controller's register map
  * @data_status: Snapshot of SR taken upon completion of the current
  *	data transfer. Only valid when EVENT_DATA_COMPLETE or
  *	EVENT_DATA_ERROR is pending.
@@ -80,7 +86,6 @@
  *	to be sent.
  * @dir_status: Direction of current transfer.
  * @tasklet: Tasklet running the request state machine.
- * @card_tasklet: Tasklet handling card detect.
  * @pending_events: Bitmask of events flagged by the interrupt handler
  *	to be processed by the tasklet.
  * @completed_events: Bitmask of events which the state machine has
@@ -91,6 +96,7 @@
  *	rate and timeout calculations.
  * @current_speed: Configured rate of the controller.
  * @num_slots: Number of slots available.
+ * @fifoth_val: The value of FIFOTH register.
  * @verid: Denote Version ID.
  * @dev: Device associated with the MMC controller.
  * @pdata: Platform data associated with the MMC controller.
@@ -107,9 +113,11 @@
  * @push_data: Pointer to FIFO push function.
  * @pull_data: Pointer to FIFO pull function.
  * @quirks: Set of quirks that apply to specific versions of the IP.
+ * @vqmmc_enabled: Status of vqmmc, should be true or false.
  * @irq_flags: The flags to be passed to request_irq.
  * @irq: The irq value to be passed to request_irq.
  * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ * @cmd11_timer: Timer for SD3.0 voltage switch over scheme.
  * @dto_timer: Timer for broken data transfer over scheme.
  *
  * Locking
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 8dd4d29..85800b4 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -93,28 +93,39 @@
 	void	(*pre_req)(struct mmc_host *host, struct mmc_request *req,
 			   bool is_first_req);
 	void	(*request)(struct mmc_host *host, struct mmc_request *req);
+
 	/*
-	 * Avoid calling these three functions too often or in a "fast path",
-	 * since underlaying controller might implement them in an expensive
-	 * and/or slow way.
-	 *
-	 * Also note that these functions might sleep, so don't call them
-	 * in the atomic contexts!
-	 *
+	 * Avoid calling the next three functions too often or in a "fast
+	 * path", since underlaying controller might implement them in an
+	 * expensive and/or slow way. Also note that these functions might
+	 * sleep, so don't call them in the atomic contexts!
+	 */
+
+	/*
+	 * Notes to the set_ios callback:
+	 * ios->clock might be 0. For some controllers, setting 0Hz
+	 * works like any other frequency. However, some controllers
+	 * explicitly need to disable the clock. Otherwise e.g. voltage
+	 * switching might fail because the SDCLK is not really quiet.
+	 */
+	void	(*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
+
+	/*
 	 * Return values for the get_ro callback should be:
 	 *   0 for a read/write card
 	 *   1 for a read-only card
 	 *   -ENOSYS when not supported (equal to NULL callback)
 	 *   or a negative errno value when something bad happened
-	 *
+	 */
+	int	(*get_ro)(struct mmc_host *host);
+
+	/*
 	 * Return values for the get_cd callback should be:
 	 *   0 for a absent card
 	 *   1 for a present card
 	 *   -ENOSYS when not supported (equal to NULL callback)
 	 *   or a negative errno value when something bad happened
 	 */
-	void	(*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
-	int	(*get_ro)(struct mmc_host *host);
 	int	(*get_cd)(struct mmc_host *host);
 
 	void	(*enable_sdio_irq)(struct mmc_host *host, int enable);
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
deleted file mode 100644
index 95d6f03..0000000
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef LINUX_MMC_SH_MOBILE_SDHI_H
-#define LINUX_MMC_SH_MOBILE_SDHI_H
-
-#include <linux/types.h>
-
-#define SH_MOBILE_SDHI_IRQ_CARD_DETECT	"card_detect"
-#define SH_MOBILE_SDHI_IRQ_SDCARD	"sdcard"
-#define SH_MOBILE_SDHI_IRQ_SDIO		"sdio"
-
-#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */
diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h
deleted file mode 100644
index 5f5cd80..0000000
--- a/include/linux/mmc/tmio.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * include/linux/mmc/tmio.h
- *
- * Copyright (C) 2016 Sang Engineering, Wolfram Sang
- * Copyright (C) 2015-16 Renesas Electronics Corporation
- * Copyright (C) 2007 Ian Molton
- * Copyright (C) 2004 Ian Molton
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Driver for the MMC / SD / SDIO cell found in:
- *
- * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
- */
-#ifndef LINUX_MMC_TMIO_H
-#define LINUX_MMC_TMIO_H
-
-#define CTL_SD_CMD 0x00
-#define CTL_ARG_REG 0x04
-#define CTL_STOP_INTERNAL_ACTION 0x08
-#define CTL_XFER_BLK_COUNT 0xa
-#define CTL_RESPONSE 0x0c
-#define CTL_STATUS 0x1c
-#define CTL_STATUS2 0x1e
-#define CTL_IRQ_MASK 0x20
-#define CTL_SD_CARD_CLK_CTL 0x24
-#define CTL_SD_XFER_LEN 0x26
-#define CTL_SD_MEM_CARD_OPT 0x28
-#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
-#define CTL_SD_DATA_PORT 0x30
-#define CTL_TRANSACTION_CTL 0x34
-#define CTL_SDIO_STATUS 0x36
-#define CTL_SDIO_IRQ_MASK 0x38
-#define CTL_DMA_ENABLE 0xd8
-#define CTL_RESET_SD 0xe0
-#define CTL_VERSION 0xe2
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-/* Definitions for values the CTRL_STATUS register can take. */
-#define TMIO_STAT_CMDRESPEND    0x00000001
-#define TMIO_STAT_DATAEND       0x00000004
-#define TMIO_STAT_CARD_REMOVE   0x00000008
-#define TMIO_STAT_CARD_INSERT   0x00000010
-#define TMIO_STAT_SIGSTATE      0x00000020
-#define TMIO_STAT_WRPROTECT     0x00000080
-#define TMIO_STAT_CARD_REMOVE_A 0x00000100
-#define TMIO_STAT_CARD_INSERT_A 0x00000200
-#define TMIO_STAT_SIGSTATE_A    0x00000400
-#define TMIO_STAT_CMD_IDX_ERR   0x00010000
-#define TMIO_STAT_CRCFAIL       0x00020000
-#define TMIO_STAT_STOPBIT_ERR   0x00040000
-#define TMIO_STAT_DATATIMEOUT   0x00080000
-#define TMIO_STAT_RXOVERFLOW    0x00100000
-#define TMIO_STAT_TXUNDERRUN    0x00200000
-#define TMIO_STAT_CMDTIMEOUT    0x00400000
-#define TMIO_STAT_RXRDY         0x01000000
-#define TMIO_STAT_TXRQ          0x02000000
-#define TMIO_STAT_ILL_FUNC      0x20000000
-#define TMIO_STAT_CMD_BUSY      0x40000000
-#define TMIO_STAT_ILL_ACCESS    0x80000000
-
-#define	CLK_CTL_DIV_MASK	0xff
-#define	CLK_CTL_SCLKEN		BIT(8)
-
-#define TMIO_BBS		512		/* Boot block size */
-
-#endif /* LINUX_MMC_TMIO_H */
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index 70fffeb..a444178 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -1,9 +1,16 @@
 #ifndef _LINUX_MMU_CONTEXT_H
 #define _LINUX_MMU_CONTEXT_H
 
+#include <asm/mmu_context.h>
+
 struct mm_struct;
 
 void use_mm(struct mm_struct *mm);
 void unuse_mm(struct mm_struct *mm);
 
+/* Architectures that care about IRQ state in switch_mm can override this. */
+#ifndef switch_mm_irqs_off
+# define switch_mm_irqs_off switch_mm
+#endif
+
 #endif
diff --git a/include/linux/module.h b/include/linux/module.h
index 2bb0c30..3daf2b3 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -330,6 +330,15 @@
 	char *strtab;
 };
 
+#ifdef CONFIG_LIVEPATCH
+struct klp_modinfo {
+	Elf_Ehdr hdr;
+	Elf_Shdr *sechdrs;
+	char *secstrings;
+	unsigned int symndx;
+};
+#endif
+
 struct module {
 	enum module_state state;
 
@@ -456,7 +465,11 @@
 #endif
 
 #ifdef CONFIG_LIVEPATCH
+	bool klp; /* Is this a livepatch module? */
 	bool klp_alive;
+
+	/* Elf information */
+	struct klp_modinfo *klp_info;
 #endif
 
 #ifdef CONFIG_MODULE_UNLOAD
@@ -630,6 +643,18 @@
 	return module && module->async_probe_requested;
 }
 
+#ifdef CONFIG_LIVEPATCH
+static inline bool is_livepatch_module(struct module *mod)
+{
+	return mod->klp;
+}
+#else /* !CONFIG_LIVEPATCH */
+static inline bool is_livepatch_module(struct module *mod)
+{
+	return false;
+}
+#endif /* CONFIG_LIVEPATCH */
+
 #else /* !CONFIG_MODULES... */
 
 /* Given an address, look for it in the exception tables. */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 7712721..ef9fea4 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -283,17 +283,7 @@
 		    const u_char *buf);
 
 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
-
-static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
-				struct mtd_oob_ops *ops)
-{
-	ops->retlen = ops->oobretlen = 0;
-	if (!mtd->_write_oob)
-		return -EOPNOTSUPP;
-	if (!(mtd->flags & MTD_WRITEABLE))
-		return -EROFS;
-	return mtd->_write_oob(mtd, to, ops);
-}
+int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);
 
 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
 			   struct otp_info *buf);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 77d0170..ec5ec28 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -79,6 +79,8 @@
 
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
+struct qstr;
+extern struct dentry *lookup_hash(const struct qstr *, struct dentry *);
 
 extern int follow_down_one(struct path *);
 extern int follow_down(struct path *);
diff --git a/include/linux/net.h b/include/linux/net.h
index 49175e4..9aa49a0 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -218,8 +218,7 @@
 struct socket *sock_alloc(void);
 void sock_release(struct socket *sock);
 int sock_sendmsg(struct socket *sock, struct msghdr *msg);
-int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
-		 int flags);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
 struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
 struct socket *sockfd_lookup(int fd, int *err);
 struct socket *sock_from_file(struct file *file, int *err);
@@ -246,7 +245,15 @@
 	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
 #define net_info_ratelimited(fmt, ...)				\
 	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define net_dbg_ratelimited(fmt, ...)					\
+do {									\
+	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
+	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
+	    net_ratelimit())						\
+		__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);	\
+} while (0)
+#elif defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...)				\
 	net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
 #else
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index a734bf4..bc87362 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -39,6 +39,7 @@
 	NETIF_F_UFO_BIT,		/* ... UDPv4 fragmentation */
 	NETIF_F_GSO_ROBUST_BIT,		/* ... ->SKB_GSO_DODGY */
 	NETIF_F_TSO_ECN_BIT,		/* ... TCP ECN support */
+	NETIF_F_TSO_MANGLEID_BIT,	/* ... IPV4 ID mangling allowed */
 	NETIF_F_TSO6_BIT,		/* ... TCPv6 segmentation */
 	NETIF_F_FSO_BIT,		/* ... FCoE segmentation */
 	NETIF_F_GSO_GRE_BIT,		/* ... GRE with TSO */
@@ -47,6 +48,10 @@
 	NETIF_F_GSO_SIT_BIT,		/* ... SIT tunnel with TSO */
 	NETIF_F_GSO_UDP_TUNNEL_BIT,	/* ... UDP TUNNEL with TSO */
 	NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
+	NETIF_F_GSO_PARTIAL_BIT,	/* ... Only segment inner-most L4
+					 *     in hardware and all other
+					 *     headers in software.
+					 */
 	NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
 	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
 		NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
@@ -120,6 +125,8 @@
 #define NETIF_F_GSO_SIT		__NETIF_F(GSO_SIT)
 #define NETIF_F_GSO_UDP_TUNNEL	__NETIF_F(GSO_UDP_TUNNEL)
 #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
+#define NETIF_F_TSO_MANGLEID	__NETIF_F(TSO_MANGLEID)
+#define NETIF_F_GSO_PARTIAL	 __NETIF_F(GSO_PARTIAL)
 #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
@@ -145,10 +152,6 @@
 #define NETIF_F_GSO_MASK	(__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
 		__NETIF_F_BIT(NETIF_F_GSO_SHIFT))
 
-/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
-				 NETIF_F_TSO6 | NETIF_F_UFO)
-
 /* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be
  * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set--
  * this would be contradictory
@@ -156,11 +159,15 @@
 #define NETIF_F_CSUM_MASK	(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
 				 NETIF_F_HW_CSUM)
 
-#define NETIF_F_ALL_TSO 	(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define NETIF_F_ALL_TSO 	(NETIF_F_TSO | NETIF_F_TSO6 | \
+				 NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID)
 
 #define NETIF_F_ALL_FCOE	(NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
 				 NETIF_F_FSO)
 
+/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE	(NETIF_F_ALL_TSO | NETIF_F_UFO)
+
 /*
  * If one device supports one of these features, then enable them
  * for all in netdev_increment_features.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cb0d5d0..c148edf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -106,7 +106,6 @@
 	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
 	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
 	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy*/
-	NETDEV_TX_LOCKED = 0x20,	/* driver tx lock was already taken */
 };
 typedef enum netdev_tx netdev_tx_t;
 
@@ -570,28 +569,27 @@
 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
 	int			numa_node;
 #endif
+	unsigned long		tx_maxrate;
+	/*
+	 * Number of TX timeouts for this queue
+	 * (/sys/class/net/DEV/Q/trans_timeout)
+	 */
+	unsigned long		trans_timeout;
 /*
  * write-mostly part
  */
 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
 	int			xmit_lock_owner;
 	/*
-	 * please use this field instead of dev->trans_start
+	 * Time (in jiffies) of last Tx
 	 */
 	unsigned long		trans_start;
 
-	/*
-	 * Number of TX timeouts for this queue
-	 * (/sys/class/net/DEV/Q/trans_timeout)
-	 */
-	unsigned long		trans_timeout;
-
 	unsigned long		state;
 
 #ifdef CONFIG_BQL
 	struct dql		dql;
 #endif
-	unsigned long		tx_maxrate;
 } ____cacheline_aligned_in_smp;
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -831,7 +829,6 @@
  *	the queue before that can happen; it's for obsolete devices and weird
  *	corner cases, but the stack really does a non-trivial amount
  *	of useless work if you return NETDEV_TX_BUSY.
- *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *	Required; cannot be NULL.
  *
  * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
@@ -1548,7 +1545,6 @@
  *
  *	@offload_fwd_mark:	Offload device fwding mark
  *
- *	@trans_start:		Time (in jiffies) of last Tx
  *	@watchdog_timeo:	Represents the timeout that is used by
  *				the watchdog (see dev_watchdog())
  *	@watchdog_timer:	List of timers
@@ -1586,8 +1582,6 @@
  *	@gso_max_size:	Maximum size of generic segmentation offload
  *	@gso_max_segs:	Maximum number of segments that can be passed to the
  *			NIC for GSO
- *	@gso_min_segs:	Minimum number of segments that can be passed to the
- *			NIC for GSO
  *
  *	@dcbnl_ops:	Data Center Bridging netlink ops
  *	@num_tc:	Number of traffic classes in the net device
@@ -1656,6 +1650,7 @@
 	netdev_features_t	vlan_features;
 	netdev_features_t	hw_enc_features;
 	netdev_features_t	mpls_features;
+	netdev_features_t	gso_partial_features;
 
 	int			ifindex;
 	int			group;
@@ -1798,13 +1793,6 @@
 #endif
 
 	/* These may be needed for future network-power-down code. */
-
-	/*
-	 * trans_start here is expensive for high speed devices on SMP,
-	 * please use netdev_queue->trans_start instead.
-	 */
-	unsigned long		trans_start;
-
 	struct timer_list	watchdog_timer;
 
 	int __percpu		*pcpu_refcnt;
@@ -1858,7 +1846,7 @@
 	unsigned int		gso_max_size;
 #define GSO_MAX_SEGS		65535
 	u16			gso_max_segs;
-	u16			gso_min_segs;
+
 #ifdef CONFIG_DCB
 	const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
@@ -2120,7 +2108,13 @@
 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
 	u8	is_ipv6:1;
 
-	/* 7 bit hole */
+	/* Used in GRE, set in fou/gue_gro_receive */
+	u8	is_fou:1;
+
+	/* Used to determine if flush_id can be ignored */
+	u8	is_atomic:1;
+
+	/* 5 bit hole */
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -2159,23 +2153,6 @@
 	struct list_head	 list;
 };
 
-struct udp_offload;
-
-struct udp_offload_callbacks {
-	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
-						 struct sk_buff *skb,
-						 struct udp_offload *uoff);
-	int			(*gro_complete)(struct sk_buff *skb,
-						int nhoff,
-						struct udp_offload *uoff);
-};
-
-struct udp_offload {
-	__be16			 port;
-	u8			 ipproto;
-	struct udp_offload_callbacks callbacks;
-};
-
 /* often modified stats are per-CPU, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
 	u64     rx_packets;
@@ -2256,6 +2233,8 @@
 #define NETDEV_BONDING_INFO	0x0019
 #define NETDEV_PRECHANGEUPPER	0x001A
 #define NETDEV_CHANGELOWERSTATE	0x001B
+#define NETDEV_OFFLOAD_PUSH_VXLAN	0x001C
+#define NETDEV_OFFLOAD_PUSH_GENEVE	0x001D
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2747,7 +2726,6 @@
 	/* stats */
 	unsigned int		processed;
 	unsigned int		time_squeeze;
-	unsigned int		cpu_collision;
 	unsigned int		received_rps;
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
@@ -2760,11 +2738,15 @@
 	struct sk_buff		*completion_queue;
 
 #ifdef CONFIG_RPS
-	/* Elements below can be accessed between CPUs for RPS */
+	/* input_queue_head should be written by the cpu owning this struct,
+	 * and only read by other cpus. Worth its own cache line.
+	 */
+	unsigned int		input_queue_head ____cacheline_aligned_in_smp;
+
+	/* Elements below can be accessed between CPUs for RPS/RFS */
 	struct call_single_data	csd ____cacheline_aligned_in_smp;
 	struct softnet_data	*rps_ipi_next;
 	unsigned int		cpu;
-	unsigned int		input_queue_head;
 	unsigned int		input_queue_tail;
 #endif
 	unsigned int		dropped;
@@ -2801,7 +2783,7 @@
 		netif_schedule_queue(netdev_get_tx_queue(dev, i));
 }
 
-static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 {
 	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
@@ -2851,7 +2833,7 @@
 	}
 }
 
-static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
 	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
@@ -3273,7 +3255,10 @@
 				    struct netdev_queue *txq, int *ret);
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
-bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
+bool is_skb_forwardable(const struct net_device *dev,
+			const struct sk_buff *skb);
+
+void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
 
 extern int		netdev_budget;
 
@@ -3490,6 +3475,15 @@
 		txq->trans_start = jiffies;
 }
 
+/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
+static inline void netif_trans_update(struct net_device *dev)
+{
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+
+	if (txq->trans_start != jiffies)
+		txq->trans_start = jiffies;
+}
+
 /**
  *	netif_tx_lock - grab network device transmit lock
  *	@dev: network device
@@ -3765,7 +3759,6 @@
 extern int		netdev_max_backlog;
 extern int		netdev_tstamp_prequeue;
 extern int		weight_p;
-extern int		bpf_jit_enable;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
@@ -4001,13 +3994,14 @@
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
-	netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+	netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
 
 	/* check flags correspondence */
 	BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_GRE     != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
@@ -4016,6 +4010,7 @@
 	BUILD_BUG_ON(SKB_GSO_SIT     != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
 
 	return (features & feature) == feature;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 0e1f433..83b9a2e 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -234,6 +234,10 @@
 	spinlock_t lock;
 	/* References to the set */
 	u32 ref;
+	/* References to the set for netlink events like dump,
+	 * ref can be swapped out by ip_set_swap
+	 */
+	u32 ref_netlink;
 	/* The core set type */
 	struct ip_set_type *type;
 	/* The type variant doing the real job */
@@ -347,7 +351,8 @@
 	return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
 		nla_put_net64(skb, IPSET_ATTR_SKBMARK,
 			      cpu_to_be64((u64)skbinfo->skbmark << 32 |
-					  skbinfo->skbmarkmask))) ||
+					  skbinfo->skbmarkmask),
+			      IPSET_ATTR_PAD)) ||
 	       (skbinfo->skbprio &&
 		nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
 			      cpu_to_be32(skbinfo->skbprio))) ||
@@ -370,9 +375,11 @@
 ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
 {
 	return nla_put_net64(skb, IPSET_ATTR_BYTES,
-			     cpu_to_be64(ip_set_get_bytes(counter))) ||
+			     cpu_to_be64(ip_set_get_bytes(counter)),
+			     IPSET_ATTR_PAD) ||
 	       nla_put_net64(skb, IPSET_ATTR_PACKETS,
-			     cpu_to_be64(ip_set_get_packets(counter)));
+			     cpu_to_be64(ip_set_get_packets(counter)),
+			     IPSET_ATTR_PAD);
 }
 
 static inline void
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 80a305b..dc4f58a 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -242,11 +242,18 @@
 int xt_register_matches(struct xt_match *match, unsigned int n);
 void xt_unregister_matches(struct xt_match *match, unsigned int n);
 
+int xt_check_entry_offsets(const void *base, const char *elems,
+			   unsigned int target_offset,
+			   unsigned int next_offset);
+
 int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
 		   bool inv_proto);
 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
 		    bool inv_proto);
 
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+				 struct xt_counters_info *info, bool compat);
+
 struct xt_table *xt_register_table(struct net *net,
 				   const struct xt_table *table,
 				   struct xt_table_info *bootstrap,
@@ -373,16 +380,16 @@
  * allows us to return 0 for single core systems without forcing
  * callers to deal with SMP vs. NONSMP issues.
  */
-static inline u64 xt_percpu_counter_alloc(void)
+static inline unsigned long xt_percpu_counter_alloc(void)
 {
 	if (nr_cpu_ids > 1) {
 		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
 						    sizeof(struct xt_counters));
 
 		if (res == NULL)
-			return (u64) -ENOMEM;
+			return -ENOMEM;
 
-		return (u64) (__force unsigned long) res;
+		return (__force unsigned long) res;
 	}
 
 	return 0;
@@ -480,7 +487,7 @@
 int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
 
 int xt_compat_match_offset(const struct xt_match *match);
-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 			      unsigned int *size);
 int xt_compat_match_to_user(const struct xt_entry_match *m,
 			    void __user **dstptr, unsigned int *size);
@@ -490,6 +497,9 @@
 				unsigned int *size);
 int xt_compat_target_to_user(const struct xt_entry_target *t,
 			     void __user **dstptr, unsigned int *size);
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+				  unsigned int target_offset,
+				  unsigned int next_offset);
 
 #endif /* CONFIG_COMPAT */
 #endif /* _X_TABLES_H */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 67300f8..d71278c 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -163,11 +163,9 @@
 	/* Open contexts for shared mmap writes */
 	struct list_head	open_files;
 
-	/* Number of in-flight sillydelete RPC calls */
-	atomic_t		silly_count;
-	/* List of deferred sillydelete requests */
-	struct hlist_head	silly_list;
-	wait_queue_head_t	waitqueue;
+	/* Readers: in-flight sillydelete RPC calls */
+	/* Writers: rmdir */
+	struct rw_semaphore	rmdir_sem;
 
 #if IS_ENABLED(CONFIG_NFS_V4)
 	struct nfs4_cached_acl	*nfs4_acl;
@@ -445,10 +443,9 @@
 /*
  * linux/fs/nfs/direct.c
  */
-extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *);
 extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
-			struct iov_iter *iter,
-			loff_t pos);
+			struct iov_iter *iter);
 extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
 			struct iov_iter *iter);
 
@@ -492,9 +489,6 @@
  * linux/fs/nfs/unlink.c
  */
 extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
-extern void nfs_wait_on_sillyrename(struct dentry *dentry);
-extern void nfs_block_sillyrename(struct dentry *dentry);
-extern void nfs_unblock_sillyrename(struct dentry *dentry);
 
 /*
  * linux/fs/nfs/write.c
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index f2f650f..957049f 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -41,8 +41,8 @@
 	struct page		*wb_page;	/* page to read in/write out */
 	struct nfs_open_context	*wb_context;	/* File state context info */
 	struct nfs_lock_context	*wb_lock_context;	/* lock context info */
-	pgoff_t			wb_index;	/* Offset >> PAGE_CACHE_SHIFT */
-	unsigned int		wb_offset,	/* Offset & ~PAGE_CACHE_MASK */
+	pgoff_t			wb_index;	/* Offset >> PAGE_SHIFT */
+	unsigned int		wb_offset,	/* Offset & ~PAGE_MASK */
 				wb_pgbase,	/* Start of page data */
 				wb_bytes;	/* Length of request */
 	struct kref		wb_kref;	/* reference count */
@@ -184,7 +184,7 @@
 static inline
 loff_t req_offset(struct nfs_page *req)
 {
-	return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset;
+	return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
 }
 
 #endif /* _LINUX_NFS_PAGE_H */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index d320906..ee8491d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1468,10 +1468,10 @@
 };
 
 struct nfs_unlinkdata {
-	struct hlist_node list;
 	struct nfs_removeargs args;
 	struct nfs_removeres res;
-	struct inode *dir;
+	struct dentry *dentry;
+	wait_queue_head_t wq;
 	struct rpc_cred	*cred;
 	struct nfs_fattr dir_attr;
 	long timeout;
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 9abb763..e9fcf90 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -331,7 +331,7 @@
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
 	if (len == NILFS_MAX_REC_LEN)
 		return 1 << 16;
 #endif
@@ -340,7 +340,7 @@
 
 static inline __le16 nilfs_rec_len_to_disk(unsigned len)
 {
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(NILFS_MAX_REC_LEN);
 	else if (len > (1 << 16))
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 167342c..0f6f660 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -92,6 +92,8 @@
 	IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
 	IEEE802154_ATTR_LLSEC_DEV_KEY_MODE,
 
+	IEEE802154_ATTR_PAD,
+
 	__IEEE802154_ATTR_MAX,
 };
 
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index a55986f..7d51b29 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -21,13 +21,13 @@
 	NVME_REG_CAP	= 0x0000,	/* Controller Capabilities */
 	NVME_REG_VS	= 0x0008,	/* Version */
 	NVME_REG_INTMS	= 0x000c,	/* Interrupt Mask Set */
-	NVME_REG_INTMC	= 0x0010,	/* Interrupt Mask Set */
+	NVME_REG_INTMC	= 0x0010,	/* Interrupt Mask Clear */
 	NVME_REG_CC	= 0x0014,	/* Controller Configuration */
 	NVME_REG_CSTS	= 0x001c,	/* Controller Status */
 	NVME_REG_NSSR	= 0x0020,	/* NVM Subsystem Reset */
 	NVME_REG_AQA	= 0x0024,	/* Admin Queue Attributes */
 	NVME_REG_ASQ	= 0x0028,	/* Admin SQ Base Address */
-	NVME_REG_ACQ	= 0x0030,	/* Admin SQ Base Address */
+	NVME_REG_ACQ	= 0x0030,	/* Admin CQ Base Address */
 	NVME_REG_CMBLOC = 0x0038,	/* Controller Memory Buffer Location */
 	NVME_REG_CMBSZ	= 0x003c,	/* Controller Memory Buffer Size */
 };
diff --git a/include/linux/of.h b/include/linux/of.h
index 7fcb681..77ddace 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -133,7 +133,7 @@
 
 static inline bool is_of_node(struct fwnode_handle *fwnode)
 {
-	return fwnode && fwnode->type == FWNODE_OF;
+	return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
 }
 
 static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
@@ -685,6 +685,15 @@
 }
 #endif
 
+#ifdef CONFIG_OF_NUMA
+extern int of_numa_init(void);
+#else
+static inline int of_numa_init(void)
+{
+	return -ENOSYS;
+}
+#endif
+
 static inline struct device_node *of_find_matching_node(
 	struct device_node *from,
 	const struct of_device_id *matches)
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 01c0a55..3786473 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -47,10 +47,6 @@
 extern const __be32 *of_get_address(struct device_node *dev, int index,
 			   u64 *size, unsigned int *flags);
 
-extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
-extern unsigned long pci_address_to_pio(phys_addr_t addr);
-extern phys_addr_t pci_pio_to_address(unsigned long pio);
-
 extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
 			struct device_node *node);
 extern struct of_pci_range *of_pci_range_parser_one(
@@ -86,11 +82,6 @@
 	return NULL;
 }
 
-static inline phys_addr_t pci_pio_to_address(unsigned long pio)
-{
-	return 0;
-}
-
 static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
 			struct device_node *node)
 {
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index ffbe470..bd02b44 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -12,7 +12,7 @@
 			     size_t *size);
 
 extern void of_iommu_init(void);
-extern struct iommu_ops *of_iommu_configure(struct device *dev,
+extern const struct iommu_ops *of_iommu_configure(struct device *dev,
 					struct device_node *master_np);
 
 #else
@@ -25,7 +25,7 @@
 }
 
 static inline void of_iommu_init(void) { }
-static inline struct iommu_ops *of_iommu_configure(struct device *dev,
+static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
 					 struct device_node *master_np)
 {
 	return NULL;
@@ -33,8 +33,8 @@
 
 #endif	/* CONFIG_OF_IOMMU */
 
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
-struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops);
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np);
 
 extern struct of_device_id __iommu_of_table;
 
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f4ed4f1b..6b052aa 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -517,6 +517,27 @@
 }
 
 /*
+ * PageTransCompoundMap is the same as PageTransCompound, but it also
+ * guarantees the primary MMU has the entire compound page mapped
+ * through pmd_trans_huge, which in turn guarantees the secondary MMUs
+ * can also map the entire compound page. This allows the secondary
+ * MMUs to call get_user_pages() only once for each compound page and
+ * to immediately map the entire compound page with a single secondary
+ * MMU fault. If there will be a pmd split later, the secondary MMUs
+ * will get an update through the MMU notifier invalidation through
+ * split_huge_pmd().
+ *
+ * Unlike PageTransCompound, this is safe to call only while
+ * split_huge_pmd() cannot run from under us, e.g. when protected by the
+ * MMU notifier; otherwise it may result in page->_mapcount < 0 false
+ * positives.
+ */
+static inline int PageTransCompoundMap(struct page *page)
+{
+	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+}
+
+/*
  * PageTransTail returns true for both transparent huge pages
  * and hugetlbfs pages, so it should only be called when it's known
  * that hugetlbfs pages aren't involved.
@@ -559,6 +580,7 @@
 #else
 TESTPAGEFLAG_FALSE(TransHuge)
 TESTPAGEFLAG_FALSE(TransCompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap)
 TESTPAGEFLAG_FALSE(TransTail)
 TESTPAGEFLAG_FALSE(DoubleMap)
 	TESTSETFLAG_FALSE(DoubleMap)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1ebd65c..7e1ab15 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,21 +86,6 @@
 				(__force unsigned long)mask;
 }
 
-/*
- * The page cache can be done in larger chunks than
- * one page, because it allows for more efficient
- * throughput (it can then be mapped into user
- * space in smaller chunks for same flexibility).
- *
- * Or rather, it _will_ be done in larger chunks.
- */
-#define PAGE_CACHE_SHIFT	PAGE_SHIFT
-#define PAGE_CACHE_SIZE		PAGE_SIZE
-#define PAGE_CACHE_MASK		PAGE_MASK
-#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
-
-#define page_cache_get(page)		get_page(page)
-#define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, bool cold);
 
 /*
@@ -390,13 +375,13 @@
 		return page->index << compound_order(page);
 
 	if (likely(!PageTransTail(page)))
-		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		return page->index;
 
 	/*
 	 *  We don't initialize ->index for tail pages: calculate based on
 	 *  head page
 	 */
-	pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = compound_head(page)->index;
 	pgoff += page - compound_head(page);
 	return pgoff;
 }
@@ -406,12 +391,12 @@
  */
 static inline loff_t page_offset(struct page *page)
 {
-	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+	return ((loff_t)page->index) << PAGE_SHIFT;
 }
 
 static inline loff_t page_file_offset(struct page *page)
 {
-	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+	return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
 }
 
 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -425,7 +410,7 @@
 		return linear_hugepage_index(vma, address);
 	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
 	pgoff += vma->vm_pgoff;
-	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	return pgoff;
 }
 
 extern void __lock_page(struct page *page);
@@ -535,8 +520,7 @@
 /*
  * Fault a userspace page into pagetables.  Return non-zero on a fault.
  *
- * This assumes that two userspace pages are always sufficient.  That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * This assumes that two userspace pages are always sufficient.
  */
 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 {
@@ -671,8 +655,8 @@
 
 static inline unsigned long dir_pages(struct inode *inode)
 {
-	return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
-			       PAGE_CACHE_SHIFT;
+	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
+			       PAGE_SHIFT;
 }
 
 #endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 004b813..b67e4df 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -166,8 +166,6 @@
 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
 	/* Flag for quirk use to store if quirk-specific ACS is enabled */
 	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
-	/* Flag to indicate the device uses dma_alias_devfn */
-	PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
 	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
 	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
 	/* Do not use bus resets for device */
@@ -273,7 +271,7 @@
 	u8		rom_base_reg;	/* which config register controls the ROM */
 	u8		pin;		/* which interrupt pin this device uses */
 	u16		pcie_flags_reg;	/* cached PCIe Capabilities Register */
-	u8		dma_alias_devfn;/* devfn of DMA alias, if any */
+	unsigned long	*dma_alias_mask;/* mask of enabled devfn aliases */
 
 	struct pci_driver *driver;	/* which driver has allocated this device */
 	u64		dma_mask;	/* Mask of the bits of bus address this
@@ -1111,6 +1109,7 @@
 /* Vital product data routines */
 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+int pci_set_vpd_size(struct pci_dev *dev, size_t len);
 
 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
@@ -1164,6 +1163,9 @@
 			void *alignf_data);
 
 
+int pci_register_io_range(phys_addr_t addr, resource_size_t size);
+unsigned long pci_address_to_pio(phys_addr_t addr);
+phys_addr_t pci_pio_to_address(unsigned long pio);
 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
 
 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
@@ -1480,6 +1482,8 @@
 { return -EIO; }
 static inline void pci_release_regions(struct pci_dev *dev) { }
 
+static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
+
 static inline void pci_block_cfg_access(struct pci_dev *dev) { }
 static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
 { return 0; }
@@ -1663,7 +1667,7 @@
 #ifdef CONFIG_PCI_QUIRKS
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
-void pci_dev_specific_enable_acs(struct pci_dev *dev);
+int pci_dev_specific_enable_acs(struct pci_dev *dev);
 #else
 static inline void pci_fixup_device(enum pci_fixup_pass pass,
 				    struct pci_dev *dev) { }
@@ -1672,7 +1676,10 @@
 {
 	return -ENOTTY;
 }
-static inline void pci_dev_specific_enable_acs(struct pci_dev *dev) { }
+static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
+{
+	return -ENOTTY;
+}
 #endif
 
 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
@@ -1988,6 +1995,8 @@
 }
 #endif
 
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
+bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
 int pci_for_each_dma_alias(struct pci_dev *pdev,
 			   int (*fn)(struct pci_dev *pdev,
 				     u16 alias, void *data), void *data);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 247da8c..c58752f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2604,6 +2604,24 @@
 #define PCI_DEVICE_ID_INTEL_82441	0x1237
 #define PCI_DEVICE_ID_INTEL_82380FB	0x124b
 #define PCI_DEVICE_ID_INTEL_82439	0x1250
+#define PCI_DEVICE_ID_INTEL_LIGHT_RIDGE             0x1513 /* Tbt 1 Gen 1 */
+#define PCI_DEVICE_ID_INTEL_EAGLE_RIDGE             0x151a
+#define PCI_DEVICE_ID_INTEL_LIGHT_PEAK              0x151b
+#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C         0x1547 /* Tbt 1 Gen 2 */
+#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C         0x1548
+#define PCI_DEVICE_ID_INTEL_PORT_RIDGE              0x1549
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_NHI    0x1566 /* Tbt 1 Gen 3 */
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE 0x1567
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_NHI    0x1568
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE 0x1569
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI     0x156a /* Thunderbolt 2 */
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE  0x156b
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI     0x156c
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE  0x156d
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI     0x1575 /* Thunderbolt 3 */
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE  0x1576
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI     0x1577
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE  0x1578
 #define PCI_DEVICE_ID_INTEL_80960_RP	0x1960
 #define PCI_DEVICE_ID_INTEL_82840_HB	0x1a21
 #define PCI_DEVICE_ID_INTEL_82845_HB	0x1a30
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 4f1089f..afcd130 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -21,6 +21,8 @@
 #define PCIE_PORT_SERVICE_HP		(1 << PCIE_PORT_SERVICE_HP_SHIFT)
 #define PCIE_PORT_SERVICE_VC_SHIFT	3	/* Virtual Channel */
 #define PCIE_PORT_SERVICE_VC		(1 << PCIE_PORT_SERVICE_VC_SHIFT)
+#define PCIE_PORT_SERVICE_DPC_SHIFT	4	/* Downstream Port Containment */
+#define PCIE_PORT_SERVICE_DPC		(1 << PCIE_PORT_SERVICE_DPC_SHIFT)
 
 struct pcie_device {
 	int		irq;	    /* Service IRQ/MSI/MSI-X Vector */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 4196c90..d28ac05 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -105,6 +105,8 @@
 	struct mutex	reserve_mutex;
 	u64		max_period;
 	bool		secure_access; /* 32-bit ARM only */
+#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
+	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 	struct platform_device	*plat_device;
 	struct pmu_hw_events	__percpu *hw_events;
 	struct notifier_block	hotplug_nb;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f291275..44f3383 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -58,7 +58,7 @@
 
 struct perf_callchain_entry {
 	__u64				nr;
-	__u64				ip[PERF_MAX_STACK_DEPTH];
+	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
 };
 
 struct perf_raw_record {
@@ -151,6 +151,15 @@
 	 */
 	struct task_struct		*target;
 
+	/*
+	 * PMU would store hardware filter configuration
+	 * here.
+	 */
+	void				*addr_filters;
+
+	/* Last sync'ed generation of filters */
+	unsigned long			addr_filters_gen;
+
 /*
  * hw_perf_event::state flags; used to track the PERF_EF_* state.
  */
@@ -216,6 +225,7 @@
 #define PERF_PMU_CAP_AUX_SW_DOUBLEBUF		0x08
 #define PERF_PMU_CAP_EXCLUSIVE			0x10
 #define PERF_PMU_CAP_ITRACE			0x20
+#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40
 
 /**
  * struct pmu - generic performance monitoring unit
@@ -240,6 +250,9 @@
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
 
+	/* number of address filters this PMU can do */
+	unsigned int			nr_addr_filters;
+
 	/*
 	 * Fully disable/enable this PMU, can be used to protect from the PMI
 	 * as well as for lazy/batch writing of the MSRs.
@@ -393,12 +406,71 @@
 	void (*free_aux)		(void *aux); /* optional */
 
 	/*
+	 * Validate address range filters: make sure the HW supports the
+	 * requested configuration and number of filters; return 0 if the
+	 * supplied filters are valid, -errno otherwise.
+	 *
+	 * Runs in the context of the ioctl()ing process and is not serialized
+	 * with the rest of the PMU callbacks.
+	 */
+	int (*addr_filters_validate)	(struct list_head *filters);
+					/* optional */
+
+	/*
+	 * Synchronize address range filter configuration:
+	 * translate hw-agnostic filters into hardware configuration in
+	 * event::hw::addr_filters.
+	 *
+	 * Runs as a part of filter sync sequence that is done in ->start()
+	 * callback by calling perf_event_addr_filters_sync().
+	 *
+	 * May (and should) traverse event::addr_filters::list, for which its
+	 * caller provides necessary serialization.
+	 */
+	void (*addr_filters_sync)	(struct perf_event *event);
+					/* optional */
+
+	/*
 	 * Filter events for PMU-specific reasons.
 	 */
 	int (*filter_match)		(struct perf_event *event); /* optional */
 };
 
 /**
+ * struct perf_addr_filter - address range filter definition
+ * @entry:	event's filter list linkage
+ * @inode:	object file's inode for file-based filters
+ * @offset:	filter range offset
+ * @size:	filter range size
+ * @range:	1: range, 0: address
+ * @filter:	1: filter/start, 0: stop
+ *
+ * This is a hardware-agnostic filter configuration as specified by the user.
+ */
+struct perf_addr_filter {
+	struct list_head	entry;
+	struct inode		*inode;
+	unsigned long		offset;
+	unsigned long		size;
+	unsigned int		range	: 1,
+				filter	: 1;
+};
+
+/**
+ * struct perf_addr_filters_head - container for address range filters
+ * @list:	list of filters for this event
+ * @lock:	spinlock that serializes accesses to the @list and event's
+ *		(and its children's) filter generations.
+ *
+ * A child event will use parent's @list (and therefore @lock), so they are
+ * bundled together; see perf_event_addr_filters().
+ */
+struct perf_addr_filters_head {
+	struct list_head	list;
+	raw_spinlock_t		lock;
+};
+
+/**
  * enum perf_event_active_state - the states of a event
  */
 enum perf_event_active_state {
@@ -566,6 +638,12 @@
 
 	atomic_t			event_limit;
 
+	/* address range filters */
+	struct perf_addr_filters_head	addr_filters;
+	/* vma address array for file-based filters */
+	unsigned long			*addr_filters_offs;
+	unsigned long			addr_filters_gen;
+
 	void (*destroy)(struct perf_event *);
 	struct rcu_head			rcu_head;
 
@@ -834,9 +912,25 @@
 				 struct perf_sample_data *data,
 				 struct pt_regs *regs);
 
+extern void perf_event_output_forward(struct perf_event *event,
+				     struct perf_sample_data *data,
+				     struct pt_regs *regs);
+extern void perf_event_output_backward(struct perf_event *event,
+				       struct perf_sample_data *data,
+				       struct pt_regs *regs);
 extern void perf_event_output(struct perf_event *event,
-				struct perf_sample_data *data,
-				struct pt_regs *regs);
+			      struct perf_sample_data *data,
+			      struct pt_regs *regs);
+
+static inline bool
+is_default_overflow_handler(struct perf_event *event)
+{
+	if (likely(event->overflow_handler == perf_event_output_forward))
+		return true;
+	if (unlikely(event->overflow_handler == perf_event_output_backward))
+		return true;
+	return false;
+}
 
 extern void
 perf_event_header__init_id(struct perf_event_header *header,
@@ -882,8 +976,6 @@
  */
 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
-	memset(regs, 0, sizeof(*regs));
-
 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
@@ -977,9 +1069,11 @@
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);
 
+extern int sysctl_perf_event_max_stack;
+
 static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 {
-	if (entry->nr < PERF_MAX_STACK_DEPTH) {
+	if (entry->nr < sysctl_perf_event_max_stack) {
 		entry->ip[entry->nr++] = ip;
 		return 0;
 	} else {
@@ -1001,6 +1095,8 @@
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
+int perf_event_max_stack_handler(struct ctl_table *table, int write,
+				 void __user *buffer, size_t *lenp, loff_t *ppos);
 
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
@@ -1018,7 +1114,7 @@
 }
 
 extern void perf_event_init(void);
-extern void perf_tp_event(u64 addr, u64 count, void *record,
+extern void perf_tp_event(u16 event_type, u64 count, void *record,
 			  int entry_size, struct pt_regs *regs,
 			  struct hlist_head *head, int rctx,
 			  struct task_struct *task);
@@ -1045,8 +1141,41 @@
 	return event->pmu->setup_aux;
 }
 
+static inline bool is_write_backward(struct perf_event *event)
+{
+	return !!event->attr.write_backward;
+}
+
+static inline bool has_addr_filter(struct perf_event *event)
+{
+	return event->pmu->nr_addr_filters;
+}
+
+/*
+ * An inherited event uses parent's filters
+ */
+static inline struct perf_addr_filters_head *
+perf_event_addr_filters(struct perf_event *event)
+{
+	struct perf_addr_filters_head *ifh = &event->addr_filters;
+
+	if (event->parent)
+		ifh = &event->parent->addr_filters;
+
+	return ifh;
+}
+
+extern void perf_event_addr_filters_sync(struct perf_event *event);
+
 extern int perf_output_begin(struct perf_output_handle *handle,
 			     struct perf_event *event, unsigned int size);
+extern int perf_output_begin_forward(struct perf_output_handle *handle,
+				    struct perf_event *event,
+				    unsigned int size);
+extern int perf_output_begin_backward(struct perf_output_handle *handle,
+				      struct perf_event *event,
+				      unsigned int size);
+
 extern void perf_output_end(struct perf_output_handle *handle);
 extern unsigned int perf_output_copy(struct perf_output_handle *handle,
 			     const void *buf, unsigned int len);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2abd791..2d24b28 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -805,6 +805,10 @@
 void phy_stop_machine(struct phy_device *phydev);
 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_ethtool_ksettings_get(struct phy_device *phydev,
+			      struct ethtool_link_ksettings *cmd);
+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+			      const struct ethtool_link_ksettings *cmd);
 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
 int phy_start_interrupts(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
@@ -825,6 +829,10 @@
 int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
 void phy_ethtool_get_wol(struct phy_device *phydev,
 			 struct ethtool_wolinfo *wol);
+int phy_ethtool_get_link_ksettings(struct net_device *ndev,
+				   struct ethtool_link_ksettings *cmd);
+int phy_ethtool_set_link_ksettings(struct net_device *ndev,
+				   const struct ethtool_link_ksettings *cmd);
 
 int __init mdio_bus_init(void);
 void mdio_bus_exit(void);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 8cf05e3..a810f2a 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -77,6 +77,7 @@
  */
 struct phy_provider {
 	struct device		*dev;
+	struct device_node	*children;
 	struct module		*owner;
 	struct list_head	list;
 	struct phy * (*of_xlate)(struct device *dev,
@@ -93,10 +94,16 @@
 #define	to_phy(a)	(container_of((a), struct phy, dev))
 
 #define	of_phy_provider_register(dev, xlate)	\
-	__of_phy_provider_register((dev), THIS_MODULE, (xlate))
+	__of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
 
 #define	devm_of_phy_provider_register(dev, xlate)	\
-	__devm_of_phy_provider_register((dev), THIS_MODULE, (xlate))
+	__devm_of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
+
+#define of_phy_provider_register_full(dev, children, xlate) \
+	__of_phy_provider_register(dev, children, THIS_MODULE, xlate)
+
+#define devm_of_phy_provider_register_full(dev, children, xlate) \
+	__devm_of_phy_provider_register(dev, children, THIS_MODULE, xlate)
 
 static inline void phy_set_drvdata(struct phy *phy, void *data)
 {
@@ -147,11 +154,13 @@
 void phy_destroy(struct phy *phy);
 void devm_phy_destroy(struct device *dev, struct phy *phy);
 struct phy_provider *__of_phy_provider_register(struct device *dev,
-	struct module *owner, struct phy * (*of_xlate)(struct device *dev,
-	struct of_phandle_args *args));
+	struct device_node *children, struct module *owner,
+	struct phy * (*of_xlate)(struct device *dev,
+				 struct of_phandle_args *args));
 struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
-	struct module *owner, struct phy * (*of_xlate)(struct device *dev,
-	struct of_phandle_args *args));
+	struct device_node *children, struct module *owner,
+	struct phy * (*of_xlate)(struct device *dev,
+				 struct of_phandle_args *args));
 void of_phy_provider_unregister(struct phy_provider *phy_provider);
 void devm_of_phy_provider_unregister(struct device *dev,
 	struct phy_provider *phy_provider);
@@ -312,15 +321,17 @@
 }
 
 static inline struct phy_provider *__of_phy_provider_register(
-	struct device *dev, struct module *owner, struct phy * (*of_xlate)(
-	struct device *dev, struct of_phandle_args *args))
+	struct device *dev, struct device_node *children, struct module *owner,
+	struct phy * (*of_xlate)(struct device *dev,
+				 struct of_phandle_args *args))
 {
 	return ERR_PTR(-ENOSYS);
 }
 
 static inline struct phy_provider *__devm_of_phy_provider_register(struct device
-	*dev, struct module *owner, struct phy * (*of_xlate)(struct device *dev,
-	struct of_phandle_args *args))
+	*dev, struct device_node *children, struct module *owner,
+	struct phy * (*of_xlate)(struct device *dev,
+				 struct of_phandle_args *args))
 {
 	return ERR_PTR(-ENOSYS);
 }
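
To illustrate the children-aware provider registration added above, here is a minimal sketch (not part of this patch) of a PHY driver that keeps its PHY descriptions under a dedicated child node; the driver name, the "phys" node name and the xlate body are hypothetical:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static struct phy *foo_phy_xlate(struct device *dev,
				 struct of_phandle_args *args)
{
	/* A real driver would map args->args[0] to one of its PHYs. */
	return ERR_PTR(-ENODEV);
}

static int foo_phy_probe(struct platform_device *pdev)
{
	struct device_node *children;
	struct phy_provider *provider;

	/* PHYs live under a child node instead of the provider node itself. */
	children = of_get_child_by_name(pdev->dev.of_node, "phys");
	if (!children)
		return -ENODEV;

	provider = devm_of_phy_provider_register_full(&pdev->dev, children,
						      foo_phy_xlate);
	return PTR_ERR_OR_ZERO(provider);
}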
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
new file mode 100644
index 0000000..8e1a57a
--- /dev/null
+++ b/include/linux/phy/tegra/xusb.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef PHY_TEGRA_XUSB_H
+#define PHY_TEGRA_XUSB_H
+
+struct tegra_xusb_padctl;
+struct device;
+
+struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev);
+void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl);
+
+int tegra_xusb_padctl_usb3_save_context(struct tegra_xusb_padctl *padctl,
+					unsigned int port);
+int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
+				    unsigned int port, bool idle);
+int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
+					   unsigned int port, bool enable);
+
+#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 9ba59fc..a42e57d 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -144,6 +144,12 @@
 extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
 				struct device *dev, void *driver_data);
 extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
+extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
+				struct pinctrl_desc *pctldesc,
+				void *driver_data);
+extern void devm_pinctrl_unregister(struct device *dev,
+				struct pinctrl_dev *pctldev);
+
 extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin);
 extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
 				struct pinctrl_gpio_range *range);
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 03b6095..d15d8ba 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -21,15 +21,15 @@
  * @dma_dev:	required DMA master device
  * @src_id:	src request line
  * @dst_id:	dst request line
- * @src_master: src master for transfers on allocated channel.
- * @dst_master: dest master for transfers on allocated channel.
+ * @m_master:	memory master for transfers on allocated channel
+ * @p_master:	peripheral master for transfers on allocated channel
  */
 struct dw_dma_slave {
 	struct device		*dma_dev;
 	u8			src_id;
 	u8			dst_id;
-	u8			src_master;
-	u8			dst_master;
+	u8			m_master;
+	u8			p_master;
 };
 
 /**
@@ -43,7 +43,7 @@
  * @block_size: Maximum block size supported by the controller
  * @nr_masters: Number of AHB masters supported by the controller
  * @data_width: Maximum data width supported by hardware per AHB master
- *		(0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+ *		(in bytes, power of 2)
  */
 struct dw_dma_platform_data {
 	unsigned int	nr_channels;
@@ -55,7 +55,7 @@
 #define CHAN_PRIORITY_ASCENDING		0	/* chan0 highest */
 #define CHAN_PRIORITY_DESCENDING	1	/* chan7 highest */
 	unsigned char	chan_priority;
-	unsigned short	block_size;
+	unsigned int	block_size;
 	unsigned char	nr_masters;
 	unsigned char	data_width[DW_DMA_MAX_NR_MASTERS];
 };
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
index 28702c8..2dc7f4a 100644
--- a/include/linux/platform_data/gpio-dwapb.h
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -15,8 +15,7 @@
 #define GPIO_DW_APB_H
 
 struct dwapb_port_property {
-	struct device_node *node;
-	const char	*name;
+	struct fwnode_handle *fwnode;
 	unsigned int	idx;
 	unsigned int	ngpio;
 	unsigned int	gpio_base;
diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h
index 104aa89..3038120 100644
--- a/include/linux/platform_data/media/ir-rx51.h
+++ b/include/linux/platform_data/media/ir-rx51.h
@@ -5,6 +5,7 @@
 	int pwm_timer;
 
 	int(*set_max_mpu_wakeup_lat)(struct device *dev, long t);
+	struct pwm_omap_dmtimer_pdata *dmtimer;
 };
 
 #endif
diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h
index 5938421..e7d521e 100644
--- a/include/linux/platform_data/pwm_omap_dmtimer.h
+++ b/include/linux/platform_data/pwm_omap_dmtimer.h
@@ -35,6 +35,16 @@
 #ifndef __PWM_OMAP_DMTIMER_PDATA_H
 #define __PWM_OMAP_DMTIMER_PDATA_H
 
+/* clock sources */
+#define PWM_OMAP_DMTIMER_SRC_SYS_CLK			0x00
+#define PWM_OMAP_DMTIMER_SRC_32_KHZ			0x01
+#define PWM_OMAP_DMTIMER_SRC_EXT_CLK			0x02
+
+/* timer interrupt enable bits */
+#define PWM_OMAP_DMTIMER_INT_CAPTURE			(1 << 2)
+#define PWM_OMAP_DMTIMER_INT_OVERFLOW			(1 << 1)
+#define PWM_OMAP_DMTIMER_INT_MATCH			(1 << 0)
+
 /* trigger types */
 #define PWM_OMAP_DMTIMER_TRIGGER_NONE			0x00
 #define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW		0x01
@@ -45,15 +55,23 @@
 
 struct pwm_omap_dmtimer_pdata {
 	pwm_omap_dmtimer *(*request_by_node)(struct device_node *np);
+	pwm_omap_dmtimer *(*request_specific)(int timer_id);
+	pwm_omap_dmtimer *(*request)(void);
+
 	int	(*free)(pwm_omap_dmtimer *timer);
 
 	void	(*enable)(pwm_omap_dmtimer *timer);
 	void	(*disable)(pwm_omap_dmtimer *timer);
 
+	int	(*get_irq)(pwm_omap_dmtimer *timer);
+	int	(*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value);
+	int	(*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask);
+
 	struct clk *(*get_fclk)(pwm_omap_dmtimer *timer);
 
 	int	(*start)(pwm_omap_dmtimer *timer);
 	int	(*stop)(pwm_omap_dmtimer *timer);
+	int	(*set_source)(pwm_omap_dmtimer *timer, int source);
 
 	int	(*set_load)(pwm_omap_dmtimer *timer, int autoreload,
 			unsigned int value);
@@ -63,7 +81,10 @@
 			int toggle, int trigger);
 	int	(*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler);
 
+	unsigned int (*read_counter)(pwm_omap_dmtimer *timer);
 	int	(*write_counter)(pwm_omap_dmtimer *timer, unsigned int value);
+	unsigned int (*read_status)(pwm_omap_dmtimer *timer);
+	int	(*write_status)(pwm_omap_dmtimer *timer, unsigned int value);
 };
 
 #endif /* __PWM_OMAP_DMTIMER_PDATA_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 03b7555..98c2a7c 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -18,7 +18,7 @@
 #define PLATFORM_DEVID_AUTO	(-2)
 
 struct mfd_cell;
-struct property_set;
+struct property_entry;
 
 struct platform_device {
 	const char	*name;
@@ -73,7 +73,7 @@
 		size_t size_data;
 		u64 dma_mask;
 
-		const struct property_set *pset;
+		struct property_entry *properties;
 };
 extern struct platform_device *platform_device_register_full(
 		const struct platform_device_info *pdevinfo);
@@ -172,7 +172,7 @@
 extern int platform_device_add_data(struct platform_device *pdev,
 				    const void *data, size_t size);
 extern int platform_device_add_properties(struct platform_device *pdev,
-					  const struct property_set *pset);
+					  struct property_entry *properties);
 extern int platform_device_add(struct platform_device *pdev);
 extern void platform_device_del(struct platform_device *pdev);
 extern void platform_device_put(struct platform_device *pdev);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 6a5d654..06eb353 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -563,7 +563,6 @@
 	bool			is_suspended:1;	/* Ditto */
 	bool			is_noirq_suspended:1;
 	bool			is_late_suspended:1;
-	bool			ignore_children:1;
 	bool			early_init:1;	/* Owned by the PM core */
 	bool			direct_complete:1;	/* Owned by the PM core */
 	spinlock_t		lock;
@@ -591,6 +590,7 @@
 	unsigned int		deferred_resume:1;
 	unsigned int		run_wake:1;
 	unsigned int		runtime_auto:1;
+	bool			ignore_children:1;
 	unsigned int		no_callbacks:1;
 	unsigned int		irq_safe:1;
 	unsigned int		use_autosuspend:1;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 49cd889..39285c7 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -28,14 +28,12 @@
 
 struct dev_power_governor {
 	bool (*power_down_ok)(struct dev_pm_domain *domain);
-	bool (*stop_ok)(struct device *dev);
+	bool (*suspend_ok)(struct device *dev);
 };
 
 struct gpd_dev_ops {
 	int (*start)(struct device *dev);
 	int (*stop)(struct device *dev);
-	int (*save_state)(struct device *dev);
-	int (*restore_state)(struct device *dev);
 	bool (*active_wakeup)(struct device *dev);
 };
 
@@ -94,7 +92,7 @@
 	s64 resume_latency_ns;
 	s64 effective_constraint_ns;
 	bool constraint_changed;
-	bool cached_stop_ok;
+	bool cached_suspend_ok;
 };
 
 struct pm_domain_data {
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cccaf4a..bca2615 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -65,6 +65,10 @@
 int dev_pm_opp_set_regulator(struct device *dev, const char *name);
 void dev_pm_opp_put_regulator(struct device *dev);
 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
+void dev_pm_opp_remove_table(struct device *dev);
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
 #else
 static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
@@ -109,25 +113,25 @@
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 					unsigned long freq, bool available)
 {
-	return ERR_PTR(-EINVAL);
+	return ERR_PTR(-ENOTSUPP);
 }
 
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 					unsigned long *freq)
 {
-	return ERR_PTR(-EINVAL);
+	return ERR_PTR(-ENOTSUPP);
 }
 
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
 					unsigned long *freq)
 {
-	return ERR_PTR(-EINVAL);
+	return ERR_PTR(-ENOTSUPP);
 }
 
 static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
 					unsigned long u_volt)
 {
-	return -EINVAL;
+	return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
@@ -147,73 +151,85 @@
 static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
 							struct device *dev)
 {
-	return ERR_PTR(-EINVAL);
+	return ERR_PTR(-ENOTSUPP);
 }
 
 static inline int dev_pm_opp_set_supported_hw(struct device *dev,
 					      const u32 *versions,
 					      unsigned int count)
 {
-	return -EINVAL;
+	return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
 
 static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 {
-	return -EINVAL;
+	return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
 
 static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
 {
-	return -EINVAL;
+	return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_put_regulator(struct device *dev) {}
 
 static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 {
+	return -ENOTSUPP;
+}
+
+static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
+{
+	return -ENOTSUPP;
+}
+
+static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
+{
 	return -EINVAL;
 }
 
+static inline void dev_pm_opp_remove_table(struct device *dev)
+{
+}
+
+static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
+{
+}
+
 #endif		/* CONFIG_PM_OPP */
 
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
 int dev_pm_opp_of_add_table(struct device *dev);
 void dev_pm_opp_of_remove_table(struct device *dev);
-int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask);
-void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask);
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
+int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
+void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
 #else
 static inline int dev_pm_opp_of_add_table(struct device *dev)
 {
-	return -EINVAL;
+	return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_of_remove_table(struct device *dev)
 {
 }
 
-static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
+static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
 {
-	return -ENOSYS;
+	return -ENOTSUPP;
 }
 
-static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
 {
 }
 
-static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
-	return -ENOSYS;
-}
-
-static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
-{
-	return -ENOSYS;
+	return -ENOTSUPP;
 }
 #endif
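
As a rough sketch (not from this patch) of how a cpufreq-style driver might use the reworked cpumask helpers: populate the OPP table for one CPU's device, then mark the CPUs that share its rails; the function and variable names are made up.

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

static int foo_init_opps(struct device *cpu_dev,
			 const struct cpumask *shared_cpus)
{
	int ret;

	/* Build cpu_dev's OPP table from its "operating-points" DT data. */
	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret)
		return ret;

	/* Record that the other CPUs share cpu_dev's OPP table. */
	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, shared_cpus);
	if (ret)
		dev_pm_opp_of_remove_table(cpu_dev);

	return ret;
}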
 
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 7af093d..2e14d26 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -56,6 +56,11 @@
 						 s64 delta_ns);
 extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
 
+static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
+{
+	dev->power.ignore_children = enable;
+}
+
 static inline bool pm_children_suspended(struct device *dev)
 {
 	return dev->power.ignore_children
@@ -156,6 +161,7 @@
 static inline void pm_runtime_allow(struct device *dev) {}
 static inline void pm_runtime_forbid(struct device *dev) {}
 
+static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
 static inline bool pm_children_suspended(struct device *dev) { return false; }
 static inline void pm_runtime_get_noresume(struct device *dev) {}
 static inline void pm_runtime_put_noidle(struct device *dev) {}
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 3ec5309..57d146f 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -42,6 +42,13 @@
 	BUG();
 }
 
+static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
+		size_t n)
+{
+	BUG();
+	return -EFAULT;
+}
+
 static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
 		struct iov_iter *i)
 {
@@ -65,22 +72,35 @@
 }
 #endif
 
-/*
- * Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
- * arch_copy_from_iter_pmem(), arch_clear_pmem(), arch_wb_cache_pmem()
- * and arch_has_wmb_pmem().
- */
-static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
-{
-	memcpy(dst, (void __force const *) src, size);
-}
-
 static inline bool arch_has_pmem_api(void)
 {
 	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
 }
 
+static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
+		size_t size)
+{
+	memcpy(dst, (void __force *) src, size);
+	return 0;
+}
+
+/*
+ * memcpy_from_pmem - read from persistent memory with error handling
+ * @dst: destination buffer
+ * @src: source buffer
+ * @size: transfer length
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
+		size_t size)
+{
+	if (arch_has_pmem_api())
+		return arch_memcpy_from_pmem(dst, src, size);
+	else
+		return default_memcpy_from_pmem(dst, src, size);
+}
+
 /**
  * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
  *
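
For illustration only (not part of the patch), a caller of the new error-returning memcpy_from_pmem() would look roughly like this; the wrapper name is hypothetical:

#include <linux/pmem.h>

static int foo_read_pmem(void *dst, void __pmem *src, size_t len)
{
	/* Propagate media errors instead of consuming poisoned data. */
	int rc = memcpy_from_pmem(dst, src, len);

	if (rc)
		return rc;	/* negative errno, e.g. on an uncorrectable error */
	return 0;
}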
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 5df733b..2588ca6 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -337,9 +337,11 @@
 
 #ifdef CONFIG_PNPBIOS
 extern struct pnp_protocol pnpbios_protocol;
+extern bool arch_pnpbios_disabled(void);
 #define pnp_device_is_pnpbios(dev) ((dev)->protocol == (&pnpbios_protocol))
 #else
 #define pnp_device_is_pnpbios(dev) 0
+#define arch_pnpbios_disabled()	false
 #endif
 
 #ifdef CONFIG_PNPACPI
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 3e96a6a..5b5a80c 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -99,7 +99,6 @@
 extern int simple_set_acl(struct inode *, struct posix_acl *, int);
 extern int simple_acl_create(struct inode *, struct inode *);
 
-struct posix_acl **acl_by_type(struct inode *inode, int type);
 struct posix_acl *get_cached_acl(struct inode *inode, int type);
 struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
 void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl);
diff --git a/include/linux/property.h b/include/linux/property.h
index b51fcd36..ecab11e 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -238,18 +238,9 @@
 	.name = _name_,				\
 }
 
-/**
- * struct property_set - Collection of "built-in" device properties.
- * @fwnode: Handle to be pointed to by the fwnode field of struct device.
- * @properties: Array of properties terminated with a null entry.
- */
-struct property_set {
-	struct fwnode_handle fwnode;
-	struct property_entry *properties;
-};
-
-int device_add_property_set(struct device *dev, const struct property_set *pset);
-void device_remove_property_set(struct device *dev);
+int device_add_properties(struct device *dev,
+			  struct property_entry *properties);
+void device_remove_properties(struct device *dev);
 
 bool device_dma_supported(struct device *dev);
 
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
deleted file mode 100644
index 2122133..0000000
--- a/include/linux/proportions.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * FLoating proportions
- *
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
- *
- * This file contains the public data structure and API definitions.
- */
-
-#ifndef _LINUX_PROPORTIONS_H
-#define _LINUX_PROPORTIONS_H
-
-#include <linux/percpu_counter.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/gfp.h>
-
-struct prop_global {
-	/*
-	 * The period over which we differentiate
-	 *
-	 *   period = 2^shift
-	 */
-	int shift;
-	/*
-	 * The total event counter aka 'time'.
-	 *
-	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
-	 * counter bits, the remaining upper bits the period counter.
-	 */
-	struct percpu_counter events;
-};
-
-/*
- * global proportion descriptor
- *
- * this is needed to consistently flip prop_global structures.
- */
-struct prop_descriptor {
-	int index;
-	struct prop_global pg[2];
-	struct mutex mutex;		/* serialize the prop_global switch */
-};
-
-int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
-void prop_change_shift(struct prop_descriptor *pd, int new_shift);
-
-/*
- * ----- PERCPU ------
- */
-
-struct prop_local_percpu {
-	/*
-	 * the local events counter
-	 */
-	struct percpu_counter events;
-
-	/*
-	 * snapshot of the last seen global state
-	 */
-	int shift;
-	unsigned long period;
-	raw_spinlock_t lock;		/* protect the snapshot state */
-};
-
-int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
-void prop_local_destroy_percpu(struct prop_local_percpu *pl);
-void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
-void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
-		long *numerator, long *denominator);
-
-static inline
-void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__prop_inc_percpu(pd, pl);
-	local_irq_restore(flags);
-}
-
-/*
- * Limit the time part in order to ensure there are some bits left for the
- * cycle counter and fraction multiply.
- */
-#if BITS_PER_LONG == 32
-#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
-#else
-#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
-#endif
-
-#define PROP_FRAC_SHIFT		(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
-#define PROP_FRAC_BASE		(1UL << PROP_FRAC_SHIFT)
-
-void __prop_inc_percpu_max(struct prop_descriptor *pd,
-			   struct prop_local_percpu *pl, long frac);
-
-
-/*
- * ----- SINGLE ------
- */
-
-struct prop_local_single {
-	/*
-	 * the local events counter
-	 */
-	unsigned long events;
-
-	/*
-	 * snapshot of the last seen global state
-	 * and a lock protecting this state
-	 */
-	unsigned long period;
-	int shift;
-	raw_spinlock_t lock;		/* protect the snapshot state */
-};
-
-#define INIT_PROP_LOCAL_SINGLE(name)			\
-{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
-}
-
-int prop_local_init_single(struct prop_local_single *pl);
-void prop_local_destroy_single(struct prop_local_single *pl);
-void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
-void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
-		long *numerator, long *denominator);
-
-static inline
-void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__prop_inc_single(pd, pl);
-	local_irq_restore(flags);
-}
-
-#endif /* _LINUX_PROPORTIONS_H */
diff --git a/include/linux/psci.h b/include/linux/psci.h
index 393efe2..bdea1cb 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -21,8 +21,6 @@
 #define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
 
 bool psci_tos_resident_on(int cpu);
-bool psci_power_state_loses_context(u32 state);
-bool psci_power_state_is_valid(u32 state);
 
 int psci_cpu_init_idle(unsigned int cpu);
 int psci_cpu_suspend_enter(unsigned long index);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index cfc3ed4..b78d27c 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -74,6 +74,24 @@
 	PWM_POLARITY_INVERSED,
 };
 
+/**
+ * struct pwm_args - board-dependent PWM arguments
+ * @period: reference period
+ * @polarity: reference polarity
+ *
+ * This structure describes board-dependent arguments attached to a PWM
+ * device. These arguments are usually retrieved from the PWM lookup table or
+ * device tree.
+ *
+ * Do not confuse this with the PWM state: PWM arguments represent the initial
+ * configuration that users want to use on this PWM device rather than the
+ * current PWM hardware state.
+ */
+struct pwm_args {
+	unsigned int period;
+	enum pwm_polarity polarity;
+};
+
 enum {
 	PWMF_REQUESTED = 1 << 0,
 	PWMF_ENABLED = 1 << 1,
@@ -92,6 +110,7 @@
  * @period: period of the PWM signal (in nanoseconds)
  * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
  * @polarity: polarity of the PWM signal
+ * @args: PWM arguments
  */
 struct pwm_device {
 	const char *label;
@@ -105,6 +124,8 @@
 	unsigned int period;
 	unsigned int duty_cycle;
 	enum pwm_polarity polarity;
+
+	struct pwm_args args;
 };
 
 static inline bool pwm_is_enabled(const struct pwm_device *pwm)
@@ -144,6 +165,18 @@
 	return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
 }
 
+static inline void pwm_get_args(const struct pwm_device *pwm,
+				struct pwm_args *args)
+{
+	*args = pwm->args;
+}
+
+static inline void pwm_apply_args(struct pwm_device *pwm)
+{
+	pwm_set_period(pwm, pwm->args.period);
+	pwm_set_polarity(pwm, pwm->args.polarity);
+}
+
 /**
  * struct pwm_ops - PWM controller operations
  * @request: optional hook for requesting a PWM
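
A sketch (not part of the patch) of the consumer pattern the new pwm_args helpers are aimed at: apply the board-supplied reference arguments first, then configure relative to them; the driver name is hypothetical.

#include <linux/err.h>
#include <linux/pwm.h>

static int foo_pwm_setup(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_args args;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* Program the reference period/polarity from DT or the lookup table. */
	pwm_apply_args(pwm);

	/* Then configure relative to the reference period, here 50% duty. */
	pwm_get_args(pwm, &args);
	return pwm_config(pwm, args.period / 2, args.period);
}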
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 53ecb37..3f14c7e 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -285,6 +285,63 @@
 #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN	12
 #define PXP_ILT_BLOCK_FACTOR_MULTIPLIER	1024
 
+#define PXP_VF_BAR0_START_IGU                   0
+#define PXP_VF_BAR0_IGU_LENGTH                  0x3000
+#define PXP_VF_BAR0_END_IGU                     (PXP_VF_BAR0_START_IGU + \
+						 PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ                    0x3000
+#define PXP_VF_BAR0_DQ_LENGTH                   0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET            0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS           (PXP_VF_BAR0_START_DQ +	\
+						 PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS         (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+						 + 4)
+#define PXP_VF_BAR0_END_DQ                      (PXP_VF_BAR0_START_DQ +	\
+						 PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B           0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B           0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B             (PXP_VF_BAR0_START_TSDM_ZONE_B \
+						 +			       \
+						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+						 - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B           0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B             (PXP_VF_BAR0_START_MSDM_ZONE_B \
+						 +			       \
+						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+						 - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B           0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B             (PXP_VF_BAR0_START_USDM_ZONE_B \
+						 +			       \
+						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+						 - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B           0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B             (PXP_VF_BAR0_START_XSDM_ZONE_B \
+						 +			       \
+						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+						 - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B           0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B             (PXP_VF_BAR0_START_YSDM_ZONE_B \
+						 +			       \
+						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+						 - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B           0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B             (PXP_VF_BAR0_START_PSDM_ZONE_B \
+						 +			       \
+						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+						 - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A            0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A              0x10000
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH           32
+
 /* ILT Records */
 #define PXP_NUM_ILT_RECORDS_BB 7600
 #define PXP_NUM_ILT_RECORDS_K2 11000
@@ -327,9 +384,14 @@
 	__le32	hi;
 };
 
+struct vf_pf_channel_eqe_data {
+	struct regpair msg_addr;
+};
+
 /* Event Data Union */
 union event_ring_data {
 	u8				bytes[8];
+	struct vf_pf_channel_eqe_data	vf_pf_channel;
 	struct async_data		async_info;
 };
 
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index e1d6983..6ae8cb4 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -13,6 +13,7 @@
 #include <linux/if_link.h>
 #include <linux/qed/eth_common.h>
 #include <linux/qed/qed_if.h>
+#include <linux/qed/qed_iov_if.h>
 
 struct qed_dev_eth_info {
 	struct qed_dev_info common;
@@ -27,12 +28,15 @@
 struct qed_update_vport_rss_params {
 	u16	rss_ind_table[128];
 	u32	rss_key[10];
+	u8	rss_caps;
 };
 
 struct qed_update_vport_params {
 	u8 vport_id;
 	u8 update_vport_active_flg;
 	u8 vport_active_flg;
+	u8 update_tx_switching_flg;
+	u8 tx_switching_flg;
 	u8 update_accept_any_vlan_flg;
 	u8 accept_any_vlan;
 	u8 update_rss_flg;
@@ -111,12 +115,23 @@
 	u16 sb_idx;
 };
 
+struct qed_tunn_params {
+	u16 vxlan_port;
+	u8 update_vxlan_port;
+	u16 geneve_port;
+	u8 update_geneve_port;
+};
+
 struct qed_eth_cb_ops {
 	struct qed_common_cb_ops common;
+	void (*force_mac) (void *dev, u8 *mac);
 };
 
 struct qed_eth_ops {
 	const struct qed_common_ops *common;
+#ifdef CONFIG_QED_SRIOV
+	const struct qed_iov_hv_ops *iov;
+#endif
 
 	int (*fill_dev_info)(struct qed_dev *cdev,
 			     struct qed_dev_eth_info *info);
@@ -125,6 +140,8 @@
 			     struct qed_eth_cb_ops *ops,
 			     void *cookie);
 
+	 bool(*check_mac) (struct qed_dev *cdev, u8 *mac);
+
 	int (*vport_start)(struct qed_dev *cdev,
 			   struct qed_start_vport_params *params);
 
@@ -165,9 +182,12 @@
 
 	void (*get_vport_stats)(struct qed_dev *cdev,
 				struct qed_eth_stats *stats);
+
+	int (*tunn_config)(struct qed_dev *cdev,
+			   struct qed_tunn_params *params);
 };
 
-const struct qed_eth_ops *qed_get_eth_ops(u32 version);
+const struct qed_eth_ops *qed_get_eth_ops(void);
 void qed_put_eth_ops(void);
 
 #endif
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 1f7599c7..4c29439 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -25,6 +25,15 @@
 #include <linux/qed/common_hsi.h>
 #include <linux/qed/qed_chain.h>
 
+enum dcbx_protocol_type {
+	DCBX_PROTOCOL_ISCSI,
+	DCBX_PROTOCOL_FCOE,
+	DCBX_PROTOCOL_ROCE,
+	DCBX_PROTOCOL_ROCE_V2,
+	DCBX_PROTOCOL_ETH,
+	DCBX_MAX_PROTOCOL_TYPE
+};
+
 enum qed_led_mode {
 	QED_LED_MODE_OFF,
 	QED_LED_MODE_ON,
@@ -93,6 +102,7 @@
 
 	u32		flash_size;
 	u8		mf_mode;
+	bool		tx_switching;
 };
 
 enum qed_sb_type {
@@ -110,6 +120,7 @@
 #define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
 #define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
 #define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
+#define QED_LINK_OVERRIDE_LOOPBACK_MODE         BIT(4)
 	u32	override_flags;
 	bool	autoneg;
 	u32	adv_speeds;
@@ -118,6 +129,12 @@
 #define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
 #define QED_LINK_PAUSE_TX_ENABLE                BIT(2)
 	u32	pause_config;
+#define QED_LINK_LOOPBACK_NONE                  BIT(0)
+#define QED_LINK_LOOPBACK_INT_PHY               BIT(1)
+#define QED_LINK_LOOPBACK_EXT_PHY               BIT(2)
+#define QED_LINK_LOOPBACK_EXT                   BIT(3)
+#define QED_LINK_LOOPBACK_MAC                   BIT(4)
+	u32	loopback_mode;
 };
 
 struct qed_link_output {
@@ -133,6 +150,13 @@
 	u32	pause_config;
 };
 
+struct qed_probe_params {
+	enum qed_protocol protocol;
+	u32 dp_module;
+	u8 dp_level;
+	bool is_vf;
+};
+
 #define QED_DRV_VER_STR_SIZE 12
 struct qed_slowpath_params {
 	u32	int_mode;
@@ -158,10 +182,49 @@
 			       struct qed_link_output	*link);
 };
 
+struct qed_selftest_ops {
+/**
+ * @brief selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+	int (*selftest_interrupt)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+	int (*selftest_memory)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+	int (*selftest_register)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+	int (*selftest_clock)(struct qed_dev *cdev);
+};
+
 struct qed_common_ops {
+	struct qed_selftest_ops *selftest;
+
 	struct qed_dev*	(*probe)(struct pci_dev *dev,
-				 enum qed_protocol protocol,
-				 u32 dp_module, u8 dp_level);
+				 struct qed_probe_params *params);
 
 	void		(*remove)(struct qed_dev *cdev);
 
@@ -211,6 +274,16 @@
 
 	void		(*simd_handler_clean)(struct qed_dev *cdev,
 					      int index);
+
+/**
+ * @brief can_link_change - can the instance change the link or not
+ *
+ * @param cdev
+ *
+ * @return true if link-change is allowed, false otherwise.
+ */
+	bool (*can_link_change)(struct qed_dev *cdev);
+
 /**
  * @brief set_link - set links according to params
  *
@@ -271,15 +344,6 @@
 		       enum qed_led_mode mode);
 };
 
-/**
- * @brief qed_get_protocol_version
- *
- * @param protocol
- *
- * @return version supported by qed for given protocol driver
- */
-u32 qed_get_protocol_version(enum qed_protocol protocol);
-
 #define MASK_FIELD(_name, _value) \
 	((_value) &= (_name ## _MASK))
 
@@ -393,16 +457,16 @@
 
 	/* port */
 	u64	rx_64_byte_packets;
-	u64	rx_127_byte_packets;
-	u64	rx_255_byte_packets;
-	u64	rx_511_byte_packets;
-	u64	rx_1023_byte_packets;
-	u64	rx_1518_byte_packets;
-	u64	rx_1522_byte_packets;
-	u64	rx_2047_byte_packets;
-	u64	rx_4095_byte_packets;
-	u64	rx_9216_byte_packets;
-	u64	rx_16383_byte_packets;
+	u64	rx_65_to_127_byte_packets;
+	u64	rx_128_to_255_byte_packets;
+	u64	rx_256_to_511_byte_packets;
+	u64	rx_512_to_1023_byte_packets;
+	u64	rx_1024_to_1518_byte_packets;
+	u64	rx_1519_to_1522_byte_packets;
+	u64	rx_1519_to_2047_byte_packets;
+	u64	rx_2048_to_4095_byte_packets;
+	u64	rx_4096_to_9216_byte_packets;
+	u64	rx_9217_to_16383_byte_packets;
 	u64	rx_crc_errors;
 	u64	rx_mac_crtl_frames;
 	u64	rx_pause_frames;
@@ -524,4 +588,15 @@
 	__internal_ram_wr(NULL, addr, size, data);
 }
 
+enum qed_rss_caps {
+	QED_RSS_IPV4		= 0x1,
+	QED_RSS_IPV6		= 0x2,
+	QED_RSS_IPV4_TCP	= 0x4,
+	QED_RSS_IPV6_TCP	= 0x8,
+	QED_RSS_IPV4_UDP	= 0x10,
+	QED_RSS_IPV6_UDP	= 0x20,
+};
+
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
 #endif
diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h
new file mode 100644
index 0000000..5a4f8d0
--- /dev/null
+++ b/include/linux/qed/qed_iov_if.h
@@ -0,0 +1,34 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_IOV_IF_H
+#define _QED_IOV_IF_H
+
+#include <linux/qed/qed_if.h>
+
+/* Structs used by PF to control and manipulate child VFs */
+struct qed_iov_hv_ops {
+	int (*configure)(struct qed_dev *cdev, int num_vfs_param);
+
+	int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid);
+
+	int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid);
+
+	int (*get_config) (struct qed_dev *cdev, int vf_id,
+			   struct ifla_vf_info *ivi);
+
+	int (*set_link_state) (struct qed_dev *cdev, int vf_id,
+			       int link_state);
+
+	int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val);
+
+	int (*set_rate) (struct qed_dev *cdev, int vfid,
+			 u32 min_rate, u32 max_rate);
+};
+
+#endif
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 17d4f84..8beb98d 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -488,6 +488,42 @@
 }
 
 /**
+ * hlist_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the end of the specified hlist,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_add_tail_rcu(struct hlist_node *n,
+				      struct hlist_head *h)
+{
+	struct hlist_node *i, *last = NULL;
+
+	for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
+		last = i;
+
+	if (last) {
+		n->next = last->next;
+		n->pprev = &last->next;
+		rcu_assign_pointer(hlist_next_rcu(last), n);
+	} else {
+		hlist_add_head_rcu(n, h);
+	}
+}
+
+/**
  * hlist_add_before_rcu
  * @n: the new element to add to the hash list.
  * @next: the existing element to add the new element before.
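
A non-patch sketch of the usual calling convention for hlist_add_tail_rcu(): updaters serialize on a lock, readers traverse under rcu_read_lock(); the types and names are illustrative.

#include <linux/rculist.h>
#include <linux/spinlock.h>

struct foo {
	int key;
	struct hlist_node node;
};

static HLIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

/* Updater: append under the lock that serializes list mutation. */
static void foo_add(struct foo *f)
{
	spin_lock(&foo_lock);
	hlist_add_tail_rcu(&f->node, &foo_list);
	spin_unlock(&foo_lock);
}

/* Reader: lockless traversal under rcu_read_lock(). */
static bool foo_present(int key)
{
	struct foo *f;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &foo_list, node) {
		if (f->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}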
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 1c33dd7..4ae95f7 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -98,6 +98,45 @@
 	if (!is_a_nulls(first))
 		first->pprev = &n->next;
 }
+
+/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the end of the specified hlist_nulls,
+ * while permitting racing traversals.  NOTE: tail insertion requires
+ * list traversal.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+					struct hlist_nulls_head *h)
+{
+	struct hlist_nulls_node *i, *last = NULL;
+
+	for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
+	     i = hlist_nulls_next_rcu(i))
+		last = i;
+
+	if (last) {
+		n->next = last->next;
+		n->pprev = &last->next;
+		rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
+	} else {
+		hlist_nulls_add_head_rcu(n, h);
+	}
+}
+
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:	the type * to use as a loop cursor.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2657aff..5f1533e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -508,14 +508,7 @@
  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
  * critical section unless it can prove otherwise.
  */
-#ifdef CONFIG_PREEMPT_COUNT
 int rcu_read_lock_sched_held(void);
-#else /* #ifdef CONFIG_PREEMPT_COUNT */
-static inline int rcu_read_lock_sched_held(void)
-{
-	return 1;
-}
-#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -532,18 +525,10 @@
 	return 1;
 }
 
-#ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
 {
-	return preempt_count() != 0 || irqs_disabled();
+	return !preemptible();
 }
-#else /* #ifdef CONFIG_PREEMPT_COUNT */
-static inline int rcu_read_lock_sched_held(void)
-{
-	return 1;
-}
-#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
-
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 #ifdef CONFIG_PROVE_RCU
@@ -1144,4 +1129,17 @@
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 
 
+/*
+ * Dump the ftrace buffer, but only one time per callsite per boot.
+ */
+#define rcu_ftrace_dump(oops_dump_mode) \
+do { \
+	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
+	\
+	if (!atomic_read(&___rfd_beenhere) && \
+	    !atomic_xchg(&___rfd_beenhere, 1)) \
+		ftrace_dump(oops_dump_mode); \
+} while (0)
+
+
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 64809ae..93aea75 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -149,6 +149,22 @@
 	return 0;
 }
 
+/*
+ * Return the number of expedited grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed(void)
+{
+	return 0;
+}
+
+/*
+ * Return the number of expedited sched grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed_sched(void)
+{
+	return 0;
+}
+
 static inline void rcu_force_quiescent_state(void)
 {
 }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index ad1eda9..5043cb8 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -87,6 +87,8 @@
 unsigned long rcu_batches_completed(void);
 unsigned long rcu_batches_completed_bh(void);
 unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_exp_batches_completed(void);
+unsigned long rcu_exp_batches_completed_sched(void);
 void show_rcu_gp_kthreads(void);
 
 void rcu_force_quiescent_state(void);
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h
index 2eb3860..113d861 100644
--- a/include/linux/regulator/act8865.h
+++ b/include/linux/regulator/act8865.h
@@ -69,11 +69,13 @@
  * @id: regulator id
  * @name: regulator name
  * @init_data: regulator init data
+ * @of_node: device tree node (optional)
  */
 struct act8865_regulator_data {
 	int id;
 	const char *name;
 	struct regulator_init_data *init_data;
+	struct device_node *of_node;
 };
 
 /**
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index cd271e8..fcfa40a 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -93,6 +93,9 @@
  * @get_current_limit: Get the configured limit for a current-limited regulator.
  * @set_input_current_limit: Configure an input limit.
  *
+ * @set_over_current_protection: Support capability of automatically shutting
+ *                               down when detecting an over current event.
+ *
  * @set_active_discharge: Set active discharge enable/disable of regulators.
  *
  * @set_mode: Set the configured operating mode for the regulator.
@@ -255,6 +258,8 @@
  *
  * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
  * @vsel_mask: Mask for register bitfield used for selector
+ * @csel_reg: Register for TPS65218 LS3 current regulator
+ * @csel_mask: Mask for TPS65218 LS3 current regulator
  * @apply_reg: Register for initiate voltage change on the output when
  *                using regulator_set_voltage_sel_regmap
  * @apply_bit: Register bitfield used for initiate voltage change on the
@@ -292,7 +297,7 @@
 			    const struct regulator_desc *,
 			    struct regulator_config *);
 	int id;
-	bool continuous_voltage_range;
+	unsigned int continuous_voltage_range:1;
 	unsigned n_voltages;
 	const struct regulator_ops *ops;
 	int irq;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 5d627c8..ad3e515 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -97,6 +97,7 @@
  * @ramp_disable: Disable ramp delay when initialising or when setting voltage.
  * @soft_start: Enable soft start so that voltage ramps slowly.
  * @pull_down: Enable pull down when regulator is disabled.
+ * @over_current_protection: Auto disable on over current event.
  *
  * @input_uV: Input voltage for regulator when supplied by another regulator.
  *
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index f6a8a16..2fcb998 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -54,6 +54,10 @@
  * @reg_init_data: The regulator init data.
  * @control_flags: Control flags which are ORed value of above flags to
  *		configure device.
+ * @junction_temp_warning: Junction temperature in millicelsius at which the
+ *			   warning is to be raised. Thermal functionality is
+ *			   only supported on MAX77621. The warning thresholds
+ *			   supported by MAX77621 are 120C and 140C.
  * @enable_ext_control: Enable the voltage enable/disable through external
  *		control signal from EN input pin. If it is false then
  *		voltage output will be enabled/disabled through EN bit of
@@ -67,6 +71,7 @@
 struct max8973_regulator_platform_data {
 	struct regulator_init_data *reg_init_data;
 	unsigned long control_flags;
+	unsigned long junction_temp_warning;
 	bool enable_ext_control;
 	int enable_gpio;
 	int dvs_gpio;
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9c4e138..1c457a8 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -365,6 +365,8 @@
 /**
  * enum rproc_crash_type - remote processor crash types
  * @RPROC_MMUFAULT:	iommu fault
+ * @RPROC_WATCHDOG:	watchdog bite
+ * @RPROC_FATAL_ERROR:	fatal error
  *
  * Each element of the enum is used as an array index. So that, the value of
  * the elements should be always something sane.
@@ -373,6 +375,8 @@
  */
 enum rproc_crash_type {
 	RPROC_MMUFAULT,
+	RPROC_WATCHDOG,
+	RPROC_FATAL_ERROR,
 };
 
 /**
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index a3a5bcd..b91ba93 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -31,6 +31,7 @@
  * @ops: a pointer to device specific struct reset_control_ops
  * @owner: kernel module of the reset controller driver
  * @list: internal list of reset controller devices
+ * @reset_control_head: head of internal list of requested reset controls
  * @of_node: corresponding device tree node as phandle target
  * @of_reset_n_cells: number of cells in reset line specifiers
  * @of_xlate: translation function to translate from specifier as found in the
@@ -41,6 +42,7 @@
 	const struct reset_control_ops *ops;
 	struct module *owner;
 	struct list_head list;
+	struct list_head reset_control_head;
 	struct device_node *of_node;
 	int of_reset_n_cells;
 	int (*of_xlate)(struct reset_controller_dev *rcdev,
diff --git a/include/linux/reset.h b/include/linux/reset.h
index c4c097d..ec0306ce 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -1,8 +1,8 @@
 #ifndef _LINUX_RESET_H_
 #define _LINUX_RESET_H_
 
-struct device;
-struct device_node;
+#include <linux/device.h>
+
 struct reset_control;
 
 #ifdef CONFIG_RESET_CONTROLLER
@@ -12,9 +12,11 @@
 int reset_control_deassert(struct reset_control *rstc);
 int reset_control_status(struct reset_control *rstc);
 
-struct reset_control *reset_control_get(struct device *dev, const char *id);
+struct reset_control *__of_reset_control_get(struct device_node *node,
+				     const char *id, int index, int shared);
 void reset_control_put(struct reset_control *rstc);
-struct reset_control *devm_reset_control_get(struct device *dev, const char *id);
+struct reset_control *__devm_reset_control_get(struct device *dev,
+				     const char *id, int index, int shared);
 
 int __must_check device_reset(struct device *dev);
 
@@ -23,24 +25,6 @@
 	return device_reset(dev);
 }
 
-static inline struct reset_control *reset_control_get_optional(
-					struct device *dev, const char *id)
-{
-	return reset_control_get(dev, id);
-}
-
-static inline struct reset_control *devm_reset_control_get_optional(
-					struct device *dev, const char *id)
-{
-	return devm_reset_control_get(dev, id);
-}
-
-struct reset_control *of_reset_control_get(struct device_node *node,
-					   const char *id);
-
-struct reset_control *of_reset_control_get_by_index(
-					struct device_node *node, int index);
-
 #else
 
 static inline int reset_control_reset(struct reset_control *rstc)
@@ -72,49 +56,191 @@
 	WARN_ON(1);
 }
 
+static inline int __must_check device_reset(struct device *dev)
+{
+	WARN_ON(1);
+	return -ENOTSUPP;
+}
+
 static inline int device_reset_optional(struct device *dev)
 {
 	return -ENOTSUPP;
 }
 
-static inline struct reset_control *__must_check reset_control_get(
-					struct device *dev, const char *id)
+static inline struct reset_control *__of_reset_control_get(
+					struct device_node *node,
+					const char *id, int index, int shared)
 {
-	WARN_ON(1);
 	return ERR_PTR(-EINVAL);
 }
 
-static inline struct reset_control *__must_check devm_reset_control_get(
+static inline struct reset_control *__devm_reset_control_get(
+					struct device *dev,
+					const char *id, int index, int shared)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+#endif /* CONFIG_RESET_CONTROLLER */
+
+/**
+ * reset_control_get - Lookup and obtain an exclusive reference to a
+ *                     reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ * If this function is called more than once for the same reset_control it will
+ * return -EBUSY.
+ *
+ * See reset_control_get_shared for details on shared references to
+ * reset-controls.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *__must_check reset_control_get(
 					struct device *dev, const char *id)
 {
+#ifndef CONFIG_RESET_CONTROLLER
 	WARN_ON(1);
-	return ERR_PTR(-EINVAL);
+#endif
+	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
 }
 
 static inline struct reset_control *reset_control_get_optional(
 					struct device *dev, const char *id)
 {
-	return ERR_PTR(-ENOTSUPP);
+	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
+}
+
+/**
+ * reset_control_get_shared - Lookup and obtain a shared reference to a
+ *                            reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ * This function is intended for use with reset-controls which are shared
+ * between hardware-blocks.
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed; the reset core will keep track of a deassert_count
+ * and only (re-)assert the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *reset_control_get_shared(
+					struct device *dev, const char *id)
+{
+	return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
+}
+
+/**
+ * of_reset_control_get - Lookup and obtain an exclusive reference to a
+ *                        reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *of_reset_control_get(
+				struct device_node *node, const char *id)
+{
+	return __of_reset_control_get(node, id, 0, 0);
+}
+
+/**
+ * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to
+ *                                 a reset controller by index.
+ * @node: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * This is to be used to perform a list of resets for a device or power domain
+ * in whatever order is required. Returns a struct reset_control or IS_ERR()
+ * condition containing errno.
+ */
+static inline struct reset_control *of_reset_control_get_by_index(
+					struct device_node *node, int index)
+{
+	return __of_reset_control_get(node, NULL, index, 0);
+}
+
+/**
+ * devm_reset_control_get - resource managed reset_control_get()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get(). For reset controllers returned from this
+ * function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get() for more information.
+ */
+static inline struct reset_control *__must_check devm_reset_control_get(
+					struct device *dev, const char *id)
+{
+#ifndef CONFIG_RESET_CONTROLLER
+	WARN_ON(1);
+#endif
+	return __devm_reset_control_get(dev, id, 0, 0);
 }
 
 static inline struct reset_control *devm_reset_control_get_optional(
 					struct device *dev, const char *id)
 {
-	return ERR_PTR(-ENOTSUPP);
+	return __devm_reset_control_get(dev, id, 0, 0);
 }
 
-static inline struct reset_control *of_reset_control_get(
-				struct device_node *node, const char *id)
+/**
+ * devm_reset_control_get_by_index - resource managed reset_control_get
+ * @dev: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * Managed reset_control_get(). For reset controllers returned from this
+ * function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get() for more information.
+ */
+static inline struct reset_control *devm_reset_control_get_by_index(
+					struct device *dev, int index)
 {
-	return ERR_PTR(-ENOTSUPP);
+	return __devm_reset_control_get(dev, NULL, index, 0);
 }
 
-static inline struct reset_control *of_reset_control_get_by_index(
-				struct device_node *node, int index)
+/**
+ * devm_reset_control_get_shared - resource managed reset_control_get_shared()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_shared(). For reset controllers returned from
+ * this function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get_shared() for more information.
+ */
+static inline struct reset_control *devm_reset_control_get_shared(
+					struct device *dev, const char *id)
 {
-	return ERR_PTR(-ENOTSUPP);
+	return __devm_reset_control_get(dev, id, 0, 1);
 }
 
-#endif /* CONFIG_RESET_CONTROLLER */
+/**
+ * devm_reset_control_get_shared_by_index - resource managed
+ * reset_control_get_shared
+ * @dev: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * Managed reset_control_get_shared(). For reset controllers returned from
+ * this function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get_shared() for more information.
+ */
+static inline struct reset_control *devm_reset_control_get_shared_by_index(
+					struct device *dev, int index)
+{
+	return __devm_reset_control_get(dev, NULL, index, 1);
+}
 
 #endif
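
To make the shared-reset semantics above concrete, a sketch (not in the patch) of one of several drivers sharing a reset line: each takes its own shared reference and only deasserts/asserts, and the core re-asserts the line when the last user asserts; the "bus" line name is hypothetical.

#include <linux/err.h>
#include <linux/reset.h>

static int foo_enable_block(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_shared(dev, "bus");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* First sharer to deassert actually releases the line. */
	ret = reset_control_deassert(rst);
	if (ret)
		return ret;

	/*
	 * On remove/suspend: reset_control_assert(rst); the line is only
	 * re-asserted once every sharer has called assert.
	 */
	return 0;
}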
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 63bd760..3eef080 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -346,7 +346,8 @@
 					    struct bucket_table *old_tbl);
 int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
 
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
+			 gfp_t gfp);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
 int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
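
A sketch (not part of the patch) of a table walk using the new gfp argument; the element type and table are hypothetical.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/rhashtable.h>

struct foo {
	u32 key;
	struct rhash_head node;
};

static void foo_dump(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct foo *f;

	/* The iterator allocation may now sleep, hence GFP_KERNEL. */
	if (rhashtable_walk_init(ht, &iter, GFP_KERNEL))
		return;

	rhashtable_walk_start(&iter);
	while ((f = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(f))
			continue;	/* -EAGAIN: table resize in progress */
		pr_info("key=%u\n", f->key);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}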
diff --git a/include/linux/rio_mport_cdev.h b/include/linux/rio_mport_cdev.h
deleted file mode 100644
index b65d19d..0000000
--- a/include/linux/rio_mport_cdev.h
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2015-2016, Integrated Device Technology Inc.
- * Copyright (c) 2015, Prodrive Technologies
- * Copyright (c) 2015, Texas Instruments Incorporated
- * Copyright (c) 2015, RapidIO Trade Association
- * All rights reserved.
- *
- * This software is available to you under a choice of one of two licenses.
- * You may choose to be licensed under the terms of the GNU General Public
- * License(GPL) Version 2, or the BSD-3 Clause license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its contributors
- * may be used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RIO_MPORT_CDEV_H_
-#define _RIO_MPORT_CDEV_H_
-
-#ifndef __user
-#define __user
-#endif
-
-struct rio_mport_maint_io {
-	uint32_t rioid;		/* destID of remote device */
-	uint32_t hopcount;	/* hopcount to remote device */
-	uint32_t offset;	/* offset in register space */
-	size_t length;		/* length in bytes */
-	void __user *buffer;	/* data buffer */
-};
-
-/*
- * Definitions for RapidIO data transfers:
- * - memory mapped (MAPPED)
- * - packet generation from memory (TRANSFER)
- */
-#define RIO_TRANSFER_MODE_MAPPED	(1 << 0)
-#define RIO_TRANSFER_MODE_TRANSFER	(1 << 1)
-#define RIO_CAP_DBL_SEND		(1 << 2)
-#define RIO_CAP_DBL_RECV		(1 << 3)
-#define RIO_CAP_PW_SEND			(1 << 4)
-#define RIO_CAP_PW_RECV			(1 << 5)
-#define RIO_CAP_MAP_OUTB		(1 << 6)
-#define RIO_CAP_MAP_INB			(1 << 7)
-
-struct rio_mport_properties {
-	uint16_t hdid;
-	uint8_t id;			/* Physical port ID */
-	uint8_t  index;
-	uint32_t flags;
-	uint32_t sys_size;		/* Default addressing size */
-	uint8_t  port_ok;
-	uint8_t  link_speed;
-	uint8_t  link_width;
-	uint32_t dma_max_sge;
-	uint32_t dma_max_size;
-	uint32_t dma_align;
-	uint32_t transfer_mode;		/* Default transfer mode */
-	uint32_t cap_sys_size;		/* Capable system sizes */
-	uint32_t cap_addr_size;		/* Capable addressing sizes */
-	uint32_t cap_transfer_mode;	/* Capable transfer modes */
-	uint32_t cap_mport;		/* Mport capabilities */
-};
-
-/*
- * Definitions for RapidIO events;
- * - incoming port-writes
- * - incoming doorbells
- */
-#define RIO_DOORBELL	(1 << 0)
-#define RIO_PORTWRITE	(1 << 1)
-
-struct rio_doorbell {
-	uint32_t rioid;
-	uint16_t payload;
-};
-
-struct rio_doorbell_filter {
-	uint32_t rioid;			/* 0xffffffff to match all ids */
-	uint16_t low;
-	uint16_t high;
-};
-
-
-struct rio_portwrite {
-	uint32_t payload[16];
-};
-
-struct rio_pw_filter {
-	uint32_t mask;
-	uint32_t low;
-	uint32_t high;
-};
-
-/* RapidIO base address for inbound requests set to value defined below
- * indicates that no specific RIO-to-local address translation is requested
- * and driver should use direct (one-to-one) address mapping.
-*/
-#define RIO_MAP_ANY_ADDR	(uint64_t)(~((uint64_t) 0))
-
-struct rio_mmap {
-	uint32_t rioid;
-	uint64_t rio_addr;
-	uint64_t length;
-	uint64_t handle;
-	void *address;
-};
-
-struct rio_dma_mem {
-	uint64_t length;		/* length of DMA memory */
-	uint64_t dma_handle;		/* handle associated with this memory */
-	void *buffer;			/* pointer to this memory */
-};
-
-
-struct rio_event {
-	unsigned int header;	/* event type RIO_DOORBELL or RIO_PORTWRITE */
-	union {
-		struct rio_doorbell doorbell;	/* header for RIO_DOORBELL */
-		struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
-	} u;
-};
-
-enum rio_transfer_sync {
-	RIO_TRANSFER_SYNC,	/* synchronous transfer */
-	RIO_TRANSFER_ASYNC,	/* asynchronous transfer */
-	RIO_TRANSFER_FAF,	/* fire-and-forget transfer */
-};
-
-enum rio_transfer_dir {
-	RIO_TRANSFER_DIR_READ,	/* Read operation */
-	RIO_TRANSFER_DIR_WRITE,	/* Write operation */
-};
-
-/*
- * RapidIO data exchange transactions are lists of individual transfers. Each
- * transfer exchanges data between two RapidIO devices by remote direct memory
- * access and has its own completion code.
- *
- * The RapidIO specification defines four types of data exchange requests:
- * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows
- * to specify the required type of write operation or combination of them when
- * only the last data packet requires response.
- *
- * NREAD:    read up to 256 bytes from remote device memory into local memory
- * NWRITE:   write up to 256 bytes from local memory to remote device memory
- *           without confirmation
- * SWRITE:   as NWRITE, but all addresses and payloads must be 64-bit aligned
- * NWRITE_R: as NWRITE, but expect acknowledgment from remote device.
- *
- * The default exchange is chosen from NREAD and any of the WRITE modes as the
- * driver sees fit. For write requests the user can explicitly choose between
- * any of the write modes for each transaction.
- */
-enum rio_exchange {
-	RIO_EXCHANGE_DEFAULT,	/* Default method */
-	RIO_EXCHANGE_NWRITE,	/* All packets using NWRITE */
-	RIO_EXCHANGE_SWRITE,	/* All packets using SWRITE */
-	RIO_EXCHANGE_NWRITE_R,	/* Last packet NWRITE_R, others NWRITE */
-	RIO_EXCHANGE_SWRITE_R,	/* Last packet NWRITE_R, others SWRITE */
-	RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */
-};
-
-struct rio_transfer_io {
-	uint32_t rioid;			/* Target destID */
-	uint64_t rio_addr;		/* Address in target's RIO mem space */
-	enum rio_exchange method;	/* Data exchange method */
-	void __user *loc_addr;
-	uint64_t handle;
-	uint64_t offset;		/* Offset in buffer */
-	uint64_t length;		/* Length in bytes */
-	uint32_t completion_code;	/* Completion code for this transfer */
-};
-
-struct rio_transaction {
-	uint32_t transfer_mode;		/* Data transfer mode */
-	enum rio_transfer_sync sync;	/* Synchronization method */
-	enum rio_transfer_dir dir;	/* Transfer direction */
-	size_t count;			/* Number of transfers */
-	struct rio_transfer_io __user *block;	/* Array of <count> transfers */
-};
-
-struct rio_async_tx_wait {
-	uint32_t token;		/* DMA transaction ID token */
-	uint32_t timeout;	/* Wait timeout in msec, if 0 use default TO */
-};
-
-#define RIO_MAX_DEVNAME_SZ	20
-
-struct rio_rdev_info {
-	uint32_t destid;
-	uint8_t hopcount;
-	uint32_t comptag;
-	char name[RIO_MAX_DEVNAME_SZ + 1];
-};
-
-/* Driver IOCTL codes */
-#define RIO_MPORT_DRV_MAGIC           'm'
-
-#define RIO_MPORT_MAINT_HDID_SET	\
-	_IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t)
-#define RIO_MPORT_MAINT_COMPTAG_SET	\
-	_IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t)
-#define RIO_MPORT_MAINT_PORT_IDX_GET	\
-	_IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t)
-#define RIO_MPORT_GET_PROPERTIES \
-	_IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
-#define RIO_MPORT_MAINT_READ_LOCAL \
-	_IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_WRITE_LOCAL \
-	_IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_READ_REMOTE \
-	_IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_WRITE_REMOTE \
-	_IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io)
-#define RIO_ENABLE_DOORBELL_RANGE	\
-	_IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter)
-#define RIO_DISABLE_DOORBELL_RANGE	\
-	_IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter)
-#define RIO_ENABLE_PORTWRITE_RANGE	\
-	_IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter)
-#define RIO_DISABLE_PORTWRITE_RANGE	\
-	_IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
-#define RIO_SET_EVENT_MASK		\
-	_IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int)
-#define RIO_GET_EVENT_MASK		\
-	_IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int)
-#define RIO_MAP_OUTBOUND \
-	_IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
-#define RIO_UNMAP_OUTBOUND \
-	_IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap)
-#define RIO_MAP_INBOUND \
-	_IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
-#define RIO_UNMAP_INBOUND \
-	_IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t)
-#define RIO_ALLOC_DMA \
-	_IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
-#define RIO_FREE_DMA \
-	_IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t)
-#define RIO_TRANSFER \
-	_IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
-#define RIO_WAIT_FOR_ASYNC \
-	_IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait)
-#define RIO_DEV_ADD \
-	_IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info)
-#define RIO_DEV_DEL \
-	_IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info)
-
-#endif /* _RIO_MPORT_CDEV_H_ */
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 82a6739..ada50ff 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -169,7 +169,7 @@
 
 int register_rpmsg_device(struct rpmsg_channel *dev);
 void unregister_rpmsg_device(struct rpmsg_channel *dev);
-int register_rpmsg_driver(struct rpmsg_driver *drv);
+int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
 void unregister_rpmsg_driver(struct rpmsg_driver *drv);
 void rpmsg_destroy_ept(struct rpmsg_endpoint *);
 struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *,
@@ -177,6 +177,22 @@
 int
 rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool);
 
+/* use a macro to avoid include chaining to get THIS_MODULE */
+#define register_rpmsg_driver(drv) \
+	__register_rpmsg_driver(drv, THIS_MODULE)
+
+/**
+ * module_rpmsg_driver() - Helper macro for registering an rpmsg driver
+ * @__rpmsg_driver: rpmsg_driver struct
+ *
+ * Helper macro for rpmsg drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate.  Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_rpmsg_driver(__rpmsg_driver) \
+	module_driver(__rpmsg_driver, register_rpmsg_driver, \
+			unregister_rpmsg_driver)
+
 /**
  * rpmsg_send() - send a message across to the remote processor
  * @rpdev: the rpmsg channel
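
As a rough illustration of the boilerplate this removes (the sample_* handlers are hypothetical; field names follow struct rpmsg_driver as of this release):

	static struct rpmsg_driver sample_rpmsg_driver = {
		.drv.name	= KBUILD_MODNAME,
		.probe		= sample_rpmsg_probe,
		.remove		= sample_rpmsg_remove,
		.callback	= sample_rpmsg_cb,
	};
	module_rpmsg_driver(sample_rpmsg_driver);

The register_rpmsg_driver() wrapper also records the module owner automatically via THIS_MODULE instead of relying on each caller.
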
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 561e861..ae0528b 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -34,7 +34,7 @@
 extern void __down_read(struct rw_semaphore *sem);
 extern int __down_read_trylock(struct rw_semaphore *sem);
 extern void __down_write(struct rw_semaphore *sem);
-extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check __down_write_killable(struct rw_semaphore *sem);
 extern int __down_write_trylock(struct rw_semaphore *sem);
 extern void __up_read(struct rw_semaphore *sem);
 extern void __up_write(struct rw_semaphore *sem);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8f498cd..d1c12d1 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -14,6 +14,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
+#include <linux/err.h>
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #include <linux/osq_lock.h>
 #endif
@@ -43,6 +44,7 @@
 
 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 
@@ -116,6 +118,7 @@
  * lock for writing
  */
 extern void down_write(struct rw_semaphore *sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem);
 
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
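
A minimal sketch of the intended calling pattern for the new killable variant (mmap_sem is only an example rwsem; the caller must be prepared to back out with -EINTR when a fatal signal arrives):

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	/* ... modify data protected by the semaphore ... */
	up_write(&mm->mmap_sem);
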
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 556ec1e..cb3c8fe 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -286,6 +286,31 @@
 #define SG_MAX_SINGLE_ALLOC		(PAGE_SIZE / sizeof(struct scatterlist))
 
 /*
+ * The maximum number of SG segments that we will put inside a
+ * scatterlist (unless chaining is used). Should ideally fit inside a
+ * single page, to avoid a higher order allocation.  We could define this
+ * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order.  The
+ * minimum value is 32
+ */
+#define SG_CHUNK_SIZE	128
+
+/*
+ * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
+ * is totally arbitrary; a setting of 2048 will get you at least 8MB I/Os.
+ */
+#ifdef CONFIG_ARCH_HAS_SG_CHAIN
+#define SG_MAX_SEGMENTS	2048
+#else
+#define SG_MAX_SEGMENTS	SG_CHUNK_SIZE
+#endif
+
+#ifdef CONFIG_SG_POOL
+void sg_free_table_chained(struct sg_table *table, bool first_chunk);
+int sg_alloc_table_chained(struct sg_table *table, int nents,
+			   struct scatterlist *first_chunk);
+#endif
+
+/*
  * sg page iterator
  *
  * Iterates over sg entries page-by-page.  On each successful iteration,
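
The intended usage is roughly the following (names are placeholders; the first_chunk array of SG_CHUNK_SIZE entries is normally embedded in a per-command structure rather than declared on the stack):

	int ret;

	ret = sg_alloc_table_chained(&cmd->sg_table, nents, cmd->first_chunk);
	if (ret)
		return ret;
	/* ... populate and map cmd->sg_table.sgl ... */
	sg_free_table_chained(&cmd->sg_table, true);	/* true: a first_chunk was supplied */
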
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 60bba7e..31bd0d9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -40,7 +40,6 @@
 #include <linux/pid.h>
 #include <linux/percpu.h>
 #include <linux/topology.h>
-#include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/rculist.h>
@@ -178,9 +177,11 @@
 extern void calc_global_load(unsigned long ticks);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void update_cpu_load_nohz(int active);
+extern void cpu_load_update_nohz_start(void);
+extern void cpu_load_update_nohz_stop(void);
 #else
-static inline void update_cpu_load_nohz(int active) { }
+static inline void cpu_load_update_nohz_start(void) { }
+static inline void cpu_load_update_nohz_stop(void) { }
 #endif
 
 extern void dump_cpu_task(int cpu);
@@ -372,6 +373,15 @@
 extern void trap_init(void);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
+extern int sched_cpu_starting(unsigned int cpu);
+extern int sched_cpu_activate(unsigned int cpu);
+extern int sched_cpu_deactivate(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_cpu_dying(unsigned int cpu);
+#else
+# define sched_cpu_dying	NULL
+#endif
 
 extern void sched_show_task(struct task_struct *p);
 
@@ -720,7 +730,7 @@
 	struct task_cputime cputime_expires;
 
 #ifdef CONFIG_NO_HZ_FULL
-	unsigned long tick_dep_mask;
+	atomic_t tick_dep_mask;
 #endif
 
 	struct list_head cpu_timers[3];
@@ -935,9 +945,19 @@
 };
 
 /*
+ * Integer metrics need fixed point arithmetic, e.g., sched/fair
+ * has a few: load, load_avg, util_avg, freq, and capacity.
+ *
+ * We define a basic fixed point arithmetic range, and then formalize
+ * all these metrics based on that basic range.
+ */
+# define SCHED_FIXEDPOINT_SHIFT	10
+# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
+
+/*
  * Increase resolution of cpu_capacity calculations
  */
-#define SCHED_CAPACITY_SHIFT	10
+#define SCHED_CAPACITY_SHIFT	SCHED_FIXEDPOINT_SHIFT
 #define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
 
 /*
@@ -1199,18 +1219,56 @@
 };
 
 /*
- * The load_avg/util_avg accumulates an infinite geometric series.
- * 1) load_avg factors frequency scaling into the amount of time that a
- * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the
- * aggregated such weights of all runnable and blocked sched_entities.
- * 2) util_avg factors frequency and cpu scaling into the amount of time
- * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
- * For cfs_rq, it is the aggregated such times of all runnable and
+ * The load_avg/util_avg accumulates an infinite geometric series
+ * (see __update_load_avg() in kernel/sched/fair.c).
+ *
+ * [load_avg definition]
+ *
+ *   load_avg = runnable% * scale_load_down(load)
+ *
+ * where runnable% is the time ratio that a sched_entity is runnable.
+ * For cfs_rq, it is the aggregated load_avg of all runnable and
  * blocked sched_entities.
- * The 64 bit load_sum can:
- * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
- * the highest weight (=88761) always runnable, we should not overflow
- * 2) for entity, support any load.weight always runnable
+ *
+ * load_avg may also take frequency scaling into account:
+ *
+ *   load_avg = runnable% * scale_load_down(load) * freq%
+ *
+ * where freq% is the CPU frequency normalized to the highest frequency.
+ *
+ * [util_avg definition]
+ *
+ *   util_avg = running% * SCHED_CAPACITY_SCALE
+ *
+ * where running% is the time ratio that a sched_entity is running on
+ * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
+ * and blocked sched_entities.
+ *
+ * util_avg may also factor frequency scaling and CPU capacity scaling:
+ *
+ *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
+ *
+ * where freq% is the same as above, and capacity% is the CPU capacity
+ * normalized to the greatest capacity (due to uarch differences, etc).
+ *
+ * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
+ * themselves are in the range of [0, 1]. To do fixed point arithmetic,
+ * we therefore scale them to as large a range as necessary. This is for
+ * example reflected by util_avg's SCHED_CAPACITY_SCALE.
+ *
+ * [Overflow issue]
+ *
+ * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
+ * with the highest load (=88761), always runnable on a single cfs_rq,
+ * and should not overflow as the number already hits PID_MAX_LIMIT.
+ *
+ * For all other cases (including 32-bit kernels), struct load_weight's
+ * weight will overflow first before we do, because:
+ *
+ *    Max(load_avg) <= Max(load.weight)
+ *
+ * Then it is the load_weight's responsibility to consider overflow
+ * issues.
  */
 struct sched_avg {
 	u64 last_update_time, load_sum;
@@ -1549,7 +1607,7 @@
 #endif
 
 #ifdef CONFIG_NO_HZ_FULL
-	unsigned long tick_dep_mask;
+	atomic_t tick_dep_mask;
 #endif
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	u64 start_time;		/* monotonic time in nsec */
@@ -1596,6 +1654,7 @@
 
 	unsigned long sas_ss_sp;
 	size_t sas_ss_size;
+	unsigned sas_ss_flags;
 
 	struct callback_head *task_works;
 
@@ -1871,6 +1930,11 @@
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
+{
+	return p->nr_cpus_allowed;
+}
+
 #define TNF_MIGRATED	0x01
 #define TNF_NO_GROUP	0x02
 #define TNF_SHARED	0x04
@@ -2303,8 +2367,6 @@
 /*
  * See the comment in kernel/sched/clock.c
  */
-extern u64 cpu_clock(int cpu);
-extern u64 local_clock(void);
 extern u64 running_clock(void);
 extern u64 sched_clock_cpu(int cpu);
 
@@ -2323,6 +2385,16 @@
 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
+
+static inline u64 cpu_clock(int cpu)
+{
+	return sched_clock();
+}
+
+static inline u64 local_clock(void)
+{
+	return sched_clock();
+}
 #else
 /*
  * Architectures can set this to 1 if they have specified
@@ -2337,6 +2409,26 @@
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+
+/*
+ * As outlined in clock.c, provides a fast, high resolution, nanosecond
+ * time source that is monotonic per cpu argument and has bounded drift
+ * between cpus.
+ *
+ * ######################### BIG FAT WARNING ##########################
+ * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
+ * # go backwards !!                                                  #
+ * ####################################################################
+ */
+static inline u64 cpu_clock(int cpu)
+{
+	return sched_clock_cpu(cpu);
+}
+
+static inline u64 local_clock(void)
+{
+	return sched_clock_cpu(raw_smp_processor_id());
+}
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -2575,6 +2667,18 @@
  */
 static inline int on_sig_stack(unsigned long sp)
 {
+	/*
+	 * If the signal stack is SS_AUTODISARM then, by construction, we
+	 * can't be on the signal stack unless user code deliberately set
+	 * SS_AUTODISARM when we were already on it.
+	 *
+	 * This improves reliability: if user state gets corrupted such that
+	 * the stack pointer points very close to the end of the signal stack,
+	 * then this check will enable the signal to be handled anyway.
+	 */
+	if (current->sas_ss_flags & SS_AUTODISARM)
+		return 0;
+
 #ifdef CONFIG_STACK_GROWSUP
 	return sp >= current->sas_ss_sp &&
 		sp - current->sas_ss_sp < current->sas_ss_size;
@@ -2592,6 +2696,13 @@
 	return on_sig_stack(sp) ? SS_ONSTACK : 0;
 }
 
+static inline void sas_ss_reset(struct task_struct *p)
+{
+	p->sas_ss_sp = 0;
+	p->sas_ss_size = 0;
+	p->sas_ss_flags = SS_DISABLE;
+}
+
 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
 {
 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
@@ -3240,7 +3351,10 @@
 		     u64 time, unsigned long util, unsigned long max);
 };
 
-void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+			void (*func)(struct update_util_data *data, u64 time,
+				     unsigned long util, unsigned long max));
+void cpufreq_remove_update_util_hook(int cpu);
 #endif /* CONFIG_CPU_FREQ */
 
 #endif
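
As a worked example of the fixed-point convention documented above: with SCHED_FIXEDPOINT_SHIFT = 10 the scale is 1 << 10 = 1024, so a sched_entity running 25% of the time at full frequency and full capacity has util_avg ≈ 0.25 * 1024 = 256, and halving the CPU frequency (freq% = 0.5) roughly halves that to 128.
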
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index a9414fd..dacb5e7 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -705,4 +705,71 @@
 	sctp_authhdr_t auth_hdr;
 } __packed sctp_auth_chunk_t;
 
+struct sctp_info {
+	__u32	sctpi_tag;
+	__u32	sctpi_state;
+	__u32	sctpi_rwnd;
+	__u16	sctpi_unackdata;
+	__u16	sctpi_penddata;
+	__u16	sctpi_instrms;
+	__u16	sctpi_outstrms;
+	__u32	sctpi_fragmentation_point;
+	__u32	sctpi_inqueue;
+	__u32	sctpi_outqueue;
+	__u32	sctpi_overall_error;
+	__u32	sctpi_max_burst;
+	__u32	sctpi_maxseg;
+	__u32	sctpi_peer_rwnd;
+	__u32	sctpi_peer_tag;
+	__u8	sctpi_peer_capable;
+	__u8	sctpi_peer_sack;
+	__u16	__reserved1;
+
+	/* assoc status info */
+	__u64	sctpi_isacks;
+	__u64	sctpi_osacks;
+	__u64	sctpi_opackets;
+	__u64	sctpi_ipackets;
+	__u64	sctpi_rtxchunks;
+	__u64	sctpi_outofseqtsns;
+	__u64	sctpi_idupchunks;
+	__u64	sctpi_gapcnt;
+	__u64	sctpi_ouodchunks;
+	__u64	sctpi_iuodchunks;
+	__u64	sctpi_oodchunks;
+	__u64	sctpi_iodchunks;
+	__u64	sctpi_octrlchunks;
+	__u64	sctpi_ictrlchunks;
+
+	/* primary transport info */
+	struct sockaddr_storage	sctpi_p_address;
+	__s32	sctpi_p_state;
+	__u32	sctpi_p_cwnd;
+	__u32	sctpi_p_srtt;
+	__u32	sctpi_p_rto;
+	__u32	sctpi_p_hbinterval;
+	__u32	sctpi_p_pathmaxrxt;
+	__u32	sctpi_p_sackdelay;
+	__u32	sctpi_p_sackfreq;
+	__u32	sctpi_p_ssthresh;
+	__u32	sctpi_p_partial_bytes_acked;
+	__u32	sctpi_p_flight_size;
+	__u16	sctpi_p_error;
+	__u16	__reserved2;
+
+	/* sctp sock info */
+	__u32	sctpi_s_autoclose;
+	__u32	sctpi_s_adaptation_ind;
+	__u32	sctpi_s_pd_point;
+	__u8	sctpi_s_nodelay;
+	__u8	sctpi_s_disable_fragments;
+	__u8	sctpi_s_v4mapped;
+	__u8	sctpi_s_frag_interleave;
+};
+
+struct sctp_infox {
+	struct sctp_info *sctpinfo;
+	struct sctp_association *asoc;
+};
+
 #endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/security.h b/include/linux/security.h
index 157f0cb..14df373 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -71,7 +71,7 @@
 /* These functions are in security/commoncap.c */
 extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
 		       int cap, int audit);
-extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
+extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz);
 extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
 extern int cap_ptrace_traceme(struct task_struct *parent);
 extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
@@ -208,7 +208,13 @@
 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
 int security_quota_on(struct dentry *dentry);
 int security_syslog(int type);
-int security_settime(const struct timespec *ts, const struct timezone *tz);
+int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
+static inline int security_settime(const struct timespec *ts, const struct timezone *tz)
+{
+	struct timespec64 ts64 = timespec_to_timespec64(*ts);
+
+	return security_settime64(&ts64, tz);
+}
 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
 int security_bprm_set_creds(struct linux_binprm *bprm);
 int security_bprm_check(struct linux_binprm *bprm);
@@ -222,10 +228,10 @@
 int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
 int security_sb_show_options(struct seq_file *m, struct super_block *sb);
 int security_sb_statfs(struct dentry *dentry);
-int security_sb_mount(const char *dev_name, struct path *path,
+int security_sb_mount(const char *dev_name, const struct path *path,
 		      const char *type, unsigned long flags, void *data);
 int security_sb_umount(struct vfsmount *mnt, int flags);
-int security_sb_pivotroot(struct path *old_path, struct path *new_path);
+int security_sb_pivotroot(const struct path *old_path, const struct path *new_path);
 int security_sb_set_mnt_opts(struct super_block *sb,
 				struct security_mnt_opts *opts,
 				unsigned long kern_flags,
@@ -462,10 +468,18 @@
 	return 0;
 }
 
+static inline int security_settime64(const struct timespec64 *ts,
+				     const struct timezone *tz)
+{
+	return cap_settime(ts, tz);
+}
+
 static inline int security_settime(const struct timespec *ts,
 				   const struct timezone *tz)
 {
-	return cap_settime(ts, tz);
+	struct timespec64 ts64 = timespec_to_timespec64(*ts);
+
+	return cap_settime(&ts64, tz);
 }
 
 static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
@@ -530,7 +544,7 @@
 	return 0;
 }
 
-static inline int security_sb_mount(const char *dev_name, struct path *path,
+static inline int security_sb_mount(const char *dev_name, const struct path *path,
 				    const char *type, unsigned long flags,
 				    void *data)
 {
@@ -542,8 +556,8 @@
 	return 0;
 }
 
-static inline int security_sb_pivotroot(struct path *old_path,
-					struct path *new_path)
+static inline int security_sb_pivotroot(const struct path *old_path,
+					const struct path *new_path)
 {
 	return 0;
 }
@@ -1442,83 +1456,83 @@
 #endif	/* CONFIG_SECURITY_NETWORK_XFRM */
 
 #ifdef CONFIG_SECURITY_PATH
-int security_path_unlink(struct path *dir, struct dentry *dentry);
-int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode);
-int security_path_rmdir(struct path *dir, struct dentry *dentry);
-int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
+int security_path_unlink(const struct path *dir, struct dentry *dentry);
+int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode);
+int security_path_rmdir(const struct path *dir, struct dentry *dentry);
+int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
 			unsigned int dev);
-int security_path_truncate(struct path *path);
-int security_path_symlink(struct path *dir, struct dentry *dentry,
+int security_path_truncate(const struct path *path);
+int security_path_symlink(const struct path *dir, struct dentry *dentry,
 			  const char *old_name);
-int security_path_link(struct dentry *old_dentry, struct path *new_dir,
+int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
 		       struct dentry *new_dentry);
-int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
-			 struct path *new_dir, struct dentry *new_dentry,
+int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
+			 const struct path *new_dir, struct dentry *new_dentry,
 			 unsigned int flags);
-int security_path_chmod(struct path *path, umode_t mode);
-int security_path_chown(struct path *path, kuid_t uid, kgid_t gid);
-int security_path_chroot(struct path *path);
+int security_path_chmod(const struct path *path, umode_t mode);
+int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid);
+int security_path_chroot(const struct path *path);
 #else	/* CONFIG_SECURITY_PATH */
-static inline int security_path_unlink(struct path *dir, struct dentry *dentry)
+static inline int security_path_unlink(const struct path *dir, struct dentry *dentry)
 {
 	return 0;
 }
 
-static inline int security_path_mkdir(struct path *dir, struct dentry *dentry,
+static inline int security_path_mkdir(const struct path *dir, struct dentry *dentry,
 				      umode_t mode)
 {
 	return 0;
 }
 
-static inline int security_path_rmdir(struct path *dir, struct dentry *dentry)
+static inline int security_path_rmdir(const struct path *dir, struct dentry *dentry)
 {
 	return 0;
 }
 
-static inline int security_path_mknod(struct path *dir, struct dentry *dentry,
+static inline int security_path_mknod(const struct path *dir, struct dentry *dentry,
 				      umode_t mode, unsigned int dev)
 {
 	return 0;
 }
 
-static inline int security_path_truncate(struct path *path)
+static inline int security_path_truncate(const struct path *path)
 {
 	return 0;
 }
 
-static inline int security_path_symlink(struct path *dir, struct dentry *dentry,
+static inline int security_path_symlink(const struct path *dir, struct dentry *dentry,
 					const char *old_name)
 {
 	return 0;
 }
 
 static inline int security_path_link(struct dentry *old_dentry,
-				     struct path *new_dir,
+				     const struct path *new_dir,
 				     struct dentry *new_dentry)
 {
 	return 0;
 }
 
-static inline int security_path_rename(struct path *old_dir,
+static inline int security_path_rename(const struct path *old_dir,
 				       struct dentry *old_dentry,
-				       struct path *new_dir,
+				       const struct path *new_dir,
 				       struct dentry *new_dentry,
 				       unsigned int flags)
 {
 	return 0;
 }
 
-static inline int security_path_chmod(struct path *path, umode_t mode)
+static inline int security_path_chmod(const struct path *path, umode_t mode)
 {
 	return 0;
 }
 
-static inline int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+static inline int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
 {
 	return 0;
 }
 
-static inline int security_path_chroot(struct path *path)
+static inline int security_path_chroot(const struct path *path)
 {
 	return 0;
 }
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index dde00de..f3d45dd 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -7,13 +7,10 @@
 #include <linux/mutex.h>
 #include <linux/cpumask.h>
 #include <linux/nodemask.h>
+#include <linux/fs.h>
+#include <linux/cred.h>
 
 struct seq_operations;
-struct file;
-struct path;
-struct inode;
-struct dentry;
-struct user_namespace;
 
 struct seq_file {
 	char *buf;
@@ -27,9 +24,7 @@
 	struct mutex lock;
 	const struct seq_operations *op;
 	int poll_event;
-#ifdef CONFIG_USER_NS
-	struct user_namespace *user_ns;
-#endif
+	const struct file *file;
 	void *private;
 };
 
@@ -147,7 +142,7 @@
 static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
 {
 #ifdef CONFIG_USER_NS
-	return seq->user_ns;
+	return seq->file->f_cred->user_ns;
 #else
 	extern struct user_namespace init_user_ns;
 	return &init_user_ns;
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 92557bb..639be26 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -28,6 +28,21 @@
 	sigset_t signal;
 };
 
+#ifndef HAVE_ARCH_COPY_SIGINFO
+
+#include <linux/string.h>
+
+static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+{
+	if (from->si_code < 0)
+		memcpy(to, from, sizeof(*to));
+	else
+		/* _sigchld is currently the largest known union member */
+		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+}
+
+#endif
+
 /*
  * Define some primitives to manipulate sigset_t.
  */
@@ -432,8 +447,10 @@
 	stack_t __user *__uss = uss; \
 	struct task_struct *t = current; \
 	put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
-	put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+	put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
 	put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+	if (t->sas_ss_flags & SS_AUTODISARM) \
+		sas_ss_reset(t); \
 } while (0);
 
 #ifdef CONFIG_PROC_FS
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 15d0df9..c413c58 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -382,14 +382,10 @@
 
 	/* generate software time stamp when entering packet scheduling */
 	SKBTX_SCHED_TSTAMP = 1 << 6,
-
-	/* generate software timestamp on peer data acknowledgment */
-	SKBTX_ACK_TSTAMP = 1 << 7,
 };
 
 #define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
-				 SKBTX_SCHED_TSTAMP | \
-				 SKBTX_ACK_TSTAMP)
+				 SKBTX_SCHED_TSTAMP)
 #define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
 
 /*
@@ -465,23 +461,27 @@
 	/* This indicates the tcp segment has CWR set. */
 	SKB_GSO_TCP_ECN = 1 << 3,
 
-	SKB_GSO_TCPV6 = 1 << 4,
+	SKB_GSO_TCP_FIXEDID = 1 << 4,
 
-	SKB_GSO_FCOE = 1 << 5,
+	SKB_GSO_TCPV6 = 1 << 5,
 
-	SKB_GSO_GRE = 1 << 6,
+	SKB_GSO_FCOE = 1 << 6,
 
-	SKB_GSO_GRE_CSUM = 1 << 7,
+	SKB_GSO_GRE = 1 << 7,
 
-	SKB_GSO_IPIP = 1 << 8,
+	SKB_GSO_GRE_CSUM = 1 << 8,
 
-	SKB_GSO_SIT = 1 << 9,
+	SKB_GSO_IPIP = 1 << 9,
 
-	SKB_GSO_UDP_TUNNEL = 1 << 10,
+	SKB_GSO_SIT = 1 << 10,
 
-	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
+	SKB_GSO_UDP_TUNNEL = 1 << 11,
 
-	SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
+	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
+
+	SKB_GSO_PARTIAL = 1 << 13,
+
+	SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
 };
 
 #if BITS_PER_LONG > 32
@@ -1325,6 +1325,16 @@
 	return dataref != 1;
 }
 
+static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
+{
+	might_sleep_if(gfpflags_allow_blocking(pri));
+
+	if (skb_header_cloned(skb))
+		return pskb_expand_head(skb, 0, 0, pri);
+
+	return 0;
+}
+
 /**
  *	skb_header_release - release reference to header
  *	@skb: buffer to operate on
@@ -2949,7 +2959,12 @@
 				 struct iov_iter *from, int len);
 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
+static inline void skb_free_datagram_locked(struct sock *sk,
+					    struct sk_buff *skb)
+{
+	__skb_free_datagram_locked(sk, skb, 0);
+}
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
@@ -2977,6 +2992,8 @@
 int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int skb_vlan_pop(struct sk_buff *skb);
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
+			     gfp_t gfp);
 
 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
 {
@@ -3584,7 +3601,10 @@
  * Keeps track of level of encapsulation of network headers.
  */
 struct skb_gso_cb {
-	int	mac_offset;
+	union {
+		int	mac_offset;
+		int	data_offset;
+	};
 	int	encap_level;
 	__wsum	csum;
 	__u16	csum_start;
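
A minimal sketch of the pattern the new skb_header_unclone() helper captures (GFP_ATOMIC assumes softirq context; error propagation is up to the caller):

	int err;

	err = skb_header_unclone(skb, GFP_ATOMIC);
	if (err)
		return err;
	/* header area is now private; safe to rewrite protocol headers in place */
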
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
index d0cb6d1..cbb0f06 100644
--- a/include/linux/soc/qcom/smd.h
+++ b/include/linux/soc/qcom/smd.h
@@ -26,6 +26,8 @@
 	struct qcom_smd_channel *channel;
 };
 
+typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t);
+
 /**
  * struct qcom_smd_driver - smd driver struct
  * @driver:	underlying device driver
@@ -42,16 +44,71 @@
 
 	int (*probe)(struct qcom_smd_device *dev);
 	void (*remove)(struct qcom_smd_device *dev);
-	int (*callback)(struct qcom_smd_device *, const void *, size_t);
+	qcom_smd_cb_t callback;
 };
 
+#if IS_ENABLED(CONFIG_QCOM_SMD)
+
 int qcom_smd_driver_register(struct qcom_smd_driver *drv);
 void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
 
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel,
+					       const char *name,
+					       qcom_smd_cb_t cb);
+void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel);
+void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data);
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+
+
+#else
+
+static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv)
+{
+	return -ENXIO;
+}
+
+static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv)
+{
+	/* This shouldn't be possible */
+	WARN_ON(1);
+}
+
+static inline struct qcom_smd_channel *
+qcom_smd_open_channel(struct qcom_smd_channel *channel,
+		      const char *name,
+		      qcom_smd_cb_t cb)
+{
+	/* This shouldn't be possible */
+	WARN_ON(1);
+	return NULL;
+}
+
+static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
+{
+	/* This shouldn't be possible */
+	WARN_ON(1);
+	return NULL;
+}
+
+static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
+{
+	/* This shouldn't be possible */
+	WARN_ON(1);
+}
+
+static inline int qcom_smd_send(struct qcom_smd_channel *channel,
+				const void *data, int len)
+{
+	/* This shouldn't be possible */
+	WARN_ON(1);
+	return -ENXIO;
+}
+
+#endif
+
 #define module_qcom_smd_driver(__smd_driver) \
 	module_driver(__smd_driver, qcom_smd_driver_register, \
 		      qcom_smd_driver_unregister)
 
-int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
 
 #endif
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
index f35e151..7b88697 100644
--- a/include/linux/soc/qcom/smem_state.h
+++ b/include/linux/soc/qcom/smem_state.h
@@ -1,12 +1,17 @@
 #ifndef __QCOM_SMEM_STATE__
 #define __QCOM_SMEM_STATE__
 
+#include <linux/errno.h>
+
+struct device_node;
 struct qcom_smem_state;
 
 struct qcom_smem_state_ops {
 	int (*update_bits)(void *, u32, u32);
 };
 
+#ifdef CONFIG_QCOM_SMEM_STATE
+
 struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
 void qcom_smem_state_put(struct qcom_smem_state *);
 
@@ -15,4 +20,34 @@
 struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data);
 void qcom_smem_state_unregister(struct qcom_smem_state *state);
 
+#else
+
+static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+	const char *con_id, unsigned *bit)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void qcom_smem_state_put(struct qcom_smem_state *state)
+{
+}
+
+static inline int qcom_smem_state_update_bits(struct qcom_smem_state *state,
+	u32 mask, u32 value)
+{
+	return -EINVAL;
+}
+
+static inline struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
+	const struct qcom_smem_state_ops *ops, void *data)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void qcom_smem_state_unregister(struct qcom_smem_state *state)
+{
+}
+
+#endif
+
 #endif
diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h
new file mode 100644
index 0000000..92fc613
--- /dev/null
+++ b/include/linux/soc/renesas/rcar-sysc.h
@@ -0,0 +1,16 @@
+#ifndef __LINUX_SOC_RENESAS_RCAR_SYSC_H__
+#define __LINUX_SOC_RENESAS_RCAR_SYSC_H__
+
+#include <linux/types.h>
+
+struct rcar_sysc_ch {
+	u16 chan_offs;
+	u8 chan_bit;
+	u8 isr_bit;
+};
+
+int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch);
+int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch);
+void __iomem *rcar_sysc_init(phys_addr_t base);
+
+#endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 73bf6c6..b5cc5a6 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -201,8 +201,9 @@
 #define AF_NFC		39	/* NFC sockets			*/
 #define AF_VSOCK	40	/* vSockets			*/
 #define AF_KCM		41	/* Kernel Connection Multiplexor*/
+#define AF_QIPCRTR	42	/* Qualcomm IPC Router          */
 
-#define AF_MAX		42	/* For now.. */
+#define AF_MAX		43	/* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC	AF_UNSPEC
@@ -249,6 +250,7 @@
 #define PF_NFC		AF_NFC
 #define PF_VSOCK	AF_VSOCK
 #define PF_KCM		AF_KCM
+#define PF_QIPCRTR	AF_QIPCRTR
 #define PF_MAX		AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 4bcf5a6..ffdaca9 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -108,7 +108,6 @@
 };
 
 struct plat_stmmacenet_data {
-	char *phy_bus_name;
 	int bus_id;
 	int phy_addr;
 	int interface;
@@ -138,5 +137,7 @@
 	void (*exit)(struct platform_device *pdev, void *priv);
 	void *bsp_priv;
 	struct stmmac_axi *axi;
+	int has_gmac4;
+	bool tso_en;
 };
 #endif
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index dabe643..5ce9538 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -3,6 +3,8 @@
 
 #include <linux/types.h>
 
+struct file;
+
 /* Descriptions of the types of units to
  * print in */
 enum string_size_units {
@@ -68,4 +70,8 @@
 	return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
 }
 
+char *kstrdup_quotable(const char *src, gfp_t gfp);
+char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
+char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
+
 #endif
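
A rough sketch of using the new kstrdup_quotable_cmdline() helper, e.g. when logging a task's command line (the log call is a placeholder):

	char *cmdline;

	cmdline = kstrdup_quotable_cmdline(current, GFP_KERNEL);
	if (!cmdline)
		return -ENOMEM;
	pr_info("exec of %s\n", cmdline);
	kfree(cmdline);
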
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc71..7ca44fb 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -129,7 +129,7 @@
  *
  * These happen to all be powers of 2, which is not strictly
  * necessary but helps enforce the real limitation, which is
- * that they should be multiples of PAGE_CACHE_SIZE.
+ * that they should be multiples of PAGE_SIZE.
  *
  * For UDP transports, a block plus NFS,RPC, and UDP headers
  * has to fit into the IP datagram limit of 64K.  The largest
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d18b65c..ad22035 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -418,7 +418,7 @@
 extern int page_swapcount(struct page *);
 extern int swp_swapcount(swp_entry_t entry);
 extern struct swap_info_struct *page_swap_info(struct page *);
-extern int reuse_swap_page(struct page *);
+extern bool reuse_swap_page(struct page *, int *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
@@ -433,9 +433,9 @@
 #define si_swapinfo(val) \
 	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
 /* only sparc can not include linux/pagemap.h in this file
- * so leave page_cache_release and release_pages undeclared... */
+ * so leave put_page and release_pages undeclared... */
 #define free_page_and_swap_cache(page) \
-	page_cache_release(page)
+	put_page(page)
 #define free_pages_and_swap_cache(pages, nr) \
 	release_pages((pages), (nr), false);
 
@@ -513,8 +513,8 @@
 	return 0;
 }
 
-#define reuse_swap_page(page) \
-	(!PageTransCompound(page) && page_mapcount(page) == 1)
+#define reuse_swap_page(page, total_mapcount) \
+	(page_trans_huge_mapcount(page, total_mapcount) == 1)
 
 static inline int try_to_free_swap(struct page *page)
 {
@@ -533,6 +533,10 @@
 #ifdef CONFIG_MEMCG
 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 {
+	/* Cgroup2 doesn't have per-cgroup swappiness */
+	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+		return vm_swappiness;
+
 	/* root ? */
 	if (mem_cgroup_disabled() || !memcg->css.parent)
 		return vm_swappiness;
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a55d052..1b8a5a7 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -352,8 +352,8 @@
 
 struct thermal_trip {
 	struct device_node *np;
-	unsigned long int temperature;
-	unsigned long int hysteresis;
+	int temperature;
+	int hysteresis;
 	enum thermal_trip_type type;
 };
 
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 96f37be..37dbacf 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -1,6 +1,8 @@
 #ifndef _LINUX_TIMEKEEPING_H
 #define _LINUX_TIMEKEEPING_H
 
+#include <asm-generic/errno-base.h>
+
 /* Included from linux/ktime.h */
 
 void timekeeping_init(void);
@@ -11,8 +13,19 @@
  */
 extern void do_gettimeofday(struct timeval *tv);
 extern int do_settimeofday64(const struct timespec64 *ts);
-extern int do_sys_settimeofday(const struct timespec *tv,
-			       const struct timezone *tz);
+extern int do_sys_settimeofday64(const struct timespec64 *tv,
+				 const struct timezone *tz);
+static inline int do_sys_settimeofday(const struct timespec *tv,
+				      const struct timezone *tz)
+{
+	struct timespec64 ts64;
+
+	if (!tv)
+		return -EINVAL;
+
+	ts64 = timespec_to_timespec64(*tv);
+	return do_sys_settimeofday64(&ts64, tz);
+}
 
 /*
  * Kernel time accessors
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 0810f81b..be00761 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -154,21 +154,6 @@
 				struct trace_event_file *trace_file,
 				int type, unsigned long len,
 				unsigned long flags, int pc);
-struct ring_buffer_event *
-trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
-				  int type, unsigned long len,
-				  unsigned long flags, int pc);
-void trace_buffer_unlock_commit(struct trace_array *tr,
-				struct ring_buffer *buffer,
-				struct ring_buffer_event *event,
-				unsigned long flags, int pc);
-void trace_buffer_unlock_commit_regs(struct trace_array *tr,
-				     struct ring_buffer *buffer,
-				     struct ring_buffer_event *event,
-				     unsigned long flags, int pc,
-				     struct pt_regs *regs);
-void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
-					 struct ring_buffer_event *event);
 
 void tracing_record_cmdline(struct task_struct *tsk);
 
@@ -229,7 +214,6 @@
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
 	TRACE_EVENT_FL_WAS_ENABLED_BIT,
-	TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
 	TRACE_EVENT_FL_TRACEPOINT_BIT,
 	TRACE_EVENT_FL_KPROBE_BIT,
 	TRACE_EVENT_FL_UPROBE_BIT,
@@ -244,7 +228,6 @@
  *  WAS_ENABLED   - Set and stays set when an event was ever enabled
  *                    (used for module unloading, if a module event is enabled,
  *                     it is best to clear the buffers that used it).
- *  USE_CALL_FILTER - For trace internal events, don't use file filter
  *  TRACEPOINT    - Event is a tracepoint
  *  KPROBE        - Event is a kprobe
  *  UPROBE        - Event is a uprobe
@@ -255,7 +238,6 @@
 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
 	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
-	TRACE_EVENT_FL_USE_CALL_FILTER	= (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
 	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
 	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
 	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
@@ -407,16 +389,12 @@
 	ETT_SNAPSHOT		= (1 << 1),
 	ETT_STACKTRACE		= (1 << 2),
 	ETT_EVENT_ENABLE	= (1 << 3),
+	ETT_EVENT_HIST		= (1 << 4),
+	ETT_HIST_ENABLE		= (1 << 5),
 };
 
 extern int filter_match_preds(struct event_filter *filter, void *rec);
 
-extern int filter_check_discard(struct trace_event_file *file, void *rec,
-				struct ring_buffer *buffer,
-				struct ring_buffer_event *event);
-extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
-				     struct ring_buffer *buffer,
-				     struct ring_buffer_event *event);
 extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
 						   void *rec);
 extern void event_triggers_post_call(struct trace_event_file *file,
@@ -450,100 +428,6 @@
 	return false;
 }
 
-/*
- * Helper function for event_trigger_unlock_commit{_regs}().
- * If there are event triggers attached to this event that requires
- * filtering against its fields, then they wil be called as the
- * entry already holds the field information of the current event.
- *
- * It also checks if the event should be discarded or not.
- * It is to be discarded if the event is soft disabled and the
- * event was only recorded to process triggers, or if the event
- * filter is active and this event did not match the filters.
- *
- * Returns true if the event is discarded, false otherwise.
- */
-static inline bool
-__event_trigger_test_discard(struct trace_event_file *file,
-			     struct ring_buffer *buffer,
-			     struct ring_buffer_event *event,
-			     void *entry,
-			     enum event_trigger_type *tt)
-{
-	unsigned long eflags = file->flags;
-
-	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
-		*tt = event_triggers_call(file, entry);
-
-	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
-		ring_buffer_discard_commit(buffer, event);
-	else if (!filter_check_discard(file, entry, buffer, event))
-		return false;
-
-	return true;
-}
-
-/**
- * event_trigger_unlock_commit - handle triggers and finish event commit
- * @file: The file pointer assoctiated to the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- */
-static inline void
-event_trigger_unlock_commit(struct trace_event_file *file,
-			    struct ring_buffer *buffer,
-			    struct ring_buffer_event *event,
-			    void *entry, unsigned long irq_flags, int pc)
-{
-	enum event_trigger_type tt = ETT_NONE;
-
-	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
-
-	if (tt)
-		event_triggers_post_call(file, tt, entry);
-}
-
-/**
- * event_trigger_unlock_commit_regs - handle triggers and finish event commit
- * @file: The file pointer assoctiated to the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- *
- * Same as event_trigger_unlock_commit() but calls
- * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
- */
-static inline void
-event_trigger_unlock_commit_regs(struct trace_event_file *file,
-				 struct ring_buffer *buffer,
-				 struct ring_buffer_event *event,
-				 void *entry, unsigned long irq_flags, int pc,
-				 struct pt_regs *regs)
-{
-	enum event_trigger_type tt = ETT_NONE;
-
-	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
-						irq_flags, pc, regs);
-
-	if (tt)
-		event_triggers_post_call(file, tt, entry);
-}
-
 #ifdef CONFIG_BPF_EVENTS
 unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
 #else
@@ -569,6 +453,7 @@
 			      int is_signed, int filter_type);
 extern int trace_add_event_call(struct trace_event_call *call);
 extern int trace_remove_event_call(struct trace_event_call *call);
+extern int trace_event_get_offsets(struct trace_event_call *call);
 
 #define is_signed_type(type)	(((type)(-1)) < (type)1)
 
@@ -605,15 +490,20 @@
 extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
 				     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
-extern void *perf_trace_buf_prepare(int size, unsigned short type,
-				    struct pt_regs **regs, int *rctxp);
+void perf_trace_buf_update(void *record, u16 type);
+void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
+
+void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
+			       struct trace_event_call *call, u64 count,
+			       struct pt_regs *regs, struct hlist_head *head,
+			       struct task_struct *task);
 
 static inline void
-perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
 		       u64 count, struct pt_regs *regs, void *head,
 		       struct task_struct *task)
 {
-	perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
+	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
 }
 #endif
 
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 3b09f23..17b247c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -371,6 +371,7 @@
 extern struct tty_struct *get_current_tty(void);
 /* tty_io.c */
 extern int __init tty_init(void);
+extern const char *tty_name(const struct tty_struct *tty);
 #else
 static inline void console_init(void)
 { }
@@ -391,6 +392,8 @@
 /* tty_io.c */
 static inline int __init tty_init(void)
 { return 0; }
+static inline const char *tty_name(const struct tty_struct *tty)
+{ return "(none)"; }
 #endif
 
 extern struct ktermios tty_std_termios;
@@ -415,7 +418,6 @@
 	return tty;
 }
 
-extern const char *tty_name(const struct tty_struct *tty);
 extern const char *tty_driver_name(const struct tty_struct *tty);
 extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
 extern int __tty_check_change(struct tty_struct *tty, int sig);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 1610524..b742b5e 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -7,7 +7,7 @@
  * defined; unless noted otherwise, they are optional, and can be
  * filled in with a null pointer.
  *
- * struct tty_struct * (*lookup)(struct tty_driver *self, int idx)
+ * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
  *
  *	Return the tty device corresponding to idx, NULL if there is not
  *	one currently in use and an ERR_PTR value on error. Called under
@@ -250,7 +250,7 @@
 
 struct tty_operations {
 	struct tty_struct * (*lookup)(struct tty_driver *driver,
-			struct inode *inode, int idx);
+			struct file *filp, int idx);
 	int  (*install)(struct tty_driver *driver, struct tty_struct *tty);
 	void (*remove)(struct tty_driver *driver, struct tty_struct *tty);
 	int  (*open)(struct tty_struct * tty, struct file * filp);
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index df89c9bc..d3a2bb7 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -89,6 +89,20 @@
 #endif
 }
 
+static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	raw_write_seqcount_begin(&syncp->seq);
+#endif
+}
+
+static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	raw_write_seqcount_end(&syncp->seq);
+#endif
+}
+
 static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
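
The _raw variants bracket an update exactly like u64_stats_update_begin()/end(), just through the raw seqcount primitives; a sketch (the stats structure is hypothetical and assumed to embed a struct u64_stats_sync syncp):

	u64_stats_update_begin_raw(&stats->syncp);
	stats->rx_bytes += len;
	stats->rx_packets++;
	u64_stats_update_end_raw(&stats->syncp);
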
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 87c0949..d1fd8cd 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -71,6 +71,14 @@
 	 */
 	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 	void (*encap_destroy)(struct sock *sk);
+
+	/* GRO functions for UDP socket */
+	struct sk_buff **	(*gro_receive)(struct sock *sk,
+					       struct sk_buff **head,
+					       struct sk_buff *skb);
+	int			(*gro_complete)(struct sock *sk,
+						struct sk_buff *skb,
+						int nhoff);
 };
 
 static inline struct udp_sock *udp_sk(const struct sock *sk)
@@ -98,11 +106,11 @@
 	return udp_sk(sk)->no_check6_rx;
 }
 
-#define udp_portaddr_for_each_entry(__sk, node, list) \
-	hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
+#define udp_portaddr_for_each_entry(__sk, list) \
+	hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
 
-#define udp_portaddr_for_each_entry_rcu(__sk, node, list) \
-	hlist_nulls_for_each_entry_rcu(__sk, node, list, __sk_common.skc_portaddr_node)
+#define udp_portaddr_for_each_entry_rcu(__sk, list) \
+	hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
 
 #define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag)
 
diff --git a/include/linux/uio.h b/include/linux/uio.h
index fd9bcfe..1b5d1cd 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -87,6 +87,7 @@
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
 			unsigned long nr_segs, size_t count);
 void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 7f5f78b..245f57d 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -79,6 +79,8 @@
 		/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */	\
 	US_FLAG(MAX_SECTORS_240,	0x08000000)		\
 		/* Sets max_sectors to 240 */			\
+	US_FLAG(NO_REPORT_LUNS,	0x10000000)			\
+		/* Cannot handle REPORT_LUNS */			\
 
 #define US_FLAG(name, value)	US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/verification.h b/include/linux/verification.h
new file mode 100644
index 0000000..a10549a
--- /dev/null
+++ b/include/linux/verification.h
@@ -0,0 +1,49 @@
+/* Signature verification
+ *
+ * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_VERIFICATION_H
+#define _LINUX_VERIFICATION_H
+
+/*
+ * The use to which an asymmetric key is being put.
+ */
+enum key_being_used_for {
+	VERIFYING_MODULE_SIGNATURE,
+	VERIFYING_FIRMWARE_SIGNATURE,
+	VERIFYING_KEXEC_PE_SIGNATURE,
+	VERIFYING_KEY_SIGNATURE,
+	VERIFYING_KEY_SELF_SIGNATURE,
+	VERIFYING_UNSPECIFIED_SIGNATURE,
+	NR__KEY_BEING_USED_FOR
+};
+extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
+
+#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
+
+struct key;
+
+extern int verify_pkcs7_signature(const void *data, size_t len,
+				  const void *raw_pkcs7, size_t pkcs7_len,
+				  struct key *trusted_keys,
+				  enum key_being_used_for usage,
+				  int (*view_content)(void *ctx,
+						      const void *data, size_t len,
+						      size_t asn1hdrlen),
+				  void *ctx);
+
+#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
+extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
+				   struct key *trusted_keys,
+				   enum key_being_used_for usage);
+#endif
+
+#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
+#endif /* _LINUX_VERIFY_PEFILE_H */
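
For callers that only need a pass/fail answer on a detached PKCS#7 signature,
the helper can be used directly. A minimal sketch, assuming the blob, the raw
signature and a trusted keyring were obtained elsewhere; @view_content is left
NULL because the data is not carried inside the PKCS#7 message:

static int example_check_blob(const void *data, size_t len,
			      const void *sig, size_t sig_len,
			      struct key *keyring)
{
	return verify_pkcs7_signature(data, len, sig, sig_len, keyring,
				      VERIFYING_UNSPECIFIED_SIGNATURE,
				      NULL, NULL);
}
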
diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h
deleted file mode 100644
index da2049b..0000000
--- a/include/linux/verify_pefile.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Signed PE file verification
- *
- * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _LINUX_VERIFY_PEFILE_H
-#define _LINUX_VERIFY_PEFILE_H
-
-#include <crypto/public_key.h>
-
-extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
-				   struct key *trusted_keyring,
-				   enum key_being_used_for usage,
-				   bool *_trusted);
-
-#endif /* _LINUX_VERIFY_PEFILE_H */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 4457541..1cc4c57 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -30,7 +30,8 @@
 	int flags;      /* fs private flags */
 	bool (*list)(struct dentry *dentry);
 	int (*get)(const struct xattr_handler *, struct dentry *dentry,
-		   const char *name, void *buffer, size_t size);
+		   struct inode *inode, const char *name, void *buffer,
+		   size_t size);
 	int (*set)(const struct xattr_handler *, struct dentry *dentry,
 		   const char *name, const void *buffer, size_t size,
 		   int flags);
@@ -51,7 +52,7 @@
 int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
 int vfs_removexattr(struct dentry *, const char *);
 
-ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
+ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size);
 ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
 int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
 int generic_removexattr(struct dentry *dentry, const char *name);
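
With the ->get() change, handlers receive the inode explicitly instead of
deriving it from the dentry. A hypothetical handler showing the new signature
(names and the fixed value are purely illustrative):

static int example_xattr_get(const struct xattr_handler *handler,
			     struct dentry *dentry, struct inode *inode,
			     const char *name, void *buffer, size_t size)
{
	static const char value[] = "example";

	/* use the inode passed in rather than dentry->d_inode */
	if (!buffer)
		return sizeof(value);	/* size query */
	if (size < sizeof(value))
		return -ERANGE;
	memcpy(buffer, value, sizeof(value));
	return sizeof(value);
}

static const struct xattr_handler example_xattr_handler = {
	.prefix	= "user.",
	.get	= example_xattr_get,
};
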
diff --git a/include/media/media-device.h b/include/media/media-device.h
index df74cfa..a9b33c4 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -25,7 +25,6 @@
 
 #include <linux/list.h>
 #include <linux/mutex.h>
-#include <linux/spinlock.h>
 
 #include <media/media-devnode.h>
 #include <media/media-entity.h>
@@ -304,8 +303,7 @@
  * @pads:	List of registered pads
  * @links:	List of registered links
  * @entity_notify: List of registered entity_notify callbacks
- * @lock:	Entities list lock
- * @graph_mutex: Entities graph operation lock
+ * @graph_mutex: Protects access to struct media_device data
  * @pm_count_walk: Graph walk for power state walk. Access serialised using
  *		   graph_mutex.
  *
@@ -313,7 +311,8 @@
  * @enable_source: Enable Source Handler function pointer
  * @disable_source: Disable Source Handler function pointer
  *
- * @link_notify: Link state change notification callback
+ * @link_notify: Link state change notification callback. This callback is
+ *		 called with the graph_mutex held.
  *
  * This structure represents an abstract high-level media device. It allows easy
  * access to entities and provides basic media device-level support. The
@@ -357,7 +356,7 @@
 	u32 hw_revision;
 	u32 driver_version;
 
-	u32 topology_version;
+	u64 topology_version;
 
 	u32 id;
 	struct ida entity_internal_idx;
@@ -371,8 +370,6 @@
 	/* notify callback list invoked when a new entity is registered */
 	struct list_head entity_notify;
 
-	/* Protects the graph objects creation/removal */
-	spinlock_t lock;
 	/* Serializes graph operations. */
 	struct mutex graph_mutex;
 	struct media_entity_graph pm_count_walk;
@@ -494,7 +491,7 @@
 #define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE)
 
 /**
- * __media_device_unregister() - Unegisters a media device element
+ * media_device_unregister() - Unregisters a media device element
  *
  * @mdev:	pointer to struct &media_device
  *
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 6dc9e4e..cbb266f 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -179,6 +179,9 @@
  * @link_validate:	Return whether a link is valid from the entity point of
  *			view. The media_entity_pipeline_start() function
  *			validates all links by calling this operation. Optional.
+ *
+ * Note: These callbacks are called with struct media_device.@graph_mutex
+ * mutex held.
  */
 struct media_entity_operations {
 	int (*link_setup)(struct media_entity *entity,
@@ -188,10 +191,38 @@
 };
 
 /**
+ * enum media_entity_type - Media entity type
+ *
+ * @MEDIA_ENTITY_TYPE_BASE:
+ *	The entity isn't embedded in another subsystem structure.
+ * @MEDIA_ENTITY_TYPE_VIDEO_DEVICE:
+ *	The entity is embedded in a struct video_device instance.
+ * @MEDIA_ENTITY_TYPE_V4L2_SUBDEV:
+ *	The entity is embedded in a struct v4l2_subdev instance.
+ *
+ * Media entity objects are often not instantiated directly, but the media
+ * entity structure is inherited by (through embedding) other subsystem-specific
+ * structures. The media entity type identifies the type of the subclass
+ * structure that implements a media entity instance.
+ *
+ * This allows runtime type identification of media entities and safe casting to
+ * the correct object type. For instance, a media entity structure instance
+ * embedded in a v4l2_subdev structure instance will have the type
+ * MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a v4l2_subdev
+ * structure using the container_of() macro.
+ */
+enum media_entity_type {
+	MEDIA_ENTITY_TYPE_BASE,
+	MEDIA_ENTITY_TYPE_VIDEO_DEVICE,
+	MEDIA_ENTITY_TYPE_V4L2_SUBDEV,
+};
+
+/**
  * struct media_entity - A media entity graph object.
  *
  * @graph_obj:	Embedded structure containing the media object common data.
  * @name:	Entity name.
+ * @obj_type:	Type of the object that implements the media_entity.
  * @function:	Entity main function, as defined in uapi/media.h
  *		(MEDIA_ENT_F_*)
  * @flags:	Entity flags, as defined in uapi/media.h (MEDIA_ENT_FL_*)
@@ -220,6 +251,7 @@
 struct media_entity {
 	struct media_gobj graph_obj;	/* must be first field in struct */
 	const char *name;
+	enum media_entity_type obj_type;
 	u32 function;
 	unsigned long flags;
 
@@ -329,56 +361,29 @@
 }
 
 /**
- * is_media_entity_v4l2_io() - identify if the entity main function
- *			       is a V4L2 I/O
- *
+ * is_media_entity_v4l2_video_device() - Check if the entity is a video_device
  * @entity:	pointer to entity
  *
- * Return: true if the entity main function is one of the V4L2 I/O types
- *	(video, VBI or SDR radio); false otherwise.
+ * Return: true if the entity is an instance of a video_device object and can
+ * safely be cast to a struct video_device using the container_of() macro, or
+ * false otherwise.
  */
-static inline bool is_media_entity_v4l2_io(struct media_entity *entity)
+static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity)
 {
-	if (!entity)
-		return false;
-
-	switch (entity->function) {
-	case MEDIA_ENT_F_IO_V4L:
-	case MEDIA_ENT_F_IO_VBI:
-	case MEDIA_ENT_F_IO_SWRADIO:
-		return true;
-	default:
-		return false;
-	}
+	return entity && entity->obj_type == MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
 }
 
 /**
- * is_media_entity_v4l2_subdev - return true if the entity main function is
- *				 associated with the V4L2 API subdev usage
- *
+ * is_media_entity_v4l2_subdev() - Check if the entity is a v4l2_subdev
  * @entity:	pointer to entity
  *
- * This is an ancillary function used by subdev-based V4L2 drivers.
- * It checks if the entity function is one of functions used by a V4L2 subdev,
- * e. g. camera-relatef functions, analog TV decoder, TV tuner, V4L2 DSPs.
+ * Return: true if the entity is an instance of a v4l2_subdev object and can
+ * safely be cast to a struct v4l2_subdev using the container_of() macro, or
+ * false otherwise.
  */
 static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity)
 {
-	if (!entity)
-		return false;
-
-	switch (entity->function) {
-	case MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN:
-	case MEDIA_ENT_F_CAM_SENSOR:
-	case MEDIA_ENT_F_FLASH:
-	case MEDIA_ENT_F_LENS:
-	case MEDIA_ENT_F_ATV_DECODER:
-	case MEDIA_ENT_F_TUNER:
-		return true;
-
-	default:
-		return false;
-	}
+	return entity && entity->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
 }
 
 /**
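
The new obj_type field makes the container_of() pattern described above safe.
A short sketch of going from a generic entity back to its v4l2_subdev; the
helper name is hypothetical:

#include <media/v4l2-subdev.h>

static struct v4l2_subdev *example_entity_to_subdev(struct media_entity *entity)
{
	if (!is_media_entity_v4l2_subdev(entity))
		return NULL;

	/* safe: obj_type guarantees the entity is embedded in a v4l2_subdev */
	return container_of(entity, struct v4l2_subdev, entity);
}
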
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 0f77b3d..b6586a9 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -215,12 +215,9 @@
 struct ir_raw_event {
 	union {
 		u32             duration;
-
-		struct {
-			u32     carrier;
-			u8      duty_cycle;
-		};
+		u32             carrier;
 	};
+	u8                      duty_cycle;
 
 	unsigned                pulse:1;
 	unsigned                reset:1;
@@ -228,13 +225,7 @@
 	unsigned                carrier_report:1;
 };
 
-#define DEFINE_IR_RAW_EVENT(event) \
-	struct ir_raw_event event = { \
-		{ .duration = 0 } , \
-		.pulse = 0, \
-		.reset = 0, \
-		.timeout = 0, \
-		.carrier_report = 0 }
+#define DEFINE_IR_RAW_EVENT(event) struct ir_raw_event event = {}
 
 static inline void init_ir_raw_event(struct ir_raw_event *ev)
 {
@@ -256,8 +247,7 @@
 
 static inline void ir_raw_event_reset(struct rc_dev *dev)
 {
-	DEFINE_IR_RAW_EVENT(ev);
-	ev.reset = true;
+	struct ir_raw_event ev = { .reset = true };
 
 	ir_raw_event_store(dev, &ev);
 	ir_raw_event_handle(dev);
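
With the flattened layout, raw IR events are naturally built with designated
initializers instead of DEFINE_IR_RAW_EVENT() plus field assignments. A sketch
of storing a single pulse, assuming the driver already owns an rc_dev:

static void example_store_pulse(struct rc_dev *dev, u32 duration_ns)
{
	struct ir_raw_event ev = {
		.pulse    = true,
		.duration = duration_ns,
	};

	ir_raw_event_store(dev, &ev);
	ir_raw_event_handle(dev);
}
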
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 76056ab..25a3190 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -92,6 +92,9 @@
 	/* device ops */
 	const struct v4l2_file_operations *fops;
 
+	/* device capabilities as used in v4l2_capabilities */
+	u32 device_caps;
+
 	/* sysfs */
 	struct device dev;		/* v4l device */
 	struct cdev *cdev;		/* character device */
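
Drivers are expected to fill device_caps before registering the node so the
core can report per-node capabilities. A minimal sketch for a capture device;
the flags depend on what the driver actually supports:

static int example_register_video_node(struct video_device *vdev)
{
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;

	return video_register_device(vdev, VFL_TYPE_GRABBER, -1);
}
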
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 9c58157..d5d45a8 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -196,11 +196,64 @@
 			##args);					\
 })
 
-#define v4l2_device_has_op(v4l2_dev, o, f)				\
+/*
+ * Call the specified callback for all subdevs where grp_id & grpmsk != 0
+ * (if grpmsk == 0, then match them all). Ignore any errors. Note that you
+ * cannot add or delete a subdev while walking the subdevs list.
+ */
+#define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...)	\
+	do {								\
+		struct v4l2_subdev *__sd;				\
+									\
+		__v4l2_device_call_subdevs_p(v4l2_dev, __sd,		\
+			!(grpmsk) || (__sd->grp_id & (grpmsk)), o, f ,	\
+			##args);					\
+	} while (0)
+
+/*
+ * Call the specified callback for all subdevs where grp_id & grpmsk != 0
+ * (if grpmsk == 0, then match them all). If the callback returns an error
+ * other than 0 or -ENOIOCTLCMD, then return with that error code. Note that
+ * you cannot add or delete a subdev while walking the subdevs list.
+ */
+#define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \
+({									\
+	struct v4l2_subdev *__sd;					\
+	__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd,		\
+			!(grpmsk) || (__sd->grp_id & (grpmsk)), o, f ,	\
+			##args);					\
+})
+
+/*
+ * Does any subdev with matching grpid (or all if grpid == 0) have the given
+ * op?
+ */
+#define v4l2_device_has_op(v4l2_dev, grpid, o, f)			\
 ({									\
 	struct v4l2_subdev *__sd;					\
 	bool __result = false;						\
 	list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) {		\
+		if ((grpid) && __sd->grp_id != (grpid))			\
+			continue;					\
+		if (v4l2_subdev_has_op(__sd, o, f)) {			\
+			__result = true;				\
+			break;						\
+		}							\
+	}								\
+	__result;							\
+})
+
+/*
+ * Does any subdev with matching grpmsk (or all if grpmsk == 0) have the given
+ * op?
+ */
+#define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f)			\
+({									\
+	struct v4l2_subdev *__sd;					\
+	bool __result = false;						\
+	list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) {		\
+		if ((grpmsk) && !(__sd->grp_id & (grpmsk)))		\
+			continue;					\
 		if (v4l2_subdev_has_op(__sd, o, f)) {			\
 			__result = true;				\
 			break;						\
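
The *_mask_* variants mirror the grp_id-based helpers but match on a bitmask.
A sketch of a bridge driver starting streaming only on subdevs whose grp_id
intersects a hypothetical sensor mask:

#define EXAMPLE_GRP_SENSORS	BIT(0)	/* hypothetical group mask */

static void example_start_sensors(struct v4l2_device *v4l2_dev)
{
	/* errors are ignored, as with v4l2_device_call_all() */
	v4l2_device_mask_call_all(v4l2_dev, EXAMPLE_GRP_SENSORS,
				  video, s_stream, 1);
}
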
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
new file mode 100644
index 0000000..d2125f0
--- /dev/null
+++ b/include/media/v4l2-rect.h
@@ -0,0 +1,173 @@
+/*
+ * v4l2-rect.h - v4l2_rect helper functions
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _V4L2_RECT_H_
+#define _V4L2_RECT_H_
+
+#include <linux/videodev2.h>
+
+/**
+ * v4l2_rect_set_size_to() - copy the width/height values.
+ * @r: rect whose width and height fields will be set
+ * @size: rect containing the width and height fields you need.
+ */
+static inline void v4l2_rect_set_size_to(struct v4l2_rect *r,
+					 const struct v4l2_rect *size)
+{
+	r->width = size->width;
+	r->height = size->height;
+}
+
+/**
+ * v4l2_rect_set_min_size() - width and height of r should be >= min_size.
+ * @r: rect whose width and height will be modified
+ * @min_size: rect containing the minimal width and height
+ */
+static inline void v4l2_rect_set_min_size(struct v4l2_rect *r,
+					  const struct v4l2_rect *min_size)
+{
+	if (r->width < min_size->width)
+		r->width = min_size->width;
+	if (r->height < min_size->height)
+		r->height = min_size->height;
+}
+
+/**
+ * v4l2_rect_set_max_size() - width and height of r should be <= max_size
+ * @r: rect whose width and height will be modified
+ * @max_size: rect containing the maximum width and height
+ */
+static inline void v4l2_rect_set_max_size(struct v4l2_rect *r,
+					  const struct v4l2_rect *max_size)
+{
+	if (r->width > max_size->width)
+		r->width = max_size->width;
+	if (r->height > max_size->height)
+		r->height = max_size->height;
+}
+
+/**
+ * v4l2_rect_map_inside()- r should be inside boundary.
+ * @r: rect that will be modified
+ * @boundary: rect containing the boundary for @r
+ */
+static inline void v4l2_rect_map_inside(struct v4l2_rect *r,
+					const struct v4l2_rect *boundary)
+{
+	v4l2_rect_set_max_size(r, boundary);
+	if (r->left < boundary->left)
+		r->left = boundary->left;
+	if (r->top < boundary->top)
+		r->top = boundary->top;
+	if (r->left + r->width > boundary->width)
+		r->left = boundary->width - r->width;
+	if (r->top + r->height > boundary->height)
+		r->top = boundary->height - r->height;
+}
+
+/**
+ * v4l2_rect_same_size() - return true if r1 has the same size as r2
+ * @r1: rectangle.
+ * @r2: rectangle.
+ *
+ * Return true if both rectangles have the same size.
+ */
+static inline bool v4l2_rect_same_size(const struct v4l2_rect *r1,
+				       const struct v4l2_rect *r2)
+{
+	return r1->width == r2->width && r1->height == r2->height;
+}
+
+/**
+ * v4l2_rect_intersect() - calculate the intersection of two rects.
+ * @r: intersection of @r1 and @r2.
+ * @r1: rectangle.
+ * @r2: rectangle.
+ */
+static inline void v4l2_rect_intersect(struct v4l2_rect *r,
+				       const struct v4l2_rect *r1,
+				       const struct v4l2_rect *r2)
+{
+	int right, bottom;
+
+	r->top = max(r1->top, r2->top);
+	r->left = max(r1->left, r2->left);
+	bottom = min(r1->top + r1->height, r2->top + r2->height);
+	right = min(r1->left + r1->width, r2->left + r2->width);
+	r->height = max(0, bottom - r->top);
+	r->width = max(0, right - r->left);
+}
+
+/**
+ * v4l2_rect_scale() - scale rect r by to/from
+ * @r: rect to be scaled.
+ * @from: from rectangle.
+ * @to: to rectangle.
+ *
+ * This scales rectangle @r horizontally by @to->width / @from->width and
+ * vertically by @to->height / @from->height.
+ *
+ * Typically @r is a rectangle inside @from and you want the rectangle as
+ * it would appear after scaling @from to @to. So the resulting @r will
+ * be the scaled rectangle inside @to.
+ */
+static inline void v4l2_rect_scale(struct v4l2_rect *r,
+				   const struct v4l2_rect *from,
+				   const struct v4l2_rect *to)
+{
+	if (from->width == 0 || from->height == 0) {
+		r->left = r->top = r->width = r->height = 0;
+		return;
+	}
+	r->left = (((r->left - from->left) * to->width) / from->width) & ~1;
+	r->width = ((r->width * to->width) / from->width) & ~1;
+	r->top = ((r->top - from->top) * to->height) / from->height;
+	r->height = (r->height * to->height) / from->height;
+}
+
+/**
+ * v4l2_rect_overlap() - do r1 and r2 overlap?
+ * @r1: rectangle.
+ * @r2: rectangle.
+ *
+ * Returns true if @r1 and @r2 overlap.
+ */
+static inline bool v4l2_rect_overlap(const struct v4l2_rect *r1,
+				     const struct v4l2_rect *r2)
+{
+	/*
+	 * IF the left side of r1 is to the right of the right side of r2 OR
+	 *    the left side of r2 is to the right of the right side of r1 THEN
+	 * they do not overlap.
+	 */
+	if (r1->left >= r2->left + r2->width ||
+	    r2->left >= r1->left + r1->width)
+		return false;
+	/*
+	 * IF the top side of r1 is below the bottom of r2 OR
+	 *    the top side of r2 is below the bottom of r1 THEN
+	 * they do not overlap.
+	 */
+	if (r1->top >= r2->top + r2->height ||
+	    r2->top >= r1->top + r1->height)
+		return false;
+	return true;
+}
+
+#endif
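
A short usage sketch of the helpers above: clamp a user-supplied crop rectangle
to the frame, then compute its overlap with a (hypothetical) window:

static void example_clamp_crop(struct v4l2_rect *crop,
			       const struct v4l2_rect *frame,
			       const struct v4l2_rect *win)
{
	struct v4l2_rect overlap;

	/* keep the requested crop inside the frame */
	v4l2_rect_map_inside(crop, frame);

	/* how much of the window does the crop still cover? */
	if (v4l2_rect_overlap(crop, win))
		v4l2_rect_intersect(&overlap, crop, win);
}
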
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 11e2dfe..32fc7a4 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -572,6 +572,7 @@
 /**
  * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
  *
+ * @init_cfg: initialize the pad config to default values
  * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE ioctl handler
  *		    code.
  * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE ioctl handler
@@ -607,6 +608,8 @@
  *                  may be adjusted by the subdev driver to device capabilities.
  */
 struct v4l2_subdev_pad_ops {
+	int (*init_cfg)(struct v4l2_subdev *sd,
+			struct v4l2_subdev_pad_config *cfg);
 	int (*enum_mbus_code)(struct v4l2_subdev *sd,
 			      struct v4l2_subdev_pad_config *cfg,
 			      struct v4l2_subdev_mbus_code_enum *code);
@@ -801,7 +804,12 @@
 				      struct v4l2_subdev_format *source_fmt,
 				      struct v4l2_subdev_format *sink_fmt);
 int v4l2_subdev_link_validate(struct media_link *link);
+
+struct v4l2_subdev_pad_config *
+v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd);
+void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg);
 #endif /* CONFIG_MEDIA_CONTROLLER */
+
 void v4l2_subdev_init(struct v4l2_subdev *sd,
 		      const struct v4l2_subdev_ops *ops);
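
init_cfg gives a subdev driver one place to set its TRY state to sane defaults;
v4l2_subdev_alloc_pad_config()/v4l2_subdev_free_pad_config() are expected to be
how bridge code obtains and releases such an initialised config. A sketch for a
hypothetical one-pad sensor with a fixed default format:

static int example_init_cfg(struct v4l2_subdev *sd,
			    struct v4l2_subdev_pad_config *cfg)
{
	struct v4l2_mbus_framefmt *fmt = v4l2_subdev_get_try_format(sd, cfg, 0);

	fmt->width  = 1280;
	fmt->height = 720;
	fmt->code   = MEDIA_BUS_FMT_SRGGB10_1X10;
	fmt->field  = V4L2_FIELD_NONE;
	return 0;
}
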
 
diff --git a/include/media/v4l2-tpg-colors.h b/include/media/v4l2-tpg-colors.h
new file mode 100644
index 0000000..2a88d1f
--- /dev/null
+++ b/include/media/v4l2-tpg-colors.h
@@ -0,0 +1,68 @@
+/*
+ * v4l2-tpg-colors.h - Color definitions for the test pattern generator
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _V4L2_TPG_COLORS_H_
+#define _V4L2_TPG_COLORS_H_
+
+struct color {
+	unsigned char r, g, b;
+};
+
+struct color16 {
+	int r, g, b;
+};
+
+enum tpg_color {
+	TPG_COLOR_CSC_WHITE,
+	TPG_COLOR_CSC_YELLOW,
+	TPG_COLOR_CSC_CYAN,
+	TPG_COLOR_CSC_GREEN,
+	TPG_COLOR_CSC_MAGENTA,
+	TPG_COLOR_CSC_RED,
+	TPG_COLOR_CSC_BLUE,
+	TPG_COLOR_CSC_BLACK,
+	TPG_COLOR_75_YELLOW,
+	TPG_COLOR_75_CYAN,
+	TPG_COLOR_75_GREEN,
+	TPG_COLOR_75_MAGENTA,
+	TPG_COLOR_75_RED,
+	TPG_COLOR_75_BLUE,
+	TPG_COLOR_100_WHITE,
+	TPG_COLOR_100_YELLOW,
+	TPG_COLOR_100_CYAN,
+	TPG_COLOR_100_GREEN,
+	TPG_COLOR_100_MAGENTA,
+	TPG_COLOR_100_RED,
+	TPG_COLOR_100_BLUE,
+	TPG_COLOR_100_BLACK,
+	TPG_COLOR_TEXTFG,
+	TPG_COLOR_TEXTBG,
+	TPG_COLOR_RANDOM,
+	TPG_COLOR_RAMP,
+	TPG_COLOR_MAX = TPG_COLOR_RAMP + 256
+};
+
+extern const struct color tpg_colors[TPG_COLOR_MAX];
+extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
+extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
+extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1]
+					  [V4L2_XFER_FUNC_SMPTE2084 + 1]
+					  [TPG_COLOR_CSC_BLACK + 1];
+
+#endif
diff --git a/include/media/v4l2-tpg.h b/include/media/v4l2-tpg.h
new file mode 100644
index 0000000..329bebf
--- /dev/null
+++ b/include/media/v4l2-tpg.h
@@ -0,0 +1,597 @@
+/*
+ * v4l2-tpg.h - Test Pattern Generator
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _V4L2_TPG_H_
+#define _V4L2_TPG_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-tpg-colors.h>
+
+enum tpg_pattern {
+	TPG_PAT_75_COLORBAR,
+	TPG_PAT_100_COLORBAR,
+	TPG_PAT_CSC_COLORBAR,
+	TPG_PAT_100_HCOLORBAR,
+	TPG_PAT_100_COLORSQUARES,
+	TPG_PAT_BLACK,
+	TPG_PAT_WHITE,
+	TPG_PAT_RED,
+	TPG_PAT_GREEN,
+	TPG_PAT_BLUE,
+	TPG_PAT_CHECKERS_16X16,
+	TPG_PAT_CHECKERS_2X2,
+	TPG_PAT_CHECKERS_1X1,
+	TPG_PAT_COLOR_CHECKERS_2X2,
+	TPG_PAT_COLOR_CHECKERS_1X1,
+	TPG_PAT_ALTERNATING_HLINES,
+	TPG_PAT_ALTERNATING_VLINES,
+	TPG_PAT_CROSS_1_PIXEL,
+	TPG_PAT_CROSS_2_PIXELS,
+	TPG_PAT_CROSS_10_PIXELS,
+	TPG_PAT_GRAY_RAMP,
+
+	/* Must be the last pattern */
+	TPG_PAT_NOISE,
+};
+
+extern const char * const tpg_pattern_strings[];
+
+enum tpg_quality {
+	TPG_QUAL_COLOR,
+	TPG_QUAL_GRAY,
+	TPG_QUAL_NOISE
+};
+
+enum tpg_video_aspect {
+	TPG_VIDEO_ASPECT_IMAGE,
+	TPG_VIDEO_ASPECT_4X3,
+	TPG_VIDEO_ASPECT_14X9_CENTRE,
+	TPG_VIDEO_ASPECT_16X9_CENTRE,
+	TPG_VIDEO_ASPECT_16X9_ANAMORPHIC,
+};
+
+enum tpg_pixel_aspect {
+	TPG_PIXEL_ASPECT_SQUARE,
+	TPG_PIXEL_ASPECT_NTSC,
+	TPG_PIXEL_ASPECT_PAL,
+};
+
+enum tpg_move_mode {
+	TPG_MOVE_NEG_FAST,
+	TPG_MOVE_NEG,
+	TPG_MOVE_NEG_SLOW,
+	TPG_MOVE_NONE,
+	TPG_MOVE_POS_SLOW,
+	TPG_MOVE_POS,
+	TPG_MOVE_POS_FAST,
+};
+
+extern const char * const tpg_aspect_strings[];
+
+#define TPG_MAX_PLANES 3
+#define TPG_MAX_PAT_LINES 8
+
+struct tpg_data {
+	/* Source frame size */
+	unsigned			src_width, src_height;
+	/* Buffer height */
+	unsigned			buf_height;
+	/* Scaled output frame size */
+	unsigned			scaled_width;
+	u32				field;
+	bool				field_alternate;
+	/* crop coordinates are frame-based */
+	struct v4l2_rect		crop;
+	/* compose coordinates are format-based */
+	struct v4l2_rect		compose;
+	/* border and square coordinates are frame-based */
+	struct v4l2_rect		border;
+	struct v4l2_rect		square;
+
+	/* Color-related fields */
+	enum tpg_quality		qual;
+	unsigned			qual_offset;
+	u8				alpha_component;
+	bool				alpha_red_only;
+	u8				brightness;
+	u8				contrast;
+	u8				saturation;
+	s16				hue;
+	u32				fourcc;
+	bool				is_yuv;
+	u32				colorspace;
+	u32				xfer_func;
+	u32				ycbcr_enc;
+	/*
+	 * Stores the actual transfer function, i.e. will never be
+	 * V4L2_XFER_FUNC_DEFAULT.
+	 */
+	u32				real_xfer_func;
+	/*
+	 * Stores the actual Y'CbCr encoding, i.e. will never be
+	 * V4L2_YCBCR_ENC_DEFAULT.
+	 */
+	u32				real_ycbcr_enc;
+	u32				quantization;
+	/*
+	 * Stores the actual quantization, i.e. will never be
+	 * V4L2_QUANTIZATION_DEFAULT.
+	 */
+	u32				real_quantization;
+	enum tpg_video_aspect		vid_aspect;
+	enum tpg_pixel_aspect		pix_aspect;
+	unsigned			rgb_range;
+	unsigned			real_rgb_range;
+	unsigned			buffers;
+	unsigned			planes;
+	bool				interleaved;
+	u8				vdownsampling[TPG_MAX_PLANES];
+	u8				hdownsampling[TPG_MAX_PLANES];
+	/*
+	 * horizontal positions must be ANDed with this value to enforce
+	 * correct boundaries for packed YUYV values.
+	 */
+	unsigned			hmask[TPG_MAX_PLANES];
+	/* Used to store the colors in native format, either RGB or YUV */
+	u8				colors[TPG_COLOR_MAX][3];
+	u8				textfg[TPG_MAX_PLANES][8], textbg[TPG_MAX_PLANES][8];
+	/* size in bytes for two pixels in each plane */
+	unsigned			twopixelsize[TPG_MAX_PLANES];
+	unsigned			bytesperline[TPG_MAX_PLANES];
+
+	/* Configuration */
+	enum tpg_pattern		pattern;
+	bool				hflip;
+	bool				vflip;
+	unsigned			perc_fill;
+	bool				perc_fill_blank;
+	bool				show_border;
+	bool				show_square;
+	bool				insert_sav;
+	bool				insert_eav;
+
+	/* Test pattern movement */
+	enum tpg_move_mode		mv_hor_mode;
+	int				mv_hor_count;
+	int				mv_hor_step;
+	enum tpg_move_mode		mv_vert_mode;
+	int				mv_vert_count;
+	int				mv_vert_step;
+
+	bool				recalc_colors;
+	bool				recalc_lines;
+	bool				recalc_square_border;
+
+	/* Used to store TPG_MAX_PAT_LINES lines, each with up to two planes */
+	unsigned			max_line_width;
+	u8				*lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
+	u8				*downsampled_lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
+	u8				*random_line[TPG_MAX_PLANES];
+	u8				*contrast_line[TPG_MAX_PLANES];
+	u8				*black_line[TPG_MAX_PLANES];
+};
+
+void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h);
+int tpg_alloc(struct tpg_data *tpg, unsigned max_w);
+void tpg_free(struct tpg_data *tpg);
+void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
+		       u32 field);
+void tpg_log_status(struct tpg_data *tpg);
+
+void tpg_set_font(const u8 *f);
+void tpg_gen_text(const struct tpg_data *tpg,
+		u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text);
+void tpg_calc_text_basep(struct tpg_data *tpg,
+		u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
+unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line);
+void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
+			   unsigned p, u8 *vbuf);
+void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std,
+		    unsigned p, u8 *vbuf);
+bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc);
+void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
+		const struct v4l2_rect *compose);
+
+static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern)
+{
+	if (tpg->pattern == pattern)
+		return;
+	tpg->pattern = pattern;
+	tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_quality(struct tpg_data *tpg,
+				    enum tpg_quality qual, unsigned qual_offset)
+{
+	if (tpg->qual == qual && tpg->qual_offset == qual_offset)
+		return;
+	tpg->qual = qual;
+	tpg->qual_offset = qual_offset;
+	tpg->recalc_colors = true;
+}
+
+static inline enum tpg_quality tpg_g_quality(const struct tpg_data *tpg)
+{
+	return tpg->qual;
+}
+
+static inline void tpg_s_alpha_component(struct tpg_data *tpg,
+					    u8 alpha_component)
+{
+	if (tpg->alpha_component == alpha_component)
+		return;
+	tpg->alpha_component = alpha_component;
+	tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_alpha_mode(struct tpg_data *tpg,
+					    bool red_only)
+{
+	if (tpg->alpha_red_only == red_only)
+		return;
+	tpg->alpha_red_only = red_only;
+	tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_brightness(struct tpg_data *tpg,
+					u8 brightness)
+{
+	if (tpg->brightness == brightness)
+		return;
+	tpg->brightness = brightness;
+	tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_contrast(struct tpg_data *tpg,
+					u8 contrast)
+{
+	if (tpg->contrast == contrast)
+		return;
+	tpg->contrast = contrast;
+	tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_saturation(struct tpg_data *tpg,
+					u8 saturation)
+{
+	if (tpg->saturation == saturation)
+		return;
+	tpg->saturation = saturation;
+	tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_hue(struct tpg_data *tpg,
+					s16 hue)
+{
+	if (tpg->hue == hue)
+		return;
+	tpg->hue = hue;
+	tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_rgb_range(struct tpg_data *tpg,
+					unsigned rgb_range)
+{
+	if (tpg->rgb_range == rgb_range)
+		return;
+	tpg->rgb_range = rgb_range;
+	tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_real_rgb_range(struct tpg_data *tpg,
+					unsigned rgb_range)
+{
+	if (tpg->real_rgb_range == rgb_range)
+		return;
+	tpg->real_rgb_range = rgb_range;
+	tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_colorspace(struct tpg_data *tpg, u32 colorspace)
+{
+	if (tpg->colorspace == colorspace)
+		return;
+	tpg->colorspace = colorspace;
+	tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_colorspace(const struct tpg_data *tpg)
+{
+	return tpg->colorspace;
+}
+
+static inline void tpg_s_ycbcr_enc(struct tpg_data *tpg, u32 ycbcr_enc)
+{
+	if (tpg->ycbcr_enc == ycbcr_enc)
+		return;
+	tpg->ycbcr_enc = ycbcr_enc;
+	tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg)
+{
+	return tpg->ycbcr_enc;
+}
+
+static inline void tpg_s_xfer_func(struct tpg_data *tpg, u32 xfer_func)
+{
+	if (tpg->xfer_func == xfer_func)
+		return;
+	tpg->xfer_func = xfer_func;
+	tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_xfer_func(const struct tpg_data *tpg)
+{
+	return tpg->xfer_func;
+}
+
+static inline void tpg_s_quantization(struct tpg_data *tpg, u32 quantization)
+{
+	if (tpg->quantization == quantization)
+		return;
+	tpg->quantization = quantization;
+	tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_quantization(const struct tpg_data *tpg)
+{
+	return tpg->quantization;
+}
+
+static inline unsigned tpg_g_buffers(const struct tpg_data *tpg)
+{
+	return tpg->buffers;
+}
+
+static inline unsigned tpg_g_planes(const struct tpg_data *tpg)
+{
+	return tpg->interleaved ? 1 : tpg->planes;
+}
+
+static inline bool tpg_g_interleaved(const struct tpg_data *tpg)
+{
+	return tpg->interleaved;
+}
+
+static inline unsigned tpg_g_twopixelsize(const struct tpg_data *tpg, unsigned plane)
+{
+	return tpg->twopixelsize[plane];
+}
+
+static inline unsigned tpg_hdiv(const struct tpg_data *tpg,
+				  unsigned plane, unsigned x)
+{
+	return ((x / tpg->hdownsampling[plane]) & tpg->hmask[plane]) *
+		tpg->twopixelsize[plane] / 2;
+}
+
+static inline unsigned tpg_hscale(const struct tpg_data *tpg, unsigned x)
+{
+	return (x * tpg->scaled_width) / tpg->src_width;
+}
+
+static inline unsigned tpg_hscale_div(const struct tpg_data *tpg,
+				      unsigned plane, unsigned x)
+{
+	return tpg_hdiv(tpg, plane, tpg_hscale(tpg, x));
+}
+
+static inline unsigned tpg_g_bytesperline(const struct tpg_data *tpg, unsigned plane)
+{
+	return tpg->bytesperline[plane];
+}
+
+static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsigned bpl)
+{
+	unsigned p;
+
+	if (tpg->buffers > 1) {
+		tpg->bytesperline[plane] = bpl;
+		return;
+	}
+
+	for (p = 0; p < tpg_g_planes(tpg); p++) {
+		unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
+
+		tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
+	}
+	if (tpg_g_interleaved(tpg))
+		tpg->bytesperline[1] = tpg->bytesperline[0];
+}
+
+
+static inline unsigned tpg_g_line_width(const struct tpg_data *tpg, unsigned plane)
+{
+	unsigned w = 0;
+	unsigned p;
+
+	if (tpg->buffers > 1)
+		return tpg_g_bytesperline(tpg, plane);
+	for (p = 0; p < tpg_g_planes(tpg); p++) {
+		unsigned plane_w = tpg_g_bytesperline(tpg, p);
+
+		w += plane_w / tpg->vdownsampling[p];
+	}
+	return w;
+}
+
+static inline unsigned tpg_calc_line_width(const struct tpg_data *tpg,
+					   unsigned plane, unsigned bpl)
+{
+	unsigned w = 0;
+	unsigned p;
+
+	if (tpg->buffers > 1)
+		return bpl;
+	for (p = 0; p < tpg_g_planes(tpg); p++) {
+		unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
+
+		plane_w /= tpg->hdownsampling[p];
+		w += plane_w / tpg->vdownsampling[p];
+	}
+	return w;
+}
+
+static inline unsigned tpg_calc_plane_size(const struct tpg_data *tpg, unsigned plane)
+{
+	if (plane >= tpg_g_planes(tpg))
+		return 0;
+
+	return tpg_g_bytesperline(tpg, plane) * tpg->buf_height /
+	       tpg->vdownsampling[plane];
+}
+
+static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h)
+{
+	tpg->buf_height = h;
+}
+
+static inline void tpg_s_field(struct tpg_data *tpg, unsigned field, bool alternate)
+{
+	tpg->field = field;
+	tpg->field_alternate = alternate;
+}
+
+static inline void tpg_s_perc_fill(struct tpg_data *tpg,
+				      unsigned perc_fill)
+{
+	tpg->perc_fill = perc_fill;
+}
+
+static inline unsigned tpg_g_perc_fill(const struct tpg_data *tpg)
+{
+	return tpg->perc_fill;
+}
+
+static inline void tpg_s_perc_fill_blank(struct tpg_data *tpg,
+					 bool perc_fill_blank)
+{
+	tpg->perc_fill_blank = perc_fill_blank;
+}
+
+static inline void tpg_s_video_aspect(struct tpg_data *tpg,
+					enum tpg_video_aspect vid_aspect)
+{
+	if (tpg->vid_aspect == vid_aspect)
+		return;
+	tpg->vid_aspect = vid_aspect;
+	tpg->recalc_square_border = true;
+}
+
+static inline enum tpg_video_aspect tpg_g_video_aspect(const struct tpg_data *tpg)
+{
+	return tpg->vid_aspect;
+}
+
+static inline void tpg_s_pixel_aspect(struct tpg_data *tpg,
+					enum tpg_pixel_aspect pix_aspect)
+{
+	if (tpg->pix_aspect == pix_aspect)
+		return;
+	tpg->pix_aspect = pix_aspect;
+	tpg->recalc_square_border = true;
+}
+
+static inline void tpg_s_show_border(struct tpg_data *tpg,
+					bool show_border)
+{
+	tpg->show_border = show_border;
+}
+
+static inline void tpg_s_show_square(struct tpg_data *tpg,
+					bool show_square)
+{
+	tpg->show_square = show_square;
+}
+
+static inline void tpg_s_insert_sav(struct tpg_data *tpg, bool insert_sav)
+{
+	tpg->insert_sav = insert_sav;
+}
+
+static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav)
+{
+	tpg->insert_eav = insert_eav;
+}
+
+void tpg_update_mv_step(struct tpg_data *tpg);
+
+static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg,
+				enum tpg_move_mode mv_hor_mode)
+{
+	tpg->mv_hor_mode = mv_hor_mode;
+	tpg_update_mv_step(tpg);
+}
+
+static inline void tpg_s_mv_vert_mode(struct tpg_data *tpg,
+				enum tpg_move_mode mv_vert_mode)
+{
+	tpg->mv_vert_mode = mv_vert_mode;
+	tpg_update_mv_step(tpg);
+}
+
+static inline void tpg_init_mv_count(struct tpg_data *tpg)
+{
+	tpg->mv_hor_count = tpg->mv_vert_count = 0;
+}
+
+static inline void tpg_update_mv_count(struct tpg_data *tpg, bool frame_is_field)
+{
+	tpg->mv_hor_count += tpg->mv_hor_step * (frame_is_field ? 1 : 2);
+	tpg->mv_vert_count += tpg->mv_vert_step * (frame_is_field ? 1 : 2);
+}
+
+static inline void tpg_s_hflip(struct tpg_data *tpg, bool hflip)
+{
+	if (tpg->hflip == hflip)
+		return;
+	tpg->hflip = hflip;
+	tpg_update_mv_step(tpg);
+	tpg->recalc_lines = true;
+}
+
+static inline bool tpg_g_hflip(const struct tpg_data *tpg)
+{
+	return tpg->hflip;
+}
+
+static inline void tpg_s_vflip(struct tpg_data *tpg, bool vflip)
+{
+	tpg->vflip = vflip;
+}
+
+static inline bool tpg_g_vflip(const struct tpg_data *tpg)
+{
+	return tpg->vflip;
+}
+
+static inline bool tpg_pattern_is_static(const struct tpg_data *tpg)
+{
+	return tpg->pattern != TPG_PAT_NOISE &&
+	       tpg->mv_hor_mode == TPG_MOVE_NONE &&
+	       tpg->mv_vert_mode == TPG_MOVE_NONE;
+}
+
+#endif
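
Typical driver usage of the generator (roughly what vivid does): allocate once
for the largest line width, configure, then fill each buffer. A condensed
sketch with hypothetical dimensions and most configuration (crop/compose,
colorspace, etc.) omitted:

static int example_tpg_fill(struct tpg_data *tpg, u8 *vbuf)
{
	int ret;

	tpg_init(tpg, 1280, 720);
	ret = tpg_alloc(tpg, 1920);	/* largest line width we will use */
	if (ret)
		return ret;

	tpg_s_fourcc(tpg, V4L2_PIX_FMT_YUYV);
	tpg_s_pattern(tpg, TPG_PAT_75_COLORBAR);
	tpg_reset_source(tpg, 1280, 720, V4L2_FIELD_NONE);
	tpg_s_bytesperline(tpg, 0, 1280 * 2);	/* YUYV: two bytes per pixel */
	tpg_s_buf_height(tpg, 720);

	tpg_fill_plane_buffer(tpg, V4L2_STD_UNKNOWN, 0, vbuf);

	tpg_free(tpg);
	return 0;
}
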
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 8a0f55b..88e3ab4 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -375,6 +375,9 @@
 /**
  * struct vb2_ops - driver-specific callbacks
  *
+ * @verify_planes_array: Verify that a given user space structure contains
+ *			enough planes for the buffer. This is called
+ *			for each dequeued buffer.
  * @fill_user_buffer:	given a vb2_buffer fill in the userspace structure.
  *			For V4L2 this is a struct v4l2_buffer.
  * @fill_vb2_buffer:	given a userspace structure, fill in the vb2_buffer.
@@ -384,6 +387,7 @@
  *			the vb2_buffer struct.
  */
 struct vb2_buf_ops {
+	int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
 	void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
 	int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
 				struct vb2_plane *planes);
@@ -400,6 +404,9 @@
  * @fileio_read_once:		report EOF after reading the first buffer
  * @fileio_write_immediately:	queue buffer after each write() call
  * @allow_zero_bytesused:	allow bytesused == 0 to be passed to the driver
+ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
+ *              has not been called. This is a vb1 idiom that vb2 has also
+ *              adopted.
  * @lock:	pointer to a mutex that protects the vb2_queue struct. The
  *		driver can set this to a mutex to let the v4l2 core serialize
  *		the queuing ioctls. If the driver wants to handle locking
@@ -463,6 +470,7 @@
 	unsigned			fileio_read_once:1;
 	unsigned			fileio_write_immediately:1;
 	unsigned			allow_zero_bytesused:1;
+	unsigned		   quirk_poll_must_check_waiting_for_buffers:1;
 
 	struct mutex			*lock;
 	void				*owner;
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index cc54175..3e654a0 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -23,11 +23,22 @@
 int vsp1_du_setup_lif(struct device *dev, unsigned int width,
 		      unsigned int height);
 
-int vsp1_du_atomic_begin(struct device *dev);
-int vsp1_du_atomic_update(struct device *dev, unsigned int rpf, u32 pixelformat,
-			  unsigned int pitch, dma_addr_t mem[2],
-			  const struct v4l2_rect *src,
-			  const struct v4l2_rect *dst);
-int vsp1_du_atomic_flush(struct device *dev);
+void vsp1_du_atomic_begin(struct device *dev);
+int vsp1_du_atomic_update_ext(struct device *dev, unsigned int rpf,
+			      u32 pixelformat, unsigned int pitch,
+			      dma_addr_t mem[2], const struct v4l2_rect *src,
+			      const struct v4l2_rect *dst, unsigned int alpha,
+			      unsigned int zpos);
+void vsp1_du_atomic_flush(struct device *dev);
+
+static inline int vsp1_du_atomic_update(struct device *dev,
+					unsigned int rpf_index, u32 pixelformat,
+					unsigned int pitch, dma_addr_t mem[2],
+					const struct v4l2_rect *src,
+					const struct v4l2_rect *dst)
+{
+	return vsp1_du_atomic_update_ext(dev, rpf_index, pixelformat, pitch,
+					 mem, src, dst, 255, 0);
+}
 
 #endif /* __MEDIA_VSP1_H__ */
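
The atomic update API is now bracketed by begin/flush, and the legacy call maps
onto the extended one with full opacity (alpha 255) and z-position 0. A sketch
of how the DU side might push a single plane; the pixel format and pitch are
illustrative:

static void example_push_plane(struct device *vsp1, dma_addr_t mem[2],
			       const struct v4l2_rect *src,
			       const struct v4l2_rect *dst)
{
	vsp1_du_atomic_begin(vsp1);

	/* legacy wrapper: opaque plane at z-position 0 */
	vsp1_du_atomic_update(vsp1, 0, V4L2_PIX_FMT_ARGB32, dst->width * 4,
			      mem, src, dst);

	vsp1_du_atomic_flush(vsp1);
}
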
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index da3a77d..da84cf9 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -58,6 +58,9 @@
 #include <net/ipv6.h>
 #include <net/net_namespace.h>
 
+/* special link-layer handling */
+#include <net/mac802154.h>
+
 #define EUI64_ADDR_LEN		8
 
 #define LOWPAN_NHC_MAX_ID_LEN	1
@@ -93,7 +96,7 @@
 }
 
 #define LOWPAN_PRIV_SIZE(llpriv_size)	\
-	(sizeof(struct lowpan_priv) + llpriv_size)
+	(sizeof(struct lowpan_dev) + llpriv_size)
 
 enum lowpan_lltypes {
 	LOWPAN_LLTYPE_BTLE,
@@ -129,7 +132,7 @@
 	return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
 }
 
-struct lowpan_priv {
+struct lowpan_dev {
 	enum lowpan_lltypes lltype;
 	struct dentry *iface_debugfs;
 	struct lowpan_iphc_ctx_table ctx;
@@ -139,11 +142,23 @@
 };
 
 static inline
-struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+struct lowpan_dev *lowpan_dev(const struct net_device *dev)
 {
 	return netdev_priv(dev);
 }
 
+/* private device info */
+struct lowpan_802154_dev {
+	struct net_device	*wdev; /* wpan device ptr */
+	u16			fragment_tag;
+};
+
+static inline struct
+lowpan_802154_dev *lowpan_802154_dev(const struct net_device *dev)
+{
+	return (struct lowpan_802154_dev *)lowpan_dev(dev)->priv;
+}
+
 struct lowpan_802154_cb {
 	u16 d_tag;
 	unsigned int d_size;
@@ -157,6 +172,22 @@
 	return (struct lowpan_802154_cb *)skb->cb;
 }
 
+static inline void lowpan_iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
+						       const void *lladdr)
+{
+	/* fe:80::XXXX:XXXX:XXXX:XXXX
+	 *        \_________________/
+	 *              hwaddr
+	 */
+	ipaddr->s6_addr[0] = 0xFE;
+	ipaddr->s6_addr[1] = 0x80;
+	memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN);
+	/* second bit-flip (Universal/Local)
+	 * is done according to RFC 2464
+	 */
+	ipaddr->s6_addr[8] ^= 0x02;
+}
+
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
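
The renamed lowpan_dev()/lowpan_802154_dev() accessors keep the generic and the
802.15.4-specific private data separate. A small sketch of the 802.15.4 side
using the new accessor; the helper name is hypothetical:

static u16 example_next_frag_tag(struct net_device *ldev)
{
	/* ldev is the 6LoWPAN interface; its link-layer private area holds
	 * the wpan device pointer and the running fragment tag
	 */
	return lowpan_802154_dev(ldev)->fragment_tag++;
}
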
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 2a19fe1..2cd9e9b 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -106,6 +106,7 @@
 			int bind);
 	int     (*walk)(struct net *, struct sk_buff *,
 			struct netlink_callback *, int, struct tc_action *);
+	void	(*stats_update)(struct tc_action *, u64, u32, u64);
 };
 
 struct tc_action_net {
@@ -135,6 +136,7 @@
 static inline void tc_action_net_exit(struct tc_action_net *tn)
 {
 	tcf_hashinfo_destroy(tn->ops, tn->hinfo);
+	kfree(tn->hinfo);
 }
 
 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
@@ -177,10 +179,21 @@
 
 #define tc_for_each_action(_a, _exts) \
 	list_for_each_entry(a, &(_exts)->actions, list)
+
+static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
+					   u64 packets, u64 lastuse)
+{
+	if (!a->ops->stats_update)
+		return;
+
+	a->ops->stats_update(a, bytes, packets, lastuse);
+}
+
 #else /* CONFIG_NET_CLS_ACT */
 
 #define tc_no_actions(_exts) true
 #define tc_for_each_action(_a, _exts) while (0)
+#define tcf_action_stats_update(a, bytes, packets, lastuse)
 
 #endif /* CONFIG_NET_CLS_ACT */
 #endif
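
stats_update lets hardware-offloading classifiers feed byte/packet counters
back into an action; callers go through the tcf_action_stats_update() wrapper
so actions without the op are skipped. A sketch of the calling side, assuming
the counters were already read from hardware:

static void example_report_hw_stats(struct tc_action *a,
				    u64 bytes, u64 packets, u64 lastuse)
{
	/* no-op if the action does not implement ->stats_update */
	tcf_action_stats_update(a, bytes, packets, lastuse);
}
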
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index e797d45..ac1bc3c 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -12,6 +12,7 @@
 #ifndef _NET_RXRPC_H
 #define _NET_RXRPC_H
 
+#include <linux/skbuff.h>
 #include <linux/rxrpc.h>
 
 struct rxrpc_call;
@@ -19,11 +20,12 @@
 /*
  * the mark applied to socket buffers that may be intercepted
  */
-enum {
+enum rxrpc_skb_mark {
 	RXRPC_SKB_MARK_DATA,		/* data message */
 	RXRPC_SKB_MARK_FINAL_ACK,	/* final ACK received message */
 	RXRPC_SKB_MARK_BUSY,		/* server busy message */
 	RXRPC_SKB_MARK_REMOTE_ABORT,	/* remote abort message */
+	RXRPC_SKB_MARK_LOCAL_ABORT,	/* local abort message */
 	RXRPC_SKB_MARK_NET_ERROR,	/* network error message */
 	RXRPC_SKB_MARK_LOCAL_ERROR,	/* local error message */
 	RXRPC_SKB_MARK_NEW_CALL,	/* local error message */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5d38d98..eefcf3e 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -61,6 +61,8 @@
 #define HCI_RS232	4
 #define HCI_PCI		5
 #define HCI_SDIO	6
+#define HCI_SPI		7
+#define HCI_I2C		8
 
 /* HCI controller types */
 #define HCI_BREDR	0x00
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9e1b24c..6392167 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -68,26 +68,6 @@
  */
 
 /**
- * enum ieee80211_band - supported frequency bands
- *
- * The bands are assigned this way because the supported
- * bitrates differ in these bands.
- *
- * @IEEE80211_BAND_2GHZ: 2.4GHz ISM band
- * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7)
- * @IEEE80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
- * @IEEE80211_NUM_BANDS: number of defined bands
- */
-enum ieee80211_band {
-	IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
-	IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ,
-	IEEE80211_BAND_60GHZ = NL80211_BAND_60GHZ,
-
-	/* keep last */
-	IEEE80211_NUM_BANDS
-};
-
-/**
  * enum ieee80211_channel_flags - channel flags
  *
  * Channel flags set by the regulatory control code.
@@ -167,7 +147,7 @@
  * @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
  */
 struct ieee80211_channel {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	u16 center_freq;
 	u16 hw_value;
 	u32 flags;
@@ -324,7 +304,7 @@
 struct ieee80211_supported_band {
 	struct ieee80211_channel *channels;
 	struct ieee80211_rate *bitrates;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int n_channels;
 	int n_bitrates;
 	struct ieee80211_sta_ht_cap ht_cap;
@@ -816,6 +796,7 @@
  * @supported_oper_classes_len: number of supported operating classes
  * @opmode_notif: operating mode field from Operating Mode Notification
  * @opmode_notif_used: information if operating mode field is used
+ * @support_p2p_ps: information if station supports P2P PS mechanism
  */
 struct station_parameters {
 	const u8 *supported_rates;
@@ -841,6 +822,7 @@
 	u8 supported_oper_classes_len;
 	u8 opmode_notif;
 	bool opmode_notif_used;
+	int support_p2p_ps;
 };
 
 /**
@@ -1063,11 +1045,12 @@
  * @rx_beacon: number of beacons received from this peer
  * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
  *	from this peer
+ * @rx_duration: aggregate PPDU duration (usecs) for all the frames from a peer
  * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
  *	(IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
  */
 struct station_info {
-	u32 filled;
+	u64 filled;
 	u32 connected_time;
 	u32 inactive_time;
 	u64 rx_bytes;
@@ -1106,6 +1089,7 @@
 	u32 expected_throughput;
 
 	u64 rx_beacon;
+	u64 rx_duration;
 	u8 rx_beacon_signal_avg;
 	struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
 };
@@ -1368,7 +1352,7 @@
 	bool user_mpm;
 	u8 dtim_period;
 	u16 beacon_interval;
-	int mcast_rate[IEEE80211_NUM_BANDS];
+	int mcast_rate[NUM_NL80211_BANDS];
 	u32 basic_rates;
 };
 
@@ -1455,6 +1439,7 @@
  * @mac_addr_mask: MAC address mask used with randomisation, bits that
  *	are 0 in the mask should be randomised, bits that are 1 should
  *	be taken from the @mac_addr
+ * @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
  */
 struct cfg80211_scan_request {
 	struct cfg80211_ssid *ssids;
@@ -1465,12 +1450,13 @@
 	size_t ie_len;
 	u32 flags;
 
-	u32 rates[IEEE80211_NUM_BANDS];
+	u32 rates[NUM_NL80211_BANDS];
 
 	struct wireless_dev *wdev;
 
 	u8 mac_addr[ETH_ALEN] __aligned(2);
 	u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+	u8 bssid[ETH_ALEN] __aligned(2);
 
 	/* internal */
 	struct wiphy *wiphy;
@@ -1617,7 +1603,7 @@
 };
 
 /**
- * struct cfg80211_bss_ie_data - BSS entry IE data
+ * struct cfg80211_bss_ies - BSS entry IE data
  * @tsf: TSF contained in the frame that carried these IEs
  * @rcu_head: internal use, for freeing
  * @len: length of the IEs
@@ -1746,7 +1732,12 @@
  * @ie_len: Length of ie buffer in octets
  * @use_mfp: Use management frame protection (IEEE 802.11w) in this association
  * @crypto: crypto settings
- * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
+ *	to indicate a request to reassociate within the ESS instead of a request
+ *	to do the initial association with the ESS. When included, this is set to
+ *	the BSSID of the current association, i.e., to the value that is
+ *	included in the Current AP address field of the Reassociation Request
+ *	frame.
  * @flags:  See &enum cfg80211_assoc_req_flags
  * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
  *	will be used in ht_capa.  Un-supported values will be ignored.
@@ -1851,12 +1842,39 @@
 	bool privacy;
 	bool control_port;
 	bool userspace_handles_dfs;
-	int mcast_rate[IEEE80211_NUM_BANDS];
+	int mcast_rate[NUM_NL80211_BANDS];
 	struct ieee80211_ht_cap ht_capa;
 	struct ieee80211_ht_cap ht_capa_mask;
 };
 
 /**
+ * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
+ *
+ * @band: band of BSS which should match for RSSI level adjustment.
+ * @delta: value of RSSI level adjustment.
+ */
+struct cfg80211_bss_select_adjust {
+	enum nl80211_band band;
+	s8 delta;
+};
+
+/**
+ * struct cfg80211_bss_selection - connection parameters for BSS selection.
+ *
+ * @behaviour: requested BSS selection behaviour.
+ * @param: parameters for requested behaviour.
+ * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
+ * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
+ */
+struct cfg80211_bss_selection {
+	enum nl80211_bss_select_attr behaviour;
+	union {
+		enum nl80211_band band_pref;
+		struct cfg80211_bss_select_adjust adjust;
+	} param;
+};
+
+/**
  * struct cfg80211_connect_params - Connection parameters
  *
  * This structure provides information needed to complete IEEE 802.11
@@ -1893,6 +1911,13 @@
  * @vht_capa_mask: The bits of vht_capa which are to be used.
  * @pbss: if set, connect to a PCP instead of AP. Valid for DMG
  *	networks.
+ * @bss_select: criteria to be used for BSS selection.
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
+ *	to indicate a request to reassociate within the ESS instead of a request
+ *	to do the initial association with the ESS. When included, this is set to
+ *	the BSSID of the current association, i.e., to the value that is
+ *	included in the Current AP address field of the Reassociation Request
+ *	frame.
  */
 struct cfg80211_connect_params {
 	struct ieee80211_channel *channel;
@@ -1916,6 +1941,8 @@
 	struct ieee80211_vht_cap vht_capa;
 	struct ieee80211_vht_cap vht_capa_mask;
 	bool pbss;
+	struct cfg80211_bss_selection bss_select;
+	const u8 *prev_bssid;
 };
 
 /**
@@ -1945,7 +1972,7 @@
 		u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
 		u16 vht_mcs[NL80211_VHT_NSS_MAX];
 		enum nl80211_txrate_gi gi;
-	} control[IEEE80211_NUM_BANDS];
+	} control[NUM_NL80211_BANDS];
 };
 /**
  * struct cfg80211_pmksa - PMK Security Association
@@ -2342,7 +2369,17 @@
  * @connect: Connect to the ESS with the specified parameters. When connected,
  *	call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS.
  *	If the connection fails for some reason, call cfg80211_connect_result()
- *	with the status from the AP.
+ *	with the status from the AP. The driver is allowed to roam to other
+ *	BSSes within the ESS when the other BSS matches the connect parameters.
+ *	When such roaming is initiated by the driver, the driver is expected to
+ *	verify that the target matches the configured security parameters and
+ *	to use Reassociation Request frame instead of Association Request frame.
+ *	to use a Reassociation Request frame instead of an Association Request frame.
+ *	a specific roam when connected to an ESS. In that case, the prev_bssid
+ *	parameter is set to the BSSID of the currently associated BSS as an
+ *	indication of requesting reassociation. In both the driver-initiated and
+ *	new connect() call initiated roaming cases, the result of roaming is
+ *	indicated with a call to cfg80211_roamed() or cfg80211_roamed_bss().
  *	(invoked with the wireless_dev mutex held)
  * @disconnect: Disconnect from the BSS/ESS.
  *	(invoked with the wireless_dev mutex held)
@@ -2622,7 +2659,7 @@
 	int	(*leave_ibss)(struct wiphy *wiphy, struct net_device *dev);
 
 	int	(*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
-				  int rate[IEEE80211_NUM_BANDS]);
+				  int rate[NUM_NL80211_BANDS]);
 
 	int	(*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
 
@@ -3152,6 +3189,9 @@
  * @vht_capa_mod_mask:  Specify what VHT capabilities can be over-ridden.
  *	If null, then none can be over-ridden.
  *
+ * @wdev_list: the list of associated (virtual) interfaces; this list must
+ *	not be modified by the driver, but can be read with RTNL/RCU protection.
+ *
  * @max_acl_mac_addrs: Maximum number of MAC addresses that the device
  *	supports for ACL.
  *
@@ -3184,6 +3224,9 @@
  *	low rssi when a frame is heard on different channel, then it should set
  *	this variable to the maximal offset for which it can compensate.
  *	This value should be set in MHz.
+ * @bss_select_support: bitmask indicating the BSS selection criteria supported
+ *	by the driver in the .connect() callback. The bit position maps to the
+ *	attribute indices defined in &enum nl80211_bss_select_attr.
  */
 struct wiphy {
 	/* assign these fields before you register the wiphy */
@@ -3265,7 +3308,7 @@
 	 * help determine whether you own this wiphy or not. */
 	const void *privid;
 
-	struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
+	struct ieee80211_supported_band *bands[NUM_NL80211_BANDS];
 
 	/* Lets us get back the wiphy on the callback */
 	void (*reg_notifier)(struct wiphy *wiphy,
@@ -3288,6 +3331,8 @@
 	const struct ieee80211_ht_cap *ht_capa_mod_mask;
 	const struct ieee80211_vht_cap *vht_capa_mod_mask;
 
+	struct list_head wdev_list;
+
 	/* the network namespace this phy lives in currently */
 	possible_net_t _net;
 
@@ -3306,6 +3351,8 @@
 	u8 max_num_csa_counters;
 	u8 max_adj_channel_rssi_comp;
 
+	u32 bss_select_support;
+
 	char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -3598,7 +3645,7 @@
  * @band: band, necessary due to channel number overlap
  * Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
  */
-int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
+int ieee80211_channel_to_frequency(int chan, enum nl80211_band band);
 
 /**
  * ieee80211_frequency_to_channel - convert frequency to channel number
@@ -3851,7 +3898,7 @@
  * cfg80211_find_vendor_ie - find vendor specific information element in data
  *
  * @oui: vendor OUI
- * @oui_type: vendor-specific OUI type
+ * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any
  * @ies: data consisting of IEs
  * @len: length of data
  *
@@ -3863,7 +3910,7 @@
  * Note: There are no checks on the element length other than having to fit into
  * the given data.
  */
-const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
+const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
 				  const u8 *ies, int len);
 
 /**
@@ -4610,6 +4657,32 @@
 #endif
 
 /**
+ * cfg80211_connect_bss - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the AP
+ * @bss: entry of bss to which STA got connected to, can be obtained
+ *	through cfg80211_get_bss (may be %NULL)
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @status: status code, 0 for successful connection, use
+ *      %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ *      the real status code for failures.
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver whenever connect() has
+ * succeeded. This is similar to cfg80211_connect_result(), but with the
+ * option of identifying the exact bss entry for the connection. Only one of
+ * these functions should be called.
+ */
+void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+			  struct cfg80211_bss *bss, const u8 *req_ie,
+			  size_t req_ie_len, const u8 *resp_ie,
+			  size_t resp_ie_len, u16 status, gfp_t gfp);
+
+/**
  * cfg80211_connect_result - notify cfg80211 of connection result
  *
  * @dev: network device
@@ -4626,10 +4699,15 @@
  * It should be called by the underlying driver whenever connect() has
  * succeeded.
  */
-void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
-			     const u8 *req_ie, size_t req_ie_len,
-			     const u8 *resp_ie, size_t resp_ie_len,
-			     u16 status, gfp_t gfp);
+static inline void
+cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+			const u8 *req_ie, size_t req_ie_len,
+			const u8 *resp_ie, size_t resp_ie_len,
+			u16 status, gfp_t gfp)
+{
+	cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie,
+			     resp_ie_len, status, gfp);
+}
 
 /**
  * cfg80211_roamed - notify cfg80211 of roaming
@@ -5029,7 +5107,7 @@
  * Returns %true if the conversion was successful, %false otherwise.
  */
 bool ieee80211_operating_class_to_band(u8 operating_class,
-				       enum ieee80211_band *band);
+				       enum nl80211_band *band);
 
 /**
  * ieee80211_chandef_to_operating_class - convert chandef to operation class
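
The new cfg80211_connect_bss() lets a driver hand cfg80211 the exact BSS entry
it associated to, and cfg80211_connect_result() becomes an inline wrapper that
passes a NULL bss. A minimal sketch of a driver's connect-completion path is
below; "my_priv" and its fields are hypothetical, only the cfg80211 calls come
from the header:

    /* Hedged sketch: report a successful connection together with the exact
     * BSS entry.  Passing a NULL bss would be equivalent to the old
     * cfg80211_connect_result() behaviour.
     */
    static void my_connect_done(struct my_priv *priv, const u8 *bssid,
                                const u8 *req_ie, size_t req_ie_len,
                                const u8 *resp_ie, size_t resp_ie_len)
    {
            struct cfg80211_bss *bss;

            /* Look up the BSS we just associated to (may return NULL). */
            bss = cfg80211_get_bss(priv->wiphy, priv->channel, bssid,
                                   priv->ssid, priv->ssid_len,
                                   IEEE80211_BSS_TYPE_ESS,
                                   IEEE80211_PRIVACY_ANY);

            cfg80211_connect_bss(priv->ndev, bssid, bss, req_ie, req_ie_len,
                                 resp_ie, resp_ie_len, WLAN_STATUS_SUCCESS,
                                 GFP_KERNEL);
    }
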
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c0a92e2..74c9693 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -17,6 +17,7 @@
 #include <linux/hardirq.h>
 #include <linux/rcupdate.h>
 #include <net/sock.h>
+#include <net/inet_sock.h>
 
 #ifdef CONFIG_CGROUP_NET_CLASSID
 struct cgroup_cls_state {
@@ -63,11 +64,13 @@
 	 * softirqs always disables bh.
 	 */
 	if (in_serving_softirq()) {
+		struct sock *sk = skb_to_full_sk(skb);
+
 		/* If there is an sock_cgroup_classid we'll use that. */
-		if (!skb->sk)
+		if (!sk || !sk_fullsock(sk))
 			return 0;
 
-		classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data);
+		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
 	}
 
 	return classid;
diff --git a/include/net/codel.h b/include/net/codel.h
index d168aca..a6e428f 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -87,27 +87,6 @@
 	 ((s32)((a) - (b)) >= 0))
 #define codel_time_before_eq(a, b)	codel_time_after_eq(b, a)
 
-/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
-struct codel_skb_cb {
-	codel_time_t enqueue_time;
-};
-
-static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
-{
-	qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
-	return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
-}
-
-static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
-{
-	return get_codel_cb(skb)->enqueue_time;
-}
-
-static void codel_set_enqueue_time(struct sk_buff *skb)
-{
-	get_codel_cb(skb)->enqueue_time = codel_get_time();
-}
-
 static inline u32 codel_time_to_us(codel_time_t val)
 {
 	u64 valns = ((u64)val << CODEL_SHIFT);
@@ -176,198 +155,10 @@
 
 #define CODEL_DISABLED_THRESHOLD INT_MAX
 
-static void codel_params_init(struct codel_params *params,
-			      const struct Qdisc *sch)
-{
-	params->interval = MS2TIME(100);
-	params->target = MS2TIME(5);
-	params->mtu = psched_mtu(qdisc_dev(sch));
-	params->ce_threshold = CODEL_DISABLED_THRESHOLD;
-	params->ecn = false;
-}
-
-static void codel_vars_init(struct codel_vars *vars)
-{
-	memset(vars, 0, sizeof(*vars));
-}
-
-static void codel_stats_init(struct codel_stats *stats)
-{
-	stats->maxpacket = 0;
-}
-
-/*
- * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
- * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
- *
- * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
- */
-static void codel_Newton_step(struct codel_vars *vars)
-{
-	u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
-	u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
-	u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
-
-	val >>= 2; /* avoid overflow in following multiply */
-	val = (val * invsqrt) >> (32 - 2 + 1);
-
-	vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
-}
-
-/*
- * CoDel control_law is t + interval/sqrt(count)
- * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
- * both sqrt() and divide operation.
- */
-static codel_time_t codel_control_law(codel_time_t t,
-				      codel_time_t interval,
-				      u32 rec_inv_sqrt)
-{
-	return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
-}
-
-static bool codel_should_drop(const struct sk_buff *skb,
-			      struct Qdisc *sch,
-			      struct codel_vars *vars,
-			      struct codel_params *params,
-			      struct codel_stats *stats,
-			      codel_time_t now)
-{
-	bool ok_to_drop;
-
-	if (!skb) {
-		vars->first_above_time = 0;
-		return false;
-	}
-
-	vars->ldelay = now - codel_get_enqueue_time(skb);
-	sch->qstats.backlog -= qdisc_pkt_len(skb);
-
-	if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
-		stats->maxpacket = qdisc_pkt_len(skb);
-
-	if (codel_time_before(vars->ldelay, params->target) ||
-	    sch->qstats.backlog <= params->mtu) {
-		/* went below - stay below for at least interval */
-		vars->first_above_time = 0;
-		return false;
-	}
-	ok_to_drop = false;
-	if (vars->first_above_time == 0) {
-		/* just went above from below. If we stay above
-		 * for at least interval we'll say it's ok to drop
-		 */
-		vars->first_above_time = now + params->interval;
-	} else if (codel_time_after(now, vars->first_above_time)) {
-		ok_to_drop = true;
-	}
-	return ok_to_drop;
-}
-
+typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb);
+typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb);
+typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx);
 typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
-						struct Qdisc *sch);
+						void *ctx);
 
-static struct sk_buff *codel_dequeue(struct Qdisc *sch,
-				     struct codel_params *params,
-				     struct codel_vars *vars,
-				     struct codel_stats *stats,
-				     codel_skb_dequeue_t dequeue_func)
-{
-	struct sk_buff *skb = dequeue_func(vars, sch);
-	codel_time_t now;
-	bool drop;
-
-	if (!skb) {
-		vars->dropping = false;
-		return skb;
-	}
-	now = codel_get_time();
-	drop = codel_should_drop(skb, sch, vars, params, stats, now);
-	if (vars->dropping) {
-		if (!drop) {
-			/* sojourn time below target - leave dropping state */
-			vars->dropping = false;
-		} else if (codel_time_after_eq(now, vars->drop_next)) {
-			/* It's time for the next drop. Drop the current
-			 * packet and dequeue the next. The dequeue might
-			 * take us out of dropping state.
-			 * If not, schedule the next drop.
-			 * A large backlog might result in drop rates so high
-			 * that the next drop should happen now,
-			 * hence the while loop.
-			 */
-			while (vars->dropping &&
-			       codel_time_after_eq(now, vars->drop_next)) {
-				vars->count++; /* dont care of possible wrap
-						* since there is no more divide
-						*/
-				codel_Newton_step(vars);
-				if (params->ecn && INET_ECN_set_ce(skb)) {
-					stats->ecn_mark++;
-					vars->drop_next =
-						codel_control_law(vars->drop_next,
-								  params->interval,
-								  vars->rec_inv_sqrt);
-					goto end;
-				}
-				stats->drop_len += qdisc_pkt_len(skb);
-				qdisc_drop(skb, sch);
-				stats->drop_count++;
-				skb = dequeue_func(vars, sch);
-				if (!codel_should_drop(skb, sch,
-						       vars, params, stats, now)) {
-					/* leave dropping state */
-					vars->dropping = false;
-				} else {
-					/* and schedule the next drop */
-					vars->drop_next =
-						codel_control_law(vars->drop_next,
-								  params->interval,
-								  vars->rec_inv_sqrt);
-				}
-			}
-		}
-	} else if (drop) {
-		u32 delta;
-
-		if (params->ecn && INET_ECN_set_ce(skb)) {
-			stats->ecn_mark++;
-		} else {
-			stats->drop_len += qdisc_pkt_len(skb);
-			qdisc_drop(skb, sch);
-			stats->drop_count++;
-
-			skb = dequeue_func(vars, sch);
-			drop = codel_should_drop(skb, sch, vars, params,
-						 stats, now);
-		}
-		vars->dropping = true;
-		/* if min went above target close to when we last went below it
-		 * assume that the drop rate that controlled the queue on the
-		 * last cycle is a good starting point to control it now.
-		 */
-		delta = vars->count - vars->lastcount;
-		if (delta > 1 &&
-		    codel_time_before(now - vars->drop_next,
-				      16 * params->interval)) {
-			vars->count = delta;
-			/* we dont care if rec_inv_sqrt approximation
-			 * is not very precise :
-			 * Next Newton steps will correct it quadratically.
-			 */
-			codel_Newton_step(vars);
-		} else {
-			vars->count = 1;
-			vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
-		}
-		vars->lastcount = vars->count;
-		vars->drop_next = codel_control_law(now, params->interval,
-						    vars->rec_inv_sqrt);
-	}
-end:
-	if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
-	    INET_ECN_set_ce(skb))
-		stats->ce_mark++;
-	return skb;
-}
 #endif
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
new file mode 100644
index 0000000..d289b91
--- /dev/null
+++ b/include/net/codel_impl.h
@@ -0,0 +1,255 @@
+#ifndef __NET_SCHED_CODEL_IMPL_H
+#define __NET_SCHED_CODEL_IMPL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+static void codel_params_init(struct codel_params *params)
+{
+	params->interval = MS2TIME(100);
+	params->target = MS2TIME(5);
+	params->ce_threshold = CODEL_DISABLED_THRESHOLD;
+	params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+	memset(vars, 0, sizeof(*vars));
+}
+
+static void codel_stats_init(struct codel_stats *stats)
+{
+	stats->maxpacket = 0;
+}
+
+/*
+ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
+ */
+static void codel_Newton_step(struct codel_vars *vars)
+{
+	u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
+	u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
+	u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
+
+	val >>= 2; /* avoid overflow in following multiply */
+	val = (val * invsqrt) >> (32 - 2 + 1);
+
+	vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
+}
+
+/*
+ * CoDel control_law is t + interval/sqrt(count)
+ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
+ * both sqrt() and divide operation.
+ */
+static codel_time_t codel_control_law(codel_time_t t,
+				      codel_time_t interval,
+				      u32 rec_inv_sqrt)
+{
+	return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
+}
+
+static bool codel_should_drop(const struct sk_buff *skb,
+			      void *ctx,
+			      struct codel_vars *vars,
+			      struct codel_params *params,
+			      struct codel_stats *stats,
+			      codel_skb_len_t skb_len_func,
+			      codel_skb_time_t skb_time_func,
+			      u32 *backlog,
+			      codel_time_t now)
+{
+	bool ok_to_drop;
+	u32 skb_len;
+
+	if (!skb) {
+		vars->first_above_time = 0;
+		return false;
+	}
+
+	skb_len = skb_len_func(skb);
+	vars->ldelay = now - skb_time_func(skb);
+
+	if (unlikely(skb_len > stats->maxpacket))
+		stats->maxpacket = skb_len;
+
+	if (codel_time_before(vars->ldelay, params->target) ||
+	    *backlog <= params->mtu) {
+		/* went below - stay below for at least interval */
+		vars->first_above_time = 0;
+		return false;
+	}
+	ok_to_drop = false;
+	if (vars->first_above_time == 0) {
+		/* just went above from below. If we stay above
+		 * for at least interval we'll say it's ok to drop
+		 */
+		vars->first_above_time = now + params->interval;
+	} else if (codel_time_after(now, vars->first_above_time)) {
+		ok_to_drop = true;
+	}
+	return ok_to_drop;
+}
+
+static struct sk_buff *codel_dequeue(void *ctx,
+				     u32 *backlog,
+				     struct codel_params *params,
+				     struct codel_vars *vars,
+				     struct codel_stats *stats,
+				     codel_skb_len_t skb_len_func,
+				     codel_skb_time_t skb_time_func,
+				     codel_skb_drop_t drop_func,
+				     codel_skb_dequeue_t dequeue_func)
+{
+	struct sk_buff *skb = dequeue_func(vars, ctx);
+	codel_time_t now;
+	bool drop;
+
+	if (!skb) {
+		vars->dropping = false;
+		return skb;
+	}
+	now = codel_get_time();
+	drop = codel_should_drop(skb, ctx, vars, params, stats,
+				 skb_len_func, skb_time_func, backlog, now);
+	if (vars->dropping) {
+		if (!drop) {
+			/* sojourn time below target - leave dropping state */
+			vars->dropping = false;
+		} else if (codel_time_after_eq(now, vars->drop_next)) {
+			/* It's time for the next drop. Drop the current
+			 * packet and dequeue the next. The dequeue might
+			 * take us out of dropping state.
+			 * If not, schedule the next drop.
+			 * A large backlog might result in drop rates so high
+			 * that the next drop should happen now,
+			 * hence the while loop.
+			 */
+			while (vars->dropping &&
+			       codel_time_after_eq(now, vars->drop_next)) {
+				vars->count++; /* dont care of possible wrap
+						* since there is no more divide
+						*/
+				codel_Newton_step(vars);
+				if (params->ecn && INET_ECN_set_ce(skb)) {
+					stats->ecn_mark++;
+					vars->drop_next =
+						codel_control_law(vars->drop_next,
+								  params->interval,
+								  vars->rec_inv_sqrt);
+					goto end;
+				}
+				stats->drop_len += skb_len_func(skb);
+				drop_func(skb, ctx);
+				stats->drop_count++;
+				skb = dequeue_func(vars, ctx);
+				if (!codel_should_drop(skb, ctx,
+						       vars, params, stats,
+						       skb_len_func,
+						       skb_time_func,
+						       backlog, now)) {
+					/* leave dropping state */
+					vars->dropping = false;
+				} else {
+					/* and schedule the next drop */
+					vars->drop_next =
+						codel_control_law(vars->drop_next,
+								  params->interval,
+								  vars->rec_inv_sqrt);
+				}
+			}
+		}
+	} else if (drop) {
+		u32 delta;
+
+		if (params->ecn && INET_ECN_set_ce(skb)) {
+			stats->ecn_mark++;
+		} else {
+			stats->drop_len += skb_len_func(skb);
+			drop_func(skb, ctx);
+			stats->drop_count++;
+
+			skb = dequeue_func(vars, ctx);
+			drop = codel_should_drop(skb, ctx, vars, params,
+						 stats, skb_len_func,
+						 skb_time_func, backlog, now);
+		}
+		vars->dropping = true;
+		/* if min went above target close to when we last went below it
+		 * assume that the drop rate that controlled the queue on the
+		 * last cycle is a good starting point to control it now.
+		 */
+		delta = vars->count - vars->lastcount;
+		if (delta > 1 &&
+		    codel_time_before(now - vars->drop_next,
+				      16 * params->interval)) {
+			vars->count = delta;
+			/* we dont care if rec_inv_sqrt approximation
+			 * is not very precise :
+			 * Next Newton steps will correct it quadratically.
+			 */
+			codel_Newton_step(vars);
+		} else {
+			vars->count = 1;
+			vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
+		}
+		vars->lastcount = vars->count;
+		vars->drop_next = codel_control_law(now, params->interval,
+						    vars->rec_inv_sqrt);
+	}
+end:
+	if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
+	    INET_ECN_set_ce(skb))
+		stats->ce_mark++;
+	return skb;
+}
+
+#endif
diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h
new file mode 100644
index 0000000..8144d9c
--- /dev/null
+++ b/include/net/codel_qdisc.h
@@ -0,0 +1,73 @@
+#ifndef __NET_SCHED_CODEL_QDISC_H
+#define __NET_SCHED_CODEL_QDISC_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
+struct codel_skb_cb {
+	codel_time_t enqueue_time;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+	qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+	return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+	return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+	get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+#endif
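
The CoDel split above moves the algorithm into codel_impl.h, parameterised by
a void *ctx plus per-skb length/timestamp/drop/dequeue callbacks, while
codel_qdisc.h keeps the qdisc-specific cb[] helpers. A rough sketch of how a
qdisc-style includer might wire those callbacks up (the my_* glue and
my_sched_data are hypothetical; note that maintaining qstats.backlog is now
the dequeue callback's job, since codel_should_drop() no longer touches it):

    static u32 my_skb_len(const struct sk_buff *skb)
    {
            return qdisc_pkt_len(skb);
    }

    static codel_time_t my_skb_time(const struct sk_buff *skb)
    {
            return codel_get_enqueue_time(skb);     /* from codel_qdisc.h */
    }

    static void my_drop(struct sk_buff *skb, void *ctx)
    {
            struct Qdisc *sch = ctx;

            qdisc_drop(skb, sch);
    }

    static struct sk_buff *my_dequeue(struct codel_vars *vars, void *ctx)
    {
            struct Qdisc *sch = ctx;
            struct sk_buff *skb = __skb_dequeue(&sch->q);

            /* backlog accounting moved here from codel_should_drop() */
            if (skb)
                    sch->qstats.backlog -= qdisc_pkt_len(skb);
            return skb;
    }

    static struct sk_buff *my_qdisc_dequeue(struct Qdisc *sch)
    {
            struct my_sched_data *q = qdisc_priv(sch);  /* hypothetical priv */

            return codel_dequeue(sch, &sch->qstats.backlog, &q->params,
                                 &q->vars, &q->stats, my_skb_len, my_skb_time,
                                 my_drop, my_dequeue);
    }
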
diff --git a/include/net/devlink.h b/include/net/devlink.h
index c37d257..1d45b61 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -24,6 +24,7 @@
 struct devlink {
 	struct list_head list;
 	struct list_head port_list;
+	struct list_head sb_list;
 	const struct devlink_ops *ops;
 	struct device *dev;
 	possible_net_t _net;
@@ -42,6 +43,12 @@
 	u32 split_group;
 };
 
+struct devlink_sb_pool_info {
+	enum devlink_sb_pool_type pool_type;
+	u32 size;
+	enum devlink_sb_threshold_type threshold_type;
+};
+
 struct devlink_ops {
 	size_t priv_size;
 	int (*port_type_set)(struct devlink_port *devlink_port,
@@ -49,6 +56,40 @@
 	int (*port_split)(struct devlink *devlink, unsigned int port_index,
 			  unsigned int count);
 	int (*port_unsplit)(struct devlink *devlink, unsigned int port_index);
+	int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
+			   u16 pool_index,
+			   struct devlink_sb_pool_info *pool_info);
+	int (*sb_pool_set)(struct devlink *devlink, unsigned int sb_index,
+			   u16 pool_index, u32 size,
+			   enum devlink_sb_threshold_type threshold_type);
+	int (*sb_port_pool_get)(struct devlink_port *devlink_port,
+				unsigned int sb_index, u16 pool_index,
+				u32 *p_threshold);
+	int (*sb_port_pool_set)(struct devlink_port *devlink_port,
+				unsigned int sb_index, u16 pool_index,
+				u32 threshold);
+	int (*sb_tc_pool_bind_get)(struct devlink_port *devlink_port,
+				   unsigned int sb_index,
+				   u16 tc_index,
+				   enum devlink_sb_pool_type pool_type,
+				   u16 *p_pool_index, u32 *p_threshold);
+	int (*sb_tc_pool_bind_set)(struct devlink_port *devlink_port,
+				   unsigned int sb_index,
+				   u16 tc_index,
+				   enum devlink_sb_pool_type pool_type,
+				   u16 pool_index, u32 threshold);
+	int (*sb_occ_snapshot)(struct devlink *devlink,
+			       unsigned int sb_index);
+	int (*sb_occ_max_clear)(struct devlink *devlink,
+				unsigned int sb_index);
+	int (*sb_occ_port_pool_get)(struct devlink_port *devlink_port,
+				    unsigned int sb_index, u16 pool_index,
+				    u32 *p_cur, u32 *p_max);
+	int (*sb_occ_tc_port_bind_get)(struct devlink_port *devlink_port,
+				       unsigned int sb_index,
+				       u16 tc_index,
+				       enum devlink_sb_pool_type pool_type,
+				       u32 *p_cur, u32 *p_max);
 };
 
 static inline void *devlink_priv(struct devlink *devlink)
@@ -82,6 +123,11 @@
 void devlink_port_type_clear(struct devlink_port *devlink_port);
 void devlink_port_split_set(struct devlink_port *devlink_port,
 			    u32 split_group);
+int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
+			u32 size, u16 ingress_pools_count,
+			u16 egress_pools_count, u16 ingress_tc_count,
+			u16 egress_tc_count);
+void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
 
 #else
 
@@ -135,6 +181,21 @@
 {
 }
 
+static inline int devlink_sb_register(struct devlink *devlink,
+				      unsigned int sb_index, u32 size,
+				      u16 ingress_pools_count,
+				      u16 egress_pools_count,
+				      u16 ingress_tc_count,
+				      u16 egress_tc_count)
+{
+	return 0;
+}
+
+static inline void devlink_sb_unregister(struct devlink *devlink,
+					 unsigned int sb_index)
+{
+}
+
 #endif
 
 #endif /* _NET_DEVLINK_H_ */
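
With the new sb_* operations and devlink_sb_register(), a driver can expose
its shared-buffer layout to the devlink core. A minimal sketch with
illustrative sizes (only devlink_sb_register()/devlink_sb_unregister() come
from the header):

    static int my_devlink_sb_init(struct devlink *devlink)
    {
            /* sb index 0: 16 MB total, 4 ingress + 4 egress pools,
             * 8 ingress + 8 egress traffic classes.
             */
            return devlink_sb_register(devlink, 0, 16 * 1024 * 1024,
                                       4, 4, 8, 8);
    }

    static void my_devlink_sb_fini(struct devlink *devlink)
    {
            devlink_sb_unregister(devlink, 0);
    }

The per-pool and per-port thresholds are then served to userspace through the
sb_pool_get()/sb_port_pool_get() callbacks in struct devlink_ops.
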
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 6463bb2..17c3d37 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -16,7 +16,6 @@
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <linux/ethtool.h>
@@ -65,13 +64,6 @@
 	 * NULL if there is only one switch chip.
 	 */
 	s8		*rtable;
-
-	/*
-	 * A switch may have a GPIO line tied to its reset pin. Parse
-	 * this from the device tree, and use it before performing
-	 * switch soft reset.
-	 */
-	struct gpio_desc *reset;
 };
 
 struct dsa_platform_data {
@@ -111,6 +103,11 @@
 	enum dsa_tag_protocol	tag_protocol;
 
 	/*
+	 * Original copy of the master netdev ethtool_ops
+	 */
+	struct ethtool_ops	master_ethtool_ops;
+
+	/*
 	 * The switch and port to which the CPU is attached.
 	 */
 	s8			cpu_switch;
@@ -123,6 +120,8 @@
 };
 
 struct dsa_switch {
+	struct device *dev;
+
 	/*
 	 * Parent switch tree, and switch index.
 	 */
@@ -130,25 +129,21 @@
 	int			index;
 
 	/*
-	 * Tagging protocol understood by this switch
+	 * Give the switch driver somewhere to hang its private data
+	 * structure.
 	 */
-	enum dsa_tag_protocol	tag_protocol;
+	void *priv;
 
 	/*
 	 * Configuration data for this switch.
 	 */
-	struct dsa_chip_data	*pd;
+	struct dsa_chip_data	*cd;
 
 	/*
 	 * The used switch driver.
 	 */
 	struct dsa_switch_driver	*drv;
 
-	/*
-	 * Reference to host device to use.
-	 */
-	struct device		*master_dev;
-
 #ifdef CONFIG_NET_DSA_HWMON
 	/*
 	 * Hardware monitoring information
@@ -161,7 +156,7 @@
 	 * Slave mii_bus and devices for the individual ports.
 	 */
 	u32			dsa_port_mask;
-	u32			phys_port_mask;
+	u32			enabled_port_mask;
 	u32			phys_mii_mask;
 	struct mii_bus		*slave_mii_bus;
 	struct net_device	*ports[DSA_MAX_PORTS];
@@ -179,7 +174,7 @@
 
 static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
 {
-	return ds->phys_port_mask & (1 << p) && ds->ports[p];
+	return ds->enabled_port_mask & (1 << p) && ds->ports[p];
 }
 
 static inline u8 dsa_upstream_port(struct dsa_switch *ds)
@@ -195,7 +190,7 @@
 	if (dst->cpu_switch == ds->index)
 		return dst->cpu_port;
 	else
-		return ds->pd->rtable[dst->cpu_switch];
+		return ds->cd->rtable[dst->cpu_switch];
 }
 
 struct switchdev_trans;
@@ -207,12 +202,13 @@
 	struct list_head	list;
 
 	enum dsa_tag_protocol	tag_protocol;
-	int			priv_size;
 
 	/*
 	 * Probing and setup.
 	 */
-	char	*(*probe)(struct device *host_dev, int sw_addr);
+	const char	*(*probe)(struct device *dsa_dev,
+				  struct device *host_dev, int sw_addr,
+				  void **priv);
 	int	(*setup)(struct dsa_switch *ds);
 	int	(*set_addr)(struct dsa_switch *ds, u8 *addr);
 	u32	(*get_phy_flags)(struct dsa_switch *ds, int port);
@@ -299,8 +295,8 @@
 	int	(*port_bridge_join)(struct dsa_switch *ds, int port,
 				    struct net_device *bridge);
 	void	(*port_bridge_leave)(struct dsa_switch *ds, int port);
-	int	(*port_stp_update)(struct dsa_switch *ds, int port,
-				   u8 state);
+	void	(*port_stp_state_set)(struct dsa_switch *ds, int port,
+				      u8 state);
 
 	/*
 	 * VLAN support
@@ -310,7 +306,7 @@
 	int	(*port_vlan_prepare)(struct dsa_switch *ds, int port,
 				     const struct switchdev_obj_port_vlan *vlan,
 				     struct switchdev_trans *trans);
-	int	(*port_vlan_add)(struct dsa_switch *ds, int port,
+	void	(*port_vlan_add)(struct dsa_switch *ds, int port,
 				 const struct switchdev_obj_port_vlan *vlan,
 				 struct switchdev_trans *trans);
 	int	(*port_vlan_del)(struct dsa_switch *ds, int port,
@@ -325,7 +321,7 @@
 	int	(*port_fdb_prepare)(struct dsa_switch *ds, int port,
 				    const struct switchdev_obj_port_fdb *fdb,
 				    struct switchdev_trans *trans);
-	int	(*port_fdb_add)(struct dsa_switch *ds, int port,
+	void	(*port_fdb_add)(struct dsa_switch *ds, int port,
 				const struct switchdev_obj_port_fdb *fdb,
 				struct switchdev_trans *trans);
 	int	(*port_fdb_del)(struct dsa_switch *ds, int port,
@@ -341,7 +337,7 @@
 
 static inline void *ds_to_priv(struct dsa_switch *ds)
 {
-	return (void *)(ds + 1);
+	return ds->priv;
 }
 
 static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
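
struct dsa_switch no longer carries driver private data appended after it;
the driver allocates its own state and returns it through the new probe()
signature, and ds_to_priv() simply dereferences ds->priv. A hedged sketch of
a driver adapting to the new contract ("my_switch_priv" and the chip
detection are hypothetical):

    static const char *my_switch_probe(struct device *dsa_dev,
                                       struct device *host_dev, int sw_addr,
                                       void **priv)
    {
            struct my_switch_priv *ps;

            /* A real driver would identify the chip on host_dev here. */
            ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
            if (!ps)
                    return NULL;

            ps->sw_addr = sw_addr;
            *priv = ps;

            return "my-switch";             /* name reported to the DSA core */
    }

    static int my_switch_setup(struct dsa_switch *ds)
    {
            struct my_switch_priv *ps = ds_to_priv(ds); /* now just ds->priv */

            return my_switch_reset(ps);     /* hypothetical helper */
    }
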
diff --git a/include/net/dst.h b/include/net/dst.h
index 5c98443..6835d22 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -85,12 +85,11 @@
 #endif
 
 #ifdef CONFIG_64BIT
-	struct lwtunnel_state   *lwtstate;
 	/*
 	 * Align __refcnt to a 64 bytes alignment
 	 * (L1_CACHE_SIZE would be too much)
 	 */
-	long			__pad_to_align_refcnt[1];
+	long			__pad_to_align_refcnt[2];
 #endif
 	/*
 	 * __refcnt wants to be on a different cache line from
@@ -99,9 +98,7 @@
 	atomic_t		__refcnt;	/* client references	*/
 	int			__use;
 	unsigned long		lastuse;
-#ifndef CONFIG_64BIT
 	struct lwtunnel_state   *lwtstate;
-#endif
 	union {
 		struct dst_entry	*next;
 		struct rtable __rcu	*rt_next;
diff --git a/include/net/fq.h b/include/net/fq.h
new file mode 100644
index 0000000..268b490
--- /dev/null
+++ b/include/net/fq.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc
+ *
+ * GPL v2
+ *
+ * Based on net/sched/sch_fq_codel.c
+ */
+#ifndef __NET_SCHED_FQ_H
+#define __NET_SCHED_FQ_H
+
+struct fq_tin;
+
+/**
+ * struct fq_flow - per traffic flow queue
+ *
+ * @tin: owner of this flow. Used to manage collisions, i.e. when a packet
+ *	hashes to an index which points to a flow that is already owned by a
+ *	tin different from the one the packet is destined to. In such a case
+ *	the implementer must provide a fallback flow
+ * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++
+ *	(deficit round robin) based round robin queuing similar to the one
+ *	found in net/sched/sch_fq_codel.c
+ * @backlogchain: can be linked to other fq_flow and fq. Used to keep track of
+ *	fat flows and efficient head-dropping if packet limit is reached
+ * @queue: sk_buff queue to hold packets
+ * @backlog: number of bytes pending in the queue. The number of packets can be
+ *	found in @queue.qlen
+ * @deficit: used for DRR++
+ */
+struct fq_flow {
+	struct fq_tin *tin;
+	struct list_head flowchain;
+	struct list_head backlogchain;
+	struct sk_buff_head queue;
+	u32 backlog;
+	int deficit;
+};
+
+/**
+ * struct fq_tin - a logical container of fq_flows
+ *
+ * Used to group fq_flows into a logical aggregate. DRR++ scheme is used to
+ * pull interleaved packets out of the associated flows.
+ *
+ * @new_flows: linked list of fq_flow
+ * @old_flows: linked list of fq_flow
+ */
+struct fq_tin {
+	struct list_head new_flows;
+	struct list_head old_flows;
+	u32 backlog_bytes;
+	u32 backlog_packets;
+	u32 overlimit;
+	u32 collisions;
+	u32 flows;
+	u32 tx_bytes;
+	u32 tx_packets;
+};
+
+/**
+ * struct fq - main container for fair queuing purposes
+ *
+ * @backlogs: linked to fq_flows. Used to maintain fat flows for efficient
+ *	head-dropping when @backlog reaches @limit
+ * @limit: max number of packets that can be queued across all flows
+ * @backlog: number of packets queued across all flows
+ */
+struct fq {
+	struct fq_flow *flows;
+	struct list_head backlogs;
+	spinlock_t lock;
+	u32 flows_cnt;
+	u32 perturbation;
+	u32 limit;
+	u32 quantum;
+	u32 backlog;
+	u32 overlimit;
+	u32 collisions;
+};
+
+typedef struct sk_buff *fq_tin_dequeue_t(struct fq *,
+					 struct fq_tin *,
+					 struct fq_flow *flow);
+
+typedef void fq_skb_free_t(struct fq *,
+			   struct fq_tin *,
+			   struct fq_flow *,
+			   struct sk_buff *);
+
+typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
+					      struct fq_tin *,
+					      int idx,
+					      struct sk_buff *);
+
+#endif
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
new file mode 100644
index 0000000..163f3ed
--- /dev/null
+++ b/include/net/fq_impl.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc
+ *
+ * GPL v2
+ *
+ * Based on net/sched/sch_fq_codel.c
+ */
+#ifndef __NET_SCHED_FQ_IMPL_H
+#define __NET_SCHED_FQ_IMPL_H
+
+#include <net/fq.h>
+
+/* functions that are embedded into includer */
+
+static struct sk_buff *fq_flow_dequeue(struct fq *fq,
+				       struct fq_flow *flow)
+{
+	struct fq_tin *tin = flow->tin;
+	struct fq_flow *i;
+	struct sk_buff *skb;
+
+	lockdep_assert_held(&fq->lock);
+
+	skb = __skb_dequeue(&flow->queue);
+	if (!skb)
+		return NULL;
+
+	tin->backlog_bytes -= skb->len;
+	tin->backlog_packets--;
+	flow->backlog -= skb->len;
+	fq->backlog--;
+
+	if (flow->backlog == 0) {
+		list_del_init(&flow->backlogchain);
+	} else {
+		i = flow;
+
+		list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
+			if (i->backlog < flow->backlog)
+				break;
+
+		list_move_tail(&flow->backlogchain,
+			       &i->backlogchain);
+	}
+
+	return skb;
+}
+
+static struct sk_buff *fq_tin_dequeue(struct fq *fq,
+				      struct fq_tin *tin,
+				      fq_tin_dequeue_t dequeue_func)
+{
+	struct fq_flow *flow;
+	struct list_head *head;
+	struct sk_buff *skb;
+
+	lockdep_assert_held(&fq->lock);
+
+begin:
+	head = &tin->new_flows;
+	if (list_empty(head)) {
+		head = &tin->old_flows;
+		if (list_empty(head))
+			return NULL;
+	}
+
+	flow = list_first_entry(head, struct fq_flow, flowchain);
+
+	if (flow->deficit <= 0) {
+		flow->deficit += fq->quantum;
+		list_move_tail(&flow->flowchain,
+			       &tin->old_flows);
+		goto begin;
+	}
+
+	skb = dequeue_func(fq, tin, flow);
+	if (!skb) {
+		/* force a pass through old_flows to prevent starvation */
+		if ((head == &tin->new_flows) &&
+		    !list_empty(&tin->old_flows)) {
+			list_move_tail(&flow->flowchain, &tin->old_flows);
+		} else {
+			list_del_init(&flow->flowchain);
+			flow->tin = NULL;
+		}
+		goto begin;
+	}
+
+	flow->deficit -= skb->len;
+	tin->tx_bytes += skb->len;
+	tin->tx_packets++;
+
+	return skb;
+}
+
+static struct fq_flow *fq_flow_classify(struct fq *fq,
+					struct fq_tin *tin,
+					struct sk_buff *skb,
+					fq_flow_get_default_t get_default_func)
+{
+	struct fq_flow *flow;
+	u32 hash;
+	u32 idx;
+
+	lockdep_assert_held(&fq->lock);
+
+	hash = skb_get_hash_perturb(skb, fq->perturbation);
+	idx = reciprocal_scale(hash, fq->flows_cnt);
+	flow = &fq->flows[idx];
+
+	if (flow->tin && flow->tin != tin) {
+		flow = get_default_func(fq, tin, idx, skb);
+		tin->collisions++;
+		fq->collisions++;
+	}
+
+	if (!flow->tin)
+		tin->flows++;
+
+	return flow;
+}
+
+static void fq_recalc_backlog(struct fq *fq,
+			      struct fq_tin *tin,
+			      struct fq_flow *flow)
+{
+	struct fq_flow *i;
+
+	if (list_empty(&flow->backlogchain))
+		list_add_tail(&flow->backlogchain, &fq->backlogs);
+
+	i = flow;
+	list_for_each_entry_continue_reverse(i, &fq->backlogs,
+					     backlogchain)
+		if (i->backlog > flow->backlog)
+			break;
+
+	list_move(&flow->backlogchain, &i->backlogchain);
+}
+
+static void fq_tin_enqueue(struct fq *fq,
+			   struct fq_tin *tin,
+			   struct sk_buff *skb,
+			   fq_skb_free_t free_func,
+			   fq_flow_get_default_t get_default_func)
+{
+	struct fq_flow *flow;
+
+	lockdep_assert_held(&fq->lock);
+
+	flow = fq_flow_classify(fq, tin, skb, get_default_func);
+
+	flow->tin = tin;
+	flow->backlog += skb->len;
+	tin->backlog_bytes += skb->len;
+	tin->backlog_packets++;
+	fq->backlog++;
+
+	fq_recalc_backlog(fq, tin, flow);
+
+	if (list_empty(&flow->flowchain)) {
+		flow->deficit = fq->quantum;
+		list_add_tail(&flow->flowchain,
+			      &tin->new_flows);
+	}
+
+	__skb_queue_tail(&flow->queue, skb);
+
+	if (fq->backlog > fq->limit) {
+		flow = list_first_entry_or_null(&fq->backlogs,
+						struct fq_flow,
+						backlogchain);
+		if (!flow)
+			return;
+
+		skb = fq_flow_dequeue(fq, flow);
+		if (!skb)
+			return;
+
+		free_func(fq, flow->tin, flow, skb);
+
+		flow->tin->overlimit++;
+		fq->overlimit++;
+	}
+}
+
+static void fq_flow_reset(struct fq *fq,
+			  struct fq_flow *flow,
+			  fq_skb_free_t free_func)
+{
+	struct sk_buff *skb;
+
+	while ((skb = fq_flow_dequeue(fq, flow)))
+		free_func(fq, flow->tin, flow, skb);
+
+	if (!list_empty(&flow->flowchain))
+		list_del_init(&flow->flowchain);
+
+	if (!list_empty(&flow->backlogchain))
+		list_del_init(&flow->backlogchain);
+
+	flow->tin = NULL;
+
+	WARN_ON_ONCE(flow->backlog);
+}
+
+static void fq_tin_reset(struct fq *fq,
+			 struct fq_tin *tin,
+			 fq_skb_free_t free_func)
+{
+	struct list_head *head;
+	struct fq_flow *flow;
+
+	for (;;) {
+		head = &tin->new_flows;
+		if (list_empty(head)) {
+			head = &tin->old_flows;
+			if (list_empty(head))
+				break;
+		}
+
+		flow = list_first_entry(head, struct fq_flow, flowchain);
+		fq_flow_reset(fq, flow, free_func);
+	}
+
+	WARN_ON_ONCE(tin->backlog_bytes);
+	WARN_ON_ONCE(tin->backlog_packets);
+}
+
+static void fq_flow_init(struct fq_flow *flow)
+{
+	INIT_LIST_HEAD(&flow->flowchain);
+	INIT_LIST_HEAD(&flow->backlogchain);
+	__skb_queue_head_init(&flow->queue);
+}
+
+static void fq_tin_init(struct fq_tin *tin)
+{
+	INIT_LIST_HEAD(&tin->new_flows);
+	INIT_LIST_HEAD(&tin->old_flows);
+}
+
+static int fq_init(struct fq *fq, int flows_cnt)
+{
+	int i;
+
+	memset(fq, 0, sizeof(fq[0]));
+	INIT_LIST_HEAD(&fq->backlogs);
+	spin_lock_init(&fq->lock);
+	fq->flows_cnt = max_t(u32, flows_cnt, 1);
+	fq->perturbation = prandom_u32();
+	fq->quantum = 300;
+	fq->limit = 8192;
+
+	fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
+	if (!fq->flows)
+		return -ENOMEM;
+
+	for (i = 0; i < fq->flows_cnt; i++)
+		fq_flow_init(&fq->flows[i]);
+
+	return 0;
+}
+
+static void fq_reset(struct fq *fq,
+		     fq_skb_free_t free_func)
+{
+	int i;
+
+	for (i = 0; i < fq->flows_cnt; i++)
+		fq_flow_reset(fq, &fq->flows[i], free_func);
+
+	kfree(fq->flows);
+	fq->flows = NULL;
+}
+
+#endif
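
fq.h and fq_impl.h together give a header-only fair-queuing core: packets are
hashed to fq_flows, flows are grouped into fq_tins, and DRR++ picks the next
flow to serve. The includer supplies free/default-flow/dequeue callbacks. A
rough sketch under those assumptions (my_ctx and the my_* callbacks are
hypothetical; locking follows fq->lock as the headers expect):

    struct my_ctx {
            struct fq fq;
            struct fq_tin tin;
            struct fq_flow fallback;        /* used on hash collisions */
    };

    static void my_free(struct fq *fq, struct fq_tin *tin,
                        struct fq_flow *flow, struct sk_buff *skb)
    {
            dev_kfree_skb(skb);
    }

    static struct fq_flow *my_default_flow(struct fq *fq, struct fq_tin *tin,
                                           int idx, struct sk_buff *skb)
    {
            struct my_ctx *ctx = container_of(fq, struct my_ctx, fq);

            return &ctx->fallback;
    }

    static struct sk_buff *my_flow_dequeue(struct fq *fq, struct fq_tin *tin,
                                           struct fq_flow *flow)
    {
            return fq_flow_dequeue(fq, flow);
    }

    static int my_init(struct my_ctx *ctx)
    {
            int err = fq_init(&ctx->fq, 1024);      /* 1024 hashed flows */

            if (err)
                    return err;
            fq_tin_init(&ctx->tin);
            fq_flow_init(&ctx->fallback);
            return 0;
    }

    static void my_enqueue(struct my_ctx *ctx, struct sk_buff *skb)
    {
            spin_lock_bh(&ctx->fq.lock);
            fq_tin_enqueue(&ctx->fq, &ctx->tin, skb, my_free, my_default_flow);
            spin_unlock_bh(&ctx->fq.lock);
    }

    static struct sk_buff *my_next(struct my_ctx *ctx)
    {
            struct sk_buff *skb;

            spin_lock_bh(&ctx->fq.lock);
            skb = fq_tin_dequeue(&ctx->fq, &ctx->tin, my_flow_dequeue);
            spin_unlock_bh(&ctx->fq.lock);
            return skb;
    }
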
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index cbafa37..610cd39 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -19,17 +19,19 @@
 	/* Backward compatibility */
 	int               compat_tc_stats;
 	int               compat_xstats;
+	int               padattr;
 	void *            xstats;
 	int               xstats_len;
 	struct tc_stats   tc_stats;
 };
 
 int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
-			  struct gnet_dump *d);
+			  struct gnet_dump *d, int padattr);
 
 int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
 				 int tc_stats_type, int xstats_type,
-				 spinlock_t *lock, struct gnet_dump *d);
+				 spinlock_t *lock, struct gnet_dump *d,
+				 int padattr);
 
 int gnet_stats_copy_basic(struct gnet_dump *d,
 			  struct gnet_stats_basic_cpu __percpu *cpu,
diff --git a/include/net/geneve.h b/include/net/geneve.h
index e6c23dc..cb544a5 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,13 +62,11 @@
 	struct geneve_opt options[];
 };
 
-#if IS_ENABLED(CONFIG_GENEVE)
-void geneve_get_rx_port(struct net_device *netdev);
-#else
 static inline void geneve_get_rx_port(struct net_device *netdev)
 {
+	ASSERT_RTNL();
+	call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_GENEVE, netdev);
 }
-#endif
 
 #ifdef CONFIG_INET
 struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
diff --git a/include/net/gre.h b/include/net/gre.h
index 97eafdc..5dce30a 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -25,4 +25,108 @@
 
 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
 				       u8 name_assign_type);
+int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+		     bool *csum_err, __be16 proto);
+
+static inline int gre_calc_hlen(__be16 o_flags)
+{
+	int addend = 4;
+
+	if (o_flags & TUNNEL_CSUM)
+		addend += 4;
+	if (o_flags & TUNNEL_KEY)
+		addend += 4;
+	if (o_flags & TUNNEL_SEQ)
+		addend += 4;
+	return addend;
+}
+
+static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+	__be16 tflags = 0;
+
+	if (flags & GRE_CSUM)
+		tflags |= TUNNEL_CSUM;
+	if (flags & GRE_ROUTING)
+		tflags |= TUNNEL_ROUTING;
+	if (flags & GRE_KEY)
+		tflags |= TUNNEL_KEY;
+	if (flags & GRE_SEQ)
+		tflags |= TUNNEL_SEQ;
+	if (flags & GRE_STRICT)
+		tflags |= TUNNEL_STRICT;
+	if (flags & GRE_REC)
+		tflags |= TUNNEL_REC;
+	if (flags & GRE_VERSION)
+		tflags |= TUNNEL_VERSION;
+
+	return tflags;
+}
+
+static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
+{
+	__be16 flags = 0;
+
+	if (tflags & TUNNEL_CSUM)
+		flags |= GRE_CSUM;
+	if (tflags & TUNNEL_ROUTING)
+		flags |= GRE_ROUTING;
+	if (tflags & TUNNEL_KEY)
+		flags |= GRE_KEY;
+	if (tflags & TUNNEL_SEQ)
+		flags |= GRE_SEQ;
+	if (tflags & TUNNEL_STRICT)
+		flags |= GRE_STRICT;
+	if (tflags & TUNNEL_REC)
+		flags |= GRE_REC;
+	if (tflags & TUNNEL_VERSION)
+		flags |= GRE_VERSION;
+
+	return flags;
+}
+
+static inline __sum16 gre_checksum(struct sk_buff *skb)
+{
+	__wsum csum;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		csum = lco_csum(skb);
+	else
+		csum = skb_checksum(skb, 0, skb->len, 0);
+	return csum_fold(csum);
+}
+
+static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
+				    __be16 flags, __be16 proto,
+				    __be32 key, __be32 seq)
+{
+	struct gre_base_hdr *greh;
+
+	skb_push(skb, hdr_len);
+
+	skb_reset_transport_header(skb);
+	greh = (struct gre_base_hdr *)skb->data;
+	greh->flags = gre_tnl_flags_to_gre_flags(flags);
+	greh->protocol = proto;
+
+	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+		if (flags & TUNNEL_SEQ) {
+			*ptr = seq;
+			ptr--;
+		}
+		if (flags & TUNNEL_KEY) {
+			*ptr = key;
+			ptr--;
+		}
+		if (flags & TUNNEL_CSUM &&
+		    !(skb_shinfo(skb)->gso_type &
+		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+			*ptr = 0;
+			*(__sum16 *)ptr = gre_checksum(skb);
+		}
+	}
+}
+
 #endif
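
The new gre.h inlines translate between GRE_* wire flags and TUNNEL_* flags,
compute the header length, and push a base GRE header in front of a packet.
A minimal sketch, assuming the skb already has the required headroom and
using an illustrative key:

    static void my_push_gre(struct sk_buff *skb, __be16 proto, __be32 key)
    {
            __be16 flags = TUNNEL_KEY | TUNNEL_CSUM;
            int hlen = gre_calc_hlen(flags);    /* 4 base + 4 key + 4 csum */

            /* seq is unused here because TUNNEL_SEQ is not set */
            gre_build_header(skb, hlen, flags, proto, key, 0);
    }
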
diff --git a/include/net/gtp.h b/include/net/gtp.h
new file mode 100644
index 0000000..894a37b
--- /dev/null
+++ b/include/net/gtp.h
@@ -0,0 +1,34 @@
+#ifndef _GTP_H_
+#define _GTP_H_
+
+/* General GTP protocol related definitions. */
+
+#define GTP0_PORT	3386
+#define GTP1U_PORT	2152
+
+#define GTP_TPDU	255
+
+struct gtp0_header {	/* According to GSM TS 09.60. */
+	__u8	flags;
+	__u8	type;
+	__be16	length;
+	__be16	seq;
+	__be16	flow;
+	__u8	number;
+	__u8	spare[3];
+	__be64	tid;
+} __attribute__ ((packed));
+
+struct gtp1_header {	/* According to 3GPP TS 29.060. */
+	__u8	flags;
+	__u8	type;
+	__be16	length;
+	__be32	tid;
+} __attribute__ ((packed));
+
+#define GTP1_F_NPDU	0x01
+#define GTP1_F_SEQ	0x02
+#define GTP1_F_EXTHDR	0x04
+#define GTP1_F_MASK	0x07
+
+#endif
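
gtp.h only defines the on-the-wire GTP formats. A hedged sketch of validating
a GTPv1-U header, assuming skb->data already points at it (error paths are
elided):

    static bool my_gtp1u_check(struct sk_buff *skb)
    {
            struct gtp1_header *gtp1;

            if (!pskb_may_pull(skb, sizeof(*gtp1)))
                    return false;

            gtp1 = (struct gtp1_header *)skb->data;

            /* Version lives in bits 5-7 of flags and must be 1. */
            if ((gtp1->flags >> 5) != 1)
                    return false;

            return gtp1->type == GTP_TPDU;  /* only T-PDUs carry user data */
    }
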
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 970028e..3ef2743 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -30,9 +30,9 @@
 
 extern const struct icmp_err icmp_err_convert[];
 #define ICMP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.icmp_statistics, field)
-#define ICMP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
+#define __ICMP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.icmp_statistics, field)
 #define ICMPMSGOUT_INC_STATS(net, field)	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field)	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGIN_INC_STATS(net, field)		SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
 
 struct dst_entry;
 struct net_proto_family;
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 28332bd..b87beca 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -66,13 +66,15 @@
 					  const __be16 sport,
 					  const struct in6_addr *daddr,
 					  const u16 hnum,
-					  const int dif)
+					  const int dif,
+					  bool *refcounted)
 {
 	struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr,
 						sport, daddr, hnum, dif);
+	*refcounted = true;
 	if (sk)
 		return sk;
-
+	*refcounted = false;
 	return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
 				     daddr, hnum, dif);
 }
@@ -81,17 +83,19 @@
 					      struct sk_buff *skb, int doff,
 					      const __be16 sport,
 					      const __be16 dport,
-					      int iif)
+					      int iif,
+					      bool *refcounted)
 {
 	struct sock *sk = skb_steal_sock(skb);
 
+	*refcounted = true;
 	if (sk)
 		return sk;
 
 	return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
 			      doff, &ipv6_hdr(skb)->saddr, sport,
 			      &ipv6_hdr(skb)->daddr, ntohs(dport),
-			      iif);
+			      iif, refcounted);
 }
 
 struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 50f635c..0574493 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -100,14 +100,10 @@
 
 /*
  * Sockets can be hashed in established or listening table
- * We must use different 'nulls' end-of-chain value for listening
- * hash table, or we might find a socket that was closed and
- * reallocated/inserted into established hash table
  */
-#define LISTENING_NULLS_BASE (1U << 29)
 struct inet_listen_hashbucket {
 	spinlock_t		lock;
-	struct hlist_nulls_head	head;
+	struct hlist_head	head;
 };
 
 /* This is for listening sockets, thus all sockets which possess wildcards. */
@@ -280,11 +276,8 @@
 	 net_eq(sock_net(__sk), (__net)))
 #endif /* 64-bit arch */
 
-/*
- * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
+/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
  * not check it for lookups anymore, thanks Alexey. -DaveM
- *
- * Local BH must be disabled here.
  */
 struct sock *__inet_lookup_established(struct net *net,
 				       struct inet_hashinfo *hashinfo,
@@ -307,14 +300,20 @@
 					 struct sk_buff *skb, int doff,
 					 const __be32 saddr, const __be16 sport,
 					 const __be32 daddr, const __be16 dport,
-					 const int dif)
+					 const int dif,
+					 bool *refcounted)
 {
 	u16 hnum = ntohs(dport);
-	struct sock *sk = __inet_lookup_established(net, hashinfo,
-				saddr, sport, daddr, hnum, dif);
+	struct sock *sk;
 
-	return sk ? : __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
-					     sport, daddr, hnum, dif);
+	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
+				       daddr, hnum, dif);
+	*refcounted = true;
+	if (sk)
+		return sk;
+	*refcounted = false;
+	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
+				      sport, daddr, hnum, dif);
 }
 
 static inline struct sock *inet_lookup(struct net *net,
@@ -325,12 +324,13 @@
 				       const int dif)
 {
 	struct sock *sk;
+	bool refcounted;
 
-	local_bh_disable();
 	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
-			   dport, dif);
-	local_bh_enable();
+			   dport, dif, &refcounted);
 
+	if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
+		sk = NULL;
 	return sk;
 }
 
@@ -338,17 +338,20 @@
 					     struct sk_buff *skb,
 					     int doff,
 					     const __be16 sport,
-					     const __be16 dport)
+					     const __be16 dport,
+					     bool *refcounted)
 {
 	struct sock *sk = skb_steal_sock(skb);
 	const struct iphdr *iph = ip_hdr(skb);
 
+	*refcounted = true;
 	if (sk)
 		return sk;
-	else
-		return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
-				     doff, iph->saddr, sport,
-				     iph->daddr, dport, inet_iif(skb));
+
+	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
+			     doff, iph->saddr, sport,
+			     iph->daddr, dport, inet_iif(skb),
+			     refcounted);
 }
 
 u32 sk_ehashfn(const struct sock *sk);
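
The socket lookup helpers now tell the caller, via *refcounted, whether they
took a reference (established sockets) or returned an RCU-protected listener
without one. A sketch of the resulting caller pattern, mirroring what
inet_lookup() does in the hunk above ("my_handle_skb" is hypothetical):

    static void my_rx(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
                      int doff, __be16 sport, __be16 dport)
    {
            bool refcounted;
            struct sock *sk;

            sk = __inet_lookup_skb(hashinfo, skb, doff, sport, dport,
                                   &refcounted);
            if (!sk)
                    return;

            my_handle_skb(sk, skb);         /* hypothetical consumer */

            if (refcounted)
                    sock_put(sk);           /* drop only if a ref was taken */
    }
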
diff --git a/include/net/ip.h b/include/net/ip.h
index fad74d3..37165fb 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -36,6 +36,7 @@
 struct sock;
 
 struct inet_skb_parm {
+	int			iif;
 	struct ip_options	opt;		/* Compiled IP options		*/
 	unsigned char		flags;
 
@@ -56,6 +57,7 @@
 }
 
 struct ipcm_cookie {
+	struct sockcm_cookie	sockc;
 	__be32			addr;
 	int			oif;
 	struct ip_options_rcu	*opt;
@@ -186,17 +188,15 @@
 			   unsigned int len);
 
 #define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
-#define IP_INC_STATS_BH(net, field)	SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
+#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
 #define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
 #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
 #define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
-#define NET_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
-#define NET_INC_STATS_USER(net, field) 	SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
+#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
 #define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
+#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
 
 u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
@@ -550,7 +550,7 @@
 
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
-int ip_cmsg_send(struct net *net, struct msghdr *msg,
+int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
 		 struct ipcm_cookie *ipc, bool allow_ipv6);
 int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 		  unsigned int optlen);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 295d291..54c7794 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -101,6 +101,9 @@
 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 				    const struct in6_addr *addr, bool anycast);
 
+struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
+			       int flags);
+
 /*
  *	support functions for ND
  *
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 499a707..fb9e015 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -42,6 +42,7 @@
 	struct __ip6_tnl_parm parms;	/* tunnel configuration parameters */
 	struct flowi fl;	/* flowi template for xmit */
 	struct dst_cache dst_cache;	/* cached dst */
+	struct gro_cells gro_cells;
 
 	int err_count;
 	unsigned long err_time;
@@ -49,8 +50,10 @@
 	/* These fields used only by GRE */
 	__u32 i_seqno;	/* The last seen seqno	*/
 	__u32 o_seqno;	/* The last output seqno */
-	int hlen;       /* Precalculated GRE header length */
+	int hlen;       /* tun_hlen + encap_hlen */
+	int tun_hlen;	/* Precalculated header length */
 	int mlink;
+
 };
 
 /* Tunnel encapsulation limit destination sub-option */
@@ -63,13 +66,19 @@
 
 int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
 		const struct in6_addr *raddr);
+int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+		const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+		bool log_ecn_error);
 int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
 		     const struct in6_addr *raddr);
+int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto);
 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
 __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
 			     const struct in6_addr *raddr);
 struct net *ip6_tnl_get_link_net(const struct net_device *dev);
 int ip6_tnl_get_iflink(const struct net_device *dev);
+int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
 
 #ifdef CONFIG_INET
 static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 56050f9..d916b43 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -105,24 +105,23 @@
 	struct net_device	*dev;
 	struct net		*net;	/* netns for packet i/o */
 
-	int		err_count;	/* Number of arrived ICMP errors */
 	unsigned long	err_time;	/* Time when the last ICMP error
 					 * arrived */
+	int		err_count;	/* Number of arrived ICMP errors */
 
 	/* These four fields used only by GRE */
 	u32		i_seqno;	/* The last seen seqno	*/
 	u32		o_seqno;	/* The last output seqno */
 	int		tun_hlen;	/* Precalculated header length */
-	int		mlink;
 
 	struct dst_cache dst_cache;
 
 	struct ip_tunnel_parm parms;
 
+	int		mlink;
 	int		encap_hlen;	/* Encap header length (FOU,GUE) */
-	struct ip_tunnel_encap encap;
-
 	int		hlen;		/* tun_hlen + encap_hlen */
+	struct ip_tunnel_encap encap;
 
 	/* for SIT */
 #ifdef CONFIG_IPV6_SIT_6RD
@@ -161,6 +160,7 @@
 
 #define PACKET_RCVD	0
 #define PACKET_REJECT	1
+#define PACKET_NEXT	2
 
 #define IP_TNL_HASH_BITS   7
 #define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)
@@ -295,15 +295,22 @@
 	return INET_ECN_encapsulate(tos, inner);
 }
 
-int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto,
-			 bool xnet);
+int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
+			   __be16 inner_proto, bool raw_proto, bool xnet);
+
+static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
+				       __be16 inner_proto, bool xnet)
+{
+	return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
+}
+
 void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 		   __be32 src, __be32 dst, u8 proto,
 		   u8 tos, u8 ttl, __be16 df, bool xnet);
 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
 					     gfp_t flags);
 
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
 
 static inline int iptunnel_pull_offloads(struct sk_buff *skb)
 {
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a6cc576..af4c10e 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -731,6 +731,12 @@
 	u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
 			   bool inverse);
 	int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
+	/* create connections for real-server outgoing packets */
+	struct ip_vs_conn* (*conn_out)(struct ip_vs_service *svc,
+				       struct ip_vs_dest *dest,
+				       struct sk_buff *skb,
+				       const struct ip_vs_iphdr *iph,
+				       __be16 dport, __be16 cport);
 };
 
 /* The application module object (a.k.a. app incarnation) */
@@ -874,6 +880,7 @@
 	/* Service counters */
 	atomic_t		ftpsvc_counter;
 	atomic_t		nullsvc_counter;
+	atomic_t		conn_out_counter;
 
 #ifdef CONFIG_SYSCTL
 	/* 1/rate drop and drop-entry variables */
@@ -1147,6 +1154,12 @@
  */
 const char *ip_vs_proto_name(unsigned int proto);
 void ip_vs_init_hash_table(struct list_head *table, int rows);
+struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
+				      struct ip_vs_dest *dest,
+				      struct sk_buff *skb,
+				      const struct ip_vs_iphdr *iph,
+				      __be16 dport,
+				      __be16 cport);
 #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
 
 #define IP_VS_APP_TYPE_FTP	1
@@ -1378,6 +1391,10 @@
 bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
 			    const union nf_inet_addr *daddr, __be16 dport);
 
+struct ip_vs_dest *
+ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
+			const union nf_inet_addr *daddr, __be16 dport);
+
 int ip_vs_use_count_inc(void);
 void ip_vs_use_count_dec(void);
 int ip_vs_register_nl_ioctl(void);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index d0aeb97..11a0452 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -121,21 +121,21 @@
 extern int sysctl_mld_max_msf;
 extern int sysctl_mld_qrv;
 
-#define _DEVINC(net, statname, modifier, idev, field)			\
+#define _DEVINC(net, statname, mod, idev, field)			\
 ({									\
 	struct inet6_dev *_idev = (idev);				\
 	if (likely(_idev != NULL))					\
-		SNMP_INC_STATS##modifier((_idev)->stats.statname, (field)); \
-	SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+		mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
+	mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
 })
 
 /* per device counters are atomic_long_t */
-#define _DEVINCATOMIC(net, statname, modifier, idev, field)		\
+#define _DEVINCATOMIC(net, statname, mod, idev, field)			\
 ({									\
 	struct inet6_dev *_idev = (idev);				\
 	if (likely(_idev != NULL))					\
 		SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
-	SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+	mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
 })
 
 /* per device and per net counters are atomic_long_t */
@@ -147,46 +147,44 @@
 	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
 })
 
-#define _DEVADD(net, statname, modifier, idev, field, val)		\
+#define _DEVADD(net, statname, mod, idev, field, val)			\
 ({									\
 	struct inet6_dev *_idev = (idev);				\
 	if (likely(_idev != NULL))					\
-		SNMP_ADD_STATS##modifier((_idev)->stats.statname, (field), (val)); \
-	SNMP_ADD_STATS##modifier((net)->mib.statname##_statistics, (field), (val));\
+		mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
+	mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
 })
 
-#define _DEVUPD(net, statname, modifier, idev, field, val)		\
+#define _DEVUPD(net, statname, mod, idev, field, val)			\
 ({									\
 	struct inet6_dev *_idev = (idev);				\
 	if (likely(_idev != NULL))					\
-		SNMP_UPD_PO_STATS##modifier((_idev)->stats.statname, field, (val)); \
-	SNMP_UPD_PO_STATS##modifier((net)->mib.statname##_statistics, field, (val));\
+		mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
+	mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
 })
 
 /* MIBs */
 
 #define IP6_INC_STATS(net, idev,field)		\
-		_DEVINC(net, ipv6, 64, idev, field)
-#define IP6_INC_STATS_BH(net, idev,field)	\
-		_DEVINC(net, ipv6, 64_BH, idev, field)
+		_DEVINC(net, ipv6, , idev, field)
+#define __IP6_INC_STATS(net, idev,field)	\
+		_DEVINC(net, ipv6, __, idev, field)
 #define IP6_ADD_STATS(net, idev,field,val)	\
-		_DEVADD(net, ipv6, 64, idev, field, val)
-#define IP6_ADD_STATS_BH(net, idev,field,val)	\
-		_DEVADD(net, ipv6, 64_BH, idev, field, val)
+		_DEVADD(net, ipv6, , idev, field, val)
+#define __IP6_ADD_STATS(net, idev,field,val)	\
+		_DEVADD(net, ipv6, __, idev, field, val)
 #define IP6_UPD_PO_STATS(net, idev,field,val)   \
-		_DEVUPD(net, ipv6, 64, idev, field, val)
-#define IP6_UPD_PO_STATS_BH(net, idev,field,val)   \
-		_DEVUPD(net, ipv6, 64_BH, idev, field, val)
+		_DEVUPD(net, ipv6, , idev, field, val)
+#define __IP6_UPD_PO_STATS(net, idev,field,val)   \
+		_DEVUPD(net, ipv6, __, idev, field, val)
 #define ICMP6_INC_STATS(net, idev, field)	\
 		_DEVINCATOMIC(net, icmpv6, , idev, field)
-#define ICMP6_INC_STATS_BH(net, idev, field)	\
-		_DEVINCATOMIC(net, icmpv6, _BH, idev, field)
+#define __ICMP6_INC_STATS(net, idev, field)	\
+		_DEVINCATOMIC(net, icmpv6, __, idev, field)
 
 #define ICMP6MSGOUT_INC_STATS(net, idev, field)		\
 	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field)	\
-	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGIN_INC_STATS_BH(net, idev, field)	\
+#define ICMP6MSGIN_INC_STATS(net, idev, field)	\
 	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
 
 struct ip6_ra_chain {
@@ -253,6 +251,13 @@
 	struct rcu_head			rcu;
 };
 
+struct ipcm6_cookie {
+	__s16 hlimit;
+	__s16 tclass;
+	__s8  dontfrag;
+	struct ipv6_txoptions *opt;
+};
+
 static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
 {
 	struct ipv6_txoptions *opt;
@@ -865,9 +870,10 @@
 int ip6_append_data(struct sock *sk,
 		    int getfrag(void *from, char *to, int offset, int len,
 				int odd, struct sk_buff *skb),
-		    void *from, int length, int transhdrlen, int hlimit,
-		    int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
-		    struct rt6_info *rt, unsigned int flags, int dontfrag);
+		    void *from, int length, int transhdrlen,
+		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+		    struct rt6_info *rt, unsigned int flags,
+		    const struct sockcm_cookie *sockc);
 
 int ip6_push_pending_frames(struct sock *sk);
 
@@ -882,9 +888,9 @@
 			     int getfrag(void *from, char *to, int offset,
 					 int len, int odd, struct sk_buff *skb),
 			     void *from, int length, int transhdrlen,
-			     int hlimit, int tclass, struct ipv6_txoptions *opt,
-			     struct flowi6 *fl6, struct rt6_info *rt,
-			     unsigned int flags, int dontfrag);
+			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+			     struct rt6_info *rt, unsigned int flags,
+			     const struct sockcm_cookie *sockc);
 
 static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
 {
@@ -959,6 +965,8 @@
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
 				 int addr_len);
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
+void ip6_datagram_release_cb(struct sock *sk);
 
 int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
 		    int *addr_len);
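
The ip6_append_data() signature change above (and the matching change to the
second append helper below it) bundles the former hlimit/tclass/opt/dontfrag
arguments into the new struct ipcm6_cookie, alongside the sockcm_cookie that
carries cmsg-derived data such as tsflags. A sketch of how a sender might
initialise the cookie before the call, following the existing convention that
-1 means "fall back to the socket or route default"; the helper name is
illustrative:

/* Sketch: fill the new cookie the way a datagram sendmsg path would. */
static void example_init_ipcm6(struct ipcm6_cookie *ipc6,
			       struct sockcm_cookie *sockc,
			       const struct sock *sk)
{
	ipc6->hlimit = -1;	/* -1: use socket/route default hop limit */
	ipc6->tclass = -1;	/* -1: use the socket's default traffic class */
	ipc6->dontfrag = -1;	/* -1: use the per-socket dontfrag setting */
	ipc6->opt = NULL;	/* may later point at tx options from cmsgs */

	sockc->tsflags = sk->sk_tsflags;
}

The filled structures are then passed as the ipc6 and sockc arguments of
ip6_append_data().
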
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index c43a9c7..374388d 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -25,6 +25,8 @@
 
 struct l3mdev_ops {
 	u32		(*l3mdev_fib_table)(const struct net_device *dev);
+	struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
+					  struct sk_buff *skb, u16 proto);
 
 	/* IPv4 ops */
 	struct rtable *	(*l3mdev_get_rtable)(const struct net_device *dev,
@@ -130,51 +132,36 @@
 	return rc;
 }
 
-static inline int l3mdev_get_saddr(struct net *net, int ifindex,
-				   struct flowi4 *fl4)
+int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4);
+
+struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6);
+
+static inline
+struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
 {
-	struct net_device *dev;
-	int rc = 0;
+	struct net_device *master = NULL;
 
-	if (ifindex) {
+	if (netif_is_l3_slave(skb->dev))
+		master = netdev_master_upper_dev_get_rcu(skb->dev);
+	else if (netif_is_l3_master(skb->dev))
+		master = skb->dev;
 
-		rcu_read_lock();
+	if (master && master->l3mdev_ops->l3mdev_l3_rcv)
+		skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto);
 
-		dev = dev_get_by_index_rcu(net, ifindex);
-		if (dev && netif_is_l3_master(dev) &&
-		    dev->l3mdev_ops->l3mdev_get_saddr) {
-			rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
-		}
-
-		rcu_read_unlock();
-	}
-
-	return rc;
-}
-
-static inline struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
-						   const struct flowi6 *fl6)
-{
-	if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rt6_dst)
-		return dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
-
-	return NULL;
+	return skb;
 }
 
 static inline
-struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
-					const struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
 {
-	struct dst_entry *dst = NULL;
-	struct net_device *dev;
+	return l3mdev_l3_rcv(skb, AF_INET);
+}
 
-	dev = dev_get_by_index(net, fl6->flowi6_oif);
-	if (dev) {
-		dst = l3mdev_get_rt6_dst(dev, fl6);
-		dev_put(dev);
-	}
-
-	return dst;
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+{
+	return l3mdev_l3_rcv(skb, AF_INET6);
 }
 
 #else
@@ -233,16 +220,21 @@
 }
 
 static inline
-struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
-				     const struct flowi6 *fl6)
+struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6)
 {
 	return NULL;
 }
+
 static inline
-struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
-					const struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
 {
-	return NULL;
+	return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+{
+	return skb;
 }
 #endif
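
Besides moving the l3mdev_get_saddr()/l3mdev_get_rt6_dst() bodies out of
line, the new l3mdev_l3_rcv() hook and its l3mdev_ip_rcv()/l3mdev_ip6_rcv()
wrappers let an L3 master device such as a VRF see ingress packets early and
either re-target or consume them. A sketch of how a receive path might use
the IPv4 wrapper; the function shown is illustrative, not from this patch:

static int example_ip_rcv_finish(struct sk_buff *skb)
{
	/* Let the L3 master device process the skb first; a NULL return
	 * means the master consumed it. */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	/* ... continue with normal IPv4 input handling ... */
	return NET_RX_SUCCESS;
}
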
 
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 0c09da3..be30b05 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -291,7 +291,7 @@
  * @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
  * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface
  * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS)
- *	changed (currently only in P2P client mode, GO mode will be later)
+ *	changed
  * @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available:
  *	currently dtim_period only is under consideration.
  * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed,
@@ -526,6 +526,9 @@
  *	userspace), whereas TPC is disabled if %txpower_type is set to
  *	NL80211_TX_POWER_FIXED (use value configured from userspace)
  * @p2p_noa_attr: P2P NoA attribute for P2P powersave
+ * @allow_p2p_go_ps: indication for AP or P2P GO interface, whether it's allowed
+ *	to use P2P PS mechanism or not. AP/P2P GO is not allowed to use P2P PS
+ *	if it has associated clients without P2P PS support.
  */
 struct ieee80211_bss_conf {
 	const u8 *bssid;
@@ -546,7 +549,7 @@
 	u8 sync_dtim_count;
 	u32 basic_rates;
 	struct ieee80211_rate *beacon_rate;
-	int mcast_rate[IEEE80211_NUM_BANDS];
+	int mcast_rate[NUM_NL80211_BANDS];
 	u16 ht_operation_mode;
 	s32 cqm_rssi_thold;
 	u32 cqm_rssi_hyst;
@@ -563,6 +566,7 @@
 	int txpower;
 	enum nl80211_tx_power_setting txpower_type;
 	struct ieee80211_p2p_noa_attr p2p_noa_attr;
+	bool allow_p2p_go_ps;
 };
 
 /**
@@ -709,6 +713,7 @@
  * @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll
  *	frame (PS-Poll or uAPSD).
  * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
+ * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
  *
  * These flags are used in tx_info->control.flags.
  */
@@ -716,6 +721,7 @@
 	IEEE80211_TX_CTRL_PORT_CTRL_PROTO	= BIT(0),
 	IEEE80211_TX_CTRL_PS_RESPONSE		= BIT(1),
 	IEEE80211_TX_CTRL_RATE_INJECT		= BIT(2),
+	IEEE80211_TX_CTRL_AMSDU			= BIT(3),
 };
 
 /*
@@ -932,8 +938,8 @@
  * @common_ie_len: length of the common_ies
  */
 struct ieee80211_scan_ies {
-	const u8 *ies[IEEE80211_NUM_BANDS];
-	size_t len[IEEE80211_NUM_BANDS];
+	const u8 *ies[NUM_NL80211_BANDS];
+	size_t len[NUM_NL80211_BANDS];
 	const u8 *common_ies;
 	size_t common_ie_len;
 };
@@ -1001,6 +1007,8 @@
  *	flag indicates that the PN was verified for replay protection.
  *	Note that this flag is also currently only supported when a frame
  *	is also decrypted (ie. @RX_FLAG_DECRYPTED must be set)
+ * @RX_FLAG_DUP_VALIDATED: The driver should set this flag if it did
+ *	de-duplication by itself.
  * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
  *	the frame.
  * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
@@ -1034,6 +1042,8 @@
  *	on this subframe
  * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
  *	is stored in the @ampdu_delimiter_crc field)
+ * @RX_FLAG_MIC_STRIPPED: The mic was stripped of this packet. Decryption was
+ *	done by the hardware
  * @RX_FLAG_LDPC: LDPC was used
  * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
  *	processing it in any regular way.
@@ -1058,6 +1068,9 @@
  * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific
  *	radiotap data in the skb->data (before the frame) as described by
  *	the &struct ieee80211_vendor_radiotap.
+ * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before.
+ *	This is used for AMSDU subframes which can have the same PN as
+ *	the first subframe.
  */
 enum mac80211_rx_flags {
 	RX_FLAG_MMIC_ERROR		= BIT(0),
@@ -1091,6 +1104,8 @@
 	RX_FLAG_5MHZ			= BIT(29),
 	RX_FLAG_AMSDU_MORE		= BIT(30),
 	RX_FLAG_RADIOTAP_VENDOR_DATA	= BIT(31),
+	RX_FLAG_MIC_STRIPPED		= BIT_ULL(32),
+	RX_FLAG_ALLOW_SAME_PN		= BIT_ULL(33),
 };
 
 #define RX_FLAG_STBC_SHIFT		26
@@ -1120,6 +1135,8 @@
  *
  * @mactime: value in microseconds of the 64-bit Time Synchronization Function
  * 	(TSF) timer when the first data symbol (MPDU) arrived at the hardware.
+ * @boottime_ns: CLOCK_BOOTTIME timestamp the frame was received at, this is
+ *	needed only for beacons and probe responses that update the scan cache.
  * @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use
  *	it but can store it and pass it back to the driver for synchronisation
  * @band: the active band when this frame was received
@@ -1146,9 +1163,10 @@
  */
 struct ieee80211_rx_status {
 	u64 mactime;
+	u64 boottime_ns;
 	u32 device_timestamp;
 	u32 ampdu_reference;
-	u32 flag;
+	u64 flag;
 	u16 freq;
 	u8 vht_flag;
 	u8 rate_idx;
@@ -1735,10 +1753,12 @@
  *		  size is min(max_amsdu_len, 7935) bytes.
  *	Both additional HT limits must be enforced by the low level driver.
  *	This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2).
+ * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not.
+ * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
  * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
  */
 struct ieee80211_sta {
-	u32 supp_rates[IEEE80211_NUM_BANDS];
+	u32 supp_rates[NUM_NL80211_BANDS];
 	u8 addr[ETH_ALEN];
 	u16 aid;
 	struct ieee80211_sta_ht_cap ht_cap;
@@ -1755,6 +1775,8 @@
 	bool mfp;
 	u8 max_amsdu_subframes;
 	u16 max_amsdu_len;
+	bool support_p2p_ps;
+	u16 max_rc_amsdu_len;
 
 	struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
 
@@ -1968,6 +1990,18 @@
  *	order and does not need to manage its own reorder buffer or BA session
  *	timeout.
  *
+ * @IEEE80211_HW_USES_RSS: The device uses RSS and thus requires parallel RX,
+ *	which implies using per-CPU station statistics.
+ *
+ * @IEEE80211_HW_TX_AMSDU: Hardware (or driver) supports software aggregated
+ *	A-MSDU frames. Requires software tx queueing and fast-xmit support.
+ *	When not using minstrel/minstrel_ht rate control, the driver must
+ *	limit the maximum A-MSDU size based on the current tx rate by setting
+ *	max_rc_amsdu_len in struct ieee80211_sta.
+ *
+ * @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list
+ *	skbs, needed for zero-copy software A-MSDU.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -2005,6 +2039,9 @@
 	IEEE80211_HW_BEACON_TX_STATUS,
 	IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR,
 	IEEE80211_HW_SUPPORTS_REORDERING_BUFFER,
+	IEEE80211_HW_USES_RSS,
+	IEEE80211_HW_TX_AMSDU,
+	IEEE80211_HW_TX_FRAG_LIST,
 
 	/* keep last, obviously */
 	NUM_IEEE80211_HW_FLAGS
@@ -2077,6 +2114,9 @@
  *	size is smaller (an example is LinkSys WRT120N with FW v1.0.07
  *	build 002 Jun 18 2012).
  *
+ * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
+ *	of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
+ *
  * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
  *	(if %IEEE80211_HW_QUEUE_CONTROL is set)
  *
@@ -2131,6 +2171,7 @@
 	u8 max_rate_tries;
 	u8 max_rx_aggregation_subframes;
 	u8 max_tx_aggregation_subframes;
+	u8 max_tx_fragments;
 	u8 offchannel_tx_hw_queue;
 	u8 radiotap_mcs_details;
 	u16 radiotap_vht_details;
@@ -3348,6 +3389,10 @@
  *	the function call.
  *
  * @wake_tx_queue: Called when new packets have been added to the queue.
+ * @sync_rx_queues: Process all pending frames in RSS queues. This is a
+ *	synchronization which is needed in case driver has in its RSS queues
+ *	pending frames that were received prior to the control path action
+ *	currently taken (e.g. disassociation) but are not processed yet.
  */
 struct ieee80211_ops {
 	void (*tx)(struct ieee80211_hw *hw,
@@ -3585,6 +3630,7 @@
 
 	void (*wake_tx_queue)(struct ieee80211_hw *hw,
 			      struct ieee80211_txq *txq);
+	void (*sync_rx_queues)(struct ieee80211_hw *hw);
 };
 
 /**
@@ -3838,11 +3884,12 @@
  * This function must be called with BHs disabled.
  *
  * @hw: the hardware this frame came in on
+ * @sta: the station the frame was received from, or %NULL
  * @skb: the buffer to receive, owned by mac80211 after this call
  * @napi: the NAPI context
  */
-void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
-		       struct napi_struct *napi);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+		       struct sk_buff *skb, struct napi_struct *napi);
 
 /**
  * ieee80211_rx - receive frame
@@ -3866,7 +3913,7 @@
  */
 static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
-	ieee80211_rx_napi(hw, skb, NULL);
+	ieee80211_rx_napi(hw, NULL, skb, NULL);
 }
 
 /**
@@ -3949,6 +3996,33 @@
 	return ret;
 }
 
+/**
+ * ieee80211_sta_pspoll - PS-Poll frame received
+ * @sta: currently connected station
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set,
+ * use this function to inform mac80211 that a PS-Poll frame from a
+ * connected station was received.
+ * This must be used in conjunction with ieee80211_sta_ps_transition()
+ * and possibly ieee80211_sta_uapsd_trigger(); calls to all three must
+ * be serialized.
+ */
+void ieee80211_sta_pspoll(struct ieee80211_sta *sta);
+
+/**
+ * ieee80211_sta_uapsd_trigger - (potential) U-APSD trigger frame received
+ * @sta: currently connected station
+ * @tid: TID of the received (potential) trigger frame
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set,
+ * use this function to inform mac80211 that a (potential) trigger frame
+ * from a connected station was received.
+ * This must be used in conjunction with ieee80211_sta_ps_transition()
+ * and possibly ieee80211_sta_pspoll(); calls to all three must be
+ * serialized.
+ */
+void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
+
 /*
  * The TX headroom reserved by mac80211 for its own tx_status functions.
  * This is enough for the radiotap header.
@@ -4361,7 +4435,7 @@
  */
 __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
 					struct ieee80211_vif *vif,
-					enum ieee80211_band band,
+					enum nl80211_band band,
 					size_t frame_len,
 					struct ieee80211_rate *rate);
 
@@ -5314,7 +5388,7 @@
 };
 
 static inline int rate_supported(struct ieee80211_sta *sta,
-				 enum ieee80211_band band,
+				 enum nl80211_band band,
 				 int index)
 {
 	return (sta == NULL || sta->supp_rates[band] & BIT(index));
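
Two of the mac80211 changes above are directly visible to drivers:
ieee80211_rx_napi() gains a station argument so RSS-capable drivers can hand
in the already-known peer, and ieee80211_rx_status::flag widens to u64 so
flags beyond bit 31 (RX_FLAG_MIC_STRIPPED, RX_FLAG_ALLOW_SAME_PN) fit. A
minimal driver-side sketch; how the driver obtained sta and napi is left as
a placeholder:

static void example_deliver_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
			       struct ieee80211_sta *sta,
			       struct napi_struct *napi)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	/* flag is u64 now; the newly added bits live above bit 31 */
	status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;

	/* Passing sta (may be NULL) spares mac80211 a station lookup
	 * and enables the per-CPU statistics used with RSS. */
	ieee80211_rx_napi(hw, sta, skb, napi);
}
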
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 6cd7a70..e465c855 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -288,6 +288,16 @@
 }
 
 /**
+ * ieee802154_be16_to_le16 - copies and convert be16 to le16
+ * @le16_dst: le16 destination pointer
+ * @be16_src: be16 source pointer
+ */
+static inline void ieee802154_be16_to_le16(void *le16_dst, const void *be16_src)
+{
+	put_unaligned_le16(get_unaligned_be16(be16_src), le16_dst);
+}
+
+/**
  * ieee802154_alloc_hw - Allocate a new hardware device
  *
  * This must be called once for each hardware device. The returned pointer
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index fde4068..dd78bea 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -289,8 +289,6 @@
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
 extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
-extern unsigned int nf_conntrack_hash_rnd;
-void init_nf_conntrack_hash_rnd(void);
 
 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
 				 const struct nf_conntrack_zone *zone,
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 62e17d1..3e2f332 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -81,6 +81,7 @@
 
 #define CONNTRACK_LOCKS 1024
 
+extern struct hlist_nulls_head *nf_conntrack_hash;
 extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
 void nf_conntrack_lock(spinlock_t *lock);
 
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 57c8803..fa36447 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -73,6 +73,8 @@
 				      struct nf_ct_event_notifier *nb);
 
 void nf_ct_deliver_cached_events(struct nf_conn *ct);
+int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+				  u32 portid, int report);
 
 static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
@@ -91,69 +93,25 @@
 }
 
 static inline int
-nf_conntrack_eventmask_report(unsigned int eventmask,
-			      struct nf_conn *ct,
-			      u32 portid,
-			      int report)
-{
-	int ret = 0;
-	struct net *net = nf_ct_net(ct);
-	struct nf_ct_event_notifier *notify;
-	struct nf_conntrack_ecache *e;
-
-	rcu_read_lock();
-	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
-	if (notify == NULL)
-		goto out_unlock;
-
-	e = nf_ct_ecache_find(ct);
-	if (e == NULL)
-		goto out_unlock;
-
-	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
-		struct nf_ct_event item = {
-			.ct 	= ct,
-			.portid	= e->portid ? e->portid : portid,
-			.report = report
-		};
-		/* This is a resent of a destroy event? If so, skip missed */
-		unsigned long missed = e->portid ? 0 : e->missed;
-
-		if (!((eventmask | missed) & e->ctmask))
-			goto out_unlock;
-
-		ret = notify->fcn(eventmask | missed, &item);
-		if (unlikely(ret < 0 || missed)) {
-			spin_lock_bh(&ct->lock);
-			if (ret < 0) {
-				/* This is a destroy event that has been
-				 * triggered by a process, we store the PORTID
-				 * to include it in the retransmission. */
-				if (eventmask & (1 << IPCT_DESTROY) &&
-				    e->portid == 0 && portid != 0)
-					e->portid = portid;
-				else
-					e->missed |= eventmask;
-			} else
-				e->missed &= ~missed;
-			spin_unlock_bh(&ct->lock);
-		}
-	}
-out_unlock:
-	rcu_read_unlock();
-	return ret;
-}
-
-static inline int
 nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
 			  u32 portid, int report)
 {
+	const struct net *net = nf_ct_net(ct);
+
+	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+		return 0;
+
 	return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
 }
 
 static inline int
 nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
 {
+	const struct net *net = nf_ct_net(ct);
+
+	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+		return 0;
+
 	return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
 }
 
@@ -172,43 +130,9 @@
 void nf_ct_expect_unregister_notifier(struct net *net,
 				      struct nf_exp_event_notifier *nb);
 
-static inline void
-nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
-			  struct nf_conntrack_expect *exp,
-			  u32 portid,
-			  int report)
-{
-	struct net *net = nf_ct_exp_net(exp);
-	struct nf_exp_event_notifier *notify;
-	struct nf_conntrack_ecache *e;
-
-	rcu_read_lock();
-	notify = rcu_dereference(net->ct.nf_expect_event_cb);
-	if (notify == NULL)
-		goto out_unlock;
-
-	e = nf_ct_ecache_find(exp->master);
-	if (e == NULL)
-		goto out_unlock;
-
-	if (e->expmask & (1 << event)) {
-		struct nf_exp_event item = {
-			.exp	= exp,
-			.portid	= portid,
-			.report = report
-		};
-		notify->fcn(1 << event, &item);
-	}
-out_unlock:
-	rcu_read_unlock();
-}
-
-static inline void
-nf_ct_expect_event(enum ip_conntrack_expect_events event,
-		   struct nf_conntrack_expect *exp)
-{
-	nf_ct_expect_event_report(event, exp, 0, 0);
-}
+void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
+			       struct nf_conntrack_expect *exp,
+			       u32 portid, int report);
 
 int nf_conntrack_ecache_pernet_init(struct net *net);
 void nf_conntrack_ecache_pernet_fini(struct net *net);
@@ -245,8 +169,6 @@
 					    u32 portid,
 					    int report) { return 0; }
 static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
-static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
-				      struct nf_conntrack_expect *exp) {}
 static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
 					     struct nf_conntrack_expect *exp,
  					     u32 portid,
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index dce56f0..5ed33ea 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -10,6 +10,7 @@
 
 extern unsigned int nf_ct_expect_hsize;
 extern unsigned int nf_ct_expect_max;
+extern struct hlist_head *nf_ct_expect_hash;
 
 struct nf_conntrack_expect {
 	/* Conntrack expectation list member */
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 956d8a6..1a5fb36 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -23,6 +23,9 @@
 	/* L4 Protocol number. */
 	u_int8_t l4proto;
 
+	/* Resolve clashes on insertion races. */
+	bool allow_clash;
+
 	/* Try to fill in the third arg: dataoff is offset past network protocol
            hdr.  Return true if possible. */
 	bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 7e2b1d0..c5f8fc73 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -45,7 +45,6 @@
 #endif
 }
 
-bool nf_connlabel_match(const struct nf_conn *ct, u16 bit);
 int nf_connlabel_set(struct nf_conn *ct, u16 bit);
 
 int nf_connlabels_replace(struct nf_conn *ct,
@@ -54,11 +53,11 @@
 #ifdef CONFIG_NF_CONNTRACK_LABELS
 int nf_conntrack_labels_init(void);
 void nf_conntrack_labels_fini(void);
-int nf_connlabels_get(struct net *net, unsigned int n_bits);
+int nf_connlabels_get(struct net *net, unsigned int bit);
 void nf_connlabels_put(struct net *net);
 #else
 static inline int nf_conntrack_labels_init(void) { return 0; }
 static inline void nf_conntrack_labels_fini(void) {}
-static inline int nf_connlabels_get(struct net *net, unsigned int n_bits) { return 0; }
+static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; }
 static inline void nf_connlabels_put(struct net *net) {}
 #endif
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index f6b1daf..0922354 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -303,7 +303,7 @@
 struct nft_set {
 	struct list_head		list;
 	struct list_head		bindings;
-	char				name[IFNAMSIZ];
+	char				name[NFT_SET_MAXNAMELEN];
 	u32				ktype;
 	u32				dtype;
 	u32				size;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 0e31727..254a0fc 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -98,14 +98,17 @@
  *   nla_put_u8(skb, type, value)	add u8 attribute to skb
  *   nla_put_u16(skb, type, value)	add u16 attribute to skb
  *   nla_put_u32(skb, type, value)	add u32 attribute to skb
- *   nla_put_u64(skb, type, value)	add u64 attribute to skb
+ *   nla_put_u64_64bits(skb, type,
+ *			value, padattr)	add u64 attribute to skb
  *   nla_put_s8(skb, type, value)	add s8 attribute to skb
  *   nla_put_s16(skb, type, value)	add s16 attribute to skb
  *   nla_put_s32(skb, type, value)	add s32 attribute to skb
- *   nla_put_s64(skb, type, value)	add s64 attribute to skb
+ *   nla_put_s64(skb, type, value,
+ *               padattr)		add s64 attribute to skb
  *   nla_put_string(skb, type, str)	add string attribute to skb
  *   nla_put_flag(skb, type)		add flag attribute to skb
- *   nla_put_msecs(skb, type, jiffies)	add msecs attribute to skb
+ *   nla_put_msecs(skb, type, jiffies,
+ *                 padattr)		add msecs attribute to skb
  *   nla_put_in_addr(skb, type, addr)	add IPv4 address attribute to skb
  *   nla_put_in6_addr(skb, type, addr)	add IPv6 address attribute to skb
  *
@@ -244,13 +247,21 @@
 int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
 int nla_strcmp(const struct nlattr *nla, const char *str);
 struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+				   int attrlen, int padattr);
 void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
 struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+				 int attrlen, int padattr);
 void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
 void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
 	       const void *data);
+void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+		     const void *data, int padattr);
 void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
 int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
+int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+		  const void *data, int padattr);
 int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
 int nla_append(struct sk_buff *skb, int attrlen, const void *data);
 
@@ -837,47 +848,56 @@
 }
 
 /**
- * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
+ * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
+ * @padattr: attribute type for the padding
  */
-static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
+static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
+				    u64 value, int padattr)
 {
-	return nla_put(skb, attrtype, sizeof(u64), &value);
+	return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
 }
 
 /**
- * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer
+ * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
+ * @padattr: attribute type for the padding
  */
-static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
+static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
+			       int padattr)
 {
-	return nla_put(skb, attrtype, sizeof(__be64), &value);
+	return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
 }
 
 /**
- * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer
+ * nla_put_net64 - Add 64-bit network byte order nlattr to a skb and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
+ * @padattr: attribute type for the padding
  */
-static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
+static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
+				int padattr)
 {
-	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
+			    padattr);
 }
 
 /**
- * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer
+ * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
+ * @padattr: attribute type for the padding
  */
-static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
+static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
+			       int padattr)
 {
-	return nla_put(skb, attrtype, sizeof(__le64), &value);
+	return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
 }
 
 /**
@@ -914,14 +934,16 @@
 }
 
 /**
- * nla_put_s64 - Add a s64 netlink attribute to a socket buffer
+ * nla_put_s64 - Add a s64 netlink attribute to a socket buffer and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
+ * @padattr: attribute type for the padding
  */
-static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
+static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
+			      int padattr)
 {
-	return nla_put(skb, attrtype, sizeof(s64), &value);
+	return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
 }
 
 /**
@@ -947,16 +969,18 @@
 }
 
 /**
- * nla_put_msecs - Add a msecs netlink attribute to a socket buffer
+ * nla_put_msecs - Add a msecs netlink attribute to a skb and align it
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @njiffies: number of jiffies to convert to msecs
+ * @padattr: attribute type for the padding
  */
 static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
-				unsigned long njiffies)
+				unsigned long njiffies, int padattr)
 {
 	u64 tmp = jiffies_to_msecs(njiffies);
-	return nla_put(skb, attrtype, sizeof(u64), &tmp);
+
+	return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
 }
 
 /**
@@ -1231,6 +1255,61 @@
 }
 
 /**
+ * nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
+ * @skb: socket buffer the message is stored in
+ *
+ * Return true if padding is needed to align the next attribute (nla_data()) to
+ * a 64-bit aligned area.
+ */
+static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	/* The nlattr header is 4 bytes in size, that's why we test
+	 * if the skb->data _is_ aligned.  A NOP attribute, plus
+	 * nlattr header for next attribute, will make nla_data()
+	 * 8-byte aligned.
+	 */
+	if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
+		return true;
+#endif
+	return false;
+}
+
+/**
+ * nla_align_64bit - 64-bit align the nla_data() of next attribute
+ * @skb: socket buffer the message is stored in
+ * @padattr: attribute type for the padding
+ *
+ * Conditionally emit a padding netlink attribute in order to make
+ * the next attribute we emit have a 64-bit aligned nla_data() area.
+ * This will only be done in architectures which do not have
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
+ *
+ * Returns zero on success or a negative error code.
+ */
+static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
+{
+	if (nla_need_padding_for_64bit(skb) &&
+	    !nla_reserve(skb, padattr, 0))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+/**
+ * nla_total_size_64bit - total length of attribute including padding
+ * @payload: length of payload
+ */
+static inline int nla_total_size_64bit(int payload)
+{
+	return NLA_ALIGN(nla_attr_size(payload))
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+		+ NLA_ALIGN(nla_attr_size(0))
+#endif
+		;
+}
+
+/**
  * nla_for_each_attr - iterate over a stream of attributes
  * @pos: loop counter, set to current attribute
  * @head: head of attribute stream
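
The 64-bit netlink helpers above take an extra padattr argument: on
architectures without efficient unaligned access, a zero-length padding
attribute of that type is emitted first so the 64-bit payload lands
naturally aligned, and nla_total_size_64bit() accounts for the possible
extra attribute header when sizing the message. A sketch of a fill function
using the new helper; EXAMPLE_ATTR_STAT and EXAMPLE_ATTR_PAD are
hypothetical attribute types:

static int example_fill_stat(struct sk_buff *skb, u64 value)
{
	/* The matching size calculation elsewhere would use
	 * nla_total_size_64bit(sizeof(u64)). */
	if (nla_put_u64_64bit(skb, EXAMPLE_ATTR_STAT, value, EXAMPLE_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
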
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 723b61c..38b1a80 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -84,7 +84,6 @@
 	struct ctl_table_header	*event_sysctl_header;
 	struct ctl_table_header	*helper_sysctl_header;
 #endif
-	char			*slabname;
 	unsigned int		sysctl_log_invalid; /* Log invalid packets */
 	int			sysctl_events;
 	int			sysctl_acct;
@@ -93,11 +92,6 @@
 	int			sysctl_tstamp;
 	int			sysctl_checksum;
 
-	unsigned int		htable_size;
-	seqcount_t		generation;
-	struct kmem_cache	*nf_conntrack_cachep;
-	struct hlist_nulls_head	*hash;
-	struct hlist_head	*expect_hash;
 	struct ct_pcpu __percpu *pcpu_lists;
 	struct ip_conntrack_stat __percpu *stat;
 	struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
@@ -107,9 +101,5 @@
 	unsigned int		labels_used;
 	u8			label_words;
 #endif
-#ifdef CONFIG_NF_NAT_NEEDED
-	struct hlist_head	*nat_bysource;
-	unsigned int		nat_htable_size;
-#endif
 };
 #endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index a69cde3..d061ffe 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -133,6 +133,9 @@
 	struct fib_rules_ops	*mr_rules_ops;
 #endif
 #endif
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+	int sysctl_fib_multipath_use_neigh;
+#endif
 	atomic_t	rt_genid;
 };
 #endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 730d82a..24cd394 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -80,6 +80,7 @@
 	struct flow_cache	flow_cache_global;
 	atomic_t		flow_cache_genid;
 	struct list_head	flow_cache_gc_list;
+	atomic_t		flow_cache_gc_count;
 	spinlock_t		flow_cache_gc_lock;
 	struct work_struct	flow_cache_gc_work;
 	struct work_struct	flow_cache_flush_work;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 57ce24f..87499b6 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -109,7 +109,13 @@
 
 struct nci_conn_info {
 	struct list_head list;
-	__u8	id; /* can be an RF Discovery ID or an NFCEE ID */
+	/* NCI specification 4.4.2 Connection Creation
+	 * The combination of destination type and destination specific
+	 * parameters shall uniquely identify a single destination for the
+	 * Logical Connection
+	 */
+	struct dest_spec_params *dest_params;
+	__u8	dest_type;
 	__u8	conn_id;
 	__u8	max_pkt_payload_len;
 
@@ -260,7 +266,9 @@
 	__u32			manufact_specific_info;
 
 	/* Save RF Discovery ID or NFCEE ID under conn_create */
-	__u8			cur_id;
+	struct dest_spec_params cur_params;
+	/* Save destination type under conn_create */
+	__u8			cur_dest_type;
 
 	/* stored during nci_data_exchange */
 	struct sk_buff		*rx_data_reassembly;
@@ -298,6 +306,8 @@
 			 size_t params_len,
 			 struct core_conn_create_dest_spec_params *params);
 int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id);
+int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
+		      struct sk_buff **resp);
 
 struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
 int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
@@ -378,7 +388,8 @@
 void nci_req_complete(struct nci_dev *ndev, int result);
 struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
 						   int conn_id);
-int nci_get_conn_info_by_id(struct nci_dev *ndev, u8 id);
+int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type,
+					  struct dest_spec_params *params);
 
 /* ----- NCI status code ----- */
 int nci_to_errno(__u8 code);
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 32cb3e5..fcab4de 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -138,6 +138,8 @@
 	NL802154_ATTR_SEC_KEY,
 #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 
+	NL802154_ATTR_PAD,
+
 	__NL802154_ATTR_AFTER_LAST,
 	NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
 };
@@ -295,6 +297,7 @@
 	NL802154_DEV_ADDR_ATTR_MODE,
 	NL802154_DEV_ADDR_ATTR_SHORT,
 	NL802154_DEV_ADDR_ATTR_EXTENDED,
+	NL802154_DEV_ADDR_ATTR_PAD,
 
 	/* keep last */
 	__NL802154_DEV_ADDR_ATTR_AFTER_LAST,
@@ -320,6 +323,7 @@
 	NL802154_KEY_ID_ATTR_IMPLICIT,
 	NL802154_KEY_ID_ATTR_SOURCE_SHORT,
 	NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
+	NL802154_KEY_ID_ATTR_PAD,
 
 	/* keep last */
 	__NL802154_KEY_ID_ATTR_AFTER_LAST,
@@ -402,6 +406,7 @@
 	NL802154_DEV_ATTR_EXTENDED_ADDR,
 	NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
 	NL802154_DEV_ATTR_KEY_MODE,
+	NL802154_DEV_ATTR_PAD,
 
 	/* keep last */
 	__NL802154_DEV_ATTR_AFTER_LAST,
@@ -414,6 +419,7 @@
 	NL802154_DEVKEY_ATTR_FRAME_COUNTER,
 	NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
 	NL802154_DEVKEY_ATTR_ID,
+	NL802154_DEVKEY_ATTR_PAD,
 
 	/* keep last */
 	__NL802154_DEVKEY_ATTR_AFTER_LAST,
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index caa5e18..0f7efa8 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -392,9 +392,6 @@
 	};
 };
 
-/* tca flags definitions */
-#define TCA_CLS_FLAGS_SKIP_HW 1
-
 static inline bool tc_should_offload(struct net_device *dev, u32 flags)
 {
 	if (!(dev->features & NETIF_F_HW_TC))
@@ -409,9 +406,27 @@
 	return true;
 }
 
+static inline bool tc_skip_sw(u32 flags)
+{
+	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
+}
+
+/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
+static inline bool tc_flags_valid(u32 flags)
+{
+	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
+		return false;
+
+	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
+		return false;
+
+	return true;
+}
+
 enum tc_fl_command {
 	TC_CLSFLOWER_REPLACE,
 	TC_CLSFLOWER_DESTROY,
+	TC_CLSFLOWER_STATS,
 };
 
 struct tc_cls_flower_offload {
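
tc_skip_sw() and tc_flags_valid() above formalise the classifier offload
flags: unknown bits are rejected, and so is setting TCA_CLS_FLAGS_SKIP_HW
and TCA_CLS_FLAGS_SKIP_SW together, since the filter would then be installed
nowhere. A sketch of the validation a classifier's change routine might
perform; the function and variable names are illustrative:

static int example_validate_flags(const struct nlattr *flags_attr, u32 *out)
{
	u32 flags = 0;

	if (flags_attr) {
		flags = nla_get_u32(flags_attr);
		/* Rejects unknown bits and SKIP_HW|SKIP_SW combined. */
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	*out = flags;
	/* Callers then consult tc_skip_sw(flags) and tc_should_offload()
	 * to decide whether to install the software and/or hardware path. */
	return 0;
}
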
diff --git a/include/net/protocol.h b/include/net/protocol.h
index da689f5..bf36ca3 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -107,9 +107,6 @@
 void inet_register_protosw(struct inet_protosw *p);
 void inet_unregister_protosw(struct inet_protosw *p);
 
-int  udp_add_offload(struct net *net, struct udp_offload *prot);
-void udp_del_offload(struct udp_offload *prot);
-
 #if IS_ENABLED(CONFIG_IPV6)
 int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
 int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index f49759d..6ebe13e 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -85,24 +85,23 @@
 	struct request_sock *req;
 
 	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
-
-	if (req) {
-		req->rsk_ops = ops;
-		if (attach_listener) {
-			sock_hold(sk_listener);
-			req->rsk_listener = sk_listener;
-		} else {
-			req->rsk_listener = NULL;
+	if (!req)
+		return NULL;
+	req->rsk_listener = NULL;
+	if (attach_listener) {
+		if (unlikely(!atomic_inc_not_zero(&sk_listener->sk_refcnt))) {
+			kmem_cache_free(ops->slab, req);
+			return NULL;
 		}
-		req_to_sk(req)->sk_prot = sk_listener->sk_prot;
-		sk_node_init(&req_to_sk(req)->sk_node);
-		sk_tx_queue_clear(req_to_sk(req));
-		req->saved_syn = NULL;
-		/* Following is temporary. It is coupled with debugging
-		 * helpers in reqsk_put() & reqsk_free()
-		 */
-		atomic_set(&req->rsk_refcnt, 0);
+		req->rsk_listener = sk_listener;
 	}
+	req->rsk_ops = ops;
+	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+	sk_node_init(&req_to_sk(req)->sk_node);
+	sk_tx_queue_clear(req_to_sk(req));
+	req->saved_syn = NULL;
+	atomic_set(&req->rsk_refcnt, 0);
+
 	return req;
 }
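
The reqsk_alloc() rewrite above stops taking the listener reference
unconditionally: atomic_inc_not_zero() only succeeds while the listener's
refcount is still non-zero, so a request sock can no longer be attached to a
listener that is already on its way to being freed. The same
take-only-if-still-live pattern in isolation; the helper name is
illustrative:

/* Returns true, holding a reference, only if the socket is still live. */
static bool example_try_hold(struct sock *sk)
{
	return atomic_inc_not_zero(&sk->sk_refcnt);
}
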
 
diff --git a/include/net/route.h b/include/net/route.h
index 9b0a523..ad777d7 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -209,6 +209,9 @@
 void ip_rt_multicast_event(struct in_device *);
 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+struct rtable *rt_dst_alloc(struct net_device *dev,
+			     unsigned int flags, u16 type,
+			     bool nopolicy, bool noxfrm, bool will_cache);
 
 struct in_ifaddr;
 void fib_add_ifaddr(struct in_ifaddr *);
@@ -322,10 +325,11 @@
 
 static inline int inet_iif(const struct sk_buff *skb)
 {
-	int iif = skb_rtable(skb)->rt_iif;
+	struct rtable *rt = skb_rtable(skb);
 
-	if (iif)
-		return iif;
+	if (rt && rt->rt_iif)
+		return rt->rt_iif;
+
 	return skb->skb_iif;
 }
 
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 2f87c1b..006a7b8 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -47,6 +47,9 @@
  *	@get_num_rx_queues: Function to determine number of receive queues
  *			    to create when creating a new device.
  *	@get_link_net: Function to get the i/o netns of the device
+ *	@get_linkxstats_size: Function to calculate the required room for
+ *			      dumping device-specific extended link stats
+ *	@fill_linkxstats: Function to dump device-specific extended link stats
  */
 struct rtnl_link_ops {
 	struct list_head	list;
@@ -95,6 +98,10 @@
 						   const struct net_device *dev,
 						   const struct net_device *slave_dev);
 	struct net		*(*get_link_net)(const struct net_device *dev);
+	size_t			(*get_linkxstats_size)(const struct net_device *dev);
+	int			(*fill_linkxstats)(struct sk_buff *skb,
+						   const struct net_device *dev,
+						   int *prividx);
 };
 
 int __rtnl_link_register(struct rtnl_link_ops *ops);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 46e55f0..a1fd76c 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -527,11 +527,27 @@
 	return q->flags & TCQ_F_CPUSTATS;
 }
 
+static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+				  __u64 bytes, __u32 packets)
+{
+	bstats->bytes += bytes;
+	bstats->packets += packets;
+}
+
 static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
 				 const struct sk_buff *skb)
 {
-	bstats->bytes += qdisc_pkt_len(skb);
-	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+	_bstats_update(bstats,
+		       qdisc_pkt_len(skb),
+		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
+}
+
+static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+				      __u64 bytes, __u32 packets)
+{
+	u64_stats_update_begin(&bstats->syncp);
+	_bstats_update(&bstats->bstats, bytes, packets);
+	u64_stats_update_end(&bstats->syncp);
 }
 
 static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
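
The _bstats_update()/_bstats_cpu_update() helpers above accept raw byte and
packet counts instead of deriving them from an skb, which is what
hardware-offload statistics callbacks need since no skb exists at that
point. A sketch of folding hardware counters into per-CPU class stats; where
the counters come from is a placeholder:

static void example_fold_hw_stats(struct gnet_stats_basic_cpu __percpu *cpu_bstats,
				  u64 hw_bytes, u32 hw_packets)
{
	/* Per-CPU variant; u64_stats syncp handling is done inside. */
	_bstats_cpu_update(this_cpu_ptr(cpu_bstats), hw_bytes, hw_packets);
}
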
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 65521cf..b392ac8 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -116,6 +116,22 @@
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
+int sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_stop(struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_next(struct net *net,
+			struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_idx(struct net *net,
+			struct rhashtable_iter *iter, int pos);
+int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
+				  struct net *net,
+				  const union sctp_addr *laddr,
+				  const union sctp_addr *paddr, void *p);
+int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
+			    struct net *net, int pos, void *p);
+int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
+int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+		       struct sctp_info *info);
+
 /*
  * sctp/primitive.c
  */
@@ -189,10 +205,9 @@
  */
 
 /* SCTP SNMP MIB stats handlers */
-#define SCTP_INC_STATS(net, field)      SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
-#define SCTP_DEC_STATS(net, field)      SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS(net, field)	SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define __SCTP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
 
 /* sctp mib definitions */
 enum {
@@ -359,21 +374,6 @@
 #define sctp_skb_for_each(pos, head, tmp) \
 	skb_queue_walk_safe(head, pos, tmp)
 
-/* A helper to append an entire skb list (list) to another (head). */
-static inline void sctp_skb_list_tail(struct sk_buff_head *list,
-				      struct sk_buff_head *head)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&head->lock, flags);
-	spin_lock(&list->lock);
-
-	skb_queue_splice_tail_init(list, head);
-
-	spin_unlock(&list->lock);
-	spin_unlock_irqrestore(&head->lock, flags);
-}
-
 /**
  *	sctp_list_dequeue - remove from the head of the queue
  *	@list: list to dequeue from
@@ -386,11 +386,9 @@
 {
 	struct list_head *result = NULL;
 
-	if (list->next != list) {
+	if (!list_empty(list)) {
 		result = list->next;
-		list->next = result->next;
-		list->next->prev = list;
-		INIT_LIST_HEAD(result);
+		list_del_init(result);
 	}
 	return result;
 }
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6df1ce7..16b013a 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -210,14 +210,15 @@
 	int user_frag;
 
 	__u32 autoclose;
-	__u8 nodelay;
-	__u8 disable_fragments;
-	__u8 v4mapped;
-	__u8 frag_interleave;
 	__u32 adaptation_ind;
 	__u32 pd_point;
-	__u8 recvrcvinfo;
-	__u8 recvnxtinfo;
+	__u16	nodelay:1,
+		disable_fragments:1,
+		v4mapped:1,
+		frag_interleave:1,
+		recvrcvinfo:1,
+		recvnxtinfo:1,
+		data_ready_signalled:1;
 
 	atomic_t pd_mode;
 	/* Receive to here while partial delivery is in effect. */
@@ -847,6 +848,11 @@
 	 */
 	ktime_t last_time_heard;
 
+	/* When was the last time that we sent a chunk using this
+	 * transport? We use this to check for idle transports
+	 */
+	unsigned long last_time_sent;
+
 	/* Last time(in jiffies) when cwnd is reduced due to the congestion
 	 * indication based on ECNE chunk.
 	 */
@@ -952,7 +958,8 @@
 			  struct sctp_sock *);
 void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
 void sctp_transport_free(struct sctp_transport *);
-void sctp_transport_reset_timers(struct sctp_transport *);
+void sctp_transport_reset_t3_rtx(struct sctp_transport *);
+void sctp_transport_reset_hb_timer(struct sctp_transport *);
 int sctp_transport_hold(struct sctp_transport *);
 void sctp_transport_put(struct sctp_transport *);
 void sctp_transport_update_rto(struct sctp_transport *, __u32);
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 35512ac..c9228ad 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -123,12 +123,9 @@
 #define DECLARE_SNMP_STAT(type, name)	\
 	extern __typeof__(type) __percpu *name
 
-#define SNMP_INC_STATS_BH(mib, field)	\
+#define __SNMP_INC_STATS(mib, field)	\
 			__this_cpu_inc(mib->mibs[field])
 
-#define SNMP_INC_STATS_USER(mib, field)	\
-			this_cpu_inc(mib->mibs[field])
-
 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field)	\
 			atomic_long_inc(&mib->mibs[field])
 
@@ -138,12 +135,9 @@
 #define SNMP_DEC_STATS(mib, field)	\
 			this_cpu_dec(mib->mibs[field])
 
-#define SNMP_ADD_STATS_BH(mib, field, addend)	\
+#define __SNMP_ADD_STATS(mib, field, addend)	\
 			__this_cpu_add(mib->mibs[field], addend)
 
-#define SNMP_ADD_STATS_USER(mib, field, addend)	\
-			this_cpu_add(mib->mibs[field], addend)
-
 #define SNMP_ADD_STATS(mib, field, addend)	\
 			this_cpu_add(mib->mibs[field], addend)
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)	\
@@ -152,7 +146,7 @@
 		this_cpu_inc(ptr[basefield##PKTS]);		\
 		this_cpu_add(ptr[basefield##OCTETS], addend);	\
 	} while (0)
-#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)	\
+#define __SNMP_UPD_PO_STATS(mib, basefield, addend)	\
 	do { \
 		__typeof__((mib->mibs) + 0) ptr = mib->mibs;	\
 		__this_cpu_inc(ptr[basefield##PKTS]);		\
@@ -162,7 +156,7 @@
 
 #if BITS_PER_LONG==32
 
-#define SNMP_ADD_STATS64_BH(mib, field, addend) 			\
+#define __SNMP_ADD_STATS64(mib, field, addend) 				\
 	do {								\
 		__typeof__(*mib) *ptr = raw_cpu_ptr(mib);		\
 		u64_stats_update_begin(&ptr->syncp);			\
@@ -170,20 +164,16 @@
 		u64_stats_update_end(&ptr->syncp);			\
 	} while (0)
 
-#define SNMP_ADD_STATS64_USER(mib, field, addend) 			\
+#define SNMP_ADD_STATS64(mib, field, addend) 				\
 	do {								\
 		local_bh_disable();					\
-		SNMP_ADD_STATS64_BH(mib, field, addend);		\
-		local_bh_enable();					\
+		__SNMP_ADD_STATS64(mib, field, addend);			\
+		local_bh_enable();				\
 	} while (0)
 
-#define SNMP_ADD_STATS64(mib, field, addend)				\
-		SNMP_ADD_STATS64_USER(mib, field, addend)
-
-#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
-#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
+#define __SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
 #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend)			\
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend)			\
 	do {								\
 		__typeof__(*mib) *ptr;				\
 		ptr = raw_cpu_ptr((mib));				\
@@ -195,19 +185,17 @@
 #define SNMP_UPD_PO_STATS64(mib, basefield, addend)			\
 	do {								\
 		local_bh_disable();					\
-		SNMP_UPD_PO_STATS64_BH(mib, basefield, addend);		\
-		local_bh_enable();					\
+		__SNMP_UPD_PO_STATS64(mib, basefield, addend);		\
+		local_bh_enable();				\
 	} while (0)
 #else
-#define SNMP_INC_STATS64_BH(mib, field)		SNMP_INC_STATS_BH(mib, field)
-#define SNMP_INC_STATS64_USER(mib, field)	SNMP_INC_STATS_USER(mib, field)
+#define __SNMP_INC_STATS64(mib, field)		__SNMP_INC_STATS(mib, field)
 #define SNMP_INC_STATS64(mib, field)		SNMP_INC_STATS(mib, field)
 #define SNMP_DEC_STATS64(mib, field)		SNMP_DEC_STATS(mib, field)
-#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend)
-#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend)
+#define __SNMP_ADD_STATS64(mib, field, addend)	__SNMP_ADD_STATS(mib, field, addend)
 #define SNMP_ADD_STATS64(mib, field, addend)	SNMP_ADD_STATS(mib, field, addend)
 #define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend)
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) __SNMP_UPD_PO_STATS(mib, basefield, addend)
 #endif
 
 #endif
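
The SNMP macro renames above drop the *_BH/_USER suffixes in favour of a
leading double underscore: the __ variants assume the caller already runs
with softirqs disabled (the old *_BH behaviour), while the unprefixed
variants remain safe from process context. An illustrative call from a
receive path; the MIB and field shown are just examples of existing
counters:

static void example_count_rx_datagram(struct net *net)
{
	/* Softirq context: the double-underscore form is the old
	 * SNMP_INC_STATS_BH() under its new name. */
	__SNMP_INC_STATS(net->mib.udp_statistics, UDP_MIB_INDATAGRAMS);
}
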
diff --git a/include/net/sock.h b/include/net/sock.h
index 255d3e0..c9c8b19 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -178,7 +178,7 @@
 	int			skc_bound_dev_if;
 	union {
 		struct hlist_node	skc_bind_node;
-		struct hlist_nulls_node skc_portaddr_node;
+		struct hlist_node	skc_portaddr_node;
 	};
 	struct proto		*skc_prot;
 	possible_net_t		skc_net;
@@ -438,6 +438,7 @@
 						  struct sk_buff *skb);
 	void                    (*sk_destruct)(struct sock *sk);
 	struct sock_reuseport __rcu	*sk_reuseport_cb;
+	struct rcu_head		sk_rcu;
 };
 
 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
@@ -456,28 +457,32 @@
 #define SK_CAN_REUSE	1
 #define SK_FORCE_REUSE	2
 
+int sk_set_peek_off(struct sock *sk, int val);
+
 static inline int sk_peek_offset(struct sock *sk, int flags)
 {
-	if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
-		return sk->sk_peek_off;
-	else
-		return 0;
+	if (unlikely(flags & MSG_PEEK)) {
+		s32 off = READ_ONCE(sk->sk_peek_off);
+		if (off >= 0)
+			return off;
+	}
+
+	return 0;
 }
 
 static inline void sk_peek_offset_bwd(struct sock *sk, int val)
 {
-	if (sk->sk_peek_off >= 0) {
-		if (sk->sk_peek_off >= val)
-			sk->sk_peek_off -= val;
-		else
-			sk->sk_peek_off = 0;
+	s32 off = READ_ONCE(sk->sk_peek_off);
+
+	if (unlikely(off >= 0)) {
+		off = max_t(s32, off - val, 0);
+		WRITE_ONCE(sk->sk_peek_off, off);
 	}
 }
 
 static inline void sk_peek_offset_fwd(struct sock *sk, int val)
 {
-	if (sk->sk_peek_off >= 0)
-		sk->sk_peek_off += val;
+	sk_peek_offset_bwd(sk, -val);
 }
 
 /*
@@ -564,7 +569,7 @@
    modifications.
  */
 
-static inline void sock_hold(struct sock *sk)
+static __always_inline void sock_hold(struct sock *sk)
 {
 	atomic_inc(&sk->sk_refcnt);
 }
@@ -572,7 +577,7 @@
 /* Ungrab socket in the context, which assumes that socket refcnt
    cannot hit zero, f.e. it is true in context of any socketcall.
  */
-static inline void __sock_put(struct sock *sk)
+static __always_inline void __sock_put(struct sock *sk)
 {
 	atomic_dec(&sk->sk_refcnt);
 }
@@ -625,12 +630,20 @@
 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 {
 	sock_hold(sk);
-	hlist_add_head_rcu(&sk->sk_node, list);
+	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+	    sk->sk_family == AF_INET6)
+		hlist_add_tail_rcu(&sk->sk_node, list);
+	else
+		hlist_add_head_rcu(&sk->sk_node, list);
 }
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+	    sk->sk_family == AF_INET6)
+		hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+	else
+		hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
@@ -669,18 +682,18 @@
 	hlist_for_each_entry(__sk, list, sk_bind_node)
 
 /**
- * sk_nulls_for_each_entry_offset - iterate over a list at a given struct offset
+ * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
  * @tpos:	the type * to use as a loop cursor.
  * @pos:	the &struct hlist_node to use as a loop cursor.
  * @head:	the head for your list.
  * @offset:	offset of hlist_node within the struct.
  *
  */
-#define sk_nulls_for_each_entry_offset(tpos, pos, head, offset)		       \
-	for (pos = (head)->first;					       \
-	     (!is_a_nulls(pos)) &&					       \
+#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		       \
+	for (pos = rcu_dereference((head)->first);			       \
+	     pos != NULL &&						       \
 		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
-	     pos = pos->next)
+	     pos = rcu_dereference(pos->next))
 
 static inline struct user_namespace *sk_user_ns(struct sock *sk)
 {
@@ -720,6 +733,7 @@
 		     */
 	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
 	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
+	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
 };
 
 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -912,6 +926,17 @@
 void sk_set_memalloc(struct sock *sk);
 void sk_clear_memalloc(struct sock *sk);
 
+void __sk_flush_backlog(struct sock *sk);
+
+static inline bool sk_flush_backlog(struct sock *sk)
+{
+	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
+		__sk_flush_backlog(sk);
+		return true;
+	}
+	return false;
+}
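
sk_flush_backlog() exists so that a thread holding the socket lock for a long
stretch (a large sendmsg(), for example) can process packets that softirq
handlers queued on the backlog instead of letting them pile up until
release_sock(). A hedged sketch of the intended call pattern -- the surrounding
loop is illustrative, not part of this header:

    lock_sock(sk);
    while (msg_data_left(msg)) {
        /* Drain anything BH queued while we were copying; returns true
         * if the backlog was non-empty and has been processed. */
        if (sk_flush_backlog(sk))
            continue;

        /* ... allocate an skb and copy the next chunk of data ... */
    }
    release_sock(sk);
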
+
 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
 
 struct request_sock_ops;
@@ -1310,24 +1335,14 @@
 	__kfree_skb(skb);
 }
 
-/* Used by processes to "lock" a socket state, so that
- * interrupts and bottom half handlers won't change it
- * from under us. It essentially blocks any incoming
- * packets, so that we won't get any new data or any
- * packets that change the state of the socket.
- *
- * While locked, BH processing will add new packets to
- * the backlog queue.  This queue is processed by the
- * owner of the socket lock right before it is released.
- *
- * Since ~2.3.5 it is also exclusive sleep lock serializing
- * accesses from user process context.
- */
-#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)
-
 static inline void sock_release_ownership(struct sock *sk)
 {
-	sk->sk_lock.owned = 0;
+	if (sk->sk_lock.owned) {
+		sk->sk_lock.owned = 0;
+
+		/* The sk_lock has mutex_unlock() semantics: */
+		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+	}
 }
 
 /*
@@ -1349,6 +1364,16 @@
 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
 } while (0)
 
+#ifdef CONFIG_LOCKDEP
+static inline bool lockdep_sock_is_held(const struct sock *csk)
+{
+	struct sock *sk = (struct sock *)csk;
+
+	return lockdep_is_held(&sk->sk_lock) ||
+	       lockdep_is_held(&sk->sk_lock.slock);
+}
+#endif
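
lockdep_sock_is_held() gives RCU accessors a single lockdep expression covering
both ways of owning a socket: the slow-path spinlock and the "owned" sleeping
lock. The consumer pattern, which the __sk_dst_get() hunk further down in this
patch switches to, is roughly (field name illustrative):

    ptr = rcu_dereference_check(sk->sk_some_rcu_ptr,
                                lockdep_sock_is_held(sk));
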
+
 void lock_sock_nested(struct sock *sk, int subclass);
 
 static inline void lock_sock(struct sock *sk)
@@ -1382,6 +1407,40 @@
 		spin_unlock_bh(&sk->sk_lock.slock);
 }
 
+/* Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * While locked, BH processing will add new packets to
+ * the backlog queue.  This queue is processed by the
+ * owner of the socket lock right before it is released.
+ *
+ * Since ~2.3.5 it is also exclusive sleep lock serializing
+ * accesses from user process context.
+ */
+
+static inline void sock_owned_by_me(const struct sock *sk)
+{
+#ifdef CONFIG_LOCKDEP
+	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
+#endif
+}
+
+static inline bool sock_owned_by_user(const struct sock *sk)
+{
+	sock_owned_by_me(sk);
+	return sk->sk_lock.owned;
+}
+
+/* no reclassification while locks are held */
+static inline bool sock_allow_reclassification(const struct sock *csk)
+{
+	struct sock *sk = (struct sock *)csk;
+
+	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+}
 
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		      struct proto *prot, int kern);
@@ -1391,6 +1450,7 @@
 
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 			     gfp_t priority);
+void __sock_wfree(struct sk_buff *skb);
 void sock_wfree(struct sk_buff *skb);
 void skb_orphan_partial(struct sk_buff *skb);
 void sock_rfree(struct sk_buff *skb);
@@ -1418,8 +1478,11 @@
 
 struct sockcm_cookie {
 	u32 mark;
+	u16 tsflags;
 };
 
+int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+		     struct sockcm_cookie *sockc);
 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 		   struct sockcm_cookie *sockc);
 
@@ -1584,8 +1647,8 @@
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
-	return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
-						       lockdep_is_held(&sk->sk_lock.slock));
+	return rcu_dereference_check(sk->sk_dst_cache,
+				     lockdep_sock_is_held(sk));
 }
 
 static inline struct dst_entry *
@@ -1857,6 +1920,7 @@
 
 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
@@ -1893,11 +1957,19 @@
  */
 static inline void sk_set_bit(int nr, struct sock *sk)
 {
+	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+	    !sock_flag(sk, SOCK_FASYNC))
+		return;
+
 	set_bit(nr, &sk->sk_wq_raw->flags);
 }
 
 static inline void sk_clear_bit(int nr, struct sock *sk)
 {
+	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+	    !sock_flag(sk, SOCK_FASYNC))
+		return;
+
 	clear_bit(nr, &sk->sk_wq_raw->flags);
 }
 
@@ -2007,6 +2079,13 @@
 	SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
 }
 
+static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+{
+	int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+	atomic_add(segs, &sk->sk_drops);
+}
+
 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
 			   struct sk_buff *skb);
 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2054,19 +2133,21 @@
 		sk->sk_stamp = skb->tstamp;
 }
 
-void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
+void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
 
 /**
  * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
  * @sk:		socket sending this packet
+ * @tsflags:	timestamping flags to use
  * @tx_flags:	completed with instructions for time stamping
  *
  * Note : callers should take care of initial *tx_flags value (usually 0)
  */
-static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
+static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
+				     __u8 *tx_flags)
 {
-	if (unlikely(sk->sk_tsflags))
-		__sock_tx_timestamp(sk, tx_flags);
+	if (unlikely(tsflags))
+		__sock_tx_timestamp(tsflags, tx_flags);
 	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
 		*tx_flags |= SKBTX_WIFI_STATUS;
 }
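
Passing tsflags explicitly through sockcm_cookie and sock_tx_timestamp() is
what lets timestamping be requested per sendmsg() call instead of only per
socket. A hedged user-space sketch of such a call -- it assumes the reporting
flags (e.g. SOF_TIMESTAMPING_SOFTWARE) were already enabled with
setsockopt(SO_TIMESTAMPING), and omits error handling:

    #include <sys/socket.h>
    #include <linux/net_tstamp.h>

    static ssize_t send_with_tx_tstamp(int fd, const void *data, size_t len)
    {
        struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
        union {
            char buf[CMSG_SPACE(sizeof(__u32))];
            struct cmsghdr align;
        } u = {};
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type  = SO_TIMESTAMPING;
        cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
        *(__u32 *)CMSG_DATA(cmsg) = SOF_TIMESTAMPING_TX_SCHED |
                                    SOF_TIMESTAMPING_TX_SOFTWARE;

        return sendmsg(fd, &msg, 0);
    }
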
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d451122..985619a 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -54,6 +54,8 @@
 	struct net_device *orig_dev;
 	enum switchdev_attr_id id;
 	u32 flags;
+	void *complete_priv;
+	void (*complete)(struct net_device *dev, int err, void *priv);
 	union {
 		struct netdev_phys_item_id ppid;	/* PORT_PARENT_ID */
 		u8 stp_state;				/* PORT_STP_STATE */
@@ -75,6 +77,8 @@
 	struct net_device *orig_dev;
 	enum switchdev_obj_id id;
 	u32 flags;
+	void *complete_priv;
+	void (*complete)(struct net_device *dev, int err, void *priv);
 };
 
 /* SWITCHDEV_OBJ_ID_PORT_VLAN */
@@ -93,7 +97,7 @@
 	struct switchdev_obj obj;
 	u32 dst;
 	int dst_len;
-	struct fib_info fi;
+	struct fib_info *fi;
 	u8 tos;
 	u8 type;
 	u32 nlflags;
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index dae96ba..e891835 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -2,6 +2,7 @@
 #define __NET_TC_MIR_H
 
 #include <net/act_api.h>
+#include <linux/tc_act/tc_mirred.h>
 
 struct tcf_mirred {
 	struct tcf_common	common;
@@ -14,4 +15,18 @@
 #define to_mirred(a) \
 	container_of(a->priv, struct tcf_mirred, common)
 
+static inline bool is_tcf_mirred_redirect(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	if (a->ops && a->ops->type == TCA_ACT_MIRRED)
+		return to_mirred(a)->tcfm_eaction == TCA_EGRESS_REDIR;
+#endif
+	return false;
+}
+
+static inline int tcf_mirred_ifindex(const struct tc_action *a)
+{
+	return to_mirred(a)->tcfm_ifindex;
+}
+
 #endif /* __NET_TC_MIR_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b91370f..0bcc70f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -332,9 +332,8 @@
 extern struct proto tcp_prot;
 
 #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
+#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
 void tcp_tasklet_init(void);
@@ -452,10 +451,15 @@
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int tcp_connect(struct sock *sk);
+enum tcp_synack_type {
+	TCP_SYNACK_NORMAL,
+	TCP_SYNACK_FASTOPEN,
+	TCP_SYNACK_COOKIE,
+};
 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
 				struct tcp_fastopen_cookie *foc,
-				bool attach_req);
+				enum tcp_synack_type synack_type);
 int tcp_disconnect(struct sock *sk, int flags);
 
 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -533,8 +537,8 @@
 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 			       int nonagle);
 bool tcp_may_send_now(struct sock *sk);
-int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
-int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 void tcp_retransmit_timer(struct sock *sk);
 void tcp_xmit_retransmit_queue(struct sock *);
 void tcp_simple_retransmit(struct sock *);
@@ -552,6 +556,8 @@
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
 bool tcp_schedule_loss_probe(struct sock *sk);
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+			     const struct sk_buff *next_skb);
 
 /* tcp_input.c */
 void tcp_resume_early_retransmit(struct sock *sk);
@@ -754,14 +760,21 @@
 				TCPCB_REPAIRED)
 
 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
-	/* 1 byte hole */
+	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
+			eor:1,		/* Is skb MSG_EOR marked? */
+			unused:6;
 	__u32		ack_seq;	/* Sequence number ACK'd	*/
 	union {
-		struct inet_skb_parm	h4;
+		struct {
+			/* There is space for up to 20 bytes */
+		} tx;   /* only used for outgoing skbs */
+		union {
+			struct inet_skb_parm	h4;
 #if IS_ENABLED(CONFIG_IPV6)
-		struct inet6_skb_parm	h6;
+			struct inet6_skb_parm	h6;
 #endif
-	} header;	/* For incoming frames		*/
+		} header;	/* For incoming skbs */
+	};
 };
 
 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
@@ -773,7 +786,9 @@
  */
 static inline int tcp_v6_iif(const struct sk_buff *skb)
 {
-	return TCP_SKB_CB(skb)->header.h6.iif;
+	bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags);
+
+	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
 }
 #endif
 
@@ -801,6 +816,11 @@
 	return TCP_SKB_CB(skb)->tcp_gso_size;
 }
 
+static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
+{
+	return likely(!TCP_SKB_CB(skb)->eor);
+}
+
 /* Events passed to congestion control interface */
 enum tcp_ca_event {
 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
@@ -836,6 +856,11 @@
 
 union tcp_cc_info;
 
+struct ack_sample {
+	u32 pkts_acked;
+	s32 rtt_us;
+};
+
 struct tcp_congestion_ops {
 	struct list_head	list;
 	u32 key;
@@ -859,7 +884,7 @@
 	/* new value of cwnd after loss (optional) */
 	u32  (*undo_cwnd)(struct sock *sk);
 	/* hook for packet ack accounting (optional) */
-	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
+	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
 	/* get info for inet_diag (optional) */
 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
 			   union tcp_cc_info *info);
@@ -1039,17 +1064,6 @@
 	return 3;
 }
 
-/* Slow start with delack produces 3 packets of burst, so that
- * it is safe "de facto".  This will be the default - same as
- * the default reordering threshold - but if reordering increases,
- * we must be able to allow cwnd to burst at least this much in order
- * to not pull it back when holes are filled.
- */
-static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
-{
-	return tp->reordering;
-}
-
 /* Returns end sequence number of the receiver's advertised window */
 static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
@@ -1301,10 +1315,10 @@
 static inline void tcp_mib_init(struct net *net)
 {
 	/* See RFC 2012 */
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
-	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
+	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
+	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
 }
 
 /* from STCP */
@@ -1738,7 +1752,7 @@
 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
 			   struct flowi *fl, struct request_sock *req,
 			   struct tcp_fastopen_cookie *foc,
-			   bool attach_req);
+			   enum tcp_synack_type synack_type);
 };
 
 #ifdef CONFIG_SYN_COOKIES
@@ -1747,7 +1761,7 @@
 					 __u16 *mss)
 {
 	tcp_synq_overflow(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 	return ops->cookie_init_seq(skb, mss);
 }
 #else
@@ -1846,4 +1860,17 @@
 		tp->data_segs_in += segs_in;
 }
 
+/*
+ * The TCP listen path runs lockless.
+ * We forced "struct sock" to be const qualified to make sure
+ * we don't modify one of its fields by mistake.
+ * Here, we increment sk_drops which is an atomic_t, so we can safely
+ * make sock writable again.
+ */
+static inline void tcp_listendrop(const struct sock *sk)
+{
+	atomic_inc(&((struct sock *)sk)->sk_drops);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
+}
+
 #endif	/* _TCP_H */
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index b927413d..276f976 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -41,8 +41,8 @@
 				    struct sk_buff *skb);
 
 int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
-			  struct flowi6 *fl6, struct ipv6_txoptions *opt,
-			  int *hlimit, int *tclass, int *dontfrag);
+			  struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
+			  struct sockcm_cookie *sockc);
 
 void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
 			     __u16 srcp, __u16 destp, int bucket);
diff --git a/include/net/udp.h b/include/net/udp.h
index 92927f7..ae07f37 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -59,7 +59,7 @@
  *	@lock:	spinlock protecting changes to head/count
  */
 struct udp_hslot {
-	struct hlist_nulls_head	head;
+	struct hlist_head	head;
 	int			count;
 	spinlock_t		lock;
 } __attribute__((aligned(2 * sizeof(long))));
@@ -158,9 +158,21 @@
 void udp_set_csum(bool nocheck, struct sk_buff *skb,
 		  __be32 saddr, __be32 daddr, int len);
 
+static inline void udp_csum_pull_header(struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_NONE)
+		skb->csum = csum_partial(udp_hdr(skb), sizeof(struct udphdr),
+					 skb->csum);
+	skb_pull_rcsum(skb, sizeof(struct udphdr));
+	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
+}
+
+typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
+				     __be16 dport);
+
 struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
-				 struct udphdr *uh);
-int udp_gro_complete(struct sk_buff *skb, int nhoff);
+				 struct udphdr *uh, udp_lookup_t lookup);
+int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
 
 static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
 {
@@ -260,6 +272,8 @@
 struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 			       __be32 daddr, __be16 dport, int dif,
 			       struct udp_table *tbl, struct sk_buff *skb);
+struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+				 __be16 sport, __be16 dport);
 struct sock *udp6_lib_lookup(struct net *net,
 			     const struct in6_addr *saddr, __be16 sport,
 			     const struct in6_addr *daddr, __be16 dport,
@@ -269,36 +283,38 @@
 			       const struct in6_addr *daddr, __be16 dport,
 			       int dif, struct udp_table *tbl,
 			       struct sk_buff *skb);
+struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+				 __be16 sport, __be16 dport);
 
 /*
  * 	SNMP statistics for UDP and UDP-Lite
  */
-#define UDP_INC_STATS_USER(net, field, is_udplite)	      do { \
-	if (is_udplite) SNMP_INC_STATS_USER((net)->mib.udplite_statistics, field);       \
-	else		SNMP_INC_STATS_USER((net)->mib.udp_statistics, field);  }  while(0)
-#define UDP_INC_STATS_BH(net, field, is_udplite) 	      do { \
-	if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field);         \
-	else		SNMP_INC_STATS_BH((net)->mib.udp_statistics, field);    }  while(0)
+#define UDP_INC_STATS(net, field, is_udplite)		      do { \
+	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field);       \
+	else		SNMP_INC_STATS((net)->mib.udp_statistics, field);  }  while(0)
+#define __UDP_INC_STATS(net, field, is_udplite) 	      do { \
+	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field);         \
+	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field);    }  while(0)
 
-#define UDP6_INC_STATS_BH(net, field, is_udplite) 	    do { \
-	if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\
-	else		SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field);  \
+#define __UDP6_INC_STATS(net, field, is_udplite)	    do { \
+	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
+	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field);  \
 } while(0)
-#define UDP6_INC_STATS_USER(net, field, __lite)		    do { \
-	if (__lite) SNMP_INC_STATS_USER((net)->mib.udplite_stats_in6, field);  \
-	else	    SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field);      \
+#define UDP6_INC_STATS(net, field, __lite)		    do { \
+	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);  \
+	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field);      \
 } while(0)
 
 #if IS_ENABLED(CONFIG_IPV6)
-#define UDPX_INC_STATS_BH(sk, field)					\
+#define __UDPX_INC_STATS(sk, field)					\
 do {									\
 	if ((sk)->sk_family == AF_INET)					\
-		UDP_INC_STATS_BH(sock_net(sk), field, 0);		\
+		__UDP_INC_STATS(sock_net(sk), field, 0);		\
 	else								\
-		UDP6_INC_STATS_BH(sock_net(sk), field, 0);		\
+		__UDP6_INC_STATS(sock_net(sk), field, 0);		\
 } while (0)
 #else
-#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0)
+#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
 #endif
 
 /* /proc */
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index b831140..9d14f70 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -64,6 +64,11 @@
 
 typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
 typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
+typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
+						     struct sk_buff **head,
+						     struct sk_buff *skb);
+typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
+					 int nhoff);
 
 struct udp_tunnel_sock_cfg {
 	void *sk_user_data;     /* user data used by encap_rcv call back */
@@ -71,6 +76,8 @@
 	__u8  encap_type;
 	udp_tunnel_encap_rcv_t encap_rcv;
 	udp_tunnel_encap_destroy_t encap_destroy;
+	udp_tunnel_gro_receive_t gro_receive;
+	udp_tunnel_gro_complete_t gro_complete;
 };
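
The two new callbacks move UDP tunnel GRO from the global udp_offload list to
per-socket handlers. A hedged sketch of how a tunnel driver fills in the
configuration when it creates its socket (the my_tunnel_* names are
illustrative):

    struct udp_tunnel_sock_cfg cfg = {
        .sk_user_data  = my_tunnel,
        .encap_type    = 1,
        .encap_rcv     = my_tunnel_rcv,
        .gro_receive   = my_tunnel_gro_receive,
        .gro_complete  = my_tunnel_gro_complete,
    };

    setup_udp_tunnel_sock(net, sock, &cfg);
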
 
 /* Setup the given (UDP) sock to receive UDP encapsulated packets */
@@ -98,23 +105,13 @@
 				    __be16 flags, __be64 tunnel_id,
 				    int md_size);
 
-static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
-							 bool udp_csum)
+static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
 {
 	int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
 
 	return iptunnel_handle_offloads(skb, type);
 }
 
-static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
-{
-	struct udphdr *uh;
-
-	uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
-	skb_shinfo(skb)->gso_type |= uh->check ?
-				SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-}
-
 static inline void udp_tunnel_encap_enable(struct socket *sock)
 {
 #if IS_ENABLED(CONFIG_IPV6)
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 73ed2e9..b880316 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -119,6 +119,64 @@
 #define VXLAN_GBP_POLICY_APPLIED	(BIT(3) << 16)
 #define VXLAN_GBP_ID_MASK		(0xFFFF)
 
+/*
+ * VXLAN Generic Protocol Extension (VXLAN_F_GPE):
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|Ver|I|P|R|O|       Reserved                |Next Protocol  |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                VXLAN Network Identifier (VNI) |   Reserved    |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Ver = Version. Indicates VXLAN GPE protocol version.
+ *
+ * P = Next Protocol Bit. The P bit is set to indicate that the
+ *     Next Protocol field is present.
+ *
+ * O = OAM Flag Bit. The O bit is set to indicate that the packet
+ *     is an OAM packet.
+ *
+ * Next Protocol = This 8 bit field indicates the protocol header
+ * immediately following the VXLAN GPE header.
+ *
+ * https://tools.ietf.org/html/draft-ietf-nvo3-vxlan-gpe-01
+ */
+
+struct vxlanhdr_gpe {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8	oam_flag:1,
+		reserved_flags1:1,
+		np_applied:1,
+		instance_applied:1,
+		version:2,
+		reserved_flags2:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	u8	reserved_flags2:2,
+		version:2,
+		instance_applied:1,
+		np_applied:1,
+		reserved_flags1:1,
+		oam_flag:1;
+#endif
+	u8	reserved_flags3;
+	u8	reserved_flags4;
+	u8	next_protocol;
+	__be32	vx_vni;
+};
+
+/* VXLAN-GPE header flags. */
+#define VXLAN_HF_VER	cpu_to_be32(BIT(29) | BIT(28))
+#define VXLAN_HF_NP	cpu_to_be32(BIT(26))
+#define VXLAN_HF_OAM	cpu_to_be32(BIT(24))
+
+#define VXLAN_GPE_USED_BITS (VXLAN_HF_VER | VXLAN_HF_NP | VXLAN_HF_OAM | \
+			     cpu_to_be32(0xff))
+
+/* VXLAN-GPE header Next Protocol. */
+#define VXLAN_GPE_NP_IPV4      0x01
+#define VXLAN_GPE_NP_IPV6      0x02
+#define VXLAN_GPE_NP_ETHERNET  0x03
+#define VXLAN_GPE_NP_NSH       0x04
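
With GPE the inner payload is no longer assumed to be Ethernet, so the receive
path has to translate the Next Protocol field into an ethertype before handing
the packet up. A hedged sketch of that mapping (the helper name is
illustrative; the in-tree vxlan driver open-codes the equivalent):

    static __be16 vxlan_gpe_next_proto(const struct vxlanhdr_gpe *gpe)
    {
        switch (gpe->next_protocol) {
        case VXLAN_GPE_NP_IPV4:
            return htons(ETH_P_IP);
        case VXLAN_GPE_NP_IPV6:
            return htons(ETH_P_IPV6);
        case VXLAN_GPE_NP_ETHERNET:
            return htons(ETH_P_TEB);
        default:
            return 0;   /* e.g. NSH: not handled here */
        }
    }
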
+
 struct vxlan_metadata {
 	u32		gbp;
 };
@@ -126,12 +184,9 @@
 /* per UDP socket information */
 struct vxlan_sock {
 	struct hlist_node hlist;
-	struct work_struct del_work;
 	struct socket	 *sock;
-	struct rcu_head	  rcu;
 	struct hlist_head vni_list[VNI_HASH_SIZE];
 	atomic_t	  refcnt;
-	struct udp_offload udp_offloads;
 	u32		  flags;
 };
 
@@ -206,16 +261,26 @@
 #define VXLAN_F_GBP			0x800
 #define VXLAN_F_REMCSUM_NOPARTIAL	0x1000
 #define VXLAN_F_COLLECT_METADATA	0x2000
+#define VXLAN_F_GPE			0x4000
 
 /* Flags that are used in the receive path. These flags must match in
  * order for a socket to be shareable
  */
 #define VXLAN_F_RCV_FLAGS		(VXLAN_F_GBP |			\
+					 VXLAN_F_GPE |			\
 					 VXLAN_F_UDP_ZERO_CSUM6_RX |	\
 					 VXLAN_F_REMCSUM_RX |		\
 					 VXLAN_F_REMCSUM_NOPARTIAL |	\
 					 VXLAN_F_COLLECT_METADATA)
 
+/* Flags that can be set together with VXLAN_F_GPE. */
+#define VXLAN_F_ALLOWED_GPE		(VXLAN_F_GPE |			\
+					 VXLAN_F_IPV6 |			\
+					 VXLAN_F_UDP_ZERO_CSUM_TX |	\
+					 VXLAN_F_UDP_ZERO_CSUM6_TX |	\
+					 VXLAN_F_UDP_ZERO_CSUM6_RX |	\
+					 VXLAN_F_COLLECT_METADATA)
+
 struct net_device *vxlan_dev_create(struct net *net, const char *name,
 				    u8 name_assign_type, struct vxlan_config *conf);
 
@@ -252,7 +317,9 @@
 	    (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
 	     skb->inner_protocol != htons(ETH_P_TEB) ||
 	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+	      sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
+	     (skb->ip_summed != CHECKSUM_NONE &&
+	      !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 
 	return features;
@@ -325,13 +392,11 @@
 	return vni_field;
 }
 
-#if IS_ENABLED(CONFIG_VXLAN)
-void vxlan_get_rx_port(struct net_device *netdev);
-#else
 static inline void vxlan_get_rx_port(struct net_device *netdev)
 {
+	ASSERT_RTNL();
+	call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_VXLAN, netdev);
 }
-#endif
 
 static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
 {
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d6f6e50..adfebd6 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -45,12 +45,8 @@
 
 #ifdef CONFIG_XFRM_STATISTICS
 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_USER(net, field)	SNMP_INC_STATS_USER((net)-mib.xfrm_statistics, field)
 #else
 #define XFRM_INC_STATS(net, field)	((void)(net))
-#define XFRM_INC_STATS_BH(net, field)	((void)(net))
-#define XFRM_INC_STATS_USER(net, field)	((void)(net))
 #endif
 
 
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index cf8f9e7..a6b9370 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,6 +34,7 @@
 #define _RDMA_IB_H
 
 #include <linux/types.h>
+#include <linux/sched.h>
 
 struct ib_addr {
 	union {
@@ -86,4 +87,19 @@
 	__u64			sib_scope_id;
 };
 
+/*
+ * The IB interfaces that use write() as bi-directional ioctl() are
+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
+ * calls from various contexts with elevated privileges. That includes the
+ * traditional suid executable error message writes, but also various kernel
+ * interfaces that can write to file descriptors.
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static inline bool ib_safe_file_access(struct file *filp)
+{
+	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+}
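
The helper is meant to be called at the top of the legacy write() handlers; a
hedged sketch of the pattern the affected drivers adopt (function name and
message are illustrative):

    static ssize_t my_ib_write(struct file *filp, const char __user *buf,
                               size_t len, loff_t *pos)
    {
        if (!ib_safe_file_access(filp)) {
            pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor\n",
                        __func__, task_tgid_vnr(current), current->comm);
            return -EACCES;
        }

        /* ... copy and parse the command from buf as before ... */
        return len;
    }
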
+
 #endif /* _RDMA_IB_H */
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index 9ebab3a..b201744 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -68,8 +68,6 @@
 
 } __packed;
 
-extern const char *rxrpc_pkts[];
-
 #define RXRPC_SUPPORTED_PACKET_TYPES (			\
 		(1 << RXRPC_PACKET_TYPE_DATA) |		\
 		(1 << RXRPC_PACKET_TYPE_ACK) |		\
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index e0a3398..8ec7c30 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -18,25 +18,6 @@
 };
 
 /*
- * The maximum number of SG segments that we will put inside a
- * scatterlist (unless chaining is used). Should ideally fit inside a
- * single page, to avoid a higher order allocation.  We could define this
- * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order.  The
- * minimum value is 32
- */
-#define SCSI_MAX_SG_SEGMENTS	128
-
-/*
- * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
- * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
- */
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048
-#else
-#define SCSI_MAX_SG_CHAIN_SEGMENTS	SCSI_MAX_SG_SEGMENTS
-#endif
-
-/*
  * DIX-capable adapters effectively support infinite chaining for the
  * protection information scatterlist
  */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index c067019..a6c346d 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -50,6 +50,12 @@
 	SDEV_CREATED_BLOCK,	/* same as above but for created devices */
 };
 
+enum scsi_scan_mode {
+	SCSI_SCAN_INITIAL = 0,
+	SCSI_SCAN_RESCAN,
+	SCSI_SCAN_MANUAL,
+};
+
 enum scsi_device_event {
 	SDEV_EVT_MEDIA_CHANGE	= 1,	/* media has changed */
 	SDEV_EVT_INQUIRY_CHANGE_REPORTED,		/* 3F 03  UA reported */
@@ -242,6 +248,7 @@
 enum scsi_target_state {
 	STARGET_CREATED = 1,
 	STARGET_RUNNING,
+	STARGET_REMOVE,
 	STARGET_DEL,
 };
 
@@ -391,7 +398,8 @@
 extern void scsi_target_quiesce(struct scsi_target *);
 extern void scsi_target_resume(struct scsi_target *);
 extern void scsi_scan_target(struct device *parent, unsigned int channel,
-			     unsigned int id, u64 lun, int rescan);
+			     unsigned int id, u64 lun,
+			     enum scsi_scan_mode rescan);
 extern void scsi_target_reap(struct scsi_target *);
 extern void scsi_target_block(struct device *);
 extern void scsi_target_unblock(struct device *, enum scsi_device_state);
@@ -516,6 +524,31 @@
 	return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
 }
 
+/**
+ * scsi_device_supports_vpd - test if a device supports VPD pages
+ * @sdev: the &struct scsi_device to test
+ *
+ * If the 'try_vpd_pages' flag is set it takes precedence.
+ * Otherwise we will assume VPD pages are supported if the
+ * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set.
+ */
+static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
+{
+	/* Attempt VPD inquiry if the device blacklist explicitly calls
+	 * for it.
+	 */
+	if (sdev->try_vpd_pages)
+		return 1;
+	/*
+	 * Although VPD inquiries can go to SCSI-2 type devices,
+	 * some USB ones crash on receiving them, and the pages
+	 * we currently ask for are mandatory for SPC-2 and beyond
+	 */
+	if (sdev->scsi_level >= SCSI_SPC_2 && !sdev->skip_vpd_pages)
+		return 1;
+	return 0;
+}
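
Callers use this helper to decide whether to fetch VPD pages at scan or rescan
time; a hedged sketch of the intended use (scsi_attach_vpd() is the existing
midlayer routine that populates sdev->vpd_pg80/vpd_pg83):

    if (scsi_device_supports_vpd(sdev))
        scsi_attach_vpd(sdev);
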
+
 #define MODULE_ALIAS_SCSI_DEVICE(type) \
 	MODULE_ALIAS("scsi:t-" __stringify(type) "*")
 #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index fcfa3d7..76e9d27 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -37,7 +37,7 @@
  *	 used in one scatter-gather request.
  */
 #define SG_NONE 0
-#define SG_ALL	SCSI_MAX_SG_SEGMENTS
+#define SG_ALL	SG_CHUNK_SIZE
 
 #define MODE_UNKNOWN 0x00
 #define MODE_INITIATOR 0x01
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index c2ae21c..d1defd1 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -115,6 +115,8 @@
 #define VERIFY_16	      0x8f
 #define SYNCHRONIZE_CACHE_16  0x91
 #define WRITE_SAME_16	      0x93
+#define ZBC_OUT		      0x94
+#define ZBC_IN		      0x95
 #define SERVICE_ACTION_BIDIRECTIONAL 0x9d
 #define SERVICE_ACTION_IN_16  0x9e
 #define SERVICE_ACTION_OUT_16 0x9f
@@ -143,6 +145,13 @@
 #define MO_SET_PRIORITY       0x0e
 #define MO_SET_TIMESTAMP      0x0f
 #define MO_MANAGEMENT_PROTOCOL_OUT 0x10
+/* values for ZBC_IN */
+#define ZI_REPORT_ZONES	      0x00
+/* values for ZBC_OUT */
+#define ZO_CLOSE_ZONE	      0x01
+#define ZO_FINISH_ZONE	      0x02
+#define ZO_OPEN_ZONE	      0x03
+#define ZO_RESET_WRITE_POINTER 0x04
 /* values for variable length command */
 #define XDREAD_32	      0x03
 #define XDWRITE_32	      0x04
diff --git a/include/soc/at91/atmel-sfr.h b/include/soc/at91/atmel-sfr.h
new file mode 100644
index 0000000..2f9bb98
--- /dev/null
+++ b/include/soc/at91/atmel-sfr.h
@@ -0,0 +1,18 @@
+/*
+ * Atmel SFR (Special Function Registers) register offsets and bit definitions.
+ *
+ * Copyright (C) 2016 Atmel
+ *
+ * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_SFR_H
+#define _LINUX_MFD_SYSCON_ATMEL_SFR_H
+
+#define AT91_SFR_I2SCLKSEL	0x90	/* I2SC Register */
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_SFR_H */
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
new file mode 100644
index 0000000..9b1d43d
--- /dev/null
+++ b/include/soc/nps/common.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef SOC_NPS_COMMON_H
+#define SOC_NPS_COMMON_H
+
+#ifdef CONFIG_SMP
+#define NPS_IPI_IRQ					5
+#endif
+
+#define NPS_HOST_REG_BASE			0xF6000000
+
+#define NPS_MSU_BLKID				0x018
+
+#define CTOP_INST_RSPI_GIC_0_R12		0x3C56117E
+#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST	0x5B60
+#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM	0x00010422
+
+#ifndef __ASSEMBLY__
+
+/* In order to increase compilation test coverage */
+#ifdef CONFIG_ARC
+static inline void nps_ack_gic(void)
+{
+	__asm__ __volatile__ (
+	"       .word %0\n"
+	:
+	: "i"(CTOP_INST_RSPI_GIC_0_R12)
+	: "memory");
+}
+#else
+static inline void nps_ack_gic(void) { }
+#define write_aux_reg(r, v)
+#define read_aux_reg(r) 0
+#endif
+
+/* CPU global ID */
+struct global_id {
+	union {
+		struct {
+#ifdef CONFIG_EZNPS_MTM_EXT
+			u32 __reserved:20, cluster:4, core:4, thread:4;
+#else
+			u32 __reserved:24, cluster:4, core:4;
+#endif
+		};
+		u32 value;
+	};
+};
+
+/*
+ * Convert logical to physical CPU IDs
+ *
+ * The conversion swaps bits 1 and 2 of the cluster id (out of 4 bits),
+ * so that quads of logical cluster ids are physically adjacent,
+ * rather than following the ids as they come out of each cluster.
+ * The table below shows the 4x4 mesh of core clusters as laid out on chip.
+ * Cluster ids are in the format: logical (physical)
+ *
+ *    -----------------   ------------------
+ * 3 |  5 (3)   7 (7)  | | 13 (11)   15 (15)|
+ *
+ * 2 |  4 (2)   6 (6)  | | 12 (10)   14 (14)|
+ *    -----------------   ------------------
+ * 1 |  1 (1)   3 (5)  | |  9  (9)   11 (13)|
+ *
+ * 0 |  0 (0)   2 (4)  | |  8  (8)   10 (12)|
+ *    -----------------   ------------------
+ *       0       1            2        3
+ */
+static inline int nps_cluster_logic_to_phys(int cluster)
+{
+#ifdef __arc__
+	 __asm__ __volatile__(
+	"       mov r3,%0\n"
+	"       .short %1\n"
+	"       .word %2\n"
+	"       mov %0,r3\n"
+	: "+r"(cluster)
+	: "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST),
+	  "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM)
+	: "r3");
+#endif
+
+	return cluster;
+}
+
+#define NPS_CPU_TO_CLUSTER_NUM(cpu) \
+	({ struct global_id gid; gid.value = cpu; \
+		nps_cluster_logic_to_phys(gid.cluster); })
+
+struct nps_host_reg_address {
+	union {
+		struct {
+			u32 base:8, cl_x:4, cl_y:4,
+			blkid:6, reg:8, __reserved:2;
+		};
+		u32 value;
+	};
+};
+
+struct nps_host_reg_address_non_cl {
+	union {
+		struct {
+			u32 base:7, blkid:11, reg:12, __reserved:2;
+		};
+		u32 value;
+	};
+};
+
+static inline void *nps_host_reg_non_cl(u32 blkid, u32 reg)
+{
+	struct nps_host_reg_address_non_cl reg_address;
+
+	reg_address.value = NPS_HOST_REG_BASE;
+	reg_address.blkid = blkid;
+	reg_address.reg = reg;
+
+	return (void *)reg_address.value;
+}
+
+static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg)
+{
+	struct nps_host_reg_address reg_address;
+	u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu);
+
+	reg_address.value = NPS_HOST_REG_BASE;
+	reg_address.cl_x  = (cl >> 2) & 0x3;
+	reg_address.cl_y  = cl & 0x3;
+	reg_address.blkid = blkid;
+	reg_address.reg   = reg;
+
+	return (void *)reg_address.value;
+}
+#endif /* __ASSEMBLY__ */
+
+#endif /* SOC_NPS_COMMON_H */
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 961b821..b4c9219 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -26,6 +26,7 @@
 
 #define TEGRA_FUSE_SKU_CALIB_0	0xf0
 #define TEGRA30_FUSE_SATA_CALIB	0x124
+#define TEGRA_FUSE_USB_CALIB_EXT_0 0x250
 
 #ifndef __ASSEMBLY__
 
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index d18efe4..e9e5347 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -33,9 +33,9 @@
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SMP
-bool tegra_pmc_cpu_is_powered(int cpuid);
-int tegra_pmc_cpu_power_on(int cpuid);
-int tegra_pmc_cpu_remove_clamping(int cpuid);
+bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
+int tegra_pmc_cpu_power_on(unsigned int cpuid);
+int tegra_pmc_cpu_remove_clamping(unsigned int cpuid);
 #endif /* CONFIG_SMP */
 
 /*
@@ -72,6 +72,7 @@
 #define TEGRA_POWERGATE_AUD	27
 #define TEGRA_POWERGATE_DFD	28
 #define TEGRA_POWERGATE_VE2	29
+#define TEGRA_POWERGATE_MAX	TEGRA_POWERGATE_VE2
 
 #define TEGRA_POWERGATE_3D0	TEGRA_POWERGATE_3D
 
@@ -108,50 +109,51 @@
 #define TEGRA_IO_RAIL_SYS_DDC	58
 
 #ifdef CONFIG_ARCH_TEGRA
-int tegra_powergate_is_powered(int id);
-int tegra_powergate_power_on(int id);
-int tegra_powergate_power_off(int id);
-int tegra_powergate_remove_clamping(int id);
+int tegra_powergate_is_powered(unsigned int id);
+int tegra_powergate_power_on(unsigned int id);
+int tegra_powergate_power_off(unsigned int id);
+int tegra_powergate_remove_clamping(unsigned int id);
 
 /* Must be called with clk disabled, and returns with clk enabled */
-int tegra_powergate_sequence_power_up(int id, struct clk *clk,
+int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
 				      struct reset_control *rst);
 
-int tegra_io_rail_power_on(int id);
-int tegra_io_rail_power_off(int id);
+int tegra_io_rail_power_on(unsigned int id);
+int tegra_io_rail_power_off(unsigned int id);
 #else
-static inline int tegra_powergate_is_powered(int id)
+static inline int tegra_powergate_is_powered(unsigned int id)
 {
 	return -ENOSYS;
 }
 
-static inline int tegra_powergate_power_on(int id)
+static inline int tegra_powergate_power_on(unsigned int id)
 {
 	return -ENOSYS;
 }
 
-static inline int tegra_powergate_power_off(int id)
+static inline int tegra_powergate_power_off(unsigned int id)
 {
 	return -ENOSYS;
 }
 
-static inline int tegra_powergate_remove_clamping(int id)
+static inline int tegra_powergate_remove_clamping(unsigned int id)
 {
 	return -ENOSYS;
 }
 
-static inline int tegra_powergate_sequence_power_up(int id, struct clk *clk,
+static inline int tegra_powergate_sequence_power_up(unsigned int id,
+						    struct clk *clk,
 						    struct reset_control *rst)
 {
 	return -ENOSYS;
 }
 
-static inline int tegra_io_rail_power_on(int id)
+static inline int tegra_io_rail_power_on(unsigned int id)
 {
 	return -ENOSYS;
 }
 
-static inline int tegra_io_rail_power_off(int id)
+static inline int tegra_io_rail_power_off(unsigned int id)
 {
 	return -ENOSYS;
 }
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index f86ef5e..67be244 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -51,6 +51,16 @@
 	void *filter_data);
 struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
 
+/*
+ * The DAI supports packed transfers, e.g. 2 16-bit samples in a 32-bit word.
+ * If this flag is set the dmaengine driver won't put any restriction on
+ * the supported sample formats and will set the DMA transfer size to
+ * undefined. The DAI driver is responsible for disabling any unsupported
+ * formats in its configuration and for catching corner cases that are not
+ * already handled in the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
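
A DAI driver opts in by setting this flag in the DMA data it hands to the
dmaengine PCM layer. A hedged sketch, with the register offset and burst size
purely illustrative:

    struct snd_dmaengine_dai_dma_data *dma_data = &priv->playback_dma_data;

    dma_data->addr       = res->start + MY_TX_FIFO_REG;
    dma_data->addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    dma_data->maxburst   = 16;
    dma_data->flags      = SND_DMAENGINE_PCM_DAI_FLAG_PACK;

    snd_soc_dai_init_dma_data(dai, dma_data, NULL);
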
+
 /**
  * struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
  * @addr: Address of the DAI data source or destination register.
@@ -63,6 +73,7 @@
  * requesting the DMA channel.
  * @chan_name: Custom channel name to use when requesting DMA channel.
  * @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
  */
 struct snd_dmaengine_dai_dma_data {
 	dma_addr_t addr;
@@ -72,6 +83,7 @@
 	void *filter_data;
 	const char *chan_name;
 	unsigned int fifo_size;
+	unsigned int flags;
 };
 
 void snd_dmaengine_pcm_set_config_from_dai_data(
diff --git a/include/sound/hda_chmap.h b/include/sound/hda_chmap.h
index e20d219..babd445 100644
--- a/include/sound/hda_chmap.h
+++ b/include/sound/hda_chmap.h
@@ -36,6 +36,8 @@
 	int (*chmap_validate)(struct hdac_chmap *hchmap, int ca,
 			int channels, unsigned char *chmap);
 
+	int (*get_spk_alloc)(struct hdac_device *hdac, int pcm_idx);
+
 	void (*get_chmap)(struct hdac_device *hdac, int pcm_idx,
 					unsigned char *chmap);
 	void (*set_chmap)(struct hdac_device *hdac, int pcm_idx,
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index fa341fc..796cabf 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -9,9 +9,9 @@
 #ifdef CONFIG_SND_HDA_I915
 int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
 int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
-int snd_hdac_get_display_clk(struct hdac_bus *bus);
-int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
-int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate);
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
 			   bool *audio_enabled, char *buffer, int max_bytes);
 int snd_hdac_i915_init(struct hdac_bus *bus);
 int snd_hdac_i915_exit(struct hdac_bus *bus);
@@ -25,16 +25,15 @@
 {
 	return 0;
 }
-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
+static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
+{
+}
+static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
+					   hda_nid_t nid, int rate)
 {
 	return 0;
 }
-static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
-					   int rate)
-{
-	return 0;
-}
-static inline int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
+static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
 					 bool *audio_enabled, char *buffer,
 					 int max_bytes)
 {
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
index 2767c55..ca64f0f 100644
--- a/include/sound/hda_regmap.h
+++ b/include/sound/hda_regmap.h
@@ -17,6 +17,8 @@
 				    unsigned int verb);
 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
 			     unsigned int *val);
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+				      unsigned int reg, unsigned int *val);
 int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
 			      unsigned int val);
 int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 07fa592..b9593b2 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -14,6 +14,8 @@
  * @gtscap: gts capabilities pointer
  * @drsmcap: dma resume capabilities pointer
  * @hlink_list: link list of HDA links
+ * @lock: lock for link mgmt
+ * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
  */
 struct hdac_ext_bus {
 	struct hdac_bus bus;
@@ -27,6 +29,9 @@
 	void __iomem *drsmcap;
 
 	struct list_head hlink_list;
+
+	struct mutex lock;
+	bool cmd_dma_state;
 };
 
 int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
@@ -142,6 +147,9 @@
 	void __iomem *ml_addr; /* link output stream reg pointer */
 	u32 lcaps;   /* link capablities */
 	u16 lsdiid;  /* link sdi identifier */
+
+	int ref_count;
+
 	struct list_head list;
 };
 
@@ -154,6 +162,11 @@
 void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
 				 int stream);
 
+int snd_hdac_ext_bus_link_get(struct hdac_ext_bus *ebus,
+				struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
+				struct hdac_ext_link *link);
+
 /* update register macro */
 #define snd_hdac_updatel(addr, reg, mask, val)		\
 	writel(((readl(addr + reg) & ~(mask)) | (val)), \
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
new file mode 100644
index 0000000..fc3a481
--- /dev/null
+++ b/include/sound/hdmi-codec.h
@@ -0,0 +1,100 @@
+/*
+ * hdmi-codec.h - HDMI Codec driver API
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __HDMI_CODEC_H__
+#define __HDMI_CODEC_H__
+
+#include <linux/hdmi.h>
+#include <drm/drm_edid.h>
+#include <sound/asoundef.h>
+#include <uapi/sound/asound.h>
+
+/*
+ * Protocol between ASoC cpu-dai and HDMI-encoder
+ */
+struct hdmi_codec_daifmt {
+	enum {
+		HDMI_I2S,
+		HDMI_RIGHT_J,
+		HDMI_LEFT_J,
+		HDMI_DSP_A,
+		HDMI_DSP_B,
+		HDMI_AC97,
+		HDMI_SPDIF,
+	} fmt;
+	int bit_clk_inv:1;
+	int frame_clk_inv:1;
+	int bit_clk_master:1;
+	int frame_clk_master:1;
+};
+
+/*
+ * HDMI audio parameters
+ */
+struct hdmi_codec_params {
+	struct hdmi_audio_infoframe cea;
+	struct snd_aes_iec958 iec;
+	int sample_rate;
+	int sample_width;
+	int channels;
+};
+
+struct hdmi_codec_ops {
+	/*
+	 * Called when ASoC starts an audio stream setup.
+	 * Optional
+	 */
+	int (*audio_startup)(struct device *dev);
+
+	/*
+	 * Configures HDMI-encoder for audio stream.
+	 * Mandatory
+	 */
+	int (*hw_params)(struct device *dev,
+			 struct hdmi_codec_daifmt *fmt,
+			 struct hdmi_codec_params *hparms);
+
+	/*
+	 * Shuts down the audio stream.
+	 * Mandatory
+	 */
+	void (*audio_shutdown)(struct device *dev);
+
+	/*
+	 * Mute/unmute HDMI audio stream.
+	 * Optional
+	 */
+	int (*digital_mute)(struct device *dev, bool enable);
+
+	/*
+	 * Provides EDID-Like-Data from connected HDMI device.
+	 * Optional
+	 */
+	int (*get_eld)(struct device *dev, uint8_t *buf, size_t len);
+};
+
+/* HDMI codec initialization data */
+struct hdmi_codec_pdata {
+	const struct hdmi_codec_ops *ops;
+	uint i2s:1;
+	uint spdif:1;
+	int max_i2s_channels;
+};
+
+#define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
+
+#endif /* __HDMI_CODEC_H__ */
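
An HDMI encoder driver (a DRM bridge, typically) exposes its audio side by
registering a child platform device named HDMI_CODEC_DRV_NAME and passing this
pdata along. A hedged sketch -- the ops table contents are illustrative:

    static const struct hdmi_codec_ops my_hdmi_audio_ops = {
        .hw_params      = my_hdmi_audio_hw_params,
        .audio_shutdown = my_hdmi_audio_shutdown,
        .get_eld        = my_hdmi_audio_get_eld,
    };

    struct hdmi_codec_pdata codec_data = {
        .ops              = &my_hdmi_audio_ops,
        .i2s              = 1,
        .max_i2s_channels = 8,
    };

    pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
                                         PLATFORM_DEVID_AUTO,
                                         &codec_data, sizeof(codec_data));
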
diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h
index 0eed397..36f023a 100644
--- a/include/sound/pcm_iec958.h
+++ b/include/sound/pcm_iec958.h
@@ -6,4 +6,6 @@
 int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
 	size_t len);
 
+int snd_pcm_create_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
+					     u8 *cs, size_t len);
 #endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 9706946..3101d53 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -100,6 +100,7 @@
 {       .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
 	SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
 	.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
+/* DEPRECATED: use SND_SOC_DAPM_SUPPLY */
 #define SND_SOC_DAPM_MICBIAS(wname, wreg, wshift, winvert) \
 {	.id = snd_soc_dapm_micbias, .name = wname, \
 	SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
@@ -473,7 +474,7 @@
 	snd_soc_dapm_out_drv,			/* output driver */
 	snd_soc_dapm_adc,			/* analog to digital converter */
 	snd_soc_dapm_dac,			/* digital to analog converter */
-	snd_soc_dapm_micbias,		/* microphone bias (power) */
+	snd_soc_dapm_micbias,		/* microphone bias (power) - DEPRECATED: use snd_soc_dapm_supply */
 	snd_soc_dapm_mic,			/* microphone */
 	snd_soc_dapm_hp,			/* headphones */
 	snd_soc_dapm_spk,			/* speaker */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 02b4a21..fd7b58a 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1002,7 +1002,7 @@
 	 */
 	const char *platform_name;
 	struct device_node *platform_of_node;
-	int be_id;	/* optional ID for machine driver BE identification */
+	int id;	/* optional ID for machine driver link identification */
 
 	const struct snd_soc_pcm_stream *params;
 	unsigned int num_params;
@@ -1683,6 +1683,9 @@
 int snd_soc_register_dai(struct snd_soc_component *component,
 	struct snd_soc_dai_driver *dai_drv);
 
+struct snd_soc_dai *snd_soc_find_dai(
+	const struct snd_soc_dai_link_component *dlc);
+
 #include <sound/soc-dai.h>
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 685a51a..8ff6d40 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -76,6 +76,7 @@
 	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
 				struct config_group *, const char *);
 	void (*fabric_drop_wwn)(struct se_wwn *);
+	void (*add_wwn_groups)(struct se_wwn *);
 	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
 				struct config_group *, const char *);
 	void (*fabric_drop_tpg)(struct se_portal_group *);
@@ -87,7 +88,6 @@
 				struct config_group *, const char *);
 	void (*fabric_drop_np)(struct se_tpg_np *);
 	int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
-	void (*fabric_cleanup_nodeacl)(struct se_node_acl *);
 
 	struct configfs_attribute **tfc_discovery_attrs;
 	struct configfs_attribute **tfc_wwn_attrs;
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 677807f..e90e82a 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -23,7 +23,7 @@
 struct extent_buffer;
 struct btrfs_work;
 struct __btrfs_workqueue;
-struct btrfs_qgroup_operation;
+struct btrfs_qgroup_extent_record;
 
 #define show_ref_type(type)						\
 	__print_symbolic(type,						\
@@ -1231,6 +1231,93 @@
 
 	TP_ARGS(ref_root, reserved)
 );
+
+DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
+	TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+
+	TP_ARGS(rec),
+
+	TP_STRUCT__entry(
+		__field(	u64,  bytenr		)
+		__field(	u64,  num_bytes		)
+	),
+
+	TP_fast_assign(
+		__entry->bytenr		= rec->bytenr,
+		__entry->num_bytes	= rec->num_bytes;
+	),
+
+	TP_printk("bytenr = %llu, num_bytes = %llu",
+		  (unsigned long long)__entry->bytenr,
+		  (unsigned long long)__entry->num_bytes)
+);
+
+DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
+
+	TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+
+	TP_ARGS(rec)
+);
+
+DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
+
+	TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+
+	TP_ARGS(rec)
+);
+
+TRACE_EVENT(btrfs_qgroup_account_extent,
+
+	TP_PROTO(u64 bytenr, u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots),
+
+	TP_ARGS(bytenr, num_bytes, nr_old_roots, nr_new_roots),
+
+	TP_STRUCT__entry(
+		__field(	u64,  bytenr			)
+		__field(	u64,  num_bytes			)
+		__field(	u64,  nr_old_roots		)
+		__field(	u64,  nr_new_roots		)
+	),
+
+	TP_fast_assign(
+		__entry->bytenr		= bytenr;
+		__entry->num_bytes	= num_bytes;
+		__entry->nr_old_roots	= nr_old_roots;
+		__entry->nr_new_roots	= nr_new_roots;
+	),
+
+	TP_printk("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
+		  "nr_new_roots = %llu",
+		  __entry->bytenr,
+		  __entry->num_bytes,
+		  __entry->nr_old_roots,
+		  __entry->nr_new_roots)
+);
+
+TRACE_EVENT(qgroup_update_counters,
+
+	TP_PROTO(u64 qgid, u64 cur_old_count, u64 cur_new_count),
+
+	TP_ARGS(qgid, cur_old_count, cur_new_count),
+
+	TP_STRUCT__entry(
+		__field(	u64,  qgid			)
+		__field(	u64,  cur_old_count		)
+		__field(	u64,  cur_new_count		)
+	),
+
+	TP_fast_assign(
+		__entry->qgid		= qgid;
+		__entry->cur_old_count	= cur_old_count;
+		__entry->cur_new_count	= cur_new_count;
+	),
+
+	TP_printk("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
+		  __entry->qgid,
+		  __entry->cur_old_count,
+		  __entry->cur_new_count)
+);
+
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 4e4b2fa..09c71e9 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -872,7 +872,7 @@
 	TP_fast_assign(
 		struct dentry *dentry = file->f_path.dentry;
 
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
+		__entry->dev		= dentry->d_sb->s_dev;
 		__entry->ino		= d_inode(dentry)->i_ino;
 		__entry->datasync	= datasync;
 		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
@@ -1451,7 +1451,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
+		__entry->dev		= dentry->d_sb->s_dev;
 		__entry->ino		= d_inode(dentry)->i_ino;
 		__entry->parent		= parent->i_ino;
 		__entry->size		= d_inode(dentry)->i_size;
@@ -1475,7 +1475,7 @@
 	),
 
 	TP_fast_assign(
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
+		__entry->dev		= dentry->d_sb->s_dev;
 		__entry->ino		= d_inode(dentry)->i_ino;
 		__entry->ret		= ret;
 	),
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index aa69253..526fb3d 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -38,22 +38,25 @@
 );
 
 TRACE_EVENT(kvm_vcpu_wakeup,
-	    TP_PROTO(__u64 ns, bool waited),
-	    TP_ARGS(ns, waited),
+	    TP_PROTO(__u64 ns, bool waited, bool valid),
+	    TP_ARGS(ns, waited, valid),
 
 	TP_STRUCT__entry(
 		__field(	__u64,		ns		)
 		__field(	bool,		waited		)
+		__field(	bool,		valid		)
 	),
 
 	TP_fast_assign(
 		__entry->ns		= ns;
 		__entry->waited		= waited;
+		__entry->valid		= valid;
 	),
 
-	TP_printk("%s time %lld ns",
+	TP_printk("%s time %lld ns, polling %s",
 		  __entry->waited ? "wait" : "poll",
-		  __entry->ns)
+		  __entry->ns,
+		  __entry->valid ? "valid" : "invalid")
 );
 
 #if defined(CONFIG_HAVE_KVM_IRQFD)
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
new file mode 100644
index 0000000..a72f9b9
--- /dev/null
+++ b/include/trace/events/mmc.h
@@ -0,0 +1,182 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmc
+
+#if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMC_H
+
+#include <linux/blkdev.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(mmc_request_start,
+
+	TP_PROTO(struct mmc_host *host, struct mmc_request *mrq),
+
+	TP_ARGS(host, mrq),
+
+	TP_STRUCT__entry(
+		__field(u32,			cmd_opcode)
+		__field(u32,			cmd_arg)
+		__field(unsigned int,		cmd_flags)
+		__field(unsigned int,		cmd_retries)
+		__field(u32,			stop_opcode)
+		__field(u32,			stop_arg)
+		__field(unsigned int,		stop_flags)
+		__field(unsigned int,		stop_retries)
+		__field(u32,			sbc_opcode)
+		__field(u32,			sbc_arg)
+		__field(unsigned int,		sbc_flags)
+		__field(unsigned int,		sbc_retries)
+		__field(unsigned int,		blocks)
+		__field(unsigned int,		blksz)
+		__field(unsigned int,		data_flags)
+		__field(unsigned int,		can_retune)
+		__field(unsigned int,		doing_retune)
+		__field(unsigned int,		retune_now)
+		__field(int,			need_retune)
+		__field(int,			hold_retune)
+		__field(unsigned int,		retune_period)
+		__field(struct mmc_request *,	mrq)
+		__string(name,			mmc_hostname(host))
+	),
+
+	TP_fast_assign(
+		__entry->cmd_opcode = mrq->cmd->opcode;
+		__entry->cmd_arg = mrq->cmd->arg;
+		__entry->cmd_flags = mrq->cmd->flags;
+		__entry->cmd_retries = mrq->cmd->retries;
+		__entry->stop_opcode = mrq->stop ? mrq->stop->opcode : 0;
+		__entry->stop_arg = mrq->stop ? mrq->stop->arg : 0;
+		__entry->stop_flags = mrq->stop ? mrq->stop->flags : 0;
+		__entry->stop_retries = mrq->stop ? mrq->stop->retries : 0;
+		__entry->sbc_opcode = mrq->sbc ? mrq->sbc->opcode : 0;
+		__entry->sbc_arg = mrq->sbc ? mrq->sbc->arg : 0;
+		__entry->sbc_flags = mrq->sbc ? mrq->sbc->flags : 0;
+		__entry->sbc_retries = mrq->sbc ? mrq->sbc->retries : 0;
+		__entry->blksz = mrq->data ? mrq->data->blksz : 0;
+		__entry->blocks = mrq->data ? mrq->data->blocks : 0;
+		__entry->data_flags = mrq->data ? mrq->data->flags : 0;
+		__entry->can_retune = host->can_retune;
+		__entry->doing_retune = host->doing_retune;
+		__entry->retune_now = host->retune_now;
+		__entry->need_retune = host->need_retune;
+		__entry->hold_retune = host->hold_retune;
+		__entry->retune_period = host->retune_period;
+		__assign_str(name, mmc_hostname(host));
+		__entry->mrq = mrq;
+	),
+
+	TP_printk("%s: start struct mmc_request[%p]: "
+		  "cmd_opcode=%u cmd_arg=0x%x cmd_flags=0x%x cmd_retries=%u "
+		  "stop_opcode=%u stop_arg=0x%x stop_flags=0x%x stop_retries=%u "
+		  "sbc_opcode=%u sbc_arg=0x%x sbc_flags=0x%x sbc_retries=%u "
+		  "blocks=%u block_size=%u data_flags=0x%x "
+		  "can_retune=%u doing_retune=%u retune_now=%u "
+		  "need_retune=%d hold_retune=%d retune_period=%u",
+		  __get_str(name), __entry->mrq,
+		  __entry->cmd_opcode, __entry->cmd_arg,
+		  __entry->cmd_flags, __entry->cmd_retries,
+		  __entry->stop_opcode, __entry->stop_arg,
+		  __entry->stop_flags, __entry->stop_retries,
+		  __entry->sbc_opcode, __entry->sbc_arg,
+		  __entry->sbc_flags, __entry->sbc_retries,
+		  __entry->blocks, __entry->blksz, __entry->data_flags,
+		  __entry->can_retune, __entry->doing_retune,
+		  __entry->retune_now, __entry->need_retune,
+		  __entry->hold_retune, __entry->retune_period)
+);
+
+TRACE_EVENT(mmc_request_done,
+
+	TP_PROTO(struct mmc_host *host, struct mmc_request *mrq),
+
+	TP_ARGS(host, mrq),
+
+	TP_STRUCT__entry(
+		__field(u32,			cmd_opcode)
+		__field(int,			cmd_err)
+		__array(u32,			cmd_resp,	4)
+		__field(unsigned int,		cmd_retries)
+		__field(u32,			stop_opcode)
+		__field(int,			stop_err)
+		__array(u32,			stop_resp,	4)
+		__field(unsigned int,		stop_retries)
+		__field(u32,			sbc_opcode)
+		__field(int,			sbc_err)
+		__array(u32,			sbc_resp,	4)
+		__field(unsigned int,		sbc_retries)
+		__field(unsigned int,		bytes_xfered)
+		__field(int,			data_err)
+		__field(unsigned int,		can_retune)
+		__field(unsigned int,		doing_retune)
+		__field(unsigned int,		retune_now)
+		__field(int,			need_retune)
+		__field(int,			hold_retune)
+		__field(unsigned int,		retune_period)
+		__field(struct mmc_request *,	mrq)
+		__string(name,			mmc_hostname(host))
+	),
+
+	TP_fast_assign(
+		__entry->cmd_opcode = mrq->cmd->opcode;
+		__entry->cmd_err = mrq->cmd->error;
+		memcpy(__entry->cmd_resp, mrq->cmd->resp, 4);
+		__entry->cmd_retries = mrq->cmd->retries;
+		__entry->stop_opcode = mrq->stop ? mrq->stop->opcode : 0;
+		__entry->stop_err = mrq->stop ? mrq->stop->error : 0;
+		__entry->stop_resp[0] = mrq->stop ? mrq->stop->resp[0] : 0;
+		__entry->stop_resp[1] = mrq->stop ? mrq->stop->resp[1] : 0;
+		__entry->stop_resp[2] = mrq->stop ? mrq->stop->resp[2] : 0;
+		__entry->stop_resp[3] = mrq->stop ? mrq->stop->resp[3] : 0;
+		__entry->stop_retries = mrq->stop ? mrq->stop->retries : 0;
+		__entry->sbc_opcode = mrq->sbc ? mrq->sbc->opcode : 0;
+		__entry->sbc_err = mrq->sbc ? mrq->sbc->error : 0;
+		__entry->sbc_resp[0] = mrq->sbc ? mrq->sbc->resp[0] : 0;
+		__entry->sbc_resp[1] = mrq->sbc ? mrq->sbc->resp[1] : 0;
+		__entry->sbc_resp[2] = mrq->sbc ? mrq->sbc->resp[2] : 0;
+		__entry->sbc_resp[3] = mrq->sbc ? mrq->sbc->resp[3] : 0;
+		__entry->sbc_retries = mrq->sbc ? mrq->sbc->retries : 0;
+		__entry->bytes_xfered = mrq->data ? mrq->data->bytes_xfered : 0;
+		__entry->data_err = mrq->data ? mrq->data->error : 0;
+		__entry->can_retune = host->can_retune;
+		__entry->doing_retune = host->doing_retune;
+		__entry->retune_now = host->retune_now;
+		__entry->need_retune = host->need_retune;
+		__entry->hold_retune = host->hold_retune;
+		__entry->retune_period = host->retune_period;
+		__assign_str(name, mmc_hostname(host));
+		__entry->mrq = mrq;
+	),
+
+	TP_printk("%s: end struct mmc_request[%p]: "
+		  "cmd_opcode=%u cmd_err=%d cmd_resp=0x%x 0x%x 0x%x 0x%x "
+		  "cmd_retries=%u stop_opcode=%u stop_err=%d "
+		  "stop_resp=0x%x 0x%x 0x%x 0x%x stop_retries=%u "
+		  "sbc_opcode=%u sbc_err=%d sbc_resp=0x%x 0x%x 0x%x 0x%x "
+		  "sbc_retries=%u bytes_xfered=%u data_err=%d "
+		  "can_retune=%u doing_retune=%u retune_now=%u need_retune=%d "
+		  "hold_retune=%d retune_period=%u",
+		  __get_str(name), __entry->mrq,
+		  __entry->cmd_opcode, __entry->cmd_err,
+		  __entry->cmd_resp[0], __entry->cmd_resp[1],
+		  __entry->cmd_resp[2], __entry->cmd_resp[3],
+		  __entry->cmd_retries,
+		  __entry->stop_opcode, __entry->stop_err,
+		  __entry->stop_resp[0], __entry->stop_resp[1],
+		  __entry->stop_resp[2], __entry->stop_resp[3],
+		  __entry->stop_retries,
+		  __entry->sbc_opcode, __entry->sbc_err,
+		  __entry->sbc_resp[0], __entry->sbc_resp[1],
+		  __entry->sbc_resp[2], __entry->sbc_resp[3],
+		  __entry->sbc_retries,
+		  __entry->bytes_xfered, __entry->data_err,
+		  __entry->can_retune, __entry->doing_retune,
+		  __entry->retune_now, __entry->need_retune,
+		  __entry->hold_retune, __entry->retune_period)
+);
+
+#endif /* _TRACE_MMC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
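
The two tracepoints above are meant to be emitted by the MMC core around request dispatch and completion. The sketch below is illustrative only; the function names are hypothetical and not from this patch. It shows how a single .c file would instantiate the tracepoints with CREATE_TRACE_POINTS and invoke them with the TP_PROTO arguments declared above.

/* Hedged sketch: hypothetical call sites for the tracepoints defined above. */
#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

static void example_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	trace_mmc_request_start(host, mrq);	/* logs cmd/sbc/stop/data and retune state */
	/* ... hand mrq to the host controller driver ... */
}

static void example_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	/* ... the controller signalled completion ... */
	trace_mmc_request_done(host, mrq);	/* logs errors, responses and bytes_xfered */
}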
diff --git a/include/trace/events/page_isolation.h b/include/trace/events/page_isolation.h
index 6fb6440..8738a78 100644
--- a/include/trace/events/page_isolation.h
+++ b/include/trace/events/page_isolation.h
@@ -29,7 +29,7 @@
 
 	TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s",
 		__entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
-		__entry->end_pfn == __entry->fin_pfn ? "success" : "fail")
+		__entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")
 );
 
 #endif /* _TRACE_PAGE_ISOLATION_H */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index ef72c4a..d3e7565 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -172,6 +172,77 @@
 );
 
 /*
+ * Tracepoint for expedited grace-period events.  Takes a string identifying
+ * the RCU flavor, the expedited grace-period sequence number, and a string
+ * identifying the grace-period-related event as follows:
+ *
+ *	"snap": Captured snapshot of expedited grace period sequence number.
+ *	"start": Started a real expedited grace period.
+ *	"end": Ended a real expedited grace period.
+ *	"endwake": Woke piggybackers up.
+ *	"done": Someone else did the expedited grace period for us.
+ */
+TRACE_EVENT(rcu_exp_grace_period,
+
+	TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
+
+	TP_ARGS(rcuname, gpseq, gpevent),
+
+	TP_STRUCT__entry(
+		__field(const char *, rcuname)
+		__field(unsigned long, gpseq)
+		__field(const char *, gpevent)
+	),
+
+	TP_fast_assign(
+		__entry->rcuname = rcuname;
+		__entry->gpseq = gpseq;
+		__entry->gpevent = gpevent;
+	),
+
+	TP_printk("%s %lu %s",
+		  __entry->rcuname, __entry->gpseq, __entry->gpevent)
+);
+
+/*
+ * Tracepoint for expedited grace-period funnel-locking events.  Takes a
+ * string identifying the RCU flavor, an integer identifying the rcu_node
+ * combining-tree level, another pair of integers identifying the lowest-
+ * and highest-numbered CPU associated with the current rcu_node structure,
+ * and a string identifying the grace-period-related event as follows:
+ *
+ *	"nxtlvl": Advance to next level of rcu_node funnel
+ *	"wait": Wait for someone else to do expedited GP
+ */
+TRACE_EVENT(rcu_exp_funnel_lock,
+
+	TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
+		 const char *gpevent),
+
+	TP_ARGS(rcuname, level, grplo, grphi, gpevent),
+
+	TP_STRUCT__entry(
+		__field(const char *, rcuname)
+		__field(u8, level)
+		__field(int, grplo)
+		__field(int, grphi)
+		__field(const char *, gpevent)
+	),
+
+	TP_fast_assign(
+		__entry->rcuname = rcuname;
+		__entry->level = level;
+		__entry->grplo = grplo;
+		__entry->grphi = grphi;
+		__entry->gpevent = gpevent;
+	),
+
+	TP_printk("%s %d %d %d %s",
+		  __entry->rcuname, __entry->level, __entry->grplo,
+		  __entry->grphi, __entry->gpevent)
+);
+
+/*
  * Tracepoint for RCU no-CBs CPU callback handoffs.  This event is intended
  * to assist debugging of these handoffs.
  *
@@ -704,11 +775,15 @@
 #else /* #ifdef CONFIG_RCU_TRACE */
 
 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
-				    qsmask) do { } while (0)
 #define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
 				      level, grplo, grphi, event) \
 				      do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+				    qsmask) do { } while (0)
+#define trace_rcu_exp_grace_period(rcuname, gpseq, gpevent) \
+	do { } while (0)
+#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
+	do { } while (0)
 #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index 079bd10..9a9b3e2 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -94,11 +94,9 @@
 		scsi_opcode_name(WRITE_16),			\
 		scsi_opcode_name(VERIFY_16),			\
 		scsi_opcode_name(WRITE_SAME_16),		\
+		scsi_opcode_name(ZBC_OUT),			\
+		scsi_opcode_name(ZBC_IN),			\
 		scsi_opcode_name(SERVICE_ACTION_IN_16),		\
-		scsi_opcode_name(SAI_READ_CAPACITY_16),		\
-		scsi_opcode_name(SAI_GET_LBA_STATUS),		\
-		scsi_opcode_name(MI_REPORT_TARGET_PGS),		\
-		scsi_opcode_name(MO_SET_TARGET_PGS),		\
 		scsi_opcode_name(READ_32),			\
 		scsi_opcode_name(WRITE_32),			\
 		scsi_opcode_name(WRITE_SAME_32),		\
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 26486fc..88de5c2 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -20,9 +20,6 @@
 #undef __get_bitmask
 #define __get_bitmask(field) (char *)__get_dynamic_array(field)
 
-#undef __perf_addr
-#define __perf_addr(a)	(__addr = (a))
-
 #undef __perf_count
 #define __perf_count(c)	(__count = (c))
 
@@ -37,8 +34,9 @@
 	struct trace_event_call *event_call = __data;			\
 	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
 	struct trace_event_raw_##call *entry;				\
+	struct bpf_prog *prog = event_call->prog;			\
 	struct pt_regs *__regs;						\
-	u64 __addr = 0, __count = 1;					\
+	u64 __count = 1;						\
 	struct task_struct *__task = NULL;				\
 	struct hlist_head *head;					\
 	int __entry_size;						\
@@ -48,7 +46,7 @@
 	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
 									\
 	head = this_cpu_ptr(event_call->perf_events);			\
-	if (__builtin_constant_p(!__task) && !__task &&			\
+	if (!prog && __builtin_constant_p(!__task) && !__task &&	\
 				hlist_empty(head))			\
 		return;							\
 									\
@@ -56,8 +54,7 @@
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	entry = perf_trace_buf_prepare(__entry_size,			\
-			event_call->event.type, &__regs, &rctx);	\
+	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);	\
 	if (!entry)							\
 		return;							\
 									\
@@ -67,8 +64,9 @@
 									\
 	{ assign; }							\
 									\
-	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
-		__count, __regs, head, __task);				\
+	perf_trace_run_bpf_submit(entry, __entry_size, rctx,		\
+				  event_call, __count, __regs,		\
+				  head, __task);			\
 }
 
 /*
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 170c93b..80679a9 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -652,9 +652,6 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
-#undef __perf_addr
-#define __perf_addr(a)	(a)
-
 #undef __perf_count
 #define __perf_count(c)	(c)
 
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 2622b33..c51afb7 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -717,9 +717,13 @@
 __SYSCALL(__NR_mlock2, sys_mlock2)
 #define __NR_copy_file_range 285
 __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 286
+__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 287
+__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
 
 #undef __NR_syscalls
-#define __NR_syscalls 286
+#define __NR_syscalls 288
 
 /*
  * All syscalls below here should go away really,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index b71fd0b..8bdae34 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -96,6 +96,7 @@
 header-y += cycx_cfm.h
 header-y += dcbnl.h
 header-y += dccp.h
+header-y += devlink.h
 header-y += dlmconstants.h
 header-y += dlm_device.h
 header-y += dlm.h
@@ -140,6 +141,7 @@
 header-y += gigaset_dev.h
 header-y += gpio.h
 header-y += gsmmux.h
+header-y += gtp.h
 header-y += hdlcdrv.h
 header-y += hdlc.h
 header-y += hdreg.h
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 924f537..406459b 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -92,6 +92,7 @@
 	BPF_PROG_TYPE_KPROBE,
 	BPF_PROG_TYPE_SCHED_CLS,
 	BPF_PROG_TYPE_SCHED_ACT,
+	BPF_PROG_TYPE_TRACEPOINT,
 };
 
 #define BPF_PSEUDO_MAP_FD	1
@@ -346,6 +347,10 @@
 #define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
 #define BPF_F_DONT_FRAGMENT		(1ULL << 2)
 
+/* BPF_FUNC_perf_event_output flags. */
+#define BPF_F_INDEX_MASK		0xffffffffULL
+#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK
+
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
  */
@@ -365,6 +370,8 @@
 	__u32 cb[5];
 	__u32 hash;
 	__u32 tc_classid;
+	__u32 data;
+	__u32 data_end;
 };
 
 struct bpf_tunnel_key {
@@ -375,6 +382,7 @@
 	};
 	__u8 tunnel_tos;
 	__u8 tunnel_ttl;
+	__u16 tunnel_ext;
 	__u32 tunnel_label;
 };
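
The new data and data_end members of struct __sk_buff expose the packet boundaries to cls/act BPF programs for verifier-checked direct packet access. Below is a hedged sketch of a SCHED_CLS-style classifier using them; the program logic is purely illustrative, and section annotations and loader plumbing are omitted.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>

/* Illustrative only: drop runt frames using bounds-checked direct packet access. */
int cls_example(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;

	/* the verifier requires this comparison before any dereference of eth */
	if (data + sizeof(*eth) > data_end)
		return TC_ACT_SHOT;	/* too short to hold an Ethernet header */

	return eth->h_proto ? TC_ACT_OK : TC_ACT_SHOT;
}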
 
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index c9fee57..ba0073b 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -33,6 +33,30 @@
 	DEVLINK_CMD_PORT_SPLIT,
 	DEVLINK_CMD_PORT_UNSPLIT,
 
+	DEVLINK_CMD_SB_GET,		/* can dump */
+	DEVLINK_CMD_SB_SET,
+	DEVLINK_CMD_SB_NEW,
+	DEVLINK_CMD_SB_DEL,
+
+	DEVLINK_CMD_SB_POOL_GET,	/* can dump */
+	DEVLINK_CMD_SB_POOL_SET,
+	DEVLINK_CMD_SB_POOL_NEW,
+	DEVLINK_CMD_SB_POOL_DEL,
+
+	DEVLINK_CMD_SB_PORT_POOL_GET,	/* can dump */
+	DEVLINK_CMD_SB_PORT_POOL_SET,
+	DEVLINK_CMD_SB_PORT_POOL_NEW,
+	DEVLINK_CMD_SB_PORT_POOL_DEL,
+
+	DEVLINK_CMD_SB_TC_POOL_BIND_GET,	/* can dump */
+	DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+	DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+	DEVLINK_CMD_SB_TC_POOL_BIND_DEL,
+
+	/* Shared buffer occupancy monitoring commands */
+	DEVLINK_CMD_SB_OCC_SNAPSHOT,
+	DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+
 	/* add new commands above here */
 
 	__DEVLINK_CMD_MAX,
@@ -46,6 +70,31 @@
 	DEVLINK_PORT_TYPE_IB,
 };
 
+enum devlink_sb_pool_type {
+	DEVLINK_SB_POOL_TYPE_INGRESS,
+	DEVLINK_SB_POOL_TYPE_EGRESS,
+};
+
+/* static threshold - limiting the maximum number of bytes.
+ * dynamic threshold - limiting the maximum number of bytes
+ *   based on the currently available free space in the shared buffer pool.
+ *   In this mode, the maximum quota is calculated based
+ *   on the following formula:
+ *     max_quota = alpha / (1 + alpha) * Free_Buffer
+ *   where Free_Buffer is the amount of non-occupied buffer associated with
+ *   the relevant pool.
+ *   The value passed must be in the range 0-20; it is used to compute
+ *   alpha according to the following formula:
+ *     alpha = 2 ^ (passed_value - 10)
+ */
+
+enum devlink_sb_threshold_type {
+	DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+	DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC,
+};
+
+#define DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX 20
+
 enum devlink_attr {
 	/* don't change the order or add anything between, this is ABI! */
 	DEVLINK_ATTR_UNSPEC,
@@ -62,6 +111,20 @@
 	DEVLINK_ATTR_PORT_IBDEV_NAME,		/* string */
 	DEVLINK_ATTR_PORT_SPLIT_COUNT,		/* u32 */
 	DEVLINK_ATTR_PORT_SPLIT_GROUP,		/* u32 */
+	DEVLINK_ATTR_SB_INDEX,			/* u32 */
+	DEVLINK_ATTR_SB_SIZE,			/* u32 */
+	DEVLINK_ATTR_SB_INGRESS_POOL_COUNT,	/* u16 */
+	DEVLINK_ATTR_SB_EGRESS_POOL_COUNT,	/* u16 */
+	DEVLINK_ATTR_SB_INGRESS_TC_COUNT,	/* u16 */
+	DEVLINK_ATTR_SB_EGRESS_TC_COUNT,	/* u16 */
+	DEVLINK_ATTR_SB_POOL_INDEX,		/* u16 */
+	DEVLINK_ATTR_SB_POOL_TYPE,		/* u8 */
+	DEVLINK_ATTR_SB_POOL_SIZE,		/* u32 */
+	DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,	/* u8 */
+	DEVLINK_ATTR_SB_THRESHOLD,		/* u32 */
+	DEVLINK_ATTR_SB_TC_INDEX,		/* u16 */
+	DEVLINK_ATTR_SB_OCC_CUR,		/* u32 */
+	DEVLINK_ATTR_SB_OCC_MAX,		/* u32 */
 
 	/* add new attributes above here, update the policy in devlink.c */
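
A worked example of the dynamic threshold documented above (not part of the patch): a passed value of 10 gives alpha = 2^0 = 1, so a pool may use at most Free_Buffer / 2; 20 gives alpha = 1024, i.e. nearly all of the free space; 0 gives alpha = 1/1024, i.e. almost none. The helper below is a hedged sketch of that arithmetic kept in integer form; its name is illustrative.

/* Illustrative only: evaluate max_quota = alpha / (1 + alpha) * free_buffer
 * with alpha = 2 ^ (passed_value - 10), using fixed-point 1024ths so the
 * computation stays integral for passed_value in 0..20 (overflow for very
 * large pools is ignored here).
 */
static unsigned long sb_dynamic_max_quota(unsigned int passed_value,
					  unsigned long free_buffer)
{
	unsigned long long alpha_1024 = 1ULL << passed_value;	/* alpha * 1024 */

	return (unsigned long long)free_buffer * alpha_1024 /
	       (1024 + alpha_1024);
}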
 
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 71e1d0e..cb4a72f 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -282,16 +282,18 @@
 #define SHT_HIUSER	0xffffffff
 
 /* sh_flags */
-#define SHF_WRITE	0x1
-#define SHF_ALLOC	0x2
-#define SHF_EXECINSTR	0x4
-#define SHF_MASKPROC	0xf0000000
+#define SHF_WRITE		0x1
+#define SHF_ALLOC		0x2
+#define SHF_EXECINSTR		0x4
+#define SHF_RELA_LIVEPATCH	0x00100000
+#define SHF_MASKPROC		0xf0000000
 
 /* special section indexes */
 #define SHN_UNDEF	0
 #define SHN_LORESERVE	0xff00
 #define SHN_LOPROC	0xff00
 #define SHN_HIPROC	0xff1f
+#define SHN_LIVEPATCH	0xff20
 #define SHN_ABS		0xfff1
 #define SHN_COMMON	0xfff2
 #define SHN_HIRESERVE	0xffff
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 96161b8..620c8a5 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -49,6 +49,7 @@
 	FRA_TABLE,	/* Extended table id */
 	FRA_FWMASK,	/* mask for netfilter mark */
 	FRA_OIFNAME,
+	FRA_PAD,
 	__FRA_MAX
 };
 
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index a079d50..e21fe04 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -324,5 +324,7 @@
 
 /* flags for preadv2/pwritev2: */
 #define RWF_HIPRI			0x00000001 /* high priority request, poll if possible */
+#define RWF_DSYNC			0x00000002 /* per-IO O_DSYNC */
+#define RWF_SYNC			0x00000004 /* per-IO O_SYNC */
 
 #endif /* _UAPI_LINUX_FS_H */
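
RWF_DSYNC and RWF_SYNC let a single preadv2()/pwritev2() call request per-IO O_DSYNC/O_SYNC semantics without reopening the file; the corresponding syscall numbers are wired up in asm-generic/unistd.h above. A hedged userspace sketch follows, assuming a libc that already exposes the pwritev2() wrapper; older libcs would need a raw syscall(__NR_pwritev2, ...) with the offset split into two words. The helper name is illustrative.

#define _GNU_SOURCE
#include <sys/uio.h>
#include <linux/fs.h>	/* RWF_DSYNC, RWF_SYNC (added by this patch) */

/* Illustrative only: write one block with per-IO O_DSYNC behaviour. */
static ssize_t write_block_dsync(int fd, void *buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };

	return pwritev2(fd, &iov, 1, off, RWF_DSYNC);
}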
diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h
index 6487317..52deccc 100644
--- a/include/uapi/linux/gen_stats.h
+++ b/include/uapi/linux/gen_stats.h
@@ -10,6 +10,7 @@
 	TCA_STATS_QUEUE,
 	TCA_STATS_APP,
 	TCA_STATS_RATE_EST64,
+	TCA_STATS_PAD,
 	__TCA_STATS_MAX,
 };
 #define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
new file mode 100644
index 0000000..ca1054d
--- /dev/null
+++ b/include/uapi/linux/gtp.h
@@ -0,0 +1,33 @@
+#ifndef _UAPI_LINUX_GTP_H_
+#define _UAPI_LINUX_GTP_H_
+
+enum gtp_genl_cmds {
+	GTP_CMD_NEWPDP,
+	GTP_CMD_DELPDP,
+	GTP_CMD_GETPDP,
+
+	GTP_CMD_MAX,
+};
+
+enum gtp_version {
+	GTP_V0 = 0,
+	GTP_V1,
+};
+
+enum gtp_attrs {
+	GTPA_UNSPEC = 0,
+	GTPA_LINK,
+	GTPA_VERSION,
+	GTPA_TID,	/* for GTPv0 only */
+	GTPA_SGSN_ADDRESS,
+	GTPA_MS_ADDRESS,
+	GTPA_FLOW,
+	GTPA_NET_NS_FD,
+	GTPA_I_TEI,	/* for GTPv1 only */
+	GTPA_O_TEI,	/* for GTPv1 only */
+	GTPA_PAD,
+	__GTPA_MAX,
+};
+#define GTPA_MAX (__GTPA_MAX - 1)
+
+#endif /* _UAPI_LINUX_GTP_H_ */
diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h
index b0a7dd6..adcbef4 100644
--- a/include/uapi/linux/i2c.h
+++ b/include/uapi/linux/i2c.h
@@ -68,14 +68,15 @@
 struct i2c_msg {
 	__u16 addr;	/* slave address			*/
 	__u16 flags;
-#define I2C_M_TEN		0x0010	/* this is a ten bit chip address */
 #define I2C_M_RD		0x0001	/* read data, from slave to master */
-#define I2C_M_STOP		0x8000	/* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_NOSTART		0x4000	/* if I2C_FUNC_NOSTART */
-#define I2C_M_REV_DIR_ADDR	0x2000	/* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_IGNORE_NAK	0x1000	/* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_NO_RD_ACK		0x0800	/* if I2C_FUNC_PROTOCOL_MANGLING */
+					/* I2C_M_RD is guaranteed to be 0x0001! */
+#define I2C_M_TEN		0x0010	/* this is a ten bit chip address */
 #define I2C_M_RECV_LEN		0x0400	/* length will be first received byte */
+#define I2C_M_NO_RD_ACK		0x0800	/* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_IGNORE_NAK	0x1000	/* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_REV_DIR_ADDR	0x2000	/* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_NOSTART		0x4000	/* if I2C_FUNC_NOSTART */
+#define I2C_M_STOP		0x8000	/* if I2C_FUNC_PROTOCOL_MANGLING */
 	__u16 len;		/* msg length				*/
 	__u8 *buf;		/* pointer to msg data			*/
 };
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index f802775..e601c8c 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -19,14 +19,20 @@
 #ifndef _LINUX_IF_H
 #define _LINUX_IF_H
 
+#include <linux/libc-compat.h>          /* for compatibility with glibc */
 #include <linux/types.h>		/* for "__kernel_caddr_t" et al	*/
 #include <linux/socket.h>		/* for "struct sockaddr" et al	*/
 #include <linux/compiler.h>		/* for "__user" et al           */
 
+#if __UAPI_DEF_IF_IFNAMSIZ
 #define	IFNAMSIZ	16
+#endif /* __UAPI_DEF_IF_IFNAMSIZ */
 #define	IFALIASZ	256
 #include <linux/hdlc/ioctl.h>
 
+/* For glibc compatibility. An empty enum does not compile. */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
+    __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
 /**
  * enum net_device_flags - &struct net_device flags
  *
@@ -68,6 +74,8 @@
  * @IFF_ECHO: echo sent packets. Volatile.
  */
 enum net_device_flags {
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
 	IFF_UP				= 1<<0,  /* sysfs */
 	IFF_BROADCAST			= 1<<1,  /* volatile */
 	IFF_DEBUG			= 1<<2,  /* sysfs */
@@ -84,11 +92,17 @@
 	IFF_PORTSEL			= 1<<13, /* sysfs */
 	IFF_AUTOMEDIA			= 1<<14, /* sysfs */
 	IFF_DYNAMIC			= 1<<15, /* sysfs */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
 	IFF_LOWER_UP			= 1<<16, /* volatile */
 	IFF_DORMANT			= 1<<17, /* volatile */
 	IFF_ECHO			= 1<<18, /* volatile */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
 };
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
 #define IFF_UP				IFF_UP
 #define IFF_BROADCAST			IFF_BROADCAST
 #define IFF_DEBUG			IFF_DEBUG
@@ -105,9 +119,13 @@
 #define IFF_PORTSEL			IFF_PORTSEL
 #define IFF_AUTOMEDIA			IFF_AUTOMEDIA
 #define IFF_DYNAMIC			IFF_DYNAMIC
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
 #define IFF_LOWER_UP			IFF_LOWER_UP
 #define IFF_DORMANT			IFF_DORMANT
 #define IFF_ECHO			IFF_ECHO
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
 
 #define IFF_VOLATILE	(IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
 		IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
@@ -166,6 +184,8 @@
  *	being very small might be worth keeping for clean configuration.
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFMAP
 struct ifmap {
 	unsigned long mem_start;
 	unsigned long mem_end;
@@ -175,6 +195,7 @@
 	unsigned char port;
 	/* 3 bytes spare */
 };
+#endif /* __UAPI_DEF_IF_IFMAP */
 
 struct if_settings {
 	unsigned int type;	/* Type of physical device or protocol */
@@ -200,6 +221,8 @@
  * remainder may be interface specific.
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFREQ
 struct ifreq {
 #define IFHWADDRLEN	6
 	union
@@ -223,6 +246,7 @@
 		struct	if_settings ifru_settings;
 	} ifr_ifru;
 };
+#endif /* __UAPI_DEF_IF_IFREQ */
 
 #define ifr_name	ifr_ifrn.ifrn_name	/* interface name 	*/
 #define ifr_hwaddr	ifr_ifru.ifru_hwaddr	/* MAC address 		*/
@@ -249,6 +273,8 @@
  * must know all networks accessible).
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFCONF
 struct ifconf  {
 	int	ifc_len;			/* size of buffer	*/
 	union {
@@ -256,6 +282,8 @@
 		struct ifreq __user *ifcu_req;
 	} ifc_ifcu;
 };
+#endif /* __UAPI_DEF_IF_IFCONF */
+
 #define	ifc_buf	ifc_ifcu.ifcu_buf		/* buffer address	*/
 #define	ifc_req	ifc_ifcu.ifcu_req		/* array of structures	*/
 
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 0536eef..397d503 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -134,6 +134,16 @@
 	__u16 vid;
 };
 
+struct bridge_vlan_xstats {
+	__u64 rx_bytes;
+	__u64 rx_packets;
+	__u64 tx_bytes;
+	__u64 tx_packets;
+	__u16 vid;
+	__u16 pad1;
+	__u32 pad2;
+};
+
 /* Bridge multicast database attributes
  * [MDBA_MDB] = {
  *     [MDBA_MDB_ENTRY] = {
@@ -233,4 +243,12 @@
 };
 #define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
 
+/* Embedded inside LINK_XSTATS_TYPE_BRIDGE */
+enum {
+	BRIDGE_XSTATS_UNSPEC,
+	BRIDGE_XSTATS_VLAN,
+	__BRIDGE_XSTATS_MAX
+};
+#define BRIDGE_XSTATS_MAX (__BRIDGE_XSTATS_MAX - 1)
+
 #endif /* _UAPI_LINUX_IF_BRIDGE_H */
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 4a93051..cec849a 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -92,6 +92,7 @@
 #define ETH_P_TDLS	0x890D          /* TDLS */
 #define ETH_P_FIP	0x8914		/* FCoE Initialization Protocol */
 #define ETH_P_80221	0x8917		/* IEEE 802.21 Media Independent Handover Protocol */
+#define ETH_P_HSR	0x892F		/* IEC 62439-3 HSRv1	*/
 #define ETH_P_LOOPBACK	0x9000		/* Ethernet loopback packet, per IEEE 802.3 */
 #define ETH_P_QINQ1	0x9100		/* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_QINQ2	0x9200		/* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index c488066..bb36bd5 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -155,6 +155,7 @@
 	IFLA_PROTO_DOWN,
 	IFLA_GSO_MAX_SEGS,
 	IFLA_GSO_MAX_SIZE,
+	IFLA_PAD,
 	__IFLA_MAX
 };
 
@@ -270,6 +271,8 @@
 	IFLA_BR_NF_CALL_IP6TABLES,
 	IFLA_BR_NF_CALL_ARPTABLES,
 	IFLA_BR_VLAN_DEFAULT_PVID,
+	IFLA_BR_PAD,
+	IFLA_BR_VLAN_STATS_ENABLED,
 	__IFLA_BR_MAX,
 };
 
@@ -312,6 +315,7 @@
 	IFLA_BRPORT_HOLD_TIMER,
 	IFLA_BRPORT_FLUSH,
 	IFLA_BRPORT_MULTICAST_ROUTER,
+	IFLA_BRPORT_PAD,
 	__IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -431,6 +435,7 @@
 	IFLA_MACSEC_SCB,
 	IFLA_MACSEC_REPLAY_PROTECT,
 	IFLA_MACSEC_VALIDATION,
+	IFLA_MACSEC_PAD,
 	__IFLA_MACSEC_MAX,
 };
 
@@ -488,6 +493,7 @@
 	IFLA_VXLAN_REMCSUM_NOPARTIAL,
 	IFLA_VXLAN_COLLECT_METADATA,
 	IFLA_VXLAN_LABEL,
+	IFLA_VXLAN_GPE,
 	__IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX	(__IFLA_VXLAN_MAX - 1)
@@ -515,6 +521,24 @@
 };
 #define IFLA_GENEVE_MAX	(__IFLA_GENEVE_MAX - 1)
 
+/* PPP section */
+enum {
+	IFLA_PPP_UNSPEC,
+	IFLA_PPP_DEV_FD,
+	__IFLA_PPP_MAX
+};
+#define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1)
+
+/* GTP section */
+enum {
+	IFLA_GTP_UNSPEC,
+	IFLA_GTP_FD0,
+	IFLA_GTP_FD1,
+	IFLA_GTP_PDP_HASHSIZE,
+	__IFLA_GTP_MAX,
+};
+#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
+
 /* Bonding section */
 
 enum {
@@ -664,6 +688,7 @@
 	IFLA_VF_STATS_TX_BYTES,
 	IFLA_VF_STATS_BROADCAST,
 	IFLA_VF_STATS_MULTICAST,
+	IFLA_VF_STATS_PAD,
 	__IFLA_VF_STATS_MAX,
 };
 
@@ -774,9 +799,46 @@
 	IFLA_HSR_MULTICAST_SPEC,	/* Last byte of supervision addr */
 	IFLA_HSR_SUPERVISION_ADDR,	/* Supervision frame multicast addr */
 	IFLA_HSR_SEQ_NR,
+	IFLA_HSR_VERSION,		/* HSR version */
 	__IFLA_HSR_MAX,
 };
 
 #define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1)
 
+/* STATS section */
+
+struct if_stats_msg {
+	__u8  family;
+	__u8  pad1;
+	__u16 pad2;
+	__u32 ifindex;
+	__u32 filter_mask;
+};
+
+/* A stats attribute can be netdev specific or a global stat.
+ * For netdev stats, let's use the prefix IFLA_STATS_LINK_*
+ */
+enum {
+	IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */
+	IFLA_STATS_LINK_64,
+	IFLA_STATS_LINK_XSTATS,
+	__IFLA_STATS_MAX,
+};
+
+#define IFLA_STATS_MAX (__IFLA_STATS_MAX - 1)
+
+#define IFLA_STATS_FILTER_BIT(ATTR)	(1 << (ATTR - 1))
+
+/* These are embedded into IFLA_STATS_LINK_XSTATS:
+ * [IFLA_STATS_LINK_XSTATS]
+ * -> [LINK_XSTATS_TYPE_xxx]
+ *    -> [rtnl link type specific attributes]
+ */
+enum {
+	LINK_XSTATS_TYPE_UNSPEC,
+	LINK_XSTATS_TYPE_BRIDGE,
+	__LINK_XSTATS_TYPE_MAX
+};
+#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
+
 #endif /* _UAPI_LINUX_IF_LINK_H */
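
The stats dump introduced alongside these attributes (the RTM_GETSTATS message, not shown in this hunk) carries struct if_stats_msg as its header, with filter_mask selecting which IFLA_STATS_* attributes the kernel should include; IFLA_STATS_FILTER_BIT() maps an attribute number to its mask bit. A hedged sketch of filling the request header follows; the function name is illustrative and the netlink send/receive plumbing is omitted.

#include <linux/if_link.h>
#include <string.h>

/* Illustrative only: request 64-bit link stats plus bridge xstats for one device. */
static void fill_stats_request(struct if_stats_msg *ifsm, unsigned int ifindex)
{
	memset(ifsm, 0, sizeof(*ifsm));
	ifsm->family = 0;		/* AF_UNSPEC */
	ifsm->ifindex = ifindex;
	ifsm->filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64) |
			    IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_XSTATS);
}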
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 26b0d1e..f7d4831 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -19,8 +19,10 @@
 
 #define MACSEC_MAX_KEY_LEN 128
 
-#define DEFAULT_CIPHER_ID   0x0080020001000001ULL
-#define DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
+#define MACSEC_KEYID_LEN 16
+
+#define MACSEC_DEFAULT_CIPHER_ID   0x0080020001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
 
 #define MACSEC_MIN_ICV_LEN 8
 #define MACSEC_MAX_ICV_LEN 32
@@ -55,6 +57,7 @@
 	MACSEC_SECY_ATTR_INC_SCI,
 	MACSEC_SECY_ATTR_ES,
 	MACSEC_SECY_ATTR_SCB,
+	MACSEC_SECY_ATTR_PAD,
 	__MACSEC_SECY_ATTR_END,
 	NUM_MACSEC_SECY_ATTR = __MACSEC_SECY_ATTR_END,
 	MACSEC_SECY_ATTR_MAX = __MACSEC_SECY_ATTR_END - 1,
@@ -66,6 +69,7 @@
 	MACSEC_RXSC_ATTR_ACTIVE,  /* config/dump, u8 0..1 */
 	MACSEC_RXSC_ATTR_SA_LIST, /* dump, nested */
 	MACSEC_RXSC_ATTR_STATS,   /* dump, nested, macsec_rxsc_stats_attr */
+	MACSEC_RXSC_ATTR_PAD,
 	__MACSEC_RXSC_ATTR_END,
 	NUM_MACSEC_RXSC_ATTR = __MACSEC_RXSC_ATTR_END,
 	MACSEC_RXSC_ATTR_MAX = __MACSEC_RXSC_ATTR_END - 1,
@@ -77,8 +81,9 @@
 	MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */
 	MACSEC_SA_ATTR_PN,     /* config/dump, u32 */
 	MACSEC_SA_ATTR_KEY,    /* config, data */
-	MACSEC_SA_ATTR_KEYID,  /* config/dump, u64 */
+	MACSEC_SA_ATTR_KEYID,  /* config/dump, 128-bit */
 	MACSEC_SA_ATTR_STATS,  /* dump, nested, macsec_sa_stats_attr */
+	MACSEC_SA_ATTR_PAD,
 	__MACSEC_SA_ATTR_END,
 	NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END,
 	MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
@@ -110,6 +115,7 @@
 	MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
 	MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
 	MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
+	MACSEC_RXSC_STATS_ATTR_PAD,
 	__MACSEC_RXSC_STATS_ATTR_END,
 	NUM_MACSEC_RXSC_STATS_ATTR = __MACSEC_RXSC_STATS_ATTR_END,
 	MACSEC_RXSC_STATS_ATTR_MAX = __MACSEC_RXSC_STATS_ATTR_END - 1,
@@ -137,6 +143,7 @@
 	MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
 	MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
 	MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
+	MACSEC_TXSC_STATS_ATTR_PAD,
 	__MACSEC_TXSC_STATS_ATTR_END,
 	NUM_MACSEC_TXSC_STATS_ATTR = __MACSEC_TXSC_STATS_ATTR_END,
 	MACSEC_TXSC_STATS_ATTR_MAX = __MACSEC_TXSC_STATS_ATTR_END - 1,
@@ -153,6 +160,7 @@
 	MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
 	MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
 	MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
+	MACSEC_SECY_STATS_ATTR_PAD,
 	__MACSEC_SECY_STATS_ATTR_END,
 	NUM_MACSEC_SECY_STATS_ATTR = __MACSEC_SECY_STATS_ATTR_END,
 	MACSEC_SECY_STATS_ATTR_MAX = __MACSEC_SECY_STATS_ATTR_END - 1,
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
index abde7bb..948c0a9 100644
--- a/include/uapi/linux/ila.h
+++ b/include/uapi/linux/ila.h
@@ -14,6 +14,8 @@
 	ILA_ATTR_LOCATOR_MATCH,			/* u64 */
 	ILA_ATTR_IFINDEX,			/* s32 */
 	ILA_ATTR_DIR,				/* u32 */
+	ILA_ATTR_PAD,
+	ILA_ATTR_CSUM_MODE,			/* u8 */
 
 	__ILA_ATTR_MAX,
 };
@@ -34,4 +36,10 @@
 #define ILA_DIR_IN	(1 << 0)
 #define ILA_DIR_OUT	(1 << 1)
 
+enum {
+	ILA_CSUM_ADJUST_TRANSPORT,
+	ILA_CSUM_NEUTRAL_MAP,
+	ILA_CSUM_NO_ACTION,
+};
+
 #endif /* _UAPI_LINUX_ILA_H */
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 68a1f71..a166437 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -113,9 +113,13 @@
 	INET_DIAG_DCTCPINFO,
 	INET_DIAG_PROTOCOL,  /* response attribute only */
 	INET_DIAG_SKV6ONLY,
+	INET_DIAG_LOCALS,
+	INET_DIAG_PEERS,
+	INET_DIAG_PAD,
+	__INET_DIAG_MAX,
 };
 
-#define INET_DIAG_MAX INET_DIAG_SKV6ONLY
+#define INET_DIAG_MAX (__INET_DIAG_MAX - 1)
 
 /* INET_DIAG_MEM */
 
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 391395c..22d6989 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -435,6 +435,7 @@
 	IPVS_STATS_ATTR_OUTPPS,		/* current out packet rate */
 	IPVS_STATS_ATTR_INBPS,		/* current in byte rate */
 	IPVS_STATS_ATTR_OUTBPS,		/* current out byte rate */
+	IPVS_STATS_ATTR_PAD,
 	__IPVS_STATS_ATTR_MAX,
 };
 
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 840cb99..86eddd6 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -12,6 +12,8 @@
 #ifndef _LINUX_KEYCTL_H
 #define _LINUX_KEYCTL_H
 
+#include <linux/types.h>
+
 /* special process keyring shortcut IDs */
 #define KEY_SPEC_THREAD_KEYRING		-1	/* - key ID for thread-specific keyring */
 #define KEY_SPEC_PROCESS_KEYRING	-2	/* - key ID for process-specific keyring */
@@ -57,5 +59,13 @@
 #define KEYCTL_INSTANTIATE_IOV		20	/* instantiate a partially constructed key */
 #define KEYCTL_INVALIDATE		21	/* invalidate a key */
 #define KEYCTL_GET_PERSISTENT		22	/* get a user's persistent keyring */
+#define KEYCTL_DH_COMPUTE		23	/* Compute Diffie-Hellman values */
+
+/* keyctl structures */
+struct keyctl_dh_params {
+	__s32 private;
+	__s32 prime;
+	__s32 base;
+};
 
 #endif /*  _LINUX_KEYCTL_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a7f1f80..05ebf47 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -865,6 +865,7 @@
 #define KVM_CAP_SPAPR_TCE_64 125
 #define KVM_CAP_ARM_PMU_V3 126
 #define KVM_CAP_VCPU_ATTRIBUTES 127
+#define KVM_CAP_MAX_VCPU_ID 128
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 347ef22..4bd27d0 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -126,6 +126,7 @@
 	L2TP_ATTR_IP6_DADDR,		/* struct in6_addr */
 	L2TP_ATTR_UDP_ZERO_CSUM6_TX,	/* u8 */
 	L2TP_ATTR_UDP_ZERO_CSUM6_RX,	/* u8 */
+	L2TP_ATTR_PAD,
 	__L2TP_ATTR_MAX,
 };
 
@@ -142,6 +143,7 @@
 	L2TP_ATTR_RX_SEQ_DISCARDS,	/* u64 */
 	L2TP_ATTR_RX_OOS_PACKETS,	/* u64 */
 	L2TP_ATTR_RX_ERRORS,		/* u64 */
+	L2TP_ATTR_STATS_PAD,
 	__L2TP_ATTR_STATS_MAX,
 };
 
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 7d024ce..d5e38c7 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -51,6 +51,40 @@
 /* We have included glibc headers... */
 #if defined(__GLIBC__)
 
+/* Coordinate with glibc net/if.h header. */
+#if defined(_NET_IF_H)
+
+/* GLIBC headers included first so don't define anything
+ * that would already be defined. */
+
+#define __UAPI_DEF_IF_IFCONF 0
+#define __UAPI_DEF_IF_IFMAP 0
+#define __UAPI_DEF_IF_IFNAMSIZ 0
+#define __UAPI_DEF_IF_IFREQ 0
+/* Everything up to IFF_DYNAMIC matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+
+#else /* _NET_IF_H */
+
+/* Linux headers included first, and we must define everything
+ * we need. The expectation is that glibc will check the
+ * __UAPI_DEF_* defines and adjust appropriately. */
+
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
+#endif /* _NET_IF_H */
+
 /* Coordinate with glibc netinet/in.h header. */
 #if defined(_NETINET_IN_H)
 
@@ -117,6 +151,16 @@
  * that we need. */
 #else /* !defined(__GLIBC__) */
 
+/* Definitions for if.h */
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
 /* Definitions for in.h */
 #define __UAPI_DEF_IN_ADDR		1
 #define __UAPI_DEF_IN_IPPROTO		1
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index f8b0188..a478fe8 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -22,6 +22,7 @@
 	LWTUNNEL_IP_TTL,
 	LWTUNNEL_IP_TOS,
 	LWTUNNEL_IP_FLAGS,
+	LWTUNNEL_IP_PAD,
 	__LWTUNNEL_IP_MAX,
 };
 
@@ -35,6 +36,7 @@
 	LWTUNNEL_IP6_HOPLIMIT,
 	LWTUNNEL_IP6_TC,
 	LWTUNNEL_IP6_FLAGS,
+	LWTUNNEL_IP6_PAD,
 	__LWTUNNEL_IP6_MAX,
 };
 
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 0de181a..546b388 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -78,5 +78,7 @@
 #define BTRFS_TEST_MAGIC	0x73727279
 #define NSFS_MAGIC		0x6e736673
 #define BPF_FS_MAGIC		0xcafe4a11
+/* Since UDF 2.01 is ISO 13346 based... */
+#define UDF_SUPER_MAGIC		0x15013346
 
 #endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 788655b..bd99a8d 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -128,6 +128,7 @@
 	NDTPA_LOCKTIME,			/* u64, msecs */
 	NDTPA_QUEUE_LENBYTES,		/* u32 */
 	NDTPA_MCAST_REPROBES,		/* u32 */
+	NDTPA_PAD,
 	__NDTPA_MAX
 };
 #define NDTPA_MAX (__NDTPA_MAX - 1)
@@ -160,6 +161,7 @@
 	NDTA_PARMS,			/* nested TLV NDTPA_* */
 	NDTA_STATS,			/* struct ndt_stats, read-only */
 	NDTA_GC_INTERVAL,		/* u64, msecs */
+	NDTA_PAD,
 	__NDTA_MAX
 };
 #define NDTA_MAX (__NDTA_MAX - 1)
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 6d1abea..264e515 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -31,6 +31,16 @@
 				 SOF_TIMESTAMPING_LAST
 };
 
+/*
+ * SO_TIMESTAMPING flags are either for recording a packet timestamp or for
+ * reporting the timestamp to user space.
+ * Recording flags can be set both via socket options and control messages.
+ */
+#define SOF_TIMESTAMPING_TX_RECORD_MASK	(SOF_TIMESTAMPING_TX_HARDWARE | \
+					 SOF_TIMESTAMPING_TX_SOFTWARE | \
+					 SOF_TIMESTAMPING_TX_SCHED | \
+					 SOF_TIMESTAMPING_TX_ACK)
+
 /**
  * struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter
  *
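
The new SOF_TIMESTAMPING_TX_RECORD_MASK groups the flags that ask the stack to record a transmit timestamp, as opposed to the flags that control how timestamps are reported. A hedged userspace sketch of enabling software TX timestamping via the socket option follows (the per-packet control-message path mentioned in the comment above is omitted, and the helper name is illustrative).

#include <linux/net_tstamp.h>
#include <sys/socket.h>		/* SO_TIMESTAMPING via the libc asm/socket.h */

/* Illustrative only: record software TX timestamps and report them in software. */
static int enable_tx_timestamps(int fd)
{
	int val = SOF_TIMESTAMPING_TX_SOFTWARE |	/* recording flag */
		  SOF_TIMESTAMPING_SOFTWARE;		/* reporting flag */

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}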
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 63b2e34..ebb5154 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -118,6 +118,7 @@
 	IPSET_ATTR_SKBMARK,
 	IPSET_ATTR_SKBPRIO,
 	IPSET_ATTR_SKBQUEUE,
+	IPSET_ATTR_PAD,
 	__IPSET_ATTR_ADT_MAX,
 };
 #define IPSET_ATTR_ADT_MAX	(__IPSET_ATTR_ADT_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index eeffde1..6a4dbe0 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -3,6 +3,7 @@
 
 #define NFT_TABLE_MAXNAMELEN	32
 #define NFT_CHAIN_MAXNAMELEN	32
+#define NFT_SET_MAXNAMELEN	32
 #define NFT_USERDATA_MAXLEN	256
 
 /**
@@ -182,6 +183,7 @@
 	NFTA_CHAIN_USE,
 	NFTA_CHAIN_TYPE,
 	NFTA_CHAIN_COUNTERS,
+	NFTA_CHAIN_PAD,
 	__NFTA_CHAIN_MAX
 };
 #define NFTA_CHAIN_MAX		(__NFTA_CHAIN_MAX - 1)
@@ -206,6 +208,7 @@
 	NFTA_RULE_COMPAT,
 	NFTA_RULE_POSITION,
 	NFTA_RULE_USERDATA,
+	NFTA_RULE_PAD,
 	__NFTA_RULE_MAX
 };
 #define NFTA_RULE_MAX		(__NFTA_RULE_MAX - 1)
@@ -308,6 +311,7 @@
 	NFTA_SET_TIMEOUT,
 	NFTA_SET_GC_INTERVAL,
 	NFTA_SET_USERDATA,
+	NFTA_SET_PAD,
 	__NFTA_SET_MAX
 };
 #define NFTA_SET_MAX		(__NFTA_SET_MAX - 1)
@@ -341,6 +345,7 @@
 	NFTA_SET_ELEM_EXPIRATION,
 	NFTA_SET_ELEM_USERDATA,
 	NFTA_SET_ELEM_EXPR,
+	NFTA_SET_ELEM_PAD,
 	__NFTA_SET_ELEM_MAX
 };
 #define NFTA_SET_ELEM_MAX	(__NFTA_SET_ELEM_MAX - 1)
@@ -584,6 +589,7 @@
 	NFTA_DYNSET_SREG_DATA,
 	NFTA_DYNSET_TIMEOUT,
 	NFTA_DYNSET_EXPR,
+	NFTA_DYNSET_PAD,
 	__NFTA_DYNSET_MAX,
 };
 #define NFTA_DYNSET_MAX		(__NFTA_DYNSET_MAX - 1)
@@ -806,6 +812,7 @@
 	NFTA_LIMIT_BURST,
 	NFTA_LIMIT_TYPE,
 	NFTA_LIMIT_FLAGS,
+	NFTA_LIMIT_PAD,
 	__NFTA_LIMIT_MAX
 };
 #define NFTA_LIMIT_MAX		(__NFTA_LIMIT_MAX - 1)
@@ -820,6 +827,7 @@
 	NFTA_COUNTER_UNSPEC,
 	NFTA_COUNTER_BYTES,
 	NFTA_COUNTER_PACKETS,
+	NFTA_COUNTER_PAD,
 	__NFTA_COUNTER_MAX
 };
 #define NFTA_COUNTER_MAX	(__NFTA_COUNTER_MAX - 1)
@@ -1055,6 +1063,7 @@
 	NFTA_TRACE_MARK,
 	NFTA_TRACE_NFPROTO,
 	NFTA_TRACE_POLICY,
+	NFTA_TRACE_PAD,
 	__NFTA_TRACE_MAX
 };
 #define NFTA_TRACE_MAX (__NFTA_TRACE_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h
index f3e34db..36047ec 100644
--- a/include/uapi/linux/netfilter/nfnetlink_acct.h
+++ b/include/uapi/linux/netfilter/nfnetlink_acct.h
@@ -29,6 +29,7 @@
 	NFACCT_FLAGS,
 	NFACCT_QUOTA,
 	NFACCT_FILTER,
+	NFACCT_PAD,
 	__NFACCT_MAX
 };
 #define NFACCT_MAX (__NFACCT_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index c1a4e144..9df78970 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -116,6 +116,7 @@
 	CTA_PROTOINFO_DCCP_STATE,
 	CTA_PROTOINFO_DCCP_ROLE,
 	CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
+	CTA_PROTOINFO_DCCP_PAD,
 	__CTA_PROTOINFO_DCCP_MAX,
 };
 #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
@@ -135,6 +136,7 @@
 	CTA_COUNTERS_BYTES,		/* 64bit counters */
 	CTA_COUNTERS32_PACKETS,		/* old 32bit counters, unused */
 	CTA_COUNTERS32_BYTES,		/* old 32bit counters, unused */
+	CTA_COUNTERS_PAD,
 	__CTA_COUNTERS_MAX
 };
 #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
@@ -143,6 +145,7 @@
 	CTA_TIMESTAMP_UNSPEC,
 	CTA_TIMESTAMP_START,
 	CTA_TIMESTAMP_STOP,
+	CTA_TIMESTAMP_PAD,
 	__CTA_TIMESTAMP_MAX
 };
 #define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index b67a853..ae30841 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -30,6 +30,14 @@
 	__aligned_be64	usec;
 };
 
+enum nfqnl_vlan_attr {
+	NFQA_VLAN_UNSPEC,
+	NFQA_VLAN_PROTO,		/* __be16 skb vlan_proto */
+	NFQA_VLAN_TCI,			/* __be16 skb htons(vlan_tci) */
+	__NFQA_VLAN_MAX,
+};
+#define NFQA_VLAN_MAX (__NFQA_VLAN_MAX - 1)
+
 enum nfqnl_attr_type {
 	NFQA_UNSPEC,
 	NFQA_PACKET_HDR,
@@ -50,6 +58,8 @@
 	NFQA_UID,			/* __u32 sk uid */
 	NFQA_GID,			/* __u32 sk gid */
 	NFQA_SECCTX,			/* security context string */
+	NFQA_VLAN,			/* nested attribute: packet vlan info */
+	NFQA_L2HDR,			/* full L2 header */
 
 	__NFQA_MAX
 };
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 5a30a75..e23d786 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -322,7 +322,9 @@
  * @NL80211_CMD_GET_SCAN: get scan results
  * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
  *	%NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the
- *	probe requests at CCK rate or not.
+ *	probe requests at CCK rate or not. %NL80211_ATTR_MAC can be used to
+ *	specify a BSSID to scan for; if not included, the wildcard BSSID will
+ *	be used.
  * @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to
  *	NL80211_CMD_GET_SCAN and on the "scan" multicast group)
  * @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons,
@@ -427,7 +429,11 @@
  * @NL80211_CMD_ASSOCIATE: association request and notification; like
  *	NL80211_CMD_AUTHENTICATE but for Association and Reassociation
  *	(similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request,
- *	MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives).
+ *	MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). The
+ *	%NL80211_ATTR_PREV_BSSID attribute is used to specify whether the
+ *	request is for the initial association to an ESS (that attribute not
+ *	included) or for reassociation within the ESS (that attribute is
+ *	included).
  * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like
  *	NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to
  *	MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication
@@ -477,6 +483,9 @@
  *	set of BSSID,frequency parameters is used (i.e., either the enforcing
  *	%NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict
  *	%NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT).
+ *	%NL80211_ATTR_PREV_BSSID can be used to request a reassociation within
+ *	the ESS in case the device is already associated and an association with
+ *	a different BSS is desired.
  *	Background scan period can optionally be
  *	specified in %NL80211_ATTR_BG_SCAN_PERIOD,
  *	if not specified default background scan configuration
@@ -1285,8 +1294,11 @@
  * @NL80211_ATTR_RESP_IE: (Re)association response information elements as
  *	sent by peer, for ROAM and successful CONNECT events.
  *
- * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used by in ASSOCIATE
- *	commands to specify using a reassociate frame
+ * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used in ASSOCIATE and CONNECT
+ *	commands to specify a request to reassociate within an ESS, i.e., to use
+ *	Reassociate Request frame (with the value of this attribute in the
+ *	Current AP address field) instead of Association Request frame which is
+ *	used for the initial association to an ESS.
  *
  * @NL80211_ATTR_KEY: key information in a nested attribute with
  *	%NL80211_KEY_* sub-attributes
@@ -1795,6 +1807,17 @@
  *	in a PBSS. Specified in %NL80211_CMD_CONNECT to request
  *	connecting to a PCP, and in %NL80211_CMD_START_AP to start
  *	a PCP instead of AP. Relevant for DMG networks only.
+ * @NL80211_ATTR_BSS_SELECT: nested attribute for driver supporting the
+ *	BSS selection feature. When used with %NL80211_CMD_GET_WIPHY it contains
+ *	attributes according to &enum nl80211_bss_select_attr to indicate what
+ *	BSS selection behaviours are supported. When used with %NL80211_CMD_CONNECT
+ *	it contains the behaviour-specific attribute containing the parameters for
+ *	BSS selection to be done by driver and/or firmware.
+ *
+ * @NL80211_ATTR_STA_SUPPORT_P2P_PS: whether the P2P PS mechanism is supported
+ *	or not. u8, one of the values of &enum nl80211_sta_p2p_ps_status
+ *
+ * @NL80211_ATTR_PAD: attribute used for padding for 64-bit alignment
  *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2172,6 +2195,12 @@
 
 	NL80211_ATTR_PBSS,
 
+	NL80211_ATTR_BSS_SELECT,
+
+	NL80211_ATTR_STA_SUPPORT_P2P_PS,
+
+	NL80211_ATTR_PAD,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -2315,6 +2344,20 @@
 	NL80211_STA_FLAG_MAX = __NL80211_STA_FLAG_AFTER_LAST - 1
 };
 
+/**
+ * enum nl80211_sta_p2p_ps_status - station support of P2P PS
+ *
+ * @NL80211_P2P_PS_UNSUPPORTED: station doesn't support P2P PS mechanism
+ * @NL80211_P2P_PS_SUPPORTED: station supports P2P PS mechanism
+ * @NUM_NL80211_P2P_PS_STATUS: number of values
+ */
+enum nl80211_sta_p2p_ps_status {
+	NL80211_P2P_PS_UNSUPPORTED = 0,
+	NL80211_P2P_PS_SUPPORTED,
+
+	NUM_NL80211_P2P_PS_STATUS,
+};
+
 #define NL80211_STA_FLAG_MAX_OLD_API	NL80211_STA_FLAG_TDLS_PEER
 
 /**
@@ -2472,6 +2515,9 @@
  *	TID+1 and the special TID 16 (i.e. value 17) is used for non-QoS frames;
  *	each one of those is again nested with &enum nl80211_tid_stats
  *	attributes carrying the actual values.
+ * @NL80211_STA_INFO_RX_DURATION: aggregate PPDU duration for all frames
+ *	received from the station (u64, usec)
+ * @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment
  * @__NL80211_STA_INFO_AFTER_LAST: internal
  * @NL80211_STA_INFO_MAX: highest possible station info attribute
  */
@@ -2508,6 +2554,8 @@
 	NL80211_STA_INFO_BEACON_RX,
 	NL80211_STA_INFO_BEACON_SIGNAL_AVG,
 	NL80211_STA_INFO_TID_STATS,
+	NL80211_STA_INFO_RX_DURATION,
+	NL80211_STA_INFO_PAD,
 
 	/* keep last */
 	__NL80211_STA_INFO_AFTER_LAST,
@@ -2524,6 +2572,7 @@
  *	transmitted MSDUs (not counting the first attempt; u64)
  * @NL80211_TID_STATS_TX_MSDU_FAILED: number of failed transmitted
  *	MSDUs (u64)
+ * @NL80211_TID_STATS_PAD: attribute used for padding for 64-bit alignment
  * @NUM_NL80211_TID_STATS: number of attributes here
  * @NL80211_TID_STATS_MAX: highest numbered attribute here
  */
@@ -2533,6 +2582,7 @@
 	NL80211_TID_STATS_TX_MSDU,
 	NL80211_TID_STATS_TX_MSDU_RETRIES,
 	NL80211_TID_STATS_TX_MSDU_FAILED,
+	NL80211_TID_STATS_PAD,
 
 	/* keep last */
 	NUM_NL80211_TID_STATS,
@@ -2969,6 +3019,7 @@
  *	transmitting data (on channel or globally)
  * @NL80211_SURVEY_INFO_TIME_SCAN: time the radio spent for scan
  *	(on this channel or globally)
+ * @NL80211_SURVEY_INFO_PAD: attribute used for padding for 64-bit alignment
  * @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
  *	currently defined
  * @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
@@ -2984,6 +3035,7 @@
 	NL80211_SURVEY_INFO_TIME_RX,
 	NL80211_SURVEY_INFO_TIME_TX,
 	NL80211_SURVEY_INFO_TIME_SCAN,
+	NL80211_SURVEY_INFO_PAD,
 
 	/* keep last */
 	__NL80211_SURVEY_INFO_AFTER_LAST,
@@ -3409,6 +3461,7 @@
  * @NL80211_BSS_LAST_SEEN_BOOTTIME: CLOCK_BOOTTIME timestamp when this entry
  *	was last updated by a received frame. The value is expected to be
  *	accurate to about 10ms. (u64, nanoseconds)
+ * @NL80211_BSS_PAD: attribute used for padding for 64-bit alignment
  * @__NL80211_BSS_AFTER_LAST: internal
  * @NL80211_BSS_MAX: highest BSS attribute
  */
@@ -3429,6 +3482,7 @@
 	NL80211_BSS_BEACON_TSF,
 	NL80211_BSS_PRESP_DATA,
 	NL80211_BSS_LAST_SEEN_BOOTTIME,
+	NL80211_BSS_PAD,
 
 	/* keep last */
 	__NL80211_BSS_AFTER_LAST,
@@ -3614,11 +3668,15 @@
  * @NL80211_BAND_2GHZ: 2.4 GHz ISM band
  * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
  * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
+ * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace
+ *	since newer kernel versions may support more bands
  */
 enum nl80211_band {
 	NL80211_BAND_2GHZ,
 	NL80211_BAND_5GHZ,
 	NL80211_BAND_60GHZ,
+
+	NUM_NL80211_BANDS,
 };
 
 /**
@@ -4665,4 +4723,48 @@
 		__NL80211_SCHED_SCAN_PLAN_AFTER_LAST - 1
 };
 
+/**
+ * struct nl80211_bss_select_rssi_adjust - RSSI adjustment parameters.
+ *
+ * @band: band of BSS that must match for RSSI value adjustment.
+ * @delta: value used to adjust the RSSI value of matching BSS.
+ */
+struct nl80211_bss_select_rssi_adjust {
+	__u8 band;
+	__s8 delta;
+} __attribute__((packed));
+
+/**
+ * enum nl80211_bss_select_attr - attributes for bss selection.
+ *
+ * @__NL80211_BSS_SELECT_ATTR_INVALID: reserved.
+ * @NL80211_BSS_SELECT_ATTR_RSSI: Flag indicating only RSSI-based BSS selection
+ *	is requested.
+ * @NL80211_BSS_SELECT_ATTR_BAND_PREF: attribute indicating BSS
+ *	selection should be done such that the specified band is preferred.
+ *	When there are multiple BSS-es in the preferred band, the driver
+ *	shall use RSSI-based BSS selection as a second step. The value of
+ *	this attribute is according to &enum nl80211_band (u32).
+ * @NL80211_BSS_SELECT_ATTR_RSSI_ADJUST: When present the RSSI level for
+ *	BSS-es in the specified band is to be adjusted before doing
+ *	RSSI-based BSS selection. The attribute value is a packed structure
+ *	value as specified by &struct nl80211_bss_select_rssi_adjust.
+ * @NL80211_BSS_SELECT_ATTR_MAX: highest bss select attribute number.
+ * @__NL80211_BSS_SELECT_ATTR_AFTER_LAST: internal use.
+ *
+ * One and only one of these attributes is found within %NL80211_ATTR_BSS_SELECT
+ * for %NL80211_CMD_CONNECT. It specifies the required BSS selection behaviour
+ * which the driver shall use.
+ */
+enum nl80211_bss_select_attr {
+	__NL80211_BSS_SELECT_ATTR_INVALID,
+	NL80211_BSS_SELECT_ATTR_RSSI,
+	NL80211_BSS_SELECT_ATTR_BAND_PREF,
+	NL80211_BSS_SELECT_ATTR_RSSI_ADJUST,
+
+	/* keep last */
+	__NL80211_BSS_SELECT_ATTR_AFTER_LAST,
+	NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1
+};
+
 #endif /* __LINUX_NL80211_H */
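
NL80211_ATTR_BSS_SELECT is a nested attribute: exactly one of the nl80211_bss_select_attr members documented above is placed inside it on NL80211_CMD_CONNECT. Below is a hedged libnl-style sketch of asking the driver to prefer the 5 GHz band; message allocation, the other connect attributes, and error handling are omitted, and the function name is illustrative.

#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Illustrative only: nest a band-preference BSS selection into a connect request. */
static int add_bss_select_band_pref(struct nl_msg *msg)
{
	struct nlattr *nest = nla_nest_start(msg, NL80211_ATTR_BSS_SELECT);

	if (!nest)
		return -1;
	nla_put_u32(msg, NL80211_BSS_SELECT_ATTR_BAND_PREF, NL80211_BAND_5GHZ);
	return nla_nest_end(msg, nest);
}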
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 616d047..bb0d515 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -84,6 +84,7 @@
 	OVS_DP_ATTR_STATS,		/* struct ovs_dp_stats */
 	OVS_DP_ATTR_MEGAFLOW_STATS,	/* struct ovs_dp_megaflow_stats */
 	OVS_DP_ATTR_USER_FEATURES,	/* OVS_DP_F_*  */
+	OVS_DP_ATTR_PAD,
 	__OVS_DP_ATTR_MAX
 };
 
@@ -253,6 +254,7 @@
 	OVS_VPORT_ATTR_UPCALL_PID, /* array of u32 Netlink socket PIDs for */
 				/* receiving upcalls */
 	OVS_VPORT_ATTR_STATS,	/* struct ovs_vport_stats */
+	OVS_VPORT_ATTR_PAD,
 	__OVS_VPORT_ATTR_MAX
 };
 
@@ -351,6 +353,7 @@
 	OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS,		/* Nested OVS_VXLAN_EXT_* */
 	OVS_TUNNEL_KEY_ATTR_IPV6_SRC,		/* struct in6_addr src IPv6 address. */
 	OVS_TUNNEL_KEY_ATTR_IPV6_DST,		/* struct in6_addr dst IPv6 address. */
+	OVS_TUNNEL_KEY_ATTR_PAD,
 	__OVS_TUNNEL_KEY_ATTR_MAX
 };
 
@@ -518,6 +521,7 @@
 				  * logging should be suppressed. */
 	OVS_FLOW_ATTR_UFID,      /* Variable length unique flow identifier. */
 	OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */
+	OVS_FLOW_ATTR_PAD,
 	__OVS_FLOW_ATTR_MAX
 };
 
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 1becea8..4040951 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -670,7 +670,8 @@
 #define PCI_EXT_CAP_ID_SECPCI	0x19	/* Secondary PCIe Capability */
 #define PCI_EXT_CAP_ID_PMUX	0x1A	/* Protocol Multiplexing */
 #define PCI_EXT_CAP_ID_PASID	0x1B	/* Process Address Space ID */
-#define PCI_EXT_CAP_ID_MAX	PCI_EXT_CAP_ID_PASID
+#define PCI_EXT_CAP_ID_DPC	0x1D	/* Downstream Port Containment */
+#define PCI_EXT_CAP_ID_MAX	PCI_EXT_CAP_ID_DPC
 
 #define PCI_EXT_CAP_DSN_SIZEOF	12
 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -946,4 +947,21 @@
 #define PCI_TPH_CAP_ST_SHIFT	16	/* st table shift */
 #define PCI_TPH_BASE_SIZEOF	12	/* size with no st table */
 
+/* Downstream Port Containment */
+#define PCI_EXP_DPC_CAP			4	/* DPC Capability */
+#define  PCI_EXP_DPC_CAP_RP_EXT		0x20	/* Root Port Extensions for DPC */
+#define  PCI_EXP_DPC_CAP_POISONED_TLP	0x40	/* Poisoned TLP Egress Blocking Supported */
+#define  PCI_EXP_DPC_CAP_SW_TRIGGER	0x80	/* Software Triggering Supported */
+#define  PCI_EXP_DPC_CAP_DL_ACTIVE	0x1000	/* ERR_COR signal on DL_Active supported */
+
+#define PCI_EXP_DPC_CTL			6	/* DPC control */
+#define  PCI_EXP_DPC_CTL_EN_NONFATAL 	0x02	/* Enable trigger on ERR_NONFATAL message */
+#define  PCI_EXP_DPC_CTL_INT_EN 	0x08	/* DPC Interrupt Enable */
+
+#define PCI_EXP_DPC_STATUS		8	/* DPC Status */
+#define  PCI_EXP_DPC_STATUS_TRIGGER	0x01	/* Trigger Status */
+#define  PCI_EXP_DPC_STATUS_INTERRUPT	0x08	/* Interrupt Status */
+
+#define PCI_EXP_DPC_SOURCE_ID		10	/* DPC Source Identifier */
+
 #endif /* LINUX_PCI_REGS_H */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 1afe962..43fc8d2 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -340,7 +340,8 @@
 				comm_exec      :  1, /* flag comm events that are due to an exec */
 				use_clockid    :  1, /* use @clockid for time fields */
 				context_switch :  1, /* context switch data */
-				__reserved_1   : 37;
+				write_backward :  1, /* Write ring buffer from end to beginning */
+				__reserved_1   : 36;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -401,6 +402,7 @@
 #define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
 #define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
 #define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)
 
 enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
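
The new write_backward bit asks the kernel to fill the event's ring buffer from the end towards the beginning, and PERF_EVENT_IOC_PAUSE_OUTPUT stops writers so the buffer can be read consistently. A hedged sketch of opening such an event with a raw perf_event_open() syscall follows; mapping and reading the ring buffer are omitted, and the helper name is illustrative.

#include <linux/perf_event.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative only: a dummy software event whose ring buffer is written backward. */
static int open_backward_dummy_event(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	attr.write_backward = 1;			/* new in this patch */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd >= 0)
		ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, 1);	/* pause ring-buffer writers */
	return fd;
}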
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index c43c5f7..eba5914 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -66,6 +66,7 @@
 	TCA_ACT_OPTIONS,
 	TCA_ACT_INDEX,
 	TCA_ACT_STATS,
+	TCA_ACT_PAD,
 	__TCA_ACT_MAX
 };
 
@@ -150,6 +151,10 @@
 
 #define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
 
+/* tca flags definitions */
+#define TCA_CLS_FLAGS_SKIP_HW	(1 << 0)
+#define TCA_CLS_FLAGS_SKIP_SW	(1 << 1)
+
 /* U32 filters */
 
 #define TC_U32_HTID(h) ((h)&0xFFF00000)
@@ -173,6 +178,7 @@
 	TCA_U32_PCNT,
 	TCA_U32_MARK,
 	TCA_U32_FLAGS,
+	TCA_U32_PAD,
 	__TCA_U32_MAX
 };
 
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 8cb18b4..2382eed 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -179,6 +179,7 @@
 	TCA_TBF_PRATE64,
 	TCA_TBF_BURST,
 	TCA_TBF_PBURST,
+	TCA_TBF_PAD,
 	__TCA_TBF_MAX,
 };
 
@@ -368,6 +369,7 @@
 	TCA_HTB_DIRECT_QLEN,
 	TCA_HTB_RATE64,
 	TCA_HTB_CEIL64,
+	TCA_HTB_PAD,
 	__TCA_HTB_MAX,
 };
 
@@ -531,6 +533,7 @@
 	TCA_NETEM_RATE,
 	TCA_NETEM_ECN,
 	TCA_NETEM_RATE64,
+	TCA_NETEM_PAD,
 	__TCA_NETEM_MAX,
 };
 
@@ -715,6 +718,8 @@
 	TCA_FQ_CODEL_FLOWS,
 	TCA_FQ_CODEL_QUANTUM,
 	TCA_FQ_CODEL_CE_THRESHOLD,
+	TCA_FQ_CODEL_DROP_BATCH_SIZE,
+	TCA_FQ_CODEL_MEMORY_LIMIT,
 	__TCA_FQ_CODEL_MAX
 };
 
@@ -739,6 +744,8 @@
 	__u32	new_flows_len;	/* count of flows in new list */
 	__u32	old_flows_len;	/* count of flows in old list */
 	__u32	ce_mark;	/* packets above ce_threshold */
+	__u32	memory_usage;	/* in bytes */
+	__u32	drop_overmemory;
 };
 
 struct tc_fq_codel_cl_stats {
diff --git a/include/uapi/linux/qrtr.h b/include/uapi/linux/qrtr.h
new file mode 100644
index 0000000..66c0748
--- /dev/null
+++ b/include/uapi/linux/qrtr.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_QRTR_H
+#define _LINUX_QRTR_H
+
+#include <linux/socket.h>
+
+struct sockaddr_qrtr {
+	__kernel_sa_family_t sq_family;
+	__u32 sq_node;
+	__u32 sq_port;
+};
+
+#endif /* _LINUX_QRTR_H */
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index 38baddb..4d2489e 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -191,6 +191,7 @@
 	QUOTA_NL_A_DEV_MAJOR,
 	QUOTA_NL_A_DEV_MINOR,
 	QUOTA_NL_A_CAUSED_ID,
+	QUOTA_NL_A_PAD,
 	__QUOTA_NL_A_MAX,
 };
 #define QUOTA_NL_A_MAX (__QUOTA_NL_A_MAX - 1)
diff --git a/include/uapi/linux/rio_mport_cdev.h b/include/uapi/linux/rio_mport_cdev.h
new file mode 100644
index 0000000..5796bf1
--- /dev/null
+++ b/include/uapi/linux/rio_mport_cdev.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015-2016, Integrated Device Technology Inc.
+ * Copyright (c) 2015, Prodrive Technologies
+ * Copyright (c) 2015, Texas Instruments Incorporated
+ * Copyright (c) 2015, RapidIO Trade Association
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, or the BSD 3-Clause license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RIO_MPORT_CDEV_H_
+#define _RIO_MPORT_CDEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct rio_mport_maint_io {
+	__u16 rioid;		/* destID of remote device */
+	__u8  hopcount;		/* hopcount to remote device */
+	__u8  pad0[5];
+	__u32 offset;		/* offset in register space */
+	__u32 length;		/* length in bytes */
+	__u64 buffer;		/* pointer to data buffer */
+};
+
+/*
+ * Definitions for RapidIO data transfers:
+ * - memory mapped (MAPPED)
+ * - packet generation from memory (TRANSFER)
+ */
+#define RIO_TRANSFER_MODE_MAPPED	(1 << 0)
+#define RIO_TRANSFER_MODE_TRANSFER	(1 << 1)
+#define RIO_CAP_DBL_SEND		(1 << 2)
+#define RIO_CAP_DBL_RECV		(1 << 3)
+#define RIO_CAP_PW_SEND			(1 << 4)
+#define RIO_CAP_PW_RECV			(1 << 5)
+#define RIO_CAP_MAP_OUTB		(1 << 6)
+#define RIO_CAP_MAP_INB			(1 << 7)
+
+struct rio_mport_properties {
+	__u16 hdid;
+	__u8  id;			/* Physical port ID */
+	__u8  index;
+	__u32 flags;
+	__u32 sys_size;		/* Default addressing size */
+	__u8  port_ok;
+	__u8  link_speed;
+	__u8  link_width;
+	__u8  pad0;
+	__u32 dma_max_sge;
+	__u32 dma_max_size;
+	__u32 dma_align;
+	__u32 transfer_mode;		/* Default transfer mode */
+	__u32 cap_sys_size;		/* Capable system sizes */
+	__u32 cap_addr_size;		/* Capable addressing sizes */
+	__u32 cap_transfer_mode;	/* Capable transfer modes */
+	__u32 cap_mport;		/* Mport capabilities */
+};
+
+/*
+ * Definitions for RapidIO events:
+ * - incoming port-writes
+ * - incoming doorbells
+ */
+#define RIO_DOORBELL	(1 << 0)
+#define RIO_PORTWRITE	(1 << 1)
+
+struct rio_doorbell {
+	__u16 rioid;
+	__u16 payload;
+};
+
+struct rio_doorbell_filter {
+	__u16 rioid;	/* Use RIO_INVALID_DESTID to match all ids */
+	__u16 low;
+	__u16 high;
+	__u16 pad0;
+};
+
+
+struct rio_portwrite {
+	__u32 payload[16];
+};
+
+struct rio_pw_filter {
+	__u32 mask;
+	__u32 low;
+	__u32 high;
+	__u32 pad0;
+};
+
+/* A RapidIO base address for inbound requests set to the value defined below
+ * indicates that no specific RIO-to-local address translation is requested
+ * and that the driver should use direct (one-to-one) address mapping.
+ */
+#define RIO_MAP_ANY_ADDR	(__u64)(~((__u64) 0))
+
+struct rio_mmap {
+	__u16 rioid;
+	__u16 pad0[3];
+	__u64 rio_addr;
+	__u64 length;
+	__u64 handle;
+	__u64 address;
+};
+
+struct rio_dma_mem {
+	__u64 length;		/* length of DMA memory */
+	__u64 dma_handle;	/* handle associated with this memory */
+	__u64 address;
+};
+
+struct rio_event {
+	__u32 header;	/* event type RIO_DOORBELL or RIO_PORTWRITE */
+	union {
+		struct rio_doorbell doorbell;	/* header for RIO_DOORBELL */
+		struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
+	} u;
+	__u32 pad0;
+};
+
+enum rio_transfer_sync {
+	RIO_TRANSFER_SYNC,	/* synchronous transfer */
+	RIO_TRANSFER_ASYNC,	/* asynchronous transfer */
+	RIO_TRANSFER_FAF,	/* fire-and-forget transfer */
+};
+
+enum rio_transfer_dir {
+	RIO_TRANSFER_DIR_READ,	/* Read operation */
+	RIO_TRANSFER_DIR_WRITE,	/* Write operation */
+};
+
+/*
+ * RapidIO data exchange transactions are lists of individual transfers. Each
+ * transfer exchanges data between two RapidIO devices by remote direct memory
+ * access and has its own completion code.
+ *
+ * The RapidIO specification defines four types of data exchange requests:
+ * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface lets
+ * the caller specify the required type of write operation, or a combination
+ * of them when only the last data packet requires a response.
+ *
+ * NREAD:    read up to 256 bytes from remote device memory into local memory
+ * NWRITE:   write up to 256 bytes from local memory to remote device memory
+ *           without confirmation
+ * SWRITE:   as NWRITE, but all addresses and payloads must be 64-bit aligned
+ * NWRITE_R: as NWRITE, but expect acknowledgment from remote device.
+ *
+ * The default exchange is chosen from NREAD and any of the WRITE modes as the
+ * driver sees fit. For write requests the user can explicitly choose between
+ * any of the write modes for each transaction.
+ */
+enum rio_exchange {
+	RIO_EXCHANGE_DEFAULT,	/* Default method */
+	RIO_EXCHANGE_NWRITE,	/* All packets using NWRITE */
+	RIO_EXCHANGE_SWRITE,	/* All packets using SWRITE */
+	RIO_EXCHANGE_NWRITE_R,	/* Last packet NWRITE_R, others NWRITE */
+	RIO_EXCHANGE_SWRITE_R,	/* Last packet NWRITE_R, others SWRITE */
+	RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */
+};
+
+struct rio_transfer_io {
+	__u64 rio_addr;	/* Address in target's RIO mem space */
+	__u64 loc_addr;
+	__u64 handle;
+	__u64 offset;	/* Offset in buffer */
+	__u64 length;	/* Length in bytes */
+	__u16 rioid;	/* Target destID */
+	__u16 method;	/* Data exchange method, one of rio_exchange enum */
+	__u32 completion_code;	/* Completion code for this transfer */
+};
+
+struct rio_transaction {
+	__u64 block;	/* Pointer to array of <count> transfers */
+	__u32 count;	/* Number of transfers */
+	__u32 transfer_mode;	/* Data transfer mode */
+	__u16 sync;	/* Synch method, one of rio_transfer_sync enum */
+	__u16 dir;	/* Transfer direction, one of rio_transfer_dir enum */
+	__u32 pad0;
+};
+
+struct rio_async_tx_wait {
+	__u32 token;	/* DMA transaction ID token */
+	__u32 timeout;	/* Wait timeout in msec, if 0 use default TO */
+};
+
+#define RIO_MAX_DEVNAME_SZ	20
+
+struct rio_rdev_info {
+	__u16 destid;
+	__u8 hopcount;
+	__u8 pad0;
+	__u32 comptag;
+	char name[RIO_MAX_DEVNAME_SZ + 1];
+};
+
+/* Driver IOCTL codes */
+#define RIO_MPORT_DRV_MAGIC           'm'
+
+#define RIO_MPORT_MAINT_HDID_SET	\
+	_IOW(RIO_MPORT_DRV_MAGIC, 1, __u16)
+#define RIO_MPORT_MAINT_COMPTAG_SET	\
+	_IOW(RIO_MPORT_DRV_MAGIC, 2, __u32)
+#define RIO_MPORT_MAINT_PORT_IDX_GET	\
+	_IOR(RIO_MPORT_DRV_MAGIC, 3, __u32)
+#define RIO_MPORT_GET_PROPERTIES \
+	_IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
+#define RIO_MPORT_MAINT_READ_LOCAL \
+	_IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_LOCAL \
+	_IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_READ_REMOTE \
+	_IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_REMOTE \
+	_IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io)
+#define RIO_ENABLE_DOORBELL_RANGE	\
+	_IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter)
+#define RIO_DISABLE_DOORBELL_RANGE	\
+	_IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter)
+#define RIO_ENABLE_PORTWRITE_RANGE	\
+	_IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter)
+#define RIO_DISABLE_PORTWRITE_RANGE	\
+	_IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
+#define RIO_SET_EVENT_MASK		\
+	_IOW(RIO_MPORT_DRV_MAGIC, 13, __u32)
+#define RIO_GET_EVENT_MASK		\
+	_IOR(RIO_MPORT_DRV_MAGIC, 14, __u32)
+#define RIO_MAP_OUTBOUND \
+	_IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
+#define RIO_UNMAP_OUTBOUND \
+	_IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap)
+#define RIO_MAP_INBOUND \
+	_IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
+#define RIO_UNMAP_INBOUND \
+	_IOW(RIO_MPORT_DRV_MAGIC, 18, __u64)
+#define RIO_ALLOC_DMA \
+	_IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
+#define RIO_FREE_DMA \
+	_IOW(RIO_MPORT_DRV_MAGIC, 20, __u64)
+#define RIO_TRANSFER \
+	_IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
+#define RIO_WAIT_FOR_ASYNC \
+	_IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait)
+#define RIO_DEV_ADD \
+	_IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info)
+#define RIO_DEV_DEL \
+	_IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info)
+
+#endif /* _RIO_MPORT_CDEV_H_ */
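The transfer path exposed by the new header describes individual transfers in struct rio_transfer_io, wraps them in a struct rio_transaction and issues RIO_TRANSFER on the mport character device. A user-space sketch; the /dev/rio_mport0 node name and the plain user buffer are assumptions, and real users would normally pair this with the mapping/DMA ioctls above:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rio_mport_cdev.h>

/* Hedged sketch: one synchronous NWRITE_R transfer of 'len' bytes from a
 * local buffer to 'rio_addr' on destination 'destid'.
 */
static int rio_write_example(__u16 destid, __u64 rio_addr, void *buf, __u64 len)
{
	struct rio_transfer_io xfer;
	struct rio_transaction trans;
	int fd, ret;

	fd = open("/dev/rio_mport0", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&xfer, 0, sizeof(xfer));
	xfer.rioid = destid;
	xfer.rio_addr = rio_addr;
	xfer.loc_addr = (__u64)(unsigned long)buf;
	xfer.length = len;
	xfer.method = RIO_EXCHANGE_NWRITE_R;

	memset(&trans, 0, sizeof(trans));
	trans.block = (__u64)(unsigned long)&xfer;
	trans.count = 1;
	trans.transfer_mode = RIO_TRANSFER_MODE_TRANSFER;
	trans.sync = RIO_TRANSFER_SYNC;
	trans.dir = RIO_TRANSFER_DIR_WRITE;

	ret = ioctl(fd, RIO_TRANSFER, &trans);
	close(fd);
	return ret;
}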
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index ca764b5..262f037 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -139,6 +139,11 @@
 	RTM_GETNSID = 90,
 #define RTM_GETNSID RTM_GETNSID
 
+	RTM_NEWSTATS = 92,
+#define RTM_NEWSTATS RTM_NEWSTATS
+	RTM_GETSTATS = 94,
+#define RTM_GETSTATS RTM_GETSTATS
+
 	__RTM_MAX,
 #define RTM_MAX		(((__RTM_MAX + 3) & ~3) - 1)
 };
@@ -312,6 +317,7 @@
 	RTA_ENCAP_TYPE,
 	RTA_ENCAP,
 	RTA_EXPIRES,
+	RTA_PAD,
 	__RTA_MAX
 };
 
@@ -536,6 +542,7 @@
 	TCA_FCNT,
 	TCA_STATS2,
 	TCA_STAB,
+	TCA_PAD,
 	__TCA_MAX
 };
 
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index e513a4e..24da334 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -264,4 +264,7 @@
 /* MVEBU UART */
 #define PORT_MVEBU	114
 
+/* Microchip PIC32 UART */
+#define PORT_PIC32	115
+
 #endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/signal.h b/include/uapi/linux/signal.h
index e1bd50c2..cd0804b 100644
--- a/include/uapi/linux/signal.h
+++ b/include/uapi/linux/signal.h
@@ -7,4 +7,9 @@
 #define SS_ONSTACK	1
 #define SS_DISABLE	2
 
+/* bit-flags */
+#define SS_AUTODISARM	(1U << 31)	/* disable sigaltstack during signal handling */
+/* mask for all SS_xxx flags */
+#define SS_FLAG_BITS	SS_AUTODISARM
+
 #endif /* _UAPI_LINUX_SIGNAL_H */
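SS_AUTODISARM above is ORed into ss_flags when installing an alternate signal stack; the kernel then clears the alternate-stack setting for the duration of a handler running on it, so switching context out of the handler (swapcontext() and friends) cannot lead to the stack being reused and corrupted. A minimal user-space sketch, assuming a libc whose headers already expose the new flag:

#include <signal.h>

/* Hedged sketch: install an alternate stack that is auto-disarmed while a
 * handler runs on it.  SS_AUTODISARM comes from the updated uapi header;
 * older libcs may not define it yet.
 */
static char altstack[SIGSTKSZ];

static int install_autodisarm_stack(void)
{
	stack_t ss = {
		.ss_sp = altstack,
		.ss_size = sizeof(altstack),
		.ss_flags = SS_AUTODISARM,
	};

	return sigaltstack(&ss, NULL);
}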
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index bae2d80..7ff505d 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -20,6 +20,7 @@
 	SK_MEMINFO_WMEM_QUEUED,
 	SK_MEMINFO_OPTMEM,
 	SK_MEMINFO_BACKLOG,
+	SK_MEMINFO_DROPS,
 
 	SK_MEMINFO_VARS,
 };
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index aa9f104..621fa8a 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1 +1,5 @@
 #include <linux/compiler.h>
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 3f10e53..8f3a8f6 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -45,9 +45,7 @@
 
 static inline __attribute_const__ __u16 __fswab16(__u16 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP16__
-	return __builtin_bswap16(val);
-#elif defined (__arch_swab16)
+#if defined (__arch_swab16)
 	return __arch_swab16(val);
 #else
 	return ___constant_swab16(val);
@@ -56,9 +54,7 @@
 
 static inline __attribute_const__ __u32 __fswab32(__u32 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP32__
-	return __builtin_bswap32(val);
-#elif defined(__arch_swab32)
+#if defined(__arch_swab32)
 	return __arch_swab32(val);
 #else
 	return ___constant_swab32(val);
@@ -67,9 +63,7 @@
 
 static inline __attribute_const__ __u64 __fswab64(__u64 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP64__
-	return __builtin_bswap64(val);
-#elif defined (__arch_swab64)
+#if defined (__arch_swab64)
 	return __arch_swab64(val);
 #elif defined(__SWAB_64_THRU_32__)
 	__u32 h = val >> 32;
@@ -102,28 +96,40 @@
  * __swab16 - return a byteswapped 16-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP16__
+#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
+#else
 #define __swab16(x)				\
 	(__builtin_constant_p((__u16)(x)) ?	\
 	___constant_swab16(x) :			\
 	__fswab16(x))
+#endif
 
 /**
  * __swab32 - return a byteswapped 32-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP32__
+#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
+#else
 #define __swab32(x)				\
 	(__builtin_constant_p((__u32)(x)) ?	\
 	___constant_swab32(x) :			\
 	__fswab32(x))
+#endif
 
 /**
  * __swab64 - return a byteswapped 64-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP64__
+#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
+#else
 #define __swab64(x)				\
 	(__builtin_constant_p((__u64)(x)) ?	\
 	___constant_swab64(x) :			\
 	__fswab64(x))
+#endif
 
 /**
  * __swahw32 - return a word-swapped 32-bit value
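The swab.h change above moves the __builtin_bswap*() calls from the __fswab*() helpers into the __swab*() macros themselves, so that swaps of compile-time constants can still be folded into constants (for example in static initializers) when the compiler provides the builtins. A small user-space sketch of what the builtins compute, independent of the kernel macros:

#include <assert.h>
#include <stdint.h>

/* Hedged sketch: the builtins reverse byte order, and applying a swap
 * twice is the identity -- the property both the macro and helper paths
 * rely on.
 */
int main(void)
{
	uint32_t x = 0x11223344u;

	assert(__builtin_bswap16(0x1234) == 0x3412);
	assert(__builtin_bswap32(x) == 0x44332211u);
	assert(__builtin_bswap32(__builtin_bswap32(x)) == x);
	return 0;
}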
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 242cf0c..e3969bd 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -10,3 +10,4 @@
 header-y += tc_vlan.h
 header-y += tc_bpf.h
 header-y += tc_connmark.h
+header-y += tc_ife.h
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 07f17cc..063d9d4 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -26,6 +26,7 @@
 	TCA_ACT_BPF_OPS,
 	TCA_ACT_BPF_FD,
 	TCA_ACT_BPF_NAME,
+	TCA_ACT_BPF_PAD,
 	__TCA_ACT_BPF_MAX,
 };
 #define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_connmark.h b/include/uapi/linux/tc_act/tc_connmark.h
index 994b097..62a5e94 100644
--- a/include/uapi/linux/tc_act/tc_connmark.h
+++ b/include/uapi/linux/tc_act/tc_connmark.h
@@ -15,6 +15,7 @@
 	TCA_CONNMARK_UNSPEC,
 	TCA_CONNMARK_PARMS,
 	TCA_CONNMARK_TM,
+	TCA_CONNMARK_PAD,
 	__TCA_CONNMARK_MAX
 };
 #define TCA_CONNMARK_MAX (__TCA_CONNMARK_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_csum.h b/include/uapi/linux/tc_act/tc_csum.h
index a047c49..8ac8041 100644
--- a/include/uapi/linux/tc_act/tc_csum.h
+++ b/include/uapi/linux/tc_act/tc_csum.h
@@ -10,6 +10,7 @@
 	TCA_CSUM_UNSPEC,
 	TCA_CSUM_PARMS,
 	TCA_CSUM_TM,
+	TCA_CSUM_PAD,
 	__TCA_CSUM_MAX
 };
 #define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_defact.h b/include/uapi/linux/tc_act/tc_defact.h
index 17dddb4..d2a3abb 100644
--- a/include/uapi/linux/tc_act/tc_defact.h
+++ b/include/uapi/linux/tc_act/tc_defact.h
@@ -12,6 +12,7 @@
 	TCA_DEF_TM,
 	TCA_DEF_PARMS,
 	TCA_DEF_DATA,
+	TCA_DEF_PAD,
 	__TCA_DEF_MAX
 };
 #define TCA_DEF_MAX (__TCA_DEF_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_gact.h b/include/uapi/linux/tc_act/tc_gact.h
index f7bf94e..70b536a 100644
--- a/include/uapi/linux/tc_act/tc_gact.h
+++ b/include/uapi/linux/tc_act/tc_gact.h
@@ -25,6 +25,7 @@
 	TCA_GACT_TM,
 	TCA_GACT_PARMS,
 	TCA_GACT_PROB,
+	TCA_GACT_PAD,
 	__TCA_GACT_MAX
 };
 #define TCA_GACT_MAX (__TCA_GACT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
index d648ff6..4ece02a 100644
--- a/include/uapi/linux/tc_act/tc_ife.h
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -23,6 +23,7 @@
 	TCA_IFE_SMAC,
 	TCA_IFE_TYPE,
 	TCA_IFE_METALST,
+	TCA_IFE_PAD,
 	__TCA_IFE_MAX
 };
 #define TCA_IFE_MAX (__TCA_IFE_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_ipt.h b/include/uapi/linux/tc_act/tc_ipt.h
index 130aaad..7c6e155 100644
--- a/include/uapi/linux/tc_act/tc_ipt.h
+++ b/include/uapi/linux/tc_act/tc_ipt.h
@@ -14,6 +14,7 @@
 	TCA_IPT_CNT,
 	TCA_IPT_TM,
 	TCA_IPT_TARG,
+	TCA_IPT_PAD,
 	__TCA_IPT_MAX
 };
 #define TCA_IPT_MAX (__TCA_IPT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_mirred.h b/include/uapi/linux/tc_act/tc_mirred.h
index 7561750..3d7a2b3 100644
--- a/include/uapi/linux/tc_act/tc_mirred.h
+++ b/include/uapi/linux/tc_act/tc_mirred.h
@@ -20,6 +20,7 @@
 	TCA_MIRRED_UNSPEC,
 	TCA_MIRRED_TM,
 	TCA_MIRRED_PARMS,
+	TCA_MIRRED_PAD,
 	__TCA_MIRRED_MAX
 };
 #define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_nat.h b/include/uapi/linux/tc_act/tc_nat.h
index 6663aeb..923457c 100644
--- a/include/uapi/linux/tc_act/tc_nat.h
+++ b/include/uapi/linux/tc_act/tc_nat.h
@@ -10,6 +10,7 @@
 	TCA_NAT_UNSPEC,
 	TCA_NAT_PARMS,
 	TCA_NAT_TM,
+	TCA_NAT_PAD,
 	__TCA_NAT_MAX
 };
 #define TCA_NAT_MAX (__TCA_NAT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index 716cfab..6389959 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -10,6 +10,7 @@
 	TCA_PEDIT_UNSPEC,
 	TCA_PEDIT_TM,
 	TCA_PEDIT_PARMS,
+	TCA_PEDIT_PAD,
 	__TCA_PEDIT_MAX
 };
 #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index 7a2e910..fecb5cc 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -39,6 +39,7 @@
 	TCA_SKBEDIT_PRIORITY,
 	TCA_SKBEDIT_QUEUE_MAPPING,
 	TCA_SKBEDIT_MARK,
+	TCA_SKBEDIT_PAD,
 	__TCA_SKBEDIT_MAX
 };
 #define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index f7b8d44..31151ff 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -28,6 +28,7 @@
 	TCA_VLAN_PARMS,
 	TCA_VLAN_PUSH_VLAN_ID,
 	TCA_VLAN_PUSH_VLAN_PROTOCOL,
+	TCA_VLAN_PAD,
 	__TCA_VLAN_MAX,
 };
 #define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
diff --git a/include/uapi/linux/tcp_metrics.h b/include/uapi/linux/tcp_metrics.h
index 9353392..80ad90d 100644
--- a/include/uapi/linux/tcp_metrics.h
+++ b/include/uapi/linux/tcp_metrics.h
@@ -40,6 +40,7 @@
 	TCP_METRICS_ATTR_FOPEN_COOKIE,		/* binary */
 	TCP_METRICS_ATTR_SADDR_IPV4,		/* u32 */
 	TCP_METRICS_ATTR_SADDR_IPV6,		/* binary */
+	TCP_METRICS_ATTR_PAD,
 
 	__TCP_METRICS_ATTR_MAX,
 };
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 16574ea..2c8180f 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -36,6 +36,7 @@
 #define UDP_ENCAP_ESPINUDP_NON_IKE	1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
 #define UDP_ENCAP_ESPINUDP	2 /* draft-ietf-ipsec-udp-encaps-06 */
 #define UDP_ENCAP_L2TPINUDP	3 /* rfc2661 */
-
+#define UDP_ENCAP_GTP0		4 /* GSM TS 09.60 */
+#define UDP_ENCAP_GTP1U		5 /* 3GPP TS 29.060 */
 
 #endif /* _UAPI_LINUX_UDP_H */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 06d6c62..d5ce716 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -899,7 +899,7 @@
 	__le32 bmAttributes;
 #define USB_SSP_SUBLINK_SPEED_ATTRIBS	(0x1f << 0) /* sublink speed entries */
 #define USB_SSP_SUBLINK_SPEED_IDS	(0xf << 5)  /* speed ID entries */
-	__u16  wFunctionalitySupport;
+	__le16  wFunctionalitySupport;
 #define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID	(0xf)
 #define USB_SSP_MIN_RX_LANE_COUNT		(0xf << 8)
 #define USB_SSP_MIN_TX_LANE_COUNT		(0xf << 12)
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index c039f1d..086168e 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -183,7 +183,8 @@
 
 #define V4L2_DV_BT_CEA_3840X2160P24 { \
 	.type = V4L2_DV_BT_656_1120, \
-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -191,14 +192,16 @@
 
 #define V4L2_DV_BT_CEA_3840X2160P25 { \
 	.type = V4L2_DV_BT_656_1120, \
-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
 }
 
 #define V4L2_DV_BT_CEA_3840X2160P30 { \
 	.type = V4L2_DV_BT_656_1120, \
-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -206,14 +209,16 @@
 
 #define V4L2_DV_BT_CEA_3840X2160P50 { \
 	.type = V4L2_DV_BT_656_1120, \
-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
 }
 
 #define V4L2_DV_BT_CEA_3840X2160P60 { \
 	.type = V4L2_DV_BT_656_1120, \
-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -221,7 +226,8 @@
 
 #define V4L2_DV_BT_CEA_4096X2160P24 { \
 	.type = V4L2_DV_BT_656_1120, \
-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -229,14 +235,16 @@
 
 #define V4L2_DV_BT_CEA_4096X2160P25 { \
 	.type = V4L2_DV_BT_656_1120, \
-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
 }
 
 #define V4L2_DV_BT_CEA_4096X2160P30 { \
 	.type = V4L2_DV_BT_656_1120, \
-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -244,14 +252,16 @@
 
 #define V4L2_DV_BT_CEA_4096X2160P50 { \
 	.type = V4L2_DV_BT_656_1120, \
-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
 }
 
 #define V4L2_DV_BT_CEA_4096X2160P60 { \
 	.type = V4L2_DV_BT_656_1120, \
-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index e895975..8f95191 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -138,10 +138,7 @@
 	V4L2_BUF_TYPE_VBI_OUTPUT           = 5,
 	V4L2_BUF_TYPE_SLICED_VBI_CAPTURE   = 6,
 	V4L2_BUF_TYPE_SLICED_VBI_OUTPUT    = 7,
-#if 1
-	/* Experimental */
 	V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
-#endif
 	V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9,
 	V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE  = 10,
 	V4L2_BUF_TYPE_SDR_CAPTURE          = 11,
@@ -657,8 +654,7 @@
 #define V4L2_FMT_FLAG_COMPRESSED 0x0001
 #define V4L2_FMT_FLAG_EMULATED   0x0002
 
-#if 1
-	/* Experimental Frame Size and frame rate enumeration */
+	/* Frame Size and frame rate enumeration */
 /*
  *	F R A M E   S I Z E   E N U M E R A T I O N
  */
@@ -724,7 +720,6 @@
 
 	__u32	reserved[2];			/* Reserved space for future use */
 };
-#endif
 
 /*
  *	T I M E C O D E
@@ -1728,8 +1723,6 @@
 
 /*
  *	M P E G   S E R V I C E S
- *
- *	NOTE: EXPERIMENTAL API
  */
 #if 1
 #define V4L2_ENC_IDX_FRAME_I    (0)
@@ -2259,46 +2252,35 @@
 #define VIDIOC_ENCODER_CMD      _IOWR('V', 77, struct v4l2_encoder_cmd)
 #define VIDIOC_TRY_ENCODER_CMD  _IOWR('V', 78, struct v4l2_encoder_cmd)
 
-/* Experimental, meant for debugging, testing and internal use.
-   Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
-   You must be root to use these ioctls. Never use these in applications! */
+/*
+ * Experimental, meant for debugging, testing and internal use.
+ * Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
+ * You must be root to use these ioctls. Never use these in applications!
+ */
 #define	VIDIOC_DBG_S_REGISTER 	 _IOW('V', 79, struct v4l2_dbg_register)
 #define	VIDIOC_DBG_G_REGISTER 	_IOWR('V', 80, struct v4l2_dbg_register)
 
 #define VIDIOC_S_HW_FREQ_SEEK	 _IOW('V', 82, struct v4l2_hw_freq_seek)
-
 #define	VIDIOC_S_DV_TIMINGS	_IOWR('V', 87, struct v4l2_dv_timings)
 #define	VIDIOC_G_DV_TIMINGS	_IOWR('V', 88, struct v4l2_dv_timings)
 #define	VIDIOC_DQEVENT		 _IOR('V', 89, struct v4l2_event)
 #define	VIDIOC_SUBSCRIBE_EVENT	 _IOW('V', 90, struct v4l2_event_subscription)
 #define	VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription)
-
-/* Experimental, the below two ioctls may change over the next couple of kernel
-   versions */
 #define VIDIOC_CREATE_BUFS	_IOWR('V', 92, struct v4l2_create_buffers)
 #define VIDIOC_PREPARE_BUF	_IOWR('V', 93, struct v4l2_buffer)
-
-/* Experimental selection API */
 #define VIDIOC_G_SELECTION	_IOWR('V', 94, struct v4l2_selection)
 #define VIDIOC_S_SELECTION	_IOWR('V', 95, struct v4l2_selection)
-
-/* Experimental, these two ioctls may change over the next couple of kernel
-   versions. */
 #define VIDIOC_DECODER_CMD	_IOWR('V', 96, struct v4l2_decoder_cmd)
 #define VIDIOC_TRY_DECODER_CMD	_IOWR('V', 97, struct v4l2_decoder_cmd)
-
-/* Experimental, these three ioctls may change over the next couple of kernel
-   versions. */
 #define VIDIOC_ENUM_DV_TIMINGS  _IOWR('V', 98, struct v4l2_enum_dv_timings)
 #define VIDIOC_QUERY_DV_TIMINGS  _IOR('V', 99, struct v4l2_dv_timings)
 #define VIDIOC_DV_TIMINGS_CAP   _IOWR('V', 100, struct v4l2_dv_timings_cap)
-
-/* Experimental, this ioctl may change over the next couple of kernel
-   versions. */
 #define VIDIOC_ENUM_FREQ_BANDS	_IOWR('V', 101, struct v4l2_frequency_band)
 
-/* Experimental, meant for debugging, testing and internal use.
-   Never use these in applications! */
+/*
+ * Experimental, meant for debugging, testing and internal use.
+ * Never use this in applications!
+ */
 #define VIDIOC_DBG_G_CHIP_INFO  _IOWR('V', 102, struct v4l2_dbg_chip_info)
 
 #define VIDIOC_QUERY_EXT_CTRL	_IOWR('V', 103, struct v4l2_query_ext_ctrl)
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index c18264d..4cb65bb 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -40,6 +40,8 @@
 #define VIRTIO_CONFIG_S_DRIVER_OK	4
 /* Driver has finished configuring features */
 #define VIRTIO_CONFIG_S_FEATURES_OK	8
+/* Device entered invalid state, driver must reset it */
+#define VIRTIO_CONFIG_S_NEEDS_RESET	0x40
 /* We've given up on this device. */
 #define VIRTIO_CONFIG_S_FAILED		0x80
 
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 2cd9e60..1433389 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -302,6 +302,7 @@
 	XFRMA_SA_EXTRA_FLAGS,	/* __u32 */
 	XFRMA_PROTO,		/* __u8 */
 	XFRMA_ADDRESS_FILTER,	/* struct xfrm_address_filter */
+	XFRMA_PAD,
 	__XFRMA_MAX
 
 #define XFRMA_MAX (__XFRMA_MAX - 1)
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 67bf49d..609cadb 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -672,7 +672,7 @@
 
 /* global timers (device member) */
 #define SNDRV_TIMER_GLOBAL_SYSTEM	0
-#define SNDRV_TIMER_GLOBAL_RTC		1
+#define SNDRV_TIMER_GLOBAL_RTC		1	/* unused */
 #define SNDRV_TIMER_GLOBAL_HPET		2
 #define SNDRV_TIMER_GLOBAL_HRTIMER	3
 
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index eeba753..ad66589 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -194,8 +194,9 @@
 int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
 void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format);
 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
-				   u32 pixel_format, int stride,
-				   int u_offset, int v_offset);
+				   unsigned int uv_stride,
+				   unsigned int u_offset,
+				   unsigned int v_offset);
 void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
 			      u32 pixel_format, int stride, int height);
 int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
@@ -236,7 +237,7 @@
 int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
 		unsigned long bandwidth_mbs, int burstsize);
 void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width);
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width);
 struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
 void ipu_dmfc_put(struct dmfc_channel *dmfc);
 
diff --git a/include/xen/page.h b/include/xen/page.h
index 96294ac..9dc46cb 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -15,9 +15,9 @@
  */
 
 #define xen_pfn_to_page(xen_pfn)	\
-	((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
+	(pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
 #define page_to_xen_pfn(page)		\
-	(((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
+	((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
 
 #define XEN_PFN_PER_PAGE	(PAGE_SIZE / XEN_PAGE_SIZE)
 
diff --git a/init/Kconfig b/init/Kconfig
index e0d2616..0dfd09d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -272,8 +272,9 @@
 	  See the man page for more details.
 
 config FHANDLE
-	bool "open by fhandle syscalls"
+	bool "open by fhandle syscalls" if EXPERT
 	select EXPORTFS
+	default y
 	help
 	  If you say Y here, a user level program will be able to map
 	  file names to handle and then later use the handle for
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 781c139..ade739f 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -307,8 +307,8 @@
 	struct inode *inode;
 	struct ipc_namespace *ns = data;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = MQUEUE_MAGIC;
 	sb->s_op = &mqueue_super_ops;
 
diff --git a/kernel/audit.c b/kernel/audit.c
index 678c3f0..22bb4f2 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -64,7 +64,6 @@
 #include <linux/security.h>
 #endif
 #include <linux/freezer.h>
-#include <linux/tty.h>
 #include <linux/pid_namespace.h>
 #include <net/netns/generic.h>
 
@@ -430,7 +429,6 @@
 					attempts, audit_pid);
 				set_current_state(TASK_INTERRUPTIBLE);
 				schedule();
-				__set_current_state(TASK_RUNNING);
 				goto restart;
 			}
 		}
@@ -1341,15 +1339,14 @@
 static long wait_for_auditd(long sleep_time)
 {
 	DECLARE_WAITQUEUE(wait, current);
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	add_wait_queue_exclusive(&audit_backlog_wait, &wait);
 
 	if (audit_backlog_limit &&
-	    skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
+	    skb_queue_len(&audit_skb_queue) > audit_backlog_limit) {
+		add_wait_queue_exclusive(&audit_backlog_wait, &wait);
+		set_current_state(TASK_UNINTERRUPTIBLE);
 		sleep_time = schedule_timeout(sleep_time);
-
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&audit_backlog_wait, &wait);
+		remove_wait_queue(&audit_backlog_wait, &wait);
+	}
 
 	return sleep_time;
 }
@@ -1890,21 +1887,14 @@
 {
 	const struct cred *cred;
 	char comm[sizeof(tsk->comm)];
-	char *tty;
+	struct tty_struct *tty;
 
 	if (!ab)
 		return;
 
 	/* tsk == current */
 	cred = current_cred();
-
-	spin_lock_irq(&tsk->sighand->siglock);
-	if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
-		tty = tsk->signal->tty->name;
-	else
-		tty = "(none)";
-	spin_unlock_irq(&tsk->sighand->siglock);
-
+	tty = audit_get_tty(tsk);
 	audit_log_format(ab,
 			 " ppid=%d pid=%d auid=%u uid=%u gid=%u"
 			 " euid=%u suid=%u fsuid=%u"
@@ -1920,11 +1910,11 @@
 			 from_kgid(&init_user_ns, cred->egid),
 			 from_kgid(&init_user_ns, cred->sgid),
 			 from_kgid(&init_user_ns, cred->fsgid),
-			 tty, audit_get_sessionid(tsk));
-
+			 tty ? tty_name(tty) : "(none)",
+			 audit_get_sessionid(tsk));
+	audit_put_tty(tty);
 	audit_log_format(ab, " comm=");
 	audit_log_untrustedstring(ab, get_task_comm(comm, tsk));
-
 	audit_log_d_path_exe(ab, tsk->mm);
 	audit_log_task_context(ab);
 }
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 5efe9b29..2577247 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -661,10 +661,10 @@
 static int prune_tree_thread(void *unused)
 {
 	for (;;) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (list_empty(&prune_list))
+		if (list_empty(&prune_list)) {
+			set_current_state(TASK_INTERRUPTIBLE);
 			schedule();
-		__set_current_state(TASK_RUNNING);
+		}
 
 		mutex_lock(&audit_cmd_mutex);
 		mutex_lock(&audit_filter_mutex);
@@ -693,16 +693,14 @@
 {
 	if (prune_thread)
 		return 0;
-	prune_thread = kthread_create(prune_tree_thread, NULL,
+	prune_thread = kthread_run(prune_tree_thread, NULL,
 				"audit_prune_tree");
 	if (IS_ERR(prune_thread)) {
 		pr_err("cannot start thread audit_prune_tree");
 		prune_thread = NULL;
 		return -ENOMEM;
-	} else {
-		wake_up_process(prune_thread);
-		return 0;
 	}
+	return 0;
 }
 
 /* called with audit_filter_mutex */
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 3cf1c59..d6709eb 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -367,7 +367,7 @@
 	inode_unlock(d_backing_inode(parent->dentry));
 	if (d_is_positive(d)) {
 		/* update watch filter fields */
-		watch->dev = d_backing_inode(d)->i_sb->s_dev;
+		watch->dev = d->d_sb->s_dev;
 		watch->ino = d_backing_inode(d)->i_ino;
 	}
 	dput(d);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 7d0e3cf..62ab53d 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1980,6 +1980,7 @@
 {
 	struct audit_buffer *ab;
 	uid_t uid, oldloginuid, loginuid;
+	struct tty_struct *tty;
 
 	if (!audit_enabled)
 		return;
@@ -1987,14 +1988,17 @@
 	uid = from_kuid(&init_user_ns, task_uid(current));
 	oldloginuid = from_kuid(&init_user_ns, koldloginuid);
 	loginuid = from_kuid(&init_user_ns, kloginuid),
+	tty = audit_get_tty(current);
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
 	if (!ab)
 		return;
 	audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
 	audit_log_task_context(ab);
-	audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d",
-			 oldloginuid, loginuid, oldsessionid, sessionid, !rc);
+	audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
+			 oldloginuid, loginuid, tty ? tty_name(tty) : "(none)",
+			 oldsessionid, sessionid, !rc);
+	audit_put_tty(tty);
 	audit_log_end(ab);
 }
 
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index be0abf6..f1e8a0d 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -129,14 +129,83 @@
 
 	return fp;
 }
-EXPORT_SYMBOL_GPL(bpf_prog_realloc);
 
 void __bpf_prog_free(struct bpf_prog *fp)
 {
 	kfree(fp->aux);
 	vfree(fp);
 }
-EXPORT_SYMBOL_GPL(__bpf_prog_free);
+
+static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
+{
+	return BPF_CLASS(insn->code) == BPF_JMP  &&
+	       /* Call and Exit are both special jumps with no
+		* target inside the BPF instruction image.
+		*/
+	       BPF_OP(insn->code) != BPF_CALL &&
+	       BPF_OP(insn->code) != BPF_EXIT;
+}
+
+static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
+{
+	struct bpf_insn *insn = prog->insnsi;
+	u32 i, insn_cnt = prog->len;
+
+	for (i = 0; i < insn_cnt; i++, insn++) {
+		if (!bpf_is_jmp_and_has_target(insn))
+			continue;
+
+		/* Adjust offset of jmps if we cross boundaries. */
+		if (i < pos && i + insn->off + 1 > pos)
+			insn->off += delta;
+		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
+			insn->off -= delta;
+	}
+}
+
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+				       const struct bpf_insn *patch, u32 len)
+{
+	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
+	struct bpf_prog *prog_adj;
+
+	/* Since our patchlet doesn't expand the image, we're done. */
+	if (insn_delta == 0) {
+		memcpy(prog->insnsi + off, patch, sizeof(*patch));
+		return prog;
+	}
+
+	insn_adj_cnt = prog->len + insn_delta;
+
+	/* Several new instructions need to be inserted. Make room
+	 * for them. Likely, there's no need for a new allocation as
+	 * the last page could have large enough tailroom.
+	 */
+	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
+				    GFP_USER);
+	if (!prog_adj)
+		return NULL;
+
+	prog_adj->len = insn_adj_cnt;
+
+	/* Patching happens in 3 steps:
+	 *
+	 * 1) Move over tail of insnsi from next instruction onwards,
+	 *    so we can patch the single target insn with one or more
+	 *    new ones (patching is always from 1 to n insns, n > 0).
+	 * 2) Inject new instructions at the target location.
+	 * 3) Adjust branch offsets if necessary.
+	 */
+	insn_rest = insn_adj_cnt - off - len;
+
+	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
+		sizeof(*patch) * insn_rest);
+	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
+
+	bpf_adj_branches(prog_adj, off, insn_delta);
+
+	return prog_adj;
+}
 
 #ifdef CONFIG_BPF_JIT
 struct bpf_binary_header *
@@ -174,6 +243,209 @@
 {
 	module_memfree(hdr);
 }
+
+int bpf_jit_harden __read_mostly;
+
+static int bpf_jit_blind_insn(const struct bpf_insn *from,
+			      const struct bpf_insn *aux,
+			      struct bpf_insn *to_buff)
+{
+	struct bpf_insn *to = to_buff;
+	u32 imm_rnd = prandom_u32();
+	s16 off;
+
+	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
+	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
+
+	if (from->imm == 0 &&
+	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
+	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
+		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
+		goto out;
+	}
+
+	switch (from->code) {
+	case BPF_ALU | BPF_ADD | BPF_K:
+	case BPF_ALU | BPF_SUB | BPF_K:
+	case BPF_ALU | BPF_AND | BPF_K:
+	case BPF_ALU | BPF_OR  | BPF_K:
+	case BPF_ALU | BPF_XOR | BPF_K:
+	case BPF_ALU | BPF_MUL | BPF_K:
+	case BPF_ALU | BPF_MOV | BPF_K:
+	case BPF_ALU | BPF_DIV | BPF_K:
+	case BPF_ALU | BPF_MOD | BPF_K:
+		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
+		break;
+
+	case BPF_ALU64 | BPF_ADD | BPF_K:
+	case BPF_ALU64 | BPF_SUB | BPF_K:
+	case BPF_ALU64 | BPF_AND | BPF_K:
+	case BPF_ALU64 | BPF_OR  | BPF_K:
+	case BPF_ALU64 | BPF_XOR | BPF_K:
+	case BPF_ALU64 | BPF_MUL | BPF_K:
+	case BPF_ALU64 | BPF_MOV | BPF_K:
+	case BPF_ALU64 | BPF_DIV | BPF_K:
+	case BPF_ALU64 | BPF_MOD | BPF_K:
+		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
+		break;
+
+	case BPF_JMP | BPF_JEQ  | BPF_K:
+	case BPF_JMP | BPF_JNE  | BPF_K:
+	case BPF_JMP | BPF_JGT  | BPF_K:
+	case BPF_JMP | BPF_JGE  | BPF_K:
+	case BPF_JMP | BPF_JSGT | BPF_K:
+	case BPF_JMP | BPF_JSGE | BPF_K:
+	case BPF_JMP | BPF_JSET | BPF_K:
+		/* Account for the extra offset in case of a backjump. */
+		off = from->off;
+		if (off < 0)
+			off -= 2;
+		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
+		break;
+
+	case BPF_LD | BPF_ABS | BPF_W:
+	case BPF_LD | BPF_ABS | BPF_H:
+	case BPF_LD | BPF_ABS | BPF_B:
+		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
+		break;
+
+	case BPF_LD | BPF_IND | BPF_W:
+	case BPF_LD | BPF_IND | BPF_H:
+	case BPF_LD | BPF_IND | BPF_B:
+		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+		*to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
+		*to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
+		break;
+
+	case BPF_LD | BPF_IMM | BPF_DW:
+		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
+		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
+		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
+		break;
+	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
+		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
+		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
+		break;
+
+	case BPF_ST | BPF_MEM | BPF_DW:
+	case BPF_ST | BPF_MEM | BPF_W:
+	case BPF_ST | BPF_MEM | BPF_H:
+	case BPF_ST | BPF_MEM | BPF_B:
+		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
+		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
+		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
+		break;
+	}
+out:
+	return to - to_buff;
+}
+
+static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
+					      gfp_t gfp_extra_flags)
+{
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
+			  gfp_extra_flags;
+	struct bpf_prog *fp;
+
+	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
+	if (fp != NULL) {
+		kmemcheck_annotate_bitfield(fp, meta);
+
+		/* aux->prog still points to the fp_other one, so
+		 * when promoting the clone to the real program,
+		 * this still needs to be adapted.
+		 */
+		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
+	}
+
+	return fp;
+}
+
+static void bpf_prog_clone_free(struct bpf_prog *fp)
+{
+	/* aux was stolen by the other clone, so we cannot free
+	 * it from this path! It will be freed eventually by the
+	 * other program on release.
+	 *
+	 * At this point, we don't need a deferred release since
+	 * clone is guaranteed to not be locked.
+	 */
+	fp->aux = NULL;
+	__bpf_prog_free(fp);
+}
+
+void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
+{
+	/* We have to repoint aux->prog to self, as we don't
+	 * know whether fp here is the clone or the original.
+	 */
+	fp->aux->prog = fp;
+	bpf_prog_clone_free(fp_other);
+}
+
+struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
+{
+	struct bpf_insn insn_buff[16], aux[2];
+	struct bpf_prog *clone, *tmp;
+	int insn_delta, insn_cnt;
+	struct bpf_insn *insn;
+	int i, rewritten;
+
+	if (!bpf_jit_blinding_enabled())
+		return prog;
+
+	clone = bpf_prog_clone_create(prog, GFP_USER);
+	if (!clone)
+		return ERR_PTR(-ENOMEM);
+
+	insn_cnt = clone->len;
+	insn = clone->insnsi;
+
+	for (i = 0; i < insn_cnt; i++, insn++) {
+		/* We temporarily need to hold the original ld64 insn
+		 * so that we can still access the first part in the
+		 * second blinding run.
+		 */
+		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
+		    insn[1].code == 0)
+			memcpy(aux, insn, sizeof(aux));
+
+		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
+		if (!rewritten)
+			continue;
+
+		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
+		if (!tmp) {
+			/* Patching may have repointed aux->prog during
+			 * realloc from the original one, so we need to
+			 * fix it up here on error.
+			 */
+			bpf_jit_prog_release_other(prog, clone);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		clone = tmp;
+		insn_delta = rewritten - 1;
+
+		/* Walk new program and skip insns we just inserted. */
+		insn = clone->insnsi + i + insn_delta;
+		insn_cnt += insn_delta;
+		i        += insn_delta;
+	}
+
+	return clone;
+}
 #endif /* CONFIG_BPF_JIT */
 
 /* Base function for offset calculation. Needs to go into .text section,
@@ -692,15 +964,22 @@
 /**
  *	bpf_prog_select_runtime - select exec runtime for BPF program
  *	@fp: bpf_prog populated with internal BPF program
+ *	@err: pointer to error variable
  *
  * Try to JIT eBPF program, if JIT is not available, use interpreter.
  * The BPF program will be executed via BPF_PROG_RUN() macro.
  */
-int bpf_prog_select_runtime(struct bpf_prog *fp)
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
 	fp->bpf_func = (void *) __bpf_prog_run;
 
-	bpf_int_jit_compile(fp);
+	/* eBPF JITs can rewrite the program in case constant
+	 * blinding is active. However, in case of error during
+	 * blinding, bpf_int_jit_compile() must always return a
+	 * valid program, which in this case would simply not
+	 * be JITed, but would fall back to the interpreter.
+	 */
+	fp = bpf_int_jit_compile(fp);
 	bpf_prog_lock_ro(fp);
 
 	/* The tail call compatibility check can only be done at
@@ -708,7 +987,9 @@
 	 * with JITed or non JITed program concatenations and not
 	 * all eBPF JITs might immediately support all features.
 	 */
-	return bpf_check_tail_call(fp);
+	*err = bpf_check_tail_call(fp);
+
+	return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
@@ -764,14 +1045,21 @@
 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
+
 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
+
 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
 {
 	return NULL;
 }
 
+const struct bpf_func_proto * __weak bpf_get_event_output_proto(void)
+{
+	return NULL;
+}
+
 /* Always built-in helper functions. */
 const struct bpf_func_proto bpf_tail_call_proto = {
 	.func		= NULL,
@@ -783,8 +1071,14 @@
 };
 
 /* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
-void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	return prog;
+}
+
+bool __weak bpf_helper_changes_skb_data(void *func)
+{
+	return false;
 }
 
 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
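The constant blinding introduced above never lets a user-controlled immediate appear verbatim in the JIT image: each BPF_K instruction is rewritten into a load of imm ^ rnd into the auxiliary register, an XOR with rnd, and the original operation against that register. The rewrite is correct because XORing with the same value twice is the identity; a trivial user-space sketch of that property:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Hedged sketch: (imm ^ rnd) ^ rnd == imm, so the blinded three-insn
 * sequence computes the same value as the original single instruction
 * while hiding imm from JIT spraying.  random() stands in for the
 * kernel's prandom_u32().
 */
int main(void)
{
	uint32_t imm = 0x05060708u;
	uint32_t rnd = (uint32_t)random();

	assert(((imm ^ rnd) ^ rnd) == imm);
	return 0;
}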
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 50da680..ad7a057 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -163,17 +163,26 @@
 	struct task_struct *task = current;
 	char *buf = (char *) (long) r1;
 
-	if (!task)
-		return -EINVAL;
+	if (unlikely(!task))
+		goto err_clear;
 
-	strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
+	strncpy(buf, task->comm, size);
+
+	/* Verifier guarantees that size > 0. For task->comm exceeding
+	 * size, guarantee that buf is %NUL-terminated. Unconditionally
+	 * done here to save the size test.
+	 */
+	buf[size - 1] = 0;
 	return 0;
+err_clear:
+	memset(buf, 0, size);
+	return -EINVAL;
 }
 
 const struct bpf_func_proto bpf_get_current_comm_proto = {
 	.func		= bpf_get_current_comm,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_PTR_TO_STACK,
+	.arg1_type	= ARG_PTR_TO_RAW_STACK,
 	.arg2_type	= ARG_CONST_STACK_SIZE,
 };
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index f2ece3c..71b75d9 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -31,10 +31,10 @@
 {
 	switch (type) {
 	case BPF_TYPE_PROG:
-		atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+		raw = bpf_prog_inc(raw);
 		break;
 	case BPF_TYPE_MAP:
-		bpf_map_inc(raw, true);
+		raw = bpf_map_inc(raw, true);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -119,18 +119,10 @@
 	return 0;
 }
 
-static bool bpf_dname_reserved(const struct dentry *dentry)
-{
-	return strchr(dentry->d_name.name, '.');
-}
-
 static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	struct inode *inode;
 
-	if (bpf_dname_reserved(dentry))
-		return -EPERM;
-
 	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
 	if (IS_ERR(inode))
 		return PTR_ERR(inode);
@@ -152,9 +144,6 @@
 {
 	struct inode *inode;
 
-	if (bpf_dname_reserved(dentry))
-		return -EPERM;
-
 	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFREG);
 	if (IS_ERR(inode))
 		return PTR_ERR(inode);
@@ -187,31 +176,21 @@
 	}
 }
 
-static int bpf_link(struct dentry *old_dentry, struct inode *dir,
-		    struct dentry *new_dentry)
+static struct dentry *
+bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
 {
-	if (bpf_dname_reserved(new_dentry))
-		return -EPERM;
-
-	return simple_link(old_dentry, dir, new_dentry);
-}
-
-static int bpf_rename(struct inode *old_dir, struct dentry *old_dentry,
-		      struct inode *new_dir, struct dentry *new_dentry)
-{
-	if (bpf_dname_reserved(new_dentry))
-		return -EPERM;
-
-	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
+	if (strchr(dentry->d_name.name, '.'))
+		return ERR_PTR(-EPERM);
+	return simple_lookup(dir, dentry, flags);
 }
 
 static const struct inode_operations bpf_dir_iops = {
-	.lookup		= simple_lookup,
+	.lookup		= bpf_lookup,
 	.mknod		= bpf_mkobj,
 	.mkdir		= bpf_mkdir,
 	.rmdir		= simple_rmdir,
-	.rename		= bpf_rename,
-	.link		= bpf_link,
+	.rename		= simple_rename,
+	.link		= simple_link,
 	.unlink		= simple_unlink,
 };
 
@@ -297,7 +276,8 @@
 		goto out;
 
 	raw = bpf_any_get(inode->i_private, *type);
-	touch_atime(&path);
+	if (!IS_ERR(raw))
+		touch_atime(&path);
 
 	path_put(&path);
 	return raw;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 499d9e9..c8ee352 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -66,7 +66,7 @@
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    value_size < 8 || value_size % 8 ||
-	    value_size / 8 > PERF_MAX_STACK_DEPTH)
+	    value_size / 8 > sysctl_perf_event_max_stack)
 		return ERR_PTR(-EINVAL);
 
 	/* hash table size must be power of 2 */
@@ -116,7 +116,7 @@
 	return ERR_PTR(err);
 }
 
-static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
 {
 	struct pt_regs *regs = (struct pt_regs *) (long) r1;
 	struct bpf_map *map = (struct bpf_map *) (long) r2;
@@ -124,8 +124,8 @@
 	struct perf_callchain_entry *trace;
 	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
 	u32 max_depth = map->value_size / 8;
-	/* stack_map_alloc() checks that max_depth <= PERF_MAX_STACK_DEPTH */
-	u32 init_nr = PERF_MAX_STACK_DEPTH - max_depth;
+	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
+	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
 	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
 	u32 hash, id, trace_nr, trace_len;
 	bool user = flags & BPF_F_USER_STACK;
@@ -143,7 +143,7 @@
 		return -EFAULT;
 
 	/* get_perf_callchain() guarantees that trace->nr >= init_nr
-	 * and trace-nr <= PERF_MAX_STACK_DEPTH, so trace_nr <= max_depth
+	 * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
 	 */
 	trace_nr = trace->nr - init_nr;
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2a2efe1..46ecce4 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -137,11 +137,13 @@
 		   "map_type:\t%u\n"
 		   "key_size:\t%u\n"
 		   "value_size:\t%u\n"
-		   "max_entries:\t%u\n",
+		   "max_entries:\t%u\n"
+		   "map_flags:\t%#x\n",
 		   map->map_type,
 		   map->key_size,
 		   map->value_size,
-		   map->max_entries);
+		   map->max_entries,
+		   map->map_flags);
 }
 #endif
 
@@ -216,11 +218,18 @@
 	return f.file->private_data;
 }
 
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 {
-	atomic_inc(&map->refcnt);
+	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+		atomic_dec(&map->refcnt);
+		return ERR_PTR(-EBUSY);
+	}
 	if (uref)
 		atomic_inc(&map->usercnt);
+	return map;
 }
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -232,7 +241,7 @@
 	if (IS_ERR(map))
 		return map;
 
-	bpf_map_inc(map, true);
+	map = bpf_map_inc(map, true);
 	fdput(f);
 
 	return map;
@@ -656,6 +665,15 @@
 	return f.file->private_data;
 }
 
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+	if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+		atomic_dec(&prog->aux->refcnt);
+		return ERR_PTR(-EBUSY);
+	}
+	return prog;
+}
+
 /* called by sockets/tracing/seccomp before attaching program to an event
  * pairs with bpf_prog_put()
  */
@@ -668,7 +686,7 @@
 	if (IS_ERR(prog))
 		return prog;
 
-	atomic_inc(&prog->aux->refcnt);
+	prog = bpf_prog_inc(prog);
 	fdput(f);
 
 	return prog;
@@ -744,7 +762,7 @@
 	fixup_bpf_calls(prog);
 
 	/* eBPF program is ready to be JITed */
-	err = bpf_prog_select_runtime(prog);
+	prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
 		goto free_used_maps;
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2e08f8e..a08d662 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1,4 +1,5 @@
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2016 Facebook
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -136,13 +137,32 @@
 	FRAME_PTR,		 /* reg == frame_pointer */
 	PTR_TO_STACK,		 /* reg == frame_pointer + imm */
 	CONST_IMM,		 /* constant integer value */
+
+	/* PTR_TO_PACKET represents:
+	 * skb->data
+	 * skb->data + imm
+	 * skb->data + (u16) var
+	 * skb->data + (u16) var + imm
+	 * if (range > 0) then [ptr, ptr + range - off) is safe to access
+	 * if (id > 0) means that some 'var' was added
+	 * if (off > 0) means that 'imm' was added
+	 */
+	PTR_TO_PACKET,
+	PTR_TO_PACKET_END,	 /* skb->data + headlen */
 };
 
 struct reg_state {
 	enum bpf_reg_type type;
 	union {
-		/* valid when type == CONST_IMM | PTR_TO_STACK */
-		int imm;
+		/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
+		s64 imm;
+
+		/* valid when type == PTR_TO_PACKET* */
+		struct {
+			u32 id;
+			u16 off;
+			u16 range;
+		};
 
 		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
 		 *   PTR_TO_MAP_VALUE_OR_NULL
@@ -202,6 +222,16 @@
 	bool allow_ptr_leaks;
 };
 
+#define BPF_COMPLEXITY_LIMIT_INSNS	65536
+#define BPF_COMPLEXITY_LIMIT_STACK	1024
+
+struct bpf_call_arg_meta {
+	struct bpf_map *map_ptr;
+	bool raw_mode;
+	int regno;
+	int access_size;
+};
+
 /* verbose verifier prints what it's seeing
  * bpf_check() is called under lock, so no race to access these global vars
  */
@@ -237,40 +267,39 @@
 	[FRAME_PTR]		= "fp",
 	[PTR_TO_STACK]		= "fp",
 	[CONST_IMM]		= "imm",
+	[PTR_TO_PACKET]		= "pkt",
+	[PTR_TO_PACKET_END]	= "pkt_end",
 };
 
-static const struct {
-	int map_type;
-	int func_id;
-} func_limit[] = {
-	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
-	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
-	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
-	{BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
-};
-
-static void print_verifier_state(struct verifier_env *env)
+static void print_verifier_state(struct verifier_state *state)
 {
+	struct reg_state *reg;
 	enum bpf_reg_type t;
 	int i;
 
 	for (i = 0; i < MAX_BPF_REG; i++) {
-		t = env->cur_state.regs[i].type;
+		reg = &state->regs[i];
+		t = reg->type;
 		if (t == NOT_INIT)
 			continue;
 		verbose(" R%d=%s", i, reg_type_str[t]);
 		if (t == CONST_IMM || t == PTR_TO_STACK)
-			verbose("%d", env->cur_state.regs[i].imm);
+			verbose("%lld", reg->imm);
+		else if (t == PTR_TO_PACKET)
+			verbose("(id=%d,off=%d,r=%d)",
+				reg->id, reg->off, reg->range);
+		else if (t == UNKNOWN_VALUE && reg->imm)
+			verbose("%lld", reg->imm);
 		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
 			 t == PTR_TO_MAP_VALUE_OR_NULL)
 			verbose("(ks=%d,vs=%d)",
-				env->cur_state.regs[i].map_ptr->key_size,
-				env->cur_state.regs[i].map_ptr->value_size);
+				reg->map_ptr->key_size,
+				reg->map_ptr->value_size);
 	}
 	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
-		if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
+		if (state->stack_slot_type[i] == STACK_SPILL)
 			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
-				reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
+				reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]);
 	}
 	verbose("\n");
 }
@@ -454,7 +483,7 @@
 	elem->next = env->head;
 	env->head = elem;
 	env->stack_size++;
-	if (env->stack_size > 1024) {
+	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
 		verbose("BPF program is too complex\n");
 		goto err;
 	}
@@ -477,7 +506,6 @@
 	for (i = 0; i < MAX_BPF_REG; i++) {
 		regs[i].type = NOT_INIT;
 		regs[i].imm = 0;
-		regs[i].map_ptr = NULL;
 	}
 
 	/* frame pointer */
@@ -492,7 +520,6 @@
 	BUG_ON(regno >= MAX_BPF_REG);
 	regs[regno].type = UNKNOWN_VALUE;
 	regs[regno].imm = 0;
-	regs[regno].map_ptr = NULL;
 }
 
 enum reg_arg_type {
@@ -548,6 +575,8 @@
 	case PTR_TO_MAP_VALUE_OR_NULL:
 	case PTR_TO_STACK:
 	case PTR_TO_CTX:
+	case PTR_TO_PACKET:
+	case PTR_TO_PACKET_END:
 	case FRAME_PTR:
 	case CONST_PTR_TO_MAP:
 		return true;
@@ -647,13 +676,38 @@
 	return 0;
 }
 
+#define MAX_PACKET_OFF 0xffff
+
+static int check_packet_access(struct verifier_env *env, u32 regno, int off,
+			       int size)
+{
+	struct reg_state *regs = env->cur_state.regs;
+	struct reg_state *reg = &regs[regno];
+	int linear_size = (int) reg->range - (int) reg->off;
+
+	if (linear_size < 0 || linear_size >= MAX_PACKET_OFF) {
+		verbose("verifier bug\n");
+		return -EFAULT;
+	}
+	if (off < 0 || off + size > linear_size) {
+		verbose("invalid access to packet, off=%d size=%d, allowed=%d\n",
+			off, size, linear_size);
+		return -EACCES;
+	}
+	return 0;
+}
+
 /* check access to 'struct bpf_context' fields */
 static int check_ctx_access(struct verifier_env *env, int off, int size,
 			    enum bpf_access_type t)
 {
 	if (env->prog->aux->ops->is_valid_access &&
-	    env->prog->aux->ops->is_valid_access(off, size, t))
+	    env->prog->aux->ops->is_valid_access(off, size, t)) {
+		/* remember the offset of last byte accessed in ctx */
+		if (env->prog->aux->max_ctx_offset < off + size)
+			env->prog->aux->max_ctx_offset = off + size;
 		return 0;
+	}
 
 	verbose("invalid bpf_context access off=%d size=%d\n", off, size);
 	return -EACCES;
@@ -673,6 +727,45 @@
 	}
 }
 
+static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg,
+			       int off, int size)
+{
+	if (reg->type != PTR_TO_PACKET) {
+		if (off % size != 0) {
+			verbose("misaligned access off %d size %d\n", off, size);
+			return -EACCES;
+		} else {
+			return 0;
+		}
+	}
+
+	switch (env->prog->type) {
+	case BPF_PROG_TYPE_SCHED_CLS:
+	case BPF_PROG_TYPE_SCHED_ACT:
+		break;
+	default:
+		verbose("verifier is misconfigured\n");
+		return -EACCES;
+	}
+
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+		/* misaligned access to packet is ok on x86, arm, arm64 */
+		return 0;
+
+	if (reg->id && size != 1) {
+		verbose("Unknown packet alignment. Only byte-sized access allowed\n");
+		return -EACCES;
+	}
+
+	/* skb->data is NET_IP_ALIGN-ed */
+	if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
+		verbose("misaligned packet access off %d+%d+%d size %d\n",
+			NET_IP_ALIGN, reg->off, off, size);
+		return -EACCES;
+	}
+	return 0;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -684,21 +777,21 @@
 			    int value_regno)
 {
 	struct verifier_state *state = &env->cur_state;
+	struct reg_state *reg = &state->regs[regno];
 	int size, err = 0;
 
-	if (state->regs[regno].type == PTR_TO_STACK)
-		off += state->regs[regno].imm;
+	if (reg->type == PTR_TO_STACK)
+		off += reg->imm;
 
 	size = bpf_size_to_bytes(bpf_size);
 	if (size < 0)
 		return size;
 
-	if (off % size != 0) {
-		verbose("misaligned access off %d size %d\n", off, size);
-		return -EACCES;
-	}
+	err = check_ptr_alignment(env, reg, off, size);
+	if (err)
+		return err;
 
-	if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
+	if (reg->type == PTR_TO_MAP_VALUE) {
 		if (t == BPF_WRITE && value_regno >= 0 &&
 		    is_pointer_value(env, value_regno)) {
 			verbose("R%d leaks addr into map\n", value_regno);
@@ -708,18 +801,25 @@
 		if (!err && t == BPF_READ && value_regno >= 0)
 			mark_reg_unknown_value(state->regs, value_regno);
 
-	} else if (state->regs[regno].type == PTR_TO_CTX) {
+	} else if (reg->type == PTR_TO_CTX) {
 		if (t == BPF_WRITE && value_regno >= 0 &&
 		    is_pointer_value(env, value_regno)) {
 			verbose("R%d leaks addr into ctx\n", value_regno);
 			return -EACCES;
 		}
 		err = check_ctx_access(env, off, size, t);
-		if (!err && t == BPF_READ && value_regno >= 0)
+		if (!err && t == BPF_READ && value_regno >= 0) {
 			mark_reg_unknown_value(state->regs, value_regno);
+			if (off == offsetof(struct __sk_buff, data) &&
+			    env->allow_ptr_leaks)
+				/* note that reg.[id|off|range] == 0 */
+				state->regs[value_regno].type = PTR_TO_PACKET;
+			else if (off == offsetof(struct __sk_buff, data_end) &&
+				 env->allow_ptr_leaks)
+				state->regs[value_regno].type = PTR_TO_PACKET_END;
+		}
 
-	} else if (state->regs[regno].type == FRAME_PTR ||
-		   state->regs[regno].type == PTR_TO_STACK) {
+	} else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
 		if (off >= 0 || off < -MAX_BPF_STACK) {
 			verbose("invalid stack off=%d size=%d\n", off, size);
 			return -EACCES;
@@ -735,11 +835,28 @@
 		} else {
 			err = check_stack_read(state, off, size, value_regno);
 		}
+	} else if (state->regs[regno].type == PTR_TO_PACKET) {
+		if (t == BPF_WRITE) {
+			verbose("cannot write into packet\n");
+			return -EACCES;
+		}
+		err = check_packet_access(env, regno, off, size);
+		if (!err && t == BPF_READ && value_regno >= 0)
+			mark_reg_unknown_value(state->regs, value_regno);
 	} else {
 		verbose("R%d invalid mem access '%s'\n",
-			regno, reg_type_str[state->regs[regno].type]);
+			regno, reg_type_str[reg->type]);
 		return -EACCES;
 	}
+
+	if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks &&
+	    state->regs[value_regno].type == UNKNOWN_VALUE) {
+		/* 1 or 2 byte load zero-extends, determine the number of
+		 * zero upper bits. Not doing it for a 4 byte load, since
+		 * such values cannot be added to ptr_to_packet anyway.
+		 */
+		state->regs[value_regno].imm = 64 - size * 8;
+	}
 	return err;
 }
 
@@ -780,7 +897,8 @@
  * and all elements of stack are initialized
  */
 static int check_stack_boundary(struct verifier_env *env, int regno,
-				int access_size, bool zero_size_allowed)
+				int access_size, bool zero_size_allowed,
+				struct bpf_call_arg_meta *meta)
 {
 	struct verifier_state *state = &env->cur_state;
 	struct reg_state *regs = state->regs;
@@ -806,6 +924,12 @@
 		return -EACCES;
 	}
 
+	if (meta && meta->raw_mode) {
+		meta->access_size = access_size;
+		meta->regno = regno;
+		return 0;
+	}
+
 	for (i = 0; i < access_size; i++) {
 		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
 			verbose("invalid indirect read from stack off %d+%d size %d\n",
@@ -817,7 +941,8 @@
 }
 
 static int check_func_arg(struct verifier_env *env, u32 regno,
-			  enum bpf_arg_type arg_type, struct bpf_map **mapp)
+			  enum bpf_arg_type arg_type,
+			  struct bpf_call_arg_meta *meta)
 {
 	struct reg_state *reg = env->cur_state.regs + regno;
 	enum bpf_reg_type expected_type;
@@ -849,7 +974,8 @@
 		expected_type = CONST_PTR_TO_MAP;
 	} else if (arg_type == ARG_PTR_TO_CTX) {
 		expected_type = PTR_TO_CTX;
-	} else if (arg_type == ARG_PTR_TO_STACK) {
+	} else if (arg_type == ARG_PTR_TO_STACK ||
+		   arg_type == ARG_PTR_TO_RAW_STACK) {
 		expected_type = PTR_TO_STACK;
 		/* One exception here. In case function allows for NULL to be
 		 * passed in as argument, it's a CONST_IMM type. Final test
@@ -857,6 +983,7 @@
 		 */
 		if (reg->type == CONST_IMM && reg->imm == 0)
 			expected_type = CONST_IMM;
+		meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK;
 	} else {
 		verbose("unsupported arg_type %d\n", arg_type);
 		return -EFAULT;
@@ -870,14 +997,13 @@
 
 	if (arg_type == ARG_CONST_MAP_PTR) {
 		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
-		*mapp = reg->map_ptr;
-
+		meta->map_ptr = reg->map_ptr;
 	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
 		/* bpf_map_xxx(..., map_ptr, ..., key) call:
 		 * check that [key, key + map->key_size) are within
 		 * stack limits and initialized
 		 */
-		if (!*mapp) {
+		if (!meta->map_ptr) {
 			/* in function declaration map_ptr must come before
 			 * map_key, so that it's verified and known before
 			 * we have to check map_key here. Otherwise it means
@@ -886,19 +1012,20 @@
 			verbose("invalid map_ptr to access map->key\n");
 			return -EACCES;
 		}
-		err = check_stack_boundary(env, regno, (*mapp)->key_size,
-					   false);
+		err = check_stack_boundary(env, regno, meta->map_ptr->key_size,
+					   false, NULL);
 	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
 		/* bpf_map_xxx(..., map_ptr, ..., value) call:
 		 * check [value, value + map->value_size) validity
 		 */
-		if (!*mapp) {
+		if (!meta->map_ptr) {
 			/* kernel subsystem misconfigured verifier */
 			verbose("invalid map_ptr to access map->value\n");
 			return -EACCES;
 		}
-		err = check_stack_boundary(env, regno, (*mapp)->value_size,
-					   false);
+		err = check_stack_boundary(env, regno,
+					   meta->map_ptr->value_size,
+					   false, NULL);
 	} else if (arg_type == ARG_CONST_STACK_SIZE ||
 		   arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
 		bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO);
@@ -913,7 +1040,7 @@
 			return -EACCES;
 		}
 		err = check_stack_boundary(env, regno - 1, reg->imm,
-					   zero_size_allowed);
+					   zero_size_allowed, meta);
 	}
 
 	return err;
@@ -921,27 +1048,93 @@
 
 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
 {
-	bool bool_map, bool_func;
-	int i;
-
 	if (!map)
 		return 0;
 
-	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
-		bool_map = (map->map_type == func_limit[i].map_type);
-		bool_func = (func_id == func_limit[i].func_id);
-		/* only when map & func pair match it can continue.
-		 * don't allow any other map type to be passed into
-		 * the special func;
-		 */
-		if (bool_func && bool_map != bool_func) {
-			verbose("cannot pass map_type %d into func %d\n",
-				map->map_type, func_id);
-			return -EINVAL;
-		}
+	/* We need a two way check, first is from map perspective ... */
+	switch (map->map_type) {
+	case BPF_MAP_TYPE_PROG_ARRAY:
+		if (func_id != BPF_FUNC_tail_call)
+			goto error;
+		break;
+	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+		if (func_id != BPF_FUNC_perf_event_read &&
+		    func_id != BPF_FUNC_perf_event_output)
+			goto error;
+		break;
+	case BPF_MAP_TYPE_STACK_TRACE:
+		if (func_id != BPF_FUNC_get_stackid)
+			goto error;
+		break;
+	default:
+		break;
+	}
+
+	/* ... and second from the function itself. */
+	switch (func_id) {
+	case BPF_FUNC_tail_call:
+		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+			goto error;
+		break;
+	case BPF_FUNC_perf_event_read:
+	case BPF_FUNC_perf_event_output:
+		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+			goto error;
+		break;
+	case BPF_FUNC_get_stackid:
+		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
+			goto error;
+		break;
+	default:
+		break;
 	}
 
 	return 0;
+error:
+	verbose("cannot pass map_type %d into func %d\n",
+		map->map_type, func_id);
+	return -EINVAL;
+}
+
+static int check_raw_mode(const struct bpf_func_proto *fn)
+{
+	int count = 0;
+
+	if (fn->arg1_type == ARG_PTR_TO_RAW_STACK)
+		count++;
+	if (fn->arg2_type == ARG_PTR_TO_RAW_STACK)
+		count++;
+	if (fn->arg3_type == ARG_PTR_TO_RAW_STACK)
+		count++;
+	if (fn->arg4_type == ARG_PTR_TO_RAW_STACK)
+		count++;
+	if (fn->arg5_type == ARG_PTR_TO_RAW_STACK)
+		count++;
+
+	return count > 1 ? -EINVAL : 0;
+}
+
+static void clear_all_pkt_pointers(struct verifier_env *env)
+{
+	struct verifier_state *state = &env->cur_state;
+	struct reg_state *regs = state->regs, *reg;
+	int i;
+
+	for (i = 0; i < MAX_BPF_REG; i++)
+		if (regs[i].type == PTR_TO_PACKET ||
+		    regs[i].type == PTR_TO_PACKET_END)
+			mark_reg_unknown_value(regs, i);
+
+	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+		if (state->stack_slot_type[i] != STACK_SPILL)
+			continue;
+		reg = &state->spilled_regs[i / BPF_REG_SIZE];
+		if (reg->type != PTR_TO_PACKET &&
+		    reg->type != PTR_TO_PACKET_END)
+			continue;
+		reg->type = UNKNOWN_VALUE;
+		reg->imm = 0;
+	}
 }
 
 static int check_call(struct verifier_env *env, int func_id)
@@ -949,8 +1142,9 @@
 	struct verifier_state *state = &env->cur_state;
 	const struct bpf_func_proto *fn = NULL;
 	struct reg_state *regs = state->regs;
-	struct bpf_map *map = NULL;
 	struct reg_state *reg;
+	struct bpf_call_arg_meta meta;
+	bool changes_data;
 	int i, err;
 
 	/* find function prototype */
@@ -973,23 +1167,45 @@
 		return -EINVAL;
 	}
 
+	changes_data = bpf_helper_changes_skb_data(fn->func);
+
+	memset(&meta, 0, sizeof(meta));
+
+	/* We only support one arg being in raw mode at the moment, which
+	 * is sufficient for the helper functions we have right now.
+	 */
+	err = check_raw_mode(fn);
+	if (err) {
+		verbose("kernel subsystem misconfigured func %d\n", func_id);
+		return err;
+	}
+
 	/* check args */
-	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map);
+	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
 	if (err)
 		return err;
-	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map);
+	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
 	if (err)
 		return err;
-	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map);
+	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
 	if (err)
 		return err;
-	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map);
+	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
 	if (err)
 		return err;
-	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map);
+	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
 	if (err)
 		return err;
 
+	/* Mark slots with STACK_MISC in case of raw mode, stack offset
+	 * is inferred from register state.
+	 */
+	for (i = 0; i < meta.access_size; i++) {
+		err = check_mem_access(env, meta.regno, i, BPF_B, BPF_WRITE, -1);
+		if (err)
+			return err;
+	}
+
 	/* reset caller saved regs */
 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
 		reg = regs + caller_saved[i];
@@ -1008,28 +1224,211 @@
 		 * can check 'value_size' boundary of memory access
 		 * to map element returned from bpf_map_lookup_elem()
 		 */
-		if (map == NULL) {
+		if (meta.map_ptr == NULL) {
 			verbose("kernel subsystem misconfigured verifier\n");
 			return -EINVAL;
 		}
-		regs[BPF_REG_0].map_ptr = map;
+		regs[BPF_REG_0].map_ptr = meta.map_ptr;
 	} else {
 		verbose("unknown return type %d of func %d\n",
 			fn->ret_type, func_id);
 		return -EINVAL;
 	}
 
-	err = check_map_func_compatibility(map, func_id);
+	err = check_map_func_compatibility(meta.map_ptr, func_id);
 	if (err)
 		return err;
 
+	if (changes_data)
+		clear_all_pkt_pointers(env);
+	return 0;
+}
+
+static int check_packet_ptr_add(struct verifier_env *env, struct bpf_insn *insn)
+{
+	struct reg_state *regs = env->cur_state.regs;
+	struct reg_state *dst_reg = &regs[insn->dst_reg];
+	struct reg_state *src_reg = &regs[insn->src_reg];
+	s32 imm;
+
+	if (BPF_SRC(insn->code) == BPF_K) {
+		/* pkt_ptr += imm */
+		imm = insn->imm;
+
+add_imm:
+		if (imm <= 0) {
+			verbose("addition of negative constant to packet pointer is not allowed\n");
+			return -EACCES;
+		}
+		if (imm >= MAX_PACKET_OFF ||
+		    imm + dst_reg->off >= MAX_PACKET_OFF) {
+			verbose("constant %d is too large to add to packet pointer\n",
+				imm);
+			return -EACCES;
+		}
+		/* a constant was added to pkt_ptr.
+		 * Remember it while keeping the same 'id'
+		 */
+		dst_reg->off += imm;
+	} else {
+		if (src_reg->type == CONST_IMM) {
+			/* pkt_ptr += reg where reg is known constant */
+			imm = src_reg->imm;
+			goto add_imm;
+		}
+		/* disallow pkt_ptr += reg
+	 * if reg is not unknown_value with guaranteed zero upper bits
+		 * otherwise pkt_ptr may overflow and addition will become
+		 * subtraction which is not allowed
+		 */
+		if (src_reg->type != UNKNOWN_VALUE) {
+			verbose("cannot add '%s' to ptr_to_packet\n",
+				reg_type_str[src_reg->type]);
+			return -EACCES;
+		}
+		if (src_reg->imm < 48) {
+			verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n",
+				src_reg->imm);
+			return -EACCES;
+		}
+		/* dst_reg stays as pkt_ptr type and since some positive
+		 * integer value was added to the pointer, increment its 'id'
+		 */
+		dst_reg->id++;
+
+		/* something was added to pkt_ptr, set range and off to zero */
+		dst_reg->off = 0;
+		dst_reg->range = 0;
+	}
+	return 0;
+}
+
+static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn)
+{
+	struct reg_state *regs = env->cur_state.regs;
+	struct reg_state *dst_reg = &regs[insn->dst_reg];
+	u8 opcode = BPF_OP(insn->code);
+	s64 imm_log2;
+
+	/* for type == UNKNOWN_VALUE:
+	 * imm > 0 -> number of zero upper bits
+	 * imm == 0 -> don't track, which is the same as all bits can be non-zero
+	 */
+
+	if (BPF_SRC(insn->code) == BPF_X) {
+		struct reg_state *src_reg = &regs[insn->src_reg];
+
+		if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 &&
+		    dst_reg->imm && opcode == BPF_ADD) {
+			/* dreg += sreg
+			 * where both have zero upper bits. Adding them
+			 * can only result in making one more bit non-zero
+			 * in the larger value.
+			 * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
+			 *     0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
+			 */
+			dst_reg->imm = min(dst_reg->imm, src_reg->imm);
+			dst_reg->imm--;
+			return 0;
+		}
+		if (src_reg->type == CONST_IMM && src_reg->imm > 0 &&
+		    dst_reg->imm && opcode == BPF_ADD) {
+			/* dreg += sreg
+			 * where dreg has zero upper bits and sreg is const.
+			 * Adding them can only result making one more bit
+			 * non-zero in the larger value.
+			 */
+			imm_log2 = __ilog2_u64((long long)src_reg->imm);
+			dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
+			dst_reg->imm--;
+			return 0;
+		}
+		/* all other cases not supported yet, just mark dst_reg */
+		dst_reg->imm = 0;
+		return 0;
+	}
+
+	/* sign extend 32-bit imm into 64-bit to make sure that
+	 * negative values occupy bit 63. Note ilog2() would have
+	 * been incorrect, since sizeof(insn->imm) == 4
+	 */
+	imm_log2 = __ilog2_u64((long long)insn->imm);
+
+	if (dst_reg->imm && opcode == BPF_LSH) {
+		/* reg <<= imm
+		 * if reg was a result of 2 byte load, then its imm == 48
+		 * which means that upper 48 bits are zero and shifting this reg
+		 * left by 4 would mean that upper 44 bits are still zero
+		 */
+		dst_reg->imm -= insn->imm;
+	} else if (dst_reg->imm && opcode == BPF_MUL) {
+		/* reg *= imm
+		 * if multiplying by 14 subtract 4
+		 * This is conservative calculation of upper zero bits.
+		 * It's not trying to special case insn->imm == 1 or 0 cases
+		 */
+		dst_reg->imm -= imm_log2 + 1;
+	} else if (opcode == BPF_AND) {
+		/* reg &= imm */
+		dst_reg->imm = 63 - imm_log2;
+	} else if (dst_reg->imm && opcode == BPF_ADD) {
+		/* reg += imm */
+		dst_reg->imm = min(dst_reg->imm, 63 - imm_log2);
+		dst_reg->imm--;
+	} else if (opcode == BPF_RSH) {
+		/* reg >>= imm
+		 * which means that after right shift, upper bits will be zero
+		 * note that verifier already checked that
+		 * 0 <= imm < 64 for shift insn
+		 */
+		dst_reg->imm += insn->imm;
+		if (unlikely(dst_reg->imm > 64))
+			/* some dumb code did:
+			 * r2 = *(u32 *)mem;
+			 * r2 >>= 32;
+			 * and all bits are zero now */
+			dst_reg->imm = 64;
+	} else {
+		/* all other alu ops mean that we don't know what will
+		 * happen to the value; mark it with an unknown number of zero bits
+		 */
+		dst_reg->imm = 0;
+	}
+
+	if (dst_reg->imm < 0) {
+		/* all 64 bits of the register can contain non-zero bits
+		 * and such value cannot be added to ptr_to_packet, since it
+		 * may overflow, mark it as unknown to avoid further eval
+		 */
+		dst_reg->imm = 0;
+	}
+	return 0;
+}
+
+static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn)
+{
+	struct reg_state *regs = env->cur_state.regs;
+	struct reg_state *dst_reg = &regs[insn->dst_reg];
+	struct reg_state *src_reg = &regs[insn->src_reg];
+	u8 opcode = BPF_OP(insn->code);
+
+	/* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
+	 * Don't care about overflow or negative values, just add them
+	 */
+	if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
+		dst_reg->imm += insn->imm;
+	else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
+		 src_reg->type == CONST_IMM)
+		dst_reg->imm += src_reg->imm;
+	else
+		mark_reg_unknown_value(regs, insn->dst_reg);
 	return 0;
 }
 
 /* check validity of 32-bit and 64-bit arithmetic operations */
 static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
 {
-	struct reg_state *regs = env->cur_state.regs;
+	struct reg_state *regs = env->cur_state.regs, *dst_reg;
 	u8 opcode = BPF_OP(insn->code);
 	int err;
 
@@ -1118,8 +1517,6 @@
 
 	} else {	/* all other ALU ops: and, sub, xor, add, ... */
 
-		bool stack_relative = false;
-
 		if (BPF_SRC(insn->code) == BPF_X) {
 			if (insn->imm != 0 || insn->off != 0) {
 				verbose("BPF_ALU uses reserved fields\n");
@@ -1157,11 +1554,34 @@
 			}
 		}
 
+		/* check dest operand */
+		err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
+		if (err)
+			return err;
+
+		dst_reg = &regs[insn->dst_reg];
+
 		/* pattern match 'bpf_add Rx, imm' instruction */
 		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
-		    regs[insn->dst_reg].type == FRAME_PTR &&
-		    BPF_SRC(insn->code) == BPF_K) {
-			stack_relative = true;
+		    dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) {
+			dst_reg->type = PTR_TO_STACK;
+			dst_reg->imm = insn->imm;
+			return 0;
+		} else if (opcode == BPF_ADD &&
+			   BPF_CLASS(insn->code) == BPF_ALU64 &&
+			   dst_reg->type == PTR_TO_PACKET) {
+			/* ptr_to_packet += K|X */
+			return check_packet_ptr_add(env, insn);
+		} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+			   dst_reg->type == UNKNOWN_VALUE &&
+			   env->allow_ptr_leaks) {
+			/* unknown += K|X */
+			return evaluate_reg_alu(env, insn);
+		} else if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+			   dst_reg->type == CONST_IMM &&
+			   env->allow_ptr_leaks) {
+			/* reg_imm += K|X */
+			return evaluate_reg_imm_alu(env, insn);
 		} else if (is_pointer_value(env, insn->dst_reg)) {
 			verbose("R%d pointer arithmetic prohibited\n",
 				insn->dst_reg);
@@ -1173,24 +1593,45 @@
 			return -EACCES;
 		}
 
-		/* check dest operand */
-		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
-		if (err)
-			return err;
-
-		if (stack_relative) {
-			regs[insn->dst_reg].type = PTR_TO_STACK;
-			regs[insn->dst_reg].imm = insn->imm;
-		}
+		/* mark dest operand */
+		mark_reg_unknown_value(regs, insn->dst_reg);
 	}
 
 	return 0;
 }
 
+static void find_good_pkt_pointers(struct verifier_env *env,
+				   struct reg_state *dst_reg)
+{
+	struct verifier_state *state = &env->cur_state;
+	struct reg_state *regs = state->regs, *reg;
+	int i;
+	/* r2 = r3;
+	 * r2 += 8
+	 * if (r2 > pkt_end) goto somewhere
+	 * r2 == dst_reg, pkt_end == src_reg,
+	 * r2=pkt(id=n,off=8,r=0)
+	 * r3=pkt(id=n,off=0,r=0)
+	 * find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
+	 * so that range of bytes [r3, r3 + 8) is safe to access
+	 */
+	for (i = 0; i < MAX_BPF_REG; i++)
+		if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
+			regs[i].range = dst_reg->off;
+
+	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+		if (state->stack_slot_type[i] != STACK_SPILL)
+			continue;
+		reg = &state->spilled_regs[i / BPF_REG_SIZE];
+		if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
+			reg->range = dst_reg->off;
+	}
+}
+
 static int check_cond_jmp_op(struct verifier_env *env,
 			     struct bpf_insn *insn, int *insn_idx)
 {
-	struct reg_state *regs = env->cur_state.regs;
+	struct reg_state *regs = env->cur_state.regs, *dst_reg;
 	struct verifier_state *other_branch;
 	u8 opcode = BPF_OP(insn->code);
 	int err;
@@ -1228,11 +1669,12 @@
 	if (err)
 		return err;
 
+	dst_reg = &regs[insn->dst_reg];
+
 	/* detect if R == 0 where R was initialized to zero earlier */
 	if (BPF_SRC(insn->code) == BPF_K &&
 	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
-	    regs[insn->dst_reg].type == CONST_IMM &&
-	    regs[insn->dst_reg].imm == insn->imm) {
+	    dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) {
 		if (opcode == BPF_JEQ) {
 			/* if (imm == imm) goto pc+off;
 			 * only follow the goto, ignore fall-through
@@ -1254,44 +1696,30 @@
 
 	/* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
 	if (BPF_SRC(insn->code) == BPF_K &&
-	    insn->imm == 0 && (opcode == BPF_JEQ ||
-			       opcode == BPF_JNE) &&
-	    regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
+	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
+	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
 		if (opcode == BPF_JEQ) {
 			/* next fallthrough insn can access memory via
 			 * this register
 			 */
 			regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
 			/* branch target cannot access it, since reg == 0 */
-			other_branch->regs[insn->dst_reg].type = CONST_IMM;
-			other_branch->regs[insn->dst_reg].imm = 0;
+			mark_reg_unknown_value(other_branch->regs,
+					       insn->dst_reg);
 		} else {
 			other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
-			regs[insn->dst_reg].type = CONST_IMM;
-			regs[insn->dst_reg].imm = 0;
+			mark_reg_unknown_value(regs, insn->dst_reg);
 		}
+	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
+		   dst_reg->type == PTR_TO_PACKET &&
+		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+		find_good_pkt_pointers(env, dst_reg);
 	} else if (is_pointer_value(env, insn->dst_reg)) {
 		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
 		return -EACCES;
-	} else if (BPF_SRC(insn->code) == BPF_K &&
-		   (opcode == BPF_JEQ || opcode == BPF_JNE)) {
-
-		if (opcode == BPF_JEQ) {
-			/* detect if (R == imm) goto
-			 * and in the target state recognize that R = imm
-			 */
-			other_branch->regs[insn->dst_reg].type = CONST_IMM;
-			other_branch->regs[insn->dst_reg].imm = insn->imm;
-		} else {
-			/* detect if (R != imm) goto
-			 * and in the fall-through state recognize that R = imm
-			 */
-			regs[insn->dst_reg].type = CONST_IMM;
-			regs[insn->dst_reg].imm = insn->imm;
-		}
 	}
 	if (log_level)
-		print_verifier_state(env);
+		print_verifier_state(&env->cur_state);
 	return 0;
 }
 
@@ -1369,13 +1797,14 @@
 	int i, err;
 
 	if (!may_access_skb(env->prog->type)) {
-		verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
+		verbose("BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
 		return -EINVAL;
 	}
 
 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+	    BPF_SIZE(insn->code) == BPF_DW ||
 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
-		verbose("BPF_LD_ABS uses reserved fields\n");
+		verbose("BPF_LD_[ABS|IND] uses reserved fields\n");
 		return -EINVAL;
 	}
 
@@ -1539,6 +1968,8 @@
 				goto peek_stack;
 			else if (ret < 0)
 				goto err_free;
+			if (t + 1 < insn_cnt)
+				env->explored_states[t + 1] = STATE_LIST_MARK;
 		} else if (opcode == BPF_JA) {
 			if (BPF_SRC(insns[t].code) != BPF_K) {
 				ret = -EINVAL;
@@ -1606,6 +2037,58 @@
 	return ret;
 }
 
+/* the following conditions reduce the number of explored insns
+ * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
+ */
+static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur)
+{
+	if (old->id != cur->id)
+		return false;
+
+	/* old ptr_to_packet is more conservative, since it allows smaller
+	 * range. Ex:
+	 * old(off=0,r=10) is equal to cur(off=0,r=20), because
+	 * old(off=0,r=10) means that with range=10 the verifier proceeded
+	 * further and found no issues with the program. Now we're in the same
+	 * spot with cur(off=0,r=20), so we're safe too, since anything further
+	 * will only be looking at most 10 bytes after this pointer.
+	 */
+	if (old->off == cur->off && old->range < cur->range)
+		return true;
+
+	/* old(off=20,r=10) is equal to cur(off=22,r=22 or 5 or 0)
+	 * since both cannot be used for packet access and the safe (old)
+	 * pointer has a smaller off that could be used for a further
+	 * 'if (ptr > data_end)' check.
+	 * Ex:
+	 * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean
+	 * that we cannot access the packet.
+	 * The safe range is:
+	 * [ptr, ptr + range - off)
+	 * so whenever off >= range, it means no safe bytes from this pointer.
+	 * When comparing old->off <= cur->off, it means that older code
+	 * went with smaller offset and that offset was later
+	 * used to figure out the safe range after 'if (ptr > data_end)' check
+	 * Say, 'old' state was explored like:
+	 * ... R3(off=0, r=0)
+	 * R4 = R3 + 20
+	 * ... now R4(off=20,r=0)  <-- here
+	 * if (R4 > data_end)
+	 * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access.
+	 * ... the code further went all the way to bpf_exit.
+	 * Now the 'cur' state at the mark 'here' has R4(off=30,r=0).
+	 * old_R4(off=20,r=0) is equal to cur_R4(off=30,r=0), since if the verifier
+	 * goes further, such cur_R4 will give a larger safe packet range after
+	 * 'if (R4 > data_end)' and all further insns were already good with r=20,
+	 * so they will be good with r=30 and we can prune the search.
+	 */
+	if (old->off <= cur->off &&
+	    old->off >= old->range && cur->off >= cur->range)
+		return true;
+
+	return false;
+}
+
 /* compare two verifier states
  *
  * all states stored in state_list are known to be valid, since
@@ -1634,17 +2117,25 @@
  */
 static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
 {
+	struct reg_state *rold, *rcur;
 	int i;
 
 	for (i = 0; i < MAX_BPF_REG; i++) {
-		if (memcmp(&old->regs[i], &cur->regs[i],
-			   sizeof(old->regs[0])) != 0) {
-			if (old->regs[i].type == NOT_INIT ||
-			    (old->regs[i].type == UNKNOWN_VALUE &&
-			     cur->regs[i].type != NOT_INIT))
-				continue;
-			return false;
-		}
+		rold = &old->regs[i];
+		rcur = &cur->regs[i];
+
+		if (memcmp(rold, rcur, sizeof(*rold)) == 0)
+			continue;
+
+		if (rold->type == NOT_INIT ||
+		    (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT))
+			continue;
+
+		if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
+		    compare_ptrs_to_packet(rold, rcur))
+			continue;
+
+		return false;
 	}
 
 	for (i = 0; i < MAX_BPF_STACK; i++) {
@@ -1743,7 +2234,7 @@
 		insn = &insns[insn_idx];
 		class = BPF_CLASS(insn->code);
 
-		if (++insn_processed > 32768) {
+		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
 			verbose("BPF program is too large. Proccessed %d insn\n",
 				insn_processed);
 			return -E2BIG;
@@ -1766,7 +2257,7 @@
 
 		if (log_level && do_print_state) {
 			verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
-			print_verifier_state(env);
+			print_verifier_state(&env->cur_state);
 			do_print_state = false;
 		}
 
@@ -1978,6 +2469,7 @@
 		insn_idx++;
 	}
 
+	verbose("processed %d insns\n", insn_processed);
 	return 0;
 }
 
@@ -2029,7 +2521,6 @@
 			if (IS_ERR(map)) {
 				verbose("fd %d is not pointing to valid bpf_map\n",
 					insn->imm);
-				fdput(f);
 				return PTR_ERR(map);
 			}
 
@@ -2049,15 +2540,18 @@
 				return -E2BIG;
 			}
 
-			/* remember this map */
-			env->used_maps[env->used_map_cnt++] = map;
-
 			/* hold the map. If the program is rejected by verifier,
 			 * the map will be released by release_maps() or it
 			 * will be used by the valid program until it's unloaded
 			 * and all maps are released in free_bpf_prog_info()
 			 */
-			bpf_map_inc(map, false);
+			map = bpf_map_inc(map, false);
+			if (IS_ERR(map)) {
+				fdput(f);
+				return PTR_ERR(map);
+			}
+			env->used_maps[env->used_map_cnt++] = map;
+
 			fdput(f);
 next_insn:
 			insn++;
@@ -2093,26 +2587,6 @@
 			insn->src_reg = 0;
 }
 
-static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
-{
-	struct bpf_insn *insn = prog->insnsi;
-	int insn_cnt = prog->len;
-	int i;
-
-	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (BPF_CLASS(insn->code) != BPF_JMP ||
-		    BPF_OP(insn->code) == BPF_CALL ||
-		    BPF_OP(insn->code) == BPF_EXIT)
-			continue;
-
-		/* adjust offset of jmps if necessary */
-		if (i < pos && i + insn->off + 1 > pos)
-			insn->off += delta;
-		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
-			insn->off -= delta;
-	}
-}
-
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -2122,14 +2596,15 @@
 	int insn_cnt = env->prog->len;
 	struct bpf_insn insn_buf[16];
 	struct bpf_prog *new_prog;
-	u32 cnt;
-	int i;
 	enum bpf_access_type type;
+	int i;
 
 	if (!env->prog->aux->ops->convert_ctx_access)
 		return 0;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
+		u32 insn_delta, cnt;
+
 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
 			type = BPF_READ;
 		else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
@@ -2151,34 +2626,18 @@
 			return -EINVAL;
 		}
 
-		if (cnt == 1) {
-			memcpy(insn, insn_buf, sizeof(*insn));
-			continue;
-		}
-
-		/* several new insns need to be inserted. Make room for them */
-		insn_cnt += cnt - 1;
-		new_prog = bpf_prog_realloc(env->prog,
-					    bpf_prog_size(insn_cnt),
-					    GFP_USER);
+		new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt);
 		if (!new_prog)
 			return -ENOMEM;
 
-		new_prog->len = insn_cnt;
-
-		memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1,
-			sizeof(*insn) * (insn_cnt - i - cnt));
-
-		/* copy substitute insns in place of load instruction */
-		memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);
-
-		/* adjust branches in the whole program */
-		adjust_branches(new_prog, i, cnt - 1);
+		insn_delta = cnt - 1;
 
 		/* keep walking new program and skip insns we just inserted */
 		env->prog = new_prog;
-		insn = new_prog->insnsi + i + cnt - 1;
-		i += cnt - 1;
+		insn      = new_prog->insnsi + i + insn_delta;
+
+		insn_cnt += insn_delta;
+		i        += insn_delta;
 	}
 
 	return 0;
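
A rough sketch (not part of this patch) of the kind of SCHED_CLS/SCHED_ACT program the new PTR_TO_PACKET/PTR_TO_PACKET_END tracking is designed to accept: both pointers are loaded from the context, and every packet access is guarded by a comparison against data_end. Loader section/license boilerplate is omitted and the headers are the usual uapi ones.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>

int cls_example(struct __sk_buff *skb)
{
	void *data     = (void *)(long)skb->data;      /* verifier: PTR_TO_PACKET */
	void *data_end = (void *)(long)skb->data_end;  /* verifier: PTR_TO_PACKET_END */
	struct ethhdr *eth = data;
	struct iphdr *iph;

	/* 'pkt_ptr + imm > pkt_end' is what find_good_pkt_pointers() turns
	 * into a proven range of [data, data + sizeof(*eth))
	 */
	if (data + sizeof(*eth) > data_end)
		return 0;				/* TC_ACT_OK */

	/* extend the proven range before touching the IP header */
	iph = data + sizeof(*eth);
	if ((void *)(iph + 1) > data_end)
		return 0;

	return iph->protocol;				/* reads are ok; writes into the packet are rejected */
}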
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 671dc05..86cb5c6 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1215,6 +1215,41 @@
 	cgroup_free_root(root);
 }
 
+/*
+ * look up cgroup associated with current task's cgroup namespace on the
+ * specified hierarchy
+ */
+static struct cgroup *
+current_cgns_cgroup_from_root(struct cgroup_root *root)
+{
+	struct cgroup *res = NULL;
+	struct css_set *cset;
+
+	lockdep_assert_held(&css_set_lock);
+
+	rcu_read_lock();
+
+	cset = current->nsproxy->cgroup_ns->root_cset;
+	if (cset == &init_css_set) {
+		res = &root->cgrp;
+	} else {
+		struct cgrp_cset_link *link;
+
+		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
+			struct cgroup *c = link->cgrp;
+
+			if (c->root == root) {
+				res = c;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	BUG_ON(!res);
+	return res;
+}
+
 /* look up cgroup associated with given css_set on the specified hierarchy */
 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
 					    struct cgroup_root *root)
@@ -1593,6 +1628,33 @@
 	return 0;
 }
 
+static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
+			    struct kernfs_root *kf_root)
+{
+	int len = 0;
+	char *buf = NULL;
+	struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
+	struct cgroup *ns_cgroup;
+
+	buf = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_bh(&css_set_lock);
+	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
+	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
+	spin_unlock_bh(&css_set_lock);
+
+	if (len >= PATH_MAX)
+		len = -ERANGE;
+	else if (len > 0) {
+		seq_escape(sf, buf, " \t\n\\");
+		len = 0;
+	}
+	kfree(buf);
+	return len;
+}
+
 static int cgroup_show_options(struct seq_file *seq,
 			       struct kernfs_root *kf_root)
 {
@@ -2825,9 +2887,10 @@
 				    size_t nbytes, loff_t off, bool threadgroup)
 {
 	struct task_struct *tsk;
+	struct cgroup_subsys *ss;
 	struct cgroup *cgrp;
 	pid_t pid;
-	int ret;
+	int ssid, ret;
 
 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
 		return -EINVAL;
@@ -2875,8 +2938,10 @@
 	rcu_read_unlock();
 out_unlock_threadgroup:
 	percpu_up_write(&cgroup_threadgroup_rwsem);
+	for_each_subsys(ss, ssid)
+		if (ss->post_attach)
+			ss->post_attach();
 	cgroup_kn_unlock(of->kn);
-	cpuset_post_attach_flush();
 	return ret ?: nbytes;
 }
 
@@ -5430,6 +5495,7 @@
 	.mkdir			= cgroup_mkdir,
 	.rmdir			= cgroup_rmdir,
 	.rename			= cgroup_rename,
+	.show_path		= cgroup_show_path,
 };
 
 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
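
cgroup_show_path() above escapes the namespace-relative path with seq_escape(sf, buf, " \t\n\\") before it reaches mount output, so the characters that would break /proc/*/mountinfo parsing are emitted as three-digit octal escapes. A tiny userspace sketch of that convention, assuming the same escape set:

#include <stdio.h>
#include <string.h>

/* Print 'path', replacing space, tab, newline and backslash with \ooo,
 * the same convention mountinfo consumers already expect.
 */
static void escape_path(const char *path, FILE *out)
{
	for (; *path; path++) {
		if (strchr(" \t\n\\", *path))
			fprintf(out, "\\%03o", (unsigned char)*path);
		else
			fputc(*path, out);
	}
}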
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6ea42e8..d948e44 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -36,6 +36,7 @@
  * @target:	The target state
  * @thread:	Pointer to the hotplug thread
  * @should_run:	Thread should execute
+ * @rollback:	Perform a rollback
  * @cb_stat:	The state for a single callback (install/uninstall)
  * @cb:		Single callback function (install/uninstall)
  * @result:	Result of the operation
@@ -47,6 +48,7 @@
 #ifdef CONFIG_SMP
 	struct task_struct	*thread;
 	bool			should_run;
+	bool			rollback;
 	enum cpuhp_state	cb_state;
 	int			(*cb)(unsigned int cpu);
 	int			result;
@@ -301,6 +303,11 @@
 	return __cpu_notify(val, cpu, -1, NULL);
 }
 
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
+{
+	BUG_ON(cpu_notify(val, cpu));
+}
+
 /* Notifier wrappers for transitioning to state machine */
 static int notify_prepare(unsigned int cpu)
 {
@@ -477,6 +484,16 @@
 		} else {
 			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
 		}
+	} else if (st->rollback) {
+		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
+
+		undo_cpu_down(cpu, st, cpuhp_ap_states);
+		/*
+		 * This is a momentary workaround to keep the notifier users
+		 * happy. Will go away once we get rid of the notifiers.
+		 */
+		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+		st->rollback = false;
 	} else {
 		/* Cannot happen .... */
 		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -636,11 +653,6 @@
 	read_unlock(&tasklist_lock);
 }
 
-static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
-{
-	BUG_ON(cpu_notify(val, cpu));
-}
-
 static int notify_down_prepare(unsigned int cpu)
 {
 	int err, nr_calls = 0;
@@ -691,21 +703,6 @@
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	int err;
 
-	/*
-	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
-	 * and RCU users of this state to go away such that all new such users
-	 * will observe it.
-	 *
-	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so wait for both.
-	 *
-	 * Do sync before park smpboot threads to take care the rcu boost case.
-	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		synchronize_rcu_mult(call_rcu, call_rcu_sched);
-	else
-		synchronize_rcu();
-
 	/* Park the smpboot threads */
 	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 	smpboot_park_threads(cpu);
@@ -721,9 +718,10 @@
 	 */
 	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
-		/* CPU didn't die: tell everyone.  Can't complain. */
-		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+		/* CPU refused to die */
 		irq_unlock_sparse();
+		/* Unpark the hotplug thread so we can rollback there */
+		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 		return err;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -832,6 +830,11 @@
 	 * to do the further cleanups.
 	 */
 	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
+	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+		st->target = prev_state;
+		st->rollback = true;
+		cpuhp_kick_ap_work(cpu);
+	}
 
 	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 out:
@@ -905,8 +908,6 @@
 
 	st->state = CPUHP_AP_ONLINE_IDLE;
 
-	/* The cpu is marked online, set it active now */
-	set_cpu_active(cpu, true);
 	/* Unpark the stopper thread and the hotplug thread of this cpu */
 	stop_machine_unpark(cpu);
 	kthread_unpark(st->thread);
@@ -1218,6 +1219,12 @@
 		.name			= "ap:offline",
 		.cant_stop		= true,
 	},
+	/* First state is scheduler control. Interrupts are disabled */
+	[CPUHP_AP_SCHED_STARTING] = {
+		.name			= "sched:starting",
+		.startup		= sched_cpu_starting,
+		.teardown		= sched_cpu_dying,
+	},
 	/*
 	 * Low level startup/teardown notifiers. Run with interrupts
 	 * disabled. Will be removed once the notifiers are converted to
@@ -1249,12 +1256,22 @@
 		.name			= "notify:online",
 		.startup		= notify_online,
 		.teardown		= notify_down_prepare,
+		.skip_onerr		= true,
 	},
 #endif
 	/*
 	 * The dynamically registered state space is here
 	 */
 
+#ifdef CONFIG_SMP
+	/* Last state is scheduler control setting the cpu active */
+	[CPUHP_AP_ACTIVE] = {
+		.name			= "sched:active",
+		.startup		= sched_cpu_activate,
+		.teardown		= sched_cpu_deactivate,
+	},
+#endif
+
 	/* CPU is fully up and running. */
 	[CPUHP_ONLINE] = {
 		.name			= "online",
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 00ab5c2..1902956 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -58,7 +58,6 @@
 #include <asm/uaccess.h>
 #include <linux/atomic.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/cgroup.h>
 #include <linux/wait.h>
 
@@ -1016,7 +1015,7 @@
 	}
 }
 
-void cpuset_post_attach_flush(void)
+static void cpuset_post_attach(void)
 {
 	flush_workqueue(cpuset_migrate_mm_wq);
 }
@@ -2087,6 +2086,7 @@
 	.can_attach	= cpuset_can_attach,
 	.cancel_attach	= cpuset_cancel_attach,
 	.attach		= cpuset_attach,
+	.post_attach	= cpuset_post_attach,
 	.bind		= cpuset_bind,
 	.legacy_cftypes	= files,
 	.early_init	= true,
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 343c22f..b9325e7 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -18,6 +18,14 @@
 	struct perf_callchain_entry	*cpu_entries[0];
 };
 
+int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
+
+static inline size_t perf_callchain_entry__sizeof(void)
+{
+	return (sizeof(struct perf_callchain_entry) +
+		sizeof(__u64) * sysctl_perf_event_max_stack);
+}
+
 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
 static atomic_t nr_callchain_events;
 static DEFINE_MUTEX(callchain_mutex);
@@ -73,7 +81,7 @@
 	if (!entries)
 		return -ENOMEM;
 
-	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;
 
 	for_each_possible_cpu(cpu) {
 		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
@@ -147,7 +155,8 @@
 
 	cpu = smp_processor_id();
 
-	return &entries->cpu_entries[cpu][*rctx];
+	return (((void *)entries->cpu_entries[cpu]) +
+		(*rctx * perf_callchain_entry__sizeof()));
 }
 
 static void
@@ -215,3 +224,25 @@
 
 	return entry;
 }
+
+int perf_event_max_stack_handler(struct ctl_table *table, int write,
+				 void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int new_value = sysctl_perf_event_max_stack, ret;
+	struct ctl_table new_table = *table;
+
+	new_table.data = &new_value;
+	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
+	if (ret || !write)
+		return ret;
+
+	mutex_lock(&callchain_mutex);
+	if (atomic_read(&nr_callchain_events))
+		ret = -EBUSY;
+	else
+		sysctl_perf_event_max_stack = new_value;
+
+	mutex_unlock(&callchain_mutex);
+
+	return ret;
+}
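
With the entry size now derived from sysctl_perf_event_max_stack, the per-CPU callchain buffers can no longer be indexed as an array of fixed-size structs; records have to be addressed by byte offset, as the indexing change above does. A small illustrative sketch of that layout (names are made up for the example):

#include <stddef.h>
#include <stdint.h>

#define NR_CONTEXTS	4	/* e.g. task, softirq, hardirq, NMI */

struct callchain_entry {
	uint64_t nr;
	uint64_t ip[];		/* up to max_stack entries follow */
};

static size_t entry_size(unsigned int max_stack)
{
	return sizeof(struct callchain_entry) + sizeof(uint64_t) * max_stack;
}

/* A per-CPU buffer holds NR_CONTEXTS back-to-back variable-size entries,
 * so the record for recursion context 'rctx' starts rctx * entry_size()
 * bytes into the buffer.
 */
static struct callchain_entry *get_entry(void *cpu_buf, int rctx,
					 unsigned int max_stack)
{
	return (struct callchain_entry *)((char *)cpu_buf +
					  (size_t)rctx * entry_size(max_stack));
}

The buffer itself would then be allocated as NR_CONTEXTS * entry_size(max_stack) bytes, matching the perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS sizing in the hunk above.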
diff --git a/kernel/events/core.c b/kernel/events/core.c
index de24fbc..274450e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -44,6 +44,8 @@
 #include <linux/compat.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <linux/namei.h>
+#include <linux/parser.h>
 
 #include "internal.h"
 
@@ -351,7 +353,7 @@
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
  */
-int sysctl_perf_event_paranoid __read_mostly = 1;
+int sysctl_perf_event_paranoid __read_mostly = 2;
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -412,7 +414,8 @@
 	if (ret || !write)
 		return ret;
 
-	if (sysctl_perf_cpu_time_max_percent == 100) {
+	if (sysctl_perf_cpu_time_max_percent == 100 ||
+	    sysctl_perf_cpu_time_max_percent == 0) {
 		printk(KERN_WARNING
 		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
 		WRITE_ONCE(perf_sample_allowed_ns, 0);
@@ -1105,6 +1108,7 @@
  * function.
  *
  * Lock order:
+ *    cred_guard_mutex
  *	task_struct::perf_event_mutex
  *	  perf_event_context::mutex
  *	    perf_event::child_mutex;
@@ -1925,8 +1929,13 @@
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
-	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = smp_processor_id();
+	WRITE_ONCE(event->oncpu, smp_processor_id());
+	/*
+	 * Order event::oncpu write to happen before the ACTIVE state
+	 * is visible.
+	 */
+	smp_wmb();
+	WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
 
 	/*
 	 * Unthrottle events, since we scheduled we might have missed several
@@ -2358,6 +2367,112 @@
 }
 EXPORT_SYMBOL_GPL(perf_event_enable);
 
+struct stop_event_data {
+	struct perf_event	*event;
+	unsigned int		restart;
+};
+
+static int __perf_event_stop(void *info)
+{
+	struct stop_event_data *sd = info;
+	struct perf_event *event = sd->event;
+
+	/* if it's already INACTIVE, do nothing */
+	if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
+	/* matches smp_wmb() in event_sched_in() */
+	smp_rmb();
+
+	/*
+	 * There is a window with interrupts enabled before we get here,
+	 * so we need to check again lest we try to stop another CPU's event.
+	 */
+	if (READ_ONCE(event->oncpu) != smp_processor_id())
+		return -EAGAIN;
+
+	event->pmu->stop(event, PERF_EF_UPDATE);
+
+	/*
+	 * May race with the actual stop (through perf_pmu_output_stop()),
+	 * but it is only used for events with AUX ring buffer, and such
+	 * events will refuse to restart because of rb::aux_mmap_count==0,
+	 * see comments in perf_aux_output_begin().
+	 *
+	 * Since this is happening on an event-local CPU, no trace is lost
+	 * while restarting.
+	 */
+	if (sd->restart)
+		event->pmu->start(event, PERF_EF_START);
+
+	return 0;
+}
+
+static int perf_event_restart(struct perf_event *event)
+{
+	struct stop_event_data sd = {
+		.event		= event,
+		.restart	= 1,
+	};
+	int ret = 0;
+
+	do {
+		if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
+			return 0;
+
+		/* matches smp_wmb() in event_sched_in() */
+		smp_rmb();
+
+		/*
+		 * We only want to restart ACTIVE events, so if the event goes
+		 * inactive here (event->oncpu==-1), there's nothing more to do;
+		 * fall through with ret==-ENXIO.
+		 */
+		ret = cpu_function_call(READ_ONCE(event->oncpu),
+					__perf_event_stop, &sd);
+	} while (ret == -EAGAIN);
+
+	return ret;
+}
+
+/*
+ * In order to contain the amount of racy and tricky code in the address filter
+ * configuration management, it is a two part process:
+ *
+ * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
+ *      we update the addresses of corresponding vmas in
+ *	event::addr_filters_offs array and bump the event::addr_filters_gen;
+ * (p2) when an event is scheduled in (pmu::add), it calls
+ *      perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
+ *      if the generation has changed since the previous call.
+ *
+ * If (p1) happens while the event is active, we restart it to force (p2).
+ *
+ * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
+ *     pre-existing mappings, called once when new filters arrive via SET_FILTER
+ *     ioctl;
+ * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
+ *     registered mapping, called for every new mmap(), with mm::mmap_sem down
+ *     for reading;
+ * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
+ *     of exec.
+ */
+void perf_event_addr_filters_sync(struct perf_event *event)
+{
+	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
+
+	if (!has_addr_filter(event))
+		return;
+
+	raw_spin_lock(&ifh->lock);
+	if (event->addr_filters_gen != event->hw.addr_filters_gen) {
+		event->pmu->addr_filters_sync(event);
+		event->hw.addr_filters_gen = event->addr_filters_gen;
+	}
+	raw_spin_unlock(&ifh->lock);
+}
+EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
+
 static int _perf_event_refresh(struct perf_event *event, int refresh)
 {
 	/*
@@ -2417,14 +2532,24 @@
 			cpuctx->task_ctx = NULL;
 	}
 
-	is_active ^= ctx->is_active; /* changed bits */
-
+	/*
+	 * Always update time if it was set; not only when it changes.
+	 * Otherwise we can 'forget' to update time for any but the last
+	 * context we sched out. For example:
+	 *
+	 *   ctx_sched_out(.event_type = EVENT_FLEXIBLE)
+	 *   ctx_sched_out(.event_type = EVENT_PINNED)
+	 *
+	 * would only update time for the pinned events.
+	 */
 	if (is_active & EVENT_TIME) {
 		/* update (and stop) ctx time */
 		update_context_time(ctx);
 		update_cgrp_time_from_cpuctx(cpuctx);
 	}
 
+	is_active ^= ctx->is_active; /* changed bits */
+
 	if (!ctx->nr_active || !(is_active & EVENT_ALL))
 		return;
 
@@ -3197,16 +3322,6 @@
 		put_ctx(clone_ctx);
 }
 
-void perf_event_exec(void)
-{
-	int ctxn;
-
-	rcu_read_lock();
-	for_each_task_context_nr(ctxn)
-		perf_event_enable_on_exec(ctxn);
-	rcu_read_unlock();
-}
-
 struct perf_read_data {
 	struct perf_event *event;
 	bool group;
@@ -3410,7 +3525,6 @@
 find_lively_task_by_vpid(pid_t vpid)
 {
 	struct task_struct *task;
-	int err;
 
 	rcu_read_lock();
 	if (!vpid)
@@ -3424,16 +3538,7 @@
 	if (!task)
 		return ERR_PTR(-ESRCH);
 
-	/* Reuse ptrace permission checks for now. */
-	err = -EACCES;
-	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
-		goto errout;
-
 	return task;
-errout:
-	put_task_struct(task);
-	return ERR_PTR(err);
-
 }
 
 /*
@@ -3718,6 +3823,9 @@
 	return true;
 }
 
+static void perf_addr_filters_splice(struct perf_event *event,
+				       struct list_head *head);
+
 static void _free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
@@ -3745,6 +3853,8 @@
 	}
 
 	perf_event_free_bpf_prog(event);
+	perf_addr_filters_splice(event, NULL);
+	kfree(event->addr_filters_offs);
 
 	if (event->destroy)
 		event->destroy(event);
@@ -4341,6 +4451,19 @@
 	case PERF_EVENT_IOC_SET_BPF:
 		return perf_event_set_bpf_prog(event, arg);
 
+	case PERF_EVENT_IOC_PAUSE_OUTPUT: {
+		struct ring_buffer *rb;
+
+		rcu_read_lock();
+		rb = rcu_dereference(event->rb);
+		if (!rb || !rb->nr_pages) {
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+		rb_toggle_paused(rb, !!arg);
+		rcu_read_unlock();
+		return 0;
+	}
 	default:
 		return -ENOTTY;
 	}
@@ -4657,6 +4780,8 @@
 		event->pmu->event_mapped(event);
 }
 
+static void perf_pmu_output_stop(struct perf_event *event);
+
 /*
  * A buffer can be mmap()ed multiple times; either directly through the same
  * event, or through other events by use of perf_event_set_output().
@@ -4684,10 +4809,22 @@
 	 */
 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
 	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
+		/*
+		 * Stop all AUX events that are writing to this buffer,
+		 * so that we can free its AUX pages and corresponding PMU
+		 * data. Note that after rb::aux_mmap_count dropped to zero,
+		 * they won't start any more (see perf_aux_output_begin()).
+		 */
+		perf_pmu_output_stop(event);
+
+		/* now it's safe to free the pages */
 		atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
 		vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
 
+		/* this has to be the last one */
 		rb_free_aux(rb);
+		WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
+
 		mutex_unlock(&event->mmap_mutex);
 	}
 
@@ -5628,9 +5765,13 @@
 	}
 }
 
-void perf_event_output(struct perf_event *event,
-			struct perf_sample_data *data,
-			struct pt_regs *regs)
+static void __always_inline
+__perf_event_output(struct perf_event *event,
+		    struct perf_sample_data *data,
+		    struct pt_regs *regs,
+		    int (*output_begin)(struct perf_output_handle *,
+					struct perf_event *,
+					unsigned int))
 {
 	struct perf_output_handle handle;
 	struct perf_event_header header;
@@ -5640,7 +5781,7 @@
 
 	perf_prepare_sample(&header, data, event, regs);
 
-	if (perf_output_begin(&handle, event, header.size))
+	if (output_begin(&handle, event, header.size))
 		goto exit;
 
 	perf_output_sample(&handle, &header, data, event);
@@ -5651,6 +5792,30 @@
 	rcu_read_unlock();
 }
 
+void
+perf_event_output_forward(struct perf_event *event,
+			 struct perf_sample_data *data,
+			 struct pt_regs *regs)
+{
+	__perf_event_output(event, data, regs, perf_output_begin_forward);
+}
+
+void
+perf_event_output_backward(struct perf_event *event,
+			   struct perf_sample_data *data,
+			   struct pt_regs *regs)
+{
+	__perf_event_output(event, data, regs, perf_output_begin_backward);
+}
+
+void
+perf_event_output(struct perf_event *event,
+		  struct perf_sample_data *data,
+		  struct pt_regs *regs)
+{
+	__perf_event_output(event, data, regs, perf_output_begin);
+}
+
 /*
  * read event_id
  */
@@ -5696,15 +5861,18 @@
 static void
 perf_event_aux_ctx(struct perf_event_context *ctx,
 		   perf_event_aux_output_cb output,
-		   void *data)
+		   void *data, bool all)
 {
 	struct perf_event *event;
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-		if (event->state < PERF_EVENT_STATE_INACTIVE)
-			continue;
-		if (!event_filter_match(event))
-			continue;
+		if (!all) {
+			if (event->state < PERF_EVENT_STATE_INACTIVE)
+				continue;
+			if (!event_filter_match(event))
+				continue;
+		}
+
 		output(event, data);
 	}
 }
@@ -5715,7 +5883,7 @@
 {
 	rcu_read_lock();
 	preempt_disable();
-	perf_event_aux_ctx(task_ctx, output, data);
+	perf_event_aux_ctx(task_ctx, output, data, false);
 	preempt_enable();
 	rcu_read_unlock();
 }
@@ -5745,13 +5913,13 @@
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
 		if (cpuctx->unique_pmu != pmu)
 			goto next;
-		perf_event_aux_ctx(&cpuctx->ctx, output, data);
+		perf_event_aux_ctx(&cpuctx->ctx, output, data, false);
 		ctxn = pmu->task_ctx_nr;
 		if (ctxn < 0)
 			goto next;
 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
 		if (ctx)
-			perf_event_aux_ctx(ctx, output, data);
+			perf_event_aux_ctx(ctx, output, data, false);
 next:
 		put_cpu_ptr(pmu->pmu_cpu_context);
 	}
@@ -5759,6 +5927,134 @@
 }
 
 /*
+ * Clear all file-based filters at exec, they'll have to be
+ * re-instated when/if these objects are mmapped again.
+ */
+static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
+{
+	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
+	struct perf_addr_filter *filter;
+	unsigned int restart = 0, count = 0;
+	unsigned long flags;
+
+	if (!has_addr_filter(event))
+		return;
+
+	raw_spin_lock_irqsave(&ifh->lock, flags);
+	list_for_each_entry(filter, &ifh->list, entry) {
+		if (filter->inode) {
+			event->addr_filters_offs[count] = 0;
+			restart++;
+		}
+
+		count++;
+	}
+
+	if (restart)
+		event->addr_filters_gen++;
+	raw_spin_unlock_irqrestore(&ifh->lock, flags);
+
+	if (restart)
+		perf_event_restart(event);
+}
+
+void perf_event_exec(void)
+{
+	struct perf_event_context *ctx;
+	int ctxn;
+
+	rcu_read_lock();
+	for_each_task_context_nr(ctxn) {
+		ctx = current->perf_event_ctxp[ctxn];
+		if (!ctx)
+			continue;
+
+		perf_event_enable_on_exec(ctxn);
+
+		perf_event_aux_ctx(ctx, perf_event_addr_filters_exec, NULL,
+				   true);
+	}
+	rcu_read_unlock();
+}
+
+struct remote_output {
+	struct ring_buffer	*rb;
+	int			err;
+};
+
+static void __perf_event_output_stop(struct perf_event *event, void *data)
+{
+	struct perf_event *parent = event->parent;
+	struct remote_output *ro = data;
+	struct ring_buffer *rb = ro->rb;
+	struct stop_event_data sd = {
+		.event	= event,
+	};
+
+	if (!has_aux(event))
+		return;
+
+	if (!parent)
+		parent = event;
+
+	/*
+	 * In case of inheritance, it will be the parent that links to the
+	 * ring-buffer, but it will be the child that's actually using it:
+	 */
+	if (rcu_dereference(parent->rb) == rb)
+		ro->err = __perf_event_stop(&sd);
+}
+
+static int __perf_pmu_output_stop(void *info)
+{
+	struct perf_event *event = info;
+	struct pmu *pmu = event->pmu;
+	struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+	struct remote_output ro = {
+		.rb	= event->rb,
+	};
+
+	rcu_read_lock();
+	perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
+	if (cpuctx->task_ctx)
+		perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop,
+				   &ro, false);
+	rcu_read_unlock();
+
+	return ro.err;
+}
+
+static void perf_pmu_output_stop(struct perf_event *event)
+{
+	struct perf_event *iter;
+	int err, cpu;
+
+restart:
+	rcu_read_lock();
+	list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
+		/*
+		 * For per-CPU events, we need to make sure that neither they
+		 * nor their children are running; for cpu==-1 events it's
+		 * sufficient to stop the event itself if it's active, since
+		 * it can't have children.
+		 */
+		cpu = iter->cpu;
+		if (cpu == -1)
+			cpu = READ_ONCE(iter->oncpu);
+
+		if (cpu == -1)
+			continue;
+
+		err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
+		if (err == -EAGAIN) {
+			rcu_read_unlock();
+			goto restart;
+		}
+	}
+	rcu_read_unlock();
+}
+
+/*
  * task tracking -- fork/exit
  *
  * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
@@ -6167,6 +6463,87 @@
 	kfree(buf);
 }
 
+/*
+ * Whether this @filter depends on a dynamic object which is not loaded
+ * yet or whose load addresses are not known.
+ */
+static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter)
+{
+	return filter->filter && filter->inode;
+}
+
+/*
+ * Check whether inode and address range match filter criteria.
+ */
+static bool perf_addr_filter_match(struct perf_addr_filter *filter,
+				     struct file *file, unsigned long offset,
+				     unsigned long size)
+{
+	if (filter->inode != file->f_inode)
+		return false;
+
+	if (filter->offset > offset + size)
+		return false;
+
+	if (filter->offset + filter->size < offset)
+		return false;
+
+	return true;
+}
+
+static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
+{
+	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
+	struct vm_area_struct *vma = data;
+	unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
+	struct file *file = vma->vm_file;
+	struct perf_addr_filter *filter;
+	unsigned int restart = 0, count = 0;
+
+	if (!has_addr_filter(event))
+		return;
+
+	if (!file)
+		return;
+
+	raw_spin_lock_irqsave(&ifh->lock, flags);
+	list_for_each_entry(filter, &ifh->list, entry) {
+		if (perf_addr_filter_match(filter, file, off,
+					     vma->vm_end - vma->vm_start)) {
+			event->addr_filters_offs[count] = vma->vm_start;
+			restart++;
+		}
+
+		count++;
+	}
+
+	if (restart)
+		event->addr_filters_gen++;
+	raw_spin_unlock_irqrestore(&ifh->lock, flags);
+
+	if (restart)
+		perf_event_restart(event);
+}
+
+/*
+ * Adjust all task's events' filters to the new vma
+ */
+static void perf_addr_filters_adjust(struct vm_area_struct *vma)
+{
+	struct perf_event_context *ctx;
+	int ctxn;
+
+	rcu_read_lock();
+	for_each_task_context_nr(ctxn) {
+		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+		if (!ctx)
+			continue;
+
+		perf_event_aux_ctx(ctx, __perf_addr_filters_adjust, vma, true);
+	}
+	rcu_read_unlock();
+}
+
 void perf_event_mmap(struct vm_area_struct *vma)
 {
 	struct perf_mmap_event mmap_event;
@@ -6198,6 +6575,7 @@
 		/* .flags (attr_mmap2 only) */
 	};
 
+	perf_addr_filters_adjust(vma);
 	perf_event_mmap_event(&mmap_event);
 }
 
@@ -6489,10 +6867,7 @@
 		irq_work_queue(&event->pending);
 	}
 
-	if (event->overflow_handler)
-		event->overflow_handler(event, data, regs);
-	else
-		perf_event_output(event, data, regs);
+	event->overflow_handler(event, data, regs);
 
 	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
@@ -6725,7 +7100,7 @@
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-inline void perf_swevent_put_recursion_context(int rctx)
+void perf_swevent_put_recursion_context(int rctx)
 {
 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 
@@ -6987,7 +7362,26 @@
 	return 1;
 }
 
-void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
+void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
+			       struct trace_event_call *call, u64 count,
+			       struct pt_regs *regs, struct hlist_head *head,
+			       struct task_struct *task)
+{
+	struct bpf_prog *prog = call->prog;
+
+	if (prog) {
+		*(struct pt_regs **)raw_data = regs;
+		if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
+			perf_swevent_put_recursion_context(rctx);
+			return;
+		}
+	}
+	perf_tp_event(call->event.type, count, raw_data, size, regs, head,
+		      rctx, task);
+}
+EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
+
+void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
 		   struct pt_regs *regs, struct hlist_head *head, int rctx,
 		   struct task_struct *task)
 {
@@ -6999,9 +7393,11 @@
 		.data = record,
 	};
 
-	perf_sample_data_init(&data, addr, 0);
+	perf_sample_data_init(&data, 0, 0);
 	data.raw = &raw;
 
+	perf_trace_buf_update(record, event_type);
+
 	hlist_for_each_entry_rcu(event, head, hlist_entry) {
 		if (perf_tp_event_match(event, &data, regs))
 			perf_swevent_event(event, count, &data, regs);
@@ -7079,24 +7475,6 @@
 	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
 }
 
-static int perf_event_set_filter(struct perf_event *event, void __user *arg)
-{
-	char *filter_str;
-	int ret;
-
-	if (event->attr.type != PERF_TYPE_TRACEPOINT)
-		return -EINVAL;
-
-	filter_str = strndup_user(arg, PAGE_SIZE);
-	if (IS_ERR(filter_str))
-		return PTR_ERR(filter_str);
-
-	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
-
-	kfree(filter_str);
-	return ret;
-}
-
 static void perf_event_free_filter(struct perf_event *event)
 {
 	ftrace_profile_free_filter(event);
@@ -7104,6 +7482,7 @@
 
 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 {
+	bool is_kprobe, is_tracepoint;
 	struct bpf_prog *prog;
 
 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
@@ -7112,20 +7491,31 @@
 	if (event->tp_event->prog)
 		return -EEXIST;
 
-	if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE))
-		/* bpf programs can only be attached to u/kprobes */
+	is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
+	is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
+	if (!is_kprobe && !is_tracepoint)
+		/* BPF programs can only be attached to a u/kprobe or a tracepoint */
 		return -EINVAL;
 
 	prog = bpf_prog_get(prog_fd);
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);
 
-	if (prog->type != BPF_PROG_TYPE_KPROBE) {
+	if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
+	    (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
 		/* valid fd, but invalid bpf program type */
 		bpf_prog_put(prog);
 		return -EINVAL;
 	}
 
+	if (is_tracepoint) {
+		int off = trace_event_get_offsets(event->tp_event);
+
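+		/*
+		 * Reject programs that would read ctx fields beyond what
+		 * this tracepoint actually provides.
+		 */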
+		if (prog->aux->max_ctx_offset > off) {
+			bpf_prog_put(prog);
+			return -EACCES;
+		}
+	}
 	event->tp_event->prog = prog;
 
 	return 0;
@@ -7151,11 +7541,6 @@
 {
 }
 
-static int perf_event_set_filter(struct perf_event *event, void __user *arg)
-{
-	return -ENOENT;
-}
-
 static void perf_event_free_filter(struct perf_event *event)
 {
 }
@@ -7184,6 +7569,387 @@
 #endif
 
 /*
+ * Allocate a new address filter
+ */
+static struct perf_addr_filter *
+perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
+{
+	int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
+	struct perf_addr_filter *filter;
+
+	filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
+	if (!filter)
+		return NULL;
+
+	INIT_LIST_HEAD(&filter->entry);
+	list_add_tail(&filter->entry, filters);
+
+	return filter;
+}
+
+static void free_filters_list(struct list_head *filters)
+{
+	struct perf_addr_filter *filter, *iter;
+
+	list_for_each_entry_safe(filter, iter, filters, entry) {
+		if (filter->inode)
+			iput(filter->inode);
+		list_del(&filter->entry);
+		kfree(filter);
+	}
+}
+
+/*
+ * Free existing address filters and optionally install new ones
+ */
+static void perf_addr_filters_splice(struct perf_event *event,
+				     struct list_head *head)
+{
+	unsigned long flags;
+	LIST_HEAD(list);
+
+	if (!has_addr_filter(event))
+		return;
+
+	/* don't bother with children; they don't have their own filters */
+	if (event->parent)
+		return;
+
+	raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
+
+	list_splice_init(&event->addr_filters.list, &list);
+	if (head)
+		list_splice(head, &event->addr_filters.list);
+
+	raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
+
+	free_filters_list(&list);
+}
+
+/*
+ * Scan through mm's vmas and see if one of them matches the
+ * @filter; if so, adjust filter's address range.
+ * Called with mm::mmap_sem down for reading.
+ */
+static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
+					    struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		struct file *file = vma->vm_file;
+		unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+		unsigned long vma_size = vma->vm_end - vma->vm_start;
+
+		if (!file)
+			continue;
+
+		if (!perf_addr_filter_match(filter, file, off, vma_size))
+			continue;
+
+		return vma->vm_start;
+	}
+
+	return 0;
+}
+
+/*
+ * Update event's address range filters based on the
+ * task's existing mappings, if any.
+ */
+static void perf_event_addr_filters_apply(struct perf_event *event)
+{
+	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
+	struct task_struct *task = READ_ONCE(event->ctx->task);
+	struct perf_addr_filter *filter;
+	struct mm_struct *mm = NULL;
+	unsigned int count = 0;
+	unsigned long flags;
+
+	/*
+	 * We may observe TASK_TOMBSTONE, which means that the event tear-down
+	 * will stop on the parent's child_mutex that our caller is also holding
+	 */
+	if (task == TASK_TOMBSTONE)
+		return;
+
+	mm = get_task_mm(event->ctx->task);
+	if (!mm)
+		goto restart;
+
+	down_read(&mm->mmap_sem);
+
+	raw_spin_lock_irqsave(&ifh->lock, flags);
+	list_for_each_entry(filter, &ifh->list, entry) {
+		event->addr_filters_offs[count] = 0;
+
+		if (perf_addr_filter_needs_mmap(filter))
+			event->addr_filters_offs[count] =
+				perf_addr_filter_apply(filter, mm);
+
+		count++;
+	}
+
+	event->addr_filters_gen++;
+	raw_spin_unlock_irqrestore(&ifh->lock, flags);
+
+	up_read(&mm->mmap_sem);
+
+	mmput(mm);
+
+restart:
+	perf_event_restart(event);
+}
+
+/*
+ * Address range filtering: limiting the data to certain
+ * instruction address ranges. Filters are ioctl()ed to us from
+ * userspace as ascii strings.
+ *
+ * Filter string format:
+ *
+ * ACTION RANGE_SPEC
+ * where ACTION is one of:
+ *  * "filter": limit the trace to this region
+ *  * "start": start tracing from this address
+ *  * "stop": stop tracing at this address/region;
+ * RANGE_SPEC is
+ *  * for kernel addresses: <start address>[/<size>]
+ *  * for object files:     <start address>[/<size>]@</path/to/object/file>
+ *
+ * If <size> is not specified, the range is treated as a single address.
+ */
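+/*
+ * Purely illustrative examples (the addresses, sizes and path below are
+ * made up):
+ *
+ *	filter 0x42f000/0x1000@/usr/lib/libfoo.so
+ *	start 0xffffffff81000000/0x4000
+ *	stop 0xffffffff81004000
+ */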
+enum {
+	IF_ACT_FILTER,
+	IF_ACT_START,
+	IF_ACT_STOP,
+	IF_SRC_FILE,
+	IF_SRC_KERNEL,
+	IF_SRC_FILEADDR,
+	IF_SRC_KERNELADDR,
+};
+
+enum {
+	IF_STATE_ACTION = 0,
+	IF_STATE_SOURCE,
+	IF_STATE_END,
+};
+
+static const match_table_t if_tokens = {
+	{ IF_ACT_FILTER,	"filter" },
+	{ IF_ACT_START,		"start" },
+	{ IF_ACT_STOP,		"stop" },
+	{ IF_SRC_FILE,		"%u/%u@%s" },
+	{ IF_SRC_KERNEL,	"%u/%u" },
+	{ IF_SRC_FILEADDR,	"%u@%s" },
+	{ IF_SRC_KERNELADDR,	"%u" },
+};
+
+/*
+ * Address filter string parser
+ */
+static int
+perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
+			     struct list_head *filters)
+{
+	struct perf_addr_filter *filter = NULL;
+	char *start, *orig, *filename = NULL;
+	struct path path;
+	substring_t args[MAX_OPT_ARGS];
+	int state = IF_STATE_ACTION, token;
+	unsigned int kernel = 0;
+	int ret = -EINVAL;
+
+	orig = fstr = kstrdup(fstr, GFP_KERNEL);
+	if (!fstr)
+		return -ENOMEM;
+
+	while ((start = strsep(&fstr, " ,\n")) != NULL) {
+		ret = -EINVAL;
+
+		if (!*start)
+			continue;
+
+		/* filter definition begins */
+		if (state == IF_STATE_ACTION) {
+			filter = perf_addr_filter_new(event, filters);
+			if (!filter)
+				goto fail;
+		}
+
+		token = match_token(start, if_tokens, args);
+		switch (token) {
+		case IF_ACT_FILTER:
+		case IF_ACT_START:
+			filter->filter = 1;
+
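+			/* fall through */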
+		case IF_ACT_STOP:
+			if (state != IF_STATE_ACTION)
+				goto fail;
+
+			state = IF_STATE_SOURCE;
+			break;
+
+		case IF_SRC_KERNELADDR:
+		case IF_SRC_KERNEL:
+			kernel = 1;
+
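+			/* fall through */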
+		case IF_SRC_FILEADDR:
+		case IF_SRC_FILE:
+			if (state != IF_STATE_SOURCE)
+				goto fail;
+
+			if (token == IF_SRC_FILE || token == IF_SRC_KERNEL)
+				filter->range = 1;
+
+			*args[0].to = 0;
+			ret = kstrtoul(args[0].from, 0, &filter->offset);
+			if (ret)
+				goto fail;
+
+			if (filter->range) {
+				*args[1].to = 0;
+				ret = kstrtoul(args[1].from, 0, &filter->size);
+				if (ret)
+					goto fail;
+			}
+
+			if (token == IF_SRC_FILE) {
+				filename = match_strdup(&args[2]);
+				if (!filename) {
+					ret = -ENOMEM;
+					goto fail;
+				}
+			}
+
+			state = IF_STATE_END;
+			break;
+
+		default:
+			goto fail;
+		}
+
+		/*
+		 * Filter definition is fully parsed, validate and install it.
+		 * Make sure that it doesn't contradict itself or the event's
+		 * attribute.
+		 */
+		if (state == IF_STATE_END) {
+			if (kernel && event->attr.exclude_kernel)
+				goto fail;
+
+			if (!kernel) {
+				if (!filename)
+					goto fail;
+
+				/* look up the path and grab its inode */
+				ret = kern_path(filename, LOOKUP_FOLLOW, &path);
+				if (ret)
+					goto fail_free_name;
+
+				filter->inode = igrab(d_inode(path.dentry));
+				path_put(&path);
+				kfree(filename);
+				filename = NULL;
+
+				ret = -EINVAL;
+				if (!filter->inode ||
+				    !S_ISREG(filter->inode->i_mode))
+					/* free_filters_list() will iput() */
+					goto fail;
+			}
+
+			/* ready to consume more filters */
+			state = IF_STATE_ACTION;
+			filter = NULL;
+		}
+	}
+
+	if (state != IF_STATE_ACTION)
+		goto fail;
+
+	kfree(orig);
+
+	return 0;
+
+fail_free_name:
+	kfree(filename);
+fail:
+	free_filters_list(filters);
+	kfree(orig);
+
+	return ret;
+}
+
+static int
+perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
+{
+	LIST_HEAD(filters);
+	int ret;
+
+	/*
+	 * Since this is called in the perf_ioctl() path, we're already holding
+	 * ctx::mutex.
+	 */
+	lockdep_assert_held(&event->ctx->mutex);
+
+	if (WARN_ON_ONCE(event->parent))
+		return -EINVAL;
+
+	/*
+	 * For now, we only support filtering in per-task events; doing so
+	 * for CPU-wide events requires additional context switching trickery,
+	 * since the same object code will be mapped at different virtual
+	 * addresses in different processes.
+	 */
+	if (!event->ctx->task)
+		return -EOPNOTSUPP;
+
+	ret = perf_event_parse_addr_filter(event, filter_str, &filters);
+	if (ret)
+		return ret;
+
+	ret = event->pmu->addr_filters_validate(&filters);
+	if (ret) {
+		free_filters_list(&filters);
+		return ret;
+	}
+
+	/* remove existing filters, if any */
+	perf_addr_filters_splice(event, &filters);
+
+	/* install new filters */
+	perf_event_for_each_child(event, perf_event_addr_filters_apply);
+
+	return ret;
+}
+
+static int perf_event_set_filter(struct perf_event *event, void __user *arg)
+{
+	char *filter_str;
+	int ret = -EINVAL;
+
+	if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
+	    !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
+	    !has_addr_filter(event))
+		return -EINVAL;
+
+	filter_str = strndup_user(arg, PAGE_SIZE);
+	if (IS_ERR(filter_str))
+		return PTR_ERR(filter_str);
+
+	if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
+	    event->attr.type == PERF_TYPE_TRACEPOINT)
+		ret = ftrace_profile_set_filter(event, event->attr.config,
+						filter_str);
+	else if (has_addr_filter(event))
+		ret = perf_event_set_addr_filter(event, filter_str);
+
+	kfree(filter_str);
+	return ret;
+}
+
+/*
  * hrtimer based swevent callback
  */
 
@@ -7540,6 +8306,20 @@
 out:
 	mutex_unlock(&pmus_lock);
 }
+
+/*
+ * Let userspace know that this PMU supports address range filtering:
+ */
+static ssize_t nr_addr_filters_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *page)
+{
+	struct pmu *pmu = dev_get_drvdata(dev);
+
+	return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
+}
+DEVICE_ATTR_RO(nr_addr_filters);
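+/*
+ * Userspace can then query it, e.g. (PMU name and value are illustrative):
+ *
+ *	$ cat /sys/bus/event_source/devices/<pmu>/nr_addr_filters
+ *	4
+ */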
+
 static struct idr pmu_idr;
 
 static ssize_t
@@ -7641,9 +8421,19 @@
 	if (ret)
 		goto free_dev;
 
+	/* For PMUs with address filters, throw in an extra attribute: */
+	if (pmu->nr_addr_filters)
+		ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
+
+	if (ret)
+		goto del_dev;
+
 out:
 	return ret;
 
+del_dev:
+	device_del(pmu->dev);
+
 free_dev:
 	put_device(pmu->dev);
 	goto out;
@@ -7683,6 +8473,21 @@
 	}
 
 skip_type:
+	if (pmu->task_ctx_nr == perf_hw_context) {
+		static int hw_context_taken = 0;
+
+		/*
+		 * Other than systems with heterogeneous CPUs, it never makes
+		 * sense for two PMUs to share perf_hw_context. PMUs which are
+		 * uncore must use perf_invalid_context.
+		 */
+		if (WARN_ON_ONCE(hw_context_taken &&
+		    !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
+			pmu->task_ctx_nr = perf_invalid_context;
+
+		hw_context_taken = 1;
+	}
+
 	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
 	if (pmu->pmu_cpu_context)
 		goto got_cpu_context;
@@ -7770,6 +8575,8 @@
 	free_percpu(pmu->pmu_disable_count);
 	if (pmu->type >= PERF_TYPE_MAX)
 		idr_remove(&pmu_idr, pmu->type);
+	if (pmu->nr_addr_filters)
+		device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
 	device_del(pmu->dev);
 	put_device(pmu->dev);
 	free_pmu_context(pmu);
@@ -7963,6 +8770,7 @@
 	INIT_LIST_HEAD(&event->sibling_list);
 	INIT_LIST_HEAD(&event->rb_entry);
 	INIT_LIST_HEAD(&event->active_entry);
+	INIT_LIST_HEAD(&event->addr_filters.list);
 	INIT_HLIST_NODE(&event->hlist_entry);
 
 
@@ -7970,6 +8778,7 @@
 	init_irq_work(&event->pending, perf_pending_event);
 
 	mutex_init(&event->mmap_mutex);
+	raw_spin_lock_init(&event->addr_filters.lock);
 
 	atomic_long_set(&event->refcount, 1);
 	event->cpu		= cpu;
@@ -8004,8 +8813,16 @@
 		context = parent_event->overflow_handler_context;
 	}
 
-	event->overflow_handler	= overflow_handler;
-	event->overflow_handler_context = context;
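+	/*
+	 * With no explicit handler, samples go straight into the ring
+	 * buffer, in the direction the event was created with.
+	 */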
+	if (overflow_handler) {
+		event->overflow_handler	= overflow_handler;
+		event->overflow_handler_context = context;
+	} else if (is_write_backward(event)) {
+		event->overflow_handler = perf_event_output_backward;
+		event->overflow_handler_context = NULL;
+	} else {
+		event->overflow_handler = perf_event_output_forward;
+		event->overflow_handler_context = NULL;
+	}
 
 	perf_event__state_init(event);
 
@@ -8046,11 +8863,22 @@
 	if (err)
 		goto err_pmu;
 
+	if (has_addr_filter(event)) {
+		event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (!event->addr_filters_offs)
+			goto err_per_task;
+
+		/* force hw sync on the address filters */
+		event->addr_filters_gen = 1;
+	}
+
 	if (!event->parent) {
 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 			err = get_callchain_buffers();
 			if (err)
-				goto err_per_task;
+				goto err_addr_filters;
 		}
 	}
 
@@ -8059,6 +8887,9 @@
 
 	return event;
 
+err_addr_filters:
+	kfree(event->addr_filters_offs);
+
 err_per_task:
 	exclusive_event_destroy(event);
 
@@ -8238,6 +9069,13 @@
 		goto out;
 
 	/*
+	 * Either writing ring buffer from beginning or from end.
+	 * Mixing is not allowed.
+	 */
+	if (is_write_backward(output_event) != is_write_backward(event))
+		goto out;
+
+	/*
 	 * If both events generate aux data, they must be on the same PMU
 	 */
 	if (has_aux(event) && has_aux(output_event) &&
@@ -8403,6 +9241,24 @@
 
 	get_online_cpus();
 
+	if (task) {
+		err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+		if (err)
+			goto err_cpus;
+
+		/*
+		 * Reuse ptrace permission checks for now.
+		 *
+		 * We must hold cred_guard_mutex across this and any potential
+		 * perf_install_in_context() call for this new event to
+		 * serialize against exec() altering our credentials (and the
+		 * perf_event_exit_task() that could imply).
+		 */
+		err = -EACCES;
+		if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+			goto err_cred;
+	}
+
 	if (flags & PERF_FLAG_PID_CGROUP)
 		cgroup_fd = pid;
 
@@ -8410,7 +9266,7 @@
 				 NULL, NULL, cgroup_fd);
 	if (IS_ERR(event)) {
 		err = PTR_ERR(event);
-		goto err_cpus;
+		goto err_cred;
 	}
 
 	if (is_sampling_event(event)) {
@@ -8469,11 +9325,6 @@
 		goto err_context;
 	}
 
-	if (task) {
-		put_task_struct(task);
-		task = NULL;
-	}
-
 	/*
 	 * Look up the group leader (we will attach this event to it):
 	 */
@@ -8532,6 +9383,7 @@
 					f_flags);
 	if (IS_ERR(event_file)) {
 		err = PTR_ERR(event_file);
+		event_file = NULL;
 		goto err_context;
 	}
 
@@ -8570,6 +9422,11 @@
 
 	WARN_ON_ONCE(ctx->parent_ctx);
 
+	/*
+	 * This is the point of no return; we cannot fail hereafter. This is
+	 * where we start modifying current state.
+	 */
+
 	if (move_group) {
 		/*
 		 * See perf_event_ctx_lock() for comments on the details
@@ -8641,6 +9498,11 @@
 		mutex_unlock(&gctx->mutex);
 	mutex_unlock(&ctx->mutex);
 
+	if (task) {
+		mutex_unlock(&task->signal->cred_guard_mutex);
+		put_task_struct(task);
+	}
+
 	put_online_cpus();
 
 	mutex_lock(&current->perf_event_mutex);
@@ -8673,6 +9535,9 @@
 	 */
 	if (!event_file)
 		free_event(event);
+err_cred:
+	if (task)
+		mutex_unlock(&task->signal->cred_guard_mutex);
 err_cpus:
 	put_online_cpus();
 err_task:
@@ -8957,6 +9822,9 @@
 
 /*
  * When a child task exits, feed back event values to parent events.
+ *
+ * Can be called with cred_guard_mutex held when called from
+ * install_exec_creds().
  */
 void perf_event_exit_task(struct task_struct *child)
 {
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 4199b6d..05f9f6d 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -11,13 +11,13 @@
 struct ring_buffer {
 	atomic_t			refcount;
 	struct rcu_head			rcu_head;
-	struct irq_work			irq_work;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct		work;
 	int				page_order;	/* allocation order  */
 #endif
 	int				nr_pages;	/* nr of data pages  */
 	int				overwrite;	/* can overwrite itself */
+	int				paused;		/* can write into ring buffer */
 
 	atomic_t			poll;		/* POLL_ for wakeups */
 
@@ -65,6 +65,14 @@
 	rb_free(rb);
 }
 
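+/*
+ * Pause or resume writing into the ring buffer; a buffer with no data
+ * pages can never be un-paused.
+ */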
+static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
+{
+	if (!pause && rb->nr_pages)
+		rb->paused = 0;
+	else
+		rb->paused = 1;
+}
+
 extern struct ring_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
 extern void perf_event_wakeup(struct perf_event *event);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index c61f0cb..ae9b90d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -102,8 +102,21 @@
 	preempt_enable();
 }
 
-int perf_output_begin(struct perf_output_handle *handle,
-		      struct perf_event *event, unsigned int size)
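+/*
+ * A forward-writing buffer has its free space between head and tail; a
+ * backward-writing one swaps the roles of the two.
+ */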
+static __always_inline bool
+ring_buffer_has_space(unsigned long head, unsigned long tail,
+		      unsigned long data_size, unsigned int size,
+		      bool backward)
+{
+	if (!backward)
+		return CIRC_SPACE(head, tail, data_size) >= size;
+	else
+		return CIRC_SPACE(tail, head, data_size) >= size;
+}
+
+static __always_inline int
+__perf_output_begin(struct perf_output_handle *handle,
+		    struct perf_event *event, unsigned int size,
+		    bool backward)
 {
 	struct ring_buffer *rb;
 	unsigned long tail, offset, head;
@@ -125,8 +138,11 @@
 	if (unlikely(!rb))
 		goto out;
 
-	if (unlikely(!rb->nr_pages))
+	if (unlikely(rb->paused)) {
+		if (rb->nr_pages)
+			local_inc(&rb->lost);
 		goto out;
+	}
 
 	handle->rb    = rb;
 	handle->event = event;
@@ -143,9 +159,12 @@
 	do {
 		tail = READ_ONCE(rb->user_page->data_tail);
 		offset = head = local_read(&rb->head);
-		if (!rb->overwrite &&
-		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
-			goto fail;
+		if (!rb->overwrite) {
+			if (unlikely(!ring_buffer_has_space(head, tail,
+							    perf_data_size(rb),
+							    size, backward)))
+				goto fail;
+		}
 
 		/*
 		 * The above forms a control dependency barrier separating the
@@ -159,9 +178,17 @@
 		 * See perf_output_put_handle().
 		 */
 
-		head += size;
+		if (!backward)
+			head += size;
+		else
+			head -= size;
 	} while (local_cmpxchg(&rb->head, offset, head) != offset);
 
+	if (backward) {
+		offset = head;
+		head = (u64)(-head);
+	}
+
 	/*
 	 * We rely on the implied barrier() by local_cmpxchg() to ensure
 	 * none of the data stores below can be lifted up by the compiler.
@@ -203,6 +230,26 @@
 	return -ENOSPC;
 }
 
+int perf_output_begin_forward(struct perf_output_handle *handle,
+			     struct perf_event *event, unsigned int size)
+{
+	return __perf_output_begin(handle, event, size, false);
+}
+
+int perf_output_begin_backward(struct perf_output_handle *handle,
+			       struct perf_event *event, unsigned int size)
+{
+	return __perf_output_begin(handle, event, size, true);
+}
+
+int perf_output_begin(struct perf_output_handle *handle,
+		      struct perf_event *event, unsigned int size)
+{
+	return __perf_output_begin(handle, event, size,
+				   unlikely(is_write_backward(event)));
+}
+
 unsigned int perf_output_copy(struct perf_output_handle *handle,
 		      const void *buf, unsigned int len)
 {
@@ -221,8 +268,6 @@
 	rcu_read_unlock();
 }
 
-static void rb_irq_work(struct irq_work *work);
-
 static void
 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 {
@@ -243,16 +288,13 @@
 
 	INIT_LIST_HEAD(&rb->event_list);
 	spin_lock_init(&rb->event_lock);
-	init_irq_work(&rb->irq_work, rb_irq_work);
-}
 
-static void ring_buffer_put_async(struct ring_buffer *rb)
-{
-	if (!atomic_dec_and_test(&rb->refcount))
-		return;
-
-	rb->rcu_head.next = (void *)rb;
-	irq_work_queue(&rb->irq_work);
+	/*
+	 * perf_output_begin() only checks rb->paused, therefore
+	 * rb->paused must be true if we have no pages for output.
+	 */
+	if (!rb->nr_pages)
+		rb->paused = 1;
 }
 
 /*
@@ -264,6 +306,10 @@
  * The ordering is similar to that of perf_output_{begin,end}, with
  * the exception of (B), which should be taken care of by the pmu
  * driver, since ordering rules will differ depending on hardware.
+ *
+ * Call this from pmu::start(); see the comment in perf_aux_output_end()
+ * about its use in pmu callbacks. Both can also be called from the PMI
+ * handler if needed.
  */
 void *perf_aux_output_begin(struct perf_output_handle *handle,
 			    struct perf_event *event)
@@ -288,6 +334,13 @@
 		goto err;
 
 	/*
+	 * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
+	 * the aux buffer is in perf_mmap_close(), about to get freed.
+	 */
+	if (!atomic_read(&rb->aux_mmap_count))
+		goto err_put;
+
+	/*
 	 * Nesting is not supported for AUX area, make sure nested
 	 * writers are caught early
 	 */
@@ -328,10 +381,11 @@
 	return handle->rb->aux_priv;
 
 err_put:
+	/* can't be last */
 	rb_free_aux(rb);
 
 err:
-	ring_buffer_put_async(rb);
+	ring_buffer_put(rb);
 	handle->event = NULL;
 
 	return NULL;
@@ -342,11 +396,16 @@
  * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
  * pmu driver's responsibility to observe ordering rules of the hardware,
  * so that all the data is externally visible before this is called.
+ *
+ * Note: this has to be called from the pmu::stop() callback, as the assumption
+ * of the AUX buffer management code is that after pmu::stop(), the AUX
+ * transaction must have stopped and therefore dropped the AUX reference count.
  */
 void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 			 bool truncated)
 {
 	struct ring_buffer *rb = handle->rb;
+	bool wakeup = truncated;
 	unsigned long aux_head;
 	u64 flags = 0;
 
@@ -375,14 +434,22 @@
 	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
 
 	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
-		perf_output_wakeup(handle);
+		wakeup = true;
 		local_add(rb->aux_watermark, &rb->aux_wakeup);
 	}
+
+	if (wakeup) {
+		if (truncated)
+			handle->event->pending_disable = 1;
+		perf_output_wakeup(handle);
+	}
+
 	handle->event = NULL;
 
 	local_set(&rb->aux_nest, 0);
+	/* can't be last */
 	rb_free_aux(rb);
-	ring_buffer_put_async(rb);
+	ring_buffer_put(rb);
 }
 
 /*
@@ -463,6 +530,14 @@
 {
 	int pg;
 
+	/*
+	 * Should never happen; the last reference should be dropped from
+	 * perf_mmap_close() path, which first stops aux transactions (which
+	 * in turn are the atomic holders of aux_refcount) and then does the
+	 * last rb_free_aux().
+	 */
+	WARN_ON_ONCE(in_atomic());
+
 	if (rb->aux_priv) {
 		rb->free_aux(rb->aux_priv);
 		rb->free_aux = NULL;
@@ -574,18 +649,7 @@
 void rb_free_aux(struct ring_buffer *rb)
 {
 	if (atomic_dec_and_test(&rb->aux_refcount))
-		irq_work_queue(&rb->irq_work);
-}
-
-static void rb_irq_work(struct irq_work *work)
-{
-	struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
-
-	if (!atomic_read(&rb->aux_refcount))
 		__rb_free_aux(rb);
-
-	if (rb->rcu_head.next == (void *)rb)
-		call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 220fc17..7edc95e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -321,7 +321,7 @@
 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
 
 	ret = __replace_page(vma, vaddr, old_page, new_page);
-	page_cache_release(new_page);
+	put_page(new_page);
 put_old:
 	put_page(old_page);
 
@@ -539,14 +539,14 @@
 	 * see uprobe_register().
 	 */
 	if (mapping->a_ops->readpage)
-		page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
+		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
 	else
-		page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
+		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
 	copy_from_page(page, offset, insn, nbytes);
-	page_cache_release(page);
+	put_page(page);
 
 	return 0;
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index d277e83..3e84515 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1494,7 +1494,7 @@
 	 * sigaltstack should be cleared when sharing the same VM
 	 */
 	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
-		p->sas_ss_sp = p->sas_ss_size = 0;
+		sas_ss_reset(p);
 
 	/*
 	 * Syscall tracing and stepping should be turned off in the
diff --git a/kernel/futex.c b/kernel/futex.c
index a5d2e74..c20f06f 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1295,10 +1295,20 @@
 	if (unlikely(should_fail_futex(true)))
 		ret = -EFAULT;
 
-	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
 		ret = -EFAULT;
-	else if (curval != uval)
-		ret = -EINVAL;
+	} else if (curval != uval) {
+		/*
+		 * If an unconditional UNLOCK_PI operation (user space did not
+		 * try the TID->0 transition) raced with a waiter setting the
+		 * FUTEX_WAITERS flag between get_user() and locking the hash
+		 * bucket lock, retry the operation.
+		 */
+		if ((FUTEX_TID_MASK & curval) == uval)
+			ret = -EAGAIN;
+		else
+			ret = -EINVAL;
+	}
 	if (ret) {
 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 		return ret;
@@ -1525,8 +1535,8 @@
 	if (likely(&hb1->chain != &hb2->chain)) {
 		plist_del(&q->list, &hb1->chain);
 		hb_waiters_dec(hb1);
-		plist_add(&q->list, &hb2->chain);
 		hb_waiters_inc(hb2);
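+		/*
+		 * The waiter is counted on hb2 before it becomes visible on
+		 * hb2's list, so a concurrent waker cannot miss it.
+		 */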
+		plist_add(&q->list, &hb2->chain);
 		q->lock_ptr = &hb2->lock;
 	}
 	get_futex_key_refs(key2);
@@ -2623,6 +2633,15 @@
 		if (ret == -EFAULT)
 			goto pi_faulted;
 		/*
+		 * An unconditional UNLOCK_PI op raced against a waiter
+		 * setting the FUTEX_WAITERS bit. Try again.
+		 */
+		if (ret == -EAGAIN) {
+			spin_unlock(&hb->lock);
+			put_futex_key(&key);
+			goto retry;
+		}
+		/*
 		 * wake_futex_pi has detected invalid state. Tell user
 		 * space.
 		 */
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index c37f34b..c427422 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -19,9 +19,9 @@
  *
  * Allocate a virq that can be used to send IPI to any CPU in dest mask.
  *
- * On success it'll return linux irq number and 0 on failure
+ * On success it returns the Linux irq number, or a negative error code on failure
  */
-unsigned int irq_reserve_ipi(struct irq_domain *domain,
+int irq_reserve_ipi(struct irq_domain *domain,
 			     const struct cpumask *dest)
 {
 	unsigned int nr_irqs, offset;
@@ -30,18 +30,18 @@
 
 	if (!domain ||!irq_domain_is_ipi(domain)) {
 		pr_warn("Reservation on a non IPI domain\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	if (!cpumask_subset(dest, cpu_possible_mask)) {
 		pr_warn("Reservation is not in possible_cpu_mask\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	nr_irqs = cpumask_weight(dest);
 	if (!nr_irqs) {
 		pr_warn("Reservation for empty destination mask\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	if (irq_domain_is_ipi_single(domain)) {
@@ -72,14 +72,14 @@
 			next = cpumask_next(next, dest);
 		if (next < nr_cpu_ids) {
 			pr_warn("Destination mask has holes\n");
-			return 0;
+			return -EINVAL;
 		}
 	}
 
 	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
 	if (virq <= 0) {
 		pr_warn("Can't reserve IPI, failed to alloc descs\n");
-		return 0;
+		return -ENOMEM;
 	}
 
 	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
@@ -94,22 +94,26 @@
 		data = irq_get_irq_data(virq + i);
 		cpumask_copy(data->common->affinity, dest);
 		data->common->ipi_offset = offset;
+		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
 	}
 	return virq;
 
 free_descs:
 	irq_free_descs(virq, nr_irqs);
-	return 0;
+	return -EBUSY;
 }
 
 /**
  * irq_destroy_ipi() - unreserve an IPI that was previously allocated
  * @irq:	linux irq number to be destroyed
+ * @dest:	cpumask of cpus which should have the IPI removed
  *
- * Return the IPIs allocated with irq_reserve_ipi() to the system destroying
- * all virqs associated with them.
+ * The IPIs allocated with irq_reserve_ipi() are returned to the system,
+ * destroying all virqs associated with them.
+ *
+ * Return 0 on success or error code on failure.
  */
-void irq_destroy_ipi(unsigned int irq)
+int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
 	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
@@ -117,7 +121,7 @@
 	unsigned int nr_irqs;
 
 	if (!irq || !data || !ipimask)
-		return;
+		return -EINVAL;
 
 	domain = data->domain;
 	if (WARN_ON(domain == NULL))
@@ -125,15 +129,25 @@
 
 	if (!irq_domain_is_ipi(domain)) {
 		pr_warn("Trying to destroy a non IPI domain!\n");
-		return;
+		return -EINVAL;
 	}
 
-	if (irq_domain_is_ipi_per_cpu(domain))
-		nr_irqs = cpumask_weight(ipimask);
-	else
+	if (WARN_ON(!cpumask_subset(dest, ipimask)))
+		/*
+		 * Must be destroying a subset of the CPUs this IPI was
+		 * set up to target
+		 */
+		return -EINVAL;
+
+	if (irq_domain_is_ipi_per_cpu(domain)) {
+		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
+		nr_irqs = cpumask_weight(dest);
+	} else {
 		nr_irqs = 1;
+	}
 
 	irq_domain_free_irqs(irq, nr_irqs);
+	return 0;
 }
 
 /**
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 0ccd028..8731e1c 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -595,7 +595,8 @@
 		chip_bus_sync_unlock(desc);
 }
 
-int irq_set_percpu_devid(unsigned int irq)
+int irq_set_percpu_devid_partition(unsigned int irq,
+				   const struct cpumask *affinity)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
@@ -610,10 +611,33 @@
 	if (!desc->percpu_enabled)
 		return -ENOMEM;
 
+	if (affinity)
+		desc->percpu_affinity = affinity;
+	else
+		desc->percpu_affinity = cpu_possible_mask;
+
 	irq_set_percpu_devid_flags(irq);
 	return 0;
 }
 
+int irq_set_percpu_devid(unsigned int irq)
+{
+	return irq_set_percpu_devid_partition(irq, NULL);
+}
+
+int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc || !desc->percpu_enabled)
+		return -EINVAL;
+
+	if (affinity)
+		cpumask_copy(affinity, desc->percpu_affinity);
+
+	return 0;
+}
+
 void kstat_incr_irq_this_cpu(unsigned int irq)
 {
 	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3a519a0..d65f6f3 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -243,14 +243,15 @@
 EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 
 /**
- * irq_find_matching_fwnode() - Locates a domain for a given fwnode
- * @fwnode: FW descriptor of the interrupt controller
+ * irq_find_matching_fwspec() - Locates a domain for a given fwspec
+ * @fwspec: FW specifier for an interrupt
  * @bus_token: domain-specific data
  */
-struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 					    enum irq_domain_bus_token bus_token)
 {
 	struct irq_domain *h, *found = NULL;
+	struct fwnode_handle *fwnode = fwspec->fwnode;
 	int rc;
 
 	/* We might want to match the legacy controller last since
@@ -264,7 +265,9 @@
 	 */
 	mutex_lock(&irq_domain_mutex);
 	list_for_each_entry(h, &irq_domain_list, link) {
-		if (h->ops->match)
+		if (h->ops->select && fwspec->param_count)
+			rc = h->ops->select(h, fwspec, bus_token);
+		else if (h->ops->match)
 			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
 		else
 			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
@@ -279,7 +282,7 @@
 	mutex_unlock(&irq_domain_mutex);
 	return found;
 }
-EXPORT_SYMBOL_GPL(irq_find_matching_fwnode);
+EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
 
 /**
  * irq_set_default_host() - Set a "default" irq domain
@@ -574,11 +577,9 @@
 	int virq;
 
 	if (fwspec->fwnode) {
-		domain = irq_find_matching_fwnode(fwspec->fwnode,
-						  DOMAIN_BUS_WIRED);
+		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
 		if (!domain)
-			domain = irq_find_matching_fwnode(fwspec->fwnode,
-							  DOMAIN_BUS_ANY);
+			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
 	} else {
 		domain = irq_default_domain;
 	}
@@ -1099,6 +1100,7 @@
 	}
 	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
 }
+EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
 
 /**
  * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cc1cc64..ef0bc02 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1407,7 +1407,7 @@
 	int retval;
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return -EINVAL;
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, act);
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3efbee0..a02f2dd 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -1,5 +1,6 @@
 #define pr_fmt(fmt) "kcov: " fmt
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/file.h>
@@ -43,7 +44,7 @@
  * Entry point from instrumented code.
  * This is called once per basic-block/edge.
  */
-void __sanitizer_cov_trace_pc(void)
+void notrace __sanitizer_cov_trace_pc(void)
 {
 	struct task_struct *t;
 	enum kcov_mode mode;
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 8d34308..1391d3e 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1415,6 +1415,9 @@
 	VMCOREINFO_OFFSET(page, lru);
 	VMCOREINFO_OFFSET(page, _mapcount);
 	VMCOREINFO_OFFSET(page, private);
+	VMCOREINFO_OFFSET(page, compound_dtor);
+	VMCOREINFO_OFFSET(page, compound_order);
+	VMCOREINFO_OFFSET(page, compound_head);
 	VMCOREINFO_OFFSET(pglist_data, node_zones);
 	VMCOREINFO_OFFSET(pglist_data, nr_zones);
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1447,8 +1450,8 @@
 #ifdef CONFIG_X86
 	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
 #endif
-#ifdef CONFIG_HUGETLBFS
-	VMCOREINFO_SYMBOL(free_huge_page);
+#ifdef CONFIG_HUGETLB_PAGE
+	VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
 #endif
 
 	arch_crash_save_vmcoreinfo();
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index d68fbf6..5c2bc10 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -28,6 +28,8 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/livepatch.h>
+#include <linux/elf.h>
+#include <linux/moduleloader.h>
 #include <asm/cacheflush.h>
 
 /**
@@ -204,75 +206,109 @@
 	return -EINVAL;
 }
 
-/*
- * external symbols are located outside the parent object (where the parent
- * object is either vmlinux or the kmod being patched).
- */
-static int klp_find_external_symbol(struct module *pmod, const char *name,
-				    unsigned long *addr)
+static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
 {
-	const struct kernel_symbol *sym;
-
-	/* first, check if it's an exported symbol */
-	preempt_disable();
-	sym = find_symbol(name, NULL, NULL, true, true);
-	if (sym) {
-		*addr = sym->value;
-		preempt_enable();
-		return 0;
-	}
-	preempt_enable();
+	int i, cnt, vmlinux, ret;
+	char objname[MODULE_NAME_LEN];
+	char symname[KSYM_NAME_LEN];
+	char *strtab = pmod->core_kallsyms.strtab;
+	Elf_Rela *relas;
+	Elf_Sym *sym;
+	unsigned long sympos, addr;
 
 	/*
-	 * Check if it's in another .o within the patch module. This also
-	 * checks that the external symbol is unique.
+	 * Since the field widths for objname and symname in the sscanf()
+	 * call are hard-coded and correspond to MODULE_NAME_LEN and
+	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
+	 * and KSYM_NAME_LEN have the values we expect them to have.
+	 *
+	 * Because the value of MODULE_NAME_LEN can differ among architectures,
+	 * we use the smallest/strictest upper bound possible (56, based on
+	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
 	 */
-	return klp_find_object_symbol(pmod->name, name, 0, addr);
+	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
+
+	relas = (Elf_Rela *) relasec->sh_addr;
+	/* For each rela in this klp relocation section */
+	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
+		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
+		if (sym->st_shndx != SHN_LIVEPATCH) {
+			pr_err("symbol %s is not marked as a livepatch symbol\n",
+			       strtab + sym->st_name);
+			return -EINVAL;
+		}
+
+		/* Format: .klp.sym.objname.symname,sympos */
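+		/* e.g. ".klp.sym.vmlinux.printk,0" (illustrative only) */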
+		cnt = sscanf(strtab + sym->st_name,
+			     ".klp.sym.%55[^.].%127[^,],%lu",
+			     objname, symname, &sympos);
+		if (cnt != 3) {
+			pr_err("symbol %s has an incorrectly formatted name\n",
+			       strtab + sym->st_name);
+			return -EINVAL;
+		}
+
+		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
+		vmlinux = !strcmp(objname, "vmlinux");
+		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
+					     symname, sympos, &addr);
+		if (ret)
+			return ret;
+
+		sym->st_value = addr;
+	}
+
+	return 0;
 }
 
 static int klp_write_object_relocations(struct module *pmod,
 					struct klp_object *obj)
 {
-	int ret = 0;
-	unsigned long val;
-	struct klp_reloc *reloc;
+	int i, cnt, ret = 0;
+	const char *objname, *secname;
+	char sec_objname[MODULE_NAME_LEN];
+	Elf_Shdr *sec;
 
 	if (WARN_ON(!klp_is_object_loaded(obj)))
 		return -EINVAL;
 
-	if (WARN_ON(!obj->relocs))
-		return -EINVAL;
+	objname = klp_is_module(obj) ? obj->name : "vmlinux";
 
 	module_disable_ro(pmod);
+	/* For each klp relocation section */
+	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
+		sec = pmod->klp_info->sechdrs + i;
+		secname = pmod->klp_info->secstrings + sec->sh_name;
+		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
+			continue;
 
-	for (reloc = obj->relocs; reloc->name; reloc++) {
-		/* discover the address of the referenced symbol */
-		if (reloc->external) {
-			if (reloc->sympos > 0) {
-				pr_err("non-zero sympos for external reloc symbol '%s' is not supported\n",
-				       reloc->name);
-				ret = -EINVAL;
-				goto out;
-			}
-			ret = klp_find_external_symbol(pmod, reloc->name, &val);
-		} else
-			ret = klp_find_object_symbol(obj->name,
-						     reloc->name,
-						     reloc->sympos,
-						     &val);
-		if (ret)
-			goto out;
-
-		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
-					     val + reloc->addend);
-		if (ret) {
-			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
-			       reloc->name, val, ret);
-			goto out;
+		/*
+		 * Format: .klp.rela.sec_objname.section_name
+		 * See comment in klp_resolve_symbols() for an explanation
+		 * of the selected field width value.
+		 */
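+		/* e.g. ".klp.rela.vmlinux..text" (illustrative only) */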
+		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
+		if (cnt != 1) {
+			pr_err("section %s has an incorrectly formatted name\n",
+			       secname);
+			ret = -EINVAL;
+			break;
 		}
+
+		if (strcmp(objname, sec_objname))
+			continue;
+
+		ret = klp_resolve_symbols(sec, pmod);
+		if (ret)
+			break;
+
+		ret = apply_relocate_add(pmod->klp_info->sechdrs,
+					 pmod->core_kallsyms.strtab,
+					 pmod->klp_info->symndx, i, pmod);
+		if (ret)
+			break;
 	}
 
-out:
 	module_enable_ro(pmod);
 	return ret;
 }
@@ -298,6 +334,19 @@
 	rcu_read_unlock();
 }
 
+/*
+ * Convert a function address into the appropriate ftrace location.
+ *
+ * Usually this is just the address of the function, but on some architectures
+ * it's more complicated so allow them to provide a custom behaviour.
+ */
+#ifndef klp_get_ftrace_location
+static unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+	return faddr;
+}
+#endif
+
 static void klp_disable_func(struct klp_func *func)
 {
 	struct klp_ops *ops;
@@ -312,8 +361,14 @@
 		return;
 
 	if (list_is_singular(&ops->func_stack)) {
+		unsigned long ftrace_loc;
+
+		ftrace_loc = klp_get_ftrace_location(func->old_addr);
+		if (WARN_ON(!ftrace_loc))
+			return;
+
 		WARN_ON(unregister_ftrace_function(&ops->fops));
-		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
+		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
 
 		list_del_rcu(&func->stack_node);
 		list_del(&ops->node);
@@ -338,6 +393,15 @@
 
 	ops = klp_find_ops(func->old_addr);
 	if (!ops) {
+		unsigned long ftrace_loc;
+
+		ftrace_loc = klp_get_ftrace_location(func->old_addr);
+		if (!ftrace_loc) {
+			pr_err("failed to find location for function '%s'\n",
+				func->old_name);
+			return -EINVAL;
+		}
+
 		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 		if (!ops)
 			return -ENOMEM;
@@ -352,7 +416,7 @@
 		INIT_LIST_HEAD(&ops->func_stack);
 		list_add_rcu(&func->stack_node, &ops->func_stack);
 
-		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
 		if (ret) {
 			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
 			       func->old_name, ret);
@@ -363,7 +427,7 @@
 		if (ret) {
 			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
 			       func->old_name, ret);
-			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
 			goto err;
 		}
 
@@ -683,6 +747,9 @@
 
 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 {
+	if (!func->old_name || !func->new_func)
+		return -EINVAL;
+
 	INIT_LIST_HEAD(&func->stack_node);
 	func->state = KLP_DISABLED;
 
@@ -703,11 +770,9 @@
 	struct klp_func *func;
 	int ret;
 
-	if (obj->relocs) {
-		ret = klp_write_object_relocations(patch->mod, obj);
-		if (ret)
-			return ret;
-	}
+	ret = klp_write_object_relocations(patch->mod, obj);
+	if (ret)
+		return ret;
 
 	klp_for_each_func(obj, func) {
 		ret = klp_find_object_symbol(obj->name, func->old_name,
@@ -842,12 +907,18 @@
 {
 	int ret;
 
-	if (!klp_initialized())
-		return -ENODEV;
-
 	if (!patch || !patch->mod)
 		return -EINVAL;
 
+	if (!is_livepatch_module(patch->mod)) {
+		pr_err("module %s is not marked as a livepatch module\n",
+		       patch->mod->name);
+		return -EINVAL;
+	}
+
+	if (!klp_initialized())
+		return -ENODEV;
+
 	/*
 	 * A reference is taken on the patch module to prevent it from being
 	 * unloaded.  Right now, we don't allow patch modules to unload since
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 53ab2f8..81f1a71 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -45,6 +45,7 @@
 #include <linux/bitops.h>
 #include <linux/gfp.h>
 #include <linux/kmemcheck.h>
+#include <linux/random.h>
 
 #include <asm/sections.h>
 
@@ -708,7 +709,7 @@
  * yet. Otherwise we look it up. We cache the result in the lock object
  * itself, so actual lookup of the hash should be once per lock object.
  */
-static inline struct lock_class *
+static struct lock_class *
 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
 	struct lockdep_subclass_key *key;
@@ -1999,6 +2000,79 @@
 	return ++i;
 }
 
+#ifdef CONFIG_DEBUG_LOCKDEP
+/*
+ * Returns the next chain_key iteration
+ */
+static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
+{
+	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
+
+	printk(" class_idx:%d -> chain_key:%016Lx",
+		class_idx,
+		(unsigned long long)new_chain_key);
+	return new_chain_key;
+}
+
+static void
+print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
+{
+	struct held_lock *hlock;
+	u64 chain_key = 0;
+	int depth = curr->lockdep_depth;
+	int i;
+
+	printk("depth: %u\n", depth + 1);
+	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
+		hlock = curr->held_locks + i;
+		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
+
+		print_lock(hlock);
+	}
+
+	print_chain_key_iteration(hlock_next->class_idx, chain_key);
+	print_lock(hlock_next);
+}
+
+static void print_chain_keys_chain(struct lock_chain *chain)
+{
+	int i;
+	u64 chain_key = 0;
+	int class_id;
+
+	printk("depth: %u\n", chain->depth);
+	for (i = 0; i < chain->depth; i++) {
+		class_id = chain_hlocks[chain->base + i];
+		chain_key = print_chain_key_iteration(class_id + 1, chain_key);
+
+		print_lock_name(lock_classes + class_id);
+		printk("\n");
+	}
+}
+
+static void print_collision(struct task_struct *curr,
+			struct held_lock *hlock_next,
+			struct lock_chain *chain)
+{
+	printk("\n");
+	printk("======================\n");
+	printk("[chain_key collision ]\n");
+	print_kernel_ident();
+	printk("----------------------\n");
+	printk("%s/%d: ", current->comm, task_pid_nr(current));
+	printk("Hash chain already cached but the contents don't match!\n");
+
+	printk("Held locks:");
+	print_chain_keys_held_locks(curr, hlock_next);
+
+	printk("Locks in cached chain:");
+	print_chain_keys_chain(chain);
+
+	printk("\nstack backtrace:\n");
+	dump_stack();
+}
+#endif
+
 /*
  * Checks whether the chain and the current held locks are consistent
  * in depth and also in content. If they are not it most likely means
@@ -2014,14 +2088,18 @@
 
 	i = get_first_held_lock(curr, hlock);
 
-	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1)))
+	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
+		print_collision(curr, hlock, chain);
 		return 0;
+	}
 
 	for (j = 0; j < chain->depth - 1; j++, i++) {
 		id = curr->held_locks[i].class_idx - 1;
 
-		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id))
+		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
+			print_collision(curr, hlock, chain);
 			return 0;
+		}
 	}
 #endif
 	return 1;
@@ -2099,15 +2177,37 @@
 	chain->irq_context = hlock->irq_context;
 	i = get_first_held_lock(curr, hlock);
 	chain->depth = curr->lockdep_depth + 1 - i;
+
+	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
+	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
+	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
+
 	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
 		chain->base = nr_chain_hlocks;
-		nr_chain_hlocks += chain->depth;
 		for (j = 0; j < chain->depth - 1; j++, i++) {
 			int lock_id = curr->held_locks[i].class_idx - 1;
 			chain_hlocks[chain->base + j] = lock_id;
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
 	}
+
+	if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
+		nr_chain_hlocks += chain->depth;
+
+#ifdef CONFIG_DEBUG_LOCKDEP
+	/*
+	 * Important for check_no_collision().
+	 */
+	if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
+		if (debug_locks_off_graph_unlock())
+			return 0;
+
+		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
+		dump_stack();
+		return 0;
+	}
+#endif
+
 	hlist_add_head_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(chain_lookup_misses);
 	inc_chains();
@@ -2855,6 +2955,11 @@
 	return 1;
 }
 
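+/*
+ * Two-bit encoding of the current irq context: 0 = task, 1 = softirq,
+ * 2 = hardirq, 3 = hardirq nested inside softirq.
+ */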
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+	return 2 * !!task->hardirq_context + !!task->softirq_context;
+}
+
 static int separate_irq_context(struct task_struct *curr,
 		struct held_lock *hlock)
 {
@@ -2863,8 +2968,6 @@
 	/*
 	 * Keep track of points where we cross into an interrupt context:
 	 */
-	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
-				curr->softirq_context;
 	if (depth) {
 		struct held_lock *prev_hlock;
 
@@ -2896,6 +2999,11 @@
 	return 1;
 }
 
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+	return 0;
+}
+
 static inline int separate_irq_context(struct task_struct *curr,
 		struct held_lock *hlock)
 {
@@ -3164,6 +3272,7 @@
 	hlock->acquire_ip = ip;
 	hlock->instance = lock;
 	hlock->nest_lock = nest_lock;
+	hlock->irq_context = task_irq_context(curr);
 	hlock->trylock = trylock;
 	hlock->read = read;
 	hlock->check = check;
@@ -3477,7 +3586,35 @@
 	return 0;
 }
 
-static void __lock_pin_lock(struct lockdep_map *lock)
+static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
+{
+	struct pin_cookie cookie = NIL_COOKIE;
+	struct task_struct *curr = current;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return cookie;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock)) {
+			/*
+			 * Grab 16bits of randomness; this is sufficient to not
+			 * be guessable and still allows some pin nesting in
+			 * our u32 pin_count.
+			 */
+			cookie.val = 1 + (prandom_u32() >> 16);
+			hlock->pin_count += cookie.val;
+			return cookie;
+		}
+	}
+
+	WARN(1, "pinning an unheld lock\n");
+	return cookie;
+}
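+
+/*
+ * Sketch of the intended pairing (illustrative; the dep_map member name
+ * depends on the caller's lock type):
+ *
+ *	struct pin_cookie cookie = lock_pin_lock(&lock->dep_map);
+ *	... the lock must remain held in between ...
+ *	lock_unpin_lock(&lock->dep_map, cookie);
+ */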
+
+static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3489,7 +3626,7 @@
 		struct held_lock *hlock = curr->held_locks + i;
 
 		if (match_held_lock(hlock, lock)) {
-			hlock->pin_count++;
+			hlock->pin_count += cookie.val;
 			return;
 		}
 	}
@@ -3497,7 +3634,7 @@
 	WARN(1, "pinning an unheld lock\n");
 }
 
-static void __lock_unpin_lock(struct lockdep_map *lock)
+static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3512,7 +3649,11 @@
 			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
 				return;
 
-			hlock->pin_count--;
+			hlock->pin_count -= cookie.val;
+
+			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
+				hlock->pin_count = 0;
+
 			return;
 		}
 	}
@@ -3643,24 +3784,27 @@
 }
 EXPORT_SYMBOL_GPL(lock_is_held);
 
-void lock_pin_lock(struct lockdep_map *lock)
+struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 {
+	struct pin_cookie cookie = NIL_COOKIE;
 	unsigned long flags;
 
 	if (unlikely(current->lockdep_recursion))
-		return;
+		return cookie;
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
-	__lock_pin_lock(lock);
+	cookie = __lock_pin_lock(lock);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
+
+	return cookie;
 }
 EXPORT_SYMBOL_GPL(lock_pin_lock);
 
-void lock_unpin_lock(struct lockdep_map *lock)
+void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	unsigned long flags;
 
@@ -3671,7 +3815,24 @@
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
-	__lock_unpin_lock(lock);
+	__lock_repin_lock(lock, cookie);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_repin_lock);
+
+void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	__lock_unpin_lock(lock, cookie);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
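
The pinning changes above replace a bare pin_count increment with a random cookie that the unpin side must hand back. The following stand-alone sketch (user-space C, not the kernel code; rand() stands in for prandom_u32() and stderr messages for the WARNs) illustrates the idea: an unpin with the wrong cookie leaves the count unbalanced, which the checks catch no later than the next legitimate unpin.

#include <stdio.h>
#include <stdlib.h>

struct pin_cookie { unsigned int val; };

static unsigned int pin_count;

static struct pin_cookie pin(void)
{
	struct pin_cookie cookie;

	/* 16 bits of randomness, never zero, mirroring __lock_pin_lock(). */
	cookie.val = 1 + (rand() & 0xffff);
	pin_count += cookie.val;
	return cookie;
}

static void unpin(struct pin_cookie cookie)
{
	if (!pin_count) {
		fprintf(stderr, "unpinning an unpinned lock\n");
		return;
	}
	pin_count -= cookie.val;
	if ((int)pin_count < 0) {
		fprintf(stderr, "pin count corrupted\n");
		pin_count = 0;
	}
}

int main(void)
{
	struct pin_cookie good = pin();			/* legitimate pin */
	struct pin_cookie stale = { .val = 0x1234 };	/* wrong/stale cookie */

	unpin(stale);	/* leaves the count unbalanced (or underflows right away) */
	unpin(good);	/* by now one of the two warnings has fired */
	return 0;
}
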
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index dbb61a3..a0f61ef 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -141,6 +141,8 @@
 	int i;
 
 	if (v == SEQ_START_TOKEN) {
+		if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
+			seq_printf(m, "(buggered) ");
 		seq_printf(m, "all lock chains:\n");
 		return 0;
 	}
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 8ef1919..f8c5af5 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -75,12 +75,7 @@
 	long n_lock_acquired;
 };
 
-#if defined(MODULE)
-#define LOCKTORTURE_RUNNABLE_INIT 1
-#else
-#define LOCKTORTURE_RUNNABLE_INIT 0
-#endif
-int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
+int torture_runnable = IS_ENABLED(MODULE);
 module_param(torture_runnable, int, 0444);
 MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");
 
@@ -394,12 +389,12 @@
 
 	if (!rt_task(current)) {
 		/*
-		 * (1) Boost priority once every ~50k operations. When the
+		 * Boost priority once every ~50k operations. When the
 		 * task tries to take the lock, the rtmutex will account
 		 * for the new priority, and do any corresponding pi-dance.
 		 */
-		if (!(torture_random(trsp) %
-		      (cxt.nrealwriters_stress * factor))) {
+		if (trsp && !(torture_random(trsp) %
+			      (cxt.nrealwriters_stress * factor))) {
 			policy = SCHED_FIFO;
 			param.sched_priority = MAX_RT_PRIO - 1;
 		} else /* common case, do nothing */
@@ -748,6 +743,15 @@
 	if (torture_cleanup_begin())
 		return;
 
+	/*
+	 * Indicates early cleanup, meaning that the test has not run,
+	 * such as when passing bogus args when loading the module. As
+	 * such, only perform the underlying torture-specific cleanups,
+	 * and avoid anything related to locktorture.
+	 */
+	if (!cxt.lwsa)
+		goto end;
+
 	if (writer_tasks) {
 		for (i = 0; i < cxt.nrealwriters_stress; i++)
 			torture_stop_kthread(lock_torture_writer,
@@ -776,6 +780,7 @@
 	else
 		lock_torture_print_module_parms(cxt.cur_ops,
 						"End of test: SUCCESS");
+end:
 	torture_cleanup_end();
 }
 
@@ -870,6 +875,7 @@
 			VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
 			firsterr = -ENOMEM;
 			kfree(cxt.lwsa);
+			cxt.lwsa = NULL;
 			goto unwind;
 		}
 
@@ -878,6 +884,7 @@
 			cxt.lrsa[i].n_lock_acquired = 0;
 		}
 	}
+
 	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
 
 	/* Prepare torture context. */
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index eb2a2c9..22e0253 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -136,10 +136,12 @@
 	}
 
 	if (counter == qstat_pv_hash_hops) {
-		u64 frac;
+		u64 frac = 0;
 
-		frac = 100ULL * do_div(stat, kicks);
-		frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+		if (kicks) {
+			frac = 100ULL * do_div(stat, kicks);
+			frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+		}
 
 		/*
 		 * Return a X.XX decimal number
@@ -189,8 +191,6 @@
 
 		for (i = 0 ; i < qstat_num; i++)
 			WRITE_ONCE(ptr[i], 0);
-		for (i = 0 ; i < qstat_num; i++)
-			WRITE_ONCE(ptr[i], 0);
 	}
 	return count;
 }
@@ -212,10 +212,8 @@
 	struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL);
 	int i;
 
-	if (!d_qstat) {
-		pr_warn("Could not create 'qlockstat' debugfs directory\n");
-		return 0;
-	}
+	if (!d_qstat)
+		goto out;
 
 	/*
 	 * Create the debugfs files
@@ -225,12 +223,20 @@
 	 * performance.
 	 */
 	for (i = 0; i < qstat_num; i++)
-		debugfs_create_file(qstat_names[i], 0400, d_qstat,
-				   (void *)(long)i, &fops_qstat);
+		if (!debugfs_create_file(qstat_names[i], 0400, d_qstat,
+					 (void *)(long)i, &fops_qstat))
+			goto fail_undo;
 
-	debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
-			   (void *)(long)qstat_reset_cnts, &fops_qstat);
+	if (!debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
+				 (void *)(long)qstat_reset_cnts, &fops_qstat))
+		goto fail_undo;
+
 	return 0;
+fail_undo:
+	debugfs_remove_recursive(d_qstat);
+out:
+	pr_warn("Could not create 'qlockstat' debugfs entries\n");
+	return -ENOMEM;
 }
 fs_initcall(init_qspinlock_stat);
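
The hunk above that guards the hash-hops average exists to avoid a division by zero when no vCPU kicks have been recorded. A user-space model of the resulting "X.XX" computation (hops_avg() and its callers are illustrative; in the kernel, do_div() performs the in-place division and returns the remainder in one step):

#include <assert.h>

static void hops_avg(unsigned long long stat, unsigned long long kicks,
		     unsigned long long *whole, unsigned long long *frac)
{
	*frac = 0;
	if (kicks) {
		*frac = (stat % kicks) * 100ULL;
		*frac = (*frac + kicks / 2) / kicks;	/* DIV_ROUND_CLOSEST_ULL() */
		stat /= kicks;				/* what do_div() does in place */
	}
	*whole = stat;
}

int main(void)
{
	unsigned long long w, f;

	hops_avg(7, 4, &w, &f);		/* 7/4 -> printed as "1.75" */
	assert(w == 1 && f == 75);
	hops_avg(7, 0, &w, &f);		/* kicks == 0: skip the division entirely */
	assert(w == 7 && f == 0);
	return 0;
}
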
 
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 3a50485..1591f6b 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -191,11 +191,12 @@
 /*
  * get a write lock on the semaphore
  */
-void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
+int __sched __down_write_common(struct rw_semaphore *sem, int state)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 	unsigned long flags;
+	int ret = 0;
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
@@ -215,21 +216,33 @@
 		 */
 		if (sem->count == 0)
 			break;
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (signal_pending_state(state, current)) {
+			ret = -EINTR;
+			goto out;
+		}
+		set_task_state(tsk, state);
 		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		schedule();
 		raw_spin_lock_irqsave(&sem->wait_lock, flags);
 	}
 	/* got the lock */
 	sem->count = -1;
+out:
 	list_del(&waiter.list);
 
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	return ret;
 }
 
 void __sched __down_write(struct rw_semaphore *sem)
 {
-	__down_write_nested(sem, 0);
+	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
+}
+
+int __sched __down_write_killable(struct rw_semaphore *sem)
+{
+	return __down_write_common(sem, TASK_KILLABLE);
 }
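
Once the sleep state is a parameter, both rwsem slow paths follow the same shape; a generic sketch of that pattern (condition_met(), wait_lock and the surrounding queueing are placeholders, not kernel symbols) is:

	int ret = 0;

	raw_spin_lock_irqsave(&wait_lock, flags);
	/* ... queue ourselves as a waiter ... */
	for (;;) {
		if (condition_met())			/* e.g. sem->count == 0 */
			break;
		if (signal_pending_state(state, current)) {
			ret = -EINTR;			/* fatal signal when state is TASK_KILLABLE */
			break;
		}
		set_task_state(current, state);		/* TASK_UNINTERRUPTIBLE or TASK_KILLABLE */
		raw_spin_unlock_irqrestore(&wait_lock, flags);
		schedule();
		raw_spin_lock_irqsave(&wait_lock, flags);
	}
	/* ... dequeue the waiter, take or give up the lock, unlock, return ret ... */
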
 
 /*
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index a4d4de0..09e30c62 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -433,12 +433,13 @@
 /*
  * Wait until we successfully acquire the write lock
  */
-__visible
-struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
+static inline struct rw_semaphore *
+__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 {
 	long count;
 	bool waiting = true; /* any queued threads before us */
 	struct rwsem_waiter waiter;
+	struct rw_semaphore *ret = sem;
 
 	/* undo write bias from down_write operation, stop active locking */
 	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);
@@ -478,7 +479,7 @@
 		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
 
 	/* wait until we successfully acquire the lock */
-	set_current_state(TASK_UNINTERRUPTIBLE);
+	set_current_state(state);
 	while (true) {
 		if (rwsem_try_write_lock(count, sem))
 			break;
@@ -486,21 +487,48 @@
 
 		/* Block until there are no active lockers. */
 		do {
+			if (signal_pending_state(state, current))
+				goto out_nolock;
+
 			schedule();
-			set_current_state(TASK_UNINTERRUPTIBLE);
+			set_current_state(state);
 		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
 
 		raw_spin_lock_irq(&sem->wait_lock);
 	}
 	__set_current_state(TASK_RUNNING);
-
 	list_del(&waiter.list);
 	raw_spin_unlock_irq(&sem->wait_lock);
 
-	return sem;
+	return ret;
+
+out_nolock:
+	__set_current_state(TASK_RUNNING);
+	raw_spin_lock_irq(&sem->wait_lock);
+	list_del(&waiter.list);
+	if (list_empty(&sem->wait_list))
+		rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem);
+	else
+		__rwsem_do_wake(sem, RWSEM_WAKE_ANY);
+	raw_spin_unlock_irq(&sem->wait_lock);
+
+	return ERR_PTR(-EINTR);
+}
+
+__visible struct rw_semaphore * __sched
+rwsem_down_write_failed(struct rw_semaphore *sem)
+{
+	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(rwsem_down_write_failed);
 
+__visible struct rw_semaphore * __sched
+rwsem_down_write_failed_killable(struct rw_semaphore *sem)
+{
+	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(rwsem_down_write_failed_killable);
+
 /*
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 205be0c..c817216 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -55,6 +55,25 @@
 EXPORT_SYMBOL(down_write);
 
 /*
+ * lock for writing
+ */
+int __sched down_write_killable(struct rw_semaphore *sem)
+{
+	might_sleep();
+	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+
+	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+		rwsem_release(&sem->dep_map, 1, _RET_IP_);
+		return -EINTR;
+	}
+
+	rwsem_set_owner(sem);
+	return 0;
+}
+
+EXPORT_SYMBOL(down_write_killable);
+
+/*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
 int down_write_trylock(struct rw_semaphore *sem)
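
A hypothetical caller of the new primitive (struct my_object and its fields are illustrative, not from this patch) looks like this; unlike down_write(), the return value must be checked because a fatal signal leaves the rwsem not held:

#include <linux/rwsem.h>

struct my_object {
	struct rw_semaphore rwsem;
	unsigned long generation;
};

static int update_object(struct my_object *obj)
{
	if (down_write_killable(&obj->rwsem))
		return -EINTR;		/* killed while waiting; the lock was never taken */

	obj->generation++;		/* modify state under the write lock */
	up_write(&obj->rwsem);
	return 0;
}
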
diff --git a/kernel/module.c b/kernel/module.c
index 041200c..5f71aa6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1973,6 +1973,83 @@
 static void module_disable_nx(const struct module *mod) { }
 #endif
 
+#ifdef CONFIG_LIVEPATCH
+/*
+ * Persist Elf information about a module. Copy the Elf header,
+ * section header table, section string table, and symtab section
+ * index from info to mod->klp_info.
+ */
+static int copy_module_elf(struct module *mod, struct load_info *info)
+{
+	unsigned int size, symndx;
+	int ret;
+
+	size = sizeof(*mod->klp_info);
+	mod->klp_info = kmalloc(size, GFP_KERNEL);
+	if (mod->klp_info == NULL)
+		return -ENOMEM;
+
+	/* Elf header */
+	size = sizeof(mod->klp_info->hdr);
+	memcpy(&mod->klp_info->hdr, info->hdr, size);
+
+	/* Elf section header table */
+	size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
+	mod->klp_info->sechdrs = kmalloc(size, GFP_KERNEL);
+	if (mod->klp_info->sechdrs == NULL) {
+		ret = -ENOMEM;
+		goto free_info;
+	}
+	memcpy(mod->klp_info->sechdrs, info->sechdrs, size);
+
+	/* Elf section name string table */
+	size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
+	mod->klp_info->secstrings = kmalloc(size, GFP_KERNEL);
+	if (mod->klp_info->secstrings == NULL) {
+		ret = -ENOMEM;
+		goto free_sechdrs;
+	}
+	memcpy(mod->klp_info->secstrings, info->secstrings, size);
+
+	/* Elf symbol section index */
+	symndx = info->index.sym;
+	mod->klp_info->symndx = symndx;
+
+	/*
+	 * For livepatch modules, core_kallsyms.symtab is a complete
+	 * copy of the original symbol table. Adjust sh_addr to point
+	 * to core_kallsyms.symtab since the copy of the symtab in module
+	 * init memory is freed at the end of do_init_module().
+	 */
+	mod->klp_info->sechdrs[symndx].sh_addr = \
+		(unsigned long) mod->core_kallsyms.symtab;
+
+	return 0;
+
+free_sechdrs:
+	kfree(mod->klp_info->sechdrs);
+free_info:
+	kfree(mod->klp_info);
+	return ret;
+}
+
+static void free_module_elf(struct module *mod)
+{
+	kfree(mod->klp_info->sechdrs);
+	kfree(mod->klp_info->secstrings);
+	kfree(mod->klp_info);
+}
+#else /* !CONFIG_LIVEPATCH */
+static int copy_module_elf(struct module *mod, struct load_info *info)
+{
+	return 0;
+}
+
+static void free_module_elf(struct module *mod)
+{
+}
+#endif /* CONFIG_LIVEPATCH */
+
 void __weak module_memfree(void *module_region)
 {
 	vfree(module_region);
@@ -2011,6 +2088,9 @@
 	/* Free any allocated parameters. */
 	destroy_params(mod->kp, mod->num_kp);
 
+	if (is_livepatch_module(mod))
+		free_module_elf(mod);
+
 	/* Now we can delete it from the lists */
 	mutex_lock(&module_mutex);
 	/* Unlink carefully: kallsyms could be walking list. */
@@ -2126,6 +2206,10 @@
 			       (long)sym[i].st_value);
 			break;
 
+		case SHN_LIVEPATCH:
+			/* Livepatch symbols are resolved by livepatch */
+			break;
+
 		case SHN_UNDEF:
 			ksym = resolve_symbol_wait(mod, info, name);
 			/* Ok if resolved.  */
@@ -2174,6 +2258,10 @@
 		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
 			continue;
 
+		/* Livepatch relocation sections are applied by livepatch */
+		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
+			continue;
+
 		if (info->sechdrs[i].sh_type == SHT_REL)
 			err = apply_relocate(info->sechdrs, info->strtab,
 					     info->index.sym, i, mod);
@@ -2469,7 +2557,7 @@
 
 	/* Compute total space required for the core symbols' strtab. */
 	for (ndst = i = 0; i < nsrc; i++) {
-		if (i == 0 ||
+		if (i == 0 || is_livepatch_module(mod) ||
 		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
 				   info->index.pcpu)) {
 			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
@@ -2528,7 +2616,7 @@
 	mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
 	src = mod->kallsyms->symtab;
 	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
-		if (i == 0 ||
+		if (i == 0 || is_livepatch_module(mod) ||
 		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
 				   info->index.pcpu)) {
 			dst[ndst] = src[i];
@@ -2667,6 +2755,26 @@
 	return 0;
 }
 
+#ifdef CONFIG_LIVEPATCH
+static int find_livepatch_modinfo(struct module *mod, struct load_info *info)
+{
+	mod->klp = get_modinfo(info, "livepatch") ? true : false;
+
+	return 0;
+}
+#else /* !CONFIG_LIVEPATCH */
+static int find_livepatch_modinfo(struct module *mod, struct load_info *info)
+{
+	if (get_modinfo(info, "livepatch")) {
+		pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
+		       mod->name);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_LIVEPATCH */
+
 /* Sets info->hdr and info->len. */
 static int copy_module_from_user(const void __user *umod, unsigned long len,
 				  struct load_info *info)
@@ -2821,6 +2929,10 @@
 			"is unknown, you have been warned.\n", mod->name);
 	}
 
+	err = find_livepatch_modinfo(mod, info);
+	if (err)
+		return err;
+
 	/* Set up license info based on the info section */
 	set_license(mod, get_modinfo(info, "license"));
 
@@ -3494,6 +3606,12 @@
 	if (err < 0)
 		goto coming_cleanup;
 
+	if (is_livepatch_module(mod)) {
+		err = copy_module_elf(mod, info);
+		if (err < 0)
+			goto sysfs_cleanup;
+	}
+
 	/* Get rid of temporary copy. */
 	free_copy(info);
 
@@ -3502,11 +3620,12 @@
 
 	return do_init_module(mod);
 
+ sysfs_cleanup:
+	mod_sysfs_teardown(mod);
  coming_cleanup:
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
 	klp_module_going(mod);
-
  bug_cleanup:
 	/* module_bug_cleanup needs module_mutex protection */
 	mutex_lock(&module_mutex);
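
The "livepatch" flag that find_livepatch_modinfo() looks for is the one a live patch module records in its .modinfo section. A minimal sketch of the module side (illustrative, not part of this patch):

#include <linux/module.h>

/*
 * Marks this module as a live patch: get_modinfo(info, "livepatch") in
 * the loader above then sets mod->klp, and a kernel built without
 * CONFIG_LIVEPATCH refuses to load the module with -ENOEXEC.
 */
MODULE_INFO(livepatch, "Y");
MODULE_LICENSE("GPL");
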
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index 64b9dea..937c844 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -12,7 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
-#include <keys/system_keyring.h>
+#include <linux/verification.h>
 #include <crypto/public_key.h>
 #include "module-internal.h"
 
@@ -80,6 +80,7 @@
 		return -EBADMSG;
 	}
 
-	return system_verify_data(mod, modlen, mod + modlen, sig_len,
-				  VERIFYING_MODULE_SIGNATURE);
+	return verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len,
+				      NULL, VERIFYING_MODULE_SIGNATURE,
+				      NULL, NULL);
 }
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 12cd989..160e100 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -37,6 +37,14 @@
 #define HIBERNATE_SIG	"S1SUSPEND"
 
 /*
+ * When reading an {un,}compressed image, we may restore pages in place,
+ * in which case some architectures need these pages cleaning before they
+ * can be executed. We don't know which pages these may be, so clean the lot.
+ */
+static bool clean_pages_on_read;
+static bool clean_pages_on_decompress;
+
+/*
  *	The swap map is a data structure used for keeping track of each page
  *	written to a swap partition.  It consists of many swap_map_page
  *	structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
@@ -241,6 +249,9 @@
 
 	if (bio_data_dir(bio) == WRITE)
 		put_page(page);
+	else if (clean_pages_on_read)
+		flush_icache_range((unsigned long)page_address(page),
+				   (unsigned long)page_address(page) + PAGE_SIZE);
 
 	if (bio->bi_error && !hb->error)
 		hb->error = bio->bi_error;
@@ -1049,6 +1060,7 @@
 
 	hib_init_batch(&hb);
 
+	clean_pages_on_read = true;
 	printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
 		nr_to_read);
 	m = nr_to_read / 10;
@@ -1124,6 +1136,10 @@
 		d->unc_len = LZO_UNC_SIZE;
 		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
 		                               d->unc, &d->unc_len);
+		if (clean_pages_on_decompress)
+			flush_icache_range((unsigned long)d->unc,
+					   (unsigned long)d->unc + d->unc_len);
+
 		atomic_set(&d->stop, 1);
 		wake_up(&d->done);
 	}
@@ -1189,6 +1205,8 @@
 	}
 	memset(crc, 0, offsetof(struct crc_data, go));
 
+	clean_pages_on_decompress = true;
+
 	/*
 	 * Start the decompression threads.
 	 */
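
The two flags above exist because pages restored in place may contain code, and on architectures with split, non-coherent instruction and data caches the freshly written bytes must be made visible to the I-side before they can be executed. Taken out of the I/O and decompression paths, the pattern is simply (a fragment, with 'page' assumed to be a page just filled with restored data):

	void *va = page_address(page);

	/* Effectively a no-op on x86; required on e.g. ARM. */
	flush_icache_range((unsigned long)va, (unsigned long)va + PAGE_SIZE);
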
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index 032b2c0..18dfc48 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -5,6 +5,7 @@
 obj-y += update.o sync.o
 obj-$(CONFIG_SRCU) += srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
+obj-$(CONFIG_RCU_PERF_TEST) += rcuperf.o
 obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_PREEMPT_RCU) += tree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
new file mode 100644
index 0000000..3cee0d8
--- /dev/null
+++ b/kernel/rcu/rcuperf.c
@@ -0,0 +1,655 @@
+/*
+ * Read-Copy Update module-based performance-test facility
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2015
+ *
+ * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/srcu.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/torture.h>
+#include <linux/vmalloc.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
+
+#define PERF_FLAG "-perf:"
+#define PERFOUT_STRING(s) \
+	pr_alert("%s" PERF_FLAG s "\n", perf_type)
+#define VERBOSE_PERFOUT_STRING(s) \
+	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
+#define VERBOSE_PERFOUT_ERRSTRING(s) \
+	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
+
+torture_param(bool, gp_exp, true, "Use expedited GP wait primitives");
+torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
+torture_param(int, nreaders, -1, "Number of RCU reader threads");
+torture_param(int, nwriters, -1, "Number of RCU updater threads");
+torture_param(bool, shutdown, false, "Shutdown at end of performance tests.");
+torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
+
+static char *perf_type = "rcu";
+module_param(perf_type, charp, 0444);
+MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, rcu_bh, ...)");
+
+static int nrealreaders;
+static int nrealwriters;
+static struct task_struct **writer_tasks;
+static struct task_struct **reader_tasks;
+static struct task_struct *shutdown_task;
+
+static u64 **writer_durations;
+static int *writer_n_durations;
+static atomic_t n_rcu_perf_reader_started;
+static atomic_t n_rcu_perf_writer_started;
+static atomic_t n_rcu_perf_writer_finished;
+static wait_queue_head_t shutdown_wq;
+static u64 t_rcu_perf_writer_started;
+static u64 t_rcu_perf_writer_finished;
+static unsigned long b_rcu_perf_writer_started;
+static unsigned long b_rcu_perf_writer_finished;
+
+static int rcu_perf_writer_state;
+#define RTWS_INIT		0
+#define RTWS_EXP_SYNC		1
+#define RTWS_SYNC		2
+#define RTWS_IDLE		2
+#define RTWS_STOPPING		3
+
+#define MAX_MEAS 10000
+#define MIN_MEAS 100
+
+#if defined(MODULE) || defined(CONFIG_RCU_PERF_TEST_RUNNABLE)
+#define RCUPERF_RUNNABLE_INIT 1
+#else
+#define RCUPERF_RUNNABLE_INIT 0
+#endif
+static int perf_runnable = RCUPERF_RUNNABLE_INIT;
+module_param(perf_runnable, int, 0444);
+MODULE_PARM_DESC(perf_runnable, "Start rcuperf at boot");
+
+/*
+ * Operations vector for selecting different types of tests.
+ */
+
+struct rcu_perf_ops {
+	int ptype;
+	void (*init)(void);
+	void (*cleanup)(void);
+	int (*readlock)(void);
+	void (*readunlock)(int idx);
+	unsigned long (*started)(void);
+	unsigned long (*completed)(void);
+	unsigned long (*exp_completed)(void);
+	void (*sync)(void);
+	void (*exp_sync)(void);
+	const char *name;
+};
+
+static struct rcu_perf_ops *cur_ops;
+
+/*
+ * Definitions for rcu perf testing.
+ */
+
+static int rcu_perf_read_lock(void) __acquires(RCU)
+{
+	rcu_read_lock();
+	return 0;
+}
+
+static void rcu_perf_read_unlock(int idx) __releases(RCU)
+{
+	rcu_read_unlock();
+}
+
+static unsigned long __maybe_unused rcu_no_completed(void)
+{
+	return 0;
+}
+
+static void rcu_sync_perf_init(void)
+{
+}
+
+static struct rcu_perf_ops rcu_ops = {
+	.ptype		= RCU_FLAVOR,
+	.init		= rcu_sync_perf_init,
+	.readlock	= rcu_perf_read_lock,
+	.readunlock	= rcu_perf_read_unlock,
+	.started	= rcu_batches_started,
+	.completed	= rcu_batches_completed,
+	.exp_completed	= rcu_exp_batches_completed,
+	.sync		= synchronize_rcu,
+	.exp_sync	= synchronize_rcu_expedited,
+	.name		= "rcu"
+};
+
+/*
+ * Definitions for rcu_bh perf testing.
+ */
+
+static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
+{
+	rcu_read_lock_bh();
+	return 0;
+}
+
+static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
+{
+	rcu_read_unlock_bh();
+}
+
+static struct rcu_perf_ops rcu_bh_ops = {
+	.ptype		= RCU_BH_FLAVOR,
+	.init		= rcu_sync_perf_init,
+	.readlock	= rcu_bh_perf_read_lock,
+	.readunlock	= rcu_bh_perf_read_unlock,
+	.started	= rcu_batches_started_bh,
+	.completed	= rcu_batches_completed_bh,
+	.exp_completed	= rcu_exp_batches_completed_sched,
+	.sync		= synchronize_rcu_bh,
+	.exp_sync	= synchronize_rcu_bh_expedited,
+	.name		= "rcu_bh"
+};
+
+/*
+ * Definitions for srcu perf testing.
+ */
+
+DEFINE_STATIC_SRCU(srcu_ctl_perf);
+static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;
+
+static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
+{
+	return srcu_read_lock(srcu_ctlp);
+}
+
+static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
+{
+	srcu_read_unlock(srcu_ctlp, idx);
+}
+
+static unsigned long srcu_perf_completed(void)
+{
+	return srcu_batches_completed(srcu_ctlp);
+}
+
+static void srcu_perf_synchronize(void)
+{
+	synchronize_srcu(srcu_ctlp);
+}
+
+static void srcu_perf_synchronize_expedited(void)
+{
+	synchronize_srcu_expedited(srcu_ctlp);
+}
+
+static struct rcu_perf_ops srcu_ops = {
+	.ptype		= SRCU_FLAVOR,
+	.init		= rcu_sync_perf_init,
+	.readlock	= srcu_perf_read_lock,
+	.readunlock	= srcu_perf_read_unlock,
+	.started	= NULL,
+	.completed	= srcu_perf_completed,
+	.exp_completed	= srcu_perf_completed,
+	.sync		= srcu_perf_synchronize,
+	.exp_sync	= srcu_perf_synchronize_expedited,
+	.name		= "srcu"
+};
+
+/*
+ * Definitions for sched perf testing.
+ */
+
+static int sched_perf_read_lock(void)
+{
+	preempt_disable();
+	return 0;
+}
+
+static void sched_perf_read_unlock(int idx)
+{
+	preempt_enable();
+}
+
+static struct rcu_perf_ops sched_ops = {
+	.ptype		= RCU_SCHED_FLAVOR,
+	.init		= rcu_sync_perf_init,
+	.readlock	= sched_perf_read_lock,
+	.readunlock	= sched_perf_read_unlock,
+	.started	= rcu_batches_started_sched,
+	.completed	= rcu_batches_completed_sched,
+	.exp_completed	= rcu_exp_batches_completed_sched,
+	.sync		= synchronize_sched,
+	.exp_sync	= synchronize_sched_expedited,
+	.name		= "sched"
+};
+
+#ifdef CONFIG_TASKS_RCU
+
+/*
+ * Definitions for RCU-tasks perf testing.
+ */
+
+static int tasks_perf_read_lock(void)
+{
+	return 0;
+}
+
+static void tasks_perf_read_unlock(int idx)
+{
+}
+
+static struct rcu_perf_ops tasks_ops = {
+	.ptype		= RCU_TASKS_FLAVOR,
+	.init		= rcu_sync_perf_init,
+	.readlock	= tasks_perf_read_lock,
+	.readunlock	= tasks_perf_read_unlock,
+	.started	= rcu_no_completed,
+	.completed	= rcu_no_completed,
+	.sync		= synchronize_rcu_tasks,
+	.exp_sync	= synchronize_rcu_tasks,
+	.name		= "tasks"
+};
+
+#define RCUPERF_TASKS_OPS &tasks_ops,
+
+static bool __maybe_unused torturing_tasks(void)
+{
+	return cur_ops == &tasks_ops;
+}
+
+#else /* #ifdef CONFIG_TASKS_RCU */
+
+#define RCUPERF_TASKS_OPS
+
+static bool __maybe_unused torturing_tasks(void)
+{
+	return false;
+}
+
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
+/*
+ * If performance tests complete, wait for shutdown to commence.
+ */
+static void rcu_perf_wait_shutdown(void)
+{
+	cond_resched_rcu_qs();
+	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
+		return;
+	while (!torture_must_stop())
+		schedule_timeout_uninterruptible(1);
+}
+
+/*
+ * RCU perf reader kthread.  Repeatedly does an empty RCU read-side

+ * critical section, minimizing update-side interference.
+ */
+static int
+rcu_perf_reader(void *arg)
+{
+	unsigned long flags;
+	int idx;
+	long me = (long)arg;
+
+	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
+	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+	set_user_nice(current, MAX_NICE);
+	atomic_inc(&n_rcu_perf_reader_started);
+
+	do {
+		local_irq_save(flags);
+		idx = cur_ops->readlock();
+		cur_ops->readunlock(idx);
+		local_irq_restore(flags);
+		rcu_perf_wait_shutdown();
+	} while (!torture_must_stop());
+	torture_kthread_stopping("rcu_perf_reader");
+	return 0;
+}
+
+/*
+ * RCU perf writer kthread.  Repeatedly does a grace period.
+ */
+static int
+rcu_perf_writer(void *arg)
+{
+	int i = 0;
+	int i_max;
+	long me = (long)arg;
+	struct sched_param sp;
+	bool started = false, done = false, alldone = false;
+	u64 t;
+	u64 *wdp;
+	u64 *wdpp = writer_durations[me];
+
+	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
+	WARN_ON(rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp);
+	WARN_ON(rcu_gp_is_normal() && gp_exp);
+	WARN_ON(!wdpp);
+	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+	sp.sched_priority = 1;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+
+	if (holdoff)
+		schedule_timeout_uninterruptible(holdoff * HZ);
+
+	t = ktime_get_mono_fast_ns();
+	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
+		t_rcu_perf_writer_started = t;
+		if (gp_exp) {
+			b_rcu_perf_writer_started =
+				cur_ops->exp_completed() / 2;
+		} else {
+			b_rcu_perf_writer_started =
+				cur_ops->completed();
+		}
+	}
+
+	do {
+		wdp = &wdpp[i];
+		*wdp = ktime_get_mono_fast_ns();
+		if (gp_exp) {
+			rcu_perf_writer_state = RTWS_EXP_SYNC;
+			cur_ops->exp_sync();
+		} else {
+			rcu_perf_writer_state = RTWS_SYNC;
+			cur_ops->sync();
+		}
+		rcu_perf_writer_state = RTWS_IDLE;
+		t = ktime_get_mono_fast_ns();
+		*wdp = t - *wdp;
+		i_max = i;
+		if (!started &&
+		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
+			started = true;
+		if (!done && i >= MIN_MEAS) {
+			done = true;
+			sp.sched_priority = 0;
+			sched_setscheduler_nocheck(current,
+						   SCHED_NORMAL, &sp);
+			pr_alert("%s" PERF_FLAG
+				 "rcu_perf_writer %ld has %d measurements\n",
+				 perf_type, me, MIN_MEAS);
+			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
+			    nrealwriters) {
+				schedule_timeout_interruptible(10);
+				rcu_ftrace_dump(DUMP_ALL);
+				PERFOUT_STRING("Test complete");
+				t_rcu_perf_writer_finished = t;
+				if (gp_exp) {
+					b_rcu_perf_writer_finished =
+						cur_ops->exp_completed() / 2;
+				} else {
+					b_rcu_perf_writer_finished =
+						cur_ops->completed();
+				}
+				if (shutdown) {
+					smp_mb(); /* Assign before wake. */
+					wake_up(&shutdown_wq);
+				}
+			}
+		}
+		if (done && !alldone &&
+		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
+			alldone = true;
+		if (started && !alldone && i < MAX_MEAS - 1)
+			i++;
+		rcu_perf_wait_shutdown();
+	} while (!torture_must_stop());
+	rcu_perf_writer_state = RTWS_STOPPING;
+	writer_n_durations[me] = i_max;
+	torture_kthread_stopping("rcu_perf_writer");
+	return 0;
+}
+
+static inline void
+rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
+{
+	pr_alert("%s" PERF_FLAG
+		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
+		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
+}
+
+static void
+rcu_perf_cleanup(void)
+{
+	int i;
+	int j;
+	int ngps = 0;
+	u64 *wdp;
+	u64 *wdpp;
+
+	if (torture_cleanup_begin())
+		return;
+
+	if (reader_tasks) {
+		for (i = 0; i < nrealreaders; i++)
+			torture_stop_kthread(rcu_perf_reader,
+					     reader_tasks[i]);
+		kfree(reader_tasks);
+	}
+
+	if (writer_tasks) {
+		for (i = 0; i < nrealwriters; i++) {
+			torture_stop_kthread(rcu_perf_writer,
+					     writer_tasks[i]);
+			if (!writer_n_durations)
+				continue;
+			j = writer_n_durations[i];
+			pr_alert("%s%s writer %d gps: %d\n",
+				 perf_type, PERF_FLAG, i, j);
+			ngps += j;
+		}
+		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
+			 perf_type, PERF_FLAG,
+			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
+			 t_rcu_perf_writer_finished -
+			 t_rcu_perf_writer_started,
+			 ngps,
+			 b_rcu_perf_writer_finished -
+			 b_rcu_perf_writer_started);
+		for (i = 0; i < nrealwriters; i++) {
+			if (!writer_durations)
+				break;
+			if (!writer_n_durations)
+				continue;
+			wdpp = writer_durations[i];
+			if (!wdpp)
+				continue;
+			for (j = 0; j <= writer_n_durations[i]; j++) {
+				wdp = &wdpp[j];
+				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
+					perf_type, PERF_FLAG,
+					i, j, *wdp);
+				if (j % 100 == 0)
+					schedule_timeout_uninterruptible(1);
+			}
+			kfree(writer_durations[i]);
+		}
+		kfree(writer_tasks);
+		kfree(writer_durations);
+		kfree(writer_n_durations);
+	}
+
+	/* Do flavor-specific cleanup operations.  */
+	if (cur_ops->cleanup != NULL)
+		cur_ops->cleanup();
+
+	torture_cleanup_end();
+}
+
+/*
+ * Return the number if non-negative.  If -1, the number of CPUs.
+ * If less than -1, that much less than the number of CPUs, but
+ * at least one.
+ */
+static int compute_real(int n)
+{
+	int nr;
+
+	if (n >= 0) {
+		nr = n;
+	} else {
+		nr = num_online_cpus() + 1 + n;
+		if (nr <= 0)
+			nr = 1;
+	}
+	return nr;
+}
+
+/*
+ * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
+ * down the system.
+ */
+static int
+rcu_perf_shutdown(void *arg)
+{
+	do {
+		wait_event(shutdown_wq,
+			   atomic_read(&n_rcu_perf_writer_finished) >=
+			   nrealwriters);
+	} while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
+	smp_mb(); /* Wake before output. */
+	rcu_perf_cleanup();
+	kernel_power_off();
+	return -EINVAL;
+}
+
+static int __init
+rcu_perf_init(void)
+{
+	long i;
+	int firsterr = 0;
+	static struct rcu_perf_ops *perf_ops[] = {
+		&rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
+		RCUPERF_TASKS_OPS
+	};
+
+	if (!torture_init_begin(perf_type, verbose, &perf_runnable))
+		return -EBUSY;
+
+	/* Process args and tell the world that the perf'er is on the job. */
+	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
+		cur_ops = perf_ops[i];
+		if (strcmp(perf_type, cur_ops->name) == 0)
+			break;
+	}
+	if (i == ARRAY_SIZE(perf_ops)) {
+		pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
+			 perf_type);
+		pr_alert("rcu-perf types:");
+		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
+			pr_alert(" %s", perf_ops[i]->name);
+		pr_alert("\n");
+		firsterr = -EINVAL;
+		goto unwind;
+	}
+	if (cur_ops->init)
+		cur_ops->init();
+
+	nrealwriters = compute_real(nwriters);
+	nrealreaders = compute_real(nreaders);
+	atomic_set(&n_rcu_perf_reader_started, 0);
+	atomic_set(&n_rcu_perf_writer_started, 0);
+	atomic_set(&n_rcu_perf_writer_finished, 0);
+	rcu_perf_print_module_parms(cur_ops, "Start of test");
+
+	/* Start up the kthreads. */
+
+	if (shutdown) {
+		init_waitqueue_head(&shutdown_wq);
+		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
+						  shutdown_task);
+		if (firsterr)
+			goto unwind;
+		schedule_timeout_uninterruptible(1);
+	}
+	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
+			       GFP_KERNEL);
+	if (reader_tasks == NULL) {
+		VERBOSE_PERFOUT_ERRSTRING("out of memory");
+		firsterr = -ENOMEM;
+		goto unwind;
+	}
+	for (i = 0; i < nrealreaders; i++) {
+		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
+						  reader_tasks[i]);
+		if (firsterr)
+			goto unwind;
+	}
+	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
+		schedule_timeout_uninterruptible(1);
+	writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]),
+			       GFP_KERNEL);
+	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
+				   GFP_KERNEL);
+	writer_n_durations =
+		kcalloc(nrealwriters, sizeof(*writer_n_durations),
+			GFP_KERNEL);
+	if (!writer_tasks || !writer_durations || !writer_n_durations) {
+		VERBOSE_PERFOUT_ERRSTRING("out of memory");
+		firsterr = -ENOMEM;
+		goto unwind;
+	}
+	for (i = 0; i < nrealwriters; i++) {
+		writer_durations[i] =
+			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
+				GFP_KERNEL);
+		if (!writer_durations[i])
+			goto unwind;
+		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
+						  writer_tasks[i]);
+		if (firsterr)
+			goto unwind;
+	}
+	torture_init_end();
+	return 0;
+
+unwind:
+	torture_init_end();
+	rcu_perf_cleanup();
+	return firsterr;
+}
+
+module_init(rcu_perf_init);
+module_exit(rcu_perf_cleanup);
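
The nreaders/nwriters parameters above default to -1, which compute_real() turns into one kthread per online CPU; more-negative values mean that many fewer than the CPU count, clamped to one. A stand-alone check of that mapping (user-space sketch, with num_online_cpus() stubbed to 8):

#include <assert.h>

static int num_online_cpus(void) { return 8; }

static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}

int main(void)
{
	assert(compute_real(4) == 4);	/* explicit count */
	assert(compute_real(-1) == 8);	/* one per CPU */
	assert(compute_real(-3) == 6);	/* CPUs minus two */
	assert(compute_real(-9) == 1);	/* clamped to one */
	return 0;
}
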
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 250ea67..084a28a 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -130,8 +130,8 @@
 static unsigned long rcu_torture_current_version;
 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
 static DEFINE_SPINLOCK(rcu_torture_lock);
-static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = { 0 };
-static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = { 0 };
+static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
+static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
 static atomic_t n_rcu_torture_alloc;
 static atomic_t n_rcu_torture_alloc_fail;
@@ -916,7 +916,7 @@
 static int
 rcu_torture_writer(void *arg)
 {
-	bool can_expedite = !rcu_gp_is_expedited();
+	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
 	int expediting = 0;
 	unsigned long gp_snap;
 	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
@@ -932,7 +932,7 @@
 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
 	if (!can_expedite) {
 		pr_alert("%s" TORTURE_FLAG
-			 " Grace periods expedited from boot/sysfs for %s,\n",
+			 " GP expediting controlled from boot/sysfs for %s,\n",
 			 torture_type, cur_ops->name);
 		pr_alert("%s" TORTURE_FLAG
 			 " Disabled dynamic grace-period expediting.\n",
@@ -1082,17 +1082,6 @@
 	return 0;
 }
 
-static void rcutorture_trace_dump(void)
-{
-	static atomic_t beenhere = ATOMIC_INIT(0);
-
-	if (atomic_read(&beenhere))
-		return;
-	if (atomic_xchg(&beenhere, 1) != 0)
-		return;
-	ftrace_dump(DUMP_ALL);
-}
-
 /*
  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
  * incrementing the corresponding element of the pipeline array.  The
@@ -1142,7 +1131,7 @@
 	if (pipe_count > 1) {
 		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
 					  started, completed);
-		rcutorture_trace_dump();
+		rcu_ftrace_dump(DUMP_ALL);
 	}
 	__this_cpu_inc(rcu_torture_count[pipe_count]);
 	completed = completed - started;
@@ -1215,7 +1204,7 @@
 		if (pipe_count > 1) {
 			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
 						  ts, started, completed);
-			rcutorture_trace_dump();
+			rcu_ftrace_dump(DUMP_ALL);
 		}
 		__this_cpu_inc(rcu_torture_count[pipe_count]);
 		completed = completed - started;
@@ -1333,7 +1322,7 @@
 			 rcu_torture_writer_state,
 			 gpnum, completed, flags);
 		show_rcu_gp_kthreads();
-		rcutorture_trace_dump();
+		rcu_ftrace_dump(DUMP_ALL);
 	}
 	rtcv_snap = rcu_torture_current_version;
 }
@@ -1489,7 +1478,9 @@
 		 * The above smp_load_acquire() ensures barrier_phase load
 		 * is ordered before the following ->call().
 		 */
+		local_irq_disable(); /* Just to test no-irq call_rcu(). */
 		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
+		local_irq_enable();
 		if (atomic_dec_and_test(&barrier_cbs_count))
 			wake_up(&barrier_wq);
 	} while (!torture_must_stop());
@@ -1596,7 +1587,7 @@
 {
 	long cpu = (long)hcpu;
 
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
 		(void)rcutorture_booster_init(cpu);
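
The notifier fix above matters during suspend/resume, when callbacks arrive as the _FROZEN variants; those are defined as the ordinary action with CPU_TASKS_FROZEN or'ed in, so stripping that bit lets one case label cover both. Schematically (a fragment mirroring the notifier above):

	/* <linux/cpu.h>: CPU_ONLINE_FROZEN == (CPU_ONLINE | CPU_TASKS_FROZEN) */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:		/* handles CPU_ONLINE and CPU_ONLINE_FROZEN */
	case CPU_DOWN_FAILED:		/* likewise CPU_DOWN_FAILED_FROZEN */
		(void)rcutorture_booster_init(cpu);
		break;
	}
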
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9a535a8..c7f1bc4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -102,6 +102,8 @@
 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
 	.name = RCU_STATE_NAME(sname), \
 	.abbr = sabbr, \
+	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
+	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
 }
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -370,6 +372,21 @@
 		rcu_momentary_dyntick_idle();
 		local_irq_restore(flags);
 	}
+	if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))) {
+		/*
+		 * Yes, we just checked a per-CPU variable with preemption
+		 * enabled, so we might be migrated to some other CPU at
+		 * this point.  That is OK because in that case, the
+		 * migration will supply the needed quiescent state.
+		 * We might end up needlessly disabling preemption and
+		 * invoking rcu_sched_qs() on the destination CPU, but
+		 * the probability and cost are both quite low, so this
+		 * should not be a problem in practice.
+		 */
+		preempt_disable();
+		rcu_sched_qs();
+		preempt_enable();
+	}
 	this_cpu_inc(rcu_qs_ctr);
 	barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
@@ -385,9 +402,11 @@
 
 static ulong jiffies_till_first_fqs = ULONG_MAX;
 static ulong jiffies_till_next_fqs = ULONG_MAX;
+static bool rcu_kick_kthreads;
 
 module_param(jiffies_till_first_fqs, ulong, 0644);
 module_param(jiffies_till_next_fqs, ulong, 0644);
+module_param(rcu_kick_kthreads, bool, 0644);
 
 /*
  * How long the grace period must be before we start recruiting
@@ -460,6 +479,28 @@
 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 
 /*
+ * Return the number of RCU expedited batches completed thus far for
+ * debug & stats.  Odd numbers mean that a batch is in progress, even
+ * numbers mean idle.  The value returned will thus be roughly double
+ * the cumulative batches since boot.
+ */
+unsigned long rcu_exp_batches_completed(void)
+{
+	return rcu_state_p->expedited_sequence;
+}
+EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
+
+/*
+ * Return the number of RCU-sched expedited batches completed thus far
+ * for debug & stats.  Similar to rcu_exp_batches_completed().
+ */
+unsigned long rcu_exp_batches_completed_sched(void)
+{
+	return rcu_sched_state.expedited_sequence;
+}
+EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
+
+/*
  * Force a quiescent state.
  */
 void rcu_force_quiescent_state(void)
@@ -637,7 +678,7 @@
 			idle_task(smp_processor_id());
 
 		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
-		ftrace_dump(DUMP_ORIG);
+		rcu_ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
@@ -799,7 +840,7 @@
 
 		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
 				  oldval, rdtp->dynticks_nesting);
-		ftrace_dump(DUMP_ORIG);
+		rcu_ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
@@ -1224,8 +1265,10 @@
 		       rsp->gp_flags,
 		       gp_state_getname(rsp->gp_state), rsp->gp_state,
 		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
-		if (rsp->gp_kthread)
+		if (rsp->gp_kthread) {
 			sched_show_task(rsp->gp_kthread);
+			wake_up_process(rsp->gp_kthread);
+		}
 	}
 }
 
@@ -1249,6 +1292,25 @@
 	}
 }
 
+/*
+ * If too much time has passed in the current grace period, and if
+ * so configured, go kick the relevant kthreads.
+ */
+static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
+{
+	unsigned long j;
+
+	if (!rcu_kick_kthreads)
+		return;
+	j = READ_ONCE(rsp->jiffies_kick_kthreads);
+	if (time_after(jiffies, j) && rsp->gp_kthread) {
+		WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
+		rcu_ftrace_dump(DUMP_ALL);
+		wake_up_process(rsp->gp_kthread);
+		WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
+	}
+}
+
 static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 {
 	int cpu;
@@ -1260,6 +1322,11 @@
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	long totqlen = 0;
 
+	/* Kick and suppress, if so configured. */
+	rcu_stall_kick_kthreads(rsp);
+	if (rcu_cpu_stall_suppress)
+		return;
+
 	/* Only let one CPU complain about others per time interval. */
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1333,6 +1400,11 @@
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	long totqlen = 0;
 
+	/* Kick and suppress, if so configured. */
+	rcu_stall_kick_kthreads(rsp);
+	if (rcu_cpu_stall_suppress)
+		return;
+
 	/*
 	 * OK, time to rat on ourselves...
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
@@ -1377,8 +1449,10 @@
 	unsigned long js;
 	struct rcu_node *rnp;
 
-	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
+	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
+	    !rcu_gp_in_progress(rsp))
 		return;
+	rcu_stall_kick_kthreads(rsp);
 	j = jiffies;
 
 	/*
@@ -2117,8 +2191,11 @@
 		}
 		ret = 0;
 		for (;;) {
-			if (!ret)
+			if (!ret) {
 				rsp->jiffies_force_qs = jiffies + j;
+				WRITE_ONCE(rsp->jiffies_kick_kthreads,
+					   jiffies + 3 * j);
+			}
 			trace_rcu_grace_period(rsp->name,
 					       READ_ONCE(rsp->gpnum),
 					       TPS("fqswait"));
@@ -2144,6 +2221,15 @@
 						       TPS("fqsend"));
 				cond_resched_rcu_qs();
 				WRITE_ONCE(rsp->gp_activity, jiffies);
+				ret = 0; /* Force full wait till next FQS. */
+				j = jiffies_till_next_fqs;
+				if (j > HZ) {
+					j = HZ;
+					jiffies_till_next_fqs = HZ;
+				} else if (j < 1) {
+					j = 1;
+					jiffies_till_next_fqs = 1;
+				}
 			} else {
 				/* Deal with stray signal. */
 				cond_resched_rcu_qs();
@@ -2152,14 +2238,12 @@
 				trace_rcu_grace_period(rsp->name,
 						       READ_ONCE(rsp->gpnum),
 						       TPS("fqswaitsig"));
-			}
-			j = jiffies_till_next_fqs;
-			if (j > HZ) {
-				j = HZ;
-				jiffies_till_next_fqs = HZ;
-			} else if (j < 1) {
-				j = 1;
-				jiffies_till_next_fqs = 1;
+				ret = 1; /* Keep old FQS timing. */
+				j = jiffies;
+				if (time_after(jiffies, rsp->jiffies_force_qs))
+					j = 1;
+				else
+					j = rsp->jiffies_force_qs - j;
 			}
 		}
 
@@ -3376,8 +3460,12 @@
 }
 static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
 {
+	unsigned long s;
+
 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
-	return rcu_seq_snap(&rsp->expedited_sequence);
+	s = rcu_seq_snap(&rsp->expedited_sequence);
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
+	return s;
 }
 static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
 {
@@ -3469,7 +3557,7 @@
  * for the current expedited grace period.  Works only for preemptible
  * RCU -- other RCU implementation use other means.
  *
- * Caller must hold the root rcu_node's exp_funnel_mutex.
+ * Caller must hold the rcu_state's exp_mutex.
  */
 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
@@ -3485,8 +3573,8 @@
  * recursively up the tree.  (Calm down, calm down, we do the recursion
  * iteratively!)
  *
- * Caller must hold the root rcu_node's exp_funnel_mutex and the
- * specified rcu_node structure's ->lock.
+ * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
+ * structure's ->lock.
  */
 static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 				 bool wake, unsigned long flags)
@@ -3523,7 +3611,7 @@
  * Report expedited quiescent state for specified node.  This is a
  * lock-acquisition wrapper function for __rcu_report_exp_rnp().
  *
- * Caller must hold the root rcu_node's exp_funnel_mutex.
+ * Caller must hold the rcu_state's exp_mutex.
  */
 static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
 					      struct rcu_node *rnp, bool wake)
@@ -3536,8 +3624,8 @@
 
 /*
  * Report expedited quiescent state for multiple CPUs, all covered by the
- * specified leaf rcu_node structure.  Caller must hold the root
- * rcu_node's exp_funnel_mutex.
+ * specified leaf rcu_node structure.  Caller must hold the rcu_state's
+ * exp_mutex.
  */
 static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 				    unsigned long mask, bool wake)
@@ -3555,7 +3643,6 @@
 
 /*
  * Report expedited quiescent state for specified rcu_data (CPU).
- * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
 			       bool wake)
@@ -3564,15 +3651,11 @@
 }
 
 /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
-static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
-			       struct rcu_data *rdp,
-			       atomic_long_t *stat, unsigned long s)
+static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
+			       unsigned long s)
 {
 	if (rcu_exp_gp_seq_done(rsp, s)) {
-		if (rnp)
-			mutex_unlock(&rnp->exp_funnel_mutex);
-		else if (rdp)
-			mutex_unlock(&rdp->exp_funnel_mutex);
+		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
 		/* Ensure test happens before caller kfree(). */
 		smp_mb__before_atomic(); /* ^^^ */
 		atomic_long_inc(stat);
@@ -3582,59 +3665,65 @@
 }
 
 /*
- * Funnel-lock acquisition for expedited grace periods.  Returns a
- * pointer to the root rcu_node structure, or NULL if some other
- * task did the expedited grace period for us.
+ * Funnel-lock acquisition for expedited grace periods.  Returns true
+ * if some other task completed an expedited grace period that this task
+ * can piggy-back on, and with no mutex held.  Otherwise, returns false
+ * with the mutex held, indicating that the caller must actually do the
+ * expedited grace period.
  */
-static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
+static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 {
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
-	struct rcu_node *rnp0;
-	struct rcu_node *rnp1 = NULL;
+	struct rcu_node *rnp = rdp->mynode;
+	struct rcu_node *rnp_root = rcu_get_root(rsp);
+
+	/* Low-contention fastpath. */
+	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
+	    (rnp == rnp_root ||
+	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
+	    !mutex_is_locked(&rsp->exp_mutex) &&
+	    mutex_trylock(&rsp->exp_mutex))
+		goto fastpath;
 
 	/*
-	 * First try directly acquiring the root lock in order to reduce
-	 * latency in the common case where expedited grace periods are
-	 * rare.  We check mutex_is_locked() to avoid pathological levels of
-	 * memory contention on ->exp_funnel_mutex in the heavy-load case.
+	 * Each pass through the following loop works its way up
+	 * the rcu_node tree, returning if others have done the work or
+	 * otherwise falls through to acquire rsp->exp_mutex.  The mapping
+	 * from CPU to rcu_node structure can be inexact, as it is just
+	 * promoting locality and is not strictly needed for correctness.
 	 */
-	rnp0 = rcu_get_root(rsp);
-	if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
-		if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
-			if (sync_exp_work_done(rsp, rnp0, NULL,
-					       &rdp->expedited_workdone0, s))
-				return NULL;
-			return rnp0;
+	for (; rnp != NULL; rnp = rnp->parent) {
+		if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
+			return true;
+
+		/* Work not done, either wait here or go up. */
+		spin_lock(&rnp->exp_lock);
+		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
+
+			/* Someone else doing GP, so wait for them. */
+			spin_unlock(&rnp->exp_lock);
+			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
+						  rnp->grplo, rnp->grphi,
+						  TPS("wait"));
+			wait_event(rnp->exp_wq[(s >> 1) & 0x3],
+				   sync_exp_work_done(rsp,
+						      &rdp->exp_workdone2, s));
+			return true;
 		}
+		rnp->exp_seq_rq = s; /* Followers can wait on us. */
+		spin_unlock(&rnp->exp_lock);
+		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
+					  rnp->grphi, TPS("nxtlvl"));
 	}
-
-	/*
-	 * Each pass through the following loop works its way
-	 * up the rcu_node tree, returning if others have done the
-	 * work or otherwise falls through holding the root rnp's
-	 * ->exp_funnel_mutex.  The mapping from CPU to rcu_node structure
-	 * can be inexact, as it is just promoting locality and is not
-	 * strictly needed for correctness.
-	 */
-	if (sync_exp_work_done(rsp, NULL, NULL, &rdp->expedited_workdone1, s))
-		return NULL;
-	mutex_lock(&rdp->exp_funnel_mutex);
-	rnp0 = rdp->mynode;
-	for (; rnp0 != NULL; rnp0 = rnp0->parent) {
-		if (sync_exp_work_done(rsp, rnp1, rdp,
-				       &rdp->expedited_workdone2, s))
-			return NULL;
-		mutex_lock(&rnp0->exp_funnel_mutex);
-		if (rnp1)
-			mutex_unlock(&rnp1->exp_funnel_mutex);
-		else
-			mutex_unlock(&rdp->exp_funnel_mutex);
-		rnp1 = rnp0;
+	mutex_lock(&rsp->exp_mutex);
+fastpath:
+	if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
+		mutex_unlock(&rsp->exp_mutex);
+		return true;
 	}
-	if (sync_exp_work_done(rsp, rnp1, rdp,
-			       &rdp->expedited_workdone3, s))
-		return NULL;
-	return rnp1;
+	rcu_exp_gp_seq_start(rsp);
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
+	return false;
 }
 
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
@@ -3649,6 +3738,11 @@
 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
 	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
 		return;
+	if (rcu_is_cpu_rrupt_from_idle()) {
+		rcu_report_exp_rdp(&rcu_sched_state,
+				   this_cpu_ptr(&rcu_sched_data), true);
+		return;
+	}
 	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
 	resched_cpu(smp_processor_id());
 }
@@ -3773,7 +3867,7 @@
 		       rsp->name);
 		ndetected = 0;
 		rcu_for_each_leaf_node(rsp, rnp) {
-			ndetected = rcu_print_task_exp_stall(rnp);
+			ndetected += rcu_print_task_exp_stall(rnp);
 			mask = 1;
 			for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
 				struct rcu_data *rdp;
@@ -3783,7 +3877,7 @@
 				ndetected++;
 				rdp = per_cpu_ptr(rsp->rda, cpu);
 				pr_cont(" %d-%c%c%c", cpu,
-					"O."[cpu_online(cpu)],
+					"O."[!!cpu_online(cpu)],
 					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
 					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
 			}
@@ -3792,7 +3886,7 @@
 		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
 			jiffies - jiffies_start, rsp->expedited_sequence,
 			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
-		if (!ndetected) {
+		if (ndetected) {
 			pr_err("blocking rcu_node structures:");
 			rcu_for_each_node_breadth_first(rsp, rnp) {
 				if (rnp == rnp_root)
@@ -3818,6 +3912,41 @@
 	}
 }
 
+/*
+ * Wait for the current expedited grace period to complete, and then
+ * wake up everyone who piggybacked on the just-completed expedited
+ * grace period.  Also update all the ->exp_seq_rq counters as needed
+ * in order to avoid counter-wrap problems.
+ */
+static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
+{
+	struct rcu_node *rnp;
+
+	synchronize_sched_expedited_wait(rsp);
+	rcu_exp_gp_seq_end(rsp);
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+
+	/*
+	 * Switch over to wakeup mode, allowing the next GP, but -only- the
+	 * next GP, to proceed.
+	 */
+	mutex_lock(&rsp->exp_wake_mutex);
+	mutex_unlock(&rsp->exp_mutex);
+
+	rcu_for_each_node_breadth_first(rsp, rnp) {
+		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
+			spin_lock(&rnp->exp_lock);
+			/* Recheck, avoid hang in case someone just arrived. */
+			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
+				rnp->exp_seq_rq = s;
+			spin_unlock(&rnp->exp_lock);
+		}
+		wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
+	}
+	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
+	mutex_unlock(&rsp->exp_wake_mutex);
+}
+
 /**
  * synchronize_sched_expedited - Brute-force RCU-sched grace period
  *
@@ -3837,7 +3966,6 @@
 void synchronize_sched_expedited(void)
 {
 	unsigned long s;
-	struct rcu_node *rnp;
 	struct rcu_state *rsp = &rcu_sched_state;
 
 	/* If only one CPU, this is automatically a grace period. */
@@ -3852,17 +3980,14 @@
 
 	/* Take a snapshot of the sequence number.  */
 	s = rcu_exp_gp_seq_snap(rsp);
-
-	rnp = exp_funnel_lock(rsp, s);
-	if (rnp == NULL)
+	if (exp_funnel_lock(rsp, s))
 		return;  /* Someone else did our work for us. */
 
-	rcu_exp_gp_seq_start(rsp);
+	/* Initialize the rcu_node tree in preparation for the wait. */
 	sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
-	synchronize_sched_expedited_wait(rsp);
 
-	rcu_exp_gp_seq_end(rsp);
-	mutex_unlock(&rnp->exp_funnel_mutex);
+	/* Wait and clean up, including waking everyone. */
+	rcu_exp_wait_wake(rsp, s);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 
@@ -4162,7 +4287,6 @@
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
-	mutex_init(&rdp->exp_funnel_mutex);
 	rcu_boot_init_nocb_percpu_data(rdp);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
@@ -4420,10 +4544,8 @@
 {
 	static const char * const buf[] = RCU_NODE_NAME_INIT;
 	static const char * const fqs[] = RCU_FQS_NAME_INIT;
-	static const char * const exp[] = RCU_EXP_NAME_INIT;
 	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
 	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
-	static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
 	static u8 fl_mask = 0x1;
 
 	int levelcnt[RCU_NUM_LVLS];		/* # nodes in each level. */
@@ -4482,9 +4604,11 @@
 			rnp->level = i;
 			INIT_LIST_HEAD(&rnp->blkd_tasks);
 			rcu_init_one_nocb(rnp);
-			mutex_init(&rnp->exp_funnel_mutex);
-			lockdep_set_class_and_name(&rnp->exp_funnel_mutex,
-						   &rcu_exp_class[i], exp[i]);
+			init_waitqueue_head(&rnp->exp_wq[0]);
+			init_waitqueue_head(&rnp->exp_wq[1]);
+			init_waitqueue_head(&rnp->exp_wq[2]);
+			init_waitqueue_head(&rnp->exp_wq[3]);
+			spin_lock_init(&rnp->exp_lock);
 		}
 	}
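
Much of the expedited-grace-period rework above leans on a single sequence counter: odd while an expedited grace period is in flight, even when idle, with waiters snapshotting the value the counter must reach before their updates are covered. A simplified model of those helpers (rough equivalents of rcu_exp_gp_seq_start/end/snap/done; READ_ONCE() and the memory barriers are omitted here):

static unsigned long exp_seq;

static void exp_seq_start(void)
{
	exp_seq++;			/* now odd: expedited GP in progress */
}

static void exp_seq_end(void)
{
	exp_seq++;			/* now even: expedited GP complete */
}

/* Value exp_seq must reach before our updates are covered by a full GP. */
static unsigned long exp_seq_snap(void)
{
	return (exp_seq + 3) & ~0x1UL;
}

/* Has the grace period we snapshotted for already completed? */
static int exp_seq_done(unsigned long s)
{
	return (long)(exp_seq - s) >= 0;	/* ULONG_CMP_GE(exp_seq, s) */
}
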
 
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index df668c0..e3959f5 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -70,7 +70,6 @@
 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
-#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
 #elif NR_CPUS <= RCU_FANOUT_2
 #  define RCU_NUM_LVLS	      2
 #  define NUM_RCU_LVL_0	      1
@@ -79,7 +78,6 @@
 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
-#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
 #elif NR_CPUS <= RCU_FANOUT_3
 #  define RCU_NUM_LVLS	      3
 #  define NUM_RCU_LVL_0	      1
@@ -89,7 +87,6 @@
 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
-#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
 #elif NR_CPUS <= RCU_FANOUT_4
 #  define RCU_NUM_LVLS	      4
 #  define NUM_RCU_LVL_0	      1
@@ -100,7 +97,6 @@
 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
-#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
 #else
 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
 #endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
@@ -252,7 +248,9 @@
 				/* Counts of upcoming no-CB GP requests. */
 	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
 
-	struct mutex exp_funnel_mutex ____cacheline_internodealigned_in_smp;
+	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
+	unsigned long exp_seq_rq;
+	wait_queue_head_t exp_wq[4];
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -387,11 +385,9 @@
 #ifdef CONFIG_RCU_FAST_NO_HZ
 	struct rcu_head oom_head;
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-	struct mutex exp_funnel_mutex;
-	atomic_long_t expedited_workdone0;	/* # done by others #0. */
-	atomic_long_t expedited_workdone1;	/* # done by others #1. */
-	atomic_long_t expedited_workdone2;	/* # done by others #2. */
-	atomic_long_t expedited_workdone3;	/* # done by others #3. */
+	atomic_long_t exp_workdone1;	/* # done by others #1. */
+	atomic_long_t exp_workdone2;	/* # done by others #2. */
+	atomic_long_t exp_workdone3;	/* # done by others #3. */
 
 	/* 7) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
@@ -505,6 +501,8 @@
 						/*  _rcu_barrier(). */
 	/* End of fields guarded by barrier_mutex. */
 
+	struct mutex exp_mutex;			/* Serialize expedited GP. */
+	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
 	unsigned long expedited_sequence;	/* Take a ticket. */
 	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
 	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
@@ -513,6 +511,8 @@
 
 	unsigned long jiffies_force_qs;		/* Time at which to invoke */
 						/*  force_quiescent_state(). */
+	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
+						/*  kthreads, if configured. */
 	unsigned long n_force_qs;		/* Number of calls to */
 						/*  force_quiescent_state(). */
 	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index efdf7b6..ff1cd4e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -722,18 +722,22 @@
  * synchronize_rcu_expedited - Brute-force RCU grace period
  *
  * Wait for an RCU-preempt grace period, but expedite it.  The basic
- * idea is to invoke synchronize_sched_expedited() to push all the tasks to
- * the ->blkd_tasks lists and wait for this list to drain.  This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code.
- * In fact, if you are using synchronize_rcu_expedited() in a loop,
- * please restructure your code to batch your updates, and then Use a
- * single synchronize_rcu() instead.
+ * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
+ * checks whether the CPU is in an RCU-preempt critical section, and
+ * if so, it sets a flag that causes the outermost rcu_read_unlock()
+ * to report the quiescent state.  On the other hand, if the CPU is
+ * not in an RCU read-side critical section, the IPI handler reports
+ * the quiescent state immediately.
+ *
+ * Although this is a great improvement over previous expedited
+ * implementations, it is still unfriendly to real-time workloads, and is
+ * thus not recommended for any sort of common-case code.  In fact, if
+ * you are using synchronize_rcu_expedited() in a loop, please restructure
+ * your code to batch your updates, and then use a single synchronize_rcu()
+ * instead.
  */
 void synchronize_rcu_expedited(void)
 {
-	struct rcu_node *rnp;
-	struct rcu_node *rnp_unlock;
 	struct rcu_state *rsp = rcu_state_p;
 	unsigned long s;
 
@@ -744,23 +748,14 @@
 	}
 
 	s = rcu_exp_gp_seq_snap(rsp);
-
-	rnp_unlock = exp_funnel_lock(rsp, s);
-	if (rnp_unlock == NULL)
+	if (exp_funnel_lock(rsp, s))
 		return;  /* Someone else did our work for us. */
 
-	rcu_exp_gp_seq_start(rsp);
-
 	/* Initialize the rcu_node tree in preparation for the wait. */
 	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
 
-	/* Wait for snapshotted ->blkd_tasks lists to drain. */
-	rnp = rcu_get_root(rsp);
-	synchronize_sched_expedited_wait(rsp);
-
-	/* Clean up and exit. */
-	rcu_exp_gp_seq_end(rsp);
-	mutex_unlock(&rnp_unlock->exp_funnel_mutex);
+	/* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
+	rcu_exp_wait_wake(rsp, s);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 1088e64..86782f9a 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -185,17 +185,16 @@
 	int cpu;
 	struct rcu_state *rsp = (struct rcu_state *)m->private;
 	struct rcu_data *rdp;
-	unsigned long s0 = 0, s1 = 0, s2 = 0, s3 = 0;
+	unsigned long s1 = 0, s2 = 0, s3 = 0;
 
 	for_each_possible_cpu(cpu) {
 		rdp = per_cpu_ptr(rsp->rda, cpu);
-		s0 += atomic_long_read(&rdp->expedited_workdone0);
-		s1 += atomic_long_read(&rdp->expedited_workdone1);
-		s2 += atomic_long_read(&rdp->expedited_workdone2);
-		s3 += atomic_long_read(&rdp->expedited_workdone3);
+		s1 += atomic_long_read(&rdp->exp_workdone1);
+		s2 += atomic_long_read(&rdp->exp_workdone2);
+		s3 += atomic_long_read(&rdp->exp_workdone3);
 	}
-	seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
-		   rsp->expedited_sequence, s0, s1, s2, s3,
+	seq_printf(m, "s=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
+		   rsp->expedited_sequence, s1, s2, s3,
 		   atomic_long_read(&rsp->expedited_normal),
 		   atomic_read(&rsp->expedited_need_qs),
 		   rsp->expedited_sequence / 2);
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index ca828b4..3ccdc8e 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -67,7 +67,7 @@
 module_param(rcu_normal_after_boot, int, 0);
 #endif /* #ifndef CONFIG_TINY_RCU */
 
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_PREEMPT_COUNT)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
  *
@@ -111,7 +111,7 @@
 		return 0;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
+	return lockdep_opinion || !preemptible();
 }
 EXPORT_SYMBOL(rcu_read_lock_sched_held);
 #endif
diff --git a/kernel/resource.c b/kernel/resource.c
index 2e78ead..9b5f044 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -105,16 +105,25 @@
 {
 	struct resource *root = m->private;
 	struct resource *r = v, *p;
+	unsigned long long start, end;
 	int width = root->end < 0x10000 ? 4 : 8;
 	int depth;
 
 	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
 		if (p->parent == root)
 			break;
+
+	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
+		start = r->start;
+		end = r->end;
+	} else {
+		start = end = 0;
+	}
+
 	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
 			depth * 2, "",
-			width, (unsigned long long) r->start,
-			width, (unsigned long long) r->end,
+			width, start,
+			width, end,
 			r->name ? r->name : "<BAD>");
 	return 0;
 }
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 414d9c1..5e59b83 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -24,3 +24,4 @@
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
 obj-$(CONFIG_CPU_FREQ) += cpufreq.o
+obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index fedb967..e85a725 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -318,6 +318,7 @@
 
 	return clock;
 }
+EXPORT_SYMBOL_GPL(sched_clock_cpu);
 
 void sched_clock_tick(void)
 {
@@ -363,39 +364,6 @@
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-/*
- * As outlined at the top, provides a fast, high resolution, nanosecond
- * time source that is monotonic per cpu argument and has bounded drift
- * between cpus.
- *
- * ######################### BIG FAT WARNING ##########################
- * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
- * # go backwards !!                                                  #
- * ####################################################################
- */
-u64 cpu_clock(int cpu)
-{
-	if (!sched_clock_stable())
-		return sched_clock_cpu(cpu);
-
-	return sched_clock();
-}
-
-/*
- * Similar to cpu_clock() for the current cpu. Time will only be observed
- * to be monotonic if care is taken to only compare timestampt taken on the
- * same CPU.
- *
- * See cpu_clock().
- */
-u64 local_clock(void)
-{
-	if (!sched_clock_stable())
-		return sched_clock_cpu(raw_smp_processor_id());
-
-	return sched_clock();
-}
-
 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 void sched_clock_init(void)
@@ -410,22 +378,8 @@
 
 	return sched_clock();
 }
-
-u64 cpu_clock(int cpu)
-{
-	return sched_clock();
-}
-
-u64 local_clock(void)
-{
-	return sched_clock();
-}
-
 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
-EXPORT_SYMBOL_GPL(cpu_clock);
-EXPORT_SYMBOL_GPL(local_clock);
-
 /*
  * Running clock - returns the time that has elapsed while a guest has been
  * running.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8465ee..404c078 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -33,7 +33,7 @@
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
-#include <asm/mmu_context.h>
+#include <linux/mmu_context.h>
 #include <linux/interrupt.h>
 #include <linux/capability.h>
 #include <linux/completion.h>
@@ -170,6 +170,71 @@
 	return rq;
 }
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	lockdep_assert_held(&p->pi_lock);
+
+	for (;;) {
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+			rf->cookie = lockdep_pin_lock(&rq->lock);
+			return rq;
+		}
+		raw_spin_unlock(&rq->lock);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	for (;;) {
+		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
+		rq = task_rq(p);
+		raw_spin_lock(&rq->lock);
+		/*
+		 *	move_queued_task()		task_rq_lock()
+		 *
+		 *	ACQUIRE (rq->lock)
+		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
+		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
+		 *	[S] ->cpu = new_cpu		[L] task_rq()
+		 *					[L] ->on_rq
+		 *	RELEASE (rq->lock)
+		 *
+		 * If we observe the old cpu in task_rq_lock, the acquire of
+		 * the old rq->lock will fully serialize against the stores.
+		 *
+		 * If we observe the new cpu in task_rq_lock, the acquire will
+		 * pair with the WMB to ensure we must then also see migrating.
+		 */
+		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+			rf->cookie = lockdep_pin_lock(&rq->lock);
+			return rq;
+		}
+		raw_spin_unlock(&rq->lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+
+		while (unlikely(task_on_rq_migrating(p)))
+			cpu_relax();
+	}
+}
+
 #ifdef CONFIG_SCHED_HRTICK
 /*
  * Use HR-timers to deliver accurate preemption points.
@@ -249,29 +314,6 @@
 	}
 }
 
-static int
-hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
-	int cpu = (int)(long)hcpu;
-
-	switch (action) {
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		hrtick_clear(cpu_rq(cpu));
-		return NOTIFY_OK;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static __init void init_hrtick(void)
-{
-	hotcpu_notifier(hotplug_hrtick, 0);
-}
 #else
 /*
  * Called to set the hrtick timer state.
@@ -288,10 +330,6 @@
 	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
 		      HRTIMER_MODE_REL_PINNED);
 }
-
-static inline void init_hrtick(void)
-{
-}
 #endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
@@ -315,12 +353,26 @@
 static inline void init_rq_hrtick(struct rq *rq)
 {
 }
-
-static inline void init_hrtick(void)
-{
-}
 #endif	/* CONFIG_SCHED_HRTICK */
 
+/*
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, mask)						\
+	({								\
+		typeof(ptr) _ptr = (ptr);				\
+		typeof(mask) _mask = (mask);				\
+		typeof(*_ptr) _old, _val = *_ptr;			\
+									\
+		for (;;) {						\
+			_old = cmpxchg(_ptr, _val, _val | _mask);	\
+			if (_old == _val)				\
+				break;					\
+			_val = _old;					\
+		}							\
+	_old;								\
+})
+
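
[Editor's note, not part of the patch] The fetch_or() macro above open-codes an atomic fetch-or as a cmpxchg() retry loop so that it works for any integer type. A hedged userspace illustration of the same retry pattern using C11 atomics (standalone sketch, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static unsigned long fetch_or_ulong(_Atomic unsigned long *p, unsigned long mask)
{
	unsigned long old = atomic_load(p);

	/* On failure, 'old' is refreshed with the current value and we retry
	 * with the newly observed contents, exactly like the cmpxchg loop. */
	while (!atomic_compare_exchange_weak(p, &old, old | mask))
		;
	return old;	/* value seen before the OR */
}

int main(void)
{
	_Atomic unsigned long flags = 0x1;
	unsigned long prev = fetch_or_ulong(&flags, 0x4);

	printf("previous=%#lx now=%#lx\n", prev, (unsigned long)atomic_load(&flags));
	return 0;
}
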
 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
@@ -382,7 +434,7 @@
 	 * wakeup due to that.
 	 *
 	 * This cmpxchg() implies a full barrier, which pairs with the write
-	 * barrier implied by the wakeup in wake_up_list().
+	 * barrier implied by the wakeup in wake_up_q().
 	 */
 	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
 		return;
@@ -481,7 +533,10 @@
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		for_each_cpu(i, sched_domain_span(sd)) {
-			if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
+			if (cpu == i)
+				continue;
+
+			if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
 				cpu = i;
 				goto unlock;
 			}
@@ -578,17 +633,8 @@
 		return false;
 
 	/*
-	 * FIFO realtime policy runs the highest priority task (after DEADLINE).
-	 * Other runnable tasks are of a lower priority. The scheduler tick
-	 * isn't needed.
-	 */
-	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
-	if (fifo_nr_running)
-		return true;
-
-	/*
-	 * Round-robin realtime tasks time slice with other tasks at the same
-	 * realtime priority.
+	 * If there is more than one RR task, we need the tick to effect the
+	 * actual RR behaviour.
 	 */
 	if (rq->rt.rr_nr_running) {
 		if (rq->rt.rr_nr_running == 1)
@@ -597,8 +643,20 @@
 			return false;
 	}
 
-	/* Normal multitasking need periodic preemption checks */
-	if (rq->cfs.nr_running > 1)
+	/*
+	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
+	 * tick: there is no forced preemption between FIFO tasks.
+	 */
+	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+	if (fifo_nr_running)
+		return true;
+
+	/*
+	 * If there are no DL, RR, or FIFO tasks, only CFS tasks can be left;
+	 * if there's more than one we need the tick for involuntary
+	 * preemption.
+	 */
+	if (rq->nr_running > 1)
 		return false;
 
 	return true;
@@ -1064,12 +1122,20 @@
 static int __set_cpus_allowed_ptr(struct task_struct *p,
 				  const struct cpumask *new_mask, bool check)
 {
-	unsigned long flags;
-	struct rq *rq;
+	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	unsigned int dest_cpu;
+	struct rq_flags rf;
+	struct rq *rq;
 	int ret = 0;
 
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
+
+	if (p->flags & PF_KTHREAD) {
+		/*
+		 * Kernel threads are allowed on online && !active CPUs
+		 */
+		cpu_valid_mask = cpu_online_mask;
+	}
 
 	/*
 	 * Must re-check here, to close a race against __kthread_bind(),
@@ -1083,22 +1149,32 @@
 	if (cpumask_equal(&p->cpus_allowed, new_mask))
 		goto out;
 
-	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+	if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
 	do_set_cpus_allowed(p, new_mask);
 
+	if (p->flags & PF_KTHREAD) {
+		/*
+		 * For kernel threads that do indeed end up on online &&
+		 * !active CPUs, we want to ensure they are strict per-cpu threads.
+		 */
+		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
+			!cpumask_intersects(new_mask, cpu_active_mask) &&
+			p->nr_cpus_allowed != 1);
+	}
+
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;
 
-	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
 		tlb_migrate_finish(p->mm);
 		return 0;
@@ -1107,12 +1183,12 @@
 		 * OK, since we're going to drop the lock immediately
 		 * afterwards anyway.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		rq = move_queued_task(rq, p, dest_cpu);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 out:
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 
 	return ret;
 }
@@ -1296,8 +1372,8 @@
  */
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
-	unsigned long flags;
 	int running, queued;
+	struct rq_flags rf;
 	unsigned long ncsw;
 	struct rq *rq;
 
@@ -1332,14 +1408,14 @@
 		 * lock now, to be *sure*. If we're wrong, we'll
 		 * just go back and repeat.
 		 */
-		rq = task_rq_lock(p, &flags);
+		rq = task_rq_lock(p, &rf);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
 		queued = task_on_rq_queued(p);
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 
 		/*
 		 * If it changed from the expected state, bail out now.
@@ -1413,6 +1489,25 @@
 
 /*
  * ->cpus_allowed is protected by both rq->lock and p->pi_lock
+ *
+ * A few notes on cpu_active vs cpu_online:
+ *
+ *  - cpu_active must be a subset of cpu_online
+ *
+ *  - on cpu-up we allow per-cpu kthreads on the online && !active cpu,
+ *    see __set_cpus_allowed_ptr(). At this point the newly online
+ *    cpu isn't yet part of the sched domains, and balancing will not
+ *    see it.
+ *
+ *  - on cpu-down we clear cpu_active() to mask the sched domains and
+ *    keep the load balancer from placing new tasks on the to-be-removed
+ *    cpu. Existing tasks will remain running there and will be taken
+ *    off.
+ *
+ * This means that fallback selection must not select !active CPUs,
+ * and it can assume that any active CPU is online. Conversely,
+ * select_task_rq() below may allow selection of !active CPUs in order
+ * to satisfy the above rules.
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
@@ -1431,8 +1526,6 @@
 
 		/* Look for allowed, online CPU in same node. */
 		for_each_cpu(dest_cpu, nodemask) {
-			if (!cpu_online(dest_cpu))
-				continue;
 			if (!cpu_active(dest_cpu))
 				continue;
 			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
@@ -1443,8 +1536,6 @@
 	for (;;) {
 		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
-			if (!cpu_online(dest_cpu))
-				continue;
 			if (!cpu_active(dest_cpu))
 				continue;
 			goto out;
@@ -1494,8 +1585,10 @@
 {
 	lockdep_assert_held(&p->pi_lock);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+	else
+		cpu = cpumask_any(tsk_cpus_allowed(p));
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
@@ -1583,8 +1676,8 @@
 /*
  * Mark the task runnable and perform wakeup-preemption.
  */
-static void
-ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
+			   struct pin_cookie cookie)
 {
 	check_preempt_curr(rq, p, wake_flags);
 	p->state = TASK_RUNNING;
@@ -1596,9 +1689,9 @@
 		 * Our task @p is fully woken up and running; so its safe to
 		 * drop the rq->lock, hereafter rq is only used for statistics.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		p->sched_class->task_woken(rq, p);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 	}
 
 	if (rq->idle_stamp) {
@@ -1616,17 +1709,23 @@
 }
 
 static void
-ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
+		 struct pin_cookie cookie)
 {
+	int en_flags = ENQUEUE_WAKEUP;
+
 	lockdep_assert_held(&rq->lock);
 
 #ifdef CONFIG_SMP
 	if (p->sched_contributes_to_load)
 		rq->nr_uninterruptible--;
+
+	if (wake_flags & WF_MIGRATED)
+		en_flags |= ENQUEUE_MIGRATED;
 #endif
 
-	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
-	ttwu_do_wakeup(rq, p, wake_flags);
+	ttwu_activate(rq, p, en_flags);
+	ttwu_do_wakeup(rq, p, wake_flags, cookie);
 }
 
 /*
@@ -1637,17 +1736,18 @@
  */
 static int ttwu_remote(struct task_struct *p, int wake_flags)
 {
+	struct rq_flags rf;
 	struct rq *rq;
 	int ret = 0;
 
-	rq = __task_rq_lock(p);
+	rq = __task_rq_lock(p, &rf);
 	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
-		ttwu_do_wakeup(rq, p, wake_flags);
+		ttwu_do_wakeup(rq, p, wake_flags, rf.cookie);
 		ret = 1;
 	}
-	__task_rq_unlock(rq);
+	__task_rq_unlock(rq, &rf);
 
 	return ret;
 }
@@ -1657,6 +1757,7 @@
 {
 	struct rq *rq = this_rq();
 	struct llist_node *llist = llist_del_all(&rq->wake_list);
+	struct pin_cookie cookie;
 	struct task_struct *p;
 	unsigned long flags;
 
@@ -1664,15 +1765,19 @@
 		return;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	lockdep_pin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
 
 	while (llist) {
 		p = llist_entry(llist, struct task_struct, wake_entry);
 		llist = llist_next(llist);
-		ttwu_do_activate(rq, p, 0);
+		/*
+		 * See ttwu_queue(); we only call ttwu_queue_remote() when
+		 * it's an x-cpu wakeup.
+		 */
+		ttwu_do_activate(rq, p, WF_MIGRATED, cookie);
 	}
 
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1756,9 +1861,10 @@
 }
 #endif /* CONFIG_SMP */
 
-static void ttwu_queue(struct task_struct *p, int cpu)
+static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 {
 	struct rq *rq = cpu_rq(cpu);
+	struct pin_cookie cookie;
 
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
@@ -1769,9 +1875,9 @@
 #endif
 
 	raw_spin_lock(&rq->lock);
-	lockdep_pin_lock(&rq->lock);
-	ttwu_do_activate(rq, p, 0);
-	lockdep_unpin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
+	ttwu_do_activate(rq, p, wake_flags, cookie);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	raw_spin_unlock(&rq->lock);
 }
 
@@ -1940,9 +2046,6 @@
 	p->sched_contributes_to_load = !!task_contributes_to_load(p);
 	p->state = TASK_WAKING;
 
-	if (p->sched_class->task_waking)
-		p->sched_class->task_waking(p);
-
 	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
 	if (task_cpu(p) != cpu) {
 		wake_flags |= WF_MIGRATED;
@@ -1950,7 +2053,7 @@
 	}
 #endif /* CONFIG_SMP */
 
-	ttwu_queue(p, cpu);
+	ttwu_queue(p, cpu, wake_flags);
 stat:
 	if (schedstat_enabled())
 		ttwu_stat(p, cpu, wake_flags);
@@ -1968,7 +2071,7 @@
  * ensure that this_rq() is locked, @p is bound to this_rq() and not
  * the current task.
  */
-static void try_to_wake_up_local(struct task_struct *p)
+static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
 {
 	struct rq *rq = task_rq(p);
 
@@ -1985,11 +2088,11 @@
 		 * disabled avoiding further scheduler activity on it and we've
 		 * not yet picked a replacement task.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&p->pi_lock);
 		raw_spin_lock(&rq->lock);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 	}
 
 	if (!(p->state & TASK_NORMAL))
@@ -2000,7 +2103,7 @@
 	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
-	ttwu_do_wakeup(rq, p, 0);
+	ttwu_do_wakeup(rq, p, 0, cookie);
 	if (schedstat_enabled())
 		ttwu_stat(p, smp_processor_id(), 0);
 out:
@@ -2360,7 +2463,8 @@
 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
 	int cpus, err = -1;
 
-	if (new_bw == p->dl.dl_bw)
+	/* !deadline task may carry old deadline bandwidth */
+	if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
 		return 0;
 
 	/*
@@ -2399,12 +2503,12 @@
  */
 void wake_up_new_task(struct task_struct *p)
 {
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;
 
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	/* Initialize new task's runnable average */
 	init_entity_runnable_average(&p->se);
+	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 #ifdef CONFIG_SMP
 	/*
 	 * Fork balancing, do it here and not earlier because:
@@ -2413,8 +2517,10 @@
 	 */
 	set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
+	/* Post initialize new task's util average when its cfs_rq is set */
+	post_init_entity_util_avg(&p->se);
 
-	rq = __task_rq_lock(p);
+	rq = __task_rq_lock(p, &rf);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p);
@@ -2425,12 +2531,12 @@
 		 * Nothing relies on rq->lock after this, so its fine to
 		 * drop it.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		p->sched_class->task_woken(rq, p);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 #endif
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -2692,7 +2798,7 @@
  */
 static __always_inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
-	       struct task_struct *next)
+	       struct task_struct *next, struct pin_cookie cookie)
 {
 	struct mm_struct *mm, *oldmm;
 
@@ -2712,7 +2818,7 @@
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
-		switch_mm(oldmm, mm, next);
+		switch_mm_irqs_off(oldmm, mm, next);
 
 	if (!prev->mm) {
 		prev->active_mm = NULL;
@@ -2724,7 +2830,7 @@
 	 * of the scheduler it's an obvious special-case), so we
 	 * do an early lockdep release here:
 	 */
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
 	/* Here we just switch the register state and the stack. */
@@ -2846,7 +2952,7 @@
  */
 unsigned long long task_sched_runtime(struct task_struct *p)
 {
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;
 	u64 ns;
 
@@ -2866,7 +2972,7 @@
 		return p->se.sum_exec_runtime;
 #endif
 
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	/*
 	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
 	 * project cycles that may never be accounted to this
@@ -2877,7 +2983,7 @@
 		p->sched_class->update_curr(rq);
 	}
 	ns = p->se.sum_exec_runtime;
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 
 	return ns;
 }
@@ -2897,7 +3003,7 @@
 	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
-	update_cpu_load_active(rq);
+	cpu_load_update_active(rq);
 	calc_global_load_tick(rq);
 	raw_spin_unlock(&rq->lock);
 
@@ -2940,6 +3046,20 @@
 
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
 				defined(CONFIG_PREEMPT_TRACER))
+/*
+ * If the value passed in is equal to the current preempt count
+ * then we just disabled preemption. Start timing the latency.
+ */
+static inline void preempt_latency_start(int val)
+{
+	if (preempt_count() == val) {
+		unsigned long ip = get_lock_parent_ip();
+#ifdef CONFIG_DEBUG_PREEMPT
+		current->preempt_disable_ip = ip;
+#endif
+		trace_preempt_off(CALLER_ADDR0, ip);
+	}
+}
 
 void preempt_count_add(int val)
 {
@@ -2958,17 +3078,21 @@
 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
 				PREEMPT_MASK - 10);
 #endif
-	if (preempt_count() == val) {
-		unsigned long ip = get_lock_parent_ip();
-#ifdef CONFIG_DEBUG_PREEMPT
-		current->preempt_disable_ip = ip;
-#endif
-		trace_preempt_off(CALLER_ADDR0, ip);
-	}
+	preempt_latency_start(val);
 }
 EXPORT_SYMBOL(preempt_count_add);
 NOKPROBE_SYMBOL(preempt_count_add);
 
+/*
+ * If the value passed in equals the current preempt count
+ * then we just enabled preemption. Stop timing the latency.
+ */
+static inline void preempt_latency_stop(int val)
+{
+	if (preempt_count() == val)
+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+}
+
 void preempt_count_sub(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
@@ -2985,13 +3109,15 @@
 		return;
 #endif
 
-	if (preempt_count() == val)
-		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+	preempt_latency_stop(val);
 	__preempt_count_sub(val);
 }
 EXPORT_SYMBOL(preempt_count_sub);
 NOKPROBE_SYMBOL(preempt_count_sub);
 
+#else
+static inline void preempt_latency_start(int val) { }
+static inline void preempt_latency_stop(int val) { }
 #endif
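
[Editor's note, not part of the patch] preempt_latency_start()/preempt_latency_stop() above only act when the value passed in equals the current preempt count, i.e. on the outermost disable/enable, so nested sections do not restart the measurement. A small userspace sketch of that outermost-level-only timing idea; the names and clock used here are illustrative, not kernel API:

#include <stdio.h>
#include <time.h>

static int nest;
static struct timespec t0;

static void my_disable(void)
{
	if (nest++ == 0)			/* outermost disable: start timing */
		clock_gettime(CLOCK_MONOTONIC, &t0);
}

static void my_enable(void)
{
	if (--nest == 0) {			/* outermost enable: report latency */
		struct timespec t1;

		clock_gettime(CLOCK_MONOTONIC, &t1);
		printf("section took %ld ns\n",
		       (t1.tv_sec - t0.tv_sec) * 1000000000L +
		       (t1.tv_nsec - t0.tv_nsec));
	}
}

int main(void)
{
	my_disable();
	my_disable();	/* nested: no new timestamp */
	my_enable();
	my_enable();	/* outermost: prints the elapsed time */
	return 0;
}
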
 
 /*
@@ -3044,7 +3170,7 @@
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq, struct task_struct *prev)
+pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	const struct sched_class *class = &fair_sched_class;
 	struct task_struct *p;
@@ -3055,20 +3181,20 @@
 	 */
 	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
-		p = fair_sched_class.pick_next_task(rq, prev);
+		p = fair_sched_class.pick_next_task(rq, prev, cookie);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
 
 		/* assumes fair_sched_class->next == idle_sched_class */
 		if (unlikely(!p))
-			p = idle_sched_class.pick_next_task(rq, prev);
+			p = idle_sched_class.pick_next_task(rq, prev, cookie);
 
 		return p;
 	}
 
 again:
 	for_each_class(class) {
-		p = class->pick_next_task(rq, prev);
+		p = class->pick_next_task(rq, prev, cookie);
 		if (p) {
 			if (unlikely(p == RETRY_TASK))
 				goto again;
@@ -3122,6 +3248,7 @@
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
+	struct pin_cookie cookie;
 	struct rq *rq;
 	int cpu;
 
@@ -3155,7 +3282,7 @@
 	 */
 	smp_mb__before_spinlock();
 	raw_spin_lock(&rq->lock);
-	lockdep_pin_lock(&rq->lock);
+	cookie = lockdep_pin_lock(&rq->lock);
 
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
@@ -3177,7 +3304,7 @@
 
 				to_wakeup = wq_worker_sleeping(prev);
 				if (to_wakeup)
-					try_to_wake_up_local(to_wakeup);
+					try_to_wake_up_local(to_wakeup, cookie);
 			}
 		}
 		switch_count = &prev->nvcsw;
@@ -3186,7 +3313,7 @@
 	if (task_on_rq_queued(prev))
 		update_rq_clock(rq);
 
-	next = pick_next_task(rq, prev);
+	next = pick_next_task(rq, prev, cookie);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
@@ -3197,9 +3324,9 @@
 		++*switch_count;
 
 		trace_sched_switch(preempt, prev, next);
-		rq = context_switch(rq, prev, next); /* unlocks the rq */
+		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
 	} else {
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 
@@ -3266,8 +3393,23 @@
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
+		/*
+		 * Because the function tracer can trace preempt_count_sub()
+		 * and it also uses preempt_enable/disable_notrace(), if
+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
+		 * by the function tracer will call this function again and
+		 * cause infinite recursion.
+		 *
+		 * Preemption must be disabled here before the function
+		 * tracer can trace. Break up preempt_disable() into two
+		 * calls. One to disable preemption without fear of being
+		 * traced. The other to still record the preemption latency,
+		 * which can also be traced by the function tracer.
+		 */
 		preempt_disable_notrace();
+		preempt_latency_start(1);
 		__schedule(true);
+		preempt_latency_stop(1);
 		preempt_enable_no_resched_notrace();
 
 		/*
@@ -3319,7 +3461,21 @@
 		return;
 
 	do {
+		/*
+		 * Because the function tracer can trace preempt_count_sub()
+		 * and it also uses preempt_enable/disable_notrace(), if
+		 * NEED_RESCHED is set, the preempt_enable_notrace() called
+		 * by the function tracer will call this function again and
+		 * cause infinite recursion.
+		 *
+		 * Preemption must be disabled here before the function
+		 * tracer can trace. Break up preempt_disable() into two
+		 * calls. One to disable preemption without fear of being
+		 * traced. The other to still record the preemption latency,
+		 * which can also be traced by the function tracer.
+		 */
 		preempt_disable_notrace();
+		preempt_latency_start(1);
 		/*
 		 * Needs preempt disabled in case user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
@@ -3329,6 +3485,7 @@
 		__schedule(true);
 		exception_exit(prev_ctx);
 
+		preempt_latency_stop(1);
 		preempt_enable_no_resched_notrace();
 	} while (need_resched());
 }
@@ -3385,12 +3542,13 @@
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
 	int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
-	struct rq *rq;
 	const struct sched_class *prev_class;
+	struct rq_flags rf;
+	struct rq *rq;
 
 	BUG_ON(prio > MAX_PRIO);
 
-	rq = __task_rq_lock(p);
+	rq = __task_rq_lock(p, &rf);
 
 	/*
 	 * Idle task boosting is a nono in general. There is one
@@ -3466,7 +3624,7 @@
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
 	preempt_disable(); /* avoid rq from going away on us */
-	__task_rq_unlock(rq);
+	__task_rq_unlock(rq, &rf);
 
 	balance_callback(rq);
 	preempt_enable();
@@ -3476,7 +3634,7 @@
 void set_user_nice(struct task_struct *p, long nice)
 {
 	int old_prio, delta, queued;
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;
 
 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
@@ -3485,7 +3643,7 @@
 	 * We have to be careful, if called from sys_setpriority(),
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
@@ -3516,7 +3674,7 @@
 			resched_curr(rq);
 	}
 out_unlock:
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 }
 EXPORT_SYMBOL(set_user_nice);
 
@@ -3813,11 +3971,11 @@
 		      MAX_RT_PRIO - 1 - attr->sched_priority;
 	int retval, oldprio, oldpolicy = -1, queued, running;
 	int new_effective_prio, policy = attr->sched_policy;
-	unsigned long flags;
 	const struct sched_class *prev_class;
-	struct rq *rq;
+	struct rq_flags rf;
 	int reset_on_fork;
 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
+	struct rq *rq;
 
 	/* may grab non-irq protected spin_locks */
 	BUG_ON(in_interrupt());
@@ -3912,13 +4070,13 @@
 	 * To be able to change p->policy safely, the appropriate
 	 * runqueue lock must be held.
 	 */
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 
 	/*
 	 * Changing the policy of the stop threads its a very bad idea
 	 */
 	if (p == rq->stop) {
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		return -EINVAL;
 	}
 
@@ -3935,7 +4093,7 @@
 			goto change;
 
 		p->sched_reset_on_fork = reset_on_fork;
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		return 0;
 	}
 change:
@@ -3949,7 +4107,7 @@
 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
 				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
 				!task_group_is_autogroup(task_group(p))) {
-			task_rq_unlock(rq, p, &flags);
+			task_rq_unlock(rq, p, &rf);
 			return -EPERM;
 		}
 #endif
@@ -3964,7 +4122,7 @@
 			 */
 			if (!cpumask_subset(span, &p->cpus_allowed) ||
 			    rq->rd->dl_bw.bw == 0) {
-				task_rq_unlock(rq, p, &flags);
+				task_rq_unlock(rq, p, &rf);
 				return -EPERM;
 			}
 		}
@@ -3974,7 +4132,7 @@
 	/* recheck policy now with rq lock held */
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		goto recheck;
 	}
 
@@ -3984,7 +4142,7 @@
 	 * is available.
 	 */
 	if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
-		task_rq_unlock(rq, p, &flags);
+		task_rq_unlock(rq, p, &rf);
 		return -EBUSY;
 	}
 
@@ -4029,7 +4187,7 @@
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	preempt_disable(); /* avoid rq from going away on us */
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 
 	if (pi)
 		rt_mutex_adjust_pi(p);
@@ -4882,10 +5040,10 @@
 {
 	struct task_struct *p;
 	unsigned int time_slice;
-	unsigned long flags;
+	struct rq_flags rf;
+	struct timespec t;
 	struct rq *rq;
 	int retval;
-	struct timespec t;
 
 	if (pid < 0)
 		return -EINVAL;
@@ -4900,11 +5058,11 @@
 	if (retval)
 		goto out_unlock;
 
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	time_slice = 0;
 	if (p->sched_class->get_rr_interval)
 		time_slice = p->sched_class->get_rr_interval(rq, p);
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 
 	rcu_read_unlock();
 	jiffies_to_timespec(time_slice, &t);
@@ -4980,7 +5138,8 @@
 	touch_all_softlockup_watchdogs();
 
 #ifdef CONFIG_SCHED_DEBUG
-	sysrq_sched_debug_show();
+	if (!state_filter)
+		sysrq_sched_debug_show();
 #endif
 	rcu_read_unlock();
 	/*
@@ -5142,6 +5301,8 @@
 
 #ifdef CONFIG_SMP
 
+static bool sched_smp_initialized __read_mostly;
+
 #ifdef CONFIG_NUMA_BALANCING
 /* Migrate current task p to target_cpu */
 int migrate_task_to(struct task_struct *p, int target_cpu)
@@ -5167,11 +5328,11 @@
  */
 void sched_setnuma(struct task_struct *p, int nid)
 {
-	struct rq *rq;
-	unsigned long flags;
 	bool queued, running;
+	struct rq_flags rf;
+	struct rq *rq;
 
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 
@@ -5186,7 +5347,7 @@
 		p->sched_class->set_curr_task(rq);
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE);
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
@@ -5202,7 +5363,7 @@
 	BUG_ON(cpu_online(smp_processor_id()));
 
 	if (mm != &init_mm) {
-		switch_mm(mm, &init_mm, current);
+		switch_mm_irqs_off(mm, &init_mm, current);
 		finish_arch_post_lock_switch();
 	}
 	mmdrop(mm);
@@ -5250,6 +5411,7 @@
 {
 	struct rq *rq = dead_rq;
 	struct task_struct *next, *stop = rq->stop;
+	struct pin_cookie cookie;
 	int dest_cpu;
 
 	/*
@@ -5281,8 +5443,8 @@
 		/*
 		 * pick_next_task assumes pinned rq->lock.
 		 */
-		lockdep_pin_lock(&rq->lock);
-		next = pick_next_task(rq, &fake_task);
+		cookie = lockdep_pin_lock(&rq->lock);
+		next = pick_next_task(rq, &fake_task, cookie);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
@@ -5295,7 +5457,7 @@
 		 * because !cpu_active at this point, which means load-balance
 		 * will not interfere. Also, stop-machine.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock(&rq->lock);
 		raw_spin_lock(&next->pi_lock);
 		raw_spin_lock(&rq->lock);
@@ -5356,127 +5518,13 @@
 	}
 }
 
-/*
- * migration_call - callback that gets triggered when a CPU is added.
- * Here we can start up the necessary migration thread for the new CPU.
- */
-static int
-migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static void set_cpu_rq_start_time(unsigned int cpu)
 {
-	int cpu = (long)hcpu;
-	unsigned long flags;
 	struct rq *rq = cpu_rq(cpu);
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-
-	case CPU_UP_PREPARE:
-		rq->calc_load_update = calc_load_update;
-		account_reset_rq(rq);
-		break;
-
-	case CPU_ONLINE:
-		/* Update our root-domain */
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		if (rq->rd) {
-			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-
-			set_rq_online(rq);
-		}
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DYING:
-		sched_ttwu_pending();
-		/* Update our root-domain */
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		if (rq->rd) {
-			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-			set_rq_offline(rq);
-		}
-		migrate_tasks(rq);
-		BUG_ON(rq->nr_running != 1); /* the migration thread */
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-		break;
-
-	case CPU_DEAD:
-		calc_load_migrate(rq);
-		break;
-#endif
-	}
-
-	update_max_interval();
-
-	return NOTIFY_OK;
-}
-
-/*
- * Register at high priority so that task migration (migrate_all_tasks)
- * happens before everything else.  This has to be lower priority than
- * the notifier in the perf_event subsystem, though.
- */
-static struct notifier_block migration_notifier = {
-	.notifier_call = migration_call,
-	.priority = CPU_PRI_MIGRATION,
-};
-
-static void set_cpu_rq_start_time(void)
-{
-	int cpu = smp_processor_id();
-	struct rq *rq = cpu_rq(cpu);
 	rq->age_stamp = sched_clock_cpu(cpu);
 }
 
-static int sched_cpu_active(struct notifier_block *nfb,
-				      unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		set_cpu_rq_start_time();
-		return NOTIFY_OK;
-
-	case CPU_DOWN_FAILED:
-		set_cpu_active(cpu, true);
-		return NOTIFY_OK;
-
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
-static int sched_cpu_inactive(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		set_cpu_active((long)hcpu, false);
-		return NOTIFY_OK;
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
-static int __init migration_init(void)
-{
-	void *cpu = (void *)(long)smp_processor_id();
-	int err;
-
-	/* Initialize migration for the boot CPU */
-	err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
-	BUG_ON(err == NOTIFY_BAD);
-	migration_call(&migration_notifier, CPU_ONLINE, cpu);
-	register_cpu_notifier(&migration_notifier);
-
-	/* Register cpu active notifiers */
-	cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
-	cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
-
-	return 0;
-}
-early_initcall(migration_init);
-
 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -6624,10 +6672,10 @@
 	init_numa_topology_type();
 }
 
-static void sched_domains_numa_masks_set(int cpu)
+static void sched_domains_numa_masks_set(unsigned int cpu)
 {
-	int i, j;
 	int node = cpu_to_node(cpu);
+	int i, j;
 
 	for (i = 0; i < sched_domains_numa_levels; i++) {
 		for (j = 0; j < nr_node_ids; j++) {
@@ -6637,51 +6685,20 @@
 	}
 }
 
-static void sched_domains_numa_masks_clear(int cpu)
+static void sched_domains_numa_masks_clear(unsigned int cpu)
 {
 	int i, j;
+
 	for (i = 0; i < sched_domains_numa_levels; i++) {
 		for (j = 0; j < nr_node_ids; j++)
 			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
 	}
 }
 
-/*
- * Update sched_domains_numa_masks[level][node] array when new cpus
- * are onlined.
- */
-static int sched_domains_numa_masks_update(struct notifier_block *nfb,
-					   unsigned long action,
-					   void *hcpu)
-{
-	int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-		sched_domains_numa_masks_set(cpu);
-		break;
-
-	case CPU_DEAD:
-		sched_domains_numa_masks_clear(cpu);
-		break;
-
-	default:
-		return NOTIFY_DONE;
-	}
-
-	return NOTIFY_OK;
-}
 #else
-static inline void sched_init_numa(void)
-{
-}
-
-static int sched_domains_numa_masks_update(struct notifier_block *nfb,
-					   unsigned long action,
-					   void *hcpu)
-{
-	return 0;
-}
+static inline void sched_init_numa(void) { }
+static void sched_domains_numa_masks_set(unsigned int cpu) { }
+static void sched_domains_numa_masks_clear(unsigned int cpu) { }
 #endif /* CONFIG_NUMA */
 
 static int __sdt_alloc(const struct cpumask *cpu_map)
@@ -7071,13 +7088,9 @@
  * If we come here as part of a suspend/resume, don't touch cpusets because we
  * want to restore it back to its original state upon resume anyway.
  */
-static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
-			     void *hcpu)
+static void cpuset_cpu_active(void)
 {
-	switch (action) {
-	case CPU_ONLINE_FROZEN:
-	case CPU_DOWN_FAILED_FROZEN:
-
+	if (cpuhp_tasks_frozen) {
 		/*
 		 * num_cpus_frozen tracks how many CPUs are involved in suspend
 		 * resume sequence. As long as this is not the last online
@@ -7087,35 +7100,25 @@
 		num_cpus_frozen--;
 		if (likely(num_cpus_frozen)) {
 			partition_sched_domains(1, NULL, NULL);
-			break;
+			return;
 		}
-
 		/*
 		 * This is the last CPU online operation. So fall through and
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
-
-	case CPU_ONLINE:
-		cpuset_update_active_cpus(true);
-		break;
-	default:
-		return NOTIFY_DONE;
 	}
-	return NOTIFY_OK;
+	cpuset_update_active_cpus(true);
 }
 
-static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
-			       void *hcpu)
+static int cpuset_cpu_inactive(unsigned int cpu)
 {
 	unsigned long flags;
-	long cpu = (long)hcpu;
 	struct dl_bw *dl_b;
 	bool overflow;
 	int cpus;
 
-	switch (action) {
-	case CPU_DOWN_PREPARE:
+	if (!cpuhp_tasks_frozen) {
 		rcu_read_lock_sched();
 		dl_b = dl_bw_of(cpu);
 
@@ -7127,19 +7130,120 @@
 		rcu_read_unlock_sched();
 
 		if (overflow)
-			return notifier_from_errno(-EBUSY);
+			return -EBUSY;
 		cpuset_update_active_cpus(false);
-		break;
-	case CPU_DOWN_PREPARE_FROZEN:
+	} else {
 		num_cpus_frozen++;
 		partition_sched_domains(1, NULL, NULL);
-		break;
-	default:
-		return NOTIFY_DONE;
 	}
-	return NOTIFY_OK;
+	return 0;
 }
 
+int sched_cpu_activate(unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	set_cpu_active(cpu, true);
+
+	if (sched_smp_initialized) {
+		sched_domains_numa_masks_set(cpu);
+		cpuset_cpu_active();
+	}
+
+	/*
+	 * Put the rq online, if not already. This happens:
+	 *
+	 * 1) In the early boot process, because we build the real domains
+	 *    after all cpus have been brought up.
+	 *
+	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
+	 *    domains.
+	 */
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_online(rq);
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	update_max_interval();
+
+	return 0;
+}
+
+int sched_cpu_deactivate(unsigned int cpu)
+{
+	int ret;
+
+	set_cpu_active(cpu, false);
+	/*
+	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
+	 * users of this state to go away such that all new such users will
+	 * observe it.
+	 *
+	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
+	 * not imply sync_sched(), so wait for both.
+	 *
+	 * Do the sync before parking smpboot threads to handle the rcu boost case.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT))
+		synchronize_rcu_mult(call_rcu, call_rcu_sched);
+	else
+		synchronize_rcu();
+
+	if (!sched_smp_initialized)
+		return 0;
+
+	ret = cpuset_cpu_inactive(cpu);
+	if (ret) {
+		set_cpu_active(cpu, true);
+		return ret;
+	}
+	sched_domains_numa_masks_clear(cpu);
+	return 0;
+}
+
+static void sched_rq_cpu_starting(unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	rq->calc_load_update = calc_load_update;
+	account_reset_rq(rq);
+	update_max_interval();
+}
+
+int sched_cpu_starting(unsigned int cpu)
+{
+	set_cpu_rq_start_time(cpu);
+	sched_rq_cpu_starting(cpu);
+	return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+int sched_cpu_dying(unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Handle pending wakeups and then migrate everything off */
+	sched_ttwu_pending();
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+	migrate_tasks(rq);
+	BUG_ON(rq->nr_running != 1);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	calc_load_migrate(rq);
+	update_max_interval();
+	nohz_balance_exit_idle(cpu);
+	hrtick_clear(rq);
+	return 0;
+}
+#endif
+
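
[Editor's note, not part of the patch] The new sched_cpu_activate()/sched_cpu_deactivate()/sched_cpu_starting()/sched_cpu_dying() callbacks follow the hotplug state machine convention of plain int (*fn)(unsigned int cpu) callbacks instead of notifier blocks. The scheduler's own callbacks are wired up statically elsewhere (not shown in this hunk); an ordinary subsystem would register comparable callbacks roughly as in this hedged sketch, assuming a kernel that provides cpuhp_setup_state() and a dynamic AP state:

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static int my_subsys_cpu_online(unsigned int cpu)
{
	/* Bring per-cpu state up; runs in process context and may sleep. */
	return 0;
}

static int my_subsys_cpu_offline(unsigned int cpu)
{
	/* Tear per-cpu state down before the CPU goes away. */
	return 0;
}

static int __init my_subsys_init(void)
{
	int ret;

	/* Dynamic online state: the online callback is invoked for every CPU
	 * that is already up, and for each CPU that comes up later. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mysubsys:online",
				my_subsys_cpu_online, my_subsys_cpu_offline);
	return ret < 0 ? ret : 0;
}
module_init(my_subsys_init);
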
 void __init sched_init_smp(void)
 {
 	cpumask_var_t non_isolated_cpus;
@@ -7161,12 +7265,6 @@
 		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
 	mutex_unlock(&sched_domains_mutex);
 
-	hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
-	hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
-	hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
-
-	init_hrtick();
-
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
 		BUG();
@@ -7175,7 +7273,16 @@
 
 	init_sched_rt_class();
 	init_sched_dl_class();
+	sched_smp_initialized = true;
 }
+
+static int __init migration_init(void)
+{
+	sched_rq_cpu_starting(smp_processor_id());
+	return 0;
+}
+early_initcall(migration_init);
+
 #else
 void __init sched_init_smp(void)
 {
@@ -7310,8 +7417,6 @@
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
 			rq->cpu_load[j] = 0;
 
-		rq->last_load_update_tick = jiffies;
-
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
 		rq->rd = NULL;
@@ -7330,12 +7435,13 @@
 
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
+		rq->last_load_update_tick = jiffies;
 		rq->nohz_flags = 0;
 #endif
 #ifdef CONFIG_NO_HZ_FULL
 		rq->last_sched_tick = 0;
 #endif
-#endif
+#endif /* CONFIG_SMP */
 		init_rq_hrtick(rq);
 		atomic_set(&rq->nr_iowait, 0);
 	}
@@ -7373,7 +7479,7 @@
 	if (cpu_isolated_map == NULL)
 		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 	idle_thread_set_boot_cpu();
-	set_cpu_rq_start_time();
+	set_cpu_rq_start_time(smp_processor_id());
 #endif
 	init_sched_fair_class();
 
@@ -7618,10 +7724,10 @@
 {
 	struct task_group *tg;
 	int queued, running;
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;
 
-	rq = task_rq_lock(tsk, &flags);
+	rq = task_rq_lock(tsk, &rf);
 
 	running = task_current(rq, tsk);
 	queued = task_on_rq_queued(tsk);
@@ -7653,7 +7759,7 @@
 	if (queued)
 		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
 
-	task_rq_unlock(rq, tsk, &flags);
+	task_rq_unlock(rq, tsk, &rf);
 }
 #endif /* CONFIG_CGROUP_SCHED */
 
@@ -7873,7 +7979,7 @@
 static int sched_rt_global_constraints(void)
 {
 	unsigned long flags;
-	int i, ret = 0;
+	int i;
 
 	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
 	for_each_possible_cpu(i) {
@@ -7885,7 +7991,7 @@
 	}
 	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
 
-	return ret;
+	return 0;
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index 4a81120..41f85c4 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -25,11 +25,22 @@
 	CPUACCT_STAT_NSTATS,
 };
 
+enum cpuacct_usage_index {
+	CPUACCT_USAGE_USER,	/* ... user mode */
+	CPUACCT_USAGE_SYSTEM,	/* ... kernel mode */
+
+	CPUACCT_USAGE_NRUSAGE,
+};
+
+struct cpuacct_usage {
+	u64	usages[CPUACCT_USAGE_NRUSAGE];
+};
+
 /* track cpu usage of a group of tasks and its child groups */
 struct cpuacct {
 	struct cgroup_subsys_state css;
 	/* cpuusage holds pointer to a u64-type object on every cpu */
-	u64 __percpu *cpuusage;
+	struct cpuacct_usage __percpu *cpuusage;
 	struct kernel_cpustat __percpu *cpustat;
 };
 
@@ -49,7 +60,7 @@
 	return css_ca(ca->css.parent);
 }
 
-static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
+static DEFINE_PER_CPU(struct cpuacct_usage, root_cpuacct_cpuusage);
 static struct cpuacct root_cpuacct = {
 	.cpustat	= &kernel_cpustat,
 	.cpuusage	= &root_cpuacct_cpuusage,
@@ -68,7 +79,7 @@
 	if (!ca)
 		goto out;
 
-	ca->cpuusage = alloc_percpu(u64);
+	ca->cpuusage = alloc_percpu(struct cpuacct_usage);
 	if (!ca->cpuusage)
 		goto out_free_ca;
 
@@ -96,20 +107,37 @@
 	kfree(ca);
 }
 
-static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
+static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
+				 enum cpuacct_usage_index index)
 {
-	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
+	struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 	u64 data;
 
+	/*
+	 * We allow index == CPUACCT_USAGE_NRUSAGE here to read
+	 * the sum of usages.
+	 */
+	BUG_ON(index > CPUACCT_USAGE_NRUSAGE);
+
 #ifndef CONFIG_64BIT
 	/*
 	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
 	 */
 	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
-	data = *cpuusage;
+#endif
+
+	if (index == CPUACCT_USAGE_NRUSAGE) {
+		int i = 0;
+
+		data = 0;
+		for (i = 0; i < CPUACCT_USAGE_NRUSAGE; i++)
+			data += cpuusage->usages[i];
+	} else {
+		data = cpuusage->usages[index];
+	}
+
+#ifndef CONFIG_64BIT
 	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
-#else
-	data = *cpuusage;
 #endif
 
 	return data;
@@ -117,69 +145,103 @@
 
 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 {
-	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
+	struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
+	int i;
 
 #ifndef CONFIG_64BIT
 	/*
 	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
 	 */
 	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
-	*cpuusage = val;
+#endif
+
+	for (i = 0; i < CPUACCT_USAGE_NRUSAGE; i++)
+		cpuusage->usages[i] = val;
+
+#ifndef CONFIG_64BIT
 	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
-#else
-	*cpuusage = val;
 #endif
 }
 
 /* return total cpu usage (in nanoseconds) of a group */
-static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
+static u64 __cpuusage_read(struct cgroup_subsys_state *css,
+			   enum cpuacct_usage_index index)
 {
 	struct cpuacct *ca = css_ca(css);
 	u64 totalcpuusage = 0;
 	int i;
 
-	for_each_present_cpu(i)
-		totalcpuusage += cpuacct_cpuusage_read(ca, i);
+	for_each_possible_cpu(i)
+		totalcpuusage += cpuacct_cpuusage_read(ca, i, index);
 
 	return totalcpuusage;
 }
 
+static u64 cpuusage_user_read(struct cgroup_subsys_state *css,
+			      struct cftype *cft)
+{
+	return __cpuusage_read(css, CPUACCT_USAGE_USER);
+}
+
+static u64 cpuusage_sys_read(struct cgroup_subsys_state *css,
+			     struct cftype *cft)
+{
+	return __cpuusage_read(css, CPUACCT_USAGE_SYSTEM);
+}
+
+static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+	return __cpuusage_read(css, CPUACCT_USAGE_NRUSAGE);
+}
+
 static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
 			  u64 val)
 {
 	struct cpuacct *ca = css_ca(css);
-	int err = 0;
-	int i;
+	int cpu;
 
 	/*
 	 * Only allow '0' here to do a reset.
 	 */
-	if (val) {
-		err = -EINVAL;
-		goto out;
-	}
+	if (val)
+		return -EINVAL;
 
-	for_each_present_cpu(i)
-		cpuacct_cpuusage_write(ca, i, 0);
+	for_each_possible_cpu(cpu)
+		cpuacct_cpuusage_write(ca, cpu, 0);
 
-out:
-	return err;
+	return 0;
 }
 
-static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
+static int __cpuacct_percpu_seq_show(struct seq_file *m,
+				     enum cpuacct_usage_index index)
 {
 	struct cpuacct *ca = css_ca(seq_css(m));
 	u64 percpu;
 	int i;
 
-	for_each_present_cpu(i) {
-		percpu = cpuacct_cpuusage_read(ca, i);
+	for_each_possible_cpu(i) {
+		percpu = cpuacct_cpuusage_read(ca, i, index);
 		seq_printf(m, "%llu ", (unsigned long long) percpu);
 	}
 	seq_printf(m, "\n");
 	return 0;
 }
 
+static int cpuacct_percpu_user_seq_show(struct seq_file *m, void *V)
+{
+	return __cpuacct_percpu_seq_show(m, CPUACCT_USAGE_USER);
+}
+
+static int cpuacct_percpu_sys_seq_show(struct seq_file *m, void *V)
+{
+	return __cpuacct_percpu_seq_show(m, CPUACCT_USAGE_SYSTEM);
+}
+
+static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
+{
+	return __cpuacct_percpu_seq_show(m, CPUACCT_USAGE_NRUSAGE);
+}
+
 static const char * const cpuacct_stat_desc[] = {
 	[CPUACCT_STAT_USER] = "user",
 	[CPUACCT_STAT_SYSTEM] = "system",
@@ -191,7 +253,7 @@
 	int cpu;
 	s64 val = 0;
 
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
 		val += kcpustat->cpustat[CPUTIME_USER];
 		val += kcpustat->cpustat[CPUTIME_NICE];
@@ -200,7 +262,7 @@
 	seq_printf(sf, "%s %lld\n", cpuacct_stat_desc[CPUACCT_STAT_USER], val);
 
 	val = 0;
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
 		val += kcpustat->cpustat[CPUTIME_SYSTEM];
 		val += kcpustat->cpustat[CPUTIME_IRQ];
@@ -220,10 +282,26 @@
 		.write_u64 = cpuusage_write,
 	},
 	{
+		.name = "usage_user",
+		.read_u64 = cpuusage_user_read,
+	},
+	{
+		.name = "usage_sys",
+		.read_u64 = cpuusage_sys_read,
+	},
+	{
 		.name = "usage_percpu",
 		.seq_show = cpuacct_percpu_seq_show,
 	},
 	{
+		.name = "usage_percpu_user",
+		.seq_show = cpuacct_percpu_user_seq_show,
+	},
+	{
+		.name = "usage_percpu_sys",
+		.seq_show = cpuacct_percpu_sys_seq_show,
+	},
+	{
 		.name = "stat",
 		.seq_show = cpuacct_stats_show,
 	},
@@ -238,10 +316,17 @@
 void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 {
 	struct cpuacct *ca;
+	int index = CPUACCT_USAGE_SYSTEM;
+	struct pt_regs *regs = task_pt_regs(tsk);
+
+	if (regs && user_mode(regs))
+		index = CPUACCT_USAGE_USER;
 
 	rcu_read_lock();
+
 	for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
-		*this_cpu_ptr(ca->cpuusage) += cputime;
+		this_cpu_ptr(ca->cpuusage)->usages[index] += cputime;
+
 	rcu_read_unlock();
 }
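For context, the new usage_user and usage_sys counters added above surface in the cpuacct cgroup as cpuacct.usage_user and cpuacct.usage_sys. A minimal userspace sketch for reading them, assuming a legacy (v1) cpuacct hierarchy mounted at /sys/fs/cgroup/cpuacct (the mount point is an example, not mandated by this patch):

#include <stdio.h>

int main(void)
{
	const char *files[] = {
		"/sys/fs/cgroup/cpuacct/cpuacct.usage_user",
		"/sys/fs/cgroup/cpuacct/cpuacct.usage_sys",
	};

	for (int i = 0; i < 2; i++) {
		unsigned long long ns = 0;
		FILE *f = fopen(files[i], "r");

		if (!f)
			continue;	/* file absent on kernels without this patch */
		if (fscanf(f, "%llu", &ns) == 1)
			printf("%s: %llu ns\n", files[i], ns);
		fclose(f);
	}
	return 0;
}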
 
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5a75b08..5be5882 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -103,10 +103,10 @@
 	const struct sched_dl_entity *dl_se = &p->dl;
 
 	if (later_mask &&
-	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
+	    cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) {
 		best_cpu = cpumask_any(later_mask);
 		goto out;
-	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
+	} else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) &&
 			dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
 		best_cpu = cpudl_maximum(cp);
 		if (later_mask)
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 928c4ba..1141954 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -14,24 +14,50 @@
 DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
 
 /**
- * cpufreq_set_update_util_data - Populate the CPU's update_util_data pointer.
+ * cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
  * @cpu: The CPU to set the pointer for.
  * @data: New pointer value.
+ * @func: Callback function to set for the CPU.
  *
- * Set and publish the update_util_data pointer for the given CPU.  That pointer
- * points to a struct update_util_data object containing a callback function
- * to call from cpufreq_update_util().  That function will be called from an RCU
- * read-side critical section, so it must not sleep.
+ * Set and publish the update_util_data pointer for the given CPU.
+ *
+ * The update_util_data pointer of @cpu is set to @data and the callback
+ * function pointer in the target struct update_util_data is set to @func.
+ * That function will be called by cpufreq_update_util() from RCU-sched
+ * read-side critical sections, so it must not sleep.  @data will always be
+ * passed to it as the first argument, which allows the function to get to the

+ * target update_util_data structure and its container.
+ *
+ * The update_util_data pointer of @cpu must be NULL when this function is
+ * called or it will WARN() and return with no effect.
+ */
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+			void (*func)(struct update_util_data *data, u64 time,
+				     unsigned long util, unsigned long max))
+{
+	if (WARN_ON(!data || !func))
+		return;
+
+	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
+		return;
+
+	data->func = func;
+	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
+}
+EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook);
+
+/**
+ * cpufreq_remove_update_util_hook - Clear the CPU's update_util_data pointer.
+ * @cpu: The CPU to clear the pointer for.
+ *
+ * Clear the update_util_data pointer for the given CPU.
  *
  * Callers must use RCU-sched callbacks to free any memory that might be
  * accessed via the old update_util_data pointer or invoke synchronize_sched()
  * right after this function to avoid use-after-free.
  */
-void cpufreq_set_update_util_data(int cpu, struct update_util_data *data)
+void cpufreq_remove_update_util_hook(int cpu)
 {
-	if (WARN_ON(data && !data->func))
-		return;
-
-	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
+	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
 }
-EXPORT_SYMBOL_GPL(cpufreq_set_update_util_data);
+EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
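A hedged sketch of how a governor is expected to pair these two hooks, following the contract described in the kerneldoc above. The my_gov_* names are hypothetical, and the include of linux/sched.h for the hook declarations is an assumption about this kernel version:

#include <linux/sched.h>
#include <linux/percpu.h>

struct my_gov_cpu {
	struct update_util_data update_util;
	/* ... per-CPU governor state ... */
};

static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_cpu);

/* Called from RCU-sched read-side critical sections: must not sleep. */
static void my_gov_update(struct update_util_data *data, u64 time,
			  unsigned long util, unsigned long max)
{
	struct my_gov_cpu *gc = container_of(data, struct my_gov_cpu, update_util);

	/* ... use gc, util and max to decide on a frequency ... */
	(void)gc;
}

static void my_gov_start_cpu(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(my_gov_cpu, cpu).update_util,
				     my_gov_update);
}

static void my_gov_stop_cpu(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
	synchronize_sched();	/* wait for in-flight callbacks before freeing state */
}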
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
new file mode 100644
index 0000000..154ae3a
--- /dev/null
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -0,0 +1,530 @@
+/*
+ * CPUFreq governor based on scheduler-provided CPU utilization data.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <trace/events/power.h>
+
+#include "sched.h"
+
+struct sugov_tunables {
+	struct gov_attr_set attr_set;
+	unsigned int rate_limit_us;
+};
+
+struct sugov_policy {
+	struct cpufreq_policy *policy;
+
+	struct sugov_tunables *tunables;
+	struct list_head tunables_hook;
+
+	raw_spinlock_t update_lock;  /* For shared policies */
+	u64 last_freq_update_time;
+	s64 freq_update_delay_ns;
+	unsigned int next_freq;
+
+	/* The next fields are only needed if fast switch cannot be used. */
+	struct irq_work irq_work;
+	struct work_struct work;
+	struct mutex work_lock;
+	bool work_in_progress;
+
+	bool need_freq_update;
+};
+
+struct sugov_cpu {
+	struct update_util_data update_util;
+	struct sugov_policy *sg_policy;
+
+	/* The fields below are only needed when sharing a policy. */
+	unsigned long util;
+	unsigned long max;
+	u64 last_update;
+};
+
+static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
+
+/************************ Governor internals ***********************/
+
+static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+{
+	s64 delta_ns;
+
+	if (sg_policy->work_in_progress)
+		return false;
+
+	if (unlikely(sg_policy->need_freq_update)) {
+		sg_policy->need_freq_update = false;
+		/*
+		 * This happens when limits change, so forget the previous
+		 * next_freq value and force an update.
+		 */
+		sg_policy->next_freq = UINT_MAX;
+		return true;
+	}
+
+	delta_ns = time - sg_policy->last_freq_update_time;
+	return delta_ns >= sg_policy->freq_update_delay_ns;
+}
+
+static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
+				unsigned int next_freq)
+{
+	struct cpufreq_policy *policy = sg_policy->policy;
+
+	sg_policy->last_freq_update_time = time;
+
+	if (policy->fast_switch_enabled) {
+		if (sg_policy->next_freq == next_freq) {
+			trace_cpu_frequency(policy->cur, smp_processor_id());
+			return;
+		}
+		sg_policy->next_freq = next_freq;
+		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
+		if (next_freq == CPUFREQ_ENTRY_INVALID)
+			return;
+
+		policy->cur = next_freq;
+		trace_cpu_frequency(next_freq, smp_processor_id());
+	} else if (sg_policy->next_freq != next_freq) {
+		sg_policy->next_freq = next_freq;
+		sg_policy->work_in_progress = true;
+		irq_work_queue(&sg_policy->irq_work);
+	}
+}
+
+/**
+ * get_next_freq - Compute a new frequency for a given cpufreq policy.
+ * @policy: cpufreq policy object to compute the new frequency for.
+ * @util: Current CPU utilization.
+ * @max: CPU capacity.
+ *
+ * If the utilization is frequency-invariant, choose the new frequency to be
+ * proportional to it, that is
+ *
+ * next_freq = C * max_freq * util / max
+ *
+ * Otherwise, approximate the would-be frequency-invariant utilization by
+ * util_raw * (curr_freq / max_freq) which leads to
+ *
+ * next_freq = C * curr_freq * util_raw / max
+ *
+ * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
+ */
+static unsigned int get_next_freq(struct cpufreq_policy *policy,
+				  unsigned long util, unsigned long max)
+{
+	unsigned int freq = arch_scale_freq_invariant() ?
+				policy->cpuinfo.max_freq : policy->cur;
+
+	return (freq + (freq >> 2)) * util / max;
+}
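A standalone arithmetic sketch of the mapping above (the function and sample numbers are illustrative, not taken from the patch): with C = 1.25, util/max = 0.8 lands on the reference frequency itself.

#include <stdio.h>

static unsigned int example_next_freq(unsigned int freq,
				      unsigned long util, unsigned long max)
{
	/* freq + freq/4 == 1.25 * freq, i.e. C = 1.25 */
	return (freq + (freq >> 2)) * util / max;
}

int main(void)
{
	printf("%u\n", example_next_freq(2000000, 819, 1024));	/* ~2000000 kHz */
	printf("%u\n", example_next_freq(2000000, 512, 1024));	/* 1250000 kHz */
	return 0;
}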
+
+static void sugov_update_single(struct update_util_data *hook, u64 time,
+				unsigned long util, unsigned long max)
+{
+	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+	struct cpufreq_policy *policy = sg_policy->policy;
+	unsigned int next_f;
+
+	if (!sugov_should_update_freq(sg_policy, time))
+		return;
+
+	next_f = util == ULONG_MAX ? policy->cpuinfo.max_freq :
+			get_next_freq(policy, util, max);
+	sugov_update_commit(sg_policy, time, next_f);
+}
+
+static unsigned int sugov_next_freq_shared(struct sugov_policy *sg_policy,
+					   unsigned long util, unsigned long max)
+{
+	struct cpufreq_policy *policy = sg_policy->policy;
+	unsigned int max_f = policy->cpuinfo.max_freq;
+	u64 last_freq_update_time = sg_policy->last_freq_update_time;
+	unsigned int j;
+
+	if (util == ULONG_MAX)
+		return max_f;
+
+	for_each_cpu(j, policy->cpus) {
+		struct sugov_cpu *j_sg_cpu;
+		unsigned long j_util, j_max;
+		s64 delta_ns;
+
+		if (j == smp_processor_id())
+			continue;
+
+		j_sg_cpu = &per_cpu(sugov_cpu, j);
+		/*
+		 * If the CPU utilization was last updated before the previous
+		 * frequency update and the time elapsed between the last update
+		 * of the CPU utilization and the last frequency update is long
+		 * enough, don't take the CPU into account as it probably is
+		 * idle now.
+		 */
+		delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+		if (delta_ns > TICK_NSEC)
+			continue;
+
+		j_util = j_sg_cpu->util;
+		if (j_util == ULONG_MAX)
+			return max_f;
+
+		j_max = j_sg_cpu->max;
+		if (j_util * max > j_max * util) {
+			util = j_util;
+			max = j_max;
+		}
+	}
+
+	return get_next_freq(policy, util, max);
+}
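A userspace illustration of the selection rule above, with made-up per-CPU samples: the CPU whose util/max ratio is largest ends up driving the shared policy, and the comparison cross-multiplies to avoid division, as the kernel code does.

#include <stdio.h>

int main(void)
{
	unsigned long util[] = { 300, 410, 120 };
	unsigned long max[]  = { 1024, 512, 1024 };
	unsigned long best_util = util[0], best_max = max[0];

	for (int j = 1; j < 3; j++) {
		/* j_util / j_max > best_util / best_max, without dividing */
		if (util[j] * best_max > max[j] * best_util) {
			best_util = util[j];
			best_max = max[j];
		}
	}
	printf("winning ratio: %lu/%lu\n", best_util, best_max);	/* 410/512 */
	return 0;
}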
+
+static void sugov_update_shared(struct update_util_data *hook, u64 time,
+				unsigned long util, unsigned long max)
+{
+	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+	unsigned int next_f;
+
+	raw_spin_lock(&sg_policy->update_lock);
+
+	sg_cpu->util = util;
+	sg_cpu->max = max;
+	sg_cpu->last_update = time;
+
+	if (sugov_should_update_freq(sg_policy, time)) {
+		next_f = sugov_next_freq_shared(sg_policy, util, max);
+		sugov_update_commit(sg_policy, time, next_f);
+	}
+
+	raw_spin_unlock(&sg_policy->update_lock);
+}
+
+static void sugov_work(struct work_struct *work)
+{
+	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
+
+	mutex_lock(&sg_policy->work_lock);
+	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
+				CPUFREQ_RELATION_L);
+	mutex_unlock(&sg_policy->work_lock);
+
+	sg_policy->work_in_progress = false;
+}
+
+static void sugov_irq_work(struct irq_work *irq_work)
+{
+	struct sugov_policy *sg_policy;
+
+	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
+	schedule_work_on(smp_processor_id(), &sg_policy->work);
+}
+
+/************************** sysfs interface ************************/
+
+static struct sugov_tunables *global_tunables;
+static DEFINE_MUTEX(global_tunables_lock);
+
+static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
+{
+	return container_of(attr_set, struct sugov_tunables, attr_set);
+}
+
+static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return sprintf(buf, "%u\n", tunables->rate_limit_us);
+}
+
+static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
+				   size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+	struct sugov_policy *sg_policy;
+	unsigned int rate_limit_us;
+
+	if (kstrtouint(buf, 10, &rate_limit_us))
+		return -EINVAL;
+
+	tunables->rate_limit_us = rate_limit_us;
+
+	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
+		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
+
+	return count;
+}
+
+static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
+
+static struct attribute *sugov_attributes[] = {
+	&rate_limit_us.attr,
+	NULL
+};
+
+static struct kobj_type sugov_tunables_ktype = {
+	.default_attrs = sugov_attributes,
+	.sysfs_ops = &governor_sysfs_ops,
+};
+
+/********************** cpufreq governor interface *********************/
+
+static struct cpufreq_governor schedutil_gov;
+
+static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
+{
+	struct sugov_policy *sg_policy;
+
+	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
+	if (!sg_policy)
+		return NULL;
+
+	sg_policy->policy = policy;
+	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+	INIT_WORK(&sg_policy->work, sugov_work);
+	mutex_init(&sg_policy->work_lock);
+	raw_spin_lock_init(&sg_policy->update_lock);
+	return sg_policy;
+}
+
+static void sugov_policy_free(struct sugov_policy *sg_policy)
+{
+	mutex_destroy(&sg_policy->work_lock);
+	kfree(sg_policy);
+}
+
+static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
+{
+	struct sugov_tunables *tunables;
+
+	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+	if (tunables) {
+		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
+		if (!have_governor_per_policy())
+			global_tunables = tunables;
+	}
+	return tunables;
+}
+
+static void sugov_tunables_free(struct sugov_tunables *tunables)
+{
+	if (!have_governor_per_policy())
+		global_tunables = NULL;
+
+	kfree(tunables);
+}
+
+static int sugov_init(struct cpufreq_policy *policy)
+{
+	struct sugov_policy *sg_policy;
+	struct sugov_tunables *tunables;
+	unsigned int lat;
+	int ret = 0;
+
+	/* State should be equivalent to EXIT */
+	if (policy->governor_data)
+		return -EBUSY;
+
+	sg_policy = sugov_policy_alloc(policy);
+	if (!sg_policy)
+		return -ENOMEM;
+
+	mutex_lock(&global_tunables_lock);
+
+	if (global_tunables) {
+		if (WARN_ON(have_governor_per_policy())) {
+			ret = -EINVAL;
+			goto free_sg_policy;
+		}
+		policy->governor_data = sg_policy;
+		sg_policy->tunables = global_tunables;
+
+		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
+		goto out;
+	}
+
+	tunables = sugov_tunables_alloc(sg_policy);
+	if (!tunables) {
+		ret = -ENOMEM;
+		goto free_sg_policy;
+	}
+
+	tunables->rate_limit_us = LATENCY_MULTIPLIER;
+	lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+	if (lat)
+		tunables->rate_limit_us *= lat;
+
+	policy->governor_data = sg_policy;
+	sg_policy->tunables = tunables;
+
+	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
+				   get_governor_parent_kobj(policy), "%s",
+				   schedutil_gov.name);
+	if (ret)
+		goto fail;
+
+ out:
+	mutex_unlock(&global_tunables_lock);
+
+	cpufreq_enable_fast_switch(policy);
+	return 0;
+
+ fail:
+	policy->governor_data = NULL;
+	sugov_tunables_free(tunables);
+
+ free_sg_policy:
+	mutex_unlock(&global_tunables_lock);
+
+	sugov_policy_free(sg_policy);
+	pr_err("cpufreq: schedutil governor initialization failed (error %d)\n", ret);
+	return ret;
+}
+
+static int sugov_exit(struct cpufreq_policy *policy)
+{
+	struct sugov_policy *sg_policy = policy->governor_data;
+	struct sugov_tunables *tunables = sg_policy->tunables;
+	unsigned int count;
+
+	cpufreq_disable_fast_switch(policy);
+
+	mutex_lock(&global_tunables_lock);
+
+	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
+	policy->governor_data = NULL;
+	if (!count)
+		sugov_tunables_free(tunables);
+
+	mutex_unlock(&global_tunables_lock);
+
+	sugov_policy_free(sg_policy);
+	return 0;
+}
+
+static int sugov_start(struct cpufreq_policy *policy)
+{
+	struct sugov_policy *sg_policy = policy->governor_data;
+	unsigned int cpu;
+
+	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
+	sg_policy->last_freq_update_time = 0;
+	sg_policy->next_freq = UINT_MAX;
+	sg_policy->work_in_progress = false;
+	sg_policy->need_freq_update = false;
+
+	for_each_cpu(cpu, policy->cpus) {
+		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+
+		sg_cpu->sg_policy = sg_policy;
+		if (policy_is_shared(policy)) {
+			sg_cpu->util = ULONG_MAX;
+			sg_cpu->max = 0;
+			sg_cpu->last_update = 0;
+			cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+						     sugov_update_shared);
+		} else {
+			cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+						     sugov_update_single);
+		}
+	}
+	return 0;
+}
+
+static int sugov_stop(struct cpufreq_policy *policy)
+{
+	struct sugov_policy *sg_policy = policy->governor_data;
+	unsigned int cpu;
+
+	for_each_cpu(cpu, policy->cpus)
+		cpufreq_remove_update_util_hook(cpu);
+
+	synchronize_sched();
+
+	irq_work_sync(&sg_policy->irq_work);
+	cancel_work_sync(&sg_policy->work);
+	return 0;
+}
+
+static int sugov_limits(struct cpufreq_policy *policy)
+{
+	struct sugov_policy *sg_policy = policy->governor_data;
+
+	if (!policy->fast_switch_enabled) {
+		mutex_lock(&sg_policy->work_lock);
+
+		if (policy->max < policy->cur)
+			__cpufreq_driver_target(policy, policy->max,
+						CPUFREQ_RELATION_H);
+		else if (policy->min > policy->cur)
+			__cpufreq_driver_target(policy, policy->min,
+						CPUFREQ_RELATION_L);
+
+		mutex_unlock(&sg_policy->work_lock);
+	}
+
+	sg_policy->need_freq_update = true;
+	return 0;
+}
+
+int sugov_governor(struct cpufreq_policy *policy, unsigned int event)
+{
+	if (event == CPUFREQ_GOV_POLICY_INIT) {
+		return sugov_init(policy);
+	} else if (policy->governor_data) {
+		switch (event) {
+		case CPUFREQ_GOV_POLICY_EXIT:
+			return sugov_exit(policy);
+		case CPUFREQ_GOV_START:
+			return sugov_start(policy);
+		case CPUFREQ_GOV_STOP:
+			return sugov_stop(policy);
+		case CPUFREQ_GOV_LIMITS:
+			return sugov_limits(policy);
+		}
+	}
+	return -EINVAL;
+}
+
+static struct cpufreq_governor schedutil_gov = {
+	.name = "schedutil",
+	.governor = sugov_governor,
+	.owner = THIS_MODULE,
+};
+
+static int __init sugov_module_init(void)
+{
+	return cpufreq_register_governor(&schedutil_gov);
+}
+
+static void __exit sugov_module_exit(void)
+{
+	cpufreq_unregister_governor(&schedutil_gov);
+}
+
+MODULE_AUTHOR("Rafael J. Wysocki <rafael.j.wysocki@intel.com>");
+MODULE_DESCRIPTION("Utilization-based CPU frequency selection");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+	return &schedutil_gov;
+}
+
+fs_initcall(sugov_module_init);
+#else
+module_init(sugov_module_init);
+#endif
+module_exit(sugov_module_exit);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 981fcd7..11e9705 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -103,11 +103,11 @@
 		if (skip)
 			continue;
 
-		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+		if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids)
 			continue;
 
 		if (lowest_mask) {
-			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+			cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);
 
 			/*
 			 * We have to ensure that we have at least one bit
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index affd97e..fcb7f02 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -134,7 +134,7 @@
 {
 	struct task_struct *p = dl_task_of(dl_se);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		dl_rq->dl_nr_migratory++;
 
 	update_dl_migration(dl_rq);
@@ -144,7 +144,7 @@
 {
 	struct task_struct *p = dl_task_of(dl_se);
 
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		dl_rq->dl_nr_migratory--;
 
 	update_dl_migration(dl_rq);
@@ -591,10 +591,10 @@
 						     struct sched_dl_entity,
 						     dl_timer);
 	struct task_struct *p = dl_task_of(dl_se);
-	unsigned long flags;
+	struct rq_flags rf;
 	struct rq *rq;
 
-	rq = task_rq_lock(p, &flags);
+	rq = task_rq_lock(p, &rf);
 
 	/*
 	 * The task might have changed its scheduling policy to something
@@ -670,14 +670,14 @@
 		 * Nothing relies on rq->lock after this, so it's safe to drop
 		 * rq->lock.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		push_dl_task(rq);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, rf.cookie);
 	}
 #endif
 
 unlock:
-	task_rq_unlock(rq, p, &flags);
+	task_rq_unlock(rq, p, &rf);
 
 	/*
 	 * This can free the task_struct, including this hrtimer, do not touch
@@ -717,10 +717,6 @@
 	if (!dl_task(curr) || !on_dl_rq(dl_se))
 		return;
 
-	/* Kick cpufreq (see the comment in linux/cpufreq.h). */
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_trigger_update(rq_clock(rq));
-
 	/*
 	 * Consumed budget is computed considering the time as
 	 * observed by schedulable tasks (excluding time spent
@@ -736,6 +732,10 @@
 		return;
 	}
 
+	/* kick cpufreq (see the comment in linux/cpufreq.h). */
+	if (cpu_of(rq) == smp_processor_id())
+		cpufreq_trigger_update(rq_clock(rq));
+
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
 
@@ -966,7 +966,7 @@
 
 	enqueue_dl_entity(&p->dl, pi_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1040,9 +1040,9 @@
 	 * try to make it stay here, it might be important.
 	 */
 	if (unlikely(dl_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(curr) < 2 ||
 	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
-	    (p->nr_cpus_allowed > 1)) {
+	    (tsk_nr_cpus_allowed(p) > 1)) {
 		int target = find_later_rq(p);
 
 		if (target != -1 &&
@@ -1063,7 +1063,7 @@
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
+	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
 	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
 		return;
 
@@ -1071,7 +1071,7 @@
 	 * p is migratable, so let's not schedule it and
 	 * see if it is pushed or pulled somewhere else.
 	 */
-	if (p->nr_cpus_allowed != 1 &&
+	if (tsk_nr_cpus_allowed(p) != 1 &&
 	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
 		return;
 
@@ -1125,7 +1125,8 @@
 	return rb_entry(left, struct sched_dl_entity, rb_node);
 }
 
-struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
+struct task_struct *
+pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct sched_dl_entity *dl_se;
 	struct task_struct *p;
@@ -1140,9 +1141,9 @@
 		 * disabled avoiding further scheduler activity on it and we're
 		 * being very careful to re-start the picking loop.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		pull_dl_task(rq);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a stop task can slip in, in which case we need to
@@ -1185,7 +1186,7 @@
 {
 	update_curr_dl(rq);
 
-	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
+	if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_dl_task(rq, p);
 }
 
@@ -1286,7 +1287,7 @@
 	if (unlikely(!later_mask))
 		return -1;
 
-	if (task->nr_cpus_allowed == 1)
+	if (tsk_nr_cpus_allowed(task) == 1)
 		return -1;
 
 	/*
@@ -1392,8 +1393,9 @@
 		if (double_lock_balance(rq, later_rq)) {
 			if (unlikely(task_rq(task) != rq ||
 				     !cpumask_test_cpu(later_rq->cpu,
-				                       &task->cpus_allowed) ||
+						       tsk_cpus_allowed(task)) ||
 				     task_running(rq, task) ||
+				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
 				double_unlock_balance(rq, later_rq);
 				later_rq = NULL;
@@ -1431,7 +1433,7 @@
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
 
 	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!dl_task(p));
@@ -1470,7 +1472,7 @@
 	 */
 	if (dl_task(rq->curr) &&
 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
-	    rq->curr->nr_cpus_allowed > 1) {
+	    tsk_nr_cpus_allowed(rq->curr) > 1) {
 		resched_curr(rq);
 		return 0;
 	}
@@ -1617,9 +1619,9 @@
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    tsk_nr_cpus_allowed(p) > 1 &&
 	    dl_task(rq->curr) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
 		push_dl_tasks(rq);
 	}
@@ -1723,7 +1725,7 @@
 
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
+		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
 			queue_push_tasks(rq);
 #else
 		if (dl_task(rq->curr))
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 4fbc3bd..cf905f6 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -626,15 +626,16 @@
 #undef P
 #undef PN
 
-#ifdef CONFIG_SCHEDSTATS
-#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
-#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
-
 #ifdef CONFIG_SMP
+#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
 	P64(avg_idle);
 	P64(max_idle_balance_cost);
+#undef P64
 #endif
 
+#ifdef CONFIG_SCHEDSTATS
+#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
+
 	if (schedstat_enabled()) {
 		P(yld_count);
 		P(sched_count);
@@ -644,7 +645,6 @@
 	}
 
 #undef P
-#undef P64
 #endif
 	spin_lock_irqsave(&sched_debug_lock, flags);
 	print_cfs_stats(m, cpu);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0fe30e6..218f8e8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -204,7 +204,7 @@
  *   OR
  * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
  *
- * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
+ * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
  * we're guaranteed shift stays positive because inv_weight is guaranteed to
  * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
  *
@@ -682,17 +682,68 @@
 	sa->period_contrib = 1023;
 	sa->load_avg = scale_load_down(se->load.weight);
 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
-	sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
-	sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+	/*
+	 * At this point, util_avg won't be used in select_task_rq_fair anyway
+	 */
+	sa->util_avg = 0;
+	sa->util_sum = 0;
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 
+/*
+ * With new tasks being created, their initial util_avgs are extrapolated
+ * based on the cfs_rq's current util_avg:
+ *
+ *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
+ *
+ * However, in many cases, the above util_avg does not give a desired
+ * value. Moreover, the sum of the util_avgs may be divergent, such
+ * as when the series is a harmonic series.
+ *
+ * To solve this problem, we also cap the util_avg of successive tasks to
+ * only 1/2 of the remaining utilization budget:
+ *
+ *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
+ *
+ * where n denotes the nth task.
+ *
+ * For example, the simplest series from the beginning would look like:
+ *
+ *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
+ * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
+ *
+ * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
+ * if util_avg > util_avg_cap.
+ */
+void post_init_entity_util_avg(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct sched_avg *sa = &se->avg;
+	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+
+	if (cap > 0) {
+		if (cfs_rq->avg.util_avg != 0) {
+			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
+			sa->util_avg /= (cfs_rq->avg.load_avg + 1);
+
+			if (sa->util_avg > cap)
+				sa->util_avg = cap;
+		} else {
+			sa->util_avg = cap;
+		}
+		sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+	}
+}
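A toy model of the capping rule documented above, assuming SCHED_CAPACITY_SCALE = 1024 and an initially idle cfs_rq; it reproduces the 512, 256, 128, ... series from the comment by handing each new task half of the remaining utilization budget.

#include <stdio.h>

int main(void)
{
	long cfs_util = 0;

	for (int n = 1; n <= 7; n++) {
		long cap = (1024 - cfs_util) / 2;	/* util_avg_cap for the nth task */

		cfs_util += cap;
		printf("task %d util_avg: %3ld, cfs_rq util_avg: %4ld\n",
		       n, cap, cfs_util);
	}
	return 0;
}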
+
 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
 }
+void post_init_entity_util_avg(struct sched_entity *se)
+{
+}
 #endif
 
 /*
@@ -2437,10 +2488,12 @@
 	update_load_sub(&cfs_rq->load, se->load.weight);
 	if (!parent_entity(se))
 		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
+#ifdef CONFIG_SMP
 	if (entity_is_task(se)) {
 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
 		list_del_init(&se->group_node);
 	}
+#endif
 	cfs_rq->nr_running--;
 }
 
@@ -2550,6 +2603,16 @@
 };
 
 /*
+ * Precomputed \Sum y^k (1 <= k <= n, where n%32 == 0). Values are rolled down
+ * to lower integers. See Documentation/scheduler/sched-avg.txt for how these
+ * were generated:
+ */
+static const u32 __accumulated_sum_N32[] = {
+	    0, 23371, 35056, 40899, 43820, 45281,
+	46011, 46376, 46559, 46650, 46696, 46719,
+};
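A rough floating-point generator for the table above; this is only an approximation, since the kernel derives the entries with fixed-point arithmetic and rolls them down, so its values come out slightly lower.

#include <math.h>
#include <stdio.h>

int main(void)
{
	double y = pow(0.5, 1.0 / 32.0);	/* y^32 = 0.5 */
	double sum = 0.0;
	int k = 0;

	for (int i = 0; i <= 11; i++) {
		while (k < 32 * i)
			sum += 1024.0 * pow(y, ++k);
		printf("%5.0f%s", sum, i == 11 ? "\n" : ", ");
	}
	return 0;
}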
+
+/*
  * Approximate:
  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
  */
@@ -2597,22 +2660,13 @@
 	else if (unlikely(n >= LOAD_AVG_MAX_N))
 		return LOAD_AVG_MAX;
 
-	/* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
-	do {
-		contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
-		contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
-
-		n -= LOAD_AVG_PERIOD;
-	} while (n > LOAD_AVG_PERIOD);
-
+	/* Since n < LOAD_AVG_MAX_N, n/LOAD_AVG_PERIOD < 11 */
+	contrib = __accumulated_sum_N32[n/LOAD_AVG_PERIOD];
+	n %= LOAD_AVG_PERIOD;
 	contrib = decay_load(contrib, n);
 	return contrib + runnable_avg_yN_sum[n];
 }
 
-#if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
-#error "load tracking assumes 2^10 as unit"
-#endif
-
 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
 /*
@@ -2821,55 +2875,11 @@
 
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 
-/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
-static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 {
-	struct sched_avg *sa = &cfs_rq->avg;
-	int decayed, removed = 0;
-
-	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
-		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
-		sa->load_avg = max_t(long, sa->load_avg - r, 0);
-		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
-		removed = 1;
-	}
-
-	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
-		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
-		sa->util_avg = max_t(long, sa->util_avg - r, 0);
-		sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
-	}
-
-	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
-		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
-
-#ifndef CONFIG_64BIT
-	smp_wmb();
-	cfs_rq->load_last_update_time_copy = sa->last_update_time;
-#endif
-
-	return decayed || removed;
-}
-
-/* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int update_tg)
-{
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	u64 now = cfs_rq_clock_task(cfs_rq);
 	struct rq *rq = rq_of(cfs_rq);
 	int cpu = cpu_of(rq);
 
-	/*
-	 * Track task load average for carrying it to new CPU after migrated, and
-	 * track group sched_entity load average for task_h_load calc in migration
-	 */
-	__update_load_avg(now, cpu, &se->avg,
-			  se->on_rq * scale_load_down(se->load.weight),
-			  cfs_rq->curr == se, NULL);
-
-	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
-		update_tg_load_avg(cfs_rq, 0);
-
 	if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) {
 		unsigned long max = rq->cpu_capacity_orig;
 
@@ -2894,6 +2904,61 @@
 	}
 }
 
+/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+{
+	struct sched_avg *sa = &cfs_rq->avg;
+	int decayed, removed_load = 0, removed_util = 0;
+
+	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
+		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
+		sa->load_avg = max_t(long, sa->load_avg - r, 0);
+		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+		removed_load = 1;
+	}
+
+	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
+		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
+		sa->util_avg = max_t(long, sa->util_avg - r, 0);
+		sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
+		removed_util = 1;
+	}
+
+	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
+
+#ifndef CONFIG_64BIT
+	smp_wmb();
+	cfs_rq->load_last_update_time_copy = sa->last_update_time;
+#endif
+
+	if (update_freq && (decayed || removed_util))
+		cfs_rq_util_change(cfs_rq);
+
+	return decayed || removed_load;
+}
+
+/* Update task and its cfs_rq load average */
+static inline void update_load_avg(struct sched_entity *se, int update_tg)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 now = cfs_rq_clock_task(cfs_rq);
+	struct rq *rq = rq_of(cfs_rq);
+	int cpu = cpu_of(rq);
+
+	/*
+	 * Track task load average for carrying it to new CPU after migrated, and
+	 * track group sched_entity load average for task_h_load calc in migration
+	 */
+	__update_load_avg(now, cpu, &se->avg,
+			  se->on_rq * scale_load_down(se->load.weight),
+			  cfs_rq->curr == se, NULL);
+
+	if (update_cfs_rq_load_avg(now, cfs_rq, true) && update_tg)
+		update_tg_load_avg(cfs_rq, 0);
+}
+
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (!sched_feat(ATTACH_AGE_LOAD))
@@ -2919,6 +2984,8 @@
 	cfs_rq->avg.load_sum += se->avg.load_sum;
 	cfs_rq->avg.util_avg += se->avg.util_avg;
 	cfs_rq->avg.util_sum += se->avg.util_sum;
+
+	cfs_rq_util_change(cfs_rq);
 }
 
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -2931,6 +2998,8 @@
 	cfs_rq->avg.load_sum = max_t(s64,  cfs_rq->avg.load_sum - se->avg.load_sum, 0);
 	cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
 	cfs_rq->avg.util_sum = max_t(s32,  cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+
+	cfs_rq_util_change(cfs_rq);
 }
 
 /* Add the load generated by se into cfs_rq's load average */
@@ -2948,7 +3017,7 @@
 			cfs_rq->curr == se, NULL);
 	}
 
-	decayed = update_cfs_rq_load_avg(now, cfs_rq);
+	decayed = update_cfs_rq_load_avg(now, cfs_rq, !migrated);
 
 	cfs_rq->runnable_load_avg += sa->load_avg;
 	cfs_rq->runnable_load_sum += sa->load_sum;
@@ -3030,7 +3099,14 @@
 
 #else /* CONFIG_SMP */
 
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline void update_load_avg(struct sched_entity *se, int not_used)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct rq *rq = rq_of(cfs_rq);
+
+	cpufreq_trigger_update(rq_clock(rq));
+}
+
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
@@ -3178,10 +3254,41 @@
 #endif
 }
 
+
+/*
+ * MIGRATION
+ *
+ *	dequeue
+ *	  update_curr()
+ *	    update_min_vruntime()
+ *	  vruntime -= min_vruntime
+ *
+ *	enqueue
+ *	  update_curr()
+ *	    update_min_vruntime()
+ *	  vruntime += min_vruntime
+ *
+ * this way the vruntime transition between RQs is done when both
+ * min_vruntime are up-to-date.
+ *
+ * WAKEUP (remote)
+ *
+ *	->migrate_task_rq_fair() (p->state == TASK_WAKING)
+ *	  vruntime -= min_vruntime
+ *
+ *	enqueue
+ *	  update_curr()
+ *	    update_min_vruntime()
+ *	  vruntime += min_vruntime
+ *
+ * this way we may not have the most up-to-date min_vruntime on the
+ * originating CPU, but we do have an up-to-date min_vruntime on the
+ * destination CPU.
+ */
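A toy illustration of the renormalisation described above, with made-up numbers: while the task is in flight its vruntime is kept relative to the source rq's min_vruntime, and enqueue re-bases it on the destination rq, preserving the task's lag.

#include <stdio.h>

int main(void)
{
	unsigned long long src_min = 1000, dst_min = 5000;
	unsigned long long vruntime = 1300;	/* task queued on the source rq */

	vruntime -= src_min;	/* dequeue / migrate_task_rq_fair() */
	vruntime += dst_min;	/* enqueue_entity() on the destination rq */

	printf("lag preserved: %llu\n", vruntime - dst_min);	/* 300 */
	return 0;
}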
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-	bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING);
+	bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
 	bool curr = cfs_rq->curr == se;
 
 	/*
@@ -3195,7 +3302,9 @@
 
 	/*
 	 * Otherwise, renormalise after, such that we're placed at the current
-	 * moment in time, instead of some random moment in the past.
+	 * moment in time, instead of some random moment in the past. Being
+	 * placed in the past could significantly boost this task at the
+	 * expense of fairness to existing tasks.
 	 */
 	if (renorm && !curr)
 		se->vruntime += cfs_rq->min_vruntime;
@@ -4423,7 +4532,7 @@
 }
 
 #ifdef CONFIG_SMP
-
+#ifdef CONFIG_NO_HZ_COMMON
 /*
  * per rq 'load' array crap; XXX kill this.
  */
@@ -4489,13 +4598,13 @@
 	}
 	return load;
 }
+#endif /* CONFIG_NO_HZ_COMMON */
 
 /**
- * __update_cpu_load - update the rq->cpu_load[] statistics
+ * __cpu_load_update - update the rq->cpu_load[] statistics
  * @this_rq: The rq to update statistics for
  * @this_load: The current load
  * @pending_updates: The number of missed updates
- * @active: !0 for NOHZ_FULL
  *
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
@@ -4524,12 +4633,12 @@
  *   load[i]_n = (1 - 1/2^i)^n * load[i]_0
  *
  * see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra
- * term. See the @active paramter.
+ * term.
  */
-static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
-			      unsigned long pending_updates, int active)
+static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
+			    unsigned long pending_updates)
 {
-	unsigned long tickless_load = active ? this_rq->cpu_load[0] : 0;
+	unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
 	int i, scale;
 
 	this_rq->nr_load_updates++;
@@ -4542,6 +4651,7 @@
 		/* scale is effectively 1 << i now, and >> i divides by scale */
 
 		old_load = this_rq->cpu_load[i];
+#ifdef CONFIG_NO_HZ_COMMON
 		old_load = decay_load_missed(old_load, pending_updates - 1, i);
 		if (tickless_load) {
 			old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
@@ -4552,6 +4662,7 @@
 			 */
 			old_load += tickless_load;
 		}
+#endif
 		new_load = this_load;
 		/*
 		 * Round up the averaging division if load is increasing. This
@@ -4574,10 +4685,23 @@
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
-static void __update_cpu_load_nohz(struct rq *this_rq,
-				   unsigned long curr_jiffies,
-				   unsigned long load,
-				   int active)
+/*
+ * There is no sane way to deal with nohz on smp when using jiffies because the
+ * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
+ * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
+ *
+ * Therefore we need to avoid the delta approach from the regular tick when
+ * possible since that would seriously skew the load calculation. This is why we
+ * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
+ * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
+ * loop exit, nohz_idle_balance, nohz full exit...)
+ *
+ * This means we might still be one tick off for nohz periods.
+ */
+
+static void cpu_load_update_nohz(struct rq *this_rq,
+				 unsigned long curr_jiffies,
+				 unsigned long load)
 {
 	unsigned long pending_updates;
 
@@ -4589,28 +4713,15 @@
 		 * In the NOHZ_FULL case, we were non-idle, we should consider
 		 * its weighted load.
 		 */
-		__update_cpu_load(this_rq, load, pending_updates, active);
+		cpu_load_update(this_rq, load, pending_updates);
 	}
 }
 
 /*
- * There is no sane way to deal with nohz on smp when using jiffies because the
- * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
- * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
- *
- * Therefore we cannot use the delta approach from the regular tick since that
- * would seriously skew the load calculation. However we'll make do for those
- * updates happening while idle (nohz_idle_balance) or coming out of idle
- * (tick_nohz_idle_exit).
- *
- * This means we might still be one tick off for nohz periods.
- */
-
-/*
  * Called from nohz_idle_balance() to update the load ratings before doing the
  * idle balance.
  */
-static void update_cpu_load_idle(struct rq *this_rq)
+static void cpu_load_update_idle(struct rq *this_rq)
 {
 	/*
 	 * bail if there's load or we're actually up-to-date.
@@ -4618,38 +4729,71 @@
 	if (weighted_cpuload(cpu_of(this_rq)))
 		return;
 
-	__update_cpu_load_nohz(this_rq, READ_ONCE(jiffies), 0, 0);
+	cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
 }
 
 /*
- * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
+ * Record CPU load on nohz entry so we know the tickless load to account
+ * on nohz exit. cpu_load[0] then happens to be updated more frequently
+ * than the other cpu_load[idx], but that should be fine because cpu_load
+ * readers shouldn't rely on synchronized cpu_load[*] updates.
  */
-void update_cpu_load_nohz(int active)
+void cpu_load_update_nohz_start(void)
 {
 	struct rq *this_rq = this_rq();
+
+	/*
+	 * This is all lockless but should be fine. If weighted_cpuload changes
+	 * concurrently we'll exit nohz. And the cpu_load write can race with
+	 * cpu_load_update_idle(), but both updaters would be writing the same.
+	 */
+	this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq));
+}
+
+/*
+ * Account the tickless load at the end of a nohz frame.
+ */
+void cpu_load_update_nohz_stop(void)
+{
 	unsigned long curr_jiffies = READ_ONCE(jiffies);
-	unsigned long load = active ? weighted_cpuload(cpu_of(this_rq)) : 0;
+	struct rq *this_rq = this_rq();
+	unsigned long load;
 
 	if (curr_jiffies == this_rq->last_load_update_tick)
 		return;
 
+	load = weighted_cpuload(cpu_of(this_rq));
 	raw_spin_lock(&this_rq->lock);
-	__update_cpu_load_nohz(this_rq, curr_jiffies, load, active);
+	update_rq_clock(this_rq);
+	cpu_load_update_nohz(this_rq, curr_jiffies, load);
 	raw_spin_unlock(&this_rq->lock);
 }
-#endif /* CONFIG_NO_HZ */
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline void cpu_load_update_nohz(struct rq *this_rq,
+					unsigned long curr_jiffies,
+					unsigned long load) { }
+#endif /* CONFIG_NO_HZ_COMMON */
+
+static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
+{
+#ifdef CONFIG_NO_HZ_COMMON
+	/* See the mess around cpu_load_update_nohz(). */
+	this_rq->last_load_update_tick = READ_ONCE(jiffies);
+#endif
+	cpu_load_update(this_rq, load, 1);
+}
 
 /*
  * Called from scheduler_tick()
  */
-void update_cpu_load_active(struct rq *this_rq)
+void cpu_load_update_active(struct rq *this_rq)
 {
 	unsigned long load = weighted_cpuload(cpu_of(this_rq));
-	/*
-	 * See the mess around update_cpu_load_idle() / update_cpu_load_nohz().
-	 */
-	this_rq->last_load_update_tick = jiffies;
-	__update_cpu_load(this_rq, load, 1, 1);
+
+	if (tick_nohz_tick_stopped())
+		cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
+	else
+		cpu_load_update_periodic(this_rq, load);
 }
 
 /*
@@ -4707,46 +4851,6 @@
 	return 0;
 }
 
-static void record_wakee(struct task_struct *p)
-{
-	/*
-	 * Rough decay (wiping) for cost saving, don't worry
-	 * about the boundary, really active task won't care
-	 * about the loss.
-	 */
-	if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
-		current->wakee_flips >>= 1;
-		current->wakee_flip_decay_ts = jiffies;
-	}
-
-	if (current->last_wakee != p) {
-		current->last_wakee = p;
-		current->wakee_flips++;
-	}
-}
-
-static void task_waking_fair(struct task_struct *p)
-{
-	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	u64 min_vruntime;
-
-#ifndef CONFIG_64BIT
-	u64 min_vruntime_copy;
-
-	do {
-		min_vruntime_copy = cfs_rq->min_vruntime_copy;
-		smp_rmb();
-		min_vruntime = cfs_rq->min_vruntime;
-	} while (min_vruntime != min_vruntime_copy);
-#else
-	min_vruntime = cfs_rq->min_vruntime;
-#endif
-
-	se->vruntime -= min_vruntime;
-	record_wakee(p);
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * effective_load() calculates the load change as seen from the root_task_group
@@ -4862,17 +4966,39 @@
 
 #endif
 
+static void record_wakee(struct task_struct *p)
+{
+	/*
+	 * Only decay a single time; tasks that have less than 1 wakeup per
+	 * jiffy will not have built up many flips.
+	 */
+	if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
+		current->wakee_flips >>= 1;
+		current->wakee_flip_decay_ts = jiffies;
+	}
+
+	if (current->last_wakee != p) {
+		current->last_wakee = p;
+		current->wakee_flips++;
+	}
+}
+
 /*
  * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
+ *
  * A waker of many should wake a different task than the one last awakened
- * at a frequency roughly N times higher than one of its wakees.  In order
- * to determine whether we should let the load spread vs consolodating to
- * shared cache, we look for a minimum 'flip' frequency of llc_size in one
- * partner, and a factor of lls_size higher frequency in the other.  With
- * both conditions met, we can be relatively sure that the relationship is
- * non-monogamous, with partner count exceeding socket size.  Waker/wakee
- * being client/server, worker/dispatcher, interrupt source or whatever is
- * irrelevant, spread criteria is apparent partner count exceeds socket size.
+ * at a frequency roughly N times higher than one of its wakees.
+ *
+ * In order to determine whether we should let the load spread vs consolidating
+ * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
+ * partner, and a factor of llc_size higher frequency in the other.
+ *
+ * With both conditions met, we can be relatively sure that the relationship is
+ * non-monogamous, with partner count exceeding socket size.
+ *
+ * Waker/wakee being client/server, worker/dispatcher, interrupt source or
+ * whatever is irrelevant; the spread criterion is simply that the apparent
+ * partner count exceeds the socket size.
  */
 static int wake_wide(struct task_struct *p)
 {
@@ -5177,8 +5303,10 @@
 	int want_affine = 0;
 	int sync = wake_flags & WF_SYNC;
 
-	if (sd_flag & SD_BALANCE_WAKE)
+	if (sd_flag & SD_BALANCE_WAKE) {
+		record_wakee(p);
 		want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
+	}
 
 	rcu_read_lock();
 	for_each_domain(cpu, tmp) {
@@ -5258,6 +5386,32 @@
 static void migrate_task_rq_fair(struct task_struct *p)
 {
 	/*
+	 * As blocked tasks retain absolute vruntime the migration needs to
+	 * deal with this by subtracting the old and adding the new
+	 * min_vruntime -- the latter is done by enqueue_entity() when placing
+	 * the task on the new runqueue.
+	 */
+	if (p->state == TASK_WAKING) {
+		struct sched_entity *se = &p->se;
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+		u64 min_vruntime;
+
+#ifndef CONFIG_64BIT
+		u64 min_vruntime_copy;
+
+		do {
+			min_vruntime_copy = cfs_rq->min_vruntime_copy;
+			smp_rmb();
+			min_vruntime = cfs_rq->min_vruntime;
+		} while (min_vruntime != min_vruntime_copy);
+#else
+		min_vruntime = cfs_rq->min_vruntime;
+#endif
+
+		se->vruntime -= min_vruntime;
+	}
+
+	/*
 	 * We are supposed to update the task to "current" time, then it's up to date
 	 * and ready to go to new CPU/cfs_rq. But we have difficulty in getting
 	 * what current time is, so simply throw away the out-of-date time. This
@@ -5440,7 +5594,7 @@
 }
 
 static struct task_struct *
-pick_next_task_fair(struct rq *rq, struct task_struct *prev)
+pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct sched_entity *se;
@@ -5553,9 +5707,9 @@
 	 * further scheduler activity on it and we're being very careful to
 	 * re-start the picking loop.
 	 */
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, cookie);
 	new_tasks = idle_balance(rq);
-	lockdep_pin_lock(&rq->lock);
+	lockdep_repin_lock(&rq->lock, cookie);
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
@@ -5654,7 +5808,7 @@
  *   W_i,0 = \Sum_j w_i,j                                             (2)
  *
  * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
- * is derived from the nice value as per prio_to_weight[].
+ * is derived from the nice value as per sched_prio_to_weight[].
  *
  * The weight average is an exponential decay average of the instantaneous
  * weight:
@@ -6156,7 +6310,7 @@
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
 			update_tg_load_avg(cfs_rq, 0);
 	}
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -6217,7 +6371,7 @@
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -6626,6 +6780,9 @@
 	if (!(env->sd->flags & SD_ASYM_PACKING))
 		return true;
 
+	/* No ASYM_PACKING if target cpu is already busy */
+	if (env->idle == CPU_NOT_IDLE)
+		return true;
 	/*
 	 * ASYM_PACKING needs to move all the work to the lowest
 	 * numbered CPUs in the group, therefore mark all groups
@@ -6635,7 +6792,8 @@
 		if (!sds->busiest)
 			return true;
 
-		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
+		/* Prefer to pull work from the highest-numbered CPU's group */
+		if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
 			return true;
 	}
 
@@ -6781,6 +6939,9 @@
 	if (!(env->sd->flags & SD_ASYM_PACKING))
 		return 0;
 
+	if (env->idle == CPU_NOT_IDLE)
+		return 0;
+
 	if (!sds->busiest)
 		return 0;
 
@@ -6889,9 +7050,10 @@
 	}
 
 	/*
-	 * In the presence of smp nice balancing, certain scenarios can have
-	 * max load less than avg load(as we skip the groups at or below
-	 * its cpu_capacity, while calculating max_load..)
+	 * The avg load of the busiest sg can be less than, and the avg load of
+	 * the local sg can be greater than, the avg load across all sgs of the
+	 * sd, because avg load factors in sg capacity and sgs with smaller
+	 * group_type are skipped when updating the busiest sg:
 	 */
 	if (busiest->avg_load <= sds->avg_load ||
 	    local->avg_load >= sds->avg_load) {
@@ -6904,11 +7066,12 @@
 	 */
 	if (busiest->group_type == group_overloaded &&
 	    local->group_type   == group_overloaded) {
-		load_above_capacity = busiest->sum_nr_running *
-					SCHED_LOAD_SCALE;
-		if (load_above_capacity > busiest->group_capacity)
+		load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
+		if (load_above_capacity > busiest->group_capacity) {
 			load_above_capacity -= busiest->group_capacity;
-		else
+			load_above_capacity *= NICE_0_LOAD;
+			load_above_capacity /= busiest->group_capacity;
+		} else
 			load_above_capacity = ~0UL;
 	}
 
@@ -6916,9 +7079,8 @@
 	 * We're trying to get all the cpus to the average_load, so we don't
 	 * want to push ourselves above the average load, nor do we wish to
 	 * reduce the max loaded cpu below the average load. At the same time,
-	 * we also don't want to reduce the group load below the group capacity
-	 * (so that we can implement power-savings policies etc). Thus we look
-	 * for the minimum possible imbalance.
+	 * we also don't want to reduce the group load below the group
+	 * capacity. Thus we look for the minimum possible imbalance.
 	 */
 	max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
 
@@ -6942,10 +7104,7 @@
 
 /**
  * find_busiest_group - Returns the busiest group within the sched_domain
- * if there is an imbalance. If there isn't an imbalance, and
- * the user has opted for power-savings, it returns a group whose
- * CPUs can be put to idle by rebalancing those tasks elsewhere, if
- * such a group exists.
+ * if there is an imbalance.
  *
  * Also calculates the amount of weighted load which should be moved
  * to restore balance.
@@ -6953,9 +7112,6 @@
  * @env: The load balancing environment.
  *
  * Return:	- The busiest group if imbalance exists.
- *		- If no imbalance and user has opted for power-savings balance,
- *		   return the least loaded group whose CPUs can be
- *		   put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *find_busiest_group(struct lb_env *env)
 {
@@ -6973,8 +7129,7 @@
 	busiest = &sds.busiest_stat;
 
 	/* ASYM feature bypasses nice load balance check */
-	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
-	    check_asym_packing(env, &sds))
+	if (check_asym_packing(env, &sds))
 		return sds.busiest;
 
 	/* There is no busy sibling group to pull tasks from */
@@ -7399,10 +7554,7 @@
 					&busiest->active_balance_work);
 			}
 
-			/*
-			 * We've kicked active balancing, reset the failure
-			 * counter.
-			 */
+			/* We've kicked active balancing, force task migration. */
 			sd->nr_balance_failed = sd->cache_nice_tries+1;
 		}
 	} else
@@ -7637,10 +7789,13 @@
 		schedstat_inc(sd, alb_count);
 
 		p = detach_one_task(&env);
-		if (p)
+		if (p) {
 			schedstat_inc(sd, alb_pushed);
-		else
+			/* Active balancing done, reset the failure counter. */
+			sd->nr_balance_failed = 0;
+		} else {
 			schedstat_inc(sd, alb_failed);
+		}
 	}
 	rcu_read_unlock();
 out_unlock:
@@ -7711,7 +7866,7 @@
 	return;
 }
 
-static inline void nohz_balance_exit_idle(int cpu)
+void nohz_balance_exit_idle(unsigned int cpu)
 {
 	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
 		/*
@@ -7784,18 +7939,6 @@
 	atomic_inc(&nohz.nr_cpus);
 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
-
-static int sched_ilb_notifier(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DYING:
-		nohz_balance_exit_idle(smp_processor_id());
-		return NOTIFY_OK;
-	default:
-		return NOTIFY_DONE;
-	}
-}
 #endif
 
 static DEFINE_SPINLOCK(balancing);
@@ -7957,7 +8100,7 @@
 		if (time_after_eq(jiffies, rq->next_balance)) {
 			raw_spin_lock_irq(&rq->lock);
 			update_rq_clock(rq);
-			update_cpu_load_idle(rq);
+			cpu_load_update_idle(rq);
 			raw_spin_unlock_irq(&rq->lock);
 			rebalance_domains(rq, CPU_IDLE);
 		}
@@ -8382,6 +8525,7 @@
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 		init_entity_runnable_average(se);
+		post_init_entity_util_avg(se);
 	}
 
 	return 1;
@@ -8538,7 +8682,6 @@
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
 
-	.task_waking		= task_waking_fair,
 	.task_dead		= task_dead_fair,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
@@ -8600,7 +8743,6 @@
 #ifdef CONFIG_NO_HZ_COMMON
 	nohz.next_balance = jiffies;
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
-	cpu_notifier(sched_ilb_notifier, 0);
 #endif
 #endif /* SMP */
 
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 47ce949..2ce5458 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -24,7 +24,7 @@
 }
 
 static struct task_struct *
-pick_next_task_idle(struct rq *rq, struct task_struct *prev)
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	put_prev_task(rq, prev);
 
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index ef71590..b0b93fd 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -99,10 +99,13 @@
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
-	load *= exp;
-	load += active * (FIXED_1 - exp);
-	load += 1UL << (FSHIFT - 1);
-	return load >> FSHIFT;
+	unsigned long newload;
+
+	newload = load * exp + active * (FIXED_1 - exp);
+	if (active >= load)
+		newload += FIXED_1-1;
+
+	return newload / FIXED_1;
 }
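A userspace sketch of the fixed-point averaging above, using the kernel's FSHIFT of 11 and its 1-minute decay factor EXP_1 = 1884 (the sample 'active' values are made up); the rounding term is only added when the load is rising, which is what this change fixes.

#include <stdio.h>

#define FSHIFT	11
#define FIXED_1	(1UL << FSHIFT)

static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	unsigned long newload;

	newload = load * exp + active * (FIXED_1 - exp);
	if (active >= load)
		newload += FIXED_1 - 1;	/* round up only when rising */

	return newload / FIXED_1;
}

int main(void)
{
	unsigned long load = 0, active = 1 << FSHIFT;	/* one runnable task */

	for (int i = 0; i < 5; i++) {
		load = calc_load(load, 1884, active);
		printf("sample %d: %lu.%02lu\n", i, load >> FSHIFT,
		       ((load & (FIXED_1 - 1)) * 100) >> FSHIFT);
	}
	return 0;
}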
 
 #ifdef CONFIG_NO_HZ_COMMON
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c41ea7a..d5690b7 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -334,7 +334,7 @@
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total++;
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		rt_rq->rt_nr_migratory++;
 
 	update_rt_migration(rt_rq);
@@ -351,7 +351,7 @@
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total--;
-	if (p->nr_cpus_allowed > 1)
+	if (tsk_nr_cpus_allowed(p) > 1)
 		rt_rq->rt_nr_migratory--;
 
 	update_rt_migration(rt_rq);
@@ -953,14 +953,14 @@
 	if (curr->sched_class != &rt_sched_class)
 		return;
 
-	/* Kick cpufreq (see the comment in linux/cpufreq.h). */
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_trigger_update(rq_clock(rq));
-
 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 	if (unlikely((s64)delta_exec <= 0))
 		return;
 
+	/* Kick cpufreq (see the comment in linux/cpufreq.h). */
+	if (cpu_of(rq) == smp_processor_id())
+		cpufreq_trigger_update(rq_clock(rq));
+
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
 
@@ -1324,7 +1324,7 @@
 
 	enqueue_rt_entity(rt_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1413,7 +1413,7 @@
 	 * will have to sort it out.
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
-	    (curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(curr) < 2 ||
 	     curr->prio <= p->prio)) {
 		int target = find_lowest_rq(p);
 
@@ -1437,7 +1437,7 @@
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
-	if (rq->curr->nr_cpus_allowed == 1 ||
+	if (tsk_nr_cpus_allowed(rq->curr) == 1 ||
 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
 		return;
 
@@ -1445,7 +1445,7 @@
 	 * p is migratable, so let's not schedule it and
 	 * see if it is pushed or pulled somewhere else.
 	 */
-	if (p->nr_cpus_allowed != 1
+	if (tsk_nr_cpus_allowed(p) != 1
 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
 		return;
 
@@ -1524,7 +1524,7 @@
 }
 
 static struct task_struct *
-pick_next_task_rt(struct rq *rq, struct task_struct *prev)
+pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
@@ -1536,9 +1536,9 @@
 		 * disabled avoiding further scheduler activity on it and we're
 		 * being very careful to re-start the picking loop.
 		 */
-		lockdep_unpin_lock(&rq->lock);
+		lockdep_unpin_lock(&rq->lock, cookie);
 		pull_rt_task(rq);
-		lockdep_pin_lock(&rq->lock);
+		lockdep_repin_lock(&rq->lock, cookie);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
 		 * means a dl or stop task can slip in, in which case we need
@@ -1579,7 +1579,7 @@
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1629,7 +1629,7 @@
 	if (unlikely(!lowest_mask))
 		return -1;
 
-	if (task->nr_cpus_allowed == 1)
+	if (tsk_nr_cpus_allowed(task) == 1)
 		return -1; /* No other targets possible */
 
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1729,6 +1729,7 @@
 				     !cpumask_test_cpu(lowest_rq->cpu,
 						       tsk_cpus_allowed(task)) ||
 				     task_running(rq, task) ||
+				     !rt_task(task) ||
 				     !task_on_rq_queued(task))) {
 
 				double_unlock_balance(rq, lowest_rq);
@@ -1761,7 +1762,7 @@
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->nr_cpus_allowed <= 1);
+	BUG_ON(tsk_nr_cpus_allowed(p) <= 1);
 
 	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!rt_task(p));
@@ -2121,9 +2122,9 @@
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    p->nr_cpus_allowed > 1 &&
+	    tsk_nr_cpus_allowed(p) > 1 &&
 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
-	    (rq->curr->nr_cpus_allowed < 2 ||
+	    (tsk_nr_cpus_allowed(rq->curr) < 2 ||
 	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
@@ -2196,7 +2197,7 @@
 	 */
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
 			queue_push_tasks(rq);
 #else
 		if (p->prio < rq->curr->prio)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ec2e8d2..72f1f30 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -31,9 +31,9 @@
 extern long calc_load_fold_active(struct rq *this_rq);
 
 #ifdef CONFIG_SMP
-extern void update_cpu_load_active(struct rq *this_rq);
+extern void cpu_load_update_active(struct rq *this_rq);
 #else
-static inline void update_cpu_load_active(struct rq *this_rq) { }
+static inline void cpu_load_update_active(struct rq *this_rq) { }
 #endif
 
 /*
@@ -49,25 +49,32 @@
  * and does not change the user-interface for setting shares/weights.
  *
  * We increase resolution only if we have enough bits to allow this increased
- * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
- * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
- * increased costs.
+ * resolution (i.e. 64bit). The costs for increasing resolution when 32bit are
+ * pretty high and the returns do not justify the increased costs.
+ *
+ * Really only required when CONFIG_FAIR_GROUP_SCHED is also set, but to
+ * increase coverage and consistency always enable it on 64bit platforms.
  */
-#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
-# define SCHED_LOAD_RESOLUTION	10
-# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
-# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
+#ifdef CONFIG_64BIT
+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
+# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
+# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
 #else
-# define SCHED_LOAD_RESOLUTION	0
+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
 # define scale_load(w)		(w)
 # define scale_load_down(w)	(w)
 #endif
 
-#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
-#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
-
-#define NICE_0_LOAD		SCHED_LOAD_SCALE
-#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
+/*
+ * Task weight (visible to users) and its load (invisible to users) have
+ * independent resolution, but they should be well calibrated. We use
+ * scale_load() and scale_load_down(w) to convert between them. The
+ * following must be true:
+ *
+ *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
+ *
+ */
+#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
 
 /*
  * Single value that decides SCHED_DEADLINE internal math precision.
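
The calibration requirement spelled out in the comment above can be checked with a small standalone sketch (editor's illustration; it assumes SCHED_FIXEDPOINT_SHIFT == 10 and a nice-0 weight of 1024, and mirrors the 64-bit branch of the macros):

	#include <assert.h>

	#define SCHED_FIXEDPOINT_SHIFT	10
	#define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
	#define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
	#define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
	#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)

	int main(void)
	{
		long nice_0_weight = 1024;	/* sched_prio_to_weight[20], i.e. nice 0 */

		/* The invariant quoted above: */
		assert(scale_load(nice_0_weight) == NICE_0_LOAD);
		/* User-visible weights survive the round trip: */
		assert(scale_load_down(scale_load(nice_0_weight)) == nice_0_weight);
		return 0;
	}
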
@@ -585,11 +592,13 @@
 #endif
 	#define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
-	unsigned long last_load_update_tick;
 #ifdef CONFIG_NO_HZ_COMMON
+#ifdef CONFIG_SMP
+	unsigned long last_load_update_tick;
+#endif /* CONFIG_SMP */
 	u64 nohz_stamp;
 	unsigned long nohz_flags;
-#endif
+#endif /* CONFIG_NO_HZ_COMMON */
 #ifdef CONFIG_NO_HZ_FULL
 	unsigned long last_sched_tick;
 #endif
@@ -854,7 +863,7 @@
 struct sched_group_capacity {
 	atomic_t ref;
 	/*
-	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
+	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
 	 * for a single CPU.
 	 */
 	unsigned int capacity;
@@ -1159,7 +1168,7 @@
  *
  * ENQUEUE_HEAD      - place at front of runqueue (tail if not specified)
  * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
- * ENQUEUE_WAKING    - sched_class::task_waking was called
+ * ENQUEUE_MIGRATED  - the task was migrated during wakeup
  *
  */
 
@@ -1174,9 +1183,9 @@
 #define ENQUEUE_HEAD		0x08
 #define ENQUEUE_REPLENISH	0x10
 #ifdef CONFIG_SMP
-#define ENQUEUE_WAKING		0x20
+#define ENQUEUE_MIGRATED	0x20
 #else
-#define ENQUEUE_WAKING		0x00
+#define ENQUEUE_MIGRATED	0x00
 #endif
 
 #define RETRY_TASK		((void *)-1UL)
@@ -1200,14 +1209,14 @@
 	 * tasks.
 	 */
 	struct task_struct * (*pick_next_task) (struct rq *rq,
-						struct task_struct *prev);
+						struct task_struct *prev,
+						struct pin_cookie cookie);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
 	void (*migrate_task_rq)(struct task_struct *p);
 
-	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
@@ -1313,6 +1322,7 @@
 unsigned long to_ratio(u64 period, u64 runtime);
 
 extern void init_entity_runnable_average(struct sched_entity *se);
+extern void post_init_entity_util_avg(struct sched_entity *se);
 
 #ifdef CONFIG_NO_HZ_FULL
 extern bool sched_can_stop_tick(struct rq *rq);
@@ -1448,86 +1458,32 @@
 static inline void sched_avg_update(struct rq *rq) { }
 #endif
 
-/*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
+struct rq_flags {
+	unsigned long flags;
+	struct pin_cookie cookie;
+};
 
-	lockdep_assert_held(&p->pi_lock);
-
-	for (;;) {
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			lockdep_pin_lock(&rq->lock);
-			return rq;
-		}
-		raw_spin_unlock(&rq->lock);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(rq->lock);
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 	__acquires(p->pi_lock)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
+	__acquires(rq->lock);
 
-	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		/*
-		 *	move_queued_task()		task_rq_lock()
-		 *
-		 *	ACQUIRE (rq->lock)
-		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
-		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
-		 *	[S] ->cpu = new_cpu		[L] task_rq()
-		 *					[L] ->on_rq
-		 *	RELEASE (rq->lock)
-		 *
-		 * If we observe the old cpu in task_rq_lock, the acquire of
-		 * the old rq->lock will fully serialize against the stores.
-		 *
-		 * If we observe the new cpu in task_rq_lock, the acquire will
-		 * pair with the WMB to ensure we must then also see migrating.
-		 */
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
-			lockdep_pin_lock(&rq->lock);
-			return rq;
-		}
-		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-static inline void __task_rq_unlock(struct rq *rq)
+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
 	__releases(rq->lock)
 {
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, rf->cookie);
 	raw_spin_unlock(&rq->lock);
 }
 
 static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 	__releases(rq->lock)
 	__releases(p->pi_lock)
 {
-	lockdep_unpin_lock(&rq->lock);
+	lockdep_unpin_lock(&rq->lock, rf->cookie);
 	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
 
 #ifdef CONFIG_SMP
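
Usage sketch (editor's illustration, assuming the task_rq_lock()/task_rq_unlock() definitions that now live in kernel/sched/core.c): callers carry the saved IRQ flags and the lockdep pin cookie together in one struct rq_flags instead of a bare unsigned long:

	static void example_update_task(struct task_struct *p)
	{
		struct rq_flags rf;
		struct rq *rq;

		rq = task_rq_lock(p, &rf);	/* takes p->pi_lock and rq->lock, pins rq->lock */
		/* ... operate on rq and p while the task cannot migrate ... */
		task_rq_unlock(rq, p, &rf);	/* unpins and drops both locks */
	}
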
@@ -1743,6 +1699,10 @@
 };
 
 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
+
+extern void nohz_balance_exit_idle(unsigned int cpu);
+#else
+static inline void nohz_balance_exit_idle(unsigned int cpu) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -1842,6 +1802,14 @@
 static inline void cpufreq_trigger_update(u64 time) {}
 #endif /* CONFIG_CPU_FREQ */
 
+#ifdef arch_scale_freq_capacity
+#ifndef arch_scale_freq_invariant
+#define arch_scale_freq_invariant()	(true)
+#endif
+#else /* arch_scale_freq_capacity */
+#define arch_scale_freq_invariant()	(false)
+#endif
+
 static inline void account_reset_rq(struct rq *rq)
 {
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index cbc67da..604297a 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -24,7 +24,7 @@
 }
 
 static struct task_struct *
-pick_next_task_stop(struct rq *rq, struct task_struct *prev)
+pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
 {
 	struct task_struct *stop = rq->stop;
 
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index e1e5a35..7002796 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -513,24 +513,17 @@
  * To be fully secure this must be combined with rlimit
  * to limit the stack allocations too.
  */
-static int mode1_syscalls[] = {
+static const int mode1_syscalls[] = {
 	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
 	0, /* null terminated */
 };
 
-#ifdef CONFIG_COMPAT
-static int mode1_syscalls_32[] = {
-	__NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
-	0, /* null terminated */
-};
-#endif
-
 static void __secure_computing_strict(int this_syscall)
 {
-	int *syscall_whitelist = mode1_syscalls;
+	const int *syscall_whitelist = mode1_syscalls;
 #ifdef CONFIG_COMPAT
 	if (in_compat_syscall())
-		syscall_whitelist = mode1_syscalls_32;
+		syscall_whitelist = get_compat_mode1_syscalls();
 #endif
 	do {
 		if (*syscall_whitelist == this_syscall)
@@ -915,7 +908,7 @@
 
 	fprog = filter->prog->orig_prog;
 	if (!fprog) {
-		/* This must be a new non-cBPF filter, since we save every
+		/* This must be a new non-cBPF filter, since we save
 		 * every cBPF filter's orig_prog above when
 		 * CONFIG_CHECKPOINT_RESTORE is enabled.
 		 */
diff --git a/kernel/signal.c b/kernel/signal.c
index aa9bf00..ab122a2 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3099,12 +3099,14 @@
 
 	oss.ss_sp = (void __user *) current->sas_ss_sp;
 	oss.ss_size = current->sas_ss_size;
-	oss.ss_flags = sas_ss_flags(sp);
+	oss.ss_flags = sas_ss_flags(sp) |
+		(current->sas_ss_flags & SS_FLAG_BITS);
 
 	if (uss) {
 		void __user *ss_sp;
 		size_t ss_size;
-		int ss_flags;
+		unsigned ss_flags;
+		int ss_mode;
 
 		error = -EFAULT;
 		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
@@ -3119,18 +3121,13 @@
 		if (on_sig_stack(sp))
 			goto out;
 
+		ss_mode = ss_flags & ~SS_FLAG_BITS;
 		error = -EINVAL;
-		/*
-		 * Note - this code used to test ss_flags incorrectly:
-		 *  	  old code may have been written using ss_flags==0
-		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
-		 *	  way that worked) - this fix preserves that older
-		 *	  mechanism.
-		 */
-		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
+		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
+				ss_mode != 0)
 			goto out;
 
-		if (ss_flags == SS_DISABLE) {
+		if (ss_mode == SS_DISABLE) {
 			ss_size = 0;
 			ss_sp = NULL;
 		} else {
@@ -3141,6 +3138,7 @@
 
 		current->sas_ss_sp = (unsigned long) ss_sp;
 		current->sas_ss_size = ss_size;
+		current->sas_ss_flags = ss_flags;
 	}
 
 	error = 0;
@@ -3171,9 +3169,14 @@
 int __save_altstack(stack_t __user *uss, unsigned long sp)
 {
 	struct task_struct *t = current;
-	return  __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
-		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
+	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
+		__put_user(t->sas_ss_flags, &uss->ss_flags) |
 		__put_user(t->sas_ss_size, &uss->ss_size);
+	if (err)
+		return err;
+	if (t->sas_ss_flags & SS_AUTODISARM)
+		sas_ss_reset(t);
+	return 0;
 }
 
 #ifdef CONFIG_COMPAT
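
For reference, a hedged userspace sketch (editor's illustration) of the SS_AUTODISARM flag that the sigaltstack code above starts to track in current->sas_ss_flags; with it set, the kernel disarms the alternate stack on signal entry so the handler may safely switch away with swapcontext():

	#include <signal.h>
	#include <stdlib.h>

	#ifndef SS_AUTODISARM			/* may be missing from older headers */
	# define SS_AUTODISARM	(1U << 31)
	#endif

	static void setup_sigstack(void)
	{
		stack_t ss = {
			.ss_sp    = malloc(SIGSTKSZ),
			.ss_size  = SIGSTKSZ,
			.ss_flags = SS_AUTODISARM,
		};

		if (ss.ss_sp)
			sigaltstack(&ss, NULL);
	}
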
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 725587f..c8b3186 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -130,6 +130,9 @@
 #ifdef CONFIG_PRINTK
 static int ten_thousand = 10000;
 #endif
+#ifdef CONFIG_PERF_EVENTS
+static int six_hundred_forty_kb = 640 * 1024;
+#endif
 
 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
 static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
@@ -1144,6 +1147,15 @@
 		.extra1		= &zero,
 		.extra2		= &one_hundred,
 	},
+	{
+		.procname	= "perf_event_max_stack",
+		.data		= NULL, /* filled in by handler */
+		.maxlen		= sizeof(sysctl_perf_event_max_stack),
+		.mode		= 0644,
+		.proc_handler	= perf_event_max_stack_handler,
+		.extra1		= &zero,
+		.extra2		= &six_hundred_forty_kb,
+	},
 #endif
 #ifdef CONFIG_KMEMCHECK
 	{
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 21f82c2..b3f05ee 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -357,10 +357,6 @@
 	return ret;
 }
 
-#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-#define TASKSTATS_NEEDS_PADDING 1
-#endif
-
 static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
 {
 	struct nlattr *na, *ret;
@@ -370,29 +366,6 @@
 			? TASKSTATS_TYPE_AGGR_PID
 			: TASKSTATS_TYPE_AGGR_TGID;
 
-	/*
-	 * The taskstats structure is internally aligned on 8 byte
-	 * boundaries but the layout of the aggregrate reply, with
-	 * two NLA headers and the pid (each 4 bytes), actually
-	 * force the entire structure to be unaligned. This causes
-	 * the kernel to issue unaligned access warnings on some
-	 * architectures like ia64. Unfortunately, some software out there
-	 * doesn't properly unroll the NLA packet and assumes that the start
-	 * of the taskstats structure will always be 20 bytes from the start
-	 * of the netlink payload. Aligning the start of the taskstats
-	 * structure breaks this software, which we don't want. So, for now
-	 * the alignment only happens on architectures that require it
-	 * and those users will have to update to fixed versions of those
-	 * packages. Space is reserved in the packet only when needed.
-	 * This ifdef should be removed in several years e.g. 2012 once
-	 * we can be confident that fixed versions are installed on most
-	 * systems. We add the padding before the aggregate since the
-	 * aggregate is already a defined type.
-	 */
-#ifdef TASKSTATS_NEEDS_PADDING
-	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
-		goto err;
-#endif
 	na = nla_nest_start(skb, aggr);
 	if (!na)
 		goto err;
@@ -401,7 +374,8 @@
 		nla_nest_cancel(skb, na);
 		goto err;
 	}
-	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
+	ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
+				sizeof(struct taskstats), TASKSTATS_TYPE_NULL);
 	if (!ret) {
 		nla_nest_cancel(skb, na);
 		goto err;
@@ -500,10 +474,9 @@
 	size_t size;
 
 	size = nla_total_size(sizeof(u32)) +
-		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
-#ifdef TASKSTATS_NEEDS_PADDING
-	size += nla_total_size(0); /* Padding for alignment */
-#endif
+		nla_total_size_64bit(sizeof(struct taskstats)) +
+		nla_total_size(0);
+
 	return size;
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 084b79f..536ada8 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -157,52 +157,50 @@
 cpumask_var_t tick_nohz_full_mask;
 cpumask_var_t housekeeping_mask;
 bool tick_nohz_full_running;
-static unsigned long tick_dep_mask;
+static atomic_t tick_dep_mask;
 
-static void trace_tick_dependency(unsigned long dep)
+static bool check_tick_dependency(atomic_t *dep)
 {
-	if (dep & TICK_DEP_MASK_POSIX_TIMER) {
+	int val = atomic_read(dep);
+
+	if (val & TICK_DEP_MASK_POSIX_TIMER) {
 		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_PERF_EVENTS) {
+	if (val & TICK_DEP_MASK_PERF_EVENTS) {
 		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_SCHED) {
+	if (val & TICK_DEP_MASK_SCHED) {
 		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
-		return;
+		return true;
 	}
 
-	if (dep & TICK_DEP_MASK_CLOCK_UNSTABLE)
+	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
 		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
+		return true;
+	}
+
+	return false;
 }
 
 static bool can_stop_full_tick(struct tick_sched *ts)
 {
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (tick_dep_mask) {
-		trace_tick_dependency(tick_dep_mask);
+	if (check_tick_dependency(&tick_dep_mask))
 		return false;
-	}
 
-	if (ts->tick_dep_mask) {
-		trace_tick_dependency(ts->tick_dep_mask);
+	if (check_tick_dependency(&ts->tick_dep_mask))
 		return false;
-	}
 
-	if (current->tick_dep_mask) {
-		trace_tick_dependency(current->tick_dep_mask);
+	if (check_tick_dependency(&current->tick_dep_mask))
 		return false;
-	}
 
-	if (current->signal->tick_dep_mask) {
-		trace_tick_dependency(current->signal->tick_dep_mask);
+	if (check_tick_dependency(&current->signal->tick_dep_mask))
 		return false;
-	}
 
 	return true;
 }
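
A standalone sketch (editor's illustration, using C11 atomics rather than the kernel's atomic_t API) of the pattern the tick-dependency code now relies on: atomic_fetch_or() both sets the bit and reports whether the mask was previously empty, so the expensive kick is issued only on the 0 -> non-zero transition:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int dep_mask;

	static void dep_set(int bit)
	{
		int prev = atomic_fetch_or(&dep_mask, 1 << bit);

		if (!prev)
			printf("mask was empty -> kick the nohz_full CPUs\n");
	}

	static void dep_clear(int bit)
	{
		atomic_fetch_and(&dep_mask, ~(1 << bit));
	}

	int main(void)
	{
		dep_set(0);	/* kicks */
		dep_set(1);	/* no kick: mask already non-zero */
		dep_clear(0);
		dep_clear(1);
		return 0;
	}
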
@@ -259,12 +257,12 @@
 	preempt_enable();
 }
 
-static void tick_nohz_dep_set_all(unsigned long *dep,
+static void tick_nohz_dep_set_all(atomic_t *dep,
 				  enum tick_dep_bits bit)
 {
-	unsigned long prev;
+	int prev;
 
-	prev = fetch_or(dep, BIT_MASK(bit));
+	prev = atomic_fetch_or(BIT(bit), dep);
 	if (!prev)
 		tick_nohz_full_kick_all();
 }
@@ -280,7 +278,7 @@
 
 void tick_nohz_dep_clear(enum tick_dep_bits bit)
 {
-	clear_bit(bit, &tick_dep_mask);
+	atomic_andnot(BIT(bit), &tick_dep_mask);
 }
 
 /*
@@ -289,12 +287,12 @@
  */
 void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 {
-	unsigned long prev;
+	int prev;
 	struct tick_sched *ts;
 
 	ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-	prev = fetch_or(&ts->tick_dep_mask, BIT_MASK(bit));
+	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
 	if (!prev) {
 		preempt_disable();
 		/* Perf needs local kick that is NMI safe */
@@ -313,7 +311,7 @@
 {
 	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 
-	clear_bit(bit, &ts->tick_dep_mask);
+	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 }
 
 /*
@@ -331,7 +329,7 @@
 
 void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 {
-	clear_bit(bit, &tsk->tick_dep_mask);
+	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 }
 
 /*
@@ -345,7 +343,7 @@
 
 void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 {
-	clear_bit(bit, &sig->tick_dep_mask);
+	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
 }
 
 /*
@@ -366,7 +364,8 @@
 	ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (ts->tick_stopped) {
-		if (current->tick_dep_mask || current->signal->tick_dep_mask)
+		if (atomic_read(&current->tick_dep_mask) ||
+		    atomic_read(&current->signal->tick_dep_mask))
 			tick_nohz_full_kick();
 	}
 out:
@@ -777,6 +776,7 @@
 	if (!ts->tick_stopped) {
 		nohz_balance_enter_idle(cpu);
 		calc_load_enter_idle();
+		cpu_load_update_nohz_start();
 
 		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
 		ts->tick_stopped = 1;
@@ -803,11 +803,11 @@
 	return tick;
 }
 
-static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now, int active)
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 {
 	/* Update jiffies first */
 	tick_do_update_jiffies64(now);
-	update_cpu_load_nohz(active);
+	cpu_load_update_nohz_stop();
 
 	calc_load_exit_idle();
 	touch_softlockup_watchdog_sched();
@@ -834,7 +834,7 @@
 	if (can_stop_full_tick(ts))
 		tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
 	else if (ts->tick_stopped)
-		tick_nohz_restart_sched_tick(ts, ktime_get(), 1);
+		tick_nohz_restart_sched_tick(ts, ktime_get());
 #endif
 }
 
@@ -1025,7 +1025,7 @@
 		tick_nohz_stop_idle(ts, now);
 
 	if (ts->tick_stopped) {
-		tick_nohz_restart_sched_tick(ts, now, 0);
+		tick_nohz_restart_sched_tick(ts, now);
 		tick_nohz_account_idle_ticks(ts);
 	}
 
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index eb4e325..bf38226 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -60,7 +60,7 @@
 	u64				next_timer;
 	ktime_t				idle_expires;
 	int				do_timer_last;
-	unsigned long			tick_dep_mask;
+	atomic_t			tick_dep_mask;
 };
 
 extern struct tick_sched *tick_get_tick_sched(int cpu);
diff --git a/kernel/time/time.c b/kernel/time/time.c
index be115b0..a4064b6 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -160,15 +160,15 @@
  * various programs will get confused when the clock gets warped.
  */
 
-int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
+int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
 {
 	static int firsttime = 1;
 	int error = 0;
 
-	if (tv && !timespec_valid(tv))
+	if (tv && !timespec64_valid(tv))
 		return -EINVAL;
 
-	error = security_settime(tv, tz);
+	error = security_settime64(tv, tz);
 	if (error)
 		return error;
 
@@ -186,7 +186,7 @@
 		}
 	}
 	if (tv)
-		return do_settimeofday(tv);
+		return do_settimeofday64(tv);
 	return 0;
 }
 
diff --git a/kernel/torture.c b/kernel/torture.c
index 44aa462..fa0bdee 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -451,6 +451,7 @@
 		torture_shutdown_hook();
 	else
 		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
+	ftrace_dump(DUMP_ALL);
 	kernel_power_off();	/* Shut down the system. */
 	return 0;
 }
@@ -602,8 +603,9 @@
 {
 	mutex_lock(&fullstop_mutex);
 	if (torture_type != NULL) {
-		pr_alert("torture_init_begin: refusing %s init: %s running",
+		pr_alert("torture_init_begin: Refusing %s init: %s running.\n",
 			 ttype, torture_type);
+		pr_alert("torture_init_begin: One torture test at a time!\n");
 		mutex_unlock(&fullstop_mutex);
 		return false;
 	}
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e45db6b..fafeaf8 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -528,6 +528,32 @@
 	  See Documentation/trace/mmiotrace.txt.
 	  If you are not helping to develop drivers, say N.
 
+config TRACING_MAP
+	bool
+	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
+	help
+	  tracing_map is a special-purpose lock-free map for tracing,
+	  separated out as a stand-alone facility in order to allow it
+	  to be shared between multiple tracers.  It isn't meant to be
+	  generally used outside of that context, and is normally
+	  selected by tracers that use it.
+
+config HIST_TRIGGERS
+	bool "Histogram triggers"
+	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
+	select TRACING_MAP
+	default n
+	help
+	  Hist triggers allow one or more arbitrary trace event fields
+	  to be aggregated into hash tables and dumped to stdout by
+	  reading a debugfs/tracefs file.  They're useful for
+	  gathering quick and dirty (though precise) summaries of
+	  event activity as an initial guide for further investigation
+	  using more advanced tools.
+
+	  See Documentation/trace/events.txt.
+	  If in doubt, say N.
+
 config MMIOTRACE_TEST
 	tristate "Test module for mmiotrace"
 	depends on MMIOTRACE && m
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 9b1044e..979e7bf 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -31,6 +31,7 @@
 obj-$(CONFIG_TRACING) += trace_seq.o
 obj-$(CONFIG_TRACING) += trace_stat.o
 obj-$(CONFIG_TRACING) += trace_printk.o
+obj-$(CONFIG_TRACING_MAP) += tracing_map.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
@@ -53,6 +54,7 @@
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
+obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
 obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
 obj-$(CONFIG_TRACEPOINTS) += power-traces.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index f94e7a2..9aef865 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1349,6 +1349,7 @@
 	if (t->action == BLK_TN_MESSAGE) {
 		log_action(iter, long_act ? "message" : "m");
 		blk_log_msg(s, iter->ent);
+		return trace_handle_return(s);
 	}
 
 	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
@@ -1551,6 +1552,7 @@
 	{ BLK_TC_COMPLETE,	"complete"	},
 	{ BLK_TC_FS,		"fs"		},
 	{ BLK_TC_PC,		"pc"		},
+	{ BLK_TC_NOTIFY,	"notify"	},
 	{ BLK_TC_AHEAD,		"ahead"		},
 	{ BLK_TC_META,		"meta"		},
 	{ BLK_TC_DISCARD,	"discard"	},
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 3e4ffb3..780bcbe 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -62,17 +62,21 @@
 static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
 	void *dst = (void *) (long) r1;
-	int size = (int) r2;
+	int ret, size = (int) r2;
 	void *unsafe_ptr = (void *) (long) r3;
 
-	return probe_kernel_read(dst, unsafe_ptr, size);
+	ret = probe_kernel_read(dst, unsafe_ptr, size);
+	if (unlikely(ret < 0))
+		memset(dst, 0, size);
+
+	return ret;
 }
 
 static const struct bpf_func_proto bpf_probe_read_proto = {
 	.func		= bpf_probe_read,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
-	.arg1_type	= ARG_PTR_TO_STACK,
+	.arg1_type	= ARG_PTR_TO_RAW_STACK,
 	.arg2_type	= ARG_CONST_STACK_SIZE,
 	.arg3_type	= ARG_ANYTHING,
 };
@@ -221,11 +225,12 @@
 	.arg2_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 {
 	struct pt_regs *regs = (struct pt_regs *) (long) r1;
 	struct bpf_map *map = (struct bpf_map *) (long) r2;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	u64 index = flags & BPF_F_INDEX_MASK;
 	void *data = (void *) (long) r4;
 	struct perf_sample_data sample_data;
 	struct perf_event *event;
@@ -235,6 +240,10 @@
 		.data = data,
 	};
 
+	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+		return -EINVAL;
+	if (index == BPF_F_CURRENT_CPU)
+		index = raw_smp_processor_id();
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
@@ -268,7 +277,34 @@
 	.arg5_type	= ARG_CONST_STACK_SIZE,
 };
 
-static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
+static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
+
+static u64 bpf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+{
+	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
+
+	perf_fetch_caller_regs(regs);
+
+	return bpf_perf_event_output((long)regs, r2, flags, r4, size);
+}
+
+static const struct bpf_func_proto bpf_event_output_proto = {
+	.func		= bpf_event_output,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_STACK,
+	.arg5_type	= ARG_CONST_STACK_SIZE,
+};
+
+const struct bpf_func_proto *bpf_get_event_output_proto(void)
+{
+	return &bpf_event_output_proto;
+}
+
+static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
 	case BPF_FUNC_map_lookup_elem:
@@ -295,12 +331,20 @@
 		return &bpf_get_smp_processor_id_proto;
 	case BPF_FUNC_perf_event_read:
 		return &bpf_perf_event_read_proto;
+	default:
+		return NULL;
+	}
+}
+
+static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
 	case BPF_FUNC_perf_event_output:
 		return &bpf_perf_event_output_proto;
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto;
 	default:
-		return NULL;
+		return tracing_func_proto(func_id);
 	}
 }
 
@@ -332,9 +376,82 @@
 	.type	= BPF_PROG_TYPE_KPROBE,
 };
 
+static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+{
+	/*
+	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
+	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
+	 * from there and call the same bpf_perf_event_output() helper
+	 */
+	u64 ctx = *(long *)(uintptr_t)r1;
+
+	return bpf_perf_event_output(ctx, r2, index, r4, size);
+}
+
+static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
+	.func		= bpf_perf_event_output_tp,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_STACK,
+	.arg5_type	= ARG_CONST_STACK_SIZE,
+};
+
+static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	u64 ctx = *(long *)(uintptr_t)r1;
+
+	return bpf_get_stackid(ctx, r2, r3, r4, r5);
+}
+
+static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
+	.func		= bpf_get_stackid_tp,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_perf_event_output_proto_tp;
+	case BPF_FUNC_get_stackid:
+		return &bpf_get_stackid_proto_tp;
+	default:
+		return tracing_func_proto(func_id);
+	}
+}
+
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+{
+	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
+		return false;
+	if (type != BPF_READ)
+		return false;
+	if (off % size != 0)
+		return false;
+	return true;
+}
+
+static const struct bpf_verifier_ops tracepoint_prog_ops = {
+	.get_func_proto  = tp_prog_func_proto,
+	.is_valid_access = tp_prog_is_valid_access,
+};
+
+static struct bpf_prog_type_list tracepoint_tl = {
+	.ops	= &tracepoint_prog_ops,
+	.type	= BPF_PROG_TYPE_TRACEPOINT,
+};
+
 static int __init register_kprobe_prog_ops(void)
 {
 	bpf_register_prog_type(&kprobe_tl);
+	bpf_register_prog_type(&tracepoint_tl);
 	return 0;
 }
 late_initcall(register_kprobe_prog_ops);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b1870fb..7e8d792 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1530,7 +1530,19 @@
 	return 0;
 }
 
-static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+/**
+ * ftrace_location_range - return the first address of a traced location
+ *	if it touches the given ip range
+ * @start: start of range to search.
+ * @end: end of range to search (inclusive). @end points to the last byte
+ *	to check.
+ *
+ * Returns rec->ip if the related ftrace location is at least partly within
+ * the given address range. That is, the first address of the instruction
+ * that is either a NOP or call to the function tracer. It checks the ftrace
+ * internal tables to determine if the address belongs or not.
+ */
+unsigned long ftrace_location_range(unsigned long start, unsigned long end)
 {
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
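
Usage sketch (editor's illustration) of the now-exported helper; a caller that must not patch or probe over an ftrace site can ask whether any traced location overlaps a candidate range:

	static bool range_contains_ftrace_site(unsigned long addr, size_t len)
	{
		/* @end is inclusive, hence the -1 */
		return ftrace_location_range(addr, addr + len - 1) != 0;
	}
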
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index 81b8745..0c7dee2 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -15,5 +15,6 @@
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
+EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency);
 EXPORT_TRACEPOINT_SYMBOL_GPL(powernv_throttle);
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 95181e3..9c14373 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -437,7 +437,7 @@
 	raw_spinlock_t			reader_lock;	/* serialize readers */
 	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
-	unsigned int			nr_pages;
+	unsigned long			nr_pages;
 	unsigned int			current_context;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -458,7 +458,7 @@
 	u64				write_stamp;
 	u64				read_stamp;
 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
-	int				nr_pages_to_update;
+	long				nr_pages_to_update;
 	struct list_head		new_pages; /* new pages to add */
 	struct work_struct		update_pages_work;
 	struct completion		update_done;
@@ -1128,10 +1128,10 @@
 	return 0;
 }
 
-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 {
-	int i;
 	struct buffer_page *bpage, *tmp;
+	long i;
 
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
@@ -1168,7 +1168,7 @@
 }
 
 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
-			     unsigned nr_pages)
+			     unsigned long nr_pages)
 {
 	LIST_HEAD(pages);
 
@@ -1193,7 +1193,7 @@
 }
 
 static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
@@ -1293,8 +1293,9 @@
 					struct lock_class_key *key)
 {
 	struct ring_buffer *buffer;
+	long nr_pages;
 	int bsize;
-	int cpu, nr_pages;
+	int cpu;
 
 	/* keep it in its own cache line */
 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1420,12 +1421,12 @@
 }
 
 static int
-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 {
 	struct list_head *tail_page, *to_remove, *next_page;
 	struct buffer_page *to_remove_page, *tmp_iter_page;
 	struct buffer_page *last_page, *first_page;
-	unsigned int nr_removed;
+	unsigned long nr_removed;
 	unsigned long head_bit;
 	int page_entries;
 
@@ -1642,7 +1643,7 @@
 			int cpu_id)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	unsigned nr_pages;
+	unsigned long nr_pages;
 	int cpu, err = 0;
 
 	/*
@@ -1656,14 +1657,13 @@
 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
 		return size;
 
-	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
-	size *= BUF_PAGE_SIZE;
+	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 
 	/* we need a minimum of two pages */
-	if (size < BUF_PAGE_SIZE * 2)
-		size = BUF_PAGE_SIZE * 2;
+	if (nr_pages < 2)
+		nr_pages = 2;
 
-	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+	size = nr_pages * BUF_PAGE_SIZE;
 
 	/*
 	 * Don't succeed if resizing is disabled, as a reader might be
@@ -4640,8 +4640,9 @@
 	struct ring_buffer *buffer =
 		container_of(self, struct ring_buffer, cpu_notify);
 	long cpu = (long)hcpu;
-	int cpu_i, nr_pages_same;
-	unsigned int nr_pages;
+	long nr_pages_same;
+	int cpu_i;
+	unsigned long nr_pages;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a2f0b9f..8a4bd6b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -253,6 +253,9 @@
 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
 	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
 
+/* trace_flags that are default zero for instances */
+#define ZEROED_TRACE_FLAGS \
+	TRACE_ITER_EVENT_FORK
 
 /*
  * The global_trace is the descriptor that holds the tracing
@@ -303,33 +306,18 @@
 	mutex_unlock(&trace_types_lock);
 }
 
-int filter_check_discard(struct trace_event_file *file, void *rec,
-			 struct ring_buffer *buffer,
-			 struct ring_buffer_event *event)
-{
-	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
-	    !filter_match_preds(file->filter, rec)) {
-		ring_buffer_discard_commit(buffer, event);
-		return 1;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(filter_check_discard);
-
 int call_filter_check_discard(struct trace_event_call *call, void *rec,
 			      struct ring_buffer *buffer,
 			      struct ring_buffer_event *event)
 {
 	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
 	    !filter_match_preds(call->filter, rec)) {
-		ring_buffer_discard_commit(buffer, event);
+		__trace_event_discard_commit(buffer, event);
 		return 1;
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(call_filter_check_discard);
 
 static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
@@ -1672,6 +1660,16 @@
 }
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
+static __always_inline void
+trace_event_setup(struct ring_buffer_event *event,
+		  int type, unsigned long flags, int pc)
+{
+	struct trace_entry *ent = ring_buffer_event_data(event);
+
+	tracing_generic_entry_update(ent, flags, pc);
+	ent->type = type;
+}
+
 struct ring_buffer_event *
 trace_buffer_lock_reserve(struct ring_buffer *buffer,
 			  int type,
@@ -1681,34 +1679,137 @@
 	struct ring_buffer_event *event;
 
 	event = ring_buffer_lock_reserve(buffer, len);
-	if (event != NULL) {
-		struct trace_entry *ent = ring_buffer_event_data(event);
-
-		tracing_generic_entry_update(ent, flags, pc);
-		ent->type = type;
-	}
+	if (event != NULL)
+		trace_event_setup(event, type, flags, pc);
 
 	return event;
 }
 
+DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
+DEFINE_PER_CPU(int, trace_buffered_event_cnt);
+static int trace_buffered_event_ref;
+
+/**
+ * trace_buffered_event_enable - enable buffering events
+ *
+ * When events are being filtered, it is quicker to use a temporary
+ * buffer to write the event data into if there's a likely chance
+ * that it will not be committed. The discard of the ring buffer
+ * is not as fast as committing, and is much slower than copying
+ * a commit.
+ *
+ * When an event is to be filtered, allocate per-cpu buffers to
+ * write the event data into; if the event is filtered and discarded,
+ * it is simply dropped, otherwise the entire data is committed
+ * in one shot.
+ */
+void trace_buffered_event_enable(void)
+{
+	struct ring_buffer_event *event;
+	struct page *page;
+	int cpu;
+
+	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
+
+	if (trace_buffered_event_ref++)
+		return;
+
+	for_each_tracing_cpu(cpu) {
+		page = alloc_pages_node(cpu_to_node(cpu),
+					GFP_KERNEL | __GFP_NORETRY, 0);
+		if (!page)
+			goto failed;
+
+		event = page_address(page);
+		memset(event, 0, sizeof(*event));
+
+		per_cpu(trace_buffered_event, cpu) = event;
+
+		preempt_disable();
+		if (cpu == smp_processor_id() &&
+		    this_cpu_read(trace_buffered_event) !=
+		    per_cpu(trace_buffered_event, cpu))
+			WARN_ON_ONCE(1);
+		preempt_enable();
+	}
+
+	return;
+ failed:
+	trace_buffered_event_disable();
+}
+
+static void enable_trace_buffered_event(void *data)
+{
+	/* Probably not needed, but do it anyway */
+	smp_rmb();
+	this_cpu_dec(trace_buffered_event_cnt);
+}
+
+static void disable_trace_buffered_event(void *data)
+{
+	this_cpu_inc(trace_buffered_event_cnt);
+}
+
+/**
+ * trace_buffered_event_disable - disable buffering events
+ *
+ * When a filter is removed, it is faster to not use the buffered
+ * events, and to commit directly into the ring buffer. Free up
+ * the temp buffers when there are no more users. This requires
+ * special synchronization with current events.
+ */
+void trace_buffered_event_disable(void)
+{
+	int cpu;
+
+	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
+
+	if (WARN_ON_ONCE(!trace_buffered_event_ref))
+		return;
+
+	if (--trace_buffered_event_ref)
+		return;
+
+	preempt_disable();
+	/* For each CPU, set the buffer as used. */
+	smp_call_function_many(tracing_buffer_mask,
+			       disable_trace_buffered_event, NULL, 1);
+	preempt_enable();
+
+	/* Wait for all current users to finish */
+	synchronize_sched();
+
+	for_each_tracing_cpu(cpu) {
+		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
+		per_cpu(trace_buffered_event, cpu) = NULL;
+	}
+	/*
+	 * Make sure trace_buffered_event is NULL before clearing
+	 * trace_buffered_event_cnt.
+	 */
+	smp_wmb();
+
+	preempt_disable();
+	/* Do the work on each cpu */
+	smp_call_function_many(tracing_buffer_mask,
+			       enable_trace_buffered_event, NULL, 1);
+	preempt_enable();
+}
+
 void
 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
 {
 	__this_cpu_write(trace_cmdline_save, true);
-	ring_buffer_unlock_commit(buffer, event);
-}
 
-void trace_buffer_unlock_commit(struct trace_array *tr,
-				struct ring_buffer *buffer,
-				struct ring_buffer_event *event,
-				unsigned long flags, int pc)
-{
-	__buffer_unlock_commit(buffer, event);
-
-	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
-	ftrace_trace_userstack(buffer, flags, pc);
+	/* If this is the temp buffer, we need to commit fully */
+	if (this_cpu_read(trace_buffered_event) == event) {
+		/* Length is in event->array[0] */
+		ring_buffer_write(buffer, event->array[0], &event->array[1]);
+		/* Release the temp buffer */
+		this_cpu_dec(trace_buffered_event_cnt);
+	} else
+		ring_buffer_unlock_commit(buffer, event);
 }
-EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
 static struct ring_buffer *temp_buffer;
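
Caller-side sketch (editor's illustration) of the buffered-event machinery added above: the enable/disable pair is reference counted and must be called under event_mutex, so code that installs an event filter brackets it like this:

	static void example_install_filter(struct trace_event_file *file)
	{
		mutex_lock(&event_mutex);
		trace_buffered_event_enable();		/* allocate the per-cpu scratch events */
		/* ... attach the filter to @file ... */
		mutex_unlock(&event_mutex);
	}

	static void example_remove_filter(struct trace_event_file *file)
	{
		mutex_lock(&event_mutex);
		/* ... detach the filter from @file ... */
		trace_buffered_event_disable();		/* free the buffers on the last reference */
		mutex_unlock(&event_mutex);
	}
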
 
@@ -1719,8 +1820,23 @@
 			  unsigned long flags, int pc)
 {
 	struct ring_buffer_event *entry;
+	int val;
 
 	*current_rb = trace_file->tr->trace_buffer.buffer;
+
+	if ((trace_file->flags &
+	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
+	    (entry = this_cpu_read(trace_buffered_event))) {
+		/* Try to use the per cpu buffer first */
+		val = this_cpu_inc_return(trace_buffered_event_cnt);
+		if (val == 1) {
+			trace_event_setup(entry, type, flags, pc);
+			entry->array[0] = len;
+			return entry;
+		}
+		this_cpu_dec(trace_buffered_event_cnt);
+	}
+
 	entry = trace_buffer_lock_reserve(*current_rb,
 					 type, len, flags, pc);
 	/*
@@ -1738,17 +1854,6 @@
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
 
-struct ring_buffer_event *
-trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
-				  int type, unsigned long len,
-				  unsigned long flags, int pc)
-{
-	*current_rb = global_trace.trace_buffer.buffer;
-	return trace_buffer_lock_reserve(*current_rb,
-					 type, len, flags, pc);
-}
-EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
-
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 				     struct ring_buffer *buffer,
 				     struct ring_buffer_event *event,
@@ -1760,14 +1865,6 @@
 	ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
-EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
-
-void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
-					 struct ring_buffer_event *event)
-{
-	ring_buffer_discard_commit(buffer, event);
-}
-EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
 void
 trace_function(struct trace_array *tr,
@@ -3571,6 +3668,9 @@
 	if (mask == TRACE_ITER_RECORD_CMD)
 		trace_event_enable_cmd_record(enabled);
 
+	if (mask == TRACE_ITER_EVENT_FORK)
+		trace_event_follow_fork(tr, enabled);
+
 	if (mask == TRACE_ITER_OVERWRITE) {
 		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -3658,7 +3758,7 @@
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
 
-	if (copy_from_user(&buf, ubuf, cnt))
+	if (copy_from_user(buf, ubuf, cnt))
 		return -EFAULT;
 
 	buf[cnt] = 0;
@@ -3804,12 +3904,19 @@
 	"\t   trigger: traceon, traceoff\n"
 	"\t            enable_event:<system>:<event>\n"
 	"\t            disable_event:<system>:<event>\n"
+#ifdef CONFIG_HIST_TRIGGERS
+	"\t            enable_hist:<system>:<event>\n"
+	"\t            disable_hist:<system>:<event>\n"
+#endif
 #ifdef CONFIG_STACKTRACE
 	"\t\t    stacktrace\n"
 #endif
 #ifdef CONFIG_TRACER_SNAPSHOT
 	"\t\t    snapshot\n"
 #endif
+#ifdef CONFIG_HIST_TRIGGERS
+	"\t\t    hist (see below)\n"
+#endif
 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
@@ -3825,6 +3932,56 @@
 	"\t   To remove a trigger with a count:\n"
 	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
 	"\t   Filters can be ignored when removing a trigger.\n"
+#ifdef CONFIG_HIST_TRIGGERS
+	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
+	"\t    Format: hist:keys=<field1[,field2,...]>\n"
+	"\t            [:values=<field1[,field2,...]>]\n"
+	"\t            [:sort=<field1[,field2,...]>]\n"
+	"\t            [:size=#entries]\n"
+	"\t            [:pause][:continue][:clear]\n"
+	"\t            [:name=histname1]\n"
+	"\t            [if <filter>]\n\n"
+	"\t    When a matching event is hit, an entry is added to a hash\n"
+	"\t    table using the key(s) and value(s) named, and the value of a\n"
+	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
+	"\t    correspond to fields in the event's format description.  Keys\n"
+	"\t    can be any field, or the special string 'stacktrace'.\n"
+	"\t    Compound keys consisting of up to two fields can be specified\n"
+	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
+	"\t    fields.  Sort keys consisting of up to two fields can be\n"
+	"\t    specified using the 'sort' keyword.  The sort direction can\n"
+	"\t    be modified by appending '.descending' or '.ascending' to a\n"
+	"\t    sort field.  The 'size' parameter can be used to specify more\n"
+	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
+	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
+	"\t    its histogram data will be shared with other triggers of the\n"
+	"\t    same name, and trigger hits will update this common data.\n\n"
+	"\t    Reading the 'hist' file for the event will dump the hash\n"
+	"\t    table in its entirety to stdout.  If there are multiple hist\n"
+	"\t    triggers attached to an event, there will be a table for each\n"
+	"\t    trigger in the output.  The table displayed for a named\n"
+	"\t    trigger will be the same as any other instance having the\n"
+	"\t    same name.  The default format used to display a given field\n"
+	"\t    can be modified by appending any of the following modifiers\n"
+	"\t    to the field name, as applicable:\n\n"
+	"\t            .hex        display a number as a hex value\n"
+	"\t            .sym        display an address as a symbol\n"
+	"\t            .sym-offset display an address as a symbol and offset\n"
+	"\t            .execname   display a common_pid as a program name\n"
+	"\t            .syscall    display a syscall id as a syscall name\n"
+	"\t            .log2       display log2 value rather than raw number\n\n"
+	"\t    The 'pause' parameter can be used to pause an existing hist\n"
+	"\t    trigger or to start a hist trigger but not log any events\n"
+	"\t    until told to do so.  'continue' can be used to start or\n"
+	"\t    restart a paused hist trigger.\n\n"
+	"\t    The 'clear' parameter will clear the contents of a running\n"
+	"\t    hist trigger and leave its current paused/active state\n"
+	"\t    unchanged.\n\n"
+	"\t    The enable_hist and disable_hist triggers can be used to\n"
+	"\t    have one event conditionally start and stop another event's\n"
+	"\t    already-attached hist trigger.  The syntax is analogous to\n"
+	"\t    the enable_event and disable_event triggers.\n"
+#endif
 ;
 
 static ssize_t
@@ -4474,7 +4631,7 @@
 	if (cnt > MAX_TRACER_SIZE)
 		cnt = MAX_TRACER_SIZE;
 
-	if (copy_from_user(&buf, ubuf, cnt))
+	if (copy_from_user(buf, ubuf, cnt))
 		return -EFAULT;
 
 	buf[cnt] = 0;
@@ -5264,7 +5421,7 @@
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
 
-	if (copy_from_user(&buf, ubuf, cnt))
+	if (copy_from_user(buf, ubuf, cnt))
 		return -EFAULT;
 
 	buf[cnt] = 0;
@@ -6650,7 +6807,7 @@
 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
 		goto out_free_tr;
 
-	tr->trace_flags = global_trace.trace_flags;
+	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
 
 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
 
@@ -6724,6 +6881,12 @@
 
 	list_del(&tr->list);
 
+	/* Disable all the flags that were enabled coming in */
+	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
+		if ((1 << i) & ZEROED_TRACE_FLAGS)
+			set_tracer_flag(tr, 1 << i, 0);
+	}
+
 	tracing_set_nop(tr);
 	event_trace_del_tracer(tr);
 	ftrace_destroy_function_files(tr);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3fff4ad..5167c36 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -177,9 +177,8 @@
 };
 
 struct trace_pid_list {
-	unsigned int			nr_pids;
-	int				order;
-	pid_t				*pids;
+	int				pid_max;
+	unsigned long			*pids;
 };
 
 /*
@@ -656,6 +655,7 @@
 extern cycle_t ftrace_now(int cpu);
 
 extern void trace_find_cmdline(int pid, char comm[]);
+extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
@@ -967,6 +967,7 @@
 		C(STOP_ON_FREE,		"disable_on_free"),	\
 		C(IRQ_INFO,		"irq-info"),		\
 		C(MARKERS,		"markers"),		\
+		C(EVENT_FORK,		"event-fork"),		\
 		FUNCTION_FLAGS					\
 		FGRAPH_FLAGS					\
 		STACK_FLAGS					\
@@ -1064,6 +1065,137 @@
 	int				nr_events;
 };
 
+extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
+				     struct ring_buffer *buffer,
+				     struct ring_buffer_event *event);
+
+void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+				     struct ring_buffer *buffer,
+				     struct ring_buffer_event *event,
+				     unsigned long flags, int pc,
+				     struct pt_regs *regs);
+
+static inline void trace_buffer_unlock_commit(struct trace_array *tr,
+					      struct ring_buffer *buffer,
+					      struct ring_buffer_event *event,
+					      unsigned long flags, int pc)
+{
+	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
+}
+
+DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
+DECLARE_PER_CPU(int, trace_buffered_event_cnt);
+void trace_buffered_event_disable(void);
+void trace_buffered_event_enable(void);
+
+static inline void
+__trace_event_discard_commit(struct ring_buffer *buffer,
+			     struct ring_buffer_event *event)
+{
+	if (this_cpu_read(trace_buffered_event) == event) {
+		/* Simply release the temp buffer */
+		this_cpu_dec(trace_buffered_event_cnt);
+		return;
+	}
+	ring_buffer_discard_commit(buffer, event);
+}
+
+/*
+ * Helper function for event_trigger_unlock_commit{_regs}().
+ * If there are event triggers attached to this event that requires
+ * filtering against its fields, then they will be called as the
+ * entry already holds the field information of the current event.
+ *
+ * It also checks if the event should be discarded or not.
+ * It is to be discarded if the event is soft disabled and the
+ * event was only recorded to process triggers, or if the event
+ * filter is active and this event did not match the filters.
+ *
+ * Returns true if the event is discarded, false otherwise.
+ */
+static inline bool
+__event_trigger_test_discard(struct trace_event_file *file,
+			     struct ring_buffer *buffer,
+			     struct ring_buffer_event *event,
+			     void *entry,
+			     enum event_trigger_type *tt)
+{
+	unsigned long eflags = file->flags;
+
+	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+		*tt = event_triggers_call(file, entry);
+
+	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
+	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
+	     !filter_match_preds(file->filter, entry))) {
+		__trace_event_discard_commit(buffer, event);
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * event_trigger_unlock_commit - handle triggers and finish event commit
+ * @file: The file pointer associated with the event
+ * @buffer: The ring buffer that the event is being written to
+ * @event: The event meta data in the ring buffer
+ * @entry: The event itself
+ * @irq_flags: The state of the interrupts at the start of the event
+ * @pc: The state of the preempt count at the start of the event.
+ *
+ * This is a helper function to handle triggers that require data
+ * from the event itself. It also tests the event against filters and
+ * if the event is soft disabled and should be discarded.
+ */
+static inline void
+event_trigger_unlock_commit(struct trace_event_file *file,
+			    struct ring_buffer *buffer,
+			    struct ring_buffer_event *event,
+			    void *entry, unsigned long irq_flags, int pc)
+{
+	enum event_trigger_type tt = ETT_NONE;
+
+	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
+		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+
+	if (tt)
+		event_triggers_post_call(file, tt, entry);
+}
+
+/**
+ * event_trigger_unlock_commit_regs - handle triggers and finish event commit
+ * @file: The file pointer assoctiated to the event
+ * @buffer: The ring buffer that the event is being written to
+ * @event: The event meta data in the ring buffer
+ * @entry: The event itself
+ * @irq_flags: The state of the interrupts at the start of the event
+ * @pc: The state of the preempt count at the start of the event.
+ *
+ * This is a helper function to handle triggers that require data
+ * from the event itself. It also tests the event against filters and
+ * if the event is soft disabled and should be discarded.
+ *
+ * Same as event_trigger_unlock_commit() but calls
+ * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
+ */
+static inline void
+event_trigger_unlock_commit_regs(struct trace_event_file *file,
+				 struct ring_buffer *buffer,
+				 struct ring_buffer_event *event,
+				 void *entry, unsigned long irq_flags, int pc,
+				 struct pt_regs *regs)
+{
+	enum event_trigger_type tt = ETT_NONE;
+
+	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
+		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
+						irq_flags, pc, regs);
+
+	if (tt)
+		event_triggers_post_call(file, tt, entry);
+}
+
 #define FILTER_PRED_INVALID	((unsigned short)-1)
 #define FILTER_PRED_IS_RIGHT	(1 << 15)
 #define FILTER_PRED_FOLD	(1 << 15)
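
Usage sketch (editor's illustration; MY_EVENT_TYPE and struct my_entry are placeholders) of how a generated trace event probe is expected to use the helpers above: reserve and fill the entry, then let event_trigger_unlock_commit() run triggers, apply filters and either discard or commit:

	static void example_event_probe(struct trace_event_file *file, u64 value)
	{
		struct my_entry { struct trace_entry ent; u64 value; } *entry;
		struct ring_buffer_event *event;
		struct ring_buffer *buffer;
		unsigned long irq_flags;
		int pc;

		local_save_flags(irq_flags);
		pc = preempt_count();

		event = trace_event_buffer_lock_reserve(&buffer, file, MY_EVENT_TYPE,
							sizeof(*entry), irq_flags, pc);
		if (!event)
			return;

		entry = ring_buffer_event_data(event);
		entry->value = value;

		event_trigger_unlock_commit(file, buffer, event, entry, irq_flags, pc);
	}
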
@@ -1161,6 +1293,15 @@
 extern struct list_head ftrace_events;
 
 extern const struct file_operations event_trigger_fops;
+extern const struct file_operations event_hist_fops;
+
+#ifdef CONFIG_HIST_TRIGGERS
+extern int register_trigger_hist_cmd(void);
+extern int register_trigger_hist_enable_disable_cmds(void);
+#else
+static inline int register_trigger_hist_cmd(void) { return 0; }
+static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
+#endif
 
 extern int register_trigger_cmds(void);
 extern void clear_event_triggers(struct trace_array *tr);
@@ -1174,9 +1315,41 @@
 	char				*filter_str;
 	void				*private_data;
 	bool				paused;
+	bool				paused_tmp;
 	struct list_head		list;
+	char				*name;
+	struct list_head		named_list;
+	struct event_trigger_data	*named_data;
 };
 
+/* Avoid typos */
+#define ENABLE_EVENT_STR	"enable_event"
+#define DISABLE_EVENT_STR	"disable_event"
+#define ENABLE_HIST_STR		"enable_hist"
+#define DISABLE_HIST_STR	"disable_hist"
+
+struct enable_trigger_data {
+	struct trace_event_file		*file;
+	bool				enable;
+	bool				hist;
+};
+
+extern int event_enable_trigger_print(struct seq_file *m,
+				      struct event_trigger_ops *ops,
+				      struct event_trigger_data *data);
+extern void event_enable_trigger_free(struct event_trigger_ops *ops,
+				      struct event_trigger_data *data);
+extern int event_enable_trigger_func(struct event_command *cmd_ops,
+				     struct trace_event_file *file,
+				     char *glob, char *cmd, char *param);
+extern int event_enable_register_trigger(char *glob,
+					 struct event_trigger_ops *ops,
+					 struct event_trigger_data *data,
+					 struct trace_event_file *file);
+extern void event_enable_unregister_trigger(char *glob,
+					    struct event_trigger_ops *ops,
+					    struct event_trigger_data *test,
+					    struct trace_event_file *file);
 extern void trigger_data_free(struct event_trigger_data *data);
 extern int event_trigger_init(struct event_trigger_ops *ops,
 			      struct event_trigger_data *data);
@@ -1189,7 +1362,18 @@
 extern int set_trigger_filter(char *filter_str,
 			      struct event_trigger_data *trigger_data,
 			      struct trace_event_file *file);
+extern struct event_trigger_data *find_named_trigger(const char *name);
+extern bool is_named_trigger(struct event_trigger_data *test);
+extern int save_named_trigger(const char *name,
+			      struct event_trigger_data *data);
+extern void del_named_trigger(struct event_trigger_data *data);
+extern void pause_named_trigger(struct event_trigger_data *data);
+extern void unpause_named_trigger(struct event_trigger_data *data);
+extern void set_named_trigger_data(struct event_trigger_data *data,
+				   struct event_trigger_data *named_data);
 extern int register_event_command(struct event_command *cmd);
+extern int unregister_event_command(struct event_command *cmd);
+extern int register_trigger_hist_enable_disable_cmds(void);
 
 /**
  * struct event_trigger_ops - callbacks for trace event triggers
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 00df25f..562fa69 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -47,6 +47,9 @@
 		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
 			return -EPERM;
 
+		if (!is_sampling_event(p_event))
+			return 0;
+
 		/*
 		 * We don't allow user space callchains for  function trace
 		 * event, due to issues with page faults while tracing page
@@ -260,42 +263,43 @@
 	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
 }
 
-void *perf_trace_buf_prepare(int size, unsigned short type,
-			     struct pt_regs **regs, int *rctxp)
+void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
 {
-	struct trace_entry *entry;
-	unsigned long flags;
 	char *raw_data;
-	int pc;
+	int rctx;
 
 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
 
 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
-			"perf buffer not large enough"))
+		      "perf buffer not large enough"))
 		return NULL;
 
-	pc = preempt_count();
-
-	*rctxp = perf_swevent_get_recursion_context();
-	if (*rctxp < 0)
+	*rctxp = rctx = perf_swevent_get_recursion_context();
+	if (rctx < 0)
 		return NULL;
 
 	if (regs)
-		*regs = this_cpu_ptr(&__perf_regs[*rctxp]);
-	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
+		*regs = this_cpu_ptr(&__perf_regs[rctx]);
+	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);
 
 	/* zero the dead bytes from align to not leak stack to user */
 	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
+	return raw_data;
+}
+EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
+NOKPROBE_SYMBOL(perf_trace_buf_alloc);
 
-	entry = (struct trace_entry *)raw_data;
+void perf_trace_buf_update(void *record, u16 type)
+{
+	struct trace_entry *entry = record;
+	int pc = preempt_count();
+	unsigned long flags;
+
 	local_save_flags(flags);
 	tracing_generic_entry_update(entry, flags, pc);
 	entry->type = type;
-
-	return raw_data;
 }
-EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
-NOKPROBE_SYMBOL(perf_trace_buf_prepare);
+NOKPROBE_SYMBOL(perf_trace_buf_update);
 
 #ifdef CONFIG_FUNCTION_TRACER
 static void
@@ -316,15 +320,16 @@
 
 	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);
 
+	memset(&regs, 0, sizeof(regs));
 	perf_fetch_caller_regs(&regs);
 
-	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
+	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
 	if (!entry)
 		return;
 
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
-	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
+	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
 			      1, &regs, head, NULL);
 
 #undef ENTRY_SIZE
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 05ddc08..3d41558 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -15,7 +15,7 @@
 #include <linux/kthread.h>
 #include <linux/tracefs.h>
 #include <linux/uaccess.h>
-#include <linux/bsearch.h>
+#include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/ctype.h>
 #include <linux/sort.h>
@@ -204,6 +204,24 @@
 	}
 }
 
+/*
+ * run-time version of trace_event_get_offsets_<call>() that returns the last
+ * accessible offset of trace fields excluding __dynamic_array bytes
+ */
+int trace_event_get_offsets(struct trace_event_call *call)
+{
+	struct ftrace_event_field *tail;
+	struct list_head *head;
+
+	head = trace_get_fields(call);
+	/*
+	 * head->next points to the last field defined, which has the
+	 * largest offset, since trace_define_field() adds new fields at
+	 * the head of the list
+	 */
+	tail = list_first_entry(head, struct ftrace_event_field, link);
+	return tail->offset + tail->size;
+}
+
 int trace_event_raw_init(struct trace_event_call *call)
 {
 	int id;
@@ -363,6 +381,7 @@
 {
 	struct trace_event_call *call = file->event_call;
 	struct trace_array *tr = file->tr;
+	unsigned long file_flags = file->flags;
 	int ret = 0;
 	int disable;
 
@@ -445,6 +464,15 @@
 		break;
 	}
 
+	/* Enable or disable use of trace_buffered_event */
+	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
+	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
+		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
+			trace_buffered_event_enable();
+		else
+			trace_buffered_event_disable();
+	}
+
 	return ret;
 }
 
@@ -471,24 +499,26 @@
 	mutex_unlock(&event_mutex);
 }
 
-static int cmp_pid(const void *key, const void *elt)
-{
-	const pid_t *search_pid = key;
-	const pid_t *pid = elt;
+/* Shouldn't this be in a header? */
+extern int pid_max;
 
-	if (*search_pid == *pid)
-		return 0;
-	if (*search_pid < *pid)
-		return -1;
-	return 1;
+/* Returns true if found in filter */
+static bool
+find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
+{
+	/*
+	 * If pid_max changed after filtered_pids was created, we
+	 * by default ignore all pids greater than the previous pid_max.
+	 */
+	if (search_pid >= filtered_pids->pid_max)
+		return false;
+
+	return test_bit(search_pid, filtered_pids->pids);
 }
 
 static bool
-check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
+ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
 {
-	pid_t search_pid;
-	pid_t *pid;
-
 	/*
 	 * Return false, because if filtered_pids does not exist,
 	 * all pids are good to trace.
@@ -496,15 +526,68 @@
 	if (!filtered_pids)
 		return false;
 
-	search_pid = task->pid;
+	return !find_filtered_pid(filtered_pids, task->pid);
+}
 
-	pid = bsearch(&search_pid, filtered_pids->pids,
-		      filtered_pids->nr_pids, sizeof(pid_t),
-		      cmp_pid);
-	if (!pid)
-		return true;
+static void filter_add_remove_task(struct trace_pid_list *pid_list,
+				   struct task_struct *self,
+				   struct task_struct *task)
+{
+	if (!pid_list)
+		return;
 
-	return false;
+	/* For forks, we only add if the forking task is listed */
+	if (self) {
+		if (!find_filtered_pid(pid_list, self->pid))
+			return;
+	}
+
+	/* Sorry, but we don't support pid_max changing after setting */
+	if (task->pid >= pid_list->pid_max)
+		return;
+
+	/* "self" is set for forks, and NULL for exits */
+	if (self)
+		set_bit(task->pid, pid_list->pids);
+	else
+		clear_bit(task->pid, pid_list->pids);
+}
+
+static void
+event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
+{
+	struct trace_pid_list *pid_list;
+	struct trace_array *tr = data;
+
+	pid_list = rcu_dereference_sched(tr->filtered_pids);
+	filter_add_remove_task(pid_list, NULL, task);
+}
+
+static void
+event_filter_pid_sched_process_fork(void *data,
+				    struct task_struct *self,
+				    struct task_struct *task)
+{
+	struct trace_pid_list *pid_list;
+	struct trace_array *tr = data;
+
+	pid_list = rcu_dereference_sched(tr->filtered_pids);
+	filter_add_remove_task(pid_list, self, task);
+}
+
+void trace_event_follow_fork(struct trace_array *tr, bool enable)
+{
+	if (enable) {
+		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
+						       tr, INT_MIN);
+		register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
+						       tr, INT_MAX);
+	} else {
+		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
+						    tr);
+		unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
+						    tr);
+	}
 }
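
With these hooks in place, pid filtering can follow the process tree: when fork
following is enabled, a child forked by a task whose pid is already in the filter has
its own pid set in the bitmap, and a task's pid is cleared again when it exits, so the
set_event_pid file does not have to be rewritten by hand for short-lived children.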
 
 static void
@@ -517,8 +600,8 @@
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
 
 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
-		       check_ignore_pid(pid_list, prev) &&
-		       check_ignore_pid(pid_list, next));
+		       ignore_this_task(pid_list, prev) &&
+		       ignore_this_task(pid_list, next));
 }
 
 static void
@@ -531,7 +614,7 @@
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
 
 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
-		       check_ignore_pid(pid_list, next));
+		       ignore_this_task(pid_list, next));
 }
 
 static void
@@ -547,7 +630,7 @@
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
 
 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
-		       check_ignore_pid(pid_list, task));
+		       ignore_this_task(pid_list, task));
 }
 
 static void
@@ -564,7 +647,7 @@
 
 	/* Set tracing if current is enabled */
 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
-		       check_ignore_pid(pid_list, current));
+		       ignore_this_task(pid_list, current));
 }
 
 static void __ftrace_clear_event_pids(struct trace_array *tr)
@@ -602,7 +685,7 @@
 	/* Wait till all users are no longer using pid filtering */
 	synchronize_sched();
 
-	free_pages((unsigned long)pid_list->pids, pid_list->order);
+	vfree(pid_list->pids);
 	kfree(pid_list);
 }
 
@@ -946,11 +1029,32 @@
 	mutex_unlock(&event_mutex);
 }
 
+static void *
+p_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct trace_array *tr = m->private;
+	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
+	unsigned long pid = (unsigned long)v;
+
+	(*pos)++;
+
+	/* pid already is +1 of the actual previous bit */
+	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
+
+	/* Return pid + 1 to allow zero to be represented */
+	if (pid < pid_list->pid_max)
+		return (void *)(pid + 1);
+
+	return NULL;
+}
+
 static void *p_start(struct seq_file *m, loff_t *pos)
 	__acquires(RCU)
 {
 	struct trace_pid_list *pid_list;
 	struct trace_array *tr = m->private;
+	unsigned long pid;
+	loff_t l = 0;
 
 	/*
 	 * Grab the mutex, to keep calls to p_next() having the same
@@ -963,10 +1067,18 @@
 
 	pid_list = rcu_dereference_sched(tr->filtered_pids);
 
-	if (!pid_list || *pos >= pid_list->nr_pids)
+	if (!pid_list)
 		return NULL;
 
-	return (void *)&pid_list->pids[*pos];
+	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
+	if (pid >= pid_list->pid_max)
+		return NULL;
+
+	/* Return pid + 1 so that zero can be the exit value */
+	for (pid++; pid && l < *pos;
+	     pid = (unsigned long)p_next(m, (void *)pid, &l))
+		;
+	return (void *)pid;
 }
 
 static void p_stop(struct seq_file *m, void *p)
@@ -976,25 +1088,11 @@
 	mutex_unlock(&event_mutex);
 }
 
-static void *
-p_next(struct seq_file *m, void *v, loff_t *pos)
-{
-	struct trace_array *tr = m->private;
-	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
-
-	(*pos)++;
-
-	if (*pos >= pid_list->nr_pids)
-		return NULL;
-
-	return (void *)&pid_list->pids[*pos];
-}
-
 static int p_show(struct seq_file *m, void *v)
 {
-	pid_t *pid = v;
+	unsigned long pid = (unsigned long)v - 1;
 
-	seq_printf(m, "%d\n", *pid);
+	seq_printf(m, "%lu\n", pid);
 	return 0;
 }
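
The rewritten iterator above relies on a small convention worth spelling out: seq_file
treats a NULL cursor as end-of-sequence, so pid 0 (the idle task) could never be
returned directly. p_start() and p_next() therefore hand back pid + 1 as the opaque
cursor, and p_show() subtracts one again before printing. A minimal sketch of the same
idea (the helper name is made up for illustration):

	static void *example_pid_cursor_next(unsigned long *pids,
					     unsigned int pid_max, void *v)
	{
		/* v is the previous pid + 1, i.e. the next bit to test */
		unsigned long pid = (unsigned long)v;

		pid = find_next_bit(pids, pid_max, pid);
		if (pid < pid_max)
			return (void *)(pid + 1);	/* cursor for next call */

		return NULL;				/* NULL == done */
	}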
 
@@ -1543,11 +1641,6 @@
 	return r;
 }
 
-static int max_pids(struct trace_pid_list *pid_list)
-{
-	return (PAGE_SIZE << pid_list->order) / sizeof(pid_t);
-}
-
 static void ignore_task_cpu(void *data)
 {
 	struct trace_array *tr = data;
@@ -1561,7 +1654,7 @@
 					     mutex_is_locked(&event_mutex));
 
 	this_cpu_write(tr->trace_buffer.data->ignore_pid,
-		       check_ignore_pid(pid_list, current));
+		       ignore_this_task(pid_list, current));
 }
 
 static ssize_t
@@ -1571,7 +1664,7 @@
 	struct seq_file *m = filp->private_data;
 	struct trace_array *tr = m->private;
 	struct trace_pid_list *filtered_pids = NULL;
-	struct trace_pid_list *pid_list = NULL;
+	struct trace_pid_list *pid_list;
 	struct trace_event_file *file;
 	struct trace_parser parser;
 	unsigned long val;
@@ -1579,7 +1672,7 @@
 	ssize_t read = 0;
 	ssize_t ret = 0;
 	pid_t pid;
-	int i;
+	int nr_pids = 0;
 
 	if (!cnt)
 		return 0;
@@ -1592,10 +1685,43 @@
 		return -ENOMEM;
 
 	mutex_lock(&event_mutex);
+	filtered_pids = rcu_dereference_protected(tr->filtered_pids,
+					     lockdep_is_held(&event_mutex));
+
 	/*
-	 * Load as many pids into the array before doing a
-	 * swap from the tr->filtered_pids to the new list.
+	 * Always create a new array. The write is an all-or-nothing
+	 * operation: a new array is built from the user's input and only
+	 * replaces the current list if that succeeds. If the operation
+	 * fails, the current list is left unmodified.
 	 */
+	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
+	if (!pid_list) {
+		read = -ENOMEM;
+		goto out;
+	}
+	pid_list->pid_max = READ_ONCE(pid_max);
+	/* Only truncating will shrink pid_max */
+	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
+		pid_list->pid_max = filtered_pids->pid_max;
+	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
+	if (!pid_list->pids) {
+		kfree(pid_list);
+		read = -ENOMEM;
+		goto out;
+	}
+	if (filtered_pids) {
+		/* copy the current bits to the new max */
+		pid = find_first_bit(filtered_pids->pids,
+				     filtered_pids->pid_max);
+		while (pid < filtered_pids->pid_max) {
+			set_bit(pid, pid_list->pids);
+			pid = find_next_bit(filtered_pids->pids,
+					    filtered_pids->pid_max,
+					    pid + 1);
+			nr_pids++;
+		}
+	}
+
 	while (cnt > 0) {
 
 		this_pos = 0;
@@ -1613,92 +1739,35 @@
 		ret = -EINVAL;
 		if (kstrtoul(parser.buffer, 0, &val))
 			break;
-		if (val > INT_MAX)
+		if (val >= pid_list->pid_max)
 			break;
 
 		pid = (pid_t)val;
 
-		ret = -ENOMEM;
-		if (!pid_list) {
-			pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
-			if (!pid_list)
-				break;
+		set_bit(pid, pid_list->pids);
+		nr_pids++;
 
-			filtered_pids = rcu_dereference_protected(tr->filtered_pids,
-							lockdep_is_held(&event_mutex));
-			if (filtered_pids)
-				pid_list->order = filtered_pids->order;
-			else
-				pid_list->order = 0;
-
-			pid_list->pids = (void *)__get_free_pages(GFP_KERNEL,
-								  pid_list->order);
-			if (!pid_list->pids)
-				break;
-
-			if (filtered_pids) {
-				pid_list->nr_pids = filtered_pids->nr_pids;
-				memcpy(pid_list->pids, filtered_pids->pids,
-				       pid_list->nr_pids * sizeof(pid_t));
-			} else
-				pid_list->nr_pids = 0;
-		}
-
-		if (pid_list->nr_pids >= max_pids(pid_list)) {
-			pid_t *pid_page;
-
-			pid_page = (void *)__get_free_pages(GFP_KERNEL,
-							    pid_list->order + 1);
-			if (!pid_page)
-				break;
-			memcpy(pid_page, pid_list->pids,
-			       pid_list->nr_pids * sizeof(pid_t));
-			free_pages((unsigned long)pid_list->pids, pid_list->order);
-
-			pid_list->order++;
-			pid_list->pids = pid_page;
-		}
-
-		pid_list->pids[pid_list->nr_pids++] = pid;
 		trace_parser_clear(&parser);
 		ret = 0;
 	}
 	trace_parser_put(&parser);
 
 	if (ret < 0) {
-		if (pid_list)
-			free_pages((unsigned long)pid_list->pids, pid_list->order);
+		vfree(pid_list->pids);
 		kfree(pid_list);
-		mutex_unlock(&event_mutex);
-		return ret;
+		read = ret;
+		goto out;
 	}
 
-	if (!pid_list) {
-		mutex_unlock(&event_mutex);
-		return ret;
+	if (!nr_pids) {
+		/* Cleared the list of pids */
+		vfree(pid_list->pids);
+		kfree(pid_list);
+		read = ret;
+		if (!filtered_pids)
+			goto out;
+		pid_list = NULL;
 	}
-
-	sort(pid_list->pids, pid_list->nr_pids, sizeof(pid_t), cmp_pid, NULL);
-
-	/* Remove duplicates */
-	for (i = 1; i < pid_list->nr_pids; i++) {
-		int start = i;
-
-		while (i < pid_list->nr_pids &&
-		       pid_list->pids[i - 1] == pid_list->pids[i])
-			i++;
-
-		if (start != i) {
-			if (i < pid_list->nr_pids) {
-				memmove(&pid_list->pids[start], &pid_list->pids[i],
-					(pid_list->nr_pids - i) * sizeof(pid_t));
-				pid_list->nr_pids -= i - start;
-				i = start;
-			} else
-				pid_list->nr_pids = start;
-		}
-	}
-
 	rcu_assign_pointer(tr->filtered_pids, pid_list);
 
 	list_for_each_entry(file, &tr->events, list) {
@@ -1708,7 +1777,7 @@
 	if (filtered_pids) {
 		synchronize_sched();
 
-		free_pages((unsigned long)filtered_pids->pids, filtered_pids->order);
+		vfree(filtered_pids->pids);
 		kfree(filtered_pids);
 	} else {
 		/*
@@ -1745,10 +1814,12 @@
 	 */
 	on_each_cpu(ignore_task_cpu, tr, 1);
 
+ out:
 	mutex_unlock(&event_mutex);
 
 	ret = read;
-	*ppos += read;
+	if (read > 0)
+		*ppos += read;
 
 	return ret;
 }
@@ -2095,9 +2166,18 @@
 	trace_create_file("filter", 0644, file->dir, file,
 			  &ftrace_event_filter_fops);
 
-	trace_create_file("trigger", 0644, file->dir, file,
-			  &event_trigger_fops);
+	/*
+	 * Only event directories that can be enabled should have
+	 * triggers.
+	 */
+	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+		trace_create_file("trigger", 0644, file->dir, file,
+				  &event_trigger_fops);
 
+#ifdef CONFIG_HIST_TRIGGERS
+	trace_create_file("hist", 0444, file->dir, file,
+			  &event_hist_fops);
+#endif
 	trace_create_file("format", 0444, file->dir, call,
 			  &ftrace_event_format_fops);
 
@@ -3345,7 +3425,7 @@
 
 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
-static struct trace_array *event_tr;
+static struct trace_event_file event_trace_file __initdata;
 
 static void __init
 function_test_events_call(unsigned long ip, unsigned long parent_ip,
@@ -3369,17 +3449,17 @@
 
 	local_save_flags(flags);
 
-	event = trace_current_buffer_lock_reserve(&buffer,
-						  TRACE_FN, sizeof(*entry),
-						  flags, pc);
+	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
+						TRACE_FN, sizeof(*entry),
+						flags, pc);
 	if (!event)
 		goto out;
 	entry	= ring_buffer_event_data(event);
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
 
-	trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);
-
+	event_trigger_unlock_commit(&event_trace_file, buffer, event,
+				    entry, flags, pc);
  out:
 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	preempt_enable_notrace();
@@ -3394,9 +3474,11 @@
 static __init void event_trace_self_test_with_function(void)
 {
 	int ret;
-	event_tr = top_trace_array();
-	if (WARN_ON(!event_tr))
+
+	event_trace_file.tr = top_trace_array();
+	if (WARN_ON(!event_trace_file.tr))
 		return;
+
 	ret = register_ftrace_function(&trace_ops);
 	if (WARN_ON(ret < 0)) {
 		pr_info("Failed to enable function tracer for event tests\n");
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index b3f5051..9daa9b3 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -689,10 +689,7 @@
 
 static inline struct event_filter *event_filter(struct trace_event_file *file)
 {
-	if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-		return file->event_call->filter;
-	else
-		return file->filter;
+	return file->filter;
 }
 
 /* caller must hold event_mutex */
@@ -826,12 +823,12 @@
 
 static void filter_disable(struct trace_event_file *file)
 {
-	struct trace_event_call *call = file->event_call;
+	unsigned long old_flags = file->flags;
 
-	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-		call->flags &= ~TRACE_EVENT_FL_FILTERED;
-	else
-		file->flags &= ~EVENT_FILE_FL_FILTERED;
+	file->flags &= ~EVENT_FILE_FL_FILTERED;
+
+	if (old_flags != file->flags)
+		trace_buffered_event_disable();
 }
 
 static void __free_filter(struct event_filter *filter)
@@ -883,13 +880,8 @@
 
 static inline void __remove_filter(struct trace_event_file *file)
 {
-	struct trace_event_call *call = file->event_call;
-
 	filter_disable(file);
-	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-		remove_filter_string(call->filter);
-	else
-		remove_filter_string(file->filter);
+	remove_filter_string(file->filter);
 }
 
 static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir,
@@ -906,15 +898,8 @@
 
 static inline void __free_subsystem_filter(struct trace_event_file *file)
 {
-	struct trace_event_call *call = file->event_call;
-
-	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
-		__free_filter(call->filter);
-		call->filter = NULL;
-	} else {
-		__free_filter(file->filter);
-		file->filter = NULL;
-	}
+	__free_filter(file->filter);
+	file->filter = NULL;
 }
 
 static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
@@ -1718,69 +1703,43 @@
 
 static inline void event_set_filtered_flag(struct trace_event_file *file)
 {
-	struct trace_event_call *call = file->event_call;
+	unsigned long old_flags = file->flags;
 
-	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-		call->flags |= TRACE_EVENT_FL_FILTERED;
-	else
-		file->flags |= EVENT_FILE_FL_FILTERED;
+	file->flags |= EVENT_FILE_FL_FILTERED;
+
+	if (old_flags != file->flags)
+		trace_buffered_event_enable();
 }
 
 static inline void event_set_filter(struct trace_event_file *file,
 				    struct event_filter *filter)
 {
-	struct trace_event_call *call = file->event_call;
-
-	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-		rcu_assign_pointer(call->filter, filter);
-	else
-		rcu_assign_pointer(file->filter, filter);
+	rcu_assign_pointer(file->filter, filter);
 }
 
 static inline void event_clear_filter(struct trace_event_file *file)
 {
-	struct trace_event_call *call = file->event_call;
-
-	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-		RCU_INIT_POINTER(call->filter, NULL);
-	else
-		RCU_INIT_POINTER(file->filter, NULL);
+	RCU_INIT_POINTER(file->filter, NULL);
 }
 
 static inline void
 event_set_no_set_filter_flag(struct trace_event_file *file)
 {
-	struct trace_event_call *call = file->event_call;
-
-	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-		call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
-	else
-		file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
+	file->flags |= EVENT_FILE_FL_NO_SET_FILTER;
 }
 
 static inline void
 event_clear_no_set_filter_flag(struct trace_event_file *file)
 {
-	struct trace_event_call *call = file->event_call;
-
-	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-		call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
-	else
-		file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
+	file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER;
 }
 
 static inline bool
 event_no_set_filter_flag(struct trace_event_file *file)
 {
-	struct trace_event_call *call = file->event_call;
-
 	if (file->flags & EVENT_FILE_FL_NO_SET_FILTER)
 		return true;
 
-	if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
-	    (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
-		return true;
-
 	return false;
 }
 
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
new file mode 100644
index 0000000..0c05b8a
--- /dev/null
+++ b/kernel/trace/trace_events_hist.c
@@ -0,0 +1,1755 @@
+/*
+ * trace_events_hist - trace event hist triggers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+
+#include "tracing_map.h"
+#include "trace.h"
+
+struct hist_field;
+
+typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
+
+struct hist_field {
+	struct ftrace_event_field	*field;
+	unsigned long			flags;
+	hist_field_fn_t			fn;
+	unsigned int			size;
+	unsigned int			offset;
+};
+
+static u64 hist_field_none(struct hist_field *field, void *event)
+{
+	return 0;
+}
+
+static u64 hist_field_counter(struct hist_field *field, void *event)
+{
+	return 1;
+}
+
+static u64 hist_field_string(struct hist_field *hist_field, void *event)
+{
+	char *addr = (char *)(event + hist_field->field->offset);
+
+	return (u64)(unsigned long)addr;
+}
+
+static u64 hist_field_dynstring(struct hist_field *hist_field, void *event)
+{
+	u32 str_item = *(u32 *)(event + hist_field->field->offset);
+	int str_loc = str_item & 0xffff;
+	char *addr = (char *)(event + str_loc);
+
+	return (u64)(unsigned long)addr;
+}
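
hist_field_dynstring() depends on the layout of __data_loc (dynamic array/string)
fields: the fixed part of the record holds a packed u32 whose low 16 bits give the
offset of the data from the start of the entry and whose high 16 bits give its length,
which is the same encoding add_to_key() unpacks later in this file. A small
illustrative decoder (an assumed helper, not part of the patch):

	static inline char *example_decode_data_loc(void *event, u32 data_loc,
						    unsigned int *len)
	{
		*len = data_loc >> 16;			/* high 16 bits: length */
		return (char *)event + (data_loc & 0xffff); /* low 16 bits: offset */
	}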
+
+static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
+{
+	char **addr = (char **)(event + hist_field->field->offset);
+
+	return (u64)(unsigned long)*addr;
+}
+
+static u64 hist_field_log2(struct hist_field *hist_field, void *event)
+{
+	u64 val = *(u64 *)(event + hist_field->field->offset);
+
+	return (u64) ilog2(roundup_pow_of_two(val));
+}
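
For example, applying a .log2 modifier to a byte-count field maps a value of 1500 to
roundup_pow_of_two(1500) = 2048 and then ilog2(2048) = 11, so every value in the range
(1024, 2048] falls into the bucket that hist_trigger_entry_print() later renders as
"~ 2^11".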
+
+#define DEFINE_HIST_FIELD_FN(type)					\
+static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
+{									\
+	type *addr = (type *)(event + hist_field->field->offset);	\
+									\
+	return (u64)(unsigned long)*addr;				\
+}
+
+DEFINE_HIST_FIELD_FN(s64);
+DEFINE_HIST_FIELD_FN(u64);
+DEFINE_HIST_FIELD_FN(s32);
+DEFINE_HIST_FIELD_FN(u32);
+DEFINE_HIST_FIELD_FN(s16);
+DEFINE_HIST_FIELD_FN(u16);
+DEFINE_HIST_FIELD_FN(s8);
+DEFINE_HIST_FIELD_FN(u8);
+
+#define for_each_hist_field(i, hist_data)	\
+	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
+
+#define for_each_hist_val_field(i, hist_data)	\
+	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
+
+#define for_each_hist_key_field(i, hist_data)	\
+	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
+
+#define HIST_STACKTRACE_DEPTH	16
+#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
+#define HIST_STACKTRACE_SKIP	5
+
+#define HITCOUNT_IDX		0
+#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
+
+enum hist_field_flags {
+	HIST_FIELD_FL_HITCOUNT		= 1,
+	HIST_FIELD_FL_KEY		= 2,
+	HIST_FIELD_FL_STRING		= 4,
+	HIST_FIELD_FL_HEX		= 8,
+	HIST_FIELD_FL_SYM		= 16,
+	HIST_FIELD_FL_SYM_OFFSET	= 32,
+	HIST_FIELD_FL_EXECNAME		= 64,
+	HIST_FIELD_FL_SYSCALL		= 128,
+	HIST_FIELD_FL_STACKTRACE	= 256,
+	HIST_FIELD_FL_LOG2		= 512,
+};
+
+struct hist_trigger_attrs {
+	char		*keys_str;
+	char		*vals_str;
+	char		*sort_key_str;
+	char		*name;
+	bool		pause;
+	bool		cont;
+	bool		clear;
+	unsigned int	map_bits;
+};
+
+struct hist_trigger_data {
+	struct hist_field               *fields[TRACING_MAP_FIELDS_MAX];
+	unsigned int			n_vals;
+	unsigned int			n_keys;
+	unsigned int			n_fields;
+	unsigned int			key_size;
+	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
+	unsigned int			n_sort_keys;
+	struct trace_event_file		*event_file;
+	struct hist_trigger_attrs	*attrs;
+	struct tracing_map		*map;
+};
+
+static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
+{
+	hist_field_fn_t fn = NULL;
+
+	switch (field_size) {
+	case 8:
+		if (field_is_signed)
+			fn = hist_field_s64;
+		else
+			fn = hist_field_u64;
+		break;
+	case 4:
+		if (field_is_signed)
+			fn = hist_field_s32;
+		else
+			fn = hist_field_u32;
+		break;
+	case 2:
+		if (field_is_signed)
+			fn = hist_field_s16;
+		else
+			fn = hist_field_u16;
+		break;
+	case 1:
+		if (field_is_signed)
+			fn = hist_field_s8;
+		else
+			fn = hist_field_u8;
+		break;
+	}
+
+	return fn;
+}
+
+static int parse_map_size(char *str)
+{
+	unsigned long size, map_bits;
+	int ret;
+
+	strsep(&str, "=");
+	if (!str) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = kstrtoul(str, 0, &size);
+	if (ret)
+		goto out;
+
+	map_bits = ilog2(roundup_pow_of_two(size));
+	if (map_bits < TRACING_MAP_BITS_MIN ||
+	    map_bits > TRACING_MAP_BITS_MAX)
+		ret = -EINVAL;
+	else
+		ret = map_bits;
+ out:
+	return ret;
+}
+
+static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
+{
+	if (!attrs)
+		return;
+
+	kfree(attrs->name);
+	kfree(attrs->sort_key_str);
+	kfree(attrs->keys_str);
+	kfree(attrs->vals_str);
+	kfree(attrs);
+}
+
+static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
+{
+	struct hist_trigger_attrs *attrs;
+	int ret = 0;
+
+	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+	if (!attrs)
+		return ERR_PTR(-ENOMEM);
+
+	while (trigger_str) {
+		char *str = strsep(&trigger_str, ":");
+
+		if ((strncmp(str, "key=", strlen("key=")) == 0) ||
+		    (strncmp(str, "keys=", strlen("keys=")) == 0))
+			attrs->keys_str = kstrdup(str, GFP_KERNEL);
+		else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
+			 (strncmp(str, "vals=", strlen("vals=")) == 0) ||
+			 (strncmp(str, "values=", strlen("values=")) == 0))
+			attrs->vals_str = kstrdup(str, GFP_KERNEL);
+		else if (strncmp(str, "sort=", strlen("sort=")) == 0)
+			attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
+		else if (strncmp(str, "name=", strlen("name=")) == 0)
+			attrs->name = kstrdup(str, GFP_KERNEL);
+		else if (strcmp(str, "pause") == 0)
+			attrs->pause = true;
+		else if ((strcmp(str, "cont") == 0) ||
+			 (strcmp(str, "continue") == 0))
+			attrs->cont = true;
+		else if (strcmp(str, "clear") == 0)
+			attrs->clear = true;
+		else if (strncmp(str, "size=", strlen("size=")) == 0) {
+			int map_bits = parse_map_size(str);
+
+			if (map_bits < 0) {
+				ret = map_bits;
+				goto free;
+			}
+			attrs->map_bits = map_bits;
+		} else {
+			ret = -EINVAL;
+			goto free;
+		}
+	}
+
+	if (!attrs->keys_str) {
+		ret = -EINVAL;
+		goto free;
+	}
+
+	return attrs;
+ free:
+	destroy_hist_trigger_attrs(attrs);
+
+	return ERR_PTR(ret);
+}
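
To make the parsing above concrete (an assumed example, using an event that happens to
have call_site and bytes_req fields): for a trigger written as
hist:keys=call_site.sym:vals=bytes_req:sort=bytes_req.descending:size=2048, the trigger
command layer consumes the leading "hist" and the remainder is split on ':' here, so
keys_str becomes "keys=call_site.sym", vals_str becomes "vals=bytes_req", sort_key_str
becomes "sort=bytes_req.descending", and parse_map_size() turns "size=2048" into
map_bits = ilog2(2048) = 11.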
+
+static inline void save_comm(char *comm, struct task_struct *task)
+{
+	if (!task->pid) {
+		strcpy(comm, "<idle>");
+		return;
+	}
+
+	if (WARN_ON_ONCE(task->pid < 0)) {
+		strcpy(comm, "<XXX>");
+		return;
+	}
+
+	memcpy(comm, task->comm, TASK_COMM_LEN);
+}
+
+static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
+{
+	kfree((char *)elt->private_data);
+}
+
+static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
+{
+	struct hist_trigger_data *hist_data = elt->map->private_data;
+	struct hist_field *key_field;
+	unsigned int i;
+
+	for_each_hist_key_field(i, hist_data) {
+		key_field = hist_data->fields[i];
+
+		if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
+			unsigned int size = TASK_COMM_LEN + 1;
+
+			elt->private_data = kzalloc(size, GFP_KERNEL);
+			if (!elt->private_data)
+				return -ENOMEM;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
+				       struct tracing_map_elt *from)
+{
+	char *comm_from = from->private_data;
+	char *comm_to = to->private_data;
+
+	if (comm_from)
+		memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
+}
+
+static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
+{
+	char *comm = elt->private_data;
+
+	if (comm)
+		save_comm(comm, current);
+}
+
+static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
+	.elt_alloc	= hist_trigger_elt_comm_alloc,
+	.elt_copy	= hist_trigger_elt_comm_copy,
+	.elt_free	= hist_trigger_elt_comm_free,
+	.elt_init	= hist_trigger_elt_comm_init,
+};
+
+static void destroy_hist_field(struct hist_field *hist_field)
+{
+	kfree(hist_field);
+}
+
+static struct hist_field *create_hist_field(struct ftrace_event_field *field,
+					    unsigned long flags)
+{
+	struct hist_field *hist_field;
+
+	if (field && is_function_field(field))
+		return NULL;
+
+	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
+	if (!hist_field)
+		return NULL;
+
+	if (flags & HIST_FIELD_FL_HITCOUNT) {
+		hist_field->fn = hist_field_counter;
+		goto out;
+	}
+
+	if (flags & HIST_FIELD_FL_STACKTRACE) {
+		hist_field->fn = hist_field_none;
+		goto out;
+	}
+
+	if (flags & HIST_FIELD_FL_LOG2) {
+		hist_field->fn = hist_field_log2;
+		goto out;
+	}
+
+	if (WARN_ON_ONCE(!field))
+		goto out;
+
+	if (is_string_field(field)) {
+		flags |= HIST_FIELD_FL_STRING;
+
+		if (field->filter_type == FILTER_STATIC_STRING)
+			hist_field->fn = hist_field_string;
+		else if (field->filter_type == FILTER_DYN_STRING)
+			hist_field->fn = hist_field_dynstring;
+		else
+			hist_field->fn = hist_field_pstring;
+	} else {
+		hist_field->fn = select_value_fn(field->size,
+						 field->is_signed);
+		if (!hist_field->fn) {
+			destroy_hist_field(hist_field);
+			return NULL;
+		}
+	}
+ out:
+	hist_field->field = field;
+	hist_field->flags = flags;
+
+	return hist_field;
+}
+
+static void destroy_hist_fields(struct hist_trigger_data *hist_data)
+{
+	unsigned int i;
+
+	for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
+		if (hist_data->fields[i]) {
+			destroy_hist_field(hist_data->fields[i]);
+			hist_data->fields[i] = NULL;
+		}
+	}
+}
+
+static int create_hitcount_val(struct hist_trigger_data *hist_data)
+{
+	hist_data->fields[HITCOUNT_IDX] =
+		create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
+	if (!hist_data->fields[HITCOUNT_IDX])
+		return -ENOMEM;
+
+	hist_data->n_vals++;
+
+	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int create_val_field(struct hist_trigger_data *hist_data,
+			    unsigned int val_idx,
+			    struct trace_event_file *file,
+			    char *field_str)
+{
+	struct ftrace_event_field *field = NULL;
+	unsigned long flags = 0;
+	char *field_name;
+	int ret = 0;
+
+	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
+		return -EINVAL;
+
+	field_name = strsep(&field_str, ".");
+	if (field_str) {
+		if (strcmp(field_str, "hex") == 0)
+			flags |= HIST_FIELD_FL_HEX;
+		else {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	field = trace_find_event_field(file->event_call, field_name);
+	if (!field) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	hist_data->fields[val_idx] = create_hist_field(field, flags);
+	if (!hist_data->fields[val_idx]) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	++hist_data->n_vals;
+
+	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
+		ret = -EINVAL;
+ out:
+	return ret;
+}
+
+static int create_val_fields(struct hist_trigger_data *hist_data,
+			     struct trace_event_file *file)
+{
+	char *fields_str, *field_str;
+	unsigned int i, j;
+	int ret;
+
+	ret = create_hitcount_val(hist_data);
+	if (ret)
+		goto out;
+
+	fields_str = hist_data->attrs->vals_str;
+	if (!fields_str)
+		goto out;
+
+	strsep(&fields_str, "=");
+	if (!fields_str)
+		goto out;
+
+	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
+		     j < TRACING_MAP_VALS_MAX; i++) {
+		field_str = strsep(&fields_str, ",");
+		if (!field_str)
+			break;
+		if (strcmp(field_str, "hitcount") == 0)
+			continue;
+		ret = create_val_field(hist_data, j++, file, field_str);
+		if (ret)
+			goto out;
+	}
+	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
+		ret = -EINVAL;
+ out:
+	return ret;
+}
+
+static int create_key_field(struct hist_trigger_data *hist_data,
+			    unsigned int key_idx,
+			    unsigned int key_offset,
+			    struct trace_event_file *file,
+			    char *field_str)
+{
+	struct ftrace_event_field *field = NULL;
+	unsigned long flags = 0;
+	unsigned int key_size;
+	int ret = 0;
+
+	if (WARN_ON(key_idx >= TRACING_MAP_FIELDS_MAX))
+		return -EINVAL;
+
+	flags |= HIST_FIELD_FL_KEY;
+
+	if (strcmp(field_str, "stacktrace") == 0) {
+		flags |= HIST_FIELD_FL_STACKTRACE;
+		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
+	} else {
+		char *field_name = strsep(&field_str, ".");
+
+		if (field_str) {
+			if (strcmp(field_str, "hex") == 0)
+				flags |= HIST_FIELD_FL_HEX;
+			else if (strcmp(field_str, "sym") == 0)
+				flags |= HIST_FIELD_FL_SYM;
+			else if (strcmp(field_str, "sym-offset") == 0)
+				flags |= HIST_FIELD_FL_SYM_OFFSET;
+			else if ((strcmp(field_str, "execname") == 0) &&
+				 (strcmp(field_name, "common_pid") == 0))
+				flags |= HIST_FIELD_FL_EXECNAME;
+			else if (strcmp(field_str, "syscall") == 0)
+				flags |= HIST_FIELD_FL_SYSCALL;
+			else if (strcmp(field_str, "log2") == 0)
+				flags |= HIST_FIELD_FL_LOG2;
+			else {
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+
+		field = trace_find_event_field(file->event_call, field_name);
+		if (!field) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (is_string_field(field))
+			key_size = MAX_FILTER_STR_VAL;
+		else
+			key_size = field->size;
+	}
+
+	hist_data->fields[key_idx] = create_hist_field(field, flags);
+	if (!hist_data->fields[key_idx]) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	key_size = ALIGN(key_size, sizeof(u64));
+	hist_data->fields[key_idx]->size = key_size;
+	hist_data->fields[key_idx]->offset = key_offset;
+	hist_data->key_size += key_size;
+	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	hist_data->n_keys++;
+
+	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
+		return -EINVAL;
+
+	ret = key_size;
+ out:
+	return ret;
+}
+
+static int create_key_fields(struct hist_trigger_data *hist_data,
+			     struct trace_event_file *file)
+{
+	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
+	char *fields_str, *field_str;
+	int ret = -EINVAL;
+
+	fields_str = hist_data->attrs->keys_str;
+	if (!fields_str)
+		goto out;
+
+	strsep(&fields_str, "=");
+	if (!fields_str)
+		goto out;
+
+	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
+		field_str = strsep(&fields_str, ",");
+		if (!field_str)
+			break;
+		ret = create_key_field(hist_data, i, key_offset,
+				       file, field_str);
+		if (ret < 0)
+			goto out;
+		key_offset += ret;
+	}
+	if (fields_str) {
+		ret = -EINVAL;
+		goto out;
+	}
+	ret = 0;
+ out:
+	return ret;
+}
+
+static int create_hist_fields(struct hist_trigger_data *hist_data,
+			      struct trace_event_file *file)
+{
+	int ret;
+
+	ret = create_val_fields(hist_data, file);
+	if (ret)
+		goto out;
+
+	ret = create_key_fields(hist_data, file);
+	if (ret)
+		goto out;
+
+	hist_data->n_fields = hist_data->n_vals + hist_data->n_keys;
+ out:
+	return ret;
+}
+
+static int is_descending(const char *str)
+{
+	if (!str)
+		return 0;
+
+	if (strcmp(str, "descending") == 0)
+		return 1;
+
+	if (strcmp(str, "ascending") == 0)
+		return 0;
+
+	return -EINVAL;
+}
+
+static int create_sort_keys(struct hist_trigger_data *hist_data)
+{
+	char *fields_str = hist_data->attrs->sort_key_str;
+	struct ftrace_event_field *field = NULL;
+	struct tracing_map_sort_key *sort_key;
+	int descending, ret = 0;
+	unsigned int i, j;
+
+	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
+
+	if (!fields_str)
+		goto out;
+
+	strsep(&fields_str, "=");
+	if (!fields_str) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
+		char *field_str, *field_name;
+
+		sort_key = &hist_data->sort_keys[i];
+
+		field_str = strsep(&fields_str, ",");
+		if (!field_str) {
+			if (i == 0)
+				ret = -EINVAL;
+			break;
+		}
+
+		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
+			ret = -EINVAL;
+			break;
+		}
+
+		field_name = strsep(&field_str, ".");
+		if (!field_name) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (strcmp(field_name, "hitcount") == 0) {
+			descending = is_descending(field_str);
+			if (descending < 0) {
+				ret = descending;
+				break;
+			}
+			sort_key->descending = descending;
+			continue;
+		}
+
+		for (j = 1; j < hist_data->n_fields; j++) {
+			field = hist_data->fields[j]->field;
+			if (field && (strcmp(field_name, field->name) == 0)) {
+				sort_key->field_idx = j;
+				descending = is_descending(field_str);
+				if (descending < 0) {
+					ret = descending;
+					goto out;
+				}
+				sort_key->descending = descending;
+				break;
+			}
+		}
+		if (j == hist_data->n_fields) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+	hist_data->n_sort_keys = i;
+ out:
+	return ret;
+}
+
+static void destroy_hist_data(struct hist_trigger_data *hist_data)
+{
+	destroy_hist_trigger_attrs(hist_data->attrs);
+	destroy_hist_fields(hist_data);
+	tracing_map_destroy(hist_data->map);
+	kfree(hist_data);
+}
+
+static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
+{
+	struct tracing_map *map = hist_data->map;
+	struct ftrace_event_field *field;
+	struct hist_field *hist_field;
+	int i, idx;
+
+	for_each_hist_field(i, hist_data) {
+		hist_field = hist_data->fields[i];
+		if (hist_field->flags & HIST_FIELD_FL_KEY) {
+			tracing_map_cmp_fn_t cmp_fn;
+
+			field = hist_field->field;
+
+			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
+				cmp_fn = tracing_map_cmp_none;
+			else if (is_string_field(field))
+				cmp_fn = tracing_map_cmp_string;
+			else
+				cmp_fn = tracing_map_cmp_num(field->size,
+							     field->is_signed);
+			idx = tracing_map_add_key_field(map,
+							hist_field->offset,
+							cmp_fn);
+
+		} else
+			idx = tracing_map_add_sum_field(map);
+
+		if (idx < 0)
+			return idx;
+	}
+
+	return 0;
+}
+
+static bool need_tracing_map_ops(struct hist_trigger_data *hist_data)
+{
+	struct hist_field *key_field;
+	unsigned int i;
+
+	for_each_hist_key_field(i, hist_data) {
+		key_field = hist_data->fields[i];
+
+		if (key_field->flags & HIST_FIELD_FL_EXECNAME)
+			return true;
+	}
+
+	return false;
+}
+
+static struct hist_trigger_data *
+create_hist_data(unsigned int map_bits,
+		 struct hist_trigger_attrs *attrs,
+		 struct trace_event_file *file)
+{
+	const struct tracing_map_ops *map_ops = NULL;
+	struct hist_trigger_data *hist_data;
+	int ret = 0;
+
+	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
+	if (!hist_data)
+		return ERR_PTR(-ENOMEM);
+
+	hist_data->attrs = attrs;
+
+	ret = create_hist_fields(hist_data, file);
+	if (ret)
+		goto free;
+
+	ret = create_sort_keys(hist_data);
+	if (ret)
+		goto free;
+
+	if (need_tracing_map_ops(hist_data))
+		map_ops = &hist_trigger_elt_comm_ops;
+
+	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
+					    map_ops, hist_data);
+	if (IS_ERR(hist_data->map)) {
+		ret = PTR_ERR(hist_data->map);
+		hist_data->map = NULL;
+		goto free;
+	}
+
+	ret = create_tracing_map_fields(hist_data);
+	if (ret)
+		goto free;
+
+	ret = tracing_map_init(hist_data->map);
+	if (ret)
+		goto free;
+
+	hist_data->event_file = file;
+ out:
+	return hist_data;
+ free:
+	hist_data->attrs = NULL;
+
+	destroy_hist_data(hist_data);
+
+	hist_data = ERR_PTR(ret);
+
+	goto out;
+}
+
+static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
+				    struct tracing_map_elt *elt,
+				    void *rec)
+{
+	struct hist_field *hist_field;
+	unsigned int i;
+	u64 hist_val;
+
+	for_each_hist_val_field(i, hist_data) {
+		hist_field = hist_data->fields[i];
+		hist_val = hist_field->fn(hist_field, rec);
+		tracing_map_update_sum(elt, i, hist_val);
+	}
+}
+
+static inline void add_to_key(char *compound_key, void *key,
+			      struct hist_field *key_field, void *rec)
+{
+	size_t size = key_field->size;
+
+	if (key_field->flags & HIST_FIELD_FL_STRING) {
+		struct ftrace_event_field *field;
+
+		field = key_field->field;
+		if (field->filter_type == FILTER_DYN_STRING)
+			size = *(u32 *)(rec + field->offset) >> 16;
+		else if (field->filter_type == FILTER_PTR_STRING)
+			size = strlen(key);
+		else if (field->filter_type == FILTER_STATIC_STRING)
+			size = field->size;
+
+		/* ensure NULL-termination */
+		if (size > key_field->size - 1)
+			size = key_field->size - 1;
+	}
+
+	memcpy(compound_key + key_field->offset, key, size);
+}
+
+static void event_hist_trigger(struct event_trigger_data *data, void *rec)
+{
+	struct hist_trigger_data *hist_data = data->private_data;
+	bool use_compound_key = (hist_data->n_keys > 1);
+	unsigned long entries[HIST_STACKTRACE_DEPTH];
+	char compound_key[HIST_KEY_SIZE_MAX];
+	struct stack_trace stacktrace;
+	struct hist_field *key_field;
+	struct tracing_map_elt *elt;
+	u64 field_contents;
+	void *key = NULL;
+	unsigned int i;
+
+	memset(compound_key, 0, hist_data->key_size);
+
+	for_each_hist_key_field(i, hist_data) {
+		key_field = hist_data->fields[i];
+
+		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
+			stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
+			stacktrace.entries = entries;
+			stacktrace.nr_entries = 0;
+			stacktrace.skip = HIST_STACKTRACE_SKIP;
+
+			memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
+			save_stack_trace(&stacktrace);
+
+			key = entries;
+		} else {
+			field_contents = key_field->fn(key_field, rec);
+			if (key_field->flags & HIST_FIELD_FL_STRING) {
+				key = (void *)(unsigned long)field_contents;
+				use_compound_key = true;
+			} else
+				key = (void *)&field_contents;
+		}
+
+		if (use_compound_key)
+			add_to_key(compound_key, key, key_field, rec);
+	}
+
+	if (use_compound_key)
+		key = compound_key;
+
+	elt = tracing_map_insert(hist_data->map, key);
+	if (elt)
+		hist_trigger_elt_update(hist_data, elt, rec);
+}
+
+static void hist_trigger_stacktrace_print(struct seq_file *m,
+					  unsigned long *stacktrace_entries,
+					  unsigned int max_entries)
+{
+	char str[KSYM_SYMBOL_LEN];
+	unsigned int spaces = 8;
+	unsigned int i;
+
+	for (i = 0; i < max_entries; i++) {
+		if (stacktrace_entries[i] == ULONG_MAX)
+			return;
+
+		seq_printf(m, "%*c", 1 + spaces, ' ');
+		sprint_symbol(str, stacktrace_entries[i]);
+		seq_printf(m, "%s\n", str);
+	}
+}
+
+static void
+hist_trigger_entry_print(struct seq_file *m,
+			 struct hist_trigger_data *hist_data, void *key,
+			 struct tracing_map_elt *elt)
+{
+	struct hist_field *key_field;
+	char str[KSYM_SYMBOL_LEN];
+	bool multiline = false;
+	unsigned int i;
+	u64 uval;
+
+	seq_puts(m, "{ ");
+
+	for_each_hist_key_field(i, hist_data) {
+		key_field = hist_data->fields[i];
+
+		if (i > hist_data->n_vals)
+			seq_puts(m, ", ");
+
+		if (key_field->flags & HIST_FIELD_FL_HEX) {
+			uval = *(u64 *)(key + key_field->offset);
+			seq_printf(m, "%s: %llx",
+				   key_field->field->name, uval);
+		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
+			uval = *(u64 *)(key + key_field->offset);
+			sprint_symbol_no_offset(str, uval);
+			seq_printf(m, "%s: [%llx] %-45s",
+				   key_field->field->name, uval, str);
+		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
+			uval = *(u64 *)(key + key_field->offset);
+			sprint_symbol(str, uval);
+			seq_printf(m, "%s: [%llx] %-55s",
+				   key_field->field->name, uval, str);
+		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
+			char *comm = elt->private_data;
+
+			uval = *(u64 *)(key + key_field->offset);
+			seq_printf(m, "%s: %-16s[%10llu]",
+				   key_field->field->name, comm, uval);
+		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
+			const char *syscall_name;
+
+			uval = *(u64 *)(key + key_field->offset);
+			syscall_name = get_syscall_name(uval);
+			if (!syscall_name)
+				syscall_name = "unknown_syscall";
+
+			seq_printf(m, "%s: %-30s[%3llu]",
+				   key_field->field->name, syscall_name, uval);
+		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
+			seq_puts(m, "stacktrace:\n");
+			hist_trigger_stacktrace_print(m,
+						      key + key_field->offset,
+						      HIST_STACKTRACE_DEPTH);
+			multiline = true;
+		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
+			seq_printf(m, "%s: ~ 2^%-2llu", key_field->field->name,
+				   *(u64 *)(key + key_field->offset));
+		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
+			seq_printf(m, "%s: %-50s", key_field->field->name,
+				   (char *)(key + key_field->offset));
+		} else {
+			uval = *(u64 *)(key + key_field->offset);
+			seq_printf(m, "%s: %10llu", key_field->field->name,
+				   uval);
+		}
+	}
+
+	if (!multiline)
+		seq_puts(m, " ");
+
+	seq_puts(m, "}");
+
+	seq_printf(m, " hitcount: %10llu",
+		   tracing_map_read_sum(elt, HITCOUNT_IDX));
+
+	for (i = 1; i < hist_data->n_vals; i++) {
+		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
+			seq_printf(m, "  %s: %10llx",
+				   hist_data->fields[i]->field->name,
+				   tracing_map_read_sum(elt, i));
+		} else {
+			seq_printf(m, "  %s: %10llu",
+				   hist_data->fields[i]->field->name,
+				   tracing_map_read_sum(elt, i));
+		}
+	}
+
+	seq_puts(m, "\n");
+}
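
Taken together, each line printed here has the general shape
{ key: value, ... } hitcount: N, followed by one column per extra value field; a
stacktrace key expands into a multi-line symbol dump instead of a single key/value
pair, and hist_trigger_show() appends a totals block with the overall hit, entry and
drop counts.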
+
+static int print_entries(struct seq_file *m,
+			 struct hist_trigger_data *hist_data)
+{
+	struct tracing_map_sort_entry **sort_entries = NULL;
+	struct tracing_map *map = hist_data->map;
+	int i, n_entries;
+
+	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
+					     hist_data->n_sort_keys,
+					     &sort_entries);
+	if (n_entries < 0)
+		return n_entries;
+
+	for (i = 0; i < n_entries; i++)
+		hist_trigger_entry_print(m, hist_data,
+					 sort_entries[i]->key,
+					 sort_entries[i]->elt);
+
+	tracing_map_destroy_sort_entries(sort_entries, n_entries);
+
+	return n_entries;
+}
+
+static void hist_trigger_show(struct seq_file *m,
+			      struct event_trigger_data *data, int n)
+{
+	struct hist_trigger_data *hist_data;
+	int n_entries, ret = 0;
+
+	if (n > 0)
+		seq_puts(m, "\n\n");
+
+	seq_puts(m, "# event histogram\n#\n# trigger info: ");
+	data->ops->print(m, data->ops, data);
+	seq_puts(m, "#\n\n");
+
+	hist_data = data->private_data;
+	n_entries = print_entries(m, hist_data);
+	if (n_entries < 0) {
+		ret = n_entries;
+		n_entries = 0;
+	}
+
+	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
+		   (u64)atomic64_read(&hist_data->map->hits),
+		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
+}
+
+static int hist_show(struct seq_file *m, void *v)
+{
+	struct event_trigger_data *data;
+	struct trace_event_file *event_file;
+	int n = 0, ret = 0;
+
+	mutex_lock(&event_mutex);
+
+	event_file = event_file_data(m->private);
+	if (unlikely(!event_file)) {
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	list_for_each_entry_rcu(data, &event_file->triggers, list) {
+		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
+			hist_trigger_show(m, data, n++);
+	}
+
+ out_unlock:
+	mutex_unlock(&event_mutex);
+
+	return ret;
+}
+
+static int event_hist_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hist_show, file);
+}
+
+const struct file_operations event_hist_fops = {
+	.open = event_hist_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const char *get_hist_field_flags(struct hist_field *hist_field)
+{
+	const char *flags_str = NULL;
+
+	if (hist_field->flags & HIST_FIELD_FL_HEX)
+		flags_str = "hex";
+	else if (hist_field->flags & HIST_FIELD_FL_SYM)
+		flags_str = "sym";
+	else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
+		flags_str = "sym-offset";
+	else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
+		flags_str = "execname";
+	else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
+		flags_str = "syscall";
+	else if (hist_field->flags & HIST_FIELD_FL_LOG2)
+		flags_str = "log2";
+
+	return flags_str;
+}
+
+static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
+{
+	seq_printf(m, "%s", hist_field->field->name);
+	if (hist_field->flags) {
+		const char *flags_str = get_hist_field_flags(hist_field);
+
+		if (flags_str)
+			seq_printf(m, ".%s", flags_str);
+	}
+}
+
+static int event_hist_trigger_print(struct seq_file *m,
+				    struct event_trigger_ops *ops,
+				    struct event_trigger_data *data)
+{
+	struct hist_trigger_data *hist_data = data->private_data;
+	struct hist_field *key_field;
+	unsigned int i;
+
+	seq_puts(m, "hist:");
+
+	if (data->name)
+		seq_printf(m, "%s:", data->name);
+
+	seq_puts(m, "keys=");
+
+	for_each_hist_key_field(i, hist_data) {
+		key_field = hist_data->fields[i];
+
+		if (i > hist_data->n_vals)
+			seq_puts(m, ",");
+
+		if (key_field->flags & HIST_FIELD_FL_STACKTRACE)
+			seq_puts(m, "stacktrace");
+		else
+			hist_field_print(m, key_field);
+	}
+
+	seq_puts(m, ":vals=");
+
+	for_each_hist_val_field(i, hist_data) {
+		if (i == HITCOUNT_IDX)
+			seq_puts(m, "hitcount");
+		else {
+			seq_puts(m, ",");
+			hist_field_print(m, hist_data->fields[i]);
+		}
+	}
+
+	seq_puts(m, ":sort=");
+
+	for (i = 0; i < hist_data->n_sort_keys; i++) {
+		struct tracing_map_sort_key *sort_key;
+
+		sort_key = &hist_data->sort_keys[i];
+
+		if (i > 0)
+			seq_puts(m, ",");
+
+		if (sort_key->field_idx == HITCOUNT_IDX)
+			seq_puts(m, "hitcount");
+		else {
+			unsigned int idx = sort_key->field_idx;
+
+			if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
+				return -EINVAL;
+
+			hist_field_print(m, hist_data->fields[idx]);
+		}
+
+		if (sort_key->descending)
+			seq_puts(m, ".descending");
+	}
+
+	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
+
+	if (data->filter_str)
+		seq_printf(m, " if %s", data->filter_str);
+
+	if (data->paused)
+		seq_puts(m, " [paused]");
+	else
+		seq_puts(m, " [active]");
+
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
+static int event_hist_trigger_init(struct event_trigger_ops *ops,
+				   struct event_trigger_data *data)
+{
+	struct hist_trigger_data *hist_data = data->private_data;
+
+	if (!data->ref && hist_data->attrs->name)
+		save_named_trigger(hist_data->attrs->name, data);
+
+	data->ref++;
+
+	return 0;
+}
+
+static void event_hist_trigger_free(struct event_trigger_ops *ops,
+				    struct event_trigger_data *data)
+{
+	struct hist_trigger_data *hist_data = data->private_data;
+
+	if (WARN_ON_ONCE(data->ref <= 0))
+		return;
+
+	data->ref--;
+	if (!data->ref) {
+		if (data->name)
+			del_named_trigger(data);
+		trigger_data_free(data);
+		destroy_hist_data(hist_data);
+	}
+}
+
+static struct event_trigger_ops event_hist_trigger_ops = {
+	.func			= event_hist_trigger,
+	.print			= event_hist_trigger_print,
+	.init			= event_hist_trigger_init,
+	.free			= event_hist_trigger_free,
+};
+
+static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
+					 struct event_trigger_data *data)
+{
+	data->ref++;
+
+	save_named_trigger(data->named_data->name, data);
+
+	event_hist_trigger_init(ops, data->named_data);
+
+	return 0;
+}
+
+static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
+					  struct event_trigger_data *data)
+{
+	if (WARN_ON_ONCE(data->ref <= 0))
+		return;
+
+	event_hist_trigger_free(ops, data->named_data);
+
+	data->ref--;
+	if (!data->ref) {
+		del_named_trigger(data);
+		trigger_data_free(data);
+	}
+}
+
+static struct event_trigger_ops event_hist_trigger_named_ops = {
+	.func			= event_hist_trigger,
+	.print			= event_hist_trigger_print,
+	.init			= event_hist_trigger_named_init,
+	.free			= event_hist_trigger_named_free,
+};
+
+static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
+							    char *param)
+{
+	return &event_hist_trigger_ops;
+}
+
+static void hist_clear(struct event_trigger_data *data)
+{
+	struct hist_trigger_data *hist_data = data->private_data;
+
+	if (data->name)
+		pause_named_trigger(data);
+
+	synchronize_sched();
+
+	tracing_map_clear(hist_data->map);
+
+	if (data->name)
+		unpause_named_trigger(data);
+}
+
+static bool compatible_field(struct ftrace_event_field *field,
+			     struct ftrace_event_field *test_field)
+{
+	if (field == test_field)
+		return true;
+	if (field == NULL || test_field == NULL)
+		return false;
+	if (strcmp(field->name, test_field->name) != 0)
+		return false;
+	if (strcmp(field->type, test_field->type) != 0)
+		return false;
+	if (field->size != test_field->size)
+		return false;
+	if (field->is_signed != test_field->is_signed)
+		return false;
+
+	return true;
+}
+
+static bool hist_trigger_match(struct event_trigger_data *data,
+			       struct event_trigger_data *data_test,
+			       struct event_trigger_data *named_data,
+			       bool ignore_filter)
+{
+	struct tracing_map_sort_key *sort_key, *sort_key_test;
+	struct hist_trigger_data *hist_data, *hist_data_test;
+	struct hist_field *key_field, *key_field_test;
+	unsigned int i;
+
+	if (named_data && (named_data != data_test) &&
+	    (named_data != data_test->named_data))
+		return false;
+
+	if (!named_data && is_named_trigger(data_test))
+		return false;
+
+	hist_data = data->private_data;
+	hist_data_test = data_test->private_data;
+
+	if (hist_data->n_vals != hist_data_test->n_vals ||
+	    hist_data->n_fields != hist_data_test->n_fields ||
+	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
+		return false;
+
+	if (!ignore_filter) {
+		if ((data->filter_str && !data_test->filter_str) ||
+		   (!data->filter_str && data_test->filter_str))
+			return false;
+	}
+
+	for_each_hist_field(i, hist_data) {
+		key_field = hist_data->fields[i];
+		key_field_test = hist_data_test->fields[i];
+
+		if (key_field->flags != key_field_test->flags)
+			return false;
+		if (!compatible_field(key_field->field, key_field_test->field))
+			return false;
+		if (key_field->offset != key_field_test->offset)
+			return false;
+	}
+
+	for (i = 0; i < hist_data->n_sort_keys; i++) {
+		sort_key = &hist_data->sort_keys[i];
+		sort_key_test = &hist_data_test->sort_keys[i];
+
+		if (sort_key->field_idx != sort_key_test->field_idx ||
+		    sort_key->descending != sort_key_test->descending)
+			return false;
+	}
+
+	if (!ignore_filter && data->filter_str &&
+	    (strcmp(data->filter_str, data_test->filter_str) != 0))
+		return false;
+
+	return true;
+}
+
+static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
+				 struct event_trigger_data *data,
+				 struct trace_event_file *file)
+{
+	struct hist_trigger_data *hist_data = data->private_data;
+	struct event_trigger_data *test, *named_data = NULL;
+	int ret = 0;
+
+	if (hist_data->attrs->name) {
+		named_data = find_named_trigger(hist_data->attrs->name);
+		if (named_data) {
+			if (!hist_trigger_match(data, named_data, named_data,
+						true)) {
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+	}
+
+	if (hist_data->attrs->name && !named_data)
+		goto new;
+
+	list_for_each_entry_rcu(test, &file->triggers, list) {
+		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+			if (!hist_trigger_match(data, test, named_data, false))
+				continue;
+			if (hist_data->attrs->pause)
+				test->paused = true;
+			else if (hist_data->attrs->cont)
+				test->paused = false;
+			else if (hist_data->attrs->clear)
+				hist_clear(test);
+			else
+				ret = -EEXIST;
+			goto out;
+		}
+	}
+ new:
+	if (hist_data->attrs->cont || hist_data->attrs->clear) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	if (named_data) {
+		destroy_hist_data(data->private_data);
+		data->private_data = named_data->private_data;
+		set_named_trigger_data(data, named_data);
+		data->ops = &event_hist_trigger_named_ops;
+	}
+
+	if (hist_data->attrs->pause)
+		data->paused = true;
+
+	if (data->ops->init) {
+		ret = data->ops->init(data->ops, data);
+		if (ret < 0)
+			goto out;
+	}
+
+	list_add_rcu(&data->list, &file->triggers);
+	ret++;
+
+	update_cond_flag(file);
+
+	if (trace_event_trigger_enable_disable(file, 1) < 0) {
+		list_del_rcu(&data->list);
+		update_cond_flag(file);
+		ret--;
+	}
+ out:
+	return ret;
+}
+
+static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
+				    struct event_trigger_data *data,
+				    struct trace_event_file *file)
+{
+	struct hist_trigger_data *hist_data = data->private_data;
+	struct event_trigger_data *test, *named_data = NULL;
+	bool unregistered = false;
+
+	if (hist_data->attrs->name)
+		named_data = find_named_trigger(hist_data->attrs->name);
+
+	list_for_each_entry_rcu(test, &file->triggers, list) {
+		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+			if (!hist_trigger_match(data, test, named_data, false))
+				continue;
+			unregistered = true;
+			list_del_rcu(&test->list);
+			trace_event_trigger_enable_disable(file, 0);
+			update_cond_flag(file);
+			break;
+		}
+	}
+
+	if (unregistered && test->ops->free)
+		test->ops->free(test->ops, test);
+}
+
+static void hist_unreg_all(struct trace_event_file *file)
+{
+	struct event_trigger_data *test;
+
+	list_for_each_entry_rcu(test, &file->triggers, list) {
+		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+			list_del_rcu(&test->list);
+			trace_event_trigger_enable_disable(file, 0);
+			update_cond_flag(file);
+			if (test->ops->free)
+				test->ops->free(test->ops, test);
+		}
+	}
+}
+
+static int event_hist_trigger_func(struct event_command *cmd_ops,
+				   struct trace_event_file *file,
+				   char *glob, char *cmd, char *param)
+{
+	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
+	struct event_trigger_data *trigger_data;
+	struct hist_trigger_attrs *attrs;
+	struct event_trigger_ops *trigger_ops;
+	struct hist_trigger_data *hist_data;
+	char *trigger;
+	int ret = 0;
+
+	if (!param)
+		return -EINVAL;
+
+	/* separate the trigger from the filter (k:v [if filter]) */
+	trigger = strsep(&param, " \t");
+	if (!trigger)
+		return -EINVAL;
+
+	attrs = parse_hist_trigger_attrs(trigger);
+	if (IS_ERR(attrs))
+		return PTR_ERR(attrs);
+
+	if (attrs->map_bits)
+		hist_trigger_bits = attrs->map_bits;
+
+	hist_data = create_hist_data(hist_trigger_bits, attrs, file);
+	if (IS_ERR(hist_data)) {
+		destroy_hist_trigger_attrs(attrs);
+		return PTR_ERR(hist_data);
+	}
+
+	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
+
+	ret = -ENOMEM;
+	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
+	if (!trigger_data)
+		goto out_free;
+
+	trigger_data->count = -1;
+	trigger_data->ops = trigger_ops;
+	trigger_data->cmd_ops = cmd_ops;
+
+	INIT_LIST_HEAD(&trigger_data->list);
+	RCU_INIT_POINTER(trigger_data->filter, NULL);
+
+	trigger_data->private_data = hist_data;
+
+	/* if param is non-empty, it's supposed to be a filter */
+	if (param && cmd_ops->set_filter) {
+		ret = cmd_ops->set_filter(param, trigger_data, file);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	if (glob[0] == '!') {
+		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
+		ret = 0;
+		goto out_free;
+	}
+
+	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
+	/*
+	 * The above returns on success the # of triggers registered,
+	 * but if it didn't register any it returns zero.  Consider no
+	 * triggers registered a failure too.
+	 */
+	if (!ret) {
+		if (!(attrs->pause || attrs->cont || attrs->clear))
+			ret = -ENOENT;
+		goto out_free;
+	} else if (ret < 0)
+		goto out_free;
+	/* Just return zero, not the number of registered triggers */
+	ret = 0;
+ out:
+	return ret;
+ out_free:
+	if (cmd_ops->set_filter)
+		cmd_ops->set_filter(NULL, trigger_data, NULL);
+
+	kfree(trigger_data);
+
+	destroy_hist_data(hist_data);
+	goto out;
+}
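+
+/*
+ * Illustrative examples (not part of this patch) of hist triggers this
+ * function parses, assuming the kmem:kmalloc event and its call_site
+ * and bytes_req fields:
+ *
+ *	hist:keys=call_site:vals=bytes_req
+ *	hist:keys=call_site:vals=bytes_req:sort=bytes_req.descending:size=4096
+ *	hist:keys=call_site:vals=bytes_req if bytes_req > 512
+ *	hist:keys=call_site:pause
+ *
+ * These are written to an event's trigger file; by the time this
+ * function runs, the leading command name has been stripped off, so
+ * @param holds everything after "hist:".  The portion up to the first
+ * whitespace is handed to parse_hist_trigger_attrs(), and an optional
+ * trailing "if <expr>" becomes the event filter.
+ */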
+
+static struct event_command trigger_hist_cmd = {
+	.name			= "hist",
+	.trigger_type		= ETT_EVENT_HIST,
+	.flags			= EVENT_CMD_FL_NEEDS_REC,
+	.func			= event_hist_trigger_func,
+	.reg			= hist_register_trigger,
+	.unreg			= hist_unregister_trigger,
+	.unreg_all		= hist_unreg_all,
+	.get_trigger_ops	= event_hist_get_trigger_ops,
+	.set_filter		= set_trigger_filter,
+};
+
+__init int register_trigger_hist_cmd(void)
+{
+	int ret;
+
+	ret = register_event_command(&trigger_hist_cmd);
+	WARN_ON(ret < 0);
+
+	return ret;
+}
+
+static void
+hist_enable_trigger(struct event_trigger_data *data, void *rec)
+{
+	struct enable_trigger_data *enable_data = data->private_data;
+	struct event_trigger_data *test;
+
+	list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
+		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+			if (enable_data->enable)
+				test->paused = false;
+			else
+				test->paused = true;
+		}
+	}
+}
+
+static void
+hist_enable_count_trigger(struct event_trigger_data *data, void *rec)
+{
+	if (!data->count)
+		return;
+
+	if (data->count != -1)
+		(data->count)--;
+
+	hist_enable_trigger(data, rec);
+}
+
+static struct event_trigger_ops hist_enable_trigger_ops = {
+	.func			= hist_enable_trigger,
+	.print			= event_enable_trigger_print,
+	.init			= event_trigger_init,
+	.free			= event_enable_trigger_free,
+};
+
+static struct event_trigger_ops hist_enable_count_trigger_ops = {
+	.func			= hist_enable_count_trigger,
+	.print			= event_enable_trigger_print,
+	.init			= event_trigger_init,
+	.free			= event_enable_trigger_free,
+};
+
+static struct event_trigger_ops hist_disable_trigger_ops = {
+	.func			= hist_enable_trigger,
+	.print			= event_enable_trigger_print,
+	.init			= event_trigger_init,
+	.free			= event_enable_trigger_free,
+};
+
+static struct event_trigger_ops hist_disable_count_trigger_ops = {
+	.func			= hist_enable_count_trigger,
+	.print			= event_enable_trigger_print,
+	.init			= event_trigger_init,
+	.free			= event_enable_trigger_free,
+};
+
+static struct event_trigger_ops *
+hist_enable_get_trigger_ops(char *cmd, char *param)
+{
+	struct event_trigger_ops *ops;
+	bool enable;
+
+	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
+
+	if (enable)
+		ops = param ? &hist_enable_count_trigger_ops :
+			&hist_enable_trigger_ops;
+	else
+		ops = param ? &hist_disable_count_trigger_ops :
+			&hist_disable_trigger_ops;
+
+	return ops;
+}
+
+static void hist_enable_unreg_all(struct trace_event_file *file)
+{
+	struct event_trigger_data *test;
+
+	list_for_each_entry_rcu(test, &file->triggers, list) {
+		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
+			list_del_rcu(&test->list);
+			update_cond_flag(file);
+			trace_event_trigger_enable_disable(file, 0);
+			if (test->ops->free)
+				test->ops->free(test->ops, test);
+		}
+	}
+}
+
+static struct event_command trigger_hist_enable_cmd = {
+	.name			= ENABLE_HIST_STR,
+	.trigger_type		= ETT_HIST_ENABLE,
+	.func			= event_enable_trigger_func,
+	.reg			= event_enable_register_trigger,
+	.unreg			= event_enable_unregister_trigger,
+	.unreg_all		= hist_enable_unreg_all,
+	.get_trigger_ops	= hist_enable_get_trigger_ops,
+	.set_filter		= set_trigger_filter,
+};
+
+static struct event_command trigger_hist_disable_cmd = {
+	.name			= DISABLE_HIST_STR,
+	.trigger_type		= ETT_HIST_ENABLE,
+	.func			= event_enable_trigger_func,
+	.reg			= event_enable_register_trigger,
+	.unreg			= event_enable_unregister_trigger,
+	.unreg_all		= hist_enable_unreg_all,
+	.get_trigger_ops	= hist_enable_get_trigger_ops,
+	.set_filter		= set_trigger_filter,
+};
+
+static __init void unregister_trigger_hist_enable_disable_cmds(void)
+{
+	unregister_event_command(&trigger_hist_enable_cmd);
+	unregister_event_command(&trigger_hist_disable_cmd);
+}
+
+__init int register_trigger_hist_enable_disable_cmds(void)
+{
+	int ret;
+
+	ret = register_event_command(&trigger_hist_enable_cmd);
+	if (WARN_ON(ret < 0))
+		return ret;
+	ret = register_event_command(&trigger_hist_disable_cmd);
+	if (WARN_ON(ret < 0))
+		unregister_trigger_hist_enable_disable_cmds();
+
+	return ret;
+}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index d67992f..a975571 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -347,7 +347,7 @@
  * Currently we only unregister event commands from __init, so mark
  * this __init too.
  */
-static __init int unregister_event_command(struct event_command *cmd)
+__init int unregister_event_command(struct event_command *cmd)
 {
 	struct event_command *p, *n;
 	int ret = -ENODEV;
@@ -641,6 +641,7 @@
 	trigger_data->ops = trigger_ops;
 	trigger_data->cmd_ops = cmd_ops;
 	INIT_LIST_HEAD(&trigger_data->list);
+	INIT_LIST_HEAD(&trigger_data->named_list);
 
 	if (glob[0] == '!') {
 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
@@ -764,6 +765,148 @@
 	return ret;
 }
 
+static LIST_HEAD(named_triggers);
+
+/**
+ * find_named_trigger - Find the common named trigger associated with @name
+ * @name: The name of the set of named triggers to find the common data for
+ *
+ * Named triggers are sets of triggers that share a common set of
+ * trigger data.  The first named trigger registered with a given name
+ * owns the common trigger data that the others subsequently
+ * registered with the same name will reference.  This function
+ * returns the common trigger data associated with that first
+ * registered instance.
+ *
+ * Return: the common trigger data for the given named trigger on
+ * success, NULL otherwise.
+ */
+struct event_trigger_data *find_named_trigger(const char *name)
+{
+	struct event_trigger_data *data;
+
+	if (!name)
+		return NULL;
+
+	list_for_each_entry(data, &named_triggers, named_list) {
+		if (data->named_data)
+			continue;
+		if (strcmp(data->name, name) == 0)
+			return data;
+	}
+
+	return NULL;
+}
+
+/**
+ * is_named_trigger - determine if a given trigger is a named trigger
+ * @test: The trigger data to test
+ *
+ * Return: true if 'test' is a named trigger, false otherwise.
+ */
+bool is_named_trigger(struct event_trigger_data *test)
+{
+	struct event_trigger_data *data;
+
+	list_for_each_entry(data, &named_triggers, named_list) {
+		if (test == data)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * save_named_trigger - save the trigger in the named trigger list
+ * @name: The name of the named trigger set
+ * @data: The trigger data to save
+ *
+ * Return: 0 if successful, negative error otherwise.
+ */
+int save_named_trigger(const char *name, struct event_trigger_data *data)
+{
+	data->name = kstrdup(name, GFP_KERNEL);
+	if (!data->name)
+		return -ENOMEM;
+
+	list_add(&data->named_list, &named_triggers);
+
+	return 0;
+}
+
+/**
+ * del_named_trigger - delete a trigger from the named trigger list
+ * @data: The trigger data to delete
+ */
+void del_named_trigger(struct event_trigger_data *data)
+{
+	kfree(data->name);
+	data->name = NULL;
+
+	list_del(&data->named_list);
+}
+
+static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
+{
+	struct event_trigger_data *test;
+
+	list_for_each_entry(test, &named_triggers, named_list) {
+		if (strcmp(test->name, data->name) == 0) {
+			if (pause) {
+				test->paused_tmp = test->paused;
+				test->paused = true;
+			} else {
+				test->paused = test->paused_tmp;
+			}
+		}
+	}
+}
+
+/**
+ * pause_named_trigger - Pause all named triggers with the same name
+ * @data: The trigger data of a named trigger to pause
+ *
+ * Pauses a named trigger along with all other triggers having the
+ * same name.  Because named triggers share a common set of data,
+ * pausing only one is meaningless, so pausing one named trigger needs
+ * to pause all triggers with the same name.
+ */
+void pause_named_trigger(struct event_trigger_data *data)
+{
+	__pause_named_trigger(data, true);
+}
+
+/**
+ * unpause_named_trigger - Un-pause all named triggers with the same name
+ * @data: The trigger data of a named trigger to unpause
+ *
+ * Un-pauses a named trigger along with all other triggers having the
+ * same name.  Because named triggers share a common set of data,
+ * unpausing only one is meaningless, so unpausing one named trigger
+ * needs to unpause all triggers with the same name.
+ */
+void unpause_named_trigger(struct event_trigger_data *data)
+{
+	__pause_named_trigger(data, false);
+}
+
+/**
+ * set_named_trigger_data - Associate common named trigger data
+ * @data: The trigger data to associate with the common data
+ * @named_data: The first-registered trigger owning the common data
+ *
+ * Named triggers are sets of triggers that share a common set of
+ * trigger data.  The first named trigger registered with a given name
+ * owns the common trigger data that the others subsequently
+ * registered with the same name will reference.  This function
+ * associates the common trigger data from the first trigger with the
+ * given trigger.
+ */
+void set_named_trigger_data(struct event_trigger_data *data,
+			    struct event_trigger_data *named_data)
+{
+	data->named_data = named_data;
+}
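+
+/*
+ * Illustrative sketch (not part of this patch) of how a trigger
+ * command's registration path is expected to combine the helpers
+ * above; the hist trigger code is the in-tree user, and the exact
+ * flow there is split between its reg() and init() callbacks.
+ * Roughly:
+ *
+ *	named_data = find_named_trigger(name);
+ *	if (named_data)
+ *		set_named_trigger_data(data, named_data);
+ *	else
+ *		ret = save_named_trigger(name, data);
+ *
+ * i.e. the first trigger registered with a given name saves itself as
+ * the owner of the common data, and later registrations with the same
+ * name reference it instead of allocating their own.
+ */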
+
 static void
 traceon_trigger(struct event_trigger_data *data, void *rec)
 {
@@ -1062,15 +1205,6 @@
 	unregister_event_command(&trigger_traceoff_cmd);
 }
 
-/* Avoid typos */
-#define ENABLE_EVENT_STR	"enable_event"
-#define DISABLE_EVENT_STR	"disable_event"
-
-struct enable_trigger_data {
-	struct trace_event_file		*file;
-	bool				enable;
-};
-
 static void
 event_enable_trigger(struct event_trigger_data *data, void *rec)
 {
@@ -1100,14 +1234,16 @@
 	event_enable_trigger(data, rec);
 }
 
-static int
-event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
-			   struct event_trigger_data *data)
+int event_enable_trigger_print(struct seq_file *m,
+			       struct event_trigger_ops *ops,
+			       struct event_trigger_data *data)
 {
 	struct enable_trigger_data *enable_data = data->private_data;
 
 	seq_printf(m, "%s:%s:%s",
-		   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
+		   enable_data->hist ?
+		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
+		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
 		   enable_data->file->event_call->class->system,
 		   trace_event_name(enable_data->file->event_call));
 
@@ -1124,9 +1260,8 @@
 	return 0;
 }
 
-static void
-event_enable_trigger_free(struct event_trigger_ops *ops,
-			  struct event_trigger_data *data)
+void event_enable_trigger_free(struct event_trigger_ops *ops,
+			       struct event_trigger_data *data)
 {
 	struct enable_trigger_data *enable_data = data->private_data;
 
@@ -1171,10 +1306,9 @@
 	.free			= event_enable_trigger_free,
 };
 
-static int
-event_enable_trigger_func(struct event_command *cmd_ops,
-			  struct trace_event_file *file,
-			  char *glob, char *cmd, char *param)
+int event_enable_trigger_func(struct event_command *cmd_ops,
+			      struct trace_event_file *file,
+			      char *glob, char *cmd, char *param)
 {
 	struct trace_event_file *event_enable_file;
 	struct enable_trigger_data *enable_data;
@@ -1183,6 +1317,7 @@
 	struct trace_array *tr = file->tr;
 	const char *system;
 	const char *event;
+	bool hist = false;
 	char *trigger;
 	char *number;
 	bool enable;
@@ -1207,8 +1342,15 @@
 	if (!event_enable_file)
 		goto out;
 
-	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
+#ifdef CONFIG_HIST_TRIGGERS
+	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
+		(strcmp(cmd, DISABLE_HIST_STR) == 0));
 
+	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
+		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
+#else
+	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
+#endif
 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 
 	ret = -ENOMEM;
@@ -1228,6 +1370,7 @@
 	INIT_LIST_HEAD(&trigger_data->list);
 	RCU_INIT_POINTER(trigger_data->filter, NULL);
 
+	enable_data->hist = hist;
 	enable_data->enable = enable;
 	enable_data->file = event_enable_file;
 	trigger_data->private_data = enable_data;
@@ -1305,10 +1448,10 @@
 	goto out;
 }
 
-static int event_enable_register_trigger(char *glob,
-					 struct event_trigger_ops *ops,
-					 struct event_trigger_data *data,
-					 struct trace_event_file *file)
+int event_enable_register_trigger(char *glob,
+				  struct event_trigger_ops *ops,
+				  struct event_trigger_data *data,
+				  struct trace_event_file *file)
 {
 	struct enable_trigger_data *enable_data = data->private_data;
 	struct enable_trigger_data *test_enable_data;
@@ -1318,6 +1461,8 @@
 	list_for_each_entry_rcu(test, &file->triggers, list) {
 		test_enable_data = test->private_data;
 		if (test_enable_data &&
+		    (test->cmd_ops->trigger_type ==
+		     data->cmd_ops->trigger_type) &&
 		    (test_enable_data->file == enable_data->file)) {
 			ret = -EEXIST;
 			goto out;
@@ -1343,10 +1488,10 @@
 	return ret;
 }
 
-static void event_enable_unregister_trigger(char *glob,
-					    struct event_trigger_ops *ops,
-					    struct event_trigger_data *test,
-					    struct trace_event_file *file)
+void event_enable_unregister_trigger(char *glob,
+				     struct event_trigger_ops *ops,
+				     struct event_trigger_data *test,
+				     struct trace_event_file *file)
 {
 	struct enable_trigger_data *test_enable_data = test->private_data;
 	struct enable_trigger_data *enable_data;
@@ -1356,6 +1501,8 @@
 	list_for_each_entry_rcu(data, &file->triggers, list) {
 		enable_data = data->private_data;
 		if (enable_data &&
+		    (data->cmd_ops->trigger_type ==
+		     test->cmd_ops->trigger_type) &&
 		    (enable_data->file == test_enable_data->file)) {
 			unregistered = true;
 			list_del_rcu(&data->list);
@@ -1375,8 +1522,12 @@
 	struct event_trigger_ops *ops;
 	bool enable;
 
+#ifdef CONFIG_HIST_TRIGGERS
+	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
+		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
+#else
 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
-
+#endif
 	if (enable)
 		ops = param ? &event_enable_count_trigger_ops :
 			&event_enable_trigger_ops;
@@ -1447,6 +1598,8 @@
 	register_trigger_snapshot_cmd();
 	register_trigger_stacktrace_cmd();
 	register_trigger_enable_disable_cmds();
+	register_trigger_hist_enable_disable_cmds();
+	register_trigger_hist_cmd();
 
 	return 0;
 }
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 919e0dd..5546eec 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1149,14 +1149,15 @@
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
+	entry = perf_trace_buf_alloc(size, NULL, &rctx);
 	if (!entry)
 		return;
 
 	entry->ip = (unsigned long)tk->rp.kp.addr;
 	memset(&entry[1], 0, dsize);
 	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
-	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
+	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+			      head, NULL);
 }
 NOKPROBE_SYMBOL(kprobe_perf_func);
 
@@ -1184,14 +1185,15 @@
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
+	entry = perf_trace_buf_alloc(size, NULL, &rctx);
 	if (!entry)
 		return;
 
 	entry->func = (unsigned long)tk->rp.kp.addr;
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
-	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
+	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+			      head, NULL);
 }
 NOKPROBE_SYMBOL(kretprobe_perf_func);
 #endif	/* CONFIG_PERF_EVENTS */
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index e78f364..b2b6efc 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -587,15 +587,16 @@
 	size = ALIGN(size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
-				sys_data->enter_event->event.type, NULL, &rctx);
+	rec = perf_trace_buf_alloc(size, NULL, &rctx);
 	if (!rec)
 		return;
 
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
+	perf_trace_buf_submit(rec, size, rctx,
+			      sys_data->enter_event->event.type, 1, regs,
+			      head, NULL);
 }
 
 static int perf_sysenter_enable(struct trace_event_call *call)
@@ -660,14 +661,14 @@
 	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
-				sys_data->exit_event->event.type, NULL, &rctx);
+	rec = perf_trace_buf_alloc(size, NULL, &rctx);
 	if (!rec)
 		return;
 
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
-	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
+	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
+			      1, regs, head, NULL);
 }
 
 static int perf_sysexit_enable(struct trace_event_call *call)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 7915142..c534854 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1131,7 +1131,7 @@
 	if (hlist_empty(head))
 		goto out;
 
-	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
+	entry = perf_trace_buf_alloc(size, NULL, &rctx);
 	if (!entry)
 		goto out;
 
@@ -1152,7 +1152,8 @@
 		memset(data + len, 0, size - esize - len);
 	}
 
-	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
+	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+			      head, NULL);
  out:
 	preempt_enable();
 }
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
new file mode 100644
index 0000000..0a689bb
--- /dev/null
+++ b/kernel/trace/tracing_map.c
@@ -0,0 +1,1062 @@
+/*
+ * tracing_map - lock-free map for tracing
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
+ *
+ * tracing_map implementation inspired by lock-free map algorithms
+ * originated by Dr. Cliff Click:
+ *
+ * http://www.azulsystems.com/blog/cliff/2007-03-26-non-blocking-hashtable
+ * http://www.azulsystems.com/events/javaone_2007/2007_LockFreeHash.pdf
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+#include "tracing_map.h"
+#include "trace.h"
+
+/*
+ * NOTE: For a detailed description of the data structures used by
+ * these functions (such as tracing_map_elt) please see the overview
+ * of tracing_map data structures at the beginning of tracing_map.h.
+ */
+
+/**
+ * tracing_map_update_sum - Add a value to a tracing_map_elt's sum field
+ * @elt: The tracing_map_elt
+ * @i: The index of the given sum associated with the tracing_map_elt
+ * @n: The value to add to the sum
+ *
+ * Add n to sum i associated with the specified tracing_map_elt
+ * instance.  The index i is the index returned by the call to
+ * tracing_map_add_sum_field() when the tracing map was set up.
+ */
+void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n)
+{
+	atomic64_add(n, &elt->fields[i].sum);
+}
+
+/**
+ * tracing_map_read_sum - Return the value of a tracing_map_elt's sum field
+ * @elt: The tracing_map_elt
+ * @i: The index of the given sum associated with the tracing_map_elt
+ *
+ * Retrieve the value of the sum i associated with the specified
+ * tracing_map_elt instance.  The index i is the index returned by the
+ * call to tracing_map_add_sum_field() when the tracing map was set
+ * up.
+ *
+ * Return: The sum associated with field i for elt.
+ */
+u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i)
+{
+	return (u64)atomic64_read(&elt->fields[i].sum);
+}
+
+int tracing_map_cmp_string(void *val_a, void *val_b)
+{
+	char *a = val_a;
+	char *b = val_b;
+
+	return strcmp(a, b);
+}
+
+int tracing_map_cmp_none(void *val_a, void *val_b)
+{
+	return 0;
+}
+
+static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
+{
+	u64 a = atomic64_read((atomic64_t *)val_a);
+	u64 b = atomic64_read((atomic64_t *)val_b);
+
+	return (a > b) ? 1 : ((a < b) ? -1 : 0);
+}
+
+#define DEFINE_TRACING_MAP_CMP_FN(type)					\
+static int tracing_map_cmp_##type(void *val_a, void *val_b)		\
+{									\
+	type a = *(type *)val_a;					\
+	type b = *(type *)val_b;					\
+									\
+	return (a > b) ? 1 : ((a < b) ? -1 : 0);			\
+}
+
+DEFINE_TRACING_MAP_CMP_FN(s64);
+DEFINE_TRACING_MAP_CMP_FN(u64);
+DEFINE_TRACING_MAP_CMP_FN(s32);
+DEFINE_TRACING_MAP_CMP_FN(u32);
+DEFINE_TRACING_MAP_CMP_FN(s16);
+DEFINE_TRACING_MAP_CMP_FN(u16);
+DEFINE_TRACING_MAP_CMP_FN(s8);
+DEFINE_TRACING_MAP_CMP_FN(u8);
+
+tracing_map_cmp_fn_t tracing_map_cmp_num(int field_size,
+					 int field_is_signed)
+{
+	tracing_map_cmp_fn_t fn = tracing_map_cmp_none;
+
+	switch (field_size) {
+	case 8:
+		if (field_is_signed)
+			fn = tracing_map_cmp_s64;
+		else
+			fn = tracing_map_cmp_u64;
+		break;
+	case 4:
+		if (field_is_signed)
+			fn = tracing_map_cmp_s32;
+		else
+			fn = tracing_map_cmp_u32;
+		break;
+	case 2:
+		if (field_is_signed)
+			fn = tracing_map_cmp_s16;
+		else
+			fn = tracing_map_cmp_u16;
+		break;
+	case 1:
+		if (field_is_signed)
+			fn = tracing_map_cmp_s8;
+		else
+			fn = tracing_map_cmp_u8;
+		break;
+	}
+
+	return fn;
+}
+
+static int tracing_map_add_field(struct tracing_map *map,
+				 tracing_map_cmp_fn_t cmp_fn)
+{
+	int ret = -EINVAL;
+
+	if (map->n_fields < TRACING_MAP_FIELDS_MAX) {
+		ret = map->n_fields;
+		map->fields[map->n_fields++].cmp_fn = cmp_fn;
+	}
+
+	return ret;
+}
+
+/**
+ * tracing_map_add_sum_field - Add a field describing a tracing_map sum
+ * @map: The tracing_map
+ *
+ * Add a sum field to the map and return the index identifying it in
+ * the map and associated tracing_map_elts.  This is the index used
+ * for instance to update a sum for a particular tracing_map_elt using
+ * tracing_map_update_sum() or reading it via tracing_map_read_sum().
+ *
+ * Return: The index identifying the field in the map and associated
+ * tracing_map_elts, or -EINVAL on error.
+ */
+int tracing_map_add_sum_field(struct tracing_map *map)
+{
+	return tracing_map_add_field(map, tracing_map_cmp_atomic64);
+}
+
+/**
+ * tracing_map_add_key_field - Add a field describing a tracing_map key
+ * @map: The tracing_map
+ * @offset: The offset within the key
+ * @cmp_fn: The comparison function that will be used to sort on the key
+ *
+ * Let the map know there is a key and that, if it's used as a sort
+ * key, cmp_fn should be used to compare it.
+ *
+ * A key can be a subset of a compound key; for that purpose, the
+ * offset param is used to describe where within the compound key
+ * the key referenced by this key field resides.
+ *
+ * Return: The index identifying the field in the map and associated
+ * tracing_map_elts, or -EINVAL on error.
+ */
+int tracing_map_add_key_field(struct tracing_map *map,
+			      unsigned int offset,
+			      tracing_map_cmp_fn_t cmp_fn)
+
+{
+	int idx = tracing_map_add_field(map, cmp_fn);
+
+	if (idx < 0)
+		return idx;
+
+	map->fields[idx].offset = offset;
+
+	map->key_idx[map->n_keys++] = idx;
+
+	return idx;
+}
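+
+/*
+ * Illustrative sketch (not part of this patch) of describing a
+ * compound key made of two fields; the struct and field names are
+ * hypothetical, and the @offset param locates each field within the
+ * compound key:
+ *
+ *	struct my_key {
+ *		u64	call_site;
+ *		char	comm[16];
+ *	};
+ *
+ *	tracing_map_add_key_field(map, offsetof(struct my_key, call_site),
+ *				  tracing_map_cmp_num(sizeof(u64), 0));
+ *	tracing_map_add_key_field(map, offsetof(struct my_key, comm),
+ *				  tracing_map_cmp_string);
+ */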
+
+void tracing_map_array_clear(struct tracing_map_array *a)
+{
+	unsigned int i;
+
+	if (!a->pages)
+		return;
+
+	for (i = 0; i < a->n_pages; i++)
+		memset(a->pages[i], 0, PAGE_SIZE);
+}
+
+void tracing_map_array_free(struct tracing_map_array *a)
+{
+	unsigned int i;
+
+	if (!a)
+		return;
+
+	if (!a->pages) {
+		kfree(a);
+		return;
+	}
+
+	for (i = 0; i < a->n_pages; i++) {
+		if (!a->pages[i])
+			break;
+		free_page((unsigned long)a->pages[i]);
+	}
+}
+
+struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
+						  unsigned int entry_size)
+{
+	struct tracing_map_array *a;
+	unsigned int i;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return NULL;
+
+	a->entry_size_shift = fls(roundup_pow_of_two(entry_size) - 1);
+	a->entries_per_page = PAGE_SIZE / (1 << a->entry_size_shift);
+	a->n_pages = n_elts / a->entries_per_page;
+	if (!a->n_pages)
+		a->n_pages = 1;
+	a->entry_shift = fls(a->entries_per_page) - 1;
+	a->entry_mask = (1 << a->entry_shift) - 1;
+
+	a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);
+	if (!a->pages)
+		goto free;
+
+	for (i = 0; i < a->n_pages; i++) {
+		a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
+		if (!a->pages[i])
+			goto free;
+	}
+ out:
+	return a;
+ free:
+	tracing_map_array_free(a);
+	a = NULL;
+
+	goto out;
+}
+
+static void tracing_map_elt_clear(struct tracing_map_elt *elt)
+{
+	unsigned i;
+
+	for (i = 0; i < elt->map->n_fields; i++)
+		if (elt->fields[i].cmp_fn == tracing_map_cmp_atomic64)
+			atomic64_set(&elt->fields[i].sum, 0);
+
+	if (elt->map->ops && elt->map->ops->elt_clear)
+		elt->map->ops->elt_clear(elt);
+}
+
+static void tracing_map_elt_init_fields(struct tracing_map_elt *elt)
+{
+	unsigned int i;
+
+	tracing_map_elt_clear(elt);
+
+	for (i = 0; i < elt->map->n_fields; i++) {
+		elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn;
+
+		if (elt->fields[i].cmp_fn != tracing_map_cmp_atomic64)
+			elt->fields[i].offset = elt->map->fields[i].offset;
+	}
+}
+
+static void tracing_map_elt_free(struct tracing_map_elt *elt)
+{
+	if (!elt)
+		return;
+
+	if (elt->map->ops && elt->map->ops->elt_free)
+		elt->map->ops->elt_free(elt);
+	kfree(elt->fields);
+	kfree(elt->key);
+	kfree(elt);
+}
+
+static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map)
+{
+	struct tracing_map_elt *elt;
+	int err = 0;
+
+	elt = kzalloc(sizeof(*elt), GFP_KERNEL);
+	if (!elt)
+		return ERR_PTR(-ENOMEM);
+
+	elt->map = map;
+
+	elt->key = kzalloc(map->key_size, GFP_KERNEL);
+	if (!elt->key) {
+		err = -ENOMEM;
+		goto free;
+	}
+
+	elt->fields = kcalloc(map->n_fields, sizeof(*elt->fields), GFP_KERNEL);
+	if (!elt->fields) {
+		err = -ENOMEM;
+		goto free;
+	}
+
+	tracing_map_elt_init_fields(elt);
+
+	if (map->ops && map->ops->elt_alloc) {
+		err = map->ops->elt_alloc(elt);
+		if (err)
+			goto free;
+	}
+	return elt;
+ free:
+	tracing_map_elt_free(elt);
+
+	return ERR_PTR(err);
+}
+
+static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
+{
+	struct tracing_map_elt *elt = NULL;
+	int idx;
+
+	idx = atomic_inc_return(&map->next_elt);
+	if (idx < map->max_elts) {
+		elt = *(TRACING_MAP_ELT(map->elts, idx));
+		if (map->ops && map->ops->elt_init)
+			map->ops->elt_init(elt);
+	}
+
+	return elt;
+}
+
+static void tracing_map_free_elts(struct tracing_map *map)
+{
+	unsigned int i;
+
+	if (!map->elts)
+		return;
+
+	for (i = 0; i < map->max_elts; i++) {
+		tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i)));
+		*(TRACING_MAP_ELT(map->elts, i)) = NULL;
+	}
+
+	tracing_map_array_free(map->elts);
+	map->elts = NULL;
+}
+
+static int tracing_map_alloc_elts(struct tracing_map *map)
+{
+	unsigned int i;
+
+	map->elts = tracing_map_array_alloc(map->max_elts,
+					    sizeof(struct tracing_map_elt *));
+	if (!map->elts)
+		return -ENOMEM;
+
+	for (i = 0; i < map->max_elts; i++) {
+		*(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map);
+		if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) {
+			*(TRACING_MAP_ELT(map->elts, i)) = NULL;
+			tracing_map_free_elts(map);
+
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static inline bool keys_match(void *key, void *test_key, unsigned key_size)
+{
+	bool match = true;
+
+	if (memcmp(key, test_key, key_size))
+		match = false;
+
+	return match;
+}
+
+static inline struct tracing_map_elt *
+__tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
+{
+	u32 idx, key_hash, test_key;
+	struct tracing_map_entry *entry;
+
+	key_hash = jhash(key, map->key_size, 0);
+	if (key_hash == 0)
+		key_hash = 1;
+	idx = key_hash >> (32 - (map->map_bits + 1));
+
+	while (1) {
+		idx &= (map->map_size - 1);
+		entry = TRACING_MAP_ENTRY(map->map, idx);
+		test_key = entry->key;
+
+		if (test_key && test_key == key_hash && entry->val &&
+		    keys_match(key, entry->val->key, map->key_size)) {
+			atomic64_inc(&map->hits);
+			return entry->val;
+		}
+
+		if (!test_key) {
+			if (lookup_only)
+				break;
+
+			if (!cmpxchg(&entry->key, 0, key_hash)) {
+				struct tracing_map_elt *elt;
+
+				elt = get_free_elt(map);
+				if (!elt) {
+					atomic64_inc(&map->drops);
+					entry->key = 0;
+					break;
+				}
+
+				memcpy(elt->key, key, map->key_size);
+				entry->val = elt;
+				atomic64_inc(&map->hits);
+
+				return entry->val;
+			}
+		}
+
+		idx++;
+	}
+
+	return NULL;
+}
+
+/**
+ * tracing_map_insert - Insert key and/or retrieve val from a tracing_map
+ * @map: The tracing_map to insert into
+ * @key: The key to insert
+ *
+ * Inserts a key into a tracing_map and creates and returns a new
+ * tracing_map_elt for it, or if the key has already been inserted by
+ * a previous call, returns the tracing_map_elt already associated
+ * with it.  When the map was created, the number of elements to be
+ * allocated for the map was specified (internally maintained as
+ * 'max_elts' in struct tracing_map), and that number of
+ * tracing_map_elts was created by tracing_map_init().  This is the
+ * pre-allocated pool of tracing_map_elts that tracing_map_insert()
+ * will allocate from when adding new keys.  Once that pool is
+ * exhausted, tracing_map_insert() is useless and will return NULL to
+ * signal that state.  There are two user-visible tracing_map
+ * variables, 'hits' and 'drops', which are updated by this function.
+ * Every time an element is either successfully inserted or retrieved,
+ * the 'hits' value is incremented.  Every time an element insertion
+ * fails, the 'drops' value is incremented.
+ *
+ * This is a lock-free tracing map insertion function implementing a
+ * modified form of Cliff Click's basic insertion algorithm.  It
+ * requires the table size be a power of two.  To prevent any
+ * possibility of an infinite loop we always make the internal table
+ * size double the size of the requested table size (max_elts * 2).
+ * Likewise, we never reuse a slot or resize or delete elements - when
+ * we've reached max_elts entries, we simply return NULL once we've
+ * run out of entries.  Readers can at any point in time traverse the
+ * tracing map and safely access the key/val pairs.
+ *
+ * Return: the tracing_map_elt pointer val associated with the key.
+ * If this was a newly inserted key, the val will be a newly allocated
+ * and associated tracing_map_elt pointer val.  If the key wasn't
+ * found and the pool of tracing_map_elts has been exhausted, NULL is
+ * returned and no further insertions will succeed.
+ */
+struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key)
+{
+	return __tracing_map_insert(map, key, false);
+}
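+
+/*
+ * Illustrative sketch (not part of this patch) of typical use from a
+ * tracing context.  'key', 'sum_idx' and 'bytes_req' are hypothetical:
+ * the key is assumed to be exactly map->key_size bytes and sum_idx the
+ * index returned by tracing_map_add_sum_field() at setup time:
+ *
+ *	struct tracing_map_elt *elt;
+ *
+ *	elt = tracing_map_insert(map, &key);
+ *	if (elt)
+ *		tracing_map_update_sum(elt, sum_idx, bytes_req);
+ *
+ * A NULL return means the pre-allocated element pool is exhausted, in
+ * which case the update is simply dropped.
+ */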
+
+/**
+ * tracing_map_lookup - Retrieve val from a tracing_map
+ * @map: The tracing_map to perform the lookup on
+ * @key: The key to look up
+ *
+ * Looks up key in tracing_map and if found returns the matching
+ * tracing_map_elt.  This is a lock-free lookup; see
+ * tracing_map_insert() for details on tracing_map and how it works.
+ * There is one user-visible tracing_map variable, 'hits', which is
+ * updated by this function: every time an element is successfully
+ * retrieved, the 'hits' value is incremented.  The 'drops' value is
+ * never updated by this function.
+ *
+ * Return: the tracing_map_elt pointer val associated with the key.
+ * If the key wasn't found, NULL is returned.
+ */
+struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key)
+{
+	return __tracing_map_insert(map, key, true);
+}
+
+/**
+ * tracing_map_destroy - Destroy a tracing_map
+ * @map: The tracing_map to destroy
+ *
+ * Frees a tracing_map along with its associated array of
+ * tracing_map_elts.
+ *
+ * Callers should make sure there are no readers or writers actively
+ * reading or inserting into the map before calling this.
+ */
+void tracing_map_destroy(struct tracing_map *map)
+{
+	if (!map)
+		return;
+
+	tracing_map_free_elts(map);
+
+	tracing_map_array_free(map->map);
+	kfree(map);
+}
+
+/**
+ * tracing_map_clear - Clear a tracing_map
+ * @map: The tracing_map to clear
+ *
+ * Resets the tracing map to a cleared or initial state.  The
+ * tracing_map_elts are all cleared, and the array of struct
+ * tracing_map_entry is reset to an initialized state.
+ *
+ * Callers should make sure there are no writers actively inserting
+ * into the map before calling this.
+ */
+void tracing_map_clear(struct tracing_map *map)
+{
+	unsigned int i;
+
+	atomic_set(&map->next_elt, -1);
+	atomic64_set(&map->hits, 0);
+	atomic64_set(&map->drops, 0);
+
+	tracing_map_array_clear(map->map);
+
+	for (i = 0; i < map->max_elts; i++)
+		tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i)));
+}
+
+static void set_sort_key(struct tracing_map *map,
+			 struct tracing_map_sort_key *sort_key)
+{
+	map->sort_key = *sort_key;
+}
+
+/**
+ * tracing_map_create - Create a lock-free map and element pool
+ * @map_bits: The size of the map (2 ** map_bits)
+ * @key_size: The size of the key for the map in bytes
+ * @ops: Optional client-defined tracing_map_ops instance
+ * @private_data: Client data associated with the map
+ *
+ * Creates and sets up a map to contain 2 ** map_bits number of
+ * elements (internally maintained as 'max_elts' in struct
+ * tracing_map).  Before using, map fields should be added to the map
+ * with tracing_map_add_sum_field() and tracing_map_add_key_field().
+ * tracing_map_init() should then be called to allocate the array of
+ * tracing_map_elts, in order to avoid allocating anything in the map
+ * insertion path.  The user-specified map size reflects the maximum
+ * number of elements that can be contained in the table requested by
+ * the user - internally we double that in order to keep the table
+ * sparse and keep collisions manageable.
+ *
+ * A tracing_map is a special-purpose map designed to aggregate or
+ * 'sum' one or more values associated with a specific object of type
+ * tracing_map_elt, which is attached by the map to a given key.
+ *
+ * tracing_map_create() sets up the map itself, and provides
+ * operations for inserting tracing_map_elts, but doesn't allocate the
+ * tracing_map_elts themselves, or provide a means for describing the
+ * keys or sums associated with the tracing_map_elts.  All
+ * tracing_map_elts for a given map have the same set of sums and
+ * keys, which are defined by the client using the functions
+ * tracing_map_add_key_field() and tracing_map_add_sum_field().  Once
+ * the fields are defined, the pool of elements allocated for the map
+ * can be created, which occurs when the client code calls
+ * tracing_map_init().
+ *
+ * When tracing_map_init() returns, tracing_map_elt elements can be
+ * inserted into the map using tracing_map_insert().  When called,
+ * tracing_map_insert() grabs a free tracing_map_elt from the pool, or
+ * finds an existing match in the map and in either case returns it.
+ * The client can then use tracing_map_update_sum() and
+ * tracing_map_read_sum() to update or read a given sum field for the
+ * tracing_map_elt.
+ *
+ * The client can at any point retrieve and traverse the current set
+ * of inserted tracing_map_elts in a tracing_map, via
+ * tracing_map_sort_entries().  Sorting can be done on any field,
+ * including keys.
+ *
+ * See tracing_map.h for a description of tracing_map_ops.
+ *
+ * Return: the tracing_map pointer if successful, ERR_PTR if not.
+ */
+struct tracing_map *tracing_map_create(unsigned int map_bits,
+				       unsigned int key_size,
+				       const struct tracing_map_ops *ops,
+				       void *private_data)
+{
+	struct tracing_map *map;
+	unsigned int i;
+
+	if (map_bits < TRACING_MAP_BITS_MIN ||
+	    map_bits > TRACING_MAP_BITS_MAX)
+		return ERR_PTR(-EINVAL);
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return ERR_PTR(-ENOMEM);
+
+	map->map_bits = map_bits;
+	map->max_elts = (1 << map_bits);
+	atomic_set(&map->next_elt, -1);
+
+	map->map_size = (1 << (map_bits + 1));
+	map->ops = ops;
+
+	map->private_data = private_data;
+
+	map->map = tracing_map_array_alloc(map->map_size,
+					   sizeof(struct tracing_map_entry));
+	if (!map->map)
+		goto free;
+
+	map->key_size = key_size;
+	for (i = 0; i < TRACING_MAP_KEYS_MAX; i++)
+		map->key_idx[i] = -1;
+ out:
+	return map;
+ free:
+	tracing_map_destroy(map);
+	map = ERR_PTR(-ENOMEM);
+
+	goto out;
+}
+
+/**
+ * tracing_map_init - Allocate and clear a map's tracing_map_elts
+ * @map: The tracing_map to initialize
+ *
+ * Allocates and clears a pool of tracing_map_elts equal to the
+ * user-specified size of 2 ** map_bits (internally maintained as
+ * 'max_elts' in struct tracing_map).  Before using, the map fields
+ * should be added to the map with tracing_map_add_sum_field() and
+ * tracing_map_add_key_field().  tracing_map_init() should then be
+ * called to allocate the array of tracing_map_elts, in order to avoid
+ * allocating anything in the map insertion path.  The user-specified
+ * map size reflects the max number of elements requested by the user
+ * - internally we double that in order to keep the table sparse and
+ * keep collisions manageable.
+ *
+ * See tracing_map.h for a description of tracing_map_ops.
+ *
+ * Return: the tracing_map pointer if successful, ERR_PTR if not.
+ */
+int tracing_map_init(struct tracing_map *map)
+{
+	int err;
+
+	if (map->n_fields < 2)
+		return -EINVAL; /* need at least 1 key and 1 val */
+
+	err = tracing_map_alloc_elts(map);
+	if (err)
+		return err;
+
+	tracing_map_clear(map);
+
+	return err;
+}
+
+static int cmp_entries_dup(const struct tracing_map_sort_entry **a,
+			   const struct tracing_map_sort_entry **b)
+{
+	int ret = 0;
+
+	if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size))
+		ret = 1;
+
+	return ret;
+}
+
+static int cmp_entries_sum(const struct tracing_map_sort_entry **a,
+			   const struct tracing_map_sort_entry **b)
+{
+	const struct tracing_map_elt *elt_a, *elt_b;
+	struct tracing_map_sort_key *sort_key;
+	struct tracing_map_field *field;
+	tracing_map_cmp_fn_t cmp_fn;
+	void *val_a, *val_b;
+	int ret = 0;
+
+	elt_a = (*a)->elt;
+	elt_b = (*b)->elt;
+
+	sort_key = &elt_a->map->sort_key;
+
+	field = &elt_a->fields[sort_key->field_idx];
+	cmp_fn = field->cmp_fn;
+
+	val_a = &elt_a->fields[sort_key->field_idx].sum;
+	val_b = &elt_b->fields[sort_key->field_idx].sum;
+
+	ret = cmp_fn(val_a, val_b);
+	if (sort_key->descending)
+		ret = -ret;
+
+	return ret;
+}
+
+static int cmp_entries_key(const struct tracing_map_sort_entry **a,
+			   const struct tracing_map_sort_entry **b)
+{
+	const struct tracing_map_elt *elt_a, *elt_b;
+	struct tracing_map_sort_key *sort_key;
+	struct tracing_map_field *field;
+	tracing_map_cmp_fn_t cmp_fn;
+	void *val_a, *val_b;
+	int ret = 0;
+
+	elt_a = (*a)->elt;
+	elt_b = (*b)->elt;
+
+	sort_key = &elt_a->map->sort_key;
+
+	field = &elt_a->fields[sort_key->field_idx];
+
+	cmp_fn = field->cmp_fn;
+
+	val_a = elt_a->key + field->offset;
+	val_b = elt_b->key + field->offset;
+
+	ret = cmp_fn(val_a, val_b);
+	if (sort_key->descending)
+		ret = -ret;
+
+	return ret;
+}
+
+static void destroy_sort_entry(struct tracing_map_sort_entry *entry)
+{
+	if (!entry)
+		return;
+
+	if (entry->elt_copied)
+		tracing_map_elt_free(entry->elt);
+
+	kfree(entry);
+}
+
+/**
+ * tracing_map_destroy_sort_entries - Destroy an array of sort entries
+ * @entries: The entries to destroy
+ * @n_entries: The number of entries in the array
+ *
+ * Destroy the elements returned by a tracing_map_sort_entries() call.
+ */
+void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
+				      unsigned int n_entries)
+{
+	unsigned int i;
+
+	for (i = 0; i < n_entries; i++)
+		destroy_sort_entry(entries[i]);
+
+	vfree(entries);
+}
+
+static struct tracing_map_sort_entry *
+create_sort_entry(void *key, struct tracing_map_elt *elt)
+{
+	struct tracing_map_sort_entry *sort_entry;
+
+	sort_entry = kzalloc(sizeof(*sort_entry), GFP_KERNEL);
+	if (!sort_entry)
+		return NULL;
+
+	sort_entry->key = key;
+	sort_entry->elt = elt;
+
+	return sort_entry;
+}
+
+static struct tracing_map_elt *copy_elt(struct tracing_map_elt *elt)
+{
+	struct tracing_map_elt *dup_elt;
+	unsigned int i;
+
+	dup_elt = tracing_map_elt_alloc(elt->map);
+	if (IS_ERR(dup_elt))
+		return NULL;
+
+	if (elt->map->ops && elt->map->ops->elt_copy)
+		elt->map->ops->elt_copy(dup_elt, elt);
+
+	dup_elt->private_data = elt->private_data;
+	memcpy(dup_elt->key, elt->key, elt->map->key_size);
+
+	for (i = 0; i < elt->map->n_fields; i++) {
+		atomic64_set(&dup_elt->fields[i].sum,
+			     atomic64_read(&elt->fields[i].sum));
+		dup_elt->fields[i].cmp_fn = elt->fields[i].cmp_fn;
+	}
+
+	return dup_elt;
+}
+
+static int merge_dup(struct tracing_map_sort_entry **sort_entries,
+		     unsigned int target, unsigned int dup)
+{
+	struct tracing_map_elt *target_elt, *elt;
+	bool first_dup = (target - dup) == 1;
+	int i;
+
+	if (first_dup) {
+		elt = sort_entries[target]->elt;
+		target_elt = copy_elt(elt);
+		if (!target_elt)
+			return -ENOMEM;
+		sort_entries[target]->elt = target_elt;
+		sort_entries[target]->elt_copied = true;
+	} else
+		target_elt = sort_entries[target]->elt;
+
+	elt = sort_entries[dup]->elt;
+
+	for (i = 0; i < elt->map->n_fields; i++)
+		atomic64_add(atomic64_read(&elt->fields[i].sum),
+			     &target_elt->fields[i].sum);
+
+	sort_entries[dup]->dup = true;
+
+	return 0;
+}
+
+static int merge_dups(struct tracing_map_sort_entry **sort_entries,
+		      int n_entries, unsigned int key_size)
+{
+	unsigned int dups = 0, total_dups = 0;
+	int err, i, j;
+	void *key;
+
+	if (n_entries < 2)
+		return total_dups;
+
+	sort(sort_entries, n_entries, sizeof(struct tracing_map_sort_entry *),
+	     (int (*)(const void *, const void *))cmp_entries_dup, NULL);
+
+	key = sort_entries[0]->key;
+	for (i = 1; i < n_entries; i++) {
+		if (!memcmp(sort_entries[i]->key, key, key_size)) {
+			dups++; total_dups++;
+			err = merge_dup(sort_entries, i - dups, i);
+			if (err)
+				return err;
+			continue;
+		}
+		key = sort_entries[i]->key;
+		dups = 0;
+	}
+
+	if (!total_dups)
+		return total_dups;
+
+	for (i = 0, j = 0; i < n_entries; i++) {
+		if (!sort_entries[i]->dup) {
+			sort_entries[j] = sort_entries[i];
+			if (j++ != i)
+				sort_entries[i] = NULL;
+		} else {
+			destroy_sort_entry(sort_entries[i]);
+			sort_entries[i] = NULL;
+		}
+	}
+
+	return total_dups;
+}
+
+static bool is_key(struct tracing_map *map, unsigned int field_idx)
+{
+	unsigned int i;
+
+	for (i = 0; i < map->n_keys; i++)
+		if (map->key_idx[i] == field_idx)
+			return true;
+	return false;
+}
+
+static void sort_secondary(struct tracing_map *map,
+			   const struct tracing_map_sort_entry **entries,
+			   unsigned int n_entries,
+			   struct tracing_map_sort_key *primary_key,
+			   struct tracing_map_sort_key *secondary_key)
+{
+	int (*primary_fn)(const struct tracing_map_sort_entry **,
+			  const struct tracing_map_sort_entry **);
+	int (*secondary_fn)(const struct tracing_map_sort_entry **,
+			    const struct tracing_map_sort_entry **);
+	unsigned i, start = 0, n_sub = 1;
+
+	if (is_key(map, primary_key->field_idx))
+		primary_fn = cmp_entries_key;
+	else
+		primary_fn = cmp_entries_sum;
+
+	if (is_key(map, secondary_key->field_idx))
+		secondary_fn = cmp_entries_key;
+	else
+		secondary_fn = cmp_entries_sum;
+
+	for (i = 0; i < n_entries - 1; i++) {
+		const struct tracing_map_sort_entry **a = &entries[i];
+		const struct tracing_map_sort_entry **b = &entries[i + 1];
+
+		if (primary_fn(a, b) == 0) {
+			n_sub++;
+			if (i < n_entries - 2)
+				continue;
+		}
+
+		if (n_sub < 2) {
+			start = i + 1;
+			n_sub = 1;
+			continue;
+		}
+
+		set_sort_key(map, secondary_key);
+		sort(&entries[start], n_sub,
+		     sizeof(struct tracing_map_sort_entry *),
+		     (int (*)(const void *, const void *))secondary_fn, NULL);
+		set_sort_key(map, primary_key);
+
+		start = i + 1;
+		n_sub = 1;
+	}
+}
+
+/**
+ * tracing_map_sort_entries - Sort the current set of tracing_map_elts in a map
+ * @map: The tracing_map
+ * @sort_keys: The sort keys to use for sorting, primary key first
+ * @n_sort_keys: The number of sort keys (currently at most two)
+ * @sort_entries: outval: pointer to allocated and sorted array of entries
+ *
+ * tracing_map_sort_entries() sorts the current set of entries in the
+ * map and returns the list of tracing_map_sort_entries containing
+ * them to the client in the sort_entries param.  The client can
+ * access the struct tracing_map_elt element of interest directly as
+ * the 'elt' field of a returned struct tracing_map_sort_entry object.
+ *
+ * The sort_key has only two fields: field_idx and descending.
+ * 'field_idx' refers
+ * to the index of the field added via tracing_map_add_sum_field() or
+ * tracing_map_add_key_field() when the tracing_map was initialized.
+ * 'descending' is a flag that if set reverses the sort order, which
+ * by default is ascending.
+ *
+ * The client should not hold on to the returned array but should use
+ * it and call tracing_map_destroy_sort_entries() when done.
+ *
+ * Return: the number of sort_entries in the struct tracing_map_sort_entry
+ * array, negative on error
+ */
+int tracing_map_sort_entries(struct tracing_map *map,
+			     struct tracing_map_sort_key *sort_keys,
+			     unsigned int n_sort_keys,
+			     struct tracing_map_sort_entry ***sort_entries)
+{
+	int (*cmp_entries_fn)(const struct tracing_map_sort_entry **,
+			      const struct tracing_map_sort_entry **);
+	struct tracing_map_sort_entry *sort_entry, **entries;
+	int i, n_entries, ret;
+
+	entries = vmalloc(map->max_elts * sizeof(sort_entry));
+	if (!entries)
+		return -ENOMEM;
+
+	for (i = 0, n_entries = 0; i < map->map_size; i++) {
+		struct tracing_map_entry *entry;
+
+		entry = TRACING_MAP_ENTRY(map->map, i);
+
+		if (!entry->key || !entry->val)
+			continue;
+
+		entries[n_entries] = create_sort_entry(entry->val->key,
+						       entry->val);
+		if (!entries[n_entries++]) {
+			ret = -ENOMEM;
+			goto free;
+		}
+	}
+
+	if (n_entries == 0) {
+		ret = 0;
+		goto free;
+	}
+
+	if (n_entries == 1) {
+		*sort_entries = entries;
+		return 1;
+	}
+
+	ret = merge_dups(entries, n_entries, map->key_size);
+	if (ret < 0)
+		goto free;
+	n_entries -= ret;
+
+	if (is_key(map, sort_keys[0].field_idx))
+		cmp_entries_fn = cmp_entries_key;
+	else
+		cmp_entries_fn = cmp_entries_sum;
+
+	set_sort_key(map, &sort_keys[0]);
+
+	sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *),
+	     (int (*)(const void *, const void *))cmp_entries_fn, NULL);
+
+	if (n_sort_keys > 1)
+		sort_secondary(map,
+			       (const struct tracing_map_sort_entry **)entries,
+			       n_entries,
+			       &sort_keys[0],
+			       &sort_keys[1]);
+
+	*sort_entries = entries;
+
+	return n_entries;
+ free:
+	tracing_map_destroy_sort_entries(entries, n_entries);
+
+	return ret;
+}
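+
+/*
+ * Illustrative sketch (not part of this patch) of retrieving a sorted
+ * snapshot of a map.  'sum_idx' is assumed to be the index returned by
+ * tracing_map_add_sum_field() at setup time, and use() stands in for
+ * whatever the caller does with each element:
+ *
+ *	struct tracing_map_sort_entry **entries;
+ *	struct tracing_map_sort_key sort_key = {
+ *		.field_idx	= sum_idx,
+ *		.descending	= true,
+ *	};
+ *	int i, n;
+ *
+ *	n = tracing_map_sort_entries(map, &sort_key, 1, &entries);
+ *	for (i = 0; i < n; i++)
+ *		use(entries[i]->elt);
+ *	if (n > 0)
+ *		tracing_map_destroy_sort_entries(entries, n);
+ */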
diff --git a/kernel/trace/tracing_map.h b/kernel/trace/tracing_map.h
new file mode 100644
index 0000000..618838f
--- /dev/null
+++ b/kernel/trace/tracing_map.h
@@ -0,0 +1,283 @@
+#ifndef __TRACING_MAP_H
+#define __TRACING_MAP_H
+
+#define TRACING_MAP_BITS_DEFAULT	11
+#define TRACING_MAP_BITS_MAX		17
+#define TRACING_MAP_BITS_MIN		7
+
+#define TRACING_MAP_KEYS_MAX		2
+#define TRACING_MAP_VALS_MAX		3
+#define TRACING_MAP_FIELDS_MAX		(TRACING_MAP_KEYS_MAX + \
+					 TRACING_MAP_VALS_MAX)
+#define TRACING_MAP_SORT_KEYS_MAX	2
+
+typedef int (*tracing_map_cmp_fn_t) (void *val_a, void *val_b);
+
+/*
+ * This is an overview of the tracing_map data structures and how they
+ * relate to the tracing_map API.  The details of the algorithms
+ * aren't discussed here - this is just a general overview of the data
+ * structures and how they interact with the API.
+ *
+ * The central data structure of the tracing_map is an initially
+ * zeroed array of struct tracing_map_entry (stored in the map field
+ * of struct tracing_map).  tracing_map_entry is a very simple data
+ * structure containing only two fields: a 32-bit unsigned 'key'
+ * variable and a pointer named 'val'.  This array of struct
+ * tracing_map_entry is essentially a hash table which will be
+ * modified by a single function, tracing_map_insert(), but which can
+ * be traversed and read by a user at any time (though the user does
+ * this indirectly via an array of tracing_map_sort_entry - see the
+ * explanation of that data structure in the discussion of the
+ * sorting-related data structures below).
+ *
+ * The central function of the tracing_map API is
+ * tracing_map_insert().  tracing_map_insert() hashes the
+ * arbitrarily-sized key passed into it into a 32-bit unsigned key.
+ * It then uses this key, truncated to the array size, as an index
+ * into the array of tracing_map_entries.  If the value of the 'key'
+ * field of the tracing_map_entry found at that location is 0, then
+ * that entry is considered to be free and can be claimed, by
+ * replacing the 0 in the 'key' field of the tracing_map_entry with
+ * the new 32-bit hashed key.  Once claimed, that tracing_map_entry's
+ * 'val' field is then used to store a unique element which will be
+ * forever associated with that 32-bit hashed key in the
+ * tracing_map_entry.
+ *
+ * That unique element now in the tracing_map_entry's 'val' field is
+ * an instance of tracing_map_elt, where 'elt' in the latter part of
+ * that variable name is short for 'element'.  The purpose of a
+ * tracing_map_elt is to hold values specific to the particular
+ * 32-bit hashed key it's associated with: things such as the unique
+ * set of aggregated sums associated with the 32-bit hashed key, along
+ * with a copy of the full key associated with the entry, which was
+ * used to produce the 32-bit hashed key.
+ *
+ * When tracing_map_create() is called to create the tracing map, the
+ * user specifies (indirectly via the map_bits param, the details are
+ * unimportant for this discussion) the maximum number of elements
+ * that the map can hold (stored in the max_elts field of struct
+ * tracing_map).  This is the maximum possible number of
+ * tracing_map_entries in the tracing_map_entry array which can be
+ * 'claimed' as described in the above discussion, and therefore is
+ * also the maximum number of tracing_map_elts that can be associated
+ * with the tracing_map_entry array in the tracing_map.  Because of
+ * the way the insertion algorithm works, the size of the allocated
+ * tracing_map_entry array is always twice the maximum number of
+ * elements (2 * max_elts).  This value is stored in the map_size
+ * field of struct tracing_map.
+ *
+ * Because tracing_map_insert() needs to work from any context,
+ * including from within the memory allocation functions themselves,
+ * both the tracing_map_entry array and a pool of max_elts
+ * tracing_map_elts are pre-allocated before any call is made to
+ * tracing_map_insert().
+ *
+ * The tracing_map_entry array is allocated as a single block by
+ * tracing_map_create().
+ *
+ * Because the tracing_map_elts are much larger objects and can't
+ * generally be allocated together as a single large array without
+ * failure, they're allocated individually, by tracing_map_init().
+ *
+ * The pool of tracing_map_elts are allocated by tracing_map_init()
+ * rather than by tracing_map_create() because at the time
+ * tracing_map_create() is called, there isn't enough information to
+ * create the tracing_map_elts.  Specifically, the user first needs to
+ * tell the tracing_map implementation how many fields the
+ * tracing_map_elts contain, and which types of fields they are (key
+ * or sum).  The user does this via the tracing_map_add_sum_field()
+ * and tracing_map_add_key_field() functions, following which the user
+ * calls tracing_map_init() to finish up the tracing map setup.  The
+ * array holding the pointers which make up the pre-allocated pool of
+ * tracing_map_elts is allocated as a single block and is stored in
+ * the elts field of struct tracing_map.
+ *
+ * There is also a set of structures used for sorting that might
+ * benefit from some minimal explanation.
+ *
+ * struct tracing_map_sort_key is used to drive the sort at any given
+ * time.  By 'any given time' we mean that a different
+ * tracing_map_sort_key will be used at different times depending on
+ * whether the sort currently being performed is a primary or a
+ * secondary sort.
+ *
+ * The sort key is very simple, consisting of the field index of the
+ * tracing_map_elt field to sort on (which the user saved when adding
+ * the field), and whether the sort should be done in an ascending or
+ * descending order.
+ *
+ * For the convenience of the sorting code, a tracing_map_sort_entry
+ * is created for each tracing_map_elt, again individually allocated
+ * to avoid failures that might be expected if allocated as a single
+ * large array of struct tracing_map_sort_entry.
+ * tracing_map_sort_entry instances are the objects expected by the
+ * various internal sorting functions, and are also what the user
+ * ultimately receives after calling tracing_map_sort_entries().
+ * Because it doesn't make sense for users to access an unordered and
+ * sparsely populated tracing_map directly, the
+ * tracing_map_sort_entries() function is provided so that users can
+ * retrieve a sorted list of all existing elements.  In addition to
+ * the associated tracing_map_elt 'elt' field contained within the
+ * tracing_map_sort_entry, which is the object of interest to the
+ * user, tracing_map_sort_entry objects contain a number of additional
+ * fields which are used for caching and internal purposes and can
+ * safely be ignored.
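+ *
+ * As a rough usage sketch (the client code below is hypothetical,
+ * the key is assumed to be a single u64, and error handling is
+ * omitted):
+ *
+ *	map = tracing_map_create(TRACING_MAP_BITS_DEFAULT, sizeof(u64),
+ *				 NULL, NULL);
+ *	sum_idx = tracing_map_add_sum_field(map);
+ *	key_idx = tracing_map_add_key_field(map, 0,
+ *					    tracing_map_cmp_num(sizeof(u64), 0));
+ *	tracing_map_init(map);
+ *
+ *	elt = tracing_map_insert(map, &key);	(from tracing context)
+ *	tracing_map_update_sum(elt, sum_idx, delta);
+ *
+ *	n = tracing_map_sort_entries(map, &sort_key, 1, &entries);
+ *	(walk entries[0] .. entries[n - 1], then)
+ *	tracing_map_destroy_sort_entries(entries, n);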
+*/
+
+struct tracing_map_field {
+	tracing_map_cmp_fn_t		cmp_fn;
+	union {
+		atomic64_t			sum;
+		unsigned int			offset;
+	};
+};
+
+struct tracing_map_elt {
+	struct tracing_map		*map;
+	struct tracing_map_field	*fields;
+	void				*key;
+	void				*private_data;
+};
+
+struct tracing_map_entry {
+	u32				key;
+	struct tracing_map_elt		*val;
+};
+
+struct tracing_map_sort_key {
+	unsigned int			field_idx;
+	bool				descending;
+};
+
+struct tracing_map_sort_entry {
+	void				*key;
+	struct tracing_map_elt		*elt;
+	bool				elt_copied;
+	bool				dup;
+};
+
+struct tracing_map_array {
+	unsigned int entries_per_page;
+	unsigned int entry_size_shift;
+	unsigned int entry_shift;
+	unsigned int entry_mask;
+	unsigned int n_pages;
+	void **pages;
+};
+
+#define TRACING_MAP_ARRAY_ELT(array, idx)				\
+	(array->pages[idx >> array->entry_shift] +			\
+	 ((idx & array->entry_mask) << array->entry_size_shift))
+
+#define TRACING_MAP_ENTRY(array, idx)					\
+	((struct tracing_map_entry *)TRACING_MAP_ARRAY_ELT(array, idx))
+
+#define TRACING_MAP_ELT(array, idx)					\
+	((struct tracing_map_elt **)TRACING_MAP_ARRAY_ELT(array, idx))
+
+struct tracing_map {
+	unsigned int			key_size;
+	unsigned int			map_bits;
+	unsigned int			map_size;
+	unsigned int			max_elts;
+	atomic_t			next_elt;
+	struct tracing_map_array	*elts;
+	struct tracing_map_array	*map;
+	const struct tracing_map_ops	*ops;
+	void				*private_data;
+	struct tracing_map_field	fields[TRACING_MAP_FIELDS_MAX];
+	unsigned int			n_fields;
+	int				key_idx[TRACING_MAP_KEYS_MAX];
+	unsigned int			n_keys;
+	struct tracing_map_sort_key	sort_key;
+	atomic64_t			hits;
+	atomic64_t			drops;
+};
+
+/**
+ * struct tracing_map_ops - callbacks for tracing_map
+ *
+ * The methods in this structure define callback functions for various
+ * operations on a tracing_map or objects related to a tracing_map.
+ *
+ * For a detailed description of tracing_map_elt objects please see
+ * the overview of tracing_map data structures at the beginning of
+ * this file.
+ *
+ * All the methods below are optional.
+ *
+ * @elt_alloc: When a tracing_map_elt is allocated, this function, if
+ *	defined, will be called and gives clients the opportunity to
+ *	allocate additional data and attach it to the element
+ *	(tracing_map_elt->private_data is meant for that purpose).
+ *	Element allocation occurs before tracing begins, when the
+ *	tracing_map_init() call is made by client code.
+ *
+ * @elt_copy: At certain points in the lifetime of an element, it may
+ *	need to be copied.  The copy should include a copy of the
+ *	client-allocated data, which can be copied into the 'to'
+ *	element from the 'from' element.
+ *
+ * @elt_free: When a tracing_map_elt is freed, this function is called
+ *	and allows client-allocated per-element data to be freed.
+ *
+ * @elt_clear: This callback allows per-element client-defined data to
+ *	be cleared, if applicable.
+ *
+ * @elt_init: This callback allows per-element client-defined data to
+ *	be initialized when used, i.e. when the element is actually
+ *	claimed by tracing_map_insert() in the context of the map
+ *	insertion.
+ */
+struct tracing_map_ops {
+	int			(*elt_alloc)(struct tracing_map_elt *elt);
+	void			(*elt_copy)(struct tracing_map_elt *to,
+					    struct tracing_map_elt *from);
+	void			(*elt_free)(struct tracing_map_elt *elt);
+	void			(*elt_clear)(struct tracing_map_elt *elt);
+	void			(*elt_init)(struct tracing_map_elt *elt);
+};
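+
+/*
+ * Example (a sketch only, not taken from an existing client): attach
+ * and free per-element private data via the optional callbacks.
+ *
+ *	static int my_elt_alloc(struct tracing_map_elt *elt)
+ *	{
+ *		elt->private_data = kzalloc(sizeof(struct my_data),
+ *					    GFP_KERNEL);
+ *		return elt->private_data ? 0 : -ENOMEM;
+ *	}
+ *
+ *	static void my_elt_free(struct tracing_map_elt *elt)
+ *	{
+ *		kfree(elt->private_data);
+ *	}
+ *
+ *	static const struct tracing_map_ops my_ops = {
+ *		.elt_alloc	= my_elt_alloc,
+ *		.elt_free	= my_elt_free,
+ *	};
+ */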
+
+extern struct tracing_map *
+tracing_map_create(unsigned int map_bits,
+		   unsigned int key_size,
+		   const struct tracing_map_ops *ops,
+		   void *private_data);
+extern int tracing_map_init(struct tracing_map *map);
+
+extern int tracing_map_add_sum_field(struct tracing_map *map);
+extern int tracing_map_add_key_field(struct tracing_map *map,
+				     unsigned int offset,
+				     tracing_map_cmp_fn_t cmp_fn);
+
+extern void tracing_map_destroy(struct tracing_map *map);
+extern void tracing_map_clear(struct tracing_map *map);
+
+extern struct tracing_map_elt *
+tracing_map_insert(struct tracing_map *map, void *key);
+extern struct tracing_map_elt *
+tracing_map_lookup(struct tracing_map *map, void *key);
+
+extern tracing_map_cmp_fn_t tracing_map_cmp_num(int field_size,
+						int field_is_signed);
+extern int tracing_map_cmp_string(void *val_a, void *val_b);
+extern int tracing_map_cmp_none(void *val_a, void *val_b);
+
+extern void tracing_map_update_sum(struct tracing_map_elt *elt,
+				   unsigned int i, u64 n);
+extern u64 tracing_map_read_sum(struct tracing_map_elt *elt, unsigned int i);
+extern void tracing_map_set_field_descr(struct tracing_map *map,
+					unsigned int i,
+					unsigned int key_offset,
+					tracing_map_cmp_fn_t cmp_fn);
+extern int
+tracing_map_sort_entries(struct tracing_map *map,
+			 struct tracing_map_sort_key *sort_keys,
+			 unsigned int n_sort_keys,
+			 struct tracing_map_sort_entry ***sort_entries);
+
+extern void
+tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries,
+				 unsigned int n_entries);
+#endif /* __TRACING_MAP_H */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3..5f5068e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -666,6 +666,35 @@
 	 */
 	smp_wmb();
 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+	/*
+	 * The following mb guarantees that previous clear of a PENDING bit
+	 * will not be reordered with any speculative LOADS or STORES from
+	 * work->current_func, which is executed afterwards.  This possible
+	 * reordering can lead to a missed execution on an attempt to queue
+	 * the same @work.  E.g. consider this case:
+	 *
+	 *   CPU#0                         CPU#1
+	 *   ----------------------------  --------------------------------
+	 *
+	 * 1  STORE event_indicated
+	 * 2  queue_work_on() {
+	 * 3    test_and_set_bit(PENDING)
+	 * 4 }                             set_..._and_clear_pending() {
+	 * 5                                 set_work_data() # clear bit
+	 * 6                                 smp_mb()
+	 * 7                               work->current_func() {
+	 * 8				      LOAD event_indicated
+	 *				   }
+	 *
+	 * Without an explicit full barrier speculative LOAD on line 8 can
+	 * be executed before CPU#0 does STORE on line 1.  If that happens,
+	 * CPU#0 observes the PENDING bit is still set and new execution of
+	 * a @work is not queued in the hope that CPU#1 will eventually
+	 * finish the queued @work.  Meanwhile CPU#1 does not see that
+	 * event_indicated is set, because the speculative LOAD was
+	 * executed before the actual STORE.
+	 */
+	smp_mb();
 }
 
 static void clear_work_data(struct work_struct *work)
@@ -4525,6 +4554,17 @@
 						  pool->attrs->cpumask) < 0);
 
 	spin_lock_irq(&pool->lock);
+
+	/*
+	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
+	 * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
+	 * being reworked and this can go away in time.
+	 */
+	if (!(pool->flags & POOL_DISASSOCIATED)) {
+		spin_unlock_irq(&pool->lock);
+		return;
+	}
+
 	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
diff --git a/lib/Kconfig b/lib/Kconfig
index 3cca122..61d55bd 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -523,6 +523,13 @@
 	 a scatterlist. This should be selected by a driver or an API which
 	 whishes to split a scatterlist amongst multiple DMA channels.
 
+config SG_POOL
+	def_bool n
+	help
+	 Provides a helper to allocate chained scatterlists. This should be
+	 selected by a driver or an API which wishes to allocate chained
+	 scatterlist.
+
 #
 # sg chaining option
 #
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1e9a607..f4b797a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1289,6 +1289,39 @@
 	tristate
 	default n
 
+config RCU_PERF_TEST
+	tristate "performance tests for RCU"
+	depends on DEBUG_KERNEL
+	select TORTURE_TEST
+	select SRCU
+	select TASKS_RCU
+	default n
+	help
+	  This option provides a kernel module that runs performance
+	  tests on the RCU infrastructure.  The kernel module may be built
+	  after the fact on the running kernel to be tested, if desired.
+
+	  Say Y here if you want RCU performance tests to be built into
+	  the kernel.
+	  Say M if you want the RCU performance tests to build as a module.
+	  Say N if you are unsure.
+
+config RCU_PERF_TEST_RUNNABLE
+	bool "performance tests for RCU runnable by default"
+	depends on RCU_PERF_TEST = y
+	default n
+	help
+	  This option provides a way to build the RCU performance tests
+	  directly into the kernel without them starting up at boot time.
+	  You can use /sys/module to manually override this setting.
+	  The corresponding module parameter is available only when the
+	  RCU performance tests have been built into the kernel.
+
+	  Say Y here if you want the RCU performance tests to start during
+	  boot (you probably don't).
+	  Say N here if you want the RCU performance tests to start only
+	  after being manually enabled via /sys/module.
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 7bd6fd4..931396a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,7 +23,7 @@
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o \
 	 sha1.o md5.o irq_regs.o argv_split.o \
-	 proportions.o flex_proportions.o ratelimit.o show_mem.o \
+	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o nmi_backtrace.o
 
@@ -178,6 +178,7 @@
 obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
 
 obj-$(CONFIG_SG_SPLIT) += sg_split.o
+obj-$(CONFIG_SG_POOL) += sg_pool.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 obj-$(CONFIG_IRQ_POLL) += irq_poll.o
 
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 2b3f46c..0bd8a61 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -12,6 +12,7 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
+#include <linux/module.h>
 #include <linux/asn1_decoder.h>
 #include <linux/asn1_ber_bytecode.h>
 
@@ -74,7 +75,7 @@
 
 	/* Extract a tag from the data */
 	tag = data[dp++];
-	if (tag == 0) {
+	if (tag == ASN1_EOC) {
 		/* It appears to be an EOC. */
 		if (data[dp++] != 0)
 			goto invalid_eoc;
@@ -96,10 +97,8 @@
 
 	/* Extract the length */
 	len = data[dp++];
-	if (len <= 0x7f) {
-		dp += len;
-		goto next_tag;
-	}
+	if (len <= 0x7f)
+		goto check_length;
 
 	if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
 		/* Indefinite length */
@@ -110,14 +109,18 @@
 	}
 
 	n = len - 0x80;
-	if (unlikely(n > sizeof(size_t) - 1))
+	if (unlikely(n > sizeof(len) - 1))
 		goto length_too_long;
 	if (unlikely(n > datalen - dp))
 		goto data_overrun_error;
-	for (len = 0; n > 0; n--) {
+	len = 0;
+	for (; n > 0; n--) {
 		len <<= 8;
 		len |= data[dp++];
 	}
+check_length:
+	if (len > datalen - dp)
+		goto data_overrun_error;
 	dp += len;
 	goto next_tag;
 
@@ -504,3 +507,5 @@
 	return -EBADMSG;
 }
 EXPORT_SYMBOL_GPL(asn1_ber_decoder);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 03dd576..59fd7c0 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -524,7 +524,9 @@
 			free_slot = i;
 			continue;
 		}
-		if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
+		if (assoc_array_ptr_is_leaf(ptr) &&
+		    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+					index_key)) {
 			pr_devel("replace in slot %d\n", i);
 			edit->leaf_p = &node->slots[i];
 			edit->dead_leaf = node->slots[i];
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 5fecddc..28cb431 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -99,40 +99,44 @@
 }
 
 #define iterate_and_advance(i, n, v, I, B, K) {			\
-	size_t skip = i->iov_offset;				\
-	if (unlikely(i->type & ITER_BVEC)) {			\
-		const struct bio_vec *bvec;			\
-		struct bio_vec v;				\
-		iterate_bvec(i, n, v, bvec, skip, (B))		\
-		if (skip == bvec->bv_len) {			\
-			bvec++;					\
-			skip = 0;				\
+	if (unlikely(i->count < n))				\
+		n = i->count;					\
+	if (n) {						\
+		size_t skip = i->iov_offset;			\
+		if (unlikely(i->type & ITER_BVEC)) {		\
+			const struct bio_vec *bvec;		\
+			struct bio_vec v;			\
+			iterate_bvec(i, n, v, bvec, skip, (B))	\
+			if (skip == bvec->bv_len) {		\
+				bvec++;				\
+				skip = 0;			\
+			}					\
+			i->nr_segs -= bvec - i->bvec;		\
+			i->bvec = bvec;				\
+		} else if (unlikely(i->type & ITER_KVEC)) {	\
+			const struct kvec *kvec;		\
+			struct kvec v;				\
+			iterate_kvec(i, n, v, kvec, skip, (K))	\
+			if (skip == kvec->iov_len) {		\
+				kvec++;				\
+				skip = 0;			\
+			}					\
+			i->nr_segs -= kvec - i->kvec;		\
+			i->kvec = kvec;				\
+		} else {					\
+			const struct iovec *iov;		\
+			struct iovec v;				\
+			iterate_iovec(i, n, v, iov, skip, (I))	\
+			if (skip == iov->iov_len) {		\
+				iov++;				\
+				skip = 0;			\
+			}					\
+			i->nr_segs -= iov - i->iov;		\
+			i->iov = iov;				\
 		}						\
-		i->nr_segs -= bvec - i->bvec;			\
-		i->bvec = bvec;					\
-	} else if (unlikely(i->type & ITER_KVEC)) {		\
-		const struct kvec *kvec;			\
-		struct kvec v;					\
-		iterate_kvec(i, n, v, kvec, skip, (K))		\
-		if (skip == kvec->iov_len) {			\
-			kvec++;					\
-			skip = 0;				\
-		}						\
-		i->nr_segs -= kvec - i->kvec;			\
-		i->kvec = kvec;					\
-	} else {						\
-		const struct iovec *iov;			\
-		struct iovec v;					\
-		iterate_iovec(i, n, v, iov, skip, (I))		\
-		if (skip == iov->iov_len) {			\
-			iov++;					\
-			skip = 0;				\
-		}						\
-		i->nr_segs -= iov - i->iov;			\
-		i->iov = iov;					\
+		i->count -= n;					\
+		i->iov_offset = skip;				\
 	}							\
-	i->count -= n;						\
-	i->iov_offset = skip;					\
 }
 
 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
@@ -386,12 +390,6 @@
 size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
 	const char *from = addr;
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	iterate_and_advance(i, bytes, v,
 		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
 			       v.iov_len),
@@ -407,12 +405,6 @@
 size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	iterate_and_advance(i, bytes, v,
 		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
 				 v.iov_len),
@@ -428,12 +420,6 @@
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	iterate_and_advance(i, bytes, v,
 		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
 					 v.iov_base, v.iov_len),
@@ -474,12 +460,6 @@
 
 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	iterate_and_advance(i, bytes, v,
 		__clear_user(v.iov_base, v.iov_len),
 		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
@@ -569,6 +549,25 @@
 }
 EXPORT_SYMBOL(iov_iter_alignment);
 
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
+{
+	unsigned long res = 0;
+	size_t size = i->count;
+	if (!size)
+		return 0;
+
+	iterate_all_kinds(i, size, v,
+		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
+			(size != v.iov_len ? size : 0), 0),
+		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
+			(size != v.bv_len ? size : 0)),
+		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
+			(size != v.iov_len ? size : 0))
+		);
+		return res;
+}
+EXPORT_SYMBOL(iov_iter_gap_alignment);
+
 ssize_t iov_iter_get_pages(struct iov_iter *i,
 		   struct page **pages, size_t maxsize, unsigned maxpages,
 		   size_t *start)
@@ -666,12 +665,6 @@
 	char *to = addr;
 	__wsum sum, next;
 	size_t off = 0;
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	sum = *csum;
 	iterate_and_advance(i, bytes, v, ({
 		int err = 0;
@@ -710,12 +703,6 @@
 	const char *from = addr;
 	__wsum sum, next;
 	size_t off = 0;
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
 	sum = *csum;
 	iterate_and_advance(i, bytes, v, ({
 		int err = 0;
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index abcecdc..c79d7ea 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -11,8 +11,7 @@
 /*
  * Detects 64 bits mode
  */
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
-	|| defined(__ppc64__) || defined(__LP64__))
+#if defined(CONFIG_64BIT)
 #define LZ4_ARCH64 1
 #else
 #define LZ4_ARCH64 0
@@ -25,9 +24,7 @@
 typedef struct _U16_S { u16 v; } U16_S;
 typedef struct _U32_S { u32 v; } U32_S;
 typedef struct _U64_S { u64 v; } U64_S;
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)		\
-	|| defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6	\
-	&& defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 
 #define A16(x) (((U16_S *)(x))->v)
 #define A32(x) (((U32_S *)(x))->v)
@@ -35,6 +32,10 @@
 
 #define PUT4(s, d) (A32(d) = A32(s))
 #define PUT8(s, d) (A64(d) = A64(s))
+
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
+	(d = s - A16(p))
+
 #define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
 	do {	\
 		A16(p) = v; \
@@ -51,10 +52,13 @@
 #define PUT8(s, d) \
 	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
 
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
-	do {	\
-		put_unaligned(v, (u16 *)(p)); \
-		p += 2; \
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
+	(d = s - get_unaligned_le16(p))
+
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v)			\
+	do {						\
+		put_unaligned_le16(v, (u16 *)(p));	\
+		p += 2;					\
 	} while (0)
 #endif
 
@@ -140,9 +144,6 @@
 
 #endif
 
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
-	(d = s - get_unaligned_le16(p))
-
 #define LZ4_WILDCOPY(s, d, e)		\
 	do {				\
 		LZ4_COPYPACKET(s, d);	\
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index eb15e7d..747606f 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -20,6 +20,8 @@
 
 #include <linux/bitops.h>
 #include <linux/count_zeros.h>
+#include <linux/byteorder/generic.h>
+#include <linux/string.h>
 #include "mpi-internal.h"
 
 #define MAX_EXTERN_MPI_BITS 16384
@@ -163,7 +165,13 @@
 		    int *sign)
 {
 	uint8_t *p;
-	mpi_limb_t alimb;
+#if BYTES_PER_MPI_LIMB == 4
+	__be32 alimb;
+#elif BYTES_PER_MPI_LIMB == 8
+	__be64 alimb;
+#else
+#error please implement for this limb size.
+#endif
 	unsigned int n = mpi_get_size(a);
 	int i, lzeros;
 
@@ -183,38 +191,19 @@
 	p = buf;
 	*nbytes = n - lzeros;
 
-	for (i = a->nlimbs - 1; i >= 0; i--) {
-		alimb = a->d[i];
+	for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
+			lzeros %= BYTES_PER_MPI_LIMB;
+		i >= 0; i--) {
 #if BYTES_PER_MPI_LIMB == 4
-		*p++ = alimb >> 24;
-		*p++ = alimb >> 16;
-		*p++ = alimb >> 8;
-		*p++ = alimb;
+		alimb = cpu_to_be32(a->d[i]);
 #elif BYTES_PER_MPI_LIMB == 8
-		*p++ = alimb >> 56;
-		*p++ = alimb >> 48;
-		*p++ = alimb >> 40;
-		*p++ = alimb >> 32;
-		*p++ = alimb >> 24;
-		*p++ = alimb >> 16;
-		*p++ = alimb >> 8;
-		*p++ = alimb;
+		alimb = cpu_to_be64(a->d[i]);
 #else
 #error please implement for this limb size.
 #endif
-
-		if (lzeros > 0) {
-			if (lzeros >= sizeof(alimb)) {
-				p -= sizeof(alimb);
-			} else {
-				mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
-				mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
-							+ lzeros;
-				*limb1 = *limb2;
-				p -= lzeros;
-			}
-			lzeros -= sizeof(alimb);
-		}
+		memcpy(p, (u8 *)&alimb + lzeros, BYTES_PER_MPI_LIMB - lzeros);
+		p += BYTES_PER_MPI_LIMB - lzeros;
+		lzeros = 0;
 	}
 	return 0;
 }
@@ -359,7 +348,13 @@
 		     int *sign)
 {
 	u8 *p, *p2;
-	mpi_limb_t alimb, alimb2;
+#if BYTES_PER_MPI_LIMB == 4
+	__be32 alimb;
+#elif BYTES_PER_MPI_LIMB == 8
+	__be64 alimb;
+#else
+#error please implement for this limb size.
+#endif
 	unsigned int n = mpi_get_size(a);
 	int i, x, y = 0, lzeros, buf_len;
 
@@ -380,42 +375,22 @@
 	buf_len = sgl->length;
 	p2 = sg_virt(sgl);
 
-	for (i = a->nlimbs - 1; i >= 0; i--) {
-		alimb = a->d[i];
-		p = (u8 *)&alimb2;
+	for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
+			lzeros %= BYTES_PER_MPI_LIMB;
+		i >= 0; i--) {
 #if BYTES_PER_MPI_LIMB == 4
-		*p++ = alimb >> 24;
-		*p++ = alimb >> 16;
-		*p++ = alimb >> 8;
-		*p++ = alimb;
+		alimb = cpu_to_be32(a->d[i]);
 #elif BYTES_PER_MPI_LIMB == 8
-		*p++ = alimb >> 56;
-		*p++ = alimb >> 48;
-		*p++ = alimb >> 40;
-		*p++ = alimb >> 32;
-		*p++ = alimb >> 24;
-		*p++ = alimb >> 16;
-		*p++ = alimb >> 8;
-		*p++ = alimb;
+		alimb = cpu_to_be64(a->d[i]);
 #else
 #error please implement for this limb size.
 #endif
-		if (lzeros > 0) {
-			if (lzeros >= sizeof(alimb)) {
-				p -= sizeof(alimb);
-				continue;
-			} else {
-				mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
-				mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
-							+ lzeros;
-				*limb1 = *limb2;
-				p -= lzeros;
-				y = lzeros;
-			}
-			lzeros -= sizeof(alimb);
+		if (lzeros) {
+			y = lzeros;
+			lzeros = 0;
 		}
 
-		p = p - (sizeof(alimb) - y);
+		p = (u8 *)&alimb + y;
 
 		for (x = 0; x < sizeof(alimb) - y; x++) {
 			if (!buf_len) {
@@ -443,15 +418,15 @@
  * a new MPI and reads the content of the sgl to the MPI.
  *
  * @sgl:	scatterlist to read from
- * @len:	number of bytes to read
+ * @nbytes:	number of bytes to read
  *
  * Return:	Pointer to a new MPI or NULL on error
  */
-MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
+MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 {
 	struct scatterlist *sg;
 	int x, i, j, z, lzeros, ents;
-	unsigned int nbits, nlimbs, nbytes;
+	unsigned int nbits, nlimbs;
 	mpi_limb_t a;
 	MPI val = NULL;
 
@@ -472,16 +447,12 @@
 			break;
 
 		ents--;
+		nbytes -= lzeros;
 		lzeros = 0;
 	}
 
 	sgl = sg;
-
-	if (!ents)
-		nbytes = 0;
-	else
-		nbytes = len - lzeros;
-
+	nbytes -= lzeros;
 	nbits = nbytes * 8;
 	if (nbits > MAX_EXTERN_MPI_BITS) {
 		pr_info("MPI: mpi too large (%u bits)\n", nbits);
@@ -489,9 +460,8 @@
 	}
 
 	if (nbytes > 0)
-		nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros));
-	else
-		nbits = 0;
+		nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros)) -
+			(BITS_PER_LONG - 8);
 
 	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
@@ -507,19 +477,14 @@
 
 	j = nlimbs - 1;
 	a = 0;
-	z = 0;
-	x = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
-	x %= BYTES_PER_MPI_LIMB;
+	z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
+	z %= BYTES_PER_MPI_LIMB;
 
 	for_each_sg(sgl, sg, ents, i) {
 		const u8 *buffer = sg_virt(sg) + lzeros;
 		int len = sg->length - lzeros;
-		int buf_shift = x;
 
-		if  (sg_is_last(sg) && (len % BYTES_PER_MPI_LIMB))
-			len += BYTES_PER_MPI_LIMB - (len % BYTES_PER_MPI_LIMB);
-
-		for (; x < len + buf_shift; x++) {
+		for (x = 0; x < len; x++) {
 			a <<= 8;
 			a |= *buffer++;
 			if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
@@ -528,7 +493,6 @@
 			}
 		}
 		z += x;
-		x = 0;
 		lzeros = 0;
 	}
 	return val;
diff --git a/lib/nlattr.c b/lib/nlattr.c
index f5907d2..fce1e9a 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -355,6 +355,30 @@
 EXPORT_SYMBOL(__nla_reserve);
 
 /**
+ * __nla_reserve_64bit - reserve room for attribute on the skb and align it
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it. It also ensures that this
+ * attribute will have a 64-bit aligned nla_data() area.
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+				   int attrlen, int padattr)
+{
+	if (nla_need_padding_for_64bit(skb))
+		nla_align_64bit(skb, padattr);
+
+	return __nla_reserve(skb, attrtype, attrlen);
+}
+EXPORT_SYMBOL(__nla_reserve_64bit);
+
+/**
  * __nla_reserve_nohdr - reserve room for attribute without header
  * @skb: socket buffer to reserve room on
  * @attrlen: length of attribute payload
@@ -397,6 +421,36 @@
 EXPORT_SYMBOL(nla_reserve);
 
 /**
+ * nla_reserve_64bit - reserve room for attribute on the skb and align it
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it. It also ensures that this
+ * attribute will have a 64-bit aligned nla_data() area.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+				 int padattr)
+{
+	size_t len;
+
+	if (nla_need_padding_for_64bit(skb))
+		len = nla_total_size_64bit(attrlen);
+	else
+		len = nla_total_size(attrlen);
+	if (unlikely(skb_tailroom(skb) < len))
+		return NULL;
+
+	return __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
+}
+EXPORT_SYMBOL(nla_reserve_64bit);
+
+/**
  * nla_reserve_nohdr - reserve room for attribute without header
  * @skb: socket buffer to reserve room on
  * @attrlen: length of attribute payload
@@ -436,6 +490,27 @@
 EXPORT_SYMBOL(__nla_put);
 
 /**
+ * __nla_put_64bit - Add a netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+		     const void *data, int padattr)
+{
+	struct nlattr *nla;
+
+	nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
+	memcpy(nla_data(nla), data, attrlen);
+}
+EXPORT_SYMBOL(__nla_put_64bit);
+
+/**
  * __nla_put_nohdr - Add a netlink attribute without header
  * @skb: socket buffer to add attribute to
  * @attrlen: length of attribute payload
@@ -474,6 +549,34 @@
 EXPORT_SYMBOL(nla_put);
 
 /**
+ * nla_put_64bit - Add a netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+		  const void *data, int padattr)
+{
+	size_t len;
+
+	if (nla_need_padding_for_64bit(skb))
+		len = nla_total_size_64bit(attrlen);
+	else
+		len = nla_total_size(attrlen);
+	if (unlikely(skb_tailroom(skb) < len))
+		return -EMSGSIZE;
+
+	__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
+	return 0;
+}
+EXPORT_SYMBOL(nla_put_64bit);
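+
+/*
+ * Typical use (sketch only; the attribute type, pad type and payload
+ * are placeholders rather than a reference to any specific caller):
+ *
+ *	if (nla_put_64bit(skb, IFLA_STATS64, sizeof(*stats), stats,
+ *			  IFLA_PAD))
+ *		goto nla_put_failure;
+ */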
+
+/**
  * nla_put_nohdr - Add a netlink attribute without header
  * @skb: socket buffer to add attribute to
  * @attrlen: length of attribute payload
diff --git a/lib/proportions.c b/lib/proportions.c
deleted file mode 100644
index efa54f25..0000000
--- a/lib/proportions.c
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * Floating proportions
- *
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
- *
- * Description:
- *
- * The floating proportion is a time derivative with an exponentially decaying
- * history:
- *
- *   p_{j} = \Sum_{i=0} (dx_{j}/dt_{-i}) / 2^(1+i)
- *
- * Where j is an element from {prop_local}, x_{j} is j's number of events,
- * and i the time period over which the differential is taken. So d/dt_{-i} is
- * the differential over the i-th last period.
- *
- * The decaying history gives smooth transitions. The time differential carries
- * the notion of speed.
- *
- * The denominator is 2^(1+i) because we want the series to be normalised, ie.
- *
- *   \Sum_{i=0} 1/2^(1+i) = 1
- *
- * Further more, if we measure time (t) in the same events as x; so that:
- *
- *   t = \Sum_{j} x_{j}
- *
- * we get that:
- *
- *   \Sum_{j} p_{j} = 1
- *
- * Writing this in an iterative fashion we get (dropping the 'd's):
- *
- *   if (++x_{j}, ++t > period)
- *     t /= 2;
- *     for_each (j)
- *       x_{j} /= 2;
- *
- * so that:
- *
- *   p_{j} = x_{j} / t;
- *
- * We optimize away the '/= 2' for the global time delta by noting that:
- *
- *   if (++t > period) t /= 2:
- *
- * Can be approximated by:
- *
- *   period/2 + (++t % period/2)
- *
- * [ Furthermore, when we choose period to be 2^n it can be written in terms of
- *   binary operations and wraparound artefacts disappear. ]
- *
- * Also note that this yields a natural counter of the elapsed periods:
- *
- *   c = t / (period/2)
- *
- * [ Its monotonic increasing property can be applied to mitigate the wrap-
- *   around issue. ]
- *
- * This allows us to do away with the loop over all prop_locals on each period
- * expiration. By remembering the period count under which it was last accessed
- * as c_{j}, we can obtain the number of 'missed' cycles from:
- *
- *   c - c_{j}
- *
- * We can then lazily catch up to the global period count every time we are
- * going to use x_{j}, by doing:
- *
- *   x_{j} /= 2^(c - c_{j}), c_{j} = c
- */
-
-#include <linux/proportions.h>
-#include <linux/rcupdate.h>
-
-int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp)
-{
-	int err;
-
-	if (shift > PROP_MAX_SHIFT)
-		shift = PROP_MAX_SHIFT;
-
-	pd->index = 0;
-	pd->pg[0].shift = shift;
-	mutex_init(&pd->mutex);
-	err = percpu_counter_init(&pd->pg[0].events, 0, gfp);
-	if (err)
-		goto out;
-
-	err = percpu_counter_init(&pd->pg[1].events, 0, gfp);
-	if (err)
-		percpu_counter_destroy(&pd->pg[0].events);
-
-out:
-	return err;
-}
-
-/*
- * We have two copies, and flip between them to make it seem like an atomic
- * update. The update is not really atomic wrt the events counter, but
- * it is internally consistent with the bit layout depending on shift.
- *
- * We copy the events count, move the bits around and flip the index.
- */
-void prop_change_shift(struct prop_descriptor *pd, int shift)
-{
-	int index;
-	int offset;
-	u64 events;
-	unsigned long flags;
-
-	if (shift > PROP_MAX_SHIFT)
-		shift = PROP_MAX_SHIFT;
-
-	mutex_lock(&pd->mutex);
-
-	index = pd->index ^ 1;
-	offset = pd->pg[pd->index].shift - shift;
-	if (!offset)
-		goto out;
-
-	pd->pg[index].shift = shift;
-
-	local_irq_save(flags);
-	events = percpu_counter_sum(&pd->pg[pd->index].events);
-	if (offset < 0)
-		events <<= -offset;
-	else
-		events >>= offset;
-	percpu_counter_set(&pd->pg[index].events, events);
-
-	/*
-	 * ensure the new pg is fully written before the switch
-	 */
-	smp_wmb();
-	pd->index = index;
-	local_irq_restore(flags);
-
-	synchronize_rcu();
-
-out:
-	mutex_unlock(&pd->mutex);
-}
-
-/*
- * wrap the access to the data in an rcu_read_lock() section;
- * this is used to track the active references.
- */
-static struct prop_global *prop_get_global(struct prop_descriptor *pd)
-__acquires(RCU)
-{
-	int index;
-
-	rcu_read_lock();
-	index = pd->index;
-	/*
-	 * match the wmb from vcd_flip()
-	 */
-	smp_rmb();
-	return &pd->pg[index];
-}
-
-static void prop_put_global(struct prop_descriptor *pd, struct prop_global *pg)
-__releases(RCU)
-{
-	rcu_read_unlock();
-}
-
-static void
-prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
-{
-	int offset = *pl_shift - new_shift;
-
-	if (!offset)
-		return;
-
-	if (offset < 0)
-		*pl_period <<= -offset;
-	else
-		*pl_period >>= offset;
-
-	*pl_shift = new_shift;
-}
-
-/*
- * PERCPU
- */
-
-#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-
-int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp)
-{
-	raw_spin_lock_init(&pl->lock);
-	pl->shift = 0;
-	pl->period = 0;
-	return percpu_counter_init(&pl->events, 0, gfp);
-}
-
-void prop_local_destroy_percpu(struct prop_local_percpu *pl)
-{
-	percpu_counter_destroy(&pl->events);
-}
-
-/*
- * Catch up with missed period expirations.
- *
- *   until (c_{j} == c)
- *     x_{j} -= x_{j}/2;
- *     c_{j}++;
- */
-static
-void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
-{
-	unsigned long period = 1UL << (pg->shift - 1);
-	unsigned long period_mask = ~(period - 1);
-	unsigned long global_period;
-	unsigned long flags;
-
-	global_period = percpu_counter_read(&pg->events);
-	global_period &= period_mask;
-
-	/*
-	 * Fast path - check if the local and global period count still match
-	 * outside of the lock.
-	 */
-	if (pl->period == global_period)
-		return;
-
-	raw_spin_lock_irqsave(&pl->lock, flags);
-	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
-
-	/*
-	 * For each missed period, we half the local counter.
-	 * basically:
-	 *   pl->events >> (global_period - pl->period);
-	 */
-	period = (global_period - pl->period) >> (pg->shift - 1);
-	if (period < BITS_PER_LONG) {
-		s64 val = percpu_counter_read(&pl->events);
-
-		if (val < (nr_cpu_ids * PROP_BATCH))
-			val = percpu_counter_sum(&pl->events);
-
-		__percpu_counter_add(&pl->events, -val + (val >> period),
-					PROP_BATCH);
-	} else
-		percpu_counter_set(&pl->events, 0);
-
-	pl->period = global_period;
-	raw_spin_unlock_irqrestore(&pl->lock, flags);
-}
-
-/*
- *   ++x_{j}, ++t
- */
-void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
-{
-	struct prop_global *pg = prop_get_global(pd);
-
-	prop_norm_percpu(pg, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
-	percpu_counter_add(&pg->events, 1);
-	prop_put_global(pd, pg);
-}
-
-/*
- * identical to __prop_inc_percpu, except that it limits this pl's fraction to
- * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded.
- */
-void __prop_inc_percpu_max(struct prop_descriptor *pd,
-			   struct prop_local_percpu *pl, long frac)
-{
-	struct prop_global *pg = prop_get_global(pd);
-
-	prop_norm_percpu(pg, pl);
-
-	if (unlikely(frac != PROP_FRAC_BASE)) {
-		unsigned long period_2 = 1UL << (pg->shift - 1);
-		unsigned long counter_mask = period_2 - 1;
-		unsigned long global_count;
-		long numerator, denominator;
-
-		numerator = percpu_counter_read_positive(&pl->events);
-		global_count = percpu_counter_read(&pg->events);
-		denominator = period_2 + (global_count & counter_mask);
-
-		if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT))
-			goto out_put;
-	}
-
-	percpu_counter_add(&pl->events, 1);
-	percpu_counter_add(&pg->events, 1);
-
-out_put:
-	prop_put_global(pd, pg);
-}
-
-/*
- * Obtain a fraction of this proportion
- *
- *   p_{j} = x_{j} / (period/2 + t % period/2)
- */
-void prop_fraction_percpu(struct prop_descriptor *pd,
-		struct prop_local_percpu *pl,
-		long *numerator, long *denominator)
-{
-	struct prop_global *pg = prop_get_global(pd);
-	unsigned long period_2 = 1UL << (pg->shift - 1);
-	unsigned long counter_mask = period_2 - 1;
-	unsigned long global_count;
-
-	prop_norm_percpu(pg, pl);
-	*numerator = percpu_counter_read_positive(&pl->events);
-
-	global_count = percpu_counter_read(&pg->events);
-	*denominator = period_2 + (global_count & counter_mask);
-
-	prop_put_global(pd, pg);
-}
-
-/*
- * SINGLE
- */
-
-int prop_local_init_single(struct prop_local_single *pl)
-{
-	raw_spin_lock_init(&pl->lock);
-	pl->shift = 0;
-	pl->period = 0;
-	pl->events = 0;
-	return 0;
-}
-
-void prop_local_destroy_single(struct prop_local_single *pl)
-{
-}
-
-/*
- * Catch up with missed period expirations.
- */
-static
-void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
-{
-	unsigned long period = 1UL << (pg->shift - 1);
-	unsigned long period_mask = ~(period - 1);
-	unsigned long global_period;
-	unsigned long flags;
-
-	global_period = percpu_counter_read(&pg->events);
-	global_period &= period_mask;
-
-	/*
-	 * Fast path - check if the local and global period count still match
-	 * outside of the lock.
-	 */
-	if (pl->period == global_period)
-		return;
-
-	raw_spin_lock_irqsave(&pl->lock, flags);
-	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
-	/*
-	 * For each missed period, we half the local counter.
-	 */
-	period = (global_period - pl->period) >> (pg->shift - 1);
-	if (likely(period < BITS_PER_LONG))
-		pl->events >>= period;
-	else
-		pl->events = 0;
-	pl->period = global_period;
-	raw_spin_unlock_irqrestore(&pl->lock, flags);
-}
-
-/*
- *   ++x_{j}, ++t
- */
-void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
-{
-	struct prop_global *pg = prop_get_global(pd);
-
-	prop_norm_single(pg, pl);
-	pl->events++;
-	percpu_counter_add(&pg->events, 1);
-	prop_put_global(pd, pg);
-}
-
-/*
- * Obtain a fraction of this proportion
- *
- *   p_{j} = x_{j} / (period/2 + t % period/2)
- */
-void prop_fraction_single(struct prop_descriptor *pd,
-	       	struct prop_local_single *pl,
-		long *numerator, long *denominator)
-{
-	struct prop_global *pg = prop_get_global(pd);
-	unsigned long period_2 = 1UL << (pg->shift - 1);
-	unsigned long counter_mask = period_2 - 1;
-	unsigned long global_count;
-
-	prop_norm_single(pg, pl);
-	*numerator = pl->events;
-
-	global_count = percpu_counter_read(&pg->events);
-	*denominator = period_2 + (global_count & counter_mask);
-
-	prop_put_global(pd, pg);
-}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index cc80870..5d845ff 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -487,6 +487,7 @@
  * rhashtable_walk_init - Initialise an iterator
  * @ht:		Table to walk over
  * @iter:	Hash table Iterator
+ * @gfp:	GFP flags for allocations
  *
  * This function prepares a hash table walk.
  *
@@ -504,14 +505,15 @@
  * You must call rhashtable_walk_exit if this function returns
  * successfully.
  */
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
+			 gfp_t gfp)
 {
 	iter->ht = ht;
 	iter->p = NULL;
 	iter->slot = 0;
 	iter->skip = 0;
 
-	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
+	iter->walker = kmalloc(sizeof(*iter->walker), gfp);
 	if (!iter->walker)
 		return -ENOMEM;
 
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
new file mode 100644
index 0000000..6dd3061
--- /dev/null
+++ b/lib/sg_pool.c
@@ -0,0 +1,172 @@
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+
+#define SG_MEMPOOL_NR		ARRAY_SIZE(sg_pools)
+#define SG_MEMPOOL_SIZE		2
+
+struct sg_pool {
+	size_t		size;
+	char		*name;
+	struct kmem_cache	*slab;
+	mempool_t	*pool;
+};
+
+#define SP(x) { .size = x, "sgpool-" __stringify(x) }
+#if (SG_CHUNK_SIZE < 32)
+#error SG_CHUNK_SIZE is too small (must be 32 or greater)
+#endif
+static struct sg_pool sg_pools[] = {
+	SP(8),
+	SP(16),
+#if (SG_CHUNK_SIZE > 32)
+	SP(32),
+#if (SG_CHUNK_SIZE > 64)
+	SP(64),
+#if (SG_CHUNK_SIZE > 128)
+	SP(128),
+#if (SG_CHUNK_SIZE > 256)
+#error SG_CHUNK_SIZE is too large (256 MAX)
+#endif
+#endif
+#endif
+#endif
+	SP(SG_CHUNK_SIZE)
+};
+#undef SP
+
+static inline unsigned int sg_pool_index(unsigned short nents)
+{
+	unsigned int index;
+
+	BUG_ON(nents > SG_CHUNK_SIZE);
+
+	if (nents <= 8)
+		index = 0;
+	else
+		index = get_count_order(nents) - 3;
+
+	return index;
+}
+
+static void sg_pool_free(struct scatterlist *sgl, unsigned int nents)
+{
+	struct sg_pool *sgp;
+
+	sgp = sg_pools + sg_pool_index(nents);
+	mempool_free(sgl, sgp->pool);
+}
+
+static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+	struct sg_pool *sgp;
+
+	sgp = sg_pools + sg_pool_index(nents);
+	return mempool_alloc(sgp->pool, gfp_mask);
+}
+
+/**
+ * sg_free_table_chained - Free a previously mapped sg table
+ * @table:	The sg table header to use
+ * @first_chunk: set if a first_chunk was passed to sg_alloc_table_chained()
+ *
+ *  Description:
+ *    Free an sg table previously allocated and setup with
+ *    sg_alloc_table_chained().
+ *
+ **/
+void sg_free_table_chained(struct sg_table *table, bool first_chunk)
+{
+	if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
+		return;
+	__sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
+}
+EXPORT_SYMBOL_GPL(sg_free_table_chained);
+
+/**
+ * sg_alloc_table_chained - Allocate and chain SGLs in an sg table
+ * @table:	The sg table header to use
+ * @nents:	Number of entries in sg list
+ * @first_chunk: first SGL
+ *
+ *  Description:
+ *    Allocate and chain SGLs in an sg table. If @nents is larger than
+ *    SG_CHUNK_SIZE, a chained sg table will be set up.
+ *
+ **/
+int sg_alloc_table_chained(struct sg_table *table, int nents,
+		struct scatterlist *first_chunk)
+{
+	int ret;
+
+	BUG_ON(!nents);
+
+	if (first_chunk) {
+		if (nents <= SG_CHUNK_SIZE) {
+			table->nents = table->orig_nents = nents;
+			sg_init_table(table->sgl, nents);
+			return 0;
+		}
+	}
+
+	ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
+			       first_chunk, GFP_ATOMIC, sg_pool_alloc);
+	if (unlikely(ret))
+		sg_free_table_chained(table, (bool)first_chunk);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
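+
+/*
+ * Typical use (sketch only; 'prealloc' stands for a caller-provided
+ * array of SG_CHUNK_SIZE scatterlist entries):
+ *
+ *	if (sg_alloc_table_chained(&table, nents, prealloc))
+ *		return -ENOMEM;
+ *	(map and use table.sgl, then)
+ *	sg_free_table_chained(&table, true);
+ */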
+
+static __init int sg_pool_init(void)
+{
+	int i;
+
+	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+		struct sg_pool *sgp = sg_pools + i;
+		int size = sgp->size * sizeof(struct scatterlist);
+
+		sgp->slab = kmem_cache_create(sgp->name, size, 0,
+				SLAB_HWCACHE_ALIGN, NULL);
+		if (!sgp->slab) {
+			printk(KERN_ERR "SG_POOL: can't init sg slab %s\n",
+					sgp->name);
+			goto cleanup_sdb;
+		}
+
+		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
+						     sgp->slab);
+		if (!sgp->pool) {
+			printk(KERN_ERR "SG_POOL: can't init sg mempool %s\n",
+					sgp->name);
+			goto cleanup_sdb;
+		}
+	}
+
+	return 0;
+
+cleanup_sdb:
+	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+		struct sg_pool *sgp = sg_pools + i;
+		if (sgp->pool)
+			mempool_destroy(sgp->pool);
+		if (sgp->slab)
+			kmem_cache_destroy(sgp->slab);
+	}
+
+	return -ENOMEM;
+}
+
+static __exit void sg_pool_exit(void)
+{
+	int i;
+
+	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+		struct sg_pool *sgp = sg_pools + i;
+		mempool_destroy(sgp->pool);
+		kmem_cache_destroy(sgp->slab);
+	}
+}
+
+module_init(sg_pool_init);
+module_exit(sg_pool_exit);
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 654c9d8..53ad6c0 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -42,12 +42,14 @@
 
 #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
 
+#define STACK_ALLOC_NULL_PROTECTION_BITS 1
 #define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
 #define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
 #define STACK_ALLOC_ALIGN 4
 #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
 					STACK_ALLOC_ALIGN)
-#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS)
+#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
+		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
 #define STACK_ALLOC_SLABS_CAP 1024
 #define STACK_ALLOC_MAX_SLABS \
 	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
@@ -59,6 +61,7 @@
 	struct {
 		u32 slabindex : STACK_ALLOC_INDEX_BITS;
 		u32 offset : STACK_ALLOC_OFFSET_BITS;
+		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
 	};
 };
 
@@ -136,6 +139,7 @@
 	stack->size = size;
 	stack->handle.slabindex = depot_index;
 	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+	stack->handle.valid = 1;
 	memcpy(stack->entries, entries, size * sizeof(unsigned long));
 	depot_offset += required_size;
 
@@ -210,10 +214,6 @@
 		goto fast_exit;
 
 	hash = hash_stack(trace->entries, trace->nr_entries);
-	/* Bad luck, we won't store this stack. */
-	if (hash == 0)
-		goto exit;
-
 	bucket = &stack_table[hash & STACK_HASH_MASK];
 
 	/*
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 5c88204..ecaac2c 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -10,6 +10,10 @@
 #include <linux/export.h>
 #include <linux/ctype.h>
 #include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/limits.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/string_helpers.h>
 
@@ -534,3 +538,91 @@
 	return p - dst;
 }
 EXPORT_SYMBOL(string_escape_mem);
+
+/*
+ * Return an allocated string in which special characters and double
+ * quotes have been escaped, making it safe to log in quotes.
+ */
+char *kstrdup_quotable(const char *src, gfp_t gfp)
+{
+	size_t slen, dlen;
+	char *dst;
+	const int flags = ESCAPE_HEX;
+	const char esc[] = "\f\n\r\t\v\a\e\\\"";
+
+	if (!src)
+		return NULL;
+	slen = strlen(src);
+
+	dlen = string_escape_mem(src, slen, NULL, 0, flags, esc);
+	dst = kmalloc(dlen + 1, gfp);
+	if (!dst)
+		return NULL;
+
+	WARN_ON(string_escape_mem(src, slen, dst, dlen, flags, esc) != dlen);
+	dst[dlen] = '\0';
+
+	return dst;
+}
+EXPORT_SYMBOL_GPL(kstrdup_quotable);
+
+/*
+ * Returns an allocated NULL-terminated string containing the process
+ * command line, with inter-argument NULLs replaced with spaces,
+ * and other special characters escaped.
+ */
+char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp)
+{
+	char *buffer, *quoted;
+	int i, res;
+
+	buffer = kmalloc(PAGE_SIZE, GFP_TEMPORARY);
+	if (!buffer)
+		return NULL;
+
+	res = get_cmdline(task, buffer, PAGE_SIZE - 1);
+	buffer[res] = '\0';
+
+	/* Collapse trailing NULLs, leave res pointing to last non-NULL. */
+	while (--res >= 0 && buffer[res] == '\0')
+		;
+
+	/* Replace inter-argument NULLs. */
+	for (i = 0; i <= res; i++)
+		if (buffer[i] == '\0')
+			buffer[i] = ' ';
+
+	/* Make sure result is printable. */
+	quoted = kstrdup_quotable(buffer, gfp);
+	kfree(buffer);
+	return quoted;
+}
+EXPORT_SYMBOL_GPL(kstrdup_quotable_cmdline);
+
+/*
+ * Returns an allocated NULL-terminated string containing the pathname,
+ * with special characters escaped so it can be safely logged. If
+ * there is an error, the leading character will be "<".
+ */
+char *kstrdup_quotable_file(struct file *file, gfp_t gfp)
+{
+	char *temp, *pathname;
+
+	if (!file)
+		return kstrdup("<unknown>", gfp);
+
+	/* We add 11 spaces for ' (deleted)' to be appended */
+	temp = kmalloc(PATH_MAX + 11, GFP_TEMPORARY);
+	if (!temp)
+		return kstrdup("<no_memory>", gfp);
+
+	pathname = file_path(file, temp, PATH_MAX + 11);
+	if (IS_ERR(pathname))
+		pathname = kstrdup("<too_long>", gfp);
+	else
+		pathname = kstrdup_quotable(pathname, gfp);
+
+	kfree(temp);
+	return pathname;
+}
+EXPORT_SYMBOL_GPL(kstrdup_quotable_file);
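+
+/*
+ * Example (sketch only): safely log the current task's command line.
+ *
+ *	char *cmdline = kstrdup_quotable_cmdline(current, GFP_KERNEL);
+ *
+ *	if (cmdline)
+ *		pr_info("exec: \"%s\"\n", cmdline);
+ *	kfree(cmdline);
+ */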
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 27a7a26..93f4501 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -2444,6 +2444,22 @@
 		{ { 0, 4294967295U } },
 	},
 	{
+		"ALU_ADD_X: 2 + 4294967294 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_LD_IMM64(R1, 4294967294U),
+			BPF_ALU32_REG(BPF_ADD, R0, R1),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
 		"ALU64_ADD_X: 1 + 2 = 3",
 		.u.insns_int = {
 			BPF_LD_IMM64(R0, 1),
@@ -2467,6 +2483,23 @@
 		{ },
 		{ { 0, 4294967295U } },
 	},
+	{
+		"ALU64_ADD_X: 2 + 4294967294 = 4294967296",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 2),
+			BPF_LD_IMM64(R1, 4294967294U),
+			BPF_LD_IMM64(R2, 4294967296ULL),
+			BPF_ALU64_REG(BPF_ADD, R0, R1),
+			BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
+			BPF_MOV32_IMM(R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
 	/* BPF_ALU | BPF_ADD | BPF_K */
 	{
 		"ALU_ADD_K: 1 + 2 = 3",
@@ -2502,6 +2535,21 @@
 		{ { 0, 4294967295U } },
 	},
 	{
+		"ALU_ADD_K: 4294967294 + 2 = 0",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967294U),
+			BPF_ALU32_IMM(BPF_ADD, R0, 2),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
 		"ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
 		.u.insns_int = {
 			BPF_LD_IMM64(R2, 0x0),
@@ -2518,6 +2566,70 @@
 		{ { 0, 0x1 } },
 	},
 	{
+		"ALU_ADD_K: 0 + 0xffff = 0xffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffff),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x7fffffff),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x80000000),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x80008000),
+			BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
 		"ALU64_ADD_K: 1 + 2 = 3",
 		.u.insns_int = {
 			BPF_LD_IMM64(R0, 1),
@@ -2551,6 +2663,22 @@
 		{ { 0, 2147483647 } },
 	},
 	{
+		"ALU64_ADD_K: 4294967294 + 2 = 4294967296",
+		.u.insns_int = {
+			BPF_LD_IMM64(R0, 4294967294U),
+			BPF_LD_IMM64(R1, 4294967296ULL),
+			BPF_ALU64_IMM(BPF_ADD, R0, 2),
+			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
 		"ALU64_ADD_K: 2147483646 + -2147483647 = -1",
 		.u.insns_int = {
 			BPF_LD_IMM64(R0, 2147483646),
@@ -2593,6 +2721,70 @@
 		{ },
 		{ { 0, 0x1 } },
 	},
+	{
+		"ALU64_ADD_K: 0 + 0xffff = 0xffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffff),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0x7fffffff),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffffffff80000000LL),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
+	{
+		"ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
+		.u.insns_int = {
+			BPF_LD_IMM64(R2, 0x0),
+			BPF_LD_IMM64(R3, 0xffffffff80008000LL),
+			BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
+			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+			BPF_MOV32_IMM(R0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV32_IMM(R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 0x1 } },
+	},
 	/* BPF_ALU | BPF_SUB | BPF_X */
 	{
 		"ALU_SUB_X: 3 - 1 = 2",
@@ -4222,6 +4414,20 @@
 		{ },
 		{ { 0, 1 } },
 	},
+	{
+		"JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
 	/* BPF_JMP | BPF_JGE | BPF_K */
 	{
 		"JMP_JGE_K: if (3 >= 2) return 1",
@@ -4303,7 +4509,7 @@
 		.u.insns_int = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
+			BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
 			BPF_EXIT_INSN(),
 			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
@@ -4317,7 +4523,7 @@
 		.u.insns_int = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_LD_IMM64(R1, 3),
-			BPF_JMP_IMM(BPF_JNE, R1, 0xffffffff, 1),
+			BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
 			BPF_EXIT_INSN(),
 			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
@@ -4404,6 +4610,21 @@
 		{ },
 		{ { 0, 1 } },
 	},
+	{
+		"JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -1),
+			BPF_LD_IMM64(R2, 1),
+			BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
 	/* BPF_JMP | BPF_JGE | BPF_X */
 	{
 		"JMP_JGE_X: if (3 >= 2) return 1",
@@ -4474,7 +4695,7 @@
 			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_LD_IMM64(R1, 3),
 			BPF_LD_IMM64(R2, 2),
-			BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
 			BPF_EXIT_INSN(),
 			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
@@ -4489,7 +4710,7 @@
 			BPF_ALU32_IMM(BPF_MOV, R0, 0),
 			BPF_LD_IMM64(R1, 3),
 			BPF_LD_IMM64(R2, 0xffffffff),
-			BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
 			BPF_EXIT_INSN(),
 			BPF_ALU32_IMM(BPF_MOV, R0, 1),
 			BPF_EXIT_INSN(),
@@ -5400,7 +5621,10 @@
 		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
 		memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
-		bpf_prog_select_runtime(fp);
+		/* We cannot error here as we don't need type compatibility
+		 * checks.
+		 */
+		fp = bpf_prog_select_runtime(fp, err);
 		break;
 	}
 
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 270bf72..297fdb5 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -143,7 +143,7 @@
 	struct rhashtable_iter hti;
 	struct rhash_head *pos;
 
-	err = rhashtable_walk_init(ht, &hti);
+	err = rhashtable_walk_init(ht, &hti, GFP_KERNEL);
 	if (err) {
 		pr_warn("Test failed: allocation error");
 		return;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index bfbd709..0c6317b 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -898,7 +898,7 @@
 void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
 	wait_queue_head_t *wqh = &congestion_wqh[sync];
-	enum wb_state bit;
+	enum wb_congested_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
 	if (test_and_clear_bit(bit, &congested->state))
@@ -911,7 +911,7 @@
 
 void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
 {
-	enum wb_state bit;
+	enum wb_congested_state bit;
 
 	bit = sync ? WB_sync_congested : WB_async_congested;
 	if (!test_and_set_bit(bit, &congested->state))
diff --git a/mm/compaction.c b/mm/compaction.c
index ccf97b0..8fa2540 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -852,16 +852,8 @@
 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
 							ISOLATE_UNEVICTABLE);
 
-		/*
-		 * In case of fatal failure, release everything that might
-		 * have been isolated in the previous iteration, and signal
-		 * the failure back to caller.
-		 */
-		if (!pfn) {
-			putback_movable_pages(&cc->migratepages);
-			cc->nr_migratepages = 0;
+		if (!pfn)
 			break;
-		}
 
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
 			break;
@@ -1741,7 +1733,7 @@
 
 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
 {
-	return pgdat->kcompactd_max_order > 0;
+	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
 }
 
 static bool kcompactd_node_suitable(pg_data_t *pgdat)
@@ -1805,6 +1797,8 @@
 		INIT_LIST_HEAD(&cc.freepages);
 		INIT_LIST_HEAD(&cc.migratepages);
 
+		if (kthread_should_stop())
+			return;
 		status = compact_zone(zone, &cc);
 
 		if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
diff --git a/mm/fadvise.c b/mm/fadvise.c
index b8a5bc6..b8024fa 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -97,8 +97,8 @@
 		break;
 	case POSIX_FADV_WILLNEED:
 		/* First and last PARTIAL page! */
-		start_index = offset >> PAGE_CACHE_SHIFT;
-		end_index = endbyte >> PAGE_CACHE_SHIFT;
+		start_index = offset >> PAGE_SHIFT;
+		end_index = endbyte >> PAGE_SHIFT;
 
 		/* Careful about overflow on the "+1" */
 		nrpages = end_index - start_index + 1;
@@ -124,8 +124,8 @@
 		 * preserved on the expectation that it is better to preserve
 		 * needed memory than to discard unneeded memory.
 		 */
-		start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
-		end_index = (endbyte >> PAGE_CACHE_SHIFT);
+		start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
+		end_index = (endbyte >> PAGE_SHIFT);
 
 		if (end_index >= start_index) {
 			unsigned long count = invalidate_mapping_pages(mapping,
diff --git a/mm/filemap.c b/mm/filemap.c
index a8c69c8..182b218 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -265,7 +265,7 @@
 
 	if (freepage)
 		freepage(page);
-	page_cache_release(page);
+	put_page(page);
 }
 EXPORT_SYMBOL(delete_from_page_cache);
 
@@ -352,8 +352,8 @@
 static int __filemap_fdatawait_range(struct address_space *mapping,
 				     loff_t start_byte, loff_t end_byte)
 {
-	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
-	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
+	pgoff_t index = start_byte >> PAGE_SHIFT;
+	pgoff_t end = end_byte >> PAGE_SHIFT;
 	struct pagevec pvec;
 	int nr_pages;
 	int ret = 0;
@@ -550,7 +550,7 @@
 		pgoff_t offset = old->index;
 		freepage = mapping->a_ops->freepage;
 
-		page_cache_get(new);
+		get_page(new);
 		new->mapping = mapping;
 		new->index = offset;
 
@@ -572,7 +572,7 @@
 		radix_tree_preload_end();
 		if (freepage)
 			freepage(old);
-		page_cache_release(old);
+		put_page(old);
 	}
 
 	return error;
@@ -651,7 +651,7 @@
 		return error;
 	}
 
-	page_cache_get(page);
+	get_page(page);
 	page->mapping = mapping;
 	page->index = offset;
 
@@ -675,7 +675,7 @@
 	spin_unlock_irq(&mapping->tree_lock);
 	if (!huge)
 		mem_cgroup_cancel_charge(page, memcg, false);
-	page_cache_release(page);
+	put_page(page);
 	return error;
 }
 
@@ -1083,7 +1083,7 @@
 		 * include/linux/pagemap.h for details.
 		 */
 		if (unlikely(page != *pagep)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 	}
@@ -1121,7 +1121,7 @@
 		/* Has the page been truncated? */
 		if (unlikely(page->mapping != mapping)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 		VM_BUG_ON_PAGE(page->index != offset, page);
@@ -1168,7 +1168,7 @@
 	if (fgp_flags & FGP_LOCK) {
 		if (fgp_flags & FGP_NOWAIT) {
 			if (!trylock_page(page)) {
-				page_cache_release(page);
+				put_page(page);
 				return NULL;
 			}
 		} else {
@@ -1178,7 +1178,7 @@
 		/* Has the page been truncated? */
 		if (unlikely(page->mapping != mapping)) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 		VM_BUG_ON_PAGE(page->index != offset, page);
@@ -1209,7 +1209,7 @@
 		err = add_to_page_cache_lru(page, mapping, offset,
 				gfp_mask & GFP_RECLAIM_MASK);
 		if (unlikely(err)) {
-			page_cache_release(page);
+			put_page(page);
 			page = NULL;
 			if (err == -EEXIST)
 				goto repeat;
@@ -1278,7 +1278,7 @@
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 export:
@@ -1343,7 +1343,7 @@
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -1405,7 +1405,7 @@
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -1415,7 +1415,7 @@
 		 * negatives, which is just confusing to the caller.
 		 */
 		if (page->mapping == NULL || page->index != iter.index) {
-			page_cache_release(page);
+			put_page(page);
 			break;
 		}
 
@@ -1482,7 +1482,7 @@
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -1549,7 +1549,7 @@
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 export:
@@ -1610,11 +1610,11 @@
 	unsigned int prev_offset;
 	int error = 0;
 
-	index = *ppos >> PAGE_CACHE_SHIFT;
-	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
-	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
-	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
-	offset = *ppos & ~PAGE_CACHE_MASK;
+	index = *ppos >> PAGE_SHIFT;
+	prev_index = ra->prev_pos >> PAGE_SHIFT;
+	prev_offset = ra->prev_pos & (PAGE_SIZE-1);
+	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
+	offset = *ppos & ~PAGE_MASK;
 
 	for (;;) {
 		struct page *page;
@@ -1648,7 +1648,7 @@
 			if (PageUptodate(page))
 				goto page_ok;
 
-			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+			if (inode->i_blkbits == PAGE_SHIFT ||
 					!mapping->a_ops->is_partially_uptodate)
 				goto page_not_up_to_date;
 			if (!trylock_page(page))
@@ -1672,18 +1672,18 @@
 		 */
 
 		isize = i_size_read(inode);
-		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		end_index = (isize - 1) >> PAGE_SHIFT;
 		if (unlikely(!isize || index > end_index)) {
-			page_cache_release(page);
+			put_page(page);
 			goto out;
 		}
 
 		/* nr is the maximum number of bytes to copy from this page */
-		nr = PAGE_CACHE_SIZE;
+		nr = PAGE_SIZE;
 		if (index == end_index) {
-			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+			nr = ((isize - 1) & ~PAGE_MASK) + 1;
 			if (nr <= offset) {
-				page_cache_release(page);
+				put_page(page);
 				goto out;
 			}
 		}
@@ -1711,11 +1711,11 @@
 
 		ret = copy_page_to_iter(page, offset, nr, iter);
 		offset += ret;
-		index += offset >> PAGE_CACHE_SHIFT;
-		offset &= ~PAGE_CACHE_MASK;
+		index += offset >> PAGE_SHIFT;
+		offset &= ~PAGE_MASK;
 		prev_offset = offset;
 
-		page_cache_release(page);
+		put_page(page);
 		written += ret;
 		if (!iov_iter_count(iter))
 			goto out;
@@ -1735,7 +1735,7 @@
 		/* Did it get truncated before we got the lock? */
 		if (!page->mapping) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -1757,7 +1757,7 @@
 
 		if (unlikely(error)) {
 			if (error == AOP_TRUNCATED_PAGE) {
-				page_cache_release(page);
+				put_page(page);
 				error = 0;
 				goto find_page;
 			}
@@ -1774,7 +1774,7 @@
 					 * invalidate_mapping_pages got it
 					 */
 					unlock_page(page);
-					page_cache_release(page);
+					put_page(page);
 					goto find_page;
 				}
 				unlock_page(page);
@@ -1789,7 +1789,7 @@
 
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
-		page_cache_release(page);
+		put_page(page);
 		goto out;
 
 no_cached_page:
@@ -1805,7 +1805,7 @@
 		error = add_to_page_cache_lru(page, mapping, index,
 				mapping_gfp_constraint(mapping, GFP_KERNEL));
 		if (error) {
-			page_cache_release(page);
+			put_page(page);
 			if (error == -EEXIST) {
 				error = 0;
 				goto find_page;
@@ -1817,10 +1817,10 @@
 
 out:
 	ra->prev_pos = prev_index;
-	ra->prev_pos <<= PAGE_CACHE_SHIFT;
+	ra->prev_pos <<= PAGE_SHIFT;
 	ra->prev_pos |= prev_offset;
 
-	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
+	*ppos = ((loff_t)index << PAGE_SHIFT) + offset;
 	file_accessed(filp);
 	return written ? written : error;
 }
@@ -1838,8 +1838,6 @@
 {
 	struct file *file = iocb->ki_filp;
 	ssize_t retval = 0;
-	loff_t *ppos = &iocb->ki_pos;
-	loff_t pos = *ppos;
 	size_t count = iov_iter_count(iter);
 
 	if (!count)
@@ -1851,15 +1849,15 @@
 		loff_t size;
 
 		size = i_size_read(inode);
-		retval = filemap_write_and_wait_range(mapping, pos,
-					pos + count - 1);
+		retval = filemap_write_and_wait_range(mapping, iocb->ki_pos,
+					iocb->ki_pos + count - 1);
 		if (!retval) {
 			struct iov_iter data = *iter;
-			retval = mapping->a_ops->direct_IO(iocb, &data, pos);
+			retval = mapping->a_ops->direct_IO(iocb, &data);
 		}
 
 		if (retval > 0) {
-			*ppos = pos + retval;
+			iocb->ki_pos += retval;
 			iov_iter_advance(iter, retval);
 		}
 
@@ -1872,14 +1870,14 @@
 		 * the rest of the read.  Buffered reads will not work for
 		 * DAX files, so don't bother trying.
 		 */
-		if (retval < 0 || !iov_iter_count(iter) || *ppos >= size ||
+		if (retval < 0 || !iov_iter_count(iter) || iocb->ki_pos >= size ||
 		    IS_DAX(inode)) {
 			file_accessed(file);
 			goto out;
 		}
 	}
 
-	retval = do_generic_file_read(file, ppos, iter, retval);
+	retval = do_generic_file_read(file, &iocb->ki_pos, iter, retval);
 out:
 	return retval;
 }
@@ -1912,7 +1910,7 @@
 		else if (ret == -EEXIST)
 			ret = 0; /* losing race to add is OK */
 
-		page_cache_release(page);
+		put_page(page);
 
 	} while (ret == AOP_TRUNCATED_PAGE);
 
@@ -2022,8 +2020,8 @@
 	loff_t size;
 	int ret = 0;
 
-	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
-	if (offset >= size >> PAGE_CACHE_SHIFT)
+	size = round_up(i_size_read(inode), PAGE_SIZE);
+	if (offset >= size >> PAGE_SHIFT)
 		return VM_FAULT_SIGBUS;
 
 	/*
@@ -2049,7 +2047,7 @@
 	}
 
 	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
-		page_cache_release(page);
+		put_page(page);
 		return ret | VM_FAULT_RETRY;
 	}
 
@@ -2072,10 +2070,10 @@
 	 * Found the page and have a reference on it.
 	 * We must recheck i_size under page lock.
 	 */
-	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
-	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
+	size = round_up(i_size_read(inode), PAGE_SIZE);
+	if (unlikely(offset >= size >> PAGE_SHIFT)) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		return VM_FAULT_SIGBUS;
 	}
 
@@ -2120,7 +2118,7 @@
 		if (!PageUptodate(page))
 			error = -EIO;
 	}
-	page_cache_release(page);
+	put_page(page);
 
 	if (!error || error == AOP_TRUNCATED_PAGE)
 		goto retry_find;
@@ -2164,7 +2162,7 @@
 
 		/* Has the page moved? */
 		if (unlikely(page != *slot)) {
-			page_cache_release(page);
+			put_page(page);
 			goto repeat;
 		}
 
@@ -2178,8 +2176,8 @@
 		if (page->mapping != mapping || !PageUptodate(page))
 			goto unlock;
 
-		size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
-		if (page->index >= size >> PAGE_CACHE_SHIFT)
+		size = round_up(i_size_read(mapping->host), PAGE_SIZE);
+		if (page->index >= size >> PAGE_SHIFT)
 			goto unlock;
 
 		pte = vmf->pte + page->index - vmf->pgoff;
@@ -2195,7 +2193,7 @@
 unlock:
 		unlock_page(page);
 skip:
-		page_cache_release(page);
+		put_page(page);
 next:
 		if (iter.index == vmf->max_pgoff)
 			break;
@@ -2278,7 +2276,7 @@
 	if (!IS_ERR(page)) {
 		wait_on_page_locked(page);
 		if (!PageUptodate(page)) {
-			page_cache_release(page);
+			put_page(page);
 			page = ERR_PTR(-EIO);
 		}
 	}
@@ -2301,7 +2299,7 @@
 			return ERR_PTR(-ENOMEM);
 		err = add_to_page_cache_lru(page, mapping, index, gfp);
 		if (unlikely(err)) {
-			page_cache_release(page);
+			put_page(page);
 			if (err == -EEXIST)
 				goto repeat;
 			/* Presumably ENOMEM for radix tree node */
@@ -2311,7 +2309,7 @@
 filler:
 		err = filler(data, page);
 		if (err < 0) {
-			page_cache_release(page);
+			put_page(page);
 			return ERR_PTR(err);
 		}
 
@@ -2364,7 +2362,7 @@
 	/* Case c or d, restart the operation */
 	if (!page->mapping) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		goto repeat;
 	}
 
@@ -2500,18 +2498,19 @@
 EXPORT_SYMBOL(pagecache_write_end);
 
 ssize_t
-generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
+generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file	*file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode	*inode = mapping->host;
+	loff_t		pos = iocb->ki_pos;
 	ssize_t		written;
 	size_t		write_len;
 	pgoff_t		end;
 	struct iov_iter data;
 
 	write_len = iov_iter_count(from);
-	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
+	end = (pos + write_len - 1) >> PAGE_SHIFT;
 
 	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
 	if (written)
@@ -2525,7 +2524,7 @@
 	 */
 	if (mapping->nrpages) {
 		written = invalidate_inode_pages2_range(mapping,
-					pos >> PAGE_CACHE_SHIFT, end);
+					pos >> PAGE_SHIFT, end);
 		/*
 		 * If a page can not be invalidated, return 0 to fall back
 		 * to buffered write.
@@ -2538,7 +2537,7 @@
 	}
 
 	data = *from;
-	written = mapping->a_ops->direct_IO(iocb, &data, pos);
+	written = mapping->a_ops->direct_IO(iocb, &data);
 
 	/*
 	 * Finally, try again to invalidate clean pages which might have been
@@ -2550,7 +2549,7 @@
 	 */
 	if (mapping->nrpages) {
 		invalidate_inode_pages2_range(mapping,
-					      pos >> PAGE_CACHE_SHIFT, end);
+					      pos >> PAGE_SHIFT, end);
 	}
 
 	if (written > 0) {
@@ -2611,8 +2610,8 @@
 		size_t copied;		/* Bytes copied from user */
 		void *fsdata;
 
-		offset = (pos & (PAGE_CACHE_SIZE - 1));
-		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+		offset = (pos & (PAGE_SIZE - 1));
+		bytes = min_t(unsigned long, PAGE_SIZE - offset,
 						iov_iter_count(i));
 
 again:
@@ -2665,7 +2664,7 @@
 			 * because not all segments in the iov can be copied at
 			 * once without a pagefault.
 			 */
-			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+			bytes = min_t(unsigned long, PAGE_SIZE - offset,
 						iov_iter_single_seg_count(i));
 			goto again;
 		}
@@ -2718,7 +2717,7 @@
 	if (iocb->ki_flags & IOCB_DIRECT) {
 		loff_t pos, endbyte;
 
-		written = generic_file_direct_write(iocb, from, iocb->ki_pos);
+		written = generic_file_direct_write(iocb, from);
 		/*
 		 * If the write stopped short of completing, fall back to
 		 * buffered writes.  Some filesystems do this for writes to
@@ -2752,8 +2751,8 @@
 			iocb->ki_pos = endbyte + 1;
 			written += status;
 			invalidate_mapping_pages(mapping,
-						 pos >> PAGE_CACHE_SHIFT,
-						 endbyte >> PAGE_CACHE_SHIFT);
+						 pos >> PAGE_SHIFT,
+						 endbyte >> PAGE_SHIFT);
 		} else {
 			/*
 			 * We don't know how much we wrote, so just return
@@ -2792,13 +2791,8 @@
 		ret = __generic_file_write_iter(iocb, from);
 	inode_unlock(inode);
 
-	if (ret > 0) {
-		ssize_t err;
-
-		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
-		if (err < 0)
-			ret = err;
-	}
+	if (ret > 0)
+		ret = generic_write_sync(iocb, ret);
 	return ret;
 }
 EXPORT_SYMBOL(generic_file_write_iter);
diff --git a/mm/gup.c b/mm/gup.c
index 7f1c4fb..c057784 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1,4 +1,3 @@
-#define __DISABLE_GUP_DEPRECATED 1
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -839,7 +838,7 @@
  *      if (locked)
  *          up_read(&mm->mmap_sem);
  */
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 			   int write, int force, struct page **pages,
 			   int *locked)
 {
@@ -847,7 +846,7 @@
 				       write, force, pages, NULL, locked, true,
 				       FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
 
 /*
  * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
@@ -892,13 +891,13 @@
  * or if "force" shall be set to 1 (get_user_pages_fast misses the
  * "force" parameter).
  */
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     int write, int force, struct page **pages)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
 					 write, force, pages, FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /*
  * get_user_pages_remote() - pin user pages in memory
@@ -972,7 +971,7 @@
  * and mm being operated on are the current task's.  We also
  * obviously don't pass FOLL_REMOTE in here.
  */
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
 		int write, int force, struct page **pages,
 		struct vm_area_struct **vmas)
 {
@@ -980,7 +979,7 @@
 				       write, force, pages, vmas, NULL, false,
 				       FOLL_TOUCH);
 }
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
 
 /**
  * populate_vma_page_range() -  populate a range of pages in the vma.
@@ -1107,7 +1106,7 @@
  * @addr: user address
  *
  * Returns struct page pointer of user page pinned for dump,
- * to be freed afterwards by page_cache_release() or put_page().
+ * to be freed afterwards by put_page().
  *
  * Returns NULL on any kind of failure - a hole must then be inserted into
  * the corefile, to preserve alignment with its headers; and also returns
@@ -1491,7 +1490,6 @@
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages)
 {
-	struct mm_struct *mm = current->mm;
 	int nr, ret;
 
 	start &= PAGE_MASK;
@@ -1503,8 +1501,7 @@
 		start += nr << PAGE_SHIFT;
 		pages += nr;
 
-		ret = get_user_pages_unlocked(current, mm, start,
-					      nr_pages - nr, write, 0, pages);
+		ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
 
 		/* Have to be a bit careful with return values */
 		if (nr > 0) {
@@ -1519,38 +1516,3 @@
 }
 
 #endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, unsigned long nr_pages,
-		     int write, int force, struct page **pages,
-		     struct vm_area_struct **vmas)
-{
-	WARN_ONCE(tsk != current, "get_user_pages() called on remote task");
-	WARN_ONCE(mm != current->mm, "get_user_pages() called on remote mm");
-
-	return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-			    unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages, int *locked)
-{
-	WARN_ONCE(tsk != current, "get_user_pages_locked() called on remote task");
-	WARN_ONCE(mm != current->mm, "get_user_pages_locked() called on remote mm");
-
-	return get_user_pages_locked6(start, nr_pages, write, force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-				  unsigned long start, unsigned long nr_pages,
-				  int write, int force, struct page **pages)
-{
-	WARN_ONCE(tsk != current, "get_user_pages_unlocked() called on remote task");
-	WARN_ONCE(mm != current->mm, "get_user_pages_unlocked() called on remote mm");
-
-	return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86f9f8b..b49ee126 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -232,7 +232,7 @@
 	return READ_ONCE(huge_zero_page);
 }
 
-static void put_huge_zero_page(void)
+void put_huge_zero_page(void)
 {
 	/*
 	 * Counter should never go to zero here. Only shrinker can put
@@ -1298,15 +1298,9 @@
 	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
 	/*
 	 * We can only reuse the page if nobody else maps the huge page or it's
-	 * part. We can do it by checking page_mapcount() on each sub-page, but
-	 * it's expensive.
-	 * The cheaper way is to check page_count() to be equal 1: every
-	 * mapcount takes page reference reference, so this way we can
-	 * guarantee, that the PMD is the only mapping.
-	 * This can give false negative if somebody pinned the page, but that's
-	 * fine.
+	 * part.
 	 */
-	if (page_mapcount(page) == 1 && page_count(page) == 1) {
+	if (page_trans_huge_mapcount(page, NULL) == 1) {
 		pmd_t entry;
 		entry = pmd_mkyoung(orig_pmd);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
@@ -1684,12 +1678,12 @@
 	if (vma_is_dax(vma)) {
 		spin_unlock(ptl);
 		if (is_huge_zero_pmd(orig_pmd))
-			put_huge_zero_page();
+			tlb_remove_page(tlb, pmd_page(orig_pmd));
 	} else if (is_huge_zero_pmd(orig_pmd)) {
 		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
 		atomic_long_dec(&tlb->mm->nr_ptes);
 		spin_unlock(ptl);
-		put_huge_zero_page();
+		tlb_remove_page(tlb, pmd_page(orig_pmd));
 	} else {
 		struct page *page = pmd_page(orig_pmd);
 		page_remove_rmap(page, true);
@@ -1960,10 +1954,9 @@
 		 * page fault if needed.
 		 */
 		return 0;
-	if (vma->vm_ops)
+	if (vma->vm_ops || (vm_flags & VM_NO_THP))
 		/* khugepaged not yet working on file or special mappings */
 		return 0;
-	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (hstart < hend)
@@ -2080,7 +2073,8 @@
 		if (pte_write(pteval)) {
 			writable = true;
 		} else {
-			if (PageSwapCache(page) && !reuse_swap_page(page)) {
+			if (PageSwapCache(page) &&
+			    !reuse_swap_page(page, NULL)) {
 				unlock_page(page);
 				result = SCAN_SWAP_CACHE_PAGE;
 				goto out;
@@ -2352,8 +2346,7 @@
 		return false;
 	if (is_vma_temporary_stack(vma))
 		return false;
-	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
-	return true;
+	return !(vma->vm_flags & VM_NO_THP);
 }
 
 static void collapse_huge_page(struct mm_struct *mm,
@@ -3225,6 +3218,64 @@
 }
 
 /*
+ * This calculates accurately how many mappings a transparent hugepage
+ * has (unlike page_mapcount() which isn't fully accurate). This full
+ * accuracy is primarily needed to know if copy-on-write faults can
+ * reuse the page and change the mapping to read-write instead of
+ * copying them. At the same time this returns the total_mapcount too.
+ *
+ * The function returns the highest mapcount any one of the subpages
+ * has. If the return value is one, even if different processes are
+ * mapping different subpages of the transparent hugepage, they can
+ * all reuse it, because each process is reusing a different subpage.
+ *
+ * The total_mapcount is instead counting all virtual mappings of the
+ * subpages. If the total_mapcount is equal to "one", it tells the
+ * caller all mappings belong to the same "mm" and in turn the
+ * anon_vma of the transparent hugepage can become the vma->anon_vma
+ * local one as no other process may be mapping any of the subpages.
+ *
+ * It would be more accurate to replace page_mapcount() with
+ * page_trans_huge_mapcount(), however we only use
+ * page_trans_huge_mapcount() in the copy-on-write faults where we
+ * need full accuracy to avoid breaking page pinning, because
+ * page_trans_huge_mapcount() is slower than page_mapcount().
+ */
+int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
+{
+	int i, ret, _total_mapcount, mapcount;
+
+	/* hugetlbfs shouldn't call it */
+	VM_BUG_ON_PAGE(PageHuge(page), page);
+
+	if (likely(!PageTransCompound(page))) {
+		mapcount = atomic_read(&page->_mapcount) + 1;
+		if (total_mapcount)
+			*total_mapcount = mapcount;
+		return mapcount;
+	}
+
+	page = compound_head(page);
+
+	_total_mapcount = ret = 0;
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		mapcount = atomic_read(&page[i]._mapcount) + 1;
+		ret = max(ret, mapcount);
+		_total_mapcount += mapcount;
+	}
+	if (PageDoubleMap(page)) {
+		ret -= 1;
+		_total_mapcount -= HPAGE_PMD_NR;
+	}
+	mapcount = compound_mapcount(page);
+	ret += mapcount;
+	_total_mapcount += mapcount;
+	if (total_mapcount)
+		*total_mapcount = _total_mapcount;
+	return ret;
+}
+
+/*
  * This function splits huge page into normal pages. @page can point to any
  * subpage of huge page to split. Split doesn't change the position of @page.
  *
@@ -3454,7 +3505,7 @@
 		}
 	}
 
-	pr_info("%lu of %lu THP split", split, total);
+	pr_info("%lu of %lu THP split\n", split, total);
 
 	return 0;
 }
@@ -3465,7 +3516,7 @@
 {
 	void *ret;
 
-	ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL,
+	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
 			&split_huge_pages_fops);
 	if (!ret)
 		pr_warn("Failed to create split_huge_pages in debugfs");
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06058eaa..19d0d08 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3346,7 +3346,7 @@
 			old_page != pagecache_page)
 		outside_reserve = 1;
 
-	page_cache_get(old_page);
+	get_page(old_page);
 
 	/*
 	 * Drop page table lock as buddy allocator may be called. It will
@@ -3364,7 +3364,7 @@
 		 * may get SIGKILLed if it later faults.
 		 */
 		if (outside_reserve) {
-			page_cache_release(old_page);
+			put_page(old_page);
 			BUG_ON(huge_pte_none(pte));
 			unmap_ref_private(mm, vma, old_page, address);
 			BUG_ON(huge_pte_none(pte));
@@ -3425,9 +3425,9 @@
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out_release_all:
-	page_cache_release(new_page);
+	put_page(new_page);
 out_release_old:
-	page_cache_release(old_page);
+	put_page(old_page);
 
 	spin_lock(ptl); /* Caller expects lock to be held */
 	return ret;
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index acb3b6c..38f1dd7 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -498,7 +498,7 @@
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_FREE;
-		set_track(&free_info->track);
+		set_track(&free_info->track, GFP_NOWAIT);
 	}
 #endif
 
diff --git a/mm/ksm.c b/mm/ksm.c
index b99e8281..4786b41 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -783,6 +783,7 @@
 		}
 
 		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
+		up_read(&mm->mmap_sem);
 
 		spin_lock(&ksm_mmlist_lock);
 		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
@@ -794,12 +795,9 @@
 
 			free_mm_slot(mm_slot);
 			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
-			up_read(&mm->mmap_sem);
 			mmdrop(mm);
-		} else {
+		} else
 			spin_unlock(&ksm_mmlist_lock);
-			up_read(&mm->mmap_sem);
-		}
 	}
 
 	/* Clean up stable nodes, but don't worry if some are still busy */
@@ -1663,8 +1661,15 @@
 		up_read(&mm->mmap_sem);
 		mmdrop(mm);
 	} else {
-		spin_unlock(&ksm_mmlist_lock);
 		up_read(&mm->mmap_sem);
+		/*
+		 * up_read(&mm->mmap_sem) first because after
+		 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
+		 * already have been freed under us by __ksm_exit()
+		 * because the "mm_slot" is still hashed and
+		 * ksm_scan.mm_slot doesn't point to it anymore.
+		 */
+		spin_unlock(&ksm_mmlist_lock);
 	}
 
 	/* Repeat until we've completed scanning the whole list */
diff --git a/mm/madvise.c b/mm/madvise.c
index a011473..07427d3 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -170,7 +170,7 @@
 		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
 								vma, index);
 		if (page)
-			page_cache_release(page);
+			put_page(page);
 	}
 
 	return 0;
@@ -204,14 +204,14 @@
 		page = find_get_entry(mapping, index);
 		if (!radix_tree_exceptional_entry(page)) {
 			if (page)
-				page_cache_release(page);
+				put_page(page);
 			continue;
 		}
 		swap = radix_to_swp_entry(page);
 		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
 								NULL, 0);
 		if (page)
-			page_cache_release(page);
+			put_page(page);
 	}
 
 	lru_add_drain();	/* Push any new pages onto the LRU now */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36db05f..fe787f5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -207,6 +207,7 @@
 /* "mc" and its members are protected by cgroup_mutex */
 static struct move_charge_struct {
 	spinlock_t	  lock; /* for from, to */
+	struct mm_struct  *mm;
 	struct mem_cgroup *from;
 	struct mem_cgroup *to;
 	unsigned long flags;
@@ -4667,6 +4668,8 @@
 
 static void mem_cgroup_clear_mc(void)
 {
+	struct mm_struct *mm = mc.mm;
+
 	/*
 	 * we must clear moving_task before waking up waiters at the end of
 	 * task migration.
@@ -4676,7 +4679,10 @@
 	spin_lock(&mc.lock);
 	mc.from = NULL;
 	mc.to = NULL;
+	mc.mm = NULL;
 	spin_unlock(&mc.lock);
+
+	mmput(mm);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4733,6 +4739,7 @@
 		VM_BUG_ON(mc.moved_swap);
 
 		spin_lock(&mc.lock);
+		mc.mm = mm;
 		mc.from = from;
 		mc.to = memcg;
 		mc.flags = move_flags;
@@ -4742,8 +4749,9 @@
 		ret = mem_cgroup_precharge_mc(mm);
 		if (ret)
 			mem_cgroup_clear_mc();
+	} else {
+		mmput(mm);
 	}
-	mmput(mm);
 	return ret;
 }
 
@@ -4852,11 +4860,11 @@
 	return ret;
 }
 
-static void mem_cgroup_move_charge(struct mm_struct *mm)
+static void mem_cgroup_move_charge(void)
 {
 	struct mm_walk mem_cgroup_move_charge_walk = {
 		.pmd_entry = mem_cgroup_move_charge_pte_range,
-		.mm = mm,
+		.mm = mc.mm,
 	};
 
 	lru_add_drain_all();
@@ -4868,7 +4876,7 @@
 	atomic_inc(&mc.from->moving_account);
 	synchronize_rcu();
 retry:
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
 		/*
 		 * Someone who are holding the mmap_sem might be waiting in
 		 * waitq. So we cancel all extra charges, wake up all waiters,
@@ -4885,23 +4893,16 @@
 	 * additional charge, the page walk just aborts.
 	 */
 	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
-	up_read(&mm->mmap_sem);
+	up_read(&mc.mm->mmap_sem);
 	atomic_dec(&mc.from->moving_account);
 }
 
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
 {
-	struct cgroup_subsys_state *css;
-	struct task_struct *p = cgroup_taskset_first(tset, &css);
-	struct mm_struct *mm = get_task_mm(p);
-
-	if (mm) {
-		if (mc.to)
-			mem_cgroup_move_charge(mm);
-		mmput(mm);
-	}
-	if (mc.to)
+	if (mc.to) {
+		mem_cgroup_move_charge();
 		mem_cgroup_clear_mc();
+	}
 }
 #else	/* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4911,7 +4912,7 @@
 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
 }
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
 {
 }
 #endif
@@ -5195,7 +5196,7 @@
 	.css_reset = mem_cgroup_css_reset,
 	.can_attach = mem_cgroup_can_attach,
 	.cancel_attach = mem_cgroup_cancel_attach,
-	.attach = mem_cgroup_move_task,
+	.post_attach = mem_cgroup_move_task,
 	.bind = mem_cgroup_bind,
 	.dfl_cftypes = memory_files,
 	.legacy_cftypes = mem_cgroup_legacy_files,
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5a544c6..ca5acee 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -538,7 +538,7 @@
 		/*
 		 * drop the page count elevated by isolate_lru_page()
 		 */
-		page_cache_release(p);
+		put_page(p);
 		return 0;
 	}
 	return -EIO;
@@ -888,7 +888,15 @@
 		}
 	}
 
-	return get_page_unless_zero(head);
+	if (get_page_unless_zero(head)) {
+		if (head == compound_head(page))
+			return 1;
+
+		pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page));
+		put_page(head);
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(get_hwpoison_page);
 
diff --git a/mm/memory.c b/mm/memory.c
index 098f00d..07493e3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -789,6 +789,46 @@
 	return pfn_to_page(pfn);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+				pmd_t pmd)
+{
+	unsigned long pfn = pmd_pfn(pmd);
+
+	/*
+	 * There is no pmd_special() but there may be special pmds, e.g.
+	 * in a direct-access (dax) mapping, so let's just replicate the
+	 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+	 */
+	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+		if (vma->vm_flags & VM_MIXEDMAP) {
+			if (!pfn_valid(pfn))
+				return NULL;
+			goto out;
+		} else {
+			unsigned long off;
+			off = (addr - vma->vm_start) >> PAGE_SHIFT;
+			if (pfn == vma->vm_pgoff + off)
+				return NULL;
+			if (!is_cow_mapping(vma->vm_flags))
+				return NULL;
+		}
+	}
+
+	if (is_zero_pfn(pfn))
+		return NULL;
+	if (unlikely(pfn > highest_memmap_pfn))
+		return NULL;
+
+	/*
+	 * NOTE! We still have PageReserved() pages in the page tables.
+	 * eg. VDSO mappings can cause them to exist.
+	 */
+out:
+	return pfn_to_page(pfn);
+}
+#endif
+
 /*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
@@ -1182,15 +1222,8 @@
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE) {
-#ifdef CONFIG_DEBUG_VM
-				if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
-					pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
-						__func__, addr, end,
-						vma->vm_start,
-						vma->vm_end);
-					BUG();
-				}
-#endif
+				VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
+				    !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
 				split_huge_pmd(vma, pmd, addr);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
@@ -2054,7 +2087,7 @@
 		VM_BUG_ON_PAGE(PageAnon(page), page);
 		mapping = page->mapping;
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		if ((dirtied || page_mkwrite) && mapping) {
 			/*
@@ -2188,7 +2221,7 @@
 	}
 
 	if (new_page)
-		page_cache_release(new_page);
+		put_page(new_page);
 
 	pte_unmap_unlock(page_table, ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
@@ -2203,14 +2236,14 @@
 				munlock_vma_page(old_page);
 			unlock_page(old_page);
 		}
-		page_cache_release(old_page);
+		put_page(old_page);
 	}
 	return page_copied ? VM_FAULT_WRITE : 0;
 oom_free_new:
-	page_cache_release(new_page);
+	put_page(new_page);
 oom:
 	if (old_page)
-		page_cache_release(old_page);
+		put_page(old_page);
 	return VM_FAULT_OOM;
 }
 
@@ -2258,7 +2291,7 @@
 {
 	int page_mkwrite = 0;
 
-	page_cache_get(old_page);
+	get_page(old_page);
 
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 		int tmp;
@@ -2267,7 +2300,7 @@
 		tmp = do_page_mkwrite(vma, old_page, address);
 		if (unlikely(!tmp || (tmp &
 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			page_cache_release(old_page);
+			put_page(old_page);
 			return tmp;
 		}
 		/*
@@ -2281,7 +2314,7 @@
 		if (!pte_same(*page_table, orig_pte)) {
 			unlock_page(old_page);
 			pte_unmap_unlock(page_table, ptl);
-			page_cache_release(old_page);
+			put_page(old_page);
 			return 0;
 		}
 		page_mkwrite = 1;
@@ -2340,8 +2373,9 @@
 	 * not dirty accountable.
 	 */
 	if (PageAnon(old_page) && !PageKsm(old_page)) {
+		int total_mapcount;
 		if (!trylock_page(old_page)) {
-			page_cache_get(old_page);
+			get_page(old_page);
 			pte_unmap_unlock(page_table, ptl);
 			lock_page(old_page);
 			page_table = pte_offset_map_lock(mm, pmd, address,
@@ -2349,18 +2383,23 @@
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
 				pte_unmap_unlock(page_table, ptl);
-				page_cache_release(old_page);
+				put_page(old_page);
 				return 0;
 			}
-			page_cache_release(old_page);
+			put_page(old_page);
 		}
-		if (reuse_swap_page(old_page)) {
-			/*
-			 * The page is all ours.  Move it to our anon_vma so
-			 * the rmap code will not search our parent or siblings.
-			 * Protected against the rmap code by the page lock.
-			 */
-			page_move_anon_rmap(old_page, vma, address);
+		if (reuse_swap_page(old_page, &total_mapcount)) {
+			if (total_mapcount == 1) {
+				/*
+				 * The page is all ours. Move it to
+				 * our anon_vma so the rmap code will
+				 * not search our parent or siblings.
+				 * Protected against the rmap code by
+				 * the page lock.
+				 */
+				page_move_anon_rmap(compound_head(old_page),
+						    vma, address);
+			}
 			unlock_page(old_page);
 			return wp_page_reuse(mm, vma, address, page_table, ptl,
 					     orig_pte, old_page, 0, 0);
@@ -2375,7 +2414,7 @@
 	/*
 	 * Ok, we need to copy. Oh, well..
 	 */
-	page_cache_get(old_page);
+	get_page(old_page);
 
 	pte_unmap_unlock(page_table, ptl);
 	return wp_page_copy(mm, vma, address, page_table, pmd,
@@ -2400,7 +2439,6 @@
 
 		vba = vma->vm_pgoff;
 		vea = vba + vma_pages(vma) - 1;
-		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
 		zba = details->first_index;
 		if (zba < vba)
 			zba = vba;
@@ -2585,7 +2623,7 @@
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
 	dec_mm_counter_fast(mm, MM_SWAPENTS);
 	pte = mk_pte(page, vma->vm_page_prot);
-	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
+	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		flags &= ~FAULT_FLAG_WRITE;
 		ret |= VM_FAULT_WRITE;
@@ -2619,7 +2657,7 @@
 		 * parallel locked swapcache.
 		 */
 		unlock_page(swapcache);
-		page_cache_release(swapcache);
+		put_page(swapcache);
 	}
 
 	if (flags & FAULT_FLAG_WRITE) {
@@ -2641,10 +2679,10 @@
 out_page:
 	unlock_page(page);
 out_release:
-	page_cache_release(page);
+	put_page(page);
 	if (page != swapcache) {
 		unlock_page(swapcache);
-		page_cache_release(swapcache);
+		put_page(swapcache);
 	}
 	return ret;
 }
@@ -2752,7 +2790,7 @@
 	if (userfaultfd_missing(vma)) {
 		pte_unmap_unlock(page_table, ptl);
 		mem_cgroup_cancel_charge(page, memcg, false);
-		page_cache_release(page);
+		put_page(page);
 		return handle_userfault(vma, address, flags,
 					VM_UFFD_MISSING);
 	}
@@ -2771,10 +2809,10 @@
 	return 0;
 release:
 	mem_cgroup_cancel_charge(page, memcg, false);
-	page_cache_release(page);
+	put_page(page);
 	goto unlock;
 oom_free_page:
-	page_cache_release(page);
+	put_page(page);
 oom:
 	return VM_FAULT_OOM;
 }
@@ -2807,7 +2845,7 @@
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf.page);
-		page_cache_release(vmf.page);
+		put_page(vmf.page);
 		return VM_FAULT_HWPOISON;
 	}
 
@@ -2996,7 +3034,7 @@
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
 		unlock_page(fault_page);
-		page_cache_release(fault_page);
+		put_page(fault_page);
 		return ret;
 	}
 	do_set_pte(vma, address, fault_page, pte, false, false);
@@ -3024,7 +3062,7 @@
 		return VM_FAULT_OOM;
 
 	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) {
-		page_cache_release(new_page);
+		put_page(new_page);
 		return VM_FAULT_OOM;
 	}
 
@@ -3041,7 +3079,7 @@
 		pte_unmap_unlock(pte, ptl);
 		if (fault_page) {
 			unlock_page(fault_page);
-			page_cache_release(fault_page);
+			put_page(fault_page);
 		} else {
 			/*
 			 * The fault handler has no page to lock, so it holds
@@ -3057,7 +3095,7 @@
 	pte_unmap_unlock(pte, ptl);
 	if (fault_page) {
 		unlock_page(fault_page);
-		page_cache_release(fault_page);
+		put_page(fault_page);
 	} else {
 		/*
 		 * The fault handler has no page to lock, so it holds
@@ -3068,7 +3106,7 @@
 	return ret;
 uncharge_out:
 	mem_cgroup_cancel_charge(new_page, memcg, false);
-	page_cache_release(new_page);
+	put_page(new_page);
 	return ret;
 }
 
@@ -3096,7 +3134,7 @@
 		tmp = do_page_mkwrite(vma, fault_page, address);
 		if (unlikely(!tmp ||
 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			page_cache_release(fault_page);
+			put_page(fault_page);
 			return tmp;
 		}
 	}
@@ -3105,7 +3143,7 @@
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
 		unlock_page(fault_page);
-		page_cache_release(fault_page);
+		put_page(fault_page);
 		return ret;
 	}
 	do_set_pte(vma, address, fault_page, pte, true, false);
@@ -3736,7 +3774,7 @@
 						    buf, maddr + offset, bytes);
 			}
 			kunmap(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 		len -= bytes;
 		buf += bytes;
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c822a7..f9dfb18 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -975,7 +975,13 @@
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
 		/* Soft-offlined page shouldn't go through lru cache list */
-		if (reason == MR_MEMORY_FAILURE) {
+		if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+			/*
+			 * With this release, we free successfully migrated
+			 * page and set PG_HWPoison on just freed page
+			 * intentionally. Although it's rather weird, it's how
+			 * HWPoison flag works at the moment.
+			 */
 			put_page(page);
 			if (!test_set_page_hwpoison(page))
 				num_poisoned_pages_inc();
diff --git a/mm/mincore.c b/mm/mincore.c
index 563f320..c0b5ba9 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -75,7 +75,7 @@
 #endif
 	if (page) {
 		present = PageUptodate(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	return present;
@@ -211,7 +211,7 @@
  * return values:
  *  zero    - success
  *  -EFAULT - vec points to an illegal address
- *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
+ *  -EINVAL - addr is not a multiple of PAGE_SIZE
  *  -ENOMEM - Addresses in the range [addr, addr + len] are
  *		invalid for the address space of this process, or
  *		specify one or more pages which are not currently
@@ -226,14 +226,14 @@
 	unsigned char *tmp;
 
 	/* Check the start address: needs to be page-aligned.. */
- 	if (start & ~PAGE_CACHE_MASK)
+	if (start & ~PAGE_MASK)
 		return -EINVAL;
 
 	/* ..and we need to be passed a valid user-space range */
 	if (!access_ok(VERIFY_READ, (void __user *) start, len))
 		return -ENOMEM;
 
-	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
+	/* This also avoids any overflows on PAGE_ALIGN */
 	pages = len >> PAGE_SHIFT;
 	pages += (offset_in_page(len)) != 0;
 
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index f802c2d..6f4d27c 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -4,9 +4,9 @@
  */
 
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <linux/mmu_context.h>
 #include <linux/export.h>
-#include <linux/sched.h>
 
 #include <asm/mmu_context.h>
 
diff --git a/mm/nommu.c b/mm/nommu.c
index de8b6b6..c8bd59a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -15,8 +15,6 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define __DISABLE_GUP_DEPRECATED
-
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/vmacache.h>
@@ -141,7 +139,7 @@
 		if (pages) {
 			pages[i] = virt_to_page(start);
 			if (pages[i])
-				page_cache_get(pages[i]);
+				get_page(pages[i]);
 		}
 		if (vmas)
 			vmas[i] = vma;
@@ -161,7 +159,7 @@
  *   slab page or a secondary page from a compound page
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages,
 		    struct vm_area_struct **vmas)
 {
@@ -175,15 +173,15 @@
 	return __get_user_pages(current, current->mm, start, nr_pages, flags,
 				pages, vmas, NULL);
 }
-EXPORT_SYMBOL(get_user_pages6);
+EXPORT_SYMBOL(get_user_pages);
 
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 			    int write, int force, struct page **pages,
 			    int *locked)
 {
-	return get_user_pages6(start, nr_pages, write, force, pages, NULL);
+	return get_user_pages(start, nr_pages, write, force, pages, NULL);
 }
-EXPORT_SYMBOL(get_user_pages_locked6);
+EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
@@ -199,13 +197,13 @@
 }
 EXPORT_SYMBOL(__get_user_pages_unlocked);
 
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     int write, int force, struct page **pages)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
 					 write, force, pages, 0);
 }
-EXPORT_SYMBOL(get_user_pages_unlocked5);
+EXPORT_SYMBOL(get_user_pages_unlocked);
 
 /**
  * follow_pfn - look up PFN at a user virtual address
@@ -1989,31 +1987,3 @@
 	return 0;
 }
 subsys_initcall(init_admin_reserve);
-
-long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, unsigned long nr_pages,
-		     int write, int force, struct page **pages,
-		     struct vm_area_struct **vmas)
-{
-	return get_user_pages6(start, nr_pages, write, force, pages, vmas);
-}
-EXPORT_SYMBOL(get_user_pages8);
-
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
-			    unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
-			    int *locked)
-{
-	return get_user_pages_locked6(start, nr_pages, write,
-				      force, pages, locked);
-}
-EXPORT_SYMBOL(get_user_pages_locked8);
-
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
-			      unsigned long start, unsigned long nr_pages,
-			      int write, int force, struct page **pages)
-{
-	return get_user_pages_unlocked5(start, nr_pages, write, force, pages);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked7);
-
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b34d279..8634958 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -547,7 +547,11 @@
 
 static void wake_oom_reaper(struct task_struct *tsk)
 {
-	if (!oom_reaper_th || tsk->oom_reaper_list)
+	if (!oom_reaper_th)
+		return;
+
+	/* tsk is already queued? */
+	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
 		return;
 
 	get_task_struct(tsk);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 11ff8f7..bc5149d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1910,7 +1910,8 @@
 	if (gdtc->dirty > gdtc->bg_thresh)
 		return true;
 
-	if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
+	if (wb_stat(wb, WB_RECLAIMABLE) >
+	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
 		return true;
 
 	if (mdtc) {
@@ -1924,7 +1925,8 @@
 		if (mdtc->dirty > mdtc->bg_thresh)
 			return true;
 
-		if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
+		if (wb_stat(wb, WB_RECLAIMABLE) >
+		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
 			return true;
 	}
 
@@ -2176,8 +2178,8 @@
 			cycled = 0;
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 			range_whole = 1;
 		cycled = 1; /* ignore range_cyclic tests */
@@ -2382,14 +2384,14 @@
 		wait_on_page_writeback(page);
 
 	if (clear_page_dirty_for_io(page)) {
-		page_cache_get(page);
+		get_page(page);
 		ret = mapping->a_ops->writepage(page, &wbc);
 		if (ret == 0 && wait) {
 			wait_on_page_writeback(page);
 			if (PageError(page))
 				ret = -EIO;
 		}
-		page_cache_release(page);
+		put_page(page);
 	} else {
 		unlock_page(page);
 	}
@@ -2431,7 +2433,7 @@
 		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_wb_stat(wb, WB_RECLAIMABLE);
 		__inc_wb_stat(wb, WB_DIRTIED);
-		task_io_account_write(PAGE_CACHE_SIZE);
+		task_io_account_write(PAGE_SIZE);
 		current->nr_dirtied++;
 		this_cpu_inc(bdp_ratelimits);
 	}
@@ -2450,7 +2452,7 @@
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 		dec_zone_page_state(page, NR_FILE_DIRTY);
 		dec_wb_stat(wb, WB_RECLAIMABLE);
-		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
+		task_io_account_cancelled_write(PAGE_SIZE);
 	}
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59de90d..c1069ef 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6485,7 +6485,7 @@
 	setup_per_zone_inactive_ratio();
 	return 0;
 }
-module_init(init_per_zone_wmark_min)
+core_initcall(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
diff --git a/mm/page_io.c b/mm/page_io.c
index 18aac78..242dba0 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -252,7 +252,7 @@
 
 static sector_t swap_page_sector(struct page *page)
 {
-	return (sector_t)__page_file_index(page) << (PAGE_CACHE_SHIFT - 9);
+	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
 }
 
 int __swap_writepage(struct page *page, struct writeback_control *wbc,
@@ -279,7 +279,7 @@
 
 		set_page_writeback(page);
 		unlock_page(page);
-		ret = mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos);
+		ret = mapping->a_ops->direct_IO(&kiocb, &from);
 		if (ret == PAGE_SIZE) {
 			count_vm_event(PSWPOUT);
 			ret = 0;
@@ -353,7 +353,11 @@
 
 	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
 	if (!ret) {
-		swap_slot_free_notify(page);
+		if (trylock_page(page)) {
+			swap_slot_free_notify(page);
+			unlock_page(page);
+		}
+
 		count_vm_event(PSWPIN);
 		return 0;
 	}
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 92c4c36..c4f5682 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -215,7 +215,7 @@
  * all pages in [start_pfn...end_pfn) must be in the same zone.
  * zone->lock must be held before call this.
  *
- * Returns 1 if all pages in the range are isolated.
+ * Returns the last tested pfn.
  */
 static unsigned long
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
@@ -289,11 +289,11 @@
 	 * now as a simple work-around, we use the next node for destination.
 	 */
 	if (PageHuge(page)) {
-		nodemask_t src = nodemask_of_node(page_to_nid(page));
-		nodemask_t dst;
-		nodes_complement(dst, src);
+		int node = next_online_node(page_to_nid(page));
+		if (node == MAX_NUMNODES)
+			node = first_online_node;
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
-					    next_node(page_to_nid(page), dst));
+					    node);
 	}
 
 	if (PageHighMem(page))
diff --git a/mm/readahead.c b/mm/readahead.c
index 20e58e8..40be3ae 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -47,11 +47,11 @@
 		if (!trylock_page(page))
 			BUG();
 		page->mapping = mapping;
-		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		do_invalidatepage(page, 0, PAGE_SIZE);
 		page->mapping = NULL;
 		unlock_page(page);
 	}
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -93,14 +93,14 @@
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
-		page_cache_release(page);
+		put_page(page);
 
 		ret = filler(data, page);
 		if (unlikely(ret)) {
 			read_cache_pages_invalidate_pages(mapping, pages);
 			break;
 		}
-		task_io_account_read(PAGE_CACHE_SIZE);
+		task_io_account_read(PAGE_SIZE);
 	}
 	return ret;
 }
@@ -130,7 +130,7 @@
 				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
 			mapping->a_ops->readpage(filp, page);
 		}
-		page_cache_release(page);
+		put_page(page);
 	}
 	ret = 0;
 
@@ -163,7 +163,7 @@
 	if (isize == 0)
 		goto out;
 
-	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+	end_index = ((isize - 1) >> PAGE_SHIFT);
 
 	/*
 	 * Preallocate as many pages as we will need.
@@ -216,7 +216,7 @@
 	while (nr_to_read) {
 		int err;
 
-		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;
+		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
 
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
@@ -425,7 +425,7 @@
 	 * trivial case: (offset - prev_offset) == 1
 	 * unaligned reads: (offset - prev_offset) == 0
 	 */
-	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
+	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
 	if (offset - prev_offset <= 1UL)
 		goto initial_readahead;
 
@@ -558,8 +558,8 @@
 	if (f.file) {
 		if (f.file->f_mode & FMODE_READ) {
 			struct address_space *mapping = f.file->f_mapping;
-			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
-			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+			pgoff_t start = offset >> PAGE_SHIFT;
+			pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
 			unsigned long len = end - start + 1;
 			ret = do_readahead(mapping, f.file, start, len);
 		}
diff --git a/mm/rmap.c b/mm/rmap.c
index c399a0d..307b555 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,19 +569,6 @@
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-	/*
-	 * All TLB entries are flushed on the assumption that it is
-	 * cheaper to flush all TLBs and let them be refilled than
-	 * flushing individual PFNs. Note that we do not track mm's
-	 * to flush as that might simply be multiple full TLB flushes
-	 * for no gain.
-	 */
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@
 
 	cpu = get_cpu();
 
-	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-		smp_call_function_many(&tlb_ubc->cpumask,
-			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		local_flush_tlb();
+		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
 	}
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
@@ -1555,7 +1541,7 @@
 
 discard:
 	page_remove_rmap(page, PageHuge(page));
-	page_cache_release(page);
+	put_page(page);
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
diff --git a/mm/shmem.c b/mm/shmem.c
index 9428c51..e684a91 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -75,8 +75,8 @@
 
 #include "internal.h"
 
-#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
-#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
+#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
+#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
 
 /* Pretend that each entry is of this size in directory's i_size */
 #define BOGO_DIRENT_SIZE 20
@@ -176,13 +176,13 @@
 static inline int shmem_acct_block(unsigned long flags)
 {
 	return (flags & VM_NORESERVE) ?
-		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
+		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_SIZE)) : 0;
 }
 
 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 {
 	if (flags & VM_NORESERVE)
-		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
+		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 }
 
 static const struct super_operations shmem_ops;
@@ -300,7 +300,7 @@
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
-	page_cache_get(page);
+	get_page(page);
 	page->mapping = mapping;
 	page->index = index;
 
@@ -318,7 +318,7 @@
 	} else {
 		page->mapping = NULL;
 		spin_unlock_irq(&mapping->tree_lock);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return error;
 }
@@ -338,7 +338,7 @@
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	__dec_zone_page_state(page, NR_SHMEM);
 	spin_unlock_irq(&mapping->tree_lock);
-	page_cache_release(page);
+	put_page(page);
 	BUG_ON(error);
 }
 
@@ -474,10 +474,10 @@
 {
 	struct address_space *mapping = inode->i_mapping;
 	struct shmem_inode_info *info = SHMEM_I(inode);
-	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
-	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
-	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
+	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
+	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
+	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
 	struct pagevec pvec;
 	pgoff_t indices[PAGEVEC_SIZE];
 	long nr_swaps_freed = 0;
@@ -530,7 +530,7 @@
 		struct page *page = NULL;
 		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
 		if (page) {
-			unsigned int top = PAGE_CACHE_SIZE;
+			unsigned int top = PAGE_SIZE;
 			if (start > end) {
 				top = partial_end;
 				partial_end = 0;
@@ -538,7 +538,7 @@
 			zero_user_segment(page, partial_start, top);
 			set_page_dirty(page);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 	}
 	if (partial_end) {
@@ -548,7 +548,7 @@
 			zero_user_segment(page, 0, partial_end);
 			set_page_dirty(page);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 	}
 	if (start >= end)
@@ -833,7 +833,7 @@
 		mem_cgroup_commit_charge(page, memcg, true, false);
 out:
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return error;
 }
 
@@ -1080,7 +1080,7 @@
 	if (!newpage)
 		return -ENOMEM;
 
-	page_cache_get(newpage);
+	get_page(newpage);
 	copy_highpage(newpage, oldpage);
 	flush_dcache_page(newpage);
 
@@ -1120,8 +1120,8 @@
 	set_page_private(oldpage, 0);
 
 	unlock_page(oldpage);
-	page_cache_release(oldpage);
-	page_cache_release(oldpage);
+	put_page(oldpage);
+	put_page(oldpage);
 	return error;
 }
 
@@ -1145,7 +1145,7 @@
 	int once = 0;
 	int alloced = 0;
 
-	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
+	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
 		return -EFBIG;
 repeat:
 	swap.val = 0;
@@ -1156,7 +1156,7 @@
 	}
 
 	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
-	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
 		error = -EINVAL;
 		goto unlock;
 	}
@@ -1169,7 +1169,7 @@
 		if (sgp != SGP_READ)
 			goto clear;
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		page = NULL;
 	}
 	if (page || (sgp == SGP_READ && !swap.val)) {
@@ -1327,7 +1327,7 @@
 
 	/* Perhaps the file has been truncated since we checked */
 	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
-	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
 		if (alloced) {
 			ClearPageDirty(page);
 			delete_from_page_cache(page);
@@ -1355,7 +1355,7 @@
 unlock:
 	if (page) {
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	if (error == -ENOSPC && !once++) {
 		info = SHMEM_I(inode);
@@ -1577,7 +1577,7 @@
 {
 	struct inode *inode = mapping->host;
 	struct shmem_inode_info *info = SHMEM_I(inode);
-	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t index = pos >> PAGE_SHIFT;
 
 	/* i_mutex is held by caller */
 	if (unlikely(info->seals)) {
@@ -1601,16 +1601,16 @@
 		i_size_write(inode, pos + copied);
 
 	if (!PageUptodate(page)) {
-		if (copied < PAGE_CACHE_SIZE) {
-			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+		if (copied < PAGE_SIZE) {
+			unsigned from = pos & (PAGE_SIZE - 1);
 			zero_user_segments(page, 0, from,
-					from + copied, PAGE_CACHE_SIZE);
+					from + copied, PAGE_SIZE);
 		}
 		SetPageUptodate(page);
 	}
 	set_page_dirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 
 	return copied;
 }
@@ -1635,8 +1635,8 @@
 	if (!iter_is_iovec(to))
 		sgp = SGP_DIRTY;
 
-	index = *ppos >> PAGE_CACHE_SHIFT;
-	offset = *ppos & ~PAGE_CACHE_MASK;
+	index = *ppos >> PAGE_SHIFT;
+	offset = *ppos & ~PAGE_MASK;
 
 	for (;;) {
 		struct page *page = NULL;
@@ -1644,11 +1644,11 @@
 		unsigned long nr, ret;
 		loff_t i_size = i_size_read(inode);
 
-		end_index = i_size >> PAGE_CACHE_SHIFT;
+		end_index = i_size >> PAGE_SHIFT;
 		if (index > end_index)
 			break;
 		if (index == end_index) {
-			nr = i_size & ~PAGE_CACHE_MASK;
+			nr = i_size & ~PAGE_MASK;
 			if (nr <= offset)
 				break;
 		}
@@ -1666,14 +1666,14 @@
 		 * We must evaluate after, since reads (unlike writes)
 		 * are called without i_mutex protection against truncate
 		 */
-		nr = PAGE_CACHE_SIZE;
+		nr = PAGE_SIZE;
 		i_size = i_size_read(inode);
-		end_index = i_size >> PAGE_CACHE_SHIFT;
+		end_index = i_size >> PAGE_SHIFT;
 		if (index == end_index) {
-			nr = i_size & ~PAGE_CACHE_MASK;
+			nr = i_size & ~PAGE_MASK;
 			if (nr <= offset) {
 				if (page)
-					page_cache_release(page);
+					put_page(page);
 				break;
 			}
 		}
@@ -1694,7 +1694,7 @@
 				mark_page_accessed(page);
 		} else {
 			page = ZERO_PAGE(0);
-			page_cache_get(page);
+			get_page(page);
 		}
 
 		/*
@@ -1704,10 +1704,10 @@
 		ret = copy_page_to_iter(page, offset, nr, to);
 		retval += ret;
 		offset += ret;
-		index += offset >> PAGE_CACHE_SHIFT;
-		offset &= ~PAGE_CACHE_MASK;
+		index += offset >> PAGE_SHIFT;
+		offset &= ~PAGE_MASK;
 
-		page_cache_release(page);
+		put_page(page);
 		if (!iov_iter_count(to))
 			break;
 		if (ret < nr) {
@@ -1717,7 +1717,7 @@
 		cond_resched();
 	}
 
-	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
 	file_accessed(file);
 	return retval ? retval : error;
 }
@@ -1755,9 +1755,9 @@
 	if (splice_grow_spd(pipe, &spd))
 		return -ENOMEM;
 
-	index = *ppos >> PAGE_CACHE_SHIFT;
-	loff = *ppos & ~PAGE_CACHE_MASK;
-	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	index = *ppos >> PAGE_SHIFT;
+	loff = *ppos & ~PAGE_MASK;
+	req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	nr_pages = min(req_pages, spd.nr_pages_max);
 
 	spd.nr_pages = find_get_pages_contig(mapping, index,
@@ -1774,7 +1774,7 @@
 		index++;
 	}
 
-	index = *ppos >> PAGE_CACHE_SHIFT;
+	index = *ppos >> PAGE_SHIFT;
 	nr_pages = spd.nr_pages;
 	spd.nr_pages = 0;
 
@@ -1784,7 +1784,7 @@
 		if (!len)
 			break;
 
-		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
+		this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
 		page = spd.pages[page_nr];
 
 		if (!PageUptodate(page) || page->mapping != mapping) {
@@ -1793,19 +1793,19 @@
 			if (error)
 				break;
 			unlock_page(page);
-			page_cache_release(spd.pages[page_nr]);
+			put_page(spd.pages[page_nr]);
 			spd.pages[page_nr] = page;
 		}
 
 		isize = i_size_read(inode);
-		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		end_index = (isize - 1) >> PAGE_SHIFT;
 		if (unlikely(!isize || index > end_index))
 			break;
 
 		if (end_index == index) {
 			unsigned int plen;
 
-			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+			plen = ((isize - 1) & ~PAGE_MASK) + 1;
 			if (plen <= loff)
 				break;
 
@@ -1822,7 +1822,7 @@
 	}
 
 	while (page_nr < nr_pages)
-		page_cache_release(spd.pages[page_nr++]);
+		put_page(spd.pages[page_nr++]);
 
 	if (spd.nr_pages)
 		error = splice_to_pipe(pipe, &spd);
@@ -1904,10 +1904,10 @@
 	else if (offset >= inode->i_size)
 		offset = -ENXIO;
 	else {
-		start = offset >> PAGE_CACHE_SHIFT;
-		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		start = offset >> PAGE_SHIFT;
+		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
-		new_offset <<= PAGE_CACHE_SHIFT;
+		new_offset <<= PAGE_SHIFT;
 		if (new_offset > offset) {
 			if (new_offset < inode->i_size)
 				offset = new_offset;
@@ -2203,8 +2203,8 @@
 		goto out;
 	}
 
-	start = offset >> PAGE_CACHE_SHIFT;
-	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	start = offset >> PAGE_SHIFT;
+	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	/* Try to avoid a swapstorm if len is impossible to satisfy */
 	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
 		error = -ENOSPC;
@@ -2237,8 +2237,8 @@
 		if (error) {
 			/* Remove the !PageUptodate pages we added */
 			shmem_undo_range(inode,
-				(loff_t)start << PAGE_CACHE_SHIFT,
-				(loff_t)index << PAGE_CACHE_SHIFT, true);
+				(loff_t)start << PAGE_SHIFT,
+				(loff_t)index << PAGE_SHIFT, true);
 			goto undone;
 		}
 
@@ -2259,7 +2259,7 @@
 		 */
 		set_page_dirty(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 		cond_resched();
 	}
 
@@ -2280,7 +2280,7 @@
 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
 
 	buf->f_type = TMPFS_MAGIC;
-	buf->f_bsize = PAGE_CACHE_SIZE;
+	buf->f_bsize = PAGE_SIZE;
 	buf->f_namelen = NAME_MAX;
 	if (sbinfo->max_blocks) {
 		buf->f_blocks = sbinfo->max_blocks;
@@ -2523,7 +2523,7 @@
 	struct shmem_inode_info *info;
 
 	len = strlen(symname) + 1;
-	if (len > PAGE_CACHE_SIZE)
+	if (len > PAGE_SIZE)
 		return -ENAMETOOLONG;
 
 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
@@ -2562,7 +2562,7 @@
 		SetPageUptodate(page);
 		set_page_dirty(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	dir->i_size += BOGO_DIRENT_SIZE;
 	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -2646,10 +2646,10 @@
 }
 
 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
-				   struct dentry *dentry, const char *name,
-				   void *buffer, size_t size)
+				   struct dentry *unused, struct inode *inode,
+				   const char *name, void *buffer, size_t size)
 {
-	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
+	struct shmem_inode_info *info = SHMEM_I(inode);
 
 	name = xattr_full_name(handler, name);
 	return simple_xattr_get(&info->xattrs, name, buffer, size);
@@ -2835,7 +2835,7 @@
 			if (*rest)
 				goto bad_val;
 			sbinfo->max_blocks =
-				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
+				DIV_ROUND_UP(size, PAGE_SIZE);
 		} else if (!strcmp(this_char,"nr_blocks")) {
 			sbinfo->max_blocks = memparse(value, &rest);
 			if (*rest)
@@ -2940,7 +2940,7 @@
 
 	if (sbinfo->max_blocks != shmem_default_max_blocks())
 		seq_printf(seq, ",size=%luk",
-			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
+			sbinfo->max_blocks << (PAGE_SHIFT - 10));
 	if (sbinfo->max_inodes != shmem_default_max_inodes())
 		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
 	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
@@ -3082,8 +3082,8 @@
 	sbinfo->free_inodes = sbinfo->max_inodes;
 
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = TMPFS_MAGIC;
 	sb->s_op = &shmem_ops;
 	sb->s_time_gran = 1;
@@ -3123,7 +3123,8 @@
 static void shmem_destroy_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
-	kfree(inode->i_link);
+	if (S_ISLNK(inode->i_mode))
+		kfree(inode->i_link);
 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
 }
 
diff --git a/mm/swap.c b/mm/swap.c
index 09fe5e9..03aacbc 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -114,7 +114,7 @@
 
 		victim = list_entry(pages->prev, struct page, lru);
 		list_del(&victim->lru);
-		page_cache_release(victim);
+		put_page(victim);
 	}
 }
 EXPORT_SYMBOL(put_pages_list);
@@ -142,7 +142,7 @@
 			return seg;
 
 		pages[seg] = kmap_to_page(kiov[seg].iov_base);
-		page_cache_get(pages[seg]);
+		get_page(pages[seg]);
 	}
 
 	return seg;
@@ -236,7 +236,7 @@
 		struct pagevec *pvec;
 		unsigned long flags;
 
-		page_cache_get(page);
+		get_page(page);
 		local_irq_save(flags);
 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page))
@@ -294,7 +294,7 @@
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 
-		page_cache_get(page);
+		get_page(page);
 		if (!pagevec_add(pvec, page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
 		put_cpu_var(activate_page_pvecs);
@@ -389,7 +389,7 @@
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
-	page_cache_get(page);
+	get_page(page);
 	if (!pagevec_space(pvec))
 		__pagevec_lru_add(pvec);
 	pagevec_add(pvec, page);
@@ -646,7 +646,7 @@
 	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
 		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 
-		page_cache_get(page);
+		get_page(page);
 		if (!pagevec_add(pvec, page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 		put_cpu_var(lru_deactivate_pvecs);
@@ -698,7 +698,7 @@
 }
 
 /**
- * release_pages - batched page_cache_release()
+ * release_pages - batched put_page()
  * @pages: array of pages to release
  * @nr: number of pages
  * @cold: whether the pages are cache cold
@@ -728,6 +728,11 @@
 			zone = NULL;
 		}
 
+		if (is_huge_zero_page(page)) {
+			put_huge_zero_page();
+			continue;
+		}
+
 		page = compound_head(page);
 		if (!put_page_testzero(page))
 			continue;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 69cb246..366ce35 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -85,7 +85,7 @@
 	VM_BUG_ON_PAGE(PageSwapCache(page), page);
 	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
-	page_cache_get(page);
+	get_page(page);
 	SetPageSwapCache(page);
 	set_page_private(page, entry.val);
 
@@ -109,7 +109,7 @@
 		VM_BUG_ON(error == -EEXIST);
 		set_page_private(page, 0UL);
 		ClearPageSwapCache(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 
 	return error;
@@ -226,7 +226,7 @@
 	spin_unlock_irq(&address_space->tree_lock);
 
 	swapcache_free(entry);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /* 
@@ -252,7 +252,7 @@
 void free_page_and_swap_cache(struct page *page)
 {
 	free_swap_cache(page);
-	page_cache_release(page);
+	put_page(page);
 }
 
 /*
@@ -380,7 +380,7 @@
 	} while (err != -ENOMEM);
 
 	if (new_page)
-		page_cache_release(new_page);
+		put_page(new_page);
 	return found_page;
 }
 
@@ -495,7 +495,7 @@
 			continue;
 		if (offset != entry_offset)
 			SetPageReadahead(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	blk_finish_plug(&plug);
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 560ad38..031713ab 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -119,7 +119,7 @@
 		ret = try_to_free_swap(page);
 		unlock_page(page);
 	}
-	page_cache_release(page);
+	put_page(page);
 	return ret;
 }
 
@@ -922,18 +922,19 @@
  * to it.  And as a side-effect, free up its swap: because the old content
  * on disk will never be read, and seeking back there to write new content
  * later would only waste time away from clustering.
+ *
+ * NOTE: total_mapcount should not be relied upon by the caller if
+ * reuse_swap_page() returns false, but it may always be overwritten
+ * (see the other implementation for CONFIG_SWAP=n).
  */
-int reuse_swap_page(struct page *page)
+bool reuse_swap_page(struct page *page, int *total_mapcount)
 {
 	int count;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	if (unlikely(PageKsm(page)))
-		return 0;
-	/* The page is part of THP and cannot be reused */
-	if (PageTransCompound(page))
-		return 0;
-	count = page_mapcount(page);
+		return false;
+	count = page_trans_huge_mapcount(page, total_mapcount);
 	if (count <= 1 && PageSwapCache(page)) {
 		count += page_swapcount(page);
 		if (count == 1 && !PageWriteback(page)) {
@@ -1000,7 +1001,7 @@
 			page = find_get_page(swap_address_space(entry),
 						entry.val);
 			if (page && !trylock_page(page)) {
-				page_cache_release(page);
+				put_page(page);
 				page = NULL;
 			}
 		}
@@ -1017,7 +1018,7 @@
 			SetPageDirty(page);
 		}
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	return p != NULL;
 }
@@ -1518,7 +1519,7 @@
 		}
 		if (retval) {
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 			break;
 		}
 
@@ -1570,7 +1571,7 @@
 		 */
 		SetPageDirty(page);
 		unlock_page(page);
-		page_cache_release(page);
+		put_page(page);
 
 		/*
 		 * Make sure that we aren't completely killing
@@ -2574,7 +2575,7 @@
 out:
 	if (page && !IS_ERR(page)) {
 		kunmap(page);
-		page_cache_release(page);
+		put_page(page);
 	}
 	if (name)
 		putname(name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 7598b55..b002728 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -118,7 +118,7 @@
 		return -EIO;
 
 	if (page_has_private(page))
-		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		do_invalidatepage(page, 0, PAGE_SIZE);
 
 	/*
 	 * Some filesystems seem to re-dirty the page even after
@@ -159,8 +159,8 @@
 {
 	if (page_mapped(page)) {
 		unmap_mapping_range(mapping,
-				   (loff_t)page->index << PAGE_CACHE_SHIFT,
-				   PAGE_CACHE_SIZE, 0);
+				   (loff_t)page->index << PAGE_SHIFT,
+				   PAGE_SIZE, 0);
 	}
 	return truncate_complete_page(mapping, page);
 }
@@ -241,8 +241,8 @@
 		return;
 
 	/* Offsets within partial pages */
-	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
-	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
+	partial_start = lstart & (PAGE_SIZE - 1);
+	partial_end = (lend + 1) & (PAGE_SIZE - 1);
 
 	/*
 	 * 'start' and 'end' always covers the range of pages to be fully
@@ -250,7 +250,7 @@
 	 * start of the range and 'partial_end' at the end of the range.
 	 * Note that 'end' is exclusive while 'lend' is inclusive.
 	 */
-	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (lend == -1)
 		/*
 		 * lend == -1 indicates end-of-file so we have to set 'end'
@@ -259,7 +259,7 @@
 		 */
 		end = -1;
 	else
-		end = (lend + 1) >> PAGE_CACHE_SHIFT;
+		end = (lend + 1) >> PAGE_SHIFT;
 
 	pagevec_init(&pvec, 0);
 	index = start;
@@ -298,7 +298,7 @@
 	if (partial_start) {
 		struct page *page = find_lock_page(mapping, start - 1);
 		if (page) {
-			unsigned int top = PAGE_CACHE_SIZE;
+			unsigned int top = PAGE_SIZE;
 			if (start > end) {
 				/* Truncation within a single page */
 				top = partial_end;
@@ -311,7 +311,7 @@
 				do_invalidatepage(page, partial_start,
 						  top - partial_start);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 	}
 	if (partial_end) {
@@ -324,7 +324,7 @@
 				do_invalidatepage(page, 0,
 						  partial_end);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 	}
 	/*
@@ -538,7 +538,7 @@
 	if (mapping->a_ops->freepage)
 		mapping->a_ops->freepage(page);
 
-	page_cache_release(page);	/* pagecache ref */
+	put_page(page);	/* pagecache ref */
 	return 1;
 failed:
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -608,18 +608,18 @@
 					 * Zap the rest of the file in one hit.
 					 */
 					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_CACHE_SHIFT,
+					   (loff_t)index << PAGE_SHIFT,
 					   (loff_t)(1 + end - index)
-							 << PAGE_CACHE_SHIFT,
-					    0);
+							 << PAGE_SHIFT,
+							 0);
 					did_range_unmap = 1;
 				} else {
 					/*
 					 * Just zap this page
 					 */
 					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_CACHE_SHIFT,
-					   PAGE_CACHE_SIZE, 0);
+					   (loff_t)index << PAGE_SHIFT,
+					   PAGE_SIZE, 0);
 				}
 			}
 			BUG_ON(page_mapped(page));
@@ -744,14 +744,14 @@
 
 	WARN_ON(to > inode->i_size);
 
-	if (from >= to || bsize == PAGE_CACHE_SIZE)
+	if (from >= to || bsize == PAGE_SIZE)
 		return;
 	/* Page straddling @from will not have any hole block created? */
 	rounded_from = round_up(from, bsize);
-	if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
+	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
 		return;
 
-	index = from >> PAGE_CACHE_SHIFT;
+	index = from >> PAGE_SHIFT;
 	page = find_lock_page(inode->i_mapping, index);
 	/* Page not cached? Nothing to do */
 	if (!page)
@@ -763,7 +763,7 @@
 	if (page_mkclean(page))
 		set_page_dirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 EXPORT_SYMBOL(pagecache_isize_extended);
 
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 9f3a029..af817e5 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -93,7 +93,7 @@
 	pte_unmap_unlock(dst_pte, ptl);
 	mem_cgroup_cancel_charge(page, memcg, false);
 out_release:
-	page_cache_release(page);
+	put_page(page);
 	goto out;
 }
 
@@ -287,7 +287,7 @@
 	up_read(&dst_mm->mmap_sem);
 out:
 	if (page)
-		page_cache_release(page);
+		put_page(page);
 	BUG_ON(copied < 0);
 	BUG_ON(err > 0);
 	BUG_ON(!copied && !err);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b934223e..142cb61 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2553,7 +2553,7 @@
 		sc->gfp_mask |= __GFP_HIGHMEM;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
-					requested_highidx, sc->nodemask) {
+					gfp_zone(sc->gfp_mask), sc->nodemask) {
 		enum zone_type classzone_idx;
 
 		if (!populated_zone(zone))
@@ -3318,6 +3318,20 @@
 	/* Try to sleep for a short interval */
 	if (prepare_kswapd_sleep(pgdat, order, remaining,
 						balanced_classzone_idx)) {
+		/*
+		 * Compaction records which pageblocks it recently failed to
+		 * isolate pages from and skips them in future scans.
+		 * When kswapd is going to sleep, it is reasonable to assume
+		 * that pages have been freed and compaction may succeed, so
+		 * reset the cache.
+		 */
+		reset_isolation_suitable(pgdat);
+
+		/*
+		 * We have freed the memory, now we should compact it to make
+		 * allocation of the requested order possible.
+		 */
+		wakeup_kcompactd(pgdat, order, classzone_idx);
+
 		remaining = schedule_timeout(HZ/10);
 		finish_wait(&pgdat->kswapd_wait, &wait);
 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3341,20 +3355,6 @@
 		 */
 		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 
-		/*
-		 * Compaction records what page blocks it recently failed to
-		 * isolate pages from and skips them in the future scanning.
-		 * When kswapd is going to sleep, it is reasonable to assume
-		 * that pages and compaction may succeed so reset the cache.
-		 */
-		reset_isolation_suitable(pgdat);
-
-		/*
-		 * We have freed the memory, now we should compact it to make
-		 * allocation of the requested order possible.
-		 */
-		wakeup_kcompactd(pgdat, order, classzone_idx);
-
 		if (!kthread_should_stop())
 			schedule();
 
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index e72efb10..fe47fbb 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1735,10 +1735,13 @@
 static unsigned long zs_can_compact(struct size_class *class)
 {
 	unsigned long obj_wasted;
+	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+	unsigned long obj_used = zs_stat_get(class, OBJ_USED);
 
-	obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
-		zs_stat_get(class, OBJ_USED);
+	if (obj_allocated <= obj_used)
+		return 0;
 
+	obj_wasted = obj_allocated - obj_used;
 	obj_wasted /= get_maxobj_per_zspage(class->size,
 			class->pages_per_zspage);
 
diff --git a/mm/zswap.c b/mm/zswap.c
index bf14508..de0f119b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -170,6 +170,8 @@
 static LIST_HEAD(zswap_pools);
 /* protects zswap_pools list modification */
 static DEFINE_SPINLOCK(zswap_pools_lock);
+/* pool counter to provide unique names to zpool */
+static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 
 /* used by param callback function */
 static bool zswap_init_started;
@@ -565,6 +567,7 @@
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
 	struct zswap_pool *pool;
+	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -573,7 +576,10 @@
 		return NULL;
 	}
 
-	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
+	/* unique name for each pool specifically required by zsmalloc */
+	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
+
+	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
 	if (!pool->zpool) {
 		pr_err("%s zpool not available\n", type);
 		goto error;
@@ -869,7 +875,7 @@
 
 	case ZSWAP_SWAPCACHE_EXIST:
 		/* page is already in the swap cache, ignore for now */
-		page_cache_release(page);
+		put_page(page);
 		ret = -EEXIST;
 		goto fail;
 
@@ -897,7 +903,7 @@
 
 	/* start writeback */
 	__swap_writepage(page, &wbc, end_swap_bio_write);
-	page_cache_release(page);
+	put_page(page);
 	zswap_written_back_pages++;
 
 	spin_lock(&tree->lock);
diff --git a/net/6lowpan/6lowpan_i.h b/net/6lowpan/6lowpan_i.h
index d16bb4b..97ecc27 100644
--- a/net/6lowpan/6lowpan_i.h
+++ b/net/6lowpan/6lowpan_i.h
@@ -3,6 +3,15 @@
 
 #include <linux/netdevice.h>
 
+#include <net/6lowpan.h>
+
+/* caller needs to be sure that dev->type is ARPHRD_6LOWPAN */
+static inline bool lowpan_is_ll(const struct net_device *dev,
+				enum lowpan_lltypes lltype)
+{
+	return lowpan_dev(dev)->lltype == lltype;
+}
+
 #ifdef CONFIG_6LOWPAN_DEBUGFS
 int lowpan_dev_debugfs_init(struct net_device *dev);
 void lowpan_dev_debugfs_exit(struct net_device *dev);
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
index 34e44c0..7a240b3 100644
--- a/net/6lowpan/core.c
+++ b/net/6lowpan/core.c
@@ -27,11 +27,11 @@
 	dev->mtu = IPV6_MIN_MTU;
 	dev->priv_flags |= IFF_NO_QUEUE;
 
-	lowpan_priv(dev)->lltype = lltype;
+	lowpan_dev(dev)->lltype = lltype;
 
-	spin_lock_init(&lowpan_priv(dev)->ctx.lock);
+	spin_lock_init(&lowpan_dev(dev)->ctx.lock);
 	for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
-		lowpan_priv(dev)->ctx.table[i].id = i;
+		lowpan_dev(dev)->ctx.table[i].id = i;
 
 	ret = register_netdevice(dev);
 	if (ret < 0)
@@ -85,7 +85,7 @@
 	case NETDEV_DOWN:
 		for (i = 0; i < LOWPAN_IPHC_CTX_TABLE_SIZE; i++)
 			clear_bit(LOWPAN_IPHC_CTX_FLAG_ACTIVE,
-				  &lowpan_priv(dev)->ctx.table[i].flags);
+				  &lowpan_dev(dev)->ctx.table[i].flags);
 		break;
 	default:
 		return NOTIFY_DONE;
diff --git a/net/6lowpan/debugfs.c b/net/6lowpan/debugfs.c
index 0793a81..acbaa3d 100644
--- a/net/6lowpan/debugfs.c
+++ b/net/6lowpan/debugfs.c
@@ -172,7 +172,7 @@
 static int lowpan_dev_debugfs_ctx_init(struct net_device *dev,
 				       struct dentry *ctx, u8 id)
 {
-	struct lowpan_priv *lpriv = lowpan_priv(dev);
+	struct lowpan_dev *ldev = lowpan_dev(dev);
 	struct dentry *dentry, *root;
 	char buf[32];
 
@@ -185,25 +185,25 @@
 		return -EINVAL;
 
 	dentry = debugfs_create_file("active", 0644, root,
-				     &lpriv->ctx.table[id],
+				     &ldev->ctx.table[id],
 				     &lowpan_ctx_flag_active_fops);
 	if (!dentry)
 		return -EINVAL;
 
 	dentry = debugfs_create_file("compression", 0644, root,
-				     &lpriv->ctx.table[id],
+				     &ldev->ctx.table[id],
 				     &lowpan_ctx_flag_c_fops);
 	if (!dentry)
 		return -EINVAL;
 
 	dentry = debugfs_create_file("prefix", 0644, root,
-				     &lpriv->ctx.table[id],
+				     &ldev->ctx.table[id],
 				     &lowpan_ctx_pfx_fops);
 	if (!dentry)
 		return -EINVAL;
 
 	dentry = debugfs_create_file("prefix_len", 0644, root,
-				     &lpriv->ctx.table[id],
+				     &ldev->ctx.table[id],
 				     &lowpan_ctx_plen_fops);
 	if (!dentry)
 		return -EINVAL;
@@ -247,21 +247,21 @@
 
 int lowpan_dev_debugfs_init(struct net_device *dev)
 {
-	struct lowpan_priv *lpriv = lowpan_priv(dev);
+	struct lowpan_dev *ldev = lowpan_dev(dev);
 	struct dentry *contexts, *dentry;
 	int ret, i;
 
 	/* creating the root */
-	lpriv->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs);
-	if (!lpriv->iface_debugfs)
+	ldev->iface_debugfs = debugfs_create_dir(dev->name, lowpan_debugfs);
+	if (!ldev->iface_debugfs)
 		goto fail;
 
-	contexts = debugfs_create_dir("contexts", lpriv->iface_debugfs);
+	contexts = debugfs_create_dir("contexts", ldev->iface_debugfs);
 	if (!contexts)
 		goto remove_root;
 
 	dentry = debugfs_create_file("show", 0644, contexts,
-				     &lowpan_priv(dev)->ctx,
+				     &lowpan_dev(dev)->ctx,
 				     &lowpan_context_fops);
 	if (!dentry)
 		goto remove_root;
@@ -282,7 +282,7 @@
 
 void lowpan_dev_debugfs_exit(struct net_device *dev)
 {
-	debugfs_remove_recursive(lowpan_priv(dev)->iface_debugfs);
+	debugfs_remove_recursive(lowpan_dev(dev)->iface_debugfs);
 }
 
 int __init lowpan_debugfs_init(void)
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 99bb22a..8501dd5 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -53,9 +53,6 @@
 #include <net/6lowpan.h>
 #include <net/ipv6.h>
 
-/* special link-layer handling */
-#include <net/mac802154.h>
-
 #include "6lowpan_i.h"
 #include "nhc.h"
 
@@ -148,35 +145,25 @@
 	 (((a)->s6_addr16[6]) == 0) &&		\
 	 (((a)->s6_addr[14]) == 0))
 
+#define lowpan_is_linklocal_zero_padded(a)	\
+	(!((a).s6_addr[1] & 0x3f) &&		\
+	 !(a).s6_addr16[1] &&			\
+	 !(a).s6_addr32[1])
+
 #define LOWPAN_IPHC_CID_DCI(cid)	(cid & 0x0f)
 #define LOWPAN_IPHC_CID_SCI(cid)	((cid & 0xf0) >> 4)
 
-static inline void iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
-						const void *lladdr)
-{
-	/* fe:80::XXXX:XXXX:XXXX:XXXX
-	 *        \_________________/
-	 *              hwaddr
-	 */
-	ipaddr->s6_addr[0] = 0xFE;
-	ipaddr->s6_addr[1] = 0x80;
-	memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN);
-	/* second bit-flip (Universe/Local)
-	 * is done according RFC2464
-	 */
-	ipaddr->s6_addr[8] ^= 0x02;
-}
-
-static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
-						 const void *lladdr)
+static inline void
+lowpan_iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
+				     const void *lladdr)
 {
 	const struct ieee802154_addr *addr = lladdr;
-	u8 eui64[EUI64_ADDR_LEN] = { };
+	u8 eui64[EUI64_ADDR_LEN];
 
 	switch (addr->mode) {
 	case IEEE802154_ADDR_LONG:
 		ieee802154_le64_to_be64(eui64, &addr->extended_addr);
-		iphc_uncompress_eui64_lladdr(ipaddr, eui64);
+		lowpan_iphc_uncompress_eui64_lladdr(ipaddr, eui64);
 		break;
 	case IEEE802154_ADDR_SHORT:
 		/* fe:80::ff:fe00:XXXX
@@ -202,7 +189,7 @@
 static struct lowpan_iphc_ctx *
 lowpan_iphc_ctx_get_by_id(const struct net_device *dev, u8 id)
 {
-	struct lowpan_iphc_ctx *ret = &lowpan_priv(dev)->ctx.table[id];
+	struct lowpan_iphc_ctx *ret = &lowpan_dev(dev)->ctx.table[id];
 
 	if (!lowpan_iphc_ctx_is_active(ret))
 		return NULL;
@@ -214,7 +201,7 @@
 lowpan_iphc_ctx_get_by_addr(const struct net_device *dev,
 			    const struct in6_addr *addr)
 {
-	struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table;
+	struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;
 	struct lowpan_iphc_ctx *ret = NULL;
 	struct in6_addr addr_pfx;
 	u8 addr_plen;
@@ -258,7 +245,7 @@
 lowpan_iphc_ctx_get_by_mcast_addr(const struct net_device *dev,
 				  const struct in6_addr *addr)
 {
-	struct lowpan_iphc_ctx *table = lowpan_priv(dev)->ctx.table;
+	struct lowpan_iphc_ctx *table = lowpan_dev(dev)->ctx.table;
 	struct lowpan_iphc_ctx *ret = NULL;
 	struct in6_addr addr_mcast, network_pfx = {};
 	int i;
@@ -296,9 +283,10 @@
  *
  * address_mode is the masked value for sam or dam value
  */
-static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev,
-			   struct in6_addr *ipaddr, u8 address_mode,
-			   const void *lladdr)
+static int lowpan_iphc_uncompress_addr(struct sk_buff *skb,
+				       const struct net_device *dev,
+				       struct in6_addr *ipaddr,
+				       u8 address_mode, const void *lladdr)
 {
 	bool fail;
 
@@ -327,12 +315,12 @@
 	case LOWPAN_IPHC_SAM_11:
 	case LOWPAN_IPHC_DAM_11:
 		fail = false;
-		switch (lowpan_priv(dev)->lltype) {
+		switch (lowpan_dev(dev)->lltype) {
 		case LOWPAN_LLTYPE_IEEE802154:
-			iphc_uncompress_802154_lladdr(ipaddr, lladdr);
+			lowpan_iphc_uncompress_802154_lladdr(ipaddr, lladdr);
 			break;
 		default:
-			iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
+			lowpan_iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
 			break;
 		}
 		break;
@@ -355,11 +343,11 @@
 /* Uncompress address function for source context
  * based address(non-multicast).
  */
-static int uncompress_ctx_addr(struct sk_buff *skb,
-			       const struct net_device *dev,
-			       const struct lowpan_iphc_ctx *ctx,
-			       struct in6_addr *ipaddr, u8 address_mode,
-			       const void *lladdr)
+static int lowpan_iphc_uncompress_ctx_addr(struct sk_buff *skb,
+					   const struct net_device *dev,
+					   const struct lowpan_iphc_ctx *ctx,
+					   struct in6_addr *ipaddr,
+					   u8 address_mode, const void *lladdr)
 {
 	bool fail;
 
@@ -388,12 +376,12 @@
 	case LOWPAN_IPHC_SAM_11:
 	case LOWPAN_IPHC_DAM_11:
 		fail = false;
-		switch (lowpan_priv(dev)->lltype) {
+		switch (lowpan_dev(dev)->lltype) {
 		case LOWPAN_LLTYPE_IEEE802154:
-			iphc_uncompress_802154_lladdr(ipaddr, lladdr);
+			lowpan_iphc_uncompress_802154_lladdr(ipaddr, lladdr);
 			break;
 		default:
-			iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
+			lowpan_iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
 			break;
 		}
 		ipv6_addr_prefix_copy(ipaddr, &ctx->pfx, ctx->plen);
@@ -652,22 +640,24 @@
 	}
 
 	if (iphc1 & LOWPAN_IPHC_SAC) {
-		spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+		spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
 		ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_SCI(cid));
 		if (!ci) {
-			spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+			spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 			return -EINVAL;
 		}
 
 		pr_debug("SAC bit is set. Handle context based source address.\n");
-		err = uncompress_ctx_addr(skb, dev, ci, &hdr.saddr,
-					  iphc1 & LOWPAN_IPHC_SAM_MASK, saddr);
-		spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+		err = lowpan_iphc_uncompress_ctx_addr(skb, dev, ci, &hdr.saddr,
+						      iphc1 & LOWPAN_IPHC_SAM_MASK,
+						      saddr);
+		spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 	} else {
 		/* Source address uncompression */
 		pr_debug("source address stateless compression\n");
-		err = uncompress_addr(skb, dev, &hdr.saddr,
-				      iphc1 & LOWPAN_IPHC_SAM_MASK, saddr);
+		err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.saddr,
+						  iphc1 & LOWPAN_IPHC_SAM_MASK,
+						  saddr);
 	}
 
 	/* Check on error of previous branch */
@@ -676,10 +666,10 @@
 
 	switch (iphc1 & (LOWPAN_IPHC_M | LOWPAN_IPHC_DAC)) {
 	case LOWPAN_IPHC_M | LOWPAN_IPHC_DAC:
-		spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+		spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
 		ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid));
 		if (!ci) {
-			spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+			spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 			return -EINVAL;
 		}
 
@@ -688,7 +678,7 @@
 		err = lowpan_uncompress_multicast_ctx_daddr(skb, ci,
 							    &hdr.daddr,
 							    iphc1 & LOWPAN_IPHC_DAM_MASK);
-		spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+		spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 		break;
 	case LOWPAN_IPHC_M:
 		/* multicast */
@@ -696,22 +686,24 @@
 							iphc1 & LOWPAN_IPHC_DAM_MASK);
 		break;
 	case LOWPAN_IPHC_DAC:
-		spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+		spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
 		ci = lowpan_iphc_ctx_get_by_id(dev, LOWPAN_IPHC_CID_DCI(cid));
 		if (!ci) {
-			spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+			spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 			return -EINVAL;
 		}
 
 		/* Destination address context based uncompression */
 		pr_debug("DAC bit is set. Handle context based destination address.\n");
-		err = uncompress_ctx_addr(skb, dev, ci, &hdr.daddr,
-					  iphc1 & LOWPAN_IPHC_DAM_MASK, daddr);
-		spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+		err = lowpan_iphc_uncompress_ctx_addr(skb, dev, ci, &hdr.daddr,
+						      iphc1 & LOWPAN_IPHC_DAM_MASK,
+						      daddr);
+		spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 		break;
 	default:
-		err = uncompress_addr(skb, dev, &hdr.daddr,
-				      iphc1 & LOWPAN_IPHC_DAM_MASK, daddr);
+		err = lowpan_iphc_uncompress_addr(skb, dev, &hdr.daddr,
+						  iphc1 & LOWPAN_IPHC_DAM_MASK,
+						  daddr);
 		pr_debug("dest: stateless compression mode %d dest %pI6c\n",
 			 iphc1 & LOWPAN_IPHC_DAM_MASK, &hdr.daddr);
 		break;
@@ -731,7 +723,7 @@
 			return err;
 	}
 
-	switch (lowpan_priv(dev)->lltype) {
+	switch (lowpan_dev(dev)->lltype) {
 	case LOWPAN_LLTYPE_IEEE802154:
 		if (lowpan_802154_cb(skb)->d_size)
 			hdr.payload_len = htons(lowpan_802154_cb(skb)->d_size -
@@ -1028,7 +1020,7 @@
 		       skb->data, skb->len);
 
 	ipv6_daddr_type = ipv6_addr_type(&hdr->daddr);
-	spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+	spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
 	if (ipv6_daddr_type & IPV6_ADDR_MULTICAST)
 		dci = lowpan_iphc_ctx_get_by_mcast_addr(dev, &hdr->daddr);
 	else
@@ -1037,15 +1029,15 @@
 		memcpy(&dci_entry, dci, sizeof(*dci));
 		cid |= dci->id;
 	}
-	spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+	spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 
-	spin_lock_bh(&lowpan_priv(dev)->ctx.lock);
+	spin_lock_bh(&lowpan_dev(dev)->ctx.lock);
 	sci = lowpan_iphc_ctx_get_by_addr(dev, &hdr->saddr);
 	if (sci) {
 		memcpy(&sci_entry, sci, sizeof(*sci));
 		cid |= (sci->id << 4);
 	}
-	spin_unlock_bh(&lowpan_priv(dev)->ctx.lock);
+	spin_unlock_bh(&lowpan_dev(dev)->ctx.lock);
 
 	/* if cid is zero it will be compressed */
 	if (cid) {
@@ -1101,7 +1093,8 @@
 							  true);
 			iphc1 |= LOWPAN_IPHC_SAC;
 		} else {
-			if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL) {
+			if (ipv6_saddr_type & IPV6_ADDR_LINKLOCAL &&
+			    lowpan_is_linklocal_zero_padded(hdr->saddr)) {
 				iphc1 |= lowpan_compress_addr_64(&hc_ptr,
 								 &hdr->saddr,
 								 saddr, true);
@@ -1135,7 +1128,8 @@
 							  false);
 			iphc1 |= LOWPAN_IPHC_DAC;
 		} else {
-			if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL) {
+			if (ipv6_daddr_type & IPV6_ADDR_LINKLOCAL &&
+			    lowpan_is_linklocal_zero_padded(hdr->daddr)) {
 				iphc1 |= lowpan_compress_addr_64(&hc_ptr,
 								 &hdr->daddr,
 								 daddr, false);
diff --git a/net/6lowpan/nhc_udp.c b/net/6lowpan/nhc_udp.c
index 69537a2..225d919 100644
--- a/net/6lowpan/nhc_udp.c
+++ b/net/6lowpan/nhc_udp.c
@@ -91,7 +91,7 @@
 	 * here, we obtain the hint from the remaining size of the
 	 * frame
 	 */
-	switch (lowpan_priv(skb->dev)->lltype) {
+	switch (lowpan_dev(skb->dev)->lltype) {
 	case LOWPAN_LLTYPE_IEEE802154:
 		if (lowpan_802154_cb(skb)->d_size)
 			uh.len = htons(lowpan_802154_cb(skb)->d_size -
diff --git a/net/Kconfig b/net/Kconfig
index a8934d8..ff40562 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -236,6 +236,7 @@
 source "net/hsr/Kconfig"
 source "net/switchdev/Kconfig"
 source "net/l3mdev/Kconfig"
+source "net/qrtr/Kconfig"
 
 config RPS
 	bool
@@ -288,14 +289,17 @@
 
 config BPF_JIT
 	bool "enable BPF Just In Time compiler"
-	depends on HAVE_BPF_JIT
+	depends on HAVE_CBPF_JIT || HAVE_EBPF_JIT
 	depends on MODULES
 	---help---
 	  Berkeley Packet Filter filtering capabilities are normally handled
 	  by an interpreter. This option allows kernel to generate a native
 	  code when filter is loaded in memory. This should speedup
-	  packet sniffing (libpcap/tcpdump). Note : Admin should enable
-	  this feature changing /proc/sys/net/core/bpf_jit_enable
+	  packet sniffing (libpcap/tcpdump).
+
+	  Note, the admin should enable this feature by changing:
+	  /proc/sys/net/core/bpf_jit_enable
+	  /proc/sys/net/core/bpf_jit_harden (optional)
 
 config NET_FLOW_LIMIT
 	bool
@@ -418,6 +422,14 @@
 
 endif   # if NET
 
-# Used by archs to tell that they support BPF_JIT
-config HAVE_BPF_JIT
+# Used by archs to indicate that they support a BPF JIT compiler and which
+# flavour. Only one of the two can be selected for a specific arch, since the
+# eBPF JIT supersedes the cBPF JIT.
+
+# Classic BPF JIT (cBPF)
+config HAVE_CBPF_JIT
+	bool
+
+# Extended BPF JIT (eBPF)
+config HAVE_EBPF_JIT
 	bool
diff --git a/net/Makefile b/net/Makefile
index 81d1411..bdd1455 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -78,3 +78,4 @@
 ifneq ($(CONFIG_NET_L3_MASTER_DEV),)
 obj-y				+= l3mdev/
 endif
+obj-$(CONFIG_QRTR)		+= qrtr/
diff --git a/net/atm/lec.c b/net/atm/lec.c
index cd3b379..e574a7e 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -194,7 +194,7 @@
 static void lec_tx_timeout(struct net_device *dev)
 {
 	pr_info("%s\n", dev->name);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	netif_wake_queue(dev);
 }
 
@@ -324,7 +324,7 @@
 out:
 	if (entry)
 		lec_arp_put(entry);
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	return NETDEV_TX_OK;
 }
 
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index cb2d1b9..7f98a9d 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -32,6 +32,7 @@
 #include <linux/jiffies.h>
 #include <linux/list.h>
 #include <linux/kref.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/pkt_sched.h>
 #include <linux/printk.h>
@@ -175,6 +176,79 @@
 }
 
 /**
+ * batadv_iv_ogm_drop_bcast_own_entry - drop section of bcast_own
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current number of interfaces
+ * @del_if_num: the index of the interface being removed
+ */
+static void
+batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
+				   int max_if_num, int del_if_num)
+{
+	size_t chunk_size;
+	size_t if_offset;
+	void *data_ptr;
+
+	lockdep_assert_held(&orig_node->bat_iv.ogm_cnt_lock);
+
+	chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
+	data_ptr = kmalloc_array(max_if_num, chunk_size, GFP_ATOMIC);
+	if (!data_ptr)
+		/* use old buffer when new one could not be allocated */
+		data_ptr = orig_node->bat_iv.bcast_own;
+
+	/* copy first part */
+	memmove(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
+
+	/* copy second part */
+	if_offset = (del_if_num + 1) * chunk_size;
+	memmove((char *)data_ptr + del_if_num * chunk_size,
+		(uint8_t *)orig_node->bat_iv.bcast_own + if_offset,
+		(max_if_num - del_if_num) * chunk_size);
+
+	/* bcast_own was shrunk down in new buffer; free old one */
+	if (orig_node->bat_iv.bcast_own != data_ptr) {
+		kfree(orig_node->bat_iv.bcast_own);
+		orig_node->bat_iv.bcast_own = data_ptr;
+	}
+}
+
+/**
+ * batadv_iv_ogm_drop_bcast_own_sum_entry - drop section of bcast_own_sum
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current number of interfaces
+ * @del_if_num: the index of the interface being removed
+ */
+static void
+batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
+				       int max_if_num, int del_if_num)
+{
+	size_t if_offset;
+	void *data_ptr;
+
+	lockdep_assert_held(&orig_node->bat_iv.ogm_cnt_lock);
+
+	data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
+	if (!data_ptr)
+		/* use old buffer when new one could not be allocated */
+		data_ptr = orig_node->bat_iv.bcast_own_sum;
+
+	memmove(data_ptr, orig_node->bat_iv.bcast_own_sum,
+		del_if_num * sizeof(u8));
+
+	if_offset = (del_if_num + 1) * sizeof(u8);
+	memmove((char *)data_ptr + del_if_num * sizeof(u8),
+		orig_node->bat_iv.bcast_own_sum + if_offset,
+		(max_if_num - del_if_num) * sizeof(u8));
+
+	/* bcast_own_sum was shrunk down in new buffer; free old one */
+	if (orig_node->bat_iv.bcast_own_sum != data_ptr) {
+		kfree(orig_node->bat_iv.bcast_own_sum);
+		orig_node->bat_iv.bcast_own_sum = data_ptr;
+	}
+}
+
+/**
  * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
  *  exclude the removed interface
  * @orig_node: the orig_node that has to be changed
@@ -186,60 +260,23 @@
 static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
 				     int max_if_num, int del_if_num)
 {
-	int ret = -ENOMEM;
-	size_t chunk_size, if_offset;
-	void *data_ptr = NULL;
-
 	spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 
-	/* last interface was removed */
-	if (max_if_num == 0)
-		goto free_bcast_own;
-
-	chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
-	data_ptr = kmalloc_array(max_if_num, chunk_size, GFP_ATOMIC);
-	if (!data_ptr)
-		goto unlock;
-
-	/* copy first part */
-	memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
-
-	/* copy second part */
-	if_offset = (del_if_num + 1) * chunk_size;
-	memcpy((char *)data_ptr + del_if_num * chunk_size,
-	       (uint8_t *)orig_node->bat_iv.bcast_own + if_offset,
-	       (max_if_num - del_if_num) * chunk_size);
-
-free_bcast_own:
-	kfree(orig_node->bat_iv.bcast_own);
-	orig_node->bat_iv.bcast_own = data_ptr;
-
-	if (max_if_num == 0)
-		goto free_own_sum;
-
-	data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
-	if (!data_ptr) {
+	if (max_if_num == 0) {
 		kfree(orig_node->bat_iv.bcast_own);
-		goto unlock;
+		kfree(orig_node->bat_iv.bcast_own_sum);
+		orig_node->bat_iv.bcast_own = NULL;
+		orig_node->bat_iv.bcast_own_sum = NULL;
+	} else {
+		batadv_iv_ogm_drop_bcast_own_entry(orig_node, max_if_num,
+						   del_if_num);
+		batadv_iv_ogm_drop_bcast_own_sum_entry(orig_node, max_if_num,
+						       del_if_num);
 	}
 
-	memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
-	       del_if_num * sizeof(u8));
-
-	if_offset = (del_if_num + 1) * sizeof(u8);
-	memcpy((char *)data_ptr + del_if_num * sizeof(u8),
-	       orig_node->bat_iv.bcast_own_sum + if_offset,
-	       (max_if_num - del_if_num) * sizeof(u8));
-
-free_own_sum:
-	kfree(orig_node->bat_iv.bcast_own_sum);
-	orig_node->bat_iv.bcast_own_sum = data_ptr;
-
-	ret = 0;
-unlock:
 	spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -644,18 +681,12 @@
 	unsigned char *skb_buff;
 	unsigned int skb_size;
 
-	if (!kref_get_unless_zero(&if_incoming->refcount))
-		return;
-
-	if (!kref_get_unless_zero(&if_outgoing->refcount))
-		goto out_free_incoming;
-
 	/* own packet should always be scheduled */
 	if (!own_packet) {
 		if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
 			batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 				   "batman packet queue full\n");
-			goto out_free_outgoing;
+			return;
 		}
 	}
 
@@ -681,6 +712,8 @@
 	forw_packet_aggr->packet_len = packet_len;
 	memcpy(skb_buff, packet_buff, packet_len);
 
+	kref_get(&if_incoming->refcount);
+	kref_get(&if_outgoing->refcount);
 	forw_packet_aggr->own = own_packet;
 	forw_packet_aggr->if_incoming = if_incoming;
 	forw_packet_aggr->if_outgoing = if_outgoing;
@@ -710,10 +743,6 @@
 out_nomem:
 	if (!own_packet)
 		atomic_inc(&bat_priv->batman_queue_left);
-out_free_outgoing:
-	batadv_hardif_put(if_outgoing);
-out_free_incoming:
-	batadv_hardif_put(if_incoming);
 }
 
 /* aggregate a new packet into the existing ogm packet */
@@ -950,9 +979,15 @@
 	list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) {
 		if (tmp_hard_iface->soft_iface != hard_iface->soft_iface)
 			continue;
+
+		if (!kref_get_unless_zero(&tmp_hard_iface->refcount))
+			continue;
+
 		batadv_iv_ogm_queue_add(bat_priv, *ogm_buff,
 					*ogm_buff_len, hard_iface,
 					tmp_hard_iface, 1, send_time);
+
+		batadv_hardif_put(tmp_hard_iface);
 	}
 	rcu_read_unlock();
 
@@ -1133,13 +1168,13 @@
  * @if_incoming: interface where the packet was received
  * @if_outgoing: interface for which the retransmission should be considered
  *
- * Return: 1 if the link can be considered bidirectional, 0 otherwise
+ * Return: true if the link can be considered bidirectional, false otherwise
  */
-static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
-				 struct batadv_orig_node *orig_neigh_node,
-				 struct batadv_ogm_packet *batadv_ogm_packet,
-				 struct batadv_hard_iface *if_incoming,
-				 struct batadv_hard_iface *if_outgoing)
+static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
+				  struct batadv_orig_node *orig_neigh_node,
+				  struct batadv_ogm_packet *batadv_ogm_packet,
+				  struct batadv_hard_iface *if_incoming,
+				  struct batadv_hard_iface *if_outgoing)
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
@@ -1147,9 +1182,10 @@
 	u8 total_count;
 	u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
 	unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
-	int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0;
+	int tq_asym_penalty, inv_asym_penalty, if_num;
 	unsigned int combined_tq;
 	int tq_iface_penalty;
+	bool ret = false;
 
 	/* find corresponding one hop neighbor */
 	rcu_read_lock();
@@ -1261,7 +1297,7 @@
 	 * consider it bidirectional
 	 */
 	if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT)
-		ret = 1;
+		ret = true;
 
 out:
 	if (neigh_node)
@@ -1290,9 +1326,9 @@
 	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
 	struct batadv_neigh_node *neigh_node;
 	struct batadv_neigh_ifinfo *neigh_ifinfo;
-	int is_dup;
+	bool is_dup;
 	s32 seq_diff;
-	int need_update = 0;
+	bool need_update = false;
 	int set_mark;
 	enum batadv_dup_status ret = BATADV_NO_DUP;
 	u32 seqno = ntohl(batadv_ogm_packet->seqno);
@@ -1402,7 +1438,7 @@
 	struct sk_buff *skb_priv;
 	struct ethhdr *ethhdr;
 	u8 *prev_sender;
-	int is_bidirect;
+	bool is_bidirect;
 
 	/* create a private copy of the skb, as some functions change tq value
 	 * and/or flags.
@@ -1730,8 +1766,13 @@
 		if (hard_iface->soft_iface != bat_priv->soft_iface)
 			continue;
 
+		if (!kref_get_unless_zero(&hard_iface->refcount))
+			continue;
+
 		batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node,
 						if_incoming, hard_iface);
+
+		batadv_hardif_put(hard_iface);
 	}
 	rcu_read_unlock();
 
@@ -1829,9 +1870,8 @@
 	int batman_count = 0;
 	u32 i;
 
-	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
-		   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
-		   "Nexthop", "outgoingIF", "Potential nexthops");
+	seq_puts(seq,
+		 "  Originator      last-seen (#/255)           Nexthop [outgoingIF]:   Potential nexthops ...\n");
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1911,8 +1951,7 @@
 	struct batadv_hard_iface *hard_iface;
 	int batman_count = 0;
 
-	seq_printf(seq, "   %10s        %-13s %s\n",
-		   "IF", "Neighbor", "last-seen");
+	seq_puts(seq, "           IF        Neighbor      last-seen\n");
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 3315b9a..3ff8bd1 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -32,10 +32,21 @@
 
 #include "bat_v_elp.h"
 #include "bat_v_ogm.h"
+#include "hard-interface.h"
 #include "hash.h"
 #include "originator.h"
 #include "packet.h"
 
+static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
+{
+	/* B.A.T.M.A.N. V does not use any queuing mechanism, therefore it can
+	 * set the interface as ACTIVE right away, without any risk of a race
+	 * condition
+	 */
+	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+		hard_iface->if_status = BATADV_IF_ACTIVE;
+}
+
 static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
 {
 	int ret;
@@ -151,8 +162,8 @@
 	struct batadv_hard_iface *hard_iface;
 	int batman_count = 0;
 
-	seq_printf(seq, "  %-15s %s (%11s) [%10s]\n", "Neighbor",
-		   "last-seen", "throughput", "IF");
+	seq_puts(seq,
+		 "  Neighbor        last-seen ( throughput) [        IF]\n");
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -191,9 +202,8 @@
 	int batman_count = 0;
 	u32 i;
 
-	seq_printf(seq, "  %-15s %s (%11s) %17s [%10s]: %20s ...\n",
-		   "Originator", "last-seen", "throughput", "Nexthop",
-		   "outgoingIF", "Potential nexthops");
+	seq_puts(seq,
+		 "  Originator      last-seen ( throughput)           Nexthop [outgoingIF]:   Potential nexthops ...\n");
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -274,6 +284,7 @@
 
 static struct batadv_algo_ops batadv_batman_v __read_mostly = {
 	.name = "BATMAN_V",
+	.bat_iface_activate = batadv_v_iface_activate,
 	.bat_iface_enable = batadv_v_iface_enable,
 	.bat_iface_disable = batadv_v_iface_disable,
 	.bat_iface_update_mac = batadv_v_iface_update_mac,
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index d9bcbe6..473ebb9 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -26,6 +26,7 @@
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/random.h>
@@ -176,6 +177,9 @@
 		if (hard_iface->soft_iface != bat_priv->soft_iface)
 			continue;
 
+		if (!kref_get_unless_zero(&hard_iface->refcount))
+			continue;
+
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n",
 			   ogm_packet->orig, ntohl(ogm_packet->seqno),
@@ -185,10 +189,13 @@
 
 		/* this skb gets consumed by batadv_v_ogm_send_to_if() */
 		skb_tmp = skb_clone(skb, GFP_ATOMIC);
-		if (!skb_tmp)
+		if (!skb_tmp) {
+			batadv_hardif_put(hard_iface);
 			break;
+		}
 
 		batadv_v_ogm_send_to_if(skb_tmp, hard_iface);
+		batadv_hardif_put(hard_iface);
 	}
 	rcu_read_unlock();
 
@@ -234,73 +241,6 @@
 }
 
 /**
- * batadv_v_ogm_orig_update - update the originator status based on the received
- *  OGM
- * @bat_priv: the bat priv with all the soft interface information
- * @orig_node: the originator to update
- * @neigh_node: the neighbour the OGM has been received from (to update)
- * @ogm2: the received OGM
- * @if_outgoing: the interface where this OGM is going to be forwarded through
- */
-static void
-batadv_v_ogm_orig_update(struct batadv_priv *bat_priv,
-			 struct batadv_orig_node *orig_node,
-			 struct batadv_neigh_node *neigh_node,
-			 const struct batadv_ogm2_packet *ogm2,
-			 struct batadv_hard_iface *if_outgoing)
-{
-	struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL;
-	struct batadv_neigh_node *router = NULL;
-	s32 neigh_seq_diff;
-	u32 neigh_last_seqno;
-	u32 router_last_seqno;
-	u32 router_throughput, neigh_throughput;
-
-	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-		   "Searching and updating originator entry of received packet\n");
-
-	/* if this neighbor already is our next hop there is nothing
-	 * to change
-	 */
-	router = batadv_orig_router_get(orig_node, if_outgoing);
-	if (router == neigh_node)
-		goto out;
-
-	/* don't consider neighbours with worse throughput.
-	 * also switch route if this seqno is BATADV_V_MAX_ORIGDIFF newer than
-	 * the last received seqno from our best next hop.
-	 */
-	if (router) {
-		router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
-		neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
-
-		/* if these are not allocated, something is wrong. */
-		if (!router_ifinfo || !neigh_ifinfo)
-			goto out;
-
-		neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno;
-		router_last_seqno = router_ifinfo->bat_v.last_seqno;
-		neigh_seq_diff = neigh_last_seqno - router_last_seqno;
-		router_throughput = router_ifinfo->bat_v.throughput;
-		neigh_throughput = neigh_ifinfo->bat_v.throughput;
-
-		if ((neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF) &&
-		    (router_throughput >= neigh_throughput))
-			goto out;
-	}
-
-	batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
-
-out:
-	if (router_ifinfo)
-		batadv_neigh_ifinfo_put(router_ifinfo);
-	if (neigh_ifinfo)
-		batadv_neigh_ifinfo_put(neigh_ifinfo);
-	if (router)
-		batadv_neigh_node_put(router);
-}
-
-/**
  * batadv_v_forward_penalty - apply a penalty to the throughput metric forwarded
  *  with B.A.T.M.A.N. V OGMs
  * @bat_priv: the bat priv with all the soft interface information
@@ -347,10 +287,12 @@
 }
 
 /**
- * batadv_v_ogm_forward - forward an OGM to the given outgoing interface
+ * batadv_v_ogm_forward - check conditions and forward an OGM to the given
+ *  outgoing interface
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm_received: previously received OGM to be forwarded
- * @throughput: throughput to announce, may vary per outgoing interface
+ * @orig_node: the originator which has been updated
+ * @neigh_node: the neigh_node through which the OGM has been received
  * @if_incoming: the interface on which this OGM was received on
  * @if_outgoing: the interface to which the OGM has to be forwarded to
  *
@@ -359,28 +301,57 @@
  */
 static void batadv_v_ogm_forward(struct batadv_priv *bat_priv,
 				 const struct batadv_ogm2_packet *ogm_received,
-				 u32 throughput,
+				 struct batadv_orig_node *orig_node,
+				 struct batadv_neigh_node *neigh_node,
 				 struct batadv_hard_iface *if_incoming,
 				 struct batadv_hard_iface *if_outgoing)
 {
+	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
+	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
+	struct batadv_neigh_node *router = NULL;
 	struct batadv_ogm2_packet *ogm_forward;
 	unsigned char *skb_buff;
 	struct sk_buff *skb;
 	size_t packet_len;
 	u16 tvlv_len;
 
+	/* only forward for specific interfaces, not for the default one. */
+	if (if_outgoing == BATADV_IF_DEFAULT)
+		goto out;
+
+	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
+	if (!orig_ifinfo)
+		goto out;
+
+	/* acquire possibly updated router */
+	router = batadv_orig_router_get(orig_node, if_outgoing);
+
+	/* strict rule: forward packets coming from the best next hop only */
+	if (neigh_node != router)
+		goto out;
+
+	/* don't forward the same seqno twice on one interface */
+	if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm_received->seqno))
+		goto out;
+
+	orig_ifinfo->last_seqno_forwarded = ntohl(ogm_received->seqno);
+
 	if (ogm_received->ttl <= 1) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
-		return;
+		goto out;
 	}
 
+	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
+	if (!neigh_ifinfo)
+		goto out;
+
 	tvlv_len = ntohs(ogm_received->tvlv_len);
 
 	packet_len = BATADV_OGM2_HLEN + tvlv_len;
 	skb = netdev_alloc_skb_ip_align(if_outgoing->net_dev,
 					ETH_HLEN + packet_len);
 	if (!skb)
-		return;
+		goto out;
 
 	skb_reserve(skb, ETH_HLEN);
 	skb_buff = skb_put(skb, packet_len);
@@ -388,15 +359,23 @@
 
 	/* apply forward penalty */
 	ogm_forward = (struct batadv_ogm2_packet *)skb_buff;
-	ogm_forward->throughput = htonl(throughput);
+	ogm_forward->throughput = htonl(neigh_ifinfo->bat_v.throughput);
 	ogm_forward->ttl--;
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "Forwarding OGM2 packet on %s: throughput %u, ttl %u, received via %s\n",
-		   if_outgoing->net_dev->name, throughput, ogm_forward->ttl,
-		   if_incoming->net_dev->name);
+		   if_outgoing->net_dev->name, ntohl(ogm_forward->throughput),
+		   ogm_forward->ttl, if_incoming->net_dev->name);
 
 	batadv_v_ogm_send_to_if(skb, if_outgoing);
+
+out:
+	if (orig_ifinfo)
+		batadv_orig_ifinfo_put(orig_ifinfo);
+	if (router)
+		batadv_neigh_node_put(router);
+	if (neigh_ifinfo)
+		batadv_neigh_ifinfo_put(neigh_ifinfo);
 }
 
 /**
@@ -493,8 +472,10 @@
  * @neigh_node: the neigh_node through with the OGM has been received
  * @if_incoming: the interface where this packet was received
  * @if_outgoing: the interface for which the packet should be considered
+ *
+ * Return: true if the packet should be forwarded, false otherwise
  */
-static void batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
+static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
 				      const struct ethhdr *ethhdr,
 				      const struct batadv_ogm2_packet *ogm2,
 				      struct batadv_orig_node *orig_node,
@@ -503,14 +484,14 @@
 				      struct batadv_hard_iface *if_outgoing)
 {
 	struct batadv_neigh_node *router = NULL;
-	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
 	struct batadv_orig_node *orig_neigh_node = NULL;
-	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
 	struct batadv_neigh_node *orig_neigh_router = NULL;
-
-	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
-	if (!neigh_ifinfo)
-		goto out;
+	struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL;
+	u32 router_throughput, neigh_throughput;
+	u32 router_last_seqno;
+	u32 neigh_last_seqno;
+	s32 neigh_seq_diff;
+	bool forward = false;
 
 	orig_neigh_node = batadv_v_ogm_orig_get(bat_priv, ethhdr->h_source);
 	if (!orig_neigh_node)
@@ -529,47 +510,57 @@
 		goto out;
 	}
 
-	if (router)
-		batadv_neigh_node_put(router);
+	/* Mark the OGM to be considered for forwarding, and update routes
+	 * if needed.
+	 */
+	forward = true;
 
-	/* Update routes, and check if the OGM is from the best next hop */
-	batadv_v_ogm_orig_update(bat_priv, orig_node, neigh_node, ogm2,
-				 if_outgoing);
+	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+		   "Searching and updating originator entry of received packet\n");
 
-	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
-	if (!orig_ifinfo)
+	/* if this neighbor already is our next hop there is nothing
+	 * to change
+	 */
+	if (router == neigh_node)
 		goto out;
 
-	/* don't forward the same seqno twice on one interface */
-	if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm2->seqno))
-		goto out;
+	/* don't consider neighbours with worse throughput.
+	 * also switch route if this seqno is BATADV_OGM_MAX_ORIGDIFF newer than
+	 * the last received seqno from our best next hop.
+	 */
+	if (router) {
+		router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
+		neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
 
-	/* acquire possibly updated router */
-	router = batadv_orig_router_get(orig_node, if_outgoing);
+		/* if these are not allocated, something is wrong. */
+		if (!router_ifinfo || !neigh_ifinfo)
+			goto out;
 
-	/* strict rule: forward packets coming from the best next hop only */
-	if (neigh_node != router)
-		goto out;
+		neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno;
+		router_last_seqno = router_ifinfo->bat_v.last_seqno;
+		neigh_seq_diff = neigh_last_seqno - router_last_seqno;
+		router_throughput = router_ifinfo->bat_v.throughput;
+		neigh_throughput = neigh_ifinfo->bat_v.throughput;
 
-	/* only forward for specific interface, not for the default one. */
-	if (if_outgoing != BATADV_IF_DEFAULT) {
-		orig_ifinfo->last_seqno_forwarded = ntohl(ogm2->seqno);
-		batadv_v_ogm_forward(bat_priv, ogm2,
-				     neigh_ifinfo->bat_v.throughput,
-				     if_incoming, if_outgoing);
+		if ((neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF) &&
+		    (router_throughput >= neigh_throughput))
+			goto out;
 	}
 
+	batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
 out:
-	if (orig_ifinfo)
-		batadv_orig_ifinfo_put(orig_ifinfo);
 	if (router)
 		batadv_neigh_node_put(router);
 	if (orig_neigh_router)
 		batadv_neigh_node_put(orig_neigh_router);
 	if (orig_neigh_node)
 		batadv_orig_node_put(orig_neigh_node);
+	if (router_ifinfo)
+		batadv_neigh_ifinfo_put(router_ifinfo);
 	if (neigh_ifinfo)
 		batadv_neigh_ifinfo_put(neigh_ifinfo);
+
+	return forward;
 }
 
 /**
@@ -592,6 +583,7 @@
 			       struct batadv_hard_iface *if_outgoing)
 {
 	int seqno_age;
+	bool forward;
 
 	/* first, update the metric with according sanity checks */
 	seqno_age = batadv_v_ogm_metric_update(bat_priv, ogm2, orig_node,
@@ -610,8 +602,14 @@
 					       ntohs(ogm2->tvlv_len));
 
 	/* if the metric update went through, update routes if needed */
-	batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node,
-				  neigh_node, if_incoming, if_outgoing);
+	forward = batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node,
+					    neigh_node, if_incoming,
+					    if_outgoing);
+
+	/* if the routes have been processed correctly, check and forward */
+	if (forward)
+		batadv_v_ogm_forward(bat_priv, ogm2, orig_node, neigh_node,
+				     if_incoming, if_outgoing);
 }
 
 /**
@@ -713,9 +711,14 @@
 		if (hard_iface->soft_iface != bat_priv->soft_iface)
 			continue;
 
+		if (!kref_get_unless_zero(&hard_iface->refcount))
+			continue;
+
 		batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet,
 					       orig_node, neigh_node,
 					       if_incoming, hard_iface);
+
+		batadv_hardif_put(hard_iface);
 	}
 	rcu_read_unlock();
 out:
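
The route-update and forwarding split above boils down to two small decisions: keep the current router unless the sending neighbour is faster or its sequence numbers are much newer, and rebroadcast an OGM on an interface only when it came in via the selected router and carries a sequence number not yet forwarded there. A minimal userspace sketch of those two checks in batadv_v_ogm_route_update()/batadv_v_ogm_forward(), with illustrative names and an assumed protection window rather than the kernel's constants:

/* Illustrative userspace model, not the kernel API. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ORIGDIFF 5	/* assumed protection window, not the kernel value */

static bool should_switch_route(uint32_t router_seqno, uint32_t router_tp,
				uint32_t neigh_seqno, uint32_t neigh_tp)
{
	int32_t seq_diff = neigh_seqno - router_seqno;

	/* keep the old router while it is fresh enough and at least as fast */
	if (seq_diff < MAX_ORIGDIFF && router_tp >= neigh_tp)
		return false;
	return true;
}

static bool should_forward(int neigh_id, int router_id,
			   uint32_t seqno, uint32_t *last_forwarded)
{
	if (neigh_id != router_id)		/* best next hop only */
		return false;
	if (*last_forwarded == seqno)		/* no duplicates per interface */
		return false;
	*last_forwarded = seqno;
	return true;
}

int main(void)
{
	uint32_t last_forwarded = 0;

	printf("switch to faster neighbour: %d\n",
	       should_switch_route(100, 50, 101, 80));
	printf("forward first copy: %d\n",
	       should_forward(7, 7, 42, &last_forwarded));
	printf("forward duplicate:  %d\n",
	       should_forward(7, 7, 42, &last_forwarded));
	return 0;
}
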
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index b56bb00..a0c7913 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -38,11 +38,11 @@
  *  the last sequence number
  * @set_mark: whether this packet should be marked in seq_bits
  *
- * Return: 1 if the window was moved (either new or very old),
- *  0 if the window was not moved/shifted.
+ * Return: true if the window was moved (either new or very old),
+ *  false if the window was not moved/shifted.
  */
-int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
-			  int set_mark)
+bool batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+			   s32 seq_num_diff, int set_mark)
 {
 	struct batadv_priv *bat_priv = priv;
 
@@ -52,7 +52,7 @@
 	if (seq_num_diff <= 0 && seq_num_diff > -BATADV_TQ_LOCAL_WINDOW_SIZE) {
 		if (set_mark)
 			batadv_set_bit(seq_bits, -seq_num_diff);
-		return 0;
+		return false;
 	}
 
 	/* sequence number is slightly newer, so we shift the window and
@@ -63,7 +63,7 @@
 
 		if (set_mark)
 			batadv_set_bit(seq_bits, 0);
-		return 1;
+		return true;
 	}
 
 	/* sequence number is much newer, probably missed a lot of packets */
@@ -75,7 +75,7 @@
 		bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
 		if (set_mark)
 			batadv_set_bit(seq_bits, 0);
-		return 1;
+		return true;
 	}
 
 	/* received a much older packet. The other host either restarted
@@ -94,5 +94,5 @@
 	if (set_mark)
 		batadv_set_bit(seq_bits, 0);
 
-	return 1;
+	return true;
 }
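
For context on the bool conversion above, batadv_bit_get_packet() maintains a sliding window of recently seen sequence numbers: packets inside the old window only mark a bit (window unchanged, return false), while newer or wildly out-of-range packets shift or reset the window (return true). A simplified userspace model of that behaviour, assuming a plain 64-bit window instead of the kernel's bitmap helpers:

/* Simplified model; WINDOW and the branches are assumptions, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WINDOW 64

static bool window_get_packet(uint64_t *bits, int32_t diff, bool set_mark)
{
	if (diff <= 0 && diff > -WINDOW) {	/* within the old window */
		if (set_mark)
			*bits |= 1ULL << -diff;
		return false;
	}
	if (diff > 0 && diff < WINDOW) {	/* slightly newer: shift */
		*bits <<= diff;
		if (set_mark)
			*bits |= 1ULL;
		return true;
	}
	/* much newer or much older: start over with a fresh window */
	*bits = set_mark ? 1ULL : 0;
	return true;
}

int main(void)
{
	uint64_t bits = 0;

	printf("new seqno moved window: %d\n", window_get_packet(&bits, 1, true));
	printf("old seqno moved window: %d\n", window_get_packet(&bits, -3, true));
	printf("bitmap: %#llx\n", (unsigned long long)bits);
	return 0;
}
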
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 3e41bb8..0e6e9d0 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -22,6 +22,7 @@
 
 #include <linux/bitops.h>
 #include <linux/compiler.h>
+#include <linux/stddef.h>
 #include <linux/types.h>
 
 /**
@@ -31,17 +32,17 @@
  * @last_seqno: latest sequence number in seq_bits
  * @curr_seqno: sequence number to test for
  *
- * Return: 1 if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno. Otherwise returns 0.
+ * Return: true if the corresponding bit in the given seq_bits indicates true
+ * and curr_seqno is within range of last_seqno. Otherwise returns false.
  */
-static inline int batadv_test_bit(const unsigned long *seq_bits,
-				  u32 last_seqno, u32 curr_seqno)
+static inline bool batadv_test_bit(const unsigned long *seq_bits,
+				   u32 last_seqno, u32 curr_seqno)
 {
 	s32 diff;
 
 	diff = last_seqno - curr_seqno;
 	if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
-		return 0;
+		return false;
 	return test_bit(diff, seq_bits) != 0;
 }
 
@@ -55,7 +56,7 @@
 	set_bit(n, seq_bits); /* turn the position on */
 }
 
-int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
-			  int set_mark);
+bool batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+			   s32 seq_num_diff, int set_mark);
 
 #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 0a6c8b8..748a9ea 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -50,6 +50,7 @@
 #include "hash.h"
 #include "originator.h"
 #include "packet.h"
+#include "sysfs.h"
 #include "translation-table.h"
 
 static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
@@ -100,10 +101,10 @@
  * @node: list node of the first entry to compare
  * @data2: pointer to the second backbone gateway
  *
- * Return: 1 if the backbones have the same data, 0 otherwise
+ * Return: true if the backbones have the same data, false otherwise
  */
-static int batadv_compare_backbone_gw(const struct hlist_node *node,
-				      const void *data2)
+static bool batadv_compare_backbone_gw(const struct hlist_node *node,
+				       const void *data2)
 {
 	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
 					 hash_entry);
@@ -111,23 +112,23 @@
 	const struct batadv_bla_backbone_gw *gw2 = data2;
 
 	if (!batadv_compare_eth(gw1->orig, gw2->orig))
-		return 0;
+		return false;
 
 	if (gw1->vid != gw2->vid)
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /**
- * batadv_compare_backbone_gw - compare address and vid of two claims
+ * batadv_compare_claim - compare address and vid of two claims
  * @node: list node of the first entry to compare
  * @data2: pointer to the second claims
  *
- * Return: 1 if the claim have the same data, 0 otherwise
+ * Return: true if the claims have the same data, false otherwise
  */
-static int batadv_compare_claim(const struct hlist_node *node,
-				const void *data2)
+static bool batadv_compare_claim(const struct hlist_node *node,
+				 const void *data2)
 {
 	const void *data1 = container_of(node, struct batadv_bla_claim,
 					 hash_entry);
@@ -135,12 +136,12 @@
 	const struct batadv_bla_claim *cl2 = data2;
 
 	if (!batadv_compare_eth(cl1->addr, cl2->addr))
-		return 0;
+		return false;
 
 	if (cl1->vid != cl2->vid)
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /**
@@ -200,9 +201,9 @@
  *
  * Return: claim if found or NULL otherwise.
  */
-static struct batadv_bla_claim
-*batadv_claim_hash_find(struct batadv_priv *bat_priv,
-			struct batadv_bla_claim *data)
+static struct batadv_bla_claim *
+batadv_claim_hash_find(struct batadv_priv *bat_priv,
+		       struct batadv_bla_claim *data)
 {
 	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
 	struct hlist_head *head;
@@ -407,6 +408,14 @@
 			   ethhdr->h_source, ethhdr->h_dest,
 			   BATADV_PRINT_VID(vid));
 		break;
+	case BATADV_CLAIM_TYPE_LOOPDETECT:
+		ether_addr_copy(ethhdr->h_source, mac);
+		batadv_dbg(BATADV_DBG_BLA, bat_priv,
+			   "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n",
+			   ethhdr->h_source, ethhdr->h_dest,
+			   BATADV_PRINT_VID(vid));
+
+		break;
 	}
 
 	if (vid & BATADV_VLAN_HAS_TAG)
@@ -427,6 +436,36 @@
 }
 
 /**
+ * batadv_bla_loopdetect_report - worker for reporting the loop
+ * @work: work queue item
+ *
+ * Throws an uevent, as the loopdetect check function can't do that itself
+ * since the kernel may sleep while throwing uevents.
+ */
+static void batadv_bla_loopdetect_report(struct work_struct *work)
+{
+	struct batadv_bla_backbone_gw *backbone_gw;
+	struct batadv_priv *bat_priv;
+	char vid_str[6] = { '\0' };
+
+	backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
+				   report_work);
+	bat_priv = backbone_gw->bat_priv;
+
+	batadv_info(bat_priv->soft_iface,
+		    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
+		    BATADV_PRINT_VID(backbone_gw->vid));
+	snprintf(vid_str, sizeof(vid_str), "%d",
+		 BATADV_PRINT_VID(backbone_gw->vid));
+	vid_str[sizeof(vid_str) - 1] = 0;
+
+	batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
+			    vid_str);
+
+	batadv_backbone_gw_put(backbone_gw);
+}
+
+/**
  * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address of the originator
@@ -464,6 +503,7 @@
 	atomic_set(&entry->request_sent, 0);
 	atomic_set(&entry->wait_periods, 0);
 	ether_addr_copy(entry->orig, orig);
+	INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
 
 	/* one for the hash, one for returning */
 	kref_init(&entry->refcount);
@@ -735,22 +775,22 @@
  * @backbone_addr: originator address of the sender (Ethernet source MAC)
  * @vid: the VLAN ID of the frame
  *
- * Return: 1 if handled
+ * Return: true if handled
  */
-static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
-				  u8 *backbone_addr, unsigned short vid)
+static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
+				   u8 *backbone_addr, unsigned short vid)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 	u16 backbone_crc, crc;
 
 	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
-		return 0;
+		return false;
 
 	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
 						 false);
 
 	if (unlikely(!backbone_gw))
-		return 1;
+		return true;
 
 	/* handle as ANNOUNCE frame */
 	backbone_gw->lasttime = jiffies;
@@ -783,7 +823,7 @@
 	}
 
 	batadv_backbone_gw_put(backbone_gw);
-	return 1;
+	return true;
 }
 
 /**
@@ -794,29 +834,29 @@
  * @ethhdr: ethernet header of a packet
  * @vid: the VLAN ID of the frame
  *
- * Return: 1 if handled
+ * Return: true if handled
  */
-static int batadv_handle_request(struct batadv_priv *bat_priv,
-				 struct batadv_hard_iface *primary_if,
-				 u8 *backbone_addr, struct ethhdr *ethhdr,
-				 unsigned short vid)
+static bool batadv_handle_request(struct batadv_priv *bat_priv,
+				  struct batadv_hard_iface *primary_if,
+				  u8 *backbone_addr, struct ethhdr *ethhdr,
+				  unsigned short vid)
 {
 	/* check for REQUEST frame */
 	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
-		return 0;
+		return false;
 
 	/* sanity check, this should not happen on a normal switch,
 	 * we ignore it in this case.
 	 */
 	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
-		return 1;
+		return true;
 
 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
 		   BATADV_PRINT_VID(vid), ethhdr->h_source);
 
 	batadv_bla_answer_request(bat_priv, primary_if, vid);
-	return 1;
+	return true;
 }
 
 /**
@@ -827,12 +867,12 @@
  * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
  * @vid: the VLAN ID of the frame
  *
- * Return: 1 if handled
+ * Return: true if handled
  */
-static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
-				 struct batadv_hard_iface *primary_if,
-				 u8 *backbone_addr, u8 *claim_addr,
-				 unsigned short vid)
+static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
+				  struct batadv_hard_iface *primary_if,
+				  u8 *backbone_addr, u8 *claim_addr,
+				  unsigned short vid)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 
@@ -845,7 +885,7 @@
 	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
 
 	if (!backbone_gw)
-		return 1;
+		return true;
 
 	/* this must be an UNCLAIM frame */
 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
@@ -854,7 +894,7 @@
 
 	batadv_bla_del_claim(bat_priv, claim_addr, vid);
 	batadv_backbone_gw_put(backbone_gw);
-	return 1;
+	return true;
 }
 
 /**
@@ -865,12 +905,12 @@
  * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
  * @vid: the VLAN ID of the frame
  *
- * Return: 1 if handled
+ * Return: true if handled
  */
-static int batadv_handle_claim(struct batadv_priv *bat_priv,
-			       struct batadv_hard_iface *primary_if,
-			       u8 *backbone_addr, u8 *claim_addr,
-			       unsigned short vid)
+static bool batadv_handle_claim(struct batadv_priv *bat_priv,
+				struct batadv_hard_iface *primary_if,
+				u8 *backbone_addr, u8 *claim_addr,
+				unsigned short vid)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 
@@ -880,7 +920,7 @@
 						 false);
 
 	if (unlikely(!backbone_gw))
-		return 1;
+		return true;
 
 	/* this must be a CLAIM frame */
 	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
@@ -891,7 +931,7 @@
 	/* TODO: we could call something like tt_local_del() here. */
 
 	batadv_backbone_gw_put(backbone_gw);
-	return 1;
+	return true;
 }
 
 /**
@@ -975,12 +1015,12 @@
  * @primary_if: the primary hard interface of this batman soft interface
  * @skb: the frame to be checked
  *
- * Return: 1 if it was a claim frame, otherwise return 0 to
+ * Return: true if it was a claim frame, otherwise return false to
  * tell the callee that it can use the frame on its own.
  */
-static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
-				    struct batadv_hard_iface *primary_if,
-				    struct sk_buff *skb)
+static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
+				     struct batadv_hard_iface *primary_if,
+				     struct sk_buff *skb)
 {
 	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
 	u8 *hw_src, *hw_dst;
@@ -1011,7 +1051,7 @@
 			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
 						  &vhdr_buf);
 			if (!vhdr)
-				return 0;
+				return false;
 
 			proto = vhdr->h_vlan_encapsulated_proto;
 			headlen += VLAN_HLEN;
@@ -1020,12 +1060,12 @@
 	}
 
 	if (proto != htons(ETH_P_ARP))
-		return 0; /* not a claim frame */
+		return false; /* not a claim frame */
 
 	/* this must be a ARP frame. check if it is a claim. */
 
 	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
-		return 0;
+		return false;
 
 	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
 	ethhdr = eth_hdr(skb);
@@ -1035,13 +1075,13 @@
 	 * IP information
 	 */
 	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
-		return 0;
+		return false;
 	if (arphdr->ar_pro != htons(ETH_P_IP))
-		return 0;
+		return false;
 	if (arphdr->ar_hln != ETH_ALEN)
-		return 0;
+		return false;
 	if (arphdr->ar_pln != 4)
-		return 0;
+		return false;
 
 	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
 	hw_dst = hw_src + ETH_ALEN + 4;
@@ -1051,14 +1091,18 @@
 	/* check if it is a claim frame in general */
 	if (memcmp(bla_dst->magic, bla_dst_own->magic,
 		   sizeof(bla_dst->magic)) != 0)
-		return 0;
+		return false;
 
 	/* check if there is a claim frame encapsulated deeper in (QinQ) and
 	 * drop that, as this is not supported by BLA but should also not be
 	 * sent via the mesh.
 	 */
 	if (vlan_depth > 1)
-		return 1;
+		return true;
+
+	/* Let the loopdetect frames on the mesh in any case. */
+	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
+		return false;
 
 	/* check if it is a claim frame. */
 	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
@@ -1070,7 +1114,7 @@
 			   hw_dst);
 
 	if (ret < 2)
-		return ret;
+		return !!ret;
 
 	/* become a backbone gw ourselves on this vlan if not happened yet */
 	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
@@ -1080,30 +1124,30 @@
 	case BATADV_CLAIM_TYPE_CLAIM:
 		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
 					ethhdr->h_source, vid))
-			return 1;
+			return true;
 		break;
 	case BATADV_CLAIM_TYPE_UNCLAIM:
 		if (batadv_handle_unclaim(bat_priv, primary_if,
 					  ethhdr->h_source, hw_src, vid))
-			return 1;
+			return true;
 		break;
 
 	case BATADV_CLAIM_TYPE_ANNOUNCE:
 		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
 					   vid))
-			return 1;
+			return true;
 		break;
 	case BATADV_CLAIM_TYPE_REQUEST:
 		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
 					  vid))
-			return 1;
+			return true;
 		break;
 	}
 
 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
 		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
 		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
-	return 1;
+	return true;
 }
 
 /**
@@ -1265,6 +1309,26 @@
 }
 
 /**
+ * batadv_bla_send_loopdetect - send a loopdetect frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @backbone_gw: the backbone gateway for which a loop should be detected
+ *
+ * To detect loops that the bridge loop avoidance can't handle, send a loop
+ * detection packet on the backbone. Unlike other BLA frames, this frame will
+ * be allowed on the mesh by other nodes. If it is received on the mesh, this
+ * indicates that there is a loop.
+ */
+static void
+batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
+			   struct batadv_bla_backbone_gw *backbone_gw)
+{
+	batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
+		   backbone_gw->vid);
+	batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
+			      backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
+}
+
+/**
  * batadv_bla_status_update - purge bla interfaces if necessary
  * @net_dev: the soft interface net device
  */
@@ -1301,9 +1365,10 @@
 	struct batadv_bla_backbone_gw *backbone_gw;
 	struct batadv_hashtable *hash;
 	struct batadv_hard_iface *primary_if;
+	bool send_loopdetect = false;
 	int i;
 
-	delayed_work = container_of(work, struct delayed_work, work);
+	delayed_work = to_delayed_work(work);
 	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
 	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
 	primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -1316,6 +1381,22 @@
 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
 		goto out;
 
+	if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
+		/* set a new random mac address for the next bridge loop
+		 * detection frames. Set the locally administered bit to avoid
+		 * collisions with users' mac addresses.
+		 */
+		random_ether_addr(bat_priv->bla.loopdetect_addr);
+		bat_priv->bla.loopdetect_addr[0] = 0xba;
+		bat_priv->bla.loopdetect_addr[1] = 0xbe;
+		bat_priv->bla.loopdetect_lasttime = jiffies;
+		atomic_set(&bat_priv->bla.loopdetect_next,
+			   BATADV_BLA_LOOPDETECT_PERIODS);
+
+		/* mark for sending loop detect on all VLANs */
+		send_loopdetect = true;
+	}
+
 	hash = bat_priv->bla.backbone_hash;
 	if (!hash)
 		goto out;
@@ -1332,6 +1413,9 @@
 			backbone_gw->lasttime = jiffies;
 
 			batadv_bla_send_announce(bat_priv, backbone_gw);
+			if (send_loopdetect)
+				batadv_bla_send_loopdetect(bat_priv,
+							   backbone_gw);
 
 			/* request_sent is only set after creation to avoid
 			 * problems when we are not yet known as backbone gw
@@ -1405,6 +1489,9 @@
 		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
 	bat_priv->bla.bcast_duplist_curr = 0;
 
+	atomic_set(&bat_priv->bla.loopdetect_next,
+		   BATADV_BLA_LOOPDETECT_PERIODS);
+
 	if (bat_priv->bla.claim_hash)
 		return 0;
 
@@ -1442,15 +1529,16 @@
  * sent by another host, drop it. We allow equal packets from
  * the same host however as this might be intended.
  *
- * Return: 1 if a packet is in the duplicate list, 0 otherwise.
+ * Return: true if a packet is in the duplicate list, false otherwise.
  */
-int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
-				   struct sk_buff *skb)
+bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+				    struct sk_buff *skb)
 {
-	int i, curr, ret = 0;
+	int i, curr;
 	__be32 crc;
 	struct batadv_bcast_packet *bcast_packet;
 	struct batadv_bcast_duplist_entry *entry;
+	bool ret = false;
 
 	bcast_packet = (struct batadv_bcast_packet *)skb->data;
 
@@ -1478,9 +1566,9 @@
 			continue;
 
 		/* this entry seems to match: same crc, not too old,
-		 * and from another gw. therefore return 1 to forbid it.
+		 * and from another gw. therefore return true to forbid it.
 		 */
-		ret = 1;
+		ret = true;
 		goto out;
 	}
 	/* not found, add a new entry (overwrite the oldest entry)
@@ -1546,21 +1634,21 @@
  * @orig_node: the orig_node of the frame
  * @hdr_size: maximum length of the frame
  *
- * Return: 1 if the orig_node is also a gateway on the soft interface, otherwise
- * it returns 0.
+ * Return: true if the orig_node is also a gateway on the soft interface,
+ * otherwise it returns false.
  */
-int batadv_bla_is_backbone_gw(struct sk_buff *skb,
-			      struct batadv_orig_node *orig_node, int hdr_size)
+bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+			       struct batadv_orig_node *orig_node, int hdr_size)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 	unsigned short vid;
 
 	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
-		return 0;
+		return false;
 
 	/* first, find out the vid. */
 	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
-		return 0;
+		return false;
 
 	vid = batadv_get_vid(skb, hdr_size);
 
@@ -1568,14 +1656,14 @@
 	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
 						orig_node->orig, vid);
 	if (!backbone_gw)
-		return 0;
+		return false;
 
 	batadv_backbone_gw_put(backbone_gw);
-	return 1;
+	return true;
 }
 
 /**
- * batadv_bla_init - free all bla structures
+ * batadv_bla_free - free all bla structures
  * @bat_priv: the bat priv with all the soft interface information
  *
  * for softinterface free or module unload
@@ -1602,6 +1690,55 @@
 }
 
 /**
+ * batadv_bla_loopdetect_check - check and handle a detected loop
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the packet to check
+ * @primary_if: interface where the request came on
+ * @vid: the VLAN ID of the frame
+ *
+ * Checks if this packet is a loop detect frame which has been sent by us,
+ * throw an uevent and log the event if that is the case.
+ *
+ * Return: true if it is a loop detect frame which is to be dropped, false
+ * otherwise.
+ */
+static bool
+batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
+			    struct batadv_hard_iface *primary_if,
+			    unsigned short vid)
+{
+	struct batadv_bla_backbone_gw *backbone_gw;
+	struct ethhdr *ethhdr;
+
+	ethhdr = eth_hdr(skb);
+
+	/* Only check for the MAC address and skip more checks here for
+	 * performance reasons - this function is on the hotpath, after all.
+	 */
+	if (!batadv_compare_eth(ethhdr->h_source,
+				bat_priv->bla.loopdetect_addr))
+		return false;
+
+	/* If the packet came too late, don't forward it on the mesh
+	 * but don't consider that a loop. It might be a coincidence.
+	 */
+	if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
+				 BATADV_BLA_LOOPDETECT_TIMEOUT))
+		return true;
+
+	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
+						 primary_if->net_dev->dev_addr,
+						 vid, true);
+	if (unlikely(!backbone_gw))
+		return true;
+
+	queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+	/* backbone_gw is unreferenced in the report work function */
+
+	return true;
+}
+
+/**
  * batadv_bla_rx - check packets coming from the mesh.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
@@ -1614,16 +1751,16 @@
  *
  * in these cases, the skb is further handled by this function
  *
- * Return: 1 if handled, otherwise it returns 0 and the caller shall further
- * process the skb.
+ * Return: true if handled, otherwise it returns false and the caller shall
+ * further process the skb.
  */
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
-		  unsigned short vid, bool is_bcast)
+bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+		   unsigned short vid, bool is_bcast)
 {
 	struct ethhdr *ethhdr;
 	struct batadv_bla_claim search_claim, *claim = NULL;
 	struct batadv_hard_iface *primary_if;
-	int ret;
+	bool ret;
 
 	ethhdr = eth_hdr(skb);
 
@@ -1634,6 +1771,9 @@
 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
 		goto allow;
 
+	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
+		goto handled;
+
 	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
 		/* don't allow broadcasts while requests are in flight */
 		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
@@ -1682,12 +1822,12 @@
 	}
 allow:
 	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
-	ret = 0;
+	ret = false;
 	goto out;
 
 handled:
 	kfree_skb(skb);
-	ret = 1;
+	ret = true;
 
 out:
 	if (primary_if)
@@ -1711,16 +1851,16 @@
  *
  * This call might reallocate skb data.
  *
- * Return: 1 if handled, otherwise it returns 0 and the caller shall further
- * process the skb.
+ * Return: true if handled, otherwise it returns false and the caller shall
+ * further process the skb.
  */
-int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
-		  unsigned short vid)
+bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+		   unsigned short vid)
 {
 	struct ethhdr *ethhdr;
 	struct batadv_bla_claim search_claim, *claim = NULL;
 	struct batadv_hard_iface *primary_if;
-	int ret = 0;
+	bool ret = false;
 
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
@@ -1774,10 +1914,10 @@
 	}
 allow:
 	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
-	ret = 0;
+	ret = false;
 	goto out;
 handled:
-	ret = 1;
+	ret = true;
 out:
 	if (primary_if)
 		batadv_hardif_put(primary_if);
@@ -1815,8 +1955,8 @@
 		   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
 		   net_dev->name, primary_addr,
 		   ntohs(bat_priv->bla.claim_dest.group));
-	seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-6s)\n",
-		   "Client", "VID", "Originator", "CRC");
+	seq_puts(seq,
+		 "   Client               VID      Originator        [o] (CRC   )\n");
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
@@ -1873,8 +2013,7 @@
 		   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
 		   net_dev->name, primary_addr,
 		   ntohs(bat_priv->bla.claim_dest.group));
-	seq_printf(seq, "   %-17s    %-5s %-9s (%-6s)\n",
-		   "Originator", "VID", "last seen", "CRC");
+	seq_puts(seq, "   Originator           VID   last seen (CRC   )\n");
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
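
The loop-detection additions above follow a simple pattern: periodically pick a random, locally administered source MAC, send a claim-style probe towards the bridge, and treat any frame coming back from the mesh with that source address as evidence of a loop BLA cannot handle. A hedged userspace sketch of that idea (only the ba:be prefix is taken from the patch, the rest is illustrative):

/* Illustrative only; not the kernel's BLA implementation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static uint8_t loopdetect_addr[6];

static void pick_loopdetect_addr(void)
{
	for (int i = 0; i < 6; i++)
		loopdetect_addr[i] = (uint8_t)rand();
	loopdetect_addr[0] = 0xba;	/* locally administered, as in the patch */
	loopdetect_addr[1] = 0xbe;
}

/* called for every frame coming back from the mesh */
static bool loopdetect_check(const uint8_t *eth_src)
{
	return memcmp(eth_src, loopdetect_addr, 6) == 0;
}

int main(void)
{
	pick_loopdetect_addr();
	if (loopdetect_check(loopdetect_addr))
		puts("probe came back via the mesh: possible bridge loop");
	return 0;
}
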
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 579f0fa..0f01dae 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -27,19 +27,20 @@
 struct sk_buff;
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
-		  unsigned short vid, bool is_bcast);
-int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
-		  unsigned short vid);
-int batadv_bla_is_backbone_gw(struct sk_buff *skb,
-			      struct batadv_orig_node *orig_node, int hdr_size);
+bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+		   unsigned short vid, bool is_bcast);
+bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+		   unsigned short vid);
+bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+			       struct batadv_orig_node *orig_node,
+			       int hdr_size);
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
 					     void *offset);
 bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 				    unsigned short vid);
-int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
-				   struct sk_buff *skb);
+bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+				    struct sk_buff *skb);
 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *primary_if,
 				    struct batadv_hard_iface *oldif);
@@ -50,24 +51,24 @@
 #define BATADV_BLA_CRC_INIT	0
 #else /* ifdef CONFIG_BATMAN_ADV_BLA */
 
-static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
-				struct sk_buff *skb, unsigned short vid,
-				bool is_bcast)
+static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
+				 struct sk_buff *skb, unsigned short vid,
+				 bool is_bcast)
 {
-	return 0;
+	return false;
 }
 
-static inline int batadv_bla_tx(struct batadv_priv *bat_priv,
-				struct sk_buff *skb, unsigned short vid)
+static inline bool batadv_bla_tx(struct batadv_priv *bat_priv,
+				 struct sk_buff *skb, unsigned short vid)
 {
-	return 0;
+	return false;
 }
 
-static inline int batadv_bla_is_backbone_gw(struct sk_buff *skb,
-					    struct batadv_orig_node *orig_node,
-					    int hdr_size)
+static inline bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+					     struct batadv_orig_node *orig_node,
+					     int hdr_size)
 {
-	return 0;
+	return false;
 }
 
 static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
@@ -88,11 +89,11 @@
 	return false;
 }
 
-static inline int
+static inline bool
 batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 			       struct sk_buff *skb)
 {
-	return 0;
+	return false;
 }
 
 static inline void
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 48253cf..9529004 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -134,7 +134,7 @@
 	return 0;
 }
 
-static int batadv_log_empty(struct batadv_priv_debug_log *debug_log)
+static bool batadv_log_empty(struct batadv_priv_debug_log *debug_log)
 {
 	return !(debug_log->log_start - debug_log->log_end);
 }
@@ -365,14 +365,17 @@
 
 #define BATADV_DEBUGINFO(_name, _mode, _open)		\
 struct batadv_debuginfo batadv_debuginfo_##_name = {	\
-	.attr = { .name = __stringify(_name),		\
-		  .mode = _mode, },			\
-	.fops = { .owner = THIS_MODULE,			\
-		  .open = _open,			\
-		  .read	= seq_read,			\
-		  .llseek = seq_lseek,			\
-		  .release = single_release,		\
-		}					\
+	.attr = {					\
+		.name = __stringify(_name),		\
+		.mode = _mode,				\
+	},						\
+	.fops = {					\
+		.owner = THIS_MODULE,			\
+		.open = _open,				\
+		.read	= seq_read,			\
+		.llseek = seq_lseek,			\
+		.release = single_release,		\
+	},						\
 }
 
 /* the following attributes are general and therefore they will be directly
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index e96d7c7..278800a 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -152,7 +152,7 @@
 	struct batadv_priv_dat *priv_dat;
 	struct batadv_priv *bat_priv;
 
-	delayed_work = container_of(work, struct delayed_work, work);
+	delayed_work = to_delayed_work(work);
 	priv_dat = container_of(delayed_work, struct batadv_priv_dat, work);
 	bat_priv = container_of(priv_dat, struct batadv_priv, dat);
 
@@ -165,14 +165,14 @@
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Return: 1 if the two entries are the same, 0 otherwise.
+ * Return: true if the two entries are the same, false otherwise.
  */
-static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
+static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
 {
 	const void *data1 = container_of(node, struct batadv_dat_entry,
 					 hash_entry);
 
-	return memcmp(data1, data2, sizeof(__be32)) == 0 ? 1 : 0;
+	return memcmp(data1, data2, sizeof(__be32)) == 0;
 }
 
 /**
@@ -568,6 +568,7 @@
  * be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
+ * @vid: VLAN identifier
  *
  * An originator O is selected if and only if its DHT_ID value is one of three
  * closest values (from the LEFT, with wrap around if needed) then the hash
@@ -576,7 +577,8 @@
  * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
  */
 static struct batadv_dat_candidate *
-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
+			     unsigned short vid)
 {
 	int select;
 	batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
@@ -592,7 +594,7 @@
 		return NULL;
 
 	dat.ip = ip_dst;
-	dat.vid = 0;
+	dat.vid = vid;
 	ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
 						    BATADV_DAT_ADDR_MAX);
 
@@ -612,6 +614,7 @@
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @ip: the DHT key
+ * @vid: VLAN identifier
  * @packet_subtype: unicast4addr packet subtype to use
  *
  * This function copies the skb with pskb_copy() and is sent as unicast packet
@@ -622,7 +625,7 @@
  */
 static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
 				 struct sk_buff *skb, __be32 ip,
-				 int packet_subtype)
+				 unsigned short vid, int packet_subtype)
 {
 	int i;
 	bool ret = false;
@@ -631,7 +634,7 @@
 	struct sk_buff *tmp_skb;
 	struct batadv_dat_candidate *cand;
 
-	cand = batadv_dat_select_candidates(bat_priv, ip);
+	cand = batadv_dat_select_candidates(bat_priv, ip, vid);
 	if (!cand)
 		goto out;
 
@@ -717,7 +720,7 @@
 }
 
 /**
- * batadv_gw_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -814,8 +817,8 @@
 		goto out;
 
 	seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
-	seq_printf(seq, "          %-7s          %-9s %4s %11s\n", "IPv4",
-		   "MAC", "VID", "last-seen");
+	seq_puts(seq,
+		 "          IPv4             MAC        VID   last-seen\n");
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1022,7 +1025,7 @@
 		ret = true;
 	} else {
 		/* Send the request to the DHT */
-		ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
+		ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
 					   BATADV_P_DAT_DHT_GET);
 	}
 out:
@@ -1150,8 +1153,8 @@
 	/* Send the ARP reply to the candidates for both the IP addresses that
 	 * the node obtained from the ARP reply
 	 */
-	batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
-	batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
+	batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
+	batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
 }
 
 /**
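
The VID plumbing above matters because the DHT key used to pick DAT candidates previously hashed the IPv4 address with a fixed vid of 0, so the same address on two VLANs collided. A small sketch of the idea, using a stand-in mixing function instead of batadv_hash_dat():

/* Stand-in hash for illustration; field names are assumptions. */
#include <stdint.h>
#include <stdio.h>

struct dat_key {
	uint32_t ip;	/* network-order IPv4 address */
	uint16_t vid;	/* VLAN identifier */
};

static uint32_t dat_hash(const struct dat_key *key)
{
	uint32_t h = key->ip;

	h ^= (uint32_t)key->vid * 2654435761u;	/* mix the vid into the key */
	return h;
}

int main(void)
{
	struct dat_key a = { 0x0a000001, 10 };
	struct dat_key b = { 0x0a000001, 20 };

	printf("vid 10 -> %#x, vid 20 -> %#x\n",
	       (unsigned)dat_hash(&a), (unsigned)dat_hash(&b));
	return 0;
}
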
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index e6956d0..65536db 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -407,8 +407,8 @@
 					  unsigned int mtu)
 {
 	struct sk_buff *skb_fragment;
-	unsigned header_size = sizeof(*frag_head);
-	unsigned fragment_size = mtu - header_size;
+	unsigned int header_size = sizeof(*frag_head);
+	unsigned int fragment_size = mtu - header_size;
 
 	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
 	if (!skb_fragment)
@@ -444,15 +444,15 @@
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_frag_packet frag_header;
 	struct sk_buff *skb_fragment;
-	unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
-	unsigned header_size = sizeof(frag_header);
-	unsigned max_fragment_size, max_packet_size;
+	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
+	unsigned int header_size = sizeof(frag_header);
+	unsigned int max_fragment_size, max_packet_size;
 	bool ret = false;
 
 	/* To avoid merge and refragmentation at next-hops we never send
 	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
 	 */
-	mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
 	max_fragment_size = mtu - header_size;
 	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
 
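
The unsigned-to-unsigned-int cleanup above surrounds the fragment sizing logic, which is easy to follow with concrete numbers: clamp the MTU to the largest fragment batman-adv will emit, subtract the fragment header, and multiply by the fragment limit to get the largest packet that can still be fragmented. A worked example with assumed constants (the real values live in BATADV_FRAG_MAX_FRAG_SIZE and friends):

/* Assumed constants for illustration only. */
#include <stdio.h>

#define FRAG_MAX_FRAG_SIZE	1400	/* assumed per-fragment cap */
#define FRAG_MAX_FRAGMENTS	16	/* assumed fragment limit */
#define FRAG_HEADER_SIZE	20	/* assumed fragment header size */

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int max_fragment_size, max_packet_size;

	if (mtu > FRAG_MAX_FRAG_SIZE)
		mtu = FRAG_MAX_FRAG_SIZE;	/* never exceed the per-fragment cap */

	max_fragment_size = mtu - FRAG_HEADER_SIZE;
	max_packet_size = max_fragment_size * FRAG_MAX_FRAGMENTS;

	printf("fragment payload: %u bytes, largest packet: %u bytes\n",
	       max_fragment_size, max_packet_size);
	return 0;
}
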
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index c59aff5..5839c56 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -135,8 +135,8 @@
 
 	spin_lock_bh(&bat_priv->gw.list_lock);
 
-	if (new_gw_node && !kref_get_unless_zero(&new_gw_node->refcount))
-		new_gw_node = NULL;
+	if (new_gw_node)
+		kref_get(&new_gw_node->refcount);
 
 	curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
 	rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
@@ -440,15 +440,11 @@
 	if (gateway->bandwidth_down == 0)
 		return;
 
-	if (!kref_get_unless_zero(&orig_node->refcount))
-		return;
-
 	gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
-	if (!gw_node) {
-		batadv_orig_node_put(orig_node);
+	if (!gw_node)
 		return;
-	}
 
+	kref_get(&orig_node->refcount);
 	INIT_HLIST_NODE(&gw_node->list);
 	gw_node->orig_node = orig_node;
 	gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b22b277..8c2f399 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -36,7 +36,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
-#include <net/net_namespace.h>
 
 #include "bridge_loop_avoidance.h"
 #include "debugfs.h"
@@ -121,6 +120,7 @@
 static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
 {
 	struct net_device *parent_dev;
+	struct net *net = dev_net(net_dev);
 	bool ret;
 
 	/* check if this is a batman-adv mesh interface */
@@ -133,7 +133,7 @@
 		return false;
 
 	/* recurse over the parent device */
-	parent_dev = __dev_get_by_index(&init_net, dev_get_iflink(net_dev));
+	parent_dev = __dev_get_by_index(net, dev_get_iflink(net_dev));
 	/* if we got a NULL parent_dev there is something broken.. */
 	if (WARN(!parent_dev, "Cannot find parent device"))
 		return false;
@@ -146,22 +146,22 @@
 	return ret;
 }
 
-static int batadv_is_valid_iface(const struct net_device *net_dev)
+static bool batadv_is_valid_iface(const struct net_device *net_dev)
 {
 	if (net_dev->flags & IFF_LOOPBACK)
-		return 0;
+		return false;
 
 	if (net_dev->type != ARPHRD_ETHER)
-		return 0;
+		return false;
 
 	if (net_dev->addr_len != ETH_ALEN)
-		return 0;
+		return false;
 
 	/* no batman over batman */
 	if (batadv_is_on_batman_iface(net_dev))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /**
@@ -236,8 +236,8 @@
 
 	ASSERT_RTNL();
 
-	if (new_hard_iface && !kref_get_unless_zero(&new_hard_iface->refcount))
-		new_hard_iface = NULL;
+	if (new_hard_iface)
+		kref_get(&new_hard_iface->refcount);
 
 	curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
 	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
@@ -407,6 +407,9 @@
 
 	batadv_update_min_mtu(hard_iface->soft_iface);
 
+	if (bat_priv->bat_algo_ops->bat_iface_activate)
+		bat_priv->bat_algo_ops->bat_iface_activate(hard_iface);
+
 out:
 	if (primary_if)
 		batadv_hardif_put(primary_if);
@@ -453,7 +456,7 @@
 }
 
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
-				   const char *iface_name)
+				   struct net *net, const char *iface_name)
 {
 	struct batadv_priv *bat_priv;
 	struct net_device *soft_iface, *master;
@@ -464,13 +467,12 @@
 	if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
 		goto out;
 
-	if (!kref_get_unless_zero(&hard_iface->refcount))
-		goto out;
+	kref_get(&hard_iface->refcount);
 
-	soft_iface = dev_get_by_name(&init_net, iface_name);
+	soft_iface = dev_get_by_name(net, iface_name);
 
 	if (!soft_iface) {
-		soft_iface = batadv_softif_create(iface_name);
+		soft_iface = batadv_softif_create(net, iface_name);
 
 		if (!soft_iface) {
 			ret = -ENOMEM;
@@ -519,6 +521,7 @@
 		goto err_upper;
 	}
 
+	kref_get(&hard_iface->refcount);
 	hard_iface->batman_adv_ptype.type = ethertype;
 	hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
 	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
@@ -572,8 +575,7 @@
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hard_iface *primary_if = NULL;
 
-	if (hard_iface->if_status == BATADV_IF_ACTIVE)
-		batadv_hardif_deactivate_interface(hard_iface);
+	batadv_hardif_deactivate_interface(hard_iface);
 
 	if (hard_iface->if_status != BATADV_IF_INACTIVE)
 		goto out;
@@ -581,6 +583,7 @@
 	batadv_info(hard_iface->soft_iface, "Removing interface: %s\n",
 		    hard_iface->net_dev->name);
 	dev_remove_pack(&hard_iface->batman_adv_ptype);
+	batadv_hardif_put(hard_iface);
 
 	bat_priv->num_ifaces--;
 	batadv_orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
@@ -650,8 +653,7 @@
 
 	ASSERT_RTNL();
 
-	ret = batadv_is_valid_iface(net_dev);
-	if (ret != 1)
+	if (!batadv_is_valid_iface(net_dev))
 		goto out;
 
 	dev_hold(net_dev);
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index d74f198..a76724d 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 
 struct net_device;
+struct net;
 
 enum batadv_hard_if_state {
 	BATADV_IF_NOT_IN_USE,
@@ -55,7 +56,7 @@
 struct batadv_hard_iface*
 batadv_hardif_get_by_netdev(const struct net_device *net_dev);
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
-				   const char *iface_name);
+				   struct net *net, const char *iface_name);
 void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
 				     enum batadv_hard_if_cleanup autodel);
 void batadv_hardif_remove_interfaces(void);
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 9bb57b8..cbbf870 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -32,10 +32,10 @@
 /* callback to a compare function.  should compare 2 element datas for their
  * keys
  *
- * Return: 0 if same and not 0 if not same
+ * Return: true if same and false if not same
  */
-typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *,
-					  const void *);
+typedef bool (*batadv_hashdata_compare_cb)(const struct hlist_node *,
+					   const void *);
 
 /* the hashfunction
  *
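
The typedef change above turns the hash compare callback into a predicate. A toy example of such a callback, with made-up entry fields, shows the shape the per-subsystem compare functions (backbone gateways, claims, DAT entries, originators) now share:

/* Toy example; struct layout and names are not from the kernel. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct entry {
	unsigned char addr[6];
	unsigned short vid;
};

typedef bool (*compare_cb)(const struct entry *, const struct entry *);

static bool compare_entry(const struct entry *a, const struct entry *b)
{
	if (memcmp(a->addr, b->addr, sizeof(a->addr)) != 0)
		return false;
	return a->vid == b->vid;
}

int main(void)
{
	struct entry a = { { 0xba, 0xbe, 0, 0, 0, 1 }, 10 };
	struct entry b = a;
	compare_cb cmp = compare_entry;

	printf("same entry: %s\n", cmp(&a, &b) ? "true" : "false");
	return 0;
}
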
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 14d0013..777aea1 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -104,25 +104,21 @@
 
 static int batadv_socket_release(struct inode *inode, struct file *file)
 {
-	struct batadv_socket_client *socket_client = file->private_data;
-	struct batadv_socket_packet *socket_packet;
-	struct list_head *list_pos, *list_pos_tmp;
+	struct batadv_socket_client *client = file->private_data;
+	struct batadv_socket_packet *packet, *tmp;
 
-	spin_lock_bh(&socket_client->lock);
+	spin_lock_bh(&client->lock);
 
 	/* for all packets in the queue ... */
-	list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
-		socket_packet = list_entry(list_pos,
-					   struct batadv_socket_packet, list);
-
-		list_del(list_pos);
-		kfree(socket_packet);
+	list_for_each_entry_safe(packet, tmp, &client->queue_list, list) {
+		list_del(&packet->list);
+		kfree(packet);
 	}
 
-	batadv_socket_client_hash[socket_client->index] = NULL;
-	spin_unlock_bh(&socket_client->lock);
+	batadv_socket_client_hash[client->index] = NULL;
+	spin_unlock_bh(&client->lock);
 
-	kfree(socket_client);
+	kfree(client);
 	module_put(THIS_MODULE);
 
 	return 0;
@@ -337,7 +333,7 @@
 }
 
 /**
- * batadv_socket_receive_packet - schedule an icmp packet to be sent to
+ * batadv_socket_add_packet - schedule an icmp packet to be sent to
  *  userspace on an icmp socket.
  * @socket_client: the socket this packet belongs to
  * @icmph: pointer to the header of the icmp packet
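
The socket-release rewrite above switches to list_for_each_entry_safe(), the idiom for freeing elements while walking a list. The same pattern in plain userspace C, remembering the next pointer before each free():

/* Plain C illustration of the "safe" iteration idiom. */
#include <stdio.h>
#include <stdlib.h>

struct packet {
	int id;
	struct packet *next;
};

int main(void)
{
	struct packet *head = NULL, *p, *tmp;

	for (int i = 0; i < 3; i++) {		/* build a small queue */
		p = malloc(sizeof(*p));
		p->id = i;
		p->next = head;
		head = p;
	}

	for (p = head; p; p = tmp) {		/* remember next before freeing */
		tmp = p->next;
		printf("freeing queued packet %d\n", p->id);
		free(p);
	}
	return 0;
}
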
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index d64ddb9..5f2974b 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -401,11 +401,19 @@
 
 	hard_iface = container_of(ptype, struct batadv_hard_iface,
 				  batman_adv_ptype);
+
+	/* Prevent processing a packet received on an interface which is getting
+	 * shut down; otherwise the packet may trigger dereference errors
+	 * further down in the receive path.
+	 */
+	if (!kref_get_unless_zero(&hard_iface->refcount))
+		goto err_out;
+
 	skb = skb_share_check(skb, GFP_ATOMIC);
 
 	/* skb was released by skb_share_check() */
 	if (!skb)
-		goto err_out;
+		goto err_put;
 
 	/* packet should hold at least type and version */
 	if (unlikely(!pskb_may_pull(skb, 2)))
@@ -448,6 +456,8 @@
 	if (ret == NET_RX_DROP)
 		kfree_skb(skb);
 
+	batadv_hardif_put(hard_iface);
+
 	/* return NET_RX_SUCCESS in any case as we
 	 * most probably dropped the packet for
 	 * routing-logical reasons.
@@ -456,6 +466,8 @@
 
 err_free:
 	kfree_skb(skb);
+err_put:
+	batadv_hardif_put(hard_iface);
 err_out:
 	return NET_RX_DROP;
 }
@@ -663,8 +675,8 @@
  *
  * Return: tvlv handler if found or NULL otherwise.
  */
-static struct batadv_tvlv_handler
-*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
+static struct batadv_tvlv_handler *
+batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 {
 	struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
 
@@ -722,8 +734,8 @@
  *
  * Return: tvlv container if found or NULL otherwise.
  */
-static struct batadv_tvlv_container
-*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
+static struct batadv_tvlv_container *
+batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 {
 	struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
 
@@ -736,9 +748,7 @@
 		if (tvlv_tmp->tvlv_hdr.version != version)
 			continue;
 
-		if (!kref_get_unless_zero(&tvlv_tmp->refcount))
-			continue;
-
+		kref_get(&tvlv_tmp->refcount);
 		tvlv = tvlv_tmp;
 		break;
 	}
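
The receive-path change above relies on kref_get_unless_zero(): a reference is only taken while the count is still non-zero, so packets arriving on an interface that is already being torn down are dropped instead of dereferencing a dying object. A non-atomic userspace model of that guard (the real kref API is atomic and lives in the kernel):

/* Non-atomic model for illustration only. */
#include <stdbool.h>
#include <stdio.h>

struct iface {
	int refcount;
};

static bool get_unless_zero(struct iface *iface)
{
	if (iface->refcount == 0)
		return false;	/* object is going away, do not touch it */
	iface->refcount++;
	return true;
}

static void put_ref(struct iface *iface)
{
	iface->refcount--;
}

int main(void)
{
	struct iface live = { .refcount = 1 };
	struct iface dying = { .refcount = 0 };

	if (get_unless_zero(&live)) {
		puts("processing packet on live interface");
		put_ref(&live);
	}
	if (!get_unless_zero(&dying))
		puts("dropping packet: interface is shutting down");
	return 0;
}
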
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index db45336..76925266 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2016.1"
+#define BATADV_SOURCE_VERSION "2016.2"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -120,6 +120,8 @@
 #define BATADV_BLA_BACKBONE_TIMEOUT	(BATADV_BLA_PERIOD_LENGTH * 6)
 #define BATADV_BLA_CLAIM_TIMEOUT	(BATADV_BLA_PERIOD_LENGTH * 10)
 #define BATADV_BLA_WAIT_PERIODS		3
+#define BATADV_BLA_LOOPDETECT_PERIODS	6
+#define BATADV_BLA_LOOPDETECT_TIMEOUT	3000	/* 3 seconds */
 
 #define BATADV_DUPLIST_SIZE		16
 #define BATADV_DUPLIST_TIMEOUT		500	/* 500 ms */
@@ -142,10 +144,12 @@
 	BATADV_UEV_ADD = 0,
 	BATADV_UEV_DEL,
 	BATADV_UEV_CHANGE,
+	BATADV_UEV_LOOPDETECT,
 };
 
 enum batadv_uev_type {
 	BATADV_UEV_GW = 0,
+	BATADV_UEV_BLA,
 };
 
 #define BATADV_GW_THRESHOLD	50
@@ -288,7 +292,7 @@
  *
  * note: can't use ether_addr_equal() as it requires aligned memory
  *
- * Return: 1 if they are the same ethernet addr
+ * Return: true if they are the same ethernet addr
  */
 static inline bool batadv_compare_eth(const void *data1, const void *data2)
 {
@@ -296,7 +300,8 @@
 }
 
 /**
- * has_timed_out - compares current time (jiffies) and timestamp + timeout
+ * batadv_has_timed_out - compares current time (jiffies) and timestamp +
+ *  timeout
  * @timestamp:		base value to compare with (in jiffies)
  * @timeout:		added to base value before comparing (in milliseconds)
  *
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 8caa2c7..c32f24f 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -394,7 +394,8 @@
 }
 
 /**
- * batadv_mcast_want_all_ip_count - count nodes with unspecific mcast interest
+ * batadv_mcast_forw_want_all_ip_count - count nodes with unspecific mcast
+ *  interest
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: ethernet header of a packet
  *
@@ -433,7 +434,7 @@
 }
 
 /**
- * batadv_mcast_want_forw_ipv4_node_get - get a node with an ipv4 flag
+ * batadv_mcast_forw_ipv4_node_get - get a node with an ipv4 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
@@ -460,7 +461,7 @@
 }
 
 /**
- * batadv_mcast_want_forw_ipv6_node_get - get a node with an ipv6 flag
+ * batadv_mcast_forw_ipv6_node_get - get a node with an ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
@@ -487,7 +488,7 @@
 }
 
 /**
- * batadv_mcast_want_forw_ip_node_get - get a node with an ipv4/ipv6 flag
+ * batadv_mcast_forw_ip_node_get - get a node with an ipv4/ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: an ethernet header to determine the protocol family from
  *
@@ -511,7 +512,7 @@
 }
 
 /**
- * batadv_mcast_want_forw_unsnoop_node_get - get a node with an unsnoopable flag
+ * batadv_mcast_forw_unsnoop_node_get - get a node with an unsnoopable flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index b41719b..678f068 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -510,10 +510,10 @@
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Return: 1 if the two entry are the same, 0 otherwise
+ * Return: true if the two entries are the same, false otherwise
  */
-static int batadv_nc_hash_compare(const struct hlist_node *node,
-				  const void *data2)
+static bool batadv_nc_hash_compare(const struct hlist_node *node,
+				   const void *data2)
 {
 	const struct batadv_nc_path *nc_path1, *nc_path2;
 
@@ -521,15 +521,13 @@
 	nc_path2 = data2;
 
 	/* Return 1 if the two keys are identical */
-	if (memcmp(nc_path1->prev_hop, nc_path2->prev_hop,
-		   sizeof(nc_path1->prev_hop)) != 0)
-		return 0;
+	if (!batadv_compare_eth(nc_path1->prev_hop, nc_path2->prev_hop))
+		return false;
 
-	if (memcmp(nc_path1->next_hop, nc_path2->next_hop,
-		   sizeof(nc_path1->next_hop)) != 0)
-		return 0;
+	if (!batadv_compare_eth(nc_path1->next_hop, nc_path2->next_hop))
+		return false;
 
-	return 1;
+	return true;
 }
 
 /**
@@ -714,7 +712,7 @@
 	struct batadv_priv *bat_priv;
 	unsigned long timeout;
 
-	delayed_work = container_of(work, struct delayed_work, work);
+	delayed_work = to_delayed_work(work);
 	priv_nc = container_of(delayed_work, struct batadv_priv_nc, work);
 	bat_priv = container_of(priv_nc, struct batadv_priv, nc);
 
@@ -793,10 +791,10 @@
  *
  * Return: the nc_node if found, NULL otherwise.
  */
-static struct batadv_nc_node
-*batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
-			struct batadv_orig_node *orig_neigh_node,
-			bool in_coding)
+static struct batadv_nc_node *
+batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
+		       struct batadv_orig_node *orig_neigh_node,
+		       bool in_coding)
 {
 	struct batadv_nc_node *nc_node, *nc_node_out = NULL;
 	struct list_head *list;
@@ -835,11 +833,11 @@
  *
  * Return: the nc_node if found or created, NULL in case of an error.
  */
-static struct batadv_nc_node
-*batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
-		       struct batadv_orig_node *orig_node,
-		       struct batadv_orig_node *orig_neigh_node,
-		       bool in_coding)
+static struct batadv_nc_node *
+batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
+		      struct batadv_orig_node *orig_node,
+		      struct batadv_orig_node *orig_neigh_node,
+		      bool in_coding)
 {
 	struct batadv_nc_node *nc_node;
 	spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
@@ -856,8 +854,7 @@
 	if (!nc_node)
 		return NULL;
 
-	if (!kref_get_unless_zero(&orig_neigh_node->refcount))
-		goto free;
+	kref_get(&orig_neigh_node->refcount);
 
 	/* Initialize nc_node */
 	INIT_LIST_HEAD(&nc_node->list);
@@ -884,10 +881,6 @@
 	spin_unlock_bh(lock);
 
 	return nc_node;
-
-free:
-	kfree(nc_node);
-	return NULL;
 }
 
 /**
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index e4cbb07..1ff4ee4 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -54,9 +54,9 @@
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Return: 1 if they are the same originator
+ * Return: true if they are the same originator
  */
-int batadv_compare_orig(const struct hlist_node *node, const void *data2)
+bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
 {
 	const void *data1 = container_of(node, struct batadv_orig_node,
 					 hash_entry);
@@ -250,7 +250,6 @@
 {
 	struct hlist_node *node_tmp;
 	struct batadv_neigh_node *neigh_node;
-	struct batadv_hardif_neigh_node *hardif_neigh;
 	struct batadv_neigh_ifinfo *neigh_ifinfo;
 	struct batadv_algo_ops *bao;
 
@@ -262,13 +261,7 @@
 		batadv_neigh_ifinfo_put(neigh_ifinfo);
 	}
 
-	hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
-					       neigh_node->addr);
-	if (hardif_neigh) {
-		/* batadv_hardif_neigh_get() increases refcount too */
-		batadv_hardif_neigh_put(hardif_neigh);
-		batadv_hardif_neigh_put(hardif_neigh);
-	}
+	batadv_hardif_neigh_put(neigh_node->hardif_neigh);
 
 	if (bao->bat_neigh_free)
 		bao->bat_neigh_free(neigh_node);
@@ -289,7 +282,7 @@
 }
 
 /**
- * batadv_orig_node_get_router - router to the originator depending on iface
+ * batadv_orig_router_get - router to the originator depending on iface
  * @orig_node: the orig node for the router
  * @if_outgoing: the interface where the payload packet has been received or
  *  the OGM should be sent to
@@ -381,12 +374,8 @@
 	if (!orig_ifinfo)
 		goto out;
 
-	if (if_outgoing != BATADV_IF_DEFAULT &&
-	    !kref_get_unless_zero(&if_outgoing->refcount)) {
-		kfree(orig_ifinfo);
-		orig_ifinfo = NULL;
-		goto out;
-	}
+	if (if_outgoing != BATADV_IF_DEFAULT)
+		kref_get(&if_outgoing->refcount);
 
 	reset_time = jiffies - 1;
 	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
@@ -462,11 +451,8 @@
 	if (!neigh_ifinfo)
 		goto out;
 
-	if (if_outgoing && !kref_get_unless_zero(&if_outgoing->refcount)) {
-		kfree(neigh_ifinfo);
-		neigh_ifinfo = NULL;
-		goto out;
-	}
+	if (if_outgoing)
+		kref_get(&if_outgoing->refcount);
 
 	INIT_HLIST_NODE(&neigh_ifinfo->list);
 	kref_init(&neigh_ifinfo->refcount);
@@ -539,15 +525,11 @@
 	if (hardif_neigh)
 		goto out;
 
-	if (!kref_get_unless_zero(&hard_iface->refcount))
-		goto out;
-
 	hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
-	if (!hardif_neigh) {
-		batadv_hardif_put(hard_iface);
+	if (!hardif_neigh)
 		goto out;
-	}
 
+	kref_get(&hard_iface->refcount);
 	INIT_HLIST_NODE(&hardif_neigh->list);
 	ether_addr_copy(hardif_neigh->addr, neigh_addr);
 	hardif_neigh->if_incoming = hard_iface;
@@ -650,19 +632,19 @@
 	if (!neigh_node)
 		goto out;
 
-	if (!kref_get_unless_zero(&hard_iface->refcount)) {
-		kfree(neigh_node);
-		neigh_node = NULL;
-		goto out;
-	}
-
 	INIT_HLIST_NODE(&neigh_node->list);
 	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
 	spin_lock_init(&neigh_node->ifinfo_lock);
 
+	kref_get(&hard_iface->refcount);
 	ether_addr_copy(neigh_node->addr, neigh_addr);
 	neigh_node->if_incoming = hard_iface;
 	neigh_node->orig_node = orig_node;
+	neigh_node->last_seen = jiffies;
+
+	/* increment unique neighbor refcount */
+	kref_get(&hardif_neigh->refcount);
+	neigh_node->hardif_neigh = hardif_neigh;
 
 	/* extra reference for return */
 	kref_init(&neigh_node->refcount);
@@ -672,9 +654,6 @@
 	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
 	spin_unlock_bh(&orig_node->neigh_list_lock);
 
-	/* increment unique neighbor refcount */
-	kref_get(&hardif_neigh->refcount);
-
 	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
 		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
 		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
@@ -1165,6 +1144,9 @@
 		if (hard_iface->soft_iface != bat_priv->soft_iface)
 			continue;
 
+		if (!kref_get_unless_zero(&hard_iface->refcount))
+			continue;
+
 		best_neigh_node = batadv_find_best_neighbor(bat_priv,
 							    orig_node,
 							    hard_iface);
@@ -1172,6 +1154,8 @@
 				    best_neigh_node);
 		if (best_neigh_node)
 			batadv_neigh_node_put(best_neigh_node);
+
+		batadv_hardif_put(hard_iface);
 	}
 	rcu_read_unlock();
 
@@ -1222,7 +1206,7 @@
 	struct delayed_work *delayed_work;
 	struct batadv_priv *bat_priv;
 
-	delayed_work = container_of(work, struct delayed_work, work);
+	delayed_work = to_delayed_work(work);
 	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
 	_batadv_purge_orig(bat_priv);
 	queue_delayed_work(batadv_event_workqueue,
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 4e8b67f..64a8951 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -33,7 +33,7 @@
 
 struct seq_file;
 
-int batadv_compare_orig(const struct hlist_node *node, const void *data2);
+bool batadv_compare_orig(const struct hlist_node *node, const void *data2);
 int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 8a8d7ca..372128d 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -175,6 +175,7 @@
 	BATADV_CLAIM_TYPE_UNCLAIM	= 0x01,
 	BATADV_CLAIM_TYPE_ANNOUNCE	= 0x02,
 	BATADV_CLAIM_TYPE_REQUEST	= 0x03,
+	BATADV_CLAIM_TYPE_LOOPDETECT	= 0x04,
 };
 
 /**
@@ -501,7 +502,7 @@
 #pragma pack()
 
 /**
- * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
+ * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
  * @packet_type: batman-adv packet type, part of the general header
 * @version: batman-adv protocol version, part of the general header
 * @ttl: time to live for this packet, part of the general header
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 4dd646a..ae850f2 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -100,11 +100,20 @@
 	if (curr_router)
 		batadv_neigh_node_put(curr_router);
 
-	/* increase refcount of new best neighbor */
-	if (neigh_node && !kref_get_unless_zero(&neigh_node->refcount))
-		neigh_node = NULL;
-
 	spin_lock_bh(&orig_node->neigh_list_lock);
+	/* curr_router used earlier may not be the current orig_ifinfo->router
+	 * anymore because it was dereferenced outside of the neigh_list_lock
+	 * protected region. After the new best neighbor has replaced the current
+	 * best neighbor, the reference counter needs to be decreased. Consequently,
+	 * the code needs to ensure the curr_router variable contains a pointer
+	 * to the replaced best neighbor.
+	 */
+	curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
+
+	/* increase refcount of new best neighbor */
+	if (neigh_node)
+		kref_get(&neigh_node->refcount);
+
 	rcu_assign_pointer(orig_ifinfo->router, neigh_node);
 	spin_unlock_bh(&orig_node->neigh_list_lock);
 	batadv_orig_ifinfo_put(orig_ifinfo);
@@ -154,18 +163,18 @@
  *   doesn't change otherwise.
  *
  * Return:
- *  0 if the packet is to be accepted.
- *  1 if the packet is to be ignored.
+ *  false if the packet is to be accepted.
+ *  true if the packet is to be ignored.
  */
-int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
-			    s32 seq_old_max_diff, unsigned long *last_reset,
-			    bool *protection_started)
+bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
+			     s32 seq_old_max_diff, unsigned long *last_reset,
+			     bool *protection_started)
 {
 	if (seq_num_diff <= -seq_old_max_diff ||
 	    seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
 		if (!batadv_has_timed_out(*last_reset,
 					  BATADV_RESET_PROTECTION_MS))
-			return 1;
+			return true;
 
 		*last_reset = jiffies;
 		if (protection_started)
@@ -174,7 +183,7 @@
 			   "old packet received, start protection\n");
 	}
 
-	return 0;
+	return false;
 }
 
 bool batadv_check_management_packet(struct sk_buff *skb,
@@ -709,8 +718,9 @@
 	return ret;
 }
 
-static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
-				     struct sk_buff *skb, int hdr_len) {
+static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+				      struct sk_buff *skb, int hdr_len)
+{
 	struct batadv_unicast_packet *unicast_packet;
 	struct batadv_hard_iface *primary_if;
 	struct batadv_orig_node *orig_node;
@@ -721,11 +731,11 @@
 
 	/* check if there is enough data before accessing it */
 	if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
-		return 0;
+		return false;
 
 	/* create a copy of the skb (in case of re-routing) to modify it. */
 	if (skb_cow(skb, sizeof(*unicast_packet)) < 0)
-		return 0;
+		return false;
 
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 	vid = batadv_get_vid(skb, hdr_len);
@@ -749,7 +759,7 @@
 		 * table. If not, let the packet go untouched anyway because
 		 * there is nothing the node can do
 		 */
-		return 1;
+		return true;
 	}
 
 	/* retrieve the TTVN known by this node for the packet destination. This
@@ -765,7 +775,7 @@
 		 * not be possible to deliver it
 		 */
 		if (!orig_node)
-			return 0;
+			return false;
 
 		curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
 		batadv_orig_node_put(orig_node);
@@ -776,7 +786,7 @@
 	 */
 	is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
 	if (!is_old_ttvn)
-		return 1;
+		return true;
 
 	old_ttvn = unicast_packet->ttvn;
 	/* the packet was forged based on outdated network information. Its
@@ -789,7 +799,7 @@
 				       "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
 				       unicast_packet->dest, ethhdr->h_dest,
 				       old_ttvn, curr_ttvn);
-		return 1;
+		return true;
 	}
 
 	/* the packet has not been re-routed: either the destination is
@@ -797,14 +807,14 @@
 	 * it is possible to drop the packet
 	 */
 	if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
-		return 0;
+		return false;
 
 	/* update the header in order to let the packet be delivered to this
 	 * node's soft interface
 	 */
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
-		return 0;
+		return false;
 
 	ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
 
@@ -812,7 +822,7 @@
 
 	unicast_packet->ttvn = curr_ttvn;
 
-	return 1;
+	return true;
 }
 
 /**
@@ -903,7 +913,7 @@
 							hdr_size))
 			goto rx_success;
 
-		batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
+		batadv_interface_rx(recv_if->soft_iface, skb, hdr_size,
 				    orig_node);
 
 rx_success:
@@ -1113,8 +1123,7 @@
 		goto rx_success;
 
 	/* broadcast for me */
-	batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size,
-			    orig_node);
+	batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node);
 
 rx_success:
 	ret = NET_RX_SUCCESS;
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 02a5caa..05c3ff4 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -51,8 +51,8 @@
 batadv_find_router(struct batadv_priv *bat_priv,
 		   struct batadv_orig_node *orig_node,
 		   struct batadv_hard_iface *recv_if);
-int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
-			    s32 seq_old_max_diff, unsigned long *last_reset,
-			    bool *protection_started);
+bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
+			     s32 seq_old_max_diff, unsigned long *last_reset,
+			     bool *protection_started);
 
 #endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 3ce06e0..f2f1256 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -26,6 +26,7 @@
 #include <linux/if.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
@@ -552,7 +553,7 @@
 	struct net_device *soft_iface;
 	struct batadv_priv *bat_priv;
 
-	delayed_work = container_of(work, struct delayed_work, work);
+	delayed_work = to_delayed_work(work);
 	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
 				   delayed_work);
 	soft_iface = forw_packet->if_incoming->soft_iface;
@@ -577,10 +578,15 @@
 		if (forw_packet->num_packets >= hard_iface->num_bcasts)
 			continue;
 
+		if (!kref_get_unless_zero(&hard_iface->refcount))
+			continue;
+
 		/* send a copy of the saved skb */
 		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 		if (skb1)
 			batadv_send_broadcast_skb(skb1, hard_iface);
+
+		batadv_hardif_put(hard_iface);
 	}
 	rcu_read_unlock();
 
@@ -604,7 +610,7 @@
 	struct batadv_forw_packet *forw_packet;
 	struct batadv_priv *bat_priv;
 
-	delayed_work = container_of(work, struct delayed_work, work);
+	delayed_work = to_delayed_work(work);
 	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
 				   delayed_work);
 	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
@@ -675,6 +681,9 @@
 
 		if (pending) {
 			hlist_del(&forw_packet->list);
+			if (!forw_packet->own)
+				atomic_inc(&bat_priv->bcast_queue_left);
+
 			batadv_forw_packet_free(forw_packet);
 		}
 	}
@@ -702,6 +711,9 @@
 
 		if (pending) {
 			hlist_del(&forw_packet->list);
+			if (!forw_packet->own)
+				atomic_inc(&bat_priv->batman_queue_left);
+
 			batadv_forw_packet_free(forw_packet);
 		}
 	}
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 0710379..343d2c9 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -186,7 +186,6 @@
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_bcast_packet *bcast_packet;
-	__be16 ethertype = htons(ETH_P_BATMAN);
 	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
 					      0x00, 0x00};
 	static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
@@ -208,7 +207,7 @@
 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 		goto dropped;
 
-	soft_iface->trans_start = jiffies;
+	netif_trans_update(soft_iface);
 	vid = batadv_get_vid(skb, 0);
 	ethhdr = eth_hdr(skb);
 
@@ -216,7 +215,8 @@
 	case ETH_P_8021Q:
 		vhdr = vlan_eth_hdr(skb);
 
-		if (vhdr->h_vlan_encapsulated_proto != ethertype) {
+		/* drop batman-in-batman packets to prevent loops */
+		if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN)) {
 			network_offset += VLAN_HLEN;
 			break;
 		}
@@ -381,13 +381,29 @@
 	return NETDEV_TX_OK;
 }
 
+/**
+ * batadv_interface_rx - receive ethernet frame on local batman-adv interface
+ * @soft_iface: local interface which will receive the ethernet frame
+ * @skb: ethernet frame for @soft_iface
+ * @hdr_size: size of already parsed batman-adv header
+ * @orig_node: originator from which the batman-adv packet was sent
+ *
+ * Sends an ethernet frame to the receive path of the local @soft_iface.
+ * skb->data still points to the batman-adv header of size @hdr_size.
+ * The caller must already have parsed this header and made sure that at
+ * least @hdr_size bytes are still available for pulling in @skb.
+ *
+ * The packet may still get dropped. This can happen when the encapsulated
+ * ethernet frame is invalid or again contains a batman-adv packet. Also,
+ * unicast packets are dropped directly when they were sent between two
+ * isolated clients.
+ */
 void batadv_interface_rx(struct net_device *soft_iface,
-			 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
-			 int hdr_size, struct batadv_orig_node *orig_node)
+			 struct sk_buff *skb, int hdr_size,
+			 struct batadv_orig_node *orig_node)
 {
 	struct batadv_bcast_packet *batadv_bcast_packet;
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
-	__be16 ethertype = htons(ETH_P_BATMAN);
 	struct vlan_ethhdr *vhdr;
 	struct ethhdr *ethhdr;
 	unsigned short vid;
@@ -396,10 +412,6 @@
 	batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
 	is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
 
-	/* check if enough space is available for pulling, and pull */
-	if (!pskb_may_pull(skb, hdr_size))
-		goto dropped;
-
 	skb_pull_rcsum(skb, hdr_size);
 	skb_reset_mac_header(skb);
 
@@ -408,14 +420,21 @@
 	 */
 	nf_reset(skb);
 
+	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+		goto dropped;
+
 	vid = batadv_get_vid(skb, 0);
 	ethhdr = eth_hdr(skb);
 
 	switch (ntohs(ethhdr->h_proto)) {
 	case ETH_P_8021Q:
+		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
+			goto dropped;
+
 		vhdr = (struct vlan_ethhdr *)skb->data;
 
-		if (vhdr->h_vlan_encapsulated_proto != ethertype)
+		/* drop batman-in-batman packets to prevent loops */
+		if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
 			break;
 
 		/* fall through */
@@ -424,8 +443,6 @@
 	}
 
 	/* skb->dev & skb->pkt_type are set here */
-	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
-		goto dropped;
 	skb->protocol = eth_type_trans(skb, soft_iface);
 
 	/* should not be necessary anymore as we use skb_pull_rcsum()
@@ -539,7 +556,7 @@
 }
 
 /**
- * batadv_create_vlan - allocate the needed resources for a new vlan
+ * batadv_softif_create_vlan - allocate the needed resources for a new vlan
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  *
@@ -868,13 +885,14 @@
 				   struct net_device *slave_dev)
 {
 	struct batadv_hard_iface *hard_iface;
+	struct net *net = dev_net(dev);
 	int ret = -EINVAL;
 
 	hard_iface = batadv_hardif_get_by_netdev(slave_dev);
 	if (!hard_iface || hard_iface->soft_iface)
 		goto out;
 
-	ret = batadv_hardif_enable_interface(hard_iface, dev->name);
+	ret = batadv_hardif_enable_interface(hard_iface, net, dev->name);
 
 out:
 	if (hard_iface)
@@ -955,7 +973,7 @@
 
 	dev->netdev_ops = &batadv_netdev_ops;
 	dev->destructor = batadv_softif_free;
-	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
 	dev->priv_flags |= IFF_NO_QUEUE;
 
 	/* can't call min_mtu, because the needed variables
@@ -971,7 +989,7 @@
 	memset(priv, 0, sizeof(*priv));
 }
 
-struct net_device *batadv_softif_create(const char *name)
+struct net_device *batadv_softif_create(struct net *net, const char *name)
 {
 	struct net_device *soft_iface;
 	int ret;
@@ -981,6 +999,8 @@
 	if (!soft_iface)
 		return NULL;
 
+	dev_net_set(soft_iface, net);
+
 	soft_iface->rtnl_link_ops = &batadv_link_ops;
 
 	ret = register_netdevice(soft_iface);
@@ -1025,12 +1045,12 @@
 	unregister_netdevice_queue(soft_iface, head);
 }
 
-int batadv_softif_is_valid(const struct net_device *net_dev)
+bool batadv_softif_is_valid(const struct net_device *net_dev)
 {
 	if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 struct rtnl_link_ops batadv_link_ops __read_mostly = {
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 9ae2657..ec303dd 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -20,18 +20,20 @@
 
 #include "main.h"
 
+#include <linux/types.h>
 #include <net/rtnetlink.h>
 
 struct net_device;
+struct net;
 struct sk_buff;
 
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
 void batadv_interface_rx(struct net_device *soft_iface,
-			 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
-			 int hdr_size, struct batadv_orig_node *orig_node);
-struct net_device *batadv_softif_create(const char *name);
+			 struct sk_buff *skb, int hdr_size,
+			 struct batadv_orig_node *orig_node);
+struct net_device *batadv_softif_create(struct net *net, const char *name);
 void batadv_softif_destroy_sysfs(struct net_device *soft_iface);
-int batadv_softif_is_valid(const struct net_device *net_dev);
+bool batadv_softif_is_valid(const struct net_device *net_dev);
 extern struct rtnl_link_ops batadv_link_ops;
 int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid);
 void batadv_softif_vlan_put(struct batadv_softif_vlan *softif_vlan);
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index e7cf513..414b207 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -116,11 +116,13 @@
 static char *batadv_uev_action_str[] = {
 	"add",
 	"del",
-	"change"
+	"change",
+	"loopdetect",
 };
 
 static char *batadv_uev_type_str[] = {
-	"gw"
+	"gw",
+	"bla",
 };
 
 /* Use this, if you have customized show and store functions for vlan attrs */
@@ -830,6 +832,7 @@
 				       size_t count)
 {
 	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+	struct net *net = dev_net(net_dev);
 	struct batadv_hard_iface *hard_iface;
 	int status_tmp = -1;
 	int ret = count;
@@ -873,7 +876,7 @@
 		batadv_hardif_disable_interface(hard_iface,
 						BATADV_IF_CLEANUP_AUTO);
 
-	ret = batadv_hardif_enable_interface(hard_iface, buff);
+	ret = batadv_hardif_enable_interface(hard_iface, net, buff);
 
 unlock:
 	rtnl_unlock();
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 0b43e86..feaf492b 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -43,7 +43,6 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/workqueue.h>
-#include <net/net_namespace.h>
 
 #include "bridge_loop_avoidance.h"
 #include "hard-interface.h"
@@ -76,9 +75,9 @@
  *
  * Compare the MAC address and the VLAN ID of the two TT entries and check if
  * they are the same TT client.
- * Return: 1 if the two TT clients are the same, 0 otherwise
+ * Return: true if the two TT clients are the same, false otherwise
  */
-static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
+static bool batadv_compare_tt(const struct hlist_node *node, const void *data2)
 {
 	const void *data1 = container_of(node, struct batadv_tt_common_entry,
 					 hash_entry);
@@ -215,6 +214,8 @@
 	tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
 				      common.refcount);
 
+	batadv_softif_vlan_put(tt_local_entry->vlan);
+
 	kfree_rcu(tt_local_entry, common.rcu);
 }
 
@@ -583,6 +584,7 @@
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct batadv_tt_local_entry *tt_local;
 	struct batadv_tt_global_entry *tt_global = NULL;
+	struct net *net = dev_net(soft_iface);
 	struct batadv_softif_vlan *vlan;
 	struct net_device *in_dev = NULL;
 	struct hlist_head *head;
@@ -594,7 +596,7 @@
 	u32 match_mark;
 
 	if (ifindex != BATADV_NULL_IFINDEX)
-		in_dev = dev_get_by_index(&init_net, ifindex);
+		in_dev = dev_get_by_index(net, ifindex);
 
 	tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid);
 
@@ -673,6 +675,7 @@
 	kref_get(&tt_local->common.refcount);
 	tt_local->last_seen = jiffies;
 	tt_local->common.added_at = tt_local->last_seen;
+	tt_local->vlan = vlan;
 
 	/* the batman interface mac and multicast addresses should never be
 	 * purged
@@ -991,7 +994,6 @@
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_local_entry *tt_local;
 	struct batadv_hard_iface *primary_if;
-	struct batadv_softif_vlan *vlan;
 	struct hlist_head *head;
 	unsigned short vid;
 	u32 i;
@@ -1008,8 +1010,8 @@
 	seq_printf(seq,
 		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
 		   net_dev->name, (u8)atomic_read(&bat_priv->tt.vn));
-	seq_printf(seq, "       %-13s  %s %-8s %-9s (%-10s)\n", "Client", "VID",
-		   "Flags", "Last seen", "CRC");
+	seq_puts(seq,
+		 "       Client         VID Flags    Last seen (CRC       )\n");
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1027,14 +1029,6 @@
 			last_seen_msecs = last_seen_msecs % 1000;
 
 			no_purge = tt_common_entry->flags & np_flag;
-
-			vlan = batadv_softif_vlan_get(bat_priv, vid);
-			if (!vlan) {
-				seq_printf(seq, "Cannot retrieve VLAN %d\n",
-					   BATADV_PRINT_VID(vid));
-				continue;
-			}
-
 			seq_printf(seq,
 				   " * %pM %4i [%c%c%c%c%c%c] %3u.%03u   (%#.8x)\n",
 				   tt_common_entry->addr,
@@ -1052,9 +1046,7 @@
 				     BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
 				   no_purge ? 0 : last_seen_secs,
 				   no_purge ? 0 : last_seen_msecs,
-				   vlan->tt.crc);
-
-			batadv_softif_vlan_put(vlan);
+				   tt_local->vlan->tt.crc);
 		}
 		rcu_read_unlock();
 	}
@@ -1099,7 +1091,6 @@
 {
 	struct batadv_tt_local_entry *tt_local_entry;
 	u16 flags, curr_flags = BATADV_NO_FLAGS;
-	struct batadv_softif_vlan *vlan;
 	void *tt_entry_exists;
 
 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
@@ -1139,14 +1130,6 @@
 	/* extra call to free the local tt entry */
 	batadv_tt_local_entry_put(tt_local_entry);
 
-	/* decrease the reference held for this vlan */
-	vlan = batadv_softif_vlan_get(bat_priv, vid);
-	if (!vlan)
-		goto out;
-
-	batadv_softif_vlan_put(vlan);
-	batadv_softif_vlan_put(vlan);
-
 out:
 	if (tt_local_entry)
 		batadv_tt_local_entry_put(tt_local_entry);
@@ -1219,7 +1202,6 @@
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_local_entry *tt_local;
-	struct batadv_softif_vlan *vlan;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	u32 i;
@@ -1241,14 +1223,6 @@
 						struct batadv_tt_local_entry,
 						common);
 
-			/* decrease the reference held for this vlan */
-			vlan = batadv_softif_vlan_get(bat_priv,
-						      tt_common_entry->vid);
-			if (vlan) {
-				batadv_softif_vlan_put(vlan);
-				batadv_softif_vlan_put(vlan);
-			}
-
 			batadv_tt_local_entry_put(tt_local);
 		}
 		spin_unlock_bh(list_lock);
@@ -1706,9 +1680,8 @@
 	seq_printf(seq,
 		   "Globally announced TT entries received via the mesh %s\n",
 		   net_dev->name);
-	seq_printf(seq, "       %-13s  %s  %s       %-15s %s (%-10s) %s\n",
-		   "Client", "VID", "(TTVN)", "Originator", "(Curr TTVN)",
-		   "CRC", "Flags");
+	seq_puts(seq,
+		 "       Client         VID  (TTVN)       Originator      (Curr TTVN) (CRC       ) Flags\n");
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -2388,19 +2361,19 @@
  * @entry_ptr: to be checked local tt entry
  * @data_ptr: not used but definition required to satisfy the callback prototype
  *
- * Return: 1 if the entry is a valid, 0 otherwise.
+ * Return: true if the entry is valid, false otherwise.
  */
-static int batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
+static bool batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
 {
 	const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
 
 	if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
-static int batadv_tt_global_valid(const void *entry_ptr,
-				  const void *data_ptr)
+static bool batadv_tt_global_valid(const void *entry_ptr,
+				   const void *data_ptr)
 {
 	const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
 	const struct batadv_tt_global_entry *tt_global_entry;
@@ -2408,7 +2381,7 @@
 
 	if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
 	    tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
-		return 0;
+		return false;
 
 	tt_global_entry = container_of(tt_common_entry,
 				       struct batadv_tt_global_entry,
@@ -2430,7 +2403,8 @@
 static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
 				    struct batadv_hashtable *hash,
 				    void *tvlv_buff, u16 tt_len,
-				    int (*valid_cb)(const void *, const void *),
+				    bool (*valid_cb)(const void *,
+						     const void *),
 				    void *cb_data)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
@@ -2579,11 +2553,11 @@
  *
  * Return: true if the TT Request was sent, false otherwise
  */
-static int batadv_send_tt_request(struct batadv_priv *bat_priv,
-				  struct batadv_orig_node *dst_orig_node,
-				  u8 ttvn,
-				  struct batadv_tvlv_tt_vlan_data *tt_vlan,
-				  u16 num_vlan, bool full_table)
+static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
+				   struct batadv_orig_node *dst_orig_node,
+				   u8 ttvn,
+				   struct batadv_tvlv_tt_vlan_data *tt_vlan,
+				   u16 num_vlan, bool full_table)
 {
 	struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
 	struct batadv_tt_req_node *tt_req_node = NULL;
@@ -3227,7 +3201,7 @@
 	struct batadv_priv_tt *priv_tt;
 	struct batadv_priv *bat_priv;
 
-	delayed_work = container_of(work, struct delayed_work, work);
+	delayed_work = to_delayed_work(work);
 	priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
 	bat_priv = container_of(priv_tt, struct batadv_priv, tt);
 
@@ -3309,7 +3283,6 @@
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_local_entry *tt_local;
-	struct batadv_softif_vlan *vlan;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3339,13 +3312,6 @@
 						struct batadv_tt_local_entry,
 						common);
 
-			/* decrease the reference held for this vlan */
-			vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
-			if (vlan) {
-				batadv_softif_vlan_put(vlan);
-				batadv_softif_vlan_put(vlan);
-			}
-
 			batadv_tt_local_entry_put(tt_local);
 		}
 		spin_unlock_bh(list_lock);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 9abfb3e..6a577f4 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -433,6 +433,7 @@
  * @ifinfo_lock: lock protecting private ifinfo members and list
  * @if_incoming: pointer to incoming hard-interface
  * @last_seen: when last packet via this neighbor was received
+ * @hardif_neigh: hardif_neigh of this neighbor
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
  */
@@ -444,6 +445,7 @@
 	spinlock_t ifinfo_lock;	/* protects ifinfo_list and its members */
 	struct batadv_hard_iface *if_incoming;
 	unsigned long last_seen;
+	struct batadv_hardif_neigh_node *hardif_neigh;
 	struct kref refcount;
 	struct rcu_head rcu;
 };
@@ -655,6 +657,9 @@
  * @num_requests: number of bla requests in flight
  * @claim_hash: hash table containing mesh nodes this host has claimed
  * @backbone_hash: hash table containing all detected backbone gateways
+ * @loopdetect_addr: MAC address used for own loopdetection frames
+ * @loopdetect_lasttime: time when the loopdetection frames were sent
+ * @loopdetect_next: how many periods to wait for the next loopdetect process
  * @bcast_duplist: recently received broadcast packets array (for broadcast
  *  duplicate suppression)
  * @bcast_duplist_curr: index of last broadcast packet added to bcast_duplist
@@ -666,6 +671,9 @@
 	atomic_t num_requests;
 	struct batadv_hashtable *claim_hash;
 	struct batadv_hashtable *backbone_hash;
+	u8 loopdetect_addr[ETH_ALEN];
+	unsigned long loopdetect_lasttime;
+	atomic_t loopdetect_next;
 	struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
 	int bcast_duplist_curr;
 	/* protects bcast_duplist & bcast_duplist_curr */
@@ -1010,6 +1018,7 @@
  *  resolved
  * @crc: crc16 checksum over all claims
  * @crc_lock: lock protecting crc
+ * @report_work: work struct for reporting detected loops
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
  */
@@ -1023,6 +1032,7 @@
 	atomic_t request_sent;
 	u16 crc;
 	spinlock_t crc_lock; /* protects crc */
+	struct work_struct report_work;
 	struct kref refcount;
 	struct rcu_head rcu;
 };
@@ -1073,10 +1083,12 @@
  * struct batadv_tt_local_entry - translation table local entry data
  * @common: general translation table data
  * @last_seen: timestamp used for purging stale tt local entries
+ * @vlan: soft-interface vlan of the entry
  */
 struct batadv_tt_local_entry {
 	struct batadv_tt_common_entry common;
 	unsigned long last_seen;
+	struct batadv_softif_vlan *vlan;
 };
 
 /**
@@ -1250,6 +1262,8 @@
  * struct batadv_algo_ops - mesh algorithm callbacks
  * @list: list node for the batadv_algo_list
  * @name: name of the algorithm
+ * @bat_iface_activate: start routing mechanisms when hard-interface is brought
+ *  up
  * @bat_iface_enable: init routing info when hard-interface is enabled
  * @bat_iface_disable: de-init routing info when hard-interface is disabled
  * @bat_iface_update_mac: (re-)init mac addresses of the protocol information
@@ -1277,6 +1291,7 @@
 struct batadv_algo_ops {
 	struct hlist_node list;
 	char *name;
+	void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface);
 	int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
 	void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
 	void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 8a4cc2f..780089d 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -68,7 +68,7 @@
 	struct in6_addr peer_addr;
 };
 
-struct lowpan_dev {
+struct lowpan_btle_dev {
 	struct list_head list;
 
 	struct hci_dev *hdev;
@@ -80,18 +80,21 @@
 	struct delayed_work notify_peers;
 };
 
-static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
+static inline struct lowpan_btle_dev *
+lowpan_btle_dev(const struct net_device *netdev)
 {
-	return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
+	return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
 }
 
-static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
+static inline void peer_add(struct lowpan_btle_dev *dev,
+			    struct lowpan_peer *peer)
 {
 	list_add_rcu(&peer->list, &dev->peers);
 	atomic_inc(&dev->peer_count);
 }
 
-static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
+static inline bool peer_del(struct lowpan_btle_dev *dev,
+			    struct lowpan_peer *peer)
 {
 	list_del_rcu(&peer->list);
 	kfree_rcu(peer, rcu);
@@ -106,7 +109,7 @@
 	return false;
 }
 
-static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
+static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_btle_dev *dev,
 						 bdaddr_t *ba, __u8 type)
 {
 	struct lowpan_peer *peer;
@@ -134,8 +137,8 @@
 	return NULL;
 }
 
-static inline struct lowpan_peer *__peer_lookup_chan(struct lowpan_dev *dev,
-						     struct l2cap_chan *chan)
+static inline struct lowpan_peer *
+__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
 {
 	struct lowpan_peer *peer;
 
@@ -147,8 +150,8 @@
 	return NULL;
 }
 
-static inline struct lowpan_peer *__peer_lookup_conn(struct lowpan_dev *dev,
-						     struct l2cap_conn *conn)
+static inline struct lowpan_peer *
+__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
 {
 	struct lowpan_peer *peer;
 
@@ -160,7 +163,7 @@
 	return NULL;
 }
 
-static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
+static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
 						  struct in6_addr *daddr,
 						  struct sk_buff *skb)
 {
@@ -220,7 +223,7 @@
 
 static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
 {
-	struct lowpan_dev *entry;
+	struct lowpan_btle_dev *entry;
 	struct lowpan_peer *peer = NULL;
 
 	rcu_read_lock();
@@ -236,10 +239,10 @@
 	return peer;
 }
 
-static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
+static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
 {
-	struct lowpan_dev *entry;
-	struct lowpan_dev *dev = NULL;
+	struct lowpan_btle_dev *entry;
+	struct lowpan_btle_dev *dev = NULL;
 
 	rcu_read_lock();
 
@@ -270,10 +273,10 @@
 			   struct l2cap_chan *chan)
 {
 	const u8 *saddr, *daddr;
-	struct lowpan_dev *dev;
+	struct lowpan_btle_dev *dev;
 	struct lowpan_peer *peer;
 
-	dev = lowpan_dev(netdev);
+	dev = lowpan_btle_dev(netdev);
 
 	rcu_read_lock();
 	peer = __peer_lookup_chan(dev, chan);
@@ -375,7 +378,7 @@
 /* Packet from BT LE device */
 static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-	struct lowpan_dev *dev;
+	struct lowpan_btle_dev *dev;
 	struct lowpan_peer *peer;
 	int err;
 
@@ -431,15 +434,18 @@
 			bdaddr_t *peer_addr, u8 *peer_addr_type)
 {
 	struct in6_addr ipv6_daddr;
-	struct lowpan_dev *dev;
+	struct ipv6hdr *hdr;
+	struct lowpan_btle_dev *dev;
 	struct lowpan_peer *peer;
 	bdaddr_t addr, *any = BDADDR_ANY;
 	u8 *daddr = any->b;
 	int err, status = 0;
 
-	dev = lowpan_dev(netdev);
+	hdr = ipv6_hdr(skb);
 
-	memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr));
+	dev = lowpan_btle_dev(netdev);
+
+	memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));
 
 	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
 		lowpan_cb(skb)->chan = NULL;
@@ -489,15 +495,9 @@
 			 unsigned short type, const void *_daddr,
 			 const void *_saddr, unsigned int len)
 {
-	struct ipv6hdr *hdr;
-
 	if (type != ETH_P_IPV6)
 		return -EINVAL;
 
-	hdr = ipv6_hdr(skb);
-
-	memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, sizeof(struct in6_addr));
-
 	return 0;
 }
 
@@ -543,19 +543,19 @@
 static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct sk_buff *local_skb;
-	struct lowpan_dev *entry;
+	struct lowpan_btle_dev *entry;
 	int err = 0;
 
 	rcu_read_lock();
 
 	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
 		struct lowpan_peer *pentry;
-		struct lowpan_dev *dev;
+		struct lowpan_btle_dev *dev;
 
 		if (entry->netdev != netdev)
 			continue;
 
-		dev = lowpan_dev(entry->netdev);
+		dev = lowpan_btle_dev(entry->netdev);
 
 		list_for_each_entry_rcu(pentry, &dev->peers, list) {
 			int ret;
@@ -723,8 +723,8 @@
 
 static void do_notify_peers(struct work_struct *work)
 {
-	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
-					      notify_peers.work);
+	struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
+						   notify_peers.work);
 
 	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
 }
@@ -766,7 +766,7 @@
 }
 
 static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
-					struct lowpan_dev *dev)
+					struct lowpan_btle_dev *dev)
 {
 	struct lowpan_peer *peer;
 
@@ -803,12 +803,12 @@
 	return peer->chan;
 }
 
-static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
+static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
 {
 	struct net_device *netdev;
 	int err = 0;
 
-	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
+	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
 			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
 			      netdev_setup);
 	if (!netdev)
@@ -820,7 +820,7 @@
 	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
 	SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
-	*dev = lowpan_dev(netdev);
+	*dev = lowpan_btle_dev(netdev);
 	(*dev)->netdev = netdev;
 	(*dev)->hdev = chan->conn->hcon->hdev;
 	INIT_LIST_HEAD(&(*dev)->peers);
@@ -853,7 +853,7 @@
 
 static inline void chan_ready_cb(struct l2cap_chan *chan)
 {
-	struct lowpan_dev *dev;
+	struct lowpan_btle_dev *dev;
 
 	dev = lookup_dev(chan->conn);
 
@@ -890,8 +890,9 @@
 
 static void delete_netdev(struct work_struct *work)
 {
-	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
-						delete_netdev);
+	struct lowpan_btle_dev *entry = container_of(work,
+						     struct lowpan_btle_dev,
+						     delete_netdev);
 
 	lowpan_unregister_netdev(entry->netdev);
 
@@ -900,8 +901,8 @@
 
 static void chan_close_cb(struct l2cap_chan *chan)
 {
-	struct lowpan_dev *entry;
-	struct lowpan_dev *dev = NULL;
+	struct lowpan_btle_dev *entry;
+	struct lowpan_btle_dev *dev = NULL;
 	struct lowpan_peer *peer;
 	int err = -ENOENT;
 	bool last = false, remove = true;
@@ -921,7 +922,7 @@
 	spin_lock(&devices_lock);
 
 	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
-		dev = lowpan_dev(entry->netdev);
+		dev = lowpan_btle_dev(entry->netdev);
 		peer = __peer_lookup_chan(dev, chan);
 		if (peer) {
 			last = peer_del(dev, peer);
@@ -1131,7 +1132,7 @@
 
 static void disconnect_all_peers(void)
 {
-	struct lowpan_dev *entry;
+	struct lowpan_btle_dev *entry;
 	struct lowpan_peer *peer, *tmp_peer, *new_peer;
 	struct list_head peers;
 
@@ -1291,7 +1292,7 @@
 
 static int lowpan_control_show(struct seq_file *f, void *ptr)
 {
-	struct lowpan_dev *entry;
+	struct lowpan_btle_dev *entry;
 	struct lowpan_peer *peer;
 
 	spin_lock(&devices_lock);
@@ -1322,7 +1323,7 @@
 
 static void disconnect_devices(void)
 {
-	struct lowpan_dev *entry, *tmp, *new_dev;
+	struct lowpan_btle_dev *entry, *tmp, *new_dev;
 	struct list_head devices;
 
 	INIT_LIST_HEAD(&devices);
@@ -1360,7 +1361,7 @@
 			unsigned long event, void *ptr)
 {
 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-	struct lowpan_dev *entry;
+	struct lowpan_btle_dev *entry;
 
 	if (netdev->type != ARPHRD_6LOWPAN)
 		return NOTIFY_DONE;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 955eda9..3df7aef 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -65,7 +65,7 @@
 void bt_sock_reclassify_lock(struct sock *sk, int proto)
 {
 	BUG_ON(!sk);
-	BUG_ON(sock_owned_by_user(sk));
+	BUG_ON(!sock_allow_reclassification(sk));
 
 	sock_lock_init_class_and_name(sk,
 			bt_slock_key_strings[proto], &bt_slock_key[proto],
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 6ceb5d3..f4fcb4a 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -188,7 +188,7 @@
 	 * So we have to queue them and wake up session thread which is sleeping
 	 * on the sk_sleep(sk).
 	 */
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 	skb_queue_tail(&sk->sk_write_queue, skb);
 	wake_up_interruptible(sk_sleep(sk));
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2713fc8..45a9fc6 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -3139,10 +3139,10 @@
 	list_del(&hdev->list);
 	write_unlock(&hci_dev_list_lock);
 
-	hci_dev_do_close(hdev);
-
 	cancel_work_sync(&hdev->power_on);
 
+	hci_dev_do_close(hdev);
+
 	if (!test_bit(HCI_INIT, &hdev->flags) &&
 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index c162af5..d4b3dd5 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -4727,6 +4727,19 @@
 	u32 flags;
 	u8 *ptr, real_len;
 
+	switch (type) {
+	case LE_ADV_IND:
+	case LE_ADV_DIRECT_IND:
+	case LE_ADV_SCAN_IND:
+	case LE_ADV_NONCONN_IND:
+	case LE_ADV_SCAN_RSP:
+		break;
+	default:
+		BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
+				   type);
+		return;
+	}
+
 	/* Find the end of the data in case the report contains padded zero
 	 * bytes at the end causing an invalid length value.
 	 *
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 6e125d7..c045b3c 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -1065,6 +1065,9 @@
 	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
 		flags |= LE_AD_LIMITED;
 
+	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+		flags |= LE_AD_NO_BREDR;
+
 	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
 		/* If a discovery flag wasn't provided, simply use the global
 		 * settings.
@@ -1072,9 +1075,6 @@
 		if (!flags)
 			flags |= mgmt_get_adv_discov_flags(hdev);
 
-		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
-			flags |= LE_AD_NO_BREDR;
-
 		/* If flags would still be empty, then there is no need to
 		 * include the "Flags" AD field".
 		 */
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e4cae72..388ee8b 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -778,7 +778,7 @@
 		}
 
 		if (sec.level < BT_SECURITY_LOW ||
-		    sec.level > BT_SECURITY_HIGH) {
+		    sec.level > BT_SECURITY_FIPS) {
 			err = -EINVAL;
 			break;
 		}
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 263b4de..d99b200 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -21,18 +21,19 @@
 #include <asm/uaccess.h>
 #include "br_private.h"
 
-/* called with RTNL */
 static int get_bridge_ifindices(struct net *net, int *indices, int num)
 {
 	struct net_device *dev;
 	int i = 0;
 
-	for_each_netdev(net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
 		if (i >= num)
 			break;
 		if (dev->priv_flags & IFF_EBRIDGE)
 			indices[i++] = dev->ifindex;
 	}
+	rcu_read_unlock();
 
 	return i;
 }
@@ -112,7 +113,9 @@
 static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_port *p = NULL;
 	unsigned long args[4];
+	int ret = -EOPNOTSUPP;
 
 	if (copy_from_user(args, rq->ifr_data, sizeof(args)))
 		return -EFAULT;
@@ -182,25 +185,29 @@
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
-		return br_set_forward_delay(br, args[1]);
+		ret = br_set_forward_delay(br, args[1]);
+		break;
 
 	case BRCTL_SET_BRIDGE_HELLO_TIME:
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
-		return br_set_hello_time(br, args[1]);
+		ret = br_set_hello_time(br, args[1]);
+		break;
 
 	case BRCTL_SET_BRIDGE_MAX_AGE:
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
-		return br_set_max_age(br, args[1]);
+		ret = br_set_max_age(br, args[1]);
+		break;
 
 	case BRCTL_SET_AGEING_TIME:
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
-		return br_set_ageing_time(br, args[1]);
+		ret = br_set_ageing_time(br, args[1]);
+		break;
 
 	case BRCTL_GET_PORT_INFO:
 	{
@@ -240,20 +247,19 @@
 			return -EPERM;
 
 		br_stp_set_enabled(br, args[1]);
-		return 0;
+		ret = 0;
+		break;
 
 	case BRCTL_SET_BRIDGE_PRIORITY:
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
 		br_stp_set_bridge_priority(br, args[1]);
-		return 0;
+		ret = 0;
+		break;
 
 	case BRCTL_SET_PORT_PRIORITY:
 	{
-		struct net_bridge_port *p;
-		int ret;
-
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
@@ -263,14 +269,11 @@
 		else
 			ret = br_stp_set_port_priority(p, args[2]);
 		spin_unlock_bh(&br->lock);
-		return ret;
+		break;
 	}
 
 	case BRCTL_SET_PATH_COST:
 	{
-		struct net_bridge_port *p;
-		int ret;
-
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
@@ -280,8 +283,7 @@
 		else
 			ret = br_stp_set_path_cost(p, args[2]);
 		spin_unlock_bh(&br->lock);
-
-		return ret;
+		break;
 	}
 
 	case BRCTL_GET_FDB_ENTRIES:
@@ -289,7 +291,14 @@
 				       args[2], args[3]);
 	}
 
-	return -EOPNOTSUPP;
+	if (!ret) {
+		if (p)
+			br_ifinfo_notify(RTM_NEWLINK, p);
+		else
+			netdev_state_change(br->dev);
+	}
+
+	return ret;
 }
 
 static int old_deviceless(struct net *net, void __user *uarg)
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 253bc77..7dbc80d 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -61,6 +61,19 @@
 		e->flags |= MDB_FLAGS_OFFLOAD;
 }
 
+static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
+{
+	memset(ip, 0, sizeof(struct br_ip));
+	ip->vid = entry->vid;
+	ip->proto = entry->addr.proto;
+	if (ip->proto == htons(ETH_P_IP))
+		ip->u.ip4 = entry->addr.u.ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+	else
+		ip->u.ip6 = entry->addr.u.ip6;
+#endif
+}
+
 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 			    struct net_device *dev)
 {
@@ -243,9 +256,45 @@
 		+ nla_total_size(sizeof(struct br_mdb_entry));
 }
 
-static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
-			    int type, struct net_bridge_port_group *pg)
+struct br_mdb_complete_info {
+	struct net_bridge_port *port;
+	struct br_ip ip;
+};
+
+static void br_mdb_complete(struct net_device *dev, int err, void *priv)
 {
+	struct br_mdb_complete_info *data = priv;
+	struct net_bridge_port_group __rcu **pp;
+	struct net_bridge_port_group *p;
+	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_mdb_entry *mp;
+	struct net_bridge_port *port = data->port;
+	struct net_bridge *br = port->br;
+
+	if (err)
+		goto err;
+
+	spin_lock_bh(&br->multicast_lock);
+	mdb = mlock_dereference(br->mdb, br);
+	mp = br_mdb_ip_get(mdb, &data->ip);
+	if (!mp)
+		goto out;
+	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
+		if (p->port != port)
+			continue;
+		p->flags |= MDB_PG_FLAGS_OFFLOAD;
+	}
+out:
+	spin_unlock_bh(&br->multicast_lock);
+err:
+	kfree(priv);
+}
+
+static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
+			    struct br_mdb_entry *entry, int type)
+{
+	struct br_mdb_complete_info *complete_info;
 	struct switchdev_obj_port_mdb mdb = {
 		.obj = {
 			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
@@ -268,9 +317,14 @@
 
 	mdb.obj.orig_dev = port_dev;
 	if (port_dev && type == RTM_NEWMDB) {
-		err = switchdev_port_obj_add(port_dev, &mdb.obj);
-		if (!err && pg)
-			pg->flags |= MDB_PG_FLAGS_OFFLOAD;
+		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
+		if (complete_info) {
+			complete_info->port = p;
+			__mdb_entry_to_br_ip(entry, &complete_info->ip);
+			mdb.obj.complete_priv = complete_info;
+			mdb.obj.complete = br_mdb_complete;
+			switchdev_port_obj_add(port_dev, &mdb.obj);
+		}
 	} else if (port_dev && type == RTM_DELMDB) {
 		switchdev_port_obj_del(port_dev, &mdb.obj);
 	}
@@ -291,21 +345,21 @@
 	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
 }
 
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
-		   int type)
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+		   struct br_ip *group, int type, u8 flags)
 {
 	struct br_mdb_entry entry;
 
 	memset(&entry, 0, sizeof(entry));
-	entry.ifindex = pg->port->dev->ifindex;
-	entry.addr.proto = pg->addr.proto;
-	entry.addr.u.ip4 = pg->addr.u.ip4;
+	entry.ifindex = port->dev->ifindex;
+	entry.addr.proto = group->proto;
+	entry.addr.u.ip4 = group->u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-	entry.addr.u.ip6 = pg->addr.u.ip6;
+	entry.addr.u.ip6 = group->u.ip6;
 #endif
-	entry.vid = pg->addr.vid;
-	__mdb_entry_fill_flags(&entry, pg->flags);
-	__br_mdb_notify(dev, &entry, type, pg);
+	entry.vid = group->vid;
+	__mdb_entry_fill_flags(&entry, flags);
+	__br_mdb_notify(dev, port, &entry, type);
 }
 
 static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
@@ -450,8 +504,7 @@
 }
 
 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
-			    struct br_ip *group, unsigned char state,
-			    struct net_bridge_port_group **pg)
+			    struct br_ip *group, unsigned char state)
 {
 	struct net_bridge_mdb_entry *mp;
 	struct net_bridge_port_group *p;
@@ -482,7 +535,6 @@
 	if (unlikely(!p))
 		return -ENOMEM;
 	rcu_assign_pointer(*pp, p);
-	*pg = p;
 	if (state == MDB_TEMPORARY)
 		mod_timer(&p->timer, now + br->multicast_membership_interval);
 
@@ -490,8 +542,7 @@
 }
 
 static int __br_mdb_add(struct net *net, struct net_bridge *br,
-			struct br_mdb_entry *entry,
-			struct net_bridge_port_group **pg)
+			struct br_mdb_entry *entry)
 {
 	struct br_ip ip;
 	struct net_device *dev;
@@ -509,18 +560,10 @@
 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 		return -EINVAL;
 
-	memset(&ip, 0, sizeof(ip));
-	ip.vid = entry->vid;
-	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP))
-		ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-	else
-		ip.u.ip6 = entry->addr.u.ip6;
-#endif
+	__mdb_entry_to_br_ip(entry, &ip);
 
 	spin_lock_bh(&br->multicast_lock);
-	ret = br_mdb_add_group(br, p, &ip, entry->state, pg);
+	ret = br_mdb_add_group(br, p, &ip, entry->state);
 	spin_unlock_bh(&br->multicast_lock);
 	return ret;
 }
@@ -528,7 +571,6 @@
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
-	struct net_bridge_port_group *pg;
 	struct net_bridge_vlan_group *vg;
 	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
@@ -558,15 +600,15 @@
 	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
 		list_for_each_entry(v, &vg->vlan_list, vlist) {
 			entry->vid = v->vid;
-			err = __br_mdb_add(net, br, entry, &pg);
+			err = __br_mdb_add(net, br, entry);
 			if (err)
 				break;
-			__br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
 		}
 	} else {
-		err = __br_mdb_add(net, br, entry, &pg);
+		err = __br_mdb_add(net, br, entry);
 		if (!err)
-			__br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
 	}
 
 	return err;
@@ -584,15 +626,7 @@
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		return -EINVAL;
 
-	memset(&ip, 0, sizeof(ip));
-	ip.vid = entry->vid;
-	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP))
-		ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-	else
-		ip.u.ip6 = entry->addr.u.ip6;
-#endif
+	__mdb_entry_to_br_ip(entry, &ip);
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
@@ -662,12 +696,12 @@
 			entry->vid = v->vid;
 			err = __br_mdb_del(br, entry);
 			if (!err)
-				__br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
 		}
 	} else {
 		err = __br_mdb_del(br, entry);
 		if (!err)
-			__br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
 	}
 
 	return err;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a4c15df..6852f3c 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -283,7 +283,8 @@
 		rcu_assign_pointer(*pp, p->next);
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
-		br_mdb_notify(br->dev, p, RTM_DELMDB);
+		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+			      p->flags);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
 		if (!mp->ports && !mp->mglist &&
@@ -705,7 +706,7 @@
 	if (unlikely(!p))
 		goto err;
 	rcu_assign_pointer(*pp, p);
-	br_mdb_notify(br->dev, p, RTM_NEWMDB);
+	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
 
 found:
 	mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -1278,6 +1279,7 @@
 	struct br_ip saddr;
 	unsigned long max_delay;
 	unsigned long now = jiffies;
+	unsigned int offset = skb_transport_offset(skb);
 	__be32 group;
 	int err = 0;
 
@@ -1288,14 +1290,14 @@
 
 	group = ih->group;
 
-	if (skb->len == sizeof(*ih)) {
+	if (skb->len == offset + sizeof(*ih)) {
 		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
 
 		if (!max_delay) {
 			max_delay = 10 * HZ;
 			group = 0;
 		}
-	} else if (skb->len >= sizeof(*ih3)) {
+	} else if (skb->len >= offset + sizeof(*ih3)) {
 		ih3 = igmpv3_query_hdr(skb);
 		if (ih3->nsrcs)
 			goto out;
@@ -1356,6 +1358,7 @@
 	struct br_ip saddr;
 	unsigned long max_delay;
 	unsigned long now = jiffies;
+	unsigned int offset = skb_transport_offset(skb);
 	const struct in6_addr *group = NULL;
 	bool is_general_query;
 	int err = 0;
@@ -1365,8 +1368,8 @@
 	    (port && port->state == BR_STATE_DISABLED))
 		goto out;
 
-	if (skb->len == sizeof(*mld)) {
-		if (!pskb_may_pull(skb, sizeof(*mld))) {
+	if (skb->len == offset + sizeof(*mld)) {
+		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
 			err = -EINVAL;
 			goto out;
 		}
@@ -1375,7 +1378,7 @@
 		if (max_delay)
 			group = &mld->mld_mca;
 	} else {
-		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
+		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
 			err = -EINVAL;
 			goto out;
 		}
@@ -1461,7 +1464,8 @@
 			hlist_del_init(&p->mglist);
 			del_timer(&p->timer);
 			call_rcu_bh(&p->rcu, br_multicast_free_pg);
-			br_mdb_notify(br->dev, p, RTM_DELMDB);
+			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+				      p->flags);
 
 			if (!mp->ports && !mp->mglist &&
 			    netif_running(br->dev))
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 44114a9..2d25979 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -217,13 +217,13 @@
 
 	len = ntohs(iph->tot_len);
 	if (skb->len < len) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
+		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
 		goto drop;
 	} else if (len < (iph->ihl*4))
 		goto inhdr_error;
 
 	if (pskb_trim_rcsum(skb, len)) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -236,7 +236,7 @@
 	return 0;
 
 inhdr_error:
-	IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:
 	return -1;
 }
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index d61f56e..5e59a84 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -122,13 +122,13 @@
 
 	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
 		if (pkt_len + ip6h_len > skb->len) {
-			IP6_INC_STATS_BH(net, idev,
-					 IPSTATS_MIB_INTRUNCATEDPKTS);
+			__IP6_INC_STATS(net, idev,
+					IPSTATS_MIB_INTRUNCATEDPKTS);
 			goto drop;
 		}
 		if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) {
-			IP6_INC_STATS_BH(net, idev,
-					 IPSTATS_MIB_INDISCARDS);
+			__IP6_INC_STATS(net, idev,
+					IPSTATS_MIB_INDISCARDS);
 			goto drop;
 		}
 	}
@@ -142,7 +142,7 @@
 	return 0;
 
 inhdr_error:
-	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 drop:
 	return -1;
 }
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e9c635e..a5343c7 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -135,9 +135,9 @@
 		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
-		+ nla_total_size(sizeof(u64))	/* IFLA_BRPORT_MESSAGE_AGE_TIMER */
-		+ nla_total_size(sizeof(u64))	/* IFLA_BRPORT_FORWARD_DELAY_TIMER */
-		+ nla_total_size(sizeof(u64))	/* IFLA_BRPORT_HOLD_TIMER */
+		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
+		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
+		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
 #endif
@@ -190,13 +190,16 @@
 		return -EMSGSIZE;
 
 	timerval = br_timer_value(&p->message_age_timer);
-	if (nla_put_u64(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval))
+	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
+			      IFLA_BRPORT_PAD))
 		return -EMSGSIZE;
 	timerval = br_timer_value(&p->forward_delay_timer);
-	if (nla_put_u64(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval))
+	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
+			      IFLA_BRPORT_PAD))
 		return -EMSGSIZE;
 	timerval = br_timer_value(&p->hold_timer);
-	if (nla_put_u64(skb, IFLA_BRPORT_HOLD_TIMER, timerval))
+	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
+			      IFLA_BRPORT_PAD))
 		return -EMSGSIZE;
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
@@ -847,6 +850,7 @@
 	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
 	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
 	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
+	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
 };
 
 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -918,6 +922,14 @@
 		if (err)
 			return err;
 	}
+
+	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
+		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);
+
+		err = br_vlan_set_stats(br, vlan_stats);
+		if (err)
+			return err;
+	}
 #endif
 
 	if (data[IFLA_BR_GROUP_FWD_MASK]) {
@@ -1079,6 +1091,7 @@
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_VLAN_DEFAULT_PVID */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_STATS_ENABLED */
 #endif
 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_GROUP_FWD_MASK */
 	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_ROOT_ID */
@@ -1087,10 +1100,10 @@
 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_ROOT_PATH_COST */
 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE */
 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
-	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_HELLO_TIMER */
-	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_TCN_TIMER */
-	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
-	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_GC_TIMER */
+	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
+	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
+	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
+	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
 	       nla_total_size(ETH_ALEN) +       /* IFLA_BR_GROUP_ADDR */
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_ROUTER */
@@ -1101,12 +1114,12 @@
 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_MAX */
 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
-	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
-	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
-	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERIER_INTVL */
-	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERY_INTVL */
-	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
-	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
+	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
+	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
+	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
+	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
+	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
+	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IPTABLES */
@@ -1129,16 +1142,17 @@
 	u64 clockval;
 
 	clockval = br_timer_value(&br->hello_timer);
-	if (nla_put_u64(skb, IFLA_BR_HELLO_TIMER, clockval))
+	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
 		return -EMSGSIZE;
 	clockval = br_timer_value(&br->tcn_timer);
-	if (nla_put_u64(skb, IFLA_BR_TCN_TIMER, clockval))
+	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
 		return -EMSGSIZE;
 	clockval = br_timer_value(&br->topology_change_timer);
-	if (nla_put_u64(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval))
+	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
+			      IFLA_BR_PAD))
 		return -EMSGSIZE;
 	clockval = br_timer_value(&br->gc_timer);
-	if (nla_put_u64(skb, IFLA_BR_GC_TIMER, clockval))
+	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
 		return -EMSGSIZE;
 
 	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
@@ -1163,7 +1177,8 @@
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
-	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid))
+	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
+	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
 		return -EMSGSIZE;
 #endif
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
@@ -1182,22 +1197,28 @@
 		return -EMSGSIZE;
 
 	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
-	if (nla_put_u64(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval))
+	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
+			      IFLA_BR_PAD))
 		return -EMSGSIZE;
 	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
-	if (nla_put_u64(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval))
+	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
+			      IFLA_BR_PAD))
 		return -EMSGSIZE;
 	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
-	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval))
+	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
+			      IFLA_BR_PAD))
 		return -EMSGSIZE;
 	clockval = jiffies_to_clock_t(br->multicast_query_interval);
-	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval))
+	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
+			      IFLA_BR_PAD))
 		return -EMSGSIZE;
 	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
-	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval))
+	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
+			      IFLA_BR_PAD))
 		return -EMSGSIZE;
 	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
-	if (nla_put_u64(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval))
+	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
+			      IFLA_BR_PAD))
 		return -EMSGSIZE;
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
@@ -1213,6 +1234,69 @@
 	return 0;
 }
 
+static size_t br_get_linkxstats_size(const struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_vlan *v;
+	int numvls = 0;
+
+	vg = br_vlan_group(br);
+	if (!vg)
+		return 0;
+
+	/* we need to count all, even placeholder entries */
+	list_for_each_entry(v, &vg->vlan_list, vlist)
+		numvls++;
+
+	/* account for the vlans and the link xstats type nest attribute */
+	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
+	       nla_total_size(0);
+}
+
+static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
+			      int *prividx)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_vlan *v;
+	struct nlattr *nest;
+	int vl_idx = 0;
+
+	vg = br_vlan_group(br);
+	if (!vg)
+		goto out;
+	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
+	if (!nest)
+		return -EMSGSIZE;
+	list_for_each_entry(v, &vg->vlan_list, vlist) {
+		struct bridge_vlan_xstats vxi;
+		struct br_vlan_stats stats;
+
+		if (vl_idx++ < *prividx)
+			continue;
+		memset(&vxi, 0, sizeof(vxi));
+		vxi.vid = v->vid;
+		br_vlan_get_stats(v, &stats);
+		vxi.rx_bytes = stats.rx_bytes;
+		vxi.rx_packets = stats.rx_packets;
+		vxi.tx_bytes = stats.tx_bytes;
+		vxi.tx_packets = stats.tx_packets;
+
+		if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
+			goto nla_put_failure;
+	}
+	nla_nest_end(skb, nest);
+	*prividx = 0;
+out:
+	return 0;
+
+nla_put_failure:
+	nla_nest_end(skb, nest);
+	*prividx = vl_idx;
+
+	return -EMSGSIZE;
+}
 
 static struct rtnl_af_ops br_af_ops __read_mostly = {
 	.family			= AF_BRIDGE,
@@ -1231,6 +1315,8 @@
 	.dellink		= br_dev_delete,
 	.get_size		= br_get_size,
 	.fill_info		= br_fill_info,
+	.fill_linkxstats	= br_fill_linkxstats,
+	.get_linkxstats_size	= br_get_linkxstats_size,
 
 	.slave_maxtype		= IFLA_BRPORT_MAX,
 	.slave_policy		= br_port_policy,
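
Throughout br_netlink.c the u64 timer and interval attributes move from nla_put_u64()/nla_total_size() to nla_put_u64_64bit()/nla_total_size_64bit() with a dedicated PAD attribute type (IFLA_BR_PAD, IFLA_BRPORT_PAD). The 64bit variants may emit a zero-length pad attribute in front of the value so the u64 payload lands on an 8-byte boundary, and the size helper reserves room for that pad. A small standalone model of the accounting, assuming the usual 4-byte netlink attribute alignment:

#include <stdio.h>

#define NLA_ALIGNTO   4
#define NLA_ALIGN(x)  (((x) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN    NLA_ALIGN(4)  /* struct nlattr: u16 len + u16 type */

/* Space for a regular attribute: header plus padded payload. */
static int total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

/* Worst-case space for a u64 attribute whose payload must start on an
 * 8-byte boundary: the attribute itself plus a possible zero-length
 * pad attribute emitted in front of it to fix up the alignment.
 */
static int total_size_64bit(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload) + NLA_HDRLEN;
}

int main(void)
{
	printf("u64 attr: %d bytes, 64bit-safe u64 attr: %d bytes\n",
	       total_size(8), total_size_64bit(8));   /* 12 vs 16 */
	return 0;
}
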
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1b5d145..c7fb5d7 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -77,12 +77,21 @@
 };
 #endif
 
+struct br_vlan_stats {
+	u64 rx_bytes;
+	u64 rx_packets;
+	u64 tx_bytes;
+	u64 tx_packets;
+	struct u64_stats_sync syncp;
+};
+
 /**
  * struct net_bridge_vlan - per-vlan entry
  *
  * @vnode: rhashtable member
  * @vid: VLAN id
  * @flags: bridge vlan flags
+ * @stats: per-cpu VLAN statistics
  * @br: if MASTER flag set, this points to a bridge struct
  * @port: if MASTER flag unset, this points to a port struct
  * @refcnt: if MASTER flag set, this is bumped for each port referencing it
@@ -100,6 +109,7 @@
 	struct rhash_head		vnode;
 	u16				vid;
 	u16				flags;
+	struct br_vlan_stats __percpu	*stats;
 	union {
 		struct net_bridge	*br;
 		struct net_bridge_port	*port;
@@ -342,6 +352,7 @@
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 	struct net_bridge_vlan_group	__rcu *vlgrp;
 	u8				vlan_enabled;
+	u8				vlan_stats_enabled;
 	__be16				vlan_proto;
 	u16				default_pvid;
 #endif
@@ -560,8 +571,8 @@
 			    unsigned char flags);
 void br_mdb_init(void);
 void br_mdb_uninit(void);
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
-		   int type);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+		   struct br_ip *group, int type, u8 flags);
 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
 		   int type);
 
@@ -691,6 +702,7 @@
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto);
 int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
+int br_vlan_set_stats(struct net_bridge *br, unsigned long val);
 int br_vlan_init(struct net_bridge *br);
 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid);
@@ -699,6 +711,8 @@
 void nbp_vlan_flush(struct net_bridge_port *port);
 int nbp_vlan_init(struct net_bridge_port *port);
 int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
+void br_vlan_get_stats(const struct net_bridge_vlan *v,
+		       struct br_vlan_stats *stats);
 
 static inline struct net_bridge_vlan_group *br_vlan_group(
 					const struct net_bridge *br)
@@ -881,6 +895,10 @@
 	return NULL;
 }
 
+static inline void br_vlan_get_stats(const struct net_bridge_vlan *v,
+				     struct br_vlan_stats *stats)
+{
+}
 #endif
 
 struct nf_br_ops {
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index e234490..9cb7044 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -582,7 +582,7 @@
 	int err;
 
 	err = switchdev_port_attr_set(br->dev, &attr);
-	if (err)
+	if (err && err != -EOPNOTSUPP)
 		return err;
 
 	br->ageing_time = t;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 6b80914..beb4707 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -43,7 +43,14 @@
 	if (endp == buf)
 		return -EINVAL;
 
+	if (!rtnl_trylock())
+		return restart_syscall();
+
 	err = (*set)(br, val);
+	if (!err)
+		netdev_state_change(br->dev);
+	rtnl_unlock();
+
 	return err ? err : len;
 }
 
@@ -101,15 +108,7 @@
 
 static int set_ageing_time(struct net_bridge *br, unsigned long val)
 {
-	int ret;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	ret = br_set_ageing_time(br, val);
-	rtnl_unlock();
-
-	return ret;
+	return br_set_ageing_time(br, val);
 }
 
 static ssize_t ageing_time_store(struct device *d,
@@ -128,27 +127,18 @@
 }
 
 
+static int set_stp_state(struct net_bridge *br, unsigned long val)
+{
+	br_stp_set_enabled(br, val);
+
+	return 0;
+}
+
 static ssize_t stp_state_store(struct device *d,
 			       struct device_attribute *attr, const char *buf,
 			       size_t len)
 {
-	struct net_bridge *br = to_bridge(d);
-	char *endp;
-	unsigned long val;
-
-	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
-		return -EPERM;
-
-	val = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
-		return -EINVAL;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-	br_stp_set_enabled(br, val);
-	rtnl_unlock();
-
-	return len;
+	return store_bridge_parm(d, buf, len, set_stp_state);
 }
 static DEVICE_ATTR_RW(stp_state);
 
@@ -160,29 +150,22 @@
 	return sprintf(buf, "%#x\n", br->group_fwd_mask);
 }
 
+static int set_group_fwd_mask(struct net_bridge *br, unsigned long val)
+{
+	if (val & BR_GROUPFWD_RESTRICTED)
+		return -EINVAL;
+
+	br->group_fwd_mask = val;
+
+	return 0;
+}
 
 static ssize_t group_fwd_mask_store(struct device *d,
 				    struct device_attribute *attr,
 				    const char *buf,
 				    size_t len)
 {
-	struct net_bridge *br = to_bridge(d);
-	char *endp;
-	unsigned long val;
-
-	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
-		return -EPERM;
-
-	val = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
-		return -EINVAL;
-
-	if (val & BR_GROUPFWD_RESTRICTED)
-		return -EINVAL;
-
-	br->group_fwd_mask = val;
-
-	return len;
+	return store_bridge_parm(d, buf, len, set_group_fwd_mask);
 }
 static DEVICE_ATTR_RW(group_fwd_mask);
 
@@ -328,6 +311,7 @@
 
 	br->group_addr_set = true;
 	br_recalculate_fwd_mask(br);
+	netdev_state_change(br->dev);
 
 	rtnl_unlock();
 
@@ -336,17 +320,17 @@
 
 static DEVICE_ATTR_RW(group_addr);
 
+static int set_flush(struct net_bridge *br, unsigned long val)
+{
+	br_fdb_flush(br);
+	return 0;
+}
+
 static ssize_t flush_store(struct device *d,
 			   struct device_attribute *attr,
 			   const char *buf, size_t len)
 {
-	struct net_bridge *br = to_bridge(d);
-
-	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
-		return -EPERM;
-
-	br_fdb_flush(br);
-	return len;
+	return store_bridge_parm(d, buf, len, set_flush);
 }
 static DEVICE_ATTR_WO(flush);
 
@@ -747,6 +731,22 @@
 	return store_bridge_parm(d, buf, len, br_vlan_set_default_pvid);
 }
 static DEVICE_ATTR_RW(default_pvid);
+
+static ssize_t vlan_stats_enabled_show(struct device *d,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->vlan_stats_enabled);
+}
+
+static ssize_t vlan_stats_enabled_store(struct device *d,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, br_vlan_set_stats);
+}
+static DEVICE_ATTR_RW(vlan_stats_enabled);
 #endif
 
 static struct attribute *bridge_attrs[] = {
@@ -794,6 +794,7 @@
 	&dev_attr_vlan_filtering.attr,
 	&dev_attr_vlan_protocol.attr,
 	&dev_attr_default_pvid.attr,
+	&dev_attr_vlan_stats_enabled.attr,
 #endif
 	NULL
 };
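
The sysfs conversion above moves the rtnl locking and the netdev_state_change() notification into the common store_bridge_parm() path, so each per-attribute store handler shrinks to a trivial set_*() callback. A compact sketch of that pattern outside the kernel, with the locking reduced to comments:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct bridge_sketch { unsigned long stp_enabled; };

/* Common store path, modelled on store_bridge_parm(): parse the value,
 * take the (here imaginary) lock, run the per-attribute setter and send
 * one change notification if it succeeded.
 */
static int store_param(struct bridge_sketch *br, const char *buf,
		       int (*set)(struct bridge_sketch *, unsigned long))
{
	char *end;
	unsigned long val = strtoul(buf, &end, 0);
	int err;

	if (end == buf)
		return -EINVAL;
	/* lock(); */
	err = set(br, val);
	if (!err)
		printf("state change notified\n");   /* netdev_state_change() */
	/* unlock(); */
	return err;
}

/* Per-attribute setters keep only the interesting part. */
static int set_stp_state(struct bridge_sketch *br, unsigned long val)
{
	br->stp_enabled = val;
	return 0;
}

int main(void)
{
	struct bridge_sketch br = { 0 };

	return store_param(&br, "1", set_stp_state) ? 1 : 0;
}
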
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index efe415a..1e04d4d 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -61,7 +61,6 @@
 	if (flags != p->flags) {
 		p->flags = flags;
 		br_port_flags_change(p, mask);
-		br_ifinfo_notify(RTM_NEWLINK, p);
 	}
 	return 0;
 }
@@ -253,8 +252,10 @@
 			spin_lock_bh(&p->br->lock);
 			ret = brport_attr->store(p, val);
 			spin_unlock_bh(&p->br->lock);
-			if (ret == 0)
+			if (!ret) {
+				br_ifinfo_notify(RTM_NEWLINK, p);
 				ret = count;
+			}
 		}
 		rtnl_unlock();
 	}
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 9309bb4..b6de4f4 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -162,6 +162,17 @@
 	return masterv;
 }
 
+static void br_master_vlan_rcu_free(struct rcu_head *rcu)
+{
+	struct net_bridge_vlan *v;
+
+	v = container_of(rcu, struct net_bridge_vlan, rcu);
+	WARN_ON(!br_vlan_is_master(v));
+	free_percpu(v->stats);
+	v->stats = NULL;
+	kfree(v);
+}
+
 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
 {
 	struct net_bridge_vlan_group *vg;
@@ -174,7 +185,7 @@
 		rhashtable_remove_fast(&vg->vlan_hash,
 				       &masterv->vnode, br_vlan_rht_params);
 		__vlan_del_list(masterv);
-		kfree_rcu(masterv, rcu);
+		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
 	}
 }
 
@@ -230,6 +241,7 @@
 		if (!masterv)
 			goto out_filt;
 		v->brvlan = masterv;
+		v->stats = masterv->stats;
 	}
 
 	/* Add the dev mac and count the vlan only if it's usable */
@@ -329,6 +341,7 @@
 			       struct net_bridge_vlan_group *vg,
 			       struct sk_buff *skb)
 {
+	struct br_vlan_stats *stats;
 	struct net_bridge_vlan *v;
 	u16 vid;
 
@@ -355,18 +368,27 @@
 			return NULL;
 		}
 	}
+	if (br->vlan_stats_enabled) {
+		stats = this_cpu_ptr(v->stats);
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_bytes += skb->len;
+		stats->tx_packets++;
+		u64_stats_update_end(&stats->syncp);
+	}
+
 	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 		skb->vlan_tci = 0;
-
 out:
 	return skb;
 }
 
 /* Called under RCU */
-static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
+static bool __allowed_ingress(const struct net_bridge *br,
+			      struct net_bridge_vlan_group *vg,
 			      struct sk_buff *skb, u16 *vid)
 {
-	const struct net_bridge_vlan *v;
+	struct br_vlan_stats *stats;
+	struct net_bridge_vlan *v;
 	bool tagged;
 
 	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
@@ -375,7 +397,7 @@
 	 * HW accelerated vlan tag.
 	 */
 	if (unlikely(!skb_vlan_tag_present(skb) &&
-		     skb->protocol == proto)) {
+		     skb->protocol == br->vlan_proto)) {
 		skb = skb_vlan_untag(skb);
 		if (unlikely(!skb))
 			return false;
@@ -383,7 +405,7 @@
 
 	if (!br_vlan_get_tag(skb, vid)) {
 		/* Tagged frame */
-		if (skb->vlan_proto != proto) {
+		if (skb->vlan_proto != br->vlan_proto) {
 			/* Protocol-mismatch, empty out vlan_tci for new tag */
 			skb_push(skb, ETH_HLEN);
 			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
@@ -419,7 +441,7 @@
 		*vid = pvid;
 		if (likely(!tagged))
 			/* Untagged Frame. */
-			__vlan_hwaccel_put_tag(skb, proto, pvid);
+			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
 		else
 			/* Priority-tagged Frame.
 			 * At this point, We know that skb->vlan_tci had
@@ -428,13 +450,24 @@
 			 */
 			skb->vlan_tci |= pvid;
 
-		return true;
+		/* if stats are disabled we can avoid the lookup */
+		if (!br->vlan_stats_enabled)
+			return true;
+	}
+	v = br_vlan_find(vg, *vid);
+	if (!v || !br_vlan_should_use(v))
+		goto drop;
+
+	if (br->vlan_stats_enabled) {
+		stats = this_cpu_ptr(v->stats);
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_bytes += skb->len;
+		stats->rx_packets++;
+		u64_stats_update_end(&stats->syncp);
 	}
 
-	/* Frame had a valid vlan tag.  See if vlan is allowed */
-	v = br_vlan_find(vg, *vid);
-	if (v && br_vlan_should_use(v))
-		return true;
+	return true;
+
 drop:
 	kfree_skb(skb);
 	return false;
@@ -452,7 +485,7 @@
 		return true;
 	}
 
-	return __allowed_ingress(vg, br->vlan_proto, skb, vid);
+	return __allowed_ingress(br, vg, skb, vid);
 }
 
 /* Called under RCU. */
@@ -542,6 +575,11 @@
 	if (!vlan)
 		return -ENOMEM;
 
+	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
+	if (!vlan->stats) {
+		kfree(vlan);
+		return -ENOMEM;
+	}
 	vlan->vid = vid;
 	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
 	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
@@ -549,8 +587,10 @@
 	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
 		atomic_set(&vlan->refcnt, 1);
 	ret = __vlan_add(vlan, flags);
-	if (ret)
+	if (ret) {
+		free_percpu(vlan->stats);
 		kfree(vlan);
+	}
 
 	return ret;
 }
@@ -651,15 +691,7 @@
 
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 {
-	int err;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	err = __br_vlan_filter_toggle(br, val);
-	rtnl_unlock();
-
-	return err;
+	return __br_vlan_filter_toggle(br, val);
 }
 
 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
@@ -713,18 +745,24 @@
 
 int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
 {
-	int err;
-
 	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
 		return -EPROTONOSUPPORT;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
+	return __br_vlan_set_proto(br, htons(val));
+}
 
-	err = __br_vlan_set_proto(br, htons(val));
-	rtnl_unlock();
+int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
+{
+	switch (val) {
+	case 0:
+	case 1:
+		br->vlan_stats_enabled = val;
+		break;
+	default:
+		return -EINVAL;
+	}
 
-	return err;
+	return 0;
 }
 
 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
@@ -855,21 +893,17 @@
 	if (val >= VLAN_VID_MASK)
 		return -EINVAL;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-
 	if (pvid == br->default_pvid)
-		goto unlock;
+		goto out;
 
 	/* Only allow default pvid change when filtering is disabled */
 	if (br->vlan_enabled) {
 		pr_info_once("Please disable vlan filtering to change default_pvid\n");
 		err = -EPERM;
-		goto unlock;
+		goto out;
 	}
 	err = __br_vlan_set_default_pvid(br, pvid);
-unlock:
-	rtnl_unlock();
+out:
 	return err;
 }
 
@@ -1020,3 +1054,30 @@
 	synchronize_rcu();
 	__vlan_group_free(vg);
 }
+
+void br_vlan_get_stats(const struct net_bridge_vlan *v,
+		       struct br_vlan_stats *stats)
+{
+	int i;
+
+	memset(stats, 0, sizeof(*stats));
+	for_each_possible_cpu(i) {
+		u64 rxpackets, rxbytes, txpackets, txbytes;
+		struct br_vlan_stats *cpu_stats;
+		unsigned int start;
+
+		cpu_stats = per_cpu_ptr(v->stats, i);
+		do {
+			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			rxpackets = cpu_stats->rx_packets;
+			rxbytes = cpu_stats->rx_bytes;
+			txbytes = cpu_stats->tx_bytes;
+			txpackets = cpu_stats->tx_packets;
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+		stats->rx_packets += rxpackets;
+		stats->rx_bytes += rxbytes;
+		stats->tx_bytes += txbytes;
+		stats->tx_packets += txpackets;
+	}
+}
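
The per-VLAN counters are allocated per CPU on the master VLAN entry, shared by the port entries (v->stats = masterv->stats) and freed only from the master's RCU callback. br_vlan_get_stats() folds the per-CPU snapshots together under u64_stats_fetch_begin_irq()/retry. A toy single-process model of that sequence-counter pattern, which is roughly what the u64_stats helpers expand to on 32-bit SMP builds:

#include <stdint.h>
#include <string.h>

struct pcpu_vlan_stats {
	unsigned int seq;            /* writers bump this around updates */
	uint64_t rx_bytes, rx_packets, tx_bytes, tx_packets;
};

static void stats_add_rx(struct pcpu_vlan_stats *s, uint64_t bytes)
{
	s->seq++;                     /* u64_stats_update_begin() */
	s->rx_bytes += bytes;
	s->rx_packets++;
	s->seq++;                     /* u64_stats_update_end() */
}

/* Readers retry their snapshot if the sequence was odd (writer active)
 * or changed underneath them, then add the consistent values up.
 */
static void stats_sum(const struct pcpu_vlan_stats *percpu, int ncpus,
		      struct pcpu_vlan_stats *out)
{
	out->rx_bytes = out->rx_packets = 0;
	out->tx_bytes = out->tx_packets = 0;
	for (int i = 0; i < ncpus; i++) {
		uint64_t rb, rp, tb, tp;
		unsigned int start;

		do {                              /* fetch_begin / retry */
			start = percpu[i].seq;
			rb = percpu[i].rx_bytes;
			rp = percpu[i].rx_packets;
			tb = percpu[i].tx_bytes;
			tp = percpu[i].tx_packets;
		} while (percpu[i].seq != start || (start & 1));

		out->rx_bytes += rb;
		out->rx_packets += rp;
		out->tx_bytes += tb;
		out->tx_packets += tp;
	}
}

int main(void)
{
	struct pcpu_vlan_stats cpu[2], total;

	memset(cpu, 0, sizeof(cpu));
	stats_add_rx(&cpu[0], 1500);
	stats_add_rx(&cpu[1], 64);
	stats_sum(cpu, 2, &total);
	return total.rx_packets == 2 ? 0 : 1;
}
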
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 67b2e27..5a61f35 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -370,7 +370,11 @@
 	    left - sizeof(struct ebt_entry_match) < m->match_size)
 		return -EINVAL;
 
-	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+	match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+	if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+		request_module("ebt_%s", m->u.name);
+		match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+	}
 	if (IS_ERR(match))
 		return PTR_ERR(match);
 	m->u.match = match;
@@ -1521,6 +1525,8 @@
 	if (copy_from_user(&tmp, user, sizeof(tmp)))
 		return -EFAULT;
 
+	tmp.name[sizeof(tmp.name) - 1] = '\0';
+
 	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
 	if (!t)
 		return ret;
@@ -2332,6 +2338,8 @@
 	if (copy_from_user(&tmp, user, sizeof(tmp)))
 		return -EFAULT;
 
+	tmp.name[sizeof(tmp.name) - 1] = '\0';
+
 	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
 	if (!t)
 		return ret;
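
Two independent fixes sit in the ebtables hunks: match lookup now tries xt_find_match() first and only falls back to request_module("ebt_%s", ...) when no bridge-family match is registered, and the table name copied in from userspace is forcibly NUL-terminated before it is used as a lookup key. A minimal model of the second fix (the 32-byte buffer is just a stand-in for sizeof(tmp.name)):

#include <stdio.h>
#include <string.h>

#define NAME_LEN 32

/* Model of the hardening added after copy_from_user(): a name copied
 * from an untrusted, fixed-size buffer may not contain a NUL at all,
 * so terminate it before using it as a lookup key or in "%s".
 */
static void sanitize_table_name(char name[NAME_LEN])
{
	name[NAME_LEN - 1] = '\0';
}

int main(void)
{
	char name[NAME_LEN];

	memset(name, 'A', sizeof(name));        /* no terminator from "userspace" */
	sanitize_table_name(name);
	printf("%s\n", name);                   /* now guaranteed to stop */
	return 0;
}
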
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index 7fcdd72..a78c4e2 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -162,15 +162,57 @@
 			  (1 << NF_BR_POST_ROUTING),
 };
 
+static void nf_br_saveroute(const struct sk_buff *skb,
+			    struct nf_queue_entry *entry)
+{
+}
+
+static int nf_br_reroute(struct net *net, struct sk_buff *skb,
+			 const struct nf_queue_entry *entry)
+{
+	return 0;
+}
+
+static __sum16 nf_br_checksum(struct sk_buff *skb, unsigned int hook,
+			      unsigned int dataoff, u_int8_t protocol)
+{
+	return 0;
+}
+
+static __sum16 nf_br_checksum_partial(struct sk_buff *skb, unsigned int hook,
+				      unsigned int dataoff, unsigned int len,
+				      u_int8_t protocol)
+{
+	return 0;
+}
+
+static int nf_br_route(struct net *net, struct dst_entry **dst,
+		       struct flowi *fl, bool strict __always_unused)
+{
+	return 0;
+}
+
+static const struct nf_afinfo nf_br_afinfo = {
+	.family                 = AF_BRIDGE,
+	.checksum               = nf_br_checksum,
+	.checksum_partial       = nf_br_checksum_partial,
+	.route                  = nf_br_route,
+	.saveroute              = nf_br_saveroute,
+	.reroute                = nf_br_reroute,
+	.route_key_size         = 0,
+};
+
 static int __init nf_tables_bridge_init(void)
 {
 	int ret;
 
+	nf_register_afinfo(&nf_br_afinfo);
 	nft_register_chain_type(&filter_bridge);
 	ret = register_pernet_subsys(&nf_tables_bridge_net_ops);
-	if (ret < 0)
+	if (ret < 0) {
 		nft_unregister_chain_type(&filter_bridge);
-
+		nf_unregister_afinfo(&nf_br_afinfo);
+	}
 	return ret;
 }
 
@@ -178,6 +220,7 @@
 {
 	unregister_pernet_subsys(&nf_tables_bridge_net_ops);
 	nft_unregister_chain_type(&filter_bridge);
+	nf_unregister_afinfo(&nf_br_afinfo);
 }
 
 module_init(nf_tables_bridge_init);
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index adc8d72..77f7e7a 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -40,7 +40,8 @@
 /* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
  * or the bridge port (NF_BRIDGE PREROUTING).
  */
-static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
+static void nft_reject_br_send_v4_tcp_reset(struct net *net,
+					    struct sk_buff *oldskb,
 					    const struct net_device *dev,
 					    int hook)
 {
@@ -48,7 +49,6 @@
 	struct iphdr *niph;
 	const struct tcphdr *oth;
 	struct tcphdr _oth;
-	struct net *net = sock_net(oldskb->sk);
 
 	if (!nft_bridge_iphdr_validate(oldskb))
 		return;
@@ -75,7 +75,8 @@
 	br_deliver(br_port_get_rcu(dev), nskb);
 }
 
-static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
+static void nft_reject_br_send_v4_unreach(struct net *net,
+					  struct sk_buff *oldskb,
 					  const struct net_device *dev,
 					  int hook, u8 code)
 {
@@ -86,7 +87,6 @@
 	void *payload;
 	__wsum csum;
 	u8 proto;
-	struct net *net = sock_net(oldskb->sk);
 
 	if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
 		return;
@@ -273,17 +273,17 @@
 	case htons(ETH_P_IP):
 		switch (priv->type) {
 		case NFT_REJECT_ICMP_UNREACH:
-			nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
-						      pkt->hook,
+			nft_reject_br_send_v4_unreach(pkt->net, pkt->skb,
+						      pkt->in, pkt->hook,
 						      priv->icmp_code);
 			break;
 		case NFT_REJECT_TCP_RST:
-			nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in,
-							pkt->hook);
+			nft_reject_br_send_v4_tcp_reset(pkt->net, pkt->skb,
+							pkt->in, pkt->hook);
 			break;
 		case NFT_REJECT_ICMPX_UNREACH:
-			nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
-						      pkt->hook,
+			nft_reject_br_send_v4_unreach(pkt->net, pkt->skb,
+						      pkt->in, pkt->hook,
 						      nft_reject_icmp_code(priv->icmp_code));
 			break;
 		}
diff --git a/net/can/raw.c b/net/can/raw.c
index 2e67b14..972c187 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -755,7 +755,7 @@
 	if (err < 0)
 		goto free_skb;
 
-	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+	sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);
 
 	skb->dev = dev;
 	skb->sk  = sk;
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 6b923bc..2bc5965 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -293,13 +293,9 @@
 }
 EXPORT_SYMBOL(ceph_auth_create_authorizer);
 
-void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
-				  struct ceph_authorizer *a)
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
 {
-	mutex_lock(&ac->mutex);
-	if (ac->ops && ac->ops->destroy_authorizer)
-		ac->ops->destroy_authorizer(ac, a);
-	mutex_unlock(&ac->mutex);
+	a->destroy(a);
 }
 EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
 
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 8c93fa8..5f836f0 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -16,7 +16,6 @@
 	struct ceph_auth_none_info *xi = ac->private;
 
 	xi->starting = true;
-	xi->built_authorizer = false;
 }
 
 static void destroy(struct ceph_auth_client *ac)
@@ -39,6 +38,27 @@
 	return xi->starting;
 }
 
+static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac,
+					   struct ceph_none_authorizer *au)
+{
+	void *p = au->buf;
+	void *const end = p + sizeof(au->buf);
+	int ret;
+
+	ceph_encode_8_safe(&p, end, 1, e_range);
+	ret = ceph_entity_name_encode(ac->name, &p, end);
+	if (ret < 0)
+		return ret;
+
+	ceph_encode_64_safe(&p, end, ac->global_id, e_range);
+	au->buf_len = p - (void *)au->buf;
+	dout("%s built authorizer len %d\n", __func__, au->buf_len);
+	return 0;
+
+e_range:
+	return -ERANGE;
+}
+
 static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
 {
 	return 0;
@@ -57,32 +77,32 @@
 	return result;
 }
 
+static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
+{
+	kfree(a);
+}
+
 /*
- * build an 'authorizer' with our entity_name and global_id.  we can
- * reuse a single static copy since it is identical for all services
- * we connect to.
+ * build an 'authorizer' with our entity_name and global_id.  it is
+ * identical for all services we connect to.
  */
 static int ceph_auth_none_create_authorizer(
 	struct ceph_auth_client *ac, int peer_type,
 	struct ceph_auth_handshake *auth)
 {
-	struct ceph_auth_none_info *ai = ac->private;
-	struct ceph_none_authorizer *au = &ai->au;
-	void *p, *end;
+	struct ceph_none_authorizer *au;
 	int ret;
 
-	if (!ai->built_authorizer) {
-		p = au->buf;
-		end = p + sizeof(au->buf);
-		ceph_encode_8(&p, 1);
-		ret = ceph_entity_name_encode(ac->name, &p, end - 8);
-		if (ret < 0)
-			goto bad;
-		ceph_decode_need(&p, end, sizeof(u64), bad2);
-		ceph_encode_64(&p, ac->global_id);
-		au->buf_len = p - (void *)au->buf;
-		ai->built_authorizer = true;
-		dout("built authorizer len %d\n", au->buf_len);
+	au = kmalloc(sizeof(*au), GFP_NOFS);
+	if (!au)
+		return -ENOMEM;
+
+	au->base.destroy = ceph_auth_none_destroy_authorizer;
+
+	ret = ceph_auth_none_build_authorizer(ac, au);
+	if (ret) {
+		kfree(au);
+		return ret;
 	}
 
 	auth->authorizer = (struct ceph_authorizer *) au;
@@ -92,17 +112,6 @@
 	auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
 
 	return 0;
-
-bad2:
-	ret = -ERANGE;
-bad:
-	return ret;
-}
-
-static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
-				      struct ceph_authorizer *a)
-{
-	/* nothing to do */
 }
 
 static const struct ceph_auth_client_ops ceph_auth_none_ops = {
@@ -114,7 +123,6 @@
 	.build_request = build_request,
 	.handle_reply = handle_reply,
 	.create_authorizer = ceph_auth_none_create_authorizer,
-	.destroy_authorizer = ceph_auth_none_destroy_authorizer,
 };
 
 int ceph_auth_none_init(struct ceph_auth_client *ac)
@@ -127,7 +135,6 @@
 		return -ENOMEM;
 
 	xi->starting = true;
-	xi->built_authorizer = false;
 
 	ac->protocol = CEPH_AUTH_NONE;
 	ac->private = xi;
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h
index 059a3ce..6202153 100644
--- a/net/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -12,6 +12,7 @@
  */
 
 struct ceph_none_authorizer {
+	struct ceph_authorizer base;
 	char buf[128];
 	int buf_len;
 	char reply_buf[0];
@@ -19,8 +20,6 @@
 
 struct ceph_auth_none_info {
 	bool starting;
-	bool built_authorizer;
-	struct ceph_none_authorizer au;   /* we only need one; it's static */
 };
 
 int ceph_auth_none_init(struct ceph_auth_client *ac);
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 9e43a31..a0905f0 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -565,6 +565,14 @@
 	return -EAGAIN;
 }
 
+static void ceph_x_destroy_authorizer(struct ceph_authorizer *a)
+{
+	struct ceph_x_authorizer *au = (void *)a;
+
+	ceph_x_authorizer_cleanup(au);
+	kfree(au);
+}
+
 static int ceph_x_create_authorizer(
 	struct ceph_auth_client *ac, int peer_type,
 	struct ceph_auth_handshake *auth)
@@ -581,6 +589,8 @@
 	if (!au)
 		return -ENOMEM;
 
+	au->base.destroy = ceph_x_destroy_authorizer;
+
 	ret = ceph_x_build_authorizer(ac, th, au);
 	if (ret) {
 		kfree(au);
@@ -643,16 +653,6 @@
 	return ret;
 }
 
-static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
-				      struct ceph_authorizer *a)
-{
-	struct ceph_x_authorizer *au = (void *)a;
-
-	ceph_x_authorizer_cleanup(au);
-	kfree(au);
-}
-
-
 static void ceph_x_reset(struct ceph_auth_client *ac)
 {
 	struct ceph_x_info *xi = ac->private;
@@ -770,7 +770,6 @@
 	.create_authorizer = ceph_x_create_authorizer,
 	.update_authorizer = ceph_x_update_authorizer,
 	.verify_authorizer_reply = ceph_x_verify_authorizer_reply,
-	.destroy_authorizer = ceph_x_destroy_authorizer,
 	.invalidate_authorizer = ceph_x_invalidate_authorizer,
 	.reset =  ceph_x_reset,
 	.destroy = ceph_x_destroy,
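
Across net/ceph the authorizer destructor moves from the per-client ops table to a function pointer carried by each authorizer itself: create_authorizer() sets au->base.destroy, and ceph_auth_destroy_authorizer() simply calls a->destroy(a) without touching the auth client or its mutex. A self-contained sketch of that embedded-base pattern, with invented field names standing in for the real per-flavour state:

#include <stdlib.h>

/* The generic object carries its own destructor, so callers can drop it
 * without holding a reference to the client that created it.
 */
struct authorizer {
	void (*destroy)(struct authorizer *);
};

struct x_authorizer {
	struct authorizer base;      /* first member, so casts back are valid */
	char *ticket;                /* imaginary per-flavour state */
};

static void x_authorizer_destroy(struct authorizer *a)
{
	struct x_authorizer *au = (struct x_authorizer *)a;

	free(au->ticket);
	free(au);
}

static struct authorizer *x_authorizer_create(void)
{
	struct x_authorizer *au = calloc(1, sizeof(*au));

	if (!au)
		return NULL;
	au->base.destroy = x_authorizer_destroy;
	return &au->base;
}

int main(void)
{
	struct authorizer *a = x_authorizer_create();

	if (a)
		a->destroy(a);       /* ceph_auth_destroy_authorizer() equivalent */
	return 0;
}
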
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 40b1a3c..21a5af9 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@
 
 
 struct ceph_x_authorizer {
+	struct ceph_authorizer base;
 	struct ceph_crypto_key session_key;
 	struct ceph_buffer *buf;
 	unsigned int service;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1831f63..a550289 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -269,7 +269,7 @@
 	}
 
 	BUG_ON(zero_page == NULL);
-	page_cache_release(zero_page);
+	put_page(zero_page);
 	zero_page = NULL;
 
 	ceph_msgr_slab_exit();
@@ -282,7 +282,7 @@
 
 	BUG_ON(zero_page != NULL);
 	zero_page = ZERO_PAGE(0);
-	page_cache_get(zero_page);
+	get_page(zero_page);
 
 	/*
 	 * The number of active work items is limited by the number of
@@ -1602,7 +1602,7 @@
 
 	dout("%s %p %d left\n", __func__, con, con->out_skip);
 	while (con->out_skip > 0) {
-		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
+		size_t size = min(con->out_skip, (int) PAGE_SIZE);
 
 		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
 		if (ret <= 0)
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 32355d9d..40a53a7 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1087,10 +1087,8 @@
 	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
 	     atomic_read(&osd->o_ref) - 1);
 	if (atomic_dec_and_test(&osd->o_ref)) {
-		struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
-
 		if (osd->o_auth.authorizer)
-			ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
+			ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
 		kfree(osd);
 	}
 }
@@ -2984,7 +2982,7 @@
 	struct ceph_auth_handshake *auth = &o->o_auth;
 
 	if (force_new && auth->authorizer) {
-		ceph_auth_destroy_authorizer(ac, auth->authorizer);
+		ceph_auth_destroy_authorizer(auth->authorizer);
 		auth->authorizer = NULL;
 	}
 	if (!auth->authorizer) {
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index c7c220a..6864007 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -56,7 +56,7 @@
 		size_t bit = pl->room;
 		int ret;
 
-		memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
+		memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK),
 		       buf, bit);
 		pl->length += bit;
 		pl->room -= bit;
@@ -67,7 +67,7 @@
 			return ret;
 	}
 
-	memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
+	memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len);
 	pl->length += len;
 	pl->room -= len;
 	return 0;
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 10297f7..00d2601 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -95,19 +95,19 @@
 					 loff_t off, size_t len)
 {
 	int i = 0;
-	int po = off & ~PAGE_CACHE_MASK;
+	int po = off & ~PAGE_MASK;
 	int left = len;
 	int l, bad;
 
 	while (left > 0) {
-		l = min_t(int, PAGE_CACHE_SIZE-po, left);
+		l = min_t(int, PAGE_SIZE-po, left);
 		bad = copy_from_user(page_address(pages[i]) + po, data, l);
 		if (bad == l)
 			return -EFAULT;
 		data += l - bad;
 		left -= l - bad;
 		po += l - bad;
-		if (po == PAGE_CACHE_SIZE) {
+		if (po == PAGE_SIZE) {
 			po = 0;
 			i++;
 		}
@@ -121,17 +121,17 @@
 				    loff_t off, size_t len)
 {
 	int i = 0;
-	size_t po = off & ~PAGE_CACHE_MASK;
+	size_t po = off & ~PAGE_MASK;
 	size_t left = len;
 
 	while (left > 0) {
-		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+		size_t l = min_t(size_t, PAGE_SIZE-po, left);
 
 		memcpy(page_address(pages[i]) + po, data, l);
 		data += l;
 		left -= l;
 		po += l;
-		if (po == PAGE_CACHE_SIZE) {
+		if (po == PAGE_SIZE) {
 			po = 0;
 			i++;
 		}
@@ -144,17 +144,17 @@
 				    loff_t off, size_t len)
 {
 	int i = 0;
-	size_t po = off & ~PAGE_CACHE_MASK;
+	size_t po = off & ~PAGE_MASK;
 	size_t left = len;
 
 	while (left > 0) {
-		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
+		size_t l = min_t(size_t, PAGE_SIZE-po, left);
 
 		memcpy(data, page_address(pages[i]) + po, l);
 		data += l;
 		left -= l;
 		po += l;
-		if (po == PAGE_CACHE_SIZE) {
+		if (po == PAGE_SIZE) {
 			po = 0;
 			i++;
 		}
@@ -168,25 +168,25 @@
  */
 void ceph_zero_page_vector_range(int off, int len, struct page **pages)
 {
-	int i = off >> PAGE_CACHE_SHIFT;
+	int i = off >> PAGE_SHIFT;
 
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 
 	dout("zero_page_vector_page %u~%u\n", off, len);
 
 	/* leading partial page? */
 	if (off) {
-		int end = min((int)PAGE_CACHE_SIZE, off + len);
+		int end = min((int)PAGE_SIZE, off + len);
 		dout("zeroing %d %p head from %d\n", i, pages[i],
 		     (int)off);
 		zero_user_segment(pages[i], off, end);
 		len -= (end - off);
 		i++;
 	}
-	while (len >= PAGE_CACHE_SIZE) {
+	while (len >= PAGE_SIZE) {
 		dout("zeroing %d %p len=%d\n", i, pages[i], len);
-		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
-		len -= PAGE_CACHE_SIZE;
+		zero_user_segment(pages[i], 0, PAGE_SIZE);
+		len -= PAGE_SIZE;
 		i++;
 	}
 	/* trailing partial page? */
diff --git a/net/core/datagram.c b/net/core/datagram.c
index fa9dc64..b7de71f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -301,16 +301,19 @@
 }
 EXPORT_SYMBOL(skb_free_datagram);
 
-void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
+void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
 {
 	bool slow;
 
 	if (likely(atomic_read(&skb->users) == 1))
 		smp_rmb();
-	else if (likely(!atomic_dec_and_test(&skb->users)))
+	else if (likely(!atomic_dec_and_test(&skb->users))) {
+		sk_peek_offset_bwd(sk, len);
 		return;
+	}
 
 	slow = lock_sock_fast(sk);
+	sk_peek_offset_bwd(sk, len);
 	skb_orphan(skb);
 	sk_mem_reclaim_partial(sk);
 	unlock_sock_fast(sk, slow);
@@ -318,7 +321,7 @@
 	/* skb is now orphaned, can be freed outside of locked section */
 	__kfree_skb(skb);
 }
-EXPORT_SYMBOL(skb_free_datagram_locked);
+EXPORT_SYMBOL(__skb_free_datagram_locked);
 
 /**
  *	skb_kill_datagram - Free a datagram skbuff forcibly
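
__skb_free_datagram_locked() now receives the datagram length and rewinds the socket's peek offset with sk_peek_offset_bwd() on both the refcount fast path and the locked slow path, so SO_PEEK_OFF readers keep pointing at the same unread data after a datagram is consumed. A toy model of that bookkeeping (the real helper also covers the case where SO_PEEK_OFF is not enabled):

#include <stdio.h>

struct sock_sketch { int peek_off; };

/* Peeks advance the offset; removing 'len' bytes from the front of the
 * queue pulls it back so later peeks still line up.
 */
static void peek(struct sock_sketch *sk, int len)
{
	sk->peek_off += len;                 /* sk_peek_offset_fwd() */
}

static void free_datagram(struct sock_sketch *sk, int len)
{
	sk->peek_off -= len;                 /* sk_peek_offset_bwd() */
	if (sk->peek_off < 0)
		sk->peek_off = 0;
}

int main(void)
{
	struct sock_sketch sk = { .peek_off = 0 };

	peek(&sk, 100);          /* MSG_PEEK logically consumed 100 bytes */
	free_datagram(&sk, 100); /* datagram actually dequeued and freed  */
	printf("peek_off=%d\n", sk.peek_off);
	return 0;
}
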
diff --git a/net/core/dev.c b/net/core/dev.c
index b9bcbe7..904ff43 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1741,7 +1741,7 @@
 			__net_timestamp(SKB);		\
 	}						\
 
-bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
+bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
 {
 	unsigned int len;
 
@@ -1850,7 +1850,7 @@
  *	taps currently in use.
  */
 
-static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct packet_type *ptype;
 	struct sk_buff *skb2 = NULL;
@@ -1907,6 +1907,7 @@
 		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
 	rcu_read_unlock();
 }
+EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
 
 /**
  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
@@ -2711,6 +2712,19 @@
 			return ERR_PTR(err);
 	}
 
+	/* Only report GSO partial support if it will enable us to
+	 * support segmentation on this frame without needing additional
+	 * work.
+	 */
+	if (features & NETIF_F_GSO_PARTIAL) {
+		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
+		struct net_device *dev = skb->dev;
+
+		partial_features |= dev->features & dev->gso_partial_features;
+		if (!skb_gso_ok(skb, features | partial_features))
+			features &= ~NETIF_F_GSO_PARTIAL;
+	}
+
 	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
 		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
 
@@ -2802,7 +2816,7 @@
 
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, type)) {
-		features &= ~NETIF_F_CSUM_MASK;
+		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 	} else if (illegal_highdma(skb->dev, skb)) {
 		features &= ~NETIF_F_SG;
 	}
@@ -2825,14 +2839,45 @@
 	return vlan_features_check(skb, features);
 }
 
+static netdev_features_t gso_features_check(const struct sk_buff *skb,
+					    struct net_device *dev,
+					    netdev_features_t features)
+{
+	u16 gso_segs = skb_shinfo(skb)->gso_segs;
+
+	if (gso_segs > dev->gso_max_segs)
+		return features & ~NETIF_F_GSO_MASK;
+
+	/* Support for GSO partial features requires software
+	 * intervention before we can actually process the packets,
+	 * so we need to strip support for any partial features now;
+	 * we can pull them back in after we have partially
+	 * segmented the frame.
+	 */
+	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
+		features &= ~dev->gso_partial_features;
+
+	/* Make sure to clear the IPv4 ID mangling feature if the
+	 * IPv4 header has the potential to be fragmented.
+	 */
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+		struct iphdr *iph = skb->encapsulation ?
+				    inner_ip_hdr(skb) : ip_hdr(skb);
+
+		if (!(iph->frag_off & htons(IP_DF)))
+			features &= ~NETIF_F_TSO_MANGLEID;
+	}
+
+	return features;
+}
+
 netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 	netdev_features_t features = dev->features;
-	u16 gso_segs = skb_shinfo(skb)->gso_segs;
 
-	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
-		features &= ~NETIF_F_GSO_MASK;
+	if (skb_is_gso(skb))
+		features = gso_features_check(skb, dev, features);
 
 	/* If encapsulation offload request, verify we are testing
 	 * hardware encapsulation features instead of standard
@@ -2915,9 +2960,6 @@
 {
 	netdev_features_t features;
 
-	if (skb->next)
-		return skb;
-
 	features = netif_skb_features(skb);
 	skb = validate_xmit_vlan(skb, features);
 	if (unlikely(!skb))
@@ -2960,6 +3002,7 @@
 out_kfree_skb:
 	kfree_skb(skb);
 out_null:
+	atomic_long_inc(&dev->tx_dropped);
 	return NULL;
 }
 
@@ -3143,12 +3186,12 @@
 	case TC_ACT_SHOT:
 		qdisc_qstats_cpu_drop(cl->q);
 		*ret = NET_XMIT_DROP;
-		goto drop;
+		kfree_skb(skb);
+		return NULL;
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
 		*ret = NET_XMIT_SUCCESS;
-drop:
-		kfree_skb(skb);
+		consume_skb(skb);
 		return NULL;
 	case TC_ACT_REDIRECT:
 		/* No need to push/pop skb's mac_header here on egress! */
@@ -3349,7 +3392,7 @@
 
 			skb = validate_xmit_skb(skb, dev);
 			if (!skb)
-				goto drop;
+				goto out;
 
 			HARD_TX_LOCK(dev, txq, cpu);
 
@@ -3376,7 +3419,6 @@
 	}
 
 	rc = -ENETDOWN;
-drop:
 	rcu_read_unlock_bh();
 
 	atomic_long_inc(&dev->tx_dropped);
@@ -3428,6 +3470,7 @@
 EXPORT_SYMBOL(rps_cpu_mask);
 
 struct static_key rps_needed __read_mostly;
+EXPORT_SYMBOL(rps_needed);
 
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
@@ -3914,9 +3957,11 @@
 		break;
 	case TC_ACT_SHOT:
 		qdisc_qstats_cpu_drop(cl->q);
+		kfree_skb(skb);
+		return NULL;
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
-		kfree_skb(skb);
+		consume_skb(skb);
 		return NULL;
 	case TC_ACT_REDIRECT:
 		/* skb_mac_header check was done by cls/act_bpf, so
@@ -4439,6 +4484,8 @@
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 		NAPI_GRO_CB(skb)->encap_mark = 0;
+		NAPI_GRO_CB(skb)->is_fou = 0;
+		NAPI_GRO_CB(skb)->is_atomic = 1;
 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
 		/* Setup for GRO checksum validation */
@@ -4663,6 +4710,8 @@
 	if (unlikely(skb_gro_header_hard(skb, hlen))) {
 		eth = skb_gro_header_slow(skb, hlen, 0);
 		if (unlikely(!eth)) {
+			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
+					     __func__, napi->dev->name);
 			napi_reuse_skb(napi, skb);
 			return NULL;
 		}
@@ -4937,8 +4986,8 @@
 			netpoll_poll_unlock(have);
 		}
 		if (rc > 0)
-			NET_ADD_STATS_BH(sock_net(sk),
-					 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+			__NET_ADD_STATS(sock_net(sk),
+					LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 		local_bh_enable();
 
 		if (rc == LL_FLUSH_FAILED)
@@ -6675,6 +6724,10 @@
 		features &= ~NETIF_F_TSO6;
 	}
 
+	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
+	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
+		features &= ~NETIF_F_TSO_MANGLEID;
+
 	/* TSO ECN requires that TSO is present as well. */
 	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
 		features &= ~NETIF_F_TSO_ECN;
@@ -6703,6 +6756,14 @@
 		}
 	}
 
+	/* GSO partial features require GSO partial be set */
+	if ((features & dev->gso_partial_features) &&
+	    !(features & NETIF_F_GSO_PARTIAL)) {
+		netdev_dbg(dev,
+			   "Dropping partially supported GSO features since no GSO partial.\n");
+		features &= ~dev->gso_partial_features;
+	}
+
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	if (dev->netdev_ops->ndo_busy_poll)
 		features |= NETIF_F_BUSY_POLL;
@@ -6973,9 +7034,22 @@
 	dev->features |= NETIF_F_SOFT_FEATURES;
 	dev->wanted_features = dev->features & dev->hw_features;
 
-	if (!(dev->flags & IFF_LOOPBACK)) {
+	if (!(dev->flags & IFF_LOOPBACK))
 		dev->hw_features |= NETIF_F_NOCACHE_COPY;
-	}
+
+	/* If IPv4 TCP segmentation offload is supported we should also
+	 * allow the device to enable segmenting the frame with the option
+	 * of ignoring a static IP ID value.  This doesn't enable the
+	 * feature itself but allows the user to enable it later.
+	 */
+	if (dev->hw_features & NETIF_F_TSO)
+		dev->hw_features |= NETIF_F_TSO_MANGLEID;
+	if (dev->vlan_features & NETIF_F_TSO)
+		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
+	if (dev->mpls_features & NETIF_F_TSO)
+		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
+	if (dev->hw_enc_features & NETIF_F_TSO)
+		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
 
 	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
 	 */
@@ -6983,7 +7057,7 @@
 
 	/* Make NETIF_F_SG inheritable to tunnel devices.
 	 */
-	dev->hw_enc_features |= NETIF_F_SG;
+	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
 
 	/* Make NETIF_F_SG inheritable to MPLS.
 	 */
@@ -7426,7 +7500,6 @@
 
 	dev->gso_max_size = GSO_MAX_SIZE;
 	dev->gso_max_segs = GSO_MAX_SEGS;
-	dev->gso_min_segs = 0;
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
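
The net/core/dev.c changes introduce NETIF_F_GSO_PARTIAL and NETIF_F_TSO_MANGLEID handling: gso_features_check() strips the device's partial features unless the skb was built for partial GSO, and clears the ID-mangling feature whenever the IPv4 header could legitimately be fragmented. A small standalone illustration of that last rule, with invented feature-bit names in place of the netdev_features_t flags:

#include <stdint.h>
#include <arpa/inet.h>

#define IP_DF 0x4000                       /* "don't fragment" flag */

#define F_TSO          (1u << 0)           /* stand-ins for netdev feature bits */
#define F_TSO_MANGLEID (1u << 1)

/* Mirror of the new gso_features_check() rule for IPv4 TSO: the device
 * may only rewrite the IP ID of the segments it generates when the
 * original header can never be fragmented (DF set); otherwise the
 * mangling feature has to be masked out for this skb.
 */
static unsigned int features_for_skb(unsigned int features, uint16_t frag_off_be)
{
	if (!(frag_off_be & htons(IP_DF)))
		features &= ~F_TSO_MANGLEID;
	return features;
}

int main(void)
{
	unsigned int f = F_TSO | F_TSO_MANGLEID;

	return features_for_skb(f, htons(IP_DF)) == f ? 0 : 1;
}
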
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 590fa56..933e8d4 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -119,7 +119,171 @@
 	return devlink_port_get_from_attrs(devlink, info->attrs);
 }
 
-#define DEVLINK_NL_FLAG_NEED_PORT	BIT(0)
+struct devlink_sb {
+	struct list_head list;
+	unsigned int index;
+	u32 size;
+	u16 ingress_pools_count;
+	u16 egress_pools_count;
+	u16 ingress_tc_count;
+	u16 egress_tc_count;
+};
+
+static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb)
+{
+	return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count;
+}
+
+static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink,
+						  unsigned int sb_index)
+{
+	struct devlink_sb *devlink_sb;
+
+	list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+		if (devlink_sb->index == sb_index)
+			return devlink_sb;
+	}
+	return NULL;
+}
+
+static bool devlink_sb_index_exists(struct devlink *devlink,
+				    unsigned int sb_index)
+{
+	return devlink_sb_get_by_index(devlink, sb_index);
+}
+
+static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink,
+						    struct nlattr **attrs)
+{
+	if (attrs[DEVLINK_ATTR_SB_INDEX]) {
+		u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]);
+		struct devlink_sb *devlink_sb;
+
+		devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
+		if (!devlink_sb)
+			return ERR_PTR(-ENODEV);
+		return devlink_sb;
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink,
+						   struct genl_info *info)
+{
+	return devlink_sb_get_from_attrs(devlink, info->attrs);
+}
+
+static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb,
+						struct nlattr **attrs,
+						u16 *p_pool_index)
+{
+	u16 val;
+
+	if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX])
+		return -EINVAL;
+
+	val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]);
+	if (val >= devlink_sb_pool_count(devlink_sb))
+		return -EINVAL;
+	*p_pool_index = val;
+	return 0;
+}
+
+static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb,
+					       struct genl_info *info,
+					       u16 *p_pool_index)
+{
+	return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs,
+						    p_pool_index);
+}
+
+static int
+devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs,
+				    enum devlink_sb_pool_type *p_pool_type)
+{
+	u8 val;
+
+	if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE])
+		return -EINVAL;
+
+	val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]);
+	if (val != DEVLINK_SB_POOL_TYPE_INGRESS &&
+	    val != DEVLINK_SB_POOL_TYPE_EGRESS)
+		return -EINVAL;
+	*p_pool_type = val;
+	return 0;
+}
+
+static int
+devlink_sb_pool_type_get_from_info(struct genl_info *info,
+				   enum devlink_sb_pool_type *p_pool_type)
+{
+	return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type);
+}
+
+static int
+devlink_sb_th_type_get_from_attrs(struct nlattr **attrs,
+				  enum devlink_sb_threshold_type *p_th_type)
+{
+	u8 val;
+
+	if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE])
+		return -EINVAL;
+
+	val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]);
+	if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC &&
+	    val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC)
+		return -EINVAL;
+	*p_th_type = val;
+	return 0;
+}
+
+static int
+devlink_sb_th_type_get_from_info(struct genl_info *info,
+				 enum devlink_sb_threshold_type *p_th_type)
+{
+	return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type);
+}
+
+static int
+devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb,
+				   struct nlattr **attrs,
+				   enum devlink_sb_pool_type pool_type,
+				   u16 *p_tc_index)
+{
+	u16 val;
+
+	if (!attrs[DEVLINK_ATTR_SB_TC_INDEX])
+		return -EINVAL;
+
+	val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]);
+	if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS &&
+	    val >= devlink_sb->ingress_tc_count)
+		return -EINVAL;
+	if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS &&
+	    val >= devlink_sb->egress_tc_count)
+		return -EINVAL;
+	*p_tc_index = val;
+	return 0;
+}
+
+static int
+devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
+				  struct genl_info *info,
+				  enum devlink_sb_pool_type pool_type,
+				  u16 *p_tc_index)
+{
+	return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs,
+						  pool_type, p_tc_index);
+}
+
+#define DEVLINK_NL_FLAG_NEED_DEVLINK	BIT(0)
+#define DEVLINK_NL_FLAG_NEED_PORT	BIT(1)
+#define DEVLINK_NL_FLAG_NEED_SB		BIT(2)
+#define DEVLINK_NL_FLAG_LOCK_PORTS	BIT(3)
+	/* port is not needed but we need to ensure the ports don't
+	 * change in the middle of the command
+	 */
 
 static int devlink_nl_pre_doit(const struct genl_ops *ops,
 			       struct sk_buff *skb, struct genl_info *info)
@@ -132,8 +296,9 @@
 		mutex_unlock(&devlink_mutex);
 		return PTR_ERR(devlink);
 	}
-	info->user_ptr[0] = devlink;
-	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
+	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) {
+		info->user_ptr[0] = devlink;
+	} else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
 		struct devlink_port *devlink_port;
 
 		mutex_lock(&devlink_port_mutex);
@@ -143,7 +308,22 @@
 			mutex_unlock(&devlink_mutex);
 			return PTR_ERR(devlink_port);
 		}
-		info->user_ptr[1] = devlink_port;
+		info->user_ptr[0] = devlink_port;
+	}
+	if (ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS) {
+		mutex_lock(&devlink_port_mutex);
+	}
+	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) {
+		struct devlink_sb *devlink_sb;
+
+		devlink_sb = devlink_sb_get_from_info(devlink, info);
+		if (IS_ERR(devlink_sb)) {
+			if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT)
+				mutex_unlock(&devlink_port_mutex);
+			mutex_unlock(&devlink_mutex);
+			return PTR_ERR(devlink_sb);
+		}
+		info->user_ptr[1] = devlink_sb;
 	}
 	return 0;
 }
@@ -151,7 +331,8 @@
 static void devlink_nl_post_doit(const struct genl_ops *ops,
 				 struct sk_buff *skb, struct genl_info *info)
 {
-	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT)
+	if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT ||
+	    ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS)
 		mutex_unlock(&devlink_port_mutex);
 	mutex_unlock(&devlink_mutex);
 }
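
devlink_nl_pre_doit() now keys off per-operation internal_flags to decide which objects to resolve and which locks to take before a command handler runs, parking the results in info->user_ptr[]; devlink_nl_post_doit() drops the port mutex for the same flag combinations. A rough sketch of that flag-driven pre/post pattern, with plain ints standing in for the real mutexes and lookups:

#define NEED_DEVLINK (1u << 0)      /* stand-ins for the internal_flags bits */
#define NEED_PORT    (1u << 1)
#define NEED_SB      (1u << 2)
#define LOCK_PORTS   (1u << 3)

struct ctx { void *user_ptr[2]; int ports_locked; };

/* Each command declares what it needs; the common pre hook resolves the
 * objects (taking locks as required) and parks them in user_ptr[], and
 * the post hook drops exactly the locks the pre hook took.
 */
static int pre_doit(unsigned int flags, struct ctx *ctx,
		    void *devlink, void *port, void *sb)
{
	if (flags & NEED_DEVLINK)
		ctx->user_ptr[0] = devlink;
	else if (flags & NEED_PORT)
		ctx->user_ptr[0] = port;

	if (flags & (NEED_PORT | LOCK_PORTS))
		ctx->ports_locked = 1;      /* mutex_lock(&devlink_port_mutex) */

	if (flags & NEED_SB)
		ctx->user_ptr[1] = sb;
	return 0;
}

static void post_doit(unsigned int flags, struct ctx *ctx)
{
	if (flags & (NEED_PORT | LOCK_PORTS))
		ctx->ports_locked = 0;      /* mutex_unlock(&devlink_port_mutex) */
}

int main(void)
{
	struct ctx ctx = { { 0, 0 }, 0 };
	int devlink, port, sb;

	pre_doit(NEED_PORT | NEED_SB, &ctx, &devlink, &port, &sb);
	post_doit(NEED_PORT | NEED_SB, &ctx);
	return ctx.user_ptr[0] == &port ? 0 : 1;
}
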
@@ -356,8 +537,8 @@
 static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
 					struct genl_info *info)
 {
-	struct devlink *devlink = info->user_ptr[0];
-	struct devlink_port *devlink_port = info->user_ptr[1];
+	struct devlink_port *devlink_port = info->user_ptr[0];
+	struct devlink *devlink = devlink_port->devlink;
 	struct sk_buff *msg;
 	int err;
 
@@ -436,8 +617,8 @@
 static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
 					struct genl_info *info)
 {
-	struct devlink *devlink = info->user_ptr[0];
-	struct devlink_port *devlink_port = info->user_ptr[1];
+	struct devlink_port *devlink_port = info->user_ptr[0];
+	struct devlink *devlink = devlink_port->devlink;
 	int err;
 
 	if (info->attrs[DEVLINK_ATTR_PORT_TYPE]) {
@@ -497,12 +678,735 @@
 	return devlink_port_unsplit(devlink, port_index);
 }
 
+static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink,
+			      struct devlink_sb *devlink_sb,
+			      enum devlink_command cmd, u32 portid,
+			      u32 seq, int flags)
+{
+	void *hdr;
+
+	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (devlink_nl_put_handle(msg, devlink))
+		goto nla_put_failure;
+	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+		goto nla_put_failure;
+	if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size))
+		goto nla_put_failure;
+	if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT,
+			devlink_sb->ingress_pools_count))
+		goto nla_put_failure;
+	if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT,
+			devlink_sb->egress_pools_count))
+		goto nla_put_failure;
+	if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT,
+			devlink_sb->ingress_tc_count))
+		goto nla_put_failure;
+	if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT,
+			devlink_sb->egress_tc_count))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
+				      struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	struct devlink_sb *devlink_sb = info->user_ptr[1];
+	struct sk_buff *msg;
+	int err;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
+				 DEVLINK_CMD_SB_NEW,
+				 info->snd_portid, info->snd_seq, 0);
+	if (err) {
+		nlmsg_free(msg);
+		return err;
+	}
+
+	return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
+					struct netlink_callback *cb)
+{
+	struct devlink *devlink;
+	struct devlink_sb *devlink_sb;
+	int start = cb->args[0];
+	int idx = 0;
+	int err;
+
+	mutex_lock(&devlink_mutex);
+	list_for_each_entry(devlink, &devlink_list, list) {
+		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+			continue;
+		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+			if (idx < start) {
+				idx++;
+				continue;
+			}
+			err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
+						 DEVLINK_CMD_SB_NEW,
+						 NETLINK_CB(cb->skb).portid,
+						 cb->nlh->nlmsg_seq,
+						 NLM_F_MULTI);
+			if (err)
+				goto out;
+			idx++;
+		}
+	}
+out:
+	mutex_unlock(&devlink_mutex);
+
+	cb->args[0] = idx;
+	return msg->len;
+}
+
+static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
+				   struct devlink_sb *devlink_sb,
+				   u16 pool_index, enum devlink_command cmd,
+				   u32 portid, u32 seq, int flags)
+{
+	struct devlink_sb_pool_info pool_info;
+	void *hdr;
+	int err;
+
+	err = devlink->ops->sb_pool_get(devlink, devlink_sb->index,
+					pool_index, &pool_info);
+	if (err)
+		return err;
+
+	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (devlink_nl_put_handle(msg, devlink))
+		goto nla_put_failure;
+	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+		goto nla_put_failure;
+	if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+		goto nla_put_failure;
+	if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type))
+		goto nla_put_failure;
+	if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size))
+		goto nla_put_failure;
+	if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
+		       pool_info.threshold_type))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
+					   struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	struct devlink_sb *devlink_sb = info->user_ptr[1];
+	struct sk_buff *msg;
+	u16 pool_index;
+	int err;
+
+	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+						  &pool_index);
+	if (err)
+		return err;
+
+	if (!devlink->ops || !devlink->ops->sb_pool_get)
+		return -EOPNOTSUPP;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index,
+				      DEVLINK_CMD_SB_POOL_NEW,
+				      info->snd_portid, info->snd_seq, 0);
+	if (err) {
+		nlmsg_free(msg);
+		return err;
+	}
+
+	return genlmsg_reply(msg, info);
+}
+
+static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
+				struct devlink *devlink,
+				struct devlink_sb *devlink_sb,
+				u32 portid, u32 seq)
+{
+	u16 pool_count = devlink_sb_pool_count(devlink_sb);
+	u16 pool_index;
+	int err;
+
+	for (pool_index = 0; pool_index < pool_count; pool_index++) {
+		if (*p_idx < start) {
+			(*p_idx)++;
+			continue;
+		}
+		err = devlink_nl_sb_pool_fill(msg, devlink,
+					      devlink_sb,
+					      pool_index,
+					      DEVLINK_CMD_SB_POOL_NEW,
+					      portid, seq, NLM_F_MULTI);
+		if (err)
+			return err;
+		(*p_idx)++;
+	}
+	return 0;
+}
+
+static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
+					     struct netlink_callback *cb)
+{
+	struct devlink *devlink;
+	struct devlink_sb *devlink_sb;
+	int start = cb->args[0];
+	int idx = 0;
+	int err;
+
+	mutex_lock(&devlink_mutex);
+	list_for_each_entry(devlink, &devlink_list, list) {
+		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
+		    !devlink->ops || !devlink->ops->sb_pool_get)
+			continue;
+		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+			err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
+						   devlink_sb,
+						   NETLINK_CB(cb->skb).portid,
+						   cb->nlh->nlmsg_seq);
+			if (err && err != -EOPNOTSUPP)
+				goto out;
+		}
+	}
+out:
+	mutex_unlock(&devlink_mutex);
+
+	cb->args[0] = idx;
+	return msg->len;
+}
+
+static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
+			       u16 pool_index, u32 size,
+			       enum devlink_sb_threshold_type threshold_type)
+{
+	const struct devlink_ops *ops = devlink->ops;
+
+	if (ops && ops->sb_pool_set)
+		return ops->sb_pool_set(devlink, sb_index, pool_index,
+					size, threshold_type);
+	return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb,
+					   struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	struct devlink_sb *devlink_sb = info->user_ptr[1];
+	enum devlink_sb_threshold_type threshold_type;
+	u16 pool_index;
+	u32 size;
+	int err;
+
+	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+						  &pool_index);
+	if (err)
+		return err;
+
+	err = devlink_sb_th_type_get_from_info(info, &threshold_type);
+	if (err)
+		return err;
+
+	if (!info->attrs[DEVLINK_ATTR_SB_POOL_SIZE])
+		return -EINVAL;
+
+	size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]);
+	return devlink_sb_pool_set(devlink, devlink_sb->index,
+				   pool_index, size, threshold_type);
+}
+
+static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
+					struct devlink *devlink,
+					struct devlink_port *devlink_port,
+					struct devlink_sb *devlink_sb,
+					u16 pool_index,
+					enum devlink_command cmd,
+					u32 portid, u32 seq, int flags)
+{
+	const struct devlink_ops *ops = devlink->ops;
+	u32 threshold;
+	void *hdr;
+	int err;
+
+	err = ops->sb_port_pool_get(devlink_port, devlink_sb->index,
+				    pool_index, &threshold);
+	if (err)
+		return err;
+
+	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (devlink_nl_put_handle(msg, devlink))
+		goto nla_put_failure;
+	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+		goto nla_put_failure;
+	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+		goto nla_put_failure;
+	if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+		goto nla_put_failure;
+	if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
+		goto nla_put_failure;
+
+	if (ops->sb_occ_port_pool_get) {
+		u32 cur;
+		u32 max;
+
+		err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
+						pool_index, &cur, &max);
+		if (err && err != -EOPNOTSUPP)
+			return err;
+		if (!err) {
+			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
+				goto nla_put_failure;
+			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
+				goto nla_put_failure;
+		}
+	}
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
+						struct genl_info *info)
+{
+	struct devlink_port *devlink_port = info->user_ptr[0];
+	struct devlink *devlink = devlink_port->devlink;
+	struct devlink_sb *devlink_sb = info->user_ptr[1];
+	struct sk_buff *msg;
+	u16 pool_index;
+	int err;
+
+	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+						  &pool_index);
+	if (err)
+		return err;
+
+	if (!devlink->ops || !devlink->ops->sb_port_pool_get)
+		return -EOPNOTSUPP;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port,
+					   devlink_sb, pool_index,
+					   DEVLINK_CMD_SB_PORT_POOL_NEW,
+					   info->snd_portid, info->snd_seq, 0);
+	if (err) {
+		nlmsg_free(msg);
+		return err;
+	}
+
+	return genlmsg_reply(msg, info);
+}
+
+static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
+				     struct devlink *devlink,
+				     struct devlink_sb *devlink_sb,
+				     u32 portid, u32 seq)
+{
+	struct devlink_port *devlink_port;
+	u16 pool_count = devlink_sb_pool_count(devlink_sb);
+	u16 pool_index;
+	int err;
+
+	list_for_each_entry(devlink_port, &devlink->port_list, list) {
+		for (pool_index = 0; pool_index < pool_count; pool_index++) {
+			if (*p_idx < start) {
+				(*p_idx)++;
+				continue;
+			}
+			err = devlink_nl_sb_port_pool_fill(msg, devlink,
+							   devlink_port,
+							   devlink_sb,
+							   pool_index,
+							   DEVLINK_CMD_SB_PORT_POOL_NEW,
+							   portid, seq,
+							   NLM_F_MULTI);
+			if (err)
+				return err;
+			(*p_idx)++;
+		}
+	}
+	return 0;
+}
+
+static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
+						  struct netlink_callback *cb)
+{
+	struct devlink *devlink;
+	struct devlink_sb *devlink_sb;
+	int start = cb->args[0];
+	int idx = 0;
+	int err;
+
+	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink_port_mutex);
+	list_for_each_entry(devlink, &devlink_list, list) {
+		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
+		    !devlink->ops || !devlink->ops->sb_port_pool_get)
+			continue;
+		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+			err = __sb_port_pool_get_dumpit(msg, start, &idx,
+							devlink, devlink_sb,
+							NETLINK_CB(cb->skb).portid,
+							cb->nlh->nlmsg_seq);
+			if (err && err != -EOPNOTSUPP)
+				goto out;
+		}
+	}
+out:
+	mutex_unlock(&devlink_port_mutex);
+	mutex_unlock(&devlink_mutex);
+
+	cb->args[0] = idx;
+	return msg->len;
+}
+
+static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
+				    unsigned int sb_index, u16 pool_index,
+				    u32 threshold)
+{
+	const struct devlink_ops *ops = devlink_port->devlink->ops;
+
+	if (ops && ops->sb_port_pool_set)
+		return ops->sb_port_pool_set(devlink_port, sb_index,
+					     pool_index, threshold);
+	return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
+						struct genl_info *info)
+{
+	struct devlink_port *devlink_port = info->user_ptr[0];
+	struct devlink_sb *devlink_sb = info->user_ptr[1];
+	u16 pool_index;
+	u32 threshold;
+	int err;
+
+	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+						  &pool_index);
+	if (err)
+		return err;
+
+	if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD])
+		return -EINVAL;
+
+	threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
+	return devlink_sb_port_pool_set(devlink_port, devlink_sb->index,
+					pool_index, threshold);
+}
+
+static int
+devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink,
+				struct devlink_port *devlink_port,
+				struct devlink_sb *devlink_sb, u16 tc_index,
+				enum devlink_sb_pool_type pool_type,
+				enum devlink_command cmd,
+				u32 portid, u32 seq, int flags)
+{
+	const struct devlink_ops *ops = devlink->ops;
+	u16 pool_index;
+	u32 threshold;
+	void *hdr;
+	int err;
+
+	err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index,
+				       tc_index, pool_type,
+				       &pool_index, &threshold);
+	if (err)
+		return err;
+
+	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (devlink_nl_put_handle(msg, devlink))
+		goto nla_put_failure;
+	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+		goto nla_put_failure;
+	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+		goto nla_put_failure;
+	if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index))
+		goto nla_put_failure;
+	if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type))
+		goto nla_put_failure;
+	if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+		goto nla_put_failure;
+	if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
+		goto nla_put_failure;
+
+	if (ops->sb_occ_tc_port_bind_get) {
+		u32 cur;
+		u32 max;
+
+		err = ops->sb_occ_tc_port_bind_get(devlink_port,
+						   devlink_sb->index,
+						   tc_index, pool_type,
+						   &cur, &max);
+		if (err && err != -EOPNOTSUPP)
+			return err;
+		if (!err) {
+			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
+				goto nla_put_failure;
+			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
+				goto nla_put_failure;
+		}
+	}
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
+						   struct genl_info *info)
+{
+	struct devlink_port *devlink_port = info->user_ptr[0];
+	struct devlink *devlink = devlink_port->devlink;
+	struct devlink_sb *devlink_sb = info->user_ptr[1];
+	struct sk_buff *msg;
+	enum devlink_sb_pool_type pool_type;
+	u16 tc_index;
+	int err;
+
+	err = devlink_sb_pool_type_get_from_info(info, &pool_type);
+	if (err)
+		return err;
+
+	err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
+						pool_type, &tc_index);
+	if (err)
+		return err;
+
+	if (!devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
+		return -EOPNOTSUPP;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port,
+					      devlink_sb, tc_index, pool_type,
+					      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+					      info->snd_portid,
+					      info->snd_seq, 0);
+	if (err) {
+		nlmsg_free(msg);
+		return err;
+	}
+
+	return genlmsg_reply(msg, info);
+}
+
+static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
+					int start, int *p_idx,
+					struct devlink *devlink,
+					struct devlink_sb *devlink_sb,
+					u32 portid, u32 seq)
+{
+	struct devlink_port *devlink_port;
+	u16 tc_index;
+	int err;
+
+	list_for_each_entry(devlink_port, &devlink->port_list, list) {
+		for (tc_index = 0;
+		     tc_index < devlink_sb->ingress_tc_count; tc_index++) {
+			if (*p_idx < start) {
+				(*p_idx)++;
+				continue;
+			}
+			err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
+							      devlink_port,
+							      devlink_sb,
+							      tc_index,
+							      DEVLINK_SB_POOL_TYPE_INGRESS,
+							      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+							      portid, seq,
+							      NLM_F_MULTI);
+			if (err)
+				return err;
+			(*p_idx)++;
+		}
+		for (tc_index = 0;
+		     tc_index < devlink_sb->egress_tc_count; tc_index++) {
+			if (*p_idx < start) {
+				(*p_idx)++;
+				continue;
+			}
+			err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
+							      devlink_port,
+							      devlink_sb,
+							      tc_index,
+							      DEVLINK_SB_POOL_TYPE_EGRESS,
+							      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+							      portid, seq,
+							      NLM_F_MULTI);
+			if (err)
+				return err;
+			(*p_idx)++;
+		}
+	}
+	return 0;
+}
+
+static int
+devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
+					  struct netlink_callback *cb)
+{
+	struct devlink *devlink;
+	struct devlink_sb *devlink_sb;
+	int start = cb->args[0];
+	int idx = 0;
+	int err;
+
+	mutex_lock(&devlink_mutex);
+	mutex_lock(&devlink_port_mutex);
+	list_for_each_entry(devlink, &devlink_list, list) {
+		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
+		    !devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
+			continue;
+		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+			err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
+							   devlink,
+							   devlink_sb,
+							   NETLINK_CB(cb->skb).portid,
+							   cb->nlh->nlmsg_seq);
+			if (err && err != -EOPNOTSUPP)
+				goto out;
+		}
+	}
+out:
+	mutex_unlock(&devlink_port_mutex);
+	mutex_unlock(&devlink_mutex);
+
+	cb->args[0] = idx;
+	return msg->len;
+}
+
+static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
+				       unsigned int sb_index, u16 tc_index,
+				       enum devlink_sb_pool_type pool_type,
+				       u16 pool_index, u32 threshold)
+{
+	const struct devlink_ops *ops = devlink_port->devlink->ops;
+
+	if (ops && ops->sb_tc_pool_bind_set)
+		return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
+						tc_index, pool_type,
+						pool_index, threshold);
+	return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
+						   struct genl_info *info)
+{
+	struct devlink_port *devlink_port = info->user_ptr[0];
+	struct devlink_sb *devlink_sb = info->user_ptr[1];
+	enum devlink_sb_pool_type pool_type;
+	u16 tc_index;
+	u16 pool_index;
+	u32 threshold;
+	int err;
+
+	err = devlink_sb_pool_type_get_from_info(info, &pool_type);
+	if (err)
+		return err;
+
+	err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
+						pool_type, &tc_index);
+	if (err)
+		return err;
+
+	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+						  &pool_index);
+	if (err)
+		return err;
+
+	if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD])
+		return -EINVAL;
+
+	threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
+	return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index,
+					   tc_index, pool_type,
+					   pool_index, threshold);
+}
+
+static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
+					       struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	struct devlink_sb *devlink_sb = info->user_ptr[1];
+	const struct devlink_ops *ops = devlink->ops;
+
+	if (ops && ops->sb_occ_snapshot)
+		return ops->sb_occ_snapshot(devlink, devlink_sb->index);
+	return -EOPNOTSUPP;
+}
+
+static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
+						struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	struct devlink_sb *devlink_sb = info->user_ptr[1];
+	const struct devlink_ops *ops = devlink->ops;
+
+	if (ops && ops->sb_occ_max_clear)
+		return ops->sb_occ_max_clear(devlink, devlink_sb->index);
+	return -EOPNOTSUPP;
+}
+
 static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
 	[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
 	[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
 	[DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
 	[DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 },
 	[DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
+	[DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
+	[DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
+	[DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
+	[DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
+	[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
+	[DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
+	[DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
 };
 
 static const struct genl_ops devlink_nl_ops[] = {
@@ -511,6 +1415,7 @@
 		.doit = devlink_nl_cmd_get_doit,
 		.dumpit = devlink_nl_cmd_get_dumpit,
 		.policy = devlink_nl_policy,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
 		/* can be retrieved by unprivileged users */
 	},
 	{
@@ -533,12 +1438,92 @@
 		.doit = devlink_nl_cmd_port_split_doit,
 		.policy = devlink_nl_policy,
 		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
 	},
 	{
 		.cmd = DEVLINK_CMD_PORT_UNSPLIT,
 		.doit = devlink_nl_cmd_port_unsplit_doit,
 		.policy = devlink_nl_policy,
 		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+	},
+	{
+		.cmd = DEVLINK_CMD_SB_GET,
+		.doit = devlink_nl_cmd_sb_get_doit,
+		.dumpit = devlink_nl_cmd_sb_get_dumpit,
+		.policy = devlink_nl_policy,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NEED_SB,
+		/* can be retrieved by unprivileged users */
+	},
+	{
+		.cmd = DEVLINK_CMD_SB_POOL_GET,
+		.doit = devlink_nl_cmd_sb_pool_get_doit,
+		.dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
+		.policy = devlink_nl_policy,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NEED_SB,
+		/* can be retrieved by unprivileged users */
+	},
+	{
+		.cmd = DEVLINK_CMD_SB_POOL_SET,
+		.doit = devlink_nl_cmd_sb_pool_set_doit,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NEED_SB,
+	},
+	{
+		.cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
+		.doit = devlink_nl_cmd_sb_port_pool_get_doit,
+		.dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
+		.policy = devlink_nl_policy,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+				  DEVLINK_NL_FLAG_NEED_SB,
+		/* can be retrieved by unprivileged users */
+	},
+	{
+		.cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
+		.doit = devlink_nl_cmd_sb_port_pool_set_doit,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+				  DEVLINK_NL_FLAG_NEED_SB,
+	},
+	{
+		.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
+		.doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
+		.dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
+		.policy = devlink_nl_policy,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+				  DEVLINK_NL_FLAG_NEED_SB,
+		/* can be retrieved by unprivileged users */
+	},
+	{
+		.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+		.doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
+				  DEVLINK_NL_FLAG_NEED_SB,
+	},
+	{
+		.cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
+		.doit = devlink_nl_cmd_sb_occ_snapshot_doit,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NEED_SB |
+				  DEVLINK_NL_FLAG_LOCK_PORTS,
+	},
+	{
+		.cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+		.doit = devlink_nl_cmd_sb_occ_max_clear_doit,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+				  DEVLINK_NL_FLAG_NEED_SB |
+				  DEVLINK_NL_FLAG_LOCK_PORTS,
 	},
 };
 
@@ -561,6 +1546,7 @@
 	devlink->ops = ops;
 	devlink_net_set(devlink, &init_net);
 	INIT_LIST_HEAD(&devlink->port_list);
+	INIT_LIST_HEAD(&devlink->sb_list);
 	return devlink;
 }
 EXPORT_SYMBOL_GPL(devlink_alloc);
@@ -630,7 +1616,6 @@
 	}
 	devlink_port->devlink = devlink;
 	devlink_port->index = port_index;
-	devlink_port->type = DEVLINK_PORT_TYPE_NOTSET;
 	devlink_port->registered = true;
 	list_add_tail(&devlink_port->list, &devlink->port_list);
 	mutex_unlock(&devlink_port_mutex);
@@ -717,6 +1702,51 @@
 }
 EXPORT_SYMBOL_GPL(devlink_port_split_set);
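+/* devlink_sb_register(): a driver registers each shared buffer it exposes.
+ * sb_index must be unique per devlink instance (otherwise -EEXIST is
+ * returned); a device with a single shared buffer would typically use
+ * index 0.
+ */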
 
+int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
+			u32 size, u16 ingress_pools_count,
+			u16 egress_pools_count, u16 ingress_tc_count,
+			u16 egress_tc_count)
+{
+	struct devlink_sb *devlink_sb;
+	int err = 0;
+
+	mutex_lock(&devlink_mutex);
+	if (devlink_sb_index_exists(devlink, sb_index)) {
+		err = -EEXIST;
+		goto unlock;
+	}
+
+	devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL);
+	if (!devlink_sb) {
+		err = -ENOMEM;
+		goto unlock;
+	}
+	devlink_sb->index = sb_index;
+	devlink_sb->size = size;
+	devlink_sb->ingress_pools_count = ingress_pools_count;
+	devlink_sb->egress_pools_count = egress_pools_count;
+	devlink_sb->ingress_tc_count = ingress_tc_count;
+	devlink_sb->egress_tc_count = egress_tc_count;
+	list_add_tail(&devlink_sb->list, &devlink->sb_list);
+unlock:
+	mutex_unlock(&devlink_mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(devlink_sb_register);
+
+void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
+{
+	struct devlink_sb *devlink_sb;
+
+	mutex_lock(&devlink_mutex);
+	devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
+	WARN_ON(!devlink_sb);
+	list_del(&devlink_sb->list);
+	mutex_unlock(&devlink_mutex);
+	kfree(devlink_sb);
+}
+EXPORT_SYMBOL_GPL(devlink_sb_unregister);
+
 static int __init devlink_module_init(void)
 {
 	return genl_register_family_with_ops_groups(&devlink_nl_family,
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f426c5a..bdb4013 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -79,12 +79,16 @@
 	[NETIF_F_UFO_BIT] =              "tx-udp-fragmentation",
 	[NETIF_F_GSO_ROBUST_BIT] =       "tx-gso-robust",
 	[NETIF_F_TSO_ECN_BIT] =          "tx-tcp-ecn-segmentation",
+	[NETIF_F_TSO_MANGLEID_BIT] =	 "tx-tcp-mangleid-segmentation",
 	[NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
 	[NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
 	[NETIF_F_GSO_GRE_BIT] =		 "tx-gre-segmentation",
+	[NETIF_F_GSO_GRE_CSUM_BIT] =	 "tx-gre-csum-segmentation",
 	[NETIF_F_GSO_IPIP_BIT] =	 "tx-ipip-segmentation",
 	[NETIF_F_GSO_SIT_BIT] =		 "tx-sit-segmentation",
 	[NETIF_F_GSO_UDP_TUNNEL_BIT] =	 "tx-udp_tnl-segmentation",
+	[NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
+	[NETIF_F_GSO_PARTIAL_BIT] =	 "tx-gso-partial",
 
 	[NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
 	[NETIF_F_SCTP_CRC_BIT] =        "tx-checksum-sctp",
@@ -387,15 +391,17 @@
 	return 0;
 }
 
-static void convert_legacy_u32_to_link_mode(unsigned long *dst, u32 legacy_u32)
+void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
+					     u32 legacy_u32)
 {
 	bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
 	dst[0] = legacy_u32;
 }
+EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode);
 
 /* return false if src had higher bits set. lower bits always updated. */
-static bool convert_link_mode_to_legacy_u32(u32 *legacy_u32,
-					    const unsigned long *src)
+bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
+					     const unsigned long *src)
 {
 	bool retval = true;
 
@@ -415,6 +421,7 @@
 	*legacy_u32 = src[0];
 	return retval;
 }
+EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
 
 /* return false if legacy contained non-0 deprecated fields
  * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated
@@ -437,13 +444,13 @@
 	    legacy_settings->maxrxpkt)
 		retval = false;
 
-	convert_legacy_u32_to_link_mode(
+	ethtool_convert_legacy_u32_to_link_mode(
 		link_ksettings->link_modes.supported,
 		legacy_settings->supported);
-	convert_legacy_u32_to_link_mode(
+	ethtool_convert_legacy_u32_to_link_mode(
 		link_ksettings->link_modes.advertising,
 		legacy_settings->advertising);
-	convert_legacy_u32_to_link_mode(
+	ethtool_convert_legacy_u32_to_link_mode(
 		link_ksettings->link_modes.lp_advertising,
 		legacy_settings->lp_advertising);
 	link_ksettings->base.speed
@@ -482,13 +489,13 @@
 	 * __u32	maxrxpkt;
 	 */
 
-	retval &= convert_link_mode_to_legacy_u32(
+	retval &= ethtool_convert_link_mode_to_legacy_u32(
 		&legacy_settings->supported,
 		link_ksettings->link_modes.supported);
-	retval &= convert_link_mode_to_legacy_u32(
+	retval &= ethtool_convert_link_mode_to_legacy_u32(
 		&legacy_settings->advertising,
 		link_ksettings->link_modes.advertising);
-	retval &= convert_link_mode_to_legacy_u32(
+	retval &= ethtool_convert_link_mode_to_legacy_u32(
 		&legacy_settings->lp_advertising,
 		link_ksettings->link_modes.lp_advertising);
 	ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 365de66..840aceb 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -549,7 +549,7 @@
 			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
 			 + nla_total_size(4) /* FRA_FWMARK */
 			 + nla_total_size(4) /* FRA_FWMASK */
-			 + nla_total_size(8); /* FRA_TUN_ID */
+			 + nla_total_size_64bit(8); /* FRA_TUN_ID */
 
 	if (ops->nlmsg_payload)
 		payload += ops->nlmsg_payload(rule);
@@ -607,7 +607,7 @@
 	    (rule->target &&
 	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
 	    (rule->tun_id &&
-	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)))
+	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)))
 		goto nla_put_failure;
 
 	if (rule->suppress_ifgroup != -1) {
diff --git a/net/core/filter.c b/net/core/filter.c
index b7177d0..68adb5f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -994,7 +994,11 @@
 		 */
 		goto out_err_free;
 
-	bpf_prog_select_runtime(fp);
+	/* We are guaranteed to never error here with cBPF to eBPF
+	 * transitions, since there's no issue with type compatibility
+	 * checks on program arrays.
+	 */
+	fp = bpf_prog_select_runtime(fp, &err);
 
 	kfree(old_prog);
 	return fp;
@@ -1166,7 +1170,7 @@
 	}
 
 	old_fp = rcu_dereference_protected(sk->sk_filter,
-					   sock_owned_by_user(sk));
+					   lockdep_sock_is_held(sk));
 	rcu_assign_pointer(sk->sk_filter, fp);
 
 	if (old_fp)
@@ -1344,6 +1348,21 @@
 
 static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
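+/* bpf_try_make_writable(): like skb_try_make_writable(), but also recompute
+ * the cached data_end used for direct packet access, since
+ * pskb_expand_head() may have reallocated the skb head.
+ */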
 
+static inline int bpf_try_make_writable(struct sk_buff *skb,
+					unsigned int write_len)
+{
+	int err;
+
+	if (!skb_cloned(skb))
+		return 0;
+	if (skb_clone_writable(skb, write_len))
+		return 0;
+	err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+	if (!err)
+		bpf_compute_data_end(skb);
+	return err;
+}
+
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
 	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
@@ -1366,7 +1385,7 @@
 	 */
 	if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
 		return -EFAULT;
-	if (unlikely(skb_try_make_writable(skb, offset + len)))
+	if (unlikely(bpf_try_make_writable(skb, offset + len)))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, len, sp->buff);
@@ -1409,16 +1428,19 @@
 	unsigned int len = (unsigned int) r4;
 	void *ptr;
 
-	if (unlikely((u32) offset > 0xffff || len > MAX_BPF_STACK))
-		return -EFAULT;
+	if (unlikely((u32) offset > 0xffff))
+		goto err_clear;
 
 	ptr = skb_header_pointer(skb, offset, len, to);
 	if (unlikely(!ptr))
-		return -EFAULT;
+		goto err_clear;
 	if (ptr != to)
 		memcpy(to, ptr, len);
 
 	return 0;
+err_clear:
+	memset(to, 0, len);
+	return -EFAULT;
 }
 
 static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
@@ -1427,7 +1449,7 @@
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
 	.arg2_type	= ARG_ANYTHING,
-	.arg3_type	= ARG_PTR_TO_STACK,
+	.arg3_type	= ARG_PTR_TO_RAW_STACK,
 	.arg4_type	= ARG_CONST_STACK_SIZE,
 };
 
@@ -1441,7 +1463,7 @@
 		return -EINVAL;
 	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
-	if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1496,7 +1518,7 @@
 		return -EINVAL;
 	if (unlikely((u32) offset > 0xffff))
 		return -EFAULT;
-	if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
 		return -EFAULT;
 
 	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1696,12 +1718,15 @@
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	__be16 vlan_proto = (__force __be16) r2;
+	int ret;
 
 	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
 		     vlan_proto != htons(ETH_P_8021AD)))
 		vlan_proto = htons(ETH_P_8021Q);
 
-	return skb_vlan_push(skb, vlan_proto, vlan_tci);
+	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
+	bpf_compute_data_end(skb);
+	return ret;
 }
 
 const struct bpf_func_proto bpf_skb_vlan_push_proto = {
@@ -1717,8 +1742,11 @@
 static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	int ret;
 
-	return skb_vlan_pop(skb);
+	ret = skb_vlan_pop(skb);
+	bpf_compute_data_end(skb);
+	return ret;
 }
 
 const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
@@ -1756,26 +1784,34 @@
 	struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
 	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
 	u8 compat[sizeof(struct bpf_tunnel_key)];
+	void *to_orig = to;
+	int err;
 
-	if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6))))
-		return -EINVAL;
-	if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags))
-		return -EPROTO;
+	if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
+		err = -EINVAL;
+		goto err_clear;
+	}
+	if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
+		err = -EPROTO;
+		goto err_clear;
+	}
 	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
+		err = -EINVAL;
 		switch (size) {
 		case offsetof(struct bpf_tunnel_key, tunnel_label):
+		case offsetof(struct bpf_tunnel_key, tunnel_ext):
 			goto set_compat;
 		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
 			/* Fixup deprecated structure layouts here, so we have
 			 * a common path later on.
 			 */
 			if (ip_tunnel_info_af(info) != AF_INET)
-				return -EINVAL;
+				goto err_clear;
 set_compat:
 			to = (struct bpf_tunnel_key *)compat;
 			break;
 		default:
-			return -EINVAL;
+			goto err_clear;
 		}
 	}
 
@@ -1792,9 +1828,12 @@
 	}
 
 	if (unlikely(size != sizeof(struct bpf_tunnel_key)))
-		memcpy((void *)(long) r2, to, size);
+		memcpy(to_orig, to, size);
 
 	return 0;
+err_clear:
+	memset(to_orig, 0, size);
+	return err;
 }
 
 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
@@ -1802,7 +1841,7 @@
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
-	.arg2_type	= ARG_PTR_TO_STACK,
+	.arg2_type	= ARG_PTR_TO_RAW_STACK,
 	.arg3_type	= ARG_CONST_STACK_SIZE,
 	.arg4_type	= ARG_ANYTHING,
 };
@@ -1812,16 +1851,26 @@
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	u8 *to = (u8 *) (long) r2;
 	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
+	int err;
 
 	if (unlikely(!info ||
-		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT)))
-		return -ENOENT;
-	if (unlikely(size < info->options_len))
-		return -ENOMEM;
+		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
+		err = -ENOENT;
+		goto err_clear;
+	}
+	if (unlikely(size < info->options_len)) {
+		err = -ENOMEM;
+		goto err_clear;
+	}
 
 	ip_tunnel_info_opts_get(to, info);
+	if (size > info->options_len)
+		memset(to + info->options_len, 0, size - info->options_len);
 
 	return info->options_len;
+err_clear:
+	memset(to, 0, size);
+	return err;
 }
 
 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
@@ -1829,7 +1878,7 @@
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
-	.arg2_type	= ARG_PTR_TO_STACK,
+	.arg2_type	= ARG_PTR_TO_RAW_STACK,
 	.arg3_type	= ARG_CONST_STACK_SIZE,
 };
 
@@ -1849,6 +1898,7 @@
 	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
 		switch (size) {
 		case offsetof(struct bpf_tunnel_key, tunnel_label):
+		case offsetof(struct bpf_tunnel_key, tunnel_ext):
 		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
 			/* Fixup deprecated structure layouts here, so we have
 			 * a common path later on.
@@ -1861,7 +1911,8 @@
 			return -EINVAL;
 		}
 	}
-	if (unlikely(!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label))
+	if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
+		     from->tunnel_ext))
 		return -EINVAL;
 
 	skb_dst_drop(skb);
@@ -2013,6 +2064,8 @@
 		return &bpf_redirect_proto;
 	case BPF_FUNC_get_route_realm:
 		return &bpf_get_route_realm_proto;
+	case BPF_FUNC_perf_event_output:
+		return bpf_get_event_output_proto();
 	default:
 		return sk_filter_func_proto(func_id);
 	}
@@ -2020,16 +2073,12 @@
 
 static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 {
-	/* check bounds */
 	if (off < 0 || off >= sizeof(struct __sk_buff))
 		return false;
-
-	/* disallow misaligned access */
+	/* The verifier guarantees that size > 0. */
 	if (off % size != 0)
 		return false;
-
-	/* all __sk_buff fields are __u32 */
-	if (size != 4)
+	if (size != sizeof(__u32))
 		return false;
 
 	return true;
@@ -2038,13 +2087,17 @@
 static bool sk_filter_is_valid_access(int off, int size,
 				      enum bpf_access_type type)
 {
-	if (off == offsetof(struct __sk_buff, tc_classid))
+	switch (off) {
+	case offsetof(struct __sk_buff, tc_classid):
+	case offsetof(struct __sk_buff, data):
+	case offsetof(struct __sk_buff, data_end):
 		return false;
+	}
 
 	if (type == BPF_WRITE) {
 		switch (off) {
 		case offsetof(struct __sk_buff, cb[0]) ...
-			offsetof(struct __sk_buff, cb[4]):
+		     offsetof(struct __sk_buff, cb[4]):
 			break;
 		default:
 			return false;
@@ -2187,6 +2240,20 @@
 			*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
 		break;
 
+	case offsetof(struct __sk_buff, data):
+		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)),
+				      dst_reg, src_reg,
+				      offsetof(struct sk_buff, data));
+		break;
+
+	case offsetof(struct __sk_buff, data_end):
+		ctx_off -= offsetof(struct __sk_buff, data_end);
+		ctx_off += offsetof(struct sk_buff, cb);
+		ctx_off += offsetof(struct bpf_skb_data_end, data_end);
+		*insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)),
+				      dst_reg, src_reg, ctx_off);
+		break;
+
 	case offsetof(struct __sk_buff, tc_index):
 #ifdef CONFIG_NET_SCHED
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
@@ -2211,30 +2278,30 @@
 }
 
 static const struct bpf_verifier_ops sk_filter_ops = {
-	.get_func_proto = sk_filter_func_proto,
-	.is_valid_access = sk_filter_is_valid_access,
-	.convert_ctx_access = bpf_net_convert_ctx_access,
+	.get_func_proto		= sk_filter_func_proto,
+	.is_valid_access	= sk_filter_is_valid_access,
+	.convert_ctx_access	= bpf_net_convert_ctx_access,
 };
 
 static const struct bpf_verifier_ops tc_cls_act_ops = {
-	.get_func_proto = tc_cls_act_func_proto,
-	.is_valid_access = tc_cls_act_is_valid_access,
-	.convert_ctx_access = bpf_net_convert_ctx_access,
+	.get_func_proto		= tc_cls_act_func_proto,
+	.is_valid_access	= tc_cls_act_is_valid_access,
+	.convert_ctx_access	= bpf_net_convert_ctx_access,
 };
 
 static struct bpf_prog_type_list sk_filter_type __read_mostly = {
-	.ops = &sk_filter_ops,
-	.type = BPF_PROG_TYPE_SOCKET_FILTER,
+	.ops	= &sk_filter_ops,
+	.type	= BPF_PROG_TYPE_SOCKET_FILTER,
 };
 
 static struct bpf_prog_type_list sched_cls_type __read_mostly = {
-	.ops = &tc_cls_act_ops,
-	.type = BPF_PROG_TYPE_SCHED_CLS,
+	.ops	= &tc_cls_act_ops,
+	.type	= BPF_PROG_TYPE_SCHED_CLS,
 };
 
 static struct bpf_prog_type_list sched_act_type __read_mostly = {
-	.ops = &tc_cls_act_ops,
-	.type = BPF_PROG_TYPE_SCHED_ACT,
+	.ops	= &tc_cls_act_ops,
+	.type	= BPF_PROG_TYPE_SCHED_ACT,
 };
 
 static int __init register_sk_filter_ops(void)
@@ -2256,7 +2323,7 @@
 		return -EPERM;
 
 	filter = rcu_dereference_protected(sk->sk_filter,
-					   sock_owned_by_user(sk));
+					   lockdep_sock_is_held(sk));
 	if (filter) {
 		RCU_INIT_POINTER(sk->sk_filter, NULL);
 		sk_filter_uncharge(sk, filter);
@@ -2276,7 +2343,7 @@
 
 	lock_sock(sk);
 	filter = rcu_dereference_protected(sk->sk_filter,
-					   sock_owned_by_user(sk));
+					   lockdep_sock_is_held(sk));
 	if (!filter)
 		goto out;
 
diff --git a/net/core/flow.c b/net/core/flow.c
index 1033725..3937b1b 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -92,8 +92,11 @@
 	list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
 	spin_unlock_bh(&xfrm->flow_cache_gc_lock);
 
-	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
 		flow_entry_kill(fce, xfrm);
+		atomic_dec(&xfrm->flow_cache_gc_count);
+		WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
+	}
 }
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
@@ -101,6 +104,7 @@
 				     struct netns_xfrm *xfrm)
 {
 	if (deleted) {
+		atomic_add(deleted, &xfrm->flow_cache_gc_count);
 		fcp->hash_count -= deleted;
 		spin_lock_bh(&xfrm->flow_cache_gc_lock);
 		list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
@@ -232,6 +236,13 @@
 		if (fcp->hash_count > fc->high_watermark)
 			flow_cache_shrink(fc, fcp);
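+		/* If the hash has grown past twice the high watermark, or
+		 * the GC backlog itself exceeds the high watermark, bump
+		 * the genid and refuse to add a new entry.
+		 */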
 
+		if (fcp->hash_count > 2 * fc->high_watermark ||
+		    atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
+			atomic_inc(&net->xfrm.flow_cache_genid);
+			flo = ERR_PTR(-ENOBUFS);
+			goto ret_object;
+		}
+
 		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
 		if (fle) {
 			fle->net = net;
@@ -446,6 +457,7 @@
 	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
 	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
 	mutex_init(&net->xfrm.flow_flush_sem);
+	atomic_set(&net->xfrm.flow_cache_gc_count, 0);
 
 	fc->hash_shift = 10;
 	fc->low_watermark = 2 * flow_cache_hash_size(fc);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index e640462..f96ee8b 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -25,9 +25,9 @@
 
 
 static inline int
-gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
+gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
 {
-	if (nla_put(d->skb, type, size, buf))
+	if (nla_put_64bit(d->skb, type, size, buf, padattr))
 		goto nla_put_failure;
 	return 0;
 
@@ -59,7 +59,8 @@
  */
 int
 gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
-	int xstats_type, spinlock_t *lock, struct gnet_dump *d)
+			     int xstats_type, spinlock_t *lock,
+			     struct gnet_dump *d, int padattr)
 	__acquires(lock)
 {
 	memset(d, 0, sizeof(*d));
@@ -71,16 +72,17 @@
 	d->skb = skb;
 	d->compat_tc_stats = tc_stats_type;
 	d->compat_xstats = xstats_type;
+	d->padattr = padattr;
 
 	if (d->tail)
-		return gnet_stats_copy(d, type, NULL, 0);
+		return gnet_stats_copy(d, type, NULL, 0, padattr);
 
 	return 0;
 }
 EXPORT_SYMBOL(gnet_stats_start_copy_compat);
 
 /**
- * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
+ * gnet_stats_start_copy - start dumping procedure in compatibility mode
  * @skb: socket buffer to put statistics TLVs into
  * @type: TLV type for top level statistic TLV
  * @lock: statistics lock
@@ -94,9 +96,9 @@
  */
 int
 gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
-	struct gnet_dump *d)
+		      struct gnet_dump *d, int padattr)
 {
-	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d);
+	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
 }
 EXPORT_SYMBOL(gnet_stats_start_copy);
 
@@ -169,7 +171,8 @@
 		memset(&sb, 0, sizeof(sb));
 		sb.bytes = bstats.bytes;
 		sb.packets = bstats.packets;
-		return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
+		return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb),
+				       TCA_STATS_PAD);
 	}
 	return 0;
 }
@@ -208,11 +211,13 @@
 	}
 
 	if (d->tail) {
-		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est));
+		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
+				      TCA_STATS_PAD);
 		if (res < 0 || est.bps == r->bps)
 			return res;
 		/* emit 64bit stats only if needed */
-		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r));
+		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, r, sizeof(*r),
+				       TCA_STATS_PAD);
 	}
 
 	return 0;
@@ -286,7 +291,8 @@
 
 	if (d->tail)
 		return gnet_stats_copy(d, TCA_STATS_QUEUE,
-				       &qstats, sizeof(qstats));
+				       &qstats, sizeof(qstats),
+				       TCA_STATS_PAD);
 
 	return 0;
 }
@@ -316,7 +322,8 @@
 	}
 
 	if (d->tail)
-		return gnet_stats_copy(d, TCA_STATS_APP, st, len);
+		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
+				       TCA_STATS_PAD);
 
 	return 0;
 
@@ -347,12 +354,12 @@
 
 	if (d->compat_tc_stats)
 		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
-			sizeof(d->tc_stats)) < 0)
+				    sizeof(d->tc_stats), d->padattr) < 0)
 			return -1;
 
 	if (d->compat_xstats && d->xstats) {
 		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
-			d->xstats_len) < 0)
+				    d->xstats_len, d->padattr) < 0)
 			return -1;
 	}
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f18ae91..29dd8cc 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1763,21 +1763,22 @@
 			NEIGH_VAR(parms, MCAST_PROBES)) ||
 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
-	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
+	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
+			  NDTPA_PAD) ||
 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
-			  NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
+			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
-			  NEIGH_VAR(parms, GC_STALETIME)) ||
+			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
-			  NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
+			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
-			  NEIGH_VAR(parms, RETRANS_TIME)) ||
+			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
-			  NEIGH_VAR(parms, ANYCAST_DELAY)) ||
+			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
-			  NEIGH_VAR(parms, PROXY_DELAY)) ||
+			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
-			  NEIGH_VAR(parms, LOCKTIME)))
+			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
 		goto nla_put_failure;
 	return nla_nest_end(skb, nest);
 
@@ -1804,7 +1805,7 @@
 	ndtmsg->ndtm_pad2   = 0;
 
 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
-	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
+	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
@@ -1856,7 +1857,8 @@
 			ndst.ndts_table_fulls		+= st->table_fulls;
 		}
 
-		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
+		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
+				  NDTA_PAD))
 			goto nla_put_failure;
 	}
 
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 2bf8329..14d0934 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -162,7 +162,8 @@
 		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
 		   sd->processed, sd->dropped, sd->time_squeeze, 0,
 		   0, 0, 0, 0, /* was fastroute */
-		   sd->cpu_collision, sd->received_rps, flow_limit_count);
+		   0,	/* was cpu_collision */
+		   sd->received_rps, flow_limit_count);
 	return 0;
 }
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 20999aa..8604ae2 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3472,7 +3472,6 @@
 				     pkt_dev->odevname, ret);
 		pkt_dev->errors++;
 		/* fallthru */
-	case NETDEV_TX_LOCKED:
 	case NETDEV_TX_BUSY:
 		/* Retry it next time */
 		atomic_dec(&(pkt_dev->skb->users));
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f206677..d69c464 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -808,11 +808,6 @@
 	a->rx_nohandler = b->rx_nohandler;
 }
 
-static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
-{
-	memcpy(v, b, sizeof(*b));
-}
-
 /* All VF info */
 static inline int rtnl_vfinfo_size(const struct net_device *dev,
 				   u32 ext_filter_mask)
@@ -830,17 +825,17 @@
 			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
 			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
 			 /* IFLA_VF_STATS_RX_PACKETS */
-			 nla_total_size(sizeof(__u64)) +
+			 nla_total_size_64bit(sizeof(__u64)) +
 			 /* IFLA_VF_STATS_TX_PACKETS */
-			 nla_total_size(sizeof(__u64)) +
+			 nla_total_size_64bit(sizeof(__u64)) +
 			 /* IFLA_VF_STATS_RX_BYTES */
-			 nla_total_size(sizeof(__u64)) +
+			 nla_total_size_64bit(sizeof(__u64)) +
 			 /* IFLA_VF_STATS_TX_BYTES */
-			 nla_total_size(sizeof(__u64)) +
+			 nla_total_size_64bit(sizeof(__u64)) +
 			 /* IFLA_VF_STATS_BROADCAST */
-			 nla_total_size(sizeof(__u64)) +
+			 nla_total_size_64bit(sizeof(__u64)) +
 			 /* IFLA_VF_STATS_MULTICAST */
-			 nla_total_size(sizeof(__u64)) +
+			 nla_total_size_64bit(sizeof(__u64)) +
 			 nla_total_size(sizeof(struct ifla_vf_trust)));
 		return size;
 	} else
@@ -881,9 +876,9 @@
 	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
 	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
 	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
-	       + nla_total_size(sizeof(struct rtnl_link_ifmap))
+	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
 	       + nla_total_size(sizeof(struct rtnl_link_stats))
-	       + nla_total_size(sizeof(struct rtnl_link_stats64))
+	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
 	       + nla_total_size(4) /* IFLA_TXQLEN */
@@ -909,6 +904,7 @@
 	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
 	       + nla_total_size(1); /* IFLA_PROTO_DOWN */
 
 }
@@ -1053,25 +1049,23 @@
 static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
 					      struct net_device *dev)
 {
-	const struct rtnl_link_stats64 *stats;
-	struct rtnl_link_stats64 temp;
+	struct rtnl_link_stats64 *sp;
 	struct nlattr *attr;
 
-	stats = dev_get_stats(dev, &temp);
+	attr = nla_reserve_64bit(skb, IFLA_STATS64,
+				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
+	if (!attr)
+		return -EMSGSIZE;
+
+	sp = nla_data(attr);
+	dev_get_stats(dev, sp);
 
 	attr = nla_reserve(skb, IFLA_STATS,
 			   sizeof(struct rtnl_link_stats));
 	if (!attr)
 		return -EMSGSIZE;
 
-	copy_rtnl_link_stats(nla_data(attr), stats);
-
-	attr = nla_reserve(skb, IFLA_STATS64,
-			   sizeof(struct rtnl_link_stats64));
-	if (!attr)
-		return -EMSGSIZE;
-
-	copy_rtnl_link_stats64(nla_data(attr), stats);
+	copy_rtnl_link_stats(nla_data(attr), sp);
 
 	return 0;
 }
@@ -1159,18 +1153,18 @@
 		nla_nest_cancel(skb, vfinfo);
 		return -EMSGSIZE;
 	}
-	if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
-			vf_stats.rx_packets) ||
-	    nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
-			vf_stats.tx_packets) ||
-	    nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
-			vf_stats.rx_bytes) ||
-	    nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
-			vf_stats.tx_bytes) ||
-	    nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
-			vf_stats.broadcast) ||
-	    nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
-			vf_stats.multicast))
+	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
+			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
+			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
+			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
+			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
+			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
+			      vf_stats.multicast, IFLA_VF_STATS_PAD))
 		return -EMSGSIZE;
 	nla_nest_end(skb, vfstats);
 	nla_nest_end(skb, vf);
@@ -1179,15 +1173,17 @@
 
 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
 {
-	struct rtnl_link_ifmap map = {
-		.mem_start   = dev->mem_start,
-		.mem_end     = dev->mem_end,
-		.base_addr   = dev->base_addr,
-		.irq         = dev->irq,
-		.dma         = dev->dma,
-		.port        = dev->if_port,
-	};
-	if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+	struct rtnl_link_ifmap map;
+
+	memset(&map, 0, sizeof(map));
+	map.mem_start   = dev->mem_start;
+	map.mem_end     = dev->mem_end;
+	map.base_addr   = dev->base_addr;
+	map.irq         = dev->irq;
+	map.dma         = dev->dma;
+	map.port        = dev->if_port;
+
+	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
 		return -EMSGSIZE;
 
 	return 0;
@@ -3450,6 +3446,202 @@
 	return err;
 }
 
+static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
+{
+	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
+	       (!idxattr || idxattr == attrid);
+}
+
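+/* Illustrative: a request for only the 64-bit link stats sets
+ * filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64).
+ */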
+static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
+			       int type, u32 pid, u32 seq, u32 change,
+			       unsigned int flags, unsigned int filter_mask,
+			       int *idxattr, int *prividx)
+{
+	struct if_stats_msg *ifsm;
+	struct nlmsghdr *nlh;
+	struct nlattr *attr;
+	int s_prividx = *prividx;
+
+	ASSERT_RTNL();
+
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	ifsm = nlmsg_data(nlh);
+	ifsm->ifindex = dev->ifindex;
+	ifsm->filter_mask = filter_mask;
+
+	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
+		struct rtnl_link_stats64 *sp;
+
+		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
+					 sizeof(struct rtnl_link_stats64),
+					 IFLA_STATS_UNSPEC);
+		if (!attr)
+			goto nla_put_failure;
+
+		sp = nla_data(attr);
+		dev_get_stats(dev, sp);
+	}
+
+	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
+		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
+
+		if (ops && ops->fill_linkxstats) {
+			int err;
+
+			*idxattr = IFLA_STATS_LINK_XSTATS;
+			attr = nla_nest_start(skb,
+					      IFLA_STATS_LINK_XSTATS);
+			if (!attr)
+				goto nla_put_failure;
+
+			err = ops->fill_linkxstats(skb, dev, prividx);
+			nla_nest_end(skb, attr);
+			if (err)
+				goto nla_put_failure;
+			*idxattr = 0;
+		}
+	}
+
+	nlmsg_end(skb, nlh);
+
+	return 0;
+
+nla_put_failure:
+	/* not a multi message or no progress means a real error */
+	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
+		nlmsg_cancel(skb, nlh);
+	else
+		nlmsg_end(skb, nlh);
+
+	return -EMSGSIZE;
+}
+
+static const struct nla_policy ifla_stats_policy[IFLA_STATS_MAX + 1] = {
+	[IFLA_STATS_LINK_64]	= { .len = sizeof(struct rtnl_link_stats64) },
+};
+
+static size_t if_nlmsg_stats_size(const struct net_device *dev,
+				  u32 filter_mask)
+{
+	size_t size = 0;
+
+	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
+		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
+
+	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
+		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
+
+		if (ops && ops->get_linkxstats_size) {
+			size += nla_total_size(ops->get_linkxstats_size(dev));
+			/* for IFLA_STATS_LINK_XSTATS */
+			size += nla_total_size(0);
+		}
+	}
+
+	return size;
+}
+
+static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev = NULL;
+	int idxattr = 0, prividx = 0;
+	struct if_stats_msg *ifsm;
+	struct sk_buff *nskb;
+	u32 filter_mask;
+	int err;
+
+	ifsm = nlmsg_data(nlh);
+	if (ifsm->ifindex > 0)
+		dev = __dev_get_by_index(net, ifsm->ifindex);
+	else
+		return -EINVAL;
+
+	if (!dev)
+		return -ENODEV;
+
+	filter_mask = ifsm->filter_mask;
+	if (!filter_mask)
+		return -EINVAL;
+
+	nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
+	if (!nskb)
+		return -ENOBUFS;
+
+	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
+				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
+				  0, filter_mask, &idxattr, &prividx);
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(nskb);
+	} else {
+		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
+	}
+
+	return err;
+}
+
+static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	int h, s_h, err, s_idx, s_idxattr, s_prividx;
+	struct net *net = sock_net(skb->sk);
+	unsigned int flags = NLM_F_MULTI;
+	struct if_stats_msg *ifsm;
+	struct hlist_head *head;
+	struct net_device *dev;
+	u32 filter_mask = 0;
+	int idx = 0;
+
+	s_h = cb->args[0];
+	s_idx = cb->args[1];
+	s_idxattr = cb->args[2];
+	s_prividx = cb->args[3];
+
+	cb->seq = net->dev_base_seq;
+
+	ifsm = nlmsg_data(cb->nlh);
+	filter_mask = ifsm->filter_mask;
+	if (!filter_mask)
+		return -EINVAL;
+
+	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+		idx = 0;
+		head = &net->dev_index_head[h];
+		hlist_for_each_entry(dev, head, index_hlist) {
+			if (idx < s_idx)
+				goto cont;
+			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
+						  NETLINK_CB(cb->skb).portid,
+						  cb->nlh->nlmsg_seq, 0,
+						  flags, filter_mask,
+						  &s_idxattr, &s_prividx);
+			/* If we ran out of room on the first message,
+			 * we're in trouble
+			 */
+			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
+
+			if (err < 0)
+				goto out;
+			s_prividx = 0;
+			s_idxattr = 0;
+			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+cont:
+			idx++;
+		}
+	}
+out:
+	cb->args[3] = s_prividx;
+	cb->args[2] = s_idxattr;
+	cb->args[1] = idx;
+	cb->args[0] = h;
+
+	return skb->len;
+}
+
 /* Process one rtnetlink message. */
 
 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -3599,4 +3791,7 @@
 	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
 	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
 	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
+
+	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
+		      NULL);
 }
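As a side note, the RTM_GETSTATS path above calls only two rtnl_link_ops hooks: ops->get_linkxstats_size(dev) and ops->fill_linkxstats(skb, dev, prividx). A minimal, hypothetical sketch of a link type wiring them up follows; all foo_*/FOO_* names are invented and the signatures are merely inferred from the call sites in rtnl_fill_statsinfo() and if_nlmsg_stats_size():

struct foo_xstats {
	u64	rx_special;
	u64	tx_special;
};

enum {
	FOO_XSTATS_UNSPEC,
	FOO_XSTATS_BASIC,	/* carries struct foo_xstats */
};

static size_t foo_get_linkxstats_size(const struct net_device *dev)
{
	/* room for the one nested attribute filled in below */
	return nla_total_size(sizeof(struct foo_xstats));
}

static int foo_fill_linkxstats(struct sk_buff *skb,
			       const struct net_device *dev,
			       int *prividx)
{
	struct foo_xstats xs;

	/* prividx would let a driver resume a partially filled dump;
	 * it is ignored in this toy example.
	 */
	foo_collect_xstats(dev, &xs);	/* hypothetical helper */
	return nla_put(skb, FOO_XSTATS_BASIC, sizeof(xs), &xs);
}

static struct rtnl_link_ops foo_link_ops __read_mostly = {
	.kind			= "foo",
	.get_linkxstats_size	= foo_get_linkxstats_size,
	.fill_linkxstats	= foo_fill_linkxstats,
};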
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d04c2d1..f2b77e5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3076,11 +3076,11 @@
 	struct sk_buff *frag_skb = head_skb;
 	unsigned int offset = doffset;
 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
+	unsigned int partial_segs = 0;
 	unsigned int headroom;
-	unsigned int len;
+	unsigned int len = head_skb->len;
 	__be16 proto;
-	bool csum;
-	int sg = !!(features & NETIF_F_SG);
+	bool csum, sg;
 	int nfrags = skb_shinfo(head_skb)->nr_frags;
 	int err = -ENOMEM;
 	int i = 0;
@@ -3092,8 +3092,21 @@
 	if (unlikely(!proto))
 		return ERR_PTR(-EINVAL);
 
+	sg = !!(features & NETIF_F_SG);
 	csum = !!can_checksum_protocol(features, proto);
 
+	/* GSO partial only requires that we trim off any excess that
+	 * doesn't fit into an MSS sized block, so take care of that
+	 * now.
+	 */
+	if (sg && csum && (features & NETIF_F_GSO_PARTIAL)) {
+		partial_segs = len / mss;
+		if (partial_segs > 1)
+			mss *= partial_segs;
+		else
+			partial_segs = 0;
+	}
+
 	headroom = skb_headroom(head_skb);
 	pos = skb_headlen(head_skb);
 
@@ -3281,6 +3294,23 @@
 	 */
 	segs->prev = tail;
 
+	/* Update GSO info on first skb in partial sequence. */
+	if (partial_segs) {
+		int type = skb_shinfo(head_skb)->gso_type;
+
+		/* Update type to add partial and then remove dodgy if set */
+		type |= SKB_GSO_PARTIAL;
+		type &= ~SKB_GSO_DODGY;
+
+		/* Update GSO info and prepare to start updating headers on
+		 * our way back down the stack of protocols.
+		 */
+		skb_shinfo(segs)->gso_size = skb_shinfo(head_skb)->gso_size;
+		skb_shinfo(segs)->gso_segs = partial_segs;
+		skb_shinfo(segs)->gso_type = type;
+		SKB_GSO_CB(segs)->data_offset = skb_headroom(segs) + doffset;
+	}
+
 	/* Following permits correct backpressure, for protocols
 	 * using skb_set_owner_w().
 	 * Idea is to transfer ownership from head_skb to last segment.
@@ -4502,13 +4532,16 @@
 		__skb_push(skb, offset);
 		err = __vlan_insert_tag(skb, skb->vlan_proto,
 					skb_vlan_tag_get(skb));
-		if (err)
+		if (err) {
+			__skb_pull(skb, offset);
 			return err;
+		}
+
 		skb->protocol = skb->vlan_proto;
 		skb->mac_len += VLAN_HLEN;
-		__skb_pull(skb, offset);
 
 		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+		__skb_pull(skb, offset);
 	}
 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
 	return 0;
@@ -4592,3 +4625,239 @@
 	return NULL;
 }
 EXPORT_SYMBOL(alloc_skb_with_frags);
+
+/* carve out the first off bytes from skb when off < headlen */
+static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
+				    const int headlen, gfp_t gfp_mask)
+{
+	int i;
+	int size = skb_end_offset(skb);
+	int new_hlen = headlen - off;
+	u8 *data;
+
+	size = SKB_DATA_ALIGN(size);
+
+	if (skb_pfmemalloc(skb))
+		gfp_mask |= __GFP_MEMALLOC;
+	data = kmalloc_reserve(size +
+			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+			       gfp_mask, NUMA_NO_NODE, NULL);
+	if (!data)
+		return -ENOMEM;
+
+	size = SKB_WITH_OVERHEAD(ksize(data));
+
+	/* Copy real data, and all frags */
+	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
+	skb->len -= off;
+
+	memcpy((struct skb_shared_info *)(data + size),
+	       skb_shinfo(skb),
+	       offsetof(struct skb_shared_info,
+			frags[skb_shinfo(skb)->nr_frags]));
+	if (skb_cloned(skb)) {
+		/* drop the old head gracefully */
+		if (skb_orphan_frags(skb, gfp_mask)) {
+			kfree(data);
+			return -ENOMEM;
+		}
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+			skb_frag_ref(skb, i);
+		if (skb_has_frag_list(skb))
+			skb_clone_fraglist(skb);
+		skb_release_data(skb);
+	} else {
+		/* we can reuse the existing refcount; all we did was
+		 * relocate values
+		 */
+		skb_free_head(skb);
+	}
+
+	skb->head = data;
+	skb->data = data;
+	skb->head_frag = 0;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	skb->end = size;
+#else
+	skb->end = skb->head + size;
+#endif
+	skb_set_tail_pointer(skb, skb_headlen(skb));
+	skb_headers_offset_update(skb, 0);
+	skb->cloned = 0;
+	skb->hdr_len = 0;
+	skb->nohdr = 0;
+	atomic_set(&skb_shinfo(skb)->dataref, 1);
+
+	return 0;
+}
+
+static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
+
+/* carve out the first eat bytes from skb's frag_list. May recurse into
+ * pskb_carve()
+ */
+static int pskb_carve_frag_list(struct sk_buff *skb,
+				struct skb_shared_info *shinfo, int eat,
+				gfp_t gfp_mask)
+{
+	struct sk_buff *list = shinfo->frag_list;
+	struct sk_buff *clone = NULL;
+	struct sk_buff *insp = NULL;
+
+	do {
+		if (!list) {
+			pr_err("Not enough bytes to eat. Want %d\n", eat);
+			return -EFAULT;
+		}
+		if (list->len <= eat) {
+			/* Eaten as whole. */
+			eat -= list->len;
+			list = list->next;
+			insp = list;
+		} else {
+			/* Eaten partially. */
+			if (skb_shared(list)) {
+				clone = skb_clone(list, gfp_mask);
+				if (!clone)
+					return -ENOMEM;
+				insp = list->next;
+				list = clone;
+			} else {
+				/* This may be pulled without problems. */
+				insp = list;
+			}
+			if (pskb_carve(list, eat, gfp_mask) < 0) {
+				kfree_skb(clone);
+				return -ENOMEM;
+			}
+			break;
+		}
+	} while (eat);
+
+	/* Free pulled out fragments. */
+	while ((list = shinfo->frag_list) != insp) {
+		shinfo->frag_list = list->next;
+		kfree_skb(list);
+	}
+	/* And insert new clone at head. */
+	if (clone) {
+		clone->next = list;
+		shinfo->frag_list = clone;
+	}
+	return 0;
+}
+
+/* carve off the first off bytes from skb. The split point (off) is in the
+ * non-linear part of skb
+ */
+static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
+				       int pos, gfp_t gfp_mask)
+{
+	int i, k = 0;
+	int size = skb_end_offset(skb);
+	u8 *data;
+	const int nfrags = skb_shinfo(skb)->nr_frags;
+	struct skb_shared_info *shinfo;
+
+	size = SKB_DATA_ALIGN(size);
+
+	if (skb_pfmemalloc(skb))
+		gfp_mask |= __GFP_MEMALLOC;
+	data = kmalloc_reserve(size +
+			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+			       gfp_mask, NUMA_NO_NODE, NULL);
+	if (!data)
+		return -ENOMEM;
+
+	size = SKB_WITH_OVERHEAD(ksize(data));
+
+	memcpy((struct skb_shared_info *)(data + size),
+	       skb_shinfo(skb), offsetof(struct skb_shared_info,
+					 frags[skb_shinfo(skb)->nr_frags]));
+	if (skb_orphan_frags(skb, gfp_mask)) {
+		kfree(data);
+		return -ENOMEM;
+	}
+	shinfo = (struct skb_shared_info *)(data + size);
+	for (i = 0; i < nfrags; i++) {
+		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+		if (pos + fsize > off) {
+			shinfo->frags[k] = skb_shinfo(skb)->frags[i];
+
+			if (pos < off) {
+				/* Split frag.
+				 * We have two variants in this case:
+				 * 1. Move all the frag to the second
+				 *    part, if it is possible. F.e.
+				 *    this approach is mandatory for TUX,
+				 *    where splitting is expensive.
+				 * 2. Split the frag accurately; this is what we do.
+				 */
+				shinfo->frags[0].page_offset += off - pos;
+				skb_frag_size_sub(&shinfo->frags[0], off - pos);
+			}
+			skb_frag_ref(skb, i);
+			k++;
+		}
+		pos += fsize;
+	}
+	shinfo->nr_frags = k;
+	if (skb_has_frag_list(skb))
+		skb_clone_fraglist(skb);
+
+	if (k == 0) {
+		/* split line is in frag list */
+		pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
+	}
+	skb_release_data(skb);
+
+	skb->head = data;
+	skb->head_frag = 0;
+	skb->data = data;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	skb->end = size;
+#else
+	skb->end = skb->head + size;
+#endif
+	skb_reset_tail_pointer(skb);
+	skb_headers_offset_update(skb, 0);
+	skb->cloned   = 0;
+	skb->hdr_len  = 0;
+	skb->nohdr    = 0;
+	skb->len -= off;
+	skb->data_len = skb->len;
+	atomic_set(&skb_shinfo(skb)->dataref, 1);
+	return 0;
+}
+
+/* remove len bytes from the beginning of the skb */
+static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
+{
+	int headlen = skb_headlen(skb);
+
+	if (len < headlen)
+		return pskb_carve_inside_header(skb, len, headlen, gfp);
+	else
+		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
+}
+
+/* Extract to_copy bytes starting at off from skb, and return this in
+ * a new skb
+ */
+struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
+			     int to_copy, gfp_t gfp)
+{
+	struct sk_buff  *clone = skb_clone(skb, gfp);
+
+	if (!clone)
+		return NULL;
+
+	if (pskb_carve(clone, off, gfp) < 0 ||
+	    pskb_trim(clone, to_copy)) {
+		kfree_skb(clone);
+		return NULL;
+	}
+	return clone;
+}
+EXPORT_SYMBOL(pskb_extract);
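A rough, hypothetical caller sketch for the newly exported pskb_extract(); only pskb_extract() itself comes from the patch above, the foo_* names and the offsets are made up:

static int foo_deliver_range(struct sk_buff *skb, int off, int len)
{
	struct sk_buff *part;

	part = pskb_extract(skb, off, len, GFP_ATOMIC);
	if (!part)
		return -ENOMEM;

	/* 'skb' is left untouched: the range was carved out of a clone.
	 * 'part' now holds bytes [off, off + len) and is owned by the
	 * consumer, which must eventually free it.
	 */
	foo_consume(part);	/* hypothetical consumer */
	return 0;
}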
diff --git a/net/core/sock.c b/net/core/sock.c
index b67b9ae..08bf97e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -221,7 +221,8 @@
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
-  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
+  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
+  "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -237,7 +238,8 @@
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
-  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
+  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
+  "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -253,7 +255,8 @@
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
-  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
+  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
+  "clock-AF_MAX"
 };
 
 /*
@@ -402,9 +405,8 @@
 }
 
 
-int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int err;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 
@@ -414,10 +416,6 @@
 		return -ENOMEM;
 	}
 
-	err = sk_filter(sk, skb);
-	if (err)
-		return err;
-
 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 		atomic_inc(&sk->sk_drops);
 		return -ENOBUFS;
@@ -440,6 +438,18 @@
 		sk->sk_data_ready(sk);
 	return 0;
 }
+EXPORT_SYMBOL(__sock_queue_rcv_skb);
+
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int err;
+
+	err = sk_filter(sk, skb);
+	if (err)
+		return err;
+
+	return __sock_queue_rcv_skb(sk, skb);
+}
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
@@ -832,7 +842,8 @@
 		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
 			if (sk->sk_protocol == IPPROTO_TCP &&
 			    sk->sk_type == SOCK_STREAM) {
-				if (sk->sk_state != TCP_ESTABLISHED) {
+				if ((1 << sk->sk_state) &
+				    (TCPF_CLOSE | TCPF_LISTEN)) {
 					ret = -EINVAL;
 					break;
 				}
@@ -1418,8 +1429,12 @@
 }
 EXPORT_SYMBOL(sk_alloc);
 
-void sk_destruct(struct sock *sk)
+/* Sockets having SOCK_RCU_FREE will call this function after one RCU
+ * grace period. This is the case for UDP sockets and TCP listeners.
+ */
+static void __sk_destruct(struct rcu_head *head)
 {
+	struct sock *sk = container_of(head, struct sock, sk_rcu);
 	struct sk_filter *filter;
 
 	if (sk->sk_destruct)
@@ -1448,6 +1463,14 @@
 	sk_prot_free(sk->sk_prot_creator, sk);
 }
 
+void sk_destruct(struct sock *sk)
+{
+	if (sock_flag(sk, SOCK_RCU_FREE))
+		call_rcu(&sk->sk_rcu, __sk_destruct);
+	else
+		__sk_destruct(&sk->sk_rcu);
+}
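For reference, a protocol opts into this RCU-deferred destruction simply by flagging its sockets; a sketch, not taken from this patch, since the call site is protocol specific:

	/* typically done once when the socket is created */
	sock_set_flag(sk, SOCK_RCU_FREE);

	/* lookups may then run under rcu_read_lock() without taking a
	 * refcount, because the struct sock is only freed after a grace
	 * period via the call_rcu() above.
	 */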
+
 static void __sk_free(struct sock *sk)
 {
 	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
@@ -1512,6 +1535,7 @@
 		newsk->sk_dst_cache	= NULL;
 		newsk->sk_wmem_queued	= 0;
 		newsk->sk_forward_alloc = 0;
+		atomic_set(&newsk->sk_drops, 0);
 		newsk->sk_send_head	= NULL;
 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
 
@@ -1631,6 +1655,17 @@
 }
 EXPORT_SYMBOL(sock_wfree);
 
+/* This variant of sock_wfree() is used by TCP,
+ * since it sets SOCK_USE_WRITE_QUEUE.
+ */
+void __sock_wfree(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+
+	if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
+		__sk_free(sk);
+}
+
 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 {
 	skb_orphan(skb);
@@ -1653,8 +1688,21 @@
 }
 EXPORT_SYMBOL(skb_set_owner_w);
 
+/* This helper is used by netem, as it can hold packets in its
+ * delay queue. We want to allow the owner socket to send more
+ * packets, as if they were already TX completed by a typical driver.
+ * But we also want to keep skb->sk set because some packet schedulers
+ * rely on it (sch_fq for example). So we set skb->truesize to a small
+ * amount (1) and decrease sk_wmem_alloc accordingly.
+ */
 void skb_orphan_partial(struct sk_buff *skb)
 {
+	/* If this skb is a TCP pure ACK or already went here,
+	 * we have nothing to do. 2 is already a very small truesize.
+	 */
+	if (skb->truesize <= 2)
+		return;
+
 	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
 	 * so we do not completely orphan skb, but transfer all
 	 * accounted bytes but one, to avoid unexpected reorders.
@@ -1866,27 +1914,51 @@
 }
 EXPORT_SYMBOL(sock_alloc_send_skb);
 
+int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+		     struct sockcm_cookie *sockc)
+{
+	u32 tsflags;
+
+	switch (cmsg->cmsg_type) {
+	case SO_MARK:
+		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
+		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+			return -EINVAL;
+		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
+		break;
+	case SO_TIMESTAMPING:
+		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+			return -EINVAL;
+
+		tsflags = *(u32 *)CMSG_DATA(cmsg);
+		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
+			return -EINVAL;
+
+		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
+		sockc->tsflags |= tsflags;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(__sock_cmsg_send);
+
 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 		   struct sockcm_cookie *sockc)
 {
 	struct cmsghdr *cmsg;
+	int ret;
 
 	for_each_cmsghdr(cmsg, msg) {
 		if (!CMSG_OK(msg, cmsg))
 			return -EINVAL;
 		if (cmsg->cmsg_level != SOL_SOCKET)
 			continue;
-		switch (cmsg->cmsg_type) {
-		case SO_MARK:
-			if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
-				return -EPERM;
-			if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
-				return -EINVAL;
-			sockc->mark = *(u32 *)CMSG_DATA(cmsg);
-			break;
-		default:
-			return -EINVAL;
-		}
+		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
+		if (ret)
+			return ret;
 	}
 	return 0;
 }
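With SO_TIMESTAMPING now accepted as a per-call control message, userspace can request TX timestamps for an individual sendmsg() instead of toggling the socket option, for protocols that feed their control messages through this helper. A hedged userspace sketch follows; it assumes the usual SOF_TIMESTAMPING_* flag names from <linux/net_tstamp.h> (only flags within SOF_TIMESTAMPING_TX_RECORD_MASK are accepted here), and header locations vary slightly between libcs:

#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

static ssize_t send_with_tx_tstamp(int fd, struct msghdr *msg)
{
	char control[CMSG_SPACE(sizeof(uint32_t))] = {0};
	uint32_t tsflags = SOF_TIMESTAMPING_TX_SOFTWARE;
	struct cmsghdr *cmsg;

	msg->msg_control = control;
	msg->msg_controllen = sizeof(control);
	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SO_TIMESTAMPING;
	cmsg->cmsg_len = CMSG_LEN(sizeof(tsflags));
	memcpy(CMSG_DATA(cmsg), &tsflags, sizeof(tsflags));

	/* msg->msg_iov / msg_name are assumed to be set up by the caller */
	return sendmsg(fd, msg, 0);
}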
@@ -1971,33 +2043,27 @@
 	__releases(&sk->sk_lock.slock)
 	__acquires(&sk->sk_lock.slock)
 {
-	struct sk_buff *skb = sk->sk_backlog.head;
+	struct sk_buff *skb, *next;
 
-	do {
+	while ((skb = sk->sk_backlog.head) != NULL) {
 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
-		bh_unlock_sock(sk);
+
+		spin_unlock_bh(&sk->sk_lock.slock);
 
 		do {
-			struct sk_buff *next = skb->next;
-
+			next = skb->next;
 			prefetch(next);
 			WARN_ON_ONCE(skb_dst_is_noref(skb));
 			skb->next = NULL;
 			sk_backlog_rcv(sk, skb);
 
-			/*
-			 * We are in process context here with softirqs
-			 * disabled, use cond_resched_softirq() to preempt.
-			 * This is safe to do because we've taken the backlog
-			 * queue private:
-			 */
-			cond_resched_softirq();
+			cond_resched();
 
 			skb = next;
 		} while (skb != NULL);
 
-		bh_lock_sock(sk);
-	} while ((skb = sk->sk_backlog.head) != NULL);
+		spin_lock_bh(&sk->sk_lock.slock);
+	}
 
 	/*
 	 * Doing the zeroing here guarantees we cannot loop forever
@@ -2006,6 +2072,13 @@
 	sk->sk_backlog.len = 0;
 }
 
+void __sk_flush_backlog(struct sock *sk)
+{
+	spin_lock_bh(&sk->sk_lock.slock);
+	__release_sock(sk);
+	spin_unlock_bh(&sk->sk_lock.slock);
+}
+
 /**
  * sk_wait_data - wait for data to arrive at sk_receive_queue
  * @sk:    sock to wait on
@@ -2142,6 +2215,15 @@
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
 
+int sk_set_peek_off(struct sock *sk, int val)
+{
+	if (val < 0)
+		return -EINVAL;
+
+	sk->sk_peek_off = val;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sk_set_peek_off);
 
 /*
  * Set of default routines for initialising struct proto_ops when
@@ -2429,11 +2511,6 @@
 
 void release_sock(struct sock *sk)
 {
-	/*
-	 * The sk_lock has mutex_unlock() semantics:
-	 */
-	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-
 	spin_lock_bh(&sk->sk_lock.slock);
 	if (sk->sk_backlog.tail)
 		__release_sock(sk);
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index a996ce8..6b10573 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -67,6 +67,7 @@
 	mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
 	mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
 
 	return nla_put(skb, attrtype, sizeof(mem), &mem);
 }
@@ -119,7 +120,7 @@
 {
 	return NLMSG_ALIGN(sizeof(struct inet_diag_msg)
 	       + nla_total_size(sizeof(u8)) /* INET_DIAG_PROTOCOL */
-	       + nla_total_size(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
+	       + nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
 }
 
 static void sock_diag_broadcast_destroy_work(struct work_struct *work)
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index a6beb7b..0df2aa6 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -294,6 +294,15 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+# ifdef CONFIG_HAVE_EBPF_JIT
+	{
+		.procname	= "bpf_jit_harden",
+		.data		= &bpf_jit_harden,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec,
+	},
+# endif
 #endif
 	{
 		.procname	= "netdev_tstamp_prequeue",
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index b0e28d2..0c55ffb 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -198,9 +198,9 @@
 };
 
 DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
-#define DCCP_INC_STATS(field)	    SNMP_INC_STATS(dccp_statistics, field)
-#define DCCP_INC_STATS_BH(field)    SNMP_INC_STATS_BH(dccp_statistics, field)
-#define DCCP_DEC_STATS(field)	    SNMP_DEC_STATS(dccp_statistics, field)
+#define DCCP_INC_STATS(field)	SNMP_INC_STATS(dccp_statistics, field)
+#define __DCCP_INC_STATS(field)	__SNMP_INC_STATS(dccp_statistics, field)
+#define DCCP_DEC_STATS(field)	SNMP_DEC_STATS(dccp_statistics, field)
 
 /*
  * 	Checksumming routines
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 3bd14e8..ba34718 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -359,7 +359,7 @@
 		goto discard;
 	}
 
-	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
+	DCCP_INC_STATS(DCCP_MIB_INERRS);
 discard:
 	__kfree_skb(skb);
 	return 0;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 9c67a96..5c7e413 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -62,7 +62,7 @@
 	nexthop = daddr = usin->sin_addr.s_addr;
 
 	inet_opt = rcu_dereference_protected(inet->inet_opt,
-					     sock_owned_by_user(sk));
+					     lockdep_sock_is_held(sk));
 	if (inet_opt != NULL && inet_opt->opt.srr) {
 		if (daddr == 0)
 			return -EINVAL;
@@ -205,7 +205,7 @@
 	 * socket here.
 	 */
 	if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 	} else {
 		/*
 		 * Still in RESPOND, just remove it silently.
@@ -247,7 +247,7 @@
 
 	if (skb->len < offset + sizeof(*dh) ||
 	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;
 	}
 
@@ -256,7 +256,7 @@
 				       iph->saddr, ntohs(dh->dccph_sport),
 				       inet_iif(skb));
 	if (!sk) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;
 	}
 
@@ -273,7 +273,7 @@
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == DCCP_CLOSED)
 		goto out;
@@ -281,7 +281,7 @@
 	dp = dccp_sk(sk);
 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -318,7 +318,7 @@
 	case DCCP_REQUESTING:
 	case DCCP_RESPOND:
 		if (!sock_owned_by_user(sk)) {
-			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+			__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 			sk->sk_err = err;
 
 			sk->sk_error_report(sk);
@@ -431,11 +431,11 @@
 	return newsk;
 
 exit_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
 	dst_release(dst);
 exit:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 put_and_exit:
 	inet_csk_prepare_forced_close(newsk);
@@ -462,7 +462,7 @@
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_flow(net, &fl4, sk);
 	if (IS_ERR(rt)) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+		__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 		return NULL;
 	}
 
@@ -533,8 +533,8 @@
 	bh_unlock_sock(ctl_sk);
 
 	if (net_xmit_eval(err) == 0) {
-		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
-		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
+		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
 	}
 out:
 	 dst_release(dst);
@@ -637,7 +637,7 @@
 drop_and_free:
 	reqsk_free(req);
 drop:
-	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+	__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
@@ -764,6 +764,7 @@
 {
 	const struct dccp_hdr *dh;
 	const struct iphdr *iph;
+	bool refcounted;
 	struct sock *sk;
 	int min_cov;
 
@@ -801,7 +802,7 @@
 
 lookup:
 	sk = __inet_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
-			       dh->dccph_sport, dh->dccph_dport);
+			       dh->dccph_sport, dh->dccph_dport, &refcounted);
 	if (!sk) {
 		dccp_pr_debug("failed to look up flow ID in table and "
 			      "get corresponding socket\n");
@@ -830,6 +831,7 @@
 			goto lookup;
 		}
 		sock_hold(sk);
+		refcounted = true;
 		nsk = dccp_check_req(sk, skb, req);
 		if (!nsk) {
 			reqsk_put(req);
@@ -886,7 +888,8 @@
 	return 0;
 
 discard_and_relse:
-	sock_put(sk);
+	if (refcounted)
+		sock_put(sk);
 	goto discard_it;
 }
 
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4663a01..d176f4e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -80,8 +80,8 @@
 
 	if (skb->len < offset + sizeof(*dh) ||
 	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
-		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
-				   ICMP6_MIB_INERRORS);
+		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+				  ICMP6_MIB_INERRORS);
 		return;
 	}
 
@@ -91,8 +91,8 @@
 					inet6_iif(skb));
 
 	if (!sk) {
-		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
-				   ICMP6_MIB_INERRORS);
+		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+				  ICMP6_MIB_INERRORS);
 		return;
 	}
 
@@ -106,7 +106,7 @@
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == DCCP_CLOSED)
 		goto out;
@@ -114,7 +114,7 @@
 	dp = dccp_sk(sk);
 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -156,7 +156,7 @@
 	case DCCP_RESPOND:  /* Cannot happen.
 			       It can, if SYNs are crossed. --ANK */
 		if (!sock_owned_by_user(sk)) {
-			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+			__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 			sk->sk_err = err;
 			/*
 			 * Wake people up to see the error
@@ -277,8 +277,8 @@
 	if (!IS_ERR(dst)) {
 		skb_dst_set(skb, dst);
 		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
-		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
-		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
+		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
 		return;
 	}
 
@@ -378,7 +378,7 @@
 drop_and_free:
 	reqsk_free(req);
 drop:
-	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+	__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 	return -1;
 }
 
@@ -527,11 +527,11 @@
 	return newsk;
 
 out_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
 	dst_release(dst);
 out:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 }
 
@@ -642,6 +642,7 @@
 static int dccp_v6_rcv(struct sk_buff *skb)
 {
 	const struct dccp_hdr *dh;
+	bool refcounted;
 	struct sock *sk;
 	int min_cov;
 
@@ -670,7 +671,7 @@
 lookup:
 	sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
 			        dh->dccph_sport, dh->dccph_dport,
-				inet6_iif(skb));
+				inet6_iif(skb), &refcounted);
 	if (!sk) {
 		dccp_pr_debug("failed to look up flow ID in table and "
 			      "get corresponding socket\n");
@@ -699,6 +700,7 @@
 			goto lookup;
 		}
 		sock_hold(sk);
+		refcounted = true;
 		nsk = dccp_check_req(sk, skb, req);
 		if (!nsk) {
 			reqsk_put(req);
@@ -752,7 +754,8 @@
 	return 0;
 
 discard_and_relse:
-	sock_put(sk);
+	if (refcounted)
+		sock_put(sk);
 	goto discard_it;
 }
 
@@ -865,7 +868,7 @@
 	fl6.fl6_sport = inet->inet_sport;
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
 
 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 1994f8a..53eddf9 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -127,7 +127,7 @@
 		}
 		dccp_init_xmit_timers(newsk);
 
-		DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
+		__DCCP_INC_STATS(DCCP_MIB_PASSIVEOPENS);
 	}
 	return newsk;
 }
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 9bce318..74d29c5 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -253,7 +253,7 @@
 	return 0;
 
 out_invalid_option:
-	DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT);
+	DCCP_INC_STATS(DCCP_MIB_INVALIDOPT);
 	rc = DCCP_RESET_CODE_OPTION_ERROR;
 out_featneg_failed:
 	DCCP_WARN("DCCP(%p): Option %d (len=%d) error=%u\n", sk, opt, len, rc);
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 3ef7ace..3a2c340 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -28,7 +28,7 @@
 
 	dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
 	dccp_done(sk);
-	DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
+	__DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT);
 }
 
 /* A write timeout has occurred. Process the after effects. */
@@ -100,7 +100,7 @@
 	 * total number of retransmissions of clones of original packets.
 	 */
 	if (icsk->icsk_retransmits == 0)
-		DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);
+		__DCCP_INC_STATS(DCCP_MIB_TIMEOUTS);
 
 	if (dccp_retransmit_skb(sk) != 0) {
 		/*
@@ -179,7 +179,7 @@
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
 		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		sk_reset_timer(sk, &icsk->icsk_delack_timer,
 			       jiffies + TCP_DELACK_MIN);
 		goto out;
@@ -209,7 +209,7 @@
 			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		}
 		dccp_send_ack(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
 out:
 	bh_unlock_sock(sk);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 607a14f..b1dc096 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1034,10 +1034,13 @@
 	if (!fld.daddr) {
 		fld.daddr = fld.saddr;
 
-		err = -EADDRNOTAVAIL;
 		if (dev_out)
 			dev_put(dev_out);
+		err = -EINVAL;
 		dev_out = init_net.loopback_dev;
+		if (!dev_out->dn_ptr)
+			goto out;
+		err = -EADDRNOTAVAIL;
 		dev_hold(dev_out);
 		if (!fld.daddr) {
 			fld.daddr =
@@ -1110,6 +1113,8 @@
 		if (dev_out == NULL)
 			goto out;
 		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+		if (!dn_db)
+			goto e_inval;
 		/* Possible improvement - check all devices for local addr */
 		if (dn_dev_islocal(dev_out, fld.daddr)) {
 			dev_put(dev_out);
@@ -1151,6 +1156,8 @@
 			dev_put(dev_out);
 		dev_out = init_net.loopback_dev;
 		dev_hold(dev_out);
+		if (!dev_out->dn_ptr)
+			goto e_inval;
 		fld.flowidn_oif = dev_out->ifindex;
 		if (res.fi)
 			dn_fib_info_put(res.fi);
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index c79b85e..8737412 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -281,7 +281,7 @@
 				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
 				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
 				KEY_USR_VIEW | KEY_USR_READ,
-				KEY_ALLOC_NOT_IN_QUOTA, NULL);
+				KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
 	if (IS_ERR(keyring)) {
 		ret = PTR_ERR(keyring);
 		goto failed_put_cred;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index c28c474..eff5dfc 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -51,11 +51,12 @@
 EXPORT_SYMBOL_GPL(unregister_switch_driver);
 
 static struct dsa_switch_driver *
-dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name)
+dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr,
+		 const char **_name, void **priv)
 {
 	struct dsa_switch_driver *ret;
 	struct list_head *list;
-	char *name;
+	const char *name;
 
 	ret = NULL;
 	name = NULL;
@@ -66,7 +67,7 @@
 
 		drv = list_entry(list, struct dsa_switch_driver, list);
 
-		name = drv->probe(host_dev, sw_addr);
+		name = drv->probe(parent, host_dev, sw_addr, priv);
 		if (name != NULL) {
 			ret = drv;
 			break;
@@ -181,7 +182,7 @@
 /* basic switch operations **************************************************/
 static int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct net_device *master)
 {
-	struct dsa_chip_data *cd = ds->pd;
+	struct dsa_chip_data *cd = ds->cd;
 	struct device_node *port_dn;
 	struct phy_device *phydev;
 	int ret, port, mode;
@@ -218,7 +219,7 @@
 {
 	struct dsa_switch_driver *drv = ds->drv;
 	struct dsa_switch_tree *dst = ds->dst;
-	struct dsa_chip_data *pd = ds->pd;
+	struct dsa_chip_data *cd = ds->cd;
 	bool valid_name_found = false;
 	int index = ds->index;
 	int i, ret;
@@ -229,7 +230,7 @@
 	for (i = 0; i < DSA_MAX_PORTS; i++) {
 		char *name;
 
-		name = pd->port_names[i];
+		name = cd->port_names[i];
 		if (name == NULL)
 			continue;
 
@@ -245,7 +246,7 @@
 		} else if (!strcmp(name, "dsa")) {
 			ds->dsa_port_mask |= 1 << i;
 		} else {
-			ds->phys_port_mask |= 1 << i;
+			ds->enabled_port_mask |= 1 << i;
 		}
 		valid_name_found = true;
 	}
@@ -258,7 +259,7 @@
 	/* Make the built-in MII bus mask match the number of ports,
 	 * switch drivers can override this later
 	 */
-	ds->phys_mii_mask = ds->phys_port_mask;
+	ds->phys_mii_mask = ds->enabled_port_mask;
 
 	/*
 	 * If the CPU connects to this switch, set the switch tree
@@ -266,7 +267,7 @@
 	 * switch.
 	 */
 	if (dst->cpu_switch == index) {
-		switch (ds->tag_protocol) {
+		switch (drv->tag_protocol) {
 #ifdef CONFIG_NET_DSA_TAG_DSA
 		case DSA_TAG_PROTO_DSA:
 			dst->rcv = dsa_netdev_ops.rcv;
@@ -294,7 +295,7 @@
 			goto out;
 		}
 
-		dst->tag_protocol = ds->tag_protocol;
+		dst->tag_protocol = drv->tag_protocol;
 	}
 
 	/*
@@ -324,13 +325,13 @@
 	 * Create network devices for physical switch ports.
 	 */
 	for (i = 0; i < DSA_MAX_PORTS; i++) {
-		if (!(ds->phys_port_mask & (1 << i)))
+		if (!(ds->enabled_port_mask & (1 << i)))
 			continue;
 
-		ret = dsa_slave_create(ds, parent, i, pd->port_names[i]);
+		ret = dsa_slave_create(ds, parent, i, cd->port_names[i]);
 		if (ret < 0) {
 			netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n",
-				   index, i, pd->port_names[i], ret);
+				   index, i, cd->port_names[i], ret);
 			ret = 0;
 		}
 	}
@@ -378,16 +379,17 @@
 dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 		 struct device *parent, struct device *host_dev)
 {
-	struct dsa_chip_data *pd = dst->pd->chip + index;
+	struct dsa_chip_data *cd = dst->pd->chip + index;
 	struct dsa_switch_driver *drv;
 	struct dsa_switch *ds;
 	int ret;
-	char *name;
+	const char *name;
+	void *priv;
 
 	/*
 	 * Probe for switch model.
 	 */
-	drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
+	drv = dsa_switch_probe(parent, host_dev, cd->sw_addr, &name, &priv);
 	if (drv == NULL) {
 		netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n",
 			   index);
@@ -400,16 +402,16 @@
 	/*
 	 * Allocate and initialise switch state.
 	 */
-	ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL);
+	ds = devm_kzalloc(parent, sizeof(*ds), GFP_KERNEL);
 	if (ds == NULL)
 		return ERR_PTR(-ENOMEM);
 
 	ds->dst = dst;
 	ds->index = index;
-	ds->pd = pd;
+	ds->cd = cd;
 	ds->drv = drv;
-	ds->tag_protocol = drv->tag_protocol;
-	ds->master_dev = host_dev;
+	ds->priv = priv;
+	ds->dev = parent;
 
 	ret = dsa_switch_setup_one(ds, parent);
 	if (ret)
@@ -422,7 +424,7 @@
 {
 	struct device_node *port_dn;
 	struct phy_device *phydev;
-	struct dsa_chip_data *cd = ds->pd;
+	struct dsa_chip_data *cd = ds->cd;
 	int port;
 
 #ifdef CONFIG_NET_DSA_HWMON
@@ -432,7 +434,7 @@
 
 	/* Destroy network devices for physical switch ports. */
 	for (port = 0; port < DSA_MAX_PORTS; port++) {
-		if (!(ds->phys_port_mask & (1 << port)))
+		if (!(ds->enabled_port_mask & (1 << port)))
 			continue;
 
 		if (!ds->ports[port])
@@ -657,9 +659,6 @@
 	const char *port_name;
 	int chip_index, port_index;
 	const unsigned int *sw_addr, *port_reg;
-	int gpio;
-	enum of_gpio_flags of_flags;
-	unsigned long flags;
 	u32 eeprom_len;
 	int ret;
 
@@ -738,19 +737,6 @@
 			put_device(cd->host_dev);
 			cd->host_dev = &mdio_bus_switch->dev;
 		}
-		gpio = of_get_named_gpio_flags(child, "reset-gpios", 0,
-					       &of_flags);
-		if (gpio_is_valid(gpio)) {
-			flags = (of_flags == OF_GPIO_ACTIVE_LOW ?
-				 GPIOF_ACTIVE_LOW : 0);
-			ret = devm_gpio_request_one(dev, gpio, flags,
-						    "switch_reset");
-			if (ret)
-				goto out_free_chip;
-
-			cd->reset = gpio_to_desc(gpio);
-			gpiod_direction_output(cd->reset, 0);
-		}
 
 		for_each_available_child_of_node(child, port) {
 			port_reg = of_get_property(port, "reg", NULL);
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 1d1a546..dfa3377 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -22,11 +22,6 @@
 };
 
 struct dsa_slave_priv {
-	/*
-	 * The linux network interface corresponding to this
-	 * switch port.
-	 */
-	struct net_device	*dev;
 	struct sk_buff *	(*xmit)(struct sk_buff *skb,
 					struct net_device *dev);
 
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a575f03..152436c 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -50,8 +50,8 @@
 	ds->slave_mii_bus->read = dsa_slave_phy_read;
 	ds->slave_mii_bus->write = dsa_slave_phy_write;
 	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
-			ds->index, ds->pd->sw_addr);
-	ds->slave_mii_bus->parent = ds->master_dev;
+			ds->index, ds->cd->sw_addr);
+	ds->slave_mii_bus->parent = ds->dev;
 	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
 }
 
@@ -104,8 +104,8 @@
 			goto clear_promisc;
 	}
 
-	if (ds->drv->port_stp_update)
-		ds->drv->port_stp_update(ds, p->port, stp_state);
+	if (ds->drv->port_stp_state_set)
+		ds->drv->port_stp_state_set(ds, p->port, stp_state);
 
 	if (p->phy)
 		phy_start(p->phy);
@@ -147,8 +147,8 @@
 	if (ds->drv->port_disable)
 		ds->drv->port_disable(ds, p->port, p->phy);
 
-	if (ds->drv->port_stp_update)
-		ds->drv->port_stp_update(ds, p->port, BR_STATE_DISABLED);
+	if (ds->drv->port_stp_state_set)
+		ds->drv->port_stp_state_set(ds, p->port, BR_STATE_DISABLED);
 
 	return 0;
 }
@@ -207,21 +207,16 @@
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
-	int err;
 
 	if (switchdev_trans_ph_prepare(trans)) {
 		if (!ds->drv->port_vlan_prepare || !ds->drv->port_vlan_add)
 			return -EOPNOTSUPP;
 
-		err = ds->drv->port_vlan_prepare(ds, p->port, vlan, trans);
-		if (err)
-			return err;
-	} else {
-		err = ds->drv->port_vlan_add(ds, p->port, vlan, trans);
-		if (err)
-			return err;
+		return ds->drv->port_vlan_prepare(ds, p->port, vlan, trans);
 	}
 
+	ds->drv->port_vlan_add(ds, p->port, vlan, trans);
+
 	return 0;
 }
 
@@ -256,17 +251,17 @@
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
-	int ret;
 
-	if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add)
-		return -EOPNOTSUPP;
+	if (switchdev_trans_ph_prepare(trans)) {
+		if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add)
+			return -EOPNOTSUPP;
 
-	if (switchdev_trans_ph_prepare(trans))
-		ret = ds->drv->port_fdb_prepare(ds, p->port, fdb, trans);
-	else
-		ret = ds->drv->port_fdb_add(ds, p->port, fdb, trans);
+		return ds->drv->port_fdb_prepare(ds, p->port, fdb, trans);
+	}
 
-	return ret;
+	ds->drv->port_fdb_add(ds, p->port, fdb, trans);
+
+	return 0;
 }
 
 static int dsa_slave_port_fdb_del(struct net_device *dev,
@@ -305,16 +300,19 @@
 	return -EOPNOTSUPP;
 }
 
-static int dsa_slave_stp_update(struct net_device *dev, u8 state)
+static int dsa_slave_stp_state_set(struct net_device *dev,
+				   const struct switchdev_attr *attr,
+				   struct switchdev_trans *trans)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
-	int ret = -EOPNOTSUPP;
 
-	if (ds->drv->port_stp_update)
-		ret = ds->drv->port_stp_update(ds, p->port, state);
+	if (switchdev_trans_ph_prepare(trans))
+		return ds->drv->port_stp_state_set ? 0 : -EOPNOTSUPP;
 
-	return ret;
+	ds->drv->port_stp_state_set(ds, p->port, attr->u.stp_state);
+
+	return 0;
 }
 
 static int dsa_slave_vlan_filtering(struct net_device *dev,
@@ -339,17 +337,11 @@
 				   const struct switchdev_attr *attr,
 				   struct switchdev_trans *trans)
 {
-	struct dsa_slave_priv *p = netdev_priv(dev);
-	struct dsa_switch *ds = p->parent;
 	int ret;
 
 	switch (attr->id) {
 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
-		if (switchdev_trans_ph_prepare(trans))
-			ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP;
-		else
-			ret = ds->drv->port_stp_update(ds, p->port,
-						       attr->u.stp_state);
+		ret = dsa_slave_stp_state_set(dev, attr, trans);
 		break;
 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
 		ret = dsa_slave_vlan_filtering(dev, attr, trans);
@@ -468,7 +460,8 @@
 	/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
 	 * so allow it to be in BR_STATE_FORWARDING to be kept functional
 	 */
-	dsa_slave_stp_update(dev, BR_STATE_FORWARDING);
+	if (ds->drv->port_stp_state_set)
+		ds->drv->port_stp_state_set(ds, p->port, BR_STATE_FORWARDING);
 }
 
 static int dsa_slave_port_attr_get(struct net_device *dev,
@@ -622,8 +615,8 @@
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 
-	if (ds->pd->eeprom_len)
-		return ds->pd->eeprom_len;
+	if (ds->cd->eeprom_len)
+		return ds->cd->eeprom_len;
 
 	if (ds->drv->get_eeprom_len)
 		return ds->drv->get_eeprom_len(ds);
@@ -673,6 +666,78 @@
 	}
 }
 
+static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev,
+					   struct ethtool_stats *stats,
+					   uint64_t *data)
+{
+	struct dsa_switch_tree *dst = dev->dsa_ptr;
+	struct dsa_switch *ds = dst->ds[0];
+	s8 cpu_port = dst->cpu_port;
+	int count = 0;
+
+	if (dst->master_ethtool_ops.get_sset_count) {
+		count = dst->master_ethtool_ops.get_sset_count(dev,
+							       ETH_SS_STATS);
+		dst->master_ethtool_ops.get_ethtool_stats(dev, stats, data);
+	}
+
+	if (ds->drv->get_ethtool_stats)
+		ds->drv->get_ethtool_stats(ds, cpu_port, data + count);
+}
+
+static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset)
+{
+	struct dsa_switch_tree *dst = dev->dsa_ptr;
+	struct dsa_switch *ds = dst->ds[0];
+	int count = 0;
+
+	if (dst->master_ethtool_ops.get_sset_count)
+		count += dst->master_ethtool_ops.get_sset_count(dev, sset);
+
+	if (sset == ETH_SS_STATS && ds->drv->get_sset_count)
+		count += ds->drv->get_sset_count(ds);
+
+	return count;
+}
+
+static void dsa_cpu_port_get_strings(struct net_device *dev,
+				     uint32_t stringset, uint8_t *data)
+{
+	struct dsa_switch_tree *dst = dev->dsa_ptr;
+	struct dsa_switch *ds = dst->ds[0];
+	s8 cpu_port = dst->cpu_port;
+	int len = ETH_GSTRING_LEN;
+	int mcount = 0, count;
+	unsigned int i;
+	uint8_t pfx[4];
+	uint8_t *ndata;
+
+	snprintf(pfx, sizeof(pfx), "p%.2d", cpu_port);
+	/* We do not want to be NULL-terminated, since this is a prefix */
+	pfx[sizeof(pfx) - 1] = '_';
+
+	if (dst->master_ethtool_ops.get_sset_count) {
+		mcount = dst->master_ethtool_ops.get_sset_count(dev,
+								ETH_SS_STATS);
+		dst->master_ethtool_ops.get_strings(dev, stringset, data);
+	}
+
+	if (stringset == ETH_SS_STATS && ds->drv->get_strings) {
+		ndata = data + mcount * len;
+		/* The get_strings callback copies ETH_GSTRING_LEN bytes per
+		 * string; we then mangle the output to prepend the CPU port
+		 * prefix constructed earlier.
+		 */
+		ds->drv->get_strings(ds, cpu_port, ndata);
+		count = ds->drv->get_sset_count(ds);
+		for (i = 0; i < count; i++) {
+			memmove(ndata + (i * len + sizeof(pfx)),
+				ndata + i * len, len - sizeof(pfx));
+			memcpy(ndata + i * len, pfx, sizeof(pfx));
+		}
+	}
+}
+
 static void dsa_slave_get_ethtool_stats(struct net_device *dev,
 					struct ethtool_stats *stats,
 					uint64_t *data)
@@ -680,10 +745,10 @@
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 
-	data[0] = p->dev->stats.tx_packets;
-	data[1] = p->dev->stats.tx_bytes;
-	data[2] = p->dev->stats.rx_packets;
-	data[3] = p->dev->stats.rx_bytes;
+	data[0] = dev->stats.tx_packets;
+	data[1] = dev->stats.tx_bytes;
+	data[2] = dev->stats.rx_packets;
+	data[3] = dev->stats.rx_bytes;
 	if (ds->drv->get_ethtool_stats != NULL)
 		ds->drv->get_ethtool_stats(ds, p->port, data + 4);
 }
@@ -828,6 +893,8 @@
 	.get_eee		= dsa_slave_get_eee,
 };
 
+static struct ethtool_ops dsa_cpu_port_ethtool_ops;
+
 static const struct net_device_ops dsa_slave_netdev_ops = {
 	.ndo_open	 	= dsa_slave_open,
 	.ndo_stop		= dsa_slave_close,
@@ -932,7 +999,7 @@
 				struct net_device *slave_dev)
 {
 	struct dsa_switch *ds = p->parent;
-	struct dsa_chip_data *cd = ds->pd;
+	struct dsa_chip_data *cd = ds->cd;
 	struct device_node *phy_dn, *port_dn;
 	bool phy_is_fixed = false;
 	u32 phy_flags = 0;
@@ -1045,6 +1112,7 @@
 		     int port, char *name)
 {
 	struct net_device *master = ds->dst->master_netdev;
+	struct dsa_switch_tree *dst = ds->dst;
 	struct net_device *slave_dev;
 	struct dsa_slave_priv *p;
 	int ret;
@@ -1056,6 +1124,19 @@
 
 	slave_dev->features = master->vlan_features;
 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
+	if (master->ethtool_ops != &dsa_cpu_port_ethtool_ops) {
+		memcpy(&dst->master_ethtool_ops, master->ethtool_ops,
+		       sizeof(struct ethtool_ops));
+		memcpy(&dsa_cpu_port_ethtool_ops, &dst->master_ethtool_ops,
+		       sizeof(struct ethtool_ops));
+		dsa_cpu_port_ethtool_ops.get_sset_count =
+					dsa_cpu_port_get_sset_count;
+		dsa_cpu_port_ethtool_ops.get_ethtool_stats =
+					dsa_cpu_port_get_ethtool_stats;
+		dsa_cpu_port_ethtool_ops.get_strings =
+					dsa_cpu_port_get_strings;
+		master->ethtool_ops = &dsa_cpu_port_ethtool_ops;
+	}
 	eth_hw_addr_inherit(slave_dev, master);
 	slave_dev->priv_flags |= IFF_NO_QUEUE;
 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
@@ -1066,11 +1147,10 @@
 				 NULL);
 
 	SET_NETDEV_DEV(slave_dev, parent);
-	slave_dev->dev.of_node = ds->pd->port_dn[port];
+	slave_dev->dev.of_node = ds->cd->port_dn[port];
 	slave_dev->vlan_features = master->vlan_features;
 
 	p = netdev_priv(slave_dev);
-	p->dev = slave_dev;
 	p->parent = ds;
 	p->port = port;
 
diff --git a/net/hsr/Kconfig b/net/hsr/Kconfig
index 0d3d709..4b683fd 100644
--- a/net/hsr/Kconfig
+++ b/net/hsr/Kconfig
@@ -18,8 +18,9 @@
 	  earlier.
 
 	  This code is a "best effort" to comply with the HSR standard as
-	  described in IEC 62439-3:2010 (HSRv0), but no compliancy tests have
-	  been made.
+	  described in IEC 62439-3:2010 (HSRv0) and IEC 62439-3:2012 (HSRv1),
+	  but no compliance tests have been performed. Use iproute2 to select
+	  the version you desire.
 
 	  You need to perform any and all necessary tests yourself before
 	  relying on this code in a safety critical system!
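In practice the protocol version is chosen when the HSR link is created. Assuming an iproute2 build that exposes the HSR version attribute (the exact keyword is iproute2's and is not verified here), creating an HSRv1 device would look something like: ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 version 1. Omitting the version keeps the default HSRv0 behaviour, matching the IFLA_HSR_VERSION handling in net/hsr/hsr_netlink.c below.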
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index c7d1adc..16737cd 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -90,7 +90,8 @@
 
 	hsr = netdev_priv(hsr_dev);
 
-	if ((hsr_dev->operstate == IF_OPER_UP) && (old_operstate != IF_OPER_UP)) {
+	if ((hsr_dev->operstate == IF_OPER_UP)
+			&& (old_operstate != IF_OPER_UP)) {
 		/* Went up */
 		hsr->announce_count = 0;
 		hsr->announce_timer.expires = jiffies +
@@ -250,31 +251,22 @@
 	.parse	 = eth_header_parse,
 };
 
-
-/* HSR:2010 supervision frames should be padded so that the whole frame,
- * including headers and FCS, is 64 bytes (without VLAN).
- */
-static int hsr_pad(int size)
-{
-	const int min_size = ETH_ZLEN - HSR_HLEN - ETH_HLEN;
-
-	if (size >= min_size)
-		return size;
-	return min_size;
-}
-
-static void send_hsr_supervision_frame(struct hsr_port *master, u8 type)
+static void send_hsr_supervision_frame(struct hsr_port *master,
+		u8 type, u8 hsrVer)
 {
 	struct sk_buff *skb;
 	int hlen, tlen;
+	struct hsr_tag *hsr_tag;
 	struct hsr_sup_tag *hsr_stag;
 	struct hsr_sup_payload *hsr_sp;
 	unsigned long irqflags;
 
 	hlen = LL_RESERVED_SPACE(master->dev);
 	tlen = master->dev->needed_tailroom;
-	skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen,
-			GFP_ATOMIC);
+	skb = dev_alloc_skb(
+			sizeof(struct hsr_tag) +
+			sizeof(struct hsr_sup_tag) +
+			sizeof(struct hsr_sup_payload) + hlen + tlen);
 
 	if (skb == NULL)
 		return;
@@ -282,32 +274,48 @@
 	skb_reserve(skb, hlen);
 
 	skb->dev = master->dev;
-	skb->protocol = htons(ETH_P_PRP);
+	skb->protocol = htons(hsrVer ? ETH_P_HSR : ETH_P_PRP);
 	skb->priority = TC_PRIO_CONTROL;
 
-	if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
+	if (dev_hard_header(skb, skb->dev, (hsrVer ? ETH_P_HSR : ETH_P_PRP),
 			    master->hsr->sup_multicast_addr,
 			    skb->dev->dev_addr, skb->len) <= 0)
 		goto out;
 	skb_reset_mac_header(skb);
 
-	hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(*hsr_stag));
+	if (hsrVer > 0) {
+		hsr_tag = (typeof(hsr_tag)) skb_put(skb, sizeof(struct hsr_tag));
+		hsr_tag->encap_proto = htons(ETH_P_PRP);
+		set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE);
+	}
 
-	set_hsr_stag_path(hsr_stag, 0xf);
-	set_hsr_stag_HSR_Ver(hsr_stag, 0);
+	hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(struct hsr_sup_tag));
+	set_hsr_stag_path(hsr_stag, (hsrVer ? 0x0 : 0xf));
+	set_hsr_stag_HSR_Ver(hsr_stag, hsrVer);
 
+	/* From HSRv1 on we have separate supervision sequence numbers. */
 	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
-	hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
-	master->hsr->sequence_nr++;
+	if (hsrVer > 0) {
+		hsr_stag->sequence_nr = htons(master->hsr->sup_sequence_nr);
+		hsr_tag->sequence_nr = htons(master->hsr->sequence_nr);
+		master->hsr->sup_sequence_nr++;
+		master->hsr->sequence_nr++;
+	} else {
+		hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
+		master->hsr->sequence_nr++;
+	}
 	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
 
 	hsr_stag->HSR_TLV_Type = type;
-	hsr_stag->HSR_TLV_Length = 12;
+	/* TODO: Why 12 in HSRv0? */
+	hsr_stag->HSR_TLV_Length = hsrVer ? sizeof(struct hsr_sup_payload) : 12;
 
 	/* Payload: MacAddressA */
-	hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
+	hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(struct hsr_sup_payload));
 	ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
 
+	skb_put_padto(skb, ETH_ZLEN + HSR_HLEN);
+
 	hsr_forward_skb(skb, master);
 	return;
 
@@ -329,19 +337,20 @@
 	rcu_read_lock();
 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
 
-	if (hsr->announce_count < 3) {
-		send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE);
+	if (hsr->announce_count < 3 && hsr->protVersion == 0) {
+		send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE,
+				hsr->protVersion);
 		hsr->announce_count++;
-	} else {
-		send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK);
-	}
 
-	if (hsr->announce_count < 3)
 		hsr->announce_timer.expires = jiffies +
 				msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
-	else
+	} else {
+		send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
+				hsr->protVersion);
+
 		hsr->announce_timer.expires = jiffies +
 				msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+	}
 
 	if (is_admin_up(master->dev))
 		add_timer(&hsr->announce_timer);
@@ -428,7 +437,7 @@
 };
 
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
-		     unsigned char multicast_spec)
+		     unsigned char multicast_spec, u8 protocol_version)
 {
 	struct hsr_priv *hsr;
 	struct hsr_port *port;
@@ -450,18 +459,17 @@
 	spin_lock_init(&hsr->seqnr_lock);
 	/* Overflow soon to find bugs easier: */
 	hsr->sequence_nr = HSR_SEQNR_START;
+	hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;
 
-	init_timer(&hsr->announce_timer);
-	hsr->announce_timer.function = hsr_announce;
-	hsr->announce_timer.data = (unsigned long) hsr;
+	setup_timer(&hsr->announce_timer, hsr_announce, (unsigned long)hsr);
 
-	init_timer(&hsr->prune_timer);
-	hsr->prune_timer.function = hsr_prune_nodes;
-	hsr->prune_timer.data = (unsigned long) hsr;
+	setup_timer(&hsr->prune_timer, hsr_prune_nodes, (unsigned long)hsr);
 
 	ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
 	hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
 
+	hsr->protVersion = protocol_version;
+
 	/* FIXME: should I modify the value of these?
 	 *
 	 * - hsr_dev->flags - i.e.
@@ -490,8 +498,7 @@
 	if (res)
 		goto fail;
 
-	hsr->prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
-	add_timer(&hsr->prune_timer);
+	mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
 
 	return 0;
 
diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
index 108a5d5..9975e31 100644
--- a/net/hsr/hsr_device.h
+++ b/net/hsr/hsr_device.h
@@ -17,7 +17,7 @@
 
 void hsr_dev_setup(struct net_device *dev);
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
-		     unsigned char multicast_spec);
+		     unsigned char multicast_spec, u8 protocol_version);
 void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
 bool is_hsr_master(struct net_device *dev);
 int hsr_get_max_mtu(struct hsr_priv *hsr);
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 7871ed6..5ee1d43 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -50,21 +50,40 @@
  */
 static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
 {
-	struct hsr_ethhdr_sp *hdr;
+	struct ethhdr *ethHdr;
+	struct hsr_sup_tag *hsrSupTag;
+	struct hsrv1_ethhdr_sp *hsrV1Hdr;
 
 	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
-	hdr = (struct hsr_ethhdr_sp *) skb_mac_header(skb);
+	ethHdr = (struct ethhdr *) skb_mac_header(skb);
 
-	if (!ether_addr_equal(hdr->ethhdr.h_dest,
+	/* Correct addr? */
+	if (!ether_addr_equal(ethHdr->h_dest,
 			      hsr->sup_multicast_addr))
 		return false;
 
-	if (get_hsr_stag_path(&hdr->hsr_sup) != 0x0f)
+	/* Correct ether type? */
+	if (!(ethHdr->h_proto == htons(ETH_P_PRP)
+			|| ethHdr->h_proto == htons(ETH_P_HSR)))
 		return false;
-	if ((hdr->hsr_sup.HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
-	    (hdr->hsr_sup.HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
+
+	/* Get the supervision header from correct location. */
+	if (ethHdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
+		hsrV1Hdr = (struct hsrv1_ethhdr_sp *) skb_mac_header(skb);
+		if (hsrV1Hdr->hsr.encap_proto != htons(ETH_P_PRP))
+			return false;
+
+		hsrSupTag = &hsrV1Hdr->hsr_sup;
+	} else {
+		hsrSupTag = &((struct hsrv0_ethhdr_sp *) skb_mac_header(skb))->hsr_sup;
+	}
+
+	if ((hsrSupTag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
+	    (hsrSupTag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
 		return false;
-	if (hdr->hsr_sup.HSR_TLV_Length != 12)
+	if ((hsrSupTag->HSR_TLV_Length != 12) &&
+			(hsrSupTag->HSR_TLV_Length !=
+					sizeof(struct hsr_sup_payload)))
 		return false;
 
 	return true;
@@ -110,7 +129,7 @@
 
 
 static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
-			 struct hsr_port *port)
+			 struct hsr_port *port, u8 protoVersion)
 {
 	struct hsr_ethhdr *hsr_ethhdr;
 	int lane_id;
@@ -131,7 +150,8 @@
 	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
 	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
 	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
-	hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP);
+	hsr_ethhdr->ethhdr.h_proto = htons(protoVersion ?
+			ETH_P_HSR : ETH_P_PRP);
 }
 
 static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
@@ -160,7 +180,7 @@
 	memmove(dst, src, movelen);
 	skb_reset_mac_header(skb);
 
-	hsr_fill_tag(skb, frame, port);
+	hsr_fill_tag(skb, frame, port, port->hsr->protVersion);
 
 	return skb;
 }
@@ -320,7 +340,8 @@
 		/* FIXME: */
 		WARN_ONCE(1, "HSR: VLAN not yet supported");
 	}
-	if (ethhdr->h_proto == htons(ETH_P_PRP)) {
+	if (ethhdr->h_proto == htons(ETH_P_PRP)
+			|| ethhdr->h_proto == htons(ETH_P_HSR)) {
 		frame->skb_std = NULL;
 		frame->skb_hsr = skb;
 		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index bace124..7ea9258 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -177,17 +177,17 @@
 			return node;
 	}
 
-	if (!is_sup)
-		return NULL; /* Only supervision frame may create node entry */
+	/* Any frame may create a node entry for a node connected to an HSR device. */
 
-	if (ethhdr->h_proto == htons(ETH_P_PRP)) {
+	if (ethhdr->h_proto == htons(ETH_P_PRP)
+			|| ethhdr->h_proto == htons(ETH_P_HSR)) {
 		/* Use the existing sequence_nr from the tag as starting point
 		 * for filtering duplicate frames.
 		 */
 		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
 	} else {
 		WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
-		seq_out = 0;
+		seq_out = HSR_SEQNR_START;
 	}
 
 	return hsr_add_node(node_db, ethhdr->h_source, seq_out);
@@ -200,17 +200,25 @@
 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
 			  struct hsr_port *port_rcv)
 {
+	struct ethhdr *ethhdr;
 	struct hsr_node *node_real;
 	struct hsr_sup_payload *hsr_sp;
 	struct list_head *node_db;
 	int i;
 
-	skb_pull(skb, sizeof(struct hsr_ethhdr_sp));
-	hsr_sp = (struct hsr_sup_payload *) skb->data;
+	ethhdr = (struct ethhdr *) skb_mac_header(skb);
 
-	if (ether_addr_equal(eth_hdr(skb)->h_source, hsr_sp->MacAddressA))
-		/* Not sent from MacAddressB of a PICS_SUBS capable node */
-		goto done;
+	/* Leave the ethernet header. */
+	skb_pull(skb, sizeof(struct ethhdr));
+
+	/* And leave the HSR tag. */
+	if (ethhdr->h_proto == htons(ETH_P_HSR))
+		skb_pull(skb, sizeof(struct hsr_tag));
+
+	/* And leave the HSR sup tag. */
+	skb_pull(skb, sizeof(struct hsr_sup_tag));
+
+	hsr_sp = (struct hsr_sup_payload *) skb->data;
 
 	/* Merge node_curr (registered on MacAddressB) into node_real */
 	node_db = &port_rcv->hsr->node_db;
@@ -225,7 +233,7 @@
 		/* Node has already been merged */
 		goto done;
 
-	ether_addr_copy(node_real->MacAddressB, eth_hdr(skb)->h_source);
+	ether_addr_copy(node_real->MacAddressB, ethhdr->h_source);
 	for (i = 0; i < HSR_PT_PORTS; i++) {
 		if (!node_curr->time_in_stale[i] &&
 		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
@@ -241,7 +249,7 @@
 	kfree_rcu(node_curr, rcu_head);
 
 done:
-	skb_push(skb, sizeof(struct hsr_ethhdr_sp));
+	skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
 }
 
 
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 5a9c699..9b9909e8 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -30,6 +30,7 @@
  */
 #define MAX_SLAVE_DIFF			 3000 /* ms */
 #define HSR_SEQNR_START			(USHRT_MAX - 1024)
+#define HSR_SUP_SEQNR_START		(HSR_SEQNR_START / 2)
 
 
 /* How often shall we check for broken ring and remove node entries older than
@@ -58,6 +59,8 @@
 
 #define HSR_HLEN	6
 
+#define HSR_V1_SUP_LSDUSIZE		52
+
 /* The helper functions below assumes that 'path' occupies the 4 most
  * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
  * equivalently, the 4 most significant bits of HSR tag byte 14).
@@ -131,11 +134,17 @@
 	set_hsr_tag_LSDU_size((struct hsr_tag *) hst, HSR_Ver);
 }
 
-struct hsr_ethhdr_sp {
+struct hsrv0_ethhdr_sp {
 	struct ethhdr		ethhdr;
 	struct hsr_sup_tag	hsr_sup;
 } __packed;
 
+struct hsrv1_ethhdr_sp {
+	struct ethhdr		ethhdr;
+	struct hsr_tag		hsr;
+	struct hsr_sup_tag	hsr_sup;
+} __packed;
+
 
 enum hsr_port_type {
 	HSR_PT_NONE = 0,	/* Must be 0, used by framereg */
@@ -162,6 +171,8 @@
 	struct timer_list	prune_timer;
 	int announce_count;
 	u16 sequence_nr;
+	u16 sup_sequence_nr;			/* For HSRv1 separate seq_nr for supervision */
+	u8 protVersion;					/* Indicate if HSRv0 or HSRv1. */
 	spinlock_t seqnr_lock;			/* locking for sequence_nr */
 	unsigned char		sup_multicast_addr[ETH_ALEN];
 };
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index a2c7e4c..d4d1617 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -23,7 +23,8 @@
 	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },
 	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },
 	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },
-	[IFLA_HSR_SUPERVISION_ADDR]	= { .type = NLA_BINARY, .len = ETH_ALEN },
+	[IFLA_HSR_VERSION]	= { .type = NLA_U8 },
+	[IFLA_HSR_SUPERVISION_ADDR]	= { .len = ETH_ALEN },
 	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
 };
 
@@ -35,7 +36,7 @@
 		       struct nlattr *tb[], struct nlattr *data[])
 {
 	struct net_device *link[2];
-	unsigned char multicast_spec;
+	unsigned char multicast_spec, hsr_version;
 
 	if (!data) {
 		netdev_info(dev, "HSR: No slave devices specified\n");
@@ -62,7 +63,12 @@
 	else
 		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
 
-	return hsr_dev_finalize(dev, link, multicast_spec);
+	if (!data[IFLA_HSR_VERSION])
+		hsr_version = 0;
+	else
+		hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
+
+	return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
 }
 
 static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
@@ -115,10 +121,9 @@
 
 
 /* attribute policy */
-/* NLA_BINARY missing in libnl; use NLA_UNSPEC in userspace instead. */
 static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
-	[HSR_A_NODE_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
-	[HSR_A_NODE_ADDR_B] = { .type = NLA_BINARY, .len = ETH_ALEN },
+	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
+	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
 	[HSR_A_IFINDEX] = { .type = NLA_U32 },
 	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
 	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index 7d37366..f5b6038 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -22,6 +22,7 @@
 {
 	struct sk_buff *skb = *pskb;
 	struct hsr_port *port;
+	u16 protocol;
 
 	if (!skb_mac_header_was_set(skb)) {
 		WARN_ONCE(1, "%s: skb invalid", __func__);
@@ -37,7 +38,8 @@
 		goto finish_consume;
 	}
 
-	if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP))
+	protocol = eth_hdr(skb)->h_proto;
+	if (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR))
 		goto finish_pass;
 
 	skb_push(skb, ETH_HLEN);
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index b4e17a7..5ac7789 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -41,24 +41,12 @@
 		return (((__force u64)a->extended_addr) >> 32) ^
 			(((__force u64)a->extended_addr) & 0xffffffff);
 	case IEEE802154_ADDR_SHORT:
-		return (__force u32)(a->short_addr);
+		return (__force u32)(a->short_addr + (a->pan_id << 16));
 	default:
 		return 0;
 	}
 }
 
-/* private device info */
-struct lowpan_dev_info {
-	struct net_device	*wdev; /* wpan device ptr */
-	u16			fragment_tag;
-};
-
-static inline struct
-lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
-{
-	return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
-}
-
 int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
 void lowpan_net_frag_exit(void);
 int lowpan_net_frag_init(void);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 0023c90..dd085db 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -148,7 +148,7 @@
 		return -EBUSY;
 	}
 
-	lowpan_dev_info(ldev)->wdev = wdev;
+	lowpan_802154_dev(ldev)->wdev = wdev;
 	/* Set the lowpan hardware address to the wpan hardware address. */
 	memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN);
 	/* We need headroom for possible wpan_dev_hard_header call and tailroom
@@ -173,7 +173,7 @@
 
 static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
 {
-	struct net_device *wdev = lowpan_dev_info(ldev)->wdev;
+	struct net_device *wdev = lowpan_802154_dev(ldev)->wdev;
 
 	ASSERT_RTNL();
 
@@ -184,7 +184,7 @@
 
 static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
 	.kind		= "lowpan",
-	.priv_size	= LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
+	.priv_size	= LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)),
 	.setup		= lowpan_setup,
 	.newlink	= lowpan_newlink,
 	.dellink	= lowpan_dellink,
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index d4353fa..e459afd 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -84,7 +84,7 @@
 lowpan_alloc_frag(struct sk_buff *skb, int size,
 		  const struct ieee802154_hdr *master_hdr, bool frag1)
 {
-	struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev;
+	struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev;
 	struct sk_buff *frag;
 	int rc;
 
@@ -148,8 +148,8 @@
 	int frag_cap, frag_len, payload_cap, rc;
 	int skb_unprocessed, skb_offset;
 
-	frag_tag = htons(lowpan_dev_info(ldev)->fragment_tag);
-	lowpan_dev_info(ldev)->fragment_tag++;
+	frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag);
+	lowpan_802154_dev(ldev)->fragment_tag++;
 
 	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
 	frag_hdr[1] = dgram_size & 0xff;
@@ -208,7 +208,7 @@
 static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
 			 u16 *dgram_size, u16 *dgram_offset)
 {
-	struct wpan_dev *wpan_dev = lowpan_dev_info(ldev)->wdev->ieee802154_ptr;
+	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
 	struct ieee802154_addr sa, da;
 	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 	struct lowpan_addr_info info;
@@ -248,8 +248,8 @@
 		cb->ackreq = wpan_dev->ackreq;
 	}
 
-	return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa,
-				    0);
+	return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev, &da,
+				    &sa, 0);
 }
 
 netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
@@ -283,7 +283,7 @@
 	max_single = ieee802154_max_payload(&wpan_hdr);
 
 	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
-		skb->dev = lowpan_dev_info(ldev)->wdev;
+		skb->dev = lowpan_802154_dev(ldev)->wdev;
 		ldev->stats.tx_packets++;
 		ldev->stats.tx_bytes += dgram_size;
 		return dev_queue_xmit(skb);
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 3503c38..d3cbb32 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -34,9 +34,11 @@
 
 #include "ieee802154.h"
 
-static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr)
+static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr,
+			  int padattr)
 {
-	return nla_put_u64(msg, type, swab64((__force u64)hwaddr));
+	return nla_put_u64_64bit(msg, type, swab64((__force u64)hwaddr),
+				 padattr);
 }
 
 static __le64 nla_get_hwaddr(const struct nlattr *nla)
@@ -623,7 +625,8 @@
 
 		if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
 		    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
-				   desc->device_addr.extended_addr))
+				   desc->device_addr.extended_addr,
+				   IEEE802154_ATTR_PAD))
 			return -EMSGSIZE;
 	}
 
@@ -638,7 +641,7 @@
 
 	if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
 	    nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
-			   desc->extended_source))
+			   desc->extended_source, IEEE802154_ATTR_PAD))
 		return -EMSGSIZE;
 
 	return 0;
@@ -1063,7 +1066,8 @@
 	    nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) ||
 	    nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
 			      desc->short_addr) ||
-	    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) ||
+	    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr,
+			   IEEE802154_ATTR_PAD) ||
 	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
 			desc->frame_counter) ||
 	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
@@ -1167,7 +1171,8 @@
 
 	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
 	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
-	    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) ||
+	    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr,
+			   IEEE802154_ATTR_PAD) ||
 	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
 			devkey->frame_counter) ||
 	    ieee802154_llsec_fill_key_id(msg, &devkey->key_id))
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 16ef0d9..ca207db 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -722,7 +722,8 @@
 			break;
 		case NL802154_DEV_ADDR_EXTENDED:
 			if (nla_put_le64(msg, NL802154_DEV_ADDR_ATTR_EXTENDED,
-					 desc->device_addr.extended_addr))
+					 desc->device_addr.extended_addr,
+					 NL802154_DEV_ADDR_ATTR_PAD))
 				return -ENOBUFS;
 			break;
 		default:
@@ -742,7 +743,8 @@
 		break;
 	case NL802154_KEY_ID_MODE_INDEX_EXTENDED:
 		if (nla_put_le64(msg, NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
-				 desc->extended_source))
+				 desc->extended_source,
+				 NL802154_KEY_ID_ATTR_PAD))
 			return -ENOBUFS;
 		break;
 	default:
@@ -811,7 +813,8 @@
 
 	if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) ||
 	    nla_put_u32(msg, NL802154_ATTR_IFTYPE, wpan_dev->iftype) ||
-	    nla_put_u64(msg, NL802154_ATTR_WPAN_DEV, wpan_dev_id(wpan_dev)) ||
+	    nla_put_u64_64bit(msg, NL802154_ATTR_WPAN_DEV,
+			      wpan_dev_id(wpan_dev), NL802154_ATTR_PAD) ||
 	    nla_put_u32(msg, NL802154_ATTR_GENERATION,
 			rdev->devlist_generation ^
 			(cfg802154_rdev_list_generation << 2)))
@@ -819,7 +822,8 @@
 
 	/* address settings */
 	if (nla_put_le64(msg, NL802154_ATTR_EXTENDED_ADDR,
-			 wpan_dev->extended_addr) ||
+			 wpan_dev->extended_addr,
+			 NL802154_ATTR_PAD) ||
 	    nla_put_le16(msg, NL802154_ATTR_SHORT_ADDR,
 			 wpan_dev->short_addr) ||
 	    nla_put_le16(msg, NL802154_ATTR_PAN_ID, wpan_dev->pan_id))
@@ -1074,6 +1078,11 @@
 	if (netif_running(dev))
 		return -EBUSY;
 
+	if (wpan_dev->lowpan_dev) {
+		if (netif_running(wpan_dev->lowpan_dev))
+			return -EBUSY;
+	}
+
 	/* don't change address fields on monitor */
 	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
 	    !info->attrs[NL802154_ATTR_PAN_ID])
@@ -1105,6 +1114,11 @@
 	if (netif_running(dev))
 		return -EBUSY;
 
+	if (wpan_dev->lowpan_dev) {
+		if (netif_running(wpan_dev->lowpan_dev))
+			return -EBUSY;
+	}
+
 	/* don't change address fields on monitor */
 	if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
 	    !info->attrs[NL802154_ATTR_SHORT_ADDR])
@@ -1614,7 +1628,7 @@
 	    nla_put_le16(msg, NL802154_DEV_ATTR_SHORT_ADDR,
 			 dev_desc->short_addr) ||
 	    nla_put_le64(msg, NL802154_DEV_ATTR_EXTENDED_ADDR,
-			 dev_desc->hwaddr) ||
+			 dev_desc->hwaddr, NL802154_DEV_ATTR_PAD) ||
 	    nla_put_u8(msg, NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
 		       dev_desc->seclevel_exempt) ||
 	    nla_put_u32(msg, NL802154_DEV_ATTR_KEY_MODE, dev_desc->key_mode))
@@ -1778,7 +1792,7 @@
 		goto nla_put_failure;
 
 	if (nla_put_le64(msg, NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
-			 extended_addr) ||
+			 extended_addr, NL802154_DEVKEY_ATTR_PAD) ||
 	    nla_put_u32(msg, NL802154_DEVKEY_ATTR_FRAME_COUNTER,
 			devkey->frame_counter))
 		goto nla_put_failure;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9e48199..2e6e65f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -948,6 +948,7 @@
 	.recvmsg	   = inet_recvmsg,
 	.mmap		   = sock_no_mmap,
 	.sendpage	   = inet_sendpage,
+	.set_peek_off	   = sk_set_peek_off,
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_sock_common_setsockopt,
 	.compat_getsockopt = compat_sock_common_getsockopt,
@@ -1106,7 +1107,7 @@
 	struct ip_options_rcu *inet_opt;
 
 	inet_opt = rcu_dereference_protected(inet->inet_opt,
-					     sock_owned_by_user(sk));
+					     lockdep_sock_is_held(sk));
 	if (inet_opt && inet_opt->opt.srr)
 		daddr = inet_opt->opt.faddr;
 
@@ -1194,12 +1195,12 @@
 static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 					netdev_features_t features)
 {
+	bool udpfrag = false, fixedid = false, encap;
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	const struct net_offload *ops;
 	unsigned int offset = 0;
-	bool udpfrag, encap;
 	struct iphdr *iph;
-	int proto;
+	int proto, tot_len;
 	int nhoff;
 	int ihl;
 	int id;
@@ -1216,7 +1217,9 @@
 		       SKB_GSO_TCPV6 |
 		       SKB_GSO_UDP_TUNNEL |
 		       SKB_GSO_UDP_TUNNEL_CSUM |
+		       SKB_GSO_TCP_FIXEDID |
 		       SKB_GSO_TUNNEL_REMCSUM |
+		       SKB_GSO_PARTIAL |
 		       0)))
 		goto out;
 
@@ -1247,11 +1250,14 @@
 
 	segs = ERR_PTR(-EPROTONOSUPPORT);
 
-	if (skb->encapsulation &&
-	    skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
-		udpfrag = proto == IPPROTO_UDP && encap;
-	else
-		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;
+	if (!skb->encapsulation || encap) {
+		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
+		fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
+
+		/* fixed ID is invalid if DF bit is not set */
+		if (fixedid && !(iph->frag_off & htons(IP_DF)))
+			goto out;
+	}
 
 	ops = rcu_dereference(inet_offloads[proto]);
 	if (likely(ops && ops->callbacks.gso_segment))
@@ -1264,15 +1270,25 @@
 	do {
 		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
 		if (udpfrag) {
-			iph->id = htons(id);
 			iph->frag_off = htons(offset >> 3);
 			if (skb->next)
 				iph->frag_off |= htons(IP_MF);
 			offset += skb->len - nhoff - ihl;
+			tot_len = skb->len - nhoff;
+		} else if (skb_is_gso(skb)) {
+			if (!fixedid) {
+				iph->id = htons(id);
+				id += skb_shinfo(skb)->gso_segs;
+			}
+			tot_len = skb_shinfo(skb)->gso_size +
+				  SKB_GSO_CB(skb)->data_offset +
+				  skb->head - (unsigned char *)iph;
 		} else {
-			iph->id = htons(id++);
+			if (!fixedid)
+				iph->id = htons(id++);
+			tot_len = skb->len - nhoff;
 		}
-		iph->tot_len = htons(skb->len - nhoff);
+		iph->tot_len = htons(tot_len);
 		ip_send_check(iph);
 		if (encap)
 			skb_reset_inner_headers(skb);
@@ -1324,6 +1340,7 @@
 
 	for (p = *head; p; p = p->next) {
 		struct iphdr *iph2;
+		u16 flush_id;
 
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
@@ -1347,16 +1364,36 @@
 			(iph->tos ^ iph2->tos) |
 			((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));
 
-		/* Save the IP ID check to be included later when we get to
-		 * the transport layer so only the inner most IP ID is checked.
-		 * This is because some GSO/TSO implementations do not
-		 * correctly increment the IP ID for the outer hdrs.
-		 */
-		NAPI_GRO_CB(p)->flush_id =
-			    ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
 		NAPI_GRO_CB(p)->flush |= flush;
+
+		/* We need to store the IP ID check to be included later
+		 * when we can verify that this packet does in fact belong
+		 * to a given flow.
+		 */
+		flush_id = (u16)(id - ntohs(iph2->id));
+
+		/* This bit of code makes it much easier for us to identify
+		 * the cases where we are doing atomic vs non-atomic IP ID
+		 * checks.  Specifically an atomic check can return IP ID
+		 * values 0 - 0xFFFF, while a non-atomic check can only
+		 * return 0 or 0xFFFF.
+		 */
+		if (!NAPI_GRO_CB(p)->is_atomic ||
+		    !(iph->frag_off & htons(IP_DF))) {
+			flush_id ^= NAPI_GRO_CB(p)->count;
+			flush_id = flush_id ? 0xFFFF : 0;
+		}
+
+		/* If the previous IP ID value was based on an atomic
+		 * datagram we can overwrite the value and ignore it.
+		 */
+		if (NAPI_GRO_CB(skb)->is_atomic)
+			NAPI_GRO_CB(p)->flush_id = flush_id;
+		else
+			NAPI_GRO_CB(p)->flush_id |= flush_id;
 	}
 
+	NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
 	NAPI_GRO_CB(skb)->flush |= flush;
 	skb_set_network_header(skb, off);
 	/* The above will be needed by the transport layer if there is one
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c34c754..89a8cac4 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -436,7 +436,7 @@
 	if (IS_ERR(rt))
 		return 1;
 	if (rt->dst.dev != dev) {
-		NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
+		__NET_INC_STATS(net, LINUX_MIB_ARPFILTER);
 		flag = 1;
 	}
 	ip_rt_put(rt);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index bdb2a07..40d6b87 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1933,7 +1933,8 @@
 
 	sk_inet = inet_sk(sk);
 
-	old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));
+	old = rcu_dereference_protected(sk_inet->inet_opt,
+					lockdep_sock_is_held(sk));
 	if (sk_inet->is_icsk) {
 		sk_conn = inet_csk(sk);
 		if (old)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 8a9246d..ef2ebeb 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -110,6 +110,7 @@
 	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
 	return tb;
 }
+EXPORT_SYMBOL_GPL(fib_new_table);
 
 /* caller must hold either rtnl or rcu read lock */
 struct fib_table *fib_get_table(struct net *net, u32 id)
@@ -904,7 +905,11 @@
 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
 		if (!prim) {
-			pr_warn("%s: bug: prim == NULL\n", __func__);
+			/* if the device has been deleted, we don't perform
+			 * address promotion
+			 */
+			if (!in_dev->dead)
+				pr_warn("%s: bug: prim == NULL\n", __func__);
 			return;
 		}
 		if (iprim && iprim != prim) {
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d97268e..d09173b 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -975,6 +975,8 @@
 			val = 65535 - 40;
 		if (type == RTAX_MTU && val > 65535 - 15)
 			val = 65535 - 15;
+		if (type == RTAX_HOPLIMIT && val > 255)
+			val = 255;
 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
 			return -EINVAL;
 		fi->fib_metrics[type - 1] = val;
@@ -1559,21 +1561,45 @@
 }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
+static bool fib_good_nh(const struct fib_nh *nh)
+{
+	int state = NUD_REACHABLE;
+
+	if (nh->nh_scope == RT_SCOPE_LINK) {
+		struct neighbour *n;
+
+		rcu_read_lock_bh();
+
+		n = __ipv4_neigh_lookup_noref(nh->nh_dev, nh->nh_gw);
+		if (n)
+			state = n->nud_state;
+
+		rcu_read_unlock_bh();
+	}
+
+	return !!(state & NUD_VALID);
+}
 
 void fib_select_multipath(struct fib_result *res, int hash)
 {
 	struct fib_info *fi = res->fi;
+	struct net *net = fi->fib_net;
+	bool first = false;
 
 	for_nexthops(fi) {
 		if (hash > atomic_read(&nh->nh_upper_bound))
 			continue;
 
-		res->nh_sel = nhsel;
-		return;
+		if (!net->ipv4.sysctl_fib_multipath_use_neigh ||
+		    fib_good_nh(nh)) {
+			res->nh_sel = nhsel;
+			return;
+		}
+		if (!first) {
+			res->nh_sel = nhsel;
+			first = true;
+		}
 	} endfor_nexthops(fi);
-
-	/* Race condition: route has just become dead. */
-	res->nh_sel = 0;
 }
 #endif
 
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index a0586b4..eeec7d6 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -22,7 +22,6 @@
 	u8 flags;
 	__be16 port;
 	u16 type;
-	struct udp_offload udp_offloads;
 	struct list_head list;
 	struct rcu_head rcu;
 };
@@ -186,15 +185,26 @@
 	return 0;
 }
 
-static struct sk_buff **fou_gro_receive(struct sk_buff **head,
-					struct sk_buff *skb,
-					struct udp_offload *uoff)
+static struct sk_buff **fou_gro_receive(struct sock *sk,
+					struct sk_buff **head,
+					struct sk_buff *skb)
 {
 	const struct net_offload *ops;
 	struct sk_buff **pp = NULL;
-	u8 proto = NAPI_GRO_CB(skb)->proto;
+	u8 proto = fou_from_sock(sk)->protocol;
 	const struct net_offload **offloads;
 
+	/* We can clear the encap_mark for FOU as we are essentially doing
+	 * one of two possible things.  We are either adding an L4 tunnel
+	 * header to the outer L3 tunnel header, or we are simply
+	 * treating the GRE tunnel header as though it is a UDP protocol
+	 * specific header such as VXLAN or GENEVE.
+	 */
+	NAPI_GRO_CB(skb)->encap_mark = 0;
+
+	/* Flag this frame as already having an outer encap header */
+	NAPI_GRO_CB(skb)->is_fou = 1;
+
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[proto]);
@@ -209,16 +219,14 @@
 	return pp;
 }
 
-static int fou_gro_complete(struct sk_buff *skb, int nhoff,
-			    struct udp_offload *uoff)
+static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
+			    int nhoff)
 {
 	const struct net_offload *ops;
-	u8 proto = NAPI_GRO_CB(skb)->proto;
+	u8 proto = fou_from_sock(sk)->protocol;
 	int err = -ENOSYS;
 	const struct net_offload **offloads;
 
-	udp_tunnel_gro_complete(skb, nhoff);
-
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[proto]);
@@ -227,6 +235,8 @@
 
 	err = ops->callbacks.gro_complete(skb, nhoff);
 
+	skb_set_inner_mac_header(skb, nhoff);
+
 out_unlock:
 	rcu_read_unlock();
 
@@ -256,9 +266,9 @@
 	return guehdr;
 }
 
-static struct sk_buff **gue_gro_receive(struct sk_buff **head,
-					struct sk_buff *skb,
-					struct udp_offload *uoff)
+static struct sk_buff **gue_gro_receive(struct sock *sk,
+					struct sk_buff **head,
+					struct sk_buff *skb)
 {
 	const struct net_offload **offloads;
 	const struct net_offload *ops;
@@ -269,7 +279,7 @@
 	void *data;
 	u16 doffset = 0;
 	int flush = 1;
-	struct fou *fou = container_of(uoff, struct fou, udp_offloads);
+	struct fou *fou = fou_from_sock(sk);
 	struct gro_remcsum grc;
 
 	skb_gro_remcsum_init(&grc);
@@ -352,6 +362,17 @@
 		}
 	}
 
+	/* We can clear the encap_mark for GUE as we are essentially doing
+	 * one of two possible things.  We are either adding an L4 tunnel
+	 * header to the outer L3 tunnel header, or we are simply
+	 * treating the GRE tunnel header as though it is a UDP protocol
+	 * specific header such as VXLAN or GENEVE.
+	 */
+	NAPI_GRO_CB(skb)->encap_mark = 0;
+
+	/* Flag this frame as already having an outer encap header */
+	NAPI_GRO_CB(skb)->is_fou = 1;
+
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
@@ -370,8 +391,7 @@
 	return pp;
 }
 
-static int gue_gro_complete(struct sk_buff *skb, int nhoff,
-			    struct udp_offload *uoff)
+static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 {
 	const struct net_offload **offloads;
 	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
@@ -392,6 +412,8 @@
 
 	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
 
+	skb_set_inner_mac_header(skb, nhoff + guehlen);
+
 out_unlock:
 	rcu_read_unlock();
 	return err;
@@ -419,10 +441,7 @@
 static void fou_release(struct fou *fou)
 {
 	struct socket *sock = fou->sock;
-	struct sock *sk = sock->sk;
 
-	if (sk->sk_family == AF_INET)
-		udp_del_offload(&fou->udp_offloads);
 	list_del(&fou->list);
 	udp_tunnel_sock_release(sock);
 
@@ -432,11 +451,9 @@
 static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
 {
 	udp_sk(sk)->encap_rcv = fou_udp_recv;
-	fou->protocol = cfg->protocol;
-	fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
-	fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
-	fou->udp_offloads.port = cfg->udp_config.local_udp_port;
-	fou->udp_offloads.ipproto = cfg->protocol;
+	udp_sk(sk)->gro_receive = fou_gro_receive;
+	udp_sk(sk)->gro_complete = fou_gro_complete;
+	fou_from_sock(sk)->protocol = cfg->protocol;
 
 	return 0;
 }
@@ -444,9 +461,8 @@
 static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
 {
 	udp_sk(sk)->encap_rcv = gue_udp_recv;
-	fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
-	fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
-	fou->udp_offloads.port = cfg->udp_config.local_udp_port;
+	udp_sk(sk)->gro_receive = gue_gro_receive;
+	udp_sk(sk)->gro_complete = gue_gro_complete;
 
 	return 0;
 }
@@ -505,12 +521,6 @@
 
 	sk->sk_allocation = GFP_ATOMIC;
 
-	if (cfg->udp_config.family == AF_INET) {
-		err = udp_add_offload(net, &fou->udp_offloads);
-		if (err)
-			goto error;
-	}
-
 	err = fou_add_to_port_list(net, fou);
 	if (err)
 		goto error;
@@ -794,11 +804,11 @@
 	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
 						       SKB_GSO_UDP_TUNNEL;
 	__be16 sport;
+	int err;
 
-	skb = iptunnel_handle_offloads(skb, type);
-
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
+	err = iptunnel_handle_offloads(skb, type);
+	if (err)
+		return err;
 
 	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
 					       skb, 0, 0, false);
@@ -818,6 +828,7 @@
 	__be16 sport;
 	void *data;
 	bool need_priv = false;
+	int err;
 
 	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
 	    skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -828,10 +839,9 @@
 
 	optlen += need_priv ? GUE_LEN_PRIV : 0;
 
-	skb = iptunnel_handle_offloads(skb, type);
-
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
+	err = iptunnel_handle_offloads(skb, type);
+	if (err)
+		return err;
 
 	/* Get source port (based on flow hash) before skb_push */
 	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index d9c552a..4c39f4f 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -60,6 +60,67 @@
 }
 EXPORT_SYMBOL_GPL(gre_del_protocol);
 
+/* Fills in tpi and returns header length to be pulled. */
+int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+		     bool *csum_err, __be16 proto)
+{
+	const struct gre_base_hdr *greh;
+	__be32 *options;
+	int hdr_len;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+		return -EINVAL;
+
+	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
+		return -EINVAL;
+
+	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+	hdr_len = gre_calc_hlen(tpi->flags);
+
+	if (!pskb_may_pull(skb, hdr_len))
+		return -EINVAL;
+
+	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	tpi->proto = greh->protocol;
+
+	options = (__be32 *)(greh + 1);
+	if (greh->flags & GRE_CSUM) {
+		if (skb_checksum_simple_validate(skb)) {
+			*csum_err = true;
+			return -EINVAL;
+		}
+
+		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+					 null_compute_pseudo);
+		options++;
+	}
+
+	if (greh->flags & GRE_KEY) {
+		tpi->key = *options;
+		options++;
+	} else {
+		tpi->key = 0;
+	}
+	if (unlikely(greh->flags & GRE_SEQ)) {
+		tpi->seq = *options;
+		options++;
+	} else {
+		tpi->seq = 0;
+	}
+	/* WCCP version 1 and 2 protocol decoding.
+	 * - Change protocol to IPv4/IPv6
+	 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
+	 */
+	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+		tpi->proto = proto;
+		if ((*(u8 *)options & 0xF0) != 0x40)
+			hdr_len += 4;
+	}
+	return hdr_len;
+}
+EXPORT_SYMBOL(gre_parse_header);
+
 static int gre_rcv(struct sk_buff *skb)
 {
 	const struct gre_protocol *proto;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index c47539d..e88190a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -32,10 +32,12 @@
 				  SKB_GSO_UDP |
 				  SKB_GSO_DODGY |
 				  SKB_GSO_TCP_ECN |
+				  SKB_GSO_TCP_FIXEDID |
 				  SKB_GSO_GRE |
 				  SKB_GSO_GRE_CSUM |
 				  SKB_GSO_IPIP |
-				  SKB_GSO_SIT)))
+				  SKB_GSO_SIT |
+				  SKB_GSO_PARTIAL)))
 		goto out;
 
 	if (!skb->encapsulation)
@@ -86,7 +88,7 @@
 	skb = segs;
 	do {
 		struct gre_base_hdr *greh;
-		__be32 *pcsum;
+		__sum16 *pcsum;
 
 		/* Set up inner headers if we are offloading inner checksum */
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -106,10 +108,25 @@
 			continue;
 
 		greh = (struct gre_base_hdr *)skb_transport_header(skb);
-		pcsum = (__be32 *)(greh + 1);
+		pcsum = (__sum16 *)(greh + 1);
 
-		*pcsum = 0;
-		*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
+		if (skb_is_gso(skb)) {
+			unsigned int partial_adj;
+
+			/* Adjust checksum to account for the fact that
+			 * the partial checksum is based on actual size
+			 * whereas headers should be based on MSS size.
+			 */
+			partial_adj = skb->len + skb_headroom(skb) -
+				      SKB_GSO_CB(skb)->data_offset -
+				      skb_shinfo(skb)->gso_size;
+			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
+		} else {
+			*pcsum = 0;
+		}
+
+		*(pcsum + 1) = 0;
+		*pcsum = gso_make_checksum(skb, 0);
 	} while ((skb = skb->next));
 out:
 	return segs;
@@ -150,6 +167,14 @@
 	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
 		goto out;
 
+	/* We can only support GRE_CSUM if we can track the location of
+	 * the GRE header.  In the case of FOU/GUE we cannot because the
+	 * outer UDP header displaces the GRE header leaving us in a state
+	 * of limbo.
+	 */
+	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
+		goto out;
+
 	type = greh->protocol;
 
 	rcu_read_lock();
@@ -267,6 +292,18 @@
 
 static int __init gre_offload_init(void)
 {
-	return inet_add_offload(&gre_offload, IPPROTO_GRE);
+	int err;
+
+	err = inet_add_offload(&gre_offload, IPPROTO_GRE);
+#if IS_ENABLED(CONFIG_IPV6)
+	if (err)
+		return err;
+
+	err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
+	if (err)
+		inet_del_offload(&gre_offload, IPPROTO_GRE);
+#endif
+
+	return err;
 }
 device_initcall(gre_offload_init);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 6333489..38abe70 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -363,7 +363,7 @@
 			   icmp_param->data_len+icmp_param->head_len,
 			   icmp_param->head_len,
 			   ipc, rt, MSG_DONTWAIT) < 0) {
-		ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
+		__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
 		ip_flush_pending_frames(sk);
 	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
 		struct icmphdr *icmph = icmp_hdr(skb);
@@ -744,7 +744,7 @@
 	 * avoid additional coding at protocol handlers.
 	 */
 	if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
-		ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
 		return;
 	}
 
@@ -865,7 +865,7 @@
 out:
 	return true;
 out_err:
-	ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 	return false;
 }
 
@@ -877,7 +877,7 @@
 static bool icmp_redirect(struct sk_buff *skb)
 {
 	if (skb->len < sizeof(struct iphdr)) {
-		ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
 		return false;
 	}
 
@@ -956,7 +956,7 @@
 	return true;
 
 out_err:
-	ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
+	__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
 	return false;
 }
 
@@ -996,7 +996,7 @@
 		skb_set_network_header(skb, nh);
 	}
 
-	ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
+	__ICMP_INC_STATS(net, ICMP_MIB_INMSGS);
 
 	if (skb_checksum_simple_validate(skb))
 		goto csum_error;
@@ -1006,7 +1006,7 @@
 
 	icmph = icmp_hdr(skb);
 
-	ICMPMSGIN_INC_STATS_BH(net, icmph->type);
+	ICMPMSGIN_INC_STATS(net, icmph->type);
 	/*
 	 *	18 is the highest 'known' ICMP type. Anything else is a mystery
 	 *
@@ -1052,9 +1052,9 @@
 	kfree_skb(skb);
 	return 0;
 csum_error:
-	ICMP_INC_STATS_BH(net, ICMP_MIB_CSUMERRORS);
+	__ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS);
 error:
-	ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 	goto drop;
 }
 
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index bc5196e..fa8c398 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -427,7 +427,7 @@
 route_err:
 	ip_rt_put(rt);
 no_route:
-	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_req);
@@ -466,7 +466,7 @@
 	ip_rt_put(rt);
 no_route:
 	rcu_read_unlock();
-	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
@@ -661,6 +661,9 @@
 		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
 		newsk->sk_write_space = sk_stream_write_space;
 
+		/* listeners have SOCK_RCU_FREE, not the children */
+		sock_reset_flag(newsk, SOCK_RCU_FREE);
+
 		newsk->sk_mark = inet_rsk(req)->ir_mark;
 		atomic64_set(&newsk->sk_cookie,
 			     atomic64_read(&inet_rsk(req)->ir_cookie));
@@ -703,7 +706,9 @@
 
 	sk_refcnt_debug_release(sk);
 
+	local_bh_disable();
 	percpu_counter_dec(sk->sk_prot->orphan_count);
+	local_bh_enable();
 	sock_put(sk);
 }
 EXPORT_SYMBOL(inet_csk_destroy_sock);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 5fdb02f..25af124 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -66,7 +66,7 @@
 	mutex_unlock(&inet_diag_table_mutex);
 }
 
-static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
+void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
 {
 	r->idiag_family = sk->sk_family;
 
@@ -89,6 +89,7 @@
 	r->id.idiag_dst[0] = sk->sk_daddr;
 	}
 }
+EXPORT_SYMBOL_GPL(inet_diag_msg_common_fill);
 
 static size_t inet_sk_attr_size(void)
 {
@@ -104,36 +105,11 @@
 		+ 64;
 }
 
-int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
-		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
-		      struct user_namespace *user_ns,
-		      u32 portid, u32 seq, u16 nlmsg_flags,
-		      const struct nlmsghdr *unlh)
+int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+			     struct inet_diag_msg *r, int ext,
+			     struct user_namespace *user_ns)
 {
 	const struct inet_sock *inet = inet_sk(sk);
-	const struct tcp_congestion_ops *ca_ops;
-	const struct inet_diag_handler *handler;
-	int ext = req->idiag_ext;
-	struct inet_diag_msg *r;
-	struct nlmsghdr  *nlh;
-	struct nlattr *attr;
-	void *info = NULL;
-
-	handler = inet_diag_table[req->sdiag_protocol];
-	BUG_ON(!handler);
-
-	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
-			nlmsg_flags);
-	if (!nlh)
-		return -EMSGSIZE;
-
-	r = nlmsg_data(nlh);
-	BUG_ON(!sk_fullsock(sk));
-
-	inet_diag_msg_common_fill(r, sk);
-	r->idiag_state = sk->sk_state;
-	r->idiag_timer = 0;
-	r->idiag_retrans = 0;
 
 	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
 		goto errout;
@@ -161,6 +137,45 @@
 	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
 	r->idiag_inode = sock_i_ino(sk);
 
+	return 0;
+errout:
+	return 1;
+}
+EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
+
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
+		      struct user_namespace *user_ns,
+		      u32 portid, u32 seq, u16 nlmsg_flags,
+		      const struct nlmsghdr *unlh)
+{
+	const struct tcp_congestion_ops *ca_ops;
+	const struct inet_diag_handler *handler;
+	int ext = req->idiag_ext;
+	struct inet_diag_msg *r;
+	struct nlmsghdr  *nlh;
+	struct nlattr *attr;
+	void *info = NULL;
+
+	handler = inet_diag_table[req->sdiag_protocol];
+	BUG_ON(!handler);
+
+	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
+			nlmsg_flags);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	r = nlmsg_data(nlh);
+	BUG_ON(!sk_fullsock(sk));
+
+	inet_diag_msg_common_fill(r, sk);
+	r->idiag_state = sk->sk_state;
+	r->idiag_timer = 0;
+	r->idiag_retrans = 0;
+
+	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
+		goto errout;
+
 	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
 		struct inet_diag_meminfo minfo = {
 			.idiag_rmem = sk_rmem_alloc_get(sk),
@@ -182,31 +197,32 @@
 		goto out;
 	}
 
-#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)
-
 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
 		r->idiag_timer = 1;
 		r->idiag_retrans = icsk->icsk_retransmits;
-		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
+		r->idiag_expires =
+			jiffies_to_msecs(icsk->icsk_timeout - jiffies);
 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
 		r->idiag_timer = 4;
 		r->idiag_retrans = icsk->icsk_probes_out;
-		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
+		r->idiag_expires =
+			jiffies_to_msecs(icsk->icsk_timeout - jiffies);
 	} else if (timer_pending(&sk->sk_timer)) {
 		r->idiag_timer = 2;
 		r->idiag_retrans = icsk->icsk_probes_out;
-		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
+		r->idiag_expires =
+			jiffies_to_msecs(sk->sk_timer.expires - jiffies);
 	} else {
 		r->idiag_timer = 0;
 		r->idiag_expires = 0;
 	}
-#undef EXPIRES_IN_MS
 
 	if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
-		attr = nla_reserve(skb, INET_DIAG_INFO,
-				   handler->idiag_info_size);
+		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
+					 handler->idiag_info_size,
+					 INET_DIAG_PAD);
 		if (!attr)
 			goto errout;
 
@@ -356,6 +372,7 @@
 {
 	struct sock *sk;
 
+	rcu_read_lock();
 	if (req->sdiag_family == AF_INET)
 		sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
 				 req->id.idiag_dport, req->id.idiag_src[0],
@@ -376,9 +393,11 @@
 					  req->id.idiag_if);
 	}
 #endif
-	else
+	else {
+		rcu_read_unlock();
 		return ERR_PTR(-EINVAL);
-
+	}
+	rcu_read_unlock();
 	if (!sk)
 		return ERR_PTR(-ENOENT);
 
@@ -772,13 +791,12 @@
 
 		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
 			struct inet_listen_hashbucket *ilb;
-			struct hlist_nulls_node *node;
 			struct sock *sk;
 
 			num = 0;
 			ilb = &hashinfo->listening_hash[i];
 			spin_lock_bh(&ilb->lock);
-			sk_nulls_for_each(sk, node, &ilb->head) {
+			sk_for_each(sk, &ilb->head) {
 				struct inet_sock *inet = inet_sk(sk);
 
 				if (!net_eq(sock_net(sk), net))
@@ -1061,7 +1079,9 @@
 	}
 
 	attr = handler->idiag_info_size
-		? nla_reserve(skb, INET_DIAG_INFO, handler->idiag_info_size)
+		? nla_reserve_64bit(skb, INET_DIAG_INFO,
+				    handler->idiag_info_size,
+				    INET_DIAG_PAD)
 		: NULL;
 	if (attr)
 		info = nla_data(attr);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index bc68ece..77c20a4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -198,13 +198,13 @@
 }
 
 /*
- * Don't inline this cruft. Here are some nice properties to exploit here. The
- * BSD API does not allow a listening sock to specify the remote port nor the
+ * Here are some nice properties to exploit here. The BSD API
+ * does not allow a listening sock to specify the remote port nor the
  * remote address for the connection. So always assume those are both
  * wildcarded during the search since they can never be otherwise.
  */
 
-
+/* called with rcu_read_lock() : No refcount taken on the socket */
 struct sock *__inet_lookup_listener(struct net *net,
 				    struct inet_hashinfo *hashinfo,
 				    struct sk_buff *skb, int doff,
@@ -212,38 +212,27 @@
 				    const __be32 daddr, const unsigned short hnum,
 				    const int dif)
 {
-	struct sock *sk, *result;
-	struct hlist_nulls_node *node;
 	unsigned int hash = inet_lhashfn(net, hnum);
 	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
-	int score, hiscore, matches = 0, reuseport = 0;
-	bool select_ok = true;
+	int score, hiscore = 0, matches = 0, reuseport = 0;
+	struct sock *sk, *result = NULL;
 	u32 phash = 0;
 
-	rcu_read_lock();
-begin:
-	result = NULL;
-	hiscore = 0;
-	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
+	sk_for_each_rcu(sk, &ilb->head) {
 		score = compute_score(sk, net, hnum, daddr, dif);
 		if (score > hiscore) {
-			result = sk;
-			hiscore = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
 				phash = inet_ehashfn(net, daddr, hnum,
 						     saddr, sport);
-				if (select_ok) {
-					struct sock *sk2;
-					sk2 = reuseport_select_sock(sk, phash,
-								    skb, doff);
-					if (sk2) {
-						result = sk2;
-						goto found;
-					}
-				}
+				result = reuseport_select_sock(sk, phash,
+							       skb, doff);
+				if (result)
+					return result;
 				matches = 1;
 			}
+			result = sk;
+			hiscore = score;
 		} else if (score == hiscore && reuseport) {
 			matches++;
 			if (reciprocal_scale(phash, matches) == 0)
@@ -251,25 +240,6 @@
 			phash = next_pseudo_random32(phash);
 		}
 	}
-	/*
-	 * if the nulls value we got at the end of this lookup is
-	 * not the expected one, we must restart lookup.
-	 * We probably met an item that was moved to another chain.
-	 */
-	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
-		goto begin;
-	if (result) {
-found:
-		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
-			result = NULL;
-		else if (unlikely(compute_score(result, net, hnum, daddr,
-				  dif) < hiscore)) {
-			sock_put(result);
-			select_ok = false;
-			goto begin;
-		}
-	}
-	rcu_read_unlock();
 	return result;
 }
 EXPORT_SYMBOL_GPL(__inet_lookup_listener);
@@ -312,7 +282,6 @@
 	unsigned int slot = hash & hashinfo->ehash_mask;
 	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
 
-	rcu_read_lock();
 begin:
 	sk_nulls_for_each_rcu(sk, node, &head->chain) {
 		if (sk->sk_hash != hash)
@@ -339,7 +308,6 @@
 out:
 	sk = NULL;
 found:
-	rcu_read_unlock();
 	return sk;
 }
 EXPORT_SYMBOL_GPL(__inet_lookup_established);
@@ -392,7 +360,7 @@
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	if (tw) {
 		sk_nulls_del_node_init_rcu((struct sock *)tw);
-		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
 	}
 	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -470,15 +438,16 @@
 						     const struct sock *sk2,
 						     bool match_wildcard))
 {
+	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
 	struct sock *sk2;
-	struct hlist_nulls_node *node;
 	kuid_t uid = sock_i_uid(sk);
 
-	sk_nulls_for_each_rcu(sk2, node, &ilb->head) {
+	sk_for_each_rcu(sk2, &ilb->head) {
 		if (sk2 != sk &&
 		    sk2->sk_family == sk->sk_family &&
 		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
 		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
+		    inet_csk(sk2)->icsk_bind_hash == tb &&
 		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
 		    saddr_same(sk, sk2, false))
 			return reuseport_add_sock(sk, sk2);
@@ -512,7 +481,12 @@
 		if (err)
 			goto unlock;
 	}
-	__sk_nulls_add_node_rcu(sk, &ilb->head);
+	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+		sk->sk_family == AF_INET6)
+		hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
+	else
+		hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+	sock_set_flag(sk, SOCK_RCU_FREE);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 unlock:
 	spin_unlock(&ilb->lock);
@@ -539,20 +513,25 @@
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	spinlock_t *lock;
+	bool listener = false;
 	int done;
 
 	if (sk_unhashed(sk))
 		return;
 
-	if (sk->sk_state == TCP_LISTEN)
+	if (sk->sk_state == TCP_LISTEN) {
 		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
-	else
+		listener = true;
+	} else {
 		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
-
+	}
 	spin_lock_bh(lock);
 	if (rcu_access_pointer(sk->sk_reuseport_cb))
 		reuseport_detach_sock(sk);
-	done = __sk_nulls_del_node_init_rcu(sk);
+	if (listener)
+		done = __sk_del_node_init(sk);
+	else
+		done = __sk_nulls_del_node_init_rcu(sk);
 	if (done)
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	spin_unlock_bh(lock);
@@ -688,9 +667,8 @@
 
 	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
 		spin_lock_init(&h->listening_hash[i].lock);
-		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
-				      i + LISTENING_NULLS_BASE);
-		}
+		INIT_HLIST_HEAD(&h->listening_hash[i].head);
+	}
 }
 EXPORT_SYMBOL_GPL(inet_hashinfo_init);
 
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c67f9bd..2065816 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -94,7 +94,7 @@
 }
 
 /*
- * Enter the time wait state. This is called with locally disabled BH.
+ * Enter the time wait state.
  * Essentially we whip up a timewait bucket, copy the relevant info into it
  * from the SK, and mess with hash chains and list linkage.
  */
@@ -112,7 +112,7 @@
 	 */
 	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
 			hashinfo->bhash_size)];
-	spin_lock(&bhead->lock);
+	spin_lock_bh(&bhead->lock);
 	tw->tw_tb = icsk->icsk_bind_hash;
 	WARN_ON(!icsk->icsk_bind_hash);
 	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
@@ -138,7 +138,7 @@
 	if (__sk_nulls_del_node_init_rcu(sk))
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
-	spin_unlock(lock);
+	spin_unlock_bh(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
 
@@ -147,9 +147,9 @@
 	struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
 
 	if (tw->tw_kill)
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
+		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
 	else
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
+		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITED);
 	inet_twsk_kill(tw);
 }
 
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index af18f1e..cbfb180 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -65,8 +65,8 @@
 {
 	struct ip_options *opt	= &(IPCB(skb)->opt);
 
-	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
-	IP_ADD_STATS_BH(net, IPSTATS_MIB_OUTOCTETS, skb->len);
+	__IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
+	__IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
 
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
@@ -157,7 +157,7 @@
 
 too_many_hops:
 	/* Tell the sender its packet died... */
-	IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 	icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
 drop:
 	kfree_skb(skb);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index efbd47d..bbe7f72 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -204,14 +204,14 @@
 		goto out;
 
 	ipq_kill(qp);
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 
 	if (!inet_frag_evicting(&qp->q)) {
 		struct sk_buff *head = qp->q.fragments;
 		const struct iphdr *iph;
 		int err;
 
-		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
+		__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
 
 		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
 			goto out;
@@ -291,7 +291,7 @@
 		struct net *net;
 
 		net = container_of(qp->q.net, struct net, ipv4.frags);
-		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 	}
 
 	return rc;
@@ -635,7 +635,7 @@
 
 	ip_send_check(iph);
 
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
 	qp->q.fragments = NULL;
 	qp->q.fragments_tail = NULL;
 	return 0;
@@ -647,7 +647,7 @@
 out_oversize:
 	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
 out_fail:
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 	return err;
 }
 
@@ -658,7 +658,7 @@
 	int vif = l3mdev_master_ifindex_rcu(dev);
 	struct ipq *qp;
 
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
 	skb_orphan(skb);
 
 	/* Lookup (or create) queue header */
@@ -675,7 +675,7 @@
 		return ret;
 	}
 
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -ENOMEM;
 }
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 31936d3..4d2025f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -122,125 +122,6 @@
 static int ipgre_net_id __read_mostly;
 static int gre_tap_net_id __read_mostly;
 
-static int ip_gre_calc_hlen(__be16 o_flags)
-{
-	int addend = 4;
-
-	if (o_flags & TUNNEL_CSUM)
-		addend += 4;
-	if (o_flags & TUNNEL_KEY)
-		addend += 4;
-	if (o_flags & TUNNEL_SEQ)
-		addend += 4;
-	return addend;
-}
-
-static __be16 gre_flags_to_tnl_flags(__be16 flags)
-{
-	__be16 tflags = 0;
-
-	if (flags & GRE_CSUM)
-		tflags |= TUNNEL_CSUM;
-	if (flags & GRE_ROUTING)
-		tflags |= TUNNEL_ROUTING;
-	if (flags & GRE_KEY)
-		tflags |= TUNNEL_KEY;
-	if (flags & GRE_SEQ)
-		tflags |= TUNNEL_SEQ;
-	if (flags & GRE_STRICT)
-		tflags |= TUNNEL_STRICT;
-	if (flags & GRE_REC)
-		tflags |= TUNNEL_REC;
-	if (flags & GRE_VERSION)
-		tflags |= TUNNEL_VERSION;
-
-	return tflags;
-}
-
-static __be16 tnl_flags_to_gre_flags(__be16 tflags)
-{
-	__be16 flags = 0;
-
-	if (tflags & TUNNEL_CSUM)
-		flags |= GRE_CSUM;
-	if (tflags & TUNNEL_ROUTING)
-		flags |= GRE_ROUTING;
-	if (tflags & TUNNEL_KEY)
-		flags |= GRE_KEY;
-	if (tflags & TUNNEL_SEQ)
-		flags |= GRE_SEQ;
-	if (tflags & TUNNEL_STRICT)
-		flags |= GRE_STRICT;
-	if (tflags & TUNNEL_REC)
-		flags |= GRE_REC;
-	if (tflags & TUNNEL_VERSION)
-		flags |= GRE_VERSION;
-
-	return flags;
-}
-
-static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-			    bool *csum_err)
-{
-	const struct gre_base_hdr *greh;
-	__be32 *options;
-	int hdr_len;
-
-	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
-		return -EINVAL;
-
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
-	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
-		return -EINVAL;
-
-	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
-	hdr_len = ip_gre_calc_hlen(tpi->flags);
-
-	if (!pskb_may_pull(skb, hdr_len))
-		return -EINVAL;
-
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
-	tpi->proto = greh->protocol;
-
-	options = (__be32 *)(greh + 1);
-	if (greh->flags & GRE_CSUM) {
-		if (skb_checksum_simple_validate(skb)) {
-			*csum_err = true;
-			return -EINVAL;
-		}
-
-		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
-					 null_compute_pseudo);
-		options++;
-	}
-
-	if (greh->flags & GRE_KEY) {
-		tpi->key = *options;
-		options++;
-	} else {
-		tpi->key = 0;
-	}
-	if (unlikely(greh->flags & GRE_SEQ)) {
-		tpi->seq = *options;
-		options++;
-	} else {
-		tpi->seq = 0;
-	}
-	/* WCCP version 1 and 2 protocol decoding.
-	 * - Change protocol to IP
-	 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
-	 */
-	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
-		tpi->proto = htons(ETH_P_IP);
-		if ((*(u8 *)options & 0xF0) != 0x40) {
-			hdr_len += 4;
-			if (!pskb_may_pull(skb, hdr_len))
-				return -EINVAL;
-		}
-	}
-	return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
-}
-
 static void ipgre_err(struct sk_buff *skb, u32 info,
 		      const struct tnl_ptk_info *tpi)
 {
@@ -341,7 +222,7 @@
 	struct tnl_ptk_info tpi;
 	bool csum_err = false;
 
-	if (parse_gre_header(skb, &tpi, &csum_err)) {
+	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) {
 		if (!csum_err)		/* ignore csum errors. */
 			return;
 	}
@@ -379,25 +260,26 @@
 #endif
 }
 
-static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
+static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
 {
-	struct net *net = dev_net(skb->dev);
 	struct metadata_dst *tun_dst = NULL;
-	struct ip_tunnel_net *itn;
 	const struct iphdr *iph;
 	struct ip_tunnel *tunnel;
 
-	if (tpi->proto == htons(ETH_P_TEB))
-		itn = net_generic(net, gre_tap_net_id);
-	else
-		itn = net_generic(net, ipgre_net_id);
-
 	iph = ip_hdr(skb);
 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 				  iph->saddr, iph->daddr, tpi->key);
 
 	if (tunnel) {
-		skb_pop_mac_header(skb);
+		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
+					   raw_proto, false) < 0)
+			goto drop;
+
+		if (tunnel->dev->type != ARPHRD_NONE)
+			skb_pop_mac_header(skb);
+		else
+			skb_reset_mac_header(skb);
 		if (tunnel->collect_md) {
 			__be16 flags;
 			__be64 tun_id;
@@ -412,13 +294,41 @@
 		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 		return PACKET_RCVD;
 	}
-	return PACKET_REJECT;
+	return PACKET_NEXT;
+
+drop:
+	kfree_skb(skb);
+	return PACKET_RCVD;
+}
+
+static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
+		     int hdr_len)
+{
+	struct net *net = dev_net(skb->dev);
+	struct ip_tunnel_net *itn;
+	int res;
+
+	if (tpi->proto == htons(ETH_P_TEB))
+		itn = net_generic(net, gre_tap_net_id);
+	else
+		itn = net_generic(net, ipgre_net_id);
+
+	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
+	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
+		/* ipgre tunnels in collect metadata mode should receive
+		 * also ETH_P_TEB traffic.
+		 */
+		itn = net_generic(net, ipgre_net_id);
+		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
+	}
+	return res;
 }
 
 static int gre_rcv(struct sk_buff *skb)
 {
 	struct tnl_ptk_info tpi;
 	bool csum_err = false;
+	int hdr_len;
 
 #ifdef CONFIG_NET_IPGRE_BROADCAST
 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
@@ -428,10 +338,11 @@
 	}
 #endif
 
-	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP));
+	if (hdr_len < 0)
 		goto drop;
 
-	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
+	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 		return 0;
 
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -440,49 +351,6 @@
 	return 0;
 }
 
-static __sum16 gre_checksum(struct sk_buff *skb)
-{
-	__wsum csum;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		csum = lco_csum(skb);
-	else
-		csum = skb_checksum(skb, 0, skb->len, 0);
-	return csum_fold(csum);
-}
-
-static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
-			 __be16 proto, __be32 key, __be32 seq)
-{
-	struct gre_base_hdr *greh;
-
-	skb_push(skb, hdr_len);
-
-	skb_reset_transport_header(skb);
-	greh = (struct gre_base_hdr *)skb->data;
-	greh->flags = tnl_flags_to_gre_flags(flags);
-	greh->protocol = proto;
-
-	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
-		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
-		if (flags & TUNNEL_SEQ) {
-			*ptr = seq;
-			ptr--;
-		}
-		if (flags & TUNNEL_KEY) {
-			*ptr = key;
-			ptr--;
-		}
-		if (flags & TUNNEL_CSUM &&
-		    !(skb_shinfo(skb)->gso_type &
-		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
-			*ptr = 0;
-			*(__sum16 *)ptr = gre_checksum(skb);
-		}
-	}
-}
-
 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 		       const struct iphdr *tnl_params,
 		       __be16 proto)
@@ -493,15 +361,15 @@
 		tunnel->o_seqno++;
 
 	/* Push GRE header. */
-	build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
-		     proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+	gre_build_header(skb, tunnel->tun_hlen,
+			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
+			 htonl(tunnel->o_seqno));
 
 	skb_set_inner_protocol(skb, proto);
 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
 
-static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
-					   bool csum)
+static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
@@ -523,7 +391,8 @@
 	return ip_route_output_key(net, fl);
 }
 
-static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
+			__be16 proto)
 {
 	struct ip_tunnel_info *tun_info;
 	const struct ip_tunnel_key *key;
@@ -553,7 +422,7 @@
 					  fl.saddr);
 	}
 
-	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);
+	tunnel_hlen = gre_calc_hlen(key->tun_flags);
 
 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 			+ tunnel_hlen + sizeof(struct iphdr);
@@ -568,15 +437,12 @@
 	}
 
 	/* Push Tunnel header. */
-	skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
-	if (IS_ERR(skb)) {
-		skb = NULL;
+	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
 		goto err_free_rt;
-	}
 
 	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
-	build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
-		     tunnel_id_to_key(tun_info->key.tun_id), 0);
+	gre_build_header(skb, tunnel_hlen, flags, proto,
+			 tunnel_id_to_key(tun_info->key.tun_id), 0);
 
 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
 
@@ -616,7 +482,7 @@
 	const struct iphdr *tnl_params;
 
 	if (tunnel->collect_md) {
-		gre_fb_xmit(skb, dev);
+		gre_fb_xmit(skb, dev, skb->protocol);
 		return NETDEV_TX_OK;
 	}
 
@@ -640,16 +506,14 @@
 		tnl_params = &tunnel->parms.iph;
 	}
 
-	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
-	if (IS_ERR(skb))
-		goto out;
+	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+		goto free_skb;
 
 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
 	return NETDEV_TX_OK;
 
 free_skb:
 	kfree_skb(skb);
-out:
 	dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
 }
@@ -660,13 +524,12 @@
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
 	if (tunnel->collect_md) {
-		gre_fb_xmit(skb, dev);
+		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
 		return NETDEV_TX_OK;
 	}
 
-	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
-	if (IS_ERR(skb))
-		goto out;
+	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+		goto free_skb;
 
 	if (skb_cow_head(skb, dev->needed_headroom))
 		goto free_skb;
@@ -676,7 +539,6 @@
 
 free_skb:
 	kfree_skb(skb);
-out:
 	dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
 }
@@ -702,8 +564,8 @@
 	if (err)
 		return err;
 
-	p.i_flags = tnl_flags_to_gre_flags(p.i_flags);
-	p.o_flags = tnl_flags_to_gre_flags(p.o_flags);
+	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
+	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
 
 	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
 		return -EFAULT;
@@ -747,7 +609,7 @@
 
 	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
 	greh = (struct gre_base_hdr *)(iph+1);
-	greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags);
+	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
 	greh->protocol = htons(type);
 
 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
@@ -848,7 +710,7 @@
 	int t_hlen;
 
 	tunnel = netdev_priv(dev);
-	tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
+	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 	tunnel->parms.iph.protocol = IPPROTO_GRE;
 
 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
@@ -862,9 +724,16 @@
 	dev->hw_features	|= GRE_FEATURES;
 
 	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
-		/* TCP offload with GRE SEQ is not supported. */
-		dev->features    |= NETIF_F_GSO_SOFTWARE;
-		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+		/* TCP offload with GRE SEQ is not supported, nor
+		 * can we support 2 levels of outer headers requiring
+		 * an update.
+		 */
+		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
+		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
+			dev->features    |= NETIF_F_GSO_SOFTWARE;
+			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+		}
+
 		/* Can use a lockless transmit, unless we generate
 		 * output sequences
 		 */
@@ -886,7 +755,7 @@
 	netif_keep_dst(dev);
 	dev->addr_len		= 4;
 
-	if (iph->daddr) {
+	if (iph->daddr && !tunnel->collect_md) {
 #ifdef CONFIG_NET_IPGRE_BROADCAST
 		if (ipv4_is_multicast(iph->daddr)) {
 			if (!iph->saddr)
@@ -895,8 +764,9 @@
 			dev->header_ops = &ipgre_header_ops;
 		}
 #endif
-	} else
+	} else if (!tunnel->collect_md) {
 		dev->header_ops = &ipgre_header_ops;
+	}
 
 	return ip_tunnel_init(dev);
 }
@@ -939,6 +809,11 @@
 	if (flags & (GRE_VERSION|GRE_ROUTING))
 		return -EINVAL;
 
+	if (data[IFLA_GRE_COLLECT_METADATA] &&
+	    data[IFLA_GRE_ENCAP_TYPE] &&
+	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -1012,6 +887,8 @@
 		struct ip_tunnel *t = netdev_priv(dev);
 
 		t->collect_md = true;
+		if (dev->type == ARPHRD_IPGRE)
+			dev->type = ARPHRD_NONE;
 	}
 }
 
@@ -1156,8 +1033,10 @@
 	struct ip_tunnel_parm *p = &t->parms;
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
-	    nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) ||
-	    nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
+	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
+			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
+	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
+			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e3d7827..4b351af 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -218,17 +218,17 @@
 				protocol = -ret;
 				goto resubmit;
 			}
-			IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
+			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
 		} else {
 			if (!raw) {
 				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-					IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
+					__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
 					icmp_send(skb, ICMP_DEST_UNREACH,
 						  ICMP_PROT_UNREACH, 0);
 				}
 				kfree_skb(skb);
 			} else {
-				IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
+				__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
 				consume_skb(skb);
 			}
 		}
@@ -273,7 +273,7 @@
 					      --ANK (980813)
 	*/
 	if (skb_cow(skb, skb_headroom(skb))) {
-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
+		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -282,7 +282,7 @@
 	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
 
 	if (ip_options_compile(dev_net(dev), opt, skb)) {
-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
+		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
 		goto drop;
 	}
 
@@ -313,6 +313,13 @@
 	const struct iphdr *iph = ip_hdr(skb);
 	struct rtable *rt;
 
+	/* if ingress device is enslaved to an L3 master device, pass the
+	 * skb to its handler for processing
+	 */
+	skb = l3mdev_ip_rcv(skb);
+	if (!skb)
+		return NET_RX_SUCCESS;
+
 	if (net->ipv4.sysctl_ip_early_demux &&
 	    !skb_dst(skb) &&
 	    !skb->sk &&
@@ -337,7 +344,7 @@
 					       iph->tos, skb->dev);
 		if (unlikely(err)) {
 			if (err == -EXDEV)
-				NET_INC_STATS_BH(net, LINUX_MIB_IPRPFILTER);
+				__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
 			goto drop;
 		}
 	}
@@ -358,9 +365,9 @@
 
 	rt = skb_rtable(skb);
 	if (rt->rt_type == RTN_MULTICAST) {
-		IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INMCAST, skb->len);
+		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
 	} else if (rt->rt_type == RTN_BROADCAST) {
-		IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INBCAST, skb->len);
+		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
 	} else if (skb->pkt_type == PACKET_BROADCAST ||
 		   skb->pkt_type == PACKET_MULTICAST) {
 		struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
@@ -409,11 +416,11 @@
 
 
 	net = dev_net(dev);
-	IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_IN, skb->len);
+	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
 
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
 		goto out;
 	}
 
@@ -439,9 +446,9 @@
 	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
 	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
 	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
-	IP_ADD_STATS_BH(net,
-			IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
-			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
+	__IP_ADD_STATS(net,
+		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
+		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
 
 	if (!pskb_may_pull(skb, iph->ihl*4))
 		goto inhdr_error;
@@ -453,7 +460,7 @@
 
 	len = ntohs(iph->tot_len);
 	if (skb->len < len) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
+		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
 		goto drop;
 	} else if (len < (iph->ihl*4))
 		goto inhdr_error;
@@ -463,7 +470,7 @@
 	 * Note this now means skb->len holds ntohs(iph->tot_len).
 	 */
 	if (pskb_trim_rcsum(skb, len)) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -471,6 +478,7 @@
 
 	/* Remove any debris in the socket control block */
 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+	IPCB(skb)->iif = skb->skb_iif;
 
 	/* Must drop socket now because of tproxy. */
 	skb_orphan(skb);
@@ -480,9 +488,9 @@
 		       ip_rcv_finish);
 
 csum_error:
-	IP_INC_STATS_BH(net, IPSTATS_MIB_CSUMERRORS);
+	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
 inhdr_error:
-	IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:
 	kfree_skb(skb);
 out:
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 035ad64..71a52f4d 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -106,7 +106,8 @@
 		return;
 
 	if (offset != 0)
-		csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
+		csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
+						   offset, 0));
 
 	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
 }
@@ -219,11 +220,12 @@
 }
 EXPORT_SYMBOL(ip_cmsg_recv_offset);
 
-int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
+int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
 		 bool allow_ipv6)
 {
 	int err, val;
 	struct cmsghdr *cmsg;
+	struct net *net = sock_net(sk);
 
 	for_each_cmsghdr(cmsg, msg) {
 		if (!CMSG_OK(msg, cmsg))
@@ -244,6 +246,13 @@
 			continue;
 		}
 #endif
+		if (cmsg->cmsg_level == SOL_SOCKET) {
+			err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
+			if (err)
+				return err;
+			continue;
+		}
+
 		if (cmsg->cmsg_level != SOL_IP)
 			continue;
 		switch (cmsg->cmsg_type) {
@@ -502,9 +511,10 @@
 		copied = len;
 	}
 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
-	if (err)
-		goto out_free_skb;
-
+	if (unlikely(err)) {
+		kfree_skb(skb);
+		return err;
+	}
 	sock_recv_timestamp(msg, sk, skb);
 
 	serr = SKB_EXT_ERR(skb);
@@ -536,8 +546,7 @@
 	msg->msg_flags |= MSG_ERRQUEUE;
 	err = copied;
 
-out_free_skb:
-	kfree_skb(skb);
+	consume_skb(skb);
 out:
 	return err;
 }
@@ -635,7 +644,7 @@
 		if (err)
 			break;
 		old = rcu_dereference_protected(inet->inet_opt,
-						sock_owned_by_user(sk));
+						lockdep_sock_is_held(sk));
 		if (inet->is_icsk) {
 			struct inet_connection_sock *icsk = inet_csk(sk);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1185,7 +1194,12 @@
 		       ipv6_sk_rxinfo(sk);
 
 	if (prepare && skb_rtable(skb)) {
-		pktinfo->ipi_ifindex = inet_iif(skb);
+		/* skb->cb is overloaded: prior to this point it is IP{6}CB,
+		 * which has the interface index (iif) as the first member of
+		 * the underlying inet{6}_skb_parm struct. This code then
+		 * overlays PKTINFO_SKB_CB, and in_pktinfo also has iif as its
+		 * first element, so the iif is picked up from the prior IPCB.
+		 */
 		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
 	} else {
 		pktinfo->ipi_ifindex = 0;
@@ -1295,7 +1309,7 @@
 		struct ip_options_rcu *inet_opt;
 
 		inet_opt = rcu_dereference_protected(inet->inet_opt,
-						     sock_owned_by_user(sk));
+						     lockdep_sock_is_held(sk));
 		opt->optlen = 0;
 		if (inet_opt)
 			memcpy(optbuf, &inet_opt->opt,
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 6aad019..a69ed94 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -326,12 +326,12 @@
 
 		if (!IS_ERR(rt)) {
 			tdev = rt->dst.dev;
-			dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
-					  fl4.saddr);
 			ip_rt_put(rt);
 		}
 		if (dev->type != ARPHRD_ETHER)
 			dev->flags |= IFF_POINTOPOINT;
+
+		dst_cache_reset(&tunnel->dst_cache);
 	}
 
 	if (!tdev && tunnel->parms.link)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 02dd990..9118b0e 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -86,15 +86,15 @@
 }
 EXPORT_SYMBOL_GPL(iptunnel_xmit);
 
-int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto,
-			 bool xnet)
+int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
+			   __be16 inner_proto, bool raw_proto, bool xnet)
 {
 	if (unlikely(!pskb_may_pull(skb, hdr_len)))
 		return -ENOMEM;
 
 	skb_pull_rcsum(skb, hdr_len);
 
-	if (inner_proto == htons(ETH_P_TEB)) {
+	if (!raw_proto && inner_proto == htons(ETH_P_TEB)) {
 		struct ethhdr *eh;
 
 		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
@@ -117,7 +117,7 @@
 
 	return iptunnel_pull_offloads(skb);
 }
-EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+EXPORT_SYMBOL_GPL(__iptunnel_pull_header);
 
 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
 					     gfp_t flags)
@@ -146,8 +146,8 @@
 }
 EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
 
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
-					 int gso_type_mask)
+int iptunnel_handle_offloads(struct sk_buff *skb,
+			     int gso_type_mask)
 {
 	int err;
 
@@ -157,11 +157,11 @@
 	}
 
 	if (skb_is_gso(skb)) {
-		err = skb_unclone(skb, GFP_ATOMIC);
+		err = skb_header_unclone(skb, GFP_ATOMIC);
 		if (unlikely(err))
-			goto error;
+			return err;
 		skb_shinfo(skb)->gso_type |= gso_type_mask;
-		return skb;
+		return 0;
 	}
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -174,10 +174,7 @@
 		skb->encapsulation = 0;
 	}
 
-	return skb;
-error:
-	kfree_skb(skb);
-	return ERR_PTR(err);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
 
@@ -247,10 +244,10 @@
 		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);
 
 	if (tb[LWTUNNEL_IP_DST])
-		tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);
+		tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]);
 
 	if (tb[LWTUNNEL_IP_SRC])
-		tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);
+		tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]);
 
 	if (tb[LWTUNNEL_IP_TTL])
 		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
@@ -274,9 +271,10 @@
 {
 	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
 
-	if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
-	    nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
-	    nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
+	if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id,
+			 LWTUNNEL_IP_PAD) ||
+	    nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
+	    nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
 	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
 	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
 	    nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
@@ -287,7 +285,7 @@
 
 static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-	return nla_total_size(8)	/* LWTUNNEL_IP_ID */
+	return nla_total_size_64bit(8)	/* LWTUNNEL_IP_ID */
 		+ nla_total_size(4)	/* LWTUNNEL_IP_DST */
 		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
 		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
@@ -369,11 +367,12 @@
 {
 	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
 
-	if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
+	if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id,
+			 LWTUNNEL_IP6_PAD) ||
 	    nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
 	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
-	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
-	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
+	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
+	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
 	    nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
 		return -ENOMEM;
 
@@ -382,7 +381,7 @@
 
 static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-	return nla_total_size(8)	/* LWTUNNEL_IP6_ID */
+	return nla_total_size_64bit(8)	/* LWTUNNEL_IP6_ID */
 		+ nla_total_size(16)	/* LWTUNNEL_IP6_DST */
 		+ nla_total_size(16)	/* LWTUNNEL_IP6_SRC */
 		+ nla_total_size(1)	/* LWTUNNEL_IP6_HOPLIMIT */
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 5cf10b7..a917903 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -156,6 +156,7 @@
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *tdev;	/* Device to other host */
 	int err;
+	int mtu;
 
 	if (!dst) {
 		dev->stats.tx_carrier_errors++;
@@ -192,6 +193,23 @@
 			tunnel->err_count = 0;
 	}
 
+	mtu = dst_mtu(dst);
+	if (skb->len > mtu) {
+		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+		if (skb->protocol == htons(ETH_P_IP)) {
+			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+				  htonl(mtu));
+		} else {
+			if (mtu < IPV6_MIN_MTU)
+				mtu = IPV6_MIN_MTU;
+
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		}
+
+		dst_release(dst);
+		goto tx_error;
+	}
+
 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
 	skb_dst_set(skb, dst);
 	skb->dev = skb_dst(skb)->dev;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ec51d02..9282748 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -219,9 +219,8 @@
 	if (unlikely(skb->protocol != htons(ETH_P_IP)))
 		goto tx_error;
 
-	skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
-	if (IS_ERR(skb))
-		goto out;
+	if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
+		goto tx_error;
 
 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
@@ -230,7 +229,7 @@
 
 tx_error:
 	kfree_skb(skb);
-out:
+
 	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 395e281..21a38e2 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2104,7 +2104,7 @@
 	mfcs.mfcs_packets = c->mfc_un.res.pkt;
 	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
 	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
-	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
+	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) < 0)
 		return -EMSGSIZE;
 
 	rtm->rtm_type = RTN_MULTICAST;
@@ -2237,7 +2237,7 @@
 		      + nla_total_size(0)	/* RTA_MULTIPATH */
 		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
 						/* RTA_MFC_STATS */
-		      + nla_total_size(sizeof(struct rta_mfc_stats))
+		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
 		;
 
 	return len;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index bf08192..2033f92 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -34,27 +34,6 @@
 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
 MODULE_DESCRIPTION("arptables core");
 
-/*#define DEBUG_ARP_TABLES*/
-/*#define DEBUG_ARP_TABLES_USER*/
-
-#ifdef DEBUG_ARP_TABLES
-#define dprintf(format, args...)  pr_debug(format, ## args)
-#else
-#define dprintf(format, args...)
-#endif
-
-#ifdef DEBUG_ARP_TABLES_USER
-#define duprintf(format, args...) pr_debug(format, ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
-#ifdef CONFIG_NETFILTER_DEBUG
-#define ARP_NF_ASSERT(x)	WARN_ON(!(x))
-#else
-#define ARP_NF_ASSERT(x)
-#endif
-
 void *arpt_alloc_initial_table(const struct xt_table *info)
 {
 	return xt_alloc_initial_table(arpt, ARPT);
@@ -113,36 +92,20 @@
 #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))
 
 	if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
-		  ARPT_INV_ARPOP)) {
-		dprintf("ARP operation field mismatch.\n");
-		dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
-			arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask);
+		  ARPT_INV_ARPOP))
 		return 0;
-	}
 
 	if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd,
-		  ARPT_INV_ARPHRD)) {
-		dprintf("ARP hardware address format mismatch.\n");
-		dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
-			arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask);
+		  ARPT_INV_ARPHRD))
 		return 0;
-	}
 
 	if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro,
-		  ARPT_INV_ARPPRO)) {
-		dprintf("ARP protocol address format mismatch.\n");
-		dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
-			arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask);
+		  ARPT_INV_ARPPRO))
 		return 0;
-	}
 
 	if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln,
-		  ARPT_INV_ARPHLN)) {
-		dprintf("ARP hardware address length mismatch.\n");
-		dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
-			arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask);
+		  ARPT_INV_ARPHLN))
 		return 0;
-	}
 
 	src_devaddr = arpptr;
 	arpptr += dev->addr_len;
@@ -155,49 +118,25 @@
 	if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len),
 		  ARPT_INV_SRCDEVADDR) ||
 	    FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len),
-		  ARPT_INV_TGTDEVADDR)) {
-		dprintf("Source or target device address mismatch.\n");
-
+		  ARPT_INV_TGTDEVADDR))
 		return 0;
-	}
 
 	if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr,
 		  ARPT_INV_SRCIP) ||
 	    FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr),
-		  ARPT_INV_TGTIP)) {
-		dprintf("Source or target IP address mismatch.\n");
-
-		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
-			&src_ipaddr,
-			&arpinfo->smsk.s_addr,
-			&arpinfo->src.s_addr,
-			arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
-		dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
-			&tgt_ipaddr,
-			&arpinfo->tmsk.s_addr,
-			&arpinfo->tgt.s_addr,
-			arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
+		  ARPT_INV_TGTIP))
 		return 0;
-	}
 
 	/* Look for ifname matches.  */
 	ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);
 
-	if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
-		dprintf("VIA in mismatch (%s vs %s).%s\n",
-			indev, arpinfo->iniface,
-			arpinfo->invflags & ARPT_INV_VIA_IN ? " (INV)" : "");
+	if (FWINV(ret != 0, ARPT_INV_VIA_IN))
 		return 0;
-	}
 
 	ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);
 
-	if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
-		dprintf("VIA out mismatch (%s vs %s).%s\n",
-			outdev, arpinfo->outiface,
-			arpinfo->invflags & ARPT_INV_VIA_OUT ? " (INV)" : "");
+	if (FWINV(ret != 0, ARPT_INV_VIA_OUT))
 		return 0;
-	}
 
 	return 1;
 #undef FWINV
@@ -205,16 +144,10 @@
 
 static inline int arp_checkentry(const struct arpt_arp *arp)
 {
-	if (arp->flags & ~ARPT_F_MASK) {
-		duprintf("Unknown flag bits set: %08X\n",
-			 arp->flags & ~ARPT_F_MASK);
+	if (arp->flags & ~ARPT_F_MASK)
 		return 0;
-	}
-	if (arp->invflags & ~ARPT_INV_MASK) {
-		duprintf("Unknown invflag bits set: %08X\n",
-			 arp->invflags & ~ARPT_INV_MASK);
+	if (arp->invflags & ~ARPT_INV_MASK)
 		return 0;
-	}
 
 	return 1;
 }
@@ -359,11 +292,24 @@
 }
 
 /* All zeroes == unconditional rule. */
-static inline bool unconditional(const struct arpt_arp *arp)
+static inline bool unconditional(const struct arpt_entry *e)
 {
 	static const struct arpt_arp uncond;
 
-	return memcmp(arp, &uncond, sizeof(uncond)) == 0;
+	return e->target_offset == sizeof(struct arpt_entry) &&
+	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
+}
+
+static bool find_jump_target(const struct xt_table_info *t,
+			     const struct arpt_entry *target)
+{
+	struct arpt_entry *iter;
+
+	xt_entry_foreach(iter, t->entries, t->size) {
+		if (iter == target)
+			return true;
+	}
+	return false;
 }
 
 /* Figures out from what hook each rule can be called: returns 0 if
@@ -393,30 +339,23 @@
 				= (void *)arpt_get_target_c(e);
 			int visited = e->comefrom & (1 << hook);
 
-			if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
-				pr_notice("arptables: loop hook %u pos %u %08X.\n",
-				       hook, pos, e->comefrom);
+			if (e->comefrom & (1 << NF_ARP_NUMHOOKS))
 				return 0;
-			}
+
 			e->comefrom
 				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
 
 			/* Unconditional return/END. */
-			if ((e->target_offset == sizeof(struct arpt_entry) &&
+			if ((unconditional(e) &&
 			     (strcmp(t->target.u.user.name,
 				     XT_STANDARD_TARGET) == 0) &&
-			     t->verdict < 0 && unconditional(&e->arp)) ||
-			    visited) {
+			     t->verdict < 0) || visited) {
 				unsigned int oldpos, size;
 
 				if ((strcmp(t->target.u.user.name,
 					    XT_STANDARD_TARGET) == 0) &&
-				    t->verdict < -NF_MAX_VERDICT - 1) {
-					duprintf("mark_source_chains: bad "
-						"negative verdict (%i)\n",
-								t->verdict);
+				    t->verdict < -NF_MAX_VERDICT - 1)
 					return 0;
-				}
 
 				/* Return: backtrack through the last
 				 * big jump.
@@ -439,6 +378,8 @@
 				size = e->next_offset;
 				e = (struct arpt_entry *)
 					(entry0 + pos + size);
+				if (pos + size >= newinfo->size)
+					return 0;
 				e->counters.pcnt = pos;
 				pos += size;
 			} else {
@@ -447,20 +388,16 @@
 				if (strcmp(t->target.u.user.name,
 					   XT_STANDARD_TARGET) == 0 &&
 				    newpos >= 0) {
-					if (newpos > newinfo->size -
-						sizeof(struct arpt_entry)) {
-						duprintf("mark_source_chains: "
-							"bad verdict (%i)\n",
-								newpos);
-						return 0;
-					}
-
 					/* This a jump; chase it. */
-					duprintf("Jump rule %u -> %u\n",
-						 pos, newpos);
+					e = (struct arpt_entry *)
+						(entry0 + newpos);
+					if (!find_jump_target(newinfo, e))
+						return 0;
 				} else {
 					/* ... this is a fallthru */
 					newpos = pos + e->next_offset;
+					if (newpos >= newinfo->size)
+						return 0;
 				}
 				e = (struct arpt_entry *)
 					(entry0 + newpos);
@@ -468,35 +405,14 @@
 				pos = newpos;
 			}
 		}
-next:
-		duprintf("Finished chain %u\n", hook);
+next:		;
 	}
 	return 1;
 }
 
-static inline int check_entry(const struct arpt_entry *e, const char *name)
-{
-	const struct xt_entry_target *t;
-
-	if (!arp_checkentry(&e->arp)) {
-		duprintf("arp_tables: arp check failed %p %s.\n", e, name);
-		return -EINVAL;
-	}
-
-	if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
-		return -EINVAL;
-
-	t = arpt_get_target_c(e);
-	if (e->target_offset + t->u.target_size > e->next_offset)
-		return -EINVAL;
-
-	return 0;
-}
-
 static inline int check_target(struct arpt_entry *e, const char *name)
 {
 	struct xt_entry_target *t = arpt_get_target(e);
-	int ret;
 	struct xt_tgchk_param par = {
 		.table     = name,
 		.entryinfo = e,
@@ -506,13 +422,7 @@
 		.family    = NFPROTO_ARP,
 	};
 
-	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
-	if (ret < 0) {
-		duprintf("arp_tables: check failed for `%s'.\n",
-			 t->u.kernel.target->name);
-		return ret;
-	}
-	return 0;
+	return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
 }
 
 static inline int
@@ -520,21 +430,18 @@
 {
 	struct xt_entry_target *t;
 	struct xt_target *target;
+	unsigned long pcnt;
 	int ret;
 
-	ret = check_entry(e, name);
-	if (ret)
-		return ret;
-
-	e->counters.pcnt = xt_percpu_counter_alloc();
-	if (IS_ERR_VALUE(e->counters.pcnt))
+	pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(pcnt))
 		return -ENOMEM;
+	e->counters.pcnt = pcnt;
 
 	t = arpt_get_target(e);
 	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
 					t->u.user.revision);
 	if (IS_ERR(target)) {
-		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
 		ret = PTR_ERR(target);
 		goto out;
 	}
@@ -557,7 +464,7 @@
 	const struct xt_entry_target *t;
 	unsigned int verdict;
 
-	if (!unconditional(&e->arp))
+	if (!unconditional(e))
 		return false;
 	t = arpt_get_target_c(e);
 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -576,19 +483,24 @@
 					     unsigned int valid_hooks)
 {
 	unsigned int h;
+	int err;
 
 	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
-		duprintf("Bad offset %p\n", e);
+	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit)
 		return -EINVAL;
-	}
 
 	if (e->next_offset
-	    < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) {
-		duprintf("checking: element %p size %u\n",
-			 e, e->next_offset);
+	    < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target))
 		return -EINVAL;
-	}
+
+	if (!arp_checkentry(&e->arp))
+		return -EINVAL;
+
+	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+				     e->next_offset);
+	if (err)
+		return err;
 
 	/* Check hooks & underflows */
 	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
@@ -597,12 +509,9 @@
 		if ((unsigned char *)e - base == hook_entries[h])
 			newinfo->hook_entry[h] = hook_entries[h];
 		if ((unsigned char *)e - base == underflows[h]) {
-			if (!check_underflow(e)) {
-				pr_err("Underflows must be unconditional and "
-				       "use the STANDARD target with "
-				       "ACCEPT/DROP\n");
+			if (!check_underflow(e))
 				return -EINVAL;
-			}
+
 			newinfo->underflow[h] = underflows[h];
 		}
 	}
@@ -647,7 +556,6 @@
 		newinfo->underflow[i] = 0xFFFFFFFF;
 	}
 
-	duprintf("translate_table: size %u\n", newinfo->size);
 	i = 0;
 
 	/* Walk through entries, checking offsets. */
@@ -664,37 +572,25 @@
 		    XT_ERROR_TARGET) == 0)
 			++newinfo->stacksize;
 	}
-	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
 	if (ret != 0)
 		return ret;
 
-	if (i != repl->num_entries) {
-		duprintf("translate_table: %u not %u entries\n",
-			 i, repl->num_entries);
+	if (i != repl->num_entries)
 		return -EINVAL;
-	}
 
 	/* Check hooks all assigned */
 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
 		/* Only hooks which are valid */
 		if (!(repl->valid_hooks & (1 << i)))
 			continue;
-		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
-			duprintf("Invalid hook entry %u %u\n",
-				 i, repl->hook_entry[i]);
+		if (newinfo->hook_entry[i] == 0xFFFFFFFF)
 			return -EINVAL;
-		}
-		if (newinfo->underflow[i] == 0xFFFFFFFF) {
-			duprintf("Invalid underflow %u %u\n",
-				 i, repl->underflow[i]);
+		if (newinfo->underflow[i] == 0xFFFFFFFF)
 			return -EINVAL;
-		}
 	}
 
-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
-		duprintf("Looping hook\n");
+	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
 		return -ELOOP;
-	}
 
 	/* Finally, each sanity check must pass */
 	i = 0;
@@ -898,11 +794,8 @@
 	struct xt_table *t;
 	int ret;
 
-	if (*len != sizeof(struct arpt_getinfo)) {
-		duprintf("length %u != %Zu\n", *len,
-			 sizeof(struct arpt_getinfo));
+	if (*len != sizeof(struct arpt_getinfo))
 		return -EINVAL;
-	}
 
 	if (copy_from_user(name, user, sizeof(name)) != 0)
 		return -EFAULT;
@@ -958,32 +851,25 @@
 	struct arpt_get_entries get;
 	struct xt_table *t;
 
-	if (*len < sizeof(get)) {
-		duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
+	if (*len < sizeof(get))
 		return -EINVAL;
-	}
 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
 		return -EFAULT;
-	if (*len != sizeof(struct arpt_get_entries) + get.size) {
-		duprintf("get_entries: %u != %Zu\n", *len,
-			 sizeof(struct arpt_get_entries) + get.size);
+	if (*len != sizeof(struct arpt_get_entries) + get.size)
 		return -EINVAL;
-	}
+
+	get.name[sizeof(get.name) - 1] = '\0';
 
 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
 	if (!IS_ERR_OR_NULL(t)) {
 		const struct xt_table_info *private = t->private;
 
-		duprintf("t->private->number = %u\n",
-			 private->number);
 		if (get.size == private->size)
 			ret = copy_entries_to_user(private->size,
 						   t, uptr->entrytable);
-		else {
-			duprintf("get_entries: I've got %u not %u!\n",
-				 private->size, get.size);
+		else
 			ret = -EAGAIN;
-		}
+
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
@@ -1021,8 +907,6 @@
 
 	/* You lied! */
 	if (valid_hooks != t->valid_hooks) {
-		duprintf("Valid hook crap: %08X vs %08X\n",
-			 valid_hooks, t->valid_hooks);
 		ret = -EINVAL;
 		goto put_module;
 	}
@@ -1032,8 +916,6 @@
 		goto put_module;
 
 	/* Update module usage count based on number of rules */
-	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
-		oldinfo->number, oldinfo->initial_entries, newinfo->number);
 	if ((oldinfo->number > oldinfo->initial_entries) ||
 	    (newinfo->number <= oldinfo->initial_entries))
 		module_put(t->me);
@@ -1103,8 +985,6 @@
 	if (ret != 0)
 		goto free_newinfo;
 
-	duprintf("arp_tables: Translated table\n");
-
 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
 			   tmp.num_counters, tmp.counters);
 	if (ret)
@@ -1125,55 +1005,17 @@
 	unsigned int i;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
-	unsigned int num_counters;
-	const char *name;
-	int size;
-	void *ptmp;
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
 	struct arpt_entry *iter;
 	unsigned int addend;
-#ifdef CONFIG_COMPAT
-	struct compat_xt_counters_info compat_tmp;
 
-	if (compat) {
-		ptmp = &compat_tmp;
-		size = sizeof(struct compat_xt_counters_info);
-	} else
-#endif
-	{
-		ptmp = &tmp;
-		size = sizeof(struct xt_counters_info);
-	}
+	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+	if (IS_ERR(paddc))
+		return PTR_ERR(paddc);
 
-	if (copy_from_user(ptmp, user, size) != 0)
-		return -EFAULT;
-
-#ifdef CONFIG_COMPAT
-	if (compat) {
-		num_counters = compat_tmp.num_counters;
-		name = compat_tmp.name;
-	} else
-#endif
-	{
-		num_counters = tmp.num_counters;
-		name = tmp.name;
-	}
-
-	if (len != size + num_counters * sizeof(struct xt_counters))
-		return -EINVAL;
-
-	paddc = vmalloc(len - size);
-	if (!paddc)
-		return -ENOMEM;
-
-	if (copy_from_user(paddc, user + size, len - size) != 0) {
-		ret = -EFAULT;
-		goto free;
-	}
-
-	t = xt_find_table_lock(net, NFPROTO_ARP, name);
+	t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
 	if (IS_ERR_OR_NULL(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free;
@@ -1181,7 +1023,7 @@
 
 	local_bh_disable();
 	private = t->private;
-	if (private->number != num_counters) {
+	if (private->number != tmp.num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
@@ -1208,6 +1050,18 @@
 }
 
 #ifdef CONFIG_COMPAT
+struct compat_arpt_replace {
+	char				name[XT_TABLE_MAXNAMELEN];
+	u32				valid_hooks;
+	u32				num_entries;
+	u32				size;
+	u32				hook_entry[NF_ARP_NUMHOOKS];
+	u32				underflow[NF_ARP_NUMHOOKS];
+	u32				num_counters;
+	compat_uptr_t			counters;
+	struct compat_arpt_entry	entries[0];
+};
+
 static inline void compat_release_entry(struct compat_arpt_entry *e)
 {
 	struct xt_entry_target *t;
@@ -1216,37 +1070,32 @@
 	module_put(t->u.kernel.target->me);
 }
 
-static inline int
+static int
 check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
 				  struct xt_table_info *newinfo,
 				  unsigned int *size,
 				  const unsigned char *base,
-				  const unsigned char *limit,
-				  const unsigned int *hook_entries,
-				  const unsigned int *underflows,
-				  const char *name)
+				  const unsigned char *limit)
 {
 	struct xt_entry_target *t;
 	struct xt_target *target;
 	unsigned int entry_offset;
-	int ret, off, h;
+	int ret, off;
 
-	duprintf("check_compat_entry_size_and_hooks %p\n", e);
 	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
-		duprintf("Bad offset %p, limit = %p\n", e, limit);
+	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit)
 		return -EINVAL;
-	}
 
 	if (e->next_offset < sizeof(struct compat_arpt_entry) +
-			     sizeof(struct compat_xt_entry_target)) {
-		duprintf("checking: element %p size %u\n",
-			 e, e->next_offset);
+			     sizeof(struct compat_xt_entry_target))
 		return -EINVAL;
-	}
 
-	/* For purposes of check_entry casting the compat entry is fine */
-	ret = check_entry((struct arpt_entry *)e, name);
+	if (!arp_checkentry(&e->arp))
+		return -EINVAL;
+
+	ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
+					    e->next_offset);
 	if (ret)
 		return ret;
 
@@ -1257,8 +1106,6 @@
 	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
 					t->u.user.revision);
 	if (IS_ERR(target)) {
-		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
-			 t->u.user.name);
 		ret = PTR_ERR(target);
 		goto out;
 	}
@@ -1270,17 +1117,6 @@
 	if (ret)
 		goto release_target;
 
-	/* Check hooks & underflows */
-	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
-		if ((unsigned char *)e - base == hook_entries[h])
-			newinfo->hook_entry[h] = hook_entries[h];
-		if ((unsigned char *)e - base == underflows[h])
-			newinfo->underflow[h] = underflows[h];
-	}
-
-	/* Clear counters and comefrom */
-	memset(&e->counters, 0, sizeof(e->counters));
-	e->comefrom = 0;
 	return 0;
 
 release_target:
@@ -1289,18 +1125,17 @@
 	return ret;
 }
 
-static int
+static void
 compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
-			    unsigned int *size, const char *name,
+			    unsigned int *size,
 			    struct xt_table_info *newinfo, unsigned char *base)
 {
 	struct xt_entry_target *t;
 	struct xt_target *target;
 	struct arpt_entry *de;
 	unsigned int origsize;
-	int ret, h;
+	int h;
 
-	ret = 0;
 	origsize = *size;
 	de = (struct arpt_entry *)*dstptr;
 	memcpy(de, e, sizeof(struct arpt_entry));
@@ -1321,148 +1156,78 @@
 		if ((unsigned char *)de - base < newinfo->underflow[h])
 			newinfo->underflow[h] -= origsize - *size;
 	}
-	return ret;
 }
 
-static int translate_compat_table(const char *name,
-				  unsigned int valid_hooks,
-				  struct xt_table_info **pinfo,
+static int translate_compat_table(struct xt_table_info **pinfo,
 				  void **pentry0,
-				  unsigned int total_size,
-				  unsigned int number,
-				  unsigned int *hook_entries,
-				  unsigned int *underflows)
+				  const struct compat_arpt_replace *compatr)
 {
 	unsigned int i, j;
 	struct xt_table_info *newinfo, *info;
 	void *pos, *entry0, *entry1;
 	struct compat_arpt_entry *iter0;
-	struct arpt_entry *iter1;
+	struct arpt_replace repl;
 	unsigned int size;
 	int ret = 0;
 
 	info = *pinfo;
 	entry0 = *pentry0;
-	size = total_size;
-	info->number = number;
+	size = compatr->size;
+	info->number = compatr->num_entries;
 
-	/* Init all hooks to impossible value. */
-	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
-		info->hook_entry[i] = 0xFFFFFFFF;
-		info->underflow[i] = 0xFFFFFFFF;
-	}
-
-	duprintf("translate_compat_table: size %u\n", info->size);
 	j = 0;
 	xt_compat_lock(NFPROTO_ARP);
-	xt_compat_init_offsets(NFPROTO_ARP, number);
+	xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
 	/* Walk through entries, checking offsets. */
-	xt_entry_foreach(iter0, entry0, total_size) {
+	xt_entry_foreach(iter0, entry0, compatr->size) {
 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
 							entry0,
-							entry0 + total_size,
-							hook_entries,
-							underflows,
-							name);
+							entry0 + compatr->size);
 		if (ret != 0)
 			goto out_unlock;
 		++j;
 	}
 
 	ret = -EINVAL;
-	if (j != number) {
-		duprintf("translate_compat_table: %u not %u entries\n",
-			 j, number);
+	if (j != compatr->num_entries)
 		goto out_unlock;
-	}
-
-	/* Check hooks all assigned */
-	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
-		/* Only hooks which are valid */
-		if (!(valid_hooks & (1 << i)))
-			continue;
-		if (info->hook_entry[i] == 0xFFFFFFFF) {
-			duprintf("Invalid hook entry %u %u\n",
-				 i, hook_entries[i]);
-			goto out_unlock;
-		}
-		if (info->underflow[i] == 0xFFFFFFFF) {
-			duprintf("Invalid underflow %u %u\n",
-				 i, underflows[i]);
-			goto out_unlock;
-		}
-	}
 
 	ret = -ENOMEM;
 	newinfo = xt_alloc_table_info(size);
 	if (!newinfo)
 		goto out_unlock;
 
-	newinfo->number = number;
+	newinfo->number = compatr->num_entries;
 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
 		newinfo->hook_entry[i] = info->hook_entry[i];
 		newinfo->underflow[i] = info->underflow[i];
 	}
 	entry1 = newinfo->entries;
 	pos = entry1;
-	size = total_size;
-	xt_entry_foreach(iter0, entry0, total_size) {
-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
-						  name, newinfo, entry1);
-		if (ret != 0)
-			break;
-	}
+	size = compatr->size;
+	xt_entry_foreach(iter0, entry0, compatr->size)
+		compat_copy_entry_from_user(iter0, &pos, &size,
+					    newinfo, entry1);
+
+	/* all module references in entry0 are now gone */
+
 	xt_compat_flush_offsets(NFPROTO_ARP);
 	xt_compat_unlock(NFPROTO_ARP);
+
+	memcpy(&repl, compatr, sizeof(*compatr));
+
+	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+		repl.hook_entry[i] = newinfo->hook_entry[i];
+		repl.underflow[i] = newinfo->underflow[i];
+	}
+
+	repl.num_counters = 0;
+	repl.counters = NULL;
+	repl.size = newinfo->size;
+	ret = translate_table(newinfo, entry1, &repl);
 	if (ret)
 		goto free_newinfo;
 
-	ret = -ELOOP;
-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
-		goto free_newinfo;
-
-	i = 0;
-	xt_entry_foreach(iter1, entry1, newinfo->size) {
-		iter1->counters.pcnt = xt_percpu_counter_alloc();
-		if (IS_ERR_VALUE(iter1->counters.pcnt)) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		ret = check_target(iter1, name);
-		if (ret != 0) {
-			xt_percpu_counter_free(iter1->counters.pcnt);
-			break;
-		}
-		++i;
-		if (strcmp(arpt_get_target(iter1)->u.user.name,
-		    XT_ERROR_TARGET) == 0)
-			++newinfo->stacksize;
-	}
-	if (ret) {
-		/*
-		 * The first i matches need cleanup_entry (calls ->destroy)
-		 * because they had called ->check already. The other j-i
-		 * entries need only release.
-		 */
-		int skip = i;
-		j -= i;
-		xt_entry_foreach(iter0, entry0, newinfo->size) {
-			if (skip-- > 0)
-				continue;
-			if (j-- == 0)
-				break;
-			compat_release_entry(iter0);
-		}
-		xt_entry_foreach(iter1, entry1, newinfo->size) {
-			if (i-- == 0)
-				break;
-			cleanup_entry(iter1);
-		}
-		xt_free_table_info(newinfo);
-		return ret;
-	}
-
 	*pinfo = newinfo;
 	*pentry0 = entry1;
 	xt_free_table_info(info);
@@ -1470,31 +1235,18 @@
 
 free_newinfo:
 	xt_free_table_info(newinfo);
-out:
-	xt_entry_foreach(iter0, entry0, total_size) {
+	return ret;
+out_unlock:
+	xt_compat_flush_offsets(NFPROTO_ARP);
+	xt_compat_unlock(NFPROTO_ARP);
+	xt_entry_foreach(iter0, entry0, compatr->size) {
 		if (j-- == 0)
 			break;
 		compat_release_entry(iter0);
 	}
 	return ret;
-out_unlock:
-	xt_compat_flush_offsets(NFPROTO_ARP);
-	xt_compat_unlock(NFPROTO_ARP);
-	goto out;
 }
 
-struct compat_arpt_replace {
-	char				name[XT_TABLE_MAXNAMELEN];
-	u32				valid_hooks;
-	u32				num_entries;
-	u32				size;
-	u32				hook_entry[NF_ARP_NUMHOOKS];
-	u32				underflow[NF_ARP_NUMHOOKS];
-	u32				num_counters;
-	compat_uptr_t			counters;
-	struct compat_arpt_entry	entries[0];
-};
-
 static int compat_do_replace(struct net *net, void __user *user,
 			     unsigned int len)
 {
@@ -1508,8 +1260,6 @@
 		return -EFAULT;
 
 	/* overflow check */
-	if (tmp.size >= INT_MAX / num_possible_cpus())
-		return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
 	if (tmp.num_counters == 0)
@@ -1527,15 +1277,10 @@
 		goto free_newinfo;
 	}
 
-	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
-				     &newinfo, &loc_cpu_entry, tmp.size,
-				     tmp.num_entries, tmp.hook_entry,
-				     tmp.underflow);
+	ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
 	if (ret != 0)
 		goto free_newinfo;
 
-	duprintf("compat_do_replace: Translated table\n");
-
 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
 			   tmp.num_counters, compat_ptr(tmp.counters));
 	if (ret)
@@ -1568,7 +1313,6 @@
 		break;
 
 	default:
-		duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
 		ret = -EINVAL;
 	}
 
@@ -1651,17 +1395,14 @@
 	struct compat_arpt_get_entries get;
 	struct xt_table *t;
 
-	if (*len < sizeof(get)) {
-		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
+	if (*len < sizeof(get))
 		return -EINVAL;
-	}
 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
 		return -EFAULT;
-	if (*len != sizeof(struct compat_arpt_get_entries) + get.size) {
-		duprintf("compat_get_entries: %u != %zu\n",
-			 *len, sizeof(get) + get.size);
+	if (*len != sizeof(struct compat_arpt_get_entries) + get.size)
 		return -EINVAL;
-	}
+
+	get.name[sizeof(get.name) - 1] = '\0';
 
 	xt_compat_lock(NFPROTO_ARP);
 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
@@ -1669,16 +1410,13 @@
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
 
-		duprintf("t->private->number = %u\n", private->number);
 		ret = compat_table_info(private, &info);
 		if (!ret && get.size == info.size) {
 			ret = compat_copy_entries_to_user(private->size,
 							  t, uptr->entrytable);
-		} else if (!ret) {
-			duprintf("compat_get_entries: I've got %u not %u!\n",
-				 private->size, get.size);
+		} else if (!ret)
 			ret = -EAGAIN;
-		}
+
 		xt_compat_flush_offsets(NFPROTO_ARP);
 		module_put(t->me);
 		xt_table_unlock(t);
@@ -1730,7 +1468,6 @@
 		break;
 
 	default:
-		duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
 		ret = -EINVAL;
 	}
 
@@ -1773,7 +1510,6 @@
 	}
 
 	default:
-		duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
 		ret = -EINVAL;
 	}
 
@@ -1818,7 +1554,6 @@
 	memcpy(loc_cpu_entry, repl->entries, repl->size);
 
 	ret = translate_table(newinfo, loc_cpu_entry, repl);
-	duprintf("arpt_register_table: translate table gives %d\n", ret);
 	if (ret != 0)
 		goto out_free;
 
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index dd8c80d..8f8713b 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -81,6 +81,12 @@
 		return ret;
 	}
 
+	ret = arptable_filter_table_init(&init_net);
+	if (ret) {
+		unregister_pernet_subsys(&arptable_filter_net_ops);
+		kfree(arpfilter_ops);
+	}
+
 	return ret;
 }
 
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e53f8d6..54906e0 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -35,34 +35,12 @@
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
 MODULE_DESCRIPTION("IPv4 packet filter");
 
-/*#define DEBUG_IP_FIREWALL*/
-/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
-/*#define DEBUG_IP_FIREWALL_USER*/
-
-#ifdef DEBUG_IP_FIREWALL
-#define dprintf(format, args...) pr_info(format , ## args)
-#else
-#define dprintf(format, args...)
-#endif
-
-#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) pr_info(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
 #ifdef CONFIG_NETFILTER_DEBUG
 #define IP_NF_ASSERT(x)		WARN_ON(!(x))
 #else
 #define IP_NF_ASSERT(x)
 #endif
 
-#if 0
-/* All the better to debug you with... */
-#define static
-#define inline
-#endif
-
 void *ipt_alloc_initial_table(const struct xt_table *info)
 {
 	return xt_alloc_initial_table(ipt, IPT);
@@ -85,52 +63,28 @@
 	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
 		  IPT_INV_SRCIP) ||
 	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
-		  IPT_INV_DSTIP)) {
-		dprintf("Source or dest mismatch.\n");
-
-		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
-			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
-			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
-		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
-			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
-			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
+		  IPT_INV_DSTIP))
 		return false;
-	}
 
 	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
 
-	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
-		dprintf("VIA in mismatch (%s vs %s).%s\n",
-			indev, ipinfo->iniface,
-			ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : "");
+	if (FWINV(ret != 0, IPT_INV_VIA_IN))
 		return false;
-	}
 
 	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
 
-	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
-		dprintf("VIA out mismatch (%s vs %s).%s\n",
-			outdev, ipinfo->outiface,
-			ipinfo->invflags & IPT_INV_VIA_OUT ? " (INV)" : "");
+	if (FWINV(ret != 0, IPT_INV_VIA_OUT))
 		return false;
-	}
 
 	/* Check specific protocol */
 	if (ipinfo->proto &&
-	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
-		dprintf("Packet protocol %hi does not match %hi.%s\n",
-			ip->protocol, ipinfo->proto,
-			ipinfo->invflags & IPT_INV_PROTO ? " (INV)" : "");
+	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO))
 		return false;
-	}
 
 	/* If we have a fragment rule but the packet is not a fragment
 	 * then we return zero */
-	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
-		dprintf("Fragment rule but not fragment.%s\n",
-			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
+	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG))
 		return false;
-	}
 
 	return true;
 }
@@ -138,16 +92,10 @@
 static bool
 ip_checkentry(const struct ipt_ip *ip)
 {
-	if (ip->flags & ~IPT_F_MASK) {
-		duprintf("Unknown flag bits set: %08X\n",
-			 ip->flags & ~IPT_F_MASK);
+	if (ip->flags & ~IPT_F_MASK)
 		return false;
-	}
-	if (ip->invflags & ~IPT_INV_MASK) {
-		duprintf("Unknown invflag bits set: %08X\n",
-			 ip->invflags & ~IPT_INV_MASK);
+	if (ip->invflags & ~IPT_INV_MASK)
 		return false;
-	}
 	return true;
 }
 
@@ -168,11 +116,12 @@
 
 /* All zeroes == unconditional rule. */
 /* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ipt_ip *ip)
+static inline bool unconditional(const struct ipt_entry *e)
 {
 	static const struct ipt_ip uncond;
 
-	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
+	return e->target_offset == sizeof(struct ipt_entry) &&
+	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
 #undef FWINV
 }
 
@@ -229,11 +178,10 @@
 	} else if (s == e) {
 		(*rulenum)++;
 
-		if (s->target_offset == sizeof(struct ipt_entry) &&
+		if (unconditional(s) &&
 		    strcmp(t->target.u.kernel.target->name,
 			   XT_STANDARD_TARGET) == 0 &&
-		   t->verdict < 0 &&
-		   unconditional(&s->ip)) {
+		   t->verdict < 0) {
 			/* Tail of chains: STANDARD target (return/policy) */
 			*comment = *chainname == hookname
 				? comments[NF_IP_TRACE_COMMENT_POLICY]
@@ -346,10 +294,6 @@
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 
-	pr_debug("Entering %s(hook %u), UF %p\n",
-		 table->name, hook,
-		 get_entry(table_base, private->underflow[hook]));
-
 	do {
 		const struct xt_entry_target *t;
 		const struct xt_entry_match *ematch;
@@ -396,22 +340,15 @@
 				if (stackidx == 0) {
 					e = get_entry(table_base,
 					    private->underflow[hook]);
-					pr_debug("Underflow (this is normal) "
-						 "to %p\n", e);
 				} else {
 					e = jumpstack[--stackidx];
-					pr_debug("Pulled %p out from pos %u\n",
-						 e, stackidx);
 					e = ipt_next_entry(e);
 				}
 				continue;
 			}
 			if (table_base + v != ipt_next_entry(e) &&
-			    !(e->ip.flags & IPT_F_GOTO)) {
+			    !(e->ip.flags & IPT_F_GOTO))
 				jumpstack[stackidx++] = e;
-				pr_debug("Pushed %p into pos %u\n",
-					 e, stackidx - 1);
-			}
 
 			e = get_entry(table_base, v);
 			continue;
@@ -429,18 +366,25 @@
 			/* Verdict */
 			break;
 	} while (!acpar.hotdrop);
-	pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);
 
 	xt_write_recseq_end(addend);
 	local_bh_enable();
 
-#ifdef DEBUG_ALLOW_ALL
-	return NF_ACCEPT;
-#else
 	if (acpar.hotdrop)
 		return NF_DROP;
 	else return verdict;
-#endif
+}
+
+static bool find_jump_target(const struct xt_table_info *t,
+			     const struct ipt_entry *target)
+{
+	struct ipt_entry *iter;
+
+	xt_entry_foreach(iter, t->entries, t->size) {
+		if (iter == target)
+			return true;
+	}
+	return false;
 }
 
 /* Figures out from what hook each rule can be called: returns 0 if
@@ -468,43 +412,27 @@
 				= (void *)ipt_get_target_c(e);
 			int visited = e->comefrom & (1 << hook);
 
-			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
-				pr_err("iptables: loop hook %u pos %u %08X.\n",
-				       hook, pos, e->comefrom);
+			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
 				return 0;
-			}
+
 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
 
 			/* Unconditional return/END. */
-			if ((e->target_offset == sizeof(struct ipt_entry) &&
+			if ((unconditional(e) &&
 			     (strcmp(t->target.u.user.name,
 				     XT_STANDARD_TARGET) == 0) &&
-			     t->verdict < 0 && unconditional(&e->ip)) ||
-			    visited) {
+			     t->verdict < 0) || visited) {
 				unsigned int oldpos, size;
 
 				if ((strcmp(t->target.u.user.name,
 					    XT_STANDARD_TARGET) == 0) &&
-				    t->verdict < -NF_MAX_VERDICT - 1) {
-					duprintf("mark_source_chains: bad "
-						"negative verdict (%i)\n",
-								t->verdict);
+				    t->verdict < -NF_MAX_VERDICT - 1)
 					return 0;
-				}
 
 				/* Return: backtrack through the last
 				   big jump. */
 				do {
 					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
-#ifdef DEBUG_IP_FIREWALL_USER
-					if (e->comefrom
-					    & (1 << NF_INET_NUMHOOKS)) {
-						duprintf("Back unset "
-							 "on hook %u "
-							 "rule %u\n",
-							 hook, pos);
-					}
-#endif
 					oldpos = pos;
 					pos = e->counters.pcnt;
 					e->counters.pcnt = 0;
@@ -521,6 +449,8 @@
 				size = e->next_offset;
 				e = (struct ipt_entry *)
 					(entry0 + pos + size);
+				if (pos + size >= newinfo->size)
+					return 0;
 				e->counters.pcnt = pos;
 				pos += size;
 			} else {
@@ -529,19 +459,16 @@
 				if (strcmp(t->target.u.user.name,
 					   XT_STANDARD_TARGET) == 0 &&
 				    newpos >= 0) {
-					if (newpos > newinfo->size -
-						sizeof(struct ipt_entry)) {
-						duprintf("mark_source_chains: "
-							"bad verdict (%i)\n",
-								newpos);
-						return 0;
-					}
 					/* This a jump; chase it. */
-					duprintf("Jump rule %u -> %u\n",
-						 pos, newpos);
+					e = (struct ipt_entry *)
+						(entry0 + newpos);
+					if (!find_jump_target(newinfo, e))
+						return 0;
 				} else {
 					/* ... this is a fallthru */
 					newpos = pos + e->next_offset;
+					if (newpos >= newinfo->size)
+						return 0;
 				}
 				e = (struct ipt_entry *)
 					(entry0 + newpos);
@@ -549,8 +476,7 @@
 				pos = newpos;
 			}
 		}
-next:
-		duprintf("Finished chain %u\n", hook);
+next:		;
 	}
 	return 1;
 }
@@ -569,42 +495,15 @@
 }
 
 static int
-check_entry(const struct ipt_entry *e, const char *name)
-{
-	const struct xt_entry_target *t;
-
-	if (!ip_checkentry(&e->ip)) {
-		duprintf("ip check failed %p %s.\n", e, name);
-		return -EINVAL;
-	}
-
-	if (e->target_offset + sizeof(struct xt_entry_target) >
-	    e->next_offset)
-		return -EINVAL;
-
-	t = ipt_get_target_c(e);
-	if (e->target_offset + t->u.target_size > e->next_offset)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int
 check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
 {
 	const struct ipt_ip *ip = par->entryinfo;
-	int ret;
 
 	par->match     = m->u.kernel.match;
 	par->matchinfo = m->data;
 
-	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
-	      ip->proto, ip->invflags & IPT_INV_PROTO);
-	if (ret < 0) {
-		duprintf("check failed for `%s'.\n", par->match->name);
-		return ret;
-	}
-	return 0;
+	return xt_check_match(par, m->u.match_size - sizeof(*m),
+			      ip->proto, ip->invflags & IPT_INV_PROTO);
 }
 
 static int
@@ -615,10 +514,8 @@
 
 	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
 				      m->u.user.revision);
-	if (IS_ERR(match)) {
-		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
+	if (IS_ERR(match))
 		return PTR_ERR(match);
-	}
 	m->u.kernel.match = match;
 
 	ret = check_match(m, par);
@@ -643,16 +540,9 @@
 		.hook_mask = e->comefrom,
 		.family    = NFPROTO_IPV4,
 	};
-	int ret;
 
-	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
-	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
-	if (ret < 0) {
-		duprintf("check failed for `%s'.\n",
-			 t->u.kernel.target->name);
-		return ret;
-	}
-	return 0;
+	return xt_check_target(&par, t->u.target_size - sizeof(*t),
+			       e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
 }
 
 static int
@@ -665,14 +555,12 @@
 	unsigned int j;
 	struct xt_mtchk_param mtpar;
 	struct xt_entry_match *ematch;
+	unsigned long pcnt;
 
-	ret = check_entry(e, name);
-	if (ret)
-		return ret;
-
-	e->counters.pcnt = xt_percpu_counter_alloc();
-	if (IS_ERR_VALUE(e->counters.pcnt))
+	pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(pcnt))
 		return -ENOMEM;
+	e->counters.pcnt = pcnt;
 
 	j = 0;
 	mtpar.net	= net;
@@ -691,7 +579,6 @@
 	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
 					t->u.user.revision);
 	if (IS_ERR(target)) {
-		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
 		ret = PTR_ERR(target);
 		goto cleanup_matches;
 	}
@@ -721,7 +608,7 @@
 	const struct xt_entry_target *t;
 	unsigned int verdict;
 
-	if (!unconditional(&e->ip))
+	if (!unconditional(e))
 		return false;
 	t = ipt_get_target_c(e);
 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -741,19 +628,24 @@
 			   unsigned int valid_hooks)
 {
 	unsigned int h;
+	int err;
 
 	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
-		duprintf("Bad offset %p\n", e);
+	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit)
 		return -EINVAL;
-	}
 
 	if (e->next_offset
-	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
-		duprintf("checking: element %p size %u\n",
-			 e, e->next_offset);
+	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target))
 		return -EINVAL;
-	}
+
+	if (!ip_checkentry(&e->ip))
+		return -EINVAL;
+
+	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+				     e->next_offset);
+	if (err)
+		return err;
 
 	/* Check hooks & underflows */
 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
@@ -762,12 +654,9 @@
 		if ((unsigned char *)e - base == hook_entries[h])
 			newinfo->hook_entry[h] = hook_entries[h];
 		if ((unsigned char *)e - base == underflows[h]) {
-			if (!check_underflow(e)) {
-				pr_err("Underflows must be unconditional and "
-				       "use the STANDARD target with "
-				       "ACCEPT/DROP\n");
+			if (!check_underflow(e))
 				return -EINVAL;
-			}
+
 			newinfo->underflow[h] = underflows[h];
 		}
 	}
@@ -819,7 +708,6 @@
 		newinfo->underflow[i] = 0xFFFFFFFF;
 	}
 
-	duprintf("translate_table: size %u\n", newinfo->size);
 	i = 0;
 	/* Walk through entries, checking offsets. */
 	xt_entry_foreach(iter, entry0, newinfo->size) {
@@ -836,27 +724,18 @@
 			++newinfo->stacksize;
 	}
 
-	if (i != repl->num_entries) {
-		duprintf("translate_table: %u not %u entries\n",
-			 i, repl->num_entries);
+	if (i != repl->num_entries)
 		return -EINVAL;
-	}
 
 	/* Check hooks all assigned */
 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
 		/* Only hooks which are valid */
 		if (!(repl->valid_hooks & (1 << i)))
 			continue;
-		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
-			duprintf("Invalid hook entry %u %u\n",
-				 i, repl->hook_entry[i]);
+		if (newinfo->hook_entry[i] == 0xFFFFFFFF)
 			return -EINVAL;
-		}
-		if (newinfo->underflow[i] == 0xFFFFFFFF) {
-			duprintf("Invalid underflow %u %u\n",
-				 i, repl->underflow[i]);
+		if (newinfo->underflow[i] == 0xFFFFFFFF)
 			return -EINVAL;
-		}
 	}
 
 	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
@@ -1084,11 +963,8 @@
 	struct xt_table *t;
 	int ret;
 
-	if (*len != sizeof(struct ipt_getinfo)) {
-		duprintf("length %u != %zu\n", *len,
-			 sizeof(struct ipt_getinfo));
+	if (*len != sizeof(struct ipt_getinfo))
 		return -EINVAL;
-	}
 
 	if (copy_from_user(name, user, sizeof(name)) != 0)
 		return -EFAULT;
@@ -1146,30 +1022,23 @@
 	struct ipt_get_entries get;
 	struct xt_table *t;
 
-	if (*len < sizeof(get)) {
-		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
+	if (*len < sizeof(get))
 		return -EINVAL;
-	}
 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
 		return -EFAULT;
-	if (*len != sizeof(struct ipt_get_entries) + get.size) {
-		duprintf("get_entries: %u != %zu\n",
-			 *len, sizeof(get) + get.size);
+	if (*len != sizeof(struct ipt_get_entries) + get.size)
 		return -EINVAL;
-	}
+	get.name[sizeof(get.name) - 1] = '\0';
 
 	t = xt_find_table_lock(net, AF_INET, get.name);
 	if (!IS_ERR_OR_NULL(t)) {
 		const struct xt_table_info *private = t->private;
-		duprintf("t->private->number = %u\n", private->number);
 		if (get.size == private->size)
 			ret = copy_entries_to_user(private->size,
 						   t, uptr->entrytable);
-		else {
-			duprintf("get_entries: I've got %u not %u!\n",
-				 private->size, get.size);
+		else
 			ret = -EAGAIN;
-		}
+
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
@@ -1205,8 +1074,6 @@
 
 	/* You lied! */
 	if (valid_hooks != t->valid_hooks) {
-		duprintf("Valid hook crap: %08X vs %08X\n",
-			 valid_hooks, t->valid_hooks);
 		ret = -EINVAL;
 		goto put_module;
 	}
@@ -1216,8 +1083,6 @@
 		goto put_module;
 
 	/* Update module usage count based on number of rules */
-	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
-		oldinfo->number, oldinfo->initial_entries, newinfo->number);
 	if ((oldinfo->number > oldinfo->initial_entries) ||
 	    (newinfo->number <= oldinfo->initial_entries))
 		module_put(t->me);
@@ -1286,8 +1151,6 @@
 	if (ret != 0)
 		goto free_newinfo;
 
-	duprintf("Translated table\n");
-
 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
 			   tmp.num_counters, tmp.counters);
 	if (ret)
@@ -1309,55 +1172,17 @@
 	unsigned int i;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
-	unsigned int num_counters;
-	const char *name;
-	int size;
-	void *ptmp;
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
 	struct ipt_entry *iter;
 	unsigned int addend;
-#ifdef CONFIG_COMPAT
-	struct compat_xt_counters_info compat_tmp;
 
-	if (compat) {
-		ptmp = &compat_tmp;
-		size = sizeof(struct compat_xt_counters_info);
-	} else
-#endif
-	{
-		ptmp = &tmp;
-		size = sizeof(struct xt_counters_info);
-	}
+	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+	if (IS_ERR(paddc))
+		return PTR_ERR(paddc);
 
-	if (copy_from_user(ptmp, user, size) != 0)
-		return -EFAULT;
-
-#ifdef CONFIG_COMPAT
-	if (compat) {
-		num_counters = compat_tmp.num_counters;
-		name = compat_tmp.name;
-	} else
-#endif
-	{
-		num_counters = tmp.num_counters;
-		name = tmp.name;
-	}
-
-	if (len != size + num_counters * sizeof(struct xt_counters))
-		return -EINVAL;
-
-	paddc = vmalloc(len - size);
-	if (!paddc)
-		return -ENOMEM;
-
-	if (copy_from_user(paddc, user + size, len - size) != 0) {
-		ret = -EFAULT;
-		goto free;
-	}
-
-	t = xt_find_table_lock(net, AF_INET, name);
+	t = xt_find_table_lock(net, AF_INET, tmp.name);
 	if (IS_ERR_OR_NULL(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free;
@@ -1365,7 +1190,7 @@
 
 	local_bh_disable();
 	private = t->private;
-	if (private->number != num_counters) {
+	if (private->number != tmp.num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
@@ -1444,7 +1269,6 @@
 
 static int
 compat_find_calc_match(struct xt_entry_match *m,
-		       const char *name,
 		       const struct ipt_ip *ip,
 		       int *size)
 {
@@ -1452,11 +1276,9 @@
 
 	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
 				      m->u.user.revision);
-	if (IS_ERR(match)) {
-		duprintf("compat_check_calc_match: `%s' not found\n",
-			 m->u.user.name);
+	if (IS_ERR(match))
 		return PTR_ERR(match);
-	}
+
 	m->u.kernel.match = match;
 	*size += xt_compat_match_offset(match);
 	return 0;
@@ -1479,34 +1301,29 @@
 				  struct xt_table_info *newinfo,
 				  unsigned int *size,
 				  const unsigned char *base,
-				  const unsigned char *limit,
-				  const unsigned int *hook_entries,
-				  const unsigned int *underflows,
-				  const char *name)
+				  const unsigned char *limit)
 {
 	struct xt_entry_match *ematch;
 	struct xt_entry_target *t;
 	struct xt_target *target;
 	unsigned int entry_offset;
 	unsigned int j;
-	int ret, off, h;
+	int ret, off;
 
-	duprintf("check_compat_entry_size_and_hooks %p\n", e);
 	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
-		duprintf("Bad offset %p, limit = %p\n", e, limit);
+	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit)
 		return -EINVAL;
-	}
 
 	if (e->next_offset < sizeof(struct compat_ipt_entry) +
-			     sizeof(struct compat_xt_entry_target)) {
-		duprintf("checking: element %p size %u\n",
-			 e, e->next_offset);
+			     sizeof(struct compat_xt_entry_target))
 		return -EINVAL;
-	}
 
-	/* For purposes of check_entry casting the compat entry is fine */
-	ret = check_entry((struct ipt_entry *)e, name);
+	if (!ip_checkentry(&e->ip))
+		return -EINVAL;
+
+	ret = xt_compat_check_entry_offsets(e, e->elems,
+					    e->target_offset, e->next_offset);
 	if (ret)
 		return ret;
 
@@ -1514,7 +1331,7 @@
 	entry_offset = (void *)e - (void *)base;
 	j = 0;
 	xt_ematch_foreach(ematch, e) {
-		ret = compat_find_calc_match(ematch, name, &e->ip, &off);
+		ret = compat_find_calc_match(ematch, &e->ip, &off);
 		if (ret != 0)
 			goto release_matches;
 		++j;
@@ -1524,8 +1341,6 @@
 	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
 					t->u.user.revision);
 	if (IS_ERR(target)) {
-		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
-			 t->u.user.name);
 		ret = PTR_ERR(target);
 		goto release_matches;
 	}
@@ -1537,17 +1352,6 @@
 	if (ret)
 		goto out;
 
-	/* Check hooks & underflows */
-	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
-		if ((unsigned char *)e - base == hook_entries[h])
-			newinfo->hook_entry[h] = hook_entries[h];
-		if ((unsigned char *)e - base == underflows[h])
-			newinfo->underflow[h] = underflows[h];
-	}
-
-	/* Clear counters and comefrom */
-	memset(&e->counters, 0, sizeof(e->counters));
-	e->comefrom = 0;
 	return 0;
 
 out:
@@ -1561,19 +1365,18 @@
 	return ret;
 }
 
-static int
+static void
 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
-			    unsigned int *size, const char *name,
+			    unsigned int *size,
 			    struct xt_table_info *newinfo, unsigned char *base)
 {
 	struct xt_entry_target *t;
 	struct xt_target *target;
 	struct ipt_entry *de;
 	unsigned int origsize;
-	int ret, h;
+	int h;
 	struct xt_entry_match *ematch;
 
-	ret = 0;
 	origsize = *size;
 	de = (struct ipt_entry *)*dstptr;
 	memcpy(de, e, sizeof(struct ipt_entry));
@@ -1582,201 +1385,101 @@
 	*dstptr += sizeof(struct ipt_entry);
 	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
 
-	xt_ematch_foreach(ematch, e) {
-		ret = xt_compat_match_from_user(ematch, dstptr, size);
-		if (ret != 0)
-			return ret;
-	}
+	xt_ematch_foreach(ematch, e)
+		xt_compat_match_from_user(ematch, dstptr, size);
+
 	de->target_offset = e->target_offset - (origsize - *size);
 	t = compat_ipt_get_target(e);
 	target = t->u.kernel.target;
 	xt_compat_target_from_user(t, dstptr, size);
 
 	de->next_offset = e->next_offset - (origsize - *size);
+
 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
 			newinfo->hook_entry[h] -= origsize - *size;
 		if ((unsigned char *)de - base < newinfo->underflow[h])
 			newinfo->underflow[h] -= origsize - *size;
 	}
-	return ret;
-}
-
-static int
-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
-{
-	struct xt_entry_match *ematch;
-	struct xt_mtchk_param mtpar;
-	unsigned int j;
-	int ret = 0;
-
-	e->counters.pcnt = xt_percpu_counter_alloc();
-	if (IS_ERR_VALUE(e->counters.pcnt))
-		return -ENOMEM;
-
-	j = 0;
-	mtpar.net	= net;
-	mtpar.table     = name;
-	mtpar.entryinfo = &e->ip;
-	mtpar.hook_mask = e->comefrom;
-	mtpar.family    = NFPROTO_IPV4;
-	xt_ematch_foreach(ematch, e) {
-		ret = check_match(ematch, &mtpar);
-		if (ret != 0)
-			goto cleanup_matches;
-		++j;
-	}
-
-	ret = check_target(e, net, name);
-	if (ret)
-		goto cleanup_matches;
-	return 0;
-
- cleanup_matches:
-	xt_ematch_foreach(ematch, e) {
-		if (j-- == 0)
-			break;
-		cleanup_match(ematch, net);
-	}
-
-	xt_percpu_counter_free(e->counters.pcnt);
-
-	return ret;
 }
 
 static int
 translate_compat_table(struct net *net,
-		       const char *name,
-		       unsigned int valid_hooks,
 		       struct xt_table_info **pinfo,
 		       void **pentry0,
-		       unsigned int total_size,
-		       unsigned int number,
-		       unsigned int *hook_entries,
-		       unsigned int *underflows)
+		       const struct compat_ipt_replace *compatr)
 {
 	unsigned int i, j;
 	struct xt_table_info *newinfo, *info;
 	void *pos, *entry0, *entry1;
 	struct compat_ipt_entry *iter0;
-	struct ipt_entry *iter1;
+	struct ipt_replace repl;
 	unsigned int size;
 	int ret;
 
 	info = *pinfo;
 	entry0 = *pentry0;
-	size = total_size;
-	info->number = number;
+	size = compatr->size;
+	info->number = compatr->num_entries;
 
-	/* Init all hooks to impossible value. */
-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-		info->hook_entry[i] = 0xFFFFFFFF;
-		info->underflow[i] = 0xFFFFFFFF;
-	}
-
-	duprintf("translate_compat_table: size %u\n", info->size);
 	j = 0;
 	xt_compat_lock(AF_INET);
-	xt_compat_init_offsets(AF_INET, number);
+	xt_compat_init_offsets(AF_INET, compatr->num_entries);
 	/* Walk through entries, checking offsets. */
-	xt_entry_foreach(iter0, entry0, total_size) {
+	xt_entry_foreach(iter0, entry0, compatr->size) {
 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
 							entry0,
-							entry0 + total_size,
-							hook_entries,
-							underflows,
-							name);
+							entry0 + compatr->size);
 		if (ret != 0)
 			goto out_unlock;
 		++j;
 	}
 
 	ret = -EINVAL;
-	if (j != number) {
-		duprintf("translate_compat_table: %u not %u entries\n",
-			 j, number);
+	if (j != compatr->num_entries)
 		goto out_unlock;
-	}
-
-	/* Check hooks all assigned */
-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-		/* Only hooks which are valid */
-		if (!(valid_hooks & (1 << i)))
-			continue;
-		if (info->hook_entry[i] == 0xFFFFFFFF) {
-			duprintf("Invalid hook entry %u %u\n",
-				 i, hook_entries[i]);
-			goto out_unlock;
-		}
-		if (info->underflow[i] == 0xFFFFFFFF) {
-			duprintf("Invalid underflow %u %u\n",
-				 i, underflows[i]);
-			goto out_unlock;
-		}
-	}
 
 	ret = -ENOMEM;
 	newinfo = xt_alloc_table_info(size);
 	if (!newinfo)
 		goto out_unlock;
 
-	newinfo->number = number;
+	newinfo->number = compatr->num_entries;
 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-		newinfo->hook_entry[i] = info->hook_entry[i];
-		newinfo->underflow[i] = info->underflow[i];
+		newinfo->hook_entry[i] = compatr->hook_entry[i];
+		newinfo->underflow[i] = compatr->underflow[i];
 	}
 	entry1 = newinfo->entries;
 	pos = entry1;
-	size = total_size;
-	xt_entry_foreach(iter0, entry0, total_size) {
-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
-						  name, newinfo, entry1);
-		if (ret != 0)
-			break;
-	}
+	size = compatr->size;
+	xt_entry_foreach(iter0, entry0, compatr->size)
+		compat_copy_entry_from_user(iter0, &pos, &size,
+					    newinfo, entry1);
+
+	/* all module references in entry0 are now gone.
+	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
+	 * generated by 64bit userspace.
+	 *
+	 * Call standard translate_table() to validate all hook_entrys,
+	 * underflows, check for loops, etc.
+	 */
 	xt_compat_flush_offsets(AF_INET);
 	xt_compat_unlock(AF_INET);
+
+	memcpy(&repl, compatr, sizeof(*compatr));
+
+	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+		repl.hook_entry[i] = newinfo->hook_entry[i];
+		repl.underflow[i] = newinfo->underflow[i];
+	}
+
+	repl.num_counters = 0;
+	repl.counters = NULL;
+	repl.size = newinfo->size;
+	ret = translate_table(net, newinfo, entry1, &repl);
 	if (ret)
 		goto free_newinfo;
 
-	ret = -ELOOP;
-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
-		goto free_newinfo;
-
-	i = 0;
-	xt_entry_foreach(iter1, entry1, newinfo->size) {
-		ret = compat_check_entry(iter1, net, name);
-		if (ret != 0)
-			break;
-		++i;
-		if (strcmp(ipt_get_target(iter1)->u.user.name,
-		    XT_ERROR_TARGET) == 0)
-			++newinfo->stacksize;
-	}
-	if (ret) {
-		/*
-		 * The first i matches need cleanup_entry (calls ->destroy)
-		 * because they had called ->check already. The other j-i
-		 * entries need only release.
-		 */
-		int skip = i;
-		j -= i;
-		xt_entry_foreach(iter0, entry0, newinfo->size) {
-			if (skip-- > 0)
-				continue;
-			if (j-- == 0)
-				break;
-			compat_release_entry(iter0);
-		}
-		xt_entry_foreach(iter1, entry1, newinfo->size) {
-			if (i-- == 0)
-				break;
-			cleanup_entry(iter1, net);
-		}
-		xt_free_table_info(newinfo);
-		return ret;
-	}
-
 	*pinfo = newinfo;
 	*pentry0 = entry1;
 	xt_free_table_info(info);
@@ -1784,17 +1487,16 @@
 
 free_newinfo:
 	xt_free_table_info(newinfo);
-out:
-	xt_entry_foreach(iter0, entry0, total_size) {
+	return ret;
+out_unlock:
+	xt_compat_flush_offsets(AF_INET);
+	xt_compat_unlock(AF_INET);
+	xt_entry_foreach(iter0, entry0, compatr->size) {
 		if (j-- == 0)
 			break;
 		compat_release_entry(iter0);
 	}
 	return ret;
-out_unlock:
-	xt_compat_flush_offsets(AF_INET);
-	xt_compat_unlock(AF_INET);
-	goto out;
 }
 
 static int
@@ -1810,8 +1512,6 @@
 		return -EFAULT;
 
 	/* overflow check */
-	if (tmp.size >= INT_MAX / num_possible_cpus())
-		return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
 	if (tmp.num_counters == 0)
@@ -1830,15 +1530,10 @@
 		goto free_newinfo;
 	}
 
-	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
-				     &newinfo, &loc_cpu_entry, tmp.size,
-				     tmp.num_entries, tmp.hook_entry,
-				     tmp.underflow);
+	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
 	if (ret != 0)
 		goto free_newinfo;
 
-	duprintf("compat_do_replace: Translated table\n");
-
 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
 			   tmp.num_counters, compat_ptr(tmp.counters));
 	if (ret)
@@ -1872,7 +1567,6 @@
 		break;
 
 	default:
-		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
 		ret = -EINVAL;
 	}
 
@@ -1922,35 +1616,29 @@
 	struct compat_ipt_get_entries get;
 	struct xt_table *t;
 
-	if (*len < sizeof(get)) {
-		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
+	if (*len < sizeof(get))
 		return -EINVAL;
-	}
 
 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
 		return -EFAULT;
 
-	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
-		duprintf("compat_get_entries: %u != %zu\n",
-			 *len, sizeof(get) + get.size);
+	if (*len != sizeof(struct compat_ipt_get_entries) + get.size)
 		return -EINVAL;
-	}
+
+	get.name[sizeof(get.name) - 1] = '\0';
 
 	xt_compat_lock(AF_INET);
 	t = xt_find_table_lock(net, AF_INET, get.name);
 	if (!IS_ERR_OR_NULL(t)) {
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
-		duprintf("t->private->number = %u\n", private->number);
 		ret = compat_table_info(private, &info);
-		if (!ret && get.size == info.size) {
+		if (!ret && get.size == info.size)
 			ret = compat_copy_entries_to_user(private->size,
 							  t, uptr->entrytable);
-		} else if (!ret) {
-			duprintf("compat_get_entries: I've got %u not %u!\n",
-				 private->size, get.size);
+		else if (!ret)
 			ret = -EAGAIN;
-		}
+
 		xt_compat_flush_offsets(AF_INET);
 		module_put(t->me);
 		xt_table_unlock(t);
@@ -2003,7 +1691,6 @@
 		break;
 
 	default:
-		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
 		ret = -EINVAL;
 	}
 
@@ -2055,7 +1742,6 @@
 	}
 
 	default:
-		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
 		ret = -EINVAL;
 	}
 
@@ -2157,7 +1843,6 @@
 		/* We've been asked to examine this packet, and we
 		 * can't.  Hence, no choice but to drop.
 		 */
-		duprintf("Dropping evil ICMP tinygram.\n");
 		par->hotdrop = true;
 		return false;
 	}
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 7b8fbb35..db5b875 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -18,10 +18,10 @@
 #include <net/netfilter/nf_conntrack_synproxy.h>
 
 static struct iphdr *
-synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr)
+synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr,
+		  __be32 daddr)
 {
 	struct iphdr *iph;
-	struct net *net = sock_net(skb->sk);
 
 	skb_reset_network_header(skb);
 	iph = (struct iphdr *)skb_put(skb, sizeof(*iph));
@@ -40,14 +40,12 @@
 }
 
 static void
-synproxy_send_tcp(const struct synproxy_net *snet,
+synproxy_send_tcp(struct net *net,
 		  const struct sk_buff *skb, struct sk_buff *nskb,
 		  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
 		  struct iphdr *niph, struct tcphdr *nth,
 		  unsigned int tcp_hdr_size)
 {
-	struct net *net = nf_ct_net(snet->tmpl);
-
 	nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
 	nskb->ip_summed   = CHECKSUM_PARTIAL;
 	nskb->csum_start  = (unsigned char *)nth - nskb->head;
@@ -72,7 +70,7 @@
 }
 
 static void
-synproxy_send_client_synack(const struct synproxy_net *snet,
+synproxy_send_client_synack(struct net *net,
 			    const struct sk_buff *skb, const struct tcphdr *th,
 			    const struct synproxy_options *opts)
 {
@@ -91,7 +89,7 @@
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr);
+	niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -109,15 +107,16 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 			  niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_server_syn(const struct synproxy_net *snet,
+synproxy_send_server_syn(struct net *net,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 const struct synproxy_options *opts, u32 recv_seq)
 {
+	struct synproxy_net *snet = synproxy_pernet(net);
 	struct sk_buff *nskb;
 	struct iphdr *iph, *niph;
 	struct tcphdr *nth;
@@ -132,7 +131,7 @@
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr);
+	niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -153,12 +152,12 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+	synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
 			  niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_server_ack(const struct synproxy_net *snet,
+synproxy_send_server_ack(struct net *net,
 			 const struct ip_ct_tcp *state,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 const struct synproxy_options *opts)
@@ -177,7 +176,7 @@
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr);
+	niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -193,11 +192,11 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_client_ack(const struct synproxy_net *snet,
+synproxy_send_client_ack(struct net *net,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 const struct synproxy_options *opts)
 {
@@ -215,7 +214,7 @@
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr);
+	niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -231,15 +230,16 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 			  niph, nth, tcp_hdr_size);
 }
 
 static bool
-synproxy_recv_client_ack(const struct synproxy_net *snet,
+synproxy_recv_client_ack(struct net *net,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 struct synproxy_options *opts, u32 recv_seq)
 {
+	struct synproxy_net *snet = synproxy_pernet(net);
 	int mss;
 
 	mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1);
@@ -255,7 +255,7 @@
 	if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
 		synproxy_check_timestamp_cookie(opts);
 
-	synproxy_send_server_syn(snet, skb, th, opts, recv_seq);
+	synproxy_send_server_syn(net, skb, th, opts, recv_seq);
 	return true;
 }
 
@@ -263,7 +263,8 @@
 synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_synproxy_info *info = par->targinfo;
-	struct synproxy_net *snet = synproxy_pernet(par->net);
+	struct net *net = par->net;
+	struct synproxy_net *snet = synproxy_pernet(net);
 	struct synproxy_options opts = {};
 	struct tcphdr *th, _th;
 
@@ -292,12 +293,12 @@
 					  XT_SYNPROXY_OPT_SACK_PERM |
 					  XT_SYNPROXY_OPT_ECN);
 
-		synproxy_send_client_synack(snet, skb, th, &opts);
+		synproxy_send_client_synack(net, skb, th, &opts);
 		return NF_DROP;
 
 	} else if (th->ack && !(th->fin || th->rst || th->syn)) {
 		/* ACK from client */
-		synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
+		synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq));
 		return NF_DROP;
 	}
 
@@ -308,7 +309,8 @@
 				       struct sk_buff *skb,
 				       const struct nf_hook_state *nhs)
 {
-	struct synproxy_net *snet = synproxy_pernet(nhs->net);
+	struct net *net = nhs->net;
+	struct synproxy_net *snet = synproxy_pernet(net);
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
 	struct nf_conn_synproxy *synproxy;
@@ -365,7 +367,7 @@
 			 * therefore we need to add 1 to make the SYN sequence
 			 * number match the one of first SYN.
 			 */
-			if (synproxy_recv_client_ack(snet, skb, th, &opts,
+			if (synproxy_recv_client_ack(net, skb, th, &opts,
 						     ntohl(th->seq) + 1))
 				this_cpu_inc(snet->stats->cookie_retrans);
 
@@ -391,12 +393,12 @@
 				  XT_SYNPROXY_OPT_SACK_PERM);
 
 		swap(opts.tsval, opts.tsecr);
-		synproxy_send_server_ack(snet, state, skb, th, &opts);
+		synproxy_send_server_ack(net, state, skb, th, &opts);
 
 		nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
 
 		swap(opts.tsval, opts.tsecr);
-		synproxy_send_client_ack(snet, skb, th, &opts);
+		synproxy_send_client_ack(net, skb, th, &opts);
 
 		consume_skb(skb);
 		return NF_STOLEN;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index e3c46e8..ae1a71a 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -360,7 +360,7 @@
 
 	in->ctl_table[0].data = &nf_conntrack_max;
 	in->ctl_table[1].data = &net->ct.count;
-	in->ctl_table[2].data = &net->ct.htable_size;
+	in->ctl_table[2].data = &nf_conntrack_htable_size;
 	in->ctl_table[3].data = &net->ct.sysctl_checksum;
 	in->ctl_table[4].data = &net->ct.sysctl_log_invalid;
 #endif
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index f0dfe92..c6f3c40 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -31,15 +31,14 @@
 
 static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 {
-	struct net *net = seq_file_net(seq);
 	struct ct_iter_state *st = seq->private;
 	struct hlist_nulls_node *n;
 
 	for (st->bucket = 0;
-	     st->bucket < net->ct.htable_size;
+	     st->bucket < nf_conntrack_htable_size;
 	     st->bucket++) {
 		n = rcu_dereference(
-			hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
+			hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
 		if (!is_a_nulls(n))
 			return n;
 	}
@@ -49,17 +48,16 @@
 static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
 				      struct hlist_nulls_node *head)
 {
-	struct net *net = seq_file_net(seq);
 	struct ct_iter_state *st = seq->private;
 
 	head = rcu_dereference(hlist_nulls_next_rcu(head));
 	while (is_a_nulls(head)) {
 		if (likely(get_nulls_value(head) == st->bucket)) {
-			if (++st->bucket >= net->ct.htable_size)
+			if (++st->bucket >= nf_conntrack_htable_size)
 				return NULL;
 		}
 		head = rcu_dereference(
-			hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
+			hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
 	}
 	return head;
 }
@@ -114,6 +112,23 @@
 }
 #endif
 
+static bool ct_seq_should_skip(const struct nf_conn *ct,
+			       const struct net *net,
+			       const struct nf_conntrack_tuple_hash *hash)
+{
+	/* we only want to print DIR_ORIGINAL */
+	if (NF_CT_DIRECTION(hash))
+		return true;
+
+	if (nf_ct_l3num(ct) != AF_INET)
+		return true;
+
+	if (!net_eq(nf_ct_net(ct), net))
+		return true;
+
+	return false;
+}
+
 static int ct_seq_show(struct seq_file *s, void *v)
 {
 	struct nf_conntrack_tuple_hash *hash = v;
@@ -123,14 +138,15 @@
 	int ret = 0;
 
 	NF_CT_ASSERT(ct);
+	if (ct_seq_should_skip(ct, seq_file_net(s), hash))
+		return 0;
+
 	if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
 		return 0;
 
-
-	/* we only want to print DIR_ORIGINAL */
-	if (NF_CT_DIRECTION(hash))
-		goto release;
-	if (nf_ct_l3num(ct) != AF_INET)
+	/* check if we raced w. object reuse */
+	if (!nf_ct_is_confirmed(ct) ||
+	    ct_seq_should_skip(ct, seq_file_net(s), hash))
 		goto release;
 
 	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
@@ -220,13 +236,12 @@
 
 static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
 {
-	struct net *net = seq_file_net(seq);
 	struct ct_expect_iter_state *st = seq->private;
 	struct hlist_node *n;
 
 	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
 		n = rcu_dereference(
-			hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
+			hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
 		if (n)
 			return n;
 	}
@@ -236,7 +251,6 @@
 static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
 					     struct hlist_node *head)
 {
-	struct net *net = seq_file_net(seq);
 	struct ct_expect_iter_state *st = seq->private;
 
 	head = rcu_dereference(hlist_next_rcu(head));
@@ -244,7 +258,7 @@
 		if (++st->bucket >= nf_ct_expect_hsize)
 			return NULL;
 		head = rcu_dereference(
-			hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
+			hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
 	}
 	return head;
 }
@@ -285,6 +299,9 @@
 
 	exp = hlist_entry(n, struct nf_conntrack_expect, hnode);
 
+	if (!net_eq(nf_ct_net(exp->master), seq_file_net(s)))
+		return 0;
+
 	if (exp->tuple.src.l3num != AF_INET)
 		return 0;
 
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index cf9700b..66ddcb6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -737,6 +737,7 @@
 		/* no remote port */
 	}
 
+	ipc.sockc.tsflags = sk->sk_tsflags;
 	ipc.addr = inet->inet_saddr;
 	ipc.opt = NULL;
 	ipc.oif = sk->sk_bound_dev_if;
@@ -744,10 +745,8 @@
 	ipc.ttl = 0;
 	ipc.tos = -1;
 
-	sock_tx_timestamp(sk, &ipc.tx_flags);
-
 	if (msg->msg_controllen) {
-		err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
+		err = ip_cmsg_send(sk, msg, &ipc, false);
 		if (unlikely(err)) {
 			kfree(ipc.opt);
 			return err;
@@ -768,6 +767,8 @@
 		rcu_read_unlock();
 	}
 
+	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
+
 	saddr = ipc.addr;
 	ipc.addr = faddr = daddr;
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8d22de7..438f50c 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -339,8 +339,8 @@
 
 static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 			   struct msghdr *msg, size_t length,
-			   struct rtable **rtp,
-			   unsigned int flags)
+			   struct rtable **rtp, unsigned int flags,
+			   const struct sockcm_cookie *sockc)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct net *net = sock_net(sk);
@@ -379,7 +379,7 @@
 
 	skb->ip_summed = CHECKSUM_NONE;
 
-	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+	sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
 
 	skb->transport_header = skb->network_header;
 	err = -EFAULT;
@@ -540,6 +540,7 @@
 		daddr = inet->inet_daddr;
 	}
 
+	ipc.sockc.tsflags = sk->sk_tsflags;
 	ipc.addr = inet->inet_saddr;
 	ipc.opt = NULL;
 	ipc.tx_flags = 0;
@@ -548,7 +549,7 @@
 	ipc.oif = sk->sk_bound_dev_if;
 
 	if (msg->msg_controllen) {
-		err = ip_cmsg_send(net, msg, &ipc, false);
+		err = ip_cmsg_send(sk, msg, &ipc, false);
 		if (unlikely(err)) {
 			kfree(ipc.opt);
 			goto out;
@@ -638,10 +639,10 @@
 
 	if (inet->hdrincl)
 		err = raw_send_hdrinc(sk, &fl4, msg, len,
-				      &rt, msg->msg_flags);
+				      &rt, msg->msg_flags, &ipc.sockc);
 
 	 else {
-		sock_tx_timestamp(sk, &ipc.tx_flags);
+		sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
 
 		if (!ipc.addr)
 			ipc.addr = fl4.daddr;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 02c6229..a1f2830 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -915,11 +915,11 @@
 	if (!IN_DEV_FORWARD(in_dev)) {
 		switch (rt->dst.error) {
 		case EHOSTUNREACH:
-			IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
+			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
 			break;
 
 		case ENETUNREACH:
-			IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 			break;
 		}
 		goto out;
@@ -934,7 +934,7 @@
 		break;
 	case ENETUNREACH:
 		code = ICMP_NET_UNREACH;
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 		break;
 	case EACCES:
 		code = ICMP_PKT_FILTERED;
@@ -1438,9 +1438,9 @@
 #endif
 }
 
-static struct rtable *rt_dst_alloc(struct net_device *dev,
-				   unsigned int flags, u16 type,
-				   bool nopolicy, bool noxfrm, bool will_cache)
+struct rtable *rt_dst_alloc(struct net_device *dev,
+			    unsigned int flags, u16 type,
+			    bool nopolicy, bool noxfrm, bool will_cache)
 {
 	struct rtable *rt;
 
@@ -1468,6 +1468,7 @@
 
 	return rt;
 }
+EXPORT_SYMBOL(rt_dst_alloc);
 
 /* called in rcu_read_lock() section */
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@
 		 */
 		if (fi && res->prefixlen < 4)
 			fi = NULL;
+	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+		   (orig_oif != dev_out->ifindex)) {
+		/* For local routes that require a particular output interface
+		 * we do not want to cache the result.  Caching the result
+		 * causes incorrect behaviour when there are multiple source
+		 * addresses on the interface, the end result being that if the
+		 * intended recipient is waiting on that interface for the
+		 * packet, he won't receive it because it will be delivered on
+		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
+		 * be set to the loopback interface as well.
+		 */
+		fi = NULL;
 	}
 
 	fnhe = NULL;
@@ -2133,6 +2146,7 @@
 	unsigned int flags = 0;
 	struct fib_result res;
 	struct rtable *rth;
+	int master_idx;
 	int orig_oif;
 	int err = -ENETUNREACH;
 
@@ -2142,6 +2156,9 @@
 
 	orig_oif = fl4->flowi4_oif;
 
+	master_idx = l3mdev_master_ifindex_by_index(net, fl4->flowi4_oif);
+	if (master_idx)
+		fl4->flowi4_oif = master_idx;
 	fl4->flowi4_iif = LOOPBACK_IFINDEX;
 	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
 	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 4c04f09..e3c4043 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -312,11 +312,11 @@
 
 	mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
 	if (mss == 0) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
 	/* check for timestamp cookie support */
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1e1fe60..bb04195 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -960,6 +960,17 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+	{
+		.procname	= "fib_multipath_use_neigh",
+		.data		= &init_net.ipv4.sysctl_fib_multipath_use_neigh,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+#endif
 	{ }
 };
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 08b8b96..5c7ed14 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -428,13 +428,16 @@
 }
 EXPORT_SYMBOL(tcp_init_sock);
 
-static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb)
+static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb)
 {
-	if (sk->sk_tsflags) {
+	if (tsflags) {
 		struct skb_shared_info *shinfo = skb_shinfo(skb);
+		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
-		sock_tx_timestamp(sk, &shinfo->tx_flags);
-		if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)
+		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
+		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
+			tcb->txstamp_ack = 1;
+		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
 			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
 	}
 }
@@ -906,7 +909,8 @@
 		int copy, i;
 		bool can_coalesce;
 
-		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
+		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0 ||
+		    !tcp_skb_can_collapse_to(skb)) {
 new_segment:
 			if (!sk_stream_memory_free(sk))
 				goto wait_for_sndbuf;
@@ -957,7 +961,7 @@
 		offset += copy;
 		size -= copy;
 		if (!size) {
-			tcp_tx_timestamp(sk, skb);
+			tcp_tx_timestamp(sk, sk->sk_tsflags, skb);
 			goto out;
 		}
 
@@ -1077,8 +1081,10 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
+	struct sockcm_cookie sockc;
 	int flags, err, copied = 0;
 	int mss_now = 0, size_goal, copied_syn = 0;
+	bool process_backlog = false;
 	bool sg;
 	long timeo;
 
@@ -1119,14 +1125,24 @@
 		/* 'common' sending to sendq */
 	}
 
+	sockc.tsflags = sk->sk_tsflags;
+	if (msg->msg_controllen) {
+		err = sock_cmsg_send(sk, msg, &sockc);
+		if (unlikely(err)) {
+			err = -EINVAL;
+			goto out_err;
+		}
+	}
+
 	/* This should be in poll */
 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
-	mss_now = tcp_send_mss(sk, &size_goal, flags);
-
 	/* Ok commence sending. */
 	copied = 0;
 
+restart:
+	mss_now = tcp_send_mss(sk, &size_goal, flags);
+
 	err = -EPIPE;
 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 		goto out_err;
@@ -1144,7 +1160,7 @@
 			copy = max - skb->len;
 		}
 
-		if (copy <= 0) {
+		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
 new_segment:
 			/* Allocate new segment. If the interface is SG,
 			 * allocate skb fitting to single page.
@@ -1152,6 +1168,10 @@
 			if (!sk_stream_memory_free(sk))
 				goto wait_for_sndbuf;
 
+			if (process_backlog && sk_flush_backlog(sk)) {
+				process_backlog = false;
+				goto restart;
+			}
 			skb = sk_stream_alloc_skb(sk,
 						  select_size(sk, sg),
 						  sk->sk_allocation,
@@ -1159,6 +1179,7 @@
 			if (!skb)
 				goto wait_for_memory;
 
+			process_backlog = true;
 			/*
 			 * Check whether we can use HW checksum.
 			 */
@@ -1237,7 +1258,9 @@
 
 		copied += copy;
 		if (!msg_data_left(msg)) {
-			tcp_tx_timestamp(sk, skb);
+			tcp_tx_timestamp(sk, sockc.tsflags, skb);
+			if (unlikely(flags & MSG_EOR))
+				TCP_SKB_CB(skb)->eor = 1;
 			goto out;
 		}
 
@@ -1431,14 +1454,10 @@
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
-	/* RX process wants to run with disabled BHs, though it is not
-	 * necessary */
-	local_bh_disable();
 	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 		sk_backlog_rcv(sk, skb);
-	local_bh_enable();
 
 	/* Clear memory counter. */
 	tp->ucopy.memory = 0;
@@ -1765,7 +1784,7 @@
 
 			chunk = len - tp->ucopy.len;
 			if (chunk != 0) {
-				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+				NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1777,7 +1796,7 @@
 
 				chunk = len - tp->ucopy.len;
 				if (chunk != 0) {
-					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+					NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
 				}
@@ -1863,7 +1882,7 @@
 			tcp_prequeue_process(sk);
 
 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+				NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -2053,13 +2072,13 @@
 		sk->sk_prot->disconnect(sk, 0);
 	} else if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
-		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, sk->sk_allocation);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
-		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 	} else if (tcp_close_state(sk)) {
 		/* We FIN if the application ate all the data before
 		 * zapping the connection.
@@ -2136,7 +2155,7 @@
 		if (tp->linger2 < 0) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPABORTONLINGER);
 		} else {
 			const int tmo = tcp_fin_time(sk);
@@ -2155,7 +2174,7 @@
 		if (tcp_check_oom(sk, 0)) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPABORTONMEMORY);
 		}
 	}
@@ -3079,7 +3098,7 @@
 	struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
 
 	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index fd1405d..36087bc 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -197,15 +197,15 @@
 /* Track delayed acknowledgment ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt)
+static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 
 	if (icsk->icsk_ca_state == TCP_CA_Open) {
 		struct bictcp *ca = inet_csk_ca(sk);
 
-		cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
-		ca->delayed_ack += cnt;
+		ca->delayed_ack += sample->pkts_acked -
+			(ca->delayed_ack >> ACK_RATIO_SHIFT);
 	}
 }
 
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 167b6a3..03725b2 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -155,11 +155,11 @@
 
 			ca->last_ack = now_us;
 			if (after(now_us, ca->round_start + base_owd)) {
-				NET_INC_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPHYSTARTTRAINDETECT);
-				NET_ADD_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPHYSTARTTRAINCWND,
-						 tp->snd_cwnd);
+				NET_INC_STATS(sock_net(sk),
+					      LINUX_MIB_TCPHYSTARTTRAINDETECT);
+				NET_ADD_STATS(sock_net(sk),
+					      LINUX_MIB_TCPHYSTARTTRAINCWND,
+					      tp->snd_cwnd);
 				tp->snd_ssthresh = tp->snd_cwnd;
 				return;
 			}
@@ -174,11 +174,11 @@
 					 125U);
 
 			if (ca->rtt.min > thresh) {
-				NET_INC_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPHYSTARTDELAYDETECT);
-				NET_ADD_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPHYSTARTDELAYCWND,
-						 tp->snd_cwnd);
+				NET_INC_STATS(sock_net(sk),
+					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
+				NET_ADD_STATS(sock_net(sk),
+					      LINUX_MIB_TCPHYSTARTDELAYCWND,
+					      tp->snd_cwnd);
 				tp->snd_ssthresh = tp->snd_cwnd;
 			}
 		}
@@ -294,12 +294,12 @@
 	ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);
 }
 
-static void tcp_cdg_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
+static void tcp_cdg_acked(struct sock *sk, const struct ack_sample *sample)
 {
 	struct cdg *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (rtt_us <= 0)
+	if (sample->rtt_us <= 0)
 		return;
 
 	/* A heuristic for filtering delayed ACKs, adapted from:
@@ -307,20 +307,20 @@
 	 * delay and rate based TCP mechanisms." TR 100219A. CAIA, 2010.
 	 */
 	if (tp->sacked_out == 0) {
-		if (num_acked == 1 && ca->delack) {
+		if (sample->pkts_acked == 1 && ca->delack) {
 			/* A delayed ACK is only used for the minimum if it is
 			 * provenly lower than an existing non-zero minimum.
 			 */
-			ca->rtt.min = min(ca->rtt.min, rtt_us);
+			ca->rtt.min = min(ca->rtt.min, sample->rtt_us);
 			ca->delack--;
 			return;
-		} else if (num_acked > 1 && ca->delack < 5) {
+		} else if (sample->pkts_acked > 1 && ca->delack < 5) {
 			ca->delack++;
 		}
 	}
 
-	ca->rtt.min = min_not_zero(ca->rtt.min, rtt_us);
-	ca->rtt.max = max(ca->rtt.max, rtt_us);
+	ca->rtt.min = min_not_zero(ca->rtt.min, sample->rtt_us);
+	ca->rtt.max = max(ca->rtt.max, sample->rtt_us);
 }
 
 static u32 tcp_cdg_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 448c261..c99230e 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -402,11 +402,11 @@
 			ca->last_ack = now;
 			if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
 				ca->found |= HYSTART_ACK_TRAIN;
-				NET_INC_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPHYSTARTTRAINDETECT);
-				NET_ADD_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPHYSTARTTRAINCWND,
-						 tp->snd_cwnd);
+				NET_INC_STATS(sock_net(sk),
+					      LINUX_MIB_TCPHYSTARTTRAINDETECT);
+				NET_ADD_STATS(sock_net(sk),
+					      LINUX_MIB_TCPHYSTARTTRAINCWND,
+					      tp->snd_cwnd);
 				tp->snd_ssthresh = tp->snd_cwnd;
 			}
 		}
@@ -423,11 +423,11 @@
 			if (ca->curr_rtt > ca->delay_min +
 			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
 				ca->found |= HYSTART_DELAY;
-				NET_INC_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPHYSTARTDELAYDETECT);
-				NET_ADD_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPHYSTARTDELAYCWND,
-						 tp->snd_cwnd);
+				NET_INC_STATS(sock_net(sk),
+					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
+				NET_ADD_STATS(sock_net(sk),
+					      LINUX_MIB_TCPHYSTARTDELAYCWND,
+					      tp->snd_cwnd);
 				tp->snd_ssthresh = tp->snd_cwnd;
 			}
 		}
@@ -437,21 +437,21 @@
 /* Track delayed acknowledgment ratio using sliding window
  * ratio = (15*ratio + sample) / 16
  */
-static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 	u32 delay;
 
 	/* Some calls are for duplicates without timestamps */
-	if (rtt_us < 0)
+	if (sample->rtt_us < 0)
 		return;
 
 	/* Discard delay samples right after fast recovery */
 	if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
 		return;
 
-	delay = (rtt_us << 3) / USEC_PER_MSEC;
+	delay = (sample->rtt_us << 3) / USEC_PER_MSEC;
 	if (delay == 0)
 		delay = 1;
 
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index cffd8f9..54d9f9b 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -255,9 +255,9 @@
 		spin_lock(&fastopenq->lock);
 		req1 = fastopenq->rskq_rst_head;
 		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
 			spin_unlock(&fastopenq->lock);
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
 			return false;
 		}
 		fastopenq->rskq_rst_head = req1->dl_next;
@@ -282,7 +282,7 @@
 	struct sock *child;
 
 	if (foc->len == 0) /* Client requests a cookie */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
 
 	if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
 	      (syn_data || foc->len >= 0) &&
@@ -311,13 +311,13 @@
 		child = tcp_fastopen_create_child(sk, skb, dst, req);
 		if (child) {
 			foc->len = -1;
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPFASTOPENPASSIVE);
+			NET_INC_STATS(sock_net(sk),
+				      LINUX_MIB_TCPFASTOPENPASSIVE);
 			return child;
 		}
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 	} else if (foc->len > 0) /* Client presents an invalid cookie */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 
 	valid_foc.exp = foc->exp;
 	*foc = valid_foc;
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 82f0d9e..4a4d8e7 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -99,7 +99,7 @@
 }
 
 static void measure_achieved_throughput(struct sock *sk,
-					u32 pkts_acked, s32 rtt)
+					const struct ack_sample *sample)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
@@ -107,10 +107,10 @@
 	u32 now = tcp_time_stamp;
 
 	if (icsk->icsk_ca_state == TCP_CA_Open)
-		ca->pkts_acked = pkts_acked;
+		ca->pkts_acked = sample->pkts_acked;
 
-	if (rtt > 0)
-		measure_rtt(sk, usecs_to_jiffies(rtt));
+	if (sample->rtt_us > 0)
+		measure_rtt(sk, usecs_to_jiffies(sample->rtt_us));
 
 	if (!use_bandwidth_switch)
 		return;
@@ -122,7 +122,7 @@
 		return;
 	}
 
-	ca->packetcount += pkts_acked;
+	ca->packetcount += sample->pkts_acked;
 
 	if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) &&
 	    now - ca->lasttime >= ca->minRTT &&
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 2ab9bbb..c8e6d86 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -82,30 +82,31 @@
 }
 
 /* Measure RTT for each ack. */
-static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
+static void tcp_illinois_acked(struct sock *sk, const struct ack_sample *sample)
 {
 	struct illinois *ca = inet_csk_ca(sk);
+	s32 rtt_us = sample->rtt_us;
 
-	ca->acked = pkts_acked;
+	ca->acked = sample->pkts_acked;
 
 	/* dup ack, no rtt sample */
-	if (rtt < 0)
+	if (rtt_us < 0)
 		return;
 
 	/* ignore bogus values, this prevents wraparound in alpha math */
-	if (rtt > RTT_MAX)
-		rtt = RTT_MAX;
+	if (rtt_us > RTT_MAX)
+		rtt_us = RTT_MAX;
 
 	/* keep track of minimum RTT seen so far */
-	if (ca->base_rtt > rtt)
-		ca->base_rtt = rtt;
+	if (ca->base_rtt > rtt_us)
+		ca->base_rtt = rtt_us;
 
 	/* and max */
-	if (ca->max_rtt < rtt)
-		ca->max_rtt = rtt;
+	if (ca->max_rtt < rtt_us)
+		ca->max_rtt = rtt_us;
 
 	++ca->cnt_rtt;
-	ca->sum_rtt += rtt;
+	ca->sum_rtt += rtt_us;
 }
 
 /* Maximum queuing delay */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e6e65f7..d6c8f4cd0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -869,7 +869,7 @@
 		else
 			mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1062,7 +1062,7 @@
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = true;
 		tcp_dsack_seen(tp);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1071,7 +1071,7 @@
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = true;
 			tcp_dsack_seen(tp);
-			NET_INC_STATS_BH(sock_net(sk),
+			NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
@@ -1289,7 +1289,7 @@
 
 	if (skb->len > 0) {
 		BUG_ON(!tcp_skb_pcount(skb));
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
 		return false;
 	}
 
@@ -1303,16 +1303,18 @@
 	}
 
 	TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+	TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 		TCP_SKB_CB(prev)->end_seq++;
 
 	if (skb == tcp_highest_sack(sk))
 		tcp_advance_highest_sack(sk, skb);
 
+	tcp_skb_collapse_tstamp(prev, skb);
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
 
 	return true;
 }
@@ -1367,6 +1369,9 @@
 	if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
 		goto fallback;
 
+	if (!tcp_skb_can_collapse_to(prev))
+		goto fallback;
+
 	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
 		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
 
@@ -1468,7 +1473,7 @@
 	return skb;
 
 fallback:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
 	return NULL;
 }
 
@@ -1656,7 +1661,7 @@
 				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
 
-			NET_INC_STATS_BH(sock_net(sk), mib_idx);
+			NET_INC_STATS(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1908,7 +1913,7 @@
 	skb = tcp_write_queue_head(sk);
 	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
 	if (is_reneg) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
@@ -2252,16 +2257,6 @@
 	}
 }
 
-/* CWND moderation, preventing bursts due to too big ACKs
- * in dubious situations.
- */
-static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
-{
-	tp->snd_cwnd = min(tp->snd_cwnd,
-			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
-	tp->snd_cwnd_stamp = tcp_time_stamp;
-}
-
 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
 {
 	return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2404,13 +2399,12 @@
 		else
 			mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		NET_INC_STATS(sock_net(sk), mib_idx);
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
 		/* Hold old state until something *above* high_seq
 		 * is ACKed. For Reno it is MUST to prevent false
 		 * fast retransmits (RFC2582). SACK TCP is safe. */
-		tcp_moderate_cwnd(tp);
 		if (!tcp_any_retrans_done(sk))
 			tp->retrans_stamp = 0;
 		return true;
@@ -2427,7 +2421,7 @@
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwnd_reduction(sk, false);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 		return true;
 	}
 	return false;
@@ -2442,10 +2436,10 @@
 		tcp_undo_cwnd_reduction(sk, true);
 
 		DBGUNDO(sk, "partial loss");
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		if (frto_undo)
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPSPURIOUSRTOS);
+			NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_TCPSPURIOUSRTOS);
 		inet_csk(sk)->icsk_retransmits = 0;
 		if (frto_undo || tcp_is_sack(tp))
 			tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2569,7 +2563,7 @@
 
 	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
 	icsk->icsk_mtup.probe_size = 0;
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2589,7 +2583,7 @@
 	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
 	icsk->icsk_mtup.probe_size = 0;
 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2653,7 +2647,7 @@
 	else
 		mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-	NET_INC_STATS_BH(sock_net(sk), mib_idx);
+	NET_INC_STATS(sock_net(sk), mib_idx);
 
 	tp->prior_ssthresh = 0;
 	tcp_init_undo(tp);
@@ -2746,7 +2740,7 @@
 
 		DBGUNDO(sk, "partial recovery");
 		tcp_undo_cwnd_reduction(sk, true);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 		tcp_try_keep_open(sk);
 		return true;
 	}
@@ -3093,12 +3087,12 @@
 	const struct skb_shared_info *shinfo;
 
 	/* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
-	if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)))
+	if (likely(!TCP_SKB_CB(skb)->txstamp_ack))
 		return;
 
 	shinfo = skb_shinfo(skb);
-	if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
-	    between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))
+	if (!before(shinfo->tskey, prior_snd_una) &&
+	    before(shinfo->tskey, tcp_sk(sk)->snd_una))
 		__skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
 }
 
@@ -3254,8 +3248,12 @@
 		tcp_rearm_rto(sk);
 	}
 
-	if (icsk->icsk_ca_ops->pkts_acked)
-		icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us);
+	if (icsk->icsk_ca_ops->pkts_acked) {
+		struct ack_sample sample = { .pkts_acked = pkts_acked,
+					     .rtt_us = ca_rtt_us };
+
+		icsk->icsk_ca_ops->pkts_acked(sk, &sample);
+	}
 
 #if FASTRETRANS_DEBUG > 0
 	WARN_ON((int)tp->sacked_out < 0);
@@ -3361,9 +3359,10 @@
 {
 	u32 delta = ack - tp->snd_una;
 
-	u64_stats_update_begin(&tp->syncp);
+	sock_owned_by_me((struct sock *)tp);
+	u64_stats_update_begin_raw(&tp->syncp);
 	tp->bytes_acked += delta;
-	u64_stats_update_end(&tp->syncp);
+	u64_stats_update_end_raw(&tp->syncp);
 	tp->snd_una = ack;
 }
 
@@ -3372,9 +3371,10 @@
 {
 	u32 delta = seq - tp->rcv_nxt;
 
-	u64_stats_update_begin(&tp->syncp);
+	sock_owned_by_me((struct sock *)tp);
+	u64_stats_update_begin_raw(&tp->syncp);
 	tp->bytes_received += delta;
-	u64_stats_update_end(&tp->syncp);
+	u64_stats_update_end_raw(&tp->syncp);
 	tp->rcv_nxt = seq;
 }
 
@@ -3440,7 +3440,7 @@
 		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
 
 		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-			NET_INC_STATS_BH(net, mib_idx);
+			NET_INC_STATS(net, mib_idx);
 			return true;	/* rate-limited: don't send yet! */
 		}
 	}
@@ -3473,7 +3473,7 @@
 		challenge_count = 0;
 	}
 	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
 		tcp_send_ack(sk);
 	}
 }
@@ -3522,8 +3522,8 @@
 		tcp_set_ca_state(sk, TCP_CA_CWR);
 		tcp_end_cwnd_reduction(sk);
 		tcp_try_keep_open(sk);
-		NET_INC_STATS_BH(sock_net(sk),
-				 LINUX_MIB_TCPLOSSPROBERECOVERY);
+		NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPLOSSPROBERECOVERY);
 	} else if (!(flag & (FLAG_SND_UNA_ADVANCED |
 			     FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
 		/* Pure dupack: original and TLP probe arrived; no loss */
@@ -3627,14 +3627,14 @@
 
 		tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
 
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
 		u32 ack_ev_flags = CA_ACK_SLOWPATH;
 
 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
 			flag |= FLAG_DATA;
 		else
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -4137,7 +4137,7 @@
 		else
 			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		NET_INC_STATS(sock_net(sk), mib_idx);
 
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
@@ -4161,7 +4161,7 @@
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -4311,13 +4311,19 @@
 
 	atomic_add(delta, &sk->sk_rmem_alloc);
 	sk_mem_charge(sk, delta);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
 	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
 	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
 	TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
 	return true;
 }
 
+static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+{
+	sk_drops_add(sk, skb);
+	__kfree_skb(skb);
+}
+
 /* This one checks to see if we can put data from the
  * out_of_order queue into the receive_queue.
  */
@@ -4342,7 +4348,7 @@
 		__skb_unlink(skb, &tp->out_of_order_queue);
 		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 			SOCK_DEBUG(sk, "ofo packet was already received\n");
-			__kfree_skb(skb);
+			tcp_drop(sk, skb);
 			continue;
 		}
 		SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
@@ -4393,8 +4399,8 @@
 	tcp_ecn_check_ce(tp, skb);
 
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
-		__kfree_skb(skb);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+		tcp_drop(sk, skb);
 		return;
 	}
 
@@ -4402,7 +4408,7 @@
 	tp->pred_flags = 0;
 	inet_csk_schedule_ack(sk);
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
@@ -4457,8 +4463,8 @@
 	if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
 			/* All the bits are present. Drop. */
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
-			__kfree_skb(skb);
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+			tcp_drop(sk, skb);
 			skb = NULL;
 			tcp_dsack_set(sk, seq, end_seq);
 			goto add_sack;
@@ -4496,8 +4502,8 @@
 		__skb_unlink(skb1, &tp->out_of_order_queue);
 		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
 				 TCP_SKB_CB(skb1)->end_seq);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
-		__kfree_skb(skb1);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+		tcp_drop(sk, skb1);
 	}
 
 add_sack:
@@ -4580,12 +4586,13 @@
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int eaten = -1;
 	bool fragstolen = false;
+	int eaten = -1;
 
-	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
-		goto drop;
-
+	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
+		__kfree_skb(skb);
+		return;
+	}
 	skb_dst_drop(skb);
 	__skb_pull(skb, tcp_hdr(skb)->doff * 4);
 
@@ -4610,14 +4617,12 @@
 
 			__set_current_state(TASK_RUNNING);
 
-			local_bh_enable();
 			if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) {
 				tp->ucopy.len -= chunk;
 				tp->copied_seq += chunk;
 				eaten = (chunk == skb->len);
 				tcp_rcv_space_adjust(sk);
 			}
-			local_bh_disable();
 		}
 
 		if (eaten <= 0) {
@@ -4660,14 +4665,14 @@
 
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case.  Force an immediate ack. */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
 		tcp_enter_quickack_mode(sk);
 		inet_csk_schedule_ack(sk);
 drop:
-		__kfree_skb(skb);
+		tcp_drop(sk, skb);
 		return;
 	}
 
@@ -4706,7 +4711,7 @@
 
 	__skb_unlink(skb, list);
 	__kfree_skb(skb);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 
 	return next;
 }
@@ -4865,7 +4870,7 @@
 	bool res = false;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state.  A conforming SACK implementation will
@@ -4894,7 +4899,7 @@
 
 	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
@@ -4924,7 +4929,7 @@
 	 * drop receive data on the floor.  It will get retransmitted
 	 * and hopefully then we'll have sufficient space.
 	 */
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
 	/* Massive buffer overcommit. */
 	tp->pred_flags = 0;
@@ -5133,7 +5138,6 @@
 	int chunk = skb->len - hlen;
 	int err;
 
-	local_bh_enable();
 	if (skb_csum_unnecessary(skb))
 		err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk);
 	else
@@ -5145,32 +5149,9 @@
 		tcp_rcv_space_adjust(sk);
 	}
 
-	local_bh_disable();
 	return err;
 }
 
-static __sum16 __tcp_checksum_complete_user(struct sock *sk,
-					    struct sk_buff *skb)
-{
-	__sum16 result;
-
-	if (sock_owned_by_user(sk)) {
-		local_bh_enable();
-		result = __tcp_checksum_complete(skb);
-		local_bh_disable();
-	} else {
-		result = __tcp_checksum_complete(skb);
-	}
-	return result;
-}
-
-static inline bool tcp_checksum_complete_user(struct sock *sk,
-					     struct sk_buff *skb)
-{
-	return !skb_csum_unnecessary(skb) &&
-	       __tcp_checksum_complete_user(sk, skb);
-}
-
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */
@@ -5183,7 +5164,7 @@
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			if (!tcp_oow_rate_limited(sock_net(sk), skb,
 						  LINUX_MIB_TCPACKSKIPPEDPAWS,
 						  &tp->last_oow_ack_time))
@@ -5235,8 +5216,8 @@
 	if (th->syn) {
 syn_challenge:
 		if (syn_inerr)
-			TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+			TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
 		tcp_send_challenge_ack(sk, skb);
 		goto discard;
 	}
@@ -5244,7 +5225,7 @@
 	return true;
 
 discard:
-	__kfree_skb(skb);
+	tcp_drop(sk, skb);
 	return false;
 }
 
@@ -5351,7 +5332,7 @@
 				tcp_data_snd_check(sk);
 				return;
 			} else { /* Header too small */
-				TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+				TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 				goto discard;
 			}
 		} else {
@@ -5379,12 +5360,13 @@
 
 					__skb_pull(skb, tcp_header_len);
 					tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
-					NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
+					NET_INC_STATS(sock_net(sk),
+							LINUX_MIB_TCPHPHITSTOUSER);
 					eaten = 1;
 				}
 			}
 			if (!eaten) {
-				if (tcp_checksum_complete_user(sk, skb))
+				if (tcp_checksum_complete(skb))
 					goto csum_error;
 
 				if ((int)skb->truesize > sk->sk_forward_alloc)
@@ -5401,7 +5383,7 @@
 
 				tcp_rcv_rtt_measure_ts(sk, skb);
 
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
+				NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 				/* Bulk data transfer: receiver */
 				eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
@@ -5428,7 +5410,7 @@
 	}
 
 slow_path:
-	if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
+	if (len < (th->doff << 2) || tcp_checksum_complete(skb))
 		goto csum_error;
 
 	if (!th->ack && !th->rst && !th->syn)
@@ -5458,11 +5440,11 @@
 	return;
 
 csum_error:
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 
 discard:
-	__kfree_skb(skb);
+	tcp_drop(sk, skb);
 }
 EXPORT_SYMBOL(tcp_rcv_established);
 
@@ -5547,16 +5529,18 @@
 	if (data) { /* Retransmit unacked data in SYN */
 		tcp_for_write_queue_from(data, sk) {
 			if (data == tcp_send_head(sk) ||
-			    __tcp_retransmit_skb(sk, data))
+			    __tcp_retransmit_skb(sk, data, 1))
 				break;
 		}
 		tcp_rearm_rto(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+		NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		return true;
 	}
 	tp->syn_data_acked = tp->syn_data;
 	if (tp->syn_data_acked)
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+		NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPFASTOPENACTIVE);
 
 	tcp_fastopen_add_skb(sk, synack);
 
@@ -5591,7 +5575,8 @@
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
 			     tcp_time_stamp)) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
+			NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
 		}
 
@@ -5693,7 +5678,7 @@
 						  TCP_DELACK_MAX, TCP_RTO_MAX);
 
 discard:
-			__kfree_skb(skb);
+			tcp_drop(sk, skb);
 			return 0;
 		} else {
 			tcp_send_ack(sk);
@@ -5800,8 +5785,6 @@
 	int queued = 0;
 	bool acceptable;
 
-	tp->rx_opt.saw_tstamp = 0;
-
 	switch (sk->sk_state) {
 	case TCP_CLOSE:
 		goto discard;
@@ -5819,29 +5802,13 @@
 			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
 				return 1;
 
-			/* Now we have several options: In theory there is
-			 * nothing else in the frame. KA9Q has an option to
-			 * send data with the syn, BSD accepts data with the
-			 * syn up to the [to be] advertised window and
-			 * Solaris 2.1 gives you a protocol error. For now
-			 * we just ignore it, that fits the spec precisely
-			 * and avoids incompatibilities. It would be nice in
-			 * future to drop through and process the data.
-			 *
-			 * Now that TTCP is starting to be used we ought to
-			 * queue this data.
-			 * But, this leaves one open to an easy denial of
-			 * service attack, and SYN cookies can't defend
-			 * against this problem. So, we drop the data
-			 * in the interest of security over speed unless
-			 * it's still in use.
-			 */
-			kfree_skb(skb);
+			consume_skb(skb);
 			return 0;
 		}
 		goto discard;
 
 	case TCP_SYN_SENT:
+		tp->rx_opt.saw_tstamp = 0;
 		queued = tcp_rcv_synsent_state_process(sk, skb, th);
 		if (queued >= 0)
 			return queued;
@@ -5853,6 +5820,7 @@
 		return 0;
 	}
 
+	tp->rx_opt.saw_tstamp = 0;
 	req = tp->fastopen_rsk;
 	if (req) {
 		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
@@ -5977,7 +5945,7 @@
 		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 		     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
 			tcp_done(sk);
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 			return 1;
 		}
 
@@ -6034,7 +6002,7 @@
 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+				NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 				tcp_reset(sk);
 				return 1;
 			}
@@ -6054,7 +6022,7 @@
 
 	if (!queued) {
 discard:
-		__kfree_skb(skb);
+		tcp_drop(sk, skb);
 	}
 	return 0;
 }
@@ -6172,10 +6140,10 @@
 	if (net->ipv4.sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
 		want_cookie = true;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
 	} else
 #endif
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
 	if (!queue->synflood_warned &&
 	    net->ipv4.sysctl_tcp_syncookies != 2 &&
@@ -6236,7 +6204,7 @@
 	 * timeout.
 	 */
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
 	}
 
@@ -6283,7 +6251,7 @@
 			if (dst && strict &&
 			    !tcp_peer_is_proven(req, dst, true,
 						tmp_opt.saw_tstamp)) {
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+				NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
 				goto drop_and_release;
 			}
 		}
@@ -6331,7 +6299,7 @@
 	}
 	if (fastopen_sk) {
 		af_ops->send_synack(fastopen_sk, dst, &fl, req,
-				    &foc, false);
+				    &foc, TCP_SYNACK_FASTOPEN);
 		/* Add the child socket directly into the accept queue */
 		inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
 		sk->sk_data_ready(sk);
@@ -6341,10 +6309,13 @@
 		tcp_rsk(req)->tfo_listener = false;
 		if (!want_cookie)
 			inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-		af_ops->send_synack(sk, dst, &fl, req,
-				    &foc, !want_cookie);
-		if (want_cookie)
-			goto drop_and_free;
+		af_ops->send_synack(sk, dst, &fl, req, &foc,
+				    !want_cookie ? TCP_SYNACK_NORMAL :
+						   TCP_SYNACK_COOKIE);
+		if (want_cookie) {
+			reqsk_free(req);
+			return 0;
+		}
 	}
 	reqsk_put(req);
 	return 0;
@@ -6354,7 +6325,7 @@
 drop_and_free:
 	reqsk_free(req);
 drop:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_conn_request);
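
tcp_conn_request() now tells send_synack() which kind of SYN-ACK it is building instead of passing a bool. A hedged sketch of the presumed enum (enumerator order is an assumption; the meanings follow the switch added to tcp_make_synack() later in this series):

    enum tcp_synack_type {
            TCP_SYNACK_NORMAL,      /* charge the skb to req_to_sk(req) */
            TCP_SYNACK_COOKIE,      /* synflood: attach the skb to no socket */
            TCP_SYNACK_FASTOPEN,    /* const listener sk, promoted to rw */
    };

Keeping the syncookie case socket-less avoids false sharing on the listener under a flood, which the old bool could not express alongside the Fast Open case.
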
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad45050..3708de2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -157,7 +157,7 @@
 
 	nexthop = daddr = usin->sin_addr.s_addr;
 	inet_opt = rcu_dereference_protected(inet->inet_opt,
-					     sock_owned_by_user(sk));
+					     lockdep_sock_is_held(sk));
 	if (inet_opt && inet_opt->opt.srr) {
 		if (!daddr)
 			return -EINVAL;
@@ -320,7 +320,7 @@
 	 * an established socket here.
 	 */
 	if (seq != tcp_rsk(req)->snt_isn) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 	} else if (abort) {
 		/*
 		 * Still in SYN_RECV, just remove it silently.
@@ -329,7 +329,7 @@
 		 * errors returned from accept().
 		 */
 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
-		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
+		tcp_listendrop(req->rsk_listener);
 	}
 	reqsk_put(req);
 }
@@ -372,7 +372,7 @@
 				       th->dest, iph->saddr, ntohs(th->source),
 				       inet_iif(icmp_skb));
 	if (!sk) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;
 	}
 	if (sk->sk_state == TCP_TIME_WAIT) {
@@ -396,13 +396,13 @@
 	 */
 	if (sock_owned_by_user(sk)) {
 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
-			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	}
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto out;
 	}
 
@@ -413,7 +413,7 @@
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -628,6 +628,7 @@
 
 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 #ifdef CONFIG_TCP_MD5SIG
+	rcu_read_lock();
 	hash_location = tcp_parse_md5sig_option(th);
 	if (sk && sk_fullsock(sk)) {
 		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
@@ -646,16 +647,18 @@
 					     ntohs(th->source), inet_iif(skb));
 		/* don't send rst if it can't find key */
 		if (!sk1)
-			return;
-		rcu_read_lock();
+			goto out;
+
 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
 					&ip_hdr(skb)->saddr, AF_INET);
 		if (!key)
-			goto release_sk1;
+			goto out;
+
 
 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
-			goto release_sk1;
+			goto out;
+
 	}
 
 	if (key) {
@@ -689,20 +692,19 @@
 		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 
 	arg.tos = ip_hdr(skb)->tos;
+	local_bh_disable();
 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 			      &arg, arg.iov[0].iov_len);
 
-	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
-	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+	local_bh_enable();
 
 #ifdef CONFIG_TCP_MD5SIG
-release_sk1:
-	if (sk1) {
-		rcu_read_unlock();
-		sock_put(sk1);
-	}
+out:
+	rcu_read_unlock();
 #endif
 }
 
@@ -774,12 +776,14 @@
 	if (oif)
 		arg.bound_dev_if = oif;
 	arg.tos = tos;
+	local_bh_disable();
 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 			      &arg, arg.iov[0].iov_len);
 
-	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+	local_bh_enable();
 }
 
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -830,7 +834,7 @@
 			      struct flowi *fl,
 			      struct request_sock *req,
 			      struct tcp_fastopen_cookie *foc,
-				  bool attach_req)
+			      enum tcp_synack_type synack_type)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -841,7 +845,7 @@
 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 		return -1;
 
-	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
+	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
@@ -882,8 +886,7 @@
 
 	/* caller either holds rcu_read_lock() or socket lock */
 	md5sig = rcu_dereference_check(tp->md5sig_info,
-				       sock_owned_by_user(sk) ||
-				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
+				       lockdep_sock_is_held(sk));
 	if (!md5sig)
 		return NULL;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -928,8 +931,7 @@
 	}
 
 	md5sig = rcu_dereference_protected(tp->md5sig_info,
-					   sock_owned_by_user(sk) ||
-					   lockdep_is_held(&sk->sk_lock.slock));
+					   lockdep_sock_is_held(sk));
 	if (!md5sig) {
 		md5sig = kmalloc(sizeof(*md5sig), gfp);
 		if (!md5sig)
@@ -1153,12 +1155,12 @@
 		return false;
 
 	if (hash_expected && !hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}
 
 	if (!hash_expected && hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}
 
@@ -1246,7 +1248,7 @@
 				&tcp_request_sock_ipv4_ops, sk, skb);
 
 drop:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
@@ -1344,11 +1346,11 @@
 	return newsk;
 
 exit_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
 	dst_release(dst);
 exit:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return NULL;
 put_and_exit:
 	inet_csk_prepare_forced_close(newsk);
@@ -1434,8 +1436,8 @@
 	return 0;
 
 csum_err:
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 	goto discard;
 }
 EXPORT_SYMBOL(tcp_v4_do_rcv);
@@ -1508,16 +1510,16 @@
 
 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
 	tp->ucopy.memory += skb->truesize;
-	if (tp->ucopy.memory > sk->sk_rcvbuf) {
+	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
+	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
 		struct sk_buff *skb1;
 
 		BUG_ON(sock_owned_by_user(sk));
+		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
+				skb_queue_len(&tp->ucopy.prequeue));
 
-		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk_backlog_rcv(sk, skb1);
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPPREQUEUEDROPPED);
-		}
 
 		tp->ucopy.memory = 0;
 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
@@ -1538,24 +1540,25 @@
 
 int tcp_v4_rcv(struct sk_buff *skb)
 {
+	struct net *net = dev_net(skb->dev);
 	const struct iphdr *iph;
 	const struct tcphdr *th;
+	bool refcounted;
 	struct sock *sk;
 	int ret;
-	struct net *net = dev_net(skb->dev);
 
 	if (skb->pkt_type != PACKET_HOST)
 		goto discard_it;
 
 	/* Count it even if it's bad */
-	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
+	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
 
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
 
-	th = tcp_hdr(skb);
+	th = (const struct tcphdr *)skb->data;
 
-	if (th->doff < sizeof(struct tcphdr) / 4)
+	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
 		goto bad_packet;
 	if (!pskb_may_pull(skb, th->doff * 4))
 		goto discard_it;
@@ -1568,7 +1571,7 @@
 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
 		goto csum_error;
 
-	th = tcp_hdr(skb);
+	th = (const struct tcphdr *)skb->data;
 	iph = ip_hdr(skb);
 	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
 	 * barrier() makes sure compiler wont play fool^Waliasing games.
@@ -1588,7 +1591,7 @@
 
 lookup:
 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
-			       th->dest);
+			       th->dest, &refcounted);
 	if (!sk)
 		goto no_tcp_socket;
 
@@ -1609,7 +1612,11 @@
 			inet_csk_reqsk_queue_drop_and_put(sk, req);
 			goto lookup;
 		}
+		/* We own a reference on the listener, increase it again
+		 * as we might lose it too soon.
+		 */
 		sock_hold(sk);
+		refcounted = true;
 		nsk = tcp_check_req(sk, skb, req, false);
 		if (!nsk) {
 			reqsk_put(req);
@@ -1626,7 +1633,7 @@
 		}
 	}
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
 	}
 
@@ -1659,13 +1666,14 @@
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
 
 put_and_return:
-	sock_put(sk);
+	if (refcounted)
+		sock_put(sk);
 
 	return ret;
 
@@ -1675,9 +1683,9 @@
 
 	if (tcp_checksum_complete(skb)) {
 csum_error:
-		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
+		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
 bad_packet:
-		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+		__TCP_INC_STATS(net, TCP_MIB_INERRS);
 	} else {
 		tcp_v4_send_reset(NULL, skb);
 	}
@@ -1688,7 +1696,9 @@
 	return 0;
 
 discard_and_relse:
-	sock_put(sk);
+	sk_drops_add(sk, skb);
+	if (refcounted)
+		sock_put(sk);
 	goto discard_it;
 
 do_time_wait:
@@ -1712,6 +1722,7 @@
 		if (sk2) {
 			inet_twsk_deschedule_put(inet_twsk(sk));
 			sk = sk2;
+			refcounted = false;
 			goto process;
 		}
 		/* Fall through to ACK */
@@ -1828,7 +1839,9 @@
 	tcp_free_fastopen_req(tp);
 	tcp_saved_syn_free(tp);
 
+	local_bh_disable();
 	sk_sockets_allocated_dec(sk);
+	local_bh_enable();
 
 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 		sock_release_memcg(sk);
@@ -1845,17 +1858,17 @@
  */
 static void *listening_get_next(struct seq_file *seq, void *cur)
 {
-	struct inet_connection_sock *icsk;
-	struct hlist_nulls_node *node;
-	struct sock *sk = cur;
-	struct inet_listen_hashbucket *ilb;
 	struct tcp_iter_state *st = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct inet_listen_hashbucket *ilb;
+	struct inet_connection_sock *icsk;
+	struct sock *sk = cur;
 
 	if (!sk) {
+get_head:
 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
 		spin_lock_bh(&ilb->lock);
-		sk = sk_nulls_head(&ilb->head);
+		sk = sk_head(&ilb->head);
 		st->offset = 0;
 		goto get_sk;
 	}
@@ -1863,28 +1876,20 @@
 	++st->num;
 	++st->offset;
 
-	sk = sk_nulls_next(sk);
+	sk = sk_next(sk);
 get_sk:
-	sk_nulls_for_each_from(sk, node) {
+	sk_for_each_from(sk) {
 		if (!net_eq(sock_net(sk), net))
 			continue;
-		if (sk->sk_family == st->family) {
-			cur = sk;
-			goto out;
-		}
+		if (sk->sk_family == st->family)
+			return sk;
 		icsk = inet_csk(sk);
 	}
 	spin_unlock_bh(&ilb->lock);
 	st->offset = 0;
-	if (++st->bucket < INET_LHTABLE_SIZE) {
-		ilb = &tcp_hashinfo.listening_hash[st->bucket];
-		spin_lock_bh(&ilb->lock);
-		sk = sk_nulls_head(&ilb->head);
-		goto get_sk;
-	}
-	cur = NULL;
-out:
-	return cur;
+	if (++st->bucket < INET_LHTABLE_SIZE)
+		goto get_head;
+	return NULL;
 }
 
 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
@@ -2383,6 +2388,7 @@
 					   IPPROTO_TCP, net);
 		if (res)
 			goto fail;
+		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
 	}
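
Several hunks above convert tcp_v4_rcv() to RCU-only lookups: listener and UDP-style sockets flagged SOCK_RCU_FREE are freed only after a grace period, so finding one no longer takes a reference, and __inet_lookup_skb() reports through 'refcounted' whether the caller owns a reference to drop. A hedged sketch of the resulting caller pattern (processing body elided, mirrors the call in tcp_v4_rcv() above):

    static void rx_sketch(struct sk_buff *skb, const struct tcphdr *th)
    {
            bool refcounted;
            struct sock *sk;

            sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
                                   th->source, th->dest, &refcounted);
            if (!sk)
                    return;

            /* ... validate and queue the segment ... */

            if (refcounted)         /* established/timewait sockets are still pinned */
                    sock_put(sk);
    }
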
 
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 1e70fa8..c67ece1 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -260,13 +260,13 @@
  * newReno in increase case.
  * We work it out by following the idea from TCP-LP's paper directly
  */
-static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
+static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct lp *lp = inet_csk_ca(sk);
 
-	if (rtt_us > 0)
-		tcp_lp_rtt_sample(sk, rtt_us);
+	if (sample->rtt_us > 0)
+		tcp_lp_rtt_sample(sk, sample->rtt_us);
 
 	/* calc inference */
 	if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 7b7eec43..b617826 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -800,7 +800,8 @@
 	}
 
 	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
-			  jiffies - tm->tcpm_stamp) < 0)
+			  jiffies - tm->tcpm_stamp,
+			  TCP_METRICS_ATTR_PAD) < 0)
 		goto nla_put_failure;
 	if (tm->tcpm_ts_stamp) {
 		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
@@ -864,7 +865,8 @@
 		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
 				tfom->syn_loss) < 0 ||
 		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
-				jiffies - tfom->last_syn_loss) < 0))
+				jiffies - tfom->last_syn_loss,
+				TCP_METRICS_ATTR_PAD) < 0))
 			goto nla_put_failure;
 		if (tfom->cookie.len > 0 &&
 		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
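
The extra TCP_METRICS_ATTR_PAD argument follows the new netlink convention for 64-bit payloads: putters take a dedicated pad attribute type so the attribute data can be aligned to 8 bytes on architectures that need it. A hedged one-liner showing the updated nla_put_msecs() usage (attribute choice mirrors the hunk above; age_jiffies is illustrative):

    /* The PAD attribute carries no data; it only reserves alignment space. */
    err = nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
                        age_jiffies, TCP_METRICS_ATTR_PAD);
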
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index acb366d..4b95ec4 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -235,7 +235,7 @@
 	}
 
 	if (paws_reject)
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
+		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
 
 	if (!th->rst) {
 		/* In this case we must reset the TIMEWAIT timer.
@@ -337,7 +337,7 @@
 		 * socket up.  We've got bigger problems than
 		 * non-graceful socket closings.
 		 */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
 	}
 
 	tcp_update_metrics(sk);
@@ -545,7 +545,7 @@
 		newtp->rack.mstamp.v64 = 0;
 		newtp->rack.advanced = 0;
 
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
+		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
 	}
 	return newsk;
 }
@@ -704,10 +704,13 @@
 	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
 					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
 		/* Out of window: send ACK and drop. */
-		if (!(flg & TCP_FLAG_RST))
+		if (!(flg & TCP_FLAG_RST) &&
+		    !tcp_oow_rate_limited(sock_net(sk), skb,
+					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
+					  &tcp_rsk(req)->last_oow_ack_time))
 			req->rsk_ops->send_ack(sk, skb, req);
 		if (paws_reject)
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 		return NULL;
 	}
 
@@ -726,7 +729,7 @@
 	 *	   "fourth, check the SYN bit"
 	 */
 	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 		goto embryonic_reset;
 	}
 
@@ -749,7 +752,7 @@
 	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
 		inet_rsk(req)->acked = 1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
 		return NULL;
 	}
 
@@ -788,7 +791,7 @@
 	}
 	if (!fastopen) {
 		inet_csk_reqsk_queue_drop(sk, req);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
 	}
 	return NULL;
 }
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 773083b..02737b6 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -89,6 +89,7 @@
 			     ~(SKB_GSO_TCPV4 |
 			       SKB_GSO_DODGY |
 			       SKB_GSO_TCP_ECN |
+			       SKB_GSO_TCP_FIXEDID |
 			       SKB_GSO_TCPV6 |
 			       SKB_GSO_GRE |
 			       SKB_GSO_GRE_CSUM |
@@ -98,7 +99,8 @@
 			       SKB_GSO_UDP_TUNNEL_CSUM |
 			       SKB_GSO_TUNNEL_REMCSUM |
 			       0) ||
-			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
+			     !(type & (SKB_GSO_TCPV4 |
+				       SKB_GSO_TCPV6))))
 			goto out;
 
 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
@@ -107,6 +109,12 @@
 		goto out;
 	}
 
+	/* GSO partial only requires splitting the frame into an MSS
+	 * multiple and possibly a remainder.  So update the mss now.
+	 */
+	if (features & NETIF_F_GSO_PARTIAL)
+		mss = skb->len - (skb->len % mss);
+
 	copy_destructor = gso_skb->destructor == tcp_wfree;
 	ooo_okay = gso_skb->ooo_okay;
 	/* All segments but the first should have ooo_okay cleared */
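
A worked example of the MSS rounding above (numbers are illustrative): with 60000 bytes of payload and an MSS of 1448,

    60000 % 1448 = 632
    mss = 60000 - 632 = 59368   (41 * 1448)

so GSO partial hands the device one MSS-multiple chunk of 59368 bytes and the stack emits the 632-byte remainder as an ordinary trailing segment.
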
@@ -131,7 +139,7 @@
 	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
 					       (__force u32)delta));
 
-	do {
+	while (skb->next) {
 		th->fin = th->psh = 0;
 		th->check = newcheck;
 
@@ -151,7 +159,7 @@
 
 		th->seq = htonl(seq);
 		th->cwr = 0;
-	} while (skb->next);
+	}
 
 	/* Following permits TCP Small Queues to work well with GSO :
 	 * The callback to TCP stack will be called at the time last frag
@@ -237,7 +245,7 @@
 
 found:
 	/* Include the IP ID check below from the inner most IP hdr */
-	flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
+	flush = NAPI_GRO_CB(p)->flush;
 	flush |= (__force int)(flags & TCP_FLAG_CWR);
 	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
 		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
@@ -246,6 +254,17 @@
 		flush |= *(u32 *)((u8 *)th + i) ^
 			 *(u32 *)((u8 *)th2 + i);
 
+	/* When we receive our second frame we can make a decision on if we
+	 * continue this flow as an atomic flow with a fixed ID or if we use
+	 * an incrementing ID.
+	 */
+	if (NAPI_GRO_CB(p)->flush_id != 1 ||
+	    NAPI_GRO_CB(p)->count != 1 ||
+	    !NAPI_GRO_CB(p)->is_atomic)
+		flush |= NAPI_GRO_CB(p)->flush_id;
+	else
+		NAPI_GRO_CB(p)->is_atomic = false;
+
 	mss = skb_shinfo(p)->gso_size;
 
 	flush |= (len - 1) >= mss;
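
The new block above decides, on the second frame of a flow, whether the sender presumably uses a fixed IP ID or an incrementing one, and only incrementing flows give up the atomic marking. A hedged restatement in comment form:

    /* Hedged restatement of the branch above:
     *   count == 1 && flush_id == 1  ->  incrementing-ID flow: clear
     *                                    is_atomic, keep aggregating
     *   otherwise                    ->  flush |= flush_id; a nonzero ID
     *                                    check ends the merge
     * Flows that stay atomic are tagged SKB_GSO_TCP_FIXEDID at complete
     * time (next hunks) so resegmentation keeps the ID fixed.
     */
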
@@ -314,6 +333,9 @@
 				  iph->daddr, 0);
 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 
+	if (NAPI_GRO_CB(skb)->is_atomic)
+		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+
 	return tcp_gro_complete(skb);
 }
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d2dc01..8bd9911 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -364,7 +364,7 @@
  * be sent.
  */
 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
-				int tcp_header_len)
+			 struct tcphdr *th, int tcp_header_len)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -375,7 +375,7 @@
 			INET_ECN_xmit(sk);
 			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
 				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
-				tcp_hdr(skb)->cwr = 1;
+				th->cwr = 1;
 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 			}
 		} else if (!tcp_ca_needs_ecn(sk)) {
@@ -383,7 +383,7 @@
 			INET_ECN_dontxmit(sk);
 		}
 		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
-			tcp_hdr(skb)->ece = 1;
+			th->ece = 1;
 	}
 }
 
@@ -949,12 +949,12 @@
 
 	skb_orphan(skb);
 	skb->sk = sk;
-	skb->destructor = skb_is_tcp_pure_ack(skb) ? sock_wfree : tcp_wfree;
+	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
 	skb_set_hash_from_sk(skb, sk);
 	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
 	/* Build TCP header and checksum it. */
-	th = tcp_hdr(skb);
+	th = (struct tcphdr *)skb->data;
 	th->source		= inet->inet_sport;
 	th->dest		= inet->inet_dport;
 	th->seq			= htonl(tcb->seq);
@@ -962,14 +962,6 @@
 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
 					tcb->tcp_flags);
 
-	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
-		/* RFC1323: The window in SYN & SYN/ACK segments
-		 * is never scaled.
-		 */
-		th->window	= htons(min(tp->rcv_wnd, 65535U));
-	} else {
-		th->window	= htons(tcp_select_window(sk));
-	}
 	th->check		= 0;
 	th->urg_ptr		= 0;
 
@@ -986,9 +978,15 @@
 
 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
-	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
-		tcp_ecn_send(sk, skb, tcp_header_size);
-
+	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
+		th->window      = htons(tcp_select_window(sk));
+		tcp_ecn_send(sk, skb, th, tcp_header_size);
+	} else {
+		/* RFC1323: The window in SYN & SYN/ACK segments
+		 * is never scaled.
+		 */
+		th->window	= htons(min(tp->rcv_wnd, 65535U));
+	}
 #ifdef CONFIG_TCP_MD5SIG
 	/* Calculate the MD5 hash, as we have all we need now */
 	if (md5) {
@@ -1111,11 +1109,17 @@
 	tcp_verify_left_out(tp);
 }
 
+static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
+{
+	return TCP_SKB_CB(skb)->txstamp_ack ||
+		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
+}
+
 static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-	if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) &&
+	if (unlikely(tcp_has_tx_tstamp(skb)) &&
 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
@@ -1123,9 +1127,17 @@
 		shinfo->tx_flags &= ~tsflags;
 		shinfo2->tx_flags |= tsflags;
 		swap(shinfo->tskey, shinfo2->tskey);
+		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
+		TCP_SKB_CB(skb)->txstamp_ack = 0;
 	}
 }
 
+static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
+{
+	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
+	TCP_SKB_CB(skb)->eor = 0;
+}
+
 /* Function to create two new TCP segments.  Shrinks the given segment
  * to the specified size and appends a new segment with the rest of the
  * packet to the list.  This won't be called frequently, I hope.
@@ -1171,6 +1183,7 @@
 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
 	TCP_SKB_CB(buff)->tcp_flags = flags;
 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
+	tcp_skb_fragment_eor(skb, buff);
 
 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
 		/* Copy and checksum data tail into the new buffer. */
@@ -1731,6 +1744,8 @@
 	/* This packet was never sent out yet, so no SACK bits. */
 	TCP_SKB_CB(buff)->sacked = 0;
 
+	tcp_skb_fragment_eor(skb, buff);
+
 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
 	skb_split(skb, buff, len);
 	tcp_fragment_tstamp(skb, buff);
@@ -2204,14 +2219,13 @@
 /* Thanks to skb fast clones, we can detect if a prior transmit of
  * a packet is still in a qdisc or driver queue.
  * In this case, there is very little point doing a retransmit !
- * Note: This is called from BH context only.
  */
 static bool skb_still_in_host_queue(const struct sock *sk,
 				    const struct sk_buff *skb)
 {
 	if (unlikely(skb_fclone_busy(sk, skb))) {
-		NET_INC_STATS_BH(sock_net(sk),
-				 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+		NET_INC_STATS(sock_net(sk),
+			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 		return true;
 	}
 	return false;
@@ -2266,14 +2280,14 @@
 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
 		goto rearm_timer;
 
-	if (__tcp_retransmit_skb(sk, skb))
+	if (__tcp_retransmit_skb(sk, skb, 1))
 		goto rearm_timer;
 
 	/* Record snd_nxt for loss detection. */
 	tp->tlp_high_seq = tp->snd_nxt;
 
 probe_sent:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
 	inet_csk(sk)->icsk_pending = 0;
 rearm_timer:
@@ -2441,6 +2455,21 @@
 	return window;
 }
 
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+			     const struct sk_buff *next_skb)
+{
+	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
+		const struct skb_shared_info *next_shinfo =
+			skb_shinfo(next_skb);
+		struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
+		shinfo->tskey = next_shinfo->tskey;
+		TCP_SKB_CB(skb)->txstamp_ack |=
+			TCP_SKB_CB(next_skb)->txstamp_ack;
+	}
+}
+
 /* Collapses two adjacent SKB's during retransmission. */
 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 {
@@ -2476,6 +2505,7 @@
 	 * packet counting does not break.
 	 */
 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
+	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
 
 	/* changed transmit queue under us so clear hints */
 	tcp_clear_retrans_hints_partial(tp);
@@ -2484,6 +2514,8 @@
 
 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
 
+	tcp_skb_collapse_tstamp(skb, next_skb);
+
 	sk_wmem_free_skb(sk, next_skb);
 }
 
@@ -2525,6 +2557,9 @@
 		if (!tcp_can_collapse(sk, skb))
 			break;
 
+		if (!tcp_skb_can_collapse_to(to))
+			break;
+
 		space -= skb->len;
 
 		if (first) {
@@ -2551,17 +2586,17 @@
  * state updates are done by the caller.  Returns non-zero if an
  * error occurred which prevented the send.
  */
-int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int cur_mss;
-	int err;
+	int diff, len, err;
 
-	/* Inconslusive MTU probe */
-	if (icsk->icsk_mtup.probe_size) {
+
+	/* Inconclusive MTU probe */
+	if (icsk->icsk_mtup.probe_size)
 		icsk->icsk_mtup.probe_size = 0;
-	}
 
 	/* Do not sent more than we queued. 1/4 is reserved for possible
 	 * copying overhead: fragmentation, tunneling, mangling etc.
@@ -2594,38 +2629,37 @@
 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
 		return -EAGAIN;
 
-	if (skb->len > cur_mss) {
-		if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC))
+	len = cur_mss * segs;
+	if (skb->len > len) {
+		if (tcp_fragment(sk, skb, len, cur_mss, GFP_ATOMIC))
 			return -ENOMEM; /* We'll try again later. */
 	} else {
-		int oldpcount = tcp_skb_pcount(skb);
+		if (skb_unclone(skb, GFP_ATOMIC))
+			return -ENOMEM;
 
-		if (unlikely(oldpcount > 1)) {
-			if (skb_unclone(skb, GFP_ATOMIC))
-				return -ENOMEM;
-			tcp_init_tso_segs(skb, cur_mss);
-			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
-		}
+		diff = tcp_skb_pcount(skb);
+		tcp_set_skb_tso_segs(skb, cur_mss);
+		diff -= tcp_skb_pcount(skb);
+		if (diff)
+			tcp_adjust_pcount(sk, skb, diff);
+		if (skb->len < cur_mss)
+			tcp_retrans_try_collapse(sk, skb, cur_mss);
 	}
 
 	/* RFC3168, section 6.1.1.1. ECN fallback */
 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
 		tcp_ecn_clear_syn(sk, skb);
 
-	tcp_retrans_try_collapse(sk, skb, cur_mss);
-
-	/* Make a copy, if the first transmission SKB clone we made
-	 * is still in somebody's hands, else make a clone.
-	 */
-
 	/* make sure skb->data is aligned on arches that require it
 	 * and check if ack-trimming & collapsing extended the headroom
 	 * beyond what csum_start can cover.
 	 */
 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
 		     skb_headroom(skb) >= 0xFFFF)) {
-		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
-						   GFP_ATOMIC);
+		struct sk_buff *nskb;
+
+		skb_mstamp_get(&skb->skb_mstamp);
+		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
 			     -ENOBUFS;
 	} else {
@@ -2633,20 +2667,22 @@
 	}
 
 	if (likely(!err)) {
+		segs = tcp_skb_pcount(skb);
+
 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
 		/* Update global TCP statistics. */
-		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+		TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-		tp->total_retrans++;
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+		tp->total_retrans += segs;
 	}
 	return err;
 }
 
-int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int err = __tcp_retransmit_skb(sk, skb);
+	int err = __tcp_retransmit_skb(sk, skb, segs);
 
 	if (err == 0) {
 #if FASTRETRANS_DEBUG > 0
@@ -2662,7 +2698,7 @@
 			tp->retrans_stamp = tcp_skb_timestamp(skb);
 
 	} else if (err != -EBUSY) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
 	}
 
 	if (tp->undo_retrans < 0)
@@ -2737,6 +2773,7 @@
 
 	tcp_for_write_queue_from(skb, sk) {
 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
+		int segs;
 
 		if (skb == tcp_send_head(sk))
 			break;
@@ -2744,14 +2781,8 @@
 		if (!hole)
 			tp->retransmit_skb_hint = skb;
 
-		/* Assume this retransmit will generate
-		 * only one packet for congestion window
-		 * calculation purposes.  This works because
-		 * tcp_retransmit_skb() will chop up the
-		 * packet to be MSS sized and all the
-		 * packet counting works out.
-		 */
-		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
+		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
+		if (segs <= 0)
 			return;
 
 		if (fwd_rexmitting) {
@@ -2788,10 +2819,10 @@
 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
 			continue;
 
-		if (tcp_retransmit_skb(sk, skb))
+		if (tcp_retransmit_skb(sk, skb, segs))
 			return;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		NET_INC_STATS(sock_net(sk), mib_idx);
 
 		if (tcp_in_cwnd_reduction(sk))
 			tp->prr_out += tcp_skb_pcount(skb);
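
Instead of assuming one MSS per retransmission, the loop above now gives tcp_retransmit_skb() a segment budget. A worked example of the arithmetic (values are illustrative): with snd_cwnd = 10 and 7 packets in flight,

    segs = snd_cwnd - packets_in_flight = 10 - 7 = 3
    len  = cur_mss * segs = 1448 * 3 = 4344 bytes

so a large TSO skb is split at 4344 bytes by the earlier __tcp_retransmit_skb() hunk and only that much is retransmitted in this pass, keeping the retransmission inside the congestion window.
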
@@ -2944,7 +2975,7 @@
 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
 				struct tcp_fastopen_cookie *foc,
-				bool attach_req)
+				enum tcp_synack_type synack_type)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	const struct tcp_sock *tp = tcp_sk(sk);
@@ -2964,14 +2995,22 @@
 	/* Reserve space for headers. */
 	skb_reserve(skb, MAX_TCP_HEADER);
 
-	if (attach_req) {
+	switch (synack_type) {
+	case TCP_SYNACK_NORMAL:
 		skb_set_owner_w(skb, req_to_sk(req));
-	} else {
+		break;
+	case TCP_SYNACK_COOKIE:
+		/* Under synflood, we do not attach skb to a socket,
+		 * to avoid false sharing.
+		 */
+		break;
+	case TCP_SYNACK_FASTOPEN:
 		/* sk is a const pointer, because we want to express multiple
 		 * cpu might call us concurrently.
 		 * sk->sk_wmem_alloc in an atomic, we can promote to rw.
 		 */
 		skb_set_owner_w(skb, (struct sock *)sk);
+		break;
 	}
 	skb_dst_set(skb, dst);
 
@@ -2999,7 +3038,7 @@
 	skb_push(skb, tcp_header_size);
 	skb_reset_transport_header(skb);
 
-	th = tcp_hdr(skb);
+	th = (struct tcphdr *)skb->data;
 	memset(th, 0, sizeof(struct tcphdr));
 	th->syn = 1;
 	th->ack = 1;
@@ -3020,7 +3059,7 @@
 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
 	th->doff = (tcp_header_size >> 2);
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
+	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Okay, we have all we need - do the md5 hash if needed */
@@ -3516,10 +3555,10 @@
 	int res;
 
 	tcp_rsk(req)->txhash = net_tx_rndhash();
-	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true);
+	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
 	if (!res) {
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
 	}
 	return res;
 }
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 5353085..e36df4f 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -65,8 +65,8 @@
 			if (scb->sacked & TCPCB_SACKED_RETRANS) {
 				scb->sacked &= ~TCPCB_SACKED_RETRANS;
 				tp->retrans_out -= tcp_skb_pcount(skb);
-				NET_INC_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPLOSTRETRANSMIT);
+				NET_INC_STATS(sock_net(sk),
+					      LINUX_MIB_TCPLOSTRETRANSMIT);
 			}
 		} else if (!(scb->sacked & TCPCB_RETRANS)) {
 			/* Original data are sent sequentially so stop early
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 49bc474..debdd8b 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -30,7 +30,7 @@
 	sk->sk_error_report(sk);
 
 	tcp_done(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 }
 
 /* Do not allow orphaned sockets to eat all our resources.
@@ -68,7 +68,7 @@
 		if (do_reset)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 		tcp_done(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 		return 1;
 	}
 	return 0;
@@ -162,8 +162,8 @@
 			if (tp->syn_fastopen || tp->syn_data)
 				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
 			if (tp->syn_data && icsk->icsk_retransmits == 1)
-				NET_INC_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+				NET_INC_STATS(sock_net(sk),
+					      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		}
 		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
 		syn_set = true;
@@ -178,8 +178,8 @@
 			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
 				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
 				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
-					NET_INC_STATS_BH(sock_net(sk),
-							 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+					NET_INC_STATS(sock_net(sk),
+						      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 			}
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
@@ -209,6 +209,7 @@
 	return 0;
 }
 
+/* Called with BH disabled */
 void tcp_delack_timer_handler(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -228,7 +229,7 @@
 	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
 
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk_backlog_rcv(sk, skb);
@@ -248,7 +249,7 @@
 			icsk->icsk_ack.ato      = TCP_ATO_MIN;
 		}
 		tcp_send_ack(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
 
 out:
@@ -265,7 +266,7 @@
 		tcp_delack_timer_handler(sk);
 	} else {
 		inet_csk(sk)->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		/* deleguate our work to tcp_release_cb() */
 		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
 			sock_hold(sk);
@@ -404,7 +405,7 @@
 			goto out;
 		}
 		tcp_enter_loss(sk);
-		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
+		tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1);
 		__sk_dst_reset(sk);
 		goto out_reset_timer;
 	}
@@ -431,12 +432,12 @@
 		} else {
 			mib_idx = LINUX_MIB_TCPTIMEOUTS;
 		}
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 	}
 
 	tcp_enter_loss(sk);
 
-	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
+	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1) > 0) {
 		/* Retransmission failed because of local congestion,
 		 * do not backoff.
 		 */
@@ -493,6 +494,7 @@
 out:;
 }
 
+/* Called with BH disabled */
 void tcp_write_timer_handler(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
@@ -549,7 +551,7 @@
 {
 	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
 
-	NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
+	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
 }
 EXPORT_SYMBOL(tcp_syn_ack_timeout);
 
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 13951c4..4c4bac1 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -107,16 +107,16 @@
  *   o min-filter RTT samples from a much longer window (forever for now)
  *     to find the propagation delay (baseRTT)
  */
-void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 {
 	struct vegas *vegas = inet_csk_ca(sk);
 	u32 vrtt;
 
-	if (rtt_us < 0)
+	if (sample->rtt_us < 0)
 		return;
 
 	/* Never allow zero rtt or baseRTT */
-	vrtt = rtt_us + 1;
+	vrtt = sample->rtt_us + 1;
 
 	/* Filter to find propagation delay: */
 	if (vrtt < vegas->baseRTT)
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index ef9da53..248cfc0 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -17,7 +17,7 @@
 
 void tcp_vegas_init(struct sock *sk);
 void tcp_vegas_state(struct sock *sk, u8 ca_state);
-void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
+void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample);
 void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
 size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
 			  union tcp_cc_info *info);
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 0d094b9..40171e1 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -69,16 +69,17 @@
 }
 
 /* Do rtt sampling needed for Veno. */
-static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+static void tcp_veno_pkts_acked(struct sock *sk,
+				const struct ack_sample *sample)
 {
 	struct veno *veno = inet_csk_ca(sk);
 	u32 vrtt;
 
-	if (rtt_us < 0)
+	if (sample->rtt_us < 0)
 		return;
 
 	/* Never allow zero rtt or baseRTT */
-	vrtt = rtt_us + 1;
+	vrtt = sample->rtt_us + 1;
 
 	/* Filter to find propagation delay: */
 	if (vrtt < veno->basertt)
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index c10732e..4b03a2e 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -99,12 +99,13 @@
  * Called after processing group of packets.
  * but all westwood needs is the last sample of srtt.
  */
-static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
+static void tcp_westwood_pkts_acked(struct sock *sk,
+				    const struct ack_sample *sample)
 {
 	struct westwood *w = inet_csk_ca(sk);
 
-	if (rtt > 0)
-		w->rtt = usecs_to_jiffies(rtt);
+	if (sample->rtt_us > 0)
+		w->rtt = usecs_to_jiffies(sample->rtt_us);
 }
 
 /*
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 3e6a472..028eb04 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -56,15 +56,16 @@
 	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
+static void tcp_yeah_pkts_acked(struct sock *sk,
+				const struct ack_sample *sample)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct yeah *yeah = inet_csk_ca(sk);
 
 	if (icsk->icsk_ca_state == TCP_CA_Open)
-		yeah->pkts_acked = pkts_acked;
+		yeah->pkts_acked = sample->pkts_acked;
 
-	tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
+	tcp_vegas_pkts_acked(sk, sample);
 }
 
 static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 08eed5e..2e3ebfe 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -143,10 +143,9 @@
 			       unsigned int log)
 {
 	struct sock *sk2;
-	struct hlist_nulls_node *node;
 	kuid_t uid = sock_i_uid(sk);
 
-	sk_nulls_for_each(sk2, node, &hslot->head) {
+	sk_for_each(sk2, &hslot->head) {
 		if (net_eq(sock_net(sk2), net) &&
 		    sk2 != sk &&
 		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
@@ -177,12 +176,11 @@
 						  bool match_wildcard))
 {
 	struct sock *sk2;
-	struct hlist_nulls_node *node;
 	kuid_t uid = sock_i_uid(sk);
 	int res = 0;
 
 	spin_lock(&hslot2->lock);
-	udp_portaddr_for_each_entry(sk2, node, &hslot2->head) {
+	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
 		if (net_eq(sock_net(sk2), net) &&
 		    sk2 != sk &&
 		    (udp_sk(sk2)->udp_port_hash == num) &&
@@ -207,11 +205,10 @@
 						    bool match_wildcard))
 {
 	struct net *net = sock_net(sk);
-	struct hlist_nulls_node *node;
 	kuid_t uid = sock_i_uid(sk);
 	struct sock *sk2;
 
-	sk_nulls_for_each(sk2, node, &hslot->head) {
+	sk_for_each(sk2, &hslot->head) {
 		if (net_eq(sock_net(sk2), net) &&
 		    sk2 != sk &&
 		    sk2->sk_family == sk->sk_family &&
@@ -333,17 +330,23 @@
 			goto fail_unlock;
 		}
 
-		sk_nulls_add_node_rcu(sk, &hslot->head);
+		sk_add_node_rcu(sk, &hslot->head);
 		hslot->count++;
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
 		spin_lock(&hslot2->lock);
-		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
-					 &hslot2->head);
+		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+		    sk->sk_family == AF_INET6)
+			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
+					   &hslot2->head);
+		else
+			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+					   &hslot2->head);
 		hslot2->count++;
 		spin_unlock(&hslot2->lock);
 	}
+	sock_set_flag(sk, SOCK_RCU_FREE);
 	error = 0;
 fail_unlock:
 	spin_unlock_bh(&hslot->lock);
@@ -497,37 +500,27 @@
 		struct sk_buff *skb)
 {
 	struct sock *sk, *result;
-	struct hlist_nulls_node *node;
 	int score, badness, matches = 0, reuseport = 0;
-	bool select_ok = true;
 	u32 hash = 0;
 
-begin:
 	result = NULL;
 	badness = 0;
-	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
+	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
 		score = compute_score2(sk, net, saddr, sport,
 				      daddr, hnum, dif);
 		if (score > badness) {
-			result = sk;
-			badness = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
-				if (select_ok) {
-					struct sock *sk2;
-
-					sk2 = reuseport_select_sock(sk, hash, skb,
+				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-					if (sk2) {
-						result = sk2;
-						select_ok = false;
-						goto found;
-					}
-				}
+				if (result)
+					return result;
 				matches = 1;
 			}
+			badness = score;
+			result = sk;
 		} else if (score == badness && reuseport) {
 			matches++;
 			if (reciprocal_scale(hash, matches) == 0)
@@ -535,23 +528,6 @@
 			hash = next_pseudo_random32(hash);
 		}
 	}
-	/*
-	 * if the nulls value we got at the end of this lookup is
-	 * not the expected one, we must restart lookup.
-	 * We probably met an item that was moved to another chain.
-	 */
-	if (get_nulls_value(node) != slot2)
-		goto begin;
-	if (result) {
-found:
-		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
-			result = NULL;
-		else if (unlikely(compute_score2(result, net, saddr, sport,
-				  daddr, hnum, dif) < badness)) {
-			sock_put(result);
-			goto begin;
-		}
-	}
 	return result;
 }
 
@@ -563,15 +539,12 @@
 		int dif, struct udp_table *udptable, struct sk_buff *skb)
 {
 	struct sock *sk, *result;
-	struct hlist_nulls_node *node;
 	unsigned short hnum = ntohs(dport);
 	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
 	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 	int score, badness, matches = 0, reuseport = 0;
-	bool select_ok = true;
 	u32 hash = 0;
 
-	rcu_read_lock();
 	if (hslot->count > 10) {
 		hash2 = udp4_portaddr_hash(net, daddr, hnum);
 		slot2 = hash2 & udptable->mask;
@@ -593,35 +566,27 @@
 						  htonl(INADDR_ANY), hnum, dif,
 						  hslot2, slot2, skb);
 		}
-		rcu_read_unlock();
 		return result;
 	}
 begin:
 	result = NULL;
 	badness = 0;
-	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+	sk_for_each_rcu(sk, &hslot->head) {
 		score = compute_score(sk, net, saddr, hnum, sport,
 				      daddr, dport, dif);
 		if (score > badness) {
-			result = sk;
-			badness = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
-				if (select_ok) {
-					struct sock *sk2;
-
-					sk2 = reuseport_select_sock(sk, hash, skb,
+				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-					if (sk2) {
-						result = sk2;
-						select_ok = false;
-						goto found;
-					}
-				}
+				if (result)
+					return result;
 				matches = 1;
 			}
+			result = sk;
+			badness = score;
 		} else if (score == badness && reuseport) {
 			matches++;
 			if (reciprocal_scale(hash, matches) == 0)
@@ -629,25 +594,6 @@
 			hash = next_pseudo_random32(hash);
 		}
 	}
-	/*
-	 * if the nulls value we got at the end of this lookup is
-	 * not the expected one, we must restart lookup.
-	 * We probably met an item that was moved to another chain.
-	 */
-	if (get_nulls_value(node) != slot)
-		goto begin;
-
-	if (result) {
-found:
-		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
-			result = NULL;
-		else if (unlikely(compute_score(result, net, saddr, hnum, sport,
-				  daddr, dport, dif) < badness)) {
-			sock_put(result);
-			goto begin;
-		}
-	}
-	rcu_read_unlock();
 	return result;
 }
 EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
@@ -658,18 +604,36 @@
 {
 	const struct iphdr *iph = ip_hdr(skb);
 
-	return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
+	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
 				 iph->daddr, dport, inet_iif(skb),
 				 udptable, skb);
 }
 
+struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+				 __be16 sport, __be16 dport)
+{
+	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
+}
+EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
+
+/* Must be called under rcu_read_lock().
+ * Does increment socket refcount.
+ */
+#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
+    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY)
 struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 			     __be32 daddr, __be16 dport, int dif)
 {
-	return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif,
-				 &udp_table, NULL);
+	struct sock *sk;
+
+	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
+			       dif, &udp_table, NULL);
+	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+		sk = NULL;
+	return sk;
 }
 EXPORT_SYMBOL_GPL(udp4_lib_lookup);
+#endif
 
 static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
 				       __be16 loc_port, __be32 loc_addr,
@@ -718,7 +682,7 @@
 			iph->saddr, uh->source, skb->dev->ifindex, udptable,
 			NULL);
 	if (!sk) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;	/* No socket for error */
 	}
 
@@ -771,7 +735,7 @@
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
-	sock_put(sk);
+	return;
 }
 
 void udp_err(struct sk_buff *skb, u32 info)
@@ -912,13 +876,13 @@
 	err = ip_send_skb(sock_net(sk), skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet->recverr) {
-			UDP_INC_STATS_USER(sock_net(sk),
-					   UDP_MIB_SNDBUFERRORS, is_udplite);
+			UDP_INC_STATS(sock_net(sk),
+				      UDP_MIB_SNDBUFERRORS, is_udplite);
 			err = 0;
 		}
 	} else
-		UDP_INC_STATS_USER(sock_net(sk),
-				   UDP_MIB_OUTDATAGRAMS, is_udplite);
+		UDP_INC_STATS(sock_net(sk),
+			      UDP_MIB_OUTDATAGRAMS, is_udplite);
 	return err;
 }
 
@@ -1027,15 +991,13 @@
 		 */
 		connected = 1;
 	}
-	ipc.addr = inet->inet_saddr;
 
+	ipc.sockc.tsflags = sk->sk_tsflags;
+	ipc.addr = inet->inet_saddr;
 	ipc.oif = sk->sk_bound_dev_if;
 
-	sock_tx_timestamp(sk, &ipc.tx_flags);
-
 	if (msg->msg_controllen) {
-		err = ip_cmsg_send(sock_net(sk), msg, &ipc,
-				   sk->sk_family == AF_INET6);
+		err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6);
 		if (unlikely(err)) {
 			kfree(ipc.opt);
 			return err;
@@ -1060,6 +1022,8 @@
 	saddr = ipc.addr;
 	ipc.addr = faddr = daddr;
 
+	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
+
 	if (ipc.opt && ipc.opt->opt.srr) {
 		if (!daddr)
 			return -EINVAL;
@@ -1187,8 +1151,8 @@
 	 * seems like overkill.
 	 */
 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-		UDP_INC_STATS_USER(sock_net(sk),
-				UDP_MIB_SNDBUFERRORS, is_udplite);
+		UDP_INC_STATS(sock_net(sk),
+			      UDP_MIB_SNDBUFERRORS, is_udplite);
 	}
 	return err;
 
@@ -1272,10 +1236,10 @@
 	spin_lock_bh(&rcvq->lock);
 	while ((skb = skb_peek(rcvq)) != NULL &&
 		udp_lib_checksum_complete(skb)) {
-		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
-				 IS_UDPLITE(sk));
-		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
-				 IS_UDPLITE(sk));
+		__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
+				IS_UDPLITE(sk));
+		__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+				IS_UDPLITE(sk));
 		atomic_inc(&sk->sk_drops);
 		__skb_unlink(skb, rcvq);
 		__skb_queue_tail(&list_kill, skb);
@@ -1311,14 +1275,6 @@
 	{
 		unsigned int amount = first_packet_length(sk);
 
-		if (amount)
-			/*
-			 * We will only return the amount
-			 * of this packet since that is all
-			 * that will be read.
-			 */
-			amount -= sizeof(struct udphdr);
-
 		return put_user(amount, (int __user *)arg);
 	}
 
@@ -1342,7 +1298,7 @@
 	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
 	struct sk_buff *skb;
 	unsigned int ulen, copied;
-	int peeked, off = 0;
+	int peeked, peeking, off;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
 	bool checksum_valid = false;
@@ -1352,15 +1308,16 @@
 		return ip_recv_error(sk, msg, len, addr_len);
 
 try_again:
+	peeking = off = sk_peek_offset(sk, flags);
 	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
 				  &peeked, &off, &err);
 	if (!skb)
-		goto out;
+		return err;
 
-	ulen = skb->len - sizeof(struct udphdr);
+	ulen = skb->len;
 	copied = len;
-	if (copied > ulen)
-		copied = ulen;
+	if (copied > ulen - off)
+		copied = ulen - off;
 	else if (copied < ulen)
 		msg->msg_flags |= MSG_TRUNC;
 
@@ -1370,18 +1327,16 @@
 	 * coverage checksum (UDP-Lite), do it before the copy.
 	 */
 
-	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) {
 		checksum_valid = !udp_lib_checksum_complete(skb);
 		if (!checksum_valid)
 			goto csum_copy_err;
 	}
 
 	if (checksum_valid || skb_csum_unnecessary(skb))
-		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
-					    msg, copied);
+		err = skb_copy_datagram_msg(skb, off, msg, copied);
 	else {
-		err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr),
-						     msg);
+		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 
 		if (err == -EINVAL)
 			goto csum_copy_err;
@@ -1391,15 +1346,16 @@
 		trace_kfree_skb(skb, udp_recvmsg);
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
-			UDP_INC_STATS_USER(sock_net(sk),
-					   UDP_MIB_INERRORS, is_udplite);
+			UDP_INC_STATS(sock_net(sk),
+				      UDP_MIB_INERRORS, is_udplite);
 		}
-		goto out_free;
+		skb_free_datagram_locked(sk, skb);
+		return err;
 	}
 
 	if (!peeked)
-		UDP_INC_STATS_USER(sock_net(sk),
-				UDP_MIB_INDATAGRAMS, is_udplite);
+		UDP_INC_STATS(sock_net(sk),
+			      UDP_MIB_INDATAGRAMS, is_udplite);
 
 	sock_recv_ts_and_drops(msg, sk, skb);
 
@@ -1412,22 +1368,20 @@
 		*addr_len = sizeof(*sin);
 	}
 	if (inet->cmsg_flags)
-		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
+		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
 
 	err = copied;
 	if (flags & MSG_TRUNC)
 		err = ulen;
 
-out_free:
-	skb_free_datagram_locked(sk, skb);
-out:
+	__skb_free_datagram_locked(sk, skb, peeking ? -err : err);
 	return err;
 
 csum_copy_err:
 	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags)) {
-		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
-		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	}
 	unlock_sock_fast(sk, slow);
 
@@ -1474,13 +1428,13 @@
 		spin_lock_bh(&hslot->lock);
 		if (rcu_access_pointer(sk->sk_reuseport_cb))
 			reuseport_detach_sock(sk);
-		if (sk_nulls_del_node_init_rcu(sk)) {
+		if (sk_del_node_init_rcu(sk)) {
 			hslot->count--;
 			inet_sk(sk)->inet_num = 0;
 			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
 			spin_lock(&hslot2->lock);
-			hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
 			hslot2->count--;
 			spin_unlock(&hslot2->lock);
 		}
@@ -1513,12 +1467,12 @@
 
 			if (hslot2 != nhslot2) {
 				spin_lock(&hslot2->lock);
-				hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
 				hslot2->count--;
 				spin_unlock(&hslot2->lock);
 
 				spin_lock(&nhslot2->lock);
-				hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
 							 &nhslot2->head);
 				nhslot2->count++;
 				spin_unlock(&nhslot2->lock);
@@ -1548,15 +1502,15 @@
 		sk_incoming_cpu_update(sk);
 	}
 
-	rc = sock_queue_rcv_skb(sk, skb);
+	rc = __sock_queue_rcv_skb(sk, skb);
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);
 
 		/* Note that an ENOMEM error is charged twice */
 		if (rc == -ENOMEM)
-			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
-					 is_udplite);
-		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+					is_udplite);
+		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 		kfree_skb(skb);
 		trace_udp_fail_queue_rcv_skb(rc, sk);
 		return -1;
@@ -1620,9 +1574,9 @@
 
 			ret = encap_rcv(sk, skb);
 			if (ret <= 0) {
-				UDP_INC_STATS_BH(sock_net(sk),
-						 UDP_MIB_INDATAGRAMS,
-						 is_udplite);
+				__UDP_INC_STATS(sock_net(sk),
+						UDP_MIB_INDATAGRAMS,
+						is_udplite);
 				return -ret;
 			}
 		}
@@ -1664,13 +1618,17 @@
 		}
 	}
 
-	if (rcu_access_pointer(sk->sk_filter) &&
-	    udp_lib_checksum_complete(skb))
-		goto csum_error;
+	if (rcu_access_pointer(sk->sk_filter)) {
+		if (udp_lib_checksum_complete(skb))
+			goto csum_error;
+		if (sk_filter(sk, skb))
+			goto drop;
+	}
 
+	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
-				 is_udplite);
+		__UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+				is_udplite);
 		goto drop;
 	}
 
@@ -1689,43 +1647,14 @@
 	return rc;
 
 csum_error:
-	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
-	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	atomic_inc(&sk->sk_drops);
 	kfree_skb(skb);
 	return -1;
 }
 
-static void flush_stack(struct sock **stack, unsigned int count,
-			struct sk_buff *skb, unsigned int final)
-{
-	unsigned int i;
-	struct sk_buff *skb1 = NULL;
-	struct sock *sk;
-
-	for (i = 0; i < count; i++) {
-		sk = stack[i];
-		if (likely(!skb1))
-			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
-
-		if (!skb1) {
-			atomic_inc(&sk->sk_drops);
-			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
-					 IS_UDPLITE(sk));
-			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
-					 IS_UDPLITE(sk));
-		}
-
-		if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
-			skb1 = NULL;
-
-		sock_put(sk);
-	}
-	if (unlikely(skb1))
-		kfree_skb(skb1);
-}
-
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
@@ -1749,14 +1678,14 @@
 				    struct udp_table *udptable,
 				    int proto)
 {
-	struct sock *sk, *stack[256 / sizeof(struct sock *)];
-	struct hlist_nulls_node *node;
+	struct sock *sk, *first = NULL;
 	unsigned short hnum = ntohs(uh->dest);
 	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
-	int dif = skb->dev->ifindex;
-	unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
-	bool inner_flushed = false;
+	unsigned int offset = offsetof(typeof(*sk), sk_node);
+	int dif = skb->dev->ifindex;
+	struct hlist_node *node;
+	struct sk_buff *nskb;
 
 	if (use_hash2) {
 		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
@@ -1767,23 +1696,28 @@
 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
 	}
 
-	spin_lock(&hslot->lock);
-	sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
-		if (__udp_is_mcast_sock(net, sk,
-					uh->dest, daddr,
-					uh->source, saddr,
-					dif, hnum)) {
-			if (unlikely(count == ARRAY_SIZE(stack))) {
-				flush_stack(stack, count, skb, ~0);
-				inner_flushed = true;
-				count = 0;
-			}
-			stack[count++] = sk;
-			sock_hold(sk);
-		}
-	}
+	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
+		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
+					 uh->source, saddr, dif, hnum))
+			continue;
 
-	spin_unlock(&hslot->lock);
+		if (!first) {
+			first = sk;
+			continue;
+		}
+		nskb = skb_clone(skb, GFP_ATOMIC);
+
+		if (unlikely(!nskb)) {
+			atomic_inc(&sk->sk_drops);
+			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
+					IS_UDPLITE(sk));
+			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
+					IS_UDPLITE(sk));
+			continue;
+		}
+		if (udp_queue_rcv_skb(sk, nskb) > 0)
+			consume_skb(nskb);
+	}
 
 	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
 	if (use_hash2 && hash2 != hash2_any) {
@@ -1791,16 +1725,13 @@
 		goto start_lookup;
 	}
 
-	/*
-	 * do the slow work with no lock held
-	 */
-	if (count) {
-		flush_stack(stack, count, skb, count - 1);
+	if (first) {
+		if (udp_queue_rcv_skb(first, skb) > 0)
+			consume_skb(skb);
 	} else {
-		if (!inner_flushed)
-			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
-					 proto == IPPROTO_UDPLITE);
-		consume_skb(skb);
+		kfree_skb(skb);
+		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
+				proto == IPPROTO_UDPLITE);
 	}
 	return 0;
 }
@@ -1897,7 +1828,6 @@
 						 inet_compute_pseudo);
 
 		ret = udp_queue_rcv_skb(sk, skb);
-		sock_put(sk);
 
 		/* a return value > 0 means to resubmit the input, but
 		 * it wants the return to be -protocol, or 0
@@ -1915,7 +1845,7 @@
 	if (udp_lib_checksum_complete(skb))
 		goto csum_error;
 
-	UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
 	/*
@@ -1942,9 +1872,9 @@
 			    proto == IPPROTO_UDPLITE ? "Lite" : "",
 			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
 			    ulen);
-	UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
+	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
 drop:
-	UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
+	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
 	kfree_skb(skb);
 	return 0;
 }
@@ -1958,49 +1888,24 @@
 						  int dif)
 {
 	struct sock *sk, *result;
-	struct hlist_nulls_node *node;
 	unsigned short hnum = ntohs(loc_port);
-	unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
+	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
 	struct udp_hslot *hslot = &udp_table.hash[slot];
 
 	/* Do not bother scanning a too big list */
 	if (hslot->count > 10)
 		return NULL;
 
-	rcu_read_lock();
-begin:
-	count = 0;
 	result = NULL;
-	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
-		if (__udp_is_mcast_sock(net, sk,
-					loc_port, loc_addr,
-					rmt_port, rmt_addr,
-					dif, hnum)) {
+	sk_for_each_rcu(sk, &hslot->head) {
+		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
+					rmt_port, rmt_addr, dif, hnum)) {
+			if (result)
+				return NULL;
 			result = sk;
-			++count;
 		}
 	}
-	/*
-	 * if the nulls value we got at the end of this lookup is
-	 * not the expected one, we must restart lookup.
-	 * We probably met an item that was moved to another chain.
-	 */
-	if (get_nulls_value(node) != slot)
-		goto begin;
 
-	if (result) {
-		if (count != 1 ||
-		    unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
-			result = NULL;
-		else if (unlikely(!__udp_is_mcast_sock(net, result,
-						       loc_port, loc_addr,
-						       rmt_port, rmt_addr,
-						       dif, hnum))) {
-			sock_put(result);
-			result = NULL;
-		}
-	}
-	rcu_read_unlock();
 	return result;
 }
 
@@ -2013,37 +1918,22 @@
 					    __be16 rmt_port, __be32 rmt_addr,
 					    int dif)
 {
-	struct sock *sk, *result;
-	struct hlist_nulls_node *node;
 	unsigned short hnum = ntohs(loc_port);
 	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
 	unsigned int slot2 = hash2 & udp_table.mask;
 	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
 	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
 	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
+	struct sock *sk;
 
-	rcu_read_lock();
-	result = NULL;
-	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
-		if (INET_MATCH(sk, net, acookie,
-			       rmt_addr, loc_addr, ports, dif))
-			result = sk;
+	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
+		if (INET_MATCH(sk, net, acookie, rmt_addr,
+			       loc_addr, ports, dif))
+			return sk;
 		/* Only check first socket in chain */
 		break;
 	}
-
-	if (result) {
-		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
-			result = NULL;
-		else if (unlikely(!INET_MATCH(sk, net, acookie,
-					      rmt_addr, loc_addr,
-					      ports, dif))) {
-			sock_put(result);
-			result = NULL;
-		}
-	}
-	rcu_read_unlock();
-	return result;
+	return NULL;
 }
 
 void udp_v4_early_demux(struct sk_buff *skb)
@@ -2051,7 +1941,7 @@
 	struct net *net = dev_net(skb->dev);
 	const struct iphdr *iph;
 	const struct udphdr *uh;
-	struct sock *sk;
+	struct sock *sk = NULL;
 	struct dst_entry *dst;
 	int dif = skb->dev->ifindex;
 	int ours;
@@ -2083,11 +1973,9 @@
 	} else if (skb->pkt_type == PACKET_HOST) {
 		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
 					     uh->source, iph->saddr, dif);
-	} else {
-		return;
 	}
 
-	if (!sk)
+	if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
 		return;
 
 	skb->sk = sk;
@@ -2387,14 +2275,13 @@
 
 	for (state->bucket = start; state->bucket <= state->udp_table->mask;
 	     ++state->bucket) {
-		struct hlist_nulls_node *node;
 		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
 
-		if (hlist_nulls_empty(&hslot->head))
+		if (hlist_empty(&hslot->head))
 			continue;
 
 		spin_lock_bh(&hslot->lock);
-		sk_nulls_for_each(sk, node, &hslot->head) {
+		sk_for_each(sk, &hslot->head) {
 			if (!net_eq(sock_net(sk), net))
 				continue;
 			if (sk->sk_family == state->family)
@@ -2413,7 +2300,7 @@
 	struct net *net = seq_file_net(seq);
 
 	do {
-		sk = sk_nulls_next(sk);
+		sk = sk_next(sk);
 	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
 
 	if (!sk) {
@@ -2622,12 +2509,12 @@
 
 	table->hash2 = table->hash + (table->mask + 1);
 	for (i = 0; i <= table->mask; i++) {
-		INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
+		INIT_HLIST_HEAD(&table->hash[i].head);
 		table->hash[i].count = 0;
 		spin_lock_init(&table->hash[i].lock);
 	}
 	for (i = 0; i <= table->mask; i++) {
-		INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
+		INIT_HLIST_HEAD(&table->hash2[i].head);
 		table->hash2[i].count = 0;
 		spin_lock_init(&table->hash2[i].lock);
 	}
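
The udp.c changes above are the heart of this series: the hash chains move from nulls lists to plain RCU hlists, udp_lib_get_port() marks every bound socket SOCK_RCU_FREE, and the receive-path lookups drop both the "nulls value" restart loop and the refcount/re-check dance. A condensed, hedged sketch of the resulting datapath idiom; sk_matches() stands in for the compute_score() ranking and error handling is elided:

	/* Sketch only: lockless lookup under the caller's rcu_read_lock().
	 * SOCK_RCU_FREE means the socket memory cannot be recycled while the
	 * RCU read-side section is open, so no reference is taken here.
	 */
	static struct sock *udp_lookup_sketch(struct udp_hslot *hslot)
	{
		struct sock *sk, *best = NULL;

		sk_for_each_rcu(sk, &hslot->head) {	/* plain hlist: no nulls restart */
			if (sk_matches(sk))		/* hypothetical score check */
				best = sk;
		}
		return best;	/* must be used before the caller leaves its RCU section */
	}
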
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index df1966f..3d5ccf4 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -36,10 +36,11 @@
 			const struct inet_diag_req_v2 *req)
 {
 	int err = -EINVAL;
-	struct sock *sk;
+	struct sock *sk = NULL;
 	struct sk_buff *rep;
 	struct net *net = sock_net(in_skb->sk);
 
+	rcu_read_lock();
 	if (req->sdiag_family == AF_INET)
 		sk = __udp4_lib_lookup(net,
 				req->id.idiag_src[0], req->id.idiag_sport,
@@ -54,9 +55,9 @@
 				req->id.idiag_dport,
 				req->id.idiag_if, tbl, NULL);
 #endif
-	else
-		goto out_nosk;
-
+	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+		sk = NULL;
+	rcu_read_unlock();
 	err = -ENOENT;
 	if (!sk)
 		goto out_nosk;
@@ -96,24 +97,23 @@
 		     struct netlink_callback *cb,
 		     const struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
-	int num, s_num, slot, s_slot;
 	struct net *net = sock_net(skb->sk);
+	int num, s_num, slot, s_slot;
 
 	s_slot = cb->args[0];
 	num = s_num = cb->args[1];
 
 	for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
-		struct sock *sk;
-		struct hlist_nulls_node *node;
 		struct udp_hslot *hslot = &table->hash[slot];
+		struct sock *sk;
 
 		num = 0;
 
-		if (hlist_nulls_empty(&hslot->head))
+		if (hlist_empty(&hslot->head))
 			continue;
 
 		spin_lock_bh(&hslot->lock);
-		sk_nulls_for_each(sk, node, &hslot->head) {
+		sk_for_each(sk, &hslot->head) {
 			struct inet_sock *inet = inet_sk(sk);
 
 			if (!net_eq(sock_net(sk), net))
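
udp_dump_one() above shows the other half of the contract: a control-path caller that wants to keep the socket after rcu_read_unlock() must first win an atomic_inc_not_zero() on sk_refcnt. A minimal sketch of that shape; find_udp_socket() is a stand-in for __udp4_lib_lookup()/__udp6_lib_lookup():

	/* Sketch only: grabbing a real reference on a socket found under RCU. */
	rcu_read_lock();
	sk = find_udp_socket(net, saddr, sport, daddr, dport);	/* hypothetical helper */
	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;			/* socket is being destroyed: treat as not found */
	rcu_read_unlock();

	if (sk) {
		/* ... use the socket outside the RCU section ... */
		sock_put(sk);			/* drop our reference when done */
	}
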
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0ed2daf..6b7459c 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -14,18 +14,6 @@
 #include <net/udp.h>
 #include <net/protocol.h>
 
-static DEFINE_SPINLOCK(udp_offload_lock);
-static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
-
-#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
-
-struct udp_offload_priv {
-	struct udp_offload	*offload;
-	possible_net_t	net;
-	struct rcu_head		rcu;
-	struct udp_offload_priv __rcu *next;
-};
-
 static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	netdev_features_t features,
 	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
@@ -51,8 +39,11 @@
 	 * 16 bit length field due to the header being added outside of an
 	 * IP or IPv6 frame that was already limited to 64K - 1.
 	 */
-	partial = csum_sub(csum_unfold(uh->check),
-			   (__force __wsum)htonl(skb->len));
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
+		partial = (__force __wsum)uh->len;
+	else
+		partial = (__force __wsum)htonl(skb->len);
+	partial = csum_sub(csum_unfold(uh->check), partial);
 
 	/* setup inner skb. */
 	skb->encapsulation = 0;
@@ -101,7 +92,7 @@
 	udp_offset = outer_hlen - tnl_hlen;
 	skb = segs;
 	do {
-		__be16 len;
+		unsigned int len;
 
 		if (remcsum)
 			skb->ip_summed = CHECKSUM_NONE;
@@ -119,14 +110,26 @@
 		skb_reset_mac_header(skb);
 		skb_set_network_header(skb, mac_len);
 		skb_set_transport_header(skb, udp_offset);
-		len = htons(skb->len - udp_offset);
+		len = skb->len - udp_offset;
 		uh = udp_hdr(skb);
-		uh->len = len;
+
+		/* If we are only performing partial GSO the inner header
+		 * will be using a length value equal to only one MSS sized
+		 * segment instead of the entire frame.
+		 */
+		if (skb_is_gso(skb)) {
+			uh->len = htons(skb_shinfo(skb)->gso_size +
+					SKB_GSO_CB(skb)->data_offset +
+					skb->head - (unsigned char *)uh);
+		} else {
+			uh->len = htons(len);
+		}
 
 		if (!need_csum)
 			continue;
 
-		uh->check = ~csum_fold(csum_add(partial, (__force __wsum)len));
+		uh->check = ~csum_fold(csum_add(partial,
+				       (__force __wsum)htonl(len)));
 
 		if (skb->encapsulation || !offload_csum) {
 			uh->check = gso_make_checksum(skb, ~uh->check);
@@ -179,6 +182,7 @@
 
 	return segs;
 }
+EXPORT_SYMBOL(skb_udp_tunnel_segment);
 
 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 					 netdev_features_t features)
@@ -253,64 +257,14 @@
 	return segs;
 }
 
-int udp_add_offload(struct net *net, struct udp_offload *uo)
-{
-	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
-
-	if (!new_offload)
-		return -ENOMEM;
-
-	write_pnet(&new_offload->net, net);
-	new_offload->offload = uo;
-
-	spin_lock(&udp_offload_lock);
-	new_offload->next = udp_offload_base;
-	rcu_assign_pointer(udp_offload_base, new_offload);
-	spin_unlock(&udp_offload_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(udp_add_offload);
-
-static void udp_offload_free_routine(struct rcu_head *head)
-{
-	struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);
-	kfree(ou_priv);
-}
-
-void udp_del_offload(struct udp_offload *uo)
-{
-	struct udp_offload_priv __rcu **head = &udp_offload_base;
-	struct udp_offload_priv *uo_priv;
-
-	spin_lock(&udp_offload_lock);
-
-	uo_priv = udp_deref_protected(*head);
-	for (; uo_priv != NULL;
-	     uo_priv = udp_deref_protected(*head)) {
-		if (uo_priv->offload == uo) {
-			rcu_assign_pointer(*head,
-					   udp_deref_protected(uo_priv->next));
-			goto unlock;
-		}
-		head = &uo_priv->next;
-	}
-	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
-unlock:
-	spin_unlock(&udp_offload_lock);
-	if (uo_priv)
-		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
-}
-EXPORT_SYMBOL(udp_del_offload);
-
 struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
-				 struct udphdr *uh)
+				 struct udphdr *uh, udp_lookup_t lookup)
 {
-	struct udp_offload_priv *uo_priv;
 	struct sk_buff *p, **pp = NULL;
 	struct udphdr *uh2;
 	unsigned int off = skb_gro_offset(skb);
 	int flush = 1;
+	struct sock *sk;
 
 	if (NAPI_GRO_CB(skb)->encap_mark ||
 	    (skb->ip_summed != CHECKSUM_PARTIAL &&
@@ -322,13 +276,10 @@
 	NAPI_GRO_CB(skb)->encap_mark = 1;
 
 	rcu_read_lock();
-	uo_priv = rcu_dereference(udp_offload_base);
-	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
-		if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
-		    uo_priv->offload->port == uh->dest &&
-		    uo_priv->offload->callbacks.gro_receive)
-			goto unflush;
-	}
+	sk = (*lookup)(skb, uh->source, uh->dest);
+
+	if (sk && udp_sk(sk)->gro_receive)
+		goto unflush;
 	goto out_unlock;
 
 unflush:
@@ -352,9 +303,7 @@
 
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
-	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-	pp = uo_priv->offload->callbacks.gro_receive(head, skb,
-						     uo_priv->offload);
+	pp = udp_sk(sk)->gro_receive(sk, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -362,6 +311,7 @@
 	NAPI_GRO_CB(skb)->flush |= flush;
 	return pp;
 }
+EXPORT_SYMBOL(udp_gro_receive);
 
 static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
 					 struct sk_buff *skb)
@@ -383,49 +333,41 @@
 					     inet_gro_compute_pseudo);
 skip:
 	NAPI_GRO_CB(skb)->is_ipv6 = 0;
-	return udp_gro_receive(head, skb, uh);
+	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
 
 flush:
 	NAPI_GRO_CB(skb)->flush = 1;
 	return NULL;
 }
 
-int udp_gro_complete(struct sk_buff *skb, int nhoff)
+int udp_gro_complete(struct sk_buff *skb, int nhoff,
+		     udp_lookup_t lookup)
 {
-	struct udp_offload_priv *uo_priv;
 	__be16 newlen = htons(skb->len - nhoff);
 	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
 	int err = -ENOSYS;
+	struct sock *sk;
 
 	uh->len = newlen;
 
+	/* Set encapsulation before calling into inner gro_complete() functions
+	 * to make them set up the inner offsets.
+	 */
+	skb->encapsulation = 1;
+
 	rcu_read_lock();
-
-	uo_priv = rcu_dereference(udp_offload_base);
-	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
-		if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
-		    uo_priv->offload->port == uh->dest &&
-		    uo_priv->offload->callbacks.gro_complete)
-			break;
-	}
-
-	if (uo_priv) {
-		NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-		err = uo_priv->offload->callbacks.gro_complete(skb,
-				nhoff + sizeof(struct udphdr),
-				uo_priv->offload);
-	}
-
+	sk = (*lookup)(skb, uh->source, uh->dest);
+	if (sk && udp_sk(sk)->gro_complete)
+		err = udp_sk(sk)->gro_complete(sk, skb,
+				nhoff + sizeof(struct udphdr));
 	rcu_read_unlock();
 
 	if (skb->remcsum_offload)
 		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
 
-	skb->encapsulation = 1;
-	skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));
-
 	return err;
 }
+EXPORT_SYMBOL(udp_gro_complete);
 
 static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
 {
@@ -440,7 +382,7 @@
 		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
 	}
 
-	return udp_gro_complete(skb, nhoff);
+	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
 }
 
 static const struct net_offload udpv4_offload = {
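
With the udp_offload_base registration list gone, udp_gro_receive()/udp_gro_complete() above resolve the target by looking up the receiving UDP socket and invoking callbacks stored on it. The callback shapes below are inferred from those call sites (udp_sk(sk)->gro_receive(sk, head, skb) and udp_sk(sk)->gro_complete(sk, skb, nhoff)); the function bodies and names are illustrative only:

	/* Sketch only: per-socket GRO hooks an encapsulation driver would supply. */
	static struct sk_buff **my_tunnel_gro_receive(struct sock *sk,
						      struct sk_buff **head,
						      struct sk_buff *skb)
	{
		/* inspect the inner headers and decide whether skb can be merged;
		 * returning NULL here means nothing is ready to be flushed yet
		 */
		return NULL;
	}

	static int my_tunnel_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
	{
		/* finalize the inner headers of the merged packet starting at nhoff */
		return 0;
	}
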
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 96599d1..47f12c7 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -69,6 +69,8 @@
 	udp_sk(sk)->encap_type = cfg->encap_type;
 	udp_sk(sk)->encap_rcv = cfg->encap_rcv;
 	udp_sk(sk)->encap_destroy = cfg->encap_destroy;
+	udp_sk(sk)->gro_receive = cfg->gro_receive;
+	udp_sk(sk)->gro_complete = cfg->gro_complete;
 
 	udp_tunnel_encap_enable(sock);
 }
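
udp_tunnel.c above copies two new members, gro_receive and gro_complete, from the tunnel configuration into the UDP socket. A hedged sketch of how an encapsulation driver would hand its callbacks over when it sets up its tunnel socket; only those two fields are taken from this diff, the remaining udp_tunnel_sock_cfg members and the driver-side names are assumptions:

	/* Sketch only: registering per-socket GRO callbacks via the tunnel config. */
	struct udp_tunnel_sock_cfg cfg = { };

	cfg.sk_user_data = my_tunnel;			/* hypothetical driver-private state */
	cfg.encap_type   = my_encap_type;		/* whatever UDP_ENCAP_* the driver uses */
	cfg.encap_rcv    = my_tunnel_encap_rcv;		/* non-GRO receive path */
	cfg.gro_receive  = my_tunnel_gro_receive;	/* from the sketch above */
	cfg.gro_complete = my_tunnel_gro_complete;
	setup_udp_tunnel_sock(net, tunnel_sock, &cfg);
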
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 11e875f..3f84113 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -218,6 +218,7 @@
 	tristate "IPv6: GRE tunnel"
 	select IPV6_TUNNEL
 	select NET_IP_TUNNEL
+	depends on NET_IPGRE_DEMUX
 	---help---
 	  Tunneling means encapsulating data of one protocol type within
 	  another protocol and sending it over a channel that understands the
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 2fbd90b..5e9d6bf 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -8,9 +8,10 @@
 		addrlabel.o \
 		route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
 		raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \
-		exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
+		exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o \
+		udp_offload.o
 
-ipv6-offload :=	ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o
+ipv6-offload :=	ip6_offload.o tcpv6_offload.o exthdrs_offload.o
 
 ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
 ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 27aed1a..47f837a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -359,7 +359,6 @@
 		ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
 
 	ndev->cnf.mtu6 = dev->mtu;
-	ndev->cnf.sysctl = NULL;
 	ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
 	if (!ndev->nd_parms) {
 		kfree(ndev);
@@ -3176,35 +3175,9 @@
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-/* If the host route is cached on the addr struct make sure it is associated
- * with the proper table. e.g., enslavement can change and if so the cached
- * host route needs to move to the new table.
- */
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-				  struct inet6_ifaddr *ifp)
-{
-	if (ifp->rt) {
-		u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
-
-		if (tb_id != ifp->rt->rt6i_table->tb6_id) {
-			ip6_del_rt(ifp->rt);
-			ifp->rt = NULL;
-		}
-	}
-}
-#else
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-				  struct inet6_ifaddr *ifp)
-{
-}
-#endif
-
 static int fixup_permanent_addr(struct inet6_dev *idev,
 				struct inet6_ifaddr *ifp)
 {
-	l3mdev_check_host_rt(idev, ifp);
-
 	if (!ifp->rt) {
 		struct rt6_info *rt;
 
@@ -3255,6 +3228,7 @@
 			   void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info;
 	struct inet6_dev *idev = __in6_dev_get(dev);
 	int run_pending = 0;
 	int err;
@@ -3303,6 +3277,9 @@
 			break;
 
 		if (event == NETDEV_UP) {
+			/* restore routes for permanent addresses */
+			addrconf_permanent_addr(dev);
+
 			if (!addrconf_qdisc_ok(dev)) {
 				/* device is not ready yet. */
 				pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
@@ -3336,9 +3313,6 @@
 			run_pending = 1;
 		}
 
-		/* restore routes for permanent addresses */
-		addrconf_permanent_addr(dev);
-
 		switch (dev->type) {
 #if IS_ENABLED(CONFIG_IPV6_SIT)
 		case ARPHRD_SIT:
@@ -3413,6 +3387,15 @@
 		if (idev)
 			addrconf_type_change(dev, event);
 		break;
+
+	case NETDEV_CHANGEUPPER:
+		info = ptr;
+
+		/* flush all routes if dev is linked to or unlinked from
+		 * an L3 master device (e.g., VRF)
+		 */
+		if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+			addrconf_ifdown(dev, 0);
 	}
 
 	return NOTIFY_OK;
@@ -3438,6 +3421,12 @@
 		ipv6_mc_unmap(idev);
 }
 
+static bool addr_is_local(const struct in6_addr *addr)
+{
+	return ipv6_addr_type(addr) &
+		(IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
+
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
 	struct net *net = dev_net(dev);
@@ -3495,7 +3484,8 @@
 				 * address is retained on a down event
 				 */
 				if (!keep_addr ||
-				    !(ifa->flags & IFA_F_PERMANENT)) {
+				    !(ifa->flags & IFA_F_PERMANENT) ||
+				    addr_is_local(&ifa->addr)) {
 					hlist_del_init_rcu(&ifa->addr_lst);
 					goto restart;
 				}
@@ -3539,17 +3529,23 @@
 
 	INIT_LIST_HEAD(&del_list);
 	list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+		struct rt6_info *rt = NULL;
+
 		addrconf_del_dad_work(ifa);
 
 		write_unlock_bh(&idev->lock);
 		spin_lock_bh(&ifa->lock);
 
-		if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) {
+		if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+		    !addr_is_local(&ifa->addr)) {
 			/* set state to skip the notifier below */
 			state = INET6_IFADDR_STATE_DEAD;
 			ifa->state = 0;
 			if (!(ifa->flags & IFA_F_NODAD))
 				ifa->flags |= IFA_F_TENTATIVE;
+
+			rt = ifa->rt;
+			ifa->rt = NULL;
 		} else {
 			state = ifa->state;
 			ifa->state = INET6_IFADDR_STATE_DEAD;
@@ -3560,6 +3556,9 @@
 
 		spin_unlock_bh(&ifa->lock);
 
+		if (rt)
+			ip6_del_rt(rt);
+
 		if (state != INET6_IFADDR_STATE_DEAD) {
 			__ipv6_ifa_notify(RTM_DELADDR, ifa);
 			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
@@ -4995,15 +4994,13 @@
 {
 	struct inet6_ifaddr *ifp;
 	struct net_device *dev = idev->dev;
-	bool update_rs = false;
+	bool clear_token, update_rs = false;
 	struct in6_addr ll_addr;
 
 	ASSERT_RTNL();
 
 	if (!token)
 		return -EINVAL;
-	if (ipv6_addr_any(token))
-		return -EINVAL;
 	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
 		return -EINVAL;
 	if (!ipv6_accept_ra(idev))
@@ -5018,10 +5015,13 @@
 
 	write_unlock_bh(&idev->lock);
 
+	clear_token = ipv6_addr_any(token);
+	if (clear_token)
+		goto update_lft;
+
 	if (!idev->dead && (idev->if_flags & IF_READY) &&
 	    !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
 			     IFA_F_OPTIMISTIC)) {
-
 		/* If we're not ready, then normal ifup will take care
 		 * of this. Otherwise, we need to request our rs here.
 		 */
@@ -5029,6 +5029,7 @@
 		update_rs = true;
 	}
 
+update_lft:
 	write_lock_bh(&idev->lock);
 
 	if (update_rs) {
@@ -5325,10 +5326,10 @@
 			if (rt)
 				ip6_del_rt(rt);
 		}
-		dst_hold(&ifp->rt->dst);
-
-		ip6_del_rt(ifp->rt);
-
+		if (ifp->rt) {
+			dst_hold(&ifp->rt->dst);
+			ip6_del_rt(ifp->rt);
+		}
 		rt_genid_bump_ipv6(net);
 		break;
 	}
@@ -5618,376 +5619,366 @@
 	return ret;
 }
 
-static struct addrconf_sysctl_table
-{
-	struct ctl_table_header *sysctl_header;
-	struct ctl_table addrconf_vars[DEVCONF_MAX+1];
-} addrconf_sysctl __read_mostly = {
-	.sysctl_header = NULL,
-	.addrconf_vars = {
-		{
-			.procname	= "forwarding",
-			.data		= &ipv6_devconf.forwarding,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= addrconf_sysctl_forward,
-		},
-		{
-			.procname	= "hop_limit",
-			.data		= &ipv6_devconf.hop_limit,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= addrconf_sysctl_hop_limit,
-		},
-		{
-			.procname	= "mtu",
-			.data		= &ipv6_devconf.mtu6,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= addrconf_sysctl_mtu,
-		},
-		{
-			.procname	= "accept_ra",
-			.data		= &ipv6_devconf.accept_ra,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "accept_redirects",
-			.data		= &ipv6_devconf.accept_redirects,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "autoconf",
-			.data		= &ipv6_devconf.autoconf,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "dad_transmits",
-			.data		= &ipv6_devconf.dad_transmits,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "router_solicitations",
-			.data		= &ipv6_devconf.rtr_solicits,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "router_solicitation_interval",
-			.data		= &ipv6_devconf.rtr_solicit_interval,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec_jiffies,
-		},
-		{
-			.procname	= "router_solicitation_delay",
-			.data		= &ipv6_devconf.rtr_solicit_delay,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec_jiffies,
-		},
-		{
-			.procname	= "force_mld_version",
-			.data		= &ipv6_devconf.force_mld_version,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "mldv1_unsolicited_report_interval",
-			.data		=
-				&ipv6_devconf.mldv1_unsolicited_report_interval,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec_ms_jiffies,
-		},
-		{
-			.procname	= "mldv2_unsolicited_report_interval",
-			.data		=
-				&ipv6_devconf.mldv2_unsolicited_report_interval,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec_ms_jiffies,
-		},
-		{
-			.procname	= "use_tempaddr",
-			.data		= &ipv6_devconf.use_tempaddr,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "temp_valid_lft",
-			.data		= &ipv6_devconf.temp_valid_lft,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "temp_prefered_lft",
-			.data		= &ipv6_devconf.temp_prefered_lft,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "regen_max_retry",
-			.data		= &ipv6_devconf.regen_max_retry,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "max_desync_factor",
-			.data		= &ipv6_devconf.max_desync_factor,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "max_addresses",
-			.data		= &ipv6_devconf.max_addresses,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "accept_ra_defrtr",
-			.data		= &ipv6_devconf.accept_ra_defrtr,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "accept_ra_min_hop_limit",
-			.data		= &ipv6_devconf.accept_ra_min_hop_limit,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "accept_ra_pinfo",
-			.data		= &ipv6_devconf.accept_ra_pinfo,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
+static const struct ctl_table addrconf_sysctl[] = {
+	{
+		.procname	= "forwarding",
+		.data		= &ipv6_devconf.forwarding,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= addrconf_sysctl_forward,
+	},
+	{
+		.procname	= "hop_limit",
+		.data		= &ipv6_devconf.hop_limit,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= addrconf_sysctl_hop_limit,
+	},
+	{
+		.procname	= "mtu",
+		.data		= &ipv6_devconf.mtu6,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= addrconf_sysctl_mtu,
+	},
+	{
+		.procname	= "accept_ra",
+		.data		= &ipv6_devconf.accept_ra,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "accept_redirects",
+		.data		= &ipv6_devconf.accept_redirects,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "autoconf",
+		.data		= &ipv6_devconf.autoconf,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "dad_transmits",
+		.data		= &ipv6_devconf.dad_transmits,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "router_solicitations",
+		.data		= &ipv6_devconf.rtr_solicits,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "router_solicitation_interval",
+		.data		= &ipv6_devconf.rtr_solicit_interval,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
+	{
+		.procname	= "router_solicitation_delay",
+		.data		= &ipv6_devconf.rtr_solicit_delay,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
+	{
+		.procname	= "force_mld_version",
+		.data		= &ipv6_devconf.force_mld_version,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "mldv1_unsolicited_report_interval",
+		.data		=
+			&ipv6_devconf.mldv1_unsolicited_report_interval,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_ms_jiffies,
+	},
+	{
+		.procname	= "mldv2_unsolicited_report_interval",
+		.data		=
+			&ipv6_devconf.mldv2_unsolicited_report_interval,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_ms_jiffies,
+	},
+	{
+		.procname	= "use_tempaddr",
+		.data		= &ipv6_devconf.use_tempaddr,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "temp_valid_lft",
+		.data		= &ipv6_devconf.temp_valid_lft,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "temp_prefered_lft",
+		.data		= &ipv6_devconf.temp_prefered_lft,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "regen_max_retry",
+		.data		= &ipv6_devconf.regen_max_retry,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "max_desync_factor",
+		.data		= &ipv6_devconf.max_desync_factor,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "max_addresses",
+		.data		= &ipv6_devconf.max_addresses,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "accept_ra_defrtr",
+		.data		= &ipv6_devconf.accept_ra_defrtr,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "accept_ra_min_hop_limit",
+		.data		= &ipv6_devconf.accept_ra_min_hop_limit,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "accept_ra_pinfo",
+		.data		= &ipv6_devconf.accept_ra_pinfo,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #ifdef CONFIG_IPV6_ROUTER_PREF
-		{
-			.procname	= "accept_ra_rtr_pref",
-			.data		= &ipv6_devconf.accept_ra_rtr_pref,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "router_probe_interval",
-			.data		= &ipv6_devconf.rtr_probe_interval,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec_jiffies,
-		},
+	{
+		.procname	= "accept_ra_rtr_pref",
+		.data		= &ipv6_devconf.accept_ra_rtr_pref,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "router_probe_interval",
+		.data		= &ipv6_devconf.rtr_probe_interval,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
 #ifdef CONFIG_IPV6_ROUTE_INFO
-		{
-			.procname	= "accept_ra_rt_info_max_plen",
-			.data		= &ipv6_devconf.accept_ra_rt_info_max_plen,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
+	{
+		.procname	= "accept_ra_rt_info_max_plen",
+		.data		= &ipv6_devconf.accept_ra_rt_info_max_plen,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #endif
 #endif
-		{
-			.procname	= "proxy_ndp",
-			.data		= &ipv6_devconf.proxy_ndp,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= addrconf_sysctl_proxy_ndp,
-		},
-		{
-			.procname	= "accept_source_route",
-			.data		= &ipv6_devconf.accept_source_route,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
+	{
+		.procname	= "proxy_ndp",
+		.data		= &ipv6_devconf.proxy_ndp,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= addrconf_sysctl_proxy_ndp,
+	},
+	{
+		.procname	= "accept_source_route",
+		.data		= &ipv6_devconf.accept_source_route,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
-		{
-			.procname       = "optimistic_dad",
-			.data           = &ipv6_devconf.optimistic_dad,
-			.maxlen         = sizeof(int),
-			.mode           = 0644,
-			.proc_handler   = proc_dointvec,
-
-		},
-		{
-			.procname       = "use_optimistic",
-			.data           = &ipv6_devconf.use_optimistic,
-			.maxlen         = sizeof(int),
-			.mode           = 0644,
-			.proc_handler   = proc_dointvec,
-
-		},
+	{
+		.procname	= "optimistic_dad",
+		.data		= &ipv6_devconf.optimistic_dad,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler   = proc_dointvec,
+	},
+	{
+		.procname	= "use_optimistic",
+		.data		= &ipv6_devconf.use_optimistic,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #endif
 #ifdef CONFIG_IPV6_MROUTE
-		{
-			.procname	= "mc_forwarding",
-			.data		= &ipv6_devconf.mc_forwarding,
-			.maxlen		= sizeof(int),
-			.mode		= 0444,
-			.proc_handler	= proc_dointvec,
-		},
-#endif
-		{
-			.procname	= "disable_ipv6",
-			.data		= &ipv6_devconf.disable_ipv6,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= addrconf_sysctl_disable,
-		},
-		{
-			.procname	= "accept_dad",
-			.data		= &ipv6_devconf.accept_dad,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname       = "force_tllao",
-			.data           = &ipv6_devconf.force_tllao,
-			.maxlen         = sizeof(int),
-			.mode           = 0644,
-			.proc_handler   = proc_dointvec
-		},
-		{
-			.procname       = "ndisc_notify",
-			.data           = &ipv6_devconf.ndisc_notify,
-			.maxlen         = sizeof(int),
-			.mode           = 0644,
-			.proc_handler   = proc_dointvec
-		},
-		{
-			.procname	= "suppress_frag_ndisc",
-			.data		= &ipv6_devconf.suppress_frag_ndisc,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec
-		},
-		{
-			.procname	= "accept_ra_from_local",
-			.data		= &ipv6_devconf.accept_ra_from_local,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "accept_ra_mtu",
-			.data		= &ipv6_devconf.accept_ra_mtu,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "stable_secret",
-			.data		= &ipv6_devconf.stable_secret,
-			.maxlen		= IPV6_MAX_STRLEN,
-			.mode		= 0600,
-			.proc_handler	= addrconf_sysctl_stable_secret,
-		},
-		{
-			.procname       = "use_oif_addrs_only",
-			.data           = &ipv6_devconf.use_oif_addrs_only,
-			.maxlen         = sizeof(int),
-			.mode           = 0644,
-			.proc_handler   = proc_dointvec,
-		},
-		{
-			.procname	= "ignore_routes_with_linkdown",
-			.data		= &ipv6_devconf.ignore_routes_with_linkdown,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= addrconf_sysctl_ignore_routes_with_linkdown,
-		},
-		{
-			.procname	= "drop_unicast_in_l2_multicast",
-			.data		= &ipv6_devconf.drop_unicast_in_l2_multicast,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname	= "drop_unsolicited_na",
-			.data		= &ipv6_devconf.drop_unsolicited_na,
-			.maxlen		= sizeof(int),
-			.mode		= 0644,
-			.proc_handler	= proc_dointvec,
-		},
-		{
-			.procname       = "keep_addr_on_down",
-			.data           = &ipv6_devconf.keep_addr_on_down,
-			.maxlen         = sizeof(int),
-			.mode           = 0644,
-			.proc_handler   = proc_dointvec,
-
-		},
-		{
-			/* sentinel */
-		}
+	{
+		.procname	= "mc_forwarding",
+		.data		= &ipv6_devconf.mc_forwarding,
+		.maxlen		= sizeof(int),
+		.mode		= 0444,
+		.proc_handler	= proc_dointvec,
 	},
+#endif
+	{
+		.procname	= "disable_ipv6",
+		.data		= &ipv6_devconf.disable_ipv6,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= addrconf_sysctl_disable,
+	},
+	{
+		.procname	= "accept_dad",
+		.data		= &ipv6_devconf.accept_dad,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "force_tllao",
+		.data		= &ipv6_devconf.force_tllao,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
+		.procname	= "ndisc_notify",
+		.data		= &ipv6_devconf.ndisc_notify,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
+		.procname	= "suppress_frag_ndisc",
+		.data		= &ipv6_devconf.suppress_frag_ndisc,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
+		.procname	= "accept_ra_from_local",
+		.data		= &ipv6_devconf.accept_ra_from_local,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "accept_ra_mtu",
+		.data		= &ipv6_devconf.accept_ra_mtu,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "stable_secret",
+		.data		= &ipv6_devconf.stable_secret,
+		.maxlen		= IPV6_MAX_STRLEN,
+		.mode		= 0600,
+		.proc_handler	= addrconf_sysctl_stable_secret,
+	},
+	{
+		.procname	= "use_oif_addrs_only",
+		.data		= &ipv6_devconf.use_oif_addrs_only,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "ignore_routes_with_linkdown",
+		.data		= &ipv6_devconf.ignore_routes_with_linkdown,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= addrconf_sysctl_ignore_routes_with_linkdown,
+	},
+	{
+		.procname	= "drop_unicast_in_l2_multicast",
+		.data		= &ipv6_devconf.drop_unicast_in_l2_multicast,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "drop_unsolicited_na",
+		.data		= &ipv6_devconf.drop_unsolicited_na,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "keep_addr_on_down",
+		.data		= &ipv6_devconf.keep_addr_on_down,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+
+	},
+	{
+		/* sentinel */
+	}
 };
 
 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
 		struct inet6_dev *idev, struct ipv6_devconf *p)
 {
 	int i;
-	struct addrconf_sysctl_table *t;
+	struct ctl_table *table;
 	char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
 
-	t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
-	if (!t)
+	table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
+	if (!table)
 		goto out;
 
-	for (i = 0; t->addrconf_vars[i].data; i++) {
-		t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf;
-		t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */
-		t->addrconf_vars[i].extra2 = net;
+	for (i = 0; table[i].data; i++) {
+		table[i].data += (char *)p - (char *)&ipv6_devconf;
+		table[i].extra1 = idev; /* embedded; no ref */
+		table[i].extra2 = net;
 	}
 
 	snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
 
-	t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars);
-	if (!t->sysctl_header)
+	p->sysctl_header = register_net_sysctl(net, path, table);
+	if (!p->sysctl_header)
 		goto free;
 
-	p->sysctl = t;
 	return 0;
 
 free:
-	kfree(t);
+	kfree(table);
 out:
 	return -ENOBUFS;
 }
 
 static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
 {
-	struct addrconf_sysctl_table *t;
+	struct ctl_table *table;
 
-	if (!p->sysctl)
+	if (!p->sysctl_header)
 		return;
 
-	t = p->sysctl;
-	p->sysctl = NULL;
-	unregister_net_sysctl_table(t->sysctl_header);
-	kfree(t);
+	table = p->sysctl_header->ctl_table_arg;
+	unregister_net_sysctl_table(p->sysctl_header);
+	p->sysctl_header = NULL;
+	kfree(table);
 }
 
 static int addrconf_sysctl_register(struct inet6_dev *idev)
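
The addrconf.c rewrite above drops the addrconf_sysctl_table wrapper: the template becomes a const, sentinel-terminated ctl_table array, each device gets a kmemdup()'d copy whose .data pointers are rebased onto its own ipv6_devconf, and only the returned sysctl header is kept (its ctl_table_arg remembers the clone so it can be freed at unregister time). Condensed from the replacement lines themselves, as a hedged sketch of the register/unregister pair:

	/* Sketch only: clone the const template per device, then register it. */
	table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
	for (i = 0; table && table[i].data; i++)	/* stops at the { } sentinel */
		table[i].data += (char *)p - (char *)&ipv6_devconf;
	p->sysctl_header = register_net_sysctl(net, path, table);

	/* ... and on teardown, recover the clone from the header and free it: */
	table = p->sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(p->sysctl_header);
	kfree(table);
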
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b11c37c..bfa86f0 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -64,6 +64,8 @@
 #include <asm/uaccess.h>
 #include <linux/mroute6.h>
 
+#include "ip6_offload.h"
+
 MODULE_AUTHOR("Cast of dozens");
 MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
 MODULE_LICENSE("GPL");
@@ -561,6 +563,7 @@
 	.recvmsg	   = inet_recvmsg,		/* ok		*/
 	.mmap		   = sock_no_mmap,
 	.sendpage	   = sock_no_sendpage,
+	.set_peek_off	   = sk_set_peek_off,
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_sock_common_setsockopt,
 	.compat_getsockopt = compat_sock_common_getsockopt,
@@ -958,6 +961,10 @@
 	if (err)
 		goto udplitev6_fail;
 
+	err = udpv6_offload_init();
+	if (err)
+		goto udpv6_offload_fail;
+
 	err = tcpv6_init();
 	if (err)
 		goto tcpv6_fail;
@@ -987,6 +994,8 @@
 ipv6_packet_fail:
 	tcpv6_exit();
 tcpv6_fail:
+	udpv6_offload_exit();
+udpv6_offload_fail:
 	udplitev6_exit();
 udplitev6_fail:
 	udpv6_exit();
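
The af_inet6.c hunk wires .set_peek_off up for IPv6 datagram sockets, matching the sk_peek_offset() handling added to udp_recvmsg() earlier in this diff. A small userspace sketch of what that enables: peeking through a queued datagram in pieces with MSG_PEEK instead of re-reading its start every time. The calls are standard socket API; the chunk sizes are arbitrary, and the exact interaction with multiple queued datagrams is not shown:

	/* Sketch only: SO_PEEK_OFF + MSG_PEEK on a UDP socket. */
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = socket(AF_INET6, SOCK_DGRAM, 0);
		int off = 0;			/* enable peeking, starting at offset 0 */
		char buf[64];

		/* ... bind fd and have a peer send a datagram ... */

		setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
		recv(fd, buf, 16, MSG_PEEK);	/* peeks the first 16 bytes; offset advances */
		recv(fd, buf, 16, MSG_PEEK);	/* peeks the next 16 bytes of the same datagram */
		recv(fd, buf, sizeof(buf), 0);	/* a normal read still consumes the datagram */
		close(fd);
		return 0;
	}
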
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 4281621..37874e2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,18 +40,114 @@
 	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
+static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_proto = sk->sk_protocol;
+	fl6->daddr = sk->sk_v6_daddr;
+	fl6->saddr = np->saddr;
+	fl6->flowi6_oif = sk->sk_bound_dev_if;
+	fl6->flowi6_mark = sk->sk_mark;
+	fl6->fl6_dport = inet->inet_dport;
+	fl6->fl6_sport = inet->inet_sport;
+	fl6->flowlabel = np->flow_label;
+
+	if (!fl6->flowi6_oif)
+		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
+	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+		fl6->flowi6_oif = np->mcast_oif;
+
+	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+}
+
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+{
+	struct ip6_flowlabel *flowlabel = NULL;
+	struct in6_addr *final_p, final;
+	struct ipv6_txoptions *opt;
+	struct dst_entry *dst;
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct flowi6 fl6;
+	int err = 0;
+
+	if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+		flowlabel = fl6_sock_lookup(sk, np->flow_label);
+		if (!flowlabel)
+			return -EINVAL;
+	}
+	ip6_datagram_flow_key_init(&fl6, sk);
+
+	rcu_read_lock();
+	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+	final_p = fl6_update_dst(&fl6, opt, &final);
+	rcu_read_unlock();
+
+	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+	if (IS_ERR(dst)) {
+		err = PTR_ERR(dst);
+		goto out;
+	}
+
+	if (fix_sk_saddr) {
+		if (ipv6_addr_any(&np->saddr))
+			np->saddr = fl6.saddr;
+
+		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+			sk->sk_v6_rcv_saddr = fl6.saddr;
+			inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+			if (sk->sk_prot->rehash)
+				sk->sk_prot->rehash(sk);
+		}
+	}
+
+	ip6_dst_store(sk, dst,
+		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+		      &sk->sk_v6_daddr : NULL,
+#ifdef CONFIG_IPV6_SUBTREES
+		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
+		      &np->saddr :
+#endif
+		      NULL);
+
+out:
+	fl6_sock_release(flowlabel);
+	return err;
+}
+
+void ip6_datagram_release_cb(struct sock *sk)
+{
+	struct dst_entry *dst;
+
+	if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+		return;
+
+	rcu_read_lock();
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete ||
+	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+	ip6_datagram_dst_update(sk, false);
+}
+EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
+
 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct sockaddr_in6	*usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock	*inet = inet_sk(sk);
 	struct ipv6_pinfo	*np = inet6_sk(sk);
-	struct in6_addr	*daddr, *final_p, final;
-	struct dst_entry	*dst;
-	struct flowi6		fl6;
-	struct ip6_flowlabel	*flowlabel = NULL;
-	struct ipv6_txoptions	*opt;
+	struct in6_addr		*daddr;
 	int			addr_type;
 	int			err;
+	__be32			fl6_flowlabel = 0;
 
 	if (usin->sin6_family == AF_INET) {
 		if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@
 	if (usin->sin6_family != AF_INET6)
 		return -EAFNOSUPPORT;
 
-	memset(&fl6, 0, sizeof(fl6));
-	if (np->sndflow) {
-		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
-			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-			if (!flowlabel)
-				return -EINVAL;
-		}
-	}
+	if (np->sndflow)
+		fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
 	addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -145,7 +234,7 @@
 	}
 
 	sk->sk_v6_daddr = *daddr;
-	np->flow_label = fl6.flowlabel;
+	np->flow_label = fl6_flowlabel;
 
 	inet->inet_dport = usin->sin6_port;
 
@@ -154,59 +243,13 @@
 	 *	destination cache for it.
 	 */
 
-	fl6.flowi6_proto = sk->sk_protocol;
-	fl6.daddr = sk->sk_v6_daddr;
-	fl6.saddr = np->saddr;
-	fl6.flowi6_oif = sk->sk_bound_dev_if;
-	fl6.flowi6_mark = sk->sk_mark;
-	fl6.fl6_dport = inet->inet_dport;
-	fl6.fl6_sport = inet->inet_sport;
-
-	if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
-
-	if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
-		fl6.flowi6_oif = np->mcast_oif;
-
-	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-	rcu_read_lock();
-	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
-	final_p = fl6_update_dst(&fl6, opt, &final);
-	rcu_read_unlock();
-
-	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-	err = 0;
-	if (IS_ERR(dst)) {
-		err = PTR_ERR(dst);
+	err = ip6_datagram_dst_update(sk, true);
+	if (err)
 		goto out;
-	}
-
-	/* source address lookup done in ip6_dst_lookup */
-
-	if (ipv6_addr_any(&np->saddr))
-		np->saddr = fl6.saddr;
-
-	if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-		sk->sk_v6_rcv_saddr = fl6.saddr;
-		inet->inet_rcv_saddr = LOOPBACK4_IPV6;
-		if (sk->sk_prot->rehash)
-			sk->sk_prot->rehash(sk);
-	}
-
-	ip6_dst_store(sk, dst,
-		      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
-		      &sk->sk_v6_daddr : NULL,
-#ifdef CONFIG_IPV6_SUBTREES
-		      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
-		      &np->saddr :
-#endif
-		      NULL);
 
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
-	fl6_sock_release(flowlabel);
 	return err;
 }
 
@@ -407,9 +450,10 @@
 		copied = len;
 	}
 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
-	if (err)
-		goto out_free_skb;
-
+	if (unlikely(err)) {
+		kfree_skb(skb);
+		return err;
+	}
 	sock_recv_timestamp(msg, sk, skb);
 
 	serr = SKB_EXT_ERR(skb);
@@ -466,8 +510,7 @@
 	msg->msg_flags |= MSG_ERRQUEUE;
 	err = copied;
 
-out_free_skb:
-	kfree_skb(skb);
+	consume_skb(skb);
 out:
 	return err;
 }
@@ -684,13 +727,13 @@
 
 int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 			  struct msghdr *msg, struct flowi6 *fl6,
-			  struct ipv6_txoptions *opt,
-			  int *hlimit, int *tclass, int *dontfrag)
+			  struct ipcm6_cookie *ipc6, struct sockcm_cookie *sockc)
 {
 	struct in6_pktinfo *src_info;
 	struct cmsghdr *cmsg;
 	struct ipv6_rt_hdr *rthdr;
 	struct ipv6_opt_hdr *hdr;
+	struct ipv6_txoptions *opt = ipc6->opt;
 	int len;
 	int err = 0;
 
@@ -702,6 +745,13 @@
 			goto exit_f;
 		}
 
+		if (cmsg->cmsg_level == SOL_SOCKET) {
+			err = __sock_cmsg_send(sk, msg, cmsg, sockc);
+			if (err)
+				return err;
+			continue;
+		}
+
 		if (cmsg->cmsg_level != SOL_IPV6)
 			continue;
 
@@ -903,8 +953,8 @@
 				goto exit_f;
 			}
 
-			*hlimit = *(int *)CMSG_DATA(cmsg);
-			if (*hlimit < -1 || *hlimit > 0xff) {
+			ipc6->hlimit = *(int *)CMSG_DATA(cmsg);
+			if (ipc6->hlimit < -1 || ipc6->hlimit > 0xff) {
 				err = -EINVAL;
 				goto exit_f;
 			}
@@ -924,7 +974,7 @@
 				goto exit_f;
 
 			err = 0;
-			*tclass = tc;
+			ipc6->tclass = tc;
 
 			break;
 		    }
@@ -942,7 +992,7 @@
 				goto exit_f;
 
 			err = 0;
-			*dontfrag = df;
+			ipc6->dontfrag = df;
 
 			break;
 		    }
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index ea7c4d6..8de5dd7 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -258,8 +258,8 @@
 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
 	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
 				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
-		IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
-				 IPSTATS_MIB_INHDRERRORS);
+		__IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
+				IPSTATS_MIB_INHDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -280,8 +280,8 @@
 		return 1;
 	}
 
-	IP6_INC_STATS_BH(dev_net(dst->dev),
-			 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
+	__IP6_INC_STATS(dev_net(dst->dev),
+			ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
 	return -1;
 }
 
@@ -309,8 +309,8 @@
 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
 	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
 				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-				 IPSTATS_MIB_INHDRERRORS);
+		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+				IPSTATS_MIB_INHDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -319,8 +319,8 @@
 
 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
 	    skb->pkt_type != PACKET_HOST) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-				 IPSTATS_MIB_INADDRERRORS);
+		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+				IPSTATS_MIB_INADDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -334,8 +334,8 @@
 			 * processed by own
 			 */
 			if (!addr) {
-				IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-						 IPSTATS_MIB_INADDRERRORS);
+				__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+						IPSTATS_MIB_INADDRERRORS);
 				kfree_skb(skb);
 				return -1;
 			}
@@ -360,8 +360,8 @@
 			goto unknown_rh;
 		/* Silently discard invalid RTH type 2 */
 		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-					 IPSTATS_MIB_INHDRERRORS);
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+					IPSTATS_MIB_INHDRERRORS);
 			kfree_skb(skb);
 			return -1;
 		}
@@ -379,8 +379,8 @@
 	n = hdr->hdrlen >> 1;
 
 	if (hdr->segments_left > n) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-				 IPSTATS_MIB_INHDRERRORS);
+		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+				IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 				  ((&hdr->segments_left) -
 				   skb_network_header(skb)));
@@ -393,8 +393,8 @@
 	if (skb_cloned(skb)) {
 		/* the copy is a forwarded packet */
 		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-					 IPSTATS_MIB_OUTDISCARDS);
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+					IPSTATS_MIB_OUTDISCARDS);
 			kfree_skb(skb);
 			return -1;
 		}
@@ -416,14 +416,14 @@
 		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
 				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
 				     IPPROTO_ROUTING) < 0) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-					 IPSTATS_MIB_INADDRERRORS);
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+					IPSTATS_MIB_INADDRERRORS);
 			kfree_skb(skb);
 			return -1;
 		}
 		if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-					 IPSTATS_MIB_INADDRERRORS);
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+					IPSTATS_MIB_INADDRERRORS);
 			kfree_skb(skb);
 			return -1;
 		}
@@ -434,8 +434,8 @@
 	}
 
 	if (ipv6_addr_is_multicast(addr)) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-				 IPSTATS_MIB_INADDRERRORS);
+		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+				IPSTATS_MIB_INADDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -454,8 +454,8 @@
 
 	if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
 		if (ipv6_hdr(skb)->hop_limit <= 1) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-					 IPSTATS_MIB_INHDRERRORS);
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+					IPSTATS_MIB_INHDRERRORS);
 			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
 				    0);
 			kfree_skb(skb);
@@ -470,7 +470,7 @@
 	return -1;
 
 unknown_rh:
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
 	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 			  (&hdr->type) - skb_network_header(skb));
 	return -1;
@@ -568,28 +568,28 @@
 	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
 		net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
 				    nh[optoff+1]);
-		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
-				 IPSTATS_MIB_INHDRERRORS);
+		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
+				IPSTATS_MIB_INHDRERRORS);
 		goto drop;
 	}
 
 	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
 	if (pkt_len <= IPV6_MAXPLEN) {
-		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
-				 IPSTATS_MIB_INHDRERRORS);
+		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
+				IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
 		return false;
 	}
 	if (ipv6_hdr(skb)->payload_len) {
-		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
-				 IPSTATS_MIB_INHDRERRORS);
+		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
+				IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
 		return false;
 	}
 
 	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
-		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
-				 IPSTATS_MIB_INTRUNCATEDPKTS);
+		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
+				IPSTATS_MIB_INTRUNCATEDPKTS);
 		goto drop;
 	}
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 0a37ddc..4527285 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -400,10 +400,11 @@
 	struct icmp6hdr tmp_hdr;
 	struct flowi6 fl6;
 	struct icmpv6_msg msg;
+	struct sockcm_cookie sockc_unused = {0};
+	struct ipcm6_cookie ipc6;
 	int iif = 0;
 	int addr_type = 0;
 	int len;
-	int hlimit;
 	int err = 0;
 	u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
@@ -445,6 +446,8 @@
 
 	if (__ipv6_addr_needs_scope_id(addr_type))
 		iif = skb->dev->ifindex;
+	else
+		iif = l3mdev_master_ifindex(skb->dev);
 
 	/*
 	 *	Must not send error if the source does not uniquely
@@ -499,14 +502,14 @@
 	else if (!fl6.flowi6_oif)
 		fl6.flowi6_oif = np->ucast_oif;
 
-	if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev);
-
 	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
 	if (IS_ERR(dst))
 		goto out;
 
-	hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+	ipc6.tclass = np->tclass;
+	ipc6.dontfrag = np->dontfrag;
+	ipc6.opt = NULL;
 
 	msg.skb = skb;
 	msg.offset = skb_network_offset(skb);
@@ -525,9 +528,9 @@
 
 	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
 			      len + sizeof(struct icmp6hdr),
-			      sizeof(struct icmp6hdr), hlimit,
-			      np->tclass, NULL, &fl6, (struct rt6_info *)dst,
-			      MSG_DONTWAIT, np->dontfrag);
+			      sizeof(struct icmp6hdr),
+			      &ipc6, &fl6, (struct rt6_info *)dst,
+			      MSG_DONTWAIT, &sockc_unused);
 	if (err) {
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
@@ -562,10 +565,10 @@
 	struct flowi6 fl6;
 	struct icmpv6_msg msg;
 	struct dst_entry *dst;
+	struct ipcm6_cookie ipc6;
 	int err = 0;
-	int hlimit;
-	u8 tclass;
 	u32 mark = IP6_REPLY_MARK(net, skb->mark);
+	struct sockcm_cookie sockc_unused = {0};
 
 	saddr = &ipv6_hdr(skb)->daddr;
 
@@ -605,22 +608,24 @@
 	if (IS_ERR(dst))
 		goto out;
 
-	hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
-
 	idev = __in6_dev_get(skb->dev);
 
 	msg.skb = skb;
 	msg.offset = 0;
 	msg.type = ICMPV6_ECHO_REPLY;
 
-	tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+	ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+	ipc6.dontfrag = np->dontfrag;
+	ipc6.opt = NULL;
+
 	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
-				sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl6,
+				sizeof(struct icmp6hdr), &ipc6, &fl6,
 				(struct rt6_info *)dst, MSG_DONTWAIT,
-				np->dontfrag);
+				&sockc_unused);
 
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
+		__ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 	} else {
 		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
@@ -672,7 +677,7 @@
 	return;
 
 out:
-	ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+	__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
 }
 
 /*
@@ -708,7 +713,7 @@
 		skb_set_network_header(skb, nh);
 	}
 
-	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);
+	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);
 
 	saddr = &ipv6_hdr(skb)->saddr;
 	daddr = &ipv6_hdr(skb)->daddr;
@@ -726,7 +731,7 @@
 
 	type = hdr->icmp6_type;
 
-	ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type);
+	ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type);
 
 	switch (type) {
 	case ICMPV6_ECHO_REQUEST:
@@ -810,9 +815,9 @@
 	return 0;
 
 csum_error:
-	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
+	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
 discard_it:
-	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS);
+	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
 drop_no_count:
 	kfree_skb(skb);
 	return 0;
diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h
index 28542cb..d08fd2d 100644
--- a/net/ipv6/ila/ila.h
+++ b/net/ipv6/ila/ila.h
@@ -23,10 +23,76 @@
 #include <net/protocol.h>
 #include <uapi/linux/ila.h>
 
+struct ila_locator {
+	union {
+		__u8            v8[8];
+		__be16          v16[4];
+		__be32          v32[2];
+		__be64		v64;
+	};
+};
+
+struct ila_identifier {
+	union {
+		struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+			u8 __space:4;
+			u8 csum_neutral:1;
+			u8 type:3;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+			u8 type:3;
+			u8 csum_neutral:1;
+			u8 __space:4;
+#else
+#error  "Adjust your <asm/byteorder.h> defines"
+#endif
+			u8 __space2[7];
+		};
+		__u8            v8[8];
+		__be16          v16[4];
+		__be32          v32[2];
+		__be64		v64;
+	};
+};
+
+enum {
+	ILA_ATYPE_IID = 0,
+	ILA_ATYPE_LUID,
+	ILA_ATYPE_VIRT_V4,
+	ILA_ATYPE_VIRT_UNI_V6,
+	ILA_ATYPE_VIRT_MULTI_V6,
+	ILA_ATYPE_RSVD_1,
+	ILA_ATYPE_RSVD_2,
+	ILA_ATYPE_RSVD_3,
+};
+
+#define CSUM_NEUTRAL_FLAG	htonl(0x10000000)
+
+struct ila_addr {
+	union {
+		struct in6_addr addr;
+		struct {
+			struct ila_locator loc;
+			struct ila_identifier ident;
+		};
+	};
+};
+
+static inline struct ila_addr *ila_a2i(struct in6_addr *addr)
+{
+	return (struct ila_addr *)addr;
+}
+
+static inline bool ila_addr_is_ila(struct ila_addr *iaddr)
+{
+	return (iaddr->ident.type != ILA_ATYPE_IID);
+}
+
 struct ila_params {
-	__be64 locator;
-	__be64 locator_match;
+	struct ila_locator locator;
+	struct ila_locator locator_match;
 	__wsum csum_diff;
+	u8 csum_mode;
 };
 
 static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to)
@@ -38,7 +104,14 @@
 	return csum_partial(diff, sizeof(diff), 0);
 }
 
-void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p);
+static inline bool ila_csum_neutral_set(struct ila_identifier ident)
+{
+	return !!(ident.csum_neutral);
+}
+
+void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p);
+
+void ila_init_saved_csum(struct ila_params *p);
 
 int ila_lwt_init(void);
 void ila_lwt_fini(void);
diff --git a/net/ipv6/ila/ila_common.c b/net/ipv6/ila/ila_common.c
index 3061305..0e94042 100644
--- a/net/ipv6/ila/ila_common.c
+++ b/net/ipv6/ila/ila_common.c
@@ -15,20 +15,52 @@
 
 static __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p)
 {
-	if (*(__be64 *)&ip6h->daddr == p->locator_match)
+	struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
+
+	if (p->locator_match.v64)
 		return p->csum_diff;
 	else
-		return compute_csum_diff8((__be32 *)&ip6h->daddr,
+		return compute_csum_diff8((__be32 *)&iaddr->loc,
 					  (__be32 *)&p->locator);
 }
 
-void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+static void ila_csum_do_neutral(struct ila_addr *iaddr,
+				struct ila_params *p)
+{
+	__sum16 *adjust = (__force __sum16 *)&iaddr->ident.v16[3];
+	__wsum diff, fval;
+
+	/* Check if checksum adjust value has been cached */
+	if (p->locator_match.v64) {
+		diff = p->csum_diff;
+	} else {
+		diff = compute_csum_diff8((__be32 *)iaddr,
+					  (__be32 *)&p->locator);
+	}
+
+	fval = (__force __wsum)(ila_csum_neutral_set(iaddr->ident) ?
+			~CSUM_NEUTRAL_FLAG : CSUM_NEUTRAL_FLAG);
+
+	diff = csum_add(diff, fval);
+
+	*adjust = ~csum_fold(csum_add(diff, csum_unfold(*adjust)));
+
+	/* Flip the csum-neutral bit. Either we are doing a SIR->ILA
+	 * translation with ILA_CSUM_NEUTRAL_MAP as the csum_method
+	 * and the C-bit is not set, or we are doing an ILA->SIR
+	 * translation and the C-bit is set.
+	 */
+	iaddr->ident.csum_neutral ^= 1;
+}
+
+static void ila_csum_adjust_transport(struct sk_buff *skb,
+				      struct ila_params *p)
 {
 	__wsum diff;
 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
 	size_t nhoff = sizeof(struct ipv6hdr);
 
-	/* First update checksum */
 	switch (ip6h->nexthdr) {
 	case NEXTHDR_TCP:
 		if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) {
@@ -68,7 +100,46 @@
 	}
 
 	/* Now change destination address */
-	*(__be64 *)&ip6h->daddr = p->locator;
+	iaddr->loc = p->locator;
+}
+
+void ila_update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+{
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
+
+	/* First deal with the transport checksum */
+	if (ila_csum_neutral_set(iaddr->ident)) {
+		/* C-bit is set in the locator indicating that this
+		 * is a locator being translated to a SIR address.
+		 * Perform (receiver) checksum-neutral translation.
+		 */
+		ila_csum_do_neutral(iaddr, p);
+	} else {
+		switch (p->csum_mode) {
+		case ILA_CSUM_ADJUST_TRANSPORT:
+			ila_csum_adjust_transport(skb, p);
+			break;
+		case ILA_CSUM_NEUTRAL_MAP:
+			ila_csum_do_neutral(iaddr, p);
+			break;
+		case ILA_CSUM_NO_ACTION:
+			break;
+		}
+	}
+
+	/* Now change destination address */
+	iaddr->loc = p->locator;
+}
+
+void ila_init_saved_csum(struct ila_params *p)
+{
+	if (!p->locator_match.v64)
+		return;
+
+	p->csum_diff = compute_csum_diff8(
+				(__be32 *)&p->locator_match,
+				(__be32 *)&p->locator);
 }
 
 static int __init ila_init(void)
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 2ae3c4f..1dfb641 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -26,7 +26,7 @@
 	if (skb->protocol != htons(ETH_P_IPV6))
 		goto drop;
 
-	update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+	ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
 
 	return dst->lwtstate->orig_output(net, sk, skb);
 
@@ -42,7 +42,7 @@
 	if (skb->protocol != htons(ETH_P_IPV6))
 		goto drop;
 
-	update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+	ila_update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
 
 	return dst->lwtstate->orig_input(skb);
 
@@ -53,6 +53,7 @@
 
 static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
 	[ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
+	[ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
 };
 
 static int ila_build_state(struct net_device *dev, struct nlattr *nla,
@@ -64,11 +65,28 @@
 	size_t encap_len = sizeof(*p);
 	struct lwtunnel_state *newts;
 	const struct fib6_config *cfg6 = cfg;
+	struct ila_addr *iaddr;
 	int ret;
 
 	if (family != AF_INET6)
 		return -EINVAL;
 
+	if (cfg6->fc_dst_len < sizeof(struct ila_locator) + 1) {
+		/* Need to have full locator and at least type field
+		 * included in destination
+		 */
+		return -EINVAL;
+	}
+
+	iaddr = (struct ila_addr *)&cfg6->fc_dst;
+
+	if (!ila_addr_is_ila(iaddr) || ila_csum_neutral_set(iaddr->ident)) {
+		/* Don't allow translation for a non-ILA address or checksum
+		 * neutral flag to be set.
+		 */
+		return -EINVAL;
+	}
+
 	ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla,
 			       ila_nl_policy);
 	if (ret < 0)
@@ -84,16 +102,19 @@
 	newts->len = encap_len;
 	p = ila_params_lwtunnel(newts);
 
-	p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
+	p->locator.v64 = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
 
-	if (cfg6->fc_dst_len > sizeof(__be64)) {
-		/* Precompute checksum difference for translation since we
-		 * know both the old locator and the new one.
-		 */
-		p->locator_match = *(__be64 *)&cfg6->fc_dst;
-		p->csum_diff = compute_csum_diff8(
-			(__be32 *)&p->locator_match, (__be32 *)&p->locator);
-	}
+	/* Precompute checksum difference for translation since we
+	 * know both the old locator and the new one.
+	 */
+	p->locator_match = iaddr->loc;
+	p->csum_diff = compute_csum_diff8(
+		(__be32 *)&p->locator_match, (__be32 *)&p->locator);
+
+	if (tb[ILA_ATTR_CSUM_MODE])
+		p->csum_mode = nla_get_u8(tb[ILA_ATTR_CSUM_MODE]);
+
+	ila_init_saved_csum(p);
 
 	newts->type = LWTUNNEL_ENCAP_ILA;
 	newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
@@ -109,7 +130,10 @@
 {
 	struct ila_params *p = ila_params_lwtunnel(lwtstate);
 
-	if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator))
+	if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator.v64,
+			      ILA_ATTR_PAD))
+		goto nla_put_failure;
+	if (nla_put_u8(skb, ILA_ATTR_CSUM_MODE, (__force u8)p->csum_mode))
 		goto nla_put_failure;
 
 	return 0;
@@ -120,8 +144,9 @@
 
 static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-	/* No encapsulation overhead */
-	return 0;
+	return nla_total_size_64bit(sizeof(u64)) + /* ILA_ATTR_LOCATOR */
+	       nla_total_size(sizeof(u8)) +        /* ILA_ATTR_CSUM_MODE */
+	       0;
 }
 
 static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
@@ -129,7 +154,7 @@
 	struct ila_params *a_p = ila_params_lwtunnel(a);
 	struct ila_params *b_p = ila_params_lwtunnel(b);
 
-	return (a_p->locator != b_p->locator);
+	return (a_p->locator.v64 != b_p->locator.v64);
 }
 
 static const struct lwtunnel_encap_ops ila_encap_ops = {
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 295ca29..a90e572 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -11,13 +11,11 @@
 
 struct ila_xlat_params {
 	struct ila_params ip;
-	__be64 identifier;
 	int ifindex;
-	unsigned int dir;
 };
 
 struct ila_map {
-	struct ila_xlat_params p;
+	struct ila_xlat_params xp;
 	struct rhash_head node;
 	struct ila_map __rcu *next;
 	struct rcu_head rcu;
@@ -66,31 +64,29 @@
 	net_get_random_once(&hashrnd, sizeof(hashrnd));
 }
 
-static inline u32 ila_identifier_hash(__be64 identifier)
+static inline u32 ila_locator_hash(struct ila_locator loc)
 {
-	u32 *v = (u32 *)&identifier;
+	u32 *v = (u32 *)loc.v32;
 
 	return jhash_2words(v[0], v[1], hashrnd);
 }
 
-static inline spinlock_t *ila_get_lock(struct ila_net *ilan, __be64 identifier)
+static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
+				       struct ila_locator loc)
 {
-	return &ilan->locks[ila_identifier_hash(identifier) & ilan->locks_mask];
+	return &ilan->locks[ila_locator_hash(loc) & ilan->locks_mask];
 }
 
-static inline int ila_cmp_wildcards(struct ila_map *ila, __be64 loc,
-				    int ifindex, unsigned int dir)
+static inline int ila_cmp_wildcards(struct ila_map *ila,
+				    struct ila_addr *iaddr, int ifindex)
 {
-	return (ila->p.ip.locator_match && ila->p.ip.locator_match != loc) ||
-	       (ila->p.ifindex && ila->p.ifindex != ifindex) ||
-	       !(ila->p.dir & dir);
+	return (ila->xp.ifindex && ila->xp.ifindex != ifindex);
 }
 
-static inline int ila_cmp_params(struct ila_map *ila, struct ila_xlat_params *p)
+static inline int ila_cmp_params(struct ila_map *ila,
+				 struct ila_xlat_params *xp)
 {
-	return (ila->p.ip.locator_match != p->ip.locator_match) ||
-	       (ila->p.ifindex != p->ifindex) ||
-	       (ila->p.dir != p->dir);
+	return (ila->xp.ifindex != xp->ifindex);
 }
 
 static int ila_cmpfn(struct rhashtable_compare_arg *arg,
@@ -98,17 +94,14 @@
 {
 	const struct ila_map *ila = obj;
 
-	return (ila->p.identifier != *(__be64 *)arg->key);
+	return (ila->xp.ip.locator_match.v64 != *(__be64 *)arg->key);
 }
 
 static inline int ila_order(struct ila_map *ila)
 {
 	int score = 0;
 
-	if (ila->p.ip.locator_match)
-		score += 1 << 0;
-
-	if (ila->p.ifindex)
+	if (ila->xp.ifindex)
 		score += 1 << 1;
 
 	return score;
@@ -117,7 +110,7 @@
 static const struct rhashtable_params rht_params = {
 	.nelem_hint = 1024,
 	.head_offset = offsetof(struct ila_map, node),
-	.key_offset = offsetof(struct ila_map, p.identifier),
+	.key_offset = offsetof(struct ila_map, xp.ip.locator_match),
 	.key_len = sizeof(u64), /* identifier */
 	.max_size = 1048576,
 	.min_size = 256,
@@ -136,50 +129,45 @@
 };
 
 static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
-	[ILA_ATTR_IDENTIFIER] = { .type = NLA_U64, },
 	[ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
 	[ILA_ATTR_LOCATOR_MATCH] = { .type = NLA_U64, },
 	[ILA_ATTR_IFINDEX] = { .type = NLA_U32, },
-	[ILA_ATTR_DIR] = { .type = NLA_U32, },
+	[ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
 };
 
 static int parse_nl_config(struct genl_info *info,
-			   struct ila_xlat_params *p)
+			   struct ila_xlat_params *xp)
 {
-	memset(p, 0, sizeof(*p));
-
-	if (info->attrs[ILA_ATTR_IDENTIFIER])
-		p->identifier = (__force __be64)nla_get_u64(
-			info->attrs[ILA_ATTR_IDENTIFIER]);
+	memset(xp, 0, sizeof(*xp));
 
 	if (info->attrs[ILA_ATTR_LOCATOR])
-		p->ip.locator = (__force __be64)nla_get_u64(
+		xp->ip.locator.v64 = (__force __be64)nla_get_u64(
 			info->attrs[ILA_ATTR_LOCATOR]);
 
 	if (info->attrs[ILA_ATTR_LOCATOR_MATCH])
-		p->ip.locator_match = (__force __be64)nla_get_u64(
+		xp->ip.locator_match.v64 = (__force __be64)nla_get_u64(
 			info->attrs[ILA_ATTR_LOCATOR_MATCH]);
 
-	if (info->attrs[ILA_ATTR_IFINDEX])
-		p->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);
+	if (info->attrs[ILA_ATTR_CSUM_MODE])
+		xp->ip.csum_mode = nla_get_u8(info->attrs[ILA_ATTR_CSUM_MODE]);
 
-	if (info->attrs[ILA_ATTR_DIR])
-		p->dir = nla_get_u32(info->attrs[ILA_ATTR_DIR]);
+	if (info->attrs[ILA_ATTR_IFINDEX])
+		xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);
 
 	return 0;
 }
 
 /* Must be called with rcu readlock */
-static inline struct ila_map *ila_lookup_wildcards(__be64 id, __be64 loc,
+static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
 						   int ifindex,
-						   unsigned int dir,
 						   struct ila_net *ilan)
 {
 	struct ila_map *ila;
 
-	ila = rhashtable_lookup_fast(&ilan->rhash_table, &id, rht_params);
+	ila = rhashtable_lookup_fast(&ilan->rhash_table, &iaddr->loc,
+				     rht_params);
 	while (ila) {
-		if (!ila_cmp_wildcards(ila, loc, ifindex, dir))
+		if (!ila_cmp_wildcards(ila, iaddr, ifindex))
 			return ila;
 		ila = rcu_access_pointer(ila->next);
 	}
@@ -188,15 +176,16 @@
 }
 
 /* Must be called with rcu readlock */
-static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *p,
+static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
 						   struct ila_net *ilan)
 {
 	struct ila_map *ila;
 
-	ila = rhashtable_lookup_fast(&ilan->rhash_table, &p->identifier,
+	ila = rhashtable_lookup_fast(&ilan->rhash_table,
+				     &xp->ip.locator_match,
 				     rht_params);
 	while (ila) {
-		if (!ila_cmp_params(ila, p))
+		if (!ila_cmp_params(ila, xp))
 			return ila;
 		ila = rcu_access_pointer(ila->next);
 	}
@@ -221,14 +210,14 @@
 	}
 }
 
-static int ila_xlat_addr(struct sk_buff *skb, int dir);
+static int ila_xlat_addr(struct sk_buff *skb);
 
 static unsigned int
 ila_nf_input(void *priv,
 	     struct sk_buff *skb,
 	     const struct nf_hook_state *state)
 {
-	ila_xlat_addr(skb, ILA_DIR_IN);
+	ila_xlat_addr(skb);
 	return NF_ACCEPT;
 }
 
@@ -241,11 +230,11 @@
 	},
 };
 
-static int ila_add_mapping(struct net *net, struct ila_xlat_params *p)
+static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
 {
 	struct ila_net *ilan = net_generic(net, ila_net_id);
 	struct ila_map *ila, *head;
-	spinlock_t *lock = ila_get_lock(ilan, p->identifier);
+	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
 	int err = 0, order;
 
 	if (!ilan->hooks_registered) {
@@ -264,22 +253,16 @@
 	if (!ila)
 		return -ENOMEM;
 
-	ila->p = *p;
+	ila_init_saved_csum(&xp->ip);
 
-	if (p->ip.locator_match) {
-		/* Precompute checksum difference for translation since we
-		 * know both the old identifier and the new one.
-		 */
-		ila->p.ip.csum_diff = compute_csum_diff8(
-			(__be32 *)&p->ip.locator_match,
-			(__be32 *)&p->ip.locator);
-	}
+	ila->xp = *xp;
 
 	order = ila_order(ila);
 
 	spin_lock(lock);
 
-	head = rhashtable_lookup_fast(&ilan->rhash_table, &p->identifier,
+	head = rhashtable_lookup_fast(&ilan->rhash_table,
+				      &xp->ip.locator_match,
 				      rht_params);
 	if (!head) {
 		/* New entry for the rhash_table */
@@ -289,7 +272,7 @@
 		struct ila_map *tila = head, *prev = NULL;
 
 		do {
-			if (!ila_cmp_params(tila, p)) {
+			if (!ila_cmp_params(tila, xp)) {
 				err = -EEXIST;
 				goto out;
 			}
@@ -326,23 +309,23 @@
 	return err;
 }
 
-static int ila_del_mapping(struct net *net, struct ila_xlat_params *p)
+static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
 {
 	struct ila_net *ilan = net_generic(net, ila_net_id);
 	struct ila_map *ila, *head, *prev;
-	spinlock_t *lock = ila_get_lock(ilan, p->identifier);
+	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
 	int err = -ENOENT;
 
 	spin_lock(lock);
 
 	head = rhashtable_lookup_fast(&ilan->rhash_table,
-				      &p->identifier, rht_params);
+				      &xp->ip.locator_match, rht_params);
 	ila = head;
 
 	prev = NULL;
 
 	while (ila) {
-		if (ila_cmp_params(ila, p)) {
+		if (ila_cmp_params(ila, xp)) {
 			prev = ila;
 			ila = rcu_dereference_protected(ila->next,
 							lockdep_is_held(lock));
@@ -404,28 +387,28 @@
 static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
 {
 	struct net *net = genl_info_net(info);
-	struct ila_xlat_params p;
+	struct ila_xlat_params xp;
 	int err;
 
-	err = parse_nl_config(info, &p);
+	err = parse_nl_config(info, &xp);
 	if (err)
 		return err;
 
-	ila_del_mapping(net, &p);
+	ila_del_mapping(net, &xp);
 
 	return 0;
 }
 
 static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
 {
-	if (nla_put_u64(msg, ILA_ATTR_IDENTIFIER,
-			(__force u64)ila->p.identifier) ||
-	    nla_put_u64(msg, ILA_ATTR_LOCATOR,
-			(__force u64)ila->p.ip.locator) ||
-	    nla_put_u64(msg, ILA_ATTR_LOCATOR_MATCH,
-			(__force u64)ila->p.ip.locator_match) ||
-	    nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->p.ifindex) ||
-	    nla_put_u32(msg, ILA_ATTR_DIR, ila->p.dir))
+	if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
+			      (__force u64)ila->xp.ip.locator.v64,
+			      ILA_ATTR_PAD) ||
+	    nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
+			      (__force u64)ila->xp.ip.locator_match.v64,
+			      ILA_ATTR_PAD) ||
+	    nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) ||
+	    nla_put_u32(msg, ILA_ATTR_CSUM_MODE, ila->xp.ip.csum_mode))
 		return -1;
 
 	return 0;
@@ -457,11 +440,11 @@
 	struct net *net = genl_info_net(info);
 	struct ila_net *ilan = net_generic(net, ila_net_id);
 	struct sk_buff *msg;
-	struct ila_xlat_params p;
+	struct ila_xlat_params xp;
 	struct ila_map *ila;
 	int ret;
 
-	ret = parse_nl_config(info, &p);
+	ret = parse_nl_config(info, &xp);
 	if (ret)
 		return ret;
 
@@ -471,7 +454,7 @@
 
 	rcu_read_lock();
 
-	ila = ila_lookup_by_params(&p, ilan);
+	ila = ila_lookup_by_params(&xp, ilan);
 	if (ila) {
 		ret = ila_dump_info(ila,
 				    info->snd_portid,
@@ -501,7 +484,8 @@
 	struct ila_net *ilan = net_generic(net, ila_net_id);
 	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args;
 
-	return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter);
+	return rhashtable_walk_init(&ilan->rhash_table, &iter->rhiter,
+				    GFP_KERNEL);
 }
 
 static int ila_nl_dump_done(struct netlink_callback *cb)
@@ -613,45 +597,32 @@
 	.size = sizeof(struct ila_net),
 };
 
-static int ila_xlat_addr(struct sk_buff *skb, int dir)
+static int ila_xlat_addr(struct sk_buff *skb)
 {
 	struct ila_map *ila;
 	struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	struct net *net = dev_net(skb->dev);
 	struct ila_net *ilan = net_generic(net, ila_net_id);
-	__be64 identifier, locator_match;
-	size_t nhoff;
+	struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);
 
 	/* Assumes skb contains a valid IPv6 header that is pulled */
 
-	identifier = *(__be64 *)&ip6h->daddr.in6_u.u6_addr8[8];
-	locator_match = *(__be64 *)&ip6h->daddr.in6_u.u6_addr8[0];
-	nhoff = sizeof(struct ipv6hdr);
+	if (!ila_addr_is_ila(iaddr)) {
+		/* Type indicates this is not an ILA address */
+		return 0;
+	}
 
 	rcu_read_lock();
 
-	ila = ila_lookup_wildcards(identifier, locator_match,
-				   skb->dev->ifindex, dir, ilan);
+	ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
 	if (ila)
-		update_ipv6_locator(skb, &ila->p.ip);
+		ila_update_ipv6_locator(skb, &ila->xp.ip);
 
 	rcu_read_unlock();
 
 	return 0;
 }
 
-int ila_xlat_incoming(struct sk_buff *skb)
-{
-	return ila_xlat_addr(skb, ILA_DIR_IN);
-}
-EXPORT_SYMBOL(ila_xlat_incoming);
-
-int ila_xlat_outgoing(struct sk_buff *skb)
-{
-	return ila_xlat_addr(skb, ILA_DIR_OUT);
-}
-EXPORT_SYMBOL(ila_xlat_outgoing);
-
 int ila_xlat_init(void)
 {
 	int ret;
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 70f2628..00cf28a 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -69,7 +69,6 @@
 	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
 
 
-	rcu_read_lock();
 begin:
 	sk_nulls_for_each_rcu(sk, node, &head->chain) {
 		if (sk->sk_hash != hash)
@@ -90,7 +89,6 @@
 out:
 	sk = NULL;
 found:
-	rcu_read_unlock();
 	return sk;
 }
 EXPORT_SYMBOL(__inet6_lookup_established);
@@ -122,6 +120,7 @@
 	return score;
 }
 
+/* called with rcu_read_lock() */
 struct sock *inet6_lookup_listener(struct net *net,
 		struct inet_hashinfo *hashinfo,
 		struct sk_buff *skb, int doff,
@@ -129,39 +128,27 @@
 		const __be16 sport, const struct in6_addr *daddr,
 		const unsigned short hnum, const int dif)
 {
-	struct sock *sk;
-	const struct hlist_nulls_node *node;
-	struct sock *result;
-	int score, hiscore, matches = 0, reuseport = 0;
-	bool select_ok = true;
-	u32 phash = 0;
 	unsigned int hash = inet_lhashfn(net, hnum);
 	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
+	int score, hiscore = 0, matches = 0, reuseport = 0;
+	struct sock *sk, *result = NULL;
+	u32 phash = 0;
 
-	rcu_read_lock();
-begin:
-	result = NULL;
-	hiscore = 0;
-	sk_nulls_for_each(sk, node, &ilb->head) {
+	sk_for_each(sk, &ilb->head) {
 		score = compute_score(sk, net, hnum, daddr, dif);
 		if (score > hiscore) {
-			hiscore = score;
-			result = sk;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
 				phash = inet6_ehashfn(net, daddr, hnum,
 						      saddr, sport);
-				if (select_ok) {
-					struct sock *sk2;
-					sk2 = reuseport_select_sock(sk, phash,
-								    skb, doff);
-					if (sk2) {
-						result = sk2;
-						goto found;
-					}
-				}
+				result = reuseport_select_sock(sk, phash,
+							       skb, doff);
+				if (result)
+					return result;
 				matches = 1;
 			}
+			result = sk;
+			hiscore = score;
 		} else if (score == hiscore && reuseport) {
 			matches++;
 			if (reciprocal_scale(phash, matches) == 0)
@@ -169,25 +156,6 @@
 			phash = next_pseudo_random32(phash);
 		}
 	}
-	/*
-	 * if the nulls value we got at the end of this lookup is
-	 * not the expected one, we must restart lookup.
-	 * We probably met an item that was moved to another chain.
-	 */
-	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
-		goto begin;
-	if (result) {
-found:
-		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
-			result = NULL;
-		else if (unlikely(compute_score(result, net, hnum, daddr,
-				  dif) < hiscore)) {
-			sock_put(result);
-			select_ok = false;
-			goto begin;
-		}
-	}
-	rcu_read_unlock();
 	return result;
 }
 EXPORT_SYMBOL_GPL(inet6_lookup_listener);
@@ -199,12 +167,12 @@
 			  const int dif)
 {
 	struct sock *sk;
+	bool refcounted;
 
-	local_bh_disable();
 	sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
-			    ntohs(dport), dif);
-	local_bh_enable();
-
+			    ntohs(dport), dif, &refcounted);
+	if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
+		sk = NULL;
 	return sk;
 }
 EXPORT_SYMBOL_GPL(inet6_lookup);
@@ -254,7 +222,7 @@
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	if (tw) {
 		sk_nulls_del_node_init_rcu((struct sock *)tw);
-		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
 	}
 	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ea071fa..1bcef23 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -240,6 +240,7 @@
 
 	return tb;
 }
+EXPORT_SYMBOL_GPL(fib6_new_table);
 
 struct fib6_table *fib6_get_table(struct net *net, u32 id)
 {
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index dc2db4f..b912f0d 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -372,7 +372,8 @@
 	if (olen > 0) {
 		struct msghdr msg;
 		struct flowi6 flowi6;
-		int junk;
+		struct sockcm_cookie sockc_junk;
+		struct ipcm6_cookie ipc6;
 
 		err = -ENOMEM;
 		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
@@ -389,8 +390,8 @@
 		msg.msg_control = (void *)(fl->opt+1);
 		memset(&flowi6, 0, sizeof(flowi6));
 
-		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
-					    &junk, &junk, &junk);
+		ipc6.opt = fl->opt;
+		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6, &sockc_junk);
 		if (err)
 			goto done;
 		err = -EINVAL;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4e636e6..4541fa5 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -54,6 +54,7 @@
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
 #include <net/ip6_tunnel.h>
+#include <net/gre.h>
 
 
 static bool log_ecn_error = true;
@@ -342,7 +343,7 @@
 		goto failed_free;
 
 	/* Can use a lockless transmit, unless we generate output sequences */
-	if (!(nt->parms.o_flags & GRE_SEQ))
+	if (!(nt->parms.o_flags & TUNNEL_SEQ))
 		dev->features |= NETIF_F_LLTX;
 
 	dev_hold(dev);
@@ -443,137 +444,41 @@
 	t->err_time = jiffies;
 }
 
-static int ip6gre_rcv(struct sk_buff *skb)
+static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 {
 	const struct ipv6hdr *ipv6h;
-	u8     *h;
-	__be16    flags;
-	__sum16   csum = 0;
-	__be32 key = 0;
-	u32    seqno = 0;
 	struct ip6_tnl *tunnel;
-	int    offset = 4;
-	__be16 gre_proto;
-	int err;
-
-	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
-		goto drop;
 
 	ipv6h = ipv6_hdr(skb);
-	h = skb->data;
-	flags = *(__be16 *)h;
-
-	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
-		/* - Version must be 0.
-		   - We do not support routing headers.
-		 */
-		if (flags&(GRE_VERSION|GRE_ROUTING))
-			goto drop;
-
-		if (flags&GRE_CSUM) {
-			csum = skb_checksum_simple_validate(skb);
-			offset += 4;
-		}
-		if (flags&GRE_KEY) {
-			key = *(__be32 *)(h + offset);
-			offset += 4;
-		}
-		if (flags&GRE_SEQ) {
-			seqno = ntohl(*(__be32 *)(h + offset));
-			offset += 4;
-		}
-	}
-
-	gre_proto = *(__be16 *)(h + 2);
-
 	tunnel = ip6gre_tunnel_lookup(skb->dev,
-					  &ipv6h->saddr, &ipv6h->daddr, key,
-					  gre_proto);
+				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
+				      tpi->proto);
 	if (tunnel) {
-		struct pcpu_sw_netstats *tstats;
+		ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);
 
-		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
-			goto drop;
-
-		if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
-			tunnel->dev->stats.rx_dropped++;
-			goto drop;
-		}
-
-		skb->protocol = gre_proto;
-		/* WCCP version 1 and 2 protocol decoding.
-		 * - Change protocol to IPv6
-		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
-		 */
-		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
-			skb->protocol = htons(ETH_P_IPV6);
-			if ((*(h + offset) & 0xF0) != 0x40)
-				offset += 4;
-		}
-
-		skb->mac_header = skb->network_header;
-		__pskb_pull(skb, offset);
-		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
-
-		if (((flags&GRE_CSUM) && csum) ||
-		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
-			tunnel->dev->stats.rx_crc_errors++;
-			tunnel->dev->stats.rx_errors++;
-			goto drop;
-		}
-		if (tunnel->parms.i_flags&GRE_SEQ) {
-			if (!(flags&GRE_SEQ) ||
-			    (tunnel->i_seqno &&
-					(s32)(seqno - tunnel->i_seqno) < 0)) {
-				tunnel->dev->stats.rx_fifo_errors++;
-				tunnel->dev->stats.rx_errors++;
-				goto drop;
-			}
-			tunnel->i_seqno = seqno + 1;
-		}
-
-		/* Warning: All skb pointers will be invalidated! */
-		if (tunnel->dev->type == ARPHRD_ETHER) {
-			if (!pskb_may_pull(skb, ETH_HLEN)) {
-				tunnel->dev->stats.rx_length_errors++;
-				tunnel->dev->stats.rx_errors++;
-				goto drop;
-			}
-
-			ipv6h = ipv6_hdr(skb);
-			skb->protocol = eth_type_trans(skb, tunnel->dev);
-			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-		}
-
-		__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
-
-		skb_reset_network_header(skb);
-
-		err = IP6_ECN_decapsulate(ipv6h, skb);
-		if (unlikely(err)) {
-			if (log_ecn_error)
-				net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
-						     &ipv6h->saddr,
-						     ipv6_get_dsfield(ipv6h));
-			if (err > 1) {
-				++tunnel->dev->stats.rx_frame_errors;
-				++tunnel->dev->stats.rx_errors;
-				goto drop;
-			}
-		}
-
-		tstats = this_cpu_ptr(tunnel->dev->tstats);
-		u64_stats_update_begin(&tstats->syncp);
-		tstats->rx_packets++;
-		tstats->rx_bytes += skb->len;
-		u64_stats_update_end(&tstats->syncp);
-
-		netif_rx(skb);
-
-		return 0;
+		return PACKET_RCVD;
 	}
-	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 
+	return PACKET_REJECT;
+}
+
+static int gre_rcv(struct sk_buff *skb)
+{
+	struct tnl_ptk_info tpi;
+	bool csum_err = false;
+	int hdr_len;
+
+	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6));
+	if (hdr_len < 0)
+		goto drop;
+
+	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
+		goto drop;
+
+	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
+		return 0;
+
+	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 drop:
 	kfree_skb(skb);
 	return 0;
@@ -584,187 +489,40 @@
 	__u8 dst_opt[8];
 };
 
-static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
+static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
-	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
-
-	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
-	opt->dst_opt[3] = 1;
-	opt->dst_opt[4] = encap_limit;
-	opt->dst_opt[5] = IPV6_TLV_PADN;
-	opt->dst_opt[6] = 1;
-
-	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
-	opt->ops.opt_nflen = 8;
+	return iptunnel_handle_offloads(skb,
+					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
-static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
-			 struct net_device *dev,
-			 __u8 dsfield,
-			 struct flowi6 *fl6,
-			 int encap_limit,
-			 __u32 *pmtu)
+static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+			       struct net_device *dev, __u8 dsfield,
+			       struct flowi6 *fl6, int encap_limit,
+			       __u32 *pmtu, __be16 proto)
 {
 	struct ip6_tnl *tunnel = netdev_priv(dev);
-	struct net *net = tunnel->net;
-	struct net_device *tdev;    /* Device to other host */
-	struct ipv6hdr  *ipv6h;     /* Our new IP header */
-	unsigned int max_headroom = 0; /* The extra header space needed */
-	int    gre_hlen;
-	struct ipv6_tel_txoption opt;
-	int    mtu;
-	struct dst_entry *dst = NULL, *ndst = NULL;
-	struct net_device_stats *stats = &tunnel->dev->stats;
-	int err = -1;
-	u8 proto;
-	struct sk_buff *new_skb;
-	__be16 protocol;
+	__be16 protocol = (dev->type == ARPHRD_ETHER) ?
+			  htons(ETH_P_TEB) : proto;
 
 	if (dev->type == ARPHRD_ETHER)
 		IPCB(skb)->flags = 0;
 
-	if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
-		gre_hlen = 0;
-		ipv6h = (struct ipv6hdr *)skb->data;
-		fl6->daddr = ipv6h->daddr;
-	} else {
-		gre_hlen = tunnel->hlen;
+	if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
+		fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
+	else
 		fl6->daddr = tunnel->parms.raddr;
-	}
 
-	if (!fl6->flowi6_mark)
-		dst = dst_cache_get(&tunnel->dst_cache);
+	if (tunnel->parms.o_flags & TUNNEL_SEQ)
+		tunnel->o_seqno++;
 
-	if (!dst) {
-		dst = ip6_route_output(net, NULL, fl6);
-
-		if (dst->error)
-			goto tx_err_link_failure;
-		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
-		if (IS_ERR(dst)) {
-			err = PTR_ERR(dst);
-			dst = NULL;
-			goto tx_err_link_failure;
-		}
-		ndst = dst;
-	}
-
-	tdev = dst->dev;
-
-	if (tdev == dev) {
-		stats->collisions++;
-		net_warn_ratelimited("%s: Local routing loop detected!\n",
-				     tunnel->parms.name);
-		goto tx_err_dst_release;
-	}
-
-	mtu = dst_mtu(dst) - sizeof(*ipv6h);
-	if (encap_limit >= 0) {
-		max_headroom += 8;
-		mtu -= 8;
-	}
-	if (mtu < IPV6_MIN_MTU)
-		mtu = IPV6_MIN_MTU;
-	if (skb_dst(skb))
-		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-	if (skb->len > mtu) {
-		*pmtu = mtu;
-		err = -EMSGSIZE;
-		goto tx_err_dst_release;
-	}
-
-	if (tunnel->err_count > 0) {
-		if (time_before(jiffies,
-				tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
-			tunnel->err_count--;
-
-			dst_link_failure(skb);
-		} else
-			tunnel->err_count = 0;
-	}
-
-	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
-
-	max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
-
-	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
-	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
-		new_skb = skb_realloc_headroom(skb, max_headroom);
-		if (max_headroom > dev->needed_headroom)
-			dev->needed_headroom = max_headroom;
-		if (!new_skb)
-			goto tx_err_dst_release;
-
-		if (skb->sk)
-			skb_set_owner_w(new_skb, skb->sk);
-		consume_skb(skb);
-		skb = new_skb;
-	}
-
-	if (!fl6->flowi6_mark && ndst)
-		dst_cache_set_ip6(&tunnel->dst_cache, ndst, &fl6->saddr);
-	skb_dst_set(skb, dst);
-
-	proto = NEXTHDR_GRE;
-	if (encap_limit >= 0) {
-		init_tel_txopt(&opt, encap_limit);
-		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
-	}
-
-	if (likely(!skb->encapsulation)) {
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
-	}
-
-	skb_push(skb, gre_hlen);
-	skb_reset_network_header(skb);
-	skb_set_transport_header(skb, sizeof(*ipv6h));
-
-	/*
-	 *	Push down and install the IP header.
-	 */
-	ipv6h = ipv6_hdr(skb);
-	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
-		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
-	ipv6h->hop_limit = tunnel->parms.hop_limit;
-	ipv6h->nexthdr = proto;
-	ipv6h->saddr = fl6->saddr;
-	ipv6h->daddr = fl6->daddr;
-
-	((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
-	protocol = (dev->type == ARPHRD_ETHER) ?
-		    htons(ETH_P_TEB) : skb->protocol;
-	((__be16 *)(ipv6h + 1))[1] = protocol;
-
-	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
-		__be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);
-
-		if (tunnel->parms.o_flags&GRE_SEQ) {
-			++tunnel->o_seqno;
-			*ptr = htonl(tunnel->o_seqno);
-			ptr--;
-		}
-		if (tunnel->parms.o_flags&GRE_KEY) {
-			*ptr = tunnel->parms.o_key;
-			ptr--;
-		}
-		if (tunnel->parms.o_flags&GRE_CSUM) {
-			*ptr = 0;
-			*(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
-				skb->len - sizeof(struct ipv6hdr));
-		}
-	}
+	/* Push GRE header. */
+	gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+			 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
 
 	skb_set_inner_protocol(skb, protocol);
 
-	ip6tunnel_xmit(NULL, skb, dev);
-	return 0;
-tx_err_link_failure:
-	stats->tx_carrier_errors++;
-	dst_link_failure(skb);
-tx_err_dst_release:
-	dst_release(dst);
-	return err;
+	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
+			    NEXTHDR_GRE);
 }
 
 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
@@ -783,7 +541,6 @@
 		encap_limit = t->parms.encap_limit;
 
 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-	fl6.flowi6_proto = IPPROTO_GRE;
 
 	dsfield = ipv4_get_dsfield(iph);
 
@@ -793,7 +550,12 @@
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
 		fl6.flowi6_mark = skb->mark;
 
-	err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
+	if (err)
+		return -1;
+
+	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+			  skb->protocol);
 	if (err != 0) {
 		/* XXX: send ICMP error even if DF is not set. */
 		if (err == -EMSGSIZE)
@@ -833,7 +595,6 @@
 		encap_limit = t->parms.encap_limit;
 
 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-	fl6.flowi6_proto = IPPROTO_GRE;
 
 	dsfield = ipv6_get_dsfield(ipv6h);
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
@@ -843,7 +604,11 @@
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
 		fl6.flowi6_mark = skb->mark;
 
-	err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
+		return -1;
+
+	err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
+			  &mtu, skb->protocol);
 	if (err != 0) {
 		if (err == -EMSGSIZE)
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -887,7 +652,11 @@
 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 	fl6.flowi6_proto = skb->protocol;
 
-	err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
+	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
+	if (err)
+		return err;
+
+	err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);
 
 	return err;
 }
@@ -931,7 +700,7 @@
 	struct net_device *dev = t->dev;
 	struct __ip6_tnl_parm *p = &t->parms;
 	struct flowi6 *fl6 = &t->fl.u.ip6;
-	int addend = sizeof(struct ipv6hdr) + 4;
+	int t_hlen;
 
 	if (dev->type != ARPHRD_ETHER) {
 		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -958,16 +727,11 @@
 	else
 		dev->flags &= ~IFF_POINTOPOINT;
 
-	/* Precalculate GRE options length */
-	if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
-		if (t->parms.o_flags&GRE_CSUM)
-			addend += 4;
-		if (t->parms.o_flags&GRE_KEY)
-			addend += 4;
-		if (t->parms.o_flags&GRE_SEQ)
-			addend += 4;
-	}
-	t->hlen = addend;
+	t->tun_hlen = gre_calc_hlen(t->parms.o_flags);
+
+	t->hlen = t->tun_hlen;
+
+	t_hlen = t->hlen + sizeof(struct ipv6hdr);
 
 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
 		int strict = (ipv6_addr_type(&p->raddr) &
@@ -981,12 +745,15 @@
 			return;
 
 		if (rt->dst.dev) {
-			dev->hard_header_len = rt->dst.dev->hard_header_len + addend;
+			dev->hard_header_len = rt->dst.dev->hard_header_len +
+					       t_hlen;
 
 			if (set_mtu) {
-				dev->mtu = rt->dst.dev->mtu - addend;
+				dev->mtu = rt->dst.dev->mtu - t_hlen;
 				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 					dev->mtu -= 8;
+				if (dev->type == ARPHRD_ETHER)
+					dev->mtu -= ETH_HLEN;
 
 				if (dev->mtu < IPV6_MIN_MTU)
 					dev->mtu = IPV6_MIN_MTU;
@@ -1028,8 +795,8 @@
 	p->link = u->link;
 	p->i_key = u->i_key;
 	p->o_key = u->o_key;
-	p->i_flags = u->i_flags;
-	p->o_flags = u->o_flags;
+	p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
+	p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
 	memcpy(p->name, u->name, sizeof(u->name));
 }
 
@@ -1046,8 +813,8 @@
 	u->link = p->link;
 	u->i_key = p->i_key;
 	u->o_key = p->o_key;
-	u->i_flags = p->i_flags;
-	u->o_flags = p->o_flags;
+	u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
+	u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
 	memcpy(u->name, p->name, sizeof(u->name));
 }
 
@@ -1061,6 +828,8 @@
 	struct net *net = t->net;
 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 
+	memset(&p1, 0, sizeof(p1));
+
 	switch (cmd) {
 	case SIOCGETTUNNEL:
 		if (dev == ign->fb_tunnel_dev) {
@@ -1160,15 +929,6 @@
 	return err;
 }
 
-static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
-{
-	if (new_mtu < 68 ||
-	    new_mtu > 0xFFF8 - dev->hard_header_len)
-		return -EINVAL;
-	dev->mtu = new_mtu;
-	return 0;
-}
-
 static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
 			unsigned short type,
 			const void *daddr, const void *saddr, unsigned int len)
@@ -1212,7 +972,7 @@
 	.ndo_uninit		= ip6gre_tunnel_uninit,
 	.ndo_start_xmit		= ip6gre_tunnel_xmit,
 	.ndo_do_ioctl		= ip6gre_tunnel_ioctl,
-	.ndo_change_mtu		= ip6gre_tunnel_change_mtu,
+	.ndo_change_mtu		= ip6_tnl_change_mtu,
 	.ndo_get_stats64	= ip_tunnel_get_stats64,
 	.ndo_get_iflink		= ip6_tnl_get_iflink,
 };
@@ -1228,17 +988,11 @@
 
 static void ip6gre_tunnel_setup(struct net_device *dev)
 {
-	struct ip6_tnl *t;
-
 	dev->netdev_ops = &ip6gre_netdev_ops;
 	dev->destructor = ip6gre_dev_free;
 
 	dev->type = ARPHRD_IP6GRE;
-	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
-	dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
-	t = netdev_priv(dev);
-	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		dev->mtu -= 8;
+
 	dev->flags |= IFF_NOARP;
 	dev->addr_len = sizeof(struct in6_addr);
 	netif_keep_dst(dev);
@@ -1248,6 +1002,7 @@
 {
 	struct ip6_tnl *tunnel;
 	int ret;
+	int t_hlen;
 
 	tunnel = netdev_priv(dev);
 
@@ -1266,6 +1021,17 @@
 		return ret;
 	}
 
+	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
+
+	tunnel->hlen = tunnel->tun_hlen;
+
+	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+
+	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+	dev->mtu = ETH_DATA_LEN - t_hlen;
+	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		dev->mtu -= 8;
+
 	return 0;
 }
 
@@ -1304,7 +1070,7 @@
 
 
 static struct inet6_protocol ip6gre_protocol __read_mostly = {
-	.handler     = ip6gre_rcv,
+	.handler     = gre_rcv,
 	.err_handler = ip6gre_err,
 	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
@@ -1448,10 +1214,12 @@
 		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
 
 	if (data[IFLA_GRE_IFLAGS])
-		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
+		parms->i_flags = gre_flags_to_tnl_flags(
+				nla_get_be16(data[IFLA_GRE_IFLAGS]));
 
 	if (data[IFLA_GRE_OFLAGS])
-		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
+		parms->o_flags = gre_flags_to_tnl_flags(
+				nla_get_be16(data[IFLA_GRE_OFLAGS]));
 
 	if (data[IFLA_GRE_IKEY])
 		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
@@ -1500,11 +1268,16 @@
 	.ndo_start_xmit = ip6gre_tunnel_xmit,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
-	.ndo_change_mtu = ip6gre_tunnel_change_mtu,
+	.ndo_change_mtu = ip6_tnl_change_mtu,
 	.ndo_get_stats64 = ip_tunnel_get_stats64,
 	.ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
+#define GRE6_FEATURES (NETIF_F_SG |		\
+		       NETIF_F_FRAGLIST |	\
+		       NETIF_F_HIGHDMA |		\
+		       NETIF_F_HW_CSUM)
+
 static void ip6gre_tap_setup(struct net_device *dev)
 {
 
@@ -1538,9 +1311,21 @@
 	nt->net = dev_net(dev);
 	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
 
-	/* Can use a lockless transmit, unless we generate output sequences */
-	if (!(nt->parms.o_flags & GRE_SEQ))
+	dev->features		|= GRE6_FEATURES;
+	dev->hw_features	|= GRE6_FEATURES;
+
+	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
+		/* TCP segmentation offload is not supported when we
+		 * generate output sequences.
+		 */
+		dev->features    |= NETIF_F_GSO_SOFTWARE;
+		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+
+		/* Can use a lockless transmit, unless we generate
+		 * output sequences
+		 */
 		dev->features |= NETIF_F_LLTX;
+	}
 
 	err = register_netdevice(dev);
 	if (err)
@@ -1609,8 +1394,6 @@
 		nla_total_size(sizeof(struct in6_addr)) +
 		/* IFLA_GRE_TTL */
 		nla_total_size(1) +
-		/* IFLA_GRE_TOS */
-		nla_total_size(1) +
 		/* IFLA_GRE_ENCAP_LIMIT */
 		nla_total_size(1) +
 		/* IFLA_GRE_FLOWINFO */
@@ -1626,14 +1409,15 @@
 	struct __ip6_tnl_parm *p = &t->parms;
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
-	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
-	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
+	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
+			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
+	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
+			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
 	    nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
 	    nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
-	    /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
 	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
 	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
 	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
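
The ip6_gre.c hunks above stop storing raw GRE wire flags in the tunnel parameters: the IFLA_GRE_IFLAGS/IFLA_GRE_OFLAGS netlink attributes are now run through gre_flags_to_tnl_flags() on the way in and gre_tnl_flags_to_gre_flags() on the way out, so the driver can test the internal TUNNEL_* bits (TUNNEL_CSUM, TUNNEL_SEQ, ...) and reuse the shared GRE helpers such as gre_calc_hlen() and gre_rcv(). As an illustration only (the real helpers live in the common GRE headers and cover more flag bits), the translation is a bit-for-bit remapping along these lines:

	/* Illustrative sketch, not the kernel's actual helper: map on-the-wire
	 * GRE flag bits to the internal TUNNEL_* representation used by the
	 * tunnel parameter structures.
	 */
	static __be16 example_gre_flags_to_tnl_flags(__be16 gre_flags)
	{
		__be16 tflags = 0;

		if (gre_flags & GRE_CSUM)
			tflags |= TUNNEL_CSUM;
		if (gre_flags & GRE_KEY)
			tflags |= TUNNEL_KEY;
		if (gre_flags & GRE_SEQ)
			tflags |= TUNNEL_SEQ;

		return tflags;
	}

The reverse mapping, used when dumping IFLA_GRE_IFLAGS/IFLA_GRE_OFLAGS back to user space, is the mirror image of this, which is what the netlink fill hunk above relies on.
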
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index c05c425..f185cbc 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -49,6 +49,13 @@
 
 int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	/* if ingress device is enslaved to an L3 master device pass the
+	 * skb to its handler for processing
+	 */
+	skb = l3mdev_ip6_rcv(skb);
+	if (!skb)
+		return NET_RX_SUCCESS;
+
 	if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
 		const struct inet6_protocol *ipprot;
 
@@ -78,11 +85,11 @@
 
 	idev = __in6_dev_get(skb->dev);
 
-	IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len);
+	__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);
 
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
 	    !idev || unlikely(idev->cnf.disable_ipv6)) {
-		IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
+		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -109,10 +116,10 @@
 	if (hdr->version != 6)
 		goto err;
 
-	IP6_ADD_STATS_BH(net, idev,
-			 IPSTATS_MIB_NOECTPKTS +
+	__IP6_ADD_STATS(net, idev,
+			IPSTATS_MIB_NOECTPKTS +
 				(ipv6_get_dsfield(hdr) & INET_ECN_MASK),
-			 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
+			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
 	/*
 	 * RFC4291 2.5.3
 	 * A packet received on an interface with a destination address
@@ -169,12 +176,12 @@
 	/* pkt_len may be zero if Jumbo payload option is present */
 	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
 		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
-			IP6_INC_STATS_BH(net,
-					 idev, IPSTATS_MIB_INTRUNCATEDPKTS);
+			__IP6_INC_STATS(net,
+					idev, IPSTATS_MIB_INTRUNCATEDPKTS);
 			goto drop;
 		}
 		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
-			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 			goto drop;
 		}
 		hdr = ipv6_hdr(skb);
@@ -182,7 +189,7 @@
 
 	if (hdr->nexthdr == NEXTHDR_HOP) {
 		if (ipv6_parse_hopopts(skb) < 0) {
-			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 			rcu_read_unlock();
 			return NET_RX_DROP;
 		}
@@ -197,7 +204,7 @@
 		       net, NULL, skb, dev, NULL,
 		       ip6_rcv_finish);
 err:
-	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 drop:
 	rcu_read_unlock();
 	kfree_skb(skb);
@@ -259,18 +266,18 @@
 		if (ret > 0)
 			goto resubmit;
 		else if (ret == 0)
-			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
+			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
 	} else {
 		if (!raw) {
 			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-				IP6_INC_STATS_BH(net, idev,
-						 IPSTATS_MIB_INUNKNOWNPROTOS);
+				__IP6_INC_STATS(net, idev,
+						IPSTATS_MIB_INUNKNOWNPROTOS);
 				icmpv6_send(skb, ICMPV6_PARAMPROB,
 					    ICMPV6_UNK_NEXTHDR, nhoff);
 			}
 			kfree_skb(skb);
 		} else {
-			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
+			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
 			consume_skb(skb);
 		}
 	}
@@ -278,7 +285,7 @@
 	return 0;
 
 discard:
-	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
+	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
 	rcu_read_unlock();
 	kfree_skb(skb);
 	return 0;
@@ -297,7 +304,7 @@
 	const struct ipv6hdr *hdr;
 	bool deliver;
 
-	IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
+	__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
 			 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
 			 skb->len);
 
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 82e9f30..f5eb184 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -63,6 +63,7 @@
 	int proto;
 	struct frag_hdr *fptr;
 	unsigned int unfrag_ip6hlen;
+	unsigned int payload_len;
 	u8 *prevhdr;
 	int offset = 0;
 	bool encap, udpfrag;
@@ -73,6 +74,8 @@
 		       SKB_GSO_UDP |
 		       SKB_GSO_DODGY |
 		       SKB_GSO_TCP_ECN |
+		       SKB_GSO_TCP_FIXEDID |
+		       SKB_GSO_TCPV6 |
 		       SKB_GSO_GRE |
 		       SKB_GSO_GRE_CSUM |
 		       SKB_GSO_IPIP |
@@ -80,7 +83,7 @@
 		       SKB_GSO_UDP_TUNNEL |
 		       SKB_GSO_UDP_TUNNEL_CSUM |
 		       SKB_GSO_TUNNEL_REMCSUM |
-		       SKB_GSO_TCPV6 |
+		       SKB_GSO_PARTIAL |
 		       0)))
 		goto out;
 
@@ -117,7 +120,13 @@
 
 	for (skb = segs; skb; skb = skb->next) {
 		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
-		ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
+		if (skb_is_gso(skb))
+			payload_len = skb_shinfo(skb)->gso_size +
+				      SKB_GSO_CB(skb)->data_offset +
+				      skb->head - (unsigned char *)(ipv6h + 1);
+		else
+			payload_len = skb->len - nhoff - sizeof(*ipv6h);
+		ipv6h->payload_len = htons(payload_len);
 		skb->network_header = (u8 *)ipv6h - skb->head;
 
 		if (udpfrag) {
@@ -239,10 +248,14 @@
 		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
 		NAPI_GRO_CB(p)->flush |= flush;
 
-		/* Clear flush_id, there's really no concept of ID in IPv6. */
-		NAPI_GRO_CB(p)->flush_id = 0;
+		/* If the previous IP ID value was based on an atomic
+		 * datagram we can overwrite the value and ignore it.
+		 */
+		if (NAPI_GRO_CB(skb)->is_atomic)
+			NAPI_GRO_CB(p)->flush_id = 0;
 	}
 
+	NAPI_GRO_CB(skb)->is_atomic = true;
 	NAPI_GRO_CB(skb)->flush |= flush;
 
 	skb_gro_postpull_rcsum(skb, iph, nlen);
@@ -325,8 +338,6 @@
 
 	if (tcpv6_offload_init() < 0)
 		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
-	if (udp_offload_init() < 0)
-		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
 	if (ipv6_exthdrs_offload_init() < 0)
 		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
 
diff --git a/net/ipv6/ip6_offload.h b/net/ipv6/ip6_offload.h
index 2e155c6..96b40e4 100644
--- a/net/ipv6/ip6_offload.h
+++ b/net/ipv6/ip6_offload.h
@@ -12,7 +12,8 @@
 #define __ip6_offload_h
 
 int ipv6_exthdrs_offload_init(void);
-int udp_offload_init(void);
+int udpv6_offload_init(void);
+int udpv6_offload_exit(void);
 int tcpv6_offload_init(void);
 
 #endif
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9428345d..cbf127a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -395,8 +395,8 @@
 		goto drop;
 
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-				 IPSTATS_MIB_INDISCARDS);
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
+				IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -427,8 +427,8 @@
 		/* Force OUTPUT device used as source address */
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-				 IPSTATS_MIB_INHDRERRORS);
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
+				IPSTATS_MIB_INHDRERRORS);
 
 		kfree_skb(skb);
 		return -ETIMEDOUT;
@@ -441,15 +441,15 @@
 		if (proxied > 0)
 			return ip6_input(skb);
 		else if (proxied < 0) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-					 IPSTATS_MIB_INDISCARDS);
+			__IP6_INC_STATS(net, ip6_dst_idev(dst),
+					IPSTATS_MIB_INDISCARDS);
 			goto drop;
 		}
 	}
 
 	if (!xfrm6_route_forward(skb)) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-				 IPSTATS_MIB_INDISCARDS);
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
+				IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 	dst = skb_dst(skb);
@@ -505,17 +505,17 @@
 		/* Again, force OUTPUT device used as source address */
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-				 IPSTATS_MIB_INTOOBIGERRORS);
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-				 IPSTATS_MIB_FRAGFAILS);
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
+				IPSTATS_MIB_INTOOBIGERRORS);
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
+				IPSTATS_MIB_FRAGFAILS);
 		kfree_skb(skb);
 		return -EMSGSIZE;
 	}
 
 	if (skb_cow(skb, dst->dev->hard_header_len)) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
-				 IPSTATS_MIB_OUTDISCARDS);
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
+				IPSTATS_MIB_OUTDISCARDS);
 		goto drop;
 	}
 
@@ -525,14 +525,14 @@
 
 	hdr->hop_limit--;
 
-	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
-	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
 		       net, NULL, skb, skb->dev, dst->dev,
 		       ip6_forward_finish);
 
 error:
-	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
+	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
 drop:
 	kfree_skb(skb);
 	return -EINVAL;
@@ -1090,8 +1090,8 @@
 			int getfrag(void *from, char *to, int offset, int len,
 			int odd, struct sk_buff *skb),
 			void *from, int length, int hh_len, int fragheaderlen,
-			int transhdrlen, int mtu, unsigned int flags,
-			const struct flowi6 *fl6)
+			int exthdrlen, int transhdrlen, int mtu,
+			unsigned int flags, const struct flowi6 *fl6)
 
 {
 	struct sk_buff *skb;
@@ -1116,7 +1116,7 @@
 		skb_put(skb, fragheaderlen + transhdrlen);
 
 		/* initialize network header pointer */
-		skb_reset_network_header(skb);
+		skb_set_network_header(skb, exthdrlen);
 
 		/* initialize protocol header pointer */
 		skb->transport_header = skb->network_header + fragheaderlen;
@@ -1182,12 +1182,12 @@
 }
 
 static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
-			  struct inet6_cork *v6_cork,
-			  int hlimit, int tclass, struct ipv6_txoptions *opt,
+			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
 			  struct rt6_info *rt, struct flowi6 *fl6)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	unsigned int mtu;
+	struct ipv6_txoptions *opt = ipc6->opt;
 
 	/*
 	 * setup for corking
@@ -1229,8 +1229,8 @@
 	dst_hold(&rt->dst);
 	cork->base.dst = &rt->dst;
 	cork->fl.u.ip6 = *fl6;
-	v6_cork->hop_limit = hlimit;
-	v6_cork->tclass = tclass;
+	v6_cork->hop_limit = ipc6->hlimit;
+	v6_cork->tclass = ipc6->tclass;
 	if (rt->dst.flags & DST_XFRM_TUNNEL)
 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
 		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
@@ -1258,7 +1258,8 @@
 			     int getfrag(void *from, char *to, int offset,
 					 int len, int odd, struct sk_buff *skb),
 			     void *from, int length, int transhdrlen,
-			     unsigned int flags, int dontfrag)
+			     unsigned int flags, struct ipcm6_cookie *ipc6,
+			     const struct sockcm_cookie *sockc)
 {
 	struct sk_buff *skb, *skb_prev = NULL;
 	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
@@ -1297,7 +1298,7 @@
 		      sizeof(struct frag_hdr) : 0) +
 		     rt->rt6i_nfheader_len;
 
-	if (cork->length + length > mtu - headersize && dontfrag &&
+	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
 	    (sk->sk_protocol == IPPROTO_UDP ||
 	     sk->sk_protocol == IPPROTO_RAW)) {
 		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
@@ -1329,7 +1330,7 @@
 		csummode = CHECKSUM_PARTIAL;
 
 	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
-		sock_tx_timestamp(sk, &tx_flags);
+		sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
 		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
 		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
 			tskey = sk->sk_tskey++;
@@ -1358,7 +1359,7 @@
 	    (rt->dst.dev->features & NETIF_F_UFO) &&
 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
-					  hh_len, fragheaderlen,
+					  hh_len, fragheaderlen, exthdrlen,
 					  transhdrlen, mtu, flags, fl6);
 		if (err)
 			goto error;
@@ -1563,9 +1564,10 @@
 int ip6_append_data(struct sock *sk,
 		    int getfrag(void *from, char *to, int offset, int len,
 				int odd, struct sk_buff *skb),
-		    void *from, int length, int transhdrlen, int hlimit,
-		    int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
-		    struct rt6_info *rt, unsigned int flags, int dontfrag)
+		    void *from, int length, int transhdrlen,
+		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+		    struct rt6_info *rt, unsigned int flags,
+		    const struct sockcm_cookie *sockc)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1578,12 +1580,12 @@
 		/*
 		 * setup for corking
 		 */
-		err = ip6_setup_cork(sk, &inet->cork, &np->cork, hlimit,
-				     tclass, opt, rt, fl6);
+		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
+				     ipc6, rt, fl6);
 		if (err)
 			return err;
 
-		exthdrlen = (opt ? opt->opt_flen : 0);
+		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
 		length += exthdrlen;
 		transhdrlen += exthdrlen;
 	} else {
@@ -1593,7 +1595,7 @@
 
 	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
 				 &np->cork, sk_page_frag(sk), getfrag,
-				 from, length, transhdrlen, flags, dontfrag);
+				 from, length, transhdrlen, flags, ipc6, sockc);
 }
 EXPORT_SYMBOL_GPL(ip6_append_data);
 
@@ -1749,15 +1751,14 @@
 			     int getfrag(void *from, char *to, int offset,
 					 int len, int odd, struct sk_buff *skb),
 			     void *from, int length, int transhdrlen,
-			     int hlimit, int tclass,
-			     struct ipv6_txoptions *opt, struct flowi6 *fl6,
+			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
 			     struct rt6_info *rt, unsigned int flags,
-			     int dontfrag)
+			     const struct sockcm_cookie *sockc)
 {
 	struct inet_cork_full cork;
 	struct inet6_cork v6_cork;
 	struct sk_buff_head queue;
-	int exthdrlen = (opt ? opt->opt_flen : 0);
+	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
 	int err;
 
 	if (flags & MSG_PROBE)
@@ -1769,17 +1770,17 @@
 	cork.base.addr = 0;
 	cork.base.opt = NULL;
 	v6_cork.opt = NULL;
-	err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
+	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
 	if (err)
 		return ERR_PTR(err);
 
-	if (dontfrag < 0)
-		dontfrag = inet6_sk(sk)->dontfrag;
+	if (ipc6->dontfrag < 0)
+		ipc6->dontfrag = inet6_sk(sk)->dontfrag;
 
 	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
 				&current->task_frag, getfrag, from,
 				length + exthdrlen, transhdrlen + exthdrlen,
-				flags, dontfrag);
+				flags, ipc6, sockc);
 	if (err) {
 		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
 		return ERR_PTR(err);
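
The ip6_output.c changes above collapse the separate hlimit/tclass/opt/dontfrag arguments of ip6_setup_cork(), __ip6_append_data(), ip6_append_data() and ip6_make_skb() into a single struct ipcm6_cookie, and thread a const struct sockcm_cookie through so sock_tx_timestamp() can pick up per-call tsflags. A minimal caller-side sketch, assuming only the fields visible in the hunks (hlimit, tclass, dontfrag, opt, tsflags) and treating sk, getfrag, msg, len, transhdrlen, fl6 and rt as pre-existing context:

	struct ipcm6_cookie ipc6;
	struct sockcm_cookie sockc;
	int err;

	ipc6.hlimit = -1;	/* "unset": resolved from socket/route defaults */
	ipc6.tclass = -1;
	ipc6.dontfrag = -1;	/* "unset": core/callers fall back to the socket default */
	ipc6.opt = NULL;	/* or sticky/ancillary IPv6 tx options */
	sockc.tsflags = sk->sk_tsflags;

	err = ip6_append_data(sk, getfrag, msg, len, transhdrlen,
			      &ipc6, &fl6, rt, MSG_DONTWAIT, &sockc);

Passing the cookie by pointer also lets the core write defaults back into it (as the ip6_make_skb() hunk above does for dontfrag) instead of every caller duplicating that fallback.
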
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index eb2ac4b..e79330f 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -238,6 +238,7 @@
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 
+	gro_cells_destroy(&t->gro_cells);
 	dst_cache_destroy(&t->dst_cache);
 	free_percpu(dev->tstats);
 	free_netdev(dev);
@@ -252,12 +253,12 @@
 
 	t = netdev_priv(dev);
 
+	dev->rtnl_link_ops = &ip6_link_ops;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out;
 
 	strcpy(t->parms.name, dev->name);
-	dev->rtnl_link_ops = &ip6_link_ops;
 
 	dev_hold(dev);
 	ip6_tnl_link(ip6n, t);
@@ -753,97 +754,157 @@
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
 
-/**
- * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
- *   @skb: received socket buffer
- *   @protocol: ethernet protocol ID
- *   @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
- *
- * Return: 0
- **/
+static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+			 const struct tnl_ptk_info *tpi,
+			 struct metadata_dst *tun_dst,
+			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+						const struct ipv6hdr *ipv6h,
+						struct sk_buff *skb),
+			 bool log_ecn_err)
+{
+	struct pcpu_sw_netstats *tstats;
+	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	int err;
 
-static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
-		       __u8 ipproto,
-		       int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
-						   const struct ipv6hdr *ipv6h,
-						   struct sk_buff *skb))
+	if ((!(tpi->flags & TUNNEL_CSUM) &&
+	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
+	    ((tpi->flags & TUNNEL_CSUM) &&
+	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
+		tunnel->dev->stats.rx_crc_errors++;
+		tunnel->dev->stats.rx_errors++;
+		goto drop;
+	}
+
+	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
+		if (!(tpi->flags & TUNNEL_SEQ) ||
+		    (tunnel->i_seqno &&
+		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
+			tunnel->dev->stats.rx_fifo_errors++;
+			tunnel->dev->stats.rx_errors++;
+			goto drop;
+		}
+		tunnel->i_seqno = ntohl(tpi->seq) + 1;
+	}
+
+	skb->protocol = tpi->proto;
+
+	/* Warning: All skb pointers will be invalidated! */
+	if (tunnel->dev->type == ARPHRD_ETHER) {
+		if (!pskb_may_pull(skb, ETH_HLEN)) {
+			tunnel->dev->stats.rx_length_errors++;
+			tunnel->dev->stats.rx_errors++;
+			goto drop;
+		}
+
+		ipv6h = ipv6_hdr(skb);
+		skb->protocol = eth_type_trans(skb, tunnel->dev);
+		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+	} else {
+		skb->dev = tunnel->dev;
+	}
+
+	skb_reset_network_header(skb);
+	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+
+	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
+
+	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
+	if (unlikely(err)) {
+		if (log_ecn_err)
+			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
+					     &ipv6h->saddr,
+					     ipv6_get_dsfield(ipv6h));
+		if (err > 1) {
+			++tunnel->dev->stats.rx_frame_errors;
+			++tunnel->dev->stats.rx_errors;
+			goto drop;
+		}
+	}
+
+	tstats = this_cpu_ptr(tunnel->dev->tstats);
+	u64_stats_update_begin(&tstats->syncp);
+	tstats->rx_packets++;
+	tstats->rx_bytes += skb->len;
+	u64_stats_update_end(&tstats->syncp);
+
+	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
+
+	gro_cells_receive(&tunnel->gro_cells, skb);
+	return 0;
+
+drop:
+	kfree_skb(skb);
+	return 0;
+}
+
+int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
+		const struct tnl_ptk_info *tpi,
+		struct metadata_dst *tun_dst,
+		bool log_ecn_err)
+{
+	return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
+			     log_ecn_err);
+}
+EXPORT_SYMBOL(ip6_tnl_rcv);
+
+static const struct tnl_ptk_info tpi_v6 = {
+	/* no tunnel info required for ipxip6. */
+	.proto = htons(ETH_P_IPV6),
+};
+
+static const struct tnl_ptk_info tpi_v4 = {
+	/* no tunnel info required for ipxip6. */
+	.proto = htons(ETH_P_IP),
+};
+
+static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
+		      const struct tnl_ptk_info *tpi,
+		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+						  const struct ipv6hdr *ipv6h,
+						  struct sk_buff *skb))
 {
 	struct ip6_tnl *t;
 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-	u8 tproto;
-	int err;
+	int ret = -1;
 
 	rcu_read_lock();
 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
+
 	if (t) {
-		struct pcpu_sw_netstats *tstats;
+		u8 tproto = ACCESS_ONCE(t->parms.proto);
 
-		tproto = ACCESS_ONCE(t->parms.proto);
-		if (tproto != ipproto && tproto != 0) {
-			rcu_read_unlock();
-			goto discard;
-		}
-
-		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-			rcu_read_unlock();
-			goto discard;
-		}
-
-		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
-			t->dev->stats.rx_dropped++;
-			rcu_read_unlock();
-			goto discard;
-		}
-		skb->mac_header = skb->network_header;
-		skb_reset_network_header(skb);
-		skb->protocol = htons(protocol);
-		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
-
-		__skb_tunnel_rx(skb, t->dev, t->net);
-
-		err = dscp_ecn_decapsulate(t, ipv6h, skb);
-		if (unlikely(err)) {
-			if (log_ecn_error)
-				net_info_ratelimited("non-ECT from %pI6 with dsfield=%#x\n",
-						     &ipv6h->saddr,
-						     ipv6_get_dsfield(ipv6h));
-			if (err > 1) {
-				++t->dev->stats.rx_frame_errors;
-				++t->dev->stats.rx_errors;
-				rcu_read_unlock();
-				goto discard;
-			}
-		}
-
-		tstats = this_cpu_ptr(t->dev->tstats);
-		u64_stats_update_begin(&tstats->syncp);
-		tstats->rx_packets++;
-		tstats->rx_bytes += skb->len;
-		u64_stats_update_end(&tstats->syncp);
-
-		netif_rx(skb);
-
-		rcu_read_unlock();
-		return 0;
+		if (tproto != ipproto && tproto != 0)
+			goto drop;
+		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+			goto drop;
+		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
+			goto drop;
+		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
+			goto drop;
+		ret = __ip6_tnl_rcv(t, skb, tpi, NULL, dscp_ecn_decapsulate,
+				    log_ecn_error);
 	}
-	rcu_read_unlock();
-	return 1;
 
-discard:
+	rcu_read_unlock();
+
+	return ret;
+
+drop:
+	rcu_read_unlock();
 	kfree_skb(skb);
 	return 0;
 }
 
 static int ip4ip6_rcv(struct sk_buff *skb)
 {
-	return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
-			   ip4ip6_dscp_ecn_decapsulate);
+	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
+			  ip4ip6_dscp_ecn_decapsulate);
 }
 
 static int ip6ip6_rcv(struct sk_buff *skb)
 {
-	return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
-			   ip6ip6_dscp_ecn_decapsulate);
+	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
+			  ip6ip6_dscp_ecn_decapsulate);
 }
 
 struct ipv6_tel_txoption {
@@ -918,13 +979,14 @@
 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
 
 /**
- * ip6_tnl_xmit2 - encapsulate packet and send
+ * ip6_tnl_xmit - encapsulate packet and send
  *   @skb: the outgoing socket buffer
  *   @dev: the outgoing tunnel device
  *   @dsfield: dscp code for outer header
- *   @fl: flow of tunneled packet
+ *   @fl6: flow of tunneled packet
  *   @encap_limit: encapsulation limit
  *   @pmtu: Path MTU is stored if packet is too big
+ *   @proto: next header value
  *
  * Description:
  *   Build new header and do some sanity checks on the packet before sending
@@ -936,12 +998,9 @@
  *   %-EMSGSIZE message too big. return mtu in this case.
  **/
 
-static int ip6_tnl_xmit2(struct sk_buff *skb,
-			 struct net_device *dev,
-			 __u8 dsfield,
-			 struct flowi6 *fl6,
-			 int encap_limit,
-			 __u32 *pmtu)
+int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
+		 __u8 proto)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net *net = t->net;
@@ -952,7 +1011,6 @@
 	struct net_device *tdev;
 	int mtu;
 	unsigned int max_headroom = sizeof(struct ipv6hdr);
-	u8 proto;
 	int err = -1;
 
 	/* NBMA tunnel */
@@ -1014,12 +1072,23 @@
 		mtu = IPV6_MIN_MTU;
 	if (skb_dst(skb))
 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		*pmtu = mtu;
 		err = -EMSGSIZE;
 		goto tx_err_dst_release;
 	}
 
+	if (t->err_count > 0) {
+		if (time_before(jiffies,
+				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
+			t->err_count--;
+
+			dst_link_failure(skb);
+		} else {
+			t->err_count = 0;
+		}
+	}
+
 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
 
 	/*
@@ -1045,9 +1114,6 @@
 		dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
 	skb_dst_set(skb, dst);
 
-	skb->transport_header = skb->network_header;
-
-	proto = fl6->flowi6_proto;
 	if (encap_limit >= 0) {
 		init_tel_txopt(&opt, encap_limit);
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
@@ -1058,6 +1124,11 @@
 		skb->encapsulation = 1;
 	}
 
+	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+			+ dst->header_len;
+	if (max_headroom > dev->needed_headroom)
+		dev->needed_headroom = max_headroom;
+
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
@@ -1076,6 +1147,7 @@
 	dst_release(dst);
 	return err;
 }
+EXPORT_SYMBOL(ip6_tnl_xmit);
 
 static inline int
 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1099,7 +1171,6 @@
 		encap_limit = t->parms.encap_limit;
 
 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-	fl6.flowi6_proto = IPPROTO_IPIP;
 
 	dsfield = ipv4_get_dsfield(iph);
 
@@ -1109,7 +1180,8 @@
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
 		fl6.flowi6_mark = skb->mark;
 
-	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+			   IPPROTO_IPIP);
 	if (err != 0) {
 		/* XXX: send ICMP error even if DF is not set. */
 		if (err == -EMSGSIZE)
@@ -1153,7 +1225,6 @@
 		encap_limit = t->parms.encap_limit;
 
 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-	fl6.flowi6_proto = IPPROTO_IPV6;
 
 	dsfield = ipv6_get_dsfield(ipv6h);
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
@@ -1163,7 +1234,8 @@
 	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
 		fl6.flowi6_mark = skb->mark;
 
-	err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
+	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+			   IPPROTO_IPV6);
 	if (err != 0) {
 		if (err == -EMSGSIZE)
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -1174,7 +1246,7 @@
 }
 
 static netdev_tx_t
-ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net_device_stats *stats = &t->dev->stats;
@@ -1370,6 +1442,8 @@
 	struct net *net = t->net;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
+	memset(&p1, 0, sizeof(p1));
+
 	switch (cmd) {
 	case SIOCGETTUNNEL:
 		if (dev == ip6n->fb_tnl_dev) {
@@ -1464,8 +1538,7 @@
  *   %-EINVAL if mtu too small
  **/
 
-static int
-ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct ip6_tnl *tnl = netdev_priv(dev);
 
@@ -1481,6 +1554,7 @@
 	dev->mtu = new_mtu;
 	return 0;
 }
+EXPORT_SYMBOL(ip6_tnl_change_mtu);
 
 int ip6_tnl_get_iflink(const struct net_device *dev)
 {
@@ -1493,7 +1567,7 @@
 static const struct net_device_ops ip6_tnl_netdev_ops = {
 	.ndo_init	= ip6_tnl_dev_init,
 	.ndo_uninit	= ip6_tnl_dev_uninit,
-	.ndo_start_xmit = ip6_tnl_xmit,
+	.ndo_start_xmit = ip6_tnl_start_xmit,
 	.ndo_do_ioctl	= ip6_tnl_ioctl,
 	.ndo_change_mtu = ip6_tnl_change_mtu,
 	.ndo_get_stats	= ip6_get_stats,
@@ -1549,13 +1623,25 @@
 		return -ENOMEM;
 
 	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
-	if (ret) {
-		free_percpu(dev->tstats);
-		dev->tstats = NULL;
-		return ret;
-	}
+	if (ret)
+		goto free_stats;
+
+	ret = gro_cells_init(&t->gro_cells, dev);
+	if (ret)
+		goto destroy_dst;
+
+	t->hlen = 0;
+	t->tun_hlen = 0;
 
 	return 0;
+
+destroy_dst:
+	dst_cache_destroy(&t->dst_cache);
+free_stats:
+	free_percpu(dev->tstats);
+	dev->tstats = NULL;
+
+	return ret;
 }
 
 /**
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index a10e771..f2e2013f8 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1984,10 +1984,10 @@
 
 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-			 IPSTATS_MIB_OUTFORWDATAGRAMS);
-	IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-			 IPSTATS_MIB_OUTOCTETS, skb->len);
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+			IPSTATS_MIB_OUTFORWDATAGRAMS);
+	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+			IPSTATS_MIB_OUTOCTETS, skb->len);
 	return dst_output(net, sk, skb);
 }
 
@@ -2268,7 +2268,7 @@
 	mfcs.mfcs_packets = c->mfc_un.res.pkt;
 	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
 	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
-	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
+	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) < 0)
 		return -EMSGSIZE;
 
 	rtm->rtm_type = RTN_MULTICAST;
@@ -2411,7 +2411,7 @@
 		      + nla_total_size(0)	/* RTA_MULTIPATH */
 		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
 						/* RTA_MFC_STATS */
-		      + nla_total_size(sizeof(struct rta_mfc_stats))
+		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
 		;
 
 	return len;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 4449ad1..a9895e1 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -407,7 +407,8 @@
 		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
 			break;
 
-		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+		opt = rcu_dereference_protected(np->opt,
+						lockdep_sock_is_held(sk));
 		opt = ipv6_renew_options(sk, opt, optname,
 					 (struct ipv6_opt_hdr __user *)optval,
 					 optlen);
@@ -471,7 +472,8 @@
 		struct ipv6_txoptions *opt = NULL;
 		struct msghdr msg;
 		struct flowi6 fl6;
-		int junk;
+		struct sockcm_cookie sockc_junk;
+		struct ipcm6_cookie ipc6;
 
 		memset(&fl6, 0, sizeof(fl6));
 		fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -501,9 +503,9 @@
 
 		msg.msg_controllen = optlen;
 		msg.msg_control = (void *)(opt+1);
+		ipc6.opt = opt;
 
-		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
-					     &junk, &junk);
+		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6, &sockc_junk);
 		if (retv)
 			goto done;
 update:
@@ -1123,7 +1125,8 @@
 		struct ipv6_txoptions *opt;
 
 		lock_sock(sk);
-		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+		opt = rcu_dereference_protected(np->opt,
+						lockdep_sock_is_held(sk));
 		len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
 		release_sock(sk);
 		/* check if ipv6_getsockopt_sticky() returns err code */
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 84f9baf..63e06c3 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -39,34 +39,12 @@
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
 MODULE_DESCRIPTION("IPv6 packet filter");
 
-/*#define DEBUG_IP_FIREWALL*/
-/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
-/*#define DEBUG_IP_FIREWALL_USER*/
-
-#ifdef DEBUG_IP_FIREWALL
-#define dprintf(format, args...) pr_info(format , ## args)
-#else
-#define dprintf(format, args...)
-#endif
-
-#ifdef DEBUG_IP_FIREWALL_USER
-#define duprintf(format, args...) pr_info(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
 #ifdef CONFIG_NETFILTER_DEBUG
 #define IP_NF_ASSERT(x)	WARN_ON(!(x))
 #else
 #define IP_NF_ASSERT(x)
 #endif
 
-#if 0
-/* All the better to debug you with... */
-#define static
-#define inline
-#endif
-
 void *ip6t_alloc_initial_table(const struct xt_table *info)
 {
 	return xt_alloc_initial_table(ip6t, IP6T);
@@ -100,35 +78,18 @@
 	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
 				       &ip6info->src), IP6T_INV_SRCIP) ||
 	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
-				       &ip6info->dst), IP6T_INV_DSTIP)) {
-		dprintf("Source or dest mismatch.\n");
-/*
-		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
-			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
-			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
-		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
-			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
-			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
+				       &ip6info->dst), IP6T_INV_DSTIP))
 		return false;
-	}
 
 	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
 
-	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
-		dprintf("VIA in mismatch (%s vs %s).%s\n",
-			indev, ip6info->iniface,
-			ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : "");
+	if (FWINV(ret != 0, IP6T_INV_VIA_IN))
 		return false;
-	}
 
 	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
 
-	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
-		dprintf("VIA out mismatch (%s vs %s).%s\n",
-			outdev, ip6info->outiface,
-			ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");
+	if (FWINV(ret != 0, IP6T_INV_VIA_OUT))
 		return false;
-	}
 
 /* ... might want to do something with class and flowlabel here ... */
 
@@ -145,11 +106,6 @@
 		}
 		*fragoff = _frag_off;
 
-		dprintf("Packet protocol %hi ?= %s%hi.\n",
-				protohdr,
-				ip6info->invflags & IP6T_INV_PROTO ? "!":"",
-				ip6info->proto);
-
 		if (ip6info->proto == protohdr) {
 			if (ip6info->invflags & IP6T_INV_PROTO)
 				return false;
@@ -169,16 +125,11 @@
 static bool
 ip6_checkentry(const struct ip6t_ip6 *ipv6)
 {
-	if (ipv6->flags & ~IP6T_F_MASK) {
-		duprintf("Unknown flag bits set: %08X\n",
-			 ipv6->flags & ~IP6T_F_MASK);
+	if (ipv6->flags & ~IP6T_F_MASK)
 		return false;
-	}
-	if (ipv6->invflags & ~IP6T_INV_MASK) {
-		duprintf("Unknown invflag bits set: %08X\n",
-			 ipv6->invflags & ~IP6T_INV_MASK);
+	if (ipv6->invflags & ~IP6T_INV_MASK)
 		return false;
-	}
+
 	return true;
 }
 
@@ -198,11 +149,12 @@
 
 /* All zeroes == unconditional rule. */
 /* Mildly perf critical (only if packet tracing is on) */
-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
+static inline bool unconditional(const struct ip6t_entry *e)
 {
 	static const struct ip6t_ip6 uncond;
 
-	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
+	return e->target_offset == sizeof(struct ip6t_entry) &&
+	       memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
 }
 
 static inline const struct xt_entry_target *
@@ -258,11 +210,10 @@
 	} else if (s == e) {
 		(*rulenum)++;
 
-		if (s->target_offset == sizeof(struct ip6t_entry) &&
+		if (unconditional(s) &&
 		    strcmp(t->target.u.kernel.target->name,
 			   XT_STANDARD_TARGET) == 0 &&
-		    t->verdict < 0 &&
-		    unconditional(&s->ipv6)) {
+		    t->verdict < 0) {
 			/* Tail of chains: STANDARD target (return/policy) */
 			*comment = *chainname == hookname
 				? comments[NF_IP6_TRACE_COMMENT_POLICY]
@@ -446,13 +397,21 @@
 	xt_write_recseq_end(addend);
 	local_bh_enable();
 
-#ifdef DEBUG_ALLOW_ALL
-	return NF_ACCEPT;
-#else
 	if (acpar.hotdrop)
 		return NF_DROP;
 	else return verdict;
-#endif
+}
+
+static bool find_jump_target(const struct xt_table_info *t,
+			     const struct ip6t_entry *target)
+{
+	struct ip6t_entry *iter;
+
+	xt_entry_foreach(iter, t->entries, t->size) {
+		 if (iter == target)
+			return true;
+	}
+	return false;
 }
 
 /* Figures out from what hook each rule can be called: returns 0 if
@@ -480,43 +439,27 @@
 				= (void *)ip6t_get_target_c(e);
 			int visited = e->comefrom & (1 << hook);
 
-			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
-				pr_err("iptables: loop hook %u pos %u %08X.\n",
-				       hook, pos, e->comefrom);
+			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
 				return 0;
-			}
+
 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
 
 			/* Unconditional return/END. */
-			if ((e->target_offset == sizeof(struct ip6t_entry) &&
+			if ((unconditional(e) &&
 			     (strcmp(t->target.u.user.name,
 				     XT_STANDARD_TARGET) == 0) &&
-			     t->verdict < 0 &&
-			     unconditional(&e->ipv6)) || visited) {
+			     t->verdict < 0) || visited) {
 				unsigned int oldpos, size;
 
 				if ((strcmp(t->target.u.user.name,
 					    XT_STANDARD_TARGET) == 0) &&
-				    t->verdict < -NF_MAX_VERDICT - 1) {
-					duprintf("mark_source_chains: bad "
-						"negative verdict (%i)\n",
-								t->verdict);
+				    t->verdict < -NF_MAX_VERDICT - 1)
 					return 0;
-				}
 
 				/* Return: backtrack through the last
 				   big jump. */
 				do {
 					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
-#ifdef DEBUG_IP_FIREWALL_USER
-					if (e->comefrom
-					    & (1 << NF_INET_NUMHOOKS)) {
-						duprintf("Back unset "
-							 "on hook %u "
-							 "rule %u\n",
-							 hook, pos);
-					}
-#endif
 					oldpos = pos;
 					pos = e->counters.pcnt;
 					e->counters.pcnt = 0;
@@ -533,6 +476,8 @@
 				size = e->next_offset;
 				e = (struct ip6t_entry *)
 					(entry0 + pos + size);
+				if (pos + size >= newinfo->size)
+					return 0;
 				e->counters.pcnt = pos;
 				pos += size;
 			} else {
@@ -541,19 +486,16 @@
 				if (strcmp(t->target.u.user.name,
 					   XT_STANDARD_TARGET) == 0 &&
 				    newpos >= 0) {
-					if (newpos > newinfo->size -
-						sizeof(struct ip6t_entry)) {
-						duprintf("mark_source_chains: "
-							"bad verdict (%i)\n",
-								newpos);
-						return 0;
-					}
 					/* This a jump; chase it. */
-					duprintf("Jump rule %u -> %u\n",
-						 pos, newpos);
+					e = (struct ip6t_entry *)
+						(entry0 + newpos);
+					if (!find_jump_target(newinfo, e))
+						return 0;
 				} else {
 					/* ... this is a fallthru */
 					newpos = pos + e->next_offset;
+					if (newpos >= newinfo->size)
+						return 0;
 				}
 				e = (struct ip6t_entry *)
 					(entry0 + newpos);
@@ -561,8 +503,7 @@
 				pos = newpos;
 			}
 		}
-next:
-		duprintf("Finished chain %u\n", hook);
+next:		;
 	}
 	return 1;
 }
@@ -580,43 +521,15 @@
 	module_put(par.match->me);
 }
 
-static int
-check_entry(const struct ip6t_entry *e, const char *name)
-{
-	const struct xt_entry_target *t;
-
-	if (!ip6_checkentry(&e->ipv6)) {
-		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
-		return -EINVAL;
-	}
-
-	if (e->target_offset + sizeof(struct xt_entry_target) >
-	    e->next_offset)
-		return -EINVAL;
-
-	t = ip6t_get_target_c(e);
-	if (e->target_offset + t->u.target_size > e->next_offset)
-		return -EINVAL;
-
-	return 0;
-}
-
 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
 {
 	const struct ip6t_ip6 *ipv6 = par->entryinfo;
-	int ret;
 
 	par->match     = m->u.kernel.match;
 	par->matchinfo = m->data;
 
-	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
-			     ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
-	if (ret < 0) {
-		duprintf("ip_tables: check failed for `%s'.\n",
-			 par.match->name);
-		return ret;
-	}
-	return 0;
+	return xt_check_match(par, m->u.match_size - sizeof(*m),
+			      ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
 }
 
 static int
@@ -627,10 +540,9 @@
 
 	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
 				      m->u.user.revision);
-	if (IS_ERR(match)) {
-		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
+	if (IS_ERR(match))
 		return PTR_ERR(match);
-	}
+
 	m->u.kernel.match = match;
 
 	ret = check_match(m, par);
@@ -655,17 +567,11 @@
 		.hook_mask = e->comefrom,
 		.family    = NFPROTO_IPV6,
 	};
-	int ret;
 
 	t = ip6t_get_target(e);
-	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
-	      e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
-	if (ret < 0) {
-		duprintf("ip_tables: check failed for `%s'.\n",
-			 t->u.kernel.target->name);
-		return ret;
-	}
-	return 0;
+	return xt_check_target(&par, t->u.target_size - sizeof(*t),
+			       e->ipv6.proto,
+			       e->ipv6.invflags & IP6T_INV_PROTO);
 }
 
 static int
@@ -678,14 +584,12 @@
 	unsigned int j;
 	struct xt_mtchk_param mtpar;
 	struct xt_entry_match *ematch;
+	unsigned long pcnt;
 
-	ret = check_entry(e, name);
-	if (ret)
-		return ret;
-
-	e->counters.pcnt = xt_percpu_counter_alloc();
-	if (IS_ERR_VALUE(e->counters.pcnt))
+	pcnt = xt_percpu_counter_alloc();
+	if (IS_ERR_VALUE(pcnt))
 		return -ENOMEM;
+	e->counters.pcnt = pcnt;
 
 	j = 0;
 	mtpar.net	= net;
@@ -704,7 +608,6 @@
 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
 					t->u.user.revision);
 	if (IS_ERR(target)) {
-		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
 		ret = PTR_ERR(target);
 		goto cleanup_matches;
 	}
@@ -733,7 +636,7 @@
 	const struct xt_entry_target *t;
 	unsigned int verdict;
 
-	if (!unconditional(&e->ipv6))
+	if (!unconditional(e))
 		return false;
 	t = ip6t_get_target_c(e);
 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
@@ -753,19 +656,24 @@
 			   unsigned int valid_hooks)
 {
 	unsigned int h;
+	int err;
 
 	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
-		duprintf("Bad offset %p\n", e);
+	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit)
 		return -EINVAL;
-	}
 
 	if (e->next_offset
-	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
-		duprintf("checking: element %p size %u\n",
-			 e, e->next_offset);
+	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target))
 		return -EINVAL;
-	}
+
+	if (!ip6_checkentry(&e->ipv6))
+		return -EINVAL;
+
+	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
+				     e->next_offset);
+	if (err)
+		return err;
 
 	/* Check hooks & underflows */
 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
@@ -774,12 +682,9 @@
 		if ((unsigned char *)e - base == hook_entries[h])
 			newinfo->hook_entry[h] = hook_entries[h];
 		if ((unsigned char *)e - base == underflows[h]) {
-			if (!check_underflow(e)) {
-				pr_err("Underflows must be unconditional and "
-				       "use the STANDARD target with "
-				       "ACCEPT/DROP\n");
+			if (!check_underflow(e))
 				return -EINVAL;
-			}
+
 			newinfo->underflow[h] = underflows[h];
 		}
 	}
@@ -831,7 +736,6 @@
 		newinfo->underflow[i] = 0xFFFFFFFF;
 	}
 
-	duprintf("translate_table: size %u\n", newinfo->size);
 	i = 0;
 	/* Walk through entries, checking offsets. */
 	xt_entry_foreach(iter, entry0, newinfo->size) {
@@ -848,27 +752,18 @@
 			++newinfo->stacksize;
 	}
 
-	if (i != repl->num_entries) {
-		duprintf("translate_table: %u not %u entries\n",
-			 i, repl->num_entries);
+	if (i != repl->num_entries)
 		return -EINVAL;
-	}
 
 	/* Check hooks all assigned */
 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
 		/* Only hooks which are valid */
 		if (!(repl->valid_hooks & (1 << i)))
 			continue;
-		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
-			duprintf("Invalid hook entry %u %u\n",
-				 i, repl->hook_entry[i]);
+		if (newinfo->hook_entry[i] == 0xFFFFFFFF)
 			return -EINVAL;
-		}
-		if (newinfo->underflow[i] == 0xFFFFFFFF) {
-			duprintf("Invalid underflow %u %u\n",
-				 i, repl->underflow[i]);
+		if (newinfo->underflow[i] == 0xFFFFFFFF)
 			return -EINVAL;
-		}
 	}
 
 	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
@@ -1096,11 +991,8 @@
 	struct xt_table *t;
 	int ret;
 
-	if (*len != sizeof(struct ip6t_getinfo)) {
-		duprintf("length %u != %zu\n", *len,
-			 sizeof(struct ip6t_getinfo));
+	if (*len != sizeof(struct ip6t_getinfo))
 		return -EINVAL;
-	}
 
 	if (copy_from_user(name, user, sizeof(name)) != 0)
 		return -EFAULT;
@@ -1158,30 +1050,24 @@
 	struct ip6t_get_entries get;
 	struct xt_table *t;
 
-	if (*len < sizeof(get)) {
-		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
+	if (*len < sizeof(get))
 		return -EINVAL;
-	}
 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
 		return -EFAULT;
-	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
-		duprintf("get_entries: %u != %zu\n",
-			 *len, sizeof(get) + get.size);
+	if (*len != sizeof(struct ip6t_get_entries) + get.size)
 		return -EINVAL;
-	}
+
+	get.name[sizeof(get.name) - 1] = '\0';
 
 	t = xt_find_table_lock(net, AF_INET6, get.name);
 	if (!IS_ERR_OR_NULL(t)) {
 		struct xt_table_info *private = t->private;
-		duprintf("t->private->number = %u\n", private->number);
 		if (get.size == private->size)
 			ret = copy_entries_to_user(private->size,
 						   t, uptr->entrytable);
-		else {
-			duprintf("get_entries: I've got %u not %u!\n",
-				 private->size, get.size);
+		else
 			ret = -EAGAIN;
-		}
+
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
@@ -1217,8 +1103,6 @@
 
 	/* You lied! */
 	if (valid_hooks != t->valid_hooks) {
-		duprintf("Valid hook crap: %08X vs %08X\n",
-			 valid_hooks, t->valid_hooks);
 		ret = -EINVAL;
 		goto put_module;
 	}
@@ -1228,8 +1112,6 @@
 		goto put_module;
 
 	/* Update module usage count based on number of rules */
-	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
-		oldinfo->number, oldinfo->initial_entries, newinfo->number);
 	if ((oldinfo->number > oldinfo->initial_entries) ||
 	    (newinfo->number <= oldinfo->initial_entries))
 		module_put(t->me);
@@ -1298,8 +1180,6 @@
 	if (ret != 0)
 		goto free_newinfo;
 
-	duprintf("ip_tables: Translated table\n");
-
 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
 			   tmp.num_counters, tmp.counters);
 	if (ret)
@@ -1321,55 +1201,16 @@
 	unsigned int i;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
-	unsigned int num_counters;
-	char *name;
-	int size;
-	void *ptmp;
 	struct xt_table *t;
 	const struct xt_table_info *private;
 	int ret = 0;
 	struct ip6t_entry *iter;
 	unsigned int addend;
-#ifdef CONFIG_COMPAT
-	struct compat_xt_counters_info compat_tmp;
 
-	if (compat) {
-		ptmp = &compat_tmp;
-		size = sizeof(struct compat_xt_counters_info);
-	} else
-#endif
-	{
-		ptmp = &tmp;
-		size = sizeof(struct xt_counters_info);
-	}
-
-	if (copy_from_user(ptmp, user, size) != 0)
-		return -EFAULT;
-
-#ifdef CONFIG_COMPAT
-	if (compat) {
-		num_counters = compat_tmp.num_counters;
-		name = compat_tmp.name;
-	} else
-#endif
-	{
-		num_counters = tmp.num_counters;
-		name = tmp.name;
-	}
-
-	if (len != size + num_counters * sizeof(struct xt_counters))
-		return -EINVAL;
-
-	paddc = vmalloc(len - size);
-	if (!paddc)
-		return -ENOMEM;
-
-	if (copy_from_user(paddc, user + size, len - size) != 0) {
-		ret = -EFAULT;
-		goto free;
-	}
-
-	t = xt_find_table_lock(net, AF_INET6, name);
+	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+	if (IS_ERR(paddc))
+		return PTR_ERR(paddc);
+	t = xt_find_table_lock(net, AF_INET6, tmp.name);
 	if (IS_ERR_OR_NULL(t)) {
 		ret = t ? PTR_ERR(t) : -ENOENT;
 		goto free;
@@ -1377,7 +1218,7 @@
 
 	local_bh_disable();
 	private = t->private;
-	if (private->number != num_counters) {
+	if (private->number != tmp.num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
@@ -1456,7 +1297,6 @@
 
 static int
 compat_find_calc_match(struct xt_entry_match *m,
-		       const char *name,
 		       const struct ip6t_ip6 *ipv6,
 		       int *size)
 {
@@ -1464,11 +1304,9 @@
 
 	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
 				      m->u.user.revision);
-	if (IS_ERR(match)) {
-		duprintf("compat_check_calc_match: `%s' not found\n",
-			 m->u.user.name);
+	if (IS_ERR(match))
 		return PTR_ERR(match);
-	}
+
 	m->u.kernel.match = match;
 	*size += xt_compat_match_offset(match);
 	return 0;
@@ -1491,34 +1329,29 @@
 				  struct xt_table_info *newinfo,
 				  unsigned int *size,
 				  const unsigned char *base,
-				  const unsigned char *limit,
-				  const unsigned int *hook_entries,
-				  const unsigned int *underflows,
-				  const char *name)
+				  const unsigned char *limit)
 {
 	struct xt_entry_match *ematch;
 	struct xt_entry_target *t;
 	struct xt_target *target;
 	unsigned int entry_offset;
 	unsigned int j;
-	int ret, off, h;
+	int ret, off;
 
-	duprintf("check_compat_entry_size_and_hooks %p\n", e);
 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
-	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
-		duprintf("Bad offset %p, limit = %p\n", e, limit);
+	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
+	    (unsigned char *)e + e->next_offset > limit)
 		return -EINVAL;
-	}
 
 	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
-			     sizeof(struct compat_xt_entry_target)) {
-		duprintf("checking: element %p size %u\n",
-			 e, e->next_offset);
+			     sizeof(struct compat_xt_entry_target))
 		return -EINVAL;
-	}
 
-	/* For purposes of check_entry casting the compat entry is fine */
-	ret = check_entry((struct ip6t_entry *)e, name);
+	if (!ip6_checkentry(&e->ipv6))
+		return -EINVAL;
+
+	ret = xt_compat_check_entry_offsets(e, e->elems,
+					    e->target_offset, e->next_offset);
 	if (ret)
 		return ret;
 
@@ -1526,7 +1359,7 @@
 	entry_offset = (void *)e - (void *)base;
 	j = 0;
 	xt_ematch_foreach(ematch, e) {
-		ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
+		ret = compat_find_calc_match(ematch, &e->ipv6, &off);
 		if (ret != 0)
 			goto release_matches;
 		++j;
@@ -1536,8 +1369,6 @@
 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
 					t->u.user.revision);
 	if (IS_ERR(target)) {
-		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
-			 t->u.user.name);
 		ret = PTR_ERR(target);
 		goto release_matches;
 	}
@@ -1549,17 +1380,6 @@
 	if (ret)
 		goto out;
 
-	/* Check hooks & underflows */
-	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
-		if ((unsigned char *)e - base == hook_entries[h])
-			newinfo->hook_entry[h] = hook_entries[h];
-		if ((unsigned char *)e - base == underflows[h])
-			newinfo->underflow[h] = underflows[h];
-	}
-
-	/* Clear counters and comefrom */
-	memset(&e->counters, 0, sizeof(e->counters));
-	e->comefrom = 0;
 	return 0;
 
 out:
@@ -1573,18 +1393,17 @@
 	return ret;
 }
 
-static int
+static void
 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
-			    unsigned int *size, const char *name,
+			    unsigned int *size,
 			    struct xt_table_info *newinfo, unsigned char *base)
 {
 	struct xt_entry_target *t;
 	struct ip6t_entry *de;
 	unsigned int origsize;
-	int ret, h;
+	int h;
 	struct xt_entry_match *ematch;
 
-	ret = 0;
 	origsize = *size;
 	de = (struct ip6t_entry *)*dstptr;
 	memcpy(de, e, sizeof(struct ip6t_entry));
@@ -1593,11 +1412,9 @@
 	*dstptr += sizeof(struct ip6t_entry);
 	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
 
-	xt_ematch_foreach(ematch, e) {
-		ret = xt_compat_match_from_user(ematch, dstptr, size);
-		if (ret != 0)
-			return ret;
-	}
+	xt_ematch_foreach(ematch, e)
+		xt_compat_match_from_user(ematch, dstptr, size);
+
 	de->target_offset = e->target_offset - (origsize - *size);
 	t = compat_ip6t_get_target(e);
 	xt_compat_target_from_user(t, dstptr, size);
@@ -1609,183 +1426,79 @@
 		if ((unsigned char *)de - base < newinfo->underflow[h])
 			newinfo->underflow[h] -= origsize - *size;
 	}
-	return ret;
-}
-
-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
-			      const char *name)
-{
-	unsigned int j;
-	int ret = 0;
-	struct xt_mtchk_param mtpar;
-	struct xt_entry_match *ematch;
-
-	e->counters.pcnt = xt_percpu_counter_alloc();
-	if (IS_ERR_VALUE(e->counters.pcnt))
-		return -ENOMEM;
-	j = 0;
-	mtpar.net	= net;
-	mtpar.table     = name;
-	mtpar.entryinfo = &e->ipv6;
-	mtpar.hook_mask = e->comefrom;
-	mtpar.family    = NFPROTO_IPV6;
-	xt_ematch_foreach(ematch, e) {
-		ret = check_match(ematch, &mtpar);
-		if (ret != 0)
-			goto cleanup_matches;
-		++j;
-	}
-
-	ret = check_target(e, net, name);
-	if (ret)
-		goto cleanup_matches;
-	return 0;
-
- cleanup_matches:
-	xt_ematch_foreach(ematch, e) {
-		if (j-- == 0)
-			break;
-		cleanup_match(ematch, net);
-	}
-
-	xt_percpu_counter_free(e->counters.pcnt);
-
-	return ret;
 }
 
 static int
 translate_compat_table(struct net *net,
-		       const char *name,
-		       unsigned int valid_hooks,
 		       struct xt_table_info **pinfo,
 		       void **pentry0,
-		       unsigned int total_size,
-		       unsigned int number,
-		       unsigned int *hook_entries,
-		       unsigned int *underflows)
+		       const struct compat_ip6t_replace *compatr)
 {
 	unsigned int i, j;
 	struct xt_table_info *newinfo, *info;
 	void *pos, *entry0, *entry1;
 	struct compat_ip6t_entry *iter0;
-	struct ip6t_entry *iter1;
+	struct ip6t_replace repl;
 	unsigned int size;
 	int ret = 0;
 
 	info = *pinfo;
 	entry0 = *pentry0;
-	size = total_size;
-	info->number = number;
+	size = compatr->size;
+	info->number = compatr->num_entries;
 
-	/* Init all hooks to impossible value. */
-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-		info->hook_entry[i] = 0xFFFFFFFF;
-		info->underflow[i] = 0xFFFFFFFF;
-	}
-
-	duprintf("translate_compat_table: size %u\n", info->size);
 	j = 0;
 	xt_compat_lock(AF_INET6);
-	xt_compat_init_offsets(AF_INET6, number);
+	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
 	/* Walk through entries, checking offsets. */
-	xt_entry_foreach(iter0, entry0, total_size) {
+	xt_entry_foreach(iter0, entry0, compatr->size) {
 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
 							entry0,
-							entry0 + total_size,
-							hook_entries,
-							underflows,
-							name);
+							entry0 + compatr->size);
 		if (ret != 0)
 			goto out_unlock;
 		++j;
 	}
 
 	ret = -EINVAL;
-	if (j != number) {
-		duprintf("translate_compat_table: %u not %u entries\n",
-			 j, number);
+	if (j != compatr->num_entries)
 		goto out_unlock;
-	}
-
-	/* Check hooks all assigned */
-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-		/* Only hooks which are valid */
-		if (!(valid_hooks & (1 << i)))
-			continue;
-		if (info->hook_entry[i] == 0xFFFFFFFF) {
-			duprintf("Invalid hook entry %u %u\n",
-				 i, hook_entries[i]);
-			goto out_unlock;
-		}
-		if (info->underflow[i] == 0xFFFFFFFF) {
-			duprintf("Invalid underflow %u %u\n",
-				 i, underflows[i]);
-			goto out_unlock;
-		}
-	}
 
 	ret = -ENOMEM;
 	newinfo = xt_alloc_table_info(size);
 	if (!newinfo)
 		goto out_unlock;
 
-	newinfo->number = number;
+	newinfo->number = compatr->num_entries;
 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
-		newinfo->hook_entry[i] = info->hook_entry[i];
-		newinfo->underflow[i] = info->underflow[i];
+		newinfo->hook_entry[i] = compatr->hook_entry[i];
+		newinfo->underflow[i] = compatr->underflow[i];
 	}
 	entry1 = newinfo->entries;
 	pos = entry1;
-	size = total_size;
-	xt_entry_foreach(iter0, entry0, total_size) {
-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
-						  name, newinfo, entry1);
-		if (ret != 0)
-			break;
-	}
+	size = compatr->size;
+	xt_entry_foreach(iter0, entry0, compatr->size)
+		compat_copy_entry_from_user(iter0, &pos, &size,
+					    newinfo, entry1);
+
+	/* all module references in entry0 are now gone. */
 	xt_compat_flush_offsets(AF_INET6);
 	xt_compat_unlock(AF_INET6);
+
+	memcpy(&repl, compatr, sizeof(*compatr));
+
+	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+		repl.hook_entry[i] = newinfo->hook_entry[i];
+		repl.underflow[i] = newinfo->underflow[i];
+	}
+
+	repl.num_counters = 0;
+	repl.counters = NULL;
+	repl.size = newinfo->size;
+	ret = translate_table(net, newinfo, entry1, &repl);
 	if (ret)
 		goto free_newinfo;
 
-	ret = -ELOOP;
-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
-		goto free_newinfo;
-
-	i = 0;
-	xt_entry_foreach(iter1, entry1, newinfo->size) {
-		ret = compat_check_entry(iter1, net, name);
-		if (ret != 0)
-			break;
-		++i;
-		if (strcmp(ip6t_get_target(iter1)->u.user.name,
-		    XT_ERROR_TARGET) == 0)
-			++newinfo->stacksize;
-	}
-	if (ret) {
-		/*
-		 * The first i matches need cleanup_entry (calls ->destroy)
-		 * because they had called ->check already. The other j-i
-		 * entries need only release.
-		 */
-		int skip = i;
-		j -= i;
-		xt_entry_foreach(iter0, entry0, newinfo->size) {
-			if (skip-- > 0)
-				continue;
-			if (j-- == 0)
-				break;
-			compat_release_entry(iter0);
-		}
-		xt_entry_foreach(iter1, entry1, newinfo->size) {
-			if (i-- == 0)
-				break;
-			cleanup_entry(iter1, net);
-		}
-		xt_free_table_info(newinfo);
-		return ret;
-	}
-
 	*pinfo = newinfo;
 	*pentry0 = entry1;
 	xt_free_table_info(info);
@@ -1793,17 +1506,16 @@
 
 free_newinfo:
 	xt_free_table_info(newinfo);
-out:
-	xt_entry_foreach(iter0, entry0, total_size) {
+	return ret;
+out_unlock:
+	xt_compat_flush_offsets(AF_INET6);
+	xt_compat_unlock(AF_INET6);
+	xt_entry_foreach(iter0, entry0, compatr->size) {
 		if (j-- == 0)
 			break;
 		compat_release_entry(iter0);
 	}
 	return ret;
-out_unlock:
-	xt_compat_flush_offsets(AF_INET6);
-	xt_compat_unlock(AF_INET6);
-	goto out;
 }
 
 static int
@@ -1819,8 +1531,6 @@
 		return -EFAULT;
 
 	/* overflow check */
-	if (tmp.size >= INT_MAX / num_possible_cpus())
-		return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
 	if (tmp.num_counters == 0)
@@ -1839,15 +1549,10 @@
 		goto free_newinfo;
 	}
 
-	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
-				     &newinfo, &loc_cpu_entry, tmp.size,
-				     tmp.num_entries, tmp.hook_entry,
-				     tmp.underflow);
+	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
 	if (ret != 0)
 		goto free_newinfo;
 
-	duprintf("compat_do_replace: Translated table\n");
-
 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
 			   tmp.num_counters, compat_ptr(tmp.counters));
 	if (ret)
@@ -1881,7 +1586,6 @@
 		break;
 
 	default:
-		duprintf("do_ip6t_set_ctl:  unknown request %i\n", cmd);
 		ret = -EINVAL;
 	}
 
@@ -1931,35 +1635,29 @@
 	struct compat_ip6t_get_entries get;
 	struct xt_table *t;
 
-	if (*len < sizeof(get)) {
-		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
+	if (*len < sizeof(get))
 		return -EINVAL;
-	}
 
 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
 		return -EFAULT;
 
-	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
-		duprintf("compat_get_entries: %u != %zu\n",
-			 *len, sizeof(get) + get.size);
+	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size)
 		return -EINVAL;
-	}
+
+	get.name[sizeof(get.name) - 1] = '\0';
 
 	xt_compat_lock(AF_INET6);
 	t = xt_find_table_lock(net, AF_INET6, get.name);
 	if (!IS_ERR_OR_NULL(t)) {
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
-		duprintf("t->private->number = %u\n", private->number);
 		ret = compat_table_info(private, &info);
-		if (!ret && get.size == info.size) {
+		if (!ret && get.size == info.size)
 			ret = compat_copy_entries_to_user(private->size,
 							  t, uptr->entrytable);
-		} else if (!ret) {
-			duprintf("compat_get_entries: I've got %u not %u!\n",
-				 private->size, get.size);
+		else if (!ret)
 			ret = -EAGAIN;
-		}
+
 		xt_compat_flush_offsets(AF_INET6);
 		module_put(t->me);
 		xt_table_unlock(t);
@@ -2012,7 +1710,6 @@
 		break;
 
 	default:
-		duprintf("do_ip6t_set_ctl:  unknown request %i\n", cmd);
 		ret = -EINVAL;
 	}
 
@@ -2064,7 +1761,6 @@
 	}
 
 	default:
-		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
 		ret = -EINVAL;
 	}
 
@@ -2166,7 +1862,6 @@
 		/* We've been asked to examine this packet, and we
 		 * can't.  Hence, no choice but to drop.
 		 */
-		duprintf("Dropping evil ICMP tinygram.\n");
 		par->hotdrop = true;
 		return false;
 	}
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 3deed58..06bed74 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -20,15 +20,16 @@
 #include <net/netfilter/nf_conntrack_synproxy.h>
 
 static struct ipv6hdr *
-synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
-				       const struct in6_addr *daddr)
+synproxy_build_ip(struct net *net, struct sk_buff *skb,
+		  const struct in6_addr *saddr,
+		  const struct in6_addr *daddr)
 {
 	struct ipv6hdr *iph;
 
 	skb_reset_network_header(skb);
 	iph = (struct ipv6hdr *)skb_put(skb, sizeof(*iph));
 	ip6_flow_hdr(iph, 0, 0);
-	iph->hop_limit	= 64;	//XXX
+	iph->hop_limit	= net->ipv6.devconf_all->hop_limit;
 	iph->nexthdr	= IPPROTO_TCP;
 	iph->saddr	= *saddr;
 	iph->daddr	= *daddr;
@@ -37,13 +38,12 @@
 }
 
 static void
-synproxy_send_tcp(const struct synproxy_net *snet,
+synproxy_send_tcp(struct net *net,
 		  const struct sk_buff *skb, struct sk_buff *nskb,
 		  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
 		  struct ipv6hdr *niph, struct tcphdr *nth,
 		  unsigned int tcp_hdr_size)
 {
-	struct net *net = nf_ct_net(snet->tmpl);
 	struct dst_entry *dst;
 	struct flowi6 fl6;
 
@@ -60,7 +60,7 @@
 	fl6.fl6_dport = nth->dest;
 	security_skb_classify_flow((struct sk_buff *)skb, flowi6_to_flowi(&fl6));
 	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst == NULL || dst->error) {
+	if (dst->error) {
 		dst_release(dst);
 		goto free_nskb;
 	}
@@ -84,7 +84,7 @@
 }
 
 static void
-synproxy_send_client_synack(const struct synproxy_net *snet,
+synproxy_send_client_synack(struct net *net,
 			    const struct sk_buff *skb, const struct tcphdr *th,
 			    const struct synproxy_options *opts)
 {
@@ -103,7 +103,7 @@
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr);
+	niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -121,15 +121,16 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 			  niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_server_syn(const struct synproxy_net *snet,
+synproxy_send_server_syn(struct net *net,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 const struct synproxy_options *opts, u32 recv_seq)
 {
+	struct synproxy_net *snet = synproxy_pernet(net);
 	struct sk_buff *nskb;
 	struct ipv6hdr *iph, *niph;
 	struct tcphdr *nth;
@@ -144,7 +145,7 @@
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr);
+	niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -165,12 +166,12 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+	synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
 			  niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_server_ack(const struct synproxy_net *snet,
+synproxy_send_server_ack(struct net *net,
 			 const struct ip_ct_tcp *state,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 const struct synproxy_options *opts)
@@ -189,7 +190,7 @@
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr);
+	niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -205,11 +206,11 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
-synproxy_send_client_ack(const struct synproxy_net *snet,
+synproxy_send_client_ack(struct net *net,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 const struct synproxy_options *opts)
 {
@@ -227,7 +228,7 @@
 		return;
 	skb_reserve(nskb, MAX_TCP_HEADER);
 
-	niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr);
+	niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr);
 
 	skb_reset_transport_header(nskb);
 	nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
@@ -243,15 +244,16 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 			  niph, nth, tcp_hdr_size);
 }
 
 static bool
-synproxy_recv_client_ack(const struct synproxy_net *snet,
+synproxy_recv_client_ack(struct net *net,
 			 const struct sk_buff *skb, const struct tcphdr *th,
 			 struct synproxy_options *opts, u32 recv_seq)
 {
+	struct synproxy_net *snet = synproxy_pernet(net);
 	int mss;
 
 	mss = __cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1);
@@ -267,7 +269,7 @@
 	if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
 		synproxy_check_timestamp_cookie(opts);
 
-	synproxy_send_server_syn(snet, skb, th, opts, recv_seq);
+	synproxy_send_server_syn(net, skb, th, opts, recv_seq);
 	return true;
 }
 
@@ -275,7 +277,8 @@
 synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_synproxy_info *info = par->targinfo;
-	struct synproxy_net *snet = synproxy_pernet(par->net);
+	struct net *net = par->net;
+	struct synproxy_net *snet = synproxy_pernet(net);
 	struct synproxy_options opts = {};
 	struct tcphdr *th, _th;
 
@@ -304,12 +307,12 @@
 					  XT_SYNPROXY_OPT_SACK_PERM |
 					  XT_SYNPROXY_OPT_ECN);
 
-		synproxy_send_client_synack(snet, skb, th, &opts);
+		synproxy_send_client_synack(net, skb, th, &opts);
 		return NF_DROP;
 
 	} else if (th->ack && !(th->fin || th->rst || th->syn)) {
 		/* ACK from client */
-		synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
+		synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq));
 		return NF_DROP;
 	}
 
@@ -320,7 +323,8 @@
 				       struct sk_buff *skb,
 				       const struct nf_hook_state *nhs)
 {
-	struct synproxy_net *snet = synproxy_pernet(nhs->net);
+	struct net *net = nhs->net;
+	struct synproxy_net *snet = synproxy_pernet(net);
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
 	struct nf_conn_synproxy *synproxy;
@@ -384,7 +388,7 @@
 			 * therefore we need to add 1 to make the SYN sequence
 			 * number match the one of first SYN.
 			 */
-			if (synproxy_recv_client_ack(snet, skb, th, &opts,
+			if (synproxy_recv_client_ack(net, skb, th, &opts,
 						     ntohl(th->seq) + 1))
 				this_cpu_inc(snet->stats->cookie_retrans);
 
@@ -410,12 +414,12 @@
 				  XT_SYNPROXY_OPT_SACK_PERM);
 
 		swap(opts.tsval, opts.tsecr);
-		synproxy_send_server_ack(snet, state, skb, th, &opts);
+		synproxy_send_server_ack(net, state, skb, th, &opts);
 
 		nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
 
 		swap(opts.tsval, opts.tsecr);
-		synproxy_send_client_ack(snet, skb, th, &opts);
+		synproxy_send_client_ack(net, skb, th, &opts);
 
 		consume_skb(skb);
 		return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 4709f65..a540022 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -158,7 +158,7 @@
 	fl6.fl6_dport = otcph->source;
 	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
 	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst == NULL || dst->error) {
+	if (dst->error) {
 		dst_release(dst);
 		return;
 	}
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index c382db7..3ee3e44 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -58,10 +58,11 @@
 	int iif = 0;
 	struct flowi6 fl6;
 	int err;
-	int hlimit;
 	struct dst_entry *dst;
 	struct rt6_info *rt;
 	struct pingfakehdr pfh;
+	struct sockcm_cookie junk = {0};
+	struct ipcm6_cookie ipc6;
 
 	pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
 
@@ -138,13 +139,15 @@
 	pfh.wcheck = 0;
 	pfh.family = AF_INET6;
 
-	hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+	ipc6.tclass = np->tclass;
+	ipc6.dontfrag = np->dontfrag;
+	ipc6.opt = NULL;
 
 	lock_sock(sk);
 	err = ip6_append_data(sk, ping_getfrag, &pfh, len,
-			      0, hlimit,
-			      np->tclass, NULL, &fl6, rt,
-			      MSG_DONTWAIT, np->dontfrag);
+			      0, &ipc6, &fl6, rt,
+			      MSG_DONTWAIT, &junk);
 
 	if (err) {
 		ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fa59dd7..896350d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -745,10 +745,9 @@
 	struct dst_entry *dst = NULL;
 	struct raw6_frag_vec rfv;
 	struct flowi6 fl6;
+	struct sockcm_cookie sockc;
+	struct ipcm6_cookie ipc6;
 	int addr_len = msg->msg_namelen;
-	int hlimit = -1;
-	int tclass = -1;
-	int dontfrag = -1;
 	u16 proto;
 	int err;
 
@@ -769,6 +768,11 @@
 
 	fl6.flowi6_mark = sk->sk_mark;
 
+	ipc6.hlimit = -1;
+	ipc6.tclass = -1;
+	ipc6.dontfrag = -1;
+	ipc6.opt = NULL;
+
 	if (sin6) {
 		if (addr_len < SIN6_LEN_RFC2133)
 			return -EINVAL;
@@ -821,13 +825,14 @@
 	if (fl6.flowi6_oif == 0)
 		fl6.flowi6_oif = sk->sk_bound_dev_if;
 
+	sockc.tsflags = sk->sk_tsflags;
 	if (msg->msg_controllen) {
 		opt = &opt_space;
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(struct ipv6_txoptions);
+		ipc6.opt = opt;
 
-		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-					    &hlimit, &tclass, &dontfrag);
+		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
@@ -843,7 +848,7 @@
 	if (!opt) {
 		opt = txopt_get(np);
 		opt_to_free = opt;
-		}
+	}
 	if (flowlabel)
 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 	opt = ipv6_fixup_options(&opt_space, opt);
@@ -878,14 +883,14 @@
 		err = PTR_ERR(dst);
 		goto out;
 	}
-	if (hlimit < 0)
-		hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+	if (ipc6.hlimit < 0)
+		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
-	if (tclass < 0)
-		tclass = np->tclass;
+	if (ipc6.tclass < 0)
+		ipc6.tclass = np->tclass;
 
-	if (dontfrag < 0)
-		dontfrag = np->dontfrag;
+	if (ipc6.dontfrag < 0)
+		ipc6.dontfrag = np->dontfrag;
 
 	if (msg->msg_flags&MSG_CONFIRM)
 		goto do_confirm;
@@ -894,10 +899,11 @@
 	if (inet->hdrincl)
 		err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
 	else {
+		ipc6.opt = opt;
 		lock_sock(sk);
 		err = ip6_append_data(sk, raw6_getfrag, &rfv,
-			len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst,
-			msg->msg_flags, dontfrag);
+			len, 0, &ipc6, &fl6, (struct rt6_info *)dst,
+			msg->msg_flags, &sockc);
 
 		if (err)
 			ip6_flush_pending_frames(sk);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index e2ea311..2160d5d 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -145,12 +145,12 @@
 	if (!dev)
 		goto out_rcu_unlock;
 
-	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 
 	if (inet_frag_evicting(&fq->q))
 		goto out_rcu_unlock;
 
-	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
 
 	/* Don't send error if the first segment did not arrive. */
 	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
@@ -223,8 +223,8 @@
 			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 
 	if ((unsigned int)end > IPV6_MAXPLEN) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-				 IPSTATS_MIB_INHDRERRORS);
+		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+				IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 				  ((u8 *)&fhdr->frag_off -
 				   skb_network_header(skb)));
@@ -258,8 +258,8 @@
 			/* RFC2460 says always send parameter problem in
 			 * this case. -DaveM
 			 */
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-					 IPSTATS_MIB_INHDRERRORS);
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+					IPSTATS_MIB_INHDRERRORS);
 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 					  offsetof(struct ipv6hdr, payload_len));
 			return -1;
@@ -361,8 +361,8 @@
 discard_fq:
 	inet_frag_kill(&fq->q, &ip6_frags);
 err:
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-			 IPSTATS_MIB_REASMFAILS);
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+			IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -1;
 }
@@ -500,7 +500,7 @@
 			   skb_network_header_len(head));
 
 	rcu_read_lock();
-	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
 	rcu_read_unlock();
 	fq->q.fragments = NULL;
 	fq->q.fragments_tail = NULL;
@@ -513,7 +513,7 @@
 	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
 out_fail:
 	rcu_read_lock();
-	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 	rcu_read_unlock();
 	return -1;
 }
@@ -528,7 +528,7 @@
 	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
 		goto fail_hdr;
 
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
 
 	/* Jumbo payload inhibits frag. header */
 	if (hdr->payload_len == 0)
@@ -544,8 +544,8 @@
 	if (!(fhdr->frag_off & htons(0xFFF9))) {
 		/* It is not a fragmented frame */
 		skb->transport_header += sizeof(struct frag_hdr);
-		IP6_INC_STATS_BH(net,
-				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
+		__IP6_INC_STATS(net,
+				ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
 
 		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
 		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
@@ -566,13 +566,13 @@
 		return ret;
 	}
 
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -1;
 
 fail_hdr:
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-			 IPSTATS_MIB_INHDRERRORS);
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+			IPSTATS_MIB_INHDRERRORS);
 	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
 	return -1;
 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ed44663..969913d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -338,9 +338,9 @@
 	return rt;
 }
 
-static struct rt6_info *ip6_dst_alloc(struct net *net,
-				      struct net_device *dev,
-				      int flags)
+struct rt6_info *ip6_dst_alloc(struct net *net,
+			       struct net_device *dev,
+			       int flags)
 {
 	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
@@ -364,6 +364,7 @@
 
 	return rt;
 }
+EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
@@ -1189,7 +1190,7 @@
 	struct dst_entry *dst;
 	bool any_src;
 
-	dst = l3mdev_rt6_dst_by_oif(net, fl6);
+	dst = l3mdev_get_rt6_dst(net, fl6);
 	if (dst)
 		return dst;
 
@@ -1417,8 +1418,20 @@
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+	struct dst_entry *dst;
+
 	ip6_update_pmtu(skb, sock_net(sk), mtu,
 			sk->sk_bound_dev_if, sk->sk_mark);
+
+	dst = __sk_dst_get(sk);
+	if (!dst || !dst->obsolete ||
+	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
+		return;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+		ip6_datagram_dst_update(sk, false);
+	bh_unlock_sock(sk);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
 
@@ -1737,6 +1750,8 @@
 		} else {
 			val = nla_get_u32(nla);
 		}
+		if (type == RTAX_HOPLIMIT && val > 255)
+			val = 255;
 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
 			goto err;
 
@@ -1756,6 +1771,37 @@
 	return -EINVAL;
 }
 
+static struct rt6_info *ip6_nh_lookup_table(struct net *net,
+					    struct fib6_config *cfg,
+					    const struct in6_addr *gw_addr)
+{
+	struct flowi6 fl6 = {
+		.flowi6_oif = cfg->fc_ifindex,
+		.daddr = *gw_addr,
+		.saddr = cfg->fc_prefsrc,
+	};
+	struct fib6_table *table;
+	struct rt6_info *rt;
+	int flags = 0;
+
+	table = fib6_get_table(net, cfg->fc_table);
+	if (!table)
+		return NULL;
+
+	if (!ipv6_addr_any(&cfg->fc_prefsrc))
+		flags |= RT6_LOOKUP_F_HAS_SADDR;
+
+	rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);
+
+	/* if table lookup failed, fall back to full lookup */
+	if (rt == net->ipv6.ip6_null_entry) {
+		ip6_rt_put(rt);
+		rt = NULL;
+	}
+
+	return rt;
+}
+
 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
 {
 	struct net *net = cfg->fc_nlinfo.nl_net;
@@ -1927,7 +1973,7 @@
 		rt->rt6i_gateway = *gw_addr;
 
 		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
-			struct rt6_info *grt;
+			struct rt6_info *grt = NULL;
 
 			/* IPv6 strictly inhibits using not link-local
 			   addresses as nexthop address.
@@ -1939,7 +1985,12 @@
 			if (!(gwa_type & IPV6_ADDR_UNICAST))
 				goto out;
 
-			grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
+			if (cfg->fc_table)
+				grt = ip6_nh_lookup_table(net, cfg, gw_addr);
+
+			if (!grt)
+				grt = rt6_lookup(net, gw_addr, NULL,
+						 cfg->fc_ifindex, 1);
 
 			err = -EHOSTUNREACH;
 			if (!grt)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8338430..a13d8c1 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -913,10 +913,9 @@
 		goto tx_error;
 	}
 
-	skb = iptunnel_handle_offloads(skb, SKB_GSO_SIT);
-	if (IS_ERR(skb)) {
+	if (iptunnel_handle_offloads(skb, SKB_GSO_SIT)) {
 		ip_rt_put(rt);
-		goto out;
+		goto tx_error;
 	}
 
 	if (df) {
@@ -992,7 +991,6 @@
 	dst_link_failure(skb);
 tx_error:
 	kfree_skb(skb);
-out:
 	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
@@ -1002,15 +1000,15 @@
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	const struct iphdr  *tiph = &tunnel->parms.iph;
 
-	skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
-	if (IS_ERR(skb))
-		goto out;
+	if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
+		goto tx_error;
 
 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
 	ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
 	return NETDEV_TX_OK;
-out:
+tx_error:
+	kfree_skb(skb);
 	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index aab91fa..59c4839 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -155,11 +155,11 @@
 
 	mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
 	if (mss == 0) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
 	/* check for timestamp cookie support */
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 711d209..79e33e0 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -234,7 +234,7 @@
 	fl6.fl6_dport = usin->sin6_port;
 	fl6.fl6_sport = inet->inet_sport;
 
-	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
 
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
@@ -336,8 +336,8 @@
 					skb->dev->ifindex);
 
 	if (!sk) {
-		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
-				   ICMP6_MIB_INERRORS);
+		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+				  ICMP6_MIB_INERRORS);
 		return;
 	}
 
@@ -352,13 +352,13 @@
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 
 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto out;
 	}
 
@@ -368,7 +368,7 @@
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -439,7 +439,7 @@
 			      struct flowi *fl,
 			      struct request_sock *req,
 			      struct tcp_fastopen_cookie *foc,
-			      bool attach_req)
+			      enum tcp_synack_type synack_type)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -452,7 +452,7 @@
 					       IPPROTO_TCP)) == NULL)
 		goto done;
 
-	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
+	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
 
 	if (skb) {
 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -649,12 +649,12 @@
 		return false;
 
 	if (hash_expected && !hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}
 
 	if (!hash_expected && hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}
 
@@ -810,8 +810,13 @@
 	fl6.flowi6_proto = IPPROTO_TCP;
 	if (rt6_need_strict(&fl6.daddr) && !oif)
 		fl6.flowi6_oif = tcp_v6_iif(skb);
-	else
+	else {
+		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+			oif = skb->skb_iif;
+
 		fl6.flowi6_oif = oif;
+	}
+
 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 	fl6.fl6_dport = t1->dest;
 	fl6.fl6_sport = t1->source;
@@ -825,9 +830,9 @@
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
-		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 		if (rst)
-			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 		return;
 	}
 
@@ -858,6 +863,7 @@
 		return;
 
 #ifdef CONFIG_TCP_MD5SIG
+	rcu_read_lock();
 	hash_location = tcp_parse_md5sig_option(th);
 	if (sk && sk_fullsock(sk)) {
 		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
@@ -875,16 +881,15 @@
 					   th->source, &ipv6h->daddr,
 					   ntohs(th->source), tcp_v6_iif(skb));
 		if (!sk1)
-			return;
+			goto out;
 
-		rcu_read_lock();
 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
 		if (!key)
-			goto release_sk1;
+			goto out;
 
 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
-			goto release_sk1;
+			goto out;
 	}
 #endif
 
@@ -898,11 +903,8 @@
 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 
 #ifdef CONFIG_TCP_MD5SIG
-release_sk1:
-	if (sk1) {
-		rcu_read_unlock();
-		sock_put(sk1);
-	}
+out:
+	rcu_read_unlock();
 #endif
 }
 
@@ -967,7 +969,7 @@
 				&tcp_request_sock_ipv6_ops, sk, skb);
 
 drop:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return 0; /* don't send reset */
 }
 
@@ -1168,11 +1170,11 @@
 	return newsk;
 
 out_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
 	dst_release(dst);
 out:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	tcp_listendrop(sk);
 	return NULL;
 }
 
@@ -1279,8 +1281,8 @@
 	kfree_skb(skb);
 	return 0;
 csum_err:
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 	goto discard;
 
 
@@ -1351,6 +1353,7 @@
 {
 	const struct tcphdr *th;
 	const struct ipv6hdr *hdr;
+	bool refcounted;
 	struct sock *sk;
 	int ret;
 	struct net *net = dev_net(skb->dev);
@@ -1361,14 +1364,14 @@
 	/*
 	 *	Count it even if it's bad.
 	 */
-	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
+	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
 
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
 
-	th = tcp_hdr(skb);
+	th = (const struct tcphdr *)skb->data;
 
-	if (th->doff < sizeof(struct tcphdr)/4)
+	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
 		goto bad_packet;
 	if (!pskb_may_pull(skb, th->doff*4))
 		goto discard_it;
@@ -1376,12 +1379,13 @@
 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
 		goto csum_error;
 
-	th = tcp_hdr(skb);
+	th = (const struct tcphdr *)skb->data;
 	hdr = ipv6_hdr(skb);
 
 lookup:
 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
-				th->source, th->dest, inet6_iif(skb));
+				th->source, th->dest, inet6_iif(skb),
+				&refcounted);
 	if (!sk)
 		goto no_tcp_socket;
 
@@ -1404,6 +1408,7 @@
 			goto lookup;
 		}
 		sock_hold(sk);
+		refcounted = true;
 		nsk = tcp_check_req(sk, skb, req, false);
 		if (!nsk) {
 			reqsk_put(req);
@@ -1421,7 +1426,7 @@
 		}
 	}
 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
 	}
 
@@ -1454,13 +1459,14 @@
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
 
 put_and_return:
-	sock_put(sk);
+	if (refcounted)
+		sock_put(sk);
 	return ret ? -1 : 0;
 
 no_tcp_socket:
@@ -1471,9 +1477,9 @@
 
 	if (tcp_checksum_complete(skb)) {
 csum_error:
-		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
+		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
 bad_packet:
-		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+		__TCP_INC_STATS(net, TCP_MIB_INERRS);
 	} else {
 		tcp_v6_send_reset(NULL, skb);
 	}
@@ -1483,7 +1489,9 @@
 	return 0;
 
 discard_and_relse:
-	sock_put(sk);
+	sk_drops_add(sk, skb);
+	if (refcounted)
+		sock_put(sk);
 	goto discard_it;
 
 do_time_wait:
@@ -1514,6 +1522,7 @@
 			inet_twsk_deschedule_put(tw);
 			sk = sk2;
 			tcp_v6_restore_cb(skb);
+			refcounted = false;
 			goto process;
 		}
 		/* Fall through to ACK */
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index fd25e44..2ba6a77 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -213,37 +213,28 @@
 		struct sk_buff *skb)
 {
 	struct sock *sk, *result;
-	struct hlist_nulls_node *node;
 	int score, badness, matches = 0, reuseport = 0;
-	bool select_ok = true;
 	u32 hash = 0;
 
-begin:
 	result = NULL;
 	badness = -1;
-	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
+	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
 		score = compute_score2(sk, net, saddr, sport,
 				      daddr, hnum, dif);
 		if (score > badness) {
-			result = sk;
-			badness = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
-				if (select_ok) {
-					struct sock *sk2;
 
-					sk2 = reuseport_select_sock(sk, hash, skb,
+				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-					if (sk2) {
-						result = sk2;
-						select_ok = false;
-						goto found;
-					}
-				}
+				if (result)
+					return result;
 				matches = 1;
 			}
+			result = sk;
+			badness = score;
 		} else if (score == badness && reuseport) {
 			matches++;
 			if (reciprocal_scale(hash, matches) == 0)
@@ -251,27 +242,10 @@
 			hash = next_pseudo_random32(hash);
 		}
 	}
-	/*
-	 * if the nulls value we got at the end of this lookup is
-	 * not the expected one, we must restart lookup.
-	 * We probably met an item that was moved to another chain.
-	 */
-	if (get_nulls_value(node) != slot2)
-		goto begin;
-
-	if (result) {
-found:
-		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
-			result = NULL;
-		else if (unlikely(compute_score2(result, net, saddr, sport,
-				  daddr, hnum, dif) < badness)) {
-			sock_put(result);
-			goto begin;
-		}
-	}
 	return result;
 }
 
+/* rcu_read_lock() must be held */
 struct sock *__udp6_lib_lookup(struct net *net,
 				      const struct in6_addr *saddr, __be16 sport,
 				      const struct in6_addr *daddr, __be16 dport,
@@ -279,15 +253,12 @@
 				      struct sk_buff *skb)
 {
 	struct sock *sk, *result;
-	struct hlist_nulls_node *node;
 	unsigned short hnum = ntohs(dport);
 	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
 	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 	int score, badness, matches = 0, reuseport = 0;
-	bool select_ok = true;
 	u32 hash = 0;
 
-	rcu_read_lock();
 	if (hslot->count > 10) {
 		hash2 = udp6_portaddr_hash(net, daddr, hnum);
 		slot2 = hash2 & udptable->mask;
@@ -309,34 +280,26 @@
 						  &in6addr_any, hnum, dif,
 						  hslot2, slot2, skb);
 		}
-		rcu_read_unlock();
 		return result;
 	}
 begin:
 	result = NULL;
 	badness = -1;
-	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+	sk_for_each_rcu(sk, &hslot->head) {
 		score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
 		if (score > badness) {
-			result = sk;
-			badness = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
-				if (select_ok) {
-					struct sock *sk2;
-
-					sk2 = reuseport_select_sock(sk, hash, skb,
+				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-					if (sk2) {
-						result = sk2;
-						select_ok = false;
-						goto found;
-					}
-				}
+				if (result)
+					return result;
 				matches = 1;
 			}
+			result = sk;
+			badness = score;
 		} else if (score == badness && reuseport) {
 			matches++;
 			if (reciprocal_scale(hash, matches) == 0)
@@ -344,25 +307,6 @@
 			hash = next_pseudo_random32(hash);
 		}
 	}
-	/*
-	 * if the nulls value we got at the end of this lookup is
-	 * not the expected one, we must restart lookup.
-	 * We probably met an item that was moved to another chain.
-	 */
-	if (get_nulls_value(node) != slot)
-		goto begin;
-
-	if (result) {
-found:
-		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
-			result = NULL;
-		else if (unlikely(compute_score(result, net, hnum, saddr, sport,
-					daddr, dport, dif) < badness)) {
-			sock_put(result);
-			goto begin;
-		}
-	}
-	rcu_read_unlock();
 	return result;
 }
 EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
@@ -371,23 +315,46 @@
 					  __be16 sport, __be16 dport,
 					  struct udp_table *udptable)
 {
-	struct sock *sk;
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct sock *sk;
 
 	sk = skb_steal_sock(skb);
 	if (unlikely(sk))
 		return sk;
-	return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
+	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
 				 &iph->daddr, dport, inet6_iif(skb),
 				 udptable, skb);
 }
 
+struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+				 __be16 sport, __be16 dport)
+{
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
+
+	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
+				 &iph->daddr, dport, inet6_iif(skb),
+				 &udp_table, skb);
+}
+EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
+
+/* Must be called under rcu_read_lock().
+ * Does increment socket refcount.
+ */
+#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
+    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY)
 struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
 			     const struct in6_addr *daddr, __be16 dport, int dif)
 {
-	return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table, NULL);
+	struct sock *sk;
+
+	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
+				dif, &udp_table, NULL);
+	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+		sk = NULL;
+	return sk;
 }
 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
+#endif
 
 /*
  *	This should be easy, if there is something there we
@@ -401,7 +368,7 @@
 	struct inet_sock *inet = inet_sk(sk);
 	struct sk_buff *skb;
 	unsigned int ulen, copied;
-	int peeked, off = 0;
+	int peeked, peeking, off;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
 	bool checksum_valid = false;
@@ -415,15 +382,16 @@
 		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 
 try_again:
+	peeking = off = sk_peek_offset(sk, flags);
 	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
 				  &peeked, &off, &err);
 	if (!skb)
-		goto out;
+		return err;
 
-	ulen = skb->len - sizeof(struct udphdr);
+	ulen = skb->len;
 	copied = len;
-	if (copied > ulen)
-		copied = ulen;
+	if (copied > ulen - off)
+		copied = ulen - off;
 	else if (copied < ulen)
 		msg->msg_flags |= MSG_TRUNC;
 
@@ -435,17 +403,16 @@
 	 * coverage checksum (UDP-Lite), do it before the copy.
 	 */
 
-	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) {
 		checksum_valid = !udp_lib_checksum_complete(skb);
 		if (!checksum_valid)
 			goto csum_copy_err;
 	}
 
 	if (checksum_valid || skb_csum_unnecessary(skb))
-		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
-					    msg, copied);
+		err = skb_copy_datagram_msg(skb, off, msg, copied);
 	else {
-		err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
+		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
 		if (err == -EINVAL)
 			goto csum_copy_err;
 	}
@@ -454,23 +421,22 @@
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
 			if (is_udp4)
-				UDP_INC_STATS_USER(sock_net(sk),
-						   UDP_MIB_INERRORS,
-						   is_udplite);
+				UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+					      is_udplite);
 			else
-				UDP6_INC_STATS_USER(sock_net(sk),
-						    UDP_MIB_INERRORS,
-						    is_udplite);
+				UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+					       is_udplite);
 		}
-		goto out_free;
+		skb_free_datagram_locked(sk, skb);
+		return err;
 	}
 	if (!peeked) {
 		if (is_udp4)
-			UDP_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_INDATAGRAMS, is_udplite);
+			UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
+				      is_udplite);
 		else
-			UDP6_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_INDATAGRAMS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
+				       is_udplite);
 	}
 
 	sock_recv_ts_and_drops(msg, sk, skb);
@@ -510,24 +476,22 @@
 	if (flags & MSG_TRUNC)
 		err = ulen;
 
-out_free:
-	skb_free_datagram_locked(sk, skb);
-out:
+	__skb_free_datagram_locked(sk, skb, peeking ? -err : err);
 	return err;
 
 csum_copy_err:
 	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags)) {
 		if (is_udp4) {
-			UDP_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_CSUMERRORS, is_udplite);
-			UDP_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_INERRORS, is_udplite);
+			UDP_INC_STATS(sock_net(sk),
+				      UDP_MIB_CSUMERRORS, is_udplite);
+			UDP_INC_STATS(sock_net(sk),
+				      UDP_MIB_INERRORS, is_udplite);
 		} else {
-			UDP6_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_CSUMERRORS, is_udplite);
-			UDP6_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_INERRORS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk),
+				       UDP_MIB_CSUMERRORS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk),
+				       UDP_MIB_INERRORS, is_udplite);
 		}
 	}
 	unlock_sock_fast(sk, slow);
@@ -555,8 +519,8 @@
 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
 			       inet6_iif(skb), udptable, skb);
 	if (!sk) {
-		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
-				   ICMP6_MIB_INERRORS);
+		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+				  ICMP6_MIB_INERRORS);
 		return;
 	}
 
@@ -585,7 +549,7 @@
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
-	sock_put(sk);
+	return;
 }
 
 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -598,15 +562,15 @@
 		sk_incoming_cpu_update(sk);
 	}
 
-	rc = sock_queue_rcv_skb(sk, skb);
+	rc = __sock_queue_rcv_skb(sk, skb);
 	if (rc < 0) {
 		int is_udplite = IS_UDPLITE(sk);
 
 		/* Note that an ENOMEM error is charged twice */
 		if (rc == -ENOMEM)
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_RCVBUFERRORS, is_udplite);
-		UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk),
+					 UDP_MIB_RCVBUFERRORS, is_udplite);
+		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -662,9 +626,9 @@
 
 			ret = encap_rcv(sk, skb);
 			if (ret <= 0) {
-				UDP_INC_STATS_BH(sock_net(sk),
-						 UDP_MIB_INDATAGRAMS,
-						 is_udplite);
+				__UDP_INC_STATS(sock_net(sk),
+						UDP_MIB_INDATAGRAMS,
+						is_udplite);
 				return -ret;
 			}
 		}
@@ -692,11 +656,14 @@
 	if (rcu_access_pointer(sk->sk_filter)) {
 		if (udp_lib_checksum_complete(skb))
 			goto csum_error;
+		if (sk_filter(sk, skb))
+			goto drop;
 	}
 
+	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-		UDP6_INC_STATS_BH(sock_net(sk),
-				  UDP_MIB_RCVBUFERRORS, is_udplite);
+		__UDP6_INC_STATS(sock_net(sk),
+				 UDP_MIB_RCVBUFERRORS, is_udplite);
 		goto drop;
 	}
 
@@ -715,9 +682,9 @@
 	return rc;
 
 csum_error:
-	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
-	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	atomic_inc(&sk->sk_drops);
 	kfree_skb(skb);
 	return -1;
@@ -747,33 +714,6 @@
 	return true;
 }
 
-static void flush_stack(struct sock **stack, unsigned int count,
-			struct sk_buff *skb, unsigned int final)
-{
-	struct sk_buff *skb1 = NULL;
-	struct sock *sk;
-	unsigned int i;
-
-	for (i = 0; i < count; i++) {
-		sk = stack[i];
-		if (likely(!skb1))
-			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
-		if (!skb1) {
-			atomic_inc(&sk->sk_drops);
-			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
-					  IS_UDPLITE(sk));
-			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
-					  IS_UDPLITE(sk));
-		}
-
-		if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
-			skb1 = NULL;
-		sock_put(sk);
-	}
-	if (unlikely(skb1))
-		kfree_skb(skb1);
-}
-
 static void udp6_csum_zero_error(struct sk_buff *skb)
 {
 	/* RFC 2460 section 8.1 says that we SHOULD log
@@ -792,15 +732,15 @@
 		const struct in6_addr *saddr, const struct in6_addr *daddr,
 		struct udp_table *udptable, int proto)
 {
-	struct sock *sk, *stack[256 / sizeof(struct sock *)];
+	struct sock *sk, *first = NULL;
 	const struct udphdr *uh = udp_hdr(skb);
-	struct hlist_nulls_node *node;
 	unsigned short hnum = ntohs(uh->dest);
 	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
-	int dif = inet6_iif(skb);
-	unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
+	unsigned int offset = offsetof(typeof(*sk), sk_node);
 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
-	bool inner_flushed = false;
+	int dif = inet6_iif(skb);
+	struct hlist_node *node;
+	struct sk_buff *nskb;
 
 	if (use_hash2) {
 		hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
@@ -811,27 +751,32 @@
 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
 	}
 
-	spin_lock(&hslot->lock);
-	sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
-		if (__udp_v6_is_mcast_sock(net, sk,
-					   uh->dest, daddr,
-					   uh->source, saddr,
-					   dif, hnum) &&
-		    /* If zero checksum and no_check is not on for
-		     * the socket then skip it.
-		     */
-		    (uh->check || udp_sk(sk)->no_check6_rx)) {
-			if (unlikely(count == ARRAY_SIZE(stack))) {
-				flush_stack(stack, count, skb, ~0);
-				inner_flushed = true;
-				count = 0;
-			}
-			stack[count++] = sk;
-			sock_hold(sk);
+	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
+		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
+					    uh->source, saddr, dif, hnum))
+			continue;
+		/* If zero checksum and no_check is not on for
+		 * the socket then skip it.
+		 */
+		if (!uh->check && !udp_sk(sk)->no_check6_rx)
+			continue;
+		if (!first) {
+			first = sk;
+			continue;
 		}
-	}
+		nskb = skb_clone(skb, GFP_ATOMIC);
+		if (unlikely(!nskb)) {
+			atomic_inc(&sk->sk_drops);
+			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
+					 IS_UDPLITE(sk));
+			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
+					 IS_UDPLITE(sk));
+			continue;
+		}
 
-	spin_unlock(&hslot->lock);
+		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
+			consume_skb(nskb);
+	}
 
 	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
 	if (use_hash2 && hash2 != hash2_any) {
@@ -839,13 +784,13 @@
 		goto start_lookup;
 	}
 
-	if (count) {
-		flush_stack(stack, count, skb, count - 1);
+	if (first) {
+		if (udpv6_queue_rcv_skb(first, skb) > 0)
+			consume_skb(skb);
 	} else {
-		if (!inner_flushed)
-			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
-					 proto == IPPROTO_UDPLITE);
-		consume_skb(skb);
+		kfree_skb(skb);
+		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
+				 proto == IPPROTO_UDPLITE);
 	}
 	return 0;
 }
@@ -853,10 +798,10 @@
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 		   int proto)
 {
-	struct net *net = dev_net(skb->dev);
-	struct sock *sk;
-	struct udphdr *uh;
 	const struct in6_addr *saddr, *daddr;
+	struct net *net = dev_net(skb->dev);
+	struct udphdr *uh;
+	struct sock *sk;
 	u32 ulen = 0;
 
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
@@ -910,7 +855,6 @@
 		int ret;
 
 		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
-			sock_put(sk);
 			udp6_csum_zero_error(skb);
 			goto csum_error;
 		}
@@ -920,7 +864,6 @@
 						 ip6_compute_pseudo);
 
 		ret = udpv6_queue_rcv_skb(sk, skb);
-		sock_put(sk);
 
 		/* a return value > 0 means to resubmit the input */
 		if (ret > 0)
@@ -940,7 +883,7 @@
 	if (udp_lib_checksum_complete(skb))
 		goto csum_error;
 
-	UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 
 	kfree_skb(skb);
@@ -954,9 +897,9 @@
 			    daddr, ntohs(uh->dest));
 	goto discard;
 csum_error:
-	UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
+	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
 discard:
-	UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
+	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
 	kfree_skb(skb);
 	return 0;
 }
@@ -1068,13 +1011,14 @@
 	err = ip6_send_skb(skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
-			UDP6_INC_STATS_USER(sock_net(sk),
-					    UDP_MIB_SNDBUFERRORS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk),
+				       UDP_MIB_SNDBUFERRORS, is_udplite);
 			err = 0;
 		}
-	} else
-		UDP6_INC_STATS_USER(sock_net(sk),
-				    UDP_MIB_OUTDATAGRAMS, is_udplite);
+	} else {
+		UDP6_INC_STATS(sock_net(sk),
+			       UDP_MIB_OUTDATAGRAMS, is_udplite);
+	}
 	return err;
 }
 
@@ -1118,16 +1062,19 @@
 	struct ip6_flowlabel *flowlabel = NULL;
 	struct flowi6 fl6;
 	struct dst_entry *dst;
+	struct ipcm6_cookie ipc6;
 	int addr_len = msg->msg_namelen;
 	int ulen = len;
-	int hlimit = -1;
-	int tclass = -1;
-	int dontfrag = -1;
 	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
 	int err;
 	int connected = 0;
 	int is_udplite = IS_UDPLITE(sk);
 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+	struct sockcm_cookie sockc;
+
+	ipc6.hlimit = -1;
+	ipc6.tclass = -1;
+	ipc6.dontfrag = -1;
 
 	/* destination address check */
 	if (sin6) {
@@ -1247,14 +1194,15 @@
 		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
 
 	fl6.flowi6_mark = sk->sk_mark;
+	sockc.tsflags = sk->sk_tsflags;
 
 	if (msg->msg_controllen) {
 		opt = &opt_space;
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(*opt);
+		ipc6.opt = opt;
 
-		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-					    &hlimit, &tclass, &dontfrag);
+		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
@@ -1275,6 +1223,7 @@
 	if (flowlabel)
 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 	opt = ipv6_fixup_options(&opt_space, opt);
+	ipc6.opt = opt;
 
 	fl6.flowi6_proto = sk->sk_protocol;
 	if (!ipv6_addr_any(daddr))
@@ -1304,11 +1253,11 @@
 		goto out;
 	}
 
-	if (hlimit < 0)
-		hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+	if (ipc6.hlimit < 0)
+		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
-	if (tclass < 0)
-		tclass = np->tclass;
+	if (ipc6.tclass < 0)
+		ipc6.tclass = np->tclass;
 
 	if (msg->msg_flags&MSG_CONFIRM)
 		goto do_confirm;
@@ -1319,9 +1268,9 @@
 		struct sk_buff *skb;
 
 		skb = ip6_make_skb(sk, getfrag, msg, ulen,
-				   sizeof(struct udphdr), hlimit, tclass, opt,
+				   sizeof(struct udphdr), &ipc6,
 				   &fl6, (struct rt6_info *)dst,
-				   msg->msg_flags, dontfrag);
+				   msg->msg_flags, &sockc);
 		err = PTR_ERR(skb);
 		if (!IS_ERR_OR_NULL(skb))
 			err = udp_v6_send_skb(skb, &fl6);
@@ -1342,13 +1291,12 @@
 	up->pending = AF_INET6;
 
 do_append_data:
-	if (dontfrag < 0)
-		dontfrag = np->dontfrag;
+	if (ipc6.dontfrag < 0)
+		ipc6.dontfrag = np->dontfrag;
 	up->len += ulen;
-	err = ip6_append_data(sk, getfrag, msg, ulen,
-		sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
-		(struct rt6_info *)dst,
-		corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
+	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
+			      &ipc6, &fl6, (struct rt6_info *)dst,
+			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, &sockc);
 	if (err)
 		udp_v6_flush_pending_frames(sk);
 	else if (!corkreq)
@@ -1391,8 +1339,8 @@
 	 * seems like overkill.
 	 */
 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-		UDP6_INC_STATS_USER(sock_net(sk),
-				UDP_MIB_SNDBUFERRORS, is_udplite);
+		UDP6_INC_STATS(sock_net(sk),
+			       UDP_MIB_SNDBUFERRORS, is_udplite);
 	}
 	return err;
 
@@ -1539,6 +1487,7 @@
 	.sendmsg	   = udpv6_sendmsg,
 	.recvmsg	   = udpv6_recvmsg,
 	.backlog_rcv	   = __udpv6_queue_rcv_skb,
+	.release_cb	   = ip6_datagram_release_cb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
 	.rehash		   = udp_v6_rehash,
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 2b0fbe6..5429f6b 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -153,7 +153,7 @@
 
 skip:
 	NAPI_GRO_CB(skb)->is_ipv6 = 1;
-	return udp_gro_receive(head, skb, uh);
+	return udp_gro_receive(head, skb, uh, udp6_lib_lookup_skb);
 
 flush:
 	NAPI_GRO_CB(skb)->flush = 1;
@@ -173,7 +173,7 @@
 		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
 	}
 
-	return udp_gro_complete(skb, nhoff);
+	return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
 }
 
 static const struct net_offload udpv6_offload = {
@@ -184,7 +184,12 @@
 	},
 };
 
-int __init udp_offload_init(void)
+int udpv6_offload_init(void)
 {
 	return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
 }
+
+int udpv6_offload_exit(void)
+{
+	return inet6_del_offload(&udpv6_offload, IPPROTO_UDP);
+}
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index fcfbe57..d8b7267 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -181,7 +181,7 @@
 		skb = new_skb;
 	}
 
-	dev->trans_start = jiffies;
+	netif_trans_update(dev);
 
 	len = skb->len;
 	/* Now queue the packet in the transport layer */
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index afca2eb..6edfa99 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1376,9 +1376,9 @@
 			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
 			       sizeof(udp_conf.peer_ip6));
 			udp_conf.use_udp6_tx_checksums =
-			    cfg->udp6_zero_tx_checksums;
+			  ! cfg->udp6_zero_tx_checksums;
 			udp_conf.use_udp6_rx_checksums =
-			    cfg->udp6_zero_rx_checksums;
+			  ! cfg->udp6_zero_rx_checksums;
 		} else
 #endif
 		{
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index ec22078..42de4cc 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -123,12 +123,11 @@
 	struct l2tp_tunnel *tunnel = NULL;
 	int length;
 
-	/* Point to L2TP header */
-	optr = ptr = skb->data;
-
 	if (!pskb_may_pull(skb, 4))
 		goto discard;
 
+	/* Point to L2TP header */
+	optr = ptr = skb->data;
 	session_id = ntohl(*((__be32 *) ptr));
 	ptr += 4;
 
@@ -156,6 +155,9 @@
 		if (!pskb_may_pull(skb, length))
 			goto discard;
 
+		/* Point to L2TP header */
+		optr = ptr = skb->data;
+		ptr += 4;
 		pr_debug("%s: ip recv\n", tunnel->name);
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 6b54ff3..c6f5df1b 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -136,12 +136,11 @@
 	struct l2tp_tunnel *tunnel = NULL;
 	int length;
 
-	/* Point to L2TP header */
-	optr = ptr = skb->data;
-
 	if (!pskb_may_pull(skb, 4))
 		goto discard;
 
+	/* Point to L2TP header */
+	optr = ptr = skb->data;
 	session_id = ntohl(*((__be32 *) ptr));
 	ptr += 4;
 
@@ -169,6 +168,9 @@
 		if (!pskb_may_pull(skb, length))
 			goto discard;
 
+		/* Point to L2TP header */
+		optr = ptr = skb->data;
+		ptr += 4;
 		pr_debug("%s: ip recv\n", tunnel->name);
 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
@@ -492,10 +494,9 @@
 	struct ip6_flowlabel *flowlabel = NULL;
 	struct dst_entry *dst = NULL;
 	struct flowi6 fl6;
+	struct sockcm_cookie sockc_unused = {0};
+	struct ipcm6_cookie ipc6;
 	int addr_len = msg->msg_namelen;
-	int hlimit = -1;
-	int tclass = -1;
-	int dontfrag = -1;
 	int transhdrlen = 4; /* zero session-id */
 	int ulen = len + transhdrlen;
 	int err;
@@ -517,6 +518,10 @@
 
 	fl6.flowi6_mark = sk->sk_mark;
 
+	ipc6.hlimit = -1;
+	ipc6.tclass = -1;
+	ipc6.dontfrag = -1;
+
 	if (lsa) {
 		if (addr_len < SIN6_LEN_RFC2133)
 			return -EINVAL;
@@ -561,9 +566,10 @@
 		opt = &opt_space;
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(struct ipv6_txoptions);
+		ipc6.opt = opt;
 
-		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-					    &hlimit, &tclass, &dontfrag);
+		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6,
+					    &sockc_unused);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
@@ -584,6 +590,7 @@
 	if (flowlabel)
 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 	opt = ipv6_fixup_options(&opt_space, opt);
+	ipc6.opt = opt;
 
 	fl6.flowi6_proto = sk->sk_protocol;
 	if (!ipv6_addr_any(daddr))
@@ -608,14 +615,14 @@
 		goto out;
 	}
 
-	if (hlimit < 0)
-		hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+	if (ipc6.hlimit < 0)
+		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
-	if (tclass < 0)
-		tclass = np->tclass;
+	if (ipc6.tclass < 0)
+		ipc6.tclass = np->tclass;
 
-	if (dontfrag < 0)
-		dontfrag = np->dontfrag;
+	if (ipc6.dontfrag < 0)
+		ipc6.dontfrag = np->dontfrag;
 
 	if (msg->msg_flags & MSG_CONFIRM)
 		goto do_confirm;
@@ -623,9 +630,9 @@
 back_from_confirm:
 	lock_sock(sk);
 	err = ip6_append_data(sk, ip_generic_getfrag, msg,
-			      ulen, transhdrlen, hlimit, tclass, opt,
+			      ulen, transhdrlen, &ipc6,
 			      &fl6, (struct rt6_info *)dst,
-			      msg->msg_flags, dontfrag);
+			      msg->msg_flags, &sockc_unused);
 	if (err)
 		ip6_flush_pending_frames(sk);
 	else if (!(msg->msg_flags & MSG_MORE))
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 2caaa84..1d02e8d 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -346,22 +346,30 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
-		    atomic_long_read(&tunnel->stats.tx_packets)) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
-		    atomic_long_read(&tunnel->stats.tx_bytes)) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
-		    atomic_long_read(&tunnel->stats.tx_errors)) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
-		    atomic_long_read(&tunnel->stats.rx_packets)) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
-		    atomic_long_read(&tunnel->stats.rx_bytes)) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-		    atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-		    atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
-		    atomic_long_read(&tunnel->stats.rx_errors)))
+	if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
+			      atomic_long_read(&tunnel->stats.tx_packets),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
+			      atomic_long_read(&tunnel->stats.tx_bytes),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
+			      atomic_long_read(&tunnel->stats.tx_errors),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
+			      atomic_long_read(&tunnel->stats.rx_packets),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
+			      atomic_long_read(&tunnel->stats.rx_bytes),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+			      atomic_long_read(&tunnel->stats.rx_seq_discards),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
+			      atomic_long_read(&tunnel->stats.rx_oos_packets),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
+			      atomic_long_read(&tunnel->stats.rx_errors),
+			      L2TP_ATTR_STATS_PAD))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
@@ -746,29 +754,38 @@
 	     nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
 #endif
 	    (session->reorder_timeout &&
-	     nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
+	     nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT,
+			   session->reorder_timeout, L2TP_ATTR_PAD)))
 		goto nla_put_failure;
 
 	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
-		atomic_long_read(&session->stats.tx_packets)) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
-		atomic_long_read(&session->stats.tx_bytes)) ||
-	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
-		atomic_long_read(&session->stats.tx_errors)) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
-		atomic_long_read(&session->stats.rx_packets)) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
-		atomic_long_read(&session->stats.rx_bytes)) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-		atomic_long_read(&session->stats.rx_seq_discards)) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-		atomic_long_read(&session->stats.rx_oos_packets)) ||
-	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
-		atomic_long_read(&session->stats.rx_errors)))
+	if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
+			      atomic_long_read(&session->stats.tx_packets),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
+			      atomic_long_read(&session->stats.tx_bytes),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
+			      atomic_long_read(&session->stats.tx_errors),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
+			      atomic_long_read(&session->stats.rx_packets),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
+			      atomic_long_read(&session->stats.rx_bytes),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+			      atomic_long_read(&session->stats.rx_seq_discards),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
+			      atomic_long_read(&session->stats.rx_oos_packets),
+			      L2TP_ATTR_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
+			      atomic_long_read(&session->stats.rx_errors),
+			      L2TP_ATTR_STATS_PAD))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index e925037..6651a78 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -97,3 +97,66 @@
 	return tb_id;
 }
 EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);
+
+/**
+ *	l3mdev_get_rt6_dst - IPv6 route lookup based on flow. Returns
+ *			     cached route for L3 master device if relevant
+ *			     to flow
+ *	@net: network namespace for device index lookup
+ *	@fl6: IPv6 flow struct for lookup
+ */
+
+struct dst_entry *l3mdev_get_rt6_dst(struct net *net,
+				     const struct flowi6 *fl6)
+{
+	struct dst_entry *dst = NULL;
+	struct net_device *dev;
+
+	if (fl6->flowi6_oif) {
+		rcu_read_lock();
+
+		dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
+		if (dev && netif_is_l3_slave(dev))
+			dev = netdev_master_upper_dev_get_rcu(dev);
+
+		if (dev && netif_is_l3_master(dev) &&
+		    dev->l3mdev_ops->l3mdev_get_rt6_dst)
+			dst = dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
+
+		rcu_read_unlock();
+	}
+
+	return dst;
+}
+EXPORT_SYMBOL_GPL(l3mdev_get_rt6_dst);
+
+/**
+ *	l3mdev_get_saddr - get source address for a flow based on an interface
+ *			   enslaved to an L3 master device
+ *	@net: network namespace for device index lookup
+ *	@ifindex: Interface index
+ *	@fl4: IPv4 flow struct
+ */
+
+int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4)
+{
+	struct net_device *dev;
+	int rc = 0;
+
+	if (ifindex) {
+		rcu_read_lock();
+
+		dev = dev_get_by_index_rcu(net, ifindex);
+		if (dev && netif_is_l3_slave(dev))
+			dev = netdev_master_upper_dev_get_rcu(dev);
+
+		if (dev && netif_is_l3_master(dev) &&
+		    dev->l3mdev_ops->l3mdev_get_saddr)
+			rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
+
+		rcu_read_unlock();
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(l3mdev_get_saddr);
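Both new helpers follow the same RCU lookup shape: resolve the ifindex, hop from an L3 slave up to its master, and only then call the master's l3mdev op. A compressed sketch of that shape (the example_* name is illustrative, not part of the l3mdev API):

	/* Caller is expected to hold rcu_read_lock(). */
	static struct net_device *example_l3mdev_master(struct net *net, int ifindex)
	{
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, ifindex);
		if (dev && netif_is_l3_slave(dev))
			dev = netdev_master_upper_dev_get_rcu(dev);

		if (dev && netif_is_l3_master(dev))
			return dev;

		return NULL;
	}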
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index b3c52e3..8ae3ed9 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -626,6 +626,7 @@
 	if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
 		struct llc_pktinfo info;
 
+		memset(&info, 0, sizeof(info));
 		info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
 		llc_pdu_decode_dsap(skb, &info.lpi_sap);
 		llc_pdu_decode_da(skb, info.lpi_mac);
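The added memset() reads like an info-leak fix: llc_pktinfo is a stack structure whose padding bytes would otherwise be copied out to userspace along with the fields filled in below (that reading is inferred from the diff, not stated in it). The general pattern, with a made-up struct for illustration:

	struct example_info {
		int	ifindex;
		u8	mac[6];		/* compiler may add trailing padding */
	};

	static void example_fill(struct example_info *info, int ifindex, const u8 *mac)
	{
		memset(info, 0, sizeof(*info));	/* clear holes/padding before any copy to userspace */
		info->ifindex = ifindex;
		memcpy(info->mac, mac, sizeof(info->mac));
	}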
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 1a3c7e0..29c509c 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -195,7 +195,7 @@
 		   timer_pending(&llc->pf_cycle_timer.timer),
 		   timer_pending(&llc->rej_sent_timer.timer),
 		   timer_pending(&llc->busy_state_timer.timer),
-		   !!sk->sk_backlog.tail, !!sock_owned_by_user(sk));
+		   !!sk->sk_backlog.tail, !!sk->sk_lock.owned);
 out:
 	return 0;
 }
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 4932e9f..42fa810 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -935,6 +935,7 @@
 				  size_t len)
 {
 	struct tid_ampdu_tx *tid_tx;
+	struct ieee80211_txq *txq;
 	u16 capab, tid;
 	u8 buf_size;
 	bool amsdu;
@@ -945,6 +946,10 @@
 	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
 	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
 
+	txq = sta->sta.txq[tid];
+	if (!amsdu && txq)
+		set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags);
+
 	mutex_lock(&sta->ampdu_mlme.mtx);
 
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
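The new IEEE80211_TXQ_NO_AMSDU bit records, per TXQ, that the peer apparently did not advertise A-MSDU support in its ADDBA exchange. A hypothetical consumer of that bit on the transmit side would simply test it before building an A-MSDU:

	/* Illustrative only; the real TX-path check lives elsewhere in mac80211. */
	static bool example_txq_allows_amsdu(struct txq_info *txqi)
	{
		return !test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags);
	}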
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fe1704c..0c12e40 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -65,11 +65,13 @@
 		return ret;
 
 	if (type == NL80211_IFTYPE_AP_VLAN &&
-	    params && params->use_4addr == 0)
+	    params && params->use_4addr == 0) {
 		RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
-	else if (type == NL80211_IFTYPE_STATION &&
-		 params && params->use_4addr >= 0)
+		ieee80211_check_fast_rx_iface(sdata);
+	} else if (type == NL80211_IFTYPE_STATION &&
+		   params && params->use_4addr >= 0) {
 		sdata->u.mgd.use_4addr = params->use_4addr;
+	}
 
 	if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags) {
 		struct ieee80211_local *local = sdata->local;
@@ -732,6 +734,7 @@
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 	sdata->vif.bss_conf.dtim_period = params->dtim_period;
 	sdata->vif.bss_conf.enable_beacon = true;
+	sdata->vif.bss_conf.allow_p2p_go_ps = sdata->vif.p2p;
 
 	sdata->vif.bss_conf.ssid_len = params->ssid_len;
 	if (params->ssid_len)
@@ -1046,7 +1049,7 @@
 	int ret = 0;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	u32 mask, set;
 
 	sband = local->hw.wiphy->bands[band];
@@ -1202,6 +1205,9 @@
 					      params->opmode_notif, band);
 	}
 
+	if (params->support_p2p_ps >= 0)
+		sta->sta.support_p2p_ps = params->support_p2p_ps;
+
 	if (ieee80211_vif_is_mesh(&sdata->vif))
 		sta_apply_mesh_params(local, sta, params);
 
@@ -1363,6 +1369,7 @@
 
 			rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
 			new_4addr = true;
+			__ieee80211_check_fast_rx_iface(vlansdata);
 		}
 
 		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
@@ -1499,7 +1506,7 @@
 
 	memset(pinfo, 0, sizeof(*pinfo));
 
-	pinfo->generation = mesh_paths_generation;
+	pinfo->generation = mpath->sdata->u.mesh.mesh_paths_generation;
 
 	pinfo->filled = MPATH_INFO_FRAME_QLEN |
 			MPATH_INFO_SN |
@@ -1577,7 +1584,7 @@
 	memset(pinfo, 0, sizeof(*pinfo));
 	memcpy(mpp, mpath->mpp, ETH_ALEN);
 
-	pinfo->generation = mpp_paths_generation;
+	pinfo->generation = mpath->sdata->u.mesh.mpp_paths_generation;
 }
 
 static int ieee80211_get_mpp(struct wiphy *wiphy, struct net_device *dev,
@@ -1841,7 +1848,7 @@
 				struct bss_parameters *params)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	u32 changed = 0;
 
 	if (!sdata_dereference(sdata->u.ap.beacon, sdata))
@@ -1860,7 +1867,7 @@
 	}
 
 	if (!sdata->vif.bss_conf.use_short_slot &&
-	    band == IEEE80211_BAND_5GHZ) {
+	    band == NL80211_BAND_5GHZ) {
 		sdata->vif.bss_conf.use_short_slot = true;
 		changed |= BSS_CHANGED_ERP_SLOT;
 	}
@@ -1885,6 +1892,7 @@
 			sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
 		else
 			sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
+		ieee80211_check_fast_rx_iface(sdata);
 	}
 
 	if (params->ht_opmode >= 0) {
@@ -2089,12 +2097,12 @@
 }
 
 static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
-				    int rate[IEEE80211_NUM_BANDS])
+				    int rate[NUM_NL80211_BANDS])
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	memcpy(sdata->vif.bss_conf.mcast_rate, rate,
-	       sizeof(int) * IEEE80211_NUM_BANDS);
+	       sizeof(int) * NUM_NL80211_BANDS);
 
 	return 0;
 }
@@ -2499,7 +2507,7 @@
 			return ret;
 	}
 
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+	for (i = 0; i < NUM_NL80211_BANDS; i++) {
 		struct ieee80211_supported_band *sband = wiphy->bands[i];
 		int j;
 
@@ -3127,7 +3135,7 @@
 	struct ieee80211_tx_info *info;
 	struct sta_info *sta;
 	struct ieee80211_chanctx_conf *chanctx_conf;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int ret;
 
 	/* the lock is needed to assign the cookie later */
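Most of the churn in this file is the mechanical rename from enum ieee80211_band / IEEE80211_BAND_* / IEEE80211_NUM_BANDS to the cfg80211-visible enum nl80211_band / NL80211_BAND_* / NUM_NL80211_BANDS. The iteration idiom is unchanged apart from the names:

	enum nl80211_band band;

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *sband = wiphy->bands[band];

		if (!sband)	/* band not supported by this device */
			continue;
		/* per-band work, e.g. walking sband->bitrates or sband->channels */
	}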
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 2839811..74142d0 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -343,8 +343,10 @@
 				     struct ieee80211_chanctx *ctx,
 				     const struct cfg80211_chan_def *chandef)
 {
-	if (cfg80211_chandef_identical(&ctx->conf.def, chandef))
+	if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+		ieee80211_recalc_chanctx_min_def(local, ctx);
 		return;
+	}
 
 	WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
 
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 4ab5c52..b251b2f 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -127,6 +127,9 @@
 	FLAG(BEACON_TX_STATUS),
 	FLAG(NEEDS_UNIQUE_STA_ADDR),
 	FLAG(SUPPORTS_REORDERING_BUFFER),
+	FLAG(USES_RSS),
+	FLAG(TX_AMSDU),
+	FLAG(TX_FRAG_LIST),
 #undef FLAG
 };
 
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 37ea30e..a5ba739 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -169,21 +169,21 @@
 	IEEE80211_IF_FILE_R(name)
 
 /* common attributes */
-IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
+IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[NL80211_BAND_2GHZ],
 		  HEX);
-IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
+IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[NL80211_BAND_5GHZ],
 		  HEX);
 IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz,
-		  rc_rateidx_mcs_mask[IEEE80211_BAND_2GHZ], HEXARRAY);
+		  rc_rateidx_mcs_mask[NL80211_BAND_2GHZ], HEXARRAY);
 IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
-		  rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
+		  rc_rateidx_mcs_mask[NL80211_BAND_5GHZ], HEXARRAY);
 
 static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz(
 				const struct ieee80211_sub_if_data *sdata,
 				char *buf, int buflen)
 {
 	int i, len = 0;
-	const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_2GHZ];
+	const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_2GHZ];
 
 	for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
 		len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
@@ -199,7 +199,7 @@
 				char *buf, int buflen)
 {
 	int i, len = 0;
-	const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_5GHZ];
+	const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_5GHZ];
 
 	for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
 		len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index a39512f..33dfcbc 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -3,6 +3,7 @@
  * Copyright (c) 2006	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -51,31 +52,54 @@
 
 STA_FILE(aid, sta.aid, D);
 
+static const char * const sta_flag_names[] = {
+#define FLAG(F) [WLAN_STA_##F] = #F
+	FLAG(AUTH),
+	FLAG(ASSOC),
+	FLAG(PS_STA),
+	FLAG(AUTHORIZED),
+	FLAG(SHORT_PREAMBLE),
+	FLAG(WDS),
+	FLAG(CLEAR_PS_FILT),
+	FLAG(MFP),
+	FLAG(BLOCK_BA),
+	FLAG(PS_DRIVER),
+	FLAG(PSPOLL),
+	FLAG(TDLS_PEER),
+	FLAG(TDLS_PEER_AUTH),
+	FLAG(TDLS_INITIATOR),
+	FLAG(TDLS_CHAN_SWITCH),
+	FLAG(TDLS_OFF_CHANNEL),
+	FLAG(TDLS_WIDER_BW),
+	FLAG(UAPSD),
+	FLAG(SP),
+	FLAG(4ADDR_EVENT),
+	FLAG(INSERTED),
+	FLAG(RATE_CONTROL),
+	FLAG(TOFFSET_KNOWN),
+	FLAG(MPSP_OWNER),
+	FLAG(MPSP_RECIPIENT),
+	FLAG(PS_DELIVER),
+#undef FLAG
+};
+
 static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
 {
-	char buf[121];
+	char buf[16 * NUM_WLAN_STA_FLAGS], *pos = buf;
+	char *end = buf + sizeof(buf) - 1;
 	struct sta_info *sta = file->private_data;
+	unsigned int flg;
 
-#define TEST(flg) \
-	test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
+	BUILD_BUG_ON(ARRAY_SIZE(sta_flag_names) != NUM_WLAN_STA_FLAGS);
 
-	int res = scnprintf(buf, sizeof(buf),
-			    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
-			    TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
-			    TEST(PS_DRIVER), TEST(AUTHORIZED),
-			    TEST(SHORT_PREAMBLE),
-			    sta->sta.wme ? "WME\n" : "",
-			    TEST(WDS), TEST(CLEAR_PS_FILT),
-			    TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
-			    TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
-			    TEST(TDLS_PEER_AUTH), TEST(TDLS_INITIATOR),
-			    TEST(TDLS_CHAN_SWITCH), TEST(TDLS_OFF_CHANNEL),
-			    TEST(4ADDR_EVENT), TEST(INSERTED),
-			    TEST(RATE_CONTROL), TEST(TOFFSET_KNOWN),
-			    TEST(MPSP_OWNER), TEST(MPSP_RECIPIENT));
-#undef TEST
-	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+	for (flg = 0; flg < NUM_WLAN_STA_FLAGS; flg++) {
+		if (test_sta_flag(sta, flg))
+			pos += scnprintf(pos, end - pos, "%s\n",
+					 sta_flag_names[flg]);
+	}
+
+	return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
 }
 STA_OPS(flags);
 
@@ -151,11 +175,12 @@
 static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
 				    size_t count, loff_t *ppos)
 {
-	char _buf[12] = {}, *buf = _buf;
+	char _buf[25] = {}, *buf = _buf;
 	struct sta_info *sta = file->private_data;
 	bool start, tx;
 	unsigned long tid;
-	int ret;
+	char *pos;
+	int ret, timeout = 5000;
 
 	if (count > sizeof(_buf))
 		return -EINVAL;
@@ -164,37 +189,48 @@
 		return -EFAULT;
 
 	buf[sizeof(_buf) - 1] = '\0';
-
-	if (strncmp(buf, "tx ", 3) == 0) {
-		buf += 3;
-		tx = true;
-	} else if (strncmp(buf, "rx ", 3) == 0) {
-		buf += 3;
-		tx = false;
-	} else
+	pos = buf;
+	buf = strsep(&pos, " ");
+	if (!buf)
 		return -EINVAL;
 
-	if (strncmp(buf, "start ", 6) == 0) {
-		buf += 6;
+	if (!strcmp(buf, "tx"))
+		tx = true;
+	else if (!strcmp(buf, "rx"))
+		tx = false;
+	else
+		return -EINVAL;
+
+	buf = strsep(&pos, " ");
+	if (!buf)
+		return -EINVAL;
+	if (!strcmp(buf, "start")) {
 		start = true;
 		if (!tx)
 			return -EINVAL;
-	} else if (strncmp(buf, "stop ", 5) == 0) {
-		buf += 5;
+	} else if (!strcmp(buf, "stop")) {
 		start = false;
-	} else
+	} else {
 		return -EINVAL;
+	}
+
+	buf = strsep(&pos, " ");
+	if (!buf)
+		return -EINVAL;
+	if (sscanf(buf, "timeout=%d", &timeout) == 1) {
+		buf = strsep(&pos, " ");
+		if (!buf || !tx || !start)
+			return -EINVAL;
+	}
 
 	ret = kstrtoul(buf, 0, &tid);
-	if (ret)
-		return ret;
-
-	if (tid >= IEEE80211_NUM_TIDS)
+	if (ret || tid >= IEEE80211_NUM_TIDS)
 		return -EINVAL;
 
 	if (tx) {
 		if (start)
-			ret = ieee80211_start_tx_ba_session(&sta->sta, tid, 5000);
+			ret = ieee80211_start_tx_ba_session(&sta->sta, tid,
+							    timeout);
 		else
 			ret = ieee80211_stop_tx_ba_session(&sta->sta, tid);
 	} else {
@@ -322,14 +358,14 @@
 
 #define DEBUGFS_ADD(name) \
 	debugfs_create_file(#name, 0400, \
-		sta->debugfs.dir, sta, &sta_ ##name## _ops);
+		sta->debugfs_dir, sta, &sta_ ##name## _ops);
 
 #define DEBUGFS_ADD_COUNTER(name, field)				\
 	if (sizeof(sta->field) == sizeof(u32))				\
-		debugfs_create_u32(#name, 0400, sta->debugfs.dir,	\
+		debugfs_create_u32(#name, 0400, sta->debugfs_dir,	\
 			(u32 *) &sta->field);				\
 	else								\
-		debugfs_create_u64(#name, 0400, sta->debugfs.dir,	\
+		debugfs_create_u64(#name, 0400, sta->debugfs_dir,	\
 			(u64 *) &sta->field);
 
 void ieee80211_sta_debugfs_add(struct sta_info *sta)
@@ -339,8 +375,6 @@
 	struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations;
 	u8 mac[3*ETH_ALEN];
 
-	sta->debugfs.add_has_run = true;
-
 	if (!stations_dir)
 		return;
 
@@ -355,8 +389,8 @@
 	 * destroyed quickly enough the old station's debugfs
 	 * dir might still be around.
 	 */
-	sta->debugfs.dir = debugfs_create_dir(mac, stations_dir);
-	if (!sta->debugfs.dir)
+	sta->debugfs_dir = debugfs_create_dir(mac, stations_dir);
+	if (!sta->debugfs_dir)
 		return;
 
 	DEBUGFS_ADD(flags);
@@ -372,14 +406,14 @@
 
 	if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
 		debugfs_create_x32("driver_buffered_tids", 0400,
-				   sta->debugfs.dir,
+				   sta->debugfs_dir,
 				   (u32 *)&sta->driver_buffered_tids);
 	else
 		debugfs_create_x64("driver_buffered_tids", 0400,
-				   sta->debugfs.dir,
+				   sta->debugfs_dir,
 				   (u64 *)&sta->driver_buffered_tids);
 
-	drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
+	drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs_dir);
 }
 
 void ieee80211_sta_debugfs_remove(struct sta_info *sta)
@@ -387,7 +421,7 @@
 	struct ieee80211_local *local = sta->local;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
-	drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
-	debugfs_remove_recursive(sta->debugfs.dir);
-	sta->debugfs.dir = NULL;
+	drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs_dir);
+	debugfs_remove_recursive(sta->debugfs_dir);
+	sta->debugfs_dir = NULL;
 }
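The rewritten sta_flags_read() replaces a fixed-width format string with a name table indexed by the flag enum, plus a BUILD_BUG_ON() so the table cannot silently fall out of sync with the enum. The idiom in isolation, with invented enum values:

	enum example_flag { EX_FLAG_A, EX_FLAG_B, NUM_EX_FLAGS };

	static const char * const example_flag_names[] = {
	#define FLAG(F) [EX_FLAG_##F] = #F
		FLAG(A),
		FLAG(B),
	#undef FLAG
	};

	static void example_flags_check(void)
	{
		/* compile-time failure if a flag is added without a name */
		BUILD_BUG_ON(ARRAY_SIZE(example_flag_names) != NUM_EX_FLAGS);
	}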
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 18b0d65..184473c 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1,3 +1,8 @@
+/*
+* Portions of this file
+* Copyright(c) 2016 Intel Deutschland GmbH
+*/
+
 #ifndef __MAC80211_DRIVER_OPS
 #define __MAC80211_DRIVER_OPS
 
@@ -29,6 +34,16 @@
 	local->ops->tx(&local->hw, control, skb);
 }
 
+static inline void drv_sync_rx_queues(struct ieee80211_local *local,
+				      struct sta_info *sta)
+{
+	if (local->ops->sync_rx_queues) {
+		trace_drv_sync_rx_queues(local, sta->sdata, &sta->sta);
+		local->ops->sync_rx_queues(&local->hw);
+		trace_drv_return_void(local);
+	}
+}
+
 static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
 				      u32 sset, u8 *data)
 {
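drv_sync_rx_queues() follows the usual driver-ops wrapper shape: skip silently when the driver does not implement the callback, and bracket the call with trace events. Schematically (example_op and its tracepoint are placeholders, not mac80211 symbols):

	static inline void drv_example_op(struct ieee80211_local *local)
	{
		if (!local->ops->example_op)
			return;
		trace_drv_example_op(local);	/* hypothetical tracepoint */
		local->ops->example_op(&local->hw);
		trace_drv_return_void(local);
	}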
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index fc32383..a31d307 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -126,7 +126,7 @@
 		}
 	}
 
-	if (sband->band == IEEE80211_BAND_2GHZ) {
+	if (sband->band == NL80211_BAND_2GHZ) {
 		*pos++ = WLAN_EID_DS_PARAMS;
 		*pos++ = 1;
 		*pos++ = ieee80211_frequency_to_channel(
@@ -348,11 +348,11 @@
 	 *
 	 * HT follows these specifications (IEEE 802.11-2012 20.3.18)
 	 */
-	sdata->vif.bss_conf.use_short_slot = chan->band == IEEE80211_BAND_5GHZ;
+	sdata->vif.bss_conf.use_short_slot = chan->band == NL80211_BAND_5GHZ;
 	bss_change |= BSS_CHANGED_ERP_SLOT;
 
 	/* cf. IEEE 802.11 9.2.12 */
-	if (chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit)
+	if (chan->band == NL80211_BAND_2GHZ && have_higher_than_11mbit)
 		sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
 	else
 		sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
@@ -649,8 +649,6 @@
 		return NULL;
 	}
 
-	sta->rx_stats.last_rx = jiffies;
-
 	/* make sure mandatory rates are always added */
 	sband = local->hw.wiphy->bands[band];
 	sta->sta.supp_rates[band] = supp_rates |
@@ -670,10 +668,11 @@
 	rcu_read_lock();
 
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		unsigned long last_active = ieee80211_sta_last_active(sta);
+
 		if (sta->sdata == sdata &&
-		    time_after(sta->rx_stats.last_rx +
-			       IEEE80211_IBSS_MERGE_INTERVAL,
-			       jiffies)) {
+		    time_is_after_jiffies(last_active +
+					  IEEE80211_IBSS_MERGE_INTERVAL)) {
 			active++;
 			break;
 		}
@@ -990,7 +989,7 @@
 				      struct ieee80211_channel *channel)
 {
 	struct sta_info *sta;
-	enum ieee80211_band band = rx_status->band;
+	enum nl80211_band band = rx_status->band;
 	enum nl80211_bss_scan_width scan_width;
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
@@ -1110,7 +1109,7 @@
 	struct ieee80211_channel *channel;
 	u64 beacon_timestamp, rx_timestamp;
 	u32 supp_rates = 0;
-	enum ieee80211_band band = rx_status->band;
+	enum nl80211_band band = rx_status->band;
 
 	channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
 	if (!channel)
@@ -1236,8 +1235,6 @@
 	if (!sta)
 		return;
 
-	sta->rx_stats.last_rx = jiffies;
-
 	/* make sure mandatory rates are always added */
 	sband = local->hw.wiphy->bands[band];
 	sta->sta.supp_rates[band] = supp_rates |
@@ -1259,11 +1256,13 @@
 	mutex_lock(&local->sta_mtx);
 
 	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+		unsigned long last_active = ieee80211_sta_last_active(sta);
+
 		if (sdata != sta->sdata)
 			continue;
 
-		if (time_after(jiffies, sta->rx_stats.last_rx + exp_time) ||
-		    (time_after(jiffies, sta->rx_stats.last_rx + exp_rsn) &&
+		if (time_is_before_jiffies(last_active + exp_time) ||
+		    (time_is_before_jiffies(last_active + exp_rsn) &&
 		     sta->sta_state != IEEE80211_STA_AUTHORIZED)) {
 			sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n",
 				sta->sta_state != IEEE80211_STA_AUTHORIZED ?
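Replacing direct reads of rx_stats.last_rx with ieee80211_sta_last_active() also switches the comparisons to the time_is_*_jiffies() helpers, which read naturally and stay correct across jiffies wraparound. For reference, the two forms are equivalent:

	unsigned long deadline = last_active + IEEE80211_IBSS_MERGE_INTERVAL;

	/* time_is_after_jiffies(deadline) == time_before(jiffies, deadline) */
	if (time_is_after_jiffies(deadline))
		;	/* peer was active recently enough */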
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 804575f..9438c94 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -696,6 +696,11 @@
 
 	/* offset from skb->data while building IE */
 	int meshconf_offset;
+
+	struct mesh_table *mesh_paths;
+	struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+	int mesh_paths_generation;
+	int mpp_paths_generation;
 };
 
 #ifdef CONFIG_MAC80211_MESH
@@ -797,6 +802,7 @@
 enum txq_info_flags {
 	IEEE80211_TXQ_STOP,
 	IEEE80211_TXQ_AMPDU,
+	IEEE80211_TXQ_NO_AMSDU,
 };
 
 struct txq_info {
@@ -890,13 +896,13 @@
 	struct ieee80211_if_ap *bss;
 
 	/* bitmap of allowed (non-MCS) rate indexes for rate control */
-	u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
+	u32 rc_rateidx_mask[NUM_NL80211_BANDS];
 
-	bool rc_has_mcs_mask[IEEE80211_NUM_BANDS];
-	u8  rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
+	bool rc_has_mcs_mask[NUM_NL80211_BANDS];
+	u8  rc_rateidx_mcs_mask[NUM_NL80211_BANDS][IEEE80211_HT_MCS_MASK_LEN];
 
-	bool rc_has_vht_mcs_mask[IEEE80211_NUM_BANDS];
-	u16 rc_rateidx_vht_mcs_mask[IEEE80211_NUM_BANDS][NL80211_VHT_NSS_MAX];
+	bool rc_has_vht_mcs_mask[NUM_NL80211_BANDS];
+	u16 rc_rateidx_vht_mcs_mask[NUM_NL80211_BANDS][NL80211_VHT_NSS_MAX];
 
 	union {
 		struct ieee80211_if_ap ap;
@@ -951,10 +957,10 @@
 	lockdep_assert_held(&sdata->wdev.mtx);
 }
 
-static inline enum ieee80211_band
+static inline enum nl80211_band
 ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
 {
-	enum ieee80211_band band = IEEE80211_BAND_2GHZ;
+	enum nl80211_band band = NL80211_BAND_2GHZ;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 
 	rcu_read_lock();
@@ -1225,7 +1231,7 @@
 	struct cfg80211_scan_request __rcu *scan_req;
 	struct ieee80211_scan_request *hw_scan_req;
 	struct cfg80211_chan_def scan_chandef;
-	enum ieee80211_band hw_scan_band;
+	enum nl80211_band hw_scan_band;
 	int scan_channel_idx;
 	int scan_ies_len;
 	int hw_scan_ies_bufsize;
@@ -1489,6 +1495,11 @@
 int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
 			     u64 *cookie, gfp_t gfp);
 
+void ieee80211_check_fast_rx(struct sta_info *sta);
+void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata);
+void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata);
+void ieee80211_clear_fast_rx(struct sta_info *sta);
+
 /* STA code */
 void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
 int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -1719,14 +1730,18 @@
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
 void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+enum ieee80211_sta_rx_bandwidth
+ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
+enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
+void ieee80211_sta_set_rx_nss(struct sta_info *sta);
 void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
 				 struct ieee80211_mgmt *mgmt);
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                   struct sta_info *sta, u8 opmode,
-				  enum ieee80211_band band);
+				  enum nl80211_band band);
 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 				 struct sta_info *sta, u8 opmode,
-				 enum ieee80211_band band);
+				 enum nl80211_band band);
 void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
 				      struct ieee80211_sta_vht_cap *vht_cap);
 void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
@@ -1754,7 +1769,7 @@
  */
 int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
 				 struct ieee802_11_elems *elems,
-				 enum ieee80211_band current_band,
+				 enum nl80211_band current_band,
 				 u32 sta_flags, u8 *bssid,
 				 struct ieee80211_csa_ie *csa_ie);
 
@@ -1779,7 +1794,7 @@
 
 /* utility functions/constants */
 extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
-int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
+int ieee80211_frame_duration(enum nl80211_band band, size_t len,
 			     int rate, int erp, int short_preamble,
 			     int shift);
 void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
@@ -1789,12 +1804,12 @@
 
 void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
 				 struct sk_buff *skb, int tid,
-				 enum ieee80211_band band);
+				 enum nl80211_band band);
 
 static inline void
 ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
 			  struct sk_buff *skb, int tid,
-			  enum ieee80211_band band)
+			  enum nl80211_band band)
 {
 	rcu_read_lock();
 	__ieee80211_tx_skb_tid_band(sdata, skb, tid, band);
@@ -1949,7 +1964,7 @@
 
 u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
 			    struct ieee802_11_elems *elems,
-			    enum ieee80211_band band, u32 *basic_rates);
+			    enum nl80211_band band, u32 *basic_rates);
 int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
 				 enum ieee80211_smps_mode smps_mode);
 int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
@@ -1972,10 +1987,10 @@
 			     const u8 *srates, int srates_len, u32 *rates);
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
 			    struct sk_buff *skb, bool need_basic,
-			    enum ieee80211_band band);
+			    enum nl80211_band band);
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 				struct sk_buff *skb, bool need_basic,
-				enum ieee80211_band band);
+				enum nl80211_band band);
 u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
 
 /* channel management */
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 453b4e74..c59af3e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1093,7 +1093,7 @@
 	sdata->fragment_next = 0;
 
 	if (ieee80211_vif_is_mesh(&sdata->vif))
-		mesh_rmc_free(sdata);
+		ieee80211_mesh_teardown_sdata(sdata);
 }
 
 static void ieee80211_uninit(struct net_device *dev)
@@ -1761,7 +1761,7 @@
 
 		ret = dev_alloc_name(ndev, ndev->name);
 		if (ret < 0) {
-			free_netdev(ndev);
+			ieee80211_if_free(ndev);
 			return ret;
 		}
 
@@ -1800,7 +1800,7 @@
 	INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk,
 			  ieee80211_delayed_tailroom_dec);
 
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+	for (i = 0; i < NUM_NL80211_BANDS; i++) {
 		struct ieee80211_supported_band *sband;
 		sband = local->hw.wiphy->bands[i];
 		sdata->rc_rateidx_mask[i] =
@@ -1847,7 +1847,7 @@
 
 		ret = register_netdevice(ndev);
 		if (ret) {
-			free_netdev(ndev);
+			ieee80211_if_free(ndev);
 			return ret;
 		}
 	}
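Both failure paths above now use ieee80211_if_free() rather than a bare free_netdev(), presumably so that per-netdev resources allocated earlier in this function (such as per-CPU counters) are torn down by the same helper that serves as the netdev destructor; that motivation is inferred from the diff rather than stated in it. A generic sketch of the pattern being relied on:

	/* Sketch: one teardown helper used both as ndev->destructor and on
	 * setup-error paths, so every allocation has exactly one release site. */
	static void example_if_free(struct net_device *ndev)
	{
		free_percpu(ndev->tstats);
		free_netdev(ndev);
	}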
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 3df7b03..edd6f294 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -338,6 +338,7 @@
 		} else {
 			rcu_assign_pointer(sta->gtk[idx], new);
 		}
+		ieee80211_check_fast_rx(sta);
 	} else {
 		defunikey = old &&
 			old == key_mtx_dereference(sdata->local,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 8190bf2..7ee91d6 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -558,6 +558,8 @@
 	if (!ops->set_key)
 		wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM);
+
 	wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
 
 	local = wiphy_priv(wiphy);
@@ -799,7 +801,7 @@
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	int result, i;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int channels, max_bitrates;
 	bool supp_ht, supp_vht;
 	netdev_features_t feature_whitelist;
@@ -854,7 +856,7 @@
 	/* Only HW csum features are currently compatible with mac80211 */
 	feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 			    NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA |
-			    NETIF_F_GSO_SOFTWARE;
+			    NETIF_F_GSO_SOFTWARE | NETIF_F_RXCSUM;
 	if (WARN_ON(hw->netdev_features & ~feature_whitelist))
 		return -EINVAL;
 
@@ -872,7 +874,7 @@
 	max_bitrates = 0;
 	supp_ht = false;
 	supp_vht = false;
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		struct ieee80211_supported_band *sband;
 
 		sband = local->hw.wiphy->bands[band];
@@ -934,7 +936,7 @@
 	if (!local->int_scan_req)
 		return -ENOMEM;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		if (!local->hw.wiphy->bands[band])
 			continue;
 		local->int_scan_req->rates[band] = (u32) -1;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d32cefc..4c6404e 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -25,7 +25,6 @@
 
 void ieee80211s_init(void)
 {
-	mesh_pathtbl_init();
 	mesh_allocated = 1;
 	rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry),
 				     0, 0, NULL);
@@ -35,7 +34,6 @@
 {
 	if (!mesh_allocated)
 		return;
-	mesh_pathtbl_unregister();
 	kmem_cache_destroy(rm_cache);
 }
 
@@ -176,22 +174,23 @@
 		return -ENOMEM;
 	sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
 	for (i = 0; i < RMC_BUCKETS; i++)
-		INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
+		INIT_HLIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
 	return 0;
 }
 
 void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_rmc *rmc = sdata->u.mesh.rmc;
-	struct rmc_entry *p, *n;
+	struct rmc_entry *p;
+	struct hlist_node *n;
 	int i;
 
 	if (!sdata->u.mesh.rmc)
 		return;
 
 	for (i = 0; i < RMC_BUCKETS; i++) {
-		list_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
-			list_del(&p->list);
+		hlist_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
+			hlist_del(&p->list);
 			kmem_cache_free(rm_cache, p);
 		}
 	}
@@ -220,16 +219,20 @@
 	u32 seqnum = 0;
 	int entries = 0;
 	u8 idx;
-	struct rmc_entry *p, *n;
+	struct rmc_entry *p;
+	struct hlist_node *n;
+
+	if (!rmc)
+		return -1;
 
 	/* Don't care about endianness since only match matters */
 	memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
 	idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
-	list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
+	hlist_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
 		++entries;
 		if (time_after(jiffies, p->exp_time) ||
 		    entries == RMC_QUEUE_MAX_LEN) {
-			list_del(&p->list);
+			hlist_del(&p->list);
 			kmem_cache_free(rm_cache, p);
 			--entries;
 		} else if ((seqnum == p->seqnum) && ether_addr_equal(sa, p->sa))
@@ -243,7 +246,7 @@
 	p->seqnum = seqnum;
 	p->exp_time = jiffies + RMC_TIMEOUT;
 	memcpy(p->sa, sa, ETH_ALEN);
-	list_add(&p->list, &rmc->bucket[idx]);
+	hlist_add_head(&p->list, &rmc->bucket[idx]);
 	return 0;
 }
 
@@ -412,7 +415,7 @@
 		       struct sk_buff *skb)
 {
 	struct ieee80211_local *local = sdata->local;
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband;
 	u8 *pos;
 
@@ -475,7 +478,7 @@
 			struct sk_buff *skb)
 {
 	struct ieee80211_local *local = sdata->local;
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband;
 	u8 *pos;
 
@@ -677,7 +680,7 @@
 	struct ieee80211_mgmt *mgmt;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct mesh_csa_settings *csa;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	u8 *pos;
 	struct ieee80211_sub_if_data *sdata;
 	int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
@@ -927,7 +930,7 @@
 	struct cfg80211_csa_settings params;
 	struct ieee80211_csa_ie csa_ie;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	int err;
 	u32 sta_flags;
 
@@ -1081,7 +1084,7 @@
 	struct ieee80211_channel *channel;
 	size_t baselen;
 	int freq;
-	enum ieee80211_band band = rx_status->band;
+	enum nl80211_band band = rx_status->band;
 
 	/* ignore ProbeResp to foreign address */
 	if (stype == IEEE80211_STYPE_PROBE_RESP &&
@@ -1348,12 +1351,6 @@
 		       ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
 		mesh_path_start_discovery(sdata);
 
-	if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
-		mesh_mpath_table_grow();
-
-	if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
-		mesh_mpp_table_grow();
-
 	if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
 		ieee80211_mesh_housekeeping(sdata);
 
@@ -1388,6 +1385,9 @@
 	/* Allocate all mesh structures when creating the first mesh interface. */
 	if (!mesh_allocated)
 		ieee80211s_init();
+
+	mesh_pathtbl_init(sdata);
+
 	setup_timer(&ifmsh->mesh_path_timer,
 		    ieee80211_mesh_path_timer,
 		    (unsigned long) sdata);
@@ -1402,3 +1402,9 @@
 
 	sdata->vif.bss_conf.bssid = zero_addr;
 }
+
+void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata)
+{
+	mesh_rmc_free(sdata);
+	mesh_pathtbl_unregister(sdata);
+}
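Moving the RMC buckets from list_head to hlist_head halves the size of the bucket array and matches the usual hashtable convention; the walk-and-free idiom is otherwise unchanged. In isolation:

	struct example_entry {
		struct hlist_node list;
		unsigned long exp_time;
	};

	static void example_bucket_flush(struct hlist_head *bucket)
	{
		struct example_entry *e;
		struct hlist_node *n;

		/* the _safe variant keeps a lookahead pointer so entries can be
		 * unlinked and freed while iterating */
		hlist_for_each_entry_safe(e, n, bucket, list) {
			hlist_del(&e->list);
			kfree(e);
		}
	}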
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 87c017a..26b9ccb 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -21,8 +21,6 @@
 /**
  * enum mesh_path_flags - mac80211 mesh path flags
  *
- *
- *
  * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
  * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
  * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence
@@ -32,6 +30,8 @@
  * @MESH_PATH_RESOLVED: the mesh path can has been resolved
  * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination
  *	already queued up, waiting for the discovery process to start.
+ * @MESH_PATH_DELETED: the mesh path has been deleted and should no longer
+ *	be used
  *
  * MESH_PATH_RESOLVED is used by the mesh path timer to
  * decide when to stop or cancel the mesh path discovery.
@@ -43,6 +43,7 @@
 	MESH_PATH_FIXED	=	BIT(3),
 	MESH_PATH_RESOLVED =	BIT(4),
 	MESH_PATH_REQ_QUEUED =	BIT(5),
+	MESH_PATH_DELETED =	BIT(6),
 };
 
 /**
@@ -51,10 +52,6 @@
  *
  *
  * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks
- * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs
- * to grow.
- * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
- * grow
  * @MESH_WORK_ROOT: the mesh root station needs to send a frame
  * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other
  * mesh nodes
@@ -62,8 +59,6 @@
  */
 enum mesh_deferred_task_flags {
 	MESH_WORK_HOUSEKEEPING,
-	MESH_WORK_GROW_MPATH_TABLE,
-	MESH_WORK_GROW_MPP_TABLE,
 	MESH_WORK_ROOT,
 	MESH_WORK_DRIFT_ADJUST,
 	MESH_WORK_MBSS_CHANGED,
@@ -73,12 +68,16 @@
  * struct mesh_path - mac80211 mesh path structure
  *
  * @dst: mesh path destination mac address
+ * @mpp: mesh proxy mac address
+ * @rhash: rhashtable list pointer
+ * @gate_list: list pointer for known gates list
  * @sdata: mesh subif
  * @next_hop: mesh neighbor to which frames for this destination will be
  *	forwarded
  * @timer: mesh path discovery timer
  * @frame_queue: pending queue for frames sent to this destination while the
  *	path is unresolved
+ * @rcu: rcu head for freeing mesh path
  * @sn: target sequence number
  * @metric: current metric to this destination
  * @hop_count: hops to destination
@@ -97,14 +96,16 @@
  * @is_gate: the destination station of this path is a mesh gate
  *
  *
- * The combination of dst and sdata is unique in the mesh path table. Since the
- * next_hop STA is only protected by RCU as well, deleting the STA must also
- * remove/substitute the mesh_path structure and wait until that is no longer
- * reachable before destroying the STA completely.
+ * The dst address is unique in the mesh path table. Since the mesh_path is
+ * protected by RCU, deleting the next_hop STA must remove / substitute the
+ * mesh_path structure and wait until that is no longer reachable before
+ * destroying the STA completely.
  */
 struct mesh_path {
 	u8 dst[ETH_ALEN];
 	u8 mpp[ETH_ALEN];	/* used for MPP or MAP */
+	struct rhash_head rhash;
+	struct hlist_node gate_list;
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info __rcu *next_hop;
 	struct timer_list timer;
@@ -128,34 +129,17 @@
 /**
  * struct mesh_table
  *
- * @hash_buckets: array of hash buckets of the table
- * @hashwlock: array of locks to protect write operations, one per bucket
- * @hash_mask: 2^size_order - 1, used to compute hash idx
- * @hash_rnd: random value used for hash computations
- * @entries: number of entries in the table
- * @free_node: function to free nodes of the table
- * @copy_node: function to copy nodes of the table
- * @size_order: determines size of the table, there will be 2^size_order hash
- *	buckets
  * @known_gates: list of known mesh gates and their mpaths by the station. The
  * gate's mpath may or may not be resolved and active.
- *
- * rcu_head: RCU head to free the table
+ * @gates_lock: protects updates to known_gates
+ * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @entries: number of entries in the table
  */
 struct mesh_table {
-	/* Number of buckets will be 2^N */
-	struct hlist_head *hash_buckets;
-	spinlock_t *hashwlock;		/* One per bucket, for add/del */
-	unsigned int hash_mask;		/* (2^size_order) - 1 */
-	__u32 hash_rnd;			/* Used for hash generation */
-	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
-	void (*free_node) (struct hlist_node *p, bool free_leafs);
-	int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
-	int size_order;
-	struct hlist_head *known_gates;
+	struct hlist_head known_gates;
 	spinlock_t gates_lock;
-
-	struct rcu_head rcu_head;
+	struct rhashtable rhead;
+	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
 };
 
 /* Recent multicast cache */
@@ -170,20 +154,21 @@
  * @seqnum: mesh sequence number of the frame
  * @exp_time: expiration time of the entry, in jiffies
  * @sa: source address of the frame
+ * @list: hashtable list pointer
  *
  * The Recent Multicast Cache keeps track of the latest multicast frames that
  * have been received by a mesh interface and discards received multicast frames
  * that are found in the cache.
  */
 struct rmc_entry {
-	struct list_head list;
-	u32 seqnum;
+	struct hlist_node list;
 	unsigned long exp_time;
+	u32 seqnum;
 	u8 sa[ETH_ALEN];
 };
 
 struct mesh_rmc {
-	struct list_head bucket[RMC_BUCKETS];
+	struct hlist_head bucket[RMC_BUCKETS];
 	u32 idx_mask;
 };
 
@@ -234,6 +219,7 @@
 void ieee80211s_update_metric(struct ieee80211_local *local,
 			      struct sta_info *sta, struct sk_buff *skb);
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
 int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
@@ -299,9 +285,6 @@
 void mesh_sta_cleanup(struct sta_info *sta);
 
 /* Private interfaces */
-/* Mesh tables */
-void mesh_mpath_table_grow(void);
-void mesh_mpp_table_grow(void);
 /* Mesh paths */
 int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 		       u8 ttl, const u8 *target, u32 target_sn,
@@ -309,8 +292,8 @@
 void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
 void mesh_path_flush_pending(struct mesh_path *mpath);
 void mesh_path_tx_pending(struct mesh_path *mpath);
-int mesh_pathtbl_init(void);
-void mesh_pathtbl_unregister(void);
+int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
+void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
 int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
 void mesh_path_timer(unsigned long data);
 void mesh_path_flush_by_nexthop(struct sta_info *sta);
@@ -319,8 +302,6 @@
 void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
 
 bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
-extern int mesh_paths_generation;
-extern int mpp_paths_generation;
 
 #ifdef CONFIG_MAC80211_MESH
 static inline
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 5b6aec1..8f9c3bd 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -530,7 +530,7 @@
 	const u8 *target_addr, *orig_addr;
 	const u8 *da;
 	u8 target_flags, ttl, flags;
-	u32 orig_sn, target_sn, lifetime, target_metric;
+	u32 orig_sn, target_sn, lifetime, target_metric = 0;
 	bool reply = false;
 	bool forward = true;
 	bool root_is_gate;
@@ -1012,6 +1012,10 @@
 		goto enddiscovery;
 
 	spin_lock_bh(&mpath->state_lock);
+	if (mpath->flags & MESH_PATH_DELETED) {
+		spin_unlock_bh(&mpath->state_lock);
+		goto enddiscovery;
+	}
 	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
 	if (preq_node->flags & PREQ_Q_F_START) {
 		if (mpath->flags & MESH_PATH_RESOLVING) {
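Two small hardening changes here: target_metric is initialised, presumably to avoid a possibly-uninitialised use on the no-reply path, and the discovery work re-checks MESH_PATH_DELETED under state_lock because, with the per-sdata path tables, the path can be torn down concurrently; both readings are inferred from the diff. The second is the common check-under-lock shape:

	spin_lock_bh(&obj->state_lock);
	if (obj->flags & OBJ_DELETED) {		/* hypothetical flag */
		spin_unlock_bh(&obj->state_lock);
		return;				/* object is going away, do nothing */
	}
	obj->flags &= ~OBJ_PENDING;		/* safe: still alive and locked */
	spin_unlock_bh(&obj->state_lock);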
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 2ba7aa5..6db2ddf 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -18,11 +18,22 @@
 #include "ieee80211_i.h"
 #include "mesh.h"
 
-/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
-#define INIT_PATHS_SIZE_ORDER	2
+static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
 
-/* Keep the mean chain length below this constant */
-#define MEAN_CHAIN_LEN		2
+static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
+{
+	/* Use last four bytes of hw addr as hash index */
+	return jhash_1word(*(u32 *)(addr+2), seed);
+}
+
+static const struct rhashtable_params mesh_rht_params = {
+	.nelem_hint = 2,
+	.automatic_shrinking = true,
+	.key_len = ETH_ALEN,
+	.key_offset = offsetof(struct mesh_path, dst),
+	.head_offset = offsetof(struct mesh_path, rhash),
+	.hashfn = mesh_table_hash,
+};
 
 static inline bool mpath_expired(struct mesh_path *mpath)
 {
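The per-bucket arrays, per-bucket locks, and grow machinery removed below are all replaced by an rhashtable keyed on the destination address, with resizing handled by the rhashtable core. The declare/lookup/insert trio, compressed and with shortened names for illustration:

	#include <linux/rhashtable.h>

	struct path {
		u8 dst[ETH_ALEN];
		struct rhash_head rhash;
	};

	static const struct rhashtable_params path_rht_params = {
		.key_len	= ETH_ALEN,
		.key_offset	= offsetof(struct path, dst),
		.head_offset	= offsetof(struct path, rhash),
		.automatic_shrinking = true,
	};

	/* after rhashtable_init(&ht, &path_rht_params): */

	static struct path *path_lookup(struct rhashtable *ht, const u8 *dst)
	{
		return rhashtable_lookup_fast(ht, dst, path_rht_params);
	}

	static int path_insert(struct rhashtable *ht, struct path *p)
	{
		/* returns -EEXIST if another entry with the same dst is present */
		return rhashtable_lookup_insert_fast(ht, &p->rhash, path_rht_params);
	}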
@@ -31,173 +42,36 @@
 	       !(mpath->flags & MESH_PATH_FIXED);
 }
 
-struct mpath_node {
-	struct hlist_node list;
-	struct rcu_head rcu;
-	/* This indirection allows two different tables to point to the same
-	 * mesh_path structure, useful when resizing
-	 */
-	struct mesh_path *mpath;
-};
-
-static struct mesh_table __rcu *mesh_paths;
-static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
-
-int mesh_paths_generation;
-int mpp_paths_generation;
-
-/* This lock will have the grow table function as writer and add / delete nodes
- * as readers. RCU provides sufficient protection only when reading the table
- * (i.e. doing lookups).  Adding or adding or removing nodes requires we take
- * the read lock or we risk operating on an old table.  The write lock is only
- * needed when modifying the number of buckets a table.
- */
-static DEFINE_RWLOCK(pathtbl_resize_lock);
-
-
-static inline struct mesh_table *resize_dereference_paths(
-	struct mesh_table __rcu *table)
+static void mesh_path_rht_free(void *ptr, void *tblptr)
 {
-	return rcu_dereference_protected(table,
-					lockdep_is_held(&pathtbl_resize_lock));
+	struct mesh_path *mpath = ptr;
+	struct mesh_table *tbl = tblptr;
+
+	mesh_path_free_rcu(tbl, mpath);
 }
 
-static inline struct mesh_table *resize_dereference_mesh_paths(void)
+static struct mesh_table *mesh_table_alloc(void)
 {
-	return resize_dereference_paths(mesh_paths);
-}
-
-static inline struct mesh_table *resize_dereference_mpp_paths(void)
-{
-	return resize_dereference_paths(mpp_paths);
-}
-
-/*
- * CAREFUL -- "tbl" must not be an expression,
- * in particular not an rcu_dereference(), since
- * it's used twice. So it is illegal to do
- *	for_each_mesh_entry(rcu_dereference(...), ...)
- */
-#define for_each_mesh_entry(tbl, node, i) \
-	for (i = 0; i <= tbl->hash_mask; i++) \
-		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
-
-
-static struct mesh_table *mesh_table_alloc(int size_order)
-{
-	int i;
 	struct mesh_table *newtbl;
 
 	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
 	if (!newtbl)
 		return NULL;
 
-	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
-			(1 << size_order), GFP_ATOMIC);
-
-	if (!newtbl->hash_buckets) {
-		kfree(newtbl);
-		return NULL;
-	}
-
-	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
-			(1 << size_order), GFP_ATOMIC);
-	if (!newtbl->hashwlock) {
-		kfree(newtbl->hash_buckets);
-		kfree(newtbl);
-		return NULL;
-	}
-
-	newtbl->size_order = size_order;
-	newtbl->hash_mask = (1 << size_order) - 1;
+	INIT_HLIST_HEAD(&newtbl->known_gates);
 	atomic_set(&newtbl->entries,  0);
-	get_random_bytes(&newtbl->hash_rnd,
-			sizeof(newtbl->hash_rnd));
-	for (i = 0; i <= newtbl->hash_mask; i++)
-		spin_lock_init(&newtbl->hashwlock[i]);
 	spin_lock_init(&newtbl->gates_lock);
 
 	return newtbl;
 }
 
-static void __mesh_table_free(struct mesh_table *tbl)
+static void mesh_table_free(struct mesh_table *tbl)
 {
-	kfree(tbl->hash_buckets);
-	kfree(tbl->hashwlock);
+	rhashtable_free_and_destroy(&tbl->rhead,
+				    mesh_path_rht_free, tbl);
 	kfree(tbl);
 }
 
-static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
-{
-	struct hlist_head *mesh_hash;
-	struct hlist_node *p, *q;
-	struct mpath_node *gate;
-	int i;
-
-	mesh_hash = tbl->hash_buckets;
-	for (i = 0; i <= tbl->hash_mask; i++) {
-		spin_lock_bh(&tbl->hashwlock[i]);
-		hlist_for_each_safe(p, q, &mesh_hash[i]) {
-			tbl->free_node(p, free_leafs);
-			atomic_dec(&tbl->entries);
-		}
-		spin_unlock_bh(&tbl->hashwlock[i]);
-	}
-	if (free_leafs) {
-		spin_lock_bh(&tbl->gates_lock);
-		hlist_for_each_entry_safe(gate, q,
-					 tbl->known_gates, list) {
-			hlist_del(&gate->list);
-			kfree(gate);
-		}
-		kfree(tbl->known_gates);
-		spin_unlock_bh(&tbl->gates_lock);
-	}
-
-	__mesh_table_free(tbl);
-}
-
-static int mesh_table_grow(struct mesh_table *oldtbl,
-			   struct mesh_table *newtbl)
-{
-	struct hlist_head *oldhash;
-	struct hlist_node *p, *q;
-	int i;
-
-	if (atomic_read(&oldtbl->entries)
-			< MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1))
-		return -EAGAIN;
-
-	newtbl->free_node = oldtbl->free_node;
-	newtbl->copy_node = oldtbl->copy_node;
-	newtbl->known_gates = oldtbl->known_gates;
-	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
-
-	oldhash = oldtbl->hash_buckets;
-	for (i = 0; i <= oldtbl->hash_mask; i++)
-		hlist_for_each(p, &oldhash[i])
-			if (oldtbl->copy_node(p, newtbl) < 0)
-				goto errcopy;
-
-	return 0;
-
-errcopy:
-	for (i = 0; i <= newtbl->hash_mask; i++) {
-		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
-			oldtbl->free_node(p, 0);
-	}
-	return -ENOMEM;
-}
-
-static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
-			   struct mesh_table *tbl)
-{
-	/* Use last four bytes of hw addr and interface index as hash index */
-	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
-			    tbl->hash_rnd) & tbl->hash_mask;
-}
-
-
 /**
  *
  * mesh_path_assign_nexthop - update mesh path next hop
@@ -340,23 +214,15 @@
 				      struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
-	struct hlist_head *bucket;
-	struct mpath_node *node;
 
-	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
-	hlist_for_each_entry_rcu(node, bucket, list) {
-		mpath = node->mpath;
-		if (mpath->sdata == sdata &&
-		    ether_addr_equal(dst, mpath->dst)) {
-			if (mpath_expired(mpath)) {
-				spin_lock_bh(&mpath->state_lock);
-				mpath->flags &= ~MESH_PATH_ACTIVE;
-				spin_unlock_bh(&mpath->state_lock);
-			}
-			return mpath;
-		}
+	mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params);
+
+	if (mpath && mpath_expired(mpath)) {
+		spin_lock_bh(&mpath->state_lock);
+		mpath->flags &= ~MESH_PATH_ACTIVE;
+		spin_unlock_bh(&mpath->state_lock);
 	}
-	return NULL;
+	return mpath;
 }
 
 /**
@@ -371,15 +237,52 @@
 struct mesh_path *
 mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 {
-	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
+	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
 }
 
 struct mesh_path *
 mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 {
-	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
+	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
 }
 
+static struct mesh_path *
+__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
+{
+	int i = 0, ret;
+	struct mesh_path *mpath = NULL;
+	struct rhashtable_iter iter;
+
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+	if (ret)
+		return NULL;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto err;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
+		if (i++ == idx)
+			break;
+	}
+err:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
+
+	if (IS_ERR(mpath) || !mpath)
+		return NULL;
+
+	if (mpath_expired(mpath)) {
+		spin_lock_bh(&mpath->state_lock);
+		mpath->flags &= ~MESH_PATH_ACTIVE;
+		spin_unlock_bh(&mpath->state_lock);
+	}
+	return mpath;
+}
 
 /**
  * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -393,25 +296,7 @@
 struct mesh_path *
 mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
-	struct mesh_table *tbl = rcu_dereference(mesh_paths);
-	struct mpath_node *node;
-	int i;
-	int j = 0;
-
-	for_each_mesh_entry(tbl, node, i) {
-		if (sdata && node->mpath->sdata != sdata)
-			continue;
-		if (j++ == idx) {
-			if (mpath_expired(node->mpath)) {
-				spin_lock_bh(&node->mpath->state_lock);
-				node->mpath->flags &= ~MESH_PATH_ACTIVE;
-				spin_unlock_bh(&node->mpath->state_lock);
-			}
-			return node->mpath;
-		}
-	}
-
-	return NULL;
+	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
 }
 
 /**
@@ -426,19 +311,7 @@
 struct mesh_path *
 mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
-	struct mesh_table *tbl = rcu_dereference(mpp_paths);
-	struct mpath_node *node;
-	int i;
-	int j = 0;
-
-	for_each_mesh_entry(tbl, node, i) {
-		if (sdata && node->mpath->sdata != sdata)
-			continue;
-		if (j++ == idx)
-			return node->mpath;
-	}
-
-	return NULL;
+	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
 }
 
 /**
@@ -448,30 +321,26 @@
 int mesh_path_add_gate(struct mesh_path *mpath)
 {
 	struct mesh_table *tbl;
-	struct mpath_node *gate, *new_gate;
 	int err;
 
 	rcu_read_lock();
-	tbl = rcu_dereference(mesh_paths);
+	tbl = mpath->sdata->u.mesh.mesh_paths;
 
-	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
-		if (gate->mpath == mpath) {
-			err = -EEXIST;
-			goto err_rcu;
-		}
-
-	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
-	if (!new_gate) {
-		err = -ENOMEM;
+	spin_lock_bh(&mpath->state_lock);
+	if (mpath->is_gate) {
+		err = -EEXIST;
+		spin_unlock_bh(&mpath->state_lock);
 		goto err_rcu;
 	}
-
 	mpath->is_gate = true;
 	mpath->sdata->u.mesh.num_gates++;
-	new_gate->mpath = mpath;
-	spin_lock_bh(&tbl->gates_lock);
-	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
-	spin_unlock_bh(&tbl->gates_lock);
+
+	spin_lock(&tbl->gates_lock);
+	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
+	spin_unlock(&tbl->gates_lock);
+
+	spin_unlock_bh(&mpath->state_lock);
+
 	mpath_dbg(mpath->sdata,
 		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
 		  mpath->dst, mpath->sdata->u.mesh.num_gates);
@@ -485,28 +354,22 @@
  * mesh_gate_del - remove a mesh gate from the list of known gates
  * @tbl: table which holds our list of known gates
  * @mpath: gate mpath
- *
- * Locking: must be called inside rcu_read_lock() section
  */
 static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
-	struct mpath_node *gate;
-	struct hlist_node *q;
+	lockdep_assert_held(&mpath->state_lock);
+	if (!mpath->is_gate)
+		return;
 
-	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
-		if (gate->mpath != mpath)
-			continue;
-		spin_lock_bh(&tbl->gates_lock);
-		hlist_del_rcu(&gate->list);
-		kfree_rcu(gate, rcu);
-		spin_unlock_bh(&tbl->gates_lock);
-		mpath->sdata->u.mesh.num_gates--;
-		mpath->is_gate = false;
-		mpath_dbg(mpath->sdata,
-			  "Mesh path: Deleted gate: %pM. %d known gates\n",
-			  mpath->dst, mpath->sdata->u.mesh.num_gates);
-		break;
-	}
+	mpath->is_gate = false;
+	spin_lock_bh(&tbl->gates_lock);
+	hlist_del_rcu(&mpath->gate_list);
+	mpath->sdata->u.mesh.num_gates--;
+	spin_unlock_bh(&tbl->gates_lock);
+
+	mpath_dbg(mpath->sdata,
+		  "Mesh path: Deleted gate: %pM. %d known gates\n",
+		  mpath->dst, mpath->sdata->u.mesh.num_gates);
 }
 
 /**
@@ -518,6 +381,31 @@
 	return sdata->u.mesh.num_gates;
 }
 
+static
+struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
+				const u8 *dst, gfp_t gfp_flags)
+{
+	struct mesh_path *new_mpath;
+
+	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
+	if (!new_mpath)
+		return NULL;
+
+	memcpy(new_mpath->dst, dst, ETH_ALEN);
+	eth_broadcast_addr(new_mpath->rann_snd_addr);
+	new_mpath->is_root = false;
+	new_mpath->sdata = sdata;
+	new_mpath->flags = 0;
+	skb_queue_head_init(&new_mpath->frame_queue);
+	new_mpath->timer.data = (unsigned long) new_mpath;
+	new_mpath->timer.function = mesh_path_timer;
+	new_mpath->exp_time = jiffies;
+	spin_lock_init(&new_mpath->state_lock);
+	init_timer(&new_mpath->timer);
+
+	return new_mpath;
+}
+
 /**
  * mesh_path_add - allocate and add a new path to the mesh path table
  * @dst: destination address of the path (ETH_ALEN length)
@@ -530,15 +418,9 @@
 struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 				const u8 *dst)
 {
-	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	struct ieee80211_local *local = sdata->local;
 	struct mesh_table *tbl;
 	struct mesh_path *mpath, *new_mpath;
-	struct mpath_node *node, *new_node;
-	struct hlist_head *bucket;
-	int grow = 0;
-	int err;
-	u32 hash_idx;
+	int ret;
 
 	if (ether_addr_equal(dst, sdata->vif.addr))
 		/* never add ourselves as neighbours */
@@ -550,129 +432,44 @@
 	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
 		return ERR_PTR(-ENOSPC);
 
-	read_lock_bh(&pathtbl_resize_lock);
-	tbl = resize_dereference_mesh_paths();
-
-	hash_idx = mesh_table_hash(dst, sdata, tbl);
-	bucket = &tbl->hash_buckets[hash_idx];
-
-	spin_lock(&tbl->hashwlock[hash_idx]);
-
-	hlist_for_each_entry(node, bucket, list) {
-		mpath = node->mpath;
-		if (mpath->sdata == sdata &&
-		    ether_addr_equal(dst, mpath->dst))
-			goto found;
-	}
-
-	err = -ENOMEM;
-	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
+	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
 	if (!new_mpath)
-		goto err_path_alloc;
+		return ERR_PTR(-ENOMEM);
 
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
-	if (!new_node)
-		goto err_node_alloc;
+	tbl = sdata->u.mesh.mesh_paths;
+	do {
+		ret = rhashtable_lookup_insert_fast(&tbl->rhead,
+						    &new_mpath->rhash,
+						    mesh_rht_params);
 
-	memcpy(new_mpath->dst, dst, ETH_ALEN);
-	eth_broadcast_addr(new_mpath->rann_snd_addr);
-	new_mpath->is_root = false;
-	new_mpath->sdata = sdata;
-	new_mpath->flags = 0;
-	skb_queue_head_init(&new_mpath->frame_queue);
-	new_node->mpath = new_mpath;
-	new_mpath->timer.data = (unsigned long) new_mpath;
-	new_mpath->timer.function = mesh_path_timer;
-	new_mpath->exp_time = jiffies;
-	spin_lock_init(&new_mpath->state_lock);
-	init_timer(&new_mpath->timer);
+		if (ret == -EEXIST)
+			mpath = rhashtable_lookup_fast(&tbl->rhead,
+						       dst,
+						       mesh_rht_params);
 
-	hlist_add_head_rcu(&new_node->list, bucket);
-	if (atomic_inc_return(&tbl->entries) >=
-	    MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
-		grow = 1;
+	} while (unlikely(ret == -EEXIST && !mpath));
 
-	mesh_paths_generation++;
+	if (ret && ret != -EEXIST)
+		return ERR_PTR(ret);
 
-	if (grow) {
-		set_bit(MESH_WORK_GROW_MPATH_TABLE,  &ifmsh->wrkq_flags);
-		ieee80211_queue_work(&local->hw, &sdata->work);
+	/* At this point either new_mpath was added, or we found a
+	 * matching entry already in the table; in the latter case
+	 * free the unnecessary new entry.
+	 */
+	if (ret == -EEXIST) {
+		kfree(new_mpath);
+		new_mpath = mpath;
 	}
-	mpath = new_mpath;
-found:
-	spin_unlock(&tbl->hashwlock[hash_idx]);
-	read_unlock_bh(&pathtbl_resize_lock);
-	return mpath;
-
-err_node_alloc:
-	kfree(new_mpath);
-err_path_alloc:
-	atomic_dec(&sdata->u.mesh.mpaths);
-	spin_unlock(&tbl->hashwlock[hash_idx]);
-	read_unlock_bh(&pathtbl_resize_lock);
-	return ERR_PTR(err);
-}
-
-static void mesh_table_free_rcu(struct rcu_head *rcu)
-{
-	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
-
-	mesh_table_free(tbl, false);
-}
-
-void mesh_mpath_table_grow(void)
-{
-	struct mesh_table *oldtbl, *newtbl;
-
-	write_lock_bh(&pathtbl_resize_lock);
-	oldtbl = resize_dereference_mesh_paths();
-	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
-	if (!newtbl)
-		goto out;
-	if (mesh_table_grow(oldtbl, newtbl) < 0) {
-		__mesh_table_free(newtbl);
-		goto out;
-	}
-	rcu_assign_pointer(mesh_paths, newtbl);
-
-	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
-
- out:
-	write_unlock_bh(&pathtbl_resize_lock);
-}
-
-void mesh_mpp_table_grow(void)
-{
-	struct mesh_table *oldtbl, *newtbl;
-
-	write_lock_bh(&pathtbl_resize_lock);
-	oldtbl = resize_dereference_mpp_paths();
-	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
-	if (!newtbl)
-		goto out;
-	if (mesh_table_grow(oldtbl, newtbl) < 0) {
-		__mesh_table_free(newtbl);
-		goto out;
-	}
-	rcu_assign_pointer(mpp_paths, newtbl);
-	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
-
- out:
-	write_unlock_bh(&pathtbl_resize_lock);
+	sdata->u.mesh.mesh_paths_generation++;
+	return new_mpath;
 }
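mesh_path_add() and mpp_path_add() now call a shared mesh_path_new() allocator defined earlier in mesh_pathtbl.c, outside this excerpt. Judging from the initialisation code removed above, a sketch of what that helper most likely contains (an assumption, not quoted from the patch):

static struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				       const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	/* same per-path setup the old open-coded version did */
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	return new_mpath;
}

The lookup/insert retry loop that follows the allocation exists because rhashtable_lookup_insert_fast() can return -EEXIST while a concurrent deletion removes the conflicting entry before the follow-up rhashtable_lookup_fast() runs; the loop repeats until it has either inserted new_mpath or actually observed the pre-existing entry.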
 
 int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 		 const u8 *dst, const u8 *mpp)
 {
-	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	struct ieee80211_local *local = sdata->local;
 	struct mesh_table *tbl;
-	struct mesh_path *mpath, *new_mpath;
-	struct mpath_node *node, *new_node;
-	struct hlist_head *bucket;
-	int grow = 0;
-	int err = 0;
-	u32 hash_idx;
+	struct mesh_path *new_mpath;
+	int ret;
 
 	if (ether_addr_equal(dst, sdata->vif.addr))
 		/* never add ourselves as neighbours */
@@ -681,65 +478,19 @@
 	if (is_multicast_ether_addr(dst))
 		return -ENOTSUPP;
 
-	err = -ENOMEM;
-	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
+	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
+
 	if (!new_mpath)
-		goto err_path_alloc;
+		return -ENOMEM;
 
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
-	if (!new_node)
-		goto err_node_alloc;
-
-	read_lock_bh(&pathtbl_resize_lock);
-	memcpy(new_mpath->dst, dst, ETH_ALEN);
 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
-	new_mpath->sdata = sdata;
-	new_mpath->flags = 0;
-	skb_queue_head_init(&new_mpath->frame_queue);
-	new_node->mpath = new_mpath;
-	init_timer(&new_mpath->timer);
-	new_mpath->exp_time = jiffies;
-	spin_lock_init(&new_mpath->state_lock);
+	tbl = sdata->u.mesh.mpp_paths;
+	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
+					    &new_mpath->rhash,
+					    mesh_rht_params);
 
-	tbl = resize_dereference_mpp_paths();
-
-	hash_idx = mesh_table_hash(dst, sdata, tbl);
-	bucket = &tbl->hash_buckets[hash_idx];
-
-	spin_lock(&tbl->hashwlock[hash_idx]);
-
-	err = -EEXIST;
-	hlist_for_each_entry(node, bucket, list) {
-		mpath = node->mpath;
-		if (mpath->sdata == sdata &&
-		    ether_addr_equal(dst, mpath->dst))
-			goto err_exists;
-	}
-
-	hlist_add_head_rcu(&new_node->list, bucket);
-	if (atomic_inc_return(&tbl->entries) >=
-	    MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
-		grow = 1;
-
-	spin_unlock(&tbl->hashwlock[hash_idx]);
-	read_unlock_bh(&pathtbl_resize_lock);
-
-	mpp_paths_generation++;
-
-	if (grow) {
-		set_bit(MESH_WORK_GROW_MPP_TABLE,  &ifmsh->wrkq_flags);
-		ieee80211_queue_work(&local->hw, &sdata->work);
-	}
-	return 0;
-
-err_exists:
-	spin_unlock(&tbl->hashwlock[hash_idx]);
-	read_unlock_bh(&pathtbl_resize_lock);
-	kfree(new_node);
-err_node_alloc:
-	kfree(new_mpath);
-err_path_alloc:
-	return err;
+	sdata->u.mesh.mpp_paths_generation++;
+	return ret;
 }
 
 
@@ -753,17 +504,26 @@
  */
 void mesh_plink_broken(struct sta_info *sta)
 {
-	struct mesh_table *tbl;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;
-	struct mpath_node *node;
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	int i;
+	struct rhashtable_iter iter;
+	int ret;
 
-	rcu_read_lock();
-	tbl = rcu_dereference(mesh_paths);
-	for_each_mesh_entry(tbl, node, i) {
-		mpath = node->mpath;
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+	if (ret)
+		return;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto out;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
 		if (rcu_access_pointer(mpath->next_hop) == sta &&
 		    mpath->flags & MESH_PATH_ACTIVE &&
 		    !(mpath->flags & MESH_PATH_FIXED)) {
@@ -777,33 +537,30 @@
 				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
 		}
 	}
-	rcu_read_unlock();
+out:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
 }
 
-static void mesh_path_node_reclaim(struct rcu_head *rp)
+static void mesh_path_free_rcu(struct mesh_table *tbl,
+			       struct mesh_path *mpath)
 {
-	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+	struct ieee80211_sub_if_data *sdata = mpath->sdata;
 
-	del_timer_sync(&node->mpath->timer);
-	kfree(node->mpath);
-	kfree(node);
-}
-
-/* needs to be called with the corresponding hashwlock taken */
-static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
-{
-	struct mesh_path *mpath = node->mpath;
-	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
-
-	spin_lock(&mpath->state_lock);
-	mpath->flags |= MESH_PATH_RESOLVING;
-	if (mpath->is_gate)
-		mesh_gate_del(tbl, mpath);
-	hlist_del_rcu(&node->list);
-	call_rcu(&node->rcu, mesh_path_node_reclaim);
-	spin_unlock(&mpath->state_lock);
+	spin_lock_bh(&mpath->state_lock);
+	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
+	mesh_gate_del(tbl, mpath);
+	spin_unlock_bh(&mpath->state_lock);
+	del_timer_sync(&mpath->timer);
 	atomic_dec(&sdata->u.mesh.mpaths);
 	atomic_dec(&tbl->entries);
+	kfree_rcu(mpath, rcu);
+}
+
+static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
+	mesh_path_free_rcu(tbl, mpath);
 }
 
 /**
@@ -819,65 +576,88 @@
  */
 void mesh_path_flush_by_nexthop(struct sta_info *sta)
 {
-	struct mesh_table *tbl;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	struct mesh_path *mpath;
-	struct mpath_node *node;
-	int i;
+	struct rhashtable_iter iter;
+	int ret;
 
-	rcu_read_lock();
-	read_lock_bh(&pathtbl_resize_lock);
-	tbl = resize_dereference_mesh_paths();
-	for_each_mesh_entry(tbl, node, i) {
-		mpath = node->mpath;
-		if (rcu_access_pointer(mpath->next_hop) == sta) {
-			spin_lock(&tbl->hashwlock[i]);
-			__mesh_path_del(tbl, node);
-			spin_unlock(&tbl->hashwlock[i]);
-		}
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+	if (ret)
+		return;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto out;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
+
+		if (rcu_access_pointer(mpath->next_hop) == sta)
+			__mesh_path_del(tbl, mpath);
 	}
-	read_unlock_bh(&pathtbl_resize_lock);
-	rcu_read_unlock();
+out:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
 }
 
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 			       const u8 *proxy)
 {
-	struct mesh_table *tbl;
-	struct mesh_path *mpp;
-	struct mpath_node *node;
-	int i;
+	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
+	struct mesh_path *mpath;
+	struct rhashtable_iter iter;
+	int ret;
 
-	rcu_read_lock();
-	read_lock_bh(&pathtbl_resize_lock);
-	tbl = resize_dereference_mpp_paths();
-	for_each_mesh_entry(tbl, node, i) {
-		mpp = node->mpath;
-		if (ether_addr_equal(mpp->mpp, proxy)) {
-			spin_lock(&tbl->hashwlock[i]);
-			__mesh_path_del(tbl, node);
-			spin_unlock(&tbl->hashwlock[i]);
-		}
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+	if (ret)
+		return;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto out;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
+
+		if (ether_addr_equal(mpath->mpp, proxy))
+			__mesh_path_del(tbl, mpath);
 	}
-	read_unlock_bh(&pathtbl_resize_lock);
-	rcu_read_unlock();
+out:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
 }
 
-static void table_flush_by_iface(struct mesh_table *tbl,
-				 struct ieee80211_sub_if_data *sdata)
+static void table_flush_by_iface(struct mesh_table *tbl)
 {
 	struct mesh_path *mpath;
-	struct mpath_node *node;
-	int i;
+	struct rhashtable_iter iter;
+	int ret;
 
-	WARN_ON(!rcu_read_lock_held());
-	for_each_mesh_entry(tbl, node, i) {
-		mpath = node->mpath;
-		if (mpath->sdata != sdata)
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+	if (ret)
+		return;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto out;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
 			continue;
-		spin_lock_bh(&tbl->hashwlock[i]);
-		__mesh_path_del(tbl, node);
-		spin_unlock_bh(&tbl->hashwlock[i]);
+		if (IS_ERR(mpath))
+			break;
+		__mesh_path_del(tbl, mpath);
 	}
+out:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
 }
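All of the rhashtable walkers converted in this file (mesh_plink_broken(), mesh_path_flush_by_nexthop(), mpp_flush_by_proxy(), table_flush_by_iface() above, and mesh_path_tbl_expire() further down) share the same shape: -EAGAIN from rhashtable_walk_start() or rhashtable_walk_next() only means the table was resized mid-walk, so the walk continues and may in principle visit a live entry more than once, which these callers tolerate. A sketch of that shared shape factored into a callback helper, purely for illustration (this helper is not part of the patch):

static void mesh_path_tbl_for_each(struct mesh_table *tbl,
				   void (*fn)(struct mesh_table *tbl,
					      struct mesh_path *mpath,
					      void *data),
				   void *data)
{
	struct rhashtable_iter iter;
	struct mesh_path *mpath;
	int ret;

	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
	if (ret)
		return;

	ret = rhashtable_walk_start(&iter);
	if (ret && ret != -EAGAIN)
		goto out;

	while ((mpath = rhashtable_walk_next(&iter))) {
		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
			continue;	/* table resized; keep walking */
		if (IS_ERR(mpath))
			break;
		fn(tbl, mpath, data);
	}
out:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}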
 
 /**
@@ -890,16 +670,8 @@
  */
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 {
-	struct mesh_table *tbl;
-
-	rcu_read_lock();
-	read_lock_bh(&pathtbl_resize_lock);
-	tbl = resize_dereference_mesh_paths();
-	table_flush_by_iface(tbl, sdata);
-	tbl = resize_dereference_mpp_paths();
-	table_flush_by_iface(tbl, sdata);
-	read_unlock_bh(&pathtbl_resize_lock);
-	rcu_read_unlock();
+	table_flush_by_iface(sdata->u.mesh.mesh_paths);
+	table_flush_by_iface(sdata->u.mesh.mpp_paths);
 }
 
 /**
@@ -911,37 +683,25 @@
  *
  * Returns: 0 if successful
  */
-static int table_path_del(struct mesh_table __rcu *rcu_tbl,
+static int table_path_del(struct mesh_table *tbl,
 			  struct ieee80211_sub_if_data *sdata,
 			  const u8 *addr)
 {
-	struct mesh_table *tbl;
 	struct mesh_path *mpath;
-	struct mpath_node *node;
-	struct hlist_head *bucket;
-	int hash_idx;
-	int err = 0;
 
-	tbl = resize_dereference_paths(rcu_tbl);
-	hash_idx = mesh_table_hash(addr, sdata, tbl);
-	bucket = &tbl->hash_buckets[hash_idx];
-
-	spin_lock(&tbl->hashwlock[hash_idx]);
-	hlist_for_each_entry(node, bucket, list) {
-		mpath = node->mpath;
-		if (mpath->sdata == sdata &&
-		    ether_addr_equal(addr, mpath->dst)) {
-			__mesh_path_del(tbl, node);
-			goto enddel;
-		}
+	rcu_read_lock();
+	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
+	if (!mpath) {
+		rcu_read_unlock();
+		return -ENXIO;
 	}
 
-	err = -ENXIO;
-enddel:
-	spin_unlock(&tbl->hashwlock[hash_idx]);
-	return err;
+	__mesh_path_del(tbl, mpath);
+	rcu_read_unlock();
+	return 0;
 }
 
+
 /**
  * mesh_path_del - delete a mesh path from the table
  *
@@ -952,36 +712,13 @@
  */
 int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 {
-	int err = 0;
+	int err;
 
 	/* flush relevant mpp entries first */
 	mpp_flush_by_proxy(sdata, addr);
 
-	read_lock_bh(&pathtbl_resize_lock);
-	err = table_path_del(mesh_paths, sdata, addr);
-	mesh_paths_generation++;
-	read_unlock_bh(&pathtbl_resize_lock);
-
-	return err;
-}
-
-/**
- * mpp_path_del - delete a mesh proxy path from the table
- *
- * @addr: addr address (ETH_ALEN length)
- * @sdata: local subif
- *
- * Returns: 0 if successful
- */
-static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
-{
-	int err = 0;
-
-	read_lock_bh(&pathtbl_resize_lock);
-	err = table_path_del(mpp_paths, sdata, addr);
-	mpp_paths_generation++;
-	read_unlock_bh(&pathtbl_resize_lock);
-
+	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
+	sdata->u.mesh.mesh_paths_generation++;
 	return err;
 }
 
@@ -1015,39 +752,30 @@
 	struct ieee80211_sub_if_data *sdata = mpath->sdata;
 	struct mesh_table *tbl;
 	struct mesh_path *from_mpath = mpath;
-	struct mpath_node *gate = NULL;
+	struct mesh_path *gate;
 	bool copy = false;
-	struct hlist_head *known_gates;
+
+	tbl = sdata->u.mesh.mesh_paths;
 
 	rcu_read_lock();
-	tbl = rcu_dereference(mesh_paths);
-	known_gates = tbl->known_gates;
-	rcu_read_unlock();
-
-	if (!known_gates)
-		return -EHOSTUNREACH;
-
-	hlist_for_each_entry_rcu(gate, known_gates, list) {
-		if (gate->mpath->sdata != sdata)
-			continue;
-
-		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
-			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
-			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
-			from_mpath = gate->mpath;
+	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
+		if (gate->flags & MESH_PATH_ACTIVE) {
+			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
+			mesh_path_move_to_queue(gate, from_mpath, copy);
+			from_mpath = gate;
 			copy = true;
 		} else {
 			mpath_dbg(sdata,
 				  "Not forwarding to %pM (flags %#x)\n",
-				  gate->mpath->dst, gate->mpath->flags);
+				  gate->dst, gate->flags);
 		}
 	}
 
-	hlist_for_each_entry_rcu(gate, known_gates, list)
-		if (gate->mpath->sdata == sdata) {
-			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
-			mesh_path_tx_pending(gate->mpath);
-		}
+	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
+		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
+		mesh_path_tx_pending(gate);
+	}
+	rcu_read_unlock();
 
 	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
 }
@@ -1104,118 +832,73 @@
 	mesh_path_tx_pending(mpath);
 }
 
-static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
-{
-	struct mesh_path *mpath;
-	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
-	mpath = node->mpath;
-	hlist_del_rcu(p);
-	if (free_leafs) {
-		del_timer_sync(&mpath->timer);
-		kfree(mpath);
-	}
-	kfree(node);
-}
-
-static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
-{
-	struct mesh_path *mpath;
-	struct mpath_node *node, *new_node;
-	u32 hash_idx;
-
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
-	if (new_node == NULL)
-		return -ENOMEM;
-
-	node = hlist_entry(p, struct mpath_node, list);
-	mpath = node->mpath;
-	new_node->mpath = mpath;
-	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
-	hlist_add_head(&new_node->list,
-			&newtbl->hash_buckets[hash_idx]);
-	return 0;
-}
-
-int mesh_pathtbl_init(void)
+int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_table *tbl_path, *tbl_mpp;
 	int ret;
 
-	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	tbl_path = mesh_table_alloc();
 	if (!tbl_path)
 		return -ENOMEM;
-	tbl_path->free_node = &mesh_path_node_free;
-	tbl_path->copy_node = &mesh_path_node_copy;
-	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
-	if (!tbl_path->known_gates) {
-		ret = -ENOMEM;
-		goto free_path;
-	}
-	INIT_HLIST_HEAD(tbl_path->known_gates);
 
-
-	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	tbl_mpp = mesh_table_alloc();
 	if (!tbl_mpp) {
 		ret = -ENOMEM;
 		goto free_path;
 	}
-	tbl_mpp->free_node = &mesh_path_node_free;
-	tbl_mpp->copy_node = &mesh_path_node_copy;
-	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
-	if (!tbl_mpp->known_gates) {
-		ret = -ENOMEM;
-		goto free_mpp;
-	}
-	INIT_HLIST_HEAD(tbl_mpp->known_gates);
 
-	/* Need no locking since this is during init */
-	RCU_INIT_POINTER(mesh_paths, tbl_path);
-	RCU_INIT_POINTER(mpp_paths, tbl_mpp);
+	rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
+	rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
+
+	sdata->u.mesh.mesh_paths = tbl_path;
+	sdata->u.mesh.mpp_paths = tbl_mpp;
 
 	return 0;
 
-free_mpp:
-	mesh_table_free(tbl_mpp, true);
 free_path:
-	mesh_table_free(tbl_path, true);
+	mesh_table_free(tbl_path);
 	return ret;
 }
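Both tables are keyed by the destination address via the rhash member embedded in struct mesh_path, so the mesh_rht_params used throughout this file (defined near the top of mesh_pathtbl.c, outside this excerpt) presumably look much like the sta_rht_params visible in the sta_info.c hunk below; roughly this, with the sizing hints being guesses:

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,		/* sizing hints are guesses here */
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
};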
 
+static
+void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
+			  struct mesh_table *tbl)
+{
+	struct mesh_path *mpath;
+	struct rhashtable_iter iter;
+	int ret;
+
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
+	if (ret)
+		return;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto out;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
+		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
+		    (!(mpath->flags & MESH_PATH_FIXED)) &&
+		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
+			__mesh_path_del(tbl, mpath);
+	}
+out:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
+}
+
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 {
-	struct mesh_table *tbl;
-	struct mesh_path *mpath;
-	struct mpath_node *node;
-	int i;
-
-	rcu_read_lock();
-	tbl = rcu_dereference(mesh_paths);
-	for_each_mesh_entry(tbl, node, i) {
-		if (node->mpath->sdata != sdata)
-			continue;
-		mpath = node->mpath;
-		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
-		    (!(mpath->flags & MESH_PATH_FIXED)) &&
-		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
-			mesh_path_del(mpath->sdata, mpath->dst);
-	}
-
-	tbl = rcu_dereference(mpp_paths);
-	for_each_mesh_entry(tbl, node, i) {
-		if (node->mpath->sdata != sdata)
-			continue;
-		mpath = node->mpath;
-		if ((!(mpath->flags & MESH_PATH_FIXED)) &&
-		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
-			mpp_path_del(mpath->sdata, mpath->dst);
-	}
-
-	rcu_read_unlock();
+	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
+	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
 }
 
-void mesh_pathtbl_unregister(void)
+void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
 {
-	/* no need for locking during exit path */
-	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
-	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
+	mesh_table_free(sdata->u.mesh.mesh_paths);
+	mesh_table_free(sdata->u.mesh.mpp_paths);
 }
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index a07e93c..79f2a0a 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -61,7 +61,7 @@
 	s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
 	return rssi_threshold == 0 ||
 	       (sta &&
-		(s8)-ewma_signal_read(&sta->rx_stats.avg_signal) >
+		(s8)-ewma_signal_read(&sta->rx_stats_avg.signal) >
 						rssi_threshold);
 }
 
@@ -93,18 +93,18 @@
 static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
 	struct sta_info *sta;
 	u32 erp_rates = 0, changed = 0;
 	int i;
 	bool short_slot = false;
 
-	if (band == IEEE80211_BAND_5GHZ) {
+	if (band == NL80211_BAND_5GHZ) {
 		/* (IEEE 802.11-2012 19.4.5) */
 		short_slot = true;
 		goto out;
-	} else if (band != IEEE80211_BAND_2GHZ)
+	} else if (band != NL80211_BAND_2GHZ)
 		goto out;
 
 	for (i = 0; i < sband->n_bitrates; i++)
@@ -247,7 +247,7 @@
 	mgmt->u.action.u.self_prot.action_code = action;
 
 	if (action != WLAN_SP_MESH_PEERING_CLOSE) {
-		enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+		enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 
 		/* capability info */
 		pos = skb_put(skb, 2);
@@ -331,7 +331,9 @@
  *
  * @sta: mesh peer link to deactivate
  *
- * All mesh paths with this peer as next hop will be flushed
+ * Mesh paths with this peer as next hop should be flushed
+ * by the caller outside of plink_lock.
+ *
  * Returns beacon changed flag if the beacon content changed.
  *
  * Locking: the caller must hold sta->mesh->plink_lock
@@ -346,7 +348,6 @@
 	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
 		changed = mesh_plink_dec_estab_count(sdata);
 	sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
-	mesh_path_flush_by_nexthop(sta);
 
 	ieee80211_mps_sta_status_update(sta);
 	changed |= ieee80211_mps_set_sta_local_pm(sta,
@@ -374,6 +375,7 @@
 			    sta->sta.addr, sta->mesh->llid, sta->mesh->plid,
 			    sta->mesh->reason);
 	spin_unlock_bh(&sta->mesh->plink_lock);
+	mesh_path_flush_by_nexthop(sta);
 
 	return changed;
 }
@@ -383,7 +385,7 @@
 			       struct ieee802_11_elems *elems, bool insert)
 {
 	struct ieee80211_local *local = sdata->local;
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband;
 	u32 rates, basic_rates = 0, changed = 0;
 	enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
@@ -748,6 +750,7 @@
 	changed = __mesh_plink_deactivate(sta);
 	sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
 	spin_unlock_bh(&sta->mesh->plink_lock);
+	mesh_path_flush_by_nexthop(sta);
 
 	return changed;
 }
@@ -797,6 +800,7 @@
 	struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
 	enum ieee80211_self_protected_actioncode action = 0;
 	u32 changed = 0;
+	bool flush = false;
 
 	mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
 		mplstates[sta->mesh->plink_state], mplevents[event]);
@@ -885,6 +889,7 @@
 			changed |= mesh_set_short_slot_time(sdata);
 			mesh_plink_close(sdata, sta, event);
 			action = WLAN_SP_MESH_PEERING_CLOSE;
+			flush = true;
 			break;
 		case OPN_ACPT:
 			action = WLAN_SP_MESH_PEERING_CONFIRM;
@@ -916,6 +921,8 @@
 		break;
 	}
 	spin_unlock_bh(&sta->mesh->plink_lock);
+	if (flush)
+		mesh_path_flush_by_nexthop(sta);
 	if (action) {
 		mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
 				    sta->mesh->llid, sta->mesh->plid,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 281b8d6..8d426f6 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -122,15 +122,16 @@
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-	if (unlikely(!sdata->u.mgd.associated))
+	if (unlikely(!ifmgd->associated))
 		return;
 
-	ifmgd->probe_send_count = 0;
+	if (ifmgd->probe_send_count)
+		ifmgd->probe_send_count = 0;
 
 	if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
 		return;
 
-	mod_timer(&sdata->u.mgd.conn_mon_timer,
+	mod_timer(&ifmgd->conn_mon_timer,
 		  round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
 }
 
@@ -660,7 +661,7 @@
 
 	capab = WLAN_CAPABILITY_ESS;
 
-	if (sband->band == IEEE80211_BAND_2GHZ) {
+	if (sband->band == NL80211_BAND_2GHZ) {
 		capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
 		capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
 	}
@@ -1099,7 +1100,7 @@
 	struct cfg80211_bss *cbss = ifmgd->associated;
 	struct ieee80211_chanctx_conf *conf;
 	struct ieee80211_chanctx *chanctx;
-	enum ieee80211_band current_band;
+	enum nl80211_band current_band;
 	struct ieee80211_csa_ie csa_ie;
 	struct ieee80211_channel_switch ch_switch;
 	int res;
@@ -1256,11 +1257,11 @@
 	default:
 		WARN_ON_ONCE(1);
 		/* fall through */
-	case IEEE80211_BAND_2GHZ:
-	case IEEE80211_BAND_60GHZ:
+	case NL80211_BAND_2GHZ:
+	case NL80211_BAND_60GHZ:
 		chan_increment = 1;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		chan_increment = 4;
 		break;
 	}
@@ -1860,7 +1861,7 @@
 	}
 
 	use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
-	if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_5GHZ)
+	if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_5GHZ)
 		use_short_slot = true;
 
 	if (use_protection != bss_conf->use_cts_prot) {
@@ -2216,6 +2217,7 @@
 	const u8 *ssid;
 	u8 *dst = ifmgd->associated->bssid;
 	u8 unicast_limit = max(1, max_probe_tries - 3);
+	struct sta_info *sta;
 
 	/*
 	 * Try sending broadcast probe requests for the last three
@@ -2234,6 +2236,14 @@
 	 */
 	ifmgd->probe_send_count++;
 
+	if (dst) {
+		mutex_lock(&sdata->local->sta_mtx);
+		sta = sta_info_get(sdata, dst);
+		if (!WARN_ON(!sta))
+			ieee80211_check_fast_rx(sta);
+		mutex_unlock(&sdata->local->sta_mtx);
+	}
+
 	if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
 		ifmgd->nullfunc_failed = false;
 		ieee80211_send_nullfunc(sdata->local, sdata, false);
@@ -2389,6 +2399,11 @@
 		return;
 	}
 
+	/* AP is probably out of range (or not reachable for another reason) so
+	 * remove the bss struct for that AP.
+	 */
+	cfg80211_unlink_bss(local->hw.wiphy, ifmgd->associated);
+
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
 			       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
 			       true, frame_buf);
@@ -4365,7 +4380,7 @@
 		sdata->vif.bss_conf.basic_rates = basic_rates;
 
 		/* cf. IEEE 802.11 9.2.12 */
-		if (cbss->channel->band == IEEE80211_BAND_2GHZ &&
+		if (cbss->channel->band == NL80211_BAND_2GHZ &&
 		    have_higher_than_11mbit)
 			sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
 		else
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 0be0aad..88e6ebb 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -75,8 +75,6 @@
 	if (!sta)
 		return;
 
-	sta->rx_stats.last_rx = jiffies;
-
 	/* Add only mandatory rates for now */
 	sband = local->hw.wiphy->bands[band];
 	sta->sta.supp_rates[band] =
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index a4e2f4e..206698b 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -287,7 +287,7 @@
 	u32 rate_flags =
 		ieee80211_chandef_rate_flags(&hw->conf.chandef);
 
-	if ((sband->band == IEEE80211_BAND_2GHZ) &&
+	if ((sband->band == NL80211_BAND_2GHZ) &&
 	    (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
 		rate_flags |= IEEE80211_RATE_ERP_G;
 
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 624fe5b..8d32607 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -96,9 +96,9 @@
 {
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct rate_control_ref *ref = sta->rate_ctrl;
-	if (ref && sta->debugfs.dir && ref->ops->add_sta_debugfs)
+	if (ref && sta->debugfs_dir && ref->ops->add_sta_debugfs)
 		ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv,
-					  sta->debugfs.dir);
+					  sta->debugfs_dir);
 #endif
 }
 
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index b54f398..14c5ba3 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -436,7 +436,7 @@
 
 
 static void
-calc_rate_durations(enum ieee80211_band band,
+calc_rate_durations(enum nl80211_band band,
 		    struct minstrel_rate *d,
 		    struct ieee80211_rate *rate,
 		    struct cfg80211_chan_def *chandef)
@@ -579,7 +579,7 @@
 	if (!mi)
 		return NULL;
 
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+	for (i = 0; i < NUM_NL80211_BANDS; i++) {
 		sband = hw->wiphy->bands[i];
 		if (sband && sband->n_bitrates > max_rates)
 			max_rates = sband->n_bitrates;
@@ -621,7 +621,7 @@
 	u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
 	int i, j;
 
-	sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+	sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
 	if (!sband)
 		return;
 
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 370d677..30fbabf 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -883,6 +883,59 @@
 	ratetbl->rate[offset].flags = flags;
 }
 
+static inline int
+minstrel_ht_get_prob_ewma(struct minstrel_ht_sta *mi, int rate)
+{
+	int group = rate / MCS_GROUP_RATES;
+	rate %= MCS_GROUP_RATES;
+	return mi->groups[group].rates[rate].prob_ewma;
+}
+
+static int
+minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
+{
+	int group = mi->max_prob_rate / MCS_GROUP_RATES;
+	const struct mcs_group *g = &minstrel_mcs_groups[group];
+	int rate = mi->max_prob_rate % MCS_GROUP_RATES;
+
+	/* Disable A-MSDU if max_prob_rate is bad */
+	if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100))
+		return 1;
+
+	/* If the rate is slower than single-stream MCS1, make A-MSDU limit small */
+	if (g->duration[rate] > MCS_DURATION(1, 0, 52))
+		return 500;
+
+	/*
+	 * If the rate is slower than single-stream MCS4, limit A-MSDU to usual
+	 * data packet size
+	 */
+	if (g->duration[rate] > MCS_DURATION(1, 0, 104))
+		return 1600;
+
+	/*
+	 * If the rate is slower than single-stream MCS7, or if the max throughput
+	 * rate success probability is less than 75%, limit A-MSDU to twice the usual
+	 * data packet size
+	 */
+	if (g->duration[rate] > MCS_DURATION(1, 0, 260) ||
+	    (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) <
+	     MINSTREL_FRAC(75, 100)))
+		return 3200;
+
+	/*
+	 * HT A-MPDU limits maximum MPDU size under BA agreement to 4095 bytes.
+	 * Since aggregation sessions are started/stopped without txq flush, use
+	 * the limit here to avoid the complexity of having to de-aggregate
+	 * packets in the queue.
+	 */
+	if (!mi->sta->vht_cap.vht_supported)
+		return IEEE80211_MAX_MPDU_LEN_HT_BA;
+
+	/* unlimited */
+	return 0;
+}
+
 static void
 minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 {
@@ -907,6 +960,7 @@
 		minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
 	}
 
+	mi->sta->max_rc_amsdu_len = minstrel_ht_get_max_amsdu_len(mi);
 	rates->rate[i].idx = -1;
 	rate_control_set_rates(mp->hw, mi->sta, rates);
 }
@@ -924,6 +978,7 @@
 	struct minstrel_rate_stats *mrs;
 	struct minstrel_mcs_group_data *mg;
 	unsigned int sample_dur, sample_group, cur_max_tp_streams;
+	int tp_rate1, tp_rate2;
 	int sample_idx = 0;
 
 	if (mi->sample_wait > 0) {
@@ -945,14 +1000,22 @@
 	mrs = &mg->rates[sample_idx];
 	sample_idx += sample_group * MCS_GROUP_RATES;
 
+	/* Set tp_rate1, tp_rate2 to the highest / second highest max_tp_rate */
+	if (minstrel_get_duration(mi->max_tp_rate[0]) >
+	    minstrel_get_duration(mi->max_tp_rate[1])) {
+		tp_rate1 = mi->max_tp_rate[1];
+		tp_rate2 = mi->max_tp_rate[0];
+	} else {
+		tp_rate1 = mi->max_tp_rate[0];
+		tp_rate2 = mi->max_tp_rate[1];
+	}
+
 	/*
 	 * Sampling might add some overhead (RTS, no aggregation)
-	 * to the frame. Hence, don't use sampling for the currently
-	 * used rates.
+	 * to the frame. Hence, don't use sampling for the currently used
+	 * highest-throughput rate or the highest-probability rate.
 	 */
-	if (sample_idx == mi->max_tp_rate[0] ||
-	    sample_idx == mi->max_tp_rate[1] ||
-	    sample_idx == mi->max_prob_rate)
+	if (sample_idx == mi->max_tp_rate[0] || sample_idx == mi->max_prob_rate)
 		return -1;
 
 	/*
@@ -967,10 +1030,10 @@
 	 * if the link is working perfectly.
 	 */
 
-	cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
+	cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 /
 		MCS_GROUP_RATES].streams;
 	sample_dur = minstrel_get_duration(sample_idx);
-	if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) &&
+	if (sample_dur >= minstrel_get_duration(tp_rate2) &&
 	    (cur_max_tp_streams - 1 <
 	     minstrel_mcs_groups[sample_group].streams ||
 	     sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
@@ -1074,7 +1137,7 @@
 {
 	int i;
 
-	if (sband->band != IEEE80211_BAND_2GHZ)
+	if (sband->band != NL80211_BAND_2GHZ)
 		return;
 
 	if (!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES))
@@ -1272,7 +1335,7 @@
 	int max_rates = 0;
 	int i;
 
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+	for (i = 0; i < NUM_NL80211_BANDS; i++) {
 		sband = hw->wiphy->bands[i];
 		if (sband && sband->n_bitrates > max_rates)
 			max_rates = sband->n_bitrates;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index dc27bec..5e65e83 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -322,7 +322,7 @@
 	else if (status->flag & RX_FLAG_5MHZ)
 		channel_flags |= IEEE80211_CHAN_QUARTER;
 
-	if (status->band == IEEE80211_BAND_5GHZ)
+	if (status->band == NL80211_BAND_5GHZ)
 		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
 	else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
 		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
@@ -722,8 +722,8 @@
 	return -1;
 }
 
-static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
-				 struct sk_buff *skb)
+static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
+				  struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	__le16 fc;
@@ -1319,13 +1319,52 @@
 }
 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
 
+void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
+{
+	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+
+	if (test_sta_flag(sta, WLAN_STA_SP))
+		return;
+
+	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
+		ieee80211_sta_ps_deliver_poll_response(sta);
+	else
+		set_sta_flag(sta, WLAN_STA_PSPOLL);
+}
+EXPORT_SYMBOL(ieee80211_sta_pspoll);
+
+void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
+{
+	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+	u8 ac = ieee802_1d_to_ac[tid & 7];
+
+	/*
+	 * If this AC is not trigger-enabled do nothing.
+	 *
+	 * NB: This could/should check a separate bitmap of trigger-
+	 * enabled queues, but for now we only implement uAPSD w/o
+	 * TSPEC changes to the ACs, so they're always the same.
+	 */
+	if (!(sta->sta.uapsd_queues & BIT(ac)))
+		return;
+
+	/* if we are in a service period, do nothing */
+	if (test_sta_flag(sta, WLAN_STA_SP))
+		return;
+
+	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
+		ieee80211_sta_ps_deliver_uapsd(sta);
+	else
+		set_sta_flag(sta, WLAN_STA_UAPSD);
+}
+EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
+
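ieee80211_sta_pspoll() and ieee80211_sta_uapsd_trigger() are new exports, presumably so that drivers which hand data frames to the stack themselves (such as users of the fast-rx/RSS path added to this file further down) can still report PS-Poll and uAPSD trigger frames to mac80211. A hypothetical driver-side call site, for illustration only (drv_rx_trigger_check() and its arguments are made up):

static void drv_rx_trigger_check(struct ieee80211_sta *pubsta,
				 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(pubsta);
	} else if (ieee80211_is_data_qos(hdr->frame_control) &&
		   ieee80211_has_pm(hdr->frame_control)) {
		u8 tid = *ieee80211_get_qos_ctl(hdr) &
			 IEEE80211_QOS_CTL_TID_MASK;

		ieee80211_sta_uapsd_trigger(pubsta, tid);
	}
}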
 static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
 {
 	struct ieee80211_sub_if_data *sdata = rx->sdata;
 	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
-	int tid, ac;
 
 	if (!rx->sta)
 		return RX_CONTINUE;
@@ -1351,12 +1390,7 @@
 		return RX_CONTINUE;
 
 	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
-		if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
-			if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
-				ieee80211_sta_ps_deliver_poll_response(rx->sta);
-			else
-				set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
-		}
+		ieee80211_sta_pspoll(&rx->sta->sta);
 
 		/* Free PS Poll skb here instead of returning RX_DROP that would
 		 * count as a dropped frame. */
@@ -1368,27 +1402,11 @@
 		   ieee80211_has_pm(hdr->frame_control) &&
 		   (ieee80211_is_data_qos(hdr->frame_control) ||
 		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
+		u8 tid;
+
 		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
-		ac = ieee802_1d_to_ac[tid & 7];
 
-		/*
-		 * If this AC is not trigger-enabled do nothing.
-		 *
-		 * NB: This could/should check a separate bitmap of trigger-
-		 * enabled queues, but for now we only implement uAPSD w/o
-		 * TSPEC changes to the ACs, so they're always the same.
-		 */
-		if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
-			return RX_CONTINUE;
-
-		/* if we are in a service period, do nothing */
-		if (test_sta_flag(rx->sta, WLAN_STA_SP))
-			return RX_CONTINUE;
-
-		if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
-			ieee80211_sta_ps_deliver_uapsd(rx->sta);
-		else
-			set_sta_flag(rx->sta, WLAN_STA_UAPSD);
+		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
 	}
 
 	return RX_CONTINUE;
@@ -1421,16 +1439,9 @@
 		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
 			sta->rx_stats.last_rx = jiffies;
 			if (ieee80211_is_data(hdr->frame_control) &&
-			    !is_multicast_ether_addr(hdr->addr1)) {
-				sta->rx_stats.last_rate_idx =
-					status->rate_idx;
-				sta->rx_stats.last_rate_flag =
-					status->flag;
-				sta->rx_stats.last_rate_vht_flag =
-					status->vht_flag;
-				sta->rx_stats.last_rate_vht_nss =
-					status->vht_nss;
-			}
+			    !is_multicast_ether_addr(hdr->addr1))
+				sta->rx_stats.last_rate =
+					sta_stats_encode_rate(status);
 		}
 	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
 		sta->rx_stats.last_rx = jiffies;
@@ -1440,22 +1451,22 @@
 		 * match the current local configuration when processed.
 		 */
 		sta->rx_stats.last_rx = jiffies;
-		if (ieee80211_is_data(hdr->frame_control)) {
-			sta->rx_stats.last_rate_idx = status->rate_idx;
-			sta->rx_stats.last_rate_flag = status->flag;
-			sta->rx_stats.last_rate_vht_flag = status->vht_flag;
-			sta->rx_stats.last_rate_vht_nss = status->vht_nss;
-		}
+		if (ieee80211_is_data(hdr->frame_control))
+			sta->rx_stats.last_rate = sta_stats_encode_rate(status);
 	}
 
 	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
 		ieee80211_sta_rx_notify(rx->sdata, hdr);
 
 	sta->rx_stats.fragments++;
+
+	u64_stats_update_begin(&rx->sta->rx_stats.syncp);
 	sta->rx_stats.bytes += rx->skb->len;
+	u64_stats_update_end(&rx->sta->rx_stats.syncp);
+
 	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
 		sta->rx_stats.last_signal = status->signal;
-		ewma_signal_add(&sta->rx_stats.avg_signal, -status->signal);
+		ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
 	}
 
 	if (status->chains) {
@@ -1467,7 +1478,7 @@
 				continue;
 
 			sta->rx_stats.chain_signal_last[i] = signal;
-			ewma_signal_add(&sta->rx_stats.chain_signal_avg[i],
+			ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
 					-signal);
 		}
 	}
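The u64_stats_update_begin()/u64_stats_update_end() pairs added around the byte counter above and the MSDU counter further down let the 64-bit statistics be updated and read without locks even on 32-bit machines. The matching reader, which is not part of this excerpt, would use the usual seqcount-style retry loop, roughly:

	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&sta->rx_stats.syncp);
		bytes = sta->rx_stats.bytes;
	} while (u64_stats_fetch_retry(&sta->rx_stats.syncp, start));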
@@ -1586,7 +1597,7 @@
 
 		if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
 			cs = rx->sta->cipher_scheme;
-			keyid = iwl80211_get_cs_keyid(cs, rx->skb);
+			keyid = ieee80211_get_cs_keyid(cs, rx->skb);
 			if (unlikely(keyid < 0))
 				return RX_DROP_UNUSABLE;
 		}
@@ -1670,7 +1681,7 @@
 		hdrlen = ieee80211_hdrlen(fc);
 
 		if (cs) {
-			keyidx = iwl80211_get_cs_keyid(cs, rx->skb);
+			keyidx = ieee80211_get_cs_keyid(cs, rx->skb);
 
 			if (unlikely(keyidx < 0))
 				return RX_DROP_UNUSABLE;
@@ -2129,6 +2140,17 @@
 
 	ieee80211_rx_stats(dev, skb->len);
 
+	if (rx->sta) {
+		/* The seqno index has the same property as needed
+		 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
+		 * for non-QoS-data frames. Here we know it's a data
+		 * frame, so count MSDUs.
+		 */
+		u64_stats_update_begin(&rx->sta->rx_stats.syncp);
+		rx->sta->rx_stats.msdu[rx->seqno_idx]++;
+		u64_stats_update_end(&rx->sta->rx_stats.syncp);
+	}
+
 	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
 	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
 	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
@@ -2415,15 +2437,6 @@
 	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
 		return RX_DROP_MONITOR;
 
-	if (rx->sta) {
-		/* The seqno index has the same property as needed
-		 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
-		 * for non-QoS-data frames. Here we know it's a data
-		 * frame, so count MSDUs.
-		 */
-		rx->sta->rx_stats.msdu[rx->seqno_idx]++;
-	}
-
 	/*
 	 * Send unexpected-4addr-frame event to hostapd. For older versions,
 	 * also drop the frame to cooked monitor interfaces.
@@ -2474,14 +2487,14 @@
 
 	rx->skb->dev = dev;
 
-	if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
+	if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
+	    local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
 	    !is_multicast_ether_addr(
 		    ((struct ethhdr *)rx->skb->data)->h_dest) &&
 	    (!local->scanning &&
-	     !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
-			mod_timer(&local->dynamic_ps_timer, jiffies +
-			 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
-	}
+	     !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
+		mod_timer(&local->dynamic_ps_timer, jiffies +
+			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
 
 	ieee80211_deliver_skb(rx);
 
@@ -2828,7 +2841,7 @@
 
 		switch (mgmt->u.action.u.measurement.action_code) {
 		case WLAN_ACTION_SPCT_MSR_REQ:
-			if (status->band != IEEE80211_BAND_5GHZ)
+			if (status->band != NL80211_BAND_5GHZ)
 				break;
 
 			if (len < (IEEE80211_MIN_ACTION_SIZE +
@@ -3201,7 +3214,7 @@
 		res = rxh(rx);		\
 		if (res != RX_CONTINUE)	\
 			goto rxh_next;  \
-	} while (0);
+	} while (0)
 
 	/* Lock here to avoid hitting all of the data used in the RX
 	 * path (e.g. key data, station data, ...) concurrently when
@@ -3219,30 +3232,30 @@
 		 */
 		rx->skb = skb;
 
-		CALL_RXH(ieee80211_rx_h_check_more_data)
-		CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
-		CALL_RXH(ieee80211_rx_h_sta_process)
-		CALL_RXH(ieee80211_rx_h_decrypt)
-		CALL_RXH(ieee80211_rx_h_defragment)
-		CALL_RXH(ieee80211_rx_h_michael_mic_verify)
+		CALL_RXH(ieee80211_rx_h_check_more_data);
+		CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
+		CALL_RXH(ieee80211_rx_h_sta_process);
+		CALL_RXH(ieee80211_rx_h_decrypt);
+		CALL_RXH(ieee80211_rx_h_defragment);
+		CALL_RXH(ieee80211_rx_h_michael_mic_verify);
 		/* must be after MMIC verify so header is counted in MPDU mic */
 #ifdef CONFIG_MAC80211_MESH
 		if (ieee80211_vif_is_mesh(&rx->sdata->vif))
 			CALL_RXH(ieee80211_rx_h_mesh_fwding);
 #endif
-		CALL_RXH(ieee80211_rx_h_amsdu)
-		CALL_RXH(ieee80211_rx_h_data)
+		CALL_RXH(ieee80211_rx_h_amsdu);
+		CALL_RXH(ieee80211_rx_h_data);
 
 		/* special treatment -- needs the queue */
 		res = ieee80211_rx_h_ctrl(rx, frames);
 		if (res != RX_CONTINUE)
 			goto rxh_next;
 
-		CALL_RXH(ieee80211_rx_h_mgmt_check)
-		CALL_RXH(ieee80211_rx_h_action)
-		CALL_RXH(ieee80211_rx_h_userspace_mgmt)
-		CALL_RXH(ieee80211_rx_h_action_return)
-		CALL_RXH(ieee80211_rx_h_mgmt)
+		CALL_RXH(ieee80211_rx_h_mgmt_check);
+		CALL_RXH(ieee80211_rx_h_action);
+		CALL_RXH(ieee80211_rx_h_userspace_mgmt);
+		CALL_RXH(ieee80211_rx_h_action_return);
+		CALL_RXH(ieee80211_rx_h_mgmt);
 
  rxh_next:
 		ieee80211_rx_handlers_result(rx, res);
@@ -3265,10 +3278,10 @@
 		res = rxh(rx);		\
 		if (res != RX_CONTINUE)	\
 			goto rxh_next;  \
-	} while (0);
+	} while (0)
 
-	CALL_RXH(ieee80211_rx_h_check_dup)
-	CALL_RXH(ieee80211_rx_h_check)
+	CALL_RXH(ieee80211_rx_h_check_dup);
+	CALL_RXH(ieee80211_rx_h_check);
 
 	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
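Dropping the trailing semicolon from the CALL_RXH() macro body (here and in the earlier hunk) and adding it at every call site is the usual do { } while (0) macro hygiene fix: with the semicolon baked into the macro, a call site written as a normal statement expands to an extra empty statement and breaks if/else nesting. A minimal illustration, not taken from the patch:

#define CALL_BAD(x)	do { (x)++; } while (0);	/* old style */
#define CALL_GOOD(x)	do { (x)++; } while (0)		/* new style */

void example(int *v, int cond)
{
	if (cond)
		CALL_GOOD(*v);		/* fine: the caller supplies the ';' */
	else
		(*v)--;

	/* With CALL_BAD(*v); in the branch above, the expansion ends in
	 * ";;": the stray empty statement closes the if, so the following
	 * else no longer has an if to attach to and the code fails to
	 * compile.
	 */
}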
 
@@ -3513,6 +3526,351 @@
 	return false;
 }
 
+void ieee80211_check_fast_rx(struct sta_info *sta)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_key *key;
+	struct ieee80211_fast_rx fastrx = {
+		.dev = sdata->dev,
+		.vif_type = sdata->vif.type,
+		.control_port_protocol = sdata->control_port_protocol,
+	}, *old, *new = NULL;
+	bool assign = false;
+
+	/* use sparse to check that we don't return without updating */
+	__acquire(check_fast_rx);
+
+	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
+	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
+	ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
+	ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
+
+	fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
+
+	/* fast-rx doesn't do reordering */
+	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
+	    !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
+		goto clear;
+
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_STATION:
+		/* 4-addr is harder to deal with, later maybe */
+		if (sdata->u.mgd.use_4addr)
+			goto clear;
+		/* software powersave is a huge mess, avoid all of it */
+		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
+			goto clear;
+		if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
+		    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
+			goto clear;
+		if (sta->sta.tdls) {
+			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
+			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+			fastrx.expected_ds_bits = 0;
+		} else {
+			fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
+			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
+			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
+			fastrx.expected_ds_bits =
+				cpu_to_le16(IEEE80211_FCTL_FROMDS);
+		}
+		break;
+	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_AP:
+		/* parallel-rx requires this, at least with calls to
+		 * ieee80211_sta_ps_transition()
+		 */
+		if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
+			goto clear;
+		fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
+		fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+		fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
+
+		fastrx.internal_forward =
+			!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
+			(sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
+			 !sdata->u.vlan.sta);
+		break;
+	default:
+		goto clear;
+	}
+
+	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+		goto clear;
+
+	rcu_read_lock();
+	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+	if (key) {
+		switch (key->conf.cipher) {
+		case WLAN_CIPHER_SUITE_TKIP:
+			/* we don't want to deal with MMIC in fast-rx */
+			goto clear_rcu;
+		case WLAN_CIPHER_SUITE_CCMP:
+		case WLAN_CIPHER_SUITE_CCMP_256:
+		case WLAN_CIPHER_SUITE_GCMP:
+		case WLAN_CIPHER_SUITE_GCMP_256:
+			break;
+		default:
+			/* we also don't want to deal with WEP or cipher scheme
+			 * since those require looking up the key idx in the
+			 * frame, rather than assuming the PTK is used
+			 * (we need to revisit this once we implement the real
+			 * PTK index, which is now valid in the spec, but we
+			 * haven't implemented that part yet)
+			 */
+			goto clear_rcu;
+		}
+
+		fastrx.key = true;
+		fastrx.icv_len = key->conf.icv_len;
+	}
+
+	assign = true;
+ clear_rcu:
+	rcu_read_unlock();
+ clear:
+	__release(check_fast_rx);
+
+	if (assign)
+		new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
+
+	spin_lock_bh(&sta->lock);
+	old = rcu_dereference_protected(sta->fast_rx, true);
+	rcu_assign_pointer(sta->fast_rx, new);
+	spin_unlock_bh(&sta->lock);
+
+	if (old)
+		kfree_rcu(old, rcu_head);
+}
+
+void ieee80211_clear_fast_rx(struct sta_info *sta)
+{
+	struct ieee80211_fast_rx *old;
+
+	spin_lock_bh(&sta->lock);
+	old = rcu_dereference_protected(sta->fast_rx, true);
+	RCU_INIT_POINTER(sta->fast_rx, NULL);
+	spin_unlock_bh(&sta->lock);
+
+	if (old)
+		kfree_rcu(old, rcu_head);
+}
+
+void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+
+	lockdep_assert_held(&local->sta_mtx);
+
+	list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		if (sdata != sta->sdata &&
+		    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
+			continue;
+		ieee80211_check_fast_rx(sta);
+	}
+}
+
+void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+
+	mutex_lock(&local->sta_mtx);
+	__ieee80211_check_fast_rx_iface(sdata);
+	mutex_unlock(&local->sta_mtx);
+}
+
+static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
+				     struct ieee80211_fast_rx *fast_rx)
+{
+	struct sk_buff *skb = rx->skb;
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	struct sta_info *sta = rx->sta;
+	int orig_len = skb->len;
+	int snap_offs = ieee80211_hdrlen(hdr->frame_control);
+	struct {
+		u8 snap[sizeof(rfc1042_header)];
+		__be16 proto;
+	} *payload __aligned(2);
+	struct {
+		u8 da[ETH_ALEN];
+		u8 sa[ETH_ALEN];
+	} addrs __aligned(2);
+	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
+
+	if (fast_rx->uses_rss)
+		stats = this_cpu_ptr(sta->pcpu_rx_stats);
+
+	/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
+	 * to a common data structure; drivers can implement that per queue
+	 * but we don't have that information in mac80211
+	 */
+	if (!(status->flag & RX_FLAG_DUP_VALIDATED))
+		return false;
+
+#define FAST_RX_CRYPT_FLAGS	(RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
+
+	/* If using encryption, we also need to have:
+	 *  - PN_VALIDATED: similar, but the implementation is tricky
+	 *  - DECRYPTED: necessary for PN_VALIDATED
+	 */
+	if (fast_rx->key &&
+	    (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
+		return false;
+
+	/* we don't deal with A-MSDU deaggregation here */
+	if (status->rx_flags & IEEE80211_RX_AMSDU)
+		return false;
+
+	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+		return false;
+
+	if (unlikely(ieee80211_is_frag(hdr)))
+		return false;
+
+	/* Since our interface address cannot be multicast, this
+	 * implicitly also rejects multicast frames without the
+	 * explicit check.
+	 *
+	 * We shouldn't get any *data* frames not addressed to us
+	 * (AP mode will accept multicast *management* frames), but
+	 * punting here will make it go through the full checks in
+	 * ieee80211_accept_frame().
+	 */
+	if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
+		return false;
+
+	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
+					      IEEE80211_FCTL_TODS)) !=
+	    fast_rx->expected_ds_bits)
+		goto drop;
+
+	/* assign the key to drop unencrypted frames (later)
+	 * and strip the IV/MIC if necessary
+	 */
+	if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
+		/* GCMP header length is the same */
+		snap_offs += IEEE80211_CCMP_HDR_LEN;
+	}
+
+	if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
+		goto drop;
+	payload = (void *)(skb->data + snap_offs);
+
+	if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
+		return false;
+
+	/* Don't handle these here since they require special code.
+	 * Accept AARP and IPX even though they should come with a
+	 * bridge-tunnel header - but if we get them this way then
+	 * there's little point in discarding them.
+	 */
+	if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
+		     payload->proto == fast_rx->control_port_protocol))
+		return false;
+
+	/* after this point, don't punt to the slowpath! */
+
+	if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
+	    pskb_trim(skb, skb->len - fast_rx->icv_len))
+		goto drop;
+
+	if (unlikely(fast_rx->sta_notify)) {
+		ieee80211_sta_rx_notify(rx->sdata, hdr);
+		fast_rx->sta_notify = false;
+	}
+
+	/* statistics part of ieee80211_rx_h_sta_process() */
+	stats->last_rx = jiffies;
+	stats->last_rate = sta_stats_encode_rate(status);
+
+	stats->fragments++;
+
+	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
+		stats->last_signal = status->signal;
+		if (!fast_rx->uses_rss)
+			ewma_signal_add(&sta->rx_stats_avg.signal,
+					-status->signal);
+	}
+
+	if (status->chains) {
+		int i;
+
+		stats->chains = status->chains;
+		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
+			int signal = status->chain_signal[i];
+
+			if (!(status->chains & BIT(i)))
+				continue;
+
+			stats->chain_signal_last[i] = signal;
+			if (!fast_rx->uses_rss)
+				ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
+						-signal);
+		}
+	}
+	/* end of statistics */
+
+	if (rx->key && !ieee80211_has_protected(hdr->frame_control))
+		goto drop;
+
+	/* do the header conversion - first grab the addresses */
+	ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
+	ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
+	/* remove the SNAP but leave the ethertype */
+	skb_pull(skb, snap_offs + sizeof(rfc1042_header));
+	/* push the addresses in front */
+	memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
+
+	skb->dev = fast_rx->dev;
+
+	ieee80211_rx_stats(fast_rx->dev, skb->len);
+
+	/* The seqno index has the same property as needed
+	 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
+	 * for non-QoS-data frames. Here we know it's a data
+	 * frame, so count MSDUs.
+	 */
+	u64_stats_update_begin(&stats->syncp);
+	stats->msdu[rx->seqno_idx]++;
+	stats->bytes += orig_len;
+	u64_stats_update_end(&stats->syncp);
+
+	if (fast_rx->internal_forward) {
+		struct sta_info *dsta = sta_info_get(rx->sdata, skb->data);
+
+		if (dsta) {
+			/*
+			 * Send to wireless media and increase priority by 256
+			 * to keep the received priority instead of
+			 * reclassifying the frame (see cfg80211_classify8021d).
+			 */
+			skb->priority += 256;
+			skb->protocol = htons(ETH_P_802_3);
+			skb_reset_network_header(skb);
+			skb_reset_mac_header(skb);
+			dev_queue_xmit(skb);
+			return true;
+		}
+	}
+
+	/* deliver to local stack */
+	skb->protocol = eth_type_trans(skb, fast_rx->dev);
+	memset(skb->cb, 0, sizeof(skb->cb));
+	if (rx->napi)
+		napi_gro_receive(rx->napi, skb);
+	else
+		netif_receive_skb(skb);
+
+	return true;
+ drop:
+	dev_kfree_skb(skb);
+	stats->dropped++;
+	return true;
+}
+
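The heart of ieee80211_invoke_fast_rx() above is an in-place 802.11-to-Ethernet header conversion: it saves DA/SA from the offsets precomputed in ieee80211_check_fast_rx(), strips the 802.11 header and the RFC 1042 SNAP prefix while keeping the two-byte ethertype, and pushes the two addresses back in front. A standalone, user-space style sketch of the same transformation for a plain unencrypted, non-A-MSDU frame (buffer arithmetic only; the function and parameter names are made up):

#include <stdint.h>
#include <string.h>

#define ETH_ALEN	6

/* Convert an 802.11 data frame to an Ethernet II frame in place.
 * hdrlen is the 802.11 header length; da_offs/sa_offs are the offsets
 * of the destination/source addresses inside that header (they depend
 * on the ToDS/FromDS bits, which is why fast-rx precomputes them).
 * Returns the new frame start, or NULL if the frame does not qualify.
 */
static uint8_t *dot11_to_eth(uint8_t *frame, size_t len, size_t hdrlen,
			     size_t da_offs, size_t sa_offs)
{
	static const uint8_t rfc1042[ETH_ALEN] = {
		0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
	uint8_t da[ETH_ALEN], sa[ETH_ALEN];
	uint8_t *payload = frame + hdrlen;

	if (len < hdrlen + sizeof(rfc1042) + 2)
		return NULL;
	if (memcmp(payload, rfc1042, sizeof(rfc1042)) != 0)
		return NULL;	/* not RFC 1042 encapsulated */

	memcpy(da, frame + da_offs, ETH_ALEN);
	memcpy(sa, frame + sa_offs, ETH_ALEN);

	/* Drop the 802.11 header plus the SNAP header but keep the
	 * ethertype, then place DA and SA right before it; this mirrors
	 * the skb_pull()/skb_push()/memcpy() sequence in fast-rx.
	 */
	payload += sizeof(rfc1042);		/* now at the ethertype */
	payload -= 2 * ETH_ALEN;
	memcpy(payload, da, ETH_ALEN);
	memcpy(payload + ETH_ALEN, sa, ETH_ALEN);
	return payload;
}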
 /*
  * This function returns whether or not the SKB
  * was destined for RX processing or not, which,
@@ -3527,6 +3885,21 @@
 
 	rx->skb = skb;
 
+	/* See if we can do fast-rx; if we have to copy we already lost,
+	 * so punt in that case. We should never have to deliver a data
+	 * frame to multiple interfaces anyway.
+	 *
+	 * We skip the ieee80211_accept_frame() call and do the necessary
+	 * checking inside ieee80211_invoke_fast_rx().
+	 */
+	if (consume && rx->sta) {
+		struct ieee80211_fast_rx *fast_rx;
+
+		fast_rx = rcu_dereference(rx->sta->fast_rx);
+		if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
+			return true;
+	}
+
 	if (!ieee80211_accept_frame(rx))
 		return false;
 
@@ -3552,6 +3925,7 @@
  * be called with rcu_read_lock protection.
  */
 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+					 struct ieee80211_sta *pubsta,
 					 struct sk_buff *skb,
 					 struct napi_struct *napi)
 {
@@ -3561,7 +3935,6 @@
 	__le16 fc;
 	struct ieee80211_rx_data rx;
 	struct ieee80211_sub_if_data *prev;
-	struct sta_info *sta, *prev_sta;
 	struct rhash_head *tmp;
 	int err = 0;
 
@@ -3597,7 +3970,14 @@
 		     ieee80211_is_beacon(hdr->frame_control)))
 		ieee80211_scan_rx(local, skb);
 
-	if (ieee80211_is_data(fc)) {
+	if (pubsta) {
+		rx.sta = container_of(pubsta, struct sta_info, sta);
+		rx.sdata = rx.sta->sdata;
+		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+			return;
+		goto out;
+	} else if (ieee80211_is_data(fc)) {
+		struct sta_info *sta, *prev_sta;
 		const struct bucket_table *tbl;
 
 		prev_sta = NULL;
@@ -3671,8 +4051,8 @@
  * This is the receive path handler. It is called by a low level driver when an
  * 802.11 MPDU is received from the hardware.
  */
-void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
-		       struct napi_struct *napi)
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
+		       struct sk_buff *skb, struct napi_struct *napi)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_rate *rate = NULL;
@@ -3681,7 +4061,7 @@
 
 	WARN_ON_ONCE(softirq_count() == 0);
 
-	if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
+	if (WARN_ON(status->band >= NUM_NL80211_BANDS))
 		goto drop;
 
 	sband = local->hw.wiphy->bands[status->band];
@@ -3771,7 +4151,8 @@
 	ieee80211_tpt_led_trig_rx(local,
 			((struct ieee80211_hdr *)skb->data)->frame_control,
 			skb->len);
-	__ieee80211_rx_handle_packet(hw, skb, napi);
+
+	__ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
 
 	rcu_read_unlock();
 
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index ae980ce..f9648ef 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -66,7 +66,9 @@
 	struct cfg80211_bss *cbss;
 	struct ieee80211_bss *bss;
 	int clen, srlen;
-	struct cfg80211_inform_bss bss_meta = {};
+	struct cfg80211_inform_bss bss_meta = {
+		.boottime_ns = rx_status->boottime_ns,
+	};
 	bool signal_valid;
 
 	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
@@ -270,7 +272,7 @@
 		n_chans = req->n_channels;
 	} else {
 		do {
-			if (local->hw_scan_band == IEEE80211_NUM_BANDS)
+			if (local->hw_scan_band == NUM_NL80211_BANDS)
 				return false;
 
 			n_chans = 0;
@@ -303,6 +305,7 @@
 	ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
 	ether_addr_copy(local->hw_scan_req->req.mac_addr_mask,
 			req->mac_addr_mask);
+	ether_addr_copy(local->hw_scan_req->req.bssid, req->bssid);
 
 	return true;
 }
@@ -482,7 +485,7 @@
 	int i;
 	struct ieee80211_sub_if_data *sdata;
 	struct cfg80211_scan_request *scan_req;
-	enum ieee80211_band band = local->hw.conf.chandef.chan->band;
+	enum nl80211_band band = local->hw.conf.chandef.chan->band;
 	u32 tx_flags;
 
 	scan_req = rcu_dereference_protected(local->scan_req,
@@ -497,7 +500,7 @@
 
 	for (i = 0; i < scan_req->n_ssids; i++)
 		ieee80211_send_probe_req(
-			sdata, local->scan_addr, NULL,
+			sdata, local->scan_addr, scan_req->bssid,
 			scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len,
 			scan_req->ie, scan_req->ie_len,
 			scan_req->rates[band], false,
@@ -562,6 +565,7 @@
 			req->n_channels * sizeof(req->channels[0]);
 		local->hw_scan_req->req.ie = ies;
 		local->hw_scan_req->req.flags = req->flags;
+		eth_broadcast_addr(local->hw_scan_req->req.bssid);
 
 		local->hw_scan_band = 0;
 
@@ -949,7 +953,7 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	int ret = -EBUSY, i, n_ch = 0;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	mutex_lock(&local->mtx);
 
@@ -961,7 +965,7 @@
 	if (!channels) {
 		int max_n;
 
-		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		for (band = 0; band < NUM_NL80211_BANDS; band++) {
 			if (!local->hw.wiphy->bands[band])
 				continue;
 
@@ -1081,7 +1085,7 @@
 	struct ieee80211_scan_ies sched_scan_ies = {};
 	struct cfg80211_chan_def chandef;
 	int ret, i, iebufsz, num_bands = 0;
-	u32 rate_masks[IEEE80211_NUM_BANDS] = {};
+	u32 rate_masks[NUM_NL80211_BANDS] = {};
 	u8 bands_used = 0;
 	u8 *ie;
 	size_t len;
@@ -1093,7 +1097,7 @@
 	if (!local->ops->sched_scan_start)
 		return -ENOTSUPP;
 
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+	for (i = 0; i < NUM_NL80211_BANDS; i++) {
 		if (local->hw.wiphy->bands[i]) {
 			bands_used |= BIT(i);
 			rate_masks[i] = (u32) -1;
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 06e6ac8..2ddc661 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -23,11 +23,11 @@
 
 int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
 				 struct ieee802_11_elems *elems,
-				 enum ieee80211_band current_band,
+				 enum nl80211_band current_band,
 				 u32 sta_flags, u8 *bssid,
 				 struct ieee80211_csa_ie *csa_ie)
 {
-	enum ieee80211_band new_band;
+	enum nl80211_band new_band;
 	int new_freq;
 	u8 new_chan_no;
 	struct ieee80211_channel *new_chan;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index d20bab5..5ccfdbd 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2,7 +2,7 @@
  * Copyright 2002-2005, Instant802 Networks, Inc.
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -67,6 +67,7 @@
 
 static const struct rhashtable_params sta_rht_params = {
 	.nelem_hint = 3, /* start small */
+	.insecure_elasticity = true, /* Disable chain-length checks. */
 	.automatic_shrinking = true,
 	.head_offset = offsetof(struct sta_info, hash_node),
 	.key_offset = offsetof(struct sta_info, addr),
@@ -254,15 +255,16 @@
 #ifdef CONFIG_MAC80211_MESH
 	kfree(sta->mesh);
 #endif
+	free_percpu(sta->pcpu_rx_stats);
 	kfree(sta);
 }
 
 /* Caller must hold local->sta_mtx */
-static void sta_info_hash_add(struct ieee80211_local *local,
-			      struct sta_info *sta)
+static int sta_info_hash_add(struct ieee80211_local *local,
+			     struct sta_info *sta)
 {
-	rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
-			       sta_rht_params);
+	return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
+				      sta_rht_params);
 }
 
 static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -311,6 +313,13 @@
 	if (!sta)
 		return NULL;
 
+	if (ieee80211_hw_check(hw, USES_RSS)) {
+		sta->pcpu_rx_stats =
+			alloc_percpu(struct ieee80211_sta_rx_stats);
+		if (!sta->pcpu_rx_stats)
+			goto free;
+	}
+
 	spin_lock_init(&sta->lock);
 	spin_lock_init(&sta->ps_lock);
 	INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
@@ -335,15 +344,17 @@
 	sta->sdata = sdata;
 	sta->rx_stats.last_rx = jiffies;
 
+	u64_stats_init(&sta->rx_stats.syncp);
+
 	sta->sta_state = IEEE80211_STA_NONE;
 
 	/* Mark TID as unreserved */
 	sta->reserved_tid = IEEE80211_TID_UNRESERVED;
 
 	sta->last_connected = ktime_get_seconds();
-	ewma_signal_init(&sta->rx_stats.avg_signal);
-	for (i = 0; i < ARRAY_SIZE(sta->rx_stats.chain_signal_avg); i++)
-		ewma_signal_init(&sta->rx_stats.chain_signal_avg[i]);
+	ewma_signal_init(&sta->rx_stats_avg.signal);
+	for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++)
+		ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]);
 
 	if (local->ops->wake_tx_queue) {
 		void *txq_data;
@@ -406,6 +417,8 @@
 		}
 	}
 
+	sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
+
 	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
 
 	return sta;
@@ -524,7 +537,9 @@
 	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
 
 	/* make the station visible */
-	sta_info_hash_add(local, sta);
+	err = sta_info_hash_add(local, sta);
+	if (err)
+		goto out_drop_sta;
 
 	list_add_tail_rcu(&sta->list, &local->sta_list);
 
@@ -557,6 +572,7 @@
  out_remove:
 	sta_info_hash_del(local, sta);
 	list_del_rcu(&sta->list);
+ out_drop_sta:
 	local->num_sta--;
 	synchronize_net();
 	__cleanup_single_sta(sta);
@@ -875,6 +891,13 @@
 	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
 	ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);
 
+	/*
+	 * Before removing the station from the driver there might be pending
+	 * rx frames on RSS queues sent prior to the disassociation - wait for
+	 * all such frames to be processed.
+	 */
+	drv_sync_rx_queues(local, sta);
+
 	ret = sta_info_hash_del(local, sta);
 	if (WARN_ON(ret))
 		return ret;
@@ -1087,10 +1110,12 @@
 	mutex_lock(&local->sta_mtx);
 
 	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+		unsigned long last_active = ieee80211_sta_last_active(sta);
+
 		if (sdata != sta->sdata)
 			continue;
 
-		if (time_after(jiffies, sta->rx_stats.last_rx + exp_time)) {
+		if (time_is_before_jiffies(last_active + exp_time)) {
 			sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
 				sta->sta.addr);
 
@@ -1760,6 +1785,31 @@
 }
 EXPORT_SYMBOL(ieee80211_sta_set_buffered);
 
+static void
+ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	bool allow_p2p_go_ps = sdata->vif.p2p;
+	struct sta_info *sta;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		if (sdata != sta->sdata ||
+		    !test_sta_flag(sta, WLAN_STA_ASSOC))
+			continue;
+		if (!sta->sta.support_p2p_ps) {
+			allow_p2p_go_ps = false;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) {
+		sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps;
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS);
+	}
+}
+
 int sta_info_move_state(struct sta_info *sta,
 			enum ieee80211_sta_state new_state)
 {
@@ -1821,12 +1871,16 @@
 		} else if (sta->sta_state == IEEE80211_STA_ASSOC) {
 			clear_bit(WLAN_STA_ASSOC, &sta->_flags);
 			ieee80211_recalc_min_chandef(sta->sdata);
+			if (!sta->sta.support_p2p_ps)
+				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
 		}
 		break;
 	case IEEE80211_STA_ASSOC:
 		if (sta->sta_state == IEEE80211_STA_AUTH) {
 			set_bit(WLAN_STA_ASSOC, &sta->_flags);
 			ieee80211_recalc_min_chandef(sta->sdata);
+			if (!sta->sta.support_p2p_ps)
+				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
 		} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
 			if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
 			    (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
@@ -1834,6 +1888,7 @@
 				atomic_dec(&sta->sdata->bss->num_mcast_sta);
 			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
 			ieee80211_clear_fast_xmit(sta);
+			ieee80211_clear_fast_rx(sta);
 		}
 		break;
 	case IEEE80211_STA_AUTHORIZED:
@@ -1844,6 +1899,7 @@
 				atomic_inc(&sta->sdata->bss->num_mcast_sta);
 			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
 			ieee80211_check_fast_xmit(sta);
+			ieee80211_check_fast_rx(sta);
 		}
 		break;
 	default:
@@ -1890,43 +1946,117 @@
 			>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
 }
 
-static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+static struct ieee80211_sta_rx_stats *
+sta_get_last_rx_stats(struct sta_info *sta)
 {
-	rinfo->flags = 0;
+	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
+	struct ieee80211_local *local = sta->local;
+	int cpu;
 
-	if (sta->rx_stats.last_rate_flag & RX_FLAG_HT) {
-		rinfo->flags |= RATE_INFO_FLAGS_MCS;
-		rinfo->mcs = sta->rx_stats.last_rate_idx;
-	} else if (sta->rx_stats.last_rate_flag & RX_FLAG_VHT) {
-		rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
-		rinfo->nss = sta->rx_stats.last_rate_vht_nss;
-		rinfo->mcs = sta->rx_stats.last_rate_idx;
-	} else {
+	if (!ieee80211_hw_check(&local->hw, USES_RSS))
+		return stats;
+
+	for_each_possible_cpu(cpu) {
+		struct ieee80211_sta_rx_stats *cpustats;
+
+		cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+
+		if (time_after(cpustats->last_rx, stats->last_rx))
+			stats = cpustats;
+	}
+
+	return stats;
+}
+
+static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
+				  struct rate_info *rinfo)
+{
+	rinfo->bw = (rate & STA_STATS_RATE_BW_MASK) >>
+		STA_STATS_RATE_BW_SHIFT;
+
+	if (rate & STA_STATS_RATE_VHT) {
+		rinfo->flags = RATE_INFO_FLAGS_VHT_MCS;
+		rinfo->mcs = rate & 0xf;
+		rinfo->nss = (rate & 0xf0) >> 4;
+	} else if (rate & STA_STATS_RATE_HT) {
+		rinfo->flags = RATE_INFO_FLAGS_MCS;
+		rinfo->mcs = rate & 0xff;
+	} else if (rate & STA_STATS_RATE_LEGACY) {
 		struct ieee80211_supported_band *sband;
-		int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
 		u16 brate;
+		unsigned int shift;
 
-		sband = sta->local->hw.wiphy->bands[
-				ieee80211_get_sdata_band(sta->sdata)];
-		brate = sband->bitrates[sta->rx_stats.last_rate_idx].bitrate;
+		sband = local->hw.wiphy->bands[(rate >> 4) & 0xf];
+		brate = sband->bitrates[rate & 0xf].bitrate;
+		if (rinfo->bw == RATE_INFO_BW_5)
+			shift = 2;
+		else if (rinfo->bw == RATE_INFO_BW_10)
+			shift = 1;
+		else
+			shift = 0;
 		rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
 	}
 
-	if (sta->rx_stats.last_rate_flag & RX_FLAG_SHORT_GI)
+	if (rate & STA_STATS_RATE_SGI)
 		rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+}
 
-	if (sta->rx_stats.last_rate_flag & RX_FLAG_5MHZ)
-		rinfo->bw = RATE_INFO_BW_5;
-	else if (sta->rx_stats.last_rate_flag & RX_FLAG_10MHZ)
-		rinfo->bw = RATE_INFO_BW_10;
-	else if (sta->rx_stats.last_rate_flag & RX_FLAG_40MHZ)
-		rinfo->bw = RATE_INFO_BW_40;
-	else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_80MHZ)
-		rinfo->bw = RATE_INFO_BW_80;
-	else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_160MHZ)
-		rinfo->bw = RATE_INFO_BW_160;
+static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+{
+	u16 rate = ACCESS_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+
+	if (rate == STA_STATS_RATE_INVALID)
+		rinfo->flags = 0;
 	else
-		rinfo->bw = RATE_INFO_BW_20;
+		sta_stats_decode_rate(sta->local, rate, rinfo);
+}
+
+static void sta_set_tidstats(struct sta_info *sta,
+			     struct cfg80211_tid_stats *tidstats,
+			     int tid)
+{
+	struct ieee80211_local *local = sta->local;
+
+	if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
+		unsigned int start;
+
+		do {
+			start = u64_stats_fetch_begin(&sta->rx_stats.syncp);
+			tidstats->rx_msdu = sta->rx_stats.msdu[tid];
+		} while (u64_stats_fetch_retry(&sta->rx_stats.syncp, start));
+
+		tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
+	}
+
+	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
+		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
+		tidstats->tx_msdu = sta->tx_stats.msdu[tid];
+	}
+
+	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
+	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
+		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
+		tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid];
+	}
+
+	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
+	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
+		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
+		tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid];
+	}
+}
+
+static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
+{
+	unsigned int start;
+	u64 value;
+
+	do {
+		start = u64_stats_fetch_begin(&rxstats->syncp);
+		value = rxstats->bytes;
+	} while (u64_stats_fetch_retry(&rxstats->syncp, start));
+
+	return value;
 }
 
 void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
@@ -1935,7 +2065,10 @@
 	struct ieee80211_local *local = sdata->local;
 	struct rate_control_ref *ref = NULL;
 	u32 thr = 0;
-	int i, ac;
+	int i, ac, cpu;
+	struct ieee80211_sta_rx_stats *last_rxstats;
+
+	last_rxstats = sta_get_last_rx_stats(sta);
 
 	if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
 		ref = local->rate_ctrl;
@@ -1964,7 +2097,7 @@
 
 	sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
 	sinfo->inactive_time =
-		jiffies_to_msecs(jiffies - sta->rx_stats.last_rx);
+		jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));
 
 	if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) |
 			       BIT(NL80211_STA_INFO_TX_BYTES)))) {
@@ -1983,12 +2116,30 @@
 
 	if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) |
 			       BIT(NL80211_STA_INFO_RX_BYTES)))) {
-		sinfo->rx_bytes = sta->rx_stats.bytes;
+		sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
+
+		if (sta->pcpu_rx_stats) {
+			for_each_possible_cpu(cpu) {
+				struct ieee80211_sta_rx_stats *cpurxs;
+
+				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+				sinfo->rx_bytes += sta_get_stats_bytes(cpurxs);
+			}
+		}
+
 		sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
 	}
 
 	if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) {
 		sinfo->rx_packets = sta->rx_stats.packets;
+		if (sta->pcpu_rx_stats) {
+			for_each_possible_cpu(cpu) {
+				struct ieee80211_sta_rx_stats *cpurxs;
+
+				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+				sinfo->rx_packets += cpurxs->packets;
+			}
+		}
 		sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
 	}
 
@@ -2003,6 +2154,14 @@
 	}
 
 	sinfo->rx_dropped_misc = sta->rx_stats.dropped;
+	if (sta->pcpu_rx_stats) {
+		for_each_possible_cpu(cpu) {
+			struct ieee80211_sta_rx_stats *cpurxs;
+
+			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+			sinfo->rx_packets += cpurxs->dropped;
+		}
+	}
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
 	    !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
@@ -2014,29 +2173,36 @@
 	if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
 	    ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
 		if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
-			sinfo->signal = (s8)sta->rx_stats.last_signal;
+			sinfo->signal = (s8)last_rxstats->last_signal;
 			sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
 		}
 
-		if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
+		if (!sta->pcpu_rx_stats &&
+		    !(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
 			sinfo->signal_avg =
-				-ewma_signal_read(&sta->rx_stats.avg_signal);
+				-ewma_signal_read(&sta->rx_stats_avg.signal);
 			sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
 		}
 	}
 
-	if (sta->rx_stats.chains &&
+	/* for the average - if pcpu_rx_stats isn't set - rxstats must point to
+	 * the sta->rx_stats struct, so the check here is fine with and without
+	 * pcpu statistics
+	 */
+	if (last_rxstats->chains &&
 	    !(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
 			       BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
-		sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
-				 BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+		sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+		if (!sta->pcpu_rx_stats)
+			sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
 
-		sinfo->chains = sta->rx_stats.chains;
+		sinfo->chains = last_rxstats->chains;
+
 		for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
 			sinfo->chain_signal[i] =
-				sta->rx_stats.chain_signal_last[i];
+				last_rxstats->chain_signal_last[i];
 			sinfo->chain_signal_avg[i] =
-				-ewma_signal_read(&sta->rx_stats.chain_signal_avg[i]);
+				-ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]);
 		}
 	}
 
@@ -2055,33 +2221,7 @@
 	for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
 		struct cfg80211_tid_stats *tidstats = &sinfo->pertid[i];
 
-		if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
-			tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
-			tidstats->rx_msdu = sta->rx_stats.msdu[i];
-		}
-
-		if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
-			tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
-			tidstats->tx_msdu = sta->tx_stats.msdu[i];
-		}
-
-		if (!(tidstats->filled &
-				BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
-		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
-			tidstats->filled |=
-				BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
-			tidstats->tx_msdu_retries =
-				sta->status_stats.msdu_retries[i];
-		}
-
-		if (!(tidstats->filled &
-				BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
-		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
-			tidstats->filled |=
-				BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
-			tidstats->tx_msdu_failed =
-				sta->status_stats.msdu_failed[i];
-		}
+		sta_set_tidstats(sta, tidstats, i);
 	}
 
 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -2150,3 +2290,12 @@
 		sinfo->expected_throughput = thr;
 	}
 }
+
+unsigned long ieee80211_sta_last_active(struct sta_info *sta)
+{
+	struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
+
+	if (time_after(stats->last_rx, sta->status_stats.last_ack))
+		return stats->last_rx;
+	return sta->status_stats.last_ack;
+}
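For reference, a minimal userspace sketch (not the kernel code, and ignoring the u64_stats_sync seqcount and the jiffies wraparound handled by time_after()) of the per-CPU statistics pattern introduced in sta_info.c above: readers sum the per-CPU blocks to get totals and pick the most recently updated block for "last activity" style fields. All names and the NCPUS value are illustrative.

#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

struct rx_stats {
        uint64_t bytes;
        unsigned long packets;
        unsigned long last_rx;          /* stand-in for jiffies */
};

static struct rx_stats percpu[NCPUS];

static uint64_t total_bytes(void)
{
        uint64_t sum = 0;

        for (int cpu = 0; cpu < NCPUS; cpu++)
                sum += percpu[cpu].bytes;
        return sum;
}

static const struct rx_stats *last_active(void)
{
        const struct rx_stats *best = &percpu[0];

        for (int cpu = 1; cpu < NCPUS; cpu++)
                if (percpu[cpu].last_rx > best->last_rx)
                        best = &percpu[cpu];
        return best;
}

int main(void)
{
        percpu[1] = (struct rx_stats){ .bytes = 1500, .packets = 1, .last_rx = 10 };
        percpu[3] = (struct rx_stats){ .bytes = 3000, .packets = 2, .last_rx = 42 };

        printf("bytes=%llu last_rx=%lu\n",
               (unsigned long long)total_bytes(), last_active()->last_rx);
        return 0;
}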
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 053f5c4..c8b8ccc 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2002-2005, Devicescape Software, Inc.
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -18,6 +18,7 @@
 #include <linux/average.h>
 #include <linux/etherdevice.h>
 #include <linux/rhashtable.h>
+#include <linux/u64_stats_sync.h>
 #include "key.h"
 
 /**
@@ -69,6 +70,8 @@
  * @WLAN_STA_MPSP_RECIPIENT: local STA is recipient of a MPSP.
  * @WLAN_STA_PS_DELIVER: station woke up, but we're still blocking TX
  *	until pending frames are delivered
+ *
+ * @NUM_WLAN_STA_FLAGS: number of defined flags
  */
 enum ieee80211_sta_info_flags {
 	WLAN_STA_AUTH,
@@ -97,6 +100,8 @@
 	WLAN_STA_MPSP_OWNER,
 	WLAN_STA_MPSP_RECIPIENT,
 	WLAN_STA_PS_DELIVER,
+
+	NUM_WLAN_STA_FLAGS,
 };
 
 #define ADDBA_RESP_INTERVAL HZ
@@ -281,6 +286,40 @@
 };
 
 /**
+ * struct ieee80211_fast_rx - RX fastpath information
+ * @dev: netdevice for reporting the SKB
+ * @vif_type: (P2P-less) interface type of the original sdata (sdata->vif.type)
+ * @vif_addr: interface address
+ * @rfc1042_hdr: copy of the RFC 1042 SNAP header (to have in cache)
+ * @control_port_protocol: control port protocol copied from sdata
+ * @expected_ds_bits: from/to DS bits expected
+ * @icv_len: length of the MIC if present
+ * @key: bool indicating encryption is expected (key is set)
+ * @sta_notify: notify the MLME code (once)
+ * @internal_forward: forward frames internally on AP/VLAN type interfaces
+ * @uses_rss: copy of USES_RSS hw flag
+ * @da_offs: offset of the DA in the header (for header conversion)
+ * @sa_offs: offset of the SA in the header (for header conversion)
+ * @rcu_head: RCU head for freeing this structure
+ */
+struct ieee80211_fast_rx {
+	struct net_device *dev;
+	enum nl80211_iftype vif_type;
+	u8 vif_addr[ETH_ALEN] __aligned(2);
+	u8 rfc1042_hdr[6] __aligned(2);
+	__be16 control_port_protocol;
+	__le16 expected_ds_bits;
+	u8 icv_len;
+	u8 key:1,
+	   sta_notify:1,
+	   internal_forward:1,
+	   uses_rss:1;
+	u8 da_offs, sa_offs;
+
+	struct rcu_head rcu_head;
+};
+
+/**
  * struct mesh_sta - mesh STA information
  * @plink_lock: serialize access to plink fields
  * @llid: Local link ID
@@ -330,6 +369,21 @@
 
 DECLARE_EWMA(signal, 1024, 8)
 
+struct ieee80211_sta_rx_stats {
+	unsigned long packets;
+	unsigned long last_rx;
+	unsigned long num_duplicates;
+	unsigned long fragments;
+	unsigned long dropped;
+	int last_signal;
+	u8 chains;
+	s8 chain_signal_last[IEEE80211_MAX_CHAINS];
+	u16 last_rate;
+	struct u64_stats_sync syncp;
+	u64 bytes;
+	u64 msdu[IEEE80211_NUM_TIDS + 1];
+};
+
 /**
  * struct sta_info - STA information
  *
@@ -371,13 +425,12 @@
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
  * @mesh: mesh STA information
- * @debugfs: debug filesystem info
+ * @debugfs_dir: debug filesystem directory dentry
  * @dead: set to true when sta is unlinked
  * @removed: set to true when sta is being removed from sta_list
  * @uploaded: set to true when sta is uploaded to the driver
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
- * @beacon_loss_count: number of times beacon loss has triggered
  * @rcu_head: RCU head used for freeing this station struct
  * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
  *	taken from HT/VHT capabilities or VHT operating mode notification
@@ -386,10 +439,13 @@
  * @cipher_scheme: optional cipher scheme for this station
  * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
  * @fast_tx: TX fastpath information
+ * @fast_rx: RX fastpath information
  * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
  *	the BSS one.
  * @tx_stats: TX statistics
  * @rx_stats: RX statistics
+ * @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs
+ *	this (by advertising the USES_RSS hw flag)
  * @status_stats: TX status statistics
  */
 struct sta_info {
@@ -409,6 +465,8 @@
 	spinlock_t lock;
 
 	struct ieee80211_fast_tx __rcu *fast_tx;
+	struct ieee80211_fast_rx __rcu *fast_rx;
+	struct ieee80211_sta_rx_stats __percpu *pcpu_rx_stats;
 
 #ifdef CONFIG_MAC80211_MESH
 	struct mesh_sta *mesh;
@@ -438,24 +496,11 @@
 	long last_connected;
 
 	/* Updated from RX path only, no locking requirements */
+	struct ieee80211_sta_rx_stats rx_stats;
 	struct {
-		unsigned long packets;
-		u64 bytes;
-		unsigned long last_rx;
-		unsigned long num_duplicates;
-		unsigned long fragments;
-		unsigned long dropped;
-		int last_signal;
-		struct ewma_signal avg_signal;
-		u8 chains;
-		s8 chain_signal_last[IEEE80211_MAX_CHAINS];
-		struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
-		int last_rate_idx;
-		u32 last_rate_flag;
-		u32 last_rate_vht_flag;
-		u8 last_rate_vht_nss;
-		u64 msdu[IEEE80211_NUM_TIDS + 1];
-	} rx_stats;
+		struct ewma_signal signal;
+		struct ewma_signal chain_signal[IEEE80211_MAX_CHAINS];
+	} rx_stats_avg;
 
 	/* Plus 1 for non-QoS frames */
 	__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
@@ -468,6 +513,7 @@
 		unsigned long last_tdls_pkt_time;
 		u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
 		u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
+		unsigned long last_ack;
 	} status_stats;
 
 	/* Updated from TX path only, no locking requirements */
@@ -486,10 +532,7 @@
 	u8 timer_to_tid[IEEE80211_NUM_TIDS];
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-	struct sta_info_debugfsdentries {
-		struct dentry *dir;
-		bool add_has_run;
-	} debugfs;
+	struct dentry *debugfs_dir;
 #endif
 
 	enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
@@ -677,4 +720,44 @@
 void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
 void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
 
+unsigned long ieee80211_sta_last_active(struct sta_info *sta);
+
+#define STA_STATS_RATE_INVALID		0
+#define STA_STATS_RATE_VHT		0x8000
+#define STA_STATS_RATE_HT		0x4000
+#define STA_STATS_RATE_LEGACY		0x2000
+#define STA_STATS_RATE_SGI		0x1000
+#define STA_STATS_RATE_BW_SHIFT		9
+#define STA_STATS_RATE_BW_MASK		(0x7 << STA_STATS_RATE_BW_SHIFT)
+
+static inline u16 sta_stats_encode_rate(struct ieee80211_rx_status *s)
+{
+	u16 r = s->rate_idx;
+
+	if (s->vht_flag & RX_VHT_FLAG_80MHZ)
+		r |= RATE_INFO_BW_80 << STA_STATS_RATE_BW_SHIFT;
+	else if (s->vht_flag & RX_VHT_FLAG_160MHZ)
+		r |= RATE_INFO_BW_160 << STA_STATS_RATE_BW_SHIFT;
+	else if (s->flag & RX_FLAG_40MHZ)
+		r |= RATE_INFO_BW_40 << STA_STATS_RATE_BW_SHIFT;
+	else if (s->flag & RX_FLAG_10MHZ)
+		r |= RATE_INFO_BW_10 << STA_STATS_RATE_BW_SHIFT;
+	else if (s->flag & RX_FLAG_5MHZ)
+		r |= RATE_INFO_BW_5 << STA_STATS_RATE_BW_SHIFT;
+	else
+		r |= RATE_INFO_BW_20 << STA_STATS_RATE_BW_SHIFT;
+
+	if (s->flag & RX_FLAG_SHORT_GI)
+		r |= STA_STATS_RATE_SGI;
+
+	if (s->flag & RX_FLAG_VHT)
+		r |= STA_STATS_RATE_VHT | (s->vht_nss << 4);
+	else if (s->flag & RX_FLAG_HT)
+		r |= STA_STATS_RATE_HT;
+	else
+		r |= STA_STATS_RATE_LEGACY | (s->band << 4);
+
+	return r;
+}
+
 #endif /* STA_INFO_H */
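A hypothetical userspace round-trip of the packed 16-bit rate layout defined by the STA_STATS_RATE_* constants above (rate-type flags in the top nibble, SGI at bit 12, bandwidth in bits 9-11, MCS and NSS in the low byte); the helper name and the sample bandwidth value are illustrative only, not taken from the patch.

#include <assert.h>
#include <stdint.h>

#define RATE_VHT        0x8000
#define RATE_SGI        0x1000
#define RATE_BW_SHIFT   9
#define RATE_BW_MASK    (0x7 << RATE_BW_SHIFT)

static uint16_t encode_vht(unsigned int mcs, unsigned int nss,
                           unsigned int bw, int sgi)
{
        uint16_t r = RATE_VHT | (mcs & 0xf) | ((nss & 0xf) << 4);

        r |= (bw & 0x7) << RATE_BW_SHIFT;
        if (sgi)
                r |= RATE_SGI;
        return r;
}

int main(void)
{
        uint16_t r = encode_vht(7, 2, 4 /* e.g. an 80 MHz bucket */, 1);

        assert(r & RATE_VHT);
        assert((r & 0xf) == 7);                         /* MCS */
        assert(((r & 0xf0) >> 4) == 2);                 /* NSS */
        assert(((r & RATE_BW_MASK) >> RATE_BW_SHIFT) == 4);
        assert(r & RATE_SGI);
        return 0;
}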
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8b1b2ea..c6d5c72 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -188,7 +188,7 @@
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
 	if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
-		sta->rx_stats.last_rx = jiffies;
+		sta->status_stats.last_ack = jiffies;
 
 	if (ieee80211_is_data_qos(mgmt->frame_control)) {
 		struct ieee80211_hdr *hdr = (void *) skb->data;
@@ -647,7 +647,7 @@
 		sta->status_stats.retry_count += retry_count;
 
 		if (acked) {
-			sta->rx_stats.last_rx = jiffies;
+			sta->status_stats.last_ack = jiffies;
 
 			if (sta->status_stats.lost_packets)
 				sta->status_stats.lost_packets = 0;
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index c9eeb3f..1c7d45a 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -4,7 +4,7 @@
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2014, Intel Corporation
  * Copyright 2014  Intel Mobile Communications GmbH
- * Copyright 2015  Intel Deutschland GmbH
+ * Copyright 2015 - 2016 Intel Deutschland GmbH
  *
  * This file is GPLv2 as found in COPYING.
  */
@@ -15,6 +15,7 @@
 #include <linux/rtnetlink.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
+#include "rate.h"
 
 /* give usermode some time for retries in setting up the TDLS session */
 #define TDLS_PEER_SETUP_TIMEOUT	(15 * HZ)
@@ -46,7 +47,7 @@
 			   NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
 	bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
 			  !ifmgd->tdls_wider_bw_prohibited;
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
 	bool vht = sband && sband->vht_cap.vht_supported;
 	u8 *pos = (void *)skb_put(skb, 10);
@@ -183,7 +184,7 @@
 	if (status_code != 0)
 		return 0;
 
-	if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_2GHZ) {
+	if (ieee80211_get_sdata_band(sdata) == NL80211_BAND_2GHZ) {
 		return WLAN_CAPABILITY_SHORT_SLOT_TIME |
 		       WLAN_CAPABILITY_SHORT_PREAMBLE;
 	}
@@ -302,7 +303,7 @@
 	/* IEEE802.11ac-2013 Table E-4 */
 	u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
 	struct cfg80211_chan_def uc = sta->tdls_chandef;
-	enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+	enum nl80211_chan_width max_width = ieee80211_sta_cap_chan_bw(sta);
 	int i;
 
 	/* only support upgrading non-narrow channels up to 80Mhz */
@@ -313,7 +314,7 @@
 	if (max_width > NL80211_CHAN_WIDTH_80)
 		max_width = NL80211_CHAN_WIDTH_80;
 
-	if (uc.width == max_width)
+	if (uc.width >= max_width)
 		return;
 	/*
 	 * Channel usage constraints in the IEEE802.11ac-2013 specification only
@@ -324,6 +325,7 @@
 	for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
 		if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
 			uc.center_freq1 = centers_80mhz[i];
+			uc.center_freq2 = 0;
 			uc.width = NL80211_CHAN_WIDTH_80;
 			break;
 		}
@@ -332,7 +334,7 @@
 		return;
 
 	/* proceed to downgrade the chandef until usable or the same */
-	while (uc.width > max_width &&
+	while (uc.width > max_width ||
 	       !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
 					      sdata->wdev.iftype))
 		ieee80211_chandef_downgrade(&uc);
@@ -355,7 +357,7 @@
 				   u8 action_code, bool initiator,
 				   const u8 *extra_ies, size_t extra_ies_len)
 {
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_sta_ht_cap ht_cap;
@@ -542,7 +544,7 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	size_t offset = 0, noffset;
 	struct sta_info *sta, *ap_sta;
-	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	enum nl80211_band band = ieee80211_get_sdata_band(sdata);
 	u8 *pos;
 
 	mutex_lock(&local->sta_mtx);
@@ -609,7 +611,7 @@
 	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
 
 	/* only include VHT-operation if not on the 2.4GHz band */
-	if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+	if (band != NL80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
 		/*
 		 * if both peers support WIDER_BW, we can expand the chandef to
 		 * a wider compatible one, up to 80MHz
@@ -1242,18 +1244,44 @@
 	return ret;
 }
 
-static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata,
+					 struct sta_info *sta)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_chanctx_conf *conf;
 	struct ieee80211_chanctx *ctx;
+	enum nl80211_chan_width width;
+	struct ieee80211_supported_band *sband;
 
 	mutex_lock(&local->chanctx_mtx);
 	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
 					 lockdep_is_held(&local->chanctx_mtx));
 	if (conf) {
+		width = conf->def.width;
+		sband = local->hw.wiphy->bands[conf->def.chan->band];
 		ctx = container_of(conf, struct ieee80211_chanctx, conf);
 		ieee80211_recalc_chanctx_chantype(local, ctx);
+
+		/* if width changed and a peer is given, update its BW */
+		if (width != conf->def.width && sta &&
+		    test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) {
+			enum ieee80211_sta_rx_bandwidth bw;
+
+			bw = ieee80211_chan_width_to_rx_bw(conf->def.width);
+			bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+			if (bw != sta->sta.bandwidth) {
+				sta->sta.bandwidth = bw;
+				rate_control_rate_update(local, sband, sta,
+							 IEEE80211_RC_BW_CHANGED);
+				/*
+				 * if a TDLS peer BW was updated, we need to
+				 * recalc the chandef width again, to get the
+				 * correct chanctx min_def
+				 */
+				ieee80211_recalc_chanctx_chantype(local, ctx);
+			}
+		}
+
 	}
 	mutex_unlock(&local->chanctx_mtx);
 }
@@ -1350,8 +1378,6 @@
 			break;
 		}
 
-		iee80211_tdls_recalc_chanctx(sdata);
-
 		mutex_lock(&local->sta_mtx);
 		sta = sta_info_get(sdata, peer);
 		if (!sta) {
@@ -1360,6 +1386,7 @@
 			break;
 		}
 
+		iee80211_tdls_recalc_chanctx(sdata, sta);
 		iee80211_tdls_recalc_ht_protection(sdata, sta);
 
 		set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
@@ -1390,7 +1417,7 @@
 		iee80211_tdls_recalc_ht_protection(sdata, NULL);
 		mutex_unlock(&local->sta_mtx);
 
-		iee80211_tdls_recalc_chanctx(sdata);
+		iee80211_tdls_recalc_chanctx(sdata, NULL);
 		break;
 	default:
 		ret = -ENOTSUPP;
@@ -1746,7 +1773,7 @@
 	u8 target_channel, oper_class;
 	bool local_initiator;
 	struct sta_info *sta;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_tdls_data *tf = (void *)skb->data;
 	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
 	int baselen = offsetof(typeof(*tf), u.chan_switch_req.variable);
@@ -1778,10 +1805,10 @@
 	if ((oper_class == 112 || oper_class == 2 || oper_class == 3 ||
 	     oper_class == 4 || oper_class == 5 || oper_class == 6) &&
 	     target_channel < 14)
-		band = IEEE80211_BAND_5GHZ;
+		band = NL80211_BAND_5GHZ;
 	else
-		band = target_channel < 14 ? IEEE80211_BAND_2GHZ :
-					     IEEE80211_BAND_5GHZ;
+		band = target_channel < 14 ? NL80211_BAND_2GHZ :
+					     NL80211_BAND_5GHZ;
 
 	freq = ieee80211_channel_to_frequency(target_channel, band);
 	if (freq == 0) {
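An illustrative sketch (not the kernel function) of the 80 MHz segment lookup used in the TDLS chandef upgrade above: a channel can only be widened to 80 MHz when its center frequency lies within 30 MHz of one of the fixed segment centers from IEEE802.11ac-2013 Table E-4.

#include <stdio.h>
#include <stdlib.h>

/* same values as the centers_80mhz[] table in the hunk above */
static const int centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };

static int find_80mhz_center(int center_freq)
{
        for (size_t i = 0; i < sizeof(centers_80mhz) / sizeof(centers_80mhz[0]); i++)
                if (abs(center_freq - centers_80mhz[i]) <= 30)
                        return centers_80mhz[i];
        return 0;       /* no containing 80 MHz segment, no upgrade */
}

int main(void)
{
        printf("%d\n", find_80mhz_center(5220));        /* -> 5210 */
        printf("%d\n", find_80mhz_center(5340));        /* -> 0, cannot upgrade */
        return 0;
}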
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 2b0a17e..77e4c53 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -1,3 +1,8 @@
+/*
+* Portions of this file
+* Copyright(c) 2016 Intel Deutschland GmbH
+*/
+
 #if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
 #define __MAC80211_DRIVER_TRACE
 
@@ -396,7 +401,7 @@
 		__field(u32, sync_device_ts)
 		__field(u8, sync_dtim_count)
 		__field(u32, basic_rates)
-		__array(int, mcast_rate, IEEE80211_NUM_BANDS)
+		__array(int, mcast_rate, NUM_NL80211_BANDS)
 		__field(u16, ht_operation_mode)
 		__field(s32, cqm_rssi_thold);
 		__field(s32, cqm_rssi_hyst);
@@ -899,6 +904,13 @@
 	TP_ARGS(local, sdata, sta)
 );
 
+DEFINE_EVENT(sta_event, drv_sync_rx_queues,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 struct ieee80211_sta *sta),
+	TP_ARGS(local, sdata, sta)
+);
+
 DEFINE_EVENT(sta_event, drv_sta_rate_tbl_update,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sub_if_data *sdata,
@@ -1253,8 +1265,8 @@
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
-		__entry->legacy_2g = mask->control[IEEE80211_BAND_2GHZ].legacy;
-		__entry->legacy_5g = mask->control[IEEE80211_BAND_5GHZ].legacy;
+		__entry->legacy_2g = mask->control[NL80211_BAND_2GHZ].legacy;
+		__entry->legacy_5g = mask->control[NL80211_BAND_5GHZ].legacy;
 	),
 
 	TP_printk(
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 62ad532..2030443 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -150,7 +150,7 @@
 			rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
 
 		switch (sband->band) {
-		case IEEE80211_BAND_2GHZ: {
+		case NL80211_BAND_2GHZ: {
 			u32 flag;
 			if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
 				flag = IEEE80211_RATE_MANDATORY_G;
@@ -160,13 +160,13 @@
 				mrate = r->bitrate;
 			break;
 		}
-		case IEEE80211_BAND_5GHZ:
+		case NL80211_BAND_5GHZ:
 			if (r->flags & IEEE80211_RATE_MANDATORY_A)
 				mrate = r->bitrate;
 			break;
-		case IEEE80211_BAND_60GHZ:
+		case NL80211_BAND_60GHZ:
 			/* TODO, for now fall through */
-		case IEEE80211_NUM_BANDS:
+		case NUM_NL80211_BANDS:
 			WARN_ON(1);
 			break;
 		}
@@ -1116,11 +1116,15 @@
 			reset_agg_timer = true;
 		} else {
 			queued = true;
+			if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
+				clear_sta_flag(tx->sta, WLAN_STA_SP);
+				ps_dbg(tx->sta->sdata,
+				       "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
+				       tx->sta->sta.addr, tx->sta->sta.aid);
+			}
 			info->control.vif = &tx->sdata->vif;
 			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-			info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
-					IEEE80211_TX_CTL_NO_PS_BUFFER |
-					IEEE80211_TX_STATUS_EOSP;
+			info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
 			__skb_queue_tail(&tid_tx->pending, skb);
 			if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
 				purge_skb = __skb_dequeue(&tid_tx->pending);
@@ -1247,7 +1251,8 @@
 	struct txq_info *txqi;
 	u8 ac;
 
-	if (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)
+	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
+	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
 		goto tx_normal;
 
 	if (!ieee80211_is_data(hdr->frame_control))
@@ -1324,6 +1329,10 @@
 out:
 	spin_unlock_bh(&txqi->queue.lock);
 
+	if (skb && skb_has_frag_list(skb) &&
+	    !ieee80211_hw_check(&local->hw, TX_FRAG_LIST))
+		skb_linearize(skb);
+
 	return skb;
 }
 EXPORT_SYMBOL(ieee80211_tx_dequeue);
@@ -1691,7 +1700,9 @@
 	bool rate_found = false;
 	u8 rate_retries = 0;
 	u16 rate_flags = 0;
-	u8 mcs_known, mcs_flags;
+	u8 mcs_known, mcs_flags, mcs_bw;
+	u16 vht_known;
+	u8 vht_mcs = 0, vht_nss = 0;
 	int i;
 
 	info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
@@ -1767,11 +1778,38 @@
 			    mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
 				rate_flags |= IEEE80211_TX_RC_SHORT_GI;
 
+			mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK;
 			if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
-			    mcs_flags & IEEE80211_RADIOTAP_MCS_BW_40)
+			    mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40)
 				rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 			break;
 
+		case IEEE80211_RADIOTAP_VHT:
+			vht_known = get_unaligned_le16(iterator.this_arg);
+			rate_found = true;
+
+			rate_flags = IEEE80211_TX_RC_VHT_MCS;
+			if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) &&
+			    (iterator.this_arg[2] &
+			     IEEE80211_RADIOTAP_VHT_FLAG_SGI))
+				rate_flags |= IEEE80211_TX_RC_SHORT_GI;
+			if (vht_known &
+			    IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
+				if (iterator.this_arg[3] == 1)
+					rate_flags |=
+						IEEE80211_TX_RC_40_MHZ_WIDTH;
+				else if (iterator.this_arg[3] == 4)
+					rate_flags |=
+						IEEE80211_TX_RC_80_MHZ_WIDTH;
+				else if (iterator.this_arg[3] == 11)
+					rate_flags |=
+						IEEE80211_TX_RC_160_MHZ_WIDTH;
+			}
+
+			vht_mcs = iterator.this_arg[4] >> 4;
+			vht_nss = iterator.this_arg[4] & 0xF;
+			break;
+
 		/*
 		 * Please update the file
 		 * Documentation/networking/mac80211-injection.txt
@@ -1797,6 +1835,9 @@
 
 		if (rate_flags & IEEE80211_TX_RC_MCS) {
 			info->control.rates[0].idx = rate;
+		} else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+			ieee80211_rate_set_vht(info->control.rates, vht_mcs,
+					       vht_nss);
 		} else {
 			for (i = 0; i < sband->n_bitrates; i++) {
 				if (rate * 5 != sband->bitrates[i].bitrate)
@@ -1807,6 +1848,9 @@
 			}
 		}
 
+		if (info->control.rates[0].idx < 0)
+			info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT;
+
 		info->control.rates[0].flags = rate_flags;
 		info->control.rates[0].count = min_t(u8, rate_retries + 1,
 						     local->hw.max_rate_tries);
@@ -2094,7 +2138,7 @@
 	u16 info_id = 0;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_sub_if_data *ap_sdata;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int ret;
 
 	if (IS_ERR(sta))
@@ -2181,7 +2225,7 @@
 			}
 
 			if (mppath && mpath)
-				mesh_path_del(mpath->sdata, mpath->dst);
+				mesh_path_del(sdata, mpath->dst);
 		}
 
 		/*
@@ -2767,6 +2811,154 @@
 		kfree_rcu(fast_tx, rcu_head);
 }
 
+static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
+					struct sk_buff *skb, int headroom,
+					int *subframe_len)
+{
+	int amsdu_len = *subframe_len + sizeof(struct ethhdr);
+	int padding = (4 - amsdu_len) & 3;
+
+	if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
+		I802_DEBUG_INC(local->tx_expand_skb_head);
+
+		if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
+			wiphy_debug(local->hw.wiphy,
+				    "failed to reallocate TX buffer\n");
+			return false;
+		}
+	}
+
+	if (padding) {
+		*subframe_len += padding;
+		memset(skb_put(skb, padding), 0, padding);
+	}
+
+	return true;
+}
+
+static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
+					 struct ieee80211_fast_tx *fast_tx,
+					 struct sk_buff *skb)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr;
+	struct ethhdr amsdu_hdr;
+	int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header);
+	int subframe_len = skb->len - hdr_len;
+	void *data;
+	u8 *qc;
+
+	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+		return false;
+
+	if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
+		return true;
+
+	if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(amsdu_hdr),
+					 &subframe_len))
+		return false;
+
+	amsdu_hdr.h_proto = cpu_to_be16(subframe_len);
+	memcpy(amsdu_hdr.h_source, skb->data + fast_tx->sa_offs, ETH_ALEN);
+	memcpy(amsdu_hdr.h_dest, skb->data + fast_tx->da_offs, ETH_ALEN);
+
+	data = skb_push(skb, sizeof(amsdu_hdr));
+	memmove(data, data + sizeof(amsdu_hdr), hdr_len);
+	memcpy(data + hdr_len, &amsdu_hdr, sizeof(amsdu_hdr));
+
+	hdr = data;
+	qc = ieee80211_get_qos_ctl(hdr);
+	*qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+	info->control.flags |= IEEE80211_TX_CTRL_AMSDU;
+
+	return true;
+}
+
+static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
+				      struct sta_info *sta,
+				      struct ieee80211_fast_tx *fast_tx,
+				      struct sk_buff *skb)
+{
+	struct ieee80211_local *local = sdata->local;
+	u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+	struct ieee80211_txq *txq = sta->sta.txq[tid];
+	struct txq_info *txqi;
+	struct sk_buff **frag_tail, *head;
+	int subframe_len = skb->len - ETH_ALEN;
+	u8 max_subframes = sta->sta.max_amsdu_subframes;
+	int max_frags = local->hw.max_tx_fragments;
+	int max_amsdu_len = sta->sta.max_amsdu_len;
+	__be16 len;
+	void *data;
+	bool ret = false;
+	int n = 1, nfrags;
+
+	if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
+		return false;
+
+	if (!txq)
+		return false;
+
+	txqi = to_txq_info(txq);
+	if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags))
+		return false;
+
+	if (sta->sta.max_rc_amsdu_len)
+		max_amsdu_len = min_t(int, max_amsdu_len,
+				      sta->sta.max_rc_amsdu_len);
+
+	spin_lock_bh(&txqi->queue.lock);
+
+	head = skb_peek_tail(&txqi->queue);
+	if (!head)
+		goto out;
+
+	if (skb->len + head->len > max_amsdu_len)
+		goto out;
+
+	if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
+		goto out;
+
+	nfrags = 1 + skb_shinfo(skb)->nr_frags;
+	nfrags += 1 + skb_shinfo(head)->nr_frags;
+	frag_tail = &skb_shinfo(head)->frag_list;
+	while (*frag_tail) {
+		nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags;
+		frag_tail = &(*frag_tail)->next;
+		n++;
+	}
+
+	if (max_subframes && n > max_subframes)
+		goto out;
+
+	if (max_frags && nfrags > max_frags)
+		goto out;
+
+	if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
+					 &subframe_len))
+		goto out;
+
+	ret = true;
+	data = skb_push(skb, ETH_ALEN + 2);
+	memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
+
+	data += 2 * ETH_ALEN;
+	len = cpu_to_be16(subframe_len);
+	memcpy(data, &len, 2);
+	memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
+
+	head->len += skb->len;
+	head->data_len += skb->len;
+	*frag_tail = skb;
+
+out:
+	spin_unlock_bh(&txqi->queue.lock);
+
+	return ret;
+}
+
 static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 				struct net_device *dev, struct sta_info *sta,
 				struct ieee80211_fast_tx *fast_tx,
@@ -2821,6 +3013,10 @@
 
 	ieee80211_tx_stats(dev, skb->len + extra_head);
 
+	if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
+	    ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
+		return true;
+
 	/* will not be crypto-handled beyond what we do here, so use false
 	 * as the may-encrypt argument for the resize to not account for
 	 * more room than we already have in 'extra_head'
@@ -3401,7 +3597,7 @@
 	struct sk_buff *skb = NULL;
 	struct ieee80211_tx_info *info;
 	struct ieee80211_sub_if_data *sdata = NULL;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_tx_rate_control txrc;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	int csa_off_base = 0;
@@ -3969,7 +4165,7 @@
 
 void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
 				 struct sk_buff *skb, int tid,
-				 enum ieee80211_band band)
+				 enum nl80211_band band)
 {
 	int ac = ieee802_1d_to_ac[tid & 7];
 
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 7390de4..905003f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -59,7 +59,7 @@
 	}
 }
 
-int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
+int ieee80211_frame_duration(enum nl80211_band band, size_t len,
 			     int rate, int erp, int short_preamble,
 			     int shift)
 {
@@ -77,7 +77,7 @@
 	 * is assumed to be 0 otherwise.
 	 */
 
-	if (band == IEEE80211_BAND_5GHZ || erp) {
+	if (band == NL80211_BAND_5GHZ || erp) {
 		/*
 		 * OFDM:
 		 *
@@ -129,7 +129,7 @@
 /* Exported duration function for driver use */
 __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
 					struct ieee80211_vif *vif,
-					enum ieee80211_band band,
+					enum nl80211_band band,
 					size_t frame_len,
 					struct ieee80211_rate *rate)
 {
@@ -1129,7 +1129,7 @@
 	rcu_read_lock();
 	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
 	use_11b = (chanctx_conf &&
-		   chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ) &&
+		   chanctx_conf->def.chan->band == NL80211_BAND_2GHZ) &&
 		 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
 	rcu_read_unlock();
 
@@ -1301,7 +1301,7 @@
 static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
 					 u8 *buffer, size_t buffer_len,
 					 const u8 *ie, size_t ie_len,
-					 enum ieee80211_band band,
+					 enum nl80211_band band,
 					 u32 rate_mask,
 					 struct cfg80211_chan_def *chandef,
 					 size_t *offset)
@@ -1375,7 +1375,7 @@
 		pos += ext_rates_len;
 	}
 
-	if (chandef->chan && sband->band == IEEE80211_BAND_2GHZ) {
+	if (chandef->chan && sband->band == NL80211_BAND_2GHZ) {
 		if (end - pos < 3)
 			goto out_err;
 		*pos++ = WLAN_EID_DS_PARAMS;
@@ -1479,7 +1479,7 @@
 
 	memset(ie_desc, 0, sizeof(*ie_desc));
 
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+	for (i = 0; i < NUM_NL80211_BANDS; i++) {
 		if (bands_used & BIT(i)) {
 			pos += ieee80211_build_preq_ies_band(local,
 							     buffer + pos,
@@ -1522,7 +1522,7 @@
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
 	int ies_len;
-	u32 rate_masks[IEEE80211_NUM_BANDS] = {};
+	u32 rate_masks[NUM_NL80211_BANDS] = {};
 	struct ieee80211_scan_ies dummy_ie_desc;
 
 	/*
@@ -1582,7 +1582,7 @@
 
 u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
 			    struct ieee802_11_elems *elems,
-			    enum ieee80211_band band, u32 *basic_rates)
+			    enum nl80211_band band, u32 *basic_rates)
 {
 	struct ieee80211_supported_band *sband;
 	size_t num_rates;
@@ -2520,7 +2520,7 @@
 
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
 			    struct sk_buff *skb, bool need_basic,
-			    enum ieee80211_band band)
+			    enum nl80211_band band)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
@@ -2565,7 +2565,7 @@
 
 int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 				struct sk_buff *skb, bool need_basic,
-				enum ieee80211_band band)
+				enum nl80211_band band)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
@@ -2711,7 +2711,7 @@
 
 		if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
 			/* TODO: handle HT/VHT preambles */
-			if (status->band == IEEE80211_BAND_5GHZ) {
+			if (status->band == NL80211_BAND_5GHZ) {
 				ts += 20 << shift;
 				mpdu_offset += 2;
 			} else if (status->flag & RX_FLAG_SHORTPRE) {
@@ -2724,8 +2724,9 @@
 
 	rate = cfg80211_calculate_bitrate(&ri);
 	if (WARN_ONCE(!rate,
-		      "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
-		      status->flag, status->rate_idx, status->vht_nss))
+		      "Invalid bitrate: flags=0x%llx, idx=%d, vht_nss=%d\n",
+		      (unsigned long long)status->flag, status->rate_idx,
+		      status->vht_nss))
 		return 0;
 
 	/* rewind from end of MPDU */
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 89e04d5..ee71576 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -319,7 +319,30 @@
 	return IEEE80211_STA_RX_BW_80;
 }
 
-static enum ieee80211_sta_rx_bandwidth
+enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
+{
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+	u32 cap_width;
+
+	if (!vht_cap->vht_supported) {
+		if (!sta->sta.ht_cap.ht_supported)
+			return NL80211_CHAN_WIDTH_20_NOHT;
+
+		return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+				NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20;
+	}
+
+	cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+
+	if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
+		return NL80211_CHAN_WIDTH_160;
+	else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+		return NL80211_CHAN_WIDTH_80P80;
+
+	return NL80211_CHAN_WIDTH_80;
+}
+
+enum ieee80211_sta_rx_bandwidth
 ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
 {
 	switch (width) {
@@ -347,10 +370,7 @@
 
 	bw = ieee80211_sta_cap_rx_bw(sta);
 	bw = min(bw, sta->cur_max_bandwidth);
-
-	/* do not cap the BW of TDLS WIDER_BW peers by the bss */
-	if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
-		bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+	bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
 
 	return bw;
 }
@@ -398,7 +418,7 @@
 
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 				  struct sta_info *sta, u8 opmode,
-				  enum ieee80211_band band)
+				  enum nl80211_band band)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
@@ -484,7 +504,7 @@
 
 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 				 struct sta_info *sta, u8 opmode,
-				 enum ieee80211_band band)
+				 enum nl80211_band band)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 1884825..b48c1e1 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -504,25 +504,31 @@
 	    !ieee80211_is_robust_mgmt_frame(skb))
 		return RX_CONTINUE;
 
-	data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
-	if (!rx->sta || data_len < 0)
-		return RX_DROP_UNUSABLE;
-
 	if (status->flag & RX_FLAG_DECRYPTED) {
 		if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN))
 			return RX_DROP_UNUSABLE;
+		if (status->flag & RX_FLAG_MIC_STRIPPED)
+			mic_len = 0;
 	} else {
 		if (skb_linearize(rx->skb))
 			return RX_DROP_UNUSABLE;
 	}
 
+	data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
+	if (!rx->sta || data_len < 0)
+		return RX_DROP_UNUSABLE;
+
 	if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+		int res;
+
 		ccmp_hdr2pn(pn, skb->data + hdrlen);
 
 		queue = rx->security_idx;
 
-		if (memcmp(pn, key->u.ccmp.rx_pn[queue],
-			   IEEE80211_CCMP_PN_LEN) <= 0) {
+		res = memcmp(pn, key->u.ccmp.rx_pn[queue],
+			     IEEE80211_CCMP_PN_LEN);
+		if (res < 0 ||
+		    (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) {
 			key->u.ccmp.replays++;
 			return RX_DROP_UNUSABLE;
 		}
@@ -720,8 +726,7 @@
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	u8 pn[IEEE80211_GCMP_PN_LEN];
-	int data_len;
-	int queue;
+	int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN;
 
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
@@ -729,26 +734,31 @@
 	    !ieee80211_is_robust_mgmt_frame(skb))
 		return RX_CONTINUE;
 
-	data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN -
-		   IEEE80211_GCMP_MIC_LEN;
-	if (!rx->sta || data_len < 0)
-		return RX_DROP_UNUSABLE;
-
 	if (status->flag & RX_FLAG_DECRYPTED) {
 		if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
 			return RX_DROP_UNUSABLE;
+		if (status->flag & RX_FLAG_MIC_STRIPPED)
+			mic_len = 0;
 	} else {
 		if (skb_linearize(rx->skb))
 			return RX_DROP_UNUSABLE;
 	}
 
+	data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
+	if (!rx->sta || data_len < 0)
+		return RX_DROP_UNUSABLE;
+
 	if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+		int res;
+
 		gcmp_hdr2pn(pn, skb->data + hdrlen);
 
 		queue = rx->security_idx;
 
-		if (memcmp(pn, key->u.gcmp.rx_pn[queue],
-			   IEEE80211_GCMP_PN_LEN) <= 0) {
+		res = memcmp(pn, key->u.gcmp.rx_pn[queue],
+			     IEEE80211_GCMP_PN_LEN);
+		if (res < 0 ||
+		    (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) {
 			key->u.gcmp.replays++;
 			return RX_DROP_UNUSABLE;
 		}
@@ -772,7 +782,7 @@
 	}
 
 	/* Remove GCMP header and MIC */
-	if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
+	if (pskb_trim(skb, skb->len - mic_len))
 		return RX_DROP_UNUSABLE;
 	memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
 	skb_pull(skb, IEEE80211_GCMP_HDR_LEN);
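A userspace illustration (not the kernel code) of the relaxed CCMP/GCMP replay check above: a frame is dropped when its packet number compares below the last validated PN, or equal to it unless the driver set the allow-same-PN flag.

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#define PN_LEN 6        /* CCMP/GCMP packet numbers are 6 bytes, big-endian */

static bool pn_replayed(const unsigned char *pn, const unsigned char *last_pn,
                        bool allow_same_pn)
{
        int res = memcmp(pn, last_pn, PN_LEN);

        return res < 0 || (res == 0 && !allow_same_pn);
}

int main(void)
{
        unsigned char last[PN_LEN] = { 0, 0, 0, 0, 0, 5 };
        unsigned char same[PN_LEN] = { 0, 0, 0, 0, 0, 5 };
        unsigned char next[PN_LEN] = { 0, 0, 0, 0, 0, 6 };

        assert(pn_replayed(same, last, false));         /* equal PN rejected */
        assert(!pn_replayed(same, last, true));         /* ... unless allowed */
        assert(!pn_replayed(next, last, false));        /* strictly newer is fine */
        return 0;
}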
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index b18c5ed..0b80a71 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -543,6 +543,9 @@
 	if (!dev)
 		return ERR_PTR(-ENODEV);
 
+	if (IS_ERR(dev))
+		return dev;
+
 	/* The caller is holding rtnl anyways, so release the dev reference */
 	dev_put(dev);
 
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 0183b32..bbcf604 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -31,6 +31,7 @@
 				  SKB_GSO_TCPV6 |
 				  SKB_GSO_UDP |
 				  SKB_GSO_DODGY |
+				  SKB_GSO_TCP_FIXEDID |
 				  SKB_GSO_TCP_ECN)))
 		goto out;
 
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index b0bc475..2e8e7e5 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -95,7 +95,7 @@
 	if (!nested)
 		goto nla_put_failure;
 	if (mtype_do_head(skb, map) ||
-	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
 	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
 		goto nla_put_failure;
 	if (unlikely(ip_set_put_flags(skb, set)))
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 7e6568c..a748b0c 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -497,6 +497,26 @@
 	write_unlock_bh(&ip_set_ref_lock);
 }
 
+/* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
+ * a separate reference counter
+ */
+static inline void
+__ip_set_get_netlink(struct ip_set *set)
+{
+	write_lock_bh(&ip_set_ref_lock);
+	set->ref_netlink++;
+	write_unlock_bh(&ip_set_ref_lock);
+}
+
+static inline void
+__ip_set_put_netlink(struct ip_set *set)
+{
+	write_lock_bh(&ip_set_ref_lock);
+	BUG_ON(set->ref_netlink == 0);
+	set->ref_netlink--;
+	write_unlock_bh(&ip_set_ref_lock);
+}
+
 /* Add, del and test set entries from kernel.
  *
  * The set behind the index must exist and must be referenced
@@ -1002,7 +1022,7 @@
 	if (!attr[IPSET_ATTR_SETNAME]) {
 		for (i = 0; i < inst->ip_set_max; i++) {
 			s = ip_set(inst, i);
-			if (s && s->ref) {
+			if (s && (s->ref || s->ref_netlink)) {
 				ret = -IPSET_ERR_BUSY;
 				goto out;
 			}
@@ -1024,7 +1044,7 @@
 		if (!s) {
 			ret = -ENOENT;
 			goto out;
-		} else if (s->ref) {
+		} else if (s->ref || s->ref_netlink) {
 			ret = -IPSET_ERR_BUSY;
 			goto out;
 		}
@@ -1171,6 +1191,9 @@
 	      from->family == to->family))
 		return -IPSET_ERR_TYPE_MISMATCH;
 
+	if (from->ref_netlink || to->ref_netlink)
+		return -EBUSY;
+
 	strncpy(from_name, from->name, IPSET_MAXNAMELEN);
 	strncpy(from->name, to->name, IPSET_MAXNAMELEN);
 	strncpy(to->name, from_name, IPSET_MAXNAMELEN);
@@ -1206,7 +1229,7 @@
 		if (set->variant->uref)
 			set->variant->uref(set, cb, false);
 		pr_debug("release set %s\n", set->name);
-		__ip_set_put_byindex(inst, index);
+		__ip_set_put_netlink(set);
 	}
 	return 0;
 }
@@ -1328,7 +1351,7 @@
 		if (!cb->args[IPSET_CB_ARG0]) {
 			/* Start listing: make sure set won't be destroyed */
 			pr_debug("reference set\n");
-			set->ref++;
+			set->ref_netlink++;
 		}
 		write_unlock_bh(&ip_set_ref_lock);
 		nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
@@ -1396,7 +1419,7 @@
 		if (set->variant->uref)
 			set->variant->uref(set, cb, false);
 		pr_debug("release set %s\n", set->name);
-		__ip_set_put_byindex(inst, index);
+		__ip_set_put_netlink(set);
 		cb->args[IPSET_CB_ARG0] = 0;
 	}
 out:
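A toy model (not the ipset code) of the separate netlink reference counter introduced above: destroy and swap refuse to run while either counter is non-zero, while dumps take only the netlink reference and are therefore unaffected by swaps of the kernel-side reference.

#include <assert.h>
#include <stdbool.h>

struct toy_set {
        unsigned int ref;               /* kernel-side users */
        unsigned int ref_netlink;       /* in-flight netlink dumps */
};

static bool toy_set_busy(const struct toy_set *s)
{
        return s->ref || s->ref_netlink;
}

int main(void)
{
        struct toy_set s = { 0, 0 };

        assert(!toy_set_busy(&s));
        s.ref_netlink++;                /* dump started */
        assert(toy_set_busy(&s));       /* destroy/swap must return busy */
        s.ref_netlink--;                /* dump finished */
        assert(!toy_set_busy(&s));
        return 0;
}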
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index e5336ab..d32fd6b 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -1082,7 +1082,7 @@
 	if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
 		goto nla_put_failure;
 #endif
-	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
 	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
 		goto nla_put_failure;
 	if (unlikely(ip_set_put_flags(skb, set)))
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 24c6c19..a2a89e4 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -458,7 +458,7 @@
 	if (!nested)
 		goto nla_put_failure;
 	if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
-	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
 	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
 			  htonl(sizeof(*map) + n * set->dsize)))
 		goto nla_put_failure;
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 85ca189..2cb3c62 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -104,6 +104,7 @@
 	spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
+static void ip_vs_conn_expire(unsigned long data);
 
 /*
  *	Returns hash value for IPVS connection entry
@@ -453,10 +454,16 @@
 }
 EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
 
+static void __ip_vs_conn_put_notimer(struct ip_vs_conn *cp)
+{
+	__ip_vs_conn_put(cp);
+	ip_vs_conn_expire((unsigned long)cp);
+}
+
 /*
  *      Put back the conn and restart its timer with its timeout
  */
-void ip_vs_conn_put(struct ip_vs_conn *cp)
+static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp)
 {
 	unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
 		0 : cp->timeout;
@@ -465,6 +472,16 @@
 	__ip_vs_conn_put(cp);
 }
 
+void ip_vs_conn_put(struct ip_vs_conn *cp)
+{
+	if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) &&
+	    (atomic_read(&cp->refcnt) == 1) &&
+	    !timer_pending(&cp->timer))
+		/* expire connection immediately */
+		__ip_vs_conn_put_notimer(cp);
+	else
+		__ip_vs_conn_put_timer(cp);
+}
 
 /*
  *	Fill a no_client_port connection with a client port number
@@ -819,7 +836,8 @@
 		if (cp->control)
 			ip_vs_control_del(cp);
 
-		if (cp->flags & IP_VS_CONN_F_NFCT) {
+		if ((cp->flags & IP_VS_CONN_F_NFCT) &&
+		    !(cp->flags & IP_VS_CONN_F_ONE_PACKET)) {
 			/* Do not access conntracks during subsys cleanup
 			 * because nf_conntrack_find_get can not be used after
 			 * conntrack cleanup for the net.
@@ -834,7 +852,10 @@
 		ip_vs_unbind_dest(cp);
 		if (cp->flags & IP_VS_CONN_F_NO_CPORT)
 			atomic_dec(&ip_vs_conn_no_cport_cnt);
-		call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
+		if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+			ip_vs_conn_rcu_free(&cp->rcu_head);
+		else
+			call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
 		atomic_dec(&ipvs->conn_count);
 		return;
 	}
@@ -850,7 +871,7 @@
 	if (ipvs->sync_state & IP_VS_STATE_MASTER)
 		ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs));
 
-	ip_vs_conn_put(cp);
+	__ip_vs_conn_put_timer(cp);
 }
 
 /* Modify timer, so that it expires as soon as possible.
@@ -1240,6 +1261,16 @@
 	return 1;
 }
 
+static inline bool ip_vs_conn_ops_mode(struct ip_vs_conn *cp)
+{
+	struct ip_vs_service *svc;
+
+	if (!cp->dest)
+		return false;
+	svc = rcu_dereference(cp->dest->svc);
+	return svc && (svc->flags & IP_VS_SVC_F_ONEPACKET);
+}
+
 /* Called from keventd and must protect itself from softirqs */
 void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
 {
@@ -1254,11 +1285,16 @@
 		unsigned int hash = prandom_u32() & ip_vs_conn_tab_mask;
 
 		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
-			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
-				/* connection template */
-				continue;
 			if (cp->ipvs != ipvs)
 				continue;
+			if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+				if (atomic_read(&cp->n_control) ||
+				    !ip_vs_conn_ops_mode(cp))
+					continue;
+				else
+					/* connection template of OPS */
+					goto try_drop;
+			}
 			if (cp->protocol == IPPROTO_TCP) {
 				switch(cp->state) {
 				case IP_VS_TCP_S_SYN_RECV:
@@ -1286,6 +1322,7 @@
 					continue;
 				}
 			} else {
+try_drop:
 				if (!todrop_entry(cp))
 					continue;
 			}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b9a4082..1207f20 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -68,6 +68,7 @@
 #ifdef CONFIG_IP_VS_DEBUG
 EXPORT_SYMBOL(ip_vs_get_debug_level);
 #endif
+EXPORT_SYMBOL(ip_vs_new_conn_out);
 
 static int ip_vs_net_id __read_mostly;
 /* netns cnt used for uniqueness */
@@ -611,7 +612,10 @@
 		ret = cp->packet_xmit(skb, cp, pd->pp, iph);
 		/* do not touch skb anymore */
 
-		atomic_inc(&cp->in_pkts);
+		if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
+			atomic_inc(&cp->control->in_pkts);
+		else
+			atomic_inc(&cp->in_pkts);
 		ip_vs_conn_put(cp);
 		return ret;
 	}
@@ -1100,6 +1104,143 @@
 	}
 }
 
+/* Generic function to create new connections for outgoing RS packets
+ *
+ * Pre-requisites for successful connection creation:
+ * 1) Virtual Service is NOT fwmark based:
+ *    In fwmark-VS actual vaddr and vport are unknown to IPVS
+ * 2) Real Server and Virtual Service were NOT configured without port:
+ *    This is to allow match of different VS to the same RS ip-addr
+ */
+struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
+				      struct ip_vs_dest *dest,
+				      struct sk_buff *skb,
+				      const struct ip_vs_iphdr *iph,
+				      __be16 dport,
+				      __be16 cport)
+{
+	struct ip_vs_conn_param param;
+	struct ip_vs_conn *ct = NULL, *cp = NULL;
+	const union nf_inet_addr *vaddr, *daddr, *caddr;
+	union nf_inet_addr snet;
+	__be16 vport;
+	unsigned int flags;
+
+	EnterFunction(12);
+	vaddr = &svc->addr;
+	vport = svc->port;
+	daddr = &iph->saddr;
+	caddr = &iph->daddr;
+
+	/* check pre-requisites are satisfied */
+	if (svc->fwmark)
+		return NULL;
+	if (!vport || !dport)
+		return NULL;
+
+	/* for persistent service first create connection template */
+	if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
+		/* apply netmask the same way ingress-side does */
+#ifdef CONFIG_IP_VS_IPV6
+		if (svc->af == AF_INET6)
+			ipv6_addr_prefix(&snet.in6, &caddr->in6,
+					 (__force __u32)svc->netmask);
+		else
+#endif
+			snet.ip = caddr->ip & svc->netmask;
+		/* fill params and create template if none exists yet */
+		if (ip_vs_conn_fill_param_persist(svc, skb, iph->protocol,
+						  &snet, 0, vaddr,
+						  vport, &param) < 0)
+			return NULL;
+		ct = ip_vs_ct_in_get(&param);
+		if (!ct) {
+			ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
+					    IP_VS_CONN_F_TEMPLATE, dest, 0);
+			if (!ct) {
+				kfree(param.pe_data);
+				return NULL;
+			}
+			ct->timeout = svc->timeout;
+		} else {
+			kfree(param.pe_data);
+		}
+	}
+
+	/* connection flags */
+	flags = ((svc->flags & IP_VS_SVC_F_ONEPACKET) &&
+		 iph->protocol == IPPROTO_UDP) ? IP_VS_CONN_F_ONE_PACKET : 0;
+	/* create connection */
+	ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
+			      caddr, cport, vaddr, vport, &param);
+	cp = ip_vs_conn_new(&param, dest->af, daddr, dport, flags, dest, 0);
+	if (!cp) {
+		if (ct)
+			ip_vs_conn_put(ct);
+		return NULL;
+	}
+	if (ct) {
+		ip_vs_control_add(cp, ct);
+		ip_vs_conn_put(ct);
+	}
+	ip_vs_conn_stats(cp, svc);
+
+	/* return connection (will be used to handle outgoing packet) */
+	IP_VS_DBG_BUF(6, "New connection RS-initiated:%c c:%s:%u v:%s:%u "
+		      "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
+		      ip_vs_fwd_tag(cp),
+		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
+		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
+		      IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
+		      cp->flags, atomic_read(&cp->refcnt));
+	LeaveFunction(12);
+	return cp;
+}
+
+/* Handle outgoing packets which are considered requests initiated by
+ * real servers, so that subsequent responses from external client can be
+ * routed to the right real server.
+ * Used also for outgoing responses in OPS mode.
+ *
+ * Connection management is handled by persistent-engine specific callback.
+ */
+static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum,
+					      struct netns_ipvs *ipvs,
+					      int af, struct sk_buff *skb,
+					      const struct ip_vs_iphdr *iph)
+{
+	struct ip_vs_dest *dest;
+	struct ip_vs_conn *cp = NULL;
+	__be16 _ports[2], *pptr;
+
+	if (hooknum == NF_INET_LOCAL_IN)
+		return NULL;
+
+	pptr = frag_safe_skb_hp(skb, iph->len,
+				sizeof(_ports), _ports, iph);
+	if (!pptr)
+		return NULL;
+
+	rcu_read_lock();
+	dest = ip_vs_find_real_service(ipvs, af, iph->protocol,
+				       &iph->saddr, pptr[0]);
+	if (dest) {
+		struct ip_vs_service *svc;
+		struct ip_vs_pe *pe;
+
+		svc = rcu_dereference(dest->svc);
+		if (svc) {
+			pe = rcu_dereference(svc->pe);
+			if (pe && pe->conn_out)
+				cp = pe->conn_out(svc, dest, skb, iph,
+						  pptr[0], pptr[1]);
+		}
+	}
+	rcu_read_unlock();
+
+	return cp;
+}
+
 /* Handle response packets: rewrite addresses and send away...
  */
 static unsigned int
@@ -1245,6 +1386,22 @@
 
 	if (likely(cp))
 		return handle_response(af, skb, pd, cp, &iph, hooknum);
+
+	/* Check for real-server-started requests */
+	if (atomic_read(&ipvs->conn_out_counter)) {
+		/* Currently only for UDP:
+		 * connection oriented protocols typically use
+		 * ephemeral ports for outgoing connections, so
+		 * related incoming responses would not match any VS
+		 */
+		if (pp->protocol == IPPROTO_UDP) {
+			cp = __ip_vs_rs_conn_out(hooknum, ipvs, af, skb, &iph);
+			if (likely(cp))
+				return handle_response(af, skb, pd, cp, &iph,
+						       hooknum);
+		}
+	}
+
 	if (sysctl_nat_icmp_send(ipvs) &&
 	    (pp->protocol == IPPROTO_TCP ||
 	     pp->protocol == IPPROTO_UDP ||
@@ -1837,6 +1994,9 @@
 
 	if (ipvs->sync_state & IP_VS_STATE_MASTER)
 		ip_vs_sync_conn(ipvs, cp, pkts);
+	else if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) && cp->control)
+		/* increment is done inside ip_vs_sync_conn too */
+		atomic_inc(&cp->control->in_pkts);
 
 	ip_vs_conn_put(cp);
 	return ret;
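
The conn_out_counter introduced above acts as a cheap gate: the extra
real-server lookup in the response path only runs when at least one
virtual service uses a persistence engine that provides a conn_out
callback (in this series only the SIP pe does). A rough userspace
sketch of that gating pattern, with invented names and C11 atomics
standing in for the kernel's atomic_t (not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int conn_out_services;	/* services wanting conn_out */

static void service_add(bool wants_conn_out)
{
	if (wants_conn_out)
		atomic_fetch_add(&conn_out_services, 1);
}

static void service_del(bool wanted_conn_out)
{
	if (wanted_conn_out)
		atomic_fetch_sub(&conn_out_services, 1);
}

static bool maybe_handle_rs_packet(void)
{
	/* fast path: nobody registered the callback, skip the lookup */
	if (atomic_load(&conn_out_services) == 0)
		return false;
	/* slow path: the real-server lookup would happen here */
	return true;
}

int main(void)
{
	printf("gate closed: %d\n", maybe_handle_rs_packet());
	service_add(true);
	printf("gate open: %d\n", maybe_handle_rs_packet());
	service_del(true);
	return 0;
}
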
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 404b2a4..c3c809b 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -567,6 +567,36 @@
 	return false;
 }
 
+/* Find real service record by <proto,addr,port>.
+ * In case of multiple records with the same <proto,addr,port>, only
+ * the first found record is returned.
+ *
+ * To be called under RCU lock.
+ */
+struct ip_vs_dest *ip_vs_find_real_service(struct netns_ipvs *ipvs, int af,
+					   __u16 protocol,
+					   const union nf_inet_addr *daddr,
+					   __be16 dport)
+{
+	unsigned int hash;
+	struct ip_vs_dest *dest;
+
+	/* Check for "full" addressed entries */
+	hash = ip_vs_rs_hashkey(af, daddr, dport);
+
+	hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) {
+		if (dest->port == dport &&
+		    dest->af == af &&
+		    ip_vs_addr_equal(af, &dest->addr, daddr) &&
+			(dest->protocol == protocol || dest->vfwmark)) {
+			/* HIT */
+			return dest;
+		}
+	}
+
+	return NULL;
+}
+
 /* Lookup destination by {addr,port} in the given service
  * Called under RCU lock.
  */
@@ -1253,6 +1283,8 @@
 		atomic_inc(&ipvs->ftpsvc_counter);
 	else if (svc->port == 0)
 		atomic_inc(&ipvs->nullsvc_counter);
+	if (svc->pe && svc->pe->conn_out)
+		atomic_inc(&ipvs->conn_out_counter);
 
 	ip_vs_start_estimator(ipvs, &svc->stats);
 
@@ -1293,6 +1325,7 @@
 	struct ip_vs_scheduler *sched = NULL, *old_sched;
 	struct ip_vs_pe *pe = NULL, *old_pe = NULL;
 	int ret = 0;
+	bool new_pe_conn_out, old_pe_conn_out;
 
 	/*
 	 * Lookup the scheduler, by 'u->sched_name'
@@ -1355,8 +1388,16 @@
 	svc->netmask = u->netmask;
 
 	old_pe = rcu_dereference_protected(svc->pe, 1);
-	if (pe != old_pe)
+	if (pe != old_pe) {
 		rcu_assign_pointer(svc->pe, pe);
+		/* check for optional methods in new pe */
+		new_pe_conn_out = (pe && pe->conn_out) ? true : false;
+		old_pe_conn_out = (old_pe && old_pe->conn_out) ? true : false;
+		if (new_pe_conn_out && !old_pe_conn_out)
+			atomic_inc(&svc->ipvs->conn_out_counter);
+		if (old_pe_conn_out && !new_pe_conn_out)
+			atomic_dec(&svc->ipvs->conn_out_counter);
+	}
 
 out:
 	ip_vs_scheduler_put(old_sched);
@@ -1389,6 +1430,8 @@
 
 	/* Unbind persistence engine, keep svc->pe */
 	old_pe = rcu_dereference_protected(svc->pe, 1);
+	if (old_pe && old_pe->conn_out)
+		atomic_dec(&ipvs->conn_out_counter);
 	ip_vs_pe_put(old_pe);
 
 	/*
@@ -2875,8 +2918,10 @@
 	if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) ||
 	    nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) ||
 	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
+			      IPVS_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
+			      IPVS_STATS_ATTR_PAD) ||
 	    nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) ||
 	    nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) ||
 	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) ||
@@ -2900,16 +2945,26 @@
 	if (!nl_stats)
 		return -EMSGSIZE;
 
-	if (nla_put_u64(skb, IPVS_STATS_ATTR_CONNS, kstats->conns) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_CPS, kstats->cps) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps) ||
-	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps))
+	if (nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CONNS, kstats->conns,
+			      IPVS_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts,
+			      IPVS_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts,
+			      IPVS_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
+			      IPVS_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
+			      IPVS_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CPS, kstats->cps,
+			      IPVS_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps,
+			      IPVS_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps,
+			      IPVS_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps,
+			      IPVS_STATS_ATTR_PAD) ||
+	    nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps,
+			      IPVS_STATS_ATTR_PAD))
 		goto nla_put_failure;
 	nla_nest_end(skb, nl_stats);
 
@@ -3957,6 +4012,7 @@
 		    (unsigned long) ipvs);
 	atomic_set(&ipvs->ftpsvc_counter, 0);
 	atomic_set(&ipvs->nullsvc_counter, 0);
+	atomic_set(&ipvs->conn_out_counter, 0);
 
 	/* procfs stats */
 	ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 30434fb..f04fd8d 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -93,6 +93,10 @@
 	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
 		return;
 
+	/* Never alter conntrack for OPS conns (no reply is expected) */
+	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+		return;
+
 	/* Alter reply only in original direction */
 	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
 		return;
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 0a6eb5c..d07ef9e 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -143,6 +143,20 @@
 	return cp->pe_data_len;
 }
 
+static struct ip_vs_conn *
+ip_vs_sip_conn_out(struct ip_vs_service *svc,
+		   struct ip_vs_dest *dest,
+		   struct sk_buff *skb,
+		   const struct ip_vs_iphdr *iph,
+		   __be16 dport,
+		   __be16 cport)
+{
+	if (likely(iph->protocol == IPPROTO_UDP))
+		return ip_vs_new_conn_out(svc, dest, skb, iph, dport, cport);
+	/* currently no need to handle protocols other than UDP */
+	return NULL;
+}
+
 static struct ip_vs_pe ip_vs_sip_pe =
 {
 	.name =			"sip",
@@ -153,6 +167,7 @@
 	.ct_match =		ip_vs_sip_ct_match,
 	.hashkey_raw =		ip_vs_sip_hashkey_raw,
 	.show_pe_data =		ip_vs_sip_show_pe_data,
+	.conn_out =		ip_vs_sip_conn_out,
 };
 
 static int __init ip_vs_sip_init(void)
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index dc196a0..6d19d2e 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -1013,8 +1013,7 @@
 	if (IS_ERR(skb))
 		goto tx_error;
 
-	skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af));
-	if (IS_ERR(skb))
+	if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)))
 		goto tx_error;
 
 	skb->transport_header = skb->network_header;
@@ -1105,8 +1104,7 @@
 	if (IS_ERR(skb))
 		goto tx_error;
 
-	skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af));
-	if (IS_ERR(skb))
+	if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)))
 		goto tx_error;
 
 	skb->transport_header = skb->network_header;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index afde5f5..db2312e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -12,6 +12,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/netfilter.h>
 #include <linux/module.h>
@@ -52,6 +54,7 @@
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_helper.h>
+#include <net/netns/hash.h>
 
 #define NF_CONNTRACK_VERSION	"0.5.0"
 
@@ -66,7 +69,13 @@
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
 
+struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+
+static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
+static __read_mostly seqcount_t nf_conntrack_generation;
+static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
@@ -105,7 +114,7 @@
 		spin_lock_nested(&nf_conntrack_locks[h1],
 				 SINGLE_DEPTH_NESTING);
 	}
-	if (read_seqcount_retry(&net->ct.generation, sequence)) {
+	if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
 		nf_conntrack_double_unlock(h1, h2);
 		return true;
 	}
@@ -139,43 +148,43 @@
 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
-unsigned int nf_conntrack_hash_rnd __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
+static unsigned int nf_conntrack_hash_rnd __read_mostly;
 
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
+			      const struct net *net)
 {
 	unsigned int n;
+	u32 seed;
+
+	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
 
 	/* The direction must be ignored, so we hash everything up to the
 	 * destination ports (which is a multiple of 4) and treat the last
 	 * three bytes manually.
 	 */
+	seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
 	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
-	return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
+	return jhash2((u32 *)tuple, n, seed ^
 		      (((__force __u16)tuple->dst.u.all << 16) |
 		      tuple->dst.protonum));
 }
 
-static u32 __hash_bucket(u32 hash, unsigned int size)
+static u32 scale_hash(u32 hash)
 {
-	return reciprocal_scale(hash, size);
+	return reciprocal_scale(hash, nf_conntrack_htable_size);
 }
 
-static u32 hash_bucket(u32 hash, const struct net *net)
+static u32 __hash_conntrack(const struct net *net,
+			    const struct nf_conntrack_tuple *tuple,
+			    unsigned int size)
 {
-	return __hash_bucket(hash, net->ct.htable_size);
+	return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
 }
 
-static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-				  unsigned int size)
+static u32 hash_conntrack(const struct net *net,
+			  const struct nf_conntrack_tuple *tuple)
 {
-	return __hash_bucket(hash_conntrack_raw(tuple), size);
-}
-
-static inline u_int32_t hash_conntrack(const struct net *net,
-				       const struct nf_conntrack_tuple *tuple)
-{
-	return __hash_conntrack(tuple, net->ct.htable_size);
+	return scale_hash(hash_conntrack_raw(tuple, net));
 }
 
 bool
@@ -356,7 +365,7 @@
 	}
 	rcu_read_lock();
 	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
-	if (l4proto && l4proto->destroy)
+	if (l4proto->destroy)
 		l4proto->destroy(ct);
 
 	rcu_read_unlock();
@@ -391,7 +400,7 @@
 
 	local_bh_disable();
 	do {
-		sequence = read_seqcount_begin(&net->ct.generation);
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
 		hash = hash_conntrack(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		reply_hash = hash_conntrack(net,
@@ -443,7 +452,8 @@
 static inline bool
 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
 		const struct nf_conntrack_tuple *tuple,
-		const struct nf_conntrack_zone *zone)
+		const struct nf_conntrack_zone *zone,
+		const struct net *net)
 {
 	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
@@ -452,7 +462,8 @@
 	 */
 	return nf_ct_tuple_equal(tuple, &h->tuple) &&
 	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
-	       nf_ct_is_confirmed(ct);
+	       nf_ct_is_confirmed(ct) &&
+	       net_eq(net, nf_ct_net(ct));
 }
 
 /*
@@ -465,21 +476,23 @@
 		      const struct nf_conntrack_tuple *tuple, u32 hash)
 {
 	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_head *ct_hash;
 	struct hlist_nulls_node *n;
-	unsigned int bucket = hash_bucket(hash, net);
+	unsigned int bucket, sequence;
 
-	/* Disable BHs the entire time since we normally need to disable them
-	 * at least once for the stats anyway.
-	 */
-	local_bh_disable();
 begin:
-	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
-		if (nf_ct_key_equal(h, tuple, zone)) {
-			NF_CT_STAT_INC(net, found);
-			local_bh_enable();
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		bucket = scale_hash(hash);
+		ct_hash = nf_conntrack_hash;
+	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
+		if (nf_ct_key_equal(h, tuple, zone, net)) {
+			NF_CT_STAT_INC_ATOMIC(net, found);
 			return h;
 		}
-		NF_CT_STAT_INC(net, searched);
+		NF_CT_STAT_INC_ATOMIC(net, searched);
 	}
 	/*
 	 * if the nulls value we got at the end of this lookup is
@@ -487,10 +500,9 @@
 	 * We probably met an item that was moved to another chain.
 	 */
 	if (get_nulls_value(n) != bucket) {
-		NF_CT_STAT_INC(net, search_restart);
+		NF_CT_STAT_INC_ATOMIC(net, search_restart);
 		goto begin;
 	}
-	local_bh_enable();
 
 	return NULL;
 }
@@ -512,7 +524,7 @@
 			     !atomic_inc_not_zero(&ct->ct_general.use)))
 			h = NULL;
 		else {
-			if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
+			if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
 				nf_ct_put(ct);
 				goto begin;
 			}
@@ -528,7 +540,7 @@
 		      const struct nf_conntrack_tuple *tuple)
 {
 	return __nf_conntrack_find_get(net, zone, tuple,
-				       hash_conntrack_raw(tuple));
+				       hash_conntrack_raw(tuple, net));
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
@@ -536,12 +548,10 @@
 				       unsigned int hash,
 				       unsigned int reply_hash)
 {
-	struct net *net = nf_ct_net(ct);
-
 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-			   &net->ct.hash[hash]);
+			   &nf_conntrack_hash[hash]);
 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
-			   &net->ct.hash[reply_hash]);
+			   &nf_conntrack_hash[reply_hash]);
 }
 
 int
@@ -558,7 +568,7 @@
 
 	local_bh_disable();
 	do {
-		sequence = read_seqcount_begin(&net->ct.generation);
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
 		hash = hash_conntrack(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		reply_hash = hash_conntrack(net,
@@ -566,17 +576,14 @@
 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
 	/* See if there's one in the list already, including reverse */
-	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
-		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-				      &h->tuple) &&
-		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
-				     NF_CT_DIRECTION(h)))
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				    zone, net))
 			goto out;
-	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
-		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
-				      &h->tuple) &&
-		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
-				     NF_CT_DIRECTION(h)))
+
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
 			goto out;
 
 	add_timer(&ct->timeout);
@@ -597,6 +604,62 @@
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 
+static inline void nf_ct_acct_update(struct nf_conn *ct,
+				     enum ip_conntrack_info ctinfo,
+				     unsigned int len)
+{
+	struct nf_conn_acct *acct;
+
+	acct = nf_conn_acct_find(ct);
+	if (acct) {
+		struct nf_conn_counter *counter = acct->counter;
+
+		atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
+		atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
+	}
+}
+
+static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			     const struct nf_conn *loser_ct)
+{
+	struct nf_conn_acct *acct;
+
+	acct = nf_conn_acct_find(loser_ct);
+	if (acct) {
+		struct nf_conn_counter *counter = acct->counter;
+		unsigned int bytes;
+
+		/* u32 should be fine since we must have seen one packet. */
+		bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
+		nf_ct_acct_update(ct, ctinfo, bytes);
+	}
+}
+
+/* Resolve race on insertion if this protocol allows this. */
+static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
+			       enum ip_conntrack_info ctinfo,
+			       struct nf_conntrack_tuple_hash *h)
+{
+	/* This is the conntrack entry already in hashes that won race. */
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+	struct nf_conntrack_l4proto *l4proto;
+
+	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+	if (l4proto->allow_clash &&
+	    !nf_ct_is_dying(ct) &&
+	    atomic_inc_not_zero(&ct->ct_general.use)) {
+		nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
+		nf_conntrack_put(skb->nfct);
+		/* Assign conntrack already in hashes to this skbuff. Don't
+		 * modify skb->nfctinfo to ensure consistent stateful filtering.
+		 */
+		skb->nfct = &ct->ct_general;
+		return NF_ACCEPT;
+	}
+	NF_CT_STAT_INC(net, drop);
+	return NF_DROP;
+}
+
 /* Confirm a connection given skb; places it in hash table */
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
@@ -611,6 +674,7 @@
 	enum ip_conntrack_info ctinfo;
 	struct net *net;
 	unsigned int sequence;
+	int ret = NF_DROP;
 
 	ct = nf_ct_get(skb, &ctinfo);
 	net = nf_ct_net(ct);
@@ -626,10 +690,10 @@
 	local_bh_disable();
 
 	do {
-		sequence = read_seqcount_begin(&net->ct.generation);
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
 		/* reuse the hash saved before */
 		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
-		hash = hash_bucket(hash, net);
+		hash = scale_hash(hash);
 		reply_hash = hash_conntrack(net,
 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
@@ -653,23 +717,22 @@
 	 */
 	nf_ct_del_from_dying_or_unconfirmed_list(ct);
 
-	if (unlikely(nf_ct_is_dying(ct)))
-		goto out;
+	if (unlikely(nf_ct_is_dying(ct))) {
+		nf_ct_add_to_dying_list(ct);
+		goto dying;
+	}
 
 	/* See if there's one in the list already, including reverse:
 	   NAT could have grabbed it without realizing, since we're
 	   not in the hash.  If there is, we lost race. */
-	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
-		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-				      &h->tuple) &&
-		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
-				     NF_CT_DIRECTION(h)))
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				    zone, net))
 			goto out;
-	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
-		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
-				      &h->tuple) &&
-		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
-				     NF_CT_DIRECTION(h)))
+
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
 			goto out;
 
 	/* Timer relative to confirmation time, not original
@@ -708,10 +771,12 @@
 
 out:
 	nf_ct_add_to_dying_list(ct);
+	ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
+dying:
 	nf_conntrack_double_unlock(hash, reply_hash);
 	NF_CT_STAT_INC(net, insert_failed);
 	local_bh_enable();
-	return NF_DROP;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
 
@@ -724,29 +789,31 @@
 	struct net *net = nf_ct_net(ignored_conntrack);
 	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_head *ct_hash;
+	unsigned int hash, sequence;
 	struct hlist_nulls_node *n;
 	struct nf_conn *ct;
-	unsigned int hash;
 
 	zone = nf_ct_zone(ignored_conntrack);
-	hash = hash_conntrack(net, tuple);
 
-	/* Disable BHs the entire time since we need to disable them at
-	 * least once for the stats anyway.
-	 */
-	rcu_read_lock_bh();
-	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
+	rcu_read_lock();
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		hash = hash_conntrack(net, tuple);
+		ct_hash = nf_conntrack_hash;
+	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		if (ct != ignored_conntrack &&
-		    nf_ct_tuple_equal(tuple, &h->tuple) &&
-		    nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
-			NF_CT_STAT_INC(net, found);
-			rcu_read_unlock_bh();
+		    nf_ct_key_equal(h, tuple, zone, net)) {
+			NF_CT_STAT_INC_ATOMIC(net, found);
+			rcu_read_unlock();
 			return 1;
 		}
-		NF_CT_STAT_INC(net, searched);
+		NF_CT_STAT_INC_ATOMIC(net, searched);
 	}
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	return 0;
 }
@@ -760,71 +827,63 @@
 {
 	/* Use oldest entry, which is roughly LRU */
 	struct nf_conntrack_tuple_hash *h;
-	struct nf_conn *ct = NULL, *tmp;
+	struct nf_conn *tmp;
 	struct hlist_nulls_node *n;
-	unsigned int i = 0, cnt = 0;
-	int dropped = 0;
-	unsigned int hash, sequence;
+	unsigned int i, hash, sequence;
+	struct nf_conn *ct = NULL;
 	spinlock_t *lockp;
+	bool ret = false;
+
+	i = 0;
 
 	local_bh_disable();
 restart:
-	sequence = read_seqcount_begin(&net->ct.generation);
-	hash = hash_bucket(_hash, net);
-	for (; i < net->ct.htable_size; i++) {
+	sequence = read_seqcount_begin(&nf_conntrack_generation);
+	for (; i < NF_CT_EVICTION_RANGE; i++) {
+		hash = scale_hash(_hash++);
 		lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
 		nf_conntrack_lock(lockp);
-		if (read_seqcount_retry(&net->ct.generation, sequence)) {
+		if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
 			spin_unlock(lockp);
 			goto restart;
 		}
-		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
-					 hnnode) {
+		hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
+					       hnnode) {
 			tmp = nf_ct_tuplehash_to_ctrack(h);
-			if (!test_bit(IPS_ASSURED_BIT, &tmp->status) &&
-			    !nf_ct_is_dying(tmp) &&
-			    atomic_inc_not_zero(&tmp->ct_general.use)) {
+
+			if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
+			    !net_eq(nf_ct_net(tmp), net) ||
+			    nf_ct_is_dying(tmp))
+				continue;
+
+			if (atomic_inc_not_zero(&tmp->ct_general.use)) {
 				ct = tmp;
 				break;
 			}
-			cnt++;
 		}
 
-		hash = (hash + 1) % net->ct.htable_size;
 		spin_unlock(lockp);
-
-		if (ct || cnt >= NF_CT_EVICTION_RANGE)
+		if (ct)
 			break;
-
 	}
+
 	local_bh_enable();
 
 	if (!ct)
-		return dropped;
+		return false;
 
-	if (del_timer(&ct->timeout)) {
+	/* kill only if in same netns -- might have moved due to
+	 * SLAB_DESTROY_BY_RCU rules
+	 */
+	if (net_eq(nf_ct_net(ct), net) && del_timer(&ct->timeout)) {
 		if (nf_ct_delete(ct, 0, 0)) {
-			dropped = 1;
 			NF_CT_STAT_INC_ATOMIC(net, early_drop);
+			ret = true;
 		}
 	}
+
 	nf_ct_put(ct);
-	return dropped;
-}
-
-void init_nf_conntrack_hash_rnd(void)
-{
-	unsigned int rand;
-
-	/*
-	 * Why not initialize nf_conntrack_rnd in a "init()" function ?
-	 * Because there isn't enough entropy when system initializing,
-	 * and we initialize it as late as possible.
-	 */
-	do {
-		get_random_bytes(&rand, sizeof(rand));
-	} while (!rand);
-	cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
+	return ret;
 }
 
 static struct nf_conn *
@@ -836,12 +895,6 @@
 {
 	struct nf_conn *ct;
 
-	if (unlikely(!nf_conntrack_hash_rnd)) {
-		init_nf_conntrack_hash_rnd();
-		/* recompute the hash as nf_conntrack_hash_rnd is initialized */
-		hash = hash_conntrack_raw(orig);
-	}
-
 	/* We don't want any race condition at early drop stage */
 	atomic_inc(&net->ct.count);
 
@@ -858,7 +911,7 @@
 	 * Do not use kmem_cache_zalloc(), as this cache uses
 	 * SLAB_DESTROY_BY_RCU.
 	 */
-	ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
+	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
 	if (ct == NULL)
 		goto out;
 
@@ -885,7 +938,7 @@
 	atomic_set(&ct->ct_general.use, 0);
 	return ct;
 out_free:
-	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+	kmem_cache_free(nf_conntrack_cachep, ct);
 out:
 	atomic_dec(&net->ct.count);
 	return ERR_PTR(-ENOMEM);
@@ -912,7 +965,7 @@
 
 	nf_ct_ext_destroy(ct);
 	nf_ct_ext_free(ct);
-	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+	kmem_cache_free(nf_conntrack_cachep, ct);
 	smp_mb__before_atomic();
 	atomic_dec(&net->ct.count);
 }
@@ -966,7 +1019,7 @@
 
 	if (!l4proto->new(ct, skb, dataoff, timeouts)) {
 		nf_conntrack_free(ct);
-		pr_debug("init conntrack: can't track with proto module\n");
+		pr_debug("can't track with proto module\n");
 		return NULL;
 	}
 
@@ -988,7 +1041,7 @@
 		spin_lock(&nf_conntrack_expect_lock);
 		exp = nf_ct_find_expectation(net, zone, tuple);
 		if (exp) {
-			pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
+			pr_debug("expectation arrives ct=%p exp=%p\n",
 				 ct, exp);
 			/* Welcome, Mr. Bond.  We've been expecting you... */
 			__set_bit(IPS_EXPECTED_BIT, &ct->status);
@@ -1053,13 +1106,13 @@
 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
 			     dataoff, l3num, protonum, net, &tuple, l3proto,
 			     l4proto)) {
-		pr_debug("resolve_normal_ct: Can't get tuple\n");
+		pr_debug("Can't get tuple\n");
 		return NULL;
 	}
 
 	/* look for tuple match */
 	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
-	hash = hash_conntrack_raw(&tuple);
+	hash = hash_conntrack_raw(&tuple, net);
 	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
 	if (!h) {
 		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
@@ -1079,14 +1132,13 @@
 	} else {
 		/* Once we've had two way comms, always ESTABLISHED. */
 		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
-			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
+			pr_debug("normal packet for %p\n", ct);
 			*ctinfo = IP_CT_ESTABLISHED;
 		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
-			pr_debug("nf_conntrack_in: related packet for %p\n",
-				 ct);
+			pr_debug("related packet for %p\n", ct);
 			*ctinfo = IP_CT_RELATED;
 		} else {
-			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
+			pr_debug("new packet for %p\n", ct);
 			*ctinfo = IP_CT_NEW;
 		}
 		*set_reply = 0;
@@ -1269,17 +1321,8 @@
 	}
 
 acct:
-	if (do_acct) {
-		struct nf_conn_acct *acct;
-
-		acct = nf_conn_acct_find(ct);
-		if (acct) {
-			struct nf_conn_counter *counter = acct->counter;
-
-			atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
-			atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes);
-		}
-	}
+	if (do_acct)
+		nf_ct_acct_update(ct, ctinfo, skb->len);
 }
 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
 
@@ -1288,18 +1331,8 @@
 		       const struct sk_buff *skb,
 		       int do_acct)
 {
-	if (do_acct) {
-		struct nf_conn_acct *acct;
-
-		acct = nf_conn_acct_find(ct);
-		if (acct) {
-			struct nf_conn_counter *counter = acct->counter;
-
-			atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
-			atomic64_add(skb->len - skb_network_offset(skb),
-				     &counter[CTINFO2DIR(ctinfo)].bytes);
-		}
-	}
+	if (do_acct)
+		nf_ct_acct_update(ct, ctinfo, skb->len);
 
 	if (del_timer(&ct->timeout)) {
 		ct->timeout.function((unsigned long)ct);
@@ -1395,16 +1428,17 @@
 	int cpu;
 	spinlock_t *lockp;
 
-	for (; *bucket < net->ct.htable_size; (*bucket)++) {
+	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
 		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
 		local_bh_disable();
 		nf_conntrack_lock(lockp);
-		if (*bucket < net->ct.htable_size) {
-			hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
+		if (*bucket < nf_conntrack_htable_size) {
+			hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
 				if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 					continue;
 				ct = nf_ct_tuplehash_to_ctrack(h);
-				if (iter(ct, data))
+				if (net_eq(nf_ct_net(ct), net) &&
+				    iter(ct, data))
 					goto found;
 			}
 		}
@@ -1442,6 +1476,9 @@
 
 	might_sleep();
 
+	if (atomic_read(&net->ct.count) == 0)
+		return;
+
 	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
 		/* Time to push up daises... */
 		if (del_timer(&ct->timeout))
@@ -1493,6 +1530,8 @@
 	while (untrack_refs() > 0)
 		schedule();
 
+	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
+
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	nf_ct_extend_unregister(&nf_ct_zone_extend);
 #endif
@@ -1543,15 +1582,12 @@
 	}
 
 	list_for_each_entry(net, net_exit_list, exit_list) {
-		nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
 		nf_conntrack_proto_pernet_fini(net);
 		nf_conntrack_helper_pernet_fini(net);
 		nf_conntrack_ecache_pernet_fini(net);
 		nf_conntrack_tstamp_pernet_fini(net);
 		nf_conntrack_acct_pernet_fini(net);
 		nf_conntrack_expect_pernet_fini(net);
-		kmem_cache_destroy(net->ct.nf_conntrack_cachep);
-		kfree(net->ct.slabname);
 		free_percpu(net->ct.stat);
 		free_percpu(net->ct.pcpu_lists);
 	}
@@ -1606,7 +1642,7 @@
 
 	local_bh_disable();
 	nf_conntrack_all_lock();
-	write_seqcount_begin(&init_net.ct.generation);
+	write_seqcount_begin(&nf_conntrack_generation);
 
 	/* Lookups in the old hash might happen in parallel, which means we
 	 * might get false negatives during connection lookup. New connections
@@ -1614,26 +1650,28 @@
 	 * though since that required taking the locks.
 	 */
 
-	for (i = 0; i < init_net.ct.htable_size; i++) {
-		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
-			h = hlist_nulls_entry(init_net.ct.hash[i].first,
-					struct nf_conntrack_tuple_hash, hnnode);
+	for (i = 0; i < nf_conntrack_htable_size; i++) {
+		while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
+			h = hlist_nulls_entry(nf_conntrack_hash[i].first,
+					      struct nf_conntrack_tuple_hash, hnnode);
 			ct = nf_ct_tuplehash_to_ctrack(h);
 			hlist_nulls_del_rcu(&h->hnnode);
-			bucket = __hash_conntrack(&h->tuple, hashsize);
+			bucket = __hash_conntrack(nf_ct_net(ct),
+						  &h->tuple, hashsize);
 			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
 		}
 	}
-	old_size = init_net.ct.htable_size;
-	old_hash = init_net.ct.hash;
+	old_size = nf_conntrack_htable_size;
+	old_hash = nf_conntrack_hash;
 
-	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
-	init_net.ct.hash = hash;
+	nf_conntrack_hash = hash;
+	nf_conntrack_htable_size = hashsize;
 
-	write_seqcount_end(&init_net.ct.generation);
+	write_seqcount_end(&nf_conntrack_generation);
 	nf_conntrack_all_unlock();
 	local_bh_enable();
 
+	synchronize_net();
 	nf_ct_free_hashtable(old_hash, old_size);
 	return 0;
 }
@@ -1654,7 +1692,10 @@
 int nf_conntrack_init_start(void)
 {
 	int max_factor = 8;
-	int i, ret, cpu;
+	int ret = -ENOMEM;
+	int i, cpu;
+
+	seqcount_init(&nf_conntrack_generation);
 
 	for (i = 0; i < CONNTRACK_LOCKS; i++)
 		spin_lock_init(&nf_conntrack_locks[i]);
@@ -1681,8 +1722,19 @@
 		 * entries. */
 		max_factor = 4;
 	}
+
+	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
+	if (!nf_conntrack_hash)
+		return -ENOMEM;
+
 	nf_conntrack_max = max_factor * nf_conntrack_htable_size;
 
+	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+						sizeof(struct nf_conn), 0,
+						SLAB_DESTROY_BY_RCU, NULL);
+	if (!nf_conntrack_cachep)
+		goto err_cachep;
+
 	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
 	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
 	       nf_conntrack_max);
@@ -1759,6 +1811,9 @@
 err_acct:
 	nf_conntrack_expect_fini();
 err_expect:
+	kmem_cache_destroy(nf_conntrack_cachep);
+err_cachep:
+	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
 	return ret;
 }
 
@@ -1782,7 +1837,6 @@
 	int cpu;
 
 	atomic_set(&net->ct.count, 0);
-	seqcount_init(&net->ct.generation);
 
 	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
 	if (!net->ct.pcpu_lists)
@@ -1800,24 +1854,6 @@
 	if (!net->ct.stat)
 		goto err_pcpu_lists;
 
-	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
-	if (!net->ct.slabname)
-		goto err_slabname;
-
-	net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
-							sizeof(struct nf_conn), 0,
-							SLAB_DESTROY_BY_RCU, NULL);
-	if (!net->ct.nf_conntrack_cachep) {
-		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
-		goto err_cache;
-	}
-
-	net->ct.htable_size = nf_conntrack_htable_size;
-	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
-	if (!net->ct.hash) {
-		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
-		goto err_hash;
-	}
 	ret = nf_conntrack_expect_pernet_init(net);
 	if (ret < 0)
 		goto err_expect;
@@ -1849,12 +1885,6 @@
 err_acct:
 	nf_conntrack_expect_pernet_fini(net);
 err_expect:
-	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
-err_hash:
-	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
-err_cache:
-	kfree(net->ct.slabname);
-err_slabname:
 	free_percpu(net->ct.stat);
 err_pcpu_lists:
 	free_percpu(net->ct.pcpu_lists);
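
With the move to a single conntrack hash table shared by all network
namespaces, the tuple hash above now mixes a per-namespace value
(net_hash_mix()) into the random seed, and key comparisons additionally
check net_eq(). A rough userspace sketch of the idea, using a toy mixer
in place of jhash2() but reproducing reciprocal_scale() as the kernel
defines it; all other names here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* same formula as the kernel's reciprocal_scale(): map hash to [0, ep_ro) */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

/* toy stand-in for jhash2(): hash n 32-bit words with a seed */
static uint32_t toy_hash(const uint32_t *words, int n, uint32_t seed)
{
	uint32_t h = seed;
	int i;

	for (i = 0; i < n; i++) {
		h ^= words[i];
		h *= 2654435761u;	/* Knuth multiplicative constant */
		h ^= h >> 16;
	}
	return h;
}

int main(void)
{
	uint32_t tuple[4] = { 0x0a000001, 0x0a000002, 80, 12345 };
	uint32_t hash_rnd = 0xdeadbeef;			/* global random seed */
	uint32_t netns_a = 0x1111, netns_b = 0x2222;	/* net_hash_mix() stand-ins */
	uint32_t htable_size = 16384;

	/* same tuple, different namespace, (usually) different bucket */
	printf("bucket in ns A: %u\n",
	       reciprocal_scale(toy_hash(tuple, 4, hash_rnd ^ netns_a),
				htable_size));
	printf("bucket in ns B: %u\n",
	       reciprocal_scale(toy_hash(tuple, 4, hash_rnd ^ netns_b),
				htable_size));
	return 0;
}
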
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 4e78c57..d28011b 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -113,6 +113,60 @@
 		schedule_delayed_work(&ctnet->ecache_dwork, delay);
 }
 
+int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+				  u32 portid, int report)
+{
+	int ret = 0;
+	struct net *net = nf_ct_net(ct);
+	struct nf_ct_event_notifier *notify;
+	struct nf_conntrack_ecache *e;
+
+	rcu_read_lock();
+	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+	if (!notify)
+		goto out_unlock;
+
+	e = nf_ct_ecache_find(ct);
+	if (!e)
+		goto out_unlock;
+
+	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
+		struct nf_ct_event item = {
+			.ct	= ct,
+			.portid	= e->portid ? e->portid : portid,
+			.report = report
+		};
+		/* This is a resent of a destroy event? If so, skip missed */
+		unsigned long missed = e->portid ? 0 : e->missed;
+
+		if (!((eventmask | missed) & e->ctmask))
+			goto out_unlock;
+
+		ret = notify->fcn(eventmask | missed, &item);
+		if (unlikely(ret < 0 || missed)) {
+			spin_lock_bh(&ct->lock);
+			if (ret < 0) {
+				/* This is a destroy event that has been
+				 * triggered by a process, we store the PORTID
+				 * to include it in the retransmission.
+				 */
+				if (eventmask & (1 << IPCT_DESTROY) &&
+				    e->portid == 0 && portid != 0)
+					e->portid = portid;
+				else
+					e->missed |= eventmask;
+			} else {
+				e->missed &= ~missed;
+			}
+			spin_unlock_bh(&ct->lock);
+		}
+	}
+out_unlock:
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report);
+
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
 void nf_ct_deliver_cached_events(struct nf_conn *ct)
@@ -167,6 +221,36 @@
 }
 EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
 
+void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
+			       struct nf_conntrack_expect *exp,
+			       u32 portid, int report)
+
+{
+	struct net *net = nf_ct_exp_net(exp);
+	struct nf_exp_event_notifier *notify;
+	struct nf_conntrack_ecache *e;
+
+	rcu_read_lock();
+	notify = rcu_dereference(net->ct.nf_expect_event_cb);
+	if (!notify)
+		goto out_unlock;
+
+	e = nf_ct_ecache_find(exp->master);
+	if (!e)
+		goto out_unlock;
+
+	if (e->expmask & (1 << event)) {
+		struct nf_exp_event item = {
+			.exp	= exp,
+			.portid	= portid,
+			.report = report
+		};
+		notify->fcn(1 << event, &item);
+	}
+out_unlock:
+	rcu_read_unlock();
+}
+
 int nf_conntrack_register_notifier(struct net *net,
 				   struct nf_ct_event_notifier *new)
 {
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 278927a..9e36931 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -24,6 +24,7 @@
 #include <linux/moduleparam.h>
 #include <linux/export.h>
 #include <net/net_namespace.h>
+#include <net/netns/hash.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
@@ -35,9 +36,13 @@
 unsigned int nf_ct_expect_hsize __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
 
+struct hlist_head *nf_ct_expect_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_expect_hash);
+
 unsigned int nf_ct_expect_max __read_mostly;
 
 static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
+static unsigned int nf_ct_expect_hashrnd __read_mostly;
 
 /* nf_conntrack_expect helper functions */
 void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
@@ -72,21 +77,32 @@
 	nf_ct_expect_put(exp);
 }
 
-static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
+static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple)
 {
-	unsigned int hash;
+	unsigned int hash, seed;
 
-	if (unlikely(!nf_conntrack_hash_rnd)) {
-		init_nf_conntrack_hash_rnd();
-	}
+	get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));
+
+	seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);
 
 	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
 		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
-		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
+		       (__force __u16)tuple->dst.u.all) ^ seed);
 
 	return reciprocal_scale(hash, nf_ct_expect_hsize);
 }
 
+static bool
+nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
+		const struct nf_conntrack_expect *i,
+		const struct nf_conntrack_zone *zone,
+		const struct net *net)
+{
+	return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
+	       net_eq(net, nf_ct_net(i->master)) &&
+	       nf_ct_zone_equal_any(i->master, zone);
+}
+
 struct nf_conntrack_expect *
 __nf_ct_expect_find(struct net *net,
 		    const struct nf_conntrack_zone *zone,
@@ -98,10 +114,9 @@
 	if (!net->ct.expect_count)
 		return NULL;
 
-	h = nf_ct_expect_dst_hash(tuple);
-	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
-		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-		    nf_ct_zone_equal_any(i->master, zone))
+	h = nf_ct_expect_dst_hash(net, tuple);
+	hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
+		if (nf_ct_exp_equal(tuple, i, zone, net))
 			return i;
 	}
 	return NULL;
@@ -139,11 +154,10 @@
 	if (!net->ct.expect_count)
 		return NULL;
 
-	h = nf_ct_expect_dst_hash(tuple);
-	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
+	h = nf_ct_expect_dst_hash(net, tuple);
+	hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
 		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
-		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-		    nf_ct_zone_equal_any(i->master, zone)) {
+		    nf_ct_exp_equal(tuple, i, zone, net)) {
 			exp = i;
 			break;
 		}
@@ -223,6 +237,7 @@
 	}
 
 	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
+	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
 	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
@@ -232,6 +247,7 @@
 	return a->master == b->master && a->class == b->class &&
 	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
 	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
+	       net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
 	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
@@ -342,7 +358,7 @@
 	struct nf_conn_help *master_help = nfct_help(exp->master);
 	struct nf_conntrack_helper *helper;
 	struct net *net = nf_ct_exp_net(exp);
-	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
+	unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);
 
 	/* two references : one for hash insert, one for the timer */
 	atomic_add(2, &exp->use);
@@ -350,7 +366,7 @@
 	hlist_add_head(&exp->lnode, &master_help->expectations);
 	master_help->expecting[exp->class]++;
 
-	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
+	hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
 	net->ct.expect_count++;
 
 	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
@@ -401,8 +417,8 @@
 		ret = -ESHUTDOWN;
 		goto out;
 	}
-	h = nf_ct_expect_dst_hash(&expect->tuple);
-	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
+	h = nf_ct_expect_dst_hash(net, &expect->tuple);
+	hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
 		if (expect_matches(i, expect)) {
 			if (del_timer(&i->timeout)) {
 				nf_ct_unlink_expect(i);
@@ -468,12 +484,11 @@
 
 static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
 {
-	struct net *net = seq_file_net(seq);
 	struct ct_expect_iter_state *st = seq->private;
 	struct hlist_node *n;
 
 	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
-		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
+		n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
 		if (n)
 			return n;
 	}
@@ -483,14 +498,13 @@
 static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
 					     struct hlist_node *head)
 {
-	struct net *net = seq_file_net(seq);
 	struct ct_expect_iter_state *st = seq->private;
 
 	head = rcu_dereference(hlist_next_rcu(head));
 	while (head == NULL) {
 		if (++st->bucket >= nf_ct_expect_hsize)
 			return NULL;
-		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
+		head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
 	}
 	return head;
 }
@@ -623,28 +637,13 @@
 
 int nf_conntrack_expect_pernet_init(struct net *net)
 {
-	int err = -ENOMEM;
-
 	net->ct.expect_count = 0;
-	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
-	if (net->ct.expect_hash == NULL)
-		goto err1;
-
-	err = exp_proc_init(net);
-	if (err < 0)
-		goto err2;
-
-	return 0;
-err2:
-	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
-err1:
-	return err;
+	return exp_proc_init(net);
 }
 
 void nf_conntrack_expect_pernet_fini(struct net *net)
 {
 	exp_proc_remove(net);
-	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 }
 
 int nf_conntrack_expect_init(void)
@@ -660,6 +659,13 @@
 				0, 0, NULL);
 	if (!nf_ct_expect_cachep)
 		return -ENOMEM;
+
+	nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
+	if (!nf_ct_expect_hash) {
+		kmem_cache_destroy(nf_ct_expect_cachep);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -667,4 +673,5 @@
 {
 	rcu_barrier(); /* Wait for call_rcu() before destroy */
 	kmem_cache_destroy(nf_ct_expect_cachep);
+	nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
 }
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 3b40ec5..f703adb 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -38,10 +38,10 @@
 EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
 static unsigned int nf_ct_helper_count __read_mostly;
 
-static bool nf_ct_auto_assign_helper __read_mostly = true;
+static bool nf_ct_auto_assign_helper __read_mostly = false;
 module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
 MODULE_PARM_DESC(nf_conntrack_helper,
-		 "Enable automatic conntrack helper assignment (default 1)");
+		 "Enable automatic conntrack helper assignment (default 0)");
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table helper_sysctl_table[] = {
@@ -400,7 +400,7 @@
 	spin_lock_bh(&nf_conntrack_expect_lock);
 	for (i = 0; i < nf_ct_expect_hsize; i++) {
 		hlist_for_each_entry_safe(exp, next,
-					  &net->ct.expect_hash[i], hnode) {
+					  &nf_ct_expect_hash[i], hnode) {
 			struct nf_conn_help *help = nfct_help(exp->master);
 			if ((rcu_dereference_protected(
 					help->helper,
@@ -424,10 +424,10 @@
 		spin_unlock_bh(&pcpu->lock);
 	}
 	local_bh_disable();
-	for (i = 0; i < net->ct.htable_size; i++) {
+	for (i = 0; i < nf_conntrack_htable_size; i++) {
 		nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
-		if (i < net->ct.htable_size) {
-			hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
+		if (i < nf_conntrack_htable_size) {
+			hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode)
 				unhelp(h, me);
 		}
 		spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index 3ce5c31..252e6a7 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -16,28 +16,11 @@
 
 static spinlock_t nf_connlabels_lock;
 
-static unsigned int label_bits(const struct nf_conn_labels *l)
-{
-	unsigned int longs = l->words;
-	return longs * BITS_PER_LONG;
-}
-
-bool nf_connlabel_match(const struct nf_conn *ct, u16 bit)
-{
-	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
-
-	if (!labels)
-		return false;
-
-	return bit < label_bits(labels) && test_bit(bit, labels->bits);
-}
-EXPORT_SYMBOL_GPL(nf_connlabel_match);
-
 int nf_connlabel_set(struct nf_conn *ct, u16 bit)
 {
 	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
 
-	if (!labels || bit >= label_bits(labels))
+	if (!labels || BIT_WORD(bit) >= labels->words)
 		return -ENOSPC;
 
 	if (test_bit(bit, labels->bits))
@@ -50,14 +33,18 @@
 }
 EXPORT_SYMBOL_GPL(nf_connlabel_set);
 
-static void replace_u32(u32 *address, u32 mask, u32 new)
+static int replace_u32(u32 *address, u32 mask, u32 new)
 {
 	u32 old, tmp;
 
 	do {
 		old = *address;
 		tmp = (old & mask) ^ new;
+		if (old == tmp)
+			return 0;
 	} while (cmpxchg(address, old, tmp) != old);
+
+	return 1;
 }
 
 int nf_connlabels_replace(struct nf_conn *ct,
@@ -66,6 +53,7 @@
 {
 	struct nf_conn_labels *labels;
 	unsigned int size, i;
+	int changed = 0;
 	u32 *dst;
 
 	labels = nf_ct_labels_find(ct);
@@ -77,29 +65,27 @@
 		words32 = size / sizeof(u32);
 
 	dst = (u32 *) labels->bits;
-	if (words32) {
-		for (i = 0; i < words32; i++)
-			replace_u32(&dst[i], mask ? ~mask[i] : 0, data[i]);
-	}
+	for (i = 0; i < words32; i++)
+		changed |= replace_u32(&dst[i], mask ? ~mask[i] : 0, data[i]);
 
 	size /= sizeof(u32);
 	for (i = words32; i < size; i++) /* pad */
 		replace_u32(&dst[i], 0, 0);
 
-	nf_conntrack_event_cache(IPCT_LABEL, ct);
+	if (changed)
+		nf_conntrack_event_cache(IPCT_LABEL, ct);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_connlabels_replace);
 
-int nf_connlabels_get(struct net *net, unsigned int n_bits)
+int nf_connlabels_get(struct net *net, unsigned int bits)
 {
 	size_t words;
 
-	if (n_bits > (NF_CT_LABELS_MAX_SIZE * BITS_PER_BYTE))
+	words = BIT_WORD(bits) + 1;
+	if (words > NF_CT_LABELS_MAX_SIZE / sizeof(long))
 		return -ERANGE;
 
-	words = BITS_TO_LONGS(n_bits);
-
 	spin_lock(&nf_connlabels_lock);
 	net->ct.labels_used++;
 	if (words > net->ct.label_words)
@@ -128,6 +114,8 @@
 
 int nf_conntrack_labels_init(void)
 {
+	BUILD_BUG_ON(NF_CT_LABELS_MAX_SIZE / sizeof(long) >= U8_MAX);
+
 	spin_lock_init(&nf_connlabels_lock);
 	return nf_ct_extend_register(&labels_extend);
 }
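
The reworked replace_u32() above returns whether any label bit actually
changed, so nf_connlabels_replace() only emits an IPCT_LABEL event on
real changes. A standalone sketch of the same compare-and-swap loop,
written with C11 atomics (hypothetical userspace code, not the kernel
implementation):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* returns 1 if any bit in *address changed, 0 if the word was already
 * in the requested state (so the caller can skip the change event) */
static int replace_u32(_Atomic uint32_t *address, uint32_t mask, uint32_t new)
{
	uint32_t old, tmp;

	old = atomic_load(address);
	do {
		tmp = (old & mask) ^ new;
		if (old == tmp)
			return 0;
	} while (!atomic_compare_exchange_weak(address, &old, tmp));

	return 1;
}

int main(void)
{
	_Atomic uint32_t word = 0x00ff;

	/* set bits 8..11: the word changes, so a change event would fire */
	printf("%d\n", replace_u32(&word, ~0x0f00u, 0x0f00));
	/* same request again: nothing changes, no event */
	printf("%d\n", replace_u32(&word, ~0x0f00u, 0x0f00));
	return 0;
}
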
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 355e855..a18d1ce 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -58,10 +58,9 @@
 
 static char __initdata version[] = "0.93";
 
-static inline int
-ctnetlink_dump_tuples_proto(struct sk_buff *skb,
-			    const struct nf_conntrack_tuple *tuple,
-			    struct nf_conntrack_l4proto *l4proto)
+static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
+				       const struct nf_conntrack_tuple *tuple,
+				       struct nf_conntrack_l4proto *l4proto)
 {
 	int ret = 0;
 	struct nlattr *nest_parms;
@@ -83,10 +82,9 @@
 	return -1;
 }
 
-static inline int
-ctnetlink_dump_tuples_ip(struct sk_buff *skb,
-			 const struct nf_conntrack_tuple *tuple,
-			 struct nf_conntrack_l3proto *l3proto)
+static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
+				    const struct nf_conntrack_tuple *tuple,
+				    struct nf_conntrack_l3proto *l3proto)
 {
 	int ret = 0;
 	struct nlattr *nest_parms;
@@ -106,9 +104,8 @@
 	return -1;
 }
 
-static int
-ctnetlink_dump_tuples(struct sk_buff *skb,
-		      const struct nf_conntrack_tuple *tuple)
+static int ctnetlink_dump_tuples(struct sk_buff *skb,
+				 const struct nf_conntrack_tuple *tuple)
 {
 	int ret;
 	struct nf_conntrack_l3proto *l3proto;
@@ -127,9 +124,8 @@
 	return ret;
 }
 
-static inline int
-ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
-		       const struct nf_conntrack_zone *zone, int dir)
+static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
+				  const struct nf_conntrack_zone *zone, int dir)
 {
 	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
 		return 0;
@@ -141,8 +137,7 @@
 	return -1;
 }
 
-static inline int
-ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
 		goto nla_put_failure;
@@ -152,8 +147,7 @@
 	return -1;
 }
 
-static inline int
-ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
 
@@ -168,8 +162,7 @@
 	return -1;
 }
 
-static inline int
-ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
+static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
 {
 	struct nf_conntrack_l4proto *l4proto;
 	struct nlattr *nest_proto;
@@ -193,8 +186,8 @@
 	return -1;
 }
 
-static inline int
-ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
+				   const struct nf_conn *ct)
 {
 	struct nlattr *nest_helper;
 	const struct nf_conn_help *help = nfct_help(ct);
@@ -245,8 +238,10 @@
 	if (!nest_count)
 		goto nla_put_failure;
 
-	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
-	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
+	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
+			 CTA_COUNTERS_PAD) ||
+	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
+			 CTA_COUNTERS_PAD))
 		goto nla_put_failure;
 
 	nla_nest_end(skb, nest_count);
@@ -287,9 +282,11 @@
 	if (!nest_count)
 		goto nla_put_failure;
 
-	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
+	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
+			 CTA_TIMESTAMP_PAD) ||
 	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
-					       cpu_to_be64(tstamp->stop))))
+					       cpu_to_be64(tstamp->stop),
+					       CTA_TIMESTAMP_PAD)))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest_count);
 
@@ -300,8 +297,7 @@
 }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-static inline int
-ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
 		goto nla_put_failure;
@@ -315,8 +311,7 @@
 #endif
 
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
-static inline int
-ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	struct nlattr *nest_secctx;
 	int len, ret;
@@ -345,7 +340,7 @@
 #endif
 
 #ifdef CONFIG_NF_CONNTRACK_LABELS
-static int ctnetlink_label_size(const struct nf_conn *ct)
+static inline int ctnetlink_label_size(const struct nf_conn *ct)
 {
 	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
 
@@ -380,8 +375,7 @@
 
 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
 
-static inline int
-ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	struct nlattr *nest_parms;
 
@@ -426,8 +420,8 @@
 	return -1;
 }
 
-static inline int
-ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb,
+				     const struct nf_conn *ct)
 {
 	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
 	struct nf_ct_seqadj *seq;
@@ -446,8 +440,7 @@
 	return 0;
 }
 
-static inline int
-ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
 		goto nla_put_failure;
@@ -457,8 +450,7 @@
 	return -1;
 }
 
-static inline int
-ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
 		goto nla_put_failure;
@@ -538,8 +530,7 @@
 	return -1;
 }
 
-static inline size_t
-ctnetlink_proto_size(const struct nf_conn *ct)
+static inline size_t ctnetlink_proto_size(const struct nf_conn *ct)
 {
 	struct nf_conntrack_l3proto *l3proto;
 	struct nf_conntrack_l4proto *l4proto;
@@ -556,19 +547,17 @@
 	return len;
 }
 
-static inline size_t
-ctnetlink_acct_size(const struct nf_conn *ct)
+static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
 {
 	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
 		return 0;
 	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
-	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
-	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
+	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
+	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
 	       ;
 }
 
-static inline int
-ctnetlink_secctx_size(const struct nf_conn *ct)
+static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
 {
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
 	int len, ret;
@@ -584,20 +573,19 @@
 #endif
 }
 
-static inline size_t
-ctnetlink_timestamp_size(const struct nf_conn *ct)
+static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
 {
 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
 	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
 		return 0;
-	return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
+	return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
 #else
 	return 0;
 #endif
 }
 
-static inline size_t
-ctnetlink_nlmsg_size(const struct nf_conn *ct)
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
 {
 	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
 	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
@@ -628,7 +616,6 @@
 	       ;
 }
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
 static int
 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 {
@@ -837,19 +824,22 @@
 	last = (struct nf_conn *)cb->args[1];
 
 	local_bh_disable();
-	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
+	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
 restart:
 		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
 		nf_conntrack_lock(lockp);
-		if (cb->args[0] >= net->ct.htable_size) {
+		if (cb->args[0] >= nf_conntrack_htable_size) {
 			spin_unlock(lockp);
 			goto out;
 		}
-		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
-					 hnnode) {
+		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
+					   hnnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
+			if (!net_eq(net, nf_ct_net(ct)))
+				continue;
+
 			/* Dump entries of a given L3 protocol number.
 			 * If it is not specified, ie. l3proto == 0,
 			 * then dump everything. */
@@ -891,8 +881,8 @@
 	return skb->len;
 }
 
-static inline int
-ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
+static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
+				    struct nf_conntrack_tuple *tuple)
 {
 	struct nlattr *tb[CTA_IP_MAX+1];
 	struct nf_conntrack_l3proto *l3proto;
@@ -921,9 +911,8 @@
 	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
 };
 
-static inline int
-ctnetlink_parse_tuple_proto(struct nlattr *attr,
-			    struct nf_conntrack_tuple *tuple)
+static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
+				       struct nf_conntrack_tuple *tuple)
 {
 	struct nlattr *tb[CTA_PROTO_MAX+1];
 	struct nf_conntrack_l4proto *l4proto;
@@ -1050,9 +1039,8 @@
 				    .len = NF_CT_HELPER_NAME_LEN - 1 },
 };
 
-static inline int
-ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
-		     struct nlattr **helpinfo)
+static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
+				struct nlattr **helpinfo)
 {
 	int err;
 	struct nlattr *tb[CTA_HELP_MAX+1];
@@ -1463,8 +1451,8 @@
 #endif
 }
 
-static inline int
-ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
+static int ctnetlink_change_helper(struct nf_conn *ct,
+				   const struct nlattr * const cda[])
 {
 	struct nf_conntrack_helper *helper;
 	struct nf_conn_help *help = nfct_help(ct);
@@ -1524,8 +1512,8 @@
 	return -EOPNOTSUPP;
 }
 
-static inline int
-ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
+static int ctnetlink_change_timeout(struct nf_conn *ct,
+				    const struct nlattr * const cda[])
 {
 	u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
 
@@ -1544,8 +1532,8 @@
 	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
 };
 
-static inline int
-ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
+static int ctnetlink_change_protoinfo(struct nf_conn *ct,
+				      const struct nlattr * const cda[])
 {
 	const struct nlattr *attr = cda[CTA_PROTOINFO];
 	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
@@ -1571,8 +1559,8 @@
 	[CTA_SEQADJ_OFFSET_AFTER]	= { .type = NLA_U32 },
 };
 
-static inline int
-change_seq_adj(struct nf_ct_seqadj *seq, const struct nlattr * const attr)
+static int change_seq_adj(struct nf_ct_seqadj *seq,
+			  const struct nlattr * const attr)
 {
 	int err;
 	struct nlattr *cda[CTA_SEQADJ_MAX+1];
@@ -2405,10 +2393,9 @@
  * EXPECT
  ***********************************************************************/
 
-static inline int
-ctnetlink_exp_dump_tuple(struct sk_buff *skb,
-			 const struct nf_conntrack_tuple *tuple,
-			 enum ctattr_expect type)
+static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
+				    const struct nf_conntrack_tuple *tuple,
+				    enum ctattr_expect type)
 {
 	struct nlattr *nest_parms;
 
@@ -2425,10 +2412,9 @@
 	return -1;
 }
 
-static inline int
-ctnetlink_exp_dump_mask(struct sk_buff *skb,
-			const struct nf_conntrack_tuple *tuple,
-			const struct nf_conntrack_tuple_mask *mask)
+static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
+				   const struct nf_conntrack_tuple *tuple,
+				   const struct nf_conntrack_tuple_mask *mask)
 {
 	int ret;
 	struct nf_conntrack_l3proto *l3proto;
@@ -2646,10 +2632,14 @@
 	last = (struct nf_conntrack_expect *)cb->args[1];
 	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
 restart:
-		hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
+		hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]],
 				     hnode) {
 			if (l3proto && exp->tuple.src.l3num != l3proto)
 				continue;
+
+			if (!net_eq(nf_ct_net(exp->master), net))
+				continue;
+
 			if (cb->args[1]) {
 				if (exp != last)
 					continue;
@@ -2900,8 +2890,12 @@
 		spin_lock_bh(&nf_conntrack_expect_lock);
 		for (i = 0; i < nf_ct_expect_hsize; i++) {
 			hlist_for_each_entry_safe(exp, next,
-						  &net->ct.expect_hash[i],
+						  &nf_ct_expect_hash[i],
 						  hnode) {
+
+				if (!net_eq(nf_ct_exp_net(exp), net))
+					continue;
+
 				m_help = nfct_help(exp->master);
 				if (!strcmp(m_help->helper->name, name) &&
 				    del_timer(&exp->timeout)) {
@@ -2918,8 +2912,12 @@
 		spin_lock_bh(&nf_conntrack_expect_lock);
 		for (i = 0; i < nf_ct_expect_hsize; i++) {
 			hlist_for_each_entry_safe(exp, next,
-						  &net->ct.expect_hash[i],
+						  &nf_ct_expect_hash[i],
 						  hnode) {
+
+				if (!net_eq(nf_ct_exp_net(exp), net))
+					continue;
+
 				if (del_timer(&exp->timeout)) {
 					nf_ct_unlink_expect_report(exp,
 							NETLINK_CB(skb).portid,
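
With the conntrack and expectation hash tables now global (nf_conntrack_hash, nf_ct_expect_hash), every walker has to filter entries by owning namespace, as the dump loops above do with net_eq(). A hedged sketch of that pattern, built only from helpers visible in this patch (illustrative; locking and RCU handling omitted, not a drop-in kernel function):

/* Count the conntrack entries that belong to one namespace. */
static unsigned int count_entries_of_net(const struct net *net)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int bucket, count = 0;

	for (bucket = 0; bucket < nf_conntrack_htable_size; bucket++) {
		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[bucket], hnnode) {
			struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

			/* the table is shared, so skip other namespaces */
			if (!net_eq(net, nf_ct_net(ct)))
				continue;
			count++;
		}
	}
	return count;
}
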
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index fce1b1c..399a38f 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -645,7 +645,8 @@
 	    nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
 		       ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
 	    nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
-			 cpu_to_be64(ct->proto.dccp.handshake_seq)))
+			 cpu_to_be64(ct->proto.dccp.handshake_seq),
+			 CTA_PROTOINFO_DCCP_PAD))
 		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 	spin_unlock_bh(&ct->lock);
@@ -660,6 +661,7 @@
 	[CTA_PROTOINFO_DCCP_STATE]	= { .type = NLA_U8 },
 	[CTA_PROTOINFO_DCCP_ROLE]	= { .type = NLA_U8 },
 	[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ] = { .type = NLA_U64 },
+	[CTA_PROTOINFO_DCCP_PAD]	= { .type = NLA_UNSPEC },
 };
 
 static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 9578a7c..1d7ab96 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -191,13 +191,7 @@
 /* Print out the private part of the conntrack. */
 static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
 {
-	enum sctp_conntrack state;
-
-	spin_lock_bh(&ct->lock);
-	state = ct->proto.sctp.state;
-	spin_unlock_bh(&ct->lock);
-
-	seq_printf(s, "%s ", sctp_conntrack_names[state]);
+	seq_printf(s, "%s ", sctp_conntrack_names[ct->proto.sctp.state]);
 }
 
 #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count)	\
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 278f3b9..70c8381 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -313,13 +313,7 @@
 /* Print out the private part of the conntrack. */
 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
 {
-	enum tcp_conntrack state;
-
-	spin_lock_bh(&ct->lock);
-	state = ct->proto.tcp.state;
-	spin_unlock_bh(&ct->lock);
-
-	seq_printf(s, "%s ", tcp_conntrack_names[state]);
+	seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
 }
 
 static unsigned int get_conntrack_index(const struct tcphdr *tcph)
@@ -410,6 +404,8 @@
 			length--;
 			continue;
 		default:
+			if (length < 2)
+				return;
 			opsize=*ptr++;
 			if (opsize < 2) /* "silly options" */
 				return;
@@ -470,6 +466,8 @@
 			length--;
 			continue;
 		default:
+			if (length < 2)
+				return;
 			opsize = *ptr++;
 			if (opsize < 2) /* "silly options" */
 				return;
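
The two "if (length < 2) return;" additions above guard the option parser against reading the length byte of a truncated option. A standalone sketch of the same walk over a raw options buffer (plain C, simplified from the kernel loop):

#include <stdio.h>

#define TCPOPT_EOL 0
#define TCPOPT_NOP 1

/* Walk TCP options; returns the number of well-formed options seen. */
static int walk_tcp_options(const unsigned char *ptr, int length)
{
	int seen = 0;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return seen;
		case TCPOPT_NOP:
			length--;
			continue;
		default:
			if (length < 2)		/* the fix: no room for the length byte */
				return seen;
			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				return seen;	/* "silly" or truncated option */
			seen++;
			ptr += opsize - 2;
			length -= opsize;
		}
	}
	return seen;
}

int main(void)
{
	/* one NOP followed by a truncated option kind with no length byte */
	unsigned char opts[] = { TCPOPT_NOP, 42 };

	printf("options parsed: %d\n", walk_tcp_options(opts, sizeof(opts)));
	return 0;
}

Without the length check, the parser would read one byte past the end of the options area for input like the example buffer above.
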
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 478f92f..4fd0405 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -309,6 +309,7 @@
 	.l3proto		= PF_INET,
 	.l4proto		= IPPROTO_UDP,
 	.name			= "udp",
+	.allow_clash		= true,
 	.pkt_to_tuple		= udp_pkt_to_tuple,
 	.invert_tuple		= udp_invert_tuple,
 	.print_tuple		= udp_print_tuple,
@@ -341,6 +342,7 @@
 	.l3proto		= PF_INET6,
 	.l4proto		= IPPROTO_UDP,
 	.name			= "udp",
+	.allow_clash		= true,
 	.pkt_to_tuple		= udp_pkt_to_tuple,
 	.invert_tuple		= udp_invert_tuple,
 	.print_tuple		= udp_print_tuple,
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 1ac8ee1..9d692f5a 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -274,6 +274,7 @@
 	.l3proto		= PF_INET,
 	.l4proto		= IPPROTO_UDPLITE,
 	.name			= "udplite",
+	.allow_clash		= true,
 	.pkt_to_tuple		= udplite_pkt_to_tuple,
 	.invert_tuple		= udplite_invert_tuple,
 	.print_tuple		= udplite_print_tuple,
@@ -306,6 +307,7 @@
 	.l3proto		= PF_INET6,
 	.l4proto		= IPPROTO_UDPLITE,
 	.name			= "udplite",
+	.allow_clash		= true,
 	.pkt_to_tuple		= udplite_pkt_to_tuple,
 	.invert_tuple		= udplite_invert_tuple,
 	.print_tuple		= udplite_print_tuple,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 0f1a45b..f87e84e 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -54,14 +54,13 @@
 
 static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 {
-	struct net *net = seq_file_net(seq);
 	struct ct_iter_state *st = seq->private;
 	struct hlist_nulls_node *n;
 
 	for (st->bucket = 0;
-	     st->bucket < net->ct.htable_size;
+	     st->bucket < nf_conntrack_htable_size;
 	     st->bucket++) {
-		n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
+		n = rcu_dereference(hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
 		if (!is_a_nulls(n))
 			return n;
 	}
@@ -71,18 +70,17 @@
 static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
 				      struct hlist_nulls_node *head)
 {
-	struct net *net = seq_file_net(seq);
 	struct ct_iter_state *st = seq->private;
 
 	head = rcu_dereference(hlist_nulls_next_rcu(head));
 	while (is_a_nulls(head)) {
 		if (likely(get_nulls_value(head) == st->bucket)) {
-			if (++st->bucket >= net->ct.htable_size)
+			if (++st->bucket >= nf_conntrack_htable_size)
 				return NULL;
 		}
 		head = rcu_dereference(
 				hlist_nulls_first_rcu(
-					&net->ct.hash[st->bucket]));
+					&nf_conntrack_hash[st->bucket]));
 	}
 	return head;
 }
@@ -458,7 +456,7 @@
 	},
 	{
 		.procname       = "nf_conntrack_buckets",
-		.data           = &init_net.ct.htable_size,
+		.data           = &nf_conntrack_htable_size,
 		.maxlen         = sizeof(unsigned int),
 		.mode           = 0444,
 		.proc_handler   = proc_dointvec,
@@ -512,7 +510,6 @@
 		goto out_kmemdup;
 
 	table[1].data = &net->ct.count;
-	table[2].data = &net->ct.htable_size;
 	table[3].data = &net->ct.sysctl_checksum;
 	table[4].data = &net->ct.sysctl_log_invalid;
 
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 06a9f45..6877a39 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -38,6 +38,9 @@
 static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
 						__read_mostly;
 
+static struct hlist_head *nf_nat_bysource __read_mostly;
+static unsigned int nf_nat_htable_size __read_mostly;
+static unsigned int nf_nat_hash_rnd __read_mostly;
 
 inline const struct nf_nat_l3proto *
 __nf_nat_l3proto_find(u8 family)
@@ -118,15 +121,17 @@
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
 {
 	unsigned int hash;
 
+	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
+
 	/* Original src, to ensure we map it consistently if poss. */
 	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
-		      tuple->dst.protonum ^ nf_conntrack_hash_rnd);
+		      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));
 
-	return reciprocal_scale(hash, net->ct.nat_htable_size);
+	return reciprocal_scale(hash, nf_nat_htable_size);
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -196,9 +201,10 @@
 	const struct nf_conn_nat *nat;
 	const struct nf_conn *ct;
 
-	hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
+	hlist_for_each_entry_rcu(nat, &nf_nat_bysource[h], bysource) {
 		ct = nat->ct;
 		if (same_src(ct, tuple) &&
+		    net_eq(net, nf_ct_net(ct)) &&
 		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
 			/* Copy source part from reply tuple. */
 			nf_ct_invert_tuplepr(result,
@@ -431,7 +437,7 @@
 		nat = nfct_nat(ct);
 		nat->ct = ct;
 		hlist_add_head_rcu(&nat->bysource,
-				   &net->ct.nat_bysource[srchash]);
+				   &nf_nat_bysource[srchash]);
 		spin_unlock_bh(&nf_nat_lock);
 	}
 
@@ -819,27 +825,14 @@
 }
 #endif
 
-static int __net_init nf_nat_net_init(struct net *net)
-{
-	/* Leave them the same for the moment. */
-	net->ct.nat_htable_size = net->ct.htable_size;
-	net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0);
-	if (!net->ct.nat_bysource)
-		return -ENOMEM;
-	return 0;
-}
-
 static void __net_exit nf_nat_net_exit(struct net *net)
 {
 	struct nf_nat_proto_clean clean = {};
 
 	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
-	synchronize_rcu();
-	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
-	.init = nf_nat_net_init,
 	.exit = nf_nat_net_exit,
 };
 
@@ -852,8 +845,16 @@
 {
 	int ret;
 
+	/* Leave them the same for the moment. */
+	nf_nat_htable_size = nf_conntrack_htable_size;
+
+	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
+	if (!nf_nat_bysource)
+		return -ENOMEM;
+
 	ret = nf_ct_extend_register(&nat_extend);
 	if (ret < 0) {
+		nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
 		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
 		return ret;
 	}
@@ -877,6 +878,7 @@
 	return 0;
 
  cleanup_extend:
+	nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
 	nf_ct_extend_unregister(&nat_extend);
 	return ret;
 }
@@ -895,6 +897,7 @@
 	for (i = 0; i < NFPROTO_NUMPROTO; i++)
 		kfree(nf_nat_l4protos[i]);
 	synchronize_net();
+	nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
 }
 
 MODULE_LICENSE("GPL");
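
hash_by_src() now mixes a per-boot random seed and net_hash_mix() into the jhash so that identical tuples from different namespaces spread across the single, global bysource table, and the final bucket is still picked with reciprocal_scale(). The scaling step, shown as a small standalone example (the function body matches the kernel helper's multiply-and-shift formula):

#include <stdio.h>
#include <stdint.h>

/* Map a full 32-bit hash onto [0, size) with a multiply+shift instead of
 * a modulo, like the kernel's reciprocal_scale().
 */
static uint32_t reciprocal_scale(uint32_t hash, uint32_t size)
{
	return (uint32_t)(((uint64_t)hash * size) >> 32);
}

int main(void)
{
	uint32_t table_size = 16384;
	uint32_t hashes[] = { 0x00000000u, 0x12345678u, 0xdeadbeefu, 0xffffffffu };
	unsigned int i;

	for (i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
		printf("hash %08x -> bucket %u of %u\n",
		       hashes[i], reciprocal_scale(hashes[i], table_size), table_size);
	return 0;
}
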
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2011977..4d292b9 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -944,8 +944,10 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts)) ||
-	    nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)))
+	if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts),
+			 NFTA_COUNTER_PAD) ||
+	    nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes),
+			 NFTA_COUNTER_PAD))
 		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
@@ -975,7 +977,8 @@
 
 	if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
 		goto nla_put_failure;
-	if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle)))
+	if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle),
+			 NFTA_CHAIN_PAD))
 		goto nla_put_failure;
 	if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name))
 		goto nla_put_failure;
@@ -1803,13 +1806,15 @@
 		goto nla_put_failure;
 	if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
 		goto nla_put_failure;
-	if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle)))
+	if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle),
+			 NFTA_RULE_PAD))
 		goto nla_put_failure;
 
 	if ((event != NFT_MSG_DELRULE) && (rule->list.prev != &chain->rules)) {
 		prule = list_entry(rule->list.prev, struct nft_rule, list);
 		if (nla_put_be64(skb, NFTA_RULE_POSITION,
-				 cpu_to_be64(prule->handle)))
+				 cpu_to_be64(prule->handle),
+				 NFTA_RULE_PAD))
 			goto nla_put_failure;
 	}
 
@@ -2312,7 +2317,7 @@
 static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
 	[NFTA_SET_TABLE]		= { .type = NLA_STRING },
 	[NFTA_SET_NAME]			= { .type = NLA_STRING,
-					    .len = IFNAMSIZ - 1 },
+					    .len = NFT_SET_MAXNAMELEN - 1 },
 	[NFTA_SET_FLAGS]		= { .type = NLA_U32 },
 	[NFTA_SET_KEY_TYPE]		= { .type = NLA_U32 },
 	[NFTA_SET_KEY_LEN]		= { .type = NLA_U32 },
@@ -2396,7 +2401,7 @@
 	unsigned long *inuse;
 	unsigned int n = 0, min = 0;
 
-	p = strnchr(name, IFNAMSIZ, '%');
+	p = strnchr(name, NFT_SET_MAXNAMELEN, '%');
 	if (p != NULL) {
 		if (p[1] != 'd' || strchr(p + 2, '%'))
 			return -EINVAL;
@@ -2473,7 +2478,8 @@
 	}
 
 	if (set->timeout &&
-	    nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout)))
+	    nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout),
+			 NFTA_SET_PAD))
 		goto nla_put_failure;
 	if (set->gc_int &&
 	    nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(set->gc_int)))
@@ -2690,7 +2696,7 @@
 	struct nft_table *table;
 	struct nft_set *set;
 	struct nft_ctx ctx;
-	char name[IFNAMSIZ];
+	char name[NFT_SET_MAXNAMELEN];
 	unsigned int size;
 	bool create;
 	u64 timeout;
@@ -3076,7 +3082,8 @@
 
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
 	    nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
-			 cpu_to_be64(*nft_set_ext_timeout(ext))))
+			 cpu_to_be64(*nft_set_ext_timeout(ext)),
+			 NFTA_SET_ELEM_PAD))
 		goto nla_put_failure;
 
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
@@ -3089,7 +3096,8 @@
 			expires = 0;
 
 		if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION,
-				 cpu_to_be64(jiffies_to_msecs(expires))))
+				 cpu_to_be64(jiffies_to_msecs(expires)),
+				 NFTA_SET_ELEM_PAD))
 			goto nla_put_failure;
 	}
 
@@ -3367,6 +3375,22 @@
 }
 EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
 
+static int nft_setelem_parse_flags(const struct nft_set *set,
+				   const struct nlattr *attr, u32 *flags)
+{
+	if (attr == NULL)
+		return 0;
+
+	*flags = ntohl(nla_get_be32(attr));
+	if (*flags & ~NFT_SET_ELEM_INTERVAL_END)
+		return -EINVAL;
+	if (!(set->flags & NFT_SET_INTERVAL) &&
+	    *flags & NFT_SET_ELEM_INTERVAL_END)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 			    const struct nlattr *attr)
 {
@@ -3380,8 +3404,8 @@
 	struct nft_data data;
 	enum nft_registers dreg;
 	struct nft_trans *trans;
+	u32 flags = 0;
 	u64 timeout;
-	u32 flags;
 	u8 ulen;
 	int err;
 
@@ -3395,17 +3419,11 @@
 
 	nft_set_ext_prepare(&tmpl);
 
-	flags = 0;
-	if (nla[NFTA_SET_ELEM_FLAGS] != NULL) {
-		flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
-		if (flags & ~NFT_SET_ELEM_INTERVAL_END)
-			return -EINVAL;
-		if (!(set->flags & NFT_SET_INTERVAL) &&
-		    flags & NFT_SET_ELEM_INTERVAL_END)
-			return -EINVAL;
-		if (flags != 0)
-			nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
-	}
+	err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags);
+	if (err < 0)
+		return err;
+	if (flags != 0)
+		nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
 
 	if (set->flags & NFT_SET_MAP) {
 		if (nla[NFTA_SET_ELEM_DATA] == NULL &&
@@ -3574,9 +3592,13 @@
 			   const struct nlattr *attr)
 {
 	struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+	struct nft_set_ext_tmpl tmpl;
 	struct nft_data_desc desc;
 	struct nft_set_elem elem;
+	struct nft_set_ext *ext;
 	struct nft_trans *trans;
+	u32 flags = 0;
+	void *priv;
 	int err;
 
 	err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
@@ -3588,6 +3610,14 @@
 	if (nla[NFTA_SET_ELEM_KEY] == NULL)
 		goto err1;
 
+	nft_set_ext_prepare(&tmpl);
+
+	err = nft_setelem_parse_flags(set, nla[NFTA_SET_ELEM_FLAGS], &flags);
+	if (err < 0)
+		return err;
+	if (flags != 0)
+		nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+
 	err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &desc,
 			    nla[NFTA_SET_ELEM_KEY]);
 	if (err < 0)
@@ -3597,24 +3627,40 @@
 	if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
 		goto err2;
 
+	nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, desc.len);
+
+	err = -ENOMEM;
+	elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, NULL, 0,
+				      GFP_KERNEL);
+	if (elem.priv == NULL)
+		goto err2;
+
+	ext = nft_set_elem_ext(set, elem.priv);
+	if (flags)
+		*nft_set_ext_flags(ext) = flags;
+
 	trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
 	if (trans == NULL) {
 		err = -ENOMEM;
-		goto err2;
-	}
-
-	elem.priv = set->ops->deactivate(set, &elem);
-	if (elem.priv == NULL) {
-		err = -ENOENT;
 		goto err3;
 	}
 
+	priv = set->ops->deactivate(set, &elem);
+	if (priv == NULL) {
+		err = -ENOENT;
+		goto err4;
+	}
+	kfree(elem.priv);
+	elem.priv = priv;
+
 	nft_trans_elem(trans) = elem;
 	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 	return 0;
 
-err3:
+err4:
 	kfree(trans);
+err3:
+	kfree(elem.priv);
 err2:
 	nft_data_uninit(&elem.key.val, desc.type);
 err1:
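
nft_setelem_parse_flags(), introduced above, centralises the element-flag checks so both the add and delete paths reject the same malformed requests. The validation logic, as a tiny standalone truth table (constants redeclared locally so the sketch builds on its own; error codes simplified to -1):

#include <stdio.h>

#define NFT_SET_INTERVAL           0x4
#define NFT_SET_ELEM_INTERVAL_END  0x1

/* Mirrors the checks in nft_setelem_parse_flags(): only the INTERVAL_END
 * flag is accepted, and only for sets created with NFT_SET_INTERVAL.
 */
static int parse_flags(unsigned int set_flags, unsigned int elem_flags)
{
	if (elem_flags & ~NFT_SET_ELEM_INTERVAL_END)
		return -1;			/* unknown flag */
	if (!(set_flags & NFT_SET_INTERVAL) &&
	    (elem_flags & NFT_SET_ELEM_INTERVAL_END))
		return -1;			/* end marker on a non-interval set */
	return 0;
}

int main(void)
{
	printf("interval set, end flag:   %d\n",
	       parse_flags(NFT_SET_INTERVAL, NFT_SET_ELEM_INTERVAL_END));
	printf("plain set, end flag:      %d\n",
	       parse_flags(0, NFT_SET_ELEM_INTERVAL_END));
	printf("interval set, bogus flag: %d\n",
	       parse_flags(NFT_SET_INTERVAL, 0x2));
	return 0;
}
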
diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c
index e9e959f..39eb1cc 100644
--- a/net/netfilter/nf_tables_trace.c
+++ b/net/netfilter/nf_tables_trace.c
@@ -156,7 +156,8 @@
 		return 0;
 
 	return nla_put_be64(nlskb, NFTA_TRACE_RULE_HANDLE,
-			    cpu_to_be64(info->rule->handle));
+			    cpu_to_be64(info->rule->handle),
+			    NFTA_TRACE_PAD);
 }
 
 void nft_trace_notify(struct nft_traceinfo *info)
@@ -174,7 +175,7 @@
 	size = nlmsg_total_size(sizeof(struct nfgenmsg)) +
 		nla_total_size(NFT_TABLE_MAXNAMELEN) +
 		nla_total_size(NFT_CHAIN_MAXNAMELEN) +
-		nla_total_size(sizeof(__be64)) +	/* rule handle */
+		nla_total_size_64bit(sizeof(__be64)) +	/* rule handle */
 		nla_total_size(sizeof(__be32)) +	/* trace type */
 		nla_total_size(0) +			/* VERDICT, nested */
 			nla_total_size(sizeof(u32)) +	/* verdict code */
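
Several hunks in this series switch 64-bit attributes to the padded nla_put_be64(..., *_PAD) form and account for them with nla_total_size_64bit(); the pad attribute exists so the 8-byte payload can land on an 8-byte boundary on architectures that cannot do unaligned 64-bit loads. The offset arithmetic behind that, as a small standalone example (NLA_HDRLEN and NLA_ALIGN mirror the netlink uapi definitions):

#include <stdio.h>

#define NLA_HDRLEN   4				/* struct nlattr: u16 len + u16 type */
#define NLA_ALIGN(x) (((x) + 3U) & ~3U)

int main(void)
{
	/* A u64 payload that directly follows the 4-byte attribute header
	 * starts 4 bytes into the attribute, so it is only 4-byte aligned.
	 */
	unsigned int msg_off = 16;		/* attribute start, 8-byte aligned */
	unsigned int payload = msg_off + NLA_HDRLEN;
	unsigned int pad;

	printf("without pad: u64 payload at offset %u (%% 8 = %u)\n",
	       payload, payload % 8);

	/* Inserting a header-only pad attribute (rounded up to 4 bytes)
	 * shifts the real attribute so the u64 lands 8-byte aligned.
	 */
	pad = NLA_ALIGN(NLA_HDRLEN);
	payload = msg_off + pad + NLA_HDRLEN;
	printf("with pad:    u64 payload at offset %u (%% 8 = %u)\n",
	       payload, payload % 8);
	return 0;
}
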
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 4c2b4c0..1b4de4b 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -96,6 +96,8 @@
 			return -EINVAL;
 		if (flags & NFACCT_F_OVERQUOTA)
 			return -EINVAL;
+		if ((flags & NFACCT_F_QUOTA) && !tb[NFACCT_QUOTA])
+			return -EINVAL;
 
 		size += sizeof(u64);
 	}
@@ -160,15 +162,18 @@
 		pkts = atomic64_read(&acct->pkts);
 		bytes = atomic64_read(&acct->bytes);
 	}
-	if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) ||
-	    nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
+	if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts),
+			 NFACCT_PAD) ||
+	    nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes),
+			 NFACCT_PAD) ||
 	    nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
 		goto nla_put_failure;
 	if (acct->flags & NFACCT_F_QUOTA) {
 		u64 *quota = (u64 *)acct->data;
 
 		if (nla_put_be32(skb, NFACCT_FLAGS, htonl(old_flags)) ||
-		    nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota)))
+		    nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota),
+				 NFACCT_PAD))
 			goto nla_put_failure;
 	}
 	nlmsg_end(skb, nlh);
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 2671b9d..3c84f14 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -306,10 +306,10 @@
 	int i;
 
 	local_bh_disable();
-	for (i = 0; i < net->ct.htable_size; i++) {
+	for (i = 0; i < nf_conntrack_htable_size; i++) {
 		nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
-		if (i < net->ct.htable_size) {
-			hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
+		if (i < nf_conntrack_htable_size) {
+			hlist_nulls_for_each_entry(h, nn, &nf_conntrack_hash[i], hnnode)
 				untimeout(h, timeout);
 		}
 		spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 7542999..aa93877 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -295,6 +295,59 @@
 	return seclen;
 }
 
+static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
+{
+	struct sk_buff *entskb = entry->skb;
+	u32 nlalen = 0;
+
+	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
+		return 0;
+
+	if (skb_vlan_tag_present(entskb))
+		nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
+					 nla_total_size(sizeof(__be16)));
+
+	if (entskb->network_header > entskb->mac_header)
+		nlalen += nla_total_size((entskb->network_header -
+					  entskb->mac_header));
+
+	return nlalen;
+}
+
+static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
+{
+	struct sk_buff *entskb = entry->skb;
+
+	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
+		return 0;
+
+	if (skb_vlan_tag_present(entskb)) {
+		struct nlattr *nest;
+
+		nest = nla_nest_start(skb, NFQA_VLAN | NLA_F_NESTED);
+		if (!nest)
+			goto nla_put_failure;
+
+		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
+		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
+			goto nla_put_failure;
+
+		nla_nest_end(skb, nest);
+	}
+
+	if (entskb->mac_header < entskb->network_header) {
+		int len = (int)(entskb->network_header - entskb->mac_header);
+
+		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
+			goto nla_put_failure;
+	}
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
 static struct sk_buff *
 nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 			   struct nf_queue_entry *entry,
@@ -334,6 +387,8 @@
 	if (entskb->tstamp.tv64)
 		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 
+	size += nfqnl_get_bridge_size(entry);
+
 	if (entry->state.hook <= NF_INET_FORWARD ||
 	   (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
 		csum_verify = !skb_csum_unnecessary(entskb);
@@ -497,6 +552,9 @@
 		}
 	}
 
+	if (nfqnl_put_bridge(entry, skb) < 0)
+		goto nla_put_failure;
+
 	if (entskb->tstamp.tv64) {
 		struct nfqnl_msg_packet_timestamp ts;
 		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
@@ -582,7 +640,12 @@
 	/* nfnetlink_unicast will either free the nskb or add it to a socket */
 	err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
 	if (err < 0) {
-		queue->queue_user_dropped++;
+		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
+			failopen = 1;
+			err = 0;
+		} else {
+			queue->queue_user_dropped++;
+		}
 		goto err_out_unlock;
 	}
 
@@ -906,12 +969,18 @@
 	.notifier_call	= nfqnl_rcv_nl_event,
 };
 
+static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
+	[NFQA_VLAN_TCI]		= { .type = NLA_U16},
+	[NFQA_VLAN_PROTO]	= { .type = NLA_U16},
+};
+
 static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
 	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
 	[NFQA_MARK]		= { .type = NLA_U32 },
 	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
 	[NFQA_CT]		= { .type = NLA_UNSPEC },
 	[NFQA_EXP]		= { .type = NLA_UNSPEC },
+	[NFQA_VLAN]		= { .type = NLA_NESTED },
 };
 
 static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -1025,6 +1094,40 @@
 	return ct;
 }
 
+static int nfqa_parse_bridge(struct nf_queue_entry *entry,
+			     const struct nlattr * const nfqa[])
+{
+	if (nfqa[NFQA_VLAN]) {
+		struct nlattr *tb[NFQA_VLAN_MAX + 1];
+		int err;
+
+		err = nla_parse_nested(tb, NFQA_VLAN_MAX, nfqa[NFQA_VLAN],
+				       nfqa_vlan_policy);
+		if (err < 0)
+			return err;
+
+		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
+			return -EINVAL;
+
+		entry->skb->vlan_tci = ntohs(nla_get_be16(tb[NFQA_VLAN_TCI]));
+		entry->skb->vlan_proto = nla_get_be16(tb[NFQA_VLAN_PROTO]);
+	}
+
+	if (nfqa[NFQA_L2HDR]) {
+		int mac_header_len = entry->skb->network_header -
+			entry->skb->mac_header;
+
+		if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
+			return -EINVAL;
+		else if (mac_header_len > 0)
+			memcpy(skb_mac_header(entry->skb),
+			       nla_data(nfqa[NFQA_L2HDR]),
+			       mac_header_len);
+	}
+
+	return 0;
+}
+
 static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
 			      struct sk_buff *skb,
 			      const struct nlmsghdr *nlh,
@@ -1040,6 +1143,7 @@
 	struct nfnl_ct_hook *nfnl_ct;
 	struct nf_conn *ct = NULL;
 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
+	int err;
 
 	queue = instance_lookup(q, queue_num);
 	if (!queue)
@@ -1066,6 +1170,12 @@
 			ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
 	}
 
+	if (entry->state.pf == PF_BRIDGE) {
+		err = nfqa_parse_bridge(entry, nfqa);
+		if (err < 0)
+			return err;
+	}
+
 	if (nfqa[NFQA_PAYLOAD]) {
 		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
 		int diff = payload_len - entry->skb->len;
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index c9743f7..77db835 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -76,8 +76,10 @@
 
 	nft_counter_fetch(priv->counter, &total);
 
-	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
-	    nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
+	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes),
+			 NFTA_COUNTER_PAD) ||
+	    nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets),
+			 NFTA_COUNTER_PAD))
 		goto nla_put_failure;
 	return 0;
 
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index d4a4619..137e308 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -198,6 +198,14 @@
 		}
 		break;
 #endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+	case NFT_CT_LABELS:
+		nf_connlabels_replace(ct,
+				      &regs->data[priv->sreg],
+				      &regs->data[priv->sreg],
+				      NF_CT_LABELS_MAX_SIZE / sizeof(u32));
+		break;
+#endif
 	default:
 		break;
 	}
@@ -365,6 +373,16 @@
 		len = FIELD_SIZEOF(struct nf_conn, mark);
 		break;
 #endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+	case NFT_CT_LABELS:
+		if (tb[NFTA_CT_DIRECTION])
+			return -EINVAL;
+		len = NF_CT_LABELS_MAX_SIZE;
+		err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1);
+		if (err)
+			return err;
+		break;
+#endif
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -384,6 +402,18 @@
 static void nft_ct_destroy(const struct nft_ctx *ctx,
 			   const struct nft_expr *expr)
 {
+	struct nft_ct *priv = nft_expr_priv(expr);
+
+	switch (priv->key) {
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+	case NFT_CT_LABELS:
+		nf_connlabels_put(ctx->net);
+		break;
+#endif
+	default:
+		break;
+	}
+
 	nft_ct_l3proto_module_put(ctx->afi->family);
 }
 
@@ -484,6 +514,8 @@
 
 static int __init nft_ct_module_init(void)
 {
+	BUILD_BUG_ON(NF_CT_LABELS_MAX_SIZE > NFT_REG_SIZE);
+
 	return nft_register_expr(&nft_ct_type);
 }
 
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 9dec3bd..78d4914 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -227,7 +227,8 @@
 		goto nla_put_failure;
 	if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
 		goto nla_put_failure;
-	if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout)))
+	if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
+			 NFTA_DYNSET_PAD))
 		goto nla_put_failure;
 	if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
 		goto nla_put_failure;
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 3f9d45d..6fa0165 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -192,7 +192,7 @@
 	u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
 	int err;
 
-	err = rhashtable_walk_init(&priv->ht, &hti);
+	err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
 	iter->err = err;
 	if (err)
 		return;
@@ -248,7 +248,7 @@
 	priv = container_of(work, struct nft_hash, gc_work.work);
 	set  = nft_set_container_of(priv);
 
-	err = rhashtable_walk_init(&priv->ht, &hti);
+	err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
 	if (err)
 		goto schedule;
 
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 99d1857..070b989 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -97,8 +97,10 @@
 	u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
 	u64 rate = limit->rate - limit->burst;
 
-	if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate)) ||
-	    nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs)) ||
+	if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate),
+			 NFTA_LIMIT_PAD) ||
+	    nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs),
+			 NFTA_LIMIT_PAD) ||
 	    nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
 	    nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)) ||
 	    nla_put_be32(skb, NFTA_LIMIT_FLAGS, htonl(flags)))
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index 1c30f41..f762094 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -29,6 +29,17 @@
 	struct nft_set_ext	ext;
 };
 
+static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
+{
+	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
+	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
+}
+
+static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
+			     const struct nft_rbtree_elem *interval)
+{
+	return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
+}
 
 static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key,
 			      const struct nft_set_ext **ext)
@@ -37,6 +48,7 @@
 	const struct nft_rbtree_elem *rbe, *interval = NULL;
 	const struct rb_node *parent;
 	u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
+	const void *this;
 	int d;
 
 	spin_lock_bh(&nft_rbtree_lock);
@@ -44,9 +56,16 @@
 	while (parent != NULL) {
 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
-		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
+		this = nft_set_ext_key(&rbe->ext);
+		d = memcmp(this, key, set->klen);
 		if (d < 0) {
 			parent = parent->rb_left;
+			/* In case of adjacent ranges, we always see the high
+			 * part of the range first, before the low one.
+			 * So don't update interval if the keys are equal.
+			 */
+			if (interval && nft_rbtree_equal(set, this, interval))
+				continue;
 			interval = rbe;
 		} else if (d > 0)
 			parent = parent->rb_right;
@@ -56,9 +75,7 @@
 				parent = parent->rb_left;
 				continue;
 			}
-			if (nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
-			    *nft_set_ext_flags(&rbe->ext) &
-			    NFT_SET_ELEM_INTERVAL_END)
+			if (nft_rbtree_interval_end(rbe))
 				goto out;
 			spin_unlock_bh(&nft_rbtree_lock);
 
@@ -98,9 +115,16 @@
 		else if (d > 0)
 			p = &parent->rb_right;
 		else {
-			if (nft_set_elem_active(&rbe->ext, genmask))
-				return -EEXIST;
-			p = &parent->rb_left;
+			if (nft_set_elem_active(&rbe->ext, genmask)) {
+				if (nft_rbtree_interval_end(rbe) &&
+				    !nft_rbtree_interval_end(new))
+					p = &parent->rb_left;
+				else if (!nft_rbtree_interval_end(rbe) &&
+					 nft_rbtree_interval_end(new))
+					p = &parent->rb_right;
+				else
+					return -EEXIST;
+			}
 		}
 	}
 	rb_link_node(&new->node, parent, p);
@@ -145,7 +169,7 @@
 {
 	const struct nft_rbtree *priv = nft_set_priv(set);
 	const struct rb_node *parent = priv->root.rb_node;
-	struct nft_rbtree_elem *rbe;
+	struct nft_rbtree_elem *rbe, *this = elem->priv;
 	u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
 	int d;
 
@@ -163,6 +187,15 @@
 				parent = parent->rb_left;
 				continue;
 			}
+			if (nft_rbtree_interval_end(rbe) &&
+			    !nft_rbtree_interval_end(this)) {
+				parent = parent->rb_left;
+				continue;
+			} else if (!nft_rbtree_interval_end(rbe) &&
+				   nft_rbtree_interval_end(this)) {
+				parent = parent->rb_right;
+				continue;
+			}
 			nft_set_elem_change_active(set, &rbe->ext);
 			return rbe;
 		}
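
Background for the nft_rbtree changes above: interval sets store each range as a start element plus an end element carrying NFT_SET_ELEM_INTERVAL_END, and adjacent ranges therefore produce two elements with the same key. A toy model of that representation and of why the lookup must not prefer an end marker over a start marker with an equal key (plain C, using a sorted array instead of the kernel's rbtree):

#include <stdio.h>
#include <stdbool.h>

struct elem {
	unsigned int key;
	bool interval_end;
};

/* ranges [10,20) and [20,30): the end of the first range and the start of
 * the second share key 20
 */
static const struct elem set[] = {
	{ 10, false }, { 20, true }, { 20, false }, { 30, true },
};

static bool lookup(unsigned int key)
{
	const struct elem *best = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(set) / sizeof(set[0]); i++) {
		if (set[i].key > key)
			break;
		/* for equal keys keep the start element, mirroring the
		 * "don't update interval on equal keys" fix above
		 */
		if (best && best->key == set[i].key && set[i].interval_end)
			continue;
		best = &set[i];
	}
	return best && !best->interval_end;
}

int main(void)
{
	unsigned int probes[] = { 5, 10, 19, 20, 29, 30 };
	unsigned int i;

	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
		printf("%2u -> %s\n", probes[i],
		       lookup(probes[i]) ? "match" : "no match");
	return 0;
}

With the two ranges above, 20 matches (it starts the second range) while 30 does not, which is the behaviour the insert/deactivate changes preserve for adjacent intervals.
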
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 582c9cf..c69c892 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -416,6 +416,47 @@
 }
 EXPORT_SYMBOL_GPL(xt_check_match);
 
+/** xt_check_entry_match - check that matches end before start of target
+ *
+ * @match: beginning of xt_entry_match
+ * @target: beginning of this rules target (alleged end of matches)
+ * @alignment: alignment requirement of match structures
+ *
+ * Validates that all matches add up to the beginning of the target,
+ * and that each match covers at least the base structure size.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int xt_check_entry_match(const char *match, const char *target,
+				const size_t alignment)
+{
+	const struct xt_entry_match *pos;
+	int length = target - match;
+
+	if (length == 0) /* no matches */
+		return 0;
+
+	pos = (struct xt_entry_match *)match;
+	do {
+		if ((unsigned long)pos % alignment)
+			return -EINVAL;
+
+		if (length < (int)sizeof(struct xt_entry_match))
+			return -EINVAL;
+
+		if (pos->u.match_size < sizeof(struct xt_entry_match))
+			return -EINVAL;
+
+		if (pos->u.match_size > length)
+			return -EINVAL;
+
+		length -= pos->u.match_size;
+		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
+	} while (length > 0);
+
+	return 0;
+}
+
 #ifdef CONFIG_COMPAT
 int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
 {
@@ -485,13 +526,14 @@
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_offset);
 
-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
-			      unsigned int *size)
+void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+			       unsigned int *size)
 {
 	const struct xt_match *match = m->u.kernel.match;
 	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
 	int pad, off = xt_compat_match_offset(match);
 	u_int16_t msize = cm->u.user.match_size;
+	char name[sizeof(m->u.user.name)];
 
 	m = *dstptr;
 	memcpy(m, cm, sizeof(*cm));
@@ -505,10 +547,12 @@
 
 	msize += off;
 	m->u.user.match_size = msize;
+	strlcpy(name, match->name, sizeof(name));
+	module_put(match->me);
+	strncpy(m->u.user.name, name, sizeof(m->u.user.name));
 
 	*size += off;
 	*dstptr += msize;
-	return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
 
@@ -539,8 +583,125 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
+
+/* non-compat version may have padding after verdict */
+struct compat_xt_standard_target {
+	struct compat_xt_entry_target t;
+	compat_uint_t verdict;
+};
+
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+				  unsigned int target_offset,
+				  unsigned int next_offset)
+{
+	long size_of_base_struct = elems - (const char *)base;
+	const struct compat_xt_entry_target *t;
+	const char *e = base;
+
+	if (target_offset < size_of_base_struct)
+		return -EINVAL;
+
+	if (target_offset + sizeof(*t) > next_offset)
+		return -EINVAL;
+
+	t = (void *)(e + target_offset);
+	if (t->u.target_size < sizeof(*t))
+		return -EINVAL;
+
+	if (target_offset + t->u.target_size > next_offset)
+		return -EINVAL;
+
+	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
+	    target_offset + sizeof(struct compat_xt_standard_target) != next_offset)
+		return -EINVAL;
+
+	/* compat_xt_entry_match has less strict alignment requirements,
+	 * otherwise they are identical.  In case of padding differences
+	 * we need to add compat version of xt_check_entry_match.
+	 */
+	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
+
+	return xt_check_entry_match(elems, base + target_offset,
+				    __alignof__(struct compat_xt_entry_match));
+}
+EXPORT_SYMBOL(xt_compat_check_entry_offsets);
 #endif /* CONFIG_COMPAT */
 
+/**
+ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
+ *
+ * @base: pointer to arp/ip/ip6t_entry
+ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
+ * @target_offset: the arp/ip/ip6_t->target_offset
+ * @next_offset: the arp/ip/ip6_t->next_offset
+ *
+ * validates that target_offset and next_offset are sane and that all
+ * match sizes (if any) align with the target offset.
+ *
+ * This function does not validate the targets or matches themselves; it
+ * only tests that all the offsets and sizes are correct, that all
+ * match structures are aligned, and that the last structure ends where
+ * the target structure begins.
+ *
+ * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
+ *
+ * The arp/ip/ip6t_entry structure @base must have passed the following tests:
+ * - it must point to a valid memory location
+ * - base to base + next_offset must be accessible, i.e. not exceed allocated
+ *   length.
+ *
+ * A well-formed entry looks like this:
+ *
+ * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
+ * e->elems[]-----'                              |               |
+ *                matchsize                      |               |
+ *                                matchsize      |               |
+ *                                               |               |
+ * target_offset---------------------------------'               |
+ * next_offset---------------------------------------------------'
+ *
+ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
+ *          This is where matches (if any) and the target reside.
+ * target_offset: beginning of target.
+ * next_offset: start of the next rule; also: size of this rule.
+ * Since targets have a minimum size, target_offset + minlen <= next_offset.
+ *
+ * Every match stores its size, sum of sizes must not exceed target_offset.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int xt_check_entry_offsets(const void *base,
+			   const char *elems,
+			   unsigned int target_offset,
+			   unsigned int next_offset)
+{
+	long size_of_base_struct = elems - (const char *)base;
+	const struct xt_entry_target *t;
+	const char *e = base;
+
+	/* target start is within the ip/ip6/arpt_entry struct */
+	if (target_offset < size_of_base_struct)
+		return -EINVAL;
+
+	if (target_offset + sizeof(*t) > next_offset)
+		return -EINVAL;
+
+	t = (void *)(e + target_offset);
+	if (t->u.target_size < sizeof(*t))
+		return -EINVAL;
+
+	if (target_offset + t->u.target_size > next_offset)
+		return -EINVAL;
+
+	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
+	    target_offset + sizeof(struct xt_standard_target) != next_offset)
+		return -EINVAL;
+
+	return xt_check_entry_match(elems, base + target_offset,
+				    __alignof__(struct xt_entry_match));
+}
+EXPORT_SYMBOL(xt_check_entry_offsets);
+
 int xt_check_target(struct xt_tgchk_param *par,
 		    unsigned int size, u_int8_t proto, bool inv_proto)
 {
@@ -591,6 +752,80 @@
 }
 EXPORT_SYMBOL_GPL(xt_check_target);
 
+/**
+ * xt_copy_counters_from_user - copy counters and metadata from userspace
+ *
+ * @user: src pointer to userspace memory
+ * @len: alleged size of userspace memory
+ * @info: where to store the xt_counters_info metadata
+ * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
+ *
+ * Copies counter metadata from @user and stores it in @info.
+ *
+ * vmallocs memory to hold the counters, then copies the counter data
+ * from @user to the new memory and returns a pointer to it.
+ *
+ * If @compat is true, @info gets converted automatically to the 64bit
+ * representation.
+ *
+ * The metadata associated with the counters is stored in @info.
+ *
+ * Return: pointer that the caller has to test via IS_ERR().
+ * If IS_ERR() is false, the caller has to vfree() the pointer.
+ */
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+				 struct xt_counters_info *info, bool compat)
+{
+	void *mem;
+	u64 size;
+
+#ifdef CONFIG_COMPAT
+	if (compat) {
+		/* structures only differ in size due to alignment */
+		struct compat_xt_counters_info compat_tmp;
+
+		if (len <= sizeof(compat_tmp))
+			return ERR_PTR(-EINVAL);
+
+		len -= sizeof(compat_tmp);
+		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
+			return ERR_PTR(-EFAULT);
+
+		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+		info->num_counters = compat_tmp.num_counters;
+		user += sizeof(compat_tmp);
+	} else
+#endif
+	{
+		if (len <= sizeof(*info))
+			return ERR_PTR(-EINVAL);
+
+		len -= sizeof(*info);
+		if (copy_from_user(info, user, sizeof(*info)) != 0)
+			return ERR_PTR(-EFAULT);
+
+		info->name[sizeof(info->name) - 1] = '\0';
+		user += sizeof(*info);
+	}
+
+	size = sizeof(struct xt_counters);
+	size *= info->num_counters;
+
+	if (size != (u64)len)
+		return ERR_PTR(-EINVAL);
+
+	mem = vmalloc(len);
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(mem, user, len) == 0)
+		return mem;
+
+	vfree(mem);
+	return ERR_PTR(-EFAULT);
+}
+EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
+
 #ifdef CONFIG_COMPAT
 int xt_compat_target_offset(const struct xt_target *target)
 {
@@ -606,6 +841,7 @@
 	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
 	int pad, off = xt_compat_target_offset(target);
 	u_int16_t tsize = ct->u.user.target_size;
+	char name[sizeof(t->u.user.name)];
 
 	t = *dstptr;
 	memcpy(t, ct, sizeof(*ct));
@@ -619,6 +855,9 @@
 
 	tsize += off;
 	t->u.user.target_size = tsize;
+	strlcpy(name, target->name, sizeof(name));
+	module_put(target->me);
+	strncpy(t->u.user.name, name, sizeof(t->u.user.name));
 
 	*size += off;
 	*dstptr += tsize;
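
The new xt_check_entry_offsets()/xt_check_entry_match() helpers above only reason about offsets and sizes inside a rule blob. A standalone model of those checks with made-up sizes (illustrative stand-ins, not the real kernel structure sizes):

#include <stdio.h>

static int check_offsets(unsigned int base_size, unsigned int match_bytes,
			 unsigned int target_size, unsigned int min_target,
			 unsigned int target_offset, unsigned int next_offset)
{
	if (target_offset < base_size)			/* target inside base struct */
		return -1;
	if (target_offset + min_target > next_offset)	/* no room for target header */
		return -1;
	if (target_offset + target_size > next_offset)	/* target spills into next rule */
		return -1;
	if (base_size + match_bytes != target_offset)	/* matches must end at the target */
		return -1;
	return 0;
}

int main(void)
{
	/* base struct 112 bytes, one 48-byte match, 40-byte target */
	printf("well-formed rule:   %d\n",
	       check_offsets(112, 48, 40, 8, 160, 200));
	/* claim the target starts before the matches end */
	printf("overlapping target: %d\n",
	       check_offsets(112, 48, 40, 8, 120, 200));
	return 0;
}
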
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 29d2c31..daf45da 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -236,6 +236,7 @@
 
 		list_del(&info->timer->entry);
 		del_timer_sync(&info->timer->timer);
+		cancel_work_sync(&info->timer->work);
 		sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
 		kfree(info->timer->attr.attr.name);
 		kfree(info->timer);
diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c
index bb9cbeb..a79af25 100644
--- a/net/netfilter/xt_connlabel.c
+++ b/net/netfilter/xt_connlabel.c
@@ -18,6 +18,16 @@
 MODULE_ALIAS("ipt_connlabel");
 MODULE_ALIAS("ip6t_connlabel");
 
+static bool connlabel_match(const struct nf_conn *ct, u16 bit)
+{
+	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
+
+	if (!labels)
+		return false;
+
+	return BIT_WORD(bit) < labels->words && test_bit(bit, labels->bits);
+}
+
 static bool
 connlabel_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
@@ -33,7 +43,7 @@
 	if (info->options & XT_CONNLABEL_OP_SET)
 		return (nf_connlabel_set(ct, info->bit) == 0) ^ invert;
 
-	return nf_connlabel_match(ct, info->bit) ^ invert;
+	return connlabel_match(ct, info->bit) ^ invert;
 }
 
 static int connlabel_mt_check(const struct xt_mtchk_param *par)
@@ -55,7 +65,7 @@
 		return ret;
 	}
 
-	ret = nf_connlabels_get(par->net, info->bit + 1);
+	ret = nf_connlabels_get(par->net, info->bit);
 	if (ret < 0)
 		nf_ct_l3proto_module_put(par->family);
 	return ret;
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 49d14ec..b10ade2 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -120,9 +120,9 @@
 {
 	switch (protocol) {
 	case IPPROTO_TCP:
-		return __inet_lookup(net, &tcp_hashinfo, skb, doff,
-				     saddr, sport, daddr, dport,
-				     in->ifindex);
+		return inet_lookup(net, &tcp_hashinfo, skb, doff,
+				   saddr, sport, daddr, dport,
+				   in->ifindex);
 	case IPPROTO_UDP:
 		return udp4_lib_lookup(net, saddr, sport, daddr, dport,
 				       in->ifindex);
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 28cddc8..1325776 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -677,7 +677,7 @@
 	u32 spot = start;
 
 	while (rc == 0 && spot <= end) {
-		if (((spot & (BITS_PER_LONG - 1)) != 0) &&
+		if (((spot & (BITS_PER_LONG - 1)) == 0) &&
 		    ((end - spot) > BITS_PER_LONG)) {
 			rc = netlbl_catmap_setlong(catmap,
 						   spot,
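
The one-character change above ('!=' becomes '==') makes the fast path run only when the starting category number sits on a word boundary, which is what whole-word writes via netlbl_catmap_setlong() require. A quick standalone check of the condition (plain C):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
	unsigned int spots[] = { 0, 1, 63, 64, 65, 128 };
	unsigned int i;

	for (i = 0; i < sizeof(spots) / sizeof(spots[0]); i++) {
		int aligned = (spots[i] & (BITS_PER_LONG - 1)) == 0;

		printf("spot %3u: %s\n", spots[i],
		       aligned ? "word aligned -> may use setlong()"
			       : "not aligned -> set one bit at a time");
	}
	return 0;
}
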
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 215fc08..627f898 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -688,7 +688,7 @@
 
 	skb_queue_purge(&sk->sk_write_queue);
 
-	if (nlk->portid) {
+	if (nlk->portid && nlk->bound) {
 		struct netlink_notify n = {
 						.net = sock_net(sk),
 						.protocol = sk->sk_protocol,
@@ -2059,6 +2059,7 @@
 	struct netlink_callback *cb;
 	struct sk_buff *skb = NULL;
 	struct nlmsghdr *nlh;
+	struct module *module;
 	int len, err = -ENOBUFS;
 	int alloc_min_size;
 	int alloc_size;
@@ -2134,9 +2135,11 @@
 		cb->done(cb);
 
 	nlk->cb_running = false;
+	module = cb->module;
+	skb = cb->skb;
 	mutex_unlock(nlk->cb_mutex);
-	module_put(cb->module);
-	consume_skb(cb->skb);
+	module_put(module);
+	consume_skb(skb);
 	return 0;
 
 errout_skb:
@@ -2343,7 +2346,8 @@
 {
 	int err;
 
-	err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
+	err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti,
+				   GFP_KERNEL);
 	if (err) {
 		iter->link = MAX_LINKS;
 		return err;
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index fbb7a2b..61fff42 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -64,18 +64,26 @@
 	return NULL;
 }
 
-int nci_get_conn_info_by_id(struct nci_dev *ndev, u8 id)
+int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type,
+					  struct dest_spec_params *params)
 {
 	struct nci_conn_info *conn_info;
 
 	list_for_each_entry(conn_info, &ndev->conn_info_list, list) {
-		if (conn_info->id == id)
-			return conn_info->conn_id;
+		if (conn_info->dest_type == dest_type) {
+			if (!params)
+				return conn_info->conn_id;
+			if (conn_info) {
+				if (params->id == conn_info->dest_params->id &&
+				    params->protocol == conn_info->dest_params->protocol)
+					return conn_info->conn_id;
+			}
+		}
 	}
 
 	return -EINVAL;
 }
-EXPORT_SYMBOL(nci_get_conn_info_by_id);
+EXPORT_SYMBOL(nci_get_conn_info_by_dest_type_params);
 
 /* ---- NCI requests ---- */
 
@@ -392,6 +400,83 @@
 }
 EXPORT_SYMBOL(nci_core_init);
 
+struct nci_loopback_data {
+	u8 conn_id;
+	struct sk_buff *data;
+};
+
+static void nci_send_data_req(struct nci_dev *ndev, unsigned long opt)
+{
+	struct nci_loopback_data *data = (struct nci_loopback_data *)opt;
+
+	nci_send_data(ndev, data->conn_id, data->data);
+}
+
+static void nci_nfcc_loopback_cb(void *context, struct sk_buff *skb, int err)
+{
+	struct nci_dev *ndev = (struct nci_dev *)context;
+	struct nci_conn_info    *conn_info;
+
+	conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id);
+	if (!conn_info) {
+		nci_req_complete(ndev, NCI_STATUS_REJECTED);
+		return;
+	}
+
+	conn_info->rx_skb = skb;
+
+	nci_req_complete(ndev, NCI_STATUS_OK);
+}
+
+int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
+		      struct sk_buff **resp)
+{
+	int r;
+	struct nci_loopback_data loopback_data;
+	struct nci_conn_info *conn_info;
+	struct sk_buff *skb;
+	int conn_id = nci_get_conn_info_by_dest_type_params(ndev,
+					NCI_DESTINATION_NFCC_LOOPBACK, NULL);
+
+	if (conn_id < 0) {
+		r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCC_LOOPBACK,
+					 0, 0, NULL);
+		if (r != NCI_STATUS_OK)
+			return r;
+
+		conn_id = nci_get_conn_info_by_dest_type_params(ndev,
+					NCI_DESTINATION_NFCC_LOOPBACK,
+					NULL);
+	}
+
+	conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+	if (!conn_info)
+		return -EPROTO;
+
+	/* store cb and context to be used on receiving data */
+	conn_info->data_exchange_cb = nci_nfcc_loopback_cb;
+	conn_info->data_exchange_cb_context = ndev;
+
+	skb = nci_skb_alloc(ndev, NCI_DATA_HDR_SIZE + data_len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_reserve(skb, NCI_DATA_HDR_SIZE);
+	memcpy(skb_put(skb, data_len), data, data_len);
+
+	loopback_data.conn_id = conn_id;
+	loopback_data.data = skb;
+
+	ndev->cur_conn_id = conn_id;
+	r = nci_request(ndev, nci_send_data_req, (unsigned long)&loopback_data,
+			msecs_to_jiffies(NCI_DATA_TIMEOUT));
+	if (r == NCI_STATUS_OK && resp)
+		*resp = conn_info->rx_skb;
+
+	return r;
+}
+EXPORT_SYMBOL(nci_nfcc_loopback);
+
 static int nci_open_device(struct nci_dev *ndev)
 {
 	int rc = 0;
@@ -610,9 +695,6 @@
 	struct nci_core_conn_create_cmd *cmd;
 	struct core_conn_create_data data;
 
-	if (!number_destination_params)
-		return -EINVAL;
-
 	data.length = params_len + sizeof(struct nci_core_conn_create_cmd);
 	cmd = kzalloc(data.length, GFP_KERNEL);
 	if (!cmd)
@@ -620,17 +702,23 @@
 
 	cmd->destination_type = destination_type;
 	cmd->number_destination_params = number_destination_params;
-	memcpy(cmd->params, params, params_len);
 
 	data.cmd = cmd;
 
-	if (params->length > 0)
-		ndev->cur_id = params->value[DEST_SPEC_PARAMS_ID_INDEX];
-	else
-		ndev->cur_id = 0;
+	if (params) {
+		memcpy(cmd->params, params, params_len);
+		if (params->length > 0)
+			memcpy(&ndev->cur_params,
+			       &params->value[DEST_SPEC_PARAMS_ID_INDEX],
+			       sizeof(struct dest_spec_params));
+		else
+			ndev->cur_params.id = 0;
+	} else {
+		ndev->cur_params.id = 0;
+	}
+	ndev->cur_dest_type = destination_type;
 
-	r = __nci_request(ndev, nci_core_conn_create_req,
-			  (unsigned long)&data,
+	r = __nci_request(ndev, nci_core_conn_create_req, (unsigned long)&data,
 			  msecs_to_jiffies(NCI_CMD_TIMEOUT));
 	kfree(cmd);
 	return r;
@@ -646,6 +734,7 @@
 
 int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id)
 {
+	ndev->cur_conn_id = conn_id;
 	return __nci_request(ndev, nci_core_conn_close_req, conn_id,
 			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
 }
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 2ada2b3..1e8c1a1 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -734,7 +734,7 @@
 	 * “HCI Access”, even if the HCI Network contains multiple NFCEEs.
 	 */
 	ndev->hci_dev->nfcee_id = nfcee_ntf->nfcee_id;
-	ndev->cur_id = nfcee_ntf->nfcee_id;
+	ndev->cur_params.id = nfcee_ntf->nfcee_id;
 
 	nci_req_complete(ndev, status);
 }
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 9b6eb91..e3bbf19 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -226,7 +226,7 @@
 					    struct sk_buff *skb)
 {
 	__u8 status = skb->data[0];
-	struct nci_conn_info *conn_info;
+	struct nci_conn_info *conn_info = NULL;
 	struct nci_core_conn_create_rsp *rsp;
 
 	pr_debug("status 0x%x\n", status);
@@ -241,7 +241,17 @@
 			goto exit;
 		}
 
-		conn_info->id = ndev->cur_id;
+		conn_info->dest_params = devm_kzalloc(&ndev->nfc_dev->dev,
+						sizeof(struct dest_spec_params),
+						GFP_KERNEL);
+		if (!conn_info->dest_params) {
+			status = NCI_STATUS_REJECTED;
+			goto free_conn_info;
+		}
+
+		conn_info->dest_type = ndev->cur_dest_type;
+		conn_info->dest_params->id = ndev->cur_params.id;
+		conn_info->dest_params->protocol = ndev->cur_params.protocol;
 		conn_info->conn_id = rsp->conn_id;
 
 		/* Note: data_exchange_cb and data_exchange_cb_context need to
@@ -251,7 +261,7 @@
 		INIT_LIST_HEAD(&conn_info->list);
 		list_add(&conn_info->list, &ndev->conn_info_list);
 
-		if (ndev->cur_id == ndev->hci_dev->nfcee_id)
+		if (ndev->cur_params.id == ndev->hci_dev->nfcee_id)
 			ndev->hci_dev->conn_info = conn_info;
 
 		conn_info->conn_id = rsp->conn_id;
@@ -259,7 +269,11 @@
 		atomic_set(&conn_info->credits_cnt, rsp->credits_cnt);
 	}
 
+free_conn_info:
+	if (status == NCI_STATUS_REJECTED)
+		devm_kfree(&ndev->nfc_dev->dev, conn_info);
 exit:
+
 	nci_req_complete(ndev, status);
 }
 
@@ -271,7 +285,8 @@
 
 	pr_debug("status 0x%x\n", status);
 	if (status == NCI_STATUS_OK) {
-		conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_id);
+		conn_info = nci_get_conn_info_by_conn_id(ndev,
+							 ndev->cur_conn_id);
 		if (conn_info) {
 			list_del(&conn_info->list);
 			devm_kfree(&ndev->nfc_dev->dev, conn_info);
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 234a733..ce94729 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -7,7 +7,9 @@
 	depends on INET
 	depends on !NF_CONNTRACK || \
 		   (NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \
-				     (!NF_NAT || NF_NAT)))
+				     (!NF_NAT || NF_NAT) && \
+				     (!NF_NAT_IPV4 || NF_NAT_IPV4) && \
+				     (!NF_NAT_IPV6 || NF_NAT_IPV6)))
 	select LIBCRC32C
 	select MPLS
 	select NET_MPLS_GSO
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index e9dd47b..879185f 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -461,7 +461,7 @@
 		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
 
 		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
-			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
 				      true);
 			memcpy(&flow_key->ipv6.addr.src, masked,
 			       sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +483,7 @@
 							     NULL, &flags)
 					       != NEXTHDR_ROUTING);
 
-			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
 				      recalc_csum);
 			memcpy(&flow_key->ipv6.addr.dst, masked,
 			       sizeof(flow_key->ipv6.addr.dst));
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index dc5eb29..3d5feed 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -367,6 +367,7 @@
 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
 
+		skb_orphan(skb);
 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
 		err = nf_ct_frag6_gather(net, skb, user);
 		if (err)
@@ -438,20 +439,12 @@
 	u8 protonum;
 
 	l3proto = __nf_ct_l3proto_find(l3num);
-	if (!l3proto) {
-		pr_debug("ovs_ct_find_existing: Can't get l3proto\n");
-		return NULL;
-	}
 	if (l3proto->get_l4proto(skb, skb_network_offset(skb), &dataoff,
 				 &protonum) <= 0) {
 		pr_debug("ovs_ct_find_existing: Can't get protonum\n");
 		return NULL;
 	}
 	l4proto = __nf_ct_l4proto_find(l3num, protonum);
-	if (!l4proto) {
-		pr_debug("ovs_ct_find_existing: Can't get l4proto\n");
-		return NULL;
-	}
 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
 			     protonum, net, &tuple, l3proto, l4proto)) {
 		pr_debug("ovs_ct_find_existing: Can't get tuple\n");
@@ -535,14 +528,15 @@
 	switch (ctinfo) {
 	case IP_CT_RELATED:
 	case IP_CT_RELATED_REPLY:
-		if (skb->protocol == htons(ETH_P_IP) &&
+		if (IS_ENABLED(CONFIG_NF_NAT_IPV4) &&
+		    skb->protocol == htons(ETH_P_IP) &&
 		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
 			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
 							   hooknum))
 				err = NF_DROP;
 			goto push;
-#if IS_ENABLED(CONFIG_NF_NAT_IPV6)
-		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		} else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) &&
+			   skb->protocol == htons(ETH_P_IPV6)) {
 			__be16 frag_off;
 			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
 			int hdrlen = ipv6_skip_exthdr(skb,
@@ -557,7 +551,6 @@
 					err = NF_DROP;
 				goto push;
 			}
-#endif
 		}
 		/* Non-ICMP, fall thru to initialize if needed. */
 	case IP_CT_NEW:
@@ -664,11 +657,12 @@
 
 	/* Determine NAT type.
 	 * Check if the NAT type can be deduced from the tracked connection.
-	 * Make sure expected traffic is NATted only when committing.
+	 * Make sure new expected connections (IP_CT_RELATED) are NATted only
+	 * when committing.
 	 */
 	if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW &&
 	    ct->status & IPS_NAT_MASK &&
-	    (!(ct->status & IPS_EXPECTED_BIT) || info->commit)) {
+	    (ctinfo != IP_CT_RELATED || info->commit)) {
 		/* NAT an established or related connection like before. */
 		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
 			/* This is the REPLY direction for a connection
@@ -774,6 +768,19 @@
 			return -EINVAL;
 		}
 
+		/* Userspace may decide to perform a ct lookup without a helper
+		 * specified, followed by a (recirculate and) commit with one.
+		 * Therefore, for unconfirmed connections which we will commit,
+		 * we need to attach the helper here.
+		 */
+		if (!nf_ct_is_confirmed(ct) && info->commit &&
+		    info->helper && !nfct_help(ct)) {
+			int err = __nf_ct_try_assign_helper(ct, info->ct,
+							    GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
 		/* Call the helper only if:
 		 * - nf_conntrack_in() was executed above ("!cached") for a
 		 *   confirmed connection, or
@@ -968,7 +975,8 @@
 			break;
 
 		case OVS_NAT_ATTR_IP_MIN:
-			nla_memcpy(&info->range.min_addr, a, nla_len(a));
+			nla_memcpy(&info->range.min_addr, a,
+				   sizeof(info->range.min_addr));
 			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
 			break;
 
@@ -1238,7 +1246,8 @@
 	}
 
 	if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
-		if (info->family == NFPROTO_IPV4) {
+		if (IS_ENABLED(CONFIG_NF_NAT_IPV4) &&
+		    info->family == NFPROTO_IPV4) {
 			if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
 					    info->range.min_addr.ip) ||
 			    (info->range.max_addr.ip
@@ -1246,8 +1255,8 @@
 			     (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
 					      info->range.max_addr.ip))))
 				return false;
-#if IS_ENABLED(CONFIG_NF_NAT_IPV6)
-		} else if (info->family == NFPROTO_IPV6) {
+		} else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) &&
+			   info->family == NFPROTO_IPV6) {
 			if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
 					     &info->range.min_addr.in6) ||
 			    (memcmp(&info->range.max_addr.in6,
@@ -1256,7 +1265,6 @@
 			     (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
 					       &info->range.max_addr.in6))))
 				return false;
-#endif
 		} else {
 			return false;
 		}
@@ -1342,7 +1350,7 @@
 	unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 
-	if (nf_connlabels_get(net, n_bits)) {
+	if (nf_connlabels_get(net, n_bits - 1)) {
 		ovs_net->xt_label = false;
 		OVS_NLERR(true, "Failed to set connlabel length");
 	} else {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0cc66a4..856bd8d 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -738,9 +738,9 @@
 		len += nla_total_size(acts->orig_len);
 
 	return len
-		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
+		+ nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
 		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
-		+ nla_total_size(8); /* OVS_FLOW_ATTR_USED */
+		+ nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
 }
 
 /* Called with ovs_mutex or RCU read lock. */
@@ -754,11 +754,14 @@
 	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
 
 	if (used &&
-	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
+	    nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
+			      OVS_FLOW_ATTR_PAD))
 		return -EMSGSIZE;
 
 	if (stats.n_packets &&
-	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
+	    nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
+			  sizeof(struct ovs_flow_stats), &stats,
+			  OVS_FLOW_ATTR_PAD))
 		return -EMSGSIZE;
 
 	if ((u8)ntohs(tcp_flags) &&
@@ -1434,8 +1437,8 @@
 	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
 
 	msgsize += nla_total_size(IFNAMSIZ);
-	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
-	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
+	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
+	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
 	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
 
 	return msgsize;
@@ -1462,13 +1465,13 @@
 		goto nla_put_failure;
 
 	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
-	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
-			&dp_stats))
+	if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
+			  &dp_stats, OVS_DP_ATTR_PAD))
 		goto nla_put_failure;
 
-	if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
-			sizeof(struct ovs_dp_megaflow_stats),
-			&dp_megaflow_stats))
+	if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
+			  sizeof(struct ovs_dp_megaflow_stats),
+			  &dp_megaflow_stats, OVS_DP_ATTR_PAD))
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
@@ -1837,8 +1840,9 @@
 		goto nla_put_failure;
 
 	ovs_vport_get_stats(vport, &vport_stats);
-	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
-		    &vport_stats))
+	if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
+			  sizeof(struct ovs_vport_stats), &vport_stats,
+			  OVS_VPORT_ATTR_PAD))
 		goto nla_put_failure;
 
 	if (ovs_vport_get_upcall_portids(vport, skb))
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 689c172..0bb650f 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -261,7 +261,7 @@
 	/* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
 	 * updating this function.
 	 */
-	return    nla_total_size(8)    /* OVS_TUNNEL_KEY_ATTR_ID */
+	return    nla_total_size_64bit(8) /* OVS_TUNNEL_KEY_ATTR_ID */
 		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
 		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
 		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
@@ -720,7 +720,8 @@
 			      unsigned short tun_proto)
 {
 	if (output->tun_flags & TUNNEL_KEY &&
-	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
+	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
+			 OVS_TUNNEL_KEY_ATTR_PAD))
 		return -EMSGSIZE;
 	switch (tun_proto) {
 	case AF_INET:
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 7c8b90b..2ee48e4 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -165,11 +165,10 @@
 
 	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
-			      IFF_PHONY_HEADROOM;
+			      IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
 	netdev->destructor = internal_dev_destructor;
 	netdev->ethtool_ops = &internal_dev_ethtool_ops;
 	netdev->rtnl_link_ops = &internal_dev_link_ops;
-	netdev->tx_queue_len = 0;
 
 	netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
 			   NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1ecfa71..4040eb9 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1837,6 +1837,7 @@
 	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
 	struct sk_buff *skb = NULL;
 	struct net_device *dev;
+	struct sockcm_cookie sockc;
 	__be16 proto = 0;
 	int err;
 	int extra_len = 0;
@@ -1925,12 +1926,21 @@
 		goto out_unlock;
 	}
 
+	sockc.tsflags = 0;
+	if (msg->msg_controllen) {
+		err = sock_cmsg_send(sk, msg, &sockc);
+		if (unlikely(err)) {
+			err = -EINVAL;
+			goto out_unlock;
+		}
+	}
+
 	skb->protocol = proto;
 	skb->dev = dev;
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
 
-	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
 
 	if (unlikely(extra_len == 4))
 		skb->no_fcs = 1;
@@ -2042,6 +2052,7 @@
 	u8 *skb_head = skb->data;
 	int skb_len = skb->len;
 	unsigned int snaplen, res;
+	bool is_drop_n_account = false;
 
 	if (skb->pkt_type == PACKET_LOOPBACK)
 		goto drop;
@@ -2130,6 +2141,7 @@
 	return 0;
 
 drop_n_acct:
+	is_drop_n_account = true;
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.stats1.tp_drops++;
 	atomic_inc(&sk->sk_drops);
@@ -2141,7 +2153,10 @@
 		skb->len = skb_len;
 	}
 drop:
-	consume_skb(skb);
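+	/* only kfree_skb() is accounted as a real drop by tracing; consume_skb() is not */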
+	if (!is_drop_n_account)
+		consume_skb(skb);
+	else
+		kfree_skb(skb);
 	return 0;
 }
 
@@ -2160,6 +2175,7 @@
 	struct sk_buff *copy_skb = NULL;
 	struct timespec ts;
 	__u32 ts_status;
+	bool is_drop_n_account = false;
 
 	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
 	 * We may add members to them until current aligned size without forcing
@@ -2367,10 +2383,14 @@
 		skb->len = skb_len;
 	}
 drop:
-	kfree_skb(skb);
+	if (!is_drop_n_account)
+		consume_skb(skb);
+	else
+		kfree_skb(skb);
 	return 0;
 
 drop_n_account:
+	is_drop_n_account = true;
 	po->stats.stats1.tp_drops++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
@@ -2486,7 +2506,8 @@
 
 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 		void *frame, struct net_device *dev, void *data, int tp_len,
-		__be16 proto, unsigned char *addr, int hlen, int copylen)
+		__be16 proto, unsigned char *addr, int hlen, int copylen,
+		const struct sockcm_cookie *sockc)
 {
 	union tpacket_uhdr ph;
 	int to_write, offset, len, nr_frags, len_max;
@@ -2500,7 +2521,7 @@
 	skb->dev = dev;
 	skb->priority = po->sk.sk_priority;
 	skb->mark = po->sk.sk_mark;
-	sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
+	sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
 	skb_shinfo(skb)->destructor_arg = ph.raw;
 
 	skb_reserve(skb, hlen);
@@ -2624,6 +2645,7 @@
 	struct sk_buff *skb;
 	struct net_device *dev;
 	struct virtio_net_hdr *vnet_hdr = NULL;
+	struct sockcm_cookie sockc;
 	__be16 proto;
 	int err, reserve = 0;
 	void *ph;
@@ -2655,6 +2677,13 @@
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
 	}
 
+	sockc.tsflags = 0;
+	if (msg->msg_controllen) {
+		err = sock_cmsg_send(&po->sk, msg, &sockc);
+		if (unlikely(err))
+			goto out;
+	}
+
 	err = -ENXIO;
 	if (unlikely(dev == NULL))
 		goto out;
@@ -2712,7 +2741,7 @@
 			goto out_status;
 		}
 		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
-					  addr, hlen, copylen);
+					  addr, hlen, copylen, &sockc);
 		if (likely(tp_len >= 0) &&
 		    tp_len > dev->mtu + reserve &&
 		    !po->has_vnet_hdr &&
@@ -2851,6 +2880,7 @@
 	if (unlikely(!(dev->flags & IFF_UP)))
 		goto out_unlock;
 
+	sockc.tsflags = 0;
 	sockc.mark = sk->sk_mark;
 	if (msg->msg_controllen) {
 		err = sock_cmsg_send(sk, msg, &sockc);
@@ -2908,7 +2938,7 @@
 		goto out_free;
 	}
 
-	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
 
 	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
 	    !packet_extra_vlan_len_allowed(dev, skb)) {
@@ -3521,6 +3551,7 @@
 	i->ifindex = mreq->mr_ifindex;
 	i->alen = mreq->mr_alen;
 	memcpy(i->addr, mreq->mr_address, i->alen);
+	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
 	i->count = 1;
 	i->next = po->mclist;
 	po->mclist = i;
@@ -4151,7 +4182,7 @@
 
 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
-		WARN(1, "Tx-ring is not supported.\n");
+		net_warn_ratelimited("Tx-ring is not supported.\n");
 		goto out;
 	}
 
diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig
new file mode 100644
index 0000000..b83c680
--- /dev/null
+++ b/net/qrtr/Kconfig
@@ -0,0 +1,24 @@
+# Qualcomm IPC Router configuration
+#
+
+config QRTR
+	tristate "Qualcomm IPC Router support"
+	depends on ARCH_QCOM || COMPILE_TEST
+	---help---
+	  Say Y if you intend to use the Qualcomm IPC router protocol.  The
+	  protocol is used to communicate with services provided by other
+	  hardware blocks in the system.
+
+	  In order to do service lookups, a userspace daemon is required to
+	  maintain a service listing.
+
+if QRTR
+
+config QRTR_SMD
+	tristate "SMD IPC Router channels"
+	depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+	---help---
+	  Say Y here to support SMD-based ipcrouter channels.  SMD is the
+	  most common transport for IPC Router.
+
+endif # QRTR
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
new file mode 100644
index 0000000..ab09e40
--- /dev/null
+++ b/net/qrtr/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_QRTR) := qrtr.o
+
+obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o
+qrtr-smd-y	:= smd.o
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
new file mode 100644
index 0000000..c985ecb
--- /dev/null
+++ b/net/qrtr/qrtr.c
@@ -0,0 +1,1007 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/qrtr.h>
+#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
+
+#include <net/sock.h>
+
+#include "qrtr.h"
+
+#define QRTR_PROTO_VER 1
+
+/* auto-bind range */
+#define QRTR_MIN_EPH_SOCKET 0x4000
+#define QRTR_MAX_EPH_SOCKET 0x7fff
+
+enum qrtr_pkt_type {
+	QRTR_TYPE_DATA		= 1,
+	QRTR_TYPE_HELLO		= 2,
+	QRTR_TYPE_BYE		= 3,
+	QRTR_TYPE_NEW_SERVER	= 4,
+	QRTR_TYPE_DEL_SERVER	= 5,
+	QRTR_TYPE_DEL_CLIENT	= 6,
+	QRTR_TYPE_RESUME_TX	= 7,
+	QRTR_TYPE_EXIT		= 8,
+	QRTR_TYPE_PING		= 9,
+};
+
+/**
+ * struct qrtr_hdr - (I|R)PCrouter packet header
+ * @version: protocol version
+ * @type: packet type; one of QRTR_TYPE_*
+ * @src_node_id: source node
+ * @src_port_id: source port
+ * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
+ * @size: length of packet, excluding this header
+ * @dst_node_id: destination node
+ * @dst_port_id: destination port
+ */
+struct qrtr_hdr {
+	__le32 version;
+	__le32 type;
+	__le32 src_node_id;
+	__le32 src_port_id;
+	__le32 confirm_rx;
+	__le32 size;
+	__le32 dst_node_id;
+	__le32 dst_port_id;
+} __packed;
+
+#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr)
+#define QRTR_NODE_BCAST ((unsigned int)-1)
+#define QRTR_PORT_CTRL ((unsigned int)-2)
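+/* node id -1 broadcasts to all endpoints; port -2 is the control service */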
+
+struct qrtr_sock {
+	/* WARNING: sk must be the first member */
+	struct sock sk;
+	struct sockaddr_qrtr us;
+	struct sockaddr_qrtr peer;
+};
+
+static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
+{
+	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
+	return container_of(sk, struct qrtr_sock, sk);
+}
+
+static unsigned int qrtr_local_nid = -1;
+
+/* for node ids */
+static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
+/* broadcast list */
+static LIST_HEAD(qrtr_all_nodes);
+/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
+static DEFINE_MUTEX(qrtr_node_lock);
+
+/* local port allocation management */
+static DEFINE_IDR(qrtr_ports);
+static DEFINE_MUTEX(qrtr_port_lock);
+
+/**
+ * struct qrtr_node - endpoint node
+ * @ep_lock: lock for endpoint management and callbacks
+ * @ep: endpoint
+ * @ref: reference count for node
+ * @nid: node id
+ * @rx_queue: receive queue
+ * @work: scheduled work struct for recv work
+ * @item: list item for broadcast list
+ */
+struct qrtr_node {
+	struct mutex ep_lock;
+	struct qrtr_endpoint *ep;
+	struct kref ref;
+	unsigned int nid;
+
+	struct sk_buff_head rx_queue;
+	struct work_struct work;
+	struct list_head item;
+};
+
+/* Release node resources and free the node.
+ *
+ * Do not call directly, use qrtr_node_release.  To be used with
+ * kref_put_mutex.  As such, the node mutex is expected to be locked on call.
+ */
+static void __qrtr_node_release(struct kref *kref)
+{
+	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
+
+	if (node->nid != QRTR_EP_NID_AUTO)
+		radix_tree_delete(&qrtr_nodes, node->nid);
+
+	list_del(&node->item);
+	mutex_unlock(&qrtr_node_lock);
+
+	skb_queue_purge(&node->rx_queue);
+	kfree(node);
+}
+
+/* Increment reference to node. */
+static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
+{
+	if (node)
+		kref_get(&node->ref);
+	return node;
+}
+
+/* Decrement reference to node and release as necessary. */
+static void qrtr_node_release(struct qrtr_node *node)
+{
+	if (!node)
+		return;
+	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
+}
+
+/* Pass an outgoing packet socket buffer to the endpoint driver. */
+static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb)
+{
+	int rc = -ENODEV;
+
+	mutex_lock(&node->ep_lock);
+	if (node->ep)
+		rc = node->ep->xmit(node->ep, skb);
+	else
+		kfree_skb(skb);
+	mutex_unlock(&node->ep_lock);
+
+	return rc;
+}
+
+/* Lookup node by id.
+ *
+ * callers must release with qrtr_node_release()
+ */
+static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
+{
+	struct qrtr_node *node;
+
+	mutex_lock(&qrtr_node_lock);
+	node = radix_tree_lookup(&qrtr_nodes, nid);
+	node = qrtr_node_acquire(node);
+	mutex_unlock(&qrtr_node_lock);
+
+	return node;
+}
+
+/* Assign node id to node.
+ *
+ * This is mostly useful for automatic node id assignment, based on
+ * the source id in the incoming packet.
+ */
+static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
+{
+	if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
+		return;
+
+	mutex_lock(&qrtr_node_lock);
+	radix_tree_insert(&qrtr_nodes, nid, node);
+	node->nid = nid;
+	mutex_unlock(&qrtr_node_lock);
+}
+
+/**
+ * qrtr_endpoint_post() - post incoming data
+ * @ep: endpoint handle
+ * @data: data pointer
+ * @len: size of data in bytes
+ *
+ * Return: 0 on success; negative error code on failure
+ */
+int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+{
+	struct qrtr_node *node = ep->node;
+	const struct qrtr_hdr *phdr = data;
+	struct sk_buff *skb;
+	unsigned int psize;
+	unsigned int size;
+	unsigned int type;
+	unsigned int ver;
+	unsigned int dst;
+
+	if (len < QRTR_HDR_SIZE || len & 3)
+		return -EINVAL;
+
+	ver = le32_to_cpu(phdr->version);
+	size = le32_to_cpu(phdr->size);
+	type = le32_to_cpu(phdr->type);
+	dst = le32_to_cpu(phdr->dst_port_id);
+
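+	/* payloads are padded to a 4-byte boundary on the wire */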
+	psize = (size + 3) & ~3;
+
+	if (ver != QRTR_PROTO_VER)
+		return -EINVAL;
+
+	if (len != psize + QRTR_HDR_SIZE)
+		return -EINVAL;
+
+	if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA)
+		return -EINVAL;
+
+	skb = netdev_alloc_skb(NULL, len);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_reset_transport_header(skb);
+	memcpy(skb_put(skb, len), data, len);
+
+	skb_queue_tail(&node->rx_queue, skb);
+	schedule_work(&node->work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
+
+/* Allocate and construct a resume-tx packet. */
+static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
+					    u32 dst_node, u32 port)
+{
+	const int pkt_len = 20;
+	struct qrtr_hdr *hdr;
+	struct sk_buff *skb;
+	u32 *buf;
+
+	skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+	skb_reset_transport_header(skb);
+
+	hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE);
+	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
+	hdr->type = cpu_to_le32(QRTR_TYPE_RESUME_TX);
+	hdr->src_node_id = cpu_to_le32(src_node);
+	hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
+	hdr->confirm_rx = cpu_to_le32(0);
+	hdr->size = cpu_to_le32(pkt_len);
+	hdr->dst_node_id = cpu_to_le32(dst_node);
+	hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
+
+	buf = (u32 *)skb_put(skb, pkt_len);
+	memset(buf, 0, pkt_len);
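+	/* payload: message type, then the confirming node id and port */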
+	buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
+	buf[1] = cpu_to_le32(src_node);
+	buf[2] = cpu_to_le32(port);
+
+	return skb;
+}
+
+static struct qrtr_sock *qrtr_port_lookup(int port);
+static void qrtr_port_put(struct qrtr_sock *ipc);
+
+/* Handle and route a received packet.
+ *
+ * This will auto-reply with resume-tx packet as necessary.
+ */
+static void qrtr_node_rx_work(struct work_struct *work)
+{
+	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
+		const struct qrtr_hdr *phdr;
+		u32 dst_node, dst_port;
+		struct qrtr_sock *ipc;
+		u32 src_node;
+		int confirm;
+
+		phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
+		src_node = le32_to_cpu(phdr->src_node_id);
+		dst_node = le32_to_cpu(phdr->dst_node_id);
+		dst_port = le32_to_cpu(phdr->dst_port_id);
+		confirm = !!phdr->confirm_rx;
+
+		qrtr_node_assign(node, src_node);
+
+		ipc = qrtr_port_lookup(dst_port);
+		if (!ipc) {
+			kfree_skb(skb);
+		} else {
+			if (sock_queue_rcv_skb(&ipc->sk, skb))
+				kfree_skb(skb);
+
+			qrtr_port_put(ipc);
+		}
+
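+		/* the sender set confirm_rx, so acknowledge with a resume-tx packet */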
+		if (confirm) {
+			skb = qrtr_alloc_resume_tx(dst_node, node->nid, dst_port);
+			if (!skb)
+				break;
+			if (qrtr_node_enqueue(node, skb))
+				break;
+		}
+	}
+}
+
+/**
+ * qrtr_endpoint_register() - register a new endpoint
+ * @ep: endpoint to register
+ * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
+ * Return: 0 on success; negative error code on failure
+ *
+ * The specified endpoint must have the xmit function pointer set on call.
+ */
+int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
+{
+	struct qrtr_node *node;
+
+	if (!ep || !ep->xmit)
+		return -EINVAL;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	INIT_WORK(&node->work, qrtr_node_rx_work);
+	kref_init(&node->ref);
+	mutex_init(&node->ep_lock);
+	skb_queue_head_init(&node->rx_queue);
+	node->nid = QRTR_EP_NID_AUTO;
+	node->ep = ep;
+
+	qrtr_node_assign(node, nid);
+
+	mutex_lock(&qrtr_node_lock);
+	list_add(&node->item, &qrtr_all_nodes);
+	mutex_unlock(&qrtr_node_lock);
+	ep->node = node;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
+
+/**
+ * qrtr_endpoint_unregister - unregister endpoint
+ * @ep: endpoint to unregister
+ */
+void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
+{
+	struct qrtr_node *node = ep->node;
+
+	mutex_lock(&node->ep_lock);
+	node->ep = NULL;
+	mutex_unlock(&node->ep_lock);
+
+	qrtr_node_release(node);
+	ep->node = NULL;
+}
+EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
+
+/* Lookup socket by port.
+ *
+ * Callers must release with qrtr_port_put()
+ */
+static struct qrtr_sock *qrtr_port_lookup(int port)
+{
+	struct qrtr_sock *ipc;
+
+	if (port == QRTR_PORT_CTRL)
+		port = 0;
+
+	mutex_lock(&qrtr_port_lock);
+	ipc = idr_find(&qrtr_ports, port);
+	if (ipc)
+		sock_hold(&ipc->sk);
+	mutex_unlock(&qrtr_port_lock);
+
+	return ipc;
+}
+
+/* Release acquired socket. */
+static void qrtr_port_put(struct qrtr_sock *ipc)
+{
+	sock_put(&ipc->sk);
+}
+
+/* Remove port assignment. */
+static void qrtr_port_remove(struct qrtr_sock *ipc)
+{
+	int port = ipc->us.sq_port;
+
+	if (port == QRTR_PORT_CTRL)
+		port = 0;
+
+	__sock_put(&ipc->sk);
+
+	mutex_lock(&qrtr_port_lock);
+	idr_remove(&qrtr_ports, port);
+	mutex_unlock(&qrtr_port_lock);
+}
+
+/* Assign port number to socket.
+ *
+ * Specify port in the integer pointed to by port, and it will be adjusted
+ * on return as necessary.
+ *
+ * Port may be:
+ *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
+ *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
+ *   >QRTR_MIN_EPH_SOCKET: Specified; available to all
+ */
+static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
+{
+	int rc;
+
+	mutex_lock(&qrtr_port_lock);
+	if (!*port) {
+		rc = idr_alloc(&qrtr_ports, ipc,
+			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
+			       GFP_ATOMIC);
+		if (rc >= 0)
+			*port = rc;
+	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
+		rc = -EACCES;
+	} else if (*port == QRTR_PORT_CTRL) {
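+		/* the control port lives in idr slot 0 */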
+		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
+	} else {
+		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
+		if (rc >= 0)
+			*port = rc;
+	}
+	mutex_unlock(&qrtr_port_lock);
+
+	if (rc == -ENOSPC)
+		return -EADDRINUSE;
+	else if (rc < 0)
+		return rc;
+
+	sock_hold(&ipc->sk);
+
+	return 0;
+}
+
+/* Bind socket to address.
+ *
+ * Socket should be locked upon call.
+ */
+static int __qrtr_bind(struct socket *sock,
+		       const struct sockaddr_qrtr *addr, int zapped)
+{
+	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+	struct sock *sk = sock->sk;
+	int port;
+	int rc;
+
+	/* rebinding ok */
+	if (!zapped && addr->sq_port == ipc->us.sq_port)
+		return 0;
+
+	port = addr->sq_port;
+	rc = qrtr_port_assign(ipc, &port);
+	if (rc)
+		return rc;
+
+	/* unbind previous, if any */
+	if (!zapped)
+		qrtr_port_remove(ipc);
+	ipc->us.sq_port = port;
+
+	sock_reset_flag(sk, SOCK_ZAPPED);
+
+	return 0;
+}
+
+/* Auto bind to an ephemeral port. */
+static int qrtr_autobind(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct sockaddr_qrtr addr;
+
+	if (!sock_flag(sk, SOCK_ZAPPED))
+		return 0;
+
+	addr.sq_family = AF_QIPCRTR;
+	addr.sq_node = qrtr_local_nid;
+	addr.sq_port = 0;
+
+	return __qrtr_bind(sock, &addr, 1);
+}
+
+/* Bind socket to specified sockaddr. */
+static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
+{
+	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
+	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+	struct sock *sk = sock->sk;
+	int rc;
+
+	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
+		return -EINVAL;
+
+	if (addr->sq_node != ipc->us.sq_node)
+		return -EINVAL;
+
+	lock_sock(sk);
+	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
+	release_sock(sk);
+
+	return rc;
+}
+
+/* Queue packet to local peer socket. */
+static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb)
+{
+	const struct qrtr_hdr *phdr;
+	struct qrtr_sock *ipc;
+
+	phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
+
+	ipc = qrtr_port_lookup(le32_to_cpu(phdr->dst_port_id));
+	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
+		kfree_skb(skb);
+		return -ENODEV;
+	}
+
+	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+		qrtr_port_put(ipc);
+		kfree_skb(skb);
+		return -ENOSPC;
+	}
+
+	qrtr_port_put(ipc);
+
+	return 0;
+}
+
+/* Queue packet for broadcast. */
+static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb)
+{
+	struct sk_buff *skbn;
+
+	mutex_lock(&qrtr_node_lock);
+	list_for_each_entry(node, &qrtr_all_nodes, item) {
+		skbn = skb_clone(skb, GFP_KERNEL);
+		if (!skbn)
+			break;
+		skb_set_owner_w(skbn, skb->sk);
+		qrtr_node_enqueue(node, skbn);
+	}
+	mutex_unlock(&qrtr_node_lock);
+
+	qrtr_local_enqueue(node, skb);
+
+	return 0;
+}
+
+static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+{
+	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
+	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *);
+	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+	struct sock *sk = sock->sk;
+	struct qrtr_node *node;
+	struct qrtr_hdr *hdr;
+	struct sk_buff *skb;
+	size_t plen;
+	int rc;
+
+	if (msg->msg_flags & ~(MSG_DONTWAIT))
+		return -EINVAL;
+
+	if (len > 65535)
+		return -EMSGSIZE;
+
+	lock_sock(sk);
+
+	if (addr) {
+		if (msg->msg_namelen < sizeof(*addr)) {
+			release_sock(sk);
+			return -EINVAL;
+		}
+
+		if (addr->sq_family != AF_QIPCRTR) {
+			release_sock(sk);
+			return -EINVAL;
+		}
+
+		rc = qrtr_autobind(sock);
+		if (rc) {
+			release_sock(sk);
+			return rc;
+		}
+	} else if (sk->sk_state == TCP_ESTABLISHED) {
+		addr = &ipc->peer;
+	} else {
+		release_sock(sk);
+		return -ENOTCONN;
+	}
+
+	node = NULL;
+	if (addr->sq_node == QRTR_NODE_BCAST) {
+		enqueue_fn = qrtr_bcast_enqueue;
+	} else if (addr->sq_node == ipc->us.sq_node) {
+		enqueue_fn = qrtr_local_enqueue;
+	} else {
+		enqueue_fn = qrtr_node_enqueue;
+		node = qrtr_node_lookup(addr->sq_node);
+		if (!node) {
+			release_sock(sk);
+			return -ECONNRESET;
+		}
+	}
+
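+	/* round the payload up to a 4-byte boundary, as qrtr_endpoint_post() expects */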
+	plen = (len + 3) & ~3;
+	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE,
+				  msg->msg_flags & MSG_DONTWAIT, &rc);
+	if (!skb)
+		goto out_node;
+
+	skb_reset_transport_header(skb);
+	skb_put(skb, len + QRTR_HDR_SIZE);
+
+	hdr = (struct qrtr_hdr *)skb_transport_header(skb);
+	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
+	hdr->src_node_id = cpu_to_le32(ipc->us.sq_node);
+	hdr->src_port_id = cpu_to_le32(ipc->us.sq_port);
+	hdr->confirm_rx = cpu_to_le32(0);
+	hdr->size = cpu_to_le32(len);
+	hdr->dst_node_id = cpu_to_le32(addr->sq_node);
+	hdr->dst_port_id = cpu_to_le32(addr->sq_port);
+
+	rc = skb_copy_datagram_from_iter(skb, QRTR_HDR_SIZE,
+					 &msg->msg_iter, len);
+	if (rc) {
+		kfree_skb(skb);
+		goto out_node;
+	}
+
+	if (plen != len) {
+		skb_pad(skb, plen - len);
+		skb_put(skb, plen - len);
+	}
+
+	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
+		if (len < 4) {
+			rc = -EINVAL;
+			kfree_skb(skb);
+			goto out_node;
+		}
+
+		/* control messages already carry the type as the first word of the payload */
+		skb_copy_bits(skb, QRTR_HDR_SIZE, &hdr->type, 4);
+	} else {
+		hdr->type = cpu_to_le32(QRTR_TYPE_DATA);
+	}
+
+	rc = enqueue_fn(node, skb);
+	if (rc >= 0)
+		rc = len;
+
+out_node:
+	qrtr_node_release(node);
+	release_sock(sk);
+
+	return rc;
+}
+
+static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
+			size_t size, int flags)
+{
+	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
+	const struct qrtr_hdr *phdr;
+	struct sock *sk = sock->sk;
+	struct sk_buff *skb;
+	int copied, rc;
+
+	lock_sock(sk);
+
+	if (sock_flag(sk, SOCK_ZAPPED)) {
+		release_sock(sk);
+		return -EADDRNOTAVAIL;
+	}
+
+	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+				flags & MSG_DONTWAIT, &rc);
+	if (!skb) {
+		release_sock(sk);
+		return rc;
+	}
+
+	phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
+	copied = le32_to_cpu(phdr->size);
+	if (copied > size) {
+		copied = size;
+		msg->msg_flags |= MSG_TRUNC;
+	}
+
+	rc = skb_copy_datagram_msg(skb, QRTR_HDR_SIZE, msg, copied);
+	if (rc < 0)
+		goto out;
+	rc = copied;
+
+	if (addr) {
+		addr->sq_family = AF_QIPCRTR;
+		addr->sq_node = le32_to_cpu(phdr->src_node_id);
+		addr->sq_port = le32_to_cpu(phdr->src_port_id);
+		msg->msg_namelen = sizeof(*addr);
+	}
+
+out:
+	skb_free_datagram(sk, skb);
+	release_sock(sk);
+
+	return rc;
+}
+
+static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
+			int len, int flags)
+{
+	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
+	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+	struct sock *sk = sock->sk;
+	int rc;
+
+	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
+		return -EINVAL;
+
+	lock_sock(sk);
+
+	sk->sk_state = TCP_CLOSE;
+	sock->state = SS_UNCONNECTED;
+
+	rc = qrtr_autobind(sock);
+	if (rc) {
+		release_sock(sk);
+		return rc;
+	}
+
+	ipc->peer = *addr;
+	sock->state = SS_CONNECTED;
+	sk->sk_state = TCP_ESTABLISHED;
+
+	release_sock(sk);
+
+	return 0;
+}
+
+static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
+			int *len, int peer)
+{
+	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+	struct sockaddr_qrtr qaddr;
+	struct sock *sk = sock->sk;
+
+	lock_sock(sk);
+	if (peer) {
+		if (sk->sk_state != TCP_ESTABLISHED) {
+			release_sock(sk);
+			return -ENOTCONN;
+		}
+
+		qaddr = ipc->peer;
+	} else {
+		qaddr = ipc->us;
+	}
+	release_sock(sk);
+
+	*len = sizeof(qaddr);
+	qaddr.sq_family = AF_QIPCRTR;
+
+	memcpy(saddr, &qaddr, sizeof(qaddr));
+
+	return 0;
+}
+
+static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
+	struct sock *sk = sock->sk;
+	struct sockaddr_qrtr *sq;
+	struct sk_buff *skb;
+	struct ifreq ifr;
+	long len = 0;
+	int rc = 0;
+
+	lock_sock(sk);
+
+	switch (cmd) {
+	case TIOCOUTQ:
+		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+		if (len < 0)
+			len = 0;
+		rc = put_user(len, (int __user *)argp);
+		break;
+	case TIOCINQ:
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb)
+			len = skb->len - QRTR_HDR_SIZE;
+		rc = put_user(len, (int __user *)argp);
+		break;
+	case SIOCGIFADDR:
+		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
+		*sq = ipc->us;
+		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
+			rc = -EFAULT;
+			break;
+		}
+		break;
+	case SIOCGSTAMP:
+		rc = sock_get_timestamp(sk, argp);
+		break;
+	case SIOCADDRT:
+	case SIOCDELRT:
+	case SIOCSIFADDR:
+	case SIOCGIFDSTADDR:
+	case SIOCSIFDSTADDR:
+	case SIOCGIFBRDADDR:
+	case SIOCSIFBRDADDR:
+	case SIOCGIFNETMASK:
+	case SIOCSIFNETMASK:
+		rc = -EINVAL;
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+
+	release_sock(sk);
+
+	return rc;
+}
+
+static int qrtr_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct qrtr_sock *ipc;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+
+	ipc = qrtr_sk(sk);
+	sk->sk_shutdown = SHUTDOWN_MASK;
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_state_change(sk);
+
+	sock_set_flag(sk, SOCK_DEAD);
+	sock->sk = NULL;
+
+	if (!sock_flag(sk, SOCK_ZAPPED))
+		qrtr_port_remove(ipc);
+
+	skb_queue_purge(&sk->sk_receive_queue);
+
+	release_sock(sk);
+	sock_put(sk);
+
+	return 0;
+}
+
+static const struct proto_ops qrtr_proto_ops = {
+	.owner		= THIS_MODULE,
+	.family		= AF_QIPCRTR,
+	.bind		= qrtr_bind,
+	.connect	= qrtr_connect,
+	.socketpair	= sock_no_socketpair,
+	.accept		= sock_no_accept,
+	.listen		= sock_no_listen,
+	.sendmsg	= qrtr_sendmsg,
+	.recvmsg	= qrtr_recvmsg,
+	.getname	= qrtr_getname,
+	.ioctl		= qrtr_ioctl,
+	.poll		= datagram_poll,
+	.shutdown	= sock_no_shutdown,
+	.setsockopt	= sock_no_setsockopt,
+	.getsockopt	= sock_no_getsockopt,
+	.release	= qrtr_release,
+	.mmap		= sock_no_mmap,
+	.sendpage	= sock_no_sendpage,
+};
+
+static struct proto qrtr_proto = {
+	.name		= "QIPCRTR",
+	.owner		= THIS_MODULE,
+	.obj_size	= sizeof(struct qrtr_sock),
+};
+
+static int qrtr_create(struct net *net, struct socket *sock,
+		       int protocol, int kern)
+{
+	struct qrtr_sock *ipc;
+	struct sock *sk;
+
+	if (sock->type != SOCK_DGRAM)
+		return -EPROTOTYPE;
+
+	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_set_flag(sk, SOCK_ZAPPED);
+
+	sock_init_data(sock, sk);
+	sock->ops = &qrtr_proto_ops;
+
+	ipc = qrtr_sk(sk);
+	ipc->us.sq_family = AF_QIPCRTR;
+	ipc->us.sq_node = qrtr_local_nid;
+	ipc->us.sq_port = 0;
+
+	return 0;
+}
+
+static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
+	[IFA_LOCAL] = { .type = NLA_U32 },
+};
+
+static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	struct nlattr *tb[IFA_MAX + 1];
+	struct ifaddrmsg *ifm;
+	int rc;
+
+	if (!netlink_capable(skb, CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!netlink_capable(skb, CAP_SYS_ADMIN))
+		return -EPERM;
+
+	ASSERT_RTNL();
+
+	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy);
+	if (rc < 0)
+		return rc;
+
+	ifm = nlmsg_data(nlh);
+	if (!tb[IFA_LOCAL])
+		return -EINVAL;
+
+	qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);
+	return 0;
+}
+
+static const struct net_proto_family qrtr_family = {
+	.owner	= THIS_MODULE,
+	.family	= AF_QIPCRTR,
+	.create	= qrtr_create,
+};
+
+static int __init qrtr_proto_init(void)
+{
+	int rc;
+
+	rc = proto_register(&qrtr_proto, 1);
+	if (rc)
+		return rc;
+
+	rc = sock_register(&qrtr_family);
+	if (rc) {
+		proto_unregister(&qrtr_proto);
+		return rc;
+	}
+
+	rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL);
+
+	return 0;
+}
+module_init(qrtr_proto_init);
+
+static void __exit qrtr_proto_fini(void)
+{
+	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
+	sock_unregister(qrtr_family.family);
+	proto_unregister(&qrtr_proto);
+}
+module_exit(qrtr_proto_fini);
+
+MODULE_DESCRIPTION("Qualcomm IPC-router driver");
+MODULE_LICENSE("GPL v2");
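
For context, here is a minimal userspace sketch of how the new AF_QIPCRTR socket
family above might be exercised.  It is not part of the patch set: it assumes the
uapi <linux/qrtr.h> header exports struct sockaddr_qrtr with the
sq_family/sq_node/sq_port fields used in qrtr.c, that AF_QIPCRTR is visible to
userspace (guessed as 42 below), and that the local node id has already been
configured through the RTM_NEWADDR handler (qrtr_addr_doit); the node and port
numbers are placeholders.

#include <sys/socket.h>
#include <linux/qrtr.h>		/* struct sockaddr_qrtr (uapi header from this series) */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#ifndef AF_QIPCRTR
#define AF_QIPCRTR 42		/* assumption: the family number used by this series */
#endif

int main(void)
{
	struct sockaddr_qrtr self = {
		.sq_family = AF_QIPCRTR,
		.sq_node = 1,		/* placeholder: must equal the configured local node id */
		.sq_port = 0,		/* 0 asks qrtr_port_assign() for an ephemeral port */
	};
	struct sockaddr_qrtr peer = {
		.sq_family = AF_QIPCRTR,
		.sq_node = 0,		/* placeholder remote node */
		.sq_port = 0x4001,	/* placeholder remote port */
	};
	char buf[32] = "ping";
	int fd;

	fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
	if (fd < 0 || bind(fd, (struct sockaddr *)&self, sizeof(self)) < 0) {
		perror("qrtr");
		return 1;
	}

	/* qrtr_sendmsg() builds the qrtr_hdr and routes the datagram by <node, port> */
	sendto(fd, buf, strlen(buf), 0, (struct sockaddr *)&peer, sizeof(peer));

	close(fd);
	return 0;
}

qrtr_sendmsg() pads the payload to a 4-byte boundary and fills in the qrtr_hdr
itself, so the application only hands over raw payload bytes.
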
diff --git a/net/qrtr/qrtr.h b/net/qrtr/qrtr.h
new file mode 100644
index 0000000..2b84871
--- /dev/null
+++ b/net/qrtr/qrtr.h
@@ -0,0 +1,31 @@
+#ifndef __QRTR_H_
+#define __QRTR_H_
+
+#include <linux/types.h>
+
+struct sk_buff;
+
+/* endpoint node id auto assignment */
+#define QRTR_EP_NID_AUTO (-1)
+
+/**
+ * struct qrtr_endpoint - endpoint handle
+ * @xmit: Callback for outgoing packets
+ *
+ * The socket buffer passed to the xmit function becomes owned by the endpoint
+ * driver.  As such, when the driver is done with the buffer, it should
+ * call kfree_skb() on failure, or consume_skb() on success.
+ */
+struct qrtr_endpoint {
+	int (*xmit)(struct qrtr_endpoint *ep, struct sk_buff *skb);
+	/* private: not for endpoint use */
+	struct qrtr_node *node;
+};
+
+int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid);
+
+void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
+
+int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len);
+
+#endif
diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c
new file mode 100644
index 0000000..0d11132
--- /dev/null
+++ b/net/qrtr/smd.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/soc/qcom/smd.h>
+
+#include "qrtr.h"
+
+struct qrtr_smd_dev {
+	struct qrtr_endpoint ep;
+	struct qcom_smd_channel *channel;
+	struct device *dev;
+};
+
+/* from smd to qrtr */
+static int qcom_smd_qrtr_callback(struct qcom_smd_channel *channel,
+				  const void *data, size_t len)
+{
+	struct qrtr_smd_dev *qdev = qcom_smd_get_drvdata(channel);
+	int rc;
+
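+	/* racing with probe: drvdata is not set until the endpoint has been registered */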
+	if (!qdev)
+		return -EAGAIN;
+
+	rc = qrtr_endpoint_post(&qdev->ep, data, len);
+	if (rc == -EINVAL) {
+		dev_err(qdev->dev, "invalid ipcrouter packet\n");
+		/* return 0 to let smd drop the packet */
+		rc = 0;
+	}
+
+	return rc;
+}
+
+/* from qrtr to smd */
+static int qcom_smd_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+{
+	struct qrtr_smd_dev *qdev = container_of(ep, struct qrtr_smd_dev, ep);
+	int rc;
+
+	rc = skb_linearize(skb);
+	if (rc)
+		goto out;
+
+	rc = qcom_smd_send(qdev->channel, skb->data, skb->len);
+
+out:
+	if (rc)
+		kfree_skb(skb);
+	else
+		consume_skb(skb);
+	return rc;
+}
+
+static int qcom_smd_qrtr_probe(struct qcom_smd_device *sdev)
+{
+	struct qrtr_smd_dev *qdev;
+	int rc;
+
+	qdev = devm_kzalloc(&sdev->dev, sizeof(*qdev), GFP_KERNEL);
+	if (!qdev)
+		return -ENOMEM;
+
+	qdev->channel = sdev->channel;
+	qdev->dev = &sdev->dev;
+	qdev->ep.xmit = qcom_smd_qrtr_send;
+
+	rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
+	if (rc)
+		return rc;
+
+	qcom_smd_set_drvdata(sdev->channel, qdev);
+	dev_set_drvdata(&sdev->dev, qdev);
+
+	dev_dbg(&sdev->dev, "Qualcomm SMD QRTR driver probed\n");
+
+	return 0;
+}
+
+static void qcom_smd_qrtr_remove(struct qcom_smd_device *sdev)
+{
+	struct qrtr_smd_dev *qdev = dev_get_drvdata(&sdev->dev);
+
+	qrtr_endpoint_unregister(&qdev->ep);
+
+	dev_set_drvdata(&sdev->dev, NULL);
+}
+
+static const struct qcom_smd_id qcom_smd_qrtr_smd_match[] = {
+	{ "IPCRTR" },
+	{}
+};
+
+static struct qcom_smd_driver qcom_smd_qrtr_driver = {
+	.probe = qcom_smd_qrtr_probe,
+	.remove = qcom_smd_qrtr_remove,
+	.callback = qcom_smd_qrtr_callback,
+	.smd_match_table = qcom_smd_qrtr_smd_match,
+	.driver = {
+		.name = "qcom_smd_qrtr",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_qcom_smd_driver(qcom_smd_qrtr_driver);
+
+MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e6144b8..6641bcf 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -299,7 +299,7 @@
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__set_bit_le(off, (void *)map->m_page_addrs[i]);
+	set_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__clear_bit_le(off, (void *)map->m_page_addrs[i]);
+	clear_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 8764970..310cabc 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -194,7 +194,7 @@
 		dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
 		dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
 		dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
-		dp->dp_ack_seq = rds_ib_piggyb_ack(ic);
+		dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
 
 		/* Advertise flow control */
 		if (ic->i_flowctl) {
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 977fb86..abc8cc8 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -796,7 +796,7 @@
 
 		addr = kmap_atomic(sg_page(&frag->f_sg));
 
-		src = addr + frag_off;
+		src = addr + frag->f_sg.offset + frag_off;
 		dst = (void *)map->m_page_addrs[map_page] + map_off;
 		for (k = 0; k < to_copy; k += 8) {
 			/* Record ports that became uncongested, ie
diff --git a/net/rds/page.c b/net/rds/page.c
index 616f21f..e2b5a58 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -135,8 +135,8 @@
 			if (rem->r_offset != 0)
 				rds_stats_inc(s_page_remainder_hit);
 
-			rem->r_offset += bytes;
-			if (rem->r_offset == PAGE_SIZE) {
+			rem->r_offset += ALIGN(bytes, 8);
+			if (rem->r_offset >= PAGE_SIZE) {
 				__free_page(rem->r_page);
 				rem->r_page = NULL;
 			}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 61ed2a8..86187da 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -127,7 +127,7 @@
 
 /*
  * This is the only path that sets tc->t_sock.  Send and receive trust that
- * it is set.  The RDS_CONN_CONNECTED bit protects those paths from being
+ * it is set.  The RDS_CONN_UP bit protects those paths from being
  * called while it isn't set.
  */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
@@ -216,6 +216,7 @@
 	if (!tc)
 		return -ENOMEM;
 
+	mutex_init(&tc->t_conn_lock);
 	tc->t_sock = NULL;
 	tc->t_tinc = NULL;
 	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 64f873c..41c2283 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -12,6 +12,10 @@
 
 	struct list_head	t_tcp_node;
 	struct rds_connection   *conn;
+	/* t_conn_lock synchronizes the connection establishment between
+	 * rds_tcp_accept_one and rds_tcp_conn_connect
+	 */
+	struct mutex		t_conn_lock;
 	struct socket		*t_sock;
 	void			*t_orig_write_space;
 	void			*t_orig_data_ready;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 5cb1687..49a3fcf 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -78,7 +78,14 @@
 	struct socket *sock = NULL;
 	struct sockaddr_in src, dest;
 	int ret;
+	struct rds_tcp_connection *tc = conn->c_transport_data;
 
+	mutex_lock(&tc->t_conn_lock);
+
+	if (rds_conn_up(conn)) {
+		mutex_unlock(&tc->t_conn_lock);
+		return 0;
+	}
 	ret = sock_create_kern(rds_conn_net(conn), PF_INET,
 			       SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (ret < 0)
@@ -120,6 +127,7 @@
 	}
 
 out:
+	mutex_unlock(&tc->t_conn_lock);
 	if (sock)
 		sock_release(sock);
 	return ret;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0936a4a..be263cd 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -76,7 +76,9 @@
 	struct rds_connection *conn;
 	int ret;
 	struct inet_sock *inet;
-	struct rds_tcp_connection *rs_tcp;
+	struct rds_tcp_connection *rs_tcp = NULL;
+	int conn_state;
+	struct sock *nsk;
 
 	ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
 			       sock->sk->sk_type, sock->sk->sk_protocol,
@@ -115,28 +117,44 @@
 	 * rds_tcp_state_change() will do that cleanup
 	 */
 	rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
-	if (rs_tcp->t_sock &&
-	    ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
-		struct sock *nsk = new_sock->sk;
-
-		nsk->sk_user_data = NULL;
-		nsk->sk_prot->disconnect(nsk, 0);
-		tcp_done(nsk);
-		new_sock = NULL;
-		ret = 0;
-		goto out;
-	} else if (rs_tcp->t_sock) {
-		rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
-		conn->c_outgoing = 0;
-	}
-
 	rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
+	mutex_lock(&rs_tcp->t_conn_lock);
+	conn_state = rds_conn_state(conn);
+	if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP)
+		goto rst_nsk;
+	if (rs_tcp->t_sock) {
+		/* Need to resolve a duelling SYN between peers.
+		 * We have an outstanding SYN to this peer, which may
+		 * potentially have transitioned to the RDS_CONN_UP state,
+		 * so we must quiesce any send threads before resetting
+		 * c_transport_data.
+		 */
+		wait_event(conn->c_waitq,
+			   !test_bit(RDS_IN_XMIT, &conn->c_flags));
+		if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
+			goto rst_nsk;
+		} else if (rs_tcp->t_sock) {
+			rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+			conn->c_outgoing = 0;
+		}
+	}
 	rds_tcp_set_callbacks(new_sock, conn);
-	rds_connect_complete(conn);
+	rds_connect_complete(conn); /* marks RDS_CONN_UP */
 	new_sock = NULL;
 	ret = 0;
-
+	goto out;
+rst_nsk:
+	/* reset the newly returned accept sock and bail */
+	nsk = new_sock->sk;
+	rds_tcp_stats_inc(s_tcp_listen_closed_stale);
+	nsk->sk_user_data = NULL;
+	nsk->sk_prot->disconnect(nsk, 0);
+	tcp_done(nsk);
+	new_sock = NULL;
+	ret = 0;
 out:
+	if (rs_tcp)
+		mutex_unlock(&rs_tcp->t_conn_lock);
 	if (new_sock)
 		sock_release(new_sock);
 	return ret;
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 27a9921..d75d8b5 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -207,22 +207,14 @@
 		}
 
 		if (left && tc->t_tinc_data_rem) {
-			clone = skb_clone(skb, arg->gfp);
+			to_copy = min(tc->t_tinc_data_rem, left);
+
+			clone = pskb_extract(skb, offset, to_copy, arg->gfp);
 			if (!clone) {
 				desc->error = -ENOMEM;
 				goto out;
 			}
 
-			to_copy = min(tc->t_tinc_data_rem, left);
-			if (!pskb_pull(clone, offset) ||
-			    pskb_trim(clone, to_copy)) {
-				pr_warn("rds_tcp_data_recv: pull/trim failed "
-					"left %zu data_rem %zu skb_len %d\n",
-					left, tc->t_tinc_data_rem, skb->len);
-				kfree_skb(clone);
-				desc->error = -ENOMEM;
-				goto out;
-			}
 			skb_queue_tail(&tinc->ti_skb_list, clone);
 
 			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 03f26e3..884027f 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1141,6 +1141,7 @@
 {
 	struct rfkill *rfkill;
 	struct rfkill_event ev;
+	int ret;
 
 	/* we don't need the 'hard' variable but accept it */
 	if (count < RFKILL_EVENT_SIZE_V1 - 1)
@@ -1155,29 +1156,36 @@
 	if (copy_from_user(&ev, buf, count))
 		return -EFAULT;
 
-	if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
-		return -EINVAL;
-
 	if (ev.type >= NUM_RFKILL_TYPES)
 		return -EINVAL;
 
 	mutex_lock(&rfkill_global_mutex);
 
-	if (ev.op == RFKILL_OP_CHANGE_ALL)
+	switch (ev.op) {
+	case RFKILL_OP_CHANGE_ALL:
 		rfkill_update_global_state(ev.type, ev.soft);
-
-	list_for_each_entry(rfkill, &rfkill_list, node) {
-		if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
-			continue;
-
-		if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
-			continue;
-
-		rfkill_set_block(rfkill, ev.soft);
+		list_for_each_entry(rfkill, &rfkill_list, node)
+			if (rfkill->type == ev.type ||
+			    ev.type == RFKILL_TYPE_ALL)
+				rfkill_set_block(rfkill, ev.soft);
+		ret = 0;
+		break;
+	case RFKILL_OP_CHANGE:
+		list_for_each_entry(rfkill, &rfkill_list, node)
+			if (rfkill->idx == ev.idx &&
+			    (rfkill->type == ev.type ||
+			     ev.type == RFKILL_TYPE_ALL))
+				rfkill_set_block(rfkill, ev.soft);
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
 	}
+
 	mutex_unlock(&rfkill_global_mutex);
 
-	return count;
+	return ret ?: count;
 }
 
 static int rfkill_fop_release(struct inode *inode, struct file *file)
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index 23dcef1..784c531 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -30,7 +30,7 @@
 
 
 config RXKAD
-	tristate "RxRPC Kerberos security"
+	bool "RxRPC Kerberos security"
 	depends on AF_RXRPC
 	select CRYPTO
 	select CRYPTO_MANAGER
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index ec126f9..e05a06e 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -18,11 +18,12 @@
 	ar-recvmsg.o \
 	ar-security.o \
 	ar-skbuff.o \
-	ar-transport.o
+	ar-transport.o \
+	insecure.o \
+	misc.o
 
 af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o
+af-rxrpc-$(CONFIG_RXKAD) += rxkad.o
 af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o
 
 obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
-
-obj-$(CONFIG_RXKAD) += rxkad.o
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 9d935fa..e45e94c 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -806,6 +806,12 @@
 		goto error_work_queue;
 	}
 
+	ret = rxrpc_init_security();
+	if (ret < 0) {
+		printk(KERN_CRIT "RxRPC: Cannot initialise security\n");
+		goto error_security;
+	}
+
 	ret = proto_register(&rxrpc_proto, 1);
 	if (ret < 0) {
 		printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
@@ -853,6 +859,8 @@
 	proto_unregister(&rxrpc_proto);
 error_proto:
 	destroy_workqueue(rxrpc_workqueue);
+error_security:
+	rxrpc_exit_security();
 error_work_queue:
 	kmem_cache_destroy(rxrpc_call_jar);
 error_call_jar:
@@ -883,6 +891,7 @@
 	remove_proc_entry("rxrpc_conns", init_net.proc_net);
 	remove_proc_entry("rxrpc_calls", init_net.proc_net);
 	destroy_workqueue(rxrpc_workqueue);
+	rxrpc_exit_security();
 	kmem_cache_destroy(rxrpc_call_jar);
 	_leave("");
 }
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index 277731a..e7a7f05 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -108,7 +108,7 @@
 		goto error;
 	}
 
-	conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO);
+	conn = rxrpc_incoming_connection(trans, &sp->hdr);
 	rxrpc_put_transport(trans);
 	if (IS_ERR(conn)) {
 		_debug("no conn");
@@ -116,7 +116,7 @@
 		goto error;
 	}
 
-	call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO);
+	call = rxrpc_incoming_call(rx, conn, &sp->hdr);
 	rxrpc_put_connection(conn);
 	if (IS_ERR(call)) {
 		_debug("no call");
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index 16d9670..374478e 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -20,74 +20,6 @@
 #include "ar-internal.h"
 
 /*
- * How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in jiffies).
- */
-unsigned int rxrpc_requested_ack_delay = 1;
-
-/*
- * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
- *
- * We use this when we've received new data packets.  If those packets aren't
- * all consumed within this time we will send a DELAY ACK if an ACK was not
- * requested to let the sender know it doesn't need to resend.
- */
-unsigned int rxrpc_soft_ack_delay = 1 * HZ;
-
-/*
- * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
- *
- * We use this when we've consumed some previously soft-ACK'd packets when
- * further packets aren't immediately received to decide when to send an IDLE
- * ACK let the other end know that it can free up its Tx buffer space.
- */
-unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
-
-/*
- * Receive window size in packets.  This indicates the maximum number of
- * unconsumed received packets we're willing to retain in memory.  Once this
- * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
- * packets.
- */
-unsigned int rxrpc_rx_window_size = 32;
-
-/*
- * Maximum Rx MTU size.  This indicates to the sender the size of jumbo packet
- * made by gluing normal packets together that we're willing to handle.
- */
-unsigned int rxrpc_rx_mtu = 5692;
-
-/*
- * The maximum number of fragments in a received jumbo packet that we tell the
- * sender that we're willing to handle.
- */
-unsigned int rxrpc_rx_jumbo_max = 4;
-
-static const char *rxrpc_acks(u8 reason)
-{
-	static const char *const str[] = {
-		"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
-		"IDL", "-?-"
-	};
-
-	if (reason >= ARRAY_SIZE(str))
-		reason = ARRAY_SIZE(str) - 1;
-	return str[reason];
-}
-
-static const s8 rxrpc_ack_priority[] = {
-	[0]				= 0,
-	[RXRPC_ACK_DELAY]		= 1,
-	[RXRPC_ACK_REQUESTED]		= 2,
-	[RXRPC_ACK_IDLE]		= 3,
-	[RXRPC_ACK_PING_RESPONSE]	= 4,
-	[RXRPC_ACK_DUPLICATE]		= 5,
-	[RXRPC_ACK_OUT_OF_SEQUENCE]	= 6,
-	[RXRPC_ACK_EXCEEDS_WINDOW]	= 7,
-	[RXRPC_ACK_NOSPACE]		= 8,
-};
-
-/*
  * propose an ACK be sent
  */
 void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
@@ -426,7 +358,7 @@
 	int tail = call->acks_tail, old_tail;
 	int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
 
-	kenter("{%u,%u},%u", call->acks_hard, win, hard);
+	_enter("{%u,%u},%u", call->acks_hard, win, hard);
 
 	ASSERTCMP(hard - call->acks_hard, <=, win);
 
@@ -656,7 +588,8 @@
 		_proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
 
 		/* secured packets must be verified and possibly decrypted */
-		if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
+		if (call->conn->security->verify_packet(call, skb,
+							_abort_code) < 0)
 			goto protocol_error;
 
 		rxrpc_insert_oos_packet(call, skb);
@@ -901,8 +834,8 @@
 
 	/* there's a good chance we're going to have to send a message, so set
 	 * one up in advance */
-	msg.msg_name	= &call->conn->trans->peer->srx.transport.sin;
-	msg.msg_namelen	= sizeof(call->conn->trans->peer->srx.transport.sin);
+	msg.msg_name	= &call->conn->trans->peer->srx.transport;
+	msg.msg_namelen	= call->conn->trans->peer->srx.transport_len;
 	msg.msg_control	= NULL;
 	msg.msg_controllen = 0;
 	msg.msg_flags	= 0;
@@ -973,7 +906,7 @@
 				       ECONNABORTED, true) < 0)
 			goto no_mem;
 		whdr.type = RXRPC_PACKET_TYPE_ABORT;
-		data = htonl(call->abort_code);
+		data = htonl(call->local_abort);
 		iov[1].iov_base = &data;
 		iov[1].iov_len = sizeof(data);
 		genbit = RXRPC_CALL_EV_ABORT;
@@ -1036,7 +969,7 @@
 		write_lock_bh(&call->state_lock);
 		if (call->state <= RXRPC_CALL_COMPLETE) {
 			call->state = RXRPC_CALL_LOCALLY_ABORTED;
-			call->abort_code = RX_CALL_TIMEOUT;
+			call->local_abort = RX_CALL_TIMEOUT;
 			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 		}
 		write_unlock_bh(&call->state_lock);
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index 7c8d300..571a41f 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -411,18 +411,17 @@
  */
 struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
 				       struct rxrpc_connection *conn,
-				       struct rxrpc_host_header *hdr,
-				       gfp_t gfp)
+				       struct rxrpc_host_header *hdr)
 {
 	struct rxrpc_call *call, *candidate;
 	struct rb_node **p, *parent;
 	u32 call_id;
 
-	_enter(",%d,,%x", conn->debug_id, gfp);
+	_enter(",%d", conn->debug_id);
 
 	ASSERT(rx != NULL);
 
-	candidate = rxrpc_alloc_call(gfp);
+	candidate = rxrpc_alloc_call(GFP_NOIO);
 	if (!candidate)
 		return ERR_PTR(-EBUSY);
 
@@ -682,7 +681,7 @@
 	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
 		_debug("+++ ABORTING STATE %d +++\n", call->state);
 		call->state = RXRPC_CALL_LOCALLY_ABORTED;
-		call->abort_code = RX_CALL_DEAD;
+		call->local_abort = RX_CALL_DEAD;
 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 		rxrpc_queue_call(call);
 	}
@@ -758,7 +757,7 @@
 		if (call->state < RXRPC_CALL_COMPLETE) {
 			_debug("abort call %p", call);
 			call->state = RXRPC_CALL_LOCALLY_ABORTED;
-			call->abort_code = RX_CALL_DEAD;
+			call->local_abort = RX_CALL_DEAD;
 			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
 				sched = true;
 		}
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 9942da1..97f4fae 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -207,6 +207,7 @@
 		INIT_LIST_HEAD(&conn->bundle_link);
 		conn->calls = RB_ROOT;
 		skb_queue_head_init(&conn->rx_queue);
+		conn->security = &rxrpc_no_security;
 		rwlock_init(&conn->lock);
 		spin_lock_init(&conn->state_lock);
 		atomic_set(&conn->usage, 1);
@@ -564,8 +565,7 @@
 		     candidate->debug_id, candidate->trans->debug_id);
 
 		rxrpc_assign_connection_id(candidate);
-		if (candidate->security)
-			candidate->security->prime_packet_security(candidate);
+		candidate->security->prime_packet_security(candidate);
 
 		/* leave the candidate lurking in zombie mode attached to the
 		 * bundle until we're ready for it */
@@ -619,8 +619,7 @@
  */
 struct rxrpc_connection *
 rxrpc_incoming_connection(struct rxrpc_transport *trans,
-			  struct rxrpc_host_header *hdr,
-			  gfp_t gfp)
+			  struct rxrpc_host_header *hdr)
 {
 	struct rxrpc_connection *conn, *candidate = NULL;
 	struct rb_node *p, **pp;
@@ -659,7 +658,7 @@
 
 	/* not yet present - create a candidate for a new record and then
 	 * redo the search */
-	candidate = rxrpc_alloc_connection(gfp);
+	candidate = rxrpc_alloc_connection(GFP_NOIO);
 	if (!candidate) {
 		_leave(" = -ENOMEM");
 		return ERR_PTR(-ENOMEM);
@@ -831,7 +830,10 @@
 	ASSERT(RB_EMPTY_ROOT(&conn->calls));
 	rxrpc_purge_queue(&conn->rx_queue);
 
-	rxrpc_clear_conn_security(conn);
+	conn->security->clear(conn);
+	key_put(conn->key);
+	key_put(conn->server_key);
+
 	rxrpc_put_transport(conn->trans);
 	kfree(conn);
 	_leave("");
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
index 1bdaaed..5f95639 100644
--- a/net/rxrpc/ar-connevent.c
+++ b/net/rxrpc/ar-connevent.c
@@ -40,11 +40,13 @@
 		write_lock(&call->state_lock);
 		if (call->state <= RXRPC_CALL_COMPLETE) {
 			call->state = state;
-			call->abort_code = abort_code;
-			if (state == RXRPC_CALL_LOCALLY_ABORTED)
+			if (state == RXRPC_CALL_LOCALLY_ABORTED) {
+				call->local_abort = conn->local_abort;
 				set_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
-			else
+			} else {
+				call->remote_abort = conn->remote_abort;
 				set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
+			}
 			rxrpc_queue_call(call);
 		}
 		write_unlock(&call->state_lock);
@@ -84,8 +86,8 @@
 
 	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code);
 
-	msg.msg_name	= &conn->trans->peer->srx.transport.sin;
-	msg.msg_namelen	= sizeof(conn->trans->peer->srx.transport.sin);
+	msg.msg_name	= &conn->trans->peer->srx.transport;
+	msg.msg_namelen	= conn->trans->peer->srx.transport_len;
 	msg.msg_control	= NULL;
 	msg.msg_controllen = 0;
 	msg.msg_flags	= 0;
@@ -101,7 +103,7 @@
 	whdr._rsvd	= 0;
 	whdr.serviceId	= htons(conn->service_id);
 
-	word = htonl(abort_code);
+	word		= htonl(conn->local_abort);
 
 	iov[0].iov_base	= &whdr;
 	iov[0].iov_len	= sizeof(whdr);
@@ -112,7 +114,7 @@
 
 	serial = atomic_inc_return(&conn->serial);
 	whdr.serial = htonl(serial);
-	_proto("Tx CONN ABORT %%%u { %d }", serial, abort_code);
+	_proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
 
 	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
 	if (ret < 0) {
@@ -172,15 +174,10 @@
 		return -ECONNABORTED;
 
 	case RXRPC_PACKET_TYPE_CHALLENGE:
-		if (conn->security)
-			return conn->security->respond_to_challenge(
-				conn, skb, _abort_code);
-		return -EPROTO;
+		return conn->security->respond_to_challenge(conn, skb,
+							    _abort_code);
 
 	case RXRPC_PACKET_TYPE_RESPONSE:
-		if (!conn->security)
-			return -EPROTO;
-
 		ret = conn->security->verify_response(conn, skb, _abort_code);
 		if (ret < 0)
 			return ret;
@@ -236,8 +233,6 @@
 		}
 	}
 
-	ASSERT(conn->security != NULL);
-
 	if (conn->security->issue_challenge(conn) < 0) {
 		abort_code = RX_CALL_DEAD;
 		ret = -ENOMEM;
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 63ed75c..6ff9741 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -25,12 +25,6 @@
 #include <net/net_namespace.h>
 #include "ar-internal.h"
 
-const char *rxrpc_pkts[] = {
-	"?00",
-	"DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
-	"?09", "?10", "?11", "?12", "VERSION", "?14", "?15"
-};
-
 /*
  * queue a packet for recvmsg to pass to userspace
  * - the caller must hold a lock on call->lock
@@ -199,7 +193,7 @@
 
 	/* if the packet need security things doing to it, then it goes down
 	 * the slow path */
-	if (call->conn->security)
+	if (call->conn->security_ix)
 		goto enqueue_packet;
 
 	sp->call = call;
@@ -355,7 +349,7 @@
 		write_lock_bh(&call->state_lock);
 		if (call->state < RXRPC_CALL_COMPLETE) {
 			call->state = RXRPC_CALL_REMOTELY_ABORTED;
-			call->abort_code = abort_code;
+			call->remote_abort = abort_code;
 			set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
 			rxrpc_queue_call(call);
 		}
@@ -428,7 +422,7 @@
 protocol_error_locked:
 	if (call->state <= RXRPC_CALL_COMPLETE) {
 		call->state = RXRPC_CALL_LOCALLY_ABORTED;
-		call->abort_code = RX_PROTOCOL_ERROR;
+		call->local_abort = RX_PROTOCOL_ERROR;
 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 		rxrpc_queue_call(call);
 	}
@@ -500,7 +494,7 @@
 	write_lock_bh(&call->state_lock);
 	if (call->state <= RXRPC_CALL_COMPLETE) {
 		call->state = RXRPC_CALL_LOCALLY_ABORTED;
-		call->abort_code = RX_PROTOCOL_ERROR;
+		call->local_abort = RX_PROTOCOL_ERROR;
 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 		rxrpc_queue_call(call);
 	}
@@ -612,9 +606,9 @@
 	struct rxrpc_wire_header whdr;
 
 	/* dig out the RxRPC connection details */
-	if (skb_copy_bits(skb, sizeof(struct udphdr), &whdr, sizeof(whdr)) < 0)
+	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
 		return -EBADMSG;
-	if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(whdr)))
+	if (!pskb_pull(skb, sizeof(whdr)))
 		BUG();
 
 	memset(sp, 0, sizeof(*sp));
@@ -704,12 +698,12 @@
 	if (skb_checksum_complete(skb)) {
 		rxrpc_free_skb(skb);
 		rxrpc_put_local(local);
-		UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0);
+		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
 		_leave(" [CSUM failed]");
 		return;
 	}
 
-	UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0);
+	__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);
 
 	/* The socket buffer we have is owned by UDP, with UDP's data all over
 	 * it, but we really want our own data there.
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index cd6cdbe..f0b807a 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -9,6 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <net/sock.h>
 #include <rxrpc/packet.h>
 
 #if 0
@@ -124,11 +125,15 @@
  * RxRPC security module interface
  */
 struct rxrpc_security {
-	struct module		*owner;		/* providing module */
-	struct list_head	link;		/* link in master list */
 	const char		*name;		/* name of this service */
 	u8			security_index;	/* security type provided */
 
+	/* Initialise a security service */
+	int (*init)(void);
+
+	/* Clean up a security service */
+	void (*exit)(void);
+
 	/* initialise a connection's security */
 	int (*init_connection_security)(struct rxrpc_connection *);
 
@@ -268,7 +273,7 @@
 	struct rb_root		calls;		/* calls on this connection */
 	struct sk_buff_head	rx_queue;	/* received conn-level packets */
 	struct rxrpc_call	*channels[RXRPC_MAXCALLS]; /* channels (active calls) */
-	struct rxrpc_security	*security;	/* applied security module */
+	const struct rxrpc_security *security;	/* applied security module */
 	struct key		*key;		/* security for this connection (client) */
 	struct key		*server_key;	/* security for this service */
 	struct crypto_skcipher	*cipher;	/* encryption handle */
@@ -289,7 +294,9 @@
 		RXRPC_CONN_LOCALLY_ABORTED,	/* - conn aborted locally */
 		RXRPC_CONN_NETWORK_ERROR,	/* - conn terminated by network error */
 	} state;
-	int			error;		/* error code for local abort */
+	u32			local_abort;	/* local abort code */
+	u32			remote_abort;	/* remote abort code */
+	int			error;		/* local error incurred */
 	int			debug_id;	/* debug ID for printks */
 	unsigned int		call_counter;	/* call ID counter */
 	atomic_t		serial;		/* packet serial number counter */
@@ -399,7 +406,9 @@
 	rwlock_t		state_lock;	/* lock for state transition */
 	atomic_t		usage;
 	atomic_t		sequence;	/* Tx data packet sequence counter */
-	u32			abort_code;	/* local/remote abort code */
+	u32			local_abort;	/* local abort code */
+	u32			remote_abort;	/* remote abort code */
+	int			error;		/* local error incurred */
 	enum rxrpc_call_state	state : 8;	/* current state of call */
 	int			debug_id;	/* debug ID for printks */
 	u8			channel;	/* connection channel occupied by this call */
@@ -453,7 +462,7 @@
 {
 	write_lock_bh(&call->state_lock);
 	if (call->state < RXRPC_CALL_COMPLETE) {
-		call->abort_code = abort_code;
+		call->local_abort = abort_code;
 		call->state = RXRPC_CALL_LOCALLY_ABORTED;
 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 	}
@@ -478,13 +487,6 @@
 /*
  * ar-ack.c
  */
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
-extern unsigned int rxrpc_rx_window_size;
-extern unsigned int rxrpc_rx_mtu;
-extern unsigned int rxrpc_rx_jumbo_max;
-
 void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
 void rxrpc_process_call(struct work_struct *);
@@ -506,7 +508,7 @@
 					 unsigned long, int, gfp_t);
 struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
 				       struct rxrpc_connection *,
-				       struct rxrpc_host_header *, gfp_t);
+				       struct rxrpc_host_header *);
 struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
 void rxrpc_release_call(struct rxrpc_call *);
 void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
@@ -531,8 +533,7 @@
 struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
 					       struct rxrpc_host_header *);
 extern struct rxrpc_connection *
-rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *,
-			  gfp_t);
+rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *);
 
 /*
  * ar-connevent.c
@@ -550,8 +551,6 @@
 /*
  * ar-input.c
  */
-extern const char *rxrpc_pkts[];
-
 void rxrpc_data_ready(struct sock *);
 int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
 void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
@@ -610,14 +609,10 @@
 /*
  * ar-security.c
  */
-int rxrpc_register_security(struct rxrpc_security *);
-void rxrpc_unregister_security(struct rxrpc_security *);
+int __init rxrpc_init_security(void);
+void rxrpc_exit_security(void);
 int rxrpc_init_client_conn_security(struct rxrpc_connection *);
 int rxrpc_init_server_conn_security(struct rxrpc_connection *);
-int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t,
-			void *);
-int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, u32 *);
-void rxrpc_clear_conn_security(struct rxrpc_connection *);
 
 /*
  * ar-skbuff.c
@@ -637,6 +632,33 @@
 					     struct rxrpc_peer *);
 
 /*
+ * insecure.c
+ */
+extern const struct rxrpc_security rxrpc_no_security;
+
+/*
+ * misc.c
+ */
+extern unsigned int rxrpc_requested_ack_delay;
+extern unsigned int rxrpc_soft_ack_delay;
+extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned int rxrpc_rx_window_size;
+extern unsigned int rxrpc_rx_mtu;
+extern unsigned int rxrpc_rx_jumbo_max;
+
+extern const char *const rxrpc_pkts[];
+extern const s8 rxrpc_ack_priority[];
+
+extern const char *rxrpc_acks(u8 reason);
+
+/*
+ * rxkad.c
+ */
+#ifdef CONFIG_RXKAD
+extern const struct rxrpc_security rxkad;
+#endif
+
+/*
  * sysctl.c
  */
 #ifdef CONFIG_SYSCTL
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 3fb492e..1021b4c 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -965,7 +965,7 @@
 
 	key = key_alloc(&key_type_rxrpc, "x",
 			GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, 0,
-			KEY_ALLOC_NOT_IN_QUOTA);
+			KEY_ALLOC_NOT_IN_QUOTA, NULL);
 	if (IS_ERR(key)) {
 		_leave(" = -ENOMEM [alloc %ld]", PTR_ERR(key));
 		return -ENOMEM;
@@ -1012,7 +1012,7 @@
 
 	key = key_alloc(&key_type_rxrpc, keyname,
 			GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
-			KEY_POS_SEARCH, KEY_ALLOC_NOT_IN_QUOTA);
+			KEY_POS_SEARCH, KEY_ALLOC_NOT_IN_QUOTA, NULL);
 	if (IS_ERR(key))
 		return key;
 
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index d36fb6e..51cb100 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -110,7 +110,7 @@
 
 	if (call->state <= RXRPC_CALL_COMPLETE) {
 		call->state = RXRPC_CALL_LOCALLY_ABORTED;
-		call->abort_code = abort_code;
+		call->local_abort = abort_code;
 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 		del_timer_sync(&call->resend_timer);
 		del_timer_sync(&call->ack_timer);
@@ -663,7 +663,7 @@
 			size_t pad;
 
 			/* pad out if we're using security */
-			if (conn->security) {
+			if (conn->security_ix) {
 				pad = conn->security_size + skb->mark;
 				pad = conn->size_align - pad;
 				pad &= conn->size_align - 1;
@@ -695,7 +695,7 @@
 			if (more && seq & 1)
 				sp->hdr.flags |= RXRPC_REQUEST_ACK;
 
-			ret = rxrpc_secure_packet(
+			ret = conn->security->secure_packet(
 				call, skb, skb->mark,
 				skb->head + sizeof(struct rxrpc_wire_header));
 			if (ret < 0)
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
index 525b2ba..225163b 100644
--- a/net/rxrpc/ar-proc.c
+++ b/net/rxrpc/ar-proc.c
@@ -80,7 +80,7 @@
 		   call->conn->in_clientflag ? "Svc" : "Clt",
 		   atomic_read(&call->usage),
 		   rxrpc_call_states[call->state],
-		   call->abort_code,
+		   call->remote_abort ?: call->local_abort,
 		   call->user_call_ID);
 
 	return 0;
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 64facba..160f092 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -288,7 +288,11 @@
 		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
 		break;
 	case RXRPC_SKB_MARK_REMOTE_ABORT:
-		abort_code = call->abort_code;
+		abort_code = call->remote_abort;
+		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
+		break;
+	case RXRPC_SKB_MARK_LOCAL_ABORT:
+		abort_code = call->local_abort;
 		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
 		break;
 	case RXRPC_SKB_MARK_NET_ERROR:
@@ -303,6 +307,7 @@
 			       &abort_code);
 		break;
 	default:
+		pr_err("RxRPC: Unknown packet mark %u\n", skb->mark);
 		BUG();
 		break;
 	}
@@ -401,9 +406,14 @@
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 
-	ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_REMOTE_ABORT);
-
-	return sp->call->abort_code;
+	switch (skb->mark) {
+	case RXRPC_SKB_MARK_REMOTE_ABORT:
+		return sp->call->remote_abort;
+	case RXRPC_SKB_MARK_LOCAL_ABORT:
+		return sp->call->local_abort;
+	default:
+		BUG();
+	}
 }
 
 EXPORT_SYMBOL(rxrpc_kernel_get_abort_code);
diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c
index ceff639..d223253 100644
--- a/net/rxrpc/ar-security.c
+++ b/net/rxrpc/ar-security.c
@@ -22,109 +22,60 @@
 static LIST_HEAD(rxrpc_security_methods);
 static DECLARE_RWSEM(rxrpc_security_sem);
 
-/*
- * get an RxRPC security module
- */
-static struct rxrpc_security *rxrpc_security_get(struct rxrpc_security *sec)
+static const struct rxrpc_security *rxrpc_security_types[] = {
+	[RXRPC_SECURITY_NONE]	= &rxrpc_no_security,
+#ifdef CONFIG_RXKAD
+	[RXRPC_SECURITY_RXKAD]	= &rxkad,
+#endif
+};
+
+int __init rxrpc_init_security(void)
 {
-	return try_module_get(sec->owner) ? sec : NULL;
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(rxrpc_security_types); i++) {
+		if (rxrpc_security_types[i]) {
+			ret = rxrpc_security_types[i]->init();
+			if (ret < 0)
+				goto failed;
+		}
+	}
+
+	return 0;
+
+failed:
+	for (i--; i >= 0; i--)
+		if (rxrpc_security_types[i])
+			rxrpc_security_types[i]->exit();
+	return ret;
 }
 
-/*
- * release an RxRPC security module
- */
-static void rxrpc_security_put(struct rxrpc_security *sec)
+void rxrpc_exit_security(void)
 {
-	module_put(sec->owner);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(rxrpc_security_types); i++)
+		if (rxrpc_security_types[i])
+			rxrpc_security_types[i]->exit();
 }
 
 /*
  * look up an rxrpc security module
  */
-static struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
+static const struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
 {
-	struct rxrpc_security *sec = NULL;
-
-	_enter("");
-
-	down_read(&rxrpc_security_sem);
-
-	list_for_each_entry(sec, &rxrpc_security_methods, link) {
-		if (sec->security_index == security_index) {
-			if (unlikely(!rxrpc_security_get(sec)))
-				break;
-			goto out;
-		}
-	}
-
-	sec = NULL;
-out:
-	up_read(&rxrpc_security_sem);
-	_leave(" = %p [%s]", sec, sec ? sec->name : "");
-	return sec;
+	if (security_index >= ARRAY_SIZE(rxrpc_security_types))
+		return NULL;
+	return rxrpc_security_types[security_index];
 }
 
-/**
- * rxrpc_register_security - register an RxRPC security handler
- * @sec: security module
- *
- * register an RxRPC security handler for use by RxRPC
- */
-int rxrpc_register_security(struct rxrpc_security *sec)
-{
-	struct rxrpc_security *psec;
-	int ret;
-
-	_enter("");
-	down_write(&rxrpc_security_sem);
-
-	ret = -EEXIST;
-	list_for_each_entry(psec, &rxrpc_security_methods, link) {
-		if (psec->security_index == sec->security_index)
-			goto out;
-	}
-
-	list_add(&sec->link, &rxrpc_security_methods);
-
-	printk(KERN_NOTICE "RxRPC: Registered security type %d '%s'\n",
-	       sec->security_index, sec->name);
-	ret = 0;
-
-out:
-	up_write(&rxrpc_security_sem);
-	_leave(" = %d", ret);
-	return ret;
-}
-
-EXPORT_SYMBOL_GPL(rxrpc_register_security);
-
-/**
- * rxrpc_unregister_security - unregister an RxRPC security handler
- * @sec: security module
- *
- * unregister an RxRPC security handler
- */
-void rxrpc_unregister_security(struct rxrpc_security *sec)
-{
-
-	_enter("");
-	down_write(&rxrpc_security_sem);
-	list_del_init(&sec->link);
-	up_write(&rxrpc_security_sem);
-
-	printk(KERN_NOTICE "RxRPC: Unregistered security type %d '%s'\n",
-	       sec->security_index, sec->name);
-}
-
-EXPORT_SYMBOL_GPL(rxrpc_unregister_security);
-
 /*
  * initialise the security on a client connection
  */
 int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
 {
+	const struct rxrpc_security *sec;
 	struct rxrpc_key_token *token;
-	struct rxrpc_security *sec;
 	struct key *key = conn->key;
 	int ret;
 
@@ -148,8 +99,7 @@
 
 	ret = conn->security->init_connection_security(conn);
 	if (ret < 0) {
-		rxrpc_security_put(conn->security);
-		conn->security = NULL;
+		conn->security = &rxrpc_no_security;
 		return ret;
 	}
 
@@ -162,7 +112,7 @@
  */
 int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
 {
-	struct rxrpc_security *sec;
+	const struct rxrpc_security *sec;
 	struct rxrpc_local *local = conn->trans->local;
 	struct rxrpc_sock *rx;
 	struct key *key;
@@ -188,14 +138,12 @@
 
 	/* the service appears to have died */
 	read_unlock_bh(&local->services_lock);
-	rxrpc_security_put(sec);
 	_leave(" = -ENOENT");
 	return -ENOENT;
 
 found_service:
 	if (!rx->securities) {
 		read_unlock_bh(&local->services_lock);
-		rxrpc_security_put(sec);
 		_leave(" = -ENOKEY");
 		return -ENOKEY;
 	}
@@ -205,7 +153,6 @@
 			      &key_type_rxrpc_s, kdesc);
 	if (IS_ERR(kref)) {
 		read_unlock_bh(&local->services_lock);
-		rxrpc_security_put(sec);
 		_leave(" = %ld [search]", PTR_ERR(kref));
 		return PTR_ERR(kref);
 	}
@@ -219,46 +166,3 @@
 	_leave(" = 0");
 	return 0;
 }
-
-/*
- * secure a packet prior to transmission
- */
-int rxrpc_secure_packet(const struct rxrpc_call *call,
-			struct sk_buff *skb,
-			size_t data_size,
-			void *sechdr)
-{
-	if (call->conn->security)
-		return call->conn->security->secure_packet(
-			call, skb, data_size, sechdr);
-	return 0;
-}
-
-/*
- * secure a packet prior to transmission
- */
-int rxrpc_verify_packet(const struct rxrpc_call *call, struct sk_buff *skb,
-			u32 *_abort_code)
-{
-	if (call->conn->security)
-		return call->conn->security->verify_packet(
-			call, skb, _abort_code);
-	return 0;
-}
-
-/*
- * clear connection security
- */
-void rxrpc_clear_conn_security(struct rxrpc_connection *conn)
-{
-	_enter("{%d}", conn->debug_id);
-
-	if (conn->security) {
-		conn->security->clear(conn);
-		rxrpc_security_put(conn->security);
-		conn->security = NULL;
-	}
-
-	key_put(conn->key);
-	key_put(conn->server_key);
-}
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
new file mode 100644
index 0000000..e571403
--- /dev/null
+++ b/net/rxrpc/insecure.c
@@ -0,0 +1,83 @@
+/* Null security operations.
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static int none_init_connection_security(struct rxrpc_connection *conn)
+{
+	return 0;
+}
+
+static void none_prime_packet_security(struct rxrpc_connection *conn)
+{
+}
+
+static int none_secure_packet(const struct rxrpc_call *call,
+			       struct sk_buff *skb,
+			       size_t data_size,
+			       void *sechdr)
+{
+	return 0;
+}
+
+static int none_verify_packet(const struct rxrpc_call *call,
+			       struct sk_buff *skb,
+			       u32 *_abort_code)
+{
+	return 0;
+}
+
+static int none_respond_to_challenge(struct rxrpc_connection *conn,
+				      struct sk_buff *skb,
+				      u32 *_abort_code)
+{
+	*_abort_code = RX_PROTOCOL_ERROR;
+	return -EPROTO;
+}
+
+static int none_verify_response(struct rxrpc_connection *conn,
+				 struct sk_buff *skb,
+				 u32 *_abort_code)
+{
+	*_abort_code = RX_PROTOCOL_ERROR;
+	return -EPROTO;
+}
+
+static void none_clear(struct rxrpc_connection *conn)
+{
+}
+
+static int none_init(void)
+{
+	return 0;
+}
+
+static void none_exit(void)
+{
+}
+
+/*
+ * RxRPC Kerberos-based security
+ */
+const struct rxrpc_security rxrpc_no_security = {
+	.name				= "none",
+	.security_index			= RXRPC_SECURITY_NONE,
+	.init				= none_init,
+	.exit				= none_exit,
+	.init_connection_security	= none_init_connection_security,
+	.prime_packet_security		= none_prime_packet_security,
+	.secure_packet			= none_secure_packet,
+	.verify_packet			= none_verify_packet,
+	.respond_to_challenge		= none_respond_to_challenge,
+	.verify_response		= none_verify_response,
+	.clear				= none_clear,
+};
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
new file mode 100644
index 0000000..1afe987
--- /dev/null
+++ b/net/rxrpc/misc.c
@@ -0,0 +1,89 @@
+/* Miscellaneous bits
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+/*
+ * How long to wait before scheduling ACK generation after seeing a
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
+ */
+unsigned int rxrpc_requested_ack_delay = 1;
+
+/*
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ *
+ * We use this when we've received new data packets.  If those packets aren't
+ * all consumed within this time we will send a DELAY ACK if an ACK was not
+ * requested to let the sender know it doesn't need to resend.
+ */
+unsigned int rxrpc_soft_ack_delay = 1 * HZ;
+
+/*
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ *
+ * We use this when we've consumed some previously soft-ACK'd packets when
+ * further packets aren't immediately received to decide when to send an IDLE
+ * ACK let the other end know that it can free up its Tx buffer space.
+ */
+unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
+
+/*
+ * Receive window size in packets.  This indicates the maximum number of
+ * unconsumed received packets we're willing to retain in memory.  Once this
+ * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
+ * packets.
+ */
+unsigned int rxrpc_rx_window_size = 32;
+
+/*
+ * Maximum Rx MTU size.  This indicates to the sender the size of jumbo packet
+ * made by gluing normal packets together that we're willing to handle.
+ */
+unsigned int rxrpc_rx_mtu = 5692;
+
+/*
+ * The maximum number of fragments in a received jumbo packet that we tell the
+ * sender that we're willing to handle.
+ */
+unsigned int rxrpc_rx_jumbo_max = 4;
+
+const char *const rxrpc_pkts[] = {
+	"?00",
+	"DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
+	"?09", "?10", "?11", "?12", "VERSION", "?14", "?15"
+};
+
+const s8 rxrpc_ack_priority[] = {
+	[0]				= 0,
+	[RXRPC_ACK_DELAY]		= 1,
+	[RXRPC_ACK_REQUESTED]		= 2,
+	[RXRPC_ACK_IDLE]		= 3,
+	[RXRPC_ACK_PING_RESPONSE]	= 4,
+	[RXRPC_ACK_DUPLICATE]		= 5,
+	[RXRPC_ACK_OUT_OF_SEQUENCE]	= 6,
+	[RXRPC_ACK_EXCEEDS_WINDOW]	= 7,
+	[RXRPC_ACK_NOSPACE]		= 8,
+};
+
+const char *rxrpc_acks(u8 reason)
+{
+	static const char *const str[] = {
+		"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
+		"IDL", "-?-"
+	};
+
+	if (reason >= ARRAY_SIZE(str))
+		reason = ARRAY_SIZE(str) - 1;
+	return str[reason];
+}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index f0aeb81..6b726a0 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -20,7 +20,6 @@
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
 #include <keys/rxrpc-type.h>
-#define rxrpc_debug rxkad_debug
 #include "ar-internal.h"
 
 #define RXKAD_VERSION			2
@@ -31,10 +30,6 @@
 #define REALM_SZ			40	/* size of principal's auth domain */
 #define SNAME_SZ			40	/* size of service name */
 
-unsigned int rxrpc_debug;
-module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, "rxkad debugging mask");
-
 struct rxkad_level1_hdr {
 	__be32	data_size;	/* true data size (excluding padding) */
 };
@@ -44,10 +39,6 @@
 	__be32	checksum;	/* decrypted data checksum */
 };
 
-MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos 4)");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
 /*
  * this holds a pinned cipher so that keventd doesn't get called by the cipher
  * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
@@ -1164,12 +1155,35 @@
 }
 
 /*
+ * Initialise the rxkad security service.
+ */
+static int rxkad_init(void)
+{
+	/* pin the cipher we need so that the crypto layer doesn't invoke
+	 * keventd to go get it */
+	rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(rxkad_ci))
+		return PTR_ERR(rxkad_ci);
+	return 0;
+}
+
+/*
+ * Clean up the rxkad security service.
+ */
+static void rxkad_exit(void)
+{
+	if (rxkad_ci)
+		crypto_free_skcipher(rxkad_ci);
+}
+
+/*
  * RxRPC Kerberos-based security
  */
-static struct rxrpc_security rxkad = {
-	.owner				= THIS_MODULE,
+const struct rxrpc_security rxkad = {
 	.name				= "rxkad",
 	.security_index			= RXRPC_SECURITY_RXKAD,
+	.init				= rxkad_init,
+	.exit				= rxkad_exit,
 	.init_connection_security	= rxkad_init_connection_security,
 	.prime_packet_security		= rxkad_prime_packet_security,
 	.secure_packet			= rxkad_secure_packet,
@@ -1179,28 +1193,3 @@
 	.verify_response		= rxkad_verify_response,
 	.clear				= rxkad_clear,
 };
-
-static __init int rxkad_init(void)
-{
-	_enter("");
-
-	/* pin the cipher we need so that the crypto layer doesn't invoke
-	 * keventd to go get it */
-	rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(rxkad_ci))
-		return PTR_ERR(rxkad_ci);
-
-	return rxrpc_register_security(&rxkad);
-}
-
-module_init(rxkad_init);
-
-static __exit void rxkad_exit(void)
-{
-	_enter("");
-
-	rxrpc_unregister_security(&rxkad);
-	crypto_free_skcipher(rxkad_ci);
-}
-
-module_exit(rxkad_exit);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9606666..336774a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -657,12 +657,15 @@
 	if (compat_mode) {
 		if (a->type == TCA_OLD_COMPAT)
 			err = gnet_stats_start_copy_compat(skb, 0,
-				TCA_STATS, TCA_XSTATS, &p->tcfc_lock, &d);
+							   TCA_STATS,
+							   TCA_XSTATS,
+							   &p->tcfc_lock, &d,
+							   TCA_PAD);
 		else
 			return 0;
 	} else
 		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
-					    &p->tcfc_lock, &d);
+					    &p->tcfc_lock, &d, TCA_ACT_PAD);
 
 	if (err < 0)
 		goto errout;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 8c9f1f0..c7123e0 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -53,9 +53,11 @@
 	filter = rcu_dereference(prog->filter);
 	if (at_ingress) {
 		__skb_push(skb, skb->mac_len);
+		bpf_compute_data_end(skb);
 		filter_res = BPF_PROG_RUN(filter, skb);
 		__skb_pull(skb, skb->mac_len);
 	} else {
+		bpf_compute_data_end(skb);
 		filter_res = BPF_PROG_RUN(filter, skb);
 	}
 	rcu_read_unlock();
@@ -156,7 +158,8 @@
 	tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse);
 	tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires);
 
-	if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm))
+	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
+			  TCA_ACT_BPF_PAD))
 		goto nla_put_failure;
 
 	return skb->len;
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index c0ed93c..2ba700c 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -163,7 +163,8 @@
 	t.install = jiffies_to_clock_t(jiffies - ci->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - ci->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(ci->tcf_tm.expires);
-	if (nla_put(skb, TCA_CONNMARK_TM, sizeof(t), &t))
+	if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t,
+			  TCA_CONNMARK_PAD))
 		goto nla_put_failure;
 
 	return skb->len;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index d22426c..28e934ed 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -549,7 +549,7 @@
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-	if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
+	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
 		goto nla_put_failure;
 
 	return skb->len;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 887fc1f..ec5cc84 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -148,6 +148,20 @@
 	return action;
 }
 
+static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
+				  u64 lastuse)
+{
+	struct tcf_gact *gact = a->priv;
+	int action = READ_ONCE(gact->tcf_action);
+	struct tcf_t *tm = &gact->tcf_tm;
+
+	_bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes, packets);
+	if (action == TC_ACT_SHOT)
+		this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
+
+	tm->lastuse = lastuse;
+}
+
 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
 	unsigned char *b = skb_tail_pointer(skb);
@@ -177,7 +191,7 @@
 	t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
-	if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t))
+	if (nla_put_64bit(skb, TCA_GACT_TM, sizeof(t), &t, TCA_GACT_PAD))
 		goto nla_put_failure;
 	return skb->len;
 
@@ -207,6 +221,7 @@
 	.type		=	TCA_ACT_GACT,
 	.owner		=	THIS_MODULE,
 	.act		=	tcf_gact,
+	.stats_update	=	tcf_gact_stats_update,
 	.dump		=	tcf_gact_dump,
 	.init		=	tcf_gact_init,
 	.walk		=	tcf_gact_walker,
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index c589a9b..658046d 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -423,7 +423,7 @@
 	u16 ife_type = 0;
 	u8 *daddr = NULL;
 	u8 *saddr = NULL;
-	int ret = 0;
+	int ret = 0, exists = 0;
 	int err;
 
 	err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
@@ -435,25 +435,29 @@
 
 	parm = nla_data(tb[TCA_IFE_PARMS]);
 
+	exists = tcf_hash_check(tn, parm->index, a, bind);
+	if (exists && bind)
+		return 0;
+
 	if (parm->flags & IFE_ENCODE) {
 		/* Until we get issued the ethertype, we cant have
 		 * a default..
 		**/
 		if (!tb[TCA_IFE_TYPE]) {
+			if (exists)
+				tcf_hash_release(a, bind);
 			pr_info("You MUST pass etherype for encoding\n");
 			return -EINVAL;
 		}
 	}
 
-	if (!tcf_hash_check(tn, parm->index, a, bind)) {
+	if (!exists) {
 		ret = tcf_hash_create(tn, parm->index, est, a, sizeof(*ife),
 				      bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
 	} else {
-		if (bind)	/* dont override defaults */
-			return 0;
 		tcf_hash_release(a, bind);
 		if (!ovr)
 			return -EEXIST;
@@ -495,6 +499,8 @@
 				       NULL);
 		if (err) {
 metadata_parse_err:
+			if (exists)
+				tcf_hash_release(a, bind);
 			if (ret == ACT_P_CREATED)
 				_tcf_ife_cleanup(a, bind);
 
@@ -550,7 +556,7 @@
 	t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(ife->tcf_tm.expires);
-	if (nla_put(skb, TCA_IFE_TM, sizeof(t), &t))
+	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
 		goto nla_put_failure;
 
 	if (!is_zero_ether_addr(ife->eth_dst)) {
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 350e134..9f002ad 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -96,7 +96,7 @@
 	struct tcf_ipt *ipt;
 	struct xt_entry_target *td, *t;
 	char *tname;
-	int ret = 0, err;
+	int ret = 0, err, exists = 0;
 	u32 hook = 0;
 	u32 index = 0;
 
@@ -107,18 +107,23 @@
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_IPT_HOOK] == NULL)
+	if (tb[TCA_IPT_INDEX] != NULL)
+		index = nla_get_u32(tb[TCA_IPT_INDEX]);
+
+	exists = tcf_hash_check(tn, index, a, bind);
+	if (exists && bind)
+		return 0;
+
+	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
+		if (exists)
+			tcf_hash_release(a, bind);
 		return -EINVAL;
-	if (tb[TCA_IPT_TARG] == NULL)
-		return -EINVAL;
+	}
 
 	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
 	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
 		return -EINVAL;
 
-	if (tb[TCA_IPT_INDEX] != NULL)
-		index = nla_get_u32(tb[TCA_IPT_INDEX]);
-
 	if (!tcf_hash_check(tn, index, a, bind)) {
 		ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
 				      false);
@@ -275,7 +280,7 @@
 	tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
 	tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
 	tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
-	if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm))
+	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
 		goto nla_put_failure;
 	kfree(t);
 	return skb->len;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index e8a760c..128942b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -36,14 +36,15 @@
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
 	struct tcf_mirred *m = to_mirred(a);
-	struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
+	struct net_device *dev;
 
 	/* We could be called either in a RCU callback or with RTNL lock held. */
 	spin_lock_bh(&mirred_list_lock);
 	list_del(&m->tcfm_list);
-	spin_unlock_bh(&mirred_list_lock);
+	dev = rcu_dereference_protected(m->tcfm_dev, 1);
 	if (dev)
 		dev_put(dev);
+	spin_unlock_bh(&mirred_list_lock);
 }
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -61,7 +62,7 @@
 	struct tc_mirred *parm;
 	struct tcf_mirred *m;
 	struct net_device *dev;
-	int ret, ok_push = 0;
+	int ret, ok_push = 0, exists = 0;
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -71,17 +72,27 @@
 	if (tb[TCA_MIRRED_PARMS] == NULL)
 		return -EINVAL;
 	parm = nla_data(tb[TCA_MIRRED_PARMS]);
+
+	exists = tcf_hash_check(tn, parm->index, a, bind);
+	if (exists && bind)
+		return 0;
+
 	switch (parm->eaction) {
 	case TCA_EGRESS_MIRROR:
 	case TCA_EGRESS_REDIR:
 		break;
 	default:
+		if (exists)
+			tcf_hash_release(a, bind);
 		return -EINVAL;
 	}
 	if (parm->ifindex) {
 		dev = __dev_get_by_index(net, parm->ifindex);
-		if (dev == NULL)
+		if (dev == NULL) {
+			if (exists)
+				tcf_hash_release(a, bind);
 			return -ENODEV;
+		}
 		switch (dev->type) {
 		case ARPHRD_TUNNEL:
 		case ARPHRD_TUNNEL6:
@@ -99,7 +110,7 @@
 		dev = NULL;
 	}
 
-	if (!tcf_hash_check(tn, parm->index, a, bind)) {
+	if (!exists) {
 		if (dev == NULL)
 			return -EINVAL;
 		ret = tcf_hash_create(tn, parm->index, est, a,
@@ -108,9 +119,6 @@
 			return ret;
 		ret = ACT_P_CREATED;
 	} else {
-		if (bind)
-			return 0;
-
 		tcf_hash_release(a, bind);
 		if (!ovr)
 			return -EEXIST;
@@ -214,7 +222,7 @@
 	t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
-	if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t))
+	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
 		goto nla_put_failure;
 	return skb->len;
 
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 0f65cdf..c0a879f 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -267,7 +267,7 @@
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-	if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t))
+	if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
 		goto nla_put_failure;
 
 	return skb->len;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 429c3ab..c6e18f2 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -203,7 +203,7 @@
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-	if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t))
+	if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD))
 		goto nla_put_failure;
 	kfree(opt);
 	return skb->len;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 75b2be1..e42f8da 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -87,7 +87,7 @@
 	struct tc_defact *parm;
 	struct tcf_defact *d;
 	char *defdata;
-	int ret = 0, err;
+	int ret = 0, err, exists = 0;
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -99,13 +99,21 @@
 	if (tb[TCA_DEF_PARMS] == NULL)
 		return -EINVAL;
 
-	if (tb[TCA_DEF_DATA] == NULL)
-		return -EINVAL;
 
 	parm = nla_data(tb[TCA_DEF_PARMS]);
+	exists = tcf_hash_check(tn, parm->index, a, bind);
+	if (exists && bind)
+		return 0;
+
+	if (tb[TCA_DEF_DATA] == NULL) {
+		if (exists)
+			tcf_hash_release(a, bind);
+		return -EINVAL;
+	}
+
 	defdata = nla_data(tb[TCA_DEF_DATA]);
 
-	if (!tcf_hash_check(tn, parm->index, a, bind)) {
+	if (!exists) {
 		ret = tcf_hash_create(tn, parm->index, est, a,
 				      sizeof(*d), bind, false);
 		if (ret)
@@ -122,8 +130,6 @@
 	} else {
 		d = to_defact(a);
 
-		if (bind)
-			return 0;
 		tcf_hash_release(a, bind);
 		if (!ovr)
 			return -EEXIST;
@@ -155,7 +161,7 @@
 	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
-	if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t))
+	if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD))
 		goto nla_put_failure;
 	return skb->len;
 
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index cfcdbdc..e928802 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -69,7 +69,7 @@
 	struct tcf_skbedit *d;
 	u32 flags = 0, *priority = NULL, *mark = NULL;
 	u16 *queue_mapping = NULL;
-	int ret = 0, err;
+	int ret = 0, err, exists = 0;
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -96,12 +96,18 @@
 		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
 	}
 
-	if (!flags)
-		return -EINVAL;
-
 	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 
-	if (!tcf_hash_check(tn, parm->index, a, bind)) {
+	exists = tcf_hash_check(tn, parm->index, a, bind);
+	if (exists && bind)
+		return 0;
+
+	if (!flags) {
+		tcf_hash_release(a, bind);
+		return -EINVAL;
+	}
+
+	if (!exists) {
 		ret = tcf_hash_create(tn, parm->index, est, a,
 				      sizeof(*d), bind, false);
 		if (ret)
@@ -111,8 +117,6 @@
 		ret = ACT_P_CREATED;
 	} else {
 		d = to_skbedit(a);
-		if (bind)
-			return 0;
 		tcf_hash_release(a, bind);
 		if (!ovr)
 			return -EEXIST;
@@ -167,7 +171,7 @@
 	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
-	if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t))
+	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
 		goto nla_put_failure;
 	return skb->len;
 
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index bab8ae0..ac4adc8 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -77,7 +77,7 @@
 	int action;
 	__be16 push_vid = 0;
 	__be16 push_proto = 0;
-	int ret = 0;
+	int ret = 0, exists = 0;
 	int err;
 
 	if (!nla)
@@ -90,15 +90,25 @@
 	if (!tb[TCA_VLAN_PARMS])
 		return -EINVAL;
 	parm = nla_data(tb[TCA_VLAN_PARMS]);
+	exists = tcf_hash_check(tn, parm->index, a, bind);
+	if (exists && bind)
+		return 0;
+
 	switch (parm->v_action) {
 	case TCA_VLAN_ACT_POP:
 		break;
 	case TCA_VLAN_ACT_PUSH:
-		if (!tb[TCA_VLAN_PUSH_VLAN_ID])
+		if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
+			if (exists)
+				tcf_hash_release(a, bind);
 			return -EINVAL;
+		}
 		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
-		if (push_vid >= VLAN_VID_MASK)
+		if (push_vid >= VLAN_VID_MASK) {
+			if (exists)
+				tcf_hash_release(a, bind);
 			return -ERANGE;
+		}
 
 		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
 			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
@@ -114,11 +124,13 @@
 		}
 		break;
 	default:
+		if (exists)
+			tcf_hash_release(a, bind);
 		return -EINVAL;
 	}
 	action = parm->v_action;
 
-	if (!tcf_hash_check(tn, parm->index, a, bind)) {
+	if (!exists) {
 		ret = tcf_hash_create(tn, parm->index, est, a,
 				      sizeof(*v), bind, false);
 		if (ret)
@@ -126,8 +138,6 @@
 
 		ret = ACT_P_CREATED;
 	} else {
-		if (bind)
-			return 0;
 		tcf_hash_release(a, bind);
 		if (!ovr)
 			return -EEXIST;
@@ -175,7 +185,7 @@
 	t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(v->tcf_tm.expires);
-	if (nla_put(skb, TCA_VLAN_TM, sizeof(t), &t))
+	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
 		goto nla_put_failure;
 	return skb->len;
 
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 425fe6a..7b342c7 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -96,9 +96,11 @@
 		if (at_ingress) {
 			/* It is safe to push/pull even if skb_shared() */
 			__skb_push(skb, skb->mac_len);
+			bpf_compute_data_end(skb);
 			filter_res = BPF_PROG_RUN(prog->filter, skb);
 			__skb_pull(skb, skb->mac_len);
 		} else {
+			bpf_compute_data_end(skb);
 			filter_res = BPF_PROG_RUN(prog->filter, skb);
 		}
 
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 2181ffc..730aaca 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -210,6 +210,25 @@
 	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
 }
 
+static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
+{
+	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tc_cls_flower_offload offload = {0};
+	struct tc_to_netdev tc;
+
+	if (!tc_should_offload(dev, 0))
+		return;
+
+	offload.command = TC_CLSFLOWER_STATS;
+	offload.cookie = (unsigned long)f;
+	offload.exts = &f->exts;
+
+	tc.type = TC_SETUP_CLSFLOWER;
+	tc.cls_flower = &offload;
+
+	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
+}
+
 static bool fl_destroy(struct tcf_proto *tp, bool force)
 {
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -662,6 +681,8 @@
 			goto nla_put_failure;
 	}
 
+	fl_hw_update_stats(tp, f);
+
 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
 			    sizeof(key->eth.dst)) ||
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 563cdad..079b43b 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -134,6 +134,11 @@
 		j = 0;
 #endif
 
+		if (tc_skip_sw(n->flags)) {
+			n = rcu_dereference_bh(n->next);
+			goto next_knode;
+		}
+
 #ifdef CONFIG_CLS_U32_MARK
 		if ((skb->mark & n->mask) != n->val) {
 			n = rcu_dereference_bh(n->next);
@@ -443,13 +448,14 @@
 	}
 }
 
-static void u32_replace_hw_hnode(struct tcf_proto *tp,
+static int u32_replace_hw_hnode(struct tcf_proto *tp,
 				 struct tc_u_hnode *h,
 				 u32 flags)
 {
 	struct net_device *dev = tp->q->dev_queue->dev;
 	struct tc_cls_u32_offload u32_offload = {0};
 	struct tc_to_netdev offload;
+	int err;
 
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
@@ -460,9 +466,13 @@
 		offload.cls_u32->hnode.handle = h->handle;
 		offload.cls_u32->hnode.prio = h->prio;
 
-		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-					      tp->protocol, &offload);
+		err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+						    tp->protocol, &offload);
+		if (tc_skip_sw(flags))
+			return err;
 	}
+
+	return 0;
 }
 
 static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
@@ -485,13 +495,14 @@
 	}
 }
 
-static void u32_replace_hw_knode(struct tcf_proto *tp,
+static int u32_replace_hw_knode(struct tcf_proto *tp,
 				 struct tc_u_knode *n,
 				 u32 flags)
 {
 	struct net_device *dev = tp->q->dev_queue->dev;
 	struct tc_cls_u32_offload u32_offload = {0};
 	struct tc_to_netdev offload;
+	int err;
 
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
@@ -512,9 +523,13 @@
 		if (n->ht_down)
 			offload.cls_u32->knode.link_handle = n->ht_down->handle;
 
-		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-					      tp->protocol, &offload);
+		err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+						    tp->protocol, &offload);
+		if (tc_skip_sw(flags))
+			return err;
 	}
+
+	return 0;
 }
 
 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
@@ -845,8 +860,11 @@
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_U32_FLAGS])
+	if (tb[TCA_U32_FLAGS]) {
 		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
+		if (!tc_flags_valid(flags))
+			return err;
+	}
 
 	n = (struct tc_u_knode *)*arg;
 	if (n) {
@@ -871,10 +889,15 @@
 			return err;
 		}
 
+		err = u32_replace_hw_knode(tp, new, flags);
+		if (err) {
+			u32_destroy_key(tp, new, false);
+			return err;
+		}
+
 		u32_replace_knode(tp, tp_c, new);
 		tcf_unbind_filter(tp, &n->res);
 		call_rcu(&n->rcu, u32_delete_key_rcu);
-		u32_replace_hw_knode(tp, new, flags);
 		return 0;
 	}
 
@@ -978,6 +1001,10 @@
 		struct tc_u_knode __rcu **ins;
 		struct tc_u_knode *pins;
 
+		err = u32_replace_hw_knode(tp, n, flags);
+		if (err)
+			goto errhw;
+
 		ins = &ht->ht[TC_U32_HASH(handle)];
 		for (pins = rtnl_dereference(*ins); pins;
 		     ins = &pins->next, pins = rtnl_dereference(*ins))
@@ -986,11 +1013,11 @@
 
 		RCU_INIT_POINTER(n->next, pins);
 		rcu_assign_pointer(*ins, n);
-		u32_replace_hw_knode(tp, n, flags);
 		*arg = (unsigned long)n;
 		return 0;
 	}
 
+errhw:
 #ifdef CONFIG_CLS_U32_MARK
 	free_percpu(n->pcpu_success);
 errout:
@@ -1140,9 +1167,10 @@
 				gpf->kcnts[i] += pf->kcnts[i];
 		}
 
-		if (nla_put(skb, TCA_U32_PCNT,
-			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
-			    gpf)) {
+		if (nla_put_64bit(skb, TCA_U32_PCNT,
+				  sizeof(struct tc_u32_pcnt) +
+				  n->sel.nkeys * sizeof(u64),
+				  gpf, TCA_U32_PAD)) {
 			kfree(gpf);
 			goto nla_put_failure;
 		}
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index f2aabc0..a309a07 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -796,7 +796,7 @@
 	int	(*dump)(struct sk_buff *, struct meta_value *, int);
 };
 
-static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
+static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
 	[TCF_META_TYPE_VAR] = {
 		.destroy = meta_var_destroy,
 		.compare = meta_var_compare,
@@ -812,7 +812,7 @@
 	}
 };
 
-static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
+static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
 {
 	return &__meta_type_ops[meta_type(v)];
 }
@@ -870,7 +870,7 @@
 static void meta_delete(struct meta_match *meta)
 {
 	if (meta) {
-		struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
+		const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
 
 		if (ops && ops->destroy) {
 			ops->destroy(&meta->lvalue);
@@ -964,7 +964,7 @@
 {
 	struct meta_match *meta = (struct meta_match *) em->data;
 	struct tcf_meta_hdr hdr;
-	struct meta_type_ops *ops;
+	const struct meta_type_ops *ops;
 
 	memset(&hdr, 0, sizeof(hdr));
 	memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 3b180ff..64f71a2 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1365,7 +1365,8 @@
 		goto nla_put_failure;
 
 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
-					 qdisc_root_sleeping_lock(q), &d) < 0)
+					 qdisc_root_sleeping_lock(q), &d,
+					 TCA_PAD) < 0)
 		goto nla_put_failure;
 
 	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1679,7 +1680,8 @@
 		goto nla_put_failure;
 
 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
-					 qdisc_root_sleeping_lock(q), &d) < 0)
+					 qdisc_root_sleeping_lock(q), &d,
+					 TCA_PAD) < 0)
 		goto nla_put_failure;
 
 	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 9b7e298..dddf3bb 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -49,6 +49,8 @@
 #include <linux/prefetch.h>
 #include <net/pkt_sched.h>
 #include <net/codel.h>
+#include <net/codel_impl.h>
+#include <net/codel_qdisc.h>
 
 
 #define DEFAULT_CODEL_LIMIT 1000
@@ -64,20 +66,33 @@
  * to dequeue a packet from queue. Note: backlog is handled in
  * codel, we dont need to reduce it here.
  */
-static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
 {
+	struct Qdisc *sch = ctx;
 	struct sk_buff *skb = __skb_dequeue(&sch->q);
 
+	if (skb)
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
+
 	prefetch(&skb->end); /* we'll need skb_shinfo() */
 	return skb;
 }
 
+static void drop_func(struct sk_buff *skb, void *ctx)
+{
+	struct Qdisc *sch = ctx;
+
+	qdisc_drop(skb, sch);
+}
+
 static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
 {
 	struct codel_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
-	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
+			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
+			    drop_func, dequeue_func);
 
 	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
 	 * or HTB crashes. Defer it for next round.
@@ -173,9 +188,10 @@
 
 	sch->limit = DEFAULT_CODEL_LIMIT;
 
-	codel_params_init(&q->params, sch);
+	codel_params_init(&q->params);
 	codel_vars_init(&q->vars);
 	codel_stats_init(&q->stats);
+	q->params.mtu = psched_mtu(qdisc_dev(sch));
 
 	if (opt) {
 		int err = codel_change(sch, opt);
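
The sch_codel.c changes above turn the dequeue and drop operations into callbacks that take an opaque ctx pointer, so the generic CoDel core no longer needs to know about struct Qdisc. A rough userspace model of that inversion of control follows; codel_run, queue_pop and queue_drop are made-up names that only illustrate the callback-with-context shape, not the kernel signatures.

#include <stdio.h>

struct pkt { int len; };

/* The generic algorithm sees only an opaque context plus callbacks. */
typedef struct pkt *(*dequeue_fn)(void *ctx);
typedef void (*drop_fn)(struct pkt *p, void *ctx);

static struct pkt *codel_run(void *ctx, dequeue_fn deq, drop_fn drop)
{
	struct pkt *p = deq(ctx);

	if (p && p->len > 1000) {	/* toy stand-in for the CoDel drop decision */
		drop(p, ctx);
		return NULL;
	}
	return p;
}

/* A concrete queue type, known only to the callbacks. */
struct myqueue { struct pkt storage[2]; int head; int backlog; int drops; };

static struct pkt *queue_pop(void *ctx)
{
	struct myqueue *q = ctx;
	struct pkt *p = &q->storage[q->head++];

	q->backlog -= p->len;		/* backlog accounting stays with the queue owner */
	return p;
}

static void queue_drop(struct pkt *p, void *ctx)
{
	(void)p;
	((struct myqueue *)ctx)->drops++;
}

int main(void)
{
	struct myqueue q = { .storage = { { 1500 }, { 100 } },
			     .head = 0, .backlog = 1600, .drops = 0 };
	struct pkt *p;

	codel_run(&q, queue_pop, queue_drop);		/* oversized packet dropped */
	p = codel_run(&q, queue_pop, queue_drop);
	printf("delivered len=%d, backlog=%d, drops=%d\n",
	       p ? p->len : 0, q.backlog, q.drops);
	return 0;
}
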
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index d3fc8f9..6883a89 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -24,6 +24,8 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/codel.h>
+#include <net/codel_impl.h>
+#include <net/codel_qdisc.h>
 
 /*	Fair Queue CoDel.
  *
@@ -57,8 +59,12 @@
 	u32		flows_cnt;	/* number of flows */
 	u32		perturbation;	/* hash perturbation */
 	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
+	u32		drop_batch_size;
+	u32		memory_limit;
 	struct codel_params cparams;
 	struct codel_stats cstats;
+	u32		memory_usage;
+	u32		drop_overmemory;
 	u32		drop_overlimit;
 	u32		new_flow_count;
 
@@ -133,17 +139,21 @@
 	skb->next = NULL;
 }
 
-static unsigned int fq_codel_drop(struct Qdisc *sch)
+static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 	unsigned int maxbacklog = 0, idx = 0, i, len;
 	struct fq_codel_flow *flow;
+	unsigned int threshold;
+	unsigned int mem = 0;
 
-	/* Queue is full! Find the fat flow and drop packet from it.
+	/* Queue is full! Find the fat flow and drop packet(s) from it.
 	 * This might sound expensive, but with 1024 flows, we scan
 	 * 4KB of memory, and we dont need to handle a complex tree
 	 * in fast path (packet queue/enqueue) with many cache misses.
+	 * In stress mode, we'll try to drop 64 packets from the flow,
+	 * amortizing this linear lookup to one cache line per drop.
 	 */
 	for (i = 0; i < q->flows_cnt; i++) {
 		if (q->backlogs[i] > maxbacklog) {
@@ -151,15 +161,26 @@
 			idx = i;
 		}
 	}
+
+	/* Our goal is to drop half of this fat flow backlog */
+	threshold = maxbacklog >> 1;
+
 	flow = &q->flows[idx];
-	skb = dequeue_head(flow);
-	len = qdisc_pkt_len(skb);
+	len = 0;
+	i = 0;
+	do {
+		skb = dequeue_head(flow);
+		len += qdisc_pkt_len(skb);
+		mem += skb->truesize;
+		kfree_skb(skb);
+	} while (++i < max_packets && len < threshold);
+
+	flow->dropped += i;
 	q->backlogs[idx] -= len;
-	sch->q.qlen--;
-	qdisc_qstats_drop(sch);
-	qdisc_qstats_backlog_dec(sch, skb);
-	kfree_skb(skb);
-	flow->dropped++;
+	q->memory_usage -= mem;
+	sch->qstats.drops += i;
+	sch->qstats.backlog -= len;
+	sch->q.qlen -= i;
 	return idx;
 }
 
@@ -168,16 +189,17 @@
 	unsigned int prev_backlog;
 
 	prev_backlog = sch->qstats.backlog;
-	fq_codel_drop(sch);
+	fq_codel_drop(sch, 1U);
 	return prev_backlog - sch->qstats.backlog;
 }
 
 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
-	unsigned int idx, prev_backlog;
+	unsigned int idx, prev_backlog, prev_qlen;
 	struct fq_codel_flow *flow;
 	int uninitialized_var(ret);
+	bool memory_limited;
 
 	idx = fq_codel_classify(skb, sch, &ret);
 	if (idx == 0) {
@@ -200,28 +222,38 @@
 		flow->deficit = q->quantum;
 		flow->dropped = 0;
 	}
-	if (++sch->q.qlen <= sch->limit)
+	q->memory_usage += skb->truesize;
+	memory_limited = q->memory_usage > q->memory_limit;
+	if (++sch->q.qlen <= sch->limit && !memory_limited)
 		return NET_XMIT_SUCCESS;
 
 	prev_backlog = sch->qstats.backlog;
-	q->drop_overlimit++;
-	/* Return Congestion Notification only if we dropped a packet
-	 * from this flow.
-	 */
-	if (fq_codel_drop(sch) == idx)
-		return NET_XMIT_CN;
+	prev_qlen = sch->q.qlen;
 
-	/* As we dropped a packet, better let upper stack know this */
-	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
-	return NET_XMIT_SUCCESS;
+	/* fq_codel_drop() is quite expensive, as it performs a linear search
+	 * in q->backlogs[] to find a fat flow.
+	 * So instead of dropping a single packet, drop half of its backlog
+	 * with a 64 packets limit to not add a too big cpu spike here.
+	 */
+	ret = fq_codel_drop(sch, q->drop_batch_size);
+
+	q->drop_overlimit += prev_qlen - sch->q.qlen;
+	if (memory_limited)
+		q->drop_overmemory += prev_qlen - sch->q.qlen;
+	/* As we dropped packet(s), better let upper stack know this */
+	qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
+				  prev_backlog - sch->qstats.backlog);
+
+	return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
 }
 
 /* This is the specific function called from codel_dequeue()
  * to dequeue a packet from queue. Note: backlog is handled in
  * codel, we dont need to reduce it here.
  */
-static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
 {
+	struct Qdisc *sch = ctx;
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	struct fq_codel_flow *flow;
 	struct sk_buff *skb = NULL;
@@ -230,11 +262,20 @@
 	if (flow->head) {
 		skb = dequeue_head(flow);
 		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
+		q->memory_usage -= skb->truesize;
 		sch->q.qlen--;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
 	}
 	return skb;
 }
 
+static void drop_func(struct sk_buff *skb, void *ctx)
+{
+	struct Qdisc *sch = ctx;
+
+	qdisc_drop(skb, sch);
+}
+
 static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -263,8 +304,9 @@
 	prev_ecn_mark = q->cstats.ecn_mark;
 	prev_backlog = sch->qstats.backlog;
 
-	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
-			    dequeue);
+	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
+			    &flow->cvars, &q->cstats, qdisc_pkt_len,
+			    codel_get_enqueue_time, drop_func, dequeue_func);
 
 	flow->dropped += q->cstats.drop_count - prev_drop_count;
 	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
@@ -313,6 +355,7 @@
 	}
 	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
 	sch->q.qlen = 0;
+	q->memory_usage = 0;
 }
 
 static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
@@ -323,6 +366,8 @@
 	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
 	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
 	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
+	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
+	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
 };
 
 static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
@@ -374,7 +419,14 @@
 	if (tb[TCA_FQ_CODEL_QUANTUM])
 		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
 
-	while (sch->q.qlen > sch->limit) {
+	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
+		q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
+
+	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
+		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
+
+	while (sch->q.qlen > sch->limit ||
+	       q->memory_usage > q->memory_limit) {
 		struct sk_buff *skb = fq_codel_dequeue(sch);
 
 		q->cstats.drop_len += qdisc_pkt_len(skb);
@@ -419,13 +471,16 @@
 
 	sch->limit = 10*1024;
 	q->flows_cnt = 1024;
+	q->memory_limit = 32 << 20; /* 32 MBytes */
+	q->drop_batch_size = 64;
 	q->quantum = psched_mtu(qdisc_dev(sch));
 	q->perturbation = prandom_u32();
 	INIT_LIST_HEAD(&q->new_flows);
 	INIT_LIST_HEAD(&q->old_flows);
-	codel_params_init(&q->cparams, sch);
+	codel_params_init(&q->cparams);
 	codel_stats_init(&q->cstats);
 	q->cparams.ecn = true;
+	q->cparams.mtu = psched_mtu(qdisc_dev(sch));
 
 	if (opt) {
 		int err = fq_codel_change(sch, opt);
@@ -476,6 +531,10 @@
 			q->cparams.ecn) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
 			q->quantum) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
+			q->drop_batch_size) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
+			q->memory_limit) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
 			q->flows_cnt))
 		goto nla_put_failure;
@@ -504,6 +563,8 @@
 	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
 	st.qdisc_stats.new_flow_count = q->new_flow_count;
 	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
+	st.qdisc_stats.memory_usage  = q->memory_usage;
+	st.qdisc_stats.drop_overmemory = q->drop_overmemory;
 
 	list_for_each(pos, &q->new_flows)
 		st.qdisc_stats.new_flows_len++;
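
The fq_codel changes replace the single-packet overlimit drop with a batched drop: find the fattest flow with one linear scan, then drop packets from it until either max_packets have gone or half of that flow's backlog has been freed, updating bytes, qlen and memory usage in one pass. A small standalone model of that loop is shown below, using plain arrays instead of struct fq_codel_flow and a fixed 100-byte packet size purely for illustration.

#include <stdio.h>

#define NFLOWS 4

static unsigned int backlog[NFLOWS] = { 300, 1200, 150, 600 };
static unsigned int qlen[NFLOWS]    = {   3,   12,   1,   6 };	/* 100-byte packets */

/* Drop up to max_packets from the fattest flow, aiming at half its backlog. */
static unsigned int batch_drop(unsigned int max_packets)
{
	unsigned int i, idx = 0, maxbacklog = 0, threshold, len = 0, dropped = 0;

	for (i = 0; i < NFLOWS; i++)		/* one linear scan for the fat flow */
		if (backlog[i] > maxbacklog) {
			maxbacklog = backlog[i];
			idx = i;
		}

	threshold = maxbacklog >> 1;		/* goal: free half of its backlog */
	do {
		len += 100;			/* "dequeue and free" one packet */
		dropped++;
	} while (dropped < max_packets && len < threshold);

	backlog[idx] -= len;
	qlen[idx] -= dropped;
	return idx;
}

int main(void)
{
	unsigned int idx = batch_drop(64);

	printf("dropped from flow %u, backlog now %u, qlen now %u\n",
	       idx, backlog[idx], qlen[idx]);
	return 0;
}
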
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f18c350..269dd71 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -108,35 +108,6 @@
 	return skb;
 }
 
-static inline int handle_dev_cpu_collision(struct sk_buff *skb,
-					   struct netdev_queue *dev_queue,
-					   struct Qdisc *q)
-{
-	int ret;
-
-	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
-		/*
-		 * Same CPU holding the lock. It may be a transient
-		 * configuration error, when hard_start_xmit() recurses. We
-		 * detect it by checking xmit owner and drop the packet when
-		 * deadloop is detected. Return OK to try the next skb.
-		 */
-		kfree_skb_list(skb);
-		net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
-				     dev_queue->dev->name);
-		ret = qdisc_qlen(q);
-	} else {
-		/*
-		 * Another cpu is holding lock, requeue & delay xmits for
-		 * some time.
-		 */
-		__this_cpu_inc(softnet_data.cpu_collision);
-		ret = dev_requeue_skb(skb, q);
-	}
-
-	return ret;
-}
-
 /*
  * Transmit possibly several skbs, and handle the return status as
  * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
@@ -159,21 +130,21 @@
 	if (validate)
 		skb = validate_xmit_skb_list(skb, dev);
 
-	if (skb) {
+	if (likely(skb)) {
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (!netif_xmit_frozen_or_stopped(txq))
 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 
 		HARD_TX_UNLOCK(dev, txq);
+	} else {
+		spin_lock(root_lock);
+		return qdisc_qlen(q);
 	}
 	spin_lock(root_lock);
 
 	if (dev_xmit_complete(ret)) {
 		/* Driver sent out skb successfully or skb was consumed */
 		ret = qdisc_qlen(q);
-	} else if (ret == NETDEV_TX_LOCKED) {
-		/* Driver try lock failed */
-		ret = handle_dev_cpu_collision(skb, txq, q);
 	} else {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 		if (unlikely(ret != NETDEV_TX_BUSY))
@@ -256,13 +227,12 @@
 
 	if (is_vlan_dev(dev))
 		dev = vlan_dev_real_dev(dev);
-	res = dev->trans_start;
-	for (i = 0; i < dev->num_tx_queues; i++) {
+	res = netdev_get_tx_queue(dev, 0)->trans_start;
+	for (i = 1; i < dev->num_tx_queues; i++) {
 		val = netdev_get_tx_queue(dev, i)->trans_start;
 		if (val && time_after(val, res))
 			res = val;
 	}
-	dev->trans_start = res;
 
 	return res;
 }
@@ -285,10 +255,7 @@
 				struct netdev_queue *txq;
 
 				txq = netdev_get_tx_queue(dev, i);
-				/*
-				 * old device drivers set dev->trans_start
-				 */
-				trans_start = txq->trans_start ? : dev->trans_start;
+				trans_start = txq->trans_start;
 				if (netif_xmit_stopped(txq) &&
 				    time_after(jiffies, (trans_start +
 							 dev->watchdog_timeo))) {
@@ -804,7 +771,7 @@
 		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
 
 	if (need_watchdog) {
-		dev->trans_start = jiffies;
+		netif_trans_update(dev);
 		dev_watchdog_up(dev);
 	}
 }
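
With dev->trans_start gone, dev_trans_start() in sch_generic.c derives the most recent transmit time by scanning every TX queue's trans_start, and the watchdog compares against the per-queue value directly. The helper below only models that scan; the timestamp array is invented and the wraparound-safe time_after() comparison is reduced to a plain maximum for brevity.

#include <stdio.h>

#define NQUEUES 4

/* Per-queue last-transmit timestamps (kernel: netdev_get_tx_queue(dev, i)->trans_start). */
static unsigned long trans_start[NQUEUES] = { 500, 0, 730, 640 };

static unsigned long dev_trans_start_model(void)
{
	unsigned long res = trans_start[0];
	int i;

	for (i = 1; i < NQUEUES; i++)		/* newest non-zero timestamp wins */
		if (trans_start[i] && trans_start[i] > res)
			res = trans_start[i];
	return res;
}

int main(void)
{
	printf("device last transmitted at %lu\n", dev_trans_start_model());
	return 0;
}
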
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 87b02ed3..f6bf581 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1122,10 +1122,12 @@
 	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
-	    nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps))
+	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
+			      TCA_HTB_PAD))
 		goto nla_put_failure;
 	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
-	    nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps))
+	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
+			      TCA_HTB_PAD))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, nest);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 9640bb3..205bed0 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -395,6 +395,25 @@
 	sch->q.qlen++;
 }
 
+/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
+ * when we statistically choose to corrupt one, we instead segment it, returning
+ * the first packet to be corrupted, and re-enqueue the remaining frames
+ */
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct sk_buff *segs;
+	netdev_features_t features = netif_skb_features(skb);
+
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+	if (IS_ERR_OR_NULL(segs)) {
+		qdisc_reshape_fail(skb, sch);
+		return NULL;
+	}
+	consume_skb(skb);
+	return segs;
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -407,7 +426,11 @@
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
+	struct sk_buff *segs = NULL;
+	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
+	int nb = 0;
 	int count = 1;
+	int rc = NET_XMIT_SUCCESS;
 
 	/* Random duplication */
 	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
@@ -453,10 +476,23 @@
 	 * do it now in software before we mangle it.
 	 */
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+		if (skb_is_gso(skb)) {
+			segs = netem_segment(skb, sch);
+			if (!segs)
+				return NET_XMIT_DROP;
+		} else {
+			segs = skb;
+		}
+
+		skb = segs;
+		segs = segs->next;
+
 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
-		     skb_checksum_help(skb)))
-			return qdisc_drop(skb, sch);
+		     skb_checksum_help(skb))) {
+			rc = qdisc_drop(skb, sch);
+			goto finish_segs;
+		}
 
 		skb->data[prandom_u32() % skb_headlen(skb)] ^=
 			1<<(prandom_u32() % 8);
@@ -516,6 +552,27 @@
 		sch->qstats.requeues++;
 	}
 
+finish_segs:
+	if (segs) {
+		while (segs) {
+			skb2 = segs->next;
+			segs->next = NULL;
+			qdisc_skb_cb(segs)->pkt_len = segs->len;
+			last_len = segs->len;
+			rc = qdisc_enqueue(segs, sch);
+			if (rc != NET_XMIT_SUCCESS) {
+				if (net_xmit_drop_count(rc))
+					qdisc_qstats_drop(sch);
+			} else {
+				nb++;
+				len += last_len;
+			}
+			segs = skb2;
+		}
+		sch->q.qlen += nb;
+		if (nb > 1)
+			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+	}
 	return NET_XMIT_SUCCESS;
 }
 
@@ -994,7 +1051,8 @@
 		goto nla_put_failure;
 
 	if (q->rate >= (1ULL << 32)) {
-		if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate))
+		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
+				      TCA_NETEM_PAD))
 			goto nla_put_failure;
 		rate.rate = ~0U;
 	} else {
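
When netem decides to corrupt a GSO "megapacket", the patch above segments it, corrupts only the first segment and re-enqueues the rest, then reconciles the parent's counters with qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len): the parent had accounted one packet of prev_len bytes, so the qlen reduction is negative (it actually grew by nb - 1 packets) and the byte reduction is whatever was not accepted. The tiny calculation below just illustrates that bookkeeping with invented numbers.

#include <stdio.h>

int main(void)
{
	unsigned int prev_len = 6000;	/* length the parent accounted for the GSO skb */
	unsigned int nb = 4;		/* segments successfully re-enqueued */
	unsigned int len = 4 * 1500;	/* bytes of the accepted segments */

	/* Arguments as netem would pass them to qdisc_tree_reduce_backlog(). */
	int pkt_reduce  = 1 - (int)nb;			/* negative: qlen grows by nb - 1 */
	int byte_reduce = (int)prev_len - (int)len;	/* bytes to subtract from the backlog */

	printf("qdisc_tree_reduce_backlog(sch, %d, %d)\n", pkt_reduce, byte_reduce);
	return 0;
}
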
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index c2fbde7..83b90b5 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -472,11 +472,13 @@
 	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
-	    nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
+	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
+			      TCA_TBF_PAD))
 		goto nla_put_failure;
 	if (tbf_peak_present(q) &&
 	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
-	    nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
+	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
+			      TCA_TBF_PAD))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, nest);
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 71c1a59..d9c04dc 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -99,5 +99,9 @@
 	select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1
 	select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1
 
+config INET_SCTP_DIAG
+	depends on INET_DIAG
+	def_tristate INET_DIAG
+
 
 endif # IP_SCTP
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 3b4ffb0..0fca582 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_IP_SCTP) += sctp.o
 obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
+obj-$(CONFIG_INET_SCTP_DIAG) += sctp_diag.o
 
 sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
 	  protocol.o endpointola.o associola.o \
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 958ef5f..1eb94bf 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -239,7 +239,7 @@
 	offset = 0;
 
 	if ((whole > 1) || (whole && over))
-		SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
 
 	/* Create chunks for all the full sized DATA chunks. */
 	for (i = 0, len = first_len; i < whole; i++) {
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 00b8445..a701527 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -84,7 +84,7 @@
 
 	if (val != cmp) {
 		/* CRC failure, dump it. */
-		SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
+		__SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS);
 		return -1;
 	}
 	return 0;
@@ -122,7 +122,7 @@
 	if (skb->pkt_type != PACKET_HOST)
 		goto discard_it;
 
-	SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
+	__SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);
 
 	if (skb_linearize(skb))
 		goto discard_it;
@@ -208,7 +208,7 @@
 	 */
 	if (!asoc) {
 		if (sctp_rcv_ootb(skb)) {
-			SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
+			__SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 			goto discard_release;
 		}
 	}
@@ -264,9 +264,9 @@
 			skb = NULL; /* sctp_chunk_free already freed the skb */
 			goto discard_release;
 		}
-		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
+		__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG);
 	} else {
-		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
+		__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ);
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 	}
 
@@ -281,7 +281,7 @@
 	return 0;
 
 discard_it:
-	SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
+	__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
 	kfree_skb(skb);
 	return 0;
 
@@ -532,7 +532,7 @@
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	*app = asoc;
 	*tpp = transport;
@@ -589,7 +589,7 @@
 	skb->network_header = saveip;
 	skb->transport_header = savesctp;
 	if (!sk) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;
 	}
 	/* Warning:  The sock lock is held.  Remember to call
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 7e8a16c..9d87bba 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -89,10 +89,12 @@
 	 * Eventually, we should clean up inqueue to not rely
 	 * on the BH related data structures.
 	 */
+	local_bh_disable();
 	list_add_tail(&chunk->list, &q->in_chunk_list);
 	if (chunk->asoc)
 		chunk->asoc->stats.ipackets++;
 	q->immediate.func(&q->immediate);
+	local_bh_enable();
 }
 
 /* Peek at the next chunk on the inqeue. */
@@ -163,6 +165,9 @@
 		chunk->singleton = 1;
 		ch = (sctp_chunkhdr_t *) chunk->skb->data;
 		chunk->data_accepted = 0;
+
+		if (chunk->asoc)
+			sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);
 	}
 
 	chunk->chunk_hdr = ch;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ce46f1c..0657d18 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -162,7 +162,7 @@
 	skb->network_header   = saveip;
 	skb->transport_header = savesctp;
 	if (!sk) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS);
+		__ICMP6_INC_STATS(net, idev, ICMP6_MIB_INERRORS);
 		goto out;
 	}
 
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 736c004..9844fe5 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -401,7 +401,7 @@
 	sk = chunk->skb->sk;
 
 	/* Allocate the new skb.  */
-	nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
+	nskb = alloc_skb(packet->size + MAX_HEADER, gfp);
 	if (!nskb)
 		goto nomem;
 
@@ -523,8 +523,8 @@
 	 */
 	if (auth)
 		sctp_auth_calculate_hmac(asoc, nskb,
-					(struct sctp_auth_chunk *)auth,
-					GFP_ATOMIC);
+					 (struct sctp_auth_chunk *)auth,
+					 gfp);
 
 	/* 2) Calculate the Adler-32 checksum of the whole packet,
 	 *    including the SCTP common header and all the
@@ -705,7 +705,8 @@
 	/* Check whether this chunk and all the rest of pending data will fit
 	 * or delay in hopes of bundling a full sized packet.
 	 */
-	if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead)
+	if (chunk->skb->len + q->out_qlen >
+		transport->pathmtu - packet->overhead - sizeof(sctp_data_chunk_t) - 4)
 		/* Enough data queued to fill a packet */
 		return SCTP_XMIT_OK;
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 8d3d362..084718f 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -866,8 +866,10 @@
 				 * sender MUST assure that at least one T3-rtx
 				 * timer is running.
 				 */
-				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
-					sctp_transport_reset_timers(transport);
+				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+					sctp_transport_reset_t3_rtx(transport);
+					transport->last_time_sent = jiffies;
+				}
 			}
 			break;
 
@@ -924,8 +926,10 @@
 			error = sctp_outq_flush_rtx(q, packet,
 						    rtx_timeout, &start_timer);
 
-			if (start_timer)
-				sctp_transport_reset_timers(transport);
+			if (start_timer) {
+				sctp_transport_reset_t3_rtx(transport);
+				transport->last_time_sent = jiffies;
+			}
 
 			/* This can happen on COOKIE-ECHO resend.  Only
 			 * one chunk can get bundled with a COOKIE-ECHO.
@@ -1062,7 +1066,8 @@
 			list_add_tail(&chunk->transmitted_list,
 				      &transport->transmitted);
 
-			sctp_transport_reset_timers(transport);
+			sctp_transport_reset_t3_rtx(transport);
+			transport->last_time_sent = jiffies;
 
 			/* Only let one DATA chunk get bundled with a
 			 * COOKIE-ECHO chunk.
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 5cfac8d..4cb5aed 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -280,82 +280,38 @@
 struct sctp_ht_iter {
 	struct seq_net_private p;
 	struct rhashtable_iter hti;
+	int start_fail;
 };
 
-static struct sctp_transport *sctp_transport_get_next(struct seq_file *seq)
+static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct sctp_ht_iter *iter = seq->private;
-	struct sctp_transport *t;
+	int err = sctp_transport_walk_start(&iter->hti);
 
-	t = rhashtable_walk_next(&iter->hti);
-	for (; t; t = rhashtable_walk_next(&iter->hti)) {
-		if (IS_ERR(t)) {
-			if (PTR_ERR(t) == -EAGAIN)
-				continue;
-			break;
-		}
-
-		if (net_eq(sock_net(t->asoc->base.sk), seq_file_net(seq)) &&
-		    t->asoc->peer.primary_path == t)
-			break;
+	if (err) {
+		iter->start_fail = 1;
+		return ERR_PTR(err);
 	}
 
-	return t;
+	return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
 }
 
-static struct sctp_transport *sctp_transport_get_idx(struct seq_file *seq,
-						     loff_t pos)
-{
-	void *obj = SEQ_START_TOKEN;
-
-	while (pos && (obj = sctp_transport_get_next(seq)) && !IS_ERR(obj))
-		pos--;
-
-	return obj;
-}
-
-static int sctp_transport_walk_start(struct seq_file *seq)
-{
-	struct sctp_ht_iter *iter = seq->private;
-	int err;
-
-	err = rhashtable_walk_init(&sctp_transport_hashtable, &iter->hti);
-	if (err)
-		return err;
-
-	err = rhashtable_walk_start(&iter->hti);
-
-	return err == -EAGAIN ? 0 : err;
-}
-
-static void sctp_transport_walk_stop(struct seq_file *seq)
+static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
 {
 	struct sctp_ht_iter *iter = seq->private;
 
-	rhashtable_walk_stop(&iter->hti);
-	rhashtable_walk_exit(&iter->hti);
+	if (iter->start_fail)
+		return;
+	sctp_transport_walk_stop(&iter->hti);
 }
 
-static void *sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
+static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	int err = sctp_transport_walk_start(seq);
+	struct sctp_ht_iter *iter = seq->private;
 
-	if (err)
-		return ERR_PTR(err);
-
-	return sctp_transport_get_idx(seq, *pos);
-}
-
-static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
-{
-	sctp_transport_walk_stop(seq);
-}
-
-static void *sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
 	++*pos;
 
-	return sctp_transport_get_next(seq);
+	return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
 }
 
 /* Display sctp associations (/proc/net/sctp/assocs). */
@@ -416,9 +372,9 @@
 }
 
 static const struct seq_operations sctp_assoc_ops = {
-	.start = sctp_assocs_seq_start,
-	.next  = sctp_assocs_seq_next,
-	.stop  = sctp_assocs_seq_stop,
+	.start = sctp_transport_seq_start,
+	.next  = sctp_transport_seq_next,
+	.stop  = sctp_transport_seq_stop,
 	.show  = sctp_assocs_seq_show,
 };
 
@@ -455,28 +411,6 @@
 	remove_proc_entry("assocs", net->sctp.proc_net_sctp);
 }
 
-static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	int err = sctp_transport_walk_start(seq);
-
-	if (err)
-		return ERR_PTR(err);
-
-	return sctp_transport_get_idx(seq, *pos);
-}
-
-static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	++*pos;
-
-	return sctp_transport_get_next(seq);
-}
-
-static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
-{
-	sctp_transport_walk_stop(seq);
-}
-
 static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
 {
 	struct sctp_association *assoc;
@@ -550,9 +484,9 @@
 }
 
 static const struct seq_operations sctp_remaddr_ops = {
-	.start = sctp_remaddr_seq_start,
-	.next  = sctp_remaddr_seq_next,
-	.stop  = sctp_remaddr_seq_stop,
+	.start = sctp_transport_seq_start,
+	.next  = sctp_transport_seq_next,
+	.stop  = sctp_transport_seq_stop,
 	.show  = sctp_remaddr_seq_show,
 };
 
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
new file mode 100644
index 0000000..8e3e769
--- /dev/null
+++ b/net/sctp/sctp_diag.c
@@ -0,0 +1,500 @@
+#include <linux/module.h>
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+#include <net/sctp/sctp.h>
+
+extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
+				      struct sock *sk);
+extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+				    struct inet_diag_msg *r, int ext,
+				    struct user_namespace *user_ns);
+
+static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+			       void *info);
+
+/* define some functions to make asoc/ep fill look clean */
+static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
+					struct sock *sk,
+					struct sctp_association *asoc)
+{
+	union sctp_addr laddr, paddr;
+	struct dst_entry *dst;
+
+	laddr = list_entry(asoc->base.bind_addr.address_list.next,
+			   struct sctp_sockaddr_entry, list)->a;
+	paddr = asoc->peer.primary_path->ipaddr;
+	dst = asoc->peer.primary_path->dst;
+
+	r->idiag_family = sk->sk_family;
+	r->id.idiag_sport = htons(asoc->base.bind_addr.port);
+	r->id.idiag_dport = htons(asoc->peer.port);
+	r->id.idiag_if = dst ? dst->dev->ifindex : 0;
+	sock_diag_save_cookie(sk, r->id.idiag_cookie);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == AF_INET6) {
+		*(struct in6_addr *)r->id.idiag_src = laddr.v6.sin6_addr;
+		*(struct in6_addr *)r->id.idiag_dst = paddr.v6.sin6_addr;
+	} else
+#endif
+	{
+		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
+		r->id.idiag_src[0] = laddr.v4.sin_addr.s_addr;
+		r->id.idiag_dst[0] = paddr.v4.sin_addr.s_addr;
+	}
+
+	r->idiag_state = asoc->state;
+	r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
+	r->idiag_retrans = asoc->rtx_data_chunks;
+	r->idiag_expires = jiffies_to_msecs(
+		asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies);
+}
+
+static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
+					 struct list_head *address_list)
+{
+	struct sctp_sockaddr_entry *laddr;
+	int addrlen = sizeof(struct sockaddr_storage);
+	int addrcnt = 0;
+	struct nlattr *attr;
+	void *info = NULL;
+
+	list_for_each_entry_rcu(laddr, address_list, list)
+		addrcnt++;
+
+	attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
+	if (!attr)
+		return -EMSGSIZE;
+
+	info = nla_data(attr);
+	list_for_each_entry_rcu(laddr, address_list, list) {
+		memcpy(info, &laddr->a, addrlen);
+		info += addrlen;
+	}
+
+	return 0;
+}
+
+static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
+					struct sctp_association *asoc)
+{
+	int addrlen = sizeof(struct sockaddr_storage);
+	struct sctp_transport *from;
+	struct nlattr *attr;
+	void *info = NULL;
+
+	attr = nla_reserve(skb, INET_DIAG_PEERS,
+			   addrlen * asoc->peer.transport_count);
+	if (!attr)
+		return -EMSGSIZE;
+
+	info = nla_data(attr);
+	list_for_each_entry(from, &asoc->peer.transport_addr_list,
+			    transports) {
+		memcpy(info, &from->ipaddr, addrlen);
+		info += addrlen;
+	}
+
+	return 0;
+}
+
+/* sctp asoc/ep fill*/
+static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
+			       struct sk_buff *skb,
+			       const struct inet_diag_req_v2 *req,
+			       struct user_namespace *user_ns,
+			       int portid, u32 seq, u16 nlmsg_flags,
+			       const struct nlmsghdr *unlh)
+{
+	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+	struct list_head *addr_list;
+	struct inet_diag_msg *r;
+	struct nlmsghdr  *nlh;
+	int ext = req->idiag_ext;
+	struct sctp_infox infox;
+	void *info = NULL;
+
+	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
+			nlmsg_flags);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	r = nlmsg_data(nlh);
+	BUG_ON(!sk_fullsock(sk));
+
+	if (asoc) {
+		inet_diag_msg_sctpasoc_fill(r, sk, asoc);
+	} else {
+		inet_diag_msg_common_fill(r, sk);
+		r->idiag_state = sk->sk_state;
+		r->idiag_timer = 0;
+		r->idiag_retrans = 0;
+	}
+
+	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
+		goto errout;
+
+	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
+		u32 mem[SK_MEMINFO_VARS];
+		int amt;
+
+		if (asoc && asoc->ep->sndbuf_policy)
+			amt = asoc->sndbuf_used;
+		else
+			amt = sk_wmem_alloc_get(sk);
+		mem[SK_MEMINFO_WMEM_ALLOC] = amt;
+		if (asoc && asoc->ep->rcvbuf_policy)
+			amt = atomic_read(&asoc->rmem_alloc);
+		else
+			amt = sk_rmem_alloc_get(sk);
+		mem[SK_MEMINFO_RMEM_ALLOC] = amt;
+		mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+		mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+		mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+		mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+		mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+		mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+		mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+
+		if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
+			goto errout;
+	}
+
+	if (ext & (1 << (INET_DIAG_INFO - 1))) {
+		struct nlattr *attr;
+
+		attr = nla_reserve_64bit(skb, INET_DIAG_INFO,
+					 sizeof(struct sctp_info),
+					 INET_DIAG_PAD);
+		if (!attr)
+			goto errout;
+
+		info = nla_data(attr);
+	}
+	infox.sctpinfo = (struct sctp_info *)info;
+	infox.asoc = asoc;
+	sctp_diag_get_info(sk, r, &infox);
+
+	addr_list = asoc ? &asoc->base.bind_addr.address_list
+			 : &ep->base.bind_addr.address_list;
+	if (inet_diag_msg_sctpladdrs_fill(skb, addr_list))
+		goto errout;
+
+	if (asoc && (ext & (1 << (INET_DIAG_CONG - 1))))
+		if (nla_put_string(skb, INET_DIAG_CONG, "reno") < 0)
+			goto errout;
+
+	if (asoc && inet_diag_msg_sctpaddrs_fill(skb, asoc))
+		goto errout;
+
+	nlmsg_end(skb, nlh);
+	return 0;
+
+errout:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+/* callback and param */
+struct sctp_comm_param {
+	struct sk_buff *skb;
+	struct netlink_callback *cb;
+	const struct inet_diag_req_v2 *r;
+	const struct nlmsghdr *nlh;
+};
+
+static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+{
+	int addrlen = sizeof(struct sockaddr_storage);
+	int addrcnt = 0;
+	struct sctp_sockaddr_entry *laddr;
+
+	list_for_each_entry_rcu(laddr, &asoc->base.bind_addr.address_list,
+				list)
+		addrcnt++;
+
+	return	  nla_total_size(sizeof(struct sctp_info))
+		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+		+ nla_total_size(1) /* INET_DIAG_TOS */
+		+ nla_total_size(1) /* INET_DIAG_TCLASS */
+		+ nla_total_size(addrlen * asoc->peer.transport_count)
+		+ nla_total_size(addrlen * addrcnt)
+		+ nla_total_size(sizeof(struct inet_diag_meminfo))
+		+ nla_total_size(sizeof(struct inet_diag_msg))
+		+ 64;
+}
+
+static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
+{
+	struct sctp_association *assoc = tsp->asoc;
+	struct sock *sk = tsp->asoc->base.sk;
+	struct sctp_comm_param *commp = p;
+	struct sk_buff *in_skb = commp->skb;
+	const struct inet_diag_req_v2 *req = commp->r;
+	const struct nlmsghdr *nlh = commp->nlh;
+	struct net *net = sock_net(in_skb->sk);
+	struct sk_buff *rep;
+	int err;
+
+	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
+	if (err)
+		goto out;
+
+	err = -ENOMEM;
+	rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
+	if (!rep)
+		goto out;
+
+	lock_sock(sk);
+	if (sk != assoc->base.sk) {
+		release_sock(sk);
+		sk = assoc->base.sk;
+		lock_sock(sk);
+	}
+	err = inet_sctp_diag_fill(sk, assoc, rep, req,
+				  sk_user_ns(NETLINK_CB(in_skb).sk),
+				  NETLINK_CB(in_skb).portid,
+				  nlh->nlmsg_seq, 0, nlh);
+	release_sock(sk);
+	if (err < 0) {
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(rep);
+		goto out;
+	}
+
+	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
+			      MSG_DONTWAIT);
+	if (err > 0)
+		err = 0;
+out:
+	return err;
+}
+
+static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
+{
+	struct sctp_endpoint *ep = tsp->asoc->ep;
+	struct sctp_comm_param *commp = p;
+	struct sock *sk = ep->base.sk;
+	struct sk_buff *skb = commp->skb;
+	struct netlink_callback *cb = commp->cb;
+	const struct inet_diag_req_v2 *r = commp->r;
+	struct sctp_association *assoc =
+		list_entry(ep->asocs.next, struct sctp_association, asocs);
+	int err = 0;
+
+	/* find the ep only once through the transports by this condition */
+	if (tsp->asoc != assoc)
+		goto out;
+
+	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
+		goto out;
+
+	lock_sock(sk);
+	if (sk != assoc->base.sk)
+		goto release;
+	list_for_each_entry(assoc, &ep->asocs, asocs) {
+		if (cb->args[4] < cb->args[1])
+			goto next;
+
+		if (r->id.idiag_sport != htons(assoc->base.bind_addr.port) &&
+		    r->id.idiag_sport)
+			goto next;
+		if (r->id.idiag_dport != htons(assoc->peer.port) &&
+		    r->id.idiag_dport)
+			goto next;
+
+		if (!cb->args[3] &&
+		    inet_sctp_diag_fill(sk, NULL, skb, r,
+					sk_user_ns(NETLINK_CB(cb->skb).sk),
+					NETLINK_CB(cb->skb).portid,
+					cb->nlh->nlmsg_seq,
+					NLM_F_MULTI, cb->nlh) < 0) {
+			cb->args[3] = 1;
+			err = 2;
+			goto release;
+		}
+		cb->args[3] = 1;
+
+		if (inet_sctp_diag_fill(sk, assoc, skb, r,
+					sk_user_ns(NETLINK_CB(cb->skb).sk),
+					NETLINK_CB(cb->skb).portid,
+					cb->nlh->nlmsg_seq, 0, cb->nlh) < 0) {
+			err = 2;
+			goto release;
+		}
+next:
+		cb->args[4]++;
+	}
+	cb->args[1] = 0;
+	cb->args[2]++;
+	cb->args[3] = 0;
+	cb->args[4] = 0;
+release:
+	release_sock(sk);
+	return err;
+out:
+	cb->args[2]++;
+	return err;
+}
+
+static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
+{
+	struct sctp_comm_param *commp = p;
+	struct sock *sk = ep->base.sk;
+	struct sk_buff *skb = commp->skb;
+	struct netlink_callback *cb = commp->cb;
+	const struct inet_diag_req_v2 *r = commp->r;
+	struct net *net = sock_net(skb->sk);
+	struct inet_sock *inet = inet_sk(sk);
+	int err = 0;
+
+	if (!net_eq(sock_net(sk), net))
+		goto out;
+
+	if (cb->args[4] < cb->args[1])
+		goto next;
+
+	if (r->sdiag_family != AF_UNSPEC &&
+	    sk->sk_family != r->sdiag_family)
+		goto next;
+
+	if (r->id.idiag_sport != inet->inet_sport &&
+	    r->id.idiag_sport)
+		goto next;
+
+	if (r->id.idiag_dport != inet->inet_dport &&
+	    r->id.idiag_dport)
+		goto next;
+
+	if (inet_sctp_diag_fill(sk, NULL, skb, r,
+				sk_user_ns(NETLINK_CB(cb->skb).sk),
+				NETLINK_CB(cb->skb).portid,
+				cb->nlh->nlmsg_seq, NLM_F_MULTI,
+				cb->nlh) < 0) {
+		err = 2;
+		goto out;
+	}
+next:
+	cb->args[4]++;
+out:
+	return err;
+}
+
+/* define the functions for sctp_diag_handler*/
+static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+			       void *info)
+{
+	struct sctp_infox *infox = (struct sctp_infox *)info;
+
+	if (infox->asoc) {
+		r->idiag_rqueue = atomic_read(&infox->asoc->rmem_alloc);
+		r->idiag_wqueue = infox->asoc->sndbuf_used;
+	} else {
+		r->idiag_rqueue = sk->sk_ack_backlog;
+		r->idiag_wqueue = sk->sk_max_ack_backlog;
+	}
+	if (infox->sctpinfo)
+		sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
+}
+
+static int sctp_diag_dump_one(struct sk_buff *in_skb,
+			      const struct nlmsghdr *nlh,
+			      const struct inet_diag_req_v2 *req)
+{
+	struct net *net = sock_net(in_skb->sk);
+	union sctp_addr laddr, paddr;
+	struct sctp_comm_param commp = {
+		.skb = in_skb,
+		.r = req,
+		.nlh = nlh,
+	};
+
+	if (req->sdiag_family == AF_INET) {
+		laddr.v4.sin_port = req->id.idiag_sport;
+		laddr.v4.sin_addr.s_addr = req->id.idiag_src[0];
+		laddr.v4.sin_family = AF_INET;
+
+		paddr.v4.sin_port = req->id.idiag_dport;
+		paddr.v4.sin_addr.s_addr = req->id.idiag_dst[0];
+		paddr.v4.sin_family = AF_INET;
+	} else {
+		laddr.v6.sin6_port = req->id.idiag_sport;
+		memcpy(&laddr.v6.sin6_addr, req->id.idiag_src, 64);
+		laddr.v6.sin6_family = AF_INET6;
+
+		paddr.v6.sin6_port = req->id.idiag_dport;
+		memcpy(&paddr.v6.sin6_addr, req->id.idiag_dst, 64);
+		paddr.v6.sin6_family = AF_INET6;
+	}
+
+	return sctp_transport_lookup_process(sctp_tsp_dump_one,
+					     net, &laddr, &paddr, &commp);
+}
+
+static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+			   const struct inet_diag_req_v2 *r, struct nlattr *bc)
+{
+	u32 idiag_states = r->idiag_states;
+	struct net *net = sock_net(skb->sk);
+	struct sctp_comm_param commp = {
+		.skb = skb,
+		.cb = cb,
+		.r = r,
+	};
+
+	/* eps hashtable dumps
+	 * args:
+	 * 0 : if it will traversal listen sock
+	 * 1 : to record the sock pos of this time's traversal
+	 * 4 : to work as a temporary variable to traversal list
+	 */
+	if (cb->args[0] == 0) {
+		if (!(idiag_states & TCPF_LISTEN))
+			goto skip;
+		if (sctp_for_each_endpoint(sctp_ep_dump, &commp))
+			goto done;
+skip:
+		cb->args[0] = 1;
+		cb->args[1] = 0;
+		cb->args[4] = 0;
+	}
+
+	/* asocs by transport hashtable dump
+	 * args:
+	 * 1 : to record the assoc pos of this time's traversal
+	 * 2 : to record the transport pos of this time's traversal
+	 * 3 : to mark if we have dumped the ep info of the current asoc
+	 * 4 : to work as a temporary variable to traversal list
+	 */
+	if (!(idiag_states & ~TCPF_LISTEN))
+		goto done;
+	sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
+done:
+	cb->args[1] = cb->args[4];
+	cb->args[4] = 0;
+}
+
+static const struct inet_diag_handler sctp_diag_handler = {
+	.dump		 = sctp_diag_dump,
+	.dump_one	 = sctp_diag_dump_one,
+	.idiag_get_info  = sctp_diag_get_info,
+	.idiag_type	 = IPPROTO_SCTP,
+	.idiag_info_size = sizeof(struct sctp_info),
+};
+
+static int __init sctp_diag_init(void)
+{
+	return inet_diag_register(&sctp_diag_handler);
+}
+
+static void __exit sctp_diag_exit(void)
+{
+	inet_diag_unregister(&sctp_diag_handler);
+}
+
+module_init(sctp_diag_init);
+module_exit(sctp_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-132);
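
The new sctp_diag.c keeps its dump position in cb->args[] (documented in the comments above) so a multi-part netlink dump can resume where the previous skb filled up: one slot records whether the listening endpoints were already walked, others record how many associations or transports were emitted last time. A generic userspace sketch of that resumable-cursor pattern follows; the args array and the item list are invented, and only the "skip what was already sent, stop when the buffer is full" shape mirrors the patch.

#include <stdio.h>

#define NITEMS 10
#define BATCH  4	/* pretend one skb only holds 4 records per call */

/* One "dump" callback invocation: resume at args[1], emit up to BATCH items. */
static int dump(long args[2])
{
	long pos;
	int emitted = 0;

	for (pos = 0; pos < NITEMS; pos++) {
		if (pos < args[1])
			continue;		/* already sent in an earlier call */
		if (emitted == BATCH) {
			args[1] = pos;		/* remember where to resume */
			return 1;		/* more to come */
		}
		printf("record %ld\n", pos);
		emitted++;
	}
	args[1] = pos;
	return 0;				/* dump complete */
}

int main(void)
{
	long args[2] = { 0, 0 };

	while (dump(args))	/* the netlink core would re-call the dump until it returns 0 */
		printf("-- buffer full, resuming at %ld --\n", args[1]);
	return 0;
}
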
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 7f0bf79..56f364d 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3080,8 +3080,7 @@
 			return SCTP_ERROR_RSRC_LOW;
 
 		/* Start the heartbeat timer. */
-		if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
-			sctp_transport_hold(peer);
+		sctp_transport_reset_hb_timer(peer);
 		asoc->new_transport = peer;
 		break;
 	case SCTP_PARAM_DEL_IP:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 7fe56d0..aa37122 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -69,8 +69,6 @@
 			     sctp_cmd_seq_t *commands,
 			     gfp_t gfp);
 
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-				     struct sctp_transport *t);
 /********************************************************************
  * Helper functions
  ********************************************************************/
@@ -367,6 +365,7 @@
 	struct sctp_association *asoc = transport->asoc;
 	struct sock *sk = asoc->base.sk;
 	struct net *net = sock_net(sk);
+	u32 elapsed, timeout;
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
@@ -378,6 +377,16 @@
 		goto out_unlock;
 	}
 
+	/* Check if we should still send the heartbeat or reschedule */
+	elapsed = jiffies - transport->last_time_sent;
+	timeout = sctp_transport_timeout(transport);
+	if (elapsed < timeout) {
+		elapsed = timeout - elapsed;
+		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
+			sctp_transport_hold(transport);
+		goto out_unlock;
+	}
+
 	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
 			   asoc->state, asoc->ep, asoc,
@@ -507,7 +516,7 @@
 					     0);
 
 		/* Update the hb timer to resend a heartbeat every rto */
-		sctp_cmd_hb_timer_update(commands, transport);
+		sctp_transport_reset_hb_timer(transport);
 	}
 
 	if (transport->state != SCTP_INACTIVE &&
@@ -634,11 +643,8 @@
 	 * hold a reference on the transport to make sure none of
 	 * the needed data structures go away.
 	 */
-	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
-
-		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-			sctp_transport_hold(t);
-	}
+	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+		sctp_transport_reset_hb_timer(t);
 }
 
 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
@@ -669,15 +675,6 @@
 }
 
 
-/* Helper function to update the heartbeat timer. */
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-				     struct sctp_transport *t)
-{
-	/* Update the heartbeat timer.  */
-	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-		sctp_transport_hold(t);
-}
-
 /* Helper function to handle the reception of an HEARTBEAT ACK.  */
 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 				  struct sctp_association *asoc,
@@ -742,8 +739,7 @@
 	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
 
 	/* Update the heartbeat timer.  */
-	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-		sctp_transport_hold(t);
+	sctp_transport_reset_hb_timer(t);
 
 	if (was_unconfirmed && asoc->peer.transport_count == 1)
 		sctp_transport_immediate_rtx(t);
@@ -1222,6 +1218,8 @@
 				sctp_cmd_seq_t *commands,
 				gfp_t gfp)
 {
+	struct sock *sk = ep->base.sk;
+	struct sctp_sock *sp = sctp_sk(sk);
 	int error = 0;
 	int force;
 	sctp_cmd_t *cmd;
@@ -1614,7 +1612,7 @@
 
 		case SCTP_CMD_HB_TIMER_UPDATE:
 			t = cmd->obj.transport;
-			sctp_cmd_hb_timer_update(commands, t);
+			sctp_transport_reset_hb_timer(t);
 			break;
 
 		case SCTP_CMD_HB_TIMERS_STOP:
@@ -1742,6 +1740,10 @@
 			error = sctp_outq_uncork(&asoc->outqueue, gfp);
 	} else if (local_cork)
 		error = sctp_outq_uncork(&asoc->outqueue, gfp);
+
+	if (sp->data_ready_signalled)
+		sp->data_ready_signalled = 0;
+
 	return error;
 nomem:
 	error = -ENOMEM;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 878d28e..777d032 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4202,6 +4202,222 @@
 	}
 }
 
+int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+		       struct sctp_info *info)
+{
+	struct sctp_transport *prim;
+	struct list_head *pos;
+	int mask;
+
+	memset(info, 0, sizeof(*info));
+	if (!asoc) {
+		struct sctp_sock *sp = sctp_sk(sk);
+
+		info->sctpi_s_autoclose = sp->autoclose;
+		info->sctpi_s_adaptation_ind = sp->adaptation_ind;
+		info->sctpi_s_pd_point = sp->pd_point;
+		info->sctpi_s_nodelay = sp->nodelay;
+		info->sctpi_s_disable_fragments = sp->disable_fragments;
+		info->sctpi_s_v4mapped = sp->v4mapped;
+		info->sctpi_s_frag_interleave = sp->frag_interleave;
+
+		return 0;
+	}
+
+	info->sctpi_tag = asoc->c.my_vtag;
+	info->sctpi_state = asoc->state;
+	info->sctpi_rwnd = asoc->a_rwnd;
+	info->sctpi_unackdata = asoc->unack_data;
+	info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
+	info->sctpi_instrms = asoc->c.sinit_max_instreams;
+	info->sctpi_outstrms = asoc->c.sinit_num_ostreams;
+	list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
+		info->sctpi_inqueue++;
+	list_for_each(pos, &asoc->outqueue.out_chunk_list)
+		info->sctpi_outqueue++;
+	info->sctpi_overall_error = asoc->overall_error_count;
+	info->sctpi_max_burst = asoc->max_burst;
+	info->sctpi_maxseg = asoc->frag_point;
+	info->sctpi_peer_rwnd = asoc->peer.rwnd;
+	info->sctpi_peer_tag = asoc->c.peer_vtag;
+
+	mask = asoc->peer.ecn_capable << 1;
+	mask = (mask | asoc->peer.ipv4_address) << 1;
+	mask = (mask | asoc->peer.ipv6_address) << 1;
+	mask = (mask | asoc->peer.hostname_address) << 1;
+	mask = (mask | asoc->peer.asconf_capable) << 1;
+	mask = (mask | asoc->peer.prsctp_capable) << 1;
+	mask = (mask | asoc->peer.auth_capable);
+	info->sctpi_peer_capable = mask;
+	mask = asoc->peer.sack_needed << 1;
+	mask = (mask | asoc->peer.sack_generation) << 1;
+	mask = (mask | asoc->peer.zero_window_announced);
+	info->sctpi_peer_sack = mask;
+
+	info->sctpi_isacks = asoc->stats.isacks;
+	info->sctpi_osacks = asoc->stats.osacks;
+	info->sctpi_opackets = asoc->stats.opackets;
+	info->sctpi_ipackets = asoc->stats.ipackets;
+	info->sctpi_rtxchunks = asoc->stats.rtxchunks;
+	info->sctpi_outofseqtsns = asoc->stats.outofseqtsns;
+	info->sctpi_idupchunks = asoc->stats.idupchunks;
+	info->sctpi_gapcnt = asoc->stats.gapcnt;
+	info->sctpi_ouodchunks = asoc->stats.ouodchunks;
+	info->sctpi_iuodchunks = asoc->stats.iuodchunks;
+	info->sctpi_oodchunks = asoc->stats.oodchunks;
+	info->sctpi_iodchunks = asoc->stats.iodchunks;
+	info->sctpi_octrlchunks = asoc->stats.octrlchunks;
+	info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
+
+	prim = asoc->peer.primary_path;
+	memcpy(&info->sctpi_p_address, &prim->ipaddr,
+	       sizeof(struct sockaddr_storage));
+	info->sctpi_p_state = prim->state;
+	info->sctpi_p_cwnd = prim->cwnd;
+	info->sctpi_p_srtt = prim->srtt;
+	info->sctpi_p_rto = jiffies_to_msecs(prim->rto);
+	info->sctpi_p_hbinterval = prim->hbinterval;
+	info->sctpi_p_pathmaxrxt = prim->pathmaxrxt;
+	info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay);
+	info->sctpi_p_ssthresh = prim->ssthresh;
+	info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked;
+	info->sctpi_p_flight_size = prim->flight_size;
+	info->sctpi_p_error = prim->error_count;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
+
+/* use callback to avoid exporting the core structure */
+int sctp_transport_walk_start(struct rhashtable_iter *iter)
+{
+	int err;
+
+	err = rhashtable_walk_init(&sctp_transport_hashtable, iter,
+				   GFP_KERNEL);
+	if (err)
+		return err;
+
+	err = rhashtable_walk_start(iter);
+	if (err && err != -EAGAIN) {
+		rhashtable_walk_exit(iter);
+		return err;
+	}
+
+	return 0;
+}
+
+void sctp_transport_walk_stop(struct rhashtable_iter *iter)
+{
+	rhashtable_walk_stop(iter);
+	rhashtable_walk_exit(iter);
+}
+
+struct sctp_transport *sctp_transport_get_next(struct net *net,
+					       struct rhashtable_iter *iter)
+{
+	struct sctp_transport *t;
+
+	t = rhashtable_walk_next(iter);
+	for (; t; t = rhashtable_walk_next(iter)) {
+		if (IS_ERR(t)) {
+			if (PTR_ERR(t) == -EAGAIN)
+				continue;
+			break;
+		}
+
+		if (net_eq(sock_net(t->asoc->base.sk), net) &&
+		    t->asoc->peer.primary_path == t)
+			break;
+	}
+
+	return t;
+}
+
+struct sctp_transport *sctp_transport_get_idx(struct net *net,
+					      struct rhashtable_iter *iter,
+					      int pos)
+{
+	void *obj = SEQ_START_TOKEN;
+
+	while (pos && (obj = sctp_transport_get_next(net, iter)) &&
+	       !IS_ERR(obj))
+		pos--;
+
+	return obj;
+}
+
+int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
+			   void *p) {
+	int err = 0;
+	int hash = 0;
+	struct sctp_ep_common *epb;
+	struct sctp_hashbucket *head;
+
+	for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
+	     hash++, head++) {
+		read_lock(&head->lock);
+		sctp_for_each_hentry(epb, &head->chain) {
+			err = cb(sctp_ep(epb), p);
+			if (err)
+				break;
+		}
+		read_unlock(&head->lock);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
+
+int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
+				  struct net *net,
+				  const union sctp_addr *laddr,
+				  const union sctp_addr *paddr, void *p)
+{
+	struct sctp_transport *transport;
+	int err = 0;
+
+	rcu_read_lock();
+	transport = sctp_addrs_lookup_transport(net, laddr, paddr);
+	if (!transport || !sctp_transport_hold(transport))
+		goto out;
+	err = cb(transport, p);
+	sctp_transport_put(transport);
+
+out:
+	rcu_read_unlock();
+	return err;
+}
+EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
+
+int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
+			    struct net *net, int pos, void *p) {
+	struct rhashtable_iter hti;
+	void *obj;
+	int err;
+
+	err = sctp_transport_walk_start(&hti);
+	if (err)
+		return err;
+
+	sctp_transport_get_idx(net, &hti, pos);
+	obj = sctp_transport_get_next(net, &hti);
+	for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
+		struct sctp_transport *transport = obj;
+
+		if (!sctp_transport_hold(transport))
+			continue;
+		err = cb(transport, p);
+		sctp_transport_put(transport);
+		if (err)
+			break;
+	}
+	sctp_transport_walk_stop(&hti);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(sctp_for_each_transport);
+
 /* 7.2.1 Association Status (SCTP_STATUS)
 
  * Applications can retrieve current status information about an
@@ -6430,6 +6646,8 @@
 
 	poll_wait(file, sk_sleep(sk), wait);
 
+	sock_rps_record_flow(sk);
+
 	/* A TCP-style listening socket becomes readable when the accept queue
 	 * is not empty.
 	 */
@@ -6764,13 +6982,11 @@
 		 *  However, this function was correct in any case. 8)
 		 */
 		if (flags & MSG_PEEK) {
-			spin_lock_bh(&sk->sk_receive_queue.lock);
 			skb = skb_peek(&sk->sk_receive_queue);
 			if (skb)
 				atomic_inc(&skb->users);
-			spin_unlock_bh(&sk->sk_receive_queue.lock);
 		} else {
-			skb = skb_dequeue(&sk->sk_receive_queue);
+			skb = __skb_dequeue(&sk->sk_receive_queue);
 		}
 
 		if (skb)
@@ -7186,6 +7402,7 @@
 	newsk->sk_lingertime = sk->sk_lingertime;
 	newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
 	newsk->sk_sndtimeo = sk->sk_sndtimeo;
+	newsk->sk_rxhash = sk->sk_rxhash;
 
 	newinet = inet_sk(newsk);
 
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 9b6b48c..81b8667 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -183,7 +183,7 @@
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer.  This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
 {
 	/* RFC 2960 6.3.2 Retransmission Timer Rules
 	 *
@@ -197,11 +197,18 @@
 		if (!mod_timer(&transport->T3_rtx_timer,
 			       jiffies + transport->rto))
 			sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
+{
+	unsigned long expires;
 
 	/* When a data chunk is sent, reset the heartbeat interval.  */
-	if (!mod_timer(&transport->hb_timer,
-		       sctp_transport_timeout(transport)))
-	    sctp_transport_hold(transport);
+	expires = jiffies + sctp_transport_timeout(transport);
+	if (time_before(transport->hb_timer.expires, expires) &&
+	    !mod_timer(&transport->hb_timer,
+		       expires + prandom_u32_max(transport->rto)))
+		sctp_transport_hold(transport);
 }
 
 /* This transport has been assigned to an association.
@@ -595,13 +602,13 @@
 unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 {
 	/* RTO + timer slack +/- 50% of RTO */
-	unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+	unsigned long timeout = trans->rto >> 1;
 
 	if (trans->state != SCTP_UNCONFIRMED &&
 	    trans->state != SCTP_PF)
 		timeout += trans->hbinterval;
 
-	return timeout + jiffies;
+	return timeout;
 }
 
 /* Reset transport variables to their initial values */
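
After this change sctp_transport_timeout() returns a relative timeout without jitter, and sctp_transport_reset_hb_timer() adds the random slack (up to one RTO) and only pushes the timer forward, never earlier; that is what lets the timer handler in sm_sideeffect.c reschedule instead of sending a heartbeat when a DATA chunk went out recently. A small arithmetic model of the new expiry computation follows; the jiffies value, rto and the random helper are simulated, and the wraparound-safe time_before() check is reduced to a plain comparison.

#include <stdio.h>
#include <stdlib.h>

/* Simulated clock and transport state (all values in "jiffies"). */
static unsigned long jiffies = 1000;

struct transport {
	unsigned long rto;		/* retransmission timeout */
	unsigned long hbinterval;	/* heartbeat interval */
	unsigned long hb_expires;	/* current timer expiry */
};

static unsigned long transport_timeout(const struct transport *t)
{
	return (t->rto >> 1) + t->hbinterval;	/* relative, no jitter added here */
}

static void reset_hb_timer(struct transport *t)
{
	unsigned long expires = jiffies + transport_timeout(t);

	/* Only extend the timer, and add up to one RTO of random slack. */
	if (t->hb_expires < expires)
		t->hb_expires = expires + (rand() % t->rto);
}

int main(void)
{
	struct transport t = { .rto = 200, .hbinterval = 3000, .hb_expires = 0 };

	reset_hb_timer(&t);
	printf("heartbeat expires at %lu (now %lu)\n", t.hb_expires, jiffies);
	return 0;
}
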
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ce469d6..ec166d2 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -141,7 +141,8 @@
 		 */
 		if (!skb_queue_empty(&sp->pd_lobby)) {
 			struct list_head *list;
-			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
+			skb_queue_splice_tail_init(&sp->pd_lobby,
+						   &sk->sk_receive_queue);
 			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
 			INIT_LIST_HEAD(list);
 			return 1;
@@ -193,6 +194,7 @@
 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
+	struct sctp_sock *sp = sctp_sk(sk);
 	struct sk_buff_head *queue, *skb_list;
 	struct sk_buff *skb = sctp_event2skb(event);
 	int clear_pd = 0;
@@ -210,7 +212,7 @@
 		sk_incoming_cpu_update(sk);
 	}
 	/* Check if the user wishes to receive this event.  */
-	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
+	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
 		goto out_free;
 
 	/* If we are in partial delivery mode, post to the lobby until
@@ -218,7 +220,7 @@
 	 * the association the cause of the partial delivery.
 	 */
 
-	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
+	if (atomic_read(&sp->pd_mode) == 0) {
 		queue = &sk->sk_receive_queue;
 	} else {
 		if (ulpq->pd_mode) {
@@ -230,7 +232,7 @@
 			if ((event->msg_flags & MSG_NOTIFICATION) ||
 			    (SCTP_DATA_NOT_FRAG ==
 				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
-				queue = &sctp_sk(sk)->pd_lobby;
+				queue = &sp->pd_lobby;
 			else {
 				clear_pd = event->msg_flags & MSG_EOR;
 				queue = &sk->sk_receive_queue;
@@ -241,10 +243,10 @@
 			 * can queue this to the receive queue instead
 			 * of the lobby.
 			 */
-			if (sctp_sk(sk)->frag_interleave)
+			if (sp->frag_interleave)
 				queue = &sk->sk_receive_queue;
 			else
-				queue = &sctp_sk(sk)->pd_lobby;
+				queue = &sp->pd_lobby;
 		}
 	}
 
@@ -252,7 +254,7 @@
 	 * collected on a list.
 	 */
 	if (skb_list)
-		sctp_skb_list_tail(skb_list, queue);
+		skb_queue_splice_tail_init(skb_list, queue);
 	else
 		__skb_queue_tail(queue, skb);
 
@@ -263,8 +265,10 @@
 	if (clear_pd)
 		sctp_ulpq_clear_pd(ulpq);
 
-	if (queue == &sk->sk_receive_queue)
+	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
+		sp->data_ready_signalled = 1;
 		sk->sk_data_ready(sk);
+	}
 	return 1;
 
 out_free:
@@ -1125,11 +1129,13 @@
 {
 	struct sctp_ulpevent *ev = NULL;
 	struct sock *sk;
+	struct sctp_sock *sp;
 
 	if (!ulpq->pd_mode)
 		return;
 
 	sk = ulpq->asoc->base.sk;
+	sp = sctp_sk(sk);
 	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
 				       &sctp_sk(sk)->subscribe))
 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
@@ -1139,6 +1145,8 @@
 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
 
 	/* If there is data waiting, send it up the socket now. */
-	if (sctp_ulpq_clear_pd(ulpq) || ev)
+	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
+		sp->data_ready_signalled = 1;
 		sk->sk_data_ready(sk);
+	}
 }
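
The ulpqueue hunks add a data_ready_signalled flag so sk_data_ready() is called at most once per run of side-effect processing: the flag is set when the socket is first woken and cleared at the end of sctp_cmd_interpreter() in sm_sideeffect.c. Below is a trivial model of that "latch the wakeup until the batch ends" idea, with an invented deliver()/end_of_batch() pair standing in for the event queueing and the command interpreter.

#include <stdio.h>

static int data_ready_signalled;

static void wake_reader(void) { printf("sk_data_ready()\n"); }

/* Queue one event; only the first one in a batch wakes the reader. */
static void deliver(int event)
{
	printf("queued event %d\n", event);
	if (!data_ready_signalled) {
		data_ready_signalled = 1;
		wake_reader();
	}
}

/* End of the side-effect batch: allow the next batch to wake the reader again. */
static void end_of_batch(void)
{
	data_ready_signalled = 0;
}

int main(void)
{
	deliver(1);
	deliver(2);	/* no second wakeup within the same batch */
	end_of_batch();
	deliver(3);	/* new batch, reader woken again */
	return 0;
}
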
diff --git a/net/socket.c b/net/socket.c
index 5f77a8e..e7793f5 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -466,7 +466,7 @@
 #define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname"
 #define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX)
 #define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1)
-static ssize_t sockfs_getxattr(struct dentry *dentry,
+static ssize_t sockfs_getxattr(struct dentry *dentry, struct inode *inode,
 			       const char *name, void *value, size_t size)
 {
 	const char *proto_name;
@@ -587,22 +587,19 @@
 }
 EXPORT_SYMBOL(sock_release);
 
-void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
+void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
 {
 	u8 flags = *tx_flags;
 
-	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_HARDWARE)
+	if (tsflags & SOF_TIMESTAMPING_TX_HARDWARE)
 		flags |= SKBTX_HW_TSTAMP;
 
-	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SOFTWARE)
+	if (tsflags & SOF_TIMESTAMPING_TX_SOFTWARE)
 		flags |= SKBTX_SW_TSTAMP;
 
-	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)
+	if (tsflags & SOF_TIMESTAMPING_TX_SCHED)
 		flags |= SKBTX_SCHED_TSTAMP;
 
-	if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)
-		flags |= SKBTX_ACK_TSTAMP;
-
 	*tx_flags = flags;
 }
 EXPORT_SYMBOL(__sock_tx_timestamp);
@@ -709,17 +706,16 @@
 EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
 
 static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
-				     size_t size, int flags)
+				     int flags)
 {
-	return sock->ops->recvmsg(sock, msg, size, flags);
+	return sock->ops->recvmsg(sock, msg, msg_data_left(msg), flags);
 }
 
-int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
-		 int flags)
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags)
 {
-	int err = security_socket_recvmsg(sock, msg, size, flags);
+	int err = security_socket_recvmsg(sock, msg, msg_data_left(msg), flags);
 
-	return err ?: sock_recvmsg_nosec(sock, msg, size, flags);
+	return err ?: sock_recvmsg_nosec(sock, msg, flags);
 }
 EXPORT_SYMBOL(sock_recvmsg);
 
@@ -746,7 +742,7 @@
 
 	iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size);
 	set_fs(KERNEL_DS);
-	result = sock_recvmsg(sock, msg, size, flags);
+	result = sock_recvmsg(sock, msg, flags);
 	set_fs(oldfs);
 	return result;
 }
@@ -796,7 +792,7 @@
 	if (!iov_iter_count(to))	/* Match SYS5 behaviour */
 		return 0;
 
-	res = sock_recvmsg(sock, &msg, iov_iter_count(to), msg.msg_flags);
+	res = sock_recvmsg(sock, &msg, msg.msg_flags);
 	*to = msg.msg_iter;
 	return res;
 }
@@ -1046,7 +1042,7 @@
 		return -EINVAL;
 
 	lock_sock(sk);
-	wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
+	wq = rcu_dereference_protected(sock->wq, lockdep_sock_is_held(sk));
 	fasync_helper(fd, filp, on, &wq->fasync_list);
 
 	if (!wq->fasync_list)
@@ -1696,7 +1692,7 @@
 	msg.msg_iocb = NULL;
 	if (sock->file->f_flags & O_NONBLOCK)
 		flags |= MSG_DONTWAIT;
-	err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);
+	err = sock_recvmsg(sock, &msg, flags);
 
 	if (err >= 0 && addr != NULL) {
 		err2 = move_addr_to_user(&address,
@@ -2073,7 +2069,7 @@
 	struct iovec iovstack[UIO_FASTIOV];
 	struct iovec *iov = iovstack;
 	unsigned long cmsg_ptr;
-	int total_len, len;
+	int len;
 	ssize_t err;
 
 	/* kernel mode address */
@@ -2091,7 +2087,6 @@
 		err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov);
 	if (err < 0)
 		return err;
-	total_len = iov_iter_count(&msg_sys->msg_iter);
 
 	cmsg_ptr = (unsigned long)msg_sys->msg_control;
 	msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
@@ -2101,8 +2096,7 @@
 
 	if (sock->file->f_flags & O_NONBLOCK)
 		flags |= MSG_DONTWAIT;
-	err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
-							  total_len, flags);
+	err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, flags);
 	if (err < 0)
 		goto out_freeiov;
 	len = err;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 8c6bc79..15612ff 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1728,8 +1728,8 @@
 		return 0;
 	}
 
-	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
-	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
+	first = snd_buf->page_base >> PAGE_SHIFT;
+	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
 	rqstp->rq_enc_pages_num = last - first + 1 + 1;
 	rqstp->rq_enc_pages
 		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
@@ -1775,10 +1775,10 @@
 	status = alloc_enc_pages(rqstp);
 	if (status)
 		return status;
-	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
+	first = snd_buf->page_base >> PAGE_SHIFT;
 	inpages = snd_buf->pages + first;
 	snd_buf->pages = rqstp->rq_enc_pages;
-	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
+	snd_buf->page_base -= first << PAGE_SHIFT;
 	/*
 	 * Give the tail its own page, in case we need extra space in the
 	 * head when wrapping:
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index d94a8e1..244245b 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -78,6 +78,7 @@
 	memcpy(out, in, length);
 	sg_init_one(sg, out, length);
 
+	skcipher_request_set_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, length, local_iv);
 
@@ -115,6 +116,7 @@
 	memcpy(out, in, length);
 	sg_init_one(sg, out, length);
 
+	skcipher_request_set_tfm(req, tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
 	skcipher_request_set_crypt(req, sg, sg, length, local_iv);
 
@@ -465,7 +467,7 @@
 	page_pos = desc->pos - outbuf->head[0].iov_len;
 	if (page_pos >= 0 && page_pos < outbuf->page_len) {
 		/* pages are not in place: */
-		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
+		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
 		in_page = desc->pages[i];
 	} else {
 		in_page = sg_page(sg);
@@ -946,7 +948,8 @@
 		return PTR_ERR(hmac);
 	}
 
-	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+		       GFP_KERNEL);
 	if (!desc) {
 		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
 			__func__, kctx->gk5e->cksum_name);
@@ -1012,7 +1015,8 @@
 		return PTR_ERR(hmac);
 	}
 
-	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+		       GFP_KERNEL);
 	if (!desc) {
 		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
 			__func__, kctx->gk5e->cksum_name);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 71341cc..6542749 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -451,7 +451,8 @@
 		goto out_err_free_hmac;
 
 
-	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
+		       GFP_KERNEL);
 	if (!desc) {
 		dprintk("%s: failed to allocate hash descriptor for '%s'\n",
 			__func__, ctx->gk5e->cksum_name);
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 765088e4..a737c2d 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -79,9 +79,9 @@
 		len -= buf->head[0].iov_len;
 	if (len <= buf->page_len) {
 		unsigned int last = (buf->page_base + len - 1)
-					>>PAGE_CACHE_SHIFT;
+					>>PAGE_SHIFT;
 		unsigned int offset = (buf->page_base + len - 1)
-					& (PAGE_CACHE_SIZE - 1);
+					& (PAGE_SIZE - 1);
 		ptr = kmap_atomic(buf->pages[last]);
 		pad = *(ptr + offset);
 		kunmap_atomic(ptr);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 008c25d..553bf95 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -881,7 +881,7 @@
 	char *kaddr;
 	ssize_t ret = -ENOMEM;
 
-	if (count >= PAGE_CACHE_SIZE)
+	if (count >= PAGE_SIZE)
 		goto out_slow;
 
 	page = find_or_create_page(mapping, 0, GFP_KERNEL);
@@ -892,7 +892,7 @@
 	ret = cache_do_downcall(kaddr, buf, count, cd);
 	kunmap(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 	return ret;
 out_slow:
 	return cache_slow_downcall(buf, count, cd);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 31789ef..fc48eca 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1390,8 +1390,8 @@
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 	int err;
 
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_blocksize = PAGE_SIZE;
+	sb->s_blocksize_bits = PAGE_SHIFT;
 	sb->s_magic = RPCAUTH_GSSMAGIC;
 	sb->s_op = &s_ops;
 	sb->s_d_op = &simple_dentry_operations;
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 2df87f7..f217c34 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -96,8 +96,8 @@
 	if (base || xdr->page_base) {
 		pglen -= base;
 		base += xdr->page_base;
-		ppage += base >> PAGE_CACHE_SHIFT;
-		base &= ~PAGE_CACHE_MASK;
+		ppage += base >> PAGE_SHIFT;
+		base &= ~PAGE_MASK;
 	}
 	do {
 		char *kaddr;
@@ -113,7 +113,7 @@
 			}
 		}
 
-		len = PAGE_CACHE_SIZE;
+		len = PAGE_SIZE;
 		kaddr = kmap_atomic(*ppage);
 		if (base) {
 			len -= base;
@@ -155,7 +155,7 @@
 	struct xdr_skb_reader	desc;
 
 	desc.skb = skb;
-	desc.offset = sizeof(struct udphdr);
+	desc.offset = 0;
 	desc.count = skb->len - desc.offset;
 
 	if (skb_csum_unnecessary(skb))
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 1413cdc..dadfec6 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -85,8 +85,7 @@
 {
 	struct sock *sk = sock->sk;
 
-	WARN_ON_ONCE(sock_owned_by_user(sk));
-	if (sock_owned_by_user(sk))
+	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
 		return;
 
 	switch (sk->sk_family) {
@@ -617,7 +616,7 @@
 	svsk->sk_sk->sk_stamp = skb->tstamp;
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
 
-	len  = skb->len - sizeof(struct udphdr);
+	len  = skb->len;
 	rqstp->rq_arg.len = len;
 
 	rqstp->rq_prot = IPPROTO_UDP;
@@ -641,8 +640,7 @@
 		skb_free_datagram_locked(svsk->sk_sk, skb);
 	} else {
 		/* we can use it in-place */
-		rqstp->rq_arg.head[0].iov_base = skb->data +
-			sizeof(struct udphdr);
+		rqstp->rq_arg.head[0].iov_base = skb->data;
 		rqstp->rq_arg.head[0].iov_len = len;
 		if (skb_checksum_complete(skb))
 			goto out_free;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 4439ac4..6bdb386 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -164,7 +164,7 @@
  * Note: the addresses pgto_base and pgfrom_base are both calculated in
  *       the same way:
  *            if a memory area starts at byte 'base' in page 'pages[i]',
- *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
+ *            then its address is given as (i << PAGE_SHIFT) + base
  * Also note: pgfrom_base must be < pgto_base, but the memory areas
  * 	they point to may overlap.
  */
@@ -181,20 +181,20 @@
 	pgto_base += len;
 	pgfrom_base += len;
 
-	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
-	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
+	pgto = pages + (pgto_base >> PAGE_SHIFT);
+	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
 
-	pgto_base &= ~PAGE_CACHE_MASK;
-	pgfrom_base &= ~PAGE_CACHE_MASK;
+	pgto_base &= ~PAGE_MASK;
+	pgfrom_base &= ~PAGE_MASK;
 
 	do {
 		/* Are any pointers crossing a page boundary? */
 		if (pgto_base == 0) {
-			pgto_base = PAGE_CACHE_SIZE;
+			pgto_base = PAGE_SIZE;
 			pgto--;
 		}
 		if (pgfrom_base == 0) {
-			pgfrom_base = PAGE_CACHE_SIZE;
+			pgfrom_base = PAGE_SIZE;
 			pgfrom--;
 		}
 
@@ -236,11 +236,11 @@
 	char *vto;
 	size_t copy;
 
-	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
-	pgbase &= ~PAGE_CACHE_MASK;
+	pgto = pages + (pgbase >> PAGE_SHIFT);
+	pgbase &= ~PAGE_MASK;
 
 	for (;;) {
-		copy = PAGE_CACHE_SIZE - pgbase;
+		copy = PAGE_SIZE - pgbase;
 		if (copy > len)
 			copy = len;
 
@@ -253,7 +253,7 @@
 			break;
 
 		pgbase += copy;
-		if (pgbase == PAGE_CACHE_SIZE) {
+		if (pgbase == PAGE_SIZE) {
 			flush_dcache_page(*pgto);
 			pgbase = 0;
 			pgto++;
@@ -280,11 +280,11 @@
 	char *vfrom;
 	size_t copy;
 
-	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
-	pgbase &= ~PAGE_CACHE_MASK;
+	pgfrom = pages + (pgbase >> PAGE_SHIFT);
+	pgbase &= ~PAGE_MASK;
 
 	do {
-		copy = PAGE_CACHE_SIZE - pgbase;
+		copy = PAGE_SIZE - pgbase;
 		if (copy > len)
 			copy = len;
 
@@ -293,7 +293,7 @@
 		kunmap_atomic(vfrom);
 
 		pgbase += copy;
-		if (pgbase == PAGE_CACHE_SIZE) {
+		if (pgbase == PAGE_SIZE) {
 			pgbase = 0;
 			pgfrom++;
 		}
@@ -1038,8 +1038,8 @@
 	if (base < buf->page_len) {
 		subbuf->page_len = min(buf->page_len - base, len);
 		base += buf->page_base;
-		subbuf->page_base = base & ~PAGE_CACHE_MASK;
-		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
+		subbuf->page_base = base & ~PAGE_MASK;
+		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
 		len -= subbuf->page_len;
 		base = 0;
 	} else {
@@ -1297,9 +1297,9 @@
 		todo -= avail_here;
 
 		base += buf->page_base;
-		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
-		base &= ~PAGE_CACHE_MASK;
-		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
+		ppages = buf->pages + (base >> PAGE_SHIFT);
+		base &= ~PAGE_MASK;
+		avail_page = min_t(unsigned int, PAGE_SIZE - base,
 					avail_here);
 		c = kmap(*ppages) + base;
 
@@ -1383,7 +1383,7 @@
 			}
 
 			avail_page = min(avail_here,
-				 (unsigned int) PAGE_CACHE_SIZE);
+				 (unsigned int) PAGE_SIZE);
 		}
 		base = buf->page_len;  /* align to start of tail */
 	}
@@ -1479,9 +1479,9 @@
 		if (page_len > len)
 			page_len = len;
 		len -= page_len;
-		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
-		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
-		thislen = PAGE_CACHE_SIZE - page_offset;
+		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
+		i = (offset + buf->page_base) >> PAGE_SHIFT;
+		thislen = PAGE_SIZE - page_offset;
 		do {
 			if (thislen > page_len)
 				thislen = page_len;
@@ -1492,7 +1492,7 @@
 			page_len -= thislen;
 			i++;
 			page_offset = 0;
-			thislen = PAGE_CACHE_SIZE;
+			thislen = PAGE_SIZE;
 		} while (page_len != 0);
 		offset = 0;
 	}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 65e7595..b90c539 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -995,15 +995,14 @@
 	u32 _xid;
 	__be32 *xp;
 
-	repsize = skb->len - sizeof(struct udphdr);
+	repsize = skb->len;
 	if (repsize < 4) {
 		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
 		return;
 	}
 
 	/* Copy the XID from the skb... */
-	xp = skb_header_pointer(skb, sizeof(struct udphdr),
-				sizeof(_xid), &_xid);
+	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
 	if (xp == NULL)
 		return;
 
@@ -1019,11 +1018,11 @@
 
 	/* Suck it into the iovec, verify checksum if not done by hw. */
 	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
-		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
+		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
 		goto out_unlock;
 	}
 
-	UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
+	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
 
 	xprt_adjust_cwnd(xprt, task, copied);
 	xprt_complete_rqst(task, copied);
@@ -1881,8 +1880,7 @@
 
 static inline void xs_reclassify_socket(int family, struct socket *sock)
 {
-	WARN_ON_ONCE(sock_owned_by_user(sock->sk));
-	if (sock_owned_by_user(sock->sk))
+	if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
 		return;
 
 	switch (family) {
@@ -1952,6 +1950,7 @@
 		sk->sk_user_data = xprt;
 		sk->sk_data_ready = xs_data_ready;
 		sk->sk_write_space = xs_udp_write_space;
+		sock_set_flag(sk, SOCK_FASYNC);
 		sk->sk_error_report = xs_error_report;
 		sk->sk_allocation = GFP_NOIO;
 
@@ -2138,6 +2137,7 @@
 		sk->sk_user_data = xprt;
 		sk->sk_data_ready = xs_data_ready;
 		sk->sk_write_space = xs_udp_write_space;
+		sock_set_flag(sk, SOCK_FASYNC);
 		sk->sk_allocation = GFP_NOIO;
 
 		xprt_set_connected(xprt);
@@ -2239,6 +2239,7 @@
 		sk->sk_data_ready = xs_tcp_data_ready;
 		sk->sk_state_change = xs_tcp_state_change;
 		sk->sk_write_space = xs_tcp_write_space;
+		sock_set_flag(sk, SOCK_FASYNC);
 		sk->sk_error_report = xs_error_report;
 		sk->sk_allocation = GFP_NOIO;
 
diff --git a/net/switchdev/Kconfig b/net/switchdev/Kconfig
index 86a47e1..651fa20 100644
--- a/net/switchdev/Kconfig
+++ b/net/switchdev/Kconfig
@@ -3,7 +3,7 @@
 #
 
 config NET_SWITCHDEV
-	bool "Switch (and switch-ish) device support (EXPERIMENTAL)"
+	bool "Switch (and switch-ish) device support"
 	depends on INET
 	---help---
 	  This module provides glue between core networking code and device
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 8b5833c..59658b2 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -305,6 +305,8 @@
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
 			   err, attr->id);
+	if (attr->complete)
+		attr->complete(dev, err, attr->complete_priv);
 }
 
 static int switchdev_port_attr_set_defer(struct net_device *dev,
@@ -434,6 +436,8 @@
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
 			   err, obj->id);
+	if (obj->complete)
+		obj->complete(dev, err, obj->complete_priv);
 }
 
 static int switchdev_port_obj_add_defer(struct net_device *dev,
@@ -502,6 +506,8 @@
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
 			   err, obj->id);
+	if (obj->complete)
+		obj->complete(dev, err, obj->complete_priv);
 }
 
 static int switchdev_port_obj_del_defer(struct net_device *dev,
@@ -1079,7 +1085,7 @@
  *	@filter_dev: filter device
  *	@idx:
  *
- *	Delete FDB entry from switch device.
+ *	Dump FDB entries from switch device.
  */
 int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
 			    struct net_device *dev,
@@ -1182,6 +1188,7 @@
 		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
 		.dst = dst,
 		.dst_len = dst_len,
+		.fi = fi,
 		.tos = tos,
 		.type = type,
 		.nlflags = nlflags,
@@ -1190,8 +1197,6 @@
 	struct net_device *dev;
 	int err = 0;
 
-	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
-
 	/* Don't offload route if using custom ip rules or if
 	 * IPv4 FIB offloading has been disabled completely.
 	 */
@@ -1236,6 +1241,7 @@
 		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
 		.dst = dst,
 		.dst_len = dst_len,
+		.fi = fi,
 		.tos = tos,
 		.type = type,
 		.nlflags = 0,
@@ -1244,8 +1250,6 @@
 	struct net_device *dev;
 	int err = 0;
 
-	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
-
 	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
 		return 0;
 
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 27a5406..6f11c62 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -205,6 +205,7 @@
 	struct tipc_bearer *b;
 	struct tipc_media *m;
 	struct tipc_bearer_names b_names;
+	struct sk_buff *skb;
 	char addr_string[16];
 	u32 bearer_id;
 	u32 with_this_prio;
@@ -301,7 +302,7 @@
 	b->net_plane = bearer_id + 'A';
 	b->priority = priority;
 
-	res = tipc_disc_create(net, b, &b->bcast_addr);
+	res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
 	if (res) {
 		bearer_disable(net, b);
 		pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
@@ -310,7 +311,8 @@
 	}
 
 	rcu_assign_pointer(tn->bearer_list[bearer_id], b);
-
+	if (skb)
+		tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
 	pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
 		name,
 		tipc_addr_string_fill(addr_string, disc_domain), priority);
@@ -335,23 +337,16 @@
  */
 static void bearer_disable(struct net *net, struct tipc_bearer *b)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	u32 i;
+	struct tipc_net *tn = tipc_net(net);
+	int bearer_id = b->identity;
 
 	pr_info("Disabling bearer <%s>\n", b->name);
 	b->media->disable_media(b);
-
-	tipc_node_delete_links(net, b->identity);
+	tipc_node_delete_links(net, bearer_id);
 	RCU_INIT_POINTER(b->media_ptr, NULL);
 	if (b->link_req)
 		tipc_disc_delete(b->link_req);
-
-	for (i = 0; i < MAX_BEARERS; i++) {
-		if (b == rtnl_dereference(tn->bearer_list[i])) {
-			RCU_INIT_POINTER(tn->bearer_list[i], NULL);
-			break;
-		}
-	}
+	RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL);
 	kfree_rcu(b, rcu);
 }
 
@@ -394,7 +389,7 @@
 
 /**
  * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
- * @buf: the packet to be sent
+ * @skb: the packet to be sent
  * @b: the bearer through which the packet is to be sent
  * @dest: peer destination address
  */
@@ -403,17 +398,21 @@
 {
 	struct net_device *dev;
 	int delta;
+	void *tipc_ptr;
 
 	dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
 	if (!dev)
 		return 0;
 
+	/* Send RESET message even if bearer is detached from device */
+	tipc_ptr = rtnl_dereference(dev->tipc_ptr);
+	if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
+		goto drop;
+
 	delta = dev->hard_header_len - skb_headroom(skb);
 	if ((delta > 0) &&
-	    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
-		kfree_skb(skb);
-		return 0;
-	}
+	    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
+		goto drop;
 
 	skb_reset_network_header(skb);
 	skb->dev = dev;
@@ -422,6 +421,9 @@
 			dev->dev_addr, skb->len);
 	dev_queue_xmit(skb);
 	return 0;
+drop:
+	kfree_skb(skb);
+	return 0;
 }
 
 int tipc_bearer_mtu(struct net *net, u32 bearer_id)
@@ -450,6 +452,8 @@
 	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
 	if (likely(b))
 		b->media->send_msg(net, skb, b, dest);
+	else
+		kfree_skb(skb);
 	rcu_read_unlock();
 }
 
@@ -468,11 +472,11 @@
 
 	rcu_read_lock();
 	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
-	if (likely(b)) {
-		skb_queue_walk_safe(xmitq, skb, tmp) {
-			__skb_dequeue(xmitq);
-			b->media->send_msg(net, skb, b, dst);
-		}
+	if (unlikely(!b))
+		__skb_queue_purge(xmitq);
+	skb_queue_walk_safe(xmitq, skb, tmp) {
+		__skb_dequeue(xmitq);
+		b->media->send_msg(net, skb, b, dst);
 	}
 	rcu_read_unlock();
 }
@@ -490,14 +494,14 @@
 
 	rcu_read_lock();
 	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
-	if (likely(b)) {
-		skb_queue_walk_safe(xmitq, skb, tmp) {
-			hdr = buf_msg(skb);
-			msg_set_non_seq(hdr, 1);
-			msg_set_mc_netid(hdr, net_id);
-			__skb_dequeue(xmitq);
-			b->media->send_msg(net, skb, b, &b->bcast_addr);
-		}
+	if (unlikely(!b))
+		__skb_queue_purge(xmitq);
+	skb_queue_walk_safe(xmitq, skb, tmp) {
+		hdr = buf_msg(skb);
+		msg_set_non_seq(hdr, 1);
+		msg_set_mc_netid(hdr, net_id);
+		__skb_dequeue(xmitq);
+		b->media->send_msg(net, skb, b, &b->bcast_addr);
 	}
 	rcu_read_unlock();
 }
@@ -513,24 +517,21 @@
  * ignores packets sent using interface multicast, and traffic sent to other
  * nodes (which can happen if interface is running in promiscuous mode).
  */
-static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
+static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
 			   struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct tipc_bearer *b;
 
 	rcu_read_lock();
 	b = rcu_dereference_rtnl(dev->tipc_ptr);
-	if (likely(b)) {
-		if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
-			buf->next = NULL;
-			tipc_rcv(dev_net(dev), buf, b);
-			rcu_read_unlock();
-			return NET_RX_SUCCESS;
-		}
+	if (likely(b && (skb->pkt_type <= PACKET_BROADCAST))) {
+		skb->next = NULL;
+		tipc_rcv(dev_net(dev), skb, b);
+		rcu_read_unlock();
+		return NET_RX_SUCCESS;
 	}
 	rcu_read_unlock();
-
-	kfree_skb(buf);
+	kfree_skb(skb);
 	return NET_RX_DROP;
 }
 
@@ -548,9 +549,18 @@
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct net *net = dev_net(dev);
+	struct tipc_net *tn = tipc_net(net);
 	struct tipc_bearer *b;
+	int i;
 
 	b = rtnl_dereference(dev->tipc_ptr);
+	if (!b) {
+		for (i = 0; i < MAX_BEARERS; b = NULL, i++) {
+			b = rtnl_dereference(tn->bearer_list[i]);
+			if (b && (b->media_ptr == dev))
+				break;
+		}
+	}
 	if (!b)
 		return NOTIFY_DONE;
 
@@ -560,13 +570,20 @@
 	case NETDEV_CHANGE:
 		if (netif_carrier_ok(dev))
 			break;
+	case NETDEV_UP:
+		rcu_assign_pointer(dev->tipc_ptr, b);
+		break;
 	case NETDEV_GOING_DOWN:
+		RCU_INIT_POINTER(dev->tipc_ptr, NULL);
+		synchronize_net();
+		tipc_reset_bearer(net, b);
+		break;
 	case NETDEV_CHANGEMTU:
 		tipc_reset_bearer(net, b);
 		break;
 	case NETDEV_CHANGEADDR:
 		b->media->raw2addr(b, &b->addr,
-				       (char *)dev->dev_addr);
+				   (char *)dev->dev_addr);
 		tipc_reset_bearer(net, b);
 		break;
 	case NETDEV_UNREGISTER:
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index e318205..f686e41 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -42,8 +42,6 @@
 #include <net/genetlink.h>
 
 #define MAX_MEDIA	3
-#define MAX_NODES	4096
-#define WSIZE		32
 
 /* Identifiers associated with TIPC message header media address info
  * - address info field is 32 bytes long
@@ -62,16 +60,6 @@
 #define TIPC_MEDIA_TYPE_UDP	3
 
 /**
- * struct tipc_node_map - set of node identifiers
- * @count: # of nodes in set
- * @map: bitmap of node identifiers that are in the set
- */
-struct tipc_node_map {
-	u32 count;
-	u32 map[MAX_NODES / WSIZE];
-};
-
-/**
  * struct tipc_media_addr - destination address used by TIPC bearers
  * @value: address info (format defined by media)
  * @media_id: TIPC media type identifier
@@ -142,7 +130,6 @@
  * @identity: array index of this bearer within TIPC bearer array
  * @link_req: ptr to (optional) structure making periodic link setup requests
  * @net_plane: network plane ('A' through 'H') currently associated with bearer
- * @nodes: indicates which nodes in cluster can be reached through bearer
  *
  * Note: media-specific code is responsible for initialization of the fields
  * indicated below when a bearer is enabled; TIPC's generic bearer code takes
@@ -163,8 +150,6 @@
 	u32 identity;
 	struct tipc_link_req *link_req;
 	char net_plane;
-	int node_cnt;
-	struct tipc_node_map nodes;
 };
 
 struct tipc_bearer_names {
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 03a8428..fe1b062 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -69,6 +69,7 @@
 	if (err)
 		goto out_nametbl;
 
+	INIT_LIST_HEAD(&tn->dist_queue);
 	err = tipc_topsrv_start(net);
 	if (err)
 		goto out_subscr;
@@ -111,11 +112,9 @@
 
 	pr_info("Activated (version " TIPC_MOD_VER ")\n");
 
-	sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
-			      TIPC_LOW_IMPORTANCE;
-	sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
-			      TIPC_CRITICAL_IMPORTANCE;
-	sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
+	sysctl_tipc_rmem[0] = RCVBUF_MIN;
+	sysctl_tipc_rmem[1] = RCVBUF_DEF;
+	sysctl_tipc_rmem[2] = RCVBUF_MAX;
 
 	err = tipc_netlink_start();
 	if (err)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5504d63..eff58dc 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -103,6 +103,9 @@
 	spinlock_t nametbl_lock;
 	struct name_table *nametbl;
 
+	/* Name dist queue */
+	struct list_head dist_queue;
+
 	/* Topology subscription server */
 	struct tipc_server *topsrv;
 	atomic_t subscription_count;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index f1e738e..ad9d477 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -268,10 +268,9 @@
  * Returns 0 if successful, otherwise -errno.
  */
 int tipc_disc_create(struct net *net, struct tipc_bearer *b,
-		     struct tipc_media_addr *dest)
+		     struct tipc_media_addr *dest, struct sk_buff **skb)
 {
 	struct tipc_link_req *req;
-	struct sk_buff *skb;
 
 	req = kmalloc(sizeof(*req), GFP_ATOMIC);
 	if (!req)
@@ -293,9 +292,7 @@
 	setup_timer(&req->timer, disc_timeout, (unsigned long)req);
 	mod_timer(&req->timer, jiffies + req->timer_intv);
 	b->link_req = req;
-	skb = skb_clone(req->buf, GFP_ATOMIC);
-	if (skb)
-		tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest);
+	*skb = skb_clone(req->buf, GFP_ATOMIC);
 	return 0;
 }
 
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index c9b1277..b80a335 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -40,7 +40,7 @@
 struct tipc_link_req;
 
 int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
-		     struct tipc_media_addr *dest);
+		     struct tipc_media_addr *dest, struct sk_buff **skb);
 void tipc_disc_delete(struct tipc_link_req *req);
 void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr);
 void tipc_disc_add_dest(struct tipc_link_req *req);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 7d2bb3e..7059c94 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -140,6 +140,7 @@
 	char if_name[TIPC_MAX_IF_NAME];
 	u32 priority;
 	char net_plane;
+	u16 rst_cnt;
 
 	/* Failover/synch */
 	u16 drop_point;
@@ -701,40 +702,34 @@
 
 /* tipc_link_timeout - perform periodic task as instructed from node timeout
  */
-/* tipc_link_timeout - perform periodic task as instructed from node timeout
- */
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-	int rc = 0;
-	int mtyp = STATE_MSG;
-	bool xmit = false;
-	bool prb = false;
+	int mtyp, rc = 0;
+	bool state = false;
+	bool probe = false;
+	bool setup = false;
 	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
 	u16 bc_acked = l->bc_rcvlink->acked;
-	bool bc_up = link_is_up(l->bc_rcvlink);
 
 	link_profile_stats(l);
 
 	switch (l->state) {
 	case LINK_ESTABLISHED:
 	case LINK_SYNCHING:
-		if (!l->silent_intv_cnt) {
-			if (bc_up && (bc_acked != bc_snt))
-				xmit = true;
-		} else if (l->silent_intv_cnt <= l->abort_limit) {
-			xmit = true;
-			prb = true;
-		} else {
-			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
-		}
+		if (l->silent_intv_cnt > l->abort_limit)
+			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+		mtyp = STATE_MSG;
+		state = bc_acked != bc_snt;
+		probe = l->silent_intv_cnt;
 		l->silent_intv_cnt++;
 		break;
 	case LINK_RESET:
-		xmit = true;
+		setup = l->rst_cnt++ <= 4;
+		setup |= !(l->rst_cnt % 16);
 		mtyp = RESET_MSG;
 		break;
 	case LINK_ESTABLISHING:
-		xmit = true;
+		setup = true;
 		mtyp = ACTIVATE_MSG;
 		break;
 	case LINK_PEER_RESET:
@@ -745,8 +740,8 @@
 		break;
 	}
 
-	if (xmit)
-		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);
+	if (state || probe || setup)
+		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);
 
 	return rc;
 }
@@ -833,6 +828,7 @@
 	l->rcv_nxt = 1;
 	l->acked = 0;
 	l->silent_intv_cnt = 0;
+	l->rst_cnt = 0;
 	l->stats.recv_info = 0;
 	l->stale_count = 0;
 	l->bc_peer_is_up = false;
@@ -1110,12 +1106,12 @@
 	return released;
 }
 
-/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
+/* tipc_link_build_state_msg: prepare link state message for transmission
  *
  * Note that sending of broadcast ack is coordinated among nodes, to reduce
  * risk of ack storms towards the sender
  */
-int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
+int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
 	if (!l)
 		return 0;
@@ -1140,11 +1136,17 @@
 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
 	int mtyp = RESET_MSG;
+	struct sk_buff *skb;
 
 	if (l->state == LINK_ESTABLISHING)
 		mtyp = ACTIVATE_MSG;
 
 	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
+
+	/* Inform peer that this endpoint is going down if applicable */
+	skb = skb_peek_tail(xmitq);
+	if (skb && (l->state == LINK_RESET))
+		msg_set_peer_stopping(buf_msg(skb), 1);
 }
 
 /* tipc_link_build_nack_msg: prepare link nack message for transmission
@@ -1219,7 +1221,7 @@
 		if (!tipc_data_input(l, skb, l->inputq))
 			rc |= tipc_link_input(l, skb, l->inputq);
 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
-			rc |= tipc_link_build_ack_msg(l, xmitq);
+			rc |= tipc_link_build_state_msg(l, xmitq);
 		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
 			break;
 	} while ((skb = __skb_dequeue(defq)));
@@ -1411,7 +1413,9 @@
 			l->priority = peers_prio;
 
 		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
-		if ((mtyp == RESET_MSG) || !link_is_up(l))
+		if (msg_peer_stopping(hdr))
+			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+		else if ((mtyp == RESET_MSG) || !link_is_up(l))
 			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
 
 		/* ACTIVATE_MSG takes up link if it was already locally reset */
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 6a94175..d7e9d42 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -123,7 +123,7 @@
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 		  struct sk_buff_head *xmitq);
-int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
+int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
 			   struct tipc_link *uc_l,
 			   struct sk_buff_head *xmitq);
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 55778a0..024da8a 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -715,6 +715,16 @@
 	msg_set_bits(m, 5, 12, 0x1, r);
 }
 
+static inline u32 msg_peer_stopping(struct tipc_msg *m)
+{
+	return msg_bits(m, 5, 13, 0x1);
+}
+
+static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s)
+{
+	msg_set_bits(m, 5, 13, 0x1, s);
+}
+
 static inline char *msg_media_addr(struct tipc_msg *m)
 {
 	return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
@@ -733,16 +743,26 @@
 	msg_set_bits(m, 9, 16, 0xffff, n);
 }
 
-static inline u32 msg_bcast_tag(struct tipc_msg *m)
+static inline u32 msg_conn_ack(struct tipc_msg *m)
 {
 	return msg_bits(m, 9, 16, 0xffff);
 }
 
-static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
+static inline void msg_set_conn_ack(struct tipc_msg *m, u32 n)
 {
 	msg_set_bits(m, 9, 16, 0xffff, n);
 }
 
+static inline u32 msg_adv_win(struct tipc_msg *m)
+{
+	return msg_bits(m, 9, 0, 0xffff);
+}
+
+static inline void msg_set_adv_win(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 9, 0, 0xffff, n);
+}
+
 static inline u32 msg_max_pkt(struct tipc_msg *m)
 {
 	return msg_bits(m, 9, 16, 0xffff) * 4;
@@ -779,6 +799,11 @@
 	return msg_redundant_link(m);
 }
 
+static inline bool msg_is_reset(struct tipc_msg *hdr)
+{
+	return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
+}
+
 struct sk_buff *tipc_buf_acquire(u32 size);
 bool tipc_msg_validate(struct sk_buff *skb);
 bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
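
The accessors added above pack two independent 16-bit connection flow-control fields into header word 9 — the acknowledged count in bits 16-31 (msg_conn_ack) and the advertised receive window in bits 0-15 (msg_adv_win) — while msg_peer_stopping claims bit 13 of word 5. Below is a minimal userspace sketch of that packing, assuming the helpers only model the shift-and-mask behaviour of msg_bits()/msg_set_bits(); the in-kernel versions additionally convert to and from network byte order.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for TIPC's msg_bits()/msg_set_bits(): read or
 * write the field at bit position 'pos' of header word 'w' under 'mask'. */
static uint32_t get_bits(const uint32_t *hdr, int w, int pos, uint32_t mask)
{
	return (hdr[w] >> pos) & mask;
}

static void set_bits(uint32_t *hdr, int w, int pos, uint32_t mask, uint32_t val)
{
	hdr[w] &= ~(mask << pos);
	hdr[w] |= (val & mask) << pos;
}

int main(void)
{
	uint32_t hdr[10] = { 0 };

	set_bits(hdr, 9, 16, 0xffff, 7);   /* conn_ack: 7 units acknowledged */
	set_bits(hdr, 9, 0, 0xffff, 512);  /* adv_win: 512-unit receive window */
	set_bits(hdr, 5, 13, 0x1, 1);      /* peer_stopping: endpoint going down */

	printf("conn_ack=%u adv_win=%u peer_stopping=%u\n",
	       get_bits(hdr, 9, 16, 0xffff),
	       get_bits(hdr, 9, 0, 0xffff),
	       get_bits(hdr, 5, 13, 0x1));
	return 0;
}

The receiver only interprets these fields as block counts when the peer has advertised the new TIPC_BLOCK_FLOWCTL capability; otherwise the older message-count semantics apply, as the socket.c hunks below show.
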
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index ebe9d0f..6b626a6 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -40,11 +40,6 @@
 
 int sysctl_tipc_named_timeout __read_mostly = 2000;
 
-/**
- * struct tipc_dist_queue - queue holding deferred name table updates
- */
-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
-
 struct distr_queue_item {
 	struct distr_item i;
 	u32 dtype;
@@ -229,12 +224,31 @@
 	kfree_rcu(p, rcu);
 }
 
+/**
+ * tipc_dist_queue_purge - remove deferred updates from a node that went down
+ */
+static void tipc_dist_queue_purge(struct net *net, u32 addr)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct distr_queue_item *e, *tmp;
+
+	spin_lock_bh(&tn->nametbl_lock);
+	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
+		if (e->node != addr)
+			continue;
+		list_del(&e->next);
+		kfree(e);
+	}
+	spin_unlock_bh(&tn->nametbl_lock);
+}
+
 void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
 {
 	struct publication *publ, *tmp;
 
 	list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
 		tipc_publ_purge(net, publ, addr);
+	tipc_dist_queue_purge(net, addr);
 }
 
 /**
@@ -279,9 +293,11 @@
  * tipc_named_add_backlog - add a failed name table update to the backlog
  *
  */
-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
+				   u32 type, u32 node)
 {
 	struct distr_queue_item *e;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	unsigned long now = get_jiffies_64();
 
 	e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -291,7 +307,7 @@
 	e->node = node;
 	e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
 	memcpy(e, i, sizeof(*i));
-	list_add_tail(&e->next, &tipc_dist_queue);
+	list_add_tail(&e->next, &tn->dist_queue);
 }
 
 /**
@@ -301,10 +317,11 @@
 void tipc_named_process_backlog(struct net *net)
 {
 	struct distr_queue_item *e, *tmp;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	char addr[16];
 	unsigned long now = get_jiffies_64();
 
-	list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
 		if (time_after(e->expires, now)) {
 			if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
 				continue;
@@ -344,7 +361,7 @@
 		node = msg_orignode(msg);
 		while (count--) {
 			if (!tipc_update_nametbl(net, item, node, mtype))
-				tipc_named_add_backlog(item, mtype, node);
+				tipc_named_add_backlog(net, item, mtype, node);
 			item++;
 		}
 		kfree_skb(skb);
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index d7d050f..4dfc5c1 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -802,7 +802,7 @@
 		goto out;
 
 	tipc_tlv_sprintf(msg->rep, "%-10u %s",
-			 nla_get_u32(publ[TIPC_NLA_PUBL_REF]),
+			 nla_get_u32(publ[TIPC_NLA_PUBL_KEY]),
 			 scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
 out:
 	tipc_tlv_sprintf(msg->rep, "\n");
diff --git a/net/tipc/node.c b/net/tipc/node.c
index ace178f..e01e2c71 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1,7 +1,7 @@
 /*
  * net/tipc/node.c: TIPC node management routines
  *
- * Copyright (c) 2000-2006, 2012-2015, Ericsson AB
+ * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
@@ -191,6 +191,20 @@
 	tipc_node_put(n);
 	return mtu;
 }
+
+u16 tipc_node_get_capabilities(struct net *net, u32 addr)
+{
+	struct tipc_node *n;
+	u16 caps;
+
+	n = tipc_node_find(net, addr);
+	if (unlikely(!n))
+		return TIPC_NODE_CAPABILITIES;
+	caps = n->capabilities;
+	tipc_node_put(n);
+	return caps;
+}
+
 /*
  * A trivial power-of-two bitmask technique is used for speed, since this
  * operation is done for every incoming TIPC packet. The number of hash table
@@ -304,8 +318,11 @@
 
 	spin_lock_bh(&tn->node_list_lock);
 	n = tipc_node_find(net, addr);
-	if (n)
+	if (n) {
+		/* Same node may come back with new capabilities */
+		n->capabilities = capabilities;
 		goto exit;
+	}
 	n = kzalloc(sizeof(*n), GFP_ATOMIC);
 	if (!n) {
 		pr_warn("Node creation failed, no memory\n");
@@ -525,7 +542,7 @@
 	struct tipc_link *ol = node_active_link(n, 0);
 	struct tipc_link *nl = n->links[bearer_id].link;
 
-	if (!nl)
+	if (!nl || tipc_link_is_up(nl))
 		return;
 
 	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
@@ -545,12 +562,16 @@
 	pr_debug("Established link <%s> on network plane %c\n",
 		 tipc_link_name(nl), tipc_link_plane(nl));
 
+	/* Ensure that a STATE message goes first */
+	tipc_link_build_state_msg(nl, xmitq);
+
 	/* First link? => give it both slots */
 	if (!ol) {
 		*slot0 = bearer_id;
 		*slot1 = bearer_id;
 		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
 		n->action_flags |= TIPC_NOTIFY_NODE_UP;
+		tipc_link_set_active(nl, true);
 		tipc_bcast_add_peer(n->net, nl, xmitq);
 		return;
 	}
@@ -581,8 +602,12 @@
 static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
 			      struct sk_buff_head *xmitq)
 {
+	struct tipc_media_addr *maddr;
+
 	tipc_node_write_lock(n);
 	__tipc_node_link_up(n, bearer_id, xmitq);
+	maddr = &n->links[bearer_id].maddr;
+	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
 	tipc_node_write_unlock(n);
 }
 
@@ -1279,7 +1304,7 @@
 	/* Broadcast ACKs are sent on a unicast link */
 	if (rc & TIPC_LINK_SND_BC_ACK) {
 		tipc_node_read_lock(n);
-		tipc_link_build_ack_msg(le->link, &xmitq);
+		tipc_link_build_state_msg(le->link, &xmitq);
 		tipc_node_read_unlock(n);
 	}
 
@@ -1444,6 +1469,7 @@
 	int bearer_id = b->identity;
 	struct tipc_link_entry *le;
 	u16 bc_ack = msg_bcast_ack(hdr);
+	u32 self = tipc_own_addr(net);
 	int rc = 0;
 
 	__skb_queue_head_init(&xmitq);
@@ -1460,6 +1486,10 @@
 			return tipc_node_bc_rcv(net, skb, bearer_id);
 	}
 
+	/* Discard unicast link messages destined for another node */
+	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
+		goto discard;
+
 	/* Locate neighboring node that sent packet */
 	n = tipc_node_find(net, msg_prevnode(hdr));
 	if (unlikely(!n))
diff --git a/net/tipc/node.h b/net/tipc/node.h
index f39d9d0..8264b3d 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,10 +45,11 @@
 /* Optional capabilities supported by this code version
  */
 enum {
-	TIPC_BCAST_SYNCH = (1 << 1)
+	TIPC_BCAST_SYNCH   = (1 << 1),
+	TIPC_BLOCK_FLOWCTL = (2 << 1)
 };
 
-#define TIPC_NODE_CAPABILITIES TIPC_BCAST_SYNCH
+#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | TIPC_BLOCK_FLOWCTL)
 #define INVALID_BEARER_ID -1
 
 void tipc_node_stop(struct net *net);
@@ -70,6 +71,7 @@
 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
+u16 tipc_node_get_capabilities(struct net *net, u32 addr);
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 2446bfb..7a0af2d 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -86,6 +86,7 @@
 static void tipc_recv_work(struct work_struct *work);
 static void tipc_send_work(struct work_struct *work);
 static void tipc_clean_outqueues(struct tipc_conn *con);
+static void tipc_sock_release(struct tipc_conn *con);
 
 static void tipc_conn_kref_release(struct kref *kref)
 {
@@ -102,6 +103,7 @@
 		}
 		saddr->scope = -TIPC_NODE_SCOPE;
 		kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
+		tipc_sock_release(con);
 		sock_release(sock);
 		con->sock = NULL;
 	}
@@ -184,26 +186,31 @@
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
+static void tipc_sock_release(struct tipc_conn *con)
+{
+	struct tipc_server *s = con->server;
+
+	if (con->conid)
+		s->tipc_conn_release(con->conid, con->usr_data);
+
+	tipc_unregister_callbacks(con);
+}
+
 static void tipc_close_conn(struct tipc_conn *con)
 {
 	struct tipc_server *s = con->server;
 
 	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
-		if (con->conid)
-			s->tipc_conn_shutdown(con->conid, con->usr_data);
 
 		spin_lock_bh(&s->idr_lock);
 		idr_remove(&s->conn_idr, con->conid);
 		s->idr_in_use--;
 		spin_unlock_bh(&s->idr_lock);
 
-		tipc_unregister_callbacks(con);
-
 		/* We shouldn't flush pending works as we may be in the
 		 * thread. In fact the races with pending rx/tx work structs
 		 * are harmless for us here as we have already deleted this
-		 * connection from server connection list and set
-		 * sk->sk_user_data to 0 before releasing connection object.
+		 * connection from server connection list.
 		 */
 		kernel_sock_shutdown(con->sock, SHUT_RDWR);
 
diff --git a/net/tipc/server.h b/net/tipc/server.h
index 9015fae..34f8055 100644
--- a/net/tipc/server.h
+++ b/net/tipc/server.h
@@ -53,7 +53,7 @@
  * @send_wq: send workqueue
  * @max_rcvbuf_size: maximum permitted receive message length
  * @tipc_conn_new: callback will be called when new connection is incoming
- * @tipc_conn_shutdown: callback will be called when connection is shut down
+ * @tipc_conn_release: callback will be called before releasing the connection
  * @tipc_conn_recvmsg: callback will be called when message arrives
  * @saddr: TIPC server address
  * @name: server name
@@ -70,7 +70,7 @@
 	struct workqueue_struct *send_wq;
 	int max_rcvbuf_size;
 	void *(*tipc_conn_new)(int conid);
-	void (*tipc_conn_shutdown)(int conid, void *usr_data);
+	void (*tipc_conn_release)(int conid, void *usr_data);
 	void (*tipc_conn_recvmsg)(struct net *net, int conid,
 				  struct sockaddr_tipc *addr, void *usr_data,
 				  void *buf, size_t len);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3eeb50a..88bfcd7 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -96,8 +96,11 @@
 	uint conn_timeout;
 	atomic_t dupl_rcvcnt;
 	bool link_cong;
-	uint sent_unacked;
-	uint rcv_unacked;
+	u16 snt_unacked;
+	u16 snd_win;
+	u16 peer_caps;
+	u16 rcv_unacked;
+	u16 rcv_win;
 	struct sockaddr_tipc remote;
 	struct rhash_head node;
 	struct rcu_head rcu;
@@ -227,9 +230,29 @@
 	return container_of(sk, struct tipc_sock, sk);
 }
 
-static int tsk_conn_cong(struct tipc_sock *tsk)
+static bool tsk_conn_cong(struct tipc_sock *tsk)
 {
-	return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
+	return tsk->snt_unacked >= tsk->snd_win;
+}
+
+/* tsk_adv_blocks(): translate a buffer size in bytes to number of
+ * advertisable blocks, taking into account the ratio truesize(len)/len.
+ * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
+ */
+static u16 tsk_adv_blocks(int len)
+{
+	return len / FLOWCTL_BLK_SZ / 4;
+}
+
+/* tsk_inc(): increment counter for sent or received data
+ * - If block-based flow control is not supported by the peer we
+ *   fall back to message-based counting, incrementing once per message
+ */
+static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
+{
+	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
+		return ((msglen / FLOWCTL_BLK_SZ) + 1);
+	return 1;
 }
 
 /**
@@ -366,7 +389,7 @@
 	sock->state = state;
 	sock_init_data(sock, sk);
 	if (tipc_sk_insert(tsk)) {
-		pr_warn("Socket create failed; port numbrer exhausted\n");
+		pr_warn("Socket create failed; port number exhausted\n");
 		return -EINVAL;
 	}
 	msg_set_origport(msg, tsk->portid);
@@ -377,9 +400,12 @@
 	sk->sk_write_space = tipc_write_space;
 	sk->sk_destruct = tipc_sock_destruct;
 	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
-	tsk->sent_unacked = 0;
 	atomic_set(&tsk->dupl_rcvcnt, 0);
 
+	/* Start out with safe limits until we receive an advertised window */
+	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
+	tsk->rcv_win = tsk->snd_win;
+
 	if (sock->state == SS_READY) {
 		tsk_set_unreturnable(tsk, true);
 		if (sock->type == SOCK_DGRAM)
@@ -775,7 +801,7 @@
 	struct sock *sk = &tsk->sk;
 	struct tipc_msg *hdr = buf_msg(skb);
 	int mtyp = msg_type(hdr);
-	int conn_cong;
+	bool conn_cong;
 
 	/* Ignore if connection cannot be validated: */
 	if (!tsk_peer_msg(tsk, hdr))
@@ -789,7 +815,9 @@
 		return;
 	} else if (mtyp == CONN_ACK) {
 		conn_cong = tsk_conn_cong(tsk);
-		tsk->sent_unacked -= msg_msgcnt(hdr);
+		tsk->snt_unacked -= msg_conn_ack(hdr);
+		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
+			tsk->snd_win = msg_adv_win(hdr);
 		if (conn_cong)
 			sk->sk_write_space(sk);
 	} else if (mtyp != CONN_PROBE_REPLY) {
@@ -1020,12 +1048,14 @@
 	u32 dnode;
 	uint mtu, send, sent = 0;
 	struct iov_iter save;
+	int hlen = MIN_H_SIZE;
 
 	/* Handle implied connection establishment */
 	if (unlikely(dest)) {
 		rc = __tipc_sendmsg(sock, m, dsz);
+		hlen = msg_hdr_sz(mhdr);
 		if (dsz && (dsz == rc))
-			tsk->sent_unacked = 1;
+			tsk->snt_unacked = tsk_inc(tsk, dsz + hlen);
 		return rc;
 	}
 	if (dsz > (uint)INT_MAX)
@@ -1054,7 +1084,7 @@
 		if (likely(!tsk_conn_cong(tsk))) {
 			rc = tipc_node_xmit(net, &pktchain, dnode, portid);
 			if (likely(!rc)) {
-				tsk->sent_unacked++;
+				tsk->snt_unacked += tsk_inc(tsk, send + hlen);
 				sent += send;
 				if (sent == dsz)
 					return dsz;
@@ -1118,6 +1148,13 @@
 	sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
 	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
 	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
+	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
+		return;
+
+	/* Fall back to message based flow control */
+	tsk->rcv_win = FLOWCTL_MSG_WIN;
+	tsk->snd_win = FLOWCTL_MSG_WIN;
 }
 
 /**
@@ -1214,7 +1251,7 @@
 	return 0;
 }
 
-static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
+static void tipc_sk_send_ack(struct tipc_sock *tsk)
 {
 	struct net *net = sock_net(&tsk->sk);
 	struct sk_buff *skb = NULL;
@@ -1230,7 +1267,14 @@
 	if (!skb)
 		return;
 	msg = buf_msg(skb);
-	msg_set_msgcnt(msg, ack);
+	msg_set_conn_ack(msg, tsk->rcv_unacked);
+	tsk->rcv_unacked = 0;
+
+	/* Adjust to and advertise the correct window limit */
+	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
+		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
+		msg_set_adv_win(msg, tsk->rcv_win);
+	}
 	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
 }
 
@@ -1288,7 +1332,7 @@
 	long timeo;
 	unsigned int sz;
 	u32 err;
-	int res;
+	int res, hlen;
 
 	/* Catch invalid receive requests */
 	if (unlikely(!buf_len))
@@ -1313,6 +1357,7 @@
 	buf = skb_peek(&sk->sk_receive_queue);
 	msg = buf_msg(buf);
 	sz = msg_data_sz(msg);
+	hlen = msg_hdr_sz(msg);
 	err = msg_errcode(msg);
 
 	/* Discard an empty non-errored message & try again */
@@ -1335,7 +1380,7 @@
 			sz = buf_len;
 			m->msg_flags |= MSG_TRUNC;
 		}
-		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
+		res = skb_copy_datagram_msg(buf, hlen, m, sz);
 		if (res)
 			goto exit;
 		res = sz;
@@ -1347,15 +1392,15 @@
 			res = -ECONNRESET;
 	}
 
-	/* Consume received message (optional) */
-	if (likely(!(flags & MSG_PEEK))) {
-		if ((sock->state != SS_READY) &&
-		    (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
-			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
-			tsk->rcv_unacked = 0;
-		}
-		tsk_advance_rx_queue(sk);
+	if (unlikely(flags & MSG_PEEK))
+		goto exit;
+
+	if (likely(sock->state != SS_READY)) {
+		tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
+		if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
+			tipc_sk_send_ack(tsk);
 	}
+	tsk_advance_rx_queue(sk);
 exit:
 	release_sock(sk);
 	return res;
@@ -1384,7 +1429,7 @@
 	int sz_to_copy, target, needed;
 	int sz_copied = 0;
 	u32 err;
-	int res = 0;
+	int res = 0, hlen;
 
 	/* Catch invalid receive attempts */
 	if (unlikely(!buf_len))
@@ -1410,6 +1455,7 @@
 	buf = skb_peek(&sk->sk_receive_queue);
 	msg = buf_msg(buf);
 	sz = msg_data_sz(msg);
+	hlen = msg_hdr_sz(msg);
 	err = msg_errcode(msg);
 
 	/* Discard an empty non-errored message & try again */
@@ -1434,8 +1480,7 @@
 		needed = (buf_len - sz_copied);
 		sz_to_copy = (sz <= needed) ? sz : needed;
 
-		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
-					    m, sz_to_copy);
+		res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);
 		if (res)
 			goto exit;
 
@@ -1457,20 +1502,18 @@
 			res = -ECONNRESET;
 	}
 
-	/* Consume received message (optional) */
-	if (likely(!(flags & MSG_PEEK))) {
-		if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
-			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
-			tsk->rcv_unacked = 0;
-		}
-		tsk_advance_rx_queue(sk);
-	}
+	if (unlikely(flags & MSG_PEEK))
+		goto exit;
+
+	tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
+	if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
+		tipc_sk_send_ack(tsk);
+	tsk_advance_rx_queue(sk);
 
 	/* Loop around if more data is required */
 	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
 	    (!skb_queue_empty(&sk->sk_receive_queue) ||
 	    (sz_copied < target)) &&	/* and more is ready or required */
-	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
 	    (!err))			/* and haven't reached a FIN */
 		goto restart;
 
@@ -1602,30 +1645,33 @@
 /**
  * rcvbuf_limit - get proper overload limit of socket receive queue
  * @sk: socket
- * @buf: message
+ * @skb: message
  *
- * For all connection oriented messages, irrespective of importance,
- * the default overload value (i.e. 67MB) is set as limit.
+ * For connection oriented messages, irrespective of importance,
+ * default queue limit is 2 MB.
  *
- * For all connectionless messages, by default new queue limits are
- * as belows:
+ * For connectionless messages, queue limits are based on message
+ * importance as follows:
  *
- * TIPC_LOW_IMPORTANCE       (4 MB)
- * TIPC_MEDIUM_IMPORTANCE    (8 MB)
- * TIPC_HIGH_IMPORTANCE      (16 MB)
- * TIPC_CRITICAL_IMPORTANCE  (32 MB)
+ * TIPC_LOW_IMPORTANCE       (2 MB)
+ * TIPC_MEDIUM_IMPORTANCE    (4 MB)
+ * TIPC_HIGH_IMPORTANCE      (8 MB)
+ * TIPC_CRITICAL_IMPORTANCE  (16 MB)
  *
  * Returns overload limit according to corresponding message importance
  */
-static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
+static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
 {
-	struct tipc_msg *msg = buf_msg(buf);
+	struct tipc_sock *tsk = tipc_sk(sk);
+	struct tipc_msg *hdr = buf_msg(skb);
 
-	if (msg_connected(msg))
-		return sysctl_tipc_rmem[2];
+	if (unlikely(!msg_connected(hdr)))
+		return sk->sk_rcvbuf << msg_importance(hdr);
 
-	return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
-		msg_importance(msg);
+	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
+		return sk->sk_rcvbuf;
+
+	return FLOWCTL_MSG_LIM;
 }
 
 /**
@@ -1748,7 +1794,7 @@
 
 		/* Try backlog, compensating for double-counted bytes */
 		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
-		if (sk->sk_backlog.len)
+		if (!sk->sk_backlog.len)
 			atomic_set(dcnt, 0);
 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
 		if (likely(!sk_add_backlog(sk, skb, lim)))
@@ -2807,6 +2853,9 @@
 		if (err)
 			return err;
 
+		if (!attrs[TIPC_NLA_SOCK])
+			return -EINVAL;
+
 		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
 				       attrs[TIPC_NLA_SOCK],
 				       tipc_nl_sock_policy);
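
For reference, the connectionless limits listed in the rewritten rcvbuf_limit() comment follow directly from sk->sk_rcvbuf << msg_importance(hdr). A rough userspace sketch of that arithmetic, assuming sk_rcvbuf carries the new 2 MB default (RCVBUF_DEF, wired up via sysctl_tipc_rmem in the core.c hunk earlier in this series) and importance levels 0 through 3 for LOW through CRITICAL:

#include <stdio.h>

#define FLOWCTL_BLK_SZ	1024
#define RCVBUF_DEF	(FLOWCTL_BLK_SZ * 1024 * 2)	/* 2 MB default rcvbuf */

int main(void)
{
	static const char * const level[] = { "LOW", "MEDIUM", "HIGH", "CRITICAL" };
	int imp;

	/* Connectionless overload limit: sk_rcvbuf << importance */
	for (imp = 0; imp < 4; imp++)
		printf("TIPC_%s_IMPORTANCE: %d MB\n",
		       level[imp], (RCVBUF_DEF << imp) >> 20);
	return 0;
}

This prints 2, 4, 8 and 16 MB, matching the table in the updated kernel-doc comment.
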
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 4241f22..06fb594 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -1,6 +1,6 @@
 /* net/tipc/socket.h: Include file for TIPC socket code
  *
- * Copyright (c) 2014-2015, Ericsson AB
+ * Copyright (c) 2014-2016, Ericsson AB
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -38,10 +38,17 @@
 #include <net/sock.h>
 #include <net/genetlink.h>
 
-#define TIPC_CONNACK_INTV         256
-#define TIPC_FLOWCTRL_WIN        (TIPC_CONNACK_INTV * 2)
-#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
-				  SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
+/* Compatibility values for deprecated message based flow control */
+#define FLOWCTL_MSG_WIN 512
+#define FLOWCTL_MSG_LIM ((FLOWCTL_MSG_WIN * 2 + 1) * SKB_TRUESIZE(MAX_MSG_SIZE))
+
+#define FLOWCTL_BLK_SZ 1024
+
+/* Socket receive buffer sizes */
+#define RCVBUF_MIN  (FLOWCTL_BLK_SZ * 512)
+#define RCVBUF_DEF  (FLOWCTL_BLK_SZ * 1024 * 2)
+#define RCVBUF_MAX  (FLOWCTL_BLK_SZ * 1024 * 16)
+
 int tipc_socket_init(void);
 void tipc_socket_stop(void);
 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
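
The constants above, together with tsk_adv_blocks() and tsk_inc() in socket.c, define the block-based accounting scheme: the advertised window is the receive buffer divided by FLOWCTL_BLK_SZ and by the assumed worst-case truesize ratio of 4, each message costs msglen / FLOWCTL_BLK_SZ + 1 blocks of header plus data, and the receiver sends a CONN_ACK once a quarter of the window is consumed. A small standalone sketch of that arithmetic (not kernel code; it only mirrors the formulas visible in this series, with a hypothetical 24-byte header):

#include <stdio.h>

#define FLOWCTL_BLK_SZ	1024
#define RCVBUF_MIN	(FLOWCTL_BLK_SZ * 512)
#define RCVBUF_DEF	(FLOWCTL_BLK_SZ * 1024 * 2)
#define RCVBUF_MAX	(FLOWCTL_BLK_SZ * 1024 * 16)

/* Mirror of tsk_adv_blocks(): buffer bytes -> advertisable blocks,
 * compensating for a truesize(len)/len ratio of up to 4. */
static unsigned int adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* Mirror of tsk_inc() with a TIPC_BLOCK_FLOWCTL peer: one block per
 * full FLOWCTL_BLK_SZ of header + data, plus one. */
static unsigned int inc_blocks(int msglen)
{
	return msglen / FLOWCTL_BLK_SZ + 1;
}

int main(void)
{
	int msglen = 24 + 66000;	/* hypothetical header + payload */
	unsigned int win = adv_blocks(RCVBUF_DEF);

	printf("advertised windows: min=%u def=%u max=%u blocks\n",
	       adv_blocks(RCVBUF_MIN), adv_blocks(RCVBUF_DEF),
	       adv_blocks(RCVBUF_MAX));
	printf("a %d-byte message consumes %u blocks\n",
	       msglen, inc_blocks(msglen));
	printf("receiver acks after rcv_win/4 = %u unacked blocks\n", win / 4);
	return 0;
}
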
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index e6cb386..0dd0224 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -302,7 +302,7 @@
 }
 
 /* Handle one termination request for the subscriber */
-static void tipc_subscrb_shutdown_cb(int conid, void *usr_data)
+static void tipc_subscrb_release_cb(int conid, void *usr_data)
 {
 	tipc_subscrb_delete((struct tipc_subscriber *)usr_data);
 }
@@ -326,8 +326,7 @@
 		return tipc_subscrp_cancel(s, subscriber);
 	}
 
-	if (s)
-		tipc_subscrp_subscribe(net, s, subscriber, swap);
+	tipc_subscrp_subscribe(net, s, subscriber, swap);
 }
 
 /* Handle one request to establish a new subscriber */
@@ -365,7 +364,7 @@
 	topsrv->max_rcvbuf_size		= sizeof(struct tipc_subscr);
 	topsrv->tipc_conn_recvmsg	= tipc_subscrb_rcv_cb;
 	topsrv->tipc_conn_new		= tipc_subscrb_connect_cb;
-	topsrv->tipc_conn_shutdown	= tipc_subscrb_shutdown_cb;
+	topsrv->tipc_conn_release	= tipc_subscrb_release_cb;
 
 	strncpy(topsrv->name, name, strlen(name) + 1);
 	tn->topsrv = topsrv;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 8269da7..80aa6a3 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -953,7 +953,7 @@
 	return NULL;
 }
 
-static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
+static int unix_mknod(struct dentry *dentry, const struct path *path, umode_t mode,
 		      struct path *res)
 {
 	int err;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 3dce53e..b5f1221 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1808,27 +1808,8 @@
 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
 		err = 0;
 
-	if (copied > 0) {
-		/* We only do these additional bookkeeping/notification steps
-		 * if we actually copied something out of the queue pair
-		 * instead of just peeking ahead.
-		 */
-
-		if (!(flags & MSG_PEEK)) {
-			/* If the other side has shutdown for sending and there
-			 * is nothing more to read, then modify the socket
-			 * state.
-			 */
-			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
-				if (vsock_stream_has_data(vsk) <= 0) {
-					sk->sk_state = SS_UNCONNECTED;
-					sock_set_flag(sk, SOCK_DONE);
-					sk->sk_state_change(sk);
-				}
-			}
-		}
+	if (copied > 0)
 		err = copied;
-	}
 
 out:
 	release_sock(sk);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 0a369bb..4120b7a 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -842,7 +842,7 @@
 	 * qp_handle.
 	 */
 	if (vmci_handle_is_invalid(e_payload->handle) ||
-	    vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+	    !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
 		return;
 
 	/* We don't ask for delayed CBs when we subscribe to this event (we
@@ -1735,11 +1735,8 @@
 	/* Retrieve the head sk_buff from the socket's receive queue. */
 	err = 0;
 	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
-	if (err)
-		return err;
-
 	if (!skb)
-		return -EAGAIN;
+		return err;
 
 	dg = (struct vmci_datagram *)skb->data;
 	if (!dg)
@@ -2054,7 +2051,7 @@
 	return vmci_get_context_id();
 }
 
-static struct vsock_transport vmci_transport = {
+static const struct vsock_transport vmci_transport = {
 	.init = vmci_transport_socket_init,
 	.destruct = vmci_transport_destruct,
 	.release = vmci_transport_release,
@@ -2154,7 +2151,7 @@
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.2.0-k");
+MODULE_VERSION("1.0.4.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 59cabc9..da49c0b 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -739,7 +739,7 @@
 	 * and thus fail the GO instantiation, consider only the interfaces of
 	 * the current registered device.
 	 */
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		struct ieee80211_channel *other_chan = NULL;
 		int r1, r2;
 
@@ -768,7 +768,7 @@
 		if (chan == other_chan)
 			return true;
 
-		if (chan->band != IEEE80211_BAND_5GHZ)
+		if (chan->band != NL80211_BAND_5GHZ)
 			continue;
 
 		r1 = cfg80211_get_unii(chan->center_freq);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 9f1c4aa..d25c82b 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -3,6 +3,7 @@
  *
  * Copyright 2006-2010		Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright 2015	Intel Deutschland GmbH
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -157,7 +158,7 @@
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK))
 		return -EOPNOTSUPP;
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (!wdev->netdev)
 			continue;
 		wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
@@ -171,7 +172,8 @@
 		/* failed -- clean up to old netns */
 		net = wiphy_net(&rdev->wiphy);
 
-		list_for_each_entry_continue_reverse(wdev, &rdev->wdev_list,
+		list_for_each_entry_continue_reverse(wdev,
+						     &rdev->wiphy.wdev_list,
 						     list) {
 			if (!wdev->netdev)
 				continue;
@@ -230,7 +232,7 @@
 
 	ASSERT_RTNL();
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (wdev->netdev) {
 			dev_close(wdev->netdev);
 			continue;
@@ -298,7 +300,8 @@
 		kfree(item);
 		spin_unlock_irq(&rdev->destroy_list_lock);
 
-		list_for_each_entry_safe(wdev, tmp, &rdev->wdev_list, list) {
+		list_for_each_entry_safe(wdev, tmp,
+					 &rdev->wiphy.wdev_list, list) {
 			if (nlportid == wdev->owner_nlportid)
 				rdev_del_virtual_intf(rdev, wdev);
 		}
@@ -410,7 +413,7 @@
 		dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
 	}
 
-	INIT_LIST_HEAD(&rdev->wdev_list);
+	INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
 	INIT_LIST_HEAD(&rdev->beacon_registrations);
 	spin_lock_init(&rdev->beacon_registrations_lock);
 	spin_lock_init(&rdev->bss_lock);
@@ -557,7 +560,7 @@
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	int res;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_supported_band *sband;
 	bool have_band = false;
 	int i;
@@ -626,6 +629,13 @@
 		     !rdev->ops->set_mac_acl)))
 		return -EINVAL;
 
+	/* assure only valid behaviours are flagged by driver
+	 * hence subtract 2 as bit 0 is invalid.
+	 */
+	if (WARN_ON(wiphy->bss_select_support &&
+		    (wiphy->bss_select_support & ~(BIT(__NL80211_BSS_SELECT_ATTR_AFTER_LAST) - 2))))
+		return -EINVAL;
+
 	if (wiphy->addresses)
 		memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
 
@@ -640,7 +650,7 @@
 		return res;
 
 	/* sanity check supported bands/channels */
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		sband = wiphy->bands[band];
 		if (!sband)
 			continue;
@@ -652,7 +662,7 @@
 		 * on 60GHz band, there are no legacy rates, so
 		 * n_bitrates is 0
 		 */
-		if (WARN_ON(band != IEEE80211_BAND_60GHZ &&
+		if (WARN_ON(band != NL80211_BAND_60GHZ &&
 			    !sband->n_bitrates))
 			return -EINVAL;
 
@@ -662,7 +672,7 @@
 		 * global structure for that.
 		 */
 		if (cfg80211_disable_40mhz_24ghz &&
-		    band == IEEE80211_BAND_2GHZ &&
+		    band == NL80211_BAND_2GHZ &&
 		    sband->ht_cap.ht_supported) {
 			sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 			sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
@@ -792,7 +802,7 @@
 	nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);
 	rdev->wiphy.registered = false;
 
-	WARN_ON(!list_empty(&rdev->wdev_list));
+	WARN_ON(!list_empty(&rdev->wiphy.wdev_list));
 
 	/*
 	 * First remove the hardware from everywhere, this makes
@@ -1014,7 +1024,7 @@
 		spin_lock_init(&wdev->mgmt_registrations_lock);
 
 		wdev->identifier = ++rdev->wdev_id;
-		list_add_rcu(&wdev->list, &rdev->wdev_list);
+		list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
 		rdev->devlist_generation++;
 		/* can only change netns with wiphy */
 		dev->features |= NETIF_F_NETNS_LOCAL;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 022ccad..025b7a5 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -50,10 +50,9 @@
 	/* wiphy index, internal only */
 	int wiphy_idx;
 
-	/* associated wireless interfaces, protected by rtnl or RCU */
-	struct list_head wdev_list;
+	/* protected by RTNL */
 	int devlist_generation, wdev_id;
-	int opencount; /* also protected by devlist_mtx */
+	int opencount;
 	wait_queue_head_t dev_wait;
 
 	struct list_head beacon_registrations;
@@ -214,6 +213,7 @@
 			const u8 *resp_ie;
 			size_t req_ie_len;
 			size_t resp_ie_len;
+			struct cfg80211_bss *bss;
 			u16 status;
 		} cr;
 		struct {
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 4541577..5d45391 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -69,7 +69,7 @@
 	struct wiphy *wiphy = file->private_data;
 	char *buf;
 	unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_supported_band *sband;
 
 	buf = kzalloc(buf_size, GFP_KERNEL);
@@ -78,7 +78,7 @@
 
 	rtnl_lock();
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		sband = wiphy->bands[band];
 		if (!sband)
 			continue;
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 4c55fab..4a4dda5 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -104,7 +104,7 @@
 		struct ieee80211_supported_band *sband =
 			rdev->wiphy.bands[params->chandef.chan->band];
 		int j;
-		u32 flag = params->chandef.chan->band == IEEE80211_BAND_5GHZ ?
+		u32 flag = params->chandef.chan->band == NL80211_BAND_5GHZ ?
 			IEEE80211_RATE_MANDATORY_A :
 			IEEE80211_RATE_MANDATORY_B;
 
@@ -236,7 +236,7 @@
 			    struct wireless_dev *wdev)
 {
 	struct cfg80211_cached_keys *ck = NULL;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int i, err;
 
 	ASSERT_WDEV_LOCK(wdev);
@@ -248,7 +248,7 @@
 	if (!wdev->wext.ibss.chandef.chan) {
 		struct ieee80211_channel *new_chan = NULL;
 
-		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		for (band = 0; band < NUM_NL80211_BANDS; band++) {
 			struct ieee80211_supported_band *sband;
 			struct ieee80211_channel *chan;
 
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 092300b..fa2066b 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -128,9 +128,9 @@
 
 	if (!setup->chandef.chan) {
 		/* if we don't have that either, use the first usable channel */
-		enum ieee80211_band band;
+		enum nl80211_band band;
 
-		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		for (band = 0; band < NUM_NL80211_BANDS; band++) {
 			struct ieee80211_supported_band *sband;
 			struct ieee80211_channel *chan;
 			int i;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index ff32825..c284d88 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -726,7 +726,7 @@
 	wiphy = &rdev->wiphy;
 
 	rtnl_lock();
-	for (bandid = 0; bandid < IEEE80211_NUM_BANDS; bandid++) {
+	for (bandid = 0; bandid < NUM_NL80211_BANDS; bandid++) {
 		sband = wiphy->bands[bandid];
 		if (!sband)
 			continue;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98c9242..d759901 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -103,7 +103,7 @@
 		if (have_wdev_id && rdev->wiphy_idx != wiphy_idx)
 			continue;
 
-		list_for_each_entry(wdev, &rdev->wdev_list, list) {
+		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 			if (have_ifidx && wdev->netdev &&
 			    wdev->netdev->ifindex == ifidx) {
 				result = wdev;
@@ -149,7 +149,7 @@
 		tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32);
 		if (tmp) {
 			/* make sure wdev exists */
-			list_for_each_entry(wdev, &tmp->wdev_list, list) {
+			list_for_each_entry(wdev, &tmp->wiphy.wdev_list, list) {
 				if (wdev->identifier != (u32)wdev_id)
 					continue;
 				found = true;
@@ -402,6 +402,8 @@
 	[NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
 	[NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
 	[NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
+	[NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED },
+	[NL80211_ATTR_STA_SUPPORT_P2P_PS] = { .type = NLA_U8 },
 };
 
 /* policy for the key attributes */
@@ -486,6 +488,15 @@
 	[NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 },
 };
 
+static const struct nla_policy
+nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
+	[NL80211_BSS_SELECT_ATTR_RSSI] = { .type = NLA_FLAG },
+	[NL80211_BSS_SELECT_ATTR_BAND_PREF] = { .type = NLA_U32 },
+	[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST] = {
+		.len = sizeof(struct nl80211_bss_select_rssi_adjust)
+	},
+};
+
 static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
 				     struct netlink_callback *cb,
 				     struct cfg80211_registered_device **rdev,
@@ -524,7 +535,7 @@
 		*rdev = wiphy_to_rdev(wiphy);
 		*wdev = NULL;
 
-		list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
+		list_for_each_entry(tmp, &(*rdev)->wiphy.wdev_list, list) {
 			if (tmp->identifier == cb->args[1]) {
 				*wdev = tmp;
 				break;
@@ -1266,7 +1277,7 @@
 	struct nlattr *nl_bands, *nl_band;
 	struct nlattr *nl_freqs, *nl_freq;
 	struct nlattr *nl_cmds;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_channel *chan;
 	int i;
 	const struct ieee80211_txrx_stypes *mgmt_stypes =
@@ -1399,7 +1410,7 @@
 			goto nla_put_failure;
 
 		for (band = state->band_start;
-		     band < IEEE80211_NUM_BANDS; band++) {
+		     band < NUM_NL80211_BANDS; band++) {
 			struct ieee80211_supported_band *sband;
 
 			sband = rdev->wiphy.bands[band];
@@ -1461,7 +1472,7 @@
 		}
 		nla_nest_end(msg, nl_bands);
 
-		if (band < IEEE80211_NUM_BANDS)
+		if (band < NUM_NL80211_BANDS)
 			state->band_start = band + 1;
 		else
 			state->band_start = 0;
@@ -1731,6 +1742,25 @@
 			    rdev->wiphy.ext_features))
 			goto nla_put_failure;
 
+		if (rdev->wiphy.bss_select_support) {
+			struct nlattr *nested;
+			u32 bss_select_support = rdev->wiphy.bss_select_support;
+
+			nested = nla_nest_start(msg, NL80211_ATTR_BSS_SELECT);
+			if (!nested)
+				goto nla_put_failure;
+
+			i = 0;
+			while (bss_select_support) {
+				if ((bss_select_support & 1) &&
+				    nla_put_flag(msg, i))
+					goto nla_put_failure;
+				i++;
+				bss_select_support >>= 1;
+			}
+			nla_nest_end(msg, nested);
+		}
+
 		/* done */
 		state->split_start = 0;
 		break;
@@ -2399,7 +2429,8 @@
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
-	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+			      NL80211_ATTR_PAD) ||
 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
 	    nla_put_u32(msg, NL80211_ATTR_GENERATION,
 			rdev->devlist_generation ^
@@ -2459,7 +2490,7 @@
 		}
 		if_idx = 0;
 
-		list_for_each_entry(wdev, &rdev->wdev_list, list) {
+		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 			if (if_idx < if_start) {
 				if_idx++;
 				continue;
@@ -2731,7 +2762,7 @@
 		spin_lock_init(&wdev->mgmt_registrations_lock);
 
 		wdev->identifier = ++rdev->wdev_id;
-		list_add_rcu(&wdev->list, &rdev->wdev_list);
+		list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
 		rdev->devlist_generation++;
 		break;
 	default:
@@ -3267,7 +3298,7 @@
 	struct wireless_dev *wdev;
 	bool ret = false;
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (wdev->iftype != NL80211_IFTYPE_AP &&
 		    wdev->iftype != NL80211_IFTYPE_P2P_GO)
 			continue;
@@ -3463,7 +3494,7 @@
 	}
 
 	params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
-	if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ])
+	if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
 		return -EOPNOTSUPP;
 
 	wdev_lock(wdev);
@@ -3724,11 +3755,18 @@
 		goto nla_put_failure;
 
 #define PUT_SINFO(attr, memb, type) do {				\
-	if (sinfo->filled & BIT(NL80211_STA_INFO_ ## attr) &&		\
+	BUILD_BUG_ON(sizeof(type) == sizeof(u64));			\
+	if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) &&	\
 	    nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr,		\
 			     sinfo->memb))				\
 		goto nla_put_failure;					\
 	} while (0)
+#define PUT_SINFO_U64(attr, memb) do {					\
+	if (sinfo->filled & (1ULL << NL80211_STA_INFO_ ## attr) &&	\
+	    nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr,		\
+			      sinfo->memb, NL80211_STA_INFO_PAD))	\
+		goto nla_put_failure;					\
+	} while (0)
 
 	PUT_SINFO(CONNECTED_TIME, connected_time, u32);
 	PUT_SINFO(INACTIVE_TIME, inactive_time, u32);
@@ -3745,11 +3783,12 @@
 			(u32)sinfo->tx_bytes))
 		goto nla_put_failure;
 
-	PUT_SINFO(RX_BYTES64, rx_bytes, u64);
-	PUT_SINFO(TX_BYTES64, tx_bytes, u64);
+	PUT_SINFO_U64(RX_BYTES64, rx_bytes);
+	PUT_SINFO_U64(TX_BYTES64, tx_bytes);
 	PUT_SINFO(LLID, llid, u16);
 	PUT_SINFO(PLID, plid, u16);
 	PUT_SINFO(PLINK_STATE, plink_state, u8);
+	PUT_SINFO_U64(RX_DURATION, rx_duration);
 
 	switch (rdev->wiphy.signal_type) {
 	case CFG80211_SIGNAL_TYPE_MBM:
@@ -3817,12 +3856,13 @@
 		    &sinfo->sta_flags))
 		goto nla_put_failure;
 
-	PUT_SINFO(T_OFFSET, t_offset, u64);
-	PUT_SINFO(RX_DROP_MISC, rx_dropped_misc, u64);
-	PUT_SINFO(BEACON_RX, rx_beacon, u64);
+	PUT_SINFO_U64(T_OFFSET, t_offset);
+	PUT_SINFO_U64(RX_DROP_MISC, rx_dropped_misc);
+	PUT_SINFO_U64(BEACON_RX, rx_beacon);
 	PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8);
 
 #undef PUT_SINFO
+#undef PUT_SINFO_U64
 
 	if (sinfo->filled & BIT(NL80211_STA_INFO_TID_STATS)) {
 		struct nlattr *tidsattr;
@@ -3845,19 +3885,19 @@
 			if (!tidattr)
 				goto nla_put_failure;
 
-#define PUT_TIDVAL(attr, memb, type) do {				\
+#define PUT_TIDVAL_U64(attr, memb) do {					\
 	if (tidstats->filled & BIT(NL80211_TID_STATS_ ## attr) &&	\
-	    nla_put_ ## type(msg, NL80211_TID_STATS_ ## attr,		\
-			     tidstats->memb))				\
+	    nla_put_u64_64bit(msg, NL80211_TID_STATS_ ## attr,		\
+			      tidstats->memb, NL80211_TID_STATS_PAD))	\
 		goto nla_put_failure;					\
 	} while (0)
 
-			PUT_TIDVAL(RX_MSDU, rx_msdu, u64);
-			PUT_TIDVAL(TX_MSDU, tx_msdu, u64);
-			PUT_TIDVAL(TX_MSDU_RETRIES, tx_msdu_retries, u64);
-			PUT_TIDVAL(TX_MSDU_FAILED, tx_msdu_failed, u64);
+			PUT_TIDVAL_U64(RX_MSDU, rx_msdu);
+			PUT_TIDVAL_U64(TX_MSDU, tx_msdu);
+			PUT_TIDVAL_U64(TX_MSDU_RETRIES, tx_msdu_retries);
+			PUT_TIDVAL_U64(TX_MSDU_FAILED, tx_msdu_failed);
 
-#undef PUT_TIDVAL
+#undef PUT_TIDVAL_U64
 			nla_nest_end(msg, tidattr);
 		}
 
@@ -3977,6 +4017,10 @@
 	    statype != CFG80211_STA_AP_CLIENT_UNASSOC)
 		return -EINVAL;
 
+	if (params->support_p2p_ps != -1 &&
+	    statype != CFG80211_STA_AP_CLIENT_UNASSOC)
+		return -EINVAL;
+
 	if (params->aid &&
 	    !(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
 	    statype != CFG80211_STA_AP_CLIENT_UNASSOC)
@@ -4270,6 +4314,18 @@
 	else
 		params.listen_interval = -1;
 
+	if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) {
+		u8 tmp;
+
+		tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
+		if (tmp >= NUM_NL80211_P2P_PS_STATUS)
+			return -EINVAL;
+
+		params.support_p2p_ps = tmp;
+	} else {
+		params.support_p2p_ps = -1;
+	}
+
 	if (!info->attrs[NL80211_ATTR_MAC])
 		return -EINVAL;
 
@@ -4393,6 +4449,23 @@
 	params.listen_interval =
 		nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
 
+	if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) {
+		u8 tmp;
+
+		tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
+		if (tmp >= NUM_NL80211_P2P_PS_STATUS)
+			return -EINVAL;
+
+		params.support_p2p_ps = tmp;
+	} else {
+		/*
+		 * if not specified, assume it's supported for P2P GO interface,
+		 * and is NOT supported for AP interface
+		 */
+		params.support_p2p_ps =
+			dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO;
+	}
+
 	if (info->attrs[NL80211_ATTR_PEER_AID])
 		params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]);
 	else
@@ -5758,6 +5831,73 @@
 	return n_channels;
 }
 
+static bool is_band_valid(struct wiphy *wiphy, enum nl80211_band b)
+{
+	return b < NUM_NL80211_BANDS && wiphy->bands[b];
+}
+
+static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy,
+			    struct cfg80211_bss_selection *bss_select)
+{
+	struct nlattr *attr[NL80211_BSS_SELECT_ATTR_MAX + 1];
+	struct nlattr *nest;
+	int err;
+	bool found = false;
+	int i;
+
+	/* only process one nested attribute */
+	nest = nla_data(nla);
+	if (!nla_ok(nest, nla_len(nest)))
+		return -EINVAL;
+
+	err = nla_parse(attr, NL80211_BSS_SELECT_ATTR_MAX, nla_data(nest),
+			nla_len(nest), nl80211_bss_select_policy);
+	if (err)
+		return err;
+
+	/* only one attribute may be given */
+	for (i = 0; i <= NL80211_BSS_SELECT_ATTR_MAX; i++) {
+		if (attr[i]) {
+			if (found)
+				return -EINVAL;
+			found = true;
+		}
+	}
+
+	bss_select->behaviour = __NL80211_BSS_SELECT_ATTR_INVALID;
+
+	if (attr[NL80211_BSS_SELECT_ATTR_RSSI])
+		bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI;
+
+	if (attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]) {
+		bss_select->behaviour = NL80211_BSS_SELECT_ATTR_BAND_PREF;
+		bss_select->param.band_pref =
+			nla_get_u32(attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]);
+		if (!is_band_valid(wiphy, bss_select->param.band_pref))
+			return -EINVAL;
+	}
+
+	if (attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]) {
+		struct nl80211_bss_select_rssi_adjust *adj_param;
+
+		adj_param = nla_data(attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]);
+		bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI_ADJUST;
+		bss_select->param.adjust.band = adj_param->band;
+		bss_select->param.adjust.delta = adj_param->delta;
+		if (!is_band_valid(wiphy, bss_select->param.adjust.band))
+			return -EINVAL;
+	}
+
+	/* user-space did not provide behaviour attribute */
+	if (bss_select->behaviour == __NL80211_BSS_SELECT_ATTR_INVALID)
+		return -EINVAL;
+
+	if (!(wiphy->bss_select_support & BIT(bss_select->behaviour)))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int nl80211_parse_random_mac(struct nlattr **attrs,
 				    u8 *mac_addr, u8 *mac_addr_mask)
 {
@@ -5888,10 +6028,10 @@
 			i++;
 		}
 	} else {
-		enum ieee80211_band band;
+		enum nl80211_band band;
 
 		/* all channels */
-		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		for (band = 0; band < NUM_NL80211_BANDS; band++) {
 			int j;
 			if (!wiphy->bands[band])
 				continue;
@@ -5936,7 +6076,7 @@
 		       request->ie_len);
 	}
 
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+	for (i = 0; i < NUM_NL80211_BANDS; i++)
 		if (wiphy->bands[i])
 			request->rates[i] =
 				(1 << wiphy->bands[i]->n_bitrates) - 1;
@@ -5945,9 +6085,9 @@
 		nla_for_each_nested(attr,
 				    info->attrs[NL80211_ATTR_SCAN_SUPP_RATES],
 				    tmp) {
-			enum ieee80211_band band = nla_type(attr);
+			enum nl80211_band band = nla_type(attr);
 
-			if (band < 0 || band >= IEEE80211_NUM_BANDS) {
+			if (band < 0 || band >= NUM_NL80211_BANDS) {
 				err = -EINVAL;
 				goto out_free;
 			}
@@ -5996,6 +6136,12 @@
 	request->no_cck =
 		nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
 
+	if (info->attrs[NL80211_ATTR_MAC])
+		memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]),
+		       ETH_ALEN);
+	else
+		eth_broadcast_addr(request->bssid);
+
 	request->wdev = wdev;
 	request->wiphy = &rdev->wiphy;
 	request->scan_start = jiffies;
@@ -6129,7 +6275,7 @@
 	struct cfg80211_sched_scan_request *request;
 	struct nlattr *attr;
 	int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i, n_plans = 0;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	size_t ie_len;
 	struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
 	s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF;
@@ -6294,7 +6440,7 @@
 		}
 	} else {
 		/* all channels */
-		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		for (band = 0; band < NUM_NL80211_BANDS; band++) {
 			int j;
 			if (!wiphy->bands[band])
 				continue;
@@ -6738,7 +6884,8 @@
 	if (wdev->netdev &&
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex))
 		goto nla_put_failure;
-	if (nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+	if (nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+			      NL80211_ATTR_PAD))
 		goto nla_put_failure;
 
 	bss = nla_nest_start(msg, NL80211_ATTR_BSS);
@@ -6759,7 +6906,8 @@
 	 */
 	ies = rcu_dereference(res->ies);
 	if (ies) {
-		if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
+		if (nla_put_u64_64bit(msg, NL80211_BSS_TSF, ies->tsf,
+				      NL80211_BSS_PAD))
 			goto fail_unlock_rcu;
 		if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
 					ies->len, ies->data))
@@ -6769,7 +6917,8 @@
 	/* and this pointer is always (unless driver didn't know) beacon data */
 	ies = rcu_dereference(res->beacon_ies);
 	if (ies && ies->from_beacon) {
-		if (nla_put_u64(msg, NL80211_BSS_BEACON_TSF, ies->tsf))
+		if (nla_put_u64_64bit(msg, NL80211_BSS_BEACON_TSF, ies->tsf,
+				      NL80211_BSS_PAD))
 			goto fail_unlock_rcu;
 		if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES,
 					ies->len, ies->data))
@@ -6788,8 +6937,8 @@
 		goto nla_put_failure;
 
 	if (intbss->ts_boottime &&
-	    nla_put_u64(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
-			intbss->ts_boottime))
+	    nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
+			      intbss->ts_boottime, NL80211_BSS_PAD))
 		goto nla_put_failure;
 
 	switch (rdev->wiphy.signal_type) {
@@ -6909,28 +7058,28 @@
 	    nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE))
 		goto nla_put_failure;
 	if ((survey->filled & SURVEY_INFO_TIME) &&
-	    nla_put_u64(msg, NL80211_SURVEY_INFO_TIME,
-			survey->time))
+	    nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME,
+			survey->time, NL80211_SURVEY_INFO_PAD))
 		goto nla_put_failure;
 	if ((survey->filled & SURVEY_INFO_TIME_BUSY) &&
-	    nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_BUSY,
-			survey->time_busy))
+	    nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_BUSY,
+			      survey->time_busy, NL80211_SURVEY_INFO_PAD))
 		goto nla_put_failure;
 	if ((survey->filled & SURVEY_INFO_TIME_EXT_BUSY) &&
-	    nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY,
-			survey->time_ext_busy))
+	    nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY,
+			      survey->time_ext_busy, NL80211_SURVEY_INFO_PAD))
 		goto nla_put_failure;
 	if ((survey->filled & SURVEY_INFO_TIME_RX) &&
-	    nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_RX,
-			survey->time_rx))
+	    nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_RX,
+			      survey->time_rx, NL80211_SURVEY_INFO_PAD))
 		goto nla_put_failure;
 	if ((survey->filled & SURVEY_INFO_TIME_TX) &&
-	    nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_TX,
-			survey->time_tx))
+	    nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_TX,
+			      survey->time_tx, NL80211_SURVEY_INFO_PAD))
 		goto nla_put_failure;
 	if ((survey->filled & SURVEY_INFO_TIME_SCAN) &&
-	    nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_SCAN,
-			survey->time_scan))
+	    nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_SCAN,
+			      survey->time_scan, NL80211_SURVEY_INFO_PAD))
 		goto nla_put_failure;
 
 	nla_nest_end(msg, infoattr);
@@ -7402,14 +7551,14 @@
 
 static bool
 nl80211_parse_mcast_rate(struct cfg80211_registered_device *rdev,
-			 int mcast_rate[IEEE80211_NUM_BANDS],
+			 int mcast_rate[NUM_NL80211_BANDS],
 			 int rateval)
 {
 	struct wiphy *wiphy = &rdev->wiphy;
 	bool found = false;
 	int band, i;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		struct ieee80211_supported_band *sband;
 
 		sband = wiphy->bands[band];
@@ -7589,7 +7738,7 @@
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
-	int mcast_rate[IEEE80211_NUM_BANDS];
+	int mcast_rate[NUM_NL80211_BANDS];
 	u32 nla_rate;
 	int err;
 
@@ -7650,8 +7799,8 @@
 	}
 
 	if (wdev) {
-		if (nla_put_u64(skb, NL80211_ATTR_WDEV,
-				wdev_id(wdev)))
+		if (nla_put_u64_64bit(skb, NL80211_ATTR_WDEV,
+				      wdev_id(wdev), NL80211_ATTR_PAD))
 			goto nla_put_failure;
 		if (wdev->netdev &&
 		    nla_put_u32(skb, NL80211_ATTR_IFINDEX,
@@ -7922,6 +8071,10 @@
 		connect.mfp = NL80211_MFP_NO;
 	}
 
+	if (info->attrs[NL80211_ATTR_PREV_BSSID])
+		connect.prev_bssid =
+			nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
+
 	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
 		connect.channel = nl80211_get_valid_chan(
 			wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -7990,13 +8143,29 @@
 	}
 
 	connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
-	if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) {
+	if (connect.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) {
 		kzfree(connkeys);
 		return -EOPNOTSUPP;
 	}
 
+	if (info->attrs[NL80211_ATTR_BSS_SELECT]) {
+		/* bss selection makes no sense if bssid is set */
+		if (connect.bssid) {
+			kzfree(connkeys);
+			return -EINVAL;
+		}
+
+		err = parse_bss_select(info->attrs[NL80211_ATTR_BSS_SELECT],
+				       wiphy, &connect.bss_select);
+		if (err) {
+			kzfree(connkeys);
+			return err;
+		}
+	}
+
 	wdev_lock(dev->ieee80211_ptr);
-	err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
+	err = cfg80211_connect(rdev, dev, &connect, connkeys,
+			       connect.prev_bssid);
 	wdev_unlock(dev->ieee80211_ptr);
 	if (err)
 		kzfree(connkeys);
@@ -8224,7 +8393,8 @@
 	if (err)
 		goto free_msg;
 
-	if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+	if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+			      NL80211_ATTR_PAD))
 		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
@@ -8394,7 +8564,7 @@
 
 	memset(&mask, 0, sizeof(mask));
 	/* Default to all rates enabled */
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+	for (i = 0; i < NUM_NL80211_BANDS; i++) {
 		sband = rdev->wiphy.bands[i];
 
 		if (!sband)
@@ -8418,14 +8588,14 @@
 
 	/*
 	 * The nested attribute uses enum nl80211_band as the index. This maps
-	 * directly to the enum ieee80211_band values used in cfg80211.
+	 * directly to the enum nl80211_band values used in cfg80211.
 	 */
 	BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
 	nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
-		enum ieee80211_band band = nla_type(tx_rates);
+		enum nl80211_band band = nla_type(tx_rates);
 		int err;
 
-		if (band < 0 || band >= IEEE80211_NUM_BANDS)
+		if (band < 0 || band >= NUM_NL80211_BANDS)
 			return -EINVAL;
 		sband = rdev->wiphy.bands[band];
 		if (sband == NULL)
@@ -8636,7 +8806,8 @@
 		goto free_msg;
 
 	if (msg) {
-		if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+		if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+				      NL80211_ATTR_PAD))
 			goto nla_put_failure;
 
 		genlmsg_end(msg, hdr);
@@ -9922,7 +10093,8 @@
 	if (err)
 		goto free_msg;
 
-	if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+	if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+			      NL80211_ATTR_PAD))
 		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
@@ -10220,7 +10392,7 @@
 		*wdev = NULL;
 
 		if (cb->args[1]) {
-			list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
+			list_for_each_entry(tmp, &wiphy->wdev_list, list) {
 				if (tmp->identifier == cb->args[1] - 1) {
 					*wdev = tmp;
 					break;
@@ -10347,8 +10519,9 @@
 			break;
 
 		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
-		    (wdev && nla_put_u64(skb, NL80211_ATTR_WDEV,
-					 wdev_id(wdev)))) {
+		    (wdev && nla_put_u64_64bit(skb, NL80211_ATTR_WDEV,
+					       wdev_id(wdev),
+					       NL80211_ATTR_PAD))) {
 			genlmsg_cancel(skb, hdr);
 			break;
 		}
@@ -10590,7 +10763,7 @@
 	 * section 10.22.6.2.1. Disallow 5/10Mhz channels as well for now, the
 	 * specification is not defined for them.
 	 */
-	if (chandef.chan->band == IEEE80211_BAND_2GHZ &&
+	if (chandef.chan->band == NL80211_BAND_2GHZ &&
 	    chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
 	    chandef.width != NL80211_CHAN_WIDTH_20)
 		return -EINVAL;
@@ -11555,7 +11728,8 @@
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
 					 wdev->netdev->ifindex)) ||
-	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+			      NL80211_ATTR_PAD))
 		goto nla_put_failure;
 
 	/* ignore errors and send incomplete event anyway */
@@ -12222,11 +12396,13 @@
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
 					 wdev->netdev->ifindex)) ||
-	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+			      NL80211_ATTR_PAD) ||
 	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
 	    nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
 			NL80211_CHAN_NO_HT) ||
-	    nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+	    nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+			      NL80211_ATTR_PAD))
 		goto nla_put_failure;
 
 	if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL &&
@@ -12460,7 +12636,8 @@
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
 					netdev->ifindex)) ||
-	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+			      NL80211_ATTR_PAD) ||
 	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
 	    (sig_dbm &&
 	     nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
@@ -12503,9 +12680,11 @@
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
 				   netdev->ifindex)) ||
-	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
+	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+			      NL80211_ATTR_PAD) ||
 	    nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
-	    nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+	    nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+			      NL80211_ATTR_PAD) ||
 	    (ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
 		goto nla_put_failure;
 
@@ -12885,7 +13064,8 @@
 		struct wireless_dev *wdev = netdev->ieee80211_ptr;
 
 		if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
-		    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+		    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+				      NL80211_ATTR_PAD))
 			goto nla_put_failure;
 	}
 
@@ -12930,7 +13110,8 @@
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
-	    nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+	    nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
+			      NL80211_ATTR_PAD) ||
 	    (acked && nla_put_flag(msg, NL80211_ATTR_ACK)))
 		goto nla_put_failure;
 
@@ -13075,7 +13256,8 @@
 		goto free_msg;
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
-	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+			      NL80211_ATTR_PAD))
 		goto free_msg;
 
 	if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
@@ -13216,7 +13398,7 @@
 	struct wireless_dev *wdev;
 	struct cfg80211_beacon_registration *reg, *tmp;
 
-	if (state != NETLINK_URELEASE)
+	if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
 		return NOTIFY_DONE;
 
 	rcu_read_lock();
@@ -13231,7 +13413,7 @@
 		    sched_scan_req->owner_nlportid == notify->portid)
 			schedule_scan_stop = true;
 
-		list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
+		list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) {
 			cfg80211_mlme_unregister_socket(wdev, notify->portid);
 
 			if (wdev->owner_nlportid == notify->portid)
@@ -13350,7 +13532,8 @@
 		goto nla_put_failure;
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
-	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+			      NL80211_ATTR_PAD))
 		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
@@ -13383,7 +13566,8 @@
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) ||
-	    nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+			      NL80211_ATTR_PAD))
 		goto out;
 
 	genlmsg_end(msg, hdr);
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 8ae0c04..85ff30b 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1048,7 +1048,7 @@
 static inline int
 rdev_set_mcast_rate(struct cfg80211_registered_device *rdev,
 		    struct net_device *dev,
-		    int mcast_rate[IEEE80211_NUM_BANDS])
+		    int mcast_rate[NUM_NL80211_BANDS])
 {
 	int ret = -ENOTSUPP;
 
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index c5fb317..5dbac37 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1546,12 +1546,12 @@
 
 static void reg_process_ht_flags(struct wiphy *wiphy)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	if (!wiphy)
 		return;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+	for (band = 0; band < NUM_NL80211_BANDS; band++)
 		reg_process_ht_flags_band(wiphy, wiphy->bands[band]);
 }
 
@@ -1639,7 +1639,7 @@
 
 	ASSERT_RTNL();
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list)
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
 		if (!reg_wdev_chan_valid(wiphy, wdev))
 			cfg80211_leave(rdev, wdev);
 }
@@ -1673,7 +1673,7 @@
 static void wiphy_update_regulatory(struct wiphy *wiphy,
 				    enum nl80211_reg_initiator initiator)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct regulatory_request *lr = get_last_request();
 
 	if (ignore_reg_update(wiphy, initiator)) {
@@ -1690,7 +1690,7 @@
 
 	lr->dfs_region = get_cfg80211_regdom()->dfs_region;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+	for (band = 0; band < NUM_NL80211_BANDS; band++)
 		handle_band(wiphy, initiator, wiphy->bands[band]);
 
 	reg_process_beacons(wiphy);
@@ -1786,14 +1786,14 @@
 void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
 				   const struct ieee80211_regdomain *regd)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	unsigned int bands_set = 0;
 
 	WARN(!(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG),
 	     "wiphy should have REGULATORY_CUSTOM_REG\n");
 	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		if (!wiphy->bands[band])
 			continue;
 		handle_band_custom(wiphy, wiphy->bands[band], regd);
@@ -2228,7 +2228,7 @@
 	struct wiphy *wiphy;
 	const struct ieee80211_regdomain *tmp;
 	const struct ieee80211_regdomain *regd;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct regulatory_request request = {};
 
 	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
@@ -2246,7 +2246,7 @@
 		rcu_assign_pointer(wiphy->regd, regd);
 		rcu_free_regdom(tmp);
 
-		for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+		for (band = 0; band < NUM_NL80211_BANDS; band++)
 			handle_band_custom(wiphy, wiphy->bands[band], regd);
 
 		reg_process_ht_flags(wiphy);
@@ -2404,7 +2404,7 @@
 }
 EXPORT_SYMBOL(regulatory_hint);
 
-void regulatory_hint_country_ie(struct wiphy *wiphy, enum ieee80211_band band,
+void regulatory_hint_country_ie(struct wiphy *wiphy, enum nl80211_band band,
 				const u8 *country_ie, u8 country_ie_len)
 {
 	char alpha2[2];
@@ -2504,11 +2504,11 @@
 static void restore_custom_reg_settings(struct wiphy *wiphy)
 {
 	struct ieee80211_supported_band *sband;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_channel *chan;
 	int i;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		sband = wiphy->bands[band];
 		if (!sband)
 			continue;
@@ -2623,9 +2623,9 @@
 
 static bool freq_is_chan_12_13_14(u16 freq)
 {
-	if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) ||
-	    freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) ||
-	    freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ))
+	if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) ||
+	    freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) ||
+	    freq == ieee80211_channel_to_frequency(14, NL80211_BAND_2GHZ))
 		return true;
 	return false;
 }
@@ -2650,7 +2650,7 @@
 
 	if (beacon_chan->beacon_found ||
 	    beacon_chan->flags & IEEE80211_CHAN_RADAR ||
-	    (beacon_chan->band == IEEE80211_BAND_2GHZ &&
+	    (beacon_chan->band == NL80211_BAND_2GHZ &&
 	     !freq_is_chan_12_13_14(beacon_chan->center_freq)))
 		return 0;
 
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 9f495d7..f6ced31 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -104,7 +104,7 @@
  * information for a band the BSS is not present in it will be ignored.
  */
 void regulatory_hint_country_ie(struct wiphy *wiphy,
-			 enum ieee80211_band band,
+			 enum nl80211_band band,
 			 const u8 *country_ie,
 			 u8 country_ie_len);
 
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 14d5369e..ef2955c 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -364,13 +364,16 @@
 }
 EXPORT_SYMBOL(cfg80211_find_ie);
 
-const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
+const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
 				  const u8 *ies, int len)
 {
 	struct ieee80211_vendor_ie *ie;
 	const u8 *pos = ies, *end = ies + len;
 	int ie_oui;
 
+	if (WARN_ON(oui_type > 0xff))
+		return NULL;
+
 	while (pos < end) {
 		pos = cfg80211_find_ie(WLAN_EID_VENDOR_SPECIFIC, pos,
 				       end - pos);
@@ -386,7 +389,8 @@
 			goto cont;
 
 		ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2];
-		if (ie_oui == oui && ie->oui_type == oui_type)
+		if (ie_oui == oui &&
+		    (oui_type < 0 || ie->oui_type == oui_type))
 			return pos;
 cont:
 		pos += 2 + ie->len;
@@ -531,7 +535,7 @@
 }
 
 static bool cfg80211_bss_type_match(u16 capability,
-				    enum ieee80211_band band,
+				    enum nl80211_band band,
 				    enum ieee80211_bss_type bss_type)
 {
 	bool ret = true;
@@ -540,7 +544,7 @@
 	if (bss_type == IEEE80211_BSS_TYPE_ANY)
 		return ret;
 
-	if (band == IEEE80211_BAND_60GHZ) {
+	if (band == NL80211_BAND_60GHZ) {
 		mask = WLAN_CAPABILITY_DMG_TYPE_MASK;
 		switch (bss_type) {
 		case IEEE80211_BSS_TYPE_ESS:
@@ -1006,7 +1010,7 @@
 	if (!res)
 		return NULL;
 
-	if (channel->band == IEEE80211_BAND_60GHZ) {
+	if (channel->band == NL80211_BAND_60GHZ) {
 		bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
 		if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
 		    bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
@@ -1089,7 +1093,7 @@
 	if (!res)
 		return NULL;
 
-	if (channel->band == IEEE80211_BAND_60GHZ) {
+	if (channel->band == NL80211_BAND_60GHZ) {
 		bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
 		if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
 		    bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
@@ -1185,7 +1189,7 @@
 	struct iw_scan_req *wreq = NULL;
 	struct cfg80211_scan_request *creq = NULL;
 	int i, err, n_channels = 0;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
 	if (!netif_running(dev))
 		return -ENETDOWN;
@@ -1229,7 +1233,7 @@
 
 	/* translate "Scan on frequencies" request */
 	i = 0;
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		int j;
 
 		if (!wiphy->bands[band])
@@ -1289,10 +1293,12 @@
 			creq->n_ssids = 0;
 	}
 
-	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+	for (i = 0; i < NUM_NL80211_BANDS; i++)
 		if (wiphy->bands[i])
 			creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
 
+	eth_broadcast_addr(creq->bssid);
+
 	rdev->scan_req = creq;
 	err = rdev_scan(rdev, creq);
 	if (err) {
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 5445581..584fdc3 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -81,7 +81,7 @@
 		return -ENOMEM;
 
 	if (wdev->conn->params.channel) {
-		enum ieee80211_band band = wdev->conn->params.channel->band;
+		enum nl80211_band band = wdev->conn->params.channel->band;
 		struct ieee80211_supported_band *sband =
 			wdev->wiphy->bands[band];
 
@@ -93,11 +93,11 @@
 		request->rates[band] = (1 << sband->n_bitrates) - 1;
 	} else {
 		int i = 0, j;
-		enum ieee80211_band band;
+		enum nl80211_band band;
 		struct ieee80211_supported_band *bands;
 		struct ieee80211_channel *channel;
 
-		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+		for (band = 0; band < NUM_NL80211_BANDS; band++) {
 			bands = wdev->wiphy->bands[band];
 			if (!bands)
 				continue;
@@ -119,6 +119,8 @@
 		wdev->conn->params.ssid_len);
 	request->ssids[0].ssid_len = wdev->conn->params.ssid_len;
 
+	eth_broadcast_addr(request->bssid);
+
 	request->wdev = wdev;
 	request->wiphy = &rdev->wiphy;
 	request->scan_start = jiffies;
@@ -221,7 +223,7 @@
 
 	rtnl_lock();
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (!wdev->netdev)
 			continue;
 
@@ -490,8 +492,18 @@
 	if (!rdev->ops->auth || !rdev->ops->assoc)
 		return -EOPNOTSUPP;
 
-	if (wdev->current_bss)
-		return -EALREADY;
+	if (wdev->current_bss) {
+		if (!prev_bssid)
+			return -EALREADY;
+		if (prev_bssid &&
+		    !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+			return -ENOTCONN;
+		cfg80211_unhold_bss(wdev->current_bss);
+		cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
+		wdev->current_bss = NULL;
+
+		cfg80211_sme_free(wdev);
+	}
 
 	if (WARN_ON(wdev->conn))
 		return -EINPROGRESS;
@@ -605,7 +617,7 @@
 	 * count as new regulatory hints.
 	 */
 	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
-		list_for_each_entry(wdev, &rdev->wdev_list, list) {
+		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 			wdev_lock(wdev);
 			if (wdev->conn || wdev->current_bss)
 				is_all_idle = false;
@@ -741,19 +753,32 @@
 	kfree(country_ie);
 }
 
-void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
-			     const u8 *req_ie, size_t req_ie_len,
-			     const u8 *resp_ie, size_t resp_ie_len,
-			     u16 status, gfp_t gfp)
+/* Consumes bss object one way or another */
+void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+			  struct cfg80211_bss *bss, const u8 *req_ie,
+			  size_t req_ie_len, const u8 *resp_ie,
+			  size_t resp_ie_len, u16 status, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_event *ev;
 	unsigned long flags;
 
+	if (bss) {
+		/* Make sure the bss entry provided by the driver is valid. */
+		struct cfg80211_internal_bss *ibss = bss_from_pub(bss);
+
+		if (WARN_ON(list_empty(&ibss->list))) {
+			cfg80211_put_bss(wdev->wiphy, bss);
+			return;
+		}
+	}
+
 	ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
-	if (!ev)
+	if (!ev) {
+		cfg80211_put_bss(wdev->wiphy, bss);
 		return;
+	}
 
 	ev->type = EVENT_CONNECT_RESULT;
 	if (bssid)
@@ -768,6 +793,9 @@
 		ev->cr.resp_ie_len = resp_ie_len;
 		memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
 	}
+	if (bss)
+		cfg80211_hold_bss(bss_from_pub(bss));
+	ev->cr.bss = bss;
 	ev->cr.status = status;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
@@ -775,7 +803,7 @@
 	spin_unlock_irqrestore(&wdev->event_lock, flags);
 	queue_work(cfg80211_wq, &rdev->event_work);
 }
-EXPORT_SYMBOL(cfg80211_connect_result);
+EXPORT_SYMBOL(cfg80211_connect_bss);
 
 /* Consumes bss object one way or another */
 void __cfg80211_roamed(struct wireless_dev *wdev,
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9cee022..e46469b 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -91,7 +91,7 @@
 {
 	struct wireless_dev *wdev;
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list)
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
 		cfg80211_leave(rdev, wdev);
 }
 
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 09b242b..3c1091ae 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -110,7 +110,7 @@
 				conf->dot11MeshHWMPconfirmationInterval;      \
 	} while (0)
 
-#define CHAN_ENTRY __field(enum ieee80211_band, band) \
+#define CHAN_ENTRY __field(enum nl80211_band, band) \
 		   __field(u16, center_freq)
 #define CHAN_ASSIGN(chan)					  \
 	do {							  \
@@ -125,7 +125,7 @@
 #define CHAN_PR_FMT "band: %d, freq: %u"
 #define CHAN_PR_ARG __entry->band, __entry->center_freq
 
-#define CHAN_DEF_ENTRY __field(enum ieee80211_band, band)		\
+#define CHAN_DEF_ENTRY __field(enum nl80211_band, band)		\
 		       __field(u32, control_freq)			\
 		       __field(u32, width)				\
 		       __field(u32, center_freq1)			\
@@ -1259,6 +1259,7 @@
 		__field(bool, privacy)
 		__field(u32, wpa_versions)
 		__field(u32, flags)
+		MAC_ENTRY(prev_bssid)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
@@ -1270,13 +1271,14 @@
 		__entry->privacy = sme->privacy;
 		__entry->wpa_versions = sme->crypto.wpa_versions;
 		__entry->flags = sme->flags;
+		MAC_ASSIGN(prev_bssid, sme->prev_bssid);
 	),
 	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
 		  ", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
-		  "flags: %u",
+		  "flags: %u, previous bssid: " MAC_PR_FMT,
 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid,
 		  __entry->auth_type, BOOL_TO_STR(__entry->privacy),
-		  __entry->wpa_versions, __entry->flags)
+		  __entry->wpa_versions, __entry->flags, MAC_PR_ARG(prev_bssid))
 );
 
 TRACE_EVENT(rdev_set_cqm_rssi_config,
@@ -2645,7 +2647,7 @@
 	TP_STRUCT__entry(
 		__field(u32, n_channels)
 		__dynamic_array(u8, ie, request ? request->ie_len : 0)
-		__array(u32, rates, IEEE80211_NUM_BANDS)
+		__array(u32, rates, NUM_NL80211_BANDS)
 		__field(u32, wdev_id)
 		MAC_ENTRY(wiphy_mac)
 		__field(bool, no_cck)
@@ -2656,7 +2658,7 @@
 			memcpy(__get_dynamic_array(ie), request->ie,
 			       request->ie_len);
 			memcpy(__entry->rates, request->rates,
-			       IEEE80211_NUM_BANDS);
+			       NUM_NL80211_BANDS);
 			__entry->wdev_id = request->wdev ?
 					request->wdev->identifier : 0;
 			if (request->wiphy)
@@ -2881,25 +2883,25 @@
 
 TRACE_EVENT(rdev_set_mcast_rate,
 	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
-		 int mcast_rate[IEEE80211_NUM_BANDS]),
+		 int mcast_rate[NUM_NL80211_BANDS]),
 	TP_ARGS(wiphy, netdev, mcast_rate),
 	TP_STRUCT__entry(
 		WIPHY_ENTRY
 		NETDEV_ENTRY
-		__array(int, mcast_rate, IEEE80211_NUM_BANDS)
+		__array(int, mcast_rate, NUM_NL80211_BANDS)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
 		NETDEV_ASSIGN;
 		memcpy(__entry->mcast_rate, mcast_rate,
-		       sizeof(int) * IEEE80211_NUM_BANDS);
+		       sizeof(int) * NUM_NL80211_BANDS);
 	),
 	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", "
 		  "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]",
 		  WIPHY_PR_ARG, NETDEV_PR_ARG,
-		  __entry->mcast_rate[IEEE80211_BAND_2GHZ],
-		  __entry->mcast_rate[IEEE80211_BAND_5GHZ],
-		  __entry->mcast_rate[IEEE80211_BAND_60GHZ])
+		  __entry->mcast_rate[NL80211_BAND_2GHZ],
+		  __entry->mcast_rate[NL80211_BAND_5GHZ],
+		  __entry->mcast_rate[NL80211_BAND_60GHZ])
 );
 
 TRACE_EVENT(rdev_set_coalesce,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 9f440a9..219bd19 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -47,7 +47,7 @@
 	if (WARN_ON(!sband))
 		return 1;
 
-	if (sband->band == IEEE80211_BAND_2GHZ) {
+	if (sband->band == NL80211_BAND_2GHZ) {
 		if (scan_width == NL80211_BSS_CHAN_WIDTH_5 ||
 		    scan_width == NL80211_BSS_CHAN_WIDTH_10)
 			mandatory_flag = IEEE80211_RATE_MANDATORY_G;
@@ -65,26 +65,26 @@
 }
 EXPORT_SYMBOL(ieee80211_mandatory_rates);
 
-int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band)
+int ieee80211_channel_to_frequency(int chan, enum nl80211_band band)
 {
 	/* see 802.11 17.3.8.3.2 and Annex J
 	 * there are overlapping channel numbers in 5GHz and 2GHz bands */
 	if (chan <= 0)
 		return 0; /* not supported */
 	switch (band) {
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		if (chan == 14)
 			return 2484;
 		else if (chan < 14)
 			return 2407 + chan * 5;
 		break;
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		if (chan >= 182 && chan <= 196)
 			return 4000 + chan * 5;
 		else
 			return 5000 + chan * 5;
 		break;
-	case IEEE80211_BAND_60GHZ:
+	case NL80211_BAND_60GHZ:
 		if (chan < 5)
 			return 56160 + chan * 2160;
 		break;
@@ -116,11 +116,11 @@
 struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
 						  int freq)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	struct ieee80211_supported_band *sband;
 	int i;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		sband = wiphy->bands[band];
 
 		if (!sband)
@@ -137,12 +137,12 @@
 EXPORT_SYMBOL(__ieee80211_get_channel);
 
 static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
-				     enum ieee80211_band band)
+				     enum nl80211_band band)
 {
 	int i, want;
 
 	switch (band) {
-	case IEEE80211_BAND_5GHZ:
+	case NL80211_BAND_5GHZ:
 		want = 3;
 		for (i = 0; i < sband->n_bitrates; i++) {
 			if (sband->bitrates[i].bitrate == 60 ||
@@ -155,7 +155,7 @@
 		}
 		WARN_ON(want);
 		break;
-	case IEEE80211_BAND_2GHZ:
+	case NL80211_BAND_2GHZ:
 		want = 7;
 		for (i = 0; i < sband->n_bitrates; i++) {
 			if (sband->bitrates[i].bitrate == 10) {
@@ -185,12 +185,12 @@
 		}
 		WARN_ON(want != 0 && want != 3 && want != 6);
 		break;
-	case IEEE80211_BAND_60GHZ:
+	case NL80211_BAND_60GHZ:
 		/* check for mandatory HT MCS 1..4 */
 		WARN_ON(!sband->ht_cap.ht_supported);
 		WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e);
 		break;
-	case IEEE80211_NUM_BANDS:
+	case NUM_NL80211_BANDS:
 		WARN_ON(1);
 		break;
 	}
@@ -198,9 +198,9 @@
 
 void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+	for (band = 0; band < NUM_NL80211_BANDS; band++)
 		if (wiphy->bands[band])
 			set_mandatory_flags_band(wiphy->bands[band], band);
 }
@@ -950,7 +950,7 @@
 				ev->cr.resp_ie, ev->cr.resp_ie_len,
 				ev->cr.status,
 				ev->cr.status == WLAN_STATUS_SUCCESS,
-				NULL);
+				ev->cr.bss);
 			break;
 		case EVENT_ROAMED:
 			__cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
@@ -986,7 +986,7 @@
 
 	ASSERT_RTNL();
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list)
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
 		cfg80211_process_wdev_events(wdev);
 }
 
@@ -1399,22 +1399,22 @@
 EXPORT_SYMBOL(ieee80211_ie_split_ric);
 
 bool ieee80211_operating_class_to_band(u8 operating_class,
-				       enum ieee80211_band *band)
+				       enum nl80211_band *band)
 {
 	switch (operating_class) {
 	case 112:
 	case 115 ... 127:
 	case 128 ... 130:
-		*band = IEEE80211_BAND_5GHZ;
+		*band = NL80211_BAND_5GHZ;
 		return true;
 	case 81:
 	case 82:
 	case 83:
 	case 84:
-		*band = IEEE80211_BAND_2GHZ;
+		*band = NL80211_BAND_2GHZ;
 		return true;
 	case 180:
-		*band = IEEE80211_BAND_60GHZ;
+		*band = NL80211_BAND_60GHZ;
 		return true;
 	}
 
@@ -1560,7 +1560,7 @@
 	if (!beacon_int)
 		return -EINVAL;
 
-	list_for_each_entry(wdev, &rdev->wdev_list, list) {
+	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (!wdev->beacon_interval)
 			continue;
 		if (wdev->beacon_interval != beacon_int) {
@@ -1726,10 +1726,10 @@
 
 unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy)
 {
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	unsigned int n_channels = 0;
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+	for (band = 0; band < NUM_NL80211_BANDS; band++)
 		if (wiphy->bands[band])
 			n_channels += wiphy->bands[band]->n_channels;
 
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index fd68283..9f27221 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -25,42 +25,7 @@
 			  struct iw_request_info *info,
 			  char *name, char *extra)
 {
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct ieee80211_supported_band *sband;
-	bool is_ht = false, is_a = false, is_b = false, is_g = false;
-
-	if (!wdev)
-		return -EOPNOTSUPP;
-
-	sband = wdev->wiphy->bands[IEEE80211_BAND_5GHZ];
-	if (sband) {
-		is_a = true;
-		is_ht |= sband->ht_cap.ht_supported;
-	}
-
-	sband = wdev->wiphy->bands[IEEE80211_BAND_2GHZ];
-	if (sband) {
-		int i;
-		/* Check for mandatory rates */
-		for (i = 0; i < sband->n_bitrates; i++) {
-			if (sband->bitrates[i].bitrate == 10)
-				is_b = true;
-			if (sband->bitrates[i].bitrate == 60)
-				is_g = true;
-		}
-		is_ht |= sband->ht_cap.ht_supported;
-	}
-
 	strcpy(name, "IEEE 802.11");
-	if (is_a)
-		strcat(name, "a");
-	if (is_b)
-		strcat(name, "b");
-	if (is_g)
-		strcat(name, "g");
-	if (is_ht)
-		strcat(name, "n");
-
 	return 0;
 }
 EXPORT_WEXT_HANDLER(cfg80211_wext_giwname);
@@ -143,7 +108,7 @@
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct iw_range *range = (struct iw_range *) extra;
-	enum ieee80211_band band;
+	enum nl80211_band band;
 	int i, c = 0;
 
 	if (!wdev)
@@ -215,7 +180,7 @@
 		}
 	}
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band ++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band ++) {
 		struct ieee80211_supported_band *sband;
 
 		sband = wdev->wiphy->bands[band];
@@ -265,11 +230,11 @@
 	 * -EINVAL for impossible things.
 	 */
 	if (freq->e == 0) {
-		enum ieee80211_band band = IEEE80211_BAND_2GHZ;
+		enum nl80211_band band = NL80211_BAND_2GHZ;
 		if (freq->m < 0)
 			return 0;
 		if (freq->m > 14)
-			band = IEEE80211_BAND_5GHZ;
+			band = NL80211_BAND_5GHZ;
 		return ieee80211_channel_to_frequency(freq->m, band);
 	} else {
 		int i, div = 1000000;
@@ -1245,7 +1210,7 @@
 		maxrate = rate->value / 100000;
 	}
 
-	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		sband = wdev->wiphy->bands[band];
 		if (sband == NULL)
 			continue;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index b50ee5d..6250b1c 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -399,7 +399,10 @@
 	if (err)
 		return err;
 
-	return register_netdevice_notifier(&wext_netdev_notifier);
+	err = register_netdevice_notifier(&wext_netdev_notifier);
+	if (err)
+		unregister_pernet_subsys(&wext_pernet_ops);
+	return err;
 }
 
 subsys_initcall(wireless_nlevent_init);
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 7ecd04c..997ff7b 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -277,6 +277,7 @@
 
 	memset(&theirs, 0, sizeof(theirs));
 	memcpy(new, ours, sizeof(*new));
+	memset(dte, 0, sizeof(*dte));
 
 	len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
 	if (len < 0)
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index ad7f5b3..1c4ad47 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -292,12 +292,15 @@
 		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
 
 		skb_dst_force(skb);
+		dev_hold(skb->dev);
 
 		nexthdr = x->type->input(x, skb);
 
 		if (nexthdr == -EINPROGRESS)
 			return 0;
 resume:
+		dev_put(skb->dev);
+
 		spin_lock(&x->lock);
 		if (nexthdr <= 0) {
 			if (nexthdr == -EBADMSG) {
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index ff4a91f..637387b 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -99,6 +99,9 @@
 
 		skb_dst_force(skb);
 
+		/* Inner headers are invalid now. */
+		skb->encapsulation = 0;
+
 		err = x->type->output(x, skb);
 		if (err == -EINPROGRESS)
 			goto out;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 2cc7af8..d516845 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -809,7 +809,8 @@
 			goto out;
 	}
 	if (x->lastused) {
-		ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
+		ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
+					XFRMA_PAD);
 		if (ret)
 			goto out;
 	}
@@ -1813,7 +1814,7 @@
 
 	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
 	       + nla_total_size(replay_size)
-	       + nla_total_size(sizeof(struct xfrm_lifetime_cur))
+	       + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
 	       + nla_total_size(sizeof(struct xfrm_mark))
 	       + nla_total_size(4) /* XFRM_AE_RTHR */
 	       + nla_total_size(4); /* XFRM_AE_ETHR */
@@ -1848,7 +1849,8 @@
 	}
 	if (err)
 		goto out_cancel;
-	err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
+	err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
+			    XFRMA_PAD);
 	if (err)
 		goto out_cancel;
 
@@ -2617,7 +2619,7 @@
 		l += nla_total_size(sizeof(x->props.extra_flags));
 
 	/* Must count x->lastused as it may become non-zero behind our back. */
-	l += nla_total_size(sizeof(u64));
+	l += nla_total_size_64bit(sizeof(u64));
 
 	return l;
 }
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 502c9fc..0bf2478 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -19,6 +19,7 @@
 hostprogs-y += offwaketime
 hostprogs-y += spintest
 hostprogs-y += map_perf_test
+hostprogs-y += test_overhead
 
 test_verifier-objs := test_verifier.o libbpf.o
 test_maps-objs := test_maps.o libbpf.o
@@ -38,6 +39,7 @@
 offwaketime-objs := bpf_load.o libbpf.o offwaketime_user.o
 spintest-objs := bpf_load.o libbpf.o spintest_user.o
 map_perf_test-objs := bpf_load.o libbpf.o map_perf_test_user.o
+test_overhead-objs := bpf_load.o libbpf.o test_overhead_user.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -56,6 +58,9 @@
 always += offwaketime_kern.o
 always += spintest_kern.o
 always += map_perf_test_kern.o
+always += test_overhead_tp_kern.o
+always += test_overhead_kprobe_kern.o
+always += parse_varlen.o parse_simple.o parse_ldabs.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 
@@ -75,17 +80,46 @@
 HOSTLOADLIBES_offwaketime += -lelf
 HOSTLOADLIBES_spintest += -lelf
 HOSTLOADLIBES_map_perf_test += -lelf -lrt
+HOSTLOADLIBES_test_overhead += -lelf -lrt
 
-# point this to your LLVM backend with bpf support
-LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
+# Allows pointing LLC/CLANG to an LLVM backend with bpf support; redefine on cmdline:
+#  make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
+LLC ?= llc
+CLANG ?= clang
 
-# asm/sysreg.h inline assmbly used by it is incompatible with llvm.
-# But, ehere is not easy way to fix it, so just exclude it since it is
+# Trick to allow make to be run from this directory
+all:
+	$(MAKE) -C ../../ $$PWD/
+
+clean:
+	$(MAKE) -C ../../ M=$$PWD clean
+	@rm -f *~
+
+# Verify LLVM compiler tools are available and bpf target is supported by llc
+.PHONY: verify_cmds verify_target_bpf $(CLANG) $(LLC)
+
+verify_cmds: $(CLANG) $(LLC)
+	@for TOOL in $^ ; do \
+		if ! (which -- "$${TOOL}" > /dev/null 2>&1); then \
+			echo "*** ERROR: Cannot find LLVM tool $${TOOL}" ;\
+			exit 1; \
+		else true; fi; \
+	done
+
+verify_target_bpf: verify_cmds
+	@if ! (${LLC} -march=bpf -mattr=help > /dev/null 2>&1); then \
+		echo "*** ERROR: LLVM (${LLC}) does not support 'bpf' target" ;\
+		echo "   NOTICE: LLVM version >= 3.7.1 required" ;\
+		exit 2; \
+	else true; fi
+
+$(src)/*.c: verify_target_bpf
+
+# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
+# But, there is no easy way to fix it, so just exclude it since it is
 # useless for BPF samples.
 $(obj)/%.o: $(src)/%.c
-	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
+	$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
 		-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
+		-Wno-compare-distinct-pointer-types \
 		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
-	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-		-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
-		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst
new file mode 100644
index 0000000..a43eae3
--- /dev/null
+++ b/samples/bpf/README.rst
@@ -0,0 +1,66 @@
+eBPF sample programs
+====================
+
+This directory contains a mini eBPF library, test stubs, verifier
+test-suite and examples for using eBPF.
+
+Build dependencies
+==================
+
+Compiling requires the following to be installed:
+ * clang >= version 3.4.0
+ * llvm >= version 3.7.1
+
+Note that LLVM's tool 'llc' must support target 'bpf'; list the version
+and supported targets with the command: ``llc --version``
+
+Kernel headers
+--------------
+
+There are usually dependencies on header files of the current kernel.
+To avoid installing devel kernel headers system-wide, as a normal
+user, simply call::
+
+ make headers_install
+
+This will create a local "usr/include" directory in the git/build top
+level directory, which the make system automatically picks up first.
+
+Compiling
+=========
+
+To build the BPF samples, issue the command below from the kernel
+top-level directory::
+
+ make samples/bpf/
+
+Do notice the trailing "/" after the directory name.
+
+It is also possible to call make from this directory.  This will just
+hide the invocation of make as above with the appended "/".
+
+Manually compiling LLVM with 'bpf' support
+------------------------------------------
+
+Since version 3.7.0, LLVM has provided a proper backend target for the
+BPF bytecode architecture.
+
+By default, llvm will build all non-experimental backends, including bpf.
+To generate a smaller llc binary, one can use::
+
+ -DLLVM_TARGETS_TO_BUILD="BPF"
+
+Quick snippet for manually compiling LLVM and clang
+(build dependencies are cmake and gcc-c++)::
+
+ $ git clone http://llvm.org/git/llvm.git
+ $ cd llvm/tools
+ $ git clone --depth 1 http://llvm.org/git/clang.git
+ $ cd ..; mkdir build; cd build
+ $ cmake .. -DLLVM_TARGETS_TO_BUILD="BPF;X86"
+ $ make -j $(getconf _NPROCESSORS_ONLN)
+
+It is also possible to point make to the newly compiled 'llc' or
+'clang' commands by redefining LLC or CLANG on the make command line::
+
+ make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 9363500..7904a2a 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -82,6 +82,7 @@
 #define PT_REGS_FP(x) ((x)->bp)
 #define PT_REGS_RC(x) ((x)->ax)
 #define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->ip)
 
 #elif defined(__s390x__)
 
@@ -94,6 +95,7 @@
 #define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
 #define PT_REGS_RC(x) ((x)->gprs[2])
 #define PT_REGS_SP(x) ((x)->gprs[15])
+#define PT_REGS_IP(x) ((x)->ip)
 
 #elif defined(__aarch64__)
 
@@ -106,6 +108,30 @@
 #define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
 #define PT_REGS_RC(x) ((x)->regs[0])
 #define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->pc)
+
+#elif defined(__powerpc__)
+
+#define PT_REGS_PARM1(x) ((x)->gpr[3])
+#define PT_REGS_PARM2(x) ((x)->gpr[4])
+#define PT_REGS_PARM3(x) ((x)->gpr[5])
+#define PT_REGS_PARM4(x) ((x)->gpr[6])
+#define PT_REGS_PARM5(x) ((x)->gpr[7])
+#define PT_REGS_RC(x) ((x)->gpr[3])
+#define PT_REGS_SP(x) ((x)->sp)
+#define PT_REGS_IP(x) ((x)->nip)
 
 #endif
+
+#ifdef __powerpc__
+#define BPF_KPROBE_READ_RET_IP(ip, ctx)		({ (ip) = (ctx)->link; })
+#define BPF_KRETPROBE_READ_RET_IP		BPF_KPROBE_READ_RET_IP
+#else
+#define BPF_KPROBE_READ_RET_IP(ip, ctx)		({				\
+		bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
+#define BPF_KRETPROBE_READ_RET_IP(ip, ctx)	({				\
+		bpf_probe_read(&(ip), sizeof(ip),				\
+				(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
+#endif
+
 #endif
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 58f86bd..022af71 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -49,6 +49,7 @@
 	bool is_socket = strncmp(event, "socket", 6) == 0;
 	bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
 	bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
+	bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
 	enum bpf_prog_type prog_type;
 	char buf[256];
 	int fd, efd, err, id;
@@ -63,6 +64,8 @@
 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 	} else if (is_kprobe || is_kretprobe) {
 		prog_type = BPF_PROG_TYPE_KPROBE;
+	} else if (is_tracepoint) {
+		prog_type = BPF_PROG_TYPE_TRACEPOINT;
 	} else {
 		printf("Unknown event '%s'\n", event);
 		return -1;
@@ -111,12 +114,23 @@
 			       event, strerror(errno));
 			return -1;
 		}
-	}
 
-	strcpy(buf, DEBUGFS);
-	strcat(buf, "events/kprobes/");
-	strcat(buf, event);
-	strcat(buf, "/id");
+		strcpy(buf, DEBUGFS);
+		strcat(buf, "events/kprobes/");
+		strcat(buf, event);
+		strcat(buf, "/id");
+	} else if (is_tracepoint) {
+		event += 11;
+
+		if (*event == 0) {
+			printf("event name cannot be empty\n");
+			return -1;
+		}
+		strcpy(buf, DEBUGFS);
+		strcat(buf, "events/");
+		strcat(buf, event);
+		strcat(buf, "/id");
+	}
 
 	efd = open(buf, O_RDONLY, 0);
 	if (efd < 0) {
@@ -304,6 +318,7 @@
 
 			if (memcmp(shname_prog, "kprobe/", 7) == 0 ||
 			    memcmp(shname_prog, "kretprobe/", 10) == 0 ||
+			    memcmp(shname_prog, "tracepoint/", 11) == 0 ||
 			    memcmp(shname_prog, "socket", 6) == 0)
 				load_and_attach(shname_prog, insns, data_prog->d_size);
 		}
@@ -320,6 +335,7 @@
 
 		if (memcmp(shname, "kprobe/", 7) == 0 ||
 		    memcmp(shname, "kretprobe/", 10) == 0 ||
+		    memcmp(shname, "tracepoint/", 11) == 0 ||
 		    memcmp(shname, "socket", 6) == 0)
 			load_and_attach(shname, data->d_buf, data->d_size);
 	}
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index 95af56e..3147377 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -17,6 +17,7 @@
 #include <linux/bpf.h>
 #include <string.h>
 #include <time.h>
+#include <sys/resource.h>
 #include "libbpf.h"
 #include "bpf_load.h"
 
diff --git a/samples/bpf/offwaketime_kern.c b/samples/bpf/offwaketime_kern.c
index c0aa5a9..e7d9a0a 100644
--- a/samples/bpf/offwaketime_kern.c
+++ b/samples/bpf/offwaketime_kern.c
@@ -11,7 +11,7 @@
 #include <linux/version.h>
 #include <linux/sched.h>
 
-#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+#define _(P) ({typeof(P) val; bpf_probe_read(&val, sizeof(val), &P); val;})
 
 #define MINBLOCK_US	1
 
@@ -61,7 +61,7 @@
 int waker(struct pt_regs *ctx)
 {
 	struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
-	struct wokeby_t woke = {};
+	struct wokeby_t woke;
 	u32 pid;
 
 	pid = _(p->pid);
@@ -73,19 +73,21 @@
 	return 0;
 }
 
-static inline int update_counts(struct pt_regs *ctx, u32 pid, u64 delta)
+static inline int update_counts(void *ctx, u32 pid, u64 delta)
 {
-	struct key_t key = {};
 	struct wokeby_t *woke;
 	u64 zero = 0, *val;
+	struct key_t key;
 
+	__builtin_memset(&key.waker, 0, sizeof(key.waker));
 	bpf_get_current_comm(&key.target, sizeof(key.target));
 	key.tret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);
+	key.wret = 0;
 
 	woke = bpf_map_lookup_elem(&wokeby, &pid);
 	if (woke) {
 		key.wret = woke->ret;
-		__builtin_memcpy(&key.waker, woke->name, TASK_COMM_LEN);
+		__builtin_memcpy(&key.waker, woke->name, sizeof(key.waker));
 		bpf_map_delete_elem(&wokeby, &pid);
 	}
 
@@ -100,15 +102,33 @@
 	return 0;
 }
 
+#if 1
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+	unsigned long long pad;
+	char prev_comm[16];
+	int prev_pid;
+	int prev_prio;
+	long long prev_state;
+	char next_comm[16];
+	int next_pid;
+	int next_prio;
+};
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+	/* record previous thread sleep time */
+	u32 pid = ctx->prev_pid;
+#else
 SEC("kprobe/finish_task_switch")
 int oncpu(struct pt_regs *ctx)
 {
 	struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
-	u64 delta, ts, *tsp;
-	u32 pid;
-
 	/* record previous thread sleep time */
-	pid = _(p->pid);
+	u32 pid = _(p->pid);
+#endif
+	u64 delta, ts, *tsp;
+
 	ts = bpf_ktime_get_ns();
 	bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);
 
diff --git a/samples/bpf/parse_ldabs.c b/samples/bpf/parse_ldabs.c
new file mode 100644
index 0000000..d175501
--- /dev/null
+++ b/samples/bpf/parse_ldabs.c
@@ -0,0 +1,41 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+#define DEFAULT_PKTGEN_UDP_PORT	9
+#define IP_MF			0x2000
+#define IP_OFFSET		0x1FFF
+
+static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
+{
+	return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
+		& (IP_MF | IP_OFFSET);
+}
+
+SEC("ldabs")
+int handle_ingress(struct __sk_buff *skb)
+{
+	__u64 troff = ETH_HLEN + sizeof(struct iphdr);
+
+	if (load_half(skb, offsetof(struct ethhdr, h_proto)) != ETH_P_IP)
+		return 0;
+	if (load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)) != IPPROTO_UDP ||
+	    load_byte(skb, ETH_HLEN) != 0x45)
+		return 0;
+	if (ip_is_fragment(skb, ETH_HLEN))
+		return 0;
+	if (load_half(skb, troff + offsetof(struct udphdr, dest)) == DEFAULT_PKTGEN_UDP_PORT)
+		return TC_ACT_SHOT;
+	return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/parse_simple.c b/samples/bpf/parse_simple.c
new file mode 100644
index 0000000..cf2511c
--- /dev/null
+++ b/samples/bpf/parse_simple.c
@@ -0,0 +1,48 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <uapi/linux/bpf.h>
+#include <net/ip.h>
+#include "bpf_helpers.h"
+
+#define DEFAULT_PKTGEN_UDP_PORT 9
+
+/* copy of 'struct ethhdr' without __packed */
+struct eth_hdr {
+	unsigned char   h_dest[ETH_ALEN];
+	unsigned char   h_source[ETH_ALEN];
+	unsigned short  h_proto;
+};
+
+SEC("simple")
+int handle_ingress(struct __sk_buff *skb)
+{
+	void *data = (void *)(long)skb->data;
+	struct eth_hdr *eth = data;
+	struct iphdr *iph = data + sizeof(*eth);
+	struct udphdr *udp = data + sizeof(*eth) + sizeof(*iph);
+	void *data_end = (void *)(long)skb->data_end;
+
+	/* single length check */
+	if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*udp) > data_end)
+		return 0;
+
+	if (eth->h_proto != htons(ETH_P_IP))
+		return 0;
+	if (iph->protocol != IPPROTO_UDP || iph->ihl != 5)
+		return 0;
+	if (ip_is_fragment(iph))
+		return 0;
+	if (udp->dest == htons(DEFAULT_PKTGEN_UDP_PORT))
+		return TC_ACT_SHOT;
+	return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
new file mode 100644
index 0000000..edab34d
--- /dev/null
+++ b/samples/bpf/parse_varlen.c
@@ -0,0 +1,153 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <uapi/linux/bpf.h>
+#include <net/ip.h>
+#include "bpf_helpers.h"
+
+#define DEFAULT_PKTGEN_UDP_PORT 9
+#define DEBUG 0
+
+static int tcp(void *data, uint64_t tp_off, void *data_end)
+{
+	struct tcphdr *tcp = data + tp_off;
+
+	if (tcp + 1 > data_end)
+		return 0;
+	if (tcp->dest == htons(80) || tcp->source == htons(80))
+		return TC_ACT_SHOT;
+	return 0;
+}
+
+static int udp(void *data, uint64_t tp_off, void *data_end)
+{
+	struct udphdr *udp = data + tp_off;
+
+	if (udp + 1 > data_end)
+		return 0;
+	if (udp->dest == htons(DEFAULT_PKTGEN_UDP_PORT) ||
+	    udp->source == htons(DEFAULT_PKTGEN_UDP_PORT)) {
+		if (DEBUG) {
+			char fmt[] = "udp port 9 indeed\n";
+
+			bpf_trace_printk(fmt, sizeof(fmt));
+		}
+		return TC_ACT_SHOT;
+	}
+	return 0;
+}
+
+static int parse_ipv4(void *data, uint64_t nh_off, void *data_end)
+{
+	struct iphdr *iph;
+	uint64_t ihl_len;
+
+	iph = data + nh_off;
+	if (iph + 1 > data_end)
+		return 0;
+
+	if (ip_is_fragment(iph))
+		return 0;
+	ihl_len = iph->ihl * 4;
+
+	if (iph->protocol == IPPROTO_IPIP) {
+		iph = data + nh_off + ihl_len;
+		if (iph + 1 > data_end)
+			return 0;
+		ihl_len += iph->ihl * 4;
+	}
+
+	if (iph->protocol == IPPROTO_TCP)
+		return tcp(data, nh_off + ihl_len, data_end);
+	else if (iph->protocol == IPPROTO_UDP)
+		return udp(data, nh_off + ihl_len, data_end);
+	return 0;
+}
+
+static int parse_ipv6(void *data, uint64_t nh_off, void *data_end)
+{
+	struct ipv6hdr *ip6h;
+	struct iphdr *iph;
+	uint64_t ihl_len = sizeof(struct ipv6hdr);
+	uint64_t nexthdr;
+
+	ip6h = data + nh_off;
+	if (ip6h + 1 > data_end)
+		return 0;
+
+	nexthdr = ip6h->nexthdr;
+
+	if (nexthdr == IPPROTO_IPIP) {
+		iph = data + nh_off + ihl_len;
+		if (iph + 1 > data_end)
+			return 0;
+		ihl_len += iph->ihl * 4;
+		nexthdr = iph->protocol;
+	} else if (nexthdr == IPPROTO_IPV6) {
+		ip6h = data + nh_off + ihl_len;
+		if (ip6h + 1 > data_end)
+			return 0;
+		ihl_len += sizeof(struct ipv6hdr);
+		nexthdr = ip6h->nexthdr;
+	}
+
+	if (nexthdr == IPPROTO_TCP)
+		return tcp(data, nh_off + ihl_len, data_end);
+	else if (nexthdr == IPPROTO_UDP)
+		return udp(data, nh_off + ihl_len, data_end);
+	return 0;
+}
+
+struct vlan_hdr {
+	uint16_t h_vlan_TCI;
+	uint16_t h_vlan_encapsulated_proto;
+};
+
+SEC("varlen")
+int handle_ingress(struct __sk_buff *skb)
+{
+	void *data = (void *)(long)skb->data;
+	struct ethhdr *eth = data;
+	void *data_end = (void *)(long)skb->data_end;
+	uint64_t h_proto, nh_off;
+
+	nh_off = sizeof(*eth);
+	if (data + nh_off > data_end)
+		return 0;
+
+	h_proto = eth->h_proto;
+
+	if (h_proto == ETH_P_8021Q || h_proto == ETH_P_8021AD) {
+		struct vlan_hdr *vhdr;
+
+		vhdr = data + nh_off;
+		nh_off += sizeof(struct vlan_hdr);
+		if (data + nh_off > data_end)
+			return 0;
+		h_proto = vhdr->h_vlan_encapsulated_proto;
+	}
+	if (h_proto == ETH_P_8021Q || h_proto == ETH_P_8021AD) {
+		struct vlan_hdr *vhdr;
+
+		vhdr = data + nh_off;
+		nh_off += sizeof(struct vlan_hdr);
+		if (data + nh_off > data_end)
+			return 0;
+		h_proto = vhdr->h_vlan_encapsulated_proto;
+	}
+	if (h_proto == htons(ETH_P_IP))
+		return parse_ipv4(data, nh_off, data_end);
+	else if (h_proto == htons(ETH_P_IPV6))
+		return parse_ipv6(data, nh_off, data_end);
+	return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest_kern.c
index 4b27619..ce0167d 100644
--- a/samples/bpf/spintest_kern.c
+++ b/samples/bpf/spintest_kern.c
@@ -34,7 +34,7 @@
 #define PROG(foo) \
 int foo(struct pt_regs *ctx) \
 { \
-	long v = ctx->ip, *val; \
+	long v = PT_REGS_IP(ctx), *val; \
 \
 	val = bpf_map_lookup_elem(&my_map, &v); \
 	bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \
diff --git a/samples/bpf/test_cls_bpf.sh b/samples/bpf/test_cls_bpf.sh
new file mode 100755
index 0000000..0365d5e
--- /dev/null
+++ b/samples/bpf/test_cls_bpf.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+function pktgen {
+    ../pktgen/pktgen_bench_xmit_mode_netif_receive.sh -i $IFC -s 64 \
+        -m 90:e2:ba:ff:ff:ff -d 192.168.0.1 -t 4
+    local dropped=`tc -s qdisc show dev $IFC | tail -3 | awk '/drop/{print $7}'`
+    if [ "$dropped" == "0," ]; then
+        echo "FAIL"
+    else
+        echo "Successfully filtered " $dropped " packets"
+    fi
+}
+
+function test {
+    echo -n "Loading bpf program '$2'... "
+    tc qdisc add dev $IFC clsact
+    tc filter add dev $IFC ingress bpf da obj $1 sec $2
+    local status=$?
+    if [ $status -ne 0 ]; then
+        echo "FAIL"
+    else
+        echo "ok"
+        pktgen
+    fi
+    tc qdisc del dev $IFC clsact
+}
+
+IFC=test_veth
+
+ip link add name $IFC type veth peer name pair_$IFC
+ip link set $IFC up
+ip link set pair_$IFC up
+
+test ./parse_simple.o simple
+test ./parse_varlen.o varlen
+test ./parse_ldabs.o ldabs
+ip link del dev $IFC
diff --git a/samples/bpf/test_overhead_kprobe_kern.c b/samples/bpf/test_overhead_kprobe_kern.c
new file mode 100644
index 0000000..468a66a
--- /dev/null
+++ b/samples/bpf/test_overhead_kprobe_kern.c
@@ -0,0 +1,41 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+
+SEC("kprobe/__set_task_comm")
+int prog(struct pt_regs *ctx)
+{
+	struct signal_struct *signal;
+	struct task_struct *tsk;
+	char oldcomm[16] = {};
+	char newcomm[16] = {};
+	u16 oom_score_adj;
+	u32 pid;
+
+	tsk = (void *)PT_REGS_PARM1(ctx);
+
+	pid = _(tsk->pid);
+	bpf_probe_read(oldcomm, sizeof(oldcomm), &tsk->comm);
+	bpf_probe_read(newcomm, sizeof(newcomm), (void *)PT_REGS_PARM2(ctx));
+	signal = _(tsk->signal);
+	oom_score_adj = _(signal->oom_score_adj);
+	return 0;
+}
+
+SEC("kprobe/urandom_read")
+int prog2(struct pt_regs *ctx)
+{
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/test_overhead_tp_kern.c b/samples/bpf/test_overhead_tp_kern.c
new file mode 100644
index 0000000..38f5c0b
--- /dev/null
+++ b/samples/bpf/test_overhead_tp_kern.c
@@ -0,0 +1,36 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* from /sys/kernel/debug/tracing/events/task/task_rename/format */
+struct task_rename {
+	__u64 pad;
+	__u32 pid;
+	char oldcomm[16];
+	char newcomm[16];
+	__u16 oom_score_adj;
+};
+SEC("tracepoint/task/task_rename")
+int prog(struct task_rename *ctx)
+{
+	return 0;
+}
+
+/* from /sys/kernel/debug/tracing/events/random/urandom_read/format */
+struct urandom_read {
+	__u64 pad;
+	int got_bits;
+	int pool_left;
+	int input_left;
+};
+SEC("tracepoint/random/urandom_read")
+int prog2(struct urandom_read *ctx)
+{
+	return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
new file mode 100644
index 0000000..d291167f
--- /dev/null
+++ b/samples/bpf/test_overhead_user.c
@@ -0,0 +1,162 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <asm/unistd.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <linux/bpf.h>
+#include <string.h>
+#include <time.h>
+#include <sys/resource.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define MAX_CNT 1000000
+
+static __u64 time_get_ns(void)
+{
+	struct timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+static void test_task_rename(int cpu)
+{
+	__u64 start_time;
+	char buf[] = "test\n";
+	int i, fd;
+
+	fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
+	if (fd < 0) {
+		printf("couldn't open /proc\n");
+		exit(1);
+	}
+	start_time = time_get_ns();
+	for (i = 0; i < MAX_CNT; i++)
+		write(fd, buf, sizeof(buf));
+	printf("task_rename:%d: %lld events per sec\n",
+	       cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+	close(fd);
+}
+
+static void test_urandom_read(int cpu)
+{
+	__u64 start_time;
+	char buf[4];
+	int i, fd;
+
+	fd = open("/dev/urandom", O_RDONLY);
+	if (fd < 0) {
+		printf("couldn't open /dev/urandom\n");
+		exit(1);
+	}
+	start_time = time_get_ns();
+	for (i = 0; i < MAX_CNT; i++)
+		read(fd, buf, sizeof(buf));
+	printf("urandom_read:%d: %lld events per sec\n",
+	       cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+	close(fd);
+}
+
+static void loop(int cpu, int flags)
+{
+	cpu_set_t cpuset;
+
+	CPU_ZERO(&cpuset);
+	CPU_SET(cpu, &cpuset);
+	sched_setaffinity(0, sizeof(cpuset), &cpuset);
+
+	if (flags & 1)
+		test_task_rename(cpu);
+	if (flags & 2)
+		test_urandom_read(cpu);
+}
+
+static void run_perf_test(int tasks, int flags)
+{
+	pid_t pid[tasks];
+	int i;
+
+	for (i = 0; i < tasks; i++) {
+		pid[i] = fork();
+		if (pid[i] == 0) {
+			loop(i, flags);
+			exit(0);
+		} else if (pid[i] == -1) {
+			printf("couldn't spawn #%d process\n", i);
+			exit(1);
+		}
+	}
+	for (i = 0; i < tasks; i++) {
+		int status;
+
+		assert(waitpid(pid[i], &status, 0) == pid[i]);
+		assert(status == 0);
+	}
+}
+
+static void unload_progs(void)
+{
+	close(prog_fd[0]);
+	close(prog_fd[1]);
+	close(event_fd[0]);
+	close(event_fd[1]);
+}
+
+int main(int argc, char **argv)
+{
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+	char filename[256];
+	int num_cpu = 8;
+	int test_flags = ~0;
+
+	setrlimit(RLIMIT_MEMLOCK, &r);
+
+	if (argc > 1)
+		test_flags = atoi(argv[1]) ? : test_flags;
+	if (argc > 2)
+		num_cpu = atoi(argv[2]) ? : num_cpu;
+
+	if (test_flags & 0x3) {
+		printf("BASE\n");
+		run_perf_test(num_cpu, test_flags);
+	}
+
+	if (test_flags & 0xC) {
+		snprintf(filename, sizeof(filename),
+			 "%s_kprobe_kern.o", argv[0]);
+		if (load_bpf_file(filename)) {
+			printf("%s", bpf_log_buf);
+			return 1;
+		}
+		printf("w/KPROBE\n");
+		run_perf_test(num_cpu, test_flags >> 2);
+		unload_progs();
+	}
+
+	if (test_flags & 0x30) {
+		snprintf(filename, sizeof(filename),
+			 "%s_tp_kern.o", argv[0]);
+		if (load_bpf_file(filename)) {
+			printf("%s", bpf_log_buf);
+			return 1;
+		}
+		printf("w/TRACEPOINT\n");
+		run_perf_test(num_cpu, test_flags >> 4);
+		unload_progs();
+	}
+
+	return 0;
+}
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 4b51a90..fe2fcec 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -309,6 +309,19 @@
 		.result_unpriv = REJECT,
 	},
 	{
+		"check valid spill/fill, skb mark",
+		.insns = {
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = ACCEPT,
+	},
+	{
 		"check corrupted spill/fill",
 		.insns = {
 			/* spill R1(ctx) into stack */
@@ -1180,6 +1193,341 @@
 		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
+	{
+		"raw_stack: no skb_load_bytes",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 8),
+			/* Call to skb_load_bytes() omitted. */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid read from stack off -8+0 size 8",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, no init",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, init",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, spilled regs around bounds",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8), /* spill ctx from R1 */
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8), /* fill ctx into R2 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+				    offsetof(struct __sk_buff, priority)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, spilled regs corruption",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), /* spill ctx from R1 */
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), /* fill ctx into R0 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "R0 invalid mem access 'inv'",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, spilled regs corruption 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0), /* spill ctx from R1 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8), /* spill ctx from R1 */
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8), /* fill ctx into R2 */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0), /* fill ctx into R3 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+				    offsetof(struct __sk_buff, priority)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
+				    offsetof(struct __sk_buff, pkt_type)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "R3 invalid mem access 'inv'",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, spilled regs + data",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), /* spill ctx from R1 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0), /* spill ctx from R1 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8), /* spill ctx from R1 */
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), /* fill ctx into R0 */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8), /* fill ctx into R2 */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0), /* fill data into R3 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+				    offsetof(struct __sk_buff, priority)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, invalid access 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack type R3 off=-513 access_size=8",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, invalid access 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack type R3 off=-1 access_size=8",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, invalid access 3",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack type R3 off=-1 access_size=-1",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, invalid access 4",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack type R3 off=-1 access_size=2147483647",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, invalid access 5",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack type R3 off=-512 access_size=2147483647",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, invalid access 6",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack type R3 off=-512 access_size=0",
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"raw_stack: skb_load_bytes, large access",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 4),
+			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_4, 512),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"pkt: test1",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"pkt: test2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
+			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
+			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"pkt: test3",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid bpf_context access off=76",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+	},
+	{
+		"pkt: test4",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "cannot write",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
index 8d8d1ec..9b96f4f 100644
--- a/samples/bpf/trace_output_kern.c
+++ b/samples/bpf/trace_output_kern.c
@@ -18,7 +18,6 @@
 		u64 cookie;
 	} data;
 
-	memset(&data, 0, sizeof(data));
 	data.pid = bpf_get_current_pid_tgid();
 	data.cookie = 0x12345678;
 
diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
index 3f450a8..107da14 100644
--- a/samples/bpf/tracex1_kern.c
+++ b/samples/bpf/tracex1_kern.c
@@ -23,16 +23,14 @@
 	/* attaches to kprobe netif_receive_skb,
 	 * looks for packets on loobpack device and prints them
 	 */
-	char devname[IFNAMSIZ] = {};
+	char devname[IFNAMSIZ];
 	struct net_device *dev;
 	struct sk_buff *skb;
 	int len;
 
 	/* non-portable! works for the given kernel only */
 	skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
-
 	dev = _(skb->dev);
-
 	len = _(skb->len);
 
 	bpf_probe_read(devname, sizeof(devname), dev->name);
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index 09c1adc..5e11c20 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -27,10 +27,10 @@
 	long init_val = 1;
 	long *value;
 
-	/* x64/s390x specific: read ip of kfree_skb caller.
+	/* read ip of kfree_skb caller.
 	 * non-portable version of __builtin_return_address(0)
 	 */
-	bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx));
+	BPF_KPROBE_READ_RET_IP(loc, ctx);
 
 	value = bpf_map_lookup_elem(&my_map, &loc);
 	if (value)
@@ -66,7 +66,7 @@
 	char comm[16];
 	u64 pid_tgid;
 	u64 uid_gid;
-	u32 index;
+	u64 index;
 };
 
 struct bpf_map_def SEC("maps") my_hist_map = {
@@ -82,7 +82,7 @@
 	long write_size = PT_REGS_PARM3(ctx);
 	long init_val = 1;
 	long *value;
-	struct hist_key key = {};
+	struct hist_key key;
 
 	key.index = log2l(write_size);
 	key.pid_tgid = bpf_get_current_pid_tgid();
diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4_kern.c
index ac46714..6dd8e38 100644
--- a/samples/bpf/tracex4_kern.c
+++ b/samples/bpf/tracex4_kern.c
@@ -40,7 +40,7 @@
 	long ip = 0;
 
 	/* get ip address of kmem_cache_alloc_node() caller */
-	bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip)));
+	BPF_KRETPROBE_READ_RET_IP(ip, ctx);
 
 	struct pair v = {
 		.val = bpf_ktime_get_ns(),
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
index b3f4295..f95f232 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5_kern.c
@@ -22,7 +22,7 @@
 SEC("kprobe/seccomp_phase1")
 int bpf_prog1(struct pt_regs *ctx)
 {
-	struct seccomp_data sd = {};
+	struct seccomp_data sd;
 
 	bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 
@@ -40,7 +40,7 @@
 /* we jump here when syscall number == __NR_write */
 PROG(__NR_write)(struct pt_regs *ctx)
 {
-	struct seccomp_data sd = {};
+	struct seccomp_data sd;
 
 	bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 	if (sd.args[2] == 512) {
@@ -53,7 +53,7 @@
 
 PROG(__NR_read)(struct pt_regs *ctx)
 {
-	struct seccomp_data sd = {};
+	struct seccomp_data sd;
 
 	bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 	if (sd.args[2] > 128 && sd.args[2] <= 1024) {
diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c
index fb8c861..e34f871 100644
--- a/samples/livepatch/livepatch-sample.c
+++ b/samples/livepatch/livepatch-sample.c
@@ -89,3 +89,4 @@
 module_init(livepatch_init);
 module_exit(livepatch_exit);
 MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
diff --git a/samples/rpmsg/rpmsg_client_sample.c b/samples/rpmsg/rpmsg_client_sample.c
index 59b1344..d0e249c9 100644
--- a/samples/rpmsg/rpmsg_client_sample.c
+++ b/samples/rpmsg/rpmsg_client_sample.c
@@ -77,24 +77,12 @@
 
 static struct rpmsg_driver rpmsg_sample_client = {
 	.drv.name	= KBUILD_MODNAME,
-	.drv.owner	= THIS_MODULE,
 	.id_table	= rpmsg_driver_sample_id_table,
 	.probe		= rpmsg_sample_probe,
 	.callback	= rpmsg_sample_cb,
 	.remove		= rpmsg_sample_remove,
 };
-
-static int __init rpmsg_client_sample_init(void)
-{
-	return register_rpmsg_driver(&rpmsg_sample_client);
-}
-module_init(rpmsg_client_sample_init);
-
-static void __exit rpmsg_client_sample_fini(void)
-{
-	unregister_rpmsg_driver(&rpmsg_sample_client);
-}
-module_exit(rpmsg_client_sample_fini);
+module_rpmsg_driver(rpmsg_sample_client);
 
 MODULE_DESCRIPTION("Remote processor messaging sample client driver");
 MODULE_LICENSE("GPL v2");
diff --git a/samples/v4l/v4l2-pci-skeleton.c b/samples/v4l/v4l2-pci-skeleton.c
index 79af0c0..a55cf94 100644
--- a/samples/v4l/v4l2-pci-skeleton.c
+++ b/samples/v4l/v4l2-pci-skeleton.c
@@ -308,9 +308,6 @@
 	strlcpy(cap->card, "V4L2 PCI Skeleton", sizeof(cap->card));
 	snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
 		 pci_name(skel->pdev));
-	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
-			   V4L2_CAP_STREAMING;
-	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 	return 0;
 }
 
@@ -872,6 +869,8 @@
 	vdev->release = video_device_release_empty;
 	vdev->fops = &skel_fops,
 	vdev->ioctl_ops = &skel_ioctl_ops,
+	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+			    V4L2_CAP_STREAMING;
 	/*
 	 * The main serialization lock. All ioctls are serialized by this
 	 * lock. Exception: if q->lock is set, then the streaming ioctls
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index e000f44..c1b7ef3 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -650,7 +650,7 @@
 	}
 
 	hdr = fopen(headername, "w");
-	if (!out) {
+	if (!hdr) {
 		perror(headername);
 		exit(1);
 	}
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index f3d3fb4..b8c7b29 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -188,7 +188,7 @@
 	$cont = 0;
 
 	# collect any Kconfig sources
-	if (/^source\s*"(.*)"/) {
+	if (/^source\s+"?([^"]+)/) {
 	    my $kconfig = $1;
 	    # prevent reading twice.
 	    if (!defined($read_kconfigs{$kconfig})) {
@@ -237,7 +237,7 @@
 	    }
 
 	# configs without prompts must be selected
-	} elsif ($state ne "NONE" && /^\s*tristate\s\S/) {
+	} elsif ($state ne "NONE" && /^\s*(tristate\s+\S|prompt\b)/) {
 	    # note if the config has a prompt
 	    $prompts{$config} = 1;
 
@@ -256,8 +256,8 @@
 
 	    $iflevel-- if ($iflevel);
 
-	# stop on "help"
-	} elsif (/^\s*help\s*$/) {
+	# stop on "help" and keywords that end a menu entry
+	} elsif (/^\s*(---)?help(---)?\s*$/ || /^(comment|choice|menu)\b/) {
 	    $state = "NONE";
 	}
     }
@@ -454,7 +454,7 @@
 	    $p =~ s/^[^$valid]*[$valid]+//;
 
 	    # We only need to process if the depend config is a module
-	    if (!defined($orig_configs{$conf}) || !$orig_configs{conf} eq "m") {
+	    if (!defined($orig_configs{$conf}) || $orig_configs{$conf} eq "y") {
 		next;
 	    }
 
@@ -610,6 +610,40 @@
 	next;
     }
 
+    if (/CONFIG_MODULE_SIG_KEY="(.+)"/) {
+        my $orig_cert = $1;
+        my $default_cert = "certs/signing_key.pem";
+
+        # Check that the logic in this script still matches the one in Kconfig
+        if (!defined($depends{"MODULE_SIG_KEY"}) ||
+            $depends{"MODULE_SIG_KEY"} !~ /"\Q$default_cert\E"/) {
+            print STDERR "WARNING: MODULE_SIG_KEY assertion failure, ",
+                "update needed to ", __FILE__, " line ", __LINE__, "\n";
+            print;
+        } elsif ($orig_cert ne $default_cert && ! -f $orig_cert) {
+            print STDERR "Module signature verification enabled but ",
+                "module signing key \"$orig_cert\" not found. Resetting ",
+                "signing key to default value.\n";
+            print "CONFIG_MODULE_SIG_KEY=\"$default_cert\"\n";
+        } else {
+            print;
+        }
+        next;
+    }
+
+    if (/CONFIG_SYSTEM_TRUSTED_KEYS="(.+)"/) {
+        my $orig_keys = $1;
+
+        if (! -f $orig_keys) {
+            print STDERR "System keyring enabled but keys \"$orig_keys\" ",
+                "not found. Resetting keys to default value.\n";
+            print "CONFIG_SYSTEM_TRUSTED_KEYS=\"\"\n";
+        } else {
+            print;
+        }
+        next;
+    }
+
     if (/^(CONFIG.*)=(m|y)/) {
 	if (defined($configs{$1})) {
 	    if ($localyesconfig) {
diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
index 7bfe9fa..d135882 100755
--- a/scripts/ld-version.sh
+++ b/scripts/ld-version.sh
@@ -5,6 +5,6 @@
 	gsub(".*version ", "");
 	gsub("-.*", "");
 	split($1,a, ".");
-	print a[1]*100000000 + a[2]*1000000 + a[3]*10000 + a[4]*100 + a[5];
+	print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
 	exit
 	}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 161dd0d..a915507 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -371,6 +371,49 @@
 		do_usb_entry_multi(symval + i, mod);
 }
 
+static void do_of_entry_multi(void *symval, struct module *mod)
+{
+	char alias[500];
+	int len;
+	char *tmp;
+
+	DEF_FIELD_ADDR(symval, of_device_id, name);
+	DEF_FIELD_ADDR(symval, of_device_id, type);
+	DEF_FIELD_ADDR(symval, of_device_id, compatible);
+
+	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
+		      (*type)[0] ? *type : "*");
+
+	if (compatible[0])
+		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
+			*compatible);
+
+	/* Replace all whitespace with underscores */
+	for (tmp = alias; tmp && *tmp; tmp++)
+		if (isspace(*tmp))
+			*tmp = '_';
+
+	buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
+	strcat(alias, "C");
+	add_wildcard(alias);
+	buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
+}
+
+static void do_of_table(void *symval, unsigned long size,
+			struct module *mod)
+{
+	unsigned int i;
+	const unsigned long id_size = SIZE_of_device_id;
+
+	device_id_check(mod->name, "of", size, id_size, symval);
+
+	/* Leave last one: it's the terminator. */
+	size -= id_size;
+
+	for (i = 0; i < size; i += id_size)
+		do_of_entry_multi(symval + i, mod);
+}
+
 /* Looks like: hid:bNvNpN */
 static int do_hid_entry(const char *filename,
 			     void *symval, char *alias)
@@ -684,30 +727,6 @@
 }
 ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
 
-static int do_of_entry (const char *filename, void *symval, char *alias)
-{
-	int len;
-	char *tmp;
-	DEF_FIELD_ADDR(symval, of_device_id, name);
-	DEF_FIELD_ADDR(symval, of_device_id, type);
-	DEF_FIELD_ADDR(symval, of_device_id, compatible);
-
-	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
-		      (*type)[0] ? *type : "*");
-
-	if (compatible[0])
-		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
-			*compatible);
-
-	/* Replace all whitespace with underscores */
-	for (tmp = alias; tmp && *tmp; tmp++)
-		if (isspace (*tmp))
-			*tmp = '_';
-
-	return 1;
-}
-ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
-
 static int do_vio_entry(const char *filename, void *symval,
 		char *alias)
 {
@@ -1348,6 +1367,8 @@
 	/* First handle the "special" cases */
 	if (sym_is(name, namelen, "usb"))
 		do_usb_table(symval, sym->st_size, mod);
+	if (sym_is(name, namelen, "of"))
+		do_of_table(symval, sym->st_size, mod);
 	else if (sym_is(name, namelen, "pnp"))
 		do_pnp_device_entry(symval, sym->st_size, mod);
 	else if (sym_is(name, namelen, "pnp_card"))
diff --git a/security/Kconfig b/security/Kconfig
index e4523789..176758c 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -122,6 +122,7 @@
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
 source security/apparmor/Kconfig
+source security/loadpin/Kconfig
 source security/yama/Kconfig
 
 source security/integrity/Kconfig
diff --git a/security/Makefile b/security/Makefile
index c9bfbc8..f2d71cd 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -8,6 +8,7 @@
 subdir-$(CONFIG_SECURITY_TOMOYO)        += tomoyo
 subdir-$(CONFIG_SECURITY_APPARMOR)	+= apparmor
 subdir-$(CONFIG_SECURITY_YAMA)		+= yama
+subdir-$(CONFIG_SECURITY_LOADPIN)	+= loadpin
 
 # always enable default capabilities
 obj-y					+= commoncap.o
@@ -22,6 +23,7 @@
 obj-$(CONFIG_SECURITY_TOMOYO)		+= tomoyo/
 obj-$(CONFIG_SECURITY_APPARMOR)		+= apparmor/
 obj-$(CONFIG_SECURITY_YAMA)		+= yama/
+obj-$(CONFIG_SECURITY_LOADPIN)		+= loadpin/
 obj-$(CONFIG_CGROUP_DEVICE)		+= device_cgroup.o
 
 # Object integrity file lists
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 913f377..d186674 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -275,7 +275,7 @@
  *
  * Returns: %0 else error if access denied or other error
  */
-int aa_path_perm(int op, struct aa_profile *profile, struct path *path,
+int aa_path_perm(int op, struct aa_profile *profile, const struct path *path,
 		 int flags, u32 request, struct path_cond *cond)
 {
 	char *buffer = NULL;
@@ -346,7 +346,7 @@
  * Returns: %0 if allowed else error
  */
 int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
-		 struct path *new_dir, struct dentry *new_dentry)
+		 const struct path *new_dir, struct dentry *new_dentry)
 {
 	struct path link = { new_dir->mnt, new_dentry };
 	struct path target = { new_dir->mnt, old_dentry };
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index 2c922b8..4803c97 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -171,11 +171,11 @@
 			  const char *name, struct path_cond *cond,
 			  struct file_perms *perms);
 
-int aa_path_perm(int op, struct aa_profile *profile, struct path *path,
+int aa_path_perm(int op, struct aa_profile *profile, const struct path *path,
 		 int flags, u32 request, struct path_cond *cond);
 
 int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
-		 struct path *new_dir, struct dentry *new_dentry);
+		 const struct path *new_dir, struct dentry *new_dentry);
 
 int aa_file_perm(int op, struct aa_profile *profile, struct file *file,
 		 u32 request);
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
index 286ac75..73560f2 100644
--- a/security/apparmor/include/path.h
+++ b/security/apparmor/include/path.h
@@ -26,7 +26,7 @@
 	PATH_MEDIATE_DELETED = 0x10000,	/* mediate deleted paths */
 };
 
-int aa_path_name(struct path *path, int flags, char **buffer,
+int aa_path_name(const struct path *path, int flags, char **buffer,
 		 const char **name, const char **info);
 
 #endif /* __AA_PATH_H */
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index dec607c..2660fbc 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -149,7 +149,7 @@
  *
  * Returns: %0 else error code if error or permission denied
  */
-static int common_perm(int op, struct path *path, u32 mask,
+static int common_perm(int op, const struct path *path, u32 mask,
 		       struct path_cond *cond)
 {
 	struct aa_profile *profile;
@@ -172,7 +172,7 @@
  *
  * Returns: %0 else error code if error or permission denied
  */
-static int common_perm_dir_dentry(int op, struct path *dir,
+static int common_perm_dir_dentry(int op, const struct path *dir,
 				  struct dentry *dentry, u32 mask,
 				  struct path_cond *cond)
 {
@@ -182,23 +182,22 @@
 }
 
 /**
- * common_perm_mnt_dentry - common permission wrapper when mnt, dentry
+ * common_perm_path - common permission wrapper for a path
  * @op: operation being checked
- * @mnt: mount point of dentry (NOT NULL)
- * @dentry: dentry to check  (NOT NULL)
+ * @path: location to check (NOT NULL)
  * @mask: requested permissions mask
  *
  * Returns: %0 else error code if error or permission denied
  */
-static int common_perm_mnt_dentry(int op, struct vfsmount *mnt,
-				  struct dentry *dentry, u32 mask)
+static inline int common_perm_path(int op, const struct path *path, u32 mask)
 {
-	struct path path = { mnt, dentry };
-	struct path_cond cond = { d_backing_inode(dentry)->i_uid,
-				  d_backing_inode(dentry)->i_mode
+	struct path_cond cond = { d_backing_inode(path->dentry)->i_uid,
+				  d_backing_inode(path->dentry)->i_mode
 	};
+	if (!mediated_filesystem(path->dentry))
+		return 0;
 
-	return common_perm(op, &path, mask, &cond);
+	return common_perm(op, path, mask, &cond);
 }
 
 /**
@@ -210,13 +209,13 @@
  *
  * Returns: %0 else error code if error or permission denied
  */
-static int common_perm_rm(int op, struct path *dir,
+static int common_perm_rm(int op, const struct path *dir,
 			  struct dentry *dentry, u32 mask)
 {
 	struct inode *inode = d_backing_inode(dentry);
 	struct path_cond cond = { };
 
-	if (!inode || !dir->mnt || !mediated_filesystem(dentry))
+	if (!inode || !mediated_filesystem(dentry))
 		return 0;
 
 	cond.uid = inode->i_uid;
@@ -235,61 +234,53 @@
  *
  * Returns: %0 else error code if error or permission denied
  */
-static int common_perm_create(int op, struct path *dir, struct dentry *dentry,
-			      u32 mask, umode_t mode)
+static int common_perm_create(int op, const struct path *dir,
+			      struct dentry *dentry, u32 mask, umode_t mode)
 {
 	struct path_cond cond = { current_fsuid(), mode };
 
-	if (!dir->mnt || !mediated_filesystem(dir->dentry))
+	if (!mediated_filesystem(dir->dentry))
 		return 0;
 
 	return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
 }
 
-static int apparmor_path_unlink(struct path *dir, struct dentry *dentry)
+static int apparmor_path_unlink(const struct path *dir, struct dentry *dentry)
 {
 	return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE);
 }
 
-static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry,
+static int apparmor_path_mkdir(const struct path *dir, struct dentry *dentry,
 			       umode_t mode)
 {
 	return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE,
 				  S_IFDIR);
 }
 
-static int apparmor_path_rmdir(struct path *dir, struct dentry *dentry)
+static int apparmor_path_rmdir(const struct path *dir, struct dentry *dentry)
 {
 	return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE);
 }
 
-static int apparmor_path_mknod(struct path *dir, struct dentry *dentry,
+static int apparmor_path_mknod(const struct path *dir, struct dentry *dentry,
 			       umode_t mode, unsigned int dev)
 {
 	return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode);
 }
 
-static int apparmor_path_truncate(struct path *path)
+static int apparmor_path_truncate(const struct path *path)
 {
-	struct path_cond cond = { d_backing_inode(path->dentry)->i_uid,
-				  d_backing_inode(path->dentry)->i_mode
-	};
-
-	if (!path->mnt || !mediated_filesystem(path->dentry))
-		return 0;
-
-	return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE,
-			   &cond);
+	return common_perm_path(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE);
 }
 
-static int apparmor_path_symlink(struct path *dir, struct dentry *dentry,
+static int apparmor_path_symlink(const struct path *dir, struct dentry *dentry,
 				 const char *old_name)
 {
 	return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE,
 				  S_IFLNK);
 }
 
-static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir,
+static int apparmor_path_link(struct dentry *old_dentry, const struct path *new_dir,
 			      struct dentry *new_dentry)
 {
 	struct aa_profile *profile;
@@ -304,8 +295,8 @@
 	return error;
 }
 
-static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
-				struct path *new_dir, struct dentry *new_dentry)
+static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_dentry,
+				const struct path *new_dir, struct dentry *new_dentry)
 {
 	struct aa_profile *profile;
 	int error = 0;
@@ -334,33 +325,19 @@
 	return error;
 }
 
-static int apparmor_path_chmod(struct path *path, umode_t mode)
+static int apparmor_path_chmod(const struct path *path, umode_t mode)
 {
-	if (!mediated_filesystem(path->dentry))
-		return 0;
-
-	return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
+	return common_perm_path(OP_CHMOD, path, AA_MAY_CHMOD);
 }
 
-static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+static int apparmor_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
 {
-	struct path_cond cond =  { d_backing_inode(path->dentry)->i_uid,
-				   d_backing_inode(path->dentry)->i_mode
-	};
-
-	if (!mediated_filesystem(path->dentry))
-		return 0;
-
-	return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
+	return common_perm_path(OP_CHOWN, path, AA_MAY_CHOWN);
 }
 
 static int apparmor_inode_getattr(const struct path *path)
 {
-	if (!mediated_filesystem(path->dentry))
-		return 0;
-
-	return common_perm_mnt_dentry(OP_GETATTR, path->mnt, path->dentry,
-				      AA_MAY_META_READ);
+	return common_perm_path(OP_GETATTR, path, AA_MAY_META_READ);
 }
 
 static int apparmor_file_open(struct file *file, const struct cred *cred)
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 71e0e3a..edddc02 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -53,7 +53,7 @@
  *          When no error the path name is returned in @name which points to
 *          a position in @buf
  */
-static int d_namespace_path(struct path *path, char *buf, int buflen,
+static int d_namespace_path(const struct path *path, char *buf, int buflen,
 			    char **name, int flags)
 {
 	char *res;
@@ -158,7 +158,7 @@
  *
  * Returns: %0 else error on failure
  */
-static int get_name_to_buffer(struct path *path, int flags, char *buffer,
+static int get_name_to_buffer(const struct path *path, int flags, char *buffer,
 			      int size, char **name, const char **info)
 {
 	int adjust = (flags & PATH_IS_DIR) ? 1 : 0;
@@ -204,8 +204,8 @@
  *
  * Returns: %0 else error code if could retrieve name
  */
-int aa_path_name(struct path *path, int flags, char **buffer, const char **name,
-		 const char **info)
+int aa_path_name(const struct path *path, int flags, char **buffer,
+		 const char **name, const char **info)
 {
 	char *buf, *str = NULL;
 	int size = 256;
diff --git a/security/commoncap.c b/security/commoncap.c
index 48071ed..e7fadde 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -111,7 +111,7 @@
  * Determine whether the current process may set the system clock and timezone
  * information, returning 0 if permission granted, -ve if denied.
  */
-int cap_settime(const struct timespec *ts, const struct timezone *tz)
+int cap_settime(const struct timespec64 *ts, const struct timezone *tz)
 {
 	if (!capable(CAP_SYS_TIME))
 		return -EPERM;
@@ -313,7 +313,7 @@
 	if (!inode->i_op->getxattr)
 	       return 0;
 
-	error = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, NULL, 0);
+	error = inode->i_op->getxattr(dentry, inode, XATTR_NAME_CAPS, NULL, 0);
 	if (error <= 0)
 		return 0;
 	return 1;
@@ -397,8 +397,8 @@
 	if (!inode || !inode->i_op->getxattr)
 		return -ENODATA;
 
-	size = inode->i_op->getxattr((struct dentry *)dentry, XATTR_NAME_CAPS, &caps,
-				   XATTR_CAPS_SZ);
+	size = inode->i_op->getxattr((struct dentry *)dentry, inode,
+				     XATTR_NAME_CAPS, &caps, XATTR_CAPS_SZ);
 	if (size == -ENODATA || size == -EOPNOTSUPP)
 		/* no data, that's ok */
 		return -ENODATA;
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
index 979be65..da95658 100644
--- a/security/integrity/Kconfig
+++ b/security/integrity/Kconfig
@@ -35,7 +35,6 @@
 	default n
         select ASYMMETRIC_KEY_TYPE
         select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
-        select PUBLIC_KEY_ALGO_RSA
         select CRYPTO_RSA
         select X509_CERTIFICATE_PARSER
 	help
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 8ef1511..4304372 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -18,6 +18,8 @@
 #include <linux/cred.h>
 #include <linux/key-type.h>
 #include <linux/digsig.h>
+#include <crypto/public_key.h>
+#include <keys/system_keyring.h>
 
 #include "integrity.h"
 
@@ -40,6 +42,12 @@
 static bool init_keyring __initdata;
 #endif
 
+#ifdef CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+#define restrict_link_to_ima restrict_link_by_builtin_and_secondary_trusted
+#else
+#define restrict_link_to_ima restrict_link_by_builtin_trusted
+#endif
+
 int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
 			    const char *digest, int digestlen)
 {
@@ -83,10 +91,9 @@
 				    ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
 				     KEY_USR_VIEW | KEY_USR_READ |
 				     KEY_USR_WRITE | KEY_USR_SEARCH),
-				    KEY_ALLOC_NOT_IN_QUOTA, NULL);
-	if (!IS_ERR(keyring[id]))
-		set_bit(KEY_FLAG_TRUSTED_ONLY, &keyring[id]->flags);
-	else {
+				    KEY_ALLOC_NOT_IN_QUOTA,
+				    restrict_link_to_ima, NULL);
+	if (IS_ERR(keyring[id])) {
 		err = PTR_ERR(keyring[id]);
 		pr_info("Can't allocate %s keyring (%d)\n",
 			keyring_name[id], err);
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index e6ea9d4..b9e2628 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -82,7 +82,7 @@
 		return -EOPNOTSUPP;
 
 	for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) {
-		error = inode->i_op->getxattr(dentry, *xattr, NULL, 0);
+		error = inode->i_op->getxattr(dentry, inode, *xattr, NULL, 0);
 		if (error < 0) {
 			if (error == -ENODATA)
 				continue;
@@ -299,8 +299,8 @@
 			return 0;
 
 		/* exception for pseudo filesystems */
-		if (dentry->d_inode->i_sb->s_magic == TMPFS_MAGIC
-		    || dentry->d_inode->i_sb->s_magic == SYSFS_MAGIC)
+		if (dentry->d_sb->s_magic == TMPFS_MAGIC
+		    || dentry->d_sb->s_magic == SYSFS_MAGIC)
 			return 0;
 
 		integrity_audit_msg(AUDIT_INTEGRITY_METADATA,
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index e54a8a8..5487827 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -155,23 +155,33 @@
 
 	   This option is deprecated in favor of INTEGRITY_TRUSTED_KEYRING
 
-config IMA_MOK_KEYRING
-	bool "Create IMA machine owner keys (MOK) and blacklist keyrings"
+config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+	bool "Permit keys validly signed by a built-in or secondary CA cert (EXPERIMENTAL)"
+	depends on SYSTEM_TRUSTED_KEYRING
+	depends on SECONDARY_TRUSTED_KEYRING
+	depends on INTEGRITY_ASYMMETRIC_KEYS
+	select INTEGRITY_TRUSTED_KEYRING
+	default n
+	help
+	  Keys may be added to the IMA or IMA blacklist keyrings, if the
+	  key is validly signed by a CA cert in the system built-in or
+	  secondary trusted keyrings.
+
+	  Intermediate keys between those the kernel has compiled in and the
+	  IMA keys to be added may be added to the system secondary keyring,
+	  provided they are validly signed by a key already resident in the
+	  built-in or secondary trusted keyrings.
+
+config IMA_BLACKLIST_KEYRING
+	bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
 	depends on SYSTEM_TRUSTED_KEYRING
 	depends on IMA_TRUSTED_KEYRING
 	default n
 	help
-	   This option creates IMA MOK and blacklist keyrings.  IMA MOK is an
-	   intermediate keyring that sits between .system and .ima keyrings,
-	   effectively forming a simple CA hierarchy.  To successfully import a
-	   key into .ima_mok it must be signed by a key which CA is in .system
-	   keyring.  On turn any key that needs to go in .ima keyring must be
-	   signed by CA in either .system or .ima_mok keyrings. IMA MOK is empty
-	   at kernel boot.
-
-	   IMA blacklist keyring contains all revoked IMA keys.  It is consulted
-	   before any other keyring.  If the search is successful the requested
-	   operation is rejected and error is returned to the caller.
+	   This option creates an IMA blacklist keyring, which contains all
+	   revoked IMA keys.  It is consulted before any other keyring.  If
+	   the search is successful the requested operation is rejected and
+	   an error is returned to the caller.
 
 config IMA_LOAD_X509
 	bool "Load X509 certificate onto the '.ima' trusted keyring"
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index a8539f9..9aeaeda 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -8,4 +8,4 @@
 ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
 	 ima_policy.o ima_template.o ima_template_lib.o
 ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
-obj-$(CONFIG_IMA_MOK_KEYRING) += ima_mok.o
+obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 5d0f611..d3a939b 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -170,7 +170,7 @@
 int ima_store_template(struct ima_template_entry *entry, int violation,
 		       struct inode *inode, const unsigned char *filename);
 void ima_free_template_entry(struct ima_template_entry *entry);
-const char *ima_d_path(struct path *path, char **pathbuf);
+const char *ima_d_path(const struct path *path, char **pathbuf);
 
 /* IMA policy related functions */
 int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 370e42d..5a2218f 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -313,7 +313,7 @@
 	iint->flags |= IMA_AUDITED;
 }
 
-const char *ima_d_path(struct path *path, char **pathbuf)
+const char *ima_d_path(const struct path *path, char **pathbuf)
 {
 	char *pathname = NULL;
 
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 6b4694a..1bcbc12 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -275,6 +275,11 @@
 		     xattr_value->type != EVM_IMA_XATTR_DIGSIG)) {
 			if (!ima_fix_xattr(dentry, iint))
 				status = INTEGRITY_PASS;
+		} else if ((inode->i_size == 0) &&
+			   (iint->flags & IMA_NEW_FILE) &&
+			   (xattr_value &&
+			    xattr_value->type == EVM_IMA_XATTR_DIGSIG)) {
+			status = INTEGRITY_PASS;
 		}
 		integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
 				    op, cause, rc, 0);
@@ -328,7 +333,7 @@
 	if (iint) {
 		iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
 				 IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
-				 IMA_ACTION_FLAGS);
+				 IMA_ACTION_RULE_FLAGS);
 		if (must_appraise)
 			iint->flags |= IMA_APPRAISE;
 	}
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 391f417..68b26c3 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -246,7 +246,8 @@
 		ima_audit_measurement(iint, pathname);
 
 out_digsig:
-	if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG))
+	if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG) &&
+	     !(iint->flags & IMA_NEW_FILE))
 		rc = -EACCES;
 	kfree(xattr_value);
 out_free:
@@ -316,6 +317,28 @@
 EXPORT_SYMBOL_GPL(ima_file_check);
 
 /**
+ * ima_post_path_mknod - mark as a new inode
+ * @dentry: newly created dentry
+ *
+ * Mark files created via the mknodat syscall as new, so that the
+ * file data can be written later.
+ */
+void ima_post_path_mknod(struct dentry *dentry)
+{
+	struct integrity_iint_cache *iint;
+	struct inode *inode = dentry->d_inode;
+	int must_appraise;
+
+	must_appraise = ima_must_appraise(inode, MAY_ACCESS, FILE_CHECK);
+	if (!must_appraise)
+		return;
+
+	iint = integrity_inode_get(inode);
+	if (iint)
+		iint->flags |= IMA_NEW_FILE;
+}
+
+/**
  * ima_read_file - pre-measure/appraise hook decision based on policy
  * @file: pointer to the file to be measured/appraised/audit
  * @read_id: caller identifier
diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
index 676885e..74a27995 100644
--- a/security/integrity/ima/ima_mok.c
+++ b/security/integrity/ima/ima_mok.c
@@ -17,38 +17,29 @@
 #include <linux/cred.h>
 #include <linux/err.h>
 #include <linux/init.h>
-#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
 
 
-struct key *ima_mok_keyring;
 struct key *ima_blacklist_keyring;
 
 /*
- * Allocate the IMA MOK and blacklist keyrings
+ * Allocate the IMA blacklist keyring
  */
 __init int ima_mok_init(void)
 {
-	pr_notice("Allocating IMA MOK and blacklist keyrings.\n");
-
-	ima_mok_keyring = keyring_alloc(".ima_mok",
-			      KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
-			      (KEY_POS_ALL & ~KEY_POS_SETATTR) |
-			      KEY_USR_VIEW | KEY_USR_READ |
-			      KEY_USR_WRITE | KEY_USR_SEARCH,
-			      KEY_ALLOC_NOT_IN_QUOTA, NULL);
+	pr_notice("Allocating IMA blacklist keyring.\n");
 
 	ima_blacklist_keyring = keyring_alloc(".ima_blacklist",
 				KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
 				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
 				KEY_USR_VIEW | KEY_USR_READ |
 				KEY_USR_WRITE | KEY_USR_SEARCH,
-				KEY_ALLOC_NOT_IN_QUOTA, NULL);
+				KEY_ALLOC_NOT_IN_QUOTA,
+				restrict_link_by_builtin_trusted, NULL);
 
-	if (IS_ERR(ima_mok_keyring) || IS_ERR(ima_blacklist_keyring))
-		panic("Can't allocate IMA MOK or blacklist keyrings.");
-	set_bit(KEY_FLAG_TRUSTED_ONLY, &ima_mok_keyring->flags);
+	if (IS_ERR(ima_blacklist_keyring))
+		panic("Can't allocate IMA blacklist keyring.");
 
-	set_bit(KEY_FLAG_TRUSTED_ONLY, &ima_blacklist_keyring->flags);
 	set_bit(KEY_FLAG_KEEP, &ima_blacklist_keyring->flags);
 	return 0;
 }
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index be09e2c..3cd0a58 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -884,10 +884,10 @@
 	"BPRM_CHECK",
 	"MODULE_CHECK",
 	"FIRMWARE_CHECK",
+	"POST_SETATTR",
 	"KEXEC_KERNEL_CHECK",
 	"KEXEC_INITRAMFS_CHECK",
-	"POLICY_CHECK",
-	"POST_SETATTR"
+	"POLICY_CHECK"
 };
 
 void *ima_policy_start(struct seq_file *m, loff_t *pos)
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index e08935c..90bc57d 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -28,6 +28,7 @@
 
 /* iint cache flags */
 #define IMA_ACTION_FLAGS	0xff000000
+#define IMA_ACTION_RULE_FLAGS	0x06000000
 #define IMA_DIGSIG		0x01000000
 #define IMA_DIGSIG_REQUIRED	0x02000000
 #define IMA_PERMIT_DIRECTIO	0x04000000
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index fe4d74e..f826e87 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -41,6 +41,10 @@
 	bool "Large payload keys"
 	depends on KEYS
 	depends on TMPFS
+	select CRYPTO
+	select CRYPTO_AES
+	select CRYPTO_ECB
+	select CRYPTO_RNG
 	help
 	  This option provides support for holding large keys within the kernel
 	  (for example Kerberos ticket caches).  The data may be stored out to
@@ -81,3 +85,14 @@
 	  Userspace only ever sees/stores encrypted blobs.
 
 	  If you are unsure as to whether this is required, answer N.
+
+config KEY_DH_OPERATIONS
+       bool "Diffie-Hellman operations on retained keys"
+       depends on KEYS
+       select MPILIB
+       help
+	 This option provides support for calculating Diffie-Hellman
+	 public keys and shared secrets using values stored as keys
+	 in the kernel.
+
+	 If you are unsure as to whether this is required, answer N.
diff --git a/security/keys/Makefile b/security/keys/Makefile
index dfb3a7b..1fd4a16 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSCTL) += sysctl.o
 obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
+obj-$(CONFIG_KEY_DH_OPERATIONS) += dh.o
 
 #
 # Key types
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index c721e39..9e443fc 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -14,8 +14,10 @@
 #include <linux/file.h>
 #include <linux/shmem_fs.h>
 #include <linux/err.h>
+#include <linux/scatterlist.h>
 #include <keys/user-type.h>
 #include <keys/big_key-type.h>
+#include <crypto/rng.h>
 
 /*
  * Layout of key payload words.
@@ -28,6 +30,14 @@
 };
 
 /*
+ * Crypto operation with big_key data
+ */
+enum big_key_op {
+	BIG_KEY_ENC,
+	BIG_KEY_DEC,
+};
+
+/*
  * If the data is under this limit, there's no point creating a shm file to
  * hold it as the permanently resident metadata for the shmem fs will be at
  * least as large as the data.
@@ -35,6 +45,11 @@
 #define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
 
 /*
+ * Key size for big_key data encryption
+ */
+#define ENC_KEY_SIZE	16
+
+/*
  * big_key defined keys take an arbitrary string as the description and an
  * arbitrary blob of data as the payload
  */
@@ -50,12 +65,62 @@
 };
 
 /*
+ * Crypto names for big_key data encryption
+ */
+static const char big_key_rng_name[] = "stdrng";
+static const char big_key_alg_name[] = "ecb(aes)";
+
+/*
+ * Crypto algorithms for big_key data encryption
+ */
+static struct crypto_rng *big_key_rng;
+static struct crypto_blkcipher *big_key_blkcipher;
+
+/*
+ * Generate random key to encrypt big_key data
+ */
+static inline int big_key_gen_enckey(u8 *key)
+{
+	return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE);
+}
+
+/*
+ * Encrypt/decrypt big_key data
+ */
+static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
+{
+	int ret = -EINVAL;
+	struct scatterlist sgio;
+	struct blkcipher_desc desc;
+
+	if (crypto_blkcipher_setkey(big_key_blkcipher, key, ENC_KEY_SIZE)) {
+		ret = -EAGAIN;
+		goto error;
+	}
+
+	desc.flags = 0;
+	desc.tfm = big_key_blkcipher;
+
+	sg_init_one(&sgio, data, datalen);
+
+	if (op == BIG_KEY_ENC)
+		ret = crypto_blkcipher_encrypt(&desc, &sgio, &sgio, datalen);
+	else
+		ret = crypto_blkcipher_decrypt(&desc, &sgio, &sgio, datalen);
+
+error:
+	return ret;
+}
+
+/*
  * Preparse a big key
  */
 int big_key_preparse(struct key_preparsed_payload *prep)
 {
 	struct path *path = (struct path *)&prep->payload.data[big_key_path];
 	struct file *file;
+	u8 *enckey;
+	u8 *data = NULL;
 	ssize_t written;
 	size_t datalen = prep->datalen;
 	int ret;
@@ -73,16 +138,43 @@
 		/* Create a shmem file to store the data in.  This will permit the data
 		 * to be swapped out if needed.
 		 *
-		 * TODO: Encrypt the stored data with a temporary key.
+		 * File content is stored encrypted with randomly generated key.
 		 */
-		file = shmem_kernel_file_setup("", datalen, 0);
-		if (IS_ERR(file)) {
-			ret = PTR_ERR(file);
+		size_t enclen = ALIGN(datalen, crypto_blkcipher_blocksize(big_key_blkcipher));
+
+		/* prepare aligned data to encrypt */
+		data = kmalloc(enclen, GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		memcpy(data, prep->data, datalen);
+		memset(data + datalen, 0x00, enclen - datalen);
+
+		/* generate random key */
+		enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
+		if (!enckey) {
+			ret = -ENOMEM;
 			goto error;
 		}
 
-		written = kernel_write(file, prep->data, prep->datalen, 0);
-		if (written != datalen) {
+		ret = big_key_gen_enckey(enckey);
+		if (ret)
+			goto err_enckey;
+
+		/* encrypt aligned data */
+		ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey);
+		if (ret)
+			goto err_enckey;
+
+		/* save aligned data to file */
+		file = shmem_kernel_file_setup("", enclen, 0);
+		if (IS_ERR(file)) {
+			ret = PTR_ERR(file);
+			goto err_enckey;
+		}
+
+		written = kernel_write(file, data, enclen, 0);
+		if (written != enclen) {
 			ret = written;
 			if (written >= 0)
 				ret = -ENOMEM;
@@ -92,12 +184,15 @@
 		/* Pin the mount and dentry to the key so that we can open it again
 		 * later
 		 */
+		prep->payload.data[big_key_data] = enckey;
 		*path = file->f_path;
 		path_get(path);
 		fput(file);
+		kfree(data);
 	} else {
 		/* Just store the data in a buffer */
 		void *data = kmalloc(datalen, GFP_KERNEL);
+
 		if (!data)
 			return -ENOMEM;
 
@@ -108,7 +203,10 @@
 
 err_fput:
 	fput(file);
+err_enckey:
+	kfree(enckey);
 error:
+	kfree(data);
 	return ret;
 }
 
@@ -119,10 +217,10 @@
 {
 	if (prep->datalen > BIG_KEY_FILE_THRESHOLD) {
 		struct path *path = (struct path *)&prep->payload.data[big_key_path];
+
 		path_put(path);
-	} else {
-		kfree(prep->payload.data[big_key_data]);
 	}
+	kfree(prep->payload.data[big_key_data]);
 }
 
 /*
@@ -147,15 +245,15 @@
 {
 	size_t datalen = (size_t)key->payload.data[big_key_len];
 
-	if (datalen) {
+	if (datalen > BIG_KEY_FILE_THRESHOLD) {
 		struct path *path = (struct path *)&key->payload.data[big_key_path];
+
 		path_put(path);
 		path->mnt = NULL;
 		path->dentry = NULL;
-	} else {
-		kfree(key->payload.data[big_key_data]);
-		key->payload.data[big_key_data] = NULL;
 	}
+	kfree(key->payload.data[big_key_data]);
+	key->payload.data[big_key_data] = NULL;
 }
 
 /*
@@ -188,17 +286,41 @@
 	if (datalen > BIG_KEY_FILE_THRESHOLD) {
 		struct path *path = (struct path *)&key->payload.data[big_key_path];
 		struct file *file;
-		loff_t pos;
+		u8 *data;
+		u8 *enckey = (u8 *)key->payload.data[big_key_data];
+		size_t enclen = ALIGN(datalen, crypto_blkcipher_blocksize(big_key_blkcipher));
+
+		data = kmalloc(enclen, GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
 
 		file = dentry_open(path, O_RDONLY, current_cred());
-		if (IS_ERR(file))
-			return PTR_ERR(file);
+		if (IS_ERR(file)) {
+			ret = PTR_ERR(file);
+			goto error;
+		}
 
-		pos = 0;
-		ret = vfs_read(file, buffer, datalen, &pos);
-		fput(file);
-		if (ret >= 0 && ret != datalen)
+		/* read file to kernel and decrypt */
+		ret = kernel_read(file, 0, data, enclen);
+		if (ret >= 0 && ret != enclen) {
 			ret = -EIO;
+			goto err_fput;
+		}
+
+		ret = big_key_crypt(BIG_KEY_DEC, data, enclen, enckey);
+		if (ret)
+			goto err_fput;
+
+		ret = datalen;
+
+		/* copy decrypted data to user */
+		if (copy_to_user(buffer, data, datalen) != 0)
+			ret = -EFAULT;
+
+err_fput:
+		fput(file);
+error:
+		kfree(data);
 	} else {
 		ret = datalen;
 		if (copy_to_user(buffer, key->payload.data[big_key_data],
@@ -209,8 +331,48 @@
 	return ret;
 }
 
+/*
+ * Register key type
+ */
 static int __init big_key_init(void)
 {
 	return register_key_type(&key_type_big_key);
 }
+
+/*
+ * Initialize big_key crypto and RNG algorithms
+ */
+static int __init big_key_crypto_init(void)
+{
+	int ret = -EINVAL;
+
+	/* init RNG */
+	big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
+	if (IS_ERR(big_key_rng)) {
+		big_key_rng = NULL;
+		return -EFAULT;
+	}
+
+	/* seed RNG */
+	ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng));
+	if (ret)
+		goto error;
+
+	/* init block cipher */
+	big_key_blkcipher = crypto_alloc_blkcipher(big_key_alg_name, 0, 0);
+	if (IS_ERR(big_key_blkcipher)) {
+		big_key_blkcipher = NULL;
+		ret = -EFAULT;
+		goto error;
+	}
+
+	return 0;
+
+error:
+	crypto_free_rng(big_key_rng);
+	big_key_rng = NULL;
+	return ret;
+}
+
 device_initcall(big_key_init);
+late_initcall(big_key_crypto_init);
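
With the change above, any big_key payload larger than BIG_KEY_FILE_THRESHOLD is padded to the ecb(aes) block size, encrypted under a freshly generated 16-byte key kept in the key payload, and only then written to the shmem file; big_key_read() runs the reverse path before copying data back to userspace. A minimal userspace sketch of that path, assuming CONFIG_BIG_KEYS=y and libkeyutils (the "example" description and 4 KiB payload are illustrative only, chosen to exceed the threshold):

	/* Hedged userspace sketch: not part of the patch.  Assumes
	 * CONFIG_BIG_KEYS=y and libkeyutils; description and payload size
	 * are illustrative.  4 KiB exceeds BIG_KEY_FILE_THRESHOLD, so it
	 * takes the new encrypt-then-shmem path in big_key_preparse().
	 * Build with -lkeyutils.
	 */
	#include <keyutils.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char payload[4096];
		void *readback = NULL;
		key_serial_t key;
		long len;

		memset(payload, 'A', sizeof(payload));

		/* Stored in a shmem file; with this series the file holds ciphertext. */
		key = add_key("big_key", "example", payload, sizeof(payload),
			      KEY_SPEC_SESSION_KEYRING);
		if (key < 0) {
			perror("add_key");
			return 1;
		}

		/* big_key_read() decrypts before copying the data back out. */
		len = keyctl_read_alloc(key, &readback);
		if (len < 0) {
			perror("keyctl_read_alloc");
			return 1;
		}

		printf("read back %ld bytes\n", len);
		free(readback);
		return 0;
	}

Since the encryption key never leaves kernel memory, pages of the shmem file that end up on swap now contain only ciphertext.
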
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 25430a3..c8783b3 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -132,6 +132,10 @@
 	case KEYCTL_GET_PERSISTENT:
 		return keyctl_get_persistent(arg2, arg3);
 
+	case KEYCTL_DH_COMPUTE:
+		return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3),
+					 arg4);
+
 	default:
 		return -EOPNOTSUPP;
 	}
diff --git a/security/keys/dh.c b/security/keys/dh.c
new file mode 100644
index 0000000..880505a
--- /dev/null
+++ b/security/keys/dh.c
@@ -0,0 +1,160 @@
+/* Crypto operations using stored keys
+ *
+ * Copyright (c) 2016, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mpi.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <keys/user-type.h>
+#include "internal.h"
+
+/*
+ * Public key or shared secret generation function [RFC2631 sec 2.1.1]
+ *
+ * ya = g^xa mod p;
+ * or
+ * ZZ = yb^xa mod p;
+ *
+ * where xa is the local private key, ya is the local public key, g is
+ * the generator, p is the prime, yb is the remote public key, and ZZ
+ * is the shared secret.
+ *
+ * Both are the same calculation, so g or yb are the "base" and ya or
+ * ZZ are the "result".
+ */
+static int do_dh(MPI result, MPI base, MPI xa, MPI p)
+{
+	return mpi_powm(result, base, xa, p);
+}
+
+static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
+{
+	struct key *key;
+	key_ref_t key_ref;
+	long status;
+	ssize_t ret;
+
+	key_ref = lookup_user_key(keyid, 0, KEY_NEED_READ);
+	if (IS_ERR(key_ref)) {
+		ret = -ENOKEY;
+		goto error;
+	}
+
+	key = key_ref_to_ptr(key_ref);
+
+	ret = -EOPNOTSUPP;
+	if (key->type == &key_type_user) {
+		down_read(&key->sem);
+		status = key_validate(key);
+		if (status == 0) {
+			const struct user_key_payload *payload;
+
+			payload = user_key_payload(key);
+
+			if (maxlen == 0) {
+				*mpi = NULL;
+				ret = payload->datalen;
+			} else if (payload->datalen <= maxlen) {
+				*mpi = mpi_read_raw_data(payload->data,
+							 payload->datalen);
+				if (*mpi)
+					ret = payload->datalen;
+			} else {
+				ret = -EINVAL;
+			}
+		}
+		up_read(&key->sem);
+	}
+
+	key_put(key);
+error:
+	return ret;
+}
+
+long keyctl_dh_compute(struct keyctl_dh_params __user *params,
+		       char __user *buffer, size_t buflen)
+{
+	long ret;
+	MPI base, private, prime, result;
+	unsigned nbytes;
+	struct keyctl_dh_params pcopy;
+	uint8_t *kbuf;
+	ssize_t keylen;
+	size_t resultlen;
+
+	if (!params || (!buffer && buflen)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (copy_from_user(&pcopy, params, sizeof(pcopy)) != 0) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	keylen = mpi_from_key(pcopy.prime, buflen, &prime);
+	if (keylen < 0 || !prime) {
+		/* buflen == 0 may be used to query the required buffer size,
+		 * which is the prime key length.
+		 */
+		ret = keylen;
+		goto out;
+	}
+
+	/* The result is never longer than the prime */
+	resultlen = keylen;
+
+	keylen = mpi_from_key(pcopy.base, SIZE_MAX, &base);
+	if (keylen < 0 || !base) {
+		ret = keylen;
+		goto error1;
+	}
+
+	keylen = mpi_from_key(pcopy.private, SIZE_MAX, &private);
+	if (keylen < 0 || !private) {
+		ret = keylen;
+		goto error2;
+	}
+
+	result = mpi_alloc(0);
+	if (!result) {
+		ret = -ENOMEM;
+		goto error3;
+	}
+
+	kbuf = kmalloc(resultlen, GFP_KERNEL);
+	if (!kbuf) {
+		ret = -ENOMEM;
+		goto error4;
+	}
+
+	ret = do_dh(result, base, private, prime);
+	if (ret)
+		goto error5;
+
+	ret = mpi_read_buffer(result, kbuf, resultlen, &nbytes, NULL);
+	if (ret != 0)
+		goto error5;
+
+	ret = nbytes;
+	if (copy_to_user(buffer, kbuf, nbytes) != 0)
+		ret = -EFAULT;
+
+error5:
+	kfree(kbuf);
+error4:
+	mpi_free(result);
+error3:
+	mpi_free(private);
+error2:
+	mpi_free(base);
+error1:
+	mpi_free(prime);
+out:
+	return ret;
+}
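
The new keyctl_dh_compute() is reached from userspace through the KEYCTL_DH_COMPUTE command wired up in the compat.c and keyctl.c hunks above. Its three inputs are serial numbers of "user" keys whose payloads hold the raw big-endian values of the private exponent, the prime p, and the base (g when generating a public value, the peer's public value when deriving the shared secret). As a small worked example, with p = 23, g = 5 and private x = 6, the result is 5^6 mod 23 = 8. A hedged sketch of the call, assuming the uapi definitions added by this series (KEYCTL_DH_COMPUTE and struct keyctl_dh_params with private/prime/base serials) are installed:

	/* Hedged sketch: not part of the patch.  Assumes <linux/keyctl.h> from
	 * this series provides KEYCTL_DH_COMPUTE and struct keyctl_dh_params,
	 * and that priv, prime and base are serials of "user" keys already
	 * loaded with raw big-endian values; names here are illustrative only.
	 */
	#include <linux/keyctl.h>
	#include <stdint.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static long dh_compute(int32_t priv, int32_t prime, int32_t base,
			       void *buf, size_t buflen)
	{
		struct keyctl_dh_params params = {
			.private = priv,
			.prime	 = prime,
			.base	 = base,
		};

		/* buflen == 0 queries the required size (the length of the prime). */
		return syscall(__NR_keyctl, KEYCTL_DH_COMPUTE, &params, buf, buflen, 0);
	}

Passing buflen == 0 returns the prime's length, which bounds the result, so callers can size the output buffer before the real call.
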
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 5105c2c..8ec7a52 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/key-type.h>
 #include <linux/task_work.h>
+#include <linux/keyctl.h>
 
 struct iovec;
 
@@ -257,6 +258,17 @@
 }
 #endif
 
+#ifdef CONFIG_KEY_DH_OPERATIONS
+extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
+			      size_t);
+#else
+static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
+				     char __user *buffer, size_t buflen)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
 /*
  * Debugging key validation
  */
diff --git a/security/keys/key.c b/security/keys/key.c
index b287551..bd5a272 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -201,6 +201,7 @@
  * @cred: The credentials specifying UID namespace.
  * @perm: The permissions mask of the new key.
  * @flags: Flags specifying quota properties.
+ * @restrict_link: Optional link restriction method for new keyrings.
  *
  * Allocate a key of the specified type with the attributes given.  The key is
  * returned in an uninstantiated state and the caller needs to instantiate the
@@ -223,7 +224,10 @@
  */
 struct key *key_alloc(struct key_type *type, const char *desc,
 		      kuid_t uid, kgid_t gid, const struct cred *cred,
-		      key_perm_t perm, unsigned long flags)
+		      key_perm_t perm, unsigned long flags,
+		      int (*restrict_link)(struct key *,
+					   const struct key_type *,
+					   const union key_payload *))
 {
 	struct key_user *user = NULL;
 	struct key *key;
@@ -291,11 +295,10 @@
 	key->uid = uid;
 	key->gid = gid;
 	key->perm = perm;
+	key->restrict_link = restrict_link;
 
 	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
 		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
-	if (flags & KEY_ALLOC_TRUSTED)
-		key->flags |= 1 << KEY_FLAG_TRUSTED;
 	if (flags & KEY_ALLOC_BUILT_IN)
 		key->flags |= 1 << KEY_FLAG_BUILTIN;
 
@@ -496,6 +499,12 @@
 	}
 
 	if (keyring) {
+		if (keyring->restrict_link) {
+			ret = keyring->restrict_link(keyring, key->type,
+						     &prep.payload);
+			if (ret < 0)
+				goto error;
+		}
 		ret = __key_link_begin(keyring, &key->index_key, &edit);
 		if (ret < 0)
 			goto error;
@@ -551,8 +560,12 @@
 	awaken = 0;
 	ret = -EBUSY;
 
-	if (keyring)
+	if (keyring) {
+		if (keyring->restrict_link)
+			return -EPERM;
+
 		link_ret = __key_link_begin(keyring, &key->index_key, &edit);
+	}
 
 	mutex_lock(&key_construction_mutex);
 
@@ -793,6 +806,9 @@
 	struct key *keyring, *key = NULL;
 	key_ref_t key_ref;
 	int ret;
+	int (*restrict_link)(struct key *,
+			     const struct key_type *,
+			     const union key_payload *) = NULL;
 
 	/* look up the key type to see if it's one of the registered kernel
 	 * types */
@@ -811,6 +827,10 @@
 
 	key_check(keyring);
 
+	key_ref = ERR_PTR(-EPERM);
+	if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
+		restrict_link = keyring->restrict_link;
+
 	key_ref = ERR_PTR(-ENOTDIR);
 	if (keyring->type != &key_type_keyring)
 		goto error_put_type;
@@ -819,7 +839,6 @@
 	prep.data = payload;
 	prep.datalen = plen;
 	prep.quotalen = index_key.type->def_datalen;
-	prep.trusted = flags & KEY_ALLOC_TRUSTED;
 	prep.expiry = TIME_T_MAX;
 	if (index_key.type->preparse) {
 		ret = index_key.type->preparse(&prep);
@@ -835,10 +854,13 @@
 	}
 	index_key.desc_len = strlen(index_key.description);
 
-	key_ref = ERR_PTR(-EPERM);
-	if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
-		goto error_free_prep;
-	flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;
+	if (restrict_link) {
+		ret = restrict_link(keyring, index_key.type, &prep.payload);
+		if (ret < 0) {
+			key_ref = ERR_PTR(ret);
+			goto error_free_prep;
+		}
+	}
 
 	ret = __key_link_begin(keyring, &index_key, &edit);
 	if (ret < 0) {
@@ -879,7 +901,7 @@
 
 	/* allocate a new key */
 	key = key_alloc(index_key.type, index_key.description,
-			cred->fsuid, cred->fsgid, cred, perm, flags);
+			cred->fsuid, cred->fsgid, cred, perm, flags, NULL);
 	if (IS_ERR(key)) {
 		key_ref = ERR_CAST(key);
 		goto error_link_end;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index ed73c6c..3b135a0 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1686,6 +1686,11 @@
 	case KEYCTL_GET_PERSISTENT:
 		return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
 
+	case KEYCTL_DH_COMPUTE:
+		return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
+					 (char __user *) arg3,
+					 (size_t) arg4);
+
 	default:
 		return -EOPNOTSUPP;
 	}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index f931ccf..c91e4e0 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -491,13 +491,17 @@
  */
 struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
 			  const struct cred *cred, key_perm_t perm,
-			  unsigned long flags, struct key *dest)
+			  unsigned long flags,
+			  int (*restrict_link)(struct key *,
+					       const struct key_type *,
+					       const union key_payload *),
+			  struct key *dest)
 {
 	struct key *keyring;
 	int ret;
 
 	keyring = key_alloc(&key_type_keyring, description,
-			    uid, gid, cred, perm, flags);
+			    uid, gid, cred, perm, flags, restrict_link);
 	if (!IS_ERR(keyring)) {
 		ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
 		if (ret < 0) {
@@ -510,6 +514,26 @@
 }
 EXPORT_SYMBOL(keyring_alloc);
 
+/**
+ * restrict_link_reject - Give -EPERM to restrict link
+ * @keyring: The keyring being added to.
+ * @type: The type of key being added.
+ * @payload: The payload of the key intended to be added.
+ *
+ * Reject the addition of any links to a keyring.  It can be overridden by
+ * passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when
+ * adding a key to a keyring.
+ *
+ * This is meant to be passed as the restrict_link parameter to
+ * keyring_alloc().
+ */
+int restrict_link_reject(struct key *keyring,
+			 const struct key_type *type,
+			 const union key_payload *payload)
+{
+	return -EPERM;
+}
+
 /*
 * By default, keys are found by getting an exact match on their descriptions.
  */
@@ -1191,6 +1215,16 @@
 	up_write(&keyring->sem);
 }
 
+/*
+ * Check addition of keys to restricted keyrings.
+ */
+static int __key_link_check_restriction(struct key *keyring, struct key *key)
+{
+	if (!keyring->restrict_link)
+		return 0;
+	return keyring->restrict_link(keyring, key->type, &key->payload);
+}
+
 /**
  * key_link - Link a key to a keyring
  * @keyring: The keyring to make the link in.
@@ -1221,14 +1255,12 @@
 	key_check(keyring);
 	key_check(key);
 
-	if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) &&
-	    !test_bit(KEY_FLAG_TRUSTED, &key->flags))
-		return -EPERM;
-
 	ret = __key_link_begin(keyring, &key->index_key, &edit);
 	if (ret == 0) {
 		kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage));
-		ret = __key_link_check_live_key(keyring, key);
+		ret = __key_link_check_restriction(keyring, key);
+		if (ret == 0)
+			ret = __key_link_check_live_key(keyring, key);
 		if (ret == 0)
 			__key_link(key, &edit);
 		__key_link_end(keyring, &key->index_key, edit);
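
Taken together, the key.c and keyring.c hunks replace the old KEY_FLAG_TRUSTED/KEY_FLAG_TRUSTED_ONLY scheme with a per-keyring restrict_link() callback that is consulted whenever keys are created, instantiated or linked into the keyring. A keyring owner passes NULL for no restriction, restrict_link_reject to seal the keyring except via KEY_ALLOC_BYPASS_RESTRICTION, or a policy function such as restrict_link_by_builtin_trusted. A hedged kernel-side sketch mirroring the digsig.c and ima_mok.c callers above (the ".example" keyring and its initcall are illustrative only):

	/* Hedged kernel-side sketch: not part of the patch.  Shows a subsystem
	 * allocating a link-restricted keyring with the new keyring_alloc()
	 * signature; ".example" and the initcall are illustrative only.
	 */
	#include <linux/cred.h>
	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/key.h>
	#include <keys/system_keyring.h>

	static struct key *example_keyring;

	static int __init example_keyring_init(void)
	{
		example_keyring = keyring_alloc(".example",
				KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
				KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH,
				KEY_ALLOC_NOT_IN_QUOTA,
				/* only keys vouched for by a built-in trusted key may be linked */
				restrict_link_by_builtin_trusted, NULL);
		return PTR_ERR_OR_ZERO(example_keyring);
	}
	device_initcall(example_keyring_init);

Keys that fail the restriction are now rejected at link time with the callback's error code, rather than relying on the removed KEY_FLAG_TRUSTED bit.
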
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
index c9fae5e..2ef45b3 100644
--- a/security/keys/persistent.c
+++ b/security/keys/persistent.c
@@ -26,7 +26,7 @@
 					current_cred(),
 					((KEY_POS_ALL & ~KEY_POS_SETATTR) |
 					 KEY_USR_VIEW | KEY_USR_READ),
-					KEY_ALLOC_NOT_IN_QUOTA, NULL);
+					KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
 	if (IS_ERR(reg))
 		return PTR_ERR(reg);
 
@@ -60,7 +60,7 @@
 				   uid, INVALID_GID, current_cred(),
 				   ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
 				    KEY_USR_VIEW | KEY_USR_READ),
-				   KEY_ALLOC_NOT_IN_QUOTA,
+				   KEY_ALLOC_NOT_IN_QUOTA, NULL,
 				   ns->persistent_keyring_register);
 	if (IS_ERR(persistent))
 		return ERR_CAST(persistent);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index e6d50172..40a8852 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -76,7 +76,8 @@
 		if (IS_ERR(uid_keyring)) {
 			uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
 						    cred, user_keyring_perm,
-						    KEY_ALLOC_IN_QUOTA, NULL);
+						    KEY_ALLOC_IN_QUOTA,
+						    NULL, NULL);
 			if (IS_ERR(uid_keyring)) {
 				ret = PTR_ERR(uid_keyring);
 				goto error;
@@ -92,7 +93,8 @@
 			session_keyring =
 				keyring_alloc(buf, user->uid, INVALID_GID,
 					      cred, user_keyring_perm,
-					      KEY_ALLOC_IN_QUOTA, NULL);
+					      KEY_ALLOC_IN_QUOTA,
+					      NULL, NULL);
 			if (IS_ERR(session_keyring)) {
 				ret = PTR_ERR(session_keyring);
 				goto error_release;
@@ -134,7 +136,8 @@
 
 	keyring = keyring_alloc("_tid", new->uid, new->gid, new,
 				KEY_POS_ALL | KEY_USR_VIEW,
-				KEY_ALLOC_QUOTA_OVERRUN, NULL);
+				KEY_ALLOC_QUOTA_OVERRUN,
+				NULL, NULL);
 	if (IS_ERR(keyring))
 		return PTR_ERR(keyring);
 
@@ -180,7 +183,8 @@
 
 	keyring = keyring_alloc("_pid", new->uid, new->gid, new,
 				KEY_POS_ALL | KEY_USR_VIEW,
-				KEY_ALLOC_QUOTA_OVERRUN, NULL);
+				KEY_ALLOC_QUOTA_OVERRUN,
+				NULL, NULL);
 	if (IS_ERR(keyring))
 		return PTR_ERR(keyring);
 
@@ -231,7 +235,7 @@
 
 		keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
 					KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
-					flags, NULL);
+					flags, NULL, NULL);
 		if (IS_ERR(keyring))
 			return PTR_ERR(keyring);
 	} else {
@@ -785,7 +789,7 @@
 		keyring = keyring_alloc(
 			name, old->uid, old->gid, old,
 			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
-			KEY_ALLOC_IN_QUOTA, NULL);
+			KEY_ALLOC_IN_QUOTA, NULL, NULL);
 		if (IS_ERR(keyring)) {
 			ret = PTR_ERR(keyring);
 			goto error2;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index c7a117c..a29e355 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -116,7 +116,7 @@
 	cred = get_current_cred();
 	keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
 				KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
-				KEY_ALLOC_QUOTA_OVERRUN, NULL);
+				KEY_ALLOC_QUOTA_OVERRUN, NULL, NULL);
 	put_cred(cred);
 	if (IS_ERR(keyring)) {
 		ret = PTR_ERR(keyring);
@@ -355,7 +355,7 @@
 
 	key = key_alloc(ctx->index_key.type, ctx->index_key.description,
 			ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
-			perm, flags);
+			perm, flags, NULL);
 	if (IS_ERR(key))
 		goto alloc_failed;
 
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 4f0f112..9db8b4a 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -202,7 +202,7 @@
 	authkey = key_alloc(&key_type_request_key_auth, desc,
 			    cred->fsuid, cred->fsgid, cred,
 			    KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
-			    KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA);
+			    KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL);
 	if (IS_ERR(authkey)) {
 		ret = PTR_ERR(authkey);
 		goto error_alloc;
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 8705d79..66b1840 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -96,45 +96,25 @@
  */
 int user_update(struct key *key, struct key_preparsed_payload *prep)
 {
-	struct user_key_payload *upayload, *zap;
-	size_t datalen = prep->datalen;
+	struct user_key_payload *zap = NULL;
 	int ret;
 
-	ret = -EINVAL;
-	if (datalen <= 0 || datalen > 32767 || !prep->data)
-		goto error;
-
-	/* construct a replacement payload */
-	ret = -ENOMEM;
-	upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
-	if (!upayload)
-		goto error;
-
-	upayload->datalen = datalen;
-	memcpy(upayload->data, prep->data, datalen);
-
 	/* check the quota and attach the new data */
-	zap = upayload;
+	ret = key_payload_reserve(key, prep->datalen);
+	if (ret < 0)
+		return ret;
 
-	ret = key_payload_reserve(key, datalen);
-
-	if (ret == 0) {
-		/* attach the new data, displacing the old */
-		if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
-			zap = key->payload.data[0];
-		else
-			zap = NULL;
-		rcu_assign_keypointer(key, upayload);
-		key->expiry = 0;
-	}
+	/* attach the new data, displacing the old */
+	key->expiry = prep->expiry;
+	if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+		zap = rcu_dereference_key(key);
+	rcu_assign_keypointer(key, prep->payload.data[0]);
+	prep->payload.data[0] = NULL;
 
 	if (zap)
 		kfree_rcu(zap, rcu);
-
-error:
 	return ret;
 }
-
 EXPORT_SYMBOL_GPL(user_update);
 
 /*
diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig
new file mode 100644
index 0000000..dd01aa9
--- /dev/null
+++ b/security/loadpin/Kconfig
@@ -0,0 +1,19 @@
+config SECURITY_LOADPIN
+	bool "Pin load of kernel files (modules, fw, etc) to one filesystem"
+	depends on SECURITY && BLOCK
+	help
+	  Any files read through the kernel file reading interface
+	  (kernel modules, firmware, kexec images, security policy)
+	  can be pinned to the first filesystem used for loading. When
+	  enabled, any files that come from other filesystems will be
+	  rejected. This is best used on systems without an initrd that
+	  have a root filesystem backed by a read-only device such as
+	  dm-verity or a CDROM.
+
+config SECURITY_LOADPIN_ENABLED
+	bool "Enforce LoadPin at boot"
+	depends on SECURITY_LOADPIN
+	help
+	  If selected, LoadPin will enforce pinning at boot. If not
+	  selected, it can be enabled at boot with the kernel parameter
+	  "loadpin.enabled=1".
diff --git a/security/loadpin/Makefile b/security/loadpin/Makefile
new file mode 100644
index 0000000..c2d77f8
--- /dev/null
+++ b/security/loadpin/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SECURITY_LOADPIN) += loadpin.o
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
new file mode 100644
index 0000000..89a46f1
--- /dev/null
+++ b/security/loadpin/loadpin.c
@@ -0,0 +1,190 @@
+/*
+ * Module and Firmware Pinning Security Module
+ *
+ * Copyright 2011-2016 Google Inc.
+ *
+ * Author: Kees Cook <keescook@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "LoadPin: " fmt
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/fs_struct.h>
+#include <linux/lsm_hooks.h>
+#include <linux/mount.h>
+#include <linux/path.h>
+#include <linux/sched.h>	/* current */
+#include <linux/string_helpers.h>
+
+static void report_load(const char *origin, struct file *file, char *operation)
+{
+	char *cmdline, *pathname;
+
+	pathname = kstrdup_quotable_file(file, GFP_KERNEL);
+	cmdline = kstrdup_quotable_cmdline(current, GFP_KERNEL);
+
+	pr_notice("%s %s obj=%s%s%s pid=%d cmdline=%s%s%s\n",
+		  origin, operation,
+		  (pathname && pathname[0] != '<') ? "\"" : "",
+		  pathname,
+		  (pathname && pathname[0] != '<') ? "\"" : "",
+		  task_pid_nr(current),
+		  cmdline ? "\"" : "", cmdline, cmdline ? "\"" : "");
+
+	kfree(cmdline);
+	kfree(pathname);
+}
+
+static int enabled = IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENABLED);
+static struct super_block *pinned_root;
+static DEFINE_SPINLOCK(pinned_root_spinlock);
+
+#ifdef CONFIG_SYSCTL
+static int zero;
+static int one = 1;
+
+static struct ctl_path loadpin_sysctl_path[] = {
+	{ .procname = "kernel", },
+	{ .procname = "loadpin", },
+	{ }
+};
+
+static struct ctl_table loadpin_sysctl_table[] = {
+	{
+		.procname       = "enabled",
+		.data           = &enabled,
+		.maxlen         = sizeof(int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec_minmax,
+		.extra1         = &zero,
+		.extra2         = &one,
+	},
+	{ }
+};
+
+/*
+ * This must be called after early kernel init, since then the rootdev
+ * is available.
+ */
+static void check_pinning_enforcement(struct super_block *mnt_sb)
+{
+	bool ro = false;
+
+	/*
+	 * If load pinning is not enforced via a read-only block
+	 * device, allow sysctl to change modes for testing.
+	 */
+	if (mnt_sb->s_bdev) {
+		ro = bdev_read_only(mnt_sb->s_bdev);
+		pr_info("dev(%u,%u): %s\n",
+			MAJOR(mnt_sb->s_bdev->bd_dev),
+			MINOR(mnt_sb->s_bdev->bd_dev),
+			ro ? "read-only" : "writable");
+	} else
+		pr_info("mnt_sb lacks block device, treating as: writable\n");
+
+	if (!ro) {
+		if (!register_sysctl_paths(loadpin_sysctl_path,
+					   loadpin_sysctl_table))
+			pr_notice("sysctl registration failed!\n");
+		else
+			pr_info("load pinning can be disabled.\n");
+	} else
+		pr_info("load pinning engaged.\n");
+}
+#else
+static void check_pinning_enforcement(struct super_block *mnt_sb)
+{
+	pr_info("load pinning engaged.\n");
+}
+#endif
+
+static void loadpin_sb_free_security(struct super_block *mnt_sb)
+{
+	/*
+	 * When unmounting the filesystem we were using for load
+	 * pinning, we acknowledge the superblock release, but make sure
+	 * no other modules or firmware can be loaded.
+	 */
+	if (!IS_ERR_OR_NULL(pinned_root) && mnt_sb == pinned_root) {
+		pinned_root = ERR_PTR(-EIO);
+		pr_info("umount pinned fs: refusing further loads\n");
+	}
+}
+
+static int loadpin_read_file(struct file *file, enum kernel_read_file_id id)
+{
+	struct super_block *load_root;
+	const char *origin = kernel_read_file_id_str(id);
+
+	/* This handles the older init_module API that has a NULL file. */
+	if (!file) {
+		if (!enabled) {
+			report_load(origin, NULL, "old-api-pinning-ignored");
+			return 0;
+		}
+
+		report_load(origin, NULL, "old-api-denied");
+		return -EPERM;
+	}
+
+	load_root = file->f_path.mnt->mnt_sb;
+
+	/* First loaded module/firmware defines the root for all others. */
+	spin_lock(&pinned_root_spinlock);
+	/*
+	 * pinned_root is only NULL at startup. Otherwise, it is either
+	 * a valid reference, or an ERR_PTR.
+	 */
+	if (!pinned_root) {
+		pinned_root = load_root;
+		/*
+		 * Unlock now since it's only pinned_root we care about.
+		 * In the worst case, we will (correctly) report pinning
+		 * failures before we have announced that pinning is
+		 * enabled. This would be purely cosmetic.
+		 */
+		spin_unlock(&pinned_root_spinlock);
+		check_pinning_enforcement(pinned_root);
+		report_load(origin, file, "pinned");
+	} else {
+		spin_unlock(&pinned_root_spinlock);
+	}
+
+	if (IS_ERR_OR_NULL(pinned_root) || load_root != pinned_root) {
+		if (unlikely(!enabled)) {
+			report_load(origin, file, "pinning-ignored");
+			return 0;
+		}
+
+		report_load(origin, file, "denied");
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static struct security_hook_list loadpin_hooks[] = {
+	LSM_HOOK_INIT(sb_free_security, loadpin_sb_free_security),
+	LSM_HOOK_INIT(kernel_read_file, loadpin_read_file),
+};
+
+void __init loadpin_add_hooks(void)
+{
+	pr_info("ready to pin (currently %sabled)", enabled ? "en" : "dis");
+	security_add_hooks(loadpin_hooks, ARRAY_SIZE(loadpin_hooks));
+}
+
+/* Should not be mutable after boot, so not listed in sysfs (perm == 0). */
+module_param(enabled, int, 0);
+MODULE_PARM_DESC(enabled, "Pin module/firmware loading (default: true)");
diff --git a/security/security.c b/security/security.c
index 3644b034..7095693 100644
--- a/security/security.c
+++ b/security/security.c
@@ -60,6 +60,7 @@
 	 */
 	capability_add_hooks();
 	yama_add_hooks();
+	loadpin_add_hooks();
 
 	/*
 	 * Load all the remaining security modules.
@@ -208,7 +209,7 @@
 	return call_int_hook(syslog, 0, type);
 }
 
-int security_settime(const struct timespec *ts, const struct timezone *tz)
+int security_settime64(const struct timespec64 *ts, const struct timezone *tz)
 {
 	return call_int_hook(settime, 0, ts, tz);
 }
@@ -302,7 +303,7 @@
 	return call_int_hook(sb_statfs, 0, dentry);
 }
 
-int security_sb_mount(const char *dev_name, struct path *path,
+int security_sb_mount(const char *dev_name, const struct path *path,
                        const char *type, unsigned long flags, void *data)
 {
 	return call_int_hook(sb_mount, 0, dev_name, path, type, flags, data);
@@ -313,7 +314,7 @@
 	return call_int_hook(sb_umount, 0, mnt, flags);
 }
 
-int security_sb_pivotroot(struct path *old_path, struct path *new_path)
+int security_sb_pivotroot(const struct path *old_path, const struct path *new_path)
 {
 	return call_int_hook(sb_pivotroot, 0, old_path, new_path);
 }
@@ -410,7 +411,7 @@
 EXPORT_SYMBOL(security_old_inode_init_security);
 
 #ifdef CONFIG_SECURITY_PATH
-int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
+int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
 			unsigned int dev)
 {
 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
@@ -419,7 +420,7 @@
 }
 EXPORT_SYMBOL(security_path_mknod);
 
-int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode)
+int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode)
 {
 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
 		return 0;
@@ -427,14 +428,14 @@
 }
 EXPORT_SYMBOL(security_path_mkdir);
 
-int security_path_rmdir(struct path *dir, struct dentry *dentry)
+int security_path_rmdir(const struct path *dir, struct dentry *dentry)
 {
 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
 		return 0;
 	return call_int_hook(path_rmdir, 0, dir, dentry);
 }
 
-int security_path_unlink(struct path *dir, struct dentry *dentry)
+int security_path_unlink(const struct path *dir, struct dentry *dentry)
 {
 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
 		return 0;
@@ -442,7 +443,7 @@
 }
 EXPORT_SYMBOL(security_path_unlink);
 
-int security_path_symlink(struct path *dir, struct dentry *dentry,
+int security_path_symlink(const struct path *dir, struct dentry *dentry,
 			  const char *old_name)
 {
 	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
@@ -450,7 +451,7 @@
 	return call_int_hook(path_symlink, 0, dir, dentry, old_name);
 }
 
-int security_path_link(struct dentry *old_dentry, struct path *new_dir,
+int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
 		       struct dentry *new_dentry)
 {
 	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
@@ -458,8 +459,8 @@
 	return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
 }
 
-int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
-			 struct path *new_dir, struct dentry *new_dentry,
+int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
+			 const struct path *new_dir, struct dentry *new_dentry,
 			 unsigned int flags)
 {
 	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
@@ -478,28 +479,28 @@
 }
 EXPORT_SYMBOL(security_path_rename);
 
-int security_path_truncate(struct path *path)
+int security_path_truncate(const struct path *path)
 {
 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
 		return 0;
 	return call_int_hook(path_truncate, 0, path);
 }
 
-int security_path_chmod(struct path *path, umode_t mode)
+int security_path_chmod(const struct path *path, umode_t mode)
 {
 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
 		return 0;
 	return call_int_hook(path_chmod, 0, path, mode);
 }
 
-int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
 {
 	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
 		return 0;
 	return call_int_hook(path_chown, 0, path, uid, gid);
 }
 
-int security_path_chroot(struct path *path)
+int security_path_chroot(const struct path *path)
 {
 	return call_int_hook(path_chroot, 0, path);
 }
@@ -1848,7 +1849,6 @@
 	.tun_dev_attach =
 		LIST_HEAD_INIT(security_hook_heads.tun_dev_attach),
 	.tun_dev_open =	LIST_HEAD_INIT(security_hook_heads.tun_dev_open),
-	.skb_owned_by =	LIST_HEAD_INIT(security_hook_heads.skb_owned_by),
 #endif	/* CONFIG_SECURITY_NETWORK */
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 	.xfrm_policy_alloc_security =
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 912deee..a86d537 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -259,7 +259,7 @@
 
 	might_sleep_if(may_sleep);
 
-	if (isec->initialized == LABEL_INVALID) {
+	if (ss_initialized && isec->initialized != LABEL_INITIALIZED) {
 		if (!may_sleep)
 			return -ECHILD;
 
@@ -297,6 +297,13 @@
 	return inode->i_security;
 }
 
+static struct inode_security_struct *backing_inode_security_novalidate(struct dentry *dentry)
+{
+	struct inode *inode = d_backing_inode(dentry);
+
+	return inode->i_security;
+}
+
 /*
  * Get the security label of a dentry's backing inode.
  */
@@ -506,7 +513,8 @@
 			rc = -EOPNOTSUPP;
 			goto out;
 		}
-		rc = root_inode->i_op->getxattr(root, XATTR_NAME_SELINUX, NULL, 0);
+		rc = root_inode->i_op->getxattr(root, root_inode,
+						XATTR_NAME_SELINUX, NULL, 0);
 		if (rc < 0 && rc != -ENODATA) {
 			if (rc == -EOPNOTSUPP)
 				printk(KERN_WARNING "SELinux: (dev %s, type "
@@ -686,7 +694,7 @@
 	struct superblock_security_struct *sbsec = sb->s_security;
 	const char *name = sb->s_type->name;
 	struct dentry *root = sbsec->sb->s_root;
-	struct inode_security_struct *root_isec = backing_inode_security(root);
+	struct inode_security_struct *root_isec;
 	u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
 	u32 defcontext_sid = 0;
 	char **mount_options = opts->mnt_opts;
@@ -729,6 +737,8 @@
 	    && (num_opts == 0))
 		goto out;
 
+	root_isec = backing_inode_security_novalidate(root);
+
 	/*
 	 * parse the mount options, check if they are valid sids.
 	 * also check if someone is trying to mount the same sb more
@@ -1316,7 +1326,7 @@
 				 u32 *sid)
 {
 	int rc;
-	struct super_block *sb = dentry->d_inode->i_sb;
+	struct super_block *sb = dentry->d_sb;
 	char *buffer, *path;
 
 	buffer = (char *)__get_free_page(GFP_KERNEL);
@@ -1412,13 +1422,13 @@
 			goto out_unlock;
 		}
 		context[len] = '\0';
-		rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX,
+		rc = inode->i_op->getxattr(dentry, inode, XATTR_NAME_SELINUX,
 					   context, len);
 		if (rc == -ERANGE) {
 			kfree(context);
 
 			/* Need a larger buffer.  Query for the right size. */
-			rc = inode->i_op->getxattr(dentry, XATTR_NAME_SELINUX,
+			rc = inode->i_op->getxattr(dentry, inode, XATTR_NAME_SELINUX,
 						   NULL, 0);
 			if (rc < 0) {
 				dput(dentry);
@@ -1432,7 +1442,7 @@
 				goto out_unlock;
 			}
 			context[len] = '\0';
-			rc = inode->i_op->getxattr(dentry,
+			rc = inode->i_op->getxattr(dentry, inode,
 						   XATTR_NAME_SELINUX,
 						   context, len);
 		}
@@ -1622,7 +1632,7 @@
 
 /* Check whether a task is allowed to use a capability. */
 static int cred_has_capability(const struct cred *cred,
-			       int cap, int audit)
+			       int cap, int audit, bool initns)
 {
 	struct common_audit_data ad;
 	struct av_decision avd;
@@ -1636,10 +1646,10 @@
 
 	switch (CAP_TO_INDEX(cap)) {
 	case 0:
-		sclass = SECCLASS_CAPABILITY;
+		sclass = initns ? SECCLASS_CAPABILITY : SECCLASS_CAP_USERNS;
 		break;
 	case 1:
-		sclass = SECCLASS_CAPABILITY2;
+		sclass = initns ? SECCLASS_CAPABILITY2 : SECCLASS_CAP2_USERNS;
 		break;
 	default:
 		printk(KERN_ERR
@@ -1781,7 +1791,6 @@
 					 u32 *_new_isid)
 {
 	const struct superblock_security_struct *sbsec = dir->i_sb->s_security;
-	const struct inode_security_struct *dsec = inode_security(dir);
 	const struct task_security_struct *tsec = current_security();
 
 	if ((sbsec->flags & SE_SBINITIALIZED) &&
@@ -1791,6 +1800,7 @@
 		   tsec->create_sid) {
 		*_new_isid = tsec->create_sid;
 	} else {
+		const struct inode_security_struct *dsec = inode_security(dir);
 		return security_transition_sid(tsec->sid, dsec->sid, tclass,
 					       name, _new_isid);
 	}
@@ -2075,7 +2085,7 @@
 	u32 sid = task_sid(to);
 	struct file_security_struct *fsec = file->f_security;
 	struct dentry *dentry = file->f_path.dentry;
-	struct inode_security_struct *isec = backing_inode_security(dentry);
+	struct inode_security_struct *isec;
 	struct common_audit_data ad;
 	int rc;
 
@@ -2094,6 +2104,7 @@
 	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
 		return 0;
 
+	isec = backing_inode_security(dentry);
 	return avc_has_perm(sid, isec->sid, isec->sclass, file_to_av(file),
 			    &ad);
 }
@@ -2142,7 +2153,7 @@
 static int selinux_capable(const struct cred *cred, struct user_namespace *ns,
 			   int cap, int audit)
 {
-	return cred_has_capability(cred, cap, audit);
+	return cred_has_capability(cred, cap, audit, ns == &init_user_ns);
 }
 
 static int selinux_quotactl(int cmds, int type, int id, struct super_block *sb)
@@ -2220,7 +2231,7 @@
 	int rc, cap_sys_admin = 0;
 
 	rc = cred_has_capability(current_cred(), CAP_SYS_ADMIN,
-					SECURITY_CAP_NOAUDIT);
+				 SECURITY_CAP_NOAUDIT, true);
 	if (rc == 0)
 		cap_sys_admin = 1;
 
@@ -2229,6 +2240,20 @@
 
 /* binprm security operations */
 
+static u32 ptrace_parent_sid(struct task_struct *task)
+{
+	u32 sid = 0;
+	struct task_struct *tracer;
+
+	rcu_read_lock();
+	tracer = ptrace_parent(task);
+	if (tracer)
+		sid = task_sid(tracer);
+	rcu_read_unlock();
+
+	return sid;
+}
+
 static int check_nnp_nosuid(const struct linux_binprm *bprm,
 			    const struct task_security_struct *old_tsec,
 			    const struct task_security_struct *new_tsec)
@@ -2350,18 +2375,7 @@
 		 * changes its SID has the appropriate permit */
 		if (bprm->unsafe &
 		    (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
-			struct task_struct *tracer;
-			struct task_security_struct *sec;
-			u32 ptsid = 0;
-
-			rcu_read_lock();
-			tracer = ptrace_parent(current);
-			if (likely(tracer != NULL)) {
-				sec = __task_cred(tracer)->security;
-				ptsid = sec->sid;
-			}
-			rcu_read_unlock();
-
+			u32 ptsid = ptrace_parent_sid(current);
 			if (ptsid != 0) {
 				rc = avc_has_perm(ptsid, new_tsec->sid,
 						  SECCLASS_PROCESS,
@@ -2760,7 +2774,7 @@
 }
 
 static int selinux_mount(const char *dev_name,
-			 struct path *path,
+			 const struct path *path,
 			 const char *type,
 			 unsigned long flags,
 			 void *data)
@@ -3045,7 +3059,7 @@
 				  const void *value, size_t size, int flags)
 {
 	struct inode *inode = d_backing_inode(dentry);
-	struct inode_security_struct *isec = backing_inode_security(dentry);
+	struct inode_security_struct *isec;
 	struct superblock_security_struct *sbsec;
 	struct common_audit_data ad;
 	u32 newsid, sid = current_sid();
@@ -3064,6 +3078,7 @@
 	ad.type = LSM_AUDIT_DATA_DENTRY;
 	ad.u.dentry = dentry;
 
+	isec = backing_inode_security(dentry);
 	rc = avc_has_perm(sid, isec->sid, isec->sclass,
 			  FILE__RELABELFROM, &ad);
 	if (rc)
@@ -3122,7 +3137,7 @@
 					int flags)
 {
 	struct inode *inode = d_backing_inode(dentry);
-	struct inode_security_struct *isec = backing_inode_security(dentry);
+	struct inode_security_struct *isec;
 	u32 newsid;
 	int rc;
 
@@ -3139,6 +3154,7 @@
 		return;
 	}
 
+	isec = backing_inode_security(dentry);
 	isec->sclass = inode_mode_to_security_class(inode->i_mode);
 	isec->sid = newsid;
 	isec->initialized = LABEL_INITIALIZED;
@@ -3180,7 +3196,7 @@
 	u32 size;
 	int error;
 	char *context = NULL;
-	struct inode_security_struct *isec = inode_security(inode);
+	struct inode_security_struct *isec;
 
 	if (strcmp(name, XATTR_SELINUX_SUFFIX))
 		return -EOPNOTSUPP;
@@ -3198,7 +3214,8 @@
 			    SECURITY_CAP_NOAUDIT);
 	if (!error)
 		error = cred_has_capability(current_cred(), CAP_MAC_ADMIN,
-					    SECURITY_CAP_NOAUDIT);
+					    SECURITY_CAP_NOAUDIT, true);
+	isec = inode_security(inode);
 	if (!error)
 		error = security_sid_to_context_force(isec->sid, &context,
 						      &size);
@@ -3219,7 +3236,7 @@
 static int selinux_inode_setsecurity(struct inode *inode, const char *name,
 				     const void *value, size_t size, int flags)
 {
-	struct inode_security_struct *isec = inode_security(inode);
+	struct inode_security_struct *isec = inode_security_novalidate(inode);
 	u32 newsid;
 	int rc;
 
@@ -3308,7 +3325,7 @@
 	struct common_audit_data ad;
 	struct file_security_struct *fsec = file->f_security;
 	struct inode *inode = file_inode(file);
-	struct inode_security_struct *isec = inode_security(inode);
+	struct inode_security_struct *isec;
 	struct lsm_ioctlop_audit ioctl;
 	u32 ssid = cred_sid(cred);
 	int rc;
@@ -3332,6 +3349,7 @@
 	if (unlikely(IS_PRIVATE(inode)))
 		return 0;
 
+	isec = inode_security(inode);
 	rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass,
 			requested, driver, xperm, &ad);
 out:
@@ -3373,7 +3391,7 @@
 	case KDSKBENT:
 	case KDSKBSENT:
 		error = cred_has_capability(cred, CAP_SYS_TTY_CONFIG,
-					    SECURITY_CAP_AUDIT);
+					    SECURITY_CAP_AUDIT, true);
 		break;
 
 	/* default case assumes that the command will go
@@ -3462,8 +3480,9 @@
 		    vma->vm_end <= vma->vm_mm->brk) {
 			rc = cred_has_perm(cred, cred, PROCESS__EXECHEAP);
 		} else if (!vma->vm_file &&
-			   vma->vm_start <= vma->vm_mm->start_stack &&
-			   vma->vm_end >= vma->vm_mm->start_stack) {
+			   ((vma->vm_start <= vma->vm_mm->start_stack &&
+			     vma->vm_end >= vma->vm_mm->start_stack) ||
+			    vma_is_stack_for_task(vma, current))) {
 			rc = current_has_perm(current, PROCESS__EXECSTACK);
 		} else if (vma->vm_file && vma->anon_vma) {
 			/*
@@ -3719,6 +3738,52 @@
 			    SYSTEM__MODULE_REQUEST, &ad);
 }
 
+static int selinux_kernel_module_from_file(struct file *file)
+{
+	struct common_audit_data ad;
+	struct inode_security_struct *isec;
+	struct file_security_struct *fsec;
+	u32 sid = current_sid();
+	int rc;
+
+	/* init_module */
+	if (file == NULL)
+		return avc_has_perm(sid, sid, SECCLASS_SYSTEM,
+					SYSTEM__MODULE_LOAD, NULL);
+
+	/* finit_module */
+
+	ad.type = LSM_AUDIT_DATA_PATH;
+	ad.u.path = file->f_path;
+
+	fsec = file->f_security;
+	if (sid != fsec->sid) {
+		rc = avc_has_perm(sid, fsec->sid, SECCLASS_FD, FD__USE, &ad);
+		if (rc)
+			return rc;
+	}
+
+	isec = inode_security(file_inode(file));
+	return avc_has_perm(sid, isec->sid, SECCLASS_SYSTEM,
+				SYSTEM__MODULE_LOAD, &ad);
+}
+
+static int selinux_kernel_read_file(struct file *file,
+				    enum kernel_read_file_id id)
+{
+	int rc = 0;
+
+	switch (id) {
+	case READING_MODULE:
+		rc = selinux_kernel_module_from_file(file);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
 static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
 {
 	return current_has_perm(p, PROCESS__SETPGID);
@@ -4598,6 +4663,7 @@
 {
 	u32 peer_secid = SECSID_NULL;
 	u16 family;
+	struct inode_security_struct *isec;
 
 	if (skb && skb->protocol == htons(ETH_P_IP))
 		family = PF_INET;
@@ -4608,9 +4674,10 @@
 	else
 		goto out;
 
-	if (sock && family == PF_UNIX)
-		selinux_inode_getsecid(SOCK_INODE(sock), &peer_secid);
-	else if (skb)
+	if (sock && family == PF_UNIX) {
+		isec = inode_security_novalidate(SOCK_INODE(sock));
+		peer_secid = isec->sid;
+	} else if (skb)
 		selinux_skb_peerlbl_sid(skb, family, &peer_secid);
 
 out:
@@ -5675,7 +5742,6 @@
 			       char *name, void *value, size_t size)
 {
 	struct task_security_struct *tsec;
-	struct task_struct *tracer;
 	struct cred *new;
 	u32 sid = 0, ptsid;
 	int error;
@@ -5782,14 +5848,8 @@
 
 		/* Check for ptracing, and update the task SID if ok.
 		   Otherwise, leave SID unchanged and fail. */
-		ptsid = 0;
-		rcu_read_lock();
-		tracer = ptrace_parent(p);
-		if (tracer)
-			ptsid = task_sid(tracer);
-		rcu_read_unlock();
-
-		if (tracer) {
+		ptsid = ptrace_parent_sid(p);
+		if (ptsid != 0) {
 			error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
 					     PROCESS__PTRACE, NULL);
 			if (error)
@@ -6020,6 +6080,7 @@
 	LSM_HOOK_INIT(kernel_act_as, selinux_kernel_act_as),
 	LSM_HOOK_INIT(kernel_create_files_as, selinux_kernel_create_files_as),
 	LSM_HOOK_INIT(kernel_module_request, selinux_kernel_module_request),
+	LSM_HOOK_INIT(kernel_read_file, selinux_kernel_read_file),
 	LSM_HOOK_INIT(task_setpgid, selinux_task_setpgid),
 	LSM_HOOK_INIT(task_getpgid, selinux_task_getpgid),
 	LSM_HOOK_INIT(task_getsid, selinux_task_getsid),
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index ef83c4b..1f1f4b2 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -12,6 +12,18 @@
 #define COMMON_IPC_PERMS "create", "destroy", "getattr", "setattr", "read", \
 	    "write", "associate", "unix_read", "unix_write"
 
+#define COMMON_CAP_PERMS  "chown", "dac_override", "dac_read_search", \
+	    "fowner", "fsetid", "kill", "setgid", "setuid", "setpcap", \
+	    "linux_immutable", "net_bind_service", "net_broadcast", \
+	    "net_admin", "net_raw", "ipc_lock", "ipc_owner", "sys_module", \
+	    "sys_rawio", "sys_chroot", "sys_ptrace", "sys_pacct", "sys_admin", \
+	    "sys_boot", "sys_nice", "sys_resource", "sys_time", \
+	    "sys_tty_config", "mknod", "lease", "audit_write", \
+	    "audit_control", "setfcap"
+
+#define COMMON_CAP2_PERMS  "mac_override", "mac_admin", "syslog", \
+		"wake_alarm", "block_suspend", "audit_read"
+
 /*
  * Note: The name for any socket class should be suffixed by "socket",
  *	 and doesn't contain more than one substr of "socket".
@@ -32,16 +44,9 @@
 	    "setsockcreate", NULL } },
 	{ "system",
 	  { "ipc_info", "syslog_read", "syslog_mod",
-	    "syslog_console", "module_request", NULL } },
+	    "syslog_console", "module_request", "module_load", NULL } },
 	{ "capability",
-	  { "chown", "dac_override", "dac_read_search",
-	    "fowner", "fsetid", "kill", "setgid", "setuid", "setpcap",
-	    "linux_immutable", "net_bind_service", "net_broadcast",
-	    "net_admin", "net_raw", "ipc_lock", "ipc_owner", "sys_module",
-	    "sys_rawio", "sys_chroot", "sys_ptrace", "sys_pacct", "sys_admin",
-	    "sys_boot", "sys_nice", "sys_resource", "sys_time",
-	    "sys_tty_config", "mknod", "lease", "audit_write",
-	    "audit_control", "setfcap", NULL } },
+	  { COMMON_CAP_PERMS, NULL } },
 	{ "filesystem",
 	  { "mount", "remount", "unmount", "getattr",
 	    "relabelfrom", "relabelto", "associate", "quotamod",
@@ -150,12 +155,15 @@
 	{ "memprotect", { "mmap_zero", NULL } },
 	{ "peer", { "recv", NULL } },
 	{ "capability2",
-	  { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend",
-	    "audit_read", NULL } },
+	  { COMMON_CAP2_PERMS, NULL } },
 	{ "kernel_service", { "use_as_override", "create_files_as", NULL } },
 	{ "tun_socket",
 	  { COMMON_SOCK_PERMS, "attach_queue", NULL } },
 	{ "binder", { "impersonate", "call", "set_context_mgr", "transfer",
 		      NULL } },
+	{ "cap_userns",
+	  { COMMON_CAP_PERMS, NULL } },
+	{ "cap2_userns",
+	  { COMMON_CAP2_PERMS, NULL } },
 	{ NULL }
   };
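
The two SELinux changes above work together: cred_has_capability() in hooks.c gains an "initns" flag, and classmap.h adds matching "cap_userns"/"cap2_userns" classes built from the new COMMON_CAP_PERMS/COMMON_CAP2_PERMS macros. A minimal sketch of the resulting class selection, using only names visible in the diff (everything else simplified, not part of the patch):

static u16 capability_class(int cap, bool initns)
{
	switch (CAP_TO_INDEX(cap)) {
	case 0:
		return initns ? SECCLASS_CAPABILITY : SECCLASS_CAP_USERNS;
	case 1:
		return initns ? SECCLASS_CAPABILITY2 : SECCLASS_CAP2_USERNS;
	default:
		return 0;	/* unexpected capability index */
	}
}

selinux_capable() passes ns == &init_user_ns as "initns", so capability use inside a non-init user namespace is checked against the new cap_userns/cap2_userns classes rather than the host capability classes.
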
diff --git a/security/selinux/include/conditional.h b/security/selinux/include/conditional.h
index 67ce7a8..ff4fddc 100644
--- a/security/selinux/include/conditional.h
+++ b/security/selinux/include/conditional.h
@@ -17,6 +17,6 @@
 
 int security_set_bools(int len, int *values);
 
-int security_get_bool_value(int bool);
+int security_get_bool_value(int index);
 
 #endif
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index a2ae054..c21e135 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -38,9 +38,8 @@
 };
 
 enum label_initialized {
-	LABEL_MISSING,		/* not initialized */
-	LABEL_INITIALIZED,	/* inizialized */
-	LABEL_INVALID		/* invalid */
+	LABEL_INVALID,		/* invalid or not initialized */
+	LABEL_INITIALIZED	/* initialized */
 };
 
 struct inode_security_struct {
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 8495b93..2ca9cde 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -76,6 +76,8 @@
 	{ RTM_NEWNSID,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
 	{ RTM_DELNSID,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
 	{ RTM_GETNSID,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_NEWSTATS,		NETLINK_ROUTE_SOCKET__NLMSG_READ },
+	{ RTM_GETSTATS,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
 };
 
 static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -155,7 +157,7 @@
 	switch (sclass) {
 	case SECCLASS_NETLINK_ROUTE_SOCKET:
 		/* RTM_MAX always points to RTM_SETxxxx, i.e. RTM_NEWxxx + 3 */
-		BUILD_BUG_ON(RTM_MAX != (RTM_NEWNSID + 3));
+		BUILD_BUG_ON(RTM_MAX != (RTM_NEWSTATS + 3));
 		err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
 				 sizeof(nlmsg_route_perms));
 		break;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index ebda973..89df646 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2696,7 +2696,7 @@
 	return rc;
 }
 
-int security_get_bool_value(int bool)
+int security_get_bool_value(int index)
 {
 	int rc;
 	int len;
@@ -2705,10 +2705,10 @@
 
 	rc = -EFAULT;
 	len = policydb.p_bools.nprim;
-	if (bool >= len)
+	if (index >= len)
 		goto out;
 
-	rc = policydb.bool_val_to_struct[bool]->state;
+	rc = policydb.bool_val_to_struct[index]->state;
 out:
 	read_unlock(&policy_rwlock);
 	return rc;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 11f7901..ff2b8c3 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -272,7 +272,7 @@
 	if (buffer == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	rc = ip->i_op->getxattr(dp, name, buffer, SMK_LONGLABEL);
+	rc = ip->i_op->getxattr(dp, ip, name, buffer, SMK_LONGLABEL);
 	if (rc < 0)
 		skp = ERR_PTR(rc);
 	else if (rc == 0)
@@ -1444,7 +1444,7 @@
 	 *	XATTR_NAME_SMACKIPOUT
 	 */
 	if (strcmp(name, XATTR_NAME_SMACK) == 0) {
-		struct super_block *sbp = d_backing_inode(dentry)->i_sb;
+		struct super_block *sbp = dentry->d_sb;
 		struct superblock_smack *sbsp = sbp->s_security;
 
 		isp->smk_inode = sbsp->smk_default;
@@ -3519,7 +3519,7 @@
 					TRANS_TRUE, TRANS_TRUE_SIZE,
 					0);
 			} else {
-				rc = inode->i_op->getxattr(dp,
+				rc = inode->i_op->getxattr(dp, inode,
 					XATTR_NAME_SMACKTRANSMUTE, trattr,
 					TRANS_TRUE_SIZE);
 				if (rc >= 0 && strncmp(trattr, TRANS_TRUE,
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index f9c9fb1..361e7a2 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -957,7 +957,7 @@
 const struct tomoyo_path_info *tomoyo_path_matches_group
 (const struct tomoyo_path_info *pathname, const struct tomoyo_group *group);
 int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
-				 struct path *path, const int flag);
+				 const struct path *path, const int flag);
 void tomoyo_close_control(struct tomoyo_io_buffer *head);
 int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env);
 int tomoyo_execute_permission(struct tomoyo_request_info *r,
@@ -968,15 +968,15 @@
 int tomoyo_init_request_info(struct tomoyo_request_info *r,
 			     struct tomoyo_domain_info *domain,
 			     const u8 index);
-int tomoyo_mkdev_perm(const u8 operation, struct path *path,
+int tomoyo_mkdev_perm(const u8 operation, const struct path *path,
 		      const unsigned int mode, unsigned int dev);
-int tomoyo_mount_permission(const char *dev_name, struct path *path,
+int tomoyo_mount_permission(const char *dev_name, const struct path *path,
 			    const char *type, unsigned long flags,
 			    void *data_page);
 int tomoyo_open_control(const u8 type, struct file *file);
-int tomoyo_path2_perm(const u8 operation, struct path *path1,
-		      struct path *path2);
-int tomoyo_path_number_perm(const u8 operation, struct path *path,
+int tomoyo_path2_perm(const u8 operation, const struct path *path1,
+		      const struct path *path2);
+int tomoyo_path_number_perm(const u8 operation, const struct path *path,
 			    unsigned long number);
 int tomoyo_path_perm(const u8 operation, const struct path *path,
 		     const char *target);
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 2367b10..7041a58 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -687,7 +687,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-int tomoyo_path_number_perm(const u8 type, struct path *path,
+int tomoyo_path_number_perm(const u8 type, const struct path *path,
 			    unsigned long number)
 {
 	struct tomoyo_request_info r;
@@ -733,7 +733,7 @@
  * Returns 0 on success, negative value otherwise.
  */
 int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
-				 struct path *path, const int flag)
+				 const struct path *path, const int flag)
 {
 	const u8 acc_mode = ACC_MODE(flag);
 	int error = 0;
@@ -838,7 +838,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-int tomoyo_mkdev_perm(const u8 operation, struct path *path,
+int tomoyo_mkdev_perm(const u8 operation, const struct path *path,
 		      const unsigned int mode, unsigned int dev)
 {
 	struct tomoyo_request_info r;
@@ -882,8 +882,8 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-int tomoyo_path2_perm(const u8 operation, struct path *path1,
-		      struct path *path2)
+int tomoyo_path2_perm(const u8 operation, const struct path *path1,
+		      const struct path *path2)
 {
 	int error = -ENOMEM;
 	struct tomoyo_path_info buf1;
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
index 390c646..14b53fb 100644
--- a/security/tomoyo/mount.c
+++ b/security/tomoyo/mount.c
@@ -73,7 +73,7 @@
  */
 static int tomoyo_mount_acl(struct tomoyo_request_info *r,
 			    const char *dev_name,
-			    struct path *dir, const char *type,
+			    const struct path *dir, const char *type,
 			    unsigned long flags)
 {
 	struct tomoyo_obj_info obj = { };
@@ -184,7 +184,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-int tomoyo_mount_permission(const char *dev_name, struct path *path,
+int tomoyo_mount_permission(const char *dev_name, const struct path *path,
 			    const char *type, unsigned long flags,
 			    void *data_page)
 {
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index cbf3df4..75c9987 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -150,7 +150,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_truncate(struct path *path)
+static int tomoyo_path_truncate(const struct path *path)
 {
 	return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path, NULL);
 }
@@ -163,7 +163,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
+static int tomoyo_path_unlink(const struct path *parent, struct dentry *dentry)
 {
 	struct path path = { parent->mnt, dentry };
 	return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL);
@@ -178,7 +178,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
+static int tomoyo_path_mkdir(const struct path *parent, struct dentry *dentry,
 			     umode_t mode)
 {
 	struct path path = { parent->mnt, dentry };
@@ -194,7 +194,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
+static int tomoyo_path_rmdir(const struct path *parent, struct dentry *dentry)
 {
 	struct path path = { parent->mnt, dentry };
 	return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL);
@@ -209,7 +209,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
+static int tomoyo_path_symlink(const struct path *parent, struct dentry *dentry,
 			       const char *old_name)
 {
 	struct path path = { parent->mnt, dentry };
@@ -226,7 +226,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
+static int tomoyo_path_mknod(const struct path *parent, struct dentry *dentry,
 			     umode_t mode, unsigned int dev)
 {
 	struct path path = { parent->mnt, dentry };
@@ -265,7 +265,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
+static int tomoyo_path_link(struct dentry *old_dentry, const struct path *new_dir,
 			    struct dentry *new_dentry)
 {
 	struct path path1 = { new_dir->mnt, old_dentry };
@@ -283,9 +283,9 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_rename(struct path *old_parent,
+static int tomoyo_path_rename(const struct path *old_parent,
 			      struct dentry *old_dentry,
-			      struct path *new_parent,
+			      const struct path *new_parent,
 			      struct dentry *new_dentry)
 {
 	struct path path1 = { old_parent->mnt, old_dentry };
@@ -351,7 +351,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_chmod(struct path *path, umode_t mode)
+static int tomoyo_path_chmod(const struct path *path, umode_t mode)
 {
 	return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path,
 				       mode & S_IALLUGO);
@@ -366,7 +366,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+static int tomoyo_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
 {
 	int error = 0;
 	if (uid_valid(uid))
@@ -385,7 +385,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_path_chroot(struct path *path)
+static int tomoyo_path_chroot(const struct path *path)
 {
 	return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path, NULL);
 }
@@ -401,7 +401,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_sb_mount(const char *dev_name, struct path *path,
+static int tomoyo_sb_mount(const char *dev_name, const struct path *path,
 			   const char *type, unsigned long flags, void *data)
 {
 	return tomoyo_mount_permission(dev_name, path, type, flags, data);
@@ -429,7 +429,7 @@
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
+static int tomoyo_sb_pivotroot(const struct path *old_path, const struct path *new_path)
 {
 	return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
 }
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index cb6ed10..9b756b1 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -18,6 +18,7 @@
 #include <linux/prctl.h>
 #include <linux/ratelimit.h>
 #include <linux/workqueue.h>
+#include <linux/string_helpers.h>
 
 #define YAMA_SCOPE_DISABLED	0
 #define YAMA_SCOPE_RELATIONAL	1
@@ -41,6 +42,22 @@
 static void yama_relation_cleanup(struct work_struct *work);
 static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
 
+static void report_access(const char *access, struct task_struct *target,
+			  struct task_struct *agent)
+{
+	char *target_cmd, *agent_cmd;
+
+	target_cmd = kstrdup_quotable_cmdline(target, GFP_ATOMIC);
+	agent_cmd = kstrdup_quotable_cmdline(agent, GFP_ATOMIC);
+
+	pr_notice_ratelimited(
+		"ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
+		access, target_cmd, target->pid, agent_cmd, agent->pid);
+
+	kfree(agent_cmd);
+	kfree(target_cmd);
+}
+
 /**
  * yama_relation_cleanup - remove invalid entries from the relation list
  *
@@ -307,11 +324,8 @@
 		}
 	}
 
-	if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
-		printk_ratelimited(KERN_NOTICE
-			"ptrace of pid %d was attempted by: %s (pid %d)\n",
-			child->pid, current->comm, current->pid);
-	}
+	if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0)
+		report_access("attach", child, current);
 
 	return rc;
 }
@@ -337,11 +351,8 @@
 		break;
 	}
 
-	if (rc) {
-		printk_ratelimited(KERN_NOTICE
-			"ptraceme of pid %d was attempted by: %s (pid %d)\n",
-			current->pid, parent->comm, parent->pid);
-	}
+	if (rc)
+		report_access("traceme", current, parent);
 
 	return rc;
 }
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 6d12ca9..9749f9e 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -141,35 +141,6 @@
 	  Say Y here to use the HR-timer backend as the default sequencer
 	  timer.
 
-config SND_RTCTIMER
-	tristate "RTC Timer support"
-	depends on RTC
-	select SND_TIMER
-	help
-	  Say Y here to enable RTC timer support for ALSA.  ALSA uses
-	  the RTC timer as a precise timing source and maps the RTC
-	  timer to ALSA's timer interface.  The ALSA sequencer code also
-	  can use this timing source.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called snd-rtctimer.
-
-	  Note that this option is exclusive with the new RTC drivers
-	  (CONFIG_RTC_CLASS) since this requires the old API.
-
-config SND_SEQ_RTCTIMER_DEFAULT
-	bool "Use RTC as default sequencer timer"
-	depends on SND_RTCTIMER && SND_SEQUENCER
-	depends on !SND_SEQ_HRTIMER_DEFAULT
-	default y
-	help
-	  Say Y here to use the RTC timer as the default sequencer
-	  timer.  This is strongly recommended because it ensures
-	  precise MIDI timing even when the system timer runs at less
-	  than 1000 Hz.
-
-	  If in doubt, say Y.
-
 config SND_DYNAMIC_MINORS
 	bool "Dynamic device file minor numbers"
 	help
diff --git a/sound/core/Makefile b/sound/core/Makefile
index 48ab4b8..e85d9dd 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -37,7 +37,6 @@
 obj-$(CONFIG_SND_HWDEP)		+= snd-hwdep.o
 obj-$(CONFIG_SND_TIMER)		+= snd-timer.o
 obj-$(CONFIG_SND_HRTIMER)	+= snd-hrtimer.o
-obj-$(CONFIG_SND_RTCTIMER)	+= snd-rtctimer.o
 obj-$(CONFIG_SND_PCM)		+= snd-pcm.o
 obj-$(CONFIG_SND_DMAENGINE_PCM)	+= snd-pcm-dmaengine.o
 obj-$(CONFIG_SND_RAWMIDI)	+= snd-rawmidi.o
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index a9933c0..9b3334b 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -288,9 +288,12 @@
 	stream = &data->stream;
 	mutex_lock(&stream->device->lock);
 	/* write is allowed when stream is running or has been set up */
-	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
-	    stream->runtime->state != SNDRV_PCM_STATE_PREPARED &&
-			stream->runtime->state != SNDRV_PCM_STATE_RUNNING) {
+	switch (stream->runtime->state) {
+	case SNDRV_PCM_STATE_SETUP:
+	case SNDRV_PCM_STATE_PREPARED:
+	case SNDRV_PCM_STATE_RUNNING:
+		break;
+	default:
 		mutex_unlock(&stream->device->lock);
 		return -EBADFD;
 	}
@@ -391,14 +394,13 @@
 	int retval = 0;
 
 	if (snd_BUG_ON(!data))
-		return -EFAULT;
+		return POLLERR;
+
 	stream = &data->stream;
-	if (snd_BUG_ON(!stream))
-		return -EFAULT;
 
 	mutex_lock(&stream->device->lock);
 	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
-		retval = -EBADFD;
+		retval = snd_compr_get_poll(stream) | POLLERR;
 		goto out;
 	}
 	poll_wait(f, &stream->runtime->sleep, wait);
@@ -421,10 +423,7 @@
 			retval = snd_compr_get_poll(stream);
 		break;
 	default:
-		if (stream->direction == SND_COMPRESS_PLAYBACK)
-			retval = POLLOUT | POLLWRNORM | POLLERR;
-		else
-			retval = POLLIN | POLLRDNORM | POLLERR;
+		retval = snd_compr_get_poll(stream) | POLLERR;
 		break;
 	}
 out:
@@ -802,9 +801,9 @@
 
 	if (snd_BUG_ON(!data))
 		return -EFAULT;
+
 	stream = &data->stream;
-	if (snd_BUG_ON(!stream))
-		return -EFAULT;
+
 	mutex_lock(&stream->device->lock);
 	switch (_IOC_NR(cmd)) {
 	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index 656d9a9..e2f2702 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -38,37 +38,53 @@
 struct snd_hrtimer {
 	struct snd_timer *timer;
 	struct hrtimer hrt;
-	atomic_t running;
+	bool in_callback;
 };
 
 static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
 {
 	struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
 	struct snd_timer *t = stime->timer;
-	unsigned long oruns;
+	ktime_t delta;
+	unsigned long ticks;
+	enum hrtimer_restart ret = HRTIMER_NORESTART;
 
-	if (!atomic_read(&stime->running))
-		return HRTIMER_NORESTART;
+	spin_lock(&t->lock);
+	if (!t->running)
+		goto out; /* fast path */
+	stime->in_callback = true;
+	ticks = t->sticks;
+	spin_unlock(&t->lock);
 
-	oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
-	snd_timer_interrupt(stime->timer, t->sticks * oruns);
+	/* calculate the drift */
+	delta = ktime_sub(hrt->base->get_time(), hrtimer_get_expires(hrt));
+	if (delta.tv64 > 0)
+		ticks += ktime_divns(delta, ticks * resolution);
 
-	if (!atomic_read(&stime->running))
-		return HRTIMER_NORESTART;
-	return HRTIMER_RESTART;
+	snd_timer_interrupt(stime->timer, ticks);
+
+	spin_lock(&t->lock);
+	if (t->running) {
+		hrtimer_add_expires_ns(hrt, t->sticks * resolution);
+		ret = HRTIMER_RESTART;
+	}
+
+	stime->in_callback = false;
+ out:
+	spin_unlock(&t->lock);
+	return ret;
 }
 
 static int snd_hrtimer_open(struct snd_timer *t)
 {
 	struct snd_hrtimer *stime;
 
-	stime = kmalloc(sizeof(*stime), GFP_KERNEL);
+	stime = kzalloc(sizeof(*stime), GFP_KERNEL);
 	if (!stime)
 		return -ENOMEM;
 	hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	stime->timer = t;
 	stime->hrt.function = snd_hrtimer_callback;
-	atomic_set(&stime->running, 0);
 	t->private_data = stime;
 	return 0;
 }
@@ -78,6 +94,11 @@
 	struct snd_hrtimer *stime = t->private_data;
 
 	if (stime) {
+		spin_lock_irq(&t->lock);
+		t->running = 0; /* just to be sure */
+		stime->in_callback = 1; /* skip start/stop */
+		spin_unlock_irq(&t->lock);
+
 		hrtimer_cancel(&stime->hrt);
 		kfree(stime);
 		t->private_data = NULL;
@@ -89,18 +110,19 @@
 {
 	struct snd_hrtimer *stime = t->private_data;
 
-	atomic_set(&stime->running, 0);
-	hrtimer_try_to_cancel(&stime->hrt);
+	if (stime->in_callback)
+		return 0;
 	hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
 		      HRTIMER_MODE_REL);
-	atomic_set(&stime->running, 1);
 	return 0;
 }
 
 static int snd_hrtimer_stop(struct snd_timer *t)
 {
 	struct snd_hrtimer *stime = t->private_data;
-	atomic_set(&stime->running, 0);
+
+	if (stime->in_callback)
+		return 0;
 	hrtimer_try_to_cancel(&stime->hrt);
 	return 0;
 }
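
The rewritten hrtimer callback above no longer uses hrtimer_forward_now(); instead it measures how late the callback ran and folds whole extra periods into the tick count handed to snd_timer_interrupt(). A standalone, purely illustrative sketch of that arithmetic (the function and numbers below are hypothetical; only the formula mirrors the patch):

#include <stdio.h>

/* ticks = sticks, plus one tick per full period the callback ran late */
static unsigned long ticks_for_callback(unsigned long sticks,
					long long late_ns,
					unsigned long resolution_ns)
{
	unsigned long ticks = sticks;

	if (late_ns > 0)
		ticks += late_ns / ((long long)ticks * resolution_ns);
	return ticks;
}

int main(void)
{
	/* one tick per 1 ms period, callback 2.5 ms late -> 3 ticks */
	printf("%lu\n", ticks_for_callback(1, 2500000, 1000000));
	return 0;
}
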
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index 697c166..8eb58c7 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -106,8 +106,9 @@
  * direction of the substream. If the substream is a playback stream the dst
  * fields will be initialized, if it is a capture stream the src fields will be
  * initialized. The {dst,src}_addr_width field will only be initialized if the
- * addr_width field of the DAI DMA data struct is not equal to
- * DMA_SLAVE_BUSWIDTH_UNDEFINED.
+ * SND_DMAENGINE_PCM_DAI_FLAG_PACK flag is set or if the addr_width field of
+ * the DAI DMA data struct is not equal to DMA_SLAVE_BUSWIDTH_UNDEFINED. If
+ * both conditions are met the latter takes priority.
  */
 void snd_dmaengine_pcm_set_config_from_dai_data(
 	const struct snd_pcm_substream *substream,
@@ -117,11 +118,17 @@
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		slave_config->dst_addr = dma_data->addr;
 		slave_config->dst_maxburst = dma_data->maxburst;
+		if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+			slave_config->dst_addr_width =
+				DMA_SLAVE_BUSWIDTH_UNDEFINED;
 		if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
 			slave_config->dst_addr_width = dma_data->addr_width;
 	} else {
 		slave_config->src_addr = dma_data->addr;
 		slave_config->src_maxburst = dma_data->maxburst;
+		if (dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK)
+			slave_config->src_addr_width =
+				DMA_SLAVE_BUSWIDTH_UNDEFINED;
 		if (dma_data->addr_width != DMA_SLAVE_BUSWIDTH_UNDEFINED)
 			slave_config->src_addr_width = dma_data->addr_width;
 	}
diff --git a/sound/core/pcm_iec958.c b/sound/core/pcm_iec958.c
index 36b2d7a..5e6aed6 100644
--- a/sound/core/pcm_iec958.c
+++ b/sound/core/pcm_iec958.c
@@ -9,30 +9,18 @@
 #include <linux/types.h>
 #include <sound/asoundef.h>
 #include <sound/pcm.h>
+#include <sound/pcm_params.h>
 #include <sound/pcm_iec958.h>
 
-/**
- * snd_pcm_create_iec958_consumer - create consumer format IEC958 channel status
- * @runtime: pcm runtime structure with ->rate filled in
- * @cs: channel status buffer, at least four bytes
- * @len: length of channel status buffer
- *
- * Create the consumer format channel status data in @cs of maximum size
- * @len corresponding to the parameters of the PCM runtime @runtime.
- *
- * Drivers may wish to tweak the contents of the buffer after creation.
- *
- * Returns: length of buffer, or negative error code if something failed.
- */
-int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
-	size_t len)
+static int create_iec958_consumer(uint rate, uint sample_width,
+				  u8 *cs, size_t len)
 {
 	unsigned int fs, ws;
 
 	if (len < 4)
 		return -EINVAL;
 
-	switch (runtime->rate) {
+	switch (rate) {
 	case 32000:
 		fs = IEC958_AES3_CON_FS_32000;
 		break;
@@ -59,7 +47,7 @@
 	}
 
 	if (len > 4) {
-		switch (snd_pcm_format_width(runtime->format)) {
+		switch (sample_width) {
 		case 16:
 			ws = IEC958_AES4_CON_WORDLEN_20_16;
 			break;
@@ -71,6 +59,7 @@
 			     IEC958_AES4_CON_MAX_WORDLEN_24;
 			break;
 		case 24:
+		case 32: /* Assume 24-bit width for 32-bit samples. */
 			ws = IEC958_AES4_CON_WORDLEN_24_20 |
 			     IEC958_AES4_CON_MAX_WORDLEN_24;
 			break;
@@ -92,4 +81,46 @@
 
 	return len;
 }
+
+/**
+ * snd_pcm_create_iec958_consumer - create consumer format IEC958 channel status
+ * @runtime: pcm runtime structure with ->rate filled in
+ * @cs: channel status buffer, at least four bytes
+ * @len: length of channel status buffer
+ *
+ * Create the consumer format channel status data in @cs of maximum size
+ * @len corresponding to the parameters of the PCM runtime @runtime.
+ *
+ * Drivers may wish to tweak the contents of the buffer after creation.
+ *
+ * Returns: length of buffer, or negative error code if something failed.
+ */
+int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
+	size_t len)
+{
+	return create_iec958_consumer(runtime->rate,
+				      snd_pcm_format_width(runtime->format),
+				      cs, len);
+}
 EXPORT_SYMBOL(snd_pcm_create_iec958_consumer);
+
+/**
+ * snd_pcm_create_iec958_consumer_hw_params - create IEC958 channel status
+ * @params: the hw_params instance for extracting rate and sample format
+ * @cs: channel status buffer, at least four bytes
+ * @len: length of channel status buffer
+ *
+ * Create the consumer format channel status data in @cs of maximum size
+ * @len corresponding to the rate and sample format in @params.
+ *
+ * Drivers may wish to tweak the contents of the buffer after creation.
+ *
+ * Returns: length of buffer, or negative error code if something failed.
+ */
+int snd_pcm_create_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
+					     u8 *cs, size_t len)
+{
+	return create_iec958_consumer(params_rate(params), params_width(params),
+				      cs, len);
+}
+EXPORT_SYMBOL(snd_pcm_create_iec958_consumer_hw_params);
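
A hedged usage sketch for the new snd_pcm_create_iec958_consumer_hw_params() helper: drivers that need the channel status bytes before the stream is prepared can now build them in their hw_params callback. The driver name, private struct and buffer below are hypothetical; only the helper itself, params_rate()/params_width() and snd_pcm_substream_chip() are existing ALSA interfaces:

/* hypothetical driver hw_params callback */
static int foo_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct foo_priv *foo = snd_pcm_substream_chip(substream);
	int ret;

	ret = snd_pcm_create_iec958_consumer_hw_params(params, foo->iec958_cs,
							sizeof(foo->iec958_cs));
	if (ret < 0)
		return ret;

	/* foo->iec958_cs now holds the consumer-format channel status,
	 * derived from the rate and sample width in params; the driver
	 * may tweak it before programming the hardware */
	return 0;
}
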
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 3a9b66c..bb12615 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1886,8 +1886,8 @@
 		snd_timer_interrupt(substream->timer, 1);
 #endif
  _end:
-	snd_pcm_stream_unlock_irqrestore(substream, flags);
 	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+	snd_pcm_stream_unlock_irqrestore(substream, flags);
 }
 
 EXPORT_SYMBOL(snd_pcm_period_elapsed);
@@ -2595,6 +2595,8 @@
 	};
 	int err;
 
+	if (WARN_ON(pcm->streams[stream].chmap_kctl))
+		return -EBUSY;
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 9106d8e..c61fd50 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3161,7 +3161,7 @@
 
 	substream = pcm_file->substream;
 	if (PCM_RUNTIME_CHECK(substream))
-		return -ENXIO;
+		return POLLOUT | POLLWRNORM | POLLERR;
 	runtime = substream->runtime;
 
 	poll_wait(file, &runtime->sleep, wait);
@@ -3200,7 +3200,7 @@
 
 	substream = pcm_file->substream;
 	if (PCM_RUNTIME_CHECK(substream))
-		return -ENXIO;
+		return POLLIN | POLLRDNORM | POLLERR;
 	runtime = substream->runtime;
 
 	poll_wait(file, &runtime->sleep, wait);
diff --git a/sound/core/rtctimer.c b/sound/core/rtctimer.c
deleted file mode 100644
index f3420d1..0000000
--- a/sound/core/rtctimer.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- *  RTC based high-frequency timer
- *
- *  Copyright (C) 2000 Takashi Iwai
- *	based on rtctimer.c by Steve Ratcliffe
- *
- *   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; either version 2 of the License, or
- *   (at your option) any later version.
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   You should have received a copy of the GNU General Public License
- *   along with this program; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- *
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/log2.h>
-#include <sound/core.h>
-#include <sound/timer.h>
-
-#if IS_ENABLED(CONFIG_RTC)
-
-#include <linux/mc146818rtc.h>
-
-#define RTC_FREQ	1024		/* default frequency */
-#define NANO_SEC	1000000000L	/* 10^9 in sec */
-
-/*
- * prototypes
- */
-static int rtctimer_open(struct snd_timer *t);
-static int rtctimer_close(struct snd_timer *t);
-static int rtctimer_start(struct snd_timer *t);
-static int rtctimer_stop(struct snd_timer *t);
-
-
-/*
- * The hardware dependent description for this timer.
- */
-static struct snd_timer_hardware rtc_hw = {
-	.flags =	SNDRV_TIMER_HW_AUTO |
-			SNDRV_TIMER_HW_FIRST |
-			SNDRV_TIMER_HW_TASKLET,
-	.ticks =	100000000L,		/* FIXME: XXX */
-	.open =		rtctimer_open,
-	.close =	rtctimer_close,
-	.start =	rtctimer_start,
-	.stop =		rtctimer_stop,
-};
-
-static int rtctimer_freq = RTC_FREQ;		/* frequency */
-static struct snd_timer *rtctimer;
-static struct tasklet_struct rtc_tasklet;
-static rtc_task_t rtc_task;
-
-
-static int
-rtctimer_open(struct snd_timer *t)
-{
-	int err;
-
-	err = rtc_register(&rtc_task);
-	if (err < 0)
-		return err;
-	t->private_data = &rtc_task;
-	return 0;
-}
-
-static int
-rtctimer_close(struct snd_timer *t)
-{
-	rtc_task_t *rtc = t->private_data;
-	if (rtc) {
-		rtc_unregister(rtc);
-		tasklet_kill(&rtc_tasklet);
-		t->private_data = NULL;
-	}
-	return 0;
-}
-
-static int
-rtctimer_start(struct snd_timer *timer)
-{
-	rtc_task_t *rtc = timer->private_data;
-	if (snd_BUG_ON(!rtc))
-		return -EINVAL;
-	rtc_control(rtc, RTC_IRQP_SET, rtctimer_freq);
-	rtc_control(rtc, RTC_PIE_ON, 0);
-	return 0;
-}
-
-static int
-rtctimer_stop(struct snd_timer *timer)
-{
-	rtc_task_t *rtc = timer->private_data;
-	if (snd_BUG_ON(!rtc))
-		return -EINVAL;
-	rtc_control(rtc, RTC_PIE_OFF, 0);
-	return 0;
-}
-
-static void rtctimer_tasklet(unsigned long data)
-{
-	snd_timer_interrupt((struct snd_timer *)data, 1);
-}
-
-/*
- * interrupt
- */
-static void rtctimer_interrupt(void *private_data)
-{
-	tasklet_schedule(private_data);
-}
-
-
-/*
- *  ENTRY functions
- */
-static int __init rtctimer_init(void)
-{
-	int err;
-	struct snd_timer *timer;
-
-	if (rtctimer_freq < 2 || rtctimer_freq > 8192 ||
-	    !is_power_of_2(rtctimer_freq)) {
-		pr_err("ALSA: rtctimer: invalid frequency %d\n", rtctimer_freq);
-		return -EINVAL;
-	}
-
-	/* Create a new timer and set up the fields */
-	err = snd_timer_global_new("rtc", SNDRV_TIMER_GLOBAL_RTC, &timer);
-	if (err < 0)
-		return err;
-
-	timer->module = THIS_MODULE;
-	strcpy(timer->name, "RTC timer");
-	timer->hw = rtc_hw;
-	timer->hw.resolution = NANO_SEC / rtctimer_freq;
-
-	tasklet_init(&rtc_tasklet, rtctimer_tasklet, (unsigned long)timer);
-
-	/* set up RTC callback */
-	rtc_task.func = rtctimer_interrupt;
-	rtc_task.private_data = &rtc_tasklet;
-
-	err = snd_timer_global_register(timer);
-	if (err < 0) {
-		snd_timer_global_free(timer);
-		return err;
-	}
-	rtctimer = timer; /* remember this */
-
-	return 0;
-}
-
-static void __exit rtctimer_exit(void)
-{
-	if (rtctimer) {
-		snd_timer_global_free(rtctimer);
-		rtctimer = NULL;
-	}
-}
-
-
-/*
- * exported stuff
- */
-module_init(rtctimer_init)
-module_exit(rtctimer_exit)
-
-module_param(rtctimer_freq, int, 0444);
-MODULE_PARM_DESC(rtctimer_freq, "timer frequency in Hz");
-
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_RTC));
-
-#endif /* IS_ENABLED(CONFIG_RTC) */
diff --git a/sound/core/seq/seq.c b/sound/core/seq/seq.c
index 7e0aabb..639544b 100644
--- a/sound/core/seq/seq.c
+++ b/sound/core/seq/seq.c
@@ -47,8 +47,6 @@
 int seq_default_timer_device =
 #ifdef CONFIG_SND_SEQ_HRTIMER_DEFAULT
 	SNDRV_TIMER_GLOBAL_HRTIMER
-#elif defined(CONFIG_SND_SEQ_RTCTIMER_DEFAULT)
-	SNDRV_TIMER_GLOBAL_RTC
 #else
 	SNDRV_TIMER_GLOBAL_SYSTEM
 #endif
diff --git a/sound/core/timer.c b/sound/core/timer.c
index aa1b15c..e722022 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -37,8 +37,6 @@
 
 #if IS_ENABLED(CONFIG_SND_HRTIMER)
 #define DEFAULT_TIMER_LIMIT 4
-#elif IS_ENABLED(CONFIG_SND_RTCTIMER)
-#define DEFAULT_TIMER_LIMIT 2
 #else
 #define DEFAULT_TIMER_LIMIT 1
 #endif
@@ -1019,8 +1017,8 @@
 		njiff += timer->sticks - priv->correction;
 		priv->correction = 0;
 	}
-	priv->last_expires = priv->tlist.expires = njiff;
-	add_timer(&priv->tlist);
+	priv->last_expires = njiff;
+	mod_timer(&priv->tlist, njiff);
 	return 0;
 }
 
@@ -1225,6 +1223,7 @@
 		tu->tstamp = *tstamp;
 	if ((tu->filter & (1 << event)) == 0 || !tu->tread)
 		return;
+	memset(&r1, 0, sizeof(r1));
 	r1.event = event;
 	r1.tstamp = *tstamp;
 	r1.val = resolution;
@@ -1267,6 +1266,7 @@
 	}
 	if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
 	    tu->last_resolution != resolution) {
+		memset(&r1, 0, sizeof(r1));
 		r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
 		r1.tstamp = tstamp;
 		r1.val = resolution;
@@ -1502,17 +1502,13 @@
 	return err;
 }
 
-static int snd_timer_user_gparams(struct file *file,
-				  struct snd_timer_gparams __user *_gparams)
+static int timer_set_gparams(struct snd_timer_gparams *gparams)
 {
-	struct snd_timer_gparams gparams;
 	struct snd_timer *t;
 	int err;
 
-	if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
-		return -EFAULT;
 	mutex_lock(&register_mutex);
-	t = snd_timer_find(&gparams.tid);
+	t = snd_timer_find(&gparams->tid);
 	if (!t) {
 		err = -ENODEV;
 		goto _error;
@@ -1525,12 +1521,22 @@
 		err = -ENOSYS;
 		goto _error;
 	}
-	err = t->hw.set_period(t, gparams.period_num, gparams.period_den);
+	err = t->hw.set_period(t, gparams->period_num, gparams->period_den);
 _error:
 	mutex_unlock(&register_mutex);
 	return err;
 }
 
+static int snd_timer_user_gparams(struct file *file,
+				  struct snd_timer_gparams __user *_gparams)
+{
+	struct snd_timer_gparams gparams;
+
+	if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
+		return -EFAULT;
+	return timer_set_gparams(&gparams);
+}
+
 static int snd_timer_user_gstatus(struct file *file,
 				  struct snd_timer_gstatus __user *_gstatus)
 {
@@ -1733,6 +1739,7 @@
 	if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
 		if (tu->tread) {
 			struct snd_timer_tread tread;
+			memset(&tread, 0, sizeof(tread));
 			tread.event = SNDRV_TIMER_EVENT_EARLY;
 			tread.tstamp.tv_sec = 0;
 			tread.tstamp.tv_nsec = 0;
diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
index 2e90822..6a437eb 100644
--- a/sound/core/timer_compat.c
+++ b/sound/core/timer_compat.c
@@ -22,6 +22,19 @@
 
 #include <linux/compat.h>
 
+/*
+ * ILP32 and LP64 differ in the size of the 'long' type, and storage
+ * alignment also differs between architectures. The '__packed' qualifier
+ * is used here so that the size of this structure is a multiple of 4 and
+ * it fits any architecture with 32-bit storage alignment.
+ */
+struct snd_timer_gparams32 {
+	struct snd_timer_id tid;
+	u32 period_num;
+	u32 period_den;
+	unsigned char reserved[32];
+} __packed;
+
 struct snd_timer_info32 {
 	u32 flags;
 	s32 card;
@@ -32,6 +45,19 @@
 	unsigned char reserved[64];
 };
 
+static int snd_timer_user_gparams_compat(struct file *file,
+					struct snd_timer_gparams32 __user *user)
+{
+	struct snd_timer_gparams gparams;
+
+	if (copy_from_user(&gparams.tid, &user->tid, sizeof(gparams.tid)) ||
+	    get_user(gparams.period_num, &user->period_num) ||
+	    get_user(gparams.period_den, &user->period_den))
+		return -EFAULT;
+
+	return timer_set_gparams(&gparams);
+}
+
 static int snd_timer_user_info_compat(struct file *file,
 				      struct snd_timer_info32 __user *_info)
 {
@@ -99,6 +125,7 @@
  */
 
 enum {
+	SNDRV_TIMER_IOCTL_GPARAMS32 = _IOW('T', 0x04, struct snd_timer_gparams32),
 	SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
 	SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
 #ifdef CONFIG_X86_X32
@@ -114,7 +141,6 @@
 	case SNDRV_TIMER_IOCTL_PVERSION:
 	case SNDRV_TIMER_IOCTL_TREAD:
 	case SNDRV_TIMER_IOCTL_GINFO:
-	case SNDRV_TIMER_IOCTL_GPARAMS:
 	case SNDRV_TIMER_IOCTL_GSTATUS:
 	case SNDRV_TIMER_IOCTL_SELECT:
 	case SNDRV_TIMER_IOCTL_PARAMS:
@@ -128,6 +154,8 @@
 	case SNDRV_TIMER_IOCTL_PAUSE_OLD:
 	case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
 		return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+	case SNDRV_TIMER_IOCTL_GPARAMS32:
+		return snd_timer_user_gparams_compat(file, argp);
 	case SNDRV_TIMER_IOCTL_INFO32:
 		return snd_timer_user_info_compat(file, argp);
 	case SNDRV_TIMER_IOCTL_STATUS32:
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 2a779c2..ab894ed 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -134,6 +134,7 @@
 	 Say Y here to include support for TASCAM.
 	  * FW-1884
 	  * FW-1082
+	  * FW-1804
 
 	 To compile this driver as a module, choose M here: the module
 	 will be called snd-firewire-tascam.
diff --git a/sound/firewire/Makefile b/sound/firewire/Makefile
index 003c090..0ee1fb1 100644
--- a/sound/firewire/Makefile
+++ b/sound/firewire/Makefile
@@ -1,3 +1,6 @@
+# To find a header included by define_trace.h.
+CFLAGS_amdtp-stream.o	:= -I$(src)
+
 snd-firewire-lib-objs := lib.o iso-resources.o packets-buffer.o \
 			 fcp.o cmp.o amdtp-stream.o amdtp-am824.o
 snd-isight-objs := isight.o
diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
new file mode 100644
index 0000000..9c04faf
--- /dev/null
+++ b/sound/firewire/amdtp-stream-trace.h
@@ -0,0 +1,110 @@
+/*
+ * amdtp-stream-trace.h - tracepoint definitions to dump a part of packet data
+ *
+ * Copyright (c) 2016 Takashi Sakamoto
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM		snd_firewire_lib
+
+#if !defined(_AMDTP_STREAM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _AMDTP_STREAM_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(in_packet,
+	TP_PROTO(const struct amdtp_stream *s, u32 cycles, u32 cip_header[2], unsigned int payload_quadlets, unsigned int index),
+	TP_ARGS(s, cycles, cip_header, payload_quadlets, index),
+	TP_STRUCT__entry(
+		__field(unsigned int, second)
+		__field(unsigned int, cycle)
+		__field(int, channel)
+		__field(int, src)
+		__field(int, dest)
+		__field(u32, cip_header0)
+		__field(u32, cip_header1)
+		__field(unsigned int, payload_quadlets)
+		__field(unsigned int, packet_index)
+		__field(unsigned int, irq)
+		__field(unsigned int, index)
+	),
+	TP_fast_assign(
+		__entry->second = cycles / CYCLES_PER_SECOND;
+		__entry->cycle = cycles % CYCLES_PER_SECOND;
+		__entry->channel = s->context->channel;
+		__entry->src = fw_parent_device(s->unit)->node_id;
+		__entry->dest = fw_parent_device(s->unit)->card->node_id;
+		__entry->cip_header0 = cip_header[0];
+		__entry->cip_header1 = cip_header[1];
+		__entry->payload_quadlets = payload_quadlets;
+		__entry->packet_index = s->packet_index;
+		__entry->irq = !!in_interrupt();
+		__entry->index = index;
+	),
+	TP_printk(
+		"%02u %04u %04x %04x %02d %08x %08x %03u %02u %01u %02u",
+		__entry->second,
+		__entry->cycle,
+		__entry->src,
+		__entry->dest,
+		__entry->channel,
+		__entry->cip_header0,
+		__entry->cip_header1,
+		__entry->payload_quadlets,
+		__entry->packet_index,
+		__entry->irq,
+		__entry->index)
+);
+
+TRACE_EVENT(out_packet,
+	TP_PROTO(const struct amdtp_stream *s, u32 cycles, __be32 *cip_header, unsigned int payload_length, unsigned int index),
+	TP_ARGS(s, cycles, cip_header, payload_length, index),
+	TP_STRUCT__entry(
+		__field(unsigned int, second)
+		__field(unsigned int, cycle)
+		__field(int, channel)
+		__field(int, src)
+		__field(int, dest)
+		__field(u32, cip_header0)
+		__field(u32, cip_header1)
+		__field(unsigned int, payload_quadlets)
+		__field(unsigned int, packet_index)
+		__field(unsigned int, irq)
+		__field(unsigned int, index)
+	),
+	TP_fast_assign(
+		__entry->second = cycles / CYCLES_PER_SECOND;
+		__entry->cycle = cycles % CYCLES_PER_SECOND;
+		__entry->channel = s->context->channel;
+		__entry->src = fw_parent_device(s->unit)->card->node_id;
+		__entry->dest = fw_parent_device(s->unit)->node_id;
+		__entry->cip_header0 = be32_to_cpu(cip_header[0]);
+		__entry->cip_header1 = be32_to_cpu(cip_header[1]);
+		__entry->payload_quadlets = payload_length / 4;
+		__entry->packet_index = s->packet_index;
+		__entry->irq = !!in_interrupt();
+		__entry->index = index;
+	),
+	TP_printk(
+		"%02u %04u %04x %04x %02d %08x %08x %03u %02u %01u %02u",
+		__entry->second,
+		__entry->cycle,
+		__entry->src,
+		__entry->dest,
+		__entry->channel,
+		__entry->cip_header0,
+		__entry->cip_header1,
+		__entry->payload_quadlets,
+		__entry->packet_index,
+		__entry->irq,
+		__entry->index)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH	.
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE	amdtp-stream-trace
+#include <trace/define_trace.h>
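
Once built in, the two tracepoints above show up as ordinary trace events in the snd_firewire_lib group (for example events/snd_firewire_lib/in_packet under the tracefs mount point) and can be enabled like any other event to capture the CIP headers and cycle information formatted by TP_printk().
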
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index ed29026..00060c4 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -19,6 +19,10 @@
 #define CYCLES_PER_SECOND	8000
 #define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)
 
+/* Always support Linux tracing subsystem. */
+#define CREATE_TRACE_POINTS
+#include "amdtp-stream-trace.h"
+
 #define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */
 
 /* isochronous header parameters */
@@ -87,7 +91,6 @@
 
 	init_waitqueue_head(&s->callback_wait);
 	s->callbacked = false;
-	s->sync_slave = NULL;
 
 	s->fmt = fmt;
 	s->process_data_blocks = process_data_blocks;
@@ -102,6 +105,10 @@
  */
 void amdtp_stream_destroy(struct amdtp_stream *s)
 {
+	/* Not initialized. */
+	if (s->protocol == NULL)
+		return;
+
 	WARN_ON(amdtp_stream_running(s));
 	kfree(s->protocol);
 	mutex_destroy(&s->mutex);
@@ -244,7 +251,6 @@
 	tasklet_kill(&s->period_tasklet);
 	s->pcm_buffer_pointer = 0;
 	s->pcm_period_pointer = 0;
-	s->pointer_flush = true;
 }
 EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
 
@@ -349,7 +355,6 @@
 	s->pcm_period_pointer += frames;
 	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
 		s->pcm_period_pointer -= pcm->runtime->period_size;
-		s->pointer_flush = false;
 		tasklet_hi_schedule(&s->period_tasklet);
 	}
 }
@@ -363,9 +368,8 @@
 		snd_pcm_period_elapsed(pcm);
 }
 
-static int queue_packet(struct amdtp_stream *s,
-			unsigned int header_length,
-			unsigned int payload_length, bool skip)
+static int queue_packet(struct amdtp_stream *s, unsigned int header_length,
+			unsigned int payload_length)
 {
 	struct fw_iso_packet p = {0};
 	int err = 0;
@@ -376,8 +380,10 @@
 	p.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
 	p.tag = TAG_CIP;
 	p.header_length = header_length;
-	p.payload_length = (!skip) ? payload_length : 0;
-	p.skip = skip;
+	if (payload_length > 0)
+		p.payload_length = payload_length;
+	else
+		p.skip = true;
 	err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer,
 				   s->buffer.packets[s->packet_index].offset);
 	if (err < 0) {
@@ -392,27 +398,30 @@
 }
 
 static inline int queue_out_packet(struct amdtp_stream *s,
-				   unsigned int payload_length, bool skip)
+				   unsigned int payload_length)
 {
-	return queue_packet(s, OUT_PACKET_HEADER_SIZE,
-			    payload_length, skip);
+	return queue_packet(s, OUT_PACKET_HEADER_SIZE, payload_length);
 }
 
 static inline int queue_in_packet(struct amdtp_stream *s)
 {
 	return queue_packet(s, IN_PACKET_HEADER_SIZE,
-			    amdtp_stream_get_max_payload(s), false);
+			    amdtp_stream_get_max_payload(s));
 }
 
-static int handle_out_packet(struct amdtp_stream *s, unsigned int data_blocks,
-			     unsigned int syt)
+static int handle_out_packet(struct amdtp_stream *s, unsigned int cycle,
+			     unsigned int index)
 {
 	__be32 *buffer;
+	unsigned int syt;
+	unsigned int data_blocks;
 	unsigned int payload_length;
 	unsigned int pcm_frames;
 	struct snd_pcm_substream *pcm;
 
 	buffer = s->buffer.packets[s->packet_index].buffer;
+	syt = calculate_syt(s, cycle);
+	data_blocks = calculate_data_blocks(s, syt);
 	pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);
 
 	buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
@@ -424,9 +433,11 @@
 				(syt & CIP_SYT_MASK));
 
 	s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;
-
 	payload_length = 8 + data_blocks * 4 * s->data_block_quadlets;
-	if (queue_out_packet(s, payload_length, false) < 0)
+
+	trace_out_packet(s, cycle, buffer, payload_length, index);
+
+	if (queue_out_packet(s, payload_length) < 0)
 		return -EIO;
 
 	pcm = ACCESS_ONCE(s->pcm);
@@ -438,19 +449,24 @@
 }
 
 static int handle_in_packet(struct amdtp_stream *s,
-			    unsigned int payload_quadlets, __be32 *buffer,
-			    unsigned int *data_blocks, unsigned int syt)
+			    unsigned int payload_quadlets, unsigned int cycle,
+			    unsigned int index)
 {
+	__be32 *buffer;
 	u32 cip_header[2];
-	unsigned int fmt, fdf;
+	unsigned int fmt, fdf, syt;
 	unsigned int data_block_quadlets, data_block_counter, dbc_interval;
+	unsigned int data_blocks;
 	struct snd_pcm_substream *pcm;
 	unsigned int pcm_frames;
 	bool lost;
 
+	buffer = s->buffer.packets[s->packet_index].buffer;
 	cip_header[0] = be32_to_cpu(buffer[0]);
 	cip_header[1] = be32_to_cpu(buffer[1]);
 
+	trace_in_packet(s, cycle, cip_header, payload_quadlets, index);
+
 	/*
 	 * This module supports 'Two-quadlet CIP header with SYT field'.
 	 * For convenience, also check FMT field is AM824 or not.
@@ -460,7 +476,7 @@
 		dev_info_ratelimited(&s->unit->device,
 				"Invalid CIP header for AMDTP: %08X:%08X\n",
 				cip_header[0], cip_header[1]);
-		*data_blocks = 0;
+		data_blocks = 0;
 		pcm_frames = 0;
 		goto end;
 	}
@@ -471,7 +487,7 @@
 		dev_info_ratelimited(&s->unit->device,
 				     "Detect unexpected protocol: %08x %08x\n",
 				     cip_header[0], cip_header[1]);
-		*data_blocks = 0;
+		data_blocks = 0;
 		pcm_frames = 0;
 		goto end;
 	}
@@ -480,7 +496,7 @@
 	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
 	if (payload_quadlets < 3 ||
 	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
-		*data_blocks = 0;
+		data_blocks = 0;
 	} else {
 		data_block_quadlets =
 			(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
@@ -494,12 +510,12 @@
 		if (s->flags & CIP_WRONG_DBS)
 			data_block_quadlets = s->data_block_quadlets;
 
-		*data_blocks = (payload_quadlets - 2) / data_block_quadlets;
+		data_blocks = (payload_quadlets - 2) / data_block_quadlets;
 	}
 
 	/* Check data block counter continuity */
 	data_block_counter = cip_header[0] & CIP_DBC_MASK;
-	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
+	if (data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
 	    s->data_block_counter != UINT_MAX)
 		data_block_counter = s->data_block_counter;
 
@@ -510,10 +526,10 @@
 	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
 		lost = data_block_counter != s->data_block_counter;
 	} else {
-		if ((*data_blocks > 0) && (s->tx_dbc_interval > 0))
+		if (data_blocks > 0 && s->tx_dbc_interval > 0)
 			dbc_interval = s->tx_dbc_interval;
 		else
-			dbc_interval = *data_blocks;
+			dbc_interval = data_blocks;
 
 		lost = data_block_counter !=
 		       ((s->data_block_counter + dbc_interval) & 0xff);
@@ -526,13 +542,14 @@
 		return -EIO;
 	}
 
-	pcm_frames = s->process_data_blocks(s, buffer + 2, *data_blocks, &syt);
+	syt = be32_to_cpu(buffer[1]) & CIP_SYT_MASK;
+	pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);
 
 	if (s->flags & CIP_DBC_IS_END_EVENT)
 		s->data_block_counter = data_block_counter;
 	else
 		s->data_block_counter =
-				(data_block_counter + *data_blocks) & 0xff;
+				(data_block_counter + data_blocks) & 0xff;
 end:
 	if (queue_in_packet(s) < 0)
 		return -EIO;
@@ -544,29 +561,50 @@
 	return 0;
 }
 
-static void out_stream_callback(struct fw_iso_context *context, u32 cycle,
+/*
+ * In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the
+ * second. On the other hand, the DMA descriptors of 1394 OHCI use only 3 bits
+ * for it. Thus, via the Linux firewire subsystem, we get just those 3 bits.
+ */
+static inline u32 compute_cycle_count(u32 tstamp)
+{
+	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
+}
+
+static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
+{
+	cycle += addend;
+	if (cycle >= 8 * CYCLES_PER_SECOND)
+		cycle -= 8 * CYCLES_PER_SECOND;
+	return cycle;
+}
+
+static inline u32 decrement_cycle_count(u32 cycle, unsigned int subtrahend)
+{
+	if (cycle < subtrahend)
+		cycle += 8 * CYCLES_PER_SECOND;
+	return cycle - subtrahend;
+}
+
+static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
 				size_t header_length, void *header,
 				void *private_data)
 {
 	struct amdtp_stream *s = private_data;
-	unsigned int i, syt, packets = header_length / 4;
-	unsigned int data_blocks;
+	unsigned int i, packets = header_length / 4;
+	u32 cycle;
 
 	if (s->packet_index < 0)
 		return;
 
-	/*
-	 * Compute the cycle of the last queued packet.
-	 * (We need only the four lowest bits for the SYT, so we can ignore
-	 * that bits 0-11 must wrap around at 3072.)
-	 */
-	cycle += QUEUE_LENGTH - packets;
+	cycle = compute_cycle_count(tstamp);
+
+	/* Align to actual cycle count for the last packet. */
+	cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets);
 
 	for (i = 0; i < packets; ++i) {
-		syt = calculate_syt(s, ++cycle);
-		data_blocks = calculate_data_blocks(s, syt);
-
-		if (handle_out_packet(s, data_blocks, syt) < 0) {
+		cycle = increment_cycle_count(cycle, 1);
+		if (handle_out_packet(s, cycle, i) < 0) {
 			s->packet_index = -1;
 			amdtp_stream_pcm_abort(s);
 			return;
@@ -576,15 +614,15 @@
 	fw_iso_context_queue_flush(s->context);
 }
 
-static void in_stream_callback(struct fw_iso_context *context, u32 cycle,
+static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
 			       size_t header_length, void *header,
 			       void *private_data)
 {
 	struct amdtp_stream *s = private_data;
-	unsigned int p, syt, packets;
+	unsigned int i, packets;
 	unsigned int payload_quadlets, max_payload_quadlets;
-	unsigned int data_blocks;
-	__be32 *buffer, *headers = header;
+	__be32 *headers = header;
+	u32 cycle;
 
 	if (s->packet_index < 0)
 		return;
@@ -592,70 +630,44 @@
 	/* The number of packets in buffer */
 	packets = header_length / IN_PACKET_HEADER_SIZE;
 
+	cycle = compute_cycle_count(tstamp);
+
+	/* Align to actual cycle count for the last packet. */
+	cycle = decrement_cycle_count(cycle, packets);
+
 	/* For buffer-over-run prevention. */
 	max_payload_quadlets = amdtp_stream_get_max_payload(s) / 4;
 
-	for (p = 0; p < packets; p++) {
-		buffer = s->buffer.packets[s->packet_index].buffer;
+	for (i = 0; i < packets; i++) {
+		cycle = increment_cycle_count(cycle, 1);
 
 		/* The number of quadlets in this packet */
 		payload_quadlets =
-			(be32_to_cpu(headers[p]) >> ISO_DATA_LENGTH_SHIFT) / 4;
+			(be32_to_cpu(headers[i]) >> ISO_DATA_LENGTH_SHIFT) / 4;
 		if (payload_quadlets > max_payload_quadlets) {
 			dev_err(&s->unit->device,
 				"Detect jumbo payload: %02x %02x\n",
 				payload_quadlets, max_payload_quadlets);
-			s->packet_index = -1;
 			break;
 		}
 
-		syt = be32_to_cpu(buffer[1]) & CIP_SYT_MASK;
-		if (handle_in_packet(s, payload_quadlets, buffer,
-						&data_blocks, syt) < 0) {
-			s->packet_index = -1;
+		if (handle_in_packet(s, payload_quadlets, cycle, i) < 0)
 			break;
-		}
-
-		/* Process sync slave stream */
-		if (s->sync_slave && s->sync_slave->callbacked) {
-			if (handle_out_packet(s->sync_slave,
-					      data_blocks, syt) < 0) {
-				s->packet_index = -1;
-				break;
-			}
-		}
 	}
 
-	/* Queueing error or detecting discontinuity */
-	if (s->packet_index < 0) {
+	/* Queueing error or detecting invalid payload. */
+	if (i < packets) {
+		s->packet_index = -1;
 		amdtp_stream_pcm_abort(s);
-
-		/* Abort sync slave. */
-		if (s->sync_slave) {
-			s->sync_slave->packet_index = -1;
-			amdtp_stream_pcm_abort(s->sync_slave);
-		}
 		return;
 	}
 
-	/* when sync to device, flush the packets for slave stream */
-	if (s->sync_slave && s->sync_slave->callbacked)
-		fw_iso_context_queue_flush(s->sync_slave->context);
-
 	fw_iso_context_queue_flush(s->context);
 }
 
-/* processing is done by master callback */
-static void slave_stream_callback(struct fw_iso_context *context, u32 cycle,
-				  size_t header_length, void *header,
-				  void *private_data)
-{
-	return;
-}
-
 /* this is executed one time */
 static void amdtp_stream_first_callback(struct fw_iso_context *context,
-					u32 cycle, size_t header_length,
+					u32 tstamp, size_t header_length,
 					void *header, void *private_data)
 {
 	struct amdtp_stream *s = private_data;
@@ -669,12 +681,10 @@
 
 	if (s->direction == AMDTP_IN_STREAM)
 		context->callback.sc = in_stream_callback;
-	else if (s->flags & CIP_SYNC_TO_DEVICE)
-		context->callback.sc = slave_stream_callback;
 	else
 		context->callback.sc = out_stream_callback;
 
-	context->callback.sc(context, cycle, header_length, header, s);
+	context->callback.sc(context, tstamp, header_length, header, s);
 }
 
 /**
@@ -713,8 +723,7 @@
 		goto err_unlock;
 	}
 
-	if (s->direction == AMDTP_IN_STREAM &&
-	    s->flags & CIP_SKIP_INIT_DBC_CHECK)
+	if (s->direction == AMDTP_IN_STREAM)
 		s->data_block_counter = UINT_MAX;
 	else
 		s->data_block_counter = 0;
@@ -755,7 +764,7 @@
 		if (s->direction == AMDTP_IN_STREAM)
 			err = queue_in_packet(s);
 		else
-			err = queue_out_packet(s, 0, true);
+			err = queue_out_packet(s, 0);
 		if (err < 0)
 			goto err_context;
 	} while (s->packet_index > 0);
@@ -794,11 +803,24 @@
  */
 unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
 {
-	/* this optimization is allowed to be racy */
-	if (s->pointer_flush && amdtp_stream_running(s))
+	/*
+	 * This function is called in software IRQ context of period_tasklet or
+	 * in process context.
+	 *
+	 * When the software IRQ context was scheduled by the software IRQ
+	 * context of the IR/IT contexts, queued packets were already handled.
+	 * Therefore, there is no need to flush the queue in the buffer anymore.
+	 *
+	 * When the process context reaches here, some packets may already be
+	 * queued in the buffer. These packets should be handled immediately
+	 * to keep better granularity of the PCM pointer.
+	 *
+	 * Later, the process context sometimes schedules the software IRQ
+	 * context of the period_tasklet. Then, there is no need to flush the
+	 * queue for the same reason as described for the IR/IT contexts.
+	 */
+	if (!in_interrupt() && amdtp_stream_running(s))
 		fw_iso_context_flush_completions(s->context);
-	else
-		s->pointer_flush = true;
 
 	return ACCESS_ONCE(s->pcm_buffer_pointer);
 }
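
The cycle arithmetic added above is small enough to check in isolation. The following is a standalone user-space sketch (not part of the patch) of the same math; it assumes CYCLES_PER_SECOND is 8000 as in amdtp-stream.c, and that only the lower 3 bits of the second field are available, so every value wraps at 8 * 8000 cycles.

#include <stdint.h>
#include <stdio.h>

#define CYCLES_PER_SECOND	8000
#define CYCLE_MODULUS		(8 * CYCLES_PER_SECOND)

/* Same layout as the OHCI timestamp: 3 bits of second, 13 bits of cycle. */
static uint32_t compute_cycle_count(uint32_t tstamp)
{
	return ((tstamp >> 13) & 0x07) * CYCLES_PER_SECOND + (tstamp & 0x1fff);
}

static uint32_t increment_cycle_count(uint32_t cycle, unsigned int addend)
{
	return (cycle + addend) % CYCLE_MODULUS;
}

static uint32_t decrement_cycle_count(uint32_t cycle, unsigned int subtrahend)
{
	/* Assumes subtrahend <= CYCLE_MODULUS, as in the driver. */
	return (cycle + CYCLE_MODULUS - subtrahend) % CYCLE_MODULUS;
}

int main(void)
{
	/* Second field 7, cycle 7999: the last cycle before wrap-around. */
	uint32_t cycle = compute_cycle_count((7 << 13) | 7999);

	printf("%u %u %u\n", cycle,
	       increment_cycle_count(cycle, 1),	/* 63999 -> 0 */
	       decrement_cycle_count(0, 1));	/* 0 -> 63999 */
	return 0;
}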
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index 8775704..c1bc7fa 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -17,8 +17,6 @@
  * @CIP_BLOCKING: In blocking mode, each packet contains either zero or
  *	SYT_INTERVAL samples, with these two types alternating so that
  *	the overall sample rate comes out right.
- * @CIP_SYNC_TO_DEVICE: In sync to device mode, time stamp in out packets is
- *	generated by in packets. Defaultly this driver generates timestamp.
  * @CIP_EMPTY_WITH_TAG0: Only for in-stream. Empty in-packets have TAG0.
  * @CIP_DBC_IS_END_EVENT: Only for in-stream. The value of dbc in an in-packet
  *	corresponds to the end of event in the packet. Out of IEC 61883.
@@ -26,8 +24,6 @@
  *	The value of data_block_quadlets is used instead of reported value.
  * @CIP_SKIP_DBC_ZERO_CHECK: Only for in-stream.  Packets with zero in dbc is
  *	skipped for detecting discontinuity.
- * @CIP_SKIP_INIT_DBC_CHECK: Only for in-stream. The value of dbc in first
- *	packet is not continuous from an initial value.
  * @CIP_EMPTY_HAS_WRONG_DBC: Only for in-stream. The value of dbc in empty
  *	packet is wrong but the others are correct.
  * @CIP_JUMBO_PAYLOAD: Only for in-stream. The number of data blocks in an
@@ -37,14 +33,12 @@
 enum cip_flags {
 	CIP_NONBLOCKING		= 0x00,
 	CIP_BLOCKING		= 0x01,
-	CIP_SYNC_TO_DEVICE	= 0x02,
-	CIP_EMPTY_WITH_TAG0	= 0x04,
-	CIP_DBC_IS_END_EVENT	= 0x08,
-	CIP_WRONG_DBS		= 0x10,
-	CIP_SKIP_DBC_ZERO_CHECK	= 0x20,
-	CIP_SKIP_INIT_DBC_CHECK	= 0x40,
-	CIP_EMPTY_HAS_WRONG_DBC	= 0x80,
-	CIP_JUMBO_PAYLOAD	= 0x100,
+	CIP_EMPTY_WITH_TAG0	= 0x02,
+	CIP_DBC_IS_END_EVENT	= 0x04,
+	CIP_WRONG_DBS		= 0x08,
+	CIP_SKIP_DBC_ZERO_CHECK	= 0x10,
+	CIP_EMPTY_HAS_WRONG_DBC	= 0x20,
+	CIP_JUMBO_PAYLOAD	= 0x40,
 };
 
 /**
@@ -132,12 +126,10 @@
 	struct tasklet_struct period_tasklet;
 	unsigned int pcm_buffer_pointer;
 	unsigned int pcm_period_pointer;
-	bool pointer_flush;
 
 	/* To wait for first packet. */
 	bool callbacked;
 	wait_queue_head_t callback_wait;
-	struct amdtp_stream *sync_slave;
 
 	/* For backends to process data blocks. */
 	void *protocol;
@@ -223,23 +215,6 @@
 	return sfc & 1;
 }
 
-static inline void amdtp_stream_set_sync(enum cip_flags sync_mode,
-					 struct amdtp_stream *master,
-					 struct amdtp_stream *slave)
-{
-	if (sync_mode == CIP_SYNC_TO_DEVICE) {
-		master->flags |= CIP_SYNC_TO_DEVICE;
-		slave->flags |= CIP_SYNC_TO_DEVICE;
-		master->sync_slave = slave;
-	} else {
-		master->flags &= ~CIP_SYNC_TO_DEVICE;
-		slave->flags &= ~CIP_SYNC_TO_DEVICE;
-		master->sync_slave = NULL;
-	}
-
-	slave->sync_slave = NULL;
-}
-
 /**
  * amdtp_stream_wait_callback - sleep till callbacked or timeout
  * @s: the AMDTP stream
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 3e4e0756..f7e2cbd 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -67,7 +67,7 @@
 #define MODEL_MAUDIO_PROJECTMIX		0x00010091
 
 static int
-name_device(struct snd_bebob *bebob, unsigned int vendor_id)
+name_device(struct snd_bebob *bebob)
 {
 	struct fw_device *fw_dev = fw_parent_device(bebob->unit);
 	char vendor[24] = {0};
@@ -126,6 +126,17 @@
 	return err;
 }
 
+static void bebob_free(struct snd_bebob *bebob)
+{
+	snd_bebob_stream_destroy_duplex(bebob);
+	fw_unit_put(bebob->unit);
+
+	kfree(bebob->maudio_special_quirk);
+
+	mutex_destroy(&bebob->mutex);
+	kfree(bebob);
+}
+
 /*
  * This module releases the FireWire unit data after all ALSA character devices
  * are released by applications. This is for releasing stream data or finishing
@@ -137,18 +148,11 @@
 {
 	struct snd_bebob *bebob = card->private_data;
 
-	snd_bebob_stream_destroy_duplex(bebob);
-	fw_unit_put(bebob->unit);
+	mutex_lock(&devices_mutex);
+	clear_bit(bebob->card_index, devices_used);
+	mutex_unlock(&devices_mutex);
 
-	kfree(bebob->maudio_special_quirk);
-
-	if (bebob->card_index >= 0) {
-		mutex_lock(&devices_mutex);
-		clear_bit(bebob->card_index, devices_used);
-		mutex_unlock(&devices_mutex);
-	}
-
-	mutex_destroy(&bebob->mutex);
+	bebob_free(card->private_data);
 }
 
 static const struct snd_bebob_spec *
@@ -176,16 +180,17 @@
 	return strncmp(name, "FW Audiophile Bootloader", 15) != 0;
 }
 
-static int
-bebob_probe(struct fw_unit *unit,
-	    const struct ieee1394_device_id *entry)
+static void
+do_registration(struct work_struct *work)
 {
-	struct snd_card *card;
-	struct snd_bebob *bebob;
-	const struct snd_bebob_spec *spec;
+	struct snd_bebob *bebob =
+			container_of(work, struct snd_bebob, dwork.work);
 	unsigned int card_index;
 	int err;
 
+	if (bebob->registered)
+		return;
+
 	mutex_lock(&devices_mutex);
 
 	for (card_index = 0; card_index < SNDRV_CARDS; card_index++) {
@@ -193,64 +198,39 @@
 			break;
 	}
 	if (card_index >= SNDRV_CARDS) {
-		err = -ENOENT;
-		goto end;
+		mutex_unlock(&devices_mutex);
+		return;
 	}
 
-	if ((entry->vendor_id == VEN_FOCUSRITE) &&
-	    (entry->model_id == MODEL_FOCUSRITE_SAFFIRE_BOTH))
-		spec = get_saffire_spec(unit);
-	else if ((entry->vendor_id == VEN_MAUDIO1) &&
-		 (entry->model_id == MODEL_MAUDIO_AUDIOPHILE_BOTH) &&
-		 !check_audiophile_booted(unit))
-		spec = NULL;
-	else
-		spec = (const struct snd_bebob_spec *)entry->driver_data;
-
-	if (spec == NULL) {
-		if ((entry->vendor_id == VEN_MAUDIO1) ||
-		    (entry->vendor_id == VEN_MAUDIO2))
-			err = snd_bebob_maudio_load_firmware(unit);
-		else
-			err = -ENOSYS;
-		goto end;
+	err = snd_card_new(&bebob->unit->device, index[card_index],
+			   id[card_index], THIS_MODULE, 0, &bebob->card);
+	if (err < 0) {
+		mutex_unlock(&devices_mutex);
+		return;
 	}
 
-	err = snd_card_new(&unit->device, index[card_index], id[card_index],
-			   THIS_MODULE, sizeof(struct snd_bebob), &card);
-	if (err < 0)
-		goto end;
-	bebob = card->private_data;
-	bebob->card_index = card_index;
-	set_bit(card_index, devices_used);
-	card->private_free = bebob_card_free;
-
-	bebob->card = card;
-	bebob->unit = fw_unit_get(unit);
-	bebob->spec = spec;
-	mutex_init(&bebob->mutex);
-	spin_lock_init(&bebob->lock);
-	init_waitqueue_head(&bebob->hwdep_wait);
-
-	err = name_device(bebob, entry->vendor_id);
+	err = name_device(bebob);
 	if (err < 0)
 		goto error;
 
-	if ((entry->vendor_id == VEN_MAUDIO1) &&
-	    (entry->model_id == MODEL_MAUDIO_FW1814))
-		err = snd_bebob_maudio_special_discover(bebob, true);
-	else if ((entry->vendor_id == VEN_MAUDIO1) &&
-		 (entry->model_id == MODEL_MAUDIO_PROJECTMIX))
-		err = snd_bebob_maudio_special_discover(bebob, false);
-	else
+	if (bebob->spec == &maudio_special_spec) {
+		if (bebob->entry->model_id == MODEL_MAUDIO_FW1814)
+			err = snd_bebob_maudio_special_discover(bebob, true);
+		else
+			err = snd_bebob_maudio_special_discover(bebob, false);
+	} else {
 		err = snd_bebob_stream_discover(bebob);
+	}
+	if (err < 0)
+		goto error;
+
+	err = snd_bebob_stream_init_duplex(bebob);
 	if (err < 0)
 		goto error;
 
 	snd_bebob_proc_init(bebob);
 
-	if ((bebob->midi_input_ports > 0) ||
-	    (bebob->midi_output_ports > 0)) {
+	if (bebob->midi_input_ports > 0 || bebob->midi_output_ports > 0) {
 		err = snd_bebob_create_midi_devices(bebob);
 		if (err < 0)
 			goto error;
@@ -264,16 +244,75 @@
 	if (err < 0)
 		goto error;
 
-	err = snd_bebob_stream_init_duplex(bebob);
+	err = snd_card_register(bebob->card);
 	if (err < 0)
 		goto error;
 
-	if (!bebob->maudio_special_quirk) {
-		err = snd_card_register(card);
-		if (err < 0) {
-			snd_bebob_stream_destroy_duplex(bebob);
-			goto error;
-		}
+	set_bit(card_index, devices_used);
+	mutex_unlock(&devices_mutex);
+
+	/*
+	 * Once registered, the bebob instance is released together with the
+	 * sound card instance.
+	 */
+	bebob->card->private_free = bebob_card_free;
+	bebob->card->private_data = bebob;
+	bebob->registered = true;
+
+	return;
+error:
+	mutex_unlock(&devices_mutex);
+	snd_bebob_stream_destroy_duplex(bebob);
+	snd_card_free(bebob->card);
+	dev_info(&bebob->unit->device,
+		 "Sound card registration failed: %d\n", err);
+}
+
+static int
+bebob_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
+{
+	struct snd_bebob *bebob;
+	const struct snd_bebob_spec *spec;
+
+	if (entry->vendor_id == VEN_FOCUSRITE &&
+	    entry->model_id == MODEL_FOCUSRITE_SAFFIRE_BOTH)
+		spec = get_saffire_spec(unit);
+	else if (entry->vendor_id == VEN_MAUDIO1 &&
+		 entry->model_id == MODEL_MAUDIO_AUDIOPHILE_BOTH &&
+		 !check_audiophile_booted(unit))
+		spec = NULL;
+	else
+		spec = (const struct snd_bebob_spec *)entry->driver_data;
+
+	if (spec == NULL) {
+		if (entry->vendor_id == VEN_MAUDIO1 ||
+		    entry->vendor_id == VEN_MAUDIO2)
+			return snd_bebob_maudio_load_firmware(unit);
+		else
+			return -ENODEV;
+	}
+
+	/* Allocate this independently of the sound card instance. */
+	bebob = kzalloc(sizeof(struct snd_bebob), GFP_KERNEL);
+	if (bebob == NULL)
+		return -ENOMEM;
+
+	bebob->unit = fw_unit_get(unit);
+	bebob->entry = entry;
+	bebob->spec = spec;
+	dev_set_drvdata(&unit->device, bebob);
+
+	mutex_init(&bebob->mutex);
+	spin_lock_init(&bebob->lock);
+	init_waitqueue_head(&bebob->hwdep_wait);
+
+	/* Allocate and register this sound card later. */
+	INIT_DEFERRABLE_WORK(&bebob->dwork, do_registration);
+
+	if (entry->vendor_id != VEN_MAUDIO1 ||
+	    (entry->model_id != MODEL_MAUDIO_FW1814 &&
+	     entry->model_id != MODEL_MAUDIO_PROJECTMIX)) {
+		snd_fw_schedule_registration(unit, &bebob->dwork);
 	} else {
 		/*
 		 * This is a workaround. This bus reset seems to have an effect
@@ -285,19 +324,11 @@
 		 * signals from dbus and starts I/Os. To avoid I/Os till the
 		 * future bus reset, registration is done in next update().
 		 */
-		bebob->deferred_registration = true;
 		fw_schedule_bus_reset(fw_parent_device(bebob->unit)->card,
 				      false, true);
 	}
 
-	dev_set_drvdata(&unit->device, bebob);
-end:
-	mutex_unlock(&devices_mutex);
-	return err;
-error:
-	mutex_unlock(&devices_mutex);
-	snd_card_free(card);
-	return err;
+	return 0;
 }
 
 /*
@@ -324,15 +355,11 @@
 	if (bebob == NULL)
 		return;
 
-	fcp_bus_reset(bebob->unit);
-
-	if (bebob->deferred_registration) {
-		if (snd_card_register(bebob->card) < 0) {
-			snd_bebob_stream_destroy_duplex(bebob);
-			snd_card_free(bebob->card);
-		}
-		bebob->deferred_registration = false;
-	}
+	/* Postpone the work for deferred registration. */
+	if (!bebob->registered)
+		snd_fw_schedule_registration(unit, &bebob->dwork);
+	else
+		fcp_bus_reset(bebob->unit);
 }
 
 static void bebob_remove(struct fw_unit *unit)
@@ -342,8 +369,20 @@
 	if (bebob == NULL)
 		return;
 
-	/* No need to wait for releasing card object in this context. */
-	snd_card_free_when_closed(bebob->card);
+	/*
+	 * Make sure the work for registration is stopped before the sound card
+	 * is released. The work is not scheduled again because the bus reset
+	 * handler is not called anymore.
+	 */
+	cancel_delayed_work_sync(&bebob->dwork);
+
+	if (bebob->registered) {
+		/* No need to wait for releasing card object in this context. */
+		snd_card_free_when_closed(bebob->card);
+	} else {
+		/* Don't forget this case. */
+		bebob_free(bebob);
+	}
 }
 
 static const struct snd_bebob_rate_spec normal_rate_spec = {
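
All of the drivers touched by this series (bebob above, and dice, digi00x, fireworks, oxfw and tascam below) converge on the same probe/update/remove shape. The sketch below condenses that shape with a hypothetical struct snd_foo and foo_* helpers standing in for the per-driver names; the do_registration() work function and foo_free() are elided and assumed to exist.

struct snd_foo {
	struct snd_card *card;
	struct fw_unit *unit;
	bool registered;
	struct delayed_work dwork;
};

static int foo_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
{
	/* Allocate the instance independently of the sound card. */
	struct snd_foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (foo == NULL)
		return -ENOMEM;
	foo->unit = fw_unit_get(unit);
	dev_set_drvdata(&unit->device, foo);

	/* The card itself is created and registered later, once the bus settles. */
	INIT_DEFERRABLE_WORK(&foo->dwork, do_registration);
	snd_fw_schedule_registration(unit, &foo->dwork);
	return 0;
}

static void foo_update(struct fw_unit *unit)
{
	struct snd_foo *foo = dev_get_drvdata(&unit->device);

	/* A bus reset before registration simply postpones the work again. */
	if (!foo->registered)
		snd_fw_schedule_registration(unit, &foo->dwork);
}

static void foo_remove(struct fw_unit *unit)
{
	struct snd_foo *foo = dev_get_drvdata(&unit->device);

	cancel_delayed_work_sync(&foo->dwork);
	if (foo->registered)
		snd_card_free_when_closed(foo->card);	/* card's private_free cleans up */
	else
		foo_free(foo);				/* never registered: free directly */
}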
diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h
index b50bb33d..e7f1bb9 100644
--- a/sound/firewire/bebob/bebob.h
+++ b/sound/firewire/bebob/bebob.h
@@ -83,6 +83,10 @@
 	struct mutex mutex;
 	spinlock_t lock;
 
+	bool registered;
+	struct delayed_work dwork;
+
+	const struct ieee1394_device_id *entry;
 	const struct snd_bebob_spec *spec;
 
 	unsigned int midi_input_ports;
@@ -90,7 +94,6 @@
 
 	bool connected;
 
-	struct amdtp_stream *master;
 	struct amdtp_stream tx_stream;
 	struct amdtp_stream rx_stream;
 	struct cmp_connection out_conn;
@@ -111,7 +114,6 @@
 
 	/* for M-Audio special devices */
 	void *maudio_special_quirk;
-	bool deferred_registration;
 
 	/* For BeBoB version quirk. */
 	unsigned int version;
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index 77cbb02..4d3034a 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -484,30 +484,6 @@
 }
 
 static int
-get_sync_mode(struct snd_bebob *bebob, enum cip_flags *sync_mode)
-{
-	enum snd_bebob_clock_type src;
-	int err;
-
-	err = snd_bebob_stream_get_clock_src(bebob, &src);
-	if (err < 0)
-		return err;
-
-	switch (src) {
-	case SND_BEBOB_CLOCK_TYPE_INTERNAL:
-	case SND_BEBOB_CLOCK_TYPE_EXTERNAL:
-		*sync_mode = CIP_SYNC_TO_DEVICE;
-		break;
-	default:
-	case SND_BEBOB_CLOCK_TYPE_SYT:
-		*sync_mode = 0;
-		break;
-	}
-
-	return 0;
-}
-
-static int
 start_stream(struct snd_bebob *bebob, struct amdtp_stream *stream,
 	     unsigned int rate)
 {
@@ -550,8 +526,6 @@
 		goto end;
 	}
 
-	bebob->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK;
-
 	/*
 	 * BeBoB v3 transfers packets with these quirks:
 	 *  - In the beginning of streaming, the value of dbc is incremented
@@ -584,8 +558,6 @@
 int snd_bebob_stream_start_duplex(struct snd_bebob *bebob, unsigned int rate)
 {
 	const struct snd_bebob_rate_spec *rate_spec = bebob->spec->rate;
-	struct amdtp_stream *master, *slave;
-	enum cip_flags sync_mode;
 	unsigned int curr_rate;
 	int err = 0;
 
@@ -593,22 +565,11 @@
 	if (bebob->substreams_counter == 0)
 		goto end;
 
-	err = get_sync_mode(bebob, &sync_mode);
-	if (err < 0)
-		goto end;
-	if (sync_mode == CIP_SYNC_TO_DEVICE) {
-		master = &bebob->tx_stream;
-		slave  = &bebob->rx_stream;
-	} else {
-		master = &bebob->rx_stream;
-		slave  = &bebob->tx_stream;
-	}
-
 	/*
 	 * Considering JACK/FFADO streaming:
 	 * TODO: This can be removed when hwdep functionality becomes popular.
 	 */
-	err = check_connection_used_by_others(bebob, master);
+	err = check_connection_used_by_others(bebob, &bebob->rx_stream);
 	if (err < 0)
 		goto end;
 
@@ -618,11 +579,12 @@
 	 * At bus reset, connections should not be broken here. So streams need
 	 * to be re-started. This is a reason to use SKIP_INIT_DBC_CHECK flag.
 	 */
-	if (amdtp_streaming_error(master))
-		amdtp_stream_stop(master);
-	if (amdtp_streaming_error(slave))
-		amdtp_stream_stop(slave);
-	if (!amdtp_stream_running(master) && !amdtp_stream_running(slave))
+	if (amdtp_streaming_error(&bebob->rx_stream))
+		amdtp_stream_stop(&bebob->rx_stream);
+	if (amdtp_streaming_error(&bebob->tx_stream))
+		amdtp_stream_stop(&bebob->tx_stream);
+	if (!amdtp_stream_running(&bebob->rx_stream) &&
+	    !amdtp_stream_running(&bebob->tx_stream))
 		break_both_connections(bebob);
 
 	/* stop streams if rate is different */
@@ -635,16 +597,13 @@
 	if (rate == 0)
 		rate = curr_rate;
 	if (rate != curr_rate) {
-		amdtp_stream_stop(master);
-		amdtp_stream_stop(slave);
+		amdtp_stream_stop(&bebob->rx_stream);
+		amdtp_stream_stop(&bebob->tx_stream);
 		break_both_connections(bebob);
 	}
 
 	/* master should be always running */
-	if (!amdtp_stream_running(master)) {
-		amdtp_stream_set_sync(sync_mode, master, slave);
-		bebob->master = master;
-
+	if (!amdtp_stream_running(&bebob->rx_stream)) {
 		/*
 		 * NOTE:
 		 * If establishing connections at first, Yamaha GO46
@@ -666,7 +625,7 @@
 		if (err < 0)
 			goto end;
 
-		err = start_stream(bebob, master, rate);
+		err = start_stream(bebob, &bebob->rx_stream, rate);
 		if (err < 0) {
 			dev_err(&bebob->unit->device,
 				"fail to run AMDTP master stream:%d\n", err);
@@ -685,15 +644,16 @@
 				dev_err(&bebob->unit->device,
 					"fail to ensure sampling rate: %d\n",
 					err);
-				amdtp_stream_stop(master);
+				amdtp_stream_stop(&bebob->rx_stream);
 				break_both_connections(bebob);
 				goto end;
 			}
 		}
 
 		/* wait first callback */
-		if (!amdtp_stream_wait_callback(master, CALLBACK_TIMEOUT)) {
-			amdtp_stream_stop(master);
+		if (!amdtp_stream_wait_callback(&bebob->rx_stream,
+						CALLBACK_TIMEOUT)) {
+			amdtp_stream_stop(&bebob->rx_stream);
 			break_both_connections(bebob);
 			err = -ETIMEDOUT;
 			goto end;
@@ -701,20 +661,21 @@
 	}
 
 	/* start slave if needed */
-	if (!amdtp_stream_running(slave)) {
-		err = start_stream(bebob, slave, rate);
+	if (!amdtp_stream_running(&bebob->tx_stream)) {
+		err = start_stream(bebob, &bebob->tx_stream, rate);
 		if (err < 0) {
 			dev_err(&bebob->unit->device,
 				"fail to run AMDTP slave stream:%d\n", err);
-			amdtp_stream_stop(master);
+			amdtp_stream_stop(&bebob->rx_stream);
 			break_both_connections(bebob);
 			goto end;
 		}
 
 		/* wait first callback */
-		if (!amdtp_stream_wait_callback(slave, CALLBACK_TIMEOUT)) {
-			amdtp_stream_stop(slave);
-			amdtp_stream_stop(master);
+		if (!amdtp_stream_wait_callback(&bebob->tx_stream,
+						CALLBACK_TIMEOUT)) {
+			amdtp_stream_stop(&bebob->tx_stream);
+			amdtp_stream_stop(&bebob->rx_stream);
 			break_both_connections(bebob);
 			err = -ETIMEDOUT;
 		}
@@ -725,22 +686,12 @@
 
 void snd_bebob_stream_stop_duplex(struct snd_bebob *bebob)
 {
-	struct amdtp_stream *master, *slave;
-
-	if (bebob->master == &bebob->rx_stream) {
-		slave  = &bebob->tx_stream;
-		master = &bebob->rx_stream;
-	} else {
-		slave  = &bebob->rx_stream;
-		master = &bebob->tx_stream;
-	}
-
 	if (bebob->substreams_counter == 0) {
-		amdtp_stream_pcm_abort(master);
-		amdtp_stream_stop(master);
+		amdtp_stream_pcm_abort(&bebob->rx_stream);
+		amdtp_stream_stop(&bebob->rx_stream);
 
-		amdtp_stream_pcm_abort(slave);
-		amdtp_stream_stop(slave);
+		amdtp_stream_pcm_abort(&bebob->tx_stream);
+		amdtp_stream_stop(&bebob->tx_stream);
 
 		break_both_connections(bebob);
 	}
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index 845d5e5..ec4db3a 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -446,18 +446,12 @@
 
 void snd_dice_stream_destroy_duplex(struct snd_dice *dice)
 {
-	struct reg_params tx_params, rx_params;
+	unsigned int i;
 
-	snd_dice_transaction_clear_enable(dice);
-
-	if (get_register_params(dice, &tx_params, &rx_params) == 0) {
-		stop_streams(dice, AMDTP_IN_STREAM, &tx_params);
-		stop_streams(dice, AMDTP_OUT_STREAM, &rx_params);
+	for (i = 0; i < MAX_STREAMS; i++) {
+		destroy_stream(dice, AMDTP_IN_STREAM, i);
+		destroy_stream(dice, AMDTP_OUT_STREAM, i);
 	}
-
-	release_resources(dice);
-
-	dice->substreams_counter = 0;
 }
 
 void snd_dice_stream_update_duplex(struct snd_dice *dice)
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 8b64aef..25e9f77 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -20,8 +20,6 @@
 #define WEISS_CATEGORY_ID	0x00
 #define LOUD_CATEGORY_ID	0x10
 
-#define PROBE_DELAY_MS		(2 * MSEC_PER_SEC)
-
 /*
  * Some models support several isochronous channels, while these streams are not
  * always available. In this case, add the model name to this list.
@@ -201,6 +199,10 @@
 
 	dice_card_strings(dice);
 
+	err = snd_dice_stream_init_duplex(dice);
+	if (err < 0)
+		goto error;
+
 	snd_dice_create_proc(dice);
 
 	err = snd_dice_create_pcm(dice);
@@ -229,28 +231,14 @@
 
 	return;
 error:
+	snd_dice_stream_destroy_duplex(dice);
 	snd_dice_transaction_destroy(dice);
+	snd_dice_stream_destroy_duplex(dice);
 	snd_card_free(dice->card);
 	dev_info(&dice->unit->device,
 		 "Sound card registration failed: %d\n", err);
 }
 
-static void schedule_registration(struct snd_dice *dice)
-{
-	struct fw_card *fw_card = fw_parent_device(dice->unit)->card;
-	u64 now, delay;
-
-	now = get_jiffies_64();
-	delay = fw_card->reset_jiffies + msecs_to_jiffies(PROBE_DELAY_MS);
-
-	if (time_after64(delay, now))
-		delay -= now;
-	else
-		delay = 0;
-
-	mod_delayed_work(system_wq, &dice->dwork, delay);
-}
-
 static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
 {
 	struct snd_dice *dice;
@@ -273,15 +261,9 @@
 	init_completion(&dice->clock_accepted);
 	init_waitqueue_head(&dice->hwdep_wait);
 
-	err = snd_dice_stream_init_duplex(dice);
-	if (err < 0) {
-		dice_free(dice);
-		return err;
-	}
-
 	/* Allocate and register this sound card later. */
 	INIT_DEFERRABLE_WORK(&dice->dwork, do_registration);
-	schedule_registration(dice);
+	snd_fw_schedule_registration(unit, &dice->dwork);
 
 	return 0;
 }
@@ -312,7 +294,7 @@
 
 	/* Postpone the work for deferred registration. */
 	if (!dice->registered)
-		schedule_registration(dice);
+		snd_fw_schedule_registration(unit, &dice->dwork);
 
 	/* The handler address register becomes initialized. */
 	snd_dice_transaction_reinit(dice);
@@ -335,6 +317,13 @@
 		.match_flags = IEEE1394_MATCH_VERSION,
 		.version     = DICE_INTERFACE,
 	},
+	/* M-Audio Profire 610/2626 has a different value in the version field. */
+	{
+		.match_flags	= IEEE1394_MATCH_VENDOR_ID |
+				  IEEE1394_MATCH_SPECIFIER_ID,
+		.vendor_id	= 0x000d6c,
+		.specifier_id	= 0x000d6c,
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(ieee1394, dice_id_table);
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c
index 0ac92ab..b3cffd0 100644
--- a/sound/firewire/digi00x/amdtp-dot.c
+++ b/sound/firewire/digi00x/amdtp-dot.c
@@ -421,7 +421,7 @@
 
 	/* Use different mode between incoming/outgoing. */
 	if (dir == AMDTP_IN_STREAM) {
-		flags = CIP_NONBLOCKING | CIP_SKIP_INIT_DBC_CHECK;
+		flags = CIP_NONBLOCKING;
 		process_data_blocks = process_tx_data_blocks;
 	} else {
 		flags = CIP_BLOCKING;
diff --git a/sound/firewire/digi00x/digi00x-transaction.c b/sound/firewire/digi00x/digi00x-transaction.c
index 554324d..735d356 100644
--- a/sound/firewire/digi00x/digi00x-transaction.c
+++ b/sound/firewire/digi00x/digi00x-transaction.c
@@ -126,12 +126,17 @@
 	return err;
 error:
 	fw_core_remove_address_handler(&dg00x->async_handler);
-	dg00x->async_handler.address_callback = NULL;
+	dg00x->async_handler.callback_data = NULL;
 	return err;
 }
 
 void snd_dg00x_transaction_unregister(struct snd_dg00x *dg00x)
 {
+	if (dg00x->async_handler.callback_data == NULL)
+		return;
+
 	snd_fw_async_midi_port_destroy(&dg00x->out_control);
 	fw_core_remove_address_handler(&dg00x->async_handler);
+
+	dg00x->async_handler.callback_data = NULL;
 }
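
The guard added to snd_dg00x_transaction_unregister() makes the helper safe to call from both the error path of the registration work and the remove path. A minimal sketch of the idea, with a hypothetical foo_transaction type standing in for the driver's data:

struct foo_transaction {
	void *callback_data;	/* non-NULL only while the handler is registered */
};

static void foo_transaction_unregister(struct foo_transaction *t)
{
	if (t->callback_data == NULL)
		return;		/* already unregistered, or never registered */

	/* ... remove the address handler and destroy the MIDI port here ... */

	t->callback_data = NULL;
}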
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index 1f33b7a..cc4776c 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -40,10 +40,8 @@
 	return 0;
 }
 
-static void dg00x_card_free(struct snd_card *card)
+static void dg00x_free(struct snd_dg00x *dg00x)
 {
-	struct snd_dg00x *dg00x = card->private_data;
-
 	snd_dg00x_stream_destroy_duplex(dg00x);
 	snd_dg00x_transaction_unregister(dg00x);
 
@@ -52,28 +50,24 @@
 	mutex_destroy(&dg00x->mutex);
 }
 
-static int snd_dg00x_probe(struct fw_unit *unit,
-			   const struct ieee1394_device_id *entry)
+static void dg00x_card_free(struct snd_card *card)
 {
-	struct snd_card *card;
-	struct snd_dg00x *dg00x;
+	dg00x_free(card->private_data);
+}
+
+static void do_registration(struct work_struct *work)
+{
+	struct snd_dg00x *dg00x =
+			container_of(work, struct snd_dg00x, dwork.work);
 	int err;
 
-	/* create card */
-	err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE,
-			   sizeof(struct snd_dg00x), &card);
+	if (dg00x->registered)
+		return;
+
+	err = snd_card_new(&dg00x->unit->device, -1, NULL, THIS_MODULE, 0,
+			   &dg00x->card);
 	if (err < 0)
-		return err;
-	card->private_free = dg00x_card_free;
-
-	/* initialize myself */
-	dg00x = card->private_data;
-	dg00x->card = card;
-	dg00x->unit = fw_unit_get(unit);
-
-	mutex_init(&dg00x->mutex);
-	spin_lock_init(&dg00x->lock);
-	init_waitqueue_head(&dg00x->hwdep_wait);
+		return;
 
 	err = name_card(dg00x);
 	if (err < 0)
@@ -101,35 +95,86 @@
 	if (err < 0)
 		goto error;
 
-	err = snd_card_register(card);
+	err = snd_card_register(dg00x->card);
 	if (err < 0)
 		goto error;
 
+	dg00x->card->private_free = dg00x_card_free;
+	dg00x->card->private_data = dg00x;
+	dg00x->registered = true;
+
+	return;
+error:
+	snd_dg00x_transaction_unregister(dg00x);
+	snd_dg00x_stream_destroy_duplex(dg00x);
+	snd_card_free(dg00x->card);
+	dev_info(&dg00x->unit->device,
+		 "Sound card registration failed: %d\n", err);
+}
+
+static int snd_dg00x_probe(struct fw_unit *unit,
+			   const struct ieee1394_device_id *entry)
+{
+	struct snd_dg00x *dg00x;
+
+	/* Allocate this independently of the sound card instance. */
+	dg00x = kzalloc(sizeof(struct snd_dg00x), GFP_KERNEL);
+	if (dg00x == NULL)
+		return -ENOMEM;
+
+	dg00x->unit = fw_unit_get(unit);
 	dev_set_drvdata(&unit->device, dg00x);
 
-	return err;
-error:
-	snd_card_free(card);
-	return err;
+	mutex_init(&dg00x->mutex);
+	spin_lock_init(&dg00x->lock);
+	init_waitqueue_head(&dg00x->hwdep_wait);
+
+	/* Allocate and register this sound card later. */
+	INIT_DEFERRABLE_WORK(&dg00x->dwork, do_registration);
+	snd_fw_schedule_registration(unit, &dg00x->dwork);
+
+	return 0;
 }
 
 static void snd_dg00x_update(struct fw_unit *unit)
 {
 	struct snd_dg00x *dg00x = dev_get_drvdata(&unit->device);
 
+	/* Postpone the work for deferred registration. */
+	if (!dg00x->registered)
+		snd_fw_schedule_registration(unit, &dg00x->dwork);
+
 	snd_dg00x_transaction_reregister(dg00x);
 
-	mutex_lock(&dg00x->mutex);
-	snd_dg00x_stream_update_duplex(dg00x);
-	mutex_unlock(&dg00x->mutex);
+	/*
+	 * After registration, userspace can start packet streaming; only then
+	 * does this code block need to run.
+	 */
+	if (dg00x->registered) {
+		mutex_lock(&dg00x->mutex);
+		snd_dg00x_stream_update_duplex(dg00x);
+		mutex_unlock(&dg00x->mutex);
+	}
 }
 
 static void snd_dg00x_remove(struct fw_unit *unit)
 {
 	struct snd_dg00x *dg00x = dev_get_drvdata(&unit->device);
 
-	/* No need to wait for releasing card object in this context. */
-	snd_card_free_when_closed(dg00x->card);
+	/*
+	 * Make sure the work for registration is stopped before the sound card
+	 * is released. The work is not scheduled again because the bus reset
+	 * handler is not called anymore.
+	 */
+	cancel_delayed_work_sync(&dg00x->dwork);
+
+	if (dg00x->registered) {
+		/* No need to wait for releasing card object in this context. */
+		snd_card_free_when_closed(dg00x->card);
+	} else {
+		/* Don't forget this case. */
+		dg00x_free(dg00x);
+	}
 }
 
 static const struct ieee1394_device_id snd_dg00x_id_table[] = {
diff --git a/sound/firewire/digi00x/digi00x.h b/sound/firewire/digi00x/digi00x.h
index 907e739..2cd465c 100644
--- a/sound/firewire/digi00x/digi00x.h
+++ b/sound/firewire/digi00x/digi00x.h
@@ -37,6 +37,9 @@
 	struct mutex mutex;
 	spinlock_t lock;
 
+	bool registered;
+	struct delayed_work dwork;
+
 	struct amdtp_stream tx_stream;
 	struct fw_iso_resources tx_resources;
 
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 8f27b67..71a0613 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -168,11 +168,34 @@
 	       sizeof(struct snd_efw_phys_grp) * hwinfo->phys_in_grp_count);
 	memcpy(&efw->phys_out_grps, hwinfo->phys_out_grps,
 	       sizeof(struct snd_efw_phys_grp) * hwinfo->phys_out_grp_count);
+
+	/* AudioFire8 (since 2009) and AudioFirePre8 */
+	if (hwinfo->type == MODEL_ECHO_AUDIOFIRE_9)
+		efw->is_af9 = true;
+	/* These models use the same firmware. */
+	if (hwinfo->type == MODEL_ECHO_AUDIOFIRE_2 ||
+	    hwinfo->type == MODEL_ECHO_AUDIOFIRE_4 ||
+	    hwinfo->type == MODEL_ECHO_AUDIOFIRE_9 ||
+	    hwinfo->type == MODEL_GIBSON_RIP ||
+	    hwinfo->type == MODEL_GIBSON_GOLDTOP)
+		efw->is_fireworks3 = true;
 end:
 	kfree(hwinfo);
 	return err;
 }
 
+static void efw_free(struct snd_efw *efw)
+{
+	snd_efw_stream_destroy_duplex(efw);
+	snd_efw_transaction_remove_instance(efw);
+	fw_unit_put(efw->unit);
+
+	kfree(efw->resp_buf);
+
+	mutex_destroy(&efw->mutex);
+	kfree(efw);
+}
+
 /*
  * This module releases the FireWire unit data after all ALSA character devices
  * are released by applications. This is for releasing stream data or finishing
@@ -184,28 +207,24 @@
 {
 	struct snd_efw *efw = card->private_data;
 
-	snd_efw_stream_destroy_duplex(efw);
-	snd_efw_transaction_remove_instance(efw);
-	fw_unit_put(efw->unit);
-
-	kfree(efw->resp_buf);
-
 	if (efw->card_index >= 0) {
 		mutex_lock(&devices_mutex);
 		clear_bit(efw->card_index, devices_used);
 		mutex_unlock(&devices_mutex);
 	}
 
-	mutex_destroy(&efw->mutex);
+	efw_free(card->private_data);
 }
 
-static int
-efw_probe(struct fw_unit *unit,
-	  const struct ieee1394_device_id *entry)
+static void
+do_registration(struct work_struct *work)
 {
-	struct snd_card *card;
-	struct snd_efw *efw;
-	int card_index, err;
+	struct snd_efw *efw = container_of(work, struct snd_efw, dwork.work);
+	unsigned int card_index;
+	int err;
+
+	if (efw->registered)
+		return;
 
 	mutex_lock(&devices_mutex);
 
@@ -215,24 +234,16 @@
 			break;
 	}
 	if (card_index >= SNDRV_CARDS) {
-		err = -ENOENT;
-		goto end;
+		mutex_unlock(&devices_mutex);
+		return;
 	}
 
-	err = snd_card_new(&unit->device, index[card_index], id[card_index],
-			   THIS_MODULE, sizeof(struct snd_efw), &card);
-	if (err < 0)
-		goto end;
-	efw = card->private_data;
-	efw->card_index = card_index;
-	set_bit(card_index, devices_used);
-	card->private_free = efw_card_free;
-
-	efw->card = card;
-	efw->unit = fw_unit_get(unit);
-	mutex_init(&efw->mutex);
-	spin_lock_init(&efw->lock);
-	init_waitqueue_head(&efw->hwdep_wait);
+	err = snd_card_new(&efw->unit->device, index[card_index],
+			   id[card_index], THIS_MODULE, 0, &efw->card);
+	if (err < 0) {
+		mutex_unlock(&devices_mutex);
+		return;
+	}
 
 	/* prepare response buffer */
 	snd_efw_resp_buf_size = clamp(snd_efw_resp_buf_size,
@@ -248,16 +259,10 @@
 	err = get_hardware_info(efw);
 	if (err < 0)
 		goto error;
-	/* AudioFire8 (since 2009) and AudioFirePre8 */
-	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
-		efw->is_af9 = true;
-	/* These models uses the same firmware. */
-	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
-	    entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
-	    entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
-	    entry->model_id == MODEL_GIBSON_RIP ||
-	    entry->model_id == MODEL_GIBSON_GOLDTOP)
-		efw->is_fireworks3 = true;
+
+	err = snd_efw_stream_init_duplex(efw);
+	if (err < 0)
+		goto error;
 
 	snd_efw_proc_init(efw);
 
@@ -275,44 +280,93 @@
 	if (err < 0)
 		goto error;
 
-	err = snd_efw_stream_init_duplex(efw);
+	err = snd_card_register(efw->card);
 	if (err < 0)
 		goto error;
 
-	err = snd_card_register(card);
-	if (err < 0) {
-		snd_efw_stream_destroy_duplex(efw);
-		goto error;
-	}
+	set_bit(card_index, devices_used);
+	mutex_unlock(&devices_mutex);
 
-	dev_set_drvdata(&unit->device, efw);
-end:
-	mutex_unlock(&devices_mutex);
-	return err;
+	/*
+	 * Once registered, the efw instance is released together with the
+	 * sound card instance.
+	 */
+	efw->card->private_free = efw_card_free;
+	efw->card->private_data = efw;
+	efw->registered = true;
+
+	return;
 error:
-	snd_efw_transaction_remove_instance(efw);
 	mutex_unlock(&devices_mutex);
-	snd_card_free(card);
-	return err;
+	snd_efw_transaction_remove_instance(efw);
+	snd_efw_stream_destroy_duplex(efw);
+	snd_card_free(efw->card);
+	dev_info(&efw->unit->device,
+		 "Sound card registration failed: %d\n", err);
+}
+
+static int
+efw_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
+{
+	struct snd_efw *efw;
+
+	efw = kzalloc(sizeof(struct snd_efw), GFP_KERNEL);
+	if (efw == NULL)
+		return -ENOMEM;
+
+	efw->unit = fw_unit_get(unit);
+	dev_set_drvdata(&unit->device, efw);
+
+	mutex_init(&efw->mutex);
+	spin_lock_init(&efw->lock);
+	init_waitqueue_head(&efw->hwdep_wait);
+
+	/* Allocate and register this sound card later. */
+	INIT_DEFERRABLE_WORK(&efw->dwork, do_registration);
+	snd_fw_schedule_registration(unit, &efw->dwork);
+
+	return 0;
 }
 
 static void efw_update(struct fw_unit *unit)
 {
 	struct snd_efw *efw = dev_get_drvdata(&unit->device);
 
+	/* Postpone the work for deferred registration. */
+	if (!efw->registered)
+		snd_fw_schedule_registration(unit, &efw->dwork);
+
 	snd_efw_transaction_bus_reset(efw->unit);
 
-	mutex_lock(&efw->mutex);
-	snd_efw_stream_update_duplex(efw);
-	mutex_unlock(&efw->mutex);
+	/*
+	 * After registration, userspace can start packet streaming; only then
+	 * does this code block need to run.
+	 */
+	if (efw->registered) {
+		mutex_lock(&efw->mutex);
+		snd_efw_stream_update_duplex(efw);
+		mutex_unlock(&efw->mutex);
+	}
 }
 
 static void efw_remove(struct fw_unit *unit)
 {
 	struct snd_efw *efw = dev_get_drvdata(&unit->device);
 
-	/* No need to wait for releasing card object in this context. */
-	snd_card_free_when_closed(efw->card);
+	/*
+	 * Make sure the work for registration is stopped before the sound card
+	 * is released. The work is not scheduled again because the bus reset
+	 * handler is not called anymore.
+	 */
+	cancel_delayed_work_sync(&efw->dwork);
+
+	if (efw->registered) {
+		/* No need to wait for releasing card object in this context. */
+		snd_card_free_when_closed(efw->card);
+	} else {
+		/* Don't forget this case. */
+		efw_free(efw);
+	}
 }
 
 static const struct ieee1394_device_id efw_id_table[] = {
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 96c4e0c..03ed352 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -65,6 +65,9 @@
 	struct mutex mutex;
 	spinlock_t lock;
 
+	bool registered;
+	struct delayed_work dwork;
+
 	/* for transaction */
 	u32 seqnum;
 	bool resp_addr_changable;
@@ -81,7 +84,6 @@
 	unsigned int pcm_capture_channels[SND_EFW_MULTIPLIER_MODES];
 	unsigned int pcm_playback_channels[SND_EFW_MULTIPLIER_MODES];
 
-	struct amdtp_stream *master;
 	struct amdtp_stream tx_stream;
 	struct amdtp_stream rx_stream;
 	struct cmp_connection out_conn;
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index 425db8d..ee47924 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -121,23 +121,6 @@
 }
 
 static int
-get_sync_mode(struct snd_efw *efw, enum cip_flags *sync_mode)
-{
-	enum snd_efw_clock_source clock_source;
-	int err;
-
-	err = snd_efw_command_get_clock_source(efw, &clock_source);
-	if (err < 0)
-		return err;
-
-	if (clock_source == SND_EFW_CLOCK_SOURCE_SYTMATCH)
-		return -ENOSYS;
-
-	*sync_mode = CIP_SYNC_TO_DEVICE;
-	return 0;
-}
-
-static int
 check_connection_used_by_others(struct snd_efw *efw, struct amdtp_stream *s)
 {
 	struct cmp_connection *conn;
@@ -208,9 +191,6 @@
 
 int snd_efw_stream_start_duplex(struct snd_efw *efw, unsigned int rate)
 {
-	struct amdtp_stream *master, *slave;
-	unsigned int slave_substreams;
-	enum cip_flags sync_mode;
 	unsigned int curr_rate;
 	int err = 0;
 
@@ -218,32 +198,19 @@
 	if (efw->playback_substreams == 0 && efw->capture_substreams  == 0)
 		goto end;
 
-	err = get_sync_mode(efw, &sync_mode);
-	if (err < 0)
-		goto end;
-	if (sync_mode == CIP_SYNC_TO_DEVICE) {
-		master = &efw->tx_stream;
-		slave  = &efw->rx_stream;
-		slave_substreams  = efw->playback_substreams;
-	} else {
-		master = &efw->rx_stream;
-		slave  = &efw->tx_stream;
-		slave_substreams = efw->capture_substreams;
-	}
-
 	/*
 	 * Considering JACK/FFADO streaming:
 	 * TODO: This can be removed when hwdep functionality becomes popular.
 	 */
-	err = check_connection_used_by_others(efw, master);
+	err = check_connection_used_by_others(efw, &efw->rx_stream);
 	if (err < 0)
 		goto end;
 
 	/* packet queueing error */
-	if (amdtp_streaming_error(slave))
-		stop_stream(efw, slave);
-	if (amdtp_streaming_error(master))
-		stop_stream(efw, master);
+	if (amdtp_streaming_error(&efw->tx_stream))
+		stop_stream(efw, &efw->tx_stream);
+	if (amdtp_streaming_error(&efw->rx_stream))
+		stop_stream(efw, &efw->rx_stream);
 
 	/* stop streams if rate is different */
 	err = snd_efw_command_get_sampling_rate(efw, &curr_rate);
@@ -252,20 +219,17 @@
 	if (rate == 0)
 		rate = curr_rate;
 	if (rate != curr_rate) {
-		stop_stream(efw, slave);
-		stop_stream(efw, master);
+		stop_stream(efw, &efw->tx_stream);
+		stop_stream(efw, &efw->rx_stream);
 	}
 
 	/* master should be always running */
-	if (!amdtp_stream_running(master)) {
-		amdtp_stream_set_sync(sync_mode, master, slave);
-		efw->master = master;
-
+	if (!amdtp_stream_running(&efw->rx_stream)) {
 		err = snd_efw_command_set_sampling_rate(efw, rate);
 		if (err < 0)
 			goto end;
 
-		err = start_stream(efw, master, rate);
+		err = start_stream(efw, &efw->rx_stream, rate);
 		if (err < 0) {
 			dev_err(&efw->unit->device,
 				"fail to start AMDTP master stream:%d\n", err);
@@ -274,12 +238,13 @@
 	}
 
 	/* start slave if needed */
-	if (slave_substreams > 0 && !amdtp_stream_running(slave)) {
-		err = start_stream(efw, slave, rate);
+	if (efw->capture_substreams > 0 &&
+	    !amdtp_stream_running(&efw->tx_stream)) {
+		err = start_stream(efw, &efw->tx_stream, rate);
 		if (err < 0) {
 			dev_err(&efw->unit->device,
 				"fail to start AMDTP slave stream:%d\n", err);
-			stop_stream(efw, master);
+			stop_stream(efw, &efw->rx_stream);
 		}
 	}
 end:
@@ -288,26 +253,11 @@
 
 void snd_efw_stream_stop_duplex(struct snd_efw *efw)
 {
-	struct amdtp_stream *master, *slave;
-	unsigned int master_substreams, slave_substreams;
+	if (efw->capture_substreams == 0) {
+		stop_stream(efw, &efw->tx_stream);
 
-	if (efw->master == &efw->rx_stream) {
-		slave  = &efw->tx_stream;
-		master = &efw->rx_stream;
-		slave_substreams  = efw->capture_substreams;
-		master_substreams = efw->playback_substreams;
-	} else {
-		slave  = &efw->rx_stream;
-		master = &efw->tx_stream;
-		slave_substreams  = efw->playback_substreams;
-		master_substreams = efw->capture_substreams;
-	}
-
-	if (slave_substreams == 0) {
-		stop_stream(efw, slave);
-
-		if (master_substreams == 0)
-			stop_stream(efw, master);
+		if (efw->playback_substreams == 0)
+			stop_stream(efw, &efw->rx_stream);
 	}
 }
 
diff --git a/sound/firewire/lib.c b/sound/firewire/lib.c
index f80aafa..ca4dfcf 100644
--- a/sound/firewire/lib.c
+++ b/sound/firewire/lib.c
@@ -67,6 +67,38 @@
 }
 EXPORT_SYMBOL(snd_fw_transaction);
 
+#define PROBE_DELAY_MS		(2 * MSEC_PER_SEC)
+
+/**
+ * snd_fw_schedule_registration - schedule work for sound card registration
+ * @unit: an instance for unit on IEEE 1394 bus
+ * @dwork: delayed work with callback function
+ *
+ * This function is not designed for general purposes. When a new unit is
+ * connected to the IEEE 1394 bus, the bus is in a bus-reset state because of
+ * the topological change. In this state, units tend to fail at both
+ * asynchronous and isochronous communication. To avoid this problem, this
+ * function is used to postpone sound card registration until after this state.
+ * Callers must set up an instance of delayed work in advance.
+ */
+void snd_fw_schedule_registration(struct fw_unit *unit,
+				  struct delayed_work *dwork)
+{
+	u64 now, delay;
+
+	now = get_jiffies_64();
+	delay = fw_parent_device(unit)->card->reset_jiffies
+					+ msecs_to_jiffies(PROBE_DELAY_MS);
+
+	if (time_after64(delay, now))
+		delay -= now;
+	else
+		delay = 0;
+
+	mod_delayed_work(system_wq, dwork, delay);
+}
+EXPORT_SYMBOL(snd_fw_schedule_registration);
+
 static void async_midi_port_callback(struct fw_card *card, int rcode,
 				     void *data, size_t length,
 				     void *callback_data)
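
The delay computation in snd_fw_schedule_registration() is ordinary jiffies arithmetic: fire the work 2 seconds after the most recent bus reset, or immediately if that point has already passed. A standalone sketch (not driver code) with plain integers standing in for jiffies:

#include <stdint.h>
#include <stdio.h>

#define PROBE_DELAY_MS	2000

static uint64_t registration_delay(uint64_t now, uint64_t last_reset)
{
	uint64_t target = last_reset + PROBE_DELAY_MS;

	return target > now ? target - now : 0;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)registration_delay(500, 0));	/* 1500 */
	printf("%llu\n", (unsigned long long)registration_delay(3000, 0));	/* 0 */
	return 0;
}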
diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h
index f3f6f84..f676931 100644
--- a/sound/firewire/lib.h
+++ b/sound/firewire/lib.h
@@ -22,6 +22,9 @@
 	return rcode == RCODE_TYPE_ERROR || rcode == RCODE_ADDRESS_ERROR;
 }
 
+void snd_fw_schedule_registration(struct fw_unit *unit,
+				  struct delayed_work *dwork);
+
 struct snd_fw_async_midi_port;
 typedef int (*snd_fw_async_midi_port_fill)(
 				struct snd_rawmidi_substream *substream,
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index 7cb5743..d9361f3 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -242,8 +242,7 @@
 	 * blocks than IEC 61883-6 defines.
 	 */
 	if (stream == &oxfw->tx_stream) {
-		oxfw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK |
-					 CIP_JUMBO_PAYLOAD;
+		oxfw->tx_stream.flags |= CIP_JUMBO_PAYLOAD;
 		if (oxfw->wrong_dbs)
 			oxfw->tx_stream.flags |= CIP_WRONG_DBS;
 	}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index abedc22..e629b88 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -118,15 +118,8 @@
 	return err;
 }
 
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
-static void oxfw_card_free(struct snd_card *card)
+static void oxfw_free(struct snd_oxfw *oxfw)
 {
-	struct snd_oxfw *oxfw = card->private_data;
 	unsigned int i;
 
 	snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
@@ -144,6 +137,17 @@
 	mutex_destroy(&oxfw->mutex);
 }
 
+/*
+ * This module releases the FireWire unit data after all ALSA character devices
+ * are released by applications. This is for releasing stream data or finishing
+ * transactions safely. Thus, when returning from .remove(), this module still
+ * keeps references for the unit.
+ */
+static void oxfw_card_free(struct snd_card *card)
+{
+	oxfw_free(card->private_data);
+}
+
 static int detect_quirks(struct snd_oxfw *oxfw)
 {
 	struct fw_device *fw_dev = fw_parent_device(oxfw->unit);
@@ -205,33 +209,18 @@
 	return 0;
 }
 
-static int oxfw_probe(struct fw_unit *unit,
-		      const struct ieee1394_device_id *entry)
+static void do_registration(struct work_struct *work)
 {
-	struct snd_card *card;
-	struct snd_oxfw *oxfw;
+	struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
 	int err;
 
-	if (entry->vendor_id == VENDOR_LOUD && !detect_loud_models(unit))
-		return -ENODEV;
+	if (oxfw->registered)
+		return;
 
-	err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE,
-			   sizeof(*oxfw), &card);
+	err = snd_card_new(&oxfw->unit->device, -1, NULL, THIS_MODULE, 0,
+			   &oxfw->card);
 	if (err < 0)
-		return err;
-
-	card->private_free = oxfw_card_free;
-	oxfw = card->private_data;
-	oxfw->card = card;
-	mutex_init(&oxfw->mutex);
-	oxfw->unit = fw_unit_get(unit);
-	oxfw->entry = entry;
-	spin_lock_init(&oxfw->lock);
-	init_waitqueue_head(&oxfw->hwdep_wait);
-
-	err = snd_oxfw_stream_discover(oxfw);
-	if (err < 0)
-		goto error;
+		return;
 
 	err = name_card(oxfw);
 	if (err < 0)
@@ -241,6 +230,19 @@
 	if (err < 0)
 		goto error;
 
+	err = snd_oxfw_stream_discover(oxfw);
+	if (err < 0)
+		goto error;
+
+	err = snd_oxfw_stream_init_simplex(oxfw, &oxfw->rx_stream);
+	if (err < 0)
+		goto error;
+	if (oxfw->has_output) {
+		err = snd_oxfw_stream_init_simplex(oxfw, &oxfw->tx_stream);
+		if (err < 0)
+			goto error;
+	}
+
 	err = snd_oxfw_create_pcm(oxfw);
 	if (err < 0)
 		goto error;
@@ -255,54 +257,97 @@
 	if (err < 0)
 		goto error;
 
-	err = snd_oxfw_stream_init_simplex(oxfw, &oxfw->rx_stream);
+	err = snd_card_register(oxfw->card);
 	if (err < 0)
 		goto error;
-	if (oxfw->has_output) {
-		err = snd_oxfw_stream_init_simplex(oxfw, &oxfw->tx_stream);
-		if (err < 0)
-			goto error;
-	}
 
-	err = snd_card_register(card);
-	if (err < 0) {
-		snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
-		if (oxfw->has_output)
-			snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
-		goto error;
-	}
+	/*
+	 * Once registered, the oxfw instance is released together with the
+	 * sound card instance.
+	 */
+	oxfw->card->private_free = oxfw_card_free;
+	oxfw->card->private_data = oxfw;
+	oxfw->registered = true;
+
+	return;
+error:
+	snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
+	if (oxfw->has_output)
+		snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+	snd_card_free(oxfw->card);
+	dev_info(&oxfw->unit->device,
+		 "Sound card registration failed: %d\n", err);
+}
+
+static int oxfw_probe(struct fw_unit *unit,
+		      const struct ieee1394_device_id *entry)
+{
+	struct snd_oxfw *oxfw;
+
+	if (entry->vendor_id == VENDOR_LOUD && !detect_loud_models(unit))
+		return -ENODEV;
+
+	/* Allocate this independently of the sound card instance. */
+	oxfw = kzalloc(sizeof(struct snd_oxfw), GFP_KERNEL);
+	if (oxfw == NULL)
+		return -ENOMEM;
+
+	oxfw->entry = entry;
+	oxfw->unit = fw_unit_get(unit);
 	dev_set_drvdata(&unit->device, oxfw);
 
+	mutex_init(&oxfw->mutex);
+	spin_lock_init(&oxfw->lock);
+	init_waitqueue_head(&oxfw->hwdep_wait);
+
+	/* Allocate and register this sound card later. */
+	INIT_DEFERRABLE_WORK(&oxfw->dwork, do_registration);
+	snd_fw_schedule_registration(unit, &oxfw->dwork);
+
 	return 0;
-error:
-	snd_card_free(card);
-	return err;
 }
 
 static void oxfw_bus_reset(struct fw_unit *unit)
 {
 	struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
 
+	if (!oxfw->registered)
+		snd_fw_schedule_registration(unit, &oxfw->dwork);
+
 	fcp_bus_reset(oxfw->unit);
 
-	mutex_lock(&oxfw->mutex);
+	if (oxfw->registered) {
+		mutex_lock(&oxfw->mutex);
 
-	snd_oxfw_stream_update_simplex(oxfw, &oxfw->rx_stream);
-	if (oxfw->has_output)
-		snd_oxfw_stream_update_simplex(oxfw, &oxfw->tx_stream);
+		snd_oxfw_stream_update_simplex(oxfw, &oxfw->rx_stream);
+		if (oxfw->has_output)
+			snd_oxfw_stream_update_simplex(oxfw, &oxfw->tx_stream);
 
-	mutex_unlock(&oxfw->mutex);
+		mutex_unlock(&oxfw->mutex);
 
-	if (oxfw->entry->vendor_id == OUI_STANTON)
-		snd_oxfw_scs1x_update(oxfw);
+		if (oxfw->entry->vendor_id == OUI_STANTON)
+			snd_oxfw_scs1x_update(oxfw);
+	}
 }
 
 static void oxfw_remove(struct fw_unit *unit)
 {
 	struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
 
-	/* No need to wait for releasing card object in this context. */
-	snd_card_free_when_closed(oxfw->card);
+	/*
+	 * Make sure the work for registration is stopped before the sound card
+	 * is released. The work is not scheduled again because the bus reset
+	 * handler is not called anymore.
+	 */
+	cancel_delayed_work_sync(&oxfw->dwork);
+
+	if (oxfw->registered) {
+		/* No need to wait for releasing card object in this context. */
+		snd_card_free_when_closed(oxfw->card);
+	} else {
+		/* Don't forget this case. */
+		oxfw_free(oxfw);
+	}
 }
 
 static const struct compat_info griffin_firewave = {
diff --git a/sound/firewire/oxfw/oxfw.h b/sound/firewire/oxfw/oxfw.h
index 9beecc2..2047dcb 100644
--- a/sound/firewire/oxfw/oxfw.h
+++ b/sound/firewire/oxfw/oxfw.h
@@ -36,10 +36,12 @@
 struct snd_oxfw {
 	struct snd_card *card;
 	struct fw_unit *unit;
-	const struct device_info *device_info;
 	struct mutex mutex;
 	spinlock_t lock;
 
+	bool registered;
+	struct delayed_work dwork;
+
 	bool wrong_dbs;
 	bool has_output;
 	u8 *tx_stream_formats[SND_OXFW_STREAM_FORMAT_ENTRIES];
diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
index 0e6dd5c6..4ad3bd7 100644
--- a/sound/firewire/tascam/tascam-stream.c
+++ b/sound/firewire/tascam/tascam-stream.c
@@ -381,19 +381,17 @@
 	if (err < 0)
 		return err;
 	if (curr_rate != rate ||
-	    amdtp_streaming_error(&tscm->tx_stream) ||
-	    amdtp_streaming_error(&tscm->rx_stream)) {
+	    amdtp_streaming_error(&tscm->rx_stream) ||
+	    amdtp_streaming_error(&tscm->tx_stream)) {
 		finish_session(tscm);
 
-		amdtp_stream_stop(&tscm->tx_stream);
 		amdtp_stream_stop(&tscm->rx_stream);
+		amdtp_stream_stop(&tscm->tx_stream);
 
 		release_resources(tscm);
 	}
 
-	if (!amdtp_stream_running(&tscm->tx_stream)) {
-		amdtp_stream_set_sync(CIP_SYNC_TO_DEVICE,
-				      &tscm->tx_stream, &tscm->rx_stream);
+	if (!amdtp_stream_running(&tscm->rx_stream)) {
 		err = keep_resources(tscm, rate);
 		if (err < 0)
 			goto error;
@@ -406,20 +404,6 @@
 		if (err < 0)
 			goto error;
 
-		err = amdtp_stream_start(&tscm->tx_stream,
-				tscm->tx_resources.channel,
-				fw_parent_device(tscm->unit)->max_speed);
-		if (err < 0)
-			goto error;
-
-		if (!amdtp_stream_wait_callback(&tscm->tx_stream,
-						CALLBACK_TIMEOUT)) {
-			err = -ETIMEDOUT;
-			goto error;
-		}
-	}
-
-	if (!amdtp_stream_running(&tscm->rx_stream)) {
 		err = amdtp_stream_start(&tscm->rx_stream,
 				tscm->rx_resources.channel,
 				fw_parent_device(tscm->unit)->max_speed);
@@ -433,10 +417,24 @@
 		}
 	}
 
+	if (!amdtp_stream_running(&tscm->tx_stream)) {
+		err = amdtp_stream_start(&tscm->tx_stream,
+				tscm->tx_resources.channel,
+				fw_parent_device(tscm->unit)->max_speed);
+		if (err < 0)
+			goto error;
+
+		if (!amdtp_stream_wait_callback(&tscm->tx_stream,
+						CALLBACK_TIMEOUT)) {
+			err = -ETIMEDOUT;
+			goto error;
+		}
+	}
+
 	return 0;
 error:
-	amdtp_stream_stop(&tscm->tx_stream);
 	amdtp_stream_stop(&tscm->rx_stream);
+	amdtp_stream_stop(&tscm->tx_stream);
 
 	finish_session(tscm);
 	release_resources(tscm);
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index e281c33..9dc93a7 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -85,10 +85,8 @@
 	return 0;
 }
 
-static void tscm_card_free(struct snd_card *card)
+static void tscm_free(struct snd_tscm *tscm)
 {
-	struct snd_tscm *tscm = card->private_data;
-
 	snd_tscm_transaction_unregister(tscm);
 	snd_tscm_stream_destroy_duplex(tscm);
 
@@ -97,44 +95,36 @@
 	mutex_destroy(&tscm->mutex);
 }
 
-static int snd_tscm_probe(struct fw_unit *unit,
-			   const struct ieee1394_device_id *entry)
+static void tscm_card_free(struct snd_card *card)
 {
-	struct snd_card *card;
-	struct snd_tscm *tscm;
+	tscm_free(card->private_data);
+}
+
+static void do_registration(struct work_struct *work)
+{
+	struct snd_tscm *tscm = container_of(work, struct snd_tscm, dwork.work);
 	int err;
 
-	/* create card */
-	err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE,
-			   sizeof(struct snd_tscm), &card);
+	err = snd_card_new(&tscm->unit->device, -1, NULL, THIS_MODULE, 0,
+			   &tscm->card);
 	if (err < 0)
-		return err;
-	card->private_free = tscm_card_free;
-
-	/* initialize myself */
-	tscm = card->private_data;
-	tscm->card = card;
-	tscm->unit = fw_unit_get(unit);
-
-	mutex_init(&tscm->mutex);
-	spin_lock_init(&tscm->lock);
-	init_waitqueue_head(&tscm->hwdep_wait);
+		return;
 
 	err = identify_model(tscm);
 	if (err < 0)
 		goto error;
 
-	snd_tscm_proc_init(tscm);
+	err = snd_tscm_transaction_register(tscm);
+	if (err < 0)
+		goto error;
 
 	err = snd_tscm_stream_init_duplex(tscm);
 	if (err < 0)
 		goto error;
 
-	err = snd_tscm_create_pcm_devices(tscm);
-	if (err < 0)
-		goto error;
+	snd_tscm_proc_init(tscm);
 
-	err = snd_tscm_transaction_register(tscm);
+	err = snd_tscm_create_pcm_devices(tscm);
 	if (err < 0)
 		goto error;
 
@@ -146,35 +136,91 @@
 	if (err < 0)
 		goto error;
 
-	err = snd_card_register(card);
+	err = snd_card_register(tscm->card);
 	if (err < 0)
 		goto error;
 
+	/*
+	 * After registration, the tscm instance is released together with
+	 * the sound card instance.
+	 */
+	tscm->card->private_free = tscm_card_free;
+	tscm->card->private_data = tscm;
+	tscm->registered = true;
+
+	return;
+error:
+	snd_tscm_transaction_unregister(tscm);
+	snd_tscm_stream_destroy_duplex(tscm);
+	snd_card_free(tscm->card);
+	dev_info(&tscm->unit->device,
+		 "Sound card registration failed: %d\n", err);
+}
+
+static int snd_tscm_probe(struct fw_unit *unit,
+			   const struct ieee1394_device_id *entry)
+{
+	struct snd_tscm *tscm;
+
+	/* Allocate this independently of the sound card instance. */
+	tscm = kzalloc(sizeof(struct snd_tscm), GFP_KERNEL);
+	if (tscm == NULL)
+		return -ENOMEM;
+
+	/* initialize myself */
+	tscm->unit = fw_unit_get(unit);
 	dev_set_drvdata(&unit->device, tscm);
 
-	return err;
-error:
-	snd_card_free(card);
-	return err;
+	mutex_init(&tscm->mutex);
+	spin_lock_init(&tscm->lock);
+	init_waitqueue_head(&tscm->hwdep_wait);
+
+	/* Allocate and register this sound card later. */
+	INIT_DEFERRABLE_WORK(&tscm->dwork, do_registration);
+	snd_fw_schedule_registration(unit, &tscm->dwork);
+
+	return 0;
 }
 
 static void snd_tscm_update(struct fw_unit *unit)
 {
 	struct snd_tscm *tscm = dev_get_drvdata(&unit->device);
 
+	/* Postpone the deferred registration work if the card is not registered yet. */
+	if (!tscm->registered)
+		snd_fw_schedule_registration(unit, &tscm->dwork);
+
 	snd_tscm_transaction_reregister(tscm);
 
-	mutex_lock(&tscm->mutex);
-	snd_tscm_stream_update_duplex(tscm);
-	mutex_unlock(&tscm->mutex);
+	/*
+	 * Packet streaming can be started by userspace only after registration,
+	 * so this block is needed only in that case.
+	 */
+	if (tscm->registered) {
+		mutex_lock(&tscm->mutex);
+		snd_tscm_stream_update_duplex(tscm);
+		mutex_unlock(&tscm->mutex);
+	}
 }
 
 static void snd_tscm_remove(struct fw_unit *unit)
 {
 	struct snd_tscm *tscm = dev_get_drvdata(&unit->device);
 
-	/* No need to wait for releasing card object in this context. */
-	snd_card_free_when_closed(tscm->card);
+	/*
+	 * Ensure the registration work is stopped before the sound card is
+	 * released. The work is not scheduled again because the bus reset
+	 * handler is no longer called.
+	 */
+	cancel_delayed_work_sync(&tscm->dwork);
+
+	if (tscm->registered) {
+		/* No need to wait for the card object to be released in this context. */
+		snd_card_free_when_closed(tscm->card);
+	} else {
+		/* The card was never registered; free the instance directly. */
+		tscm_free(tscm);
+	}
 }
 
 static const struct ieee1394_device_id snd_tscm_id_table[] = {
diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h
index 30ab77e..1f61011 100644
--- a/sound/firewire/tascam/tascam.h
+++ b/sound/firewire/tascam/tascam.h
@@ -51,6 +51,8 @@
 	struct mutex mutex;
 	spinlock_t lock;
 
+	bool registered;
+	struct delayed_work dwork;
 	const struct snd_tscm_spec *spec;
 
 	struct fw_iso_resources tx_resources;
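
The oxfw and tascam conversions above share one pattern: the driver instance is allocated at probe time, snd_card_new()/snd_card_register() are deferred to a delayed work, and the instance is freed either through the card's private_free callback (once registered) or directly (when registration never happened). A minimal sketch of that pattern for a hypothetical "foo" unit driver follows; the names and the elided setup steps are illustrative, not the actual tascam code.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/jiffies.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <sound/core.h>

struct foo {
	struct snd_card *card;
	struct fw_unit *unit;
	bool registered;
	struct delayed_work dwork;
};

static void foo_free(struct foo *foo)
{
	fw_unit_put(foo->unit);
	kfree(foo);
}

/* Called by the ALSA core when a registered card is finally released. */
static void foo_card_free(struct snd_card *card)
{
	foo_free(card->private_data);
}

static void do_registration(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, dwork.work);

	if (snd_card_new(&foo->unit->device, -1, NULL, THIS_MODULE, 0,
			 &foo->card) < 0)
		return;
	/* ... initialize streams, transactions, PCM/MIDI/hwdep devices ... */
	if (snd_card_register(foo->card) < 0) {
		snd_card_free(foo->card);
		return;
	}
	/* From now on, the sound card owns the driver instance. */
	foo->card->private_data = foo;
	foo->card->private_free = foo_card_free;
	foo->registered = true;
}

static int foo_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;
	foo->unit = fw_unit_get(unit);
	dev_set_drvdata(&unit->device, foo);
	INIT_DEFERRABLE_WORK(&foo->dwork, do_registration);
	/* the real drivers go through snd_fw_schedule_registration() here */
	schedule_delayed_work(&foo->dwork, msecs_to_jiffies(1000));
	return 0;
}

static void foo_remove(struct fw_unit *unit)
{
	struct foo *foo = dev_get_drvdata(&unit->device);

	/* make sure the registration work cannot run after this point */
	cancel_delayed_work_sync(&foo->dwork);

	if (foo->registered)
		snd_card_free_when_closed(foo->card);	/* frees foo via private_free */
	else
		foo_free(foo);				/* never registered */
}
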
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 2433f7c..31b510c 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -105,6 +105,9 @@
 	INIT_LIST_HEAD(&ebus->hlink_list);
 	ebus->idx = idx++;
 
+	mutex_init(&ebus->lock);
+	ebus->cmd_dma_state = true;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_init);
@@ -144,6 +147,7 @@
 	if (!edev)
 		return -ENOMEM;
 	hdev = &edev->hdac;
+	edev->ebus = ebus;
 
 	snprintf(name, sizeof(name), "ehdaudio%dD%d", ebus->idx, addr);
 
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 548cc1e..860f8ca 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -186,6 +186,9 @@
 		hlink->lcaps  = readl(hlink->ml_addr + AZX_REG_ML_LCAP);
 		hlink->lsdiid = readw(hlink->ml_addr + AZX_REG_ML_LSDIID);
 
+		/* since the link is on, update the ref count */
+		hlink->ref_count = 1;
+
 		list_add_tail(&hlink->list, &ebus->hlink_list);
 	}
 
@@ -327,3 +330,66 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all);
+
+int snd_hdac_ext_bus_link_get(struct hdac_ext_bus *ebus,
+				struct hdac_ext_link *link)
+{
+	int ret = 0;
+
+	mutex_lock(&ebus->lock);
+
+	/*
+	 * if the count moves from 0 to 1, power up this link; also check
+	 * the command DMA state and start it if it is not running
+	 */
+	if (++link->ref_count == 1) {
+		if (!ebus->cmd_dma_state) {
+			snd_hdac_bus_init_cmd_io(&ebus->bus);
+			ebus->cmd_dma_state = true;
+		}
+
+		ret = snd_hdac_ext_bus_link_power_up(link);
+	}
+
+	mutex_unlock(&ebus->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_get);
+
+int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
+				struct hdac_ext_link *link)
+{
+	int ret = 0;
+	struct hdac_ext_link *hlink;
+	bool link_up = false;
+
+	mutex_lock(&ebus->lock);
+
+	/*
+	 * if we move from 1 to 0, count will be 0
+	 * so power down this link as well
+	 */
+	if (--link->ref_count == 0) {
+		ret = snd_hdac_ext_bus_link_power_down(link);
+
+		/*
+		 * now check if all links are off, if so turn off
+		 * cmd dma as well
+		 */
+		list_for_each_entry(hlink, &ebus->hlink_list, list) {
+			if (hlink->ref_count) {
+				link_up = true;
+				break;
+			}
+		}
+
+		if (!link_up) {
+			snd_hdac_bus_stop_cmd_io(&ebus->bus);
+			ebus->cmd_dma_state = false;
+		}
+	}
+
+	mutex_unlock(&ebus->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_put);
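
snd_hdac_ext_bus_link_get()/_put() give each multi-link a plain reference count: the first user powers the link up and (re)starts the CORB/RIRB command DMA, and the last user powers it down and stops the DMA once no link remains in use. A hypothetical caller that only needs the link for the duration of a probe would use the pair roughly like this, assuming the matching declarations land in <sound/hdaudio_ext.h> alongside the rest of the ext-link API:

#include <sound/hdaudio_ext.h>

/* hypothetical user of the new link refcounting helpers */
static int example_probe_on_link(struct hdac_ext_bus *ebus,
				 struct hdac_ext_link *hlink)
{
	int err;

	err = snd_hdac_ext_bus_link_get(ebus, hlink);	/* ref 0 -> 1: power up */
	if (err < 0)
		return err;

	/* ... talk to the codec behind this link ... */

	return snd_hdac_ext_bus_link_put(ebus, hlink);	/* ref 1 -> 0: power down */
}
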
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 023cc4c..626f3bb 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -104,12 +104,11 @@
  */
 void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus)
 {
-	struct hdac_stream *s;
+	struct hdac_stream *s, *_s;
 	struct hdac_ext_stream *stream;
 	struct hdac_bus *bus = ebus_to_hbus(ebus);
 
-	while (!list_empty(&bus->stream_list)) {
-		s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
+	list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
 		stream = stream_to_hdac_ext_stream(s);
 		snd_hdac_ext_stream_decouple(ebus, stream, false);
 		list_del(&s->list);
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 8c48623..9fee464 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -80,6 +80,22 @@
 }
 EXPORT_SYMBOL_GPL(snd_hdac_bus_init_cmd_io);
 
+/* wait for the CORB/RIRB DMAs to stop, with a timeout */
+static void hdac_wait_for_cmd_dmas(struct hdac_bus *bus)
+{
+	unsigned long timeout;
+
+	timeout = jiffies + msecs_to_jiffies(100);
+	while ((snd_hdac_chip_readb(bus, RIRBCTL) & AZX_RBCTL_DMA_EN)
+		&& time_before(jiffies, timeout))
+		udelay(10);
+
+	timeout = jiffies + msecs_to_jiffies(100);
+	while ((snd_hdac_chip_readb(bus, CORBCTL) & AZX_CORBCTL_RUN)
+		&& time_before(jiffies, timeout))
+		udelay(10);
+}
+
 /**
  * snd_hdac_bus_stop_cmd_io - clean up CORB/RIRB buffers
  * @bus: HD-audio core bus
@@ -90,6 +106,7 @@
 	/* disable ringbuffer DMAs */
 	snd_hdac_chip_writeb(bus, RIRBCTL, 0);
 	snd_hdac_chip_writeb(bus, CORBCTL, 0);
+	hdac_wait_for_cmd_dmas(bus);
 	/* disable unsolicited responses */
 	snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, 0);
 	spin_unlock_irq(&bus->reg_lock);
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index d1a4d69..03c9872 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -299,13 +299,11 @@
 int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
 				int parm)
 {
-	int val;
+	unsigned int cmd, val;
 
-	if (codec->regmap)
-		regcache_cache_bypass(codec->regmap, true);
-	val = snd_hdac_read_parm(codec, nid, parm);
-	if (codec->regmap)
-		regcache_cache_bypass(codec->regmap, false);
+	cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
+	if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0)
+		return -1;
 	return val;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index fb96aea..c9af022 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -20,6 +20,7 @@
 #include <sound/core.h>
 #include <sound/hdaudio.h>
 #include <sound/hda_i915.h>
+#include <sound/hda_register.h>
 
 static struct i915_audio_component *hdac_acomp;
 
@@ -97,43 +98,100 @@
 }
 EXPORT_SYMBOL_GPL(snd_hdac_display_power);
 
+#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
+				((pci)->device == 0x0c0c) || \
+				((pci)->device == 0x0d0c) || \
+				((pci)->device == 0x160c))
+
 /**
- * snd_hdac_get_display_clk - Get CDCLK in kHz
+ * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
  * @bus: HDA core bus
  *
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
+ * The Intel HSW/BDW display HDA controller is in the GPU. Both its power and
+ * link BCLK depend on the GPU. Two Extended Mode registers, EM4 (M value) and
+ * EM5 (N value), are used to convert CDCLK (Core Display Clock) to the 24MHz
+ * BCLK:
+ * BCLK = CDCLK * M / N
+ * The values will be lost when the display power well is disabled and need to
+ * be restored to avoid abnormal playback speed.
  *
- * This function queries CDCLK value in kHz from the graphics driver and
- * returns the value.  A negative code is returned in error.
+ * Call this function at initialization time, whenever the power well state
+ * changes, and from the ELD notifier on hotplug.
  */
-int snd_hdac_get_display_clk(struct hdac_bus *bus)
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
 {
 	struct i915_audio_component *acomp = bus->audio_component;
+	struct pci_dev *pci = to_pci_dev(bus->dev);
+	int cdclk_freq;
+	unsigned int bclk_m, bclk_n;
 
-	if (!acomp || !acomp->ops)
-		return -ENODEV;
+	if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
+		return; /* only for i915 binding */
+	if (!CONTROLLER_IN_GPU(pci))
+		return; /* only HSW/BDW */
 
-	return acomp->ops->get_cdclk_freq(acomp->dev);
+	cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
+	switch (cdclk_freq) {
+	case 337500:
+		bclk_m = 16;
+		bclk_n = 225;
+		break;
+
+	case 450000:
+	default: /* default CDCLK 450MHz */
+		bclk_m = 4;
+		bclk_n = 75;
+		break;
+
+	case 540000:
+		bclk_m = 4;
+		bclk_n = 90;
+		break;
+
+	case 675000:
+		bclk_m = 8;
+		bclk_n = 225;
+		break;
+	}
+
+	snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
+	snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
 }
-EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
+EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
 
-/* There is a fixed mapping between audio pin node and display port
- * on current Intel platforms:
+/* There is a fixed mapping between audio pin node and display port.
+ * On SNB, IVY, HSW, BSW, SKL, BXT, KBL:
  * Pin Widget 5 - PORT B (port = 1 in i915 driver)
  * Pin Widget 6 - PORT C (port = 2 in i915 driver)
  * Pin Widget 7 - PORT D (port = 3 in i915 driver)
+ *
+ * On VLV, ILK:
+ * Pin Widget 4 - PORT B (port = 1 in i915 driver)
+ * Pin Widget 5 - PORT C (port = 2 in i915 driver)
+ * Pin Widget 6 - PORT D (port = 3 in i915 driver)
  */
-static int pin2port(hda_nid_t pin_nid)
+static int pin2port(struct hdac_device *codec, hda_nid_t pin_nid)
 {
-	if (WARN_ON(pin_nid < 5 || pin_nid > 7))
+	int base_nid;
+
+	switch (codec->vendor_id) {
+	case 0x80860054: /* ILK */
+	case 0x80862804: /* ILK */
+	case 0x80862882: /* VLV */
+		base_nid = 3;
+		break;
+	default:
+		base_nid = 4;
+		break;
+	}
+
+	if (WARN_ON(pin_nid <= base_nid || pin_nid > base_nid + 3))
 		return -1;
-	return pin_nid - 4;
+	return pin_nid - base_nid;
 }
 
 /**
  * snd_hdac_sync_audio_rate - Set N/CTS based on the sample rate
- * @bus: HDA core bus
+ * @codec: HDA codec
  * @nid: the pin widget NID
  * @rate: the sample rate to set
  *
@@ -143,14 +201,15 @@
  * This function sets N/CTS value based on the given sample rate.
  * Returns zero for success, or a negative error code.
  */
-int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate)
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate)
 {
+	struct hdac_bus *bus = codec->bus;
 	struct i915_audio_component *acomp = bus->audio_component;
 	int port;
 
 	if (!acomp || !acomp->ops || !acomp->ops->sync_audio_rate)
 		return -ENODEV;
-	port = pin2port(nid);
+	port = pin2port(codec, nid);
 	if (port < 0)
 		return -EINVAL;
 	return acomp->ops->sync_audio_rate(acomp->dev, port, rate);
@@ -159,7 +218,7 @@
 
 /**
  * snd_hdac_acomp_get_eld - Get the audio state and ELD via component
- * @bus: HDA core bus
+ * @codec: HDA codec
  * @nid: the pin widget NID
  * @audio_enabled: the pointer to store the current audio state
  * @buffer: the buffer pointer to store ELD bytes
@@ -177,16 +236,17 @@
  * thus it may be over @max_bytes.  If it's over @max_bytes, it implies
  * that only a part of ELD bytes have been fetched.
  */
-int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
 			   bool *audio_enabled, char *buffer, int max_bytes)
 {
+	struct hdac_bus *bus = codec->bus;
 	struct i915_audio_component *acomp = bus->audio_component;
 	int port;
 
 	if (!acomp || !acomp->ops || !acomp->ops->get_eld)
 		return -ENODEV;
 
-	port = pin2port(nid);
+	port = pin2port(codec, nid);
 	if (port < 0)
 		return -EINVAL;
 	return acomp->ops->get_eld(acomp->dev, port, audio_enabled,
@@ -267,6 +327,18 @@
 }
 EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier);
 
+/* check whether an Intel graphics device is present */
+static bool i915_gfx_present(void)
+{
+	static struct pci_device_id ids[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
+		  .class = PCI_BASE_CLASS_DISPLAY << 16,
+		  .class_mask = 0xff << 16 },
+		{}
+	};
+	return pci_dev_present(ids);
+}
+
 /**
  * snd_hdac_i915_init - Initialize i915 audio component
  * @bus: HDA core bus
@@ -286,6 +358,12 @@
 	struct i915_audio_component *acomp;
 	int ret;
 
+	if (WARN_ON(hdac_acomp))
+		return -EBUSY;
+
+	if (!i915_gfx_present())
+		return -ENODEV;
+
 	acomp = kzalloc(sizeof(*acomp), GFP_KERNEL);
 	if (!acomp)
 		return -ENOMEM;
@@ -316,6 +394,7 @@
 out_err:
 	kfree(acomp);
 	bus->audio_component = NULL;
+	hdac_acomp = NULL;
 	dev_info(dev, "failed to add i915 component master (%d)\n", ret);
 
 	return ret;
@@ -349,6 +428,7 @@
 
 	kfree(acomp);
 	bus->audio_component = NULL;
+	hdac_acomp = NULL;
 
 	return 0;
 }
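
The M/N pairs programmed by snd_hdac_i915_set_bclk() can be sanity-checked against the formula quoted in the comment: each supported CDCLK (in kHz) times M/N must come out at a 24000 kHz BCLK. A small standalone check of that arithmetic, with the values copied from the switch statement above:

#include <assert.h>

/* BCLK = CDCLK * M / N must equal 24000 kHz for every entry in the table. */
int main(void)
{
	static const struct { int cdclk_khz, m, n; } tbl[] = {
		{ 337500, 16, 225 },
		{ 450000,  4,  75 },
		{ 540000,  4,  90 },
		{ 675000,  8, 225 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		assert(tbl[i].cdclk_khz * tbl[i].m / tbl[i].n == 24000);
	return 0;
}
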
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index bdbcd6b..87041dd 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -453,14 +453,30 @@
 EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw);
 
 static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
-			unsigned int *val)
+			unsigned int *val, bool uncached)
 {
-	if (!codec->regmap)
+	if (uncached || !codec->regmap)
 		return hda_reg_read(codec, reg, val);
 	else
 		return regmap_read(codec->regmap, reg, val);
 }
 
+static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
+				      unsigned int reg, unsigned int *val,
+				      bool uncached)
+{
+	int err;
+
+	err = reg_raw_read(codec, reg, val, uncached);
+	if (err == -EAGAIN) {
+		err = snd_hdac_power_up_pm(codec);
+		if (!err)
+			err = reg_raw_read(codec, reg, val, uncached);
+		snd_hdac_power_down_pm(codec);
+	}
+	return err;
+}
+
 /**
  * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt
  * @codec: the codec object
@@ -472,19 +488,19 @@
 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
 			     unsigned int *val)
 {
-	int err;
-
-	err = reg_raw_read(codec, reg, val);
-	if (err == -EAGAIN) {
-		err = snd_hdac_power_up_pm(codec);
-		if (!err)
-			err = reg_raw_read(codec, reg, val);
-		snd_hdac_power_down_pm(codec);
-	}
-	return err;
+	return __snd_hdac_regmap_read_raw(codec, reg, val, false);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw);
 
+/* Works like snd_hdac_regmap_read_raw(), but always reads via HDA verbs,
+ * bypassing the regmap cache.
+ */
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+				      unsigned int reg, unsigned int *val)
+{
+	return __snd_hdac_regmap_read_raw(codec, reg, val, true);
+}
+
 /**
  * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
  * @codec: the codec object
diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
index d7ec862..c6c75e7 100644
--- a/sound/hda/hdmi_chmap.c
+++ b/sound/hda/hdmi_chmap.c
@@ -625,13 +625,30 @@
 	WARN_ON(count != channels);
 }
 
+static int spk_mask_from_spk_alloc(int spk_alloc)
+{
+	int i;
+	int spk_mask = eld_speaker_allocation_bits[0];
+
+	for (i = 0; i < ARRAY_SIZE(eld_speaker_allocation_bits); i++) {
+		if (spk_alloc & (1 << i))
+			spk_mask |= eld_speaker_allocation_bits[i];
+	}
+
+	return spk_mask;
+}
+
 static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
 			      unsigned int size, unsigned int __user *tlv)
 {
 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
 	struct hdac_chmap *chmap = info->private_data;
+	int pcm_idx = kcontrol->private_value;
 	unsigned int __user *dst;
 	int chs, count = 0;
+	unsigned long max_chs;
+	int type;
+	int spk_alloc, spk_mask;
 
 	if (size < 8)
 		return -ENOMEM;
@@ -639,40 +656,59 @@
 		return -EFAULT;
 	size -= 8;
 	dst = tlv + 2;
-	for (chs = 2; chs <= chmap->channels_max; chs++) {
+
+	spk_alloc = chmap->ops.get_spk_alloc(chmap->hdac, pcm_idx);
+	spk_mask = spk_mask_from_spk_alloc(spk_alloc);
+
+	max_chs = hweight_long(spk_mask);
+
+	for (chs = 2; chs <= max_chs; chs++) {
 		int i;
 		struct hdac_cea_channel_speaker_allocation *cap;
 
 		cap = channel_allocations;
 		for (i = 0; i < ARRAY_SIZE(channel_allocations); i++, cap++) {
 			int chs_bytes = chs * 4;
-			int type = chmap->ops.chmap_cea_alloc_validate_get_type(
-								chmap, cap, chs);
 			unsigned int tlv_chmap[8];
 
-			if (type < 0)
+			if (cap->channels != chs)
 				continue;
+
+			if (!(cap->spk_mask == (spk_mask & cap->spk_mask)))
+				continue;
+
+			type = chmap->ops.chmap_cea_alloc_validate_get_type(
+							chmap, cap, chs);
+			if (type < 0)
+				return -ENODEV;
 			if (size < 8)
 				return -ENOMEM;
+
 			if (put_user(type, dst) ||
 			    put_user(chs_bytes, dst + 1))
 				return -EFAULT;
+
 			dst += 2;
 			size -= 8;
 			count += 8;
+
 			if (size < chs_bytes)
 				return -ENOMEM;
+
 			size -= chs_bytes;
 			count += chs_bytes;
 			chmap->ops.cea_alloc_to_tlv_chmap(chmap, cap,
 						tlv_chmap, chs);
+
 			if (copy_to_user(dst, tlv_chmap, chs_bytes))
 				return -EFAULT;
 			dst += chs;
 		}
 	}
+
 	if (put_user(count, tlv + 1))
 		return -EFAULT;
+
 	return 0;
 }
 
diff --git a/sound/hda/local.h b/sound/hda/local.h
index d692f41..0d5bb15 100644
--- a/sound/hda/local.h
+++ b/sound/hda/local.h
@@ -16,6 +16,16 @@
 	return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
 }
 
+static inline unsigned int get_wcaps_channels(u32 wcaps)
+{
+	unsigned int chans;
+
+	chans = (wcaps & AC_WCAP_CHAN_CNT_EXT) >> 13;
+	chans = (chans + 1) * 2;
+
+	return chans;
+}
+
 extern const struct attribute_group *hdac_dev_attr_groups[];
 int hda_widget_sysfs_init(struct hdac_device *codec);
 void hda_widget_sysfs_exit(struct hdac_device *codec);
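
get_wcaps_channels() reads the 3-bit channel-count-extension field (bits 15:13 of the widget capability word) as (channels / 2) - 1, so it returns an even channel count of (field + 1) * 2. A standalone check of that arithmetic; the 0xe000 mask mirrors the AC_WCAP_CHAN_CNT_EXT definition (7 << 13):

#include <assert.h>

#define AC_WCAP_CHAN_CNT_EXT	0xe000	/* bits 15:13, i.e. 7 << 13 */

static unsigned int get_wcaps_channels(unsigned int wcaps)
{
	unsigned int chans = (wcaps & AC_WCAP_CHAN_CNT_EXT) >> 13;

	return (chans + 1) * 2;
}

int main(void)
{
	assert(get_wcaps_channels(0x0000) == 2);	/* ext field 0 -> stereo */
	assert(get_wcaps_channels(0x6000) == 8);	/* ext field 3 -> 8 channels */
	return 0;
}
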
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index 7b248cd..fdcfa29 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -591,7 +591,7 @@
 	}
 	err = upload_dma_data(sscape, init_fw->data, init_fw->size);
 	if (err == 0)
-		snd_printk(KERN_INFO "sscape: MIDI firmware loaded %d KBs\n",
+		snd_printk(KERN_INFO "sscape: MIDI firmware loaded %zu KBs\n",
 				init_fw->size >> 10);
 
 	release_firmware(init_fw);
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index 69f76ff..718d5e3 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -785,6 +785,9 @@
 	DPRINT (WF_DEBUG_LOAD_PATCH, "downloading patch %d\n",
 				      header->number);
 
+	if (header->number >= ARRAY_SIZE(dev->patch_status))
+		return -EINVAL;
+
 	dev->patch_status[header->number] |= WF_SLOT_FILLED;
 
 	bptr = buf;
@@ -809,6 +812,9 @@
 	DPRINT (WF_DEBUG_LOAD_PATCH, "downloading program %d\n",
 		header->number);
 
+	if (header->number >= ARRAY_SIZE(dev->prog_status))
+		return -EINVAL;
+
 	dev->prog_status[header->number] = WF_SLOT_USED;
 
 	/* XXX need to zero existing SLOT_USED bit for program_status[i]
@@ -898,6 +904,9 @@
 		header->number = x;
 	}
 
+	if (header->number >= WF_MAX_SAMPLE)
+		return -EINVAL;
+
 	if (header->size) {
 
 		/* XXX it's a debatable point whether or not RDONLY semantics
diff --git a/sound/oss/waveartist.c b/sound/oss/waveartist.c
index b36ea47..0b8d0de 100644
--- a/sound/oss/waveartist.c
+++ b/sound/oss/waveartist.c
@@ -1414,11 +1414,9 @@
 	else {
 #ifdef CONFIG_ARCH_NETWINDER
 		if (machine_is_netwinder()) {
-			init_timer(&vnc_timer);
-			vnc_timer.function = vnc_slider_tick;
-			vnc_timer.expires  = jiffies;
-			vnc_timer.data     = nr_waveartist_devs;
-			add_timer(&vnc_timer);
+			setup_timer(&vnc_timer, vnc_slider_tick,
+				    nr_waveartist_devs);
+			mod_timer(&vnc_timer, jiffies);
 
 			vnc_configure_mixer(devc, 0);
 
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 4667c32..4a054d7 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -2151,8 +2151,7 @@
 							   stream->resources, en,
 							   VORTEX_RESOURCE_SRC)) < 0) {
 					memset(stream->resources, 0,
-					       sizeof(unsigned char) *
-					       VORTEX_RESOURCE_LAST);
+					       sizeof(stream->resources));
 					return -EBUSY;
 				}
 				if (stream->type != VORTEX_PCM_A3D) {
@@ -2162,7 +2161,7 @@
 								   VORTEX_RESOURCE_MIXIN)) < 0) {
 						memset(stream->resources,
 						       0,
-						       sizeof(unsigned char) * VORTEX_RESOURCE_LAST);
+						       sizeof(stream->resources));
 						return -EBUSY;
 					}
 				}
@@ -2175,8 +2174,7 @@
 						   stream->resources, en,
 						   VORTEX_RESOURCE_A3D)) < 0) {
 				memset(stream->resources, 0,
-				       sizeof(unsigned char) *
-				       VORTEX_RESOURCE_LAST);
+				       sizeof(stream->resources));
 				dev_err(vortex->card->dev,
 					"out of A3D sources. Sorry\n");
 				return -EBUSY;
@@ -2290,8 +2288,7 @@
 						   VORTEX_RESOURCE_MIXOUT))
 			    < 0) {
 				memset(stream->resources, 0,
-				       sizeof(unsigned char) *
-				       VORTEX_RESOURCE_LAST);
+				       sizeof(stream->resources));
 				return -EBUSY;
 			}
 			if ((src[i] =
@@ -2299,8 +2296,7 @@
 						   stream->resources, en,
 						   VORTEX_RESOURCE_SRC)) < 0) {
 				memset(stream->resources, 0,
-				       sizeof(unsigned char) *
-				       VORTEX_RESOURCE_LAST);
+				       sizeof(stream->resources));
 				return -EBUSY;
 			}
 		}
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index a6d6d8d..df5741a 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -432,7 +432,10 @@
 #endif
 	//printk(KERN_INFO "vortex: pointer = 0x%x\n", current_ptr);
 	spin_unlock(&chip->lock);
-	return (bytes_to_frames(substream->runtime, current_ptr));
+	current_ptr = bytes_to_frames(substream->runtime, current_ptr);
+	if (current_ptr >= substream->runtime->buffer_size)
+		current_ptr = 0;
+	return current_ptr;
 }
 
 /* operators */
diff --git a/sound/pci/ctxfi/cttimer.c b/sound/pci/ctxfi/cttimer.c
index a5d4604..8f94534 100644
--- a/sound/pci/ctxfi/cttimer.c
+++ b/sound/pci/ctxfi/cttimer.c
@@ -49,7 +49,7 @@
 	spinlock_t lock;		/* global timer lock (for xfitimer) */
 	spinlock_t list_lock;		/* lock for instance list */
 	struct ct_atc *atc;
-	struct ct_timer_ops *ops;
+	const struct ct_timer_ops *ops;
 	struct list_head instance_head;
 	struct list_head running_head;
 	unsigned int wc;		/* current wallclock */
@@ -128,7 +128,7 @@
 
 #define ct_systimer_free	ct_systimer_prepare
 
-static struct ct_timer_ops ct_systimer_ops = {
+static const struct ct_timer_ops ct_systimer_ops = {
 	.init = ct_systimer_init,
 	.free_instance = ct_systimer_free,
 	.prepare = ct_systimer_prepare,
@@ -322,7 +322,7 @@
 	ct_xfitimer_irq_stop(atimer);
 }
 
-static struct ct_timer_ops ct_xfitimer_ops = {
+static const struct ct_timer_ops ct_xfitimer_ops = {
 	.prepare = ct_xfitimer_prepare,
 	.start = ct_xfitimer_start,
 	.stop = ct_xfitimer_stop,
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 0dc44eb..626cd21 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -1548,7 +1548,7 @@
 	int val = 0;
 	
 	spin_lock_irq(&ensoniq->reg_lock);
-	if ((ensoniq->ctrl & ES_1371_GPIO_OUTM) >= 4)
+	if (ensoniq->ctrl & ES_1371_GPIO_OUT(4))
 	    	val = 1;
 	ucontrol->value.integer.value[0] = val;
 	spin_unlock_irq(&ensoniq->reg_lock);
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index bb02c2d..7f3b5ed 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -50,9 +50,13 @@
 	bool "Allow dynamic codec reconfiguration"
 	help
 	  Say Y here to enable the HD-audio codec re-configuration feature.
-	  This adds the sysfs interfaces to allow user to clear the whole
-	  codec configuration, change the codec setup, add extra verbs,
-	  and re-configure the codec dynamically.
+	  It allows the user to clear the whole codec configuration, change the
+	  codec setup, add extra verbs, and re-configure the codec dynamically.
+
+	  Note that this item alone doesn't provide the sysfs interface, but
+	  enables the feature just for the patch loader below.
+	  If you need the traditional sysfs entries for manual interaction,
+	  turn on CONFIG_SND_HDA_HWDEP as well.
 
 config SND_HDA_INPUT_BEEP
 	bool "Support digital beep via input layer"
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 7ca5b89..320445f 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -826,7 +826,7 @@
 				   bool allow_powerdown)
 {
 	hda_nid_t nid, changed = 0;
-	int i, state;
+	int i, state, power;
 
 	for (i = 0; i < path->depth; i++) {
 		nid = path->path[i];
@@ -838,7 +838,9 @@
 			state = AC_PWRST_D0;
 		else
 			state = AC_PWRST_D3;
-		if (!snd_hda_check_power_state(codec, nid, state)) {
+		power = snd_hda_codec_read(codec, nid, 0,
+					   AC_VERB_GET_POWER_STATE, 0);
+		if (power != (state | (state << 4))) {
 			snd_hda_codec_write(codec, nid, 0,
 					    AC_VERB_SET_POWER_STATE, state);
 			changed = nid;
@@ -5432,6 +5434,7 @@
 	spec->cur_adc_stream_tag = stream_tag;
 	spec->cur_adc_format = format;
 	snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format);
+	call_pcm_capture_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_PREPARE);
 	return 0;
 }
 
@@ -5442,6 +5445,7 @@
 	struct hda_gen_spec *spec = codec->spec;
 	snd_hda_codec_cleanup_stream(codec, spec->cur_adc);
 	spec->cur_adc = 0;
+	call_pcm_capture_hook(hinfo, codec, substream, HDA_GEN_PCM_ACT_CLEANUP);
 	return 0;
 }
 
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2624cfe..9a0d144 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -857,50 +857,6 @@
 #define azx_del_card_list(chip) /* NOP */
 #endif /* CONFIG_PM */
 
-/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
- * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
- * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
- * BCLK = CDCLK * M / N
- * The values will be lost when the display power well is disabled and need to
- * be restored to avoid abnormal playback speed.
- */
-static void haswell_set_bclk(struct hda_intel *hda)
-{
-	struct azx *chip = &hda->chip;
-	int cdclk_freq;
-	unsigned int bclk_m, bclk_n;
-
-	if (!hda->need_i915_power)
-		return;
-
-	cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
-	switch (cdclk_freq) {
-	case 337500:
-		bclk_m = 16;
-		bclk_n = 225;
-		break;
-
-	case 450000:
-	default: /* default CDCLK 450MHz */
-		bclk_m = 4;
-		bclk_n = 75;
-		break;
-
-	case 540000:
-		bclk_m = 4;
-		bclk_n = 90;
-		break;
-
-	case 675000:
-		bclk_m = 8;
-		bclk_n = 225;
-		break;
-	}
-
-	azx_writew(chip, HSW_EM4, bclk_m);
-	azx_writew(chip, HSW_EM5, bclk_n);
-}
-
 #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
 /*
  * power management
@@ -958,7 +914,7 @@
 	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
 		&& hda->need_i915_power) {
 		snd_hdac_display_power(azx_bus(chip), true);
-		haswell_set_bclk(hda);
+		snd_hdac_i915_set_bclk(azx_bus(chip));
 	}
 	if (chip->msi)
 		if (pci_enable_msi(pci) < 0)
@@ -1058,7 +1014,7 @@
 		bus = azx_bus(chip);
 		if (hda->need_i915_power) {
 			snd_hdac_display_power(bus, true);
-			haswell_set_bclk(hda);
+			snd_hdac_i915_set_bclk(bus);
 		} else {
 			/* toggle codec wakeup bit for STATESTS read */
 			snd_hdac_set_codec_wakeup(bus, true);
@@ -1796,12 +1752,8 @@
 	/* initialize chip */
 	azx_init_pci(chip);
 
-	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
-		struct hda_intel *hda;
-
-		hda = container_of(chip, struct hda_intel, chip);
-		haswell_set_bclk(hda);
-	}
+	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+		snd_hdac_i915_set_bclk(bus);
 
 	hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
 
@@ -2232,6 +2184,9 @@
 	/* Broxton-P(Apollolake) */
 	{ PCI_DEVICE(0x8086, 0x5a98),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+	/* Broxton-T */
+	{ PCI_DEVICE(0x8086, 0x1a98),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
 	/* Haswell */
 	{ PCI_DEVICE(0x8086, 0x0a0c),
 	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
@@ -2361,6 +2316,10 @@
 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
 	{ PCI_DEVICE(0x1002, 0xaae8),
 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+	{ PCI_DEVICE(0x1002, 0xaae0),
+	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+	{ PCI_DEVICE(0x1002, 0xaaf0),
+	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
 	/* VIA VT8251/VT8237A */
 	{ PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA },
 	/* VIA GFX VT7122/VX900 */
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 64e0d1d..9739fce 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -141,14 +141,6 @@
 	err = snd_hda_codec_configure(codec);
 	if (err < 0)
 		goto error;
-	/* rebuild PCMs */
-	err = snd_hda_codec_build_pcms(codec);
-	if (err < 0)
-		goto error;
-	/* rebuild mixers */
-	err = snd_hda_codec_build_controls(codec);
-	if (err < 0)
-		goto error;
 	err = snd_card_register(codec->card);
  error:
 	snd_hda_power_down(codec);
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a47e8ae..80bbadc 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -361,6 +361,7 @@
 {
 	struct cs_spec *spec = codec->spec;
 	int err;
+	int i;
 
 	err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
 	if (err < 0)
@@ -370,6 +371,19 @@
 	if (err < 0)
 		return err;
 
+	/* keep the ADCs powered up when dynamic ADC switching is used */
+	if (spec->gen.dyn_adc_switch) {
+		unsigned int done = 0;
+		for (i = 0; i < spec->gen.input_mux.num_items; i++) {
+			int idx = spec->gen.dyn_adc_idx[i];
+			if (done & (1 << idx))
+				continue;
+			snd_hda_gen_fix_pin_power(codec,
+						  spec->gen.adc_nids[idx]);
+			done |= 1 << idx;
+		}
+	}
+
 	return 0;
 }
 
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 5af372d..d0d5ad8 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -114,6 +114,9 @@
 	int (*setup_stream)(struct hda_codec *codec, hda_nid_t cvt_nid,
 			    hda_nid_t pin_nid, u32 stream_tag, int format);
 
+	void (*pin_cvt_fixup)(struct hda_codec *codec,
+			      struct hdmi_spec_per_pin *per_pin,
+			      hda_nid_t cvt_nid);
 };
 
 struct hdmi_pcm {
@@ -684,7 +687,8 @@
 	if (!channels)
 		return;
 
-	if (is_haswell_plus(codec))
+	/* some HW (e.g. HSW+) needs the amp reprogrammed each time */
+	if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP)
 		snd_hda_codec_write(codec, pin_nid, 0,
 					    AC_VERB_SET_AMP_GAIN_MUTE,
 					    AMP_OUT_UNMUTE);
@@ -864,9 +868,6 @@
 	struct hdmi_spec *spec = codec->spec;
 	int err;
 
-	if (is_haswell_plus(codec))
-		haswell_verify_D0(codec, cvt_nid, pin_nid);
-
 	err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format));
 
 	if (err) {
@@ -884,7 +885,7 @@
  * of the pin.
  */
 static int hdmi_choose_cvt(struct hda_codec *codec,
-			int pin_idx, int *cvt_id, int *mux_id)
+			   int pin_idx, int *cvt_id)
 {
 	struct hdmi_spec *spec = codec->spec;
 	struct hdmi_spec_per_pin *per_pin;
@@ -925,8 +926,6 @@
 
 	if (cvt_id)
 		*cvt_id = cvt_idx;
-	if (mux_id)
-		*mux_id = mux_idx;
 
 	return 0;
 }
@@ -1019,9 +1018,6 @@
 	int mux_idx;
 	struct hdmi_spec *spec = codec->spec;
 
-	if (!is_haswell_plus(codec) && !is_valleyview_plus(codec))
-		return;
-
 	/* On Intel platform, the mapping of converter nid to
 	 * mux index of the pins are always the same.
 	 * The pin nid may be 0, this means all pins will not
@@ -1032,6 +1028,17 @@
 		intel_not_share_assigned_cvt(codec, pin_nid, mux_idx);
 }
 
+/* skeleton caller of pin_cvt_fixup ops */
+static void pin_cvt_fixup(struct hda_codec *codec,
+			  struct hdmi_spec_per_pin *per_pin,
+			  hda_nid_t cvt_nid)
+{
+	struct hdmi_spec *spec = codec->spec;
+
+	if (spec->ops.pin_cvt_fixup)
+		spec->ops.pin_cvt_fixup(codec, per_pin, cvt_nid);
+}
+
 /* called in hdmi_pcm_open when no pin is assigned to the PCM
  * in dyn_pcm_assign mode.
  */
@@ -1049,7 +1056,7 @@
 	if (pcm_idx < 0)
 		return -EINVAL;
 
-	err = hdmi_choose_cvt(codec, -1, &cvt_idx, NULL);
+	err = hdmi_choose_cvt(codec, -1, &cvt_idx);
 	if (err)
 		return err;
 
@@ -1057,7 +1064,7 @@
 	per_cvt->assigned = 1;
 	hinfo->nid = per_cvt->cvt_nid;
 
-	intel_not_share_assigned_cvt_nid(codec, 0, per_cvt->cvt_nid);
+	pin_cvt_fixup(codec, NULL, per_cvt->cvt_nid);
 
 	set_bit(pcm_idx, &spec->pcm_in_use);
 	/* todo: setup spdif ctls assign */
@@ -1089,7 +1096,7 @@
 {
 	struct hdmi_spec *spec = codec->spec;
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	int pin_idx, cvt_idx, pcm_idx, mux_idx = 0;
+	int pin_idx, cvt_idx, pcm_idx;
 	struct hdmi_spec_per_pin *per_pin;
 	struct hdmi_eld *eld;
 	struct hdmi_spec_per_cvt *per_cvt = NULL;
@@ -1118,7 +1125,7 @@
 		}
 	}
 
-	err = hdmi_choose_cvt(codec, pin_idx, &cvt_idx, &mux_idx);
+	err = hdmi_choose_cvt(codec, pin_idx, &cvt_idx);
 	if (err < 0) {
 		mutex_unlock(&spec->pcm_lock);
 		return err;
@@ -1135,11 +1142,10 @@
 
 	snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
 			    AC_VERB_SET_CONNECT_SEL,
-			    mux_idx);
+			    per_pin->mux_idx);
 
 	/* configure unused pins to choose other converters */
-	if (is_haswell_plus(codec) || is_valleyview_plus(codec))
-		intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
+	pin_cvt_fixup(codec, per_pin, 0);
 
 	snd_hda_spdif_ctls_assign(codec, pcm_idx, per_cvt->cvt_nid);
 
@@ -1372,12 +1378,7 @@
 	 *   and this can make HW reset converter selection on a pin.
 	 */
 	if (eld->eld_valid && !old_eld_valid && per_pin->setup) {
-		if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
-			intel_verify_pin_cvt_connect(codec, per_pin);
-			intel_not_share_assigned_cvt(codec, per_pin->pin_nid,
-						     per_pin->mux_idx);
-		}
-
+		pin_cvt_fixup(codec, per_pin, 0);
 		hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
 	}
 
@@ -1396,7 +1397,6 @@
 	struct hda_codec *codec = per_pin->codec;
 	struct hdmi_spec *spec = codec->spec;
 	struct hdmi_eld *eld = &spec->temp_eld;
-	struct hdmi_eld *pin_eld = &per_pin->sink_eld;
 	hda_nid_t pin_nid = per_pin->pin_nid;
 	/*
 	 * Always execute a GetPinSense verb here, even when called from
@@ -1413,15 +1413,15 @@
 	present = snd_hda_pin_sense(codec, pin_nid);
 
 	mutex_lock(&per_pin->lock);
-	pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
-	if (pin_eld->monitor_present)
+	eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
+	if (eld->monitor_present)
 		eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
 	else
 		eld->eld_valid = false;
 
 	codec_dbg(codec,
 		"HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
-		codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid);
+		codec->addr, pin_nid, eld->monitor_present, eld->eld_valid);
 
 	if (eld->eld_valid) {
 		if (spec->ops.pin_get_eld(codec, pin_nid, eld->eld_buffer,
@@ -1441,7 +1441,7 @@
 	else
 		update_eld(codec, per_pin, eld);
 
-	ret = !repoll || !pin_eld->monitor_present || pin_eld->eld_valid;
+	ret = !repoll || !eld->monitor_present || eld->eld_valid;
 
 	jack = snd_hda_jack_tbl_get(codec, pin_nid);
 	if (jack)
@@ -1485,7 +1485,7 @@
 
 	mutex_lock(&per_pin->lock);
 	eld->monitor_present = false;
-	size = snd_hdac_acomp_get_eld(&codec->bus->core, per_pin->pin_nid,
+	size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid,
 				      &eld->monitor_present, eld->eld_buffer,
 				      ELD_MAX_SIZE);
 	if (size > 0) {
@@ -1712,7 +1712,7 @@
 		 * skip pin setup and return 0 to make audio playback
 		 * be ongoing
 		 */
-		intel_not_share_assigned_cvt_nid(codec, 0, cvt_nid);
+		pin_cvt_fixup(codec, NULL, cvt_nid);
 		snd_hda_codec_setup_stream(codec, cvt_nid,
 					stream_tag, 0, format);
 		mutex_unlock(&spec->pcm_lock);
@@ -1725,23 +1725,21 @@
 	}
 	per_pin = get_pin(spec, pin_idx);
 	pin_nid = per_pin->pin_nid;
-	if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
-		/* Verify pin:cvt selections to avoid silent audio after S3.
-		 * After S3, the audio driver restores pin:cvt selections
-		 * but this can happen before gfx is ready and such selection
-		 * is overlooked by HW. Thus multiple pins can share a same
-		 * default convertor and mute control will affect each other,
-		 * which can cause a resumed audio playback become silent
-		 * after S3.
-		 */
-		intel_verify_pin_cvt_connect(codec, per_pin);
-		intel_not_share_assigned_cvt(codec, pin_nid, per_pin->mux_idx);
-	}
+
+	/* Verify pin:cvt selections to avoid silent audio after S3.
+	 * After S3, the audio driver restores pin:cvt selections
+	 * but this can happen before gfx is ready and such selection
+	 * is overlooked by HW. Thus multiple pins can share the same
+	 * default converter, and their mute controls will affect each other,
+	 * which can cause resumed audio playback to become silent
+	 * after S3.
+	 */
+	pin_cvt_fixup(codec, per_pin, 0);
 
 	/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
 	/* Todo: add DP1.2 MST audio support later */
 	if (codec_has_acomp(codec))
-		snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
+		snd_hdac_sync_audio_rate(&codec->core, pin_nid, runtime->rate);
 
 	non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
 	mutex_lock(&per_pin->lock);
@@ -1838,6 +1836,18 @@
 	.cleanup = generic_hdmi_playback_pcm_cleanup,
 };
 
+static int hdmi_get_spk_alloc(struct hdac_device *hdac, int pcm_idx)
+{
+	struct hda_codec *codec = container_of(hdac, struct hda_codec, core);
+	struct hdmi_spec *spec = codec->spec;
+	struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
+
+	if (!per_pin)
+		return 0;
+
+	return per_pin->sink_eld.info.spk_alloc;
+}
+
 static void hdmi_get_chmap(struct hdac_device *hdac, int pcm_idx,
 					unsigned char *chmap)
 {
@@ -1859,6 +1869,8 @@
 	struct hdmi_spec *spec = codec->spec;
 	struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
 
+	if (!per_pin)
+		return;
 	mutex_lock(&per_pin->lock);
 	per_pin->chmap_set = true;
 	memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap));
@@ -2074,6 +2086,20 @@
 	snd_array_free(&spec->cvts);
 }
 
+static void generic_spec_free(struct hda_codec *codec)
+{
+	struct hdmi_spec *spec = codec->spec;
+
+	if (spec) {
+		if (spec->i915_bound)
+			snd_hdac_i915_exit(&codec->bus->core);
+		hdmi_array_free(spec);
+		kfree(spec);
+		codec->spec = NULL;
+	}
+	codec->dp_mst = false;
+}
+
 static void generic_hdmi_free(struct hda_codec *codec)
 {
 	struct hdmi_spec *spec = codec->spec;
@@ -2098,10 +2124,7 @@
 			spec->pcm_rec[pcm_idx].jack = NULL;
 	}
 
-	if (spec->i915_bound)
-		snd_hdac_i915_exit(&codec->bus->core);
-	hdmi_array_free(spec);
-	kfree(spec);
+	generic_spec_free(codec);
 }
 
 #ifdef CONFIG_PM
@@ -2139,6 +2162,55 @@
 	.setup_stream				= hdmi_setup_stream,
 };
 
+/* allocate codec->spec and assign/initialize generic parser ops */
+static int alloc_generic_hdmi(struct hda_codec *codec)
+{
+	struct hdmi_spec *spec;
+
+	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec)
+		return -ENOMEM;
+
+	spec->ops = generic_standard_hdmi_ops;
+	mutex_init(&spec->pcm_lock);
+	snd_hdac_register_chmap_ops(&codec->core, &spec->chmap);
+
+	spec->chmap.ops.get_chmap = hdmi_get_chmap;
+	spec->chmap.ops.set_chmap = hdmi_set_chmap;
+	spec->chmap.ops.is_pcm_attached = is_hdmi_pcm_attached;
+	spec->chmap.ops.get_spk_alloc = hdmi_get_spk_alloc;
+
+	codec->spec = spec;
+	hdmi_array_init(spec, 4);
+
+	codec->patch_ops = generic_hdmi_patch_ops;
+
+	return 0;
+}
+
+/* generic HDMI parser */
+static int patch_generic_hdmi(struct hda_codec *codec)
+{
+	int err;
+
+	err = alloc_generic_hdmi(codec);
+	if (err < 0)
+		return err;
+
+	err = hdmi_parse_codec(codec);
+	if (err < 0) {
+		generic_spec_free(codec);
+		return err;
+	}
+
+	generic_hdmi_init_per_pins(codec);
+	return 0;
+}
+
+/*
+ * Intel codec parsers and helpers
+ */
+
 static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
 					     hda_nid_t nid)
 {
@@ -2216,12 +2288,23 @@
 static void intel_pin_eld_notify(void *audio_ptr, int port)
 {
 	struct hda_codec *codec = audio_ptr;
-	int pin_nid = port + 0x04;
+	int pin_nid;
 
 	/* we assume only from port-B to port-D */
 	if (port < 1 || port > 3)
 		return;
 
+	switch (codec->core.vendor_id) {
+	case 0x80860054: /* ILK */
+	case 0x80862804: /* ILK */
+	case 0x80862882: /* VLV */
+		pin_nid = port + 0x03;
+		break;
+	default:
+		pin_nid = port + 0x04;
+		break;
+	}
+
 	/* skip notification during system suspend (but not in runtime PM);
 	 * the state will be updated at resume
 	 */
@@ -2231,98 +2314,165 @@
 	if (atomic_read(&(codec)->core.in_pm))
 		return;
 
+	snd_hdac_i915_set_bclk(&codec->bus->core);
 	check_presence_and_report(codec, pin_nid);
 }
 
-static int patch_generic_hdmi(struct hda_codec *codec)
+/* register i915 component pin_eld_notify callback */
+static void register_i915_notifier(struct hda_codec *codec)
+{
+	struct hdmi_spec *spec = codec->spec;
+
+	spec->use_acomp_notifier = true;
+	spec->i915_audio_ops.audio_ptr = codec;
+	/* intel_audio_codec_enable() or intel_audio_codec_disable()
+	 * will call pin_eld_notify using the audio_ptr pointer;
+	 * we need to make sure audio_ptr is really set up beforehand.
+	 */
+	wmb();
+	spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
+	snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
+}
+
+/* setup_stream ops override for HSW+ */
+static int i915_hsw_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
+				 hda_nid_t pin_nid, u32 stream_tag, int format)
+{
+	haswell_verify_D0(codec, cvt_nid, pin_nid);
+	return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+}
+
+/* pin_cvt_fixup ops override for HSW+ and VLV+ */
+static void i915_pin_cvt_fixup(struct hda_codec *codec,
+			       struct hdmi_spec_per_pin *per_pin,
+			       hda_nid_t cvt_nid)
+{
+	if (per_pin) {
+		intel_verify_pin_cvt_connect(codec, per_pin);
+		intel_not_share_assigned_cvt(codec, per_pin->pin_nid,
+					     per_pin->mux_idx);
+	} else {
+		intel_not_share_assigned_cvt_nid(codec, 0, cvt_nid);
+	}
+}
+
+/* Intel Haswell and onwards; audio component with eld notifier */
+static int patch_i915_hsw_hdmi(struct hda_codec *codec)
 {
 	struct hdmi_spec *spec;
+	int err;
 
-	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
-	if (spec == NULL)
-		return -ENOMEM;
-
-	spec->ops = generic_standard_hdmi_ops;
-	mutex_init(&spec->pcm_lock);
-	snd_hdac_register_chmap_ops(&codec->core, &spec->chmap);
-
-	spec->chmap.ops.get_chmap = hdmi_get_chmap;
-	spec->chmap.ops.set_chmap = hdmi_set_chmap;
-	spec->chmap.ops.is_pcm_attached = is_hdmi_pcm_attached;
-
-	codec->spec = spec;
-	hdmi_array_init(spec, 4);
-
-#ifdef CONFIG_SND_HDA_I915
-	/* Try to bind with i915 for Intel HSW+ codecs (if not done yet) */
-	if ((codec->core.vendor_id >> 16) == 0x8086 &&
-	    is_haswell_plus(codec)) {
-#if 0
-		/* on-demand binding leads to an unbalanced refcount when
-		 * both i915 and hda drivers are probed concurrently;
-		 * disabled temporarily for now
-		 */
-		if (!codec->bus->core.audio_component)
-			if (!snd_hdac_i915_init(&codec->bus->core))
-				spec->i915_bound = true;
-#endif
-		/* use i915 audio component notifier for hotplug */
-		if (codec->bus->core.audio_component)
-			spec->use_acomp_notifier = true;
+	/* HSW+ requires i915 binding */
+	if (!codec->bus->core.audio_component) {
+		codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
+		return -ENODEV;
 	}
-#endif
 
-	if (is_haswell_plus(codec)) {
-		intel_haswell_enable_all_pins(codec, true);
-		intel_haswell_fixup_enable_dp12(codec);
+	err = alloc_generic_hdmi(codec);
+	if (err < 0)
+		return err;
+	spec = codec->spec;
+
+	intel_haswell_enable_all_pins(codec, true);
+	intel_haswell_fixup_enable_dp12(codec);
+
+	/* For Haswell/Broadwell, the controller is also in the power well and
+	 * can cover the codec power request, so this flag need not be set.
+	 */
+	if (!is_haswell(codec) && !is_broadwell(codec))
+		codec->core.link_power_control = 1;
+
+	codec->patch_ops.set_power_state = haswell_set_power_state;
+	codec->dp_mst = true;
+	codec->depop_delay = 0;
+	codec->auto_runtime_pm = 1;
+
+	spec->ops.setup_stream = i915_hsw_setup_stream;
+	spec->ops.pin_cvt_fixup = i915_pin_cvt_fixup;
+
+	err = hdmi_parse_codec(codec);
+	if (err < 0) {
+		generic_spec_free(codec);
+		return err;
 	}
 
+	generic_hdmi_init_per_pins(codec);
+	register_i915_notifier(codec);
+	return 0;
+}
+
+/* Intel Baytrail and Braswell; with eld notifier */
+static int patch_i915_byt_hdmi(struct hda_codec *codec)
+{
+	struct hdmi_spec *spec;
+	int err;
+
+	/* requires i915 binding */
+	if (!codec->bus->core.audio_component) {
+		codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
+		return -ENODEV;
+	}
+
+	err = alloc_generic_hdmi(codec);
+	if (err < 0)
+		return err;
+	spec = codec->spec;
+
 	/* For Valleyview/Cherryview, only the display codec is in the display
 	 * power well and can use link_power ops to request/release the power.
-	 * For Haswell/Broadwell, the controller is also in the power well and
-	 * can cover the codec power request, and so need not set this flag.
-	 * For previous platforms, there is no such power well feature.
 	 */
-	if (is_valleyview_plus(codec) || is_skylake(codec) ||
-			is_broxton(codec))
-		codec->core.link_power_control = 1;
+	codec->core.link_power_control = 1;
 
-	if (hdmi_parse_codec(codec) < 0) {
-		if (spec->i915_bound)
-			snd_hdac_i915_exit(&codec->bus->core);
-		codec->spec = NULL;
-		kfree(spec);
-		return -EINVAL;
-	}
-	codec->patch_ops = generic_hdmi_patch_ops;
-	if (is_haswell_plus(codec)) {
-		codec->patch_ops.set_power_state = haswell_set_power_state;
-		codec->dp_mst = true;
-	}
+	codec->depop_delay = 0;
+	codec->auto_runtime_pm = 1;
 
-	/* Enable runtime pm for HDMI audio codec of HSW/BDW/SKL/BYT/BSW */
-	if (is_haswell_plus(codec) || is_valleyview_plus(codec))
-		codec->auto_runtime_pm = 1;
+	spec->ops.pin_cvt_fixup = i915_pin_cvt_fixup;
+
+	err = hdmi_parse_codec(codec);
+	if (err < 0) {
+		generic_spec_free(codec);
+		return err;
+	}
 
 	generic_hdmi_init_per_pins(codec);
-
-
-	if (codec_has_acomp(codec)) {
-		codec->depop_delay = 0;
-		spec->i915_audio_ops.audio_ptr = codec;
-		/* intel_audio_codec_enable() or intel_audio_codec_disable()
-		 * will call pin_eld_notify with using audio_ptr pointer
-		 * We need make sure audio_ptr is really setup
-		 */
-		wmb();
-		spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
-		snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
-	}
-
-	WARN_ON(spec->dyn_pcm_assign && !codec_has_acomp(codec));
+	register_i915_notifier(codec);
 	return 0;
 }
 
+/* Intel IronLake, SandyBridge and IvyBridge; with eld notifier */
+static int patch_i915_cpt_hdmi(struct hda_codec *codec)
+{
+	struct hdmi_spec *spec;
+	int err;
+
+	/* no i915 component should have been bound before this */
+	if (WARN_ON(codec->bus->core.audio_component))
+		return -EBUSY;
+
+	err = alloc_generic_hdmi(codec);
+	if (err < 0)
+		return err;
+	spec = codec->spec;
+
+	/* Try to bind with i915 now */
+	err = snd_hdac_i915_init(&codec->bus->core);
+	if (err < 0)
+		goto error;
+	spec->i915_bound = true;
+
+	err = hdmi_parse_codec(codec);
+	if (err < 0)
+		goto error;
+
+	generic_hdmi_init_per_pins(codec);
+	register_i915_notifier(codec);
+	return 0;
+
+ error:
+	generic_spec_free(codec);
+	return err;
+}
+
 /*
  * Shared non-generic implementations
  */
@@ -3399,6 +3549,9 @@
 	spec->ops.pin_hbr_setup = atihdmi_pin_hbr_setup;
 	spec->ops.setup_stream = atihdmi_setup_stream;
 
+	spec->chmap.ops.pin_get_slot_channel = atihdmi_pin_get_slot_channel;
+	spec->chmap.ops.pin_set_slot_channel = atihdmi_pin_set_slot_channel;
+
 	if (!has_amd_full_remap_support(codec)) {
 		/* override to ATI/AMD-specific versions with pairwise mapping */
 		spec->chmap.ops.chmap_cea_alloc_validate_get_type =
@@ -3406,10 +3559,6 @@
 		spec->chmap.ops.cea_alloc_to_tlv_chmap =
 				atihdmi_paired_cea_alloc_to_tlv_chmap;
 		spec->chmap.ops.chmap_validate = atihdmi_paired_chmap_validate;
-		spec->chmap.ops.pin_get_slot_channel =
-				atihdmi_pin_get_slot_channel;
-		spec->chmap.ops.pin_set_slot_channel =
-				atihdmi_pin_set_slot_channel;
 	}
 
 	/* ATI/AMD converters do not advertise all of their capabilities */
@@ -3491,21 +3640,21 @@
 HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP",	patch_via_hdmi),
 HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP",	patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x11069f85, "VX11 HDMI/DP",	patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI",	patch_generic_hdmi),
+HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI",	patch_i915_cpt_hdmi),
 HDA_CODEC_ENTRY(0x80862801, "Bearlake HDMI",	patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862802, "Cantiga HDMI",	patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862803, "Eaglelake HDMI",	patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x80862804, "IbexPeak HDMI",	patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x80862805, "CougarPoint HDMI",	patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x80862806, "PantherPoint HDMI", patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x80862807, "Haswell HDMI",	patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI",	patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI",	patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI",	patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI",	patch_generic_hdmi),
+HDA_CODEC_ENTRY(0x80862804, "IbexPeak HDMI",	patch_i915_cpt_hdmi),
+HDA_CODEC_ENTRY(0x80862805, "CougarPoint HDMI",	patch_i915_cpt_hdmi),
+HDA_CODEC_ENTRY(0x80862806, "PantherPoint HDMI", patch_i915_cpt_hdmi),
+HDA_CODEC_ENTRY(0x80862807, "Haswell HDMI",	patch_i915_hsw_hdmi),
+HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI",	patch_i915_hsw_hdmi),
+HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI",	patch_i915_hsw_hdmi),
+HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI",	patch_i915_hsw_hdmi),
+HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI",	patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI",	patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",	patch_generic_hdmi),
-HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",	patch_generic_hdmi),
+HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",	patch_i915_byt_hdmi),
+HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",	patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x808629fb, "Crestline HDMI",	patch_generic_hdmi),
 /* special ID for generic HDMI */
 HDA_CODEC_ENTRY(HDA_CODEC_ID_GENERIC_HDMI, "Generic HDMI", patch_generic_hdmi),
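
The patch_hdmi.c rework replaces the is_haswell_plus()/is_valleyview_plus() checks scattered through the generic parser with one optional ops hook: platform-specific patch functions install spec->ops.pin_cvt_fixup, and generic code only ever calls it through a small skeleton wrapper. The shape of that pattern, reduced to a standalone example with abbreviated types (not the real driver structures):

#include <stddef.h>
#include <stdio.h>

struct per_pin;					/* opaque in this sketch */

struct hdmi_ops {
	/* optional hook; NULL means "no platform-specific fixup" */
	void (*pin_cvt_fixup)(struct per_pin *per_pin, int cvt_nid);
};

struct hdmi_spec {
	struct hdmi_ops ops;
};

/* skeleton caller: generic code never checks the platform, only the hook */
static void pin_cvt_fixup(struct hdmi_spec *spec, struct per_pin *per_pin,
			  int cvt_nid)
{
	if (spec->ops.pin_cvt_fixup)
		spec->ops.pin_cvt_fixup(per_pin, cvt_nid);
}

/* a platform patch (the i915 one, say) just fills in the hook */
static void i915_fixup(struct per_pin *per_pin, int cvt_nid)
{
	printf("platform fixup for converter %d\n", cvt_nid);
}

int main(void)
{
	struct hdmi_spec generic = { .ops = { .pin_cvt_fixup = NULL } };
	struct hdmi_spec intel   = { .ops = { .pin_cvt_fixup = i915_fixup } };

	pin_cvt_fixup(&generic, NULL, 2);	/* silently does nothing */
	pin_cvt_fixup(&intel, NULL, 2);		/* invokes the platform hook */
	return 0;
}
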
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4f5ca0b..002f153 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -342,6 +342,11 @@
 	case 0x10ec0293:
 		alc_update_coef_idx(codec, 0xa, 1<<13, 0);
 		break;
+	case 0x10ec0234:
+	case 0x10ec0274:
+	case 0x10ec0294:
+		alc_update_coef_idx(codec, 0x10, 1<<15, 0);
+		break;
 	case 0x10ec0662:
 		if ((coef & 0x00f0) == 0x0030)
 			alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
@@ -2647,6 +2652,7 @@
 	ALC269_TYPE_ALC255,
 	ALC269_TYPE_ALC256,
 	ALC269_TYPE_ALC225,
+	ALC269_TYPE_ALC294,
 };
 
 /*
@@ -2677,6 +2683,7 @@
 	case ALC269_TYPE_ALC255:
 	case ALC269_TYPE_ALC256:
 	case ALC269_TYPE_ALC225:
+	case ALC269_TYPE_ALC294:
 		ssids = alc269_ssids;
 		break;
 	default:
@@ -4759,6 +4766,8 @@
 	ALC255_FIXUP_DELL_SPK_NOISE,
 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
 	ALC280_FIXUP_HP_HEADSET_MIC,
+	ALC221_FIXUP_HP_FRONT_MIC,
+	ALC292_FIXUP_TPT460,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5401,6 +5410,19 @@
 		.chained = true,
 		.chain_id = ALC269_FIXUP_HEADSET_MIC,
 	},
+	[ALC221_FIXUP_HP_FRONT_MIC] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x02a19020 }, /* Front Mic */
+			{ }
+		},
+	},
+	[ALC292_FIXUP_TPT460] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_tpt440_dock,
+		.chained = true,
+		.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5434,6 +5456,7 @@
 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+	SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5506,6 +5529,7 @@
 	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
 	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5554,7 +5578,7 @@
 	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
-	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -5567,6 +5591,7 @@
 	SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
+	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -5649,6 +5674,7 @@
 	{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
 	{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
 	{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
+	{.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
 	{}
 };
 #define ALC225_STANDARD_PINS \
@@ -6009,6 +6035,11 @@
 	case 0x10ec0225:
 		spec->codec_variant = ALC269_TYPE_ALC225;
 		break;
+	case 0x10ec0234:
+	case 0x10ec0274:
+	case 0x10ec0294:
+		spec->codec_variant = ALC269_TYPE_ALC294;
+		break;
 	}
 
 	if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -6406,6 +6437,8 @@
 	ALC668_FIXUP_AUTO_MUTE,
 	ALC668_FIXUP_DELL_DISABLE_AAMIX,
 	ALC668_FIXUP_DELL_XPS13,
+	ALC662_FIXUP_ASUS_Nx50,
+	ALC668_FIXUP_ASUS_Nx51,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -6646,6 +6679,21 @@
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_bass_chmap,
 	},
+	[ALC662_FIXUP_ASUS_Nx50] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_auto_mute_via_amp,
+		.chained = true,
+		.chain_id = ALC662_FIXUP_BASS_1A
+	},
+	[ALC668_FIXUP_ASUS_Nx51] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{0x1a, 0x90170151}, /* bass speaker */
+			{}
+		},
+		.chained = true,
+		.chain_id = ALC662_FIXUP_BASS_CHMAP,
+	},
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6668,10 +6716,14 @@
 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
-	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
+	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
+	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
 	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
+	SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
+	SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
 	SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
 	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
@@ -6902,6 +6954,7 @@
 	HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
@@ -6912,6 +6965,7 @@
 	HDA_CODEC_ENTRY(0x10ec0269, "ALC269", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0270, "ALC270", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0272, "ALC272", patch_alc662),
+	HDA_CODEC_ENTRY(0x10ec0274, "ALC274", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0275, "ALC275", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0276, "ALC276", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0280, "ALC280", patch_alc269),
@@ -6924,6 +6978,7 @@
 	HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
 	HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
 	HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
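
The Realtek quirk entries added above are keyed on the PCI subsystem vendor/device ID of each machine, and the alc269_fixup_model[] name table ("tpt440", "tpt460", ...) additionally lets a fixup be selected explicitly (typically via the driver's model override), which helps when validating a quirk on hardware whose SSID is not yet listed. As a rough, self-contained sketch of how such an SSID-keyed lookup behaves (an illustration only, not the kernel's snd_pci_quirk_lookup() implementation):

struct ssid_quirk {
	unsigned short subvendor;
	unsigned short subdevice;	/* 0 is treated as a wildcard here */
	const char *name;
	int fixup_id;
};

/* Walk the table and return the fixup for the first matching SSID. */
static int ssid_to_fixup(const struct ssid_quirk *tbl, unsigned int n,
			 unsigned short subvendor, unsigned short subdevice)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (tbl[i].subvendor == subvendor &&
		    (tbl[i].subdevice == 0 || tbl[i].subdevice == subdevice))
			return tbl[i].fixup_id;	/* e.g. ALC292_FIXUP_TPT460 */
	}
	return -1;	/* no quirk: fall back to BIOS pin defaults */
}
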
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 59ab6ce..f0955fd 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -13,7 +13,7 @@
 static bool is_thinkpad(struct hda_codec *codec)
 {
 	return (codec->core.subsystem_id >> 16 == 0x17aa) &&
-	       (acpi_dev_present("LEN0068") || acpi_dev_present("IBM0068"));
+	       (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068"));
 }
 
 static void update_tpacpi_mute_led(void *private_data, int enabled)
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 8151318..9720a30 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -42,12 +42,6 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
-#ifdef CONFIG_KVM_GUEST
-#include <linux/kvm_para.h>
-#else
-#define kvm_para_available() (0)
-#endif
-
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455");
 MODULE_LICENSE("GPL");
@@ -2972,25 +2966,17 @@
 		goto fini;
 	}
 
-	/* detect KVM and Parallels virtual environments */
-	result = kvm_para_available();
-#ifdef X86_FEATURE_HYPERVISOR
-	result = result || boot_cpu_has(X86_FEATURE_HYPERVISOR);
-#endif
-	if (!result)
-		goto fini;
-
 	/* check for known (emulated) devices */
+	result = 0;
 	if (pci->subsystem_vendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
 	    pci->subsystem_device == PCI_SUBDEVICE_ID_QEMU) {
 		/* KVM emulated sound, PCI SSID: 1af4:1100 */
 		msg = "enable KVM";
+		result = 1;
 	} else if (pci->subsystem_vendor == 0x1ab8) {
 		/* Parallels VM emulated sound, PCI SSID: 1ab8:xxxx */
 		msg = "enable Parallels VM";
-	} else {
-		msg = "disable (unknown or VT-d) VM";
-		result = 0;
+		result = 1;
 	}
 
 fini:
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index f3d6202..a80684b 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -644,7 +644,7 @@
 		if (err < 0)
 			return err;
 
-		if (current_state == state)
+		if (!err && current_state == state)
 			return 0;
 
 		mdelay(1);
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index c5194f5..d7e71f3 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -1341,5 +1341,6 @@
 	}
 
 	pcxhr_msg_thread(mgr);
+	mutex_unlock(&mgr->lock);
 	return IRQ_HANDLED;
 }
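
The one-line fix above restores lock balance in the interrupt thread: judging from the change, mgr->lock is taken earlier in the handler, so it has to be released before the IRQ_HANDLED return as well. A minimal, hedged sketch of the pattern (names are illustrative, not the pcxhr code):

#include <linux/interrupt.h>
#include <linux/mutex.h>

struct example_mgr {
	struct mutex lock;
};

/* Threaded IRQ handler: every path that reaches a return must have dropped
 * the mutex it acquired, otherwise the next interrupt deadlocks.
 */
static irqreturn_t example_threaded_irq(int irq, void *dev_id)
{
	struct example_mgr *mgr = dev_id;

	mutex_lock(&mgr->lock);
	/* ... drain the message FIFO, update stream state ... */
	mutex_unlock(&mgr->lock);

	return IRQ_HANDLED;
}
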
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 2768970..1267e1a 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -652,7 +652,7 @@
 		rcmr =	  SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
 			| SSC_BF(RCMR_STTDLY, 1)
 			| SSC_BF(RCMR_START, SSC_START_RISING_RF)
-			| SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
+			| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
 			| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
 			| SSC_BF(RCMR_CKS, SSC_CKS_DIV);
 
@@ -692,7 +692,7 @@
 		rcmr =	  SSC_BF(RCMR_PERIOD, 0)
 			| SSC_BF(RCMR_STTDLY, START_DELAY)
 			| SSC_BF(RCMR_START, SSC_START_RISING_RF)
-			| SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
+			| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
 			| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
 			| SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
 					   SSC_CKS_PIN : SSC_CKS_CLOCK);
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index 5741c0a..b5d1caa 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -206,8 +206,8 @@
 	stype = substream->stream;
 	pcd = to_dmadata(substream);
 
-	DBG("runtime->dma_area = 0x%08lx dma_addr_t = 0x%08lx dma_size = %d "
-	    "runtime->min_align %d\n",
+	DBG("runtime->dma_area = 0x%08lx dma_addr_t = 0x%08lx dma_size = %zu "
+	    "runtime->min_align %lu\n",
 		(unsigned long)runtime->dma_area,
 		(unsigned long)runtime->dma_addr, runtime->dma_bytes,
 		runtime->min_align);
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index 1c1f221..6ba2049 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -259,6 +259,9 @@
 	case SNDRV_PCM_FORMAT_S16_LE:
 		data_length = 16;
 		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		data_length = 24;
+		break;
 	case SNDRV_PCM_FORMAT_S32_LE:
 		data_length = 32;
 		break;
@@ -273,13 +276,20 @@
 		/* otherwise calculate a fitting block ratio */
 		bclk_ratio = 2 * data_length;
 
-	/* set target clock rate*/
-	clk_set_rate(dev->clk, sampling_rate * bclk_ratio);
+	/* Clock should only be set up here if CPU is clock master */
+	switch (dev->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+	case SND_SOC_DAIFMT_CBS_CFM:
+		clk_set_rate(dev->clk, sampling_rate * bclk_ratio);
+		break;
+	default:
+		break;
+	}
 
 	/* Setup the frame format */
 	format = BCM2835_I2S_CHEN;
 
-	if (data_length > 24)
+	if (data_length >= 24)
 		format |= BCM2835_I2S_CHWEX;
 
 	format |= BCM2835_I2S_CHWID((data_length-8)&0xf);
@@ -570,6 +580,7 @@
 		.channels_max = 2,
 		.rates =	SNDRV_PCM_RATE_8000_192000,
 		.formats =	SNDRV_PCM_FMTBIT_S16_LE
+				| SNDRV_PCM_FMTBIT_S24_LE
 				| SNDRV_PCM_FMTBIT_S32_LE
 		},
 	.capture = {
@@ -577,6 +588,7 @@
 		.channels_max = 2,
 		.rates =	SNDRV_PCM_RATE_8000_192000,
 		.formats =	SNDRV_PCM_FMTBIT_S16_LE
+				| SNDRV_PCM_FMTBIT_S24_LE
 				| SNDRV_PCM_FMTBIT_S32_LE
 		},
 	.ops = &bcm2835_i2s_dai_ops,
@@ -678,6 +690,15 @@
 	dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].maxburst = 2;
 	dev->dma_data[SNDRV_PCM_STREAM_CAPTURE].maxburst = 2;
 
+	/*
+	 * Set the PACK flag to enable S16_LE support (2 S16_LE values
+	 * packed into 32-bit transfers).
+	 */
+	dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].flags =
+		SND_DMAENGINE_PCM_DAI_FLAG_PACK;
+	dev->dma_data[SNDRV_PCM_STREAM_CAPTURE].flags =
+		SND_DMAENGINE_PCM_DAI_FLAG_PACK;
+
 	/* BCLK ratio - use default */
 	dev->bclk_ratio = 0;
 
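Since the sample clock is now programmed only when the CPU DAI is bit/frame clock master, the machine driver has to declare the clocking relationship through the DAI format before streaming. A minimal sketch, assuming an I2S link where the bcm2835 side is clock master (the callback name and wiring are illustrative):

#include <sound/soc.h>

/* Machine-driver DAI-link init: pick I2S, normal clock polarity, and make
 * the codec bit/frame clock slave so the CBS_CFS branch above programs
 * the bcm2835 clock.
 */
static int bcm2835_board_init(struct snd_soc_pcm_runtime *rtd)
{
	return snd_soc_dai_set_fmt(rtd->cpu_dai,
				   SND_SOC_DAIFMT_I2S |
				   SND_SOC_DAIFMT_NB_NF |
				   SND_SOC_DAIFMT_CBS_CFS);
}
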
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 649e92a..b3afae9 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -88,12 +88,14 @@
 	select SND_SOC_MC13783 if MFD_MC13XXX
 	select SND_SOC_ML26124 if I2C
 	select SND_SOC_NAU8825 if I2C
+	select SND_SOC_HDMI_CODEC
 	select SND_SOC_PCM1681 if I2C
 	select SND_SOC_PCM179X_I2C if I2C
 	select SND_SOC_PCM179X_SPI if SPI_MASTER
 	select SND_SOC_PCM3008
 	select SND_SOC_PCM3168A_I2C if I2C
 	select SND_SOC_PCM3168A_SPI if SPI_MASTER
+	select SND_SOC_PCM5102A
 	select SND_SOC_PCM512x_I2C if I2C
 	select SND_SOC_PCM512x_SPI if SPI_MASTER
 	select SND_SOC_RT286 if I2C
@@ -477,6 +479,11 @@
 config SND_SOC_DMIC
 	tristate
 
+config SND_SOC_HDMI_CODEC
+       tristate
+       select SND_PCM_ELD
+       select SND_PCM_IEC958
+
 config SND_SOC_ES8328
 	tristate "Everest Semi ES8328 CODEC"
 
@@ -575,6 +582,9 @@
 	select SND_SOC_PCM3168A
 	select REGMAP_SPI
 
+config SND_SOC_PCM5102A
+	tristate
+
 config SND_SOC_PCM512x
 	tristate
 
@@ -629,6 +639,7 @@
 
 config SND_SOC_RT5616
 	tristate "Realtek RT5616 CODEC"
+	depends on I2C
 
 config SND_SOC_RT5631
 	tristate "Realtek ALC5631/RT5631 CODEC"
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 185a712..b7b9941 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -81,6 +81,7 @@
 snd-soc-mc13783-objs := mc13783.o
 snd-soc-ml26124-objs := ml26124.o
 snd-soc-nau8825-objs := nau8825.o
+snd-soc-hdmi-codec-objs := hdmi-codec.o
 snd-soc-pcm1681-objs := pcm1681.o
 snd-soc-pcm179x-codec-objs := pcm179x.o
 snd-soc-pcm179x-i2c-objs := pcm179x-i2c.o
@@ -89,6 +90,7 @@
 snd-soc-pcm3168a-objs := pcm3168a.o
 snd-soc-pcm3168a-i2c-objs := pcm3168a-i2c.o
 snd-soc-pcm3168a-spi-objs := pcm3168a-spi.o
+snd-soc-pcm5102a-objs := pcm5102a.o
 snd-soc-pcm512x-objs := pcm512x.o
 snd-soc-pcm512x-i2c-objs := pcm512x-i2c.o
 snd-soc-pcm512x-spi-objs := pcm512x-spi.o
@@ -290,6 +292,7 @@
 obj-$(CONFIG_SND_SOC_MC13783)	+= snd-soc-mc13783.o
 obj-$(CONFIG_SND_SOC_ML26124)	+= snd-soc-ml26124.o
 obj-$(CONFIG_SND_SOC_NAU8825)   += snd-soc-nau8825.o
+obj-$(CONFIG_SND_SOC_HDMI_CODEC)	+= snd-soc-hdmi-codec.o
 obj-$(CONFIG_SND_SOC_PCM1681)	+= snd-soc-pcm1681.o
 obj-$(CONFIG_SND_SOC_PCM179X)	+= snd-soc-pcm179x-codec.o
 obj-$(CONFIG_SND_SOC_PCM179X_I2C)	+= snd-soc-pcm179x-i2c.o
@@ -298,6 +301,7 @@
 obj-$(CONFIG_SND_SOC_PCM3168A)	+= snd-soc-pcm3168a.o
 obj-$(CONFIG_SND_SOC_PCM3168A_I2C)	+= snd-soc-pcm3168a-i2c.o
 obj-$(CONFIG_SND_SOC_PCM3168A_SPI)	+= snd-soc-pcm3168a-spi.o
+obj-$(CONFIG_SND_SOC_PCM5102A)	+= snd-soc-pcm5102a.o
 obj-$(CONFIG_SND_SOC_PCM512x)	+= snd-soc-pcm512x.o
 obj-$(CONFIG_SND_SOC_PCM512x_I2C)	+= snd-soc-pcm512x-i2c.o
 obj-$(CONFIG_SND_SOC_PCM512x_SPI)	+= snd-soc-pcm512x-spi.o
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index cda27c2..1ee8506 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -608,9 +608,7 @@
 
 	of_property_read_string(np, "clock-output-names", &clk_name);
 
-	clk = clk_register_fixed_rate(dev, clk_name, parent_clk_name,
-				      (parent_clk_name) ? 0 : CLK_IS_ROOT,
-				      rate);
+	clk = clk_register_fixed_rate(dev, clk_name, parent_clk_name, 0, rate);
 	if (!IS_ERR(clk))
 		of_clk_add_provider(np, of_clk_src_simple_get, clk);
 
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 92d22a0..664a8c0 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -221,6 +221,8 @@
 
 	switch (arizona->type) {
 	case WM8997:
+	case CS47L24:
+	case WM1831:
 		break;
 	default:
 		ret = snd_soc_dapm_new_controls(dapm, &arizona_spkr, 1);
@@ -249,6 +251,18 @@
 }
 EXPORT_SYMBOL_GPL(arizona_init_spk);
 
+int arizona_free_spk(struct snd_soc_codec *codec)
+{
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->arizona;
+
+	arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona);
+	arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_free_spk);
+
 static const struct snd_soc_dapm_route arizona_mono_routes[] = {
 	{ "OUT1R", NULL, "OUT1L" },
 	{ "OUT2R", NULL, "OUT2L" },
@@ -1122,7 +1136,6 @@
 		   int event)
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-	unsigned int mask = 0x3 << w->shift;
 	unsigned int val;
 
 	switch (event) {
@@ -1136,7 +1149,7 @@
 		return 0;
 	}
 
-	snd_soc_update_bits(codec, ARIZONA_CLOCK_CONTROL, mask, val);
+	snd_soc_write(codec, ARIZONA_CLOCK_CONTROL, val);
 
 	return 0;
 }
@@ -2035,7 +2048,21 @@
 			init_ratio, Fref, refdiv);
 
 	while (div <= ARIZONA_FLL_MAX_REFDIV) {
-		for (ratio = init_ratio; ratio <= ARIZONA_FLL_MAX_FRATIO;
+		/* start from init_ratio because this may already give a
+		 * fractional N.K
+		 */
+		for (ratio = init_ratio; ratio > 0; ratio--) {
+			if (target % (ratio * Fref)) {
+				cfg->refdiv = refdiv;
+				cfg->fratio = ratio - 1;
+				arizona_fll_dbg(fll,
+					"pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n",
+					Fref, refdiv, div, ratio);
+				return ratio;
+			}
+		}
+
+		for (ratio = init_ratio + 1; ratio <= ARIZONA_FLL_MAX_FRATIO;
 		     ratio++) {
 			if ((ARIZONA_FLL_VCO_CORNER / 2) /
 			    (fll->vco_mult * ratio) < Fref) {
@@ -2061,17 +2088,6 @@
 			}
 		}
 
-		for (ratio = init_ratio - 1; ratio > 0; ratio--) {
-			if (target % (ratio * Fref)) {
-				cfg->refdiv = refdiv;
-				cfg->fratio = ratio - 1;
-				arizona_fll_dbg(fll,
-					"pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n",
-					Fref, refdiv, div, ratio);
-				return ratio;
-			}
-		}
-
 		div *= 2;
 		Fref /= 2;
 		refdiv++;
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 1ea8e4e..ce0531b 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -307,6 +307,8 @@
 extern int arizona_init_gpio(struct snd_soc_codec *codec);
 extern int arizona_init_mono(struct snd_soc_codec *codec);
 
+extern int arizona_free_spk(struct snd_soc_codec *codec);
+
 extern int arizona_init_dai(struct arizona_priv *priv, int dai);
 
 int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 44c30fe..287d137 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -274,7 +274,9 @@
 	if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0)
 		pdata->sdout_share = val;
 
-	of_property_read_u32(np, "cirrus,boost-manager", &val);
+	if (of_property_read_u32(np, "cirrus,boost-manager", &val))
+		val = -1u;
+
 	switch (val) {
 	case CS35L32_BOOST_MGR_AUTO:
 	case CS35L32_BOOST_MGR_AUTO_AUDIO:
@@ -282,13 +284,15 @@
 	case CS35L32_BOOST_MGR_FIXED:
 		pdata->boost_mng = val;
 		break;
+	case -1u:
 	default:
 		dev_err(&i2c_client->dev,
 			"Wrong cirrus,boost-manager DT value %d\n", val);
 		pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS;
 	}
 
-	of_property_read_u32(np, "cirrus,sdout-datacfg", &val);
+	if (of_property_read_u32(np, "cirrus,sdout-datacfg", &val))
+		val = -1u;
 	switch (val) {
 	case CS35L32_DATA_CFG_LR_VP:
 	case CS35L32_DATA_CFG_LR_STAT:
@@ -296,13 +300,15 @@
 	case CS35L32_DATA_CFG_LR_VPSTAT:
 		pdata->sdout_datacfg = val;
 		break;
+	case -1u:
 	default:
 		dev_err(&i2c_client->dev,
 			"Wrong cirrus,sdout-datacfg DT value %d\n", val);
 		pdata->sdout_datacfg = CS35L32_DATA_CFG_LR;
 	}
 
-	of_property_read_u32(np, "cirrus,battery-threshold", &val);
+	if (of_property_read_u32(np, "cirrus,battery-threshold", &val))
+		val = -1u;
 	switch (val) {
 	case CS35L32_BATT_THRESH_3_1V:
 	case CS35L32_BATT_THRESH_3_2V:
@@ -310,13 +316,15 @@
 	case CS35L32_BATT_THRESH_3_4V:
 		pdata->batt_thresh = val;
 		break;
+	case -1u:
 	default:
 		dev_err(&i2c_client->dev,
 			"Wrong cirrus,battery-threshold DT value %d\n", val);
 		pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V;
 	}
 
-	of_property_read_u32(np, "cirrus,battery-recovery", &val);
+	if (of_property_read_u32(np, "cirrus,battery-recovery", &val))
+		val = -1u;
 	switch (val) {
 	case CS35L32_BATT_RECOV_3_1V:
 	case CS35L32_BATT_RECOV_3_2V:
@@ -326,6 +334,7 @@
 	case CS35L32_BATT_RECOV_3_6V:
 		pdata->batt_recov = val;
 		break;
+	case -1u:
 	default:
 		dev_err(&i2c_client->dev,
 			"Wrong cirrus,battery-recovery DT value %d\n", val);
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 7cd5f76..eec1ff8 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -56,7 +56,7 @@
 	u8 iface;
 	u8 iface_fmt;
 	u8 iface_inv;
-#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+#if IS_ENABLED(CONFIG_INPUT)
 	struct input_dev *beep;
 	struct work_struct beep_work;
 	int beep_rate;
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 576087b..5ec5a68 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -807,6 +807,9 @@
 	{ "IN2L PGA", NULL, "IN2L" },
 	{ "IN2R PGA", NULL, "IN2R" },
 
+	{ "Audio Trace DSP", NULL, "DSP2" },
+	{ "Audio Trace DSP", NULL, "SYSCLK" },
+
 	ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
 	ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
 
@@ -1016,6 +1019,27 @@
 			.formats = CS47L24_FORMATS,
 		},
 	},
+	{
+		.name = "cs47l24-cpu-trace",
+		.capture = {
+			.stream_name = "Audio Trace CPU",
+			.channels_min = 1,
+			.channels_max = 6,
+			.rates = CS47L24_RATES,
+			.formats = CS47L24_FORMATS,
+		},
+		.compress_new = snd_soc_new_compress,
+	},
+	{
+		.name = "cs47l24-dsp-trace",
+		.capture = {
+			.stream_name = "Audio Trace DSP",
+			.channels_min = 1,
+			.channels_max = 6,
+			.rates = CS47L24_RATES,
+			.formats = CS47L24_FORMATS,
+		},
+	},
 };
 
 static int cs47l24_open(struct snd_compr_stream *stream)
@@ -1027,6 +1051,8 @@
 
 	if (strcmp(rtd->codec_dai->name, "cs47l24-dsp-voicectrl") == 0) {
 		n_adsp = 2;
+	} else if (strcmp(rtd->codec_dai->name, "cs47l24-dsp-trace") == 0) {
+		n_adsp = 1;
 	} else {
 		dev_err(arizona->dev,
 			"No suitable compressed stream for DAI '%s'\n",
@@ -1041,10 +1067,16 @@
 {
 	struct cs47l24_priv *priv = data;
 	struct arizona *arizona = priv->core.arizona;
-	int ret;
+	int serviced = 0;
+	int i, ret;
 
-	ret = wm_adsp_compr_handle_irq(&priv->core.adsp[2]);
-	if (ret == -ENODEV) {
+	for (i = 1; i <= 2; ++i) {
+		ret = wm_adsp_compr_handle_irq(&priv->core.adsp[i]);
+		if (ret != -ENODEV)
+			serviced++;
+	}
+
+	if (!serviced) {
 		dev_err(arizona->dev, "Spurious compressed data IRQ\n");
 		return IRQ_NONE;
 	}
@@ -1108,6 +1140,9 @@
 	priv->core.arizona->dapm = NULL;
 
 	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+
+	arizona_free_spk(codec);
+
 	return 0;
 }
 
@@ -1157,6 +1192,7 @@
 static struct snd_soc_platform_driver cs47l24_compr_platform = {
 	.compr_ops = &cs47l24_compr_ops,
 };
+
 static int cs47l24_probe(struct platform_device *pdev)
 {
 	struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
@@ -1225,9 +1261,9 @@
 		dev_err(&pdev->dev, "Failed to register platform: %d\n", ret);
 		return ret;
 	}
+
 	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_cs47l24,
 				      cs47l24_dai, ARRAY_SIZE(cs47l24_dai));
-
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
 		snd_soc_unregister_platform(&pdev->dev);
@@ -1238,10 +1274,15 @@
 
 static int cs47l24_remove(struct platform_device *pdev)
 {
+	struct cs47l24_priv *cs47l24 = platform_get_drvdata(pdev);
+
 	snd_soc_unregister_platform(&pdev->dev);
 	snd_soc_unregister_codec(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
+	wm_adsp2_remove(&cs47l24->core.adsp[1]);
+	wm_adsp2_remove(&cs47l24->core.adsp[2]);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 7278f93..e5527bc 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -726,6 +726,68 @@
 
 
 /*
+ * DAPM Events
+ */
+
+static int da7213_dai_event(struct snd_soc_dapm_widget *w,
+			    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct da7213_priv *da7213 = snd_soc_codec_get_drvdata(codec);
+	u8 pll_ctrl, pll_status;
+	int i = 0;
+	bool srm_lock = false;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Enable DAI clks for master mode */
+		if (da7213->master)
+			snd_soc_update_bits(codec, DA7213_DAI_CLK_MODE,
+					    DA7213_DAI_CLK_EN_MASK,
+					    DA7213_DAI_CLK_EN_MASK);
+
+		/* PC synchronised to DAI */
+		snd_soc_update_bits(codec, DA7213_PC_COUNT,
+				    DA7213_PC_FREERUN_MASK, 0);
+
+		/* Slave mode, if SRM not enabled no need for status checks */
+		pll_ctrl = snd_soc_read(codec, DA7213_PLL_CTRL);
+		if (!(pll_ctrl & DA7213_PLL_SRM_EN))
+			return 0;
+
+		/* Check SRM has locked */
+		do {
+			pll_status = snd_soc_read(codec, DA7213_PLL_STATUS);
+			if (pll_status & DA7219_PLL_SRM_LOCK) {
+				srm_lock = true;
+			} else {
+				++i;
+				msleep(50);
+			}
+		} while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock));
+
+		if (!srm_lock)
+			dev_warn(codec->dev, "SRM failed to lock\n");
+
+		return 0;
+	case SND_SOC_DAPM_POST_PMD:
+		/* PC free-running */
+		snd_soc_update_bits(codec, DA7213_PC_COUNT,
+				    DA7213_PC_FREERUN_MASK,
+				    DA7213_PC_FREERUN_MASK);
+
+		/* Disable DAI clks if in master mode */
+		if (da7213->master)
+			snd_soc_update_bits(codec, DA7213_DAI_CLK_MODE,
+					    DA7213_DAI_CLK_EN_MASK, 0);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+
+/*
  * DAPM widgets
  */
 
@@ -736,7 +798,8 @@
 
 	/* Use a supply here as this controls both input & output DAIs */
 	SND_SOC_DAPM_SUPPLY("DAI", DA7213_DAI_CTRL, DA7213_DAI_EN_SHIFT,
-			    DA7213_NO_INVERT, NULL, 0),
+			    DA7213_NO_INVERT, da7213_dai_event,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 
 	/*
 	 * Input
@@ -1143,11 +1206,9 @@
 	/* Set master/slave mode */
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBM_CFM:
-		dai_clk_mode |= DA7213_DAI_CLK_EN_MASTER_MODE;
 		da7213->master = true;
 		break;
 	case SND_SOC_DAIFMT_CBS_CFS:
-		dai_clk_mode |= DA7213_DAI_CLK_EN_SLAVE_MODE;
 		da7213->master = false;
 		break;
 	default:
@@ -1281,28 +1342,28 @@
 	pll_ctrl = 0;
 
 	/* Workout input divider based on MCLK rate */
-	if ((da7213->mclk_rate == 32768) && (source == DA7213_SYSCLK_PLL)) {
+	if (da7213->mclk_rate == 32768) {
 		/* 32KHz PLL Mode */
-		indiv_bits = DA7213_PLL_INDIV_10_20_MHZ;
-		indiv = DA7213_PLL_INDIV_10_20_MHZ_VAL;
+		indiv_bits = DA7213_PLL_INDIV_9_TO_18_MHZ;
+		indiv = DA7213_PLL_INDIV_9_TO_18_MHZ_VAL;
 		freq_ref = 3750000;
 		pll_ctrl |= DA7213_PLL_32K_MODE;
 	} else {
 		/* 5 - 54MHz MCLK */
 		if (da7213->mclk_rate < 5000000) {
 			goto pll_err;
-		} else if (da7213->mclk_rate <= 10000000) {
-			indiv_bits = DA7213_PLL_INDIV_5_10_MHZ;
-			indiv = DA7213_PLL_INDIV_5_10_MHZ_VAL;
-		} else if (da7213->mclk_rate <= 20000000) {
-			indiv_bits = DA7213_PLL_INDIV_10_20_MHZ;
-			indiv = DA7213_PLL_INDIV_10_20_MHZ_VAL;
-		} else if (da7213->mclk_rate <= 40000000) {
-			indiv_bits = DA7213_PLL_INDIV_20_40_MHZ;
-			indiv = DA7213_PLL_INDIV_20_40_MHZ_VAL;
+		} else if (da7213->mclk_rate <= 9000000) {
+			indiv_bits = DA7213_PLL_INDIV_5_TO_9_MHZ;
+			indiv = DA7213_PLL_INDIV_5_TO_9_MHZ_VAL;
+		} else if (da7213->mclk_rate <= 18000000) {
+			indiv_bits = DA7213_PLL_INDIV_9_TO_18_MHZ;
+			indiv = DA7213_PLL_INDIV_9_TO_18_MHZ_VAL;
+		} else if (da7213->mclk_rate <= 36000000) {
+			indiv_bits = DA7213_PLL_INDIV_18_TO_36_MHZ;
+			indiv = DA7213_PLL_INDIV_18_TO_36_MHZ_VAL;
 		} else if (da7213->mclk_rate <= 54000000) {
-			indiv_bits = DA7213_PLL_INDIV_40_54_MHZ;
-			indiv = DA7213_PLL_INDIV_40_54_MHZ_VAL;
+			indiv_bits = DA7213_PLL_INDIV_36_TO_54_MHZ;
+			indiv = DA7213_PLL_INDIV_36_TO_54_MHZ_VAL;
 		} else {
 			goto pll_err;
 		}
@@ -1547,6 +1608,10 @@
 	/* Default to using SRM for slave mode */
 	da7213->srm_en = true;
 
+	/* Default PC counter to free-running */
+	snd_soc_update_bits(codec, DA7213_PC_COUNT, DA7213_PC_FREERUN_MASK,
+			    DA7213_PC_FREERUN_MASK);
+
 	/* Enable all Gain Ramps */
 	snd_soc_update_bits(codec, DA7213_AUX_L_CTRL,
 			    DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN);
diff --git a/sound/soc/codecs/da7213.h b/sound/soc/codecs/da7213.h
index 030fd69..fbb7a35 100644
--- a/sound/soc/codecs/da7213.h
+++ b/sound/soc/codecs/da7213.h
@@ -142,6 +142,9 @@
  * Bit fields
  */
 
+/* DA7213_PLL_STATUS = 0x03 */
+#define DA7219_PLL_SRM_LOCK					(0x1 << 1)
+
 /* DA7213_SR = 0x22 */
 #define DA7213_SR_8000						(0x1 << 0)
 #define DA7213_SR_11025						(0x2 << 0)
@@ -160,10 +163,10 @@
 #define DA7213_VMID_EN						(0x1 << 7)
 
 /* DA7213_PLL_CTRL = 0x27 */
-#define DA7213_PLL_INDIV_5_10_MHZ				(0x0 << 2)
-#define DA7213_PLL_INDIV_10_20_MHZ				(0x1 << 2)
-#define DA7213_PLL_INDIV_20_40_MHZ				(0x2 << 2)
-#define DA7213_PLL_INDIV_40_54_MHZ				(0x3 << 2)
+#define DA7213_PLL_INDIV_5_TO_9_MHZ				(0x0 << 2)
+#define DA7213_PLL_INDIV_9_TO_18_MHZ				(0x1 << 2)
+#define DA7213_PLL_INDIV_18_TO_36_MHZ				(0x2 << 2)
+#define DA7213_PLL_INDIV_36_TO_54_MHZ				(0x3 << 2)
 #define DA7213_PLL_INDIV_MASK					(0x3 << 2)
 #define DA7213_PLL_MCLK_SQR_EN					(0x1 << 4)
 #define DA7213_PLL_32K_MODE					(0x1 << 5)
@@ -178,8 +181,6 @@
 #define DA7213_DAI_BCLKS_PER_WCLK_MASK				(0x3 << 0)
 #define DA7213_DAI_CLK_POL_INV					(0x1 << 2)
 #define DA7213_DAI_WCLK_POL_INV					(0x1 << 3)
-#define DA7213_DAI_CLK_EN_SLAVE_MODE				(0x0 << 7)
-#define DA7213_DAI_CLK_EN_MASTER_MODE				(0x1 << 7)
 #define DA7213_DAI_CLK_EN_MASK					(0x1 << 7)
 
 /* DA7213_DAI_CTRL = 0x29 */
@@ -412,6 +413,9 @@
 #define DA7213_DMIC_CLK_RATE_SHIFT				2
 #define DA7213_DMIC_CLK_RATE_MASK				(0x1 << 2)
 
+/* DA7213_PC_COUNT = 0x94 */
+#define DA7213_PC_FREERUN_MASK					(0x1 << 0)
+
 /* DA7213_DIG_CTRL = 0x99 */
 #define DA7213_DAC_L_INV_SHIFT					3
 #define DA7213_DAC_R_INV_SHIFT					7
@@ -495,15 +499,16 @@
 #define DA7213_ALC_AVG_ITERATIONS	5
 
 /* PLL related */
-#define DA7213_SYSCLK_MCLK		0
-#define DA7213_SYSCLK_PLL		1
-#define DA7213_PLL_FREQ_OUT_90316800	90316800
-#define DA7213_PLL_FREQ_OUT_98304000	98304000
-#define DA7213_PLL_FREQ_OUT_94310400	94310400
-#define DA7213_PLL_INDIV_5_10_MHZ_VAL	2
-#define DA7213_PLL_INDIV_10_20_MHZ_VAL	4
-#define DA7213_PLL_INDIV_20_40_MHZ_VAL	8
-#define DA7213_PLL_INDIV_40_54_MHZ_VAL	16
+#define DA7213_SYSCLK_MCLK			0
+#define DA7213_SYSCLK_PLL			1
+#define DA7213_PLL_FREQ_OUT_90316800		90316800
+#define DA7213_PLL_FREQ_OUT_98304000		98304000
+#define DA7213_PLL_FREQ_OUT_94310400		94310400
+#define DA7213_PLL_INDIV_5_TO_9_MHZ_VAL		2
+#define DA7213_PLL_INDIV_9_TO_18_MHZ_VAL	4
+#define DA7213_PLL_INDIV_18_TO_36_MHZ_VAL	8
+#define DA7213_PLL_INDIV_36_TO_54_MHZ_VAL	16
+#define DA7213_SRM_CHECK_RETRIES		8
 
 enum da7213_clk_src {
 	DA7213_CLKSRC_MCLK = 0,
diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
index 93575f2..99ce23e 100644
--- a/sound/soc/codecs/da7218.c
+++ b/sound/soc/codecs/da7218.c
@@ -1868,27 +1868,27 @@
 
 	/* Verify 32KHz, 2MHz - 54MHz MCLK provided, and set input divider */
 	if (da7218->mclk_rate == 32768) {
-		indiv_bits = DA7218_PLL_INDIV_2_5_MHZ;
-		indiv = DA7218_PLL_INDIV_2_10_MHZ_VAL;
+		indiv_bits = DA7218_PLL_INDIV_9_TO_18_MHZ;
+		indiv = DA7218_PLL_INDIV_9_TO_18_MHZ_VAL;
 	} else if (da7218->mclk_rate < 2000000) {
 		dev_err(codec->dev, "PLL input clock %d below valid range\n",
 			da7218->mclk_rate);
 		return -EINVAL;
-	} else if (da7218->mclk_rate <= 5000000) {
-		indiv_bits = DA7218_PLL_INDIV_2_5_MHZ;
-		indiv = DA7218_PLL_INDIV_2_10_MHZ_VAL;
-	} else if (da7218->mclk_rate <= 10000000) {
-		indiv_bits = DA7218_PLL_INDIV_5_10_MHZ;
-		indiv = DA7218_PLL_INDIV_2_10_MHZ_VAL;
-	} else if (da7218->mclk_rate <= 20000000) {
-		indiv_bits = DA7218_PLL_INDIV_10_20_MHZ;
-		indiv = DA7218_PLL_INDIV_10_20_MHZ_VAL;
-	} else if (da7218->mclk_rate <= 40000000) {
-		indiv_bits = DA7218_PLL_INDIV_20_40_MHZ;
-		indiv = DA7218_PLL_INDIV_20_40_MHZ_VAL;
+	} else if (da7218->mclk_rate <= 4500000) {
+		indiv_bits = DA7218_PLL_INDIV_2_TO_4_5_MHZ;
+		indiv = DA7218_PLL_INDIV_2_TO_4_5_MHZ_VAL;
+	} else if (da7218->mclk_rate <= 9000000) {
+		indiv_bits = DA7218_PLL_INDIV_4_5_TO_9_MHZ;
+		indiv = DA7218_PLL_INDIV_4_5_TO_9_MHZ_VAL;
+	} else if (da7218->mclk_rate <= 18000000) {
+		indiv_bits = DA7218_PLL_INDIV_9_TO_18_MHZ;
+		indiv = DA7218_PLL_INDIV_9_TO_18_MHZ_VAL;
+	} else if (da7218->mclk_rate <= 36000000) {
+		indiv_bits = DA7218_PLL_INDIV_18_TO_36_MHZ;
+		indiv = DA7218_PLL_INDIV_18_TO_36_MHZ_VAL;
 	} else if (da7218->mclk_rate <= 54000000) {
-		indiv_bits = DA7218_PLL_INDIV_40_54_MHZ;
-		indiv = DA7218_PLL_INDIV_40_54_MHZ_VAL;
+		indiv_bits = DA7218_PLL_INDIV_36_TO_54_MHZ;
+		indiv = DA7218_PLL_INDIV_36_TO_54_MHZ_VAL;
 	} else {
 		dev_err(codec->dev, "PLL input clock %d above valid range\n",
 			da7218->mclk_rate);
diff --git a/sound/soc/codecs/da7218.h b/sound/soc/codecs/da7218.h
index c2c5904..477cd37 100644
--- a/sound/soc/codecs/da7218.h
+++ b/sound/soc/codecs/da7218.h
@@ -876,15 +876,11 @@
 /* DA7218_PLL_CTRL = 0x91 */
 #define DA7218_PLL_INDIV_SHIFT		0
 #define DA7218_PLL_INDIV_MASK		(0x7 << 0)
-#define DA7218_PLL_INDIV_2_5_MHZ	(0x0 << 0)
-#define DA7218_PLL_INDIV_5_10_MHZ	(0x1 << 0)
-#define DA7218_PLL_INDIV_10_20_MHZ	(0x2 << 0)
-#define DA7218_PLL_INDIV_20_40_MHZ	(0x3 << 0)
-#define DA7218_PLL_INDIV_40_54_MHZ	(0x4 << 0)
-#define DA7218_PLL_INDIV_2_10_MHZ_VAL	2
-#define DA7218_PLL_INDIV_10_20_MHZ_VAL	4
-#define DA7218_PLL_INDIV_20_40_MHZ_VAL	8
-#define DA7218_PLL_INDIV_40_54_MHZ_VAL	16
+#define DA7218_PLL_INDIV_2_TO_4_5_MHZ	(0x0 << 0)
+#define DA7218_PLL_INDIV_4_5_TO_9_MHZ	(0x1 << 0)
+#define DA7218_PLL_INDIV_9_TO_18_MHZ	(0x2 << 0)
+#define DA7218_PLL_INDIV_18_TO_36_MHZ	(0x3 << 0)
+#define DA7218_PLL_INDIV_36_TO_54_MHZ	(0x4 << 0)
 #define DA7218_PLL_MCLK_SQR_EN_SHIFT	4
 #define DA7218_PLL_MCLK_SQR_EN_MASK	(0x1 << 4)
 #define DA7218_PLL_MODE_SHIFT		6
@@ -1336,6 +1332,13 @@
 #define DA7218_PLL_FREQ_OUT_90316	90316800
 #define DA7218_PLL_FREQ_OUT_98304	98304000
 
+/* PLL Frequency Dividers */
+#define DA7218_PLL_INDIV_2_TO_4_5_MHZ_VAL	1
+#define DA7218_PLL_INDIV_4_5_TO_9_MHZ_VAL	2
+#define DA7218_PLL_INDIV_9_TO_18_MHZ_VAL	4
+#define DA7218_PLL_INDIV_18_TO_36_MHZ_VAL	8
+#define DA7218_PLL_INDIV_36_TO_54_MHZ_VAL	16
+
 /* ALC Calibration */
 #define DA7218_ALC_CALIB_DELAY_MIN	2500
 #define DA7218_ALC_CALIB_DELAY_MAX	5000
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 81c0708..5c93899 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -11,6 +11,7 @@
  * option) any later version.
  */
 
+#include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/i2c.h>
 #include <linux/of_device.h>
@@ -1025,7 +1026,7 @@
 	if ((da7219->clk_src == clk_id) && (da7219->mclk_rate == freq))
 		return 0;
 
-	if (((freq < 2000000) && (freq != 32768)) || (freq > 54000000)) {
+	if ((freq < 2000000) || (freq > 54000000)) {
 		dev_err(codec_dai->dev, "Unsupported MCLK value %d\n",
 			freq);
 		return -EINVAL;
@@ -1079,21 +1080,21 @@
 		dev_err(codec->dev, "PLL input clock %d below valid range\n",
 			da7219->mclk_rate);
 		return -EINVAL;
-	} else if (da7219->mclk_rate <= 5000000) {
-		indiv_bits = DA7219_PLL_INDIV_2_5_MHZ;
-		indiv = DA7219_PLL_INDIV_2_5_MHZ_VAL;
-	} else if (da7219->mclk_rate <= 10000000) {
-		indiv_bits = DA7219_PLL_INDIV_5_10_MHZ;
-		indiv = DA7219_PLL_INDIV_5_10_MHZ_VAL;
-	} else if (da7219->mclk_rate <= 20000000) {
-		indiv_bits = DA7219_PLL_INDIV_10_20_MHZ;
-		indiv = DA7219_PLL_INDIV_10_20_MHZ_VAL;
-	} else if (da7219->mclk_rate <= 40000000) {
-		indiv_bits = DA7219_PLL_INDIV_20_40_MHZ;
-		indiv = DA7219_PLL_INDIV_20_40_MHZ_VAL;
+	} else if (da7219->mclk_rate <= 4500000) {
+		indiv_bits = DA7219_PLL_INDIV_2_TO_4_5_MHZ;
+		indiv = DA7219_PLL_INDIV_2_TO_4_5_MHZ_VAL;
+	} else if (da7219->mclk_rate <= 9000000) {
+		indiv_bits = DA7219_PLL_INDIV_4_5_TO_9_MHZ;
+		indiv = DA7219_PLL_INDIV_4_5_TO_9_MHZ_VAL;
+	} else if (da7219->mclk_rate <= 18000000) {
+		indiv_bits = DA7219_PLL_INDIV_9_TO_18_MHZ;
+		indiv = DA7219_PLL_INDIV_9_TO_18_MHZ_VAL;
+	} else if (da7219->mclk_rate <= 36000000) {
+		indiv_bits = DA7219_PLL_INDIV_18_TO_36_MHZ;
+		indiv = DA7219_PLL_INDIV_18_TO_36_MHZ_VAL;
 	} else if (da7219->mclk_rate <= 54000000) {
-		indiv_bits = DA7219_PLL_INDIV_40_54_MHZ;
-		indiv = DA7219_PLL_INDIV_40_54_MHZ_VAL;
+		indiv_bits = DA7219_PLL_INDIV_36_TO_54_MHZ;
+		indiv = DA7219_PLL_INDIV_36_TO_54_MHZ_VAL;
 	} else {
 		dev_err(codec->dev, "PLL input clock %d above valid range\n",
 			da7219->mclk_rate);
@@ -1426,6 +1427,12 @@
 };
 MODULE_DEVICE_TABLE(of, da7219_of_match);
 
+static const struct acpi_device_id da7219_acpi_match[] = {
+	{ .id = "DLGS7219", },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, da7219_acpi_match);
+
 static enum da7219_micbias_voltage
 	da7219_of_micbias_lvl(struct snd_soc_codec *codec, u32 val)
 {
@@ -1955,6 +1962,7 @@
 	.driver = {
 		.name = "da7219",
 		.of_match_table = of_match_ptr(da7219_of_match),
+		.acpi_match_table = ACPI_PTR(da7219_acpi_match),
 	},
 	.probe		= da7219_i2c_probe,
 	.remove		= da7219_i2c_remove,
diff --git a/sound/soc/codecs/da7219.h b/sound/soc/codecs/da7219.h
index 5a787e7..ff2a2f0 100644
--- a/sound/soc/codecs/da7219.h
+++ b/sound/soc/codecs/da7219.h
@@ -194,11 +194,11 @@
 /* DA7219_PLL_CTRL = 0x20 */
 #define DA7219_PLL_INDIV_SHIFT		2
 #define DA7219_PLL_INDIV_MASK		(0x7 << 2)
-#define DA7219_PLL_INDIV_2_5_MHZ	(0x0 << 2)
-#define DA7219_PLL_INDIV_5_10_MHZ	(0x1 << 2)
-#define DA7219_PLL_INDIV_10_20_MHZ	(0x2 << 2)
-#define DA7219_PLL_INDIV_20_40_MHZ	(0x3 << 2)
-#define DA7219_PLL_INDIV_40_54_MHZ	(0x4 << 2)
+#define DA7219_PLL_INDIV_2_TO_4_5_MHZ	(0x0 << 2)
+#define DA7219_PLL_INDIV_4_5_TO_9_MHZ	(0x1 << 2)
+#define DA7219_PLL_INDIV_9_TO_18_MHZ	(0x2 << 2)
+#define DA7219_PLL_INDIV_18_TO_36_MHZ	(0x3 << 2)
+#define DA7219_PLL_INDIV_36_TO_54_MHZ	(0x4 << 2)
 #define DA7219_PLL_MCLK_SQR_EN_SHIFT	5
 #define DA7219_PLL_MCLK_SQR_EN_MASK	(0x1 << 5)
 #define DA7219_PLL_MODE_SHIFT		6
@@ -761,11 +761,11 @@
 #define DA7219_PLL_FREQ_OUT_98304	98304000
 
 /* PLL Frequency Dividers */
-#define DA7219_PLL_INDIV_2_5_MHZ_VAL	1
-#define DA7219_PLL_INDIV_5_10_MHZ_VAL	2
-#define DA7219_PLL_INDIV_10_20_MHZ_VAL	4
-#define DA7219_PLL_INDIV_20_40_MHZ_VAL	8
-#define DA7219_PLL_INDIV_40_54_MHZ_VAL	16
+#define DA7219_PLL_INDIV_2_TO_4_5_MHZ_VAL	1
+#define DA7219_PLL_INDIV_4_5_TO_9_MHZ_VAL	2
+#define DA7219_PLL_INDIV_9_TO_18_MHZ_VAL	4
+#define DA7219_PLL_INDIV_18_TO_36_MHZ_VAL	8
+#define DA7219_PLL_INDIV_36_TO_54_MHZ_VAL	16
 
 /* SRM */
 #define DA7219_SRM_CHECK_RETRIES	8
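
The same renaming runs through da7213, da7218 and da7219: the PLL input-divider bands are now named after the MCLK range they cover, and each band's divider keeps the divided reference within roughly a 2-4.5 MHz window. A condensed sketch of the selection the three drivers now share (a plain restatement for illustration, not a shared helper in the tree):

/* Map an MCLK rate to the input-divider value used by the DA721x/DA7219
 * PLL code above; MCLK / divider stays within about 2-4.5 MHz.
 */
static unsigned int da_pll_indiv(unsigned int mclk_rate)
{
	if (mclk_rate <= 4500000)
		return 1;	/* 2 - 4.5 MHz band (da7218/da7219 only) */
	if (mclk_rate <= 9000000)
		return 2;	/* 4.5 - 9 MHz band */
	if (mclk_rate <= 18000000)
		return 4;	/* 9 - 18 MHz band */
	if (mclk_rate <= 36000000)
		return 8;	/* 18 - 36 MHz band */
	return 16;		/* 36 - 54 MHz band */
}
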
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index afa6c5d..2086d71 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -26,18 +26,30 @@
 #include <sound/tlv.h>
 #include "es8328.h"
 
-#define ES8328_SYSCLK_RATE_1X 11289600
-#define ES8328_SYSCLK_RATE_2X 22579200
+static const unsigned int rates_12288[] = {
+	8000, 12000, 16000, 24000, 32000, 48000, 96000,
+};
 
-/* Run the codec at 22.5792 or 11.2896 MHz to support these rates */
-static struct {
-	int rate;
-	u8 ratio;
-} mclk_ratios[] = {
-	{ 8000, 9 },
-	{11025, 7 },
-	{22050, 4 },
-	{44100, 2 },
+static const int ratios_12288[] = {
+	10, 7, 6, 4, 3, 2, 0,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_12288 = {
+	.count	= ARRAY_SIZE(rates_12288),
+	.list	= rates_12288,
+};
+
+static const unsigned int rates_11289[] = {
+	8018, 11025, 22050, 44100, 88200,
+};
+
+static const int ratios_11289[] = {
+	9, 7, 4, 2, 0,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_11289 = {
+	.count	= ARRAY_SIZE(rates_11289),
+	.list	= rates_11289,
 };
 
 /* regulator supplies for sgtl5000, VDDD is an optional external supply */
@@ -57,16 +69,28 @@
 	"HPVDD",
 };
 
-#define ES8328_RATES (SNDRV_PCM_RATE_44100 | \
+#define ES8328_RATES (SNDRV_PCM_RATE_96000 | \
+		SNDRV_PCM_RATE_48000 | \
+		SNDRV_PCM_RATE_44100 | \
+		SNDRV_PCM_RATE_32000 | \
 		SNDRV_PCM_RATE_22050 | \
-		SNDRV_PCM_RATE_11025)
-#define ES8328_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
+		SNDRV_PCM_RATE_16000 | \
+		SNDRV_PCM_RATE_11025 | \
+		SNDRV_PCM_RATE_8000)
+#define ES8328_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+		SNDRV_PCM_FMTBIT_S18_3LE | \
+		SNDRV_PCM_FMTBIT_S20_3LE | \
+		SNDRV_PCM_FMTBIT_S24_LE | \
+		SNDRV_PCM_FMTBIT_S32_LE)
 
 struct es8328_priv {
 	struct regmap *regmap;
 	struct clk *clk;
 	int playback_fs;
 	bool deemph;
+	int mclkdiv2;
+	const struct snd_pcm_hw_constraint_list *sysclk_constraints;
+	const int *mclk_ratios;
 	struct regulator_bulk_data supplies[ES8328_SUPPLY_NUM];
 };
 
@@ -439,54 +463,131 @@
 			mute ? ES8328_DACCONTROL3_DACMUTE : 0);
 }
 
+static int es8328_startup(struct snd_pcm_substream *substream,
+			  struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
+
+	if (es8328->sysclk_constraints)
+		snd_pcm_hw_constraint_list(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				es8328->sysclk_constraints);
+
+	return 0;
+}
+
 static int es8328_hw_params(struct snd_pcm_substream *substream,
 	struct snd_pcm_hw_params *params,
 	struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
 	struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
-	int clk_rate;
 	int i;
 	int reg;
-	u8 ratio;
+	int wl;
+	int ratio;
+
+	if (!es8328->sysclk_constraints) {
+		dev_err(codec->dev, "No MCLK configured\n");
+		return -EINVAL;
+	}
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		reg = ES8328_DACCONTROL2;
 	else
 		reg = ES8328_ADCCONTROL5;
 
-	clk_rate = clk_get_rate(es8328->clk);
+	for (i = 0; i < es8328->sysclk_constraints->count; i++)
+		if (es8328->sysclk_constraints->list[i] == params_rate(params))
+			break;
 
-	if ((clk_rate != ES8328_SYSCLK_RATE_1X) &&
-		(clk_rate != ES8328_SYSCLK_RATE_2X)) {
-		dev_err(codec->dev,
-			"%s: clock is running at %d Hz, not %d or %d Hz\n",
-			 __func__, clk_rate,
-			 ES8328_SYSCLK_RATE_1X, ES8328_SYSCLK_RATE_2X);
+	if (i == es8328->sysclk_constraints->count) {
+		dev_err(codec->dev, "LRCLK %d unsupported with current clock\n",
+			params_rate(params));
 		return -EINVAL;
 	}
 
-	/* find master mode MCLK to sampling frequency ratio */
-	ratio = mclk_ratios[0].rate;
-	for (i = 1; i < ARRAY_SIZE(mclk_ratios); i++)
-		if (params_rate(params) <= mclk_ratios[i].rate)
-			ratio = mclk_ratios[i].ratio;
+	ratio = es8328->mclk_ratios[i];
+	snd_soc_update_bits(codec, ES8328_MASTERMODE,
+			ES8328_MASTERMODE_MCLKDIV2,
+			es8328->mclkdiv2 ? ES8328_MASTERMODE_MCLKDIV2 : 0);
 
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		es8328->playback_fs = params_rate(params);
-		es8328_set_deemph(codec);
+	switch (params_width(params)) {
+	case 16:
+		wl = 3;
+		break;
+	case 18:
+		wl = 2;
+		break;
+	case 20:
+		wl = 1;
+		break;
+	case 24:
+		wl = 0;
+		break;
+	case 32:
+		wl = 4;
+		break;
+	default:
+		return -EINVAL;
 	}
 
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		snd_soc_update_bits(codec, ES8328_DACCONTROL1,
+				ES8328_DACCONTROL1_DACWL_MASK,
+				wl << ES8328_DACCONTROL1_DACWL_SHIFT);
+
+		es8328->playback_fs = params_rate(params);
+		es8328_set_deemph(codec);
+	} else
+		snd_soc_update_bits(codec, ES8328_ADCCONTROL4,
+				ES8328_ADCCONTROL4_ADCWL_MASK,
+				wl << ES8328_ADCCONTROL4_ADCWL_SHIFT);
+
 	return snd_soc_update_bits(codec, reg, ES8328_RATEMASK, ratio);
 }
 
+static int es8328_set_sysclk(struct snd_soc_dai *codec_dai,
+		int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
+	int mclkdiv2 = 0;
+
+	switch (freq) {
+	case 0:
+		es8328->sysclk_constraints = NULL;
+		es8328->mclk_ratios = NULL;
+		break;
+	case 22579200:
+		mclkdiv2 = 1;
+		/* fallthru */
+	case 11289600:
+		es8328->sysclk_constraints = &constraints_11289;
+		es8328->mclk_ratios = ratios_11289;
+		break;
+	case 24576000:
+		mclkdiv2 = 1;
+		/* fallthru */
+	case 12288000:
+		es8328->sysclk_constraints = &constraints_12288;
+		es8328->mclk_ratios = ratios_12288;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	es8328->mclkdiv2 = mclkdiv2;
+	return 0;
+}
+
 static int es8328_set_dai_fmt(struct snd_soc_dai *codec_dai,
 		unsigned int fmt)
 {
 	struct snd_soc_codec *codec = codec_dai->codec;
-	struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
-	int clk_rate;
-	u8 mode = ES8328_DACCONTROL1_DACWL_16;
+	u8 dac_mode = 0;
+	u8 adc_mode = 0;
 
 	/* set master/slave audio interface */
 	if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBM_CFM)
@@ -495,13 +596,16 @@
 	/* interface format */
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_I2S:
-		mode |= ES8328_DACCONTROL1_DACFORMAT_I2S;
+		dac_mode |= ES8328_DACCONTROL1_DACFORMAT_I2S;
+		adc_mode |= ES8328_ADCCONTROL4_ADCFORMAT_I2S;
 		break;
 	case SND_SOC_DAIFMT_RIGHT_J:
-		mode |= ES8328_DACCONTROL1_DACFORMAT_RJUST;
+		dac_mode |= ES8328_DACCONTROL1_DACFORMAT_RJUST;
+		adc_mode |= ES8328_ADCCONTROL4_ADCFORMAT_RJUST;
 		break;
 	case SND_SOC_DAIFMT_LEFT_J:
-		mode |= ES8328_DACCONTROL1_DACFORMAT_LJUST;
+		dac_mode |= ES8328_DACCONTROL1_DACFORMAT_LJUST;
+		adc_mode |= ES8328_ADCCONTROL4_ADCFORMAT_LJUST;
 		break;
 	default:
 		return -EINVAL;
@@ -511,18 +615,14 @@
 	if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF)
 		return -EINVAL;
 
-	snd_soc_write(codec, ES8328_DACCONTROL1, mode);
-	snd_soc_write(codec, ES8328_ADCCONTROL4, mode);
+	snd_soc_update_bits(codec, ES8328_DACCONTROL1,
+			ES8328_DACCONTROL1_DACFORMAT_MASK, dac_mode);
+	snd_soc_update_bits(codec, ES8328_ADCCONTROL4,
+			ES8328_ADCCONTROL4_ADCFORMAT_MASK, adc_mode);
 
 	/* Master serial port mode, with BCLK generated automatically */
-	clk_rate = clk_get_rate(es8328->clk);
-	if (clk_rate == ES8328_SYSCLK_RATE_1X)
-		snd_soc_write(codec, ES8328_MASTERMODE,
-				ES8328_MASTERMODE_MSC);
-	else
-		snd_soc_write(codec, ES8328_MASTERMODE,
-				ES8328_MASTERMODE_MCLKDIV2 |
-				ES8328_MASTERMODE_MSC);
+	snd_soc_update_bits(codec, ES8328_MASTERMODE,
+			ES8328_MASTERMODE_MSC, ES8328_MASTERMODE_MSC);
 
 	return 0;
 }
@@ -579,8 +679,10 @@
 }
 
 static const struct snd_soc_dai_ops es8328_dai_ops = {
+	.startup	= es8328_startup,
 	.hw_params	= es8328_hw_params,
 	.digital_mute	= es8328_mute,
+	.set_sysclk	= es8328_set_sysclk,
 	.set_fmt	= es8328_set_dai_fmt,
 };
 
@@ -601,6 +703,7 @@
 		.formats = ES8328_FORMATS,
 	},
 	.ops = &es8328_dai_ops,
+	.symmetric_rates = 1,
 };
 
 static int es8328_suspend(struct snd_soc_codec *codec)
@@ -708,6 +811,7 @@
 	.val_bits	= 8,
 	.max_register	= ES8328_REG_MAX,
 	.cache_type	= REGCACHE_RBTREE,
+	.use_single_rw	= true,
 };
 EXPORT_SYMBOL_GPL(es8328_regmap_config);
 
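With the fixed 11.2896/22.5792 MHz assumption removed, the codec derives its rate constraints and MCLK ratio from whatever the machine driver announces through set_sysclk(), so a board file is expected to report its master clock once. A sketch, assuming a board with a fixed 12.288 MHz MCLK feeding the codec (callback name and wiring are illustrative):

#include <sound/soc.h>

/* DAI-link init in a hypothetical machine driver: declare the fixed MCLK so
 * es8328_startup() constrains streams to the 48 kHz-family rates and
 * es8328_hw_params() picks the matching ratio from ratios_12288[].
 */
static int es8328_board_init(struct snd_soc_pcm_runtime *rtd)
{
	/* clk_id is ignored by es8328_set_sysclk(), so 0 is fine here */
	return snd_soc_dai_set_sysclk(rtd->codec_dai, 0, 12288000,
				      SND_SOC_CLOCK_IN);
}
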
diff --git a/sound/soc/codecs/es8328.h b/sound/soc/codecs/es8328.h
index 156c748..1a736e7 100644
--- a/sound/soc/codecs/es8328.h
+++ b/sound/soc/codecs/es8328.h
@@ -22,7 +22,7 @@
 #define ES8328_CONTROL1_VMIDSEL_50k (1 << 0)
 #define ES8328_CONTROL1_VMIDSEL_500k (2 << 0)
 #define ES8328_CONTROL1_VMIDSEL_5k (3 << 0)
-#define ES8328_CONTROL1_VMIDSEL_MASK (7 << 0)
+#define ES8328_CONTROL1_VMIDSEL_MASK (3 << 0)
 #define ES8328_CONTROL1_ENREF (1 << 2)
 #define ES8328_CONTROL1_SEQEN (1 << 3)
 #define ES8328_CONTROL1_SAMEFS (1 << 4)
@@ -84,7 +84,20 @@
 #define ES8328_ADCCONTROL1	0x09
 #define ES8328_ADCCONTROL2	0x0a
 #define ES8328_ADCCONTROL3	0x0b
+
 #define ES8328_ADCCONTROL4	0x0c
+#define ES8328_ADCCONTROL4_ADCFORMAT_MASK (3 << 0)
+#define ES8328_ADCCONTROL4_ADCFORMAT_I2S (0 << 0)
+#define ES8328_ADCCONTROL4_ADCFORMAT_LJUST (1 << 0)
+#define ES8328_ADCCONTROL4_ADCFORMAT_RJUST (2 << 0)
+#define ES8328_ADCCONTROL4_ADCFORMAT_PCM (3 << 0)
+#define ES8328_ADCCONTROL4_ADCWL_SHIFT 2
+#define ES8328_ADCCONTROL4_ADCWL_MASK (7 << 2)
+#define ES8328_ADCCONTROL4_ADCLRP_I2S_POL_NORMAL (0 << 5)
+#define ES8328_ADCCONTROL4_ADCLRP_I2S_POL_INV (1 << 5)
+#define ES8328_ADCCONTROL4_ADCLRP_PCM_MSB_CLK2 (0 << 5)
+#define ES8328_ADCCONTROL4_ADCLRP_PCM_MSB_CLK1 (1 << 5)
+
 #define ES8328_ADCCONTROL5	0x0d
 #define ES8328_ADCCONTROL5_RATEMASK (0x1f << 0)
 
@@ -109,15 +122,13 @@
 #define ES8328_ADCCONTROL14	0x16
 
 #define ES8328_DACCONTROL1	0x17
+#define ES8328_DACCONTROL1_DACFORMAT_MASK (3 << 1)
 #define ES8328_DACCONTROL1_DACFORMAT_I2S (0 << 1)
 #define ES8328_DACCONTROL1_DACFORMAT_LJUST (1 << 1)
 #define ES8328_DACCONTROL1_DACFORMAT_RJUST (2 << 1)
 #define ES8328_DACCONTROL1_DACFORMAT_PCM (3 << 1)
-#define ES8328_DACCONTROL1_DACWL_24 (0 << 3)
-#define ES8328_DACCONTROL1_DACWL_20 (1 << 3)
-#define ES8328_DACCONTROL1_DACWL_18 (2 << 3)
-#define ES8328_DACCONTROL1_DACWL_16 (3 << 3)
-#define ES8328_DACCONTROL1_DACWL_32 (4 << 3)
+#define ES8328_DACCONTROL1_DACWL_SHIFT 3
+#define ES8328_DACCONTROL1_DACWL_MASK (7 << 3)
 #define ES8328_DACCONTROL1_DACLRP_I2S_POL_NORMAL (0 << 6)
 #define ES8328_DACCONTROL1_DACLRP_I2S_POL_INV (1 << 6)
 #define ES8328_DACCONTROL1_DACLRP_PCM_MSB_CLK2 (0 << 6)
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 26f9459..181cd3b 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -29,6 +29,7 @@
 #include <sound/hdaudio_ext.h>
 #include <sound/hda_i915.h>
 #include <sound/pcm_drm_eld.h>
+#include <sound/hda_chmap.h>
 #include "../../hda/local.h"
 #include "hdac_hdmi.h"
 
@@ -60,11 +61,17 @@
 	struct hdac_hdmi_cvt_params params;
 };
 
+/* Currently only spk_alloc, more to be added */
+struct hdac_hdmi_parsed_eld {
+	u8 spk_alloc;
+};
+
 struct hdac_hdmi_eld {
 	bool	monitor_present;
 	bool	eld_valid;
 	int	eld_size;
 	char    eld_buffer[ELD_MAX_SIZE];
+	struct	hdac_hdmi_parsed_eld info;
 };
 
 struct hdac_hdmi_pin {
@@ -76,6 +83,10 @@
 	struct hdac_ext_device *edev;
 	int repoll_count;
 	struct delayed_work work;
+	struct mutex lock;
+	bool chmap_set;
+	unsigned char chmap[8]; /* ALSA API channel-map */
+	int channels; /* current number of channels */
 };
 
 struct hdac_hdmi_pcm {
@@ -100,8 +111,22 @@
 	int num_pin;
 	int num_cvt;
 	struct mutex pin_mutex;
+	struct hdac_chmap chmap;
 };
 
+static struct hdac_hdmi_pcm *get_hdmi_pcm_from_id(struct hdac_hdmi_priv *hdmi,
+						int pcm_idx)
+{
+	struct hdac_hdmi_pcm *pcm;
+
+	list_for_each_entry(pcm, &hdmi->pcm_list, head) {
+		if (pcm->pcm_id == pcm_idx)
+			return pcm;
+	}
+
+	return NULL;
+}
+
 static inline struct hdac_ext_device *to_hda_ext_device(struct device *dev)
 {
 	struct hdac_device *hdac = dev_to_hdac_dev(dev);
@@ -278,26 +303,31 @@
 	int i;
 	const u8 *eld_buf;
 	u8 conn_type;
-	int channels = 2;
+	int channels, ca;
 
 	list_for_each_entry(pin, &hdmi->pin_list, head) {
 		if (pin->nid == pin_nid)
 			break;
 	}
 
+	ca = snd_hdac_channel_allocation(&hdac->hdac, pin->eld.info.spk_alloc,
+			pin->channels, pin->chmap_set, true, pin->chmap);
+
+	channels = snd_hdac_get_active_channels(ca);
+	hdmi->chmap.ops.set_channel_count(&hdac->hdac, cvt_nid, channels);
+
+	snd_hdac_setup_channel_mapping(&hdmi->chmap, pin->nid, false, ca,
+				pin->channels, pin->chmap, pin->chmap_set);
+
 	eld_buf = pin->eld.eld_buffer;
 	conn_type = drm_eld_get_conn_type(eld_buf);
 
-	/* setup channel count */
-	snd_hdac_codec_write(&hdac->hdac, cvt_nid, 0,
-			    AC_VERB_SET_CVT_CHAN_COUNT, channels - 1);
-
 	switch (conn_type) {
 	case DRM_ELD_CONN_TYPE_HDMI:
 		hdmi_audio_infoframe_init(&frame);
 
-		/* Default stereo for now */
 		frame.channels = channels;
+		frame.channel_allocation = ca;
 
 		ret = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
 		if (ret < 0)
@@ -311,7 +341,7 @@
 		dp_ai.len	= 0x1b;
 		dp_ai.ver	= 0x11 << 2;
 		dp_ai.CC02_CT47	= channels - 1;
-		dp_ai.CA	= 0;
+		dp_ai.CA	= ca;
 
 		dip = (u8 *)&dp_ai;
 		break;
@@ -370,17 +400,23 @@
 	struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
 	struct hdac_hdmi_priv *hdmi = hdac->private_data;
 	struct hdac_hdmi_dai_pin_map *dai_map;
+	struct hdac_hdmi_pin *pin;
 	struct hdac_ext_dma_params *dd;
 	int ret;
 
 	dai_map = &hdmi->dai_map[dai->id];
+	pin = dai_map->pin;
 
 	dd = (struct hdac_ext_dma_params *)snd_soc_dai_get_dma_data(dai, substream);
 	dev_dbg(&hdac->hdac.dev, "stream tag from cpu dai %d format in cvt 0x%x\n",
 			dd->stream_tag,	dd->format);
 
+	mutex_lock(&pin->lock);
+	pin->channels = substream->runtime->channels;
+
 	ret = hdac_hdmi_setup_audio_infoframe(hdac, dai_map->cvt->nid,
 						dai_map->pin->nid);
+	mutex_unlock(&pin->lock);
 	if (ret < 0)
 		return ret;
 
@@ -640,6 +676,12 @@
 		snd_hdac_codec_write(&hdac->hdac, dai_map->pin->nid, 0,
 			AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
 
+		mutex_lock(&dai_map->pin->lock);
+		dai_map->pin->chmap_set = false;
+		memset(dai_map->pin->chmap, 0, sizeof(dai_map->pin->chmap));
+		dai_map->pin->channels = 0;
+		mutex_unlock(&dai_map->pin->lock);
+
 		dai_map->pin = NULL;
 	}
 }
@@ -647,10 +689,19 @@
 static int
 hdac_hdmi_query_cvt_params(struct hdac_device *hdac, struct hdac_hdmi_cvt *cvt)
 {
+	unsigned int chans;
+	struct hdac_ext_device *edev = to_ehdac_device(hdac);
+	struct hdac_hdmi_priv *hdmi = edev->private_data;
 	int err;
 
-	/* Only stereo supported as of now */
-	cvt->params.channels_min = cvt->params.channels_max = 2;
+	chans = get_wcaps(hdac, cvt->nid);
+	chans = get_wcaps_channels(chans);
+
+	cvt->params.channels_min = 2;
+
+	cvt->params.channels_max = chans;
+	if (chans > hdmi->chmap.channels_max)
+		hdmi->chmap.channels_max = chans;
 
 	err = snd_hdac_query_supported_pcm(hdac, cvt->nid,
 			&cvt->params.rates,
@@ -1008,6 +1059,12 @@
 	return hdac_hdmi_query_cvt_params(&edev->hdac, cvt);
 }
 
+static void hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
+			struct hdac_hdmi_pin *pin)
+{
+	pin->eld.info.spk_alloc = pin->eld.eld_buffer[DRM_ELD_SPEAKER];
+}
+
 static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin, int repoll)
 {
 	struct hdac_ext_device *edev = pin->edev;
@@ -1065,6 +1122,7 @@
 
 				snd_jack_report(pcm->jack, SND_JACK_AVOUT);
 			}
+			hdac_hdmi_parse_eld(edev, pin);
 
 			print_hex_dump_bytes("ELD: ", DUMP_PREFIX_OFFSET,
 					pin->eld.eld_buffer, pin->eld.eld_size);
@@ -1123,6 +1181,7 @@
 	hdmi->num_pin++;
 
 	pin->edev = edev;
+	mutex_init(&pin->lock);
 	INIT_DELAYED_WORK(&pin->work, hdac_hdmi_repoll_eld);
 
 	return 0;
@@ -1342,6 +1401,19 @@
 	.pin_eld_notify	= hdac_hdmi_eld_notify_cb,
 };
 
+static struct snd_pcm *hdac_hdmi_get_pcm_from_id(struct snd_soc_card *card,
+						int device)
+{
+	struct snd_soc_pcm_runtime *rtd;
+
+	list_for_each_entry(rtd, &card->rtd_list, list) {
+		if (rtd->pcm && (rtd->pcm->device == device))
+			return rtd->pcm;
+	}
+
+	return NULL;
+}
+
 int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device)
 {
 	char jack_name[NAME_SIZE];
@@ -1351,6 +1423,8 @@
 		snd_soc_component_get_dapm(&codec->component);
 	struct hdac_hdmi_priv *hdmi = edev->private_data;
 	struct hdac_hdmi_pcm *pcm;
+	struct snd_pcm *snd_pcm;
+	int err;
 
 	/*
 	 * this is a new PCM device, create new pcm and
@@ -1362,6 +1436,18 @@
 	pcm->pcm_id = device;
 	pcm->cvt = hdmi->dai_map[dai->id].cvt;
 
+	snd_pcm = hdac_hdmi_get_pcm_from_id(dai->component->card, device);
+	if (snd_pcm) {
+		err = snd_hdac_add_chmap_ctls(snd_pcm, device, &hdmi->chmap);
+		if (err < 0) {
+			dev_err(&edev->hdac.dev,
+				"chmap control add failed with err: %d for pcm: %d\n",
+				err, device);
+			kfree(pcm);
+			return err;
+		}
+	}
+
 	list_add_tail(&pcm->head, &hdmi->pcm_list);
 
 	sprintf(jack_name, "HDMI/DP, pcm=%d Jack", device);
@@ -1378,10 +1464,18 @@
 	struct snd_soc_dapm_context *dapm =
 		snd_soc_component_get_dapm(&codec->component);
 	struct hdac_hdmi_pin *pin;
+	struct hdac_ext_link *hlink = NULL;
 	int ret;
 
 	edev->scodec = codec;
 
+	/*
+	 * Hold the ref while we probe; there is no need to drop it on exit
+	 * because the pm_runtime_suspend() call will do that for us.
+	 */
+	hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+	snd_hdac_ext_bus_link_get(edev->ebus, hlink);
+
 	ret = create_fill_widget_route_map(dapm);
 	if (ret < 0)
 		return ret;
@@ -1420,33 +1514,40 @@
 }
 
 #ifdef CONFIG_PM
-static int hdmi_codec_resume(struct snd_soc_codec *codec)
+static int hdmi_codec_prepare(struct device *dev)
 {
-	struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
+	struct hdac_ext_device *edev = to_hda_ext_device(dev);
+	struct hdac_device *hdac = &edev->hdac;
+
+	pm_runtime_get_sync(&edev->hdac.dev);
+
+	/*
+	 * Power down afg.
+	 * codec_read is preferred over codec_write to set the power state.
+	 * This way verb is send to set the power state and response
+	 * This way a verb is sent to set the power state and a response
+	 * is received, so the power state is known to be set without
+	 * looping on reads of the state.
+	snd_hdac_codec_read(hdac, hdac->afg, 0,	AC_VERB_SET_POWER_STATE,
+							AC_PWRST_D3);
+
+	return 0;
+}
+
+static void hdmi_codec_complete(struct device *dev)
+{
+	struct hdac_ext_device *edev = to_hda_ext_device(dev);
 	struct hdac_hdmi_priv *hdmi = edev->private_data;
 	struct hdac_hdmi_pin *pin;
 	struct hdac_device *hdac = &edev->hdac;
-	struct hdac_bus *bus = hdac->bus;
-	int err;
-	unsigned long timeout;
+
+	/* Power up afg */
+	snd_hdac_codec_read(hdac, hdac->afg, 0,	AC_VERB_SET_POWER_STATE,
+							AC_PWRST_D0);
 
 	hdac_hdmi_skl_enable_all_pins(&edev->hdac);
 	hdac_hdmi_skl_enable_dp12(&edev->hdac);
 
-	/* Power up afg */
-	if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) {
-
-		snd_hdac_codec_write(hdac, hdac->afg, 0,
-			AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
-
-		/* Wait till power state is set to D0 */
-		timeout = jiffies + msecs_to_jiffies(1000);
-		while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)
-				&& time_before(jiffies, timeout)) {
-			msleep(50);
-		}
-	}
-
 	/*
 	 * As the ELD notify callback request is not entertained while the
 	 * device is in suspend state. Need to manually check detection of
@@ -1455,44 +1556,96 @@
 	list_for_each_entry(pin, &hdmi->pin_list, head)
 		hdac_hdmi_present_sense(pin, 1);
 
-	/*
-	 * Codec power is turned ON during controller resume.
-	 * Turn it OFF here
-	 */
-	err = snd_hdac_display_power(bus, false);
-	if (err < 0) {
-		dev_err(bus->dev,
-			"Cannot turn OFF display power on i915, err: %d\n",
-			err);
-		return err;
-	}
-
-	return 0;
+	pm_runtime_put_sync(&edev->hdac.dev);
 }
 #else
-#define hdmi_codec_resume NULL
+#define hdmi_codec_prepare NULL
+#define hdmi_codec_complete NULL
 #endif
 
 static struct snd_soc_codec_driver hdmi_hda_codec = {
 	.probe		= hdmi_codec_probe,
 	.remove		= hdmi_codec_remove,
-	.resume		= hdmi_codec_resume,
 	.idle_bias_off	= true,
 };
 
+static void hdac_hdmi_get_chmap(struct hdac_device *hdac, int pcm_idx,
+					unsigned char *chmap)
+{
+	struct hdac_ext_device *edev = to_ehdac_device(hdac);
+	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
+	struct hdac_hdmi_pin *pin = pcm->pin;
+
+	/* chmap is already set to 0 in caller */
+	if (!pin)
+		return;
+
+	memcpy(chmap, pin->chmap, ARRAY_SIZE(pin->chmap));
+}
+
+static void hdac_hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
+				unsigned char *chmap, int prepared)
+{
+	struct hdac_ext_device *edev = to_ehdac_device(hdac);
+	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
+	struct hdac_hdmi_pin *pin = pcm->pin;
+
+	mutex_lock(&pin->lock);
+	pin->chmap_set = true;
+	memcpy(pin->chmap, chmap, ARRAY_SIZE(pin->chmap));
+	if (prepared)
+		hdac_hdmi_setup_audio_infoframe(edev, pcm->cvt->nid, pin->nid);
+	mutex_unlock(&pin->lock);
+}
+
+static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
+{
+	struct hdac_ext_device *edev = to_ehdac_device(hdac);
+	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
+	struct hdac_hdmi_pin *pin = pcm->pin;
+
+	return pin ? true:false;
+}
+
+static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdac, int pcm_idx)
+{
+	struct hdac_ext_device *edev = to_ehdac_device(hdac);
+	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
+	struct hdac_hdmi_pin *pin = pcm->pin;
+
+	if (!pin || !pin->eld.eld_valid)
+		return 0;
+
+	return pin->eld.info.spk_alloc;
+}
+
 static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 {
 	struct hdac_device *codec = &edev->hdac;
 	struct hdac_hdmi_priv *hdmi_priv;
 	struct snd_soc_dai_driver *hdmi_dais = NULL;
+	struct hdac_ext_link *hlink = NULL;
 	int num_dais = 0;
 	int ret = 0;
 
+	/* hold the ref while we probe */
+	hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+	snd_hdac_ext_bus_link_get(edev->ebus, hlink);
+
 	hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
 	if (hdmi_priv == NULL)
 		return -ENOMEM;
 
 	edev->private_data = hdmi_priv;
+	snd_hdac_register_chmap_ops(codec, &hdmi_priv->chmap);
+	hdmi_priv->chmap.ops.get_chmap = hdac_hdmi_get_chmap;
+	hdmi_priv->chmap.ops.set_chmap = hdac_hdmi_set_chmap;
+	hdmi_priv->chmap.ops.is_pcm_attached = is_hdac_hdmi_pcm_attached;
+	hdmi_priv->chmap.ops.get_spk_alloc = hdac_hdmi_get_spk_alloc;
 
 	dev_set_drvdata(&codec->dev, edev);
 
@@ -1521,8 +1674,12 @@
 	}
 
 	/* ASoC specific initialization */
-	return snd_soc_register_codec(&codec->dev, &hdmi_hda_codec,
-			hdmi_dais, num_dais);
+	ret = snd_soc_register_codec(&codec->dev, &hdmi_hda_codec,
+					hdmi_dais, num_dais);
+
+	snd_hdac_ext_bus_link_put(edev->ebus, hlink);
+
+	return ret;
 }
 
 static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
@@ -1561,7 +1718,8 @@
 	struct hdac_ext_device *edev = to_hda_ext_device(dev);
 	struct hdac_device *hdac = &edev->hdac;
 	struct hdac_bus *bus = hdac->bus;
-	unsigned long timeout;
+	struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
+	struct hdac_ext_link *hlink = NULL;
 	int err;
 
 	dev_dbg(dev, "Enter: %s\n", __func__);
@@ -1570,26 +1728,24 @@
 	if (!bus)
 		return 0;
 
-	/* Power down afg */
-	if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)) {
-		snd_hdac_codec_write(hdac, hdac->afg, 0,
-			AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-
-		/* Wait till power state is set to D3 */
-		timeout = jiffies + msecs_to_jiffies(1000);
-		while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)
-				&& time_before(jiffies, timeout)) {
-
-			msleep(50);
-		}
-	}
-
+	/*
+	 * Power down afg.
+ * codec_read is preferred over codec_write to set the power state:
+ * this way the verb is sent and the response is received, so the power
+ * state is guaranteed to be set without polling the state in a loop.
+	 */
+	snd_hdac_codec_read(hdac, hdac->afg, 0,	AC_VERB_SET_POWER_STATE,
+							AC_PWRST_D3);
 	err = snd_hdac_display_power(bus, false);
 	if (err < 0) {
 		dev_err(bus->dev, "Cannot turn on display power on i915\n");
 		return err;
 	}
 
+	hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+	snd_hdac_ext_bus_link_put(ebus, hlink);
+
 	return 0;
 }
 
@@ -1598,6 +1754,8 @@
 	struct hdac_ext_device *edev = to_hda_ext_device(dev);
 	struct hdac_device *hdac = &edev->hdac;
 	struct hdac_bus *bus = hdac->bus;
+	struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
+	struct hdac_ext_link *hlink = NULL;
 	int err;
 
 	dev_dbg(dev, "Enter: %s\n", __func__);
@@ -1606,6 +1764,9 @@
 	if (!bus)
 		return 0;
 
+	hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+	snd_hdac_ext_bus_link_get(ebus, hlink);
+
 	err = snd_hdac_display_power(bus, true);
 	if (err < 0) {
 		dev_err(bus->dev, "Cannot turn on display power on i915\n");
@@ -1616,9 +1777,8 @@
 	hdac_hdmi_skl_enable_dp12(&edev->hdac);
 
 	/* Power up afg */
-	if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0))
-		snd_hdac_codec_write(hdac, hdac->afg, 0,
-			AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+	snd_hdac_codec_read(hdac, hdac->afg, 0,	AC_VERB_SET_POWER_STATE,
+							AC_PWRST_D0);
 
 	return 0;
 }
@@ -1629,6 +1789,8 @@
 
 static const struct dev_pm_ops hdac_hdmi_pm = {
 	SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
+	.prepare = hdmi_codec_prepare,
+	.complete = hdmi_codec_complete,
 };
 
 static const struct hda_device_id hdmi_list[] = {
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
new file mode 100644
index 0000000..8e36e88
--- /dev/null
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -0,0 +1,432 @@
+/*
+ * ALSA SoC codec for HDMI encoder drivers
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/string.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/pcm_drm_eld.h>
+#include <sound/hdmi-codec.h>
+#include <sound/pcm_iec958.h>
+
+#include <drm/drm_crtc.h> /* This is only to get MAX_ELD_BYTES */
+
+struct hdmi_codec_priv {
+	struct hdmi_codec_pdata hcd;
+	struct snd_soc_dai_driver *daidrv;
+	struct hdmi_codec_daifmt daifmt[2];
+	struct mutex current_stream_lock;
+	struct snd_pcm_substream *current_stream;
+	struct snd_pcm_hw_constraint_list ratec;
+	uint8_t eld[MAX_ELD_BYTES];
+};
+
+static const struct snd_soc_dapm_widget hdmi_widgets[] = {
+	SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route hdmi_routes[] = {
+	{ "TX", NULL, "Playback" },
+};
+
+enum {
+	DAI_ID_I2S = 0,
+	DAI_ID_SPDIF,
+};
+
+static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_info *uinfo)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(hcp->eld);
+
+	return 0;
+}
+
+static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+
+	memcpy(ucontrol->value.bytes.data, hcp->eld, sizeof(hcp->eld));
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new hdmi_controls[] = {
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ |
+			  SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+		.name = "ELD",
+		.info = hdmi_eld_ctl_info,
+		.get = hdmi_eld_ctl_get,
+	},
+};
+
+static int hdmi_codec_new_stream(struct snd_pcm_substream *substream,
+				 struct snd_soc_dai *dai)
+{
+	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+	int ret = 0;
+
+	mutex_lock(&hcp->current_stream_lock);
+	if (!hcp->current_stream) {
+		hcp->current_stream = substream;
+	} else if (hcp->current_stream != substream) {
+		dev_err(dai->dev, "Only one simultaneous stream supported!\n");
+		ret = -EINVAL;
+	}
+	mutex_unlock(&hcp->current_stream_lock);
+
+	return ret;
+}
+
+static int hdmi_codec_startup(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+	int ret = 0;
+
+	dev_dbg(dai->dev, "%s()\n", __func__);
+
+	ret = hdmi_codec_new_stream(substream, dai);
+	if (ret)
+		return ret;
+
+	if (hcp->hcd.ops->audio_startup) {
+		ret = hcp->hcd.ops->audio_startup(dai->dev->parent);
+		if (ret) {
+			mutex_lock(&hcp->current_stream_lock);
+			hcp->current_stream = NULL;
+			mutex_unlock(&hcp->current_stream_lock);
+			return ret;
+		}
+	}
+
+	if (hcp->hcd.ops->get_eld) {
+		ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->eld,
+					    sizeof(hcp->eld));
+
+		if (!ret) {
+			ret = snd_pcm_hw_constraint_eld(substream->runtime,
+							hcp->eld);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+
+	dev_dbg(dai->dev, "%s()\n", __func__);
+
+	WARN_ON(hcp->current_stream != substream);
+
+	hcp->hcd.ops->audio_shutdown(dai->dev->parent);
+
+	mutex_lock(&hcp->current_stream_lock);
+	hcp->current_stream = NULL;
+	mutex_unlock(&hcp->current_stream_lock);
+}
+
+static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+	struct hdmi_codec_params hp = {
+		.iec = {
+			.status = { 0 },
+			.subcode = { 0 },
+			.pad = 0,
+			.dig_subframe = { 0 },
+		}
+	};
+	int ret;
+
+	dev_dbg(dai->dev, "%s() width %d rate %d channels %d\n", __func__,
+		params_width(params), params_rate(params),
+		params_channels(params));
+
+	if (params_width(params) > 24)
+		params->msbits = 24;
+
+	ret = snd_pcm_create_iec958_consumer_hw_params(params, hp.iec.status,
+						       sizeof(hp.iec.status));
+	if (ret < 0) {
+		dev_err(dai->dev, "Creating IEC958 channel status failed %d\n",
+			ret);
+		return ret;
+	}
+
+	ret = hdmi_codec_new_stream(substream, dai);
+	if (ret)
+		return ret;
+
+	hdmi_audio_infoframe_init(&hp.cea);
+	hp.cea.channels = params_channels(params);
+	hp.cea.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
+	hp.cea.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
+	hp.cea.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
+
+	hp.sample_width = params_width(params);
+	hp.sample_rate = params_rate(params);
+	hp.channels = params_channels(params);
+
+	return hcp->hcd.ops->hw_params(dai->dev->parent, &hcp->daifmt[dai->id],
+				       &hp);
+}
+
+static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
+			      unsigned int fmt)
+{
+	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+	struct hdmi_codec_daifmt cf = { 0 };
+	int ret = 0;
+
+	dev_dbg(dai->dev, "%s()\n", __func__);
+
+	if (dai->id == DAI_ID_SPDIF) {
+		cf.fmt = HDMI_SPDIF;
+	} else {
+		switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+		case SND_SOC_DAIFMT_CBM_CFM:
+			cf.bit_clk_master = 1;
+			cf.frame_clk_master = 1;
+			break;
+		case SND_SOC_DAIFMT_CBS_CFM:
+			cf.frame_clk_master = 1;
+			break;
+		case SND_SOC_DAIFMT_CBM_CFS:
+			cf.bit_clk_master = 1;
+			break;
+		case SND_SOC_DAIFMT_CBS_CFS:
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+		case SND_SOC_DAIFMT_NB_NF:
+			break;
+		case SND_SOC_DAIFMT_NB_IF:
+			cf.frame_clk_inv = 1;
+			break;
+		case SND_SOC_DAIFMT_IB_NF:
+			cf.bit_clk_inv = 1;
+			break;
+		case SND_SOC_DAIFMT_IB_IF:
+			cf.frame_clk_inv = 1;
+			cf.bit_clk_inv = 1;
+			break;
+		}
+
+		switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+		case SND_SOC_DAIFMT_I2S:
+			cf.fmt = HDMI_I2S;
+			break;
+		case SND_SOC_DAIFMT_DSP_A:
+			cf.fmt = HDMI_DSP_A;
+			break;
+		case SND_SOC_DAIFMT_DSP_B:
+			cf.fmt = HDMI_DSP_B;
+			break;
+		case SND_SOC_DAIFMT_RIGHT_J:
+			cf.fmt = HDMI_RIGHT_J;
+			break;
+		case SND_SOC_DAIFMT_LEFT_J:
+			cf.fmt = HDMI_LEFT_J;
+			break;
+		case SND_SOC_DAIFMT_AC97:
+			cf.fmt = HDMI_AC97;
+			break;
+		default:
+			dev_err(dai->dev, "Invalid DAI interface format\n");
+			return -EINVAL;
+		}
+	}
+
+	hcp->daifmt[dai->id] = cf;
+
+	return ret;
+}
+
+static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+
+	dev_dbg(dai->dev, "%s()\n", __func__);
+
+	if (hcp->hcd.ops->digital_mute)
+		return hcp->hcd.ops->digital_mute(dai->dev->parent, mute);
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops hdmi_dai_ops = {
+	.startup	= hdmi_codec_startup,
+	.shutdown	= hdmi_codec_shutdown,
+	.hw_params	= hdmi_codec_hw_params,
+	.set_fmt	= hdmi_codec_set_fmt,
+	.digital_mute	= hdmi_codec_digital_mute,
+};
+
+
+#define HDMI_RATES	(SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
+			 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |\
+			 SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
+			 SNDRV_PCM_RATE_192000)
+
+#define SPDIF_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |\
+			 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE |\
+			 SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE |\
+			 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE)
+
+/*
+ * This list is only for formats allowed on the I2S bus. It therefore
+ * includes some formats that the HDMI interface itself does not
+ * support. For instance, allowing the 32-bit formats enables 24-bit
+ * precision with CPU DAIs that do not support 24-bit formats. If the
+ * extra formats cause problems, we should give the video side driver
+ * an option to disable them.
+ */
+#define I2S_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |\
+			 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE |\
+			 SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE |\
+			 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE |\
+			 SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE)
+
+static struct snd_soc_dai_driver hdmi_i2s_dai = {
+	.name = "i2s-hifi",
+	.id = DAI_ID_I2S,
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 2,
+		.channels_max = 8,
+		.rates = HDMI_RATES,
+		.formats = I2S_FORMATS,
+		.sig_bits = 24,
+	},
+	.ops = &hdmi_dai_ops,
+};
+
+static const struct snd_soc_dai_driver hdmi_spdif_dai = {
+	.name = "spdif-hifi",
+	.id = DAI_ID_SPDIF,
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = HDMI_RATES,
+		.formats = SPDIF_FORMATS,
+	},
+	.ops = &hdmi_dai_ops,
+};
+
+static struct snd_soc_codec_driver hdmi_codec = {
+	.controls = hdmi_controls,
+	.num_controls = ARRAY_SIZE(hdmi_controls),
+	.dapm_widgets = hdmi_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
+	.dapm_routes = hdmi_routes,
+	.num_dapm_routes = ARRAY_SIZE(hdmi_routes),
+};
+
+static int hdmi_codec_probe(struct platform_device *pdev)
+{
+	struct hdmi_codec_pdata *hcd = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
+	struct hdmi_codec_priv *hcp;
+	int dai_count, i = 0;
+	int ret;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	if (!hcd) {
+		dev_err(dev, "%s: No platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	dai_count = hcd->i2s + hcd->spdif;
+	if (dai_count < 1 || !hcd->ops || !hcd->ops->hw_params ||
+	    !hcd->ops->audio_shutdown) {
+		dev_err(dev, "%s: Invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hcp = devm_kzalloc(dev, sizeof(*hcp), GFP_KERNEL);
+	if (!hcp)
+		return -ENOMEM;
+
+	hcp->hcd = *hcd;
+	mutex_init(&hcp->current_stream_lock);
+
+	hcp->daidrv = devm_kzalloc(dev, dai_count * sizeof(*hcp->daidrv),
+				   GFP_KERNEL);
+	if (!hcp->daidrv)
+		return -ENOMEM;
+
+	if (hcd->i2s) {
+		hcp->daidrv[i] = hdmi_i2s_dai;
+		hcp->daidrv[i].playback.channels_max =
+			hcd->max_i2s_channels;
+		i++;
+	}
+
+	if (hcd->spdif)
+		hcp->daidrv[i] = hdmi_spdif_dai;
+
+	ret = snd_soc_register_codec(dev, &hdmi_codec, hcp->daidrv,
+				     dai_count);
+	if (ret) {
+		dev_err(dev, "%s: snd_soc_register_codec() failed (%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
+	dev_set_drvdata(dev, hcp);
+	return 0;
+}
+
+static int hdmi_codec_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver hdmi_codec_driver = {
+	.driver = {
+		.name = HDMI_CODEC_DRV_NAME,
+	},
+	.probe = hdmi_codec_probe,
+	.remove = hdmi_codec_remove,
+};
+
+module_platform_driver(hdmi_codec_driver);
+
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("HDMI Audio Codec Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" HDMI_CODEC_DRV_NAME);
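
The generic hdmi-codec driver added above is intended to be instantiated by the
video-side (DRM encoder/bridge) driver, which supplies a struct hdmi_codec_pdata
as platform data. As a rough illustration only, here is a minimal sketch of such
a registration, assuming the hdmi_codec_ops callback signatures implied by the
startup/hw_params/shutdown paths above; the my_encoder_* names are hypothetical
and not part of this series:

/*
 * Hypothetical video-side registration sketch. Only hw_params and
 * audio_shutdown are mandatory, per the checks in hdmi_codec_probe().
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <sound/hdmi-codec.h>

static int my_encoder_audio_hw_params(struct device *dev,
				      struct hdmi_codec_daifmt *fmt,
				      struct hdmi_codec_params *hparms)
{
	/* Program the encoder's audio path from fmt and hparms. */
	return 0;
}

static void my_encoder_audio_shutdown(struct device *dev)
{
	/* Stop the encoder's audio path. */
}

static const struct hdmi_codec_ops my_encoder_audio_ops = {
	.hw_params	= my_encoder_audio_hw_params,
	.audio_shutdown	= my_encoder_audio_shutdown,
};

static int my_encoder_register_audio_codec(struct device *dev)
{
	struct hdmi_codec_pdata codec_data = {
		.ops			= &my_encoder_audio_ops,
		.i2s			= 1,
		.max_i2s_channels	= 8,
	};
	struct platform_device *pdev;

	/* The child device name must match HDMI_CODEC_DRV_NAME. */
	pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
					     PLATFORM_DEVID_AUTO,
					     &codec_data, sizeof(codec_data));
	return PTR_ERR_OR_ZERO(pdev);
}
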
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 1c87299..683769f 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -343,9 +343,12 @@
 	SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL,
 		0),
 
-	/* ADC for button press detection */
-	SND_SOC_DAPM_ADC("SAR", NULL, NAU8825_REG_SAR_CTRL,
-		NAU8825_SAR_ADC_EN_SFT, 0),
+	/* ADC for button press detection. A dapm supply widget is used to
+	 * prevent dapm_power_widgets from keeping the codec at
+	 * SND_SOC_BIAS_ON during suspend.
+	 */
+	SND_SOC_DAPM_SUPPLY("SAR", NAU8825_REG_SAR_CTRL,
+		NAU8825_SAR_ADC_EN_SFT, 0, NULL, 0),
 
 	SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0),
 	SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0),
@@ -607,6 +610,16 @@
 
 static void nau8825_restart_jack_detection(struct regmap *regmap)
 {
+	/* Chip needs one FSCLK cycle in order to generate interrupts,
+	 * as we cannot guarantee one will be provided by the system. Turning
+	 * master mode on then off enables us to generate that FSCLK cycle
+	 * with a minimum of contention on the clock bus.
+	 */
+	regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+		NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
+	regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+		NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
+
 	/* this will restart the entire jack detection process including MIC/GND
 	 * switching and create interrupts. We have to go from 0 to 1 and back
 	 * to 0 to restart.
@@ -728,7 +741,10 @@
 	struct regmap *regmap = nau8825->regmap;
 	int active_irq, clear_irq = 0, event = 0, event_mask = 0;
 
-	regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq);
+	if (regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq)) {
+		dev_err(nau8825->dev, "failed to read irq status\n");
+		return IRQ_NONE;
+	}
 
 	if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) ==
 		NAU8825_JACK_EJECTION_DETECTED) {
@@ -1141,33 +1157,74 @@
 					return ret;
 				}
 			}
-
-			ret = regcache_sync(nau8825->regmap);
-			if (ret) {
-				dev_err(codec->dev,
-					"Failed to sync cache: %d\n", ret);
-				return ret;
-			}
 		}
-
 		break;
 
 	case SND_SOC_BIAS_OFF:
 		if (nau8825->mclk_freq)
 			clk_disable_unprepare(nau8825->mclk);
-
-		regcache_mark_dirty(nau8825->regmap);
 		break;
 	}
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int nau8825_suspend(struct snd_soc_codec *codec)
+{
+	struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+	disable_irq(nau8825->irq);
+	regcache_cache_only(nau8825->regmap, true);
+	regcache_mark_dirty(nau8825->regmap);
+
+	return 0;
+}
+
+static int nau8825_resume(struct snd_soc_codec *codec)
+{
+	struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+	/* The chip may lose power and reset in S3. regcache_sync restores
+	 * register values including configurations for sysclk, irq, and
+	 * jack/button detection.
+	 */
+	regcache_cache_only(nau8825->regmap, false);
+	regcache_sync(nau8825->regmap);
+
+	/* Check the jack plug status directly. If the headset is unplugged
+	 * during S3 when the chip has no power, there will be no jack
+	 * detection irq even after the nau8825_restart_jack_detection below,
+	 * because the chip just thinks no headset has ever been plugged in.
+	 */
+	if (!nau8825_is_jack_inserted(nau8825->regmap)) {
+		nau8825_eject_jack(nau8825);
+		snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET);
+	}
+
+	enable_irq(nau8825->irq);
+
+	/* Run jack detection to check the type (OMTP or CTIA) of the headset
+	 * if there is one. This handles the case where a different type of
+	 * headset is plugged in during S3. This triggers an IRQ iff a headset
+	 * is already plugged in.
+	 */
+	nau8825_restart_jack_detection(nau8825->regmap);
+
+	return 0;
+}
+#else
+#define nau8825_suspend NULL
+#define nau8825_resume NULL
+#endif
+
 static struct snd_soc_codec_driver nau8825_codec_driver = {
 	.probe = nau8825_codec_probe,
 	.set_sysclk = nau8825_set_sysclk,
 	.set_pll = nau8825_set_pll,
 	.set_bias_level = nau8825_set_bias_level,
 	.suspend_bias_off = true,
+	.suspend = nau8825_suspend,
+	.resume = nau8825_resume,
 
 	.controls = nau8825_controls,
 	.num_controls = ARRAY_SIZE(nau8825_controls),
@@ -1277,16 +1334,6 @@
 	regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
 		NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR);
 
-	/* Chip needs one FSCLK cycle in order to generate interrupts,
-	 * as we cannot guarantee one will be provided by the system. Turning
-	 * master mode on then off enables us to generate that FSCLK cycle
-	 * with a minimum of contention on the clock bus.
-	 */
-	regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
-		NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
-	regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
-		NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
-
 	ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL,
 		nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 		"nau8825", nau8825);
@@ -1354,36 +1401,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int nau8825_suspend(struct device *dev)
-{
-	struct i2c_client *client = to_i2c_client(dev);
-	struct nau8825 *nau8825 = dev_get_drvdata(dev);
-
-	disable_irq(client->irq);
-	regcache_cache_only(nau8825->regmap, true);
-	regcache_mark_dirty(nau8825->regmap);
-
-	return 0;
-}
-
-static int nau8825_resume(struct device *dev)
-{
-	struct i2c_client *client = to_i2c_client(dev);
-	struct nau8825 *nau8825 = dev_get_drvdata(dev);
-
-	regcache_cache_only(nau8825->regmap, false);
-	regcache_sync(nau8825->regmap);
-	enable_irq(client->irq);
-
-	return 0;
-}
-#endif
-
-static const struct dev_pm_ops nau8825_pm = {
-	SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
-};
-
 static const struct i2c_device_id nau8825_i2c_ids[] = {
 	{ "nau8825", 0 },
 	{ }
@@ -1410,7 +1427,6 @@
 		.name = "nau8825",
 		.of_match_table = of_match_ptr(nau8825_of_ids),
 		.acpi_match_table = ACPI_PTR(nau8825_acpi_match),
-		.pm = &nau8825_pm,
 	},
 	.probe = nau8825_i2c_probe,
 	.remove = nau8825_i2c_remove,
diff --git a/sound/soc/codecs/pcm5102a.c b/sound/soc/codecs/pcm5102a.c
new file mode 100644
index 0000000..ed51567
--- /dev/null
+++ b/sound/soc/codecs/pcm5102a.c
@@ -0,0 +1,69 @@
+/*
+ * Driver for the PCM5102A codec
+ *
+ * Author:	Florian Meier <florian.meier@koalo.de>
+ *		Copyright 2013
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <sound/soc.h>
+
+static struct snd_soc_dai_driver pcm5102a_dai = {
+	.name = "pcm5102a-hifi",
+	.playback = {
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_8000_192000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE |
+			   SNDRV_PCM_FMTBIT_S24_LE |
+			   SNDRV_PCM_FMTBIT_S32_LE
+	},
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm5102a;
+
+static int pcm5102a_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_pcm5102a,
+			&pcm5102a_dai, 1);
+}
+
+static int pcm5102a_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id pcm5102a_of_match[] = {
+	{ .compatible = "ti,pcm5102a", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, pcm5102a_of_match);
+
+static struct platform_driver pcm5102a_codec_driver = {
+	.probe		= pcm5102a_probe,
+	.remove		= pcm5102a_remove,
+	.driver		= {
+		.name	= "pcm5102a-codec",
+		.owner	= THIS_MODULE,
+		.of_match_table = pcm5102a_of_match,
+	},
+};
+
+module_platform_driver(pcm5102a_codec_driver);
+
+MODULE_DESCRIPTION("ASoC PCM5102A codec driver");
+MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
+MODULE_LICENSE("GPL v2");
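
The PCM5102A has no control interface, so the driver above is register-less and
only needs the codec device to exist. On DT systems that happens through the
"ti,pcm5102a" compatible in pcm5102a_of_match; on a legacy board file the device
could be created directly. A minimal sketch, with the my_board_* name being a
hypothetical placeholder:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static int __init my_board_register_pcm5102a(void)
{
	struct platform_device *pdev;

	/* The name must match pcm5102a_codec_driver's "pcm5102a-codec". */
	pdev = platform_device_register_simple("pcm5102a-codec", -1, NULL, 0);
	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(my_board_register_pcm5102a);
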
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index f0e6c06..a1aaffc 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -17,6 +17,7 @@
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
+#include <linux/dmi.h>
 #include <linux/acpi.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -1132,6 +1133,17 @@
 };
 MODULE_DEVICE_TABLE(acpi, rt298_acpi_match);
 
+static const struct dmi_system_id force_combo_jack_table[] = {
+	{
+		.ident = "Intel Broxton P",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Broxton P")
+		}
+	},
+	{ }
+};
+
 static int rt298_i2c_probe(struct i2c_client *i2c,
 			   const struct i2c_device_id *id)
 {
@@ -1184,11 +1196,16 @@
 
 	/* enable jack combo mode on supported devices */
 	acpiid = acpi_match_device(dev->driver->acpi_match_table, dev);
-	if (acpiid) {
+	if (acpiid && acpiid->driver_data) {
 		rt298->pdata = *(struct rt298_platform_data *)
 				acpiid->driver_data;
 	}
 
+	if (dmi_check_system(force_combo_jack_table)) {
+		rt298->pdata.cbj_en = true;
+		rt298->pdata.gpio2_en = false;
+	}
+
 	/* VREF Charging */
 	regmap_update_bits(rt298->regmap, 0x04, 0x80, 0x80);
 	regmap_update_bits(rt298->regmap, 0x1b, 0x860, 0x860);
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index e8b5ba0..09e8988 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -359,7 +359,7 @@
 
 /* Interface data select */
 static const char * const rt5640_data_select[] = {
-	"Normal", "left copy to right", "right copy to left", "Swap"};
+	"Normal", "Swap", "left copy to right", "right copy to left"};
 
 static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
 			    RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 1761c3a9..58b664b 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -443,39 +443,39 @@
 #define RT5640_IF1_DAC_SEL_MASK			(0x3 << 14)
 #define RT5640_IF1_DAC_SEL_SFT			14
 #define RT5640_IF1_DAC_SEL_NOR			(0x0 << 14)
-#define RT5640_IF1_DAC_SEL_L2R			(0x1 << 14)
-#define RT5640_IF1_DAC_SEL_R2L			(0x2 << 14)
-#define RT5640_IF1_DAC_SEL_SWAP			(0x3 << 14)
+#define RT5640_IF1_DAC_SEL_SWAP			(0x1 << 14)
+#define RT5640_IF1_DAC_SEL_L2R			(0x2 << 14)
+#define RT5640_IF1_DAC_SEL_R2L			(0x3 << 14)
 #define RT5640_IF1_ADC_SEL_MASK			(0x3 << 12)
 #define RT5640_IF1_ADC_SEL_SFT			12
 #define RT5640_IF1_ADC_SEL_NOR			(0x0 << 12)
-#define RT5640_IF1_ADC_SEL_L2R			(0x1 << 12)
-#define RT5640_IF1_ADC_SEL_R2L			(0x2 << 12)
-#define RT5640_IF1_ADC_SEL_SWAP			(0x3 << 12)
+#define RT5640_IF1_ADC_SEL_SWAP			(0x1 << 12)
+#define RT5640_IF1_ADC_SEL_L2R			(0x2 << 12)
+#define RT5640_IF1_ADC_SEL_R2L			(0x3 << 12)
 #define RT5640_IF2_DAC_SEL_MASK			(0x3 << 10)
 #define RT5640_IF2_DAC_SEL_SFT			10
 #define RT5640_IF2_DAC_SEL_NOR			(0x0 << 10)
-#define RT5640_IF2_DAC_SEL_L2R			(0x1 << 10)
-#define RT5640_IF2_DAC_SEL_R2L			(0x2 << 10)
-#define RT5640_IF2_DAC_SEL_SWAP			(0x3 << 10)
+#define RT5640_IF2_DAC_SEL_SWAP			(0x1 << 10)
+#define RT5640_IF2_DAC_SEL_L2R			(0x2 << 10)
+#define RT5640_IF2_DAC_SEL_R2L			(0x3 << 10)
 #define RT5640_IF2_ADC_SEL_MASK			(0x3 << 8)
 #define RT5640_IF2_ADC_SEL_SFT			8
 #define RT5640_IF2_ADC_SEL_NOR			(0x0 << 8)
-#define RT5640_IF2_ADC_SEL_L2R			(0x1 << 8)
-#define RT5640_IF2_ADC_SEL_R2L			(0x2 << 8)
-#define RT5640_IF2_ADC_SEL_SWAP			(0x3 << 8)
+#define RT5640_IF2_ADC_SEL_SWAP			(0x1 << 8)
+#define RT5640_IF2_ADC_SEL_L2R			(0x2 << 8)
+#define RT5640_IF2_ADC_SEL_R2L			(0x3 << 8)
 #define RT5640_IF3_DAC_SEL_MASK			(0x3 << 6)
 #define RT5640_IF3_DAC_SEL_SFT			6
 #define RT5640_IF3_DAC_SEL_NOR			(0x0 << 6)
-#define RT5640_IF3_DAC_SEL_L2R			(0x1 << 6)
-#define RT5640_IF3_DAC_SEL_R2L			(0x2 << 6)
-#define RT5640_IF3_DAC_SEL_SWAP			(0x3 << 6)
+#define RT5640_IF3_DAC_SEL_SWAP			(0x1 << 6)
+#define RT5640_IF3_DAC_SEL_L2R			(0x2 << 6)
+#define RT5640_IF3_DAC_SEL_R2L			(0x3 << 6)
 #define RT5640_IF3_ADC_SEL_MASK			(0x3 << 4)
 #define RT5640_IF3_ADC_SEL_SFT			4
 #define RT5640_IF3_ADC_SEL_NOR			(0x0 << 4)
-#define RT5640_IF3_ADC_SEL_L2R			(0x1 << 4)
-#define RT5640_IF3_ADC_SEL_R2L			(0x2 << 4)
-#define RT5640_IF3_ADC_SEL_SWAP			(0x3 << 4)
+#define RT5640_IF3_ADC_SEL_SWAP			(0x1 << 4)
+#define RT5640_IF3_ADC_SEL_L2R			(0x2 << 4)
+#define RT5640_IF3_ADC_SEL_R2L			(0x3 << 4)
 
 /* REC Left Mixer Control 1 (0x3b) */
 #define RT5640_G_HP_L_RM_L_MASK			(0x7 << 13)
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 7af5e73..3c6594d 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3286,10 +3286,8 @@
 		if (btn_type == 0)/* button release */
 			report =  rt5645->jack_type;
 		else {
-			if (rt5645->pdata.jd_invert) {
-				mod_timer(&rt5645->btn_check_timer,
-					msecs_to_jiffies(100));
-			}
+			mod_timer(&rt5645->btn_check_timer,
+				msecs_to_jiffies(100));
 		}
 
 		break;
@@ -3557,6 +3555,12 @@
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 		},
 	},
+	{
+		.ident = "Google Setzer",
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
+		},
+	},
 	{ }
 };
 
@@ -3810,9 +3814,9 @@
 	if (rt5645->pdata.jd_invert) {
 		regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
 			RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
-		setup_timer(&rt5645->btn_check_timer,
-			rt5645_btn_check_callback, (unsigned long)rt5645);
 	}
+	setup_timer(&rt5645->btn_check_timer,
+		rt5645_btn_check_callback, (unsigned long)rt5645);
 
 	INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
 	INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 33e290b..6021226 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -4520,14 +4520,9 @@
 }
 
 #ifdef CONFIG_GPIOLIB
-static inline struct rt5677_priv *gpio_to_rt5677(struct gpio_chip *chip)
-{
-	return container_of(chip, struct rt5677_priv, gpio_chip);
-}
-
 static void rt5677_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
-	struct rt5677_priv *rt5677 = gpio_to_rt5677(chip);
+	struct rt5677_priv *rt5677 = gpiochip_get_data(chip);
 
 	switch (offset) {
 	case RT5677_GPIO1 ... RT5677_GPIO5:
@@ -4548,7 +4543,7 @@
 static int rt5677_gpio_direction_out(struct gpio_chip *chip,
 				     unsigned offset, int value)
 {
-	struct rt5677_priv *rt5677 = gpio_to_rt5677(chip);
+	struct rt5677_priv *rt5677 = gpiochip_get_data(chip);
 
 	switch (offset) {
 	case RT5677_GPIO1 ... RT5677_GPIO5:
@@ -4572,7 +4567,7 @@
 
 static int rt5677_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
-	struct rt5677_priv *rt5677 = gpio_to_rt5677(chip);
+	struct rt5677_priv *rt5677 = gpiochip_get_data(chip);
 	int value, ret;
 
 	ret = regmap_read(rt5677->regmap, RT5677_GPIO_ST, &value);
@@ -4584,7 +4579,7 @@
 
 static int rt5677_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
 {
-	struct rt5677_priv *rt5677 = gpio_to_rt5677(chip);
+	struct rt5677_priv *rt5677 = gpiochip_get_data(chip);
 
 	switch (offset) {
 	case RT5677_GPIO1 ... RT5677_GPIO5:
@@ -4638,7 +4633,7 @@
 
 static int rt5677_to_irq(struct gpio_chip *chip, unsigned offset)
 {
-	struct rt5677_priv *rt5677 = gpio_to_rt5677(chip);
+	struct rt5677_priv *rt5677 = gpiochip_get_data(chip);
 	struct regmap_irq_chip_data *data = rt5677->irq_data;
 	int irq;
 
@@ -4697,7 +4692,7 @@
 	rt5677->gpio_chip.parent = &i2c->dev;
 	rt5677->gpio_chip.base = -1;
 
-	ret = gpiochip_add(&rt5677->gpio_chip);
+	ret = gpiochip_add_data(&rt5677->gpio_chip, rt5677);
 	if (ret != 0)
 		dev_err(&i2c->dev, "Failed to add GPIOs: %d\n", ret);
 }
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 171a23d..512a9d2 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -17,6 +17,7 @@
 #include <linux/export.h>
 #include <linux/pm.h>
 #include <linux/gcd.h>
+#include <linux/gpio/driver.h>
 #include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/pm_runtime.h>
@@ -2236,14 +2237,9 @@
 }
 
 #ifdef CONFIG_GPIOLIB
-static inline struct wm5100_priv *gpio_to_wm5100(struct gpio_chip *chip)
-{
-	return container_of(chip, struct wm5100_priv, gpio_chip);
-}
-
 static void wm5100_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
-	struct wm5100_priv *wm5100 = gpio_to_wm5100(chip);
+	struct wm5100_priv *wm5100 = gpiochip_get_data(chip);
 
 	regmap_update_bits(wm5100->regmap, WM5100_GPIO_CTRL_1 + offset,
 			   WM5100_GP1_LVL, !!value << WM5100_GP1_LVL_SHIFT);
@@ -2252,7 +2248,7 @@
 static int wm5100_gpio_direction_out(struct gpio_chip *chip,
 				     unsigned offset, int value)
 {
-	struct wm5100_priv *wm5100 = gpio_to_wm5100(chip);
+	struct wm5100_priv *wm5100 = gpiochip_get_data(chip);
 	int val, ret;
 
 	val = (1 << WM5100_GP1_FN_SHIFT) | (!!value << WM5100_GP1_LVL_SHIFT);
@@ -2268,7 +2264,7 @@
 
 static int wm5100_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
-	struct wm5100_priv *wm5100 = gpio_to_wm5100(chip);
+	struct wm5100_priv *wm5100 = gpiochip_get_data(chip);
 	unsigned int reg;
 	int ret;
 
@@ -2281,7 +2277,7 @@
 
 static int wm5100_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
 {
-	struct wm5100_priv *wm5100 = gpio_to_wm5100(chip);
+	struct wm5100_priv *wm5100 = gpiochip_get_data(chip);
 
 	return regmap_update_bits(wm5100->regmap, WM5100_GPIO_CTRL_1 + offset,
 				  WM5100_GP1_FN_MASK | WM5100_GP1_DIR,
@@ -2313,7 +2309,7 @@
 	else
 		wm5100->gpio_chip.base = -1;
 
-	ret = gpiochip_add(&wm5100->gpio_chip);
+	ret = gpiochip_add_data(&wm5100->gpio_chip, wm5100);
 	if (ret != 0)
 		dev_err(&i2c->dev, "Failed to add GPIOs: %d\n", ret);
 }
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index a8b3e3f..da60e3f 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1955,11 +1955,16 @@
 static int wm5102_codec_remove(struct snd_soc_codec *codec)
 {
 	struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->core.arizona;
 
 	wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
 
 	priv->core.arizona->dapm = NULL;
 
+	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+
+	arizona_free_spk(codec);
+
 	return 0;
 }
 
@@ -2093,10 +2098,14 @@
 
 static int wm5102_remove(struct platform_device *pdev)
 {
+	struct wm5102_priv *wm5102 = platform_get_drvdata(pdev);
+
 	snd_soc_unregister_platform(&pdev->dev);
 	snd_soc_unregister_codec(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
+	wm_adsp2_remove(&wm5102->core.adsp[0]);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 83ba70f..b5820e4 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2298,6 +2298,8 @@
 
 	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
 
+	arizona_free_spk(codec);
+
 	return 0;
 }
 
@@ -2435,10 +2437,16 @@
 
 static int wm5110_remove(struct platform_device *pdev)
 {
+	struct wm5110_priv *wm5110 = platform_get_drvdata(pdev);
+	int i;
+
 	snd_soc_unregister_platform(&pdev->dev);
 	snd_soc_unregister_codec(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
+	for (i = 0; i < WM5110_NUM_ADSP; i++)
+		wm_adsp2_remove(&wm5110->core.adsp[i]);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index a82b8bc..a26ca49 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -20,7 +20,7 @@
 #include <linux/init.h>
 #include <linux/completion.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/regmap.h>
@@ -1766,11 +1766,6 @@
 }
 
 #ifdef CONFIG_GPIOLIB
-static inline struct wm8903_priv *gpio_to_wm8903(struct gpio_chip *chip)
-{
-	return container_of(chip, struct wm8903_priv, gpio_chip);
-}
-
 static int wm8903_gpio_request(struct gpio_chip *chip, unsigned offset)
 {
 	if (offset >= WM8903_NUM_GPIO)
@@ -1781,7 +1776,7 @@
 
 static int wm8903_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
 {
-	struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
+	struct wm8903_priv *wm8903 = gpiochip_get_data(chip);
 	unsigned int mask, val;
 	int ret;
 
@@ -1799,7 +1794,7 @@
 
 static int wm8903_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
-	struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
+	struct wm8903_priv *wm8903 = gpiochip_get_data(chip);
 	unsigned int reg;
 
 	regmap_read(wm8903->regmap, WM8903_GPIO_CONTROL_1 + offset, &reg);
@@ -1810,7 +1805,7 @@
 static int wm8903_gpio_direction_out(struct gpio_chip *chip,
 				     unsigned offset, int value)
 {
-	struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
+	struct wm8903_priv *wm8903 = gpiochip_get_data(chip);
 	unsigned int mask, val;
 	int ret;
 
@@ -1828,7 +1823,7 @@
 
 static void wm8903_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
-	struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
+	struct wm8903_priv *wm8903 = gpiochip_get_data(chip);
 
 	regmap_update_bits(wm8903->regmap, WM8903_GPIO_CONTROL_1 + offset,
 			   WM8903_GP1_LVL_MASK,
@@ -1860,7 +1855,7 @@
 	else
 		wm8903->gpio_chip.base = -1;
 
-	ret = gpiochip_add(&wm8903->gpio_chip);
+	ret = gpiochip_add_data(&wm8903->gpio_chip, wm8903);
 	if (ret != 0)
 		dev_err(wm8903->dev, "Failed to add GPIOs: %d\n", ret);
 }
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 8822360..fc164d6 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -18,7 +18,7 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/gcd.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/pm_runtime.h>
@@ -2471,7 +2471,7 @@
 		break;
 	default:
 		dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n");
-		dspclk = wm8962->sysclk;
+		dspclk = wm8962->sysclk_rate;
 	}
 
 	dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk);
@@ -3307,14 +3307,9 @@
 }
 
 #ifdef CONFIG_GPIOLIB
-static inline struct wm8962_priv *gpio_to_wm8962(struct gpio_chip *chip)
-{
-	return container_of(chip, struct wm8962_priv, gpio_chip);
-}
-
 static int wm8962_gpio_request(struct gpio_chip *chip, unsigned offset)
 {
-	struct wm8962_priv *wm8962 = gpio_to_wm8962(chip);
+	struct wm8962_priv *wm8962 = gpiochip_get_data(chip);
 
 	/* The WM8962 GPIOs aren't linearly numbered.  For simplicity
 	 * we export linear numbers and error out if the unsupported
@@ -3337,7 +3332,7 @@
 
 static void wm8962_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
-	struct wm8962_priv *wm8962 = gpio_to_wm8962(chip);
+	struct wm8962_priv *wm8962 = gpiochip_get_data(chip);
 	struct snd_soc_codec *codec = wm8962->codec;
 
 	snd_soc_update_bits(codec, WM8962_GPIO_BASE + offset,
@@ -3347,7 +3342,7 @@
 static int wm8962_gpio_direction_out(struct gpio_chip *chip,
 				     unsigned offset, int value)
 {
-	struct wm8962_priv *wm8962 = gpio_to_wm8962(chip);
+	struct wm8962_priv *wm8962 = gpiochip_get_data(chip);
 	struct snd_soc_codec *codec = wm8962->codec;
 	int ret, val;
 
@@ -3386,7 +3381,7 @@
 	else
 		wm8962->gpio_chip.base = -1;
 
-	ret = gpiochip_add(&wm8962->gpio_chip);
+	ret = gpiochip_add_data(&wm8962->gpio_chip, wm8962);
 	if (ret != 0)
 		dev_err(codec->dev, "Failed to add GPIOs: %d\n", ret);
 }
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index f99b34f..a730442 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -17,6 +17,7 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/gcd.h>
+#include <linux/gpio/driver.h>
 #include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/regmap.h>
@@ -2139,14 +2140,9 @@
 }
 
 #ifdef CONFIG_GPIOLIB
-static inline struct wm8996_priv *gpio_to_wm8996(struct gpio_chip *chip)
-{
-	return container_of(chip, struct wm8996_priv, gpio_chip);
-}
-
 static void wm8996_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
-	struct wm8996_priv *wm8996 = gpio_to_wm8996(chip);
+	struct wm8996_priv *wm8996 = gpiochip_get_data(chip);
 
 	regmap_update_bits(wm8996->regmap, WM8996_GPIO_1 + offset,
 			   WM8996_GP1_LVL, !!value << WM8996_GP1_LVL_SHIFT);
@@ -2155,7 +2151,7 @@
 static int wm8996_gpio_direction_out(struct gpio_chip *chip,
 				     unsigned offset, int value)
 {
-	struct wm8996_priv *wm8996 = gpio_to_wm8996(chip);
+	struct wm8996_priv *wm8996 = gpiochip_get_data(chip);
 	int val;
 
 	val = (1 << WM8996_GP1_FN_SHIFT) | (!!value << WM8996_GP1_LVL_SHIFT);
@@ -2167,7 +2163,7 @@
 
 static int wm8996_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
-	struct wm8996_priv *wm8996 = gpio_to_wm8996(chip);
+	struct wm8996_priv *wm8996 = gpiochip_get_data(chip);
 	unsigned int reg;
 	int ret;
 
@@ -2180,7 +2176,7 @@
 
 static int wm8996_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
 {
-	struct wm8996_priv *wm8996 = gpio_to_wm8996(chip);
+	struct wm8996_priv *wm8996 = gpiochip_get_data(chip);
 
 	return regmap_update_bits(wm8996->regmap, WM8996_GPIO_1 + offset,
 				  WM8996_GP1_FN_MASK | WM8996_GP1_DIR,
@@ -2211,7 +2207,7 @@
 	else
 		wm8996->gpio_chip.base = -1;
 
-	ret = gpiochip_add(&wm8996->gpio_chip);
+	ret = gpiochip_add_data(&wm8996->gpio_chip, wm8996);
 	if (ret != 0)
 		dev_err(wm8996->dev, "Failed to add GPIOs: %d\n", ret);
 }
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 52d766e..6b0785b 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1072,6 +1072,8 @@
 
 	priv->core.arizona->dapm = NULL;
 
+	arizona_free_spk(codec);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 0123960..449f666 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1324,6 +1324,8 @@
 
 	priv->core.arizona->dapm = NULL;
 
+	arizona_free_spk(codec);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index d3b1cb1..a07bd7c 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -160,6 +160,8 @@
 #define ADSP2_RAM_RDY_SHIFT                    0
 #define ADSP2_RAM_RDY_WIDTH                    1
 
+#define ADSP_MAX_STD_CTRL_SIZE               512
+
 struct wm_adsp_buf {
 	struct list_head list;
 	void *buf;
@@ -271,8 +273,11 @@
 	__be32 words_written[2];	/* total words written (64 bit) */
 };
 
+struct wm_adsp_compr;
+
 struct wm_adsp_compr_buf {
 	struct wm_adsp *dsp;
+	struct wm_adsp_compr *compr;
 
 	struct wm_adsp_buffer_region *regions;
 	u32 host_buf_ptr;
@@ -435,6 +440,7 @@
 	size_t len;
 	unsigned int set:1;
 	struct snd_kcontrol *kcontrol;
+	struct soc_bytes_ext bytes_ext;
 	unsigned int flags;
 };
 
@@ -711,10 +717,17 @@
 		 be16_to_cpu(scratch[3]));
 }
 
+static inline struct wm_coeff_ctl *bytes_ext_to_ctl(struct soc_bytes_ext *ext)
+{
+	return container_of(ext, struct wm_coeff_ctl, bytes_ext);
+}
+
 static int wm_coeff_info(struct snd_kcontrol *kctl,
 			 struct snd_ctl_elem_info *uinfo)
 {
-	struct wm_coeff_ctl *ctl = (struct wm_coeff_ctl *)kctl->private_value;
+	struct soc_bytes_ext *bytes_ext =
+		(struct soc_bytes_ext *)kctl->private_value;
+	struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
 
 	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
 	uinfo->count = ctl->len;
@@ -763,7 +776,9 @@
 static int wm_coeff_put(struct snd_kcontrol *kctl,
 			struct snd_ctl_elem_value *ucontrol)
 {
-	struct wm_coeff_ctl *ctl = (struct wm_coeff_ctl *)kctl->private_value;
+	struct soc_bytes_ext *bytes_ext =
+		(struct soc_bytes_ext *)kctl->private_value;
+	struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
 	char *p = ucontrol->value.bytes.data;
 	int ret = 0;
 
@@ -780,6 +795,29 @@
 	return ret;
 }
 
+static int wm_coeff_tlv_put(struct snd_kcontrol *kctl,
+			    const unsigned int __user *bytes, unsigned int size)
+{
+	struct soc_bytes_ext *bytes_ext =
+		(struct soc_bytes_ext *)kctl->private_value;
+	struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
+	int ret = 0;
+
+	mutex_lock(&ctl->dsp->pwr_lock);
+
+	if (copy_from_user(ctl->cache, bytes, size)) {
+		ret = -EFAULT;
+	} else {
+		ctl->set = 1;
+		if (ctl->enabled)
+			ret = wm_coeff_write_control(ctl, ctl->cache, size);
+	}
+
+	mutex_unlock(&ctl->dsp->pwr_lock);
+
+	return ret;
+}
+
 static int wm_coeff_read_control(struct wm_coeff_ctl *ctl,
 				 void *buf, size_t len)
 {
@@ -822,7 +860,9 @@
 static int wm_coeff_get(struct snd_kcontrol *kctl,
 			struct snd_ctl_elem_value *ucontrol)
 {
-	struct wm_coeff_ctl *ctl = (struct wm_coeff_ctl *)kctl->private_value;
+	struct soc_bytes_ext *bytes_ext =
+		(struct soc_bytes_ext *)kctl->private_value;
+	struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
 	char *p = ucontrol->value.bytes.data;
 	int ret = 0;
 
@@ -845,12 +885,72 @@
 	return ret;
 }
 
+static int wm_coeff_tlv_get(struct snd_kcontrol *kctl,
+			    unsigned int __user *bytes, unsigned int size)
+{
+	struct soc_bytes_ext *bytes_ext =
+		(struct soc_bytes_ext *)kctl->private_value;
+	struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
+	int ret = 0;
+
+	mutex_lock(&ctl->dsp->pwr_lock);
+
+	if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
+		if (ctl->enabled)
+			ret = wm_coeff_read_control(ctl, ctl->cache, size);
+		else
+			ret = -EPERM;
+	} else {
+		if (!ctl->flags && ctl->enabled)
+			ret = wm_coeff_read_control(ctl, ctl->cache, size);
+	}
+
+	if (!ret && copy_to_user(bytes, ctl->cache, size))
+		ret = -EFAULT;
+
+	mutex_unlock(&ctl->dsp->pwr_lock);
+
+	return ret;
+}
+
 struct wmfw_ctl_work {
 	struct wm_adsp *dsp;
 	struct wm_coeff_ctl *ctl;
 	struct work_struct work;
 };
 
+static unsigned int wmfw_convert_flags(unsigned int in, unsigned int len)
+{
+	unsigned int out, rd, wr, vol;
+
+	if (len > ADSP_MAX_STD_CTRL_SIZE) {
+		rd = SNDRV_CTL_ELEM_ACCESS_TLV_READ;
+		wr = SNDRV_CTL_ELEM_ACCESS_TLV_WRITE;
+		vol = SNDRV_CTL_ELEM_ACCESS_VOLATILE;
+
+		out = SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+	} else {
+		rd = SNDRV_CTL_ELEM_ACCESS_READ;
+		wr = SNDRV_CTL_ELEM_ACCESS_WRITE;
+		vol = SNDRV_CTL_ELEM_ACCESS_VOLATILE;
+
+		out = 0;
+	}
+
+	if (in) {
+		if (in & WMFW_CTL_FLAG_READABLE)
+			out |= rd;
+		if (in & WMFW_CTL_FLAG_WRITEABLE)
+			out |= wr;
+		if (in & WMFW_CTL_FLAG_VOLATILE)
+			out |= vol;
+	} else {
+		out |= rd | wr | vol;
+	}
+
+	return out;
+}
+
 static int wmfw_add_ctl(struct wm_adsp *dsp, struct wm_coeff_ctl *ctl)
 {
 	struct snd_kcontrol_new *kcontrol;
@@ -868,19 +968,15 @@
 	kcontrol->info = wm_coeff_info;
 	kcontrol->get = wm_coeff_get;
 	kcontrol->put = wm_coeff_put;
-	kcontrol->private_value = (unsigned long)ctl;
+	kcontrol->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+	kcontrol->tlv.c = snd_soc_bytes_tlv_callback;
+	kcontrol->private_value = (unsigned long)&ctl->bytes_ext;
 
-	if (ctl->flags) {
-		if (ctl->flags & WMFW_CTL_FLAG_WRITEABLE)
-			kcontrol->access |= SNDRV_CTL_ELEM_ACCESS_WRITE;
-		if (ctl->flags & WMFW_CTL_FLAG_READABLE)
-			kcontrol->access |= SNDRV_CTL_ELEM_ACCESS_READ;
-		if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
-			kcontrol->access |= SNDRV_CTL_ELEM_ACCESS_VOLATILE;
-	} else {
-		kcontrol->access = SNDRV_CTL_ELEM_ACCESS_READWRITE;
-		kcontrol->access |= SNDRV_CTL_ELEM_ACCESS_VOLATILE;
-	}
+	ctl->bytes_ext.max = ctl->len;
+	ctl->bytes_ext.get = wm_coeff_tlv_get;
+	ctl->bytes_ext.put = wm_coeff_tlv_put;
+
+	kcontrol->access = wmfw_convert_flags(ctl->flags, ctl->len);
 
 	ret = snd_soc_add_card_controls(dsp->card, kcontrol, 1);
 	if (ret < 0)
@@ -944,6 +1040,13 @@
 	kfree(ctl_work);
 }
 
+static void wm_adsp_free_ctl_blk(struct wm_coeff_ctl *ctl)
+{
+	kfree(ctl->cache);
+	kfree(ctl->name);
+	kfree(ctl);
+}
+
 static int wm_adsp_create_control(struct wm_adsp *dsp,
 				  const struct wm_adsp_alg_region *alg_region,
 				  unsigned int offset, unsigned int len,
@@ -1032,11 +1135,6 @@
 
 	ctl->flags = flags;
 	ctl->offset = offset;
-	if (len > 512) {
-		adsp_warn(dsp, "Truncating control %s from %d\n",
-			  ctl->name, len);
-		len = 512;
-	}
 	ctl->len = len;
 	ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
 	if (!ctl->cache) {
@@ -1564,6 +1662,19 @@
 	return alg_region;
 }
 
+static void wm_adsp_free_alg_regions(struct wm_adsp *dsp)
+{
+	struct wm_adsp_alg_region *alg_region;
+
+	while (!list_empty(&dsp->alg_regions)) {
+		alg_region = list_first_entry(&dsp->alg_regions,
+					      struct wm_adsp_alg_region,
+					      list);
+		list_del(&alg_region->list);
+		kfree(alg_region);
+	}
+}
+
 static int wm_adsp1_setup_algs(struct wm_adsp *dsp)
 {
 	struct wmfw_adsp1_id_hdr adsp1_id;
@@ -1994,7 +2105,6 @@
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
 	struct wm_adsp *dsp = &dsps[w->shift];
-	struct wm_adsp_alg_region *alg_region;
 	struct wm_coeff_ctl *ctl;
 	int ret;
 	unsigned int val;
@@ -2074,13 +2184,8 @@
 		list_for_each_entry(ctl, &dsp->ctl_list, list)
 			ctl->enabled = 0;
 
-		while (!list_empty(&dsp->alg_regions)) {
-			alg_region = list_first_entry(&dsp->alg_regions,
-						      struct wm_adsp_alg_region,
-						      list);
-			list_del(&alg_region->list);
-			kfree(alg_region);
-		}
+
+		wm_adsp_free_alg_regions(dsp);
 		break;
 
 	default:
@@ -2222,7 +2327,6 @@
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
 	struct wm_adsp *dsp = &dsps[w->shift];
-	struct wm_adsp_alg_region *alg_region;
 	struct wm_coeff_ctl *ctl;
 	int ret;
 
@@ -2240,9 +2344,13 @@
 		if (ret != 0)
 			goto err;
 
+		mutex_lock(&dsp->pwr_lock);
+
 		if (wm_adsp_fw[dsp->fw].num_caps != 0)
 			ret = wm_adsp_buffer_init(dsp);
 
+		mutex_unlock(&dsp->pwr_lock);
+
 		break;
 
 	case SND_SOC_DAPM_PRE_PMD:
@@ -2269,13 +2377,7 @@
 		list_for_each_entry(ctl, &dsp->ctl_list, list)
 			ctl->enabled = 0;
 
-		while (!list_empty(&dsp->alg_regions)) {
-			alg_region = list_first_entry(&dsp->alg_regions,
-						      struct wm_adsp_alg_region,
-						      list);
-			list_del(&alg_region->list);
-			kfree(alg_region);
-		}
+		wm_adsp_free_alg_regions(dsp);
 
 		if (wm_adsp_fw[dsp->fw].num_caps != 0)
 			wm_adsp_buffer_free(dsp);
@@ -2340,6 +2442,54 @@
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_init);
 
+void wm_adsp2_remove(struct wm_adsp *dsp)
+{
+	struct wm_coeff_ctl *ctl;
+
+	while (!list_empty(&dsp->ctl_list)) {
+		ctl = list_first_entry(&dsp->ctl_list, struct wm_coeff_ctl,
+					list);
+		list_del(&ctl->list);
+		wm_adsp_free_ctl_blk(ctl);
+	}
+}
+EXPORT_SYMBOL_GPL(wm_adsp2_remove);
+
+static inline int wm_adsp_compr_attached(struct wm_adsp_compr *compr)
+{
+	return compr->buf != NULL;
+}
+
+static int wm_adsp_compr_attach(struct wm_adsp_compr *compr)
+{
+	/*
+	 * Note this will be more complex once each DSP can support multiple
+	 * streams
+	 */
+	if (!compr->dsp->buffer)
+		return -EINVAL;
+
+	compr->buf = compr->dsp->buffer;
+	compr->buf->compr = compr;
+
+	return 0;
+}
+
+static void wm_adsp_compr_detach(struct wm_adsp_compr *compr)
+{
+	if (!compr)
+		return;
+
+	/* Wake the poll so it can see buffer is no longer attached */
+	if (compr->stream)
+		snd_compr_fragment_elapsed(compr->stream);
+
+	if (wm_adsp_compr_attached(compr)) {
+		compr->buf->compr = NULL;
+		compr->buf = NULL;
+	}
+}
+
 int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream)
 {
 	struct wm_adsp_compr *compr;
@@ -2393,6 +2543,7 @@
 
 	mutex_lock(&dsp->pwr_lock);
 
+	wm_adsp_compr_detach(compr);
 	dsp->compr = NULL;
 
 	kfree(compr->raw_buf);
@@ -2689,6 +2840,8 @@
 static int wm_adsp_buffer_free(struct wm_adsp *dsp)
 {
 	if (dsp->buffer) {
+		wm_adsp_compr_detach(dsp->buffer->compr);
+
 		kfree(dsp->buffer->regions);
 		kfree(dsp->buffer);
 
@@ -2698,25 +2851,6 @@
 	return 0;
 }
 
-static inline int wm_adsp_compr_attached(struct wm_adsp_compr *compr)
-{
-	return compr->buf != NULL;
-}
-
-static int wm_adsp_compr_attach(struct wm_adsp_compr *compr)
-{
-	/*
-	 * Note this will be more complex once each DSP can support multiple
-	 * streams
-	 */
-	if (!compr->dsp->buffer)
-		return -EINVAL;
-
-	compr->buf = compr->dsp->buffer;
-
-	return 0;
-}
-
 int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
 {
 	struct wm_adsp_compr *compr = stream->runtime->private_data;
@@ -2805,21 +2939,41 @@
 		avail += wm_adsp_buffer_size(buf);
 
 	adsp_dbg(buf->dsp, "readindex=0x%x, writeindex=0x%x, avail=%d\n",
-		 buf->read_index, write_index, avail);
+		 buf->read_index, write_index, avail * WM_ADSP_DATA_WORD_SIZE);
 
 	buf->avail = avail;
 
 	return 0;
 }
 
+static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf)
+{
+	int ret;
+
+	ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error);
+	if (ret < 0) {
+		adsp_err(buf->dsp, "Failed to check buffer error: %d\n", ret);
+		return ret;
+	}
+	if (buf->error != 0) {
+		adsp_err(buf->dsp, "Buffer error occurred: %d\n", buf->error);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 int wm_adsp_compr_handle_irq(struct wm_adsp *dsp)
 {
-	struct wm_adsp_compr_buf *buf = dsp->buffer;
-	struct wm_adsp_compr *compr = dsp->compr;
+	struct wm_adsp_compr_buf *buf;
+	struct wm_adsp_compr *compr;
 	int ret = 0;
 
 	mutex_lock(&dsp->pwr_lock);
 
+	buf = dsp->buffer;
+	compr = dsp->compr;
+
 	if (!buf) {
 		ret = -ENODEV;
 		goto out;
@@ -2827,16 +2981,9 @@
 
 	adsp_dbg(dsp, "Handling buffer IRQ\n");
 
-	ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error);
-	if (ret < 0) {
-		adsp_err(dsp, "Failed to check buffer error: %d\n", ret);
-		goto out;
-	}
-	if (buf->error != 0) {
-		adsp_err(dsp, "Buffer error occurred: %d\n", buf->error);
-		ret = -EIO;
-		goto out;
-	}
+	ret = wm_adsp_buffer_get_error(buf);
+	if (ret < 0)
+		goto out_notify; /* Wake poll to report error */
 
 	ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(irq_count),
 				  &buf->irq_count);
@@ -2851,6 +2998,7 @@
 		goto out;
 	}
 
+out_notify:
 	if (compr && compr->stream)
 		snd_compr_fragment_elapsed(compr->stream);
 
@@ -2879,14 +3027,16 @@
 			  struct snd_compr_tstamp *tstamp)
 {
 	struct wm_adsp_compr *compr = stream->runtime->private_data;
-	struct wm_adsp_compr_buf *buf = compr->buf;
 	struct wm_adsp *dsp = compr->dsp;
+	struct wm_adsp_compr_buf *buf;
 	int ret = 0;
 
 	adsp_dbg(dsp, "Pointer request\n");
 
 	mutex_lock(&dsp->pwr_lock);
 
+	buf = compr->buf;
+
 	if (!compr->buf) {
 		ret = -ENXIO;
 		goto out;
@@ -2909,6 +3059,10 @@
 		 * DSP to inform us once a whole fragment is available.
 		 */
 		if (buf->avail < wm_adsp_compr_frag_words(compr)) {
+			ret = wm_adsp_buffer_get_error(buf);
+			if (ret < 0)
+				goto out;
+
 			ret = wm_adsp_buffer_reenable_irq(buf);
 			if (ret < 0) {
 				adsp_err(dsp,
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index b61cb57..feb61e2 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -92,6 +92,7 @@
 
 int wm_adsp1_init(struct wm_adsp *dsp);
 int wm_adsp2_init(struct wm_adsp *dsp);
+void wm_adsp2_remove(struct wm_adsp *dsp);
 int wm_adsp2_codec_probe(struct wm_adsp *dsp, struct snd_soc_codec *codec);
 int wm_adsp2_codec_remove(struct wm_adsp *dsp, struct snd_soc_codec *codec);
 int wm_adsp1_event(struct snd_soc_dapm_widget *w,
diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
index 50ca291..6b732d8 100644
--- a/sound/soc/davinci/Kconfig
+++ b/sound/soc/davinci/Kconfig
@@ -16,7 +16,11 @@
 	  - DRA7xx family
 
 config SND_DAVINCI_SOC_I2S
-	tristate
+	tristate "DaVinci Multichannel Buffered Serial Port (McBSP) support"
+	depends on SND_EDMA_SOC
+	help
+	  Say Y or M here if you want support for the McBSP IP found in
+	  Texas Instruments DaVinci DA850 SoCs.
 
 config SND_DAVINCI_SOC_MCASP
 	tristate "Multichannel Audio Serial Port (McASP) support"
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index ec98548..3849616 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -4,9 +4,15 @@
  * Author:      Vladimir Barinov, <vbarinov@embeddedalley.com>
  * Copyright:   (C) 2007 MontaVista Software, Inc., <source@mvista.com>
  *
+ * DT support	(c) 2016 Petr Kulhavy, Barix AG <petr@barix.com>
+ *		based on davinci-mcasp.c DT support
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * TODO:
+ * on DA850 implement HW FIFOs instead of DMA into DXR and DRR registers
  */
 
 #include <linux/init.h>
@@ -650,13 +656,24 @@
 
 static int davinci_i2s_probe(struct platform_device *pdev)
 {
+	struct snd_dmaengine_dai_dma_data *dma_data;
 	struct davinci_mcbsp_dev *dev;
 	struct resource *mem, *res;
 	void __iomem *io_base;
 	int *dma;
 	int ret;
 
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
+	if (!mem) {
+		dev_warn(&pdev->dev,
+			 "\"mpu\" mem resource not found, using index 0\n");
+		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!mem) {
+			dev_err(&pdev->dev, "no mem resource?\n");
+			return -ENODEV;
+		}
+	}
+
 	io_base = devm_ioremap_resource(&pdev->dev, mem);
 	if (IS_ERR(io_base))
 		return PTR_ERR(io_base);
@@ -666,40 +683,44 @@
 	if (!dev)
 		return -ENOMEM;
 
+	dev->base = io_base;
+
+	/* setup DMA, first TX, then RX */
+	dma_data = &dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
+	dma_data->addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG);
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (res) {
+		dma = &dev->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
+		*dma = res->start;
+		dma_data->filter_data = dma;
+	} else if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+		dma_data->filter_data = "tx";
+	} else {
+		dev_err(&pdev->dev, "Missing DMA tx resource\n");
+		return -ENODEV;
+	}
+
+	dma_data = &dev->dma_data[SNDRV_PCM_STREAM_CAPTURE];
+	dma_data->addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DRR_REG);
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+	if (res) {
+		dma = &dev->dma_request[SNDRV_PCM_STREAM_CAPTURE];
+		*dma = res->start;
+		dma_data->filter_data = dma;
+	} else if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+		dma_data->filter_data = "rx";
+	} else {
+		dev_err(&pdev->dev, "Missing DMA rx resource\n");
+		return -ENODEV;
+	}
+
 	dev->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(dev->clk))
 		return -ENODEV;
 	clk_enable(dev->clk);
 
-	dev->base = io_base;
-
-	dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr =
-	    (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG);
-
-	dev->dma_data[SNDRV_PCM_STREAM_CAPTURE].addr =
-	    (dma_addr_t)(mem->start + DAVINCI_MCBSP_DRR_REG);
-
-	/* first TX, then RX */
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "no DMA resource\n");
-		ret = -ENXIO;
-		goto err_release_clk;
-	}
-	dma = &dev->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
-	*dma = res->start;
-	dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].filter_data = dma;
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!res) {
-		dev_err(&pdev->dev, "no DMA resource\n");
-		ret = -ENXIO;
-		goto err_release_clk;
-	}
-	dma = &dev->dma_request[SNDRV_PCM_STREAM_CAPTURE];
-	*dma = res->start;
-	dev->dma_data[SNDRV_PCM_STREAM_CAPTURE].filter_data = dma;
-
 	dev->dev = &pdev->dev;
 	dev_set_drvdata(&pdev->dev, dev);
 
@@ -737,11 +758,18 @@
 	return 0;
 }
 
+static const struct of_device_id davinci_i2s_match[] = {
+	{ .compatible = "ti,da850-mcbsp" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, davinci_i2s_match);
+
 static struct platform_driver davinci_mcbsp_driver = {
 	.probe		= davinci_i2s_probe,
 	.remove		= davinci_i2s_remove,
 	.driver		= {
 		.name	= "davinci-mcbsp",
+		.of_match_table = of_match_ptr(davinci_i2s_match),
 	},
 };
 
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index e132498..0f66fda 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -489,7 +489,7 @@
 		mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
 		mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG,
-			       ACLKX | AHCLKX | AFSX | ACLKR | AHCLKR | AFSR);
+			       ACLKX | AFSX | ACLKR | AHCLKR | AFSR);
 		mcasp->bclk_master = 0;
 		break;
 	default:
@@ -540,21 +540,19 @@
 	return ret;
 }
 
-static int __davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id,
+static int __davinci_mcasp_set_clkdiv(struct davinci_mcasp *mcasp, int div_id,
 				      int div, bool explicit)
 {
-	struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
-
 	pm_runtime_get_sync(mcasp->dev);
 	switch (div_id) {
-	case 0:		/* MCLK divider */
+	case MCASP_CLKDIV_AUXCLK:			/* MCLK divider */
 		mcasp_mod_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG,
 			       AHCLKXDIV(div - 1), AHCLKXDIV_MASK);
 		mcasp_mod_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG,
 			       AHCLKRDIV(div - 1), AHCLKRDIV_MASK);
 		break;
 
-	case 1:		/* BCLK divider */
+	case MCASP_CLKDIV_BCLK:			/* BCLK divider */
 		mcasp_mod_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG,
 			       ACLKXDIV(div - 1), ACLKXDIV_MASK);
 		mcasp_mod_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG,
@@ -563,7 +561,8 @@
 			mcasp->bclk_div = div;
 		break;
 
-	case 2:	/*
+	case MCASP_CLKDIV_BCLK_FS_RATIO:
+		/*
 		 * BCLK/LRCLK ratio describes how many bit-clock cycles
 		 * fit into one frame. The clock ratio is given for a
 		 * full period of data (for I2S format both left and
@@ -591,7 +590,9 @@
 static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id,
 				    int div)
 {
-	return __davinci_mcasp_set_clkdiv(dai, div_id, div, 1);
+	struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
+
+	return __davinci_mcasp_set_clkdiv(mcasp, div_id, div, 1);
 }
 
 static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id,
@@ -999,27 +1000,53 @@
 }
 
 static int davinci_mcasp_calc_clk_div(struct davinci_mcasp *mcasp,
-				      unsigned int bclk_freq,
-				      int *error_ppm)
+				      unsigned int bclk_freq, bool set)
 {
-	int div = mcasp->sysclk_freq / bclk_freq;
-	int rem = mcasp->sysclk_freq % bclk_freq;
+	int error_ppm;
+	unsigned int sysclk_freq = mcasp->sysclk_freq;
+	u32 reg = mcasp_get_reg(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG);
+	int div = sysclk_freq / bclk_freq;
+	int rem = sysclk_freq % bclk_freq;
+	int aux_div = 1;
+
+	if (div > (ACLKXDIV_MASK + 1)) {
+		if (reg & AHCLKXE) {
+			aux_div = div / (ACLKXDIV_MASK + 1);
+			if (div % (ACLKXDIV_MASK + 1))
+				aux_div++;
+
+			sysclk_freq /= aux_div;
+			div = sysclk_freq / bclk_freq;
+			rem = sysclk_freq % bclk_freq;
+		} else if (set) {
+			dev_warn(mcasp->dev, "Too fast reference clock (%u)\n",
+				 sysclk_freq);
+		}
+	}
 
 	if (rem != 0) {
 		if (div == 0 ||
-		    ((mcasp->sysclk_freq / div) - bclk_freq) >
-		    (bclk_freq - (mcasp->sysclk_freq / (div+1)))) {
+		    ((sysclk_freq / div) - bclk_freq) >
+		    (bclk_freq - (sysclk_freq / (div+1)))) {
 			div++;
 			rem = rem - bclk_freq;
 		}
 	}
-	if (error_ppm)
-		*error_ppm =
-			(div*1000000 + (int)div64_long(1000000LL*rem,
-						       (int)bclk_freq))
-			/div - 1000000;
+	error_ppm = (div*1000000 + (int)div64_long(1000000LL*rem,
+		     (int)bclk_freq)) / div - 1000000;
 
-	return div;
+	if (set) {
+		if (error_ppm)
+			dev_info(mcasp->dev, "Sample-rate is off by %d PPM\n",
+				 error_ppm);
+
+		__davinci_mcasp_set_clkdiv(mcasp, MCASP_CLKDIV_BCLK, div, 0);
+		if (reg & AHCLKXE)
+			__davinci_mcasp_set_clkdiv(mcasp, MCASP_CLKDIV_AUXCLK,
+						   aux_div, 0);
+	}
+
+	return error_ppm;
 }
 
 static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
@@ -1044,18 +1071,11 @@
 		int slots = mcasp->tdm_slots;
 		int rate = params_rate(params);
 		int sbits = params_width(params);
-		int ppm, div;
 
 		if (mcasp->slot_width)
 			sbits = mcasp->slot_width;
 
-		div = davinci_mcasp_calc_clk_div(mcasp, rate*sbits*slots,
-						 &ppm);
-		if (ppm)
-			dev_info(mcasp->dev, "Sample-rate is off by %d PPM\n",
-				 ppm);
-
-		__davinci_mcasp_set_clkdiv(cpu_dai, 1, div, 0);
+		davinci_mcasp_calc_clk_div(mcasp, rate * sbits * slots, true);
 	}
 
 	ret = mcasp_common_hw_param(mcasp, substream->stream,
@@ -1166,7 +1186,8 @@
 				davinci_mcasp_dai_rates[i];
 			int ppm;
 
-			davinci_mcasp_calc_clk_div(rd->mcasp, bclk_freq, &ppm);
+			ppm = davinci_mcasp_calc_clk_div(rd->mcasp, bclk_freq,
+							 false);
 			if (abs(ppm) < DAVINCI_MAX_RATE_ERROR_PPM) {
 				if (range.empty) {
 					range.min = davinci_mcasp_dai_rates[i];
@@ -1205,8 +1226,9 @@
 			if (rd->mcasp->slot_width)
 				sbits = rd->mcasp->slot_width;
 
-			davinci_mcasp_calc_clk_div(rd->mcasp, sbits*slots*rate,
-						   &ppm);
+			ppm = davinci_mcasp_calc_clk_div(rd->mcasp,
+							 sbits * slots * rate,
+							 false);
 			if (abs(ppm) < DAVINCI_MAX_RATE_ERROR_PPM) {
 				snd_mask_set(&nfmt, i);
 				count++;
@@ -1230,11 +1252,15 @@
 	int i, dir;
 	int tdm_slots = mcasp->tdm_slots;
 
-	if (mcasp->tdm_mask[substream->stream])
-		tdm_slots = hweight32(mcasp->tdm_mask[substream->stream]);
+	/* Do not allow more than one stream per direction */
+	if (mcasp->substreams[substream->stream])
+		return -EBUSY;
 
 	mcasp->substreams[substream->stream] = substream;
 
+	if (mcasp->tdm_mask[substream->stream])
+		tdm_slots = hweight32(mcasp->tdm_mask[substream->stream]);
+
 	if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
 		return 0;
 
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
index a3be108..1e8787f 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -306,4 +306,9 @@
 #define NUMEVT(x)	(((x) & 0xFF) << 8)
 #define NUMDMA_MASK	(0xFF)
 
+/* clock divider IDs */
+#define MCASP_CLKDIV_AUXCLK		0 /* HCLK divider from AUXCLK */
+#define MCASP_CLKDIV_BCLK		1 /* BCLK divider from HCLK */
+#define MCASP_CLKDIV_BCLK_FS_RATIO	2 /* to set the BCLK FS ratio */
+
 #endif	/* DAVINCI_MCASP_H */
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index bff258d..0db69b7 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -100,6 +100,7 @@
 	struct device *dev;
 	u32 ccr;
 	u32 xfer_resolution;
+	u32 fifo_th;
 
 	/* data related to DMA transfers b/w i2s and DMAC */
 	union dw_i2s_snd_dma_data play_dma_data;
@@ -147,17 +148,18 @@
 static void i2s_start(struct dw_i2s_dev *dev,
 		      struct snd_pcm_substream *substream)
 {
+	struct i2s_clk_config_data *config = &dev->config;
 	u32 i, irq;
 	i2s_write_reg(dev->i2s_base, IER, 1);
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < (config->chan_nr / 2); i++) {
 			irq = i2s_read_reg(dev->i2s_base, IMR(i));
 			i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30);
 		}
 		i2s_write_reg(dev->i2s_base, ITER, 1);
 	} else {
-		for (i = 0; i < 4; i++) {
+		for (i = 0; i < (config->chan_nr / 2); i++) {
 			irq = i2s_read_reg(dev->i2s_base, IMR(i));
 			i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03);
 		}
@@ -231,14 +233,16 @@
 		if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
 			i2s_write_reg(dev->i2s_base, TCR(ch_reg),
 				      dev->xfer_resolution);
-			i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
+			i2s_write_reg(dev->i2s_base, TFCR(ch_reg),
+				      dev->fifo_th - 1);
 			irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
 			i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
 			i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
 		} else {
 			i2s_write_reg(dev->i2s_base, RCR(ch_reg),
 				      dev->xfer_resolution);
-			i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
+			i2s_write_reg(dev->i2s_base, RFCR(ch_reg),
+				      dev->fifo_th - 1);
 			irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
 			i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
 			i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
@@ -498,6 +502,7 @@
 	 */
 	u32 comp1 = i2s_read_reg(dev->i2s_base, dev->i2s_reg_comp1);
 	u32 comp2 = i2s_read_reg(dev->i2s_base, dev->i2s_reg_comp2);
+	u32 fifo_depth = 1 << (1 + COMP1_FIFO_DEPTH_GLOBAL(comp1));
 	u32 idx;
 
 	if (dev->capability & DWC_I2S_RECORD &&
@@ -536,6 +541,7 @@
 		dev->capability |= DW_I2S_SLAVE;
 	}
 
+	dev->fifo_th = fifo_depth / 2;
 	return 0;
 }
 
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 0754df7..2147994 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -21,6 +21,8 @@
 #include <sound/core.h>
 #include <sound/dmaengine_pcm.h>
 #include <sound/pcm_params.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 
 #include "fsl_sai.h"
 #include "imx-pcm.h"
@@ -786,10 +788,12 @@
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct fsl_sai *sai;
+	struct regmap *gpr;
 	struct resource *res;
 	void __iomem *base;
 	char tmp[8];
 	int irq, ret, i;
+	int index;
 
 	sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
 	if (!sai)
@@ -797,7 +801,8 @@
 
 	sai->pdev = pdev;
 
-	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx6sx-sai"))
+	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx6sx-sai") ||
+	    of_device_is_compatible(pdev->dev.of_node, "fsl,imx6ul-sai"))
 		sai->sai_on_imx = true;
 
 	sai->is_lsb_first = of_property_read_bool(np, "lsb-first");
@@ -877,6 +882,22 @@
 		fsl_sai_dai.symmetric_samplebits = 0;
 	}
 
+	if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
+	    of_device_is_compatible(pdev->dev.of_node, "fsl,imx6ul-sai")) {
+		gpr = syscon_regmap_lookup_by_compatible("fsl,imx6ul-iomuxc-gpr");
+		if (IS_ERR(gpr)) {
+			dev_err(&pdev->dev, "cannot find iomuxc registers\n");
+			return PTR_ERR(gpr);
+		}
+
+		index = of_alias_get_id(np, "sai");
+		if (index < 0)
+			return index;
+
+		regmap_update_bits(gpr, IOMUXC_GPR1, MCLK_DIR(index),
+				   MCLK_DIR(index));
+	}
+
 	sai->dma_params_rx.addr = res->start + FSL_SAI_RDR;
 	sai->dma_params_tx.addr = res->start + FSL_SAI_TDR;
 	sai->dma_params_rx.maxburst = FSL_SAI_MAXBURST_RX;
@@ -898,6 +919,7 @@
 static const struct of_device_id fsl_sai_ids[] = {
 	{ .compatible = "fsl,vf610-sai", },
 	{ .compatible = "fsl,imx6sx-sai", },
+	{ .compatible = "fsl,imx6ul-sai", },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fsl_sai_ids);
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index ed8de10..632ecc0 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -137,6 +137,7 @@
 	case CCSR_SSI_SACDAT:
 	case CCSR_SSI_SATAG:
 	case CCSR_SSI_SACCST:
+	case CCSR_SSI_SOR:
 		return true;
 	default:
 		return false;
@@ -261,6 +262,7 @@
 	struct fsl_ssi_dbg dbg_stats;
 
 	const struct fsl_ssi_soc_data *soc;
+	struct device *dev;
 };
 
 /*
@@ -400,6 +402,26 @@
 }
 
 /*
+ * Clear the RX or TX FIFO to remove samples from the previous
+ * stream session which may still be present in the FIFO and
+ * may introduce bad samples and/or channel slipping.
+ *
+ * Note: The SOR register is not documented in recent IMX datasheets, but
+ * is described in the IMX51 reference manual at section 56.3.3.15.
+ */
+static void fsl_ssi_fifo_clear(struct fsl_ssi_private *ssi_private,
+		bool is_rx)
+{
+	if (is_rx) {
+		regmap_update_bits(ssi_private->regs, CCSR_SSI_SOR,
+			CCSR_SSI_SOR_RX_CLR, CCSR_SSI_SOR_RX_CLR);
+	} else {
+		regmap_update_bits(ssi_private->regs, CCSR_SSI_SOR,
+			CCSR_SSI_SOR_TX_CLR, CCSR_SSI_SOR_TX_CLR);
+	}
+}
+
+/*
  * Calculate the bits that have to be disabled for the current stream that is
  * getting disabled. This keeps the bits enabled that are necessary for the
  * second stream to work if 'stream_active' is true.
@@ -474,9 +496,11 @@
 	 * (online configuration)
 	 */
 	if (enable) {
-		regmap_update_bits(regs, CCSR_SSI_SIER, vals->sier, vals->sier);
+		fsl_ssi_fifo_clear(ssi_private, vals->scr & CCSR_SSI_SCR_RE);
+
 		regmap_update_bits(regs, CCSR_SSI_SRCR, vals->srcr, vals->srcr);
 		regmap_update_bits(regs, CCSR_SSI_STCR, vals->stcr, vals->stcr);
+		regmap_update_bits(regs, CCSR_SSI_SIER, vals->sier, vals->sier);
 	} else {
 		u32 sier;
 		u32 srcr;
@@ -506,8 +530,40 @@
 
 config_done:
 	/* Enabling of subunits is done after configuration */
-	if (enable)
+	if (enable) {
+		if (ssi_private->use_dma && (vals->scr & CCSR_SSI_SCR_TE)) {
+			/*
+			 * Be sure the Tx FIFO is filled when TE is set.
+			 * Otherwise, there are some chances to start the
+			 * playback with some void samples inserted first,
+			 * generating a channel slip.
+			 *
+			 * First, SSIEN must be set, to let the FIFO be filled.
+			 *
+			 * Notes:
+			 * - Limit this fix to the DMA case until FIQ cases can
+			 *   be tested.
+			 * - Limit the length of the busy loop to not lock the
+			 *   system too long, even if 1-2 loops are sufficient
+			 *   in general.
+			 */
+			int i;
+			int max_loop = 100;
+			regmap_update_bits(regs, CCSR_SSI_SCR,
+					CCSR_SSI_SCR_SSIEN, CCSR_SSI_SCR_SSIEN);
+			for (i = 0; i < max_loop; i++) {
+				u32 sfcsr;
+				regmap_read(regs, CCSR_SSI_SFCSR, &sfcsr);
+				if (CCSR_SSI_SFCSR_TFCNT0(sfcsr))
+					break;
+			}
+			if (i == max_loop) {
+				dev_err(ssi_private->dev,
+					"Timeout waiting TX FIFO filling\n");
+			}
+		}
 		regmap_update_bits(regs, CCSR_SSI_SCR, vals->scr, vals->scr);
+	}
 }
 
 
@@ -670,6 +726,15 @@
 	if (IS_ERR(ssi_private->baudclk))
 		return -EINVAL;
 
+	/*
+	 * Hardware limitation: the bclk rate must never be
+	 * greater than 1/5 of the IPG clock rate
+	 */
+	if (freq * 5 > clk_get_rate(ssi_private->clk)) {
+		dev_err(cpu_dai->dev, "bitclk > ipgclk/5\n");
+		return -EINVAL;
+	}
+
 	baudclk_is_used = ssi_private->baudclk_streams & ~(BIT(substream->stream));
 
 	/* It should be already enough to divide clock by setting pm alone */
@@ -686,13 +751,6 @@
 		else
 			clkrate = clk_round_rate(ssi_private->baudclk, tmprate);
 
-		/*
-		 * Hardware limitation: The bclk rate must be
-		 * never greater than 1/5 IPG clock rate
-		 */
-		if (clkrate * 5 > clk_get_rate(ssi_private->clk))
-			continue;
-
 		clkrate /= factor;
 		afreq = clkrate / (i + 1);
 
@@ -1158,14 +1216,14 @@
 	.playback = {
 		.stream_name = "CPU-Playback",
 		.channels_min = 1,
-		.channels_max = 2,
+		.channels_max = 32,
 		.rates = FSLSSI_I2S_RATES,
 		.formats = FSLSSI_I2S_FORMATS,
 	},
 	.capture = {
 		.stream_name = "CPU-Capture",
 		.channels_min = 1,
-		.channels_max = 2,
+		.channels_max = 32,
 		.rates = FSLSSI_I2S_RATES,
 		.formats = FSLSSI_I2S_FORMATS,
 	},
@@ -1402,6 +1460,7 @@
 	}
 
 	ssi_private->soc = of_id->data;
+	ssi_private->dev = &pdev->dev;
 
 	sprop = of_get_property(np, "fsl,mode", NULL);
 	if (sprop) {
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index e63cd5e..dac6688 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -220,7 +220,7 @@
 	ret = dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
 			  runtime->dma_addr, runtime->dma_bytes);
 
-	pr_debug("%s: ret: %d %p %pad 0x%08x\n", __func__, ret,
+	pr_debug("%s: ret: %d %p %pad 0x%08zx\n", __func__, ret,
 			runtime->dma_area,
 			&runtime->dma_addr,
 			runtime->dma_bytes);
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index b3e6c23..91c15ab 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -58,6 +58,21 @@
 	  Say Y if you have such a device
 	  If unsure select "N".
 
+config SND_SOC_INTEL_BXT_RT298_MACH
+	tristate "ASoC Audio driver for Broxton with RT298 I2S mode"
+	depends on X86 && ACPI && I2C
+	select SND_SOC_INTEL_SST
+	select SND_SOC_INTEL_SKYLAKE
+	select SND_SOC_RT298
+	select SND_SOC_DMIC
+	select SND_SOC_HDAC_HDMI
+	select SND_HDA_DSP_LOADER
+	help
+	   This adds support for ASoC machine driver for Broxton platforms
+	   with RT298 I2S audio codec.
+	   Say Y if you have such a device
+	   If unsure select "N".
+
 config SND_SOC_INTEL_BYT_RT5640_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
 	depends on X86_INTEL_LPSS && I2C
@@ -162,8 +177,8 @@
 config SND_SOC_INTEL_SKYLAKE
 	tristate
 	select SND_HDA_EXT_CORE
+	select SND_HDA_DSP_LOADER
 	select SND_SOC_TOPOLOGY
-	select SND_HDA_I915
 	select SND_SOC_INTEL_SST
 
 config SND_SOC_INTEL_SKL_RT286_MACH
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index b97e6ad..98720a9 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -195,7 +195,7 @@
 
 	if (e->w && e->w->power)
 		ret = sst_send_slot_map(drv);
-	else
+	else if (!e->w)
 		dev_err(&drv->pdev->dev, "Slot control: %s doesn't have DAPM widget!!!\n",
 				kcontrol->id.name);
 	return ret;
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 3310c0f..a850677 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -2,6 +2,7 @@
 snd-soc-sst-byt-rt5640-mach-objs := byt-rt5640.o
 snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
 snd-soc-sst-broadwell-objs := broadwell.o
+snd-soc-sst-bxt-rt298-objs := bxt_rt298.o
 snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
 snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
 snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
@@ -14,6 +15,7 @@
 obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
 obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
 obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
+obj-$(CONFIG_SND_SOC_INTEL_BXT_RT298_MACH) += snd-soc-sst-bxt-rt298.o
 obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
 obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH) += snd-soc-sst-bytcr-rt5640.o
 obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH) += snd-soc-sst-bytcr-rt5651.o
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 3f8a1e1..7486a00 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -201,7 +201,7 @@
 	{
 		/* SSP0 - Codec */
 		.name = "Codec",
-		.be_id = 0,
+		.id = 0,
 		.cpu_dai_name = "snd-soc-dummy-dai",
 		.platform_name = "snd-soc-dummy",
 		.no_pcm = 1,
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
new file mode 100644
index 0000000..f478751
--- /dev/null
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -0,0 +1,353 @@
+/*
+ * Intel Broxton-P I2S Machine Driver
+ *
+ * Copyright (C) 2014-2016, Intel Corporation. All rights reserved.
+ *
+ * Modified from:
+ *   Intel Skylake I2S Machine driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/pcm_params.h>
+#include "../../codecs/hdac_hdmi.h"
+#include "../../codecs/rt298.h"
+
+static struct snd_soc_jack broxton_headset;
+/* Headset jack detection DAPM pins */
+
+enum {
+	BXT_DPCM_AUDIO_PB = 0,
+	BXT_DPCM_AUDIO_CP,
+	BXT_DPCM_AUDIO_REF_CP,
+	BXT_DPCM_AUDIO_HDMI1_PB,
+	BXT_DPCM_AUDIO_HDMI2_PB,
+	BXT_DPCM_AUDIO_HDMI3_PB,
+};
+
+static struct snd_soc_jack_pin broxton_headset_pins[] = {
+	{
+		.pin = "Mic Jack",
+		.mask = SND_JACK_MICROPHONE,
+	},
+	{
+		.pin = "Headphone Jack",
+		.mask = SND_JACK_HEADPHONE,
+	},
+};
+
+static const struct snd_kcontrol_new broxton_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Speaker"),
+	SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+	SOC_DAPM_PIN_SWITCH("Mic Jack"),
+};
+
+static const struct snd_soc_dapm_widget broxton_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_SPK("Speaker", NULL),
+	SND_SOC_DAPM_MIC("Mic Jack", NULL),
+	SND_SOC_DAPM_MIC("DMIC2", NULL),
+	SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+	SND_SOC_DAPM_SPK("HDMI1", NULL),
+	SND_SOC_DAPM_SPK("HDMI2", NULL),
+	SND_SOC_DAPM_SPK("HDMI3", NULL),
+};
+
+static const struct snd_soc_dapm_route broxton_rt298_map[] = {
+	/* speaker */
+	{"Speaker", NULL, "SPOR"},
+	{"Speaker", NULL, "SPOL"},
+
+	/* HP jack connectors - unknown if we have jack detect */
+	{"Headphone Jack", NULL, "HPO Pin"},
+
+	/* other jacks */
+	{"MIC1", NULL, "Mic Jack"},
+
+	/* digital mics */
+	{"DMIC1 Pin", NULL, "DMIC2"},
+	{"DMic", NULL, "SoC DMIC"},
+
+	{"HDMI1", NULL, "hif5 Output"},
+	{"HDMI2", NULL, "hif6 Output"},
+	{"HDMI3", NULL, "hif7 Output"},
+
+	/* CODEC BE connections */
+	{ "AIF1 Playback", NULL, "ssp5 Tx"},
+	{ "ssp5 Tx", NULL, "codec0_out"},
+
+	{ "codec0_in", NULL, "ssp5 Rx" },
+	{ "ssp5 Rx", NULL, "AIF1 Capture" },
+
+	{ "dmic01_hifi", NULL, "DMIC01 Rx" },
+	{ "DMIC01 Rx", NULL, "Capture" },
+
+	{ "hifi3", NULL, "iDisp3 Tx"},
+	{ "iDisp3 Tx", NULL, "iDisp3_out"},
+	{ "hifi2", NULL, "iDisp2 Tx"},
+	{ "iDisp2 Tx", NULL, "iDisp2_out"},
+	{ "hifi1", NULL, "iDisp1 Tx"},
+	{ "iDisp1 Tx", NULL, "iDisp1_out"},
+
+};
+
+static int broxton_rt298_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	int ret = 0;
+
+	ret = snd_soc_card_jack_new(rtd->card, "Headset",
+		SND_JACK_HEADSET | SND_JACK_BTN_0,
+		&broxton_headset,
+		broxton_headset_pins, ARRAY_SIZE(broxton_headset_pins));
+
+	if (ret)
+		return ret;
+
+	rt298_mic_detect(codec, &broxton_headset);
+	return 0;
+}
+
+static int broxton_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_dai *dai = rtd->codec_dai;
+
+	return hdac_hdmi_jack_init(dai, BXT_DPCM_AUDIO_HDMI1_PB + dai->id);
+}
+
+static int broxton_ssp5_fixup(struct snd_soc_pcm_runtime *rtd,
+			struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *channels = hw_param_interval(params,
+					SNDRV_PCM_HW_PARAM_CHANNELS);
+	struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+	/* The ADSP will convert the FE rate to 48k, stereo */
+	rate->min = rate->max = 48000;
+	channels->min = channels->max = 2;
+
+	/* set SSP5 to 24 bit */
+	snd_mask_none(fmt);
+	snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+	return 0;
+}
+
+static int broxton_rt298_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int ret;
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, RT298_SCLK_S_PLL,
+					19200000, SND_SOC_CLOCK_IN);
+	if (ret < 0) {
+		dev_err(rtd->dev, "can't set codec sysclk configuration\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct snd_soc_ops broxton_rt298_ops = {
+	.hw_params = broxton_rt298_hw_params,
+};
+
+/* broxton digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link broxton_rt298_dais[] = {
+	/* Front End DAI links */
+	[BXT_DPCM_AUDIO_PB]
+	{
+		.name = "Bxt Audio Port",
+		.stream_name = "Audio",
+		.cpu_dai_name = "System Pin",
+		.platform_name = "0000:00:0e.0",
+		.nonatomic = 1,
+		.dynamic = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+		.dpcm_playback = 1,
+	},
+	[BXT_DPCM_AUDIO_CP]
+	{
+		.name = "Bxt Audio Capture Port",
+		.stream_name = "Audio Record",
+		.cpu_dai_name = "System Pin",
+		.platform_name = "0000:00:0e.0",
+		.nonatomic = 1,
+		.dynamic = 1,
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+		.dpcm_capture = 1,
+	},
+	[BXT_DPCM_AUDIO_REF_CP]
+	{
+		.name = "Bxt Audio Reference cap",
+		.stream_name = "refcap",
+		.cpu_dai_name = "Reference Pin",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "0000:00:0e.0",
+		.init = NULL,
+		.dpcm_capture = 1,
+		.nonatomic = 1,
+		.dynamic = 1,
+	},
+	[BXT_DPCM_AUDIO_HDMI1_PB]
+	{
+		.name = "Bxt HDMI Port1",
+		.stream_name = "Hdmi1",
+		.cpu_dai_name = "HDMI1 Pin",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "0000:00:0e.0",
+		.dpcm_playback = 1,
+		.init = NULL,
+		.nonatomic = 1,
+		.dynamic = 1,
+	},
+	[BXT_DPCM_AUDIO_HDMI2_PB]
+	{
+		.name = "Bxt HDMI Port2",
+		.stream_name = "Hdmi2",
+		.cpu_dai_name = "HDMI2 Pin",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "0000:00:0e.0",
+		.dpcm_playback = 1,
+		.init = NULL,
+		.nonatomic = 1,
+		.dynamic = 1,
+	},
+	[BXT_DPCM_AUDIO_HDMI3_PB]
+	{
+		.name = "Bxt HDMI Port3",
+		.stream_name = "Hdmi3",
+		.cpu_dai_name = "HDMI3 Pin",
+		.codec_name = "snd-soc-dummy",
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.platform_name = "0000:00:0e.0",
+		.dpcm_playback = 1,
+		.init = NULL,
+		.nonatomic = 1,
+		.dynamic = 1,
+	},
+	/* Back End DAI links */
+	{
+		/* SSP5 - Codec */
+		.name = "SSP5-Codec",
+		.id = 0,
+		.cpu_dai_name = "SSP5 Pin",
+		.platform_name = "0000:00:0e.0",
+		.no_pcm = 1,
+		.codec_name = "i2c-INT343A:00",
+		.codec_dai_name = "rt298-aif1",
+		.init = broxton_rt298_codec_init,
+		.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF |
+						SND_SOC_DAIFMT_CBS_CFS,
+		.ignore_pmdown_time = 1,
+		.be_hw_params_fixup = broxton_ssp5_fixup,
+		.ops = &broxton_rt298_ops,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+	},
+	{
+		.name = "dmic01",
+		.id = 1,
+		.cpu_dai_name = "DMIC01 Pin",
+		.codec_name = "dmic-codec",
+		.codec_dai_name = "dmic-hifi",
+		.platform_name = "0000:00:0e.0",
+		.ignore_suspend = 1,
+		.dpcm_capture = 1,
+		.no_pcm = 1,
+	},
+	{
+		.name = "iDisp1",
+		.id = 3,
+		.cpu_dai_name = "iDisp1 Pin",
+		.codec_name = "ehdaudio0D2",
+		.codec_dai_name = "intel-hdmi-hifi1",
+		.platform_name = "0000:00:0e.0",
+		.init = broxton_hdmi_init,
+		.dpcm_playback = 1,
+		.no_pcm = 1,
+	},
+	{
+		.name = "iDisp2",
+		.id = 4,
+		.cpu_dai_name = "iDisp2 Pin",
+		.codec_name = "ehdaudio0D2",
+		.codec_dai_name = "intel-hdmi-hifi2",
+		.platform_name = "0000:00:0e.0",
+		.init = broxton_hdmi_init,
+		.dpcm_playback = 1,
+		.no_pcm = 1,
+	},
+	{
+		.name = "iDisp3",
+		.id = 5,
+		.cpu_dai_name = "iDisp3 Pin",
+		.codec_name = "ehdaudio0D2",
+		.codec_dai_name = "intel-hdmi-hifi3",
+		.platform_name = "0000:00:0e.0",
+		.init = broxton_hdmi_init,
+		.dpcm_playback = 1,
+		.no_pcm = 1,
+	},
+};
+
+/* broxton audio machine driver for BXT + RT298 */
+static struct snd_soc_card broxton_rt298 = {
+	.name = "broxton-rt298",
+	.owner = THIS_MODULE,
+	.dai_link = broxton_rt298_dais,
+	.num_links = ARRAY_SIZE(broxton_rt298_dais),
+	.controls = broxton_controls,
+	.num_controls = ARRAY_SIZE(broxton_controls),
+	.dapm_widgets = broxton_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(broxton_widgets),
+	.dapm_routes = broxton_rt298_map,
+	.num_dapm_routes = ARRAY_SIZE(broxton_rt298_map),
+	.fully_routed = true,
+};
+
+static int broxton_audio_probe(struct platform_device *pdev)
+{
+	broxton_rt298.dev = &pdev->dev;
+
+	return devm_snd_soc_register_card(&pdev->dev, &broxton_rt298);
+}
+
+static struct platform_driver broxton_audio = {
+	.probe = broxton_audio_probe,
+	.driver = {
+		.name = "bxt_alc298s_i2s",
+	},
+};
+module_platform_driver(broxton_audio)
+
+/* Module information */
+MODULE_AUTHOR("Ramesh Babu <Ramesh.Babu@intel.com>");
+MODULE_AUTHOR("Senthilnathan Veppur <senthilnathanx.veppur@intel.com>");
+MODULE_DESCRIPTION("Intel SST Audio for Broxton");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bxt_alc298s_i2s");
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 032a2e7..88efb62 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -304,7 +304,7 @@
 		/* back ends */
 	{
 		.name = "SSP2-Codec",
-		.be_id = 1,
+		.id = 1,
 		.cpu_dai_name = "ssp2-port",
 		.platform_name = "sst-mfld-platform",
 		.no_pcm = 1,
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 1c95ccc..35f591e 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -267,7 +267,7 @@
 	/* back ends */
 	{
 		.name = "SSP2-Codec",
-		.be_id = 1,
+		.id = 1,
 		.cpu_dai_name = "ssp2-port",
 		.platform_name = "sst-mfld-platform",
 		.no_pcm = 1,
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index e609f08..cdcced9 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -255,7 +255,7 @@
 	/* back ends */
 	{
 		.name = "SSP2-Codec",
-		.be_id = 1,
+		.id = 1,
 		.cpu_dai_name = "ssp2-port",
 		.platform_name = "sst-mfld-platform",
 		.no_pcm = 1,
@@ -296,7 +296,7 @@
 	if (!drv)
 		return -ENOMEM;
 
-	drv->ts3a227e_present = acpi_dev_present("104C227E");
+	drv->ts3a227e_present = acpi_dev_found("104C227E");
 	if (!drv->ts3a227e_present) {
 		/* no need probe TI jack detection chip */
 		snd_soc_card_cht.aux_dev = NULL;
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 2a6f808..d7ef292 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -295,7 +295,7 @@
 	/* back ends */
 	{
 		.name = "SSP2-Codec",
-		.be_id = 1,
+		.id = 1,
 		.cpu_dai_name = "ssp2-port",
 		.platform_name = "sst-mfld-platform",
 		.no_pcm = 1,
@@ -357,7 +357,7 @@
 		return -ENOMEM;
 
 	for (i = 0; i < ARRAY_SIZE(snd_soc_cards); i++) {
-		if (acpi_dev_present(snd_soc_cards[i].codec_id)) {
+		if (acpi_dev_found(snd_soc_cards[i].codec_id)) {
 			dev_dbg(&pdev->dev,
 				"found codec %s\n", snd_soc_cards[i].codec_id);
 			card = snd_soc_cards[i].soc_card;
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 2e5347f..df9d254 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -273,7 +273,7 @@
 	{
 		/* SSP2 - Codec */
 		.name = "SSP2-Codec",
-		.be_id = 1,
+		.id = 1,
 		.cpu_dai_name = "ssp2-port",
 		.platform_name = "sst-mfld-platform",
 		.no_pcm = 1,
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index 2255857..863f1d5 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -156,7 +156,7 @@
 	{
 		/* SSP0 - Codec */
 		.name = "Codec",
-		.be_id = 0,
+		.id = 0,
 		.cpu_dai_name = "snd-soc-dummy-dai",
 		.platform_name = "snd-soc-dummy",
 		.no_pcm = 1,
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index 72176b7..d280865 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -30,6 +30,16 @@
 static struct snd_soc_jack skylake_headset;
 static struct snd_soc_card skylake_audio_card;
 
+struct skl_hdmi_pcm {
+	struct list_head head;
+	struct snd_soc_dai *codec_dai;
+	int device;
+};
+
+struct skl_nau8825_private {
+	struct list_head hdmi_pcm_list;
+};
+
 enum {
 	SKL_DPCM_AUDIO_PB = 0,
 	SKL_DPCM_AUDIO_CP,
@@ -192,23 +202,56 @@
 
 static int skylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
 {
+	struct skl_nau8825_private *ctx = snd_soc_card_get_drvdata(rtd->card);
 	struct snd_soc_dai *dai = rtd->codec_dai;
+	struct skl_hdmi_pcm *pcm;
 
-	return hdac_hdmi_jack_init(dai, SKL_DPCM_AUDIO_HDMI1_PB);
+	pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+	if (!pcm)
+		return -ENOMEM;
+
+	pcm->device = SKL_DPCM_AUDIO_HDMI1_PB;
+	pcm->codec_dai = dai;
+
+	list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+	return 0;
 }
 
 static int skylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
 {
+	struct skl_nau8825_private *ctx = snd_soc_card_get_drvdata(rtd->card);
 	struct snd_soc_dai *dai = rtd->codec_dai;
+	struct skl_hdmi_pcm *pcm;
 
-	return hdac_hdmi_jack_init(dai, SKL_DPCM_AUDIO_HDMI2_PB);
+	pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+	if (!pcm)
+		return -ENOMEM;
+
+	pcm->device = SKL_DPCM_AUDIO_HDMI2_PB;
+	pcm->codec_dai = dai;
+
+	list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+	return 0;
 }
 
 static int skylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
 {
+	struct skl_nau8825_private *ctx = snd_soc_card_get_drvdata(rtd->card);
 	struct snd_soc_dai *dai = rtd->codec_dai;
+	struct skl_hdmi_pcm *pcm;
 
-	return hdac_hdmi_jack_init(dai, SKL_DPCM_AUDIO_HDMI3_PB);
+	pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+	if (!pcm)
+		return -ENOMEM;
+
+	pcm->device = SKL_DPCM_AUDIO_HDMI3_PB;
+	pcm->codec_dai = dai;
+
+	list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+	return 0;
 }
 
 static int skylake_nau8825_fe_init(struct snd_soc_pcm_runtime *rtd)
@@ -391,7 +434,6 @@
 		.platform_name = "0000:00:1f.3",
 		.init = NULL,
 		.dpcm_capture = 1,
-		.ignore_suspend = 1,
 		.nonatomic = 1,
 		.dynamic = 1,
 		.ops = &skylaye_refcap_ops,
@@ -456,7 +498,7 @@
 	{
 		/* SSP0 - Codec */
 		.name = "SSP0-Codec",
-		.be_id = 0,
+		.id = 0,
 		.cpu_dai_name = "SSP0 Pin",
 		.platform_name = "0000:00:1f.3",
 		.no_pcm = 1,
@@ -472,7 +514,7 @@
 	{
 		/* SSP1 - Codec */
 		.name = "SSP1-Codec",
-		.be_id = 1,
+		.id = 1,
 		.cpu_dai_name = "SSP1 Pin",
 		.platform_name = "0000:00:1f.3",
 		.no_pcm = 1,
@@ -489,7 +531,7 @@
 	},
 	{
 		.name = "dmic01",
-		.be_id = 2,
+		.id = 2,
 		.cpu_dai_name = "DMIC01 Pin",
 		.codec_name = "dmic-codec",
 		.codec_dai_name = "dmic-hifi",
@@ -501,7 +543,7 @@
 	},
 	{
 		.name = "iDisp1",
-		.be_id = 3,
+		.id = 3,
 		.cpu_dai_name = "iDisp1 Pin",
 		.codec_name = "ehdaudio0D2",
 		.codec_dai_name = "intel-hdmi-hifi1",
@@ -512,7 +554,7 @@
 	},
 	{
 		.name = "iDisp2",
-		.be_id = 4,
+		.id = 4,
 		.cpu_dai_name = "iDisp2 Pin",
 		.codec_name = "ehdaudio0D2",
 		.codec_dai_name = "intel-hdmi-hifi2",
@@ -523,7 +565,7 @@
 	},
 	{
 		.name = "iDisp3",
-		.be_id = 5,
+		.id = 5,
 		.cpu_dai_name = "iDisp3 Pin",
 		.codec_name = "ehdaudio0D2",
 		.codec_dai_name = "intel-hdmi-hifi3",
@@ -534,6 +576,21 @@
 	},
 };
 
+static int skylake_card_late_probe(struct snd_soc_card *card)
+{
+	struct skl_nau8825_private *ctx = snd_soc_card_get_drvdata(card);
+	struct skl_hdmi_pcm *pcm;
+	int err;
+
+	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+		err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
 /* skylake audio machine driver for SPT + NAU88L25 */
 static struct snd_soc_card skylake_audio_card = {
 	.name = "sklnau8825max",
@@ -547,11 +604,21 @@
 	.dapm_routes = skylake_map,
 	.num_dapm_routes = ARRAY_SIZE(skylake_map),
 	.fully_routed = true,
+	.late_probe = skylake_card_late_probe,
 };
 
 static int skylake_audio_probe(struct platform_device *pdev)
 {
+	struct skl_nau8825_private *ctx;
+
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+	if (!ctx)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
 	skylake_audio_card.dev = &pdev->dev;
+	snd_soc_card_set_drvdata(&skylake_audio_card, ctx);
 
 	return devm_snd_soc_register_card(&pdev->dev, &skylake_audio_card);
 }
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index 5f1ca99..e19aa99 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -34,6 +34,15 @@
 static struct snd_soc_jack skylake_headset;
 static struct snd_soc_card skylake_audio_card;
 
+struct skl_hdmi_pcm {
+	struct list_head head;
+	struct snd_soc_dai *codec_dai;
+	int device;
+};
+
+struct skl_nau88125_private {
+	struct list_head hdmi_pcm_list;
+};
 enum {
 	SKL_DPCM_AUDIO_PB = 0,
 	SKL_DPCM_AUDIO_CP,
@@ -222,24 +231,57 @@
 
 static int skylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
 {
+	struct skl_nau88125_private *ctx = snd_soc_card_get_drvdata(rtd->card);
 	struct snd_soc_dai *dai = rtd->codec_dai;
+	struct skl_hdmi_pcm *pcm;
 
-	return hdac_hdmi_jack_init(dai, SKL_DPCM_AUDIO_HDMI1_PB);
+	pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+	if (!pcm)
+		return -ENOMEM;
+
+	pcm->device = SKL_DPCM_AUDIO_HDMI1_PB;
+	pcm->codec_dai = dai;
+
+	list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+	return 0;
 }
 
 static int skylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
 {
+	struct skl_nau88125_private *ctx = snd_soc_card_get_drvdata(rtd->card);
 	struct snd_soc_dai *dai = rtd->codec_dai;
+	struct skl_hdmi_pcm *pcm;
 
-	return hdac_hdmi_jack_init(dai, SKL_DPCM_AUDIO_HDMI2_PB);
+	pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+	if (!pcm)
+		return -ENOMEM;
+
+	pcm->device = SKL_DPCM_AUDIO_HDMI2_PB;
+	pcm->codec_dai = dai;
+
+	list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+	return 0;
 }
 
 
 static int skylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
 {
+	struct skl_nau88125_private *ctx = snd_soc_card_get_drvdata(rtd->card);
 	struct snd_soc_dai *dai = rtd->codec_dai;
+	struct skl_hdmi_pcm *pcm;
 
-	return hdac_hdmi_jack_init(dai, SKL_DPCM_AUDIO_HDMI3_PB);
+	pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+	if (!pcm)
+		return -ENOMEM;
+
+	pcm->device = SKL_DPCM_AUDIO_HDMI3_PB;
+	pcm->codec_dai = dai;
+
+	list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+	return 0;
 }
 
 static int skylake_nau8825_fe_init(struct snd_soc_pcm_runtime *rtd)
@@ -440,7 +482,6 @@
 		.platform_name = "0000:00:1f.3",
 		.init = NULL,
 		.dpcm_capture = 1,
-		.ignore_suspend = 1,
 		.nonatomic = 1,
 		.dynamic = 1,
 		.ops = &skylaye_refcap_ops,
@@ -505,7 +546,7 @@
 	{
 		/* SSP0 - Codec */
 		.name = "SSP0-Codec",
-		.be_id = 0,
+		.id = 0,
 		.cpu_dai_name = "SSP0 Pin",
 		.platform_name = "0000:00:1f.3",
 		.no_pcm = 1,
@@ -523,7 +564,7 @@
 	{
 		/* SSP1 - Codec */
 		.name = "SSP1-Codec",
-		.be_id = 1,
+		.id = 1,
 		.cpu_dai_name = "SSP1 Pin",
 		.platform_name = "0000:00:1f.3",
 		.no_pcm = 1,
@@ -540,7 +581,7 @@
 	},
 	{
 		.name = "dmic01",
-		.be_id = 2,
+		.id = 2,
 		.cpu_dai_name = "DMIC01 Pin",
 		.codec_name = "dmic-codec",
 		.codec_dai_name = "dmic-hifi",
@@ -552,7 +593,7 @@
 	},
 	{
 		.name = "iDisp1",
-		.be_id = 3,
+		.id = 3,
 		.cpu_dai_name = "iDisp1 Pin",
 		.codec_name = "ehdaudio0D2",
 		.codec_dai_name = "intel-hdmi-hifi1",
@@ -563,7 +604,7 @@
 	},
 	{
 		.name = "iDisp2",
-		.be_id = 4,
+		.id = 4,
 		.cpu_dai_name = "iDisp2 Pin",
 		.codec_name = "ehdaudio0D2",
 		.codec_dai_name = "intel-hdmi-hifi2",
@@ -574,7 +615,7 @@
 	},
 	{
 		.name = "iDisp3",
-		.be_id = 5,
+		.id = 5,
 		.cpu_dai_name = "iDisp3 Pin",
 		.codec_name = "ehdaudio0D2",
 		.codec_dai_name = "intel-hdmi-hifi3",
@@ -585,6 +626,21 @@
 	},
 };
 
+static int skylake_card_late_probe(struct snd_soc_card *card)
+{
+	struct skl_nau88125_private *ctx = snd_soc_card_get_drvdata(card);
+	struct skl_hdmi_pcm *pcm;
+	int err;
+
+	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+		err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
 /* skylake audio machine driver for SPT + NAU88L25 */
 static struct snd_soc_card skylake_audio_card = {
 	.name = "sklnau8825adi",
@@ -600,11 +656,21 @@
 	.codec_conf = ssm4567_codec_conf,
 	.num_configs = ARRAY_SIZE(ssm4567_codec_conf),
 	.fully_routed = true,
+	.late_probe = skylake_card_late_probe,
 };
 
 static int skylake_audio_probe(struct platform_device *pdev)
 {
+	struct skl_nau88125_private *ctx;
+
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+	if (!ctx)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
 	skylake_audio_card.dev = &pdev->dev;
+	snd_soc_card_set_drvdata(&skylake_audio_card, ctx);
 
 	return devm_snd_soc_register_card(&pdev->dev, &skylake_audio_card);
 }
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index 2016397a..426b482 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -30,6 +30,16 @@
 
 static struct snd_soc_jack skylake_headset;
 
+struct skl_hdmi_pcm {
+	struct list_head head;
+	struct snd_soc_dai *codec_dai;
+	int device;
+};
+
+struct skl_rt286_private {
+	struct list_head hdmi_pcm_list;
+};
+
 enum {
 	SKL_DPCM_AUDIO_PB = 0,
 	SKL_DPCM_AUDIO_CP,
@@ -142,9 +152,20 @@
 
 static int skylake_hdmi_init(struct snd_soc_pcm_runtime *rtd)
 {
+	struct skl_rt286_private *ctx = snd_soc_card_get_drvdata(rtd->card);
 	struct snd_soc_dai *dai = rtd->codec_dai;
+	struct skl_hdmi_pcm *pcm;
 
-	return hdac_hdmi_jack_init(dai, SKL_DPCM_AUDIO_HDMI1_PB + dai->id);
+	pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+	if (!pcm)
+		return -ENOMEM;
+
+	pcm->device = SKL_DPCM_AUDIO_HDMI1_PB + dai->id;
+	pcm->codec_dai = dai;
+
+	list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+	return 0;
 }
 
 static unsigned int rates[] = {
@@ -317,7 +338,6 @@
 		.platform_name = "0000:00:1f.3",
 		.init = NULL,
 		.dpcm_capture = 1,
-		.ignore_suspend = 1,
 		.nonatomic = 1,
 		.dynamic = 1,
 	},
@@ -375,7 +395,7 @@
 	{
 		/* SSP0 - Codec */
 		.name = "SSP0-Codec",
-		.be_id = 0,
+		.id = 0,
 		.cpu_dai_name = "SSP0 Pin",
 		.platform_name = "0000:00:1f.3",
 		.no_pcm = 1,
@@ -393,7 +413,7 @@
 	},
 	{
 		.name = "dmic01",
-		.be_id = 1,
+		.id = 1,
 		.cpu_dai_name = "DMIC01 Pin",
 		.codec_name = "dmic-codec",
 		.codec_dai_name = "dmic-hifi",
@@ -405,7 +425,7 @@
 	},
 	{
 		.name = "iDisp1",
-		.be_id = 2,
+		.id = 2,
 		.cpu_dai_name = "iDisp1 Pin",
 		.codec_name = "ehdaudio0D2",
 		.codec_dai_name = "intel-hdmi-hifi1",
@@ -416,7 +436,7 @@
 	},
 	{
 		.name = "iDisp2",
-		.be_id = 3,
+		.id = 3,
 		.cpu_dai_name = "iDisp2 Pin",
 		.codec_name = "ehdaudio0D2",
 		.codec_dai_name = "intel-hdmi-hifi2",
@@ -427,7 +447,7 @@
 	},
 	{
 		.name = "iDisp3",
-		.be_id = 4,
+		.id = 4,
 		.cpu_dai_name = "iDisp3 Pin",
 		.codec_name = "ehdaudio0D2",
 		.codec_dai_name = "intel-hdmi-hifi3",
@@ -438,6 +458,21 @@
 	},
 };
 
+static int skylake_card_late_probe(struct snd_soc_card *card)
+{
+	struct skl_rt286_private *ctx = snd_soc_card_get_drvdata(card);
+	struct skl_hdmi_pcm *pcm;
+	int err;
+
+	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+		err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
 /* skylake audio machine driver for SPT + RT286S */
 static struct snd_soc_card skylake_rt286 = {
 	.name = "skylake-rt286",
@@ -451,11 +486,21 @@
 	.dapm_routes = skylake_rt286_map,
 	.num_dapm_routes = ARRAY_SIZE(skylake_rt286_map),
 	.fully_routed = true,
+	.late_probe = skylake_card_late_probe,
 };
 
 static int skylake_audio_probe(struct platform_device *pdev)
 {
+	struct skl_rt286_private *ctx;
+
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+	if (!ctx)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
 	skylake_rt286.dev = &pdev->dev;
+	snd_soc_card_set_drvdata(&skylake_rt286, ctx);
 
 	return devm_snd_soc_register_card(&pdev->dev, &skylake_rt286);
 }
diff --git a/sound/soc/intel/common/sst-acpi.h b/sound/soc/intel/common/sst-acpi.h
index 4dcfb7e..8398cb2 100644
--- a/sound/soc/intel/common/sst-acpi.h
+++ b/sound/soc/intel/common/sst-acpi.h
@@ -12,10 +12,19 @@
  *
  */
 
+#include <linux/kconfig.h>
+#include <linux/stddef.h>
 #include <linux/acpi.h>
 
 /* translation from HID to I2C name, needed for DAI codec_name */
+#if IS_ENABLED(CONFIG_ACPI)
 const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
+#else
+inline const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
+{
+	return NULL;
+}
+#endif
 
 /* acpi match */
 struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines);
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index ef4881e..2599352 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -203,7 +203,7 @@
 
 	chip->dev = dev;
 
-	err = dw_dma_probe(chip, NULL);
+	err = dw_dma_probe(chip);
 	if (err)
 		return ERR_PTR(err);
 
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index ac60f13..9156522 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -1345,7 +1345,7 @@
 		return 0;
 
 	/* wait for pause to complete before we reset the stream */
-	while (stream->running && tries--)
+	while (stream->running && --tries)
 		msleep(1);
 	if (!tries) {
 		dev_err(hsw->dev, "error: reset stream %d still running\n",
diff --git a/sound/soc/intel/haswell/sst-haswell-pcm.c b/sound/soc/intel/haswell/sst-haswell-pcm.c
index 1aa819c..994256b 100644
--- a/sound/soc/intel/haswell/sst-haswell-pcm.c
+++ b/sound/soc/intel/haswell/sst-haswell-pcm.c
@@ -445,7 +445,7 @@
 
 	pages = snd_sgbuf_aligned_pages(size);
 
-	dev_dbg(rtd->dev, "generating page table for %p size 0x%zu pages %d\n",
+	dev_dbg(rtd->dev, "generating page table for %p size 0x%zx pages %d\n",
 		dma_area, size, pages);
 
 	for (i = 0; i < pages; i++) {
diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
index 914b6da..c28f5d0 100644
--- a/sound/soc/intel/skylake/Makefile
+++ b/sound/soc/intel/skylake/Makefile
@@ -5,6 +5,6 @@
 
 # Skylake IPC Support
 snd-soc-skl-ipc-objs := skl-sst-ipc.o skl-sst-dsp.o skl-sst-cldma.o \
-		skl-sst.o
+		skl-sst.o bxt-sst.o
 
 obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl-ipc.o
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
new file mode 100644
index 0000000..965ce40
--- /dev/null
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -0,0 +1,328 @@
+/*
+ *  bxt-sst.c - DSP library functions for BXT platform
+ *
+ *  Copyright (C) 2015-16 Intel Corp
+ *  Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
+ *	   Jeeja KP <jeeja.kp@intel.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-sst-ipc.h"
+
+#define BXT_BASEFW_TIMEOUT	3000
+#define BXT_INIT_TIMEOUT	500
+#define BXT_IPC_PURGE_FW	0x01004000
+
+#define BXT_ROM_INIT		0x5
+#define BXT_ADSP_SRAM0_BASE	0x80000
+
+/* Firmware status window */
+#define BXT_ADSP_FW_STATUS	BXT_ADSP_SRAM0_BASE
+#define BXT_ADSP_ERROR_CODE     (BXT_ADSP_FW_STATUS + 0x4)
+
+#define BXT_ADSP_SRAM1_BASE	0xA0000
+
+static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
+{
+	 return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
+}
+
+static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
+			const void *fwdata, u32 fwsize)
+{
+	int stream_tag, ret, i;
+	u32 reg;
+
+	stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab);
+	if (stream_tag < 0) {
+		dev_err(ctx->dev, "Failed to prepare DMA FW loading err: %x\n",
+				stream_tag);
+		return stream_tag;
+	}
+
+	ctx->dsp_ops.stream_tag = stream_tag;
+	memcpy(ctx->dmab.area, fwdata, fwsize);
+
+	/* Purge FW request */
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_HIPCI, SKL_ADSP_REG_HIPCI_BUSY |
+					 BXT_IPC_PURGE_FW | (stream_tag - 1));
+
+	ret = skl_dsp_enable_core(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Boot dsp core failed ret: %d\n", ret);
+		ret = -EIO;
+		goto base_fw_load_failed;
+	}
+
+	for (i = BXT_INIT_TIMEOUT; i > 0; --i) {
+		reg = sst_dsp_shim_read(ctx, SKL_ADSP_REG_HIPCIE);
+
+		if (reg & SKL_ADSP_REG_HIPCIE_DONE) {
+			sst_dsp_shim_update_bits_forced(ctx,
+					SKL_ADSP_REG_HIPCIE,
+					SKL_ADSP_REG_HIPCIE_DONE,
+					SKL_ADSP_REG_HIPCIE_DONE);
+			break;
+		}
+		mdelay(1);
+	}
+	if (!i) {
+		dev_info(ctx->dev, "Waiting for HIPCIE done, reg: 0x%x\n", reg);
+		sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCIE,
+				SKL_ADSP_REG_HIPCIE_DONE,
+				SKL_ADSP_REG_HIPCIE_DONE);
+	}
+
+	/* enable Interrupt */
+	skl_ipc_int_enable(ctx);
+	skl_ipc_op_int_enable(ctx);
+
+	for (i = BXT_INIT_TIMEOUT; i > 0; --i) {
+		if (SKL_FW_INIT ==
+				(sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS) &
+				SKL_FW_STS_MASK)) {
+
+			dev_info(ctx->dev, "ROM loaded, continue FW loading\n");
+			break;
+		}
+		mdelay(1);
+	}
+	if (!i) {
+		dev_err(ctx->dev, "Timeout for ROM init, HIPCIE: 0x%x\n", reg);
+		ret = -EIO;
+		goto base_fw_load_failed;
+	}
+
+	return ret;
+
+base_fw_load_failed:
+	ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag);
+	skl_dsp_disable_core(ctx);
+	return ret;
+}
+
+static int sst_transfer_fw_host_dma(struct sst_dsp *ctx)
+{
+	int ret;
+
+	ctx->dsp_ops.trigger(ctx->dev, true, ctx->dsp_ops.stream_tag);
+	ret = sst_dsp_register_poll(ctx, BXT_ADSP_FW_STATUS, SKL_FW_STS_MASK,
+			BXT_ROM_INIT, BXT_BASEFW_TIMEOUT, "Firmware boot");
+
+	ctx->dsp_ops.trigger(ctx->dev, false, ctx->dsp_ops.stream_tag);
+	ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag);
+
+	return ret;
+}
+
+static int bxt_load_base_firmware(struct sst_dsp *ctx)
+{
+	const struct firmware *fw = NULL;
+	struct skl_sst *skl = ctx->thread_context;
+	int ret;
+
+	ret = request_firmware(&fw, ctx->fw_name, ctx->dev);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Request firmware failed %d\n", ret);
+		goto sst_load_base_firmware_failed;
+	}
+
+	ret = sst_bxt_prepare_fw(ctx, fw->data, fw->size);
+	/* Retry enabling the core and ROM load; a retry has been seen to help */
+	if (ret < 0) {
+		ret = sst_bxt_prepare_fw(ctx, fw->data, fw->size);
+		if (ret < 0) {
+			dev_err(ctx->dev, "Core En/ROM load fail:%d\n", ret);
+			goto sst_load_base_firmware_failed;
+		}
+	}
+
+	ret = sst_transfer_fw_host_dma(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Transfer firmware failed %d\n", ret);
+		dev_info(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
+			sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
+			sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
+
+		skl_dsp_disable_core(ctx);
+	} else {
+		dev_dbg(ctx->dev, "Firmware download successful\n");
+		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
+					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
+		if (ret == 0) {
+			dev_err(ctx->dev, "DSP boot fail, FW Ready timeout\n");
+			skl_dsp_disable_core(ctx);
+			ret = -EIO;
+		} else {
+			skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+			ret = 0;
+		}
+	}
+
+sst_load_base_firmware_failed:
+	release_firmware(fw);
+	return ret;
+}
+
+static int bxt_set_dsp_D0(struct sst_dsp *ctx)
+{
+	struct skl_sst *skl = ctx->thread_context;
+	int ret;
+
+	skl->boot_complete = false;
+
+	ret = skl_dsp_enable_core(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "enable dsp core failed ret: %d\n", ret);
+		return ret;
+	}
+
+	/* enable interrupt */
+	skl_ipc_int_enable(ctx);
+	skl_ipc_op_int_enable(ctx);
+
+	ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
+					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
+	if (ret == 0) {
+		dev_err(ctx->dev, "ipc: error DSP boot timeout\n");
+		dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
+			sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
+			sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
+		return -EIO;
+	}
+
+	skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+	return 0;
+}
+
+static int bxt_set_dsp_D3(struct sst_dsp *ctx)
+{
+	struct skl_ipc_dxstate_info dx;
+	struct skl_sst *skl = ctx->thread_context;
+	int ret = 0;
+
+	if (!is_skl_dsp_running(ctx))
+		return ret;
+
+	dx.core_mask = SKL_DSP_CORE0_MASK;
+	dx.dx_mask = SKL_IPC_D3_MASK;
+
+	ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
+				SKL_BASE_FW_MODULE_ID, &dx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to set DSP to D3 state: %d\n", ret);
+		return ret;
+	}
+
+	ret = skl_dsp_disable_core(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "disbale dsp core failed: %d\n", ret);
+		ret = -EIO;
+	}
+
+	skl_dsp_set_state_locked(ctx, SKL_DSP_RESET);
+	return 0;
+}
+
+static struct skl_dsp_fw_ops bxt_fw_ops = {
+	.set_state_D0 = bxt_set_dsp_D0,
+	.set_state_D3 = bxt_set_dsp_D3,
+	.load_fw = bxt_load_base_firmware,
+	.get_fw_errcode = bxt_get_errorcode,
+};
+
+static struct sst_ops skl_ops = {
+	.irq_handler = skl_dsp_sst_interrupt,
+	.write = sst_shim32_write,
+	.read = sst_shim32_read,
+	.ram_read = sst_memcpy_fromio_32,
+	.ram_write = sst_memcpy_toio_32,
+	.free = skl_dsp_free,
+};
+
+static struct sst_dsp_device skl_dev = {
+	.thread = skl_dsp_irq_thread_handler,
+	.ops = &skl_ops,
+};
+
+int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
+			const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
+			struct skl_sst **dsp)
+{
+	struct skl_sst *skl;
+	struct sst_dsp *sst;
+	int ret;
+
+	skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL);
+	if (skl == NULL)
+		return -ENOMEM;
+
+	skl->dev = dev;
+	skl_dev.thread_context = skl;
+
+	skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
+	if (!skl->dsp) {
+		dev_err(skl->dev, "skl_dsp_ctx_init failed\n");
+		return -ENODEV;
+	}
+
+	sst = skl->dsp;
+	sst->fw_name = fw_name;
+	sst->dsp_ops = dsp_ops;
+	sst->fw_ops = bxt_fw_ops;
+	sst->addr.lpe = mmio_base;
+	sst->addr.shim = mmio_base;
+
+	sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
+			SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
+
+	ret = skl_ipc_init(dev, skl);
+	if (ret)
+		return ret;
+
+	skl->boot_complete = false;
+	init_waitqueue_head(&skl->boot_wait);
+
+	ret = sst->fw_ops.load_fw(sst);
+	if (ret < 0) {
+		dev_err(dev, "Load base fw failed: %x", ret);
+		return ret;
+	}
+
+	if (dsp)
+		*dsp = skl;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);
+
+
+void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
+{
+	skl_ipc_free(&ctx->ipc);
+	ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
+
+	if (ctx->dsp->addr.lpe)
+		iounmap(ctx->dsp->addr.lpe);
+
+	ctx->dsp->ops->free(ctx->dsp);
+}
+EXPORT_SYMBOL_GPL(bxt_sst_dsp_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Broxton IPC driver");
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 79c5089..226db84 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -72,6 +72,105 @@
 	skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)&mask);
 }
 
+static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
+				int stream_tag, int enable)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct hdac_stream *stream = snd_hdac_get_stream(bus,
+			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
+	struct hdac_ext_stream *estream;
+
+	if (!stream)
+		return -EINVAL;
+
+	estream = stream_to_hdac_ext_stream(stream);
+	/* enable/disable SPIB for this hdac stream */
+	snd_hdac_ext_stream_spbcap_enable(ebus, enable, stream->index);
+
+	/* set the spib value */
+	snd_hdac_ext_stream_set_spib(ebus, estream, size);
+
+	return 0;
+}
+
+static int skl_dsp_prepare(struct device *dev, unsigned int format,
+			unsigned int size, struct snd_dma_buffer *dmab)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct hdac_ext_stream *estream;
+	struct hdac_stream *stream;
+	struct snd_pcm_substream substream;
+	int ret;
+
+	if (!bus)
+		return -ENODEV;
+
+	memset(&substream, 0, sizeof(substream));
+	substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
+
+	estream = snd_hdac_ext_stream_assign(ebus, &substream,
+					HDAC_EXT_STREAM_TYPE_HOST);
+	if (!estream)
+		return -ENODEV;
+
+	stream = hdac_stream(estream);
+
+	/* assign a decoupled host DMA channel */
+	ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
+	if (ret < 0)
+		return ret;
+
+	skl_dsp_setup_spib(dev, size, stream->stream_tag, true);
+
+	return stream->stream_tag;
+}
+
+static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+	struct hdac_stream *stream;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	if (!bus)
+		return -ENODEV;
+
+	stream = snd_hdac_get_stream(bus,
+		SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
+	if (!stream)
+		return -EINVAL;
+
+	snd_hdac_dsp_trigger(stream, start);
+
+	return 0;
+}
+
+static int skl_dsp_cleanup(struct device *dev,
+		struct snd_dma_buffer *dmab, int stream_tag)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+	struct hdac_stream *stream;
+	struct hdac_ext_stream *estream;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	if (!bus)
+		return -ENODEV;
+
+	stream = snd_hdac_get_stream(bus,
+		SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
+	if (!stream)
+		return -EINVAL;
+
+	estream = stream_to_hdac_ext_stream(stream);
+	skl_dsp_setup_spib(dev, 0, stream_tag, false);
+	snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);
+
+	snd_hdac_dsp_cleanup(stream, dmab);
+
+	return 0;
+}
+
 static struct skl_dsp_loader_ops skl_get_loader_ops(void)
 {
 	struct skl_dsp_loader_ops loader_ops;
@@ -84,6 +183,21 @@
 	return loader_ops;
 };
 
+static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
+{
+	struct skl_dsp_loader_ops loader_ops;
+
+	memset(&loader_ops, 0, sizeof(loader_ops));
+
+	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
+	loader_ops.free_dma_buf = skl_free_dma_buf;
+	loader_ops.prepare = skl_dsp_prepare;
+	loader_ops.trigger = skl_dsp_trigger;
+	loader_ops.cleanup = skl_dsp_cleanup;
+
+	return loader_ops;
+};
+
 static const struct skl_dsp_ops dsp_ops[] = {
 	{
 		.id = 0x9d70,
@@ -91,6 +205,12 @@
 		.init = skl_sst_dsp_init,
 		.cleanup = skl_sst_dsp_cleanup
 	},
+	{
+		.id = 0x5a98,
+		.loader_ops = bxt_get_loader_ops,
+		.init = bxt_sst_dsp_init,
+		.cleanup = bxt_sst_dsp_cleanup
+	},
 };
 
 static int skl_get_dsp_ops(int pci_id)
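The dsp_ops[] table above now carries two entries keyed by PCI device id (0x9d70 for Skylake, 0x5a98 for Broxton), each with its own loader ops and init/cleanup pair. The body of skl_get_dsp_ops() is not part of this hunk; a plausible sketch of the lookup it performs is:

/* Sketch only - assumes a simple linear match over the table above. */
static int sketch_get_dsp_ops(int pci_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
		if (dsp_ops[i].id == pci_id)
			return i;	/* index into dsp_ops[] */
	}

	return -EINVAL;
}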
@@ -744,7 +864,7 @@
 		return ret;
 	}
 	mconfig->m_state = SKL_MODULE_INIT_DONE;
-
+	kfree(param_data);
 	return ret;
 }
 
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index 14d1916e..7d73648 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -25,11 +25,12 @@
 
 #define DSDT_NHLT_PATH "\\_SB.PCI0.HDAS"
 
-void *skl_nhlt_init(struct device *dev)
+struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
 {
 	acpi_handle handle;
 	union acpi_object *obj;
 	struct nhlt_resource_desc  *nhlt_ptr = NULL;
+	struct nhlt_acpi_table *nhlt_table = NULL;
 
 	if (ACPI_FAILURE(acpi_get_handle(NULL, DSDT_NHLT_PATH, &handle))) {
 		dev_err(dev, "Requested NHLT device not found\n");
@@ -39,18 +40,20 @@
 	obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
 	if (obj && obj->type == ACPI_TYPE_BUFFER) {
 		nhlt_ptr = (struct nhlt_resource_desc  *)obj->buffer.pointer;
-
-		return memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
+		nhlt_table = (struct nhlt_acpi_table *)
+				memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
 				MEMREMAP_WB);
+		ACPI_FREE(obj);
+		return nhlt_table;
 	}
 
 	dev_err(dev, "device specific method to extract NHLT blob failed\n");
 	return NULL;
 }
 
-void skl_nhlt_free(void *addr)
+void skl_nhlt_free(struct nhlt_acpi_table *nhlt)
 {
-	memunmap(addr);
+	memunmap((void *) nhlt);
 }
 
 static struct nhlt_specific_cfg *skl_get_specific_cfg(
@@ -120,7 +123,7 @@
 	struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
 	struct device *dev = bus->dev;
 	struct nhlt_specific_cfg *sp_config;
-	struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
+	struct nhlt_acpi_table *nhlt = skl->nhlt;
 	u16 bps = (s_fmt == 16) ? 16 : 32;
 	u8 j;
 
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index dab0900..7c81b31 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -51,7 +51,7 @@
 	.rate_min =		8000,
 	.rate_max =		48000,
 	.channels_min =		1,
-	.channels_max =		HDA_QUAD,
+	.channels_max =		8,
 	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
 	.period_bytes_min =	128,
 	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
@@ -213,7 +213,7 @@
 	struct skl_sst *ctx = skl->skl_sst;
 	struct skl_module_cfg *mconfig;
 
-	if ((dai->playback_active > 1) || (dai->capture_active > 1))
+	if (dai->playback_widget->power || dai->capture_widget->power)
 		return 0;
 
 	mconfig = skl_tplg_be_get_cpr_module(dai, substream->stream);
@@ -402,23 +402,33 @@
 	struct skl_module_cfg *mconfig;
 	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
 	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	struct snd_soc_dapm_widget *w;
 	int ret;
 
 	mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
 	if (!mconfig)
 		return -EIO;
 
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		w = dai->playback_widget;
+	else
+		w = dai->capture_widget;
+
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_RESUME:
-		skl_pcm_prepare(substream, dai);
-		/*
-		 * enable DMA Resume enable bit for the stream, set the dpib
-		 * & lpib position to resune before starting the DMA
-		 */
-		snd_hdac_ext_stream_drsm_enable(ebus, true,
-					hdac_stream(stream)->index);
-		snd_hdac_ext_stream_set_dpibr(ebus, stream, stream->dpib);
-		snd_hdac_ext_stream_set_lpib(stream, stream->lpib);
+		if (!w->ignore_suspend) {
+			skl_pcm_prepare(substream, dai);
+			/*
+			 * enable DMA Resume enable bit for the stream, set the
+			 * dpib & lpib position to resume before starting the
+			 * DMA
+			 */
+			snd_hdac_ext_stream_drsm_enable(ebus, true,
+						hdac_stream(stream)->index);
+			snd_hdac_ext_stream_set_dpibr(ebus, stream,
+							stream->dpib);
+			snd_hdac_ext_stream_set_lpib(stream, stream->lpib);
+		}
 
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
@@ -448,7 +458,7 @@
 			return ret;
 
 		ret = skl_decoupled_trigger(substream, cmd);
-		if (cmd == SNDRV_PCM_TRIGGER_SUSPEND) {
+		if ((cmd == SNDRV_PCM_TRIGGER_SUSPEND) && !w->ignore_suspend) {
 			/* save the dpib and lpib positions */
 			stream->dpib = readl(ebus->bus.remap_addr +
 					AZX_REG_VS_SDXDPIB_XBASE +
@@ -523,7 +533,6 @@
 	if (!link)
 		return -EINVAL;
 
-	snd_hdac_ext_bus_link_power_up(link);
 	snd_hdac_ext_link_stream_reset(link_dev);
 
 	snd_hdac_ext_link_stream_setup(link_dev, format_val);
@@ -682,7 +691,7 @@
 	.playback = {
 		.stream_name = "HDMI1 Playback",
 		.channels_min = HDA_STEREO,
-		.channels_max = HDA_STEREO,
+		.channels_max = 8,
 		.rates = SNDRV_PCM_RATE_32000 |	SNDRV_PCM_RATE_44100 |
 			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
 			SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
@@ -697,7 +706,7 @@
 	.playback = {
 		.stream_name = "HDMI2 Playback",
 		.channels_min = HDA_STEREO,
-		.channels_max = HDA_STEREO,
+		.channels_max = 8,
 		.rates = SNDRV_PCM_RATE_32000 |	SNDRV_PCM_RATE_44100 |
 			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
 			SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
@@ -712,7 +721,7 @@
 	.playback = {
 		.stream_name = "HDMI3 Playback",
 		.channels_min = HDA_STEREO,
-		.channels_max = HDA_STEREO,
+		.channels_max = 8,
 		.rates = SNDRV_PCM_RATE_32000 |	SNDRV_PCM_RATE_44100 |
 			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
 			SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
@@ -760,12 +769,84 @@
 	},
 },
 {
+	.name = "SSP2 Pin",
+	.ops = &skl_be_ssp_dai_ops,
+	.playback = {
+		.stream_name = "ssp2 Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "ssp2 Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "SSP3 Pin",
+	.ops = &skl_be_ssp_dai_ops,
+	.playback = {
+		.stream_name = "ssp3 Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "ssp3 Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "SSP4 Pin",
+	.ops = &skl_be_ssp_dai_ops,
+	.playback = {
+		.stream_name = "ssp4 Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "ssp4 Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "SSP5 Pin",
+	.ops = &skl_be_ssp_dai_ops,
+	.playback = {
+		.stream_name = "ssp5 Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "ssp5 Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
 	.name = "iDisp1 Pin",
 	.ops = &skl_link_dai_ops,
 	.playback = {
 		.stream_name = "iDisp1 Tx",
 		.channels_min = HDA_STEREO,
-		.channels_max = HDA_STEREO,
+		.channels_max = 8,
 		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE |
 			SNDRV_PCM_FMTBIT_S24_LE,
@@ -777,7 +858,7 @@
 	.playback = {
 		.stream_name = "iDisp2 Tx",
 		.channels_min = HDA_STEREO,
-		.channels_max = HDA_STEREO,
+		.channels_max = 8,
 		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|
 			SNDRV_PCM_RATE_48000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE |
@@ -790,7 +871,7 @@
 	.playback = {
 		.stream_name = "iDisp3 Tx",
 		.channels_min = HDA_STEREO,
-		.channels_max = HDA_STEREO,
+		.channels_max = 8,
 		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|
 			SNDRV_PCM_RATE_48000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE |
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index a5267e8..13c1985 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -336,6 +336,9 @@
 	skl_ipc_int_disable(dsp);
 
 	free_irq(dsp->irq, dsp);
+	skl_ipc_op_int_disable(dsp);
+	skl_ipc_int_disable(dsp);
+
 	skl_dsp_disable_core(dsp);
 }
 EXPORT_SYMBOL_GPL(skl_dsp_free);
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
index b6e310d..deabe73 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.h
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -118,16 +118,25 @@
 	int (*set_state_D0)(struct sst_dsp *ctx);
 	int (*set_state_D3)(struct sst_dsp *ctx);
 	unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
-	int (*load_mod)(struct sst_dsp *ctx, u16 mod_id, char *mod_name);
+	int (*load_mod)(struct sst_dsp *ctx, u16 mod_id, u8 *mod_name);
 	int (*unload_mod)(struct sst_dsp *ctx, u16 mod_id);
 
 };
 
 struct skl_dsp_loader_ops {
+	int stream_tag;
+
 	int (*alloc_dma_buf)(struct device *dev,
 		struct snd_dma_buffer *dmab, size_t size);
 	int (*free_dma_buf)(struct device *dev,
 		struct snd_dma_buffer *dmab);
+	int (*prepare)(struct device *dev, unsigned int format,
+				unsigned int byte_size,
+				struct snd_dma_buffer *bufp);
+	int (*trigger)(struct device *dev, bool start, int stream_tag);
+
+	int (*cleanup)(struct device *dev, struct snd_dma_buffer *dmab,
+				 int stream_tag);
 };
 
 struct skl_load_module_info {
@@ -160,6 +169,10 @@
 int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
 		const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
 		struct skl_sst **dsp);
+int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
+		const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
+		struct skl_sst **dsp);
 void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
+void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
 
 #endif /*__SKL_SST_DSP_H__*/
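The new prepare/trigger/cleanup members of skl_dsp_loader_ops let Broxton download firmware over a host HDA stream instead of the code-loader DMA. The actual download sequence lives elsewhere in the driver; the sketch below only illustrates how a caller could drive these callbacks, with made-up function and variable names.

/* Hedged sketch; assumes <sound/memalloc.h> and this header are included. */
static int sketch_download_fw(struct device *dev,
			      struct skl_dsp_loader_ops *ops,
			      const void *fw_data, size_t fw_size,
			      unsigned int hda_format)
{
	struct snd_dma_buffer dmab;
	int stream_tag, ret;

	stream_tag = ops->prepare(dev, hda_format, fw_size, &dmab);
	if (stream_tag <= 0)
		return stream_tag ? stream_tag : -EINVAL;

	memcpy(dmab.area, fw_data, fw_size);		/* stage the image */

	ret = ops->trigger(dev, true, stream_tag);	/* start the DMA */
	/* ... wait for the ROM/firmware to report completion ... */
	ops->trigger(dev, false, stream_tag);		/* stop the DMA */

	ops->cleanup(dev, &dmab, stream_tag);
	return ret;
}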
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index 348a734..13ec8d5 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/err.h>
+#include <linux/uuid.h>
 #include "../common/sst-dsp.h"
 #include "../common/sst-dsp-priv.h"
 #include "../common/sst-ipc.h"
@@ -304,14 +305,16 @@
 	return ret;
 }
 
-static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, char *guid)
+static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid)
 {
 	struct skl_module_table *module_entry = NULL;
 	int ret = 0;
 	char mod_name[64]; /* guid str = 32 chars + 4 hyphens */
+	uuid_le *uuid_mod;
 
-	snprintf(mod_name, sizeof(mod_name), "%s%s%s",
-			"intel/dsp_fw_", guid, ".bin");
+	uuid_mod = (uuid_le *)guid;
+	snprintf(mod_name, sizeof(mod_name), "%s%pUL%s",
+				"intel/dsp_fw_", uuid_mod, ".bin");
 
 	module_entry = skl_module_get_from_id(ctx, mod_id);
 	if (module_entry == NULL) {
@@ -451,6 +454,10 @@
 	skl_clear_module_table(ctx->dsp);
 	skl_ipc_free(&ctx->ipc);
 	ctx->dsp->ops->free(ctx->dsp);
+	if (ctx->boot_complete) {
+		ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
+		skl_cldma_int_disable(ctx->dsp);
+	}
 }
 EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);
 
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 545b4e7..3e036b0 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -154,13 +154,32 @@
 	dev_dbg(ctx->dev, "ch_cfg = %d\n", mcfg->out_fmt[0].ch_cfg);
 }
 
+static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
+{
+	int slot_map = 0xFFFFFFFF;
+	int start_slot = 0;
+	int i;
+
+	for (i = 0; i < chs; i++) {
+		/*
+		 * For 2 channels with starting slot as 0, slot map will
+		 * look like 0xFFFFFF10.
+		 */
+		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
+		start_slot++;
+	}
+	fmt->ch_map = slot_map;
+}
+
 static void skl_tplg_update_params(struct skl_module_fmt *fmt,
 			struct skl_pipe_params *params, int fixup)
 {
 	if (fixup & SKL_RATE_FIXUP_MASK)
 		fmt->s_freq = params->s_freq;
-	if (fixup & SKL_CH_FIXUP_MASK)
+	if (fixup & SKL_CH_FIXUP_MASK) {
 		fmt->channels = params->ch;
+		skl_tplg_update_chmap(fmt, fmt->channels);
+	}
 	if (fixup & SKL_FMT_FIXUP_MASK) {
 		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);
 
@@ -239,6 +258,7 @@
 {
 	int multiplier = 1;
 	struct skl_module_fmt *in_fmt, *out_fmt;
+	int in_rate, out_rate;
 
 
 	/* Since fixups is applied to pin 0 only, ibs, obs needs
@@ -249,15 +269,24 @@
 
 	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
 		multiplier = 5;
-	mcfg->ibs = (in_fmt->s_freq / 1000) *
-				(mcfg->in_fmt->channels) *
-				(mcfg->in_fmt->bit_depth >> 3) *
-				multiplier;
 
-	mcfg->obs = (mcfg->out_fmt->s_freq / 1000) *
-				(mcfg->out_fmt->channels) *
-				(mcfg->out_fmt->bit_depth >> 3) *
-				multiplier;
+	if (in_fmt->s_freq % 1000)
+		in_rate = (in_fmt->s_freq / 1000) + 1;
+	else
+		in_rate = (in_fmt->s_freq / 1000);
+
+	mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
+			(mcfg->in_fmt->bit_depth >> 3) *
+			multiplier;
+
+	if (mcfg->out_fmt->s_freq % 1000)
+		out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
+	else
+		out_rate = (mcfg->out_fmt->s_freq / 1000);
+
+	mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
+			(mcfg->out_fmt->bit_depth >> 3) *
+			multiplier;
 }
 
 static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
@@ -485,11 +514,15 @@
 		if (!skl_is_pipe_mcps_avail(skl, mconfig))
 			return -ENOMEM;
 
+		skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
 		if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
 			ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
 				mconfig->id.module_id, mconfig->guid);
 			if (ret < 0)
 				return ret;
+
+			mconfig->m_state = SKL_MODULE_LOADED;
 		}
 
 		/* update blob if blob is null for be with default value */
@@ -509,7 +542,6 @@
 		ret = skl_tplg_set_module_params(w, ctx);
 		if (ret < 0)
 			return ret;
-		skl_tplg_alloc_pipe_mcps(skl, mconfig);
 	}
 
 	return 0;
@@ -524,7 +556,8 @@
 	list_for_each_entry(w_module, &pipe->w_list, node) {
 		mconfig  = w_module->w->priv;
 
-		if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod)
+		if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
+			mconfig->m_state > SKL_MODULE_UNINIT)
 			return ctx->dsp->fw_ops.unload_mod(ctx->dsp,
 						mconfig->id.module_id);
 	}
@@ -558,6 +591,9 @@
 	if (!skl_is_pipe_mem_avail(skl, mconfig))
 		return -ENOMEM;
 
+	skl_tplg_alloc_pipe_mem(skl, mconfig);
+	skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
 	/*
 	 * Create a list of modules for pipe.
 	 * This list contains modules from source to sink
@@ -601,9 +637,6 @@
 		src_module = dst_module;
 	}
 
-	skl_tplg_alloc_pipe_mem(skl, mconfig);
-	skl_tplg_alloc_pipe_mcps(skl, mconfig);
-
 	return 0;
 }
 
@@ -1550,6 +1583,8 @@
 		return -ENOMEM;
 
 	w->priv = mconfig;
+	memcpy(&mconfig->guid, &dfw_config->uuid, 16);
+
 	mconfig->id.module_id = dfw_config->module_id;
 	mconfig->id.instance_id = dfw_config->instance_id;
 	mconfig->mcps = dfw_config->max_mcps;
@@ -1579,10 +1614,6 @@
 	mconfig->time_slot = dfw_config->time_slot;
 	mconfig->formats_config.caps_size = dfw_config->caps.caps_size;
 
-	if (dfw_config->is_loadable)
-		memcpy(mconfig->guid, dfw_config->uuid,
-					ARRAY_SIZE(dfw_config->uuid));
-
 	mconfig->m_in_pin = devm_kzalloc(bus->dev, (mconfig->max_in_queue) *
 						sizeof(*mconfig->m_in_pin),
 						GFP_KERNEL);
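Two of the skl-topology.c changes above are easiest to see with the numbers worked out:

/*
 * skl_tplg_update_chmap(), 2 channels starting at slot 0 (the case named
 * in its in-code comment):
 *
 *   i = 0: 0xFFFFFFFF & (~0x0000000F | 0x00) = 0xFFFFFFF0
 *   i = 1: 0xFFFFFFF0 & (~0x000000F0 | 0x10) = 0xFFFFFF10
 *
 * so channel 0 sits in slot 0, channel 1 in slot 1, and unused nibbles stay
 * at 0xF. The ibs/obs rework is DIV_ROUND_UP(s_freq, 1000) written out by
 * hand, so 44.1 kHz-family rates now round up to 45 frames per millisecond
 * instead of truncating to 44.
 */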
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index de3c401..e4b399c 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -274,14 +274,14 @@
 
 enum skl_module_state {
 	SKL_MODULE_UNINIT = 0,
-	SKL_MODULE_INIT_DONE = 1,
-	SKL_MODULE_LOADED = 2,
-	SKL_MODULE_UNLOADED = 3,
-	SKL_MODULE_BIND_DONE = 4
+	SKL_MODULE_LOADED = 1,
+	SKL_MODULE_INIT_DONE = 2,
+	SKL_MODULE_BIND_DONE = 3,
+	SKL_MODULE_UNLOADED = 4,
 };
 
 struct skl_module_cfg {
-	char guid[SKL_UUID_STR_SZ];
+	u8 guid[16];
 	struct skl_module_inst_id id;
 	u8 domain;
 	bool homogenous_inputs;
diff --git a/sound/soc/intel/skylake/skl-tplg-interface.h b/sound/soc/intel/skylake/skl-tplg-interface.h
index 1db88a6..a32e5e9 100644
--- a/sound/soc/intel/skylake/skl-tplg-interface.h
+++ b/sound/soc/intel/skylake/skl-tplg-interface.h
@@ -181,7 +181,7 @@
 } __packed;
 
 struct skl_dfw_module {
-	char uuid[SKL_UUID_STR_SZ];
+	u8 uuid[16];
 
 	u16 module_id;
 	u16 instance_id;
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index ab5e25a..06d8c26 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -222,20 +222,36 @@
 	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
 	struct skl *skl  = ebus_to_skl(ebus);
 	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	int ret = 0;
 
 	/*
 	 * Do not suspend if streams which are marked ignore suspend are
 	 * running, we need to save the state for these and continue
 	 */
 	if (skl->supend_active) {
+		/* turn off the links and stop the CORB/RIRB DMA if it is On */
 		snd_hdac_ext_bus_link_power_down_all(ebus);
+
+		if (ebus->cmd_dma_state)
+			snd_hdac_bus_stop_cmd_io(&ebus->bus);
+
 		enable_irq_wake(bus->irq);
 		pci_save_state(pci);
 		pci_disable_device(pci);
-		return 0;
 	} else {
-		return _skl_suspend(ebus);
+		ret = _skl_suspend(ebus);
+		if (ret < 0)
+			return ret;
 	}
+
+	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+		ret = snd_hdac_display_power(bus, false);
+		if (ret < 0)
+			dev_err(bus->dev,
+				"Cannot turn OFF display power on i915\n");
+	}
+
+	return ret;
 }
 
 static int skl_resume(struct device *dev)
@@ -244,6 +260,7 @@
 	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
 	struct skl *skl  = ebus_to_skl(ebus);
 	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct hdac_ext_link *hlink = NULL;
 	int ret;
 
 	/* Turned OFF in HDMI codec driver after codec reconfiguration */
@@ -265,8 +282,29 @@
 		ret = pci_enable_device(pci);
 		snd_hdac_ext_bus_link_power_up_all(ebus);
 		disable_irq_wake(bus->irq);
+		/*
+		 * Turn on the links that were on before the active suspend,
+		 * and restart the CORB/RIRB DMA if it was running before
+		 * the active suspend.
+		 */
+		list_for_each_entry(hlink, &ebus->hlink_list, list) {
+			if (hlink->ref_count)
+				snd_hdac_ext_bus_link_power_up(hlink);
+		}
+
+		if (ebus->cmd_dma_state)
+			snd_hdac_bus_init_cmd_io(&ebus->bus);
 	} else {
 		ret = _skl_resume(ebus);
+
+		/* turn off the links that were off before suspend */
+		list_for_each_entry(hlink, &ebus->hlink_list, list) {
+			if (!hlink->ref_count)
+				snd_hdac_ext_bus_link_power_down(hlink);
+		}
+
+		if (!ebus->cmd_dma_state)
+			snd_hdac_bus_stop_cmd_io(&ebus->bus);
 	}
 
 	return ret;
@@ -316,17 +354,20 @@
 
 	if (bus->irq >= 0)
 		free_irq(bus->irq, (void *)bus);
-	if (bus->remap_addr)
-		iounmap(bus->remap_addr);
-
 	snd_hdac_bus_free_stream_pages(bus);
 	snd_hdac_stream_free_all(ebus);
 	snd_hdac_link_free_all(ebus);
+
+	if (bus->remap_addr)
+		iounmap(bus->remap_addr);
+
 	pci_release_regions(skl->pci);
 	pci_disable_device(skl->pci);
 
 	snd_hdac_ext_bus_exit(ebus);
 
+	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+		snd_hdac_i915_exit(&ebus->bus);
 	return 0;
 }
 
@@ -599,6 +640,7 @@
 	struct skl *skl;
 	struct hdac_ext_bus *ebus = NULL;
 	struct hdac_bus *bus = NULL;
+	struct hdac_ext_link *hlink = NULL;
 	int err;
 
 	/* we use ext core ops, so provide NULL for ops here */
@@ -629,7 +671,7 @@
 		err = skl_machine_device_register(skl,
 				  (void *)pci_id->driver_data);
 		if (err < 0)
-			goto out_free;
+			goto out_nhlt_free;
 
 		err = skl_init_dsp(skl);
 		if (err < 0) {
@@ -665,6 +707,12 @@
 		}
 	}
 
+	/*
+	 * we are done probing, so decrement the link reference counts
+	 */
+	list_for_each_entry(hlink, &ebus->hlink_list, list)
+		snd_hdac_ext_bus_link_put(ebus, hlink);
+
 	/*configure PM */
 	pm_runtime_put_noidle(bus->dev);
 	pm_runtime_allow(bus->dev);
@@ -679,6 +727,8 @@
 	skl_free_dsp(skl);
 out_mach_free:
 	skl_machine_device_unregister(skl);
+out_nhlt_free:
+	skl_nhlt_free(skl->nhlt);
 out_free:
 	skl->init_failed = 1;
 	skl_free(ebus);
@@ -719,16 +769,17 @@
 	if (skl->tplg)
 		release_firmware(skl->tplg);
 
-	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
-		snd_hdac_i915_exit(&ebus->bus);
-
 	if (pci_dev_run_wake(pci))
 		pm_runtime_get_noresume(&pci->dev);
-	pci_dev_put(pci);
+
+	/* codec removal, invoke bus_device_remove */
+	snd_hdac_ext_bus_device_remove(ebus);
+
 	skl_platform_unregister(&pci->dev);
 	skl_free_dsp(skl);
 	skl_machine_device_unregister(skl);
 	skl_dmic_device_unregister(skl);
+	skl_nhlt_free(skl->nhlt);
 	skl_free(ebus);
 	dev_set_drvdata(&pci->dev, NULL);
 }
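The probe and PM changes above lean on the HDA extended-link reference count: probe drops the initial reference on every link once the codecs are bound, and resume only re-powers links whose ref_count is still non-zero. The sketch below shows the get/put pairing from a consumer's point of view; snd_hdac_ext_bus_link_put() appears in the hunk, and its _get() counterpart is assumed to be the matching helper in the hdac extended core.

/* Illustrative only; error handling of the _get() call is omitted. */
static void sketch_use_link(struct hdac_ext_bus *ebus,
			    struct hdac_ext_link *hlink)
{
	snd_hdac_ext_bus_link_get(ebus, hlink);	/* powers the link up */
	/* ... send verbs / run streams on this link ... */
	snd_hdac_ext_bus_link_put(ebus, hlink);	/* may power it back down */
}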
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 39e16fa..4b4b387 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -66,7 +66,7 @@
 	struct platform_device *dmic_dev;
 	struct platform_device *i2s_dev;
 
-	void *nhlt; /* nhlt ptr */
+	struct nhlt_acpi_table *nhlt; /* nhlt ptr */
 	struct skl_sst *skl_sst; /* sst skl ctx */
 
 	struct skl_dsp_resource resource;
@@ -103,8 +103,8 @@
 int skl_platform_unregister(struct device *dev);
 int skl_platform_register(struct device *dev);
 
-void *skl_nhlt_init(struct device *dev);
-void skl_nhlt_free(void *addr);
+struct nhlt_acpi_table *skl_nhlt_init(struct device *dev);
+void skl_nhlt_free(struct nhlt_acpi_table *addr);
 struct nhlt_specific_cfg *skl_get_ep_blob(struct skl *skl, u32 instance,
 			u8 link_type, u8 s_fmt, u8 no_ch, u32 s_rate, u8 dirn);
 
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 2f8e204..574c6af 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -34,6 +34,13 @@
 
 	struct regmap *regmap;
 
+/*
+ * Used to indicate the tx/rx status.
+ * The I2S controller wants to start tx and rx together,
+ * and to stop them only when both have been asked to stop.
+ */
+	bool tx_start;
+	bool rx_start;
 	bool is_master_mode;
 };
 
@@ -75,29 +82,37 @@
 				   I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_ENABLE);
 
 		regmap_update_bits(i2s->regmap, I2S_XFER,
-				   I2S_XFER_TXS_START,
-				   I2S_XFER_TXS_START);
+				   I2S_XFER_TXS_START | I2S_XFER_RXS_START,
+				   I2S_XFER_TXS_START | I2S_XFER_RXS_START);
+
+		i2s->tx_start = true;
 	} else {
+		i2s->tx_start = false;
+
 		regmap_update_bits(i2s->regmap, I2S_DMACR,
 				   I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_DISABLE);
 
-		regmap_update_bits(i2s->regmap, I2S_XFER,
-				   I2S_XFER_TXS_START,
-				   I2S_XFER_TXS_STOP);
+		if (!i2s->rx_start) {
+			regmap_update_bits(i2s->regmap, I2S_XFER,
+					   I2S_XFER_TXS_START |
+					   I2S_XFER_RXS_START,
+					   I2S_XFER_TXS_STOP |
+					   I2S_XFER_RXS_STOP);
 
-		regmap_update_bits(i2s->regmap, I2S_CLR,
-				   I2S_CLR_TXC,
-				   I2S_CLR_TXC);
+			regmap_update_bits(i2s->regmap, I2S_CLR,
+					   I2S_CLR_TXC | I2S_CLR_RXC,
+					   I2S_CLR_TXC | I2S_CLR_RXC);
 
-		regmap_read(i2s->regmap, I2S_CLR, &val);
-
-		/* Should wait for clear operation to finish */
-		while (val & I2S_CLR_TXC) {
 			regmap_read(i2s->regmap, I2S_CLR, &val);
-			retry--;
-			if (!retry) {
-				dev_warn(i2s->dev, "fail to clear\n");
-				break;
+
+			/* Should wait for clear operation to finish */
+			while (val) {
+				regmap_read(i2s->regmap, I2S_CLR, &val);
+				retry--;
+				if (!retry) {
+					dev_warn(i2s->dev, "fail to clear\n");
+					break;
+				}
 			}
 		}
 	}
@@ -113,29 +128,37 @@
 				   I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_ENABLE);
 
 		regmap_update_bits(i2s->regmap, I2S_XFER,
-				   I2S_XFER_RXS_START,
-				   I2S_XFER_RXS_START);
+				   I2S_XFER_TXS_START | I2S_XFER_RXS_START,
+				   I2S_XFER_TXS_START | I2S_XFER_RXS_START);
+
+		i2s->rx_start = true;
 	} else {
+		i2s->rx_start = false;
+
 		regmap_update_bits(i2s->regmap, I2S_DMACR,
 				   I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_DISABLE);
 
-		regmap_update_bits(i2s->regmap, I2S_XFER,
-				   I2S_XFER_RXS_START,
-				   I2S_XFER_RXS_STOP);
+		if (!i2s->tx_start) {
+			regmap_update_bits(i2s->regmap, I2S_XFER,
+					   I2S_XFER_TXS_START |
+					   I2S_XFER_RXS_START,
+					   I2S_XFER_TXS_STOP |
+					   I2S_XFER_RXS_STOP);
 
-		regmap_update_bits(i2s->regmap, I2S_CLR,
-				   I2S_CLR_RXC,
-				   I2S_CLR_RXC);
+			regmap_update_bits(i2s->regmap, I2S_CLR,
+					   I2S_CLR_TXC | I2S_CLR_RXC,
+					   I2S_CLR_TXC | I2S_CLR_RXC);
 
-		regmap_read(i2s->regmap, I2S_CLR, &val);
-
-		/* Should wait for clear operation to finish */
-		while (val & I2S_CLR_RXC) {
 			regmap_read(i2s->regmap, I2S_CLR, &val);
-			retry--;
-			if (!retry) {
-				dev_warn(i2s->dev, "fail to clear\n");
-				break;
+
+			/* Should wait for clear operation to finish */
+			while (val) {
+				regmap_read(i2s->regmap, I2S_CLR, &val);
+				retry--;
+				if (!retry) {
+					dev_warn(i2s->dev, "fail to clear\n");
+					break;
+				}
 			}
 		}
 	}
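The rockchip_i2s change couples the two directions: XFER is started for tx and rx together, and is only cleared (with a FIFO flush) once both directions have asked to stop. Condensed as a sketch with a stand-in private struct:

struct i2s_state {			/* stand-in for the driver's private data */
	bool tx_start;
	bool rx_start;
};

static bool sketch_stop_direction(struct i2s_state *s, bool is_tx)
{
	if (is_tx)
		s->tx_start = false;
	else
		s->rx_start = false;

	/* XFER may only be stopped and the FIFOs cleared when both are idle */
	return !s->tx_start && !s->rx_start;
}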
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
index 7e0acd8..bc4a55b 100644
--- a/sound/soc/soc-ac97.c
+++ b/sound/soc/soc-ac97.c
@@ -59,8 +59,7 @@
 #ifdef CONFIG_GPIOLIB
 static inline struct snd_soc_codec *gpio_to_codec(struct gpio_chip *chip)
 {
-	struct snd_ac97_gpio_priv *gpio_priv =
-		container_of(chip, struct snd_ac97_gpio_priv, gpio_chip);
+	struct snd_ac97_gpio_priv *gpio_priv = gpiochip_get_data(chip);
 
 	return gpio_priv->codec;
 }
@@ -98,8 +97,7 @@
 static void snd_soc_ac97_gpio_set(struct gpio_chip *chip, unsigned offset,
 				  int value)
 {
-	struct snd_ac97_gpio_priv *gpio_priv =
-		container_of(chip, struct snd_ac97_gpio_priv, gpio_chip);
+	struct snd_ac97_gpio_priv *gpio_priv = gpiochip_get_data(chip);
 	struct snd_soc_codec *codec = gpio_to_codec(chip);
 
 	gpio_priv->gpios_set &= ~(1 << offset);
@@ -145,7 +143,7 @@
 	gpio_priv->gpio_chip.parent = codec->dev;
 	gpio_priv->gpio_chip.base = -1;
 
-	ret = gpiochip_add(&gpio_priv->gpio_chip);
+	ret = gpiochip_add_data(&gpio_priv->gpio_chip, gpio_priv);
 	if (ret != 0)
 		dev_err(codec->dev, "Failed to add GPIOs: %d\n", ret);
 	return ret;
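The soc-ac97 conversion follows the standard gpiolib data-pointer pattern: register the chip with gpiochip_add_data() and fetch the private structure back with gpiochip_get_data() in the callbacks, instead of container_of(). A minimal, self-contained sketch with made-up types:

#include <linux/gpio/driver.h>
#include <linux/errno.h>

struct my_gpio_priv {
	struct gpio_chip chip;
	int last_value;
};

static void my_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct my_gpio_priv *priv = gpiochip_get_data(chip);

	priv->last_value = value;	/* illustrative only */
}

static int my_gpio_register(struct my_gpio_priv *priv)
{
	priv->chip.label = "sketch-gpio";
	priv->chip.set = my_gpio_set;
	priv->chip.base = -1;
	priv->chip.ngpio = 1;

	return gpiochip_add_data(&priv->chip, priv);	/* data == priv */
}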
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index d2e62b15..16369ca 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -930,7 +930,18 @@
 	return NULL;
 }
 
-static struct snd_soc_dai *snd_soc_find_dai(
+/**
+ * snd_soc_find_dai - Find a registered DAI
+ *
+ * @dlc: name of the DAI and optional component info to match
+ *
+ * This function searches all registered components and their DAIs to
+ * find the DAI with the given name. The component's of_node and name
+ * must also match if they are specified.
+ *
+ * Return: pointer to the DAI, or NULL if not found.
+ */
+struct snd_soc_dai *snd_soc_find_dai(
 	const struct snd_soc_dai_link_component *dlc)
 {
 	struct snd_soc_component *component;
@@ -959,6 +970,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(snd_soc_find_dai);
 
 static bool soc_is_dai_link_bound(struct snd_soc_card *card,
 		struct snd_soc_dai_link *dai_link)
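With snd_soc_find_dai() exported, machine or platform drivers can resolve a DAI through a snd_soc_dai_link_component key. A hedged usage sketch (the DAI name is only an example, borrowed from the Skylake DAIs above):

#include <sound/soc.h>

static struct snd_soc_dai *sketch_lookup_dai(void)
{
	struct snd_soc_dai_link_component dlc = {
		.dai_name = "iDisp1 Pin",	/* example name */
	};

	return snd_soc_find_dai(&dlc);		/* NULL if nothing matches */
}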
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 801ae1a..c446485 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2188,6 +2188,13 @@
 	int count = 0;
 	char *state = "not set";
 
+	/* card won't be set for the dummy component; as a spot fix
+	 * we check for that case specifically here, but in future
+	 * we will ensure that the dummy component looks like the others.
+	 */
+	if (!cmpnt->card)
+		return 0;
+
 	list_for_each_entry(w, &cmpnt->card->widgets, list) {
 		if (w->dapm != dapm)
 			continue;
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 6fd1906..6cef397 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -163,31 +163,42 @@
 	}
 
 	/*
-	 * Prepare formats mask for valid/allowed sample types. If the dma does
-	 * not have support for the given physical word size, it needs to be
-	 * masked out so user space can not use the format which produces
-	 * corrupted audio.
-	 * In case the dma driver does not implement the slave_caps the default
-	 * assumption is that it supports 1, 2 and 4 bytes widths.
+	 * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
+	 * hw.formats set to 0, meaning no restrictions are in place.
+	 * In this case it's the responsibility of the DAI driver to
+	 * provide the supported format information.
 	 */
-	for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
-		int bits = snd_pcm_format_physical_width(i);
+	if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
+		/*
+		 * Prepare formats mask for valid/allowed sample types. If the
+		 * dma does not have support for the given physical word size,
+		 * it needs to be masked out so user space can not use the
+		 * format which produces corrupted audio.
+		 * In case the dma driver does not implement the slave_caps the
+		 * default assumption is that it supports 1, 2 and 4 bytes
+		 * widths.
+		 */
+		for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+			int bits = snd_pcm_format_physical_width(i);
 
-		/* Enable only samples with DMA supported physical widths */
-		switch (bits) {
-		case 8:
-		case 16:
-		case 24:
-		case 32:
-		case 64:
-			if (addr_widths & (1 << (bits / 8)))
-				hw.formats |= (1LL << i);
-			break;
-		default:
-			/* Unsupported types */
-			break;
+			/*
+			 * Enable only samples with DMA supported physical
+			 * widths
+			 */
+			switch (bits) {
+			case 8:
+			case 16:
+			case 24:
+			case 32:
+			case 64:
+				if (addr_widths & (1 << (bits / 8)))
+					hw.formats |= (1LL << i);
+				break;
+			default:
+				/* Unsupported types */
+				break;
+			}
 		}
-	}
 
 	return snd_soc_set_runtime_hwparams(substream, &hw);
 }
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index d14bf41..a452ad7 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -15,7 +15,6 @@
 	select SND_RAWMIDI
 	select SND_PCM
 	select BITREVERSE
-	select SND_USB_AUDIO_USE_MEDIA_CONTROLLER if MEDIA_CONTROLLER && (MEDIA_SUPPORT=y || MEDIA_SUPPORT=SND_USB_AUDIO)
 	help
 	  Say Y here to include support for USB audio and USB MIDI
 	  devices.
@@ -23,9 +22,6 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-usb-audio.
 
-config SND_USB_AUDIO_USE_MEDIA_CONTROLLER
-	bool
-
 config SND_USB_UA101
 	tristate "Edirol UA-101/UA-1000 driver"
 	select SND_PCM
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 8dca3c4..2d2d122 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -15,8 +15,6 @@
 			quirks.o \
 			stream.o
 
-snd-usb-audio-$(CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER) += media.o
-
 snd-usbmidi-lib-objs := midi.o
 
 # Toplevel Module Dependency
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 63244bb..69860da 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -66,7 +66,6 @@
 #include "format.h"
 #include "power.h"
 #include "stream.h"
-#include "media.h"
 
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("USB Audio");
@@ -351,6 +350,7 @@
 	case USB_SPEED_HIGH:
 	case USB_SPEED_WIRELESS:
 	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
 		break;
 	default:
 		dev_err(&dev->dev, "unknown device speed %d\n", snd_usb_get_speed(dev));
@@ -451,6 +451,9 @@
 	case USB_SPEED_SUPER:
 		strlcat(card->longname, ", super speed", sizeof(card->longname));
 		break;
+	case USB_SPEED_SUPER_PLUS:
+		strlcat(card->longname, ", super speed plus", sizeof(card->longname));
+		break;
 	default:
 		break;
 	}
@@ -612,11 +615,6 @@
 	if (err < 0)
 		goto __error;
 
-	if (quirk->media_device) {
-		/* don't want to fail when media_snd_device_create() fails */
-		media_snd_device_create(chip, intf);
-	}
-
 	usb_chip[chip->index] = chip;
 	chip->num_interfaces++;
 	usb_set_intfdata(intf, chip);
@@ -673,14 +671,6 @@
 		list_for_each(p, &chip->midi_list) {
 			snd_usbmidi_disconnect(p);
 		}
-		/*
-		 * Nice to check quirk && quirk->media_device
-		 * need some special handlings. Doesn't look like
-		 * we have access to quirk here
-		 * Acceses mixer_list
-		*/
-		media_snd_device_delete(chip);
-
 		/* release mixer resources */
 		list_for_each_entry(mixer, &chip->mixer_list, list) {
 			snd_usb_mixer_disconnect(mixer);
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 34a0898..71778ca 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -105,8 +105,6 @@
 	struct list_head list;
 };
 
-struct media_ctl;
-
 struct snd_usb_substream {
 	struct snd_usb_stream *stream;
 	struct usb_device *dev;
@@ -158,7 +156,6 @@
 	} dsd_dop;
 
 	bool trigger_tstamp_pending_update; /* trigger timestamp being updated from initial estimate */
-	struct media_ctl *media_ctl;
 };
 
 struct snd_usb_stream {
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 7ccbcaf..26dd5f2 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -309,6 +309,9 @@
 	 * support reading */
 	if (snd_usb_get_sample_rate_quirk(chip))
 		return 0;
+	/* the firmware is likely buggy; don't keep retrying after repeated failures */
+	if (chip->sample_rate_read_error > 2)
+		return 0;
 
 	if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
 				   USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
@@ -316,6 +319,7 @@
 				   data, sizeof(data))) < 0) {
 		dev_err(&dev->dev, "%d:%d: cannot get freq at ep %#x\n",
 			iface, fmt->altsetting, ep);
+		chip->sample_rate_read_error++;
 		return 0; /* some devices don't support reading */
 	}
 
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
index 51ed1ac..7712e2b 100644
--- a/sound/usb/helper.c
+++ b/sound/usb/helper.c
@@ -120,6 +120,7 @@
 	case USB_SPEED_HIGH:
 	case USB_SPEED_WIRELESS:
 	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
 		if (get_endpoint(alts, 0)->bInterval >= 1 &&
 		    get_endpoint(alts, 0)->bInterval <= 4)
 			return get_endpoint(alts, 0)->bInterval - 1;
diff --git a/sound/usb/media.c b/sound/usb/media.c
deleted file mode 100644
index 93a50d01..0000000
--- a/sound/usb/media.c
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * media.c - Media Controller specific ALSA driver code
- *
- * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
- * Copyright (c) 2016 Samsung Electronics Co., Ltd.
- *
- * This file is released under the GPLv2.
- */
-
-/*
- * This file adds Media Controller support to ALSA driver
- * to use the Media Controller API to share tuner with DVB
- * and V4L2 drivers that control media device. Media device
- * is created based on existing quirks framework. Using this
- * approach, the media controller API usage can be added for
- * a specific device.
-*/
-
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/usb.h>
-
-#include <sound/pcm.h>
-#include <sound/core.h>
-
-#include "usbaudio.h"
-#include "card.h"
-#include "mixer.h"
-#include "media.h"
-
-static int media_snd_enable_source(struct media_ctl *mctl)
-{
-	if (mctl && mctl->media_dev->enable_source)
-		return mctl->media_dev->enable_source(&mctl->media_entity,
-						      &mctl->media_pipe);
-	return 0;
-}
-
-static void media_snd_disable_source(struct media_ctl *mctl)
-{
-	if (mctl && mctl->media_dev->disable_source)
-		mctl->media_dev->disable_source(&mctl->media_entity);
-}
-
-int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm,
-			int stream)
-{
-	struct media_device *mdev;
-	struct media_ctl *mctl;
-	struct device *pcm_dev = &pcm->streams[stream].dev;
-	u32 intf_type;
-	int ret = 0;
-	u16 mixer_pad;
-	struct media_entity *entity;
-
-	mdev = subs->stream->chip->media_dev;
-	if (!mdev)
-		return -ENODEV;
-
-	if (subs->media_ctl)
-		return 0;
-
-	/* allocate media_ctl */
-	mctl = kzalloc(sizeof(*mctl), GFP_KERNEL);
-	if (!mctl)
-		return -ENOMEM;
-
-	mctl->media_dev = mdev;
-	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		intf_type = MEDIA_INTF_T_ALSA_PCM_PLAYBACK;
-		mctl->media_entity.function = MEDIA_ENT_F_AUDIO_PLAYBACK;
-		mctl->media_pad.flags = MEDIA_PAD_FL_SOURCE;
-		mixer_pad = 1;
-	} else {
-		intf_type = MEDIA_INTF_T_ALSA_PCM_CAPTURE;
-		mctl->media_entity.function = MEDIA_ENT_F_AUDIO_CAPTURE;
-		mctl->media_pad.flags = MEDIA_PAD_FL_SINK;
-		mixer_pad = 2;
-	}
-	mctl->media_entity.name = pcm->name;
-	media_entity_pads_init(&mctl->media_entity, 1, &mctl->media_pad);
-	ret =  media_device_register_entity(mctl->media_dev,
-					    &mctl->media_entity);
-	if (ret)
-		goto free_mctl;
-
-	mctl->intf_devnode = media_devnode_create(mdev, intf_type, 0,
-						  MAJOR(pcm_dev->devt),
-						  MINOR(pcm_dev->devt));
-	if (!mctl->intf_devnode) {
-		ret = -ENOMEM;
-		goto unregister_entity;
-	}
-	mctl->intf_link = media_create_intf_link(&mctl->media_entity,
-						 &mctl->intf_devnode->intf,
-						 MEDIA_LNK_FL_ENABLED);
-	if (!mctl->intf_link) {
-		ret = -ENOMEM;
-		goto devnode_remove;
-	}
-
-	/* create link between mixer and audio */
-	media_device_for_each_entity(entity, mdev) {
-		switch (entity->function) {
-		case MEDIA_ENT_F_AUDIO_MIXER:
-			ret = media_create_pad_link(entity, mixer_pad,
-						    &mctl->media_entity, 0,
-						    MEDIA_LNK_FL_ENABLED);
-			if (ret)
-				goto remove_intf_link;
-			break;
-		}
-	}
-
-	subs->media_ctl = mctl;
-	return 0;
-
-remove_intf_link:
-	media_remove_intf_link(mctl->intf_link);
-devnode_remove:
-	media_devnode_remove(mctl->intf_devnode);
-unregister_entity:
-	media_device_unregister_entity(&mctl->media_entity);
-free_mctl:
-	kfree(mctl);
-	return ret;
-}
-
-void media_snd_stream_delete(struct snd_usb_substream *subs)
-{
-	struct media_ctl *mctl = subs->media_ctl;
-
-	if (mctl && mctl->media_dev) {
-		struct media_device *mdev;
-
-		mdev = subs->stream->chip->media_dev;
-		if (mdev && media_devnode_is_registered(&mdev->devnode)) {
-			media_devnode_remove(mctl->intf_devnode);
-			media_device_unregister_entity(&mctl->media_entity);
-			media_entity_cleanup(&mctl->media_entity);
-		}
-		kfree(mctl);
-		subs->media_ctl = NULL;
-	}
-}
-
-int media_snd_start_pipeline(struct snd_usb_substream *subs)
-{
-	struct media_ctl *mctl = subs->media_ctl;
-
-	if (mctl)
-		return media_snd_enable_source(mctl);
-	return 0;
-}
-
-void media_snd_stop_pipeline(struct snd_usb_substream *subs)
-{
-	struct media_ctl *mctl = subs->media_ctl;
-
-	if (mctl)
-		media_snd_disable_source(mctl);
-}
-
-int media_snd_mixer_init(struct snd_usb_audio *chip)
-{
-	struct device *ctl_dev = &chip->card->ctl_dev;
-	struct media_intf_devnode *ctl_intf;
-	struct usb_mixer_interface *mixer;
-	struct media_device *mdev = chip->media_dev;
-	struct media_mixer_ctl *mctl;
-	u32 intf_type = MEDIA_INTF_T_ALSA_CONTROL;
-	int ret;
-
-	if (!mdev)
-		return -ENODEV;
-
-	ctl_intf = chip->ctl_intf_media_devnode;
-	if (!ctl_intf) {
-		ctl_intf = media_devnode_create(mdev, intf_type, 0,
-						MAJOR(ctl_dev->devt),
-						MINOR(ctl_dev->devt));
-		if (!ctl_intf)
-			return -ENOMEM;
-		chip->ctl_intf_media_devnode = ctl_intf;
-	}
-
-	list_for_each_entry(mixer, &chip->mixer_list, list) {
-
-		if (mixer->media_mixer_ctl)
-			continue;
-
-		/* allocate media_mixer_ctl */
-		mctl = kzalloc(sizeof(*mctl), GFP_KERNEL);
-		if (!mctl)
-			return -ENOMEM;
-
-		mctl->media_dev = mdev;
-		mctl->media_entity.function = MEDIA_ENT_F_AUDIO_MIXER;
-		mctl->media_entity.name = chip->card->mixername;
-		mctl->media_pad[0].flags = MEDIA_PAD_FL_SINK;
-		mctl->media_pad[1].flags = MEDIA_PAD_FL_SOURCE;
-		mctl->media_pad[2].flags = MEDIA_PAD_FL_SOURCE;
-		media_entity_pads_init(&mctl->media_entity, MEDIA_MIXER_PAD_MAX,
-				  mctl->media_pad);
-		ret =  media_device_register_entity(mctl->media_dev,
-						    &mctl->media_entity);
-		if (ret) {
-			kfree(mctl);
-			return ret;
-		}
-
-		mctl->intf_link = media_create_intf_link(&mctl->media_entity,
-							 &ctl_intf->intf,
-							 MEDIA_LNK_FL_ENABLED);
-		if (!mctl->intf_link) {
-			media_device_unregister_entity(&mctl->media_entity);
-			media_entity_cleanup(&mctl->media_entity);
-			kfree(mctl);
-			return -ENOMEM;
-		}
-		mctl->intf_devnode = ctl_intf;
-		mixer->media_mixer_ctl = mctl;
-	}
-	return 0;
-}
-
-static void media_snd_mixer_delete(struct snd_usb_audio *chip)
-{
-	struct usb_mixer_interface *mixer;
-	struct media_device *mdev = chip->media_dev;
-
-	if (!mdev)
-		return;
-
-	list_for_each_entry(mixer, &chip->mixer_list, list) {
-		struct media_mixer_ctl *mctl;
-
-		mctl = mixer->media_mixer_ctl;
-		if (!mixer->media_mixer_ctl)
-			continue;
-
-		if (media_devnode_is_registered(&mdev->devnode)) {
-			media_device_unregister_entity(&mctl->media_entity);
-			media_entity_cleanup(&mctl->media_entity);
-		}
-		kfree(mctl);
-		mixer->media_mixer_ctl = NULL;
-	}
-	if (media_devnode_is_registered(&mdev->devnode))
-		media_devnode_remove(chip->ctl_intf_media_devnode);
-	chip->ctl_intf_media_devnode = NULL;
-}
-
-int media_snd_device_create(struct snd_usb_audio *chip,
-			struct usb_interface *iface)
-{
-	struct media_device *mdev;
-	struct usb_device *usbdev = interface_to_usbdev(iface);
-	int ret;
-
-	mdev = media_device_get_devres(&usbdev->dev);
-	if (!mdev)
-		return -ENOMEM;
-	if (!mdev->dev) {
-		/* register media device */
-		mdev->dev = &usbdev->dev;
-		if (usbdev->product)
-			strlcpy(mdev->model, usbdev->product,
-				sizeof(mdev->model));
-		if (usbdev->serial)
-			strlcpy(mdev->serial, usbdev->serial,
-				sizeof(mdev->serial));
-		strcpy(mdev->bus_info, usbdev->devpath);
-		mdev->hw_revision = le16_to_cpu(usbdev->descriptor.bcdDevice);
-		media_device_init(mdev);
-	}
-	if (!media_devnode_is_registered(&mdev->devnode)) {
-		ret = media_device_register(mdev);
-		if (ret) {
-			dev_err(&usbdev->dev,
-				"Couldn't register media device. Error: %d\n",
-				ret);
-			return ret;
-		}
-	}
-
-	/* save media device - avoid lookups */
-	chip->media_dev = mdev;
-
-	/* Create media entities for mixer and control dev */
-	ret = media_snd_mixer_init(chip);
-	if (ret) {
-		dev_err(&usbdev->dev,
-			"Couldn't create media mixer entities. Error: %d\n",
-			ret);
-
-		/* clear saved media_dev */
-		chip->media_dev = NULL;
-
-		return ret;
-	}
-	return 0;
-}
-
-void media_snd_device_delete(struct snd_usb_audio *chip)
-{
-	struct media_device *mdev = chip->media_dev;
-
-	media_snd_mixer_delete(chip);
-
-	if (mdev) {
-		if (media_devnode_is_registered(&mdev->devnode))
-			media_device_unregister(mdev);
-		chip->media_dev = NULL;
-	}
-}
diff --git a/sound/usb/media.h b/sound/usb/media.h
deleted file mode 100644
index 1dcdcdc..0000000
--- a/sound/usb/media.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * media.h - Media Controller specific ALSA driver code
- *
- * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
- * Copyright (c) 2016 Samsung Electronics Co., Ltd.
- *
- * This file is released under the GPLv2.
- */
-
-/*
- * This file adds Media Controller support to ALSA driver
- * to use the Media Controller API to share tuner with DVB
- * and V4L2 drivers that control media device. Media device
- * is created based on existing quirks framework. Using this
- * approach, the media controller API usage can be added for
- * a specific device.
-*/
-#ifndef __MEDIA_H
-
-#ifdef CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER
-
-#include <media/media-device.h>
-#include <media/media-entity.h>
-#include <sound/asound.h>
-
-struct media_ctl {
-	struct media_device *media_dev;
-	struct media_entity media_entity;
-	struct media_intf_devnode *intf_devnode;
-	struct media_link *intf_link;
-	struct media_pad media_pad;
-	struct media_pipeline media_pipe;
-};
-
-/*
- * One source pad each for SNDRV_PCM_STREAM_CAPTURE and
- * SNDRV_PCM_STREAM_PLAYBACK. One for sink pad to link
- * to AUDIO Source
-*/
-#define MEDIA_MIXER_PAD_MAX    (SNDRV_PCM_STREAM_LAST + 2)
-
-struct media_mixer_ctl {
-	struct media_device *media_dev;
-	struct media_entity media_entity;
-	struct media_intf_devnode *intf_devnode;
-	struct media_link *intf_link;
-	struct media_pad media_pad[MEDIA_MIXER_PAD_MAX];
-	struct media_pipeline media_pipe;
-};
-
-int media_snd_device_create(struct snd_usb_audio *chip,
-			    struct usb_interface *iface);
-void media_snd_device_delete(struct snd_usb_audio *chip);
-int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm,
-			  int stream);
-void media_snd_stream_delete(struct snd_usb_substream *subs);
-int media_snd_start_pipeline(struct snd_usb_substream *subs);
-void media_snd_stop_pipeline(struct snd_usb_substream *subs);
-#else
-static inline int media_snd_device_create(struct snd_usb_audio *chip,
-					  struct usb_interface *iface)
-						{ return 0; }
-static inline void media_snd_device_delete(struct snd_usb_audio *chip) { }
-static inline int media_snd_stream_init(struct snd_usb_substream *subs,
-					struct snd_pcm *pcm, int stream)
-						{ return 0; }
-static inline void media_snd_stream_delete(struct snd_usb_substream *subs) { }
-static inline int media_snd_start_pipeline(struct snd_usb_substream *subs)
-					{ return 0; }
-static inline void media_snd_stop_pipeline(struct snd_usb_substream *subs) { }
-#endif
-#endif /* __MEDIA_H */
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 47de8af..7ba9292 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -911,6 +911,7 @@
 	switch (snd_usb_get_speed(ep->umidi->dev)) {
 	case USB_SPEED_HIGH:
 	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
 		count = 1;
 		break;
 	default:
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 4f85757..2f8c388 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -45,6 +45,7 @@
 #include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/list.h>
+#include <linux/log2.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/usb.h>
@@ -1378,6 +1379,71 @@
 	snd_usb_mixer_add_control(&cval->head, kctl);
 }
 
+static int parse_clock_source_unit(struct mixer_build *state, int unitid,
+				   void *_ftr)
+{
+	struct uac_clock_source_descriptor *hdr = _ftr;
+	struct usb_mixer_elem_info *cval;
+	struct snd_kcontrol *kctl;
+	char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+	int ret;
+
+	if (state->mixer->protocol != UAC_VERSION_2)
+		return -EINVAL;
+
+	if (hdr->bLength != sizeof(*hdr)) {
+		usb_audio_dbg(state->chip,
+			      "Bogus clock source descriptor length of %d, ignoring.\n",
+			      hdr->bLength);
+		return 0;
+	}
+
+	/*
+	 * The only property of this unit we are interested in is the
+	 * clock source validity. If that isn't readable, just bail out.
+	 */
+	if (!uac2_control_is_readable(hdr->bmControls,
+				      ilog2(UAC2_CS_CONTROL_CLOCK_VALID)))
+		return 0;
+
+	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
+	if (!cval)
+		return -ENOMEM;
+
+	snd_usb_mixer_elem_init_std(&cval->head, state->mixer, hdr->bClockID);
+
+	cval->min = 0;
+	cval->max = 1;
+	cval->channels = 1;
+	cval->val_type = USB_MIXER_BOOLEAN;
+	cval->control = UAC2_CS_CONTROL_CLOCK_VALID;
+
+	if (uac2_control_is_writeable(hdr->bmControls,
+				      ilog2(UAC2_CS_CONTROL_CLOCK_VALID)))
+		kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
+	else {
+		cval->master_readonly = 1;
+		kctl = snd_ctl_new1(&usb_feature_unit_ctl_ro, cval);
+	}
+
+	if (!kctl) {
+		kfree(cval);
+		return -ENOMEM;
+	}
+
+	kctl->private_free = snd_usb_mixer_elem_free;
+	ret = snd_usb_copy_string_desc(state, hdr->iClockSource,
+				       name, sizeof(name));
+	if (ret > 0)
+		snprintf(kctl->id.name, sizeof(kctl->id.name),
+			 "%s Validity", name);
+	else
+		snprintf(kctl->id.name, sizeof(kctl->id.name),
+			 "Clock Source %d Validity", hdr->bClockID);
+
+	return snd_usb_mixer_add_control(&cval->head, kctl);
+}
+
 /*
  * parse a feature unit
  *
@@ -2126,10 +2192,11 @@
 
 	switch (p1[2]) {
 	case UAC_INPUT_TERMINAL:
-	case UAC2_CLOCK_SOURCE:
 		return 0; /* NOP */
 	case UAC_MIXER_UNIT:
 		return parse_audio_mixer_unit(state, unitid, p1);
+	case UAC2_CLOCK_SOURCE:
+		return parse_clock_source_unit(state, unitid, p1);
 	case UAC_SELECTOR_UNIT:
 	case UAC2_CLOCK_SELECTOR:
 		return parse_audio_selector_unit(state, unitid, p1);
@@ -2307,6 +2374,7 @@
 	__u8 unitid = (index >> 8) & 0xff;
 	__u8 control = (value >> 8) & 0xff;
 	__u8 channel = value & 0xff;
+	unsigned int count = 0;
 
 	if (channel >= MAX_CHANNELS) {
 		usb_audio_dbg(mixer->chip,
@@ -2315,6 +2383,12 @@
 		return;
 	}
 
+	for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem)
+		count++;
+
+	if (count == 0)
+		return;
+
 	for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) {
 		struct usb_mixer_elem_info *info;
 
@@ -2322,7 +2396,7 @@
 			continue;
 
 		info = (struct usb_mixer_elem_info *)list;
-		if (info->control != control)
+		if (count > 1 && info->control != control)
 			continue;
 
 		switch (attribute) {
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index f378944..3417ef3 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -3,8 +3,6 @@
 
 #include <sound/info.h>
 
-struct media_mixer_ctl;
-
 struct usb_mixer_interface {
 	struct snd_usb_audio *chip;
 	struct usb_host_interface *hostif;
@@ -24,7 +22,6 @@
 	struct urb *rc_urb;
 	struct usb_ctrlrequest *rc_setup_packet;
 	u8 rc_buffer[6];
-	struct media_mixer_ctl *media_mixer_ctl;
 };
 
 #define MAX_CHANNELS	16	/* max logical channels */
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index ddca654..1f8fb0d9 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -349,6 +349,16 @@
 };
 
 /*
+ * The Dell USB dock with the ALC4020 codec has a firmware problem: it gets
+ * confused when a zero volume is passed; just skip those controls as a workaround
+ */
+static const struct usbmix_name_map dell_alc4020_map[] = {
+	{ 16, NULL },
+	{ 19, NULL },
+	{ 0 }
+};
+
+/*
  * Control map entries
  */
 
@@ -431,6 +441,10 @@
 		.map = aureon_51_2_map,
 	},
 	{
+		.id = USB_ID(0x0bda, 0x4014),
+		.map = dell_alc4020_map,
+	},
+	{
 		.id = USB_ID(0x0dba, 0x1000),
 		.map = mbox1_map,
 	},
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 0e4e0640..44d178e 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -35,7 +35,6 @@
 #include "pcm.h"
 #include "clock.h"
 #include "power.h"
-#include "media.h"
 
 #define SUBSTREAM_FLAG_DATA_EP_STARTED	0
 #define SUBSTREAM_FLAG_SYNC_EP_STARTED	1
@@ -718,14 +717,10 @@
 	struct audioformat *fmt;
 	int ret;
 
-	ret = media_snd_start_pipeline(subs);
-	if (ret)
-		return ret;
-
 	ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
 					       params_buffer_bytes(hw_params));
 	if (ret < 0)
-		goto err_ret;
+		return ret;
 
 	subs->pcm_format = params_format(hw_params);
 	subs->period_bytes = params_period_bytes(hw_params);
@@ -739,27 +734,22 @@
 		dev_dbg(&subs->dev->dev,
 			"cannot set format: format = %#x, rate = %d, channels = %d\n",
 			   subs->pcm_format, subs->cur_rate, subs->channels);
-		ret = -EINVAL;
-		goto err_ret;
+		return -EINVAL;
 	}
 
 	ret = snd_usb_lock_shutdown(subs->stream->chip);
 	if (ret < 0)
-		goto err_ret;
+		return ret;
 	ret = set_format(subs, fmt);
 	snd_usb_unlock_shutdown(subs->stream->chip);
 	if (ret < 0)
-		goto err_ret;
+		return ret;
 
 	subs->interface = fmt->iface;
 	subs->altset_idx = fmt->altset_idx;
 	subs->need_setup_ep = true;
 
 	return 0;
-
-err_ret:
-	media_snd_stop_pipeline(subs);
-	return ret;
 }
 
 /*
@@ -771,7 +761,6 @@
 {
 	struct snd_usb_substream *subs = substream->runtime->private_data;
 
-	media_snd_stop_pipeline(subs);
 	subs->cur_audiofmt = NULL;
 	subs->cur_rate = 0;
 	subs->period_bytes = 0;
@@ -1232,7 +1221,6 @@
 	struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct snd_usb_substream *subs = &as->substream[direction];
-	int ret;
 
 	subs->interface = -1;
 	subs->altset_idx = 0;
@@ -1246,12 +1234,7 @@
 	subs->dsd_dop.channel = 0;
 	subs->dsd_dop.marker = 1;
 
-	ret = setup_hw_info(runtime, subs);
-	if (ret == 0)
-		ret = media_snd_stream_init(subs, as->pcm, direction);
-	if (ret)
-		snd_usb_autosuspend(subs->stream->chip);
-	return ret;
+	return setup_hw_info(runtime, subs);
 }
 
 static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
@@ -1260,7 +1243,6 @@
 	struct snd_usb_substream *subs = &as->substream[direction];
 
 	stop_endpoints(subs, true);
-	media_snd_stop_pipeline(subs);
 
 	if (subs->interface >= 0 &&
 	    !snd_usb_lock_shutdown(subs->stream->chip)) {
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 9d087b1..c60a776 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2886,7 +2886,6 @@
 		.product_name = pname, \
 		.ifnum = QUIRK_ANY_INTERFACE, \
 		.type = QUIRK_AUDIO_ALIGN_TRANSFER, \
-		.media_device = 1, \
 	} \
 }
 
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index fb62bce..6adde45 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -150,6 +150,7 @@
 		usb_audio_err(chip, "cannot memdup\n");
 		return -ENOMEM;
 	}
+	INIT_LIST_HEAD(&fp->list);
 	if (fp->nr_rates > MAX_NR_RATES) {
 		kfree(fp);
 		return -EINVAL;
@@ -193,6 +194,7 @@
 	return 0;
 
  error:
+	list_del(&fp->list); /* unlink for avoiding double-free */
 	kfree(fp);
 	kfree(rate_table);
 	return err;
@@ -469,6 +471,7 @@
 	fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
 	fp->datainterval = 0;
 	fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+	INIT_LIST_HEAD(&fp->list);
 
 	switch (fp->maxpacksize) {
 	case 0x120:
@@ -492,6 +495,7 @@
 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
 	err = snd_usb_add_audio_stream(chip, stream, fp);
 	if (err < 0) {
+		list_del(&fp->list); /* unlink for avoiding double-free */
 		kfree(fp);
 		return err;
 	}
@@ -1130,9 +1134,14 @@
 	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
+	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+	case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
 		return true;
 	}
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 51258a1..8e9548bc 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -36,7 +36,6 @@
 #include "format.h"
 #include "clock.h"
 #include "stream.h"
-#include "media.h"
 
 /*
  * free a substream
@@ -53,7 +52,6 @@
 		kfree(fp);
 	}
 	kfree(subs->rate_list.list);
-	media_snd_stream_delete(subs);
 }
 
 
@@ -316,7 +314,9 @@
 /*
  * add this endpoint to the chip instance.
  * if a stream with the same endpoint already exists, append to it.
- * if not, create a new pcm stream.
+ * if not, create a new pcm stream. Note: fp is added to the substream
+ * fmt_list and will be freed when the chip instance is released. Do not
+ * free fp without first unlinking it, to avoid a double-free.
  */
 int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
 			     int stream,
@@ -677,6 +677,7 @@
 					* (fp->maxpacksize & 0x7ff);
 		fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
 		fp->clock = clock;
+		INIT_LIST_HEAD(&fp->list);
 
 		/* some quirks for attributes here */
 
@@ -725,6 +726,7 @@
 		dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
 		err = snd_usb_add_audio_stream(chip, stream, fp);
 		if (err < 0) {
+			list_del(&fp->list); /* unlink to avoid a double-free */
 			kfree(fp->rate_table);
 			kfree(fp->chmap);
 			kfree(fp);
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index a161c7c..4d5c89a 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -30,9 +30,6 @@
  *
  */
 
-struct media_device;
-struct media_intf_devnode;
-
 struct snd_usb_audio {
 	int index;
 	struct usb_device *dev;
@@ -50,6 +47,7 @@
 	
 	int num_interfaces;
 	int num_suspended_intf;
+	int sample_rate_read_error;
 
 	struct list_head pcm_list;	/* list of pcm streams */
 	struct list_head ep_list;	/* list of audio-related endpoints */
@@ -63,8 +61,6 @@
 	bool autoclock;			/* from the 'autoclock' module param */
 
 	struct usb_host_interface *ctrl_intf;	/* the audio control interface */
-	struct media_device *media_dev;
-	struct media_intf_devnode *ctl_intf_media_devnode;
 };
 
 #define usb_audio_err(chip, fmt, args...) \
@@ -115,7 +111,6 @@
 	const char *product_name;
 	int16_t ifnum;
 	uint16_t type;
-	bool media_device;
 	const void *data;
 };
 
diff --git a/tools/Makefile b/tools/Makefile
index 60c7e6c..6bf68fe 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -137,7 +137,8 @@
 	$(call descend,lib/subcmd,clean)
 
 perf_clean:
-	$(call descend,$(@:_clean=),clean)
+	$(Q)mkdir -p $(PERF_O) .
+	$(Q)$(MAKE) --no-print-directory -C perf O=$(PERF_O) subdir= clean
 
 selftests_clean:
 	$(call descend,testing/$(@:_clean=),clean)
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 6b77072..57c8f98 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -30,6 +30,7 @@
 FEATURE_TESTS_BASIC :=			\
 	backtrace			\
 	dwarf				\
+	dwarf_getlocations		\
 	fortify-source			\
 	sync-compare-and-swap		\
 	glibc				\
@@ -48,6 +49,10 @@
 	libslang			\
 	libcrypto			\
 	libunwind			\
+	libunwind-x86			\
+	libunwind-x86_64		\
+	libunwind-arm			\
+	libunwind-aarch64		\
 	pthread-attr-setaffinity-np	\
 	stackprotector-all		\
 	timerfd				\
@@ -68,7 +73,9 @@
 	libbabeltrace			\
 	liberty				\
 	liberty-z			\
-	libunwind-debug-frame
+	libunwind-debug-frame		\
+	libunwind-debug-frame-arm	\
+	libunwind-debug-frame-aarch64
 
 FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)
 
@@ -78,6 +85,7 @@
 
 FEATURE_DISPLAY ?=			\
 	dwarf				\
+	dwarf_getlocations		\
 	glibc				\
 	gtk2				\
 	libaudit			\
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index c5f4c41..3d88f09 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -3,6 +3,7 @@
 	test-backtrace.bin		\
 	test-bionic.bin			\
 	test-dwarf.bin			\
+	test-dwarf_getlocations.bin	\
 	test-fortify-source.bin		\
 	test-sync-compare-and-swap.bin	\
 	test-glibc.bin			\
@@ -26,6 +27,12 @@
 	test-libcrypto.bin		\
 	test-libunwind.bin		\
 	test-libunwind-debug-frame.bin	\
+	test-libunwind-x86.bin		\
+	test-libunwind-x86_64.bin	\
+	test-libunwind-arm.bin		\
+	test-libunwind-aarch64.bin	\
+	test-libunwind-debug-frame-arm.bin	\
+	test-libunwind-debug-frame-aarch64.bin	\
 	test-pthread-attr-setaffinity-np.bin	\
 	test-stackprotector-all.bin	\
 	test-timerfd.bin		\
@@ -82,6 +89,9 @@
 $(OUTPUT)test-dwarf.bin:
 	$(BUILD) $(DWARFLIBS)
 
+$(OUTPUT)test-dwarf_getlocations.bin:
+	$(BUILD) $(DWARFLIBS)
+
 $(OUTPUT)test-libelf-mmap.bin:
 	$(BUILD) -lelf
 
@@ -99,6 +109,23 @@
 
 $(OUTPUT)test-libunwind-debug-frame.bin:
 	$(BUILD) -lelf
+$(OUTPUT)test-libunwind-x86.bin:
+	$(BUILD) -lelf -lunwind-x86
+
+$(OUTPUT)test-libunwind-x86_64.bin:
+	$(BUILD) -lelf -lunwind-x86_64
+
+$(OUTPUT)test-libunwind-arm.bin:
+	$(BUILD) -lelf -lunwind-arm
+
+$(OUTPUT)test-libunwind-aarch64.bin:
+	$(BUILD) -lelf -lunwind-aarch64
+
+$(OUTPUT)test-libunwind-debug-frame-arm.bin:
+	$(BUILD) -lelf -lunwind-arm
+
+$(OUTPUT)test-libunwind-debug-frame-aarch64.bin:
+	$(BUILD) -lelf -lunwind-aarch64
 
 $(OUTPUT)test-libaudit.bin:
 	$(BUILD) -laudit
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index e499a36..a282e8c 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -41,6 +41,10 @@
 # include "test-dwarf.c"
 #undef main
 
+#define main main_test_dwarf_getlocations
+# include "test-dwarf_getlocations.c"
+#undef main
+
 #define main main_test_libelf_getphdrnum
 # include "test-libelf-getphdrnum.c"
 #undef main
@@ -143,6 +147,7 @@
 	main_test_libelf_mmap();
 	main_test_glibc();
 	main_test_dwarf();
+	main_test_dwarf_getlocations();
 	main_test_libelf_getphdrnum();
 	main_test_libunwind();
 	main_test_libaudit();
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
index b389026..e04ab89 100644
--- a/tools/build/feature/test-bpf.c
+++ b/tools/build/feature/test-bpf.c
@@ -27,10 +27,9 @@
 	attr.log_level = 0;
 	attr.kern_version = 0;
 
-	attr = attr;
 	/*
 	 * Test existence of __NR_bpf and BPF_PROG_LOAD.
 	 * This call should fail if we run the testcase.
 	 */
-	return syscall(__NR_bpf, BPF_PROG_LOAD, attr, sizeof(attr));
+	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 }
diff --git a/tools/build/feature/test-dwarf_getlocations.c b/tools/build/feature/test-dwarf_getlocations.c
new file mode 100644
index 0000000..7016269
--- /dev/null
+++ b/tools/build/feature/test-dwarf_getlocations.c
@@ -0,0 +1,12 @@
+#include <stdlib.h>
+#include <elfutils/libdw.h>
+
+int main(void)
+{
+	Dwarf_Addr base, start, end;
+	Dwarf_Attribute attr;
+	Dwarf_Op *op;
+	size_t nops;
+	ptrdiff_t offset = 0;
+	return (int)dwarf_getlocations(&attr, offset, &base, &start, &end, &op, &nops);
+}
diff --git a/tools/build/feature/test-libunwind-aarch64.c b/tools/build/feature/test-libunwind-aarch64.c
new file mode 100644
index 0000000..fc03fb6
--- /dev/null
+++ b/tools/build/feature/test-libunwind-aarch64.c
@@ -0,0 +1,26 @@
+#include <libunwind-aarch64.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+					       unw_word_t ip,
+					       unw_dyn_info_t *di,
+					       unw_proc_info_t *pi,
+					       int need_unwind_info, void *arg);
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+static unw_accessors_t accessors;
+
+int main(void)
+{
+	unw_addr_space_t addr_space;
+
+	addr_space = unw_create_addr_space(&accessors, 0);
+	if (addr_space)
+		return 0;
+
+	unw_init_remote(NULL, addr_space, NULL);
+	dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+
+	return 0;
+}
diff --git a/tools/build/feature/test-libunwind-arm.c b/tools/build/feature/test-libunwind-arm.c
new file mode 100644
index 0000000..632d95e
--- /dev/null
+++ b/tools/build/feature/test-libunwind-arm.c
@@ -0,0 +1,27 @@
+#include <libunwind-arm.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+					       unw_word_t ip,
+					       unw_dyn_info_t *di,
+					       unw_proc_info_t *pi,
+					       int need_unwind_info, void *arg);
+
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+static unw_accessors_t accessors;
+
+int main(void)
+{
+	unw_addr_space_t addr_space;
+
+	addr_space = unw_create_addr_space(&accessors, 0);
+	if (addr_space)
+		return 0;
+
+	unw_init_remote(NULL, addr_space, NULL);
+	dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+
+	return 0;
+}
diff --git a/tools/build/feature/test-libunwind-debug-frame-aarch64.c b/tools/build/feature/test-libunwind-debug-frame-aarch64.c
new file mode 100644
index 0000000..2284467
--- /dev/null
+++ b/tools/build/feature/test-libunwind-debug-frame-aarch64.c
@@ -0,0 +1,16 @@
+#include <libunwind-aarch64.h>
+#include <stdlib.h>
+
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+				 unw_word_t ip, unw_word_t segbase,
+				 const char *obj_name, unw_word_t start,
+				 unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
+int main(void)
+{
+	dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
+	return 0;
+}
diff --git a/tools/build/feature/test-libunwind-debug-frame-arm.c b/tools/build/feature/test-libunwind-debug-frame-arm.c
new file mode 100644
index 0000000..f988596
--- /dev/null
+++ b/tools/build/feature/test-libunwind-debug-frame-arm.c
@@ -0,0 +1,16 @@
+#include <libunwind-arm.h>
+#include <stdlib.h>
+
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+				 unw_word_t ip, unw_word_t segbase,
+				 const char *obj_name, unw_word_t start,
+				 unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
+int main(void)
+{
+	dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
+	return 0;
+}
diff --git a/tools/build/feature/test-libunwind-x86.c b/tools/build/feature/test-libunwind-x86.c
new file mode 100644
index 0000000..3561edc
--- /dev/null
+++ b/tools/build/feature/test-libunwind-x86.c
@@ -0,0 +1,27 @@
+#include <libunwind-x86.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+					       unw_word_t ip,
+					       unw_dyn_info_t *di,
+					       unw_proc_info_t *pi,
+					       int need_unwind_info, void *arg);
+
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+static unw_accessors_t accessors;
+
+int main(void)
+{
+	unw_addr_space_t addr_space;
+
+	addr_space = unw_create_addr_space(&accessors, 0);
+	if (addr_space)
+		return 0;
+
+	unw_init_remote(NULL, addr_space, NULL);
+	dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+
+	return 0;
+}
diff --git a/tools/build/feature/test-libunwind-x86_64.c b/tools/build/feature/test-libunwind-x86_64.c
new file mode 100644
index 0000000..5add251
--- /dev/null
+++ b/tools/build/feature/test-libunwind-x86_64.c
@@ -0,0 +1,27 @@
+#include <libunwind-x86_64.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+					       unw_word_t ip,
+					       unw_dyn_info_t *di,
+					       unw_proc_info_t *pi,
+					       int need_unwind_info, void *arg);
+
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+static unw_accessors_t accessors;
+
+int main(void)
+{
+	unw_addr_space_t addr_space;
+
+	addr_space = unw_create_addr_space(&accessors, 0);
+	if (addr_space)
+		return 0;
+
+	unw_init_remote(NULL, addr_space, NULL);
+	dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+
+	return 0;
+}
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 4d198d5..c155d6b 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -1,5 +1,5 @@
 CC = $(CROSS_COMPILE)gcc
-CFLAGS += -Wall -g -D_GNU_SOURCE
+CFLAGS += -O2 -Wall -g -D_GNU_SOURCE
 
 all: lsgpio
 
diff --git a/tools/gpio/lsgpio.c b/tools/gpio/lsgpio.c
index 1124da3..eb3f56e 100644
--- a/tools/gpio/lsgpio.c
+++ b/tools/gpio/lsgpio.c
@@ -147,7 +147,7 @@
 
 int main(int argc, char **argv)
 {
-	const char *device_name;
+	const char *device_name = NULL;
 	int ret;
 	int c;
 
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index 80159e6..d9836c5 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -3351,12 +3351,18 @@
 	/* Boot protocol version: 2.07 supports the fields for lguest. */
 	boot->hdr.version = 0x207;
 
-	/* The hardware_subarch value of "1" tells the Guest it's an lguest. */
-	boot->hdr.hardware_subarch = 1;
+	/* X86_SUBARCH_LGUEST tells the Guest it's an lguest. */
+	boot->hdr.hardware_subarch = X86_SUBARCH_LGUEST;
 
 	/* Tell the entry path not to try to reload segment registers. */
 	boot->hdr.loadflags |= KEEP_SEGMENTS;
 
+	/* We don't support tboot: */
+	boot->tboot_addr = 0;
+
+	/* Ensure this is 0 to prevent APM from loading: */
+	boot->apm_bios_info.version = 0;
+
 	/* We tell the kernel to initialize the Guest. */
 	tell_kernel(start);
 
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index ef78c22..08556cf 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -351,6 +351,19 @@
 	return err;
 }
 
+int procfs__read_str(const char *entry, char **buf, size_t *sizep)
+{
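+	/* Resolve the procfs mountpoint, then read the whole file into *buf. */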
+	char path[PATH_MAX];
+	const char *procfs = procfs__mountpoint();
+
+	if (!procfs)
+		return -1;
+
+	snprintf(path, sizeof(path), "%s/%s", procfs, entry);
+
+	return filename__read_str(path, buf, sizep);
+}
+
 int sysfs__read_ull(const char *entry, unsigned long long *value)
 {
 	char path[PATH_MAX];
diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
index 9f65980..16c9c2e 100644
--- a/tools/lib/api/fs/fs.h
+++ b/tools/lib/api/fs/fs.h
@@ -29,6 +29,8 @@
 int filename__read_ull(const char *filename, unsigned long long *value);
 int filename__read_str(const char *filename, char **buf, size_t *sizep);
 
+int procfs__read_str(const char *entry, char **buf, size_t *sizep);
+
 int sysctl__read_int(const char *sysctl, int *value);
 int sysfs__read_int(const char *entry, int *value);
 int sysfs__read_ull(const char *entry, unsigned long long *value);
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
index 5334ad9..1069d96 100755
--- a/tools/lib/lockdep/run_tests.sh
+++ b/tools/lib/lockdep/run_tests.sh
@@ -3,7 +3,7 @@
 make &> /dev/null
 
 for i in `ls tests/*.c`; do
-	testname=$(basename -s .c "$i")
+	testname=$(basename "$i" .c)
 	gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null
 	echo -ne "$testname... "
 	if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then
@@ -11,11 +11,13 @@
 	else
 		echo "FAILED!"
 	fi
-	rm tests/$testname
+	if [ -f "tests/$testname" ]; then
+		rm tests/$testname
+	fi
 done
 
 for i in `ls tests/*.c`; do
-	testname=$(basename -s .c "$i")
+	testname=$(basename "$i" .c)
 	gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null
 	echo -ne "(PRELOAD) $testname... "
 	if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then
@@ -23,5 +25,7 @@
 	else
 		echo "FAILED!"
 	fi
-	rm tests/$testname
+	if [ -f "tests/$testname" ]; then
+		rm tests/$testname
+	fi
 done
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 0144b3d..88cccea 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1164,11 +1164,11 @@
 		current_op = current_exp;
 
 	ret = collapse_tree(current_op, parg, error_str);
+	/* collapse_tree() may free current_op, and updates parg accordingly */
+	current_op = NULL;
 	if (ret < 0)
 		goto fail;
 
-	*parg = current_op;
-
 	free(token);
 	return 0;
 
diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c
index 5b32413..544b05a 100644
--- a/tools/net/bpf_jit_disasm.c
+++ b/tools/net/bpf_jit_disasm.c
@@ -98,6 +98,9 @@
 	char *buff;
 
 	len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
+	if (len < 0)
+		return NULL;
+
 	buff = malloc(len);
 	if (!buff)
 		return NULL;
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 5a95896..55a60d3 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -299,18 +299,38 @@
 Errors in .c files
 ------------------
 
-If you're getting an objtool error in a compiled .c file, chances are
-the file uses an asm() statement which has a "call" instruction.  An
-asm() statement with a call instruction must declare the use of the
-stack pointer in its output operand.  For example, on x86_64:
+1. c_file.o: warning: objtool: funcA() falls through to next function funcB()
 
-   register void *__sp asm("rsp");
-   asm volatile("call func" : "+r" (__sp));
+   This means that funcA() doesn't end with a return instruction or an
+   unconditional jump, and that objtool has determined that the function
+   can fall through into the next function.  There could be different
+   reasons for this:
 
-Otherwise the stack frame may not get created before the call.
+   1) funcA()'s last instruction is a call to a "noreturn" function like
+      panic().  In this case the noreturn function needs to be added to
+      objtool's hard-coded global_noreturns array.  Feel free to bug the
+      objtool maintainer, or you can submit a patch.
 
-Another possible cause for errors in C code is if the Makefile removes
--fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
+   2) funcA() uses the unreachable() annotation in a section of code
+      that is actually reachable.
+
+   3) If funcA() calls an inline function, the object code for funcA()
+      might be corrupt due to a gcc bug.  For more details, see:
+      https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
+
+2. If you're getting any other objtool error in a compiled .c file, it
+   may be because the file uses an asm() statement which has a "call"
+   instruction.  An asm() statement with a call instruction must declare
+   the use of the stack pointer in its output operand.  For example, on
+   x86_64:
+
+     register void *__sp asm("rsp");
+     asm volatile("call func" : "+r" (__sp));
+
+   Otherwise the stack frame may not get created before the call.
+
+3. Another possible cause for errors in C code is if the Makefile removes
+   -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
 
 Also see the above section on .S file errors for more information about
 what the individual error messages mean.
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 7515cb2..e8a1e69 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -54,6 +54,7 @@
 	struct symbol *call_dest;
 	struct instruction *jump_dest;
 	struct list_head alts;
+	struct symbol *func;
 };
 
 struct alternative {
@@ -66,6 +67,7 @@
 	struct list_head insn_list;
 	DECLARE_HASHTABLE(insn_hash, 16);
 	struct section *rodata, *whitelist;
+	bool ignore_unreachables, c_file;
 };
 
 const char *objname;
@@ -228,7 +230,7 @@
 			}
 		}
 
-		if (insn->type == INSN_JUMP_DYNAMIC)
+		if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
 			/* sibling call */
 			return 0;
 	}
@@ -248,6 +250,7 @@
 static int decode_instructions(struct objtool_file *file)
 {
 	struct section *sec;
+	struct symbol *func;
 	unsigned long offset;
 	struct instruction *insn;
 	int ret;
@@ -281,6 +284,21 @@
 			hash_add(file->insn_hash, &insn->hash, insn->offset);
 			list_add_tail(&insn->list, &file->insn_list);
 		}
+
+		list_for_each_entry(func, &sec->symbol_list, list) {
+			if (func->type != STT_FUNC)
+				continue;
+
+			if (!find_insn(file, sec, func->offset)) {
+				WARN("%s(): can't find starting instruction",
+				     func->name);
+				return -1;
+			}
+
+			func_for_each_insn(file, func, insn)
+				if (!insn->func)
+					insn->func = func;
+		}
 	}
 
 	return 0;
@@ -664,13 +682,40 @@
 						text_rela->addend);
 
 		/*
-		 * TODO: Document where this is needed, or get rid of it.
-		 *
 		 * rare case:   jmpq *[addr](%rip)
+		 *
+		 * This check is for a rare gcc quirk, currently only seen in
+		 * three driver functions in the kernel, only with certain
+		 * obscure non-distro configs.
+		 *
+		 * As part of an optimization, gcc makes a copy of an existing
+		 * switch jump table, modifies it, and then hard-codes the jump
+		 * (albeit with an indirect jump) to use a single entry in the
+		 * table.  The rest of the jump table and some of its jump
+		 * targets remain as dead code.
+		 *
+		 * In such a case we can just crudely ignore all unreachable
+		 * instruction warnings for the entire object file.  Ideally we
+		 * would just ignore them for the function, but that would
+		 * require redesigning the code quite a bit.  And honestly
+		 * that's just not worth doing: unreachable instruction
+		 * warnings are of questionable value anyway, and this is such
+		 * a rare issue.
+		 *
+		 * kbuild reports:
+		 * - https://lkml.kernel.org/r/201603231906.LWcVUpxm%25fengguang.wu@intel.com
+		 * - https://lkml.kernel.org/r/201603271114.K9i45biy%25fengguang.wu@intel.com
+		 * - https://lkml.kernel.org/r/201603291058.zuJ6ben1%25fengguang.wu@intel.com
+		 *
+		 * gcc bug:
+		 * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70604
 		 */
-		if (!rodata_rela)
+		if (!rodata_rela) {
 			rodata_rela = find_rela_by_dest(file->rodata,
 							text_rela->addend + 4);
+			if (rodata_rela)
+				file->ignore_unreachables = true;
+		}
 
 		if (!rodata_rela)
 			continue;
@@ -732,9 +777,6 @@
 {
 	int ret;
 
-	file->whitelist = find_section_by_name(file->elf, "__func_stack_frame_non_standard");
-	file->rodata = find_section_by_name(file->elf, ".rodata");
-
 	ret = decode_instructions(file);
 	if (ret)
 		return ret;
@@ -799,6 +841,7 @@
 	struct alternative *alt;
 	struct instruction *insn;
 	struct section *sec;
+	struct symbol *func = NULL;
 	unsigned char state;
 	int ret;
 
@@ -813,6 +856,16 @@
 	}
 
 	while (1) {
+		if (file->c_file && insn->func) {
+			if (func && func != insn->func) {
+				WARN("%s() falls through to next function %s()",
+				     func->name, insn->func->name);
+				return 1;
+			}
+
+			func = insn->func;
+		}
+
 		if (insn->visited) {
 			if (frame_state(insn->state) != frame_state(state)) {
 				WARN_FUNC("frame pointer state mismatch",
@@ -823,13 +876,6 @@
 			return 0;
 		}
 
-		/*
-		 * Catch a rare case where a noreturn function falls through to
-		 * the next function.
-		 */
-		if (is_fentry_call(insn) && (state & STATE_FENTRY))
-			return 0;
-
 		insn->visited = true;
 		insn->state = state;
 
@@ -1035,12 +1081,8 @@
 				continue;
 
 			insn = find_insn(file, sec, func->offset);
-			if (!insn) {
-				WARN("%s(): can't find starting instruction",
-				     func->name);
-				warnings++;
+			if (!insn)
 				continue;
-			}
 
 			ret = validate_branch(file, insn, 0);
 			warnings += ret;
@@ -1056,13 +1098,14 @@
 				if (insn->visited)
 					continue;
 
-				if (!ignore_unreachable_insn(func, insn) &&
-				    !warnings) {
-					WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
-					warnings++;
-				}
-
 				insn->visited = true;
+
+				if (file->ignore_unreachables || warnings ||
+				    ignore_unreachable_insn(func, insn))
+					continue;
+
+				WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
+				warnings++;
 			}
 		}
 	}
@@ -1133,6 +1176,10 @@
 
 	INIT_LIST_HEAD(&file.insn_list);
 	hash_init(file.insn_hash);
+	file.whitelist = find_section_by_name(file.elf, "__func_stack_frame_non_standard");
+	file.rodata = find_section_by_name(file.elf, ".rodata");
+	file.ignore_unreachables = false;
+	file.c_file = find_section_by_name(file.elf, ".comment");
 
 	ret = decode_sections(&file);
 	if (ret < 0)
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
index be764f9..c6c8318 100644
--- a/tools/perf/Documentation/intel-pt.txt
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -672,6 +672,7 @@
 	d	create a debug log
 	g	synthesize a call chain (use with i or x)
 	l	synthesize last branch entries (use with i or x)
+	s	skip initial number of events
 
 "Instructions" events look like they were recorded by "perf record -e
 instructions".
@@ -730,6 +731,12 @@
 
 To disable trace decoding entirely, use the option --no-itrace.
 
+It is also possible to skip events (instructions, branches, transactions) generated
+at the beginning of the trace. This is useful for ignoring initialization code.
+
+	--itrace=i0nss1000000
+
+skips the first million instructions.
 
 dump option
 -----------
diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt
index 65453f4..e2a4c5e 100644
--- a/tools/perf/Documentation/itrace.txt
+++ b/tools/perf/Documentation/itrace.txt
@@ -7,6 +7,7 @@
 		d	create a debug log
 		g	synthesize a call chain (use with i or x)
 		l	synthesize last branch entries (use with i or x)
+		s	skip initial number of events
 
 	The default is all events i.e. the same as --itrace=ibxe
 
@@ -24,3 +25,10 @@
 
 	Also the number of last branch entries (default 64, max. 1024) for
 	instructions or transactions events can be specified.
+
+	It is also possible to skip events (instructions, branches, transactions) generated
+	at the beginning of the trace. This is useful for ignoring initialization code.
+
+	--itrace=i0nss1000000
+
+	skips the first million instructions.
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index e9cd39a..778f54d 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -33,7 +33,7 @@
 
 -f::
 --force::
-        Don't complain, do it.
+        Don't do ownership validation.
 
 -v::
 --verbose::
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index d1deb57..3e9490b 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -75,7 +75,7 @@
 
 -f::
 --force::
-       Don't complain, do it.
+        Don't do ownership validation.
 
 --symfs=<directory>::
         Look for files with symbols relative to this directory.
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index ec723d0..a126e97 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -93,6 +93,67 @@
 You should refer to the processor specific documentation for getting these
 details. Some of them are referenced in the SEE ALSO section below.
 
+ARBITRARY PMUS
+--------------
+
+perf also supports an extended syntax for specifying raw parameters
+to PMUs. Using this typically requires looking up the specific event
+in the CPU vendor specific documentation.
+
+The available PMUs and their raw parameters can be listed with
+
+  ls /sys/devices/*/format
+
+For example, the "LSD.UOPS" core PMU raw event above could
+be specified as
+
+  perf stat -e cpu/event=0xa8,umask=0x1,name=LSD.UOPS_CYCLES,cmask=1/ ...
+
+PER SOCKET PMUS
+---------------
+
+Some PMUs are not associated with a core, but with a whole CPU socket.
+Events on these PMUs generally cannot be sampled, but only counted globally
+with perf stat -a. They can be bound to one logical CPU, but will measure
+all the CPUs in the same socket.
+
+This example measures memory bandwidth every second
+on the first memory controller on socket 0 of an Intel Xeon system
+
+  perf stat -C 0 -a uncore_imc_0/cas_count_read/,uncore_imc_0/cas_count_write/ -I 1000 ...
+
+Each memory controller has its own PMU.  Measuring the complete system
+bandwidth would require specifying all imc PMUs (see perf list output),
+and adding the values together.
+
+This example measures the combined core power every second
+
+  perf stat -I 1000 -e power/energy-cores/  -a
+
+ACCESS RESTRICTIONS
+-------------------
+
+For non-root users, generally only context-switched PMU events are available.
+These are normally only the events in the cpu PMU, the predefined events
+like cycles and instructions, and some software events.
+
+Other PMUs and global measurements are normally root only.
+Some event qualifiers, such as "any", are also root only.
+
+This can be overridden by setting the kernel.perf_event_paranoid
+sysctl to -1, which allows non-root users to use these events.
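+
+For example, one way to relax this restriction system-wide (as root, and
+only until the next reboot) is:
+
+  echo -1 > /proc/sys/kernel/perf_event_paranoid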
+
+For accessing trace point events perf needs to have read access to
+/sys/kernel/debug/tracing, even when perf_event_paranoid is in a relaxed
+setting.
+
+TRACING
+-------
+
+Some PMUs control advanced hardware tracing capabilities, such as Intel PT,
+which allow low overhead execution tracing.  These are described in a separate
+intel-pt.txt document.
+
 PARAMETERIZED EVENTS
 --------------------
 
@@ -106,6 +167,50 @@
 
   perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
 
+EVENT GROUPS
+------------
+
+Perf supports time-based multiplexing of events when the number of active
+events exceeds the number of hardware performance counters. Multiplexing
+can cause measurement errors when the workload changes its execution
+profile.
+
+When metrics are computed using formulas from event counts, it is useful to
+ensure some events are always measured together as a group to minimize multiplexing
+errors. Event groups can be specified using { }.
+
+  perf stat -e '{instructions,cycles}' ...
+
+The number of available performance counters depends on the CPU. A group
+cannot contain more events than available counters.
+For example, Intel Core CPUs typically have four generic performance counters
+for the core, plus three fixed counters for instructions, cycles and
+ref-cycles. Some special events have restrictions on which counter they
+can be scheduled on, and may not support multiple instances in a single group.
+When too many events are specified in the group, none of them will
+be measured.
+
+Globally pinned events can limit the number of counters available for
+other groups. On x86 systems, the NMI watchdog pins a counter by default.
+The NMI watchdog can be disabled as root with
+
+	echo 0 > /proc/sys/kernel/nmi_watchdog
+
+Events from multiple different PMUs cannot be mixed in a group, with
+some exceptions for software events.
+
+LEADER SAMPLING
+---------------
+
+perf also supports group leader sampling using the :S specifier.
+
+  perf record -e '{cycles,instructions}:S' ...
+  perf report --group
+
+Normally all events in an event group sample, but with :S only
+the first event (the leader) samples, and it only reads the values of the
+other events in the group.
+
 OPTIONS
 -------
 
@@ -143,5 +248,5 @@
 --------
 linkperf:perf-stat[1], linkperf:perf-top[1],
 linkperf:perf-record[1],
-http://www.intel.com/Assets/PDF/manual/253669.pdf[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide],
+http://www.intel.com/sdm/[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide],
 http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming]
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 43310d8..1d6092c 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -48,6 +48,14 @@
 	option can be passed in record mode. It will be interpreted the same way as perf
 	record.
 
+-K::
+--all-kernel::
+	Configure all used events to run in kernel space.
+
+-U::
+--all-user::
+	Configure all used events to run in user space.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 19aa175..8dbee83 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -347,6 +347,19 @@
 --all-user::
 Configure all used events to run in user space.
 
+--timestamp-filename::
+Append timestamp to output file name.
+
+--switch-output::
+Generate multiple perf.data files, timestamp prefixed, switching to a new one
+when receiving a SIGUSR2.
+
+A possible use case is, given an external event, to slice off a perf.data file
+that then gets processed, possibly via a perf script, to decide whether that
+particular perf.data snapshot should be kept or not.
+
+Implies --timestamp-filename, --no-buildid and --no-buildid-cache.
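+
+For example, assuming a single 'perf record --switch-output' session is
+running, a new snapshot can be requested with:
+
+  kill -USR2 $(pidof perf)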
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 1211399..ebaf849 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -248,7 +248,7 @@
 	Note that when using the --itrace option the synthesized callchain size
 	will override this value if the synthesized callchain size is bigger.
 
-	Default: 127
+	Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
 
 -G::
 --inverted::
@@ -285,7 +285,7 @@
 
 -f::
 --force::
-        Don't complain, do it.
+        Don't do ownership validation.
 
 --symfs=<directory>::
         Look for files with symbols relative to this directory.
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 8ff4df9..1cc08cc 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -50,6 +50,22 @@
 --dump-raw-trace=::
         Display verbose dump of the sched data.
 
+OPTIONS for 'perf sched map'
+----------------------------
+
+--compact::
+	Show only CPUs with activity. This helps with visualization on
+	systems with a high core count.
+
+--cpus::
+	Show just entries with activities for the given CPUs.
+
+--color-cpus::
+	Highlight the given CPUs.
+
+--color-pids::
+	Highlight the given pids.
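+
+For example, a quick way to look at only the busy CPUs of a large system is:
+
+  perf sched record -a sleep 1
+  perf sched map --compact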
+
 SEE ALSO
 --------
 linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 382ddfb..a856a10 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -259,9 +259,23 @@
 --full-source-path::
 	Show the full path for source files for srcline output.
 
+--max-stack::
+        Set the stack depth limit when parsing the callchain, anything
+        beyond the specified depth will be ignored. This is a trade-off
+        between information loss and faster processing especially for
+        workloads that can have a very long callchain stack.
+        Note that when using the --itrace option the synthesized callchain size
+        will override this value if the synthesized callchain size is bigger.
+
+        Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+
 --ns::
 	Use 9 decimal places when displaying time (i.e. show the nanoseconds)
 
+-f::
+--force::
+	Don't do ownership validation.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script-perl[1],
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 19f046f..91d638d 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -177,7 +177,7 @@
 	between information loss and faster processing especially for
 	workloads that can have a very long callchain stack.
 
-	Default: 127
+	Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
 
 --ignore-callees=<regex>::
         Ignore callees of the function(s) matching the given regex.
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 13293de..6afe201 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -117,9 +117,41 @@
 --syscalls::
 	Trace system calls. This option is enabled by default.
 
+--call-graph [mode,type,min[,limit],order[,key][,branch]]::
+        Setup and enable call-graph (stack chain/backtrace) recording.
+        See `--call-graph` section in perf-record and perf-report
+        man pages for details. The modes that are most useful in 'perf trace'
+        are 'dwarf' and 'lbr', where available; try: 'perf trace --call-graph dwarf'.
+
+        Using this will, for the root user, bump the value of --mmap-pages to 4
+        times the maximum for non-root users, based on the kernel.perf_event_mlock_kb
+        sysctl. This is done only if the user doesn't specify a --mmap-pages value.
+
+--kernel-syscall-graph::
+	 Show the kernel callchains on the syscall exit path.
+
 --event::
 	Trace other events, see 'perf list' for a complete list.
 
+--max-stack::
+        Set the stack depth limit when parsing the callchain, anything
+        beyond the specified depth will be ignored. Note that at this point
+        this only affects the presentation, i.e. the kernel does not limit
+        the depth; the overhead of callchains needs to be set via the
+        knobs in --call-graph dwarf.
+
+        Implies '--call-graph dwarf' when --call-graph not present on the
+        command line, on systems where DWARF unwinding was built in.
+
+        Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+
+--min-stack::
+        Set the stack depth limit when parsing the callchain, anything
+        below the specified depth will be ignored. Disabled by default.
+
+        Implies '--call-graph dwarf' when --call-graph not present on the
+        command line, on systems where DWARF unwinding was built in.
+
 --proc-map-timeout::
 	When processing pre-existing threads /proc/XXX/mmap, it may take a long time,
 	because the file may be huge. A time out is needed in such cases.
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 2e1fa23..8c8c6b9 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -74,6 +74,7 @@
 arch/*/include/uapi/asm/perf_regs.h
 arch/*/lib/memcpy*.S
 arch/*/lib/memset*.S
+arch/*/include/asm/*features.h
 include/linux/poison.h
 include/linux/hw_breakpoint.h
 include/uapi/linux/perf_event.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 000ea21..bde8cba 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -183,6 +183,11 @@
 include config/Makefile
 endif
 
+ifeq ($(config),0)
+include $(srctree)/tools/scripts/Makefile.arch
+-include arch/$(ARCH)/Makefile
+endif
+
 # The FEATURE_DUMP_EXPORT holds location of the actual
 # FEATURE_DUMP file to be used to bypass feature detection
 # (for bpf or any other subproject)
@@ -297,8 +302,6 @@
 # because maintaining the nesting to match is a pain.  If
 # we had "elif" things would have been much nicer...
 
--include arch/$(ARCH)/Makefile
-
 ifneq ($(OUTPUT),)
   CFLAGS += -I$(OUTPUT)
 endif
@@ -390,7 +393,7 @@
 __build-dir = $(subst $(OUTPUT),,$(dir $@))
 build-dir   = $(if $(__build-dir),$(__build-dir),.)
 
-prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h fixdep
+prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h fixdep archheaders
 
 $(OUTPUT)%.o: %.c prepare FORCE
 	$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -430,7 +433,7 @@
 
 LIBPERF_IN := $(OUTPUT)libperf-in.o
 
-$(LIBPERF_IN): fixdep FORCE
+$(LIBPERF_IN): prepare fixdep FORCE
 	$(Q)$(MAKE) $(build)=libperf
 
 $(LIB_FILE): $(LIBPERF_IN)
@@ -625,7 +628,7 @@
 	$(call QUIET_CLEAN, config)
 	$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ $(if $(OUTPUT),OUTPUT=$(OUTPUT)feature/,) clean >/dev/null
 
-clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean
+clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean
 	$(call QUIET_CLEAN, core-objs)  $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
 	$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
 	$(Q)$(RM) $(OUTPUT).config-detected
@@ -662,5 +665,5 @@
 .PHONY: all install clean config-clean strip install-gtk
 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
 .PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
-.PHONY: libtraceevent_plugins
+.PHONY: libtraceevent_plugins archheaders
 
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index 56e05f1..cc39309 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -3,4 +3,5 @@
 endif
 
 HAVE_KVM_STAT_SUPPORT := 1
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
 PERF_HAVE_JITDUMP := 1
diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c
index 733151c..41bdf95 100644
--- a/tools/perf/arch/powerpc/util/dwarf-regs.c
+++ b/tools/perf/arch/powerpc/util/dwarf-regs.c
@@ -10,19 +10,26 @@
  */
 
 #include <stddef.h>
+#include <errno.h>
+#include <string.h>
 #include <dwarf-regs.h>
-
+#include <linux/ptrace.h>
+#include <linux/kernel.h>
+#include "util.h"
 
 struct pt_regs_dwarfnum {
 	const char *name;
 	unsigned int dwarfnum;
+	unsigned int ptregs_offset;
 };
 
-#define STR(s) #s
-#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
-#define GPR_DWARFNUM_NAME(num)	\
-	{.name = STR(%gpr##num), .dwarfnum = num}
-#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
+#define REG_DWARFNUM_NAME(r, num)					\
+		{.name = STR(%)STR(r), .dwarfnum = num,			\
+		.ptregs_offset = offsetof(struct pt_regs, r)}
+#define GPR_DWARFNUM_NAME(num)						\
+		{.name = STR(%gpr##num), .dwarfnum = num,		\
+		.ptregs_offset = offsetof(struct pt_regs, gpr[num])}
+#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0, .ptregs_offset = 0}
 
 /*
  * Reference:
@@ -61,12 +68,12 @@
 	GPR_DWARFNUM_NAME(29),
 	GPR_DWARFNUM_NAME(30),
 	GPR_DWARFNUM_NAME(31),
-	REG_DWARFNUM_NAME("%msr",   66),
-	REG_DWARFNUM_NAME("%ctr",   109),
-	REG_DWARFNUM_NAME("%link",  108),
-	REG_DWARFNUM_NAME("%xer",   101),
-	REG_DWARFNUM_NAME("%dar",   119),
-	REG_DWARFNUM_NAME("%dsisr", 118),
+	REG_DWARFNUM_NAME(msr,   66),
+	REG_DWARFNUM_NAME(ctr,   109),
+	REG_DWARFNUM_NAME(link,  108),
+	REG_DWARFNUM_NAME(xer,   101),
+	REG_DWARFNUM_NAME(dar,   119),
+	REG_DWARFNUM_NAME(dsisr, 118),
 	REG_DWARFNUM_END,
 };
 
@@ -86,3 +93,12 @@
 			return roff->name;
 	return NULL;
 }
+
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_dwarfnum *roff;
+	for (roff = regdwarfnum_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->ptregs_offset;
+	return -EINVAL;
+}
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index 6138bde..f8ccee1 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -4,6 +4,8 @@
 #include <stdlib.h>
 #include <string.h>
 #include <linux/stringify.h>
+#include "header.h"
+#include "util.h"
 
 #define mfspr(rn)       ({unsigned long rval; \
 			 asm volatile("mfspr %0," __stringify(rn) \
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index bbc1a50..c6d0f91 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -19,12 +19,6 @@
 	       ehdr.e_type == ET_DYN;
 }
 
-#if defined(_CALL_ELF) && _CALL_ELF == 2
-void arch__elf_sym_adjust(GElf_Sym *sym)
-{
-	sym->st_value += PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
-}
-#endif
 #endif
 
 #if !defined(_CALL_ELF) || _CALL_ELF != 2
@@ -65,18 +59,45 @@
 	return true;
 }
 
+#ifdef HAVE_LIBELF_SUPPORT
+void arch__sym_update(struct symbol *s, GElf_Sym *sym)
+{
+	s->arch_sym = sym->st_other;
+}
+#endif
+
 #define PPC64LE_LEP_OFFSET	8
 
 void arch__fix_tev_from_maps(struct perf_probe_event *pev,
-			     struct probe_trace_event *tev, struct map *map)
+			     struct probe_trace_event *tev, struct map *map,
+			     struct symbol *sym)
 {
+	int lep_offset;
+
 	/*
-	 * ppc64 ABIv2 local entry point is currently always 2 instructions
-	 * (8 bytes) after the global entry point.
+	 * When probing at a function entry point, we normally always want the
+	 * LEP since that catches calls to the function through both the GEP and
+	 * the LEP. Hence, we would like to probe at an offset of 8 bytes if
+	 * the user only specified the function entry.
+	 *
+	 * However, if the user specifies an offset, we fall back to using the
+	 * GEP since all userspace applications (objdump/readelf) show function
+	 * disassembly with offsets from the GEP.
+	 *
+	 * In addition, we shouldn't specify an offset for kretprobes.
 	 */
-	if (!pev->uprobes && map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
-		tev->point.address += PPC64LE_LEP_OFFSET;
+	if (pev->point.offset || pev->point.retprobe || !map || !sym)
+		return;
+
+	lep_offset = PPC64_LOCAL_ENTRY_OFFSET(sym->arch_sym);
+
+	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS)
 		tev->point.offset += PPC64LE_LEP_OFFSET;
+	else if (lep_offset) {
+		if (pev->uprobes)
+			tev->point.address += lep_offset;
+		else
+			tev->point.offset += lep_offset;
 	}
 }
 #endif
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 269af21..6c9211b 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -4,3 +4,26 @@
 HAVE_KVM_STAT_SUPPORT := 1
 PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
 PERF_HAVE_JITDUMP := 1
+
+###
+# Syscall table generation
+#
+
+out    := $(OUTPUT)arch/x86/include/generated/asm
+header := $(out)/syscalls_64.c
+sys    := $(srctree)/tools/perf/arch/x86/entry/syscalls
+systbl := $(sys)/syscalltbl.sh
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sys)/syscall_64.tbl $(systbl)
+	@(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+        (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \
+        || echo "Warning: x86_64's syscall_64.tbl differs from kernel" >&2 )) || true
+	$(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@
+
+clean::
+	$(call QUIET_CLEAN, x86) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
new file mode 100644
index 0000000..cac6d17
--- /dev/null
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -0,0 +1,376 @@
+#
+# 64-bit system call numbers and entry vectors
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The abi is "common", "64" or "x32" for this file.
+#
+0	common	read			sys_read
+1	common	write			sys_write
+2	common	open			sys_open
+3	common	close			sys_close
+4	common	stat			sys_newstat
+5	common	fstat			sys_newfstat
+6	common	lstat			sys_newlstat
+7	common	poll			sys_poll
+8	common	lseek			sys_lseek
+9	common	mmap			sys_mmap
+10	common	mprotect		sys_mprotect
+11	common	munmap			sys_munmap
+12	common	brk			sys_brk
+13	64	rt_sigaction		sys_rt_sigaction
+14	common	rt_sigprocmask		sys_rt_sigprocmask
+15	64	rt_sigreturn		sys_rt_sigreturn/ptregs
+16	64	ioctl			sys_ioctl
+17	common	pread64			sys_pread64
+18	common	pwrite64		sys_pwrite64
+19	64	readv			sys_readv
+20	64	writev			sys_writev
+21	common	access			sys_access
+22	common	pipe			sys_pipe
+23	common	select			sys_select
+24	common	sched_yield		sys_sched_yield
+25	common	mremap			sys_mremap
+26	common	msync			sys_msync
+27	common	mincore			sys_mincore
+28	common	madvise			sys_madvise
+29	common	shmget			sys_shmget
+30	common	shmat			sys_shmat
+31	common	shmctl			sys_shmctl
+32	common	dup			sys_dup
+33	common	dup2			sys_dup2
+34	common	pause			sys_pause
+35	common	nanosleep		sys_nanosleep
+36	common	getitimer		sys_getitimer
+37	common	alarm			sys_alarm
+38	common	setitimer		sys_setitimer
+39	common	getpid			sys_getpid
+40	common	sendfile		sys_sendfile64
+41	common	socket			sys_socket
+42	common	connect			sys_connect
+43	common	accept			sys_accept
+44	common	sendto			sys_sendto
+45	64	recvfrom		sys_recvfrom
+46	64	sendmsg			sys_sendmsg
+47	64	recvmsg			sys_recvmsg
+48	common	shutdown		sys_shutdown
+49	common	bind			sys_bind
+50	common	listen			sys_listen
+51	common	getsockname		sys_getsockname
+52	common	getpeername		sys_getpeername
+53	common	socketpair		sys_socketpair
+54	64	setsockopt		sys_setsockopt
+55	64	getsockopt		sys_getsockopt
+56	common	clone			sys_clone/ptregs
+57	common	fork			sys_fork/ptregs
+58	common	vfork			sys_vfork/ptregs
+59	64	execve			sys_execve/ptregs
+60	common	exit			sys_exit
+61	common	wait4			sys_wait4
+62	common	kill			sys_kill
+63	common	uname			sys_newuname
+64	common	semget			sys_semget
+65	common	semop			sys_semop
+66	common	semctl			sys_semctl
+67	common	shmdt			sys_shmdt
+68	common	msgget			sys_msgget
+69	common	msgsnd			sys_msgsnd
+70	common	msgrcv			sys_msgrcv
+71	common	msgctl			sys_msgctl
+72	common	fcntl			sys_fcntl
+73	common	flock			sys_flock
+74	common	fsync			sys_fsync
+75	common	fdatasync		sys_fdatasync
+76	common	truncate		sys_truncate
+77	common	ftruncate		sys_ftruncate
+78	common	getdents		sys_getdents
+79	common	getcwd			sys_getcwd
+80	common	chdir			sys_chdir
+81	common	fchdir			sys_fchdir
+82	common	rename			sys_rename
+83	common	mkdir			sys_mkdir
+84	common	rmdir			sys_rmdir
+85	common	creat			sys_creat
+86	common	link			sys_link
+87	common	unlink			sys_unlink
+88	common	symlink			sys_symlink
+89	common	readlink		sys_readlink
+90	common	chmod			sys_chmod
+91	common	fchmod			sys_fchmod
+92	common	chown			sys_chown
+93	common	fchown			sys_fchown
+94	common	lchown			sys_lchown
+95	common	umask			sys_umask
+96	common	gettimeofday		sys_gettimeofday
+97	common	getrlimit		sys_getrlimit
+98	common	getrusage		sys_getrusage
+99	common	sysinfo			sys_sysinfo
+100	common	times			sys_times
+101	64	ptrace			sys_ptrace
+102	common	getuid			sys_getuid
+103	common	syslog			sys_syslog
+104	common	getgid			sys_getgid
+105	common	setuid			sys_setuid
+106	common	setgid			sys_setgid
+107	common	geteuid			sys_geteuid
+108	common	getegid			sys_getegid
+109	common	setpgid			sys_setpgid
+110	common	getppid			sys_getppid
+111	common	getpgrp			sys_getpgrp
+112	common	setsid			sys_setsid
+113	common	setreuid		sys_setreuid
+114	common	setregid		sys_setregid
+115	common	getgroups		sys_getgroups
+116	common	setgroups		sys_setgroups
+117	common	setresuid		sys_setresuid
+118	common	getresuid		sys_getresuid
+119	common	setresgid		sys_setresgid
+120	common	getresgid		sys_getresgid
+121	common	getpgid			sys_getpgid
+122	common	setfsuid		sys_setfsuid
+123	common	setfsgid		sys_setfsgid
+124	common	getsid			sys_getsid
+125	common	capget			sys_capget
+126	common	capset			sys_capset
+127	64	rt_sigpending		sys_rt_sigpending
+128	64	rt_sigtimedwait		sys_rt_sigtimedwait
+129	64	rt_sigqueueinfo		sys_rt_sigqueueinfo
+130	common	rt_sigsuspend		sys_rt_sigsuspend
+131	64	sigaltstack		sys_sigaltstack
+132	common	utime			sys_utime
+133	common	mknod			sys_mknod
+134	64	uselib
+135	common	personality		sys_personality
+136	common	ustat			sys_ustat
+137	common	statfs			sys_statfs
+138	common	fstatfs			sys_fstatfs
+139	common	sysfs			sys_sysfs
+140	common	getpriority		sys_getpriority
+141	common	setpriority		sys_setpriority
+142	common	sched_setparam		sys_sched_setparam
+143	common	sched_getparam		sys_sched_getparam
+144	common	sched_setscheduler	sys_sched_setscheduler
+145	common	sched_getscheduler	sys_sched_getscheduler
+146	common	sched_get_priority_max	sys_sched_get_priority_max
+147	common	sched_get_priority_min	sys_sched_get_priority_min
+148	common	sched_rr_get_interval	sys_sched_rr_get_interval
+149	common	mlock			sys_mlock
+150	common	munlock			sys_munlock
+151	common	mlockall		sys_mlockall
+152	common	munlockall		sys_munlockall
+153	common	vhangup			sys_vhangup
+154	common	modify_ldt		sys_modify_ldt
+155	common	pivot_root		sys_pivot_root
+156	64	_sysctl			sys_sysctl
+157	common	prctl			sys_prctl
+158	common	arch_prctl		sys_arch_prctl
+159	common	adjtimex		sys_adjtimex
+160	common	setrlimit		sys_setrlimit
+161	common	chroot			sys_chroot
+162	common	sync			sys_sync
+163	common	acct			sys_acct
+164	common	settimeofday		sys_settimeofday
+165	common	mount			sys_mount
+166	common	umount2			sys_umount
+167	common	swapon			sys_swapon
+168	common	swapoff			sys_swapoff
+169	common	reboot			sys_reboot
+170	common	sethostname		sys_sethostname
+171	common	setdomainname		sys_setdomainname
+172	common	iopl			sys_iopl/ptregs
+173	common	ioperm			sys_ioperm
+174	64	create_module
+175	common	init_module		sys_init_module
+176	common	delete_module		sys_delete_module
+177	64	get_kernel_syms
+178	64	query_module
+179	common	quotactl		sys_quotactl
+180	64	nfsservctl
+181	common	getpmsg
+182	common	putpmsg
+183	common	afs_syscall
+184	common	tuxcall
+185	common	security
+186	common	gettid			sys_gettid
+187	common	readahead		sys_readahead
+188	common	setxattr		sys_setxattr
+189	common	lsetxattr		sys_lsetxattr
+190	common	fsetxattr		sys_fsetxattr
+191	common	getxattr		sys_getxattr
+192	common	lgetxattr		sys_lgetxattr
+193	common	fgetxattr		sys_fgetxattr
+194	common	listxattr		sys_listxattr
+195	common	llistxattr		sys_llistxattr
+196	common	flistxattr		sys_flistxattr
+197	common	removexattr		sys_removexattr
+198	common	lremovexattr		sys_lremovexattr
+199	common	fremovexattr		sys_fremovexattr
+200	common	tkill			sys_tkill
+201	common	time			sys_time
+202	common	futex			sys_futex
+203	common	sched_setaffinity	sys_sched_setaffinity
+204	common	sched_getaffinity	sys_sched_getaffinity
+205	64	set_thread_area
+206	64	io_setup		sys_io_setup
+207	common	io_destroy		sys_io_destroy
+208	common	io_getevents		sys_io_getevents
+209	64	io_submit		sys_io_submit
+210	common	io_cancel		sys_io_cancel
+211	64	get_thread_area
+212	common	lookup_dcookie		sys_lookup_dcookie
+213	common	epoll_create		sys_epoll_create
+214	64	epoll_ctl_old
+215	64	epoll_wait_old
+216	common	remap_file_pages	sys_remap_file_pages
+217	common	getdents64		sys_getdents64
+218	common	set_tid_address		sys_set_tid_address
+219	common	restart_syscall		sys_restart_syscall
+220	common	semtimedop		sys_semtimedop
+221	common	fadvise64		sys_fadvise64
+222	64	timer_create		sys_timer_create
+223	common	timer_settime		sys_timer_settime
+224	common	timer_gettime		sys_timer_gettime
+225	common	timer_getoverrun	sys_timer_getoverrun
+226	common	timer_delete		sys_timer_delete
+227	common	clock_settime		sys_clock_settime
+228	common	clock_gettime		sys_clock_gettime
+229	common	clock_getres		sys_clock_getres
+230	common	clock_nanosleep		sys_clock_nanosleep
+231	common	exit_group		sys_exit_group
+232	common	epoll_wait		sys_epoll_wait
+233	common	epoll_ctl		sys_epoll_ctl
+234	common	tgkill			sys_tgkill
+235	common	utimes			sys_utimes
+236	64	vserver
+237	common	mbind			sys_mbind
+238	common	set_mempolicy		sys_set_mempolicy
+239	common	get_mempolicy		sys_get_mempolicy
+240	common	mq_open			sys_mq_open
+241	common	mq_unlink		sys_mq_unlink
+242	common	mq_timedsend		sys_mq_timedsend
+243	common	mq_timedreceive		sys_mq_timedreceive
+244	64	mq_notify		sys_mq_notify
+245	common	mq_getsetattr		sys_mq_getsetattr
+246	64	kexec_load		sys_kexec_load
+247	64	waitid			sys_waitid
+248	common	add_key			sys_add_key
+249	common	request_key		sys_request_key
+250	common	keyctl			sys_keyctl
+251	common	ioprio_set		sys_ioprio_set
+252	common	ioprio_get		sys_ioprio_get
+253	common	inotify_init		sys_inotify_init
+254	common	inotify_add_watch	sys_inotify_add_watch
+255	common	inotify_rm_watch	sys_inotify_rm_watch
+256	common	migrate_pages		sys_migrate_pages
+257	common	openat			sys_openat
+258	common	mkdirat			sys_mkdirat
+259	common	mknodat			sys_mknodat
+260	common	fchownat		sys_fchownat
+261	common	futimesat		sys_futimesat
+262	common	newfstatat		sys_newfstatat
+263	common	unlinkat		sys_unlinkat
+264	common	renameat		sys_renameat
+265	common	linkat			sys_linkat
+266	common	symlinkat		sys_symlinkat
+267	common	readlinkat		sys_readlinkat
+268	common	fchmodat		sys_fchmodat
+269	common	faccessat		sys_faccessat
+270	common	pselect6		sys_pselect6
+271	common	ppoll			sys_ppoll
+272	common	unshare			sys_unshare
+273	64	set_robust_list		sys_set_robust_list
+274	64	get_robust_list		sys_get_robust_list
+275	common	splice			sys_splice
+276	common	tee			sys_tee
+277	common	sync_file_range		sys_sync_file_range
+278	64	vmsplice		sys_vmsplice
+279	64	move_pages		sys_move_pages
+280	common	utimensat		sys_utimensat
+281	common	epoll_pwait		sys_epoll_pwait
+282	common	signalfd		sys_signalfd
+283	common	timerfd_create		sys_timerfd_create
+284	common	eventfd			sys_eventfd
+285	common	fallocate		sys_fallocate
+286	common	timerfd_settime		sys_timerfd_settime
+287	common	timerfd_gettime		sys_timerfd_gettime
+288	common	accept4			sys_accept4
+289	common	signalfd4		sys_signalfd4
+290	common	eventfd2		sys_eventfd2
+291	common	epoll_create1		sys_epoll_create1
+292	common	dup3			sys_dup3
+293	common	pipe2			sys_pipe2
+294	common	inotify_init1		sys_inotify_init1
+295	64	preadv			sys_preadv
+296	64	pwritev			sys_pwritev
+297	64	rt_tgsigqueueinfo	sys_rt_tgsigqueueinfo
+298	common	perf_event_open		sys_perf_event_open
+299	64	recvmmsg		sys_recvmmsg
+300	common	fanotify_init		sys_fanotify_init
+301	common	fanotify_mark		sys_fanotify_mark
+302	common	prlimit64		sys_prlimit64
+303	common	name_to_handle_at	sys_name_to_handle_at
+304	common	open_by_handle_at	sys_open_by_handle_at
+305	common	clock_adjtime		sys_clock_adjtime
+306	common	syncfs			sys_syncfs
+307	64	sendmmsg		sys_sendmmsg
+308	common	setns			sys_setns
+309	common	getcpu			sys_getcpu
+310	64	process_vm_readv	sys_process_vm_readv
+311	64	process_vm_writev	sys_process_vm_writev
+312	common	kcmp			sys_kcmp
+313	common	finit_module		sys_finit_module
+314	common	sched_setattr		sys_sched_setattr
+315	common	sched_getattr		sys_sched_getattr
+316	common	renameat2		sys_renameat2
+317	common	seccomp			sys_seccomp
+318	common	getrandom		sys_getrandom
+319	common	memfd_create		sys_memfd_create
+320	common	kexec_file_load		sys_kexec_file_load
+321	common	bpf			sys_bpf
+322	64	execveat		sys_execveat/ptregs
+323	common	userfaultfd		sys_userfaultfd
+324	common	membarrier		sys_membarrier
+325	common	mlock2			sys_mlock2
+326	common	copy_file_range		sys_copy_file_range
+327	64	preadv2			sys_preadv2
+328	64	pwritev2		sys_pwritev2
+
+#
+# x32-specific system call numbers start at 512 to avoid cache impact
+# for native 64-bit operation.
+#
+512	x32	rt_sigaction		compat_sys_rt_sigaction
+513	x32	rt_sigreturn		sys32_x32_rt_sigreturn
+514	x32	ioctl			compat_sys_ioctl
+515	x32	readv			compat_sys_readv
+516	x32	writev			compat_sys_writev
+517	x32	recvfrom		compat_sys_recvfrom
+518	x32	sendmsg			compat_sys_sendmsg
+519	x32	recvmsg			compat_sys_recvmsg
+520	x32	execve			compat_sys_execve/ptregs
+521	x32	ptrace			compat_sys_ptrace
+522	x32	rt_sigpending		compat_sys_rt_sigpending
+523	x32	rt_sigtimedwait		compat_sys_rt_sigtimedwait
+524	x32	rt_sigqueueinfo		compat_sys_rt_sigqueueinfo
+525	x32	sigaltstack		compat_sys_sigaltstack
+526	x32	timer_create		compat_sys_timer_create
+527	x32	mq_notify		compat_sys_mq_notify
+528	x32	kexec_load		compat_sys_kexec_load
+529	x32	waitid			compat_sys_waitid
+530	x32	set_robust_list		compat_sys_set_robust_list
+531	x32	get_robust_list		compat_sys_get_robust_list
+532	x32	vmsplice		compat_sys_vmsplice
+533	x32	move_pages		compat_sys_move_pages
+534	x32	preadv			compat_sys_preadv64
+535	x32	pwritev			compat_sys_pwritev64
+536	x32	rt_tgsigqueueinfo	compat_sys_rt_tgsigqueueinfo
+537	x32	recvmmsg		compat_sys_recvmmsg
+538	x32	sendmmsg		compat_sys_sendmmsg
+539	x32	process_vm_readv	compat_sys_process_vm_readv
+540	x32	process_vm_writev	compat_sys_process_vm_writev
+541	x32	setsockopt		compat_sys_setsockopt
+542	x32	getsockopt		compat_sys_getsockopt
+543	x32	io_setup		compat_sys_io_setup
+544	x32	io_submit		compat_sys_io_submit
+545	x32	execveat		compat_sys_execveat/ptregs
diff --git a/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh b/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
new file mode 100755
index 0000000..49a18b9
--- /dev/null
+++ b/tools/perf/arch/x86/entry/syscalls/syscalltbl.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+in="$1"
+arch="$2"
+
+syscall_macro() {
+    nr="$1"
+    name="$2"
+
+    echo "	[$nr] = \"$name\","
+}
+
+emit() {
+    nr="$1"
+    entry="$2"
+
+    syscall_macro "$nr" "$entry"
+}
+
+echo "static const char *syscalltbl_${arch}[] = {"
+
+sorted_table=$(mktemp /tmp/syscalltbl.XXXXXX)
+grep '^[0-9]' "$in" | sort -n > $sorted_table
+
+max_nr=0
+while read nr abi name entry compat; do
+    if [ $nr -ge 512 ] ; then # discard compat syscalls
+        break
+    fi
+
+    emit "$nr" "$name"
+    max_nr=$nr
+done < $sorted_table
+
+rm -f $sorted_table
+
+echo "};"
+
+echo "#define SYSCALLTBL_${arch}_MAX_ID ${max_nr}"
diff --git a/tools/perf/arch/x86/tests/perf-time-to-tsc.c b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
index 9d29ee2..d4aa567 100644
--- a/tools/perf/arch/x86/tests/perf-time-to-tsc.c
+++ b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -71,7 +71,7 @@
 
 	CHECK__(parse_events(evlist, "cycles:u", NULL));
 
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 
 	evsel = perf_evlist__first(evlist);
 
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
index 9223c16..1f86ee8 100644
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -63,6 +63,8 @@
 # define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
 #endif
 
+/* TODO: switching by dwarf address size */
+#ifndef __x86_64__
 static const struct pt_regs_offset x86_32_regoffset_table[] = {
 	REG_OFFSET_NAME_32("%ax",	eax),
 	REG_OFFSET_NAME_32("%cx",	ecx),
@@ -75,6 +77,8 @@
 	REG_OFFSET_END,
 };
 
+#define regoffset_table x86_32_regoffset_table
+#else
 static const struct pt_regs_offset x86_64_regoffset_table[] = {
 	REG_OFFSET_NAME_64("%ax",	rax),
 	REG_OFFSET_NAME_64("%dx",	rdx),
@@ -95,11 +99,7 @@
 	REG_OFFSET_END,
 };
 
-/* TODO: switching by dwarf address size */
-#ifdef __x86_64__
 #define regoffset_table x86_64_regoffset_table
-#else
-#define regoffset_table x86_32_regoffset_table
 #endif
 
 /* Minus 1 for the ending REG_OFFSET_END */
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index d66f9ad..7dc3063 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -438,6 +438,11 @@
 	if (!intel_bts_pmu)
 		return NULL;
 
+	if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
+		*err = -errno;
+		return NULL;
+	}
+
 	btsr = zalloc(sizeof(struct intel_bts_recording));
 	if (!btsr) {
 		*err = -ENOMEM;
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index a339517..a07b960 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -1027,6 +1027,11 @@
 	if (!intel_pt_pmu)
 		return NULL;
 
+	if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
+		*err = -errno;
+		return NULL;
+	}
+
 	ptr = zalloc(sizeof(struct intel_pt_recording));
 	if (!ptr) {
 		*err = -ENOMEM;
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index fd28684..357f1b1 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -7,7 +7,6 @@
 #include <linux/types.h>
 #include "../../util/debug.h"
 #include "../../util/tsc.h"
-#include "tsc.h"
 
 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
 			     struct perf_tsc_conversion *tc)
@@ -46,3 +45,34 @@
 
 	return low | ((u64)high) << 32;
 }
+
+int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
+				struct perf_tool *tool,
+				perf_event__handler_t process,
+				struct machine *machine)
+{
+	union perf_event event = {
+		.time_conv = {
+			.header = {
+				.type = PERF_RECORD_TIME_CONV,
+				.size = sizeof(struct time_conv_event),
+			},
+		},
+	};
+	struct perf_tsc_conversion tc;
+	int err;
+
+	err = perf_read_tsc_conversion(pc, &tc);
+	if (err == -EOPNOTSUPP)
+		return 0;
+	if (err)
+		return err;
+
+	pr_debug2("Synthesizing TSC conversion information\n");
+
+	event.time_conv.time_mult  = tc.time_mult;
+	event.time_conv.time_shift = tc.time_shift;
+	event.time_conv.time_zero  = tc.time_zero;
+
+	return process(tool, &event, NULL, machine);
+}
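
The three fields carried by PERF_RECORD_TIME_CONV are exactly what a consumer needs to turn raw TSC values into perf timestamps. As a sketch, assuming the conversion documented for struct perf_event_mmap_page (the helper name below is made up for illustration):

#include <stdint.h>

/* Illustrative only: convert a raw TSC value using the synthesized fields. */
static uint64_t tsc_to_perf_time_sketch(uint64_t cyc, uint16_t time_shift,
					uint32_t time_mult, uint64_t time_zero)
{
	uint64_t quot = cyc >> time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

	return time_zero + quot * time_mult +
	       ((rem * time_mult) >> time_shift);
}

This mirrors the parameters that perf_read_tsc_conversion() pulls out of the mmapped page, so report-side code can reconstruct perf-clock timestamps from hardware-timestamped traces.
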
diff --git a/tools/perf/arch/x86/util/tsc.h b/tools/perf/arch/x86/util/tsc.h
deleted file mode 100644
index 2edc4d3..0000000
--- a/tools/perf/arch/x86/util/tsc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef TOOLS_PERF_ARCH_X86_UTIL_TSC_H__
-#define TOOLS_PERF_ARCH_X86_UTIL_TSC_H__
-
-#include <linux/types.h>
-
-struct perf_tsc_conversion {
-	u16 time_shift;
-	u32 time_mult;
-	u64 time_zero;
-};
-
-struct perf_event_mmap_page;
-
-int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
-			     struct perf_tsc_conversion *tc);
-
-#endif /* TOOLS_PERF_ARCH_X86_UTIL_TSC_H__ */
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 6a18ce2..6952db6 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -83,7 +83,7 @@
 	do {
 		int ret;
 	again:
-		ret = futex_lock_pi(w->futex, NULL, 0, futex_flag);
+		ret = futex_lock_pi(w->futex, NULL, futex_flag);
 
 		if (ret) { /* handle lock acquisition */
 			if (!silent)
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
index d44de9f..b2e06d1 100644
--- a/tools/perf/bench/futex.h
+++ b/tools/perf/bench/futex.h
@@ -57,13 +57,11 @@
 
 /**
  * futex_lock_pi() - block on uaddr as a PI mutex
- * @detect:	whether (1) or not (0) to perform deadlock detection
  */
 static inline int
-futex_lock_pi(u_int32_t *uaddr, struct timespec *timeout, int detect,
-	      int opflags)
+futex_lock_pi(u_int32_t *uaddr, struct timespec *timeout, int opflags)
 {
-	return futex(uaddr, FUTEX_LOCK_PI, detect, timeout, NULL, 0, opflags);
+	return futex(uaddr, FUTEX_LOCK_PI, 0, timeout, NULL, 0, opflags);
 }
 
 /**
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
index a91aa85..2b54d0f 100644
--- a/tools/perf/bench/mem-functions.c
+++ b/tools/perf/bench/mem-functions.c
@@ -6,6 +6,7 @@
  * Written by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
  */
 
+#include "debug.h"
 #include "../perf.h"
 #include "../util/util.h"
 #include <subcmd/parse-options.h>
@@ -63,14 +64,16 @@
 	.config		= PERF_COUNT_HW_CPU_CYCLES
 };
 
-static void init_cycles(void)
+static int init_cycles(void)
 {
 	cycles_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, perf_event_open_cloexec_flag());
 
-	if (cycles_fd < 0 && errno == ENOSYS)
-		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
-	else
-		BUG_ON(cycles_fd < 0);
+	if (cycles_fd < 0 && errno == ENOSYS) {
+		pr_debug("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+		return -1;
+	}
+
+	return cycles_fd;
 }
 
 static u64 get_cycles(void)
@@ -155,8 +158,13 @@
 
 	argc = parse_options(argc, argv, options, info->usage, 0);
 
-	if (use_cycles)
-		init_cycles();
+	if (use_cycles) {
+		i = init_cycles();
+		if (i < 0) {
+			fprintf(stderr, "Failed to open cycles counter\n");
+			return i;
+		}
+	}
 
 	size = (size_t)perf_atoll((char *)size_str);
 	size_total = (double)size * nr_loops;
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index c42448e..fe1b77f 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -12,6 +12,7 @@
 #include <subcmd/parse-options.h>
 #include "util/util.h"
 #include "util/debug.h"
+#include "util/config.h"
 
 static bool use_system_config, use_user_config;
 
@@ -32,13 +33,28 @@
 	OPT_END()
 };
 
-static int show_config(const char *key, const char *value,
-		       void *cb __maybe_unused)
+static int show_config(struct perf_config_set *set)
 {
-	if (value)
-		printf("%s=%s\n", key, value);
-	else
-		printf("%s\n", key);
+	struct perf_config_section *section;
+	struct perf_config_item *item;
+	struct list_head *sections;
+
+	if (set == NULL)
+		return -1;
+
+	sections = &set->sections;
+	if (list_empty(sections))
+		return -1;
+
+	list_for_each_entry(section, sections, node) {
+		list_for_each_entry(item, &section->items, node) {
+			char *value = item->value;
+
+			if (value)
+				printf("%s.%s=%s\n", section->name,
+				       item->name, value);
+		}
+	}
 
 	return 0;
 }
@@ -46,6 +62,7 @@
 int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
 {
 	int ret = 0;
+	struct perf_config_set *set;
 	char *user_config = mkpath("%s/.perfconfig", getenv("HOME"));
 
 	argc = parse_options(argc, argv, config_options, config_usage,
@@ -63,13 +80,19 @@
 	else if (use_user_config)
 		config_exclusive_filename = user_config;
 
+	set = perf_config_set__new();
+	if (!set) {
+		ret = -1;
+		goto out_err;
+	}
+
 	switch (actions) {
 	case ACTION_LIST:
 		if (argc) {
 			pr_err("Error: takes no arguments\n");
 			parse_options_usage(config_usage, config_options, "l", 1);
 		} else {
-			ret = perf_config(show_config, NULL);
+			ret = show_config(set);
 			if (ret < 0) {
 				const char * config_filename = config_exclusive_filename;
 				if (!config_exclusive_filename)
@@ -83,5 +106,7 @@
 		usage_with_options(config_usage, config_options);
 	}
 
+	perf_config_set__delete(set);
+out_err:
 	return ret;
 }
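
For illustration (the key and value are hypothetical), a ~/.perfconfig containing

    [report]
    	children = false

would now be listed by 'perf config --list' as

    report.children=false

i.e. one section.name=value line per item collected in the perf_config_set, instead of relying on the old perf_config() callback.
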
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 8053a8c..9ce354f 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -428,7 +428,7 @@
 	struct rb_root *root;
 	struct rb_node *next;
 
-	if (sort__need_collapse)
+	if (hists__has(hists, need_collapse))
 		root = &hists->entries_collapsed;
 	else
 		root = hists->entries_in;
@@ -450,7 +450,7 @@
 	struct rb_root *root;
 	struct rb_node *next;
 
-	if (sort__need_collapse)
+	if (hists__has(hists, need_collapse))
 		root = &hists->entries_collapsed;
 	else
 		root = hists->entries_in;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index bc1de9b..f9830c9 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -61,6 +61,7 @@
 	struct child_process ec_process;
 	const char *argv_ec[] = { "emacsclient", "--version", NULL };
 	int version;
+	int ret = -1;
 
 	/* emacsclient prints its version number on stderr */
 	memset(&ec_process, 0, sizeof(ec_process));
@@ -71,7 +72,10 @@
 		fprintf(stderr, "Failed to start emacsclient.\n");
 		return -1;
 	}
-	strbuf_read(&buffer, ec_process.err, 20);
+	if (strbuf_read(&buffer, ec_process.err, 20) < 0) {
+		fprintf(stderr, "Failed to read emacsclient version\n");
+		goto out;
+	}
 	close(ec_process.err);
 
 	/*
@@ -82,8 +86,7 @@
 
 	if (prefixcmp(buffer.buf, "emacsclient")) {
 		fprintf(stderr, "Failed to parse emacsclient version.\n");
-		strbuf_release(&buffer);
-		return -1;
+		goto out;
 	}
 
 	version = atoi(buffer.buf + strlen("emacsclient"));
@@ -92,12 +95,11 @@
 		fprintf(stderr,
 			"emacsclient version '%d' too old (< 22).\n",
 			version);
-		strbuf_release(&buffer);
-		return -1;
-	}
-
+	} else
+		ret = 0;
+out:
 	strbuf_release(&buffer);
-	return 0;
+	return ret;
 }
 
 static void exec_woman_emacs(const char *path, const char *page)
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index d1a2d10..e5afa8f 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -748,6 +748,7 @@
 			.auxtrace_info	= perf_event__repipe_op2_synth,
 			.auxtrace	= perf_event__repipe_auxtrace,
 			.auxtrace_error	= perf_event__repipe_op2_synth,
+			.time_conv	= perf_event__repipe_op2_synth,
 			.finished_round	= perf_event__repipe_oe_synth,
 			.build_id	= perf_event__repipe_op2_synth,
 			.id_index	= perf_event__repipe_op2_synth,
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index c9cb3be..58adfee 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -375,7 +375,7 @@
 	}
 
 	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
-	sample__resolve_callchain(sample, NULL, evsel, &al, 16);
+	sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);
 
 	callchain_cursor_commit(&callchain_cursor);
 	while (true) {
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index bff6664..6487c06 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -982,7 +982,7 @@
 	struct perf_evlist *evlist = kvm->evlist;
 	char sbuf[STRERR_BUFSIZE];
 
-	perf_evlist__config(evlist, &kvm->opts);
+	perf_evlist__config(evlist, &kvm->opts, NULL);
 
 	/*
 	 * Note: exclude_{guest,host} do not apply here.
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 85db3be..1dc140c 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -62,19 +62,22 @@
 	int rec_argc, i = 0, j;
 	const char **rec_argv;
 	int ret;
+	bool all_user = false, all_kernel = false;
 	struct option options[] = {
 	OPT_CALLBACK('e', "event", &mem, "event",
 		     "event selector. use 'perf mem record -e list' to list available events",
 		     parse_record_events),
 	OPT_INCR('v', "verbose", &verbose,
 		 "be more verbose (show counter open errors, etc)"),
+	OPT_BOOLEAN('U', "all-user", &all_user, "collect only user level data"),
+	OPT_BOOLEAN('K', "all-kernel", &all_kernel, "collect only kernel level data"),
 	OPT_END()
 	};
 
 	argc = parse_options(argc, argv, options, record_mem_usage,
 			     PARSE_OPT_STOP_AT_NON_OPTION);
 
-	rec_argc = argc + 7; /* max number of arguments */
+	rec_argc = argc + 9; /* max number of arguments */
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 	if (!rec_argv)
 		return -1;
@@ -103,6 +106,12 @@
 		rec_argv[i++] = perf_mem_events__name(j);
 	};
 
+	if (all_user)
+		rec_argv[i++] = "--all-user";
+
+	if (all_kernel)
+		rec_argv[i++] = "--all-kernel";
+
 	for (j = 0; j < argc; j++, i++)
 		rec_argv[i] = argv[j];
 
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 515510e..f3679c4 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -29,10 +29,12 @@
 #include "util/data.h"
 #include "util/perf_regs.h"
 #include "util/auxtrace.h"
+#include "util/tsc.h"
 #include "util/parse-branch-options.h"
 #include "util/parse-regs-options.h"
 #include "util/llvm-utils.h"
 #include "util/bpf-loader.h"
+#include "util/trigger.h"
 #include "asm/bug.h"
 
 #include <unistd.h>
@@ -55,6 +57,8 @@
 	bool			no_buildid_cache;
 	bool			no_buildid_cache_set;
 	bool			buildid_all;
+	bool			timestamp_filename;
+	bool			switch_output;
 	unsigned long long	samples;
 };
 
@@ -124,9 +128,10 @@
 static volatile int done;
 static volatile int signr = -1;
 static volatile int child_finished;
-static volatile int auxtrace_snapshot_enabled;
-static volatile int auxtrace_snapshot_err;
+
 static volatile int auxtrace_record__snapshot_started;
+static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
+static DEFINE_TRIGGER(switch_output_trigger);
 
 static void sig_handler(int sig)
 {
@@ -244,11 +249,12 @@
 {
 	pr_debug("Recording AUX area tracing snapshot\n");
 	if (record__auxtrace_read_snapshot_all(rec) < 0) {
-		auxtrace_snapshot_err = -1;
+		trigger_error(&auxtrace_snapshot_trigger);
 	} else {
-		auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
-		if (!auxtrace_snapshot_err)
-			auxtrace_snapshot_enabled = 1;
+		if (auxtrace_record__snapshot_finish(rec->itr))
+			trigger_error(&auxtrace_snapshot_trigger);
+		else
+			trigger_ready(&auxtrace_snapshot_trigger);
 	}
 }
 
@@ -283,7 +289,7 @@
 	struct record_opts *opts = &rec->opts;
 	int rc = 0;
 
-	perf_evlist__config(evlist, opts);
+	perf_evlist__config(evlist, opts, &callchain_param);
 
 	evlist__for_each(evlist, pos) {
 try_again:
@@ -494,6 +500,73 @@
 	return;
 }
 
+static int record__synthesize_workload(struct record *rec)
+{
+	struct {
+		struct thread_map map;
+		struct thread_map_data map_data;
+	} thread_map;
+
+	thread_map.map.nr = 1;
+	thread_map.map.map[0].pid = rec->evlist->workload.pid;
+	thread_map.map.map[0].comm = NULL;
+	return perf_event__synthesize_thread_map(&rec->tool, &thread_map.map,
+						 process_synthesized_event,
+						 &rec->session->machines.host,
+						 rec->opts.sample_address,
+						 rec->opts.proc_map_timeout);
+}
+
+static int record__synthesize(struct record *rec);
+
+static int
+record__switch_output(struct record *rec, bool at_exit)
+{
+	struct perf_data_file *file = &rec->file;
+	int fd, err;
+
+	/* Same size as a real timestamp, e.g. "2015122520103046" */
+	char timestamp[] = "InvalidTimestamp";
+
+	rec->samples = 0;
+	record__finish_output(rec);
+	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
+	if (err) {
+		pr_err("Failed to get current timestamp\n");
+		return -EINVAL;
+	}
+
+	fd = perf_data_file__switch(file, timestamp,
+				    rec->session->header.data_offset,
+				    at_exit);
+	if (fd >= 0 && !at_exit) {
+		rec->bytes_written = 0;
+		rec->session->header.data_size = 0;
+	}
+
+	if (!quiet)
+		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
+			file->path, timestamp);
+
+	/* Output tracking events */
+	if (!at_exit) {
+		record__synthesize(rec);
+
+		/*
+		 * In 'perf record --switch-output' without -a,
+		 * record__synthesize() in record__switch_output() won't
+		 * generate tracking events because there is no thread_map
+		 * in the evlist, so the newly created perf.data would not
+		 * contain map and comm information.
+		 * Create a fake thread_map and call
+		 * perf_event__synthesize_thread_map() directly for those events.
+		 */
+		if (target__none(&rec->opts.target))
+			record__synthesize_workload(rec);
+	}
+	return fd;
+}
+
 static volatile int workload_exec_errno;
 
 /*
@@ -512,6 +585,15 @@
 
 static void snapshot_sig_handler(int sig);
 
+int __weak
+perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
+			    struct perf_tool *tool __maybe_unused,
+			    perf_event__handler_t process __maybe_unused,
+			    struct machine *machine __maybe_unused)
+{
+	return 0;
+}
+
 static int record__synthesize(struct record *rec)
 {
 	struct perf_session *session = rec->session;
@@ -549,6 +631,11 @@
 		}
 	}
 
+	err = perf_event__synth_time_conv(rec->evlist->mmap[0].base, tool,
+					  process_synthesized_event, machine);
+	if (err)
+		goto out;
+
 	if (rec->opts.full_auxtrace) {
 		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
 					session, process_synthesized_event);
@@ -600,10 +687,16 @@
 	signal(SIGCHLD, sig_handler);
 	signal(SIGINT, sig_handler);
 	signal(SIGTERM, sig_handler);
-	if (rec->opts.auxtrace_snapshot_mode)
+
+	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output) {
 		signal(SIGUSR2, snapshot_sig_handler);
-	else
+		if (rec->opts.auxtrace_snapshot_mode)
+			trigger_on(&auxtrace_snapshot_trigger);
+		if (rec->switch_output)
+			trigger_on(&switch_output_trigger);
+	} else {
 		signal(SIGUSR2, SIG_IGN);
+	}
 
 	session = perf_session__new(file, false, tool);
 	if (session == NULL) {
@@ -729,27 +822,45 @@
 		perf_evlist__enable(rec->evlist);
 	}
 
-	auxtrace_snapshot_enabled = 1;
+	trigger_ready(&auxtrace_snapshot_trigger);
+	trigger_ready(&switch_output_trigger);
 	for (;;) {
 		unsigned long long hits = rec->samples;
 
 		if (record__mmap_read_all(rec) < 0) {
-			auxtrace_snapshot_enabled = 0;
+			trigger_error(&auxtrace_snapshot_trigger);
+			trigger_error(&switch_output_trigger);
 			err = -1;
 			goto out_child;
 		}
 
 		if (auxtrace_record__snapshot_started) {
 			auxtrace_record__snapshot_started = 0;
-			if (!auxtrace_snapshot_err)
+			if (!trigger_is_error(&auxtrace_snapshot_trigger))
 				record__read_auxtrace_snapshot(rec);
-			if (auxtrace_snapshot_err) {
+			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
 				pr_err("AUX area tracing snapshot failed\n");
 				err = -1;
 				goto out_child;
 			}
 		}
 
+		if (trigger_is_hit(&switch_output_trigger)) {
+			trigger_ready(&switch_output_trigger);
+
+			if (!quiet)
+				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
+					waking);
+			waking = 0;
+			fd = record__switch_output(rec, false);
+			if (fd < 0) {
+				pr_err("Failed to switch to new file\n");
+				trigger_error(&switch_output_trigger);
+				err = fd;
+				goto out_child;
+			}
+		}
+
 		if (hits == rec->samples) {
 			if (done || draining)
 				break;
@@ -772,12 +883,13 @@
 		 * disable events in this case.
 		 */
 		if (done && !disabled && !target__none(&opts->target)) {
-			auxtrace_snapshot_enabled = 0;
+			trigger_off(&auxtrace_snapshot_trigger);
 			perf_evlist__disable(rec->evlist);
 			disabled = true;
 		}
 	}
-	auxtrace_snapshot_enabled = 0;
+	trigger_off(&auxtrace_snapshot_trigger);
+	trigger_off(&switch_output_trigger);
 
 	if (forks && workload_exec_errno) {
 		char msg[STRERR_BUFSIZE];
@@ -811,11 +923,22 @@
 	/* this will be recalculated during process_buildids() */
 	rec->samples = 0;
 
-	if (!err)
-		record__finish_output(rec);
+	if (!err) {
+		if (!rec->timestamp_filename) {
+			record__finish_output(rec);
+		} else {
+			fd = record__switch_output(rec, true);
+			if (fd < 0) {
+				status = fd;
+				goto out_delete_session;
+			}
+		}
+	}
 
 	if (!err && !quiet) {
 		char samples[128];
+		const char *postfix = rec->timestamp_filename ?
+					".<timestamp>" : "";
 
 		if (rec->samples && !rec->opts.full_auxtrace)
 			scnprintf(samples, sizeof(samples),
@@ -823,9 +946,9 @@
 		else
 			samples[0] = '\0';
 
-		fprintf(stderr,	"[ perf record: Captured and wrote %.3f MB %s%s ]\n",
+		fprintf(stderr,	"[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
 			perf_data_file__size(file) / 1024.0 / 1024.0,
-			file->path, samples);
+			file->path, postfix, samples);
 	}
 
 out_delete_session:
@@ -833,58 +956,61 @@
 	return status;
 }
 
-static void callchain_debug(void)
+static void callchain_debug(struct callchain_param *callchain)
 {
 	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
 
-	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);
+	pr_debug("callchain: type %s\n", str[callchain->record_mode]);
 
-	if (callchain_param.record_mode == CALLCHAIN_DWARF)
+	if (callchain->record_mode == CALLCHAIN_DWARF)
 		pr_debug("callchain: stack dump size %d\n",
-			 callchain_param.dump_size);
+			 callchain->dump_size);
+}
+
+int record_opts__parse_callchain(struct record_opts *record,
+				 struct callchain_param *callchain,
+				 const char *arg, bool unset)
+{
+	int ret;
+	callchain->enabled = !unset;
+
+	/* --no-call-graph */
+	if (unset) {
+		callchain->record_mode = CALLCHAIN_NONE;
+		pr_debug("callchain: disabled\n");
+		return 0;
+	}
+
+	ret = parse_callchain_record_opt(arg, callchain);
+	if (!ret) {
+		/* Enable data address sampling for DWARF unwind. */
+		if (callchain->record_mode == CALLCHAIN_DWARF)
+			record->sample_address = true;
+		callchain_debug(callchain);
+	}
+
+	return ret;
 }
 
 int record_parse_callchain_opt(const struct option *opt,
 			       const char *arg,
 			       int unset)
 {
-	int ret;
-	struct record_opts *record = (struct record_opts *)opt->value;
-
-	record->callgraph_set = true;
-	callchain_param.enabled = !unset;
-
-	/* --no-call-graph */
-	if (unset) {
-		callchain_param.record_mode = CALLCHAIN_NONE;
-		pr_debug("callchain: disabled\n");
-		return 0;
-	}
-
-	ret = parse_callchain_record_opt(arg, &callchain_param);
-	if (!ret) {
-		/* Enable data address sampling for DWARF unwind. */
-		if (callchain_param.record_mode == CALLCHAIN_DWARF)
-			record->sample_address = true;
-		callchain_debug();
-	}
-
-	return ret;
+	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
 }
 
 int record_callchain_opt(const struct option *opt,
 			 const char *arg __maybe_unused,
 			 int unset __maybe_unused)
 {
-	struct record_opts *record = (struct record_opts *)opt->value;
+	struct callchain_param *callchain = opt->value;
 
-	record->callgraph_set = true;
-	callchain_param.enabled = true;
+	callchain->enabled = true;
 
-	if (callchain_param.record_mode == CALLCHAIN_NONE)
-		callchain_param.record_mode = CALLCHAIN_FP;
+	if (callchain->record_mode == CALLCHAIN_NONE)
+		callchain->record_mode = CALLCHAIN_FP;
 
-	callchain_debug();
+	callchain_debug(callchain);
 	return 0;
 }
 
@@ -1122,7 +1248,7 @@
 		     record__parse_mmap_pages),
 	OPT_BOOLEAN(0, "group", &record.opts.group,
 		    "put the counters into a counter group"),
-	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
+	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
 			   NULL, "enables call-graph recording" ,
 			   &record_callchain_opt),
 	OPT_CALLBACK(0, "call-graph", &record.opts,
@@ -1195,6 +1321,10 @@
 		   "file", "vmlinux pathname"),
 	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
 		    "Record build-id of all DSOs regardless of hits"),
+	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
+		    "append timestamp to output filename"),
+	OPT_BOOLEAN(0, "switch-output", &record.switch_output,
+		    "Switch output when receiving SIGUSR2"),
 	OPT_END()
 };
 
@@ -1250,6 +1380,9 @@
 		return -EINVAL;
 	}
 
+	if (rec->switch_output)
+		rec->timestamp_filename = true;
+
 	if (!rec->itr) {
 		rec->itr = auxtrace_record__init(rec->evlist, &err);
 		if (err)
@@ -1261,6 +1394,14 @@
 	if (err)
 		return err;
 
+	err = bpf__setup_stdout(rec->evlist);
+	if (err) {
+		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
+		pr_err("ERROR: Setup BPF stdout failed: %s\n",
+			 errbuf);
+		return err;
+	}
+
 	err = -ENOMEM;
 
 	symbol__init(NULL);
@@ -1275,8 +1416,36 @@
 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
 "even with a suitable vmlinux or kallsyms file.\n\n");
 
-	if (rec->no_buildid_cache || rec->no_buildid)
+	if (rec->no_buildid_cache || rec->no_buildid) {
 		disable_buildid_cache();
+	} else if (rec->switch_output) {
+		/*
+		 * In 'perf record --switch-output', disable buildid
+		 * generation by default to reduce data file switching
+		 * overhead. Still generate buildids if they are required
+		 * explicitly, using
+		 *
+		 *  perf record --switch-output --no-no-buildid \
+		 *              --no-no-buildid-cache
+		 *
+		 * The following code is equivalent to:
+		 *
+		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
+		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
+		 *         disable_buildid_cache();
+		 */
+		bool disable = true;
+
+		if (rec->no_buildid_set && !rec->no_buildid)
+			disable = false;
+		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
+			disable = false;
+		if (disable) {
+			rec->no_buildid = true;
+			rec->no_buildid_cache = true;
+			disable_buildid_cache();
+		}
+	}
 
 	if (rec->evlist->nr_entries == 0 &&
 	    perf_evlist__add_default(rec->evlist) < 0) {
@@ -1335,9 +1504,13 @@
 
 static void snapshot_sig_handler(int sig __maybe_unused)
 {
-	if (!auxtrace_snapshot_enabled)
-		return;
-	auxtrace_snapshot_enabled = 0;
-	auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
-	auxtrace_record__snapshot_started = 1;
+	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
+		trigger_hit(&auxtrace_snapshot_trigger);
+		auxtrace_record__snapshot_started = 1;
+		if (auxtrace_record__snapshot_start(record.itr))
+			trigger_error(&auxtrace_snapshot_trigger);
+	}
+
+	if (trigger_is_ready(&switch_output_trigger))
+		trigger_hit(&switch_output_trigger);
 }
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 160ea23..87d40e3 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -47,7 +47,6 @@
 	struct perf_tool	tool;
 	struct perf_session	*session;
 	bool			use_tui, use_gtk, use_stdio;
-	bool			dont_use_callchains;
 	bool			show_full_info;
 	bool			show_threads;
 	bool			inverted_callchain;
@@ -235,7 +234,7 @@
 		sample_type |= PERF_SAMPLE_BRANCH_STACK;
 
 	if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
-		if (sort__has_parent) {
+		if (perf_hpp_list.parent) {
 			ui__error("Selected --sort parent, but no "
 				    "callchain data. Did you call "
 				    "'perf record' without -g?\n");
@@ -247,7 +246,7 @@
 				  "you call 'perf record' without -g?\n");
 			return -1;
 		}
-	} else if (!rep->dont_use_callchains &&
+	} else if (!callchain_param.enabled &&
 		   callchain_param.mode != CHAIN_NONE &&
 		   !symbol_conf.use_callchain) {
 			symbol_conf.use_callchain = true;
@@ -599,13 +598,15 @@
 static int
 report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 {
-	struct report *rep = (struct report *)opt->value;
+	struct callchain_param *callchain = opt->value;
 
+	callchain->enabled = !unset;
 	/*
 	 * --no-call-graph
 	 */
 	if (unset) {
-		rep->dont_use_callchains = true;
+		symbol_conf.use_callchain = false;
+		callchain->mode = CHAIN_NONE;
 		return 0;
 	}
 
@@ -690,7 +691,7 @@
 			.ordered_events	 = true,
 			.ordering_requires_timestamps = true,
 		},
-		.max_stack		 = PERF_MAX_STACK_DEPTH,
+		.max_stack		 = sysctl_perf_event_max_stack,
 		.pretty_printing_style	 = "normal",
 		.socket_filter		 = -1,
 	};
@@ -734,7 +735,7 @@
 		   "regex filter to identify parent, see: '--sort parent'"),
 	OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
 		    "Only display entries with parent-match"),
-	OPT_CALLBACK_DEFAULT('g', "call-graph", &report,
+	OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
 			     "print_type,threshold[,print_limit],order,sort_key[,branch],value",
 			     report_callchain_help, &report_parse_callchain_opt,
 			     callchain_default_opt),
@@ -743,7 +744,7 @@
 	OPT_INTEGER(0, "max-stack", &report.max_stack,
 		    "Set the maximum stack depth when parsing the callchain, "
 		    "anything beyond the specified depth will be ignored. "
-		    "Default: " __stringify(PERF_MAX_STACK_DEPTH)),
+		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
 	OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
 		    "alias for inverted call graph"),
 	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
@@ -935,7 +936,7 @@
 			goto error;
 		}
 
-		sort__need_collapse = true;
+		perf_hpp_list.need_collapse = true;
 	}
 
 	/* Force tty output for header output and per-thread stat. */
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 871b55ae..afa0576 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -11,6 +11,8 @@
 #include "util/session.h"
 #include "util/tool.h"
 #include "util/cloexec.h"
+#include "util/thread_map.h"
+#include "util/color.h"
 
 #include <subcmd/parse-options.h>
 #include "util/trace-event.h"
@@ -122,6 +124,21 @@
 				  struct machine *machine);
 };
 
+#define COLOR_PIDS PERF_COLOR_BLUE
+#define COLOR_CPUS PERF_COLOR_BG_RED
+
+struct perf_sched_map {
+	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
+	int			*comp_cpus;
+	bool			 comp;
+	struct thread_map	*color_pids;
+	const char		*color_pids_str;
+	struct cpu_map		*color_cpus;
+	const char		*color_cpus_str;
+	struct cpu_map		*cpus;
+	const char		*cpus_str;
+};
+
 struct perf_sched {
 	struct perf_tool tool;
 	const char	 *sort_order;
@@ -173,6 +190,7 @@
 	struct list_head sort_list, cmp_pid;
 	bool force;
 	bool skip_merge;
+	struct perf_sched_map map;
 };
 
 static u64 get_nsecs(void)
@@ -1339,6 +1357,38 @@
 	return 0;
 }
 
+union map_priv {
+	void	*ptr;
+	bool	 color;
+};
+
+static bool thread__has_color(struct thread *thread)
+{
+	union map_priv priv = {
+		.ptr = thread__priv(thread),
+	};
+
+	return priv.color;
+}
+
+static struct thread*
+map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
+{
+	struct thread *thread = machine__findnew_thread(machine, pid, tid);
+	union map_priv priv = {
+		.color = false,
+	};
+
+	if (!sched->map.color_pids || !thread || thread__priv(thread))
+		return thread;
+
+	if (thread_map__has(sched->map.color_pids, tid))
+		priv.color = true;
+
+	thread__set_priv(thread, priv.ptr);
+	return thread;
+}
+
 static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
 			    struct perf_sample *sample, struct machine *machine)
 {
@@ -1347,13 +1397,25 @@
 	int new_shortname;
 	u64 timestamp0, timestamp = sample->time;
 	s64 delta;
-	int cpu, this_cpu = sample->cpu;
+	int i, this_cpu = sample->cpu;
+	int cpus_nr;
+	bool new_cpu = false;
+	const char *color = PERF_COLOR_NORMAL;
 
 	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
 
 	if (this_cpu > sched->max_cpu)
 		sched->max_cpu = this_cpu;
 
+	if (sched->map.comp) {
+		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
+		if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
+			sched->map.comp_cpus[cpus_nr++] = this_cpu;
+			new_cpu = true;
+		}
+	} else
+		cpus_nr = sched->max_cpu;
+
 	timestamp0 = sched->cpu_last_switched[this_cpu];
 	sched->cpu_last_switched[this_cpu] = timestamp;
 	if (timestamp0)
@@ -1366,7 +1428,7 @@
 		return -1;
 	}
 
-	sched_in = machine__findnew_thread(machine, -1, next_pid);
+	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
 	if (sched_in == NULL)
 		return -1;
 
@@ -1400,26 +1462,52 @@
 		new_shortname = 1;
 	}
 
-	for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
+	for (i = 0; i < cpus_nr; i++) {
+		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
+		struct thread *curr_thread = sched->curr_thread[cpu];
+		const char *pid_color = color;
+		const char *cpu_color = color;
+
+		if (curr_thread && thread__has_color(curr_thread))
+			pid_color = COLOR_PIDS;
+
+		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
+			continue;
+
+		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
+			cpu_color = COLOR_CPUS;
+
 		if (cpu != this_cpu)
-			printf(" ");
+			color_fprintf(stdout, cpu_color, " ");
 		else
-			printf("*");
+			color_fprintf(stdout, cpu_color, "*");
 
 		if (sched->curr_thread[cpu])
-			printf("%2s ", sched->curr_thread[cpu]->shortname);
+			color_fprintf(stdout, pid_color, "%2s ", sched->curr_thread[cpu]->shortname);
 		else
-			printf("   ");
+			color_fprintf(stdout, color, "   ");
 	}
 
-	printf("  %12.6f secs ", (double)timestamp/1e9);
+	if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
+		goto out;
+
+	color_fprintf(stdout, color, "  %12.6f secs ", (double)timestamp/1e9);
 	if (new_shortname) {
-		printf("%s => %s:%d\n",
+		const char *pid_color = color;
+
+		if (thread__has_color(sched_in))
+			pid_color = COLOR_PIDS;
+
+		color_fprintf(stdout, pid_color, "%s => %s:%d",
 		       sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
-	} else {
-		printf("\n");
 	}
 
+	if (sched->map.comp && new_cpu)
+		color_fprintf(stdout, color, " (CPU %d)", this_cpu);
+
+out:
+	color_fprintf(stdout, color, "\n");
+
 	thread__put(sched_in);
 
 	return 0;
@@ -1675,9 +1763,75 @@
 	return 0;
 }
 
+static int setup_map_cpus(struct perf_sched *sched)
+{
+	struct cpu_map *map;
+
+	sched->max_cpu  = sysconf(_SC_NPROCESSORS_CONF);
+
+	if (sched->map.comp) {
+		sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
+		if (!sched->map.comp_cpus)
+			return -1;
+	}
+
+	if (!sched->map.cpus_str)
+		return 0;
+
+	map = cpu_map__new(sched->map.cpus_str);
+	if (!map) {
+		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
+		return -1;
+	}
+
+	sched->map.cpus = map;
+	return 0;
+}
+
+static int setup_color_pids(struct perf_sched *sched)
+{
+	struct thread_map *map;
+
+	if (!sched->map.color_pids_str)
+		return 0;
+
+	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
+	if (!map) {
+		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
+		return -1;
+	}
+
+	sched->map.color_pids = map;
+	return 0;
+}
+
+static int setup_color_cpus(struct perf_sched *sched)
+{
+	struct cpu_map *map;
+
+	if (!sched->map.color_cpus_str)
+		return 0;
+
+	map = cpu_map__new(sched->map.color_cpus_str);
+	if (!map) {
+		pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
+		return -1;
+	}
+
+	sched->map.color_cpus = map;
+	return 0;
+}
+
 static int perf_sched__map(struct perf_sched *sched)
 {
-	sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
+	if (setup_map_cpus(sched))
+		return -1;
+
+	if (setup_color_pids(sched))
+		return -1;
+
+	if (setup_color_cpus(sched))
+		return -1;
 
 	setup_pager();
 	if (perf_sched__read_events(sched))
@@ -1831,6 +1985,17 @@
 		    "dump raw trace in ASCII"),
 	OPT_END()
 	};
+	const struct option map_options[] = {
+	OPT_BOOLEAN(0, "compact", &sched.map.comp,
+		    "map output in compact mode"),
+	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
+		   "highlight given pids in map"),
+	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
+		   "highlight given CPUs in map"),
+	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
+		   "display given CPUs in map"),
+	OPT_END()
+	};
 	const char * const latency_usage[] = {
 		"perf sched latency [<options>]",
 		NULL
@@ -1839,6 +2004,10 @@
 		"perf sched replay [<options>]",
 		NULL
 	};
+	const char * const map_usage[] = {
+		"perf sched map [<options>]",
+		NULL
+	};
 	const char *const sched_subcommands[] = { "record", "latency", "map",
 						  "replay", "script", NULL };
 	const char *sched_usage[] = {
@@ -1887,6 +2056,11 @@
 		setup_sorting(&sched, latency_options, latency_usage);
 		return perf_sched__lat(&sched);
 	} else if (!strcmp(argv[0], "map")) {
+		if (argc) {
+			argc = parse_options(argc, argv, map_options, map_usage, 0);
+			if (argc)
+				usage_with_options(map_usage, map_options);
+		}
 		sched.tp_handler = &map_ops;
 		setup_sorting(&sched, latency_options, latency_usage);
 		return perf_sched__map(&sched);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 3770c3d..efca816 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -22,6 +22,7 @@
 #include "util/thread_map.h"
 #include "util/stat.h"
 #include <linux/bitmap.h>
+#include <linux/stringify.h>
 #include "asm/bug.h"
 #include "util/mem-events.h"
 
@@ -317,19 +318,19 @@
 
 	output[type].print_ip_opts = 0;
 	if (PRINT_FIELD(IP))
-		output[type].print_ip_opts |= PRINT_IP_OPT_IP;
+		output[type].print_ip_opts |= EVSEL__PRINT_IP;
 
 	if (PRINT_FIELD(SYM))
-		output[type].print_ip_opts |= PRINT_IP_OPT_SYM;
+		output[type].print_ip_opts |= EVSEL__PRINT_SYM;
 
 	if (PRINT_FIELD(DSO))
-		output[type].print_ip_opts |= PRINT_IP_OPT_DSO;
+		output[type].print_ip_opts |= EVSEL__PRINT_DSO;
 
 	if (PRINT_FIELD(SYMOFFSET))
-		output[type].print_ip_opts |= PRINT_IP_OPT_SYMOFFSET;
+		output[type].print_ip_opts |= EVSEL__PRINT_SYMOFFSET;
 
 	if (PRINT_FIELD(SRCLINE))
-		output[type].print_ip_opts |= PRINT_IP_OPT_SRCLINE;
+		output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
 }
 
 /*
@@ -569,18 +570,23 @@
 	/* print branch_from information */
 	if (PRINT_FIELD(IP)) {
 		unsigned int print_opts = output[attr->type].print_ip_opts;
+		struct callchain_cursor *cursor = NULL;
 
-		if (symbol_conf.use_callchain && sample->callchain) {
-			printf("\n");
-		} else {
-			printf(" ");
-			if (print_opts & PRINT_IP_OPT_SRCLINE) {
+		if (symbol_conf.use_callchain && sample->callchain &&
+		    thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
+					      sample, NULL, NULL, scripting_max_stack) == 0)
+			cursor = &callchain_cursor;
+
+		if (cursor == NULL) {
+			putchar(' ');
+			if (print_opts & EVSEL__PRINT_SRCLINE) {
 				print_srcline_last = true;
-				print_opts &= ~PRINT_IP_OPT_SRCLINE;
+				print_opts &= ~EVSEL__PRINT_SRCLINE;
 			}
-		}
-		perf_evsel__print_ip(evsel, sample, al, print_opts,
-				     scripting_max_stack);
+		} else
+			putchar('\n');
+
+		sample__fprintf_sym(sample, al, 0, print_opts, cursor, stdout);
 	}
 
 	/* print branch_to information */
@@ -783,14 +789,15 @@
 		printf("%16" PRIu64, sample->weight);
 
 	if (PRINT_FIELD(IP)) {
-		if (!symbol_conf.use_callchain)
-			printf(" ");
-		else
-			printf("\n");
+		struct callchain_cursor *cursor = NULL;
 
-		perf_evsel__print_ip(evsel, sample, al,
-				     output[attr->type].print_ip_opts,
-				     scripting_max_stack);
+		if (symbol_conf.use_callchain && sample->callchain &&
+		    thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
+					      sample, NULL, NULL, scripting_max_stack) == 0)
+			cursor = &callchain_cursor;
+
+		putchar(cursor ? '\n' : ' ');
+		sample__fprintf_sym(sample, al, 0, output[attr->type].print_ip_opts, cursor, stdout);
 	}
 
 	if (PRINT_FIELD(IREGS))
@@ -1415,21 +1422,19 @@
 	return S_ISDIR(st.st_mode);
 }
 
-#define for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next)\
-	while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) &&	\
-	       lang_next)						\
-		if ((lang_dirent.d_type == DT_DIR ||			\
-		     (lang_dirent.d_type == DT_UNKNOWN &&		\
-		      is_directory(scripts_path, &lang_dirent))) &&	\
-		    (strcmp(lang_dirent.d_name, ".")) &&		\
-		    (strcmp(lang_dirent.d_name, "..")))
+#define for_each_lang(scripts_path, scripts_dir, lang_dirent)		\
+	while ((lang_dirent = readdir(scripts_dir)) != NULL)		\
+		if ((lang_dirent->d_type == DT_DIR ||			\
+		     (lang_dirent->d_type == DT_UNKNOWN &&		\
+		      is_directory(scripts_path, lang_dirent))) &&	\
+		    (strcmp(lang_dirent->d_name, ".")) &&		\
+		    (strcmp(lang_dirent->d_name, "..")))
 
-#define for_each_script(lang_path, lang_dir, script_dirent, script_next)\
-	while (!readdir_r(lang_dir, &script_dirent, &script_next) &&	\
-	       script_next)						\
-		if (script_dirent.d_type != DT_DIR &&			\
-		    (script_dirent.d_type != DT_UNKNOWN ||		\
-		     !is_directory(lang_path, &script_dirent)))
+#define for_each_script(lang_path, lang_dir, script_dirent)		\
+	while ((script_dirent = readdir(lang_dir)) != NULL)		\
+		if (script_dirent->d_type != DT_DIR &&			\
+		    (script_dirent->d_type != DT_UNKNOWN ||		\
+		     !is_directory(lang_path, script_dirent)))
 
 
 #define RECORD_SUFFIX			"-record"
@@ -1575,7 +1580,7 @@
 				  const char *s __maybe_unused,
 				  int unset __maybe_unused)
 {
-	struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+	struct dirent *script_dirent, *lang_dirent;
 	char scripts_path[MAXPATHLEN];
 	DIR *scripts_dir, *lang_dir;
 	char script_path[MAXPATHLEN];
@@ -1590,19 +1595,19 @@
 	if (!scripts_dir)
 		return -1;
 
-	for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+	for_each_lang(scripts_path, scripts_dir, lang_dirent) {
 		snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
-			 lang_dirent.d_name);
+			 lang_dirent->d_name);
 		lang_dir = opendir(lang_path);
 		if (!lang_dir)
 			continue;
 
-		for_each_script(lang_path, lang_dir, script_dirent, script_next) {
-			script_root = get_script_root(&script_dirent, REPORT_SUFFIX);
+		for_each_script(lang_path, lang_dir, script_dirent) {
+			script_root = get_script_root(script_dirent, REPORT_SUFFIX);
 			if (script_root) {
 				desc = script_desc__findnew(script_root);
 				snprintf(script_path, MAXPATHLEN, "%s/%s",
-					 lang_path, script_dirent.d_name);
+					 lang_path, script_dirent->d_name);
 				read_script_info(desc, script_path);
 				free(script_root);
 			}
@@ -1690,7 +1695,7 @@
  */
 int find_scripts(char **scripts_array, char **scripts_path_array)
 {
-	struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+	struct dirent *script_dirent, *lang_dirent;
 	char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
 	DIR *scripts_dir, *lang_dir;
 	struct perf_session *session;
@@ -1713,9 +1718,9 @@
 		return -1;
 	}
 
-	for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+	for_each_lang(scripts_path, scripts_dir, lang_dirent) {
 		snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
-			 lang_dirent.d_name);
+			 lang_dirent->d_name);
 #ifdef NO_LIBPERL
 		if (strstr(lang_path, "perl"))
 			continue;
@@ -1729,16 +1734,16 @@
 		if (!lang_dir)
 			continue;
 
-		for_each_script(lang_path, lang_dir, script_dirent, script_next) {
+		for_each_script(lang_path, lang_dir, script_dirent) {
 			/* Skip those real time scripts: xxxtop.p[yl] */
-			if (strstr(script_dirent.d_name, "top."))
+			if (strstr(script_dirent->d_name, "top."))
 				continue;
 			sprintf(scripts_path_array[i], "%s/%s", lang_path,
-				script_dirent.d_name);
-			temp = strchr(script_dirent.d_name, '.');
+				script_dirent->d_name);
+			temp = strchr(script_dirent->d_name, '.');
 			snprintf(scripts_array[i],
-				(temp - script_dirent.d_name) + 1,
-				"%s", script_dirent.d_name);
+				(temp - script_dirent->d_name) + 1,
+				"%s", script_dirent->d_name);
 
 			if (check_ev_match(lang_path,
 					scripts_array[i], session))
@@ -1756,7 +1761,7 @@
 
 static char *get_script_path(const char *script_root, const char *suffix)
 {
-	struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+	struct dirent *script_dirent, *lang_dirent;
 	char scripts_path[MAXPATHLEN];
 	char script_path[MAXPATHLEN];
 	DIR *scripts_dir, *lang_dir;
@@ -1769,21 +1774,21 @@
 	if (!scripts_dir)
 		return NULL;
 
-	for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+	for_each_lang(scripts_path, scripts_dir, lang_dirent) {
 		snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
-			 lang_dirent.d_name);
+			 lang_dirent->d_name);
 		lang_dir = opendir(lang_path);
 		if (!lang_dir)
 			continue;
 
-		for_each_script(lang_path, lang_dir, script_dirent, script_next) {
-			__script_root = get_script_root(&script_dirent, suffix);
+		for_each_script(lang_path, lang_dir, script_dirent) {
+			__script_root = get_script_root(script_dirent, suffix);
 			if (__script_root && !strcmp(script_root, __script_root)) {
 				free(__script_root);
 				closedir(lang_dir);
 				closedir(scripts_dir);
 				snprintf(script_path, MAXPATHLEN, "%s/%s",
-					 lang_path, script_dirent.d_name);
+					 lang_path, script_dirent->d_name);
 				return strdup(script_path);
 			}
 			free(__script_root);
@@ -1961,6 +1966,7 @@
 			.exit		 = perf_event__process_exit,
 			.fork		 = perf_event__process_fork,
 			.attr		 = process_attr,
+			.event_update   = perf_event__process_event_update,
 			.tracing_data	 = perf_event__process_tracing_data,
 			.build_id	 = perf_event__process_build_id,
 			.id_index	 = perf_event__process_id_index,
@@ -2022,6 +2028,10 @@
 		   "only consider symbols in these pids"),
 	OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
 		   "only consider symbols in these tids"),
+	OPT_UINTEGER(0, "max-stack", &scripting_max_stack,
+		     "Set the maximum stack depth when parsing the callchain, "
+		     "anything beyond the specified depth will be ignored. "
+		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
 	OPT_BOOLEAN('I', "show-info", &show_full_info,
 		    "display extended information from perf.data file"),
 	OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@@ -2057,6 +2067,8 @@
 		NULL
 	};
 
+	scripting_max_stack = sysctl_perf_event_max_stack;
+
 	setup_scripting();
 
 	argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 1f19f2f..e459b68 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -298,6 +298,14 @@
 					return -1;
 				}
 			}
+
+			if (verbose > 1) {
+				fprintf(stat_config.output,
+					"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+						perf_evsel__name(counter),
+						cpu,
+						count->val, count->ena, count->run);
+			}
 		}
 	}
 
@@ -528,6 +536,7 @@
 		perf_evlist__set_leader(evsel_list);
 
 	evlist__for_each(evsel_list, counter) {
+try_again:
 		if (create_perf_stat_counter(counter) < 0) {
 			/*
 			 * PPC returns ENXIO for HW counters until 2.6.37
@@ -544,7 +553,11 @@
 				if ((counter->leader != counter) ||
 				    !(counter->leader->nr_members > 1))
 					continue;
-			}
+			} else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
+				if (verbose)
+					ui__warning("%s\n", msg);
+				goto try_again;
+			}
 
 			perf_evsel__open_strerror(counter, &target,
 						  errno, msg, sizeof(msg));
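
With verbosity raised (-v -v), each per-CPU counter read is now echoed in the form '<event>: <cpu>: <value> <enabled-time> <running-time>', e.g. (numbers are hypothetical):

    cycles: 0: 8545923 2000123456 2000123456

which makes it easier to spot counters that were multiplexed on a given CPU (running time lower than enabled time).
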
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 8332149..1793da5 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -688,7 +688,7 @@
 	struct hist_entry *he = iter->he;
 	struct perf_evsel *evsel = iter->evsel;
 
-	if (sort__has_sym && single)
+	if (perf_hpp_list.sym && single)
 		perf_top__record_precise_ip(top, he, evsel->idx, al->addr);
 
 	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
@@ -886,7 +886,7 @@
 	struct perf_evlist *evlist = top->evlist;
 	struct record_opts *opts = &top->record_opts;
 
-	perf_evlist__config(evlist, opts);
+	perf_evlist__config(evlist, opts, &callchain_param);
 
 	evlist__for_each(evlist, counter) {
 try_again:
@@ -917,15 +917,15 @@
 	return -1;
 }
 
-static int perf_top__setup_sample_type(struct perf_top *top __maybe_unused)
+static int callchain_param__setup_sample_type(struct callchain_param *callchain)
 {
-	if (!sort__has_sym) {
-		if (symbol_conf.use_callchain) {
+	if (!perf_hpp_list.sym) {
+		if (callchain->enabled) {
 			ui__error("Selected -g but \"sym\" not present in --sort/-s.");
 			return -EINVAL;
 		}
-	} else if (callchain_param.mode != CHAIN_NONE) {
-		if (callchain_register_param(&callchain_param) < 0) {
+	} else if (callchain->mode != CHAIN_NONE) {
+		if (callchain_register_param(callchain) < 0) {
 			ui__error("Can't register callchain params.\n");
 			return -EINVAL;
 		}
@@ -952,7 +952,7 @@
 			goto out_delete;
 	}
 
-	ret = perf_top__setup_sample_type(top);
+	ret = callchain_param__setup_sample_type(&callchain_param);
 	if (ret)
 		goto out_delete;
 
@@ -962,7 +962,7 @@
 	machine__synthesize_threads(&top->session->machines.host, &opts->target,
 				    top->evlist->threads, false, opts->proc_map_timeout);
 
-	if (sort__has_socket) {
+	if (perf_hpp_list.socket) {
 		ret = perf_env__read_cpu_topology_map(&perf_env);
 		if (ret < 0)
 			goto out_err_cpu_topo;
@@ -1045,18 +1045,17 @@
 static int
 parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 {
-	struct record_opts *record = (struct record_opts *)opt->value;
+	struct callchain_param *callchain = opt->value;
 
-	record->callgraph_set = true;
-	callchain_param.enabled = !unset;
-	callchain_param.record_mode = CALLCHAIN_FP;
+	callchain->enabled = !unset;
+	callchain->record_mode = CALLCHAIN_FP;
 
 	/*
 	 * --no-call-graph
 	 */
 	if (unset) {
 		symbol_conf.use_callchain = false;
-		callchain_param.record_mode = CALLCHAIN_NONE;
+		callchain->record_mode = CALLCHAIN_NONE;
 		return 0;
 	}
 
@@ -1104,7 +1103,7 @@
 			},
 			.proc_map_timeout    = 500,
 		},
-		.max_stack	     = PERF_MAX_STACK_DEPTH,
+		.max_stack	     = sysctl_perf_event_max_stack,
 		.sym_pcnt_filter     = 5,
 	};
 	struct record_opts *opts = &top.record_opts;
@@ -1162,17 +1161,17 @@
 		   "output field(s): overhead, period, sample plus all of sort keys"),
 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
 		    "Show a column with the number of samples"),
-	OPT_CALLBACK_NOOPT('g', NULL, &top.record_opts,
+	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
 			   NULL, "enables call-graph recording and display",
 			   &callchain_opt),
-	OPT_CALLBACK(0, "call-graph", &top.record_opts,
+	OPT_CALLBACK(0, "call-graph", &callchain_param,
 		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
 		     top_callchain_help, &parse_callchain_opt),
 	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
 		    "Accumulate callchains of children and show total overhead as well"),
 	OPT_INTEGER(0, "max-stack", &top.max_stack,
 		    "Set the maximum stack depth when parsing the callchain. "
-		    "Default: " __stringify(PERF_MAX_STACK_DEPTH)),
+		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
 	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
 		   "ignore callees of these functions in call graphs",
 		   report_parse_ignore_callees_opt),
@@ -1256,7 +1255,7 @@
 
 	sort__mode = SORT_MODE__TOP;
 	/* display thread wants entries to be collapsed in a different tree */
-	sort__need_collapse = 1;
+	perf_hpp_list.need_collapse = 1;
 
 	if (top.use_stdio)
 		use_browser = 0;
@@ -1312,7 +1311,7 @@
 
 	top.sym_evsel = perf_evlist__first(top.evlist);
 
-	if (!symbol_conf.use_callchain) {
+	if (!callchain_param.enabled) {
 		symbol_conf.cumulate_callchain = false;
 		perf_hpp__cancel_cumulate();
 	}
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 93ac724..6e5c325 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -34,79 +34,76 @@
 #include "trace-event.h"
 #include "util/parse-events.h"
 #include "util/bpf-loader.h"
+#include "callchain.h"
+#include "syscalltbl.h"
+#include "rb_resort.h"
 
-#include <libaudit.h>
+#include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
 #include <stdlib.h>
-#include <sys/mman.h>
-#include <linux/futex.h>
 #include <linux/err.h>
-
-/* For older distros: */
-#ifndef MAP_STACK
-# define MAP_STACK		0x20000
-#endif
-
-#ifndef MADV_HWPOISON
-# define MADV_HWPOISON		100
-
-#endif
-
-#ifndef MADV_MERGEABLE
-# define MADV_MERGEABLE		12
-#endif
-
-#ifndef MADV_UNMERGEABLE
-# define MADV_UNMERGEABLE	13
-#endif
-
-#ifndef EFD_SEMAPHORE
-# define EFD_SEMAPHORE		1
-#endif
-
-#ifndef EFD_NONBLOCK
-# define EFD_NONBLOCK		00004000
-#endif
-
-#ifndef EFD_CLOEXEC
-# define EFD_CLOEXEC		02000000
-#endif
+#include <linux/filter.h>
+#include <linux/audit.h>
+#include <sys/ptrace.h>
+#include <linux/random.h>
+#include <linux/stringify.h>
 
 #ifndef O_CLOEXEC
 # define O_CLOEXEC		02000000
 #endif
 
-#ifndef SOCK_DCCP
-# define SOCK_DCCP		6
-#endif
-
-#ifndef SOCK_CLOEXEC
-# define SOCK_CLOEXEC		02000000
-#endif
-
-#ifndef SOCK_NONBLOCK
-# define SOCK_NONBLOCK		00004000
-#endif
-
-#ifndef MSG_CMSG_CLOEXEC
-# define MSG_CMSG_CLOEXEC	0x40000000
-#endif
-
-#ifndef PERF_FLAG_FD_NO_GROUP
-# define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
-#endif
-
-#ifndef PERF_FLAG_FD_OUTPUT
-# define PERF_FLAG_FD_OUTPUT		(1UL << 1)
-#endif
-
-#ifndef PERF_FLAG_PID_CGROUP
-# define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
-#endif
-
-#ifndef PERF_FLAG_FD_CLOEXEC
-# define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */
-#endif
-
+struct trace {
+	struct perf_tool	tool;
+	struct syscalltbl	*sctbl;
+	struct {
+		int		max;
+		struct syscall  *table;
+		struct {
+			struct perf_evsel *sys_enter,
+					  *sys_exit;
+		}		events;
+	} syscalls;
+	struct record_opts	opts;
+	struct perf_evlist	*evlist;
+	struct machine		*host;
+	struct thread		*current;
+	u64			base_time;
+	FILE			*output;
+	unsigned long		nr_events;
+	struct strlist		*ev_qualifier;
+	struct {
+		size_t		nr;
+		int		*entries;
+	}			ev_qualifier_ids;
+	struct intlist		*tid_list;
+	struct intlist		*pid_list;
+	struct {
+		size_t		nr;
+		pid_t		*entries;
+	}			filter_pids;
+	double			duration_filter;
+	double			runtime_ms;
+	struct {
+		u64		vfs_getname,
+				proc_getname;
+	} stats;
+	unsigned int		max_stack;
+	unsigned int		min_stack;
+	bool			not_ev_qualifier;
+	bool			live;
+	bool			full_time;
+	bool			sched;
+	bool			multiple_threads;
+	bool			summary;
+	bool			summary_only;
+	bool			show_comm;
+	bool			show_tool_stats;
+	bool			trace_syscalls;
+	bool			kernel_syscallchains;
+	bool			force;
+	bool			vfs_getname;
+	int			trace_pgfaults;
+	int			open_id;
+};
 
 struct tp_field {
 	int offset;
@@ -371,221 +368,6 @@
 
 #define SCA_INT syscall_arg__scnprintf_int
 
-static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
-					       struct syscall_arg *arg)
-{
-	int printed = 0, prot = arg->val;
-
-	if (prot == PROT_NONE)
-		return scnprintf(bf, size, "NONE");
-#define	P_MMAP_PROT(n) \
-	if (prot & PROT_##n) { \
-		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
-		prot &= ~PROT_##n; \
-	}
-
-	P_MMAP_PROT(EXEC);
-	P_MMAP_PROT(READ);
-	P_MMAP_PROT(WRITE);
-#ifdef PROT_SEM
-	P_MMAP_PROT(SEM);
-#endif
-	P_MMAP_PROT(GROWSDOWN);
-	P_MMAP_PROT(GROWSUP);
-#undef P_MMAP_PROT
-
-	if (prot)
-		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);
-
-	return printed;
-}
-
-#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
-
-static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
-						struct syscall_arg *arg)
-{
-	int printed = 0, flags = arg->val;
-
-#define	P_MMAP_FLAG(n) \
-	if (flags & MAP_##n) { \
-		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
-		flags &= ~MAP_##n; \
-	}
-
-	P_MMAP_FLAG(SHARED);
-	P_MMAP_FLAG(PRIVATE);
-#ifdef MAP_32BIT
-	P_MMAP_FLAG(32BIT);
-#endif
-	P_MMAP_FLAG(ANONYMOUS);
-	P_MMAP_FLAG(DENYWRITE);
-	P_MMAP_FLAG(EXECUTABLE);
-	P_MMAP_FLAG(FILE);
-	P_MMAP_FLAG(FIXED);
-	P_MMAP_FLAG(GROWSDOWN);
-#ifdef MAP_HUGETLB
-	P_MMAP_FLAG(HUGETLB);
-#endif
-	P_MMAP_FLAG(LOCKED);
-	P_MMAP_FLAG(NONBLOCK);
-	P_MMAP_FLAG(NORESERVE);
-	P_MMAP_FLAG(POPULATE);
-	P_MMAP_FLAG(STACK);
-#ifdef MAP_UNINITIALIZED
-	P_MMAP_FLAG(UNINITIALIZED);
-#endif
-#undef P_MMAP_FLAG
-
-	if (flags)
-		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
-	return printed;
-}
-
-#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
-
-static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
-						  struct syscall_arg *arg)
-{
-	int printed = 0, flags = arg->val;
-
-#define P_MREMAP_FLAG(n) \
-	if (flags & MREMAP_##n) { \
-		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
-		flags &= ~MREMAP_##n; \
-	}
-
-	P_MREMAP_FLAG(MAYMOVE);
-#ifdef MREMAP_FIXED
-	P_MREMAP_FLAG(FIXED);
-#endif
-#undef P_MREMAP_FLAG
-
-	if (flags)
-		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
-	return printed;
-}
-
-#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
-
-static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
-						      struct syscall_arg *arg)
-{
-	int behavior = arg->val;
-
-	switch (behavior) {
-#define	P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
-	P_MADV_BHV(NORMAL);
-	P_MADV_BHV(RANDOM);
-	P_MADV_BHV(SEQUENTIAL);
-	P_MADV_BHV(WILLNEED);
-	P_MADV_BHV(DONTNEED);
-	P_MADV_BHV(REMOVE);
-	P_MADV_BHV(DONTFORK);
-	P_MADV_BHV(DOFORK);
-	P_MADV_BHV(HWPOISON);
-#ifdef MADV_SOFT_OFFLINE
-	P_MADV_BHV(SOFT_OFFLINE);
-#endif
-	P_MADV_BHV(MERGEABLE);
-	P_MADV_BHV(UNMERGEABLE);
-#ifdef MADV_HUGEPAGE
-	P_MADV_BHV(HUGEPAGE);
-#endif
-#ifdef MADV_NOHUGEPAGE
-	P_MADV_BHV(NOHUGEPAGE);
-#endif
-#ifdef MADV_DONTDUMP
-	P_MADV_BHV(DONTDUMP);
-#endif
-#ifdef MADV_DODUMP
-	P_MADV_BHV(DODUMP);
-#endif
-#undef P_MADV_PHV
-	default: break;
-	}
-
-	return scnprintf(bf, size, "%#x", behavior);
-}
-
-#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
-
-static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
-					   struct syscall_arg *arg)
-{
-	int printed = 0, op = arg->val;
-
-	if (op == 0)
-		return scnprintf(bf, size, "NONE");
-#define	P_CMD(cmd) \
-	if ((op & LOCK_##cmd) == LOCK_##cmd) { \
-		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \
-		op &= ~LOCK_##cmd; \
-	}
-
-	P_CMD(SH);
-	P_CMD(EX);
-	P_CMD(NB);
-	P_CMD(UN);
-	P_CMD(MAND);
-	P_CMD(RW);
-	P_CMD(READ);
-	P_CMD(WRITE);
-#undef P_OP
-
-	if (op)
-		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", op);
-
-	return printed;
-}
-
-#define SCA_FLOCK syscall_arg__scnprintf_flock
-
-static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
-{
-	enum syscall_futex_args {
-		SCF_UADDR   = (1 << 0),
-		SCF_OP	    = (1 << 1),
-		SCF_VAL	    = (1 << 2),
-		SCF_TIMEOUT = (1 << 3),
-		SCF_UADDR2  = (1 << 4),
-		SCF_VAL3    = (1 << 5),
-	};
-	int op = arg->val;
-	int cmd = op & FUTEX_CMD_MASK;
-	size_t printed = 0;
-
-	switch (cmd) {
-#define	P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
-	P_FUTEX_OP(WAIT);	    arg->mask |= SCF_VAL3|SCF_UADDR2;		  break;
-	P_FUTEX_OP(WAKE);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
-	P_FUTEX_OP(FD);		    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
-	P_FUTEX_OP(REQUEUE);	    arg->mask |= SCF_VAL3|SCF_TIMEOUT;	          break;
-	P_FUTEX_OP(CMP_REQUEUE);    arg->mask |= SCF_TIMEOUT;			  break;
-	P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT;			  break;
-	P_FUTEX_OP(WAKE_OP);							  break;
-	P_FUTEX_OP(LOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
-	P_FUTEX_OP(UNLOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
-	P_FUTEX_OP(TRYLOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2;		  break;
-	P_FUTEX_OP(WAIT_BITSET);    arg->mask |= SCF_UADDR2;			  break;
-	P_FUTEX_OP(WAKE_BITSET);    arg->mask |= SCF_UADDR2;			  break;
-	P_FUTEX_OP(WAIT_REQUEUE_PI);						  break;
-	default: printed = scnprintf(bf, size, "%#x", cmd);			  break;
-	}
-
-	if (op & FUTEX_PRIVATE_FLAG)
-		printed += scnprintf(bf + printed, size - printed, "|PRIV");
-
-	if (op & FUTEX_CLOCK_REALTIME)
-		printed += scnprintf(bf + printed, size - printed, "|CLKRT");
-
-	return printed;
-}
-
-#define SCA_FUTEX_OP  syscall_arg__scnprintf_futex_op
-
 static const char *bpf_cmd[] = {
 	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
 	"MAP_GET_NEXT_KEY", "PROG_LOAD",
@@ -652,110 +434,6 @@
 };
 static DEFINE_STRARRAY(socket_families);
 
-#ifndef SOCK_TYPE_MASK
-#define SOCK_TYPE_MASK 0xf
-#endif
-
-static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size,
-						      struct syscall_arg *arg)
-{
-	size_t printed;
-	int type = arg->val,
-	    flags = type & ~SOCK_TYPE_MASK;
-
-	type &= SOCK_TYPE_MASK;
-	/*
- 	 * Can't use a strarray, MIPS may override for ABI reasons.
- 	 */
-	switch (type) {
-#define	P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, #n); break;
-	P_SK_TYPE(STREAM);
-	P_SK_TYPE(DGRAM);
-	P_SK_TYPE(RAW);
-	P_SK_TYPE(RDM);
-	P_SK_TYPE(SEQPACKET);
-	P_SK_TYPE(DCCP);
-	P_SK_TYPE(PACKET);
-#undef P_SK_TYPE
-	default:
-		printed = scnprintf(bf, size, "%#x", type);
-	}
-
-#define	P_SK_FLAG(n) \
-	if (flags & SOCK_##n) { \
-		printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
-		flags &= ~SOCK_##n; \
-	}
-
-	P_SK_FLAG(CLOEXEC);
-	P_SK_FLAG(NONBLOCK);
-#undef P_SK_FLAG
-
-	if (flags)
-		printed += scnprintf(bf + printed, size - printed, "|%#x", flags);
-
-	return printed;
-}
-
-#define SCA_SK_TYPE syscall_arg__scnprintf_socket_type
-
-#ifndef MSG_PROBE
-#define MSG_PROBE	     0x10
-#endif
-#ifndef MSG_WAITFORONE
-#define MSG_WAITFORONE	0x10000
-#endif
-#ifndef MSG_SENDPAGE_NOTLAST
-#define MSG_SENDPAGE_NOTLAST 0x20000
-#endif
-#ifndef MSG_FASTOPEN
-#define MSG_FASTOPEN	     0x20000000
-#endif
-
-static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
-					       struct syscall_arg *arg)
-{
-	int printed = 0, flags = arg->val;
-
-	if (flags == 0)
-		return scnprintf(bf, size, "NONE");
-#define	P_MSG_FLAG(n) \
-	if (flags & MSG_##n) { \
-		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
-		flags &= ~MSG_##n; \
-	}
-
-	P_MSG_FLAG(OOB);
-	P_MSG_FLAG(PEEK);
-	P_MSG_FLAG(DONTROUTE);
-	P_MSG_FLAG(TRYHARD);
-	P_MSG_FLAG(CTRUNC);
-	P_MSG_FLAG(PROBE);
-	P_MSG_FLAG(TRUNC);
-	P_MSG_FLAG(DONTWAIT);
-	P_MSG_FLAG(EOR);
-	P_MSG_FLAG(WAITALL);
-	P_MSG_FLAG(FIN);
-	P_MSG_FLAG(SYN);
-	P_MSG_FLAG(CONFIRM);
-	P_MSG_FLAG(RST);
-	P_MSG_FLAG(ERRQUEUE);
-	P_MSG_FLAG(NOSIGNAL);
-	P_MSG_FLAG(MORE);
-	P_MSG_FLAG(WAITFORONE);
-	P_MSG_FLAG(SENDPAGE_NOTLAST);
-	P_MSG_FLAG(FASTOPEN);
-	P_MSG_FLAG(CMSG_CLOEXEC);
-#undef P_MSG_FLAG
-
-	if (flags)
-		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
-	return printed;
-}
-
-#define SCA_MSG_FLAGS syscall_arg__scnprintf_msg_flags
-
 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
 						 struct syscall_arg *arg)
 {
@@ -788,116 +466,6 @@
 
 #define SCA_FILENAME syscall_arg__scnprintf_filename
 
-static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
-					       struct syscall_arg *arg)
-{
-	int printed = 0, flags = arg->val;
-
-	if (!(flags & O_CREAT))
-		arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */
-
-	if (flags == 0)
-		return scnprintf(bf, size, "RDONLY");
-#define	P_FLAG(n) \
-	if (flags & O_##n) { \
-		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
-		flags &= ~O_##n; \
-	}
-
-	P_FLAG(APPEND);
-	P_FLAG(ASYNC);
-	P_FLAG(CLOEXEC);
-	P_FLAG(CREAT);
-	P_FLAG(DIRECT);
-	P_FLAG(DIRECTORY);
-	P_FLAG(EXCL);
-	P_FLAG(LARGEFILE);
-	P_FLAG(NOATIME);
-	P_FLAG(NOCTTY);
-#ifdef O_NONBLOCK
-	P_FLAG(NONBLOCK);
-#elif O_NDELAY
-	P_FLAG(NDELAY);
-#endif
-#ifdef O_PATH
-	P_FLAG(PATH);
-#endif
-	P_FLAG(RDWR);
-#ifdef O_DSYNC
-	if ((flags & O_SYNC) == O_SYNC)
-		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
-	else {
-		P_FLAG(DSYNC);
-	}
-#else
-	P_FLAG(SYNC);
-#endif
-	P_FLAG(TRUNC);
-	P_FLAG(WRONLY);
-#undef P_FLAG
-
-	if (flags)
-		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
-	return printed;
-}
-
-#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
-
-static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
-						struct syscall_arg *arg)
-{
-	int printed = 0, flags = arg->val;
-
-	if (flags == 0)
-		return 0;
-
-#define	P_FLAG(n) \
-	if (flags & PERF_FLAG_##n) { \
-		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
-		flags &= ~PERF_FLAG_##n; \
-	}
-
-	P_FLAG(FD_NO_GROUP);
-	P_FLAG(FD_OUTPUT);
-	P_FLAG(PID_CGROUP);
-	P_FLAG(FD_CLOEXEC);
-#undef P_FLAG
-
-	if (flags)
-		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
-	return printed;
-}
-
-#define SCA_PERF_FLAGS syscall_arg__scnprintf_perf_flags
-
-static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size,
-						   struct syscall_arg *arg)
-{
-	int printed = 0, flags = arg->val;
-
-	if (flags == 0)
-		return scnprintf(bf, size, "NONE");
-#define	P_FLAG(n) \
-	if (flags & EFD_##n) { \
-		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
-		flags &= ~EFD_##n; \
-	}
-
-	P_FLAG(SEMAPHORE);
-	P_FLAG(CLOEXEC);
-	P_FLAG(NONBLOCK);
-#undef P_FLAG
-
-	if (flags)
-		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
-	return printed;
-}
-
-#define SCA_EFD_FLAGS syscall_arg__scnprintf_eventfd_flags
-
 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
 						struct syscall_arg *arg)
 {
@@ -921,59 +489,6 @@
 
 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
 
-static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
-{
-	int sig = arg->val;
-
-	switch (sig) {
-#define	P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n)
-	P_SIGNUM(HUP);
-	P_SIGNUM(INT);
-	P_SIGNUM(QUIT);
-	P_SIGNUM(ILL);
-	P_SIGNUM(TRAP);
-	P_SIGNUM(ABRT);
-	P_SIGNUM(BUS);
-	P_SIGNUM(FPE);
-	P_SIGNUM(KILL);
-	P_SIGNUM(USR1);
-	P_SIGNUM(SEGV);
-	P_SIGNUM(USR2);
-	P_SIGNUM(PIPE);
-	P_SIGNUM(ALRM);
-	P_SIGNUM(TERM);
-	P_SIGNUM(CHLD);
-	P_SIGNUM(CONT);
-	P_SIGNUM(STOP);
-	P_SIGNUM(TSTP);
-	P_SIGNUM(TTIN);
-	P_SIGNUM(TTOU);
-	P_SIGNUM(URG);
-	P_SIGNUM(XCPU);
-	P_SIGNUM(XFSZ);
-	P_SIGNUM(VTALRM);
-	P_SIGNUM(PROF);
-	P_SIGNUM(WINCH);
-	P_SIGNUM(IO);
-	P_SIGNUM(PWR);
-	P_SIGNUM(SYS);
-#ifdef SIGEMT
-	P_SIGNUM(EMT);
-#endif
-#ifdef SIGSTKFLT
-	P_SIGNUM(STKFLT);
-#endif
-#ifdef SIGSWI
-	P_SIGNUM(SWI);
-#endif
-	default: break;
-	}
-
-	return scnprintf(bf, size, "%#x", sig);
-}
-
-#define SCA_SIGNUM syscall_arg__scnprintf_signum
-
 #if defined(__i386__) || defined(__x86_64__)
 /*
  * FIXME: Make this available to all arches.
@@ -1001,16 +516,62 @@
 static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
 #endif /* defined(__i386__) || defined(__x86_64__) */
 
+#ifndef GRND_NONBLOCK
+#define GRND_NONBLOCK	0x0001
+#endif
+#ifndef GRND_RANDOM
+#define GRND_RANDOM	0x0002
+#endif
+
+static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
+						   struct syscall_arg *arg)
+{
+	int printed = 0, flags = arg->val;
+
+#define	P_FLAG(n) \
+	if (flags & GRND_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		flags &= ~GRND_##n; \
+	}
+
+	P_FLAG(RANDOM);
+	P_FLAG(NONBLOCK);
+#undef P_FLAG
+
+	if (flags)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+	return printed;
+}
+
+#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
+
 #define STRARRAY(arg, name, array) \
 	  .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
 	  .arg_parm	 = { [arg] = &strarray__##array, }
 
+#include "trace/beauty/eventfd.c"
+#include "trace/beauty/flock.c"
+#include "trace/beauty/futex_op.c"
+#include "trace/beauty/mmap.c"
+#include "trace/beauty/mode_t.c"
+#include "trace/beauty/msg_flags.c"
+#include "trace/beauty/open_flags.c"
+#include "trace/beauty/perf_event_open.c"
+#include "trace/beauty/pid.c"
+#include "trace/beauty/sched_policy.c"
+#include "trace/beauty/seccomp.c"
+#include "trace/beauty/signum.c"
+#include "trace/beauty/socket_type.c"
+#include "trace/beauty/waitid_options.c"
+
 static struct syscall_fmt {
 	const char *name;
 	const char *alias;
 	size_t	   (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
 	void	   *arg_parm[6];
 	bool	   errmsg;
+	bool	   errpid;
 	bool	   timeout;
 	bool	   hexret;
 } syscall_fmts[] = {
@@ -1028,6 +589,7 @@
 	{ .name	    = "chroot",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
 	{ .name     = "clock_gettime",  .errmsg = true, STRARRAY(0, clk_id, clockid), },
+	{ .name	    = "clone",	    .errpid = true, },
 	{ .name	    = "close",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
 	{ .name	    = "connect",    .errmsg = true, },
@@ -1093,6 +655,11 @@
 	{ .name	    = "getdents64", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "getitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
+	{ .name	    = "getpid",	    .errpid = true, },
+	{ .name	    = "getpgid",    .errpid = true, },
+	{ .name	    = "getppid",    .errpid = true, },
+	{ .name	    = "getrandom",  .errmsg = true,
+	  .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
 	{ .name	    = "getrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
 	{ .name	    = "getxattr",    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
@@ -1186,8 +753,7 @@
 			     [1] = SCA_FILENAME, /* filename */
 			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
 	{ .name	    = "perf_event_open", .errmsg = true,
-	  .arg_scnprintf = { [1] = SCA_INT, /* pid */
-			     [2] = SCA_INT, /* cpu */
+	  .arg_scnprintf = { [2] = SCA_INT, /* cpu */
 			     [3] = SCA_FD,  /* group_fd */
 			     [4] = SCA_PERF_FLAGS,  /* flags */ }, },
 	{ .name	    = "pipe2",	    .errmsg = true,
@@ -1234,6 +800,11 @@
 	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
 	{ .name	    = "rt_tgsigqueueinfo", .errmsg = true,
 	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
+	{ .name	    = "sched_setscheduler",   .errmsg = true,
+	  .arg_scnprintf = { [1] = SCA_SCHED_POLICY, /* policy */ }, },
+	{ .name	    = "seccomp", .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_SECCOMP_OP, /* op */
+			     [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
 	{ .name	    = "select",	    .errmsg = true, .timeout = true, },
 	{ .name	    = "sendmmsg",    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
@@ -1244,7 +815,9 @@
 	{ .name	    = "sendto",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
 			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	{ .name	    = "set_tid_address", .errpid = true, },
 	{ .name	    = "setitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
+	{ .name	    = "setpgid",    .errmsg = true, },
 	{ .name	    = "setrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
 	{ .name	    = "setxattr",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
@@ -1287,6 +860,10 @@
 	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
 	{ .name	    = "vmsplice",  .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name	    = "wait4",	    .errpid = true,
+	  .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
+	{ .name	    = "waitid",	    .errpid = true,
+	  .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
 	{ .name	    = "write",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "writev",	    .errmsg = true,
@@ -1398,59 +975,6 @@
 
 static const size_t trace__entry_str_size = 2048;
 
-struct trace {
-	struct perf_tool	tool;
-	struct {
-		int		machine;
-		int		open_id;
-	}			audit;
-	struct {
-		int		max;
-		struct syscall  *table;
-		struct {
-			struct perf_evsel *sys_enter,
-					  *sys_exit;
-		}		events;
-	} syscalls;
-	struct record_opts	opts;
-	struct perf_evlist	*evlist;
-	struct machine		*host;
-	struct thread		*current;
-	u64			base_time;
-	FILE			*output;
-	unsigned long		nr_events;
-	struct strlist		*ev_qualifier;
-	struct {
-		size_t		nr;
-		int		*entries;
-	}			ev_qualifier_ids;
-	struct intlist		*tid_list;
-	struct intlist		*pid_list;
-	struct {
-		size_t		nr;
-		pid_t		*entries;
-	}			filter_pids;
-	double			duration_filter;
-	double			runtime_ms;
-	struct {
-		u64		vfs_getname,
-				proc_getname;
-	} stats;
-	bool			not_ev_qualifier;
-	bool			live;
-	bool			full_time;
-	bool			sched;
-	bool			multiple_threads;
-	bool			summary;
-	bool			summary_only;
-	bool			show_comm;
-	bool			show_tool_stats;
-	bool			trace_syscalls;
-	bool			force;
-	bool			vfs_getname;
-	int			trace_pgfaults;
-};
-
 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
 {
 	struct thread_trace *ttrace = thread__priv(thread);
@@ -1618,6 +1142,7 @@
 		color_fprintf(trace->output, PERF_COLOR_RED,
 			      "LOST %" PRIu64 " events!\n", event->lost.lost);
 		ret = machine__process_lost_event(machine, event, sample);
+		break;
 	default:
 		ret = machine__process_event(machine, event, sample);
 		break;
@@ -1675,6 +1200,10 @@
 			sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
 		else if (field->flags & FIELD_IS_POINTER)
 			sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
+		else if (strcmp(field->type, "pid_t") == 0)
+			sc->arg_scnprintf[idx] = SCA_PID;
+		else if (strcmp(field->type, "umode_t") == 0)
+			sc->arg_scnprintf[idx] = SCA_MODE_T;
 		++idx;
 	}
 
@@ -1685,7 +1214,7 @@
 {
 	char tp_name[128];
 	struct syscall *sc;
-	const char *name = audit_syscall_to_name(id, trace->audit.machine);
+	const char *name = syscalltbl__name(trace->sctbl, id);
 
 	if (name == NULL)
 		return -1;
@@ -1760,7 +1289,7 @@
 
 	strlist__for_each(pos, trace->ev_qualifier) {
 		const char *sc = pos->s;
-		int id = audit_name_to_syscall(sc, trace->audit.machine);
+		int id = syscalltbl__id(trace->sctbl, sc);
 
 		if (id < 0) {
 			if (err == 0) {
@@ -1846,7 +1375,12 @@
 						     "%ld", val);
 			}
 		}
-	} else {
+	} else if (IS_ERR(sc->tp_format)) {
+		/*
+		 * If we managed to read the tracepoint /format file, then we
+		 * may end up not having any args, like with gettid(), so only
+		 * print the raw args when we didn't manage to read it.
+		 */
 		int i = 0;
 
 		while (i < 6) {
@@ -1987,7 +1521,7 @@
 			goto out_put;
 	}
 
-	if (!trace->summary_only)
+	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
 		trace__printf_interrupted_entry(trace, sample);
 
 	ttrace->entry_time = sample->time;
@@ -1998,7 +1532,7 @@
 					   args, trace, thread);
 
 	if (sc->is_exit) {
-		if (!trace->duration_filter && !trace->summary_only) {
+		if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
 			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
 			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
 		}
@@ -2018,6 +1552,29 @@
 	return err;
 }
 
+static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
+				    struct perf_sample *sample,
+				    struct callchain_cursor *cursor)
+{
+	struct addr_location al;
+
+	if (machine__resolve(trace->host, &al, sample) < 0 ||
+	    thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
+		return -1;
+
+	return 0;
+}
+
+static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
+{
+	/* TODO: user-configurable print_opts */
+	const unsigned int print_opts = EVSEL__PRINT_SYM |
+				        EVSEL__PRINT_DSO |
+				        EVSEL__PRINT_UNKNOWN_AS_ADDR;
+
+	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
+}
+
 static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
 			   union perf_event *event __maybe_unused,
 			   struct perf_sample *sample)
@@ -2025,7 +1582,7 @@
 	long ret;
 	u64 duration = 0;
 	struct thread *thread;
-	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
+	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
 	struct syscall *sc = trace__syscall_info(trace, evsel, id);
 	struct thread_trace *ttrace;
 
@@ -2042,7 +1599,7 @@
 
 	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
 
-	if (id == trace->audit.open_id && ret >= 0 && ttrace->filename.pending_open) {
+	if (id == trace->open_id && ret >= 0 && ttrace->filename.pending_open) {
 		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
 		ttrace->filename.pending_open = false;
 		++trace->stats.vfs_getname;
@@ -2057,6 +1614,15 @@
 	} else if (trace->duration_filter)
 		goto out;
 
+	if (sample->callchain) {
+		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
+		if (callchain_ret == 0) {
+			if (callchain_cursor.nr < trace->min_stack)
+				goto out;
+			callchain_ret = 1;
+		}
+	}
+
 	if (trace->summary_only)
 		goto out;
 
@@ -2073,7 +1639,7 @@
 	if (sc->fmt == NULL) {
 signed_print:
 		fprintf(trace->output, ") = %ld", ret);
-	} else if (ret < 0 && sc->fmt->errmsg) {
+	} else if (ret < 0 && (sc->fmt->errmsg || sc->fmt->errpid)) {
 		char bf[STRERR_BUFSIZE];
 		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
 			   *e = audit_errno_to_name(-ret);
@@ -2083,10 +1649,24 @@
 		fprintf(trace->output, ") = 0 Timeout");
 	else if (sc->fmt->hexret)
 		fprintf(trace->output, ") = %#lx", ret);
-	else
+	else if (sc->fmt->errpid) {
+		struct thread *child = machine__find_thread(trace->host, ret, ret);
+
+		if (child != NULL) {
+			fprintf(trace->output, ") = %ld", ret);
+			if (child->comm_set)
+				fprintf(trace->output, " (%s)", thread__comm_str(child));
+			thread__put(child);
+		}
+	} else
 		goto signed_print;
 
 	fputc('\n', trace->output);
+
+	if (callchain_ret > 0)
+		trace__fprintf_callchain(trace, sample);
+	else if (callchain_ret < 0)
+		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
 out:
 	ttrace->entry_pending = false;
 	err = 0;
@@ -2217,6 +1797,17 @@
 				union perf_event *event __maybe_unused,
 				struct perf_sample *sample)
 {
+	int callchain_ret = 0;
+
+	if (sample->callchain) {
+		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
+		if (callchain_ret == 0) {
+			if (callchain_cursor.nr < trace->min_stack)
+				goto out;
+			callchain_ret = 1;
+		}
+	}
+
 	trace__printf_interrupted_entry(trace, sample);
 	trace__fprintf_tstamp(trace, sample->time, trace->output);
 
@@ -2234,6 +1825,12 @@
 	}
 
 	fprintf(trace->output, ")\n");
+
+	if (callchain_ret > 0)
+		trace__fprintf_callchain(trace, sample);
+	else if (callchain_ret < 0)
+		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
+out:
 	return 0;
 }
 
@@ -2264,8 +1861,19 @@
 	char map_type = 'd';
 	struct thread_trace *ttrace;
 	int err = -1;
+	int callchain_ret = 0;
 
 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+
+	if (sample->callchain) {
+		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
+		if (callchain_ret == 0) {
+			if (callchain_cursor.nr < trace->min_stack)
+				goto out_put;
+			callchain_ret = 1;
+		}
+	}
+
 	ttrace = thread__trace(thread, trace->output);
 	if (ttrace == NULL)
 		goto out_put;
@@ -2307,6 +1915,11 @@
 	print_location(trace->output, sample, &al, true, false);
 
 	fprintf(trace->output, " (%c%c)\n", map_type, al.level);
+
+	if (callchain_ret > 0)
+		trace__fprintf_callchain(trace, sample);
+	else if (callchain_ret < 0)
+		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
 out:
 	err = 0;
 out_put:
@@ -2326,6 +1939,23 @@
 	return false;
 }
 
+static void trace__set_base_time(struct trace *trace,
+				 struct perf_evsel *evsel,
+				 struct perf_sample *sample)
+{
+	/*
+	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
+	 * and don't use sample->time unconditionally, we may end up having
+	 * some other event in the future without PERF_SAMPLE_TIME for good
+	 * reason, i.e. we may not be interested in its timestamps, just in
+	 * it taking place, picking some piece of information when it
+	 * appears in our event stream (vfs_getname comes to mind).
+	 */
+	if (trace->base_time == 0 && !trace->full_time &&
+	    (evsel->attr.sample_type & PERF_SAMPLE_TIME))
+		trace->base_time = sample->time;
+}
+
 static int trace__process_sample(struct perf_tool *tool,
 				 union perf_event *event,
 				 struct perf_sample *sample,
@@ -2340,8 +1970,7 @@
 	if (skip_sample(trace, sample))
 		return 0;
 
-	if (!trace->full_time && trace->base_time == 0)
-		trace->base_time = sample->time;
+	trace__set_base_time(trace, evsel, sample);
 
 	if (handler) {
 		++trace->nr_events;
@@ -2450,8 +2079,7 @@
 	return true;
 }
 
-static int perf_evlist__add_pgfault(struct perf_evlist *evlist,
-				    u64 config)
+static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
 {
 	struct perf_evsel *evsel;
 	struct perf_event_attr attr = {
@@ -2465,13 +2093,10 @@
 	event_attr_init(&attr);
 
 	evsel = perf_evsel__new(&attr);
-	if (!evsel)
-		return -ENOMEM;
+	if (evsel)
+		evsel->handler = trace__pgfault;
 
-	evsel->handler = trace__pgfault;
-	perf_evlist__add(evlist, evsel);
-
-	return 0;
+	return evsel;
 }
 
 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
@@ -2479,9 +2104,6 @@
 	const u32 type = event->header.type;
 	struct perf_evsel *evsel;
 
-	if (!trace->full_time && trace->base_time == 0)
-		trace->base_time = sample->time;
-
 	if (type != PERF_RECORD_SAMPLE) {
 		trace__process_event(trace, trace->host, event, sample);
 		return;
@@ -2493,6 +2115,8 @@
 		return;
 	}
 
+	trace__set_base_time(trace, evsel, sample);
+
 	if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
 	    sample->raw_data == NULL) {
 		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
@@ -2527,6 +2151,15 @@
 	perf_evlist__add(evlist, sys_enter);
 	perf_evlist__add(evlist, sys_exit);
 
+	if (callchain_param.enabled && !trace->kernel_syscallchains) {
+		/*
+		 * We're interested only in the user space callchain
+		 * leading to the syscall, allow overriding that for
+		 * debugging reasons using --kernel-syscall-graph
+		 */
+		sys_exit->attr.exclude_callchain_kernel = 1;
+	}
+
 	trace->syscalls.events.sys_enter = sys_enter;
 	trace->syscalls.events.sys_exit  = sys_exit;
 
@@ -2565,7 +2198,7 @@
 static int trace__run(struct trace *trace, int argc, const char **argv)
 {
 	struct perf_evlist *evlist = trace->evlist;
-	struct perf_evsel *evsel;
+	struct perf_evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
 	int err = -1, i;
 	unsigned long before;
 	const bool forks = argc > 0;
@@ -2579,14 +2212,19 @@
 	if (trace->trace_syscalls)
 		trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
 
-	if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
-	    perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
-		goto out_error_mem;
+	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
+		pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
+		if (pgfault_maj == NULL)
+			goto out_error_mem;
+		perf_evlist__add(evlist, pgfault_maj);
 	}
 
-	if ((trace->trace_pgfaults & TRACE_PFMIN) &&
-	    perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
-		goto out_error_mem;
+	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
+		pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
+		if (pgfault_min == NULL)
+			goto out_error_mem;
+		perf_evlist__add(evlist, pgfault_min);
+	}
 
 	if (trace->sched &&
 	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
@@ -2605,7 +2243,45 @@
 		goto out_delete_evlist;
 	}
 
-	perf_evlist__config(evlist, &trace->opts);
+	perf_evlist__config(evlist, &trace->opts, NULL);
+
+	if (callchain_param.enabled) {
+		bool use_identifier = false;
+
+		if (trace->syscalls.events.sys_exit) {
+			perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
+						     &trace->opts, &callchain_param);
+			use_identifier = true;
+		}
+
+		if (pgfault_maj) {
+			perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
+			use_identifier = true;
+		}
+
+		if (pgfault_min) {
+			perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
+			use_identifier = true;
+		}
+
+		if (use_identifier) {
+		       /*
+			* Now we have evsels with different sample_ids, use
+			* PERF_SAMPLE_IDENTIFIER to map from sample to evsel
+			* from a fixed position in each ring buffer record.
+			*
+			* As of the changeset introducing this comment, this
+			* isn't strictly needed, as the fields that can come before
+			* PERF_SAMPLE_ID are all used, but we'll probably disable
+			* some of those for things like copying the payload of
+			* pointer syscall arguments, and for vfs_getname we don't
+			* need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
+			* here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
+			*/
+			perf_evlist__set_sample_bit(evlist, IDENTIFIER);
+			perf_evlist__reset_sample_bit(evlist, ID);
+		}
+	}
 
 	signal(SIGCHLD, sig_handler);
 	signal(SIGINT, sig_handler);
@@ -2883,15 +2559,29 @@
 	return printed;
 }
 
+DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
+	struct stats 	*stats;
+	double		msecs;
+	int		syscall;
+)
+{
+	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
+	struct stats *stats = source->priv;
+
+	entry->syscall = source->i;
+	entry->stats   = stats;
+	entry->msecs   = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
+}
+
 static size_t thread__dump_stats(struct thread_trace *ttrace,
 				 struct trace *trace, FILE *fp)
 {
-	struct stats *stats;
 	size_t printed = 0;
 	struct syscall *sc;
-	struct int_node *inode = intlist__first(ttrace->syscall_stats);
+	struct rb_node *nd;
+	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
 
-	if (inode == NULL)
+	if (syscall_stats == NULL)
 		return 0;
 
 	printed += fprintf(fp, "\n");
@@ -2900,9 +2590,8 @@
 	printed += fprintf(fp, "                               (msec)    (msec)    (msec)    (msec)        (%%)\n");
 	printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");
 
-	/* each int_node is a syscall */
-	while (inode) {
-		stats = inode->priv;
+	resort_rb__for_each(nd, syscall_stats) {
+		struct stats *stats = syscall_stats_entry->stats;
 		if (stats) {
 			double min = (double)(stats->min) / NSEC_PER_MSEC;
 			double max = (double)(stats->max) / NSEC_PER_MSEC;
@@ -2913,34 +2602,23 @@
 			pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
 			avg /= NSEC_PER_MSEC;
 
-			sc = &trace->syscalls.table[inode->i];
+			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
 			printed += fprintf(fp, "   %-15s", sc->name);
 			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
-					   n, avg * n, min, avg);
+					   n, syscall_stats_entry->msecs, min, avg);
 			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
 		}
-
-		inode = intlist__next(inode);
 	}
 
+	resort_rb__delete(syscall_stats);
 	printed += fprintf(fp, "\n\n");
 
 	return printed;
 }
 
-/* struct used to pass data to per-thread function */
-struct summary_data {
-	FILE *fp;
-	struct trace *trace;
-	size_t printed;
-};
-
-static int trace__fprintf_one_thread(struct thread *thread, void *priv)
+static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
 {
-	struct summary_data *data = priv;
-	FILE *fp = data->fp;
-	size_t printed = data->printed;
-	struct trace *trace = data->trace;
+	size_t printed = 0;
 	struct thread_trace *ttrace = thread__priv(thread);
 	double ratio;
 
@@ -2956,25 +2634,45 @@
 		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
 	if (ttrace->pfmin)
 		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
-	printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
+	if (trace->sched)
+		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
+	else if (fputc('\n', fp) != EOF)
+		++printed;
+
 	printed += thread__dump_stats(ttrace, trace, fp);
 
-	data->printed += printed;
+	return printed;
+}
 
-	return 0;
+static unsigned long thread__nr_events(struct thread_trace *ttrace)
+{
+	return ttrace ? ttrace->nr_events : 0;
+}
+
+DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
+	struct thread *thread;
+)
+{
+	entry->thread = rb_entry(nd, struct thread, rb_node);
 }
 
 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
 {
-	struct summary_data data = {
-		.fp = fp,
-		.trace = trace
-	};
-	data.printed = trace__fprintf_threads_header(fp);
+	DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host);
+	size_t printed = trace__fprintf_threads_header(fp);
+	struct rb_node *nd;
 
-	machine__for_each_thread(trace->host, trace__fprintf_one_thread, &data);
+	if (threads == NULL) {
+		fprintf(fp, "%s", "Error sorting output by nr_events!\n");
+		return 0;
+	}
 
-	return data.printed;
+	resort_rb__for_each(nd, threads)
+		printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
+
+	resort_rb__delete(threads);
+
+	return printed;
 }
 
 static int trace__set_duration(const struct option *opt, const char *str,
@@ -3070,10 +2768,6 @@
 		NULL
 	};
 	struct trace trace = {
-		.audit = {
-			.machine = audit_detect_machine(),
-			.open_id = audit_name_to_syscall("open", trace.audit.machine),
-		},
 		.syscalls = {
 			. max = -1,
 		},
@@ -3091,6 +2785,8 @@
 		.output = stderr,
 		.show_comm = true,
 		.trace_syscalls = true,
+		.kernel_syscallchains = false,
+		.max_stack = UINT_MAX,
 	};
 	const char *output_name = NULL;
 	const char *ev_qualifier_str = NULL;
@@ -3136,10 +2832,24 @@
 		     "Trace pagefaults", parse_pagefaults, "maj"),
 	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
 	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
+	OPT_CALLBACK(0, "call-graph", &trace.opts,
+		     "record_mode[,record_size]", record_callchain_help,
+		     &record_parse_callchain_opt),
+	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
+		    "Show the kernel callchains on the syscall exit path"),
+	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
+		     "Set the minimum stack depth when parsing the callchain, "
+		     "anything below the specified depth will be ignored."),
+	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
+		     "Set the maximum stack depth when parsing the callchain, "
+		     "anything beyond the specified depth will be ignored. "
+		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
 	OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
 			"per thread proc mmap processing timeout in ms"),
 	OPT_END()
 	};
+	bool __maybe_unused max_stack_user_set = true;
+	bool mmap_pages_user_set = true;
 	const char * const trace_subcommands[] = { "record", NULL };
 	int err;
 	char bf[BUFSIZ];
@@ -3148,8 +2858,9 @@
 	signal(SIGFPE, sighandler_dump_stack);
 
 	trace.evlist = perf_evlist__new();
+	trace.sctbl = syscalltbl__new();
 
-	if (trace.evlist == NULL) {
+	if (trace.evlist == NULL || trace.sctbl == NULL) {
 		pr_err("Not enough memory to run!\n");
 		err = -ENOMEM;
 		goto out;
@@ -3158,11 +2869,40 @@
 	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
 				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
 
+	err = bpf__setup_stdout(trace.evlist);
+	if (err) {
+		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
+		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
+		goto out;
+	}
+
+	err = -1;
+
 	if (trace.trace_pgfaults) {
 		trace.opts.sample_address = true;
 		trace.opts.sample_time = true;
 	}
 
+	if (trace.opts.mmap_pages == UINT_MAX)
+		mmap_pages_user_set = false;
+
+	if (trace.max_stack == UINT_MAX) {
+		trace.max_stack = sysctl_perf_event_max_stack;
+		max_stack_user_set = false;
+	}
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled)
+		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
+#endif
+
+	if (callchain_param.enabled) {
+		if (!mmap_pages_user_set && geteuid() == 0)
+			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
+
+		symbol_conf.use_callchain = true;
+	}
+
 	if (trace.evlist->nr_entries > 0)
 		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
 
@@ -3179,6 +2919,11 @@
 		return -1;
 	}
 
+	if (!trace.trace_syscalls && ev_qualifier_str) {
+		pr_err("The -e option can't be used with --no-syscalls.\n");
+		goto out;
+	}
+
 	if (output_name != NULL) {
 		err = trace__open_output(&trace, output_name);
 		if (err < 0) {
@@ -3187,6 +2932,8 @@
 		}
 	}
 
+	trace.open_id = syscalltbl__id(trace.sctbl, "open");
+
 	if (ev_qualifier_str != NULL) {
 		const char *s = ev_qualifier_str;
 		struct strlist_config slist_config = {
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index f7d7f5a..1e46277 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -27,7 +27,7 @@
 ifeq ($(ARCH),x86)
   $(call detected,CONFIG_X86)
   ifeq (${IS_64_BIT}, 1)
-    CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
+    CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
     ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
     LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
     $(call detected,CONFIG_X86_64)
@@ -268,6 +268,12 @@
     ifneq ($(feature-dwarf), 1)
       msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
       NO_DWARF := 1
+    else
+      ifneq ($(feature-dwarf_getlocations), 1)
+        msg := $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.157);
+      else
+        CFLAGS += -DHAVE_DWARF_GETLOCATIONS
+      endif # dwarf_getlocations
     endif # Dwarf support
   endif # libelf support
 endif # NO_LIBELF
@@ -289,9 +295,6 @@
     CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
   endif
 
-  # include ARCH specific config
-  -include $(src-perf)/arch/$(ARCH)/Makefile
-
   ifndef NO_DWARF
     ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
       msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index 6461e02..3573f31 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -92,6 +92,22 @@
 	return ret;
 }
 
+static int use_arch_timestamp;
+
+static inline uint64_t
+get_arch_timestamp(void)
+{
+#if defined(__i386__) || defined(__x86_64__)
+	unsigned int low, high;
+
+	asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+	return low | ((uint64_t)high) << 32;
+#else
+	return 0;
+#endif
+}
+
 #define NSEC_PER_SEC	1000000000
 static int perf_clk_id = CLOCK_MONOTONIC;
 
@@ -107,6 +123,9 @@
 	struct timespec ts;
 	int ret;
 
+	if (use_arch_timestamp)
+		return get_arch_timestamp();
+
 	ret = clock_gettime(perf_clk_id, &ts);
 	if (ret)
 		return 0;
@@ -203,6 +222,17 @@
 	munmap(marker_addr, pgsz);
 }
 
+static void
+init_arch_timestamp(void)
+{
+	char *str = getenv("JITDUMP_USE_ARCH_TIMESTAMP");
+
+	if (!str || !*str || !strcmp(str, "0"))
+		return;
+
+	use_arch_timestamp = 1;
+}
+
 void *jvmti_open(void)
 {
 	int pad_cnt;
@@ -211,11 +241,17 @@
 	int fd;
 	FILE *fp;
 
+	init_arch_timestamp();
+
 	/*
 	 * check if clockid is supported
 	 */
-	if (!perf_get_timestamp())
-		warnx("jvmti: kernel does not support %d clock id", perf_clk_id);
+	if (!perf_get_timestamp()) {
+		if (use_arch_timestamp)
+			warnx("jvmti: arch timestamp not supported");
+		else
+			warnx("jvmti: kernel does not support %d clock id", perf_clk_id);
+	}
 
 	memset(&header, 0, sizeof(header));
 
@@ -263,6 +299,9 @@
 
 	header.timestamp = perf_get_timestamp();
 
+	if (use_arch_timestamp)
+		header.flags |= JITDUMP_FLAGS_ARCH_TIMESTAMP;
+
 	if (!fwrite(&header, sizeof(header), 1, fp)) {
 		warn("jvmti: cannot write dumpfile header");
 		goto error;
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index aaee0a7..7970008 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -17,6 +17,7 @@
 #include <subcmd/parse-options.h>
 #include "util/bpf-loader.h"
 #include "util/debug.h"
+#include <api/fs/fs.h>
 #include <api/fs/tracing_path.h>
 #include <pthread.h>
 #include <stdlib.h>
@@ -308,9 +309,11 @@
 			if (*argcp > 1) {
 				struct strbuf buf;
 
-				strbuf_init(&buf, PATH_MAX);
-				strbuf_addstr(&buf, alias_string);
-				sq_quote_argv(&buf, (*argv) + 1, PATH_MAX);
+				if (strbuf_init(&buf, PATH_MAX) < 0 ||
+				    strbuf_addstr(&buf, alias_string) < 0 ||
+				    sq_quote_argv(&buf, (*argv) + 1,
+						  PATH_MAX) < 0)
+					die("Failed to allocate memory.");
 				free(alias_string);
 				alias_string = buf.buf;
 			}
@@ -533,6 +536,7 @@
 {
 	const char *cmd;
 	char sbuf[STRERR_BUFSIZE];
+	int value;
 
 	/* libsubcmd init */
 	exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
@@ -542,6 +546,9 @@
 	page_size = sysconf(_SC_PAGE_SIZE);
 	cacheline_size = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
 
+	if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
+		sysctl_perf_event_max_stack = value;
+
 	cmd = extract_argv0_path(argv[0]);
 	if (!cmd)
 		cmd = "perf-help";
@@ -549,6 +556,7 @@
 	srandom(time(NULL));
 
 	perf_config(perf_default_config, NULL);
+	set_buildid_dir(NULL);
 
 	/* get debugfs/tracefs mount point from /proc/mounts */
 	tracing_path_mount();
@@ -572,7 +580,6 @@
 	}
 	if (!prefixcmp(cmd, "trace")) {
 #ifdef HAVE_LIBAUDIT_SUPPORT
-		set_buildid_dir(NULL);
 		setup_path();
 		argv[0] = "trace";
 		return cmd_trace(argc, argv, NULL);
@@ -587,7 +594,6 @@
 	argc--;
 	handle_options(&argv, &argc, NULL);
 	commit_pager_choice();
-	set_buildid_dir(NULL);
 
 	if (argc > 0) {
 		if (!prefixcmp(argv[0], "--"))
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 5381a01..cd8f1b1 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -52,7 +52,6 @@
 	bool	     sample_weight;
 	bool	     sample_time;
 	bool	     sample_time_set;
-	bool	     callgraph_set;
 	bool	     period;
 	bool	     running_time;
 	bool	     full_auxtrace;
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 1b02cdc..7656ff8 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -34,10 +34,9 @@
 #
 # ubuntu:
 #
-#	$ sudo apt-get install postgresql
+#	$ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql
 #	$ sudo su - postgres
-#	$ createuser <your user id here>
-#	Shall the new role be a superuser? (y/n) y
+#	$ createuser -s <your user id here>
 #
 # An example of using this script with Intel PT:
 #
@@ -224,11 +223,14 @@
 
 perf_db_export_mode = True
 perf_db_export_calls = False
+perf_db_export_callchains = False
+
 
 def usage():
-	print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]"
+	print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
 	print >> sys.stderr, "where:	columns		'all' or 'branches'"
-	print >> sys.stderr, "		calls		'calls' => create calls table"
+	print >> sys.stderr, "		calls		'calls' => create calls and call_paths table"
+	print >> sys.stderr, "		callchains	'callchains' => create call_paths table"
 	raise Exception("Too few arguments")
 
 if (len(sys.argv) < 2):
@@ -246,9 +248,11 @@
 
 branches = (columns == "branches")
 
-if (len(sys.argv) >= 4):
-	if (sys.argv[3] == "calls"):
+for i in range(3,len(sys.argv)):
+	if (sys.argv[i] == "calls"):
 		perf_db_export_calls = True
+	elif (sys.argv[i] == "callchains"):
+		perf_db_export_callchains = True
 	else:
 		usage()
 
@@ -359,14 +363,16 @@
 		'transaction	bigint,'
 		'data_src	bigint,'
 		'branch_type	integer,'
-		'in_tx		boolean)')
+		'in_tx		boolean,'
+		'call_path_id	bigint)')
 
-if perf_db_export_calls:
+if perf_db_export_calls or perf_db_export_callchains:
 	do_query(query, 'CREATE TABLE call_paths ('
 		'id		bigint		NOT NULL,'
 		'parent_id	bigint,'
 		'symbol_id	bigint,'
 		'ip		bigint)')
+if perf_db_export_calls:
 	do_query(query, 'CREATE TABLE calls ('
 		'id		bigint		NOT NULL,'
 		'thread_id	bigint,'
@@ -428,7 +434,7 @@
 		'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
 	' FROM comm_threads')
 
-if perf_db_export_calls:
+if perf_db_export_calls or perf_db_export_callchains:
 	do_query(query, 'CREATE VIEW call_paths_view AS '
 		'SELECT '
 			'c.id,'
@@ -444,6 +450,7 @@
 			'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
 			'(SELECT dso FROM symbols_view  WHERE id = p.symbol_id) AS parent_dso_short_name'
 		' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
+if perf_db_export_calls:
 	do_query(query, 'CREATE VIEW calls_view AS '
 		'SELECT '
 			'calls.id,'
@@ -541,8 +548,9 @@
 symbol_file		= open_output_file("symbol_table.bin")
 branch_type_file	= open_output_file("branch_type_table.bin")
 sample_file		= open_output_file("sample_table.bin")
-if perf_db_export_calls:
+if perf_db_export_calls or perf_db_export_callchains:
 	call_path_file		= open_output_file("call_path_table.bin")
+if perf_db_export_calls:
 	call_file		= open_output_file("call_table.bin")
 
 def trace_begin():
@@ -554,8 +562,8 @@
 	comm_table(0, "unknown")
 	dso_table(0, 0, "unknown", "unknown", "")
 	symbol_table(0, 0, 0, 0, 0, "unknown")
-	sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
-	if perf_db_export_calls:
+	sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+	if perf_db_export_calls or perf_db_export_callchains:
 		call_path_table(0, 0, 0, 0)
 
 unhandled_count = 0
@@ -571,8 +579,9 @@
 	copy_output_file(symbol_file,		"symbols")
 	copy_output_file(branch_type_file,	"branch_types")
 	copy_output_file(sample_file,		"samples")
-	if perf_db_export_calls:
+	if perf_db_export_calls or perf_db_export_callchains:
 		copy_output_file(call_path_file,	"call_paths")
+	if perf_db_export_calls:
 		copy_output_file(call_file,		"calls")
 
 	print datetime.datetime.today(), "Removing intermediate files..."
@@ -585,8 +594,9 @@
 	remove_output_file(symbol_file)
 	remove_output_file(branch_type_file)
 	remove_output_file(sample_file)
-	if perf_db_export_calls:
+	if perf_db_export_calls or perf_db_export_callchains:
 		remove_output_file(call_path_file)
+	if perf_db_export_calls:
 		remove_output_file(call_file)
 	os.rmdir(output_dir_name)
 	print datetime.datetime.today(), "Adding primary keys"
@@ -599,8 +609,9 @@
 	do_query(query, 'ALTER TABLE symbols         ADD PRIMARY KEY (id)')
 	do_query(query, 'ALTER TABLE branch_types    ADD PRIMARY KEY (id)')
 	do_query(query, 'ALTER TABLE samples         ADD PRIMARY KEY (id)')
-	if perf_db_export_calls:
+	if perf_db_export_calls or perf_db_export_callchains:
 		do_query(query, 'ALTER TABLE call_paths      ADD PRIMARY KEY (id)')
+	if perf_db_export_calls:
 		do_query(query, 'ALTER TABLE calls           ADD PRIMARY KEY (id)')
 
 	print datetime.datetime.today(), "Adding foreign keys"
@@ -623,10 +634,11 @@
 					'ADD CONSTRAINT symbolfk   FOREIGN KEY (symbol_id)    REFERENCES symbols    (id),'
 					'ADD CONSTRAINT todsofk    FOREIGN KEY (to_dso_id)    REFERENCES dsos       (id),'
 					'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols    (id)')
-	if perf_db_export_calls:
+	if perf_db_export_calls or perf_db_export_callchains:
 		do_query(query, 'ALTER TABLE call_paths '
 					'ADD CONSTRAINT parentfk    FOREIGN KEY (parent_id)    REFERENCES call_paths (id),'
 					'ADD CONSTRAINT symbolfk    FOREIGN KEY (symbol_id)    REFERENCES symbols    (id)')
+	if perf_db_export_calls:
 		do_query(query, 'ALTER TABLE calls '
 					'ADD CONSTRAINT threadfk    FOREIGN KEY (thread_id)    REFERENCES threads    (id),'
 					'ADD CONSTRAINT commfk      FOREIGN KEY (comm_id)      REFERENCES comms      (id),'
@@ -694,11 +706,11 @@
 	value = struct.pack(fmt, 2, 4, branch_type, n, name)
 	branch_type_file.write(value)
 
-def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x):
+def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x):
 	if branches:
-		value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx)
+		value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id)
 	else:
-		value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx)
+		value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id)
 	sample_file.write(value)
 
 def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 1ba628e..66a2898 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -37,6 +37,8 @@
 perf-y += cpumap.o
 perf-y += stat.o
 perf-y += event_update.o
+perf-y += event-times.o
+perf-y += backward-ring-buffer.o
 
 $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
 	$(call rule_mkdir)
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
new file mode 100644
index 0000000..d9ba991
--- /dev/null
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -0,0 +1,151 @@
+/*
+ * Test backward bit in event attribute, read ring buffer from end to
+ * beginning
+ */
+
+#include <perf.h>
+#include <evlist.h>
+#include <sys/prctl.h>
+#include "tests.h"
+#include "debug.h"
+
+#define NR_ITERS 111
+
+static void testcase(void)
+{
+	int i;
+
+	for (i = 0; i < NR_ITERS; i++) {
+		char proc_name[10];
+
+		snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
+		prctl(PR_SET_NAME, proc_name);
+	}
+}
+
+static int count_samples(struct perf_evlist *evlist, int *sample_count,
+			 int *comm_count)
+{
+	int i;
+
+	for (i = 0; i < evlist->nr_mmaps; i++) {
+		union perf_event *event;
+
+		perf_evlist__mmap_read_catchup(evlist, i);
+		while ((event = perf_evlist__mmap_read_backward(evlist, i)) != NULL) {
+			const u32 type = event->header.type;
+
+			switch (type) {
+			case PERF_RECORD_SAMPLE:
+				(*sample_count)++;
+				break;
+			case PERF_RECORD_COMM:
+				(*comm_count)++;
+				break;
+			default:
+				pr_err("Unexpected record of type %d\n", type);
+				return TEST_FAIL;
+			}
+		}
+	}
+	return TEST_OK;
+}
+
+static int do_test(struct perf_evlist *evlist, int mmap_pages,
+		   int *sample_count, int *comm_count)
+{
+	int err;
+	char sbuf[STRERR_BUFSIZE];
+
+	err = perf_evlist__mmap(evlist, mmap_pages, true);
+	if (err < 0) {
+		pr_debug("perf_evlist__mmap: %s\n",
+			 strerror_r(errno, sbuf, sizeof(sbuf)));
+		return TEST_FAIL;
+	}
+
+	perf_evlist__enable(evlist);
+	testcase();
+	perf_evlist__disable(evlist);
+
+	err = count_samples(evlist, sample_count, comm_count);
+	perf_evlist__munmap(evlist);
+	return err;
+}
+
+
+int test__backward_ring_buffer(int subtest __maybe_unused)
+{
+	int ret = TEST_SKIP, err, sample_count = 0, comm_count = 0;
+	char pid[16], sbuf[STRERR_BUFSIZE];
+	struct perf_evlist *evlist;
+	struct perf_evsel *evsel __maybe_unused;
+	struct parse_events_error parse_error;
+	struct record_opts opts = {
+		.target = {
+			.uid = UINT_MAX,
+			.uses_mmap = true,
+		},
+		.freq	      = 0,
+		.mmap_pages   = 256,
+		.default_interval = 1,
+	};
+
+	snprintf(pid, sizeof(pid), "%d", getpid());
+	pid[sizeof(pid) - 1] = '\0';
+	opts.target.tid = opts.target.pid = pid;
+
+	evlist = perf_evlist__new();
+	if (!evlist) {
+		pr_debug("No ehough memory to create evlist\n");
+		return TEST_FAIL;
+	}
+
+	err = perf_evlist__create_maps(evlist, &opts.target);
+	if (err < 0) {
+		pr_debug("Not enough memory to create thread/cpu maps\n");
+		goto out_delete_evlist;
+	}
+
+	bzero(&parse_error, sizeof(parse_error));
+	err = parse_events(evlist, "syscalls:sys_enter_prctl", &parse_error);
+	if (err) {
+		pr_debug("Failed to parse tracepoint event, try use root\n");
+		ret = TEST_SKIP;
+		goto out_delete_evlist;
+	}
+
+	perf_evlist__config(evlist, &opts, NULL);
+
+	/* Set the backward bit; the ring buffer should be written from the end */
+	evlist__for_each(evlist, evsel)
+		evsel->attr.write_backward = 1;
+
+	err = perf_evlist__open(evlist);
+	if (err < 0) {
+		pr_debug("perf_evlist__open: %s\n",
+			 strerror_r(errno, sbuf, sizeof(sbuf)));
+		goto out_delete_evlist;
+	}
+
+	ret = TEST_FAIL;
+	err = do_test(evlist, opts.mmap_pages, &sample_count,
+		      &comm_count);
+	if (err != TEST_OK)
+		goto out_delete_evlist;
+
+	if ((sample_count != NR_ITERS) || (comm_count != NR_ITERS)) {
+		pr_err("Unexpected counter: sample_count=%d, comm_count=%d\n",
+		       sample_count, comm_count);
+		goto out_delete_evlist;
+	}
+
+	err = do_test(evlist, 1, &sample_count, &comm_count);
+	if (err != TEST_OK)
+		goto out_delete_evlist;
+
+	ret = TEST_OK;
+out_delete_evlist:
+	perf_evlist__delete(evlist);
+	return ret;
+}
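
The new test drives the backward-writing ring buffer through perf's internal evlist helpers (perf_evlist__mmap_read_catchup() and perf_evlist__mmap_read_backward()). For readers who want to poke at the underlying feature outside the perf tree, here is a minimal stand-alone sketch; it is hypothetical, not part of this patch, and assumes a uapi linux/perf_event.h new enough (v4.7) to declare the write_backward attribute bit:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long page_size = sysconf(_SC_PAGESIZE);
		void *base;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_DUMMY;
		attr.write_backward = 1;	/* assumption: uapi header declares this bit */

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		/* 1 metadata page + 2^n data pages; read-only mapping for an overwritable buffer */
		base = mmap(NULL, (1 + 8) * page_size, PROT_READ, MAP_SHARED, fd, 0);
		if (base == MAP_FAILED) {
			perror("mmap");
			close(fd);
			return 1;
		}

		munmap(base, (1 + 8) * page_size);
		close(fd);
		return 0;
	}

The test above exercises the same mechanism end to end: it sets write_backward on every evsel, generates PR_SET_NAME events, and then counts the records it gets back while walking the buffer from tail to head.
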
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 199501c..f31eed3 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -138,7 +138,7 @@
 	perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
 	evlist->nr_groups = parse_evlist.nr_groups;
 
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 
 	err = perf_evlist__open(evlist);
 	if (err < 0) {
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index f2b1dca..0e95c20 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -204,6 +204,14 @@
 		.func = test__event_update,
 	},
 	{
+		.desc = "Test events times",
+		.func = test__event_times,
+	},
+	{
+		.desc = "Test backward reading from ring buffer",
+		.func = test__backward_ring_buffer,
+	},
+	{
 		.func = NULL,
 	},
 };
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index abd3f0e..68a69a1 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -532,7 +532,7 @@
 			goto out_put;
 		}
 
-		perf_evlist__config(evlist, &opts);
+		perf_evlist__config(evlist, &opts, NULL);
 
 		evsel = perf_evlist__first(evlist);
 
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index dc673ff..8cf0d9e 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -202,7 +202,7 @@
 {
 	int i;
 
-	dsos = malloc(sizeof(dsos) * cnt);
+	dsos = malloc(sizeof(*dsos) * cnt);
 	TEST_ASSERT_VAL("failed to alloc dsos array", dsos);
 
 	for (i = 0; i < cnt; i++) {
diff --git a/tools/perf/tests/event-times.c b/tools/perf/tests/event-times.c
new file mode 100644
index 0000000..95fb744
--- /dev/null
+++ b/tools/perf/tests/event-times.c
@@ -0,0 +1,236 @@
+#include <linux/compiler.h>
+#include <string.h>
+#include "tests.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "util.h"
+#include "debug.h"
+#include "thread_map.h"
+#include "target.h"
+
+static int attach__enable_on_exec(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = perf_evlist__last(evlist);
+	struct target target = {
+		.uid = UINT_MAX,
+	};
+	const char *argv[] = { "true", NULL, };
+	char sbuf[STRERR_BUFSIZE];
+	int err;
+
+	pr_debug("attaching to spawned child, enable on exec\n");
+
+	err = perf_evlist__create_maps(evlist, &target);
+	if (err < 0) {
+		pr_debug("Not enough memory to create thread/cpu maps\n");
+		return err;
+	}
+
+	err = perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
+	if (err < 0) {
+		pr_debug("Couldn't run the workload!\n");
+		return err;
+	}
+
+	evsel->attr.enable_on_exec = 1;
+
+	err = perf_evlist__open(evlist);
+	if (err < 0) {
+		pr_debug("perf_evlist__open: %s\n",
+			 strerror_r(errno, sbuf, sizeof(sbuf)));
+		return err;
+	}
+
+	return perf_evlist__start_workload(evlist) == 1 ? TEST_OK : TEST_FAIL;
+}
+
+static int detach__enable_on_exec(struct perf_evlist *evlist)
+{
+	waitpid(evlist->workload.pid, NULL, 0);
+	return 0;
+}
+
+static int attach__current_disabled(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = perf_evlist__last(evlist);
+	struct thread_map *threads;
+	int err;
+
+	pr_debug("attaching to current thread as disabled\n");
+
+	threads = thread_map__new(-1, getpid(), UINT_MAX);
+	if (threads == NULL) {
+		pr_debug("failed to call thread_map__new\n");
+		return -1;
+	}
+
+	evsel->attr.disabled = 1;
+
+	err = perf_evsel__open_per_thread(evsel, threads);
+	if (err) {
+		pr_debug("Failed to open event cpu-clock:u\n");
+		return err;
+	}
+
+	thread_map__put(threads);
+	return perf_evsel__enable(evsel) == 0 ? TEST_OK : TEST_FAIL;
+}
+
+static int attach__current_enabled(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = perf_evlist__last(evlist);
+	struct thread_map *threads;
+	int err;
+
+	pr_debug("attaching to current thread as enabled\n");
+
+	threads = thread_map__new(-1, getpid(), UINT_MAX);
+	if (threads == NULL) {
+		pr_debug("failed to call thread_map__new\n");
+		return -1;
+	}
+
+	err = perf_evsel__open_per_thread(evsel, threads);
+
+	thread_map__put(threads);
+	return err == 0 ? TEST_OK : TEST_FAIL;
+}
+
+static int detach__disable(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = perf_evlist__last(evlist);
+
+	return perf_evsel__enable(evsel);
+}
+
+static int attach__cpu_disabled(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = perf_evlist__last(evlist);
+	struct cpu_map *cpus;
+	int err;
+
+	pr_debug("attaching to CPU 0 as disabled\n");
+
+	cpus = cpu_map__new("0");
+	if (cpus == NULL) {
+		pr_debug("failed to call cpu_map__new\n");
+		return -1;
+	}
+
+	evsel->attr.disabled = 1;
+
+	err = perf_evsel__open_per_cpu(evsel, cpus);
+	if (err) {
+		if (err == -EACCES)
+			return TEST_SKIP;
+
+		pr_debug("Failed to open event cpu-clock:u\n");
+		return err;
+	}
+
+	cpu_map__put(cpus);
+	return perf_evsel__enable(evsel);
+}
+
+static int attach__cpu_enabled(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = perf_evlist__last(evlist);
+	struct cpu_map *cpus;
+	int err;
+
+	pr_debug("attaching to CPU 0 as enabled\n");
+
+	cpus = cpu_map__new("0");
+	if (cpus == NULL) {
+		pr_debug("failed to call cpu_map__new\n");
+		return -1;
+	}
+
+	err = perf_evsel__open_per_cpu(evsel, cpus);
+	if (err == -EACCES)
+		return TEST_SKIP;
+
+	cpu_map__put(cpus);
+	return err ? TEST_FAIL : TEST_OK;
+}
+
+static int test_times(int (attach)(struct perf_evlist *),
+		      int (detach)(struct perf_evlist *))
+{
+	struct perf_counts_values count;
+	struct perf_evlist *evlist = NULL;
+	struct perf_evsel *evsel;
+	int err = -1, i;
+
+	evlist = perf_evlist__new();
+	if (!evlist) {
+		pr_debug("failed to create event list\n");
+		goto out_err;
+	}
+
+	err = parse_events(evlist, "cpu-clock:u", NULL);
+	if (err) {
+		pr_debug("failed to parse event cpu-clock:u\n");
+		goto out_err;
+	}
+
+	evsel = perf_evlist__last(evlist);
+	evsel->attr.read_format |=
+		PERF_FORMAT_TOTAL_TIME_ENABLED |
+		PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+	err = attach(evlist);
+	if (err == TEST_SKIP) {
+		pr_debug("  SKIP  : not enough rights\n");
+		return err;
+	}
+
+	TEST_ASSERT_VAL("failed to attach", !err);
+
+	for (i = 0; i < 100000000; i++) { }
+
+	TEST_ASSERT_VAL("failed to detach", !detach(evlist));
+
+	perf_evsel__read(evsel, 0, 0, &count);
+
+	err = !(count.ena == count.run);
+
+	pr_debug("  %s: ena %" PRIu64", run %" PRIu64"\n",
+		 !err ? "OK    " : "FAILED",
+		 count.ena, count.run);
+
+out_err:
+	if (evlist)
+		perf_evlist__delete(evlist);
+	return !err ? TEST_OK : TEST_FAIL;
+}
+
+/*
+ * This test creates a software event 'cpu-clock',
+ * attaches it in several ways (explained below)
+ * and checks that the enabled and running times
+ * match.
+ */
+int test__event_times(int subtest __maybe_unused)
+{
+	int err, ret = 0;
+
+#define _T(attach, detach)			\
+	err = test_times(attach, detach);	\
+	if (err && (ret == TEST_OK || ret == TEST_SKIP))	\
+		ret = err;
+
+	/* attach on newly spawned process after exec */
+	_T(attach__enable_on_exec,   detach__enable_on_exec)
+	/* attach on current process as enabled */
+	_T(attach__current_enabled,  detach__disable)
+	/* attach on current process as disabled */
+	_T(attach__current_disabled, detach__disable)
+	/* attach on cpu as disabled */
+	_T(attach__cpu_disabled,     detach__disable)
+	/* attach on cpu as enabled */
+	_T(attach__cpu_enabled,      detach__disable)
+
+#undef _T
+	return ret;
+}
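
The pass criterion in test_times() is simply that time_enabled equals time_running for the read_format bits requested on the evsel. For comparison, the same check can be expressed against a raw perf_event_open() file descriptor; the sketch below is hypothetical and not part of the patch, but with PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING set, read(2) returns value, time_enabled and time_running in that order:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>
	#include <inttypes.h>

	struct read_fmt { uint64_t value, ena, run; };

	int main(void)
	{
		struct perf_event_attr attr;
		struct read_fmt rf;
		volatile long i;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_CPU_CLOCK;
		attr.exclude_kernel = 1;		/* cpu-clock:u, as in the test */
		attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				   PERF_FORMAT_TOTAL_TIME_RUNNING;

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		for (i = 0; i < 100000000; i++)		/* burn some cpu, like the test */
			;

		if (read(fd, &rf, sizeof(rf)) != sizeof(rf)) {
			perror("read");
			return 1;
		}

		printf("value=%" PRIu64 " ena=%" PRIu64 " run=%" PRIu64 " %s\n",
		       rf.value, rf.ena, rf.run, rf.ena == rf.run ? "OK" : "FAILED");
		close(fd);
		return 0;
	}

With a single counter attached to the current thread there is no multiplexing, so ena and run are expected to be identical, which is exactly what each _T() case asserts.
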
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
index 012eab5..63ecf21 100644
--- a/tools/perf/tests/event_update.c
+++ b/tools/perf/tests/event_update.c
@@ -30,7 +30,7 @@
 
 	TEST_ASSERT_VAL("wrong id", ev->id == 123);
 	TEST_ASSERT_VAL("wrong id", ev->type == PERF_EVENT_UPDATE__SCALE);
-	TEST_ASSERT_VAL("wrong scale", ev_data->scale = 0.123);
+	TEST_ASSERT_VAL("wrong scale", ev_data->scale == 0.123);
 	return 0;
 }
 
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index f55f4bd..6b21746 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -161,7 +161,7 @@
 	struct rb_root *root;
 	struct rb_node *node;
 
-	if (sort__need_collapse)
+	if (hists__has(hists, need_collapse))
 		root = &hists->entries_collapsed;
 	else
 		root = hists->entries_in;
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index ed5aa9e..a9e3db3 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -101,7 +101,7 @@
 		if (machine__resolve(machine, &al, &sample) < 0)
 			goto out;
 
-		if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
+		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
 					 NULL) < 0) {
 			addr_location__put(&al);
 			goto out;
@@ -126,7 +126,7 @@
 	struct rb_root *root_out;
 	struct rb_node *node;
 
-	if (sort__need_collapse)
+	if (hists__has(hists, need_collapse))
 		root_in = &hists->entries_collapsed;
 	else
 		root_in = hists->entries_in;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index b825d24..e846f8c 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -81,7 +81,7 @@
 
 			al.socket = fake_samples[i].socket;
 			if (hist_entry_iter__add(&iter, &al,
-						 PERF_MAX_STACK_DEPTH, NULL) < 0) {
+						 sysctl_perf_event_max_stack, NULL) < 0) {
 				addr_location__put(&al);
 				goto out;
 			}
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 358324e..acf5a13 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -145,7 +145,7 @@
 	/*
 	 * Only entries from fake_common_samples should have a pair.
 	 */
-	if (sort__need_collapse)
+	if (hists__has(hists, need_collapse))
 		root = &hists->entries_collapsed;
 	else
 		root = hists->entries_in;
@@ -197,7 +197,7 @@
 	 * and some entries will have no pair.  However every entry
 	 * in other hists should have (dummy) pair.
 	 */
-	if (sort__need_collapse)
+	if (hists__has(hists, need_collapse))
 		root = &hists->entries_collapsed;
 	else
 		root = hists->entries_in;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index d3556fb..63c5efa 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -67,7 +67,7 @@
 		if (machine__resolve(machine, &al, &sample) < 0)
 			goto out;
 
-		if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
+		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
 					 NULL) < 0) {
 			addr_location__put(&al);
 			goto out;
@@ -92,7 +92,7 @@
 	struct rb_root *root_out;
 	struct rb_node *node;
 
-	if (sort__need_collapse)
+	if (hists__has(hists, need_collapse))
 		root_in = &hists->entries_collapsed;
 	else
 		root_in = hists->entries_in;
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index ddb78fa..614e45a 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -80,7 +80,7 @@
 	CHECK__(parse_events(evlist, "dummy:u", NULL));
 	CHECK__(parse_events(evlist, "cycles:u", NULL));
 
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 
 	evsel = perf_evlist__first(evlist);
 
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index 53c2273..ad1cb63 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -73,7 +73,7 @@
 	}
 
 	/*
-	 * Here we need to explicitely preallocate the counts, as if
+	 * Here we need to explicitly preallocate the counts, as if
 	 * we use the auto allocation it will allocate just for 1 cpu,
 	 * as we start by cpu 0.
 	 */
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index eb99a105..4344fe4 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -44,7 +44,7 @@
 		goto out_delete_evlist;
 	}
 
-	perf_evsel__config(evsel, &opts);
+	perf_evsel__config(evsel, &opts, NULL);
 
 	thread_map__set_pid(evlist->threads, 0, getpid());
 
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 1cc78ce..b836ee6a 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -99,7 +99,7 @@
 	perf_evsel__set_sample_bit(evsel, CPU);
 	perf_evsel__set_sample_bit(evsel, TID);
 	perf_evsel__set_sample_bit(evsel, TIME);
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 
 	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
 	if (err < 0) {
diff --git a/tools/perf/tests/perf-targz-src-pkg b/tools/perf/tests/perf-targz-src-pkg
index 238aa39..f2d9c5f 100755
--- a/tools/perf/tests/perf-targz-src-pkg
+++ b/tools/perf/tests/perf-targz-src-pkg
@@ -15,7 +15,7 @@
 tar xf ${TARBALL} -C $TMP_DEST
 rm -f ${TARBALL}
 cd - > /dev/null
-make -C $TMP_DEST/perf*/tools/perf > /dev/null 2>&1
+make -C $TMP_DEST/perf*/tools/perf > /dev/null
 RC=$?
 rm -rf ${TMP_DEST}
 exit $RC
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index ebd8016..39a689b 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -417,7 +417,7 @@
 	perf_evsel__set_sample_bit(tracking_evsel, TIME);
 
 	/* Config events */
-	perf_evlist__config(evlist, &opts);
+	perf_evlist__config(evlist, &opts, NULL);
 
 	/* Check moved event is still at the front */
 	if (cycles_evsel != perf_evlist__first(evlist)) {
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 82b2b5e..c57e72c 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -85,6 +85,8 @@
 int test__synthesize_stat(int subtest);
 int test__synthesize_stat_round(int subtest);
 int test__event_update(int subtest);
+int test__event_times(int subtest);
+int test__backward_ring_buffer(int subtest);
 
 #if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 630b0b4..e63abab 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -54,8 +54,14 @@
 	 * Step 3:
 	 *
 	 * Load and split /proc/kallsyms into multiple maps, one per module.
+	 * Do not use kcore, as this test was designed before kcore support
+	 * and has parts that only make sense if using the non-kcore code.
+	 * XXX: extend it to stress the kcore code as well, hint: the list
+	 * of modules extracted from /proc/kcore, in its current form, can't
+	 * be compared against the list of modules found in the "vmlinux"
+	 * code nor with the one obtained from /proc/modules by the "kallsyms" code.
 	 */
-	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
+	if (__machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, true, NULL) <= 0) {
 		pr_debug("dso__load_kallsyms ");
 		goto out;
 	}
@@ -157,6 +163,9 @@
 
 					pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
 						 mem_start, sym->name, pair->name);
+				} else {
+					pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
+						 mem_start, sym->name, first_pair->name);
 				}
 			}
 		} else
diff --git a/tools/perf/trace/beauty/eventfd.c b/tools/perf/trace/beauty/eventfd.c
new file mode 100644
index 0000000..d64f4a9
--- /dev/null
+++ b/tools/perf/trace/beauty/eventfd.c
@@ -0,0 +1,38 @@
+#include <sys/eventfd.h>
+
+#ifndef EFD_SEMAPHORE
+#define EFD_SEMAPHORE		1
+#endif
+
+#ifndef EFD_NONBLOCK
+#define EFD_NONBLOCK		00004000
+#endif
+
+#ifndef EFD_CLOEXEC
+#define EFD_CLOEXEC		02000000
+#endif
+
+static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size, struct syscall_arg *arg)
+{
+	int printed = 0, flags = arg->val;
+
+	if (flags == 0)
+		return scnprintf(bf, size, "NONE");
+#define	P_FLAG(n) \
+	if (flags & EFD_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		flags &= ~EFD_##n; \
+	}
+
+	P_FLAG(SEMAPHORE);
+	P_FLAG(CLOEXEC);
+	P_FLAG(NONBLOCK);
+#undef P_FLAG
+
+	if (flags)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+	return printed;
+}
+
+#define SCA_EFD_FLAGS syscall_arg__scnprintf_eventfd_flags
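
All of the new beauty/ formatters added below share the same shape: peel each known bit off the flags word, print its symbolic name '|'-separated, then dump whatever is left over in hex. A stand-alone illustration of that pattern (hypothetical, not part of the patch; it uses plain snprintf() where the in-tree code uses perf's scnprintf()) looks like this:

	#include <stdio.h>
	#include <sys/eventfd.h>

	static size_t fmt_eventfd_flags(char *bf, size_t size, int flags)
	{
		size_t printed = 0;

		if (flags == 0)
			return snprintf(bf, size, "NONE");
	#define P_FLAG(n) \
		if (flags & EFD_##n) { \
			printed += snprintf(bf + printed, size - printed, "%s%s", \
					    printed ? "|" : "", #n); \
			flags &= ~EFD_##n; \
		}
		P_FLAG(SEMAPHORE);
		P_FLAG(CLOEXEC);
		P_FLAG(NONBLOCK);
	#undef P_FLAG
		/* anything not decoded above is shown numerically */
		if (flags)
			printed += snprintf(bf + printed, size - printed, "%s%#x",
					    printed ? "|" : "", flags);
		return printed;
	}

	int main(void)
	{
		char bf[64];

		fmt_eventfd_flags(bf, sizeof(bf), EFD_CLOEXEC | EFD_NONBLOCK);
		puts(bf);	/* prints "CLOEXEC|NONBLOCK" */
		return 0;
	}
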
diff --git a/tools/perf/trace/beauty/flock.c b/tools/perf/trace/beauty/flock.c
new file mode 100644
index 0000000..021bb48
--- /dev/null
+++ b/tools/perf/trace/beauty/flock.c
@@ -0,0 +1,31 @@
+
+static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
+					   struct syscall_arg *arg)
+{
+	int printed = 0, op = arg->val;
+
+	if (op == 0)
+		return scnprintf(bf, size, "NONE");
+#define	P_CMD(cmd) \
+	if ((op & LOCK_##cmd) == LOCK_##cmd) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \
+		op &= ~LOCK_##cmd; \
+	}
+
+	P_CMD(SH);
+	P_CMD(EX);
+	P_CMD(NB);
+	P_CMD(UN);
+	P_CMD(MAND);
+	P_CMD(RW);
+	P_CMD(READ);
+	P_CMD(WRITE);
+#undef P_CMD
+
+	if (op)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", op);
+
+	return printed;
+}
+
+#define SCA_FLOCK syscall_arg__scnprintf_flock
diff --git a/tools/perf/trace/beauty/futex_op.c b/tools/perf/trace/beauty/futex_op.c
new file mode 100644
index 0000000..e247621
--- /dev/null
+++ b/tools/perf/trace/beauty/futex_op.c
@@ -0,0 +1,44 @@
+#include <linux/futex.h>
+
+static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
+{
+	enum syscall_futex_args {
+		SCF_UADDR   = (1 << 0),
+		SCF_OP	    = (1 << 1),
+		SCF_VAL	    = (1 << 2),
+		SCF_TIMEOUT = (1 << 3),
+		SCF_UADDR2  = (1 << 4),
+		SCF_VAL3    = (1 << 5),
+	};
+	int op = arg->val;
+	int cmd = op & FUTEX_CMD_MASK;
+	size_t printed = 0;
+
+	switch (cmd) {
+#define	P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
+	P_FUTEX_OP(WAIT);	    arg->mask |= SCF_VAL3|SCF_UADDR2;		  break;
+	P_FUTEX_OP(WAKE);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+	P_FUTEX_OP(FD);		    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+	P_FUTEX_OP(REQUEUE);	    arg->mask |= SCF_VAL3|SCF_TIMEOUT;	          break;
+	P_FUTEX_OP(CMP_REQUEUE);    arg->mask |= SCF_TIMEOUT;			  break;
+	P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT;			  break;
+	P_FUTEX_OP(WAKE_OP);							  break;
+	P_FUTEX_OP(LOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+	P_FUTEX_OP(UNLOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+	P_FUTEX_OP(TRYLOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2;		  break;
+	P_FUTEX_OP(WAIT_BITSET);    arg->mask |= SCF_UADDR2;			  break;
+	P_FUTEX_OP(WAKE_BITSET);    arg->mask |= SCF_UADDR2;			  break;
+	P_FUTEX_OP(WAIT_REQUEUE_PI);						  break;
+	default: printed = scnprintf(bf, size, "%#x", cmd);			  break;
+	}
+
+	if (op & FUTEX_PRIVATE_FLAG)
+		printed += scnprintf(bf + printed, size - printed, "|PRIV");
+
+	if (op & FUTEX_CLOCK_REALTIME)
+		printed += scnprintf(bf + printed, size - printed, "|CLKRT");
+
+	return printed;
+}
+
+#define SCA_FUTEX_OP  syscall_arg__scnprintf_futex_op
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
new file mode 100644
index 0000000..3444a4d
--- /dev/null
+++ b/tools/perf/trace/beauty/mmap.c
@@ -0,0 +1,158 @@
+#include <sys/mman.h>
+
+static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
+					       struct syscall_arg *arg)
+{
+	int printed = 0, prot = arg->val;
+
+	if (prot == PROT_NONE)
+		return scnprintf(bf, size, "NONE");
+#define	P_MMAP_PROT(n) \
+	if (prot & PROT_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		prot &= ~PROT_##n; \
+	}
+
+	P_MMAP_PROT(EXEC);
+	P_MMAP_PROT(READ);
+	P_MMAP_PROT(WRITE);
+#ifdef PROT_SEM
+	P_MMAP_PROT(SEM);
+#endif
+	P_MMAP_PROT(GROWSDOWN);
+	P_MMAP_PROT(GROWSUP);
+#undef P_MMAP_PROT
+
+	if (prot)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);
+
+	return printed;
+}
+
+#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
+
+#ifndef MAP_STACK
+# define MAP_STACK		0x20000
+#endif
+
+static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
+						struct syscall_arg *arg)
+{
+	int printed = 0, flags = arg->val;
+
+#define	P_MMAP_FLAG(n) \
+	if (flags & MAP_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		flags &= ~MAP_##n; \
+	}
+
+	P_MMAP_FLAG(SHARED);
+	P_MMAP_FLAG(PRIVATE);
+#ifdef MAP_32BIT
+	P_MMAP_FLAG(32BIT);
+#endif
+	P_MMAP_FLAG(ANONYMOUS);
+	P_MMAP_FLAG(DENYWRITE);
+	P_MMAP_FLAG(EXECUTABLE);
+	P_MMAP_FLAG(FILE);
+	P_MMAP_FLAG(FIXED);
+	P_MMAP_FLAG(GROWSDOWN);
+#ifdef MAP_HUGETLB
+	P_MMAP_FLAG(HUGETLB);
+#endif
+	P_MMAP_FLAG(LOCKED);
+	P_MMAP_FLAG(NONBLOCK);
+	P_MMAP_FLAG(NORESERVE);
+	P_MMAP_FLAG(POPULATE);
+	P_MMAP_FLAG(STACK);
+#ifdef MAP_UNINITIALIZED
+	P_MMAP_FLAG(UNINITIALIZED);
+#endif
+#undef P_MMAP_FLAG
+
+	if (flags)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+	return printed;
+}
+
+#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
+
+static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
+						  struct syscall_arg *arg)
+{
+	int printed = 0, flags = arg->val;
+
+#define P_MREMAP_FLAG(n) \
+	if (flags & MREMAP_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		flags &= ~MREMAP_##n; \
+	}
+
+	P_MREMAP_FLAG(MAYMOVE);
+#ifdef MREMAP_FIXED
+	P_MREMAP_FLAG(FIXED);
+#endif
+#undef P_MREMAP_FLAG
+
+	if (flags)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+	return printed;
+}
+
+#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
+
+#ifndef MADV_HWPOISON
+#define MADV_HWPOISON		100
+#endif
+
+#ifndef MADV_MERGEABLE
+#define MADV_MERGEABLE		 12
+#endif
+
+#ifndef MADV_UNMERGEABLE
+#define MADV_UNMERGEABLE	 13
+#endif
+
+static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
+						      struct syscall_arg *arg)
+{
+	int behavior = arg->val;
+
+	switch (behavior) {
+#define	P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
+	P_MADV_BHV(NORMAL);
+	P_MADV_BHV(RANDOM);
+	P_MADV_BHV(SEQUENTIAL);
+	P_MADV_BHV(WILLNEED);
+	P_MADV_BHV(DONTNEED);
+	P_MADV_BHV(REMOVE);
+	P_MADV_BHV(DONTFORK);
+	P_MADV_BHV(DOFORK);
+	P_MADV_BHV(HWPOISON);
+#ifdef MADV_SOFT_OFFLINE
+	P_MADV_BHV(SOFT_OFFLINE);
+#endif
+	P_MADV_BHV(MERGEABLE);
+	P_MADV_BHV(UNMERGEABLE);
+#ifdef MADV_HUGEPAGE
+	P_MADV_BHV(HUGEPAGE);
+#endif
+#ifdef MADV_NOHUGEPAGE
+	P_MADV_BHV(NOHUGEPAGE);
+#endif
+#ifdef MADV_DONTDUMP
+	P_MADV_BHV(DONTDUMP);
+#endif
+#ifdef MADV_DODUMP
+	P_MADV_BHV(DODUMP);
+#endif
+#undef P_MADV_BHV
+	default: break;
+	}
+
+	return scnprintf(bf, size, "%#x", behavior);
+}
+
+#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
diff --git a/tools/perf/trace/beauty/mode_t.c b/tools/perf/trace/beauty/mode_t.c
new file mode 100644
index 0000000..930d8fe
--- /dev/null
+++ b/tools/perf/trace/beauty/mode_t.c
@@ -0,0 +1,68 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+/* From include/linux/stat.h */
+#ifndef S_IRWXUGO
+#define S_IRWXUGO	(S_IRWXU|S_IRWXG|S_IRWXO)
+#endif
+#ifndef S_IALLUGO
+#define S_IALLUGO	(S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
+#endif
+#ifndef S_IRUGO
+#define S_IRUGO         (S_IRUSR|S_IRGRP|S_IROTH)
+#endif
+#ifndef S_IWUGO
+#define S_IWUGO         (S_IWUSR|S_IWGRP|S_IWOTH)
+#endif
+#ifndef S_IXUGO
+#define S_IXUGO         (S_IXUSR|S_IXGRP|S_IXOTH)
+#endif
+
+static size_t syscall_arg__scnprintf_mode_t(char *bf, size_t size, struct syscall_arg *arg)
+{
+	int printed = 0, mode = arg->val;
+
+#define	P_MODE(n) \
+	if ((mode & S_##n) == S_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		mode &= ~S_##n; \
+	}
+
+	P_MODE(IALLUGO);
+	P_MODE(IRWXUGO);
+	P_MODE(IRUGO);
+	P_MODE(IWUGO);
+	P_MODE(IXUGO);
+	P_MODE(IFMT);
+	P_MODE(IFSOCK);
+	P_MODE(IFLNK);
+	P_MODE(IFREG);
+	P_MODE(IFBLK);
+	P_MODE(IFDIR);
+	P_MODE(IFCHR);
+	P_MODE(IFIFO);
+	P_MODE(ISUID);
+	P_MODE(ISGID);
+	P_MODE(ISVTX);
+	P_MODE(IRWXU);
+	P_MODE(IRUSR);
+	P_MODE(IWUSR);
+	P_MODE(IXUSR);
+	P_MODE(IRWXG);
+	P_MODE(IRGRP);
+	P_MODE(IWGRP);
+	P_MODE(IXGRP);
+	P_MODE(IRWXO);
+	P_MODE(IROTH);
+	P_MODE(IWOTH);
+	P_MODE(IXOTH);
+#undef P_MODE
+
+	if (mode)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", mode);
+
+	return printed;
+}
+
+#define SCA_MODE_T syscall_arg__scnprintf_mode_t
diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c
new file mode 100644
index 0000000..07fa8a0
--- /dev/null
+++ b/tools/perf/trace/beauty/msg_flags.c
@@ -0,0 +1,62 @@
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#ifndef MSG_PROBE
+#define MSG_PROBE		     0x10
+#endif
+#ifndef MSG_WAITFORONE
+#define MSG_WAITFORONE		   0x10000
+#endif
+#ifndef MSG_SENDPAGE_NOTLAST
+#define MSG_SENDPAGE_NOTLAST	   0x20000
+#endif
+#ifndef MSG_FASTOPEN
+#define MSG_FASTOPEN		0x20000000
+#endif
+#ifndef MSG_CMSG_CLOEXEC
+# define MSG_CMSG_CLOEXEC	0x40000000
+#endif
+
+static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
+					       struct syscall_arg *arg)
+{
+	int printed = 0, flags = arg->val;
+
+	if (flags == 0)
+		return scnprintf(bf, size, "NONE");
+#define	P_MSG_FLAG(n) \
+	if (flags & MSG_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		flags &= ~MSG_##n; \
+	}
+
+	P_MSG_FLAG(OOB);
+	P_MSG_FLAG(PEEK);
+	P_MSG_FLAG(DONTROUTE);
+	P_MSG_FLAG(TRYHARD);
+	P_MSG_FLAG(CTRUNC);
+	P_MSG_FLAG(PROBE);
+	P_MSG_FLAG(TRUNC);
+	P_MSG_FLAG(DONTWAIT);
+	P_MSG_FLAG(EOR);
+	P_MSG_FLAG(WAITALL);
+	P_MSG_FLAG(FIN);
+	P_MSG_FLAG(SYN);
+	P_MSG_FLAG(CONFIRM);
+	P_MSG_FLAG(RST);
+	P_MSG_FLAG(ERRQUEUE);
+	P_MSG_FLAG(NOSIGNAL);
+	P_MSG_FLAG(MORE);
+	P_MSG_FLAG(WAITFORONE);
+	P_MSG_FLAG(SENDPAGE_NOTLAST);
+	P_MSG_FLAG(FASTOPEN);
+	P_MSG_FLAG(CMSG_CLOEXEC);
+#undef P_MSG_FLAG
+
+	if (flags)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+	return printed;
+}
+
+#define SCA_MSG_FLAGS syscall_arg__scnprintf_msg_flags
diff --git a/tools/perf/trace/beauty/open_flags.c b/tools/perf/trace/beauty/open_flags.c
new file mode 100644
index 0000000..0f3679e
--- /dev/null
+++ b/tools/perf/trace/beauty/open_flags.c
@@ -0,0 +1,56 @@
+
+static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
+					       struct syscall_arg *arg)
+{
+	int printed = 0, flags = arg->val;
+
+	if (!(flags & O_CREAT))
+		arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */
+
+	if (flags == 0)
+		return scnprintf(bf, size, "RDONLY");
+#define	P_FLAG(n) \
+	if (flags & O_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		flags &= ~O_##n; \
+	}
+
+	P_FLAG(APPEND);
+	P_FLAG(ASYNC);
+	P_FLAG(CLOEXEC);
+	P_FLAG(CREAT);
+	P_FLAG(DIRECT);
+	P_FLAG(DIRECTORY);
+	P_FLAG(EXCL);
+	P_FLAG(LARGEFILE);
+	P_FLAG(NOATIME);
+	P_FLAG(NOCTTY);
+#ifdef O_NONBLOCK
+	P_FLAG(NONBLOCK);
+#elif O_NDELAY
+	P_FLAG(NDELAY);
+#endif
+#ifdef O_PATH
+	P_FLAG(PATH);
+#endif
+	P_FLAG(RDWR);
+#ifdef O_DSYNC
+	if ((flags & O_SYNC) == O_SYNC)
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
+	else {
+		P_FLAG(DSYNC);
+	}
+#else
+	P_FLAG(SYNC);
+#endif
+	P_FLAG(TRUNC);
+	P_FLAG(WRONLY);
+#undef P_FLAG
+
+	if (flags)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+	return printed;
+}
+
+#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
diff --git a/tools/perf/trace/beauty/perf_event_open.c b/tools/perf/trace/beauty/perf_event_open.c
new file mode 100644
index 0000000..311f09d
--- /dev/null
+++ b/tools/perf/trace/beauty/perf_event_open.c
@@ -0,0 +1,43 @@
+#ifndef PERF_FLAG_FD_NO_GROUP
+# define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
+#endif
+
+#ifndef PERF_FLAG_FD_OUTPUT
+# define PERF_FLAG_FD_OUTPUT		(1UL << 1)
+#endif
+
+#ifndef PERF_FLAG_PID_CGROUP
+# define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
+#endif
+
+#ifndef PERF_FLAG_FD_CLOEXEC
+# define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */
+#endif
+
+static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
+						struct syscall_arg *arg)
+{
+	int printed = 0, flags = arg->val;
+
+	if (flags == 0)
+		return 0;
+
+#define	P_FLAG(n) \
+	if (flags & PERF_FLAG_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		flags &= ~PERF_FLAG_##n; \
+	}
+
+	P_FLAG(FD_NO_GROUP);
+	P_FLAG(FD_OUTPUT);
+	P_FLAG(PID_CGROUP);
+	P_FLAG(FD_CLOEXEC);
+#undef P_FLAG
+
+	if (flags)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+	return printed;
+}
+
+#define SCA_PERF_FLAGS syscall_arg__scnprintf_perf_flags
diff --git a/tools/perf/trace/beauty/pid.c b/tools/perf/trace/beauty/pid.c
new file mode 100644
index 0000000..07486ea
--- /dev/null
+++ b/tools/perf/trace/beauty/pid.c
@@ -0,0 +1,21 @@
+static size_t syscall_arg__scnprintf_pid(char *bf, size_t size, struct syscall_arg *arg)
+{
+	int pid = arg->val;
+	struct trace *trace = arg->trace;
+	size_t printed = scnprintf(bf, size, "%d", pid);
+	struct thread *thread = machine__findnew_thread(trace->host, pid, pid);
+
+	if (thread != NULL) {
+		if (!thread->comm_set)
+			thread__set_comm_from_proc(thread);
+
+		if (thread->comm_set)
+			printed += scnprintf(bf + printed, size - printed,
+					     " (%s)", thread__comm_str(thread));
+		thread__put(thread);
+	}
+
+	return printed;
+}
+
+#define SCA_PID syscall_arg__scnprintf_pid
diff --git a/tools/perf/trace/beauty/sched_policy.c b/tools/perf/trace/beauty/sched_policy.c
new file mode 100644
index 0000000..c205bc6
--- /dev/null
+++ b/tools/perf/trace/beauty/sched_policy.c
@@ -0,0 +1,44 @@
+#include <sched.h>
+
+/*
+ * Probably not defined anywhere else; this mask is here
+ * just to make sure we catch future flags
+ */
+#define SCHED_POLICY_MASK 0xff
+
+#ifndef SCHED_DEADLINE
+#define SCHED_DEADLINE 6
+#endif
+
+static size_t syscall_arg__scnprintf_sched_policy(char *bf, size_t size,
+						  struct syscall_arg *arg)
+{
+	const char *policies[] = {
+		"NORMAL", "FIFO", "RR", "BATCH", "ISO", "IDLE", "DEADLINE",
+	};
+	size_t printed;
+	int policy = arg->val,
+	    flags = policy & ~SCHED_POLICY_MASK;
+
+	policy &= SCHED_POLICY_MASK;
+	if (policy <= SCHED_DEADLINE)
+		printed = scnprintf(bf, size, "%s", policies[policy]);
+	else
+		printed = scnprintf(bf, size, "%#x", policy);
+
+#define	P_POLICY_FLAG(n) \
+	if (flags & SCHED_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
+		flags &= ~SCHED_##n; \
+	}
+
+	P_POLICY_FLAG(RESET_ON_FORK);
+#undef P_POLICY_FLAG
+
+	if (flags)
+		printed += scnprintf(bf + printed, size - printed, "|%#x", flags);
+
+	return printed;
+}
+
+#define SCA_SCHED_POLICY syscall_arg__scnprintf_sched_policy
diff --git a/tools/perf/trace/beauty/seccomp.c b/tools/perf/trace/beauty/seccomp.c
new file mode 100644
index 0000000..213c5a7
--- /dev/null
+++ b/tools/perf/trace/beauty/seccomp.c
@@ -0,0 +1,52 @@
+#include <linux/seccomp.h>
+
+#ifndef SECCOMP_SET_MODE_STRICT
+#define SECCOMP_SET_MODE_STRICT 0
+#endif
+#ifndef SECCOMP_SET_MODE_FILTER
+#define SECCOMP_SET_MODE_FILTER 1
+#endif
+
+static size_t syscall_arg__scnprintf_seccomp_op(char *bf, size_t size, struct syscall_arg *arg)
+{
+	int op = arg->val;
+	size_t printed = 0;
+
+	switch (op) {
+#define	P_SECCOMP_SET_MODE_OP(n) case SECCOMP_SET_MODE_##n: printed = scnprintf(bf, size, #n); break
+	P_SECCOMP_SET_MODE_OP(STRICT);
+	P_SECCOMP_SET_MODE_OP(FILTER);
+#undef P_SECCOMP_SET_MODE_OP
+	default: printed = scnprintf(bf, size, "%#x", op);			  break;
+	}
+
+	return printed;
+}
+
+#define SCA_SECCOMP_OP  syscall_arg__scnprintf_seccomp_op
+
+#ifndef SECCOMP_FILTER_FLAG_TSYNC
+#define SECCOMP_FILTER_FLAG_TSYNC 1
+#endif
+
+static size_t syscall_arg__scnprintf_seccomp_flags(char *bf, size_t size,
+						   struct syscall_arg *arg)
+{
+	int printed = 0, flags = arg->val;
+
+#define	P_FLAG(n) \
+	if (flags & SECCOMP_FILTER_FLAG_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		flags &= ~SECCOMP_FILTER_FLAG_##n; \
+	}
+
+	P_FLAG(TSYNC);
+#undef P_FLAG
+
+	if (flags)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+	return printed;
+}
+
+#define SCA_SECCOMP_FLAGS syscall_arg__scnprintf_seccomp_flags
diff --git a/tools/perf/trace/beauty/signum.c b/tools/perf/trace/beauty/signum.c
new file mode 100644
index 0000000..d3b0b1f
--- /dev/null
+++ b/tools/perf/trace/beauty/signum.c
@@ -0,0 +1,53 @@
+
+static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
+{
+	int sig = arg->val;
+
+	switch (sig) {
+#define	P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n)
+	P_SIGNUM(HUP);
+	P_SIGNUM(INT);
+	P_SIGNUM(QUIT);
+	P_SIGNUM(ILL);
+	P_SIGNUM(TRAP);
+	P_SIGNUM(ABRT);
+	P_SIGNUM(BUS);
+	P_SIGNUM(FPE);
+	P_SIGNUM(KILL);
+	P_SIGNUM(USR1);
+	P_SIGNUM(SEGV);
+	P_SIGNUM(USR2);
+	P_SIGNUM(PIPE);
+	P_SIGNUM(ALRM);
+	P_SIGNUM(TERM);
+	P_SIGNUM(CHLD);
+	P_SIGNUM(CONT);
+	P_SIGNUM(STOP);
+	P_SIGNUM(TSTP);
+	P_SIGNUM(TTIN);
+	P_SIGNUM(TTOU);
+	P_SIGNUM(URG);
+	P_SIGNUM(XCPU);
+	P_SIGNUM(XFSZ);
+	P_SIGNUM(VTALRM);
+	P_SIGNUM(PROF);
+	P_SIGNUM(WINCH);
+	P_SIGNUM(IO);
+	P_SIGNUM(PWR);
+	P_SIGNUM(SYS);
+#ifdef SIGEMT
+	P_SIGNUM(EMT);
+#endif
+#ifdef SIGSTKFLT
+	P_SIGNUM(STKFLT);
+#endif
+#ifdef SIGSWI
+	P_SIGNUM(SWI);
+#endif
+	default: break;
+	}
+
+	return scnprintf(bf, size, "%#x", sig);
+}
+
+#define SCA_SIGNUM syscall_arg__scnprintf_signum
diff --git a/tools/perf/trace/beauty/socket_type.c b/tools/perf/trace/beauty/socket_type.c
new file mode 100644
index 0000000..0a5ce81
--- /dev/null
+++ b/tools/perf/trace/beauty/socket_type.c
@@ -0,0 +1,60 @@
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#ifndef SOCK_DCCP
+# define SOCK_DCCP		6
+#endif
+
+#ifndef SOCK_CLOEXEC
+# define SOCK_CLOEXEC		02000000
+#endif
+
+#ifndef SOCK_NONBLOCK
+# define SOCK_NONBLOCK		00004000
+#endif
+
+#ifndef SOCK_TYPE_MASK
+#define SOCK_TYPE_MASK 0xf
+#endif
+
+static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size, struct syscall_arg *arg)
+{
+	size_t printed;
+	int type = arg->val,
+	    flags = type & ~SOCK_TYPE_MASK;
+
+	type &= SOCK_TYPE_MASK;
+	/*
+	 * Can't use a strarray, MIPS may override for ABI reasons.
+	 */
+	switch (type) {
+#define	P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, #n); break;
+	P_SK_TYPE(STREAM);
+	P_SK_TYPE(DGRAM);
+	P_SK_TYPE(RAW);
+	P_SK_TYPE(RDM);
+	P_SK_TYPE(SEQPACKET);
+	P_SK_TYPE(DCCP);
+	P_SK_TYPE(PACKET);
+#undef P_SK_TYPE
+	default:
+		printed = scnprintf(bf, size, "%#x", type);
+	}
+
+#define	P_SK_FLAG(n) \
+	if (flags & SOCK_##n) { \
+		printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
+		flags &= ~SOCK_##n; \
+	}
+
+	P_SK_FLAG(CLOEXEC);
+	P_SK_FLAG(NONBLOCK);
+#undef P_SK_FLAG
+
+	if (flags)
+		printed += scnprintf(bf + printed, size - printed, "|%#x", flags);
+
+	return printed;
+}
+
+#define SCA_SK_TYPE syscall_arg__scnprintf_socket_type
diff --git a/tools/perf/trace/beauty/waitid_options.c b/tools/perf/trace/beauty/waitid_options.c
new file mode 100644
index 0000000..7942724
--- /dev/null
+++ b/tools/perf/trace/beauty/waitid_options.c
@@ -0,0 +1,26 @@
+#include <sys/types.h>
+#include <sys/wait.h>
+
+static size_t syscall_arg__scnprintf_waitid_options(char *bf, size_t size,
+						    struct syscall_arg *arg)
+{
+	int printed = 0, options = arg->val;
+
+#define	P_OPTION(n) \
+	if (options & W##n) { \
+		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		options &= ~W##n; \
+	}
+
+	P_OPTION(NOHANG);
+	P_OPTION(UNTRACED);
+	P_OPTION(CONTINUED);
+#undef P_OPTION
+
+	if (options)
+		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", options);
+
+	return printed;
+}
+
+#define SCA_WAITID_OPTIONS syscall_arg__scnprintf_waitid_options
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 4b98165..538bae8 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -337,7 +337,7 @@
 	chain = list_entry(node->val.next, struct callchain_list, list);
 	chain->has_children = has_sibling;
 
-	if (node->val.next != node->val.prev) {
+	if (!list_empty(&node->val)) {
 		chain = list_entry(node->val.prev, struct callchain_list, list);
 		chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
 	}
@@ -1607,9 +1607,8 @@
 
 			ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
 			dummy_hpp.buf[ret] = '\0';
-			rtrim(dummy_hpp.buf);
 
-			start = ltrim(dummy_hpp.buf);
+			start = trim(dummy_hpp.buf);
 			ret = strlen(start);
 
 			if (start != dummy_hpp.buf)
@@ -1897,11 +1896,10 @@
 	bool first = true;
 	int ret;
 
-	if (symbol_conf.use_callchain)
+	if (symbol_conf.use_callchain) {
 		folded_sign = hist_entry__folded(he);
-
-	if (symbol_conf.use_callchain)
 		printed += fprintf(fp, "%c ", folded_sign);
+	}
 
 	hists__for_each_format(browser->hists, fmt) {
 		if (perf_hpp__should_skip(fmt, he->hists))
@@ -2137,7 +2135,7 @@
 		printed += snprintf(bf + printed, size - printed,
 				    ", UID: %s", hists->uid_filter_str);
 	if (thread) {
-		if (sort__has_thread) {
+		if (hists__has(hists, thread)) {
 			printed += scnprintf(bf + printed, size - printed,
 				    ", Thread: %s(%d)",
 				     (thread->comm_set ? thread__comm_str(thread) : ""),
@@ -2322,7 +2320,8 @@
 {
 	struct thread *thread = act->thread;
 
-	if ((!sort__has_thread && !sort__has_comm) || thread == NULL)
+	if ((!hists__has(browser->hists, thread) &&
+	     !hists__has(browser->hists, comm)) || thread == NULL)
 		return 0;
 
 	if (browser->hists->thread_filter) {
@@ -2331,7 +2330,7 @@
 		thread__zput(browser->hists->thread_filter);
 		ui_helpline__pop();
 	} else {
-		if (sort__has_thread) {
+		if (hists__has(browser->hists, thread)) {
 			ui_helpline__fpush("To zoom out press ESC or ENTER + \"Zoom out of %s(%d) thread\"",
 					   thread->comm_set ? thread__comm_str(thread) : "",
 					   thread->tid);
@@ -2356,10 +2355,11 @@
 {
 	int ret;
 
-	if ((!sort__has_thread && !sort__has_comm) || thread == NULL)
+	if ((!hists__has(browser->hists, thread) &&
+	     !hists__has(browser->hists, comm)) || thread == NULL)
 		return 0;
 
-	if (sort__has_thread) {
+	if (hists__has(browser->hists, thread)) {
 		ret = asprintf(optstr, "Zoom %s %s(%d) thread",
 			       browser->hists->thread_filter ? "out of" : "into",
 			       thread->comm_set ? thread__comm_str(thread) : "",
@@ -2382,7 +2382,7 @@
 {
 	struct map *map = act->ms.map;
 
-	if (!sort__has_dso || map == NULL)
+	if (!hists__has(browser->hists, dso) || map == NULL)
 		return 0;
 
 	if (browser->hists->dso_filter) {
@@ -2409,7 +2409,7 @@
 add_dso_opt(struct hist_browser *browser, struct popup_action *act,
 	    char **optstr, struct map *map)
 {
-	if (!sort__has_dso || map == NULL)
+	if (!hists__has(browser->hists, dso) || map == NULL)
 		return 0;
 
 	if (asprintf(optstr, "Zoom %s %s DSO",
@@ -2431,10 +2431,10 @@
 }
 
 static int
-add_map_opt(struct hist_browser *browser __maybe_unused,
+add_map_opt(struct hist_browser *browser,
 	    struct popup_action *act, char **optstr, struct map *map)
 {
-	if (!sort__has_dso || map == NULL)
+	if (!hists__has(browser->hists, dso) || map == NULL)
 		return 0;
 
 	if (asprintf(optstr, "Browse map details") < 0)
@@ -2536,7 +2536,7 @@
 static int
 do_zoom_socket(struct hist_browser *browser, struct popup_action *act)
 {
-	if (!sort__has_socket || act->socket < 0)
+	if (!hists__has(browser->hists, socket) || act->socket < 0)
 		return 0;
 
 	if (browser->hists->socket_filter > -1) {
@@ -2558,7 +2558,7 @@
 add_socket_opt(struct hist_browser *browser, struct popup_action *act,
 	       char **optstr, int socket_id)
 {
-	if (!sort__has_socket || socket_id < 0)
+	if (!hists__has(browser->hists, socket) || socket_id < 0)
 		return 0;
 
 	if (asprintf(optstr, "Zoom %s Processor Socket %d",
@@ -2749,7 +2749,7 @@
 			 */
 			goto out_free_stack;
 		case 'a':
-			if (!sort__has_sym) {
+			if (!hists__has(hists, sym)) {
 				ui_browser__warning(&browser->b, delay_secs * 2,
 			"Annotation is only available for symbolic views, "
 			"include \"sym*\" in --sort to use it.");
@@ -2912,7 +2912,7 @@
 			continue;
 		}
 
-		if (!sort__has_sym || browser->selection == NULL)
+		if (!hists__has(hists, sym) || browser->selection == NULL)
 			goto skip_annotation;
 
 		if (sort__mode == SORT_MODE__BRANCH) {
@@ -2956,7 +2956,7 @@
 			goto skip_scripting;
 
 		if (browser->he_selection) {
-			if (sort__has_thread && thread) {
+			if (hists__has(hists, thread) && thread) {
 				nr_options += add_script_opt(browser,
 							     &actions[nr_options],
 							     &options[nr_options],
@@ -2971,7 +2971,7 @@
 			 *
 			 * See hist_browser__show_entry.
 			 */
-			if (sort__has_sym && browser->selection->sym) {
+			if (hists__has(hists, sym) && browser->selection->sym) {
 				nr_options += add_script_opt(browser,
 							     &actions[nr_options],
 							     &options[nr_options],
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 2aa45b6..932adfa 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -379,7 +379,7 @@
 			gtk_tree_store_set(store, &iter, col_idx++, s, -1);
 		}
 
-		if (symbol_conf.use_callchain && sort__has_sym) {
+		if (symbol_conf.use_callchain && hists__has(hists, sym)) {
 			if (callchain_param.mode == CHAIN_GRAPH_REL)
 				total = symbol_conf.cumulate_callchain ?
 					h->stat_acc->period : h->stat.period;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 3baeaa6..af07ffb 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -635,7 +635,7 @@
 		ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
 	}
 
-	if (verbose && sort__has_sym) /* Addr + origin */
+	if (verbose && hists__has(hists, sym)) /* Addr + origin */
 		ret += 3 + BITS_PER_LONG / 4;
 
 	return ret;
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 7aff5ac..560eb47 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -569,9 +569,8 @@
 			first_col = false;
 
 			fmt->header(fmt, hpp, hists_to_evsel(hists));
-			rtrim(hpp->buf);
 
-			header_width += fprintf(fp, "%s", ltrim(hpp->buf));
+			header_width += fprintf(fp, "%s", trim(hpp->buf));
 		}
 	}
 
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index da48fd8..8c6c8a0 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -8,6 +8,7 @@
 libperf-y += event.o
 libperf-y += evlist.o
 libperf-y += evsel.o
+libperf-y += evsel_fprintf.o
 libperf-y += find_bit.o
 libperf-y += kallsyms.o
 libperf-y += levenshtein.o
@@ -26,9 +27,9 @@
 libperf-y += strfilter.o
 libperf-y += top.o
 libperf-y += usage.o
-libperf-y += wrapper.o
 libperf-y += dso.o
 libperf-y += symbol.o
+libperf-y += symbol_fprintf.o
 libperf-y += color.o
 libperf-y += header.o
 libperf-y += callchain.o
@@ -38,6 +39,7 @@
 libperf-y += map.o
 libperf-y += pstack.o
 libperf-y += session.o
+libperf-$(CONFIG_AUDIT) += syscalltbl.o
 libperf-y += ordered-events.o
 libperf-y += comm.o
 libperf-y += thread.o
@@ -69,9 +71,9 @@
 libperf-y += record.o
 libperf-y += srcline.o
 libperf-y += data.o
-libperf-$(CONFIG_X86) += tsc.o
-libperf-$(CONFIG_AUXTRACE) += tsc.o
+libperf-y += tsc.o
 libperf-y += cloexec.o
+libperf-y += call-path.o
 libperf-y += thread-stack.o
 libperf-$(CONFIG_AUXTRACE) += auxtrace.o
 libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index b795b69..4db73d5 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1138,7 +1138,7 @@
 
 	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
 	    !dso__is_kcore(dso)) {
-		char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
+		char bf[SBUILD_ID_SIZE + 15] = " with build id ";
 		char *build_id_msg = NULL;
 
 		if (dso->annotate_warned)
@@ -1665,5 +1665,5 @@
 
 bool ui__has_annotation(void)
 {
-	return use_browser == 1 && sort__has_sym;
+	return use_browser == 1 && perf_hpp_list.sym;
 }
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index ec164fe..c916901 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -940,6 +940,7 @@
 	synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
 	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
 	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
+	synth_opts->initial_skip = 0;
 }
 
 /*
@@ -1064,6 +1065,12 @@
 				synth_opts->last_branch_sz = val;
 			}
 			break;
+		case 's':
+			synth_opts->initial_skip = strtoul(p, &endptr, 10);
+			if (p == endptr)
+				goto out_err;
+			p = endptr;
+			break;
 		case ' ':
 		case ',':
 			break;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 57ff31e..767989e 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -68,6 +68,7 @@
  * @last_branch_sz: branch context size
  * @period: 'instructions' events period
  * @period_type: 'instructions' events period type
+ * @initial_skip: skip N events at the beginning.
  */
 struct itrace_synth_opts {
 	bool			set;
@@ -86,6 +87,7 @@
 	unsigned int		last_branch_sz;
 	unsigned long long	period;
 	enum itrace_period_type	period_type;
+	unsigned long		initial_skip;
 };
 
 /**
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 0967ce6..493307d 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -842,6 +842,58 @@
 	return op;
 }
 
+static struct bpf_map_op *
+bpf_map_op__clone(struct bpf_map_op *op)
+{
+	struct bpf_map_op *newop;
+
+	newop = memdup(op, sizeof(*op));
+	if (!newop) {
+		pr_debug("Failed to alloc bpf_map_op\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&newop->list);
+	if (op->key_type == BPF_MAP_KEY_RANGES) {
+		size_t memsz = op->k.array.nr_ranges *
+			       sizeof(op->k.array.ranges[0]);
+
+		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
+		if (!newop->k.array.ranges) {
+			pr_debug("Failed to alloc indices for map\n");
+			free(newop);
+			return NULL;
+		}
+	}
+
+	return newop;
+}
+
+static struct bpf_map_priv *
+bpf_map_priv__clone(struct bpf_map_priv *priv)
+{
+	struct bpf_map_priv *newpriv;
+	struct bpf_map_op *pos, *newop;
+
+	newpriv = zalloc(sizeof(*newpriv));
+	if (!newpriv) {
+		pr_debug("Not enough memory to alloc map private\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&newpriv->ops_list);
+
+	list_for_each_entry(pos, &priv->ops_list, list) {
+		newop = bpf_map_op__clone(pos);
+		if (!newop) {
+			bpf_map_priv__purge(newpriv);
+			return NULL;
+		}
+		list_add_tail(&newop->list, &newpriv->ops_list);
+	}
+
+	return newpriv;
+}
+
 static int
 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
 {
@@ -1417,6 +1469,89 @@
 	return 0;
 }
 
+#define bpf__for_each_map(pos, obj, objtmp)	\
+	bpf_object__for_each_safe(obj, objtmp)	\
+		bpf_map__for_each(pos, obj)
+
+#define bpf__for_each_stdout_map(pos, obj, objtmp)	\
+	bpf__for_each_map(pos, obj, objtmp) 		\
+		if (bpf_map__get_name(pos) && 		\
+			(strcmp("__bpf_stdout__", 	\
+				bpf_map__get_name(pos)) == 0))
+
+int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+{
+	struct bpf_map_priv *tmpl_priv = NULL;
+	struct bpf_object *obj, *tmp;
+	struct perf_evsel *evsel = NULL;
+	struct bpf_map *map;
+	int err;
+	bool need_init = false;
+
+	bpf__for_each_stdout_map(map, obj, tmp) {
+		struct bpf_map_priv *priv;
+
+		err = bpf_map__get_private(map, (void **)&priv);
+		if (err)
+			return -BPF_LOADER_ERRNO__INTERNAL;
+
+		/*
+		 * No need to check map type: type should have been
+		 * verified by kernel.
+		 */
+		if (!need_init && !priv)
+			need_init = !priv;
+		if (!tmpl_priv && priv)
+			tmpl_priv = priv;
+	}
+
+	if (!need_init)
+		return 0;
+
+	if (!tmpl_priv) {
+		err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
+				   NULL);
+		if (err) {
+			pr_debug("ERROR: failed to create bpf-output event\n");
+			return -err;
+		}
+
+		evsel = perf_evlist__last(evlist);
+	}
+
+	bpf__for_each_stdout_map(map, obj, tmp) {
+		struct bpf_map_priv *priv;
+
+		err = bpf_map__get_private(map, (void **)&priv);
+		if (err)
+			return -BPF_LOADER_ERRNO__INTERNAL;
+		if (priv)
+			continue;
+
+		if (tmpl_priv) {
+			priv = bpf_map_priv__clone(tmpl_priv);
+			if (!priv)
+				return -ENOMEM;
+
+			err = bpf_map__set_private(map, priv, bpf_map_priv__clear);
+			if (err) {
+				bpf_map_priv__clear(map, priv);
+				return err;
+			}
+		} else if (evsel) {
+			struct bpf_map_op *op;
+
+			op = bpf_map__add_newop(map, NULL);
+			if (IS_ERR(op))
+				return PTR_ERR(op);
+			op->op_type = BPF_MAP_OP_SET_EVSEL;
+			op->v.evsel = evsel;
+		}
+	}
+
+	return 0;
+}
+
 #define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
 #define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
 #define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
@@ -1590,3 +1725,11 @@
 	bpf__strerror_end(buf, size);
 	return 0;
 }
+
+int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
+			       int err, char *buf, size_t size)
+{
+	bpf__strerror_head(err, buf, size);
+	bpf__strerror_end(buf, size);
+	return 0;
+}
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index be43119..941e172 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -79,6 +79,11 @@
 			     size_t size);
 int bpf__apply_obj_config(void);
 int bpf__strerror_apply_obj_config(int err, char *buf, size_t size);
+
+int bpf__setup_stdout(struct perf_evlist *evlist);
+int bpf__strerror_setup_stdout(struct perf_evlist *evlist, int err,
+			       char *buf, size_t size);
+
 #else
 static inline struct bpf_object *
 bpf__prepare_load(const char *filename __maybe_unused,
@@ -125,6 +130,12 @@
 }
 
 static inline int
+bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+{
+	return 0;
+}
+
+static inline int
 __bpf_strerror(char *buf, size_t size)
 {
 	if (!size)
@@ -177,5 +188,13 @@
 {
 	return __bpf_strerror(buf, size);
 }
+
+static inline int
+bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
+			   int err __maybe_unused, char *buf,
+			   size_t size)
+{
+	return __bpf_strerror(buf, size);
+}
 #endif
 #endif
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 0573c2e..bff425e 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -261,14 +261,14 @@
 
 		if (dso__is_vdso(pos)) {
 			name = pos->short_name;
-			name_len = pos->short_name_len + 1;
+			name_len = pos->short_name_len;
 		} else if (dso__is_kcore(pos)) {
 			machine__mmap_name(machine, nm, sizeof(nm));
 			name = nm;
-			name_len = strlen(nm) + 1;
+			name_len = strlen(nm);
 		} else {
 			name = pos->long_name;
-			name_len = pos->long_name_len + 1;
+			name_len = pos->long_name_len;
 		}
 
 		in_kernel = pos->kernel ||
@@ -365,39 +365,17 @@
 int build_id_cache__list_build_ids(const char *pathname,
 				   struct strlist **result)
 {
-	struct strlist *list;
 	char *dir_name;
-	DIR *dir;
-	struct dirent *d;
 	int ret = 0;
 
-	list = strlist__new(NULL, NULL);
 	dir_name = build_id_cache__dirname_from_path(pathname, false, false);
-	if (!list || !dir_name) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!dir_name)
+		return -ENOMEM;
 
-	/* List up all dirents */
-	dir = opendir(dir_name);
-	if (!dir) {
+	*result = lsdir(dir_name, lsdir_no_dot_filter);
+	if (!*result)
 		ret = -errno;
-		goto out;
-	}
-
-	while ((d = readdir(dir)) != NULL) {
-		if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
-			continue;
-		strlist__add(list, d->d_name);
-	}
-	closedir(dir);
-
-out:
 	free(dir_name);
-	if (ret)
-		strlist__delete(list);
-	else
-		*result = list;
 
 	return ret;
 }
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 1f5a93c..0d814bb 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -40,25 +40,6 @@
 
 #define alloc_nr(x) (((x)+16)*3/2)
 
-/*
- * Realloc the buffer pointed at by variable 'x' so that it can hold
- * at least 'nr' entries; the number of entries currently allocated
- * is 'alloc', using the standard growing factor alloc_nr() macro.
- *
- * DO NOT USE any expression with side-effect for 'x' or 'alloc'.
- */
-#define ALLOC_GROW(x, nr, alloc) \
-	do { \
-		if ((nr) > alloc) { \
-			if (alloc_nr(alloc) < (nr)) \
-				alloc = (nr); \
-			else \
-				alloc = alloc_nr(alloc); \
-			x = xrealloc((x), alloc * sizeof(*(x))); \
-		} \
-	} while(0)
-
-
 static inline int is_absolute_path(const char *path)
 {
 	return path[0] == '/';
diff --git a/tools/perf/util/call-path.c b/tools/perf/util/call-path.c
new file mode 100644
index 0000000..904a170
--- /dev/null
+++ b/tools/perf/util/call-path.c
@@ -0,0 +1,122 @@
+/*
+ * call-path.c: Manipulate a tree data structure containing function call paths
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+
+#include "util.h"
+#include "call-path.h"
+
+static void call_path__init(struct call_path *cp, struct call_path *parent,
+			    struct symbol *sym, u64 ip, bool in_kernel)
+{
+	cp->parent = parent;
+	cp->sym = sym;
+	cp->ip = sym ? 0 : ip;
+	cp->db_id = 0;
+	cp->in_kernel = in_kernel;
+	RB_CLEAR_NODE(&cp->rb_node);
+	cp->children = RB_ROOT;
+}
+
+struct call_path_root *call_path_root__new(void)
+{
+	struct call_path_root *cpr;
+
+	cpr = zalloc(sizeof(struct call_path_root));
+	if (!cpr)
+		return NULL;
+	call_path__init(&cpr->call_path, NULL, NULL, 0, false);
+	INIT_LIST_HEAD(&cpr->blocks);
+	return cpr;
+}
+
+void call_path_root__free(struct call_path_root *cpr)
+{
+	struct call_path_block *pos, *n;
+
+	list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
+		list_del(&pos->node);
+		free(pos);
+	}
+	free(cpr);
+}
+
+static struct call_path *call_path__new(struct call_path_root *cpr,
+					struct call_path *parent,
+					struct symbol *sym, u64 ip,
+					bool in_kernel)
+{
+	struct call_path_block *cpb;
+	struct call_path *cp;
+	size_t n;
+
+	if (cpr->next < cpr->sz) {
+		cpb = list_last_entry(&cpr->blocks, struct call_path_block,
+				      node);
+	} else {
+		cpb = zalloc(sizeof(struct call_path_block));
+		if (!cpb)
+			return NULL;
+		list_add_tail(&cpb->node, &cpr->blocks);
+		cpr->sz += CALL_PATH_BLOCK_SIZE;
+	}
+
+	n = cpr->next++ & CALL_PATH_BLOCK_MASK;
+	cp = &cpb->cp[n];
+
+	call_path__init(cp, parent, sym, ip, in_kernel);
+
+	return cp;
+}
+
+struct call_path *call_path__findnew(struct call_path_root *cpr,
+				     struct call_path *parent,
+				     struct symbol *sym, u64 ip, u64 ks)
+{
+	struct rb_node **p;
+	struct rb_node *node_parent = NULL;
+	struct call_path *cp;
+	bool in_kernel = ip >= ks;
+
+	if (sym)
+		ip = 0;
+
+	if (!parent)
+		return call_path__new(cpr, parent, sym, ip, in_kernel);
+
+	p = &parent->children.rb_node;
+	while (*p != NULL) {
+		node_parent = *p;
+		cp = rb_entry(node_parent, struct call_path, rb_node);
+
+		if (cp->sym == sym && cp->ip == ip)
+			return cp;
+
+		if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	cp = call_path__new(cpr, parent, sym, ip, in_kernel);
+	if (!cp)
+		return NULL;
+
+	rb_link_node(&cp->rb_node, node_parent, p);
+	rb_insert_color(&cp->rb_node, &parent->children);
+
+	return cp;
+}
diff --git a/tools/perf/util/call-path.h b/tools/perf/util/call-path.h
new file mode 100644
index 0000000..477f6d0
--- /dev/null
+++ b/tools/perf/util/call-path.h
@@ -0,0 +1,77 @@
+/*
+ * call-path.h: Manipulate a tree data structure containing function call paths
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __PERF_CALL_PATH_H
+#define __PERF_CALL_PATH_H
+
+#include <sys/types.h>
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+
+/**
+ * struct call_path - node in list of calls leading to a function call.
+ * @parent: call path to the parent function call
+ * @sym: symbol of function called
+ * @ip: only if sym is null, the ip of the function
+ * @db_id: id used for db-export
+ * @in_kernel: whether function is in the kernel
+ * @rb_node: node in parent's tree of called functions
+ * @children: tree of call paths of functions called
+ *
+ * In combination with the call_return structure, the call_path structure
+ * defines a context-sensitive call-graph.
+ */
+struct call_path {
+	struct call_path *parent;
+	struct symbol *sym;
+	u64 ip;
+	u64 db_id;
+	bool in_kernel;
+	struct rb_node rb_node;
+	struct rb_root children;
+};
+
+#define CALL_PATH_BLOCK_SHIFT 8
+#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
+#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)
+
+struct call_path_block {
+	struct call_path cp[CALL_PATH_BLOCK_SIZE];
+	struct list_head node;
+};
+
+/**
+ * struct call_path_root - root of all call paths.
+ * @call_path: root call path
+ * @blocks: list of blocks to store call paths
+ * @next: next free space
+ * @sz: number of spaces
+ */
+struct call_path_root {
+	struct call_path call_path;
+	struct list_head blocks;
+	size_t next;
+	size_t sz;
+};
+
+struct call_path_root *call_path_root__new(void);
+void call_path_root__free(struct call_path_root *cpr);
+
+struct call_path *call_path__findnew(struct call_path_root *cpr,
+				     struct call_path *parent,
+				     struct symbol *sym, u64 ip, u64 ks);
+
+#endif
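
For illustration only, a minimal sketch of how the two new files above fit together: a call_path_root owns block-allocated call_path nodes, and call_path__findnew() walks a parent's rbtree of children keyed on (sym, ip) before allocating a new node. This assumes the perf-internal header added by this patch; the symbols passed in are hypothetical placeholders.

/* Sketch: build a two-level call path root -> a() -> b(). */
#include "call-path.h"

static void call_path_example(struct symbol *sym_a, struct symbol *sym_b,
			      u64 kernel_start)
{
	struct call_path_root *cpr = call_path_root__new();
	struct call_path *a, *b;

	if (!cpr)
		return;

	a = call_path__findnew(cpr, &cpr->call_path, sym_a, 0, kernel_start);
	b = a ? call_path__findnew(cpr, a, sym_b, 0, kernel_start) : NULL;

	/* A second lookup with the same (parent, sym, ip) returns 'b' again. */
	(void)b;

	call_path_root__free(cpr);	/* frees every block in one pass */
}
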
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 24b4bd0..07fd30b 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -109,6 +109,7 @@
 	bool record_opt_set = false;
 	bool try_stack_size = false;
 
+	callchain_param.enabled = true;
 	symbol_conf.use_callchain = true;
 
 	if (!arg)
@@ -117,6 +118,7 @@
 	while ((tok = strtok((char *)arg, ",")) != NULL) {
 		if (!strncmp(tok, "none", strlen(tok))) {
 			callchain_param.mode = CHAIN_NONE;
+			callchain_param.enabled = false;
 			symbol_conf.use_callchain = false;
 			return 0;
 		}
@@ -788,7 +790,8 @@
 	return 0;
 }
 
-int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+int sample__resolve_callchain(struct perf_sample *sample,
+			      struct callchain_cursor *cursor, struct symbol **parent,
 			      struct perf_evsel *evsel, struct addr_location *al,
 			      int max_stack)
 {
@@ -796,8 +799,8 @@
 		return 0;
 
 	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
-	    sort__has_parent) {
-		return thread__resolve_callchain(al->thread, evsel, sample,
+	    perf_hpp_list.parent) {
+		return thread__resolve_callchain(al->thread, cursor, evsel, sample,
 						 parent, al, max_stack);
 	}
 	return 0;
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index d2a9e69..65e2a4f 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -212,7 +212,14 @@
 int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
 int record_callchain_opt(const struct option *opt, const char *arg, int unset);
 
-int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent,
+struct record_opts;
+
+int record_opts__parse_callchain(struct record_opts *record,
+				 struct callchain_param *callchain,
+				 const char *arg, bool unset);
+
+int sample__resolve_callchain(struct perf_sample *sample,
+			      struct callchain_cursor *cursor, struct symbol **parent,
 			      struct perf_evsel *evsel, struct addr_location *al,
 			      int max_stack);
 int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample);
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 4e72763..dad7d82 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -13,6 +13,7 @@
 #include <subcmd/exec-cmd.h>
 #include "util/hist.h"  /* perf_hist_config */
 #include "util/llvm-utils.h"   /* perf_llvm_config */
+#include "config.h"
 
 #define MAXNAME (256)
 
@@ -377,6 +378,21 @@
 	return value;
 }
 
+static int perf_buildid_config(const char *var, const char *value)
+{
+	/* same dir for all commands */
+	if (!strcmp(var, "buildid.dir")) {
+		const char *dir = perf_config_dirname(var, value);
+
+		if (!dir)
+			return -1;
+		strncpy(buildid_dir, dir, MAXPATHLEN-1);
+		buildid_dir[MAXPATHLEN-1] = '\0';
+	}
+
+	return 0;
+}
+
 static int perf_default_core_config(const char *var __maybe_unused,
 				    const char *value __maybe_unused)
 {
@@ -412,6 +428,9 @@
 	if (!prefixcmp(var, "llvm."))
 		return perf_llvm_config(var, value);
 
+	if (!prefixcmp(var, "buildid."))
+		return perf_buildid_config(var, value);
+
 	/* Add other config variables here. */
 	return 0;
 }
@@ -506,6 +525,178 @@
 	return ret;
 }
 
+static struct perf_config_section *find_section(struct list_head *sections,
+						const char *section_name)
+{
+	struct perf_config_section *section;
+
+	list_for_each_entry(section, sections, node)
+		if (!strcmp(section->name, section_name))
+			return section;
+
+	return NULL;
+}
+
+static struct perf_config_item *find_config_item(const char *name,
+						 struct perf_config_section *section)
+{
+	struct perf_config_item *item;
+
+	list_for_each_entry(item, &section->items, node)
+		if (!strcmp(item->name, name))
+			return item;
+
+	return NULL;
+}
+
+static struct perf_config_section *add_section(struct list_head *sections,
+					       const char *section_name)
+{
+	struct perf_config_section *section = zalloc(sizeof(*section));
+
+	if (!section)
+		return NULL;
+
+	INIT_LIST_HEAD(&section->items);
+	section->name = strdup(section_name);
+	if (!section->name) {
+		pr_debug("%s: strdup failed\n", __func__);
+		free(section);
+		return NULL;
+	}
+
+	list_add_tail(&section->node, sections);
+	return section;
+}
+
+static struct perf_config_item *add_config_item(struct perf_config_section *section,
+						const char *name)
+{
+	struct perf_config_item *item = zalloc(sizeof(*item));
+
+	if (!item)
+		return NULL;
+
+	item->name = strdup(name);
+	if (!item->name) {
+		pr_debug("%s: strdup failed\n", __func__);
+		free(item);
+		return NULL;
+	}
+
+	list_add_tail(&item->node, &section->items);
+	return item;
+}
+
+static int set_value(struct perf_config_item *item, const char *value)
+{
+	char *val = strdup(value);
+
+	if (!val)
+		return -1;
+
+	zfree(&item->value);
+	item->value = val;
+	return 0;
+}
+
+static int collect_config(const char *var, const char *value,
+			  void *perf_config_set)
+{
+	int ret = -1;
+	char *ptr, *key;
+	char *section_name, *name;
+	struct perf_config_section *section = NULL;
+	struct perf_config_item *item = NULL;
+	struct perf_config_set *set = perf_config_set;
+	struct list_head *sections = &set->sections;
+
+	key = ptr = strdup(var);
+	if (!key) {
+		pr_debug("%s: strdup failed\n", __func__);
+		return -1;
+	}
+
+	section_name = strsep(&ptr, ".");
+	name = ptr;
+	if (name == NULL || value == NULL)
+		goto out_free;
+
+	section = find_section(sections, section_name);
+	if (!section) {
+		section = add_section(sections, section_name);
+		if (!section)
+			goto out_free;
+	}
+
+	item = find_config_item(name, section);
+	if (!item) {
+		item = add_config_item(section, name);
+		if (!item)
+			goto out_free;
+	}
+
+	ret = set_value(item, value);
+	return ret;
+
+out_free:
+	free(key);
+	perf_config_set__delete(set);
+	return -1;
+}
+
+struct perf_config_set *perf_config_set__new(void)
+{
+	struct perf_config_set *set = zalloc(sizeof(*set));
+
+	if (set) {
+		INIT_LIST_HEAD(&set->sections);
+		perf_config(collect_config, set);
+	}
+
+	return set;
+}
+
+static void perf_config_item__delete(struct perf_config_item *item)
+{
+	zfree(&item->name);
+	zfree(&item->value);
+	free(item);
+}
+
+static void perf_config_section__purge(struct perf_config_section *section)
+{
+	struct perf_config_item *item, *tmp;
+
+	list_for_each_entry_safe(item, tmp, &section->items, node) {
+		list_del_init(&item->node);
+		perf_config_item__delete(item);
+	}
+}
+
+static void perf_config_section__delete(struct perf_config_section *section)
+{
+	perf_config_section__purge(section);
+	zfree(&section->name);
+	free(section);
+}
+
+static void perf_config_set__purge(struct perf_config_set *set)
+{
+	struct perf_config_section *section, *tmp;
+
+	list_for_each_entry_safe(section, tmp, &set->sections, node) {
+		list_del_init(&section->node);
+		perf_config_section__delete(section);
+	}
+}
+
+void perf_config_set__delete(struct perf_config_set *set)
+{
+	perf_config_set__purge(set);
+	free(set);
+}
+
 /*
  * Call this to report error for your variable that should not
  * get a boolean value (i.e. "[my] var" means "true").
@@ -515,49 +706,18 @@
 	return error("Missing value for '%s'", var);
 }
 
-struct buildid_dir_config {
-	char *dir;
-};
-
-static int buildid_dir_command_config(const char *var, const char *value,
-				      void *data)
-{
-	struct buildid_dir_config *c = data;
-	const char *v;
-
-	/* same dir for all commands */
-	if (!strcmp(var, "buildid.dir")) {
-		v = perf_config_dirname(var, value);
-		if (!v)
-			return -1;
-		strncpy(c->dir, v, MAXPATHLEN-1);
-		c->dir[MAXPATHLEN-1] = '\0';
-	}
-	return 0;
-}
-
-static void check_buildid_dir_config(void)
-{
-	struct buildid_dir_config c;
-	c.dir = buildid_dir;
-	perf_config(buildid_dir_command_config, &c);
-}
-
 void set_buildid_dir(const char *dir)
 {
 	if (dir)
 		scnprintf(buildid_dir, MAXPATHLEN-1, "%s", dir);
 
-	/* try config file */
-	if (buildid_dir[0] == '\0')
-		check_buildid_dir_config();
-
 	/* default to $HOME/.debug */
 	if (buildid_dir[0] == '\0') {
-		char *v = getenv("HOME");
-		if (v) {
+		char *home = getenv("HOME");
+
+		if (home) {
 			snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s",
-				 v, DEBUG_CACHE_DIR);
+				 home, DEBUG_CACHE_DIR);
 		} else {
 			strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1);
 		}
diff --git a/tools/perf/util/config.h b/tools/perf/util/config.h
new file mode 100644
index 0000000..22ec626
--- /dev/null
+++ b/tools/perf/util/config.h
@@ -0,0 +1,26 @@
+#ifndef __PERF_CONFIG_H
+#define __PERF_CONFIG_H
+
+#include <stdbool.h>
+#include <linux/list.h>
+
+struct perf_config_item {
+	char *name;
+	char *value;
+	struct list_head node;
+};
+
+struct perf_config_section {
+	char *name;
+	struct list_head items;
+	struct list_head node;
+};
+
+struct perf_config_set {
+	struct list_head sections;
+};
+
+struct perf_config_set *perf_config_set__new(void);
+void perf_config_set__delete(struct perf_config_set *set);
+
+#endif /* __PERF_CONFIG_H */
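
As an illustration of the new in-memory config set (a sketch only, assuming the perf-internal headers above): perf_config_set__new() walks the config files via perf_config() and collect_config(), so every "section.name = value" pair ends up as a perf_config_item hanging off its perf_config_section.

/* Sketch: dump everything the config set collected. */
#include <stdio.h>
#include <linux/list.h>
#include "config.h"

static void dump_config_set(void)
{
	struct perf_config_set *set = perf_config_set__new();
	struct perf_config_section *section;
	struct perf_config_item *item;

	if (!set)
		return;

	list_for_each_entry(section, &set->sections, node)
		list_for_each_entry(item, &section->items, node)
			printf("%s.%s = %s\n", section->name, item->name,
			       item->value ? item->value : "(unset)");

	perf_config_set__delete(set);
}
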
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 9bcf2be..02d8016 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -587,3 +587,15 @@
 	closedir(dir1);
 	return 0;
 }
+
+bool cpu_map__has(struct cpu_map *cpus, int cpu)
+{
+	int i;
+
+	for (i = 0; i < cpus->nr; ++i) {
+		if (cpus->map[i] == cpu)
+			return true;
+	}
+
+	return false;
+}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 81a2562..1a0a350 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -66,4 +66,6 @@
 int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
 		       int (*f)(struct cpu_map *map, int cpu, void *data),
 		       void *data);
+
+bool cpu_map__has(struct cpu_map *cpus, int cpu);
 #endif /* __PERF_CPUMAP_H */
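
A small, hedged usage note for the new cpu_map__has() helper above: it is a linear scan over the map, intended for callers that need to know whether a given logical CPU is covered before acting on it. The loop below is only an illustrative sketch.

/* Sketch: act only on CPUs that are present in the map. */
#include "cpumap.h"

static void visit_mapped_cpus(struct cpu_map *cpus, int max_cpu)
{
	int cpu;

	for (cpu = 0; cpu < max_cpu; cpu++) {
		if (!cpu_map__has(cpus, cpu))
			continue;
		/* ... program or read the per-CPU event here ... */
	}
}
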
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 1921942..be835161 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -136,3 +136,44 @@
 {
 	return writen(file->fd, buf, size);
 }
+
+int perf_data_file__switch(struct perf_data_file *file,
+			   const char *postfix,
+			   size_t pos, bool at_exit)
+{
+	char *new_filepath;
+	int ret;
+
+	if (check_pipe(file))
+		return -EINVAL;
+	if (perf_data_file__is_read(file))
+		return -EINVAL;
+
+	if (asprintf(&new_filepath, "%s.%s", file->path, postfix) < 0)
+		return -ENOMEM;
+
+	/*
+	 * Only fire a warning, don't return an error; continue filling
+	 * the original file.
+	 */
+	if (rename(file->path, new_filepath))
+		pr_warning("Failed to rename %s to %s\n", file->path, new_filepath);
+
+	if (!at_exit) {
+		close(file->fd);
+		ret = perf_data_file__open(file);
+		if (ret < 0)
+			goto out;
+
+		if (lseek(file->fd, pos, SEEK_SET) == (off_t)-1) {
+			ret = -errno;
+			pr_debug("Failed to lseek to %zu: %s",
+				 pos, strerror(errno));
+			goto out;
+		}
+	}
+	ret = file->fd;
+out:
+	free(new_filepath);
+	return ret;
+}
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 2b15d0c..ae510ce 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -46,5 +46,14 @@
 void perf_data_file__close(struct perf_data_file *file);
 ssize_t perf_data_file__write(struct perf_data_file *file,
 			      void *buf, size_t size);
-
+/*
+ * If at_exit is set, only rename the current perf.data to
+ * perf.data.<postfix> and continue writing to the original file.
+ * Set at_exit when flushing the last output.
+ *
+ * Return value is fd of new output.
+ */
+int perf_data_file__switch(struct perf_data_file *file,
+			   const char *postfix,
+			   size_t pos, bool at_exit);
 #endif /* __PERF_DATA_H */
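
To make the comment above concrete, a hedged sketch of a "switch output" caller: rename the current file to perf.data.<postfix> and, since this is not the final flush, reopen a fresh file and resume writing after the already-written header. The postfix string and header_size value are illustrative only.

/* Sketch: snapshot the current output and keep recording. */
#include "data.h"

static int snapshot_output(struct perf_data_file *file, size_t header_size)
{
	/*
	 * at_exit == false: the old file keeps the renamed data, and a new
	 * file is opened with the write position set just past the header.
	 * A negative return means -EINVAL (pipe/read mode), -ENOMEM, etc.
	 */
	return perf_data_file__switch(file, "snapshot", header_size, false);
}
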
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 049438d..8d96c80 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -23,6 +23,8 @@
 #include "event.h"
 #include "util.h"
 #include "thread-stack.h"
+#include "callchain.h"
+#include "call-path.h"
 #include "db-export.h"
 
 struct deferred_export {
@@ -258,8 +260,7 @@
 		if (!al->sym) {
 			al->sym = symbol__new(al->addr, 0, 0, "unknown");
 			if (al->sym)
-				symbols__insert(&dso->symbols[al->map->type],
-						al->sym);
+				dso__insert_symbol(dso, al->map->type, al->sym);
 		}
 
 		if (al->sym) {
@@ -276,6 +277,80 @@
 	return 0;
 }
 
+static struct call_path *call_path_from_sample(struct db_export *dbe,
+					       struct machine *machine,
+					       struct thread *thread,
+					       struct perf_sample *sample,
+					       struct perf_evsel *evsel)
+{
+	u64 kernel_start = machine__kernel_start(machine);
+	struct call_path *current = &dbe->cpr->call_path;
+	enum chain_order saved_order = callchain_param.order;
+	int err;
+
+	if (!symbol_conf.use_callchain || !sample->callchain)
+		return NULL;
+
+	/*
+	 * Since the call path tree must be built starting with the root, we
+	 * must use ORDER_CALLER for call chain resolution, in order to process
+	 * the callchain starting with the root node and ending with the leaf.
+	 */
+	callchain_param.order = ORDER_CALLER;
+	err = thread__resolve_callchain(thread, &callchain_cursor, evsel,
+					sample, NULL, NULL,
+					sysctl_perf_event_max_stack);
+	if (err) {
+		callchain_param.order = saved_order;
+		return NULL;
+	}
+	callchain_cursor_commit(&callchain_cursor);
+
+	while (1) {
+		struct callchain_cursor_node *node;
+		struct addr_location al;
+		u64 dso_db_id = 0, sym_db_id = 0, offset = 0;
+
+		memset(&al, 0, sizeof(al));
+
+		node = callchain_cursor_current(&callchain_cursor);
+		if (!node)
+			break;
+		/*
+		 * Handle export of symbol and dso for this node by
+		 * constructing an addr_location struct and then passing it to
+		 * db_ids_from_al() to perform the export.
+		 */
+		al.sym = node->sym;
+		al.map = node->map;
+		al.machine = machine;
+		al.addr = node->ip;
+
+		if (al.map && !al.sym)
+			al.sym = dso__find_symbol(al.map->dso, MAP__FUNCTION,
+						  al.addr);
+
+		db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset);
+
+		/* add node to the call path tree if it doesn't exist */
+		current = call_path__findnew(dbe->cpr, current,
+					     al.sym, node->ip,
+					     kernel_start);
+
+		callchain_cursor_advance(&callchain_cursor);
+	}
+
+	/* Reset the callchain order to its prior value. */
+	callchain_param.order = saved_order;
+
+	if (current == &dbe->cpr->call_path) {
+		/* Bail because the callchain was empty. */
+		return NULL;
+	}
+
+	return current;
+}
+
 int db_export__branch_type(struct db_export *dbe, u32 branch_type,
 			   const char *name)
 {
@@ -329,6 +404,16 @@
 	if (err)
 		goto out_put;
 
+	if (dbe->cpr) {
+		struct call_path *cp = call_path_from_sample(dbe, al->machine,
+							     thread, sample,
+							     evsel);
+		if (cp) {
+			db_export__call_path(dbe, cp);
+			es.call_path_id = cp->db_id;
+		}
+	}
+
 	if ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
 	    sample_addr_correlates_sym(&evsel->attr)) {
 		struct addr_location addr_al;
diff --git a/tools/perf/util/db-export.h b/tools/perf/util/db-export.h
index 25e22fd..67bc6b8 100644
--- a/tools/perf/util/db-export.h
+++ b/tools/perf/util/db-export.h
@@ -27,6 +27,7 @@
 struct perf_sample;
 struct addr_location;
 struct call_return_processor;
+struct call_path_root;
 struct call_path;
 struct call_return;
 
@@ -43,6 +44,7 @@
 	u64			addr_dso_db_id;
 	u64			addr_sym_db_id;
 	u64			addr_offset; /* addr offset from symbol start */
+	u64			call_path_id;
 };
 
 struct db_export {
@@ -64,6 +66,7 @@
 	int (*export_call_return)(struct db_export *dbe,
 				  struct call_return *cr);
 	struct call_return_processor *crp;
+	struct call_path_root *cpr;
 	u64 evsel_last_db_id;
 	u64 machine_last_db_id;
 	u64 thread_last_db_id;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 8e639543..3357479 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -38,7 +38,7 @@
 				   enum dso_binary_type type,
 				   char *root_dir, char *filename, size_t size)
 {
-	char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+	char build_id_hex[SBUILD_ID_SIZE];
 	int ret = 0;
 	size_t len;
 
@@ -1301,7 +1301,7 @@
 
 size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
 {
-	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+	char sbuild_id[SBUILD_ID_SIZE];
 
 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
 	return fprintf(fp, "%s", sbuild_id);
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 577e600..a347b19 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -915,8 +915,7 @@
 		tmp = "*";
 	else if (tag == DW_TAG_subroutine_type) {
 		/* Function pointer */
-		strbuf_add(buf, "(function_type)", 15);
-		return 0;
+		return strbuf_add(buf, "(function_type)", 15);
 	} else {
 		if (!dwarf_diename(&type))
 			return -ENOENT;
@@ -927,14 +926,10 @@
 		else if (tag == DW_TAG_enumeration_type)
 			tmp = "enum ";
 		/* Write a base name */
-		strbuf_addf(buf, "%s%s", tmp, dwarf_diename(&type));
-		return 0;
+		return strbuf_addf(buf, "%s%s", tmp, dwarf_diename(&type));
 	}
 	ret = die_get_typename(&type, buf);
-	if (ret == 0)
-		strbuf_addstr(buf, tmp);
-
-	return ret;
+	return ret ? ret : strbuf_addstr(buf, tmp);
 }
 
 /**
@@ -951,14 +946,13 @@
 	ret = die_get_typename(vr_die, buf);
 	if (ret < 0) {
 		pr_debug("Failed to get type, make it unknown.\n");
-		strbuf_add(buf, " (unknown_type)", 14);
+		ret = strbuf_add(buf, " (unknown_type)", 14);
 	}
 
-	strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
-
-	return 0;
+	return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
 }
 
+#ifdef HAVE_DWARF_GETLOCATIONS
 /**
  * die_get_var_innermost_scope - Get innermost scope range of given variable DIE
  * @sp_die: a subprogram DIE
@@ -998,22 +992,24 @@
 	}
 
 	while ((offset = dwarf_ranges(&scopes[1], offset, &base,
-				&start, &end)) > 0) {
+					&start, &end)) > 0) {
 		start -= entry;
 		end -= entry;
 
 		if (first) {
-			strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
-				name, start, end);
+			ret = strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
+					  name, start, end);
 			first = false;
 		} else {
-			strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
-				start, end);
+			ret = strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
+					  start, end);
 		}
+		if (ret < 0)
+			goto out;
 	}
 
 	if (!first)
-		strbuf_add(buf, "]>", 2);
+		ret = strbuf_add(buf, "]>", 2);
 
 out:
 	free(scopes);
@@ -1053,30 +1049,39 @@
 	if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
 		return -EINVAL;
 
-	while ((offset = dwarf_getlocations(
-				&attr, offset, &base,
-				&start, &end, &op, &nops)) > 0) {
+	while ((offset = dwarf_getlocations(&attr, offset, &base,
+					&start, &end, &op, &nops)) > 0) {
 		if (start == 0) {
 			/* Single Location Descriptions */
 			ret = die_get_var_innermost_scope(sp_die, vr_die, buf);
-			return ret;
+			goto out;
 		}
 
 		/* Location Lists */
 		start -= entry;
 		end -= entry;
 		if (first) {
-			strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
-				name, start, end);
+			ret = strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
+					  name, start, end);
 			first = false;
 		} else {
-			strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
-				start, end);
+			ret = strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
+					  start, end);
 		}
+		if (ret < 0)
+			goto out;
 	}
 
 	if (!first)
-		strbuf_add(buf, "]>", 2);
-
+		ret = strbuf_add(buf, "]>", 2);
+out:
 	return ret;
 }
+#else
+int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
+		      Dwarf_Die *vr_die __maybe_unused,
+		      struct strbuf *buf __maybe_unused)
+{
+	return -ENOTSUP;
+}
+#endif
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 52cf479..f6fcc68 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -45,6 +45,7 @@
 	[PERF_RECORD_STAT]			= "STAT",
 	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
 	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
+	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
 };
 
 const char *perf_event__name(unsigned int id)
@@ -56,13 +57,22 @@
 	return perf_event__names[id];
 }
 
-static struct perf_sample synth_sample = {
+static int perf_tool__process_synth_event(struct perf_tool *tool,
+					  union perf_event *event,
+					  struct machine *machine,
+					  perf_event__handler_t process)
+{
+	struct perf_sample synth_sample = {
 	.pid	   = -1,
 	.tid	   = -1,
 	.time	   = -1,
 	.stream_id = -1,
 	.cpu	   = -1,
 	.period	   = 1,
+	.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
+	};
+
+	return process(tool, event, &synth_sample, machine);
 };
 
 /*
@@ -186,7 +196,7 @@
 	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
 		return -1;
 
-	if (process(tool, event, &synth_sample, machine) != 0)
+	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
 		return -1;
 
 	return tgid;
@@ -218,7 +228,7 @@
 
 	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
 
-	if (process(tool, event, &synth_sample, machine) != 0)
+	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
 		return -1;
 
 	return 0;
@@ -344,7 +354,7 @@
 		event->mmap2.pid = tgid;
 		event->mmap2.tid = pid;
 
-		if (process(tool, event, &synth_sample, machine) != 0) {
+		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
 			rc = -1;
 			break;
 		}
@@ -402,7 +412,7 @@
 
 		memcpy(event->mmap.filename, pos->dso->long_name,
 		       pos->dso->long_name_len + 1);
-		if (process(tool, event, &synth_sample, machine) != 0) {
+		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
 			rc = -1;
 			break;
 		}
@@ -424,7 +434,7 @@
 {
 	char filename[PATH_MAX];
 	DIR *tasks;
-	struct dirent dirent, *next;
+	struct dirent *dirent;
 	pid_t tgid, ppid;
 	int rc = 0;
 
@@ -453,11 +463,11 @@
 		return 0;
 	}
 
-	while (!readdir_r(tasks, &dirent, &next) && next) {
+	while ((dirent = readdir(tasks)) != NULL) {
 		char *end;
 		pid_t _pid;
 
-		_pid = strtol(dirent.d_name, &end, 10);
+		_pid = strtol(dirent->d_name, &end, 10);
 		if (*end)
 			continue;
 
@@ -472,7 +482,7 @@
 		/*
 		 * Send the prepared comm event
 		 */
-		if (process(tool, comm_event, &synth_sample, machine) != 0)
+		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
 			break;
 
 		rc = 0;
@@ -566,7 +576,7 @@
 {
 	DIR *proc;
 	char proc_path[PATH_MAX];
-	struct dirent dirent, *next;
+	struct dirent *dirent;
 	union perf_event *comm_event, *mmap_event, *fork_event;
 	int err = -1;
 
@@ -591,9 +601,9 @@
 	if (proc == NULL)
 		goto out_free_fork;
 
-	while (!readdir_r(proc, &dirent, &next) && next) {
+	while ((dirent = readdir(proc)) != NULL) {
 		char *end;
-		pid_t pid = strtol(dirent.d_name, &end, 10);
+		pid_t pid = strtol(dirent->d_name, &end, 10);
 
 		if (*end) /* only interested in proper numerical dirents */
 			continue;
@@ -701,7 +711,7 @@
 	event->mmap.len   = map->end - event->mmap.start;
 	event->mmap.pid   = machine->pid;
 
-	err = process(tool, event, &synth_sample, machine);
+	err = perf_tool__process_synth_event(tool, event, machine, process);
 	free(event);
 
 	return err;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 6bb1c92..8d363d5 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -233,6 +233,7 @@
 	PERF_RECORD_STAT			= 76,
 	PERF_RECORD_STAT_ROUND			= 77,
 	PERF_RECORD_EVENT_UPDATE		= 78,
+	PERF_RECORD_TIME_CONV			= 79,
 	PERF_RECORD_HEADER_MAX
 };
 
@@ -469,6 +470,13 @@
 	u64				time;
 };
 
+struct time_conv_event {
+	struct perf_event_header header;
+	u64 time_shift;
+	u64 time_mult;
+	u64 time_zero;
+};
+
 union perf_event {
 	struct perf_event_header	header;
 	struct mmap_event		mmap;
@@ -497,6 +505,7 @@
 	struct stat_config_event	stat_config;
 	struct stat_event		stat;
 	struct stat_round_event		stat_round;
+	struct time_conv_event		time_conv;
 };
 
 void perf_event__print_totals(void);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 86a0383..c4bfe11 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -679,53 +679,52 @@
 	return NULL;
 }
 
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+/* When check_messup is true, 'end' must point to a good entry */
+static union perf_event *
+perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
+		u64 end, u64 *prev)
 {
-	struct perf_mmap *md = &evlist->mmap[idx];
-	u64 head;
-	u64 old = md->prev;
 	unsigned char *data = md->base + page_size;
 	union perf_event *event = NULL;
+	int diff = end - start;
 
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!atomic_read(&md->refcnt))
-		return NULL;
-
-	head = perf_mmap__read_head(md);
-	if (evlist->overwrite) {
+	if (check_messup) {
 		/*
 		 * If we're further behind than half the buffer, there's a chance
 		 * the writer will bite our tail and mess up the samples under us.
 		 *
-		 * If we somehow ended up ahead of the head, we got messed up.
+		 * If we somehow ended up ahead of the 'end', we got messed up.
 		 *
-		 * In either case, truncate and restart at head.
+		 * In either case, truncate and restart at 'end'.
 		 */
-		int diff = head - old;
 		if (diff > md->mask / 2 || diff < 0) {
 			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
 
 			/*
-			 * head points to a known good entry, start there.
+			 * 'end' points to a known good entry, start there.
 			 */
-			old = head;
+			start = end;
+			diff = 0;
 		}
 	}
 
-	if (old != head) {
+	if (diff >= (int)sizeof(event->header)) {
 		size_t size;
 
-		event = (union perf_event *)&data[old & md->mask];
+		event = (union perf_event *)&data[start & md->mask];
 		size = event->header.size;
 
+		if (size < sizeof(event->header) || diff < (int)size) {
+			event = NULL;
+			goto broken_event;
+		}
+
 		/*
 		 * Event straddles the mmap boundary -- header should always
 		 * be inside due to u64 alignment of output.
 		 */
-		if ((old & md->mask) + size != ((old + size) & md->mask)) {
-			unsigned int offset = old;
+		if ((start & md->mask) + size != ((start + size) & md->mask)) {
+			unsigned int offset = start;
 			unsigned int len = min(sizeof(*event), size), cpy;
 			void *dst = md->event_copy;
 
@@ -740,14 +739,83 @@
 			event = (union perf_event *) md->event_copy;
 		}
 
-		old += size;
+		start += size;
 	}
 
-	md->prev = old;
+broken_event:
+	if (prev)
+		*prev = start;
 
 	return event;
 }
 
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+{
+	struct perf_mmap *md = &evlist->mmap[idx];
+	u64 head;
+	u64 old = md->prev;
+
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!atomic_read(&md->refcnt))
+		return NULL;
+
+	head = perf_mmap__read_head(md);
+
+	return perf_mmap__read(md, evlist->overwrite, old, head, &md->prev);
+}
+
+union perf_event *
+perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
+{
+	struct perf_mmap *md = &evlist->mmap[idx];
+	u64 head, end;
+	u64 start = md->prev;
+
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!atomic_read(&md->refcnt))
+		return NULL;
+
+	head = perf_mmap__read_head(md);
+	if (!head)
+		return NULL;
+
+	/*
+	 * The 'head' pointer starts at 0 and the kernel subtracts
+	 * sizeof(record) from it on every write, so 'head' is in fact
+	 * negative. The 'end' pointer is made manually by adding the size
+	 * of the ring buffer to 'head', meaning the valid data that can be
+	 * read is the whole ring buffer. If 'end' is positive, the ring
+	 * buffer has not been fully filled, so 'end' must be adjusted to 0.
+	 *
+	 * However, since both 'head' and 'end' are unsigned, we can't
+	 * simply compare 'end' against 0. Instead, compare '-head' with
+	 * the size of the ring buffer, where -head is the number of bytes
+	 * the kernel has written to the ring buffer.
+	 */
+	if (-head < (u64)(md->mask + 1))
+		end = 0;
+	else
+		end = head + md->mask + 1;
+
+	return perf_mmap__read(md, false, start, end, &md->prev);
+}
+
+void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
+{
+	struct perf_mmap *md = &evlist->mmap[idx];
+	u64 head;
+
+	if (!atomic_read(&md->refcnt))
+		return;
+
+	head = perf_mmap__read_head(md);
+	md->prev = head;
+}
+
 static bool perf_mmap__empty(struct perf_mmap *md)
 {
 	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
@@ -986,26 +1054,34 @@
 	return -1;
 }
 
+unsigned long perf_event_mlock_kb_in_pages(void)
+{
+	unsigned long pages;
+	int max;
+
+	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
+		/*
+		 * Pick a once upon a time good value, i.e. things look
+		 * strange since we can't read a sysctl value, but lets not
+		 * die yet...
+		 */
+		max = 512;
+	} else {
+		max -= (page_size / 1024);
+	}
+
+	pages = (max * 1024) / page_size;
+	if (!is_power_of_2(pages))
+		pages = rounddown_pow_of_two(pages);
+
+	return pages;
+}
+
 static size_t perf_evlist__mmap_size(unsigned long pages)
 {
-	if (pages == UINT_MAX) {
-		int max;
-
-		if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
-			/*
-			 * Pick a once upon a time good value, i.e. things look
-			 * strange since we can't read a sysctl value, but lets not
-			 * die yet...
-			 */
-			max = 512;
-		} else {
-			max -= (page_size / 1024);
-		}
-
-		pages = (max * 1024) / page_size;
-		if (!is_power_of_2(pages))
-			pages = rounddown_pow_of_two(pages);
-	} else if (!is_power_of_2(pages))
+	if (pages == UINT_MAX)
+		pages = perf_event_mlock_kb_in_pages();
+	else if (!is_power_of_2(pages))
 		return 0;
 
 	return (pages + 1) * page_size;
@@ -1192,6 +1268,24 @@
 	perf_evlist__propagate_maps(evlist);
 }
 
+void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
+				   enum perf_event_sample_format bit)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each(evlist, evsel)
+		__perf_evsel__set_sample_bit(evsel, bit);
+}
+
+void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
+				     enum perf_event_sample_format bit)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each(evlist, evsel)
+		__perf_evsel__reset_sample_bit(evsel, bit);
+}
+
 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
 {
 	struct perf_evsel *evsel;
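
For illustration, a hedged sketch of how the backward-reading helpers above are meant to be used together: first "catch up" so md->prev points at the current head, then drain events until perf_mmap__read() runs out of complete records. This assumes an evlist whose maps back overwrite/backward events; error handling and event delivery are omitted.

/* Sketch: drain one backward ring buffer after stopping the events. */
#include "evlist.h"

static void drain_backward_ring(struct perf_evlist *evlist, int idx)
{
	union perf_event *event;

	perf_evlist__mmap_read_catchup(evlist, idx);

	while ((event = perf_evlist__mmap_read_backward(evlist, idx)) != NULL) {
		/* ... deliver 'event' to the session/tool here ... */
	}
}
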
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index a0d1522..85d1b598 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -87,6 +87,17 @@
 int perf_evlist__add_newtp(struct perf_evlist *evlist,
 			   const char *sys, const char *name, void *handler);
 
+void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
+				   enum perf_event_sample_format bit);
+void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
+				     enum perf_event_sample_format bit);
+
+#define perf_evlist__set_sample_bit(evlist, bit) \
+	__perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)
+
+#define perf_evlist__reset_sample_bit(evlist, bit) \
+	__perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)
+
 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
 int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
 int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);
@@ -118,16 +129,23 @@
 
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
 
+union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
+						  int idx);
+void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
+
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 
 int perf_evlist__open(struct perf_evlist *evlist);
 void perf_evlist__close(struct perf_evlist *evlist);
 
+struct callchain_param;
+
 void perf_evlist__set_id_pos(struct perf_evlist *evlist);
 bool perf_can_sample_identifier(void);
 bool perf_can_record_switch_events(void);
 bool perf_can_record_cpu_wide(void);
-void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts);
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+			 struct callchain_param *callchain);
 int record_opts__config(struct record_opts *opts);
 
 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
@@ -144,6 +162,8 @@
 				  const char *str,
 				  int unset);
 
+unsigned long perf_event_mlock_kb_in_pages(void);
+
 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
 			 bool overwrite, unsigned int auxtrace_pages,
 			 bool auxtrace_overwrite);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 738ce22..52c7d88 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -226,7 +226,8 @@
 		perf_evsel__init(evsel, attr, idx);
 
 	if (perf_evsel__is_bpf_output(evsel)) {
-		evsel->attr.sample_type |= PERF_SAMPLE_RAW;
+		evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
 		evsel->attr.sample_period = 1;
 	}
 
@@ -561,10 +562,9 @@
 	return ret;
 }
 
-static void
-perf_evsel__config_callgraph(struct perf_evsel *evsel,
-			     struct record_opts *opts,
-			     struct callchain_param *param)
+void perf_evsel__config_callchain(struct perf_evsel *evsel,
+				  struct record_opts *opts,
+				  struct callchain_param *param)
 {
 	bool function = perf_evsel__is_function_event(evsel);
 	struct perf_event_attr *attr = &evsel->attr;
@@ -704,7 +704,7 @@
 
 		/* set perf-event callgraph */
 		if (param.enabled)
-			perf_evsel__config_callgraph(evsel, opts, &param);
+			perf_evsel__config_callchain(evsel, opts, &param);
 	}
 }
 
@@ -736,7 +736,8 @@
  *     enable/disable events specifically, as there's no
  *     initial traced exec call.
  */
-void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
+void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
+			struct callchain_param *callchain)
 {
 	struct perf_evsel *leader = evsel->leader;
 	struct perf_event_attr *attr = &evsel->attr;
@@ -811,8 +812,8 @@
 	if (perf_evsel__is_function_event(evsel))
 		evsel->attr.exclude_callchain_user = 1;
 
-	if (callchain_param.enabled && !evsel->no_aux_samples)
-		perf_evsel__config_callgraph(evsel, opts, &callchain_param);
+	if (callchain && callchain->enabled && !evsel->no_aux_samples)
+		perf_evsel__config_callchain(evsel, opts, callchain);
 
 	if (opts->sample_intr_regs) {
 		attr->sample_regs_intr = opts->sample_intr_regs;
@@ -826,7 +827,7 @@
 		perf_evsel__set_sample_bit(evsel, PERIOD);
 
 	/*
-	 * When the user explicitely disabled time don't force it here.
+	 * When the user explicitly disabled time don't force it here.
 	 */
 	if (opts->sample_time &&
 	    (!perf_missing_features.sample_id_all &&
@@ -1230,6 +1231,21 @@
 	__p_bits(buf, size, value, bits);
 }
 
+static void __p_branch_sample_type(char *buf, size_t size, u64 value)
+{
+#define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n }
+	struct bit_names bits[] = {
+		bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY),
+		bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL),
+		bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
+		bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
+		bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
+		{ .name = NULL, }
+	};
+#undef bit_name
+	__p_bits(buf, size, value, bits);
+}
+
 static void __p_read_format(char *buf, size_t size, u64 value)
 {
 #define bit_name(n) { PERF_FORMAT_##n, #n }
@@ -1248,6 +1264,7 @@
 #define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
 #define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
 #define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val)
+#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
 #define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val)
 
 #define PRINT_ATTRn(_n, _f, _p)				\
@@ -1299,12 +1316,13 @@
 	PRINT_ATTRf(comm_exec, p_unsigned);
 	PRINT_ATTRf(use_clockid, p_unsigned);
 	PRINT_ATTRf(context_switch, p_unsigned);
+	PRINT_ATTRf(write_backward, p_unsigned);
 
 	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
 	PRINT_ATTRf(bp_type, p_unsigned);
 	PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
 	PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
-	PRINT_ATTRf(branch_sample_type, p_unsigned);
+	PRINT_ATTRf(branch_sample_type, p_branch_sample_type);
 	PRINT_ATTRf(sample_regs_user, p_hex);
 	PRINT_ATTRf(sample_stack_user, p_unsigned);
 	PRINT_ATTRf(clockid, p_signed);
@@ -2253,98 +2271,11 @@
 	return 0;
 }
 
-static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
-{
-	va_list args;
-	int ret = 0;
-
-	if (!*first) {
-		ret += fprintf(fp, ",");
-	} else {
-		ret += fprintf(fp, ":");
-		*first = false;
-	}
-
-	va_start(args, fmt);
-	ret += vfprintf(fp, fmt, args);
-	va_end(args);
-	return ret;
-}
-
-static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
-{
-	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
-}
-
-int perf_evsel__fprintf(struct perf_evsel *evsel,
-			struct perf_attr_details *details, FILE *fp)
-{
-	bool first = true;
-	int printed = 0;
-
-	if (details->event_group) {
-		struct perf_evsel *pos;
-
-		if (!perf_evsel__is_group_leader(evsel))
-			return 0;
-
-		if (evsel->nr_members > 1)
-			printed += fprintf(fp, "%s{", evsel->group_name ?: "");
-
-		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
-		for_each_group_member(pos, evsel)
-			printed += fprintf(fp, ",%s", perf_evsel__name(pos));
-
-		if (evsel->nr_members > 1)
-			printed += fprintf(fp, "}");
-		goto out;
-	}
-
-	printed += fprintf(fp, "%s", perf_evsel__name(evsel));
-
-	if (details->verbose) {
-		printed += perf_event_attr__fprintf(fp, &evsel->attr,
-						    __print_attr__fprintf, &first);
-	} else if (details->freq) {
-		const char *term = "sample_freq";
-
-		if (!evsel->attr.freq)
-			term = "sample_period";
-
-		printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
-					 term, (u64)evsel->attr.sample_freq);
-	}
-
-	if (details->trace_fields) {
-		struct format_field *field;
-
-		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
-			printed += comma_fprintf(fp, &first, " (not a tracepoint)");
-			goto out;
-		}
-
-		field = evsel->tp_format->format.fields;
-		if (field == NULL) {
-			printed += comma_fprintf(fp, &first, " (no trace field)");
-			goto out;
-		}
-
-		printed += comma_fprintf(fp, &first, " trace_fields: %s", field->name);
-
-		field = field->next;
-		while (field) {
-			printed += comma_fprintf(fp, &first, "%s", field->name);
-			field = field->next;
-		}
-	}
-out:
-	fputc('\n', fp);
-	return ++printed;
-}
-
 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
 			  char *msg, size_t msgsize)
 {
+	int paranoid;
+
 	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
 	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
 	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
@@ -2364,6 +2295,22 @@
 
 		zfree(&evsel->name);
 		return true;
+	} else if (err == EACCES && !evsel->attr.exclude_kernel &&
+		   (paranoid = perf_event_paranoid()) > 1) {
+		const char *name = perf_evsel__name(evsel);
+		char *new_name;
+
+		if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
+			return false;
+
+		if (evsel->name)
+			free(evsel->name);
+		evsel->name = new_name;
+		scnprintf(msg, msgsize,
+"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
+		evsel->attr.exclude_kernel = 1;
+
+		return true;
 	}
 
 	return false;
@@ -2382,12 +2329,13 @@
 		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
 		 "which controls use of the performance events system by\n"
 		 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
-		 "The default value is 1:\n\n"
+		 "The current value is %d:\n\n"
 		 "  -1: Allow use of (almost) all events by all users\n"
 		 ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
 		 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
 		 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
-				 target->system_wide ? "system-wide " : "");
+				 target->system_wide ? "system-wide " : "",
+				 perf_event_paranoid());
 	case ENOENT:
 		return scnprintf(msg, size, "The %s event is not supported.",
 				 perf_evsel__name(evsel));
@@ -2397,10 +2345,18 @@
 			 "Probably the maximum number of open file descriptors has been reached.\n"
 			 "Hint: Try again after reducing the number of events.\n"
 			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
+	case ENOMEM:
+		if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0 &&
+		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
+			return scnprintf(msg, size,
+					 "Not enough memory to setup event with callchain.\n"
+					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
+					 "Hint: Current value: %d", sysctl_perf_event_max_stack);
+		break;
 	case ENODEV:
 		if (target->cpu_list)
 			return scnprintf(msg, size, "%s",
-	 "No such device - did you specify an out-of-range profile CPU?\n");
+	 "No such device - did you specify an out-of-range profile CPU?");
 		break;
 	case EOPNOTSUPP:
 		if (evsel->attr.precise_ip)
@@ -2432,7 +2388,7 @@
 	return scnprintf(msg, size,
 	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
 	"/bin/dmesg may provide additional information.\n"
-	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
+	"No CONFIG_PERF_EVENTS=y kernel support configured?",
 			 err, strerror_r(err, sbuf, sizeof(sbuf)),
 			 perf_evsel__name(evsel));
 }
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 501ea6e..8a644fe 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -178,8 +178,14 @@
 void perf_evsel__exit(struct perf_evsel *evsel);
 void perf_evsel__delete(struct perf_evsel *evsel);
 
+struct callchain_param;
+
 void perf_evsel__config(struct perf_evsel *evsel,
-			struct record_opts *opts);
+			struct record_opts *opts,
+			struct callchain_param *callchain);
+void perf_evsel__config_callchain(struct perf_evsel *evsel,
+				  struct record_opts *opts,
+				  struct callchain_param *callchain);
 
 int __perf_evsel__sample_size(u64 sample_type);
 void perf_evsel__calc_id_pos(struct perf_evsel *evsel);
@@ -381,6 +387,24 @@
 int perf_evsel__fprintf(struct perf_evsel *evsel,
 			struct perf_attr_details *details, FILE *fp);
 
+#define EVSEL__PRINT_IP			(1<<0)
+#define EVSEL__PRINT_SYM		(1<<1)
+#define EVSEL__PRINT_DSO		(1<<2)
+#define EVSEL__PRINT_SYMOFFSET		(1<<3)
+#define EVSEL__PRINT_ONELINE		(1<<4)
+#define EVSEL__PRINT_SRCLINE		(1<<5)
+#define EVSEL__PRINT_UNKNOWN_AS_ADDR	(1<<6)
+
+struct callchain_cursor;
+
+int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
+			      unsigned int print_opts,
+			      struct callchain_cursor *cursor, FILE *fp);
+
+int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
+			int left_alignment, unsigned int print_opts,
+			struct callchain_cursor *cursor, FILE *fp);
+
 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
 			  char *msg, size_t msgsize);
 int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
@@ -396,7 +420,7 @@
      (_evsel) && (_evsel)->leader == (_leader);					\
      (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
 
-static inline bool has_branch_callstack(struct perf_evsel *evsel)
+static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evsel)
 {
 	return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
 }
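
As a hedged illustration of the new EVSEL__PRINT_* flags and sample__fprintf_sym() declared above (a sketch only; the sample, addr_location and cursor are assumed to come from the usual resolution path):

/* Sketch: print ip, symbol+offset and dso for one sample, perf-script style. */
#include <stdio.h>
#include "evsel.h"

static void print_one_sample(struct perf_sample *sample,
			     struct addr_location *al,
			     struct callchain_cursor *cursor)
{
	unsigned int opts = EVSEL__PRINT_IP | EVSEL__PRINT_SYM |
			    EVSEL__PRINT_SYMOFFSET | EVSEL__PRINT_DSO;

	/* A non-NULL cursor makes it print the whole callchain instead. */
	sample__fprintf_sym(sample, al, 0, opts, cursor, stdout);
}
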
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
new file mode 100644
index 0000000..3674e77
--- /dev/null
+++ b/tools/perf/util/evsel_fprintf.c
@@ -0,0 +1,212 @@
+#include <stdio.h>
+#include <stdbool.h>
+#include <traceevent/event-parse.h>
+#include "evsel.h"
+#include "callchain.h"
+#include "map.h"
+#include "symbol.h"
+
+static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
+{
+	va_list args;
+	int ret = 0;
+
+	if (!*first) {
+		ret += fprintf(fp, ",");
+	} else {
+		ret += fprintf(fp, ":");
+		*first = false;
+	}
+
+	va_start(args, fmt);
+	ret += vfprintf(fp, fmt, args);
+	va_end(args);
+	return ret;
+}
+
+static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
+{
+	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
+}
+
+int perf_evsel__fprintf(struct perf_evsel *evsel,
+			struct perf_attr_details *details, FILE *fp)
+{
+	bool first = true;
+	int printed = 0;
+
+	if (details->event_group) {
+		struct perf_evsel *pos;
+
+		if (!perf_evsel__is_group_leader(evsel))
+			return 0;
+
+		if (evsel->nr_members > 1)
+			printed += fprintf(fp, "%s{", evsel->group_name ?: "");
+
+		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
+		for_each_group_member(pos, evsel)
+			printed += fprintf(fp, ",%s", perf_evsel__name(pos));
+
+		if (evsel->nr_members > 1)
+			printed += fprintf(fp, "}");
+		goto out;
+	}
+
+	printed += fprintf(fp, "%s", perf_evsel__name(evsel));
+
+	if (details->verbose) {
+		printed += perf_event_attr__fprintf(fp, &evsel->attr,
+						    __print_attr__fprintf, &first);
+	} else if (details->freq) {
+		const char *term = "sample_freq";
+
+		if (!evsel->attr.freq)
+			term = "sample_period";
+
+		printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
+					 term, (u64)evsel->attr.sample_freq);
+	}
+
+	if (details->trace_fields) {
+		struct format_field *field;
+
+		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
+			printed += comma_fprintf(fp, &first, " (not a tracepoint)");
+			goto out;
+		}
+
+		field = evsel->tp_format->format.fields;
+		if (field == NULL) {
+			printed += comma_fprintf(fp, &first, " (no trace field)");
+			goto out;
+		}
+
+		printed += comma_fprintf(fp, &first, " trace_fields: %s", field->name);
+
+		field = field->next;
+		while (field) {
+			printed += comma_fprintf(fp, &first, "%s", field->name);
+			field = field->next;
+		}
+	}
+out:
+	fputc('\n', fp);
+	return ++printed;
+}
+
+int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
+			      unsigned int print_opts, struct callchain_cursor *cursor,
+			      FILE *fp)
+{
+	int printed = 0;
+	struct callchain_cursor_node *node;
+	int print_ip = print_opts & EVSEL__PRINT_IP;
+	int print_sym = print_opts & EVSEL__PRINT_SYM;
+	int print_dso = print_opts & EVSEL__PRINT_DSO;
+	int print_symoffset = print_opts & EVSEL__PRINT_SYMOFFSET;
+	int print_oneline = print_opts & EVSEL__PRINT_ONELINE;
+	int print_srcline = print_opts & EVSEL__PRINT_SRCLINE;
+	int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR;
+	char s = print_oneline ? ' ' : '\t';
+
+	if (sample->callchain) {
+		struct addr_location node_al;
+
+		callchain_cursor_commit(cursor);
+
+		while (1) {
+			u64 addr = 0;
+
+			node = callchain_cursor_current(cursor);
+			if (!node)
+				break;
+
+			if (node->sym && node->sym->ignore)
+				goto next;
+
+			printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " ");
+
+			if (print_ip)
+				printed += fprintf(fp, "%c%16" PRIx64, s, node->ip);
+
+			if (node->map)
+				addr = node->map->map_ip(node->map, node->ip);
+
+			if (print_sym) {
+				printed += fprintf(fp, " ");
+				node_al.addr = addr;
+				node_al.map  = node->map;
+
+				if (print_symoffset) {
+					printed += __symbol__fprintf_symname_offs(node->sym, &node_al,
+										  print_unknown_as_addr, fp);
+				} else {
+					printed += __symbol__fprintf_symname(node->sym, &node_al,
+									     print_unknown_as_addr, fp);
+				}
+			}
+
+			if (print_dso) {
+				printed += fprintf(fp, " (");
+				printed += map__fprintf_dsoname(node->map, fp);
+				printed += fprintf(fp, ")");
+			}
+
+			if (print_srcline)
+				printed += map__fprintf_srcline(node->map, addr, "\n  ", fp);
+
+			if (!print_oneline)
+				printed += fprintf(fp, "\n");
+next:
+			callchain_cursor_advance(cursor);
+		}
+	}
+
+	return printed;
+}
+
+int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
+			int left_alignment, unsigned int print_opts,
+			struct callchain_cursor *cursor, FILE *fp)
+{
+	int printed = 0;
+	int print_ip = print_opts & EVSEL__PRINT_IP;
+	int print_sym = print_opts & EVSEL__PRINT_SYM;
+	int print_dso = print_opts & EVSEL__PRINT_DSO;
+	int print_symoffset = print_opts & EVSEL__PRINT_SYMOFFSET;
+	int print_srcline = print_opts & EVSEL__PRINT_SRCLINE;
+	int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR;
+
+	if (cursor != NULL) {
+		printed += sample__fprintf_callchain(sample, left_alignment,
+						     print_opts, cursor, fp);
+	} else if (!(al->sym && al->sym->ignore)) {
+		printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " ");
+
+		if (print_ip)
+			printed += fprintf(fp, "%16" PRIx64, sample->ip);
+
+		if (print_sym) {
+			printed += fprintf(fp, " ");
+			if (print_symoffset) {
+				printed += __symbol__fprintf_symname_offs(al->sym, al,
+									  print_unknown_as_addr, fp);
+			} else {
+				printed += __symbol__fprintf_symname(al->sym, al,
+								     print_unknown_as_addr, fp);
+			}
+		}
+
+		if (print_dso) {
+			printed += fprintf(fp, " (");
+			printed += map__fprintf_dsoname(al->map, fp);
+			printed += fprintf(fp, ")");
+		}
+
+		if (print_srcline)
+			printed += map__fprintf_srcline(al->map, al->addr, "\n  ", fp);
+	}
+
+	return printed;
+}
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index cd67e64..2fbeb59 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -9,36 +9,32 @@
 
 #if   defined(__arm__)
 #define GEN_ELF_ARCH	EM_ARM
-#define GEN_ELF_ENDIAN	ELFDATA2LSB
 #define GEN_ELF_CLASS	ELFCLASS32
 #elif defined(__aarch64__)
 #define GEN_ELF_ARCH	EM_AARCH64
-#define GEN_ELF_ENDIAN	ELFDATA2LSB
 #define GEN_ELF_CLASS	ELFCLASS64
 #elif defined(__x86_64__)
 #define GEN_ELF_ARCH	EM_X86_64
-#define GEN_ELF_ENDIAN	ELFDATA2LSB
 #define GEN_ELF_CLASS	ELFCLASS64
 #elif defined(__i386__)
 #define GEN_ELF_ARCH	EM_386
-#define GEN_ELF_ENDIAN	ELFDATA2LSB
 #define GEN_ELF_CLASS	ELFCLASS32
-#elif defined(__ppcle__)
-#define GEN_ELF_ARCH	EM_PPC
-#define GEN_ELF_ENDIAN	ELFDATA2LSB
+#elif defined(__powerpc64__)
+#define GEN_ELF_ARCH	EM_PPC64
 #define GEN_ELF_CLASS	ELFCLASS64
 #elif defined(__powerpc__)
-#define GEN_ELF_ARCH	EM_PPC64
-#define GEN_ELF_ENDIAN	ELFDATA2MSB
-#define GEN_ELF_CLASS	ELFCLASS64
-#elif defined(__powerpcle__)
-#define GEN_ELF_ARCH	EM_PPC64
-#define GEN_ELF_ENDIAN	ELFDATA2LSB
-#define GEN_ELF_CLASS	ELFCLASS64
+#define GEN_ELF_ARCH	EM_PPC
+#define GEN_ELF_CLASS	ELFCLASS32
 #else
 #error "unsupported architecture"
 #endif
 
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define GEN_ELF_ENDIAN	ELFDATA2MSB
+#else
+#define GEN_ELF_ENDIAN	ELFDATA2LSB
+#endif
+
 #if GEN_ELF_CLASS == ELFCLASS64
 #define elf_newehdr	elf64_newehdr
 #define elf_getshdr	elf64_getshdr
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 90680ec..08852dd 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1474,7 +1474,7 @@
 
 	dso = machine__findnew_dso(machine, filename);
 	if (dso != NULL) {
-		char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+		char sbuild_id[SBUILD_ID_SIZE];
 
 		dso__set_build_id(dso, &bev->build_id);
 
@@ -1819,7 +1819,8 @@
 
 	ph->env.nr_sibling_cores = nr;
 	size += sizeof(u32);
-	strbuf_init(&sb, 128);
+	if (strbuf_init(&sb, 128) < 0)
+		goto free_cpu;
 
 	for (i = 0; i < nr; i++) {
 		str = do_read_string(fd, ph);
@@ -1827,7 +1828,8 @@
 			goto error;
 
 		/* include a NULL character at the end */
-		strbuf_add(&sb, str, strlen(str) + 1);
+		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
+			goto error;
 		size += string_size(str);
 		free(str);
 	}
@@ -1849,7 +1851,8 @@
 			goto error;
 
 		/* include a NULL character at the end */
-		strbuf_add(&sb, str, strlen(str) + 1);
+		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
+			goto error;
 		size += string_size(str);
 		free(str);
 	}
@@ -1912,13 +1915,14 @@
 	/* nr nodes */
 	ret = readn(fd, &nr, sizeof(nr));
 	if (ret != sizeof(nr))
-		goto error;
+		return -1;
 
 	if (ph->needs_swap)
 		nr = bswap_32(nr);
 
 	ph->env.nr_numa_nodes = nr;
-	strbuf_init(&sb, 256);
+	if (strbuf_init(&sb, 256) < 0)
+		return -1;
 
 	for (i = 0; i < nr; i++) {
 		/* node number */
@@ -1940,15 +1944,17 @@
 			mem_free = bswap_64(mem_free);
 		}
 
-		strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
-			    node, mem_total, mem_free);
+		if (strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
+				node, mem_total, mem_free) < 0)
+			goto error;
 
 		str = do_read_string(fd, ph);
 		if (!str)
 			goto error;
 
 		/* include a NULL character at the end */
-		strbuf_add(&sb, str, strlen(str) + 1);
+		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
+			goto error;
 		free(str);
 	}
 	ph->env.numa_nodes = strbuf_detach(&sb, NULL);
@@ -1982,7 +1988,8 @@
 	}
 
 	ph->env.nr_pmu_mappings = pmu_num;
-	strbuf_init(&sb, 128);
+	if (strbuf_init(&sb, 128) < 0)
+		return -1;
 
 	while (pmu_num) {
 		if (readn(fd, &type, sizeof(type)) != sizeof(type))
@@ -1994,9 +2001,11 @@
 		if (!name)
 			goto error;
 
-		strbuf_addf(&sb, "%u:%s", type, name);
+		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
+			goto error;
 		/* include a NULL character at the end */
-		strbuf_add(&sb, "", 1);
+		if (strbuf_add(&sb, "", 1) < 0)
+			goto error;
 
 		if (!strcmp(name, "msr"))
 			ph->env.msr_pmu_type = type;
diff --git a/tools/perf/util/help-unknown-cmd.c b/tools/perf/util/help-unknown-cmd.c
index 43a98a4..d62ccae 100644
--- a/tools/perf/util/help-unknown-cmd.c
+++ b/tools/perf/util/help-unknown-cmd.c
@@ -27,16 +27,27 @@
 	return l1 != l2 ? l1 - l2 : strcmp(s1, s2);
 }
 
-static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
+static int add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
 {
-	unsigned int i;
+	unsigned int i, nr = cmds->cnt + old->cnt;
+	void *tmp;
 
-	ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc);
-
+	if (nr > cmds->alloc) {
+		/* Choose bigger one to alloc */
+		if (alloc_nr(cmds->alloc) < nr)
+			cmds->alloc = nr;
+		else
+			cmds->alloc = alloc_nr(cmds->alloc);
+		tmp = realloc(cmds->names, cmds->alloc * sizeof(*cmds->names));
+		if (!tmp)
+			return -1;
+		cmds->names = tmp;
+	}
 	for (i = 0; i < old->cnt; i++)
 		cmds->names[cmds->cnt++] = old->names[i];
 	zfree(&old->names);
 	old->cnt = 0;
+	return 0;
 }
 
 const char *help_unknown_cmd(const char *cmd)
@@ -52,8 +63,11 @@
 
 	load_command_list("perf-", &main_cmds, &other_cmds);
 
-	add_cmd_list(&main_cmds, &aliases);
-	add_cmd_list(&main_cmds, &other_cmds);
+	if (add_cmd_list(&main_cmds, &aliases) < 0 ||
+	    add_cmd_list(&main_cmds, &other_cmds) < 0) {
+		fprintf(stderr, "ERROR: Failed to allocate command list for unknown command.\n");
+		goto end;
+	}
 	qsort(main_cmds.names, main_cmds.cnt,
 	      sizeof(main_cmds.names), cmdname_compare);
 	uniq(&main_cmds);
@@ -99,6 +113,6 @@
 		for (i = 0; i < n; i++)
 			fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
 	}
-
+end:
 	exit(1);
 }
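add_cmd_list() above now grows the destination array by hand so a failed realloc() is reported to the caller instead of aborting inside ALLOC_GROW(). A stand-alone sketch of the same grow-or-fail idea; grow_target() only mimics the spirit of perf's alloc_nr() heuristic and is not the real macro:

#include <stdlib.h>

struct names { char **entries; unsigned int cnt, alloc; };

static unsigned int grow_target(unsigned int alloc)
{
	return (alloc + 16) * 3 / 2;	/* hypothetical growth heuristic */
}

static int names__reserve(struct names *n, unsigned int need)
{
	unsigned int want;
	void *tmp;

	if (need <= n->alloc)
		return 0;
	want = grow_target(n->alloc);
	if (want < need)
		want = need;		/* choose the bigger one */
	tmp = realloc(n->entries, want * sizeof(*n->entries));
	if (!tmp)
		return -1;		/* let the caller decide what to do */
	n->entries = tmp;
	n->alloc = want;
	return 0;
}

int main(void)
{
	struct names cmds = { NULL, 0, 0 };
	int ret = names__reserve(&cmds, 32) < 0;

	free(cmds.entries);
	return ret;
}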
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 31c4641..cfab531 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -295,7 +295,7 @@
 		root_in  = &he->parent_he->hroot_in;
 		root_out = &he->parent_he->hroot_out;
 	} else {
-		if (sort__need_collapse)
+		if (hists__has(hists, need_collapse))
 			root_in = &hists->entries_collapsed;
 		else
 			root_in = hists->entries_in;
@@ -953,7 +953,7 @@
 {
 	int err, err2;
 
-	err = sample__resolve_callchain(iter->sample, &iter->parent,
+	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
 					iter->evsel, al, max_stack_depth);
 	if (err)
 		return err;
@@ -1295,8 +1295,9 @@
 	return ret;
 }
 
-int hists__collapse_insert_entry(struct hists *hists, struct rb_root *root,
-				 struct hist_entry *he)
+static int hists__collapse_insert_entry(struct hists *hists,
+					struct rb_root *root,
+					struct hist_entry *he)
 {
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
@@ -1372,7 +1373,7 @@
 	struct hist_entry *n;
 	int ret;
 
-	if (!sort__need_collapse)
+	if (!hists__has(hists, need_collapse))
 		return 0;
 
 	hists->nr_entries = 0;
@@ -1631,7 +1632,7 @@
 		return;
 	}
 
-	if (sort__need_collapse)
+	if (hists__has(hists, need_collapse))
 		root = &hists->entries_collapsed;
 	else
 		root = hists->entries_in;
@@ -2035,7 +2036,7 @@
 	struct hist_entry *he;
 	int64_t cmp;
 
-	if (sort__need_collapse)
+	if (hists__has(hists, need_collapse))
 		root = &hists->entries_collapsed;
 	else
 		root = hists->entries_in;
@@ -2061,6 +2062,8 @@
 	if (he) {
 		memset(&he->stat, 0, sizeof(he->stat));
 		he->hists = hists;
+		if (symbol_conf.cumulate_callchain)
+			memset(he->stat_acc, 0, sizeof(he->stat));
 		rb_link_node(&he->rb_node_in, parent, p);
 		rb_insert_color(&he->rb_node_in, root);
 		hists__inc_stats(hists, he);
@@ -2075,7 +2078,7 @@
 {
 	struct rb_node *n;
 
-	if (sort__need_collapse)
+	if (hists__has(hists, need_collapse))
 		n = hists->entries_collapsed.rb_node;
 	else
 		n = hists->entries_in->rb_node;
@@ -2104,7 +2107,7 @@
 	struct rb_node *nd;
 	struct hist_entry *pos, *pair;
 
-	if (sort__need_collapse)
+	if (hists__has(leader, need_collapse))
 		root = &leader->entries_collapsed;
 	else
 		root = leader->entries_in;
@@ -2129,7 +2132,7 @@
 	struct rb_node *nd;
 	struct hist_entry *pos, *pair;
 
-	if (sort__need_collapse)
+	if (hists__has(other, need_collapse))
 		root = &other->entries_collapsed;
 	else
 		root = other->entries_in;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index bec0cd6..0f84bfb 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -82,6 +82,8 @@
 	int			nr_hpp_node;
 };
 
+#define hists__has(__h, __f) (__h)->hpp_list->__f
+
 struct hist_entry_iter;
 
 struct hist_iter_ops {
@@ -199,8 +201,6 @@
 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
 
 struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
-int hists__collapse_insert_entry(struct hists *hists,
-				  struct rb_root *root, struct hist_entry *he);
 
 struct perf_hpp {
 	char *buf;
@@ -240,6 +240,14 @@
 struct perf_hpp_list {
 	struct list_head fields;
 	struct list_head sorts;
+
+	int need_collapse;
+	int parent;
+	int sym;
+	int dso;
+	int socket;
+	int thread;
+	int comm;
 };
 
 extern struct perf_hpp_list perf_hpp_list;
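The new hists__has() macro reads a per-hists flag out of the attached hpp_list instead of consulting a global such as sort__need_collapse, which is what lets two hists instances be sorted differently. A toy model of how such an accessor behaves (names and fields are illustrative, not perf's real layout):

#include <stdio.h>

struct hpp_list { int need_collapse; int sym; };
struct hists    { struct hpp_list *hpp_list; };

#define hists__has(h, f) ((h)->hpp_list->f)

int main(void)
{
	struct hpp_list with = { .need_collapse = 1, .sym = 1 };
	struct hpp_list without = { 0 };
	struct hists a = { &with }, b = { &without };

	printf("a collapses: %d, b collapses: %d\n",
	       hists__has(&a, need_collapse), hists__has(&b, need_collapse));
	return 0;
}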
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 6bc3ecd..9df9960 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -66,6 +66,7 @@
 	u64				branches_id;
 	size_t				branches_event_size;
 	bool				synth_needs_swap;
+	unsigned long			num_events;
 };
 
 struct intel_bts_queue {
@@ -275,10 +276,15 @@
 	union perf_event event;
 	struct perf_sample sample = { .ip = 0, };
 
+	if (bts->synth_opts.initial_skip &&
+	    bts->num_events++ <= bts->synth_opts.initial_skip)
+		return 0;
+
 	event.sample.header.type = PERF_RECORD_SAMPLE;
 	event.sample.header.misc = PERF_RECORD_MISC_USER;
 	event.sample.header.size = sizeof(struct perf_event_header);
 
+	sample.cpumode = PERF_RECORD_MISC_USER;
 	sample.ip = le64_to_cpu(branch->from);
 	sample.pid = btsq->pid;
 	sample.tid = btsq->tid;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 9409d01..9c8f15d 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -356,7 +356,7 @@
 
 int intel_pt__strerror(int code, char *buf, size_t buflen)
 {
-	if (code < 1 || code > INTEL_PT_ERR_MAX)
+	if (code < 1 || code >= INTEL_PT_ERR_MAX)
 		code = INTEL_PT_ERR_UNK;
 	strlcpy(buf, intel_pt_err_msgs[code], buflen);
 	return 0;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 05d8158..1371969 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -100,6 +100,8 @@
 	u64 cyc_bit;
 	u64 noretcomp_bit;
 	unsigned max_non_turbo_ratio;
+
+	unsigned long num_events;
 };
 
 enum switch_state {
@@ -972,6 +974,10 @@
 	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
 		return 0;
 
+	if (pt->synth_opts.initial_skip &&
+	    pt->num_events++ < pt->synth_opts.initial_skip)
+		return 0;
+
 	event->sample.header.type = PERF_RECORD_SAMPLE;
 	event->sample.header.misc = PERF_RECORD_MISC_USER;
 	event->sample.header.size = sizeof(struct perf_event_header);
@@ -979,6 +985,7 @@
 	if (!pt->timeless_decoding)
 		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
 
+	sample.cpumode = PERF_RECORD_MISC_USER;
 	sample.ip = ptq->state->from_ip;
 	sample.pid = ptq->pid;
 	sample.tid = ptq->tid;
@@ -1028,6 +1035,10 @@
 	union perf_event *event = ptq->event_buf;
 	struct perf_sample sample = { .ip = 0, };
 
+	if (pt->synth_opts.initial_skip &&
+	    pt->num_events++ < pt->synth_opts.initial_skip)
+		return 0;
+
 	event->sample.header.type = PERF_RECORD_SAMPLE;
 	event->sample.header.misc = PERF_RECORD_MISC_USER;
 	event->sample.header.size = sizeof(struct perf_event_header);
@@ -1035,6 +1046,7 @@
 	if (!pt->timeless_decoding)
 		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
 
+	sample.cpumode = PERF_RECORD_MISC_USER;
 	sample.ip = ptq->state->from_ip;
 	sample.pid = ptq->pid;
 	sample.tid = ptq->tid;
@@ -1085,6 +1097,10 @@
 	union perf_event *event = ptq->event_buf;
 	struct perf_sample sample = { .ip = 0, };
 
+	if (pt->synth_opts.initial_skip &&
+	    pt->num_events++ < pt->synth_opts.initial_skip)
+		return 0;
+
 	event->sample.header.type = PERF_RECORD_SAMPLE;
 	event->sample.header.misc = PERF_RECORD_MISC_USER;
 	event->sample.header.size = sizeof(struct perf_event_header);
@@ -1092,6 +1108,7 @@
 	if (!pt->timeless_decoding)
 		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
 
+	sample.cpumode = PERF_RECORD_MISC_USER;
 	sample.ip = ptq->state->from_ip;
 	sample.pid = ptq->pid;
 	sample.tid = ptq->tid;
@@ -1127,7 +1144,7 @@
 		pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
 		       ret);
 
-	if (pt->synth_opts.callchain)
+	if (pt->synth_opts.last_branch)
 		intel_pt_reset_last_branch_rb(ptq);
 
 	return ret;
@@ -1196,14 +1213,18 @@
 	ptq->have_sample = false;
 
 	if (pt->sample_instructions &&
-	    (state->type & INTEL_PT_INSTRUCTION)) {
+	    (state->type & INTEL_PT_INSTRUCTION) &&
+	    (!pt->synth_opts.initial_skip ||
+	     pt->num_events++ >= pt->synth_opts.initial_skip)) {
 		err = intel_pt_synth_instruction_sample(ptq);
 		if (err)
 			return err;
 	}
 
 	if (pt->sample_transactions &&
-	    (state->type & INTEL_PT_TRANSACTION)) {
+	    (state->type & INTEL_PT_TRANSACTION) &&
+	    (!pt->synth_opts.initial_skip ||
+	     pt->num_events++ >= pt->synth_opts.initial_skip)) {
 		err = intel_pt_synth_transaction_sample(ptq);
 		if (err)
 			return err;
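Both decoders above gained an initial-skip gate: nothing is synthesized until the per-decoder event counter has passed synth_opts.initial_skip (note the BTS hunk compares with <= while the PT hunks use <, so the two skip counts differ by one). A tiny, self-contained illustration of the counter-based gate with made-up types:

#include <stdbool.h>
#include <stdio.h>

struct decoder {
	unsigned long initial_skip;	/* events to drop before synthesizing */
	unsigned long num_events;	/* events seen so far */
};

static bool should_emit(struct decoder *d)
{
	if (d->initial_skip && d->num_events++ < d->initial_skip)
		return false;		/* still inside the skipped prefix */
	return true;
}

int main(void)
{
	struct decoder d = { .initial_skip = 2 };
	int i;

	for (i = 0; i < 5; i++)
		printf("event %d: %s\n", i, should_emit(&d) ? "emit" : "skip");
	return 0;
}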
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index cd272cc..86afe96 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -17,6 +17,7 @@
 #include "strlist.h"
 #include <elf.h>
 
+#include "tsc.h"
 #include "session.h"
 #include "jit.h"
 #include "jitdump.h"
@@ -33,6 +34,7 @@
 	size_t           bufsize;
 	FILE             *in;
 	bool		 needs_bswap; /* handles cross-endianess */
+	bool		 use_arch_timestamp;
 	void		 *debug_data;
 	size_t		 nr_debug_entries;
 	uint32_t         code_load_count;
@@ -158,13 +160,16 @@
 		header.flags      = bswap_64(header.flags);
 	}
 
+	jd->use_arch_timestamp = header.flags & JITDUMP_FLAGS_ARCH_TIMESTAMP;
+
 	if (verbose > 2)
-		pr_debug("version=%u\nhdr.size=%u\nts=0x%llx\npid=%d\nelf_mach=%d\n",
+		pr_debug("version=%u\nhdr.size=%u\nts=0x%llx\npid=%d\nelf_mach=%d\nuse_arch_timestamp=%d\n",
 			header.version,
 			header.total_size,
 			(unsigned long long)header.timestamp,
 			header.pid,
-			header.elf_mach);
+			header.elf_mach,
+			jd->use_arch_timestamp);
 
 	if (header.flags & JITDUMP_FLAGS_RESERVED) {
 		pr_err("jitdump file contains invalid or unsupported flags 0x%llx\n",
@@ -172,10 +177,15 @@
 		goto error;
 	}
 
+	if (jd->use_arch_timestamp && !jd->session->time_conv.time_mult) {
+		pr_err("jitdump file uses arch timestamps but there is no timestamp conversion\n");
+		goto error;
+	}
+
 	/*
 	 * validate event is using the correct clockid
 	 */
-	if (jit_validate_events(jd->session)) {
+	if (!jd->use_arch_timestamp && jit_validate_events(jd->session)) {
 		pr_err("error, jitted code must be sampled with perf record -k 1\n");
 		goto error;
 	}
@@ -329,6 +339,23 @@
 	return 0;
 }
 
+static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
+{
+	struct perf_tsc_conversion tc;
+
+	if (!jd->use_arch_timestamp)
+		return timestamp;
+
+	tc.time_shift = jd->session->time_conv.time_shift;
+	tc.time_mult  = jd->session->time_conv.time_mult;
+	tc.time_zero  = jd->session->time_conv.time_zero;
+
+	if (!tc.time_mult)
+		return 0;
+
+	return tsc_to_perf_time(timestamp, &tc);
+}
+
 static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
 {
 	struct perf_sample sample;
@@ -385,7 +412,7 @@
 		return -1;
 	}
 	if (stat(filename, &st))
-		memset(&st, 0, sizeof(stat));
+		memset(&st, 0, sizeof(st));
 
 	event->mmap2.header.type = PERF_RECORD_MMAP2;
 	event->mmap2.header.misc = PERF_RECORD_MISC_USER;
@@ -410,13 +437,14 @@
 		id->tid  = tid;
 	}
 	if (jd->sample_type & PERF_SAMPLE_TIME)
-		id->time = jr->load.p.timestamp;
+		id->time = convert_timestamp(jd, jr->load.p.timestamp);
 
 	/*
 	 * create pseudo sample to induce dso hit increment
 	 * use first address as sample address
 	 */
 	memset(&sample, 0, sizeof(sample));
+	sample.cpumode = PERF_RECORD_MISC_USER;
 	sample.pid  = pid;
 	sample.tid  = tid;
 	sample.time = id->time;
@@ -472,7 +500,7 @@
 	size++; /* for \0 */
 
 	if (stat(filename, &st))
-		memset(&st, 0, sizeof(stat));
+		memset(&st, 0, sizeof(st));
 
 	size = PERF_ALIGN(size, sizeof(u64));
 
@@ -498,13 +526,14 @@
 		id->tid  = tid;
 	}
 	if (jd->sample_type & PERF_SAMPLE_TIME)
-		id->time = jr->load.p.timestamp;
+		id->time = convert_timestamp(jd, jr->load.p.timestamp);
 
 	/*
 	 * create pseudo sample to induce dso hit increment
 	 * use first address as sample address
 	 */
 	memset(&sample, 0, sizeof(sample));
+	sample.cpumode = PERF_RECORD_MISC_USER;
 	sample.pid  = pid;
 	sample.tid  = tid;
 	sample.time = id->time;
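When the jitdump header carries the arch-timestamp flag, the load/move records above are converted with the session's time_conv (shift/mult/zero) data instead of being taken as perf time directly. A sketch of the usual mult/shift conversion, written against a made-up struct rather than perf's perf_tsc_conversion:

#include <stdint.h>
#include <stdio.h>

struct tsc_conv { uint16_t time_shift; uint32_t time_mult; uint64_t time_zero; };

static uint64_t cycles_to_time(uint64_t cyc, const struct tsc_conv *tc)
{
	uint64_t quot = cyc >> tc->time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}

int main(void)
{
	/* with mult == 2^shift this degenerates to an identity mapping */
	struct tsc_conv tc = { .time_shift = 10, .time_mult = 1024, .time_zero = 0 };

	printf("%llu\n", (unsigned long long)cycles_to_time(123456789ULL, &tc));
	return 0;
}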
diff --git a/tools/perf/util/jitdump.h b/tools/perf/util/jitdump.h
index b66c1f5..bcacd20 100644
--- a/tools/perf/util/jitdump.h
+++ b/tools/perf/util/jitdump.h
@@ -23,9 +23,12 @@
 #define JITHEADER_VERSION 1
 
 enum jitdump_flags_bits {
+	JITDUMP_FLAGS_ARCH_TIMESTAMP_BIT,
 	JITDUMP_FLAGS_MAX_BIT,
 };
 
+#define JITDUMP_FLAGS_ARCH_TIMESTAMP	(1ULL << JITDUMP_FLAGS_ARCH_TIMESTAMP_BIT)
+
 #define JITDUMP_FLAGS_RESERVED (JITDUMP_FLAGS_MAX_BIT < 64 ? \
 				(~((1ULL << JITDUMP_FLAGS_MAX_BIT) - 1)) : 0)
 
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 80b9b6a..f9644f7 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -32,6 +32,7 @@
 
 	machine->threads = RB_ROOT;
 	pthread_rwlock_init(&machine->threads_lock, NULL);
+	machine->nr_threads = 0;
 	INIT_LIST_HEAD(&machine->dead_threads);
 	machine->last_match = NULL;
 
@@ -361,7 +362,7 @@
 }
 
 /*
- * Caller must eventually drop thread->refcnt returned with a successfull
+ * Caller must eventually drop thread->refcnt returned with a successful
  * lookup/new thread inserted.
  */
 static struct thread *____machine__findnew_thread(struct machine *machine,
@@ -430,6 +431,7 @@
 		 */
 		thread__get(th);
 		machine->last_match = th;
+		++machine->nr_threads;
 	}
 
 	return th;
@@ -681,11 +683,13 @@
 
 size_t machine__fprintf(struct machine *machine, FILE *fp)
 {
-	size_t ret = 0;
+	size_t ret;
 	struct rb_node *nd;
 
 	pthread_rwlock_rdlock(&machine->threads_lock);
 
+	ret = fprintf(fp, "Threads: %u\n", machine->nr_threads);
+
 	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
 		struct thread *pos = rb_entry(nd, struct thread, rb_node);
 
@@ -908,11 +912,11 @@
 	return machine__create_kernel_maps(machine);
 }
 
-int machine__load_kallsyms(struct machine *machine, const char *filename,
-			   enum map_type type, symbol_filter_t filter)
+int __machine__load_kallsyms(struct machine *machine, const char *filename,
+			     enum map_type type, bool no_kcore, symbol_filter_t filter)
 {
 	struct map *map = machine__kernel_map(machine);
-	int ret = dso__load_kallsyms(map->dso, filename, map, filter);
+	int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore, filter);
 
 	if (ret > 0) {
 		dso__set_loaded(map->dso, type);
@@ -927,6 +931,12 @@
 	return ret;
 }
 
+int machine__load_kallsyms(struct machine *machine, const char *filename,
+			   enum map_type type, symbol_filter_t filter)
+{
+	return __machine__load_kallsyms(machine, filename, type, false, filter);
+}
+
 int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
 			       symbol_filter_t filter)
 {
@@ -1413,6 +1423,7 @@
 		pthread_rwlock_wrlock(&machine->threads_lock);
 	rb_erase_init(&th->rb_node, &machine->threads);
 	RB_CLEAR_NODE(&th->rb_node);
+	--machine->nr_threads;
 	/*
 	 * Move it first to the dead_threads list, then drop the reference,
 	 * if this is the last reference, then the thread__delete destructor
@@ -1599,6 +1610,7 @@
 }
 
 static int add_callchain_ip(struct thread *thread,
+			    struct callchain_cursor *cursor,
 			    struct symbol **parent,
 			    struct addr_location *root_al,
 			    u8 *cpumode,
@@ -1630,7 +1642,7 @@
 				 * It seems the callchain is corrupted.
 				 * Discard all.
 				 */
-				callchain_cursor_reset(&callchain_cursor);
+				callchain_cursor_reset(cursor);
 				return 1;
 			}
 			return 0;
@@ -1640,7 +1652,7 @@
 	}
 
 	if (al.sym != NULL) {
-		if (sort__has_parent && !*parent &&
+		if (perf_hpp_list.parent && !*parent &&
 		    symbol__match_regex(al.sym, &parent_regex))
 			*parent = al.sym;
 		else if (have_ignore_callees && root_al &&
@@ -1648,13 +1660,13 @@
 			/* Treat this symbol as the root,
 			   forgetting its callees. */
 			*root_al = al;
-			callchain_cursor_reset(&callchain_cursor);
+			callchain_cursor_reset(cursor);
 		}
 	}
 
 	if (symbol_conf.hide_unresolved && al.sym == NULL)
 		return 0;
-	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
+	return callchain_cursor_append(cursor, al.addr, al.map, al.sym);
 }
 
 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
@@ -1724,6 +1736,7 @@
  * negative error code on other errors.
  */
 static int resolve_lbr_callchain_sample(struct thread *thread,
+					struct callchain_cursor *cursor,
 					struct perf_sample *sample,
 					struct symbol **parent,
 					struct addr_location *root_al,
@@ -1756,7 +1769,7 @@
 		 */
 		int mix_chain_nr = i + 1 + lbr_nr + 1;
 
-		if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
+		if (mix_chain_nr > (int)sysctl_perf_event_max_stack + PERF_MAX_BRANCH_DEPTH) {
 			pr_warning("corrupted callchain. skipping...\n");
 			return 0;
 		}
@@ -1778,7 +1791,7 @@
 					ip = lbr_stack->entries[0].to;
 			}
 
-			err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
+			err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
 			if (err)
 				return (err < 0) ? err : 0;
 		}
@@ -1789,6 +1802,7 @@
 }
 
 static int thread__resolve_callchain_sample(struct thread *thread,
+					    struct callchain_cursor *cursor,
 					    struct perf_evsel *evsel,
 					    struct perf_sample *sample,
 					    struct symbol **parent,
@@ -1803,10 +1817,8 @@
 	int skip_idx = -1;
 	int first_call = 0;
 
-	callchain_cursor_reset(&callchain_cursor);
-
-	if (has_branch_callstack(evsel)) {
-		err = resolve_lbr_callchain_sample(thread, sample, parent,
+	if (perf_evsel__has_branch_callstack(evsel)) {
+		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
 						   root_al, max_stack);
 		if (err)
 			return (err < 0) ? err : 0;
@@ -1816,7 +1828,7 @@
 	 * Based on DWARF debug information, some architectures skip
 	 * a callchain entry saved by the kernel.
 	 */
-	if (chain->nr < PERF_MAX_STACK_DEPTH)
+	if (chain->nr < sysctl_perf_event_max_stack)
 		skip_idx = arch_skip_callchain_idx(thread, chain);
 
 	/*
@@ -1863,10 +1875,10 @@
 		nr = remove_loops(be, nr);
 
 		for (i = 0; i < nr; i++) {
-			err = add_callchain_ip(thread, parent, root_al,
+			err = add_callchain_ip(thread, cursor, parent, root_al,
 					       NULL, be[i].to);
 			if (!err)
-				err = add_callchain_ip(thread, parent, root_al,
+				err = add_callchain_ip(thread, cursor, parent, root_al,
 						       NULL, be[i].from);
 			if (err == -EINVAL)
 				break;
@@ -1877,7 +1889,7 @@
 	}
 
 check_calls:
-	if (chain->nr > PERF_MAX_STACK_DEPTH && (int)chain->nr > max_stack) {
+	if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) {
 		pr_warning("corrupted callchain. skipping...\n");
 		return 0;
 	}
@@ -1896,7 +1908,7 @@
 #endif
 		ip = chain->ips[j];
 
-		err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
+		err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
 
 		if (err)
 			return (err < 0) ? err : 0;
@@ -1915,19 +1927,12 @@
 				       entry->map, entry->sym);
 }
 
-int thread__resolve_callchain(struct thread *thread,
-			      struct perf_evsel *evsel,
-			      struct perf_sample *sample,
-			      struct symbol **parent,
-			      struct addr_location *root_al,
-			      int max_stack)
+static int thread__resolve_callchain_unwind(struct thread *thread,
+					    struct callchain_cursor *cursor,
+					    struct perf_evsel *evsel,
+					    struct perf_sample *sample,
+					    int max_stack)
 {
-	int ret = thread__resolve_callchain_sample(thread, evsel,
-						   sample, parent,
-						   root_al, max_stack);
-	if (ret)
-		return ret;
-
 	/* Can we do dwarf post unwind? */
 	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
 	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
@@ -1938,9 +1943,45 @@
 	    (!sample->user_stack.size))
 		return 0;
 
-	return unwind__get_entries(unwind_entry, &callchain_cursor,
+	return unwind__get_entries(unwind_entry, cursor,
 				   thread, sample, max_stack);
+}
 
+int thread__resolve_callchain(struct thread *thread,
+			      struct callchain_cursor *cursor,
+			      struct perf_evsel *evsel,
+			      struct perf_sample *sample,
+			      struct symbol **parent,
+			      struct addr_location *root_al,
+			      int max_stack)
+{
+	int ret = 0;
+
+	callchain_cursor_reset(&callchain_cursor);
+
+	if (callchain_param.order == ORDER_CALLEE) {
+		ret = thread__resolve_callchain_sample(thread, cursor,
+						       evsel, sample,
+						       parent, root_al,
+						       max_stack);
+		if (ret)
+			return ret;
+		ret = thread__resolve_callchain_unwind(thread, cursor,
+						       evsel, sample,
+						       max_stack);
+	} else {
+		ret = thread__resolve_callchain_unwind(thread, cursor,
+						       evsel, sample,
+						       max_stack);
+		if (ret)
+			return ret;
+		ret = thread__resolve_callchain_sample(thread, cursor,
+						       evsel, sample,
+						       parent, root_al,
+						       max_stack);
+	}
+
+	return ret;
 }
 
 int machine__for_each_thread(struct machine *machine,
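thread__resolve_callchain() above now decides which resolver runs first from callchain_param.order: callee-first output appends the kernel-recorded chain to the cursor before the DWARF unwind, caller-first output swaps the two steps. A bare-bones sketch of that ordering decision with placeholder callbacks (not perf's signatures):

enum chain_order { ORDER_CALLEE, ORDER_CALLER };

/* each resolver returns 0 on success, non-zero to stop early */
static int resolve_chain(enum chain_order order,
			 int (*from_sample)(void), int (*from_unwind)(void))
{
	int ret;

	if (order == ORDER_CALLEE) {
		ret = from_sample();
		if (ret)
			return ret;
		return from_unwind();
	}
	ret = from_unwind();
	if (ret)
		return ret;
	return from_sample();
}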
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 8499db2..83f4679 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -31,6 +31,7 @@
 	char		  *root_dir;
 	struct rb_root	  threads;
 	pthread_rwlock_t  threads_lock;
+	unsigned int	  nr_threads;
 	struct list_head  dead_threads;
 	struct thread	  *last_match;
 	struct vdso_info  *vdso_info;
@@ -141,7 +142,11 @@
 					   struct addr_location *al);
 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
 				     struct addr_location *al);
+
+struct callchain_cursor;
+
 int thread__resolve_callchain(struct thread *thread,
+			      struct callchain_cursor *cursor,
 			      struct perf_evsel *evsel,
 			      struct perf_sample *sample,
 			      struct symbol **parent,
@@ -211,6 +216,8 @@
 struct map *machine__findnew_module_map(struct machine *machine, u64 start,
 					const char *filename);
 
+int __machine__load_kallsyms(struct machine *machine, const char *filename,
+			     enum map_type type, bool no_kcore, symbol_filter_t filter);
 int machine__load_kallsyms(struct machine *machine, const char *filename,
 			   enum map_type type, symbol_filter_t filter);
 int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 171b6d1..b19bcd3 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -289,7 +289,7 @@
 	nr = dso__load(map->dso, map, filter);
 	if (nr < 0) {
 		if (map->dso->has_build_id) {
-			char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+			char sbuild_id[SBUILD_ID_SIZE];
 
 			build_id__sprintf(map->dso->build_id,
 					  sizeof(map->dso->build_id),
@@ -431,6 +431,13 @@
 	if (map->dso->rel)
 		return rip - map->pgoff;
 
+	/*
+	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
+	 * but all kernel modules are ET_REL, so they won't get here.
+	 */
+	if (map->dso->kernel == DSO_TYPE_USER)
+		return rip + map->dso->text_offset;
+
 	return map->unmap_ip(map, rip) - map->reloc;
 }
 
@@ -454,6 +461,13 @@
 	if (map->dso->rel)
 		return map->unmap_ip(map, ip + map->pgoff);
 
+	/*
+	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
+	 * but all kernel modules are ET_REL, so they won't get here.
+	 */
+	if (map->dso->kernel == DSO_TYPE_USER)
+		return map->unmap_ip(map, ip - map->dso->text_offset);
+
 	return ip + map->reloc;
 }
 
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index b1b9e23..fe84df1 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -308,3 +308,12 @@
 		free(event);
 	}
 }
+
+void ordered_events__reinit(struct ordered_events *oe)
+{
+	ordered_events__deliver_t old_deliver = oe->deliver;
+
+	ordered_events__free(oe);
+	memset(oe, '\0', sizeof(*oe));
+	ordered_events__init(oe, old_deliver);
+}
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index f403991..e11468a 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -49,6 +49,7 @@
 int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
 void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver);
 void ordered_events__free(struct ordered_events *oe);
+void ordered_events__reinit(struct ordered_events *oe);
 
 static inline
 void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size)
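ordered_events__reinit() frees whatever is queued, zeroes the whole structure and runs init again while preserving only the caller's deliver callback. A simplified model of that save-wipe-restore sequence (types trimmed down for the example):

#include <string.h>

typedef int (*deliver_t)(void *event);

struct oe { deliver_t deliver; /* ...queue state elided... */ };

static void oe_init(struct oe *oe, deliver_t deliver)
{
	memset(oe, 0, sizeof(*oe));
	oe->deliver = deliver;
}

static void oe_free(struct oe *oe)
{
	(void)oe;			/* drop queued events here */
}

static void oe_reinit(struct oe *oe)
{
	deliver_t old = oe->deliver;	/* survives the wipe in oe_init() */

	oe_free(oe);
	oe_init(oe, old);
}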
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4c19d5e..c6fd047 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -138,11 +138,11 @@
 #define PERF_EVENT_TYPE(config)		__PERF_EVENT_FIELD(config, TYPE)
 #define PERF_EVENT_ID(config)		__PERF_EVENT_FIELD(config, EVENT)
 
-#define for_each_subsystem(sys_dir, sys_dirent, sys_next)	       \
-	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	       \
-	if (sys_dirent.d_type == DT_DIR &&				       \
-	   (strcmp(sys_dirent.d_name, ".")) &&				       \
-	   (strcmp(sys_dirent.d_name, "..")))
+#define for_each_subsystem(sys_dir, sys_dirent)			\
+	while ((sys_dirent = readdir(sys_dir)) != NULL)		\
+		if (sys_dirent->d_type == DT_DIR &&		\
+		    (strcmp(sys_dirent->d_name, ".")) &&	\
+		    (strcmp(sys_dirent->d_name, "..")))
 
 static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
 {
@@ -159,12 +159,12 @@
 	return 0;
 }
 
-#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)	       \
-	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)        \
-	if (evt_dirent.d_type == DT_DIR &&				       \
-	   (strcmp(evt_dirent.d_name, ".")) &&				       \
-	   (strcmp(evt_dirent.d_name, "..")) &&				       \
-	   (!tp_event_has_id(&sys_dirent, &evt_dirent)))
+#define for_each_event(sys_dirent, evt_dir, evt_dirent)		\
+	while ((evt_dirent = readdir(evt_dir)) != NULL)		\
+		if (evt_dirent->d_type == DT_DIR &&		\
+		    (strcmp(evt_dirent->d_name, ".")) &&	\
+		    (strcmp(evt_dirent->d_name, "..")) &&	\
+		    (!tp_event_has_id(sys_dirent, evt_dirent)))
 
 #define MAX_EVENT_LENGTH 512
 
@@ -173,7 +173,7 @@
 {
 	struct tracepoint_path *path = NULL;
 	DIR *sys_dir, *evt_dir;
-	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+	struct dirent *sys_dirent, *evt_dirent;
 	char id_buf[24];
 	int fd;
 	u64 id;
@@ -184,18 +184,18 @@
 	if (!sys_dir)
 		return NULL;
 
-	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+	for_each_subsystem(sys_dir, sys_dirent) {
 
 		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-			 sys_dirent.d_name);
+			 sys_dirent->d_name);
 		evt_dir = opendir(dir_path);
 		if (!evt_dir)
 			continue;
 
-		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+		for_each_event(sys_dirent, evt_dir, evt_dirent) {
 
 			snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
-				 evt_dirent.d_name);
+				 evt_dirent->d_name);
 			fd = open(evt_path, O_RDONLY);
 			if (fd < 0)
 				continue;
@@ -220,9 +220,9 @@
 					free(path);
 					return NULL;
 				}
-				strncpy(path->system, sys_dirent.d_name,
+				strncpy(path->system, sys_dirent->d_name,
 					MAX_EVENT_LENGTH);
-				strncpy(path->name, evt_dirent.d_name,
+				strncpy(path->name, evt_dirent->d_name,
 					MAX_EVENT_LENGTH);
 				return path;
 			}
@@ -1649,7 +1649,7 @@
 
 		buf = _buf;
 
-		/* We're cutting from the beggining. */
+		/* We're cutting from the beginning. */
 		if (err->idx > max_err_idx)
 			cut = err->idx - max_err_idx;
 
@@ -1812,7 +1812,7 @@
 			     bool name_only)
 {
 	DIR *sys_dir, *evt_dir;
-	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+	struct dirent *sys_dirent, *evt_dirent;
 	char evt_path[MAXPATHLEN];
 	char dir_path[MAXPATHLEN];
 	char **evt_list = NULL;
@@ -1830,20 +1830,20 @@
 			goto out_close_sys_dir;
 	}
 
-	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+	for_each_subsystem(sys_dir, sys_dirent) {
 		if (subsys_glob != NULL &&
-		    !strglobmatch(sys_dirent.d_name, subsys_glob))
+		    !strglobmatch(sys_dirent->d_name, subsys_glob))
 			continue;
 
 		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-			 sys_dirent.d_name);
+			 sys_dirent->d_name);
 		evt_dir = opendir(dir_path);
 		if (!evt_dir)
 			continue;
 
-		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+		for_each_event(sys_dirent, evt_dir, evt_dirent) {
 			if (event_glob != NULL &&
-			    !strglobmatch(evt_dirent.d_name, event_glob))
+			    !strglobmatch(evt_dirent->d_name, event_glob))
 				continue;
 
 			if (!evt_num_known) {
@@ -1852,7 +1852,7 @@
 			}
 
 			snprintf(evt_path, MAXPATHLEN, "%s:%s",
-				 sys_dirent.d_name, evt_dirent.d_name);
+				 sys_dirent->d_name, evt_dirent->d_name);
 
 			evt_list[evt_i] = strdup(evt_path);
 			if (evt_list[evt_i] == NULL)
@@ -1905,7 +1905,7 @@
 int is_valid_tracepoint(const char *event_string)
 {
 	DIR *sys_dir, *evt_dir;
-	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+	struct dirent *sys_dirent, *evt_dirent;
 	char evt_path[MAXPATHLEN];
 	char dir_path[MAXPATHLEN];
 
@@ -1913,17 +1913,17 @@
 	if (!sys_dir)
 		return 0;
 
-	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+	for_each_subsystem(sys_dir, sys_dirent) {
 
 		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-			 sys_dirent.d_name);
+			 sys_dirent->d_name);
 		evt_dir = opendir(dir_path);
 		if (!evt_dir)
 			continue;
 
-		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+		for_each_event(sys_dirent, evt_dir, evt_dirent) {
 			snprintf(evt_path, MAXPATHLEN, "%s:%s",
-				 sys_dirent.d_name, evt_dirent.d_name);
+				 sys_dirent->d_name, evt_dirent->d_name);
 			if (!strcmp(evt_path, event_string)) {
 				closedir(evt_dir);
 				closedir(sys_dir);
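The for_each_subsystem()/for_each_event() macros above now expand to plain readdir() loops instead of the deprecated readdir_r(), iterating over the pointer readdir() returns and skipping '.' and '..'. A stand-alone approximation of the resulting loop:

#define _DEFAULT_SOURCE		/* for DT_DIR on glibc */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

static void list_subsystems(const char *tracing_events_path)
{
	DIR *sys_dir = opendir(tracing_events_path);
	struct dirent *ent;

	if (!sys_dir)
		return;
	while ((ent = readdir(sys_dir)) != NULL) {
		if (ent->d_type != DT_DIR ||
		    !strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
			continue;
		printf("%s\n", ent->d_name);
	}
	closedir(sys_dir);
}

int main(void)
{
	list_subsystems("/sys/kernel/debug/tracing/events");
	return 0;
}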
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index adef23b..ddb0261 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -602,14 +602,13 @@
 
 static __u64 pmu_format_max_value(const unsigned long *format)
 {
-	int w;
+	__u64 w = 0;
+	int fbit;
 
-	w = bitmap_weight(format, PERF_PMU_FORMAT_BITS);
-	if (!w)
-		return 0;
-	if (w < 64)
-		return (1ULL << w) - 1;
-	return -1;
+	for_each_set_bit(fbit, format, PERF_PMU_FORMAT_BITS)
+		w |= (1ULL << fbit);
+
+	return w;
 }
 
 /*
@@ -644,20 +643,20 @@
 static char *pmu_formats_string(struct list_head *formats)
 {
 	struct perf_pmu_format *format;
-	char *str;
-	struct strbuf buf;
+	char *str = NULL;
+	struct strbuf buf = STRBUF_INIT;
 	unsigned i = 0;
 
 	if (!formats)
 		return NULL;
 
-	strbuf_init(&buf, 0);
 	/* sysfs exported terms */
 	list_for_each_entry(format, formats, list)
-		strbuf_addf(&buf, i++ ? ",%s" : "%s",
-			    format->name);
+		if (strbuf_addf(&buf, i++ ? ",%s" : "%s", format->name) < 0)
+			goto error;
 
 	str = strbuf_detach(&buf, NULL);
+error:
 	strbuf_release(&buf);
 
 	return str;
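pmu_format_max_value() now ORs together every bit named in the format spec rather than assuming the bits are contiguous, so a config field described as bits 0-7,32-35 no longer collapses to (1 << weight) - 1. The same idea over a single 64-bit word (perf's version walks a multi-word bitmap with for_each_set_bit()):

#include <stdint.h>
#include <stdio.h>

static uint64_t format_max_value(uint64_t format_bits)
{
	uint64_t max = 0;
	int bit;

	for (bit = 0; bit < 64; bit++)
		if (format_bits & (1ULL << bit))
			max |= 1ULL << bit;	/* every settable bit may be set */
	return max;
}

int main(void)
{
	uint64_t fmt = 0xffULL | (0xfULL << 32);	/* bits 0-7 and 32-35 */

	printf("max value: %#llx\n", (unsigned long long)format_max_value(fmt));
	return 0;
}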
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 8319fbb..74401a2 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -265,6 +265,65 @@
 	return true;
 }
 
+/*
+ * NOTE:
+ * The '.gnu.linkonce.this_module' section of a kernel module ELF directly
+ * maps to 'struct module' from linux/module.h. This section contains the
+ * actual module name, which the kernel will use after loading the module.
+ * But we cannot use 'struct module' here since linux/module.h is not
+ * exposed to user-space. The offset of 'name' has remained the same for a
+ * long time, so it is hardcoded here.
+ */
+#ifdef __LP64__
+#define MOD_NAME_OFFSET 24
+#else
+#define MOD_NAME_OFFSET 12
+#endif
+
+/*
+ * @module can be a module name or a module file path. In case of a path,
+ * inspect the ELF and find out the actual module name.
+ * Caller has to free mod_name after using it.
+ */
+static char *find_module_name(const char *module)
+{
+	int fd;
+	Elf *elf;
+	GElf_Ehdr ehdr;
+	GElf_Shdr shdr;
+	Elf_Data *data;
+	Elf_Scn *sec;
+	char *mod_name = NULL;
+
+	fd = open(module, O_RDONLY);
+	if (fd < 0)
+		return NULL;
+
+	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+	if (elf == NULL)
+		goto elf_err;
+
+	if (gelf_getehdr(elf, &ehdr) == NULL)
+		goto ret_err;
+
+	sec = elf_section_by_name(elf, &ehdr, &shdr,
+			".gnu.linkonce.this_module", NULL);
+	if (!sec)
+		goto ret_err;
+
+	data = elf_getdata(sec, NULL);
+	if (!data || !data->d_buf)
+		goto ret_err;
+
+	mod_name = strdup((char *)data->d_buf + MOD_NAME_OFFSET);
+
+ret_err:
+	elf_end(elf);
+elf_err:
+	close(fd);
+	return mod_name;
+}
+
 #ifdef HAVE_DWARF_SUPPORT
 
 static int kernel_get_module_dso(const char *module, struct dso **pdso)
@@ -486,8 +545,10 @@
 		return -errno;
 
 	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
-	if (elf == NULL)
-		return -EINVAL;
+	if (elf == NULL) {
+		ret = -EINVAL;
+		goto out_close;
+	}
 
 	if (gelf_getehdr(elf, &ehdr) == NULL)
 		goto out;
@@ -499,6 +560,9 @@
 	ret = 0;
 out:
 	elf_end(elf);
+out_close:
+	close(fd);
+
 	return ret;
 }
 
@@ -583,32 +647,23 @@
 					    int ntevs, const char *module)
 {
 	int i, ret = 0;
-	char *tmp;
+	char *mod_name = NULL;
 
 	if (!module)
 		return 0;
 
-	tmp = strrchr(module, '/');
-	if (tmp) {
-		/* This is a module path -- get the module name */
-		module = strdup(tmp + 1);
-		if (!module)
-			return -ENOMEM;
-		tmp = strchr(module, '.');
-		if (tmp)
-			*tmp = '\0';
-		tmp = (char *)module;	/* For free() */
-	}
+	mod_name = find_module_name(module);
 
 	for (i = 0; i < ntevs; i++) {
-		tevs[i].point.module = strdup(module);
+		tevs[i].point.module =
+			strdup(mod_name ? mod_name : module);
 		if (!tevs[i].point.module) {
 			ret = -ENOMEM;
 			break;
 		}
 	}
 
-	free(tmp);
+	free(mod_name);
 	return ret;
 }
 
@@ -1618,69 +1673,65 @@
 }
 
 /* Compose only probe arg */
-int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len)
+char *synthesize_perf_probe_arg(struct perf_probe_arg *pa)
 {
 	struct perf_probe_arg_field *field = pa->field;
-	int ret;
-	char *tmp = buf;
+	struct strbuf buf;
+	char *ret = NULL;
+	int err;
+
+	if (strbuf_init(&buf, 64) < 0)
+		return NULL;
 
 	if (pa->name && pa->var)
-		ret = e_snprintf(tmp, len, "%s=%s", pa->name, pa->var);
+		err = strbuf_addf(&buf, "%s=%s", pa->name, pa->var);
 	else
-		ret = e_snprintf(tmp, len, "%s", pa->name ? pa->name : pa->var);
-	if (ret <= 0)
-		goto error;
-	tmp += ret;
-	len -= ret;
+		err = strbuf_addstr(&buf, pa->name ?: pa->var);
+	if (err)
+		goto out;
 
 	while (field) {
 		if (field->name[0] == '[')
-			ret = e_snprintf(tmp, len, "%s", field->name);
+			err = strbuf_addstr(&buf, field->name);
 		else
-			ret = e_snprintf(tmp, len, "%s%s",
-					 field->ref ? "->" : ".", field->name);
-		if (ret <= 0)
-			goto error;
-		tmp += ret;
-		len -= ret;
+			err = strbuf_addf(&buf, "%s%s", field->ref ? "->" : ".",
+					  field->name);
 		field = field->next;
+		if (err)
+			goto out;
 	}
 
-	if (pa->type) {
-		ret = e_snprintf(tmp, len, ":%s", pa->type);
-		if (ret <= 0)
-			goto error;
-		tmp += ret;
-		len -= ret;
-	}
+	if (pa->type)
+		if (strbuf_addf(&buf, ":%s", pa->type) < 0)
+			goto out;
 
-	return tmp - buf;
-error:
-	pr_debug("Failed to synthesize perf probe argument: %d\n", ret);
+	ret = strbuf_detach(&buf, NULL);
+out:
+	strbuf_release(&buf);
 	return ret;
 }
 
 /* Compose only probe point (not argument) */
 static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
 {
-	char *buf, *tmp;
-	char offs[32] = "", line[32] = "", file[32] = "";
-	int ret, len;
+	struct strbuf buf;
+	char *tmp, *ret = NULL;
+	int len, err = 0;
 
-	buf = zalloc(MAX_CMDLEN);
-	if (buf == NULL) {
-		ret = -ENOMEM;
-		goto error;
-	}
-	if (pp->offset) {
-		ret = e_snprintf(offs, 32, "+%lu", pp->offset);
-		if (ret <= 0)
-			goto error;
-	}
-	if (pp->line) {
-		ret = e_snprintf(line, 32, ":%d", pp->line);
-		if (ret <= 0)
-			goto error;
+	if (strbuf_init(&buf, 64) < 0)
+		return NULL;
+
+	if (pp->function) {
+		if (strbuf_addstr(&buf, pp->function) < 0)
+			goto out;
+		if (pp->offset)
+			err = strbuf_addf(&buf, "+%lu", pp->offset);
+		else if (pp->line)
+			err = strbuf_addf(&buf, ":%d", pp->line);
+		else if (pp->retprobe)
+			err = strbuf_addstr(&buf, "%return");
+		if (err)
+			goto out;
 	}
 	if (pp->file) {
 		tmp = pp->file;
@@ -1689,25 +1740,15 @@
 			tmp = strchr(pp->file + len - 30, '/');
 			tmp = tmp ? tmp + 1 : pp->file + len - 30;
 		}
-		ret = e_snprintf(file, 32, "@%s", tmp);
-		if (ret <= 0)
-			goto error;
+		err = strbuf_addf(&buf, "@%s", tmp);
+		if (!err && !pp->function && pp->line)
+			err = strbuf_addf(&buf, ":%d", pp->line);
 	}
-
-	if (pp->function)
-		ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s%s", pp->function,
-				 offs, pp->retprobe ? "%return" : "", line,
-				 file);
-	else
-		ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", file, line);
-	if (ret <= 0)
-		goto error;
-
-	return buf;
-error:
-	pr_debug("Failed to synthesize perf probe point: %d\n", ret);
-	free(buf);
-	return NULL;
+	if (!err)
+		ret = strbuf_detach(&buf, NULL);
+out:
+	strbuf_release(&buf);
+	return ret;
 }
 
 #if 0
@@ -1736,45 +1777,32 @@
 #endif
 
 static int __synthesize_probe_trace_arg_ref(struct probe_trace_arg_ref *ref,
-					     char **buf, size_t *buflen,
-					     int depth)
+					    struct strbuf *buf, int depth)
 {
-	int ret;
+	int err;
 	if (ref->next) {
 		depth = __synthesize_probe_trace_arg_ref(ref->next, buf,
-							 buflen, depth + 1);
+							 depth + 1);
 		if (depth < 0)
-			goto out;
+			return depth;
 	}
-
-	ret = e_snprintf(*buf, *buflen, "%+ld(", ref->offset);
-	if (ret < 0)
-		depth = ret;
-	else {
-		*buf += ret;
-		*buflen -= ret;
-	}
-out:
-	return depth;
-
+	err = strbuf_addf(buf, "%+ld(", ref->offset);
+	return (err < 0) ? err : depth;
 }
 
 static int synthesize_probe_trace_arg(struct probe_trace_arg *arg,
-				       char *buf, size_t buflen)
+				      struct strbuf *buf)
 {
 	struct probe_trace_arg_ref *ref = arg->ref;
-	int ret, depth = 0;
-	char *tmp = buf;
+	int depth = 0, err;
 
 	/* Argument name or separator */
 	if (arg->name)
-		ret = e_snprintf(buf, buflen, " %s=", arg->name);
+		err = strbuf_addf(buf, " %s=", arg->name);
 	else
-		ret = e_snprintf(buf, buflen, " ");
-	if (ret < 0)
-		return ret;
-	buf += ret;
-	buflen -= ret;
+		err = strbuf_addch(buf, ' ');
+	if (err)
+		return err;
 
 	/* Special case: @XXX */
 	if (arg->value[0] == '@' && arg->ref)
@@ -1782,59 +1810,44 @@
 
 	/* Dereferencing arguments */
 	if (ref) {
-		depth = __synthesize_probe_trace_arg_ref(ref, &buf,
-							  &buflen, 1);
+		depth = __synthesize_probe_trace_arg_ref(ref, buf, 1);
 		if (depth < 0)
 			return depth;
 	}
 
 	/* Print argument value */
 	if (arg->value[0] == '@' && arg->ref)
-		ret = e_snprintf(buf, buflen, "%s%+ld", arg->value,
-				 arg->ref->offset);
+		err = strbuf_addf(buf, "%s%+ld", arg->value, arg->ref->offset);
 	else
-		ret = e_snprintf(buf, buflen, "%s", arg->value);
-	if (ret < 0)
-		return ret;
-	buf += ret;
-	buflen -= ret;
+		err = strbuf_addstr(buf, arg->value);
 
 	/* Closing */
-	while (depth--) {
-		ret = e_snprintf(buf, buflen, ")");
-		if (ret < 0)
-			return ret;
-		buf += ret;
-		buflen -= ret;
-	}
-	/* Print argument type */
-	if (arg->type) {
-		ret = e_snprintf(buf, buflen, ":%s", arg->type);
-		if (ret <= 0)
-			return ret;
-		buf += ret;
-	}
+	while (!err && depth--)
+		err = strbuf_addch(buf, ')');
 
-	return buf - tmp;
+	/* Print argument type */
+	if (!err && arg->type)
+		err = strbuf_addf(buf, ":%s", arg->type);
+
+	return err;
 }
 
 char *synthesize_probe_trace_command(struct probe_trace_event *tev)
 {
 	struct probe_trace_point *tp = &tev->point;
-	char *buf;
-	int i, len, ret;
-
-	buf = zalloc(MAX_CMDLEN);
-	if (buf == NULL)
-		return NULL;
-
-	len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
-			 tev->group, tev->event);
-	if (len <= 0)
-		goto error;
+	struct strbuf buf;
+	char *ret = NULL;
+	int i, err;
 
 	/* Uprobes must have tp->module */
 	if (tev->uprobes && !tp->module)
+		return NULL;
+
+	if (strbuf_init(&buf, 32) < 0)
+		return NULL;
+
+	if (strbuf_addf(&buf, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
+			tev->group, tev->event) < 0)
 		goto error;
 	/*
 	 * If tp->address == 0, then this point must be a
@@ -1849,34 +1862,25 @@
 
 	/* Use the tp->address for uprobes */
 	if (tev->uprobes)
-		ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s:0x%lx",
-				 tp->module, tp->address);
+		err = strbuf_addf(&buf, "%s:0x%lx", tp->module, tp->address);
 	else if (!strncmp(tp->symbol, "0x", 2))
 		/* Absolute address. See try_to_find_absolute_address() */
-		ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s0x%lx",
-				 tp->module ?: "", tp->module ? ":" : "",
-				 tp->address);
+		err = strbuf_addf(&buf, "%s%s0x%lx", tp->module ?: "",
+				  tp->module ? ":" : "", tp->address);
 	else
-		ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s%s+%lu",
-				 tp->module ?: "", tp->module ? ":" : "",
-				 tp->symbol, tp->offset);
-
-	if (ret <= 0)
+		err = strbuf_addf(&buf, "%s%s%s+%lu", tp->module ?: "",
+				tp->module ? ":" : "", tp->symbol, tp->offset);
+	if (err)
 		goto error;
-	len += ret;
 
-	for (i = 0; i < tev->nargs; i++) {
-		ret = synthesize_probe_trace_arg(&tev->args[i], buf + len,
-						  MAX_CMDLEN - len);
-		if (ret <= 0)
+	for (i = 0; i < tev->nargs; i++)
+		if (synthesize_probe_trace_arg(&tev->args[i], &buf) < 0)
 			goto error;
-		len += ret;
-	}
 
-	return buf;
+	ret = strbuf_detach(&buf, NULL);
 error:
-	free(buf);
-	return NULL;
+	strbuf_release(&buf);
+	return ret;
 }
 
 static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
@@ -1958,7 +1962,7 @@
 static int convert_to_perf_probe_event(struct probe_trace_event *tev,
 			       struct perf_probe_event *pev, bool is_kprobe)
 {
-	char buf[64] = "";
+	struct strbuf buf = STRBUF_INIT;
 	int i, ret;
 
 	/* Convert event/group name */
@@ -1981,14 +1985,15 @@
 		if (tev->args[i].name)
 			pev->args[i].name = strdup(tev->args[i].name);
 		else {
-			ret = synthesize_probe_trace_arg(&tev->args[i],
-							  buf, 64);
-			pev->args[i].name = strdup(buf);
+			if ((ret = strbuf_init(&buf, 32)) < 0)
+				goto error;
+			ret = synthesize_probe_trace_arg(&tev->args[i], &buf);
+			pev->args[i].name = strbuf_detach(&buf, NULL);
 		}
 		if (pev->args[i].name == NULL && ret >= 0)
 			ret = -ENOMEM;
 	}
-
+error:
 	if (ret < 0)
 		clear_perf_probe_event(pev);
 
@@ -2162,35 +2167,38 @@
 				     struct strbuf *result)
 {
 	int i, ret;
-	char buf[128];
-	char *place;
+	char *buf;
+
+	if (asprintf(&buf, "%s:%s", group, event) < 0)
+		return -errno;
+	ret = strbuf_addf(result, "  %-20s (on ", buf);
+	free(buf);
+	if (ret)
+		return ret;
 
 	/* Synthesize only event probe point */
-	place = synthesize_perf_probe_point(&pev->point);
-	if (!place)
-		return -EINVAL;
+	buf = synthesize_perf_probe_point(&pev->point);
+	if (!buf)
+		return -ENOMEM;
+	ret = strbuf_addstr(result, buf);
+	free(buf);
 
-	ret = e_snprintf(buf, 128, "%s:%s", group, event);
-	if (ret < 0)
-		goto out;
+	if (!ret && module)
+		ret = strbuf_addf(result, " in %s", module);
 
-	strbuf_addf(result, "  %-20s (on %s", buf, place);
-	if (module)
-		strbuf_addf(result, " in %s", module);
-
-	if (pev->nargs > 0) {
-		strbuf_add(result, " with", 5);
-		for (i = 0; i < pev->nargs; i++) {
-			ret = synthesize_perf_probe_arg(&pev->args[i],
-							buf, 128);
-			if (ret < 0)
-				goto out;
-			strbuf_addf(result, " %s", buf);
+	if (!ret && pev->nargs > 0) {
+		ret = strbuf_add(result, " with", 5);
+		for (i = 0; !ret && i < pev->nargs; i++) {
+			buf = synthesize_perf_probe_arg(&pev->args[i]);
+			if (!buf)
+				return -ENOMEM;
+			ret = strbuf_addf(result, " %s", buf);
+			free(buf);
 		}
 	}
-	strbuf_addch(result, ')');
-out:
-	free(place);
+	if (!ret)
+		ret = strbuf_addch(result, ')');
+
 	return ret;
 }
 
@@ -2498,7 +2506,8 @@
 
 void __weak arch__fix_tev_from_maps(struct perf_probe_event *pev __maybe_unused,
 				struct probe_trace_event *tev __maybe_unused,
-				struct map *map __maybe_unused) { }
+				struct map *map __maybe_unused,
+				struct symbol *sym __maybe_unused) { }
 
 /*
  * Find probe function addresses from map.
@@ -2516,6 +2525,7 @@
 	struct probe_trace_point *tp;
 	int num_matched_functions;
 	int ret, i, j, skipped = 0;
+	char *mod_name;
 
 	map = get_target_map(pev->target, pev->uprobes);
 	if (!map) {
@@ -2600,9 +2610,19 @@
 		tp->realname = strdup_or_goto(sym->name, nomem_out);
 
 		tp->retprobe = pp->retprobe;
-		if (pev->target)
-			tev->point.module = strdup_or_goto(pev->target,
-							   nomem_out);
+		if (pev->target) {
+			if (pev->uprobes) {
+				tev->point.module = strdup_or_goto(pev->target,
+								   nomem_out);
+			} else {
+				mod_name = find_module_name(pev->target);
+				tev->point.module =
+					strdup(mod_name ? mod_name : pev->target);
+				free(mod_name);
+				if (!tev->point.module)
+					goto nomem_out;
+			}
+		}
 		tev->uprobes = pev->uprobes;
 		tev->nargs = pev->nargs;
 		if (tev->nargs) {
@@ -2624,7 +2644,7 @@
 					strdup_or_goto(pev->args[i].type,
 							nomem_out);
 		}
-		arch__fix_tev_from_maps(pev, tev, map);
+		arch__fix_tev_from_maps(pev, tev, map, sym);
 	}
 	if (ret == skipped) {
 		ret = -ENOENT;
@@ -2743,9 +2763,13 @@
 {
 	int ret;
 
-	if (pev->uprobes && !pev->group) {
-		/* Replace group name if not given */
-		ret = convert_exec_to_group(pev->target, &pev->group);
+	if (!pev->group) {
+		/* Set group name if not given */
+		if (!pev->uprobes) {
+			pev->group = strdup(PERFPROBE_GROUP);
+			ret = pev->group ? 0 : -ENOMEM;
+		} else
+			ret = convert_exec_to_group(pev->target, &pev->group);
 		if (ret != 0) {
 			pr_warning("Failed to make a group name.\n");
 			return ret;
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index e54e7b0..5a27eb4 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -120,7 +120,7 @@
 /* Events to command string */
 char *synthesize_perf_probe_command(struct perf_probe_event *pev);
 char *synthesize_probe_trace_command(struct probe_trace_event *tev);
-int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len);
+char *synthesize_perf_probe_arg(struct perf_probe_arg *pa);
 
 /* Check the perf_probe_event needs debuginfo */
 bool perf_probe_event_need_dwarf(struct perf_probe_event *pev);
@@ -154,7 +154,8 @@
 int show_available_funcs(const char *module, struct strfilter *filter, bool user);
 bool arch__prefers_symtab(void);
 void arch__fix_tev_from_maps(struct perf_probe_event *pev,
-			     struct probe_trace_event *tev, struct map *map);
+			     struct probe_trace_event *tev, struct map *map,
+			     struct symbol *sym);
 
 /* If there is no space to write, returns -E2BIG. */
 int e_snprintf(char *str, size_t size, const char *format, ...)
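The prototype change above moves synthesize_perf_probe_arg() from filling a fixed caller-supplied buffer to returning a freshly allocated string (NULL on failure) that the caller must free. A sketch of that calling convention with a hypothetical helper, not the real synthesizer:

#define _GNU_SOURCE		/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

static char *synthesize_arg(const char *name, const char *var)
{
	char *out;

	if (asprintf(&out, "%s=%s", name, var) < 0)
		return NULL;		/* allocation failed */
	return out;
}

int main(void)
{
	char *s = synthesize_arg("fd", "%di");

	if (!s)
		return 1;
	puts(s);
	free(s);
	return 0;
}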
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index e3b3b92..3fe6214 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -220,8 +220,7 @@
 
 	pr_debug("Writing event: %s\n", buf);
 	if (!probe_event_dry_run) {
-		ret = write(fd, buf, strlen(buf));
-		if (ret <= 0) {
+		if (write(fd, buf, strlen(buf)) < (int)strlen(buf)) {
 			ret = -errno;
 			pr_warning("Failed to write event: %s\n",
 				   strerror_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index b3bd0fb..1259839 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -553,7 +553,7 @@
 static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
 {
 	Dwarf_Die vr_die;
-	char buf[32], *ptr;
+	char *buf, *ptr;
 	int ret = 0;
 
 	/* Copy raw parameters */
@@ -563,13 +563,13 @@
 	if (pf->pvar->name)
 		pf->tvar->name = strdup(pf->pvar->name);
 	else {
-		ret = synthesize_perf_probe_arg(pf->pvar, buf, 32);
-		if (ret < 0)
-			return ret;
+		buf = synthesize_perf_probe_arg(pf->pvar);
+		if (!buf)
+			return -ENOMEM;
 		ptr = strchr(buf, ':');	/* Change type separator to _ */
 		if (ptr)
 			*ptr = '_';
-		pf->tvar->name = strdup(buf);
+		pf->tvar->name = buf;
 	}
 	if (pf->tvar->name == NULL)
 		return -ENOMEM;
@@ -1294,6 +1294,7 @@
 {
 	struct available_var_finder *af = data;
 	struct variable_list *vl;
+	struct strbuf buf = STRBUF_INIT;
 	int tag, ret;
 
 	vl = &af->vls[af->nvls - 1];
@@ -1307,25 +1308,26 @@
 		if (ret == 0 || ret == -ERANGE) {
 			int ret2;
 			bool externs = !af->child;
-			struct strbuf buf;
 
-			strbuf_init(&buf, 64);
+			if (strbuf_init(&buf, 64) < 0)
+				goto error;
 
 			if (probe_conf.show_location_range) {
-				if (!externs) {
-					if (ret)
-						strbuf_add(&buf, "[INV]\t", 6);
-					else
-						strbuf_add(&buf, "[VAL]\t", 6);
-				} else
-					strbuf_add(&buf, "[EXT]\t", 6);
+				if (!externs)
+					ret2 = strbuf_add(&buf,
+						ret ? "[INV]\t" : "[VAL]\t", 6);
+				else
+					ret2 = strbuf_add(&buf, "[EXT]\t", 6);
+				if (ret2)
+					goto error;
 			}
 
 			ret2 = die_get_varname(die_mem, &buf);
 
 			if (!ret2 && probe_conf.show_location_range &&
 				!externs) {
-				strbuf_addch(&buf, '\t');
+				if (strbuf_addch(&buf, '\t') < 0)
+					goto error;
 				ret2 = die_get_var_range(&af->pf.sp_die,
 							die_mem, &buf);
 			}
@@ -1343,6 +1345,10 @@
 		return DIE_FIND_CB_CONTINUE;
 	else
 		return DIE_FIND_CB_SIBLING;
+error:
+	strbuf_release(&buf);
+	pr_debug("Error in strbuf\n");
+	return DIE_FIND_CB_END;
 }
 
 /* Add a found vars into available variables list */
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 8162ba0..36c6862 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -23,3 +23,4 @@
 util/trace-event.c
 ../lib/rbtree.c
 util/string.c
+util/symbol_fprintf.c
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c
index 01f0324..c6d4ee2 100644
--- a/tools/perf/util/quote.c
+++ b/tools/perf/util/quote.c
@@ -17,38 +17,42 @@
 	return (c == '\'' || c == '!');
 }
 
-static void sq_quote_buf(struct strbuf *dst, const char *src)
+static int sq_quote_buf(struct strbuf *dst, const char *src)
 {
 	char *to_free = NULL;
+	int ret;
 
 	if (dst->buf == src)
 		to_free = strbuf_detach(dst, NULL);
 
-	strbuf_addch(dst, '\'');
-	while (*src) {
+	ret = strbuf_addch(dst, '\'');
+	while (!ret && *src) {
 		size_t len = strcspn(src, "'!");
-		strbuf_add(dst, src, len);
+		ret = strbuf_add(dst, src, len);
 		src += len;
-		while (need_bs_quote(*src)) {
-			strbuf_addstr(dst, "'\\");
-			strbuf_addch(dst, *src++);
-			strbuf_addch(dst, '\'');
-		}
+		while (!ret && need_bs_quote(*src))
+			ret = strbuf_addf(dst, "'\\%c\'", *src++);
 	}
-	strbuf_addch(dst, '\'');
+	if (!ret)
+		ret = strbuf_addch(dst, '\'');
 	free(to_free);
+
+	return ret;
 }
 
-void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
+int sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
 {
-	int i;
+	int i, ret;
 
 	/* Copy into destination buffer. */
-	strbuf_grow(dst, 255);
-	for (i = 0; argv[i]; ++i) {
-		strbuf_addch(dst, ' ');
-		sq_quote_buf(dst, argv[i]);
+	ret = strbuf_grow(dst, 255);
+	for (i = 0; !ret && argv[i]; ++i) {
+		ret = strbuf_addch(dst, ' ');
+		if (ret)
+			break;
+		ret = sq_quote_buf(dst, argv[i]);
 		if (maxlen && dst->len > maxlen)
 			die("Too many or long arguments");
 	}
+	return ret;
 }
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h
index 3340c9c..e1ec191 100644
--- a/tools/perf/util/quote.h
+++ b/tools/perf/util/quote.h
@@ -24,6 +24,6 @@
  * sq_quote() in a real application.
  */
 
-void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
+int sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
 
 #endif /* __PERF_QUOTE_H */
diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h
new file mode 100644
index 0000000..abc76e3
--- /dev/null
+++ b/tools/perf/util/rb_resort.h
@@ -0,0 +1,149 @@
+#ifndef _PERF_RESORT_RB_H_
+#define _PERF_RESORT_RB_H_
+/*
+ * Template for creating a class to resort an existing rb_tree according to
+ * a new sort criteria, that must be present in the entries of the source
+ * rb_tree.
+ *
+ * (c) 2016 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Quick example, resorting threads by their shortname:
+ *
+ * First define the prefix (threads) to be used for the functions and data
+ * structures created, and provide an expression for the sorting, then the
+ * fields to be present in each of the entries in the new, sorted, rb_tree.
+ *
+ * The body of the init function should collect the fields, maybe
+ * pre-calculating them from multiple entries in the original 'entry' from
+ * the rb_tree used as a source for the entries to be sorted:
+
+DEFINE_RESORT_RB(threads, strcmp(a->thread->shortname,
+				    b->thread->shortname) < 0,
+	struct thread *thread;
+)
+{
+	entry->thread = rb_entry(nd, struct thread, rb_node);
+}
+
+ * After this it is just a matter of instantiating it and iterating it. For
+ * a few data structures with existing rb_trees, such as 'struct machine',
+ * helpers are available to get the rb_root and the nr_entries:
+
+	DECLARE_RESORT_RB_MACHINE_THREADS(threads, machine_ptr);
+
+ * This will instantiate the new rb_tree and a cursor for it, that can be used as:
+
+	struct rb_node *nd;
+
+	resort_rb__for_each(nd, threads) {
+		struct thread *t = threads_entry;
+		printf("%s: %d\n", t->shortname, t->tid);
+	}
+
+ * Then delete it:
+
+	resort_rb__delete(threads);
+
+ * The names of the data structures and functions will have a _sorted suffix
+ * right before the method names, i.e. they will look like:
+ *
+ * 	struct threads_sorted_entry {}
+ * 	threads_sorted__insert()
+ */
+
+#define DEFINE_RESORT_RB(__name, __comp, ...)					\
+struct __name##_sorted_entry {							\
+	struct rb_node	rb_node;						\
+	__VA_ARGS__								\
+};										\
+static void __name##_sorted__init_entry(struct rb_node *nd,			\
+					struct __name##_sorted_entry *entry);	\
+										\
+static int __name##_sorted__cmp(struct rb_node *nda, struct rb_node *ndb)	\
+{										\
+	struct __name##_sorted_entry *a, *b;					\
+	a = rb_entry(nda, struct __name##_sorted_entry, rb_node);		\
+	b = rb_entry(ndb, struct __name##_sorted_entry, rb_node);		\
+	return __comp;								\
+}										\
+										\
+struct __name##_sorted {							\
+       struct rb_root		    entries;					\
+       struct __name##_sorted_entry nd[0];					\
+};										\
+										\
+static void __name##_sorted__insert(struct __name##_sorted *sorted,		\
+				      struct rb_node *sorted_nd)		\
+{										\
+	struct rb_node **p = &sorted->entries.rb_node, *parent = NULL;		\
+	while (*p != NULL) {							\
+		parent = *p;							\
+		if (__name##_sorted__cmp(sorted_nd, parent))			\
+			p = &(*p)->rb_left;					\
+		else								\
+			p = &(*p)->rb_right;					\
+	}									\
+	rb_link_node(sorted_nd, parent, p);					\
+	rb_insert_color(sorted_nd, &sorted->entries);				\
+}										\
+										\
+static void __name##_sorted__sort(struct __name##_sorted *sorted,		\
+				    struct rb_root *entries)			\
+{										\
+	struct rb_node *nd;							\
+	unsigned int i = 0;							\
+	for (nd = rb_first(entries); nd; nd = rb_next(nd)) {			\
+		struct __name##_sorted_entry *snd = &sorted->nd[i++];		\
+		__name##_sorted__init_entry(nd, snd);				\
+		__name##_sorted__insert(sorted, &snd->rb_node);			\
+	}									\
+}										\
+										\
+static struct __name##_sorted *__name##_sorted__new(struct rb_root *entries,	\
+						    int nr_entries)		\
+{										\
+	struct __name##_sorted *sorted;						\
+	sorted = malloc(sizeof(*sorted) + sizeof(sorted->nd[0]) * nr_entries);	\
+	if (sorted) {								\
+		sorted->entries = RB_ROOT;					\
+		__name##_sorted__sort(sorted, entries);				\
+	}									\
+	return sorted;								\
+}										\
+										\
+static void __name##_sorted__delete(struct __name##_sorted *sorted)		\
+{										\
+	free(sorted);								\
+}										\
+										\
+static void __name##_sorted__init_entry(struct rb_node *nd,			\
+					struct __name##_sorted_entry *entry)
+
+#define DECLARE_RESORT_RB(__name)						\
+struct __name##_sorted_entry *__name##_entry;					\
+struct __name##_sorted *__name = __name##_sorted__new
+
+#define resort_rb__for_each(__nd, __name)					\
+	for (__nd = rb_first(&__name->entries);					\
+	     __name##_entry = rb_entry(__nd, struct __name##_sorted_entry,	\
+				       rb_node), __nd;				\
+	     __nd = rb_next(__nd))
+
+#define resort_rb__delete(__name)						\
+	__name##_sorted__delete(__name), __name = NULL
+
+/*
+ * Helpers for other classes that contain both an rbtree and the
+ * number of entries in it:
+ */
+
+/* For 'struct intlist' */
+#define DECLARE_RESORT_RB_INTLIST(__name, __ilist)				\
+	DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries,			\
+				  __ilist->rblist.nr_entries)
+
+/* For 'struct machine->threads' */
+#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine)			\
+	DECLARE_RESORT_RB(__name)(&__machine->threads, __machine->nr_threads)
+
+#endif /* _PERF_RESORT_RB_H_ */
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 0467367..481792c 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -129,7 +129,8 @@
 	return true;
 }
 
-void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
+void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
+			 struct callchain_param *callchain)
 {
 	struct perf_evsel *evsel;
 	bool use_sample_identifier = false;
@@ -148,7 +149,7 @@
 	use_comm_exec = perf_can_comm_exec();
 
 	evlist__for_each(evlist, evsel) {
-		perf_evsel__config(evsel, opts);
+		perf_evsel__config(evsel, opts, callchain);
 		if (evsel->tracking && use_comm_exec)
 			evsel->attr.comm_exec = 1;
 	}
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index b3aabc0..62c7f69 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -31,6 +31,8 @@
 #include <perl.h>
 
 #include "../../perf.h"
+#include "../callchain.h"
+#include "../machine.h"
 #include "../thread.h"
 #include "../event.h"
 #include "../trace-event.h"
@@ -248,10 +250,90 @@
 		define_event_symbols(event, ev_name, args->next);
 }
 
+static SV *perl_process_callchain(struct perf_sample *sample,
+				  struct perf_evsel *evsel,
+				  struct addr_location *al)
+{
+	AV *list;
+
+	list = newAV();
+	if (!list)
+		goto exit;
+
+	if (!symbol_conf.use_callchain || !sample->callchain)
+		goto exit;
+
+	if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
+				      sample, NULL, NULL,
+				      sysctl_perf_event_max_stack) != 0) {
+		pr_err("Failed to resolve callchain. Skipping\n");
+		goto exit;
+	}
+	callchain_cursor_commit(&callchain_cursor);
+
+
+	while (1) {
+		HV *elem;
+		struct callchain_cursor_node *node;
+		node = callchain_cursor_current(&callchain_cursor);
+		if (!node)
+			break;
+
+		elem = newHV();
+		if (!elem)
+			goto exit;
+
+		if (!hv_stores(elem, "ip", newSVuv(node->ip))) {
+			hv_undef(elem);
+			goto exit;
+		}
+
+		if (node->sym) {
+			HV *sym = newHV();
+			if (!sym) {
+				hv_undef(elem);
+				goto exit;
+			}
+			if (!hv_stores(sym, "start",   newSVuv(node->sym->start)) ||
+			    !hv_stores(sym, "end",     newSVuv(node->sym->end)) ||
+			    !hv_stores(sym, "binding", newSVuv(node->sym->binding)) ||
+			    !hv_stores(sym, "name",    newSVpvn(node->sym->name,
+								node->sym->namelen)) ||
+			    !hv_stores(elem, "sym",    newRV_noinc((SV*)sym))) {
+				hv_undef(sym);
+				hv_undef(elem);
+				goto exit;
+			}
+		}
+
+		if (node->map) {
+			struct map *map = node->map;
+			const char *dsoname = "[unknown]";
+			if (map && map->dso && (map->dso->name || map->dso->long_name)) {
+				if (symbol_conf.show_kernel_path && map->dso->long_name)
+					dsoname = map->dso->long_name;
+				else if (map->dso->name)
+					dsoname = map->dso->name;
+			}
+			if (!hv_stores(elem, "dso", newSVpv(dsoname,0))) {
+				hv_undef(elem);
+				goto exit;
+			}
+		}
+
+		callchain_cursor_advance(&callchain_cursor);
+		av_push(list, newRV_noinc((SV*)elem));
+	}
+
+exit:
+	return newRV_noinc((SV*)list);
+}
+
 static void perl_process_tracepoint(struct perf_sample *sample,
 				    struct perf_evsel *evsel,
-				    struct thread *thread)
+				    struct addr_location *al)
 {
+	struct thread *thread = al->thread;
 	struct event_format *event = evsel->tp_format;
 	struct format_field *field;
 	static char handler[256];
@@ -295,6 +377,7 @@
 	XPUSHs(sv_2mortal(newSVuv(ns)));
 	XPUSHs(sv_2mortal(newSViv(pid)));
 	XPUSHs(sv_2mortal(newSVpv(comm, 0)));
+	XPUSHs(sv_2mortal(perl_process_callchain(sample, evsel, al)));
 
 	/* common fields other than pid can be accessed via xsub fns */
 
@@ -329,6 +412,7 @@
 		XPUSHs(sv_2mortal(newSVuv(nsecs)));
 		XPUSHs(sv_2mortal(newSViv(pid)));
 		XPUSHs(sv_2mortal(newSVpv(comm, 0)));
+		XPUSHs(sv_2mortal(perl_process_callchain(sample, evsel, al)));
 		call_pv("main::trace_unhandled", G_SCALAR);
 	}
 	SPAGAIN;
@@ -366,7 +450,7 @@
 			       struct perf_evsel *evsel,
 			       struct addr_location *al)
 {
-	perl_process_tracepoint(sample, evsel, al->thread);
+	perl_process_tracepoint(sample, evsel, al);
 	perl_process_event_generic(event, sample, evsel);
 }
 
@@ -490,7 +574,27 @@
 	fprintf(ofp, "use Perf::Trace::Util;\n\n");
 
 	fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n");
-	fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n");
+	fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n");
+
+
+	fprintf(ofp, "\n\
+sub print_backtrace\n\
+{\n\
+	my $callchain = shift;\n\
+	for my $node (@$callchain)\n\
+	{\n\
+		if(exists $node->{sym})\n\
+		{\n\
+			printf( \"\\t[\\%%x] \\%%s\\n\", $node->{ip}, $node->{sym}{name});\n\
+		}\n\
+		else\n\
+		{\n\
+			printf( \"\\t[\\%%x]\\n\", $node{ip});\n\
+		}\n\
+	}\n\
+}\n\n\
+");
+
 
 	while ((event = trace_find_next_event(pevent, event))) {
 		fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
@@ -502,7 +606,8 @@
 		fprintf(ofp, "$common_secs, ");
 		fprintf(ofp, "$common_nsecs,\n");
 		fprintf(ofp, "\t    $common_pid, ");
-		fprintf(ofp, "$common_comm,\n\t    ");
+		fprintf(ofp, "$common_comm, ");
+		fprintf(ofp, "$common_callchain,\n\t    ");
 
 		not_first = 0;
 		count = 0;
@@ -519,7 +624,7 @@
 
 		fprintf(ofp, "\tprint_header($event_name, $common_cpu, "
 			"$common_secs, $common_nsecs,\n\t             "
-			"$common_pid, $common_comm);\n\n");
+			"$common_pid, $common_comm, $common_callchain);\n\n");
 
 		fprintf(ofp, "\tprintf(\"");
 
@@ -581,17 +686,22 @@
 				fprintf(ofp, "$%s", f->name);
 		}
 
-		fprintf(ofp, ");\n");
+		fprintf(ofp, ");\n\n");
+
+		fprintf(ofp, "\tprint_backtrace($common_callchain);\n");
+
 		fprintf(ofp, "}\n\n");
 	}
 
 	fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, "
 		"$common_cpu, $common_secs, $common_nsecs,\n\t    "
-		"$common_pid, $common_comm) = @_;\n\n");
+		"$common_pid, $common_comm, $common_callchain) = @_;\n\n");
 
 	fprintf(ofp, "\tprint_header($event_name, $common_cpu, "
 		"$common_secs, $common_nsecs,\n\t             $common_pid, "
-		"$common_comm);\n}\n\n");
+		"$common_comm, $common_callchain);\n");
+	fprintf(ofp, "\tprint_backtrace($common_callchain);\n");
+	fprintf(ofp, "}\n\n");
 
 	fprintf(ofp, "sub print_header\n{\n"
 		"\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n"
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index fbd0524..ff13470 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -41,6 +41,7 @@
 #include "../thread-stack.h"
 #include "../trace-event.h"
 #include "../machine.h"
+#include "../call-path.h"
 #include "thread_map.h"
 #include "cpumap.h"
 #include "stat.h"
@@ -323,7 +324,7 @@
 	if (!symbol_conf.use_callchain || !sample->callchain)
 		goto exit;
 
-	if (thread__resolve_callchain(al->thread, evsel,
+	if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
 				      sample, NULL, NULL,
 				      scripting_max_stack) != 0) {
 		pr_err("Failed to resolve callchain. Skipping\n");
@@ -407,8 +408,11 @@
 	if (!t)
 		Py_FatalError("couldn't create Python tuple");
 
-	if (!event)
-		die("ug! no event found for type %d", (int)evsel->attr.config);
+	if (!event) {
+		snprintf(handler_name, sizeof(handler_name),
+			 "ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
+		Py_FatalError(handler_name);
+	}
 
 	pid = raw_field_value(event, "common_pid", data);
 
@@ -614,7 +618,7 @@
 			     struct machine *machine)
 {
 	struct tables *tables = container_of(dbe, struct tables, dbe);
-	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+	char sbuild_id[SBUILD_ID_SIZE];
 	PyObject *t;
 
 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
@@ -681,7 +685,7 @@
 	struct tables *tables = container_of(dbe, struct tables, dbe);
 	PyObject *t;
 
-	t = tuple_new(21);
+	t = tuple_new(22);
 
 	tuple_set_u64(t, 0, es->db_id);
 	tuple_set_u64(t, 1, es->evsel->db_id);
@@ -704,6 +708,7 @@
 	tuple_set_u64(t, 18, es->sample->data_src);
 	tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
 	tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
+	tuple_set_u64(t, 21, es->call_path_id);
 
 	call_object(tables->sample_handler, t, "sample_table");
 
@@ -998,8 +1003,10 @@
 {
 	const char *perf_db_export_mode = "perf_db_export_mode";
 	const char *perf_db_export_calls = "perf_db_export_calls";
-	PyObject *db_export_mode, *db_export_calls;
+	const char *perf_db_export_callchains = "perf_db_export_callchains";
+	PyObject *db_export_mode, *db_export_calls, *db_export_callchains;
 	bool export_calls = false;
+	bool export_callchains = false;
 	int ret;
 
 	memset(tables, 0, sizeof(struct tables));
@@ -1016,6 +1023,7 @@
 	if (!ret)
 		return;
 
+	/* handle export calls */
 	tables->dbe.crp = NULL;
 	db_export_calls = PyDict_GetItemString(main_dict, perf_db_export_calls);
 	if (db_export_calls) {
@@ -1033,6 +1041,33 @@
 			Py_FatalError("failed to create calls processor");
 	}
 
+	/* handle export callchains */
+	tables->dbe.cpr = NULL;
+	db_export_callchains = PyDict_GetItemString(main_dict,
+						    perf_db_export_callchains);
+	if (db_export_callchains) {
+		ret = PyObject_IsTrue(db_export_callchains);
+		if (ret == -1)
+			handler_call_die(perf_db_export_callchains);
+		export_callchains = !!ret;
+	}
+
+	if (export_callchains) {
+		/*
+		 * Attempt to use the call path root from the call return
+		 * processor, if the call return processor is in use. Otherwise,
+		 * we allocate a new call path root. This prevents exporting
+		 * duplicate call path ids when both are in use simultaneously.
+		 */
+		if (tables->dbe.crp)
+			tables->dbe.cpr = tables->dbe.crp->cpr;
+		else
+			tables->dbe.cpr = call_path_root__new();
+
+		if (!tables->dbe.cpr)
+			Py_FatalError("failed to create call path root");
+	}
+
 	tables->db_export_mode = true;
 	/*
 	 * Reserve per symbol space for symbol->db_id via symbol__priv()
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 4abd85c..5214974 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -409,6 +409,8 @@
 		tool->stat = process_stat_stub;
 	if (tool->stat_round == NULL)
 		tool->stat_round = process_stat_round_stub;
+	if (tool->time_conv == NULL)
+		tool->time_conv = process_event_op2_stub;
 }
 
 static void swap_sample_id_all(union perf_event *event, void *data)
@@ -555,7 +557,7 @@
 
 /*
  * XXX this is hack in attempt to carry flags bitfield
- * throught endian village. ABI says:
+ * through endian village. ABI says:
  *
  * Bit-fields are allocated from right to left (least to most significant)
  * on little-endian implementations and from left to right (most to least
@@ -794,6 +796,7 @@
 	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
 	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
 	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
+	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
 	[PERF_RECORD_HEADER_MAX]	  = NULL,
 };
 
@@ -904,7 +907,7 @@
 	unsigned int i;
 	struct ip_callchain *callchain = sample->callchain;
 
-	if (has_branch_callstack(evsel))
+	if (perf_evsel__has_branch_callstack(evsel))
 		callchain__lbr_callstack_printf(sample);
 
 	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
@@ -1078,7 +1081,7 @@
 	if (sample_type & PERF_SAMPLE_CALLCHAIN)
 		callchain__printf(evsel, sample);
 
-	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !has_branch_callstack(evsel))
+	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
 		branch_stack__printf(sample);
 
 	if (sample_type & PERF_SAMPLE_REGS_USER)
@@ -1341,6 +1344,9 @@
 		return tool->stat(tool, event, session);
 	case PERF_RECORD_STAT_ROUND:
 		return tool->stat_round(tool, event, session);
+	case PERF_RECORD_TIME_CONV:
+		session->time_conv = event->time_conv;
+		return tool->time_conv(tool, event, session);
 	default:
 		return -EINVAL;
 	}
@@ -1830,7 +1836,11 @@
 out_err:
 	ui_progress__finish();
 	perf_session__warn_about_errors(session);
-	ordered_events__free(&session->ordered_events);
+	/*
+	 * We may be switching perf.data output; make ordered_events
+	 * reusable.
+	 */
+	ordered_events__reinit(&session->ordered_events);
 	auxtrace__free_events(session);
 	session->one_mmap = false;
 	return err;
@@ -1947,105 +1957,6 @@
 	return NULL;
 }
 
-void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
-			  struct addr_location *al,
-			  unsigned int print_opts, unsigned int stack_depth)
-{
-	struct callchain_cursor_node *node;
-	int print_ip = print_opts & PRINT_IP_OPT_IP;
-	int print_sym = print_opts & PRINT_IP_OPT_SYM;
-	int print_dso = print_opts & PRINT_IP_OPT_DSO;
-	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
-	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
-	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
-	char s = print_oneline ? ' ' : '\t';
-
-	if (symbol_conf.use_callchain && sample->callchain) {
-		struct addr_location node_al;
-
-		if (thread__resolve_callchain(al->thread, evsel,
-					      sample, NULL, NULL,
-					      stack_depth) != 0) {
-			if (verbose)
-				error("Failed to resolve callchain. Skipping\n");
-			return;
-		}
-		callchain_cursor_commit(&callchain_cursor);
-
-		if (print_symoffset)
-			node_al = *al;
-
-		while (stack_depth) {
-			u64 addr = 0;
-
-			node = callchain_cursor_current(&callchain_cursor);
-			if (!node)
-				break;
-
-			if (node->sym && node->sym->ignore)
-				goto next;
-
-			if (print_ip)
-				printf("%c%16" PRIx64, s, node->ip);
-
-			if (node->map)
-				addr = node->map->map_ip(node->map, node->ip);
-
-			if (print_sym) {
-				printf(" ");
-				if (print_symoffset) {
-					node_al.addr = addr;
-					node_al.map  = node->map;
-					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
-				} else
-					symbol__fprintf_symname(node->sym, stdout);
-			}
-
-			if (print_dso) {
-				printf(" (");
-				map__fprintf_dsoname(node->map, stdout);
-				printf(")");
-			}
-
-			if (print_srcline)
-				map__fprintf_srcline(node->map, addr, "\n  ",
-						     stdout);
-
-			if (!print_oneline)
-				printf("\n");
-
-			stack_depth--;
-next:
-			callchain_cursor_advance(&callchain_cursor);
-		}
-
-	} else {
-		if (al->sym && al->sym->ignore)
-			return;
-
-		if (print_ip)
-			printf("%16" PRIx64, sample->ip);
-
-		if (print_sym) {
-			printf(" ");
-			if (print_symoffset)
-				symbol__fprintf_symname_offs(al->sym, al,
-							     stdout);
-			else
-				symbol__fprintf_symname(al->sym, stdout);
-		}
-
-		if (print_dso) {
-			printf(" (");
-			map__fprintf_dsoname(al->map, stdout);
-			printf(")");
-		}
-
-		if (print_srcline)
-			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
-	}
-}
-
 int perf_session__cpu_bitmap(struct perf_session *session,
 			     const char *cpu_list, unsigned long *cpu_bitmap)
 {
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 5f792e3..4bd7585 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -26,6 +26,7 @@
 	struct itrace_synth_opts *itrace_synth_opts;
 	struct list_head	auxtrace_index;
 	struct trace_event	tevent;
+	struct time_conv_event	time_conv;
 	bool			repipe;
 	bool			one_mmap;
 	void			*one_mmap_addr;
@@ -35,13 +36,6 @@
 	struct perf_tool	*tool;
 };
 
-#define PRINT_IP_OPT_IP		(1<<0)
-#define PRINT_IP_OPT_SYM		(1<<1)
-#define PRINT_IP_OPT_DSO		(1<<2)
-#define PRINT_IP_OPT_SYMOFFSET	(1<<3)
-#define PRINT_IP_OPT_ONELINE	(1<<4)
-#define PRINT_IP_OPT_SRCLINE	(1<<5)
-
 struct perf_tool;
 
 struct perf_session *perf_session__new(struct perf_data_file *file,
@@ -103,10 +97,6 @@
 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
 					    unsigned int type);
 
-void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
-			  struct addr_location *al,
-			  unsigned int print_opts, unsigned int stack_depth);
-
 int perf_session__cpu_bitmap(struct perf_session *session,
 			     const char *cpu_list, unsigned long *cpu_bitmap);
 
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 47966a1..20e69ed 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -21,13 +21,6 @@
 const char	*field_order;
 regex_t		ignore_callees_regex;
 int		have_ignore_callees = 0;
-int		sort__need_collapse = 0;
-int		sort__has_parent = 0;
-int		sort__has_sym = 0;
-int		sort__has_dso = 0;
-int		sort__has_socket = 0;
-int		sort__has_thread = 0;
-int		sort__has_comm = 0;
 enum sort_mode	sort__mode = SORT_MODE__NORMAL;
 
 /*
@@ -244,7 +237,7 @@
 	 * comparing symbol address alone is not enough since it's a
 	 * relative address within a dso.
 	 */
-	if (!sort__has_dso) {
+	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
 		ret = sort__dso_cmp(left, right);
 		if (ret != 0)
 			return ret;
@@ -2163,7 +2156,7 @@
 		return -1;
 
 	if (sd->entry->se_collapse)
-		sort__need_collapse = 1;
+		list->need_collapse = 1;
 
 	sd->taken = 1;
 
@@ -2245,9 +2238,9 @@
 				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
 				return -EINVAL;
 			}
-			sort__has_parent = 1;
+			list->parent = 1;
 		} else if (sd->entry == &sort_sym) {
-			sort__has_sym = 1;
+			list->sym = 1;
 			/*
 			 * perf diff displays the performance difference amongst
 			 * two or more perf.data files. Those files could come
@@ -2258,13 +2251,13 @@
 				sd->entry->se_collapse = sort__sym_sort;
 
 		} else if (sd->entry == &sort_dso) {
-			sort__has_dso = 1;
+			list->dso = 1;
 		} else if (sd->entry == &sort_socket) {
-			sort__has_socket = 1;
+			list->socket = 1;
 		} else if (sd->entry == &sort_thread) {
-			sort__has_thread = 1;
+			list->thread = 1;
 		} else if (sd->entry == &sort_comm) {
-			sort__has_comm = 1;
+			list->comm = 1;
 		}
 
 		return __sort_dimension__add(sd, list, level);
@@ -2289,7 +2282,7 @@
 			return -EINVAL;
 
 		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
-			sort__has_sym = 1;
+			list->sym = 1;
 
 		__sort_dimension__add(sd, list, level);
 		return 0;
@@ -2305,7 +2298,7 @@
 			return -EINVAL;
 
 		if (sd->entry == &sort_mem_daddr_sym)
-			sort__has_sym = 1;
+			list->sym = 1;
 
 		__sort_dimension__add(sd, list, level);
 		return 0;
@@ -2445,6 +2438,9 @@
 
 static char *setup_overhead(char *keys)
 {
+	if (sort__mode == SORT_MODE__DIFF)
+		return keys;
+
 	keys = prefix_if_not_in("overhead", keys);
 
 	if (symbol_conf.cumulate_callchain)
@@ -2746,10 +2742,10 @@
 
 void reset_output_field(void)
 {
-	sort__need_collapse = 0;
-	sort__has_parent = 0;
-	sort__has_sym = 0;
-	sort__has_dso = 0;
+	perf_hpp_list.need_collapse = 0;
+	perf_hpp_list.parent = 0;
+	perf_hpp_list.sym = 0;
+	perf_hpp_list.dso = 0;
 
 	field_order = NULL;
 	sort_order = NULL;
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 3f4e359..42927f4 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -31,13 +31,6 @@
 extern const char default_sort_order[];
 extern regex_t ignore_callees_regex;
 extern int have_ignore_callees;
-extern int sort__need_collapse;
-extern int sort__has_dso;
-extern int sort__has_parent;
-extern int sort__has_sym;
-extern int sort__has_socket;
-extern int sort__has_thread;
-extern int sort__has_comm;
 extern enum sort_mode sort__mode;
 extern struct sort_entry sort_comm;
 extern struct sort_entry sort_dso;
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 4d9b481..ffa1d06 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -307,6 +307,7 @@
 	struct perf_counts_values *aggr = &counter->counts->aggr;
 	struct perf_stat_evsel *ps = counter->priv;
 	u64 *count = counter->counts->aggr.values;
+	u64 val;
 	int i, ret;
 
 	aggr->val = aggr->ena = aggr->run = 0;
@@ -346,7 +347,8 @@
 	/*
 	 * Save the full runtime - to allow normalization during printout:
 	 */
-	perf_stat__update_shadow_stats(counter, count, 0);
+	val = counter->scale * *count;
+	perf_stat__update_shadow_stats(counter, &val, 0);
 
 	return 0;
 }
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 8fb7329..f95f682 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -1,3 +1,4 @@
+#include "debug.h"
 #include "cache.h"
 #include <linux/kernel.h>
 
@@ -17,12 +18,13 @@
  */
 char strbuf_slopbuf[1];
 
-void strbuf_init(struct strbuf *sb, ssize_t hint)
+int strbuf_init(struct strbuf *sb, ssize_t hint)
 {
 	sb->alloc = sb->len = 0;
 	sb->buf = strbuf_slopbuf;
 	if (hint)
-		strbuf_grow(sb, hint);
+		return strbuf_grow(sb, hint);
+	return 0;
 }
 
 void strbuf_release(struct strbuf *sb)
@@ -42,67 +44,104 @@
 	return res;
 }
 
-void strbuf_grow(struct strbuf *sb, size_t extra)
+int strbuf_grow(struct strbuf *sb, size_t extra)
 {
-	if (sb->len + extra + 1 <= sb->len)
-		die("you want to use way too much memory");
-	if (!sb->alloc)
-		sb->buf = NULL;
-	ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc);
+	char *buf;
+	size_t nr = sb->len + extra + 1;
+
+	if (nr < sb->alloc)
+		return 0;
+
+	if (nr <= sb->len)
+		return -E2BIG;
+
+	if (alloc_nr(sb->alloc) > nr)
+		nr = alloc_nr(sb->alloc);
+
+	/*
+	 * Note that sb->buf == strbuf_slopbuf if sb->alloc == 0, and it is
+	 * a static variable. Thus we have to avoid passing it to realloc.
+	 */
+	buf = realloc(sb->alloc ? sb->buf : NULL, nr * sizeof(*buf));
+	if (!buf)
+		return -ENOMEM;
+
+	sb->buf = buf;
+	sb->alloc = nr;
+	return 0;
 }
 
-void strbuf_addch(struct strbuf *sb, int c)
+int strbuf_addch(struct strbuf *sb, int c)
 {
-	strbuf_grow(sb, 1);
+	int ret = strbuf_grow(sb, 1);
+	if (ret)
+		return ret;
+
 	sb->buf[sb->len++] = c;
 	sb->buf[sb->len] = '\0';
+	return 0;
 }
 
-void strbuf_add(struct strbuf *sb, const void *data, size_t len)
+int strbuf_add(struct strbuf *sb, const void *data, size_t len)
 {
-	strbuf_grow(sb, len);
+	int ret = strbuf_grow(sb, len);
+	if (ret)
+		return ret;
+
 	memcpy(sb->buf + sb->len, data, len);
-	strbuf_setlen(sb, sb->len + len);
+	return strbuf_setlen(sb, sb->len + len);
 }
 
-static void strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
+static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
 {
-	int len;
+	int len, ret;
 	va_list ap_saved;
 
-	if (!strbuf_avail(sb))
-		strbuf_grow(sb, 64);
+	if (!strbuf_avail(sb)) {
+		ret = strbuf_grow(sb, 64);
+		if (ret)
+			return ret;
+	}
 
 	va_copy(ap_saved, ap);
 	len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
 	if (len < 0)
-		die("your vsnprintf is broken");
+		return len;
 	if (len > strbuf_avail(sb)) {
-		strbuf_grow(sb, len);
+		ret = strbuf_grow(sb, len);
+		if (ret)
+			return ret;
 		len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
 		va_end(ap_saved);
 		if (len > strbuf_avail(sb)) {
-			die("this should not happen, your vsnprintf is broken");
+			pr_debug("this should not happen, your vsnprintf is broken");
+			return -EINVAL;
 		}
 	}
-	strbuf_setlen(sb, sb->len + len);
+	return strbuf_setlen(sb, sb->len + len);
 }
 
-void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
+int strbuf_addf(struct strbuf *sb, const char *fmt, ...)
 {
 	va_list ap;
+	int ret;
 
 	va_start(ap, fmt);
-	strbuf_addv(sb, fmt, ap);
+	ret = strbuf_addv(sb, fmt, ap);
 	va_end(ap);
+	return ret;
 }
 
 ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
 {
 	size_t oldlen = sb->len;
 	size_t oldalloc = sb->alloc;
+	int ret;
 
-	strbuf_grow(sb, hint ? hint : 8192);
+	ret = strbuf_grow(sb, hint ? hint : 8192);
+	if (ret)
+		return ret;
+
 	for (;;) {
 		ssize_t cnt;
 
@@ -112,12 +151,14 @@
 				strbuf_release(sb);
 			else
 				strbuf_setlen(sb, oldlen);
-			return -1;
+			return cnt;
 		}
 		if (!cnt)
 			break;
 		sb->len += cnt;
-		strbuf_grow(sb, 8192);
+		ret = strbuf_grow(sb, 8192);
+		if (ret)
+			return ret;
 	}
 
 	sb->buf[sb->len] = '\0';
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index ab9be0fb..54b4092 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -51,7 +51,7 @@
 #define STRBUF_INIT  { 0, 0, strbuf_slopbuf }
 
 /*----- strbuf life cycle -----*/
-void strbuf_init(struct strbuf *buf, ssize_t hint);
+int strbuf_init(struct strbuf *buf, ssize_t hint);
 void strbuf_release(struct strbuf *buf);
 char *strbuf_detach(struct strbuf *buf, size_t *);
 
@@ -60,26 +60,31 @@
 	return sb->alloc ? sb->alloc - sb->len - 1 : 0;
 }
 
-void strbuf_grow(struct strbuf *buf, size_t);
+int strbuf_grow(struct strbuf *buf, size_t);
 
-static inline void strbuf_setlen(struct strbuf *sb, size_t len) {
-	if (!sb->alloc)
-		strbuf_grow(sb, 0);
+static inline int strbuf_setlen(struct strbuf *sb, size_t len) {
+	int ret;
+	if (!sb->alloc) {
+		ret = strbuf_grow(sb, 0);
+		if (ret)
+			return ret;
+	}
 	assert(len < sb->alloc);
 	sb->len = len;
 	sb->buf[len] = '\0';
+	return 0;
 }
 
 /*----- add data in your buffer -----*/
-void strbuf_addch(struct strbuf *sb, int c);
+int strbuf_addch(struct strbuf *sb, int c);
 
-void strbuf_add(struct strbuf *buf, const void *, size_t);
-static inline void strbuf_addstr(struct strbuf *sb, const char *s) {
-	strbuf_add(sb, s, strlen(s));
+int strbuf_add(struct strbuf *buf, const void *, size_t);
+static inline int strbuf_addstr(struct strbuf *sb, const char *s) {
+	return strbuf_add(sb, s, strlen(s));
 }
 
 __attribute__((format(printf,2,3)))
-void strbuf_addf(struct strbuf *sb, const char *fmt, ...);
+int strbuf_addf(struct strbuf *sb, const char *fmt, ...);
 
 /* XXX: if read fails, any partial read is undone */
 ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint);
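
With the conversion from void to int above, strbuf users are now expected to propagate allocation failures instead of relying on die(). A small error-checked sketch; the helper name and the strings are illustrative, not taken from the tree:

#include "strbuf.h"

static int make_event_name(struct strbuf *buf, const char *sys, const char *name)
{
	/* each helper now returns 0 on success or a negative error code */
	int err = strbuf_addstr(buf, sys);

	if (!err)
		err = strbuf_addch(buf, ':');
	if (!err)
		err = strbuf_addstr(buf, name);
	return err;
}

/*
 * Caller side:
 *	struct strbuf buf = STRBUF_INIT;
 *	if (make_event_name(&buf, "sched", "sched_switch") < 0)
 *		goto out_release;
 *	... use buf.buf ...
 *	strbuf_release(&buf);
 */
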
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index bc229a7..87a297d 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -709,17 +709,10 @@
 	if (ss->opdshdr.sh_type != SHT_PROGBITS)
 		ss->opdsec = NULL;
 
-	if (dso->kernel == DSO_TYPE_USER) {
-		GElf_Shdr shdr;
-		ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
-				ehdr.e_type == ET_REL ||
-				dso__is_vdso(dso) ||
-				elf_section_by_name(elf, &ehdr, &shdr,
-						     ".gnu.prelink_undo",
-						     NULL) != NULL);
-	} else {
+	if (dso->kernel == DSO_TYPE_USER)
+		ss->adjust_symbols = true;
+	else
 		ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
-	}
 
 	ss->name   = strdup(name);
 	if (!ss->name) {
@@ -777,7 +770,8 @@
 	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
 }
 
-void __weak arch__elf_sym_adjust(GElf_Sym *sym __maybe_unused) { }
+void __weak arch__sym_update(struct symbol *s __maybe_unused,
+		GElf_Sym *sym __maybe_unused) { }
 
 int dso__load_sym(struct dso *dso, struct map *map,
 		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
@@ -954,8 +948,6 @@
 		    (sym.st_value & 1))
 			--sym.st_value;
 
-		arch__elf_sym_adjust(&sym);
-
 		if (dso->kernel || kmodule) {
 			char dso_name[PATH_MAX];
 
@@ -1089,6 +1081,8 @@
 		if (!f)
 			goto out_elf_end;
 
+		arch__sym_update(f, &sym);
+
 		if (filter && filter(curr_map, f))
 			symbol__delete(f);
 		else {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index e7588dc..7fb3330 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -255,40 +255,6 @@
 	free(((void *)sym) - symbol_conf.priv_size);
 }
 
-size_t symbol__fprintf(struct symbol *sym, FILE *fp)
-{
-	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
-		       sym->start, sym->end,
-		       sym->binding == STB_GLOBAL ? 'g' :
-		       sym->binding == STB_LOCAL  ? 'l' : 'w',
-		       sym->name);
-}
-
-size_t symbol__fprintf_symname_offs(const struct symbol *sym,
-				    const struct addr_location *al, FILE *fp)
-{
-	unsigned long offset;
-	size_t length;
-
-	if (sym && sym->name) {
-		length = fprintf(fp, "%s", sym->name);
-		if (al) {
-			if (al->addr < sym->end)
-				offset = al->addr - sym->start;
-			else
-				offset = al->addr - al->map->start - sym->start;
-			length += fprintf(fp, "+0x%lx", offset);
-		}
-		return length;
-	} else
-		return fprintf(fp, "[unknown]");
-}
-
-size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
-{
-	return symbol__fprintf_symname_offs(sym, NULL, fp);
-}
-
 void symbols__delete(struct rb_root *symbols)
 {
 	struct symbol *pos;
@@ -335,7 +301,7 @@
 
 		if (ip < s->start)
 			n = n->rb_left;
-		else if (ip >= s->end)
+		else if (ip > s->end || (ip == s->end && ip != s->start))
 			n = n->rb_right;
 		else
 			return s;
@@ -364,11 +330,6 @@
 	return NULL;
 }
 
-struct symbol_name_rb_node {
-	struct rb_node	rb_node;
-	struct symbol	sym;
-};
-
 static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
 {
 	struct rb_node **p = &symbols->rb_node;
@@ -452,6 +413,18 @@
 	}
 }
 
+void dso__insert_symbol(struct dso *dso, enum map_type type, struct symbol *sym)
+{
+	symbols__insert(&dso->symbols[type], sym);
+
+	/* update the symbol cache if necessary */
+	if (dso->last_find_result[type].addr >= sym->start &&
+	    (dso->last_find_result[type].addr < sym->end ||
+	    sym->start == sym->end)) {
+		dso->last_find_result[type].symbol = sym;
+	}
+}
+
 struct symbol *dso__find_symbol(struct dso *dso,
 				enum map_type type, u64 addr)
 {
@@ -497,21 +470,6 @@
 				     &dso->symbols[type]);
 }
 
-size_t dso__fprintf_symbols_by_name(struct dso *dso,
-				    enum map_type type, FILE *fp)
-{
-	size_t ret = 0;
-	struct rb_node *nd;
-	struct symbol_name_rb_node *pos;
-
-	for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
-		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
-		fprintf(fp, "%s\n", pos->sym.name);
-	}
-
-	return ret;
-}
-
 int modules__parse(const char *filename, void *arg,
 		   int (*process_module)(void *arg, const char *name,
 					 u64 start))
@@ -1262,8 +1220,8 @@
 	return 0;
 }
 
-int dso__load_kallsyms(struct dso *dso, const char *filename,
-		       struct map *map, symbol_filter_t filter)
+int __dso__load_kallsyms(struct dso *dso, const char *filename,
+			 struct map *map, bool no_kcore, symbol_filter_t filter)
 {
 	u64 delta = 0;
 
@@ -1284,12 +1242,18 @@
 	else
 		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
 
-	if (!dso__load_kcore(dso, map, filename))
+	if (!no_kcore && !dso__load_kcore(dso, map, filename))
 		return dso__split_kallsyms_for_kcore(dso, map, filter);
 	else
 		return dso__split_kallsyms(dso, map, delta, filter);
 }
 
+int dso__load_kallsyms(struct dso *dso, const char *filename,
+		       struct map *map, symbol_filter_t filter)
+{
+	return __dso__load_kallsyms(dso, filename, map, false, filter);
+}
+
 static int dso__load_perf_map(struct dso *dso, struct map *map,
 			      symbol_filter_t filter)
 {
@@ -1644,25 +1608,27 @@
 	return err;
 }
 
+static bool visible_dir_filter(const char *name, struct dirent *d)
+{
+	if (d->d_type != DT_DIR)
+		return false;
+	return lsdir_no_dot_filter(name, d);
+}
+
 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
 {
 	char kallsyms_filename[PATH_MAX];
-	struct dirent *dent;
 	int ret = -1;
-	DIR *d;
+	struct strlist *dirs;
+	struct str_node *nd;
 
-	d = opendir(dir);
-	if (!d)
+	dirs = lsdir(dir, visible_dir_filter);
+	if (!dirs)
 		return -1;
 
-	while (1) {
-		dent = readdir(d);
-		if (!dent)
-			break;
-		if (dent->d_type != DT_DIR)
-			continue;
+	strlist__for_each(nd, dirs) {
 		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
-			  "%s/%s/kallsyms", dir, dent->d_name);
+			  "%s/%s/kallsyms", dir, nd->s);
 		if (!validate_kcore_addresses(kallsyms_filename, map)) {
 			strlcpy(dir, kallsyms_filename, dir_sz);
 			ret = 0;
@@ -1670,7 +1636,7 @@
 		}
 	}
 
-	closedir(d);
+	strlist__delete(dirs);
 
 	return ret;
 }
@@ -1678,7 +1644,7 @@
 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 {
 	u8 host_build_id[BUILD_ID_SIZE];
-	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+	char sbuild_id[SBUILD_ID_SIZE];
 	bool is_host = false;
 	char path[PATH_MAX];
 
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index c8b7544..2b5e4ed 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -55,6 +55,7 @@
 	u16		namelen;
 	u8		binding;
 	bool		ignore;
+	u8		arch_sym;
 	char		name[0];
 };
 
@@ -140,6 +141,11 @@
 
 extern struct symbol_conf symbol_conf;
 
+struct symbol_name_rb_node {
+	struct rb_node	rb_node;
+	struct symbol	sym;
+};
+
 static inline int __symbol__join_symfs(char *bf, size_t size, const char *path)
 {
 	return path__join(bf, size, symbol_conf.symfs, path);
@@ -235,9 +241,14 @@
 		      symbol_filter_t filter);
 int dso__load_vmlinux_path(struct dso *dso, struct map *map,
 			   symbol_filter_t filter);
+int __dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
+			 bool no_kcore, symbol_filter_t filter);
 int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map,
 		       symbol_filter_t filter);
 
+void dso__insert_symbol(struct dso *dso, enum map_type type,
+			struct symbol *sym);
+
 struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
 				u64 addr);
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
@@ -262,8 +273,14 @@
 void symbol__exit(void);
 void symbol__elf_init(void);
 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name);
+size_t __symbol__fprintf_symname_offs(const struct symbol *sym,
+				      const struct addr_location *al,
+				      bool unknown_as_addr, FILE *fp);
 size_t symbol__fprintf_symname_offs(const struct symbol *sym,
 				    const struct addr_location *al, FILE *fp);
+size_t __symbol__fprintf_symname(const struct symbol *sym,
+				 const struct addr_location *al,
+				 bool unknown_as_addr, FILE *fp);
 size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
 size_t symbol__fprintf(struct symbol *sym, FILE *fp);
 bool symbol_type__is_a(char symbol_type, enum map_type map_type);
@@ -310,7 +327,7 @@
 
 #ifdef HAVE_LIBELF_SUPPORT
 bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
-void arch__elf_sym_adjust(GElf_Sym *sym);
+void arch__sym_update(struct symbol *s, GElf_Sym *sym);
 #endif
 
 #define SYMBOL_A 0
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
new file mode 100644
index 0000000..a680bda
--- /dev/null
+++ b/tools/perf/util/symbol_fprintf.c
@@ -0,0 +1,71 @@
+#include <elf.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "symbol.h"
+
+size_t symbol__fprintf(struct symbol *sym, FILE *fp)
+{
+	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
+		       sym->start, sym->end,
+		       sym->binding == STB_GLOBAL ? 'g' :
+		       sym->binding == STB_LOCAL  ? 'l' : 'w',
+		       sym->name);
+}
+
+size_t __symbol__fprintf_symname_offs(const struct symbol *sym,
+				      const struct addr_location *al,
+				      bool unknown_as_addr, FILE *fp)
+{
+	unsigned long offset;
+	size_t length;
+
+	if (sym && sym->name) {
+		length = fprintf(fp, "%s", sym->name);
+		if (al) {
+			if (al->addr < sym->end)
+				offset = al->addr - sym->start;
+			else
+				offset = al->addr - al->map->start - sym->start;
+			length += fprintf(fp, "+0x%lx", offset);
+		}
+		return length;
+	} else if (al && unknown_as_addr)
+		return fprintf(fp, "[%#" PRIx64 "]", al->addr);
+	else
+		return fprintf(fp, "[unknown]");
+}
+
+size_t symbol__fprintf_symname_offs(const struct symbol *sym,
+				    const struct addr_location *al,
+				    FILE *fp)
+{
+	return __symbol__fprintf_symname_offs(sym, al, false, fp);
+}
+
+size_t __symbol__fprintf_symname(const struct symbol *sym,
+				 const struct addr_location *al,
+				 bool unknown_as_addr, FILE *fp)
+{
+	return __symbol__fprintf_symname_offs(sym, al, unknown_as_addr, fp);
+}
+
+size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
+{
+	return __symbol__fprintf_symname_offs(sym, NULL, false, fp);
+}
+
+size_t dso__fprintf_symbols_by_name(struct dso *dso,
+				    enum map_type type, FILE *fp)
+{
+	size_t ret = 0;
+	struct rb_node *nd;
+	struct symbol_name_rb_node *pos;
+
+	for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
+		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
+		fprintf(fp, "%s\n", pos->sym.name);
+	}
+
+	return ret;
+}
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
new file mode 100644
index 0000000..bbb4c19
--- /dev/null
+++ b/tools/perf/util/syscalltbl.c
@@ -0,0 +1,134 @@
+/*
+ * System call table mapper
+ *
+ * (C) 2016 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include "syscalltbl.h"
+#include <stdlib.h>
+
+#ifdef HAVE_SYSCALL_TABLE
+#include <linux/compiler.h>
+#include <string.h>
+#include "util.h"
+
+#if defined(__x86_64__)
+#include <asm/syscalls_64.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_x86_64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_x86_64;
+#endif
+
+struct syscall {
+	int id;
+	const char *name;
+};
+
+static int syscallcmpname(const void *vkey, const void *ventry)
+{
+	const char *key = vkey;
+	const struct syscall *entry = ventry;
+
+	return strcmp(key, entry->name);
+}
+
+static int syscallcmp(const void *va, const void *vb)
+{
+	const struct syscall *a = va, *b = vb;
+
+	return strcmp(a->name, b->name);
+}
+
+static int syscalltbl__init_native(struct syscalltbl *tbl)
+{
+	int nr_entries = 0, i, j;
+	struct syscall *entries;
+
+	for (i = 0; i <= syscalltbl_native_max_id; ++i)
+		if (syscalltbl_native[i])
+			++nr_entries;
+
+	entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);
+	if (tbl->syscalls.entries == NULL)
+		return -1;
+
+	for (i = 0, j = 0; i <= syscalltbl_native_max_id; ++i) {
+		if (syscalltbl_native[i]) {
+			entries[j].name = syscalltbl_native[i];
+			entries[j].id = i;
+			++j;
+		}
+	}
+
+	qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);
+	tbl->syscalls.nr_entries = nr_entries;
+	return 0;
+}
+
+struct syscalltbl *syscalltbl__new(void)
+{
+	struct syscalltbl *tbl = malloc(sizeof(*tbl));
+	if (tbl) {
+		if (syscalltbl__init_native(tbl)) {
+			free(tbl);
+			return NULL;
+		}
+	}
+	return tbl;
+}
+
+void syscalltbl__delete(struct syscalltbl *tbl)
+{
+	zfree(&tbl->syscalls.entries);
+	free(tbl);
+}
+
+const char *syscalltbl__name(const struct syscalltbl *tbl __maybe_unused, int id)
+{
+	return id <= syscalltbl_native_max_id ? syscalltbl_native[id]: NULL;
+}
+
+int syscalltbl__id(struct syscalltbl *tbl, const char *name)
+{
+	struct syscall *sc = bsearch(name, tbl->syscalls.entries,
+				     tbl->syscalls.nr_entries, sizeof(*sc),
+				     syscallcmpname);
+
+	return sc ? sc->id : -1;
+}
+
+#else /* HAVE_SYSCALL_TABLE */
+
+#include <libaudit.h>
+
+struct syscalltbl *syscalltbl__new(void)
+{
+	struct syscalltbl *tbl = malloc(sizeof(*tbl));
+	if (tbl)
+		tbl->audit_machine = audit_detect_machine();
+	return tbl;
+}
+
+void syscalltbl__delete(struct syscalltbl *tbl)
+{
+	free(tbl);
+}
+
+const char *syscalltbl__name(const struct syscalltbl *tbl, int id)
+{
+	return audit_syscall_to_name(id, tbl->audit_machine);
+}
+
+int syscalltbl__id(struct syscalltbl *tbl, const char *name)
+{
+	return audit_name_to_syscall(name, tbl->audit_machine);
+}
+#endif /* HAVE_SYSCALL_TABLE */
diff --git a/tools/perf/util/syscalltbl.h b/tools/perf/util/syscalltbl.h
new file mode 100644
index 0000000..e295151
--- /dev/null
+++ b/tools/perf/util/syscalltbl.h
@@ -0,0 +1,20 @@
+#ifndef __PERF_SYSCALLTBL_H
+#define __PERF_SYSCALLTBL_H
+
+struct syscalltbl {
+	union {
+		int audit_machine;
+		struct {
+			int nr_entries;
+			void *entries;
+		} syscalls;
+	};
+};
+
+struct syscalltbl *syscalltbl__new(void);
+void syscalltbl__delete(struct syscalltbl *tbl);
+
+const char *syscalltbl__name(const struct syscalltbl *tbl, int id);
+int syscalltbl__id(struct syscalltbl *tbl, const char *name);
+
+#endif /* __PERF_SYSCALLTBL_H */
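
Both the HAVE_SYSCALL_TABLE implementation and the libaudit fallback provide the same three entry points, so callers can stay backend-agnostic. A minimal, hypothetical lookup:

#include <stdio.h>
#include "syscalltbl.h"

static void show_openat_id(void)
{
	struct syscalltbl *tbl = syscalltbl__new();
	int id;

	if (tbl == NULL)
		return;

	id = syscalltbl__id(tbl, "openat");	/* -1 if not found */
	if (id >= 0)
		printf("openat = %d -> %s\n", id, syscalltbl__name(tbl, id));

	syscalltbl__delete(tbl);
}
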
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index 679688e..825086a 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -22,44 +22,9 @@
 #include "debug.h"
 #include "symbol.h"
 #include "comm.h"
+#include "call-path.h"
 #include "thread-stack.h"
 
-#define CALL_PATH_BLOCK_SHIFT 8
-#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
-#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)
-
-struct call_path_block {
-	struct call_path cp[CALL_PATH_BLOCK_SIZE];
-	struct list_head node;
-};
-
-/**
- * struct call_path_root - root of all call paths.
- * @call_path: root call path
- * @blocks: list of blocks to store call paths
- * @next: next free space
- * @sz: number of spaces
- */
-struct call_path_root {
-	struct call_path call_path;
-	struct list_head blocks;
-	size_t next;
-	size_t sz;
-};
-
-/**
- * struct call_return_processor - provides a call-back to consume call-return
- *                                information.
- * @cpr: call path root
- * @process: call-back that accepts call/return information
- * @data: anonymous data for call-back
- */
-struct call_return_processor {
-	struct call_path_root *cpr;
-	int (*process)(struct call_return *cr, void *data);
-	void *data;
-};
-
 #define STACK_GROWTH 2048
 
 /**
@@ -335,108 +300,6 @@
 		chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
 }
 
-static void call_path__init(struct call_path *cp, struct call_path *parent,
-			    struct symbol *sym, u64 ip, bool in_kernel)
-{
-	cp->parent = parent;
-	cp->sym = sym;
-	cp->ip = sym ? 0 : ip;
-	cp->db_id = 0;
-	cp->in_kernel = in_kernel;
-	RB_CLEAR_NODE(&cp->rb_node);
-	cp->children = RB_ROOT;
-}
-
-static struct call_path_root *call_path_root__new(void)
-{
-	struct call_path_root *cpr;
-
-	cpr = zalloc(sizeof(struct call_path_root));
-	if (!cpr)
-		return NULL;
-	call_path__init(&cpr->call_path, NULL, NULL, 0, false);
-	INIT_LIST_HEAD(&cpr->blocks);
-	return cpr;
-}
-
-static void call_path_root__free(struct call_path_root *cpr)
-{
-	struct call_path_block *pos, *n;
-
-	list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
-		list_del(&pos->node);
-		free(pos);
-	}
-	free(cpr);
-}
-
-static struct call_path *call_path__new(struct call_path_root *cpr,
-					struct call_path *parent,
-					struct symbol *sym, u64 ip,
-					bool in_kernel)
-{
-	struct call_path_block *cpb;
-	struct call_path *cp;
-	size_t n;
-
-	if (cpr->next < cpr->sz) {
-		cpb = list_last_entry(&cpr->blocks, struct call_path_block,
-				      node);
-	} else {
-		cpb = zalloc(sizeof(struct call_path_block));
-		if (!cpb)
-			return NULL;
-		list_add_tail(&cpb->node, &cpr->blocks);
-		cpr->sz += CALL_PATH_BLOCK_SIZE;
-	}
-
-	n = cpr->next++ & CALL_PATH_BLOCK_MASK;
-	cp = &cpb->cp[n];
-
-	call_path__init(cp, parent, sym, ip, in_kernel);
-
-	return cp;
-}
-
-static struct call_path *call_path__findnew(struct call_path_root *cpr,
-					    struct call_path *parent,
-					    struct symbol *sym, u64 ip, u64 ks)
-{
-	struct rb_node **p;
-	struct rb_node *node_parent = NULL;
-	struct call_path *cp;
-	bool in_kernel = ip >= ks;
-
-	if (sym)
-		ip = 0;
-
-	if (!parent)
-		return call_path__new(cpr, parent, sym, ip, in_kernel);
-
-	p = &parent->children.rb_node;
-	while (*p != NULL) {
-		node_parent = *p;
-		cp = rb_entry(node_parent, struct call_path, rb_node);
-
-		if (cp->sym == sym && cp->ip == ip)
-			return cp;
-
-		if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	cp = call_path__new(cpr, parent, sym, ip, in_kernel);
-	if (!cp)
-		return NULL;
-
-	rb_link_node(&cp->rb_node, node_parent, p);
-	rb_insert_color(&cp->rb_node, &parent->children);
-
-	return cp;
-}
-
 struct call_return_processor *
 call_return_processor__new(int (*process)(struct call_return *cr, void *data),
 			   void *data)
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index e1528f1..ad44c79 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -19,17 +19,16 @@
 #include <sys/types.h>
 
 #include <linux/types.h>
-#include <linux/rbtree.h>
 
 struct thread;
 struct comm;
 struct ip_callchain;
 struct symbol;
 struct dso;
-struct call_return_processor;
 struct comm;
 struct perf_sample;
 struct addr_location;
+struct call_path;
 
 /*
  * Call/Return flags.
@@ -69,26 +68,16 @@
 };
 
 /**
- * struct call_path - node in list of calls leading to a function call.
- * @parent: call path to the parent function call
- * @sym: symbol of function called
- * @ip: only if sym is null, the ip of the function
- * @db_id: id used for db-export
- * @in_kernel: whether function is a in the kernel
- * @rb_node: node in parent's tree of called functions
- * @children: tree of call paths of functions called
- *
- * In combination with the call_return structure, the call_path structure
- * defines a context-sensitve call-graph.
+ * struct call_return_processor - provides a call-back to consume call-return
+ *                                information.
+ * @cpr: call path root
+ * @process: call-back that accepts call/return information
+ * @data: anonymous data for call-back
  */
-struct call_path {
-	struct call_path *parent;
-	struct symbol *sym;
-	u64 ip;
-	u64 db_id;
-	bool in_kernel;
-	struct rb_node rb_node;
-	struct rb_root children;
+struct call_return_processor {
+	struct call_path_root *cpr;
+	int (*process)(struct call_return *cr, void *data);
+	void *data;
 };
 
 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index dfd00c6..45fcb71 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -10,6 +10,8 @@
 #include "comm.h"
 #include "unwind.h"
 
+#include <api/fs/fs.h>
+
 int thread__init_map_groups(struct thread *thread, struct machine *machine)
 {
 	struct thread *leader;
@@ -153,6 +155,23 @@
 	return 0;
 }
 
+int thread__set_comm_from_proc(struct thread *thread)
+{
+	char path[64];
+	char *comm = NULL;
+	size_t sz;
+	int err = -1;
+
+	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
+		       thread->pid_, thread->tid) >= (int)sizeof(path)) &&
+	    procfs__read_str(path, &comm, &sz) == 0) {
+		comm[sz - 1] = '\0';
+		err = thread__set_comm(thread, comm, 0);
+	}
+
+	return err;
+}
+
 const char *thread__comm_str(const struct thread *thread)
 {
 	const struct comm *comm = thread__comm(thread);
@@ -233,7 +252,7 @@
 					struct addr_location *al)
 {
 	size_t i;
-	const u8 const cpumodes[] = {
+	const u8 cpumodes[] = {
 		PERF_RECORD_MISC_USER,
 		PERF_RECORD_MISC_KERNEL,
 		PERF_RECORD_MISC_GUEST_USER,
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index a0ac031..45fba13 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -9,6 +9,9 @@
 #include "symbol.h"
 #include <strlist.h>
 #include <intlist.h>
+#ifdef HAVE_LIBUNWIND_SUPPORT
+#include <libunwind.h>
+#endif
 
 struct thread_stack;
 
@@ -32,6 +35,9 @@
 
 	void			*priv;
 	struct thread_stack	*ts;
+#ifdef HAVE_LIBUNWIND_SUPPORT
+	unw_addr_space_t	addr_space;
+#endif
 };
 
 struct machine;
@@ -65,6 +71,8 @@
 	return __thread__set_comm(thread, comm, timestamp, false);
 }
 
+int thread__set_comm_from_proc(struct thread *thread);
+
 int thread__comm_len(struct thread *thread);
 struct comm *thread__comm(const struct thread *thread);
 struct comm *thread__exec_comm(const struct thread *thread);
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 08afc690..5654fe1 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -94,7 +94,7 @@
 	DIR *proc;
 	int max_threads = 32, items, i;
 	char path[256];
-	struct dirent dirent, *next, **namelist = NULL;
+	struct dirent *dirent, **namelist = NULL;
 	struct thread_map *threads = thread_map__alloc(max_threads);
 
 	if (threads == NULL)
@@ -107,16 +107,16 @@
 	threads->nr = 0;
 	atomic_set(&threads->refcnt, 1);
 
-	while (!readdir_r(proc, &dirent, &next) && next) {
+	while ((dirent = readdir(proc)) != NULL) {
 		char *end;
 		bool grow = false;
 		struct stat st;
-		pid_t pid = strtol(dirent.d_name, &end, 10);
+		pid_t pid = strtol(dirent->d_name, &end, 10);
 
 		if (*end) /* only interested in proper numerical dirents */
 			continue;
 
-		snprintf(path, sizeof(path), "/proc/%s", dirent.d_name);
+		snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);
 
 		if (stat(path, &st) != 0)
 			continue;
@@ -260,7 +260,7 @@
 	return threads;
 }
 
-static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
+struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
 {
 	struct thread_map *threads = NULL, *nt;
 	int ntasks = 0;
@@ -436,3 +436,15 @@
 
 	return threads;
 }
+
+bool thread_map__has(struct thread_map *threads, pid_t pid)
+{
+	int i;
+
+	for (i = 0; i < threads->nr; ++i) {
+		if (threads->map[i].pid == pid)
+			return true;
+	}
+
+	return false;
+}
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
index 85e4c7c..bd3b971 100644
--- a/tools/perf/util/thread_map.h
+++ b/tools/perf/util/thread_map.h
@@ -31,6 +31,8 @@
 struct thread_map *thread_map__new_str(const char *pid,
 		const char *tid, uid_t uid);
 
+struct thread_map *thread_map__new_by_tid_str(const char *tid_str);
+
 size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
 
 static inline int thread_map__nr(struct thread_map *threads)
@@ -55,4 +57,5 @@
 }
 
 void thread_map__read_comms(struct thread_map *threads);
+bool thread_map__has(struct thread_map *threads, pid_t pid);
 #endif	/* __PERF_THREAD_MAP_H */
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 55de4cf..ac2590a 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -57,6 +57,7 @@
 			id_index,
 			auxtrace_info,
 			auxtrace_error,
+			time_conv,
 			thread_map,
 			cpu_map,
 			stat_config,
diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h
new file mode 100644
index 0000000..e97d701
--- /dev/null
+++ b/tools/perf/util/trigger.h
@@ -0,0 +1,94 @@
+#ifndef __TRIGGER_H_
+#define __TRIGGER_H_ 1
+
+#include "util/debug.h"
+#include "asm/bug.h"
+
+/*
+ * Use trigger to model operations which need to be executed when
+ * an event (a signal, for example) is observed.
+ *
+ * States and transitions:
+ *
+ *
+ *  OFF--(on)--> READY --(hit)--> HIT
+ *                 ^               |
+ *                 |            (ready)
+ *                 |               |
+ *                  \_____________/
+ *
+ * is_hit and is_ready are the two key functions for querying the state of
+ * a trigger. is_hit means the event has already happened; is_ready means
+ * the trigger is waiting for the event.
+ */
+
+struct trigger {
+	volatile enum {
+		TRIGGER_ERROR		= -2,
+		TRIGGER_OFF		= -1,
+		TRIGGER_READY		= 0,
+		TRIGGER_HIT		= 1,
+	} state;
+	const char *name;
+};
+
+#define TRIGGER_WARN_ONCE(t, exp) \
+	WARN_ONCE(t->state != exp, "trigger '%s' state transition error: %d in %s()\n", \
+		  t->name, t->state, __func__)
+
+static inline bool trigger_is_available(struct trigger *t)
+{
+	return t->state >= 0;
+}
+
+static inline bool trigger_is_error(struct trigger *t)
+{
+	return t->state <= TRIGGER_ERROR;
+}
+
+static inline void trigger_on(struct trigger *t)
+{
+	TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
+	t->state = TRIGGER_READY;
+}
+
+static inline void trigger_ready(struct trigger *t)
+{
+	if (!trigger_is_available(t))
+		return;
+	t->state = TRIGGER_READY;
+}
+
+static inline void trigger_hit(struct trigger *t)
+{
+	if (!trigger_is_available(t))
+		return;
+	TRIGGER_WARN_ONCE(t, TRIGGER_READY);
+	t->state = TRIGGER_HIT;
+}
+
+static inline void trigger_off(struct trigger *t)
+{
+	if (!trigger_is_available(t))
+		return;
+	t->state = TRIGGER_OFF;
+}
+
+static inline void trigger_error(struct trigger *t)
+{
+	t->state = TRIGGER_ERROR;
+}
+
+static inline bool trigger_is_ready(struct trigger *t)
+{
+	return t->state == TRIGGER_READY;
+}
+
+static inline bool trigger_is_hit(struct trigger *t)
+{
+	return t->state == TRIGGER_HIT;
+}
+
+#define DEFINE_TRIGGER(n) \
+struct trigger n = {.state = TRIGGER_OFF, .name = #n}
+#endif
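
A hedged sketch of how a signal-driven consumer might walk these states; the trigger name, the signal and the loop are illustrative, not taken from an in-tree user:

#include <signal.h>
#include "util/trigger.h"

static DEFINE_TRIGGER(switch_output_trigger);

static void sig_handler(int sig)
{
	(void)sig;
	/* latches HIT only if the trigger was switched on (READY) */
	trigger_hit(&switch_output_trigger);
}

static void event_loop(volatile int *done)
{
	signal(SIGUSR2, sig_handler);
	trigger_on(&switch_output_trigger);		/* OFF -> READY */

	while (!*done) {
		/* ... poll and read mmaps here ... */
		if (trigger_is_hit(&switch_output_trigger)) {
			/* consume the event, then re-arm: HIT -> READY */
			trigger_ready(&switch_output_trigger);
		}
	}

	trigger_off(&switch_output_trigger);
}
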
diff --git a/tools/perf/util/tsc.h b/tools/perf/util/tsc.h
index a8b78f1..d5b11e2 100644
--- a/tools/perf/util/tsc.h
+++ b/tools/perf/util/tsc.h
@@ -3,10 +3,29 @@
 
 #include <linux/types.h>
 
-#include "../arch/x86/util/tsc.h"
+#include "event.h"
+
+struct perf_tsc_conversion {
+	u16 time_shift;
+	u32 time_mult;
+	u64 time_zero;
+};
+struct perf_event_mmap_page;
+
+int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
+			     struct perf_tsc_conversion *tc);
 
 u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
 u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
 u64 rdtsc(void);
 
+struct perf_event_mmap_page;
+struct perf_tool;
+struct machine;
+
+int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
+				struct perf_tool *tool,
+				perf_event__handler_t process,
+				struct machine *machine);
+
 #endif
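
The time_shift/time_mult/time_zero triple mirrors the fields exported in the perf_event_mmap_page, so the cycles-to-nanoseconds direction reduces to the usual scaled multiply. A sketch of what tsc_to_perf_time() computes, following the mmap-page documentation; the in-tree implementation may differ in detail:

static u64 example_tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
{
	/* split into quotient and remainder to avoid 64-bit overflow */
	u64 quot = cyc >> tc->time_shift;
	u64 rem  = cyc & (((u64)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}
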
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index ee7e372..63687d3 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -32,6 +32,7 @@
 #include "symbol.h"
 #include "util.h"
 #include "debug.h"
+#include "asm/bug.h"
 
 extern int
 UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
@@ -580,43 +581,33 @@
 
 int unwind__prepare_access(struct thread *thread)
 {
-	unw_addr_space_t addr_space;
-
 	if (callchain_param.record_mode != CALLCHAIN_DWARF)
 		return 0;
 
-	addr_space = unw_create_addr_space(&accessors, 0);
-	if (!addr_space) {
+	thread->addr_space = unw_create_addr_space(&accessors, 0);
+	if (!thread->addr_space) {
 		pr_err("unwind: Can't create unwind address space.\n");
 		return -ENOMEM;
 	}
 
-	unw_set_caching_policy(addr_space, UNW_CACHE_GLOBAL);
-	thread__set_priv(thread, addr_space);
-
+	unw_set_caching_policy(thread->addr_space, UNW_CACHE_GLOBAL);
 	return 0;
 }
 
 void unwind__flush_access(struct thread *thread)
 {
-	unw_addr_space_t addr_space;
-
 	if (callchain_param.record_mode != CALLCHAIN_DWARF)
 		return;
 
-	addr_space = thread__priv(thread);
-	unw_flush_cache(addr_space, 0, 0);
+	unw_flush_cache(thread->addr_space, 0, 0);
 }
 
 void unwind__finish_access(struct thread *thread)
 {
-	unw_addr_space_t addr_space;
-
 	if (callchain_param.record_mode != CALLCHAIN_DWARF)
 		return;
 
-	addr_space = thread__priv(thread);
-	unw_destroy_addr_space(addr_space);
+	unw_destroy_addr_space(thread->addr_space);
 }
 
 static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
@@ -639,7 +630,9 @@
 	 * unwind itself.
 	 */
 	if (max_stack - 1 > 0) {
-		addr_space = thread__priv(ui->thread);
+		WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
+		addr_space = ui->thread->addr_space;
+
 		if (addr_space == NULL)
 			return -1;
 
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index b7766c5..eab077a 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -33,6 +33,8 @@
 unsigned int page_size;
 int cacheline_size;
 
+unsigned int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+
 bool test_attr__enabled;
 
 bool perf_host  = true;
@@ -117,6 +119,40 @@
 	return rmdir(path);
 }
 
+/* A filter which removes dot files */
+bool lsdir_no_dot_filter(const char *name __maybe_unused, struct dirent *d)
+{
+	return d->d_name[0] != '.';
+}
+
+/* lsdir reads a directory and stores its contents in a strlist */
+struct strlist *lsdir(const char *name,
+		      bool (*filter)(const char *, struct dirent *))
+{
+	struct strlist *list = NULL;
+	DIR *dir;
+	struct dirent *d;
+
+	dir = opendir(name);
+	if (!dir)
+		return NULL;
+
+	list = strlist__new(NULL, NULL);
+	if (!list) {
+		errno = ENOMEM;
+		goto out;
+	}
+
+	while ((d = readdir(dir)) != NULL) {
+		if (!filter || filter(name, d))
+			strlist__add(list, d->d_name);
+	}
+
+out:
+	closedir(dir);
+	return list;
+}
+
 static int slow_copyfile(const char *from, const char *to)
 {
 	int err = -1;
@@ -471,7 +507,6 @@
 				       "needed for --call-graph fp\n");
 			break;
 
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
 		/* Dwarf style */
 		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
 			const unsigned long default_stack_dump_size = 8192;
@@ -487,7 +522,6 @@
 				ret = get_stack_size(tok, &size);
 				param->dump_size = size;
 			}
-#endif /* HAVE_DWARF_UNWIND_SUPPORT */
 		} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
 			if (!strtok_r(NULL, ",", &saveptr)) {
 				param->record_mode = CALLCHAIN_LBR;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 8298d60..7651633 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -79,6 +79,7 @@
 #include <termios.h>
 #include <linux/bitops.h>
 #include <termios.h>
+#include "strlist.h"
 
 extern const char *graph_line;
 extern const char *graph_dotted_line;
@@ -159,12 +160,6 @@
 }
 #endif
 
-/*
- * Wrappers:
- */
-void *xrealloc(void *ptr, size_t size) __attribute__((weak));
-
-
 static inline void *zalloc(size_t size)
 {
 	return calloc(1, size);
@@ -222,6 +217,8 @@
 
 int mkdir_p(char *path, mode_t mode);
 int rm_rf(char *path);
+struct strlist *lsdir(const char *name, bool (*filter)(const char *, struct dirent *));
+bool lsdir_no_dot_filter(const char *name, struct dirent *d);
 int copyfile(const char *from, const char *to);
 int copyfile_mode(const char *from, const char *to, mode_t mode);
 int copyfile_offset(int fromfd, loff_t from_ofs, int tofd, loff_t to_ofs, u64 size);
@@ -254,11 +251,17 @@
 char *ltrim(char *s);
 char *rtrim(char *s);
 
+static inline char *trim(char *s)
+{
+	return ltrim(rtrim(s));
+}
+
 void dump_stack(void);
 void sighandler_dump_stack(int sig);
 
 extern unsigned int page_size;
 extern int cacheline_size;
+extern unsigned int sysctl_perf_event_max_stack;
 
 struct parse_tag {
 	char tag;
diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c
deleted file mode 100644
index 5f1a07c..0000000
--- a/tools/perf/util/wrapper.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Various trivial helper wrappers around standard functions
- */
-#include "cache.h"
-
-/*
- * There's no pack memory to release - but stay close to the Git
- * version so wrap this away:
- */
-static inline void release_pack_memory(size_t size __maybe_unused,
-				       int flag __maybe_unused)
-{
-}
-
-void *xrealloc(void *ptr, size_t size)
-{
-	void *ret = realloc(ptr, size);
-	if (!ret && !size)
-		ret = realloc(ptr, 1);
-	if (!ret) {
-		release_pack_memory(size, -1);
-		ret = realloc(ptr, size);
-		if (!ret && !size)
-			ret = realloc(ptr, 1);
-		if (!ret)
-			die("Out of memory, realloc failed");
-	}
-	return ret;
-}
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index d0e6b85..546cf4a 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -91,7 +91,7 @@
 			 char *signature,
 			 u32 instance,
 			 struct acpi_table_header **table,
-			 acpi_physical_address * address);
+			 acpi_physical_address *address);
 
 static acpi_status osl_list_bios_tables(void);
 
@@ -99,7 +99,7 @@
 osl_get_bios_table(char *signature,
 		   u32 instance,
 		   struct acpi_table_header **table,
-		   acpi_physical_address * address);
+		   acpi_physical_address *address);
 
 static acpi_status osl_get_last_status(acpi_status default_status);
 
@@ -187,7 +187,7 @@
 
 acpi_status
 acpi_os_get_table_by_address(acpi_physical_address address,
-			     struct acpi_table_header ** table)
+			     struct acpi_table_header **table)
 {
 	u32 table_length;
 	struct acpi_table_header *mapped_table;
@@ -252,8 +252,8 @@
 acpi_status
 acpi_os_get_table_by_name(char *signature,
 			  u32 instance,
-			  struct acpi_table_header ** table,
-			  acpi_physical_address * address)
+			  struct acpi_table_header **table,
+			  acpi_physical_address *address)
 {
 	acpi_status status;
 
@@ -380,8 +380,8 @@
 
 acpi_status
 acpi_os_get_table_by_index(u32 index,
-			   struct acpi_table_header ** table,
-			   u32 *instance, acpi_physical_address * address)
+			   struct acpi_table_header **table,
+			   u32 *instance, acpi_physical_address *address)
 {
 	struct osl_table_info *info;
 	acpi_status status;
@@ -447,7 +447,7 @@
 		}
 	}
 
-	return ((acpi_physical_address) (address));
+	return ((acpi_physical_address)(address));
 }
 
 /******************************************************************************
@@ -751,10 +751,10 @@
 	for (i = 0; i < number_of_tables; ++i, table_data += item_size) {
 		if (osl_can_use_xsdt()) {
 			table_address =
-			    (acpi_physical_address) (*ACPI_CAST64(table_data));
+			    (acpi_physical_address)(*ACPI_CAST64(table_data));
 		} else {
 			table_address =
-			    (acpi_physical_address) (*ACPI_CAST32(table_data));
+			    (acpi_physical_address)(*ACPI_CAST32(table_data));
 		}
 
 		/* Skip NULL entries in RSDT/XSDT */
@@ -800,7 +800,7 @@
 osl_get_bios_table(char *signature,
 		   u32 instance,
 		   struct acpi_table_header **table,
-		   acpi_physical_address * address)
+		   acpi_physical_address *address)
 {
 	struct acpi_table_header *local_table = NULL;
 	struct acpi_table_header *mapped_table = NULL;
@@ -833,38 +833,37 @@
 			if ((gbl_fadt->header.length >= MIN_FADT_FOR_XDSDT) &&
 			    gbl_fadt->Xdsdt) {
 				table_address =
-				    (acpi_physical_address) gbl_fadt->Xdsdt;
+				    (acpi_physical_address)gbl_fadt->Xdsdt;
 			} else
 			    if ((gbl_fadt->header.length >= MIN_FADT_FOR_DSDT)
 				&& gbl_fadt->dsdt) {
 				table_address =
-				    (acpi_physical_address) gbl_fadt->dsdt;
+				    (acpi_physical_address)gbl_fadt->dsdt;
 			}
 		} else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_FACS)) {
 			if ((gbl_fadt->header.length >= MIN_FADT_FOR_XFACS) &&
 			    gbl_fadt->Xfacs) {
 				table_address =
-				    (acpi_physical_address) gbl_fadt->Xfacs;
+				    (acpi_physical_address)gbl_fadt->Xfacs;
 			} else
 			    if ((gbl_fadt->header.length >= MIN_FADT_FOR_FACS)
 				&& gbl_fadt->facs) {
 				table_address =
-				    (acpi_physical_address) gbl_fadt->facs;
+				    (acpi_physical_address)gbl_fadt->facs;
 			}
 		} else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_XSDT)) {
 			if (!gbl_revision) {
 				return (AE_BAD_SIGNATURE);
 			}
 			table_address =
-			    (acpi_physical_address) gbl_rsdp.
+			    (acpi_physical_address)gbl_rsdp.
 			    xsdt_physical_address;
 		} else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_RSDT)) {
 			table_address =
-			    (acpi_physical_address) gbl_rsdp.
+			    (acpi_physical_address)gbl_rsdp.
 			    rsdt_physical_address;
 		} else {
-			table_address =
-			    (acpi_physical_address) gbl_rsdp_address;
+			table_address = (acpi_physical_address)gbl_rsdp_address;
 			signature = ACPI_SIG_RSDP;
 		}
 
@@ -904,12 +903,12 @@
 		for (i = 0; i < number_of_tables; ++i, table_data += item_size) {
 			if (osl_can_use_xsdt()) {
 				table_address =
-				    (acpi_physical_address) (*ACPI_CAST64
-							     (table_data));
+				    (acpi_physical_address)(*ACPI_CAST64
+							    (table_data));
 			} else {
 				table_address =
-				    (acpi_physical_address) (*ACPI_CAST32
-							     (table_data));
+				    (acpi_physical_address)(*ACPI_CAST32
+							    (table_data));
 			}
 
 			/* Skip NULL entries in RSDT/XSDT */
@@ -1301,7 +1300,7 @@
 			 char *signature,
 			 u32 instance,
 			 struct acpi_table_header **table,
-			 acpi_physical_address * address)
+			 acpi_physical_address *address)
 {
 	void *table_dir;
 	u32 current_instance = 0;
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index 3818fd0..cbfbce1 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -54,7 +54,7 @@
 #ifndef O_BINARY
 #define O_BINARY 0
 #endif
-#if defined(_dragon_fly) || defined(_free_BSD)
+#if defined(_dragon_fly) || defined(_free_BSD) || defined(_QNX)
 #define MMAP_FLAGS          MAP_SHARED
 #else
 #define MMAP_FLAGS          MAP_PRIVATE
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 08cb8b2..88aa66e 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -246,8 +246,8 @@
  *****************************************************************************/
 
 acpi_status
-acpi_os_predefined_override(const struct acpi_predefined_names * init_val,
-			    acpi_string * new_val)
+acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
+			    acpi_string *new_val)
 {
 
 	if (!init_val || !new_val) {
@@ -274,8 +274,8 @@
  *****************************************************************************/
 
 acpi_status
-acpi_os_table_override(struct acpi_table_header * existing_table,
-		       struct acpi_table_header ** new_table)
+acpi_os_table_override(struct acpi_table_header *existing_table,
+		       struct acpi_table_header **new_table)
 {
 
 	if (!existing_table || !new_table) {
@@ -311,8 +311,8 @@
  *****************************************************************************/
 
 acpi_status
-acpi_os_physical_table_override(struct acpi_table_header * existing_table,
-				acpi_physical_address * new_address,
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+				acpi_physical_address *new_address,
 				u32 *new_table_length)
 {
 
@@ -506,7 +506,7 @@
 void *acpi_os_map_memory(acpi_physical_address where, acpi_size length)
 {
 
-	return (ACPI_TO_POINTER((acpi_size) where));
+	return (ACPI_TO_POINTER((acpi_size)where));
 }
 
 /******************************************************************************
@@ -603,9 +603,9 @@
 
 acpi_status
 acpi_os_create_semaphore(u32 max_units,
-			 u32 initial_units, acpi_handle * out_handle)
+			 u32 initial_units, acpi_handle *out_handle)
 {
-	*out_handle = (acpi_handle) 1;
+	*out_handle = (acpi_handle)1;
 	return (AE_OK);
 }
 
@@ -640,7 +640,7 @@
 
 acpi_status
 acpi_os_create_semaphore(u32 max_units,
-			 u32 initial_units, acpi_handle * out_handle)
+			 u32 initial_units, acpi_handle *out_handle)
 {
 	sem_t *sem;
 
@@ -672,7 +672,7 @@
 	}
 #endif
 
-	*out_handle = (acpi_handle) sem;
+	*out_handle = (acpi_handle)sem;
 	return (AE_OK);
 }
 
@@ -1035,7 +1035,7 @@
  *****************************************************************************/
 
 acpi_status
-acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id,
+acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
 				u32 pci_register, u64 value, u32 width)
 {
 
diff --git a/tools/power/acpi/tools/acpidbg/acpidbg.c b/tools/power/acpi/tools/acpidbg/acpidbg.c
index d070fcc..a88ac45 100644
--- a/tools/power/acpi/tools/acpidbg/acpidbg.c
+++ b/tools/power/acpi/tools/acpidbg/acpidbg.c
@@ -375,7 +375,7 @@
 
 int main(int argc, char **argv)
 {
-	int fd = 0;
+	int fd = -1;
 	int ch;
 	int len;
 	int ret = EXIT_SUCCESS;
@@ -430,7 +430,7 @@
 	acpi_aml_loop(fd);
 
 exit:
-	if (fd < 0)
+	if (fd >= 0)
 		close(fd);
 	if (acpi_aml_batch_cmd)
 		free(acpi_aml_batch_cmd);
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
index 8d76157..2942cdc 100644
--- a/tools/power/acpi/tools/acpidump/Makefile
+++ b/tools/power/acpi/tools/acpidump/Makefile
@@ -31,6 +31,7 @@
 	osunixxf.o\
 	tbprint.o\
 	tbxfroot.o\
+	utascii.o\
 	utbuffer.o\
 	utdebug.o\
 	utexcep.o\
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index da44458..fb8f1d9 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -68,7 +68,7 @@
 
 		/* Make sure signature is all ASCII and a valid ACPI name */
 
-		if (!acpi_ut_valid_acpi_name(table->signature)) {
+		if (!acpi_ut_valid_nameseg(table->signature)) {
 			acpi_log_error("Table signature (0x%8.8X) is invalid\n",
 				       *(u32 *)table->signature);
 			return (FALSE);
@@ -286,14 +286,15 @@
 
 	/* Convert argument to an integer physical address */
 
-	status = acpi_ut_strtoul64(ascii_address, 0, &long_address);
+	status = acpi_ut_strtoul64(ascii_address, ACPI_ANY_BASE,
+				   ACPI_MAX64_BYTE_WIDTH, &long_address);
 	if (ACPI_FAILURE(status)) {
 		acpi_log_error("%s: Could not convert to a physical address\n",
 			       ascii_address);
 		return (-1);
 	}
 
-	address = (acpi_physical_address) long_address;
+	address = (acpi_physical_address)long_address;
 	status = acpi_os_get_table_by_address(address, &table);
 	if (ACPI_FAILURE(status)) {
 		acpi_log_error("Could not get table at 0x%8.8X%8.8X, %s\n",
@@ -406,6 +407,12 @@
 		return (-1);
 	}
 
+	if (!acpi_ut_valid_nameseg(table->signature)) {
+		acpi_log_error
+		    ("No valid ACPI signature was found in input file %s\n",
+		     pathname);
+	}
+
 	/* File must be at least as long as the table length */
 
 	if (table->length > file_size) {
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index c3c0915..7692e6b 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -209,7 +209,8 @@
 		case 'r':	/* Dump tables from specified RSDP */
 
 			status =
-			    acpi_ut_strtoul64(acpi_gbl_optarg, 0,
+			    acpi_ut_strtoul64(acpi_gbl_optarg, ACPI_ANY_BASE,
+					      ACPI_MAX64_BYTE_WIDTH,
 					      &gbl_rsdp_base);
 			if (ACPI_FAILURE(status)) {
 				acpi_log_error
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 0adaf0c..8358863 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -63,7 +63,7 @@
 # and _should_ modify the PACKAGE_BUGREPORT definition
 
 VERSION=			$(shell ./utils/version-gen.sh)
-LIB_MAJ=			0.0.0
+LIB_MAJ=			0.0.1
 LIB_MIN=			0
 
 PACKAGE =			cpupower
@@ -129,7 +129,7 @@
 CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \
 		-DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE
 
-UTIL_OBJS =  utils/helpers/amd.o utils/helpers/topology.o utils/helpers/msr.o \
+UTIL_OBJS =  utils/helpers/amd.o utils/helpers/msr.o \
 	utils/helpers/sysfs.o utils/helpers/misc.o utils/helpers/cpuid.o \
 	utils/helpers/pci.o utils/helpers/bitmask.o \
 	utils/idle_monitor/nhm_idle.o utils/idle_monitor/snb_idle.o \
@@ -148,9 +148,9 @@
 	utils/helpers/bitmask.h \
 	utils/idle_monitor/idle_monitors.h utils/idle_monitor/idle_monitors.def
 
-LIB_HEADERS = 	lib/cpufreq.h lib/sysfs.h
-LIB_SRC = 	lib/cpufreq.c lib/sysfs.c
-LIB_OBJS = 	lib/cpufreq.o lib/sysfs.o
+LIB_HEADERS = 	lib/cpufreq.h lib/cpupower.h lib/cpuidle.h
+LIB_SRC = 	lib/cpufreq.c lib/cpupower.c lib/cpuidle.c
+LIB_OBJS = 	lib/cpufreq.o lib/cpupower.o lib/cpuidle.o
 LIB_OBJS :=	$(addprefix $(OUTPUT),$(LIB_OBJS))
 
 CFLAGS +=	-pipe
@@ -280,6 +280,7 @@
 	$(CP) $(OUTPUT)libcpupower.so* $(DESTDIR)${libdir}/
 	$(INSTALL) -d $(DESTDIR)${includedir}
 	$(INSTALL_DATA) lib/cpufreq.h $(DESTDIR)${includedir}/cpufreq.h
+	$(INSTALL_DATA) lib/cpuidle.h $(DESTDIR)${includedir}/cpuidle.h
 
 install-tools:
 	$(INSTALL) -d $(DESTDIR)${bindir}
@@ -315,6 +316,7 @@
 uninstall:
 	- rm -f $(DESTDIR)${libdir}/libcpupower.*
 	- rm -f $(DESTDIR)${includedir}/cpufreq.h
+	- rm -f $(DESTDIR)${includedir}/cpuidle.h
 	- rm -f $(DESTDIR)${bindir}/utils/cpupower
 	- rm -f $(DESTDIR)${mandir}/man1/cpupower.1
 	- rm -f $(DESTDIR)${mandir}/man1/cpupower-frequency-set.1
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index d0f879b..3e59f1a 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -22,7 +22,7 @@
 
 $(OUTPUT)cpufreq-bench: $(OBJS)
 	$(ECHO) "  CC      " $@
-	$(QUIET) $(CC) -o $@ $(CFLAGS) $(OBJS) $(LIBS)
+	$(QUIET) $(CC) -o $@ $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS)
 
 all: $(OUTPUT)cpufreq-bench
 
diff --git a/tools/power/cpupower/bench/README-BENCH b/tools/power/cpupower/bench/README-BENCH
index 8093ec7..97727ae 100644
--- a/tools/power/cpupower/bench/README-BENCH
+++ b/tools/power/cpupower/bench/README-BENCH
@@ -113,7 +113,7 @@
 -c, --cpu=<unsigned int>        CPU Number to use, starting at 0
 -p, --prio=<priority>           scheduler priority, HIGH, LOW or DEFAULT
 -g, --governor=<governor>       cpufreq governor to test
--n, --cycles=<int>              load/sleep cycles to get an avarage value to compare
+-n, --cycles=<int>              load/sleep cycles to get an average value to compare
 -r, --rounds<int>               load/sleep rounds
 -f, --file=<configfile>         config file to use
 -o, --output=<dir>              output dir, must exist
diff --git a/tools/power/cpupower/bench/benchmark.c b/tools/power/cpupower/bench/benchmark.c
index 81b1c48..429d51a 100644
--- a/tools/power/cpupower/bench/benchmark.c
+++ b/tools/power/cpupower/bench/benchmark.c
@@ -130,7 +130,7 @@
 			_round, load_time, sleep_time);
 
 		if (config->verbose)
-			printf("avarage: %lius, rps:%li\n",
+			printf("average: %lius, rps:%li\n",
 				load_time / calculations,
 				1000000 * calculations / load_time);
 
@@ -177,7 +177,7 @@
 
 		progress_time += sleep_time + load_time;
 
-		/* compare the avarage sleep/load cycles  */
+		/* compare the average sleep/load cycles  */
 		fprintf(config->output, "%li ",
 			powersave_time / config->cycles);
 		fprintf(config->output, "%.3f\n",
diff --git a/tools/power/cpupower/bench/parse.c b/tools/power/cpupower/bench/parse.c
index f503fb5..9b65f05 100644
--- a/tools/power/cpupower/bench/parse.c
+++ b/tools/power/cpupower/bench/parse.c
@@ -65,7 +65,7 @@
 {
 	FILE *output = NULL;
 	int len;
-	char *filename;
+	char *filename, *filename_tmp;
 	struct utsname sysdata;
 	DIR *dir;
 
@@ -81,16 +81,22 @@
 
 	len = strlen(dirname) + 30;
 	filename = malloc(sizeof(char) * len);
+	if (!filename) {
+		perror("malloc");
+		goto out_dir;
+	}
 
 	if (uname(&sysdata) == 0) {
 		len += strlen(sysdata.nodename) + strlen(sysdata.release);
-		filename = realloc(filename, sizeof(char) * len);
+		filename_tmp = realloc(filename, sizeof(*filename) * len);
 
-		if (filename == NULL) {
+		if (filename_tmp == NULL) {
+			free(filename);
 			perror("realloc");
-			return NULL;
+			goto out_dir;
 		}
 
+		filename = filename_tmp;
 		snprintf(filename, len - 1, "%s/benchmark_%s_%s_%li.log",
 			dirname, sysdata.nodename, sysdata.release, time(NULL));
 	} else {
@@ -104,12 +110,16 @@
 	if (output == NULL) {
 		perror("fopen");
 		fprintf(stderr, "error: unable to open logfile\n");
+		goto out;
 	}
 
 	fprintf(stdout, "Logfile: %s\n", filename);
 
-	free(filename);
 	fprintf(output, "#round load sleep performance powersave percentage\n");
+out:
+	free(filename);
+out_dir:
+	closedir(dir);
 	return output;
 }
 
diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c
index f01e3f4..c25a74a 100644
--- a/tools/power/cpupower/bench/system.c
+++ b/tools/power/cpupower/bench/system.c
@@ -26,6 +26,7 @@
 #include <sched.h>
 
 #include <cpufreq.h>
+#include <cpupower.h>
 
 #include "config.h"
 #include "system.h"
@@ -60,7 +61,7 @@
 
 	dprintf("set %s as cpufreq governor\n", governor);
 
-	if (cpufreq_cpu_exists(cpu) != 0) {
+	if (cpupower_is_cpu_online(cpu) != 0) {
 		perror("cpufreq_cpu_exists");
 		fprintf(stderr, "error: cpu %u does not exist\n", cpu);
 		return -1;
diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c
index d961101..1b993fe 100644
--- a/tools/power/cpupower/lib/cpufreq.c
+++ b/tools/power/cpupower/lib/cpufreq.c
@@ -9,28 +9,190 @@
 #include <errno.h>
 #include <stdlib.h>
 #include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
 
 #include "cpufreq.h"
-#include "sysfs.h"
+#include "cpupower_intern.h"
 
-int cpufreq_cpu_exists(unsigned int cpu)
+/* CPUFREQ sysfs access **************************************************/
+
+/* helper function to read file from /sys into given buffer */
+/* fname is a relative path under "cpuX/cpufreq" dir */
+static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
+					    char *buf, size_t buflen)
 {
-	return sysfs_cpu_exists(cpu);
+	char path[SYSFS_PATH_MAX];
+
+	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
+			 cpu, fname);
+	return sysfs_read_file(path, buf, buflen);
 }
 
+/* helper function to write a new value to a /sys file */
+/* fname is a relative path under "cpuX/cpufreq" dir */
+static unsigned int sysfs_cpufreq_write_file(unsigned int cpu,
+					     const char *fname,
+					     const char *value, size_t len)
+{
+	char path[SYSFS_PATH_MAX];
+	int fd;
+	ssize_t numwrite;
+
+	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
+			 cpu, fname);
+
+	fd = open(path, O_WRONLY);
+	if (fd == -1)
+		return 0;
+
+	numwrite = write(fd, value, len);
+	if (numwrite < 1) {
+		close(fd);
+		return 0;
+	}
+
+	close(fd);
+
+	return (unsigned int) numwrite;
+}
+
+/* read access to files which contain one numeric value */
+
+enum cpufreq_value {
+	CPUINFO_CUR_FREQ,
+	CPUINFO_MIN_FREQ,
+	CPUINFO_MAX_FREQ,
+	CPUINFO_LATENCY,
+	SCALING_CUR_FREQ,
+	SCALING_MIN_FREQ,
+	SCALING_MAX_FREQ,
+	STATS_NUM_TRANSITIONS,
+	MAX_CPUFREQ_VALUE_READ_FILES
+};
+
+static const char *cpufreq_value_files[MAX_CPUFREQ_VALUE_READ_FILES] = {
+	[CPUINFO_CUR_FREQ] = "cpuinfo_cur_freq",
+	[CPUINFO_MIN_FREQ] = "cpuinfo_min_freq",
+	[CPUINFO_MAX_FREQ] = "cpuinfo_max_freq",
+	[CPUINFO_LATENCY]  = "cpuinfo_transition_latency",
+	[SCALING_CUR_FREQ] = "scaling_cur_freq",
+	[SCALING_MIN_FREQ] = "scaling_min_freq",
+	[SCALING_MAX_FREQ] = "scaling_max_freq",
+	[STATS_NUM_TRANSITIONS] = "stats/total_trans"
+};
+
+
+static unsigned long sysfs_cpufreq_get_one_value(unsigned int cpu,
+						 enum cpufreq_value which)
+{
+	unsigned long value;
+	unsigned int len;
+	char linebuf[MAX_LINE_LEN];
+	char *endp;
+
+	if (which >= MAX_CPUFREQ_VALUE_READ_FILES)
+		return 0;
+
+	len = sysfs_cpufreq_read_file(cpu, cpufreq_value_files[which],
+				linebuf, sizeof(linebuf));
+
+	if (len == 0)
+		return 0;
+
+	value = strtoul(linebuf, &endp, 0);
+
+	if (endp == linebuf || errno == ERANGE)
+		return 0;
+
+	return value;
+}
+
+/* read access to files which contain one string */
+
+enum cpufreq_string {
+	SCALING_DRIVER,
+	SCALING_GOVERNOR,
+	MAX_CPUFREQ_STRING_FILES
+};
+
+static const char *cpufreq_string_files[MAX_CPUFREQ_STRING_FILES] = {
+	[SCALING_DRIVER] = "scaling_driver",
+	[SCALING_GOVERNOR] = "scaling_governor",
+};
+
+
+static char *sysfs_cpufreq_get_one_string(unsigned int cpu,
+					   enum cpufreq_string which)
+{
+	char linebuf[MAX_LINE_LEN];
+	char *result;
+	unsigned int len;
+
+	if (which >= MAX_CPUFREQ_STRING_FILES)
+		return NULL;
+
+	len = sysfs_cpufreq_read_file(cpu, cpufreq_string_files[which],
+				linebuf, sizeof(linebuf));
+	if (len == 0)
+		return NULL;
+
+	result = strdup(linebuf);
+	if (result == NULL)
+		return NULL;
+
+	if (result[strlen(result) - 1] == '\n')
+		result[strlen(result) - 1] = '\0';
+
+	return result;
+}
+
+/* write access */
+
+enum cpufreq_write {
+	WRITE_SCALING_MIN_FREQ,
+	WRITE_SCALING_MAX_FREQ,
+	WRITE_SCALING_GOVERNOR,
+	WRITE_SCALING_SET_SPEED,
+	MAX_CPUFREQ_WRITE_FILES
+};
+
+static const char *cpufreq_write_files[MAX_CPUFREQ_WRITE_FILES] = {
+	[WRITE_SCALING_MIN_FREQ] = "scaling_min_freq",
+	[WRITE_SCALING_MAX_FREQ] = "scaling_max_freq",
+	[WRITE_SCALING_GOVERNOR] = "scaling_governor",
+	[WRITE_SCALING_SET_SPEED] = "scaling_setspeed",
+};
+
+static int sysfs_cpufreq_write_one_value(unsigned int cpu,
+					 enum cpufreq_write which,
+					 const char *new_value, size_t len)
+{
+	if (which >= MAX_CPUFREQ_WRITE_FILES)
+		return 0;
+
+	if (sysfs_cpufreq_write_file(cpu, cpufreq_write_files[which],
+					new_value, len) != len)
+		return -ENODEV;
+
+	return 0;
+};
+
 unsigned long cpufreq_get_freq_kernel(unsigned int cpu)
 {
-	return sysfs_get_freq_kernel(cpu);
+	return sysfs_cpufreq_get_one_value(cpu, SCALING_CUR_FREQ);
 }
 
 unsigned long cpufreq_get_freq_hardware(unsigned int cpu)
 {
-	return sysfs_get_freq_hardware(cpu);
+	return sysfs_cpufreq_get_one_value(cpu, CPUINFO_CUR_FREQ);
 }
 
 unsigned long cpufreq_get_transition_latency(unsigned int cpu)
 {
-	return sysfs_get_freq_transition_latency(cpu);
+	return sysfs_cpufreq_get_one_value(cpu, CPUINFO_LATENCY);
 }
 
 int cpufreq_get_hardware_limits(unsigned int cpu,
@@ -39,12 +201,21 @@
 {
 	if ((!min) || (!max))
 		return -EINVAL;
-	return sysfs_get_freq_hardware_limits(cpu, min, max);
+
+	*min = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MIN_FREQ);
+	if (!*min)
+		return -ENODEV;
+
+	*max = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MAX_FREQ);
+	if (!*max)
+		return -ENODEV;
+
+	return 0;
 }
 
 char *cpufreq_get_driver(unsigned int cpu)
 {
-	return sysfs_get_freq_driver(cpu);
+	return sysfs_cpufreq_get_one_string(cpu, SCALING_DRIVER);
 }
 
 void cpufreq_put_driver(char *ptr)
@@ -56,7 +227,26 @@
 
 struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu)
 {
-	return sysfs_get_freq_policy(cpu);
+	struct cpufreq_policy *policy;
+
+	policy = malloc(sizeof(struct cpufreq_policy));
+	if (!policy)
+		return NULL;
+
+	policy->governor = sysfs_cpufreq_get_one_string(cpu, SCALING_GOVERNOR);
+	if (!policy->governor) {
+		free(policy);
+		return NULL;
+	}
+	policy->min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ);
+	policy->max = sysfs_cpufreq_get_one_value(cpu, SCALING_MAX_FREQ);
+	if ((!policy->min) || (!policy->max)) {
+		free(policy->governor);
+		free(policy);
+		return NULL;
+	}
+
+	return policy;
 }
 
 void cpufreq_put_policy(struct cpufreq_policy *policy)
@@ -72,7 +262,57 @@
 struct cpufreq_available_governors *cpufreq_get_available_governors(unsigned
 								int cpu)
 {
-	return sysfs_get_freq_available_governors(cpu);
+	struct cpufreq_available_governors *first = NULL;
+	struct cpufreq_available_governors *current = NULL;
+	char linebuf[MAX_LINE_LEN];
+	unsigned int pos, i;
+	unsigned int len;
+
+	len = sysfs_cpufreq_read_file(cpu, "scaling_available_governors",
+				linebuf, sizeof(linebuf));
+	if (len == 0)
+		return NULL;
+
+	pos = 0;
+	for (i = 0; i < len; i++) {
+		if (linebuf[i] == ' ' || linebuf[i] == '\n') {
+			if (i - pos < 2)
+				continue;
+			if (current) {
+				current->next = malloc(sizeof(*current));
+				if (!current->next)
+					goto error_out;
+				current = current->next;
+			} else {
+				first = malloc(sizeof(*first));
+				if (!first)
+					goto error_out;
+				current = first;
+			}
+			current->first = first;
+			current->next = NULL;
+
+			current->governor = malloc(i - pos + 1);
+			if (!current->governor)
+				goto error_out;
+
+			memcpy(current->governor, linebuf + pos, i - pos);
+			current->governor[i - pos] = '\0';
+			pos = i + 1;
+		}
+	}
+
+	return first;
+
+ error_out:
+	while (first) {
+		current = first->next;
+		if (first->governor)
+			free(first->governor);
+		free(first);
+		first = current;
+	}
+	return NULL;
 }
 
 void cpufreq_put_available_governors(struct cpufreq_available_governors *any)
@@ -96,7 +336,57 @@
 struct cpufreq_available_frequencies
 *cpufreq_get_available_frequencies(unsigned int cpu)
 {
-	return sysfs_get_available_frequencies(cpu);
+	struct cpufreq_available_frequencies *first = NULL;
+	struct cpufreq_available_frequencies *current = NULL;
+	char one_value[SYSFS_PATH_MAX];
+	char linebuf[MAX_LINE_LEN];
+	unsigned int pos, i;
+	unsigned int len;
+
+	len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies",
+				linebuf, sizeof(linebuf));
+	if (len == 0)
+		return NULL;
+
+	pos = 0;
+	for (i = 0; i < len; i++) {
+		if (linebuf[i] == ' ' || linebuf[i] == '\n') {
+			if (i - pos < 2)
+				continue;
+			if (i - pos >= SYSFS_PATH_MAX)
+				goto error_out;
+			if (current) {
+				current->next = malloc(sizeof(*current));
+				if (!current->next)
+					goto error_out;
+				current = current->next;
+			} else {
+				first = malloc(sizeof(*first));
+				if (!first)
+					goto error_out;
+				current = first;
+			}
+			current->first = first;
+			current->next = NULL;
+
+			memcpy(one_value, linebuf + pos, i - pos);
+			one_value[i - pos] = '\0';
+			if (sscanf(one_value, "%lu", &current->frequency) != 1)
+				goto error_out;
+
+			pos = i + 1;
+		}
+	}
+
+	return first;
+
+ error_out:
+	while (first) {
+		current = first->next;
+		free(first);
+		first = current;
+	}
+	return NULL;
 }
 
 void cpufreq_put_available_frequencies(struct cpufreq_available_frequencies
@@ -114,10 +404,65 @@
 	}
 }
 
+static struct cpufreq_affected_cpus *sysfs_get_cpu_list(unsigned int cpu,
+							const char *file)
+{
+	struct cpufreq_affected_cpus *first = NULL;
+	struct cpufreq_affected_cpus *current = NULL;
+	char one_value[SYSFS_PATH_MAX];
+	char linebuf[MAX_LINE_LEN];
+	unsigned int pos, i;
+	unsigned int len;
+
+	len = sysfs_cpufreq_read_file(cpu, file, linebuf, sizeof(linebuf));
+	if (len == 0)
+		return NULL;
+
+	pos = 0;
+	for (i = 0; i < len; i++) {
+		if (i == len || linebuf[i] == ' ' || linebuf[i] == '\n') {
+			if (i - pos  < 1)
+				continue;
+			if (i - pos >= SYSFS_PATH_MAX)
+				goto error_out;
+			if (current) {
+				current->next = malloc(sizeof(*current));
+				if (!current->next)
+					goto error_out;
+				current = current->next;
+			} else {
+				first = malloc(sizeof(*first));
+				if (!first)
+					goto error_out;
+				current = first;
+			}
+			current->first = first;
+			current->next = NULL;
+
+			memcpy(one_value, linebuf + pos, i - pos);
+			one_value[i - pos] = '\0';
+
+			if (sscanf(one_value, "%u", &current->cpu) != 1)
+				goto error_out;
+
+			pos = i + 1;
+		}
+	}
+
+	return first;
+
+ error_out:
+	while (first) {
+		current = first->next;
+		free(first);
+		first = current;
+	}
+	return NULL;
+}
 
 struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned int cpu)
 {
-	return sysfs_get_freq_affected_cpus(cpu);
+	return sysfs_get_cpu_list(cpu, "affected_cpus");
 }
 
 void cpufreq_put_affected_cpus(struct cpufreq_affected_cpus *any)
@@ -138,7 +483,7 @@
 
 struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned int cpu)
 {
-	return sysfs_get_freq_related_cpus(cpu);
+	return sysfs_get_cpu_list(cpu, "related_cpus");
 }
 
 void cpufreq_put_related_cpus(struct cpufreq_affected_cpus *any)
@@ -146,45 +491,208 @@
 	cpufreq_put_affected_cpus(any);
 }
 
+static int verify_gov(char *new_gov, char *passed_gov)
+{
+	unsigned int i, j = 0;
+
+	if (!passed_gov || (strlen(passed_gov) > 19))
+		return -EINVAL;
+
+	strncpy(new_gov, passed_gov, 20);
+	for (i = 0; i < 20; i++) {
+		if (j) {
+			new_gov[i] = '\0';
+			continue;
+		}
+		if ((new_gov[i] >= 'a') && (new_gov[i] <= 'z'))
+			continue;
+
+		if ((new_gov[i] >= 'A') && (new_gov[i] <= 'Z'))
+			continue;
+
+		if (new_gov[i] == '-')
+			continue;
+
+		if (new_gov[i] == '_')
+			continue;
+
+		if (new_gov[i] == '\0') {
+			j = 1;
+			continue;
+		}
+		return -EINVAL;
+	}
+	new_gov[19] = '\0';
+	return 0;
+}
 
 int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy)
 {
+	char min[SYSFS_PATH_MAX];
+	char max[SYSFS_PATH_MAX];
+	char gov[SYSFS_PATH_MAX];
+	int ret;
+	unsigned long old_min;
+	int write_max_first;
+
 	if (!policy || !(policy->governor))
 		return -EINVAL;
 
-	return sysfs_set_freq_policy(cpu, policy);
+	if (policy->max < policy->min)
+		return -EINVAL;
+
+	if (verify_gov(gov, policy->governor))
+		return -EINVAL;
+
+	snprintf(min, SYSFS_PATH_MAX, "%lu", policy->min);
+	snprintf(max, SYSFS_PATH_MAX, "%lu", policy->max);
+
+	old_min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ);
+	write_max_first = (old_min && (policy->max < old_min) ? 0 : 1);
+
+	if (write_max_first) {
+		ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
+						    max, strlen(max));
+		if (ret)
+			return ret;
+	}
+
+	ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ, min,
+					    strlen(min));
+	if (ret)
+		return ret;
+
+	if (!write_max_first) {
+		ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
+						    max, strlen(max));
+		if (ret)
+			return ret;
+	}
+
+	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR,
+					     gov, strlen(gov));
 }
 
 
 int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq)
 {
-	return sysfs_modify_freq_policy_min(cpu, min_freq);
+	char value[SYSFS_PATH_MAX];
+
+	snprintf(value, SYSFS_PATH_MAX, "%lu", min_freq);
+
+	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ,
+					     value, strlen(value));
 }
 
 
 int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq)
 {
-	return sysfs_modify_freq_policy_max(cpu, max_freq);
-}
+	char value[SYSFS_PATH_MAX];
 
+	snprintf(value, SYSFS_PATH_MAX, "%lu", max_freq);
+
+	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
+					     value, strlen(value));
+}
 
 int cpufreq_modify_policy_governor(unsigned int cpu, char *governor)
 {
+	char new_gov[SYSFS_PATH_MAX];
+
 	if ((!governor) || (strlen(governor) > 19))
 		return -EINVAL;
 
-	return sysfs_modify_freq_policy_governor(cpu, governor);
+	if (verify_gov(new_gov, governor))
+		return -EINVAL;
+
+	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR,
+					     new_gov, strlen(new_gov));
 }
 
 int cpufreq_set_frequency(unsigned int cpu, unsigned long target_frequency)
 {
-	return sysfs_set_frequency(cpu, target_frequency);
+	struct cpufreq_policy *pol = cpufreq_get_policy(cpu);
+	char userspace_gov[] = "userspace";
+	char freq[SYSFS_PATH_MAX];
+	int ret;
+
+	if (!pol)
+		return -ENODEV;
+
+	if (strncmp(pol->governor, userspace_gov, 9) != 0) {
+		ret = cpufreq_modify_policy_governor(cpu, userspace_gov);
+		if (ret) {
+			cpufreq_put_policy(pol);
+			return ret;
+		}
+	}
+
+	cpufreq_put_policy(pol);
+
+	snprintf(freq, SYSFS_PATH_MAX, "%lu", target_frequency);
+
+	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_SET_SPEED,
+					     freq, strlen(freq));
 }
 
 struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
 					unsigned long long *total_time)
 {
-	return sysfs_get_freq_stats(cpu, total_time);
+	struct cpufreq_stats *first = NULL;
+	struct cpufreq_stats *current = NULL;
+	char one_value[SYSFS_PATH_MAX];
+	char linebuf[MAX_LINE_LEN];
+	unsigned int pos, i;
+	unsigned int len;
+
+	len = sysfs_cpufreq_read_file(cpu, "stats/time_in_state",
+				linebuf, sizeof(linebuf));
+	if (len == 0)
+		return NULL;
+
+	*total_time = 0;
+	pos = 0;
+	for (i = 0; i < len; i++) {
+		if (i == strlen(linebuf) || linebuf[i] == '\n')	{
+			if (i - pos < 2)
+				continue;
+			if ((i - pos) >= SYSFS_PATH_MAX)
+				goto error_out;
+			if (current) {
+				current->next = malloc(sizeof(*current));
+				if (!current->next)
+					goto error_out;
+				current = current->next;
+			} else {
+				first = malloc(sizeof(*first));
+				if (!first)
+					goto error_out;
+				current = first;
+			}
+			current->first = first;
+			current->next = NULL;
+
+			memcpy(one_value, linebuf + pos, i - pos);
+			one_value[i - pos] = '\0';
+			if (sscanf(one_value, "%lu %llu",
+					&current->frequency,
+					&current->time_in_state) != 2)
+				goto error_out;
+
+			*total_time = *total_time + current->time_in_state;
+			pos = i + 1;
+		}
+	}
+
+	return first;
+
+ error_out:
+	while (first) {
+		current = first->next;
+		free(first);
+		first = current;
+	}
+	return NULL;
 }
 
 void cpufreq_put_stats(struct cpufreq_stats *any)
@@ -204,5 +712,5 @@
 
 unsigned long cpufreq_get_transitions(unsigned int cpu)
 {
-	return sysfs_get_freq_transitions(cpu);
+	return sysfs_cpufreq_get_one_value(cpu, STATS_NUM_TRANSITIONS);
 }
diff --git a/tools/power/cpupower/lib/cpufreq.h b/tools/power/cpupower/lib/cpufreq.h
index 3aae8e7..3b005c3 100644
--- a/tools/power/cpupower/lib/cpufreq.h
+++ b/tools/power/cpupower/lib/cpufreq.h
@@ -17,8 +17,8 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
-#ifndef _CPUFREQ_H
-#define _CPUFREQ_H 1
+#ifndef __CPUPOWER_CPUFREQ_H__
+#define __CPUPOWER_CPUFREQ_H__
 
 struct cpufreq_policy {
 	unsigned long min;
@@ -58,13 +58,6 @@
 extern "C" {
 #endif
 
-/*
- * returns 0 if the specified CPU is present (it doesn't say
- * whether it is online!), and an error value if not.
- */
-
-extern int cpufreq_cpu_exists(unsigned int cpu);
-
 /* determine current CPU frequency
  * - _kernel variant means kernel's opinion of CPU frequency
  * - _hardware variant means actual hardware CPU frequency,
@@ -73,9 +66,9 @@
  * returns 0 on failure, else frequency in kHz.
  */
 
-extern unsigned long cpufreq_get_freq_kernel(unsigned int cpu);
+unsigned long cpufreq_get_freq_kernel(unsigned int cpu);
 
-extern unsigned long cpufreq_get_freq_hardware(unsigned int cpu);
+unsigned long cpufreq_get_freq_hardware(unsigned int cpu);
 
 #define cpufreq_get(cpu) cpufreq_get_freq_kernel(cpu);
 
@@ -84,7 +77,7 @@
  *
  * returns 0 on failure, else transition latency in 10^(-9) s = nanoseconds
  */
-extern unsigned long cpufreq_get_transition_latency(unsigned int cpu);
+unsigned long cpufreq_get_transition_latency(unsigned int cpu);
 
 
 /* determine hardware CPU frequency limits
@@ -93,7 +86,7 @@
  * considerations by cpufreq policy notifiers in the kernel.
  */
 
-extern int cpufreq_get_hardware_limits(unsigned int cpu,
+int cpufreq_get_hardware_limits(unsigned int cpu,
 				unsigned long *min,
 				unsigned long *max);
 
@@ -104,9 +97,9 @@
  * to avoid memory leakage, please.
  */
 
-extern char *cpufreq_get_driver(unsigned int cpu);
+char *cpufreq_get_driver(unsigned int cpu);
 
-extern void cpufreq_put_driver(char *ptr);
+void cpufreq_put_driver(char *ptr);
 
 
 /* determine CPUfreq policy currently used
@@ -116,9 +109,9 @@
  */
 
 
-extern struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu);
+struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu);
 
-extern void cpufreq_put_policy(struct cpufreq_policy *policy);
+void cpufreq_put_policy(struct cpufreq_policy *policy);
 
 
 /* determine CPUfreq governors currently available
@@ -129,10 +122,10 @@
  */
 
 
-extern struct cpufreq_available_governors
+struct cpufreq_available_governors
 *cpufreq_get_available_governors(unsigned int cpu);
 
-extern void cpufreq_put_available_governors(
+void cpufreq_put_available_governors(
 	struct cpufreq_available_governors *first);
 
 
@@ -143,10 +136,10 @@
  * cpufreq_put_available_frequencies after use.
  */
 
-extern struct cpufreq_available_frequencies
+struct cpufreq_available_frequencies
 *cpufreq_get_available_frequencies(unsigned int cpu);
 
-extern void cpufreq_put_available_frequencies(
+void cpufreq_put_available_frequencies(
 		struct cpufreq_available_frequencies *first);
 
 
@@ -156,10 +149,10 @@
  * to avoid memory leakage, please.
  */
 
-extern struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned
+struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned
 							int cpu);
 
-extern void cpufreq_put_affected_cpus(struct cpufreq_affected_cpus *first);
+void cpufreq_put_affected_cpus(struct cpufreq_affected_cpus *first);
 
 
 /* determine related CPUs
@@ -168,10 +161,10 @@
  * to avoid memory leakage, please.
  */
 
-extern struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned
+struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned
 							int cpu);
 
-extern void cpufreq_put_related_cpus(struct cpufreq_affected_cpus *first);
+void cpufreq_put_related_cpus(struct cpufreq_affected_cpus *first);
 
 
 /* determine stats for cpufreq subsystem
@@ -179,12 +172,12 @@
  * This is not available in all kernel versions or configurations.
  */
 
-extern struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
+struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
 					unsigned long long *total_time);
 
-extern void cpufreq_put_stats(struct cpufreq_stats *stats);
+void cpufreq_put_stats(struct cpufreq_stats *stats);
 
-extern unsigned long cpufreq_get_transitions(unsigned int cpu);
+unsigned long cpufreq_get_transitions(unsigned int cpu);
 
 
 /* set new cpufreq policy
@@ -193,7 +186,7 @@
  * but results may differ depending e.g. on governors being available.
  */
 
-extern int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy);
+int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy);
 
 
 /* modify a policy by only changing min/max freq or governor
@@ -201,9 +194,9 @@
  * Does not check whether result is what was intended.
  */
 
-extern int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq);
-extern int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq);
-extern int cpufreq_modify_policy_governor(unsigned int cpu, char *governor);
+int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq);
+int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq);
+int cpufreq_modify_policy_governor(unsigned int cpu, char *governor);
 
 
 /* set a specific frequency
@@ -213,7 +206,7 @@
  * occurs. Also does not work on ->range() cpufreq drivers.
  */
 
-extern int cpufreq_set_frequency(unsigned int cpu,
+int cpufreq_set_frequency(unsigned int cpu,
 				unsigned long target_frequency);
 
 #ifdef __cplusplus
diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c
new file mode 100644
index 0000000..9bd4c76
--- /dev/null
+++ b/tools/power/cpupower/lib/cpuidle.c
@@ -0,0 +1,380 @@
+/*
+ *  (C) 2004-2009  Dominik Brodowski <linux@dominikbrodowski.de>
+ *  (C) 2011       Thomas Renninger <trenn@novell.com> Novell Inc.
+ *
+ *  Licensed under the terms of the GNU GPL License version 2.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "cpuidle.h"
+#include "cpupower_intern.h"
+
+/*
+ * helper function to check whether a file under "../cpuX/cpuidle/stateX/" dir
+ * exists.
+ * For example, the ability to disable C-states was only introduced in later
+ * kernel versions; this function can be used to explicitly check for that
+ * feature.
+ *
+ * returns 1 if the file exists, 0 otherwise.
+ */
+static
+unsigned int cpuidle_state_file_exists(unsigned int cpu,
+				       unsigned int idlestate,
+				       const char *fname)
+{
+	char path[SYSFS_PATH_MAX];
+	struct stat statbuf;
+
+
+	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s",
+		 cpu, idlestate, fname);
+	if (stat(path, &statbuf) != 0)
+		return 0;
+	return 1;
+}
+
+/*
+ * helper function to read file from /sys into given buffer
+ * fname is a relative path under "cpuX/cpuidle/stateX/" dir
+ * cstates start counting at 0; C0 is not counted as a cstate.
+ * This means if you want C1 info, pass 0 as the idlestate param.
+ */
+static
+unsigned int cpuidle_state_read_file(unsigned int cpu,
+					    unsigned int idlestate,
+					    const char *fname, char *buf,
+					    size_t buflen)
+{
+	char path[SYSFS_PATH_MAX];
+	int fd;
+	ssize_t numread;
+
+	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s",
+		 cpu, idlestate, fname);
+
+	fd = open(path, O_RDONLY);
+	if (fd == -1)
+		return 0;
+
+	numread = read(fd, buf, buflen - 1);
+	if (numread < 1) {
+		close(fd);
+		return 0;
+	}
+
+	buf[numread] = '\0';
+	close(fd);
+
+	return (unsigned int) numread;
+}
+
+/*
+ * helper function to write a new value to a /sys file
+ * fname is a relative path under "../cpuX/cpuidle/cstateY/" dir
+ *
+ * Returns the number of bytes written or 0 on error
+ */
+static
+unsigned int cpuidle_state_write_file(unsigned int cpu,
+				      unsigned int idlestate,
+				      const char *fname,
+				      const char *value, size_t len)
+{
+	char path[SYSFS_PATH_MAX];
+	int fd;
+	ssize_t numwrite;
+
+	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s",
+		 cpu, idlestate, fname);
+
+	fd = open(path, O_WRONLY);
+	if (fd == -1)
+		return 0;
+
+	numwrite = write(fd, value, len);
+	if (numwrite < 1) {
+		close(fd);
+		return 0;
+	}
+
+	close(fd);
+
+	return (unsigned int) numwrite;
+}
+
+/* read access to files which contain one numeric value */
+
+enum idlestate_value {
+	IDLESTATE_USAGE,
+	IDLESTATE_POWER,
+	IDLESTATE_LATENCY,
+	IDLESTATE_TIME,
+	IDLESTATE_DISABLE,
+	MAX_IDLESTATE_VALUE_FILES
+};
+
+static const char *idlestate_value_files[MAX_IDLESTATE_VALUE_FILES] = {
+	[IDLESTATE_USAGE] = "usage",
+	[IDLESTATE_POWER] = "power",
+	[IDLESTATE_LATENCY] = "latency",
+	[IDLESTATE_TIME]  = "time",
+	[IDLESTATE_DISABLE]  = "disable",
+};
+
+static
+unsigned long long cpuidle_state_get_one_value(unsigned int cpu,
+					       unsigned int idlestate,
+					       enum idlestate_value which)
+{
+	unsigned long long value;
+	unsigned int len;
+	char linebuf[MAX_LINE_LEN];
+	char *endp;
+
+	if (which >= MAX_IDLESTATE_VALUE_FILES)
+		return 0;
+
+	len = cpuidle_state_read_file(cpu, idlestate,
+				      idlestate_value_files[which],
+				      linebuf, sizeof(linebuf));
+	if (len == 0)
+		return 0;
+
+	value = strtoull(linebuf, &endp, 0);
+
+	if (endp == linebuf || errno == ERANGE)
+		return 0;
+
+	return value;
+}
+
+/* read access to files which contain one string */
+
+enum idlestate_string {
+	IDLESTATE_DESC,
+	IDLESTATE_NAME,
+	MAX_IDLESTATE_STRING_FILES
+};
+
+static const char *idlestate_string_files[MAX_IDLESTATE_STRING_FILES] = {
+	[IDLESTATE_DESC] = "desc",
+	[IDLESTATE_NAME] = "name",
+};
+
+
+static char *cpuidle_state_get_one_string(unsigned int cpu,
+					unsigned int idlestate,
+					enum idlestate_string which)
+{
+	char linebuf[MAX_LINE_LEN];
+	char *result;
+	unsigned int len;
+
+	if (which >= MAX_IDLESTATE_STRING_FILES)
+		return NULL;
+
+	len = cpuidle_state_read_file(cpu, idlestate,
+				      idlestate_string_files[which],
+				      linebuf, sizeof(linebuf));
+	if (len == 0)
+		return NULL;
+
+	result = strdup(linebuf);
+	if (result == NULL)
+		return NULL;
+
+	if (result[strlen(result) - 1] == '\n')
+		result[strlen(result) - 1] = '\0';
+
+	return result;
+}
+
+/*
+ * Returns:
+ *    1  if disabled
+ *    0  if enabled
+ *    -1 if idlestate is not available
+ *    -2 if disabling is not supported by the kernel
+ */
+int cpuidle_is_state_disabled(unsigned int cpu,
+				unsigned int idlestate)
+{
+	if (cpuidle_state_count(cpu) <= idlestate)
+		return -1;
+
+	if (!cpuidle_state_file_exists(cpu, idlestate,
+				 idlestate_value_files[IDLESTATE_DISABLE]))
+		return -2;
+	return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_DISABLE);
+}
+
+/*
+ * Pass 1 as last argument to disable or 0 to enable the state
+ * Returns:
+ *    0  on success
+ *    negative values on error, for example:
+ *      -1 if idlestate is not available
+ *      -2 if disabling is not supported by the kernel
+ *      -3 if there is no write access to disable/enable C-states
+ */
+int cpuidle_state_disable(unsigned int cpu,
+			    unsigned int idlestate,
+			    unsigned int disable)
+{
+	char value[SYSFS_PATH_MAX];
+	int bytes_written;
+
+	if (cpuidle_state_count(cpu) <= idlestate)
+		return -1;
+
+	if (!cpuidle_state_file_exists(cpu, idlestate,
+				 idlestate_value_files[IDLESTATE_DISABLE]))
+		return -2;
+
+	snprintf(value, SYSFS_PATH_MAX, "%u", disable);
+
+	bytes_written = cpuidle_state_write_file(cpu, idlestate, "disable",
+						   value, sizeof(disable));
+	if (bytes_written)
+		return 0;
+	return -3;
+}
+
+unsigned long cpuidle_state_latency(unsigned int cpu,
+					  unsigned int idlestate)
+{
+	return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_LATENCY);
+}
+
+unsigned long cpuidle_state_usage(unsigned int cpu,
+					unsigned int idlestate)
+{
+	return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_USAGE);
+}
+
+unsigned long long cpuidle_state_time(unsigned int cpu,
+					unsigned int idlestate)
+{
+	return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_TIME);
+}
+
+char *cpuidle_state_name(unsigned int cpu, unsigned int idlestate)
+{
+	return cpuidle_state_get_one_string(cpu, idlestate, IDLESTATE_NAME);
+}
+
+char *cpuidle_state_desc(unsigned int cpu, unsigned int idlestate)
+{
+	return cpuidle_state_get_one_string(cpu, idlestate, IDLESTATE_DESC);
+}
+
+/*
+ * Returns number of supported C-states of CPU core cpu
+ * Negative in the error case
+ * Zero if cpuidle does not export any C-states
+ */
+unsigned int cpuidle_state_count(unsigned int cpu)
+{
+	char file[SYSFS_PATH_MAX];
+	struct stat statbuf;
+	int idlestates = 1;
+
+
+	snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
+	if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
+		return 0;
+
+	snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
+	if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
+		return 0;
+
+	while (stat(file, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) {
+		snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU
+			 "cpu%u/cpuidle/state%d", cpu, idlestates);
+		idlestates++;
+	}
+	idlestates--;
+	return idlestates;
+}
+
+/* CPUidle general /sys/devices/system/cpu/cpuidle/ sysfs access ********/
+
+/*
+ * helper function to read file from /sys into given buffer
+ * fname is a relative path under "cpu/cpuidle/" dir
+ */
+static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
+					    size_t buflen)
+{
+	char path[SYSFS_PATH_MAX];
+
+	snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
+
+	return sysfs_read_file(path, buf, buflen);
+}
+
+
+
+/* read access to files which contain one string */
+
+enum cpuidle_string {
+	CPUIDLE_GOVERNOR,
+	CPUIDLE_GOVERNOR_RO,
+	CPUIDLE_DRIVER,
+	MAX_CPUIDLE_STRING_FILES
+};
+
+static const char *cpuidle_string_files[MAX_CPUIDLE_STRING_FILES] = {
+	[CPUIDLE_GOVERNOR]	= "current_governor",
+	[CPUIDLE_GOVERNOR_RO]	= "current_governor_ro",
+	[CPUIDLE_DRIVER]	= "current_driver",
+};
+
+
+static char *sysfs_cpuidle_get_one_string(enum cpuidle_string which)
+{
+	char linebuf[MAX_LINE_LEN];
+	char *result;
+	unsigned int len;
+
+	if (which >= MAX_CPUIDLE_STRING_FILES)
+		return NULL;
+
+	len = sysfs_cpuidle_read_file(cpuidle_string_files[which],
+				linebuf, sizeof(linebuf));
+	if (len == 0)
+		return NULL;
+
+	result = strdup(linebuf);
+	if (result == NULL)
+		return NULL;
+
+	if (result[strlen(result) - 1] == '\n')
+		result[strlen(result) - 1] = '\0';
+
+	return result;
+}
+
+char *cpuidle_get_governor(void)
+{
+	char *tmp = sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR_RO);
+	if (!tmp)
+		return sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR);
+	else
+		return tmp;
+}
+
+char *cpuidle_get_driver(void)
+{
+	return sysfs_cpuidle_get_one_string(CPUIDLE_DRIVER);
+}
+/* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
diff --git a/tools/power/cpupower/lib/cpuidle.h b/tools/power/cpupower/lib/cpuidle.h
new file mode 100644
index 0000000..04eb3cf
--- /dev/null
+++ b/tools/power/cpupower/lib/cpuidle.h
@@ -0,0 +1,23 @@
+#ifndef __CPUPOWER_CPUIDLE_H__
+#define __CPUPOWER_CPUIDLE_H__
+
+int cpuidle_is_state_disabled(unsigned int cpu,
+				       unsigned int idlestate);
+int cpuidle_state_disable(unsigned int cpu, unsigned int idlestate,
+				   unsigned int disable);
+unsigned long cpuidle_state_latency(unsigned int cpu,
+						unsigned int idlestate);
+unsigned long cpuidle_state_usage(unsigned int cpu,
+					unsigned int idlestate);
+unsigned long long cpuidle_state_time(unsigned int cpu,
+						unsigned int idlestate);
+char *cpuidle_state_name(unsigned int cpu,
+				unsigned int idlestate);
+char *cpuidle_state_desc(unsigned int cpu,
+				unsigned int idlestate);
+unsigned int cpuidle_state_count(unsigned int cpu);
+
+char *cpuidle_get_governor(void);
+char *cpuidle_get_driver(void);
+
+#endif /* __CPUPOWER_CPUIDLE_H__ */
diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
new file mode 100644
index 0000000..9c395ec9
--- /dev/null
+++ b/tools/power/cpupower/lib/cpupower.c
@@ -0,0 +1,192 @@
+/*
+ *  (C) 2004-2009  Dominik Brodowski <linux@dominikbrodowski.de>
+ *
+ *  Licensed under the terms of the GNU GPL License version 2.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+
+#include "cpupower.h"
+#include "cpupower_intern.h"
+
+unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen)
+{
+	int fd;
+	ssize_t numread;
+
+	fd = open(path, O_RDONLY);
+	if (fd == -1)
+		return 0;
+
+	numread = read(fd, buf, buflen - 1);
+	if (numread < 1) {
+		close(fd);
+		return 0;
+	}
+
+	buf[numread] = '\0';
+	close(fd);
+
+	return (unsigned int) numread;
+}
+
+/*
+ * Detect whether a CPU is online
+ *
+ * Returns:
+ *     1 -> if CPU is online
+ *     0 -> if CPU is offline
+ *     negative errno values in error case
+ */
+int cpupower_is_cpu_online(unsigned int cpu)
+{
+	char path[SYSFS_PATH_MAX];
+	int fd;
+	ssize_t numread;
+	unsigned long long value;
+	char linebuf[MAX_LINE_LEN];
+	char *endp;
+	struct stat statbuf;
+
+	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu);
+
+	if (stat(path, &statbuf) != 0)
+		return 0;
+
+	/*
+	 * kernel without CONFIG_HOTPLUG_CPU
+	 * -> cpuX directory exists, but not cpuX/online file
+	 */
+	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu);
+	if (stat(path, &statbuf) != 0)
+		return 1;
+
+	fd = open(path, O_RDONLY);
+	if (fd == -1)
+		return -errno;
+
+	numread = read(fd, linebuf, MAX_LINE_LEN - 1);
+	if (numread < 1) {
+		close(fd);
+		return -EIO;
+	}
+	linebuf[numread] = '\0';
+	close(fd);
+
+	value = strtoull(linebuf, &endp, 0);
+	if (value > 1)
+		return -EINVAL;
+
+	return value;
+}
+
+/* returns -1 on failure, 0 on success */
+static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result)
+{
+	char linebuf[MAX_LINE_LEN];
+	char *endp;
+	char path[SYSFS_PATH_MAX];
+
+	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
+			 cpu, fname);
+	if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
+		return -1;
+	*result = strtol(linebuf, &endp, 0);
+	if (endp == linebuf || errno == ERANGE)
+		return -1;
+	return 0;
+}
+
+static int __compare(const void *t1, const void *t2)
+{
+	struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1;
+	struct cpuid_core_info *top2 = (struct cpuid_core_info *)t2;
+	if (top1->pkg < top2->pkg)
+		return -1;
+	else if (top1->pkg > top2->pkg)
+		return 1;
+	else if (top1->core < top2->core)
+		return -1;
+	else if (top1->core > top2->core)
+		return 1;
+	else if (top1->cpu < top2->cpu)
+		return -1;
+	else if (top1->cpu > top2->cpu)
+		return 1;
+	else
+		return 0;
+}
+
+/*
+ * Returns the number of CPUs, or a negative value on error; cpu_top must
+ * be passed to cpu_topology_release() to free resources.
+ *
+ * The array is sorted by ->pkg, ->core, then ->cpu.
+ */
+int get_cpu_topology(struct cpupower_topology *cpu_top)
+{
+	int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF);
+
+	cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus);
+	if (cpu_top->core_info == NULL)
+		return -ENOMEM;
+	cpu_top->pkgs = cpu_top->cores = 0;
+	for (cpu = 0; cpu < cpus; cpu++) {
+		cpu_top->core_info[cpu].cpu = cpu;
+		cpu_top->core_info[cpu].is_online = cpupower_is_cpu_online(cpu);
+		if (sysfs_topology_read_file(
+			cpu,
+			"physical_package_id",
+			&(cpu_top->core_info[cpu].pkg)) < 0) {
+			cpu_top->core_info[cpu].pkg = -1;
+			cpu_top->core_info[cpu].core = -1;
+			continue;
+		}
+		if (sysfs_topology_read_file(
+			cpu,
+			"core_id",
+			&(cpu_top->core_info[cpu].core)) < 0) {
+			cpu_top->core_info[cpu].pkg = -1;
+			cpu_top->core_info[cpu].core = -1;
+			continue;
+		}
+	}
+
+	qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info),
+	      __compare);
+
+	/* Count the number of distinct pkgs values. This works
+	   because the primary sort of the core_info struct was just
+	   done by pkg value. */
+	last_pkg = cpu_top->core_info[0].pkg;
+	for (cpu = 1; cpu < cpus; cpu++) {
+		if (cpu_top->core_info[cpu].pkg != last_pkg &&
+				cpu_top->core_info[cpu].pkg != -1) {
+
+			last_pkg = cpu_top->core_info[cpu].pkg;
+			cpu_top->pkgs++;
+		}
+	}
+	if (!(cpu_top->core_info[0].pkg == -1))
+		cpu_top->pkgs++;
+
+	/* Intel's core IDs are not numbered consecutively; there may be a
+	 * core_id of 3 but none of 2. Assume there is always a core 0.
+	 * Get the number of cores by counting duplicates within a package:
+	for (cpu = 0; cpu_top->core_info[cpu].pkg = 0 && cpu < cpus; cpu++) {
+		if (cpu_top->core_info[cpu].core == 0)
+	cpu_top->cores++;
+	*/
+	return cpus;
+}
+
+void cpu_topology_release(struct cpupower_topology cpu_top)
+{
+	free(cpu_top.core_info);
+}
diff --git a/tools/power/cpupower/lib/cpupower.h b/tools/power/cpupower/lib/cpupower.h
new file mode 100644
index 0000000..fa031fc
--- /dev/null
+++ b/tools/power/cpupower/lib/cpupower.h
@@ -0,0 +1,35 @@
+#ifndef __CPUPOWER_CPUPOWER_H__
+#define __CPUPOWER_CPUPOWER_H__
+
+struct cpupower_topology {
+	/* Number of CPU cores, packages, and threads per core in the system */
+	unsigned int cores;
+	unsigned int pkgs;
+	unsigned int threads; /* per core */
+
+	/* Array is allocated with one entry per core, holding per-core info */
+	struct cpuid_core_info *core_info;
+};
+
+struct cpuid_core_info {
+	int pkg;
+	int core;
+	int cpu;
+
+	/* flags */
+	unsigned int is_online:1;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int get_cpu_topology(struct cpupower_topology *cpu_top);
+void cpu_topology_release(struct cpupower_topology cpu_top);
+int cpupower_is_cpu_online(unsigned int cpu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/tools/power/cpupower/lib/cpupower_intern.h b/tools/power/cpupower/lib/cpupower_intern.h
new file mode 100644
index 0000000..f8ec400
--- /dev/null
+++ b/tools/power/cpupower/lib/cpupower_intern.h
@@ -0,0 +1,5 @@
+#define PATH_TO_CPU "/sys/devices/system/cpu/"
+#define MAX_LINE_LEN 4096
+#define SYSFS_PATH_MAX 255
+
+unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen);
diff --git a/tools/power/cpupower/lib/sysfs.c b/tools/power/cpupower/lib/sysfs.c
deleted file mode 100644
index 870713a..0000000
--- a/tools/power/cpupower/lib/sysfs.c
+++ /dev/null
@@ -1,672 +0,0 @@
-/*
- *  (C) 2004-2009  Dominik Brodowski <linux@dominikbrodowski.de>
- *
- *  Licensed under the terms of the GNU GPL License version 2.
- */
-
-#include <stdio.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include <limits.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
-
-#include "cpufreq.h"
-
-#define PATH_TO_CPU "/sys/devices/system/cpu/"
-#define MAX_LINE_LEN 4096
-#define SYSFS_PATH_MAX 255
-
-
-static unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen)
-{
-	int fd;
-	ssize_t numread;
-
-	fd = open(path, O_RDONLY);
-	if (fd == -1)
-		return 0;
-
-	numread = read(fd, buf, buflen - 1);
-	if (numread < 1) {
-		close(fd);
-		return 0;
-	}
-
-	buf[numread] = '\0';
-	close(fd);
-
-	return (unsigned int) numread;
-}
-
-
-/* CPUFREQ sysfs access **************************************************/
-
-/* helper function to read file from /sys into given buffer */
-/* fname is a relative path under "cpuX/cpufreq" dir */
-static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
-					    char *buf, size_t buflen)
-{
-	char path[SYSFS_PATH_MAX];
-
-	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
-			 cpu, fname);
-	return sysfs_read_file(path, buf, buflen);
-}
-
-/* helper function to write a new value to a /sys file */
-/* fname is a relative path under "cpuX/cpufreq" dir */
-static unsigned int sysfs_cpufreq_write_file(unsigned int cpu,
-					     const char *fname,
-					     const char *value, size_t len)
-{
-	char path[SYSFS_PATH_MAX];
-	int fd;
-	ssize_t numwrite;
-
-	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
-			 cpu, fname);
-
-	fd = open(path, O_WRONLY);
-	if (fd == -1)
-		return 0;
-
-	numwrite = write(fd, value, len);
-	if (numwrite < 1) {
-		close(fd);
-		return 0;
-	}
-
-	close(fd);
-
-	return (unsigned int) numwrite;
-}
-
-/* read access to files which contain one numeric value */
-
-enum cpufreq_value {
-	CPUINFO_CUR_FREQ,
-	CPUINFO_MIN_FREQ,
-	CPUINFO_MAX_FREQ,
-	CPUINFO_LATENCY,
-	SCALING_CUR_FREQ,
-	SCALING_MIN_FREQ,
-	SCALING_MAX_FREQ,
-	STATS_NUM_TRANSITIONS,
-	MAX_CPUFREQ_VALUE_READ_FILES
-};
-
-static const char *cpufreq_value_files[MAX_CPUFREQ_VALUE_READ_FILES] = {
-	[CPUINFO_CUR_FREQ] = "cpuinfo_cur_freq",
-	[CPUINFO_MIN_FREQ] = "cpuinfo_min_freq",
-	[CPUINFO_MAX_FREQ] = "cpuinfo_max_freq",
-	[CPUINFO_LATENCY]  = "cpuinfo_transition_latency",
-	[SCALING_CUR_FREQ] = "scaling_cur_freq",
-	[SCALING_MIN_FREQ] = "scaling_min_freq",
-	[SCALING_MAX_FREQ] = "scaling_max_freq",
-	[STATS_NUM_TRANSITIONS] = "stats/total_trans"
-};
-
-
-static unsigned long sysfs_cpufreq_get_one_value(unsigned int cpu,
-						 enum cpufreq_value which)
-{
-	unsigned long value;
-	unsigned int len;
-	char linebuf[MAX_LINE_LEN];
-	char *endp;
-
-	if (which >= MAX_CPUFREQ_VALUE_READ_FILES)
-		return 0;
-
-	len = sysfs_cpufreq_read_file(cpu, cpufreq_value_files[which],
-				linebuf, sizeof(linebuf));
-
-	if (len == 0)
-		return 0;
-
-	value = strtoul(linebuf, &endp, 0);
-
-	if (endp == linebuf || errno == ERANGE)
-		return 0;
-
-	return value;
-}
-
-/* read access to files which contain one string */
-
-enum cpufreq_string {
-	SCALING_DRIVER,
-	SCALING_GOVERNOR,
-	MAX_CPUFREQ_STRING_FILES
-};
-
-static const char *cpufreq_string_files[MAX_CPUFREQ_STRING_FILES] = {
-	[SCALING_DRIVER] = "scaling_driver",
-	[SCALING_GOVERNOR] = "scaling_governor",
-};
-
-
-static char *sysfs_cpufreq_get_one_string(unsigned int cpu,
-					   enum cpufreq_string which)
-{
-	char linebuf[MAX_LINE_LEN];
-	char *result;
-	unsigned int len;
-
-	if (which >= MAX_CPUFREQ_STRING_FILES)
-		return NULL;
-
-	len = sysfs_cpufreq_read_file(cpu, cpufreq_string_files[which],
-				linebuf, sizeof(linebuf));
-	if (len == 0)
-		return NULL;
-
-	result = strdup(linebuf);
-	if (result == NULL)
-		return NULL;
-
-	if (result[strlen(result) - 1] == '\n')
-		result[strlen(result) - 1] = '\0';
-
-	return result;
-}
-
-/* write access */
-
-enum cpufreq_write {
-	WRITE_SCALING_MIN_FREQ,
-	WRITE_SCALING_MAX_FREQ,
-	WRITE_SCALING_GOVERNOR,
-	WRITE_SCALING_SET_SPEED,
-	MAX_CPUFREQ_WRITE_FILES
-};
-
-static const char *cpufreq_write_files[MAX_CPUFREQ_WRITE_FILES] = {
-	[WRITE_SCALING_MIN_FREQ] = "scaling_min_freq",
-	[WRITE_SCALING_MAX_FREQ] = "scaling_max_freq",
-	[WRITE_SCALING_GOVERNOR] = "scaling_governor",
-	[WRITE_SCALING_SET_SPEED] = "scaling_setspeed",
-};
-
-static int sysfs_cpufreq_write_one_value(unsigned int cpu,
-					 enum cpufreq_write which,
-					 const char *new_value, size_t len)
-{
-	if (which >= MAX_CPUFREQ_WRITE_FILES)
-		return 0;
-
-	if (sysfs_cpufreq_write_file(cpu, cpufreq_write_files[which],
-					new_value, len) != len)
-		return -ENODEV;
-
-	return 0;
-};
-
-unsigned long sysfs_get_freq_kernel(unsigned int cpu)
-{
-	return sysfs_cpufreq_get_one_value(cpu, SCALING_CUR_FREQ);
-}
-
-unsigned long sysfs_get_freq_hardware(unsigned int cpu)
-{
-	return sysfs_cpufreq_get_one_value(cpu, CPUINFO_CUR_FREQ);
-}
-
-unsigned long sysfs_get_freq_transition_latency(unsigned int cpu)
-{
-	return sysfs_cpufreq_get_one_value(cpu, CPUINFO_LATENCY);
-}
-
-int sysfs_get_freq_hardware_limits(unsigned int cpu,
-			      unsigned long *min,
-			      unsigned long *max)
-{
-	if ((!min) || (!max))
-		return -EINVAL;
-
-	*min = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MIN_FREQ);
-	if (!*min)
-		return -ENODEV;
-
-	*max = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MAX_FREQ);
-	if (!*max)
-		return -ENODEV;
-
-	return 0;
-}
-
-char *sysfs_get_freq_driver(unsigned int cpu)
-{
-	return sysfs_cpufreq_get_one_string(cpu, SCALING_DRIVER);
-}
-
-struct cpufreq_policy *sysfs_get_freq_policy(unsigned int cpu)
-{
-	struct cpufreq_policy *policy;
-
-	policy = malloc(sizeof(struct cpufreq_policy));
-	if (!policy)
-		return NULL;
-
-	policy->governor = sysfs_cpufreq_get_one_string(cpu, SCALING_GOVERNOR);
-	if (!policy->governor) {
-		free(policy);
-		return NULL;
-	}
-	policy->min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ);
-	policy->max = sysfs_cpufreq_get_one_value(cpu, SCALING_MAX_FREQ);
-	if ((!policy->min) || (!policy->max)) {
-		free(policy->governor);
-		free(policy);
-		return NULL;
-	}
-
-	return policy;
-}
-
-struct cpufreq_available_governors *
-sysfs_get_freq_available_governors(unsigned int cpu) {
-	struct cpufreq_available_governors *first = NULL;
-	struct cpufreq_available_governors *current = NULL;
-	char linebuf[MAX_LINE_LEN];
-	unsigned int pos, i;
-	unsigned int len;
-
-	len = sysfs_cpufreq_read_file(cpu, "scaling_available_governors",
-				linebuf, sizeof(linebuf));
-	if (len == 0)
-		return NULL;
-
-	pos = 0;
-	for (i = 0; i < len; i++) {
-		if (linebuf[i] == ' ' || linebuf[i] == '\n') {
-			if (i - pos < 2)
-				continue;
-			if (current) {
-				current->next = malloc(sizeof(*current));
-				if (!current->next)
-					goto error_out;
-				current = current->next;
-			} else {
-				first = malloc(sizeof(*first));
-				if (!first)
-					goto error_out;
-				current = first;
-			}
-			current->first = first;
-			current->next = NULL;
-
-			current->governor = malloc(i - pos + 1);
-			if (!current->governor)
-				goto error_out;
-
-			memcpy(current->governor, linebuf + pos, i - pos);
-			current->governor[i - pos] = '\0';
-			pos = i + 1;
-		}
-	}
-
-	return first;
-
- error_out:
-	while (first) {
-		current = first->next;
-		if (first->governor)
-			free(first->governor);
-		free(first);
-		first = current;
-	}
-	return NULL;
-}
-
-
-struct cpufreq_available_frequencies *
-sysfs_get_available_frequencies(unsigned int cpu) {
-	struct cpufreq_available_frequencies *first = NULL;
-	struct cpufreq_available_frequencies *current = NULL;
-	char one_value[SYSFS_PATH_MAX];
-	char linebuf[MAX_LINE_LEN];
-	unsigned int pos, i;
-	unsigned int len;
-
-	len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies",
-				linebuf, sizeof(linebuf));
-	if (len == 0)
-		return NULL;
-
-	pos = 0;
-	for (i = 0; i < len; i++) {
-		if (linebuf[i] == ' ' || linebuf[i] == '\n') {
-			if (i - pos < 2)
-				continue;
-			if (i - pos >= SYSFS_PATH_MAX)
-				goto error_out;
-			if (current) {
-				current->next = malloc(sizeof(*current));
-				if (!current->next)
-					goto error_out;
-				current = current->next;
-			} else {
-				first = malloc(sizeof(*first));
-				if (!first)
-					goto error_out;
-				current = first;
-			}
-			current->first = first;
-			current->next = NULL;
-
-			memcpy(one_value, linebuf + pos, i - pos);
-			one_value[i - pos] = '\0';
-			if (sscanf(one_value, "%lu", &current->frequency) != 1)
-				goto error_out;
-
-			pos = i + 1;
-		}
-	}
-
-	return first;
-
- error_out:
-	while (first) {
-		current = first->next;
-		free(first);
-		first = current;
-	}
-	return NULL;
-}
-
-static struct cpufreq_affected_cpus *sysfs_get_cpu_list(unsigned int cpu,
-							const char *file)
-{
-	struct cpufreq_affected_cpus *first = NULL;
-	struct cpufreq_affected_cpus *current = NULL;
-	char one_value[SYSFS_PATH_MAX];
-	char linebuf[MAX_LINE_LEN];
-	unsigned int pos, i;
-	unsigned int len;
-
-	len = sysfs_cpufreq_read_file(cpu, file, linebuf, sizeof(linebuf));
-	if (len == 0)
-		return NULL;
-
-	pos = 0;
-	for (i = 0; i < len; i++) {
-		if (i == len || linebuf[i] == ' ' || linebuf[i] == '\n') {
-			if (i - pos  < 1)
-				continue;
-			if (i - pos >= SYSFS_PATH_MAX)
-				goto error_out;
-			if (current) {
-				current->next = malloc(sizeof(*current));
-				if (!current->next)
-					goto error_out;
-				current = current->next;
-			} else {
-				first = malloc(sizeof(*first));
-				if (!first)
-					goto error_out;
-				current = first;
-			}
-			current->first = first;
-			current->next = NULL;
-
-			memcpy(one_value, linebuf + pos, i - pos);
-			one_value[i - pos] = '\0';
-
-			if (sscanf(one_value, "%u", &current->cpu) != 1)
-				goto error_out;
-
-			pos = i + 1;
-		}
-	}
-
-	return first;
-
- error_out:
-	while (first) {
-		current = first->next;
-		free(first);
-		first = current;
-	}
-	return NULL;
-}
-
-struct cpufreq_affected_cpus *sysfs_get_freq_affected_cpus(unsigned int cpu)
-{
-	return sysfs_get_cpu_list(cpu, "affected_cpus");
-}
-
-struct cpufreq_affected_cpus *sysfs_get_freq_related_cpus(unsigned int cpu)
-{
-	return sysfs_get_cpu_list(cpu, "related_cpus");
-}
-
-struct cpufreq_stats *sysfs_get_freq_stats(unsigned int cpu,
-					unsigned long long *total_time) {
-	struct cpufreq_stats *first = NULL;
-	struct cpufreq_stats *current = NULL;
-	char one_value[SYSFS_PATH_MAX];
-	char linebuf[MAX_LINE_LEN];
-	unsigned int pos, i;
-	unsigned int len;
-
-	len = sysfs_cpufreq_read_file(cpu, "stats/time_in_state",
-				linebuf, sizeof(linebuf));
-	if (len == 0)
-		return NULL;
-
-	*total_time = 0;
-	pos = 0;
-	for (i = 0; i < len; i++) {
-		if (i == strlen(linebuf) || linebuf[i] == '\n')	{
-			if (i - pos < 2)
-				continue;
-			if ((i - pos) >= SYSFS_PATH_MAX)
-				goto error_out;
-			if (current) {
-				current->next = malloc(sizeof(*current));
-				if (!current->next)
-					goto error_out;
-				current = current->next;
-			} else {
-				first = malloc(sizeof(*first));
-				if (!first)
-					goto error_out;
-				current = first;
-			}
-			current->first = first;
-			current->next = NULL;
-
-			memcpy(one_value, linebuf + pos, i - pos);
-			one_value[i - pos] = '\0';
-			if (sscanf(one_value, "%lu %llu",
-					&current->frequency,
-					&current->time_in_state) != 2)
-				goto error_out;
-
-			*total_time = *total_time + current->time_in_state;
-			pos = i + 1;
-		}
-	}
-
-	return first;
-
- error_out:
-	while (first) {
-		current = first->next;
-		free(first);
-		first = current;
-	}
-	return NULL;
-}
-
-unsigned long sysfs_get_freq_transitions(unsigned int cpu)
-{
-	return sysfs_cpufreq_get_one_value(cpu, STATS_NUM_TRANSITIONS);
-}
-
-static int verify_gov(char *new_gov, char *passed_gov)
-{
-	unsigned int i, j = 0;
-
-	if (!passed_gov || (strlen(passed_gov) > 19))
-		return -EINVAL;
-
-	strncpy(new_gov, passed_gov, 20);
-	for (i = 0; i < 20; i++) {
-		if (j) {
-			new_gov[i] = '\0';
-			continue;
-		}
-		if ((new_gov[i] >= 'a') && (new_gov[i] <= 'z'))
-			continue;
-
-		if ((new_gov[i] >= 'A') && (new_gov[i] <= 'Z'))
-			continue;
-
-		if (new_gov[i] == '-')
-			continue;
-
-		if (new_gov[i] == '_')
-			continue;
-
-		if (new_gov[i] == '\0') {
-			j = 1;
-			continue;
-		}
-		return -EINVAL;
-	}
-	new_gov[19] = '\0';
-	return 0;
-}
-
-int sysfs_modify_freq_policy_governor(unsigned int cpu, char *governor)
-{
-	char new_gov[SYSFS_PATH_MAX];
-
-	if (!governor)
-		return -EINVAL;
-
-	if (verify_gov(new_gov, governor))
-		return -EINVAL;
-
-	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR,
-					     new_gov, strlen(new_gov));
-};
-
-int sysfs_modify_freq_policy_max(unsigned int cpu, unsigned long max_freq)
-{
-	char value[SYSFS_PATH_MAX];
-
-	snprintf(value, SYSFS_PATH_MAX, "%lu", max_freq);
-
-	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
-					     value, strlen(value));
-};
-
-
-int sysfs_modify_freq_policy_min(unsigned int cpu, unsigned long min_freq)
-{
-	char value[SYSFS_PATH_MAX];
-
-	snprintf(value, SYSFS_PATH_MAX, "%lu", min_freq);
-
-	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ,
-					     value, strlen(value));
-};
-
-
-int sysfs_set_freq_policy(unsigned int cpu, struct cpufreq_policy *policy)
-{
-	char min[SYSFS_PATH_MAX];
-	char max[SYSFS_PATH_MAX];
-	char gov[SYSFS_PATH_MAX];
-	int ret;
-	unsigned long old_min;
-	int write_max_first;
-
-	if (!policy || !(policy->governor))
-		return -EINVAL;
-
-	if (policy->max < policy->min)
-		return -EINVAL;
-
-	if (verify_gov(gov, policy->governor))
-		return -EINVAL;
-
-	snprintf(min, SYSFS_PATH_MAX, "%lu", policy->min);
-	snprintf(max, SYSFS_PATH_MAX, "%lu", policy->max);
-
-	old_min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ);
-	write_max_first = (old_min && (policy->max < old_min) ? 0 : 1);
-
-	if (write_max_first) {
-		ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
-						    max, strlen(max));
-		if (ret)
-			return ret;
-	}
-
-	ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ, min,
-					    strlen(min));
-	if (ret)
-		return ret;
-
-	if (!write_max_first) {
-		ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ,
-						    max, strlen(max));
-		if (ret)
-			return ret;
-	}
-
-	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR,
-					     gov, strlen(gov));
-}
-
-int sysfs_set_frequency(unsigned int cpu, unsigned long target_frequency)
-{
-	struct cpufreq_policy *pol = sysfs_get_freq_policy(cpu);
-	char userspace_gov[] = "userspace";
-	char freq[SYSFS_PATH_MAX];
-	int ret;
-
-	if (!pol)
-		return -ENODEV;
-
-	if (strncmp(pol->governor, userspace_gov, 9) != 0) {
-		ret = sysfs_modify_freq_policy_governor(cpu, userspace_gov);
-		if (ret) {
-			cpufreq_put_policy(pol);
-			return ret;
-		}
-	}
-
-	cpufreq_put_policy(pol);
-
-	snprintf(freq, SYSFS_PATH_MAX, "%lu", target_frequency);
-
-	return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_SET_SPEED,
-					     freq, strlen(freq));
-}
-
-/* CPUFREQ sysfs access **************************************************/
-
-/* General sysfs access **************************************************/
-int sysfs_cpu_exists(unsigned int cpu)
-{
-	char file[SYSFS_PATH_MAX];
-	struct stat statbuf;
-
-	snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/", cpu);
-
-	if (stat(file, &statbuf) != 0)
-		return -ENOSYS;
-
-	return S_ISDIR(statbuf.st_mode) ? 0 : -ENOSYS;
-}
-
-/* General sysfs access **************************************************/
diff --git a/tools/power/cpupower/lib/sysfs.h b/tools/power/cpupower/lib/sysfs.h
deleted file mode 100644
index c76a5e0..0000000
--- a/tools/power/cpupower/lib/sysfs.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* General */
-extern unsigned int sysfs_cpu_exists(unsigned int cpu);
-
-/* CPUfreq */
-extern unsigned long sysfs_get_freq_kernel(unsigned int cpu);
-extern unsigned long sysfs_get_freq_hardware(unsigned int cpu);
-extern unsigned long sysfs_get_freq_transition_latency(unsigned int cpu);
-extern int sysfs_get_freq_hardware_limits(unsigned int cpu,
-					unsigned long *min, unsigned long *max);
-extern char *sysfs_get_freq_driver(unsigned int cpu);
-extern struct cpufreq_policy *sysfs_get_freq_policy(unsigned int cpu);
-extern struct cpufreq_available_governors *sysfs_get_freq_available_governors(
-	unsigned int cpu);
-extern struct cpufreq_available_frequencies *sysfs_get_available_frequencies(
-	unsigned int cpu);
-extern struct cpufreq_affected_cpus *sysfs_get_freq_affected_cpus(
-	unsigned int cpu);
-extern struct cpufreq_affected_cpus *sysfs_get_freq_related_cpus(
-	unsigned int cpu);
-extern struct cpufreq_stats *sysfs_get_freq_stats(unsigned int cpu,
-						unsigned long long *total_time);
-extern unsigned long sysfs_get_freq_transitions(unsigned int cpu);
-extern int sysfs_set_freq_policy(unsigned int cpu,
-				struct cpufreq_policy *policy);
-extern int sysfs_modify_freq_policy_min(unsigned int cpu,
-					unsigned long min_freq);
-extern int sysfs_modify_freq_policy_max(unsigned int cpu,
-					unsigned long max_freq);
-extern int sysfs_modify_freq_policy_governor(unsigned int cpu, char *governor);
-extern int sysfs_set_frequency(unsigned int cpu,
-			unsigned long target_frequency);
diff --git a/tools/power/cpupower/man/cpupower-frequency-info.1 b/tools/power/cpupower/man/cpupower-frequency-info.1
index 9c85a38..6aa8d23 100644
--- a/tools/power/cpupower/man/cpupower-frequency-info.1
+++ b/tools/power/cpupower/man/cpupower-frequency-info.1
@@ -1,7 +1,7 @@
 .TH "CPUPOWER\-FREQUENCY\-INFO" "1" "0.1" "" "cpupower Manual"
 .SH "NAME"
 .LP 
-cpupower frequency\-info \- Utility to retrieve cpufreq kernel information
+cpupower\-frequency\-info \- Utility to retrieve cpufreq kernel information
 .SH "SYNTAX"
 .LP 
 cpupower [ \-c cpulist ] frequency\-info [\fIoptions\fP]
diff --git a/tools/power/cpupower/man/cpupower-frequency-set.1 b/tools/power/cpupower/man/cpupower-frequency-set.1
index 3eacc8d..b505702 100644
--- a/tools/power/cpupower/man/cpupower-frequency-set.1
+++ b/tools/power/cpupower/man/cpupower-frequency-set.1
@@ -1,7 +1,7 @@
 .TH "CPUPOWER\-FREQUENCY\-SET" "1" "0.1" "" "cpupower Manual"
 .SH "NAME"
 .LP 
-cpupower frequency\-set \- A small tool which allows to modify cpufreq settings.
+cpupower\-frequency\-set \- A small tool which allows to modify cpufreq settings.
 .SH "SYNTAX"
 .LP 
 cpupower [ \-c cpu ] frequency\-set [\fIoptions\fP]
diff --git a/tools/power/cpupower/man/cpupower-idle-info.1 b/tools/power/cpupower/man/cpupower-idle-info.1
index 7b3646a..80a1311 100644
--- a/tools/power/cpupower/man/cpupower-idle-info.1
+++ b/tools/power/cpupower/man/cpupower-idle-info.1
@@ -1,7 +1,7 @@
 .TH "CPUPOWER-IDLE-INFO" "1" "0.1" "" "cpupower Manual"
 .SH "NAME"
 .LP
-cpupower idle\-info \- Utility to retrieve cpu idle kernel information
+cpupower\-idle\-info \- Utility to retrieve cpu idle kernel information
 .SH "SYNTAX"
 .LP
 cpupower [ \-c cpulist ] idle\-info [\fIoptions\fP]
diff --git a/tools/power/cpupower/man/cpupower-idle-set.1 b/tools/power/cpupower/man/cpupower-idle-set.1
index 580c4e3e..21916cf 100644
--- a/tools/power/cpupower/man/cpupower-idle-set.1
+++ b/tools/power/cpupower/man/cpupower-idle-set.1
@@ -1,7 +1,7 @@
 .TH "CPUPOWER-IDLE-SET" "1" "0.1" "" "cpupower Manual"
 .SH "NAME"
 .LP
-cpupower idle\-set \- Utility to set cpu idle state specific kernel options
+cpupower\-idle\-set \- Utility to set cpu idle state specific kernel options
 .SH "SYNTAX"
 .LP
 cpupower [ \-c cpulist ] idle\-info [\fIoptions\fP]
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index 0fbd1a2..b4bf769 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -16,8 +16,8 @@
 #include <getopt.h>
 
 #include "cpufreq.h"
+#include "cpuidle.h"
 #include "helpers/helpers.h"
-#include "helpers/sysfs.h"
 
 #define NORM_FREQ_LEN 32
 
@@ -296,7 +296,7 @@
 			struct cpufreq_affected_cpus *cpus;
 
 			if (!bitmask_isbitset(cpus_chosen, cpu) ||
-			    cpufreq_cpu_exists(cpu))
+			    cpupower_is_cpu_online(cpu))
 				continue;
 
 			cpus = cpufreq_get_related_cpus(cpu);
@@ -316,10 +316,10 @@
 	     cpu <= bitmask_last(cpus_chosen); cpu++) {
 
 		if (!bitmask_isbitset(cpus_chosen, cpu) ||
-		    cpufreq_cpu_exists(cpu))
+		    cpupower_is_cpu_online(cpu))
 			continue;
 
-		if (sysfs_is_cpu_online(cpu) != 1)
+		if (cpupower_is_cpu_online(cpu) != 1)
 			continue;
 
 		printf(_("Setting cpu: %d\n"), cpu);
diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c
index 8bf8ab5..b59c85d 100644
--- a/tools/power/cpupower/utils/cpuidle-info.c
+++ b/tools/power/cpupower/utils/cpuidle-info.c
@@ -13,8 +13,10 @@
 #include <string.h>
 #include <getopt.h>
 
-#include "helpers/helpers.h"
+#include <cpuidle.h>
+
 #include "helpers/sysfs.h"
+#include "helpers/helpers.h"
 #include "helpers/bitmask.h"
 
 #define LINE_LEN 10
@@ -24,7 +26,7 @@
 	unsigned int idlestates, idlestate;
 	char *tmp;
 
-	idlestates = sysfs_get_idlestate_count(cpu);
+	idlestates = cpuidle_state_count(cpu);
 	if (idlestates == 0) {
 		printf(_("CPU %u: No idle states\n"), cpu);
 		return;
@@ -33,7 +35,7 @@
 	printf(_("Number of idle states: %d\n"), idlestates);
 	printf(_("Available idle states:"));
 	for (idlestate = 0; idlestate < idlestates; idlestate++) {
-		tmp = sysfs_get_idlestate_name(cpu, idlestate);
+		tmp = cpuidle_state_name(cpu, idlestate);
 		if (!tmp)
 			continue;
 		printf(" %s", tmp);
@@ -45,28 +47,28 @@
 		return;
 
 	for (idlestate = 0; idlestate < idlestates; idlestate++) {
-		int disabled = sysfs_is_idlestate_disabled(cpu, idlestate);
+		int disabled = cpuidle_is_state_disabled(cpu, idlestate);
 		/* Disabled interface not supported on older kernels */
 		if (disabled < 0)
 			disabled = 0;
-		tmp = sysfs_get_idlestate_name(cpu, idlestate);
+		tmp = cpuidle_state_name(cpu, idlestate);
 		if (!tmp)
 			continue;
 		printf("%s%s:\n", tmp, (disabled) ? " (DISABLED) " : "");
 		free(tmp);
 
-		tmp = sysfs_get_idlestate_desc(cpu, idlestate);
+		tmp = cpuidle_state_desc(cpu, idlestate);
 		if (!tmp)
 			continue;
 		printf(_("Flags/Description: %s\n"), tmp);
 		free(tmp);
 
 		printf(_("Latency: %lu\n"),
-		       sysfs_get_idlestate_latency(cpu, idlestate));
+		       cpuidle_state_latency(cpu, idlestate));
 		printf(_("Usage: %lu\n"),
-		       sysfs_get_idlestate_usage(cpu, idlestate));
+		       cpuidle_state_usage(cpu, idlestate));
 		printf(_("Duration: %llu\n"),
-		       sysfs_get_idlestate_time(cpu, idlestate));
+		       cpuidle_state_time(cpu, idlestate));
 	}
 }
 
@@ -74,7 +76,7 @@
 {
 	char *tmp;
 
-	tmp = sysfs_get_cpuidle_driver();
+	tmp = cpuidle_get_driver();
 	if (!tmp) {
 		printf(_("Could not determine cpuidle driver\n"));
 		return;
@@ -83,7 +85,7 @@
 	printf(_("CPUidle driver: %s\n"), tmp);
 	free(tmp);
 
-	tmp = sysfs_get_cpuidle_governor();
+	tmp = cpuidle_get_governor();
 	if (!tmp) {
 		printf(_("Could not determine cpuidle governor\n"));
 		return;
@@ -98,7 +100,7 @@
 	long max_allowed_cstate = 2000000000;
 	unsigned int cstate, cstates;
 
-	cstates = sysfs_get_idlestate_count(cpu);
+	cstates = cpuidle_state_count(cpu);
 	if (cstates == 0) {
 		printf(_("CPU %u: No C-states info\n"), cpu);
 		return;
@@ -113,11 +115,11 @@
 			 "type[C%d] "), cstate, cstate);
 		printf(_("promotion[--] demotion[--] "));
 		printf(_("latency[%03lu] "),
-		       sysfs_get_idlestate_latency(cpu, cstate));
+		       cpuidle_state_latency(cpu, cstate));
 		printf(_("usage[%08lu] "),
-		       sysfs_get_idlestate_usage(cpu, cstate));
+		       cpuidle_state_usage(cpu, cstate));
 		printf(_("duration[%020Lu] \n"),
-		       sysfs_get_idlestate_time(cpu, cstate));
+		       cpuidle_state_time(cpu, cstate));
 	}
 }
 
diff --git a/tools/power/cpupower/utils/cpuidle-set.c b/tools/power/cpupower/utils/cpuidle-set.c
index d6b6ae4..691c24d 100644
--- a/tools/power/cpupower/utils/cpuidle-set.c
+++ b/tools/power/cpupower/utils/cpuidle-set.c
@@ -5,12 +5,12 @@
 #include <limits.h>
 #include <string.h>
 #include <ctype.h>
-
 #include <getopt.h>
 
-#include "cpufreq.h"
+#include <cpufreq.h>
+#include <cpuidle.h>
+
 #include "helpers/helpers.h"
-#include "helpers/sysfs.h"
 
 static struct option info_opts[] = {
      {"disable",	required_argument,		NULL, 'd'},
@@ -104,16 +104,16 @@
 		if (!bitmask_isbitset(cpus_chosen, cpu))
 			continue;
 
-		if (sysfs_is_cpu_online(cpu) != 1)
+		if (cpupower_is_cpu_online(cpu) != 1)
 			continue;
 
-		idlestates = sysfs_get_idlestate_count(cpu);
+		idlestates = cpuidle_state_count(cpu);
 		if (idlestates <= 0)
 			continue;
 
 		switch (param) {
 		case 'd':
-			ret = sysfs_idlestate_disable(cpu, idlestate, 1);
+			ret = cpuidle_state_disable(cpu, idlestate, 1);
 			if (ret == 0)
 		printf(_("Idlestate %u disabled on CPU %u\n"),  idlestate, cpu);
 			else if (ret == -1)
@@ -126,7 +126,7 @@
 		       idlestate, cpu);
 			break;
 		case 'e':
-			ret = sysfs_idlestate_disable(cpu, idlestate, 0);
+			ret = cpuidle_state_disable(cpu, idlestate, 0);
 			if (ret == 0)
 		printf(_("Idlestate %u enabled on CPU %u\n"),  idlestate, cpu);
 			else if (ret == -1)
@@ -140,13 +140,13 @@
 			break;
 		case 'D':
 			for (idlestate = 0; idlestate < idlestates; idlestate++) {
-				disabled = sysfs_is_idlestate_disabled
+				disabled = cpuidle_is_state_disabled
 					(cpu, idlestate);
-				state_latency = sysfs_get_idlestate_latency
+				state_latency = cpuidle_state_latency
 					(cpu, idlestate);
 				if (disabled == 1) {
 					if (latency > state_latency){
-						ret = sysfs_idlestate_disable
+						ret = cpuidle_state_disable
 							(cpu, idlestate, 0);
 						if (ret == 0)
 		printf(_("Idlestate %u enabled on CPU %u\n"),  idlestate, cpu);
@@ -154,7 +154,7 @@
 					continue;
 				}
 				if (latency <= state_latency){
-					ret = sysfs_idlestate_disable
+					ret = cpuidle_state_disable
 						(cpu, idlestate, 1);
 					if (ret == 0)
 		printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu);
@@ -163,10 +163,10 @@
 			break;
 		case 'E':
 			for (idlestate = 0; idlestate < idlestates; idlestate++) {
-				disabled = sysfs_is_idlestate_disabled
+				disabled = cpuidle_is_state_disabled
 					(cpu, idlestate);
 				if (disabled == 1) {
-					ret = sysfs_idlestate_disable
+					ret = cpuidle_state_disable
 						(cpu, idlestate, 0);
 					if (ret == 0)
 		printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu);
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index aa9e954..afb66f8 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -14,6 +14,7 @@
 #include <locale.h>
 
 #include "helpers/bitmask.h"
+#include <cpupower.h>
 
 /* Internationalization ****************************/
 #ifdef NLS
@@ -92,31 +93,6 @@
 extern struct cpupower_cpu_info cpupower_cpu_info;
 /* cpuid and cpuinfo helpers  **************************/
 
-struct cpuid_core_info {
-	int pkg;
-	int core;
-	int cpu;
-
-	/* flags */
-	unsigned int is_online:1;
-};
-
-/* CPU topology/hierarchy parsing ******************/
-struct cpupower_topology {
-	/* Amount of CPU cores, packages and threads per core in the system */
-	unsigned int cores;
-	unsigned int pkgs;
-	unsigned int threads; /* per core */
-
-	/* Array gets mallocated with cores entries, holding per core info */
-	struct cpuid_core_info *core_info;
-};
-
-extern int get_cpu_topology(struct cpupower_topology *cpu_top);
-extern void cpu_topology_release(struct cpupower_topology cpu_top);
-
-/* CPU topology/hierarchy parsing ******************/
-
 /* X86 ONLY ****************************************/
 #if defined(__i386__) || defined(__x86_64__)
 
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c
index 5f9c908..a1a6c60 100644
--- a/tools/power/cpupower/utils/helpers/topology.c
+++ b/tools/power/cpupower/utils/helpers/topology.c
@@ -16,110 +16,7 @@
 #include <errno.h>
 #include <fcntl.h>
 
-#include <helpers/helpers.h>
-#include <helpers/sysfs.h>
+#include <cpuidle.h>
 
-/* returns -1 on failure, 0 on success */
-static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result)
-{
-	char linebuf[MAX_LINE_LEN];
-	char *endp;
-	char path[SYSFS_PATH_MAX];
+/* CPU topology/hierarchy parsing ******************/
 
-	snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
-			 cpu, fname);
-	if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
-		return -1;
-	*result = strtol(linebuf, &endp, 0);
-	if (endp == linebuf || errno == ERANGE)
-		return -1;
-	return 0;
-}
-
-static int __compare(const void *t1, const void *t2)
-{
-	struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1;
-	struct cpuid_core_info *top2 = (struct cpuid_core_info *)t2;
-	if (top1->pkg < top2->pkg)
-		return -1;
-	else if (top1->pkg > top2->pkg)
-		return 1;
-	else if (top1->core < top2->core)
-		return -1;
-	else if (top1->core > top2->core)
-		return 1;
-	else if (top1->cpu < top2->cpu)
-		return -1;
-	else if (top1->cpu > top2->cpu)
-		return 1;
-	else
-		return 0;
-}
-
-/*
- * Returns amount of cpus, negative on error, cpu_top must be
- * passed to cpu_topology_release to free resources
- *
- * Array is sorted after ->pkg, ->core, then ->cpu
- */
-int get_cpu_topology(struct cpupower_topology *cpu_top)
-{
-	int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF);
-
-	cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus);
-	if (cpu_top->core_info == NULL)
-		return -ENOMEM;
-	cpu_top->pkgs = cpu_top->cores = 0;
-	for (cpu = 0; cpu < cpus; cpu++) {
-		cpu_top->core_info[cpu].cpu = cpu;
-		cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu);
-		if(sysfs_topology_read_file(
-			cpu,
-			"physical_package_id",
-			&(cpu_top->core_info[cpu].pkg)) < 0) {
-			cpu_top->core_info[cpu].pkg = -1;
-			cpu_top->core_info[cpu].core = -1;
-			continue;
-		}
-		if(sysfs_topology_read_file(
-			cpu,
-			"core_id",
-			&(cpu_top->core_info[cpu].core)) < 0) {
-			cpu_top->core_info[cpu].pkg = -1;
-			cpu_top->core_info[cpu].core = -1;
-			continue;
-		}
-	}
-
-	qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info),
-	      __compare);
-
-	/* Count the number of distinct pkgs values. This works
-	   because the primary sort of the core_info struct was just
-	   done by pkg value. */
-	last_pkg = cpu_top->core_info[0].pkg;
-	for(cpu = 1; cpu < cpus; cpu++) {
-		if (cpu_top->core_info[cpu].pkg != last_pkg &&
-				cpu_top->core_info[cpu].pkg != -1) {
-
-			last_pkg = cpu_top->core_info[cpu].pkg;
-			cpu_top->pkgs++;
-		}
-	}
-	if (!(cpu_top->core_info[0].pkg == -1))
-		cpu_top->pkgs++;
-
-	/* Intel's cores count is not consecutively numbered, there may
-	 * be a core_id of 3, but none of 2. Assume there always is 0
-	 * Get amount of cores by counting duplicates in a package
-	for (cpu = 0; cpu_top->core_info[cpu].pkg = 0 && cpu < cpus; cpu++) {
-		if (cpu_top->core_info[cpu].core == 0)
-	cpu_top->cores++;
-	*/
-	return cpus;
-}
-
-void cpu_topology_release(struct cpupower_topology cpu_top)
-{
-	free(cpu_top.core_info);
-}
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
index bcd22a1..1b5da00 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
@@ -10,8 +10,8 @@
 #include <stdint.h>
 #include <string.h>
 #include <limits.h>
+#include <cpuidle.h>
 
-#include "helpers/sysfs.h"
 #include "helpers/helpers.h"
 #include "idle_monitor/cpupower-monitor.h"
 
@@ -51,7 +51,7 @@
 		for (state = 0; state < cpuidle_sysfs_monitor.hw_states_num;
 		     state++) {
 			previous_count[cpu][state] =
-				sysfs_get_idlestate_time(cpu, state);
+				cpuidle_state_time(cpu, state);
 			dprint("CPU %d - State: %d - Val: %llu\n",
 			       cpu, state, previous_count[cpu][state]);
 		}
@@ -70,7 +70,7 @@
 		for (state = 0; state < cpuidle_sysfs_monitor.hw_states_num;
 		     state++) {
 			current_count[cpu][state] =
-				sysfs_get_idlestate_time(cpu, state);
+				cpuidle_state_time(cpu, state);
 			dprint("CPU %d - State: %d - Val: %llu\n",
 			       cpu, state, previous_count[cpu][state]);
 		}
@@ -132,13 +132,13 @@
 	char *tmp;
 
 	/* Assume idle state count is the same for all CPUs */
-	cpuidle_sysfs_monitor.hw_states_num = sysfs_get_idlestate_count(0);
+	cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
 
 	if (cpuidle_sysfs_monitor.hw_states_num <= 0)
 		return NULL;
 
 	for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
-		tmp = sysfs_get_idlestate_name(0, num);
+		tmp = cpuidle_state_name(0, num);
 		if (tmp == NULL)
 			continue;
 
@@ -146,7 +146,7 @@
 		strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
 		free(tmp);
 
-		tmp = sysfs_get_idlestate_desc(0, num);
+		tmp = cpuidle_state_desc(0, num);
 		if (tmp == NULL)
 			continue;
 		strncpy(cpuidle_cstates[num].desc, tmp,	CSTATE_DESC_LEN - 1);
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 20a257a..acbf7ff 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -66,6 +66,8 @@
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
+unsigned int do_irtl_snb;
+unsigned int do_irtl_hsw;
 unsigned int units = 1000000;	/* MHz etc */
 unsigned int genuine_intel;
 unsigned int has_invariant_tsc;
@@ -187,7 +189,7 @@
 	unsigned long long pkg_any_core_c0;
 	unsigned long long pkg_any_gfxe_c0;
 	unsigned long long pkg_both_core_gfxe_c0;
-	unsigned long long gfx_rc6_ms;
+	long long gfx_rc6_ms;
 	unsigned int gfx_mhz;
 	unsigned int package_id;
 	unsigned int energy_pkg;	/* MSR_PKG_ENERGY_STATUS */
@@ -621,8 +623,14 @@
 		outp += sprintf(outp, "%8d", p->pkg_temp_c);
 
 	/* GFXrc6 */
-	if (do_gfx_rc6_ms)
-		outp += sprintf(outp, "%8.2f", 100.0 * p->gfx_rc6_ms / 1000.0 / interval_float);
+	if (do_gfx_rc6_ms) {
+		if (p->gfx_rc6_ms == -1) {	/* detect counter reset */
+			outp += sprintf(outp, "  ***.**");
+		} else {
+			outp += sprintf(outp, "%8.2f",
+				p->gfx_rc6_ms / 10.0 / interval_float);
+		}
+	}
 
 	/* GFXMHz */
 	if (do_gfx_mhz)
@@ -766,7 +774,12 @@
 	old->pc10 = new->pc10 - old->pc10;
 	old->pkg_temp_c = new->pkg_temp_c;
 
-	old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
+	/* flag an error when rc6 counter resets/wraps */
+	if (old->gfx_rc6_ms >  new->gfx_rc6_ms)
+		old->gfx_rc6_ms = -1;
+	else
+		old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
+
 	old->gfx_mhz = new->gfx_mhz;
 
 	DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
@@ -1296,6 +1309,7 @@
 int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
+int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 
 
 static void
@@ -1579,6 +1593,47 @@
 	fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
 	fprintf(outf, ")\n");
 }
+
+unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
+
+void print_irtl(void)
+{
+	unsigned long long msr;
+
+	get_msr(base_cpu, MSR_PKGC3_IRTL, &msr);
+	fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr);
+	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+	get_msr(base_cpu, MSR_PKGC6_IRTL, &msr);
+	fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr);
+	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+	get_msr(base_cpu, MSR_PKGC7_IRTL, &msr);
+	fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr);
+	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+	if (!do_irtl_hsw)
+		return;
+
+	get_msr(base_cpu, MSR_PKGC8_IRTL, &msr);
+	fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr);
+	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+	get_msr(base_cpu, MSR_PKGC9_IRTL, &msr);
+	fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr);
+	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+	get_msr(base_cpu, MSR_PKGC10_IRTL, &msr);
+	fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr);
+	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT",
+		(msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
+
+}
 void free_fd_percpu(void)
 {
 	int i;
@@ -2144,6 +2199,9 @@
 	case 0x56:	/* BDX-DE */
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
+	case 0x55:	/* SKX */
 		pkg_cstate_limits = hsw_pkg_cstate_limits;
 		break;
 	case 0x37:	/* BYT */
@@ -2156,6 +2214,9 @@
 	case 0x57:	/* PHI */
 		pkg_cstate_limits = phi_pkg_cstate_limits;
 		break;
+	case 0x5C:	/* BXT */
+		pkg_cstate_limits = bxt_pkg_cstate_limits;
+		break;
 	default:
 		return 0;
 	}
@@ -2248,6 +2309,9 @@
 	case 0x56:	/* BDX-DE */
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
+	case 0x55:	/* SKX */
 
 	case 0x57:	/* Knights Landing */
 		return 1;
@@ -2585,13 +2649,19 @@
 	case 0x47:	/* BDW */
 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
 		break;
+	case 0x5C:	/* BXT */
+		do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
+		break;
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
 		break;
 	case 0x3F:	/* HSX */
 	case 0x4F:	/* BDX */
 	case 0x56:	/* BDX-DE */
+	case 0x55:	/* SKX */
 	case 0x57:	/* KNL */
 		do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
 		break;
@@ -2871,6 +2941,10 @@
 	case 0x56:	/* BDX-DE */
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
+	case 0x55:	/* SKX */
+	case 0x5C:	/* BXT */
 		return 1;
 	}
 	return 0;
@@ -2879,9 +2953,14 @@
 /*
  * HSW adds support for additional MSRs:
  *
- * MSR_PKG_C8_RESIDENCY            0x00000630
- * MSR_PKG_C9_RESIDENCY            0x00000631
- * MSR_PKG_C10_RESIDENCY           0x00000632
+ * MSR_PKG_C8_RESIDENCY		0x00000630
+ * MSR_PKG_C9_RESIDENCY		0x00000631
+ * MSR_PKG_C10_RESIDENCY	0x00000632
+ *
+ * MSR_PKGC8_IRTL		0x00000633
+ * MSR_PKGC9_IRTL		0x00000634
+ * MSR_PKGC10_IRTL		0x00000635
+ *
  */
 int has_hsw_msrs(unsigned int family, unsigned int model)
 {
@@ -2893,6 +2972,9 @@
 	case 0x3D:	/* BDW */
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
+	case 0x5C:	/* BXT */
 		return 1;
 	}
 	return 0;
@@ -2914,6 +2996,8 @@
 	switch (model) {
 	case 0x4E:	/* SKL */
 	case 0x5E:	/* SKL */
+	case 0x8E:	/* KBL */
+	case 0x9E:	/* KBL */
 		return 1;
 	}
 	return 0;
@@ -3187,7 +3271,7 @@
 	if (debug)
 		decode_misc_enable_msr();
 
-	if (max_level >= 0x7) {
+	if (max_level >= 0x7 && debug) {
 		int has_sgx;
 
 		ecx = 0;
@@ -3221,7 +3305,15 @@
 				switch(model) {
 				case 0x4E:	/* SKL */
 				case 0x5E:	/* SKL */
-					crystal_hz = 24000000;	/* 24 MHz */
+				case 0x8E:	/* KBL */
+				case 0x9E:	/* KBL */
+					crystal_hz = 24000000;	/* 24.0 MHz */
+					break;
+				case 0x55:	/* SKX */
+					crystal_hz = 25000000;	/* 25.0 MHz */
+					break;
+				case 0x5C:	/* BXT */
+					crystal_hz = 19200000;	/* 19.2 MHz */
 					break;
 				default:
 					crystal_hz = 0;
@@ -3254,11 +3346,13 @@
 
 	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
 	do_snb_cstates = has_snb_msrs(family, model);
+	do_irtl_snb = has_snb_msrs(family, model);
 	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
 	do_pc3 = (pkg_cstate_limit >= PCL__3);
 	do_pc6 = (pkg_cstate_limit >= PCL__6);
 	do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7);
 	do_c8_c9_c10 = has_hsw_msrs(family, model);
+	do_irtl_hsw = has_hsw_msrs(family, model);
 	do_skl_residency = has_skl_msrs(family, model);
 	do_slm_cstates = is_slm(family, model);
 	do_knl_cstates  = is_knl(family, model);
@@ -3564,6 +3658,9 @@
 
 	if (debug)
 		for_all_cpus(print_thermal, ODD_COUNTERS);
+
+	if (debug && do_irtl_snb)
+		print_irtl();
 }
 
 int fork_it(char **argv)
@@ -3629,7 +3726,7 @@
 }
 
 void print_version() {
-	fprintf(outf, "turbostat version 4.11 27 Feb 2016"
+	fprintf(outf, "turbostat version 4.12 5 Apr 2016"
 		" - Len Brown <lenb@kernel.org>\n");
 }
 
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index b04afc3..ff9e5f2 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -19,6 +19,7 @@
 TARGETS += pstore
 TARGETS += ptrace
 TARGETS += seccomp
+TARGETS += sigaltstack
 TARGETS += size
 TARGETS += static_keys
 TARGETS += sysctl
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
index 5d8cd06..c37262f 100644
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -14,3 +14,12 @@
 reset_tracer() { # reset the current tracer
     echo nop > current_tracer
 }
+
+reset_trigger() { # reset all current setting triggers
+    grep -v ^# events/*/*/trigger |
+    while read line; do
+        cmd=`echo $line | cut -f2- -d: | cut -f1 -d" "`
+	echo "!$cmd" > `echo $line | cut -f1 -d:`
+    done
+}
+
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc
new file mode 100644
index 0000000..1a94450
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc
@@ -0,0 +1,64 @@
+#!/bin/sh
+# description: event trigger - test event enable/disable trigger
+
+do_reset() {
+    reset_trigger
+    echo > set_event
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+if [ ! -f set_event -o ! -d events/sched ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/sched/sched_process_fork/trigger ]; then
+    echo "event trigger is not supported"
+    exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+FEATURE=`grep enable_event events/sched/sched_process_fork/trigger`
+if [ -z "$FEATURE" ]; then
+    echo "event enable/disable trigger is not supported"
+    exit_unsupported
+fi
+
+echo "Test enable_event trigger"
+echo 0 > events/sched/sched_switch/enable
+echo 'enable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
+( echo "forked")
+if [ `cat events/sched/sched_switch/enable` != '1*' ]; then
+    fail "enable_event trigger on sched_process_fork did not work"
+fi
+
+reset_trigger
+
+echo "Test disable_event trigger"
+echo 1 > events/sched/sched_switch/enable
+echo 'disable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
+( echo "forked")
+if [ `cat events/sched/sched_switch/enable` != '0*' ]; then
+    fail "disable_event trigger on sched_process_fork did not work"
+fi
+
+reset_trigger
+
+echo "Test semantic error for event enable/disable trigger"
+! echo 'enable_event:nogroup:noevent' > events/sched/sched_process_fork/trigger
+! echo 'disable_event+1' > events/sched/sched_process_fork/trigger
+echo 'enable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
+! echo 'enable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
+! echo 'disable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc
new file mode 100644
index 0000000..514e466
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc
@@ -0,0 +1,59 @@
+#!/bin/sh
+# description: event trigger - test trigger filter
+
+do_reset() {
+    reset_trigger
+    echo > set_event
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+if [ ! -f set_event -o ! -d events/sched ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/sched/sched_process_fork/trigger ]; then
+    echo "event trigger is not supported"
+    exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+echo "Test trigger filter"
+echo 1 > tracing_on
+echo 'traceoff if child_pid == 0' > events/sched/sched_process_fork/trigger
+( echo "forked")
+if [ `cat tracing_on` -ne 1 ]; then
+    fail "traceoff trigger on sched_process_fork did not work"
+fi
+
+reset_trigger
+
+echo "Test semantic error for trigger filter"
+! echo 'traceoff if a' > events/sched/sched_process_fork/trigger
+! echo 'traceoff if common_pid=0' > events/sched/sched_process_fork/trigger
+! echo 'traceoff if common_pid==b' > events/sched/sched_process_fork/trigger
+echo 'traceoff if common_pid == 0' > events/sched/sched_process_fork/trigger
+echo '!traceoff' > events/sched/sched_process_fork/trigger
+! echo 'traceoff if common_pid == child_pid' > events/sched/sched_process_fork/trigger
+echo 'traceoff if common_pid <= 0' > events/sched/sched_process_fork/trigger
+echo '!traceoff' > events/sched/sched_process_fork/trigger
+echo 'traceoff if common_pid >= 0' > events/sched/sched_process_fork/trigger
+echo '!traceoff' > events/sched/sched_process_fork/trigger
+echo 'traceoff if parent_pid >= 0 && child_pid >= 0' > events/sched/sched_process_fork/trigger
+echo '!traceoff' > events/sched/sched_process_fork/trigger
+echo 'traceoff if parent_pid >= 0 || child_pid >= 0' > events/sched/sched_process_fork/trigger
+echo '!traceoff' > events/sched/sched_process_fork/trigger
+
+
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
new file mode 100644
index 0000000..c2b61c4
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
@@ -0,0 +1,75 @@
+#!/bin/sh
+# description: event trigger - test histogram modifiers
+
+do_reset() {
+    reset_trigger
+    echo > set_event
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+if [ ! -f set_event -o ! -d events/sched ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/sched/sched_process_fork/trigger ]; then
+    echo "event trigger is not supported"
+    exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+FEATURE=`grep hist events/sched/sched_process_fork/trigger`
+if [ -z "$FEATURE" ]; then
+    echo "hist trigger is not supported"
+    exit_unsupported
+fi
+
+echo "Test histogram with execname modifier"
+
+echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+COMM=`cat /proc/$$/comm`
+grep "common_pid: $COMM" events/sched/sched_process_fork/hist > /dev/null || \
+    fail "execname modifier on sched_process_fork did not work"
+
+reset_trigger
+
+echo "Test histogram with hex modifier"
+
+echo 'hist:keys=parent_pid.hex' > events/sched/sched_process_fork/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+# Note that $$ is the parent pid of the forked subshells, so use it as the key.
+HEX=`printf %x $$`
+grep "parent_pid: $HEX" events/sched/sched_process_fork/hist > /dev/null || \
+    fail "hex modifier on sched_process_fork did not work"
+
+reset_trigger
+
+echo "Test histogram with syscall modifier"
+
+echo 'hist:keys=id.syscall' > events/raw_syscalls/sys_exit/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+grep "id: sys_" events/raw_syscalls/sys_exit/hist > /dev/null || \
+    fail "syscall modifier on raw_syscalls/sys_exit did not work"
+
+
+reset_trigger
+
+echo "Test histgram with log2 modifier"
+
+echo 'hist:keys=bytes_req.log2' > events/kmem/kmalloc/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+grep 'bytes_req: ~ 2^[0-9]*' events/kmem/kmalloc/hist > /dev/null || \
+    fail "log2 modifier on kmem/kmalloc did not work"
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
new file mode 100644
index 0000000..b2902d4
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
@@ -0,0 +1,83 @@
+#!/bin/sh
+# description: event trigger - test histogram trigger
+
+do_reset() {
+    reset_trigger
+    echo > set_event
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+if [ ! -f set_event -o ! -d events/sched ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/sched/sched_process_fork/trigger ]; then
+    echo "event trigger is not supported"
+    exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+FEATURE=`grep hist events/sched/sched_process_fork/trigger`
+if [ -z "$FEATURE" ]; then
+    echo "hist trigger is not supported"
+    exit_unsupported
+fi
+
+echo "Test histogram basic tigger"
+
+echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+grep parent_pid events/sched/sched_process_fork/hist > /dev/null || \
+    fail "hist trigger on sched_process_fork did not work"
+grep child events/sched/sched_process_fork/hist > /dev/null || \
+    fail "hist trigger on sched_process_fork did not work"
+
+reset_trigger
+
+echo "Test histogram with compound keys"
+
+echo 'hist:keys=parent_pid,child_pid' > events/sched/sched_process_fork/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+grep '^{ parent_pid:.*, child_pid:.*}' events/sched/sched_process_fork/hist > /dev/null || \
+    fail "compound keys on sched_process_fork did not work"
+
+reset_trigger
+
+echo "Test histogram with string key"
+
+echo 'hist:keys=parent_comm' > events/sched/sched_process_fork/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+COMM=`cat /proc/$$/comm`
+grep "parent_comm: $COMM" events/sched/sched_process_fork/hist > /dev/null || \
+    fail "string key on sched_process_fork did not work"
+
+reset_trigger
+
+echo "Test histogram with sort key"
+
+echo 'hist:keys=parent_pid,child_pid:sort=child_pid.ascending' > events/sched/sched_process_fork/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+
+check_inc() {
+    while [ $# -gt 1 ]; do
+        [ $1 -gt $2 ] && return 1
+        shift 1
+    done
+    return 0
+}
+check_inc `grep -o "child_pid:[[:space:]]*[[:digit:]]*" \
+    events/sched/sched_process_fork/hist | cut -d: -f2 ` ||
+    fail "sort param on sched_process_fork did not work"
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
new file mode 100644
index 0000000..03c4a46
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
@@ -0,0 +1,73 @@
+#!/bin/sh
+# description: event trigger - test multiple histogram triggers
+
+do_reset() {
+    reset_trigger
+    echo > set_event
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+if [ ! -f set_event -o ! -d events/sched ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/sched/sched_process_fork/trigger ]; then
+    echo "event trigger is not supported"
+    exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+FEATURE=`grep hist events/sched/sched_process_fork/trigger`
+if [ -z "$FEATURE" ]; then
+    echo "hist trigger is not supported"
+    exit_unsupported
+fi
+
+reset_trigger
+
+echo "Test histogram multiple tiggers"
+
+echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
+echo 'hist:keys=parent_comm:vals=child_pid' >> events/sched/sched_process_fork/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+grep parent_pid events/sched/sched_process_fork/hist > /dev/null || \
+    fail "hist trigger on sched_process_fork did not work"
+grep child events/sched/sched_process_fork/hist > /dev/null || \
+    fail "hist trigger on sched_process_fork did not work"
+COMM=`cat /proc/$$/comm`
+grep "parent_comm: $COMM" events/sched/sched_process_fork/hist > /dev/null || \
+    fail "string key on sched_process_fork did not work"
+
+reset_trigger
+
+echo "Test histogram with its name"
+
+echo 'hist:name=test_hist:keys=common_pid' > events/sched/sched_process_fork/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+grep test_hist events/sched/sched_process_fork/hist > /dev/null || \
+    fail "named event on sched_process_fork did not work"
+
+echo "Test same named histogram on different events"
+
+echo 'hist:name=test_hist:keys=common_pid' > events/sched/sched_process_exit/trigger
+for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
+grep test_hist events/sched/sched_process_exit/hist > /dev/null || \
+    fail "named event on sched_process_fork did not work"
+
+diffs=`diff events/sched/sched_process_exit/hist events/sched/sched_process_fork/hist | wc -l`
+test $diffs -eq 0 || fail "Same name histograms are not same"
+
+reset_trigger
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
new file mode 100644
index 0000000..f84b80d
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
@@ -0,0 +1,56 @@
+#!/bin/sh
+# description: event trigger - test snapshot-trigger
+
+do_reset() {
+    reset_trigger
+    echo > set_event
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+if [ ! -f set_event -o ! -d events/sched ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/sched/sched_process_fork/trigger ]; then
+    echo "event trigger is not supported"
+    exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+FEATURE=`grep snapshot events/sched/sched_process_fork/trigger`
+if [ -z "$FEATURE" ]; then
+    echo "snapshot trigger is not supported"
+    exit_unsupported
+fi
+
+echo "Test snapshot tigger"
+echo 0 > snapshot
+echo 1 > events/sched/sched_process_fork/enable
+( echo "forked")
+echo 'snapshot:1' > events/sched/sched_process_fork/trigger
+( echo "forked")
+grep sched_process_fork snapshot > /dev/null || \
+    fail "snapshot trigger on sched_process_fork did not work"
+
+reset_trigger
+echo 0 > snapshot
+echo 0 > events/sched/sched_process_fork/enable
+
+echo "Test snapshot semantic errors"
+
+! echo "snapshot+1" > events/sched/sched_process_fork/trigger
+echo "snapshot" > events/sched/sched_process_fork/trigger
+! echo "snapshot" > events/sched/sched_process_fork/trigger
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc
new file mode 100644
index 0000000..9fa23b0
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc
@@ -0,0 +1,53 @@
+#!/bin/sh
+# description: event trigger - test stacktrace-trigger
+
+do_reset() {
+    reset_trigger
+    echo > set_event
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+if [ ! -f set_event -o ! -d events/sched ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/sched/sched_process_fork/trigger ]; then
+    echo "event trigger is not supported"
+    exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+FEATURE=`grep stacktrace events/sched/sched_process_fork/trigger`
+if [ -z "$FEATURE" ]; then
+    echo "stacktrace trigger is not supported"
+    exit_unsupported
+fi
+
+echo "Test stacktrace tigger"
+echo 0 > trace
+echo 0 > options/stacktrace
+echo 'stacktrace' > events/sched/sched_process_fork/trigger
+( echo "forked")
+grep "<stack trace>" trace > /dev/null || \
+    fail "stacktrace trigger on sched_process_fork did not work"
+
+reset_trigger
+
+echo "Test stacktrace semantic errors"
+
+! echo "stacktrace:foo" > events/sched/sched_process_fork/trigger
+echo "stacktrace" > events/sched/sched_process_fork/trigger
+! echo "stacktrace" > events/sched/sched_process_fork/trigger
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc
new file mode 100644
index 0000000..87648e5
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc
@@ -0,0 +1,58 @@
+#!/bin/sh
+# description: event trigger - test traceon/off trigger
+
+do_reset() {
+    reset_trigger
+    echo > set_event
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit $FAIL
+}
+
+if [ ! -f set_event -o ! -d events/sched ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f events/sched/sched_process_fork/trigger ]; then
+    echo "event trigger is not supported"
+    exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+echo "Test traceoff trigger"
+echo 1 > tracing_on
+echo 'traceoff' > events/sched/sched_process_fork/trigger
+( echo "forked")
+if [ `cat tracing_on` -ne 0 ]; then
+    fail "traceoff trigger on sched_process_fork did not work"
+fi
+
+reset_trigger
+
+echo "Test traceon trigger"
+echo 0 > tracing_on
+echo 'traceon' > events/sched/sched_process_fork/trigger
+( echo "forked")
+if [ `cat tracing_on` -ne 1 ]; then
+    fail "traceoff trigger on sched_process_fork did not work"
+fi
+
+reset_trigger
+
+echo "Test semantic error for traceoff/on trigger"
+! echo 'traceoff:badparam' > events/sched/sched_process_fork/trigger
+! echo 'traceoff+0' > events/sched/sched_process_fork/trigger
+echo 'traceon' > events/sched/sched_process_fork/trigger
+! echo 'traceon' > events/sched/sched_process_fork/trigger
+! echo 'traceoff' > events/sched/sched_process_fork/trigger
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 69bb3fc..0840684 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -3,3 +3,4 @@
 psock_tpacket
 reuseport_bpf
 reuseport_bpf_cpu
+reuseport_dualstack
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index c658792..0e53407 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -4,7 +4,7 @@
 
 CFLAGS += -I../../../../usr/include/
 
-NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu
+NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack
 
 all: $(NET_PROGS)
 %: %.c
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
new file mode 100644
index 0000000..90958aa
--- /dev/null
+++ b/tools/testing/selftests/net/reuseport_dualstack.c
@@ -0,0 +1,208 @@
+/*
+ * It is possible to use SO_REUSEPORT to open multiple sockets bound to
+ * equivalent local addresses using AF_INET and AF_INET6 at the same time.  If
+ * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
+ * receive a given incoming packet.  However, when it is not set, incoming v4
+ * packets should prefer the AF_INET socket(s).  This behavior was defined with
+ * the original SO_REUSEPORT implementation, but broke with
+ * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection").
+ * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
+ * AF_INET preference for v4 packets.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/in.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+static const int PORT = 8888;
+
+static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
+{
+	struct sockaddr_storage addr;
+	struct sockaddr_in  *addr4;
+	struct sockaddr_in6 *addr6;
+	int opt, i;
+
+	switch (family) {
+	case AF_INET:
+		addr4 = (struct sockaddr_in *)&addr;
+		addr4->sin_family = AF_INET;
+		addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+		addr4->sin_port = htons(PORT);
+		break;
+	case AF_INET6:
+		addr6 = (struct sockaddr_in6 *)&addr;
+		addr6->sin6_family = AF_INET6;
+		addr6->sin6_addr = in6addr_any;
+		addr6->sin6_port = htons(PORT);
+		break;
+	default:
+		error(1, 0, "Unsupported family %d", family);
+	}
+
+	for (i = 0; i < count; ++i) {
+		rcv_fds[i] = socket(family, proto, 0);
+		if (rcv_fds[i] < 0)
+			error(1, errno, "failed to create receive socket");
+
+		opt = 1;
+		if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
+			       sizeof(opt)))
+			error(1, errno, "failed to set SO_REUSEPORT");
+
+		if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
+			error(1, errno, "failed to bind receive socket");
+
+		if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
+			error(1, errno, "failed to listen on receive port");
+	}
+}
+
+static void send_from_v4(int proto)
+{
+	struct sockaddr_in  saddr, daddr;
+	int fd;
+
+	saddr.sin_family = AF_INET;
+	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+	saddr.sin_port = 0;
+
+	daddr.sin_family = AF_INET;
+	daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+	daddr.sin_port = htons(PORT);
+
+	fd = socket(AF_INET, proto, 0);
+	if (fd < 0)
+		error(1, errno, "failed to create send socket");
+
+	if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
+		error(1, errno, "failed to bind send socket");
+
+	if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
+		error(1, errno, "failed to connect send socket");
+
+	if (send(fd, "a", 1, 0) < 0)
+		error(1, errno, "failed to send message");
+
+	close(fd);
+}
+
+static int receive_once(int epfd, int proto)
+{
+	struct epoll_event ev;
+	int i, fd;
+	char buf[8];
+
+	i = epoll_wait(epfd, &ev, 1, -1);
+	if (i < 0)
+		error(1, errno, "epoll_wait failed");
+
+	if (proto == SOCK_STREAM) {
+		fd = accept(ev.data.fd, NULL, NULL);
+		if (fd < 0)
+			error(1, errno, "failed to accept");
+		i = recv(fd, buf, sizeof(buf), 0);
+		close(fd);
+	} else {
+		i = recv(ev.data.fd, buf, sizeof(buf), 0);
+	}
+
+	if (i < 0)
+		error(1, errno, "failed to recv");
+
+	return ev.data.fd;
+}
+
+static void test(int *rcv_fds, int count, int proto)
+{
+	struct epoll_event ev;
+	int epfd, i, test_fd;
+	uint16_t test_family;
+	socklen_t len = sizeof(test_family);
+
+	epfd = epoll_create(1);
+	if (epfd < 0)
+		error(1, errno, "failed to create epoll");
+
+	ev.events = EPOLLIN;
+	for (i = 0; i < count; ++i) {
+		ev.data.fd = rcv_fds[i];
+		if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
+			error(1, errno, "failed to register sock epoll");
+	}
+
+	send_from_v4(proto);
+
+	test_fd = receive_once(epfd, proto);
+	if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
+		error(1, errno, "failed to read socket domain");
+	if (test_family != AF_INET)
+		error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
+		      test_family);
+
+	close(epfd);
+}
+
+int main(void)
+{
+	int rcv_fds[32], i;
+
+	fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
+	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_DGRAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_DGRAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	/* NOTE: UDP socket lookups traverse a different code path when there
+	 * are > 10 sockets in a group.
+	 */
+	fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
+	build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
+	test(rcv_fds, 32, SOCK_DGRAM);
+	for (i = 0; i < 32; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
+	build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
+	build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
+	test(rcv_fds, 32, SOCK_DGRAM);
+	for (i = 0; i < 32; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
+	build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_STREAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
+	build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
+	build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
+	test(rcv_fds, 10, SOCK_STREAM);
+	for (i = 0; i < 10; ++i)
+		close(rcv_fds[i]);
+
+	fprintf(stderr, "SUCCESS\n");
+	return 0;
+}
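
For contrast with the mixed AF_INET/AF_INET6 case the test above exercises, the unambiguous configuration its header comment mentions is an AF_INET6 socket with IPV6_V6ONLY set, which never receives v4 packets and so needs no cross-family preference. A minimal sketch, not part of this patch; the port number and the reduced error handling are purely illustrative:

#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Bind a v6-only UDP socket on the v6 wildcard; v4 traffic can never land here. */
static int bind_v6only_udp(int port)
{
	struct sockaddr_in6 addr;
	int fd, one = 1;

	fd = socket(AF_INET6, SOCK_DGRAM, 0);
	if (fd < 0)
		error(1, errno, "socket");
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one)))
		error(1, errno, "IPV6_V6ONLY");
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)))
		error(1, errno, "SO_REUSEPORT");

	memset(&addr, 0, sizeof(addr));
	addr.sin6_family = AF_INET6;
	addr.sin6_addr = in6addr_any;
	addr.sin6_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)))
		error(1, errno, "bind");
	return fd;
}

int main(void)
{
	int fd = bind_v6only_udp(8888);

	close(fd);
	return 0;
}
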
diff --git a/tools/testing/selftests/rcutorture/bin/jitter.sh b/tools/testing/selftests/rcutorture/bin/jitter.sh
new file mode 100755
index 0000000..3633828
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/jitter.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+#
+# Alternate sleeping and spinning on randomly selected CPUs.  The purpose
+# of this script is to inflict random OS jitter on a concurrently running
+# test.
+#
+# Usage: jitter.sh me duration [ sleepmax [ spinmax ] ]
+#
+# me: Random-number-generator seed salt.
+# duration: Time to run in seconds.
+# sleepmax: Maximum microseconds to sleep, defaults to one second.
+# spinmax: Maximum microseconds to spin, defaults to one millisecond.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2016
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+me=$(($1 * 1000))
+duration=$2
+sleepmax=${3-1000000}
+spinmax=${4-1000}
+
+n=1
+
+starttime=`awk 'BEGIN { print systime(); }' < /dev/null`
+
+while :
+do
+	# Check for done.
+	t=`awk -v s=$starttime 'BEGIN { print systime() - s; }' < /dev/null`
+	if test "$t" -gt "$duration"
+	then
+		exit 0;
+	fi
+
+	# Set affinity to randomly selected CPU
+	cpus=`ls /sys/devices/system/cpu/*/online |
+		sed -e 's,/[^/]*$,,' -e 's/^[^0-9]*//' |
+		grep -v '^0*$'`
+	cpumask=`awk -v cpus="$cpus" -v me=$me -v n=$n 'BEGIN {
+		srand(n + me + systime());
+		ncpus = split(cpus, ca);
+		curcpu = ca[int(rand() * ncpus + 1)];
+		mask = lshift(1, curcpu);
+		if (mask + 0 <= 0)
+			mask = 1;
+		printf("%#x\n", mask);
+	}' < /dev/null`
+	n=$(($n+1))
+	if ! taskset -p $cpumask $$ > /dev/null 2>&1
+	then
+		echo taskset failure: '"taskset -p ' $cpumask $$ '"'
+		exit 1
+	fi
+
+	# Sleep a random duration
+	sleeptime=`awk -v me=$me -v n=$n -v sleepmax=$sleepmax 'BEGIN {
+		srand(n + me + systime());
+		printf("%06d", int(rand() * sleepmax));
+	}' < /dev/null`
+	n=$(($n+1))
+	sleep .$sleeptime
+
+	# Spin a random duration
+	limit=`awk -v me=$me -v n=$n -v spinmax=$spinmax 'BEGIN {
+		srand(n + me + systime());
+		printf("%06d", int(rand() * spinmax));
+	}' < /dev/null`
+	n=$(($n+1))
+	for ((i = 0; i < limit; i++))
+	do
+		echo > /dev/null
+	done
+done
+
+exit 1
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
new file mode 100755
index 0000000..f79b0e9
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+#
+# Analyze a given results directory for rcuperf performance measurements,
+# looking for ftrace data.  Exits with 0 if data was found, analyzed, and
+# printed.  Intended to be invoked from kvm-recheck-rcuperf.sh after
+# argument checking.
+#
+# Usage: kvm-recheck-rcuperf-ftrace.sh resdir
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2016
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+i="$1"
+. tools/testing/selftests/rcutorture/bin/functions.sh
+
+if test "`grep -c 'rcu_exp_grace_period.*start' < $i/console.log`" -lt 100
+then
+	exit 10
+fi
+
+sed -e 's/^\[[^]]*]//' < $i/console.log |
+grep 'us : rcu_exp_grace_period' |
+sed -e 's/us : / : /' |
+tr -d '\015' |
+awk '
+$8 == "start" {
+	if (starttask != "")
+		nlost++;
+	starttask = $1;
+	starttime = $3;
+	startseq = $7;
+}
+
+$8 == "end" {
+	if (starttask == $1 && startseq == $7) {
+		curgpdur = $3 - starttime;
+		gptimes[++n] = curgpdur;
+		gptaskcnt[starttask]++;
+		sum += curgpdur;
+		if (curgpdur > 1000)
+			print "Long GP " starttime "us to " $3 "us (" curgpdur "us)";
+		starttask = "";
+	} else {
+		# Lost a message or some such, reset.
+		starttask = "";
+		nlost++;
+	}
+}
+
+$8 == "done" {
+	piggybackcnt[$1]++;
+}
+
+END {
+	newNR = asort(gptimes);
+	if (newNR <= 0) {
+		print "No ftrace records found???"
+		exit 10;
+	}
+	pct50 = int(newNR * 50 / 100);
+	if (pct50 < 1)
+		pct50 = 1;
+	pct90 = int(newNR * 90 / 100);
+	if (pct90 < 1)
+		pct90 = 1;
+	pct99 = int(newNR * 99 / 100);
+	if (pct99 < 1)
+		pct99 = 1;
+	div = 10 ** int(log(gptimes[pct90]) / log(10) + .5) / 100;
+	print "Histogram bucket size: " div;
+	last = gptimes[1] - 10;
+	count = 0;
+	for (i = 1; i <= newNR; i++) {
+		current = div * int(gptimes[i] / div);
+		if (last == current) {
+			count++;
+		} else {
+			if (count > 0)
+				print last, count;
+			count = 1;
+			last = current;
+		}
+	}
+	if (count > 0)
+		print last, count;
+	print "Distribution of grace periods across tasks:";
+	for (i in gptaskcnt) {
+		print "\t" i, gptaskcnt[i];
+		nbatches += gptaskcnt[i];
+	}
+	ngps = nbatches;
+	print "Distribution of piggybacking across tasks:";
+	for (i in piggybackcnt) {
+		print "\t" i, piggybackcnt[i];
+		ngps += piggybackcnt[i];
+	}
+	print "Average grace-period duration: " sum / newNR " microseconds";
+	print "Minimum grace-period duration: " gptimes[1];
+	print "50th percentile grace-period duration: " gptimes[pct50];
+	print "90th percentile grace-period duration: " gptimes[pct90];
+	print "99th percentile grace-period duration: " gptimes[pct99];
+	print "Maximum grace-period duration: " gptimes[newNR];
+	print "Grace periods: " ngps + 0 " Batches: " nbatches + 0 " Ratio: " ngps / nbatches " Lost: " nlost + 0;
+	print "Computed from ftrace data.";
+}'
+exit 0
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
new file mode 100755
index 0000000..8f3121a
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+#
+# Analyze a given results directory for rcuperf performance measurements.
+#
+# Usage: kvm-recheck-rcuperf.sh resdir
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2016
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+i="$1"
+if test -d $i
+then
+	:
+else
+	echo Unreadable results directory: $i
+	exit 1
+fi
+PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
+. tools/testing/selftests/rcutorture/bin/functions.sh
+
+if kvm-recheck-rcuperf-ftrace.sh $i
+then
+	# ftrace data was successfully analyzed, call it good!
+	exit 0
+fi
+
+configfile=`echo $i | sed -e 's/^.*\///'`
+
+sed -e 's/^\[[^]]*]//' < $i/console.log |
+awk '
+/-perf: .* gps: .* batches:/ {
+	ngps = $9;
+	nbatches = $11;
+}
+
+/-perf: .*writer-duration/ {
+	gptimes[++n] = $5 / 1000.;
+	sum += $5 / 1000.;
+}
+
+END {
+	newNR = asort(gptimes);
+	if (newNR <= 0) {
+		print "No rcuperf records found???"
+		exit;
+	}
+	pct50 = int(newNR * 50 / 100);
+	if (pct50 < 1)
+		pct50 = 1;
+	pct90 = int(newNR * 90 / 100);
+	if (pct90 < 1)
+		pct90 = 1;
+	pct99 = int(newNR * 99 / 100);
+	if (pct99 < 1)
+		pct99 = 1;
+	div = 10 ** int(log(gptimes[pct90]) / log(10) + .5) / 100;
+	print "Histogram bucket size: " div;
+	last = gptimes[1] - 10;
+	count = 0;
+	for (i = 1; i <= newNR; i++) {
+		current = div * int(gptimes[i] / div);
+		if (last == current) {
+			count++;
+		} else {
+			if (count > 0)
+				print last, count;
+			count = 1;
+			last = current;
+		}
+	}
+	if (count > 0)
+		print last, count;
+	print "Average grace-period duration: " sum / newNR " microseconds";
+	print "Minimum grace-period duration: " gptimes[1];
+	print "50th percentile grace-period duration: " gptimes[pct50];
+	print "90th percentile grace-period duration: " gptimes[pct90];
+	print "99th percentile grace-period duration: " gptimes[pct99];
+	print "Maximum grace-period duration: " gptimes[newNR];
+	print "Grace periods: " ngps + 0 " Batches: " nbatches + 0 " Ratio: " ngps / nbatches;
+	print "Computed from rcuperf printk output.";
+}'
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index d86bdd6..f659346 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -48,7 +48,10 @@
 				cat $i/Make.oldconfig.err
 			fi
 			parse-build.sh $i/Make.out $configfile
-			parse-torture.sh $i/console.log $configfile
+			if test "$TORTURE_SUITE" != rcuperf
+			then
+				parse-torture.sh $i/console.log $configfile
+			fi
 			parse-console.sh $i/console.log $configfile
 			if test -r $i/Warnings
 			then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 0f80eef..4109f30 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -6,7 +6,7 @@
 # Execute this in the source tree.  Do not run it as a background task
 # because qemu does not seem to like that much.
 #
-# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
+# Usage: kvm-test-1-run.sh config builddir resdir seconds qemu-args boot_args
 #
 # qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
 #			arguments specifying the number of CPUs and other
@@ -91,25 +91,33 @@
 # CONFIG_PCMCIA=n
 # CONFIG_CARDBUS=n
 # CONFIG_YENTA=n
-if kvm-build.sh $config_template $builddir $T
+base_resdir=`echo $resdir | sed -e 's/\.[0-9]\+$//'`
+if test "$base_resdir" != "$resdir" -a -f $base_resdir/bzImage -a -f $base_resdir/vmlinux
 then
+	# Rerunning previous test, so use that test's kernel.
+	QEMU="`identify_qemu $base_resdir/vmlinux`"
+	KERNEL=$base_resdir/bzImage
+	ln -s $base_resdir/Make*.out $resdir  # for kvm-recheck.sh
+	ln -s $base_resdir/.config $resdir  # for kvm-recheck.sh
+elif kvm-build.sh $config_template $builddir $T
+then
+	# Had to build a kernel for this test.
 	QEMU="`identify_qemu $builddir/vmlinux`"
 	BOOT_IMAGE="`identify_boot_image $QEMU`"
 	cp $builddir/Make*.out $resdir
+	cp $builddir/vmlinux $resdir
 	cp $builddir/.config $resdir
 	if test -n "$BOOT_IMAGE"
 	then
 		cp $builddir/$BOOT_IMAGE $resdir
+		KERNEL=$resdir/bzImage
 	else
 		echo No identifiable boot image, not running KVM, see $resdir.
 		echo Do the torture scripts know about your architecture?
 	fi
 	parse-build.sh $resdir/Make.out $title
-	if test -f $builddir.wait
-	then
-		mv $builddir.wait $builddir.ready
-	fi
 else
+	# Build failed.
 	cp $builddir/Make*.out $resdir
 	cp $builddir/.config $resdir || :
 	echo Build failed, not running KVM, see $resdir.
@@ -119,12 +127,15 @@
 	fi
 	exit 1
 fi
+if test -f $builddir.wait
+then
+	mv $builddir.wait $builddir.ready
+fi
 while test -f $builddir.ready
 do
 	sleep 1
 done
-minutes=$4
-seconds=$(($minutes * 60))
+seconds=$4
 qemu_args=$5
 boot_args=$6
 
@@ -167,15 +178,26 @@
 	exit 0
 fi
 echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log
-echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
-( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
-qemu_pid=$!
+echo $QEMU $qemu_args -m 512 -kernel $KERNEL -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
+( $QEMU $qemu_args -m 512 -kernel $KERNEL -append "$qemu_append $boot_args"& echo $! > $resdir/qemu_pid; wait `cat  $resdir/qemu_pid`; echo $? > $resdir/qemu-retval ) &
 commandcompleted=0
-echo Monitoring qemu job at pid $qemu_pid
+sleep 10 # Give qemu's pid a chance to reach the file
+if test -s "$resdir/qemu_pid"
+then
+	qemu_pid=`cat "$resdir/qemu_pid"`
+	echo Monitoring qemu job at pid $qemu_pid
+else
+	qemu_pid=""
+	echo Monitoring qemu job at as-yet-unknown pid
+fi
 while :
 do
+	if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+	then
+		qemu_pid=`cat "$resdir/qemu_pid"`
+	fi
 	kruntime=`awk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
-	if kill -0 $qemu_pid > /dev/null 2>&1
+	if test -z "$qemu_pid" || kill -0 "$qemu_pid" > /dev/null 2>&1
 	then
 		if test $kruntime -ge $seconds
 		then
@@ -195,12 +217,16 @@
 				ps -fp $killpid >> $resdir/Warnings 2>&1
 			fi
 		else
-			echo ' ---' `date`: Kernel done
+			echo ' ---' `date`: "Kernel done"
 		fi
 		break
 	fi
 done
-if test $commandcompleted -eq 0
+if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+then
+	qemu_pid=`cat "$resdir/qemu_pid"`
+fi
+if test $commandcompleted -eq 0 -a -n "$qemu_pid"
 then
 	echo Grace period for qemu job at pid $qemu_pid
 	while :
@@ -220,6 +246,9 @@
 		fi
 		sleep 1
 	done
+elif test -z "$qemu_pid"
+then
+	echo Unknown PID, cannot kill qemu command
 fi
 
 parse-torture.sh $resdir/console.log $title
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 4a43176..0d59814 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -34,7 +34,7 @@
 trap 'rm -rf $T' 0
 mkdir $T
 
-dur=30
+dur=$((30*60))
 dryrun=""
 KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
 PATH=${KVM}/bin:$PATH; export PATH
@@ -48,6 +48,7 @@
 configs=""
 cpus=0
 ds=`date +%Y.%m.%d-%H:%M:%S`
+jitter=0
 
 . functions.sh
 
@@ -63,6 +64,7 @@
 	echo "       --dryrun sched|script"
 	echo "       --duration minutes"
 	echo "       --interactive"
+	echo "       --jitter N [ maxsleep (us) [ maxspin (us) ] ]"
 	echo "       --kmake-arg kernel-make-arguments"
 	echo "       --mac nn:nn:nn:nn:nn:nn"
 	echo "       --no-initrd"
@@ -116,12 +118,17 @@
 		;;
 	--duration)
 		checkarg --duration "(minutes)" $# "$2" '^[0-9]*$' '^error'
-		dur=$2
+		dur=$(($2*60))
 		shift
 		;;
 	--interactive)
 		TORTURE_QEMU_INTERACTIVE=1; export TORTURE_QEMU_INTERACTIVE
 		;;
+	--jitter)
+		checkarg --jitter "(# threads [ sleep [ spin ] ])" $# "$2" '^-\{,1\}[0-9]\+\( \+[0-9]\+\)\{,2\} *$' '^error$'
+		jitter="$2"
+		shift
+		;;
 	--kmake-arg)
 		checkarg --kmake-arg "(kernel make arguments)" $# "$2" '.*' '^error$'
 		TORTURE_KMAKE_ARG="$2"
@@ -156,7 +163,7 @@
 		shift
 		;;
 	--torture)
-		checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\)$' '^--'
+		checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\|rcuperf\)$' '^--'
 		TORTURE_SUITE=$2
 		shift
 		;;
@@ -299,6 +306,7 @@
 	-v CONFIGDIR="$CONFIGFRAG/" \
 	-v KVM="$KVM" \
 	-v ncpus=$cpus \
+	-v jitter="$jitter" \
 	-v rd=$resdir/$ds/ \
 	-v dur=$dur \
 	-v TORTURE_QEMU_ARG="$TORTURE_QEMU_ARG" \
@@ -359,6 +367,16 @@
 		print "\techo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log";
 		print "fi"
 	}
+	njitter = 0;
+	split(jitter, ja);
+	if (ja[1] == -1 && ncpus == 0)
+		njitter = 1;
+	else if (ja[1] == -1)
+		njitter = ncpus;
+	else
+		njitter = ja[1];
+	for (j = 0; j < njitter; j++)
+		print "jitter.sh " j " " dur " " ja[2] " " ja[3] "&"
 	print "wait"
 	print "if test -z \"$TORTURE_BUILDONLY\""
 	print "then"
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
index 39a2c6d..17cbe09 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
@@ -14,7 +14,7 @@
 CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
 CONFIG_RCU_FANOUT=4
-CONFIG_RCU_FANOUT_LEAF=4
+CONFIG_RCU_FANOUT_LEAF=3
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
index 0fc8a34..e34c334 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
@@ -1 +1 @@
-rcutorture.torture_type=rcu_bh
+rcutorture.torture_type=rcu_bh rcutree.rcu_fanout_leaf=4
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/CFLIST b/tools/testing/selftests/rcutorture/configs/rcuperf/CFLIST
new file mode 100644
index 0000000..c9f56cf
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/CFLIST
@@ -0,0 +1 @@
+TREE
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/CFcommon b/tools/testing/selftests/rcutorture/configs/rcuperf/CFcommon
new file mode 100644
index 0000000..a09816b
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/CFcommon
@@ -0,0 +1,2 @@
+CONFIG_RCU_PERF_TEST=y
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/TREE b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE
new file mode 100644
index 0000000..a312f67
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE
@@ -0,0 +1,20 @@
+CONFIG_SMP=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_LOCKING=n
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TRACE=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/TREE54 b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE54
new file mode 100644
index 0000000..985fb17
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE54
@@ -0,0 +1,23 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=54
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_FANOUT=3
+CONFIG_RCU_FANOUT_LEAF=2
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_LOCKING=n
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TRACE=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
new file mode 100644
index 0000000..34f2a1b
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+#
+# Torture-suite-dependent shell functions for the rest of the scripts.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2015
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+# rcuperf_param_nreaders bootparam-string
+#
+# Adds nreaders rcuperf module parameter if not already specified.
+rcuperf_param_nreaders () {
+	if ! echo "$1" | grep -q "rcuperf.nreaders"
+	then
+		echo rcuperf.nreaders=-1
+	fi
+}
+
+# rcuperf_param_nwriters bootparam-string
+#
+# Adds nwriters rcuperf module parameter if not already specified.
+rcuperf_param_nwriters () {
+	if ! echo "$1" | grep -q "rcuperf.nwriters"
+	then
+		echo rcuperf.nwriters=-1
+	fi
+}
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+	echo $1 `rcuperf_param_nreaders "$1"` \
+		`rcuperf_param_nwriters "$1"` \
+		rcuperf.perf_runnable=1 \
+		rcuperf.shutdown=1 \
+		rcuperf.verbose=1
+}
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index b9453b8..7947e56 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -5,6 +5,7 @@
  * Test code for seccomp bpf.
  */
 
+#include <sys/types.h>
 #include <asm/siginfo.h>
 #define __have_siginfo_t 1
 #define __have_sigval_t 1
@@ -14,7 +15,6 @@
 #include <linux/filter.h>
 #include <sys/prctl.h>
 #include <sys/ptrace.h>
-#include <sys/types.h>
 #include <sys/user.h>
 #include <linux/prctl.h>
 #include <linux/ptrace.h>
@@ -1242,6 +1242,12 @@
 # define ARCH_REGS     s390_regs
 # define SYSCALL_NUM   gprs[2]
 # define SYSCALL_RET   gprs[2]
+#elif defined(__mips__)
+# define ARCH_REGS	struct pt_regs
+# define SYSCALL_NUM	regs[2]
+# define SYSCALL_SYSCALL_NUM regs[4]
+# define SYSCALL_RET	regs[2]
+# define SYSCALL_NUM_RET_SHARE_REG
 #else
 # error "Do not know how to find your architecture's registers and syscalls"
 #endif
@@ -1249,7 +1255,7 @@
 /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
  * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
  */
-#if defined(__x86_64__) || defined(__i386__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__mips__)
 #define HAVE_GETREGS
 #endif
 
@@ -1273,6 +1279,10 @@
 	}
 #endif
 
+#if defined(__mips__)
+	if (regs.SYSCALL_NUM == __NR_O32_Linux)
+		return regs.SYSCALL_SYSCALL_NUM;
+#endif
 	return regs.SYSCALL_NUM;
 }
 
@@ -1297,6 +1307,13 @@
 	{
 		regs.SYSCALL_NUM = syscall;
 	}
+#elif defined(__mips__)
+	{
+		if (regs.SYSCALL_NUM == __NR_O32_Linux)
+			regs.SYSCALL_SYSCALL_NUM = syscall;
+		else
+			regs.SYSCALL_NUM = syscall;
+	}
 
 #elif defined(__arm__)
 # ifndef PTRACE_SET_SYSCALL
@@ -1327,7 +1344,11 @@
 
 	/* If syscall is skipped, change return value. */
 	if (syscall == -1)
+#ifdef SYSCALL_NUM_RET_SHARE_REG
+		TH_LOG("Can't modify syscall return on this architecture");
+#else
 		regs.SYSCALL_RET = 1;
+#endif
 
 #ifdef HAVE_GETREGS
 	ret = ptrace(PTRACE_SETREGS, tracee, 0, &regs);
@@ -1465,8 +1486,13 @@
 	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
 	ASSERT_EQ(0, ret);
 
+#ifdef SYSCALL_NUM_RET_SHARE_REG
+	/* gettid has been skipped */
+	EXPECT_EQ(-1, syscall(__NR_gettid));
+#else
 	/* gettid has been skipped and an altered return value stored. */
 	EXPECT_EQ(1, syscall(__NR_gettid));
+#endif
 	EXPECT_NE(self->mytid, syscall(__NR_gettid));
 }
 
@@ -1497,15 +1523,15 @@
 #define SECCOMP_SET_MODE_FILTER 1
 #endif
 
-#ifndef SECCOMP_FLAG_FILTER_TSYNC
-#define SECCOMP_FLAG_FILTER_TSYNC 1
+#ifndef SECCOMP_FILTER_FLAG_TSYNC
+#define SECCOMP_FILTER_FLAG_TSYNC 1
 #endif
 
 #ifndef seccomp
-int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter)
+int seccomp(unsigned int op, unsigned int flags, void *args)
 {
 	errno = 0;
-	return syscall(__NR_seccomp, op, flags, filter);
+	return syscall(__NR_seccomp, op, flags, args);
 }
 #endif
 
@@ -1613,7 +1639,7 @@
 		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &prog);
 	ASSERT_NE(ENOSYS, errno) {
 		TH_LOG("Kernel does not support seccomp syscall!");
@@ -1831,7 +1857,7 @@
 		self->sibling_count++;
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(0, ret) {
 		TH_LOG("Could install filter on all threads!");
@@ -1892,7 +1918,7 @@
 		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_NE(ENOSYS, errno) {
 		TH_LOG("Kernel does not support seccomp syscall!");
@@ -1940,7 +1966,7 @@
 		self->sibling_count++;
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(self->sibling[0].system_tid, ret) {
 		TH_LOG("Did not fail on diverged sibling.");
@@ -1992,7 +2018,7 @@
 		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
 	}
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(ret, self->sibling[0].system_tid) {
 		TH_LOG("Did not fail on diverged sibling.");
@@ -2021,7 +2047,7 @@
 	/* Switch to the remaining sibling */
 	sib = !sib;
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(0, ret) {
 		TH_LOG("Expected the remaining sibling to sync");
@@ -2044,7 +2070,7 @@
 	while (!kill(self->sibling[sib].system_tid, 0))
 		sleep(0.1);
 
-	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
 		      &self->apply_prog);
 	ASSERT_EQ(0, ret);  /* just us chickens */
 }
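
The hunks above rename the test's local fallback define to match the UAPI spelling SECCOMP_FILTER_FLAG_TSYNC. As a rough sketch of what TSYNC does, the following standalone program installs a trivial allow-all filter on every thread of the calling process in one seccomp(2) call; on failure the call returns the TID of a sibling whose filters diverge, which is what the sibling tests above assert. It assumes kernel headers that define __NR_seccomp, SECCOMP_SET_MODE_FILTER and SECCOMP_FILTER_FLAG_TSYNC, and a libc without a seccomp() wrapper:

#include <errno.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};
	long ret;

	/* Required so an unprivileged process may install a filter. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		perror("PR_SET_NO_NEW_PRIVS");

	/* Attach the filter to all threads of this process at once. */
	ret = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		      SECCOMP_FILTER_FLAG_TSYNC, &prog);
	if (ret)
		fprintf(stderr, "seccomp(TSYNC): %s (ret=%ld)\n",
			strerror(errno), ret);
	return ret ? 1 : 0;
}
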
diff --git a/tools/testing/selftests/sigaltstack/Makefile b/tools/testing/selftests/sigaltstack/Makefile
new file mode 100644
index 0000000..56af56e
--- /dev/null
+++ b/tools/testing/selftests/sigaltstack/Makefile
@@ -0,0 +1,8 @@
+CFLAGS = -Wall
+BINARIES = sas
+all: $(BINARIES)
+
+include ../lib.mk
+
+clean:
+	rm -rf $(BINARIES)
diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
new file mode 100644
index 0000000..1bb0125
--- /dev/null
+++ b/tools/testing/selftests/sigaltstack/sas.c
@@ -0,0 +1,176 @@
+/*
+ * Stas Sergeev <stsp@users.sourceforge.net>
+ *
+ * test sigaltstack(SS_ONSTACK | SS_AUTODISARM)
+ * If that succeeds, then swapcontext() can be used inside sighandler safely.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <ucontext.h>
+#include <alloca.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#ifndef SS_AUTODISARM
+#define SS_AUTODISARM  (1U << 31)
+#endif
+
+static void *sstack, *ustack;
+static ucontext_t uc, sc;
+static const char *msg = "[OK]\tStack preserved";
+static const char *msg2 = "[FAIL]\tStack corrupted";
+struct stk_data {
+	char msg[128];
+	int flag;
+};
+
+void my_usr1(int sig, siginfo_t *si, void *u)
+{
+	char *aa;
+	int err;
+	stack_t stk;
+	struct stk_data *p;
+
+	register unsigned long sp asm("sp");
+
+	if (sp < (unsigned long)sstack ||
+			sp >= (unsigned long)sstack + SIGSTKSZ) {
+		printf("[FAIL]\tSP is not on sigaltstack\n");
+		exit(EXIT_FAILURE);
+	}
+	/* put some data on stack. other sighandler will try to overwrite it */
+	aa = alloca(1024);
+	assert(aa);
+	p = (struct stk_data *)(aa + 512);
+	strcpy(p->msg, msg);
+	p->flag = 1;
+	printf("[RUN]\tsignal USR1\n");
+	err = sigaltstack(NULL, &stk);
+	if (err) {
+		perror("[FAIL]\tsigaltstack()");
+		exit(EXIT_FAILURE);
+	}
+	if (stk.ss_flags != SS_DISABLE)
+		printf("[FAIL]\tss_flags=%i, should be SS_DISABLE\n",
+				stk.ss_flags);
+	else
+		printf("[OK]\tsigaltstack is disabled in sighandler\n");
+	swapcontext(&sc, &uc);
+	printf("%s\n", p->msg);
+	if (!p->flag) {
+		printf("[RUN]\tAborting\n");
+		exit(EXIT_FAILURE);
+	}
+}
+
+void my_usr2(int sig, siginfo_t *si, void *u)
+{
+	char *aa;
+	struct stk_data *p;
+
+	printf("[RUN]\tsignal USR2\n");
+	aa = alloca(1024);
+	/* don't run valgrind on this */
+	/* try to find the data stored by previous sighandler */
+	p = memmem(aa, 1024, msg, strlen(msg));
+	if (p) {
+		printf("[FAIL]\tsigaltstack re-used\n");
+		/* corrupt the data */
+		strcpy(p->msg, msg2);
+		/* tell other sighandler that his data is corrupted */
+		p->flag = 0;
+	}
+}
+
+static void switch_fn(void)
+{
+	printf("[RUN]\tswitched to user ctx\n");
+	raise(SIGUSR2);
+	setcontext(&sc);
+}
+
+int main(void)
+{
+	struct sigaction act;
+	stack_t stk;
+	int err;
+
+	sigemptyset(&act.sa_mask);
+	act.sa_flags = SA_ONSTACK | SA_SIGINFO;
+	act.sa_sigaction = my_usr1;
+	sigaction(SIGUSR1, &act, NULL);
+	act.sa_sigaction = my_usr2;
+	sigaction(SIGUSR2, &act, NULL);
+	sstack = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
+		      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+	if (sstack == MAP_FAILED) {
+		perror("mmap()");
+		return EXIT_FAILURE;
+	}
+
+	err = sigaltstack(NULL, &stk);
+	if (err) {
+		perror("[FAIL]\tsigaltstack()");
+		exit(EXIT_FAILURE);
+	}
+	if (stk.ss_flags == SS_DISABLE) {
+		printf("[OK]\tInitial sigaltstack state was SS_DISABLE\n");
+	} else {
+		printf("[FAIL]\tInitial sigaltstack state was %i; should have been SS_DISABLE\n", stk.ss_flags);
+		return EXIT_FAILURE;
+	}
+
+	stk.ss_sp = sstack;
+	stk.ss_size = SIGSTKSZ;
+	stk.ss_flags = SS_ONSTACK | SS_AUTODISARM;
+	err = sigaltstack(&stk, NULL);
+	if (err) {
+		if (errno == EINVAL) {
+			printf("[NOTE]\tThe running kernel doesn't support SS_AUTODISARM\n");
+			/*
+			 * If test cases for the !SS_AUTODISARM variant were
+			 * added, we could still run them.  We don't have any
+			 * test cases like that yet, so just exit and report
+			 * success.
+			 */
+			return 0;
+		} else {
+			perror("[FAIL]\tsigaltstack(SS_ONSTACK | SS_AUTODISARM)");
+			return EXIT_FAILURE;
+		}
+	}
+
+	ustack = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
+		      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+	if (ustack == MAP_FAILED) {
+		perror("mmap()");
+		return EXIT_FAILURE;
+	}
+	getcontext(&uc);
+	uc.uc_link = NULL;
+	uc.uc_stack.ss_sp = ustack;
+	uc.uc_stack.ss_size = SIGSTKSZ;
+	makecontext(&uc, switch_fn, 0);
+	raise(SIGUSR1);
+
+	err = sigaltstack(NULL, &stk);
+	if (err) {
+		perror("[FAIL]\tsigaltstack()");
+		exit(EXIT_FAILURE);
+	}
+	if (stk.ss_flags != SS_AUTODISARM) {
+		printf("[FAIL]\tss_flags=%i, should be SS_AUTODISARM\n",
+				stk.ss_flags);
+		exit(EXIT_FAILURE);
+	}
+	printf("[OK]\tsigaltstack is still SS_AUTODISARM after signal\n");
+
+	printf("[OK]\tTest passed\n");
+	return 0;
+}
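
The core of the test above is the registration step: arming an alternate stack with SS_ONSTACK | SS_AUTODISARM so the stack is disarmed while a handler runs on it, which is what makes swapcontext() from a signal handler safe. A stripped-down sketch of just that step (the flag is defined locally in case libc headers predate it; EINVAL means the running kernel lacks SS_AUTODISARM, the same graceful-exit case the test handles):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef SS_AUTODISARM
#define SS_AUTODISARM	(1U << 31)
#endif

int main(void)
{
	stack_t stk;

	stk.ss_sp = malloc(SIGSTKSZ);
	if (!stk.ss_sp) {
		perror("malloc");
		return EXIT_FAILURE;
	}
	stk.ss_size = SIGSTKSZ;
	stk.ss_flags = SS_ONSTACK | SS_AUTODISARM;

	if (sigaltstack(&stk, NULL)) {
		/* EINVAL here usually means the kernel lacks SS_AUTODISARM. */
		perror("sigaltstack(SS_ONSTACK | SS_AUTODISARM)");
		return EXIT_FAILURE;
	}
	printf("alternate stack armed with SS_AUTODISARM\n");
	return 0;
}
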
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index b47ebd1..c73425de 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -9,6 +9,7 @@
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
 			test_FCMOV test_FCOMI test_FISTTP \
 			vdso_restorer
+TARGETS_C_64BIT_ONLY := fsgsbase
 
 TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
 TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
new file mode 100644
index 0000000..5b2b4b3
--- /dev/null
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -0,0 +1,398 @@
+/*
+ * fsgsbase.c, an fsgsbase test
+ * Copyright (c) 2014-2016 Andy Lutomirski
+ * GPL v2
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <err.h>
+#include <sys/user.h>
+#include <asm/prctl.h>
+#include <sys/prctl.h>
+#include <signal.h>
+#include <limits.h>
+#include <sys/ucontext.h>
+#include <sched.h>
+#include <linux/futex.h>
+#include <pthread.h>
+#include <asm/ldt.h>
+#include <sys/mman.h>
+
+#ifndef __x86_64__
+# error This test is 64-bit only
+#endif
+
+static volatile sig_atomic_t want_segv;
+static volatile unsigned long segv_addr;
+
+static int nerrs;
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+		       int flags)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_sigaction = handler;
+	sa.sa_flags = SA_SIGINFO | flags;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+static void clearhandler(int sig)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_DFL;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+static void sigsegv(int sig, siginfo_t *si, void *ctx_void)
+{
+	ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+	if (!want_segv) {
+		clearhandler(SIGSEGV);
+		return;  /* Crash cleanly. */
+	}
+
+	want_segv = false;
+	segv_addr = (unsigned long)si->si_addr;
+
+	ctx->uc_mcontext.gregs[REG_RIP] += 4;	/* Skip the faulting mov */
+
+}
+
+enum which_base { FS, GS };
+
+static unsigned long read_base(enum which_base which)
+{
+	unsigned long offset;
+	/*
+	 * Unless we have FSGSBASE, there's no direct way to do this from
+	 * user mode.  We can get at it indirectly using signals, though.
+	 */
+
+	want_segv = true;
+
+	offset = 0;
+	if (which == FS) {
+		/* Use a constant-length instruction here. */
+		asm volatile ("mov %%fs:(%%rcx), %%rax" : : "c" (offset) : "rax");
+	} else {
+		asm volatile ("mov %%gs:(%%rcx), %%rax" : : "c" (offset) : "rax");
+	}
+	if (!want_segv)
+		return segv_addr + offset;
+
+	/*
+	 * If that didn't segfault, try the other end of the address space.
+	 * Unless we get really unlucky and run into the vsyscall page, this
+	 * is guaranteed to segfault.
+	 */
+
+	offset = (ULONG_MAX >> 1) + 1;
+	if (which == FS) {
+		asm volatile ("mov %%fs:(%%rcx), %%rax"
+			      : : "c" (offset) : "rax");
+	} else {
+		asm volatile ("mov %%gs:(%%rcx), %%rax"
+			      : : "c" (offset) : "rax");
+	}
+	if (!want_segv)
+		return segv_addr + offset;
+
+	abort();
+}
+
+static void check_gs_value(unsigned long value)
+{
+	unsigned long base;
+	unsigned short sel;
+
+	printf("[RUN]\tARCH_SET_GS to 0x%lx\n", value);
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, value) != 0)
+		err(1, "ARCH_SET_GS");
+
+	asm volatile ("mov %%gs, %0" : "=rm" (sel));
+	base = read_base(GS);
+	if (base == value) {
+		printf("[OK]\tGSBASE was set as expected (selector 0x%hx)\n",
+		       sel);
+	} else {
+		nerrs++;
+		printf("[FAIL]\tGSBASE was not as expected: got 0x%lx (selector 0x%hx)\n",
+		       base, sel);
+	}
+
+	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base) != 0)
+		err(1, "ARCH_GET_GS");
+	if (base == value) {
+		printf("[OK]\tARCH_GET_GS worked as expected (selector 0x%hx)\n",
+		       sel);
+	} else {
+		nerrs++;
+		printf("[FAIL]\tARCH_GET_GS was not as expected: got 0x%lx (selector 0x%hx)\n",
+		       base, sel);
+	}
+}
+
+static void mov_0_gs(unsigned long initial_base, bool schedule)
+{
+	unsigned long base, arch_base;
+
+	printf("[RUN]\tARCH_SET_GS to 0x%lx then mov 0 to %%gs%s\n", initial_base, schedule ? " and schedule " : "");
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, initial_base) != 0)
+		err(1, "ARCH_SET_GS");
+
+	if (schedule)
+		usleep(10);
+
+	asm volatile ("mov %0, %%gs" : : "rm" (0));
+	base = read_base(GS);
+	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &arch_base) != 0)
+		err(1, "ARCH_GET_GS");
+	if (base == arch_base) {
+		printf("[OK]\tGSBASE is 0x%lx\n", base);
+	} else {
+		nerrs++;
+		printf("[FAIL]\tGSBASE changed to 0x%lx but kernel reports 0x%lx\n", base, arch_base);
+	}
+}
+
+static volatile unsigned long remote_base;
+static volatile bool remote_hard_zero;
+static volatile unsigned int ftx;
+
+/*
+ * ARCH_SET_FS/GS(0) may or may not program a selector of zero.  HARD_ZERO
+ * means to force the selector to zero to improve test coverage.
+ */
+#define HARD_ZERO 0xa1fa5f343cb85fa4
+
+static void do_remote_base()
+{
+	unsigned long to_set = remote_base;
+	bool hard_zero = false;
+	if (to_set == HARD_ZERO) {
+		to_set = 0;
+		hard_zero = true;
+	}
+
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, to_set) != 0)
+		err(1, "ARCH_SET_GS");
+
+	if (hard_zero)
+		asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
+
+	unsigned short sel;
+	asm volatile ("mov %%gs, %0" : "=rm" (sel));
+	printf("\tother thread: ARCH_SET_GS(0x%lx)%s -- sel is 0x%hx\n",
+	       to_set, hard_zero ? " and clear gs" : "", sel);
+}
+
+void do_unexpected_base(void)
+{
+	/*
+	 * The goal here is to try to arrange for GS == 0, GSBASE !=
+	 * 0, and for the kernel to think that GSBASE == 0.
+	 *
+	 * To make the test as reliable as possible, this uses
+	 * explicit descriptors.  (This is not the only way.  This
+	 * could use ARCH_SET_GS with a low, nonzero base, but the
+	 * relevant side effect of ARCH_SET_GS could change.)
+	 */
+
+	/* Step 1: tell the kernel that we have GSBASE == 0. */
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0) != 0)
+		err(1, "ARCH_SET_GS");
+
+	/* Step 2: change GSBASE without telling the kernel. */
+	struct user_desc desc = {
+		.entry_number    = 0,
+		.base_addr       = 0xBAADF00D,
+		.limit           = 0xfffff,
+		.seg_32bit       = 1,
+		.contents        = 0, /* Data, grow-up */
+		.read_exec_only  = 0,
+		.limit_in_pages  = 1,
+		.seg_not_present = 0,
+		.useable         = 0
+	};
+	if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) == 0) {
+		printf("\tother thread: using LDT slot 0\n");
+		asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0x7));
+	} else {
+		/* No modify_ldt for us (configured out, perhaps) */
+
+		struct user_desc *low_desc = mmap(
+			NULL, sizeof(desc),
+			PROT_READ | PROT_WRITE,
+			MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
+		memcpy(low_desc, &desc, sizeof(desc));
+
+		low_desc->entry_number = -1;
+
+		/* 32-bit set_thread_area */
+		long ret;
+		asm volatile ("int $0x80"
+			      : "=a" (ret) : "a" (243), "b" (low_desc)
+			      : "flags");
+		memcpy(&desc, low_desc, sizeof(desc));
+		munmap(low_desc, sizeof(desc));
+
+		if (ret != 0) {
+			printf("[NOTE]\tcould not create a segment -- test won't do anything\n");
+			return;
+		}
+		printf("\tother thread: using GDT slot %d\n", desc.entry_number);
+		asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)((desc.entry_number << 3) | 0x3)));
+	}
+
+	/*
+	 * Step 3: set the selector back to zero.  On AMD chips, this will
+	 * preserve GSBASE.
+	 */
+
+	asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
+}
+
+static void *threadproc(void *ctx)
+{
+	while (1) {
+		while (ftx == 0)
+			syscall(SYS_futex, &ftx, FUTEX_WAIT, 0, NULL, NULL, 0);
+		if (ftx == 3)
+			return NULL;
+
+		if (ftx == 1)
+			do_remote_base();
+		else if (ftx == 2)
+			do_unexpected_base();
+		else
+			errx(1, "helper thread got bad command");
+
+		ftx = 0;
+		syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+	}
+}
+
+static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
+{
+	unsigned long base;
+
+	bool hard_zero = false;
+	if (local == HARD_ZERO) {
+		hard_zero = true;
+		local = 0;
+	}
+
+	printf("[RUN]\tARCH_SET_GS(0x%lx)%s, then schedule to 0x%lx\n",
+	       local, hard_zero ? " and clear gs" : "", remote);
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, local) != 0)
+		err(1, "ARCH_SET_GS");
+	if (hard_zero)
+		asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
+
+	if (read_base(GS) != local) {
+		nerrs++;
+		printf("[FAIL]\tGSBASE wasn't set as expected\n");
+	}
+
+	remote_base = remote;
+	ftx = 1;
+	syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+	while (ftx != 0)
+		syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
+
+	base = read_base(GS);
+	if (base == local) {
+		printf("[OK]\tGSBASE remained 0x%lx\n", local);
+	} else {
+		nerrs++;
+		printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
+	}
+}
+
+static void test_unexpected_base(void)
+{
+	unsigned long base;
+
+	printf("[RUN]\tARCH_SET_GS(0), clear gs, then manipulate GSBASE in a different thread\n");
+	if (syscall(SYS_arch_prctl, ARCH_SET_GS, 0) != 0)
+		err(1, "ARCH_SET_GS");
+	asm volatile ("mov %0, %%gs" : : "rm" ((unsigned short)0));
+
+	ftx = 2;
+	syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+	while (ftx != 0)
+		syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
+
+	base = read_base(GS);
+	if (base == 0) {
+		printf("[OK]\tGSBASE remained 0\n");
+	} else {
+		nerrs++;
+		printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
+	}
+}
+
+int main()
+{
+	pthread_t thread;
+
+	sethandler(SIGSEGV, sigsegv, 0);
+
+	check_gs_value(0);
+	check_gs_value(1);
+	check_gs_value(0x200000000);
+	check_gs_value(0);
+	check_gs_value(0x200000000);
+	check_gs_value(1);
+
+	for (int sched = 0; sched < 2; sched++) {
+		mov_0_gs(0, !!sched);
+		mov_0_gs(1, !!sched);
+		mov_0_gs(0x200000000, !!sched);
+	}
+
+	/* Set up for multithreading. */
+
+	cpu_set_t cpuset;
+	CPU_ZERO(&cpuset);
+	CPU_SET(0, &cpuset);
+	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
+		err(1, "sched_setaffinity to CPU 0");	/* should never fail */
+
+	if (pthread_create(&thread, 0, threadproc, 0) != 0)
+		err(1, "pthread_create");
+
+	static unsigned long bases_with_hard_zero[] = {
+		0, HARD_ZERO, 1, 0x200000000,
+	};
+
+	for (int local = 0; local < 4; local++) {
+		for (int remote = 0; remote < 4; remote++) {
+			set_gs_and_switch_to(bases_with_hard_zero[local],
+					     bases_with_hard_zero[remote]);
+		}
+	}
+
+	test_unexpected_base();
+
+	ftx = 3;  /* Kill the thread. */
+	syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+
+	if (pthread_join(thread, NULL) != 0)
+		err(1, "pthread_join");
+
+	return nerrs == 0 ? 0 : 1;
+}
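
The primitive this test leans on is arch_prctl(2) with ARCH_SET_GS/ARCH_GET_GS. A minimal round-trip sketch, x86-64 only, assuming SYS_arch_prctl is exposed by the toolchain headers (the raw syscall is used because glibc provides no wrapper):

#include <asm/prctl.h>
#include <err.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned long base = 0x200000000UL;	/* arbitrary test value */
	unsigned long readback = 0;

	if (syscall(SYS_arch_prctl, ARCH_SET_GS, base) != 0)
		err(1, "ARCH_SET_GS");
	if (syscall(SYS_arch_prctl, ARCH_GET_GS, &readback) != 0)
		err(1, "ARCH_GET_GS");

	printf("GSBASE set to 0x%lx, kernel reports 0x%lx\n", base, readback);
	return readback == base ? 0 : 1;
}
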
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 31a3035..4af4707 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -21,6 +21,9 @@
 #include <pthread.h>
 #include <sched.h>
 #include <linux/futex.h>
+#include <sys/mman.h>
+#include <asm/prctl.h>
+#include <sys/prctl.h>
 
 #define AR_ACCESSED		(1<<8)
 
@@ -44,6 +47,12 @@
 
 static int nerrs;
 
+/* Points to an array of 1024 ints, each holding its own index. */
+static const unsigned int *counter_page;
+static struct user_desc *low_user_desc;
+static struct user_desc *low_user_desc_clear;  /* Used to delete GDT entry */
+static int gdt_entry_num;
+
 static void check_invalid_segment(uint16_t index, int ldt)
 {
 	uint32_t has_limit = 0, has_ar = 0, limit, ar;
@@ -561,16 +570,257 @@
 	}
 }
 
+static void setup_counter_page(void)
+{
+	unsigned int *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+			 MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT, -1, 0);
+	if (page == MAP_FAILED)
+		err(1, "mmap");
+
+	for (int i = 0; i < 1024; i++)
+		page[i] = i;
+	counter_page = page;
+}
+
+static int invoke_set_thread_area(void)
+{
+	int ret;
+	asm volatile ("int $0x80"
+		      : "=a" (ret), "+m" (low_user_desc) :
+			"a" (243), "b" (low_user_desc)
+		      : "flags");
+	return ret;
+}
+
+static void setup_low_user_desc(void)
+{
+	low_user_desc = mmap(NULL, 2 * sizeof(struct user_desc),
+			     PROT_READ | PROT_WRITE,
+			     MAP_ANONYMOUS | MAP_PRIVATE | MAP_32BIT, -1, 0);
+	if (low_user_desc == MAP_FAILED)
+		err(1, "mmap");
+
+	low_user_desc->entry_number	= -1;
+	low_user_desc->base_addr	= (unsigned long)&counter_page[1];
+	low_user_desc->limit		= 0xfffff;
+	low_user_desc->seg_32bit	= 1;
+	low_user_desc->contents		= 0; /* Data, grow-up */
+	low_user_desc->read_exec_only	= 0;
+	low_user_desc->limit_in_pages	= 1;
+	low_user_desc->seg_not_present	= 0;
+	low_user_desc->useable		= 0;
+
+	if (invoke_set_thread_area() == 0) {
+		gdt_entry_num = low_user_desc->entry_number;
+		printf("[NOTE]\tset_thread_area is available; will use GDT index %d\n", gdt_entry_num);
+	} else {
+		printf("[NOTE]\tset_thread_area is unavailable\n");
+	}
+
+	low_user_desc_clear = low_user_desc + 1;
+	low_user_desc_clear->entry_number = gdt_entry_num;
+	low_user_desc_clear->read_exec_only = 1;
+	low_user_desc_clear->seg_not_present = 1;
+}
+
+static void test_gdt_invalidation(void)
+{
+	if (!gdt_entry_num)
+		return;	/* 64-bit only system -- we can't use set_thread_area */
+
+	unsigned short prev_sel;
+	unsigned short sel;
+	unsigned int eax;
+	const char *result;
+#ifdef __x86_64__
+	unsigned long saved_base;
+	unsigned long new_base;
+#endif
+
+	/* Test DS */
+	invoke_set_thread_area();
+	eax = 243;
+	sel = (gdt_entry_num << 3) | 3;
+	asm volatile ("movw %%ds, %[prev_sel]\n\t"
+		      "movw %[sel], %%ds\n\t"
+#ifdef __i386__
+		      "pushl %%ebx\n\t"
+#endif
+		      "movl %[arg1], %%ebx\n\t"
+		      "int $0x80\n\t"	/* Should invalidate ds */
+#ifdef __i386__
+		      "popl %%ebx\n\t"
+#endif
+		      "movw %%ds, %[sel]\n\t"
+		      "movw %[prev_sel], %%ds"
+		      : [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
+			"+a" (eax)
+		      : "m" (low_user_desc_clear),
+			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
+		      : "flags");
+
+	if (sel != 0) {
+		result = "FAIL";
+		nerrs++;
+	} else {
+		result = "OK";
+	}
+	printf("[%s]\tInvalidate DS with set_thread_area: new DS = 0x%hx\n",
+	       result, sel);
+
+	/* Test ES */
+	invoke_set_thread_area();
+	eax = 243;
+	sel = (gdt_entry_num << 3) | 3;
+	asm volatile ("movw %%es, %[prev_sel]\n\t"
+		      "movw %[sel], %%es\n\t"
+#ifdef __i386__
+		      "pushl %%ebx\n\t"
+#endif
+		      "movl %[arg1], %%ebx\n\t"
+		      "int $0x80\n\t"	/* Should invalidate es */
+#ifdef __i386__
+		      "popl %%ebx\n\t"
+#endif
+		      "movw %%es, %[sel]\n\t"
+		      "movw %[prev_sel], %%es"
+		      : [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
+			"+a" (eax)
+		      : "m" (low_user_desc_clear),
+			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
+		      : "flags");
+
+	if (sel != 0) {
+		result = "FAIL";
+		nerrs++;
+	} else {
+		result = "OK";
+	}
+	printf("[%s]\tInvalidate ES with set_thread_area: new ES = 0x%hx\n",
+	       result, sel);
+
+	/* Test FS */
+	invoke_set_thread_area();
+	eax = 243;
+	sel = (gdt_entry_num << 3) | 3;
+#ifdef __x86_64__
+	syscall(SYS_arch_prctl, ARCH_GET_FS, &saved_base);
+#endif
+	asm volatile ("movw %%fs, %[prev_sel]\n\t"
+		      "movw %[sel], %%fs\n\t"
+#ifdef __i386__
+		      "pushl %%ebx\n\t"
+#endif
+		      "movl %[arg1], %%ebx\n\t"
+		      "int $0x80\n\t"	/* Should invalidate fs */
+#ifdef __i386__
+		      "popl %%ebx\n\t"
+#endif
+		      "movw %%fs, %[sel]\n\t"
+		      : [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
+			"+a" (eax)
+		      : "m" (low_user_desc_clear),
+			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
+		      : "flags");
+
+#ifdef __x86_64__
+	syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base);
+#endif
+
+	/* Restore FS/BASE for glibc */
+	asm volatile ("movw %[prev_sel], %%fs" : : [prev_sel] "rm" (prev_sel));
+#ifdef __x86_64__
+	if (saved_base)
+		syscall(SYS_arch_prctl, ARCH_SET_FS, saved_base);
+#endif
+
+	if (sel != 0) {
+		result = "FAIL";
+		nerrs++;
+	} else {
+		result = "OK";
+	}
+	printf("[%s]\tInvalidate FS with set_thread_area: new FS = 0x%hx\n",
+	       result, sel);
+
+#ifdef __x86_64__
+	if (sel == 0 && new_base != 0) {
+		nerrs++;
+		printf("[FAIL]\tNew FSBASE was 0x%lx\n", new_base);
+	} else {
+		printf("[OK]\tNew FSBASE was zero\n");
+	}
+#endif
+
+	/* Test GS */
+	invoke_set_thread_area();
+	eax = 243;
+	sel = (gdt_entry_num << 3) | 3;
+#ifdef __x86_64__
+	syscall(SYS_arch_prctl, ARCH_GET_GS, &saved_base);
+#endif
+	asm volatile ("movw %%gs, %[prev_sel]\n\t"
+		      "movw %[sel], %%gs\n\t"
+#ifdef __i386__
+		      "pushl %%ebx\n\t"
+#endif
+		      "movl %[arg1], %%ebx\n\t"
+		      "int $0x80\n\t"	/* Should invalidate gs */
+#ifdef __i386__
+		      "popl %%ebx\n\t"
+#endif
+		      "movw %%gs, %[sel]\n\t"
+		      : [prev_sel] "=&r" (prev_sel), [sel] "+r" (sel),
+			"+a" (eax)
+		      : "m" (low_user_desc_clear),
+			[arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
+		      : "flags");
+
+#ifdef __x86_64__
+	syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base);
+#endif
+
+	/* Restore GS/BASE for glibc */
+	asm volatile ("movw %[prev_sel], %%gs" : : [prev_sel] "rm" (prev_sel));
+#ifdef __x86_64__
+	if (saved_base)
+		syscall(SYS_arch_prctl, ARCH_SET_GS, saved_base);
+#endif
+
+	if (sel != 0) {
+		result = "FAIL";
+		nerrs++;
+	} else {
+		result = "OK";
+	}
+	printf("[%s]\tInvalidate GS with set_thread_area: new GS = 0x%hx\n",
+	       result, sel);
+
+#ifdef __x86_64__
+	if (sel == 0 && new_base != 0) {
+		nerrs++;
+		printf("[FAIL]\tNew GSBASE was 0x%lx\n", new_base);
+	} else {
+		printf("[OK]\tNew GSBASE was zero\n");
+	}
+#endif
+}
+
 int main(int argc, char **argv)
 {
 	if (argc == 1 && !strcmp(argv[0], "ldt_gdt_test_exec"))
 		return finish_exec_test();
 
+	setup_counter_page();
+	setup_low_user_desc();
+
 	do_simple_tests();
 
 	do_multicpu_tests();
 
 	do_exec_test();
 
+	test_gdt_invalidation();
+
 	return nerrs ? 1 : 0;
 }
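Side note on the selftest hunk above: the descriptor it hands to int $0x80 with eax = 243 (the 32-bit __NR_set_thread_area) is built earlier in the file by setup_low_user_desc(), which is not visible in this hunk. A minimal sketch of what such a "clear" descriptor amounts to, using the struct user_desc from <asm/ldt.h> and a hypothetical helper name that is not part of the patch:

#include <asm/ldt.h>	/* struct user_desc */

/*
 * Hypothetical helper, for illustration only: reuse the live GDT entry
 * but mark it not-present.  Installing this via set_thread_area() while
 * DS/ES/FS/GS still hold the matching selector is what makes the kernel
 * and CPU zero that selector on return to userspace, which is exactly
 * the condition the test checks for.
 */
static void make_clear_desc(struct user_desc *clear,
			    const struct user_desc *live)
{
	*clear = *live;			/* same entry_number, base, limit */
	clear->read_exec_only = 1;
	clear->seg_not_present = 1;	/* descriptor becomes unusable */
}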
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 7a79b68..e5d6108 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -41,6 +41,9 @@
 config HAVE_KVM_ARCH_TLB_FLUSH_ALL
        bool
 
+config HAVE_KVM_INVALID_WAKEUPS
+       bool
+
 config KVM_GENERIC_DIRTYLOG_READ_PROTECT
        bool
 
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a9ad4fe..409db33 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -17,7 +17,6 @@
  */
 
 #include <linux/cpu.h>
-#include <linux/of_irq.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/interrupt.h>
@@ -91,6 +90,8 @@
 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
 	vcpu->arch.timer_cpu.armed = false;
 
+	WARN_ON(!kvm_timer_should_fire(vcpu));
+
 	/*
 	 * If the vcpu is blocked we want to wake it up so that it will see
 	 * the timer has expired when entering the guest.
@@ -98,10 +99,46 @@
 	kvm_vcpu_kick(vcpu);
 }
 
+static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
+{
+	cycle_t cval, now;
+
+	cval = vcpu->arch.timer_cpu.cntv_cval;
+	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+	if (now < cval) {
+		u64 ns;
+
+		ns = cyclecounter_cyc2ns(timecounter->cc,
+					 cval - now,
+					 timecounter->mask,
+					 &timecounter->frac);
+		return ns;
+	}
+
+	return 0;
+}
+
 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 {
 	struct arch_timer_cpu *timer;
+	struct kvm_vcpu *vcpu;
+	u64 ns;
+
 	timer = container_of(hrt, struct arch_timer_cpu, timer);
+	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
+
+	/*
+	 * Check that the timer has really expired from the guest's
+	 * PoV (NTP on the host may have forced it to expire
+	 * early). If we should have slept longer, restart it.
+	 */
+	ns = kvm_timer_compute_delta(vcpu);
+	if (unlikely(ns)) {
+		hrtimer_forward_now(hrt, ns_to_ktime(ns));
+		return HRTIMER_RESTART;
+	}
+
 	queue_work(wqueue, &timer->expired);
 	return HRTIMER_NORESTART;
 }
@@ -176,8 +213,6 @@
 void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-	u64 ns;
-	cycle_t cval, now;
 
 	BUG_ON(timer_is_armed(timer));
 
@@ -197,14 +232,7 @@
 		return;
 
 	/*  The timer has not yet expired, schedule a background timer */
-	cval = timer->cntv_cval;
-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
-	ns = cyclecounter_cyc2ns(timecounter->cc,
-				 cval - now,
-				 timecounter->mask,
-				 &timecounter->frac);
-	timer_arm(timer, ns);
+	timer_arm(timer, kvm_timer_compute_delta(vcpu));
 }
 
 void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
@@ -409,45 +437,29 @@
 	.notifier_call = kvm_timer_cpu_notify,
 };
 
-static const struct of_device_id arch_timer_of_match[] = {
-	{ .compatible	= "arm,armv7-timer",	},
-	{ .compatible	= "arm,armv8-timer",	},
-	{},
-};
-
 int kvm_timer_hyp_init(void)
 {
-	struct device_node *np;
-	unsigned int ppi;
+	struct arch_timer_kvm_info *info;
 	int err;
 
-	timecounter = arch_timer_get_timecounter();
-	if (!timecounter)
-		return -ENODEV;
+	info = arch_timer_get_kvm_info();
+	timecounter = &info->timecounter;
 
-	np = of_find_matching_node(NULL, arch_timer_of_match);
-	if (!np) {
-		kvm_err("kvm_arch_timer: can't find DT node\n");
+	if (info->virtual_irq <= 0) {
+		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
+			info->virtual_irq);
 		return -ENODEV;
 	}
+	host_vtimer_irq = info->virtual_irq;
 
-	ppi = irq_of_parse_and_map(np, 2);
-	if (!ppi) {
-		kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = request_percpu_irq(ppi, kvm_arch_timer_handler,
+	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
 				 "kvm guest timer", kvm_get_running_vcpus());
 	if (err) {
 		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
-			ppi, err);
+			host_vtimer_irq, err);
 		goto out;
 	}
 
-	host_vtimer_irq = ppi;
-
 	err = __register_cpu_notifier(&kvm_timer_cpu_nb);
 	if (err) {
 		kvm_err("Cannot register timer CPU notifier\n");
@@ -460,14 +472,13 @@
 		goto out_free;
 	}
 
-	kvm_info("%s IRQ%d\n", np->name, ppi);
+	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
 	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
 
 	goto out;
 out_free:
-	free_percpu_irq(ppi, kvm_get_running_vcpus());
+	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
 out:
-	of_node_put(np);
 	return err;
 }
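The arch_timer.c change above guards against the background hrtimer firing before the guest's deadline, for example when NTP slews the host clock. A standalone sketch of the same re-arm pattern, assuming a hypothetical remaining_ns() helper that plays the role of kvm_timer_compute_delta():

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Hypothetical helper standing in for kvm_timer_compute_delta(): returns
 * the nanoseconds still left until the real deadline, or 0 once it has
 * passed.  A real implementation compares the deadline with the clock. */
static u64 remaining_ns(void)
{
	return 0;
}

static enum hrtimer_restart early_fire_safe_expire(struct hrtimer *hrt)
{
	u64 ns = remaining_ns();

	if (ns) {
		/* Fired early: push the expiry forward and keep waiting. */
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	/* The deadline has really passed: hand off to the expiry work. */
	return HRTIMER_NORESTART;
}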
 
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index b5754c6..575c7aa 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -193,11 +193,12 @@
 {
 	u64 reg = 0;
 
-	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
 		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 		reg &= kvm_pmu_valid_counter_mask(vcpu);
+	}
 
 	return reg;
 }
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 67ec334..7e826c9 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -20,9 +20,6 @@
 #include <linux/kvm_host.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
 
 #include <linux/irqchip/arm-gic.h>
 
@@ -186,38 +183,39 @@
 }
 
 /**
- * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
- * @node:	pointer to the DT node
- * @ops: 	address of a pointer to the GICv2 operations
- * @params:	address of a pointer to HW-specific parameters
+ * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
+ * @gic_kvm_info:	pointer to the GIC description
+ * @ops:		address of a pointer to the GICv2 operations
+ * @params:		address of a pointer to HW-specific parameters
  *
  * Returns 0 if a GICv2 has been found, with the low level operations
  * in *ops and the HW parameters in *params. Returns an error code
  * otherwise.
  */
-int vgic_v2_probe(struct device_node *vgic_node,
-		  const struct vgic_ops **ops,
-		  const struct vgic_params **params)
+int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
+		   const struct vgic_ops **ops,
+		   const struct vgic_params **params)
 {
 	int ret;
-	struct resource vctrl_res;
-	struct resource vcpu_res;
 	struct vgic_params *vgic = &vgic_v2_params;
+	const struct resource *vctrl_res = &gic_kvm_info->vctrl;
+	const struct resource *vcpu_res = &gic_kvm_info->vcpu;
 
-	vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
-	if (!vgic->maint_irq) {
-		kvm_err("error getting vgic maintenance irq from DT\n");
+	if (!gic_kvm_info->maint_irq) {
+		kvm_err("error getting vgic maintenance irq\n");
+		ret = -ENXIO;
+		goto out;
+	}
+	vgic->maint_irq = gic_kvm_info->maint_irq;
+
+	if (!gic_kvm_info->vctrl.start) {
+		kvm_err("GICH not present in the firmware table\n");
 		ret = -ENXIO;
 		goto out;
 	}
 
-	ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
-	if (ret) {
-		kvm_err("Cannot obtain GICH resource\n");
-		goto out;
-	}
-
-	vgic->vctrl_base = of_iomap(vgic_node, 2);
+	vgic->vctrl_base = ioremap(gic_kvm_info->vctrl.start,
+				   resource_size(&gic_kvm_info->vctrl));
 	if (!vgic->vctrl_base) {
 		kvm_err("Cannot ioremap GICH\n");
 		ret = -ENOMEM;
@@ -228,29 +226,23 @@
 	vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
 
 	ret = create_hyp_io_mappings(vgic->vctrl_base,
-				     vgic->vctrl_base + resource_size(&vctrl_res),
-				     vctrl_res.start);
+				     vgic->vctrl_base + resource_size(vctrl_res),
+				     vctrl_res->start);
 	if (ret) {
 		kvm_err("Cannot map VCTRL into hyp\n");
 		goto out_unmap;
 	}
 
-	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
-		kvm_err("Cannot obtain GICV resource\n");
-		ret = -ENXIO;
-		goto out_unmap;
-	}
-
-	if (!PAGE_ALIGNED(vcpu_res.start)) {
+	if (!PAGE_ALIGNED(vcpu_res->start)) {
 		kvm_err("GICV physical address 0x%llx not page aligned\n",
-			(unsigned long long)vcpu_res.start);
+			(unsigned long long)vcpu_res->start);
 		ret = -ENXIO;
 		goto out_unmap;
 	}
 
-	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+	if (!PAGE_ALIGNED(resource_size(vcpu_res))) {
 		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
-			(unsigned long long)resource_size(&vcpu_res),
+			(unsigned long long)resource_size(vcpu_res),
 			PAGE_SIZE);
 		ret = -ENXIO;
 		goto out_unmap;
@@ -259,10 +251,10 @@
 	vgic->can_emulate_gicv2 = true;
 	kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);
 
-	vgic->vcpu_base = vcpu_res.start;
+	vgic->vcpu_base = vcpu_res->start;
 
-	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
-		 vctrl_res.start, vgic->maint_irq);
+	kvm_info("GICH base=0x%llx, GICV base=0x%llx, IRQ=%d\n",
+		 gic_kvm_info->vctrl.start, vgic->vcpu_base, vgic->maint_irq);
 
 	vgic->type = VGIC_V2;
 	vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;
@@ -276,6 +268,5 @@
 out_unmap:
 	iounmap(vgic->vctrl_base);
 out:
-	of_node_put(vgic_node);
 	return ret;
 }
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 999bdc6..c02a1b1 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -20,11 +20,9 @@
 #include <linux/kvm_host.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
 
 #include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/arm-gic-common.h>
 
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_arm.h>
@@ -222,30 +220,24 @@
 }
 
 /**
- * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
- * @node:	pointer to the DT node
- * @ops: 	address of a pointer to the GICv3 operations
- * @params:	address of a pointer to HW-specific parameters
+ * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
+ * @gic_kvm_info:	pointer to the GIC description
+ * @ops:		address of a pointer to the GICv3 operations
+ * @params:		address of a pointer to HW-specific parameters
  *
  * Returns 0 if a GICv3 has been found, with the low level operations
  * in *ops and the HW parameters in *params. Returns an error code
  * otherwise.
  */
-int vgic_v3_probe(struct device_node *vgic_node,
+int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
 		  const struct vgic_ops **ops,
 		  const struct vgic_params **params)
 {
 	int ret = 0;
-	u32 gicv_idx;
-	struct resource vcpu_res;
 	struct vgic_params *vgic = &vgic_v3_params;
+	const struct resource *vcpu_res = &gic_kvm_info->vcpu;
 
-	vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
-	if (!vgic->maint_irq) {
-		kvm_err("error getting vgic maintenance irq from DT\n");
-		ret = -ENXIO;
-		goto out;
-	}
+	vgic->maint_irq = gic_kvm_info->maint_irq;
 
 	ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
 
@@ -256,24 +248,19 @@
 	vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
 	vgic->can_emulate_gicv2 = false;
 
-	if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
-		gicv_idx = 1;
-
-	gicv_idx += 3; /* Also skip GICD, GICC, GICH */
-	if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
+	if (!vcpu_res->start) {
 		kvm_info("GICv3: no GICV resource entry\n");
 		vgic->vcpu_base = 0;
-	} else if (!PAGE_ALIGNED(vcpu_res.start)) {
+	} else if (!PAGE_ALIGNED(vcpu_res->start)) {
 		pr_warn("GICV physical address 0x%llx not page aligned\n",
-			(unsigned long long)vcpu_res.start);
+			(unsigned long long)vcpu_res->start);
 		vgic->vcpu_base = 0;
-	} else if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+	} else if (!PAGE_ALIGNED(resource_size(vcpu_res))) {
 		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
-			(unsigned long long)resource_size(&vcpu_res),
+			(unsigned long long)resource_size(vcpu_res),
 			PAGE_SIZE);
-		vgic->vcpu_base = 0;
 	} else {
-		vgic->vcpu_base = vcpu_res.start;
+		vgic->vcpu_base = vcpu_res->start;
 		vgic->can_emulate_gicv2 = true;
 		kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
 					KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -286,15 +273,13 @@
 	vgic->type = VGIC_V3;
 	vgic->max_gic_vcpus = VGIC_V3_MAX_CPUS;
 
-	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
-		 vcpu_res.start, vgic->maint_irq);
+	kvm_info("GICV base=0x%llx, IRQ=%d\n",
+		 vgic->vcpu_base, vgic->maint_irq);
 
 	on_each_cpu(vgic_cpu_init_lrs, vgic, 1);
 
 	*ops = &vgic_v3_ops;
 	*params = vgic;
 
-out:
-	of_node_put(vgic_node);
 	return ret;
 }
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 00429b3..60668a7 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -21,9 +21,7 @@
 #include <linux/kvm_host.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
+#include <linux/irq.h>
 #include <linux/rculist.h>
 #include <linux/uaccess.h>
 
@@ -33,6 +31,7 @@
 #include <trace/events/kvm.h>
 #include <asm/kvm.h>
 #include <kvm/iodev.h>
+#include <linux/irqchip/arm-gic-common.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -2389,33 +2388,38 @@
 	.notifier_call = vgic_cpu_notify,
 };
 
-static const struct of_device_id vgic_ids[] = {
-	{ .compatible = "arm,cortex-a15-gic",	.data = vgic_v2_probe, },
-	{ .compatible = "arm,cortex-a7-gic",	.data = vgic_v2_probe, },
-	{ .compatible = "arm,gic-400",		.data = vgic_v2_probe, },
-	{ .compatible = "arm,gic-v3",		.data = vgic_v3_probe, },
-	{},
-};
+static int kvm_vgic_probe(void)
+{
+	const struct gic_kvm_info *gic_kvm_info;
+	int ret;
+
+	gic_kvm_info = gic_get_kvm_info();
+	if (!gic_kvm_info)
+		return -ENODEV;
+
+	switch (gic_kvm_info->type) {
+	case GIC_V2:
+		ret = vgic_v2_probe(gic_kvm_info, &vgic_ops, &vgic);
+		break;
+	case GIC_V3:
+		ret = vgic_v3_probe(gic_kvm_info, &vgic_ops, &vgic);
+		break;
+	default:
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
 
 int kvm_vgic_hyp_init(void)
 {
-	const struct of_device_id *matched_id;
-	const int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
-				const struct vgic_params **);
-	struct device_node *vgic_node;
 	int ret;
 
-	vgic_node = of_find_matching_node_and_match(NULL,
-						    vgic_ids, &matched_id);
-	if (!vgic_node) {
-		kvm_err("error: no compatible GIC node found\n");
-		return -ENODEV;
-	}
-
-	vgic_probe = matched_id->data;
-	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
-	if (ret)
+	ret = kvm_vgic_probe();
+	if (ret) {
+		kvm_err("error: KVM vGIC probing failed\n");
 		return ret;
+	}
 
 	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
 				 "vgic", kvm_get_running_vcpus());
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 46dbc0a..e469b60 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -408,15 +408,17 @@
 	 */
 	fdput(f);
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
-	irqfd->consumer.token = (void *)irqfd->eventfd;
-	irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
-	irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
-	irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
-	irqfd->consumer.start = kvm_arch_irq_bypass_start;
-	ret = irq_bypass_register_consumer(&irqfd->consumer);
-	if (ret)
-		pr_info("irq bypass consumer (token %p) registration fails: %d\n",
+	if (kvm_arch_has_irq_bypass()) {
+		irqfd->consumer.token = (void *)irqfd->eventfd;
+		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
+		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
+		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
+		irqfd->consumer.start = kvm_arch_irq_bypass_start;
+		ret = irq_bypass_register_consumer(&irqfd->consumer);
+		if (ret)
+			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
 				irqfd->consumer.token, ret);
+	}
 #endif
 
 	return 0;
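The eventfd.c hunk above only registers the bypass consumer when the architecture reports support, and it pairs with the virt/lib/irqbypass.c guards later in this diff that reject registrations whose token was never set (the token is simply the eventfd context both sides share). A small hypothetical caller showing the failure mode the new check closes:

#include <linux/irqbypass.h>

static struct irq_bypass_producer example_producer;	/* .token deliberately left NULL */

static int example_register(void)
{
	/* With the new guard this returns -EINVAL up front instead of
	 * adding a token-less entry that could never be matched against
	 * a consumer. */
	return irq_bypass_register_producer(&example_producer);
}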
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4fd482f..dd4ac9d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2028,6 +2028,8 @@
 			 */
 			if (kvm_vcpu_check_block(vcpu) < 0) {
 				++vcpu->stat.halt_successful_poll;
+				if (!vcpu_valid_wakeup(vcpu))
+					++vcpu->stat.halt_poll_invalid;
 				goto out;
 			}
 			cur = ktime_get();
@@ -2053,7 +2055,9 @@
 out:
 	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 
-	if (halt_poll_ns) {
+	if (!vcpu_valid_wakeup(vcpu))
+		shrink_halt_poll_ns(vcpu);
+	else if (halt_poll_ns) {
 		if (block_ns <= vcpu->halt_poll_ns)
 			;
 		/* we had a long block, shrink polling */
@@ -2066,18 +2070,14 @@
 	} else
 		vcpu->halt_poll_ns = 0;
 
-	trace_kvm_vcpu_wakeup(block_ns, waited);
+	trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
+	kvm_arch_vcpu_block_finish(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_block);
 
 #ifndef CONFIG_S390
-/*
- * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
- */
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 {
-	int me;
-	int cpu = vcpu->cpu;
 	struct swait_queue_head *wqp;
 
 	wqp = kvm_arch_vcpu_wq(vcpu);
@@ -2086,6 +2086,18 @@
 		++vcpu->stat.halt_wakeup;
 	}
 
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
+
+/*
+ * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
+ */
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	int me;
+	int cpu = vcpu->cpu;
+
+	kvm_vcpu_wake_up(vcpu);
 	me = get_cpu();
 	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
 		if (kvm_arch_vcpu_should_kick(vcpu))
@@ -2272,7 +2284,7 @@
 	int r;
 	struct kvm_vcpu *vcpu;
 
-	if (id >= KVM_MAX_VCPUS)
+	if (id >= KVM_MAX_VCPU_ID)
 		return -EINVAL;
 
 	vcpu = kvm_arch_vcpu_create(kvm, id);
@@ -2746,6 +2758,8 @@
 	case KVM_CAP_MULTI_ADDRESS_SPACE:
 		return KVM_ADDRESS_SPACE_NUM;
 #endif
+	case KVM_CAP_MAX_VCPU_ID:
+		return KVM_MAX_VCPU_ID;
 	default:
 		break;
 	}
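The kvm_main.c hunks split the wake-up half out of kvm_vcpu_kick() and export it separately. A hedged usage sketch, with a hypothetical caller that is not part of this series: an interrupt handler that only needs to lift a halted vCPU out of kvm_vcpu_block(), and does not need the cross-CPU IPI that kvm_vcpu_kick() would also send, can call the lighter primitive directly.

#include <linux/interrupt.h>
#include <linux/kvm_host.h>

#define EXAMPLE_REQ	8	/* illustrative request bit, not a real KVM request */

/* Hypothetical device interrupt handler, for illustration only. */
static irqreturn_t example_wakeup_isr(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = dev_id;

	kvm_make_request(EXAMPLE_REQ, vcpu);	/* record why the vCPU was woken */
	kvm_vcpu_wake_up(vcpu);			/* wake the blocked vCPU, no IPI */

	return IRQ_HANDLED;
}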
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
index 09a03b5..52abac4 100644
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -89,6 +89,9 @@
 	struct irq_bypass_producer *tmp;
 	struct irq_bypass_consumer *consumer;
 
+	if (!producer->token)
+		return -EINVAL;
+
 	might_sleep();
 
 	if (!try_module_get(THIS_MODULE))
@@ -136,6 +139,9 @@
 	struct irq_bypass_producer *tmp;
 	struct irq_bypass_consumer *consumer;
 
+	if (!producer->token)
+		return;
+
 	might_sleep();
 
 	if (!try_module_get(THIS_MODULE))
@@ -177,7 +183,8 @@
 	struct irq_bypass_consumer *tmp;
 	struct irq_bypass_producer *producer;
 
-	if (!consumer->add_producer || !consumer->del_producer)
+	if (!consumer->token ||
+	    !consumer->add_producer || !consumer->del_producer)
 		return -EINVAL;
 
 	might_sleep();
@@ -227,6 +234,9 @@
 	struct irq_bypass_consumer *tmp;
 	struct irq_bypass_producer *producer;
 
+	if (!consumer->token)
+		return;
+
 	might_sleep();
 
 	if (!try_module_get(THIS_MODULE))